-rw-r--r-- Makefile.inc1 | 1
-rw-r--r-- ObsoleteFiles.inc | 8
-rw-r--r-- contrib/llvm/include/llvm-c/Core.h | 24
-rw-r--r-- contrib/llvm/include/llvm-c/EnhancedDisassembly.h | 2
-rw-r--r-- contrib/llvm/include/llvm-c/Initialization.h | 40
-rw-r--r-- contrib/llvm/include/llvm-c/LinkTimeOptimizer.h | 2
-rw-r--r-- contrib/llvm/include/llvm-c/Transforms/Scalar.h | 3
-rw-r--r-- contrib/llvm/include/llvm-c/lto.h | 50
-rw-r--r-- contrib/llvm/include/llvm/ADT/APFloat.h | 7
-rw-r--r-- contrib/llvm/include/llvm/ADT/APInt.h | 142
-rw-r--r-- contrib/llvm/include/llvm/ADT/APSInt.h | 18
-rw-r--r-- contrib/llvm/include/llvm/ADT/ArrayRef.h | 121
-rw-r--r-- contrib/llvm/include/llvm/ADT/BitVector.h | 36
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseMap.h | 3
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseMapInfo.h | 14
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseSet.h | 3
-rw-r--r-- contrib/llvm/include/llvm/ADT/EquivalenceClasses.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/FoldingSet.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h | 54
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableList.h | 14
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableMap.h | 53
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableSet.h | 553
-rw-r--r-- contrib/llvm/include/llvm/ADT/InMemoryStruct.h | 77
-rw-r--r-- contrib/llvm/include/llvm/ADT/IndexedMap.h | 14
-rw-r--r-- contrib/llvm/include/llvm/ADT/IntEqClasses.h | 88
-rw-r--r-- contrib/llvm/include/llvm/ADT/IntervalMap.h | 2139
-rw-r--r-- contrib/llvm/include/llvm/ADT/Optional.h | 54
-rw-r--r-- contrib/llvm/include/llvm/ADT/PointerIntPair.h | 7
-rw-r--r-- contrib/llvm/include/llvm/ADT/PointerUnion.h | 12
-rw-r--r-- contrib/llvm/include/llvm/ADT/PostOrderIterator.h | 6
-rw-r--r-- contrib/llvm/include/llvm/ADT/SCCIterator.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/ScopedHashTable.h | 103
-rw-r--r-- contrib/llvm/include/llvm/ADT/SetVector.h | 4
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallBitVector.h | 7
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallPtrSet.h | 5
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallString.h | 12
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallVector.h | 26
-rw-r--r-- contrib/llvm/include/llvm/ADT/SparseBitVector.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/Statistic.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringExtras.h | 9
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringMap.h | 21
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringRef.h | 25
-rw-r--r-- contrib/llvm/include/llvm/ADT/Triple.h | 77
-rw-r--r-- contrib/llvm/include/llvm/ADT/Twine.h | 30
-rw-r--r-- contrib/llvm/include/llvm/ADT/ValueMap.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ADT/ilist.h | 1
-rw-r--r-- contrib/llvm/include/llvm/Analysis/AliasAnalysis.h | 368
-rw-r--r-- contrib/llvm/include/llvm/Analysis/AliasSetTracker.h | 60
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CallGraph.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CodeMetrics.h | 31
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ConstantFolding.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DIBuilder.h | 459
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DebugInfo.h | 196
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DominanceFrontier.h | 189
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DominatorInternals.h | 189
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Dominators.h | 265
-rw-r--r-- contrib/llvm/include/llvm/Analysis/FindUsedTypes.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Analysis/InlineCost.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Analysis/InstructionSimplify.h | 111
-rw-r--r-- contrib/llvm/include/llvm/Analysis/IntervalPartition.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LazyValueInfo.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h | 10
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LibCallSemantics.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopInfo.h | 43
-rw-r--r-- contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h | 109
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Passes.h | 35
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PathNumbering.h | 304
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PathProfileInfo.h | 113
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PointerTracking.h | 132
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PostDominators.h | 7
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h | 33
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionInfo.h | 57
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionPass.h | 126
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolution.h | 146
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h | 6
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h | 138
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ValueTracking.h | 52
-rw-r--r-- contrib/llvm/include/llvm/Attributes.h | 7
-rw-r--r-- contrib/llvm/include/llvm/BasicBlock.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/Archive.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/BitCodes.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h | 12
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/ReaderWriter.h | 9
-rw-r--r-- contrib/llvm/include/llvm/CallingConv.h | 27
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/Analysis.h | 11
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/AsmPrinter.h | 7
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/BinaryObject.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h | 24
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/CallingConvLower.h | 42
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/EdgeBundles.h | 61
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/FastISel.h | 15
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h | 7
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/GCMetadata.h | 9
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h | 52
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h | 5
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h | 22
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h | 4
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveInterval.h | 193
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h | 45
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h | 18
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveVariables.h | 11
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachORelocation.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h | 19
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineCodeInfo.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineDominators.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h | 14
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunction.h | 26
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h | 4
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineInstr.h | 36
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h | 21
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineLocation.h | 7
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h | 4
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineLoopRanges.h | 112
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h | 72
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h | 73
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineOperand.h | 134
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h | 58
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineRelocation.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h (renamed from contrib/llvm/lib/CodeGen/PBQP/Graph.h) | 0
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h (renamed from contrib/llvm/lib/CodeGen/PBQP/HeuristicBase.h) | 0
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicSolver.h (renamed from contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h) | 0
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h (renamed from contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h) | 8
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Math.h (renamed from contrib/llvm/lib/CodeGen/PBQP/Math.h) | 0
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h (renamed from contrib/llvm/lib/CodeGen/PBQP/Solution.h) | 9
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/Passes.h | 35
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h | 94
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h | 4
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h | 167
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h | 85
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h | 28
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h | 129
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAG.h | 106
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h | 102
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 37
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SlotIndexes.h | 118
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | 6
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ValueTypes.h | 59
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ValueTypes.td | 11
-rw-r--r-- contrib/llvm/include/llvm/CompilerDriver/CompilationGraph.h | 2
-rw-r--r-- contrib/llvm/include/llvm/CompilerDriver/Tool.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Constant.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Constants.h | 130
-rw-r--r-- contrib/llvm/include/llvm/DerivedTypes.h | 3
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h | 164
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h | 2
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h | 52
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h | 12
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Function.h | 2
-rw-r--r-- contrib/llvm/include/llvm/GlobalAlias.h | 3
-rw-r--r-- contrib/llvm/include/llvm/GlobalValue.h | 17
-rw-r--r-- contrib/llvm/include/llvm/GlobalVariable.h | 34
-rw-r--r-- contrib/llvm/include/llvm/InitializePasses.h | 235
-rw-r--r-- contrib/llvm/include/llvm/InlineAsm.h | 59
-rw-r--r-- contrib/llvm/include/llvm/InstrTypes.h | 193
-rw-r--r-- contrib/llvm/include/llvm/Instruction.h | 7
-rw-r--r-- contrib/llvm/include/llvm/Instructions.h | 422
-rw-r--r-- contrib/llvm/include/llvm/IntrinsicInst.h | 29
-rw-r--r-- contrib/llvm/include/llvm/Intrinsics.td | 8
-rw-r--r-- contrib/llvm/include/llvm/IntrinsicsARM.td | 6
-rw-r--r-- contrib/llvm/include/llvm/IntrinsicsX86.td | 314
-rw-r--r-- contrib/llvm/include/llvm/IntrinsicsXCore.td | 22
-rw-r--r-- contrib/llvm/include/llvm/LLVMContext.h | 39
-rw-r--r-- contrib/llvm/include/llvm/LinkAllPasses.h | 18
-rw-r--r-- contrib/llvm/include/llvm/LinkAllVMCore.h | 17
-rw-r--r-- contrib/llvm/include/llvm/MC/EDInstInfo.h | 2
-rw-r--r-- contrib/llvm/include/llvm/MC/ELFObjectWriter.h | 46
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmInfo.h | 102
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmLayout.h | 43
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAssembler.h | 189
-rw-r--r-- contrib/llvm/include/llvm/MC/MCCodeEmitter.h | 38
-rw-r--r-- contrib/llvm/include/llvm/MC/MCContext.h | 82
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDirectives.h | 9
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDisassembler.h | 2
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDwarf.h | 133
-rw-r--r-- contrib/llvm/include/llvm/MC/MCELFObjectWriter.h | 47
-rw-r--r-- contrib/llvm/include/llvm/MC/MCELFSymbolFlags.h | 11
-rw-r--r-- contrib/llvm/include/llvm/MC/MCExpr.h | 44
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFixup.h | 16
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFixupKindInfo.h | 43
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInst.h | 54
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstPrinter.h | 8
-rw-r--r-- contrib/llvm/include/llvm/MC/MCMachOSymbolFlags.h | 4
-rw-r--r-- contrib/llvm/include/llvm/MC/MCMachObjectWriter.h | 65
-rw-r--r-- contrib/llvm/include/llvm/MC/MCObjectStreamer.h | 18
-rw-r--r-- contrib/llvm/include/llvm/MC/MCObjectWriter.h | 35
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h | 16
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h | 33
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h | 6
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h | 4
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSection.h | 11
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionCOFF.h | 8
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionELF.h | 151
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionMachO.h | 22
-rw-r--r-- contrib/llvm/include/llvm/MC/MCStreamer.h | 232
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbol.h | 19
-rw-r--r-- contrib/llvm/include/llvm/MC/MCValue.h | 2
-rw-r--r-- contrib/llvm/include/llvm/MC/MachObjectWriter.h | 44
-rw-r--r-- contrib/llvm/include/llvm/Metadata.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Module.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Object/MachOFormat.h | 367
-rw-r--r-- contrib/llvm/include/llvm/Object/MachOObject.h | 180
-rw-r--r-- contrib/llvm/include/llvm/Object/ObjectFile.h | 262
-rw-r--r-- contrib/llvm/include/llvm/OperandTraits.h | 28
-rw-r--r-- contrib/llvm/include/llvm/Operator.h | 159
-rw-r--r-- contrib/llvm/include/llvm/Pass.h | 4
-rw-r--r-- contrib/llvm/include/llvm/PassManagers.h | 11
-rw-r--r-- contrib/llvm/include/llvm/PassRegistry.h | 75
-rw-r--r-- contrib/llvm/include/llvm/PassSupport.h | 105
-rw-r--r-- contrib/llvm/include/llvm/Support/AIXDataTypesFix.h (renamed from contrib/llvm/include/llvm/System/AIXDataTypesFix.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/AlignOf.h | 6
-rw-r--r-- contrib/llvm/include/llvm/Support/Allocator.h | 10
-rw-r--r-- contrib/llvm/include/llvm/Support/Atomic.h (renamed from contrib/llvm/include/llvm/System/Atomic.h) | 4
-rw-r--r-- contrib/llvm/include/llvm/Support/COFF.h | 12
-rw-r--r-- contrib/llvm/include/llvm/Support/CallSite.h | 19
-rw-r--r-- contrib/llvm/include/llvm/Support/Casting.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Support/Compiler.h | 86
-rw-r--r-- contrib/llvm/include/llvm/Support/ConstantFolder.h | 72
-rw-r--r-- contrib/llvm/include/llvm/Support/ConstantRange.h | 17
-rw-r--r-- contrib/llvm/include/llvm/Support/CrashRecoveryContext.h | 8
-rw-r--r-- contrib/llvm/include/llvm/Support/DataTypes.h.in | 111
-rw-r--r-- contrib/llvm/include/llvm/Support/Disassembler.h (renamed from contrib/llvm/include/llvm/System/Disassembler.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/Dwarf.h | 7
-rw-r--r-- contrib/llvm/include/llvm/Support/DynamicLibrary.h (renamed from contrib/llvm/include/llvm/System/DynamicLibrary.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/DynamicLinker.h | 40
-rw-r--r-- contrib/llvm/include/llvm/Support/ELF.h | 299
-rw-r--r-- contrib/llvm/include/llvm/Support/Endian.h | 213
-rw-r--r-- contrib/llvm/include/llvm/Support/Errno.h (renamed from contrib/llvm/include/llvm/System/Errno.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/ErrorHandling.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Support/FEnv.h | 56
-rw-r--r-- contrib/llvm/include/llvm/Support/FileSystem.h | 690
-rw-r--r-- contrib/llvm/include/llvm/Support/FileUtilities.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/GraphWriter.h | 45
-rw-r--r-- contrib/llvm/include/llvm/Support/Host.h (renamed from contrib/llvm/include/llvm/System/Host.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/IRBuilder.h | 479
-rw-r--r-- contrib/llvm/include/llvm/Support/IRReader.h | 24
-rw-r--r-- contrib/llvm/include/llvm/Support/IncludeFile.h (renamed from contrib/llvm/include/llvm/System/IncludeFile.h) | 20
-rw-r--r-- contrib/llvm/include/llvm/Support/MachO.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Support/ManagedStatic.h | 10
-rw-r--r-- contrib/llvm/include/llvm/Support/MathExtras.h | 48
-rw-r--r-- contrib/llvm/include/llvm/Support/Memory.h (renamed from contrib/llvm/include/llvm/System/Memory.h) | 14
-rw-r--r-- contrib/llvm/include/llvm/Support/MemoryBuffer.h | 58
-rw-r--r-- contrib/llvm/include/llvm/Support/MemoryObject.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/Mutex.h (renamed from contrib/llvm/include/llvm/System/Mutex.h) | 24
-rw-r--r-- contrib/llvm/include/llvm/Support/MutexGuard.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/NoFolder.h | 138
-rw-r--r-- contrib/llvm/include/llvm/Support/Path.h (renamed from contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp) | 14
-rw-r--r-- contrib/llvm/include/llvm/Support/PathV1.h (renamed from contrib/llvm/include/llvm/System/Path.h) | 101
-rw-r--r-- contrib/llvm/include/llvm/Support/PathV2.h | 347
-rw-r--r-- contrib/llvm/include/llvm/Support/PatternMatch.h | 339
-rw-r--r-- contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/Process.h (renamed from contrib/llvm/include/llvm/System/Process.h) | 4
-rw-r--r-- contrib/llvm/include/llvm/Support/Program.h (renamed from contrib/llvm/include/llvm/System/Program.h) | 12
-rw-r--r-- contrib/llvm/include/llvm/Support/RWMutex.h (renamed from contrib/llvm/include/llvm/System/RWMutex.h) | 40
-rw-r--r-- contrib/llvm/include/llvm/Support/Signals.h (renamed from contrib/llvm/include/llvm/System/Signals.h) | 4
-rw-r--r-- contrib/llvm/include/llvm/Support/Solaris.h (renamed from contrib/llvm/include/llvm/System/Solaris.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/SourceMgr.h | 15
-rw-r--r-- contrib/llvm/include/llvm/Support/StableBasicBlockNumbering.h | 59
-rw-r--r-- contrib/llvm/include/llvm/Support/StandardPasses.h | 75
-rw-r--r-- contrib/llvm/include/llvm/Support/SwapByteOrder.h | 101
-rw-r--r-- contrib/llvm/include/llvm/Support/SystemUtils.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Support/TargetFolder.h | 68
-rw-r--r-- contrib/llvm/include/llvm/Support/ThreadLocal.h (renamed from contrib/llvm/include/llvm/System/ThreadLocal.h) | 12
-rw-r--r-- contrib/llvm/include/llvm/Support/Threading.h (renamed from contrib/llvm/include/llvm/System/Threading.h) | 26
-rw-r--r-- contrib/llvm/include/llvm/Support/TimeValue.h (renamed from contrib/llvm/include/llvm/System/TimeValue.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/Timer.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/ToolOutputFile.h | 62
-rw-r--r-- contrib/llvm/include/llvm/Support/TypeBuilder.h | 6
-rw-r--r-- contrib/llvm/include/llvm/Support/Valgrind.h (renamed from contrib/llvm/include/llvm/System/Valgrind.h) | 2
-rw-r--r-- contrib/llvm/include/llvm/Support/raw_ostream.h | 66
-rw-r--r-- contrib/llvm/include/llvm/Support/system_error.h | 910
-rw-r--r-- contrib/llvm/include/llvm/System/Alarm.h | 51
-rw-r--r-- contrib/llvm/include/llvm/Target/Mangler.h | 7
-rw-r--r-- contrib/llvm/include/llvm/Target/SubtargetFeature.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Target/Target.td | 129
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetAsmBackend.h | 67
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetAsmInfo.h | 75
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetAsmParser.h | 16
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetCallingConv.h | 14
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetData.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetFrameInfo.h | 97
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetFrameLowering.h | 196
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetInstrDesc.h | 13
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetInstrInfo.h | 187
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetInstrItineraries.h | 88
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetJITInfo.h | 2
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetLibraryInfo.h | 66
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetLowering.h | 171
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h | 15
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetMachine.h | 56
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetRegisterInfo.h | 214
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetRegistry.h | 66
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSchedule.td | 43
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSelectionDAG.td | 122
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h | 10
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO.h | 8
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Instrumentation.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Transforms/RSProfiling.h | 42
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar.h | 30
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h | 8
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h | 26
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h | 14
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/Cloning.h | 15
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/Local.h | 16
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h | 49
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h | 4
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h | 25
-rw-r--r-- contrib/llvm/include/llvm/Type.h | 26
-rw-r--r-- contrib/llvm/include/llvm/TypeSymbolTable.h | 3
-rw-r--r-- contrib/llvm/include/llvm/Use.h | 37
-rw-r--r-- contrib/llvm/include/llvm/User.h | 41
-rw-r--r-- contrib/llvm/include/llvm/Value.h | 18
-rw-r--r-- contrib/llvm/include/llvm/ValueSymbolTable.h | 2
-rw-r--r-- contrib/llvm/lib/Analysis/AliasAnalysis.cpp | 187
-rw-r--r-- contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp | 45
-rw-r--r-- contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp | 36
-rw-r--r-- contrib/llvm/lib/Analysis/AliasDebugger.cpp | 29
-rw-r--r-- contrib/llvm/lib/Analysis/AliasSetTracker.cpp | 105
-rw-r--r-- contrib/llvm/lib/Analysis/Analysis.cpp | 67
-rw-r--r-- contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp | 459
-rw-r--r-- contrib/llvm/lib/Analysis/CFGPrinter.cpp | 27
-rw-r--r-- contrib/llvm/lib/Analysis/CaptureTracking.cpp | 3
-rw-r--r-- contrib/llvm/lib/Analysis/ConstantFolding.cpp | 298
-rw-r--r-- contrib/llvm/lib/Analysis/DIBuilder.cpp | 801
-rw-r--r-- contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp | 129
-rw-r--r-- contrib/llvm/lib/Analysis/DebugInfo.cpp | 244
-rw-r--r-- contrib/llvm/lib/Analysis/DomPrinter.cpp | 50
-rw-r--r-- contrib/llvm/lib/Analysis/DominanceFrontier.cpp | 137
-rw-r--r-- contrib/llvm/lib/Analysis/IPA/CallGraph.cpp | 24
-rw-r--r-- contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp | 1
-rw-r--r-- contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp | 2
-rw-r--r-- contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp | 77
-rw-r--r-- contrib/llvm/lib/Analysis/IPA/IPA.cpp | 29
-rw-r--r-- contrib/llvm/lib/Analysis/IVUsers.cpp | 12
-rw-r--r-- contrib/llvm/lib/Analysis/InlineCost.cpp | 486
-rw-r--r-- contrib/llvm/lib/Analysis/InstCount.cpp | 6
-rw-r--r-- contrib/llvm/lib/Analysis/InstructionSimplify.cpp | 1904
-rw-r--r-- contrib/llvm/lib/Analysis/IntervalPartition.cpp | 2
-rw-r--r-- contrib/llvm/lib/Analysis/LazyValueInfo.cpp | 844
-rw-r--r-- contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp | 20
-rw-r--r-- contrib/llvm/lib/Analysis/Lint.cpp | 110
-rw-r--r-- contrib/llvm/lib/Analysis/LiveValues.cpp | 15
-rw-r--r-- contrib/llvm/lib/Analysis/Loads.cpp | 4
-rw-r--r-- contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp | 17
-rw-r--r-- contrib/llvm/lib/Analysis/LoopInfo.cpp | 22
-rw-r--r-- contrib/llvm/lib/Analysis/LoopPass.cpp | 1
-rw-r--r-- contrib/llvm/lib/Analysis/MemDepPrinter.cpp | 167
-rw-r--r-- contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 373
-rw-r--r-- contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp | 6
-rw-r--r-- contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp | 88
-rw-r--r-- contrib/llvm/lib/Analysis/PHITransAddr.cpp | 154
-rw-r--r-- contrib/llvm/lib/Analysis/PathNumbering.cpp | 525
-rw-r--r-- contrib/llvm/lib/Analysis/PathProfileInfo.cpp | 434
-rw-r--r-- contrib/llvm/lib/Analysis/PathProfileVerifier.cpp | 207
-rw-r--r-- contrib/llvm/lib/Analysis/PointerTracking.cpp | 316
-rw-r--r-- contrib/llvm/lib/Analysis/PostDominators.cpp | 10
-rw-r--r-- contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp | 11
-rw-r--r-- contrib/llvm/lib/Analysis/ProfileInfo.cpp | 17
-rw-r--r-- contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp | 3
-rw-r--r-- contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp | 11
-rw-r--r-- contrib/llvm/lib/Analysis/RegionInfo.cpp | 168
-rw-r--r-- contrib/llvm/lib/Analysis/RegionPass.cpp | 275
-rw-r--r-- contrib/llvm/lib/Analysis/RegionPrinter.cpp | 36
-rw-r--r-- contrib/llvm/lib/Analysis/ScalarEvolution.cpp | 1170
-rw-r--r-- contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 41
-rw-r--r-- contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 51
-rw-r--r-- contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp | 232
-rw-r--r-- contrib/llvm/lib/Analysis/ValueTracking.cpp | 369
-rw-r--r-- contrib/llvm/lib/Archive/Archive.cpp | 54
-rw-r--r-- contrib/llvm/lib/Archive/ArchiveInternals.h | 2
-rw-r--r-- contrib/llvm/lib/Archive/ArchiveWriter.cpp | 117
-rw-r--r-- contrib/llvm/lib/AsmParser/LLLexer.cpp | 21
-rw-r--r-- contrib/llvm/lib/AsmParser/LLLexer.h | 4
-rw-r--r-- contrib/llvm/lib/AsmParser/LLParser.cpp | 151
-rw-r--r-- contrib/llvm/lib/AsmParser/LLParser.h | 8
-rw-r--r-- contrib/llvm/lib/AsmParser/LLToken.h | 3
-rw-r--r-- contrib/llvm/lib/AsmParser/Parser.cpp | 10
-rw-r--r-- contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 181
-rw-r--r-- contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h | 5
-rw-r--r-- contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 32
-rw-r--r-- contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 45
-rw-r--r-- contrib/llvm/lib/CodeGen/AllocationOrder.cpp | 68
-rw-r--r-- contrib/llvm/lib/CodeGen/AllocationOrder.h | 54
-rw-r--r-- contrib/llvm/lib/CodeGen/Analysis.cpp | 30
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 64
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | 61
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | 122
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp | 138
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 681
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h | 139
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp | 338
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h | 155
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/DwarfTableException.cpp | 349
-rw-r--r-- contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | 1
-rw-r--r-- contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp | 15
-rw-r--r-- contrib/llvm/lib/CodeGen/CallingConvLower.cpp | 40
-rw-r--r-- contrib/llvm/lib/CodeGen/CodeGen.cpp | 61
-rw-r--r-- contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp | 99
-rw-r--r-- contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h | 12
-rw-r--r-- contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp | 16
-rw-r--r-- contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp | 30
-rw-r--r-- contrib/llvm/lib/CodeGen/ELF.h | 2
-rw-r--r-- contrib/llvm/lib/CodeGen/ELFWriter.cpp | 17
-rw-r--r-- contrib/llvm/lib/CodeGen/EdgeBundles.cpp | 86
-rw-r--r-- contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp | 82
-rw-r--r-- contrib/llvm/lib/CodeGen/GCMetadata.cpp | 7
-rw-r--r-- contrib/llvm/lib/CodeGen/GCStrategy.cpp | 44
-rw-r--r-- contrib/llvm/lib/CodeGen/IfConversion.cpp | 247
-rw-r--r-- contrib/llvm/lib/CodeGen/InlineSpiller.cpp | 287
-rw-r--r-- contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp | 32
-rw-r--r-- contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp | 43
-rw-r--r-- contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp | 26
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp | 711
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveDebugVariables.h | 63
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveInterval.cpp | 312
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp | 357
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp | 315
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveIntervalUnion.h | 258
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp | 129
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveRangeEdit.h | 135
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp | 20
-rw-r--r-- contrib/llvm/lib/CodeGen/LiveVariables.cpp | 42
-rw-r--r-- contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp | 29
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp | 79
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineCSE.cpp | 162
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineDominators.cpp | 3
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineFunction.cpp | 69
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp | 12
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineInstr.cpp | 171
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineLICM.cpp | 506
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp | 7
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineLoopRanges.cpp | 116
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp | 76
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp | 64
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineSink.cpp | 312
-rw-r--r-- contrib/llvm/lib/CodeGen/MachineVerifier.cpp | 377
-rw-r--r-- contrib/llvm/lib/CodeGen/OptimizePHIs.cpp | 6
-rw-r--r-- contrib/llvm/lib/CodeGen/PHIElimination.cpp | 143
-rw-r--r-- contrib/llvm/lib/CodeGen/PHIElimination.h | 115
-rw-r--r-- contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp | 61
-rw-r--r-- contrib/llvm/lib/CodeGen/PHIEliminationUtils.h | 25
-rw-r--r-- contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp | 131
-rw-r--r-- contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp | 74
-rw-r--r-- contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp | 43
-rw-r--r-- contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp | 7
-rw-r--r-- contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp | 62
-rw-r--r-- contrib/llvm/lib/CodeGen/PrologEpilogInserter.h | 4
-rw-r--r-- contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp | 2
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocBase.h | 181
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocBasic.cpp | 523
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocFast.cpp | 68
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp | 1285
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp | 109
-rw-r--r-- contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp | 994
-rw-r--r-- contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp | 3
-rw-r--r-- contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp | 14
-rw-r--r-- contrib/llvm/lib/CodeGen/RenderMachineFunction.h | 4
-rw-r--r-- contrib/llvm/lib/CodeGen/ScheduleDAG.cpp | 15
-rw-r--r-- contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp | 2
-rw-r--r-- contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp | 137
-rw-r--r-- contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h | 10
-rw-r--r-- contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp (renamed from contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp) | 137
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 1211
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 84
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 1
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 141
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 1056
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 33
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 426
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 49
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 34
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | 66
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 4
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 325
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h | 2
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 12
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp | 51
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 1969
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 333
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 35
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 684
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 850
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 14
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 791
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | 16
-rw-r--r-- contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 669
-rw-r--r-- contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp | 4
-rw-r--r-- contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp | 247
-rw-r--r-- contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h | 13
-rw-r--r-- contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp | 446
-rw-r--r-- contrib/llvm/lib/CodeGen/SlotIndexes.cpp | 33
-rw-r--r-- contrib/llvm/lib/CodeGen/SpillPlacement.cpp | 330
-rw-r--r-- contrib/llvm/lib/CodeGen/SpillPlacement.h | 108
-rw-r--r-- contrib/llvm/lib/CodeGen/Spiller.cpp | 316
-rw-r--r-- contrib/llvm/lib/CodeGen/Spiller.h | 12
-rw-r--r-- contrib/llvm/lib/CodeGen/SplitKit.cpp | 1491
-rw-r--r-- contrib/llvm/lib/CodeGen/SplitKit.h | 419
-rw-r--r-- contrib/llvm/lib/CodeGen/Splitter.cpp | 32
-rw-r--r-- contrib/llvm/lib/CodeGen/Splitter.h | 4
-rw-r--r-- contrib/llvm/lib/CodeGen/StackProtector.cpp | 28
-rw-r--r-- contrib/llvm/lib/CodeGen/StackSlotColoring.cpp | 30
-rw-r--r-- contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp | 1694
-rw-r--r-- contrib/llvm/lib/CodeGen/TailDuplication.cpp | 21
-rw-r--r-- contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp | 50
-rw-r--r-- contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 253
-rw-r--r-- contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp | 54
-rw-r--r-- contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp | 15
-rw-r--r-- contrib/llvm/lib/CodeGen/VirtRegMap.cpp | 165
-rw-r--r-- contrib/llvm/lib/CodeGen/VirtRegMap.h | 33
-rw-r--r-- contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp | 896
-rw-r--r-- contrib/llvm/lib/CompilerDriver/Action.cpp | 26
-rw-r--r-- contrib/llvm/lib/CompilerDriver/CompilationGraph.cpp | 66
-rw-r--r-- contrib/llvm/lib/CompilerDriver/Main.cpp | 9
-rw-r--r-- contrib/llvm/lib/CompilerDriver/Tool.cpp | 6
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp | 378
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 6
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 4
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp | 23
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp | 4
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp | 11
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp | 4
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp | 2
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/MCJIT/CMakeLists.txt | 4
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp | 92
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h | 68
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/MCJIT/Makefile | 13
-rw-r--r-- contrib/llvm/lib/ExecutionEngine/MCJIT/TargetSelect.cpp | 91
-rw-r--r-- contrib/llvm/lib/Linker/LinkItems.cpp | 13
-rw-r--r-- contrib/llvm/lib/Linker/LinkModules.cpp | 100
-rw-r--r-- contrib/llvm/lib/Linker/Linker.cpp | 18
-rw-r--r-- contrib/llvm/lib/MC/ELFObjectWriter.cpp | 1858
-rw-r--r-- contrib/llvm/lib/MC/MCAsmInfo.cpp | 7
-rw-r--r-- contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp | 9
-rw-r--r-- contrib/llvm/lib/MC/MCAsmStreamer.cpp | 382
-rw-r--r-- contrib/llvm/lib/MC/MCAssembler.cpp | 794
-rw-r--r-- contrib/llvm/lib/MC/MCCodeEmitter.cpp | 12
-rw-r--r-- contrib/llvm/lib/MC/MCContext.cpp | 117
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp | 2
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h | 8
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDInst.cpp | 2
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDInst.h | 2
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp | 23
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDOperand.h | 2
-rw-r--r-- contrib/llvm/lib/MC/MCDisassembler/EDToken.h | 2
-rw-r--r-- contrib/llvm/lib/MC/MCDwarf.cpp | 793
-rw-r--r-- contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp | 23
-rw-r--r-- contrib/llvm/lib/MC/MCELFStreamer.cpp | 350
-rw-r--r-- contrib/llvm/lib/MC/MCExpr.cpp | 284
-rw-r--r-- contrib/llvm/lib/MC/MCLoggingStreamer.cpp | 60
-rw-r--r-- contrib/llvm/lib/MC/MCMachOStreamer.cpp | 355
-rw-r--r-- contrib/llvm/lib/MC/MCMachObjectTargetWriter.cpp | 22
-rw-r--r-- contrib/llvm/lib/MC/MCNullStreamer.cpp | 31
-rw-r--r-- contrib/llvm/lib/MC/MCObjectStreamer.cpp | 187
-rw-r--r-- contrib/llvm/lib/MC/MCObjectWriter.cpp | 65
-rw-r--r-- contrib/llvm/lib/MC/MCParser/AsmLexer.cpp | 182
-rw-r--r-- contrib/llvm/lib/MC/MCParser/AsmParser.cpp | 889
-rw-r--r-- contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp | 144
-rw-r--r-- contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp | 2
-rw-r--r-- contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp | 460
-rw-r--r-- contrib/llvm/lib/MC/MCPureStreamer.cpp | 234
-rw-r--r-- contrib/llvm/lib/MC/MCSectionCOFF.cpp | 8
-rw-r--r-- contrib/llvm/lib/MC/MCSectionELF.cpp | 121
-rw-r--r-- contrib/llvm/lib/MC/MCSectionMachO.cpp | 62
-rw-r--r-- contrib/llvm/lib/MC/MCStreamer.cpp | 218
-rw-r--r-- contrib/llvm/lib/MC/MCSymbol.cpp | 13
-rw-r--r-- contrib/llvm/lib/MC/MachObjectWriter.cpp | 935
-rw-r--r-- contrib/llvm/lib/MC/TargetAsmBackend.cpp | 25
-rw-r--r-- contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp | 371
-rw-r--r-- contrib/llvm/lib/MC/WinCOFFStreamer.cpp | 180
-rw-r--r-- contrib/llvm/lib/Object/CMakeLists.txt | 6
-rw-r--r-- contrib/llvm/lib/Object/COFFObjectFile.cpp | 375
-rw-r--r-- contrib/llvm/lib/Object/ELFObjectFile.cpp | 686
-rw-r--r-- contrib/llvm/lib/Object/MachOObject.cpp | 342
-rw-r--r-- contrib/llvm/lib/Object/Makefile | 14
-rw-r--r-- contrib/llvm/lib/Object/ObjectFile.cpp | 71
-rw-r--r-- contrib/llvm/lib/Support/APFloat.cpp | 28
-rw-r--r-- contrib/llvm/lib/Support/APInt.cpp | 234
-rw-r--r-- contrib/llvm/lib/Support/Allocator.cpp | 10
-rw-r--r-- contrib/llvm/lib/Support/Atomic.cpp (renamed from contrib/llvm/lib/System/Atomic.cpp) | 2
-rw-r--r-- contrib/llvm/lib/Support/CommandLine.cpp | 84
-rw-r--r-- contrib/llvm/lib/Support/ConstantRange.cpp | 94
-rw-r--r-- contrib/llvm/lib/Support/CrashRecoveryContext.cpp | 30
-rw-r--r-- contrib/llvm/lib/Support/Debug.cpp | 2
-rw-r--r-- contrib/llvm/lib/Support/Disassembler.cpp (renamed from contrib/llvm/lib/System/Disassembler.cpp) | 2
-rw-r--r-- contrib/llvm/lib/Support/Dwarf.cpp | 4
-rw-r--r-- contrib/llvm/lib/Support/DynamicLibrary.cpp (renamed from contrib/llvm/lib/System/DynamicLibrary.cpp) | 17
-rw-r--r-- contrib/llvm/lib/Support/Errno.cpp (renamed from contrib/llvm/lib/System/Errno.cpp) | 4
-rw-r--r-- contrib/llvm/lib/Support/ErrorHandling.cpp | 11
-rw-r--r-- contrib/llvm/lib/Support/FileUtilities.cpp | 26
-rw-r--r-- contrib/llvm/lib/Support/FoldingSet.cpp | 29
-rw-r--r-- contrib/llvm/lib/Support/FormattedStream.cpp | 1
-rw-r--r-- contrib/llvm/lib/Support/GraphWriter.cpp | 48
-rw-r--r-- contrib/llvm/lib/Support/Host.cpp (renamed from contrib/llvm/lib/System/Host.cpp) | 56
-rw-r--r-- contrib/llvm/lib/Support/IncludeFile.cpp (renamed from contrib/llvm/lib/System/IncludeFile.cpp) | 4
-rw-r--r-- contrib/llvm/lib/Support/IntEqClasses.cpp | 70
-rw-r--r-- contrib/llvm/lib/Support/IntervalMap.cpp | 161
-rw-r--r-- contrib/llvm/lib/Support/ManagedStatic.cpp | 2
-rw-r--r-- contrib/llvm/lib/Support/Memory.cpp (renamed from contrib/llvm/lib/System/Memory.cpp) | 8
-rw-r--r-- contrib/llvm/lib/Support/MemoryBuffer.cpp | 113
-rw-r--r-- contrib/llvm/lib/Support/Mutex.cpp (renamed from contrib/llvm/lib/System/Mutex.cpp) | 6
-rw-r--r-- contrib/llvm/lib/Support/Path.cpp (renamed from contrib/llvm/lib/System/Path.cpp) | 67
-rw-r--r-- contrib/llvm/lib/Support/PathV2.cpp | 774
-rw-r--r-- contrib/llvm/lib/Support/PluginLoader.cpp | 4
-rw-r--r-- contrib/llvm/lib/Support/PrettyStackTrace.cpp | 17
-rw-r--r-- contrib/llvm/lib/Support/Process.cpp (renamed from contrib/llvm/lib/System/Process.cpp) | 4
-rw-r--r-- contrib/llvm/lib/Support/Program.cpp (renamed from contrib/llvm/lib/System/Program.cpp) | 6
-rw-r--r-- contrib/llvm/lib/Support/RWMutex.cpp (renamed from contrib/llvm/lib/System/RWMutex.cpp) | 4
-rw-r--r-- contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp (renamed from contrib/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp) | 11
-rw-r--r-- contrib/llvm/lib/Support/Signals.cpp (renamed from contrib/llvm/lib/System/Signals.cpp) | 4
-rw-r--r-- contrib/llvm/lib/Support/SourceMgr.cpp | 31
-rw-r--r-- contrib/llvm/lib/Support/Statistic.cpp | 2
-rw-r--r-- contrib/llvm/lib/Support/StringMap.cpp | 2
-rw-r--r-- contrib/llvm/lib/Support/StringRef.cpp | 73
-rw-r--r-- contrib/llvm/lib/Support/SystemUtils.cpp | 40
-rw-r--r-- contrib/llvm/lib/Support/TargetRegistry.cpp | 2
-rw-r--r-- contrib/llvm/lib/Support/ThreadLocal.cpp (renamed from contrib/llvm/lib/System/ThreadLocal.cpp) | 5
-rw-r--r-- contrib/llvm/lib/Support/Threading.cpp (renamed from contrib/llvm/lib/System/Threading.cpp) | 66
-rw-r--r-- contrib/llvm/lib/Support/TimeValue.cpp (renamed from contrib/llvm/lib/System/TimeValue.cpp) | 5
-rw-r--r-- contrib/llvm/lib/Support/Timer.cpp | 4
-rw-r--r-- contrib/llvm/lib/Support/ToolOutputFile.cpp | 43
-rw-r--r-- contrib/llvm/lib/Support/Triple.cpp | 106
-rw-r--r-- contrib/llvm/lib/Support/Twine.cpp | 34
-rw-r--r-- contrib/llvm/lib/Support/Unix/Host.inc (renamed from contrib/llvm/lib/System/Unix/Host.inc) | 11
-rw-r--r-- contrib/llvm/lib/Support/Unix/Memory.inc (renamed from contrib/llvm/lib/System/Unix/Memory.inc) | 12
-rw-r--r-- contrib/llvm/lib/Support/Unix/Mutex.inc (renamed from contrib/llvm/lib/System/Unix/Mutex.inc) | 10
-rw-r--r-- contrib/llvm/lib/Support/Unix/Path.inc (renamed from contrib/llvm/lib/System/Unix/Path.inc) | 150
-rw-r--r-- contrib/llvm/lib/Support/Unix/PathV2.inc | 507
-rw-r--r-- contrib/llvm/lib/Support/Unix/Process.inc (renamed from contrib/llvm/lib/System/Unix/Process.inc) | 26
-rw-r--r-- contrib/llvm/lib/Support/Unix/Program.inc (renamed from contrib/llvm/lib/System/Unix/Program.inc) | 68
-rw-r--r-- contrib/llvm/lib/Support/Unix/README.txt | 16
-rw-r--r-- contrib/llvm/lib/Support/Unix/RWMutex.inc (renamed from contrib/llvm/lib/System/Unix/RWMutex.inc) | 6
-rw-r--r-- contrib/llvm/lib/Support/Unix/Signals.inc (renamed from contrib/llvm/lib/System/Unix/Signals.inc) | 24
-rw-r--r-- contrib/llvm/lib/Support/Unix/ThreadLocal.inc (renamed from contrib/llvm/lib/System/Unix/ThreadLocal.inc) | 6
-rw-r--r-- contrib/llvm/lib/Support/Unix/TimeValue.inc (renamed from contrib/llvm/lib/System/Unix/TimeValue.inc) | 12
-rw-r--r-- contrib/llvm/lib/Support/Unix/Unix.h (renamed from contrib/llvm/lib/System/Unix/Unix.h) | 4
-rw-r--r-- contrib/llvm/lib/Support/Unix/system_error.inc | 34
-rw-r--r-- contrib/llvm/lib/Support/Valgrind.cpp (renamed from contrib/llvm/lib/System/Valgrind.cpp) | 2
-rw-r--r-- contrib/llvm/lib/Support/Windows/DynamicLibrary.inc (renamed from contrib/llvm/lib/System/Win32/DynamicLibrary.inc) | 104
-rw-r--r-- contrib/llvm/lib/Support/Windows/Host.inc (renamed from contrib/llvm/lib/System/Win32/Host.inc) | 4
-rw-r--r-- contrib/llvm/lib/Support/Windows/Memory.inc (renamed from contrib/llvm/lib/System/Win32/Memory.inc) | 12
-rw-r--r-- contrib/llvm/lib/Support/Windows/Mutex.inc (renamed from contrib/llvm/lib/System/Win32/Mutex.inc) | 16
-rw-r--r-- contrib/llvm/lib/Support/Windows/Path.inc (renamed from contrib/llvm/lib/System/Win32/Path.inc) | 129
-rw-r--r-- contrib/llvm/lib/Support/Windows/PathV2.inc | 750
-rw-r--r-- contrib/llvm/lib/Support/Windows/Process.inc (renamed from contrib/llvm/lib/System/Win32/Process.inc) | 17
-rw-r--r-- contrib/llvm/lib/Support/Windows/Program.inc (renamed from contrib/llvm/lib/System/Win32/Program.inc) | 22
-rw-r--r-- contrib/llvm/lib/Support/Windows/RWMutex.inc (renamed from contrib/llvm/lib/System/Win32/RWMutex.inc) | 10
-rw-r--r-- contrib/llvm/lib/Support/Windows/Signals.inc (renamed from contrib/llvm/lib/System/Win32/Signals.inc) | 172
-rw-r--r-- contrib/llvm/lib/Support/Windows/ThreadLocal.inc (renamed from contrib/llvm/lib/System/Win32/ThreadLocal.inc) | 11
-rw-r--r-- contrib/llvm/lib/Support/Windows/TimeValue.inc (renamed from contrib/llvm/lib/System/Win32/TimeValue.inc) | 6
-rw-r--r-- contrib/llvm/lib/Support/Windows/Windows.h | 120
-rw-r--r-- contrib/llvm/lib/Support/Windows/explicit_symbols.inc | 66
-rw-r--r-- contrib/llvm/lib/Support/Windows/system_error.inc | 142
-rw-r--r-- contrib/llvm/lib/Support/raw_ostream.cpp | 103
-rw-r--r-- contrib/llvm/lib/Support/regexec.c | 5
-rw-r--r-- contrib/llvm/lib/Support/system_error.cpp | 130
-rw-r--r-- contrib/llvm/lib/System/Alarm.cpp | 33
-rw-r--r-- contrib/llvm/lib/System/Unix/Alarm.inc | 72
-rw-r--r-- contrib/llvm/lib/System/Win32/Alarm.inc | 43
-rw-r--r-- contrib/llvm/lib/System/Win32/Win32.h | 57
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARM.h | 114
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARM.td | 58
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMAddressingModes.h | 12
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMAsmBackend.cpp | 512
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 2225
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h | 112
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBaseInfo.h | 249
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 1305
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 171
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp | 948
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h | 65
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h | 73
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMCallingConv.h | 160
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMCallingConv.td | 29
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp | 368
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 27
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp | 26
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h | 43
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp | 83
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h | 58
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp | 1227
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMFastISel.cpp | 1670
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMFixupKinds.h | 97
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMFrameInfo.h | 32
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp | 1021
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMFrameLowering.h | 74
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp | 69
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp | 121
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h | 54
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 1823
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp | 2278
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMISelLowering.h | 88
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrFormats.td | 1191
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp | 33
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrInfo.h | 5
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrInfo.td | 3554
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrNEON.td | 3650
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrThumb.td | 1661
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td | 2725
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMInstrVFP.td | 1146
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp | 13
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMJITInfo.h | 2
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 519
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMMCCodeEmitter.cpp | 1230
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMMCExpr.cpp | 73
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMMCExpr.h | 73
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp | 147
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h | 60
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h | 13122
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp | 1
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td | 90
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMSchedule.td | 140
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMScheduleA8.td | 862
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMScheduleA9.td | 1799
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMScheduleV6.td | 130
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp | 16
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h | 6
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp | 119
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMSubtarget.h | 48
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp | 62
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMTargetMachine.h | 36
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp | 19
-rw-r--r-- contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h | 11
-rw-r--r-- contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp | 192
-rw-r--r-- contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 1530
-rw-r--r-- contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp | 49
-rw-r--r-- contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp | 259
-rw-r--r-- contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h | 298
-rw-r--r-- contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp (renamed from contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp) | 457
-rw-r--r-- contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h (renamed from contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h) | 69
-rw-r--r-- contrib/llvm/lib/Target/ARM/InstPrinter/CMakeLists.txt | 6
-rw-r--r-- contrib/llvm/lib/Target/ARM/InstPrinter/Makefile | 15
-rw-r--r-- contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp | 321
-rw-r--r-- contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp | 406
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp | 352
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h | 52
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 84
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h | 17
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp | 332
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h | 5
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp | 53
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h | 40
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 44
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h | 8
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp | 1
-rw-r--r-- contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp | 133
-rw-r--r-- contrib/llvm/lib/Target/Alpha/Alpha.h | 7
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/Alpha/AsmPrinter/AlphaAsmPrinter.cpp) | 2
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaCodeEmitter.cpp | 222
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp | 143
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h | 43
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp | 19
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp | 87
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h | 5
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td | 6
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaJITInfo.cpp | 310
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaJITInfo.h | 53
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp | 152
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h | 10
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaSchedule.td | 4
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp | 10
-rw-r--r-- contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h | 20
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/Blackfin/AsmPrinter/BlackfinAsmPrinter.cpp) | 0
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp | 124
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h | 46
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp | 6
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp | 61
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h | 6
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td | 8
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp | 106
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h | 8
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td | 20
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp | 2
-rw-r--r-- contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h | 17
-rw-r--r-- contrib/llvm/lib/Target/CBackend/CBackend.cpp | 337
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPU.h | 1
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td | 79
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/CellSPU/AsmPrinter/SPUAsmPrinter.cpp) | 37
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.cpp | 29
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp | 276
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h (renamed from contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h) | 35
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp | 4
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h | 4
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp | 223
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp | 787
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h | 23
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp | 15
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h | 4
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td | 396
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp | 3
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUNodes.td | 18
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp | 153
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUOperands.td | 18
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp | 264
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h | 16
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUSchedule.td | 8
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp | 21
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h | 6
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp | 13
-rw-r--r-- contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h | 15
-rw-r--r-- contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp | 37
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/AsmParser/CMakeLists.txt | 8
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp | 127
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp | 568
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/AsmParser/Makefile | 15
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/Disassembler/CMakeLists.txt | 16
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp | 647
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h | 55
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/Disassembler/Makefile | 16
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/InstPrinter/CMakeLists.txt | 8
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp | 69
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h | 43
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/InstPrinter/Makefile | 16
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlaze.h | 8
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlaze.td | 41
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeAsmBackend.cpp | 163
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp) | 156
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeCallingConv.td | 14
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp | 191
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp | 111
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h | 58
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp | 450
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h | 53
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp | 87
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp | 720
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h | 46
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td | 253
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td | 326
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td | 272
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp | 179
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h | 166
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td | 927
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp | 6
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td | 6
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp | 9
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h | 4
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMCCodeEmitter.cpp | 223
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp | 166
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h (renamed from contrib/llvm/lib/Target/ARM/ARMMCInstLower.h) | 30
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h | 86
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp | 343
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h | 20
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td | 140
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h | 47
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td | 4
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp | 66
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h | 33
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp | 9
-rw-r--r-- contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.h | 7
-rw-r--r-- contrib/llvm/lib/Target/MSP430/InstPrinter/CMakeLists.txt | 6
-rw-r--r-- contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp (renamed from contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.cpp) | 3
-rw-r--r-- contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h (renamed from contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.h) | 0
-rw-r--r-- contrib/llvm/lib/Target/MSP430/InstPrinter/Makefile | 15
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430.td | 1
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp (renamed from contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430AsmPrinter.cpp) | 2
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp | 223
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h | 53
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 17
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 22
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 52
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h | 9
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td | 16
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp (renamed from contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp) | 0
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h (renamed from contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.h) | 0
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp | 170
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h | 6
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td | 8
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp | 14
-rw-r--r-- contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h | 12
-rw-r--r-- contrib/llvm/lib/Target/Mangler.cpp | 10
-rw-r--r-- contrib/llvm/lib/Target/Mips/Mips.td | 30
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp) | 25
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp | 13
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp | 314
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsFrameLowering.h | 48
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp | 28
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp | 620
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsISelLowering.h | 18
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsInstrFPU.td | 2
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsInstrInfo.td | 355
-rw-r--r-- contrib/llvm/lib/Target/Mips/MipsMachineFunction.h | 34
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp287
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h5
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSchedule.td2
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp20
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.h21
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp29
-rw-r--r--contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp512
-rw-r--r--contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.h88
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16.h134
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16.td40
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16ABINames.h399
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp490
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.h156
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp50
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.h60
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp2000
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h253
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16InstrFormats.td117
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp224
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h76
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td540
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp59
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h35
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16MachineFunctionInfo.h52
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp254
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp299
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h83
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.cpp182
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.h60
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp84
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h64
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.td33
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Section.cpp104
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Section.h99
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.cpp23
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.h31
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Subtarget.cpp27
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16Subtarget.h44
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.cpp55
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.h70
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.cpp384
-rw-r--r--contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.h168
-rw-r--r--contrib/llvm/lib/Target/PIC16/TargetInfo/PIC16TargetInfo.cpp22
-rw-r--r--contrib/llvm/lib/Target/PTX/CMakeLists.txt26
-rw-r--r--contrib/llvm/lib/Target/PTX/Makefile26
-rw-r--r--contrib/llvm/lib/Target/PTX/PTX.h49
-rw-r--r--contrib/llvm/lib/Target/PTX/PTX.td54
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp347
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp24
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXFrameLowering.h43
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp151
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp210
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXISelLowering.h67
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrFormats.td24
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp87
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.h75
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.td257
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.cpp30
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.h28
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp542
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp96
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h79
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp (renamed from contrib/llvm/lib/Target/TargetFrameInfo.cpp)12
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h63
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td102
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp23
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXSubtarget.h32
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp60
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXTargetMachine.h60
-rw-r--r--contrib/llvm/lib/Target/PTX/TargetInfo/CMakeLists.txt7
-rw-r--r--contrib/llvm/lib/Target/PTX/TargetInfo/Makefile15
-rw-r--r--contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp21
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/CMakeLists.txt6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/Makefile16
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp292
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h69
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.h62
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.td6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCAsmBackend.cpp119
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp)452
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp253
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFixupKinds.h45
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp971
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h (renamed from contrib/llvm/lib/Target/PowerPC/PPCFrameInfo.h)50
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp56
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h20
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp210
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp731
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h7
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td57
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td39
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp81
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h26
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td177
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMCCodeEmitter.cpp195
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp172
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp975
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h19
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td13
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp31
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h18
-rw-r--r--contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp230
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/Sparc/AsmPrinter/SparcAsmPrinter.cpp)6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcCallingConv.td10
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp80
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h41
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp18
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp721
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.h3
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp195
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h11
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td221
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h11
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp53
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h9
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td3
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h15
-rw-r--r--contrib/llvm/lib/Target/SubtargetFeature.cpp3
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp)6
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp386
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h57
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp31
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp18
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h6
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp150
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h10
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td56
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZOperands.td15
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp214
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h12
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td48
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h12
-rw-r--r--contrib/llvm/lib/Target/Target.cpp15
-rw-r--r--contrib/llvm/lib/Target/TargetAsmInfo.cpp27
-rw-r--r--contrib/llvm/lib/Target/TargetData.cpp58
-rw-r--r--contrib/llvm/lib/Target/TargetELFWriterInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/TargetFrameLowering.cpp53
-rw-r--r--contrib/llvm/lib/Target/TargetInstrInfo.cpp93
-rw-r--r--contrib/llvm/lib/Target/TargetLibraryInfo.cpp55
-rw-r--r--contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp8
-rw-r--r--contrib/llvm/lib/Target/TargetMachine.cpp4
-rw-r--r--contrib/llvm/lib/Target/TargetRegisterInfo.cpp43
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp9
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp437
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp15
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h2
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c31
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h4
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h3
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/CMakeLists.txt8
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/Makefile15
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp)2
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h)0
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.cpp)2
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.h)0
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp)3
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h (renamed from contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h)0
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/CMakeLists.txt6
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/Makefile15
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp (renamed from contrib/llvm/lib/Target/X86/X86ShuffleDecode.h)53
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h69
-rw-r--r--contrib/llvm/lib/Target/X86/X86.h10
-rw-r--r--contrib/llvm/lib/Target/X86/X86.td28
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmBackend.cpp270
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp97
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86CallingConv.td67
-rw-r--r--contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp21
-rw-r--r--contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp55
-rw-r--r--contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h19
-rw-r--r--contrib/llvm/lib/Target/X86/X86FastISel.cpp300
-rw-r--r--contrib/llvm/lib/Target/X86/X86FixupKinds.h16
-rw-r--r--contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp129
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.cpp994
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.h65
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp200
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.cpp3146
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.h243
-rw-r--r--contrib/llvm/lib/Target/X86/X86Instr3DNow.td77
-rw-r--r--contrib/llvm/lib/Target/X86/X86Instr64bit.td2250
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrArithmetic.td1125
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrBuilder.h37
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td104
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCompiler.td1626
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrControl.td294
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrExtension.td172
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFPStack.td82
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFormats.td24
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td107
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.cpp448
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.h84
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.td4842
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrMMX.td607
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSSE.td571
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td746
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSystem.td390
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrVMX.td54
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.cpp16
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCAsmInfo.cpp15
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp149
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.cpp117
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86MachObjectWriter.cpp32
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp955
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.h17
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.td100
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp52
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h9
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.cpp18
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.h36
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.cpp55
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.h75
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp (renamed from contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp)0
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreCallingConv.td3
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameInfo.cpp27
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameInfo.h34
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp387
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h59
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp21
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp172
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.h1
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp66
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h9
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td76
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp284
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h11
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h8
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp49
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp117
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp86
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp76
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp30
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp141
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp830
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/IPO.cpp38
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Inliner.cpp56
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Internalize.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp646
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp216
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PruneEH.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp24
-rw-r--r--contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombine.h28
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp350
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp597
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp288
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp35
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp772
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp315
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp79
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp294
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp116
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp100
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp272
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp604
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp32
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp1423
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp22
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.h7
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ADCE.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp369
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp86
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DCE.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp847
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp470
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GEPSplitter.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GVN.cpp813
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp49
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp998
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LICM.cpp324
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp26
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp594
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp1270
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp170
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp491
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp200
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp57
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp60
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp146
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp729
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp38
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SCCP.cpp39
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Scalar.cpp55
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp1155
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp342
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Sink.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp18
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp139
-rw-r--r--contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp28
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp157
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp56
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp54
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp40
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp45
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneModule.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp173
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LCSSA.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Local.cpp148
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp69
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp38
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp27
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp209
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp171
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp2003
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp94
-rw-r--r--contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Utils.cpp37
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp178
-rw-r--r--contrib/llvm/lib/VMCore/AsmWriter.cpp40
-rw-r--r--contrib/llvm/lib/VMCore/Attributes.cpp33
-rw-r--r--contrib/llvm/lib/VMCore/AutoUpgrade.cpp569
-rw-r--r--contrib/llvm/lib/VMCore/BasicBlock.cpp9
-rw-r--r--contrib/llvm/lib/VMCore/ConstantFold.cpp159
-rw-r--r--contrib/llvm/lib/VMCore/ConstantFold.h2
-rw-r--r--contrib/llvm/lib/VMCore/Constants.cpp382
-rw-r--r--contrib/llvm/lib/VMCore/ConstantsContext.h30
-rw-r--r--contrib/llvm/lib/VMCore/Core.cpp234
-rw-r--r--contrib/llvm/lib/VMCore/Dominators.cpp275
-rw-r--r--contrib/llvm/lib/VMCore/Function.cpp21
-rw-r--r--contrib/llvm/lib/VMCore/Globals.cpp49
-rw-r--r--contrib/llvm/lib/VMCore/IRBuilder.cpp81
-rw-r--r--contrib/llvm/lib/VMCore/InlineAsm.cpp79
-rw-r--r--contrib/llvm/lib/VMCore/Instruction.cpp36
-rw-r--r--contrib/llvm/lib/VMCore/Instructions.cpp212
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContext.cpp42
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.cpp13
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.h8
-rw-r--r--contrib/llvm/lib/VMCore/LeakDetector.cpp4
-rw-r--r--contrib/llvm/lib/VMCore/Metadata.cpp13
-rw-r--r--contrib/llvm/lib/VMCore/Module.cpp2
-rw-r--r--contrib/llvm/lib/VMCore/Pass.cpp1
-rw-r--r--contrib/llvm/lib/VMCore/PassManager.cpp128
-rw-r--r--contrib/llvm/lib/VMCore/PassRegistry.cpp173
-rw-r--r--contrib/llvm/lib/VMCore/PrintModulePass.cpp4
-rw-r--r--contrib/llvm/lib/VMCore/Type.cpp30
-rw-r--r--contrib/llvm/lib/VMCore/TypesContext.h2
-rw-r--r--contrib/llvm/lib/VMCore/Use.cpp122
-rw-r--r--contrib/llvm/lib/VMCore/User.cpp81
-rw-r--r--contrib/llvm/lib/VMCore/Value.cpp97
-rw-r--r--contrib/llvm/lib/VMCore/ValueTypes.cpp5
-rw-r--r--contrib/llvm/lib/VMCore/Verifier.cpp32
-rw-r--r--contrib/llvm/tools/clang/include/clang-c/Index.h635
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTContext.h545
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Attr.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CharUnits.h50
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Decl.h581
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclBase.h75
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h563
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h119
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h402
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h82
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Expr.h1158
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h1061
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h386
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/FullExpr.h88
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Mangle.h (renamed from contrib/llvm/tools/clang/lib/CodeGen/Mangle.h)107
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h201
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ParentMap.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h169
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h325
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Stmt.h371
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h96
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h283
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TemplateName.h146
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Type.h1915
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h347
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def5
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValuesV2.h40
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h37
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CFG.h409
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/DomainSpecific/CocoaConventions.h)9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ABI.h126
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Attr.td241
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.def291
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def22
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h440
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td29
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td48
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h212
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td87
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td720
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileManager.h190
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h101
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h93
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h90
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h70
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def28
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h45
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h36
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td24
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def40
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Version.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Visibility.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td386
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/Checkers/LocalCheckers.h61
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/ManagerRegistry.h53
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRAuditor.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSimpleAPICheck.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h107
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h87
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRWorkList.h79
-rw-r--r--contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h70
-rw-r--r--contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/Config/config.h.cmake17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ArgList.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td133
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Driver.h22
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptTable.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.td76
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Types.def1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h151
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def40
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h32
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h102
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DeclXML.def5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h16
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h54
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/StmtXML.def7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/Utils.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticsClient.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h61
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Lexer.h53
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h139
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Pragma.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h93
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h106
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Token.h42
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/Parser.h358
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h108
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h282
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h328
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h41
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h8
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Initialization.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Lookup.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Overload.h48
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Ownership.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Scope.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h38
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Sema.h1325
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Template.h211
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h25
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h135
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h484
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTSerializationListener.h44
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h230
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td38
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/Checkers/DereferenceChecker.h)14
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h51
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h)99
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugType.h)8
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/BugReporter/PathDiagnostic.h)6
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h109
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerProvider.h54
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerV2.h93
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticClients.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h)20
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/AnalysisManager.h)64
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/BasicValueFactory.h)32
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRBlockCounter.h)24
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Checker.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h)166
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerHelpers.h)13
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.def)23
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h)38
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ConstraintManager.h)28
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h)295
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h)40
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h)77
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h)310
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngineBuilders.h)40
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRState.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h)267
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRStateTrait.h)31
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h)119
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h210
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ValueManager.h)124
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h)119
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h)166
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h116
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SummaryManager.h)8
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h)18
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h93
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h101
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h)10
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTContext.cpp1999
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp56
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp1353
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXABI.h9
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Decl.cpp927
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclBase.cpp118
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp719
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp83
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp363
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DumpXML.cpp1028
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Expr.cpp1336
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp835
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp194
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp727
-rw-r--r--contrib/llvm/tools/clang/lib/AST/FullExpr.cpp45
-rw-r--r--contrib/llvm/tools/clang/lib/AST/InheritViz.cpp39
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp (renamed from contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp)572
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Mangle.cpp135
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp1188
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp42
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ParentMap.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp649
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Stmt.cpp256
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp172
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp155
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp99
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp356
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateName.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Type.cpp911
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp53
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp521
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp27
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CFG.cpp1816
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp)96
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp54
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/UninitializedValuesV2.cpp610
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Builtins.cpp24
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp914
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp586
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileManager.cpp464
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp120
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp350
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp52
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Targets.cpp409
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Version.cpp61
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp585
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp240
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h30
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h52
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/SValuator.cpp157
-rw-r--r--contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp162
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h31
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp42
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp1806
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h253
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp680
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp251
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h36
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp174
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h28
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp157
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp188
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp1144
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h560
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp1006
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h56
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp349
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp194
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp442
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.h558
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp662
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp480
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp488
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp293
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp286
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp937
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp675
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp107
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp252
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h12
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp276
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h69
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp613
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp228
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp88
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp262
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h126
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGValue.h132
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp899
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h815
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp527
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h168
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp180
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h76
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp69
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h20
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h8
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp309
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp1178
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp414
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h9
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Action.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Arg.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ArgList.cpp40
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Compilation.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Driver.cpp153
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp68
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Job.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/OptTable.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Option.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Phases.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tool.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp55
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp516
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.h52
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.cpp1034
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.h45
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Types.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp40
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp770
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp221
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp334
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DeclXML.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DocumentXML.cpp19
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp125
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp33
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp113
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp233
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp221
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp114
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/StmtXML.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp186
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TypeXML.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticsClient.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/altivec.h2454
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/emmintrin.h15
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/limits.h6
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/mm_malloc.h47
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/mmintrin.h84
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/stdbool.h8
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/stddef.h17
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/xmmintrin.h8
-rw-r--r--contrib/llvm/tools/clang/lib/Index/ASTVisitor.h3
-rw-r--r--contrib/llvm/tools/clang/lib/Index/CallGraph.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Lexer.cpp230
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp275
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp212
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp152
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Pragma.cpp208
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp227
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp24
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp313
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp681
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp524
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp527
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp160
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp281
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp113
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.h40
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp326
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp97
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp236
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/Parser.cpp208
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h16
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp25
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp873
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp208
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp353
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp114
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp267
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp95
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp81
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp496
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp806
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp3315
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp2809
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp1150
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp1969
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp354
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp37
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp4525
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp1736
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp378
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp1322
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp1007
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp218
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp3982
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp561
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp2303
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp2586
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp858
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp874
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp748
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaType.cpp1532
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp55
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TreeTransform.h2985
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h (renamed from contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h)47
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h8
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp1848
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp849
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp727
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp1154
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp260
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp277
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp24
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/CMakeLists.txt3
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp)19
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp123
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp)39
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp277
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp)37
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp521
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.h (renamed from contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h)19
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp)27
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt70
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp)412
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp)134
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp)38
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp)20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp)35
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp)29
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp)53
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp)26
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td197
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp167
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.cpp137
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.h29
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp)126
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp80
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp)115
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp)15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp)27
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.h31
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprEngine.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp)1675
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp)16
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp)311
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InternalChecks.h51
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp)55
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp)21
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Makefile24
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp)149
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp)30
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp)15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp)9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp)41
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp100
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp356
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp)33
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp)16
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp)16
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp)35
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp)30
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp)13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrLeakChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp)26
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp)82
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp)15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp)23
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp)15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp)13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp)13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp)90
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/UnreachableCodeChecker.cpp)70
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp)41
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp)29
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/AnalysisManager.cpp)5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp)57
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicStore.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp)222
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp)11
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp)27
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp)227
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp)47
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CFRefCount.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp)458
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt41
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CXXExprEngine.cpp322
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/Checker.cpp)8
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/CheckerHelpers.cpp)10
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp85
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp)360
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/Environment.cpp)76
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp)119
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FlatStore.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp)86
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/GRState.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/GRState.cpp)223
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp)20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Makefile17
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp)93
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp99
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp)10
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp)9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp)60
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp)633
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp310
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/SVals.cpp)58
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp)74
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h (renamed from contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h)42
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp)286
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/Store.cpp)52
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp)13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp)306
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h (renamed from contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h)12
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp50
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp (renamed from contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp)5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/Makefile19
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Makefile18
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/README.txt139
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp10
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp47
-rw-r--r--contrib/llvm/tools/clang/tools/driver/driver.cpp56
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp313
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.h2
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp2362
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp122
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterInst.cpp112
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterInst.h36
-rw-r--r--contrib/llvm/utils/TableGen/CallingConvEmitter.cpp12
-rw-r--r--contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h2
-rw-r--r--contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp94
-rw-r--r--contrib/llvm/utils/TableGen/ClangAttrEmitter.h13
-rw-r--r--contrib/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp26
-rw-r--r--contrib/llvm/utils/TableGen/ClangSACheckersEmitter.cpp229
-rw-r--r--contrib/llvm/utils/TableGen/ClangSACheckersEmitter.h31
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.cpp253
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.h8
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp796
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h208
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.cpp519
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.h231
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.h6
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.cpp114
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.h17
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.cpp42
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.h368
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp38
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp273
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp18
-rw-r--r--contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp7
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.cpp288
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.cpp70
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp1372
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h56
-rw-r--r--contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp2
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp33
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp204
-rw-r--r--contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp335
-rw-r--r--contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.h4
-rw-r--r--contrib/llvm/utils/TableGen/NeonEmitter.cpp1059
-rw-r--r--contrib/llvm/utils/TableGen/NeonEmitter.h78
-rw-r--r--contrib/llvm/utils/TableGen/Record.cpp200
-rw-r--r--contrib/llvm/utils/TableGen/Record.h42
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp24
-rw-r--r--contrib/llvm/utils/TableGen/StringMatcher.cpp149
-rw-r--r--contrib/llvm/utils/TableGen/StringMatcher.h54
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.cpp106
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.h7
-rw-r--r--contrib/llvm/utils/TableGen/TGLexer.cpp68
-rw-r--r--contrib/llvm/utils/TableGen/TGLexer.h15
-rw-r--r--contrib/llvm/utils/TableGen/TGParser.cpp268
-rw-r--r--contrib/llvm/utils/TableGen/TGParser.h12
-rw-r--r--contrib/llvm/utils/TableGen/TableGen.cpp49
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp16
-rw-r--r--contrib/llvm/utils/TableGen/X86ModRMFilters.h2
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp33
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.h5
-rw-r--r--etc/mtree/BSD.include.dist2
-rw-r--r--lib/clang/Makefile18
-rw-r--r--lib/clang/clang.build.mk66
-rw-r--r--lib/clang/include/ARMGenDecoderTables.inc2
-rw-r--r--lib/clang/include/ARMGenEDInfo.inc2
-rw-r--r--lib/clang/include/ARMGenMCCodeEmitter.inc2
-rw-r--r--lib/clang/include/Checkers.inc2
-rw-r--r--lib/clang/include/Makefile2
-rw-r--r--lib/clang/include/PPCGenMCCodeEmitter.inc2
-rw-r--r--lib/clang/include/clang/Basic/Version.inc8
-rw-r--r--lib/clang/include/clang/Lex/AttrSpellings.inc2
-rw-r--r--lib/clang/include/llvm/ADT/iterator.h78
-rw-r--r--lib/clang/include/llvm/Config/AsmPrinters.def2
-rw-r--r--lib/clang/include/llvm/Config/Disassemblers.def1
-rw-r--r--lib/clang/include/llvm/Config/Targets.def2
-rw-r--r--lib/clang/include/llvm/Config/config.h34
-rw-r--r--lib/clang/include/llvm/Config/llvm-config.h12
-rw-r--r--lib/clang/include/llvm/Support/DataTypes.h (renamed from lib/clang/include/llvm/System/DataTypes.h)6
-rw-r--r--lib/clang/libclanganalysis/Makefile4
-rw-r--r--lib/clang/libclangast/Makefile6
-rw-r--r--lib/clang/libclangbasic/Makefile2
-rw-r--r--lib/clang/libclangcodegen/Makefile4
-rw-r--r--lib/clang/libclangfrontend/Makefile2
-rw-r--r--lib/clang/libclanglex/Makefile3
-rw-r--r--lib/clang/libclangsema/Makefile1
-rw-r--r--lib/clang/libclangstaticanalyzercheckers/Makefile (renamed from lib/clang/libclangchecker/Makefile)54
-rw-r--r--lib/clang/libclangstaticanalyzercore/Makefile45
-rw-r--r--lib/clang/libclangstaticanalyzerfrontend/Makefile18
-rw-r--r--lib/clang/libllvmanalysis/Makefile9
-rw-r--r--lib/clang/libllvmarmcodegen/Makefile13
-rw-r--r--lib/clang/libllvmarmdisassembler/Makefile16
-rw-r--r--lib/clang/libllvmarminstprinter/Makefile (renamed from lib/clang/libllvmarmasmprinter/Makefile)4
-rw-r--r--lib/clang/libllvmasmprinter/Makefile2
-rw-r--r--lib/clang/libllvmcodegen/Makefile9
-rw-r--r--lib/clang/libllvmcore/Makefile1
-rw-r--r--lib/clang/libllvmipo/Makefile1
-rw-r--r--lib/clang/libllvmmc/Makefile4
-rw-r--r--lib/clang/libllvmmcparser/Makefile1
-rw-r--r--lib/clang/libllvmmipsasmprinter/Makefile14
-rw-r--r--lib/clang/libllvmmipscodegen/Makefile5
-rw-r--r--lib/clang/libllvmpowerpccodegen/Makefile8
-rw-r--r--lib/clang/libllvmpowerpcinstprinter/Makefile (renamed from lib/clang/libllvmpowerpcasmprinter/Makefile)6
-rw-r--r--lib/clang/libllvmscalaropts/Makefile3
-rw-r--r--lib/clang/libllvmsupport/Makefile23
-rw-r--r--lib/clang/libllvmsystem/Makefile24
-rw-r--r--lib/clang/libllvmtarget/Makefile4
-rw-r--r--lib/clang/libllvmx86codegen/Makefile2
-rw-r--r--lib/clang/libllvmx86instprinter/Makefile (renamed from lib/clang/libllvmx86asmprinter/Makefile)4
-rw-r--r--lib/clang/libllvmx86utils/Makefile9
-rw-r--r--lib/libz/Makefile4
-rw-r--r--sys/boot/i386/boot0/Makefile6
-rw-r--r--sys/boot/i386/btx/btx/Makefile5
-rw-r--r--sys/boot/i386/btx/btxldr/Makefile5
-rw-r--r--sys/boot/i386/gptboot/Makefile7
-rw-r--r--sys/boot/i386/gptzfsboot/Makefile7
-rw-r--r--sys/boot/i386/libi386/Makefile5
-rw-r--r--sys/boot/i386/pxeldr/Makefile5
-rw-r--r--sys/boot/i386/zfsboot/Makefile7
-rw-r--r--sys/boot/pc98/btx/btx/Makefile5
-rw-r--r--sys/boot/pc98/btx/btxldr/Makefile5
-rw-r--r--sys/conf/Makefile.amd648
-rw-r--r--sys/conf/Makefile.i3866
-rw-r--r--sys/modules/bios/smapi/Makefile4
-rw-r--r--sys/modules/linux/Makefile5
-rw-r--r--usr.bin/clang/clang/Makefile15
-rw-r--r--usr.bin/clang/tblgen/Makefile5
1959 files changed, 227548 insertions, 123501 deletions
diff --git a/Makefile.inc1 b/Makefile.inc1
index 28435d5..de00716 100644
--- a/Makefile.inc1
+++ b/Makefile.inc1
@@ -974,7 +974,6 @@ _crunchgen= usr.sbin/crunch/crunchgen
# with different per-architecture default values. Always build tblgen.
_clang_tblgen= \
lib/clang/libllvmsupport \
- lib/clang/libllvmsystem \
usr.bin/clang/tblgen
.if ${MK_CDDL} != "no"
diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc
index 0103f1c..ebb1204 100644
--- a/ObsoleteFiles.inc
+++ b/ObsoleteFiles.inc
@@ -38,6 +38,14 @@
# xargs -n1 | sort | uniq -d;
# done
+# 20110220: new clang import which bumps version from 2.8 to 2.9
+OLD_FILES+=usr/include/clang/2.8/emmintrin.h
+OLD_FILES+=usr/include/clang/2.8/mm_malloc.h
+OLD_FILES+=usr/include/clang/2.8/mmintrin.h
+OLD_FILES+=usr/include/clang/2.8/pmmintrin.h
+OLD_FILES+=usr/include/clang/2.8/tmmintrin.h
+OLD_FILES+=usr/include/clang/2.8/xmmintrin.h
+OLD_DIRS+=usr/include/clang/2.8
# 20110119: Remove SYSCTL_*X* sysctl additions.
OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \
usr/share/man/man9/SYSCTL_XLONG.9.gz
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index 75cee7d..39c3cb4 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -33,13 +33,14 @@
#ifndef LLVM_C_CORE_H
#define LLVM_C_CORE_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
/* Need these includes to support the LLVM 'cast' template for the C++ 'wrap'
and 'unwrap' conversion functions. */
#include "llvm/Module.h"
+#include "llvm/PassRegistry.h"
#include "llvm/Support/IRBuilder.h"
extern "C" {
@@ -92,6 +93,9 @@ typedef struct LLVMOpaqueMemoryBuffer *LLVMMemoryBufferRef;
/** See the llvm::PassManagerBase class. */
typedef struct LLVMOpaquePassManager *LLVMPassManagerRef;
+/** See the llvm::PassRegistry class. */
+typedef struct LLVMOpaquePassRegistry *LLVMPassRegistryRef;
+
/** Used to get the users and usees of a Value. See the llvm::Use class. */
typedef struct LLVMOpaqueUse *LLVMUseRef;
@@ -204,7 +208,8 @@ typedef enum {
LLVMPointerTypeKind, /**< Pointers */
LLVMOpaqueTypeKind, /**< Opaque: type with unknown structure */
LLVMVectorTypeKind, /**< SIMD 'packed' format, or other vector type */
- LLVMMetadataTypeKind /**< Metadata */
+ LLVMMetadataTypeKind, /**< Metadata */
+ LLVMX86_MMXTypeKind /**< X86 MMX */
} LLVMTypeKind;
typedef enum {
@@ -317,6 +322,7 @@ void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
LLVMBool LLVMAddTypeName(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty);
void LLVMDeleteTypeName(LLVMModuleRef M, const char *Name);
LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
+const char *LLVMGetTypeName(LLVMModuleRef M, LLVMTypeRef Ty);
/** See Module::dump. */
void LLVMDumpModule(LLVMModuleRef M);
@@ -324,6 +330,9 @@ void LLVMDumpModule(LLVMModuleRef M);
/** See Module::setModuleInlineAsm. */
void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
+/** See Module::getContext. */
+LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M);
+
/*===-- Types -------------------------------------------------------------===*/
/* LLVM types conform to the following hierarchy:
@@ -408,10 +417,12 @@ unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy);
LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMOpaqueTypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMVoidType(void);
LLVMTypeRef LLVMLabelType(void);
LLVMTypeRef LLVMOpaqueType(void);
+LLVMTypeRef LLVMX86MMXType(void);
/* Operations on type handles */
LLVMTypeHandleRef LLVMCreateTypeHandle(LLVMTypeRef PotentiallyAbstractTy);
@@ -540,6 +551,9 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
/* Operations on scalar constants */
LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
LLVMBool SignExtend);
+LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
+ unsigned NumWords,
+ const uint64_t Words[]);
LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char *Text,
uint8_t Radix);
LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char *Text,
@@ -1013,6 +1027,11 @@ LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage);
void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf);
+/*===-- Pass Registry -----------------------------------------------------===*/
+
+/** Return the global pass registry, for use with initialization functions.
+ See llvm::PassRegistry::getPassRegistry. */
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void);
/*===-- Pass Managers -----------------------------------------------------===*/
@@ -1101,6 +1120,7 @@ namespace llvm {
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef )
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef )
DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassManagerBase, LLVMPassManagerRef )
+ DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassRegistry, LLVMPassRegistryRef )
/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
* Module.
*/
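
The Core.h additions above are easiest to see in a short sketch. This is illustrative only: the module name is arbitrary, and it leans on pre-existing llvm-c entry points (LLVMModuleCreateWithName, LLVMIntType, LLVMDisposeModule) alongside the new ones.

    #include <llvm-c/Core.h>

    int main(void) {
      LLVMModuleRef M = LLVMModuleCreateWithName("demo"); /* arbitrary name */
      /* New in this import: build a 128-bit constant from 64-bit words
         (least significant word first). */
      uint64_t Words[2] = { 0xDEADBEEFULL, 0x1ULL };
      LLVMValueRef C = LLVMConstIntOfArbitraryPrecision(LLVMIntType(128), 2, Words);
      /* Also new: the module's owning context is reachable from C. */
      LLVMContextRef Ctx = LLVMGetModuleContext(M);
      (void)C; (void)Ctx;
      LLVMDisposeModule(M);
      return 0;
    }
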
diff --git a/contrib/llvm/include/llvm-c/EnhancedDisassembly.h b/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
index d177381..28ac0ed 100644
--- a/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
+++ b/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
@@ -19,7 +19,7 @@
#ifndef LLVM_C_ENHANCEDDISASSEMBLY_H
#define LLVM_C_ENHANCEDDISASSEMBLY_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
extern "C" {
diff --git a/contrib/llvm/include/llvm-c/Initialization.h b/contrib/llvm/include/llvm-c/Initialization.h
new file mode 100644
index 0000000..3b59abb
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Initialization.h
@@ -0,0 +1,40 @@
+/*===-- llvm-c/Initialization.h - Initialization C Interface ------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to LLVM initialization routines, *|
+|* which must be called before you can use the functionality provided by *|
+|* the corresponding LLVM library. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_INITIALIZEPASSES_H
+#define LLVM_C_INITIALIZEPASSES_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void LLVMInitializeCore(LLVMPassRegistryRef R);
+void LLVMInitializeTransformUtils(LLVMPassRegistryRef R);
+void LLVMInitializeScalarOpts(LLVMPassRegistryRef R);
+void LLVMInitializeInstCombine(LLVMPassRegistryRef R);
+void LLVMInitializeIPO(LLVMPassRegistryRef R);
+void LLVMInitializeInstrumentation(LLVMPassRegistryRef R);
+void LLVMInitializeAnalysis(LLVMPassRegistryRef R);
+void LLVMInitializeIPA(LLVMPassRegistryRef R);
+void LLVMInitializeCodeGen(LLVMPassRegistryRef R);
+void LLVMInitializeTarget(LLVMPassRegistryRef R);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
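
A minimal client-side sketch of the new header, paired with LLVMGetGlobalPassRegistry() from Core.h; which initializers a real client needs depends on the LLVM libraries it links, so the set below is only an example.

    #include <llvm-c/Initialization.h>

    static void init_llvm_passes(void) {
      /* One-time setup: fetch the global registry and run the
         per-library initializers. */
      LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
      LLVMInitializeCore(R);
      LLVMInitializeAnalysis(R);
      LLVMInitializeScalarOpts(R);
      LLVMInitializeTarget(R);
    }
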
diff --git a/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
index ccfdcee..fca3946 100644
--- a/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
+++ b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This header provides a C API to use the LLVM link time optimization
-// library. This is inteded to be used by linkers which are C-only in
+// library. This is intended to be used by linkers which are C-only in
// their implementation for performing LTO.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm-c/Transforms/Scalar.h b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
index c94019a..2ddfb38 100644
--- a/contrib/llvm/include/llvm-c/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
@@ -52,9 +52,6 @@ void LLVMAddLICMPass(LLVMPassManagerRef PM);
/** See llvm::createLoopDeletionPass function. */
void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM);
-/** See llvm::createLoopIndexSplitPass function. */
-void LLVMAddLoopIndexSplitPass(LLVMPassManagerRef PM);
-
/** See llvm::createLoopRotatePass function. */
void LLVMAddLoopRotatePass(LLVMPassManagerRef PM);
diff --git a/contrib/llvm/include/llvm-c/lto.h b/contrib/llvm/include/llvm-c/lto.h
index e6f69af..1c42ce0 100644
--- a/contrib/llvm/include/llvm-c/lto.h
+++ b/contrib/llvm/include/llvm-c/lto.h
@@ -18,27 +18,28 @@
#include <stdbool.h>
#include <stddef.h>
-#include "llvm/System/DataTypes.h"
+#include <unistd.h>
-#define LTO_API_VERSION 3
+#define LTO_API_VERSION 4
typedef enum {
- LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */
- LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
- LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
- LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
- LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
- LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
- LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
- LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
- LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
- LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
- LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500,
- LTO_SYMBOL_SCOPE_MASK = 0x00003800,
- LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
- LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
- LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
- LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800
+ LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */
+ LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
+ LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
+ LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
+ LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
+ LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
+ LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
+ LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
+ LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
+ LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
+ LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500,
+ LTO_SYMBOL_SCOPE_MASK = 0x00003800,
+ LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
+ LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
+ LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
+ LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800,
+ LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN = 0x00002800
} lto_symbol_attributes;
typedef enum {
@@ -121,6 +122,13 @@ lto_module_create(const char* path);
extern lto_module_t
lto_module_create_from_memory(const void* mem, size_t length);
+/**
+ * Loads an object file from disk. The seek point of fd is not preserved.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ */
+extern lto_module_t
+lto_module_create_from_fd(int fd, const char *path, off_t size);
+
/**
* Frees all memory internally allocated by the module.
@@ -146,7 +154,7 @@ lto_module_set_target_triple(lto_module_t mod, const char *triple);
/**
* Returns the number of symbols in the object module.
*/
-extern uint32_t
+extern unsigned int
lto_module_get_num_symbols(lto_module_t mod);
@@ -154,14 +162,14 @@ lto_module_get_num_symbols(lto_module_t mod);
* Returns the name of the ith symbol in the object module.
*/
extern const char*
-lto_module_get_symbol_name(lto_module_t mod, uint32_t index);
+lto_module_get_symbol_name(lto_module_t mod, unsigned int index);
/**
* Returns the attributes of the ith symbol in the object module.
*/
extern lto_symbol_attributes
-lto_module_get_symbol_attribute(lto_module_t mod, uint32_t index);
+lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index);
/**
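
A rough sketch of how a linker might use the new descriptor-based loader; error paths are abbreviated, and the assumption here is that the path argument only labels the module for diagnostics.

    #include <llvm-c/lto.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static void dump_symbols(const char *path) {
      int fd = open(path, O_RDONLY);
      if (fd < 0)
        return;
      struct stat st;
      if (fstat(fd, &st) != 0) {
        close(fd);
        return;
      }
      /* New with LTO_API_VERSION 4: load straight from an open fd;
         the seek position is not preserved. */
      lto_module_t mod = lto_module_create_from_fd(fd, path, st.st_size);
      if (mod) {
        unsigned n = lto_module_get_num_symbols(mod);
        for (unsigned i = 0; i < n; ++i)
          printf("%s\n", lto_module_get_symbol_name(mod, i));
        lto_module_dispose(mod);
      }
      close(fd);
    }
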
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index dfe4e0f..ca4138b 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -246,6 +246,13 @@ namespace llvm {
static APFloat getSmallestNormalized(const fltSemantics &Sem,
bool Negative = false);
+  /// getAllOnesValue - Returns a float which is bitcast from
+  /// an all-ones integer.
+  ///
+  /// \param BitWidth - Selects the float type
+  /// \param isIEEE - For 128-bit widths, selects between PPC double-double
+  ///                 and IEEE quad precision
+ static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
/// Profile - Used to insert APFloat objects, or objects that contain
/// APFloat objects, into FoldingSets.
void Profile(FoldingSetNodeID& NID) const;
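
For illustration, the new helper at a 32-bit width; the comment spells out the resulting bit pattern, assuming IEEE single precision.

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    // 0xFFFFFFFF reinterpreted as an IEEE single: sign bit set, exponent
    // all ones, non-zero significand -- i.e. a negative quiet NaN.
    APFloat AllOnes = APFloat::getAllOnesValue(32);
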
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index 8004cb4..b91d5dc 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -275,12 +275,6 @@ public:
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& id) const;
- /// @brief Used by the Bitcode serializer to emit APInts to Bitcode.
- void Emit(Serializer& S) const;
-
- /// @brief Used by the Bitcode deserializer to deserialize APInts.
- void Read(Deserializer& D);
-
/// @}
/// @name Value Tests
/// @{
@@ -302,7 +296,7 @@ public:
/// @returns true if this APInt is positive.
/// @brief Determine if this APInt Value is positive.
bool isStrictlyPositive() const {
- return isNonNegative() && (*this) != 0;
+ return isNonNegative() && !!*this;
}
/// This checks to see if all bits of the APInt's value are set.
@@ -330,15 +324,14 @@ public:
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest unsigned value.
bool isMinValue() const {
- return countPopulation() == 0;
+ return !*this;
}
/// This checks to see if the value of this APInt is the minimum signed
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest signed value.
bool isMinSignedValue() const {
- return BitWidth == 1 ? VAL == 1 :
- isNegative() && countPopulation() == 1;
+ return BitWidth == 1 ? VAL == 1 : isNegative() && isPowerOf2();
}
/// @brief Check if this APInt has an N-bits unsigned integer value.
@@ -348,10 +341,8 @@ public:
return true;
if (isSingleWord())
- return VAL == (VAL & (~0ULL >> (64 - N)));
- APInt Tmp(N, getNumWords(), pVal);
- Tmp.zext(getBitWidth());
- return Tmp == (*this);
+ return isUIntN(N, VAL);
+ return APInt(N, getNumWords(), pVal).zext(getBitWidth()) == (*this);
}
/// @brief Check if this APInt has an N-bits signed integer value.
@@ -361,7 +352,11 @@ public:
}
/// @returns true if the argument APInt value is a power of two > 0.
- bool isPowerOf2() const;
+ bool isPowerOf2() const {
+ if (isSingleWord())
+ return isPowerOf2_64(VAL);
+ return countPopulationSlowCase() == 1;
+ }
/// isSignBit - Return true if this is the value returned by getSignBit.
bool isSignBit() const { return isMinSignedValue(); }
@@ -369,7 +364,7 @@ public:
/// This converts the APInt to a boolean value as a test against zero.
/// @brief Boolean conversion function.
bool getBoolValue() const {
- return *this != 0;
+ return !!*this;
}
/// getLimitedValue - If this value is smaller than the specified limit,
@@ -385,12 +380,14 @@ public:
/// @{
/// @brief Gets maximum unsigned value of APInt for specific bit width.
static APInt getMaxValue(unsigned numBits) {
- return APInt(numBits, 0).set();
+ return getAllOnesValue(numBits);
}
/// @brief Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMaxValue(unsigned numBits) {
- return APInt(numBits, 0).set().clear(numBits - 1);
+ APInt API = getAllOnesValue(numBits);
+ API.clearBit(numBits - 1);
+ return API;
}
/// @brief Gets minimum unsigned value of APInt for a specific bit width.
@@ -400,7 +397,9 @@ public:
/// @brief Gets minimum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits) {
- return APInt(numBits, 0).set(numBits - 1);
+ APInt API(numBits, 0);
+ API.setBit(numBits - 1);
+ return API;
}
/// getSignBit - This is just a wrapper function of getSignedMinValue(), and
@@ -413,7 +412,7 @@ public:
/// @returns the all-ones value for an APInt of the specified bit-width.
/// @brief Get the all-ones value.
static APInt getAllOnesValue(unsigned numBits) {
- return APInt(numBits, 0).set();
+ return APInt(numBits, -1ULL, true);
}
/// @returns the '0' value for an APInt of the specified bit-width.
@@ -432,6 +431,13 @@ public:
/// @returns the low "numBits" bits of this APInt.
APInt getLoBits(unsigned numBits) const;
+ /// getOneBitSet - Return an APInt with exactly one bit set in the result.
+ static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+ APInt Res(numBits, 0);
+ Res.setBit(BitNo);
+ return Res;
+ }
+
/// Constructs an APInt value that has a contiguous range of bits set. The
/// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
/// bits will be zero. For example, with parameters(32, 0, 16) you would get
@@ -530,7 +536,7 @@ public:
/// @brief Unary bitwise complement operator.
APInt operator~() const {
APInt Result(*this);
- Result.flip();
+ Result.flipAllBits();
return Result;
}
@@ -741,11 +747,11 @@ public:
/// RHS are treated as unsigned quantities for purposes of this division.
/// @returns a new APInt value containing the division result
/// @brief Unsigned division operation.
- APInt udiv(const APInt& RHS) const;
+ APInt udiv(const APInt &RHS) const;
/// Signed divide this APInt by APInt RHS.
/// @brief Signed division function for APInt.
- APInt sdiv(const APInt& RHS) const {
+ APInt sdiv(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return (-(*this)).udiv(-RHS);
@@ -763,11 +769,11 @@ public:
/// which is *this.
/// @returns a new APInt value containing the remainder result
/// @brief Unsigned remainder operation.
- APInt urem(const APInt& RHS) const;
+ APInt urem(const APInt &RHS) const;
/// Signed remainder operation on APInt.
/// @brief Function for signed remainder operation.
- APInt srem(const APInt& RHS) const {
+ APInt srem(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return -((-(*this)).urem(-RHS));
@@ -788,8 +794,7 @@ public:
APInt &Quotient, APInt &Remainder);
static void sdivrem(const APInt &LHS, const APInt &RHS,
- APInt &Quotient, APInt &Remainder)
- {
+ APInt &Quotient, APInt &Remainder) {
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
@@ -804,6 +809,16 @@ public:
APInt::udivrem(LHS, RHS, Quotient, Remainder);
}
}
+
+
+ // Operations that return overflow indicators.
+ APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+ APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sshl_ov(unsigned Amt, bool &Overflow) const;
/// @returns the bit value at bitPosition
/// @brief Array-indexing support.
@@ -868,7 +883,7 @@ public:
/// the validity of the less-than relationship.
/// @returns true if *this < RHS when both are considered unsigned.
/// @brief Unsigned less than comparison
- bool ult(const APInt& RHS) const;
+ bool ult(const APInt &RHS) const;
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the less-than relationship.
@@ -988,6 +1003,9 @@ public:
return sge(APInt(getBitWidth(), RHS));
}
+
+
+
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
bool intersects(const APInt &RHS) const {
@@ -1000,80 +1018,78 @@ public:
/// Truncate the APInt to a specified width. It is an error to specify a width
/// that is greater than or equal to the current width.
/// @brief Truncate to new width.
- APInt &trunc(unsigned width);
+ APInt trunc(unsigned width) const;
/// This operation sign extends the APInt to a new width. If the high order
/// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
/// It is an error to specify a width that is less than or equal to the
/// current width.
/// @brief Sign extend to a new width.
- APInt &sext(unsigned width);
+ APInt sext(unsigned width) const;
/// This operation zero extends the APInt to a new width. The high order bits
/// are filled with 0 bits. It is an error to specify a width that is less
/// than or equal to the current width.
/// @brief Zero extend to a new width.
- APInt &zext(unsigned width);
+ APInt zext(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, truncated, or left alone to make it that width.
/// @brief Sign extend or truncate to width
- APInt &sextOrTrunc(unsigned width);
+ APInt sextOrTrunc(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, truncated, or left alone to make it that width.
/// @brief Zero extend or truncate to width
- APInt &zextOrTrunc(unsigned width);
+ APInt zextOrTrunc(unsigned width) const;
/// @}
/// @name Bit Manipulation Operators
/// @{
/// @brief Set every bit to 1.
- APInt& set() {
- if (isSingleWord()) {
+ void setAllBits() {
+ if (isSingleWord())
VAL = -1ULL;
- return clearUnusedBits();
+ else {
+ // Set all the bits in all the words.
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] = -1ULL;
}
-
- // Set all the bits in all the words.
- for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] = -1ULL;
// Clear the unused ones
- return clearUnusedBits();
+ clearUnusedBits();
}
/// Set the bit at the position given by "bitPosition" to 1.
/// @brief Set a given bit to 1.
- APInt& set(unsigned bitPosition);
+ void setBit(unsigned bitPosition);
/// @brief Set every bit to 0.
- APInt& clear() {
+ void clearAllBits() {
if (isSingleWord())
VAL = 0;
else
memset(pVal, 0, getNumWords() * APINT_WORD_SIZE);
- return *this;
}
/// Set the bit at the position given by "bitPosition" to 0.
/// @brief Set a given bit to 0.
- APInt& clear(unsigned bitPosition);
+ void clearBit(unsigned bitPosition);
/// @brief Toggle every bit to its opposite value.
- APInt& flip() {
- if (isSingleWord()) {
+ void flipAllBits() {
+ if (isSingleWord())
VAL ^= -1ULL;
- return clearUnusedBits();
+ else {
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] ^= -1ULL;
}
- for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] ^= -1ULL;
- return clearUnusedBits();
+ clearUnusedBits();
}
/// Toggle the bit at the position given by "bitPosition" to its
/// opposite value.
/// @brief Toggles a given bit to its opposite value.
- APInt& flip(unsigned bitPosition);
+ void flipBit(unsigned bitPosition);
/// @}
/// @name Value Characterization Functions
@@ -1281,37 +1297,27 @@ public:
}
/// The conversion does not do a translation from double to integer, it just
- /// re-interprets the bits of the double. Note that it is valid to do this on
- /// any bit width but bits from V may get truncated.
+ /// re-interprets the bits of the double.
/// @brief Converts a double to APInt bits.
- APInt& doubleToBits(double V) {
+ static APInt doubleToBits(double V) {
union {
uint64_t I;
double D;
} T;
T.D = V;
- if (isSingleWord())
- VAL = T.I;
- else
- pVal[0] = T.I;
- return clearUnusedBits();
+ return APInt(sizeof T * CHAR_BIT, T.I);
}
/// The conversion does not do a translation from float to integer, it just
- /// re-interprets the bits of the float. Note that it is valid to do this on
- /// any bit width but bits from V may get truncated.
+ /// re-interprets the bits of the float.
/// @brief Converts a float to APInt bits.
- APInt& floatToBits(float V) {
+ static APInt floatToBits(float V) {
union {
unsigned I;
float F;
} T;
T.F = V;
- if (isSingleWord())
- VAL = T.I;
- else
- pVal[0] = T.I;
- return clearUnusedBits();
+ return APInt(sizeof T * CHAR_BIT, T.I);
}
/// @}
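
Two of the APInt changes above deserve a sketch: the width-changing operations now return a fresh value instead of mutating in place, and the new *_ov family reports overflow through an out parameter. The values are arbitrary.

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    void apint_demo() {
      APInt A(8, 200);               // 8-bit value 200
      APInt Wide = A.zext(16);       // returns a 16-bit copy; A is unchanged
      APInt Narrow = Wide.trunc(8);  // back down to 8 bits

      bool Overflow = false;
      APInt B(8, 100);
      APInt Sum = B.sadd_ov(B, Overflow);  // 100 + 100 overflows signed i8
      // Overflow is now true; Sum holds the wrapped two's-complement result.
      (void)Narrow; (void)Sum;
    }
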
diff --git a/contrib/llvm/include/llvm/ADT/APSInt.h b/contrib/llvm/include/llvm/ADT/APSInt.h
index 1c9931c..54a7b60 100644
--- a/contrib/llvm/include/llvm/ADT/APSInt.h
+++ b/contrib/llvm/include/llvm/ADT/APSInt.h
@@ -68,20 +68,22 @@ public:
}
using APInt::toString;
- APSInt& extend(uint32_t width) {
+ APSInt trunc(uint32_t width) const {
+ return APSInt(APInt::trunc(width), IsUnsigned);
+ }
+
+ APSInt extend(uint32_t width) const {
if (IsUnsigned)
- zext(width);
+ return APSInt(zext(width), IsUnsigned);
else
- sext(width);
- return *this;
+ return APSInt(sext(width), IsUnsigned);
}
- APSInt& extOrTrunc(uint32_t width) {
+ APSInt extOrTrunc(uint32_t width) const {
if (IsUnsigned)
- zextOrTrunc(width);
+ return APSInt(zextOrTrunc(width), IsUnsigned);
else
- sextOrTrunc(width);
- return *this;
+ return APSInt(sextOrTrunc(width), IsUnsigned);
}
const APSInt &operator%=(const APSInt &RHS) {
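
The APSInt wrappers follow the same mutate-to-value migration; a one-function sketch:

    #include "llvm/ADT/APSInt.h"
    using namespace llvm;

    APSInt widen_to_64(const APSInt &V) {
      // extOrTrunc() is now const and returns the resized value, choosing
      // zero- or sign-extension from the operand's stored signedness.
      return V.extOrTrunc(64);
    }
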
diff --git a/contrib/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000..1c5470d
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,121 @@
+//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace llvm {
+ class APInt;
+
+ /// ArrayRef - Represent a constant reference to an array (0 or more elements
+ /// consecutively in memory), i.e. a start pointer and a length. It allows
+ /// various APIs to take consecutive elements easily and conveniently.
+ ///
+  /// This class does not own the underlying data; it is expected to be used
+  /// in situations where the data resides in some other buffer, whose
+  /// lifetime extends past that of the ArrayRef. For this reason, it is not
+  /// in general safe to store an ArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class ArrayRef {
+ public:
+ typedef const T *iterator;
+ typedef const T *const_iterator;
+ typedef size_t size_type;
+
+ private:
+ /// The start of the array, in an external buffer.
+ const T *Data;
+
+ /// The number of elements.
+ size_t Length;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty ArrayRef.
+ /*implicit*/ ArrayRef() : Data(0), Length(0) {}
+
+ /// Construct an ArrayRef from a single element.
+ /*implicit*/ ArrayRef(const T &OneElt)
+ : Data(&OneElt), Length(1) {}
+
+ /// Construct an ArrayRef from a pointer and length.
+ /*implicit*/ ArrayRef(const T *data, size_t length)
+ : Data(data), Length(length) {}
+
+ /// Construct an ArrayRef from a SmallVector.
+ /*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// Construct an ArrayRef from a std::vector.
+ /*implicit*/ ArrayRef(const std::vector<T> &Vec)
+ : Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {}
+
+ // TODO: C arrays.
+
+ /// @}
+ /// @name Simple Operations
+ /// @{
+
+ iterator begin() const { return Data; }
+ iterator end() const { return Data + Length; }
+
+ /// empty - Check if the array is empty.
+ bool empty() const { return Length == 0; }
+
+ /// size - Get the array size.
+ size_t size() const { return Length; }
+
+ /// front - Get the first element.
+ const T &front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last element.
+ const T &back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+
+ const T &operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// @}
+ /// @name Expensive Operations
+ /// @{
+
+ std::vector<T> vec() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ };
+
+ // ArrayRefs can be treated like a POD type.
+ template <typename T> struct isPodLike;
+ template <typename T> struct isPodLike<ArrayRef<T> > {
+ static const bool value = true;
+ };
+}
+
+#endif
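
Since ArrayRef.h is new in this import, a small caller-side sketch; an ArrayRef never owns its storage, so every argument below must outlive the call.

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include <vector>
    using namespace llvm;

    static int sum(ArrayRef<int> Vals) {
      int S = 0;
      for (ArrayRef<int>::iterator I = Vals.begin(), E = Vals.end(); I != E; ++I)
        S += *I;
      return S;
    }

    void arrayref_demo() {
      std::vector<int> V(3, 7);
      SmallVector<int, 4> SV;
      SV.push_back(1);
      SV.push_back(2);
      int A = sum(V);   // implicit conversion from std::vector
      int B = sum(SV);  // implicit conversion from SmallVector
      int C = sum(42);  // implicit conversion from a single element
      (void)A; (void)B; (void)C;
    }
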
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 9dcb9e1..ac1cf0c 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -18,6 +18,7 @@
#include <algorithm>
#include <cassert>
#include <climits>
+#include <cstdlib>
#include <cstring>
namespace llvm {
@@ -77,7 +78,7 @@ public:
/// bits are initialized to the specified value.
explicit BitVector(unsigned s, bool t = false) : Size(s) {
Capacity = NumBitWords(s);
- Bits = new BitWord[Capacity];
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
init_words(Bits, Capacity, t);
if (t)
clear_unused_bits();
@@ -92,12 +93,12 @@ public:
}
Capacity = NumBitWords(RHS.size());
- Bits = new BitWord[Capacity];
- std::copy(RHS.Bits, &RHS.Bits[Capacity], Bits);
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
~BitVector() {
- delete[] Bits;
+ std::free(Bits);
}
/// empty - Tests whether there are no bits in this bitvector.
@@ -127,6 +128,12 @@ public:
return false;
}
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ // TODO: Optimize this.
+ return count() == size();
+ }
+
/// none - Returns true if none of the bits are set.
bool none() const {
return !any();
@@ -335,18 +342,18 @@ public:
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
if (Size)
- std::copy(RHS.Bits, &RHS.Bits[RHSWords], Bits);
+ std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
clear_unused_bits();
return *this;
}
// Grow the bitvector to have enough elements.
Capacity = RHSWords;
- BitWord *NewBits = new BitWord[Capacity];
- std::copy(RHS.Bits, &RHS.Bits[RHSWords], NewBits);
+ BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
// Destroy the old bits.
- delete[] Bits;
+ std::free(Bits);
Bits = NewBits;
return *this;
@@ -384,17 +391,8 @@ private:
}
void grow(unsigned NewSize) {
- unsigned OldCapacity = Capacity;
- Capacity = NumBitWords(NewSize);
- BitWord *NewBits = new BitWord[Capacity];
-
- // Copy the old bits over.
- if (OldCapacity != 0)
- std::copy(Bits, &Bits[OldCapacity], NewBits);
-
- // Destroy the old bits.
- delete[] Bits;
- Bits = NewBits;
+ Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
+ Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
clear_unused_bits();
}
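
A tiny sketch of the new all() query; the switch from new[] to malloc/realloc with capacity doubling above changes nothing at this level.

    #include "llvm/ADT/BitVector.h"
    using namespace llvm;

    void bitvector_demo() {
      BitVector BV(8, true);  // eight bits, all initially set
      bool A = BV.all();      // true: count() == size()
      BV.reset(3);
      bool B = BV.all();      // false after clearing one bit
      (void)A; (void)B;
    }
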
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index 06a1575..61d6ae7 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -18,6 +18,7 @@
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
@@ -385,7 +386,7 @@ private:
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->first, DestBucket);
- FoundVal = FoundVal; // silence warning.
+ (void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->first = B->first;
new (&DestBucket->second) ValueT(B->second);
diff --git a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
index 5299386..25e341b 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
@@ -102,6 +102,20 @@ template<> struct DenseMapInfo<int> {
}
};
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+ static inline long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1L;
+ }
+ static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+ static unsigned getHashValue(const long& Val) {
+ return (unsigned)(Val * 37L);
+ }
+ static bool isEqual(const long& LHS, const long& RHS) {
+ return LHS == RHS;
+ }
+};
+
// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
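With this specialization in place, DenseMap (and DenseSet) can be keyed directly on long, alongside the existing int and long long traits. A hedged usage sketch (the counting loop is illustrative); as with any DenseMapInfo, keys equal to getEmptyKey() or getTombstoneKey() (here LONG_MAX and LONG_MAX-1) must never be inserted:

    #include "llvm/ADT/DenseMap.h"

    unsigned countDistinct(const long *Vals, unsigned N) {
      llvm::DenseMap<long, unsigned> Freq;  // hashes via DenseMapInfo<long>
      for (unsigned i = 0; i != N; ++i)
        ++Freq[Vals[i]];
      return Freq.size();
    }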
diff --git a/contrib/llvm/include/llvm/ADT/DenseSet.h b/contrib/llvm/include/llvm/ADT/DenseSet.h
index 00bcf64..67321f5 100644
--- a/contrib/llvm/include/llvm/ADT/DenseSet.h
+++ b/contrib/llvm/include/llvm/ADT/DenseSet.h
@@ -33,6 +33,9 @@ public:
bool empty() const { return TheMap.empty(); }
unsigned size() const { return TheMap.size(); }
+ /// Grow the DenseSet so that it has at least Size buckets. Does not shrink.
+ void resize(size_t Size) { TheMap.resize(Size); }
+
void clear() {
TheMap.clear();
}
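resize() here forwards to DenseMap::resize, i.e. it pre-sizes the bucket array rather than setting an element count, which avoids repeated rehashing during a known-size bulk insert. A short sketch (sizes illustrative):

    #include "llvm/ADT/DenseSet.h"

    void collectMultiples() {
      llvm::DenseSet<unsigned> Seen;
      Seen.resize(512);          // at least 512 buckets up front; never shrinks
      for (unsigned i = 0; i != 200; ++i)
        Seen.insert(i * 3);      // no rehash during the bulk insert
    }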
diff --git a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
index 07a5edf..771476c 100644
--- a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
+++ b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
@@ -15,7 +15,7 @@
#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
#define LLVM_ADT_EQUIVALENCECLASSES_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <set>
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
index 662b5e2..879dbd0 100644
--- a/contrib/llvm/include/llvm/ADT/FoldingSet.h
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -16,7 +16,7 @@
#ifndef LLVM_ADT_FOLDINGSET_H
#define LLVM_ADT_FOLDINGSET_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h b/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
index 968ce15..d3196ca 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
@@ -94,7 +94,7 @@ public:
: ImutAVLFactory<ImutInfo>(Alloc) {}
TreeTy *Add(TreeTy *T, value_type_ref V) {
- T = Add_internal(V,T);
+ T = add_internal(V,T);
- this->MarkImmutable(T);
+ this->markImmutable(T);
return T;
}
@@ -103,20 +103,20 @@ public:
if (!T)
return NULL;
- key_type_ref CurrentKey = ImutInfo::KeyOfValue(this->Value(T));
+ key_type_ref CurrentKey = ImutInfo::KeyOfValue(this->getValue(T));
if (ImutInfo::isContainedIn(K, CurrentKey))
return T;
else if (ImutInfo::isLess(K, CurrentKey))
- return Find(this->Left(T), K);
+ return Find(this->getLeft(T), K);
else
- return Find(this->Right(T), K);
+ return Find(this->getRight(T), K);
}
private:
- TreeTy *Add_internal(value_type_ref V, TreeTy *T) {
+ TreeTy *add_internal(value_type_ref V, TreeTy *T) {
key_type_ref K = ImutInfo::KeyOfValue(V);
- T = RemoveAllOverlaps(T, K);
+ T = removeAllOverlaps(T, K);
if (this->isEmpty(T))
return this->CreateNode(NULL, V, NULL);
@@ -125,38 +125,38 @@ private:
key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(Add_internal(V, this->Left(T)), this->Value(T),
+ return this->Balance(add_internal(V, this->Left(T)), this->Value(T),
this->Right(T));
else
return this->Balance(this->Left(T), this->Value(T),
- Add_internal(V, this->Right(T)));
+ add_internal(V, this->Right(T)));
}
// Remove all overlaps from T.
- TreeTy *RemoveAllOverlaps(TreeTy *T, key_type_ref K) {
+ TreeTy *removeAllOverlaps(TreeTy *T, key_type_ref K) {
bool Changed;
do {
Changed = false;
- T = RemoveOverlap(T, K, Changed);
- this->MarkImmutable(T);
+ T = removeOverlap(T, K, Changed);
+ this->markImmutable(T);
} while (Changed);
return T;
}
// Remove one overlap from T.
- TreeTy *RemoveOverlap(TreeTy *T, key_type_ref K, bool &Changed) {
+ TreeTy *removeOverlap(TreeTy *T, key_type_ref K, bool &Changed) {
if (!T)
return NULL;
Interval CurrentK = ImutInfo::KeyOfValue(this->Value(T));
// If current key does not overlap the inserted key.
if (CurrentK.getStart() > K.getEnd())
- return this->Balance(RemoveOverlap(this->Left(T), K, Changed),
+ return this->Balance(removeOverlap(this->Left(T), K, Changed),
this->Value(T), this->Right(T));
else if (CurrentK.getEnd() < K.getStart())
return this->Balance(this->Left(T), this->Value(T),
- RemoveOverlap(this->Right(T), K, Changed));
+ removeOverlap(this->Right(T), K, Changed));
// Current key overlaps with the inserted key.
// Remove the current key.
@@ -167,18 +167,18 @@ private:
if (CurrentK.getStart() < K.getStart()) {
if (CurrentK.getEnd() <= K.getEnd()) {
Interval NewK(CurrentK.getStart(), K.getStart()-1);
- return Add_internal(std::make_pair(NewK, OldData), T);
+ return add_internal(std::make_pair(NewK, OldData), T);
} else {
Interval NewK1(CurrentK.getStart(), K.getStart()-1);
- T = Add_internal(std::make_pair(NewK1, OldData), T);
+ T = add_internal(std::make_pair(NewK1, OldData), T);
Interval NewK2(K.getEnd()+1, CurrentK.getEnd());
- return Add_internal(std::make_pair(NewK2, OldData), T);
+ return add_internal(std::make_pair(NewK2, OldData), T);
}
} else {
if (CurrentK.getEnd() > K.getEnd()) {
Interval NewK(K.getEnd()+1, CurrentK.getEnd());
- return Add_internal(std::make_pair(NewK, OldData), T);
+ return add_internal(std::make_pair(NewK, OldData), T);
} else
return T;
}
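A concrete trace of the splitting branches above: if the tree holds [1,10] -> D and the key [4,6] is inserted, the first branch fires (1 < 4 and 10 > 6), so removeOverlap re-adds the two non-overlapping remainders NewK1 = [1,3] and NewK2 = [7,10], both still bound to OldData; the new [4,6] binding is then inserted by add_internal once removeAllOverlaps finishes.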
@@ -209,22 +209,22 @@ public:
public:
Factory(BumpPtrAllocator& Alloc) : F(Alloc) {}
- ImmutableIntervalMap GetEmptyMap() {
- return ImmutableIntervalMap(F.GetEmptyTree());
+ ImmutableIntervalMap getEmptyMap() {
+ return ImmutableIntervalMap(F.getEmptyTree());
}
- ImmutableIntervalMap Add(ImmutableIntervalMap Old,
+ ImmutableIntervalMap add(ImmutableIntervalMap Old,
key_type_ref K, data_type_ref D) {
- TreeTy *T = F.Add(Old.Root, std::make_pair<key_type, data_type>(K, D));
- return ImmutableIntervalMap(F.GetCanonicalTree(T));
+ TreeTy *T = F.add(Old.Root, std::make_pair<key_type, data_type>(K, D));
+ return ImmutableIntervalMap(F.getCanonicalTree(T));
}
- ImmutableIntervalMap Remove(ImmutableIntervalMap Old, key_type_ref K) {
- TreeTy *T = F.Remove(Old.Root, K);
- return ImmutableIntervalMap(F.GetCanonicalTree(T));
+ ImmutableIntervalMap remove(ImmutableIntervalMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root, K);
+ return ImmutableIntervalMap(F.getCanonicalTree(T));
}
- data_type *Lookup(ImmutableIntervalMap M, key_type_ref K) {
+ data_type *lookup(ImmutableIntervalMap M, key_type_ref K) {
TreeTy *T = F.Find(M.getRoot(), K);
if (T)
return &T->getValue().second;
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableList.h b/contrib/llvm/include/llvm/ADT/ImmutableList.h
index 7757c08..714355b 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableList.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableList.h
@@ -16,7 +16,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
@@ -156,7 +156,7 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
- ImmutableList<T> Concat(const T& Head, ImmutableList<T> Tail) {
+ ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
@@ -178,16 +178,16 @@ public:
return L;
}
- ImmutableList<T> Add(const T& D, ImmutableList<T> L) {
- return Concat(D, L);
+ ImmutableList<T> add(const T& D, ImmutableList<T> L) {
+ return concat(D, L);
}
- ImmutableList<T> GetEmptyList() const {
+ ImmutableList<T> getEmptyList() const {
return ImmutableList<T>(0);
}
- ImmutableList<T> Create(const T& X) {
- return Concat(X, GetEmptyList());
+ ImmutableList<T> create(const T& X) {
+ return concat(X, getEmptyList());
}
};
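The renamed list factory reads the same as before, just lowercased. A short sketch showing the structural sharing that concat() provides (values illustrative):

    #include "llvm/ADT/ImmutableList.h"

    void buildLists() {
      llvm::ImmutableList<int>::Factory F;
      llvm::ImmutableList<int> Empty = F.getEmptyList();
      llvm::ImmutableList<int> L1 = F.add(3, Empty);  // [3]
      llvm::ImmutableList<int> L2 = F.add(2, L1);     // [2,3], shares L1's cell
    }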
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableMap.h b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
index 8af128e..e439a09 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableMap.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
@@ -76,7 +76,23 @@ public:
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where making this
/// constructor public is useful.
- explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {}
+ explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableMap &operator=(const ImmutableMap &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+ ~ImmutableMap() {
+ if (Root) { Root->release(); }
+ }
class Factory {
typename TreeTy::Factory F;
@@ -89,16 +105,16 @@ public:
Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
- ImmutableMap GetEmptyMap() { return ImmutableMap(F.GetEmptyTree()); }
+ ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
- ImmutableMap Add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
- TreeTy *T = F.Add(Old.Root, std::make_pair<key_type,data_type>(K,D));
- return ImmutableMap(Canonicalize ? F.GetCanonicalTree(T): T);
+ ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
+ TreeTy *T = F.add(Old.Root, std::make_pair<key_type,data_type>(K,D));
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
- ImmutableMap Remove(ImmutableMap Old, key_type_ref K) {
- TreeTy *T = F.Remove(Old.Root,K);
- return ImmutableMap(Canonicalize ? F.GetCanonicalTree(T): T);
+ ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root,K);
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
private:
@@ -110,15 +126,30 @@ public:
return Root ? Root->contains(K) : false;
}
- bool operator==(ImmutableMap RHS) const {
+ bool operator==(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
- bool operator!=(ImmutableMap RHS) const {
+ bool operator!=(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
- TreeTy* getRoot() const { return Root; }
+ TreeTy *getRoot() const {
+ if (Root) { Root->retain(); }
+ return Root;
+ }
+
+ TreeTy *getRootWithoutRetain() const {
+ return Root;
+ }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
bool isEmpty() const { return !Root; }
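Because ImmutableMap copies now retain the root and the destructor releases it, a map value keeps its tree alive even across the factory's recoverNodes() sweep. A hedged sketch of the renamed API (key/value types illustrative):

    #include "llvm/ADT/ImmutableMap.h"
    #include "llvm/Support/Allocator.h"

    void persistentMaps() {
      llvm::BumpPtrAllocator Alloc;
      llvm::ImmutableMap<int, int>::Factory F(Alloc);

      llvm::ImmutableMap<int, int> M0 = F.getEmptyMap();
      llvm::ImmutableMap<int, int> M1 = F.add(M0, 1, 10);  // M0 unchanged
      llvm::ImmutableMap<int, int> M2 = F.remove(M1, 1);   // M1 unchanged
      // M1 still answers lookups; refcounting keeps its root alive.
    }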
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
index 70c3caf..3ca910c 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
@@ -15,10 +15,13 @@
#define LLVM_ADT_IMSET_H
#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <functional>
+#include <vector>
+#include <stdio.h>
namespace llvm {
@@ -32,7 +35,7 @@ template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
template <typename ImutInfo> class ImutAVLTreeGenericIterator;
template <typename ImutInfo >
-class ImutAVLTree : public FoldingSetNode {
+class ImutAVLTree {
public:
typedef typename ImutInfo::key_type_ref key_type_ref;
typedef typename ImutInfo::value_type value_type;
@@ -43,7 +46,6 @@ public:
friend class ImutIntervalAVLFactory<ImutInfo>;
friend class ImutAVLTreeGenericIterator<ImutInfo>;
- friend class FoldingSet<ImutAVLTree>;
typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator;
@@ -51,29 +53,27 @@ public:
// Public Interface.
//===----------------------------------------------------===//
- /// getLeft - Returns a pointer to the left subtree. This value
+ /// Return a pointer to the left subtree. This value
/// is NULL if there is no left subtree.
- ImutAVLTree *getLeft() const { return Left; }
+ ImutAVLTree *getLeft() const { return left; }
- /// getRight - Returns a pointer to the right subtree. This value is
+ /// Return a pointer to the right subtree. This value is
/// NULL if there is no right subtree.
- ImutAVLTree *getRight() const { return Right; }
+ ImutAVLTree *getRight() const { return right; }
/// getHeight - Returns the height of the tree. A tree with no subtrees
/// has a height of 1.
- unsigned getHeight() const { return Height; }
+ unsigned getHeight() const { return height; }
/// getValue - Returns the data value associated with the tree node.
- const value_type& getValue() const { return Value; }
+ const value_type& getValue() const { return value; }
/// find - Finds the subtree associated with the specified key value.
/// This method returns NULL if no matching subtree is found.
ImutAVLTree* find(key_type_ref K) {
ImutAVLTree *T = this;
-
while (T) {
key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
-
if (ImutInfo::isEqual(K,CurrentKey))
return T;
else if (ImutInfo::isLess(K,CurrentKey))
@@ -81,7 +81,6 @@ public:
else
T = T->getRight();
}
-
return NULL;
}
@@ -90,7 +89,7 @@ public:
ImutAVLTree* getMaxElement() {
ImutAVLTree *T = this;
- ImutAVLTree *Right = T->getRight();
- while (Right) { T = Right; Right = T->getRight(); }
+ ImutAVLTree *right = T->getRight();
+ while (right) { T = right; right = T->getRight(); }
return T;
}
@@ -98,10 +97,10 @@ public:
/// both leaves and non-leaf nodes.
unsigned size() const {
unsigned n = 1;
-
- if (const ImutAVLTree* L = getLeft()) n += L->size();
- if (const ImutAVLTree* R = getRight()) n += R->size();
-
+ if (const ImutAVLTree* L = getLeft())
+ n += L->size();
+ if (const ImutAVLTree* R = getRight())
+ n += R->size();
return n;
}
@@ -114,7 +113,7 @@ public:
/// inorder traversal.
iterator end() const { return iterator(); }
- bool ElementEqual(value_type_ref V) const {
+ bool isElementEqual(value_type_ref V) const {
// Compare the keys.
if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
ImutInfo::KeyOfValue(V)))
@@ -128,8 +127,8 @@ public:
return true;
}
- bool ElementEqual(const ImutAVLTree* RHS) const {
- return ElementEqual(RHS->getValue());
+ bool isElementEqual(const ImutAVLTree* RHS) const {
+ return isElementEqual(RHS->getValue());
}
/// isEqual - Compares two trees for structural equality and returns true
@@ -144,12 +143,12 @@ public:
while (LItr != LEnd && RItr != REnd) {
if (*LItr == *RItr) {
- LItr.SkipSubTree();
- RItr.SkipSubTree();
+ LItr.skipSubTree();
+ RItr.skipSubTree();
continue;
}
- if (!LItr->ElementEqual(*RItr))
+ if (!LItr->isElementEqual(*RItr))
return false;
++LItr;
@@ -173,22 +172,24 @@ public:
/// Nodes are visited using an inorder traversal.
template <typename Callback>
void foreach(Callback& C) {
- if (ImutAVLTree* L = getLeft()) L->foreach(C);
+ if (ImutAVLTree* L = getLeft())
+ L->foreach(C);
- C(Value);
+ C(value);
- if (ImutAVLTree* R = getRight()) R->foreach(C);
+ if (ImutAVLTree* R = getRight())
+ R->foreach(C);
}
- /// verify - A utility method that checks that the balancing and
+ /// validateTree - A utility method that checks that the balancing and
/// ordering invariants of the tree are satisfied. It is a recursive
/// method that returns the height of the tree, which is then consumed
- /// by the enclosing verify call. External callers should ignore the
+ /// by the enclosing validateTree call. External callers should ignore the
/// return value. An invalid tree will cause an assertion to fire in
/// a debug build.
- unsigned verify() const {
- unsigned HL = getLeft() ? getLeft()->verify() : 0;
- unsigned HR = getRight() ? getRight()->verify() : 0;
+ unsigned validateTree() const {
+ unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+ unsigned HR = getRight() ? getRight()->validateTree() : 0;
(void) HL;
(void) HR;
@@ -198,37 +199,39 @@ public:
assert((HL > HR ? HL-HR : HR-HL) <= 2
&& "Balancing invariant violated");
- assert(!getLeft()
- || ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
- ImutInfo::KeyOfValue(getValue()))
- && "Value in left child is not less that current value");
+ assert((!getLeft() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+ ImutInfo::KeyOfValue(getValue()))) &&
+ "Value in left child is not less than current value");
- assert(!getRight()
- || ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
- ImutInfo::KeyOfValue(getRight()->getValue()))
- && "Current value is not less that value of right child");
+ assert((!getRight() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+ "Current value is not less than value of right child");
return getHeight();
}
- /// Profile - Profiling for ImutAVLTree.
- void Profile(llvm::FoldingSetNodeID& ID) {
- ID.AddInteger(ComputeDigest());
- }
-
//===----------------------------------------------------===//
- // Internal Values.
+ // Internal values.
//===----------------------------------------------------===//
private:
- ImutAVLTree* Left;
- ImutAVLTree* Right;
- unsigned Height : 28;
- unsigned Mutable : 1;
- unsigned CachedDigest : 1;
- value_type Value;
- uint32_t Digest;
+ Factory *factory;
+ ImutAVLTree *left;
+ ImutAVLTree *right;
+ ImutAVLTree *prev;
+ ImutAVLTree *next;
+
+ unsigned height : 28;
+ unsigned IsMutable : 1;
+ unsigned IsDigestCached : 1;
+ unsigned IsCanonicalized : 1;
+
+ value_type value;
+ uint32_t digest;
+ uint32_t refCount;
//===----------------------------------------------------===//
// Internal methods (node manipulation; used by Factory).
@@ -237,10 +240,15 @@ private:
private:
/// ImutAVLTree - Internal constructor that is only called by
/// ImutAVLFactory.
- ImutAVLTree(ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+ ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
unsigned height)
- : Left(l), Right(r), Height(height), Mutable(true), CachedDigest(false),
- Value(v), Digest(0) {}
+ : factory(f), left(l), right(r), prev(0), next(0), height(height),
+ IsMutable(true), IsDigestCached(false), IsCanonicalized(0),
+ value(v), digest(0), refCount(0)
+ {
+ if (left) left->retain();
+ if (right) right->retain();
+ }
/// isMutable - Returns true if the left and right subtree references
/// (as well as height) can be changed. If this method returns false,
@@ -248,11 +256,11 @@ private:
/// object should always have this method return true. Further, if this
/// method returns false for an instance of ImutAVLTree, all subtrees
/// will also have this method return false. The converse is not true.
- bool isMutable() const { return Mutable; }
+ bool isMutable() const { return IsMutable; }
/// hasCachedDigest - Returns true if the digest for this tree is cached.
/// This can only be true if the tree is immutable.
- bool hasCachedDigest() const { return CachedDigest; }
+ bool hasCachedDigest() const { return IsDigestCached; }
//===----------------------------------------------------===//
// Mutating operations. A tree root can be manipulated as
@@ -265,51 +273,32 @@ private:
// immutable.
//===----------------------------------------------------===//
- /// MarkImmutable - Clears the mutable flag for a tree. After this happens,
+ /// markImmutable - Clears the mutable flag for a tree. After this happens,
/// it is an error to call setLeft(), setRight(), and setHeight().
- void MarkImmutable() {
+ void markImmutable() {
assert(isMutable() && "Mutable flag already removed.");
- Mutable = false;
+ IsMutable = false;
}
- /// MarkedCachedDigest - Clears the NoCachedDigest flag for a tree.
- void MarkedCachedDigest() {
+ /// markedCachedDigest - Records that the digest for this tree is now cached.
+ void markedCachedDigest() {
assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
- CachedDigest = true;
- }
-
- /// setLeft - Changes the reference of the left subtree. Used internally
- /// by ImutAVLFactory.
- void setLeft(ImutAVLTree* NewLeft) {
- assert(isMutable() &&
- "Only a mutable tree can have its left subtree changed.");
- Left = NewLeft;
- CachedDigest = false;
- }
-
- /// setRight - Changes the reference of the right subtree. Used internally
- /// by ImutAVLFactory.
- void setRight(ImutAVLTree* NewRight) {
- assert(isMutable() &&
- "Only a mutable tree can have its right subtree changed.");
-
- Right = NewRight;
- CachedDigest = false;
+ IsDigestCached = true;
}
/// setHeight - Changes the height of the tree. Used internally by
/// ImutAVLFactory.
void setHeight(unsigned h) {
assert(isMutable() && "Only a mutable tree can have its height changed.");
- Height = h;
+ height = h;
}
static inline
- uint32_t ComputeDigest(ImutAVLTree* L, ImutAVLTree* R, value_type_ref V) {
+ uint32_t computeDigest(ImutAVLTree* L, ImutAVLTree* R, value_type_ref V) {
uint32_t digest = 0;
if (L)
- digest += L->ComputeDigest();
+ digest += L->computeDigest();
// Compute digest of stored data.
FoldingSetNodeID ID;
@@ -317,22 +306,54 @@ private:
digest += ID.ComputeHash();
if (R)
- digest += R->ComputeDigest();
+ digest += R->computeDigest();
return digest;
}
- inline uint32_t ComputeDigest() {
+ inline uint32_t computeDigest() {
// Check the lowest bit to determine if digest has actually been
// pre-computed.
if (hasCachedDigest())
- return Digest;
+ return digest;
- uint32_t X = ComputeDigest(getLeft(), getRight(), getValue());
- Digest = X;
- MarkedCachedDigest();
+ uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+ digest = X;
+ markedCachedDigest();
return X;
}
+
+ //===----------------------------------------------------===//
+ // Reference count operations.
+ //===----------------------------------------------------===//
+
+public:
+ void retain() { ++refCount; }
+ void release() {
+ assert(refCount > 0);
+ if (--refCount == 0)
+ destroy();
+ }
+ void destroy() {
+ if (left)
+ left->release();
+ if (right)
+ right->release();
+ if (IsCanonicalized) {
+ if (next)
+ next->prev = prev;
+
+ if (prev)
+ prev->next = next;
+ else
+ factory->Cache[computeDigest()] = next;
+ }
+
+ // We need to clear the mutability bit in case we are
+ // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+ IsMutable = false;
+ factory->freeNodes.push_back(this);
+ }
};
//===----------------------------------------------------------------------===//
@@ -341,14 +362,17 @@ private:
template <typename ImutInfo >
class ImutAVLFactory {
+ friend class ImutAVLTree<ImutInfo>;
typedef ImutAVLTree<ImutInfo> TreeTy;
typedef typename TreeTy::value_type_ref value_type_ref;
typedef typename TreeTy::key_type_ref key_type_ref;
- typedef FoldingSet<TreeTy> CacheTy;
+ typedef DenseMap<unsigned, TreeTy*> CacheTy;
CacheTy Cache;
uintptr_t Allocator;
+ std::vector<TreeTy*> createdNodes;
+ std::vector<TreeTy*> freeNodes;
bool ownsAllocator() const {
return Allocator & 0x1 ? false : true;
@@ -373,55 +397,56 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
- TreeTy* Add(TreeTy* T, value_type_ref V) {
- T = Add_internal(V,T);
- MarkImmutable(T);
+ TreeTy* add(TreeTy* T, value_type_ref V) {
+ T = add_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
return T;
}
- TreeTy* Remove(TreeTy* T, key_type_ref V) {
- T = Remove_internal(V,T);
- MarkImmutable(T);
+ TreeTy* remove(TreeTy* T, key_type_ref V) {
+ T = remove_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
return T;
}
- TreeTy* GetEmptyTree() const { return NULL; }
+ TreeTy* getEmptyTree() const { return NULL; }
+protected:
+
//===--------------------------------------------------===//
// A bunch of quick helper functions used for reasoning
// about the properties of trees and their children.
// These have succinct names so that the balancing code
// is as terse (and readable) as possible.
//===--------------------------------------------------===//
-protected:
- bool isEmpty(TreeTy* T) const { return !T; }
- unsigned Height(TreeTy* T) const { return T ? T->getHeight() : 0; }
- TreeTy* Left(TreeTy* T) const { return T->getLeft(); }
- TreeTy* Right(TreeTy* T) const { return T->getRight(); }
- value_type_ref Value(TreeTy* T) const { return T->Value; }
+ bool isEmpty(TreeTy* T) const { return !T; }
+ unsigned getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+ TreeTy* getLeft(TreeTy* T) const { return T->getLeft(); }
+ TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
+ value_type_ref getValue(TreeTy* T) const { return T->value; }
- unsigned IncrementHeight(TreeTy* L, TreeTy* R) const {
- unsigned hl = Height(L);
- unsigned hr = Height(R);
+ unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
return (hl > hr ? hl : hr) + 1;
}
- static bool CompareTreeWithSection(TreeTy* T,
+ static bool compareTreeWithSection(TreeTy* T,
typename TreeTy::iterator& TI,
typename TreeTy::iterator& TE) {
-
typename TreeTy::iterator I = T->begin(), E = T->end();
-
- for ( ; I!=E ; ++I, ++TI)
- if (TI == TE || !I->ElementEqual(*TI))
+ for ( ; I!=E ; ++I, ++TI) {
+ if (TI == TE || !I->isElementEqual(*TI))
return false;
-
+ }
return true;
}
//===--------------------------------------------------===//
- // "CreateNode" is used to generate new tree roots that link
+ // "createNode" is used to generate new tree roots that link
// to other trees. The function may also simply move links
// in an existing root if that root is still marked mutable.
// This is necessary because otherwise our balancing code
@@ -430,181 +455,188 @@ protected:
// returned to the caller.
//===--------------------------------------------------===//
- TreeTy* CreateNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
BumpPtrAllocator& A = getAllocator();
- TreeTy* T = (TreeTy*) A.Allocate<TreeTy>();
- new (T) TreeTy(L, R, V, IncrementHeight(L,R));
+ TreeTy* T;
+ if (!freeNodes.empty()) {
+ T = freeNodes.back();
+ freeNodes.pop_back();
+ assert(T != L);
+ assert(T != R);
+ }
+ else {
+ T = (TreeTy*) A.Allocate<TreeTy>();
+ }
+ new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+ createdNodes.push_back(T);
return T;
}
- TreeTy* CreateNode(TreeTy* L, TreeTy* OldTree, TreeTy* R) {
- assert(!isEmpty(OldTree));
+ TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+ return createNode(newLeft, getValue(oldTree), newRight);
+ }
- if (OldTree->isMutable()) {
- OldTree->setLeft(L);
- OldTree->setRight(R);
- OldTree->setHeight(IncrementHeight(L, R));
- return OldTree;
+ void recoverNodes() {
+ for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+ TreeTy *N = createdNodes[i];
+ if (N->isMutable() && N->refCount == 0)
+ N->destroy();
}
- else
- return CreateNode(L, Value(OldTree), R);
+ createdNodes.clear();
}
- /// Balance - Used by Add_internal and Remove_internal to
+ /// balanceTree - Used by add_internal and remove_internal to
/// balance a newly created tree.
- TreeTy* Balance(TreeTy* L, value_type_ref V, TreeTy* R) {
-
- unsigned hl = Height(L);
- unsigned hr = Height(R);
+ TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
if (hl > hr + 2) {
assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
- TreeTy* LL = Left(L);
- TreeTy* LR = Right(L);
+ TreeTy *LL = getLeft(L);
+ TreeTy *LR = getRight(L);
- if (Height(LL) >= Height(LR))
- return CreateNode(LL, L, CreateNode(LR,V,R));
+ if (getHeight(LL) >= getHeight(LR))
+ return createNode(LL, L, createNode(LR,V,R));
assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
- TreeTy* LRL = Left(LR);
- TreeTy* LRR = Right(LR);
+ TreeTy *LRL = getLeft(LR);
+ TreeTy *LRR = getRight(LR);
- return CreateNode(CreateNode(LL,L,LRL), LR, CreateNode(LRR,V,R));
+ return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
}
else if (hr > hl + 2) {
assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
- TreeTy* RL = Left(R);
- TreeTy* RR = Right(R);
+ TreeTy *RL = getLeft(R);
+ TreeTy *RR = getRight(R);
- if (Height(RR) >= Height(RL))
- return CreateNode(CreateNode(L,V,RL), R, RR);
+ if (getHeight(RR) >= getHeight(RL))
+ return createNode(createNode(L,V,RL), R, RR);
assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
- TreeTy* RLL = Left(RL);
- TreeTy* RLR = Right(RL);
+ TreeTy *RLL = getLeft(RL);
+ TreeTy *RLR = getRight(RL);
- return CreateNode(CreateNode(L,V,RLL), RL, CreateNode(RLR,R,RR));
+ return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
}
else
- return CreateNode(L,V,R);
+ return createNode(L,V,R);
}
- /// Add_internal - Creates a new tree that includes the specified
+ /// add_internal - Creates a new tree that includes the specified
/// data and the data from the original tree. If the original tree
/// already contained the data item, the original tree is returned.
- TreeTy* Add_internal(value_type_ref V, TreeTy* T) {
+ TreeTy* add_internal(value_type_ref V, TreeTy* T) {
if (isEmpty(T))
- return CreateNode(T, V, T);
-
+ return createNode(T, V, T);
assert(!T->isMutable());
key_type_ref K = ImutInfo::KeyOfValue(V);
- key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T));
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
if (ImutInfo::isEqual(K,KCurrent))
- return CreateNode(Left(T), V, Right(T));
+ return createNode(getLeft(T), V, getRight(T));
else if (ImutInfo::isLess(K,KCurrent))
- return Balance(Add_internal(V,Left(T)), Value(T), Right(T));
+ return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
else
- return Balance(Left(T), Value(T), Add_internal(V,Right(T)));
+ return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
}
- /// Remove_internal - Creates a new tree that includes all the data
+ /// remove_internal - Creates a new tree that includes all the data
/// from the original tree except the specified data. If the
/// specified data did not exist in the original tree, the original
/// tree is returned.
- TreeTy* Remove_internal(key_type_ref K, TreeTy* T) {
+ TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
if (isEmpty(T))
return T;
assert(!T->isMutable());
- key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T));
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
- if (ImutInfo::isEqual(K,KCurrent))
- return CombineLeftRightTrees(Left(T),Right(T));
- else if (ImutInfo::isLess(K,KCurrent))
- return Balance(Remove_internal(K,Left(T)), Value(T), Right(T));
- else
- return Balance(Left(T), Value(T), Remove_internal(K,Right(T)));
+ if (ImutInfo::isEqual(K,KCurrent)) {
+ return combineTrees(getLeft(T), getRight(T));
+ } else if (ImutInfo::isLess(K,KCurrent)) {
+ return balanceTree(remove_internal(K, getLeft(T)),
+ getValue(T), getRight(T));
+ } else {
+ return balanceTree(getLeft(T), getValue(T),
+ remove_internal(K, getRight(T)));
+ }
}
- TreeTy* CombineLeftRightTrees(TreeTy* L, TreeTy* R) {
- if (isEmpty(L)) return R;
- if (isEmpty(R)) return L;
-
+ TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+ if (isEmpty(L))
+ return R;
+ if (isEmpty(R))
+ return L;
TreeTy* OldNode;
- TreeTy* NewRight = RemoveMinBinding(R,OldNode);
- return Balance(L,Value(OldNode),NewRight);
+ TreeTy* newRight = removeMinBinding(R,OldNode);
+ return balanceTree(L, getValue(OldNode), newRight);
}
- TreeTy* RemoveMinBinding(TreeTy* T, TreeTy*& NodeRemoved) {
+ TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
assert(!isEmpty(T));
-
- if (isEmpty(Left(T))) {
- NodeRemoved = T;
- return Right(T);
+ if (isEmpty(getLeft(T))) {
+ Noderemoved = T;
+ return getRight(T);
}
-
- return Balance(RemoveMinBinding(Left(T),NodeRemoved),Value(T),Right(T));
+ return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+ getValue(T), getRight(T));
}
- /// MarkImmutable - Clears the mutable bits of a root and all of its
+ /// markImmutable - Clears the mutable bits of a root and all of its
/// descendants.
- void MarkImmutable(TreeTy* T) {
+ void markImmutable(TreeTy* T) {
if (!T || !T->isMutable())
return;
-
- T->MarkImmutable();
- MarkImmutable(Left(T));
- MarkImmutable(Right(T));
+ T->markImmutable();
+ markImmutable(getLeft(T));
+ markImmutable(getRight(T));
}
public:
- TreeTy *GetCanonicalTree(TreeTy *TNew) {
+ TreeTy *getCanonicalTree(TreeTy *TNew) {
if (!TNew)
- return NULL;
-
- // Search the FoldingSet bucket for a Tree with the same digest.
- FoldingSetNodeID ID;
- unsigned digest = TNew->ComputeDigest();
- ID.AddInteger(digest);
- unsigned hash = ID.ComputeHash();
-
- typename CacheTy::bucket_iterator I = Cache.bucket_begin(hash);
- typename CacheTy::bucket_iterator E = Cache.bucket_end(hash);
-
- for (; I != E; ++I) {
- TreeTy *T = &*I;
-
- if (T->ComputeDigest() != digest)
- continue;
-
- // We found a collision. Perform a comparison of Contents('T')
- // with Contents('TNew')
- typename TreeTy::iterator TI = T->begin(), TE = T->end();
-
- if (!CompareTreeWithSection(TNew, TI, TE))
- continue;
-
- if (TI != TE)
- continue; // T has more contents than TNew.
-
- // Trees did match! Return 'T'.
- return T;
+ return 0;
+
+ if (TNew->IsCanonicalized)
+ return TNew;
+
+ // Search the hashtable for another tree with the same digest, and
+ // if we find a collision, compare those trees by their contents.
+ unsigned digest = TNew->computeDigest();
+ TreeTy *&entry = Cache[digest];
+ do {
+ if (!entry)
+ break;
+ for (TreeTy *T = entry ; T != 0; T = T->next) {
+ // Compare the Contents('T') with Contents('TNew')
+ typename TreeTy::iterator TI = T->begin(), TE = T->end();
+ if (!compareTreeWithSection(TNew, TI, TE))
+ continue;
+ if (TI != TE)
+ continue; // T has more contents than TNew.
+ // Trees did match! Return 'T'.
+ if (TNew->refCount == 0)
+ TNew->destroy();
+ return T;
+ }
+ entry->prev = TNew;
+ TNew->next = entry;
}
+ while (false);
- // 'TNew' is the only tree of its kind. Return it.
- Cache.InsertNode(TNew, (void*) &*Cache.bucket_end(hash));
+ entry = TNew;
+ TNew->IsCanonicalized = true;
return TNew;
}
};
-
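The canonicalization scheme changes shape here: instead of a FoldingSet of trees, the factory keeps a DenseMap from 32-bit digest to a doubly linked chain of canonical trees (the prev/next fields added earlier). getCanonicalTree() walks the chain for a digest, compares contents on collision, and either returns the existing tree (destroying the redundant TNew) or links TNew in as the new chain head and marks it IsCanonicalized. The intended payoff is that equality tests on canonicalized trees can often stop early on pointer identity of shared subtrees.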
//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Iterators.
//===----------------------------------------------------------------------===//
@@ -635,19 +667,17 @@ public:
}
- bool AtEnd() const { return stack.empty(); }
+ bool atEnd() const { return stack.empty(); }
- bool AtBeginning() const {
+ bool atBeginning() const {
return stack.size() == 1 && getVisitState() == VisitedNone;
}
- void SkipToParent() {
+ void skipToParent() {
assert(!stack.empty());
stack.pop_back();
-
if (stack.empty())
return;
-
switch (getVisitState()) {
case VisitedNone:
stack.back() |= VisitedLeft;
@@ -663,11 +693,9 @@ public:
inline bool operator==(const _Self& x) const {
if (stack.size() != x.stack.size())
return false;
-
for (unsigned i = 0 ; i < stack.size(); i++)
if (stack[i] != x.stack[i])
return false;
-
return true;
}
@@ -675,70 +703,52 @@ public:
_Self& operator++() {
assert(!stack.empty());
-
TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
assert(Current);
-
switch (getVisitState()) {
case VisitedNone:
if (TreeTy* L = Current->getLeft())
stack.push_back(reinterpret_cast<uintptr_t>(L));
else
stack.back() |= VisitedLeft;
-
break;
-
case VisitedLeft:
if (TreeTy* R = Current->getRight())
stack.push_back(reinterpret_cast<uintptr_t>(R));
else
stack.back() |= VisitedRight;
-
break;
-
case VisitedRight:
- SkipToParent();
+ skipToParent();
break;
-
default:
assert(false && "Unreachable.");
}
-
return *this;
}
_Self& operator--() {
assert(!stack.empty());
-
TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
assert(Current);
-
switch (getVisitState()) {
case VisitedNone:
stack.pop_back();
break;
-
case VisitedLeft:
stack.back() &= ~Flags; // Set state to "VisitedNone."
-
if (TreeTy* L = Current->getLeft())
stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
-
break;
-
case VisitedRight:
stack.back() &= ~Flags;
stack.back() |= VisitedLeft;
-
if (TreeTy* R = Current->getRight())
stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
-
break;
-
default:
assert(false && "Unreachable.");
}
-
return *this;
}
};
@@ -769,7 +779,7 @@ public:
inline _Self& operator++() {
do ++InternalItr;
- while (!InternalItr.AtEnd() &&
+ while (!InternalItr.atEnd() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
return *this;
@@ -777,16 +787,16 @@ public:
inline _Self& operator--() {
do --InternalItr;
- while (!InternalItr.AtBeginning() &&
+ while (!InternalItr.atBeginning() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
return *this;
}
- inline void SkipSubTree() {
- InternalItr.SkipToParent();
+ inline void skipSubTree() {
+ InternalItr.skipToParent();
- while (!InternalItr.AtEnd() &&
+ while (!InternalItr.atEnd() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
++InternalItr;
}
@@ -927,7 +937,23 @@ public:
/// should use a Factory object to create sets instead of directly
/// invoking the constructor, but there are cases where making this
/// constructor public is useful.
- explicit ImmutableSet(TreeTy* R) : Root(R) {}
+ explicit ImmutableSet(TreeTy* R) : Root(R) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet &operator=(const ImmutableSet &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+ ~ImmutableSet() {
+ if (Root) { Root->release(); }
+ }
class Factory {
typename TreeTy::Factory F;
@@ -940,33 +966,33 @@ public:
Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
- /// GetEmptySet - Returns an immutable set that contains no elements.
- ImmutableSet GetEmptySet() {
- return ImmutableSet(F.GetEmptyTree());
+ /// getEmptySet - Returns an immutable set that contains no elements.
+ ImmutableSet getEmptySet() {
+ return ImmutableSet(F.getEmptyTree());
}
- /// Add - Creates a new immutable set that contains all of the values
+ /// add - Creates a new immutable set that contains all of the values
/// of the original set with the addition of the specified value. If
/// the original set already included the value, then the original set is
/// returned and no memory is allocated. The time and space complexity
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
- ImmutableSet Add(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.Add(Old.Root, V);
- return ImmutableSet(Canonicalize ? F.GetCanonicalTree(NewT) : NewT);
+ ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.add(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
- /// Remove - Creates a new immutable set that contains all of the values
+ /// remove - Creates a new immutable set that contains all of the values
/// of the original set with the exception of the specified value. If
/// the original set did not contain the value, the original set is
/// returned and no memory is allocated. The time and space complexity
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
- ImmutableSet Remove(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.Remove(Old.Root, V);
- return ImmutableSet(Canonicalize ? F.GetCanonicalTree(NewT) : NewT);
+ ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.remove(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
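Taken together, add() and remove() give the usual persistent-set workflow: each call returns a new set and leaves its argument intact. A short sketch (element values illustrative):

    #include "llvm/ADT/ImmutableSet.h"
    #include "llvm/Support/Allocator.h"
    #include <cassert>

    void persistentSets() {
      llvm::BumpPtrAllocator Alloc;
      llvm::ImmutableSet<int>::Factory F(Alloc);

      llvm::ImmutableSet<int> S0 = F.getEmptySet();
      llvm::ImmutableSet<int> S1 = F.add(S0, 42);
      assert(S1.contains(42) && !S0.contains(42));  // S0 is untouched
    }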
@@ -978,20 +1004,21 @@ public:
friend class Factory;
- /// contains - Returns true if the set contains the specified value.
+ /// Returns true if the set contains the specified value.
bool contains(value_type_ref V) const {
return Root ? Root->contains(V) : false;
}
- bool operator==(ImmutableSet RHS) const {
+ bool operator==(const ImmutableSet &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
- bool operator!=(ImmutableSet RHS) const {
+ bool operator!=(const ImmutableSet &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
TreeTy *getRoot() {
+ if (Root) { Root->retain(); }
return Root;
}
@@ -1049,7 +1076,7 @@ public:
// For testing.
//===--------------------------------------------------===//
- void verify() const { if (Root) Root->verify(); }
+ void validateTree() const { if (Root) Root->validateTree(); }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/ADT/InMemoryStruct.h b/contrib/llvm/include/llvm/ADT/InMemoryStruct.h
new file mode 100644
index 0000000..a560845
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/InMemoryStruct.h
@@ -0,0 +1,77 @@
+//===- InMemoryStruct.h - Indirect Struct Access Smart Pointer --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INMEMORYSTRUCT_H
+#define LLVM_ADT_INMEMORYSTRUCT_H
+
+#include <cassert>
+
+namespace llvm {
+
+/// \brief Helper object for abstracting access to an in-memory structure which
+/// may require some kind of temporary storage.
+///
+/// This class is designed to be used for accessing file data structures which
+/// in the common case can be accessed from a direct pointer to a memory mapped
+/// object, but which in some cases may require indirect access to a temporary
+/// structure (which, for example, may have undergone endianness translation).
+template<typename T>
+class InMemoryStruct {
+ typedef T value_type;
+ typedef value_type &reference;
+ typedef value_type *pointer;
+ typedef const value_type &const_reference;
+ typedef const value_type *const_pointer;
+
+ /// \brief The smart pointer target.
+ value_type *Target;
+
+ /// \brief A temporary object which can be used as a target of the smart
+ /// pointer.
+ value_type Contents;
+
+private:
+
+public:
+ InMemoryStruct() : Target(0) {}
+ InMemoryStruct(reference Value) : Target(&Contents), Contents(Value) {}
+ InMemoryStruct(pointer Value) : Target(Value) {}
+ InMemoryStruct(const InMemoryStruct<T> &Value) { *this = Value; }
+
+ void operator=(const InMemoryStruct<T> &Value) {
+ if (Value.Target != &Value.Contents) {
+ Target = Value.Target;
+ } else {
+ Target = &Contents;
+ Contents = Value.Contents;
+ }
+ }
+
+ const_reference operator*() const {
+ assert(Target && "Cannot dereference null pointer");
+ return *Target;
+ }
+ reference operator*() {
+ assert(Target && "Cannot dereference null pointer");
+ return *Target;
+ }
+
+ const_pointer operator->() const {
+ return Target;
+ }
+ pointer operator->() {
+ return Target;
+ }
+
+ operator bool() const { return Target != 0; }
+};
+
+}
+
+#endif
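A hedged sketch of the intended use, assuming a hypothetical on-disk header type: the accessor code is identical whether the smart pointer targets the memory-mapped file directly or a translated temporary copy:

    #include "llvm/ADT/InMemoryStruct.h"

    struct FileHeader { unsigned Magic; unsigned NumSections; };

    unsigned numSections(const llvm::InMemoryStruct<FileHeader> &H) {
      // operator bool() tests the target; operator-> works for either backing.
      return H ? H->NumSections : 0;
    }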
diff --git a/contrib/llvm/include/llvm/ADT/IndexedMap.h b/contrib/llvm/include/llvm/ADT/IndexedMap.h
index 89f0dfa..87126ea 100644
--- a/contrib/llvm/include/llvm/ADT/IndexedMap.h
+++ b/contrib/llvm/include/llvm/ADT/IndexedMap.h
@@ -55,6 +55,14 @@ namespace llvm {
return storage_[toIndex_(n)];
}
+ void reserve(typename StorageT::size_type s) {
+ storage_.reserve(s);
+ }
+
+ void resize(typename StorageT::size_type s) {
+ storage_.resize(s, nullVal_);
+ }
+
void clear() {
storage_.clear();
}
@@ -62,7 +70,11 @@ namespace llvm {
void grow(IndexT n) {
unsigned NewSize = toIndex_(n) + 1;
if (NewSize > storage_.size())
- storage_.resize(NewSize, nullVal_);
+ resize(NewSize);
+ }
+
+ bool inBounds(IndexT n) const {
+ return toIndex_(n) < storage_.size();
}
typename StorageT::size_type size() const {
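The additions make the usual grow-then-index pattern explicit and let callers test bounds without growing. Sketch (the map's use as a per-index weight table is illustrative):

    #include "llvm/ADT/IndexedMap.h"

    void weightTable() {
      llvm::IndexedMap<unsigned> Weight;  // keys are unsigned indices
      Weight.grow(99);                    // indices 0..99 are now in bounds
      if (Weight.inBounds(42))
        Weight[42] += 1;                  // no reallocation on access
    }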
diff --git a/contrib/llvm/include/llvm/ADT/IntEqClasses.h b/contrib/llvm/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000..8e75c48
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,88 @@
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+ /// EC - When uncompressed, map each integer to a smaller member of its
+ /// equivalence class. The class leader is the smallest member and maps to
+ /// itself.
+ ///
+ /// When compressed, EC[i] is the equivalence class of i.
+ SmallVector<unsigned, 8> EC;
+
+ /// NumClasses - The number of equivalence classes when compressed, or 0 when
+ /// uncompressed.
+ unsigned NumClasses;
+
+public:
+ /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+ IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+ /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+ /// equivalence classes.
+ /// This requires an uncompressed map.
+ void grow(unsigned N);
+
+ /// clear - Clear all classes so that grow() will assign a unique class to
+ /// every integer.
+ void clear() {
+ EC.clear();
+ NumClasses = 0;
+ }
+
+ /// join - Join the equivalence classes of a and b. After joining classes,
+ /// findLeader(a) == findLeader(b).
+ /// This requires an uncompressed map.
+ void join(unsigned a, unsigned b);
+
+ /// findLeader - Compute the leader of a's equivalence class. This is the
+ /// smallest member of the class.
+ /// This requires an uncompressed map.
+ unsigned findLeader(unsigned a) const;
+
+ /// compress - Compress equivalence classes by numbering them 0 .. M.
+ /// This makes the equivalence class map immutable.
+ void compress();
+
+ /// getNumClasses - Return the number of equivalence classes after compress()
+ /// was called.
+ unsigned getNumClasses() const { return NumClasses; }
+
+ /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+ /// This requires a compressed map.
+ unsigned operator[](unsigned a) const {
+ assert(NumClasses && "operator[] called before compress()");
+ return EC[a];
+ }
+
+ /// uncompress - Change back to the uncompressed representation that allows
+ /// editing.
+ void uncompress();
+};
+
+} // End llvm namespace
+
+#endif
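The intended lifecycle is: grow/join while uncompressed, then compress() once and read class numbers. A short sketch:

    #include "llvm/ADT/IntEqClasses.h"

    void classes() {
      llvm::IntEqClasses EC(5);   // {0} {1} {2} {3} {4}
      EC.join(0, 3);
      EC.join(3, 4);              // {0,3,4} {1} {2}
      EC.compress();              // freezes the map, numbers classes 0..2
      unsigned M = EC.getNumClasses();  // 3
      bool Same = EC[0] == EC[4];       // true: 0 and 4 were joined
      (void)M; (void)Same;
    }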
diff --git a/contrib/llvm/include/llvm/ADT/IntervalMap.h b/contrib/llvm/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000..79f24d3
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2139 @@
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a coalescing interval map for small objects.
+//
+// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+// same value are represented in a compressed form.
+//
+// Iterators provide ordered access to the compressed intervals rather than the
+// individual keys, and insert and erase operations use key intervals as well.
+//
+// Like SmallVector, IntervalMap will store the first N intervals in the map
+// object itself without any allocations. When space is exhausted it switches to
+// a B+-tree representation with very small overhead for small key and value
+// objects.
+//
+// A Traits class specifies how keys are compared. It also allows IntervalMap to
+// work with both closed and half-open intervals.
+//
+// Keys and values are not stored next to each other in a std::pair, so we don't
+// provide such a value_type. Dereferencing iterators only returns the mapped
+// value. The interval bounds are accessible through the start() and stop()
+// iterator methods.
+//
+// IntervalMap is optimized for small key and value objects, 4 or 8 bytes each
+// is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+// typedef KeyT key_type;
+// typedef ValT mapped_type;
+// typedef RecyclingAllocator<...> Allocator;
+// class iterator;
+// class const_iterator;
+//
+// explicit IntervalMap(Allocator&);
+// ~IntervalMap();
+//
+// bool empty() const;
+// KeyT start() const;
+// KeyT stop() const;
+// ValT lookup(KeyT x, ValT NotFound = ValT()) const;
+//
+// const_iterator begin() const;
+// const_iterator end() const;
+// iterator begin();
+// iterator end();
+// const_iterator find(KeyT x) const;
+// iterator find(KeyT x);
+//
+// void insert(KeyT a, KeyT b, ValT y);
+// void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator :
+// public std::iterator<std::bidirectional_iterator_tag, ValT> {
+// public:
+// bool operator==(const const_iterator &) const;
+// bool operator!=(const const_iterator &) const;
+// bool valid() const;
+//
+// const KeyT &start() const;
+// const KeyT &stop() const;
+// const ValT &value() const;
+// const ValT &operator*() const;
+// const ValT *operator->() const;
+//
+// const_iterator &operator++();
+// const_iterator &operator++(int);
+// const_iterator &operator--();
+// const_iterator &operator--(int);
+// void goToBegin();
+// void goToEnd();
+// void find(KeyT x);
+// void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+// void insert(KeyT a, KeyT b, ValT y);
+// void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <iterator>
+
+namespace llvm {
+
+
+//===----------------------------------------------------------------------===//
+//--- Key traits ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+
+ /// startLess - Return true if x is not in [a;b].
+ /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b].
+ /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+ static inline bool stopLess(const T &b, const T &x) {
+ return b < x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+ /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a+1 == b;
+ }
+
+};
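For half-open [a;b) intervals, the comments above say each predicate changes in exactly one place: stopLess becomes b <= x and adjacent becomes a == b. A hedged sketch of such a traits class (illustrative, not part of this patch):

    // Illustrative traits for half-open [a;b) intervals over unsigned keys.
    struct HalfOpenIntervalMapInfo {
      static inline bool startLess(const unsigned &x, const unsigned &a) {
        return x < a;      // same as the closed-interval case
      }
      static inline bool stopLess(const unsigned &b, const unsigned &x) {
        return b <= x;     // b itself is not in [a;b)
      }
      static inline bool adjacent(const unsigned &a, const unsigned &b) {
        return a == b;     // [x;a) and [a;y) coalesce
      }
    };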
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+// Forward declarations.
+template <typename, typename, unsigned, typename> class LeafNode;
+template <typename, typename, unsigned, typename> class BranchNode;
+
+typedef std::pair<unsigned,unsigned> IdxPair;
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeBase ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+// T1 T2 N Waste Used by
+// 4 4 24 0 Branch<4> (32-bit pointers)
+// 8 4 16 0 Leaf<4,4>, Branch<4>
+// 8 8 12 0 Leaf<4,8>, Branch<8>
+// 16 4 9 12 Leaf<8,4>
+// 16 8 8 0 Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+ enum { Capacity = N };
+
+ T1 first[N];
+ T2 second[N];
+
+ /// copy - Copy elements from another node.
+ /// @param Other Node elements are copied from.
+ /// @param i Beginning of the source range in other.
+ /// @param j Beginning of the destination range in this.
+ /// @param Count Number of elements to copy.
+ template <unsigned M>
+ void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+ unsigned j, unsigned Count) {
+ assert(i + Count <= M && "Invalid source range");
+ assert(j + Count <= N && "Invalid dest range");
+ for (unsigned e = i + Count; i != e; ++i, ++j) {
+ first[j] = Other.first[i];
+ second[j] = Other.second[i];
+ }
+ }
+
+ /// moveLeft - Move elements to the left.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveLeft(unsigned i, unsigned j, unsigned Count) {
+ assert(j <= i && "Use moveRight to shift elements right");
+ copy(*this, i, j, Count);
+ }
+
+ /// moveRight - Move elements to the right.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveRight(unsigned i, unsigned j, unsigned Count) {
+ assert(i <= j && "Use moveLeft to shift elements left");
+ assert(j + Count <= N && "Invalid range");
+ while (Count--) {
+ first[j + Count] = first[i + Count];
+ second[j + Count] = second[i + Count];
+ }
+ }
+
+ /// erase - Erase elements [i;j).
+ /// @param i Beginning of the range to erase.
+ /// @param j End of the range. (Exclusive).
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned j, unsigned Size) {
+ moveLeft(j, i, Size - j);
+ }
+
+ /// erase - Erase element at i.
+ /// @param i Index of element to erase.
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned Size) {
+ erase(i, i+1, Size);
+ }
+
+ /// shift - Shift elements [i;size) 1 position to the right.
+ /// @param i Beginning of the range to move.
+ /// @param Size Number of elements in node.
+ void shift(unsigned i, unsigned Size) {
+ moveRight(i, i + 1, Size - i);
+ }
+
+ /// transferToLeftSib - Transfer elements to a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.copy(*this, 0, SSize, Count);
+ erase(0, Count, Size);
+ }
+
+ /// transferToRightSib - Transfer elements to a right sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Right sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.moveRight(0, Count, SSize);
+ Sib.copy(*this, Size-Count, 0, Count);
+ }
+
+ /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+ /// elements to or from a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Add The number of elements to add to this node, possibly < 0.
+ /// @return Number of elements added to this node, possibly negative.
+ int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+ if (Add > 0) {
+ // We want to grow, copy from sib.
+ unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+ Sib.transferToRightSib(SSize, *this, Size, Count);
+ return Count;
+ } else {
+ // We want to shrink, copy to sib.
+ unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+ transferToLeftSib(Size, Sib, SSize, Count);
+ return -Count;
+ }
+ }
+};
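+
+// For illustration, a sketch of how the movement primitives compose, assuming
+// a NodeBase<int, int, 8> node A holding 6 elements and a left sibling B
+// holding 2 elements (sizes are tracked by the caller, not by the node):
+//
+//   A.adjustFromLeftSib(6, B, 2, -3); // Shed 3 elements to B: B grows to 5,
+//                                     // A shrinks to 3, returns -3.
+//   A.adjustFromLeftSib(3, B, 5, 2);  // Pull 2 elements from the end of B:
+//                                     // A grows to 5, returns 2.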
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+ unsigned CurSize[], const unsigned NewSize[]) {
+  if (Nodes == 0)
+    return;
+
+  // Move elements right.
+  for (int n = Nodes - 1; n; --n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (int m = n - 1; m != -1; --m) {
+ int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+ NewSize[n] - CurSize[n]);
+ CurSize[m] -= d;
+ CurSize[n] += d;
+      // Stop when the current node has reached its new size.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+ // Move elements left.
+ for (unsigned n = 0; n != Nodes - 1; ++n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (unsigned m = n + 1; m != Nodes; ++m) {
+ int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+ CurSize[n] - NewSize[n]);
+ CurSize[m] += d;
+ CurSize[n] -= d;
+      // Stop when the current node has reached its new size.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ for (unsigned n = 0; n != Nodes; n++)
+ assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
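+
+// For illustration, assuming two NodeBase<int, int, 8> siblings with
+// CurSize = {6, 2} and NewSize = {4, 4}: the first pass grows Node[1] to 4
+// elements by pulling 2 elements from Node[0], after which both arrays read
+// {4, 4} and the second pass finds nothing left to move.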
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+/// Elements == sum(CurSize), and
+/// Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+/// sum(NewSize) == Elements, and
+/// NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+/// sum(NewSize[0..idx-1]) <= Position
+/// sum(NewSize[0..idx]) >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow Reserve space for a new element at Position.
+/// @return (node, offset) for Position.
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+ const unsigned *CurSize, unsigned NewSize[],
+ unsigned Position, bool Grow);
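+
+// For illustration, one result satisfying the contract above: with Nodes = 2,
+// Elements = 7, Capacity = 4, Position = 7, and Grow = true, NewSize could be
+// filled in as {4, 3} and the returned IdxPair would be (1, 3). This is the
+// boundary case described above: sum(NewSize[0..1]) == Position with Grow set
+// and NewSize[1] == Capacity - 1, so the insertion fits at the end of node 1.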
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeSizer ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+ // Cache line size. Most architectures have 32 or 64 byte cache lines.
+ // We use 64 bytes here because it provides good branching factors.
+ Log2CacheLine = 6,
+ CacheLineBytes = 1 << Log2CacheLine,
+ DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+ enum {
+ // Compute the leaf node branching factor that makes a node fit in three
+ // cache lines. The branching factor must be at least 3, or some B+-tree
+ // balancing algorithms won't work.
+ // LeafSize can't be larger than CacheLineBytes. This is required by the
+ // PointerIntPair used by NodeRef.
+ DesiredLeafSize = DesiredNodeBytes /
+ static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+ MinLeafSize = 3,
+ LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+ };
+
+ typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;
+
+ enum {
+ // Now that we have the leaf branching factor, compute the actual allocation
+ // unit size by rounding up to a whole number of cache lines.
+ AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+ // Determine the branching factor for branch nodes.
+ BranchSize = AllocBytes /
+ static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+ };
+
+ /// Allocator - The recycling allocator used for both branch and leaf nodes.
+ /// This typedef is very likely to be identical for all IntervalMaps with
+ /// reasonably sized entries, so the same allocator can be shared among
+ /// different kinds of maps.
+ typedef RecyclingAllocator<BumpPtrAllocator, char,
+ AllocBytes, CacheLineBytes> Allocator;
+
+};
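+
+// For illustration, assuming 4-byte keys and values (KeyT = ValT = unsigned)
+// and 8-byte pointers, the arithmetic above works out to:
+//
+//   DesiredLeafSize = 192 / (2*4 + 4) = 16, so LeafSize = 16.
+//   sizeof(LeafBase) = 16*8 + 16*4 = 192, so AllocBytes = 192.
+//   BranchSize = 192 / (4 + 8) = 16.
+//
+// Both node kinds then fill exactly three 64-byte cache lines with no waste.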
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeRef ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allows tree balancing algorithms to run without faulting cache lines for
+// nodes that may not need to be modified.
+//
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+ struct CacheAlignedPointerTraits {
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ enum { NumLowBitsAvailable = Log2CacheLine };
+ };
+ PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+ /// NodeRef - Create a null ref.
+ NodeRef() {}
+
+ /// operator bool - Detect a null ref.
+ operator bool() const { return pip.getOpaqueValue(); }
+
+ /// NodeRef - Create a reference to the node p with n elements.
+ template <typename NodeT>
+ NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+ assert(n <= NodeT::Capacity && "Size too big for node");
+ }
+
+ /// size - Return the number of elements in the referenced node.
+ unsigned size() const { return pip.getInt() + 1; }
+
+ /// setSize - Update the node size.
+ void setSize(unsigned n) { pip.setInt(n - 1); }
+
+ /// subtree - Access the i'th subtree reference in a branch node.
+ /// This depends on branch nodes storing the NodeRef array as their first
+ /// member.
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+ }
+
+ /// get - Dereference as a NodeT reference.
+ template <typename NodeT>
+ NodeT &get() const {
+ return *reinterpret_cast<NodeT*>(pip.getPointer());
+ }
+
+ bool operator==(const NodeRef &RHS) const {
+ if (pip == RHS.pip)
+ return true;
+ assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+ return false;
+ }
+
+ bool operator!=(const NodeRef &RHS) const {
+ return !operator==(RHS);
+ }
+};
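+
+// For illustration: because nodes are 64-byte aligned, a NodeRef packs the
+// node address and the element count into one pointer-sized word. Assuming a
+// branch node B currently holding 12 elements:
+//
+//   NodeRef NR(&B, 12); // The low 6 bits store 12 - 1 = 11.
+//   NR.size();          // Returns 12.
+//   NR.setSize(13);     // Rewrites only the low bits.
+//
+// Storing size - 1 is what lets the full 1-64 range fit in 6 bits.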
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::LeafNode ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i)) - Non-empty, sane intervals.
+//
+// - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+// - Fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+ const KeyT &start(unsigned i) const { return this->first[i].first; }
+ const KeyT &stop(unsigned i) const { return this->first[i].second; }
+ const ValT &value(unsigned i) const { return this->second[i]; }
+
+ KeyT &start(unsigned i) { return this->first[i].first; }
+ KeyT &stop(unsigned i) { return this->first[i].second; }
+ ValT &value(unsigned i) { return this->second[i]; }
+
+ /// findFrom - Find the first interval after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), or size.
+ /// This is the first interval that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+  /// safeFind - Find an interval that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is at least within range of the last
+  /// interval.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), never size.
+ /// This is the first interval that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Lookup mapped value for a safe key.
+ /// It is assumed that x is within range of the last entry.
+ /// @param x Key to search for.
+ /// @param NotFound Value to return if x is not in any interval.
+ /// @return The mapped value at x or NotFound.
+ ValT safeLookup(KeyT x, ValT NotFound) const {
+ unsigned i = safeFind(0, x);
+ return Traits::startLess(x, start(i)) ? NotFound : value(i);
+ }
+
+ unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos Insert position, initially computed as findFrom(0, Size, a).
+///            Updated to the actual insert position when the method returns.
+/// @param Size Number of elements in node.
+/// @param a Interval start.
+/// @param b Interval stop.
+/// @param y Value to be mapped.
+/// @return New number of elements in the node, or N + 1 on overflow, in which
+///         case nothing was inserted.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+ unsigned i = Pos;
+ assert(i <= Size && Size <= N && "Invalid index");
+ assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+ // Verify the findFrom invariant.
+ assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+ assert((i == Size || !Traits::stopLess(stop(i), a)));
+ assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+ // Coalesce with previous interval.
+ if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+ Pos = i - 1;
+ // Also coalesce with next interval?
+ if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+ stop(i - 1) = stop(i);
+ this->erase(i, Size);
+ return Size - 1;
+ }
+ stop(i - 1) = b;
+ return Size;
+ }
+
+ // Detect overflow.
+ if (i == N)
+ return N + 1;
+
+ // Add new interval at end.
+ if (i == Size) {
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+ }
+
+ // Try to coalesce with following interval.
+ if (value(i) == y && Traits::adjacent(b, start(i))) {
+ start(i) = a;
+ return Size;
+ }
+
+ // We must insert before i. Detect overflow.
+ if (Size == N)
+ return N + 1;
+
+ // Insert before i.
+ this->shift(i, Size);
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+}
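+
+// For illustration, assuming integer keys, a leaf holding [10;15] -> 'a' and
+// [20;25] -> 'b' (Size = 2), and Pos = findFrom(0, 2, 16) = 1:
+//
+//   insertFrom(Pos, 2, 16, 19, 'a'); // Coalesces left into [10;19] -> 'a';
+//                                    // returns 2 and sets Pos = 0.
+//   insertFrom(Pos, 2, 16, 19, 'b'); // On the original node this would
+//                                    // instead coalesce right into
+//                                    // [16;25] -> 'b' and return 2.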
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::BranchNode ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+ const KeyT &stop(unsigned i) const { return this->second[i]; }
+ const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+ KeyT &stop(unsigned i) { return this->second[i]; }
+ NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+ /// findFrom - Find the first subtree after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), or size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index to findFrom is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+  /// safeFind - Find a subtree that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is in range.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), never size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+  /// safeLookup - Get the subtree containing x, assuming that x is in range.
+  /// @param x Key to search for.
+  /// @return Subtree containing x.
+ NodeRef safeLookup(KeyT x) const {
+ return subtree(safeFind(0, x));
+ }
+
+ /// insert - Insert a new (subtree, stop) pair.
+ /// @param i Insert position, following entries will be shifted.
+ /// @param Size Number of elements in node.
+ /// @param Node Subtree to insert.
+ /// @param Stop Last key in subtree.
+ void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+ assert(Size < N && "branch node overflow");
+ assert(i <= Size && "Bad insert position");
+ this->shift(i, Size);
+ subtree(i) = Node;
+ stop(i) = Stop;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::Path ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
+//
+//===----------------------------------------------------------------------===//
+
+class Path {
+ /// Entry - Each step in the path is a node pointer and an offset into that
+ /// node.
+ struct Entry {
+ void *node;
+ unsigned size;
+ unsigned offset;
+
+ Entry(void *Node, unsigned Size, unsigned Offset)
+ : node(Node), size(Size), offset(Offset) {}
+
+ Entry(NodeRef Node, unsigned Offset)
+ : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(node)[i];
+ }
+ };
+
+ /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+ SmallVector<Entry, 4> path;
+
+public:
+ // Node accessors.
+ template <typename NodeT> NodeT &node(unsigned Level) const {
+ return *reinterpret_cast<NodeT*>(path[Level].node);
+ }
+ unsigned size(unsigned Level) const { return path[Level].size; }
+ unsigned offset(unsigned Level) const { return path[Level].offset; }
+ unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+ // Leaf accessors.
+ template <typename NodeT> NodeT &leaf() const {
+ return *reinterpret_cast<NodeT*>(path.back().node);
+ }
+ unsigned leafSize() const { return path.back().size; }
+ unsigned leafOffset() const { return path.back().offset; }
+ unsigned &leafOffset() { return path.back().offset; }
+
+ /// valid - Return true if path is at a valid node, not at end().
+ bool valid() const {
+ return !path.empty() && path.front().offset < path.front().size;
+ }
+
+ /// height - Return the height of the tree corresponding to this path.
+ /// This matches map->height in a full path.
+ unsigned height() const { return path.size() - 1; }
+
+ /// subtree - Get the subtree referenced from Level. When the path is
+ /// consistent, node(Level + 1) == subtree(Level).
+ /// @param Level 0..height-1. The leaves have no subtrees.
+ NodeRef &subtree(unsigned Level) const {
+ return path[Level].subtree(path[Level].offset);
+ }
+
+  /// reset - Reset cached information about node(Level) from
+  /// subtree(Level - 1).
+  /// @param Level 1..height. The node to update after the parent node changed.
+ void reset(unsigned Level) {
+ path[Level] = Entry(subtree(Level - 1), offset(Level));
+ }
+
+ /// push - Add entry to path.
+ /// @param Node Node to add, should be subtree(path.size()-1).
+ /// @param Offset Offset into Node.
+ void push(NodeRef Node, unsigned Offset) {
+ path.push_back(Entry(Node, Offset));
+ }
+
+ /// pop - Remove the last path entry.
+ void pop() {
+ path.pop_back();
+ }
+
+ /// setSize - Set the size of a node both in the path and in the tree.
+ /// @param Level 0..height. Note that setting the root size won't change
+ /// map->rootSize.
+ /// @param Size New node size.
+ void setSize(unsigned Level, unsigned Size) {
+ path[Level].size = Size;
+ if (Level)
+ subtree(Level - 1).setSize(Size);
+ }
+
+ /// setRoot - Clear the path and set a new root node.
+ /// @param Node New root node.
+ /// @param Size New root size.
+ /// @param Offset Offset into root node.
+ void setRoot(void *Node, unsigned Size, unsigned Offset) {
+ path.clear();
+ path.push_back(Entry(Node, Size, Offset));
+ }
+
+ /// replaceRoot - Replace the current root node with two new entries after the
+ /// tree height has increased.
+ /// @param Root The new root node.
+ /// @param Size Number of entries in the new root.
+ /// @param Offsets Offsets into the root and first branch nodes.
+ void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+ /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Left sibling, or NodeRef().
+ NodeRef getLeftSibling(unsigned Level) const;
+
+ /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+ /// unaltered.
+ /// @param Level Move node(Level).
+ void moveLeft(unsigned Level);
+
+ /// fillLeft - Grow path to Height by taking leftmost branches.
+ /// @param Height The target height.
+ void fillLeft(unsigned Height) {
+ while (height() < Height)
+ push(subtree(height()), 0);
+ }
+
+  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Right sibling, or NodeRef().
+ NodeRef getRightSibling(unsigned Level) const;
+
+  /// moveRight - Move path to the right sibling at Level. Leave nodes below
+  /// Level unaltered.
+ /// @param Level Move node(Level).
+ void moveRight(unsigned Level);
+
+ /// atBegin - Return true if path is at begin().
+ bool atBegin() const {
+ for (unsigned i = 0, e = path.size(); i != e; ++i)
+ if (path[i].offset != 0)
+ return false;
+ return true;
+ }
+
+ /// atLastEntry - Return true if the path is at the last entry of the node at
+ /// Level.
+ /// @param Level Node to examine.
+ bool atLastEntry(unsigned Level) const {
+ return path[Level].offset == path[Level].size - 1;
+ }
+
+ /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+ /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+ /// ensures that node(Level) is real by moving back to the last node at Level,
+ /// and setting offset(Level) to size(Level) if required.
+ /// @param Level The level where an insertion is about to take place.
+ void legalizeForInsert(unsigned Level) {
+ if (valid())
+ return;
+ moveLeft(Level);
+ ++path[Level].offset;
+ }
+};
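+
+// For illustration: in a tree of height 2, a position is described by three
+// entries. Assuming the root branch leads to a branch node at offset 1, which
+// leads to a leaf at offset 0:
+//
+//   path[0] = (root branch, rootSize, 1)
+//   path[1] = (branch node, its size, 0)
+//   path[2] = (leaf node, its size, leaf offset)
+//
+// While the path is consistent, node(Level + 1) == subtree(Level) holds.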
+
+} // namespace IntervalMapImpl
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT,
+ unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+ typename Traits = IntervalMapInfo<KeyT> >
+class IntervalMap {
+ typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
+ Branch;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ // The RootLeaf capacity is given as a template parameter. We must compute the
+ // corresponding RootBranch capacity.
+ enum {
+ DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+ (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+ RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+ };
+
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
+ RootBranch;
+
+ // When branched, we store a global start key as well as the branch node.
+ struct RootBranchData {
+ KeyT start;
+ RootBranch node;
+ };
+
+ enum {
+ RootDataSize = sizeof(RootBranchData) > sizeof(RootLeaf) ?
+ sizeof(RootBranchData) : sizeof(RootLeaf)
+ };
+
+public:
+ typedef typename Sizer::Allocator Allocator;
+ typedef KeyT KeyType;
+ typedef ValT ValueType;
+ typedef Traits KeyTraits;
+
+private:
+ // The root data is either a RootLeaf or a RootBranchData instance.
+ // We can't put them in a union since C++03 doesn't allow non-trivial
+ // constructors in unions.
+ // Instead, we use a char array with pointer alignment. The alignment is
+ // ensured by the allocator member in the class, but still verified in the
+ // constructor. We don't support keys or values that are more aligned than a
+ // pointer.
+ char data[RootDataSize];
+
+ // Tree height.
+ // 0: Leaves in root.
+ // 1: Root points to leaf.
+ // 2: root->branch->leaf ...
+ unsigned height;
+
+ // Number of entries in the root node.
+ unsigned rootSize;
+
+ // Allocator used for creating external nodes.
+ Allocator &allocator;
+
+ /// dataAs - Represent data as a node type without breaking aliasing rules.
+ template <typename T>
+ T &dataAs() const {
+ union {
+ const char *d;
+ T *t;
+ } u;
+ u.d = data;
+ return *u.t;
+ }
+
+ const RootLeaf &rootLeaf() const {
+    assert(!branched() && "Cannot access leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootLeaf &rootLeaf() {
+    assert(!branched() && "Cannot access leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootBranchData &rootBranchData() const {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ RootBranchData &rootBranchData() {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ const RootBranch &rootBranch() const { return rootBranchData().node; }
+ RootBranch &rootBranch() { return rootBranchData().node; }
+ KeyT rootBranchStart() const { return rootBranchData().start; }
+ KeyT &rootBranchStart() { return rootBranchData().start; }
+
+ template <typename NodeT> NodeT *newNode() {
+ return new(allocator.template Allocate<NodeT>()) NodeT();
+ }
+
+ template <typename NodeT> void deleteNode(NodeT *P) {
+ P->~NodeT();
+ allocator.Deallocate(P);
+ }
+
+ IdxPair branchRoot(unsigned Position);
+ IdxPair splitRoot(unsigned Position);
+
+ void switchRootToBranch() {
+ rootLeaf().~RootLeaf();
+ height = 1;
+ new (&rootBranchData()) RootBranchData();
+ }
+
+ void switchRootToLeaf() {
+ rootBranchData().~RootBranchData();
+ height = 0;
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ bool branched() const { return height > 0; }
+
+ ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+ void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+ unsigned Level));
+ void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+ explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+ assert((uintptr_t(data) & (alignOf<RootLeaf>() - 1)) == 0 &&
+ "Insufficient alignment");
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ ~IntervalMap() {
+ clear();
+ rootLeaf().~RootLeaf();
+ }
+
+ /// empty - Return true when no intervals are mapped.
+ bool empty() const {
+ return rootSize == 0;
+ }
+
+ /// start - Return the smallest mapped key in a non-empty map.
+ KeyT start() const {
+ assert(!empty() && "Empty IntervalMap has no start");
+ return !branched() ? rootLeaf().start(0) : rootBranchStart();
+ }
+
+ /// stop - Return the largest mapped key in a non-empty map.
+ KeyT stop() const {
+ assert(!empty() && "Empty IntervalMap has no stop");
+ return !branched() ? rootLeaf().stop(rootSize - 1) :
+ rootBranch().stop(rootSize - 1);
+ }
+
+ /// lookup - Return the mapped value at x or NotFound.
+ ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+ if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+ return NotFound;
+ return branched() ? treeSafeLookup(x, NotFound) :
+ rootLeaf().safeLookup(x, NotFound);
+ }
+
+ /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+ /// It is assumed that no key in the interval is mapped to another value, but
+ /// overlapping intervals already mapped to y will be coalesced.
+ void insert(KeyT a, KeyT b, ValT y) {
+ if (branched() || rootSize == RootLeaf::Capacity)
+ return find(a).insert(a, b, y);
+
+ // Easy insert into root leaf.
+ unsigned p = rootLeaf().findFrom(0, rootSize, a);
+ rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+ }
+
+ /// clear - Remove all entries.
+ void clear();
+
+ class const_iterator;
+ class iterator;
+ friend class const_iterator;
+ friend class iterator;
+
+ const_iterator begin() const {
+ const_iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ iterator begin() {
+ iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ const_iterator end() const {
+ const_iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ iterator end() {
+ iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ /// find - Return an iterator pointing to the first interval ending at or
+ /// after x, or end().
+ const_iterator find(KeyT x) const {
+ const_iterator I(*this);
+ I.find(x);
+ return I;
+ }
+
+ iterator find(KeyT x) {
+ iterator I(*this);
+ I.find(x);
+ return I;
+ }
+};
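+
+// For illustration, a minimal usage sketch (the typedef name and the values
+// are made up for this example):
+//
+//   typedef IntervalMap<unsigned, char> UCMap;
+//   UCMap::Allocator Alloc;
+//   UCMap Map(Alloc);
+//   Map.insert(10, 20, 'a');    // Map the closed interval [10;20] to 'a'.
+//   Map.insert(21, 30, 'a');    // Adjacent and equal, coalesces to [10;30].
+//   char x = Map.lookup(25);    // Returns 'a'.
+//   char y = Map.lookup(40, 0); // Not mapped, returns the NotFound value 0.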
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+ assert(branched() && "treeLookup assumes a branched root");
+
+ IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+ for (unsigned h = height-1; h; --h)
+ NR = NR.get<Branch>().safeLookup(x);
+ return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external leaf nodes to hold RootLeaf+1?
+ const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned pos = 0;
+ NodeRef node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Leaf *L = newNode<Leaf>();
+ L->copy(rootLeaf(), pos, 0, size[n]);
+ node[n] = NodeRef(L, size[n]);
+ pos += size[n];
+ }
+
+ // Destroy the old leaf node, construct branch node instead.
+ switchRootToBranch();
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+ rootBranch().subtree(n) = node[n];
+ }
+ rootBranchStart() = node[0].template get<Leaf>().start(0);
+ rootSize = Nodes;
+ return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+  // How many external branch nodes to hold RootBranch+1?
+ const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned Size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ Size[0] = rootSize;
+ else
+    NewOffset = distribute(Nodes, rootSize, Branch::Capacity, NULL, Size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned Pos = 0;
+ NodeRef Node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Branch *B = newNode<Branch>();
+ B->copy(rootBranch(), Pos, 0, Size[n]);
+ Node[n] = NodeRef(B, Size[n]);
+ Pos += Size[n];
+ }
+
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+ rootBranch().subtree(n) = Node[n];
+ }
+ rootSize = Nodes;
+ ++height;
+ return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+ if (!branched())
+ return;
+ SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+ // Collect level 0 nodes from the root.
+ for (unsigned i = 0; i != rootSize; ++i)
+ Refs.push_back(rootBranch().subtree(i));
+
+ // Visit all branch nodes.
+ for (unsigned h = height - 1; h; --h) {
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+ for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+ NextRefs.push_back(Refs[i].subtree(j));
+ (this->*f)(Refs[i], h);
+ }
+ Refs.clear();
+ Refs.swap(NextRefs);
+ }
+
+ // Visit all leaf nodes.
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+ (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+ if (Level)
+ deleteNode(&Node.get<Branch>());
+ else
+ deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+ if (branched()) {
+ visitNodes(&IntervalMap::deleteNode);
+ switchRootToLeaf();
+ }
+ rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::const_iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
+ public std::iterator<std::bidirectional_iterator_tag, ValT> {
+protected:
+ friend class IntervalMap;
+
+ // The map referred to.
+ IntervalMap *map;
+
+ // We store a full path from the root to the current position.
+ // The path may be partially filled, but never between iterator calls.
+ IntervalMapImpl::Path path;
+
+ explicit const_iterator(const IntervalMap &map) :
+ map(const_cast<IntervalMap*>(&map)) {}
+
+ bool branched() const {
+ assert(map && "Invalid iterator");
+ return map->branched();
+ }
+
+ void setRoot(unsigned Offset) {
+ if (branched())
+ path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+ else
+ path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+ }
+
+ void pathFillFind(KeyT x);
+ void treeFind(KeyT x);
+ void treeAdvanceTo(KeyT x);
+
+ /// unsafeStart - Writable access to start() for iterator.
+ KeyT &unsafeStart() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+ path.leaf<RootLeaf>().start(path.leafOffset());
+ }
+
+ /// unsafeStop - Writable access to stop() for iterator.
+ KeyT &unsafeStop() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+ path.leaf<RootLeaf>().stop(path.leafOffset());
+ }
+
+ /// unsafeValue - Writable access to value() for iterator.
+ ValT &unsafeValue() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+ path.leaf<RootLeaf>().value(path.leafOffset());
+ }
+
+public:
+ /// const_iterator - Create an iterator that isn't pointing anywhere.
+ const_iterator() : map(0) {}
+
+ /// valid - Return true if the current position is valid, false for end().
+ bool valid() const { return path.valid(); }
+
+ /// start - Return the beginning of the current interval.
+ const KeyT &start() const { return unsafeStart(); }
+
+ /// stop - Return the end of the current interval.
+ const KeyT &stop() const { return unsafeStop(); }
+
+ /// value - Return the mapped value at the current interval.
+ const ValT &value() const { return unsafeValue(); }
+
+ const ValT &operator*() const { return value(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ assert(map == RHS.map && "Cannot compare iterators from different maps");
+ if (!valid())
+ return !RHS.valid();
+ if (path.leafOffset() != RHS.path.leafOffset())
+ return false;
+ return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// goToBegin - Move to the first interval in map.
+ void goToBegin() {
+ setRoot(0);
+ if (branched())
+ path.fillLeft(map->height);
+ }
+
+ /// goToEnd - Move beyond the last interval in map.
+ void goToEnd() {
+ setRoot(map->rootSize);
+ }
+
+ /// preincrement - move to the next interval.
+ const_iterator &operator++() {
+ assert(valid() && "Cannot increment end()");
+ if (++path.leafOffset() == path.leafSize() && branched())
+ path.moveRight(map->height);
+ return *this;
+ }
+
+  /// postincrement - Don't do that!
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// predecrement - move to the previous interval.
+ const_iterator &operator--() {
+ if (path.leafOffset() && (valid() || !branched()))
+ --path.leafOffset();
+ else
+ path.moveLeft(map->height);
+ return *this;
+ }
+
+  /// postdecrement - Don't do that!
+ const_iterator operator--(int) {
+ const_iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+ /// find - Move to the first interval with stop >= x, or end().
+ /// This is a full search from the root, the current position is ignored.
+ void find(KeyT x) {
+ if (branched())
+ treeFind(x);
+ else
+ setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+ }
+
+ /// advanceTo - Move to the first interval with stop >= x, or end().
+ /// The search is started from the current position, and no earlier positions
+ /// can be found. This is much faster than find() for small moves.
+ void advanceTo(KeyT x) {
+ if (!valid())
+ return;
+ if (branched())
+ treeAdvanceTo(x);
+ else
+ path.leafOffset() =
+ map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+ }
+
+};
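+
+// For illustration, iterating over the UCMap sketched above (use() stands in
+// for whatever the caller does with each interval):
+//
+//   for (UCMap::const_iterator I = Map.begin(), E = Map.end(); I != E; ++I)
+//     use(I.start(), I.stop(), *I);
+//
+// For forward scans, advanceTo() is cheaper than repeated find() calls since
+// it continues from the current position.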
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+ IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+ for (unsigned i = map->height - path.height() - 1; i; --i) {
+ unsigned p = NR.get<Branch>().safeFind(0, x);
+ path.push(NR, p);
+ NR = NR.subtree(p);
+ }
+ path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+ setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+ // Can we stay on the same leaf node?
+ if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+ path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+ return;
+ }
+
+ // Drop the current leaf.
+ path.pop();
+
+ // Search towards the root for a usable subtree.
+ if (path.height()) {
+ for (unsigned l = path.height() - 1; l; --l) {
+ if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+ // The branch node at l+1 is usable
+ path.offset(l + 1) =
+ path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+ return pathFillFind(x);
+ }
+ path.pop();
+ }
+ // Is the level-1 Branch usable?
+ if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+ path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+ return pathFillFind(x);
+ }
+ }
+
+ // We reached the root.
+ setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+ friend class IntervalMap;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+ void setNodeStop(unsigned Level, KeyT Stop);
+ bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+ template <typename NodeT> bool overflow(unsigned Level);
+ void treeInsert(KeyT a, KeyT b, ValT y);
+ void eraseNode(unsigned Level);
+ void treeErase(bool UpdateRoot = true);
+ bool canCoalesceLeft(KeyT Start, ValT x);
+ bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+ /// iterator - Create null iterator.
+ iterator() {}
+
+ /// setStart - Move the start of the current interval.
+ /// This may cause coalescing with the previous interval.
+ /// @param a New start key, must not overlap the previous interval.
+ void setStart(KeyT a);
+
+ /// setStop - Move the end of the current interval.
+ /// This may cause coalescing with the following interval.
+ /// @param b New stop key, must not overlap the following interval.
+ void setStop(KeyT b);
+
+ /// setValue - Change the mapped value of the current interval.
+ /// This may cause coalescing with the previous and following intervals.
+ /// @param x New value.
+ void setValue(ValT x);
+
+ /// setStartUnchecked - Move the start of the current interval without
+ /// checking for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param a New start key.
+ void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+ /// setStopUnchecked - Move the end of the current interval without checking
+ /// for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param b New stop key.
+ void setStopUnchecked(KeyT b) {
+ this->unsafeStop() = b;
+ // Update keys in branch nodes as well.
+ if (this->path.atLastEntry(this->path.height()))
+ setNodeStop(this->path.height(), b);
+ }
+
+ /// setValueUnchecked - Change the mapped value of the current interval
+ /// without checking for coalescing.
+ /// @param x New value.
+ void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+ /// insert - Insert mapping [a;b] -> y before the current position.
+ void insert(KeyT a, KeyT b, ValT y);
+
+ /// erase - Erase the current interval.
+ void erase();
+
+ iterator &operator++() {
+ const_iterator::operator++();
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ iterator &operator--() {
+ const_iterator::operator--();
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+};
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ if (!this->branched()) {
+ unsigned i = P.leafOffset();
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return i && Node.value(i-1) == Value &&
+ Traits::adjacent(Node.stop(i-1), Start);
+ }
+ // Branched.
+ if (unsigned i = P.leafOffset()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+ } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+ unsigned i = NR.size() - 1;
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+ }
+ return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned i = P.leafOffset() + 1;
+ if (!this->branched()) {
+ if (i >= P.leafSize())
+ return false;
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ }
+ // Branched.
+ if (i < P.leafSize()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ } else if (NodeRef NR = P.getRightSibling(P.height())) {
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+ }
+ return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+ // There are no references to the root node, so nothing to update.
+ if (!Level)
+ return;
+ IntervalMapImpl::Path &P = this->path;
+ // Update nodes pointing to the current node.
+ while (--Level) {
+ P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+ if (!P.atLastEntry(Level))
+ return;
+ }
+ // Update root separately since it has a different layout.
+ P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+ assert(Traits::stopLess(a, this->stop()) && "Cannot move start beyond stop");
+ KeyT &CurStart = this->unsafeStart();
+ if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+ CurStart = a;
+ return;
+ }
+ // Coalesce with the interval to the left.
+ --*this;
+ a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+ assert(Traits::stopLess(this->start(), b) && "Cannot move stop beyond start");
+ if (Traits::startLess(b, this->stop()) ||
+ !canCoalesceRight(b, this->value())) {
+ setStopUnchecked(b);
+ return;
+ }
+ // Coalesce with interval to the right.
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+ setValueUnchecked(x);
+ if (canCoalesceRight(this->stop(), x)) {
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+ if (canCoalesceLeft(this->start(), x)) {
+ --*this;
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+ assert(Level && "Cannot insert next to the root");
+ bool SplitRoot = false;
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (Level == 1) {
+ // Insert into the root branch node.
+ if (IM.rootSize < RootBranch::Capacity) {
+ IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+ P.setSize(0, ++IM.rootSize);
+ P.reset(Level);
+ return SplitRoot;
+ }
+
+ // We need to split the root while keeping our position.
+ SplitRoot = true;
+ IdxPair Offset = IM.splitRoot(P.offset(0));
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Fall through to insert at the new higher level.
+ ++Level;
+ }
+
+ // When inserting before end(), make sure we have a valid path.
+ P.legalizeForInsert(--Level);
+
+ // Insert into the branch node at Level-1.
+ if (P.size(Level) == Branch::Capacity) {
+    // Branch node is full, handle the overflow.
+ assert(!SplitRoot && "Cannot overflow after splitting the root");
+ SplitRoot = overflow<Branch>(Level);
+ Level += SplitRoot;
+ }
+ P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+ P.setSize(Level, P.size(Level) + 1);
+ if (P.atLastEntry(Level))
+ setNodeStop(Level, Stop);
+ P.reset(Level + 1);
+ return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+ if (this->branched())
+ return treeInsert(a, b, y);
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ // Try simple root leaf insert.
+ unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+ // Was the root node insert successful?
+ if (Size <= RootLeaf::Capacity) {
+ P.setSize(0, IM.rootSize = Size);
+ return;
+ }
+
+ // Root leaf node is full, we must branch.
+ IdxPair Offset = IM.branchRoot(P.leafOffset());
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Now it fits in the new leaf.
+ treeInsert(a, b, y);
+}
+
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+
+ if (!P.valid())
+ P.legalizeForInsert(this->map->height);
+
+ // Check if this insertion will extend the node to the left.
+ if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+ // Node is growing to the left, will it affect a left sibling node?
+ if (NodeRef Sib = P.getLeftSibling(P.height())) {
+ Leaf &SibLeaf = Sib.get<Leaf>();
+ unsigned SibOfs = Sib.size() - 1;
+ if (SibLeaf.value(SibOfs) == y &&
+ Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+ // This insertion will coalesce with the last entry in SibLeaf. We can
+ // handle it in two ways:
+ // 1. Extend SibLeaf.stop to b and be done, or
+ // 2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+        // We prefer 1, but need 2 when coalescing to the right as well.
+ Leaf &CurLeaf = P.leaf<Leaf>();
+ P.moveLeft(P.height());
+ if (Traits::stopLess(b, CurLeaf.start(0)) &&
+ (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+ // Easy, just extend SibLeaf and we're done.
+ setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+ return;
+ } else {
+ // We have both left and right coalescing. Erase the old SibLeaf entry
+ // and continue inserting the larger interval.
+ a = SibLeaf.start(SibOfs);
+ treeErase(/* UpdateRoot= */false);
+ }
+ }
+ } else {
+ // No left sibling means we are at begin(). Update cached bound.
+ this->map->rootBranchStart() = a;
+ }
+ }
+
+ // When we are inserting at the end of a leaf node, we must update stops.
+ unsigned Size = P.leafSize();
+ bool Grow = P.leafOffset() == Size;
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+ // Leaf insertion unsuccessful? Overflow and try again.
+ if (Size > Leaf::Capacity) {
+ overflow<Leaf>(P.height());
+ Grow = P.leafOffset() == P.leafSize();
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+ assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+ }
+
+ // Inserted, update offset and leaf size.
+ P.setSize(P.height(), Size);
+
+ // Insert was the last node entry, update stops.
+ if (Grow)
+ setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ assert(P.valid() && "Cannot erase end()");
+ if (this->branched())
+ return treeErase();
+ IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ Leaf &Node = P.leaf<Leaf>();
+
+ // Nodes are not allowed to become empty.
+ if (P.leafSize() == 1) {
+ IM.deleteNode(&Node);
+ eraseNode(IM.height);
+ // Update rootBranchStart if we erased begin().
+ if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+ return;
+ }
+
+ // Erase current entry.
+ Node.erase(P.leafOffset(), P.leafSize());
+ unsigned NewSize = P.leafSize() - 1;
+ P.setSize(IM.height, NewSize);
+ // When we erase the last entry, update stop and move to a legal position.
+ if (P.leafOffset() == NewSize) {
+ setNodeStop(IM.height, Node.stop(NewSize - 1));
+ P.moveRight(IM.height);
+ } else if (UpdateRoot && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+ assert(Level && "Cannot erase root node");
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (--Level == 0) {
+ IM.rootBranch().erase(P.offset(0), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+ // If this cleared the root, switch to height=0.
+ if (IM.empty()) {
+ IM.switchRootToLeaf();
+ this->setRoot(0);
+ return;
+ }
+ } else {
+ // Remove node ref from branch node at Level.
+ Branch &Parent = P.node<Branch>(Level);
+ if (P.size(Level) == 1) {
+ // Branch node became empty, remove it recursively.
+ IM.deleteNode(&Parent);
+ eraseNode(Level);
+ } else {
+ // Branch node won't become empty.
+ Parent.erase(P.offset(Level), P.size(Level));
+ unsigned NewSize = P.size(Level) - 1;
+ P.setSize(Level, NewSize);
+ // If we removed the last branch, update stop and move to a legal pos.
+ if (P.offset(Level) == NewSize) {
+ setNodeStop(Level, Parent.stop(NewSize - 1));
+ P.moveRight(Level);
+ }
+ }
+ }
+ // Update path cache for the new right sibling position.
+ if (P.valid()) {
+ P.reset(Level + 1);
+ P.offset(Level + 1) = 0;
+ }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @param NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned CurSize[4];
+ NodeT *Node[4];
+ unsigned Nodes = 0;
+ unsigned Elements = 0;
+ unsigned Offset = P.offset(Level);
+
+ // Do we have a left sibling?
+ NodeRef LeftSib = P.getLeftSibling(Level);
+ if (LeftSib) {
+ Offset += Elements = CurSize[Nodes] = LeftSib.size();
+ Node[Nodes++] = &LeftSib.get<NodeT>();
+ }
+
+ // Current node.
+ Elements += CurSize[Nodes] = P.size(Level);
+ Node[Nodes++] = &P.node<NodeT>(Level);
+
+ // Do we have a right sibling?
+ NodeRef RightSib = P.getRightSibling(Level);
+ if (RightSib) {
+ Elements += CurSize[Nodes] = RightSib.size();
+ Node[Nodes++] = &RightSib.get<NodeT>();
+ }
+
+ // Do we need to allocate a new node?
+ unsigned NewNode = 0;
+ if (Elements + 1 > Nodes * NodeT::Capacity) {
+ // Insert NewNode at the penultimate position, or after a single node.
+ NewNode = Nodes == 1 ? 1 : Nodes - 1;
+ CurSize[Nodes] = CurSize[NewNode];
+ Node[Nodes] = Node[NewNode];
+ CurSize[NewNode] = 0;
+ Node[NewNode] = this->map->newNode<NodeT>();
+ ++Nodes;
+ }
+
+ // Compute the new element distribution.
+ unsigned NewSize[4];
+ IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+ CurSize, NewSize, Offset, true);
+ adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+ // Move current location to the leftmost node.
+ if (LeftSib)
+ P.moveLeft(Level);
+
+ // Elements have been rearranged, now update node sizes and stops.
+ bool SplitRoot = false;
+ unsigned Pos = 0;
+ for (;;) {
+ KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+ if (NewNode && Pos == NewNode) {
+ SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+ Level += SplitRoot;
+ } else {
+ P.setSize(Level, NewSize[Pos]);
+ setNodeStop(Level, Stop);
+ }
+ if (Pos + 1 == Nodes)
+ break;
+ P.moveRight(Level);
+ ++Pos;
+ }
+
+ // Where was I? Find NewOffset.
+ while (Pos != NewOffset.first) {
+ P.moveLeft(Level);
+ --Pos;
+ }
+ P.offset(Level) = NewOffset.second;
+ return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapOverlaps ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+/// bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+/// for (IntervalMapOverlaps I(a, b); I.valid(); ++I) { ... }
+///
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+ typedef typename MapA::KeyType KeyType;
+ typedef typename MapA::KeyTraits Traits;
+ typename MapA::const_iterator posA;
+ typename MapB::const_iterator posB;
+
+ /// advance - Move posA and posB forward until reaching an overlap, or until
+ /// either meets end.
+ /// Don't move the iterators if they are already overlapping.
+ void advance() {
+ if (!valid())
+ return;
+
+ if (Traits::stopLess(posA.stop(), posB.start())) {
+ // A ends before B begins. Catch up.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ } else if (Traits::stopLess(posB.stop(), posA.start())) {
+ // B ends before A begins. Catch up.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ } else
+ // Already overlapping.
+ return;
+
+ for (;;) {
+ // Make a.end > b.start.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ // Make b.end > a.start.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ }
+ }
+
+public:
+ /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+ IntervalMapOverlaps(const MapA &a, const MapB &b)
+ : posA(b.empty() ? a.end() : a.find(b.start())),
+ posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+ /// valid - Return true if iterator is at an overlap.
+ bool valid() const {
+ return posA.valid() && posB.valid();
+ }
+
+ /// a - access the left hand side in the overlap.
+ const typename MapA::const_iterator &a() const { return posA; }
+
+ /// b - access the right hand side in the overlap.
+ const typename MapB::const_iterator &b() const { return posB; }
+
+ /// start - Beginning of the overlapping interval.
+ KeyType start() const {
+ KeyType ak = a().start();
+ KeyType bk = b().start();
+ return Traits::startLess(ak, bk) ? bk : ak;
+ }
+
+ /// stop - End of the overlapping interval.
+ KeyType stop() const {
+ KeyType ak = a().stop();
+ KeyType bk = b().stop();
+ return Traits::startLess(ak, bk) ? ak : bk;
+ }
+
+ /// skipA - Move to the next overlap that doesn't involve a().
+ void skipA() {
+ ++posA;
+ advance();
+ }
+
+ /// skipB - Move to the next overlap that doesn't involve b().
+ void skipB() {
+ ++posB;
+ advance();
+ }
+
+ /// Preincrement - Move to the next overlap.
+ IntervalMapOverlaps &operator++() {
+ // Bump the iterator that ends first. The other one may have more overlaps.
+ if (Traits::startLess(posB.stop(), posA.stop()))
+ skipB();
+ else
+ skipA();
+ return *this;
+ }
+
+ /// advanceTo - Move to the first overlapping interval with
+ /// stopLess(x, stop()).
+ void advanceTo(KeyType x) {
+ if (!valid())
+ return;
+ // Make sure advanceTo sees monotonic keys.
+ if (Traits::stopLess(posA.stop(), x))
+ posA.advanceTo(x);
+ if (Traits::stopLess(posB.stop(), x))
+ posB.advanceTo(x);
+ advance();
+ }
+};
+
+} // namespace llvm
+
+#endif
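A minimal usage sketch of the two patterns named in the IntervalMapOverlaps class comment above. The instantiation, function name, and inserted ranges are illustrative only; the default traits give closed, coalescing intervals.

#include "llvm/ADT/IntervalMap.h"
using namespace llvm;

void overlapsDemo() {
  typedef IntervalMap<unsigned, char> Map;
  Map::Allocator Alloc;
  Map A(Alloc), B(Alloc);
  A.insert(10, 20, 'a');                 // closed interval [10,20]
  B.insert(15, 30, 'b');

  // 1. Test for overlap.
  bool Overlap = IntervalMapOverlaps<Map, Map>(A, B).valid();

  // 2. Enumerate overlaps: here a single overlap covering [15,20].
  for (IntervalMapOverlaps<Map, Map> I(A, B); I.valid(); ++I) {
    // I.start()..I.stop() is the overlapping range; I.a() and I.b() are
    // positioned on the contributing intervals of A and B.
  }
  (void)Overlap;
}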
diff --git a/contrib/llvm/include/llvm/ADT/Optional.h b/contrib/llvm/include/llvm/ADT/Optional.h
index 34e54a0..ee8b69f 100644
--- a/contrib/llvm/include/llvm/ADT/Optional.h
+++ b/contrib/llvm/include/llvm/ADT/Optional.h
@@ -61,6 +61,60 @@ template <typename T>
struct simplify_type<Optional<T> >
: public simplify_type<const Optional<T> > {};
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator==(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator!=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>(const Optional<T> &X, const Optional<U> &Y);
+
} // end llvm namespace
#endif
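Since the operators above are declared but never defined, any direct comparison of two Optionals is rejected at build time (the void return cannot appear in a boolean context, and the symbol never links). A sketch of the explicit comparison the comments ask for, assuming the usual hasValue()/getValue() accessors on Optional; the helper name is hypothetical.

#include "llvm/ADT/Optional.h"
using namespace llvm;

// Hypothetical helper spelling out the empty cases by hand.
template <typename T>
bool sameValue(const Optional<T> &X, const Optional<T> &Y) {
  if (X.hasValue() != Y.hasValue())
    return false;                       // one is empty, the other is not
  if (!X.hasValue())
    return true;                        // both empty
  return X.getValue() == Y.getValue();  // both present: compare payloads
}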
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
index 64f4a7c..85dbba2 100644
--- a/contrib/llvm/include/llvm/ADT/PointerIntPair.h
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -91,6 +91,13 @@ public:
Value |= IntVal << IntShift; // Set new integer.
}
+ PointerTy const *getAddrOfPointer() const {
+ assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+ "Can only return the address if IntBits is cleared and "
+ "PtrTraits doesn't change the pointer");
+ return reinterpret_cast<PointerTy const *>(&Value);
+ }
+
void *getOpaqueValue() const { return reinterpret_cast<void*>(Value); }
void setFromOpaqueValue(void *Val) { Value = reinterpret_cast<intptr_t>(Val);}
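A sketch of when the new getAddrOfPointer is legal: the int bits must all be zero and the pointer traits must not transform the stored value, which is exactly what the assert checks. The demo function and sample types are illustrative.

#include "llvm/ADT/PointerIntPair.h"
using namespace llvm;

void addrOfPointerDemo(int *P) {
  PointerIntPair<int *, 1, bool> Pair(P, false);
  // Legal while the bool is false (all int bits clear).
  int *const *Addr = Pair.getAddrOfPointer();
  // *Addr == Pair.getPointer() holds here.
  Pair.setInt(true);
  // Calling getAddrOfPointer() now would trip the assert above.
  (void)Addr;
}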
diff --git a/contrib/llvm/include/llvm/ADT/PointerUnion.h b/contrib/llvm/include/llvm/ADT/PointerUnion.h
index 3a514b5..61de042 100644
--- a/contrib/llvm/include/llvm/ADT/PointerUnion.h
+++ b/contrib/llvm/include/llvm/ADT/PointerUnion.h
@@ -107,6 +107,18 @@ namespace llvm {
if (is<T>()) return get<T>();
return T();
}
+
+ /// \brief If the union is set to the first pointer type, we can get an
+ /// address pointing to it.
+ template <typename T>
+ PT1 const *getAddrOf() const {
+ assert(is<PT1>() && "Val is not the first pointer");
+ assert(get<PT1>() == Val.getPointer() &&
+ "Can't get the address because PointerLikeTypeTraits changes the ptr");
+ T const *can_only_get_address_of_first_pointer_type
+ = reinterpret_cast<PT1 const *>(Val.getAddrOfPointer());
+ return can_only_get_address_of_first_pointer_type;
+ }
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
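A sketch of the new accessor; it only works while the union holds its first pointer type, and the deliberately long local variable in the implementation turns a wrong template argument into a readable compile-time error. The demo function is illustrative.

#include "llvm/ADT/PointerUnion.h"
using namespace llvm;

void getAddrOfDemo(int *IP) {
  PointerUnion<int *, float *> PU(IP);
  // Fine: the union currently holds its first pointer type.
  int *const *Addr = PU.getAddrOf<int *>();
  // PU.getAddrOf<float *>() would fail to compile, and calling getAddrOf
  // while the union held a float* would trip the assert.
  (void)Addr;
}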
diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
index 47e5b2b..e3b4994 100644
--- a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -56,8 +56,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
- if (!this->Visited.count(BB)) { // If the block is not visited...
- this->Visited.insert(BB);
+ if (this->Visited.insert(BB)) { // If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
@@ -72,8 +71,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
- if(!S.count(BB)) {
- this->Visited.insert(BB);
+ if (this->Visited.insert(BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
diff --git a/contrib/llvm/include/llvm/ADT/SCCIterator.h b/contrib/llvm/include/llvm/ADT/SCCIterator.h
index c49d599..3e93cfe 100644
--- a/contrib/llvm/include/llvm/ADT/SCCIterator.h
+++ b/contrib/llvm/include/llvm/ADT/SCCIterator.h
@@ -60,7 +60,7 @@ class scc_iterator
// First element is basic block pointer, second is the 'next child' to visit
std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
- // MinVistNumStack - Stack holding the "min" values for each node in the DFS.
+ // MinVisitNumStack - Stack holding the "min" values for each node in the DFS.
// This is used to track the minimum uplink values for all children of
// the corresponding node on the VisitStack.
std::vector<unsigned> MinVisitNumStack;
diff --git a/contrib/llvm/include/llvm/ADT/ScopedHashTable.h b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
index c96ad19..af3c482 100644
--- a/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
+++ b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -31,25 +31,23 @@
#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
#define LLVM_ADT_SCOPEDHASHTABLE_H
-#include <cassert>
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
namespace llvm {
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
class ScopedHashTable;
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V>
class ScopedHashTableVal {
ScopedHashTableVal *NextInScope;
ScopedHashTableVal *NextForKey;
K Key;
V Val;
+ ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
public:
- ScopedHashTableVal(ScopedHashTableVal *nextInScope,
- ScopedHashTableVal *nextForKey, const K &key, const V &val)
- : NextInScope(nextInScope), NextForKey(nextForKey), Key(key), Val(val) {
- }
const K &getKey() const { return Key; }
const V &getValue() const { return Val; }
@@ -57,33 +55,53 @@ public:
ScopedHashTableVal *getNextForKey() { return NextForKey; }
const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
-public:
ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+ template <typename AllocatorTy>
+ static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+ ScopedHashTableVal *nextForKey,
+ const K &key, const V &val,
+ AllocatorTy &Allocator) {
+ ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+ // Set up the value.
+ new (New) ScopedHashTableVal(key, val);
+ New->NextInScope = nextInScope;
+ New->NextForKey = nextForKey;
+ return New;
+ }
+
+ template <typename AllocatorTy>
+ void Destroy(AllocatorTy &Allocator) {
+ // Free memory referenced by the item.
+ this->~ScopedHashTableVal();
+ Allocator.Deallocate(this);
+ }
};
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
class ScopedHashTableScope {
/// HT - The hashtable that we are active for.
- ScopedHashTable<K, V, KInfo> &HT;
+ ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
/// PrevScope - This is the scope that we are shadowing in HT.
ScopedHashTableScope *PrevScope;
/// LastValInScope - This is the last value that was inserted for this scope
/// or null if none have been inserted yet.
- ScopedHashTableVal<K, V, KInfo> *LastValInScope;
+ ScopedHashTableVal<K, V> *LastValInScope;
void operator=(ScopedHashTableScope&); // DO NOT IMPLEMENT
ScopedHashTableScope(ScopedHashTableScope&); // DO NOT IMPLEMENT
public:
- ScopedHashTableScope(ScopedHashTable<K, V, KInfo> &HT);
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
~ScopedHashTableScope();
private:
- friend class ScopedHashTable<K, V, KInfo>;
- ScopedHashTableVal<K, V, KInfo> *getLastValInScope() {
+ friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+ ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
- void setLastValInScope(ScopedHashTableVal<K, V, KInfo> *Val) {
+ void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
LastValInScope = Val;
}
};
@@ -91,9 +109,9 @@ private:
template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
class ScopedHashTableIterator {
- ScopedHashTableVal<K, V, KInfo> *Node;
+ ScopedHashTableVal<K, V> *Node;
public:
- ScopedHashTableIterator(ScopedHashTableVal<K, V, KInfo> *node) : Node(node) {}
+ ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
V &operator*() const {
assert(Node && "Dereference end()");
@@ -121,26 +139,40 @@ public:
};
-template <typename K, typename V, typename KInfo>
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
class ScopedHashTable {
- DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo> TopLevelMap;
- ScopedHashTableScope<K, V, KInfo> *CurScope;
+ typedef ScopedHashTableVal<K, V> ValTy;
+ DenseMap<K, ValTy*, KInfo> TopLevelMap;
+ ScopedHashTableScope<K, V, KInfo, AllocatorTy> *CurScope;
+
+ AllocatorTy Allocator;
+
ScopedHashTable(const ScopedHashTable&); // NOT YET IMPLEMENTED
void operator=(const ScopedHashTable&); // NOT YET IMPLEMENTED
- friend class ScopedHashTableScope<K, V, KInfo>;
+ friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
public:
ScopedHashTable() : CurScope(0) {}
+ ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
~ScopedHashTable() {
assert(CurScope == 0 && TopLevelMap.empty() && "Scope imbalance!");
}
+
+ /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+ /// to the name of the scope for this hash table.
+ typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
+
+ /// Access to the allocator.
+ typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
+ typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
+ AllocatorRefTy getAllocator() { return Allocator; }
+ AllocatorCRefTy getAllocator() const { return Allocator; }
bool count(const K &Key) const {
return TopLevelMap.count(Key);
}
V lookup(const K &Key) {
- typename DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo>::iterator
- I = TopLevelMap.find(Key);
+ typename DenseMap<K, ValTy*, KInfo>::iterator I = TopLevelMap.find(Key);
if (I != TopLevelMap.end())
return I->second->getValue();
@@ -150,10 +182,10 @@ public:
void insert(const K &Key, const V &Val) {
assert(CurScope && "No scope active!");
- ScopedHashTableVal<K, V, KInfo> *&KeyEntry = TopLevelMap[Key];
+ ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
- KeyEntry= new ScopedHashTableVal<K, V, KInfo>(CurScope->getLastValInScope(),
- KeyEntry, Key, Val);
+ KeyEntry = ValTy::Create(CurScope->getLastValInScope(), KeyEntry, Key, Val,
+ Allocator);
CurScope->setLastValInScope(KeyEntry);
}
@@ -162,7 +194,7 @@ public:
iterator end() { return iterator(0); }
iterator begin(const K &Key) {
- typename DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo>::iterator I =
+ typename DenseMap<K, ValTy*, KInfo>::iterator I =
TopLevelMap.find(Key);
if (I == TopLevelMap.end()) return end();
return iterator(I->second);
@@ -171,29 +203,28 @@ public:
/// ScopedHashTableScope ctor - Install this as the current scope for the hash
/// table.
-template <typename K, typename V, typename KInfo>
-ScopedHashTableScope<K, V, KInfo>::
- ScopedHashTableScope(ScopedHashTable<K, V, KInfo> &ht) : HT(ht) {
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
PrevScope = HT.CurScope;
HT.CurScope = this;
LastValInScope = 0;
}
-template <typename K, typename V, typename KInfo>
-ScopedHashTableScope<K, V, KInfo>::~ScopedHashTableScope() {
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
assert(HT.CurScope == this && "Scope imbalance!");
HT.CurScope = PrevScope;
// Pop and delete all values corresponding to this scope.
- while (ScopedHashTableVal<K, V, KInfo> *ThisEntry = LastValInScope) {
+ while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
// Pop this value out of the TopLevelMap.
if (ThisEntry->getNextForKey() == 0) {
assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
"Scope imbalance!");
HT.TopLevelMap.erase(ThisEntry->getKey());
} else {
- ScopedHashTableVal<K, V, KInfo> *&KeyEntry =
- HT.TopLevelMap[ThisEntry->getKey()];
+ ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
assert(KeyEntry == ThisEntry && "Scope imbalance!");
KeyEntry = ThisEntry->getNextForKey();
}
@@ -202,7 +233,7 @@ ScopedHashTableScope<K, V, KInfo>::~ScopedHashTableScope() {
LastValInScope = ThisEntry->getNextInScope();
// Delete this entry.
- delete ThisEntry;
+ ThisEntry->Destroy(HT.getAllocator());
}
}
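A usage sketch of the reworked allocation scheme: entries are now created through ValTy::Create out of the table's allocator, so plugging in a BumpPtrAllocator (whose Deallocate is a no-op) batches all entry memory into slabs. The key/value types and demo values are illustrative.

#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/Support/Allocator.h"
using namespace llvm;

void scopedTableDemo() {
  typedef ScopedHashTable<int, int, DenseMapInfo<int>,
                          BumpPtrAllocator> TableTy;
  TableTy Table;
  TableTy::ScopeTy Outer(Table);
  Table.insert(1, 10);
  {
    TableTy::ScopeTy Inner(Table);
    Table.insert(1, 20);             // shadows the outer binding
    // Table.lookup(1) == 20 here.
  }                                  // Inner pops; its entries are destroyed
  // Table.lookup(1) == 10 again.
}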
diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h
index bf8286c..abe2067 100644
--- a/contrib/llvm/include/llvm/ADT/SetVector.h
+++ b/contrib/llvm/include/llvm/ADT/SetVector.h
@@ -114,13 +114,15 @@ public:
}
/// @brief Remove an item from the set vector.
- void remove(const value_type& X) {
+ bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I =
std::find(vector_.begin(), vector_.end(), X);
assert(I != vector_.end() && "Corrupted SetVector instances!");
vector_.erase(I);
+ return true;
}
+ return false;
}
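The new bool return makes remove usable as a test-and-erase in one call; a small illustrative sketch:

#include "llvm/ADT/SetVector.h"
using namespace llvm;

void removeDemo() {
  SetVector<int> SV;
  SV.insert(42);
  bool First  = SV.remove(42);  // true: present, erased from set and vector
  bool Second = SV.remove(42);  // false: nothing left to erase
  (void)First; (void)Second;
}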
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index 3441d0a..b15b3ee 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -187,6 +187,13 @@ public:
return getPointer()->any();
}
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ if (isSmall())
+ return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+ return getPointer()->all();
+ }
+
/// none - Returns true if none of the bits are set.
bool none() const {
if (isSmall())
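The small-mode test above reduces "all N bits set" to one compare against the mask (1 << N) - 1; a standalone restatement, assuming N stays below the word width as the small representation guarantees:

#include <stdint.h>

// Same arithmetic as the isSmall() branch of all() above.
static bool allBitsSet(uintptr_t Bits, unsigned NumBits) {
  return Bits == (uintptr_t(1) << NumBits) - 1;
}
// allBitsSet(0x7, 3) == true; allBitsSet(0x5, 3) == false.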
diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
index 424bdba..ff32ba8 100644
--- a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -16,9 +16,10 @@
#define LLVM_ADT_SMALLPTRSET_H
#include <cassert>
+#include <cstddef>
#include <cstring>
#include <iterator>
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
namespace llvm {
@@ -56,7 +57,7 @@ protected:
/// it, so that the end iterator actually points to valid memory.
unsigned CurArraySize;
- // If small, this is # elts allocated consequtively
+ // If small, this is # elts allocated consecutively
unsigned NumElements;
unsigned NumTombstones;
diff --git a/contrib/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm/include/llvm/ADT/SmallString.h
index 05bd8a4..da26416 100644
--- a/contrib/llvm/include/llvm/ADT/SmallString.h
+++ b/contrib/llvm/include/llvm/ADT/SmallString.h
@@ -27,6 +27,9 @@ public:
// Default ctor - Initialize to empty.
SmallString() {}
+ // Initialize from a StringRef.
+ SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
@@ -38,15 +41,16 @@ public:
// Extra methods.
StringRef str() const { return StringRef(this->begin(), this->size()); }
- // Implicit conversion to StringRef.
- operator StringRef() const { return str(); }
-
- const char *c_str() {
+ // TODO: Make this const, if it's safe...
+ const char* c_str() {
this->push_back(0);
this->pop_back();
return this->data();
}
+ // Implicit conversion to StringRef.
+ operator StringRef() const { return str(); }
+
// Extra operators.
const SmallString &operator=(StringRef RHS) {
this->clear();
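The new StringRef constructor and the c_str trick in one sketch: c_str pushes a NUL and immediately pops it, leaving a terminator in the buffer just past size() without changing the logical length. The demo function is illustrative.

#include "llvm/ADT/SmallString.h"
using namespace llvm;

void smallStringDemo() {
  SmallString<32> S(StringRef("hello"));  // new StringRef constructor
  S += ".ll";                             // "hello.ll"
  const char *C = S.c_str();              // NUL-terminated; size() still 8
  StringRef R = S;                        // implicit conversion, unchanged
  (void)C; (void)R;
}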
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index fec6bcd..8b0a13d 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -20,6 +20,7 @@
#include <cstddef>
#include <cstdlib>
#include <cstring>
+#include <iterator>
#include <memory>
#ifdef _MSC_VER
@@ -57,19 +58,13 @@ protected:
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. An array of char would work great, but might not be
- // aligned sufficiently. Instead, we either use GCC extensions, or some
- // number of union instances for the space, which guarantee maximal alignment.
- struct U {
-#ifdef __GNUC__
- char X __attribute__((aligned));
-#else
- union {
- double D;
- long double LD;
- long long L;
- void *P;
- } X;
-#endif
+ // aligned sufficiently. Instead we use some number of union instances for
+ // the space, which guarantee maximal alignment.
+ union U {
+ double D;
+ long double LD;
+ long long L;
+ void *P;
} FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
@@ -94,7 +89,7 @@ protected:
}
/// grow_pod - This is an implementation of the grow() method which only works
- /// on POD-like datatypes and is out of line to reduce code duplication.
+ /// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
public:
@@ -269,7 +264,7 @@ public:
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
-
+
SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
@@ -346,7 +341,6 @@ public:
return Result;
}
-
void swap(SmallVectorImpl &RHS);
/// append - Add the specified range to the end of the SmallVector.
diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
index 0862981..d977136 100644
--- a/contrib/llvm/include/llvm/ADT/SparseBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
@@ -17,7 +17,7 @@
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
diff --git a/contrib/llvm/include/llvm/ADT/Statistic.h b/contrib/llvm/include/llvm/ADT/Statistic.h
index 3a1319f..f137ea2 100644
--- a/contrib/llvm/include/llvm/ADT/Statistic.h
+++ b/contrib/llvm/include/llvm/ADT/Statistic.h
@@ -26,7 +26,7 @@
#ifndef LLVM_ADT_STATISTIC_H
#define LLVM_ADT_STATISTIC_H
-#include "llvm/System/Atomic.h"
+#include "llvm/Support/Atomic.h"
namespace llvm {
class raw_ostream;
diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h
index 3c53ade..acbed66 100644
--- a/contrib/llvm/include/llvm/ADT/StringExtras.h
+++ b/contrib/llvm/include/llvm/ADT/StringExtras.h
@@ -14,7 +14,7 @@
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include <cctype>
@@ -25,10 +25,11 @@
namespace llvm {
template<typename T> class SmallVectorImpl;
-/// hexdigit - Return the (uppercase) hexadecimal character for the
+/// hexdigit - Return the hexadecimal character for the
/// given number \arg X (which should be less than 16).
-static inline char hexdigit(unsigned X) {
- return X < 10 ? '0' + X : 'A' + X - 10;
+static inline char hexdigit(unsigned X, bool LowerCase = false) {
+ const char HexChar = LowerCase ? 'a' : 'A';
+ return X < 10 ? '0' + X : HexChar + X - 10;
}
/// utohex_buffer - Emit the specified number into the buffer specified by
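Worked values for the new LowerCase parameter: the arithmetic maps 0-9 to '0'..'9' and 10-15 to six letters starting at the chosen base character. The function body mirrors the patched hexdigit above.

#include <cassert>

static char hexdigit(unsigned X, bool LowerCase = false) {
  const char HexChar = LowerCase ? 'a' : 'A';
  return X < 10 ? '0' + X : HexChar + X - 10;
}

int main() {
  assert(hexdigit(7) == '7');         // 0..9   -> '0'..'9'
  assert(hexdigit(12) == 'C');        // 10..15 -> 'A'..'F'
  assert(hexdigit(12, true) == 'c');  // lowercase variant
  return 0;
}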
diff --git a/contrib/llvm/include/llvm/ADT/StringMap.h b/contrib/llvm/include/llvm/ADT/StringMap.h
index 59ff6aa..bad0e6f 100644
--- a/contrib/llvm/include/llvm/ADT/StringMap.h
+++ b/contrib/llvm/include/llvm/ADT/StringMap.h
@@ -137,8 +137,8 @@ public:
StringMapEntry(unsigned strLen, const ValueTy &V)
: StringMapEntryBase(strLen), second(V) {}
- StringRef getKey() const {
- return StringRef(getKeyData(), getKeyLength());
+ StringRef getKey() const {
+ return StringRef(getKeyData(), getKeyLength());
}
const ValueTy &getValue() const { return second; }
@@ -167,7 +167,7 @@ public:
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
- unsigned Alignment = alignof<StringMapEntry>();
+ unsigned Alignment = alignOf<StringMapEntry>();
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
@@ -216,14 +216,14 @@ public:
static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) {
return GetStringMapEntryFromValue(const_cast<ValueTy&>(V));
}
-
+
/// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
/// into a StringMapEntry, return the StringMapEntry itself.
static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
return *reinterpret_cast<StringMapEntry*>(Ptr);
}
-
+
/// Destroy - Destroy this StringMapEntry, releasing memory back to the
/// specified allocator.
@@ -254,7 +254,7 @@ public:
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
-
+
explicit StringMap(AllocatorTy A)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
@@ -262,16 +262,19 @@ public:
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
assert(RHS.empty() &&
"Copy ctor from non-empty stringmap not implemented yet!");
+ (void)RHS;
}
void operator=(const StringMap &RHS) {
assert(RHS.empty() &&
"assignment from non-empty stringmap not implemented yet!");
+ (void)RHS;
clear();
}
-
- AllocatorTy &getAllocator() { return Allocator; }
- const AllocatorTy &getAllocator() const { return Allocator; }
+ typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
+ typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
+ AllocatorRefTy getAllocator() { return Allocator; }
+ AllocatorCRefTy getAllocator() const { return Allocator; }
typedef const char* key_type;
typedef ValueTy mapped_type;
diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h
index 8386d3e..1766d2b 100644
--- a/contrib/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm/include/llvm/ADT/StringRef.h
@@ -132,7 +132,7 @@ namespace llvm {
/// numbers.
int compare_numeric(StringRef RHS) const;
- /// \brief Determine the edit distance between this string and another
+ /// \brief Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
@@ -142,11 +142,16 @@ namespace llvm {
/// operation, rather than as two operations (an insertion and a
/// removal).
///
+ /// \param MaxEditDistance If non-zero, the maximum edit distance that
+ /// this routine is allowed to compute. If the edit distance would exceed
+ /// that maximum, returns \c MaxEditDistance+1.
+ ///
/// \returns the minimum number of character insertions, removals,
/// or (if \p AllowReplacements is \c true) replacements needed to
/// transform one of the given strings into the other. If zero,
/// the strings are identical.
- unsigned edit_distance(StringRef Other, bool AllowReplacements = true);
+ unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0);
/// str - Get the contents as an std::string.
std::string str() const {
@@ -251,6 +256,18 @@ namespace llvm {
/// Note: O(size() + Chars.size())
size_type find_first_not_of(StringRef Chars, size_t From = 0) const;
+ /// find_last_of - Find the last character in the string that is \arg C, or
+ /// npos if not found.
+ size_type find_last_of(char C, size_t From = npos) const {
+ return rfind(C, From);
+ }
+
+ /// find_last_of - Find the last character in the string that is in \arg Chars,
+ /// or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_type find_last_of(StringRef Chars, size_t From = npos) const;
+
/// @}
/// @name Helpful Algorithms
/// @{
@@ -432,6 +449,10 @@ namespace llvm {
/// @}
+ // StringRefs can be treated like a POD type.
+ template <typename T> struct isPodLike;
+ template <> struct isPodLike<StringRef> { static const bool value = true; };
+
}
#endif
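A sketch of the two StringRef additions above, with worked values in the comments; the MaxEditDistance cap lets "is it within K edits?" checks stop early and report K+1 instead of the exact distance. The demo function and sample strings are illustrative.

#include "llvm/ADT/StringRef.h"
using namespace llvm;

void stringRefDemo() {
  StringRef A("kitten"), B("sitting");
  unsigned D = A.edit_distance(B);                // 3: two subst., one insert
  unsigned Capped = A.edit_distance(B, true, 2);  // 3 == MaxEditDistance+1

  StringRef Path("a/b/c.txt");
  size_t Slash = Path.find_last_of('/');          // 3, delegates to rfind
  size_t Sep = Path.find_last_of("/.");           // 5, last '/' or '.'
  (void)D; (void)Capped; (void)Slash; (void)Sep;
}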
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index 8dca3c1..e6dcc23 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -45,7 +45,7 @@ class Triple {
public:
enum ArchType {
UnknownArch,
-
+
alpha, // Alpha: alpha
arm, // ARM; arm, armv.*, xscale
bfin, // Blackfin: bfin
@@ -53,7 +53,6 @@ public:
mips, // MIPS: mips, mipsallegrex
mipsel, // MIPSEL: mipsel, mipsallegrexel, psp
msp430, // MSP430: msp430
- pic16, // PIC16: pic16
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
sparc, // Sparc: sparc
@@ -65,13 +64,14 @@ public:
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
mblaze, // MBlaze: mblaze
+ ptx, // PTX: ptx
InvalidArch
};
enum VendorType {
UnknownVendor,
- Apple,
+ Apple,
PC
};
enum OSType {
@@ -84,8 +84,7 @@ public:
FreeBSD,
Linux,
Lv2, // PS3
- MinGW32,
- MinGW64,
+ MinGW32, // i*86-pc-mingw32, *-w64-mingw32
NetBSD,
OpenBSD,
Psp,
@@ -94,7 +93,15 @@ public:
Haiku,
Minix
};
-
+ enum EnvironmentType {
+ UnknownEnvironment,
+
+ GNU,
+ GNUEABI,
+ EABI,
+ MachO
+ };
+
private:
std::string Data;
@@ -107,16 +114,20 @@ private:
/// The parsed OS type.
mutable OSType OS;
+ /// The parsed Environment type.
+ mutable EnvironmentType Environment;
+
bool isInitialized() const { return Arch != InvalidArch; }
static ArchType ParseArch(StringRef ArchName);
static VendorType ParseVendor(StringRef VendorName);
static OSType ParseOS(StringRef OSName);
+ static EnvironmentType ParseEnvironment(StringRef EnvironmentName);
void Parse() const;
public:
/// @name Constructors
/// @{
-
+
Triple() : Data(), Arch(InvalidArch) {}
explicit Triple(StringRef Str) : Data(Str), Arch(InvalidArch) {}
explicit Triple(StringRef ArchStr, StringRef VendorStr, StringRef OSStr)
@@ -127,6 +138,17 @@ public:
Data += OSStr;
}
+ explicit Triple(StringRef ArchStr, StringRef VendorStr, StringRef OSStr,
+ StringRef EnvironmentStr)
+ : Data(ArchStr), Arch(InvalidArch) {
+ Data += '-';
+ Data += VendorStr;
+ Data += '-';
+ Data += OSStr;
+ Data += '-';
+ Data += EnvironmentStr;
+ }
+
/// @}
/// @name Normalization
/// @{
@@ -140,22 +162,22 @@ public:
/// @}
/// @name Typed Component Access
/// @{
-
+
/// getArch - Get the parsed architecture type of this triple.
- ArchType getArch() const {
- if (!isInitialized()) Parse();
+ ArchType getArch() const {
+ if (!isInitialized()) Parse();
return Arch;
}
-
+
/// getVendor - Get the parsed vendor type of this triple.
- VendorType getVendor() const {
- if (!isInitialized()) Parse();
+ VendorType getVendor() const {
+ if (!isInitialized()) Parse();
return Vendor;
}
-
+
/// getOS - Get the parsed operating system type of this triple.
- OSType getOS() const {
- if (!isInitialized()) Parse();
+ OSType getOS() const {
+ if (!isInitialized()) Parse();
return OS;
}
@@ -165,6 +187,12 @@ public:
return getEnvironmentName() != "";
}
+ /// getEnvironment - Get the parsed environment type of this triple.
+ EnvironmentType getEnvironment() const {
+ if (!isInitialized()) Parse();
+ return Environment;
+ }
+
/// @}
/// @name Direct Component Access
/// @{
@@ -193,13 +221,13 @@ public:
/// if the environment component is present).
StringRef getOSAndEnvironmentName() const;
-
+
/// getDarwinNumber - Parse the 'darwin number' out of the specific target
/// triple. For example, if we have darwin8.5 return 8,5,0. If any entry is
/// not defined, return 0's. This requires that the triple have an OSType of
/// darwin before it is called.
void getDarwinNumber(unsigned &Maj, unsigned &Min, unsigned &Revision) const;
-
+
/// getDarwinMajorNumber - Return just the major version number, this is
/// specialized because it is a common query.
unsigned getDarwinMajorNumber() const {
@@ -207,7 +235,7 @@ public:
getDarwinNumber(Maj, Min, Rev);
return Maj;
}
-
+
/// @}
/// @name Mutators
/// @{
@@ -224,6 +252,10 @@ public:
/// to a known type.
void setOS(OSType Kind);
+ /// setEnvironment - Set the environment (fourth) component of the triple
+ /// to a known type.
+ void setEnvironment(EnvironmentType Kind);
+
/// setTriple - Set all components to the new triple \arg Str.
void setTriple(const Twine &Str);
@@ -271,9 +303,14 @@ public:
/// vendor.
static const char *getVendorTypeName(VendorType Kind);
- /// getOSTypeName - Get the canonical name for the \arg Kind vendor.
+ /// getOSTypeName - Get the canonical name for the \arg Kind operating
+ /// system.
static const char *getOSTypeName(OSType Kind);
+ /// getEnvironmentTypeName - Get the canonical name for the \arg Kind
+ /// environment.
+ static const char *getEnvironmentTypeName(EnvironmentType Kind);
+
/// @}
/// @name Static helpers for converting alternate architecture names.
/// @{
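A sketch of the four-component triple added above; the "gnueabi" string assumes the parser recognizes it for the GNUEABI enumerator, in line with the enum names, and the demo function is illustrative.

#include "llvm/ADT/Triple.h"
using namespace llvm;

void tripleDemo() {
  Triple T("arm", "unknown", "linux", "gnueabi");  // new 4-part constructor
  if (T.getEnvironment() == Triple::GNUEABI) {
    // e.g. select the ARM EABI variant of the ABI.
  }
  T.setEnvironment(Triple::EABI);  // rewrites the fourth component
}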
diff --git a/contrib/llvm/include/llvm/ADT/Twine.h b/contrib/llvm/include/llvm/ADT/Twine.h
index b519a3e..ab8d365 100644
--- a/contrib/llvm/include/llvm/ADT/Twine.h
+++ b/contrib/llvm/include/llvm/ADT/Twine.h
@@ -11,7 +11,7 @@
#define LLVM_ADT_TWINE_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <string>
@@ -42,7 +42,7 @@ namespace llvm {
/// Twines support a special 'null' value, which always concatenates to form
/// itself, and renders as an empty string. This can be returned from APIs to
/// effectively nullify any concatenations performed on the result.
- ///
+ ///
/// \b Implementation \n
///
/// Given the nature of a Twine, it is not possible for the Twine's
@@ -99,7 +99,7 @@ namespace llvm {
/// A pointer to a StringRef instance.
StringRefKind,
- /// An unsigned int value reinterpreted as a pointer, to render as an
+ /// An unsigned int value reinterpreted as a pointer, to render as an
/// unsigned decimal integer.
DecUIKind,
@@ -260,32 +260,32 @@ namespace llvm {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(unsigned Val)
+ explicit Twine(unsigned Val)
: LHS((void*)(intptr_t)Val), LHSKind(DecUIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(int Val)
+ explicit Twine(int Val)
: LHS((void*)(intptr_t)Val), LHSKind(DecIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(const unsigned long &Val)
+ explicit Twine(const unsigned long &Val)
: LHS(&Val), LHSKind(DecULKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(const long &Val)
+ explicit Twine(const long &Val)
: LHS(&Val), LHSKind(DecLKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(const unsigned long long &Val)
+ explicit Twine(const unsigned long long &Val)
: LHS(&Val), LHSKind(DecULLKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(const long long &Val)
+ explicit Twine(const long long &Val)
: LHS(&Val), LHSKind(DecLLKind), RHSKind(EmptyKind) {
}
@@ -330,12 +330,12 @@ namespace llvm {
bool isTriviallyEmpty() const {
return isNullary();
}
-
+
/// isSingleStringRef - Return true if this twine can be dynamically
/// accessed as a single StringRef value with getSingleStringRef().
bool isSingleStringRef() const {
if (getRHSKind() != EmptyKind) return false;
-
+
switch (getLHSKind()) {
case EmptyKind:
case CStringKind:
@@ -382,6 +382,14 @@ namespace llvm {
/// SmallVector and a StringRef to the SmallVector's data is returned.
StringRef toStringRef(SmallVectorImpl<char> &Out) const;
+ /// toNullTerminatedStringRef - This returns the twine as a single null
+ /// terminated StringRef if it can be represented as such. Otherwise the
+ /// twine is written into the given SmallVector and a StringRef to the
+ /// SmallVector's data is returned.
+ ///
+ /// The returned StringRef's size does not include the null terminator.
+ StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
/// print - Write the concatenated string represented by this twine to the
/// stream \arg OS.
void print(raw_ostream &OS) const;
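A sketch of the new accessor above; unlike toStringRef, the result is guaranteed to have a NUL just past its last character, so data() can be handed to C APIs. The demo function is illustrative.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
using namespace llvm;

void twineDemo(const Twine &Name) {
  SmallString<128> Storage;
  StringRef R = Name.toNullTerminatedStringRef(Storage);
  const char *CStr = R.data();  // NUL-terminated; R.size() excludes the NUL
  (void)CStr;
}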
diff --git a/contrib/llvm/include/llvm/ADT/ValueMap.h b/contrib/llvm/include/llvm/ADT/ValueMap.h
index ded17fc..d1f4e5a 100644
--- a/contrib/llvm/include/llvm/ADT/ValueMap.h
+++ b/contrib/llvm/include/llvm/ADT/ValueMap.h
@@ -29,7 +29,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/type_traits.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <iterator>
diff --git a/contrib/llvm/include/llvm/ADT/ilist.h b/contrib/llvm/include/llvm/ADT/ilist.h
index 4e3afe1..865fcb3 100644
--- a/contrib/llvm/include/llvm/ADT/ilist.h
+++ b/contrib/llvm/include/llvm/ADT/ilist.h
@@ -38,6 +38,7 @@
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
+#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index ad68d48..71a5982 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -16,11 +16,21 @@
// which automatically provides functionality for the entire suite of client
// APIs.
//
-// This API represents memory as a (Pointer, Size) pair. The Pointer component
-// specifies the base memory address of the region, the Size specifies how large
-// of an area is being queried, or UnknownSize if the size is not known.
-// Pointers that point to two completely different objects in memory never
-// alias, regardless of the value of the Size component.
+// This API identifies memory regions with the Location class. The pointer
+// component specifies the base memory address of the region. The Size specifies
+// the maximum size (in address units) of the memory region, or UnknownSize if
+// the size is not known. The TBAA tag identifies the "type" of the memory
+// reference; see the TypeBasedAliasAnalysis class for details.
+//
+// Some non-obvious details include:
+// - Pointers that point to two completely different objects in memory never
+// alias, regardless of the value of the Size component.
+// - NoAlias doesn't imply unequal pointers. The most obvious example of this
+// is two pointers to constant memory. Even if they are equal, constant
+// memory is never stored to, so there will never be any dependencies.
+// In this and other situations, the pointers may be both NoAlias and
+// MustAlias at the same time. The current API can only return one result,
+// though this is rarely a problem in practice.
//
//===----------------------------------------------------------------------===//
@@ -28,7 +38,6 @@
#define LLVM_ANALYSIS_ALIAS_ANALYSIS_H
#include "llvm/Support/CallSite.h"
-#include "llvm/System/IncludeFile.h"
#include <vector>
namespace llvm {
@@ -39,6 +48,8 @@ class VAArgInst;
class TargetData;
class Pass;
class AnalysisUsage;
+class MemTransferInst;
+class MemIntrinsic;
class AliasAnalysis {
protected:
@@ -67,7 +78,7 @@ public:
/// UnknownSize - This is a special value which can be used with the
/// size arguments in alias queries to indicate that the caller does not
/// know the sizes of the potential memory references.
- static unsigned const UnknownSize = ~0u;
+ static uint64_t const UnknownSize = ~UINT64_C(0);
/// getTargetData - Return a pointer to the current TargetData object, or
/// null if no TargetData object is available.
@@ -77,12 +88,57 @@ public:
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
- unsigned getTypeStoreSize(const Type *Ty);
+ uint64_t getTypeStoreSize(const Type *Ty);
//===--------------------------------------------------------------------===//
/// Alias Queries...
///
+ /// Location - A description of a memory location.
+ struct Location {
+ /// Ptr - The address of the start of the location.
+ const Value *Ptr;
+ /// Size - The maximum size of the location, in address-units, or
+ /// UnknownSize if the size is not known. Note that an unknown size does
+ /// not mean the pointer aliases the entire virtual address space, because
+ /// there are restrictions on stepping out of one object and into another.
+ /// See http://llvm.org/docs/LangRef.html#pointeraliasing
+ uint64_t Size;
+ /// TBAATag - The metadata node which describes the TBAA type of
+ /// the location, or null if there is no known unique tag.
+ const MDNode *TBAATag;
+
+ explicit Location(const Value *P = 0, uint64_t S = UnknownSize,
+ const MDNode *N = 0)
+ : Ptr(P), Size(S), TBAATag(N) {}
+
+ Location getWithNewPtr(const Value *NewPtr) const {
+ Location Copy(*this);
+ Copy.Ptr = NewPtr;
+ return Copy;
+ }
+
+ Location getWithNewSize(uint64_t NewSize) const {
+ Location Copy(*this);
+ Copy.Size = NewSize;
+ return Copy;
+ }
+
+ Location getWithoutTBAATag() const {
+ Location Copy(*this);
+ Copy.TBAATag = 0;
+ return Copy;
+ }
+ };
+
+ /// getLocation - Fill in Loc with information about the memory reference by
+ /// the given instruction.
+ Location getLocation(const LoadInst *LI);
+ Location getLocation(const StoreInst *SI);
+ Location getLocation(const VAArgInst *VI);
+ static Location getLocationForSource(const MemTransferInst *MTI);
+ static Location getLocationForDest(const MemIntrinsic *MI);
+
/// Alias analysis result - Either we know for sure that it does not alias, we
/// know for sure it must alias, or we don't know anything: The two pointers
/// _might_ alias. This enum is designed so you can do things like:
@@ -92,33 +148,63 @@ public:
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
///
- enum AliasResult { NoAlias = 0, MayAlias = 1, MustAlias = 2 };
+ enum AliasResult {
+ NoAlias = 0, ///< No dependencies.
+ MayAlias, ///< Anything goes.
+ PartialAlias, ///< Pointers differ, but pointees overlap.
+ MustAlias ///< Pointers are equal.
+ };
/// alias - The main low level interface to the alias analysis implementation.
- /// Returns a Result indicating whether the two pointers are aliased to each
- /// other. This is the interface that must be implemented by specific alias
- /// analysis implementations.
- ///
- virtual AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ /// Returns an AliasResult indicating whether the two pointers are aliased to
+ /// each other. This is the interface that must be implemented by specific
+ /// alias analysis implementations.
+ virtual AliasResult alias(const Location &LocA, const Location &LocB);
+
+ /// alias - A convenience wrapper.
+ AliasResult alias(const Value *V1, uint64_t V1Size,
+ const Value *V2, uint64_t V2Size) {
+ return alias(Location(V1, V1Size), Location(V2, V2Size));
+ }
- /// alias - A convenience wrapper for the case where the sizes are unknown.
+ /// alias - A convenience wrapper.
AliasResult alias(const Value *V1, const Value *V2) {
return alias(V1, UnknownSize, V2, UnknownSize);
}
/// isNoAlias - A trivial helper function to check to see if the specified
/// pointers are no-alias.
- bool isNoAlias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
- return alias(V1, V1Size, V2, V2Size) == NoAlias;
+ bool isNoAlias(const Location &LocA, const Location &LocB) {
+ return alias(LocA, LocB) == NoAlias;
}
- /// pointsToConstantMemory - If the specified pointer is known to point into
- /// constant global memory, return true. This allows disambiguation of store
- /// instructions from constant pointers.
- ///
- virtual bool pointsToConstantMemory(const Value *P);
+ /// isNoAlias - A convenience wrapper.
+ bool isNoAlias(const Value *V1, uint64_t V1Size,
+ const Value *V2, uint64_t V2Size) {
+ return isNoAlias(Location(V1, V1Size), Location(V2, V2Size));
+ }
+
+ /// isMustAlias - A convenience wrapper.
+ bool isMustAlias(const Location &LocA, const Location &LocB) {
+ return alias(LocA, LocB) == MustAlias;
+ }
+
+ /// isMustAlias - A convenience wrapper.
+ bool isMustAlias(const Value *V1, const Value *V2) {
+ return alias(V1, 1, V2, 1) == MustAlias;
+ }
+
+ /// pointsToConstantMemory - If the specified memory location is
+ /// known to be constant, return true. If OrLocal is true and the
+ /// specified memory location is known to be "local" (derived from
+ /// an alloca), return true. Otherwise return false.
+ virtual bool pointsToConstantMemory(const Location &Loc,
+ bool OrLocal = false);
+
+ /// pointsToConstantMemory - A convenience wrapper.
+ bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
+ return pointsToConstantMemory(Location(P), OrLocal);
+ }
//===--------------------------------------------------------------------===//
/// Simple mod/ref information...
@@ -129,36 +215,48 @@ public:
///
enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };
+ /// These values define additional bits used to compose the
+ /// ModRefBehavior values.
+ enum { Nowhere = 0, ArgumentPointees = 4, Anywhere = 8 | ArgumentPointees };
/// ModRefBehavior - Summary of how a function affects memory in the program.
/// Loads from constant globals are not considered memory accesses for this
/// interface. Also, functions may freely modify stack space local to their
/// invocation without having to report it through these interfaces.
enum ModRefBehavior {
- // DoesNotAccessMemory - This function does not perform any non-local loads
- // or stores to memory.
- //
- // This property corresponds to the GCC 'const' attribute.
- DoesNotAccessMemory,
-
- // AccessesArguments - This function accesses function arguments in well
- // known (possibly volatile) ways, but does not access any other memory.
- AccessesArguments,
-
- // AccessesArgumentsAndGlobals - This function has accesses function
- // arguments and global variables well known (possibly volatile) ways, but
- // does not access any other memory.
- AccessesArgumentsAndGlobals,
-
- // OnlyReadsMemory - This function does not perform any non-local stores or
- // volatile loads, but may read from any memory location.
- //
- // This property corresponds to the GCC 'pure' attribute.
- OnlyReadsMemory,
-
- // UnknownModRefBehavior - This indicates that the function could not be
- // classified into one of the behaviors above.
- UnknownModRefBehavior
+ /// DoesNotAccessMemory - This function does not perform any non-local loads
+ /// or stores to memory.
+ ///
+ /// This property corresponds to the GCC 'const' attribute.
+ /// This property corresponds to the LLVM IR 'readnone' attribute.
+ /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
+ DoesNotAccessMemory = Nowhere | NoModRef,
+
+ /// OnlyReadsArgumentPointees - The only memory references in this function
+ /// (if it has any) are non-volatile loads from objects pointed to by its
+ /// pointer-typed arguments, with arbitrary offsets.
+ ///
+ /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
+ OnlyReadsArgumentPointees = ArgumentPointees | Ref,
+
+ /// OnlyAccessesArgumentPointees - The only memory references in this
+ /// function (if it has any) are non-volatile loads and stores from objects
+ /// pointed to by its pointer-typed arguments, with arbitrary offsets.
+ ///
+ /// This property corresponds to the IntrReadWriteArgMem LLVM intrinsic flag.
+ OnlyAccessesArgumentPointees = ArgumentPointees | ModRef,
+
+ /// OnlyReadsMemory - This function does not perform any non-local stores or
+ /// volatile loads, but may read from any memory location.
+ ///
+ /// This property corresponds to the GCC 'pure' attribute.
+ /// This property corresponds to the LLVM IR 'readonly' attribute.
+ /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
+ OnlyReadsMemory = Anywhere | Ref,
+
+ /// UnknownModRefBehavior - This indicates that the function could not be
+ /// classified into one of the behaviors above.
+ UnknownModRefBehavior = Anywhere | ModRef
};
/// getModRefBehavior - Return the behavior when calling the given call site.
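A worked check of the bit encoding just defined, evaluated against the helper predicates added further down in this diff (onlyReadsMemory and onlyAccessesArgPointees):

// NoModRef=0, Ref=1, Mod=2, ModRef=3; Nowhere=0, ArgumentPointees=4,
// Anywhere=12. Hence:
// OnlyReadsArgumentPointees = 4|1 = 5
//   onlyReadsMemory(5)          = !(5 & 2)        -> true
//   onlyAccessesArgPointees(5)  = !(5 & 12 & ~4)  = !(5 & 8)  -> true
// OnlyReadsMemory = 12|1 = 13
//   onlyReadsMemory(13)         = !(13 & 2)       -> true
//   onlyAccessesArgPointees(13) = !(13 & 8)       -> false
// UnknownModRefBehavior = 12|3 = 15
//   onlyReadsMemory(15)         = !(15 & 2)       -> false
//   onlyAccessesArgPointees(15) = !(15 & 8)       -> false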
@@ -168,11 +266,6 @@ public:
/// For use when the call site is not known.
virtual ModRefBehavior getModRefBehavior(const Function *F);
- /// getIntrinsicModRefBehavior - Return the modref behavior of the intrinsic
- /// with the given id. Most clients won't need this, because the regular
- /// getModRefBehavior incorporates this information.
- static ModRefBehavior getIntrinsicModRefBehavior(unsigned iid);
-
/// doesNotAccessMemory - If the specified call is known to never read or
/// write memory, return true. If the call only reads from known-constant
/// memory, it is also legal to return true. Calls that unwind the stack
@@ -205,8 +298,7 @@ public:
/// This property corresponds to the GCC 'pure' attribute.
///
bool onlyReadsMemory(ImmutableCallSite CS) {
- ModRefBehavior MRB = getModRefBehavior(CS);
- return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory;
+ return onlyReadsMemory(getModRefBehavior(CS));
}
/// onlyReadsMemory - If the specified function is known to only read from
@@ -214,21 +306,114 @@ public:
/// when the call site is not known.
///
bool onlyReadsMemory(const Function *F) {
- ModRefBehavior MRB = getModRefBehavior(F);
- return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory;
+ return onlyReadsMemory(getModRefBehavior(F));
}
+ /// onlyReadsMemory - Return true if functions with the specified behavior are
+ /// known to only read from non-volatile memory (or not access memory at all).
+ ///
+ static bool onlyReadsMemory(ModRefBehavior MRB) {
+ return !(MRB & Mod);
+ }
+
+ /// onlyAccessesArgPointees - Return true if functions with the specified
+ /// behavior are known to read and write at most from objects pointed to by
+ /// their pointer-typed arguments (with arbitrary offsets).
+ ///
+ static bool onlyAccessesArgPointees(ModRefBehavior MRB) {
+ return !(MRB & Anywhere & ~ArgumentPointees);
+ }
+
+ /// doesAccessArgPointees - Return true if functions with the specified
+ /// behavior are known to potentially read or write from objects pointed
+ /// to by their pointer-typed arguments (with arbitrary offsets).
+ ///
+ static bool doesAccessArgPointees(ModRefBehavior MRB) {
+ return (MRB & ModRef) && (MRB & ArgumentPointees);
+ }
/// getModRefInfo - Return information about whether or not an instruction may
- /// read or write memory specified by the pointer operand. An instruction
+ /// read or write the specified memory location. An instruction
/// that doesn't read or write memory may be trivially LICM'd for example.
+ ModRefResult getModRefInfo(const Instruction *I,
+ const Location &Loc) {
+ switch (I->getOpcode()) {
+ case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
+ case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);
+ case Instruction::Store: return getModRefInfo((const StoreInst*)I, Loc);
+ case Instruction::Call: return getModRefInfo((const CallInst*)I, Loc);
+ case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
+ default: return NoModRef;
+ }
+ }
+
+ /// getModRefInfo - A convenience wrapper.
+ ModRefResult getModRefInfo(const Instruction *I,
+ const Value *P, uint64_t Size) {
+ return getModRefInfo(I, Location(P, Size));
+ }
 /// getModRefInfo (for call sites) - Return information about whether
- /// a particular call site modifies or reads the memory specified by the
- /// pointer.
- ///
+ /// a particular call site modifies or reads the specified memory location.
virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
+
+ /// getModRefInfo (for call sites) - A convenience wrapper.
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, uint64_t Size) {
+ return getModRefInfo(CS, Location(P, Size));
+ }
+
+ /// getModRefInfo (for calls) - Return information about whether
+ /// a particular call modifies or reads the specified memory location.
+ ModRefResult getModRefInfo(const CallInst *C, const Location &Loc) {
+ return getModRefInfo(ImmutableCallSite(C), Loc);
+ }
+
+ /// getModRefInfo (for calls) - A convenience wrapper.
+ ModRefResult getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
+ return getModRefInfo(C, Location(P, Size));
+ }
+
+ /// getModRefInfo (for invokes) - Return information about whether
+ /// a particular invoke modifies or reads the specified memory location.
+ ModRefResult getModRefInfo(const InvokeInst *I,
+ const Location &Loc) {
+ return getModRefInfo(ImmutableCallSite(I), Loc);
+ }
+
+ /// getModRefInfo (for invokes) - A convenience wrapper.
+ ModRefResult getModRefInfo(const InvokeInst *I,
+ const Value *P, uint64_t Size) {
+ return getModRefInfo(I, Location(P, Size));
+ }
+
+ /// getModRefInfo (for loads) - Return information about whether
+ /// a particular load modifies or reads the specified memory location.
+ ModRefResult getModRefInfo(const LoadInst *L, const Location &Loc);
+
+ /// getModRefInfo (for loads) - A convenience wrapper.
+ ModRefResult getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
+ return getModRefInfo(L, Location(P, Size));
+ }
+
+ /// getModRefInfo (for stores) - Return information about whether
+ /// a particular store modifies or reads the specified memory location.
+ ModRefResult getModRefInfo(const StoreInst *S, const Location &Loc);
+
+ /// getModRefInfo (for stores) - A convenience wrapper.
+ ModRefResult getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size){
+ return getModRefInfo(S, Location(P, Size));
+ }
+
+ /// getModRefInfo (for va_args) - Return information about whether
+ /// a particular va_arg modifies or reads the specified memory location.
+ ModRefResult getModRefInfo(const VAArgInst* I, const Location &Loc);
+
+ /// getModRefInfo (for va_args) - A convenience wrapper.
+ ModRefResult getModRefInfo(const VAArgInst* I, const Value* P, uint64_t Size){
+ return getModRefInfo(I, Location(P, Size));
+ }
/// getModRefInfo - Return information about whether two call sites may refer
/// to the same set of memory locations. See
@@ -237,46 +422,31 @@ public:
virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2);
-public:
- /// Convenience functions...
- ModRefResult getModRefInfo(const LoadInst *L, const Value *P, unsigned Size);
- ModRefResult getModRefInfo(const StoreInst *S, const Value *P, unsigned Size);
- ModRefResult getModRefInfo(const VAArgInst* I, const Value* P, unsigned Size);
- ModRefResult getModRefInfo(const CallInst *C, const Value *P, unsigned Size) {
- return getModRefInfo(ImmutableCallSite(C), P, Size);
- }
- ModRefResult getModRefInfo(const InvokeInst *I,
- const Value *P, unsigned Size) {
- return getModRefInfo(ImmutableCallSite(I), P, Size);
- }
- ModRefResult getModRefInfo(const Instruction *I,
- const Value *P, unsigned Size) {
- switch (I->getOpcode()) {
- case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, P,Size);
- case Instruction::Load: return getModRefInfo((const LoadInst*)I, P, Size);
- case Instruction::Store: return getModRefInfo((const StoreInst*)I, P,Size);
- case Instruction::Call: return getModRefInfo((const CallInst*)I, P, Size);
- case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,P,Size);
- default: return NoModRef;
- }
- }
-
//===--------------------------------------------------------------------===//
/// Higher level methods for querying mod/ref information.
///
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the value pointed to by Ptr.
- ///
- bool canBasicBlockModify(const BasicBlock &BB, const Value *P, unsigned Size);
+ bool canBasicBlockModify(const BasicBlock &BB, const Location &Loc);
+
+ /// canBasicBlockModify - A convenience wrapper.
+ bool canBasicBlockModify(const BasicBlock &BB, const Value *P, uint64_t Size){
+ return canBasicBlockModify(BB, Location(P, Size));
+ }
/// canInstructionRangeModify - Return true if it is possible for the
/// execution of the specified instructions to modify the value pointed to by
/// Ptr. The instructions to consider are all of the instructions in the
/// range of [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
- ///
bool canInstructionRangeModify(const Instruction &I1, const Instruction &I2,
- const Value *Ptr, unsigned Size);
+ const Location &Loc);
+
+ /// canInstructionRangeModify - A convenience wrapper.
+ bool canInstructionRangeModify(const Instruction &I1, const Instruction &I2,
+ const Value *Ptr, uint64_t Size) {
+ return canInstructionRangeModify(I1, I2, Location(Ptr, Size));
+ }
//===--------------------------------------------------------------------===//
/// Methods that clients should call when they transform the program to allow
@@ -299,6 +469,17 @@ public:
///
virtual void copyValue(Value *From, Value *To);
+ /// addEscapingUse - This method should be used whenever an escaping use is
+ /// added to a pointer value. Analysis implementations may either return
+ /// conservative responses for that value in the future, or may recompute
+ /// some or all internal state to continue providing precise responses.
+ ///
+ /// Escaping uses are any uses of the pointer _except_ the following:
+ /// - GEPs or bitcasts of the pointer
+ /// - Loads through the pointer
+ /// - Stores through (but not of) the pointer
+ virtual void addEscapingUse(Use &U);
+
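To illustrate the contract (a hedged sketch, not code from this change): a pass that introduces a genuinely new use of a pointer, say by feeding it through a select, would be expected to notify the analysis, since a select is neither a GEP/bitcast nor a load or store through the pointer. Cond, PtrA, PtrB, AA, and InsertPt are assumed to exist in the surrounding pass:

    SelectInst *Sel = SelectInst::Create(Cond, PtrA, PtrB, "p.sel", InsertPt);
    AA.addEscapingUse(Sel->getOperandUse(1)); // the new use of PtrA
    AA.addEscapingUse(Sel->getOperandUse(2)); // the new use of PtrB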
/// replaceWithNewValue - This method is the obvious combination of the two
/// above, and it provided as a helper to simplify client code.
///
@@ -323,11 +504,4 @@ bool isIdentifiedObject(const Value *V);
} // End llvm namespace
-// Because of the way .a files work, we must force the BasicAA implementation to
-// be pulled in if the AliasAnalysis header is included. Otherwise we run
-// the risk of AliasAnalysis being used, but the default implementation not
-// being linked into the tool that uses it.
-FORCE_DEFINING_FILE_TO_BE_LINKED(AliasAnalysis)
-FORCE_DEFINING_FILE_TO_BE_LINKED(BasicAliasAnalysis)
-
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
index 8e2f7fd..e844d10 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -40,10 +40,12 @@ class AliasSet : public ilist_node<AliasSet> {
Value *Val; // The pointer this record corresponds to.
PointerRec **PrevInList, *NextInList;
AliasSet *AS;
- unsigned Size;
+ uint64_t Size;
+ const MDNode *TBAAInfo;
public:
PointerRec(Value *V)
- : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0) {}
+ : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0),
+ TBAAInfo(DenseMapInfo<const MDNode *>::getEmptyKey()) {}
Value *getValue() const { return Val; }
@@ -55,11 +57,28 @@ class AliasSet : public ilist_node<AliasSet> {
return &NextInList;
}
- void updateSize(unsigned NewSize) {
+ void updateSizeAndTBAAInfo(uint64_t NewSize, const MDNode *NewTBAAInfo) {
if (NewSize > Size) Size = NewSize;
+
+ if (TBAAInfo == DenseMapInfo<const MDNode *>::getEmptyKey())
+ // We don't have a TBAAInfo yet. Set it to NewTBAAInfo.
+ TBAAInfo = NewTBAAInfo;
+ else if (TBAAInfo != NewTBAAInfo)
+ // NewTBAAInfo conflicts with TBAAInfo.
+ TBAAInfo = DenseMapInfo<const MDNode *>::getTombstoneKey();
}
- unsigned getSize() const { return Size; }
+ uint64_t getSize() const { return Size; }
+
+ /// getTBAAInfo - Return the TBAAInfo, or null if there is no
+ /// information or conflicting information.
+ const MDNode *getTBAAInfo() const {
+ // If we have missing or conflicting TBAAInfo, return null.
+ if (TBAAInfo == DenseMapInfo<const MDNode *>::getEmptyKey() ||
+ TBAAInfo == DenseMapInfo<const MDNode *>::getTombstoneKey())
+ return 0;
+ return TBAAInfo;
+ }
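The empty and tombstone DenseMapInfo keys give each PointerRec a three-state lattice for its TBAA tag: no information yet, one consistent tag, or conflicting tags. The same merge rule, restated as a standalone sketch (the function name is hypothetical):

    const MDNode *mergeTBAATag(const MDNode *Cur, const MDNode *New) {
      const MDNode *Empty = DenseMapInfo<const MDNode *>::getEmptyKey();
      const MDNode *Conflict = DenseMapInfo<const MDNode *>::getTombstoneKey();
      if (Cur == Empty) return New;       // first tag seen: adopt it
      if (Cur != New)  return Conflict;   // disagreement: poisoned for good
      return Cur;                         // same tag: unchanged
    }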
AliasSet *getAliasSet(AliasSetTracker &AST) {
assert(AS && "No AliasSet yet!");
@@ -186,7 +205,8 @@ public:
value_type *operator->() const { return &operator*(); }
Value *getPointer() const { return CurNode->getValue(); }
- unsigned getSize() const { return CurNode->getSize(); }
+ uint64_t getSize() const { return CurNode->getSize(); }
+ const MDNode *getTBAAInfo() const { return CurNode->getTBAAInfo(); }
iterator& operator++() { // Preincrement
assert(CurNode && "Advancing past AliasSet.end()!");
@@ -230,7 +250,8 @@ private:
void removeFromTracker(AliasSetTracker &AST);
- void addPointer(AliasSetTracker &AST, PointerRec &Entry, unsigned Size,
+ void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
+ const MDNode *TBAAInfo,
bool KnownMustAlias = false);
void addCallSite(CallSite CS, AliasAnalysis &AA);
void removeCallSite(CallSite CS) {
@@ -245,7 +266,8 @@ private:
/// aliasesPointer - Return true if the specified pointer "may" (or must)
/// alias one of the members in the set.
///
- bool aliasesPointer(const Value *Ptr, unsigned Size, AliasAnalysis &AA) const;
+ bool aliasesPointer(const Value *Ptr, uint64_t Size, const MDNode *TBAAInfo,
+ AliasAnalysis &AA) const;
bool aliasesCallSite(CallSite CS, AliasAnalysis &AA) const;
};
@@ -298,7 +320,7 @@ public:
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
- bool add(Value *Ptr, unsigned Size); // Add a location
+ bool add(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo); // Add a location
bool add(LoadInst *LI);
bool add(StoreInst *SI);
bool add(VAArgInst *VAAI);
@@ -312,7 +334,8 @@ public:
/// remove methods - These methods are used to remove all entries that might
/// be aliased by the specified instruction. These methods return true if any
/// alias sets were eliminated.
- bool remove(Value *Ptr, unsigned Size); // Remove a location
+ // Remove a location
+ bool remove(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo);
bool remove(LoadInst *LI);
bool remove(StoreInst *SI);
bool remove(VAArgInst *VAAI);
@@ -332,18 +355,21 @@ public:
/// lives in. If the New argument is non-null, this method sets the value to
/// true if a new alias set is created to contain the pointer (because the
/// pointer didn't alias anything).
- AliasSet &getAliasSetForPointer(Value *P, unsigned Size, bool *New = 0);
+ AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
+ const MDNode *TBAAInfo,
+ bool *New = 0);
/// getAliasSetForPointerIfExists - Return the alias set containing the
/// location specified if one exists, otherwise return null.
- AliasSet *getAliasSetForPointerIfExists(Value *P, unsigned Size) {
- return findAliasSetForPointer(P, Size);
+ AliasSet *getAliasSetForPointerIfExists(Value *P, uint64_t Size,
+ const MDNode *TBAAInfo) {
+ return findAliasSetForPointer(P, Size, TBAAInfo);
}
/// containsPointer - Return true if the specified location is represented by
/// this alias set, false otherwise. This does not modify the AST object or
/// alias sets.
- bool containsPointer(Value *P, unsigned Size) const;
+ bool containsPointer(Value *P, uint64_t Size, const MDNode *TBAAInfo) const;
/// getAliasAnalysis - Return the underlying alias analysis object used by
/// this tracker.
@@ -390,14 +416,16 @@ private:
return *Entry;
}
- AliasSet &addPointer(Value *P, unsigned Size, AliasSet::AccessType E,
+ AliasSet &addPointer(Value *P, uint64_t Size, const MDNode *TBAAInfo,
+ AliasSet::AccessType E,
bool &NewSet) {
NewSet = false;
- AliasSet &AS = getAliasSetForPointer(P, Size, &NewSet);
+ AliasSet &AS = getAliasSetForPointer(P, Size, TBAAInfo, &NewSet);
AS.AccessTy |= E;
return AS;
}
- AliasSet *findAliasSetForPointer(const Value *Ptr, unsigned Size);
+ AliasSet *findAliasSetForPointer(const Value *Ptr, uint64_t Size,
+ const MDNode *TBAAInfo);
AliasSet *findAliasSetForCallSite(CallSite CS);
};
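With the widened size type and the extra TBAA argument, manually adding a load's location now looks roughly like this (a sketch assuming an AliasAnalysis &AA, a LoadInst *LI, and the LLVMContext::MD_tbaa metadata kind from the TBAA work this change tracks; the add(LoadInst*) overload above extracts the same three fields itself):

    AliasSetTracker AST(AA);
    AST.add(LI->getPointerOperand(),
            AA.getTypeStoreSize(LI->getType()),
            LI->getMetadata(LLVMContext::MD_tbaa)); // null if no TBAA tag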
diff --git a/contrib/llvm/include/llvm/Analysis/CallGraph.h b/contrib/llvm/include/llvm/Analysis/CallGraph.h
index a4884ed..089f322 100644
--- a/contrib/llvm/include/llvm/Analysis/CallGraph.h
+++ b/contrib/llvm/include/llvm/Analysis/CallGraph.h
@@ -57,7 +57,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/System/IncludeFile.h"
+#include "llvm/Support/IncludeFile.h"
#include <map>
namespace llvm {
@@ -138,6 +138,13 @@ public:
/// not already exist.
CallGraphNode *getOrInsertFunction(const Function *F);
+ /// spliceFunction - Replace the function represented by this node by another.
+ /// This does not rescan the body of the function, so it is suitable when
+ /// splicing the body of one function to another while also updating all
+ /// callers from the old function to the new.
+ ///
+ void spliceFunction(const Function *From, const Function *To);
+
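A sketch of the intended usage (OldF, NewF, and the CallGraph &CG are assumed to be set up by the caller): move the IR body first, then let spliceFunction retarget the call-graph node without rescanning:

    // Transfer the basic blocks; this does not touch the call graph.
    NewF->getBasicBlockList().splice(NewF->begin(), OldF->getBasicBlockList());
    // Point OldF's node (and every edge into it) at NewF instead.
    CG.spliceFunction(OldF, NewF);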
//===---------------------------------------------------------------------
// Pass infrastructure interface glue code.
//
@@ -163,8 +170,10 @@ protected:
// CallGraphNode class definition.
//
class CallGraphNode {
- AssertingVH<Function> F;
+ friend class CallGraph;
+ AssertingVH<Function> F;
+
// CallRecord - This is a pair of the calling instruction (a call or invoke)
// and the callgraph node being called.
public:
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
index 58096f1..75edfbb 100644
--- a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -7,15 +7,16 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements various weight measurements for a function, helping
-// the Inliner and PartialSpecialization decide whether to duplicate its
-// contents.
+// This file implements various weight measurements for code, helping
+// the Inliner and other passes decide whether to duplicate its contents.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CODEMETRICS_H
#define LLVM_ANALYSIS_CODEMETRICS_H
+#include "llvm/ADT/DenseMap.h"
+
namespace llvm {
// CodeMetrics - Calculate size and a few similar metrics for a set of
// basic blocks.
@@ -45,6 +46,11 @@ namespace llvm {
/// NumCalls - Keep track of the number of calls to 'big' functions.
unsigned NumCalls;
+
+ /// NumInlineCandidates - Keep track of the number of calls to internal
+ /// functions with only a single caller. These are likely targets for
+ /// future inlining, likely exposed by interleaved devirtualization.
+ unsigned NumInlineCandidates;
/// NumVectorInsts - Keep track of how many instructions produce vector
/// values. The inliner is being more aggressive with inlining vector
@@ -56,7 +62,8 @@ namespace llvm {
CodeMetrics() : callsSetJmp(false), isRecursive(false),
containsIndirectBr(false), usesDynamicAlloca(false),
- NumInsts(0), NumBlocks(0), NumCalls(0), NumVectorInsts(0),
+ NumInsts(0), NumBlocks(0), NumCalls(0),
+ NumInlineCandidates(0), NumVectorInsts(0),
NumRets(0) {}
/// analyzeBasicBlock - Add information about the specified basic block
@@ -66,6 +73,22 @@ namespace llvm {
/// analyzeFunction - Add information about the specified function
/// to the current structure.
void analyzeFunction(Function *F);
+
+ /// CountCodeReductionForConstant - Figure out an approximation for how
+ /// many instructions will be constant folded if the specified value is
+ /// constant.
+ unsigned CountCodeReductionForConstant(Value *V);
+
+ /// CountBonusForConstant - Figure out an approximation for how much
+ /// per-call performance boost we can expect if the specified value is
+ /// constant.
+ unsigned CountBonusForConstant(Value *V);
+
+ /// CountCodeReductionForAlloca - Figure out an approximation of how much
+ /// smaller the function will be if it is inlined into a context where an
+ /// argument becomes an alloca.
+ ///
+ unsigned CountCodeReductionForAlloca(Value *V);
};
}
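A caller-side sketch of the new counter (considerInlining is a hypothetical helper; F is a Function*, and the threshold is arbitrary):

    CodeMetrics Metrics;
    Metrics.analyzeFunction(F);
    // A small body that calls single-caller internal functions is a
    // promising target: inlining it may expose further inlining.
    if (Metrics.NumInlineCandidates > 0 && Metrics.NumInsts < 100)
      considerInlining(F);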
diff --git a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
index e2675eb..f6b1f5a 100644
--- a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares routines for folding instructions into constants.
+// This file declares routines for folding instructions into constants when all
+// operands are constants, for example "sub i32 1, 0" -> "1".
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file declares some additional folding routines that can make use of
@@ -27,11 +28,11 @@ namespace llvm {
class Function;
class Type;
-/// ConstantFoldInstruction - Attempt to constant fold the specified
-/// instruction. If successful, the constant result is returned, if not, null
-/// is returned. Note that this function can only fail when attempting to fold
-/// instructions like loads and stores, which have no constant expression form.
-///
+/// ConstantFoldInstruction - Try to constant fold the specified instruction.
+/// If successful, the constant result is returned, if not, null is returned.
+/// Note that this fails if not all of the operands are constant. Otherwise,
+/// this function can only fail when attempting to fold instructions like loads
+/// and stores, which have no constant expression form.
Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0);
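Driving the header's own example through the API (a sketch; I is assumed to be the instruction 'sub i32 1, 0'):

    // Yields the ConstantInt 1; returns null if any operand is non-constant
    // or the instruction (e.g. a load or store) has no constant form.
    if (Constant *C = ConstantFoldInstruction(I)) {
      I->replaceAllUsesWith(C);
      I->eraseFromParent();
    }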
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
diff --git a/contrib/llvm/include/llvm/Analysis/DIBuilder.h b/contrib/llvm/include/llvm/Analysis/DIBuilder.h
new file mode 100644
index 0000000..bd22134
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DIBuilder.h
@@ -0,0 +1,459 @@
+//===--- llvm/Analysis/DIBuilder.h - Debug Information Builder --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DIBuilder that is useful for creating debugging
+// information entries in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DIBUILDER_H
+#define LLVM_ANALYSIS_DIBUILDER_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class Function;
+ class Module;
+ class Value;
+ class LLVMContext;
+ class MDNode;
+ class StringRef;
+ class DIDescriptor;
+ class DIFile;
+ class DIEnumerator;
+ class DIType;
+ class DIArray;
+ class DIGlobalVariable;
+ class DINameSpace;
+ class DIVariable;
+ class DISubrange;
+ class DILexicalBlock;
+ class DISubprogram;
+ class DITemplateTypeParameter;
+ class DITemplateValueParameter;
+
+ class DIBuilder {
+ private:
+ Module &M;
+ LLVMContext &VMContext;
+ MDNode *TheCU;
+
+ Function *DeclareFn; // llvm.dbg.declare
+ Function *ValueFn; // llvm.dbg.value
+
+ DIBuilder(const DIBuilder &); // DO NOT IMPLEMENT
+ void operator=(const DIBuilder &); // DO NOT IMPLEMENT
+
+ public:
+ explicit DIBuilder(Module &M);
+ const MDNode *getCU() { return TheCU; }
+ enum ComplexAddrKind { OpPlus=1, OpDeref };
+
+ /// CreateCompileUnit - A CompileUnit provides an anchor for all debugging
+ /// information generated during this instance of compilation.
+ /// @param Lang Source programming language, eg. dwarf::DW_LANG_C99
+ /// @param File File name
+ /// @param Dir Directory
+ /// @param Producer String identifying the producer of debugging information.
+ /// Usually this is a compiler version string.
+ /// @param isOptimized A boolean flag which indicates whether optimization
+ /// is ON or not.
+ /// @param Flags This string lists command line options. This string is
+ /// directly embedded in debug info output which may be used
+ /// by a tool analyzing generated debugging information.
+ /// @param RV This indicates runtime version for languages like
+ /// Objective-C.
+ void CreateCompileUnit(unsigned Lang, StringRef File, StringRef Dir,
+ StringRef Producer,
+ bool isOptimized, StringRef Flags, unsigned RV);
+
+ /// CreateFile - Create a file descriptor to hold debugging information
+ /// for a file.
+ DIFile CreateFile(StringRef Filename, StringRef Directory);
+
+ /// CreateEnumerator - Create a single enumerator value.
+ DIEnumerator CreateEnumerator(StringRef Name, uint64_t Val);
+
+ /// CreateBasicType - Create debugging information entry for a basic
+ /// type.
+ /// @param Name Type name.
+ /// @param SizeInBits Size of the type.
+ /// @param AlignInBits Type alignment.
+ /// @param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float.
+ DIType CreateBasicType(StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding);
+
+ /// CreateQualifiedType - Create debugging information entry for a qualified
+ /// type, e.g. 'const int'.
+ /// @param Tag Tag identifying type, e.g. dwarf::DW_TAG_volatile_type
+ /// @param FromTy Base Type.
+ DIType CreateQualifiedType(unsigned Tag, DIType FromTy);
+
+ /// CreatePointerType - Create debugging information entry for a pointer.
+ /// @param PointeeTy Type pointed by this pointer.
+ /// @param SizeInBits Size.
+ /// @param AlignInBits Alignment. (optional)
+ /// @param Name Pointer type name. (optional)
+ DIType CreatePointerType(DIType PointeeTy, uint64_t SizeInBits,
+ uint64_t AlignInBits = 0,
+ StringRef Name = StringRef());
+
+ /// CreateReferenceType - Create debugging information entry for a c++
+ /// style reference.
+ DIType CreateReferenceType(DIType RTy);
+
+ /// CreateTypedef - Create debugging information entry for a typedef.
+ /// @param Ty Original type.
+ /// @param Name Typedef name.
+ /// @param File File where this type is defined.
+ /// @param LineNo Line number.
+ DIType CreateTypedef(DIType Ty, StringRef Name, DIFile File,
+ unsigned LineNo);
+
+ /// CreateFriend - Create debugging information entry for a 'friend'.
+ DIType CreateFriend(DIType Ty, DIType FriendTy);
+
+ /// CreateInheritance - Create debugging information entry to establish
+ /// inheritance relationship between two types.
+ /// @param Ty Original type.
+ /// @param BaseTy Base type. Ty inherits from base.
+ /// @param BaseOffset Base offset.
+ /// @param Flags Flags to describe inheritance attribute,
+ /// e.g. private
+ DIType CreateInheritance(DIType Ty, DIType BaseTy, uint64_t BaseOffset,
+ unsigned Flags);
+
+ /// CreateMemberType - Create debugging information entry for a member.
+ /// @param Name Member name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Ty Parent type.
+ DIType CreateMemberType(StringRef Name, DIFile File,
+ unsigned LineNo, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType Ty);
+
+ /// CreateClassType - Create debugging information entry for a class.
+ /// @param Scope Scope in which this class is defined.
+ /// @param Name class name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements class members.
+ /// @param VTableHolder Debug info of the base class that contains vtable
+ /// for this type. This is used in
+ /// DW_AT_containing_type. See DWARF documentation
+ /// for more info.
+ /// @param TemplateParms Template type parameters.
+ DIType CreateClassType(DIDescriptor Scope, StringRef Name, DIFile File,
+ unsigned LineNumber, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType DerivedFrom,
+ DIArray Elements, MDNode *VTableHolder = 0,
+ MDNode *TemplateParms = 0);
+
+ /// CreateStructType - Create debugging information entry for a struct.
+ /// @param Scope Scope in which this struct is defined.
+ /// @param Name Struct name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements Struct elements.
+ /// @param RunTimeLang Optional parameter, Objective-C runtime version.
+ DIType CreateStructType(DIDescriptor Scope, StringRef Name, DIFile File,
+ unsigned LineNumber, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Flags,
+ DIArray Elements, unsigned RunTimeLang = 0);
+
+ /// CreateUnionType - Create debugging information entry for a union.
+ /// @param Scope Scope in which this union is defined.
+ /// @param Name Union name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements Union elements.
+ /// @param RunTimeLang Optional parameter, Objective-C runtime version.
+ DIType CreateUnionType(DIDescriptor Scope, StringRef Name, DIFile File,
+ unsigned LineNumber, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Flags,
+ DIArray Elements, unsigned RunTimeLang = 0);
+
+ /// CreateTemplateTypeParameter - Create debugging information for template
+ /// type parameter.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Type parameter name.
+ /// @param Ty Parameter type.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateTypeParameter
+ CreateTemplateTypeParameter(DIDescriptor Scope, StringRef Name, DIType Ty,
+ MDNode *File = 0, unsigned LineNo = 0,
+ unsigned ColumnNo = 0);
+
+ /// CreateTemplateValueParameter - Create debugging information for template
+ /// value parameter.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Value parameter name.
+ /// @param Ty Parameter type.
+ /// @param Value Constant parameter value.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateValueParameter
+ CreateTemplateValueParameter(DIDescriptor Scope, StringRef Name, DIType Ty,
+ uint64_t Value,
+ MDNode *File = 0, unsigned LineNo = 0,
+ unsigned ColumnNo = 0);
+
+ /// CreateArrayType - Create debugging information entry for an array.
+ /// @param Size Array size.
+ /// @param AlignInBits Alignment.
+ /// @param Ty Element type.
+ /// @param Subscripts Subscripts.
+ DIType CreateArrayType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts);
+
+ /// CreateVectorType - Create debugging information entry for a vector type.
+ /// @param Size Array size.
+ /// @param AlignInBits Alignment.
+ /// @param Ty Element type.
+ /// @param Subscripts Subscripts.
+ DIType CreateVectorType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts);
+
+ /// CreateEnumerationType - Create debugging information entry for an
+ /// enumeration.
+ /// @param Scope Scope in which this enumeration is defined.
+ /// @param Name Enumeration name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Elements Enumeration elements.
+ DIType CreateEnumerationType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits, DIArray Elements);
+
+ /// CreateSubroutineType - Create subroutine type.
+ /// @param File File in which this subroutine is defined.
+ /// @param ParameterTypes An array of subroutine parameter types. This
+ /// includes the return type at the 0th index.
+ DIType CreateSubroutineType(DIFile File, DIArray ParameterTypes);
+
+ /// CreateArtificialType - Create a new DIType with "artificial" flag set.
+ DIType CreateArtificialType(DIType Ty);
+
+ /// CreateTemporaryType - Create a temporary forward-declared type.
+ DIType CreateTemporaryType();
+ DIType CreateTemporaryType(DIFile F);
+
+ /// RetainType - Retain DIType in a module even if it is not referenced
+ /// through debug info anchors.
+ void RetainType(DIType T);
+
+ /// CreateUnspecifiedParameter - Create unspecified type descriptor
+ /// for a subroutine type.
+ DIDescriptor CreateUnspecifiedParameter();
+
+ /// GetOrCreateArray - Get a DIArray, create one if required.
+ DIArray GetOrCreateArray(Value *const *Elements, unsigned NumElements);
+
+ /// GetOrCreateSubrange - Create a descriptor for a value range. This
+ /// implicitly uniques the values returned.
+ DISubrange GetOrCreateSubrange(int64_t Lo, int64_t Hi);
+
+ /// CreateGlobalVariable - Create a new descriptor for the specified global.
+ /// @param Name Name of the variable.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type.
+ /// @param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// @param Val llvm::Value of the variable.
+ DIGlobalVariable
+ CreateGlobalVariable(StringRef Name, DIFile File, unsigned LineNo,
+ DIType Ty, bool isLocalToUnit, llvm::Value *Val);
+
+
+ /// CreateStaticVariable - Create a new descriptor for the specified
+ /// variable.
+ /// @param Context Variable scope.
+ /// @param Name Name of the variable.
+ /// @param LinkageName Mangled name of the variable.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type.
+ /// @param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// @param Val llvm::Value of the variable.
+ DIGlobalVariable
+ CreateStaticVariable(DIDescriptor Context, StringRef Name,
+ StringRef LinkageName, DIFile File, unsigned LineNo,
+ DIType Ty, bool isLocalToUnit, llvm::Value *Val);
+
+
+ /// CreateLocalVariable - Create a new descriptor for the specified
+ /// local variable.
+ /// @param Tag Dwarf TAG. Usually DW_TAG_auto_variable or
+ /// DW_TAG_arg_variable.
+ /// @param Scope Variable scope.
+ /// @param Name Variable name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type
+ /// @param AlwaysPreserve Boolean. Set to true if debug info for this
+ /// variable should be preserved in optimized builds.
+ /// @param Flags Flags, e.g. artificial variable.
+ DIVariable CreateLocalVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name,
+ DIFile File, unsigned LineNo,
+ DIType Ty, bool AlwaysPreserve = false,
+ unsigned Flags = 0);
+
+
+ /// CreateComplexVariable - Create a new descriptor for the specified
+ /// variable which has a complex address expression for its address.
+ /// @param Tag Dwarf TAG. Usually DW_TAG_auto_variable or
+ /// DW_TAG_arg_variable.
+ /// @param Scope Variable scope.
+ /// @param Name Variable name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type
+ /// @param Addr A pointer to a vector of complex address operations.
+ /// @param NumAddr Number of address operations in the vector.
+ DIVariable CreateComplexVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name, DIFile F, unsigned LineNo,
+ DIType Ty, Value *const *Addr,
+ unsigned NumAddr);
+
+ /// CreateFunction - Create a new descriptor for the specified subprogram.
+ /// See comments in DISubprogram for descriptions of these fields.
+ /// @param Scope Function scope.
+ /// @param Name Function name.
+ /// @param LinkageName Mangled function name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Function type.
+ /// @param isLocalToUnit True if this function is not externally visible.
+ /// @param isDefinition True if this is a function definition.
+ /// @param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit DWARF attributes.
+ /// @param isOptimized True if optimization is ON.
+ /// @param Fn llvm::Function pointer.
+ DISubprogram CreateFunction(DIDescriptor Scope, StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DIType Ty, bool isLocalToUnit,
+ bool isDefinition,
+ unsigned Flags = 0,
+ bool isOptimized = false,
+ Function *Fn = 0);
+
+ /// CreateMethod - Create a new descriptor for the specified C++ method.
+ /// See comments in DISubprogram for descriptions of these fields.
+ /// @param Scope Function scope.
+ /// @param Name Function name.
+ /// @param LinkageName Mangled function name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Function type.
+ /// @param isLocalToUnit True if this function is not externally visible.
+ /// @param isDefinition True if this is a function definition.
+ /// @param Virtuality Attributes describing virtualness, e.g. pure
+ /// virtual function.
+ /// @param VTableIndex Index of this method in the virtual table.
+ /// @param VTableHolder Type that holds vtable.
+ /// @param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit DWARF attributes.
+ /// @param isOptimized True if optimization is ON.
+ /// @param Fn llvm::Function pointer.
+ DISubprogram CreateMethod(DIDescriptor Scope, StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DIType Ty, bool isLocalToUnit,
+ bool isDefinition,
+ unsigned Virtuality = 0, unsigned VTableIndex = 0,
+ MDNode *VTableHolder = 0,
+ unsigned Flags = 0,
+ bool isOptimized = false,
+ Function *Fn = 0);
+
+ /// CreateNameSpace - This creates a new descriptor for a namespace
+ /// with the specified parent scope.
+ /// @param Scope Namespace scope
+ /// @param Name Name of this namespace
+ /// @param File Source file
+ /// @param LineNo Line number
+ DINameSpace CreateNameSpace(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNo);
+
+
+ /// CreateLexicalBlock - This creates a descriptor for a lexical block
+ /// with the specified parent context.
+ /// @param Scope Parent lexical scope.
+ /// @param File Source file
+ /// @param Line Line number
+ /// @param Col Column number
+ DILexicalBlock CreateLexicalBlock(DIDescriptor Scope, DIFile File,
+ unsigned Line, unsigned Col);
+
+ /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+ /// @param Storage llvm::Value of the variable
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertAtEnd Location for the new intrinsic.
+ Instruction *InsertDeclare(llvm::Value *Storage, DIVariable VarInfo,
+ BasicBlock *InsertAtEnd);
+
+ /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+ /// @param Storage llvm::Value of the variable
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertBefore Location for the new intrinsic.
+ Instruction *InsertDeclare(llvm::Value *Storage, DIVariable VarInfo,
+ Instruction *InsertBefore);
+
+
+ /// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+ /// @param Val llvm::Value of the variable
+ /// @param Offset Offset
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertAtEnd Location for the new intrinsic.
+ Instruction *InsertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DIVariable VarInfo,
+ BasicBlock *InsertAtEnd);
+
+ /// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+ /// @param Val llvm::Value of the variable
+ /// @param Offset Offset
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertBefore Location for the new intrinsic.
+ Instruction *InsertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DIVariable VarInfo,
+ Instruction *InsertBefore);
+
+ };
+} // end namespace llvm
+
+#endif
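Taken together, a front end would drive the builder roughly as follows (a sketch against the declarations above; Module &M exists in the caller, the file names and flag values are placeholders, and the implicit DIType-to-MDNode* conversion through DIDescriptor is assumed):

    DIBuilder DIB(M);
    DIB.CreateCompileUnit(dwarf::DW_LANG_C99, "a.c", "/tmp", "my-frontend 1.0",
                          /*isOptimized=*/false, /*Flags=*/"", /*RV=*/0);
    DIFile File = DIB.CreateFile("a.c", "/tmp");
    DIType IntTy = DIB.CreateBasicType("int", 32, 32, dwarf::DW_ATE_signed);
    Value *Elts[] = { IntTy };               // return type at index 0
    DIType FnTy = DIB.CreateSubroutineType(File, DIB.GetOrCreateArray(Elts, 1));
    DISubprogram SP = DIB.CreateFunction(File, "main", "main", File,
                                         /*LineNo=*/1, FnTy,
                                         /*isLocalToUnit=*/false,
                                         /*isDefinition=*/true);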
diff --git a/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
index d8daf51..30741c4 100644
--- a/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -67,7 +67,7 @@ struct DOTGraphTraitsPrinter : public FunctionPass {
Title = GraphName + " for '" + F.getNameStr() + "' function";
if (ErrorInfo.empty())
- WriteGraph(File, Graph, Simple, Name, Title);
+ WriteGraph(File, Graph, Simple, Title);
else
errs() << " error opening file for writing!";
errs() << "\n";
diff --git a/contrib/llvm/include/llvm/Analysis/DebugInfo.h b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
index 2d1418d..aa69088 100644
--- a/contrib/llvm/include/llvm/Analysis/DebugInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
@@ -33,6 +33,7 @@ namespace llvm {
class DbgDeclareInst;
class Instruction;
class MDNode;
+ class NamedMDNode;
class LLVMContext;
class raw_ostream;
@@ -46,6 +47,18 @@ namespace llvm {
  /// This should not be stored in a container, because the underlying MDNode may
/// change in certain situations.
class DIDescriptor {
+ public:
+ enum {
+ FlagPrivate = 1 << 0,
+ FlagProtected = 1 << 1,
+ FlagFwdDecl = 1 << 2,
+ FlagAppleBlock = 1 << 3,
+ FlagBlockByrefStruct = 1 << 4,
+ FlagVirtual = 1 << 5,
+ FlagArtificial = 1 << 6,
+ FlagExplicit = 1 << 7,
+ FlagPrototyped = 1 << 8
+ };
protected:
const MDNode *DbgNode;
@@ -108,6 +121,9 @@ namespace llvm {
bool isEnumerator() const;
bool isType() const;
bool isGlobal() const;
+ bool isUnspecifiedParameter() const;
+ bool isTemplateTypeParameter() const;
+ bool isTemplateValueParameter() const;
};
/// DISubrange - This is used to represent ranges, for array bounds.
@@ -160,8 +176,8 @@ namespace llvm {
/// module does not contain any main compile unit then the code generator
/// will emit multiple compile units in the output object file.
- bool isMain() const { return getUnsignedField(6); }
- bool isOptimized() const { return getUnsignedField(7); }
+ bool isMain() const { return getUnsignedField(6) != 0; }
+ bool isOptimized() const { return getUnsignedField(7) != 0; }
StringRef getFlags() const { return getStringField(8); }
unsigned getRunTimeVersion() const { return getUnsignedField(9); }
@@ -203,17 +219,6 @@ namespace llvm {
/// others do not require a huge and empty descriptor full of zeros.
class DIType : public DIScope {
public:
- enum {
- FlagPrivate = 1 << 0,
- FlagProtected = 1 << 1,
- FlagFwdDecl = 1 << 2,
- FlagAppleBlock = 1 << 3,
- FlagBlockByrefStruct = 1 << 4,
- FlagVirtual = 1 << 5,
- FlagArtificial = 1 << 6 // To identify artificial arguments in
- // a subroutine type. e.g. "this" in c++.
- };
-
protected:
// This ctor is used when the Tag has already been validated by a derived
// ctor.
@@ -231,12 +236,12 @@ namespace llvm {
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
DICompileUnit getCompileUnit() const{
- if (getVersion() == llvm::LLVMDebugVersion7)
- return getFieldAs<DICompileUnit>(3);
-
- DIFile F = getFieldAs<DIFile>(3);
- return F.getCompileUnit();
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(3);
+
+ return getFieldAs<DIFile>(3).getCompileUnit();
}
+ DIFile getFile() const { return getFieldAs<DIFile>(3); }
unsigned getLineNumber() const { return getUnsignedField(4); }
uint64_t getSizeInBits() const { return getUInt64Field(5); }
uint64_t getAlignInBits() const { return getUInt64Field(6); }
@@ -269,12 +274,23 @@ namespace llvm {
bool isValid() const {
return DbgNode && (isBasicType() || isDerivedType() || isCompositeType());
}
- StringRef getFilename() const { return getCompileUnit().getFilename();}
- StringRef getDirectory() const { return getCompileUnit().getDirectory();}
+ StringRef getDirectory() const {
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getCompileUnit().getDirectory();
+
+ return getFieldAs<DIFile>(3).getDirectory();
+ }
+ StringRef getFilename() const {
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getCompileUnit().getFilename();
+
+ return getFieldAs<DIFile>(3).getFilename();
+ }
/// replaceAllUsesWith - Replace all uses of debug info referenced by
/// this descriptor.
void replaceAllUsesWith(DIDescriptor &D);
+ void replaceAllUsesWith(MDNode *D);
/// print - print type.
void print(raw_ostream &OS) const;
@@ -342,6 +358,7 @@ namespace llvm {
DICompositeType getContainingType() const {
return getFieldAs<DICompositeType>(12);
}
+ DIArray getTemplateParams() const { return getFieldAs<DIArray>(13); }
/// Verify - Verify that a composite type descriptor is well formed.
bool Verify() const;
@@ -353,6 +370,43 @@ namespace llvm {
void dump() const;
};
+ /// DITemplateTypeParameter - This is a wrapper for template type parameter.
+ class DITemplateTypeParameter : public DIDescriptor {
+ public:
+ explicit DITemplateTypeParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DIType getType() const { return getFieldAs<DIType>(3); }
+ StringRef getFilename() const {
+ return getFieldAs<DIFile>(4).getFilename();
+ }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(4).getDirectory();
+ }
+ unsigned getLineNumber() const { return getUnsignedField(5); }
+ unsigned getColumnNumber() const { return getUnsignedField(6); }
+ };
+
+ /// DITemplateValueParameter - This is a wrapper for template value parameter.
+ class DITemplateValueParameter : public DIDescriptor {
+ public:
+ explicit DITemplateValueParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DIType getType() const { return getFieldAs<DIType>(3); }
+ uint64_t getValue() const { return getUInt64Field(4); }
+ StringRef getFilename() const {
+ return getFieldAs<DIFile>(5).getFilename();
+ }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(5).getDirectory();
+ }
+ unsigned getLineNumber() const { return getUnsignedField(6); }
+ unsigned getColumnNumber() const { return getUnsignedField(7); }
+ };
+
/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
class DISubprogram : public DIScope {
public:
@@ -366,8 +420,7 @@ namespace llvm {
if (getVersion() == llvm::LLVMDebugVersion7)
return getFieldAs<DICompileUnit>(6);
- DIFile F = getFieldAs<DIFile>(6);
- return F.getCompileUnit();
+ return getFieldAs<DIFile>(6).getCompileUnit();
}
unsigned getLineNumber() const { return getUnsignedField(7); }
DICompositeType getType() const { return getFieldAs<DICompositeType>(8); }
@@ -396,23 +449,52 @@ namespace llvm {
DICompositeType getContainingType() const {
return getFieldAs<DICompositeType>(13);
}
- unsigned isArtificial() const { return getUnsignedField(14); }
+ unsigned isArtificial() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return getUnsignedField(14);
+ return (getUnsignedField(14) & FlagArtificial) != 0;
+ }
+ /// isPrivate - Return true if this subprogram has "private"
+ /// access specifier.
+ bool isPrivate() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return false;
+ return (getUnsignedField(14) & FlagPrivate) != 0;
+ }
+ /// isProtected - Return true if this subprogram has "protected"
+ /// access specifier.
+ bool isProtected() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return false;
+ return (getUnsignedField(14) & FlagProtected) != 0;
+ }
+ /// isExplicit - Return true if this subprogram is marked as explicit.
+ bool isExplicit() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return false;
+ return (getUnsignedField(14) & FlagExplicit) != 0;
+ }
+ /// isPrototyped - Return true if this subprogram is prototyped.
+ bool isPrototyped() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return false;
+ return (getUnsignedField(14) & FlagPrototyped) != 0;
+ }
+
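A consumer no longer needs to check the metadata version itself; the accessors gate on it internally. A short sketch (Node is assumed to be some subprogram MDNode):

    DISubprogram SP(Node);
    // Field 14 is a bare boolean before version 8 and a Flag* bitmask
    // afterwards; both cases are handled inside the accessors.
    if (SP.isArtificial() || SP.isPrivate())
      return; // e.g. skip compiler-generated and private methods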
unsigned isOptimized() const;
StringRef getFilename() const {
if (getVersion() == llvm::LLVMDebugVersion7)
return getCompileUnit().getFilename();
- DIFile F = getFieldAs<DIFile>(6);
- return F.getFilename();
+ return getFieldAs<DIFile>(6).getFilename();
}
StringRef getDirectory() const {
if (getVersion() == llvm::LLVMDebugVersion7)
return getCompileUnit().getFilename();
- DIFile F = getFieldAs<DIFile>(6);
- return F.getDirectory();
+ return getFieldAs<DIFile>(6).getDirectory();
}
/// Verify - Verify that a subprogram descriptor is well formed.
@@ -484,6 +566,13 @@ namespace llvm {
}
unsigned getLineNumber() const { return getUnsignedField(4); }
DIType getType() const { return getFieldAs<DIType>(5); }
+
+ /// isArtificial - Return true if this variable is marked as "artificial".
+ bool isArtificial() const {
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return false;
+ return (getUnsignedField(6) & FlagArtificial) != 0;
+ }
/// Verify - Verify that a variable descriptor is well formed.
@@ -525,13 +614,11 @@ namespace llvm {
unsigned getLineNumber() const { return getUnsignedField(2); }
unsigned getColumnNumber() const { return getUnsignedField(3); }
StringRef getDirectory() const {
- DIFile F = getFieldAs<DIFile>(4);
- StringRef dir = F.getDirectory();
+ StringRef dir = getFieldAs<DIFile>(4).getDirectory();
return !dir.empty() ? dir : getContext().getDirectory();
}
StringRef getFilename() const {
- DIFile F = getFieldAs<DIFile>(4);
- StringRef filename = F.getFilename();
+ StringRef filename = getFieldAs<DIFile>(4).getFilename();
return !filename.empty() ? filename : getContext().getFilename();
}
};
@@ -542,14 +629,17 @@ namespace llvm {
explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
- StringRef getDirectory() const { return getContext().getDirectory(); }
- StringRef getFilename() const { return getContext().getFilename(); }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(3).getDirectory();
+ }
+ StringRef getFilename() const {
+ return getFieldAs<DIFile>(3).getFilename();
+ }
DICompileUnit getCompileUnit() const{
if (getVersion() == llvm::LLVMDebugVersion7)
return getFieldAs<DICompileUnit>(3);
- DIFile F = getFieldAs<DIFile>(3);
- return F.getCompileUnit();
+ return getFieldAs<DIFile>(3).getCompileUnit();
}
unsigned getLineNumber() const { return getUnsignedField(4); }
bool Verify() const;
@@ -594,6 +684,10 @@ namespace llvm {
/// implicitly uniques the values returned.
DISubrange GetOrCreateSubrange(int64_t Lo, int64_t Hi);
+ /// CreateUnspecifiedParameter - Create unspecified type descriptor
+ /// for a subroutine type.
+ DIDescriptor CreateUnspecifiedParameter();
+
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit.
DICompileUnit CreateCompileUnit(unsigned LangID,
@@ -662,6 +756,7 @@ namespace llvm {
/// CreateTemporaryType - Create a temporary forward-declared type.
DIType CreateTemporaryType();
+ DIType CreateTemporaryType(DIFile F);
/// CreateArtificialType - Create a new DIType with "artificial" flag set.
DIType CreateArtificialType(DIType Ty);
@@ -690,8 +785,8 @@ namespace llvm {
bool isDefinition,
unsigned VK = 0,
unsigned VIndex = 0,
- DIType = DIType(),
- bool isArtificial = 0,
+ DIType ContainingType = DIType(),
+ unsigned Flags = 0,
bool isOptimized = false,
Function *Fn = 0);
@@ -721,15 +816,15 @@ namespace llvm {
DIVariable CreateVariable(unsigned Tag, DIDescriptor Context,
StringRef Name,
DIFile F, unsigned LineNo,
- DIType Ty, bool AlwaysPreserve = false);
+ DIType Ty, bool AlwaysPreserve = false,
+ unsigned Flags = 0);
/// CreateComplexVariable - Create a new descriptor for the specified
/// variable which has a complex address expression for its address.
DIVariable CreateComplexVariable(unsigned Tag, DIDescriptor Context,
- const std::string &Name,
- DIFile F, unsigned LineNo,
- DIType Ty,
- SmallVector<Value *, 9> &addr);
+ StringRef Name, DIFile F, unsigned LineNo,
+ DIType Ty, Value *const *Addr,
+ unsigned NumAddr);
/// CreateLexicalBlock - This creates a descriptor for a lexical block
/// with the specified parent context.
@@ -764,20 +859,29 @@ namespace llvm {
/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
Instruction *InsertDbgValueIntrinsic(llvm::Value *V, uint64_t Offset,
DIVariable D, Instruction *InsertBefore);
+
+ // RecordType - Record DIType in a module such that it is not lost even if
+ // it is not referenced through debug info anchors.
+ void RecordType(DIType T);
+
private:
Constant *GetTagConstant(unsigned TAG);
};
- bool getLocationInfo(const Value *V, std::string &DisplayName,
- std::string &Type, unsigned &LineNo, std::string &File,
- std::string &Dir);
-
/// getDISubprogram - Find subprogram that is enclosing this scope.
DISubprogram getDISubprogram(const MDNode *Scope);
/// getDICompositeType - Find underlying composite type.
DICompositeType getDICompositeType(DIType T);
+ /// getOrInsertFnSpecificMDNode - Return a NamedMDNode that is suitable
+ /// to hold function specific information.
+ NamedMDNode *getOrInsertFnSpecificMDNode(Module &M, StringRef Name);
+
+ /// getFnSpecificMDNode - Return a NamedMDNode, if available, that is
+ /// suitable to hold function specific information.
+ NamedMDNode *getFnSpecificMDNode(const Module &M, StringRef Name);
+
class DebugInfoFinder {
public:
/// processModule - Process entire module and collect debug info
diff --git a/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
new file mode 100644
index 0000000..d7f74af
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
@@ -0,0 +1,189 @@
+//===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominanceFrontier class, which calculates and holds the
+// dominance frontier for a function.
+//
+// This should be considered deprecated; don't add any more uses of this data
+// structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+
+#include "llvm/Analysis/Dominators.h"
+#include <map>
+#include <set>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// DominanceFrontierBase - Common base class for computing forward and inverse
+/// dominance frontiers for a function.
+///
+class DominanceFrontierBase : public FunctionPass {
+public:
+ typedef std::set<BasicBlock*> DomSetType; // Dom set for a bb
+ typedef std::map<BasicBlock*, DomSetType> DomSetMapType; // Dom set map
+protected:
+ DomSetMapType Frontiers;
+ std::vector<BasicBlock*> Roots;
+ const bool IsPostDominators;
+
+public:
+ DominanceFrontierBase(char &ID, bool isPostDom)
+ : FunctionPass(ID), IsPostDominators(isPostDom) {}
+
+ /// getRoots - Return the root blocks of the current CFG. This may include
+ /// multiple blocks if we are computing post dominators. For forward
+ /// dominators, this will always be a single block (the entry node).
+ ///
+ inline const std::vector<BasicBlock*> &getRoots() const { return Roots; }
+
+ /// isPostDominator - Returns true if this analysis is based on postdominators.
+ ///
+ bool isPostDominator() const { return IsPostDominators; }
+
+ virtual void releaseMemory() { Frontiers.clear(); }
+
+ // Accessor interface:
+ typedef DomSetMapType::iterator iterator;
+ typedef DomSetMapType::const_iterator const_iterator;
+ iterator begin() { return Frontiers.begin(); }
+ const_iterator begin() const { return Frontiers.begin(); }
+ iterator end() { return Frontiers.end(); }
+ const_iterator end() const { return Frontiers.end(); }
+ iterator find(BasicBlock *B) { return Frontiers.find(B); }
+ const_iterator find(BasicBlock *B) const { return Frontiers.find(B); }
+
+ iterator addBasicBlock(BasicBlock *BB, const DomSetType &frontier) {
+ assert(find(BB) == end() && "Block already in DominanceFrontier!");
+ return Frontiers.insert(std::make_pair(BB, frontier)).first;
+ }
+
+ /// removeBlock - Remove basic block BB's frontier.
+ void removeBlock(BasicBlock *BB) {
+ assert(find(BB) != end() && "Block is not in DominanceFrontier!");
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ I->second.erase(BB);
+ Frontiers.erase(BB);
+ }
+
+ void addToFrontier(iterator I, BasicBlock *Node) {
+ assert(I != end() && "BB is not in DominanceFrontier!");
+ I->second.insert(Node);
+ }
+
+ void removeFromFrontier(iterator I, BasicBlock *Node) {
+ assert(I != end() && "BB is not in DominanceFrontier!");
+ assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
+ I->second.erase(Node);
+ }
+
+ /// compareDomSet - Return false if the two domsets match; otherwise
+ /// return true.
+ bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+ std::set<BasicBlock *> tmpSet;
+ for (DomSetType::const_iterator I = DS2.begin(),
+ E = DS2.end(); I != E; ++I)
+ tmpSet.insert(*I);
+
+ for (DomSetType::const_iterator I = DS1.begin(),
+ E = DS1.end(); I != E; ) {
+ BasicBlock *Node = *I++;
+
+ if (tmpSet.erase(Node) == 0)
+ // Node is in DS1 but not in DS2.
+ return true;
+ }
+
+ if (!tmpSet.empty())
+ // There are nodes that are in DS2 but not in DS1.
+ return true;
+
+ // DS1 and DS2 match.
+ return false;
+ }
+
+ /// compare - Return false if the other dominance frontier base matches
+ /// this dominance frontier base; otherwise return true.
+ bool compare(DominanceFrontierBase &Other) const {
+ DomSetMapType tmpFrontiers;
+ for (DomSetMapType::const_iterator I = Other.begin(),
+ E = Other.end(); I != E; ++I)
+ tmpFrontiers.insert(std::make_pair(I->first, I->second));
+
+ for (DomSetMapType::iterator I = tmpFrontiers.begin(),
+ E = tmpFrontiers.end(); I != E; ) {
+ BasicBlock *Node = I->first;
+ const_iterator DFI = find(Node);
+ if (DFI == end())
+ return true;
+
+ if (compareDomSet(I->second, DFI->second))
+ return true;
+
+ ++I;
+ tmpFrontiers.erase(Node);
+ }
+
+ if (!tmpFrontiers.empty())
+ return true;
+
+ return false;
+ }
+
+ /// print - Convert to human readable form
+ ///
+ virtual void print(raw_ostream &OS, const Module* = 0) const;
+
+ /// dump - Dump the dominance frontier to dbgs().
+ void dump() const;
+};
+
+
+//===-------------------------------------
+/// DominanceFrontier Class - Concrete subclass of DominanceFrontierBase that is
+/// used to compute forward dominance frontiers.
+///
+class DominanceFrontier : public DominanceFrontierBase {
+public:
+ static char ID; // Pass ID, replacement for typeid
+ DominanceFrontier() :
+ DominanceFrontierBase(ID, false) {
+ initializeDominanceFrontierPass(*PassRegistry::getPassRegistry());
+ }
+
+ BasicBlock *getRoot() const {
+ assert(Roots.size() == 1 && "Should always have entry node!");
+ return Roots[0];
+ }
+
+ virtual bool runOnFunction(Function &) {
+ Frontiers.clear();
+ DominatorTree &DT = getAnalysis<DominatorTree>();
+ Roots = DT.getRoots();
+ assert(Roots.size() == 1 && "Only one entry block for forward domfronts!");
+ calculate(DT, DT[Roots[0]]);
+ return false;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<DominatorTree>();
+ }
+
+ const DomSetType &calculate(const DominatorTree &DT,
+ const DomTreeNode *Node);
+};
+
+} // End llvm namespace
+
+#endif
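A sketch of a client pass consuming the (deprecated) frontier through the interface above; the pass must addRequired<DominanceFrontier>() in its getAnalysisUsage, and Worklist is a placeholder container:

    DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
    DominanceFrontier::iterator It = DF.find(BB);
    if (It != DF.end())
      for (DominanceFrontier::DomSetType::iterator I = It->second.begin(),
             E = It->second.end(); I != E; ++I)
        Worklist.push_back(*I); // blocks where BB's dominance ends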
diff --git a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
index 0419688..ae552b0 100644
--- a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
+++ b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
@@ -22,13 +22,9 @@
// A Fast Algorithm for Finding Dominators in a Flowgraph
// T. Lengauer & R. Tarjan, ACM TOPLAS July 1979, pgs 121-141.
//
-// This implements both the O(n*ack(n)) and the O(n*log(n)) versions of EVAL and
-// LINK, but it turns out that the theoretically slower O(n*log(n))
-// implementation is actually faster than the "efficient" algorithm (even for
-// large CFGs) because the constant overheads are substantially smaller. The
-// lower-complexity version can be enabled with the following #define:
-//
-#define BALANCE_IDOM_TREE 0
+// This implements the O(n*log(n)) versions of EVAL and LINK, because it turns
+// out that the theoretically slower O(n*log(n)) implementation is actually
+// faster than the almost-linear O(n*alpha(n)) version, even for large CFGs.
//
//===----------------------------------------------------------------------===//
@@ -46,9 +42,6 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
VInfo.Label = V;
Vertex.push_back(V); // Vertex[n] = V;
- //Info[V].Ancestor = 0; // Ancestor[n] = 0
- //Info[V].Child = 0; // Child[v] = 0
- VInfo.Size = 1; // Size[v] = 1
for (succ_iterator SI = succ_begin(V), E = succ_end(V); SI != E; ++SI) {
InfoRec &SuccVInfo = DT.Info[*SI];
@@ -58,10 +51,10 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
}
#else
- bool IsChilOfArtificialExit = (N != 0);
+ bool IsChildOfArtificialExit = (N != 0);
- std::vector<std::pair<typename GraphT::NodeType*,
- typename GraphT::ChildIteratorType> > Worklist;
+ SmallVector<std::pair<typename GraphT::NodeType*,
+ typename GraphT::ChildIteratorType>, 32> Worklist;
Worklist.push_back(std::make_pair(V, GraphT::child_begin(V)));
while (!Worklist.empty()) {
typename GraphT::NodeType* BB = Worklist.back().first;
@@ -76,14 +69,11 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
BBInfo.Label = BB;
DT.Vertex.push_back(BB); // Vertex[n] = V;
- //BBInfo[V].Ancestor = 0; // Ancestor[n] = 0
- //BBInfo[V].Child = 0; // Child[v] = 0
- BBInfo.Size = 1; // Size[v] = 1
- if (IsChilOfArtificialExit)
+ if (IsChildOfArtificialExit)
BBInfo.Parent = 1;
- IsChilOfArtificialExit = false;
+ IsChildOfArtificialExit = false;
}
// store the DFS number of the current BB - the reference to BBInfo might
@@ -114,118 +104,47 @@ unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
template<class GraphT>
-void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType *VIn) {
- std::vector<typename GraphT::NodeType*> Work;
+typename GraphT::NodeType*
+Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
+ typename GraphT::NodeType *VIn, unsigned LastLinked) {
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInInfo =
+ DT.Info[VIn];
+ if (VInInfo.DFSNum < LastLinked)
+ return VIn;
+
+ SmallVector<typename GraphT::NodeType*, 32> Work;
SmallPtrSet<typename GraphT::NodeType*, 32> Visited;
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInVAInfo =
- DT.Info[DT.Vertex[DT.Info[VIn].Ancestor]];
- if (VInVAInfo.Ancestor != 0)
+ if (VInInfo.Parent >= LastLinked)
Work.push_back(VIn);
while (!Work.empty()) {
typename GraphT::NodeType* V = Work.back();
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
DT.Info[V];
- typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Ancestor];
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo =
- DT.Info[VAncestor];
+ typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Parent];
// Process Ancestor first
- if (Visited.insert(VAncestor) &&
- VAInfo.Ancestor != 0) {
+ if (Visited.insert(VAncestor) && VInfo.Parent >= LastLinked) {
Work.push_back(VAncestor);
continue;
}
Work.pop_back();
// Update VInfo based on Ancestor info
- if (VAInfo.Ancestor == 0)
+ if (VInfo.Parent < LastLinked)
continue;
+
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo =
+ DT.Info[VAncestor];
typename GraphT::NodeType* VAncestorLabel = VAInfo.Label;
typename GraphT::NodeType* VLabel = VInfo.Label;
if (DT.Info[VAncestorLabel].Semi < DT.Info[VLabel].Semi)
VInfo.Label = VAncestorLabel;
- VInfo.Ancestor = VAInfo.Ancestor;
- }
-}
-
-template<class GraphT>
-typename GraphT::NodeType*
-Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType *V) {
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
- DT.Info[V];
-#if !BALANCE_IDOM_TREE
- // Higher-complexity but faster implementation
- if (VInfo.Ancestor == 0)
- return V;
- Compress<GraphT>(DT, V);
- return VInfo.Label;
-#else
- // Lower-complexity but slower implementation
- if (VInfo.Ancestor == 0)
- return VInfo.Label;
- Compress<GraphT>(DT, V);
- GraphT::NodeType* VLabel = VInfo.Label;
-
- GraphT::NodeType* VAncestorLabel = DT.Info[VInfo.Ancestor].Label;
- if (DT.Info[VAncestorLabel].Semi >= DT.Info[VLabel].Semi)
- return VLabel;
- else
- return VAncestorLabel;
-#endif
-}
-
-template<class GraphT>
-void Link(DominatorTreeBase<typename GraphT::NodeType>& DT,
- unsigned DFSNumV, typename GraphT::NodeType* W,
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo) {
-#if !BALANCE_IDOM_TREE
- // Higher-complexity but faster implementation
- WInfo.Ancestor = DFSNumV;
-#else
- // Lower-complexity but slower implementation
- GraphT::NodeType* WLabel = WInfo.Label;
- unsigned WLabelSemi = DT.Info[WLabel].Semi;
- GraphT::NodeType* S = W;
- InfoRec *SInfo = &DT.Info[S];
-
- GraphT::NodeType* SChild = SInfo->Child;
- InfoRec *SChildInfo = &DT.Info[SChild];
-
- while (WLabelSemi < DT.Info[SChildInfo->Label].Semi) {
- GraphT::NodeType* SChildChild = SChildInfo->Child;
- if (SInfo->Size+DT.Info[SChildChild].Size >= 2*SChildInfo->Size) {
- SChildInfo->Ancestor = S;
- SInfo->Child = SChild = SChildChild;
- SChildInfo = &DT.Info[SChild];
- } else {
- SChildInfo->Size = SInfo->Size;
- S = SInfo->Ancestor = SChild;
- SInfo = SChildInfo;
- SChild = SChildChild;
- SChildInfo = &DT.Info[SChild];
- }
+ VInfo.Parent = VAInfo.Parent;
}
- DominatorTreeBase::InfoRec &VInfo = DT.Info[V];
- SInfo->Label = WLabel;
-
- assert(V != W && "The optimization here will not work in this case!");
- unsigned WSize = WInfo.Size;
- unsigned VSize = (VInfo.Size += WSize);
-
- if (VSize < 2*WSize)
- std::swap(S, VInfo.Child);
-
- while (S) {
- SInfo = &DT.Info[S];
- SInfo->Ancestor = V;
- S = SInfo->Child;
- }
-#endif
+ return VInInfo.Label;
}
template<class FuncT, class NodeT>
@@ -242,9 +161,6 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
BBInfo.Label = NULL;
DT.Vertex.push_back(NULL); // Vertex[n] = V;
- //BBInfo[V].Ancestor = 0; // Ancestor[n] = 0
- //BBInfo[V].Child = 0; // Child[v] = 0
- BBInfo.Size = 1; // Size[v] = 1
}
// Step #1: Number blocks in depth-first order and initialize variables used
@@ -257,12 +173,34 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// infinite loops). In these cases an artificial exit node is required.
MultipleRoots |= (DT.isPostDominator() && N != F.size());
+ // When naively implemented, the Lengauer-Tarjan algorithm requires a separate
+ // bucket for each vertex. However, this is unnecessary, because each vertex
+ // is only placed into a single bucket (that of its semidominator), and each
+ // vertex's bucket is processed before it is added to any bucket itself.
+ //
+ // Instead of using a bucket per vertex, we use a single array Buckets that
+ // has two purposes. Before the vertex V with preorder number i is processed,
+ // Buckets[i] stores the index of the first element in V's bucket. After V's
+ // bucket is processed, Buckets[i] stores the index of the next element in the
+ // bucket containing V, if any.
+ SmallVector<unsigned, 32> Buckets;
+ Buckets.resize(N + 1);
+ for (unsigned i = 1; i <= N; ++i)
+ Buckets[i] = i;
+
for (unsigned i = N; i >= 2; --i) {
typename GraphT::NodeType* W = DT.Vertex[i];
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo =
DT.Info[W];
- // Step #2: Calculate the semidominators of all vertices
+ // Step #2: Implicitly define the immediate dominator of vertices
+ for (unsigned j = i; Buckets[j] != i; j = Buckets[j]) {
+ typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
+ typename GraphT::NodeType* U = Eval<GraphT>(DT, V, i + 1);
+ DT.IDoms[V] = DT.Info[U].Semi < i ? U : W;
+ }
+
+ // Step #3: Calculate the semidominators of all vertices
// initialize the semi dominator to point to the parent node
WInfo.Semi = WInfo.Parent;
@@ -272,25 +210,28 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
E = InvTraits::child_end(W); CI != E; ++CI) {
typename InvTraits::NodeType *N = *CI;
if (DT.Info.count(N)) { // Only if this predecessor is reachable!
- unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
+ unsigned SemiU = DT.Info[Eval<GraphT>(DT, N, i + 1)].Semi;
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
}
- DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
-
- typename GraphT::NodeType* WParent = DT.Vertex[WInfo.Parent];
- Link<GraphT>(DT, WInfo.Parent, W, WInfo);
+ // If V is a non-root vertex and sdom(V) = parent(V), then idom(V) is
+ // necessarily parent(V). In this case, set idom(V) here and avoid placing
+ // V into a bucket.
+ if (WInfo.Semi == WInfo.Parent) {
+ DT.IDoms[W] = DT.Vertex[WInfo.Parent];
+ } else {
+ Buckets[i] = Buckets[WInfo.Semi];
+ Buckets[WInfo.Semi] = i;
+ }
+ }
- // Step #3: Implicitly define the immediate dominator of vertices
- std::vector<typename GraphT::NodeType*> &WParentBucket =
- DT.Info[WParent].Bucket;
- while (!WParentBucket.empty()) {
- typename GraphT::NodeType* V = WParentBucket.back();
- WParentBucket.pop_back();
- typename GraphT::NodeType* U = Eval<GraphT>(DT, V);
- DT.IDoms[V] = DT.Info[U].Semi < DT.Info[V].Semi ? U : WParent;
+ if (N >= 1) {
+ typename GraphT::NodeType* Root = DT.Vertex[1];
+ for (unsigned j = 1; Buckets[j] != 1; j = Buckets[j]) {
+ typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
+ DT.IDoms[V] = Root;
}
}
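
Concretely, Buckets encodes each semidominator's bucket as a circular singly linked list threaded through the array: Buckets[s] heads s's list, each entry holds the index of the next member, and the walk stops when a link wraps back around to the starting index, which is why the drain loops above test Buckets[j] != i. A tiny self-contained sketch of the same encoding, with made-up vertex numbers:

#include <cstdio>
#include <vector>

int main() {
  const unsigned N = 5;
  std::vector<unsigned> Buckets(N + 1);
  for (unsigned i = 1; i <= N; ++i)
    Buckets[i] = i;                  // self-link == empty bucket

  // Pretend sdom(4) == sdom(5) == 2: push both onto vertex 2's list.
  unsigned s = 2;
  unsigned verts[2] = {4, 5};
  for (unsigned k = 0; k < 2; ++k) {
    Buckets[verts[k]] = Buckets[s];
    Buckets[s] = verts[k];
  }

  // Drain bucket s: follow the links until they wrap back to s.
  for (unsigned j = s; Buckets[j] != s; j = Buckets[j])
    std::printf("vertex %u has semidominator %u\n", Buckets[j], s);
  return 0;                          // prints vertex 5, then vertex 4
}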
diff --git a/contrib/llvm/include/llvm/Analysis/Dominators.h b/contrib/llvm/include/llvm/Analysis/Dominators.h
index 73c6e62..230e83d 100644
--- a/contrib/llvm/include/llvm/Analysis/Dominators.h
+++ b/contrib/llvm/include/llvm/Analysis/Dominators.h
@@ -7,14 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the following classes:
-// 1. DominatorTree: Represent dominators as an explicit tree structure.
-// 2. DominanceFrontier: Calculate and hold the dominance frontier for a
-// function.
-//
-// These data structures are listed in increasing order of complexity. It
-// takes longer to calculate the dominator frontier, for example, than the
-// DominatorTree mapping.
+// This file defines the DominatorTree class, which provides fast and efficient
+// dominance queries.
//
//===----------------------------------------------------------------------===//
@@ -23,19 +17,15 @@
#include "llvm/Pass.h"
#include "llvm/Function.h"
-#include "llvm/Instructions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <map>
-#include <set>
namespace llvm {
@@ -205,15 +195,11 @@ protected:
// Information record used during immediate dominators computation.
struct InfoRec {
unsigned DFSNum;
+ unsigned Parent;
unsigned Semi;
- unsigned Size;
- NodeT *Label, *Child;
- unsigned Parent, Ancestor;
-
- std::vector<NodeT*> Bucket;
+ NodeT *Label;
- InfoRec() : DFSNum(0), Semi(0), Size(0), Label(0), Child(0), Parent(0),
- Ancestor(0) {}
+ InfoRec() : DFSNum(0), Parent(0), Semi(0), Label(0) {}
};
DenseMap<NodeT*, NodeT*> IDoms;
@@ -303,9 +289,6 @@ public:
: DominatorBase<NodeT>(isPostDom), DFSInfoValid(false), SlowQueries(0) {}
virtual ~DominatorTreeBase() { reset(); }
- // FIXME: Should remove this
- virtual bool runOnFunction(Function &F) { return false; }
-
/// compare - Return false if the other dominator tree base matches this
/// dominator tree base. Otherwise return true.
bool compare(DominatorTreeBase &Other) const {
@@ -361,8 +344,15 @@ public:
return dominatedBySlowTreeWalk(A, B);
}
- inline bool properlyDominates(NodeT *A, NodeT *B) {
- return properlyDominates(getNode(A), getNode(B));
+ inline bool properlyDominates(const NodeT *A, const NodeT *B) {
+ if (A == B)
+ return false;
+
+ // Cast away the const qualifiers here. This is ok since
+ // this function doesn't actually return the values returned
+ // from getNode.
+ return properlyDominates(getNode(const_cast<NodeT *>(A)),
+ getNode(const_cast<NodeT *>(B)));
}
bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
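
The const overload above follows a common idiom: accept const pointers for caller convenience, strip the qualifier internally, and justify it by the fact that nothing reachable from the non-const pointers escapes to the caller. A generic sketch of the pattern, with hypothetical Node/getNodeImpl stand-ins:

struct Node {};
static Node *getNodeImpl(Node *N) { return N; }   // stand-in lookup
static bool properlyDominatesImpl(Node *A, Node *B) {
  return A != B;                                  // stand-in for the tree walk
}

inline bool properlyDominatesConst(const Node *A, const Node *B) {
  if (A == B)
    return false;                    // proper dominance is irreflexive
  // Safe: nothing derived from the non-const pointers escapes the caller.
  return properlyDominatesImpl(getNodeImpl(const_cast<Node *>(A)),
                               getNodeImpl(const_cast<Node *>(B)));
}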
@@ -377,7 +367,7 @@ public:
/// isReachableFromEntry - Return true if A is dominated by the entry
/// block of the function containing it.
- bool isReachableFromEntry(NodeT* A) {
+ bool isReachableFromEntry(const NodeT* A) {
assert(!this->isPostDominator() &&
"This is not implemented for post dominators");
return dominates(&A->getParent()->front(), A);
@@ -478,6 +468,13 @@ public:
return NULL;
}
+ const NodeT *findNearestCommonDominator(const NodeT *A, const NodeT *B) {
+ // Cast away the const qualifiers here. This is ok since
+ // const is re-introduced on the return type.
+ return findNearestCommonDominator(const_cast<NodeT *>(A),
+ const_cast<NodeT *>(B));
+ }
+
//===--------------------------------------------------------------------===//
// API to update (Post)DominatorTree information based on modifications to
// the CFG...
@@ -509,7 +506,7 @@ public:
}
/// eraseNode - Removes a node from the dominator tree. Block must not
- /// domiante any other blocks. Removes node from its immediate dominator's
+ /// dominate any other blocks. Removes node from its immediate dominator's
/// children list. Deletes dominator node associated with basic block BB.
void eraseNode(NodeT *BB) {
DomTreeNodeBase<NodeT> *Node = getNode(BB);
@@ -556,7 +553,7 @@ public:
o << "Inorder PostDominator Tree: ";
else
o << "Inorder Dominator Tree: ";
- if (this->DFSInfoValid)
+ if (!this->DFSInfoValid)
o << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
o << "\n";
@@ -567,18 +564,10 @@ public:
protected:
template<class GraphT>
- friend void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType* VIn);
-
- template<class GraphT>
friend typename GraphT::NodeType* Eval(
DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType* V);
-
- template<class GraphT>
- friend void Link(DominatorTreeBase<typename GraphT::NodeType>& DT,
- unsigned DFSNumV, typename GraphT::NodeType* W,
- typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo);
+ typename GraphT::NodeType* V,
+ unsigned LastLinked);
template<class GraphT>
friend unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
@@ -703,6 +692,7 @@ public:
DominatorTreeBase<BasicBlock>* DT;
DominatorTree() : FunctionPass(ID) {
+ initializeDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<BasicBlock>(false);
}
@@ -751,7 +741,7 @@ public:
AU.setPreservesAll();
}
- inline bool dominates(DomTreeNode* A, DomTreeNode* B) const {
+ inline bool dominates(const DomTreeNode* A, const DomTreeNode* B) const {
return DT->dominates(A, B);
}
@@ -767,7 +757,7 @@ public:
return DT->properlyDominates(A, B);
}
- bool properlyDominates(BasicBlock *A, BasicBlock *B) const {
+ bool properlyDominates(const BasicBlock *A, const BasicBlock *B) const {
return DT->properlyDominates(A, B);
}
@@ -777,6 +767,11 @@ public:
return DT->findNearestCommonDominator(A, B);
}
+ inline const BasicBlock *findNearestCommonDominator(const BasicBlock *A,
+ const BasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
inline DomTreeNode *operator[](BasicBlock *BB) const {
return DT->getNode(BB);
}
@@ -807,7 +802,7 @@ public:
}
/// eraseNode - Removes a node from the dominator tree. Block must not
- /// domiante any other blocks. Removes node from its immediate dominator's
+ /// dominate any other blocks. Removes node from its immediate dominator's
/// children list. Deletes dominator node associated with basic block BB.
inline void eraseNode(BasicBlock *BB) {
DT->eraseNode(BB);
@@ -819,7 +814,7 @@ public:
DT->splitBlock(NewBB);
}
- bool isReachableFromEntry(BasicBlock* A) {
+ bool isReachableFromEntry(const BasicBlock* A) {
return DT->isReachableFromEntry(A);
}
@@ -876,194 +871,6 @@ template <> struct GraphTraits<DominatorTree*>
};
-//===----------------------------------------------------------------------===//
-/// DominanceFrontierBase - Common base class for computing forward and inverse
-/// dominance frontiers for a function.
-///
-class DominanceFrontierBase : public FunctionPass {
-public:
- typedef std::set<BasicBlock*> DomSetType; // Dom set for a bb
- typedef std::map<BasicBlock*, DomSetType> DomSetMapType; // Dom set map
-protected:
- DomSetMapType Frontiers;
- std::vector<BasicBlock*> Roots;
- const bool IsPostDominators;
-
-public:
- DominanceFrontierBase(char &ID, bool isPostDom)
- : FunctionPass(ID), IsPostDominators(isPostDom) {}
-
- /// getRoots - Return the root blocks of the current CFG. This may include
- /// multiple blocks if we are computing post dominators. For forward
- /// dominators, this will always be a single block (the entry node).
- ///
- inline const std::vector<BasicBlock*> &getRoots() const { return Roots; }
-
- /// isPostDominator - Returns true if analysis based of postdoms
- ///
- bool isPostDominator() const { return IsPostDominators; }
-
- virtual void releaseMemory() { Frontiers.clear(); }
-
- // Accessor interface:
- typedef DomSetMapType::iterator iterator;
- typedef DomSetMapType::const_iterator const_iterator;
- iterator begin() { return Frontiers.begin(); }
- const_iterator begin() const { return Frontiers.begin(); }
- iterator end() { return Frontiers.end(); }
- const_iterator end() const { return Frontiers.end(); }
- iterator find(BasicBlock *B) { return Frontiers.find(B); }
- const_iterator find(BasicBlock *B) const { return Frontiers.find(B); }
-
- iterator addBasicBlock(BasicBlock *BB, const DomSetType &frontier) {
- assert(find(BB) == end() && "Block already in DominanceFrontier!");
- return Frontiers.insert(std::make_pair(BB, frontier)).first;
- }
-
- /// removeBlock - Remove basic block BB's frontier.
- void removeBlock(BasicBlock *BB) {
- assert(find(BB) != end() && "Block is not in DominanceFrontier!");
- for (iterator I = begin(), E = end(); I != E; ++I)
- I->second.erase(BB);
- Frontiers.erase(BB);
- }
-
- void addToFrontier(iterator I, BasicBlock *Node) {
- assert(I != end() && "BB is not in DominanceFrontier!");
- I->second.insert(Node);
- }
-
- void removeFromFrontier(iterator I, BasicBlock *Node) {
- assert(I != end() && "BB is not in DominanceFrontier!");
- assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
- I->second.erase(Node);
- }
-
- /// compareDomSet - Return false if two domsets match. Otherwise
- /// return true;
- bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
- std::set<BasicBlock *> tmpSet;
- for (DomSetType::const_iterator I = DS2.begin(),
- E = DS2.end(); I != E; ++I)
- tmpSet.insert(*I);
-
- for (DomSetType::const_iterator I = DS1.begin(),
- E = DS1.end(); I != E; ) {
- BasicBlock *Node = *I++;
-
- if (tmpSet.erase(Node) == 0)
- // Node is in DS1 but not in DS2.
- return true;
- }
-
- if (!tmpSet.empty())
- // There are nodes that are in DS2 but not in DS1.
- return true;
-
- // DS1 and DS2 matches.
- return false;
- }
-
- /// compare - Return true if the other dominance frontier base matches
- /// this dominance frontier base. Otherwise return false.
- bool compare(DominanceFrontierBase &Other) const {
- DomSetMapType tmpFrontiers;
- for (DomSetMapType::const_iterator I = Other.begin(),
- E = Other.end(); I != E; ++I)
- tmpFrontiers.insert(std::make_pair(I->first, I->second));
-
- for (DomSetMapType::iterator I = tmpFrontiers.begin(),
- E = tmpFrontiers.end(); I != E; ) {
- BasicBlock *Node = I->first;
- const_iterator DFI = find(Node);
- if (DFI == end())
- return true;
-
- if (compareDomSet(I->second, DFI->second))
- return true;
-
- ++I;
- tmpFrontiers.erase(Node);
- }
-
- if (!tmpFrontiers.empty())
- return true;
-
- return false;
- }
-
- /// print - Convert to human readable form
- ///
- virtual void print(raw_ostream &OS, const Module* = 0) const;
-
- /// dump - Dump the dominance frontier to dbgs().
- void dump() const;
-};
-
-
-//===-------------------------------------
-/// DominanceFrontier Class - Concrete subclass of DominanceFrontierBase that is
-/// used to compute a forward dominator frontiers.
-///
-class DominanceFrontier : public DominanceFrontierBase {
-public:
- static char ID; // Pass ID, replacement for typeid
- DominanceFrontier() :
- DominanceFrontierBase(ID, false) {}
-
- BasicBlock *getRoot() const {
- assert(Roots.size() == 1 && "Should always have entry node!");
- return Roots[0];
- }
-
- virtual bool runOnFunction(Function &) {
- Frontiers.clear();
- DominatorTree &DT = getAnalysis<DominatorTree>();
- Roots = DT.getRoots();
- assert(Roots.size() == 1 && "Only one entry block for forward domfronts!");
- calculate(DT, DT[Roots[0]]);
- return false;
- }
-
- virtual void verifyAnalysis() const;
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<DominatorTree>();
- }
-
- /// splitBlock - BB is split and now it has one successor. Update dominance
- /// frontier to reflect this change.
- void splitBlock(BasicBlock *BB);
-
- /// BasicBlock BB's new dominator is NewBB. Update BB's dominance frontier
- /// to reflect this change.
- void changeImmediateDominator(BasicBlock *BB, BasicBlock *NewBB,
- DominatorTree *DT) {
- // NewBB is now dominating BB. Which means BB's dominance
- // frontier is now part of NewBB's dominance frontier. However, BB
- // itself is not member of NewBB's dominance frontier.
- DominanceFrontier::iterator NewDFI = find(NewBB);
- DominanceFrontier::iterator DFI = find(BB);
- // If BB was an entry block then its frontier is empty.
- if (DFI == end())
- return;
- DominanceFrontier::DomSetType BBSet = DFI->second;
- for (DominanceFrontier::DomSetType::iterator BBSetI = BBSet.begin(),
- BBSetE = BBSet.end(); BBSetI != BBSetE; ++BBSetI) {
- BasicBlock *DFMember = *BBSetI;
- // Insert only if NewBB dominates DFMember.
- if (!DT->dominates(NewBB, DFMember))
- NewDFI->second.insert(DFMember);
- }
- NewDFI->second.erase(BB);
- }
-
- const DomSetType &calculate(const DominatorTree &DT,
- const DomTreeNode *Node);
-};
-
-
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/FindUsedTypes.h b/contrib/llvm/include/llvm/Analysis/FindUsedTypes.h
index 8a78eb6..fc57e1a 100644
--- a/contrib/llvm/include/llvm/Analysis/FindUsedTypes.h
+++ b/contrib/llvm/include/llvm/Analysis/FindUsedTypes.h
@@ -26,7 +26,9 @@ class FindUsedTypes : public ModulePass {
std::set<const Type *> UsedTypes;
public:
static char ID; // Pass identification, replacement for typeid
- FindUsedTypes() : ModulePass(ID) {}
+ FindUsedTypes() : ModulePass(ID) {
+ initializeFindUsedTypesPass(*PassRegistry::getPassRegistry());
+ }
/// getTypes - After the pass has been run, return the set containing all of
/// the types used in the module.
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index 462bddd..b08bf57 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -33,7 +33,7 @@ namespace llvm {
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
- const int IndirectCallBonus = 500;
+ const int IndirectCallBonus = -100;
const int CallPenalty = 25;
const int LastCallToStaticBonus = -15000;
const int ColdccPenalty = 2000;
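
The sign flip on IndirectCallBonus turns resolving an indirect call into an incentive rather than a penalty: bonuses share a currency with InstrCost (5 per instruction), so -100 is worth eliminating 20 instructions. A back-of-the-envelope sketch; roughInlineCost and its shape are illustrative assumptions, not the real getInlineCost formula:

// Illustrative only: relative weights, not the actual cost computation.
const int InstrCost = 5;            // cost charged per callee instruction
const int IndirectCallBonus = -100; // reward if inlining makes a call direct

int roughInlineCost(int NumInstructions, bool ResolvesIndirectCall) {
  int Cost = NumInstructions * InstrCost;
  if (ResolvesIndirectCall)
    Cost += IndirectCallBonus;      // -100 == deleting 20 instructions
  return Cost;                      // compared against an inline threshold
}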
@@ -98,7 +98,8 @@ namespace llvm {
unsigned AllocaWeight;
ArgInfo(unsigned CWeight, unsigned AWeight)
- : ConstantWeight(CWeight), AllocaWeight(AWeight) {}
+ : ConstantWeight(CWeight), AllocaWeight(AWeight)
+ {}
};
struct FunctionInfo {
@@ -110,17 +111,6 @@ namespace llvm {
/// entry here.
std::vector<ArgInfo> ArgumentWeights;
- /// CountCodeReductionForConstant - Figure out an approximation for how
- /// many instructions will be constant folded if the specified value is
- /// constant.
- unsigned CountCodeReductionForConstant(Value *V);
-
- /// CountCodeReductionForAlloca - Figure out an approximation of how much
- /// smaller the function will be if it is inlined into a context where an
- /// argument becomes an alloca.
- ///
- unsigned CountCodeReductionForAlloca(Value *V);
-
/// analyzeFunction - Add information about the specified function
/// to the current structure.
void analyzeFunction(Function *F);
@@ -134,6 +124,10 @@ namespace llvm {
// the ValueMap will update itself when this happens.
ValueMap<const Function *, FunctionInfo> CachedFunctionInfo;
+ int CountBonusForConstant(Value *V, Constant *C = NULL);
+ int ConstantFunctionBonus(CallSite CS, Constant *C);
+ int getInlineSize(CallSite CS, Function *Callee);
+ int getInlineBonuses(CallSite CS, Function *Callee);
public:
/// getInlineCost - The heuristic used to determine if we should inline the
@@ -150,6 +144,18 @@ namespace llvm {
Function *Callee,
SmallPtrSet<const Function *, 16> &NeverInline);
+ /// getSpecializationBonus - The heuristic used to determine the per-call
+ /// performance boost for using a specialization of Callee with argument
+ /// SpecializedArgNos replaced by a constant.
+ int getSpecializationBonus(Function *Callee,
+ SmallVectorImpl<unsigned> &SpecializedArgNo);
+
+ /// getSpecializationCost - The heuristic used to determine the code-size
+ /// impact of creating a specialized version of Callee with argument
+ /// SpecializedArgNo replaced by a constant.
+ InlineCost getSpecializationCost(Function *Callee,
+ SmallVectorImpl<unsigned> &SpecializedArgNo);
+
/// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
/// higher threshold to determine if the function call should be inlined.
float getInlineFudgeFactor(CallSite CS);
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
index f47e740..dff1ba2 100644
--- a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -7,9 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares routines for folding instructions into simpler forms that
-// do not require creating new instructions. For example, this does constant
-// folding, and can handle identities like (X&0)->0.
+// This file declares routines for folding instructions into simpler forms
+// that do not require creating new instructions. This does constant folding
+// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
+// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
+// ("and i32 %x, %x" -> "%x"). If the simplification is also an instruction
+// then it dominates the original instruction.
//
//===----------------------------------------------------------------------===//
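
In other words, these routines never create IR: a non-null result is always a constant, an argument, or a value that already exists, and when it is an instruction it dominates the query point, which is why the signatures below can now take a DominatorTree. A minimal usage sketch, assuming an Instruction *I from a function whose DominatorTree has been computed:

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Sketch only: returns the simplified value, or null if I cannot be
// simplified without creating new instructions.
Value *foldOrNull(Instruction *I, const TargetData *TD,
                  const DominatorTree *DT) {
  // E.g. "and i32 %x, %x" yields %x; "sub i32 %x, %x" yields i32 0.
  return SimplifyInstruction(I, TD, DT);
}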
@@ -17,6 +20,7 @@
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
namespace llvm {
+ class DominatorTree;
class Instruction;
class Value;
class TargetData;
@@ -24,56 +28,106 @@ namespace llvm {
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const TargetData *TD = 0);
-
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
+ /// SimplifySubInst - Given operands for a Sub, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
+ /// SimplifyMulInst - Given operands for a Mul, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+ /// SimplifySDivInst - Given operands for an SDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+ /// SimplifyUDivInst - Given operands for a UDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+ /// SimplifyFDivInst - Given operands for an FDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+ /// SimplifyShlInst - Given operands for a Shl, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
+ /// SimplifyLShrInst - Given operands for a LShr, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD = 0, const DominatorTree *DT=0);
+
+ /// SimplifyAShrInst - Given operands for a AShr, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyAndInst(Value *LHS, Value *RHS,
- const TargetData *TD = 0);
+ Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyOrInst(Value *LHS, Value *RHS,
- const TargetData *TD = 0);
-
+ Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+ /// SimplifyXorInst - Given operands for a Xor, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0);
-
+ const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0);
-
+ const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const TargetData *TD = 0);
+ const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyGEPInst(Value * const *Ops, unsigned NumOps,
- const TargetData *TD = 0);
-
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
//=== Helper functions for higher up the class hierarchy.
-
-
+
+
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0);
-
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD = 0);
-
+ Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const TargetData *TD = 0, const DominatorTree *DT = 0);
+
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
- Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0);
-
-
+ Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
+
+
/// ReplaceAndSimplifyAllUses - Perform From->replaceAllUsesWith(To) and then
/// delete the From instruction. In addition to a basic RAUW, this does a
/// recursive simplification of the updated instructions. This catches
@@ -81,7 +135,8 @@ namespace llvm {
/// simplifies and deletes scalar operations, it does not change the CFG.
///
void ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
- const TargetData *TD = 0);
+ const TargetData *TD = 0,
+ const DominatorTree *DT = 0);
} // end namespace llvm
#endif
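
ReplaceAndSimplifyAllUses is the transactional variant of the above: it RAUWs From with To, recursively re-simplifies every user that changed, and finally deletes From. A minimal sketch of how a scalar pass might use it (per the comment above, the CFG is never changed):

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Sketch only: try to fold I away entirely. On success, I is deleted and
// all of its users have been recursively re-simplified.
bool foldAndErase(Instruction *I, const TargetData *TD,
                  const DominatorTree *DT) {
  Value *V = SimplifyInstruction(I, TD, DT);
  if (!V)
    return false;
  ReplaceAndSimplifyAllUses(I, V, TD, DT); // also deletes I
  return true;
}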
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalPartition.h b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
index 75a5cdf..df7313f 100644
--- a/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
+++ b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
@@ -48,7 +48,9 @@ class IntervalPartition : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- IntervalPartition() : FunctionPass(ID), RootInterval(0) {}
+ IntervalPartition() : FunctionPass(ID), RootInterval(0) {
+ initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
+ }
// run - Calculate the interval partition for this function
virtual bool runOnFunction(Function &F);
diff --git a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
index b2a3afb..fc4d0af 100644
--- a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -31,7 +31,9 @@ class LazyValueInfo : public FunctionPass {
void operator=(const LazyValueInfo&); // DO NOT IMPLEMENT.
public:
static char ID;
- LazyValueInfo() : FunctionPass(ID), PImpl(0) {}
+ LazyValueInfo() : FunctionPass(ID), PImpl(0) {
+ initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
+ }
~LazyValueInfo() { assert(PImpl == 0 && "releaseMemory not called"); }
/// Tristate - This is used to return true/false/dunno results.
diff --git a/contrib/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
index c9adf3f..243234b 100644
--- a/contrib/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
@@ -28,15 +28,17 @@ namespace llvm {
LibCallInfo *LCI;
explicit LibCallAliasAnalysis(LibCallInfo *LC = 0)
- : FunctionPass(ID), LCI(LC) {
+ : FunctionPass(ID), LCI(LC) {
+ initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
explicit LibCallAliasAnalysis(char &ID, LibCallInfo *LC)
- : FunctionPass(ID), LCI(LC) {
+ : FunctionPass(ID), LCI(LC) {
+ initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
~LibCallAliasAnalysis();
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
@@ -64,7 +66,7 @@ namespace llvm {
private:
ModRefResult AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
};
} // End of llvm namespace
diff --git a/contrib/llvm/include/llvm/Analysis/LibCallSemantics.h b/contrib/llvm/include/llvm/Analysis/LibCallSemantics.h
index 31d7cc5..f5a9e96 100644
--- a/contrib/llvm/include/llvm/Analysis/LibCallSemantics.h
+++ b/contrib/llvm/include/llvm/Analysis/LibCallSemantics.h
@@ -48,7 +48,7 @@ namespace llvm {
Yes, No, Unknown
};
LocResult (*isLocation)(ImmutableCallSite CS,
- const Value *Ptr, unsigned Size);
+ const AliasAnalysis::Location &Loc);
};
/// LibCallFunctionInfo - Each record in the array of FunctionInfo structs
diff --git a/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
index 94fd990..f195d27 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
@@ -91,7 +91,9 @@ class LoopDependenceAnalysis : public LoopPass {
public:
static char ID; // Class identification, replacement for typeinfo
- LoopDependenceAnalysis() : LoopPass(ID) {}
+ LoopDependenceAnalysis() : LoopPass(ID) {
+ initializeLoopDependenceAnalysisPass(*PassRegistry::getPassRegistry());
+ }
/// isDependencePair - Check whether two values can possibly give rise to
/// a data dependence: that is the case if both are instructions accessing
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 462620f..392bdad 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -32,6 +32,7 @@
#define LLVM_ANALYSIS_LOOP_INFO_H
#include "llvm/Pass.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallVector.h"
@@ -40,6 +41,7 @@
#include "llvm/Support/CFG.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <map>
namespace llvm {
@@ -53,6 +55,7 @@ static void RemoveFromVector(std::vector<T*> &V, T *N) {
class DominatorTree;
class LoopInfo;
class Loop;
+class PHINode;
template<class N, class M> class LoopInfoBase;
template<class N, class M> class LoopBase;
@@ -523,10 +526,9 @@ public:
///
bool isLoopInvariant(Value *V) const;
- /// isLoopInvariant - Return true if the specified instruction is
- /// loop-invariant.
- ///
- bool isLoopInvariant(Instruction *I) const;
+ /// hasLoopInvariantOperands - Return true if all the operands of the
+ /// specified instruction are loop invariant.
+ bool hasLoopInvariantOperands(Instruction *I) const;
/// makeLoopInvariant - If the given value is an instruction inside of the
/// loop and it can be hoisted, do so to make it trivially loop-invariant.
@@ -630,7 +632,7 @@ private:
template<class BlockT, class LoopT>
class LoopInfoBase {
// BBMap - Mapping of basic blocks to the innermost loop they occur in

- std::map<BlockT *, LoopT *> BBMap;
+ DenseMap<BlockT *, LoopT *> BBMap;
std::vector<LoopT *> TopLevelLoops;
friend class LoopBase<BlockT, LoopT>;
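
Replacing std::map with DenseMap for BBMap trades ordered iteration (not needed here) for a flat open-addressed hash table: getLoopFor becomes a hash probe instead of a red-black-tree walk, at the cost of iterator invalidation when the table grows. A small sketch of the access pattern, with stand-in Block/LoopT types:

#include "llvm/ADT/DenseMap.h"
using namespace llvm;

struct Block {};
struct LoopT {};

// Sketch of the getLoopFor pattern: one hash probe, no tree walk.
LoopT *lookupLoop(const DenseMap<Block *, LoopT *> &BBMap, Block *BB) {
  DenseMap<Block *, LoopT *>::const_iterator I = BBMap.find(BB);
  return I != BBMap.end() ? I->second : 0;
}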
@@ -661,7 +663,7 @@ public:
/// block is in no loop (for example the entry node), null is returned.
///
LoopT *getLoopFor(const BlockT *BB) const {
- typename std::map<BlockT *, LoopT *>::const_iterator I=
+ typename DenseMap<BlockT *, LoopT *>::const_iterator I=
BBMap.find(const_cast<BlockT*>(BB));
return I != BBMap.end() ? I->second : 0;
}
@@ -729,7 +731,7 @@ public:
/// including all of the Loop objects it is nested in and our mapping from
/// BasicBlocks to loops.
void removeBlock(BlockT *BB) {
- typename std::map<BlockT *, LoopT *>::iterator I = BBMap.find(BB);
+ typename DenseMap<BlockT *, LoopT *>::iterator I = BBMap.find(BB);
if (I != BBMap.end()) {
for (LoopT *L = I->second; L; L = L->getParentLoop())
L->removeBlockFromLoop(BB);
@@ -923,7 +925,7 @@ public:
for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
TopLevelLoops[i]->print(OS);
#if 0
- for (std::map<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
+ for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
E = BBMap.end(); I != E; ++I)
OS << "BB '" << I->first->getName() << "' level = "
<< I->second->getLoopDepth() << "\n";
@@ -940,7 +942,9 @@ class LoopInfo : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LoopInfo() : FunctionPass(ID) {}
+ LoopInfo() : FunctionPass(ID) {
+ initializeLoopInfoPass(*PassRegistry::getPassRegistry());
+ }
LoopInfoBase<BasicBlock, Loop>& getBase() { return LI; }
@@ -1019,6 +1023,27 @@ public:
void removeBlock(BasicBlock *BB) {
LI.removeBlock(BB);
}
+
+ /// replacementPreservesLCSSAForm - Returns true if replacing From with To
+ /// everywhere is guaranteed to preserve LCSSA form.
+ bool replacementPreservesLCSSAForm(Instruction *From, Value *To) {
+ // Preserving LCSSA form is only problematic if the replacing value is an
+ // instruction.
+ Instruction *I = dyn_cast<Instruction>(To);
+ if (!I) return true;
+ // If both instructions are defined in the same basic block then replacement
+ // cannot break LCSSA form.
+ if (I->getParent() == From->getParent())
+ return true;
+ // If the instruction is not defined in a loop then it can safely replace
+ // anything.
+ Loop *ToLoop = getLoopFor(I->getParent());
+ if (!ToLoop) return true;
+ // If the replacing instruction is defined in the same loop as the original
+ // instruction, or in a loop that contains it as an inner loop, then using
+ // it as a replacement will not break LCSSA form.
+ return ToLoop->contains(getLoopFor(From->getParent()));
+ }
};
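
replacementPreservesLCSSAForm lets callers that fold instructions (for example via SimplifyInstruction) check, before doing RAUW, that substituting To for From cannot introduce a use of a loop-defined value outside its loop without the PHI node LCSSA requires. A minimal usage sketch:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Instruction.h"
using namespace llvm;

// Sketch only: perform From->replaceAllUsesWith(To) only when LoopInfo
// can prove that LCSSA form survives the substitution.
bool replaceIfLCSSASafe(LoopInfo &LI, Instruction *From, Value *To) {
  if (!LI.replacementPreservesLCSSAForm(From, To))
    return false; // a PHI would be needed at some loop exit; bail out
  From->replaceAllUsesWith(To);
  return true;
}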
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index a4f9162..22493f6 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -74,6 +74,10 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *isFreeCall(const Value *I);
+
+static inline CallInst *isFreeCall(Value *I) {
+ return const_cast<CallInst*>(isFreeCall((const Value*)I));
+}
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index f6aab03..4d5dd19 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -17,6 +17,7 @@
#include "llvm/BasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/OwningPtr.h"
@@ -46,6 +47,9 @@ namespace llvm {
/// pair holds the instruction that clobbers the memory. For example,
/// this occurs when we see a may-aliased store to the memory location we
/// care about.
+ ///
+ /// A dependence query on the first instruction of the entry block will
+ /// return a clobber(self) result.
Clobber,
/// Def - This is a dependence on the specified instruction which
@@ -132,26 +136,49 @@ namespace llvm {
}
};
+ /// NonLocalDepEntry - This is an entry in the NonLocalDepInfo cache. For
+ /// each BasicBlock (the BB entry) it keeps a MemDepResult.
+ class NonLocalDepEntry {
+ BasicBlock *BB;
+ MemDepResult Result;
+ public:
+ NonLocalDepEntry(BasicBlock *bb, MemDepResult result)
+ : BB(bb), Result(result) {}
+
+ // This is used for searches.
+ NonLocalDepEntry(BasicBlock *bb) : BB(bb) {}
+
+ // BB is the sort key, it can't be changed.
+ BasicBlock *getBB() const { return BB; }
+
+ void setResult(const MemDepResult &R) { Result = R; }
+
+ const MemDepResult &getResult() const { return Result; }
+
+ bool operator<(const NonLocalDepEntry &RHS) const {
+ return BB < RHS.BB;
+ }
+ };
+
/// NonLocalDepResult - This is a result from a NonLocal dependence query.
/// For each BasicBlock (the BB entry) it keeps a MemDepResult and the
/// (potentially phi translated) address that was live in the block.
class NonLocalDepResult {
- BasicBlock *BB;
- MemDepResult Result;
+ NonLocalDepEntry Entry;
Value *Address;
public:
NonLocalDepResult(BasicBlock *bb, MemDepResult result, Value *address)
- : BB(bb), Result(result), Address(address) {}
+ : Entry(bb, result), Address(address) {}
// BB is the sort key, it can't be changed.
- BasicBlock *getBB() const { return BB; }
+ BasicBlock *getBB() const { return Entry.getBB(); }
void setResult(const MemDepResult &R, Value *Addr) {
- Result = R;
+ Entry.setResult(R);
Address = Addr;
}
- const MemDepResult &getResult() const { return Result; }
+ const MemDepResult &getResult() const { return Entry.getResult(); }
/// getAddress - Return the address of this pointer in this block. This can
/// be different than the address queried for the non-local result because
@@ -163,30 +190,6 @@ namespace llvm {
Value *getAddress() const { return Address; }
};
- /// NonLocalDepEntry - This is an entry in the NonLocalDepInfo cache. For
- /// each BasicBlock (the BB entry) it keeps a MemDepResult.
- class NonLocalDepEntry {
- BasicBlock *BB;
- MemDepResult Result;
- public:
- NonLocalDepEntry(BasicBlock *bb, MemDepResult result)
- : BB(bb), Result(result) {}
-
- // This is used for searches.
- NonLocalDepEntry(BasicBlock *bb) : BB(bb) {}
-
- // BB is the sort key, it can't be changed.
- BasicBlock *getBB() const { return BB; }
-
- void setResult(const MemDepResult &R) { Result = R; }
-
- const MemDepResult &getResult() const { return Result; }
-
- bool operator<(const NonLocalDepEntry &RHS) const {
- return BB < RHS.BB;
- }
- };
-
/// MemoryDependenceAnalysis - This is an analysis that determines, for a
/// given memory operation, what preceding memory operations it depends on.
/// It builds on alias analysis information, and tries to provide a lazy,
@@ -212,7 +215,7 @@ namespace llvm {
private:
/// ValueIsLoadPair - This is a pair<Value*, bool> where the bool is true if
/// the dependence is a read only dependence, false if read/write.
- typedef PointerIntPair<Value*, 1, bool> ValueIsLoadPair;
+ typedef PointerIntPair<const Value*, 1, bool> ValueIsLoadPair;
/// BBSkipFirstBlockPair - This pair is used when caching information for a
/// block. If the pointer is null, the cache value is not a full query that
/// starts from the specified block. If non-null, the bool indicates whether
/// or not the contents of the block were skipped.
/// or not the contents of the block was skipped.
typedef PointerIntPair<BasicBlock*, 1, bool> BBSkipFirstBlockPair;
+ /// NonLocalPointerInfo - This record is the information kept for each
+ /// (value, is load) pair.
+ struct NonLocalPointerInfo {
+ /// Pair - The pair of the block and the skip-first-block flag.
+ BBSkipFirstBlockPair Pair;
+ /// NonLocalDeps - The results of the query for each relevant block.
+ NonLocalDepInfo NonLocalDeps;
+ /// Size - The maximum size of the dereferences of the
+ /// pointer. May be UnknownSize if the sizes are unknown.
+ uint64_t Size;
+ /// TBAATag - The TBAA tag associated with dereferences of the
+ /// pointer. May be null if there are no tags or conflicting tags.
+ const MDNode *TBAATag;
+
+ NonLocalPointerInfo() : Size(AliasAnalysis::UnknownSize), TBAATag(0) {}
+ };
+
/// CachedNonLocalPointerInfo - This map stores the cached results of doing
/// a pointer lookup at the bottom of a block. The key of this map is the
/// pointer+isload bit, the value is a list of <bb->result> mappings.
- typedef DenseMap<ValueIsLoadPair, std::pair<BBSkipFirstBlockPair,
- NonLocalDepInfo> > CachedNonLocalPointerInfo;
+ typedef DenseMap<ValueIsLoadPair,
+ NonLocalPointerInfo> CachedNonLocalPointerInfo;
CachedNonLocalPointerInfo NonLocalPointerDeps;
// A map from instructions to their non-local pointer dependencies.
@@ -297,10 +317,10 @@ namespace llvm {
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within BB.
- void getNonLocalPointerDependency(Value *Pointer, bool isLoad,
- BasicBlock *BB,
+ void getNonLocalPointerDependency(const AliasAnalysis::Location &Loc,
+ bool isLoad, BasicBlock *BB,
SmallVectorImpl<NonLocalDepResult> &Result);
-
+
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
void removeInstruction(Instruction *InstToRemove);
@@ -318,20 +338,29 @@ namespace llvm {
/// critical edges.
void invalidateCachedPredecessors();
- private:
- MemDepResult getPointerDependencyFrom(Value *Pointer, uint64_t MemSize,
+ /// getPointerDependencyFrom - Return the instruction on which a memory
+ /// location depends. If isLoad is true, this routine ignores may-aliases
+ /// with read-only operations. If isLoad is false, this routine ignores
+ /// may-aliases with reads from read-only locations.
+ ///
+ /// Note that this is an uncached query, and thus may be inefficient.
+ ///
+ MemDepResult getPointerDependencyFrom(const AliasAnalysis::Location &Loc,
bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB);
+
+ private:
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
BasicBlock::iterator ScanIt,
BasicBlock *BB);
- bool getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t Size,
+ bool getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
+ const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *BB,
SmallVectorImpl<NonLocalDepResult> &Result,
DenseMap<BasicBlock*, Value*> &Visited,
bool SkipFirstBlock = false);
- MemDepResult GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
+ MemDepResult GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *BB,
NonLocalDepInfo *Cache,
unsigned NumSortedEntries);
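
The memdep queries now take an AliasAnalysis::Location, which bundles the pointer with an access size and an optional TBAA tag, instead of a bare (pointer, size) pair. A minimal sketch of a local query for a load, assuming Location's (Ptr, Size, TBAATag) constructor from AliasAnalysis.h:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

// Sketch only: local dependency of a load, scanning backwards from it.
MemDepResult depForLoad(MemoryDependenceAnalysis &MDA, LoadInst *Load,
                        uint64_t Size) {
  AliasAnalysis::Location Loc(Load->getPointerOperand(), Size,
                              Load->getMetadata(LLVMContext::MD_tbaa));
  return MDA.getPointerDependencyFrom(Loc, /*isLoad=*/true,
                                      Load, Load->getParent());
}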
diff --git a/contrib/llvm/include/llvm/Analysis/Passes.h b/contrib/llvm/include/llvm/Analysis/Passes.h
index 37425eb..5b0c5b1 100644
--- a/contrib/llvm/include/llvm/Analysis/Passes.h
+++ b/contrib/llvm/include/llvm/Analysis/Passes.h
@@ -59,7 +59,7 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
- // createBasicAliasAnalysisPass - This pass implements the default alias
+ // createBasicAliasAnalysisPass - This pass implements the stateless alias
// analysis.
//
ImmutablePass *createBasicAliasAnalysisPass();
@@ -116,6 +116,28 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
+ // createPathProfileLoaderPass - This pass loads information from a path
+ // profile dump file.
+ //
+ ModulePass *createPathProfileLoaderPass();
+ extern char &PathProfileLoaderPassID;
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createNoPathProfileInfoPass - This pass implements the default
+ // "no path profile".
+ //
+ ImmutablePass *createNoPathProfileInfoPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createPathProfileVerifierPass - This pass verifies path profiling
+ // information.
+ //
+ ModulePass *createPathProfileVerifierPass();
+
+ //===--------------------------------------------------------------------===//
+ //
// createDSAAPass - This pass implements simple context sensitive alias
// analysis.
//
@@ -140,7 +162,7 @@ namespace llvm {
// createLiveValuesPass - This creates an instance of the LiveValues pass.
//
FunctionPass *createLiveValuesPass();
-
+
//===--------------------------------------------------------------------===//
//
/// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
@@ -153,7 +175,7 @@ namespace llvm {
// LoopDependenceAnalysis pass.
//
LoopPass *createLoopDependenceAnalysisPass();
-
+
// Minor pass prototypes, allowing us to expose them through bugpoint and
// analyze.
FunctionPass *createInstCountPass();
@@ -170,6 +192,13 @@ namespace llvm {
// Print module-level debug info metadata in human-readable form.
ModulePass *createModuleDebugInfoPrinterPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createMemDepPrinter - This pass exhaustively collects all memdep
+ // information and prints it with -analyze.
+ //
+ FunctionPass *createMemDepPrinter();
}
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/PathNumbering.h b/contrib/llvm/include/llvm/Analysis/PathNumbering.h
new file mode 100644
index 0000000..7025e28
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/PathNumbering.h
@@ -0,0 +1,304 @@
+//===- PathNumbering.h ----------------------------------------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Ball-Larus path numbers uniquely identify paths through a directed acyclic
+// graph (DAG) [Ball96]. For a CFG, backedges are removed and replaced by
+// phony edges to obtain a DAG, which in turn makes the path numbers unique
+// [Ball96].
+//
+// The purpose of this analysis is to enumerate the edges in a CFG in order
+// to obtain paths from path numbers in a convenient manner. As described in
+// [Ball96], edges can be enumerated such that, given a path number, the
+// corresponding path is recovered by following the CFG and updating the
+// number at each edge.
+//
+// [Ball96]
+// T. Ball and J. R. Larus. "Efficient Path Profiling."
+// International Symposium on Microarchitecture, pages 46-57, 1996.
+// http://portal.acm.org/citation.cfm?id=243857
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PATH_NUMBERING_H
+#define LLVM_PATH_NUMBERING_H
+
+#include "llvm/BasicBlock.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Analysis/ProfileInfoTypes.h"
+#include <map>
+#include <stack>
+#include <vector>
+
+namespace llvm {
+class BallLarusNode;
+class BallLarusEdge;
+class BallLarusDag;
+
+// typedefs for storage/iterators of various DAG components
+typedef std::vector<BallLarusNode*> BLNodeVector;
+typedef std::vector<BallLarusNode*>::iterator BLNodeIterator;
+typedef std::vector<BallLarusEdge*> BLEdgeVector;
+typedef std::vector<BallLarusEdge*>::iterator BLEdgeIterator;
+typedef std::map<BasicBlock*, BallLarusNode*> BLBlockNodeMap;
+typedef std::stack<BallLarusNode*> BLNodeStack;
+
+// Represents a basic block with information necessary for the BallLarus
+// algorithms.
+class BallLarusNode {
+public:
+ enum NodeColor { WHITE, GRAY, BLACK };
+
+ // Constructor: Initializes a new Node for the given BasicBlock
+ BallLarusNode(BasicBlock* BB) :
+ _basicBlock(BB), _numberPaths(0), _color(WHITE) {
+ static unsigned nextUID = 0;
+ _uid = nextUID++;
+ }
+
+ // Returns the basic block for the BallLarusNode
+ BasicBlock* getBlock();
+
+ // Get/set the number of paths to the exit starting at the node.
+ unsigned getNumberPaths();
+ void setNumberPaths(unsigned numberPaths);
+
+ // Get/set the NodeColor used in graph algorithms.
+ NodeColor getColor();
+ void setColor(NodeColor color);
+
+ // Iterator information for predecessor edges. Includes phony and
+ // backedges.
+ BLEdgeIterator predBegin();
+ BLEdgeIterator predEnd();
+ unsigned getNumberPredEdges();
+
+ // Iterator information for successor edges. Includes phony and
+ // backedges.
+ BLEdgeIterator succBegin();
+ BLEdgeIterator succEnd();
+ unsigned getNumberSuccEdges();
+
+ // Add an edge to the predecessor list.
+ void addPredEdge(BallLarusEdge* edge);
+
+ // Remove an edge from the predecessor list.
+ void removePredEdge(BallLarusEdge* edge);
+
+ // Add an edge to the successor list.
+ void addSuccEdge(BallLarusEdge* edge);
+
+ // Remove an edge from the successor list.
+ void removeSuccEdge(BallLarusEdge* edge);
+
+ // Returns the name of the BasicBlock being represented. If BasicBlock
+ // is null then returns "<null>". If BasicBlock has no name, then
+ // "<unnamed>" is returned. Intended for use with debug output.
+ std::string getName();
+
+private:
+ // The corresponding underlying BB.
+ BasicBlock* _basicBlock;
+
+ // Holds the predecessor edges of this node.
+ BLEdgeVector _predEdges;
+
+ // Holds the successor edges of this node.
+ BLEdgeVector _succEdges;
+
+ // The number of paths from the node to the exit.
+ unsigned _numberPaths;
+
+ // 'Color' used by graph algorithms to mark the node.
+ NodeColor _color;
+
+  // Unique ID to ensure distinct names in dot graphs
+ unsigned _uid;
+
+ // Removes an edge from an edgeVector. Used by removePredEdge and
+ // removeSuccEdge.
+ void removeEdge(BLEdgeVector& v, BallLarusEdge* e);
+};
+
+// Represents an edge in the Dag. For an edge, v -> w, v is the source, and
+// w is the target.
+class BallLarusEdge {
+public:
+ enum EdgeType { NORMAL, BACKEDGE, SPLITEDGE,
+ BACKEDGE_PHONY, SPLITEDGE_PHONY, CALLEDGE_PHONY };
+
+  // Constructor: Initializes a BallLarusEdge with a source and target.
+ BallLarusEdge(BallLarusNode* source, BallLarusNode* target,
+ unsigned duplicateNumber)
+ : _source(source), _target(target), _weight(0), _edgeType(NORMAL),
+ _realEdge(NULL), _duplicateNumber(duplicateNumber) {}
+
+  // Returns the source/target node of this edge.
+ BallLarusNode* getSource() const;
+ BallLarusNode* getTarget() const;
+
+  // Gets the type of the edge.
+  EdgeType getType() const;
+
+  // Sets the type of the edge.
+  void setType(EdgeType type);
+
+ // Returns the weight of this edge. Used to decode path numbers to
+ // sequences of basic blocks.
+ unsigned getWeight();
+
+ // Sets the weight of the edge. Used during path numbering.
+ void setWeight(unsigned weight);
+
+ // Gets/sets the phony edge originating at the root.
+ BallLarusEdge* getPhonyRoot();
+ void setPhonyRoot(BallLarusEdge* phonyRoot);
+
+ // Gets/sets the phony edge terminating at the exit.
+ BallLarusEdge* getPhonyExit();
+ void setPhonyExit(BallLarusEdge* phonyExit);
+
+ // Gets/sets the associated real edge if this is a phony edge.
+ BallLarusEdge* getRealEdge();
+ void setRealEdge(BallLarusEdge* realEdge);
+
+ // Returns the duplicate number of the edge.
+ unsigned getDuplicateNumber();
+
+protected:
+ // Source node for this edge.
+ BallLarusNode* _source;
+
+ // Target node for this edge.
+ BallLarusNode* _target;
+
+private:
+  // Edge weight corresponding to path number increments before removing
+ // increments along a spanning tree. The sum over the edge weights gives
+ // the path number.
+ unsigned _weight;
+
+  // Type representing what this edge is intended for
+ EdgeType _edgeType;
+
+ // For backedges and split-edges, the phony edge which is linked to the
+ // root node of the DAG. This contains a path number initialization.
+ BallLarusEdge* _phonyRoot;
+
+ // For backedges and split-edges, the phony edge which is linked to the
+ // exit node of the DAG. This contains a path counter increment, and
+ // potentially a path number increment.
+ BallLarusEdge* _phonyExit;
+
+ // If this is a phony edge, _realEdge is a link to the back or split
+ // edge. Otherwise, this is null.
+ BallLarusEdge* _realEdge;
+
+ // An ID to differentiate between those edges which have the same source
+ // and destination blocks.
+ unsigned _duplicateNumber;
+};
+
+// Represents the Ball Larus DAG for a given Function. Can calculate
+// various properties required for instrumentation or analysis. E.g. the
+// edge weights that determine the path number.
+class BallLarusDag {
+public:
+ // Initializes a BallLarusDag from the CFG of a given function. Must
+ // call init() after creation, since some initialization requires
+ // virtual functions.
+ BallLarusDag(Function &F)
+ : _root(NULL), _exit(NULL), _function(F) {}
+
+ // Initialization that requires virtual functions which are not fully
+ // functional in the constructor.
+ void init();
+
+ // Frees all memory associated with the DAG.
+ virtual ~BallLarusDag();
+
+ // Calculate the path numbers by assigning edge increments as prescribed
+ // in Ball-Larus path profiling.
+ void calculatePathNumbers();
+
+ // Returns the number of paths for the DAG.
+ unsigned getNumberOfPaths();
+
+ // Returns the root (i.e. entry) node for the DAG.
+ BallLarusNode* getRoot();
+
+ // Returns the exit node for the DAG.
+ BallLarusNode* getExit();
+
+ // Returns the function for the DAG.
+ Function& getFunction();
+
+ // Clears the node colors.
+ void clearColors(BallLarusNode::NodeColor color);
+
+protected:
+ // All nodes in the DAG.
+ BLNodeVector _nodes;
+
+ // All edges in the DAG.
+ BLEdgeVector _edges;
+
+ // All backedges in the DAG.
+ BLEdgeVector _backEdges;
+
+ // Allows subclasses to determine which type of Node is created.
+ // Override this method to produce subclasses of BallLarusNode if
+ // necessary. The destructor of BallLarusDag will call free on each pointer
+ // created.
+ virtual BallLarusNode* createNode(BasicBlock* BB);
+
+ // Allows subclasses to determine which type of Edge is created.
+ // Override this method to produce subclasses of BallLarusEdge if
+ // necessary. Parameters source and target will have been created by
+ // createNode and can be cast to the subclass of BallLarusNode*
+ // returned by createNode. The destructor of BallLarusDag will call free
+ // on each pointer created.
+ virtual BallLarusEdge* createEdge(BallLarusNode* source, BallLarusNode*
+ target, unsigned duplicateNumber);
+
+ // Proxy to node's constructor. Updates the DAG state.
+ BallLarusNode* addNode(BasicBlock* BB);
+
+ // Proxy to edge's constructor. Updates the DAG state.
+ BallLarusEdge* addEdge(BallLarusNode* source, BallLarusNode* target,
+ unsigned duplicateNumber);
+
+private:
+ // The root (i.e. entry) node for this DAG.
+ BallLarusNode* _root;
+
+ // The exit node for this DAG.
+ BallLarusNode* _exit;
+
+ // The function represented by this DAG.
+ Function& _function;
+
+ // Processes one node and its immediate edges for building the DAG.
+ void buildNode(BLBlockNodeMap& inDag, std::stack<BallLarusNode*>& dfsStack);
+
+ // Process an edge in the CFG for DAG building.
+ void buildEdge(BLBlockNodeMap& inDag, std::stack<BallLarusNode*>& dfsStack,
+ BallLarusNode* currentNode, BasicBlock* succBB,
+ unsigned duplicateNumber);
+
+ // The weight on each edge is the increment required along any path that
+ // contains that edge.
+ void calculatePathNumbersFrom(BallLarusNode* node);
+
+ // Adds a backedge with its phony edges. Updates the DAG state.
+ void addBackedge(BallLarusNode* source, BallLarusNode* target,
+ unsigned duplicateCount);
+};
+} // end namespace llvm
+
+#endif
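For orientation, a standalone sketch of the edge-weight assignment this header models, under the usual Ball-Larus formulation: each node's path count is the sum of its successors' counts, and each outgoing edge's increment is the running sum of the successors already processed. The names below are illustrative, not LLVM API.

    #include <vector>

    struct Edge { int target; unsigned increment; };

    // Assign Ball-Larus increments on a DAG whose nodes are listed in
    // reverse topological order (exit first, entry last). Afterwards the
    // sum of increments along any entry-to-exit path is a unique number
    // in [0, total), where total is the returned path count.
    unsigned assignIncrements(std::vector<std::vector<Edge> > &dag,
                              const std::vector<int> &order) {
      std::vector<unsigned> numPaths(dag.size(), 0);
      for (size_t i = 0; i < order.size(); ++i) {
        int v = order[i];
        if (dag[v].empty()) { numPaths[v] = 1; continue; }  // exit node
        for (size_t j = 0; j < dag[v].size(); ++j) {
          dag[v][j].increment = numPaths[v];   // paths counted so far
          numPaths[v] += numPaths[dag[v][j].target];
        }
      }
      return numPaths[order.back()];           // paths from the entry
    }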
diff --git a/contrib/llvm/include/llvm/Analysis/PathProfileInfo.h b/contrib/llvm/include/llvm/Analysis/PathProfileInfo.h
new file mode 100644
index 0000000..263763f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/PathProfileInfo.h
@@ -0,0 +1,113 @@
+//===- PathProfileInfo.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file outlines the interface used by optimizers to load path profiles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PATHPROFILEINFO_H
+#define LLVM_PATHPROFILEINFO_H
+
+#include "llvm/BasicBlock.h"
+#include "llvm/Analysis/PathNumbering.h"
+#include <stack>
+
+namespace llvm {
+
+class ProfilePath;
+class ProfilePathEdge;
+class PathProfileInfo;
+
+typedef std::vector<ProfilePathEdge> ProfilePathEdgeVector;
+typedef std::vector<ProfilePathEdge>::iterator ProfilePathEdgeIterator;
+
+typedef std::vector<BasicBlock*> ProfilePathBlockVector;
+typedef std::vector<BasicBlock*>::iterator ProfilePathBlockIterator;
+
+typedef std::map<unsigned int,ProfilePath*> ProfilePathMap;
+typedef std::map<unsigned int,ProfilePath*>::iterator ProfilePathIterator;
+
+typedef std::map<Function*,unsigned int> FunctionPathCountMap;
+typedef std::map<Function*,ProfilePathMap> FunctionPathMap;
+typedef std::map<Function*,ProfilePathMap>::iterator FunctionPathIterator;
+
+class ProfilePathEdge {
+public:
+ ProfilePathEdge(BasicBlock* source, BasicBlock* target,
+ unsigned duplicateNumber);
+
+ inline unsigned getDuplicateNumber() { return _duplicateNumber; }
+ inline BasicBlock* getSource() { return _source; }
+ inline BasicBlock* getTarget() { return _target; }
+
+protected:
+ BasicBlock* _source;
+ BasicBlock* _target;
+ unsigned _duplicateNumber;
+};
+
+class ProfilePath {
+public:
+ ProfilePath(unsigned int number, unsigned int count,
+ double countStdDev, PathProfileInfo* ppi);
+
+ double getFrequency() const;
+
+ inline unsigned int getNumber() const { return _number; }
+ inline unsigned int getCount() const { return _count; }
+ inline double getCountStdDev() const { return _countStdDev; }
+
+ ProfilePathEdgeVector* getPathEdges() const;
+ ProfilePathBlockVector* getPathBlocks() const;
+
+ BasicBlock* getFirstBlockInPath() const;
+
+private:
+ unsigned int _number;
+ unsigned int _count;
+ double _countStdDev;
+
+ // pointer back to the profiling info
+ PathProfileInfo* _ppi;
+};
+
+// TODO: overload [] operator for getting path
+// Add: getFunctionCallCount()
+class PathProfileInfo {
+ public:
+ PathProfileInfo();
+ ~PathProfileInfo();
+
+ void setCurrentFunction(Function* F);
+ Function* getCurrentFunction() const;
+ BasicBlock* getCurrentFunctionEntry();
+
+ ProfilePath* getPath(unsigned int number);
+ unsigned int getPotentialPathCount();
+
+ ProfilePathIterator pathBegin();
+ ProfilePathIterator pathEnd();
+ unsigned int pathsRun();
+
+ static char ID; // Pass identification
+ std::string argList;
+
+protected:
+ FunctionPathMap _functionPaths;
+ FunctionPathCountMap _functionPathCounts;
+
+private:
+ BallLarusDag* _currentDag;
+ Function* _currentFunction;
+
+ friend class ProfilePath;
+};
+} // end namespace llvm
+
+#endif
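A hedged usage sketch against the interface declared above; loading of the profile happens elsewhere, and totalPathCount is an illustrative helper, not part of the header.

    #include "llvm/Analysis/PathProfileInfo.h"

    // Sum the execution counts of all paths recorded for function F.
    unsigned totalPathCount(llvm::PathProfileInfo &PPI, llvm::Function *F) {
      PPI.setCurrentFunction(F);
      unsigned total = 0;
      for (llvm::ProfilePathIterator I = PPI.pathBegin(), E = PPI.pathEnd();
           I != E; ++I)
        total += I->second->getCount();        // ProfilePath::getCount()
      return total;
    }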
diff --git a/contrib/llvm/include/llvm/Analysis/PointerTracking.h b/contrib/llvm/include/llvm/Analysis/PointerTracking.h
deleted file mode 100644
index 6b49e18..0000000
--- a/contrib/llvm/include/llvm/Analysis/PointerTracking.h
+++ /dev/null
@@ -1,132 +0,0 @@
-//===- PointerTracking.h - Pointer Bounds Tracking --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements tracking of pointer bounds.
-// It knows that the libc functions "calloc" and "realloc" allocate memory, thus
-// you should avoid using this pass if they mean something else for your
-// language.
-//
-// All methods assume that the pointer is not NULL, if it is then the returned
-// allocation size is wrong, and the result from checkLimits is wrong too.
-// It also assumes that pointers are valid, and that it is not analyzing a
-// use-after-free scenario.
-// Due to these limitations the "size" returned by these methods should be
-// considered as either 0 or the returned size.
-//
-// Another analysis pass should be used to find use-after-free/NULL dereference
-// bugs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_POINTERTRACKING_H
-#define LLVM_ANALYSIS_POINTERTRACKING_H
-
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Instructions.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/PredIteratorCache.h"
-
-namespace llvm {
- class DominatorTree;
- class ScalarEvolution;
- class SCEV;
- class Loop;
- class LoopInfo;
- class TargetData;
-
- // Result from solver, assuming pointer is not NULL,
- // and it is not a use-after-free situation.
- enum SolverResult {
- AlwaysFalse,// always false with above constraints
- AlwaysTrue,// always true with above constraints
- Unknown // it can sometimes be true, sometimes false, or it is undecided
- };
-
- class PointerTracking : public FunctionPass {
- public:
- typedef ICmpInst::Predicate Predicate;
- static char ID;
- PointerTracking();
-
- virtual bool doInitialization(Module &M);
-
- // If this pointer directly points to an allocation, return
- // the number of elements of type Ty allocated.
- // Otherwise return CouldNotCompute.
- // Since allocations can fail by returning NULL, the real element count
- // for every allocation is either 0 or the value returned by this function.
- const SCEV *getAllocationElementCount(Value *P) const;
-
- // Same as getAllocationSize() but returns size in bytes.
- // We consider one byte as 8 bits.
- const SCEV *getAllocationSizeInBytes(Value *V) const;
-
- // Given a Pointer, determine a base pointer of known size, and an offset
- // therefrom.
- // When unable to determine, sets Base to NULL, and Limit/Offset to
- // CouldNotCompute.
- // BaseSize, and Offset are in bytes: Pointer == Base + Offset
- void getPointerOffset(Value *Pointer, Value *&Base, const SCEV *& BaseSize,
- const SCEV *&Offset) const;
-
- // Compares the 2 scalar evolution expressions according to predicate,
- // and if it can prove that the result is always true or always false
- // return AlwaysTrue/AlwaysFalse. Otherwise it returns Unknown.
- enum SolverResult compareSCEV(const SCEV *A, Predicate Pred, const SCEV *B,
- const Loop *L);
-
- // Determines whether the condition LHS <Pred> RHS is sufficient
- // for the condition A <Pred> B to hold.
- // Currently only ULT/ULE is supported.
- // This errs on the side of returning false.
- bool conditionSufficient(const SCEV *LHS, Predicate Pred1, const SCEV *RHS,
- const SCEV *A, Predicate Pred2, const SCEV *B,
- const Loop *L);
-
- // Determines whether Offset is known to be always in [0, Limit) bounds.
- // This errs on the side of returning Unknown.
- enum SolverResult checkLimits(const SCEV *Offset, const SCEV *Limit,
- BasicBlock *BB);
-
- virtual bool runOnFunction(Function &F);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- void print(raw_ostream &OS, const Module* = 0) const;
- Value *computeAllocationCountValue(Value *P, const Type *&Ty) const;
- private:
- Function *FF;
- TargetData *TD;
- ScalarEvolution *SE;
- LoopInfo *LI;
- DominatorTree *DT;
-
- Function *callocFunc;
- Function *reallocFunc;
- PredIteratorCache predCache;
-
- SmallPtrSet<const SCEV*, 1> analyzing;
-
- enum SolverResult isLoopGuardedBy(const Loop *L, Predicate Pred,
- const SCEV *A, const SCEV *B) const;
- static bool isMonotonic(const SCEV *S);
- bool scevPositive(const SCEV *A, const Loop *L, bool strict=true) const;
- bool conditionSufficient(Value *Cond, bool negated,
- const SCEV *A, Predicate Pred, const SCEV *B);
- Value *getConditionToReach(BasicBlock *A,
- DomTreeNodeBase<BasicBlock> *B,
- bool &negated);
- Value *getConditionToReach(BasicBlock *A,
- BasicBlock *B,
- bool &negated);
- const SCEV *computeAllocationCount(Value *P, const Type *&Ty) const;
- const SCEV *computeAllocationCountForType(Value *P, const Type *Ty) const;
- };
-}
-#endif
-
diff --git a/contrib/llvm/include/llvm/Analysis/PostDominators.h b/contrib/llvm/include/llvm/Analysis/PostDominators.h
index 46ce820..2cd6ae3 100644
--- a/contrib/llvm/include/llvm/Analysis/PostDominators.h
+++ b/contrib/llvm/include/llvm/Analysis/PostDominators.h
@@ -14,7 +14,7 @@
#ifndef LLVM_ANALYSIS_POST_DOMINATORS_H
#define LLVM_ANALYSIS_POST_DOMINATORS_H
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominanceFrontier.h"
namespace llvm {
@@ -26,6 +26,7 @@ struct PostDominatorTree : public FunctionPass {
DominatorTreeBase<BasicBlock>* DT;
PostDominatorTree() : FunctionPass(ID) {
+ initializePostDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<BasicBlock>(true);
}
@@ -106,7 +107,9 @@ template <> struct GraphTraits<PostDominatorTree*>
struct PostDominanceFrontier : public DominanceFrontierBase {
static char ID;
PostDominanceFrontier()
- : DominanceFrontierBase(ID, true) {}
+ : DominanceFrontierBase(ID, true) {
+ initializePostDominanceFrontierPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &) {
Frontiers.clear();
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h b/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
index 0d531d5..6b4ac85 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileInfoTypes.h
@@ -1,4 +1,4 @@
-/*===-- ProfileInfoTypes.h - Profiling info shared constants ------*- C -*-===*\
+/*===-- ProfileInfoTypes.h - Profiling info shared constants --------------===*\
|*
|* The LLVM Compiler Infrastructure
|*
@@ -16,6 +16,17 @@
#ifndef LLVM_ANALYSIS_PROFILEINFOTYPES_H
#define LLVM_ANALYSIS_PROFILEINFOTYPES_H
+/* Included by libprofile. */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* IDs to distinguish between path counters stored in hashes vs arrays */
+enum ProfilingStorageType {
+ ProfilingArray = 1,
+ ProfilingHash = 2
+};
+
enum ProfilingType {
ArgumentInfo = 1, /* The command line argument block */
FunctionInfo = 2, /* Function profiling information */
@@ -26,4 +37,24 @@ enum ProfilingType {
OptEdgeInfo = 7 /* Edge profiling information, optimal version */
};
+/*
+ * The header for tables that map path numbers to path counters.
+ */
+typedef struct {
+ unsigned fnNumber; /* function number for these counters */
+ unsigned numEntries; /* number of entries stored */
+} PathProfileHeader;
+
+/*
+ * Describes an entry in a tagged table for path counters.
+ */
+typedef struct {
+ unsigned pathNumber;
+ unsigned pathCounter;
+} PathProfileTableEntry;
+
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* LLVM_ANALYSIS_PROFILEINFOTYPES_H */
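To make the table layout concrete, a sketch that assumes a table is stored as one PathProfileHeader followed immediately by numEntries PathProfileTableEntry records; the framing around the table (ProfilingType tags and the like) is the loader's business.

    #include <cstdio>
    #include <vector>

    // Read one path-counter table: the header first, then its entries.
    bool readPathTable(FILE *f, PathProfileHeader &header,
                       std::vector<PathProfileTableEntry> &entries) {
      if (fread(&header, sizeof(header), 1, f) != 1)
        return false;
      entries.resize(header.numEntries);
      if (header.numEntries == 0)
        return true;
      return fread(&entries[0], sizeof(PathProfileTableEntry),
                   header.numEntries, f) == header.numEntries;
    }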
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfo.h b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
index 7a2670f..a36ca11 100644
--- a/contrib/llvm/include/llvm/Analysis/RegionInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
@@ -58,6 +58,7 @@ class RegionNode {
// DO NOT IMPLEMENT
const RegionNode &operator=(const RegionNode &);
+protected:
/// This is the entry basic block that starts this region node. If this is a
/// BasicBlock RegionNode, then entry is just the basic block, that this
/// RegionNode represents. Otherwise it is the entry of this (Sub)RegionNode.
@@ -70,7 +71,6 @@ class RegionNode {
/// RegionNode.
PointerIntPair<BasicBlock*, 1, bool> entry;
-protected:
/// @brief The parent Region of this RegionNode.
/// @see getParent()
Region* parent;
@@ -257,6 +257,18 @@ public:
/// @return The entry BasicBlock of the region.
BasicBlock *getEntry() const { return RegionNode::getEntry(); }
+ /// @brief Replace the entry basic block of the region with the new basic
+ /// block.
+ ///
+ /// @param BB The new entry basic block of the region.
+ void replaceEntry(BasicBlock *BB);
+
+ /// @brief Replace the exit basic block of the region with the new basic
+ /// block.
+ ///
+ /// @param BB The new exit basic block of the region.
+ void replaceExit(BasicBlock *BB);
+
/// @brief Get the exit BasicBlock of the Region.
/// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
/// Region.
@@ -280,6 +292,33 @@ public:
/// @return The depth of the region.
unsigned getDepth() const;
+ /// @brief Check if a Region is the TopLevel region.
+ ///
+ /// The toplevel region represents the whole function.
+ bool isTopLevelRegion() const { return exit == NULL; }
+
+ /// @brief Return a new (non-canonical) region that is obtained by joining
+ /// this region with its predecessors.
+ ///
+ /// @return A region also starting at getEntry(), but reaching to the next
+ /// basic block that forms with getEntry() a (non-canonical) region.
+ /// NULL if such a basic block does not exist.
+ Region *getExpandedRegion() const;
+
+ /// @brief Return the first block of this region's single entry edge,
+ /// if it exists.
+ ///
+ /// @return The BasicBlock starting this region's single entry edge,
+ /// else NULL.
+ BasicBlock *getEnteringBlock() const;
+
+ /// @brief Return the first block of this region's single exit edge,
+ /// if it exists.
+ ///
+ /// @return The BasicBlock starting this region's single exit edge,
+ /// else NULL.
+ BasicBlock *getExitingBlock() const;
+
/// @brief Is this a simple region?
///
/// A region is simple if it has exactly one exit and one entry edge.
@@ -386,7 +425,9 @@ public:
/// @brief Add a new subregion to this Region.
///
/// @param SubRegion The new subregion that will be added.
- void addSubRegion(Region *SubRegion);
+ /// @param moveChildren Move the children of this region that are also
+ /// contained in SubRegion into SubRegion.
+ void addSubRegion(Region *SubRegion, bool moveChildren = false);
/// @brief Remove a subregion from this Region.
///
@@ -565,6 +606,12 @@ public:
/// region containing BB.
Region *getRegionFor(BasicBlock *BB) const;
+ /// @brief Set the smallest region that surrounds a basic block.
+ ///
+ /// @param BB The basic block surrounded by a region.
+ /// @param R The smallest region that surrounds BB.
+ void setRegionFor(BasicBlock *BB, Region *R);
+
/// @brief A shortcut for getRegionFor().
///
/// @param BB The basic block.
@@ -610,6 +657,12 @@ public:
return TopLevelRegion;
}
+ /// @brief Update RegionInfo after a basic block was split.
+ ///
+ /// @param NewBB The basic block that was created before OldBB.
+ /// @param OldBB The old basic block.
+ void splitBlock(BasicBlock* NewBB, BasicBlock *OldBB);
+
/// @brief Clear the Node Cache for all Regions.
///
/// @see Region::clearNodeCache()
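A small sketch using the accessors added above (illustrative helper name): a region has a single entry edge exactly when getEnteringBlock() is non-null, and likewise for the exit side, which is close to what isSimple() checks.

    // True if the region is entered and exited over single edges.
    bool hasSingleEntryAndExitEdge(const llvm::Region *R) {
      return R->getEnteringBlock() != 0 && R->getExitingBlock() != 0;
    }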
diff --git a/contrib/llvm/include/llvm/Analysis/RegionPass.h b/contrib/llvm/include/llvm/Analysis/RegionPass.h
new file mode 100644
index 0000000..aedc06a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionPass.h
@@ -0,0 +1,126 @@
+//===- RegionPass.h - RegionPass class ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegionPass class. All region based analysis,
+// optimization and transformation passes are derived from RegionPass.
+// This class is implemented following some of the ideas of LoopPass.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_REGION_PASS_H
+#define LLVM_REGION_PASS_H
+
+#include "llvm/Analysis/RegionInfo.h"
+
+#include "llvm/Pass.h"
+#include "llvm/PassManagers.h"
+#include "llvm/Function.h"
+
+#include <deque>
+
+namespace llvm {
+
+class RGPassManager;
+class Function;
+
+//===----------------------------------------------------------------------===//
+/// @brief A pass that runs on each Region in a function.
+///
+/// RegionPass is managed by RGPassManager.
+class RegionPass : public Pass {
+public:
+ explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}
+
+ //===--------------------------------------------------------------------===//
+ /// @name To be implemented by every RegionPass
+ ///
+ //@{
+ /// @brief Run the pass on a specific Region
+ ///
+ /// Accessing regions not contained in the current region is not allowed.
+ ///
+ /// @param R The region this pass is run on.
+ /// @param RGM The RegionPassManager that manages this Pass.
+ ///
+ /// @return True if the pass modifies this Region.
+ virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;
+
+ /// @brief Get a pass to print the LLVM IR in the region.
+ ///
+ /// @param O The output stream to print the Region.
+ /// @param Banner The banner to separate different printed passes.
+ ///
+ /// @return The pass to print the LLVM IR in the region.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
+
+ virtual bool doInitialization(Region *R, RGPassManager &RGM) { return false; }
+ virtual bool doFinalization() { return false; }
+ //@}
+
+ //===--------------------------------------------------------------------===//
+ /// @name PassManager API
+ ///
+ //@{
+ void preparePassManager(PMStack &PMS);
+
+ virtual void assignPassManager(PMStack &PMS,
+ PassManagerType PMT = PMT_RegionPassManager);
+
+ virtual PassManagerType getPotentialPassManagerType() const {
+ return PMT_RegionPassManager;
+ }
+ //@}
+};
+
+/// @brief The pass manager to schedule RegionPasses.
+class RGPassManager : public FunctionPass, public PMDataManager {
+ std::deque<Region*> RQ;
+ bool skipThisRegion;
+ bool redoThisRegion;
+ RegionInfo *RI;
+ Region *CurrentRegion;
+
+public:
+ static char ID;
+ explicit RGPassManager(int Depth);
+
+ /// @brief Execute all of the passes scheduled for execution.
+ ///
+ /// @return True if any of the passes modifies the function.
+ bool runOnFunction(Function &F);
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ /// RGPassManager needs RegionInfo.
+ void getAnalysisUsage(AnalysisUsage &Info) const;
+
+ virtual const char *getPassName() const {
+ return "Region Pass Manager";
+ }
+
+ virtual PMDataManager *getAsPMDataManager() { return this; }
+ virtual Pass *getAsPass() { return this; }
+
+ /// @brief Print passes managed by this manager.
+ void dumpPassStructure(unsigned Offset);
+
+ /// @brief Get a pass contained by this manager.
+ Pass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ Pass *FP = static_cast<Pass *>(PassVector[N]);
+ return FP;
+ }
+
+ virtual PassManagerType getPassManagerType() const {
+ return PMT_RegionPassManager;
+ }
+};
+
+} // End llvm namespace
+
+#endif
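A minimal skeleton against the interface above, as a sketch; the pass name is illustrative and the INITIALIZE_PASS registration boilerplate is omitted.

    #include "llvm/Analysis/RegionPass.h"

    namespace {
    struct RegionDepthPass : public llvm::RegionPass {
      static char ID;
      RegionDepthPass() : llvm::RegionPass(ID) {}

      // Analysis only: inspect the region, change nothing.
      virtual bool runOnRegion(llvm::Region *R, llvm::RGPassManager &RGM) {
        unsigned Depth = R->getDepth();  // declared in RegionInfo.h
        (void)Depth;
        return false;                    // the region was not modified
      }
    };
    char RegionDepthPass::ID = 0;
    }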
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 1fa94e9..d193806 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -24,7 +24,7 @@
#include "llvm/Pass.h"
#include "llvm/Instructions.h"
#include "llvm/Function.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ConstantRange.h"
@@ -70,27 +70,16 @@ namespace llvm {
private:
SCEV(const SCEV &); // DO NOT IMPLEMENT
void operator=(const SCEV &); // DO NOT IMPLEMENT
- protected:
- virtual ~SCEV();
+
public:
explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
unsigned getSCEVType() const { return SCEVType; }
- /// isLoopInvariant - Return true if the value of this SCEV is unchanging in
- /// the specified loop.
- virtual bool isLoopInvariant(const Loop *L) const = 0;
-
- /// hasComputableLoopEvolution - Return true if this SCEV changes value in a
- /// known way in the specified loop. This property being true implies that
- /// the value is variant in the loop AND that we can emit an expression to
- /// compute the value of the expression at any particular loop iteration.
- virtual bool hasComputableLoopEvolution(const Loop *L) const = 0;
-
/// getType - Return the LLVM type of this SCEV expression.
///
- virtual const Type *getType() const = 0;
+ const Type *getType() const;
/// isZero - Return true if the expression is a constant zero.
///
@@ -105,22 +94,10 @@ namespace llvm {
///
bool isAllOnesValue() const;
- /// hasOperand - Test whether this SCEV has Op as a direct or
- /// indirect operand.
- virtual bool hasOperand(const SCEV *Op) const = 0;
-
- /// dominates - Return true if elements that makes up this SCEV dominates
- /// the specified basic block.
- virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const = 0;
-
- /// properlyDominates - Return true if elements that makes up this SCEV
- /// properly dominate the specified basic block.
- virtual bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const = 0;
-
/// print - Print out the internal representation of this scalar to the
/// specified stream. This should really only be used for debugging
/// purposes.
- virtual void print(raw_ostream &OS) const = 0;
+ void print(raw_ostream &OS) const;
/// dump - This method is used for debugging.
///
@@ -155,21 +132,6 @@ namespace llvm {
struct SCEVCouldNotCompute : public SCEV {
SCEVCouldNotCompute();
- // None of these methods are valid for this object.
- virtual bool isLoopInvariant(const Loop *L) const;
- virtual const Type *getType() const;
- virtual bool hasComputableLoopEvolution(const Loop *L) const;
- virtual void print(raw_ostream &OS) const;
- virtual bool hasOperand(const SCEV *Op) const;
-
- virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const {
- return true;
- }
-
- virtual bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return true;
- }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVCouldNotCompute *S) { return true; }
static bool classof(const SCEV *S);
@@ -180,6 +142,24 @@ namespace llvm {
/// they must ask this class for services.
///
class ScalarEvolution : public FunctionPass {
+ public:
+ /// LoopDisposition - An enum describing the relationship between a
+ /// SCEV and a loop.
+ enum LoopDisposition {
+ LoopVariant, ///< The SCEV is loop-variant (unknown).
+ LoopInvariant, ///< The SCEV is loop-invariant.
+ LoopComputable ///< The SCEV varies predictably with the loop.
+ };
+
+ /// BlockDisposition - An enum describing the relationship between a
+ /// SCEV and a basic block.
+ enum BlockDisposition {
+ DoesNotDominateBlock, ///< The SCEV does not dominate the block.
+ DominatesBlock, ///< The SCEV dominates the block.
+ ProperlyDominatesBlock ///< The SCEV properly dominates the block.
+ };
+
+ private:
/// SCEVCallbackVH - A CallbackVH to arrange for ScalarEvolution to be
/// notified whenever a Value is deleted.
class SCEVCallbackVH : public CallbackVH {
@@ -267,6 +247,46 @@ namespace llvm {
std::map<const SCEV *,
std::map<const Loop *, const SCEV *> > ValuesAtScopes;
+ /// LoopDispositions - Memoized computeLoopDisposition results.
+ std::map<const SCEV *,
+ std::map<const Loop *, LoopDisposition> > LoopDispositions;
+
+ /// computeLoopDisposition - Compute a LoopDisposition value.
+ LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
+
+ /// BlockDispositions - Memoized computeBlockDisposition results.
+ std::map<const SCEV *,
+ std::map<const BasicBlock *, BlockDisposition> > BlockDispositions;
+
+ /// computeBlockDisposition - Compute a BlockDisposition value.
+ BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+ /// UnsignedRanges - Memoized results from getUnsignedRange
+ DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
+
+ /// SignedRanges - Memoized results from getSignedRange
+ DenseMap<const SCEV *, ConstantRange> SignedRanges;
+
+ /// setUnsignedRange - Set the memoized unsigned range for the given SCEV.
+ const ConstantRange &setUnsignedRange(const SCEV *S,
+ const ConstantRange &CR) {
+ std::pair<DenseMap<const SCEV *, ConstantRange>::iterator, bool> Pair =
+ UnsignedRanges.insert(std::make_pair(S, CR));
+ if (!Pair.second)
+ Pair.first->second = CR;
+ return Pair.first->second;
+ }
+
+ /// setSignedRange - Set the memoized signed range for the given SCEV.
+ const ConstantRange &setSignedRange(const SCEV *S,
+ const ConstantRange &CR) {
+ std::pair<DenseMap<const SCEV *, ConstantRange>::iterator, bool> Pair =
+ SignedRanges.insert(std::make_pair(S, CR));
+ if (!Pair.second)
+ Pair.first->second = CR;
+ return Pair.first->second;
+ }
+
/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
const SCEV *createSCEV(Value *V);
@@ -408,6 +428,9 @@ namespace llvm {
bool isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
+ /// forgetMemoizedResults - Drop memoized information computed for S.
+ void forgetMemoizedResults(const SCEV *S);
+
public:
static char ID; // Pass identification, replacement for typeid
ScalarEvolution();
@@ -514,10 +537,11 @@ namespace llvm {
///
const SCEV *getNotSCEV(const SCEV *V);
- /// getMinusSCEV - Return LHS-RHS.
- ///
- const SCEV *getMinusSCEV(const SCEV *LHS,
- const SCEV *RHS);
+ /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1,
+ /// and thus the HasNUW and HasNSW bits apply to the resultant add, not to
+ /// whether the sub would have overflowed.
+ const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ bool HasNUW = false, bool HasNSW = false);
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
/// of the input value to the specified type. If the type must be
@@ -675,6 +699,36 @@ namespace llvm {
const SCEV *&LHS,
const SCEV *&RHS);
+ /// getLoopDisposition - Return the "disposition" of the given SCEV with
+ /// respect to the given loop.
+ LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+
+ /// isLoopInvariant - Return true if the value of the given SCEV is
+ /// unchanging in the specified loop.
+ bool isLoopInvariant(const SCEV *S, const Loop *L);
+
+ /// hasComputableLoopEvolution - Return true if the given SCEV changes value
+ /// in a known way in the specified loop. This property being true implies
+ /// that the value is variant in the loop AND that we can emit an expression
+ /// to compute the value of the expression at any particular loop iteration.
+ bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+
+ /// getBlockDisposition - Return the "disposition" of the given SCEV with
+ /// respect to the given block.
+ BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+ /// dominates - Return true if the elements that make up the given SCEV
+ /// dominate the specified basic block.
+ bool dominates(const SCEV *S, const BasicBlock *BB);
+
+ /// properlyDominates - Return true if the elements that make up the given
+ /// properly dominate the specified basic block.
+ bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+
+ /// hasOperand - Test whether the given SCEV has Op as a direct or
+ /// indirect operand.
+ bool hasOperand(const SCEV *S, const SCEV *Op) const;
+
virtual bool runOnFunction(Function &F);
virtual void releaseMemory();
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
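The shape of the new API, as a sketch: queries that used to be virtual calls on the SCEV object are now asked of ScalarEvolution, which memoizes the dispositions. The helper name is illustrative.

    // True if S is invariant in L and its elements dominate Preheader.
    bool hoistableFrom(llvm::ScalarEvolution &SE, const llvm::SCEV *S,
                       const llvm::Loop *L, llvm::BasicBlock *Preheader) {
      return SE.isLoopInvariant(S, L)     // was: S->isLoopInvariant(L)
          && SE.dominates(S, Preheader);  // was: S->dominates(Preheader, DT)
    }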
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
index 4b02f82..39d378e 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -35,6 +35,9 @@ namespace llvm {
std::set<AssertingVH<Value> > InsertedValues;
std::set<AssertingVH<Value> > InsertedPostIncValues;
+ /// RelevantLoops - A memoization of the "relevant" loop for a given SCEV.
+ DenseMap<const SCEV *, const Loop *> RelevantLoops;
+
/// PostIncLoops - Addrecs referring to any of the given loops are expanded
/// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
/// returns the add instruction that adds one to the phi for {0,+,1}<L>,
@@ -168,6 +171,9 @@ namespace llvm {
return InsertedValues.count(I) || InsertedPostIncValues.count(I);
}
+ /// getRelevantLoop - Determine the most "relevant" loop for the given SCEV.
+ const Loop *getRelevantLoop(const SCEV *);
+
Value *visitConstant(const SCEVConstant *S) {
return S->getValue();
}
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 4213a28..db432c8 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -42,29 +42,7 @@ namespace llvm {
public:
ConstantInt *getValue() const { return V; }
- virtual bool isLoopInvariant(const Loop *L) const {
- return true;
- }
-
- virtual bool hasComputableLoopEvolution(const Loop *L) const {
- return false; // Not loop variant
- }
-
- virtual const Type *getType() const;
-
- virtual bool hasOperand(const SCEV *) const {
- return false;
- }
-
- bool dominates(BasicBlock *BB, DominatorTree *DT) const {
- return true;
- }
-
- bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return true;
- }
-
- virtual void print(raw_ostream &OS) const;
+ const Type *getType() const { return V->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVConstant *S) { return true; }
@@ -86,23 +64,7 @@ namespace llvm {
public:
const SCEV *getOperand() const { return Op; }
- virtual const Type *getType() const { return Ty; }
-
- virtual bool isLoopInvariant(const Loop *L) const {
- return Op->isLoopInvariant(L);
- }
-
- virtual bool hasComputableLoopEvolution(const Loop *L) const {
- return Op->hasComputableLoopEvolution(L);
- }
-
- virtual bool hasOperand(const SCEV *O) const {
- return Op == O || Op->hasOperand(O);
- }
-
- virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const;
-
- virtual bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const;
+ const Type *getType() const { return Ty; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVCastExpr *S) { return true; }
@@ -124,8 +86,6 @@ namespace llvm {
const SCEV *op, const Type *ty);
public:
- virtual void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVTruncateExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -144,8 +104,6 @@ namespace llvm {
const SCEV *op, const Type *ty);
public:
- virtual void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVZeroExtendExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -164,8 +122,6 @@ namespace llvm {
const SCEV *op, const Type *ty);
public:
- virtual void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVSignExtendExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -202,20 +158,7 @@ namespace llvm {
op_iterator op_begin() const { return Operands; }
op_iterator op_end() const { return Operands + NumOperands; }
- virtual bool isLoopInvariant(const Loop *L) const;
-
- // hasComputableLoopEvolution - N-ary expressions have computable loop
- // evolutions iff they have at least one operand that varies with the loop,
- // but that all varying operands are computable.
- virtual bool hasComputableLoopEvolution(const Loop *L) const;
-
- virtual bool hasOperand(const SCEV *O) const;
-
- bool dominates(BasicBlock *BB, DominatorTree *DT) const;
-
- bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const;
-
- virtual const Type *getType() const { return getOperand(0)->getType(); }
+ const Type *getType() const { return getOperand(0)->getType(); }
bool hasNoUnsignedWrap() const { return SubclassData & (1 << 0); }
void setHasNoUnsignedWrap(bool B) {
@@ -248,10 +191,6 @@ namespace llvm {
: SCEVNAryExpr(ID, T, O, N) {}
public:
- virtual const char *getOperationStr() const = 0;
-
- virtual void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVCommutativeExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -275,9 +214,7 @@ namespace llvm {
}
public:
- virtual const char *getOperationStr() const { return " + "; }
-
- virtual const Type *getType() const {
+ const Type *getType() const {
// Use the type of the last operand, which is likely to be a pointer
// type, if there is one. This doesn't usually matter, but it can help
// reduce casts when the expressions are expanded.
@@ -303,8 +240,6 @@ namespace llvm {
}
public:
- virtual const char *getOperationStr() const { return " * "; }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVMulExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -328,27 +263,15 @@ namespace llvm {
const SCEV *getLHS() const { return LHS; }
const SCEV *getRHS() const { return RHS; }
- virtual bool isLoopInvariant(const Loop *L) const {
- return LHS->isLoopInvariant(L) && RHS->isLoopInvariant(L);
- }
-
- virtual bool hasComputableLoopEvolution(const Loop *L) const {
- return LHS->hasComputableLoopEvolution(L) &&
- RHS->hasComputableLoopEvolution(L);
- }
-
- virtual bool hasOperand(const SCEV *O) const {
- return O == LHS || O == RHS || LHS->hasOperand(O) || RHS->hasOperand(O);
+ const Type *getType() const {
+ // In most cases the types of LHS and RHS will be the same, but in some
+ // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
+ // depend on the type for correctness, but handling types carefully can
+ // avoid extra casts in the SCEVExpander. The LHS is more likely to be
+ // a pointer type than the RHS, so use the RHS' type here.
+ return getRHS()->getType();
}
- bool dominates(BasicBlock *BB, DominatorTree *DT) const;
-
- bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const;
-
- virtual const Type *getType() const;
-
- void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVUDivExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -373,11 +296,7 @@ namespace llvm {
SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N, const Loop *l)
- : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {
- for (size_t i = 0, e = NumOperands; i != e; ++i)
- assert(Operands[i]->isLoopInvariant(l) &&
- "Operands of AddRec must be loop-invariant!");
- }
+ : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
public:
const SCEV *getStart() const { return Operands[0]; }
@@ -393,16 +312,6 @@ namespace llvm {
getLoop());
}
- virtual bool hasComputableLoopEvolution(const Loop *QL) const {
- return L == QL;
- }
-
- virtual bool isLoopInvariant(const Loop *QueryLoop) const;
-
- bool dominates(BasicBlock *BB, DominatorTree *DT) const;
-
- bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const;
-
/// isAffine - Return true if this is an affine AddRec (i.e., it represents
  /// an expression A+B*x where A and B are loop invariant values).
bool isAffine() const {
@@ -437,8 +346,6 @@ namespace llvm {
return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE)));
}
- virtual void print(raw_ostream &OS) const;
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVAddRecExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -462,8 +369,6 @@ namespace llvm {
}
public:
- virtual const char *getOperationStr() const { return " smax "; }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVSMaxExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -487,8 +392,6 @@ namespace llvm {
}
public:
- virtual const char *getOperationStr() const { return " umax "; }
-
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVUMaxExpr *S) { return true; }
static inline bool classof(const SCEV *S) {
@@ -534,22 +437,7 @@ namespace llvm {
bool isAlignOf(const Type *&AllocTy) const;
bool isOffsetOf(const Type *&STy, Constant *&FieldNo) const;
- virtual bool isLoopInvariant(const Loop *L) const;
- virtual bool hasComputableLoopEvolution(const Loop *QL) const {
- return false; // not computable
- }
-
- virtual bool hasOperand(const SCEV *) const {
- return false;
- }
-
- bool dominates(BasicBlock *BB, DominatorTree *DT) const;
-
- bool properlyDominates(BasicBlock *BB, DominatorTree *DT) const;
-
- virtual const Type *getType() const;
-
- virtual void print(raw_ostream &OS) const;
+ const Type *getType() const { return getValPtr()->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEVUnknown *S) { return true; }
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index 7b6026f..6df1693 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -15,7 +15,7 @@
#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
namespace llvm {
@@ -39,6 +39,23 @@ namespace llvm {
APInt &KnownOne, const TargetData *TD = 0,
unsigned Depth = 0);
+ /// ComputeSignBit - Determine whether the sign bit is known to be zero or
+ /// one. Convenience wrapper around ComputeMaskedBits.
+ void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+ const TargetData *TD = 0, unsigned Depth = 0);
+
+ /// isPowerOfTwo - Return true if the given value is known to have exactly one
+ /// bit set when defined. For vectors return true if every element is known to
+ /// be a power of two when defined. Supports values with integer or pointer
+ /// type and vectors of integers.
+ bool isPowerOfTwo(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+
+ /// isKnownNonZero - Return true if the given value is known to be non-zero
+ /// when defined. For vectors return true if every element is known to be
+ /// non-zero when defined. Supports values with integer or pointer type and
+ /// vectors of integers.
+ bool isKnownNonZero(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be
/// zero for bits that V cannot have.
@@ -77,7 +94,13 @@ namespace llvm {
///
bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
-
+ /// isBytewiseValue - If the specified value can be set by repeating the same
+ /// byte in memory, return the i8 value that it is represented with. This is
+ /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
+ /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
+ /// byte store (e.g. i16 0x1234), return null.
+ Value *isBytewiseValue(Value *V);
+
  /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
  /// the scalar value indexed is already around as a register, for example if
  /// it were inserted directly into the aggregate.
@@ -97,6 +120,17 @@ namespace llvm {
return FindInsertedValue(V, &Idxs[0], &Idxs[1], InsertBefore);
}
+ /// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
+ /// it can be expressed as a base pointer plus a constant offset. Return the
+ /// base and offset to the caller.
+ Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
+ const TargetData &TD);
+ static inline const Value *
+ GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
+ const TargetData &TD) {
+ return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
+ }
+
/// GetConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false. If
@@ -110,6 +144,20 @@ namespace llvm {
/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(Value *V);
+
+ /// GetUnderlyingObject - This method strips off any GEP address adjustments
+ /// and pointer casts from the specified value, returning the original object
+ /// being addressed. Note that the returned value has pointer type if the
+ /// specified value does. If the MaxLookup value is non-zero, it limits the
+ /// number of instructions to be stripped off.
+ Value *GetUnderlyingObject(Value *V, const TargetData *TD = 0,
+ unsigned MaxLookup = 6);
+ static inline const Value *
+ GetUnderlyingObject(const Value *V, const TargetData *TD = 0,
+ unsigned MaxLookup = 6) {
+ return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
+ }
+
} // end namespace llvm
#endif
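A sketch combining the new helpers (the wrapper name is illustrative): GetPointerBaseWithConstantOffset takes the TargetData by reference, while GetUnderlyingObject merely consults it when provided.

    // Express Ptr as base plus constant offset, then strip remaining
    // GEPs and casts from the base down to the underlying object.
    llvm::Value *baseAndOffset(llvm::Value *Ptr, int64_t &Offset,
                               const llvm::TargetData &TD) {
      Offset = 0;
      llvm::Value *Base =
          llvm::GetPointerBaseWithConstantOffset(Ptr, Offset, TD);
      return llvm::GetUnderlyingObject(Base, &TD);  // default MaxLookup = 6
    }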
diff --git a/contrib/llvm/include/llvm/Attributes.h b/contrib/llvm/include/llvm/Attributes.h
index 1296d67..da6188b 100644
--- a/contrib/llvm/include/llvm/Attributes.h
+++ b/contrib/llvm/include/llvm/Attributes.h
@@ -65,6 +65,8 @@ const Attributes StackAlignment = 7<<26; ///< Alignment of stack for
///of alignment with +1 bias
///0 means unaligned (different from
///alignstack(1))
+const Attributes Hotpatch = 1<<29; ///< Function should have special
+ ///'hotpatch' sequence in prologue
/// @brief Attributes that only apply to function parameters.
const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture;
@@ -73,7 +75,8 @@ const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture;
/// be used on return values or function parameters.
const Attributes FunctionOnly = NoReturn | NoUnwind | ReadNone | ReadOnly |
NoInline | AlwaysInline | OptimizeForSize | StackProtect | StackProtectReq |
- NoRedZone | NoImplicitFloat | Naked | InlineHint | StackAlignment;
+ NoRedZone | NoImplicitFloat | Naked | InlineHint | StackAlignment |
+ Hotpatch;
/// @brief Parameter attributes that do not apply to vararg call arguments.
const Attributes VarArgsIncompatible = StructRet;
@@ -223,7 +226,7 @@ public:
/// paramHasAttr - Return true if the specified parameter index has the
/// specified attribute set.
bool paramHasAttr(unsigned Idx, Attributes Attr) const {
- return getAttributes(Idx) & Attr;
+ return (getAttributes(Idx) & Attr) != 0;
}
/// getParamAlignment - Return the alignment for the specified function
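The change to paramHasAttr above makes the bit test explicit rather than relying on an implicit integer-to-bool conversion. A sketch of the idiom, assuming Attributes is an unsigned bitmask typedef as in this era of the tree:

    typedef unsigned Attributes;          // assumption for illustration
    const Attributes Hotpatch = 1u << 29;

    // The AND yields an Attributes value; compare against zero instead
    // of returning the integer as bool.
    bool hasAttr(Attributes set, Attributes attr) {
      return (set & attr) != 0;
    }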
diff --git a/contrib/llvm/include/llvm/BasicBlock.h b/contrib/llvm/include/llvm/BasicBlock.h
index bf5874f..7e7c9e7 100644
--- a/contrib/llvm/include/llvm/BasicBlock.h
+++ b/contrib/llvm/include/llvm/BasicBlock.h
@@ -18,7 +18,7 @@
#include "llvm/SymbolTableListTraits.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -58,9 +58,9 @@ private:
/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block
/// represents a label to which a branch can jump.
///
-/// A well formed basic block is formed of a list of non-terminating
-/// instructions followed by a single TerminatorInst instruction.
-/// TerminatorInst's may not occur in the middle of basic blocks, and must
+/// A well formed basic block is formed of a list of non-terminating
+/// instructions followed by a single TerminatorInst instruction.
+/// TerminatorInst's may not occur in the middle of basic blocks, and must
/// terminate the blocks. The BasicBlock class allows malformed basic blocks to
/// occur because it may be useful in the intermediate stage of constructing or
/// modifying a program. However, the verifier will ensure that basic blocks
@@ -90,7 +90,7 @@ private:
public:
/// getContext - Get the context in which this basic block lives.
LLVMContext &getContext() const;
-
+
/// Instruction iterators...
typedef InstListType::iterator iterator;
typedef InstListType::const_iterator const_iterator;
@@ -98,7 +98,7 @@ public:
/// Create - Creates a new BasicBlock. If the Parent parameter is specified,
/// the basic block is automatically inserted at either the end of the
/// function (if InsertBefore is 0), or before the specified basic block.
- static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
+ static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
Function *Parent = 0,BasicBlock *InsertBefore = 0) {
return new BasicBlock(Context, Name, Parent, InsertBefore);
}
@@ -114,15 +114,15 @@ public:
/// and BlockAddress's).
User *use_back() { return cast<User>(*use_begin());}
const User *use_back() const { return cast<User>(*use_begin());}
-
+
/// getTerminator() - If this is a well formed basic block, then this returns
/// a pointer to the terminator instruction. If it is not, then you get a
/// null pointer back.
///
TerminatorInst *getTerminator();
const TerminatorInst *getTerminator() const;
-
- /// Returns a pointer to the first instructon in this block that is not a
+
+ /// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction. When adding instruction to the beginning of the
/// basic block, they should be added before the returned value, not before
/// the first instruction, which might be PHI.
@@ -137,7 +137,7 @@ public:
const Instruction* getFirstNonPHIOrDbg() const {
return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbg();
}
-
+
/// removeFromParent - This method unlinks 'this' from the containing
/// function, but does not delete it.
///
@@ -147,15 +147,15 @@ public:
/// and deletes it.
///
void eraseFromParent();
-
+
/// moveBefore - Unlink this basic block from its current function and
/// insert it into the function that MovePos lives in, right before MovePos.
void moveBefore(BasicBlock *MovePos);
-
+
/// moveAfter - Unlink this basic block from its current function and
/// insert it into the function that MovePos lives in, right after MovePos.
void moveAfter(BasicBlock *MovePos);
-
+
/// getSinglePredecessor - If this basic block has a single predecessor block,
/// return the block, otherwise return a null pointer.
@@ -166,8 +166,8 @@ public:
/// getUniquePredecessor - If this basic block has a unique predecessor block,
/// return the block, otherwise return a null pointer.
- /// Note that unique predecessor doesn't mean single edge, there can be
- /// multiple edges from the unique predecessor to this block (for example
+ /// Note that a unique predecessor doesn't mean a single edge; there can be
+ /// multiple edges from the unique predecessor to this block (for example
/// a switch statement with multiple cases having the same destination).
BasicBlock *getUniquePredecessor();
const BasicBlock *getUniquePredecessor() const {
@@ -247,7 +247,7 @@ public:
/// hasAddressTaken - returns true if there are any uses of this basic block
/// other than direct branches, switches, etc. to it.
bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; }
-
+
private:
/// AdjustBlockAddressRefCount - BasicBlock stores the number of BlockAddress
/// objects using it. This is almost always 0, sometimes one, possibly but
diff --git a/contrib/llvm/include/llvm/Bitcode/Archive.h b/contrib/llvm/include/llvm/Bitcode/Archive.h
index 934e764..c3c07d8 100644
--- a/contrib/llvm/include/llvm/Bitcode/Archive.h
+++ b/contrib/llvm/include/llvm/Bitcode/Archive.h
@@ -19,12 +19,13 @@
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <map>
#include <set>
namespace llvm {
class MemoryBuffer;
+ class raw_ostream;
// Forward declare classes
class Module; // From VMCore
@@ -82,7 +83,7 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
unsigned getGroup() const { return info.getGroup(); }
/// The "mode" specifies the access permissions for the file per Unix
- /// security. This may not have any applicabiity on non-Unix systems but is
+ /// security. This may not have any applicability on non-Unix systems but is
/// a required component of the "ar" file format.
/// @brief Get the permission mode associated with this archive member.
unsigned getMode() const { return info.getMode(); }
@@ -144,7 +145,7 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// allowed that doesn't have this restriction. This method determines if
/// that "long format" is used for this member.
/// @returns true iff the file name uses the long form
- /// @brief Determin if the member has a long file name
+ /// @brief Determine if the member has a long file name
bool hasLongFilename() const { return flags&HasLongFilenameFlag; }
/// This method returns the status info (like Unix stat(2)) for the archive
@@ -402,7 +403,7 @@ class Archive {
/// bitcode archive. It first makes sure the symbol table has been loaded
/// and has a non-zero size. If it does, then it is an archive. If not,
/// then it tries to load all the bitcode modules of the archive. Finally,
- /// it returns whether it was successfull.
+ /// it returns whether it was successful.
/// @returns true if the archive is a proper llvm bitcode archive
/// @brief Determine whether the archive is a proper llvm bitcode archive.
bool isBitcodeArchive();
@@ -482,7 +483,7 @@ class Archive {
bool loadSymbolTable(std::string* ErrMessage);
/// @brief Write the symbol table to an ofstream.
- void writeSymbolTable(std::ofstream& ARFile);
+ void writeSymbolTable(raw_ostream& ARFile);
/// Writes one ArchiveMember to an ofstream. If an error occurs, returns
/// false, otherwise true. If an error occurs and error is non-null then
@@ -491,7 +492,7 @@ class Archive {
/// @returns true Writing member failed, \p error set to error message
bool writeMember(
const ArchiveMember& member, ///< The member to be written
- std::ofstream& ARFile, ///< The file to write member onto
+ raw_ostream& ARFile, ///< The file to write member onto
bool CreateSymbolTable, ///< Should symbol table be created?
bool TruncateNames, ///< Should names be truncated to 11 chars?
bool ShouldCompress, ///< Should the member be compressed?
diff --git a/contrib/llvm/include/llvm/Bitcode/BitCodes.h b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
index ada2e65..449dc35 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
@@ -19,7 +19,7 @@
#define LLVM_BITCODE_BITCODES_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 4f9b783..7692bd2 100644
--- a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -94,7 +94,9 @@ namespace bitc {
TYPE_CODE_FP128 = 14, // LONG DOUBLE (112 bit mantissa)
TYPE_CODE_PPC_FP128= 15, // PPC LONG DOUBLE (2 doubles)
- TYPE_CODE_METADATA = 16 // METADATA
+ TYPE_CODE_METADATA = 16, // METADATA
+
+ TYPE_CODE_X86_MMX = 17 // X86 MMX
};
// The type symbol table only has one code (TST_ENTRY_CODE).
@@ -197,10 +199,10 @@ namespace bitc {
OBO_NO_SIGNED_WRAP = 1
};
- /// SDivOperatorOptionalFlags - Flags for serializing SDivOperator's
- /// SubclassOptionalData contents.
- enum SDivOperatorOptionalFlags {
- SDIV_EXACT = 0
+ /// PossiblyExactOperatorOptionalFlags - Flags for serializing
+ /// PossiblyExactOperator's SubclassOptionalData contents.
+ enum PossiblyExactOperatorOptionalFlags {
+ PEO_EXACT = 0
};
// The function body block (FUNCTION_BLOCK_ID) describes function bodies. It
diff --git a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
index a186964..fa754c0 100644
--- a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -33,6 +33,15 @@ namespace llvm {
LLVMContext& Context,
std::string *ErrMsg = 0);
+ /// getBitcodeTargetTriple - Read the header of the specified bitcode
+ /// buffer and extract just the triple information. If successful,
+ /// this returns a string and *does not* take ownership
+ /// of 'buffer'. On error, this returns "", and fills in *ErrMsg
+ /// if ErrMsg is non-null.
+ std::string getBitcodeTargetTriple(MemoryBuffer *Buffer,
+ LLVMContext& Context,
+ std::string *ErrMsg = 0);
+
/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
/// If an error occurs, this returns null and fills in *ErrMsg if it is
/// non-null. This method *never* takes ownership of Buffer.
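Hedged usage of the declaration above (illustrative wrapper): ownership of the buffer stays with the caller in every case.

    #include <string>

    // Returns the target triple recorded in the bitcode header, or ""
    // on error, in which case Err carries the message.
    std::string tripleOf(llvm::MemoryBuffer *Buf, llvm::LLVMContext &Ctx,
                         std::string &Err) {
      return llvm::getBitcodeTargetTriple(Buf, Ctx, &Err);
    }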
diff --git a/contrib/llvm/include/llvm/CallingConv.h b/contrib/llvm/include/llvm/CallingConv.h
index b0481b9..4c5ee62 100644
--- a/contrib/llvm/include/llvm/CallingConv.h
+++ b/contrib/llvm/include/llvm/CallingConv.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines LLVM's set of calling conventions.
+// This file defines LLVM's set of calling conventions.
//
//===----------------------------------------------------------------------===//
@@ -20,21 +20,21 @@ namespace llvm {
/// the well-known calling conventions.
///
namespace CallingConv {
- /// A set of enums which specify the assigned numeric values for known llvm
+ /// A set of enums which specify the assigned numeric values for known llvm
/// calling conventions.
/// @brief LLVM Calling Convention Representation
enum ID {
/// C - The default llvm calling convention, compatible with C. This
/// convention is the only calling convention that supports varargs calls.
- /// As with typical C calling conventions, the callee/caller have to
+ /// As with typical C calling conventions, the callee/caller have to
/// tolerate certain amounts of prototype mismatch.
C = 0,
-
+
// Generic LLVM calling conventions. None of these calling conventions
// support varargs calls, and all assume that the caller and callee
// prototype exactly match.
- /// Fast - This calling convention attempts to make calls as fast as
+ /// Fast - This calling convention attempts to make calls as fast as
/// possible (e.g. by passing things in registers).
Fast = 8,
@@ -79,7 +79,22 @@ namespace CallingConv {
/// X86_ThisCall - Similar to X86_StdCall. Passes first argument in ECX,
/// others via stack. Callee is responsible for stack cleaning. MSVC uses
/// this by default for methods in its ABI.
- X86_ThisCall = 70
+ X86_ThisCall = 70,
+
+ /// PTX_Kernel - Call to a PTX kernel.
+ /// Passes all arguments in parameter space.
+ PTX_Kernel = 71,
+
+ /// PTX_Device - Call to a PTX device function.
+ /// Passes all arguments in register or parameter space.
+ PTX_Device = 72,
+
+ /// MBLAZE_INTR - Calling convention used for MBlaze interrupt routines.
+ MBLAZE_INTR = 73,
+
+ /// MBLAZE_SVOL - Calling convention used for MBlaze interrupt support
+ /// routines (i.e. GCC's save_volatiles attribute).
+ MBLAZE_SVOL = 74
};
} // End CallingConv namespace
diff --git a/contrib/llvm/include/llvm/CodeGen/Analysis.h b/contrib/llvm/include/llvm/CodeGen/Analysis.h
index f33a9db..78bf9fc 100644
--- a/contrib/llvm/include/llvm/CodeGen/Analysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/Analysis.h
@@ -23,14 +23,16 @@
namespace llvm {
-class TargetLowering;
class GlobalVariable;
+class TargetLowering;
+class SDNode;
+class SelectionDAG;
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
-unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+unsigned ComputeLinearIndex(const Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex = 0);
@@ -52,7 +54,7 @@ GlobalVariable *ExtractTypeInfo(Value *V);
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
-bool hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
+bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
const TargetLowering &TLI);
/// getFCmpCondCode - Return the ISD condition code corresponding to
@@ -75,6 +77,9 @@ ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
const TargetLowering &TLI);
+bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
+ const TargetLowering &TLI);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index b018603..357b933 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -17,7 +17,7 @@
#define LLVM_CODEGEN_ASMPRINTER_H
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Support/DebugLoc.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class BlockAddress;
@@ -49,6 +49,7 @@ namespace llvm {
class MCSection;
class MCStreamer;
class MCSymbol;
+ class MDNode;
class DwarfDebug;
class DwarfException;
class Mangler;
@@ -388,7 +389,7 @@ namespace llvm {
/// frame.
void EmitFrameMoves(const std::vector<MachineMove> &Moves,
MCSymbol *BaseLabel, bool isEH) const;
-
+ void EmitCFIFrameMoves(const std::vector<MachineMove> &Moves) const;
//===------------------------------------------------------------------===//
// Inline Asm Support
@@ -432,7 +433,7 @@ namespace llvm {
mutable unsigned SetCounter;
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
- void EmitInlineAsm(StringRef Str, unsigned LocCookie) const;
+ void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0) const;
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
diff --git a/contrib/llvm/include/llvm/CodeGen/BinaryObject.h b/contrib/llvm/include/llvm/CodeGen/BinaryObject.h
index 3ade7c9..8c1431f 100644
--- a/contrib/llvm/include/llvm/CodeGen/BinaryObject.h
+++ b/contrib/llvm/include/llvm/CodeGen/BinaryObject.h
@@ -16,7 +16,7 @@
#define LLVM_CODEGEN_BINARYOBJECT_H
#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h b/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h
index 240734f..853ebf9 100644
--- a/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h
+++ b/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h
@@ -20,6 +20,26 @@ namespace llvm {
class LiveIntervals;
class MachineLoopInfo;
+ /// normalizeSpillWeight - The spill weight of a live interval is computed as:
+ ///
+ /// (sum(use freq) + sum(def freq)) / (K + size)
+ ///
+ /// @param UseDefFreq Expected number of executed use and def instructions
+ /// per function call. Derived from block frequencies.
+ /// @param Size Size of live interval as returned by getSize()
+ ///
+ static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size) {
+ // The magic constant 200 corresponds to approx. 25 instructions since
+ // SlotIndexes allocate 8 slots per instruction.
+ //
+ // The constant is added to avoid depending too much on accidental SlotIndex
+ // gaps for small intervals. The effect is that small intervals have a spill
+ // weight that is mostly proportional to the number of uses, while large
+ // intervals get a spill weight that is closer to a use density.
+ //
+ return UseDefFreq / (Size + 200);
+ }
+
/// VirtRegAuxInfo - Calculate auxiliary information for a virtual
/// register such as its spill weight and allocation hint.
class VirtRegAuxInfo {
@@ -48,7 +68,9 @@ namespace llvm {
public:
static char ID;
- CalculateSpillWeights() : MachineFunctionPass(ID) {}
+ CalculateSpillWeights() : MachineFunctionPass(ID) {
+ initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &au) const;
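To see what the normalization above does numerically, here is a small standalone sketch (the formula and the constant 200 are taken from the hunk above; the frequencies and sizes are invented for illustration):

    #include <cstdio>

    static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size) {
      return UseDefFreq / (Size + 200);
    }

    int main() {
      // Small interval: ~3 instructions (24 slots at 8 slots/instruction),
      // 4 expected uses+defs per call -> weight dominated by the use count.
      std::printf("small: %g\n", normalizeSpillWeight(4.0f, 24));   // ~0.0179
      // Large interval: ~250 instructions (2000 slots), same activity ->
      // weight approaches a use density and is much smaller.
      std::printf("large: %g\n", normalizeSpillWeight(4.0f, 2000)); // ~0.00182
      return 0;
    }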
diff --git a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
index 6fb8436..2a9bbdf 100644
--- a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -57,14 +57,14 @@ private:
LocInfo HTP : 6;
/// ValVT - The type of the value being assigned.
- EVT ValVT;
+ MVT ValVT;
/// LocVT - The type of the location being assigned to.
- EVT LocVT;
+ MVT LocVT;
public:
- static CCValAssign getReg(unsigned ValNo, EVT ValVT,
- unsigned RegNo, EVT LocVT,
+ static CCValAssign getReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret.ValNo = ValNo;
@@ -77,8 +77,8 @@ public:
return Ret;
}
- static CCValAssign getCustomReg(unsigned ValNo, EVT ValVT,
- unsigned RegNo, EVT LocVT,
+ static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
@@ -86,8 +86,8 @@ public:
return Ret;
}
- static CCValAssign getMem(unsigned ValNo, EVT ValVT,
- unsigned Offset, EVT LocVT,
+ static CCValAssign getMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret.ValNo = ValNo;
@@ -100,8 +100,8 @@ public:
return Ret;
}
- static CCValAssign getCustomMem(unsigned ValNo, EVT ValVT,
- unsigned Offset, EVT LocVT,
+ static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
LocInfo HTP) {
CCValAssign Ret;
Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
@@ -110,7 +110,7 @@ public:
}
unsigned getValNo() const { return ValNo; }
- EVT getValVT() const { return ValVT; }
+ MVT getValVT() const { return ValVT; }
bool isRegLoc() const { return !isMem; }
bool isMemLoc() const { return isMem; }
@@ -119,7 +119,7 @@ public:
unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
- EVT getLocVT() const { return LocVT; }
+ MVT getLocVT() const { return LocVT; }
LocInfo getLocInfo() const { return HTP; }
bool isExtInLoc() const {
@@ -129,16 +129,16 @@ public:
};
/// CCAssignFn - This function assigns a location for Val, updating State to
-/// reflect the change.
-typedef bool CCAssignFn(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
+/// reflect the change. It returns 'true' if it failed to handle Val.
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State);
/// CCCustomFn - This function assigns a location for Val, possibly updating
/// all args to reflect changes and indicates if it handled it. It must set
/// isCustom if it handles the arg and returns true.
-typedef bool CCCustomFn(unsigned &ValNo, EVT &ValVT,
- EVT &LocVT, CCValAssign::LocInfo &LocInfo,
+typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State);
/// CCState - This class holds information needed while lowering arguments and
@@ -198,7 +198,7 @@ public:
/// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags.
- void AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn);
@@ -209,7 +209,7 @@ public:
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
- void AnalyzeCallResult(EVT VT, CCAssignFn Fn);
+ void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
/// getFirstUnallocated - Return the first unallocated register in the set, or
/// NumRegs if they are all allocated.
@@ -284,8 +284,8 @@ public:
// HandleByVal - Allocate a stack slot large enough to pass an argument by
// value. The size and alignment information of the argument is encoded in its
// parameter attribute.
- void HandleByVal(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
+ void HandleByVal(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
private:
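Given the new MVT-based CCAssignFn signature above, a custom convention can be sketched as follows (the CC_Toy name and the register numbers are invented; AllocateReg, AllocateStack, and addLoc are the usual CCState helpers):

    #include "llvm/CodeGen/CallingConvLower.h"

    // Toy convention: first two values in registers, the rest in 4-byte
    // stack slots. Returning true would mean "failed to handle Val".
    static bool CC_Toy(unsigned ValNo, llvm::MVT ValVT, llvm::MVT LocVT,
                       llvm::CCValAssign::LocInfo LocInfo,
                       llvm::ISD::ArgFlagsTy ArgFlags, llvm::CCState &State) {
      static const unsigned Regs[] = { 1, 2 }; // hypothetical target regs
      if (unsigned Reg = State.AllocateReg(Regs, 2)) {
        State.addLoc(llvm::CCValAssign::getReg(ValNo, ValVT, Reg, LocVT,
                                               LocInfo));
        return false; // handled
      }
      unsigned Off = State.AllocateStack(4, 4);
      State.addLoc(llvm::CCValAssign::getMem(ValNo, ValVT, Off, LocVT,
                                             LocInfo));
      return false; // handled
    }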
diff --git a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
new file mode 100644
index 0000000..2c5215a
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
@@ -0,0 +1,61 @@
+//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
+// edges leaving a machine basic block are in the same bundle, and all edges
+// entering a machine basic block are in the same bundle.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
+#define LLVM_CODEGEN_EDGEBUNDLES_H
+
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class EdgeBundles : public MachineFunctionPass {
+ const MachineFunction *MF;
+
+ /// EC - Each edge bundle is an equivalence class. The keys are:
+ /// 2*BB->getNumber() -> Ingoing bundle.
+ /// 2*BB->getNumber()+1 -> Outgoing bundle.
+ IntEqClasses EC;
+
+public:
+ static char ID;
+ EdgeBundles() : MachineFunctionPass(ID) {}
+
+ /// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
+ /// bundle number for basic block #N
+ unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
+
+ /// getNumBundles - Return the total number of bundles in the CFG.
+ unsigned getNumBundles() const { return EC.getNumClasses(); }
+
+ /// getMachineFunction - Return the last machine function computed.
+ const MachineFunction *getMachineFunction() const { return MF; }
+
+ /// view - Visualize the annotated bipartite CFG with Graphviz.
+ void view() const;
+
+private:
+ virtual bool runOnMachineFunction(MachineFunction&);
+ virtual void getAnalysisUsage(AnalysisUsage&) const;
+};
+
+/// Specialize WriteGraph; the standard implementation won't work.
+raw_ostream &WriteGraph(raw_ostream &O, const EdgeBundles &G,
+ bool ShortNames = false,
+ const std::string &Title = "");
+
+} // end namespace llvm
+
+#endif
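The 2*N / 2*N+1 key encoding documented above can be wrapped like this (a trivial sketch against the interface in this header):

    #include "llvm/CodeGen/EdgeBundles.h"

    // Ingoing bundle of block #N: key 2*N (Out promotes to 0).
    unsigned ingoingBundle(const llvm::EdgeBundles &EB, unsigned N) {
      return EB.getBundle(N, false);
    }
    // Outgoing bundle of block #N: key 2*N+1 (Out promotes to 1).
    unsigned outgoingBundle(const llvm::EdgeBundles &EB, unsigned N) {
      return EB.getBundle(N, true);
    }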
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index 79b1554..fbb1200 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -15,9 +15,6 @@
#define LLVM_CODEGEN_FASTISEL_H
#include "llvm/ADT/DenseMap.h"
-#ifndef NDEBUG
-#include "llvm/ADT/SmallSet.h"
-#endif
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
@@ -39,6 +36,7 @@ class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
+class LoadInst;
/// FastISel - This is a fast-path instruction selection class that
/// generates poor code and doesn't support illegal types or non-trivial
@@ -102,7 +100,16 @@ public:
/// index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
- /// recomputeInsertPt - Reset InsertPt to prepare for insterting instructions
+ /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+ /// vreg is being provided by the specified load instruction. If possible,
+ /// try to fold the load as an operand to the instruction, returning true
+ /// on success.
+ virtual bool TryToFoldLoad(MachineInstr * /*MI*/, unsigned /*OpNo*/,
+ const LoadInst * /*LI*/) {
+ return false;
+ }
+
+ /// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
/// into the current block.
void recomputeInsertPt();
diff --git a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index f17fe5a..27631b7 100644
--- a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -19,6 +19,7 @@
#include "llvm/Instructions.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#ifndef NDEBUG
#include "llvm/ADT/SmallSet.h"
@@ -27,6 +28,7 @@
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
namespace llvm {
@@ -104,9 +106,8 @@ public:
LiveOutInfo() : NumSignBits(0), KnownOne(1, 0), KnownZero(1, 0) {}
};
- /// LiveOutRegInfo - Information about live out vregs, indexed by their
- /// register number offset by 'FirstVirtualRegister'.
- std::vector<LiveOutInfo> LiveOutRegInfo;
+ /// LiveOutRegInfo - Information about live out vregs.
+ IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
/// PHINodesToUpdate - A list of phi instructions whose operand list will
/// be updated after processing the current basic block.
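The switch to IndexedMap above removes the manual 'FirstVirtualRegister' offset; the functor maps virtual register numbers to dense indices internally. A minimal sketch of the access pattern (illustrative, not the actual pass code):

    #include "llvm/ADT/IndexedMap.h"
    #include "llvm/Target/TargetRegisterInfo.h"

    void touchVReg(llvm::IndexedMap<int, llvm::VirtReg2IndexFunctor> &Map,
                   unsigned VirtReg) {
      Map.grow(VirtReg); // grow() and operator[] take the raw register
      Map[VirtReg] = 1;  // number; the functor converts it to an index
    }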
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
index b401068..45469ed 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -36,6 +36,7 @@
#include "llvm/Pass.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/DebugLoc.h"
namespace llvm {
class AsmPrinter;
@@ -59,8 +60,10 @@ namespace llvm {
struct GCPoint {
GC::PointKind Kind; //< The kind of the safe point.
MCSymbol *Label; //< A label.
+ DebugLoc Loc;
- GCPoint(GC::PointKind K, MCSymbol *L) : Kind(K), Label(L) {}
+ GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
+ : Kind(K), Label(L), Loc(DL) {}
};
/// GCRoot - Metadata for a pointer to an object managed by the garbage
@@ -121,8 +124,8 @@ namespace llvm {
/// addSafePoint - Notes the existence of a safe point. Label is the symbol
/// emitted just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
- void addSafePoint(GC::PointKind Kind, MCSymbol *Label) {
- SafePoints.push_back(GCPoint(Kind, Label));
+ void addSafePoint(GC::PointKind Kind, MCSymbol *Label, DebugLoc DL) {
+ SafePoints.push_back(GCPoint(Kind, Label, DL));
}
/// getFrameSize/setFrameSize - Records the function's frame size.
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 2e23f4e..3da11c4 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -107,6 +107,13 @@ namespace ISD {
// and returns an outchain.
EH_SJLJ_LONGJMP,
+ // OUTCHAIN = EH_SJLJ_DISPATCHSETUP(INCHAIN, context)
+ // This corresponds to the eh.sjlj.dispatchsetup intrinsic. It takes an
+ // input chain and a pointer to the sjlj function context as inputs and
+ // returns an outchain. By default, this does nothing. Targets can lower
+ // this to unwind setup code if needed.
+ EH_SJLJ_DISPATCHSETUP,
+
// TargetConstant* - Like Constant*, but the DAG does not do any folding,
// simplification, or lowering of the constant. They are used for constants
// which are known to fit in the immediate fields of their users, or for
@@ -262,16 +269,24 @@ namespace ISD {
/// lengths of the input vectors.
CONCAT_VECTORS,
+ /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
+ /// with VECTOR2 inserted into VECTOR1 at the (potentially
+ /// variable) element number IDX, which must be a multiple of the
+ /// VECTOR2 vector length. The elements of VECTOR1 starting at
+ /// IDX are overwritten with VECTOR2. Elements IDX through
+ /// IDX + vector_length(VECTOR2) - 1 must be valid VECTOR1 indices.
+ INSERT_SUBVECTOR,
+
/// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
- /// vector value) starting with the (potentially variable) element number
- /// IDX, which must be a multiple of the result vector length.
+ /// vector value) starting with the element number IDX, which must be a
+ /// constant multiple of the result vector length.
EXTRACT_SUBVECTOR,
- /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+ /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
/// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
/// values that indicate which value (or undef) each result element will
- /// get. These constant ints are accessible through the
- /// ShuffleVectorSDNode class. This is quite similar to the Altivec
+ /// get. These constant ints are accessible through the
+ /// ShuffleVectorSDNode class. This is quite similar to the Altivec
/// 'vperm' instruction, except that the indices must be constants and are
/// in terms of the element size of VEC1/VEC2, not in terms of bytes.
VECTOR_SHUFFLE,
@@ -288,13 +303,21 @@ namespace ISD {
// an unsigned/signed value of type i[2*N], then return the top part.
MULHU, MULHS,
- // Bitwise operators - logical and, logical or, logical xor, shift left,
- // shift right algebraic (shift in sign bits), shift right logical (shift in
- // zeroes), rotate left, rotate right, and byteswap.
- AND, OR, XOR, SHL, SRA, SRL, ROTL, ROTR, BSWAP,
+ /// Bitwise operators - logical and, logical or, logical xor.
+ AND, OR, XOR,
+
+ /// Shift and rotation operations. After legalization, the type of the
+ /// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
+ /// the shift amount can be any type, but care must be taken to ensure it is
+ /// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before
+ /// legalization, types like i1024 can occur and i8 doesn't have enough bits
+ /// to represent the shift amount. By convention, DAGCombine and
+ /// SelectionDAGBuilder force these shift amounts to i32 for simplicity.
+ ///
+ SHL, SRA, SRL, ROTL, ROTR,
- // Counting operators
- CTTZ, CTLZ, CTPOP,
+ /// Byte Swap and Counting operators.
+ BSWAP, CTTZ, CTLZ, CTPOP,
// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
// i1 then the high bits must conform to getBooleanContents.
@@ -392,14 +415,14 @@ namespace ISD {
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
FP_EXTEND,
- // BIT_CONVERT - This operator converts between integer, vector and FP
+ // BITCAST - This operator converts between integer, vector and FP
// values, as if the value was stored to memory with one type and loaded
// from the same address with the other type (or equivalently for vector
// format conversions, etc). The source and result are required to have
// the same bit size (e.g. f32 <-> i32). This can also be used for
// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
// getNode().
- BIT_CONVERT,
+ BITCAST,
// CONVERT_RNDSAT - This operator is used to support various conversions
// between various types (float, signed, unsigned and vectors of those
@@ -475,6 +498,7 @@ namespace ISD {
// Operand #0 : Input chain.
// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
+ // Operand #3 : HasSideEffect, IsAlignStack bits.
// After this, it is followed by a list of operands with this format:
// ConstantSDNode: Flags that encode whether it is a mem or not, the
// # of operands that follow, etc. See InlineAsm.h.
@@ -525,7 +549,7 @@ namespace ISD {
// SRCVALUE - This is a node type that holds a Value* that is used to
// make reference to a value in the LLVM IR.
SRCVALUE,
-
+
// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
// reference metadata in the IR.
MDNODE_SDNODE,
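The BITCAST semantics described above ("stored with one type, loaded with the other") correspond to a plain bit reinterpretation; in C++ terms, roughly (illustrative only, the DAG node itself is IR, not library code):

    #include <cstring>
    #include <stdint.h>

    uint32_t bitcastF32ToI32(float F) {
      uint32_t I;
      std::memcpy(&I, &F, sizeof I); // same bits, e.g. 1.0f -> 0x3F800000
      return I;
    }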
diff --git a/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
index eefbc45..767b666 100644
--- a/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
+++ b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
@@ -48,6 +48,11 @@ namespace llvm {
/// be capable of handling this kind of change.
///
void LowerIntrinsicCall(CallInst *CI);
+
+ /// LowerToByteSwap - Replace a call instruction with a call to the bswap
+ /// intrinsic. Returns false if it determines the call is not a
+ /// simple integer bswap.
+ static bool LowerToByteSwap(CallInst *CI);
};
}
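A hypothetical caller of the new helper (the surrounding rewrite policy is invented for illustration):

    #include "llvm/CodeGen/IntrinsicLowering.h"
    #include "llvm/Instructions.h"

    // If CI is a hand-rolled byte swap that the helper recognizes, it is
    // rewritten in place to use the bswap intrinsic; otherwise it reports
    // false and the call is left untouched.
    bool tryRewriteAsBswap(llvm::CallInst *CI) {
      return llvm::IntrinsicLowering::LowerToByteSwap(CI);
    }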
diff --git a/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h b/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
index eb373fb..fea8523 100644
--- a/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
+++ b/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
@@ -18,7 +18,7 @@
#define LLVM_CODEGEN_JITCODEEMITTER_H
#include <string>
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/ADT/DenseMap.h"
diff --git a/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
index 13cebea..1ed2547 100644
--- a/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
+++ b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -20,25 +20,25 @@
namespace llvm {
class LatencyPriorityQueue;
-
+
/// Sorting functions for the Available queue.
struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
LatencyPriorityQueue *PQ;
explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
-
+
bool operator()(const SUnit* left, const SUnit* right) const;
};
class LatencyPriorityQueue : public SchedulingPriorityQueue {
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
-
+
/// NumNodesSolelyBlocking - This vector contains, for every node in the
/// Queue, the number of nodes that the node is the sole unscheduled
/// predecessor for. This is used as a tie-breaker heuristic for better
/// mobility.
std::vector<unsigned> NumNodesSolelyBlocking;
-
+
/// Queue - The queue.
std::vector<SUnit*> Queue;
latency_sort Picker;
@@ -47,6 +47,8 @@ namespace llvm {
LatencyPriorityQueue() : Picker(this) {
}
+ bool isBottomUp() const { return false; }
+
void initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
NumNodesSolelyBlocking.resize(SUnits->size(), 0);
@@ -62,25 +64,27 @@ namespace llvm {
void releaseState() {
SUnits = 0;
}
-
+
unsigned getLatency(unsigned NodeNum) const {
assert(NodeNum < (*SUnits).size());
return (*SUnits)[NodeNum].getHeight();
}
-
+
unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
assert(NodeNum < NumNodesSolelyBlocking.size());
return NumNodesSolelyBlocking[NodeNum];
}
-
+
bool empty() const { return Queue.empty(); }
-
+
virtual void push(SUnit *U);
-
+
virtual SUnit *pop();
virtual void remove(SUnit *SU);
+ virtual void dump(ScheduleDAG* DAG) const;
+
// ScheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
index cd8293d..c931261 100644
--- a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -34,8 +34,10 @@ namespace {
(void) llvm::createDeadMachineInstructionElimPass();
(void) llvm::createFastRegisterAllocator();
+ (void) llvm::createBasicRegisterAllocator();
(void) llvm::createLinearScanRegisterAllocator();
- (void) llvm::createPBQPRegisterAllocator();
+ (void) llvm::createGreedyRegisterAllocator();
+ (void) llvm::createDefaultPBQPRegisterAllocator();
(void) llvm::createSimpleRegisterCoalescer();
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
index 29e689a..88131fb 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -21,7 +21,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/IntEqClasses.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/CodeGen/SlotIndexes.h"
@@ -39,32 +39,17 @@ namespace llvm {
/// This class holds information about a machine level value, including
/// definition and use points.
///
- /// Care must be taken in interpreting the def index of the value. The
- /// following rules apply:
- ///
- /// If the isDefAccurate() method returns false then def does not contain the
- /// index of the defining MachineInstr, or even (necessarily) to a
- /// MachineInstr at all. In general such a def index is not meaningful
- /// and should not be used. The exception is that, for values originally
- /// defined by PHI instructions, after PHI elimination def will contain the
- /// index of the MBB in which the PHI originally existed. This can be used
- /// to insert code (spills or copies) which deals with the value, which will
- /// be live in to the block.
class VNInfo {
private:
enum {
HAS_PHI_KILL = 1,
REDEF_BY_EC = 1 << 1,
IS_PHI_DEF = 1 << 2,
- IS_UNUSED = 1 << 3,
- IS_DEF_ACCURATE = 1 << 4
+ IS_UNUSED = 1 << 3
};
+ MachineInstr *copy;
unsigned char flags;
- union {
- MachineInstr *copy;
- unsigned reg;
- } cr;
public:
typedef BumpPtrAllocator Allocator;
@@ -76,20 +61,19 @@ namespace llvm {
SlotIndex def;
/// VNInfo constructor.
- /// d is presumed to point to the actual defining instr. If it doesn't
- /// setIsDefAccurate(false) should be called after construction.
VNInfo(unsigned i, SlotIndex d, MachineInstr *c)
- : flags(IS_DEF_ACCURATE), id(i), def(d) { cr.copy = c; }
+ : copy(c), flags(0), id(i), def(d)
+ { }
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : flags(orig.flags), cr(orig.cr), id(i), def(orig.def)
+ : copy(orig.copy), flags(orig.flags), id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
flags = src.flags;
- cr = src.cr;
+ copy = src.copy;
def = src.def;
}
@@ -97,23 +81,23 @@ namespace llvm {
unsigned getFlags() const { return flags; }
void setFlags(unsigned flags) { this->flags = flags; }
+ /// Merge flags from another VNInfo
+ void mergeFlags(const VNInfo *VNI) {
+ flags = (flags | VNI->flags) & ~IS_UNUSED;
+ }
+
/// For a register interval, if this VN was defined by a copy instr
/// getCopy() returns a pointer to it, otherwise returns 0.
/// For a stack interval the behaviour of this method is undefined.
- MachineInstr* getCopy() const { return cr.copy; }
+ MachineInstr* getCopy() const { return copy; }
/// For a register interval, set the copy member.
/// This method should not be called on stack intervals as it may lead to
/// undefined behavior.
- void setCopy(MachineInstr *c) { cr.copy = c; }
+ void setCopy(MachineInstr *c) { copy = c; }
- /// For a stack interval, returns the reg which this stack interval was
- /// defined from.
- /// For a register interval the behaviour of this method is undefined.
- unsigned getReg() const { return cr.reg; }
- /// For a stack interval, set the defining register.
- /// This method should not be called on register intervals as it may lead
- /// to undefined behaviour.
- void setReg(unsigned reg) { cr.reg = reg; }
+ /// isDefByCopy - Return true when this value was defined by a copy-like
+ /// instruction as determined by MachineInstr::isCopyLike.
+ bool isDefByCopy() const { return copy != 0; }
/// Returns true if one or more kills are PHI nodes.
bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
@@ -156,16 +140,6 @@ namespace llvm {
else
flags &= ~IS_UNUSED;
}
-
- /// Returns true if the def is accurate.
- bool isDefAccurate() const { return flags & IS_DEF_ACCURATE; }
- /// Set the "is def accurate" flag on this value.
- void setIsDefAccurate(bool defAccurate) {
- if (defAccurate)
- flags |= IS_DEF_ACCURATE;
- else
- flags &= ~IS_DEF_ACCURATE;
- }
};
/// LiveRange structure - This represents a simple register range in the
@@ -231,8 +205,7 @@ namespace llvm {
typedef SmallVector<LiveRange,4> Ranges;
typedef SmallVector<VNInfo*,4> VNInfoList;
- unsigned reg; // the register or stack slot of this interval
- // if the top bits is set, it represents a stack slot.
+ const unsigned reg; // the register or stack slot of this interval.
float weight; // weight of this interval
Ranges ranges; // the ranges in which this register is live
VNInfoList valnos; // value#'s
@@ -248,11 +221,8 @@ namespace llvm {
};
- LiveInterval(unsigned Reg, float Weight, bool IsSS = false)
- : reg(Reg), weight(Weight) {
- if (IsSS)
- reg = reg | (1U << (sizeof(unsigned)*CHAR_BIT-1));
- }
+ LiveInterval(unsigned Reg, float Weight)
+ : reg(Reg), weight(Weight) {}
typedef Ranges::iterator iterator;
iterator begin() { return ranges.begin(); }
@@ -276,28 +246,29 @@ namespace llvm {
/// position is in a hole, this method returns an iterator pointing to the
/// LiveRange immediately after the hole.
iterator advanceTo(iterator I, SlotIndex Pos) {
+ assert(I != end());
if (Pos >= endIndex())
return end();
while (I->end <= Pos) ++I;
return I;
}
- void clear() {
- valnos.clear();
- ranges.clear();
- }
-
- /// isStackSlot - Return true if this is a stack slot interval.
+ /// find - Return an iterator pointing to the first range that ends after
+ /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
+ /// when searching large intervals.
///
- bool isStackSlot() const {
- return reg & (1U << (sizeof(unsigned)*CHAR_BIT-1));
+ /// If Pos is contained in a LiveRange, that range is returned.
+ /// If Pos is in a hole, the following LiveRange is returned.
+ /// If Pos is beyond endIndex, end() is returned.
+ iterator find(SlotIndex Pos);
+
+ const_iterator find(SlotIndex Pos) const {
+ return const_cast<LiveInterval*>(this)->find(Pos);
}
- /// getStackSlotIndex - Return stack slot index if this is a stack slot
- /// interval.
- int getStackSlotIndex() const {
- assert(isStackSlot() && "Interval is not a stack slot interval!");
- return reg & ~(1U << (sizeof(unsigned)*CHAR_BIT-1));
+ void clear() {
+ valnos.clear();
+ ranges.clear();
}
bool hasAtLeastOneValue() const { return !valnos.empty(); }
@@ -318,10 +289,9 @@ namespace llvm {
/// getNextValue - Create a new value number and return it. MIIdx specifies
/// the instruction that defines the value number.
VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
- bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) {
+ VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
- VNI->setIsDefAccurate(isDefAccurate);
valnos.push_back(VNI);
return VNI;
}
@@ -358,21 +328,6 @@ namespace llvm {
/// cause merging of V1/V2 values numbers and compaction of the value space.
VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
- /// MergeInClobberRanges - For any live ranges that are not defined in the
- /// current interval, but are defined in the Clobbers interval, mark them
- /// used with an unknown definition value. Caller must pass in reference to
- /// VNInfoAllocator since it will create a new val#.
- void MergeInClobberRanges(LiveIntervals &li_,
- const LiveInterval &Clobbers,
- VNInfo::Allocator &VNInfoAllocator);
-
- /// MergeInClobberRange - Same as MergeInClobberRanges except it merge in a
- /// single LiveRange only.
- void MergeInClobberRange(LiveIntervals &li_,
- SlotIndex Start,
- SlotIndex End,
- VNInfo::Allocator &VNInfoAllocator);
-
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
/// in RHS into this live interval as the specified value number.
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
@@ -412,17 +367,18 @@ namespace llvm {
return index >= endIndex();
}
- bool liveAt(SlotIndex index) const;
-
- // liveBeforeAndAt - Check if the interval is live at the index and the
- // index just before it. If index is liveAt, check if it starts a new live
- // range.If it does, then check if the previous live range ends at index-1.
- bool liveBeforeAndAt(SlotIndex index) const;
+ bool liveAt(SlotIndex index) const {
+ const_iterator r = find(index);
+ return r != end() && r->start <= index;
+ }
/// killedAt - Return true if a live range ends at index. Note that the kill
/// point is not contained in the half-open live range. It is usually the
/// getDefIndex() slot following its last use.
- bool killedAt(SlotIndex index) const;
+ bool killedAt(SlotIndex index) const {
+ const_iterator r = find(index.getUseIndex());
+ return r != end() && r->end == index;
+ }
/// killedInRange - Return true if the interval has kills in [Start,End).
/// Note that the kill point is considered the end of a live range, so it is
@@ -452,20 +408,20 @@ namespace llvm {
/// FindLiveRangeContaining - Return an iterator to the live range that
/// contains the specified index, or end() if there is none.
- const_iterator FindLiveRangeContaining(SlotIndex Idx) const;
+ iterator FindLiveRangeContaining(SlotIndex Idx) {
+ iterator I = find(Idx);
+ return I != end() && I->start <= Idx ? I : end();
+ }
- /// FindLiveRangeContaining - Return an iterator to the live range that
- /// contains the specified index, or end() if there is none.
- iterator FindLiveRangeContaining(SlotIndex Idx);
+ const_iterator FindLiveRangeContaining(SlotIndex Idx) const {
+ const_iterator I = find(Idx);
+ return I != end() && I->start <= Idx ? I : end();
+ }
/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// index (register interval).
VNInfo *findDefinedVNInfoForRegInt(SlotIndex Idx) const;
- /// findDefinedVNInfo - Find the VNInfo that's defined by the specified
- /// register (stack inteval only).
- VNInfo *findDefinedVNInfoForStackInt(unsigned Reg) const;
-
/// overlaps - Return true if the intersection of the two live intervals is
/// not empty.
@@ -502,7 +458,10 @@ namespace llvm {
/// isInOneLiveRange - Return true if the specified range is entirely in
/// a single LiveRange of the live interval.
- bool isInOneLiveRange(SlotIndex Start, SlotIndex End);
+ bool isInOneLiveRange(SlotIndex Start, SlotIndex End) const {
+ const_iterator r = find(Start);
+ return r != end() && r->containsRange(Start, End);
+ }
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be a single LiveRange in its entirety.
@@ -569,6 +528,46 @@ namespace llvm {
LI.print(OS);
return OS;
}
-}
+ /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
+ /// LiveInterval into equivalence classes of connected components. A
+ /// LiveInterval that has multiple connected components can be broken into
+ /// multiple LiveIntervals.
+ ///
+ /// Given a LiveInterval that may have multiple connected components, run:
+ ///
+ /// unsigned numComps = ConEQ.Classify(LI);
+ /// if (numComps > 1) {
+ /// // allocate numComps-1 new LiveIntervals into LIS[1..]
+ /// ConEQ.Distribute(LIS);
+ /// }
+
+ class ConnectedVNInfoEqClasses {
+ LiveIntervals &lis_;
+ IntEqClasses eqClass_;
+
+ // Note that values a and b are connected.
+ void Connect(unsigned a, unsigned b);
+
+ unsigned Renumber();
+
+ public:
+ explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : lis_(lis) {}
+
+ /// Classify - Classify the values in LI into connected components.
+ /// Return the number of connected components.
+ unsigned Classify(const LiveInterval *LI);
+
+ /// getEqClass - Classify creates equivalence classes numbered 0..N. Return
+ /// the equivalence class assigned the VNI.
+ unsigned getEqClass(const VNInfo *VNI) const { return eqClass_[VNI->id]; }
+
+ /// Distribute - Distribute values in LIV[0] into a separate LiveInterval
+ /// for each connected component. LIV must have a LiveInterval for each
+ /// connected component. The LiveIntervals in LIV[1..] must be empty.
+ void Distribute(LiveInterval *LIV[]);
+
+ };
+
+}
#endif
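The intended ConnectedVNInfoEqClasses workflow, expanded from the usage outline in the comment above (createEmptyIntervalLike() is a hypothetical allocation helper; real callers create the new intervals through their own machinery):

    #include "llvm/CodeGen/LiveInterval.h"
    #include <vector>

    // Hypothetical: produce a fresh, empty LiveInterval for the same vreg.
    llvm::LiveInterval *createEmptyIntervalLike(const llvm::LiveInterval &LI);

    void splitComponents(llvm::LiveIntervals &LIS, llvm::LiveInterval &LI) {
      llvm::ConnectedVNInfoEqClasses ConEQ(LIS);
      unsigned NumComps = ConEQ.Classify(&LI);
      if (NumComps <= 1)
        return;                    // already a single connected component
      std::vector<llvm::LiveInterval*> LIV(NumComps);
      LIV[0] = &LI;                // component 0 stays in the original
      for (unsigned i = 1; i != NumComps; ++i)
        LIV[i] = createEmptyIntervalLike(LI);
      ConEQ.Distribute(&LIV[0]);   // move values/ranges per component
    }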
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 2918c3c2a..b09f8d1 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -68,19 +68,13 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- LiveIntervals() : MachineFunctionPass(ID) {}
+ LiveIntervals() : MachineFunctionPass(ID) {
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+ }
// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);
- // After summing the spill weights of all defs and uses, the final weight
- // should be normalized, dividing the weight of the interval by its size.
- // This encourages spilling of intervals that are large and have few uses,
- // and discourages spilling of small intervals with many uses.
- void normalizeSpillWeight(LiveInterval &li) {
- li.weight /= getApproximateInstructionCount(li) + 25;
- }
-
typedef Reg2IntervalMap::iterator iterator;
typedef Reg2IntervalMap::const_iterator const_iterator;
const_iterator begin() const { return r2iMap_.begin(); }
@@ -161,6 +155,12 @@ namespace llvm {
LiveRange addLiveRangeToEndOfBlock(unsigned reg,
MachineInstr* startInst);
+ /// shrinkToUses - After removing some uses of a register, shrink its live
+ /// range to just the remaining uses. This method does not compute reaching
+ /// defs for new uses, and it doesn't remove dead defs.
+ /// Dead PHIDef values are marked as unused.
+ void shrinkToUses(LiveInterval *li);
+
// Interval removal
void removeInterval(unsigned Reg) {
@@ -169,6 +169,10 @@ namespace llvm {
r2iMap_.erase(I);
}
+ SlotIndexes *getSlotIndexes() const {
+ return indexes_;
+ }
+
SlotIndex getZeroIndex() const {
return indexes_->getZeroIndex();
}
@@ -227,10 +231,6 @@ namespace llvm {
return indexes_->getMBBFromIndex(index);
}
- SlotIndex getMBBTerminatorGap(const MachineBasicBlock *mbb) {
- return indexes_->getTerminatorGap(mbb);
- }
-
SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
return indexes_->insertMachineInstrInMaps(MI);
}
@@ -272,7 +272,7 @@ namespace llvm {
/// (if any is created) by reference. This is temporary.
std::vector<LiveInterval*>
addIntervalsForSpills(const LiveInterval& i,
- SmallVectorImpl<LiveInterval*> &SpillIs,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
@@ -285,7 +285,7 @@ namespace llvm {
/// val# of the specified interval is re-materializable. Also returns true
/// by reference if all of the defs are load instructions.
bool isReMaterializable(const LiveInterval &li,
- SmallVectorImpl<LiveInterval*> &SpillIs,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad);
/// isReMaterializable - Returns true if the definition MI of the specified
@@ -306,6 +306,16 @@ namespace llvm {
/// within a single basic block.
bool intervalIsInOneMBB(const LiveInterval &li) const;
+ /// getLastSplitPoint - Return the last possible insertion point in mbb for
+ /// spilling and splitting code. This is the first terminator, or the call
+ /// instruction if li is live into a landing pad successor.
+ MachineBasicBlock::iterator getLastSplitPoint(const LiveInterval &li,
+ MachineBasicBlock *mbb) const;
+
+ /// addKillFlags - Add kill flags to any instruction that kills a virtual
+ /// register.
+ void addKillFlags();
+
private:
/// computeIntervals - Compute live intervals.
void computeIntervals();
@@ -362,7 +372,7 @@ namespace llvm {
/// by reference if the def is a load.
bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
MachineInstr *MI,
- SmallVectorImpl<LiveInterval*> &SpillIs,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
@@ -443,9 +453,6 @@ namespace llvm {
DenseMap<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs);
- // Normalize the spill weight of all the intervals in NewLIs.
- void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs);
-
static LiveInterval* createInterval(unsigned Reg);
void printInstrs(raw_ostream &O) const;
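The new shrinkToUses() hook pairs naturally with use deletion; a minimal sketch (hypothetical caller, assuming Reg is a virtual register with a computed interval):

    #include "llvm/CodeGen/LiveIntervalAnalysis.h"
    #include "llvm/CodeGen/MachineInstr.h"

    void eraseUseAndShrink(llvm::LiveIntervals &LIS, llvm::MachineInstr *MI,
                           unsigned Reg) {
      LIS.RemoveMachineInstrFromMaps(MI);      // drop its slot index first
      MI->eraseFromParent();                   // delete the using instruction
      LIS.shrinkToUses(&LIS.getInterval(Reg)); // tighten to remaining uses
    }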
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
index ad984db..8a8dcaf 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -39,7 +39,9 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- LiveStacks() : MachineFunctionPass(ID) {}
+ LiveStacks() : MachineFunctionPass(ID) {
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ }
typedef SS2IntervalMap::iterator iterator;
typedef SS2IntervalMap::const_iterator const_iterator;
@@ -50,19 +52,7 @@ namespace llvm {
unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
- LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
- assert(Slot >= 0 && "Spill slot indice must be >= 0");
- SS2IntervalMap::iterator I = S2IMap.find(Slot);
- if (I == S2IMap.end()) {
- I = S2IMap.insert(I,std::make_pair(Slot, LiveInterval(Slot,0.0F,true)));
- S2RCMap.insert(std::make_pair(Slot, RC));
- } else {
- // Use the largest common subclass register class.
- const TargetRegisterClass *OldRC = S2RCMap[Slot];
- S2RCMap[Slot] = getCommonSubClass(OldRC, RC);
- }
- return I->second;
- }
+ LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);
LiveInterval &getInterval(int Slot) {
assert(Slot >= 0 && "Spill slot indice must be >= 0");
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
index c8182e0..f9b81b1 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -32,8 +32,10 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
@@ -46,7 +48,9 @@ class TargetRegisterInfo;
class LiveVariables : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LiveVariables() : MachineFunctionPass(ID) {}
+ LiveVariables() : MachineFunctionPass(ID) {
+ initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
+ }
/// VarInfo - This represents the regions where a virtual register is live in
/// the program. We represent this with three different pieces of
@@ -119,10 +123,9 @@ public:
private:
/// VirtRegInfo - This list is a mapping from virtual register number to
- /// variable information. FirstVirtualRegister is subtracted from the virtual
- /// register number before indexing into this list.
+ /// variable information.
///
- std::vector<VarInfo> VirtRegInfo;
+ IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
/// PHIJoins - list of virtual registers that are PHI joins. These registers
/// may have multiple definitions, and they require special handling when
diff --git a/contrib/llvm/include/llvm/CodeGen/MachORelocation.h b/contrib/llvm/include/llvm/CodeGen/MachORelocation.h
index 27306c6..21fe74f 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachORelocation.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachORelocation.h
@@ -15,7 +15,7 @@
#ifndef LLVM_CODEGEN_MACHO_RELOCATION_H
#define LLVM_CODEGEN_MACHO_RELOCATION_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 3cfc47a..1785451 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -23,6 +23,7 @@ class Pass;
class BasicBlock;
class MachineFunction;
class MCSymbol;
+class SlotIndexes;
class StringRef;
class raw_ostream;
@@ -223,6 +224,10 @@ public:
/// this basic block is entered via an exception handler.
void setIsLandingPad() { IsLandingPad = true; }
+ /// getLandingPadSuccessor - If this block has a successor that is a landing
+ /// pad, return it. Otherwise return NULL.
+ const MachineBasicBlock *getLandingPadSuccessor() const;
+
// Code Layout methods.
/// moveBefore/moveAfter - move 'this' block before or after the specified
@@ -289,11 +294,20 @@ public:
/// Returns end() if there's no non-PHI instruction.
iterator getFirstNonPHI();
+ /// SkipPHIsAndLabels - Return the first instruction in MBB after I that is
+ /// not a PHI or a label. This is the correct point to insert copies at the
+ /// beginning of a basic block.
+ iterator SkipPHIsAndLabels(iterator I);
+
/// getFirstTerminator - returns an iterator to the first terminator
/// instruction of this basic block. If a terminator does not exist,
/// it returns end()
iterator getFirstTerminator();
+ /// getLastNonDebugInstr - returns an iterator to the last non-debug
+ /// instruction in the basic block, or end()
+ iterator getLastNonDebugInstr();
+
/// SplitCriticalEdge - Split the critical edge from this block to the
/// given successor block, and return the newly created block, or null
/// if splitting is not possible.
@@ -308,6 +322,9 @@ public:
template<typename IT>
void insert(iterator I, IT S, IT E) { Insts.insert(I, S, E); }
iterator insert(iterator I, MachineInstr *M) { return Insts.insert(I, M); }
+ iterator insertAfter(iterator I, MachineInstr *M) {
+ return Insts.insertAfter(I, M);
+ }
// erase - Remove the specified element or range from the instruction list.
// These functions delete any instructions removed.
@@ -358,7 +375,7 @@ public:
// Debugging methods.
void dump() const;
- void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS, SlotIndexes* = 0) const;
/// getNumber - MachineBasicBlocks are uniquely numbered at the function
/// level, unless they're not in a MachineFunction yet, in which case this
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h b/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
index 7abb49a..8fc80ad 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
@@ -17,7 +17,7 @@
#ifndef LLVM_CODEGEN_MACHINECODEEMITTER_H
#define LLVM_CODEGEN_MACHINECODEEMITTER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineCodeInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineCodeInfo.h
index a75c02a..c5c0c44 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineCodeInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineCodeInfo.h
@@ -17,7 +17,7 @@
#ifndef EE_MACHINE_CODE_INFO_H
#define EE_MACHINE_CODE_INFO_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineDominators.h b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
index 48695d5..ab944a2 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
@@ -145,7 +145,7 @@ public:
}
/// eraseNode - Removes a node from the dominator tree. Block must not
- /// domiante any other blocks. Removes node from its immediate dominator's
+ /// dominate any other blocks. Removes node from its immediate dominator's
/// children list. Deletes dominator node associated with basic block BB.
inline void eraseNode(MachineBasicBlock *BB) {
DT->eraseNode(BB);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index dca65ef..22a82a9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -16,7 +16,7 @@
#include "llvm/ADT/SmallVector.h"
//#include "llvm/ADT/IndexedMap.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <vector>
@@ -27,7 +27,7 @@ class TargetRegisterClass;
class Type;
class MachineFunction;
class MachineBasicBlock;
-class TargetFrameInfo;
+class TargetFrameLowering;
class BitVector;
/// The CalleeSavedInfo class tracks the information need to locate where a
@@ -192,13 +192,9 @@ class MachineFrameInfo {
/// CSIValid - Has CSInfo been set yet?
bool CSIValid;
- /// SpillObjects - A vector indicating which frame indices refer to
- /// spill slots.
- SmallVector<bool, 8> SpillObjects;
-
- /// TargetFrameInfo - Target information about frame layout.
+ /// TargetFrameLowering - Target information about frame layout.
///
- const TargetFrameInfo &TFI;
+ const TargetFrameLowering &TFI;
/// LocalFrameObjects - References to frame indices which are mapped
/// into the local frame allocation block. <FrameIdx, LocalOffset>
@@ -217,7 +213,7 @@ class MachineFrameInfo {
bool UseLocalStackAllocationBlock;
public:
- explicit MachineFrameInfo(const TargetFrameInfo &tfi) : TFI(tfi) {
+ explicit MachineFrameInfo(const TargetFrameLowering &tfi) : TFI(tfi) {
StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
HasVarSizedObjects = false;
FrameAddressTaken = false;
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
index 5bb453d..abeaa4f 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -28,6 +28,7 @@ namespace llvm {
class Value;
class Function;
+class GCModuleInfo;
class MachineRegisterInfo;
class MachineFrameInfo;
class MachineConstantPool;
@@ -37,6 +38,7 @@ class MCContext;
class Pass;
class TargetMachine;
class TargetRegisterClass;
+struct MachinePointerInfo;
template <>
struct ilist_traits<MachineBasicBlock>
@@ -74,6 +76,7 @@ class MachineFunction {
const TargetMachine &Target;
MCContext &Ctx;
MachineModuleInfo &MMI;
+ GCModuleInfo *GMI;
// RegInfo - Information about each register in use in the function.
MachineRegisterInfo *RegInfo;
@@ -126,10 +129,12 @@ class MachineFunction {
void operator=(const MachineFunction&); // DO NOT IMPLEMENT
public:
MachineFunction(const Function *Fn, const TargetMachine &TM,
- unsigned FunctionNum, MachineModuleInfo &MMI);
+ unsigned FunctionNum, MachineModuleInfo &MMI,
+ GCModuleInfo* GMI);
~MachineFunction();
MachineModuleInfo &getMMI() const { return MMI; }
+ GCModuleInfo *getGMI() const { return GMI; }
MCContext &getContext() const { return Ctx; }
/// getFunction - Return the LLVM function that this machine code represents
@@ -243,7 +248,7 @@ public:
/// print - Print out the MachineFunction in a format suitable for debugging
/// to the specified stream.
///
- void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS, SlotIndexes* = 0) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
@@ -266,7 +271,7 @@ public:
/// verify - Run the current MachineFunction through the machine code
/// verifier, useful for debugger use.
- void verify(Pass *p=NULL) const;
+ void verify(Pass *p = NULL, const char *Banner = NULL) const;
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
@@ -276,7 +281,7 @@ public:
/// addLiveIn - Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
- unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
+ unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC, DebugLoc DL);
//===--------------------------------------------------------------------===//
// BasicBlock accessor functions.
@@ -368,10 +373,11 @@ public:
/// getMachineMemOperand - Allocate a new MachineMemOperand.
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
- MachineMemOperand *getMachineMemOperand(const Value *v, unsigned f,
- int64_t o, uint64_t s,
- unsigned base_alignment);
-
+ MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
+ unsigned f, uint64_t s,
+ unsigned base_alignment,
+ const MDNode *TBAAInfo = 0);
+
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
/// MachineMemOperands are owned by the MachineFunction and need not be
@@ -406,6 +412,10 @@ public:
/// normal 'L' label is returned.
MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate = false) const;
+
+ /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
+ /// base.
+ MCSymbol *getPICBaseSymbol() const;
};
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
index 75dbaab..50676ad 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -37,6 +37,10 @@ public:
MachineFunction &getMF() const { return *MF; }
CodeGenOpt::Level getOptLevel() const { return OptLevel; }
+
+ virtual const char* getPassName() const {
+ return "Machine Function Analysis";
+ }
private:
virtual bool doInitialization(Module &M);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index f843196..82c5332 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -127,6 +127,10 @@ public:
///
unsigned short getAsmPrinterFlags() const { return AsmPrinterFlags; }
+ /// clearAsmPrinterFlags - clear the AsmPrinter bitvector
+ ///
+ void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
+
/// getAsmPrinterFlag - Return whether an AsmPrinter flag is set.
///
bool getAsmPrinterFlag(CommentFlag Flag) const {
@@ -138,6 +142,12 @@ public:
void setAsmPrinterFlag(CommentFlag Flag) {
AsmPrinterFlags |= (unsigned short)Flag;
}
+
+ /// clearAsmPrinterFlag - clear specific AsmPrinter flags
+ ///
+ void clearAsmPrinterFlag(CommentFlag Flag) {
+ AsmPrinterFlags &= ~Flag;
+ }
/// getDebugLoc - Returns the debug location id of this MachineInstr.
///
@@ -167,7 +177,17 @@ public:
/// getNumExplicitOperands - Returns the number of non-implicit operands.
///
unsigned getNumExplicitOperands() const;
-
+
+ /// iterator/begin/end - Iterate over all operands of a machine instruction.
+ typedef std::vector<MachineOperand>::iterator mop_iterator;
+ typedef std::vector<MachineOperand>::const_iterator const_mop_iterator;
+
+ mop_iterator operands_begin() { return Operands.begin(); }
+ mop_iterator operands_end() { return Operands.end(); }
+
+ const_mop_iterator operands_begin() const { return Operands.begin(); }
+ const_mop_iterator operands_end() const { return Operands.end(); }
+
/// Access to memory operands of the instruction
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefsEnd; }
@@ -217,6 +237,7 @@ public:
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
+ bool isStackAligningInlineAsm() const;
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
@@ -412,10 +433,23 @@ public:
/// return 0.
unsigned isConstantValuePHI() const;
+ /// hasUnmodeledSideEffects - Return true if this instruction has side
+ /// effects that are not modeled by mayLoad / mayStore, etc.
+ /// For all instructions, the property is encoded in TargetInstrDesc::Flags
+ /// (see TargetInstrDesc::hasUnmodeledSideEffects()). The only exception is
+ /// the INLINEASM instruction, in which case the side effect property is
+ /// encoded in one of its operands (see InlineAsm::Extra_HasSideEffects).
+ ///
+ bool hasUnmodeledSideEffects() const;
+
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool allDefsAreDead() const;
+ /// copyImplicitOps - Copy implicit register operands from specified
+ /// instruction to this instruction.
+ void copyImplicitOps(const MachineInstr *MI);
+
//
// Debugging support
//
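The new mop_iterator typedefs let passes walk operands with standard iterator idioms instead of indexed getOperand() calls. A small sketch, assuming MI is an existing MachineInstr*:

    // Count explicit and implicit register uses by iterating all operands.
    unsigned NumRegUses = 0;
    for (MachineInstr::mop_iterator I = MI->operands_begin(),
         E = MI->operands_end(); I != E; ++I)
      if (I->isReg() && I->isUse())
        ++NumRegUses;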
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 37ac24c..1eb9735 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -18,6 +18,7 @@
#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -122,6 +123,13 @@ public:
return *this;
}
+ const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
+ MachineInstr::mmo_iterator e) const {
+ MI->setMemRefs(b, e);
+ return *this;
+ }
+
+
const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
MI->addOperand(MO);
return *this;
@@ -136,6 +144,19 @@ public:
MI->addOperand(MachineOperand::CreateMCSymbol(Sym));
return *this;
}
+
+ // Add a displacement from an existing MachineOperand with an added offset.
+ const MachineInstrBuilder &addDisp(const MachineOperand &Disp,
+ int64_t off) const {
+ switch (Disp.getType()) {
+ default:
+ llvm_unreachable("Unhandled operand type in addDisp()");
+ case MachineOperand::MO_Immediate:
+ return addImm(Disp.getImm() + off);
+ case MachineOperand::MO_GlobalAddress:
+ return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off);
+ }
+ }
};
/// BuildMI - Builder interface. Specify how to create the initial instruction
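addDisp copies a displacement operand from an existing instruction while folding in an extra offset, which is convenient when rewriting a memory reference in place. A hedged sketch using x86-style address operands; the opcode, registers, and operand index are invented for illustration:

    // Rebuild an address from OldMI with 4 bytes added to its displacement.
    // Only immediate and global-address displacements are handled by addDisp().
    const MachineOperand &Disp = OldMI->getOperand(3); // hypothetical index
    BuildMI(MBB, InsertPt, DL, TII->get(NewOpc), DestReg)
      .addReg(BaseReg).addImm(Scale).addReg(IndexReg)
      .addDisp(Disp, 4)
      .addReg(0); // segment register, x86-style addressing assumed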
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLocation.h b/contrib/llvm/include/llvm/CodeGen/MachineLocation.h
index a1fcb9f..21951b6 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineLocation.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLocation.h
@@ -32,7 +32,7 @@ private:
public:
enum {
// The target register number for an abstract frame pointer. The value is
- // an arbitrary value greater than TargetRegisterInfo::FirstVirtualRegister.
+ // an arbitrary value that doesn't collide with any real target register.
VirtualFP = ~0U
};
MachineLocation()
@@ -41,6 +41,11 @@ public:
: IsRegister(true), Register(R), Offset(0) {}
MachineLocation(unsigned R, int O)
: IsRegister(false), Register(R), Offset(O) {}
+
+ bool operator==(const MachineLocation &Other) const {
+ return IsRegister == Other.IsRegister && Register == Other.Register &&
+ Offset == Other.Offset;
+ }
// Accessors
bool isReg() const { return IsRegister; }
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 9760eba..6dd9440 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -67,7 +67,9 @@ class MachineLoopInfo : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- MachineLoopInfo() : MachineFunctionPass(ID) {}
+ MachineLoopInfo() : MachineFunctionPass(ID) {
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ }
LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopRanges.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopRanges.h
new file mode 100644
index 0000000..6a30e8b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopRanges.h
@@ -0,0 +1,112 @@
+//===- MachineLoopRanges.h - Ranges of machine loops -----------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface to the MachineLoopRanges analysis.
+//
+// Provide on-demand information about the ranges of machine instructions
+// covered by a loop.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINELOOPRANGES_H
+#define LLVM_CODEGEN_MACHINELOOPRANGES_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+namespace llvm {
+
+class MachineLoop;
+class MachineLoopInfo;
+class raw_ostream;
+
+/// MachineLoopRange - Range information for a single loop.
+class MachineLoopRange {
+ friend class MachineLoopRanges;
+
+public:
+ typedef IntervalMap<SlotIndex, unsigned, 4> Map;
+ typedef Map::Allocator Allocator;
+
+private:
+ /// The mapped loop.
+ const MachineLoop *const Loop;
+
+ /// Map intervals to a bit mask.
+ /// Bit 0 = inside loop block.
+ Map Intervals;
+
+ /// Loop area as measured by SlotIndex::distance.
+ unsigned Area;
+
+ /// Create a MachineLoopRange, only accessible to MachineLoopRanges.
+ MachineLoopRange(const MachineLoop*, Allocator&, SlotIndexes&);
+
+public:
+ /// getLoop - Return the mapped machine loop.
+ const MachineLoop *getLoop() const { return Loop; }
+
+ /// overlaps - Return true if this loop overlaps the given range of machine
+ /// instructions.
+ bool overlaps(SlotIndex Start, SlotIndex Stop);
+
+ /// getNumber - Return the loop number. This is the same as the number of the
+ /// header block.
+ unsigned getNumber() const;
+
+ /// getArea - Return the loop area. This number is approximately proportional
+ /// to the number of instructions in the loop.
+ unsigned getArea() const { return Area; }
+
+ /// getMap - Allow public read-only access for IntervalMapOverlaps.
+ const Map &getMap() { return Intervals; }
+
+ /// print - Print loop ranges on OS.
+ void print(raw_ostream&) const;
+
+ /// byNumber - Comparator for array_pod_sort that sorts a list of
+ /// MachineLoopRange pointers by number.
+ static int byNumber(const void*, const void*);
+
+ /// byAreaDesc - Comparator for array_pod_sort that sorts a list of
+ /// MachineLoopRange pointers by descending area, then by number.
+ static int byAreaDesc(const void*, const void*);
+};
+
+raw_ostream &operator<<(raw_ostream&, const MachineLoopRange&);
+
+/// MachineLoopRanges - Analysis pass that provides on-demand per-loop range
+/// information.
+class MachineLoopRanges : public MachineFunctionPass {
+ typedef DenseMap<const MachineLoop*, MachineLoopRange*> CacheMap;
+ typedef MachineLoopRange::Allocator MapAllocator;
+
+ MapAllocator Allocator;
+ SlotIndexes *Indexes;
+ CacheMap Cache;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ MachineLoopRanges() : MachineFunctionPass(ID), Indexes(0) {}
+ ~MachineLoopRanges() { releaseMemory(); }
+
+ /// getLoopRange - Return the range of the given loop.
+ MachineLoopRange *getLoopRange(const MachineLoop *Loop);
+
+private:
+ virtual bool runOnMachineFunction(MachineFunction&);
+ virtual void releaseMemory();
+ virtual void getAnalysisUsage(AnalysisUsage&) const;
+};
+
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINELOOPRANGES_H
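A sketch of how a client pass might consume this analysis, assuming Loop, LI (a live interval), and Candidates come from the surrounding code:

    // Prefer loops the candidate interval never enters; spill code placed
    // around such a loop costs roughly in proportion to Range->getArea().
    MachineLoopRanges &MLR = getAnalysis<MachineLoopRanges>();
    MachineLoopRange *Range = MLR.getLoopRange(Loop);
    if (!Range->overlaps(LI.beginIndex(), LI.endIndex()))
      Candidates.push_back(Range);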
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
index 7272aa5..768ce47 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -16,7 +16,7 @@
#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -24,6 +24,52 @@ class Value;
class FoldingSetNodeID;
class raw_ostream;
+/// MachinePointerInfo - This class contains a discriminated union of
+/// information about pointers in memory operands, relating them back to LLVM IR
+/// or to virtual locations (such as frame indices) that are exposed during
+/// codegen.
+struct MachinePointerInfo {
+ /// V - This is the IR pointer value for the access, or it is null if unknown.
+ /// If this is null, then the access is to a pointer in the default address
+ /// space.
+ const Value *V;
+
+ /// Offset - This is an offset from the base Value*.
+ int64_t Offset;
+
+ explicit MachinePointerInfo(const Value *v = 0, int64_t offset = 0)
+ : V(v), Offset(offset) {}
+
+ MachinePointerInfo getWithOffset(int64_t O) const {
+ if (V == 0) return MachinePointerInfo(0, 0);
+ return MachinePointerInfo(V, Offset+O);
+ }
+
+ /// getAddrSpace - Return the LLVM IR address space number that this pointer
+ /// points into.
+ unsigned getAddrSpace() const;
+
+ /// getConstantPool - Return a MachinePointerInfo record that refers to the
+ /// constant pool.
+ static MachinePointerInfo getConstantPool();
+
+ /// getFixedStack - Return a MachinePointerInfo record that refers to the
+ /// specified FrameIndex.
+ static MachinePointerInfo getFixedStack(int FI, int64_t offset = 0);
+
+ /// getJumpTable - Return a MachinePointerInfo record that refers to a
+ /// jump table entry.
+ static MachinePointerInfo getJumpTable();
+
+ /// getGOT - Return a MachinePointerInfo record that refers to a
+ /// GOT entry.
+ static MachinePointerInfo getGOT();
+
+ /// getStack - stack pointer relative access.
+ static MachinePointerInfo getStack(int64_t Offset);
+};
+
+
//===----------------------------------------------------------------------===//
/// MachineMemOperand - A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
@@ -33,10 +79,10 @@ class raw_ostream;
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
- int64_t Offset;
+ MachinePointerInfo PtrInfo;
uint64_t Size;
- const Value *V;
- unsigned int Flags;
+ unsigned Flags;
+ const MDNode *TBAAInfo;
public:
/// Flags values. These may be or'd together.
@@ -54,10 +100,12 @@ public:
};
/// MachineMemOperand - Construct a MachineMemOperand object with the
- /// specified address Value, flags, offset, size, and base alignment.
- MachineMemOperand(const Value *v, unsigned int f, int64_t o, uint64_t s,
- unsigned int base_alignment);
+ /// specified PtrInfo, flags, size, and base alignment.
+ MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
+ unsigned base_alignment, const MDNode *TBAAInfo = 0);
+ const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
+
/// getValue - Return the base address of the memory access. This may either
/// be a normal LLVM IR Value, or one of the special values used in CodeGen.
/// Special values are those obtained via
@@ -65,7 +113,7 @@ public:
/// other PseudoSourceValue member functions which return objects which stand
/// for frame/stack pointer relative references and other special references
/// which are not representable in the high-level IR.
- const Value *getValue() const { return V; }
+ const Value *getValue() const { return PtrInfo.V; }
/// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }
@@ -73,7 +121,7 @@ public:
/// getOffset - For normal values, this is a byte offset added to the base
/// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
/// number.
- int64_t getOffset() const { return Offset; }
+ int64_t getOffset() const { return PtrInfo.Offset; }
/// getSize - Return the size in bytes of the memory reference.
uint64_t getSize() const { return Size; }
@@ -86,6 +134,9 @@ public:
/// base address, without the offset.
uint64_t getBaseAlignment() const { return (1u << (Flags >> MOMaxBits)) >> 1; }
+ /// getTBAAInfo - Return the TBAA tag for the memory reference.
+ const MDNode *getTBAAInfo() const { return TBAAInfo; }
+
bool isLoad() const { return Flags & MOLoad; }
bool isStore() const { return Flags & MOStore; }
bool isVolatile() const { return Flags & MOVolatile; }
@@ -99,7 +150,8 @@ public:
/// setValue - Change the SourceValue for this MachineMemOperand. This
/// should only be used when an object is being relocated and all references
/// to it are being updated.
- void setValue(const Value *NewSV) { V = NewSV; }
+ void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
+ void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
/// Profile - Gather unique data for the object.
///
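The static factories cover the common pseudo-source cases, so most call sites never build a MachinePointerInfo by hand. Two illustrative idioms, with sizes and alignments invented for the example:

    // Constant-pool load: 8 bytes, 8-byte aligned.
    MachineMemOperand *CPMMO =
      MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(),
                              MachineMemOperand::MOLoad, 8, 8);
    // Spill-slot store through a FrameIndex FI.
    MachineMemOperand *FIMMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              MachineMemOperand::MOStore, Size, Align);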
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 0e719c8..6bc80b0 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -39,7 +39,7 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -57,7 +57,7 @@ class MachineFunction;
class Module;
class PointerType;
class StructType;
-
+
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
/// this type are accessed/created with MMI::getInfo and destroyed when the
@@ -70,8 +70,8 @@ public:
protected:
static SymbolListTy GetSortedStubs(const DenseMap<MCSymbol*, StubValueTy>&);
};
-
-
+
+
//===----------------------------------------------------------------------===//
/// LandingPadInfo - This structure is used to retain landing pad info for
@@ -90,19 +90,19 @@ struct LandingPadInfo {
};
class MMIAddrLabelMap;
-
+
//===----------------------------------------------------------------------===//
/// MachineModuleInfo - This class contains meta information specific to a
-/// module. Queries can be made by different debugging and exception handling
+/// module. Queries can be made by different debugging and exception handling
/// schemes and reformatted for specific use.
///
class MachineModuleInfo : public ImmutablePass {
/// Context - This is the MCContext used for the entire code generator.
MCContext Context;
-
+
/// TheModule - This is the LLVM Module being worked on.
const Module *TheModule;
-
+
/// ObjFileMMI - This is the object-file-format-specific implementation of
/// MachineModuleInfoImpl, which lets targets accumulate whatever info they
/// want.
@@ -111,7 +111,7 @@ class MachineModuleInfo : public ImmutablePass {
// FrameMoves - List of moves done by a function's prolog. Used to construct
// frame maps by debug and exception handling consumers.
std::vector<MachineMove> FrameMoves;
-
+
// LandingPads - List of LandingPadInfo describing the landing pad information
// in the current function.
std::vector<LandingPadInfo> LandingPads;
@@ -145,18 +145,22 @@ class MachineModuleInfo : public ImmutablePass {
/// llvm.compiler.used.
SmallPtrSet<const Function *, 32> UsedFunctions;
-
+
/// AddrLabelSymbols - This map keeps track of which symbol is being used for
/// the specified basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
-
+
bool CallsEHReturn;
bool CallsUnwindInit;
-
+
/// DbgInfoAvailable - True if debugging information is available
/// in this module.
bool DbgInfoAvailable;
+ /// True if this module calls a VarArg function with floating-point arguments.
+ /// This is used to emit an undefined reference to fltused on Windows targets.
+ bool CallsExternalVAFunctionWithFloatingPointArguments;
+
public:
static char ID; // Pass identification, replacement for typeid
@@ -166,22 +170,23 @@ public:
VariableDbgInfoMapTy VariableDbgInfo;
MachineModuleInfo(); // DUMMY CONSTRUCTOR, DO NOT CALL.
- MachineModuleInfo(const MCAsmInfo &MAI); // Real constructor.
+ // Real constructor.
+ MachineModuleInfo(const MCAsmInfo &MAI, const TargetAsmInfo *TAI);
~MachineModuleInfo();
-
+
bool doInitialization();
bool doFinalization();
/// EndFunction - Discard function meta information.
///
void EndFunction();
-
+
const MCContext &getContext() const { return Context; }
MCContext &getContext() { return Context; }
void setModule(const Module *M) { TheModule = M; }
const Module *getModule() const { return TheModule; }
-
+
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
///
@@ -191,32 +196,40 @@ public:
ObjFileMMI = new Ty(*this);
return *static_cast<Ty*>(ObjFileMMI);
}
-
+
template<typename Ty>
const Ty &getObjFileInfo() const {
return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
}
-
+
/// AnalyzeModule - Scan the module for global debug information.
///
void AnalyzeModule(const Module &M);
-
+
/// hasDebugInfo - Returns true if valid debug info is present.
///
bool hasDebugInfo() const { return DbgInfoAvailable; }
- void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = true; }
+ void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
bool callsEHReturn() const { return CallsEHReturn; }
void setCallsEHReturn(bool b) { CallsEHReturn = b; }
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
-
+
+ bool callsExternalVAFunctionWithFloatingPointArguments() const {
+ return CallsExternalVAFunctionWithFloatingPointArguments;
+ }
+
+ void setCallsExternalVAFunctionWithFloatingPointArguments(bool b) {
+ CallsExternalVAFunctionWithFloatingPointArguments = b;
+ }
+
/// getFrameMoves - Returns a reference to a list of moves done in the current
/// function's prologue. Used to construct frame maps for debug and exception
/// handling consumers.
std::vector<MachineMove> &getFrameMoves() { return FrameMoves; }
-
+
/// getAddrLabelSymbol - Return the symbol to be used for the specified basic
/// block when its address is taken. This cannot be its normal LBB label
/// because the block may be accessed outside its containing function.
@@ -226,15 +239,15 @@ public:
/// basic block when its address is taken. If other blocks were RAUW'd to
/// this one, we may have to emit them as well, return the whole set.
std::vector<MCSymbol*> getAddrLabelSymbolToEmit(const BasicBlock *BB);
-
+
/// takeDeletedSymbolsForFunction - If the specified function has had any
/// references to address-taken blocks generated, but the block got deleted,
/// return the symbol now so we can emit it. This prevents emitting a
/// reference to a symbol that has no definition.
- void takeDeletedSymbolsForFunction(const Function *F,
+ void takeDeletedSymbolsForFunction(const Function *F,
std::vector<MCSymbol*> &Result);
-
+
//===- EH ---------------------------------------------------------------===//
/// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the
@@ -245,11 +258,11 @@ public:
/// associate it with a try landing pad block.
void addInvoke(MachineBasicBlock *LandingPad,
MCSymbol *BeginLabel, MCSymbol *EndLabel);
-
- /// addLandingPad - Add a new panding pad. Returns the label ID for the
+
+ /// addLandingPad - Add a new landing pad. Returns the label ID for the
/// landing pad entry.
MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
-
+
/// addPersonality - Provide the personality function for the exception
/// information.
void addPersonality(MachineBasicBlock *LandingPad,
@@ -285,7 +298,7 @@ public:
///
void addCleanup(MachineBasicBlock *LandingPad);
- /// getTypeIDFor - Return the type id for the specified typeinfo. This is
+ /// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned getTypeIDFor(const GlobalVariable *TI);
@@ -296,7 +309,7 @@ public:
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = 0);
-
+
/// getLandingPads - Return a reference to the landing pad info for the
/// current function.
const std::vector<LandingPadInfo> &getLandingPads() const {
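The new flag exists so Windows targets can emit an undefined reference to _fltused when a module passes floating-point values to a varargs callee. A sketch of the producer side, assuming IsVarArg and HasFloatArg are computed during call lowering:

    // Producer side, during call lowering (sketch):
    if (IsVarArg && HasFloatArg)
      MF.getMMI().setCallsExternalVAFunctionWithFloatingPointArguments(true);
    // Consumer side: the target asm printer tests
    // callsExternalVAFunctionWithFloatingPointArguments() and, when set,
    // emits the _fltused reference (target-specific, not shown here).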
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
index afa2c29..8acc949 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -14,11 +14,11 @@
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
-
+
class BlockAddress;
class ConstantFP;
class GlobalValue;
@@ -30,7 +30,7 @@ class TargetMachine;
class TargetRegisterInfo;
class raw_ostream;
class MCSymbol;
-
+
/// MachineOperand class - Representation of each machine instruction operand.
///
class MachineOperand {
@@ -54,21 +54,21 @@ private:
/// OpKind - Specify what kind of operand this is. This discriminates the
/// union.
unsigned char OpKind; // MachineOperandType
-
+
/// SubReg - Subregister number, only valid for MO_Register. A value of 0
/// indicates the MO_Register has no subReg.
unsigned char SubReg;
-
+
/// TargetFlags - This is a set of target-specific operand flags.
unsigned char TargetFlags;
-
+
/// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
/// operands.
-
+
/// IsDef - True if this is a def, false if this is a use of the register.
///
bool IsDef : 1;
-
+
/// IsImp - True if this is an implicit def or use, false if it is explicit.
///
bool IsImp : 1;
@@ -94,7 +94,16 @@ private:
/// not a real instruction. Such uses should be ignored during codegen.
bool IsDebug : 1;
- /// ParentMI - This is the instruction that this operand is embedded into.
+ /// SmallContents - This really should be part of the Contents union, but lives
+ /// out here so we can get a better packed struct.
+ /// MO_Register: Register number.
+ /// OffsetedInfo: Low bits of offset.
+ union {
+ unsigned RegNo; // For MO_Register.
+ unsigned OffsetLo; // Matches Contents.OffsetedInfo.OffsetHi.
+ } SmallContents;
+
+ /// ParentMI - This is the instruction that this operand is embedded into.
/// This is valid for all operand types, when the operand is in an instr.
MachineInstr *ParentMI;
@@ -107,11 +116,11 @@ private:
MCSymbol *Sym; // For MO_MCSymbol
struct { // For MO_Register.
- unsigned RegNo;
+ // Register number is in SmallContents.RegNo.
MachineOperand **Prev; // Access list for register.
MachineOperand *Next;
} Reg;
-
+
/// OffsetedInfo - This struct contains the offset and an object identifier.
/// It represents the object with an optional offset from it.
struct {
@@ -121,10 +130,11 @@ private:
const GlobalValue *GV; // For MO_GlobalAddress.
const BlockAddress *BA; // For MO_BlockAddress.
} Val;
- int64_t Offset; // An offset from the object.
+ // Low bits of offset are in SmallContents.OffsetLo.
+ int OffsetHi; // An offset from the object, high 32 bits.
} OffsetedInfo;
} Contents;
-
+
explicit MachineOperand(MachineOperandType K) : OpKind(K), ParentMI(0) {
TargetFlags = 0;
}
@@ -132,17 +142,27 @@ public:
/// getType - Returns the MachineOperandType for this operand.
///
MachineOperandType getType() const { return (MachineOperandType)OpKind; }
-
+
unsigned char getTargetFlags() const { return TargetFlags; }
void setTargetFlags(unsigned char F) { TargetFlags = F; }
void addTargetFlag(unsigned char F) { TargetFlags |= F; }
-
+
/// getParent - Return the instruction that this operand belongs to.
///
MachineInstr *getParent() { return ParentMI; }
const MachineInstr *getParent() const { return ParentMI; }
-
+
+ /// clearParent - Reset the parent pointer.
+ ///
+ /// The MachineOperand copy constructor also copies ParentMI, expecting the
+ /// original to be deleted. If a MachineOperand is ever stored outside a
+ /// MachineInstr, the parent pointer must be cleared.
+ ///
+ /// Never call clearParent() on an operand in a MachineInstr.
+ ///
+ void clearParent() { ParentMI = 0; }
+
void print(raw_ostream &os, const TargetMachine *TM = 0) const;
//===--------------------------------------------------------------------===//
@@ -180,44 +200,44 @@ public:
/// getReg - Returns the register number.
unsigned getReg() const {
assert(isReg() && "This is not a register operand!");
- return Contents.Reg.RegNo;
+ return SmallContents.RegNo;
}
-
+
unsigned getSubReg() const {
assert(isReg() && "Wrong MachineOperand accessor");
return (unsigned)SubReg;
}
-
- bool isUse() const {
+
+ bool isUse() const {
assert(isReg() && "Wrong MachineOperand accessor");
return !IsDef;
}
-
+
bool isDef() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDef;
}
-
- bool isImplicit() const {
+
+ bool isImplicit() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsImp;
}
-
+
bool isDead() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsDead;
}
-
+
bool isKill() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsKill;
}
-
+
bool isUndef() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsUndef;
}
-
+
bool isEarlyClobber() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsEarlyClobber;
@@ -238,11 +258,11 @@ public:
//===--------------------------------------------------------------------===//
// Mutators for Register Operands
//===--------------------------------------------------------------------===//
-
+
/// Change the register this operand corresponds to.
///
void setReg(unsigned Reg);
-
+
void setSubReg(unsigned subReg) {
assert(isReg() && "Wrong MachineOperand accessor");
SubReg = (unsigned char)subReg;
@@ -266,14 +286,14 @@ public:
assert((Val || !isDebug()) && "Marking a debug operation as def");
IsDef = !Val;
}
-
+
void setIsDef(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((!Val || !isDebug()) && "Marking a debug operation as def");
IsDef = Val;
}
- void setImplicit(bool Val = true) {
+ void setImplicit(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
IsImp = Val;
}
@@ -283,7 +303,7 @@ public:
assert((!Val || !isDebug()) && "Marking a debug operation as kill");
IsKill = Val;
}
-
+
void setIsDead(bool Val = true) {
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
IsDead = Val;
@@ -293,7 +313,7 @@ public:
assert(isReg() && "Wrong MachineOperand accessor");
IsUndef = Val;
}
-
+
void setIsEarlyClobber(bool Val = true) {
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
IsEarlyClobber = Val;
@@ -307,17 +327,17 @@ public:
//===--------------------------------------------------------------------===//
// Accessors for various operand types.
//===--------------------------------------------------------------------===//
-
+
int64_t getImm() const {
assert(isImm() && "Wrong MachineOperand accessor");
return Contents.ImmVal;
}
-
+
const ConstantFP *getFPImm() const {
assert(isFPImm() && "Wrong MachineOperand accessor");
return Contents.CFP;
}
-
+
MachineBasicBlock *getMBB() const {
assert(isMBB() && "Wrong MachineOperand accessor");
return Contents.MBB;
@@ -328,7 +348,7 @@ public:
"Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.Index;
}
-
+
const GlobalValue *getGlobal() const {
assert(isGlobal() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.GV;
@@ -343,15 +363,16 @@ public:
assert(isMCSymbol() && "Wrong MachineOperand accessor");
return Contents.Sym;
}
-
+
/// getOffset - Return the offset from the symbol in this operand. This always
/// returns 0 for ExternalSymbol operands.
int64_t getOffset() const {
assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
- return Contents.OffsetedInfo.Offset;
+ return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+ SmallContents.OffsetLo;
}
-
+
const char *getSymbolName() const {
assert(isSymbol() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.SymbolName;
@@ -361,11 +382,11 @@ public:
assert(isMetadata() && "Wrong MachineOperand accessor");
return Contents.MD;
}
-
+
//===--------------------------------------------------------------------===//
// Mutators for various operand types.
//===--------------------------------------------------------------------===//
-
+
void setImm(int64_t immVal) {
assert(isImm() && "Wrong MachineOperand mutator");
Contents.ImmVal = immVal;
@@ -374,56 +395,57 @@ public:
void setOffset(int64_t Offset) {
assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
- Contents.OffsetedInfo.Offset = Offset;
+ SmallContents.OffsetLo = unsigned(Offset);
+ Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
}
-
+
void setIndex(int Idx) {
assert((isFI() || isCPI() || isJTI()) &&
"Wrong MachineOperand accessor");
Contents.OffsetedInfo.Val.Index = Idx;
}
-
+
void setMBB(MachineBasicBlock *MBB) {
assert(isMBB() && "Wrong MachineOperand accessor");
Contents.MBB = MBB;
}
-
+
//===--------------------------------------------------------------------===//
// Other methods.
//===--------------------------------------------------------------------===//
-
+
/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note: This method ignores isKill and isDead properties.
bool isIdenticalTo(const MachineOperand &Other) const;
-
+
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
void ChangeToImmediate(int64_t ImmVal);
-
+
/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value. If an operand is known to be a register already,
/// the setReg method should be used.
void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
bool isKill = false, bool isDead = false,
bool isUndef = false, bool isDebug = false);
-
+
//===--------------------------------------------------------------------===//
// Construction methods.
//===--------------------------------------------------------------------===//
-
+
static MachineOperand CreateImm(int64_t Val) {
MachineOperand Op(MachineOperand::MO_Immediate);
Op.setImm(Val);
return Op;
}
-
+
static MachineOperand CreateFPImm(const ConstantFP *CFP) {
MachineOperand Op(MachineOperand::MO_FPImmediate);
Op.Contents.CFP = CFP;
return Op;
}
-
+
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
bool isKill = false, bool isDead = false,
bool isUndef = false,
@@ -438,7 +460,7 @@ public:
Op.IsUndef = isUndef;
Op.IsEarlyClobber = isEarlyClobber;
Op.IsDebug = isDebug;
- Op.Contents.Reg.RegNo = Reg;
+ Op.SmallContents.RegNo = Reg;
Op.Contents.Reg.Prev = 0;
Op.Contents.Reg.Next = 0;
Op.SubReg = SubReg;
@@ -506,7 +528,7 @@ public:
Op.Contents.Sym = Sym;
return Op;
}
-
+
friend class MachineInstr;
friend class MachineRegisterInfo;
private:
@@ -521,7 +543,7 @@ private:
assert(isReg() && "Can only add reg operand to use lists");
return Contents.Reg.Prev != 0;
}
-
+
/// AddRegOperandToRegInfo - Add this register operand to the specified
/// MachineRegisterInfo. If it is null, then the next/prev fields should be
/// explicitly nulled out.
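Splitting the 64-bit offset into OffsetLo (in SmallContents) and OffsetHi (in the union) packs MachineOperand tighter without changing the getOffset()/setOffset() contract. The reassembly is plain bit arithmetic; as a standalone illustration:

    // How the two 32-bit halves round-trip a 64-bit offset.
    int64_t Offset = -12345678901LL;
    unsigned Lo = unsigned(Offset);  // low half, as stored in SmallContents
    int Hi = int(Offset >> 32);      // high half, as stored in OffsetedInfo
    int64_t RoundTrip = (int64_t(Hi) << 32) | Lo; // Lo zero-extends
    assert(RoundTrip == Offset && "halves must reassemble exactly");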
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 066c91b..79ff714 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -16,6 +16,9 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/DebugLoc.h"
#include <vector>
namespace llvm {
@@ -24,13 +27,12 @@ namespace llvm {
/// registers, including vreg register classes, use/def chains for registers,
/// etc.
class MachineRegisterInfo {
- /// VRegInfo - Information we keep for each virtual register. The entries in
- /// this vector are actually converted to vreg numbers by adding the
- /// TargetRegisterInfo::FirstVirtualRegister delta to their index.
+ /// VRegInfo - Information we keep for each virtual register.
///
/// Each element in this list contains the register class of the vreg and the
/// start of the use/def list for the register.
- std::vector<std::pair<const TargetRegisterClass*, MachineOperand*> > VRegInfo;
+ IndexedMap<std::pair<const TargetRegisterClass*, MachineOperand*>,
+ VirtReg2IndexFunctor> VRegInfo;
/// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
/// virtual registers. For each target register class, it keeps a list of
@@ -44,7 +46,7 @@ class MachineRegisterInfo {
/// register for allocation. For example, if the hint is <0, 1024>, it means
/// the allocator should prefer the physical register allocated to the virtual
/// register of the hint.
- std::vector<std::pair<unsigned, unsigned> > RegAllocHints;
+ IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints;
/// PhysRegUseDefLists - This is an array of the head of the use/def list for
/// physical registers.
@@ -64,7 +66,10 @@ class MachineRegisterInfo {
/// stored in the second element.
std::vector<std::pair<unsigned, unsigned> > LiveIns;
std::vector<unsigned> LiveOuts;
-
+
+ /// LiveInLocs - Keep track of debug locations for live-in registers.
+ DenseMap<unsigned, DebugLoc> LiveInLocs;
+
MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT
void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT
public:
@@ -159,17 +164,15 @@ public:
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
- if (RegNo < TargetRegisterInfo::FirstVirtualRegister)
- return PhysRegUseDefLists[RegNo];
- RegNo -= TargetRegisterInfo::FirstVirtualRegister;
- return VRegInfo[RegNo].second;
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
}
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
- if (RegNo < TargetRegisterInfo::FirstVirtualRegister)
- return PhysRegUseDefLists[RegNo];
- RegNo -= TargetRegisterInfo::FirstVirtualRegister;
- return VRegInfo[RegNo].second;
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
}
/// getVRegDef - Return the machine instr that defines the specified virtual
@@ -194,8 +197,6 @@ public:
/// getRegClass - Return the register class of the specified virtual register.
///
const TargetRegisterClass *getRegClass(unsigned Reg) const {
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- assert(Reg < VRegInfo.size() && "Invalid vreg!");
return VRegInfo[Reg].first;
}
@@ -203,16 +204,22 @@ public:
///
void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
+ /// constrainRegClass - Constrain the register class of the specified virtual
+ /// register to be a common subclass of RC and the current register class.
+ /// Return the new register class, or NULL if no such class exists.
+ /// This should only be used when the constraint is known to be trivial, like
+ /// GR32 -> GR32_NOSP. Beware of increasing register pressure.
+ const TargetRegisterClass *constrainRegClass(unsigned Reg,
+ const TargetRegisterClass *RC);
+
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
- /// getLastVirtReg - Return the highest currently assigned virtual register.
+ /// getNumVirtRegs - Return the number of virtual registers created.
///
- unsigned getLastVirtReg() const {
- return (unsigned)VRegInfo.size()+TargetRegisterInfo::FirstVirtualRegister-1;
- }
+ unsigned getNumVirtRegs() const { return VRegInfo.size(); }
/// getRegClassVirtRegs - Return the list of virtual registers of the given
/// target register class.
@@ -224,8 +231,6 @@ public:
/// setRegAllocationHint - Specify a register allocation hint for the
/// specified virtual register.
void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) {
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- assert(Reg < VRegInfo.size() && "Invalid vreg!");
RegAllocHints[Reg].first = Type;
RegAllocHints[Reg].second = PrefReg;
}
@@ -234,8 +239,6 @@ public:
/// specified virtual register.
std::pair<unsigned, unsigned>
getRegAllocationHint(unsigned Reg) const {
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- assert(Reg < VRegInfo.size() && "Invalid vreg!");
return RegAllocHints[Reg];
}
@@ -273,7 +276,12 @@ public:
LiveIns.push_back(std::make_pair(Reg, vreg));
}
void addLiveOut(unsigned Reg) { LiveOuts.push_back(Reg); }
-
+
+ /// addLiveInLoc - Keep track of location info for live in reg.
+ void addLiveInLoc(unsigned VReg, DebugLoc DL) {
+ LiveInLocs[VReg] = DL;
+ }
+
// Iteration support for live in/out sets. These sets are kept in sorted
// order by their register number.
typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
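With the FirstVirtualRegister delta gone, code distinguishes virtual from physical registers via TargetRegisterInfo::isVirtualRegister() and iterates vregs by index. A sketch of the migrated loop shape, assuming index2VirtReg is the companion helper from the same numbering rewrite and Visit is a caller-supplied function:

    // Old: for (Reg = FirstVirtualRegister; Reg <= MRI.getLastVirtReg(); ++Reg)
    // New: iterate indices, mapping each back to a register number.
    for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
      unsigned Reg = TargetRegisterInfo::index2VirtReg(i); // assumed helper
      if (MRI.getRegUseDefListHead(Reg)) // vreg has uses or defs
        Visit(Reg);
    }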
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRelocation.h b/contrib/llvm/include/llvm/CodeGen/MachineRelocation.h
index c316785..244b466 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRelocation.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRelocation.h
@@ -14,7 +14,7 @@
#ifndef LLVM_CODEGEN_MACHINERELOCATION_H
#define LLVM_CODEGEN_MACHINERELOCATION_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
diff --git a/contrib/llvm/lib/CodeGen/PBQP/Graph.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
index b2224cb..b2224cb 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/Graph.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
diff --git a/contrib/llvm/lib/CodeGen/PBQP/HeuristicBase.h b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
index 791c227..791c227 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/HeuristicBase.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
diff --git a/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicSolver.h
index 35514f9..35514f9 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicSolver.h
diff --git a/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
index 18eaf7c..47a287c 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
@@ -63,8 +63,12 @@ namespace PBQP {
SpillCostComparator(HeuristicSolverImpl<Briggs> &s)
: s(&s), g(&s.getGraph()) {}
bool operator()(Graph::NodeItr n1Itr, Graph::NodeItr n2Itr) const {
- PBQPNum cost1 = g->getNodeCosts(n1Itr)[0] / s->getSolverDegree(n1Itr),
- cost2 = g->getNodeCosts(n2Itr)[0] / s->getSolverDegree(n2Itr);
+ const PBQP::Vector &cv1 = g->getNodeCosts(n1Itr);
+ const PBQP::Vector &cv2 = g->getNodeCosts(n2Itr);
+
+ PBQPNum cost1 = cv1[0] / s->getSolverDegree(n1Itr);
+ PBQPNum cost2 = cv2[0] / s->getSolverDegree(n2Itr);
+
if (cost1 < cost2)
return true;
return false;
diff --git a/contrib/llvm/lib/CodeGen/PBQP/Math.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Math.h
index e7598bf..e7598bf 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/Math.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Math.h
diff --git a/contrib/llvm/lib/CodeGen/PBQP/Solution.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h
index 047fd04..57d9b95 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/Solution.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h
@@ -27,13 +27,18 @@ namespace PBQP {
class Solution {
private:
- typedef std::map<Graph::NodeItr, unsigned, NodeItrComparator> SelectionsMap;
+ typedef std::map<Graph::ConstNodeItr, unsigned,
+ NodeItrComparator> SelectionsMap;
SelectionsMap selections;
unsigned r0Reductions, r1Reductions, r2Reductions, rNReductions;
public:
+ /// \brief Initialise an empty solution.
+ Solution()
+ : r0Reductions(0), r1Reductions(0), r2Reductions(0), rNReductions(0) {}
+
/// \brief Number of nodes for which selections have been made.
/// @return Number of nodes for which selections have been made.
unsigned numNodes() const { return selections.size(); }
@@ -76,7 +81,7 @@ namespace PBQP {
/// \brief Get a node's selection.
/// @param nItr Node iterator.
/// @return The selection for nItr;
- unsigned getSelection(Graph::NodeItr nItr) const {
+ unsigned getSelection(Graph::ConstNodeItr nItr) const {
SelectionsMap::const_iterator sItr = selections.find(nItr);
assert(sItr != selections.end() && "No selection for node.");
return sItr->second;
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index 4762a39..53aee7a 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -45,10 +45,19 @@ namespace llvm {
///
extern char &MachineLoopInfoID;
+ /// MachineLoopRanges pass - This pass is an on-demand loop coverage
+ /// analysis pass.
+ ///
+ extern char &MachineLoopRangesID;
+
/// MachineDominators pass - This pass is a machine dominators analysis pass.
///
extern char &MachineDominatorsID;
+ /// EdgeBundles analysis - Bundle machine CFG edges.
+ ///
+ extern char &EdgeBundlesID;
+
/// PHIElimination pass - This pass eliminates machine instruction PHI nodes
/// by inserting copy instructions. This destroys SSA information, but is the
/// desired input for some register allocators. This pass is "required" by
@@ -66,6 +75,9 @@ namespace llvm {
extern char &PreAllocSplittingID;
+ /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+ extern char &LiveStacksID;
+
/// SimpleRegisterCoalescing pass. Aggressively coalesces every register
/// copy it can.
///
@@ -76,6 +88,11 @@ namespace llvm {
/// register allocators.
extern char &TwoAddressInstructionPassID;
+ /// SpillPlacement analysis. Suggest optimal placement of spill code between
+ /// basic blocks.
+ ///
+ extern char &SpillPlacementID;
+
/// UnreachableMachineBlockElimination pass - This pass removes unreachable
/// machine basic blocks.
extern char &UnreachableMachineBlockElimID;
@@ -95,6 +112,16 @@ namespace llvm {
///
FunctionPass *createFastRegisterAllocator();
+ /// BasicRegisterAllocation Pass - This pass implements a degenerate global
+ /// register allocator using the basic regalloc framework.
+ ///
+ FunctionPass *createBasicRegisterAllocator();
+
+ /// Greedy register allocation pass - This pass implements a global register
+ /// allocator for optimized builds.
+ ///
+ FunctionPass *createGreedyRegisterAllocator();
+
/// LinearScanRegisterAllocation Pass - This pass implements the linear scan
/// register allocation algorithm, a global register allocator.
///
@@ -103,7 +130,7 @@ namespace llvm {
/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
/// Quadratic Programming (PBQP) based register allocator.
///
- FunctionPass *createPBQPRegisterAllocator();
+ FunctionPass *createDefaultPBQPRegisterAllocator();
/// SimpleRegisterCoalescing Pass - Coalesce all copies possible. Can run
/// independently of the register allocator.
@@ -188,7 +215,7 @@ namespace llvm {
/// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
- FunctionPass *createMachineVerifierPass();
+ FunctionPass *createMachineVerifierPass(const char *Banner = 0);
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
@@ -205,6 +232,10 @@ namespace llvm {
/// addressing.
FunctionPass *createLocalStackSlotAllocationPass();
+ /// createExpandISelPseudosPass - This pass expands pseudo-instructions.
+ ///
+ FunctionPass *createExpandISelPseudosPass();
+
} // End llvm namespace
#endif
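The new factories slot into the usual pipeline construction; a sketch of choosing the new allocators, assuming a PassManagerBase reference PM as in a typical addPassesToEmitFile flow:

    PM.add(createGreedyRegisterAllocator());   // optimized builds
    // PM.add(createBasicRegisterAllocator()); // simple global allocator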
diff --git a/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
deleted file mode 100644
index 24d73cb..0000000
--- a/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
+++ /dev/null
@@ -1,94 +0,0 @@
-//=- llvm/CodeGen/PostRAHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PostRAHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/System/DataTypes.h"
-
-#include <cassert>
-#include <cstring>
-#include <string>
-
-namespace llvm {
-
-class InstrItineraryData;
-class SUnit;
-
-class PostRAHazardRecognizer : public ScheduleHazardRecognizer {
- // ScoreBoard to track function unit usage. ScoreBoard[0] is a
- // mask of the FUs in use in the cycle currently being
- // schedule. ScoreBoard[1] is a mask for the next cycle. The
- // ScoreBoard is used as a circular buffer with the current cycle
- // indicated by Head.
- class ScoreBoard {
- unsigned *Data;
-
- // The maximum number of cycles monitored by the Scoreboard. This
- // value is determined based on the target itineraries to ensure
- // that all hazards can be tracked.
- size_t Depth;
- // Indices into the Scoreboard that represent the current cycle.
- size_t Head;
- public:
- ScoreBoard():Data(NULL), Depth(0), Head(0) { }
- ~ScoreBoard() {
- delete[] Data;
- }
-
- size_t getDepth() const { return Depth; }
- unsigned& operator[](size_t idx) const {
- assert(Depth && "ScoreBoard was not initialized properly!");
-
- return Data[(Head + idx) % Depth];
- }
-
- void reset(size_t d = 1) {
- if (Data == NULL) {
- Depth = d;
- Data = new unsigned[Depth];
- }
-
- memset(Data, 0, Depth * sizeof(Data[0]));
- Head = 0;
- }
-
- void advance() {
- Head = (Head + 1) % Depth;
- }
-
- // Print the scoreboard.
- void dump() const;
- };
-
- // Itinerary data for the target.
- const InstrItineraryData &ItinData;
-
- ScoreBoard ReservedScoreboard;
- ScoreBoard RequiredScoreboard;
-
-public:
- PostRAHazardRecognizer(const InstrItineraryData &ItinData);
-
- virtual HazardType getHazardType(SUnit *SU);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h b/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
index 1d743c1..e2ab899 100644
--- a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
+++ b/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
@@ -31,7 +31,9 @@ namespace llvm {
public:
static char ID;
- ProcessImplicitDefs() : MachineFunctionPass(ID) {}
+ ProcessImplicitDefs() : MachineFunctionPass(ID) {
+ initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &au) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
new file mode 100644
index 0000000..7e8745e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
@@ -0,0 +1,167 @@
+//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPBuilder interface, for classes which build PBQP
+// instances to represent register allocation problems, and the RegAllocPBQP
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
+#define LLVM_CODEGEN_REGALLOCPBQP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Solution.h"
+
+#include <map>
+#include <set>
+
+namespace llvm {
+
+ class LiveIntervals;
+ class MachineFunction;
+ class MachineLoopInfo;
+
+ /// This class wraps up a PBQP instance representing a register allocation
+ /// problem, plus the structures necessary to map back from the PBQP solution
+ /// to a register allocation solution (i.e. the PBQP-node <--> vreg map,
+ /// and the PBQP option <--> storage location map).
+
+ class PBQPRAProblem {
+ public:
+
+ typedef SmallVector<unsigned, 16> AllowedSet;
+
+ PBQP::Graph& getGraph() { return graph; }
+
+ const PBQP::Graph& getGraph() const { return graph; }
+
+ /// Record the mapping between the given virtual register and PBQP node,
+ /// and the set of allowed pregs for the vreg.
+ ///
+ /// If you are extending
+ /// PBQPBuilder you are unlikely to need this: Nodes and options for all
+ /// vregs will already have been set up for you by the base class.
+ template <typename AllowedRegsItr>
+ void recordVReg(unsigned vreg, PBQP::Graph::NodeItr node,
+ AllowedRegsItr arBegin, AllowedRegsItr arEnd) {
+ assert(node2VReg.find(node) == node2VReg.end() && "Re-mapping node.");
+ assert(vreg2Node.find(vreg) == vreg2Node.end() && "Re-mapping vreg.");
+ assert(allowedSets[vreg].empty() && "vreg already has pregs.");
+
+ node2VReg[node] = vreg;
+ vreg2Node[vreg] = node;
+ std::copy(arBegin, arEnd, std::back_inserter(allowedSets[vreg]));
+ }
+
+ /// Get the virtual register corresponding to the given PBQP node.
+ unsigned getVRegForNode(PBQP::Graph::ConstNodeItr node) const;
+
+ /// Get the PBQP node corresponding to the given virtual register.
+ PBQP::Graph::NodeItr getNodeForVReg(unsigned vreg) const;
+
+ /// Returns true if the given PBQP option represents a physical register,
+ /// false otherwise.
+ bool isPRegOption(unsigned vreg, unsigned option) const {
+ // At present we only have spills or pregs, so anything that's not a
+ // spill is a preg. (This might be extended one day to support remat).
+ return !isSpillOption(vreg, option);
+ }
+
+ /// Returns true if the given PBQP option represents spilling, false
+ /// otherwise.
+ bool isSpillOption(unsigned vreg, unsigned option) const {
+ // We hardcode option zero as the spill option.
+ return option == 0;
+ }
+
+ /// Returns the allowed set for the given virtual register.
+ const AllowedSet& getAllowedSet(unsigned vreg) const;
+
+ /// Get PReg for option.
+ unsigned getPRegForOption(unsigned vreg, unsigned option) const;
+
+ private:
+
+ typedef std::map<PBQP::Graph::ConstNodeItr, unsigned,
+ PBQP::NodeItrComparator> Node2VReg;
+ typedef DenseMap<unsigned, PBQP::Graph::NodeItr> VReg2Node;
+ typedef std::map<unsigned, AllowedSet> AllowedSetMap;
+
+ PBQP::Graph graph;
+ Node2VReg node2VReg;
+ VReg2Node vreg2Node;
+
+ AllowedSetMap allowedSets;
+
+ };
+
+ /// Builds PBQP instances to represent register allocation problems. Includes
+ /// spill, interference and coalescing costs by default. You can extend this
+ /// class to support additional constraints for your architecture.
+ class PBQPBuilder {
+ private:
+ PBQPBuilder(const PBQPBuilder&) {}
+ void operator=(const PBQPBuilder&) {}
+ public:
+
+ typedef std::set<unsigned> RegSet;
+
+ /// Default constructor.
+ PBQPBuilder() {}
+
+ /// Clean up a PBQPBuilder.
+ virtual ~PBQPBuilder() {}
+
+ /// Build a PBQP instance to represent the register allocation problem for
+ /// the given MachineFunction.
+ virtual std::auto_ptr<PBQPRAProblem> build(
+ MachineFunction *mf,
+ const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs);
+ private:
+
+ void addSpillCosts(PBQP::Vector &costVec, PBQP::PBQPNum spillCost);
+
+ void addInterferenceCosts(PBQP::Matrix &costMat,
+ const PBQPRAProblem::AllowedSet &vr1Allowed,
+ const PBQPRAProblem::AllowedSet &vr2Allowed,
+ const TargetRegisterInfo *tri);
+ };
+
+ /// Extended builder which adds coalescing constraints to a problem.
+ class PBQPBuilderWithCoalescing : public PBQPBuilder {
+ public:
+
+ /// Build a PBQP instance to represent the register allocation problem for
+ /// the given MachineFunction.
+ virtual std::auto_ptr<PBQPRAProblem> build(
+ MachineFunction *mf,
+ const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs);
+
+ private:
+
+ void addPhysRegCoalesce(PBQP::Vector &costVec, unsigned pregOption,
+ PBQP::PBQPNum benefit);
+
+ void addVirtRegCoalesce(PBQP::Matrix &costMat,
+ const PBQPRAProblem::AllowedSet &vr1Allowed,
+ const PBQPRAProblem::AllowedSet &vr2Allowed,
+ PBQP::PBQPNum benefit);
+ };
+
+ FunctionPass* createPBQPRegisterAllocator(std::auto_ptr<PBQPBuilder> builder);
+}
+
+#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
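Because build() is virtual and the base class records every vreg's PBQP node via recordVReg, a target can add its own constraints by post-processing the problem the base class constructs. A hedged sketch; the subclass name and the cost tweak are invented:

    class MyTargetPBQPBuilder : public PBQPBuilderWithCoalescing {
    public:
      virtual std::auto_ptr<PBQPRAProblem> build(MachineFunction *mf,
                                                 const LiveIntervals *lis,
                                                 const MachineLoopInfo *loopInfo,
                                                 const RegSet &vregs) {
        std::auto_ptr<PBQPRAProblem> p =
          PBQPBuilderWithCoalescing::build(mf, lis, loopInfo, vregs);
        // Adjust node costs in p->getGraph() here (target-specific).
        return p;
      }
    };
    // Plugged in via:
    //   createPBQPRegisterAllocator(
    //       std::auto_ptr<PBQPBuilder>(new MyTargetPBQPBuilder()));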
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h b/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
index 7644433..af0b394 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/IncludeFile.h"
+#include "llvm/Support/IncludeFile.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/ADT/SmallPtrSet.h"
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 076268b..3864ffd 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -221,6 +221,9 @@ namespace llvm {
}
};
+ template <>
+ struct isPodLike<SDep> { static const bool value = true; };
+
/// SUnit - Scheduling unit. This is a node in the scheduling DAG.
class SUnit {
private:
@@ -229,9 +232,8 @@ namespace llvm {
public:
SUnit *OrigNode; // If not this, the node from which
// this node was cloned.
-
- // Preds/Succs - The SUnits before/after us in the graph. The boolean value
- // is true if the edge is a token chain edge, false if it is a value edge.
+
+ // Preds/Succs - The SUnits before/after us in the graph.
SmallVector<SDep, 4> Preds; // All sunit predecessors.
SmallVector<SDep, 4> Succs; // All sunit successors.
@@ -242,11 +244,13 @@ namespace llvm {
unsigned NodeNum; // Entry # of node in the node vector.
unsigned NodeQueueId; // Queue id of node.
- unsigned short Latency; // Node latency.
unsigned NumPreds; // # of SDep::Data preds.
unsigned NumSuccs; // # of SDep::Data succs.
unsigned NumPredsLeft; // # of preds not scheduled.
unsigned NumSuccsLeft; // # of succs not scheduled.
+ unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
+ unsigned short Latency; // Node latency.
+ bool isCall : 1; // Is a function call.
bool isTwoAddress : 1; // Is a two-address instruction.
bool isCommutable : 1; // Is a commutable instruction.
bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
@@ -267,13 +271,14 @@ namespace llvm {
public:
const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
const TargetRegisterClass *CopySrcRC;
-
+
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
/// an SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
: Node(node), Instr(0), OrigNode(0), NodeNum(nodenum),
- NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
@@ -285,8 +290,9 @@ namespace llvm {
/// a MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
: Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum),
- NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
@@ -297,8 +303,9 @@ namespace llvm {
/// SUnit - Construct a placeholder SUnit.
SUnit()
: Node(0), Instr(0), OrigNode(0), NodeNum(~0u),
- NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
@@ -320,6 +327,10 @@ namespace llvm {
return Node;
}
+ /// isInstr - Return true if this SUnit refers to a machine instruction as
+ /// opposed to an SDNode.
+ bool isInstr() const { return Instr; }
+
/// setInstr - Assign the instruction for the SUnit.
/// This may be used during post-regalloc scheduling.
void setInstr(MachineInstr *MI) {
@@ -337,7 +348,7 @@ namespace llvm {
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
- void addPred(const SDep &D);
+ bool addPred(const SDep &D);
/// removePred - This removes the specified edge as a pred of the current
/// node if it exists. It also removes the current node as a successor of
@@ -347,7 +358,7 @@ namespace llvm {
/// getDepth - Return the depth of this node, which is the length of the
/// maximum path up to any node which has no predecessors.
unsigned getDepth() const {
- if (!isDepthCurrent)
+ if (!isDepthCurrent)
const_cast<SUnit *>(this)->ComputeDepth();
return Depth;
}
@@ -355,7 +366,7 @@ namespace llvm {
/// getHeight - Return the height of this node, which is the length of the
/// maximum path down to any node which has no successors.
unsigned getHeight() const {
- if (!isHeightCurrent)
+ if (!isHeightCurrent)
const_cast<SUnit *>(this)->ComputeHeight();
return Height;
}
@@ -387,7 +398,7 @@ namespace llvm {
return true;
return false;
}
-
+
/// isSucc - Test if node N is a successor of this node.
bool isSucc(SUnit *N) {
for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
@@ -408,25 +419,38 @@ namespace llvm {
//===--------------------------------------------------------------------===//
/// SchedulingPriorityQueue - This interface is used to plug different
/// priorities computation algorithms into the list scheduler. It implements
- /// the interface of a standard priority queue, where nodes are inserted in
+ /// the interface of a standard priority queue, where nodes are inserted in
/// arbitrary order and returned in priority order. The computation of the
/// priority and the representation of the queue are totally up to the
/// implementation to decide.
- ///
+ ///
class SchedulingPriorityQueue {
unsigned CurCycle;
+ bool HasReadyFilter;
public:
- SchedulingPriorityQueue() : CurCycle(0) {}
+ SchedulingPriorityQueue(bool rf = false):
+ CurCycle(0), HasReadyFilter(rf) {}
virtual ~SchedulingPriorityQueue() {}
-
+
+ virtual bool isBottomUp() const = 0;
+
virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
virtual void addNode(const SUnit *SU) = 0;
virtual void updateNode(const SUnit *SU) = 0;
virtual void releaseState() = 0;
virtual bool empty() const = 0;
+
+ bool hasReadyFilter() const { return HasReadyFilter; }
+
+ virtual bool tracksRegPressure() const { return false; }
+
+ virtual bool isReady(SUnit *) const {
+ assert(!HasReadyFilter && "The ready filter must override isReady()");
+ return true;
+ }
virtual void push(SUnit *U) = 0;
-
+
void push_all(const std::vector<SUnit *> &Nodes) {
for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
E = Nodes.end(); I != E; ++I)
@@ -437,6 +461,8 @@ namespace llvm {
virtual void remove(SUnit *SU) = 0;
+ virtual void dump(ScheduleDAG *) const {}
+
/// ScheduledNode - As each node is scheduled, this method is invoked. This
/// allows the priority function to adjust the priority of related
/// unscheduled nodes, for example.
@@ -451,7 +477,7 @@ namespace llvm {
unsigned getCurCycle() const {
return CurCycle;
- }
+ }
};
class ScheduleDAG {
@@ -473,11 +499,18 @@ namespace llvm {
virtual ~ScheduleDAG();
+ /// getInstrDesc - Return the TargetInstrDesc of this SUnit.
+ /// Return NULL for SDNodes without a machine opcode.
+ const TargetInstrDesc *getInstrDesc(const SUnit *SU) const {
+ if (SU->isInstr()) return &SU->getInstr()->getDesc();
+ return getNodeDesc(SU->getNode());
+ }
+
/// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
/// using 'dot'.
///
void viewGraph();
-
+
/// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
/// according to the order specified in Sequence.
///
@@ -536,6 +569,10 @@ namespace llvm {
void EmitNoop();
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
+
+ private:
+ // Return the TargetInstrDesc of this SDNode or NULL.
+ const TargetInstrDesc *getNodeDesc(const SDNode *Node) const;
};
class SUnitIterator : public std::iterator<std::forward_iterator_tag,
@@ -627,7 +664,7 @@ namespace llvm {
/// Visited - a set of nodes visited during a DFS traversal.
BitVector Visited;
- /// DFS - make a DFS traversal and mark all nodes affected by the
+ /// DFS - make a DFS traversal and mark all nodes affected by the
/// edge insertion. These nodes will later get new topological indexes
/// by means of the Shift method.
void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
@@ -642,7 +679,7 @@ namespace llvm {
public:
explicit ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits);
- /// InitDAGTopologicalSorting - create the initial topological
+ /// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
void InitDAGTopologicalSorting();
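The HasReadyFilter flag added to SchedulingPriorityQueue above establishes a small contract: a queue constructed with rf == true must override isReady(), or the base implementation asserts. A standalone sketch of that contract (ToyPriorityQueue, ToySUnit, and FilteredQueue are hypothetical stand-ins, not LLVM classes):

#include <cassert>

struct ToySUnit { int Priority; bool DepsDone; };

class ToyPriorityQueue {
  bool HasReadyFilter;
public:
  explicit ToyPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}
  virtual ~ToyPriorityQueue() {}
  virtual bool isReady(const ToySUnit &) const {
    // Mirrors the base-class behavior above: no filter means everything
    // in the queue may be handed to the scheduler.
    assert(!HasReadyFilter && "The ready filter must override isReady()");
    return true;
  }
};

class FilteredQueue : public ToyPriorityQueue {
public:
  FilteredQueue() : ToyPriorityQueue(/*rf=*/true) {}
  bool isReady(const ToySUnit &SU) const override {
    return SU.DepsDone; // only release nodes whose predecessors are done
  }
};

int main() {
  FilteredQueue Q;
  ToySUnit A = {1, false}, B = {2, true};
  assert(!Q.isReady(A) && Q.isReady(B));
}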
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
index 09e3e88..2f53baa 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -23,7 +23,15 @@ class SUnit;
/// issued this cycle, and whether or not a noop needs to be inserted to handle
/// the hazard.
class ScheduleHazardRecognizer {
+protected:
+ /// MaxLookAhead - Indicates the number of cycles in the scoreboard
+ /// state. This is important for restoring the state after backtracking. Additionally,
+ /// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
+ /// bypass virtual calls. Currently the PostRA scheduler ignores it.
+ unsigned MaxLookAhead;
+
public:
+ ScheduleHazardRecognizer(): MaxLookAhead(0) {}
virtual ~ScheduleHazardRecognizer();
enum HazardType {
@@ -32,6 +40,14 @@ public:
NoopHazard // This instruction can't be emitted, and needs noops.
};
+ unsigned getMaxLookAhead() const { return MaxLookAhead; }
+
+ bool isEnabled() const { return MaxLookAhead != 0; }
+
+ /// atIssueLimit - Return true if no more instructions may be issued in this
+ /// cycle.
+ virtual bool atIssueLimit() const { return false; }
+
/// getHazardType - Return the hazard type of emitting this node. There are
/// three possible results. Either:
/// * NoHazard: it is legal to issue this instruction on this cycle.
@@ -39,7 +55,7 @@ public:
/// other instruction is available, issue it first.
/// * NoopHazard: issuing this instruction would break the program. If
/// some other instruction can be issued, do so, otherwise issue a noop.
- virtual HazardType getHazardType(SUnit *) {
+ virtual HazardType getHazardType(SUnit *m, int Stalls) {
return NoHazard;
}
@@ -52,12 +68,18 @@ public:
/// emitted, to advance the hazard state.
virtual void EmitInstruction(SUnit *) {}
- /// AdvanceCycle - This callback is invoked when no instructions can be
- /// issued on this cycle without a hazard. This should increment the
+ /// AdvanceCycle - This callback is invoked whenever the next top-down
+ /// instruction to be scheduled cannot issue in the current cycle, either
+ /// because of latency or resource conflicts. This should increment the
/// internal state of the hazard recognizer so that previously "Hazard"
/// instructions will now not be hazards.
virtual void AdvanceCycle() {}
+ /// RecedeCycle - This callback is invoked whenever the next bottom-up
+ /// instruction to be scheduled cannot issue in the current cycle, either
+ /// because of latency or resource conflicts.
+ virtual void RecedeCycle() {}
+
/// EmitNoop - This callback is invoked when a noop was added to the
/// instruction stream.
virtual void EmitNoop() {
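The revised recognizer interface above separates top-down (AdvanceCycle) from bottom-up (RecedeCycle) progress and passes a Stalls offset to getHazardType(). A minimal standalone model of how a scheduler drives such a recognizer; the issue-width rule and ToyHazardRecognizer are hypothetical, not LLVM's implementation:

#include <cassert>

class ToyHazardRecognizer {
  unsigned IssueWidth = 2;  // assume at most two instructions per cycle
  unsigned IssueCount = 0;
public:
  enum HazardType { NoHazard, Hazard };
  HazardType getHazardType(int /*Stalls*/) const {
    return IssueCount < IssueWidth ? NoHazard : Hazard;
  }
  void EmitInstruction() { ++IssueCount; }
  void AdvanceCycle() { IssueCount = 0; } // top-down scheduler moved on
  void RecedeCycle()  { IssueCount = 0; } // bottom-up scheduler moved on
};

int main() {
  ToyHazardRecognizer HR;
  assert(HR.getHazardType(0) == ToyHazardRecognizer::NoHazard);
  HR.EmitInstruction();
  HR.EmitInstruction();          // issue limit reached for this cycle
  assert(HR.getHazardType(0) == ToyHazardRecognizer::Hazard);
  HR.AdvanceCycle();             // new cycle: the hazard clears
  assert(HR.getHazardType(0) == ToyHazardRecognizer::NoHazard);
}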
diff --git a/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
new file mode 100644
index 0000000..8850006
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -0,0 +1,129 @@
+//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScoreboardHazardRecognizer class, which
+// encapsulates hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Support/DataTypes.h"
+
+#include <cassert>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+
+class InstrItineraryData;
+class TargetInstrDesc;
+class ScheduleDAG;
+class SUnit;
+
+class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
+ // Scoreboard to track function unit usage. Scoreboard[0] is a
+ // mask of the FUs in use in the cycle currently being
+ // schedule. Scoreboard[1] is a mask for the next cycle. The
+ // Scoreboard is used as a circular buffer with the current cycle
+ // indicated by Head.
+ //
+ // Scoreboard always counts cycles in forward execution order. If used by a
+ // bottom-up scheduler, then the scoreboard cycles are the inverse of the
+ // scheduler's cycles.
+ class Scoreboard {
+ unsigned *Data;
+
+ // The maximum number of cycles monitored by the Scoreboard. This
+ // value is determined based on the target itineraries to ensure
+ // that all hazards can be tracked.
+ size_t Depth;
+ // Index into the Scoreboard that represents the current cycle.
+ size_t Head;
+ public:
+ Scoreboard():Data(NULL), Depth(0), Head(0) { }
+ ~Scoreboard() {
+ delete[] Data;
+ }
+
+ size_t getDepth() const { return Depth; }
+ unsigned& operator[](size_t idx) const {
+ // Depth is expected to be a power-of-2.
+ assert(Depth && !(Depth & (Depth - 1)) &&
+ "Scoreboard was not initialized properly!");
+
+ return Data[(Head + idx) & (Depth-1)];
+ }
+
+ void reset(size_t d = 1) {
+ if (Data == NULL) {
+ Depth = d;
+ Data = new unsigned[Depth];
+ }
+
+ memset(Data, 0, Depth * sizeof(Data[0]));
+ Head = 0;
+ }
+
+ void advance() {
+ Head = (Head + 1) & (Depth-1);
+ }
+
+ void recede() {
+ Head = (Head - 1) & (Depth-1);
+ }
+
+ // Print the scoreboard.
+ void dump() const;
+ };
+
+#ifndef NDEBUG
+ // Support for tracing ScoreboardHazardRecognizer as a component within
+ // another module. Follows the current thread-unsafe model of tracing.
+ static const char *DebugType;
+#endif
+
+ // Itinerary data for the target.
+ const InstrItineraryData *ItinData;
+
+ const ScheduleDAG *DAG;
+
+ /// IssueWidth - Max issue per cycle. 0=Unknown.
+ unsigned IssueWidth;
+
+ /// IssueCount - Count instructions issued in this cycle.
+ unsigned IssueCount;
+
+ Scoreboard ReservedScoreboard;
+ Scoreboard RequiredScoreboard;
+
+public:
+ ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+ const ScheduleDAG *DAG,
+ const char *ParentDebugType = "");
+
+ /// atIssueLimit - Return true if no more instructions may be issued in this
+ /// cycle.
+ virtual bool atIssueLimit() const;
+
+ // Stalls provides a cycle offset at which SU will be scheduled. It will be
+ // negative for bottom-up scheduling.
+ virtual HazardType getHazardType(SUnit *SU, int Stalls);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+ virtual void RecedeCycle();
+};
+
+}
+
+#endif //!LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
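The Scoreboard above relies on Depth being a power of two so that (Head + idx) & (Depth - 1) wraps around without a modulo, and advance()/recede() simply move Head in either direction. A self-contained sketch of that indexing (ToyScoreboard is a hypothetical stand-in):

#include <cassert>
#include <cstddef>

struct ToyScoreboard {
  static const size_t Depth = 8;   // must be a power of two
  unsigned Data[Depth] = {};
  size_t Head = 0;
  // Same masking trick as operator[] above: wrap without a modulo.
  unsigned &at(size_t idx) { return Data[(Head + idx) & (Depth - 1)]; }
  void advance() { Head = (Head + 1) & (Depth - 1); }
  void recede()  { Head = (Head - 1) & (Depth - 1); } // wraps even at 0
};

int main() {
  ToyScoreboard SB;
  SB.at(1) = 0xF;          // reserve a function unit one cycle ahead
  SB.advance();            // that cycle is now the current one
  assert(SB.at(0) == 0xF);
  SB.recede();             // a bottom-up scheduler walks the other way
  assert(SB.at(1) == 0xF);
}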
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 7723fa0..c9de95b 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -171,9 +171,6 @@ class SelectionDAG {
/// DbgInfo - Tracks dbg_value information through SDISel.
SDDbgInfo *DbgInfo;
- /// VerifyNode - Sanity check the given node. Aborts if it is invalid.
- void VerifyNode(SDNode *N);
-
/// setGraphColorHelper - Implementation of setSubgraphColor.
/// Return whether we had to truncate the search.
///
@@ -401,21 +398,21 @@ public:
}
// This version of the getCopyToReg method takes an extra operand, which
- // indicates that there is potentially an incoming flag value (if Flag is not
- // null) and that there should be a flag result.
+ // indicates that there is potentially an incoming glue value (if Glue is not
+ // null) and that there should be a glue result.
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N,
- SDValue Flag) {
- SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Flag };
- return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
+ SDValue Glue) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
+ return getNode(ISD::CopyToReg, dl, VTs, Ops, Glue.getNode() ? 4 : 3);
}
// Similar to last getCopyToReg() except parameter Reg is a SDValue
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, SDValue Reg, SDValue N,
- SDValue Flag) {
- SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, Reg, N, Flag };
- return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
+ SDValue Glue) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, Reg, N, Glue };
+ return getNode(ISD::CopyToReg, dl, VTs, Ops, Glue.getNode() ? 4 : 3);
}
SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, EVT VT) {
@@ -425,13 +422,13 @@ public:
}
// This version of the getCopyFromReg method takes an extra operand, which
- // indicates that there is potentially an incoming flag value (if Flag is not
- // null) and that there should be a flag result.
+ // indicates that there is potentially an incoming glue value (if Glue is not
+ // null) and that there should be a glue result.
SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, EVT VT,
- SDValue Flag) {
- SDVTList VTs = getVTList(VT, MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, getRegister(Reg, VT), Flag };
- return getNode(ISD::CopyFromReg, dl, VTs, Ops, Flag.getNode() ? 3 : 2);
+ SDValue Glue) {
+ SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
+ return getNode(ISD::CopyFromReg, dl, VTs, Ops, Glue.getNode() ? 3 : 2);
}
SDValue getCondCode(ISD::CondCode Cond);
@@ -465,27 +462,27 @@ public:
SDValue getNOT(DebugLoc DL, SDValue Val, EVT VT);
/// getCALLSEQ_START - Return a new CALLSEQ_START node, which always must have
- /// a flag result (to ensure it's not CSE'd). CALLSEQ_START does not have a
+ /// a glue result (to ensure it's not CSE'd). CALLSEQ_START does not have a
/// useful DebugLoc.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op) {
- SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Op };
return getNode(ISD::CALLSEQ_START, DebugLoc(), VTs, Ops, 2);
}
/// getCALLSEQ_END - Return a new CALLSEQ_END node, which always must have a
- /// flag result (to ensure it's not CSE'd). CALLSEQ_END does not have
+ /// glue result (to ensure it's not CSE'd). CALLSEQ_END does not have
/// a useful DebugLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
- SDValue InFlag) {
- SDVTList NodeTys = getVTList(MVT::Other, MVT::Flag);
+ SDValue InGlue) {
+ SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(Chain);
Ops.push_back(Op1);
Ops.push_back(Op2);
- Ops.push_back(InFlag);
+ Ops.push_back(InGlue);
return getNode(ISD::CALLSEQ_END, DebugLoc(), NodeTys, &Ops[0],
- (unsigned)Ops.size() - (InFlag.getNode() == 0 ? 1 : 0));
+ (unsigned)Ops.size() - (InGlue.getNode() == 0 ? 1 : 0));
}
/// getUNDEF - Return an UNDEF node. UNDEF does not have a useful DebugLoc.
@@ -542,17 +539,17 @@ public:
SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff);
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
SDValue getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff);
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
SDValue getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVol,
- const Value *DstSV, uint64_t DstSVOff);
+ MachinePointerInfo DstPtrInfo);
/// getSetCC - Helper function to make it easier to build SetCC's if you just
/// have an ISD::CondCode instead of an SDValue.
@@ -587,8 +584,8 @@ public:
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 3 operands
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
- SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal,
- unsigned Alignment=0);
+ SDValue Ptr, SDValue Cmp, SDValue Swp,
+ MachinePointerInfo PtrInfo, unsigned Alignment=0);
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp,
MachineMemOperand *MMO);
@@ -609,13 +606,13 @@ public:
SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
const EVT *VTs, unsigned NumVTs,
const SDValue *Ops, unsigned NumOps,
- EVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true);
SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
const SDValue *Ops, unsigned NumOps,
- EVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true);
@@ -630,19 +627,22 @@ public:
/// determined by their operands, and they produce a value AND a token chain.
///
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
- const Value *SV, int SVOffset, bool isVolatile,
- bool isNonTemporal, unsigned Alignment);
- SDValue getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
- SDValue Chain, SDValue Ptr, const Value *SV,
- int SVOffset, EVT MemVT, bool isVolatile,
- bool isNonTemporal, unsigned Alignment);
+ MachinePointerInfo PtrInfo, bool isVolatile,
+ bool isNonTemporal, unsigned Alignment,
+ const MDNode *TBAAInfo = 0);
+ SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+ SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
+ EVT MemVT, bool isVolatile,
+ bool isNonTemporal, unsigned Alignment,
+ const MDNode *TBAAInfo = 0);
SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
- const Value *SV, int SVOffset, EVT MemVT,
- bool isVolatile, bool isNonTemporal, unsigned Alignment);
+ MachinePointerInfo PtrInfo, EVT MemVT,
+ bool isVolatile, bool isNonTemporal, unsigned Alignment,
+ const MDNode *TBAAInfo = 0);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
@@ -651,14 +651,16 @@ public:
/// getStore - Helper function to build ISD::STORE nodes.
///
SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
- const Value *SV, int SVOffset, bool isVolatile,
- bool isNonTemporal, unsigned Alignment);
+ MachinePointerInfo PtrInfo, bool isVolatile,
+ bool isNonTemporal, unsigned Alignment,
+ const MDNode *TBAAInfo = 0);
SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
- const Value *SV, int SVOffset, EVT TVT,
+ MachinePointerInfo PtrInfo, EVT TVT,
bool isNonTemporal, bool isVolatile,
- unsigned Alignment);
+ unsigned Alignment,
+ const MDNode *TBAAInfo = 0);
SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
@@ -899,6 +901,9 @@ public:
SmallVector<SDDbgValue*,2> &GetDbgValues(const SDNode* SD) {
return DbgInfo->getSDDbgValues(SD);
}
+
+ /// TransferDbgValues - Transfer SDDbgValues.
+ void TransferDbgValues(SDValue From, SDValue To);
/// hasDebugValues - Return true if there are any SDDbgValue nodes associated
/// with this SelectionDAG.
@@ -961,6 +966,13 @@ public:
/// class to allow target nodes to be understood.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
+ /// isBaseWithConstantOffset - Return true if the specified operand is an
+ /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
+ /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
+ /// semantics as an ADD. This handles the equivalence:
+ /// X|Cst == X+Cst iff X&Cst = 0.
+ bool isBaseWithConstantOffset(SDValue Op) const;
+
/// isKnownNeverNaN - Test whether the given SDValue is known to never be NaN.
bool isKnownNeverNaN(SDValue Op) const;
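isBaseWithConstantOffset() above rests on the identity X | Cst == X + Cst whenever X & Cst == 0: the OR only sets bits that are zero in X, so the addition cannot carry. A quick standalone check with plain integers (the real function operates on SDValues):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xABCD0000, Cst = 0x0000FFFF;
  assert((X & Cst) == 0);         // the precondition: no common bits
  assert((X | Cst) == X + Cst);   // hence OR behaves exactly like ADD
  // Without the precondition the identity fails:
  uint32_t Y = 0x1, C2 = 0x1;
  assert((Y | C2) != Y + C2);     // 1 | 1 == 1, but 1 + 1 == 2
}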
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index 01d05dd..62358e7 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -34,7 +34,8 @@ namespace llvm {
class ScheduleHazardRecognizer;
class GCFunctionInfo;
class ScheduleDAGSDNodes;
-
+ class LoadInst;
+
/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
/// pattern-matching instruction selectors.
class SelectionDAGISel : public MachineFunctionPass {
@@ -54,7 +55,7 @@ public:
explicit SelectionDAGISel(const TargetMachine &tm,
CodeGenOpt::Level OL = CodeGenOpt::Default);
virtual ~SelectionDAGISel();
-
+
const TargetLowering &getTargetLowering() { return TLI; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
@@ -62,18 +63,18 @@ public:
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual void EmitFunctionEntryCode() {}
-
+
/// PreprocessISelDAG - This hook allows targets to hack on the graph before
/// instruction selection starts.
virtual void PreprocessISelDAG() {}
-
+
/// PostprocessISelDAG() - This hook allows the target to hack on the graph
/// right after selection.
virtual void PostprocessISelDAG() {}
-
+
/// Select - Main hook targets implement to select a node.
virtual SDNode *Select(SDNode *N) = 0;
-
+
/// SelectInlineAsmMemoryOperand - Select the specified address as a target
/// addressing mode, according to the specified constraint code. If this does
/// not match or is not implemented, return true. The resultant operands
@@ -91,25 +92,20 @@ public:
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
- /// FIXME: This is a static member function because the PIC16 target,
- /// which uses it during lowering.
+ /// FIXME: This is a static member function because the MSP430/SystemZ/X86
+ /// targets, which use it during isel. This could become a proper member.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
CodeGenOpt::Level OptLevel,
bool IgnoreChains = false);
- /// CreateTargetHazardRecognizer - Return a newly allocated hazard recognizer
- /// to use for this target when scheduling the DAG.
- virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer();
-
-
// Opcodes used by the DAG state machine:
enum BuiltinOpcodes {
OPC_Scope,
OPC_RecordNode,
- OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
+ OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
OPC_RecordMemRef,
- OPC_CaptureFlagInput,
+ OPC_CaptureGlueInput,
OPC_MoveChild,
OPC_MoveParent,
OPC_CheckSame,
@@ -128,7 +124,7 @@ public:
OPC_CheckComplexPat,
OPC_CheckAndImm, OPC_CheckOrImm,
OPC_CheckFoldableChainNode,
-
+
OPC_EmitInteger,
OPC_EmitRegister,
OPC_EmitConvertToTarget,
@@ -139,15 +135,15 @@ public:
OPC_EmitNodeXForm,
OPC_EmitNode,
OPC_MorphNodeTo,
- OPC_MarkFlagResults,
+ OPC_MarkGlueResults,
OPC_CompleteMatch
};
-
+
enum {
- OPFL_None = 0, // Node has no chain or flag input and isn't variadic.
+ OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
OPFL_Chain = 1, // Node has a chain input.
- OPFL_FlagInput = 2, // Node has a flag input.
- OPFL_FlagOutput = 4, // Node has a flag output.
+ OPFL_GlueInput = 2, // Node has a glue input.
+ OPFL_GlueOutput = 4, // Node has a glue output.
OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
OPFL_Variadic0 = 1<<4, // Node is variadic, root has 0 fixed inputs.
OPFL_Variadic1 = 2<<4, // Node is variadic, root has 1 fixed input.
@@ -156,37 +152,37 @@ public:
OPFL_Variadic4 = 5<<4, // Node is variadic, root has 4 fixed inputs.
OPFL_Variadic5 = 6<<4, // Node is variadic, root has 5 fixed inputs.
OPFL_Variadic6 = 7<<4, // Node is variadic, root has 6 fixed inputs.
-
+
OPFL_VariadicInfo = OPFL_Variadic6
};
-
+
/// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
/// number of fixed arity values that should be skipped when copying from the
/// root.
static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
return ((Flags&OPFL_VariadicInfo) >> 4)-1;
}
-
-
+
+
protected:
/// DAGSize - Size of DAG being instruction selected.
///
unsigned DAGSize;
-
+
/// ISelPosition - Node iterator marking the current position of
/// instruction selection as it proceeds through the topologically-sorted
/// node list.
SelectionDAG::allnodes_iterator ISelPosition;
-
- /// ISelUpdater - helper class to handle updates of the
+
+ /// ISelUpdater - helper class to handle updates of the
/// instruction selection graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
SelectionDAG::allnodes_iterator &ISelPosition;
public:
explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp)
: ISelPosition(isp) {}
-
+
/// NodeDeleted - Handle nodes deleted from the graph. If the
/// node being deleted is the current ISelPosition node, update
/// ISelPosition.
@@ -195,46 +191,46 @@ protected:
if (ISelPosition == SelectionDAG::allnodes_iterator(N))
++ISelPosition;
}
-
+
/// NodeUpdated - Ignore updates for now.
virtual void NodeUpdated(SDNode *N) {}
};
-
+
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDValue F, SDValue T) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesOfValueWith(F, T, &ISU);
}
-
+
/// ReplaceUses - replace all uses of the old nodes F with the use
/// of the new nodes T.
void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num, &ISU);
}
-
+
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDNode *F, SDNode *T) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesWith(F, T, &ISU);
}
-
+
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops);
-
+
public:
// Calls to these predicates are generated by tblgen.
bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const;
bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const;
-
-
+
+
/// CheckPatternPredicate - This function is generated by tblgen in the
/// target. It runs the specified pattern predicate and returns true if it
/// succeeds or false if it fails. The number is a private implementation
@@ -252,13 +248,14 @@ public:
assert(0 && "Tblgen should generate the implementation of this!");
return 0;
}
-
- virtual bool CheckComplexPattern(SDNode *Root, SDValue N, unsigned PatternNo,
- SmallVectorImpl<SDValue> &Result) {
+
+ virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
+ unsigned PatternNo,
+ SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
assert(0 && "Tblgen should generate the implementation of this!");
return false;
}
-
+
virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
assert(0 && "Tblgen shoudl generate this!");
return SDValue();
@@ -267,9 +264,9 @@ public:
SDNode *SelectCodeCommon(SDNode *NodeToMatch,
const unsigned char *MatcherTable,
unsigned TableSize);
-
+
private:
-
+
// Calls to these functions are generated by tblgen.
SDNode *Select_INLINEASM(SDNode *N);
SDNode *Select_UNDEF(SDNode *N);
@@ -279,9 +276,10 @@ private:
void DoInstructionSelection();
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
-
+
void PrepareEHLandingPad();
void SelectAllBasicBlocks(const Function &Fn);
+ bool TryToFoldFastISelLoad(const LoadInst *LI, FastISel *FastIS);
void FinishBasicBlock();
void SelectBasicBlock(BasicBlock::const_iterator Begin,
@@ -289,7 +287,7 @@ private:
bool &HadTailCall);
void CodeGenAndEmitDAG();
void LowerArguments(const BasicBlock *BB);
-
+
void ComputeLiveOutVRegInfo();
/// Create the scheduler. If a specific scheduler was specified
@@ -297,16 +295,16 @@ private:
/// one preferred by the target.
///
ScheduleDAGSDNodes *CreateScheduler();
-
+
/// OpcodeOffset - This is a cache used to dispatch efficiently into isel
/// state machines that start with an OPC_SwitchOpcode node.
std::vector<unsigned> OpcodeOffset;
-
- void UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
- const SmallVectorImpl<SDNode*> &ChainNodesMatched,
- SDValue InputFlag,const SmallVectorImpl<SDNode*> &F,
- bool isMorphNodeTo);
-
+
+ void UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
+ const SmallVectorImpl<SDNode*> &ChainNodesMatched,
+ SDValue InputGlue, const SmallVectorImpl<SDNode*> &F,
+ bool isMorphNodeTo);
+
};
}
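The OPFL_VariadicN encoding above stores the fixed-operand count N as (N + 1) << 4, so getNumFixedFromVariadicInfo() recovers it by masking, shifting, and subtracting one. A standalone check of that arithmetic (a sketch, not the tblgen-generated tables):

#include <cassert>

enum {
  OPFL_VariadicInfo = 7 << 4   // mask covering Variadic0..Variadic6
};

static int getNumFixed(unsigned Flags) {
  // Same expression as getNumFixedFromVariadicInfo() above.
  return int((Flags & OPFL_VariadicInfo) >> 4) - 1;
}

int main() {
  const unsigned OPFL_Variadic0 = 1 << 4, OPFL_Variadic3 = 4 << 4;
  assert(getNumFixed(OPFL_Variadic0) == 0);
  assert(getNumFixed(OPFL_Variadic3) == 3);
  // Low flag bits (chain/glue) do not disturb the variadic field:
  assert(getNumFixed(OPFL_Variadic3 | /*OPFL_Chain=*/1) == 3);
}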
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 4cf6f36..6454639 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -29,7 +29,7 @@
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
#include <cassert>
@@ -524,24 +524,24 @@ public:
return X;
}
- /// getFlaggedNode - If this node has a flag operand, return the node
- /// to which the flag operand points. Otherwise return NULL.
- SDNode *getFlaggedNode() const {
+ /// getGluedNode - If this node has a glue operand, return the node
+ /// to which the glue operand points. Otherwise return NULL.
+ SDNode *getGluedNode() const {
if (getNumOperands() != 0 &&
- getOperand(getNumOperands()-1).getValueType().getSimpleVT() == MVT::Flag)
+ getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
return getOperand(getNumOperands()-1).getNode();
return 0;
}
// If this is a pseudo op, like copyfromreg, look to see if there is a
- // real target node flagged to it. If so, return the target node.
- const SDNode *getFlaggedMachineNode() const {
+ // real target node glued to it. If so, return the target node.
+ const SDNode *getGluedMachineNode() const {
const SDNode *FoundNode = this;
- // Climb up flag edges until a machine-opcode node is found, or the
+ // Climb up glue edges until a machine-opcode node is found, or the
// end of the chain is reached.
while (!FoundNode->isMachineOpcode()) {
- const SDNode *N = FoundNode->getFlaggedNode();
+ const SDNode *N = FoundNode->getGluedNode();
if (!N) break;
FoundNode = N;
}
@@ -549,11 +549,11 @@ public:
return FoundNode;
}
- /// getFlaggedUser - If this node has a flag value with a user, return
+ /// getGluedUser - If this node has a glue value with a user, return
/// the user (there is at most one). Otherwise return NULL.
- SDNode *getFlaggedUser() const {
+ SDNode *getGluedUser() const {
for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
- if (UI.getUse().get().getValueType() == MVT::Flag)
+ if (UI.getUse().get().getValueType() == MVT::Glue)
return *UI;
return 0;
}
@@ -902,6 +902,9 @@ public:
const Value *getSrcValue() const { return MMO->getValue(); }
int64_t getSrcValueOffset() const { return MMO->getOffset(); }
+ /// Returns the TBAAInfo that describes the dereference.
+ const MDNode *getTBAAInfo() const { return MMO->getTBAAInfo(); }
+
/// getMemoryVT - Return the type of the in-memory value.
EVT getMemoryVT() const { return MemoryVT; }
@@ -909,6 +912,10 @@ public:
/// reference performed by operation.
MachineMemOperand *getMemOperand() const { return MMO; }
+ const MachinePointerInfo &getPointerInfo() const {
+ return MMO->getPointerInfo();
+ }
+
/// refineAlignment - Update this MemSDNode's MachineMemOperand information
/// to reflect the alignment of NewMMO, if it has a greater alignment.
/// This must only be used when the new alignment applies to all users of
@@ -929,6 +936,7 @@ public:
// with either an intrinsic or a target opcode.
return N->getOpcode() == ISD::LOAD ||
N->getOpcode() == ISD::STORE ||
+ N->getOpcode() == ISD::PREFETCH ||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
N->getOpcode() == ISD::ATOMIC_SWAP ||
N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
@@ -1004,8 +1012,8 @@ public:
/// MemIntrinsicSDNode - This SDNode is used for target intrinsics that touch
/// memory and need an associated MachineMemOperand. Its opcode may be
-/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, or a target-specific opcode with a
-/// value not less than FIRST_TARGET_MEMORY_OPCODE.
+/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
+/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
MemIntrinsicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
@@ -1021,6 +1029,7 @@ public:
// early, so a node with a target opcode can be of this class
return N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->getOpcode() == ISD::PREFETCH ||
N->isTargetMemoryOpcode();
}
};
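getGluedMachineNode() above climbs glue edges, which are carried as a node's last, MVT::Glue-typed operand, until it reaches a node with a real machine opcode. A standalone model of the idiom (ToyNode is hypothetical; the glue operand is reduced to a single pointer):

#include <cassert>

struct ToyNode {
  bool HasMachineOpcode;
  ToyNode *Glued;   // node our glue operand points at, or null
};

static const ToyNode *getGluedMachineNode(const ToyNode *N) {
  // Climb up glue edges until a machine-opcode node is found, or the
  // end of the chain is reached.
  while (!N->HasMachineOpcode && N->Glued)
    N = N->Glued;
  return N;
}

int main() {
  ToyNode Real = {true, 0};
  ToyNode CopyFromReg = {false, &Real};   // pseudo op glued to Real
  assert(getGluedMachineNode(&CopyFromReg) == &Real);
}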
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
index 88044c7..1da1e91be 100644
--- a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -13,10 +13,7 @@
//
// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
// is held by LiveIntervals and provides the real numbering. This allows
-// LiveIntervals to perform largely transparent renumbering. The SlotIndex
-// class does hold a PHI bit, which determines whether the index relates to a
-// PHI use or def point, or an actual instruction. See the SlotIndex class
-// description for futher information.
+// LiveIntervals to perform largely transparent renumbering.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
@@ -130,12 +127,10 @@ namespace llvm {
enum Slot { LOAD, USE, DEF, STORE, NUM };
- static const unsigned PHI_BIT = 1 << 2;
+ PointerIntPair<IndexListEntry*, 2, unsigned> lie;
- PointerIntPair<IndexListEntry*, 3, unsigned> lie;
-
- SlotIndex(IndexListEntry *entry, unsigned phiAndSlot)
- : lie(entry, phiAndSlot) {
+ SlotIndex(IndexListEntry *entry, unsigned slot)
+ : lie(entry, slot) {
assert(entry != 0 && "Attempt to construct index with 0 pointer.");
}
@@ -149,7 +144,7 @@ namespace llvm {
/// Returns the slot for this SlotIndex.
Slot getSlot() const {
- return static_cast<Slot>(lie.getInt() & ~PHI_BIT);
+ return static_cast<Slot>(lie.getInt());
}
static inline unsigned getHashValue(const SlotIndex &v) {
@@ -166,22 +161,13 @@ namespace llvm {
static inline SlotIndex getTombstoneKey() {
return SlotIndex(IndexListEntry::getTombstoneKeyEntry(), 0);
}
-
+
/// Construct an invalid index.
SlotIndex() : lie(IndexListEntry::getEmptyKeyEntry(), 0) {}
- // Construct a new slot index from the given one, set the phi flag on the
- // new index to the value of the phi parameter.
- SlotIndex(const SlotIndex &li, bool phi)
- : lie(&li.entry(), phi ? PHI_BIT | li.getSlot() : (unsigned)li.getSlot()){
- assert(lie.getPointer() != 0 &&
- "Attempt to construct index with 0 pointer.");
- }
-
- // Construct a new slot index from the given one, set the phi flag on the
- // new index to the value of the phi parameter, and the slot to the new slot.
- SlotIndex(const SlotIndex &li, bool phi, Slot s)
- : lie(&li.entry(), phi ? PHI_BIT | s : (unsigned)s) {
+ // Construct a new slot index from the given one, and set the slot.
+ SlotIndex(const SlotIndex &li, Slot s)
+ : lie(&li.entry(), unsigned(s)) {
assert(lie.getPointer() != 0 &&
"Attempt to construct index with 0 pointer.");
}
@@ -236,11 +222,6 @@ namespace llvm {
return other.getIndex() - getIndex();
}
- /// Returns the state of the PHI bit.
- bool isPHI() const {
- return lie.getInt() & PHI_BIT;
- }
-
/// isLoad - Return true if this is a LOAD slot.
bool isLoad() const {
return getSlot() == LOAD;
@@ -405,9 +386,6 @@ namespace llvm {
/// and MBB id.
std::vector<IdxMBBPair> idx2MBBMap;
- typedef DenseMap<const MachineBasicBlock*, SlotIndex> TerminatorGapsMap;
- TerminatorGapsMap terminatorGaps;
-
// IndexListEntry allocator.
BumpPtrAllocator ileAllocator;
@@ -415,7 +393,7 @@ namespace llvm {
IndexListEntry *entry =
static_cast<IndexListEntry*>(
ileAllocator.Allocate(sizeof(IndexListEntry),
- alignof<IndexListEntry>()));
+ alignOf<IndexListEntry>()));
new (entry) IndexListEntry(mi, index);
@@ -491,7 +469,9 @@ namespace llvm {
public:
static char ID;
- SlotIndexes() : MachineFunctionPass(ID), indexListHead(0) {}
+ SlotIndexes() : MachineFunctionPass(ID), indexListHead(0) {
+ initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &au) const;
virtual void releaseMemory();
@@ -565,26 +545,22 @@ namespace llvm {
return nextNonNull;
}
- /// Returns the first index in the given basic block.
- SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+ /// Return the (start,end) range of the given basic block.
+ const std::pair<SlotIndex, SlotIndex> &
+ getMBBRange(const MachineBasicBlock *mbb) const {
MBB2IdxMap::const_iterator itr = mbb2IdxMap.find(mbb);
assert(itr != mbb2IdxMap.end() && "MBB not found in maps.");
- return itr->second.first;
+ return itr->second;
}
- /// Returns the last index in the given basic block.
- SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
- MBB2IdxMap::const_iterator itr = mbb2IdxMap.find(mbb);
- assert(itr != mbb2IdxMap.end() && "MBB not found in maps.");
- return itr->second.second;
+ /// Returns the first index in the given basic block.
+ SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+ return getMBBRange(mbb).first;
}
- /// Returns the terminator gap for the given index.
- SlotIndex getTerminatorGap(const MachineBasicBlock *mbb) {
- TerminatorGapsMap::iterator itr = terminatorGaps.find(mbb);
- assert(itr != terminatorGaps.end() &&
- "All MBBs should have terminator gaps in their indexes.");
- return itr->second;
+ /// Returns the last index in the given basic block.
+ SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+ return getMBBRange(mbb).second;
}
/// Returns the basic block which the given index falls in.
@@ -618,29 +594,6 @@ namespace llvm {
return resVal;
}
- /// Return a list of MBBs that can be reach via any branches or
- /// fall-throughs.
- bool findReachableMBBs(SlotIndex start, SlotIndex end,
- SmallVectorImpl<MachineBasicBlock*> &mbbs) const {
- std::vector<IdxMBBPair>::const_iterator itr =
- std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
-
- bool resVal = false;
- while (itr != idx2MBBMap.end()) {
- if (itr->first > end)
- break;
- MachineBasicBlock *mbb = itr->second;
- if (getMBBEndIdx(mbb) > end)
- break;
- for (MachineBasicBlock::succ_iterator si = mbb->succ_begin(),
- se = mbb->succ_end(); si != se; ++si)
- mbbs.push_back(*si);
- resVal = true;
- ++itr;
- }
- return resVal;
- }
-
/// Returns the MBB covering the given range, or null if the range covers
/// more than one basic block.
MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
@@ -672,6 +625,9 @@ namespace llvm {
SlotIndex insertMachineInstrInMaps(MachineInstr *mi,
bool *deferredRenumber = 0) {
assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed.");
+ // Numbering DBG_VALUE instructions could cause code generation to be
+ // affected by debug information.
+ assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
MachineBasicBlock *mbb = mi->getParent();
@@ -789,7 +745,7 @@ namespace llvm {
MachineFunction::iterator nextMBB =
llvm::next(MachineFunction::iterator(mbb));
IndexListEntry *startEntry = createEntry(0, 0);
- IndexListEntry *terminatorEntry = createEntry(0, 0);
+ IndexListEntry *stopEntry = createEntry(0, 0);
IndexListEntry *nextEntry = 0;
if (nextMBB == mbb->getParent()->end()) {
@@ -799,15 +755,11 @@ namespace llvm {
}
insert(nextEntry, startEntry);
- insert(nextEntry, terminatorEntry);
+ insert(nextEntry, stopEntry);
SlotIndex startIdx(startEntry, SlotIndex::LOAD);
- SlotIndex terminatorIdx(terminatorEntry, SlotIndex::PHI_BIT);
SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
- terminatorGaps.insert(
- std::make_pair(mbb, terminatorIdx));
-
mbb2IdxMap.insert(
std::make_pair(mbb, std::make_pair(startIdx, endIdx)));
@@ -828,6 +780,20 @@ namespace llvm {
};
+ // Specialize IntervalMapInfo for half-open slot index intervals.
+ template <typename> struct IntervalMapInfo;
+ template <> struct IntervalMapInfo<SlotIndex> {
+ static inline bool startLess(const SlotIndex &x, const SlotIndex &a) {
+ return x < a;
+ }
+ static inline bool stopLess(const SlotIndex &b, const SlotIndex &x) {
+ return b <= x;
+ }
+ static inline bool adjacent(const SlotIndex &a, const SlotIndex &b) {
+ return a == b;
+ }
+ };
+
}
#endif // LLVM_CODEGEN_LIVEINDEX_H
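The IntervalMapInfo<SlotIndex> specialization above encodes half-open [start, stop) intervals: startLess/stopLess make the stop an exclusive bound, and adjacent() lets [a,b) and [b,c) coalesce. A standalone illustration using plain ints in place of SlotIndex:

#include <cassert>

static bool startLess(int x, int a) { return x < a; }
static bool stopLess(int b, int x)  { return b <= x; }
static bool adjacent(int a, int b)  { return a == b; }

// An interval [a,b) contains x iff x is neither before the start nor
// at-or-past the stop.
static bool contains(int a, int b, int x) {
  return !startLess(x, a) && !stopLess(b, x);
}

int main() {
  assert(contains(4, 8, 4));    // the start point is inside
  assert(!contains(4, 8, 8));   // the stop is not: the range is half-open
  assert(adjacent(8, 8));       // so [4,8) and [8,12) touch exactly
}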
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index d8f0373..fba3e48 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -57,6 +57,8 @@ public:
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ virtual const MCSection *getEHFrameSection() const;
+
const MCSection *getDataRelSection() const { return DataRelSection; }
/// getSectionForConstant - Given a constant with the SectionKind, return a
@@ -121,6 +123,8 @@ public:
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ virtual const MCSection *getEHFrameSection() const;
+
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
@@ -184,6 +188,8 @@ public:
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ virtual const MCSection *getEHFrameSection() const;
+
virtual const MCSection *getDrectveSection() const { return DrectveSection; }
virtual const MCSection *
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
index 51f324c..22d1622 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -18,7 +18,7 @@
#include <cassert>
#include <string>
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
namespace llvm {
@@ -26,7 +26,10 @@ namespace llvm {
class LLVMContext;
struct EVT;
- class MVT { // MVT = Machine Value Type
+ /// MVT - Machine Value Type. Every type that is supported natively by some
+ /// processor targeted by LLVM occurs here. This means that any legal value
+ /// type can be represented by an MVT.
+ class MVT {
public:
enum SimpleValueType {
// If you change this numbering, you must change the values in
@@ -74,14 +77,16 @@ namespace llvm {
FIRST_VECTOR_VALUETYPE = v2i8,
LAST_VECTOR_VALUETYPE = v4f64,
- Flag = 33, // This glues nodes together during pre-RA sched
+ x86mmx = 33, // This is an X86 MMX value
- isVoid = 34, // This has no value
+ Glue = 34, // This glues nodes together during pre-RA sched
- LAST_VALUETYPE = 35, // This always remains at the end of the list.
+ isVoid = 35, // This has no value
+
+ LAST_VALUETYPE = 36, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
- // EVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
+ // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
// This value must be a multiple of 32.
MAX_ALLOWED_VALUETYPE = 64,
@@ -124,13 +129,14 @@ namespace llvm {
MVT() : SimpleTy((SimpleValueType)(INVALID_SIMPLE_VALUE_TYPE)) {}
MVT(SimpleValueType SVT) : SimpleTy(SVT) { }
-
+
bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
+ bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
-
+
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::f32 && SimpleTy <= MVT::ppcf128) ||
@@ -149,14 +155,14 @@ namespace llvm {
return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
-
+
/// isPow2VectorType - Returns true if the given vector is a power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorNumElements();
return !(NElts & (NElts - 1));
}
- /// getPow2VectorType - Widens the length of the given vector EVT up to
+ /// getPow2VectorType - Widens the length of the given vector MVT up to
/// the nearest power of 2 and returns that type.
MVT getPow2VectorType() const {
if (isPow2VectorType())
@@ -172,7 +178,7 @@ namespace llvm {
MVT getScalarType() const {
return isVector() ? getVectorElementType() : *this;
}
-
+
MVT getVectorElementType() const {
switch (SimpleTy) {
default:
@@ -200,7 +206,7 @@ namespace llvm {
case v4f64: return f64;
}
}
-
+
unsigned getVectorNumElements() const {
switch (SimpleTy) {
default:
@@ -228,7 +234,7 @@ namespace llvm {
case v1i64: return 1;
}
}
-
+
unsigned getSizeInBits() const {
switch (SimpleTy) {
case iPTR:
@@ -247,6 +253,7 @@ namespace llvm {
case i32 :
case v4i8:
case v2i16: return 32;
+ case x86mmx:
case f64 :
case i64 :
case v8i8:
@@ -273,7 +280,19 @@ namespace llvm {
case v8i64: return 512;
}
}
-
+
+ /// getStoreSize - Return the number of bytes overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSize() const {
+ return (getSizeInBits() + 7) / 8;
+ }
+
+ /// getStoreSizeInBits - Return the number of bits overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSizeInBits() const {
+ return getStoreSize() * 8;
+ }
+
static MVT getFloatingPointVT(unsigned BitWidth) {
switch (BitWidth) {
default:
@@ -288,7 +307,7 @@ namespace llvm {
return MVT::f128;
}
}
-
+
static MVT getIntegerVT(unsigned BitWidth) {
switch (BitWidth) {
default:
@@ -307,7 +326,7 @@ namespace llvm {
return MVT::i128;
}
}
-
+
static MVT getVectorVT(MVT VT, unsigned NumElements) {
switch (VT.SimpleTy) {
default:
@@ -350,7 +369,11 @@ namespace llvm {
}
};
- struct EVT { // EVT = Extended Value Type
+
+ /// EVT - Extended Value Type. Capable of holding value types which are not
+ /// native for any processor (such as the i12345 type), as well as the types
+ /// an MVT can represent.
+ struct EVT {
private:
MVT V;
const Type *LLVMTy;
@@ -527,7 +550,7 @@ namespace llvm {
EVT getScalarType() const {
return isVector() ? getVectorElementType() : *this;
}
-
+
/// getVectorElementType - Given a vector type, return the type of
/// each element.
EVT getVectorElementType() const {
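getStoreSize() above rounds the bit width up to whole bytes with (bits + 7) / 8, so a store always covers enough memory for the value, and getStoreSizeInBits() converts that back to bits. A quick standalone check of the rounding:

#include <cassert>

static unsigned storeSize(unsigned Bits) { return (Bits + 7) / 8; }
static unsigned storeBits(unsigned Bits) { return storeSize(Bits) * 8; }

int main() {
  assert(storeSize(1)  == 1);   // i1 still occupies a full byte
  assert(storeSize(32) == 4);   // i32/f32
  assert(storeSize(80) == 10);  // x86 f80
  assert(storeBits(1)  == 8);   // rounded size expressed in bits
}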
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
index 8151c0b..a1163f7 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -46,17 +46,18 @@ def v4i32 : ValueType<128, 22>; // 4 x i32 vector value
def v8i32 : ValueType<256, 23>; // 8 x i32 vector value
def v1i64 : ValueType<64 , 24>; // 1 x i64 vector value
def v2i64 : ValueType<128, 25>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 26>; // 4 x f64 vector value
-def v8i64 : ValueType<512, 27>; // 4 x f64 vector value
+def v4i64 : ValueType<256, 26>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 27>; // 8 x i64 vector value
-def v2f32 : ValueType<64, 28>; // 2 x f32 vector value
+def v2f32 : ValueType<64 , 28>; // 2 x f32 vector value
def v4f32 : ValueType<128, 29>; // 4 x f32 vector value
def v8f32 : ValueType<256, 30>; // 8 x f32 vector value
def v2f64 : ValueType<128, 31>; // 2 x f64 vector value
def v4f64 : ValueType<256, 32>; // 4 x f64 vector value
-def FlagVT : ValueType<0 , 33>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 34>; // Produces no value
+def x86mmx : ValueType<64 , 33>; // X86 MMX value
+def FlagVT : ValueType<0 , 34>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 35>; // Produces no value
def MetadataVT: ValueType<0, 250>; // Metadata
diff --git a/contrib/llvm/include/llvm/CompilerDriver/CompilationGraph.h b/contrib/llvm/include/llvm/CompilerDriver/CompilationGraph.h
index 619c904..e1eea32 100644
--- a/contrib/llvm/include/llvm/CompilerDriver/CompilationGraph.h
+++ b/contrib/llvm/include/llvm/CompilerDriver/CompilationGraph.h
@@ -21,7 +21,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <cassert>
#include <string>
diff --git a/contrib/llvm/include/llvm/CompilerDriver/Tool.h b/contrib/llvm/include/llvm/CompilerDriver/Tool.h
index 45ef50d..d0926ba 100644
--- a/contrib/llvm/include/llvm/CompilerDriver/Tool.h
+++ b/contrib/llvm/include/llvm/CompilerDriver/Tool.h
@@ -18,7 +18,7 @@
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <string>
#include <vector>
@@ -58,7 +58,7 @@ namespace llvmc {
virtual const char* Name() const = 0;
virtual const char** InputLanguages() const = 0;
- virtual const char* OutputLanguage() const = 0;
+ virtual const char** OutputLanguages() const = 0;
virtual bool IsJoin() const = 0;
virtual bool WorksOnEmpty() const = 0;
diff --git a/contrib/llvm/include/llvm/Constant.h b/contrib/llvm/include/llvm/Constant.h
index 8647299..38045fc 100644
--- a/contrib/llvm/include/llvm/Constant.h
+++ b/contrib/llvm/include/llvm/Constant.h
@@ -20,7 +20,6 @@ namespace llvm {
class APInt;
template<typename T> class SmallVectorImpl;
- class LLVMContext;
/// This is an important base class in LLVM. It provides the common facilities
/// of all constant values in an LLVM program. A constant is a value that is
@@ -142,16 +141,22 @@ public:
assert(0 && "Constants that do not have operands cannot be using 'From'!");
}
- static Constant* getNullValue(const Type* Ty);
+ static Constant *getNullValue(const Type* Ty);
/// @returns the value for an integer constant of the given type that has all
/// its bits set to true.
/// @brief Get the all ones value
- static Constant* getAllOnesValue(const Type* Ty);
+ static Constant *getAllOnesValue(const Type* Ty);
/// getIntegerValue - Return the value for an integer or pointer constant,
/// or a vector thereof, with the given scalar value.
- static Constant* getIntegerValue(const Type* Ty, const APInt &V);
+ static Constant *getIntegerValue(const Type* Ty, const APInt &V);
+
+ /// removeDeadConstantUsers - If there are any dead constant users dangling
+ /// off of this constant, remove them. This method is useful for clients
+ /// that want to check to see if a global is unused, but don't want to deal
+ /// with potentially dead constants hanging off of the globals.
+ void removeDeadConstantUsers() const;
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Constants.h b/contrib/llvm/include/llvm/Constants.h
index a7deae0..c4768f8 100644
--- a/contrib/llvm/include/llvm/Constants.h
+++ b/contrib/llvm/include/llvm/Constants.h
@@ -25,8 +25,7 @@
#include "llvm/OperandTraits.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallVector.h"
-#include <vector>
+#include "llvm/ADT/ArrayRef.h"
namespace llvm {
@@ -265,8 +264,8 @@ public:
inline const APFloat& getValueAPF() const { return Val; }
/// isNullValue - Return true if this is the value that would be returned by
- /// getNullValue. Don't depend on == for doubles to tell us it's zero, it
- /// considers -0.0 to be null as well as 0.0. :(
+ /// getNullValue. For ConstantFP, this is +0.0, but not -0.0. To handle the
+ /// two the same, use isZero().
virtual bool isNullValue() const;
/// isNegativeZeroValue - Return true if the value is what would be returned
@@ -404,7 +403,8 @@ public:
};
template <>
-struct OperandTraits<ConstantArray> : public VariadicOperandTraits<> {
+struct OperandTraits<ConstantArray> :
+ public VariadicOperandTraits<ConstantArray> {
};
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantArray, Constant)
@@ -453,7 +453,8 @@ public:
};
template <>
-struct OperandTraits<ConstantStruct> : public VariadicOperandTraits<> {
+struct OperandTraits<ConstantStruct> :
+ public VariadicOperandTraits<ConstantStruct> {
};
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantStruct, Constant)
@@ -470,9 +471,9 @@ protected:
ConstantVector(const VectorType *T, const std::vector<Constant*> &Val);
public:
// ConstantVector accessors
+ static Constant *get(ArrayRef<Constant*> V);
+ // FIXME: Eliminate this constructor form.
static Constant *get(const VectorType *T, const std::vector<Constant*> &V);
- static Constant *get(const std::vector<Constant*> &V);
- static Constant *get(Constant *const *Vals, unsigned NumVals);
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
@@ -497,7 +498,7 @@ public:
/// getSplatValue - If this is a splat constant, meaning that all of the
/// elements have the same value, return that value. Otherwise return NULL.
- Constant *getSplatValue();
+ Constant *getSplatValue() const;
virtual void destroyConstant();
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
@@ -510,7 +511,8 @@ public:
};
template <>
-struct OperandTraits<ConstantVector> : public VariadicOperandTraits<> {
+struct OperandTraits<ConstantVector> :
+ public VariadicOperandTraits<ConstantVector> {
};
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantVector, Constant)
@@ -591,7 +593,8 @@ public:
};
template <>
-struct OperandTraits<BlockAddress> : public FixedNumOperandTraits<2> {
+struct OperandTraits<BlockAddress> :
+ public FixedNumOperandTraits<BlockAddress, 2> {
};
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(BlockAddress, Value)
@@ -624,11 +627,10 @@ protected:
Constant *C2);
static Constant *getSelectTy(const Type *Ty,
Constant *C1, Constant *C2, Constant *C3);
+ template<typename IndexTy>
static Constant *getGetElementPtrTy(const Type *Ty, Constant *C,
- Value* const *Idxs, unsigned NumIdxs);
- static Constant *getInBoundsGetElementPtrTy(const Type *Ty, Constant *C,
- Value* const *Idxs,
- unsigned NumIdxs);
+ IndexTy const *Idxs, unsigned NumIdxs,
+ bool InBounds);
static Constant *getExtractElementTy(const Type *Ty, Constant *Val,
Constant *Idx);
static Constant *getInsertElementTy(const Type *Ty, Constant *Val,
@@ -640,6 +642,10 @@ protected:
static Constant *getInsertValueTy(const Type *Ty, Constant *Agg,
Constant *Val,
const unsigned *Idxs, unsigned NumIdxs);
+ template<typename IndexTy>
+ static Constant *getGetElementPtrImpl(Constant *C,
+ IndexTy const *IdxList,
+ unsigned NumIdx, bool InBounds);
public:
// Static methods to construct a ConstantExpr of different kinds. Note that
@@ -649,35 +655,38 @@ public:
/// getAlignOf constant expr - computes the alignment of a type in a target
/// independent way (Note: the return type is an i64).
- static Constant *getAlignOf(const Type* Ty);
+ static Constant *getAlignOf(const Type *Ty);
/// getSizeOf constant expr - computes the (alloc) size of a type (in
/// address-units, not bits) in a target independent way (Note: the return
/// type is an i64).
///
- static Constant *getSizeOf(const Type* Ty);
+ static Constant *getSizeOf(const Type *Ty);
/// getOffsetOf constant expr - computes the offset of a struct field in a
/// target independent way (Note: the return type is an i64).
///
- static Constant *getOffsetOf(const StructType* STy, unsigned FieldNo);
+ static Constant *getOffsetOf(const StructType *STy, unsigned FieldNo);
/// getOffsetOf constant expr - This is a generalized form of getOffsetOf,
/// which supports any aggregate type, and any Constant index.
///
- static Constant *getOffsetOf(const Type* Ty, Constant *FieldNo);
+ static Constant *getOffsetOf(const Type *Ty, Constant *FieldNo);
- static Constant *getNeg(Constant *C);
+ static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW = false);
static Constant *getFNeg(Constant *C);
static Constant *getNot(Constant *C);
- static Constant *getAdd(Constant *C1, Constant *C2);
+ static Constant *getAdd(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
static Constant *getFAdd(Constant *C1, Constant *C2);
- static Constant *getSub(Constant *C1, Constant *C2);
+ static Constant *getSub(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
static Constant *getFSub(Constant *C1, Constant *C2);
- static Constant *getMul(Constant *C1, Constant *C2);
+ static Constant *getMul(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
static Constant *getFMul(Constant *C1, Constant *C2);
- static Constant *getUDiv(Constant *C1, Constant *C2);
- static Constant *getSDiv(Constant *C1, Constant *C2);
+ static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false);
static Constant *getFDiv(Constant *C1, Constant *C2);
static Constant *getURem(Constant *C1, Constant *C2);
static Constant *getSRem(Constant *C1, Constant *C2);
@@ -685,9 +694,10 @@ public:
static Constant *getAnd(Constant *C1, Constant *C2);
static Constant *getOr(Constant *C1, Constant *C2);
static Constant *getXor(Constant *C1, Constant *C2);
- static Constant *getShl(Constant *C1, Constant *C2);
- static Constant *getLShr(Constant *C1, Constant *C2);
- static Constant *getAShr(Constant *C1, Constant *C2);
+ static Constant *getShl(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
+ static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
static Constant *getTrunc (Constant *C, const Type *Ty);
static Constant *getSExt (Constant *C, const Type *Ty);
static Constant *getZExt (Constant *C, const Type *Ty);
@@ -701,15 +711,44 @@ public:
static Constant *getIntToPtr(Constant *C, const Type *Ty);
static Constant *getBitCast (Constant *C, const Type *Ty);
- static Constant *getNSWNeg(Constant *C);
- static Constant *getNUWNeg(Constant *C);
- static Constant *getNSWAdd(Constant *C1, Constant *C2);
- static Constant *getNUWAdd(Constant *C1, Constant *C2);
- static Constant *getNSWSub(Constant *C1, Constant *C2);
- static Constant *getNUWSub(Constant *C1, Constant *C2);
- static Constant *getNSWMul(Constant *C1, Constant *C2);
- static Constant *getNUWMul(Constant *C1, Constant *C2);
- static Constant *getExactSDiv(Constant *C1, Constant *C2);
+ static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
+ static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
+ static Constant *getNSWAdd(Constant *C1, Constant *C2) {
+ return getAdd(C1, C2, false, true);
+ }
+ static Constant *getNUWAdd(Constant *C1, Constant *C2) {
+ return getAdd(C1, C2, true, false);
+ }
+ static Constant *getNSWSub(Constant *C1, Constant *C2) {
+ return getSub(C1, C2, false, true);
+ }
+ static Constant *getNUWSub(Constant *C1, Constant *C2) {
+ return getSub(C1, C2, true, false);
+ }
+ static Constant *getNSWMul(Constant *C1, Constant *C2) {
+ return getMul(C1, C2, false, true);
+ }
+ static Constant *getNUWMul(Constant *C1, Constant *C2) {
+ return getMul(C1, C2, true, false);
+ }
+ static Constant *getNSWShl(Constant *C1, Constant *C2) {
+ return getShl(C1, C2, false, true);
+ }
+ static Constant *getNUWShl(Constant *C1, Constant *C2) {
+ return getShl(C1, C2, true, false);
+ }
+ static Constant *getExactSDiv(Constant *C1, Constant *C2) {
+ return getSDiv(C1, C2, true);
+ }
+ static Constant *getExactUDiv(Constant *C1, Constant *C2) {
+ return getUDiv(C1, C2, true);
+ }
+ static Constant *getExactAShr(Constant *C1, Constant *C2) {
+ return getAShr(C1, C2, true);
+ }
+ static Constant *getExactLShr(Constant *C1, Constant *C2) {
+ return getLShr(C1, C2, true);
+ }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
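With the flag-taking forms above, the NSW/NUW/exact variants become thin inline wrappers; a sketch of the equivalence, where A and B are assumed i32 Constant*s already in scope:

    Constant *X = ConstantExpr::getNSWAdd(A, B);            // wrapper form
    Constant *Y = ConstantExpr::getAdd(A, B, false, true);  // HasNUW, HasNSW
    assert(X == Y && "constant exprs are uniqued, so both forms agree");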
@@ -801,18 +840,24 @@ public:
/// all elements must be Constant's.
///
static Constant *getGetElementPtr(Constant *C,
- Constant *const *IdxList, unsigned NumIdx);
+ Constant *const *IdxList, unsigned NumIdx,
+ bool InBounds = false);
static Constant *getGetElementPtr(Constant *C,
- Value* const *IdxList, unsigned NumIdx);
+ Value *const *IdxList, unsigned NumIdx,
+ bool InBounds = false);
/// Create an "inbounds" getelementptr. See the documentation for the
/// "inbounds" flag in LangRef.html for details.
static Constant *getInBoundsGetElementPtr(Constant *C,
Constant *const *IdxList,
- unsigned NumIdx);
+ unsigned NumIdx) {
+ return getGetElementPtr(C, IdxList, NumIdx, true);
+ }
static Constant *getInBoundsGetElementPtr(Constant *C,
Value* const *IdxList,
- unsigned NumIdx);
+ unsigned NumIdx) {
+ return getGetElementPtr(C, IdxList, NumIdx, true);
+ }
static Constant *getExtractElement(Constant *Vec, Constant *Idx);
static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx);
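The inbounds form now simply forwards with InBounds = true; a sketch, assuming GV is a GlobalVariable of type [4 x i32] and Ctx its LLVMContext:

    Constant *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Ctx), 0),
      ConstantInt::get(Type::getInt32Ty(Ctx), 2),
    };
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(GV, Idxs, 2);
    // Equivalent to: ConstantExpr::getGetElementPtr(GV, Idxs, 2, true);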
@@ -870,7 +915,8 @@ private:
};
template <>
-struct OperandTraits<ConstantExpr> : public VariadicOperandTraits<1> {
+struct OperandTraits<ConstantExpr> :
+ public VariadicOperandTraits<ConstantExpr, 1> {
};
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantExpr, Constant)
diff --git a/contrib/llvm/include/llvm/DerivedTypes.h b/contrib/llvm/include/llvm/DerivedTypes.h
index 9b6b19f..56d1e3e 100644
--- a/contrib/llvm/include/llvm/DerivedTypes.h
+++ b/contrib/llvm/include/llvm/DerivedTypes.h
@@ -9,7 +9,7 @@
//
// This file contains the declarations of classes that represent "derived
// types". These are things like "arrays of x" or "structure of x, y, z" or
-// "method returning x taking (y,z) as parameters", etc...
+// "function returning x taking (y,z) as parameters", etc...
//
// The implementations of these classes live in the Type.cpp file.
//
@@ -19,6 +19,7 @@
#define LLVM_DERIVED_TYPES_H
#include "llvm/Type.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index 3287b39..71698fa 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -22,7 +22,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -41,6 +41,8 @@ class MutexGuard;
class TargetData;
class Type;
+/// \brief Helper class for synchronizing access to the global address map
+/// table.
class ExecutionEngineState {
public:
struct AddressMapConfig : public ValueMapConfig<const GlobalValue*> {
@@ -70,8 +72,7 @@ private:
public:
ExecutionEngineState(ExecutionEngine &EE);
- GlobalAddressMapTy &
- getGlobalAddressMap(const MutexGuard &) {
+ GlobalAddressMapTy &getGlobalAddressMap(const MutexGuard &) {
return GlobalAddressMap;
}
@@ -80,23 +81,41 @@ public:
return GlobalAddressReverseMap;
}
- // Returns the address ToUnmap was mapped to.
+ /// \brief Erase an entry from the mapping table.
+ ///
+ /// \returns The address that \arg ToUnmap was mapped to.
void *RemoveMapping(const MutexGuard &, const GlobalValue *ToUnmap);
};
-
+/// \brief Abstract interface for implementations that execute LLVM modules,
+/// designed to support both interpreter and just-in-time (JIT) compiler
+/// implementations.
class ExecutionEngine {
- const TargetData *TD;
+ /// The state object holding the global address mapping, which must be
+ /// accessed synchronously.
+ //
+ // FIXME: There is no particular need for the entire map to be
+ // synchronized. Wouldn't a reader-writer design be better here?
ExecutionEngineState EEState;
+
+ /// The target data for the platform for which execution is being performed.
+ const TargetData *TD;
+
+ /// Whether lazy JIT compilation is enabled.
bool CompilingLazily;
+
+ /// Whether JIT compilation of external global variables is allowed.
bool GVCompilationDisabled;
+
+ /// Whether the JIT should perform lookups of external symbols (e.g.,
+ /// using dlsym).
bool SymbolSearchingDisabled;
friend class EngineBuilder; // To allow access to JITCtor and InterpCtor.
protected:
- /// Modules - This is a list of Modules that we are JIT'ing from. We use a
- /// smallvector to optimize for the case where there is only one module.
+ /// The list of Modules that we are JIT'ing from. We use a SmallVector to
+ /// optimize for the case where there is only one module.
SmallVector<Module*, 1> Modules;
void setTargetData(const TargetData *td) {
@@ -104,11 +123,11 @@ protected:
}
/// getMemoryforGV - Allocate memory for a global variable.
- virtual char* getMemoryForGV(const GlobalVariable* GV);
+ virtual char *getMemoryForGV(const GlobalVariable *GV);
// To avoid having libexecutionengine depend on the JIT and interpreter
- // libraries, the JIT and Interpreter set these functions to ctor pointers
- // at startup time if they are linked in.
+ // libraries, the execution engine implementations set these functions to ctor
+ // pointers at startup time if they are linked in.
static ExecutionEngine *(*JITCtor)(
Module *M,
std::string *ErrorStr,
@@ -119,23 +138,36 @@ protected:
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs);
+ static ExecutionEngine *(*MCJITCtor)(
+ Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
static ExecutionEngine *(*InterpCtor)(Module *M,
std::string *ErrorStr);
/// LazyFunctionCreator - If an unknown function is needed, this function
- /// pointer is invoked to create it. If this returns null, the JIT will abort.
- void* (*LazyFunctionCreator)(const std::string &);
+ /// pointer is invoked to create it. If this returns null, the JIT will
+ /// abort.
+ void *(*LazyFunctionCreator)(const std::string &);
- /// ExceptionTableRegister - If Exception Handling is set, the JIT will
- /// register dwarf tables with this function
+ /// ExceptionTableRegister - If Exception Handling is set, the JIT will
+ /// register dwarf tables with this function.
typedef void (*EERegisterFn)(void*);
- static EERegisterFn ExceptionTableRegister;
+ EERegisterFn ExceptionTableRegister;
+ EERegisterFn ExceptionTableDeregister;
+ std::vector<void*> AllExceptionTables;
public:
- /// lock - This lock is protects the ExecutionEngine, JIT, JITResolver and
+ /// lock - This lock protects the ExecutionEngine, JIT, JITResolver and
/// JITEmitter classes. It must be held while changing the internal state of
/// any of those classes.
- sys::Mutex lock; // Used to make this class and subclasses thread-safe
+ sys::Mutex lock;
//===--------------------------------------------------------------------===//
// ExecutionEngine Startup
@@ -146,20 +178,18 @@ public:
/// create - This is the factory method for creating an execution engine which
/// is appropriate for the current machine. This takes ownership of the
/// module.
+ ///
+ /// \param GVsWithCode - Allocating globals with code breaks
+ /// freeMachineCodeForFunction and is probably unsafe and bad for performance.
+ /// However, we have clients who depend on this behavior, so we must support
+ /// it. Eventually, when we're willing to break some backwards compatibility,
+ /// this flag should be flipped to false, so that by default
+ /// freeMachineCodeForFunction works.
static ExecutionEngine *create(Module *M,
bool ForceInterpreter = false,
std::string *ErrorStr = 0,
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
- // Allocating globals with code breaks
- // freeMachineCodeForFunction and is probably
- // unsafe and bad for performance. However,
- // we have clients who depend on this
- // behavior, so we must support it.
- // Eventually, when we're willing to break
- // some backwards compatability, this flag
- // should be flipped to false, so that by
- // default freeMachineCodeForFunction works.
bool GVsWithCode = true);
/// createJIT - This is the factory method for creating a JIT for the current
@@ -184,11 +214,10 @@ public:
Modules.push_back(M);
}
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
const TargetData *getTargetData() const { return TD; }
-
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
virtual bool removeModule(Module *M);
@@ -200,17 +229,19 @@ public:
/// runFunction - Execute the specified function with the specified arguments,
/// and return the result.
- ///
virtual GenericValue runFunction(Function *F,
const std::vector<GenericValue> &ArgValues) = 0;
/// runStaticConstructorsDestructors - This method is used to execute all of
- /// the static constructors or destructors for a program, depending on the
- /// value of isDtors.
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
void runStaticConstructorsDestructors(bool isDtors);
+
/// runStaticConstructorsDestructors - This method is used to execute all of
- /// the static constructors or destructors for a module, depending on the
- /// value of isDtors.
+ /// the static constructors or destructors for a particular module.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
void runStaticConstructorsDestructors(Module *module, bool isDtors);
@@ -229,8 +260,8 @@ public:
/// GlobalValue is destroyed.
void addGlobalMapping(const GlobalValue *GV, void *Addr);
- /// clearAllGlobalMappings - Clear all global mappings and start over again
- /// use in dynamic compilation scenarios when you want to move globals
+ /// clearAllGlobalMappings - Clear all global mappings and start over again,
+ /// for use in dynamic compilation scenarios to move globals.
void clearAllGlobalMappings();
/// clearGlobalMappingsFromModule - Clear all global mappings that came from a
@@ -246,12 +277,10 @@ public:
/// getPointerToGlobalIfAvailable - This returns the address of the specified
/// global value if it is has already been codegen'd, otherwise it returns
/// null.
- ///
void *getPointerToGlobalIfAvailable(const GlobalValue *GV);
/// getPointerToGlobal - This returns the address of the specified global
- /// value. This may involve code generation if it's a function.
- ///
+ /// value. This may involve code generation if it's a function.
void *getPointerToGlobal(const GlobalValue *GV);
/// getPointerToFunction - The different EE's represent function bodies in
@@ -259,20 +288,17 @@ public:
/// pointer should look like. When F is destroyed, the ExecutionEngine will
/// remove its global mapping and free any machine code. Be sure no threads
/// are running inside F when that happens.
- ///
virtual void *getPointerToFunction(Function *F) = 0;
/// getPointerToBasicBlock - The different EE's represent basic blocks in
/// different ways. Return the representation for a blockaddress of the
/// specified block.
- ///
virtual void *getPointerToBasicBlock(BasicBlock *BB) = 0;
/// getPointerToFunctionOrStub - If the specified function has been
/// code-gen'd, return a pointer to the function. If not, compile it, or use
/// a stub to implement lazy compilation if available. See
/// getPointerToFunction for the requirements on destroying F.
- ///
virtual void *getPointerToFunctionOrStub(Function *F) {
// Default implementation, just codegen the function.
return getPointerToFunction(F);
@@ -286,23 +312,25 @@ public:
///
const GlobalValue *getGlobalValueAtAddress(void *Addr);
-
+ /// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr.
+ /// Ptr is the address of the memory at which to store Val, cast to
+ /// GenericValue *. It is not a pointer to a GenericValue containing the
+ /// address at which to store Val.
void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
const Type *Ty);
+
void InitializeMemory(const Constant *Init, void *Addr);
- /// recompileAndRelinkFunction - This method is used to force a function
- /// which has already been compiled to be compiled again, possibly
- /// after it has been modified. Then the entry to the old copy is overwritten
- /// with a branch to the new copy. If there was no old copy, this acts
- /// just like VM::getPointerToFunction().
- ///
+ /// recompileAndRelinkFunction - This method is used to force a function which
+ /// has already been compiled to be compiled again, possibly after it has been
+ /// modified. Then the entry to the old copy is overwritten with a branch to
+ /// the new copy. If there was no old copy, this acts just like
+ /// VM::getPointerToFunction().
virtual void *recompileAndRelinkFunction(Function *F) = 0;
/// freeMachineCodeForFunction - Release memory in the ExecutionEngine
/// corresponding to the machine code emitted to execute this function, useful
/// for garbage-collecting generated code.
- ///
virtual void freeMachineCodeForFunction(Function *F) = 0;
/// getOrEmitGlobalVariable - Return the address of the specified global
@@ -373,25 +401,31 @@ public:
/// InstallExceptionTableRegister - The JIT will use the given function
/// to register the exception tables it generates.
- static void InstallExceptionTableRegister(void (*F)(void*)) {
+ void InstallExceptionTableRegister(EERegisterFn F) {
ExceptionTableRegister = F;
}
+ void InstallExceptionTableDeregister(EERegisterFn F) {
+ ExceptionTableDeregister = F;
+ }
- /// RegisterTable - Registers the given pointer as an exception table. It uses
- /// the ExceptionTableRegister function.
- static void RegisterTable(void* res) {
- if (ExceptionTableRegister)
+ /// RegisterTable - Registers the given pointer as an exception table. It
+ /// uses the ExceptionTableRegister function.
+ void RegisterTable(void* res) {
+ if (ExceptionTableRegister) {
ExceptionTableRegister(res);
+ AllExceptionTables.push_back(res);
+ }
}
+ /// DeregisterAllTables - Deregisters all previously registered pointers to
+ /// exception tables. It uses the ExceptionTableDeregister function.
+ void DeregisterAllTables();
+
protected:
explicit ExecutionEngine(Module *M);
void emitGlobals();
- // EmitGlobalVariable - This method emits the specified global variable to the
- // address specified in GlobalAddresses, or allocates new memory if it's not
- // already in the map.
void EmitGlobalVariable(const GlobalVariable *GV);
GenericValue getConstantValue(const Constant *C);
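Since the exception-table hooks are now per-instance state rather than static, each engine gets its own pair; a sketch, where EE is an assumed ExecutionEngine* and the two frame handlers are hypothetical functions with the void(void*) signature expected here:

    extern void MyRegisterFrame(void *Table);    // hypothetical handler
    extern void MyDeregisterFrame(void *Table);  // hypothetical handler

    EE->InstallExceptionTableRegister(MyRegisterFrame);
    EE->InstallExceptionTableDeregister(MyDeregisterFrame);
    // ... JIT code; emitted tables flow through MyRegisterFrame ...
    EE->DeregisterAllTables();  // deregisters every table recorded so far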
@@ -412,8 +446,7 @@ namespace EngineKind {
/// stack-allocating a builder, chaining the various set* methods, and
/// terminating it with a .create() call.
class EngineBuilder {
-
- private:
+private:
Module *M;
EngineKind::Kind WhichEngine;
std::string *ErrorStr;
@@ -424,9 +457,9 @@ class EngineBuilder {
std::string MArch;
std::string MCPU;
SmallVector<std::string, 4> MAttrs;
+ bool UseMCJIT;
/// InitEngine - Does the common initialization of default options.
- ///
void InitEngine() {
WhichEngine = EngineKind::Either;
ErrorStr = NULL;
@@ -434,9 +467,10 @@ class EngineBuilder {
JMM = NULL;
AllocateGVsWithCode = false;
CMModel = CodeModel::Default;
+ UseMCJIT = false;
}
- public:
+public:
/// EngineBuilder - Constructor for EngineBuilder. If create() is called and
/// is successful, the created engine takes ownership of the module.
EngineBuilder(Module *m) : M(m) {
@@ -504,6 +538,12 @@ class EngineBuilder {
return *this;
}
+ /// setUseMCJIT - Set whether the MC-JIT implementation should be used
+ /// (experimental).
+ void setUseMCJIT(bool Value) {
+ UseMCJIT = Value;
+ }
+
/// setMAttrs - Set cpu-specific attributes.
template<typename StringSequence>
EngineBuilder &setMAttrs(const StringSequence &mattrs) {
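A sketch of typical builder usage with the new option, assuming M is a Module* whose ownership passes to the created engine; note that setUseMCJIT() returns void in this revision, so it cannot be chained:

    std::string Err;
    EngineBuilder Builder(M);
    Builder.setEngineKind(EngineKind::JIT)
           .setErrorStr(&Err)
           .setOptLevel(CodeGenOpt::Default);
    Builder.setUseMCJIT(true);               // experimental MC-based JIT
    ExecutionEngine *EE = Builder.create();
    if (!EE)
      errs() << "failed to create engine: " << Err << "\n";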
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h b/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h
index 1301320..a2fed98 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h
@@ -16,7 +16,7 @@
#define GENERIC_VALUE_H
#include "llvm/ADT/APInt.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
index dcc66b2..abc063b 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -15,7 +15,7 @@
#ifndef LLVM_EXECUTION_ENGINE_JIT_EVENTLISTENER_H
#define LLVM_EXECUTION_ENGINE_JIT_EVENTLISTENER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
#include <vector>
@@ -24,35 +24,38 @@ namespace llvm {
class Function;
class MachineFunction;
-/// Empty for now, but this object will contain all details about the
-/// generated machine code that a Listener might care about.
+/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
+/// about a generated machine code function.
struct JITEvent_EmittedFunctionDetails {
- const MachineFunction *MF;
-
struct LineStart {
- // The address at which the current line changes.
+ /// The address at which the current line changes.
uintptr_t Address;
- // The new location information. These can be translated to
- // DebugLocTuples using MF->getDebugLocTuple().
+
+ /// The new location information. These can be translated to DebugLocTuples
+ /// using MF->getDebugLocTuple().
DebugLoc Loc;
};
- // This holds line boundary information sorted by address.
+
+ /// The machine function this struct contains information about.
+ const MachineFunction *MF;
+
+ /// The list of line boundary information, sorted by address.
std::vector<LineStart> LineStarts;
};
-/// JITEventListener - This interface is used by the JIT to notify clients about
-/// significant events during compilation. For example, we could have
-/// implementations for profilers and debuggers that need to know where
-/// functions have been emitted.
+/// JITEventListener - Abstract interface for use by the JIT to notify clients
+/// about significant events during compilation. For example, profilers and
+/// debuggers may need to know where functions have been emitted.
///
-/// Each method defaults to doing nothing, so you only need to override the ones
-/// you care about.
+/// The default implementation of each method does nothing.
class JITEventListener {
public:
+ typedef JITEvent_EmittedFunctionDetails EmittedFunctionDetails;
+
+public:
JITEventListener() {}
- virtual ~JITEventListener(); // Defined in JIT.cpp.
+ virtual ~JITEventListener();
- typedef JITEvent_EmittedFunctionDetails EmittedFunctionDetails;
/// NotifyFunctionEmitted - Called after a function has been successfully
/// emitted to memory. The function still has its MachineFunction attached,
/// if you should happen to need that.
@@ -60,13 +63,14 @@ public:
void *Code, size_t Size,
const EmittedFunctionDetails &Details) {}
- /// NotifyFreeingMachineCode - This is called inside of
- /// freeMachineCodeForFunction(), after the global mapping is removed, but
- /// before the machine code is returned to the allocator. OldPtr is the
- /// address of the machine code and will be the same as the Code parameter to
- /// a previous NotifyFunctionEmitted call. The Function passed to
- /// NotifyFunctionEmitted may have been destroyed by the time of the matching
- /// NotifyFreeingMachineCode call.
+ /// NotifyFreeingMachineCode - Called from freeMachineCodeForFunction(), after
+ /// the global mapping is removed, but before the machine code is returned to
+ /// the allocator.
+ ///
+ /// OldPtr is the address of the machine code and will be the same as the Code
+ /// parameter to a previous NotifyFunctionEmitted call. The Function passed
+ /// to NotifyFunctionEmitted may have been destroyed by the time of the
+ /// matching NotifyFreeingMachineCode call.
virtual void NotifyFreeingMachineCode(void *OldPtr) {}
};
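A sketch of a minimal listener under the reworked interface; only the hooks of interest are overridden, since the defaults do nothing (the profiler bookkeeping here is a stand-in):

    class ProfilerListener : public JITEventListener {
    public:
      virtual void NotifyFunctionEmitted(const Function &F, void *Code,
                                         size_t Size,
                                         const EmittedFunctionDetails &Details) {
        // Record [Code, Code + Size); Details.LineStarts is sorted by address.
      }
      virtual void NotifyFreeingMachineCode(void *OldPtr) {
        // Forget the range previously recorded for OldPtr.
      }
    };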
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
index e015930..3841418 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
@@ -6,15 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the JITMemoryManagerInterface
-//
-//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
#define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
namespace llvm {
@@ -29,8 +25,8 @@ namespace llvm {
class JITMemoryManager {
protected:
bool HasGOT;
-public:
+public:
JITMemoryManager() : HasGOT(false) {}
virtual ~JITMemoryManager();
@@ -48,7 +44,7 @@ public:
/// setPoisonMemory - Setting this flag to true makes the memory manager
/// write garbage values over freed memory. This is useful for testing and
- /// debugging, and is be turned on by default in debug mode.
+ /// debugging, and may be turned on by default in debug mode.
virtual void setPoisonMemory(bool poison) = 0;
//===--------------------------------------------------------------------===//
@@ -61,7 +57,6 @@ public:
virtual void AllocateGOT() = 0;
/// isManagingGOT - Return true if the AllocateGOT method is called.
- ///
bool isManagingGOT() const {
return HasGOT;
}
@@ -111,7 +106,6 @@ public:
virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0;
/// allocateGlobal - Allocate memory for a global.
- ///
virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) = 0;
/// deallocateFunctionBody - Free the specified function body. The argument
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
new file mode 100644
index 0000000..f956a50
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
@@ -0,0 +1,38 @@
+//===-- MCJIT.h - MC-Based Just-In-Time Execution Engine --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the MCJIT to link in on certain operating systems
+// (currently Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTION_ENGINE_MCJIT_H
+#define LLVM_EXECUTION_ENGINE_MCJIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInMCJIT();
+
+namespace {
+ struct ForceMCJITLinking {
+ ForceMCJITLinking() {
+      // We must reference MCJIT in such a way that compilers will not
+      // delete it as dead code, even with whole program optimization,
+      // yet the reference is effectively a no-op. As the compiler isn't
+      // smart enough to know that getenv() never returns -1, this will do
+      // the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ LLVMLinkInMCJIT();
+ }
+ } ForceMCJITLinking;
+}
+
+#endif
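A sketch of the intended use: a tool that selects the MC-based JIT includes this header in one translation unit, so the static ForceMCJITLinking object keeps the MCJIT objects from being dropped by the linker.

    #include "llvm/ExecutionEngine/MCJIT.h"  // pulls in LLVMLinkInMCJIT()
    // ... then build the engine with EngineBuilder::setUseMCJIT(true).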
diff --git a/contrib/llvm/include/llvm/Function.h b/contrib/llvm/include/llvm/Function.h
index 2b19fa5..9a0825a 100644
--- a/contrib/llvm/include/llvm/Function.h
+++ b/contrib/llvm/include/llvm/Function.h
@@ -152,7 +152,7 @@ public:
/// The particular intrinsic functions which correspond to this value are
/// defined in llvm/Intrinsics.h.
///
- unsigned getIntrinsicID() const ATTRIBUTE_READONLY;
+ unsigned getIntrinsicID() const LLVM_ATTRIBUTE_READONLY;
bool isIntrinsic() const { return getIntrinsicID() != 0; }
/// getCallingConv()/setCallingConv(CC) - These method get and set the
diff --git a/contrib/llvm/include/llvm/GlobalAlias.h b/contrib/llvm/include/llvm/GlobalAlias.h
index 9867c51..f4af5b1 100644
--- a/contrib/llvm/include/llvm/GlobalAlias.h
+++ b/contrib/llvm/include/llvm/GlobalAlias.h
@@ -89,7 +89,8 @@ public:
};
template <>
-struct OperandTraits<GlobalAlias> : public FixedNumOperandTraits<1> {
+struct OperandTraits<GlobalAlias> :
+ public FixedNumOperandTraits<GlobalAlias, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Value)
diff --git a/contrib/llvm/include/llvm/GlobalValue.h b/contrib/llvm/include/llvm/GlobalValue.h
index 62e84f8..b184b8e 100644
--- a/contrib/llvm/include/llvm/GlobalValue.h
+++ b/contrib/llvm/include/llvm/GlobalValue.h
@@ -60,7 +60,8 @@ protected:
GlobalValue(const Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
LinkageTypes linkage, const Twine &Name)
: Constant(ty, vty, Ops, NumOps), Parent(0),
- Linkage(linkage), Visibility(DefaultVisibility), Alignment(0) {
+ Linkage(linkage), Visibility(DefaultVisibility), Alignment(0),
+ UnnamedAddr(0) {
setName(Name);
}
@@ -70,6 +71,7 @@ protected:
LinkageTypes Linkage : 5; // The linkage of this global
unsigned Visibility : 2; // The visibility style of this global
unsigned Alignment : 16; // Alignment of this symbol, must be power of two
+ unsigned UnnamedAddr : 1; // This value's address is not significant
std::string Section; // Section to emit this into, empty mean default
public:
~GlobalValue() {
@@ -81,6 +83,9 @@ public:
}
void setAlignment(unsigned Align);
+ bool hasUnnamedAddr() const { return UnnamedAddr; }
+ void setUnnamedAddr(bool Val) { UnnamedAddr = Val; }
+
VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
@@ -173,7 +178,9 @@ public:
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
- /// link time.
+ /// link time. NB: Using this method outside of the code generators is almost
+ /// always a mistake: when working at the IR level use mayBeOverridden instead
+ /// as it knows about ODR semantics.
static bool isWeakForLinker(LinkageTypes Linkage) {
return Linkage == AvailableExternallyLinkage ||
Linkage == WeakAnyLinkage ||
@@ -275,12 +282,6 @@ public:
inline Module *getParent() { return Parent; }
inline const Module *getParent() const { return Parent; }
- /// removeDeadConstantUsers - If there are any dead constant users dangling
- /// off of this global value, remove them. This method is useful for clients
- /// that want to check to see if a global is unused, but don't want to deal
- /// with potentially dead constants hanging off of the globals.
- void removeDeadConstantUsers() const;
-
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const GlobalValue *) { return true; }
static inline bool classof(const Value *V) {
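A sketch of the new unnamed_addr bit in use, assuming GV is a GlobalVariable* holding, say, an internal constant string:

    GV->setUnnamedAddr(true);       // address identity is not significant
    if (GV->hasUnnamedAddr()) {
      // A pass may now merge GV with another global that has identical
      // contents, since no one compares its address.
    }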
diff --git a/contrib/llvm/include/llvm/GlobalVariable.h b/contrib/llvm/include/llvm/GlobalVariable.h
index 633e8b4..1769c66 100644
--- a/contrib/llvm/include/llvm/GlobalVariable.h
+++ b/contrib/llvm/include/llvm/GlobalVariable.h
@@ -68,7 +68,7 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// isDeclaration - Is this global variable lacking an initializer? If so,
+ /// isDeclaration - Is this global variable lacking an initializer? If so,
/// the global variable is defined in some other translation unit, and is thus
/// only a declaration here.
virtual bool isDeclaration() const { return getNumOperands() == 0; }
@@ -80,7 +80,21 @@ public:
inline bool hasInitializer() const { return !isDeclaration(); }
/// hasDefinitiveInitializer - Whether the global variable has an initializer,
- /// and this is the initializer that will be used in the final executable.
+ /// and any other instances of the global (this can happen due to weak
+ /// linkage) are guaranteed to have the same initializer.
+ ///
+ /// Note that if you want to transform a global, you must use
+ /// hasUniqueInitializer() instead, because of the *_odr linkage type.
+ ///
+ /// Example:
+ ///
+ /// @a = global SomeType* null - Initializer is both definitive and unique.
+ ///
+ /// @b = weak global SomeType* null - Initializer is neither definitive nor
+ /// unique.
+ ///
+ /// @c = weak_odr global SomeType* null - Initializer is definitive, but not
+ /// unique.
inline bool hasDefinitiveInitializer() const {
return hasInitializer() &&
// The initializer of a global variable with weak linkage may change at
@@ -88,6 +102,19 @@ public:
!mayBeOverridden();
}
+ /// hasUniqueInitializer - Whether the global variable has an initializer, and
+ /// any changes made to the initializer will turn up in the final executable.
+ inline bool hasUniqueInitializer() const {
+ return hasInitializer() &&
+ // It's not safe to modify initializers of global variables with weak
+ // linkage, because the linker might choose to discard the initializer and
+ // use the initializer from another instance of the global variable
+ // instead. It is wrong to modify the initializer of a global variable
+ // with *_odr linkage because then different instances of the global may
+ // have different initializers, breaking the One Definition Rule.
+ !isWeakForLinker();
+ }
+
/// getInitializer - Return the initializer for this global variable. It is
/// illegal to call this method if the global is external, because we cannot
/// tell what the value is initialized to!
@@ -142,7 +169,8 @@ public:
};
template <>
-struct OperandTraits<GlobalVariable> : public OptionalOperandTraits<> {
+struct OperandTraits<GlobalVariable> :
+ public OptionalOperandTraits<GlobalVariable> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)
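A sketch of the distinction drawn above, assuming B is a GlobalVariable* with weak_odr linkage:

    if (B->hasDefinitiveInitializer()) {
      // Every copy of B shares this initializer, so reading it (e.g. to
      // constant-fold a load) is safe. weak_odr globals pass this test.
    }
    if (B->hasUniqueInitializer()) {
      // Only now is rewriting the initializer in place safe; weak_odr
      // globals fail this test even though they pass the one above.
    }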
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
new file mode 100644
index 0000000..02dbfbd
--- /dev/null
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -0,0 +1,235 @@
+//===- llvm/InitializePasses.h -------- Initialize All Passes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for the pass initialization routines
+// for the entire LLVM project.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INITIALIZEPASSES_H
+#define LLVM_INITIALIZEPASSES_H
+
+namespace llvm {
+
+class PassRegistry;
+
+/// initializeCore - Initialize all passes linked into the
+/// Core library.
+void initializeCore(PassRegistry&);
+
+/// initializeTransformUtils - Initialize all passes linked into the
+/// TransformUtils library.
+void initializeTransformUtils(PassRegistry&);
+
+/// initializeScalarOpts - Initialize all passes linked into the
+/// ScalarOpts library.
+void initializeScalarOpts(PassRegistry&);
+
+/// initializeInstCombine - Initialize all passes linked into the
+/// InstCombine library.
+void initializeInstCombine(PassRegistry&);
+
+/// initializeIPO - Initialize all passes linked into the IPO library.
+void initializeIPO(PassRegistry&);
+
+/// initializeInstrumentation - Initialize all passes linked into the
+/// Instrumentation library.
+void initializeInstrumentation(PassRegistry&);
+
+/// initializeAnalysis - Initialize all passes linked into the Analysis library.
+void initializeAnalysis(PassRegistry&);
+
+/// initializeIPA - Initialize all passes linked into the IPA library.
+void initializeIPA(PassRegistry&);
+
+/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
+void initializeCodeGen(PassRegistry&);
+
+/// initializeTarget - Initialize all passes linked into the Target library.
+void initializeTarget(PassRegistry&);
+
+void initializeAAEvalPass(PassRegistry&);
+void initializeADCEPass(PassRegistry&);
+void initializeAliasAnalysisAnalysisGroup(PassRegistry&);
+void initializeAliasAnalysisCounterPass(PassRegistry&);
+void initializeAliasDebuggerPass(PassRegistry&);
+void initializeAliasSetPrinterPass(PassRegistry&);
+void initializeAlwaysInlinerPass(PassRegistry&);
+void initializeArgPromotionPass(PassRegistry&);
+void initializeBasicAliasAnalysisPass(PassRegistry&);
+void initializeBasicCallGraphPass(PassRegistry&);
+void initializeBlockExtractorPassPass(PassRegistry&);
+void initializeBlockPlacementPass(PassRegistry&);
+void initializeBreakCriticalEdgesPass(PassRegistry&);
+void initializeCFGOnlyPrinterPass(PassRegistry&);
+void initializeCFGOnlyViewerPass(PassRegistry&);
+void initializeCFGPrinterPass(PassRegistry&);
+void initializeCFGSimplifyPassPass(PassRegistry&);
+void initializeCFGViewerPass(PassRegistry&);
+void initializeCalculateSpillWeightsPass(PassRegistry&);
+void initializeCallGraphAnalysisGroup(PassRegistry&);
+void initializeCodeGenPreparePass(PassRegistry&);
+void initializeConstantMergePass(PassRegistry&);
+void initializeConstantPropagationPass(PassRegistry&);
+void initializeCorrelatedValuePropagationPass(PassRegistry&);
+void initializeDAEPass(PassRegistry&);
+void initializeDAHPass(PassRegistry&);
+void initializeDCEPass(PassRegistry&);
+void initializeDSEPass(PassRegistry&);
+void initializeDTEPass(PassRegistry&);
+void initializeDeadInstEliminationPass(PassRegistry&);
+void initializeDeadMachineInstructionElimPass(PassRegistry&);
+void initializeDomOnlyPrinterPass(PassRegistry&);
+void initializeDomOnlyViewerPass(PassRegistry&);
+void initializeDomPrinterPass(PassRegistry&);
+void initializeDomViewerPass(PassRegistry&);
+void initializeDominanceFrontierPass(PassRegistry&);
+void initializeDominatorTreePass(PassRegistry&);
+void initializeEdgeBundlesPass(PassRegistry&);
+void initializeEdgeProfilerPass(PassRegistry&);
+void initializePathProfilerPass(PassRegistry&);
+void initializeEarlyCSEPass(PassRegistry&);
+void initializeExpandISelPseudosPass(PassRegistry&);
+void initializeFindUsedTypesPass(PassRegistry&);
+void initializeFunctionAttrsPass(PassRegistry&);
+void initializeGCModuleInfoPass(PassRegistry&);
+void initializeGEPSplitterPass(PassRegistry&);
+void initializeGVNPass(PassRegistry&);
+void initializeGlobalDCEPass(PassRegistry&);
+void initializeGlobalOptPass(PassRegistry&);
+void initializeGlobalsModRefPass(PassRegistry&);
+void initializeIPCPPass(PassRegistry&);
+void initializeIPSCCPPass(PassRegistry&);
+void initializeIVUsersPass(PassRegistry&);
+void initializeIfConverterPass(PassRegistry&);
+void initializeIndVarSimplifyPass(PassRegistry&);
+void initializeInstCombinerPass(PassRegistry&);
+void initializeInstCountPass(PassRegistry&);
+void initializeInstNamerPass(PassRegistry&);
+void initializeInternalizePassPass(PassRegistry&);
+void initializeIntervalPartitionPass(PassRegistry&);
+void initializeJumpThreadingPass(PassRegistry&);
+void initializeLCSSAPass(PassRegistry&);
+void initializeLICMPass(PassRegistry&);
+void initializeLazyValueInfoPass(PassRegistry&);
+void initializeLibCallAliasAnalysisPass(PassRegistry&);
+void initializeLintPass(PassRegistry&);
+void initializeLiveDebugVariablesPass(PassRegistry&);
+void initializeLiveIntervalsPass(PassRegistry&);
+void initializeLiveStacksPass(PassRegistry&);
+void initializeLiveValuesPass(PassRegistry&);
+void initializeLiveVariablesPass(PassRegistry&);
+void initializeLoaderPassPass(PassRegistry&);
+void initializePathProfileLoaderPassPass(PassRegistry&);
+void initializeLoopDeletionPass(PassRegistry&);
+void initializeLoopDependenceAnalysisPass(PassRegistry&);
+void initializeLoopExtractorPass(PassRegistry&);
+void initializeLoopInfoPass(PassRegistry&);
+void initializeLoopInstSimplifyPass(PassRegistry&);
+void initializeLoopRotatePass(PassRegistry&);
+void initializeLoopSimplifyPass(PassRegistry&);
+void initializeLoopSplitterPass(PassRegistry&);
+void initializeLoopStrengthReducePass(PassRegistry&);
+void initializeLoopUnrollPass(PassRegistry&);
+void initializeLoopUnswitchPass(PassRegistry&);
+void initializeLoopIdiomRecognizePass(PassRegistry&);
+void initializeLowerAtomicPass(PassRegistry&);
+void initializeLowerIntrinsicsPass(PassRegistry&);
+void initializeLowerInvokePass(PassRegistry&);
+void initializeLowerSetJmpPass(PassRegistry&);
+void initializeLowerSwitchPass(PassRegistry&);
+void initializeMachineCSEPass(PassRegistry&);
+void initializeMachineDominatorTreePass(PassRegistry&);
+void initializeMachineLICMPass(PassRegistry&);
+void initializeMachineLoopInfoPass(PassRegistry&);
+void initializeMachineLoopRangesPass(PassRegistry&);
+void initializeMachineModuleInfoPass(PassRegistry&);
+void initializeMachineSinkingPass(PassRegistry&);
+void initializeMachineVerifierPassPass(PassRegistry&);
+void initializeMemCpyOptPass(PassRegistry&);
+void initializeMemDepPrinterPass(PassRegistry&);
+void initializeMemoryDependenceAnalysisPass(PassRegistry&);
+void initializeMergeFunctionsPass(PassRegistry&);
+void initializeModuleDebugInfoPrinterPass(PassRegistry&);
+void initializeNoAAPass(PassRegistry&);
+void initializeNoProfileInfoPass(PassRegistry&);
+void initializeNoPathProfileInfoPass(PassRegistry&);
+void initializeOptimalEdgeProfilerPass(PassRegistry&);
+void initializeOptimizePHIsPass(PassRegistry&);
+void initializePEIPass(PassRegistry&);
+void initializePHIEliminationPass(PassRegistry&);
+void initializePartialInlinerPass(PassRegistry&);
+void initializePeepholeOptimizerPass(PassRegistry&);
+void initializePostDomOnlyPrinterPass(PassRegistry&);
+void initializePostDomOnlyViewerPass(PassRegistry&);
+void initializePostDomPrinterPass(PassRegistry&);
+void initializePostDomViewerPass(PassRegistry&);
+void initializePostDominanceFrontierPass(PassRegistry&);
+void initializePostDominatorTreePass(PassRegistry&);
+void initializePreAllocSplittingPass(PassRegistry&);
+void initializePreVerifierPass(PassRegistry&);
+void initializePrintDbgInfoPass(PassRegistry&);
+void initializePrintFunctionPassPass(PassRegistry&);
+void initializePrintModulePassPass(PassRegistry&);
+void initializeProcessImplicitDefsPass(PassRegistry&);
+void initializeProfileEstimatorPassPass(PassRegistry&);
+void initializeProfileInfoAnalysisGroup(PassRegistry&);
+void initializePathProfileInfoAnalysisGroup(PassRegistry&);
+void initializePathProfileVerifierPass(PassRegistry&);
+void initializeProfileVerifierPassPass(PassRegistry&);
+void initializePromotePassPass(PassRegistry&);
+void initializePruneEHPass(PassRegistry&);
+void initializeRALinScanPass(PassRegistry&);
+void initializeReassociatePass(PassRegistry&);
+void initializeRegToMemPass(PassRegistry&);
+void initializeRegionInfoPass(PassRegistry&);
+void initializeRegionOnlyPrinterPass(PassRegistry&);
+void initializeRegionOnlyViewerPass(PassRegistry&);
+void initializeRegionPrinterPass(PassRegistry&);
+void initializeRegionViewerPass(PassRegistry&);
+void initializeRegisterCoalescerAnalysisGroup(PassRegistry&);
+void initializeRenderMachineFunctionPass(PassRegistry&);
+void initializeSCCPPass(PassRegistry&);
+void initializeSRETPromotionPass(PassRegistry&);
+void initializeSROA_DTPass(PassRegistry&);
+void initializeSROA_SSAUpPass(PassRegistry&);
+void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&);
+void initializeScalarEvolutionPass(PassRegistry&);
+void initializeSimpleInlinerPass(PassRegistry&);
+void initializeSimpleRegisterCoalescingPass(PassRegistry&);
+void initializeSimplifyHalfPowrLibCallsPass(PassRegistry&);
+void initializeSimplifyLibCallsPass(PassRegistry&);
+void initializeSingleLoopExtractorPass(PassRegistry&);
+void initializeSinkingPass(PassRegistry&);
+void initializeSlotIndexesPass(PassRegistry&);
+void initializeSpillPlacementPass(PassRegistry&);
+void initializeStackProtectorPass(PassRegistry&);
+void initializeStackSlotColoringPass(PassRegistry&);
+void initializeStripDeadDebugInfoPass(PassRegistry&);
+void initializeStripDeadPrototypesPassPass(PassRegistry&);
+void initializeStripDebugDeclarePass(PassRegistry&);
+void initializeStripNonDebugSymbolsPass(PassRegistry&);
+void initializeStripSymbolsPass(PassRegistry&);
+void initializeStrongPHIEliminationPass(PassRegistry&);
+void initializeTailCallElimPass(PassRegistry&);
+void initializeTailDupPass(PassRegistry&);
+void initializeTargetDataPass(PassRegistry&);
+void initializeTargetLibraryInfoPass(PassRegistry&);
+void initializeTwoAddressInstructionPassPass(PassRegistry&);
+void initializeTypeBasedAliasAnalysisPass(PassRegistry&);
+void initializeUnifyFunctionExitNodesPass(PassRegistry&);
+void initializeUnreachableBlockElimPass(PassRegistry&);
+void initializeUnreachableMachineBlockElimPass(PassRegistry&);
+void initializeVerifierPass(PassRegistry&);
+void initializeVirtRegMapPass(PassRegistry&);
+void initializeInstSimplifierPass(PassRegistry&);
+
+}
+
+#endif
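A sketch of typical startup code for a tool using the new registration scheme; the library-level entry points fan out to the individual initialize*Pass routines (InitForMyTool is a hypothetical helper):

    #include "llvm/InitializePasses.h"
    #include "llvm/PassRegistry.h"
    using namespace llvm;

    void InitForMyTool() {
      PassRegistry &R = *PassRegistry::getPassRegistry();
      initializeCore(R);
      initializeAnalysis(R);
      initializeScalarOpts(R);
    }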
diff --git a/contrib/llvm/include/llvm/InlineAsm.h b/contrib/llvm/include/llvm/InlineAsm.h
index 105b1bc..ed8f0f7 100644
--- a/contrib/llvm/include/llvm/InlineAsm.h
+++ b/contrib/llvm/include/llvm/InlineAsm.h
@@ -87,6 +87,25 @@ public:
isClobber // '~x'
};
+ typedef std::vector<std::string> ConstraintCodeVector;
+
+ struct SubConstraintInfo {
+ /// MatchingInput - If this is not -1, this is an output constraint where an
+ /// input constraint is required to match it (e.g. "0"). The value is the
+ /// constraint number that matches this one (for example, if this is
+ /// constraint #0 and constraint #4 has the value "0", this will be 4).
+ signed char MatchingInput;
+ /// Code - The constraint code, either the register name (in braces) or the
+ /// constraint letter/number.
+ ConstraintCodeVector Codes;
+ /// Default constructor.
+ SubConstraintInfo() : MatchingInput(-1) {}
+ };
+
+ typedef std::vector<SubConstraintInfo> SubConstraintInfoVector;
+ struct ConstraintInfo;
+ typedef std::vector<ConstraintInfo> ConstraintInfoVector;
+
struct ConstraintInfo {
/// Type - The basic type of the constraint: input/output/clobber
///
@@ -118,25 +137,42 @@ public:
/// Code - The constraint code, either the register name (in braces) or the
/// constraint letter/number.
- std::vector<std::string> Codes;
+ ConstraintCodeVector Codes;
+
+ /// isMultipleAlternative - '|': has multiple-alternative constraints.
+ bool isMultipleAlternative;
+
+ /// multipleAlternatives - If there are multiple alternative constraints,
+ /// this array will contain them. Otherwise it will be empty.
+ SubConstraintInfoVector multipleAlternatives;
+
+ /// The currently selected alternative constraint index.
+ unsigned currentAlternativeIndex;
+
+ /// Default constructor.
+ ConstraintInfo();
+
+ /// Copy constructor.
+ ConstraintInfo(const ConstraintInfo &other);
/// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
- bool Parse(StringRef Str,
- std::vector<InlineAsm::ConstraintInfo> &ConstraintsSoFar);
+ bool Parse(StringRef Str, ConstraintInfoVector &ConstraintsSoFar);
+
+ /// selectAlternative - Point this constraint to the alternative constraint
+ /// indicated by the index.
+ void selectAlternative(unsigned index);
};
/// ParseConstraints - Split up the constraint string into the specific
/// constraints and their prefixes. If this returns an empty vector, and if
/// the constraint string itself isn't empty, there was an error parsing.
- static std::vector<ConstraintInfo>
- ParseConstraints(StringRef ConstraintString);
+ static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);
/// ParseConstraints - Parse the constraints of this inlineasm object,
/// returning them the same way that ParseConstraints(str) does.
- std::vector<ConstraintInfo>
- ParseConstraints() const {
+ ConstraintInfoVector ParseConstraints() const {
return ParseConstraints(Constraints);
}
@@ -154,8 +190,15 @@ public:
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
- Op_IsAlignStack = 3,
+ Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack
Op_FirstOperand = 4,
+
+ MIOp_AsmString = 0,
+ MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack
+ MIOp_FirstOperand = 2,
+
+ Extra_HasSideEffects = 1,
+ Extra_IsAlignStack = 2,
Kind_RegUse = 1,
Kind_RegDef = 2,
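A sketch of the multiple-alternative machinery, parsing a constraint string with two alternatives per operand:

    InlineAsm::ConstraintInfoVector CV =
        InlineAsm::ParseConstraints("=r|m,r|m");
    if (!CV.empty() && CV[0].isMultipleAlternative)
      CV[0].selectAlternative(1);   // switch operand 0 to the 'm' form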
diff --git a/contrib/llvm/include/llvm/InstrTypes.h b/contrib/llvm/include/llvm/InstrTypes.h
index 6715416..a166956 100644
--- a/contrib/llvm/include/llvm/InstrTypes.h
+++ b/contrib/llvm/include/llvm/InstrTypes.h
@@ -128,7 +128,8 @@ public:
};
template <>
-struct OperandTraits<UnaryInstruction> : public FixedNumOperandTraits<1> {
+struct OperandTraits<UnaryInstruction> :
+ public FixedNumOperandTraits<UnaryInstruction, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
@@ -193,154 +194,93 @@ public:
}
#include "llvm/Instruction.def"
-
- /// CreateNSWAdd - Create an Add operator with the NSW flag set.
- ///
- static BinaryOperator *CreateNSWAdd(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateAdd(V1, V2, Name);
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
BO->setHasNoSignedWrap(true);
return BO;
}
- static BinaryOperator *CreateNSWAdd(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateAdd(V1, V2, Name, BB);
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
BO->setHasNoSignedWrap(true);
return BO;
}
- static BinaryOperator *CreateNSWAdd(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateAdd(V1, V2, Name, I);
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
BO->setHasNoSignedWrap(true);
return BO;
}
-
- /// CreateNUWAdd - Create an Add operator with the NUW flag set.
- ///
- static BinaryOperator *CreateNUWAdd(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateAdd(V1, V2, Name);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNUWAdd(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateAdd(V1, V2, Name, BB);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNUWAdd(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateAdd(V1, V2, Name, I);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
-
- /// CreateNSWSub - Create an Sub operator with the NSW flag set.
- ///
- static BinaryOperator *CreateNSWSub(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateSub(V1, V2, Name);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNSWSub(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateSub(V1, V2, Name, BB);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNSWSub(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateSub(V1, V2, Name, I);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
-
- /// CreateNUWSub - Create an Sub operator with the NUW flag set.
- ///
- static BinaryOperator *CreateNUWSub(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateSub(V1, V2, Name);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNUWSub(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateSub(V1, V2, Name, BB);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNUWSub(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateSub(V1, V2, Name, I);
- BO->setHasNoUnsignedWrap(true);
- return BO;
- }
-
- /// CreateNSWMul - Create a Mul operator with the NSW flag set.
- ///
- static BinaryOperator *CreateNSWMul(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateMul(V1, V2, Name);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNSWMul(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateMul(V1, V2, Name, BB);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
- static BinaryOperator *CreateNSWMul(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateMul(V1, V2, Name, I);
- BO->setHasNoSignedWrap(true);
- return BO;
- }
-
- /// CreateNUWMul - Create a Mul operator with the NUW flag set.
- ///
- static BinaryOperator *CreateNUWMul(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateMul(V1, V2, Name);
+
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
BO->setHasNoUnsignedWrap(true);
return BO;
}
- static BinaryOperator *CreateNUWMul(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateMul(V1, V2, Name, BB);
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
BO->setHasNoUnsignedWrap(true);
return BO;
}
- static BinaryOperator *CreateNUWMul(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateMul(V1, V2, Name, I);
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
BO->setHasNoUnsignedWrap(true);
return BO;
}
-
- /// CreateExactSDiv - Create an SDiv operator with the exact flag set.
- ///
- static BinaryOperator *CreateExactSDiv(Value *V1, Value *V2,
- const Twine &Name = "") {
- BinaryOperator *BO = CreateSDiv(V1, V2, Name);
+
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
BO->setIsExact(true);
return BO;
}
- static BinaryOperator *CreateExactSDiv(Value *V1, Value *V2,
- const Twine &Name, BasicBlock *BB) {
- BinaryOperator *BO = CreateSDiv(V1, V2, Name, BB);
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
BO->setIsExact(true);
return BO;
}
- static BinaryOperator *CreateExactSDiv(Value *V1, Value *V2,
- const Twine &Name, Instruction *I) {
- BinaryOperator *BO = CreateSDiv(V1, V2, Name, I);
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
BO->setIsExact(true);
return BO;
}
-
+
+#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
+ static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
+ (Value *V1, Value *V2, const Twine &Name = "") { \
+ return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
+ } \
+ static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
+ (Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
+ return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
+ } \
+ static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
+ (Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
+ return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
+ }
+
+ DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
+ DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
+ DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
+ DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
+ DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
+ DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
+ DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
+ DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
+
+ DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
+ DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
+ DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
+ DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
+
+#undef DEFINE_HELPERS
+
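For reference, DEFINE_HELPERS regenerates exactly the three-overload families the hand-written helpers above used to spell out. A minimal usage sketch (A, B, and InsertPt are hypothetical Value* and Instruction* variables); the macro-generated name and the generic entry point build the same instruction:

    // Macro-generated convenience name:
    BinaryOperator *S1 = BinaryOperator::CreateNSWAdd(A, B, "sum", InsertPt);
    // Equivalent call through the generic entry point:
    BinaryOperator *S2 =
        BinaryOperator::CreateNSW(Instruction::Add, A, B, "sum", InsertPt);
    // Exact division goes through the same scheme:
    BinaryOperator *Q =
        BinaryOperator::CreateExact(Instruction::SDiv, A, B, "quot", InsertPt);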
/// Helper functions to construct and inspect unary operations (NEG and NOT)
/// via binary operators SUB and XOR:
///
@@ -432,7 +372,8 @@ public:
};
template <>
-struct OperandTraits<BinaryOperator> : public FixedNumOperandTraits<2> {
+struct OperandTraits<BinaryOperator> :
+ public FixedNumOperandTraits<BinaryOperator, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
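The new first template parameter threads the concrete instruction class into the traits (a CRTP-style change repeated for every OperandTraits specialization in this patch), so operand storage can be sized and located per class. A hypothetical two-operand instruction MyBinInst would now be declared the same way:

    template <>
    struct OperandTraits<MyBinInst>              // MyBinInst is hypothetical
      : public FixedNumOperandTraits<MyBinInst, 2> {
    };
    DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MyBinInst, Value)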
@@ -824,11 +765,11 @@ public:
/// This is just a convenience that dispatches to the subclasses.
/// @brief Determine if this CmpInst is commutative.
- bool isCommutative();
+ bool isCommutative() const;
/// This is just a convenience that dispatches to the subclasses.
/// @brief Determine if this is an equals/not equals predicate.
- bool isEquality();
+ bool isEquality() const;
/// @returns true if the comparison is signed, false otherwise.
/// @brief Determine if this instruction is using a signed comparison.
@@ -903,7 +844,7 @@ private:
// FIXME: these are redundant if CmpInst < BinaryOperator
template <>
-struct OperandTraits<CmpInst> : public FixedNumOperandTraits<2> {
+struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
diff --git a/contrib/llvm/include/llvm/Instruction.h b/contrib/llvm/include/llvm/Instruction.h
index 88f5ce1..89bb9fd 100644
--- a/contrib/llvm/include/llvm/Instruction.h
+++ b/contrib/llvm/include/llvm/Instruction.h
@@ -200,11 +200,10 @@ public:
///
/// Associative operators satisfy: x op (y op z) === (x op y) op z
///
- /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative, when
- /// not applied to floating point types.
+ /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
///
- bool isAssociative() const { return isAssociative(getOpcode(), getType()); }
- static bool isAssociative(unsigned op, const Type *Ty);
+ bool isAssociative() const { return isAssociative(getOpcode()); }
+ static bool isAssociative(unsigned op);
/// isCommutative - Return true if the instruction is commutative:
///
diff --git a/contrib/llvm/include/llvm/Instructions.h b/contrib/llvm/include/llvm/Instructions.h
index bd1e889..17ff763 100644
--- a/contrib/llvm/include/llvm/Instructions.h
+++ b/contrib/llvm/include/llvm/Instructions.h
@@ -29,7 +29,6 @@ class ConstantInt;
class ConstantRange;
class APInt;
class LLVMContext;
-class DominatorTree;
//===----------------------------------------------------------------------===//
// AllocaInst Class
@@ -43,7 +42,7 @@ protected:
public:
explicit AllocaInst(const Type *Ty, Value *ArraySize = 0,
const Twine &Name = "", Instruction *InsertBefore = 0);
- AllocaInst(const Type *Ty, Value *ArraySize,
+ AllocaInst(const Type *Ty, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd);
AllocaInst(const Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
@@ -166,8 +165,8 @@ public:
unsigned getPointerAddressSpace() const {
return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
}
-
-
+
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const LoadInst *) { return true; }
static inline bool classof(const Instruction *I) {
@@ -237,7 +236,7 @@ public:
Value *getValueOperand() { return getOperand(0); }
const Value *getValueOperand() const { return getOperand(0); }
-
+
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
@@ -245,7 +244,7 @@ public:
unsigned getPointerAddressSpace() const {
return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const StoreInst *) { return true; }
static inline bool classof(const Instruction *I) {
@@ -263,7 +262,7 @@ private:
};
template <>
-struct OperandTraits<StoreInst> : public FixedNumOperandTraits<2> {
+struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
@@ -289,8 +288,10 @@ class GetElementPtrInst : public Instruction {
const Twine &NameStr);
void init(Value *Ptr, Value *Idx, const Twine &NameStr);
- template<typename InputIterator>
- void init(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ void init(Value *Ptr,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
// This argument ensures that we have an iterator we can
// do arithmetic on in constant time
@@ -313,10 +314,10 @@ class GetElementPtrInst : public Instruction {
/// Null is returned if the indices are invalid for the specified
/// pointer type.
///
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static const Type *getIndexedType(const Type *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
// This argument ensures that we
// have an iterator we can do
// arithmetic on in constant time
@@ -331,18 +332,19 @@ class GetElementPtrInst : public Instruction {
}
   /// Constructors - Create a getelementptr instruction with a base pointer and a
- /// list of indices. The first ctor can optionally insert before an existing
+ /// list of indices. The first ctor can optionally insert before an existing
/// instruction, the second appends the new instruction to the specified
/// BasicBlock.
- template<typename InputIterator>
- inline GetElementPtrInst(Value *Ptr, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ inline GetElementPtrInst(Value *Ptr, RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
unsigned Values,
const Twine &NameStr,
Instruction *InsertBefore);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
inline GetElementPtrInst(Value *Ptr,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -355,23 +357,24 @@ class GetElementPtrInst : public Instruction {
protected:
virtual GetElementPtrInst *clone_impl() const;
public:
- template<typename InputIterator>
- static GetElementPtrInst *Create(Value *Ptr, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ static GetElementPtrInst *Create(Value *Ptr, RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
- typename std::iterator_traits<InputIterator>::difference_type Values =
- 1 + std::distance(IdxBegin, IdxEnd);
+ typename std::iterator_traits<RandomAccessIterator>::difference_type
+ Values = 1 + std::distance(IdxBegin, IdxEnd);
return new(Values)
GetElementPtrInst(Ptr, IdxBegin, IdxEnd, Values, NameStr, InsertBefore);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static GetElementPtrInst *Create(Value *Ptr,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
- typename std::iterator_traits<InputIterator>::difference_type Values =
- 1 + std::distance(IdxBegin, IdxEnd);
+ typename std::iterator_traits<RandomAccessIterator>::difference_type
+ Values = 1 + std::distance(IdxBegin, IdxEnd);
return new(Values)
GetElementPtrInst(Ptr, IdxBegin, IdxEnd, Values, NameStr, InsertAtEnd);
}
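Call sites are unchanged apart from the renamed (and now honestly documented) iterator requirement. A sketch with hypothetical BasePtr, Zero, FieldNo, and InsertPt; SmallVector iterators are pointers, so they satisfy the random-access requirement:

    SmallVector<Value*, 4> Idx;
    Idx.push_back(Zero);       // e.g. a ConstantInt of i32 0
    Idx.push_back(FieldNo);
    GetElementPtrInst *GEP =
        GetElementPtrInst::Create(BasePtr, Idx.begin(), Idx.end(),
                                  "field.addr", InsertPt);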
@@ -391,9 +394,10 @@ public:
/// Create an "inbounds" getelementptr. See the documentation for the
/// "inbounds" flag in LangRef.html for details.
- template<typename InputIterator>
- static GetElementPtrInst *CreateInBounds(Value *Ptr, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ static GetElementPtrInst *CreateInBounds(Value *Ptr,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
GetElementPtrInst *GEP = Create(Ptr, IdxBegin, IdxEnd,
@@ -401,10 +405,10 @@ public:
GEP->setIsInBounds(true);
return GEP;
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static GetElementPtrInst *CreateInBounds(Value *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
GetElementPtrInst *GEP = Create(Ptr, IdxBegin, IdxEnd,
@@ -441,12 +445,12 @@ public:
/// Null is returned if the indices are invalid for the specified
/// pointer type.
///
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static const Type *getIndexedType(const Type *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd) {
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd) {
return getIndexedType(Ptr, IdxBegin, IdxEnd,
- typename std::iterator_traits<InputIterator>::
+ typename std::iterator_traits<RandomAccessIterator>::
iterator_category());
}
@@ -454,6 +458,9 @@ public:
Value* const *Idx, unsigned NumIdx);
static const Type *getIndexedType(const Type *Ptr,
+ Constant* const *Idx, unsigned NumIdx);
+
+ static const Type *getIndexedType(const Type *Ptr,
uint64_t const *Idx, unsigned NumIdx);
static const Type *getIndexedType(const Type *Ptr, Value *Idx);
@@ -472,7 +479,7 @@ public:
static unsigned getPointerOperandIndex() {
return 0U; // get index for modifying correct operand
}
-
+
unsigned getPointerAddressSpace() const {
return cast<PointerType>(getType())->getAddressSpace();
}
@@ -520,13 +527,14 @@ public:
};
template <>
-struct OperandTraits<GetElementPtrInst> : public VariadicOperandTraits<1> {
+struct OperandTraits<GetElementPtrInst> :
+ public VariadicOperandTraits<GetElementPtrInst, 1> {
};
-template<typename InputIterator>
+template<typename RandomAccessIterator>
GetElementPtrInst::GetElementPtrInst(Value *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
unsigned Values,
const Twine &NameStr,
Instruction *InsertBefore)
@@ -539,12 +547,13 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertBefore) {
init(Ptr, IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
-template<typename InputIterator>
+template<typename RandomAccessIterator>
GetElementPtrInst::GetElementPtrInst(Value *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
unsigned Values,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
@@ -557,7 +566,8 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertAtEnd) {
init(Ptr, IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
@@ -575,7 +585,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
class ICmpInst: public CmpInst {
protected:
   /// @brief Clone an identical ICmpInst
- virtual ICmpInst *clone_impl() const;
+ virtual ICmpInst *clone_impl() const;
public:
/// @brief Constructor with insert-before-instruction semantics.
ICmpInst(
@@ -746,7 +756,7 @@ public:
assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction");
}
-
+
/// @brief Constructor with insert-at-end semantics.
FCmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
@@ -838,8 +848,10 @@ class CallInst : public Instruction {
void init(Value *Func, Value *Actual);
void init(Value *Func);
- template<typename InputIterator>
- void init(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
+ template<typename RandomAccessIterator>
+ void init(Value *Func,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
const Twine &NameStr,
// This argument ensures that we have an iterator we can
// do arithmetic on in constant time
@@ -851,24 +863,26 @@ class CallInst : public Instruction {
setName(NameStr);
}
- /// Construct a CallInst given a range of arguments. InputIterator
+ /// Construct a CallInst given a range of arguments. RandomAccessIterator
/// must be a random-access iterator pointing to contiguous storage
- /// (e.g. a std::vector<>::iterator). Checks are made for
+ /// (e.g. a std::vector<>::iterator). Checks are made for
/// random-accessness but not for contiguous storage as that would
/// incur runtime overhead.
/// @brief Construct a CallInst from a range of arguments
- template<typename InputIterator>
- CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
+ template<typename RandomAccessIterator>
+ CallInst(Value *Func,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
const Twine &NameStr, Instruction *InsertBefore);
- /// Construct a CallInst given a range of arguments. InputIterator
+ /// Construct a CallInst given a range of arguments. RandomAccessIterator
/// must be a random-access iterator pointing to contiguous storage
/// (e.g. a std::vector<>::iterator). Checks are made for
/// random-accessness but not for contiguous storage as that would
/// incur runtime overhead.
/// @brief Construct a CallInst from a range of arguments
- template<typename InputIterator>
- inline CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
+ template<typename RandomAccessIterator>
+ inline CallInst(Value *Func,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd);
CallInst(Value *F, Value *Actual, const Twine &NameStr,
@@ -881,17 +895,19 @@ class CallInst : public Instruction {
protected:
virtual CallInst *clone_impl() const;
public:
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static CallInst *Create(Value *Func,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertBefore);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static CallInst *Create(Value *Func,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertAtEnd);
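The "+ 1" reserves a co-allocated operand slot for the callee alongside the arguments. A usage sketch with hypothetical Callee, X, Y, and InsertPt:

    SmallVector<Value*, 8> Args;
    Args.push_back(X);
    Args.push_back(Y);
    CallInst *CI =
        CallInst::Create(Callee, Args.begin(), Args.end(), "", InsertPt);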
@@ -984,7 +1000,7 @@ public:
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
-
+
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
void setIsNoInline(bool Value = true) {
@@ -1052,7 +1068,7 @@ public:
void setCalledFunction(Value* Fn) {
Op<-1>() = Fn;
}
-
+
/// isInlineAsm - Check if this call is an inline asm statement.
bool isInlineAsm() const {
return isa<InlineAsm>(Op<-1>());
@@ -1075,11 +1091,12 @@ private:
};
template <>
-struct OperandTraits<CallInst> : public VariadicOperandTraits<1> {
+struct OperandTraits<CallInst> : public VariadicOperandTraits<CallInst, 1> {
};
-template<typename InputIterator>
-CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
+template<typename RandomAccessIterator>
+CallInst::CallInst(Value *Func,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd)
: Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
->getElementType())->getReturnType(),
@@ -1087,11 +1104,13 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
unsigned(ArgEnd - ArgBegin + 1), InsertAtEnd) {
init(Func, ArgBegin, ArgEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
-template<typename InputIterator>
-CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
+template<typename RandomAccessIterator>
+CallInst::CallInst(Value *Func,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
const Twine &NameStr, Instruction *InsertBefore)
: Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
->getElementType())->getReturnType(),
@@ -1099,7 +1118,8 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
unsigned(ArgEnd - ArgBegin + 1), InsertBefore) {
init(Func, ArgBegin, ArgEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
@@ -1156,7 +1176,7 @@ public:
Value *getCondition() { return Op<0>(); }
Value *getTrueValue() { return Op<1>(); }
Value *getFalseValue() { return Op<2>(); }
-
+
/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
@@ -1179,7 +1199,7 @@ public:
};
template <>
-struct OperandTraits<SelectInst> : public FixedNumOperandTraits<3> {
+struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
@@ -1207,6 +1227,10 @@ public:
setName(NameStr);
}
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+ static unsigned getPointerOperandIndex() { return 0U; }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const VAArgInst *) { return true; }
static inline bool classof(const Instruction *I) {
@@ -1252,12 +1276,12 @@ public:
Value *getIndexOperand() { return Op<1>(); }
const Value *getVectorOperand() const { return Op<0>(); }
const Value *getIndexOperand() const { return Op<1>(); }
-
+
const VectorType *getVectorOperandType() const {
return reinterpret_cast<const VectorType*>(getVectorOperand()->getType());
}
-
-
+
+
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -1272,7 +1296,8 @@ public:
};
template <>
-struct OperandTraits<ExtractElementInst> : public FixedNumOperandTraits<2> {
+struct OperandTraits<ExtractElementInst> :
+ public FixedNumOperandTraits<ExtractElementInst, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
@@ -1330,7 +1355,8 @@ public:
};
template <>
-struct OperandTraits<InsertElementInst> : public FixedNumOperandTraits<3> {
+struct OperandTraits<InsertElementInst> :
+ public FixedNumOperandTraits<InsertElementInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
@@ -1387,7 +1413,8 @@ public:
};
template <>
-struct OperandTraits<ShuffleVectorInst> : public FixedNumOperandTraits<3> {
+struct OperandTraits<ShuffleVectorInst> :
+ public FixedNumOperandTraits<ShuffleVectorInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
@@ -1407,8 +1434,9 @@ class ExtractValueInst : public UnaryInstruction {
const Twine &NameStr);
void init(unsigned Idx, const Twine &NameStr);
- template<typename InputIterator>
- void init(InputIterator IdxBegin, InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ void init(RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
// This argument ensures that we have an iterator we can
// do arithmetic on in constant time
@@ -1429,16 +1457,15 @@ class ExtractValueInst : public UnaryInstruction {
/// getIndexedType - Returns the type of the element that would be extracted
/// with an extractvalue instruction with the specified parameters.
///
- /// Null is returned if the indices are invalid for the specified
- /// pointer type.
+ /// Null is returned if the indices are invalid for the specified type.
///
static const Type *getIndexedType(const Type *Agg,
const unsigned *Idx, unsigned NumIdx);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static const Type *getIndexedType(const Type *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
// This argument ensures that we
// have an iterator we can do
// arithmetic on in constant time
@@ -1456,14 +1483,16 @@ class ExtractValueInst : public UnaryInstruction {
/// value and a list of indices. The first ctor can optionally insert before
/// an existing instruction, the second appends the new instruction to the
/// specified BasicBlock.
- template<typename InputIterator>
- inline ExtractValueInst(Value *Agg, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ inline ExtractValueInst(Value *Agg,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
Instruction *InsertBefore);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
inline ExtractValueInst(Value *Agg,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd);
// allocate space for exactly one operand
@@ -1474,17 +1503,19 @@ protected:
virtual ExtractValueInst *clone_impl() const;
public:
- template<typename InputIterator>
- static ExtractValueInst *Create(Value *Agg, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ static ExtractValueInst *Create(Value *Agg,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
return new
ExtractValueInst(Agg, IdxBegin, IdxEnd, NameStr, InsertBefore);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static ExtractValueInst *Create(Value *Agg,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new ExtractValueInst(Agg, IdxBegin, IdxEnd, NameStr, InsertAtEnd);
@@ -1509,15 +1540,14 @@ public:
/// getIndexedType - Returns the type of the element that would be extracted
/// with an extractvalue instruction with the specified parameters.
///
- /// Null is returned if the indices are invalid for the specified
- /// pointer type.
+ /// Null is returned if the indices are invalid for the specified type.
///
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static const Type *getIndexedType(const Type *Ptr,
- InputIterator IdxBegin,
- InputIterator IdxEnd) {
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd) {
return getIndexedType(Ptr, IdxBegin, IdxEnd,
- typename std::iterator_traits<InputIterator>::
+ typename std::iterator_traits<RandomAccessIterator>::
iterator_category());
}
static const Type *getIndexedType(const Type *Ptr, unsigned Idx);
@@ -1554,29 +1584,31 @@ public:
}
};
-template<typename InputIterator>
+template<typename RandomAccessIterator>
ExtractValueInst::ExtractValueInst(Value *Agg,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
Instruction *InsertBefore)
: UnaryInstruction(checkType(getIndexedType(Agg->getType(),
IdxBegin, IdxEnd)),
ExtractValue, Agg, InsertBefore) {
init(IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
-template<typename InputIterator>
+template<typename RandomAccessIterator>
ExtractValueInst::ExtractValueInst(Value *Agg,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
: UnaryInstruction(checkType(getIndexedType(Agg->getType(),
IdxBegin, IdxEnd)),
ExtractValue, Agg, InsertAtEnd) {
init(IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
@@ -1596,9 +1628,9 @@ class InsertValueInst : public Instruction {
const Twine &NameStr);
void init(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
void init(Value *Agg, Value *Val,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin, RandomAccessIterator IdxEnd,
const Twine &NameStr,
// This argument ensures that we have an iterator we can
// do arithmetic on in constant time
@@ -1620,14 +1652,16 @@ class InsertValueInst : public Instruction {
/// value, a value to insert, and a list of indices. The first ctor can
/// optionally insert before an existing instruction, the second appends
/// the new instruction to the specified BasicBlock.
- template<typename InputIterator>
- inline InsertValueInst(Value *Agg, Value *Val, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ inline InsertValueInst(Value *Agg, Value *Val,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
Instruction *InsertBefore);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
inline InsertValueInst(Value *Agg, Value *Val,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd);
/// Constructors - These two constructors are convenience methods because one
@@ -1645,17 +1679,19 @@ public:
return User::operator new(s, 2);
}
- template<typename InputIterator>
- static InsertValueInst *Create(Value *Agg, Value *Val, InputIterator IdxBegin,
- InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ static InsertValueInst *Create(Value *Agg, Value *Val,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
return new InsertValueInst(Agg, Val, IdxBegin, IdxEnd,
NameStr, InsertBefore);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static InsertValueInst *Create(Value *Agg, Value *Val,
- InputIterator IdxBegin, InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new InsertValueInst(Agg, Val, IdxBegin, IdxEnd,
@@ -1722,34 +1758,37 @@ public:
};
template <>
-struct OperandTraits<InsertValueInst> : public FixedNumOperandTraits<2> {
+struct OperandTraits<InsertValueInst> :
+ public FixedNumOperandTraits<InsertValueInst, 2> {
};
-template<typename InputIterator>
+template<typename RandomAccessIterator>
InsertValueInst::InsertValueInst(Value *Agg,
Value *Val,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(Agg->getType(), InsertValue,
OperandTraits<InsertValueInst>::op_begin(this),
2, InsertBefore) {
init(Agg, Val, IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
-template<typename InputIterator>
+template<typename RandomAccessIterator>
InsertValueInst::InsertValueInst(Value *Agg,
Value *Val,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(Agg->getType(), InsertValue,
OperandTraits<InsertValueInst>::op_begin(this),
2, InsertAtEnd) {
init(Agg, Val, IdxBegin, IdxEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
@@ -1835,7 +1874,7 @@ public:
BasicBlock *getIncomingBlock(unsigned i) const {
return cast<BasicBlock>(getOperand(i*2+1));
}
-
+
/// getIncomingBlock - Return incoming basic block corresponding
/// to an operand of the PHI.
///
@@ -1843,7 +1882,7 @@ public:
assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
return cast<BasicBlock>((&U + 1)->get());
}
-
+
/// getIncomingBlock - Return incoming basic block corresponding
/// to value use iterator.
///
@@ -1851,8 +1890,8 @@ public:
BasicBlock *getIncomingBlock(value_use_iterator<U> I) const {
return getIncomingBlock(I.getUse());
}
-
-
+
+
void setIncomingBlock(unsigned i, BasicBlock *BB) {
setOperand(i*2+1, (Value*)BB);
}
@@ -1912,13 +1951,7 @@ public:
/// hasConstantValue - If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
- ///
- /// If the PHI has undef operands, but all the rest of the operands are
- /// some unique value, return that value if it can be proved that the
- /// value dominates the PHI. If DT is null, use a conservative check,
- /// otherwise use DT to test for dominance.
- ///
- Value *hasConstantValue(DominatorTree *DT = 0) const;
+ Value *hasConstantValue() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const PHINode *) { return true; }
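With the DominatorTree parameter gone, every caller now gets the conservative answer. A sketch of the common fold, assuming PN is a PHINode*:

    if (Value *V = PN->hasConstantValue()) {
      PN->replaceAllUsesWith(V);   // the PHI always produces V
      PN->eraseFromParent();
    }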
@@ -1985,11 +2018,9 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// Convenience accessor
- Value *getReturnValue(unsigned n = 0) const {
- return n < getNumOperands()
- ? getOperand(n)
- : 0;
+ /// Convenience accessor. Returns null if there is no return value.
+ Value *getReturnValue() const {
+ return getNumOperands() != 0 ? getOperand(0) : 0;
}
unsigned getNumSuccessors() const { return 0; }
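Since a return instruction carries at most one operand, the index parameter was dead weight. A sketch, assuming RI is a ReturnInst*:

    if (Value *RV = RI->getReturnValue())
      errs() << "returns " << *RV << "\n";
    else
      errs() << "ret void\n";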
@@ -2009,7 +2040,7 @@ public:
};
template <>
-struct OperandTraits<ReturnInst> : public VariadicOperandTraits<> {
+struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
@@ -2045,22 +2076,20 @@ protected:
virtual BranchInst *clone_impl() const;
public:
static BranchInst *Create(BasicBlock *IfTrue, Instruction *InsertBefore = 0) {
- return new(1, true) BranchInst(IfTrue, InsertBefore);
+ return new(1) BranchInst(IfTrue, InsertBefore);
}
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
Value *Cond, Instruction *InsertBefore = 0) {
return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
}
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
- return new(1, true) BranchInst(IfTrue, InsertAtEnd);
+ return new(1) BranchInst(IfTrue, InsertAtEnd);
}
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
Value *Cond, BasicBlock *InsertAtEnd) {
return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
}
- ~BranchInst();
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -2077,19 +2106,6 @@ public:
Op<-3>() = V;
}
- // setUnconditionalDest - Change the current branch to an unconditional branch
- // targeting the specified block.
- // FIXME: Eliminate this ugly method.
- void setUnconditionalDest(BasicBlock *Dest) {
- Op<-1>() = (Value*)Dest;
- if (isConditional()) { // Convert this to an uncond branch.
- Op<-2>() = 0;
- Op<-3>() = 0;
- NumOperands = 1;
- OperandList = op_begin();
- }
- }
-
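With setUnconditionalDest gone (shrinking NumOperands in place sits poorly with co-allocated operands, cf. the new(1) allocation above), the replacement pattern is to build a fresh branch. A sketch, with BI and Dest hypothetical:

    BranchInst::Create(Dest, BI);   // new unconditional branch before BI
    BI->eraseFromParent();          // then drop the conditional one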
unsigned getNumSuccessors() const { return 1+isConditional(); }
BasicBlock *getSuccessor(unsigned i) const {
@@ -2117,7 +2133,8 @@ private:
};
template <>
-struct OperandTraits<BranchInst> : public VariadicOperandTraits<1> {};
+struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
+};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
@@ -2136,7 +2153,7 @@ class SwitchInst : public TerminatorInst {
// Operand[2n ] = Value to match
// Operand[2n+1] = BasicBlock to go to on match
SwitchInst(const SwitchInst &SI);
- void init(Value *Value, BasicBlock *Default, unsigned NumCases);
+ void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
void resizeOperands(unsigned No);
// allocate space for exactly zero operands
void *operator new(size_t s) {
@@ -2230,7 +2247,8 @@ public:
/// removeCase - This method removes the specified successor from the switch
/// instruction. Note that this cannot be used to remove the default
- /// destination (successor #0).
+ /// destination (successor #0). Also note that this operation may reorder the
+ /// remaining cases at index idx and above.
///
void removeCase(unsigned idx);
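Because of the possible reordering, deletion loops must not advance past a removed slot. A sketch, assuming the usual getNumCases() accessor (case #0 is the default) and a hypothetical shouldRemove predicate:

    for (unsigned i = 1, e = SI->getNumCases(); i != e; )
      if (shouldRemove(SI, i)) {
        SI->removeCase(i);   // slot i now holds a different case
        --e;
      } else
        ++i;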
@@ -2298,7 +2316,7 @@ class IndirectBrInst : public TerminatorInst {
/// here to make memory allocation more efficient. This constructor can also
/// autoinsert before another instruction.
IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
-
+
/// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
/// Address to jump to. The number of expected destinations can be specified
/// here to make memory allocation more efficient. This constructor also
@@ -2316,32 +2334,32 @@ public:
return new IndirectBrInst(Address, NumDests, InsertAtEnd);
}
~IndirectBrInst();
-
+
/// Provide fast operand accessors.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
-
+
// Accessor Methods for IndirectBrInst instruction.
Value *getAddress() { return getOperand(0); }
const Value *getAddress() const { return getOperand(0); }
void setAddress(Value *V) { setOperand(0, V); }
-
-
+
+
/// getNumDestinations - return the number of possible destinations in this
/// indirectbr instruction.
unsigned getNumDestinations() const { return getNumOperands()-1; }
-
+
/// getDestination - Return the specified destination.
BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
-
+
/// addDestination - Add a destination.
///
void addDestination(BasicBlock *Dest);
-
+
/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void removeDestination(unsigned i);
-
+
unsigned getNumSuccessors() const { return getNumOperands()-1; }
BasicBlock *getSuccessor(unsigned i) const {
return cast<BasicBlock>(getOperand(i+1));
@@ -2349,7 +2367,7 @@ public:
void setSuccessor(unsigned i, BasicBlock *NewSucc) {
setOperand(i+1, (Value*)NewSucc);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const IndirectBrInst *) { return true; }
static inline bool classof(const Instruction *I) {
@@ -2369,8 +2387,8 @@ struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
-
-
+
+
//===----------------------------------------------------------------------===//
// InvokeInst Class
//===----------------------------------------------------------------------===//
@@ -2384,9 +2402,9 @@ class InvokeInst : public TerminatorInst {
void init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
Value* const *Args, unsigned NumArgs);
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
const Twine &NameStr,
// This argument ensures that we have an iterator we can
// do arithmetic on in constant time
@@ -2399,47 +2417,49 @@ class InvokeInst : public TerminatorInst {
}
/// Construct an InvokeInst given a range of arguments.
- /// InputIterator must be a random-access iterator pointing to
+ /// RandomAccessIterator must be a random-access iterator pointing to
/// contiguous storage (e.g. a std::vector<>::iterator). Checks are
/// made for random-accessness but not for contiguous storage as
/// that would incur runtime overhead.
///
/// @brief Construct an InvokeInst from a range of arguments
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
unsigned Values,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct an InvokeInst given a range of arguments.
- /// InputIterator must be a random-access iterator pointing to
+ /// RandomAccessIterator must be a random-access iterator pointing to
/// contiguous storage (e.g. a std::vector<>::iterator). Checks are
/// made for random-accessness but not for contiguous storage as
/// that would incur runtime overhead.
///
/// @brief Construct an InvokeInst from a range of arguments
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin, RandomAccessIterator ArgEnd,
unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
virtual InvokeInst *clone_impl() const;
public:
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static InvokeInst *Create(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
unsigned Values(ArgEnd - ArgBegin + 3);
return new(Values) InvokeInst(Func, IfNormal, IfException, ArgBegin, ArgEnd,
Values, NameStr, InsertBefore);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
static InvokeInst *Create(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
unsigned Values(ArgEnd - ArgBegin + 3);
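The "+ 3" reserves co-allocated slots for the callee and both destinations in addition to the arguments. A usage sketch with hypothetical Callee, NormalBB, UnwindBB, Args, and InsertPt:

    InvokeInst *II =
        InvokeInst::Create(Callee, NormalBB, UnwindBB,
                           Args.begin(), Args.end(), "", InsertPt);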
@@ -2606,13 +2626,14 @@ private:
};
template <>
-struct OperandTraits<InvokeInst> : public VariadicOperandTraits<3> {
+struct OperandTraits<InvokeInst> : public VariadicOperandTraits<InvokeInst, 3> {
};
-template<typename InputIterator>
+template<typename RandomAccessIterator>
InvokeInst::InvokeInst(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
unsigned Values,
const Twine &NameStr, Instruction *InsertBefore)
: TerminatorInst(cast<FunctionType>(cast<PointerType>(Func->getType())
@@ -2621,12 +2642,14 @@ InvokeInst::InvokeInst(Value *Func,
OperandTraits<InvokeInst>::op_end(this) - Values,
Values, InsertBefore) {
init(Func, IfNormal, IfException, ArgBegin, ArgEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
-template<typename InputIterator>
+template<typename RandomAccessIterator>
InvokeInst::InvokeInst(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
- InputIterator ArgBegin, InputIterator ArgEnd,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd)
: TerminatorInst(cast<FunctionType>(cast<PointerType>(Func->getType())
@@ -2635,7 +2658,8 @@ InvokeInst::InvokeInst(Value *Func,
OperandTraits<InvokeInst>::op_end(this) - Values,
Values, InsertAtEnd) {
init(Func, IfNormal, IfException, ArgBegin, ArgEnd, NameStr,
- typename std::iterator_traits<InputIterator>::iterator_category());
+ typename std::iterator_traits<RandomAccessIterator>
+ ::iterator_category());
}
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
diff --git a/contrib/llvm/include/llvm/IntrinsicInst.h b/contrib/llvm/include/llvm/IntrinsicInst.h
index a17fa9c..74c30fb 100644
--- a/contrib/llvm/include/llvm/IntrinsicInst.h
+++ b/contrib/llvm/include/llvm/IntrinsicInst.h
@@ -55,7 +55,7 @@ namespace llvm {
return isa<CallInst>(V) && classof(cast<CallInst>(V));
}
};
-
+
/// DbgInfoIntrinsic - This is the common base class for debug info intrinsics
///
class DbgInfoIntrinsic : public IntrinsicInst {
@@ -139,6 +139,10 @@ namespace llvm {
return !getVolatileCst()->isZero();
}
+ unsigned getAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
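The new helper just reads the address space off the destination pointer's type, which lets transforms guard cheaply. A sketch, assuming MI is a MemIntrinsic*:

    if (MI->getAddressSpace() != 0)
      return;   // hypothetical bail-out: only handle the default address space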
/// getDest - This is just like getRawDest, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
@@ -297,29 +301,6 @@ namespace llvm {
}
};
- /// MemoryUseIntrinsic - This is the common base class for the memory use
- /// marker intrinsics.
- ///
- class MemoryUseIntrinsic : public IntrinsicInst {
- public:
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemoryUseIntrinsic *) { return true; }
- static inline bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- return true;
- default: return false;
- }
- }
- static inline bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
}
#endif
diff --git a/contrib/llvm/include/llvm/Intrinsics.td b/contrib/llvm/include/llvm/Intrinsics.td
index fb4f750..0c9be78 100644
--- a/contrib/llvm/include/llvm/Intrinsics.td
+++ b/contrib/llvm/include/llvm/Intrinsics.td
@@ -109,6 +109,9 @@ def llvm_empty_ty : LLVMType<OtherVT>; // { }
def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
def llvm_metadata_ty : LLVMType<MetadataVT>; // !{...}
+def llvm_x86mmx_ty : LLVMType<x86mmx>;
+def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>; // <1 x i64>*
+
def llvm_v2i8_ty : LLVMType<v2i8>; // 2 x i8
def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
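On the C++ side the new TableGen type corresponds to the x86_mmx IR type (Type::getX86_MMXTy). A sketch of calling one of the retyped intrinsics, with M, A, B, and InsertPt hypothetical and the enumerator name following the usual int_* to Intrinsic:: mapping:

    Function *PAddB = Intrinsic::getDeclaration(M, Intrinsic::x86_mmx_padd_b);
    Value *Ops[] = { A, B };                 // both operands of type x86_mmx
    CallInst *Sum = CallInst::Create(PAddB, Ops, Ops + 2, "sum", InsertPt);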
@@ -256,7 +259,7 @@ def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
// Internal interface for object size checking
def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty],
- [IntrReadArgMem]>,
+ [IntrNoMem]>,
GCCBuiltin<"__builtin_object_size">;
//===-------------------- Bit Manipulation Intrinsics ---------------------===//
@@ -304,6 +307,7 @@ let Properties = [IntrNoMem] in {
def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
}
+def int_eh_sjlj_dispatch_setup : Intrinsic<[], [llvm_ptr_ty]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
@@ -432,7 +436,7 @@ def int_lifetime_end : Intrinsic<[],
[IntrReadWriteArgMem, NoCapture<1>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
[llvm_i64_ty, llvm_ptr_ty],
- [IntrReadArgMem, NoCapture<1>]>;
+ [IntrReadWriteArgMem, NoCapture<1>]>;
def int_invariant_end : Intrinsic<[],
[llvm_descriptor_ty, llvm_i64_ty,
llvm_ptr_ty],
diff --git a/contrib/llvm/include/llvm/IntrinsicsARM.td b/contrib/llvm/include/llvm/IntrinsicsARM.td
index 6c04771..546538a 100644
--- a/contrib/llvm/include/llvm/IntrinsicsARM.td
+++ b/contrib/llvm/include/llvm/IntrinsicsARM.td
@@ -286,6 +286,12 @@ def int_arm_neon_vcvtfp2fxu : Neon_CvtFPToFx_Intrinsic;
def int_arm_neon_vcvtfxs2fp : Neon_CvtFxToFP_Intrinsic;
def int_arm_neon_vcvtfxu2fp : Neon_CvtFxToFP_Intrinsic;
+// Vector Conversions Between Half-Precision and Single-Precision.
+def int_arm_neon_vcvtfp2hf
+ : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_arm_neon_vcvthf2fp
+ : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
// Narrowing Saturating Vector Moves.
def int_arm_neon_vqmovns : Neon_1Arg_Narrow_Intrinsic;
def int_arm_neon_vqmovnu : Neon_1Arg_Narrow_Intrinsic;
diff --git a/contrib/llvm/include/llvm/IntrinsicsX86.td b/contrib/llvm/include/llvm/IntrinsicsX86.td
index 06ea3ae..4946220 100644
--- a/contrib/llvm/include/llvm/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IntrinsicsX86.td
@@ -130,12 +130,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i64_ty], [IntrNoMem]>;
def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ llvm_x86mmx_ty], [IntrNoMem]>;
}
// SIMD load ops
@@ -445,11 +445,11 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse_cvttpd2pi: GCCBuiltin<"__builtin_ia32_cvttpd2pi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v2f64_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
}
// SIMD load ops
@@ -563,50 +563,50 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Horizontal arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_w_128 : GCCBuiltin<"__builtin_ia32_phaddw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_d_128 : GCCBuiltin<"__builtin_ia32_phaddd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_w_128 : GCCBuiltin<"__builtin_ia32_phsubw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_d_128 : GCCBuiltin<"__builtin_ia32_phsubd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_sw_128 : GCCBuiltin<"__builtin_ia32_phsubsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
@@ -615,8 +615,8 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Packed multiply high with round and scale
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_ssse3_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem, Commutative]>;
@@ -625,35 +625,35 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Shuffle ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
llvm_v16i8_ty], [IntrNoMem]>;
- def int_x86_ssse3_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty],
+ def int_x86_sse_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i8_ty],
[IntrNoMem]>;
}
// Sign ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_psign_b : GCCBuiltin<"__builtin_ia32_psignb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_b_128 : GCCBuiltin<"__builtin_ia32_psignb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_w : GCCBuiltin<"__builtin_ia32_psignw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_w_128 : GCCBuiltin<"__builtin_ia32_psignw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_d : GCCBuiltin<"__builtin_ia32_psignd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_d_128 : GCCBuiltin<"__builtin_ia32_psignd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>;
@@ -662,17 +662,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Absolute value ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_b_128 : GCCBuiltin<"__builtin_ia32_pabsb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_w_128 : GCCBuiltin<"__builtin_ia32_pabsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_d_128 : GCCBuiltin<"__builtin_ia32_pabsd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
}
@@ -1328,281 +1328,257 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Addition
def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Subtraction
def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
// Multiplication
def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Bitwise operations
def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
// Averages
def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Maximum
def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Minimum
def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Packed sum of absolute differences
def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem, Commutative]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
}
// Integer shift ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Shift left logical
def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v1i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">,
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>;
}
// Pack ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
}
// Unpacking ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>;
}
// Integer comparison ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">,
- Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">,
- Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
- llvm_v4i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">,
- Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty,
- llvm_v2i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
}
// Misc.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">,
- Intrinsic<[], [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty], []>;
+ Intrinsic<[], [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_ptr_ty], []>;
def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">,
- Intrinsic<[llvm_i32_ty], [llvm_v8i8_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v1i64_ty], []>;
+ Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
-// def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
-// Intrinsic<[llvm_v1i64_ty], [llvm_1i64_ty,
-// llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_mmx_pextr_w :
- Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty, llvm_i32_ty],
+ def int_x86_mmx_pextr_w : GCCBuiltin<"__builtin_ia32_vec_ext_v4hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_mmx_pinsr_w :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
+ def int_x86_mmx_pinsr_w : GCCBuiltin<"__builtin_ia32_vec_set_v4hi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_x86_mmx_cvtsi32_si64 :
- Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_x86_mmx_cvtsi64_si32 :
- Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty], [IntrNoMem]>;
-
- def int_x86_mmx_vec_init_b : GCCBuiltin<"__builtin_ia32_vec_init_v8qi">,
- Intrinsic<[llvm_v8i8_ty],
- [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty,
- llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
- [IntrNoMem]>;
- def int_x86_mmx_vec_init_w : GCCBuiltin<"__builtin_ia32_vec_init_v4hi">,
- Intrinsic<[llvm_v4i16_ty],
- [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
- [IntrNoMem]>;
- def int_x86_mmx_vec_init_d : GCCBuiltin<"__builtin_ia32_vec_init_v2si">,
- Intrinsic<[llvm_v2i32_ty],
- [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-
- def int_x86_mmx_vec_ext_d : GCCBuiltin<"__builtin_ia32_vec_ext_v2si">,
- Intrinsic<[llvm_v2i32_ty],
- [llvm_v2i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
}
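
For illustration, a minimal sketch (assuming a tree from roughly this era) of what the retyping means for IR producers: every MMX intrinsic now traffics in the opaque x86_mmx type, so concrete vector values must be bitcast at the call boundary. BuildPAddB is a hypothetical helper, not part of this change.

    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"
    using namespace llvm;

    // Build a call to llvm.x86.mmx.padd.b, whose signature is now
    // x86_mmx (x86_mmx, x86_mmx) instead of <8 x i8> (<8 x i8>, <8 x i8>).
    Value *BuildPAddB(Module &M, IRBuilder<> &B, Value *A8x8, Value *B8x8) {
      const Type *MMXTy = Type::getX86_MMXTy(M.getContext());
      Function *PAddB = Intrinsic::getDeclaration(&M, Intrinsic::x86_mmx_padd_b);
      Value *LHS = B.CreateBitCast(A8x8, MMXTy); // <8 x i8> -> x86_mmx
      Value *RHS = B.CreateBitCast(B8x8, MMXTy);
      return B.CreateCall2(PAddB, LHS, RHS);     // result has x86_mmx type
    }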
diff --git a/contrib/llvm/include/llvm/IntrinsicsXCore.td b/contrib/llvm/include/llvm/IntrinsicsXCore.td
index a86cda2..97bac1d 100644
--- a/contrib/llvm/include/llvm/IntrinsicsXCore.td
+++ b/contrib/llvm/include/llvm/IntrinsicsXCore.td
@@ -11,4 +11,26 @@
let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>;
def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>;
+
+ // Resource instructions.
+ def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
+ def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<0>]>;
+ def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
}
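
A short usage sketch for the new resource intrinsics (hedged: the getDeclaration overload shown is the era's const Type** form, and the i32 resource-kind constant is a made-up placeholder):

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"
    using namespace llvm;

    // int_xcore_getr is overloaded on llvm_anyptr_ty, so the concrete
    // resource-pointer type must be supplied when requesting the declaration.
    Value *AcquireResource(Module &M, IRBuilder<> &B) {
      const Type *ResTy = PointerType::getUnqual(Type::getInt8Ty(M.getContext()));
      const Type *Tys[] = { ResTy };
      Function *GetR = Intrinsic::getDeclaration(&M, Intrinsic::xcore_getr, Tys, 1);
      Value *Kind = ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
      return B.CreateCall(GetR, Kind); // returns the i8* resource handle
    }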
diff --git a/contrib/llvm/include/llvm/LLVMContext.h b/contrib/llvm/include/llvm/LLVMContext.h
index 7cb6579..3502ff7 100644
--- a/contrib/llvm/include/llvm/LLVMContext.h
+++ b/contrib/llvm/include/llvm/LLVMContext.h
@@ -20,6 +20,8 @@ namespace llvm {
class LLVMContextImpl;
class StringRef;
class Instruction;
+class Module;
+class SMDiagnostic;
template <typename T> class SmallVectorImpl;
/// This is an important class for using LLVM in a threaded context. It
@@ -28,10 +30,6 @@ template <typename T> class SmallVectorImpl;
/// LLVMContext itself provides no locking guarantees, so you should be careful
/// to have one context per thread.
class LLVMContext {
- // DO NOT IMPLEMENT
- LLVMContext(LLVMContext&);
- void operator=(LLVMContext&);
-
public:
LLVMContextImpl *const pImpl;
LLVMContext();
@@ -40,7 +38,8 @@ public:
// Pinned metadata names, which always have the same value. This is a
// compile-time performance optimization, not a correctness optimization.
enum {
- MD_dbg = 0 // "dbg"
+ MD_dbg = 0, // "dbg"
+ MD_tbaa = 1 // "tbaa"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
@@ -51,18 +50,23 @@ public:
/// custom metadata IDs registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+ typedef void (*InlineAsmDiagHandlerTy)(const SMDiagnostic&, void *Context,
+ unsigned LocCookie);
+
/// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
/// when problems with inline asm are detected by the backend. The first
- /// argument is a function pointer (of type SourceMgr::DiagHandlerTy) and the
- /// second is a context pointer that gets passed into the DiagHandler.
+ /// argument is a function pointer and the second is a context pointer that
+ /// gets passed into the DiagHandler.
///
- /// LLVMContext doesn't take ownership or interpreter either of these
+ /// LLVMContext doesn't take ownership or interpret either of these
/// pointers.
- void setInlineAsmDiagnosticHandler(void *DiagHandler, void *DiagContext = 0);
+ void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
+ void *DiagContext = 0);
/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
/// setInlineAsmDiagnosticHandler.
- void *getInlineAsmDiagnosticHandler() const;
+ InlineAsmDiagHandlerTy getInlineAsmDiagnosticHandler() const;
/// getInlineAsmDiagnosticContext - Return the diagnostic context set by
/// setInlineAsmDiagnosticHandler.
@@ -77,6 +81,21 @@ public:
void emitError(unsigned LocCookie, StringRef ErrorStr);
void emitError(const Instruction *I, StringRef ErrorStr);
void emitError(StringRef ErrorStr);
+
+private:
+ // DO NOT IMPLEMENT
+ LLVMContext(LLVMContext&);
+ void operator=(LLVMContext&);
+
+ /// addModule - Register a module as being instantiated in this context. If
+ /// the context is deleted, the module will be deleted as well.
+ void addModule(Module*);
+
+ /// removeModule - Unregister a module from this context.
+ void removeModule(Module*);
+
+ // Module needs access to the add/removeModule methods.
+ friend class Module;
};
/// getGlobalContext - Returns a global context. This is for LLVM clients that
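
With the handler now strongly typed, registration looks like this minimal sketch (CountAsmErrors and the counter are illustrative):

    #include "llvm/LLVMContext.h"
    #include "llvm/Support/SourceMgr.h"
    using namespace llvm;

    // Matches LLVMContext::InlineAsmDiagHandlerTy.
    static void CountAsmErrors(const SMDiagnostic &D, void *Context,
                               unsigned LocCookie) {
      (void)D; (void)LocCookie;
      ++*static_cast<unsigned *>(Context); // Context is the pointer passed below
    }

    static unsigned NumAsmErrors = 0;

    void InstallHandler(LLVMContext &Ctx) {
      Ctx.setInlineAsmDiagnosticHandler(CountAsmErrors, &NumAsmErrors);
    }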
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index 35dab62..69e1bd9 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This header file pulls in all transformation and analysis passes for tools
+// This header file pulls in all transformation and analysis passes for tools
// like opt and bugpoint that need this functionality.
//
//===----------------------------------------------------------------------===//
@@ -20,8 +20,8 @@
#include "llvm/Analysis/FindUsedTypes.h"
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/PointerTracking.h"
#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/RegionPrinter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/Lint.h"
@@ -70,6 +70,7 @@ namespace {
(void) llvm::createDomViewerPass();
(void) llvm::createEdgeProfilerPass();
(void) llvm::createOptimalEdgeProfilerPass();
+ (void) llvm::createPathProfilerPass();
(void) llvm::createFunctionInliningPass();
(void) llvm::createAlwaysInlinerPass();
(void) llvm::createGlobalDCEPass();
@@ -90,8 +91,8 @@ namespace {
(void) llvm::createLoopStrengthReducePass();
(void) llvm::createLoopUnrollPass();
(void) llvm::createLoopUnswitchPass();
+ (void) llvm::createLoopIdiomPass();
(void) llvm::createLoopRotatePass();
- (void) llvm::createLoopIndexSplitPass();
(void) llvm::createLowerInvokePass();
(void) llvm::createLowerSetJmpPass();
(void) llvm::createLowerSwitchPass();
@@ -99,7 +100,9 @@ namespace {
(void) llvm::createNoProfileInfoPass();
(void) llvm::createProfileEstimatorPass();
(void) llvm::createProfileVerifierPass();
+ (void) llvm::createPathProfileVerifierPass();
(void) llvm::createProfileLoaderPass();
+ (void) llvm::createPathProfileLoaderPass();
(void) llvm::createPromoteMemoryToRegisterPass();
(void) llvm::createDemoteRegisterToMemoryPass();
(void) llvm::createPruneEHPass();
@@ -128,13 +131,13 @@ namespace {
(void) llvm::createUnifyFunctionExitNodesPass();
(void) llvm::createInstCountPass();
(void) llvm::createCodeGenPreparePass();
+ (void) llvm::createEarlyCSEPass();
(void) llvm::createGVNPass();
(void) llvm::createMemCpyOptPass();
(void) llvm::createLoopDeletionPass();
(void) llvm::createPostDomTree();
(void) llvm::createPostDomFrontier();
(void) llvm::createInstructionNamerPass();
- (void) llvm::createPartialSpecializationPass();
(void) llvm::createFunctionAttrsPass();
(void) llvm::createMergeFunctionsPass();
(void) llvm::createPrintModulePass(0);
@@ -147,14 +150,17 @@ namespace {
(void) llvm::createSinkingPass();
(void) llvm::createLowerAtomicPass();
(void) llvm::createCorrelatedValuePropagationPass();
+ (void) llvm::createMemDepPrinter();
+ (void) llvm::createInstructionSimplifierPass();
(void)new llvm::IntervalPartition();
(void)new llvm::FindUsedTypes();
(void)new llvm::ScalarEvolution();
- (void)new llvm::PointerTracking();
((llvm::Function*)0)->viewCFGOnly();
+ llvm::RGPassManager RGM(0);
+ ((llvm::RegionPass*)0)->runOnRegion((llvm::Region*)0, RGM);
llvm::AliasSetTracker X(*(llvm::AliasAnalysis*)0);
- X.add((llvm::Value*)0, 0); // for -print-alias-sets
+ X.add((llvm::Value*)0, 0, 0); // for -print-alias-sets
}
} ForcePassLinking; // Force link by creating a global definition.
}
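
The header's whole purpose is the force-link idiom: a tool includes it once and the ForcePassLinking global's constructor references every pass creator, so the linker cannot dead-strip them. A sketch of the typical client:

    // In a tool's main file; the include alone pins the pass implementations.
    #include "llvm/LinkAllPasses.h"

    int main() {
      return 0; // no pass needs to be called explicitly here
    }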
diff --git a/contrib/llvm/include/llvm/LinkAllVMCore.h b/contrib/llvm/include/llvm/LinkAllVMCore.h
index 6959cb6..83684c0 100644
--- a/contrib/llvm/include/llvm/LinkAllVMCore.h
+++ b/contrib/llvm/include/llvm/LinkAllVMCore.h
@@ -22,15 +22,14 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/InlineAsm.h"
#include "llvm/Analysis/Verifier.h"
-#include "llvm/System/Alarm.h"
-#include "llvm/System/DynamicLibrary.h"
-#include "llvm/System/Memory.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Process.h"
-#include "llvm/System/Program.h"
-#include "llvm/System/Signals.h"
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/MathExtras.h"
#include <cstdlib>
diff --git a/contrib/llvm/include/llvm/MC/EDInstInfo.h b/contrib/llvm/include/llvm/MC/EDInstInfo.h
index dded255..83d9e78 100644
--- a/contrib/llvm/include/llvm/MC/EDInstInfo.h
+++ b/contrib/llvm/include/llvm/MC/EDInstInfo.h
@@ -9,7 +9,7 @@
#ifndef EDINSTINFO_H
#define EDINSTINFO_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/ELFObjectWriter.h b/contrib/llvm/include/llvm/MC/ELFObjectWriter.h
deleted file mode 100644
index 3b9951f..0000000
--- a/contrib/llvm/include/llvm/MC/ELFObjectWriter.h
+++ /dev/null
@@ -1,46 +0,0 @@
-//===-- llvm/MC/ELFObjectWriter.h - ELF File Writer ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_MC_ELFOBJECTWRITER_H
-#define LLVM_MC_ELFOBJECTWRITER_H
-
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-
-namespace llvm {
-class MCAsmFixup;
-class MCAssembler;
-class MCFragment;
-class MCValue;
-class raw_ostream;
-
-class ELFObjectWriter : public MCObjectWriter {
- void *Impl;
-
-public:
- ELFObjectWriter(raw_ostream &OS, bool Is64Bit, bool IsLittleEndian = true,
- bool HasRelocationAddend = true);
-
- virtual ~ELFObjectWriter();
-
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm);
-
- virtual void RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue);
-
- virtual void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
index 43952e0..9cfd004 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -22,10 +22,12 @@
namespace llvm {
class MCSection;
class MCContext;
-
+
/// MCAsmInfo - This class is intended to be used as a base class for asm
/// properties and features specific to the target.
- namespace ExceptionHandling { enum ExceptionsType { None, Dwarf, SjLj }; }
+ namespace ExceptionHandling {
+ enum ExceptionsType { None, DwarfTable, DwarfCFI, SjLj };
+ }
class MCAsmInfo {
protected:
@@ -36,25 +38,30 @@ namespace llvm {
/// HasSubsectionsViaSymbols - True if this target has the MachO
/// .subsections_via_symbols directive.
bool HasSubsectionsViaSymbols; // Default is false.
-
+
/// HasMachoZeroFillDirective - True if this is a MachO target that supports
/// the macho-specific .zerofill directive for emitting BSS Symbols.
bool HasMachoZeroFillDirective; // Default is false.
-
+
/// HasMachoTBSSDirective - True if this is a MachO target that supports
/// the macho-specific .tbss directive for emitting thread local BSS Symbols
bool HasMachoTBSSDirective; // Default is false.
-
+
/// HasStaticCtorDtorReferenceInStaticMode - True if the compiler should
/// emit a ".reference .constructors_used" or ".reference .destructors_used"
  /// directive after the static ctor/dtor list. This directive is only
  /// emitted in the Static relocation model.
bool HasStaticCtorDtorReferenceInStaticMode; // Default is false.
-
+
+ /// LinkerRequiresNonEmptyDwarfLines - True if the linker has a bug and
+ /// requires that the debug_line section be of a minimum size. In practice
+  /// such a linker requires a non-empty line sequence if a file is present.
+  bool LinkerRequiresNonEmptyDwarfLines; // Defaults to false.
+
/// MaxInstLength - This is the maximum possible length of an instruction,
/// which is needed to compute the size of an inline asm.
unsigned MaxInstLength; // Defaults to 4.
-
+
/// PCSymbol - The symbol used to represent the current PC. Used in PC
/// relative expressions.
const char *PCSymbol; // Defaults to "$".
@@ -72,6 +79,9 @@ namespace llvm {
/// assembler.
const char *CommentString; // Defaults to "#"
+ /// LabelSuffix - This is appended to emitted labels.
+ const char *LabelSuffix; // Defaults to ":"
+
/// GlobalPrefix - If this is set to a non-empty string, it is prepended
/// onto all global symbols. This is often used for "_" or ".".
const char *GlobalPrefix; // Defaults to ""
@@ -80,12 +90,12 @@ namespace llvm {
/// pool entries that are completely private to the .s file and should not
/// have names in the .o file. This is often "." or "L".
const char *PrivateGlobalPrefix; // Defaults to "."
-
+
/// LinkerPrivateGlobalPrefix - This prefix is used for symbols that should
/// be passed through the assembler but be removed by the linker. This
/// is "l" on Darwin, currently used for some ObjC metadata.
const char *LinkerPrivateGlobalPrefix; // Defaults to ""
-
+
/// InlineAsmStart/End - If these are nonempty, they contain a directive to
/// emit before and after an inline assembly statement.
const char *InlineAsmStart; // Defaults to "#APP\n"
@@ -117,7 +127,7 @@ namespace llvm {
/// AsciiDirective - This directive allows emission of an ascii string with
/// the standard C escape characters embedded into it.
const char *AsciiDirective; // Defaults to "\t.ascii\t"
-
+
/// AscizDirective - If not null, this allows for special handling of
/// zero terminated strings on this target. This is commonly supported as
/// ".asciz". If a target doesn't support this, it can be set to null.
@@ -135,7 +145,7 @@ namespace llvm {
/// which should be relocated as a 32-bit GP-relative offset, e.g. .gpword
/// on Mips or .gprel32 on Alpha.
const char *GPRel32Directive; // Defaults to NULL.
-
+
/// getDataASDirective - Return the directive that should be used to emit
/// data of the specified size to the specified numeric address space.
virtual const char *getDataASDirective(unsigned Size, unsigned AS) const {
@@ -149,15 +159,15 @@ namespace llvm {
bool SunStyleELFSectionSwitchSyntax; // Defaults to false.
/// UsesELFSectionDirectiveForBSS - This is true if this target uses ELF
- /// '.section' directive before the '.bss' one. It's used for PPC/Linux
+ /// '.section' directive before the '.bss' one. It's used for PPC/Linux
  /// which doesn't support the bare '.bss' directive.
bool UsesELFSectionDirectiveForBSS; // Defaults to false.
-
+
/// HasMicrosoftFastStdCallMangling - True if this target uses microsoft
/// style mangling for functions with X86_StdCall/X86_FastCall calling
/// convention.
bool HasMicrosoftFastStdCallMangling; // Defaults to false.
-
+
//===--- Alignment Information ----------------------------------------===//
/// AlignDirective - The directive used to emit round up to an alignment
@@ -176,27 +186,34 @@ namespace llvm {
unsigned TextAlignFillValue; // Defaults to 0
//===--- Global Variable Emission Directives --------------------------===//
-
+
/// GlobalDirective - This is the directive used to declare a global entity.
///
const char *GlobalDirective; // Defaults to NULL.
- /// ExternDirective - This is the directive used to declare external
+ /// ExternDirective - This is the directive used to declare external
/// globals.
///
const char *ExternDirective; // Defaults to NULL.
-
+
/// HasSetDirective - True if the assembler supports the .set directive.
bool HasSetDirective; // Defaults to true.
-
+
+ /// HasAggressiveSymbolFolding - False if the assembler requires that we use
+ /// Lc = a - b
+ /// .long Lc
+ /// instead of
+ /// .long a - b
+ bool HasAggressiveSymbolFolding; // Defaults to true.
+
/// HasLCOMMDirective - This is true if the target supports the .lcomm
/// directive.
bool HasLCOMMDirective; // Defaults to false.
-
+
  /// COMMDirectiveAlignmentIsInBytes - True if COMMDirective's optional
  /// alignment is to be specified in bytes instead of log2(n).
  bool COMMDirectiveAlignmentIsInBytes; // Defaults to true.
-
+
/// HasDotTypeDotSizeDirective - True if the target has .type and .size
/// directives, this is true for most ELF targets.
bool HasDotTypeDotSizeDirective; // Defaults to true.
@@ -209,10 +226,14 @@ namespace llvm {
/// directive.
bool HasNoDeadStrip; // Defaults to false.
+ /// HasSymbolResolver - True if this target supports the MachO
+ /// .symbol_resolver directive.
+ bool HasSymbolResolver; // Defaults to false.
+
/// WeakRefDirective - This directive, if non-null, is used to declare a
/// global as being a weak undefined symbol.
const char *WeakRefDirective; // Defaults to NULL.
-
+
/// WeakDefDirective - This directive, if non-null, is used to declare a
/// global as being a weak defined symbol.
const char *WeakDefDirective; // Defaults to NULL.
@@ -220,7 +241,7 @@ namespace llvm {
/// LinkOnceDirective - This directive, if non-null is used to declare a
/// global as being a weak defined symbol. This is used on cygwin/mingw.
const char *LinkOnceDirective; // Defaults to NULL.
-
+
/// HiddenVisibilityAttr - This attribute, if not MCSA_Invalid, is used to
/// declare a symbol as having hidden visibility.
MCSymbolAttr HiddenVisibilityAttr; // Defaults to MCSA_Hidden.
@@ -234,10 +255,6 @@ namespace llvm {
/// HasLEB128 - True if target asm supports leb128 directives.
bool HasLEB128; // Defaults to false.
- /// hasDotLocAndDotFile - True if target asm supports .loc and .file
- /// directives for emitting debugging information.
- bool HasDotLocAndDotFile; // Defaults to false.
-
/// SupportsDebugInformation - True if target supports emission of debugging
/// information.
bool SupportsDebugInformation; // Defaults to false.
@@ -254,8 +271,8 @@ namespace llvm {
/// DwarfSectionOffsetDirective - Special section offset directive.
const char* DwarfSectionOffsetDirective; // Defaults to NULL
-
- /// DwarfUsesAbsoluteLabelForStmtList - True if DW_AT_stmt_list needs
+
+ /// DwarfUsesAbsoluteLabelForStmtList - True if DW_AT_stmt_list needs
  /// an absolute label instead of an offset.
  bool DwarfUsesAbsoluteLabelForStmtList; // Defaults to true.
@@ -276,7 +293,7 @@ namespace llvm {
static unsigned getULEB128Size(unsigned Value);
bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }
-
+
// Data directive accessors.
//
const char *getData8bitsDirective(unsigned AS = 0) const {
@@ -299,11 +316,11 @@ namespace llvm {
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const{
return 0;
}
-
+
bool usesSunStyleELFSectionSwitchSyntax() const {
return SunStyleELFSectionSwitchSyntax;
}
-
+
bool usesELFSectionDirectiveForBSS() const {
return UsesELFSectionDirectiveForBSS;
}
@@ -311,7 +328,7 @@ namespace llvm {
bool hasMicrosoftFastStdCallMangling() const {
return HasMicrosoftFastStdCallMangling;
}
-
+
// Accessors.
//
bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
@@ -319,6 +336,9 @@ namespace llvm {
bool hasStaticCtorDtorReferenceInStaticMode() const {
return HasStaticCtorDtorReferenceInStaticMode;
}
+ bool getLinkerRequiresNonEmptyDwarfLines() const {
+ return LinkerRequiresNonEmptyDwarfLines;
+ }
unsigned getMaxInstLength() const {
return MaxInstLength;
}
@@ -334,6 +354,9 @@ namespace llvm {
const char *getCommentString() const {
return CommentString;
}
+ const char *getLabelSuffix() const {
+ return LabelSuffix;
+ }
const char *getGlobalPrefix() const {
return GlobalPrefix;
}
@@ -386,6 +409,9 @@ namespace llvm {
return ExternDirective;
}
bool hasSetDirective() const { return HasSetDirective; }
+ bool hasAggressiveSymbolFolding() const {
+ return HasAggressiveSymbolFolding;
+ }
bool hasLCOMMDirective() const { return HasLCOMMDirective; }
bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;}
bool getCOMMDirectiveAlignmentIsInBytes() const {
@@ -393,10 +419,11 @@ namespace llvm {
}
bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
bool hasNoDeadStrip() const { return HasNoDeadStrip; }
+ bool hasSymbolResolver() const { return HasSymbolResolver; }
const char *getWeakRefDirective() const { return WeakRefDirective; }
const char *getWeakDefDirective() const { return WeakDefDirective; }
const char *getLinkOnceDirective() const { return LinkOnceDirective; }
-
+
MCSymbolAttr getHiddenVisibilityAttr() const { return HiddenVisibilityAttr;}
MCSymbolAttr getProtectedVisibilityAttr() const {
return ProtectedVisibilityAttr;
@@ -404,9 +431,6 @@ namespace llvm {
bool hasLEB128() const {
return HasLEB128;
}
- bool hasDotLocAndDotFile() const {
- return HasDotLocAndDotFile;
- }
bool doesSupportDebugInformation() const {
return SupportsDebugInformation;
}
@@ -416,6 +440,12 @@ namespace llvm {
ExceptionHandling::ExceptionsType getExceptionHandlingType() const {
return ExceptionsType;
}
+ bool isExceptionHandlingDwarf() const {
+ return
+ (ExceptionsType == ExceptionHandling::DwarfTable ||
+ ExceptionsType == ExceptionHandling::DwarfCFI);
+ }
+
bool doesDwarfRequireFrameSection() const {
return DwarfRequiresFrameSection;
}
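
Since the old single Dwarf enumerator is split into DwarfTable and DwarfCFI, comparisons against ExceptionHandling::Dwarf should migrate to the new predicate; a small sketch (helper name illustrative):

    #include "llvm/MC/MCAsmInfo.h"
    using namespace llvm;

    static bool NeedsDwarfEH(const MCAsmInfo &MAI) {
      // Replaces the old (ExceptionsType == ExceptionHandling::Dwarf) test;
      // true for both DwarfTable and DwarfCFI.
      return MAI.isExceptionHandlingDwarf();
    }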
diff --git a/contrib/llvm/include/llvm/MC/MCAsmLayout.h b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
index b9565ba..01cb000 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmLayout.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
@@ -39,13 +39,12 @@ private:
  /// The last fragment which was laid out, or 0 if nothing has been laid
  /// out. Fragments are always laid out in order, so all fragments with a
/// lower ordinal will be up to date.
- mutable MCFragment *LastValidFragment;
+ mutable DenseMap<const MCSectionData*, MCFragment *> LastValidFragment;
/// \brief Make sure that the layout for the given fragment is valid, lazily
/// computing it if necessary.
void EnsureValid(const MCFragment *F) const;
- bool isSectionUpToDate(const MCSectionData *SD) const;
bool isFragmentUpToDate(const MCFragment *F) const;
public:
@@ -54,27 +53,15 @@ public:
/// Get the assembler object this is a layout for.
MCAssembler &getAssembler() const { return Assembler; }
- /// \brief Update the layout because a fragment has been resized. The
- /// fragments size should have already been updated, the \arg SlideAmount is
- /// the delta from the old size.
- void UpdateForSlide(MCFragment *F, int SlideAmount);
-
- /// \brief Update the layout because a fragment has been replaced.
- void FragmentReplaced(MCFragment *Src, MCFragment *Dst);
-
- /// \brief Perform a full layout.
- void LayoutFile();
+ /// \brief Invalidate all following fragments because a fragment has been
+  /// resized. The fragment's size should have already been updated.
+ void Invalidate(MCFragment *F);
/// \brief Perform layout for a single fragment, assuming that the previous
  /// fragment has already been laid out correctly, and the parent section has
/// been initialized.
void LayoutFragment(MCFragment *Fragment);
- /// \brief Performs initial layout for a single section, assuming that the
- /// previous section (including its fragments) has already been layed out
- /// correctly.
- void LayoutSection(MCSectionData *SD);
-
/// @name Section Access (in layout order)
/// @{
@@ -89,28 +76,13 @@ public:
/// @name Fragment Layout Data
/// @{
- /// \brief Get the effective size of the given fragment, as computed in the
- /// current layout.
- uint64_t getFragmentEffectiveSize(const MCFragment *F) const;
-
/// \brief Get the offset of the given fragment inside its containing section.
uint64_t getFragmentOffset(const MCFragment *F) const;
/// @}
- /// @name Section Layout Data
- /// @{
-
- /// \brief Get the computed address of the given section.
- uint64_t getSectionAddress(const MCSectionData *SD) const;
-
- /// @}
/// @name Utility Functions
/// @{
- /// \brief Get the address of the given fragment, as computed in the current
- /// layout.
- uint64_t getFragmentAddress(const MCFragment *F) const;
-
  /// \brief Get the address space size of the given section, as it affects
/// layout. This may differ from the size reported by \see getSectionSize() by
/// not including section tail padding.
@@ -120,12 +92,9 @@ public:
/// file. This may include additional padding, or be 0 for virtual sections.
uint64_t getSectionFileSize(const MCSectionData *SD) const;
- /// \brief Get the logical data size of the given section.
- uint64_t getSectionSize(const MCSectionData *SD) const;
-
- /// \brief Get the address of the given symbol, as computed in the current
+ /// \brief Get the offset of the given symbol, as computed in the current
/// layout.
- uint64_t getSymbolAddress(const MCSymbolData *SD) const;
+ uint64_t getSymbolOffset(const MCSymbolData *SD) const;
/// @}
};
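
The layout no longer hands out absolute addresses; callers that previously used getSymbolAddress/getSectionAddress now combine the section-relative offset with a base the object writer computed itself. A hedged sketch (SectionBase stands in for that writer-computed value):

    #include "llvm/MC/MCAsmLayout.h"
    using namespace llvm;

    static uint64_t SymbolAddress(const MCAsmLayout &Layout,
                                  const MCSymbolData *SD,
                                  uint64_t SectionBase) {
      return SectionBase + Layout.getSymbolOffset(SD); // was getSymbolAddress(SD)
    }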
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index d193b98..30971c6 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -11,13 +11,14 @@
#define LLVM_MC_MCASSEMBLER_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Support/Casting.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <vector> // FIXME: Shouldn't be needed.
namespace llvm {
@@ -49,7 +50,10 @@ public:
FT_Data,
FT_Fill,
FT_Inst,
- FT_Org
+ FT_Org,
+ FT_Dwarf,
+ FT_DwarfFrame,
+ FT_LEB
};
private:
@@ -72,12 +76,7 @@ private:
/// initialized.
uint64_t Offset;
- /// EffectiveSize - The compute size of this section. This is ~0 until
- /// initialized.
- uint64_t EffectiveSize;
-
- /// LayoutOrder - The global layout order of this fragment. This is the index
- /// across all fragments in the file, not just within the section.
+ /// LayoutOrder - The layout order of this fragment.
unsigned LayoutOrder;
/// @}
@@ -234,19 +233,12 @@ class MCAlignFragment : public MCFragment {
/// target dependent.
bool EmitNops : 1;
- /// OnlyAlignAddress - Flag to indicate that this align is only used to adjust
- /// the address space size of a section and that it should not be included as
- /// part of the section size. This flag can only be used on the last fragment
- /// in a section.
- bool OnlyAlignAddress : 1;
-
public:
MCAlignFragment(unsigned _Alignment, int64_t _Value, unsigned _ValueSize,
unsigned _MaxBytesToEmit, MCSectionData *SD = 0)
: MCFragment(FT_Align, SD), Alignment(_Alignment),
Value(_Value),ValueSize(_ValueSize),
- MaxBytesToEmit(_MaxBytesToEmit), EmitNops(false),
- OnlyAlignAddress(false) {}
+ MaxBytesToEmit(_MaxBytesToEmit), EmitNops(false) {}
/// @name Accessors
/// @{
@@ -262,9 +254,6 @@ public:
bool hasEmitNops() const { return EmitNops; }
void setEmitNops(bool Value) { EmitNops = Value; }
- bool hasOnlyAlignAddress() const { return OnlyAlignAddress; }
- void setOnlyAlignAddress(bool Value) { OnlyAlignAddress = Value; }
-
/// @}
static bool classof(const MCFragment *F) {
@@ -337,6 +326,100 @@ public:
static bool classof(const MCOrgFragment *) { return true; }
};
+class MCLEBFragment : public MCFragment {
+ /// Value - The value this fragment should contain.
+ const MCExpr *Value;
+
+ /// IsSigned - True if this is a sleb128, false if uleb128.
+ bool IsSigned;
+
+ SmallString<8> Contents;
+public:
+ MCLEBFragment(const MCExpr &Value_, bool IsSigned_, MCSectionData *SD)
+ : MCFragment(FT_LEB, SD),
+ Value(&Value_), IsSigned(IsSigned_) { Contents.push_back(0); }
+
+ /// @name Accessors
+ /// @{
+
+ const MCExpr &getValue() const { return *Value; }
+
+ bool isSigned() const { return IsSigned; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_LEB;
+ }
+ static bool classof(const MCLEBFragment *) { return true; }
+};
+
+class MCDwarfLineAddrFragment : public MCFragment {
+ /// LineDelta - the value of the difference between the two line numbers
+ /// between two .loc dwarf directives.
+ int64_t LineDelta;
+
+ /// AddrDelta - The expression for the difference of the two symbols that
+ /// make up the address delta between two .loc dwarf directives.
+ const MCExpr *AddrDelta;
+
+ SmallString<8> Contents;
+
+public:
+ MCDwarfLineAddrFragment(int64_t _LineDelta, const MCExpr &_AddrDelta,
+ MCSectionData *SD)
+ : MCFragment(FT_Dwarf, SD),
+ LineDelta(_LineDelta), AddrDelta(&_AddrDelta) { Contents.push_back(0); }
+
+ /// @name Accessors
+ /// @{
+
+ int64_t getLineDelta() const { return LineDelta; }
+
+ const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Dwarf;
+ }
+ static bool classof(const MCDwarfLineAddrFragment *) { return true; }
+};
+
+class MCDwarfCallFrameFragment : public MCFragment {
+ /// AddrDelta - The expression for the difference of the two symbols that
+ /// make up the address delta between two .cfi_* dwarf directives.
+ const MCExpr *AddrDelta;
+
+ SmallString<8> Contents;
+
+public:
+ MCDwarfCallFrameFragment(const MCExpr &_AddrDelta, MCSectionData *SD)
+ : MCFragment(FT_DwarfFrame, SD),
+ AddrDelta(&_AddrDelta) { Contents.push_back(0); }
+
+ /// @name Accessors
+ /// @{
+
+ const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_DwarfFrame;
+ }
+ static bool classof(const MCDwarfCallFrameFragment *) { return true; }
+};
+
// FIXME: Should this be a separate class, or just merged into MCSection? Since
// we anticipate the fast path being through an MCAssembler, the only reason to
// keep it out is for API abstraction.
@@ -373,10 +456,6 @@ private:
//
// FIXME: This could all be kept private to the assembler implementation.
- /// Address - The computed address of this section. This is ~0 until
- /// initialized.
- uint64_t Address;
-
/// HasInstructions - Whether this section has had instructions emitted into
/// it.
unsigned HasInstructions : 1;
@@ -585,6 +664,8 @@ private:
MCCodeEmitter &Emitter;
+ MCObjectWriter &Writer;
+
raw_ostream &OS;
iplist<MCSectionData> Sections;
@@ -603,7 +684,17 @@ private:
std::vector<IndirectSymbolData> IndirectSymbols;
+ /// The set of function symbols for which a .thumb_func directive has
+ /// been seen.
+ //
+ // FIXME: We really would like this in target specific code rather than
+ // here. Maybe when the relocation stuff moves to target specific,
+ // this can go with it? The streamer would need some target specific
+ // refactoring too.
+ SmallPtrSet<const MCSymbol*, 64> ThumbFuncs;
+
unsigned RelaxAll : 1;
+ unsigned NoExecStack : 1;
unsigned SubsectionsViaSymbols : 1;
private:
@@ -633,24 +724,34 @@ private:
bool FragmentNeedsRelaxation(const MCInstFragment *IF,
const MCAsmLayout &Layout) const;
- /// Compute the effective fragment size assuming it is layed out at the given
- /// \arg SectionAddress and \arg FragmentOffset.
- uint64_t ComputeFragmentSize(MCAsmLayout &Layout, const MCFragment &F,
- uint64_t SectionAddress,
- uint64_t FragmentOffset) const;
-
/// LayoutOnce - Perform one layout iteration and return true if any offsets
/// were adjusted.
bool LayoutOnce(MCAsmLayout &Layout);
+ bool LayoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD);
+
+ bool RelaxInstruction(MCAsmLayout &Layout, MCInstFragment &IF);
+
+ bool RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
+
+ bool RelaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
+ bool RelaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+ MCDwarfCallFrameFragment &DF);
+
/// FinishLayout - Finalize a layout, including fragment lowering.
void FinishLayout(MCAsmLayout &Layout);
+ uint64_t HandleFixup(const MCAsmLayout &Layout,
+ MCFragment &F, const MCFixup &Fixup);
+
public:
+  /// Compute the effective fragment size assuming it is laid out within the
+  /// given \arg Layout.
+ uint64_t ComputeFragmentSize(const MCAsmLayout &Layout, const MCFragment &F) const;
+
/// Find the symbol which defines the atom containing the given symbol, or
/// null if there is no such symbol.
- const MCSymbolData *getAtom(const MCAsmLayout &Layout,
- const MCSymbolData *Symbol) const;
+ const MCSymbolData *getAtom(const MCSymbolData *Symbol) const;
/// Check whether a particular symbol is visible to the linker and is required
/// in the symbol table, or whether it can be discarded by the assembler. This
@@ -659,12 +760,16 @@ public:
bool isSymbolLinkerVisible(const MCSymbol &SD) const;
/// Emit the section contents using the given object writer.
- //
- // FIXME: Should MCAssembler always have a reference to the object writer?
- void WriteSectionData(const MCSectionData *Section, const MCAsmLayout &Layout,
- MCObjectWriter *OW) const;
+ void WriteSectionData(const MCSectionData *Section,
+ const MCAsmLayout &Layout) const;
- void AddSectionToTheEnd(MCSectionData &SD, MCAsmLayout &Layout);
+ /// Check whether a given symbol has been flagged with .thumb_func.
+ bool isThumbFunc(const MCSymbol *Func) const {
+ return ThumbFuncs.count(Func);
+ }
+
+ /// Flag a function symbol as the target of a .thumb_func directive.
+ void setIsThumbFunc(const MCSymbol *Func) { ThumbFuncs.insert(Func); }
public:
/// Construct a new assembler instance.
@@ -675,8 +780,9 @@ public:
// concrete and require clients to pass in a target like object. The other
// option is to make this abstract, and have targets provide concrete
// implementations as we do with AsmParser.
- MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
- MCCodeEmitter &_Emitter, raw_ostream &OS);
+ MCAssembler(MCContext &Context_, TargetAsmBackend &Backend_,
+ MCCodeEmitter &Emitter_, MCObjectWriter &Writer_,
+ raw_ostream &OS);
~MCAssembler();
MCContext &getContext() const { return Context; }
@@ -685,10 +791,12 @@ public:
MCCodeEmitter &getEmitter() const { return Emitter; }
+ MCObjectWriter &getWriter() const { return Writer; }
+
/// Finish - Do final processing and write the object to the output stream.
  /// The object is written via the MCObjectWriter supplied at construction
  /// time.
- void Finish(MCObjectWriter *Writer = 0);
+ void Finish();
// FIXME: This does not belong here.
bool getSubsectionsViaSymbols() const {
@@ -701,6 +809,9 @@ public:
bool getRelaxAll() const { return RelaxAll; }
void setRelaxAll(bool Value) { RelaxAll = Value; }
+ bool getNoExecStack() const { return NoExecStack; }
+ void setNoExecStack(bool Value) { NoExecStack = Value; }
+
/// @name Section List Access
/// @{
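
A construction sketch under the new interface (names of the helper and its parameters are illustrative): the MCObjectWriter moves into the constructor and Finish() loses its argument.

    #include "llvm/MC/MCAssembler.h"
    using namespace llvm;

    static void AssembleAndEmit(MCContext &Ctx, TargetAsmBackend &TAB,
                                MCCodeEmitter &MCE, MCObjectWriter &MOW,
                                raw_ostream &OS) {
      MCAssembler Asm(Ctx, TAB, MCE, MOW, OS); // writer is a ctor argument now
      Asm.setNoExecStack(true);                // new flag alongside RelaxAll
      // ... populate sections and fragments ...
      Asm.Finish();                            // was Asm.Finish(&MOW)
    }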
diff --git a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
index 010a2e5..bc63241 100644
--- a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
+++ b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
@@ -20,33 +20,6 @@ class MCInst;
class raw_ostream;
template<typename T> class SmallVectorImpl;
-/// MCFixupKindInfo - Target independent information on a fixup kind.
-struct MCFixupKindInfo {
- enum FixupKindFlags {
- /// Is this fixup kind PCrelative. This is used by the assembler backend to
- /// evaluate fixup values in a target independent manner when possible.
- FKF_IsPCRel = (1 << 0)
- };
-
- /// A target specific name for the fixup kind. The names will be unique for
- /// distinct kinds on any given target.
- const char *Name;
-
- /// The bit offset to write the relocation into.
- //
- // FIXME: These two fields are under-specified and not general enough, but it
- // is covers many things, and is enough to let the AsmStreamer pretty-print
- // the encoding.
- unsigned TargetOffset;
-
- /// The number of bits written by this fixup. The bits are assumed to be
- /// contiguous.
- unsigned TargetSize;
-
- /// Flags describing additional information on this fixup kind.
- unsigned Flags;
-};
-
/// MCCodeEmitter - Generic instruction encoding interface.
class MCCodeEmitter {
private:
@@ -58,17 +31,6 @@ protected: // Can only create subclasses.
public:
virtual ~MCCodeEmitter();
- /// @name Target Independent Fixup Information
- /// @{
-
- /// getNumFixupKinds - Get the number of target specific fixup kinds.
- virtual unsigned getNumFixupKinds() const = 0;
-
- /// getFixupKindInfo - Get information on a fixup kind.
- virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
-
- /// @}
-
/// EncodeInstruction - Encode the given \arg Inst to bytes on the output
/// stream \arg OS.
virtual void EncodeInstruction(const MCInst &Inst, raw_ostream &OS,
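
The fixup-description API does not disappear with this hunk; in this tree it moves out of the code emitter (hedged: based on the surrounding headers, MCFixupKindInfo now lives in llvm/MC/MCFixupKindInfo.h and is served by TargetAsmBackend). Sketch:

    #include "llvm/MC/MCFixupKindInfo.h"
    #include "llvm/Target/TargetAsmBackend.h"
    using namespace llvm;

    static bool IsPCRelFixup(const TargetAsmBackend &TAB, MCFixupKind Kind) {
      // Previously MCCodeEmitter::getFixupKindInfo(Kind).
      return TAB.getFixupKindInfo(Kind).Flags & MCFixupKindInfo::FKF_IsPCRel;
    }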
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index d22868c..7b26d54 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -29,7 +29,9 @@ namespace llvm {
class MCLineSection;
class StringRef;
class Twine;
+ class TargetAsmInfo;
class MCSectionMachO;
+ class MCSectionELF;
/// MCContext - Context object for machine code objects. This class owns all
/// of the sections that it creates.
@@ -41,9 +43,15 @@ namespace llvm {
/// The MCAsmInfo for this target.
const MCAsmInfo &MAI;
+ const TargetAsmInfo *TAI;
+
/// Symbols - Bindings of names to symbols.
StringMap<MCSymbol*> Symbols;
+  /// UsedNames - Keeps track of names that were used for both declared
+  /// and artificial symbols.
+ StringMap<bool> UsedNames;
+
/// NextUniqueID - The next ID to dole out to an unnamed assembler temporary
/// symbol.
unsigned NextUniqueID;
@@ -56,8 +64,8 @@ namespace llvm {
/// GetInstance() gets the current instance of the directional local label
/// for the LocalLabelVal and adds it to the map if needed.
unsigned GetInstance(int64_t LocalLabelVal);
-
- /// The file name of the log file from the enviromment variable
+
+ /// The file name of the log file from the environment variable
  /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
  /// directive is used; otherwise it is an error.
char *SecureLogFile;
@@ -79,29 +87,37 @@ namespace llvm {
/// The dwarf line information from the .loc directives for the sections
/// with assembled machine instructions have after seeing .loc directives.
DenseMap<const MCSection *, MCLineSection *> MCLineSections;
+ /// We need a deterministic iteration order, so we remember the order
+ /// the elements were added.
+ std::vector<const MCSection *> MCLineSectionOrder;
/// Allocator - Allocator object used for creating machine code objects.
///
/// We use a bump pointer allocator to avoid the need to track all allocated
/// objects.
BumpPtrAllocator Allocator;
-
+
void *MachOUniquingMap, *ELFUniquingMap, *COFFUniquingMap;
+
+ MCSymbol *CreateSymbol(StringRef Name);
+
public:
- explicit MCContext(const MCAsmInfo &MAI);
+ explicit MCContext(const MCAsmInfo &MAI, const TargetAsmInfo *TAI);
~MCContext();
-
+
const MCAsmInfo &getAsmInfo() const { return MAI; }
- /// @name Symbol Managment
+ const TargetAsmInfo &getTargetAsmInfo() const { return *TAI; }
+
+ /// @name Symbol Management
/// @{
-
+
/// CreateTempSymbol - Create and return a new assembler temporary symbol
/// with a unique but unspecified name.
MCSymbol *CreateTempSymbol();
- /// CreateDirectionalLocalSymbol - Create the defintion of a directional
- /// local symbol for numbered label (used for "1:" defintions).
+ /// CreateDirectionalLocalSymbol - Create the definition of a directional
+ /// local symbol for numbered label (used for "1:" definitions).
MCSymbol *CreateDirectionalLocalSymbol(int64_t LocalLabelVal);
/// GetDirectionalLocalSymbol - Create and return a directional local
@@ -120,8 +136,8 @@ namespace llvm {
MCSymbol *LookupSymbol(StringRef Name) const;
/// @}
-
- /// @name Section Managment
+
+ /// @name Section Management
/// @{
/// getMachOSection - Return the MCSection for the specified mach-o section.
@@ -137,11 +153,15 @@ namespace llvm {
SectionKind K) {
return getMachOSection(Segment, Section, TypeAndAttributes, 0, K);
}
-
- const MCSection *getELFSection(StringRef Section, unsigned Type,
- unsigned Flags, SectionKind Kind,
- bool IsExplicit = false,
- unsigned EntrySize = 0);
+
+ const MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, SectionKind Kind);
+
+ const MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, SectionKind Kind,
+ unsigned EntrySize, StringRef Group);
+
+ const MCSectionELF *CreateELFGroupSection();
const MCSection *getCOFFSection(StringRef Section, unsigned Characteristics,
int Selection, SectionKind Kind);
@@ -151,16 +171,20 @@ namespace llvm {
return getCOFFSection (Section, Characteristics, 0, Kind);
}
-
+
/// @}
- /// @name Dwarf Managment
+ /// @name Dwarf Management
/// @{
/// GetDwarfFile - creates an entry in the dwarf file and directory tables.
unsigned GetDwarfFile(StringRef FileName, unsigned FileNumber);
- bool ValidateDwarfFileNumber(unsigned FileNumber);
+ bool isValidDwarfFileNumber(unsigned FileNumber);
+
+ bool hasDwarfFiles() const {
+ return !MCDwarfFiles.empty();
+ }
const std::vector<MCDwarfFile *> &getMCDwarfFiles() {
return MCDwarfFiles;
@@ -168,23 +192,35 @@ namespace llvm {
const std::vector<StringRef> &getMCDwarfDirs() {
return MCDwarfDirs;
}
- DenseMap<const MCSection *, MCLineSection *> &getMCLineSections() {
+
+ const DenseMap<const MCSection *, MCLineSection *>
+ &getMCLineSections() const {
return MCLineSections;
}
+ const std::vector<const MCSection *> &getMCLineSectionOrder() const {
+ return MCLineSectionOrder;
+ }
+ void addMCLineSection(const MCSection *Sec, MCLineSection *Line) {
+ MCLineSections[Sec] = Line;
+ MCLineSectionOrder.push_back(Sec);
+ }
/// setCurrentDwarfLoc - saves the information from the currently parsed
- /// dwarf .loc directive and sets DwarfLocSeen. When the next instruction /// is assembled an entry in the line number table with this information and
+ /// dwarf .loc directive and sets DwarfLocSeen. When the next instruction
+ /// is assembled an entry in the line number table with this information and
/// the address of the instruction will be created.
void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column,
- unsigned Flags, unsigned Isa) {
+ unsigned Flags, unsigned Isa,
+ unsigned Discriminator) {
CurrentDwarfLoc.setFileNum(FileNum);
CurrentDwarfLoc.setLine(Line);
CurrentDwarfLoc.setColumn(Column);
CurrentDwarfLoc.setFlags(Flags);
CurrentDwarfLoc.setIsa(Isa);
+ CurrentDwarfLoc.setDiscriminator(Discriminator);
DwarfLocSeen = true;
}
- void clearDwarfLocSeen() { DwarfLocSeen = false; }
+ void ClearDwarfLocSeen() { DwarfLocSeen = false; }
bool getDwarfLocSeen() { return DwarfLocSeen; }
const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }
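
A usage sketch for the extended .loc bookkeeping (all values illustrative): the setter gains a trailing Discriminator parameter and the clear entry point is renamed.

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCDwarf.h" // DWARF2_FLAG_IS_STMT
    using namespace llvm;

    static void NoteLoc(MCContext &Ctx) {
      Ctx.setCurrentDwarfLoc(/*FileNum=*/1, /*Line=*/42, /*Column=*/7,
                             DWARF2_FLAG_IS_STMT, /*Isa=*/0,
                             /*Discriminator=*/3);
      // ... once the next instruction has been assembled and recorded ...
      Ctx.ClearDwarfLocSeen(); // renamed from clearDwarfLocSeen
    }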
diff --git a/contrib/llvm/include/llvm/MC/MCDirectives.h b/contrib/llvm/include/llvm/MC/MCDirectives.h
index 223b09e..1df55dc 100644
--- a/contrib/llvm/include/llvm/MC/MCDirectives.h
+++ b/contrib/llvm/include/llvm/MC/MCDirectives.h
@@ -26,6 +26,7 @@ enum MCSymbolAttr {
MCSA_ELF_TypeTLS, ///< .type _foo, STT_TLS # aka @tls_object
MCSA_ELF_TypeCommon, ///< .type _foo, STT_COMMON # aka @common
MCSA_ELF_TypeNoType, ///< .type _foo, STT_NOTYPE # aka @notype
+  MCSA_ELF_TypeGnuUniqueObject, ///< .type _foo, @gnu_unique_object
MCSA_Global, ///< .globl
MCSA_Hidden, ///< .hidden (ELF)
MCSA_IndirectSymbol, ///< .indirect_symbol (MachO)
@@ -33,6 +34,7 @@ enum MCSymbolAttr {
MCSA_LazyReference, ///< .lazy_reference (MachO)
MCSA_Local, ///< .local (ELF)
MCSA_NoDeadStrip, ///< .no_dead_strip (MachO)
+ MCSA_SymbolResolver, ///< .symbol_resolver (MachO)
MCSA_PrivateExtern, ///< .private_extern (MachO)
MCSA_Protected, ///< .protected (ELF)
MCSA_Reference, ///< .reference (MachO)
@@ -43,9 +45,12 @@ enum MCSymbolAttr {
};
enum MCAssemblerFlag {
- MCAF_SubsectionsViaSymbols ///< .subsections_via_symbols (MachO)
+ MCAF_SyntaxUnified, ///< .syntax (ARM/ELF)
+ MCAF_SubsectionsViaSymbols, ///< .subsections_via_symbols (MachO)
+ MCAF_Code16, ///< .code 16
+ MCAF_Code32 ///< .code 32
};
-
+
} // end namespace llvm
#endif
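
For reference, the new assembler flags map onto directive text roughly as follows; a sketch
only, since the real printing lives in the streamers (directive spellings taken from the
enum comments above):

    const char *flagToDirectiveExample(llvm::MCAssemblerFlag Flag) {
      switch (Flag) {
      case llvm::MCAF_SyntaxUnified:         return "\t.syntax unified";
      case llvm::MCAF_SubsectionsViaSymbols: return "\t.subsections_via_symbols";
      case llvm::MCAF_Code16:                return "\t.code 16";
      case llvm::MCAF_Code32:                return "\t.code 32";
      }
      return "";
    }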
diff --git a/contrib/llvm/include/llvm/MC/MCDisassembler.h b/contrib/llvm/include/llvm/MC/MCDisassembler.h
index dfb8ed5..c9e42eb 100644
--- a/contrib/llvm/include/llvm/MC/MCDisassembler.h
+++ b/contrib/llvm/include/llvm/MC/MCDisassembler.h
@@ -9,7 +9,7 @@
#ifndef MCDISASSEMBLER_H
#define MCDISASSEMBLER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/MCDwarf.h b/contrib/llvm/include/llvm/MC/MCDwarf.h
index dac875c..07a7bad 100644
--- a/contrib/llvm/include/llvm/MC/MCDwarf.h
+++ b/contrib/llvm/include/llvm/MC/MCDwarf.h
@@ -8,8 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCDwarfFile to support the dwarf
-// .file directive.
-// TODO: add the support needed for the .loc directive.
+// .file directive and the .loc directive.
//
//===----------------------------------------------------------------------===//
@@ -17,12 +16,21 @@
#define LLVM_MC_MCDWARF_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineLocation.h" // FIXME
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Dwarf.h"
#include <vector>
namespace llvm {
+ class MachineMove;
class MCContext;
+ class MCExpr;
class MCSection;
+ class MCSectionData;
+ class MCStreamer;
class MCSymbol;
+ class MCObjectStreamer;
class raw_ostream;
/// MCDwarfFile - Instances of this class represent the name of the dwarf
@@ -78,6 +86,11 @@ namespace llvm {
unsigned Flags;
// Isa
unsigned Isa;
+ // Discriminator
+ unsigned Discriminator;
+
+// Flag that indicates the initial value of the is_stmt_start flag.
+#define DWARF2_LINE_DEFAULT_IS_STMT 1
#define DWARF2_FLAG_IS_STMT (1 << 0)
#define DWARF2_FLAG_BASIC_BLOCK (1 << 1)
@@ -88,13 +101,32 @@ namespace llvm {
friend class MCContext;
friend class MCLineEntry;
MCDwarfLoc(unsigned fileNum, unsigned line, unsigned column, unsigned flags,
- unsigned isa)
- : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa) {}
+ unsigned isa, unsigned discriminator)
+ : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa),
+ Discriminator(discriminator) {}
// Allow the default copy constructor and assignment operator to be used
// for an MCDwarfLoc object.
public:
+ /// getFileNum - Get the FileNum of this MCDwarfLoc.
+ unsigned getFileNum() const { return FileNum; }
+
+ /// getLine - Get the Line of this MCDwarfLoc.
+ unsigned getLine() const { return Line; }
+
+ /// getColumn - Get the Column of this MCDwarfLoc.
+ unsigned getColumn() const { return Column; }
+
+ /// getFlags - Get the Flags of this MCDwarfLoc.
+ unsigned getFlags() const { return Flags; }
+
+ /// getIsa - Get the Isa of this MCDwarfLoc.
+ unsigned getIsa() const { return Isa; }
+
+ /// getDiscriminator - Get the Discriminator of this MCDwarfLoc.
+ unsigned getDiscriminator() const { return Discriminator; }
+
/// setFileNum - Set the FileNum of this MCDwarfLoc.
void setFileNum(unsigned fileNum) { FileNum = fileNum; }
@@ -109,6 +141,11 @@ namespace llvm {
/// setIsa - Set the Isa of this MCDwarfLoc.
void setIsa(unsigned isa) { Isa = isa; }
+
+ /// setDiscriminator - Set the Discriminator of this MCDwarfLoc.
+ void setDiscriminator(unsigned discriminator) {
+ Discriminator = discriminator;
+ }
};
/// MCLineEntry - Instances of this class represent the line information for
@@ -127,6 +164,13 @@ namespace llvm {
// Constructor to create an MCLineEntry given a symbol and the dwarf loc.
MCLineEntry(MCSymbol *label, const MCDwarfLoc loc) : MCDwarfLoc(loc),
Label(label) {}
+
+ MCSymbol *getLabel() const { return Label; }
+
+ // This is called when an instruction is assembled into the specified
+ // section. If there is pending information from the last .loc directive
+ // that has not yet produced a line entry, one is created here.
+ static void Make(MCStreamer *MCOS, const MCSection *Section);
};
/// MCLineSection - Instances of this class represent the line information
@@ -134,7 +178,6 @@ namespace llvm {
/// .loc directives. This is the information used to build the dwarf line
/// table for a section.
class MCLineSection {
- std::vector<MCLineEntry> MCLineEntries;
private:
MCLineSection(const MCLineSection&); // DO NOT IMPLEMENT
@@ -149,8 +192,88 @@ namespace llvm {
void addLineEntry(const MCLineEntry &LineEntry) {
MCLineEntries.push_back(LineEntry);
}
+
+ typedef std::vector<MCLineEntry> MCLineEntryCollection;
+ typedef MCLineEntryCollection::iterator iterator;
+ typedef MCLineEntryCollection::const_iterator const_iterator;
+
+ private:
+ MCLineEntryCollection MCLineEntries;
+
+ public:
+ const MCLineEntryCollection *getMCLineEntries() const {
+ return &MCLineEntries;
+ }
};
+ class MCDwarfFileTable {
+ public:
+ //
+ // This emits the Dwarf file and the line tables.
+ //
+ static void Emit(MCStreamer *MCOS);
+ };
+
+ class MCDwarfLineAddr {
+ public:
+ /// Utility function to encode a DWARF LineDelta/AddrDelta pair.
+ static void Encode(int64_t LineDelta, uint64_t AddrDelta, raw_ostream &OS);
+
+ /// Utility function to emit the encoding to a streamer.
+ static void Emit(MCStreamer *MCOS,
+ int64_t LineDelta, uint64_t AddrDelta);
+
+ /// Utility function to write the encoding to an object writer.
+ static void Write(MCObjectWriter *OW,
+ int64_t LineDelta, uint64_t AddrDelta);
+ };
+
+ class MCCFIInstruction {
+ public:
+ enum OpType { Remember, Restore, Move };
+ private:
+ OpType Operation;
+ MCSymbol *Label;
+ // Move to & from location.
+ MachineLocation Destination;
+ MachineLocation Source;
+ public:
+ MCCFIInstruction(OpType Op, MCSymbol *L)
+ : Operation(Op), Label(L) {
+ assert(Op == Remember || Op == Restore);
+ }
+ MCCFIInstruction(MCSymbol *L, const MachineLocation &D,
+ const MachineLocation &S)
+ : Operation(Move), Label(L), Destination(D), Source(S) {
+ }
+ OpType getOperation() const { return Operation; }
+ MCSymbol *getLabel() const { return Label; }
+ const MachineLocation &getDestination() const { return Destination; }
+ const MachineLocation &getSource() const { return Source; }
+ };
+
+ struct MCDwarfFrameInfo {
+ MCDwarfFrameInfo() : Begin(0), End(0), Personality(0), Lsda(0),
+ Instructions(), PersonalityEncoding(0),
+ LsdaEncoding(0) {}
+ MCSymbol *Begin;
+ MCSymbol *End;
+ const MCSymbol *Personality;
+ const MCSymbol *Lsda;
+ std::vector<MCCFIInstruction> Instructions;
+ unsigned PersonalityEncoding;
+ unsigned LsdaEncoding;
+ };
+
+ class MCDwarfFrameEmitter {
+ public:
+ //
+ // This emits the frame info section.
+ //
+ static void Emit(MCStreamer &streamer);
+ static void EmitAdvanceLoc(MCStreamer &Streamer, uint64_t AddrDelta);
+ static void EncodeAdvanceLoc(uint64_t AddrDelta, raw_ostream &OS);
+ };
} // end namespace llvm
#endif
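
MCDwarfLineAddr::Encode boils down to the standard DWARF line-program opcode arithmetic. A
sketch under assumed header parameters (the real values come from the emitted line-table
header, so treat the constants as illustrative):

    #include <cstdint>

    static const int      DWARF2_LINE_BASE        = -5;
    static const unsigned DWARF2_LINE_RANGE       = 14;
    static const unsigned DWARF2_LINE_OPCODE_BASE = 13;

    // Returns the special opcode for (LineDelta, AddrDelta) if one exists, or 0
    // if the pair must fall back to separate advance_line/advance_pc opcodes.
    uint8_t specialOpcodeExample(int64_t LineDelta, uint64_t AddrDelta) {
      if (LineDelta < DWARF2_LINE_BASE ||
          LineDelta >= DWARF2_LINE_BASE + (int)DWARF2_LINE_RANGE)
        return 0;
      uint64_t Temp = LineDelta - DWARF2_LINE_BASE;
      uint64_t Opcode = Temp + AddrDelta * DWARF2_LINE_RANGE + DWARF2_LINE_OPCODE_BASE;
      return Opcode <= 255 ? (uint8_t)Opcode : 0;
    }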
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
new file mode 100644
index 0000000..3c150dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -0,0 +1,47 @@
+//===-- llvm/MC/MCELFObjectWriter.h - ELF Object Writer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFOBJECTWRITER_H
+#define LLVM_MC_MCELFOBJECTWRITER_H
+
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class MCELFObjectTargetWriter {
+ const Triple::OSType OSType;
+ const uint16_t EMachine;
+ const unsigned HasRelocationAddend : 1;
+ const unsigned Is64Bit : 1;
+protected:
+ MCELFObjectTargetWriter(bool Is64Bit_, Triple::OSType OSType_,
+ uint16_t EMachine_, bool HasRelocationAddend_);
+
+public:
+ virtual ~MCELFObjectTargetWriter();
+
+ /// @name Accessors
+ /// @{
+ Triple::OSType getOSType() { return OSType; }
+ uint16_t getEMachine() { return EMachine; }
+ bool hasRelocationAddend() { return HasRelocationAddend; }
+ bool is64Bit() { return Is64Bit; }
+ /// @}
+};
+
+/// \brief Construct a new ELF writer instance.
+///
+/// \param MOTW - The target specific ELF writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+MCObjectWriter *createELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &OS, bool IsLittleEndian);
+} // End llvm namespace
+
+#endif
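
Targets are expected to subclass MCELFObjectTargetWriter and hand an instance to
createELFObjectWriter. A hypothetical sketch; the class name and the x86-64/Linux choices
are ours, not tied to any in-tree target:

    #include "llvm/MC/MCELFObjectWriter.h"
    #include "llvm/Support/ELF.h"

    namespace {
    class ExampleELFWriter : public llvm::MCELFObjectTargetWriter {
    public:
      ExampleELFWriter()
        : MCELFObjectTargetWriter(/*Is64Bit_=*/true, llvm::Triple::Linux,
                                  llvm::ELF::EM_X86_64,
                                  /*HasRelocationAddend_=*/true) {}
    };
    }

    llvm::MCObjectWriter *makeWriterExample(llvm::raw_ostream &OS) {
      // createELFObjectWriter takes ownership of the target writer.
      return llvm::createELFObjectWriter(new ExampleELFWriter(), OS,
                                         /*IsLittleEndian=*/true);
    }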
diff --git a/contrib/llvm/include/llvm/MC/MCELFSymbolFlags.h b/contrib/llvm/include/llvm/MC/MCELFSymbolFlags.h
index eb7978b..d798fb0 100644
--- a/contrib/llvm/include/llvm/MC/MCELFSymbolFlags.h
+++ b/contrib/llvm/include/llvm/MC/MCELFSymbolFlags.h
@@ -21,9 +21,10 @@
namespace llvm {
enum {
- ELF_STT_Shift = 0, // Shift value for STT_* flags.
- ELF_STB_Shift = 4, // Shift value for STB_* flags.
- ELF_STV_Shift = 8 // Shift value ofr STV_* flags.
+ ELF_STT_Shift = 0, // Shift value for STT_* flags.
+ ELF_STB_Shift = 4, // Shift value for STB_* flags.
+ ELF_STV_Shift = 8, // Shift value for STV_* flags.
+ ELF_Other_Shift = 10 // Shift value for other flags.
};
enum SymbolFlags {
@@ -46,7 +47,9 @@ namespace llvm {
ELF_STV_Default = (ELF::STV_DEFAULT << ELF_STV_Shift),
ELF_STV_Internal = (ELF::STV_INTERNAL << ELF_STV_Shift),
ELF_STV_Hidden = (ELF::STV_HIDDEN << ELF_STV_Shift),
- ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift)
+ ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift),
+
+ ELF_Other_Weakref = (1 << ELF_Other_Shift)
};
} // end namespace llvm
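
The shift constants let a single flags word carry binding, type, visibility, and the new
weakref bit at once. A sketch using the pre-shifted enumerator names this header defines
(the particular combination is illustrative):

    unsigned Flags = llvm::ELF_STB_Global     // binding
                   | llvm::ELF_STT_Func       // type
                   | llvm::ELF_STV_Hidden     // visibility
                   | llvm::ELF_Other_Weakref; // new "other" bit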
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
index 1f9b8f2..fea5249 100644
--- a/contrib/llvm/include/llvm/MC/MCExpr.h
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -10,17 +10,21 @@
#ifndef LLVM_MC_MCEXPR_H
#define LLVM_MC_MCEXPR_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Casting.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class MCAsmInfo;
class MCAsmLayout;
+class MCAssembler;
class MCContext;
+class MCSectionData;
class MCSymbol;
class MCValue;
class raw_ostream;
class StringRef;
+typedef DenseMap<const MCSectionData*, uint64_t> SectionAddrMap;
/// MCExpr - Base class for the full range of assembler expressions which are
/// needed for parsing.
@@ -40,9 +44,16 @@ private:
MCExpr(const MCExpr&); // DO NOT IMPLEMENT
void operator=(const MCExpr&); // DO NOT IMPLEMENT
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs) const;
protected:
explicit MCExpr(ExprKind _Kind) : Kind(_Kind) {}
+ bool EvaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs,
+ bool InSet) const;
public:
/// @name Accessors
/// @{
@@ -67,7 +78,11 @@ public:
/// values. If not given, then only non-symbolic expressions will be
/// evaluated.
/// @result - True on success.
- bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout *Layout = 0) const;
+ bool EvaluateAsAbsolute(int64_t &Res) const;
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+ const SectionAddrMap &Addrs) const;
/// EvaluateAsRelocatable - Try to evaluate the expression to a relocatable
/// value, i.e. an expression of the fixed form (a - b + constant).
@@ -75,7 +90,7 @@ public:
/// @param Res - The relocatable value, if evaluation succeeds.
/// @param Layout - The assembler layout object to use for evaluating values.
/// @result - True on success.
- bool EvaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout = 0) const;
+ bool EvaluateAsRelocatable(MCValue &Res, const MCAsmLayout &Layout) const;
/// @}
@@ -132,12 +147,25 @@ public:
VK_GOTTPOFF,
VK_INDNTPOFF,
VK_NTPOFF,
+ VK_GOTNTPOFF,
VK_PLT,
VK_TLSGD,
+ VK_TLSLD,
+ VK_TLSLDM,
VK_TPOFF,
- VK_ARM_HI16, // The R_ARM_MOVT_ABS relocation (:upper16: in the asm file)
- VK_ARM_LO16, // The R_ARM_MOVW_ABS_NC relocation (:lower16: in the asm file)
- VK_TLVP // Mach-O thread local variable relocation
+ VK_DTPOFF,
+ VK_TLVP, // Mach-O thread local variable relocation
+ // FIXME: We'd really like to use the generic Kinds listed above for these.
+ VK_ARM_PLT, // ARM-style PLT references, i.e., (PLT) instead of @PLT
+ VK_ARM_TLSGD, // ditto for TLSGD, GOT, GOTOFF, TPOFF and GOTTPOFF
+ VK_ARM_GOT,
+ VK_ARM_GOTOFF,
+ VK_ARM_TPOFF,
+ VK_ARM_GOTTPOFF,
+
+ VK_PPC_TOC,
+ VK_PPC_HA16, // ha16(symbol)
+ VK_PPC_LO16 // lo16(symbol)
};
private:
@@ -162,7 +190,7 @@ public:
MCContext &Ctx);
static const MCSymbolRefExpr *Create(StringRef Name, VariantKind Kind,
MCContext &Ctx);
-
+
/// @}
/// @name Accessors
/// @{
@@ -391,7 +419,7 @@ public:
virtual void PrintImpl(raw_ostream &OS) const = 0;
virtual bool EvaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout) const = 0;
-
+ virtual void AddValueSymbols(MCAssembler *) const = 0;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
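
The no-layout EvaluateAsAbsolute overload is the common entry point for folding constant
expressions; a minimal sketch (helper name ours):

    // Fold an expression to a constant when no assembler layout is needed.
    bool foldToConstantExample(const llvm::MCExpr *E, int64_t &Out) {
      int64_t Res;
      if (E->EvaluateAsAbsolute(Res)) { // symbol-free or trivially resolvable
        Out = Res;
        return true;
      }
      return false;
    }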
diff --git a/contrib/llvm/include/llvm/MC/MCFixup.h b/contrib/llvm/include/llvm/MC/MCFixup.h
index eed4c34..6fde797 100644
--- a/contrib/llvm/include/llvm/MC/MCFixup.h
+++ b/contrib/llvm/include/llvm/MC/MCFixup.h
@@ -10,7 +10,7 @@
#ifndef LLVM_MC_MCFIXUP_H
#define LLVM_MC_MCFIXUP_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
@@ -22,6 +22,10 @@ enum MCFixupKind {
FK_Data_2, ///< A two-byte fixup.
FK_Data_4, ///< A four-byte fixup.
FK_Data_8, ///< An eight-byte fixup.
+ FK_PCRel_1, ///< A one-byte pc relative fixup.
+ FK_PCRel_2, ///< A two-byte pc relative fixup.
+ FK_PCRel_4, ///< A four-byte pc relative fixup.
+ FK_PCRel_8, ///< An eight-byte pc relative fixup.
FirstTargetFixupKind = 128,
@@ -77,13 +81,13 @@ public:
/// getKindForSize - Return the generic fixup kind for a value with the given
/// size. It is an error to pass an unsupported size.
- static MCFixupKind getKindForSize(unsigned Size) {
+ static MCFixupKind getKindForSize(unsigned Size, bool isPCRel) {
switch (Size) {
default: assert(0 && "Invalid generic fixup size!");
- case 1: return FK_Data_1;
- case 2: return FK_Data_2;
- case 4: return FK_Data_4;
- case 8: return FK_Data_8;
+ case 1: return isPCRel ? FK_PCRel_1 : FK_Data_1;
+ case 2: return isPCRel ? FK_PCRel_2 : FK_Data_2;
+ case 4: return isPCRel ? FK_PCRel_4 : FK_Data_4;
+ case 8: return isPCRel ? FK_PCRel_8 : FK_Data_8;
}
}
};
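
Combined with MCFixup::Create, the new isPCRel parameter selects the PC-relative generic
kinds; a sketch, assuming the existing Create(Offset, Expr, Kind) factory in this header:

    // Build a 4-byte pc-relative fixup against Expr at offset 0.
    llvm::MCFixup makeFixupExample(const llvm::MCExpr *Expr) {
      llvm::MCFixupKind Kind = llvm::MCFixup::getKindForSize(4, /*isPCRel=*/true);
      return llvm::MCFixup::Create(/*Offset=*/0, Expr, Kind); // FK_PCRel_4
    }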
diff --git a/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
new file mode 100644
index 0000000..1961687
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
@@ -0,0 +1,43 @@
+//===-- llvm/MC/MCFixupKindInfo.h - Fixup Descriptors -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFIXUPKINDINFO_H
+#define LLVM_MC_MCFIXUPKINDINFO_H
+
+namespace llvm {
+
+/// MCFixupKindInfo - Target independent information on a fixup kind.
+struct MCFixupKindInfo {
+ enum FixupKindFlags {
+ /// Is this fixup kind PC-relative? This is used by the assembler backend to
+ /// evaluate fixup values in a target independent manner when possible.
+ FKF_IsPCRel = (1 << 0),
+
+ /// Should this fixup kind force a 4-byte aligned effective PC value?
+ FKF_IsAlignedDownTo32Bits = (1 << 1)
+ };
+
+ /// A target specific name for the fixup kind. The names will be unique for
+ /// distinct kinds on any given target.
+ const char *Name;
+
+ /// The bit offset to write the relocation into.
+ unsigned TargetOffset;
+
+ /// The number of bits written by this fixup. The bits are assumed to be
+ /// contiguous.
+ unsigned TargetSize;
+
+ /// Flags describing additional information on this fixup kind.
+ unsigned Flags;
+};
+
+} // End llvm namespace
+
+#endif
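
A backend typically keeps a static table of these descriptors indexed by its fixup kinds.
A sketch with invented names; the fields are {Name, TargetOffset, TargetSize, Flags}:

    #include "llvm/MC/MCFixupKindInfo.h"

    static const llvm::MCFixupKindInfo ExampleInfos[] = {
      { "fixup_example_4",        0, 32, 0 },
      { "fixup_example_pcrel_4",  0, 32, llvm::MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_example_thumb_bl", 0, 32,
        llvm::MCFixupKindInfo::FKF_IsPCRel |
        llvm::MCFixupKindInfo::FKF_IsAlignedDownTo32Bits },
    };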
diff --git a/contrib/llvm/include/llvm/MC/MCInst.h b/contrib/llvm/include/llvm/MC/MCInst.h
index dc630fe..d6ef7b4 100644
--- a/contrib/llvm/include/llvm/MC/MCInst.h
+++ b/contrib/llvm/include/llvm/MC/MCInst.h
@@ -18,7 +18,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class raw_ostream;
@@ -33,24 +33,27 @@ class MCOperand {
kInvalid, ///< Uninitialized.
kRegister, ///< Register operand.
kImmediate, ///< Immediate operand.
+ kFPImmediate, ///< Floating-point immediate operand.
kExpr ///< Relocatable immediate operand.
};
unsigned char Kind;
-
+
union {
unsigned RegVal;
int64_t ImmVal;
+ double FPImmVal;
const MCExpr *ExprVal;
};
public:
-
- MCOperand() : Kind(kInvalid) {}
+
+ MCOperand() : Kind(kInvalid), FPImmVal(0.0) {}
bool isValid() const { return Kind != kInvalid; }
bool isReg() const { return Kind == kRegister; }
bool isImm() const { return Kind == kImmediate; }
+ bool isFPImm() const { return Kind == kFPImmediate; }
bool isExpr() const { return Kind == kExpr; }
-
+
/// getReg - Returns the register number.
unsigned getReg() const {
assert(isReg() && "This is not a register operand!");
@@ -62,7 +65,7 @@ public:
assert(isReg() && "This is not a register operand!");
RegVal = Reg;
}
-
+
int64_t getImm() const {
assert(isImm() && "This is not an immediate");
return ImmVal;
@@ -71,7 +74,17 @@ public:
assert(isImm() && "This is not an immediate");
ImmVal = Val;
}
-
+
+ double getFPImm() const {
+ assert(isFPImm() && "This is not an FP immediate");
+ return FPImmVal;
+ }
+
+ void setFPImm(double Val) {
+ assert(isFPImm() && "This is not an FP immediate");
+ FPImmVal = Val;
+ }
+
const MCExpr *getExpr() const {
assert(isExpr() && "This is not an expression");
return ExprVal;
@@ -80,7 +93,7 @@ public:
assert(isExpr() && "This is not an expression");
ExprVal = Val;
}
-
+
static MCOperand CreateReg(unsigned Reg) {
MCOperand Op;
Op.Kind = kRegister;
@@ -93,6 +106,12 @@ public:
Op.ImmVal = Val;
return Op;
}
+ static MCOperand CreateFPImm(double Val) {
+ MCOperand Op;
+ Op.Kind = kFPImmediate;
+ Op.FPImmVal = Val;
+ return Op;
+ }
static MCOperand CreateExpr(const MCExpr *Val) {
MCOperand Op;
Op.Kind = kExpr;
@@ -104,23 +123,23 @@ public:
void dump() const;
};
-
+
/// MCInst - Instances of this class represent a single low-level machine
-/// instruction.
+/// instruction.
class MCInst {
unsigned Opcode;
SmallVector<MCOperand, 8> Operands;
public:
MCInst() : Opcode(0) {}
-
+
void setOpcode(unsigned Op) { Opcode = Op; }
-
+
unsigned getOpcode() const { return Opcode; }
const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
MCOperand &getOperand(unsigned i) { return Operands[i]; }
unsigned getNumOperands() const { return Operands.size(); }
-
+
void addOperand(const MCOperand &Op) {
Operands.push_back(Op);
}
@@ -136,6 +155,15 @@ public:
StringRef Separator = " ") const;
};
+inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
+ MO.print(OS, 0);
+ return OS;
+}
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
+ MI.print(OS, 0);
+ return OS;
+}
} // end namespace llvm
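
With the new CreateFPImm factory and the stream operators above, building and dumping an
instruction takes one line per operand; the opcode and register number below are
placeholders, not real target values:

    #include "llvm/MC/MCInst.h"
    #include "llvm/Support/raw_ostream.h"

    void buildInstExample() {
      llvm::MCInst Inst;
      Inst.setOpcode(0); // placeholder opcode
      Inst.addOperand(llvm::MCOperand::CreateReg(1));
      Inst.addOperand(llvm::MCOperand::CreateImm(42));
      Inst.addOperand(llvm::MCOperand::CreateFPImm(2.5));
      llvm::errs() << Inst << "\n"; // uses the operator<< defined above
    }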
diff --git a/contrib/llvm/include/llvm/MC/MCInstPrinter.h b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
index 4839a83..96716c7 100644
--- a/contrib/llvm/include/llvm/MC/MCInstPrinter.h
+++ b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
@@ -28,21 +28,21 @@ protected:
public:
MCInstPrinter(const MCAsmInfo &mai)
: CommentStream(0), MAI(mai) {}
-
+
virtual ~MCInstPrinter();
/// setCommentStream - Specify a stream to emit comments to.
void setCommentStream(raw_ostream &OS) { CommentStream = &OS; }
-
+
/// printInst - Print the specified MCInst to the specified raw_ostream.
///
virtual void printInst(const MCInst *MI, raw_ostream &OS) = 0;
-
+
/// getOpcodeName - Return the name of the specified opcode enum (e.g.
/// "MOV32ri") or empty if we can't resolve it.
virtual StringRef getOpcodeName(unsigned Opcode) const;
};
-
+
} // namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCMachOSymbolFlags.h b/contrib/llvm/include/llvm/MC/MCMachOSymbolFlags.h
index c938c81..696436d 100644
--- a/contrib/llvm/include/llvm/MC/MCMachOSymbolFlags.h
+++ b/contrib/llvm/include/llvm/MC/MCMachOSymbolFlags.h
@@ -34,9 +34,11 @@ namespace llvm {
SF_ReferenceTypePrivateUndefinedLazy = 0x0005,
// Other 'desc' flags.
+ SF_ThumbFunc = 0x0008,
SF_NoDeadStrip = 0x0020,
SF_WeakReference = 0x0040,
- SF_WeakDefinition = 0x0080
+ SF_WeakDefinition = 0x0080,
+ SF_SymbolResolver = 0x0100
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
new file mode 100644
index 0000000..ec51031
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
@@ -0,0 +1,65 @@
+//===-- llvm/MC/MCMachObjectWriter.h - Mach Object Writer -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCMACHOBJECTWRITER_H
+#define LLVM_MC_MCMACHOBJECTWRITER_H
+
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MCMachObjectTargetWriter {
+ const unsigned Is64Bit : 1;
+ const uint32_t CPUType;
+ const uint32_t CPUSubtype;
+ // FIXME: Remove this; we should just always use it once we no longer care
+ // about Darwin 'as' compatibility.
+ const unsigned UseAggressiveSymbolFolding : 1;
+ unsigned LocalDifference_RIT;
+
+protected:
+ MCMachObjectTargetWriter(bool Is64Bit_, uint32_t CPUType_,
+ uint32_t CPUSubtype_,
+ bool UseAggressiveSymbolFolding_ = false);
+
+ void setLocalDifferenceRelocationType(unsigned Type) {
+ LocalDifference_RIT = Type;
+ }
+
+public:
+ virtual ~MCMachObjectTargetWriter();
+
+ /// @name Accessors
+ /// @{
+
+ bool is64Bit() const { return Is64Bit; }
+ bool useAggressiveSymbolFolding() const { return UseAggressiveSymbolFolding; }
+ uint32_t getCPUType() const { return CPUType; }
+ uint32_t getCPUSubtype() const { return CPUSubtype; }
+ unsigned getLocalDifferenceRelocationType() const {
+ return LocalDifference_RIT;
+ }
+
+ /// @}
+};
+
+/// \brief Construct a new Mach-O writer instance.
+///
+/// This routine takes ownership of the target writer subclass.
+///
+/// \param MOTW - The target specific Mach-O writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+MCObjectWriter *createMachObjectWriter(MCMachObjectTargetWriter *MOTW,
+ raw_ostream &OS, bool IsLittleEndian);
+
+} // End llvm namespace
+
+#endif
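
Usage mirrors the ELF writer: subclass, then pass ownership to createMachObjectWriter. A
hypothetical sketch; the CPU type/subtype literals are the usual Mach-O constants, spelled
out here as an assumption:

    #include "llvm/MC/MCMachObjectWriter.h"

    namespace {
    class ExampleMachOWriter : public llvm::MCMachObjectTargetWriter {
    public:
      ExampleMachOWriter()
        : MCMachObjectTargetWriter(/*Is64Bit_=*/true,
                                   /*CPUType_=*/0x01000007, // CPU_TYPE_X86_64
                                   /*CPUSubtype_=*/3,       // CPU_SUBTYPE_X86_64_ALL
                                   /*UseAggressiveSymbolFolding_=*/false) {}
    };
    }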
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
index ea6d9c1..833341e 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -33,6 +33,8 @@ class MCObjectStreamer : public MCStreamer {
MCAssembler *Assembler;
MCSectionData *CurSectionData;
+ virtual void EmitInstToData(const MCInst &Inst) = 0;
+
protected:
MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
raw_ostream &_OS, MCCodeEmitter *_Emitter);
@@ -56,7 +58,21 @@ public:
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section);
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace);
+ virtual void EmitULEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+ virtual void EmitSLEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+ virtual void ChangeSection(const MCSection *Section);
+ virtual void EmitInstruction(const MCInst &Inst);
+ virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label);
+ virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
+ const MCSymbol *Label);
virtual void Finish();
/// @}
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
index f1c1cb8..782d844 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -10,8 +10,9 @@
#ifndef LLVM_MC_MCOBJECTWRITER_H
#define LLVM_MC_MCOBJECTWRITER_H
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
@@ -19,6 +20,9 @@ class MCAsmLayout;
class MCAssembler;
class MCFixup;
class MCFragment;
+class MCSymbol;
+class MCSymbolData;
+class MCSymbolRefExpr;
class MCValue;
class raw_ostream;
@@ -61,7 +65,8 @@ public:
///
/// This routine is called by the assembler after layout and relaxation is
/// complete.
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm) = 0;
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) = 0;
/// Record a relocation entry.
///
@@ -75,12 +80,31 @@ public:
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) = 0;
+ /// \brief Check whether the difference (A - B) between two symbol
+ /// references is fully resolved.
+ ///
+ /// Clients are not required to answer precisely and may conservatively return
+ /// false, even when a difference is fully resolved.
+ bool
+ IsSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
+ const MCSymbolRefExpr *A,
+ const MCSymbolRefExpr *B,
+ bool InSet) const;
+
+ virtual bool
+ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbolData &DataA,
+ const MCFragment &FB,
+ bool InSet,
+ bool IsPCRel) const;
+
+
/// Write the object file.
///
/// This routine is called by the assembler after layout and relaxation is
/// complete, fixups have been evaluated and applied, and relocations
/// generated.
- virtual void WriteObject(const MCAssembler &Asm,
+ virtual void WriteObject(MCAssembler &Asm,
const MCAsmLayout &Layout) = 0;
/// @}
@@ -160,6 +184,11 @@ public:
}
/// @}
+
+ /// Utility function to encode a SLEB128 value.
+ static void EncodeSLEB128(int64_t Value, raw_ostream &OS);
+ /// Utility function to encode a ULEB128 value.
+ static void EncodeULEB128(uint64_t Value, raw_ostream &OS);
};
MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
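
EncodeULEB128 is the classic 7-bits-per-byte varint; a self-contained sketch of the same
encoding (function name ours):

    #include <cstdint>
    #include "llvm/Support/raw_ostream.h"

    static void encodeULEB128Example(uint64_t Value, llvm::raw_ostream &OS) {
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80; // high bit set: more bytes follow
        OS << char(Byte);
      } while (Value != 0);
    }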
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
index 2187889..252696b 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
#include <cassert>
@@ -29,10 +29,10 @@ class MCAsmInfo;
/// AsmLexer - Lexer class for assembly files.
class AsmLexer : public MCAsmLexer {
const MCAsmInfo &MAI;
-
+
const char *CurPtr;
const MemoryBuffer *CurBuf;
-
+
void operator=(const AsmLexer&); // DO NOT IMPLEMENT
AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
@@ -43,13 +43,13 @@ protected:
public:
AsmLexer(const MCAsmInfo &MAI);
~AsmLexer();
-
+
void setBuffer(const MemoryBuffer *buf, const char *ptr = NULL);
-
+
virtual StringRef LexUntilEndOfStatement();
bool isAtStartOfComment(char Char);
-
+
const MCAsmInfo &getMAI() const { return MAI; }
private:
@@ -60,9 +60,11 @@ private:
AsmToken LexSlash();
AsmToken LexLineComment();
AsmToken LexDigit();
+ AsmToken LexSingleQuote();
AsmToken LexQuote();
+ AsmToken LexFloatLiteral();
};
-
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index d690e81..606725a 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -11,7 +11,7 @@
#define LLVM_MC_MCASMLEXER_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/SMLoc.h"
namespace llvm {
@@ -29,13 +29,16 @@ public:
// String values.
Identifier,
String,
-
+
// Integer values.
Integer,
-
+
+ // Real values.
+ Real,
+
// Register values (stored in IntVal). Only used by TargetAsmLexer.
Register,
-
+
// No-value.
EndOfStatement,
Colon,
@@ -43,8 +46,8 @@ public:
Slash, // '/'
LParen, RParen, LBrac, RBrac, LCurly, RCurly,
Star, Dot, Comma, Dollar, Equal, EqualEqual,
-
- Pipe, PipePipe, Caret,
+
+ Pipe, PipePipe, Caret,
Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
Less, LessEqual, LessLess, LessGreater,
Greater, GreaterEqual, GreaterGreater, At
@@ -70,7 +73,7 @@ public:
SMLoc getLoc() const;
/// getStringContents - Get the contents of a string token (without quotes).
- StringRef getStringContents() const {
+ StringRef getStringContents() const {
assert(Kind == String && "This token isn't a string!");
return Str.slice(1, Str.size() - 1);
}
@@ -95,11 +98,11 @@ public:
// FIXME: Don't compute this in advance, it makes every token larger, and is
// also not generally what we want (it is nicer for recovery etc. to lex 123br
// as a single token, then diagnose as an invalid number).
- int64_t getIntVal() const {
+ int64_t getIntVal() const {
assert(Kind == Integer && "This token isn't an integer!");
- return IntVal;
+ return IntVal;
}
-
+
/// getRegVal - Get the register number for the current token, which should
/// be a register.
unsigned getRegVal() const {
@@ -113,7 +116,7 @@ public:
class MCAsmLexer {
/// The current token, stored in the base class for faster access.
AsmToken CurTok;
-
+
/// The location and description of the current error
SMLoc ErrLoc;
std::string Err;
@@ -126,12 +129,12 @@ protected: // Can only create subclasses.
MCAsmLexer();
virtual AsmToken LexToken() = 0;
-
+
void SetError(const SMLoc &errLoc, const std::string &err) {
ErrLoc = errLoc;
Err = err;
}
-
+
public:
virtual ~MCAsmLexer();
@@ -152,12 +155,12 @@ public:
const AsmToken &getTok() {
return CurTok;
}
-
+
/// getErrLoc - Get the current error location
const SMLoc &getErrLoc() {
return ErrLoc;
}
-
+
/// getErr - Get the current error string
const std::string &getErr() {
return Err;
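
A typical client loop over the lexer, reacting to the new Real token kind; a sketch,
assuming MCAsmLexer.h is included and the helper name is ours:

    void lexAllExample(llvm::MCAsmLexer &Lexer) {
      while (Lexer.Lex().isNot(llvm::AsmToken::Eof)) {
        const llvm::AsmToken &Tok = Lexer.getTok();
        if (Tok.is(llvm::AsmToken::Real)) {
          llvm::StringRef Text = Tok.getString(); // literal text, e.g. "0.5"
          (void)Text;
        }
      }
    }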
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index b37d46c..54979d9 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -10,7 +10,7 @@
#ifndef LLVM_MC_MCASMPARSER_H
#define LLVM_MC_MCASMPARSER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class AsmToken;
@@ -99,6 +99,10 @@ public:
/// will be either the EndOfStatement or EOF.
virtual StringRef ParseStringToEndOfStatement() = 0;
+ /// EatToEndOfStatement - Skip to the end of the current statement, for error
+ /// recovery.
+ virtual void EatToEndOfStatement() = 0;
+
/// ParseExpression - Parse an arbitrary expression.
///
/// @param Res - The value of the expression. The result is undefined
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 99fa5ad..91f5773 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -19,10 +19,10 @@ class raw_ostream;
/// base class is used by target-independent clients and is the interface
/// between parsing an asm instruction and recognizing it.
class MCParsedAsmOperand {
-public:
+public:
MCParsedAsmOperand() {}
virtual ~MCParsedAsmOperand() {}
-
+
/// getStartLoc - Get the location of the first token of this operand.
virtual SMLoc getStartLoc() const = 0;
/// getEndLoc - Get the location of the last token of this operand.
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
index 5c99735..1c01b2f 100644
--- a/contrib/llvm/include/llvm/MC/MCSection.h
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -32,8 +32,7 @@ namespace llvm {
enum SectionVariant {
SV_COFF = 0,
SV_ELF,
- SV_MachO,
- SV_PIC16
+ SV_MachO
};
private:
@@ -61,6 +60,14 @@ namespace llvm {
return false;
}
+ /// UseCodeAlign - Return true if a .align directive should use
+ /// "optimized nops" to fill instead of 0s.
+ virtual bool UseCodeAlign() const = 0;
+
+ /// isVirtualSection - Check whether this section is "virtual", that is
+ /// has no actual object file contents.
+ virtual bool isVirtualSection() const = 0;
+
static bool classof(const MCSection *) { return true; }
};
diff --git a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
index f828e10..b154cf5 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -19,12 +19,12 @@
#include "llvm/Support/COFF.h"
namespace llvm {
-
+
/// MCSectionCOFF - This represents a section on Windows
class MCSectionCOFF : public MCSection {
// The memory for this string is stored in the same MCContext as *this.
StringRef SectionName;
-
+
/// Characteristics - This is the Characteristics field of a section,
// drawn from the enums below.
unsigned Characteristics;
@@ -52,9 +52,11 @@ namespace llvm {
StringRef getSectionName() const { return SectionName; }
unsigned getCharacteristics() const { return Characteristics; }
int getSelection () const { return Selection; }
-
+
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const;
+ virtual bool UseCodeAlign() const;
+ virtual bool isVirtualSection() const;
static bool classof(const MCSection *S) {
return S->getVariant() == SV_COFF;
diff --git a/contrib/llvm/include/llvm/MC/MCSectionELF.h b/contrib/llvm/include/llvm/MC/MCSectionELF.h
index 5de0bf5..c82de71 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionELF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionELF.h
@@ -15,38 +15,39 @@
#define LLVM_MC_MCSECTIONELF_H
#include "llvm/MC/MCSection.h"
+#include "llvm/Support/ELF.h"
namespace llvm {
-
+
+class MCSymbol;
+
/// MCSectionELF - This represents a section on linux, lots of unix variants
/// and some bare metal systems.
class MCSectionELF : public MCSection {
/// SectionName - This is the name of the section. The referenced memory is
/// owned by TargetLoweringObjectFileELF's ELFUniqueMap.
StringRef SectionName;
-
+
/// Type - This is the sh_type field of a section, drawn from the enums below.
unsigned Type;
-
+
/// Flags - This is the sh_flags field of a section, drawn from the enums
/// below.
unsigned Flags;
- /// IsExplicit - Indicates that this section comes from globals with an
- /// explicit section specified.
- bool IsExplicit;
-
/// EntrySize - The size of each entry in this section. This size only
/// makes sense for sections that contain fixed-sized entries. If a
/// section does not contain fixed-sized entries 'EntrySize' will be 0.
unsigned EntrySize;
-
+
+ const MCSymbol *Group;
+
private:
friend class MCContext;
MCSectionELF(StringRef Section, unsigned type, unsigned flags,
- SectionKind K, bool isExplicit, unsigned entrySize)
+ SectionKind K, unsigned entrySize, const MCSymbol *group)
: MCSection(SV_ELF, K), SectionName(Section), Type(type), Flags(flags),
- IsExplicit(isExplicit), EntrySize(entrySize) {}
+ EntrySize(entrySize), Group(group) {}
~MCSectionELF();
public:
@@ -54,141 +55,31 @@ public:
/// should be printed before the section name
bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
- /// ShouldPrintSectionType - Only prints the section type if supported
- bool ShouldPrintSectionType(unsigned Ty) const;
-
- /// HasCommonSymbols - True if this section holds common symbols, this is
- /// indicated on the ELF object file by a symbol with SHN_COMMON section
- /// header index.
- bool HasCommonSymbols() const;
-
- /// These are the section type and flags fields. An ELF section can have
- /// only one Type, but can have more than one of the flags specified.
- ///
- /// Valid section types.
- enum {
- // This value marks the section header as inactive.
- SHT_NULL = 0x00U,
-
- // Holds information defined by the program, with custom format and meaning.
- SHT_PROGBITS = 0x01U,
-
- // This section holds a symbol table.
- SHT_SYMTAB = 0x02U,
-
- // The section holds a string table.
- SHT_STRTAB = 0x03U,
-
- // The section holds relocation entries with explicit addends.
- SHT_RELA = 0x04U,
-
- // The section holds a symbol hash table.
- SHT_HASH = 0x05U,
-
- // Information for dynamic linking.
- SHT_DYNAMIC = 0x06U,
-
- // The section holds information that marks the file in some way.
- SHT_NOTE = 0x07U,
-
- // A section of this type occupies no space in the file.
- SHT_NOBITS = 0x08U,
-
- // The section holds relocation entries without explicit addends.
- SHT_REL = 0x09U,
-
- // This section type is reserved but has unspecified semantics.
- SHT_SHLIB = 0x0AU,
-
- // This section holds a symbol table.
- SHT_DYNSYM = 0x0BU,
-
- // This section contains an array of pointers to initialization functions.
- SHT_INIT_ARRAY = 0x0EU,
-
- // This section contains an array of pointers to termination functions.
- SHT_FINI_ARRAY = 0x0FU,
-
- // This section contains an array of pointers to functions that are invoked
- // before all other initialization functions.
- SHT_PREINIT_ARRAY = 0x10U,
-
- // A section group is a set of sections that are related and that must be
- // treated specially by the linker.
- SHT_GROUP = 0x11U,
-
- // This section is associated with a section of type SHT_SYMTAB, when the
- // referenced symbol table contain the escape value SHN_XINDEX
- SHT_SYMTAB_SHNDX = 0x12U,
-
- LAST_KNOWN_SECTION_TYPE = SHT_SYMTAB_SHNDX
- };
-
- /// Valid section flags.
- enum {
- // The section contains data that should be writable.
- SHF_WRITE = 0x1U,
-
- // The section occupies memory during execution.
- SHF_ALLOC = 0x2U,
-
- // The section contains executable machine instructions.
- SHF_EXECINSTR = 0x4U,
-
- // The data in the section may be merged to eliminate duplication.
- SHF_MERGE = 0x10U,
-
- // Elements in the section consist of null-terminated character strings.
- SHF_STRINGS = 0x20U,
-
- // A field in this section holds a section header table index.
- SHF_INFO_LINK = 0x40U,
-
- // Adds special ordering requirements for link editors.
- SHF_LINK_ORDER = 0x80U,
-
- // This section requires special OS-specific processing to avoid incorrect
- // behavior.
- SHF_OS_NONCONFORMING = 0x100U,
-
- // This section is a member of a section group.
- SHF_GROUP = 0x200U,
-
- // This section holds Thread-Local Storage.
- SHF_TLS = 0x400U,
-
-
- // Start of target-specific flags.
-
- /// XCORE_SHF_CP_SECTION - All sections with the "c" flag are grouped
- /// together by the linker to form the constant pool and the cp register is
- /// set to the start of the constant pool by the boot code.
- XCORE_SHF_CP_SECTION = 0x800U,
-
- /// XCORE_SHF_DP_SECTION - All sections with the "d" flag are grouped
- /// together by the linker to form the data section and the dp register is
- /// set to the start of the section by the boot code.
- XCORE_SHF_DP_SECTION = 0x1000U
- };
-
StringRef getSectionName() const { return SectionName; }
unsigned getType() const { return Type; }
unsigned getFlags() const { return Flags; }
unsigned getEntrySize() const { return EntrySize; }
-
+ const MCSymbol *getGroup() const { return Group; }
+
void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const;
-
+ virtual bool UseCodeAlign() const;
+ virtual bool isVirtualSection() const;
+
/// isBaseAddressKnownZero - We know that non-allocatable sections (like
/// debug info) have a base of zero.
virtual bool isBaseAddressKnownZero() const {
- return (getFlags() & SHF_ALLOC) == 0;
+ return (getFlags() & ELF::SHF_ALLOC) == 0;
}
static bool classof(const MCSection *S) {
return S->getVariant() == SV_ELF;
}
static bool classof(const MCSectionELF *) { return true; }
+
+ // Return the entry size for sections with fixed-width data.
+ static unsigned DetermineEntrySize(SectionKind Kind);
+
};
} // end namespace llvm
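
Together with the new MCContext overload shown earlier, the Group member supports
COMDAT-style grouped sections; a sketch (section and group names invented, flag constants
from llvm/Support/ELF.h):

    const llvm::MCSectionELF *makeGroupedTextExample(llvm::MCContext &Ctx) {
      return Ctx.getELFSection(".text.foo", llvm::ELF::SHT_PROGBITS,
                               llvm::ELF::SHF_ALLOC | llvm::ELF::SHF_EXECINSTR |
                               llvm::ELF::SHF_GROUP,
                               llvm::SectionKind::getText(),
                               /*EntrySize=*/0, /*Group=*/"foo");
    }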
diff --git a/contrib/llvm/include/llvm/MC/MCSectionMachO.h b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
index 2d9d133..7633515 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionMachO.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
@@ -17,36 +17,36 @@
#include "llvm/MC/MCSection.h"
namespace llvm {
-
+
/// MCSectionMachO - This represents a section on a Mach-O system (used by
/// Mac OS X). On a Mac system, these are also described in
/// /usr/include/mach-o/loader.h.
class MCSectionMachO : public MCSection {
char SegmentName[16]; // Not necessarily null terminated!
char SectionName[16]; // Not necessarily null terminated!
-
+
/// TypeAndAttributes - This is the SECTION_TYPE and SECTION_ATTRIBUTES
/// field of a section, drawn from the enums below.
unsigned TypeAndAttributes;
-
+
/// Reserved2 - The 'reserved2' field of a section, used to represent the
/// size of stubs, for example.
unsigned Reserved2;
-
+
MCSectionMachO(StringRef Segment, StringRef Section,
- unsigned TAA, unsigned reserved2, SectionKind K);
+ unsigned TAA, unsigned reserved2, SectionKind K);
friend class MCContext;
public:
-
+
/// These are the section type and attributes fields. A MachO section can
/// have only one Type, but can have any of the attributes specified.
enum {
// TypeAndAttributes bitmasks.
SECTION_TYPE = 0x000000FFU,
SECTION_ATTRIBUTES = 0xFFFFFF00U,
-
+
// Valid section types.
-
+
/// S_REGULAR - Regular section.
S_REGULAR = 0x00U,
/// S_ZEROFILL - Zero fill on demand section.
@@ -101,10 +101,10 @@ public:
S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15U,
LAST_KNOWN_SECTION_TYPE = S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
-
+
// Valid section attributes.
-
+
/// S_ATTR_PURE_INSTRUCTIONS - Section contains only true machine
/// instructions.
S_ATTR_PURE_INSTRUCTIONS = 1U << 31,
@@ -165,6 +165,8 @@ public:
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const;
+ virtual bool UseCodeAlign() const;
+ virtual bool isVirtualSection() const;
static bool classof(const MCSection *S) {
return S->getVariant() == SV_MachO;
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index 1ce1b0e..fc2451f 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -14,8 +14,10 @@
#ifndef LLVM_MC_MCSTREAMER_H
#define LLVM_MC_MCSTREAMER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
namespace llvm {
class MCAsmInfo;
@@ -28,6 +30,7 @@ namespace llvm {
class MCSymbol;
class StringRef;
class TargetAsmBackend;
+ class TargetLoweringObjectFile;
class Twine;
class raw_ostream;
class formatted_raw_ostream;
@@ -47,29 +50,44 @@ namespace llvm {
MCStreamer(const MCStreamer&); // DO NOT IMPLEMENT
MCStreamer &operator=(const MCStreamer&); // DO NOT IMPLEMENT
- protected:
- MCStreamer(MCContext &Ctx);
+ void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ bool isPCRel, unsigned AddrSpace);
+
+ std::vector<MCDwarfFrameInfo> FrameInfos;
+ MCDwarfFrameInfo *getCurrentFrameInfo();
+ void EnsureValidFrame();
+
+ /// CurSectionStack - This is stack of CurSection values saved by
+ /// PushSection.
+ SmallVector<const MCSection *, 4> CurSectionStack;
- /// CurSection - This is the current section code is being emitted to, it is
- /// kept up to date by SwitchSection.
- const MCSection *CurSection;
+ /// PrevSectionStack - This is stack of PrevSection values saved by
+ /// PushSection.
+ SmallVector<const MCSection *, 4> PrevSectionStack;
- /// PrevSection - This is the previous section code is being emitted to, it is
- /// kept up to date by SwitchSection.
- const MCSection *PrevSection;
+ protected:
+ MCStreamer(MCContext &Ctx);
public:
virtual ~MCStreamer();
MCContext &getContext() const { return Context; }
+ unsigned getNumFrameInfos() {
+ return FrameInfos.size();
+ }
+
+ const MCDwarfFrameInfo &getFrameInfo(unsigned i) {
+ return FrameInfos[i];
+ }
+
/// @name Assembly File Formatting.
/// @{
-
+
/// isVerboseAsm - Return true if this streamer supports verbose assembly
/// and if it is enabled.
virtual bool isVerboseAsm() const { return false; }
-
+
/// hasRawTextSupport - Return true if this asm streamer supports emitting
/// unformatted text to the .s file with EmitRawText.
virtual bool hasRawTextSupport() const { return false; }
@@ -82,34 +100,83 @@ namespace llvm {
/// If the comment includes embedded \n's, they will each get the comment
/// prefix as appropriate. The added comment should not end with a \n.
virtual void AddComment(const Twine &T) {}
-
+
/// GetCommentOS - Return a raw_ostream that comments can be written to.
/// Unlike AddComment, you are required to terminate comments with \n if you
/// use this method.
virtual raw_ostream &GetCommentOS();
-
+
/// AddBlankLine - Emit a blank line to a .s file to pretty it up.
virtual void AddBlankLine() {}
-
+
/// @}
-
+
/// @name Symbol & Section Management
/// @{
-
+
/// getCurrentSection - Return the current section that the streamer is
/// emitting code to.
- const MCSection *getCurrentSection() const { return CurSection; }
+ const MCSection *getCurrentSection() const {
+ if (!CurSectionStack.empty())
+ return CurSectionStack.back();
+ return NULL;
+ }
/// getPreviousSection - Return the previous section that the streamer is
/// emitting code to.
- const MCSection *getPreviousSection() const { return PrevSection; }
+ const MCSection *getPreviousSection() const {
+ if (!PrevSectionStack.empty())
+ return PrevSectionStack.back();
+ return NULL;
+ }
+
+ /// ChangeSection - Update streamer for a new active section.
+ ///
+ /// This is called by PopSection and SwitchSection, if the current
+ /// section changes.
+ virtual void ChangeSection(const MCSection *) = 0;
+
+ /// PushSection - Save the current and previous section on the
+ /// section stack.
+ void PushSection() {
+ PrevSectionStack.push_back(getPreviousSection());
+ CurSectionStack.push_back(getCurrentSection());
+ }
+
+ /// PopSection - Restore the current and previous section from
+ /// the section stack. Calls ChangeSection as needed.
+ ///
+ /// Returns false if the stack was empty.
+ bool PopSection() {
+ if (PrevSectionStack.size() <= 1)
+ return false;
+ assert(CurSectionStack.size() > 1);
+ PrevSectionStack.pop_back();
+ const MCSection *oldSection = CurSectionStack.pop_back_val();
+ const MCSection *curSection = CurSectionStack.back();
+
+ if (oldSection != curSection)
+ ChangeSection(curSection);
+ return true;
+ }
/// SwitchSection - Set the current section where code is being emitted to
/// @p Section. This is required to update CurSection.
///
/// This corresponds to assembler directives like .section, .text, etc.
- virtual void SwitchSection(const MCSection *Section) = 0;
-
+ void SwitchSection(const MCSection *Section) {
+ assert(Section && "Cannot switch to a null section!");
+ const MCSection *curSection = CurSectionStack.back();
+ PrevSectionStack.back() = curSection;
+ if (Section != curSection) {
+ CurSectionStack.back() = Section;
+ ChangeSection(Section);
+ }
+ }
+
+ /// InitSections - Create the default sections and set the initial one.
+ virtual void InitSections() = 0;
+
/// EmitLabel - Emit a label for @p Symbol into the current section.
///
/// This corresponds to an assembler statement such as:
@@ -123,6 +190,10 @@ namespace llvm {
/// EmitAssemblerFlag - Note in the output the specified @p Flag
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) = 0;
+ /// EmitThumbFunc - Note in the output that the specified @p Func is
+ /// a Thumb mode function (ARM target only).
+ virtual void EmitThumbFunc(MCSymbol *Func) = 0;
+
/// EmitAssignment - Emit an assignment of @p Value to @p Symbol.
///
/// This corresponds to an assembler statement such as:
@@ -136,6 +207,15 @@ namespace llvm {
/// @param Value - The value for the symbol.
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) = 0;
+ /// EmitWeakReference - Emit a weak reference from @p Alias to @p Symbol.
+ ///
+ /// This corresponds to an assembler statement such as:
+ /// .weakref alias, symbol
+ ///
+ /// @param Alias - The alias that is being created.
+ /// @param Symbol - The symbol being aliased.
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) = 0;
+
/// EmitSymbolAttribute - Add the given @p Attribute to @p Symbol.
virtual void EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) = 0;
@@ -170,7 +250,7 @@ namespace llvm {
/// .size symbol, expression
///
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) = 0;
-
+
/// EmitCommonSymbol - Emit a common symbol.
///
/// @param Symbol - The common symbol to emit.
@@ -185,7 +265,7 @@ namespace llvm {
/// @param Symbol - The common symbol to emit.
/// @param Size - The size of the common symbol.
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) = 0;
-
+
/// EmitZerofill - Emit the zerofill section and an optional symbol.
///
/// @param Section - The zerofill section to create and or to put the symbol
@@ -204,7 +284,7 @@ namespace llvm {
/// @param ByteAlignment - The alignment of the thread local common symbol
/// if non-zero. This must be a power of 2 on some targets.
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0) = 0;
+ uint64_t Size, unsigned ByteAlignment = 0) = 0;
/// @}
/// @name Generating Data
/// @{
@@ -224,38 +304,67 @@ namespace llvm {
/// @param Value - The value to emit.
/// @param Size - The size of the integer (in bytes) to emit. This must
/// match a native machine width.
- virtual void EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace = 0) = 0;
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) = 0;
+
+ void EmitValue(const MCExpr *Value, unsigned Size, unsigned AddrSpace = 0);
+
+ void EmitPCRelValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace = 0);
/// EmitIntValue - Special case of EmitValue that avoids the client having
/// to pass in a MCExpr for constant integers.
virtual void EmitIntValue(uint64_t Value, unsigned Size,
unsigned AddrSpace = 0);
-
+
+ /// EmitAbsValue - Emit the Value, but try to avoid relocations. On MachO
+ /// this is done by producing
+ /// foo = value
+ /// .long foo
+ void EmitAbsValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace = 0);
+
+ virtual void EmitULEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) = 0;
+
+ virtual void EmitSLEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) = 0;
+
+ /// EmitULEB128IntValue - Special case of EmitULEB128Value that avoids the
+ /// client having to pass in a MCExpr for constant integers.
+ void EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace = 0);
+
+ /// EmitSLEB128IntValue - Special case of EmitSLEB128Value that avoids the
+ /// client having to pass in a MCExpr for constant integers.
+ void EmitSLEB128IntValue(int64_t Value, unsigned AddrSpace = 0);
+
/// EmitSymbolValue - Special case of EmitValue that avoids the client
/// having to pass in a MCExpr for MCSymbols.
- virtual void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
- unsigned AddrSpace);
-
+ void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ unsigned AddrSpace = 0);
+
+ void EmitPCRelSymbolValue(const MCSymbol *Sym, unsigned Size,
+ unsigned AddrSpace = 0);
+
/// EmitGPRel32Value - Emit the expression @p Value into the output as a
/// gprel32 (32-bit GP relative) value.
///
/// This is used to implement assembler directives such as .gprel32 on
/// targets that support them.
- virtual void EmitGPRel32Value(const MCExpr *Value) = 0;
-
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+
/// EmitFill - Emit NumBytes bytes worth of the value specified by
/// FillValue. This implements directives such as '.space'.
virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
unsigned AddrSpace);
-
+
/// EmitZeros - Emit NumBytes worth of zeros. This is a convenience
/// function that just wraps EmitFill.
void EmitZeros(uint64_t NumBytes, unsigned AddrSpace) {
EmitFill(NumBytes, 0, AddrSpace);
}
-
+
/// EmitValueToAlignment - Emit some number of copies of @p Value until
/// the byte alignment @p ByteAlignment is reached.
///
@@ -301,17 +410,47 @@ namespace llvm {
/// @param Value - The value to use when filling bytes.
virtual void EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0) = 0;
-
+
/// @}
-
+
/// EmitFileDirective - Switch to a new logical file. This is used to
/// implement the '.file "foo.c"' assembler directive.
virtual void EmitFileDirective(StringRef Filename) = 0;
-
+
/// EmitDwarfFileDirective - Associate a filename with a specified logical
/// file number. This implements the DWARF2 '.file 4 "foo.c"' assembler
/// directive.
- virtual void EmitDwarfFileDirective(unsigned FileNo,StringRef Filename) = 0;
+ virtual bool EmitDwarfFileDirective(unsigned FileNo,StringRef Filename);
+
+ /// EmitDwarfLocDirective - This implements the DWARF2
+ /// '.loc fileno lineno ...' assembler directive.
+ virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa,
+ unsigned Discriminator);
+
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) = 0;
+
+ virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ }
+
+ void EmitDwarfSetLineAddr(int64_t LineDelta, const MCSymbol *Label,
+ int PointerSize);
+
+ virtual bool EmitCFIStartProc();
+ virtual bool EmitCFIEndProc();
+ virtual bool EmitCFIDefCfa(int64_t Register, int64_t Offset);
+ virtual bool EmitCFIDefCfaOffset(int64_t Offset);
+ virtual bool EmitCFIDefCfaRegister(int64_t Register);
+ virtual bool EmitCFIOffset(int64_t Register, int64_t Offset);
+ virtual bool EmitCFIPersonality(const MCSymbol *Sym,
+ unsigned Encoding);
+ virtual bool EmitCFILsda(const MCSymbol *Sym, unsigned Encoding);
+ virtual bool EmitCFIRememberState();
+ virtual bool EmitCFIRestoreState();
/// EmitInstruction - Emit the given @p Instruction into the current
/// section.
@@ -322,7 +461,7 @@ namespace llvm {
/// indicated by the hasRawTextSupport() predicate. By default this aborts.
virtual void EmitRawText(StringRef String);
void EmitRawText(const Twine &String);
-
+
/// Finish - Finish emission of machine code.
virtual void Finish() = 0;
};
@@ -342,12 +481,18 @@ namespace llvm {
/// \param CE - If given, a code emitter to use to show the instruction
/// encoding inline with the assembly. This method takes ownership of \arg CE.
///
+ /// \param TAB - If given, a target asm backend to use to show the fixup
+ /// information in conjunction with encoding information. This method takes
+ /// ownership of \arg TAB.
+ ///
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isLittleEndian, bool isVerboseAsm,
+ bool isVerboseAsm,
+ bool useLoc,
MCInstPrinter *InstPrint = 0,
MCCodeEmitter *CE = 0,
+ TargetAsmBackend *TAB = 0,
bool ShowInst = false);
/// createMachOStreamer - Create a machine code streamer which will generate
@@ -371,7 +516,7 @@ namespace llvm {
/// ELF format object files.
MCStreamer *createELFStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE,
- bool RelaxAll = false);
+ bool RelaxAll, bool NoExecStack);
/// createLoggingStreamer - Create a machine code streamer which just logs the
/// API calls and then dispatches to another streamer.
@@ -379,6 +524,13 @@ namespace llvm {
/// The new streamer takes ownership of the \arg Child.
MCStreamer *createLoggingStreamer(MCStreamer *Child, raw_ostream &OS);
+ /// createPureStreamer - Create a machine code streamer which will generate
+ /// "pure" MC object files, for use with MC-JIT and testing tools.
+ ///
+ /// Takes ownership of \arg TAB and \arg CE.
+ MCStreamer *createPureStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE);
+
} // end namespace llvm
#endif
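The hunks above change createAsmStreamer to drop the endianness flag (presumably unneeded for textual output) and add useLoc plus an optional TargetAsmBackend, while the new EmitCFI* and EmitDwarf* hooks route unwind and line-table directives through the streamer. A minimal sketch of driving the new surface, assuming the caller already owns Ctx, OS, IP, CE and TAB:

  // Sketch only; Ctx, OS, IP, CE and TAB are assumed to exist in the caller.
  MCStreamer *S = createAsmStreamer(Ctx, OS,
                                    /*isVerboseAsm=*/true,
                                    /*useLoc=*/true,   // emit .file/.loc directives
                                    IP, CE, TAB,
                                    /*ShowInst=*/false);
  S->EmitCFIStartProc();                   // open an unwind region
  S->EmitCFIDefCfaOffset(16);              // CFA is now at offset 16
  S->EmitCFIOffset(/*Register=*/6, -16);   // reg 6 saved at CFA-16 (DWARF numbering)
  S->EmitCFIEndProc();                     // close the region
  S->Finish();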
diff --git a/contrib/llvm/include/llvm/MC/MCSymbol.h b/contrib/llvm/include/llvm/MC/MCSymbol.h
index 1b432c2..7da4d7c 100644
--- a/contrib/llvm/include/llvm/MC/MCSymbol.h
+++ b/contrib/llvm/include/llvm/MC/MCSymbol.h
@@ -52,15 +52,14 @@ namespace llvm {
/// "Lfoo" or ".foo".
unsigned IsTemporary : 1;
- /// IsUsedInExpr - True if this symbol has been used in an expression and
- /// cannot be redefined.
- unsigned IsUsedInExpr : 1;
+ /// IsUsed - True if this symbol has been used.
+ mutable unsigned IsUsed : 1;
private: // MCContext creates and uniques these.
friend class MCContext;
MCSymbol(StringRef name, bool isTemporary)
: Name(name), Section(0), Value(0),
- IsTemporary(isTemporary), IsUsedInExpr(false) {}
+ IsTemporary(isTemporary), IsUsed(false) {}
MCSymbol(const MCSymbol&); // DO NOT IMPLEMENT
void operator=(const MCSymbol&); // DO NOT IMPLEMENT
@@ -74,9 +73,9 @@ namespace llvm {
/// isTemporary - Check if this is an assembler temporary symbol.
bool isTemporary() const { return IsTemporary; }
- /// isUsedInExpr - Check if this is an assembler temporary symbol.
- bool isUsedInExpr() const { return IsUsedInExpr; }
- void setUsedInExpr(bool Value) { IsUsedInExpr = Value; }
+ /// isUsed - Check whether this symbol has been used.
+ bool isUsed() const { return IsUsed; }
+ void setUsed(bool Value) const { IsUsed = Value; }
/// @}
/// @name Associated Sections
@@ -135,9 +134,15 @@ namespace llvm {
/// getValue() - Get the value for variable symbols.
const MCExpr *getVariableValue() const {
assert(isVariable() && "Invalid accessor!");
+ IsUsed = true;
return Value;
}
+ /// AliasedSymbol() - If this is an alias (a = b), return the symbol
+ /// we ultimately point to. For a non-alias, this just returns the symbol
+ /// itself.
+ const MCSymbol &AliasedSymbol() const;
+
void setVariableValue(const MCExpr *Value);
/// @}
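AliasedSymbol is only declared above; a plausible implementation (a sketch, not necessarily what MCSymbol.cpp does) chases variable values through symbol references until it reaches a non-variable symbol:

  const MCSymbol &MCSymbol::AliasedSymbol() const {
    const MCSymbol *S = this;
    while (S->isVariable()) {
      const MCExpr *Value = S->getVariableValue(); // note: marks S used
      const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Value);
      if (!Ref)
        return *S; // value is a more complex expression; stop here
      S = &Ref->getSymbol();
    }
    return *S;
  }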
diff --git a/contrib/llvm/include/llvm/MC/MCValue.h b/contrib/llvm/include/llvm/MC/MCValue.h
index 11b6c2a..df8dbd9 100644
--- a/contrib/llvm/include/llvm/MC/MCValue.h
+++ b/contrib/llvm/include/llvm/MC/MCValue.h
@@ -14,7 +14,7 @@
#ifndef LLVM_MC_MCVALUE_H
#define LLVM_MC_MCVALUE_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
diff --git a/contrib/llvm/include/llvm/MC/MachObjectWriter.h b/contrib/llvm/include/llvm/MC/MachObjectWriter.h
deleted file mode 100644
index 9b1ff1d..0000000
--- a/contrib/llvm/include/llvm/MC/MachObjectWriter.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//===-- llvm/MC/MachObjectWriter.h - Mach-O File Writer ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_MC_MACHOBJECTWRITER_H
-#define LLVM_MC_MACHOBJECTWRITER_H
-
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-
-namespace llvm {
-class MCAssembler;
-class MCFragment;
-class MCFixup;
-class MCValue;
-class raw_ostream;
-
-class MachObjectWriter : public MCObjectWriter {
- void *Impl;
-
-public:
- MachObjectWriter(raw_ostream &OS, bool Is64Bit, bool IsLittleEndian = true);
- virtual ~MachObjectWriter();
-
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm);
-
- virtual void RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue);
-
- virtual void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/Metadata.h b/contrib/llvm/include/llvm/Metadata.h
index f5a80a3..a6c3f03 100644
--- a/contrib/llvm/include/llvm/Metadata.h
+++ b/contrib/llvm/include/llvm/Metadata.h
@@ -144,9 +144,6 @@ public:
unsigned getNumOperands() const { return NumOperands; }
/// isFunctionLocal - Return whether MDNode is local to a function.
- /// Note: MDNodes are designated as function-local when created, and keep
- /// that designation even if their operands are modified to no longer
- /// refer to function-local IR.
bool isFunctionLocal() const {
return (getSubclassDataFromValue() & FunctionLocalBit) != 0;
}
diff --git a/contrib/llvm/include/llvm/Module.h b/contrib/llvm/include/llvm/Module.h
index b7880ca..f95895e 100644
--- a/contrib/llvm/include/llvm/Module.h
+++ b/contrib/llvm/include/llvm/Module.h
@@ -20,7 +20,7 @@
#include "llvm/GlobalAlias.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <vector>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Object/MachOFormat.h b/contrib/llvm/include/llvm/Object/MachOFormat.h
new file mode 100644
index 0000000..31cd523
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/MachOFormat.h
@@ -0,0 +1,367 @@
+//===- MachOFormat.h - Mach-O Format Structures And Constants ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares various structures and constants which are platform
+// independent and can be shared by any client which wishes to interact with
+// Mach object files.
+//
+// The definitions here are purposely chosen to match the LLVM style as opposed
+// to following the platform specific definition of the format.
+//
+// On a Mach system, see the <mach-o/...> includes for more information, in
+// particular <mach-o/loader.h>.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHOFORMAT_H
+#define LLVM_OBJECT_MACHOFORMAT_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+namespace object {
+
+/// General Mach platform information.
+namespace mach {
+ /// @name CPU Type and Subtype Information
+ /// {
+
+ /// \brief Capability bits used in CPU type encoding.
+ enum CPUTypeFlagsMask {
+ CTFM_ArchMask = 0xFF000000,
+ CTFM_ArchABI64 = 0x01000000
+ };
+
+ /// \brief Machine type IDs used in CPU type encoding.
+ enum CPUTypeMachine {
+ CTM_i386 = 7,
+ CTM_x86_64 = CTM_i386 | CTFM_ArchABI64,
+ CTM_ARM = 12,
+ CTM_SPARC = 14,
+ CTM_PowerPC = 18,
+ CTM_PowerPC64 = CTM_PowerPC | CTFM_ArchABI64
+ };
+
+ /// \brief Capability bits used in CPU subtype encoding.
+ enum CPUSubtypeFlagsMask {
+ CSFM_SubtypeMask = 0xFF000000,
+ CSFM_SubtypeLib64 = 0x80000000
+ };
+
+ /// \brief ARM Machine Subtypes.
+ enum CPUSubtypeARM {
+ CSARM_ALL = 0,
+ CSARM_V4T = 5,
+ CSARM_V6 = 6,
+ CSARM_V5TEJ = 7,
+ CSARM_XSCALE = 8,
+ CSARM_V7 = 9
+ };
+
+ /// \brief PowerPC Machine Subtypes.
+ enum CPUSubtypePowerPC {
+ CSPPC_ALL = 0
+ };
+
+ /// \brief SPARC Machine Subtypes.
+ enum CPUSubtypeSPARC {
+ CSSPARC_ALL = 0
+ };
+
+ /// \brief x86 Machine Subtypes.
+ enum CPUSubtypeX86 {
+ CSX86_ALL = 3
+ };
+
+ /// @}
+
+} // end namespace mach
+
+/// Format information for Mach object files.
+namespace macho {
+ /// \brief Constants for structure sizes.
+ enum StructureSizes {
+ Header32Size = 28,
+ Header64Size = 32,
+ SegmentLoadCommand32Size = 56,
+ SegmentLoadCommand64Size = 72,
+ Section32Size = 68,
+ Section64Size = 80,
+ SymtabLoadCommandSize = 24,
+ DysymtabLoadCommandSize = 80,
+ Nlist32Size = 12,
+ Nlist64Size = 16,
+ RelocationInfoSize = 8
+ };
+
+ /// \brief Constants for header magic field.
+ enum HeaderMagic {
+ HM_Object32 = 0xFEEDFACE, ///< 32-bit mach object file
+ HM_Object64 = 0xFEEDFACF, ///< 64-bit mach object file
+ HM_Universal = 0xCAFEBABE ///< Universal object file
+ };
+
+ /// \brief Header common to all Mach object files.
+ struct Header {
+ uint32_t Magic;
+ uint32_t CPUType;
+ uint32_t CPUSubtype;
+ uint32_t FileType;
+ uint32_t NumLoadCommands;
+ uint32_t SizeOfLoadCommands;
+ uint32_t Flags;
+ };
+
+ /// \brief Extended header for 64-bit object files.
+ struct Header64Ext {
+ uint32_t Reserved;
+ };
+
+ // See <mach-o/loader.h>.
+ enum HeaderFileType {
+ HFT_Object = 0x1
+ };
+
+ enum HeaderFlags {
+ HF_SubsectionsViaSymbols = 0x2000
+ };
+
+ enum LoadCommandType {
+ LCT_Segment = 0x1,
+ LCT_Symtab = 0x2,
+ LCT_Dysymtab = 0xb,
+ LCT_Segment64 = 0x19,
+ LCT_UUID = 0x1b
+ };
+
+ /// \brief Load command structure.
+ struct LoadCommand {
+ uint32_t Type;
+ uint32_t Size;
+ };
+
+ /// @name Load Command Structures
+ /// @{
+
+ struct SegmentLoadCommand {
+ uint32_t Type;
+ uint32_t Size;
+ char Name[16];
+ uint32_t VMAddress;
+ uint32_t VMSize;
+ uint32_t FileOffset;
+ uint32_t FileSize;
+ uint32_t MaxVMProtection;
+ uint32_t InitialVMProtection;
+ uint32_t NumSections;
+ uint32_t Flags;
+ };
+
+ struct Segment64LoadCommand {
+ uint32_t Type;
+ uint32_t Size;
+ char Name[16];
+ uint64_t VMAddress;
+ uint64_t VMSize;
+ uint64_t FileOffset;
+ uint64_t FileSize;
+ uint32_t MaxVMProtection;
+ uint32_t InitialVMProtection;
+ uint32_t NumSections;
+ uint32_t Flags;
+ };
+
+ struct SymtabLoadCommand {
+ uint32_t Type;
+ uint32_t Size;
+ uint32_t SymbolTableOffset;
+ uint32_t NumSymbolTableEntries;
+ uint32_t StringTableOffset;
+ uint32_t StringTableSize;
+ };
+
+ struct DysymtabLoadCommand {
+ uint32_t Type;
+ uint32_t Size;
+
+ uint32_t LocalSymbolsIndex;
+ uint32_t NumLocalSymbols;
+
+ uint32_t ExternalSymbolsIndex;
+ uint32_t NumExternalSymbols;
+
+ uint32_t UndefinedSymbolsIndex;
+ uint32_t NumUndefinedSymbols;
+
+ uint32_t TOCOffset;
+ uint32_t NumTOCEntries;
+
+ uint32_t ModuleTableOffset;
+ uint32_t NumModuleTableEntries;
+
+ uint32_t ReferenceSymbolTableOffset;
+ uint32_t NumReferencedSymbolTableEntries;
+
+ uint32_t IndirectSymbolTableOffset;
+ uint32_t NumIndirectSymbolTableEntries;
+
+ uint32_t ExternalRelocationTableOffset;
+ uint32_t NumExternalRelocationTableEntries;
+
+ uint32_t LocalRelocationTableOffset;
+ uint32_t NumLocalRelocationTableEntries;
+ };
+
+ /// @}
+ /// @name Section Data
+ /// @{
+
+ struct Section {
+ char Name[16];
+ char SegmentName[16];
+ uint32_t Address;
+ uint32_t Size;
+ uint32_t Offset;
+ uint32_t Align;
+ uint32_t RelocationTableOffset;
+ uint32_t NumRelocationTableEntries;
+ uint32_t Flags;
+ uint32_t Reserved1;
+ uint32_t Reserved2;
+ };
+ struct Section64 {
+ char Name[16];
+ char SegmentName[16];
+ uint64_t Address;
+ uint64_t Size;
+ uint32_t Offset;
+ uint32_t Align;
+ uint32_t RelocationTableOffset;
+ uint32_t NumRelocationTableEntries;
+ uint32_t Flags;
+ uint32_t Reserved1;
+ uint32_t Reserved2;
+ uint32_t Reserved3;
+ };
+
+ /// @}
+ /// @name Symbol Table Entries
+ /// @{
+
+ struct SymbolTableEntry {
+ uint32_t StringIndex;
+ uint8_t Type;
+ uint8_t SectionIndex;
+ uint16_t Flags;
+ uint32_t Value;
+ };
+ struct Symbol64TableEntry {
+ uint32_t StringIndex;
+ uint8_t Type;
+ uint8_t SectionIndex;
+ uint16_t Flags;
+ uint64_t Value;
+ };
+
+ /// @}
+ /// @name Indirect Symbol Table
+ /// @{
+
+ struct IndirectSymbolTableEntry {
+ uint32_t Index;
+ };
+
+ /// @}
+ /// @name Relocation Data
+ /// @{
+
+ struct RelocationEntry {
+ uint32_t Word0;
+ uint32_t Word1;
+ };
+
+ /// @}
+
+ // See <mach-o/nlist.h>.
+ enum SymbolTypeType {
+ STT_Undefined = 0x00,
+ STT_Absolute = 0x02,
+ STT_Section = 0x0e
+ };
+
+ enum SymbolTypeFlags {
+ // If any of these bits are set, then the entry is a stab entry number (see
+ // <mach-o/stab.h>). Otherwise the other masks apply.
+ STF_StabsEntryMask = 0xe0,
+
+ STF_TypeMask = 0x0e,
+ STF_External = 0x01,
+ STF_PrivateExtern = 0x10
+ };
+
+ /// IndirectSymbolFlags - Flags for encoding special values in the indirect
+ /// symbol entry.
+ enum IndirectSymbolFlags {
+ ISF_Local = 0x80000000,
+ ISF_Absolute = 0x40000000
+ };
+
+ /// RelocationFlags - Special flags for addresses.
+ enum RelocationFlags {
+ RF_Scattered = 0x80000000
+ };
+
+ /// Common relocation info types.
+ enum RelocationInfoType {
+ RIT_Vanilla = 0,
+ RIT_Pair = 1,
+ RIT_Difference = 2
+ };
+
+ /// Generic relocation info types, which are shared by some (but not all)
+ /// platforms.
+ enum RelocationInfoType_Generic {
+ RIT_Generic_PreboundLazyPointer = 3,
+ RIT_Generic_LocalDifference = 4,
+ RIT_Generic_TLV = 5
+ };
+
+ /// X86_64 uses its own relocation types.
+ enum RelocationInfoTypeX86_64 {
+ // Note that x86_64 doesn't even share the common relocation types.
+ RIT_X86_64_Unsigned = 0,
+ RIT_X86_64_Signed = 1,
+ RIT_X86_64_Branch = 2,
+ RIT_X86_64_GOTLoad = 3,
+ RIT_X86_64_GOT = 4,
+ RIT_X86_64_Subtractor = 5,
+ RIT_X86_64_Signed1 = 6,
+ RIT_X86_64_Signed2 = 7,
+ RIT_X86_64_Signed4 = 8,
+ RIT_X86_64_TLV = 9
+ };
+
+ /// ARM uses its own relocation types.
+ enum RelocationInfoTypeARM {
+ RIT_ARM_LocalDifference = 3,
+ RIT_ARM_PreboundLazyPointer = 4,
+ RIT_ARM_Branch24Bit = 5,
+ RIT_ARM_ThumbBranch22Bit = 6,
+ RIT_ARM_ThumbBranch32Bit = 7,
+ RIT_ARM_Half = 8,
+ RIT_ARM_HalfDifference = 9
+ };
+
+} // end namespace macho
+
+} // end namespace object
+} // end namespace llvm
+
+#endif
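Because the 64-bit machine types are simply the 32-bit ones with CTFM_ArchABI64 or'd in, a header's word size falls straight out of the CPU type. A small sketch using only the constants above:

  using namespace llvm::object;

  static bool isMachOMagic(uint32_t Magic) {
    return Magic == macho::HM_Object32 || Magic == macho::HM_Object64 ||
           Magic == macho::HM_Universal;
  }

  static bool headerIs64Bit(const macho::Header &H) {
    // CTM_x86_64 == CTM_i386 | CTFM_ArchABI64, and likewise for PowerPC64.
    return (H.CPUType & mach::CTFM_ArchABI64) != 0;
  }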
diff --git a/contrib/llvm/include/llvm/Object/MachOObject.h b/contrib/llvm/include/llvm/Object/MachOObject.h
new file mode 100644
index 0000000..03d9c14
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/MachOObject.h
@@ -0,0 +1,180 @@
+//===- MachOObject.h - Mach-O Object File Wrapper ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHOOBJECT_H
+#define LLVM_OBJECT_MACHOOBJECT_H
+
+#include <string>
+#include "llvm/ADT/InMemoryStruct.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/MachOFormat.h"
+
+namespace llvm {
+
+class MemoryBuffer;
+
+namespace object {
+
+/// \brief Wrapper object for manipulating Mach-O object files.
+///
+/// This class is designed to implement a full-featured, efficient, portable,
+/// and robust Mach-O interface to Mach-O object files. It does not attempt to
+/// smooth over rough edges in the Mach-O format or generalize access to object
+/// independent features.
+///
+/// The class is designed around accessing the Mach-O object which is expected
+/// to be fully loaded into memory.
+///
+/// This class is *not* suitable for concurrent use. For efficient operation,
+/// the class uses APIs which rely on the ability to cache the results of
+/// certain calls in internal objects which are not safe for concurrent
+/// access. This allows the API to be zero-copy on the common paths.
+//
+// FIXME: It would be cool if we supported a "paged" MemoryBuffer
+// implementation. This would allow us to implement a more sensible version of
+// MemoryObject which can work like a MemoryBuffer, but be more efficient for
+// objects which are in the current address space.
+class MachOObject {
+public:
+ struct LoadCommandInfo {
+ /// The load command information.
+ macho::LoadCommand Command;
+
+ /// The offset to the start of the load command in memory.
+ uint64_t Offset;
+ };
+
+private:
+ OwningPtr<MemoryBuffer> Buffer;
+
+ /// Whether the object is little endian.
+ bool IsLittleEndian;
+ /// Whether the object is 64-bit.
+ bool Is64Bit;
+ /// Whether the object is swapped endianness from the host.
+ bool IsSwappedEndian;
+ /// Whether the string table has been registered.
+ bool HasStringTable;
+
+ /// The cached information on the load commands.
+ LoadCommandInfo *LoadCommands;
+ mutable unsigned NumLoadedCommands;
+
+ /// The cached copy of the header.
+ macho::Header Header;
+ macho::Header64Ext Header64Ext;
+
+ /// Cache string table information.
+ StringRef StringTable;
+
+private:
+ MachOObject(MemoryBuffer *Buffer, bool IsLittleEndian, bool Is64Bit);
+
+public:
+ ~MachOObject();
+
+ /// \brief Load a Mach-O object from a MemoryBuffer object.
+ ///
+ /// \param Buffer - The buffer to load the object from. This routine takes
+ /// exclusive ownership of the buffer (which is passed to the returned object
+ /// on success).
+ /// \param ErrorStr [out] - If given, will be set to a user readable error
+ /// message on failure.
+ /// \returns The loaded object, or null on error.
+ static MachOObject *LoadFromBuffer(MemoryBuffer *Buffer,
+ std::string *ErrorStr = 0);
+
+ /// @name File Information
+ /// @{
+
+ bool isLittleEndian() const { return IsLittleEndian; }
+ bool isSwappedEndian() const { return IsSwappedEndian; }
+ bool is64Bit() const { return Is64Bit; }
+
+ unsigned getHeaderSize() const {
+ return Is64Bit ? macho::Header64Size : macho::Header32Size;
+ }
+
+ StringRef getData(size_t Offset, size_t Size) const;
+
+ /// @}
+ /// @name String Table Data
+ /// @{
+
+ StringRef getStringTableData() const {
+ assert(HasStringTable && "String table has not been registered!");
+ return StringTable;
+ }
+
+ StringRef getStringAtIndex(unsigned Index) const {
+ size_t End = getStringTableData().find('\0', Index);
+ return getStringTableData().slice(Index, End);
+ }
+
+ void RegisterStringTable(macho::SymtabLoadCommand &SLC);
+
+ /// @}
+ /// @name Object Header Access
+ /// @{
+
+ const macho::Header &getHeader() const { return Header; }
+ const macho::Header64Ext &getHeader64Ext() const {
+ assert(is64Bit() && "Invalid access!");
+ return Header64Ext;
+ }
+
+ /// @}
+ /// @name Object Structure Access
+ /// @{
+
+ /// \brief Retrieve the information for the given load command.
+ const LoadCommandInfo &getLoadCommandInfo(unsigned Index) const;
+
+ void ReadSegmentLoadCommand(
+ const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::SegmentLoadCommand> &Res) const;
+ void ReadSegment64LoadCommand(
+ const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::Segment64LoadCommand> &Res) const;
+ void ReadSymtabLoadCommand(
+ const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::SymtabLoadCommand> &Res) const;
+ void ReadDysymtabLoadCommand(
+ const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::DysymtabLoadCommand> &Res) const;
+ void ReadIndirectSymbolTableEntry(
+ const macho::DysymtabLoadCommand &DLC,
+ unsigned Index,
+ InMemoryStruct<macho::IndirectSymbolTableEntry> &Res) const;
+ void ReadSection(
+ const LoadCommandInfo &LCI,
+ unsigned Index,
+ InMemoryStruct<macho::Section> &Res) const;
+ void ReadSection64(
+ const LoadCommandInfo &LCI,
+ unsigned Index,
+ InMemoryStruct<macho::Section64> &Res) const;
+ void ReadRelocationEntry(
+ uint64_t RelocationTableOffset, unsigned Index,
+ InMemoryStruct<macho::RelocationEntry> &Res) const;
+ void ReadSymbolTableEntry(
+ uint64_t SymbolTableOffset, unsigned Index,
+ InMemoryStruct<macho::SymbolTableEntry> &Res) const;
+ void ReadSymbol64TableEntry(
+ uint64_t SymbolTableOffset, unsigned Index,
+ InMemoryStruct<macho::Symbol64TableEntry> &Res) const;
+
+ /// @}
+};
+
+} // end namespace object
+} // end namespace llvm
+
+#endif
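A sketch of the intended usage, assuming the era's MemoryBuffer::getFile (raw pointer return, error via out-parameter) and that InMemoryStruct exposes the usual smart-pointer operators:

  std::string Err;
  MemoryBuffer *Buf = MemoryBuffer::getFile("input.o", &Err);
  if (!Buf) { /* report Err */ }

  // LoadFromBuffer takes ownership of Buf on success.
  OwningPtr<object::MachOObject> Obj(
      object::MachOObject::LoadFromBuffer(Buf, &Err));
  if (!Obj) { /* report Err */ }

  for (unsigned i = 0, e = Obj->getHeader().NumLoadCommands; i != e; ++i) {
    const object::MachOObject::LoadCommandInfo &LCI = Obj->getLoadCommandInfo(i);
    if (LCI.Command.Type == object::macho::LCT_Segment64) {
      InMemoryStruct<object::macho::Segment64LoadCommand> Seg;
      Obj->ReadSegment64LoadCommand(LCI, Seg);
      // ... inspect Seg->VMAddress, Seg->NumSections, and so on.
    }
  }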
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
new file mode 100644
index 0000000..eee9d44
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -0,0 +1,262 @@
+//===- ObjectFile.h - File format independent object file -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a file format independent ObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_OBJECT_FILE_H
+#define LLVM_OBJECT_OBJECT_FILE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <cstring>
+
+namespace llvm {
+
+class MemoryBuffer;
+class StringRef;
+
+namespace object {
+
+class ObjectFile;
+
+union DataRefImpl {
+ struct {
+ uint32_t a, b;
+ } d;
+ intptr_t p;
+};
+
+static bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
+ // Check that they are bitwise identical. This is the only legal way to
+ // compare a union without knowing which member is in use.
+ return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
+}
+
+/// SymbolRef - This is a value type class that represents a single symbol in
+/// the list of symbols in the object file.
+class SymbolRef {
+ DataRefImpl SymbolPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
+
+ bool operator==(const SymbolRef &Other) const;
+
+ SymbolRef getNext() const;
+
+ StringRef getName() const;
+ uint64_t getAddress() const;
+ uint64_t getSize() const;
+
+ /// Returns the ASCII char that should be displayed in a symbol table dump via
+ /// nm for this symbol.
+ char getNMTypeChar() const;
+
+ /// Returns true for symbols that are internal to the object file format such
+ /// as section symbols.
+ bool isInternal() const;
+};
+
+/// SectionRef - This is a value type class that represents a single section in
+/// the list of sections in the object file.
+class SectionRef {
+ DataRefImpl SectionPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
+
+ bool operator==(const SectionRef &Other) const;
+
+ SectionRef getNext() const;
+
+ StringRef getName() const;
+ uint64_t getAddress() const;
+ uint64_t getSize() const;
+ StringRef getContents() const;
+
+ // FIXME: Move to the normalization layer when it's created.
+ bool isText() const;
+};
+
+const uint64_t UnknownAddressOrSize = ~0ULL;
+
+/// ObjectFile - This class is the base class for all object file types.
+/// Concrete instances of this object are created by createObjectFile, which
+/// figures out which type to create.
+class ObjectFile {
+private:
+ ObjectFile(); // = delete
+ ObjectFile(const ObjectFile &other); // = delete
+
+protected:
+ MemoryBuffer *MapFile;
+ const uint8_t *base;
+
+ ObjectFile(MemoryBuffer *Object);
+
+ // These functions are for SymbolRef to call internally. The main goal of
+ // this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
+ // entry in the memory mapped object file. SymbolPimpl cannot contain any
+ // virtual functions because then it could not point into the memory mapped
+ // file.
+ friend class SymbolRef;
+ virtual SymbolRef getSymbolNext(DataRefImpl Symb) const = 0;
+ virtual StringRef getSymbolName(DataRefImpl Symb) const = 0;
+ virtual uint64_t getSymbolAddress(DataRefImpl Symb) const = 0;
+ virtual uint64_t getSymbolSize(DataRefImpl Symb) const = 0;
+ virtual char getSymbolNMTypeChar(DataRefImpl Symb) const = 0;
+ virtual bool isSymbolInternal(DataRefImpl Symb) const = 0;
+
+ // Same as above for SectionRef.
+ friend class SectionRef;
+ virtual SectionRef getSectionNext(DataRefImpl Sec) const = 0;
+ virtual StringRef getSectionName(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
+ virtual StringRef getSectionContents(DataRefImpl Sec) const = 0;
+ virtual bool isSectionText(DataRefImpl Sec) const = 0;
+
+
+public:
+ template<class content_type>
+ class content_iterator {
+ content_type Current;
+ public:
+ content_iterator(content_type symb)
+ : Current(symb) {}
+
+ const content_type* operator->() const {
+ return &Current;
+ }
+
+ bool operator==(const content_iterator &other) const {
+ return Current == other.Current;
+ }
+
+ bool operator!=(const content_iterator &other) const {
+ return !(*this == other);
+ }
+
+ content_iterator& operator++() { // Preincrement
+ Current = Current.getNext();
+ return *this;
+ }
+ };
+
+ typedef content_iterator<SymbolRef> symbol_iterator;
+ typedef content_iterator<SectionRef> section_iterator;
+
+ virtual ~ObjectFile();
+
+ virtual symbol_iterator begin_symbols() const = 0;
+ virtual symbol_iterator end_symbols() const = 0;
+
+ virtual section_iterator begin_sections() const = 0;
+ virtual section_iterator end_sections() const = 0;
+
+ /// @brief The number of bytes used to represent an address in this object
+ /// file format.
+ virtual uint8_t getBytesInAddress() const = 0;
+
+ virtual StringRef getFileFormatName() const = 0;
+ virtual /* Triple::ArchType */ unsigned getArch() const = 0;
+
+ StringRef getFilename() const;
+
+ /// @returns Pointer to ObjectFile subclass to handle this type of object.
+ /// @param ObjectPath The path to the object file. ObjectPath.isObject must
+ /// return true.
+ /// @brief Create ObjectFile from path.
+ static ObjectFile *createObjectFile(StringRef ObjectPath);
+ static ObjectFile *createObjectFile(MemoryBuffer *Object);
+
+private:
+ static ObjectFile *createCOFFObjectFile(MemoryBuffer *Object);
+ static ObjectFile *createELFObjectFile(MemoryBuffer *Object);
+ static ObjectFile *createMachOObjectFile(MemoryBuffer *Object);
+ static ObjectFile *createArchiveObjectFile(MemoryBuffer *Object);
+ static ObjectFile *createLibObjectFile(MemoryBuffer *Object);
+};
+
+// Inline function definitions.
+inline SymbolRef::SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner)
+ : SymbolPimpl(SymbolP)
+ , OwningObject(Owner) {}
+
+inline bool SymbolRef::operator==(const SymbolRef &Other) const {
+ return SymbolPimpl == Other.SymbolPimpl;
+}
+
+inline SymbolRef SymbolRef::getNext() const {
+ return OwningObject->getSymbolNext(SymbolPimpl);
+}
+
+inline StringRef SymbolRef::getName() const {
+ return OwningObject->getSymbolName(SymbolPimpl);
+}
+
+inline uint64_t SymbolRef::getAddress() const {
+ return OwningObject->getSymbolAddress(SymbolPimpl);
+}
+
+inline uint64_t SymbolRef::getSize() const {
+ return OwningObject->getSymbolSize(SymbolPimpl);
+}
+
+inline char SymbolRef::getNMTypeChar() const {
+ return OwningObject->getSymbolNMTypeChar(SymbolPimpl);
+}
+
+inline bool SymbolRef::isInternal() const {
+ return OwningObject->isSymbolInternal(SymbolPimpl);
+}
+
+
+/// SectionRef
+inline SectionRef::SectionRef(DataRefImpl SectionP,
+ const ObjectFile *Owner)
+ : SectionPimpl(SectionP)
+ , OwningObject(Owner) {}
+
+inline bool SectionRef::operator==(const SectionRef &Other) const {
+ return SectionPimpl == Other.SectionPimpl;
+}
+
+inline SectionRef SectionRef::getNext() const {
+ return OwningObject->getSectionNext(SectionPimpl);
+}
+
+inline StringRef SectionRef::getName() const {
+ return OwningObject->getSectionName(SectionPimpl);
+}
+
+inline uint64_t SectionRef::getAddress() const {
+ return OwningObject->getSectionAddress(SectionPimpl);
+}
+
+inline uint64_t SectionRef::getSize() const {
+ return OwningObject->getSectionSize(SectionPimpl);
+}
+
+inline StringRef SectionRef::getContents() const {
+ return OwningObject->getSectionContents(SectionPimpl);
+}
+
+inline bool SectionRef::isText() const {
+ return OwningObject->isSectionText(SectionPimpl);
+}
+
+} // end namespace object
+} // end namespace llvm
+
+#endif
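Format-independent symbol listing falls out of the iterators above; note content_iterator only defines operator->, so members go through the arrow. A sketch (outs() from raw_ostream.h assumed):

  object::ObjectFile *Obj = object::ObjectFile::createObjectFile("a.out");
  if (!Obj) { /* unsupported or unreadable object */ }

  for (object::ObjectFile::symbol_iterator I = Obj->begin_symbols(),
                                           E = Obj->end_symbols();
       I != E; ++I) {
    if (I->isInternal())
      continue; // skip section symbols and similar format-internal entries
    outs() << I->getNMTypeChar() << ' ' << I->getName() << '\n';
  }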
diff --git a/contrib/llvm/include/llvm/OperandTraits.h b/contrib/llvm/include/llvm/OperandTraits.h
index b614ccb..f0df5fa 100644
--- a/contrib/llvm/include/llvm/OperandTraits.h
+++ b/contrib/llvm/include/llvm/OperandTraits.h
@@ -27,27 +27,17 @@ namespace llvm {
/// when it is a prefix to the User object, and the number of Use objects is
/// known at compile time.
-template <unsigned ARITY>
+template <typename SubClass, unsigned ARITY>
struct FixedNumOperandTraits {
- static Use *op_begin(User* U) {
+ static Use *op_begin(SubClass* U) {
return reinterpret_cast<Use*>(U) - ARITY;
}
- static Use *op_end(User* U) {
+ static Use *op_end(SubClass* U) {
return reinterpret_cast<Use*>(U);
}
static unsigned operands(const User*) {
return ARITY;
}
- struct prefix {
- Use Ops[ARITY];
- prefix(); // DO NOT IMPLEMENT
- };
- template <class U>
- struct Layout {
- struct overlay : public prefix, public U {
- overlay(); // DO NOT IMPLEMENT
- };
- };
};
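The new SubClass parameter lets op_begin take the concrete User subclass; since the Use array is co-allocated immediately before the object, reinterpret_cast<Use*>(U) - ARITY finds it without an upcast at the call site. A hypothetical two-operand instruction would wire this up roughly as follows (MyBinaryInst is an assumption; the accessor macros live elsewhere in this header):

  class MyBinaryInst : public Instruction {
    // Operands are allocated immediately before 'this'.
  public:
    DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
    // ...
  };

  template <>
  struct OperandTraits<MyBinaryInst>
      : public FixedNumOperandTraits<MyBinaryInst, 2> {};

  DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MyBinaryInst, Value)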
//===----------------------------------------------------------------------===//
@@ -57,8 +47,8 @@ struct FixedNumOperandTraits {
/// OptionalOperandTraits - when the number of operands may change at runtime.
/// Naturally it may only decrease, because the allocations may not change.
-template <unsigned ARITY = 1>
-struct OptionalOperandTraits : public FixedNumOperandTraits<ARITY> {
+template <typename SubClass, unsigned ARITY = 1>
+struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> {
static unsigned operands(const User *U) {
return U->getNumOperands();
}
@@ -72,12 +62,12 @@ struct OptionalOperandTraits : public FixedNumOperandTraits<ARITY> {
/// when it is a prefix to the User object, and the number of Use objects is
/// only known at allocation time.
-template <unsigned MINARITY = 0>
+template <typename SubClass, unsigned MINARITY = 0>
struct VariadicOperandTraits {
- static Use *op_begin(User* U) {
- return reinterpret_cast<Use*>(U) - U->getNumOperands();
+ static Use *op_begin(SubClass* U) {
+ return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands();
}
- static Use *op_end(User* U) {
+ static Use *op_end(SubClass* U) {
return reinterpret_cast<Use*>(U);
}
static unsigned operands(const User *U) {
diff --git a/contrib/llvm/include/llvm/Operator.h b/contrib/llvm/include/llvm/Operator.h
index 60865aa..ff2a0ad 100644
--- a/contrib/llvm/include/llvm/Operator.h
+++ b/contrib/llvm/include/llvm/Operator.h
@@ -99,19 +99,21 @@ public:
/// hasNoSignedWrap - Test whether this operation is known to never
/// undergo signed overflow, aka the nsw property.
bool hasNoSignedWrap() const {
- return SubclassOptionalData & NoSignedWrap;
+ return (SubclassOptionalData & NoSignedWrap) != 0;
}
static inline bool classof(const OverflowingBinaryOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Add ||
I->getOpcode() == Instruction::Sub ||
- I->getOpcode() == Instruction::Mul;
+ I->getOpcode() == Instruction::Mul ||
+ I->getOpcode() == Instruction::Shl;
}
static inline bool classof(const ConstantExpr *CE) {
return CE->getOpcode() == Instruction::Add ||
CE->getOpcode() == Instruction::Sub ||
- CE->getOpcode() == Instruction::Mul;
+ CE->getOpcode() == Instruction::Mul ||
+ CE->getOpcode() == Instruction::Shl;
}
static inline bool classof(const Value *V) {
return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
@@ -119,105 +121,97 @@ public:
}
};
-/// AddOperator - Utility class for integer addition operators.
-///
-class AddOperator : public OverflowingBinaryOperator {
- ~AddOperator(); // do not implement
-public:
- static inline bool classof(const AddOperator *) { return true; }
- static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::Add;
- }
- static inline bool classof(const ConstantExpr *CE) {
- return CE->getOpcode() == Instruction::Add;
- }
- static inline bool classof(const Value *V) {
- return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
- (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
- }
-};
-
-/// SubOperator - Utility class for integer subtraction operators.
-///
-class SubOperator : public OverflowingBinaryOperator {
- ~SubOperator(); // do not implement
-public:
- static inline bool classof(const SubOperator *) { return true; }
- static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::Sub;
- }
- static inline bool classof(const ConstantExpr *CE) {
- return CE->getOpcode() == Instruction::Sub;
- }
- static inline bool classof(const Value *V) {
- return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
- (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
- }
-};
-
-/// MulOperator - Utility class for integer multiplication operators.
-///
-class MulOperator : public OverflowingBinaryOperator {
- ~MulOperator(); // do not implement
-public:
- static inline bool classof(const MulOperator *) { return true; }
- static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::Mul;
- }
- static inline bool classof(const ConstantExpr *CE) {
- return CE->getOpcode() == Instruction::Mul;
- }
- static inline bool classof(const Value *V) {
- return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
- (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
- }
-};
-
-/// SDivOperator - An Operator with opcode Instruction::SDiv.
-///
-class SDivOperator : public Operator {
+/// PossiblyExactOperator - A udiv, sdiv, lshr or ashr instruction, which can
+/// be marked as "exact", indicating that no bits are destroyed.
+class PossiblyExactOperator : public Operator {
public:
enum {
IsExact = (1 << 0)
};
-
-private:
- ~SDivOperator(); // do not implement
-
+
friend class BinaryOperator;
friend class ConstantExpr;
void setIsExact(bool B) {
SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
}
-
+
+private:
+ ~PossiblyExactOperator(); // do not implement
public:
/// isExact - Test whether this division is known to be exact, with
/// zero remainder.
bool isExact() const {
return SubclassOptionalData & IsExact;
}
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SDivOperator *) { return true; }
+
+ static bool isPossiblyExactOpcode(unsigned OpC) {
+ return OpC == Instruction::SDiv ||
+ OpC == Instruction::UDiv ||
+ OpC == Instruction::AShr ||
+ OpC == Instruction::LShr;
+ }
static inline bool classof(const ConstantExpr *CE) {
- return CE->getOpcode() == Instruction::SDiv;
+ return isPossiblyExactOpcode(CE->getOpcode());
}
static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::SDiv;
+ return isPossiblyExactOpcode(I->getOpcode());
}
static inline bool classof(const Value *V) {
return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
(isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
}
};
+
-class GEPOperator : public Operator {
+
+/// ConcreteOperator - A helper template for defining operators for individual
+/// opcodes.
+template<typename SuperClass, unsigned Opc>
+class ConcreteOperator : public SuperClass {
+ ~ConcreteOperator(); // DO NOT IMPLEMENT
+public:
+ static inline bool classof(const ConcreteOperator<SuperClass, Opc> *) {
+ return true;
+ }
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Opc;
+ }
+ static inline bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Opc;
+ }
+ static inline bool classof(const Value *V) {
+ return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+ (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+ }
+};
+
+class AddOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {};
+class SubOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {};
+class MulOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {};
+class ShlOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {};
+
+
+class SDivOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {};
+class UDivOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {};
+class AShrOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {};
+class LShrOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {};
+
+
+
+class GEPOperator
+ : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
enum {
IsInBounds = (1 << 0)
};
- ~GEPOperator(); // do not implement
-
friend class GetElementPtrInst;
friend class ConstantExpr;
void setIsInBounds(bool B) {
@@ -266,8 +260,8 @@ public:
/// value, just potentially different types.
bool hasAllZeroIndices() const {
for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
- if (Constant *C = dyn_cast<Constant>(I))
- if (C->isNullValue())
+ if (ConstantInt *C = dyn_cast<ConstantInt>(I))
+ if (C->isZero())
continue;
return false;
}
@@ -284,21 +278,6 @@ public:
}
return true;
}
-
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GEPOperator *) { return true; }
- static inline bool classof(const GetElementPtrInst *) { return true; }
- static inline bool classof(const ConstantExpr *CE) {
- return CE->getOpcode() == Instruction::GetElementPtr;
- }
- static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::GetElementPtr;
- }
- static inline bool classof(const Value *V) {
- return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
- (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
- }
};
} // End llvm namespace
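With ConcreteOperator, flag queries read uniformly off instructions and constant expressions alike; a sketch:

  // V is any llvm::Value*; both Instructions and ConstantExprs match.
  static bool addCannotWrapSigned(const Value *V) {
    if (const AddOperator *Add = dyn_cast<AddOperator>(V))
      return Add->hasNoSignedWrap();
    return false;
  }

  static bool isExactDivideOrShift(const Value *V) {
    if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(V))
      return PE->isExact(); // sdiv/udiv/ashr/lshr marked 'exact'
    return false;
  }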
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
index f4c6eed..ed0fb39 100644
--- a/contrib/llvm/include/llvm/Pass.h
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -57,6 +57,7 @@ enum PassManagerType {
PMT_CallGraphPassManager, ///< CGPassManager
PMT_FunctionPassManager, ///< FPPassManager
PMT_LoopPassManager, ///< LPPassManager
+ PMT_RegionPassManager, ///< RGPassManager
PMT_BasicBlockPassManager, ///< BBPassManager
PMT_Last
};
@@ -64,13 +65,14 @@ enum PassManagerType {
// Different types of passes.
enum PassKind {
PT_BasicBlock,
+ PT_Region,
PT_Loop,
PT_Function,
PT_CallGraphSCC,
PT_Module,
PT_PassManager
};
-
+
//===----------------------------------------------------------------------===//
/// Pass interface - Implemented by all 'passes'. Subclass this if you are an
/// interprocedural optimization or you do not fit into any of the more
diff --git a/contrib/llvm/include/llvm/PassManagers.h b/contrib/llvm/include/llvm/PassManagers.h
index 17f4a05..c4f409e 100644
--- a/contrib/llvm/include/llvm/PassManagers.h
+++ b/contrib/llvm/include/llvm/PassManagers.h
@@ -106,6 +106,7 @@ enum PassDebuggingString {
ON_BASICBLOCK_MSG, // "' on BasicBlock '" + PassName + "'...\n"
ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
+ ON_REGION_MSG, // " 'on Region ...\n'"
ON_LOOP_MSG, // " 'on Loop ...\n'"
ON_CG_MSG // "' on Call Graph ...\n'"
};
@@ -184,10 +185,10 @@ public:
void schedulePass(Pass *P);
/// Set pass P as the last user of the given analysis passes.
- void setLastUser(SmallVector<Pass *, 12> &AnalysisPasses, Pass *P);
+ void setLastUser(const SmallVectorImpl<Pass *> &AnalysisPasses, Pass *P);
/// Collect passes whose last user is P
- void collectLastUses(SmallVector<Pass *, 12> &LastUses, Pass *P);
+ void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);
/// Find the pass that implements Analysis AID. Search immutable
/// passes and all pass managers. If desired pass is not found
@@ -205,7 +206,7 @@ public:
ImmutablePasses.push_back(P);
}
- inline SmallVector<ImmutablePass *, 8>& getImmutablePasses() {
+ inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
return ImmutablePasses;
}
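Switching these parameters from SmallVector<T, N>& to SmallVectorImpl<T>& erases the inline element count from the signature, so callers can size their vectors independently. A sketch with a hypothetical helper:

  static void takeLastUses(SmallVectorImpl<Pass *> &Out) { /* ... */ }

  SmallVector<Pass *, 12> Big;
  SmallVector<Pass *, 2> Tiny;
  takeLastUses(Big);  // both calls compile: SmallVectorImpl<Pass*> matches any N;
  takeLastUses(Tiny); // a SmallVector<Pass*,12>& parameter would reject Tiny.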
@@ -313,8 +314,8 @@ public:
/// Populate RequiredPasses with analysis pass that are required by
/// pass P and are available. Populate ReqPassNotAvailable with analysis
/// pass that are required by pass P but are not available.
- void collectRequiredAnalysis(SmallVector<Pass *, 8> &RequiredPasses,
- SmallVector<AnalysisID, 8> &ReqPassNotAvailable,
+ void collectRequiredAnalysis(SmallVectorImpl<Pass *> &RequiredPasses,
+ SmallVectorImpl<AnalysisID> &ReqPassNotAvailable,
Pass *P);
/// All Required analyses should be available to the pass as it runs! Here
diff --git a/contrib/llvm/include/llvm/PassRegistry.h b/contrib/llvm/include/llvm/PassRegistry.h
index 5907139..5d89c49 100644
--- a/contrib/llvm/include/llvm/PassRegistry.h
+++ b/contrib/llvm/include/llvm/PassRegistry.h
@@ -8,61 +8,74 @@
//===----------------------------------------------------------------------===//
//
// This file defines PassRegistry, a class that is used in the initialization
-// and registration of passes. At initialization, passes are registered with
-// the PassRegistry, which is later provided to the PassManager for dependency
-// resolution and similar tasks.
+// and registration of passes. At application startup, passes are registered
+// with the PassRegistry, which is later provided to the PassManager for
+// dependency resolution and similar tasks.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_PASSREGISTRY_H
#define LLVM_PASSREGISTRY_H
-#include "llvm/ADT/StringMap.h"
-#include "llvm/System/DataTypes.h"
-#include "llvm/System/Mutex.h"
-#include <map>
-#include <set>
-#include <vector>
+#include "llvm/ADT/StringRef.h"
namespace llvm {
class PassInfo;
struct PassRegistrationListener;
+/// PassRegistry - This class manages the registration and initialization of
+/// the pass subsystem at application startup, and assists the PassManager
+/// in resolving pass dependencies.
+/// NOTE: PassRegistry is NOT thread-safe. If you want to use LLVM on multiple
+/// threads simultaneously, you will need to use a separate PassRegistry on
+/// each thread.
class PassRegistry {
- /// Guards the contents of this class.
- mutable sys::SmartMutex<true> Lock;
-
- /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
- typedef std::map<const void*, const PassInfo*> MapType;
- MapType PassInfoMap;
-
- typedef StringMap<const PassInfo*> StringMapType;
- StringMapType PassInfoStringMap;
-
- /// AnalysisGroupInfo - Keep track of information for each analysis group.
- struct AnalysisGroupInfo {
- std::set<const PassInfo *> Implementations;
- };
- std::map<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap;
-
- std::vector<PassRegistrationListener*> Listeners;
-
+ mutable void *pImpl;
+ void *getImpl() const;
+
public:
+ PassRegistry() : pImpl(0) { }
+ ~PassRegistry();
+
+ /// getPassRegistry - Access the global registry object, which is
+ /// automatically initialized at application launch and destroyed by
+ /// llvm_shutdown.
static PassRegistry *getPassRegistry();
+ /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+ /// type identifier (&MyPass::ID).
const PassInfo *getPassInfo(const void *TI) const;
+
+ /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+ /// argument string.
const PassInfo *getPassInfo(StringRef Arg) const;
- void registerPass(const PassInfo &PI);
+ /// registerPass - Register a pass (by means of its PassInfo) with the
+ /// registry. Required in order to use the pass with a PassManager.
+ void registerPass(const PassInfo &PI, bool ShouldFree = false);
+
+ /// unregisterPass - Unregister a pass (by means of its PassInfo) with the
+ /// registry.
void unregisterPass(const PassInfo &PI);
- /// Analysis Group Mechanisms.
+ /// registerAnalysisGroup - Register an analysis group (or a pass implementing
+ /// an analysis group) with the registry. Like registerPass, this is required
+ /// in order for a PassManager to be able to use this group/pass.
void registerAnalysisGroup(const void *InterfaceID, const void *PassID,
- PassInfo& Registeree, bool isDefault);
+ PassInfo& Registeree, bool isDefault,
+ bool ShouldFree = false);
+ /// enumerateWith - Enumerate the registered passes, calling the provided
+ /// PassRegistrationListener's passEnumerate() callback on each of them.
void enumerateWith(PassRegistrationListener *L);
- void addRegistrationListener(PassRegistrationListener* L);
+
+ /// addRegistrationListener - Register the given PassRegistrationListener
+ /// to receive passRegistered() callbacks whenever a new pass is registered.
+ void addRegistrationListener(PassRegistrationListener *L);
+
+ /// removeRegistrationListener - Unregister a PassRegistrationListener so that
+ /// it no longer receives passRegistered() callbacks.
void removeRegistrationListener(PassRegistrationListener *L);
};
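The registry's maps, listener list and lock now hide behind the opaque pImpl pointer, which is what lets this header drop StringMap, <map>, <set>, <vector> and the mutex include. Lookup usage, with "licm" and MyPass as hypothetical names:

  PassRegistry *Registry = PassRegistry::getPassRegistry();

  const PassInfo *ByID  = Registry->getPassInfo(&MyPass::ID);       // type identifier
  const PassInfo *ByArg = Registry->getPassInfo(StringRef("licm")); // argument string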
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
index 0f559d6..0827909 100644
--- a/contrib/llvm/include/llvm/PassSupport.h
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -23,6 +23,9 @@
#include "Pass.h"
#include "llvm/PassRegistry.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/Atomic.h"
+#include <vector>
namespace llvm {
@@ -54,17 +57,14 @@ public:
NormalCtor_t normal, bool isCFGOnly, bool is_analysis)
: PassName(name), PassArgument(arg), PassID(pi),
IsCFGOnlyPass(isCFGOnly),
- IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) {
- PassRegistry::getPassRegistry()->registerPass(*this);
- }
+ IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) { }
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass. This version is for use by analysis groups; it
/// does not auto-register the pass.
PassInfo(const char *name, const void *pi)
: PassName(name), PassArgument(""), PassID(pi),
IsCFGOnlyPass(false),
- IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(0) {
- }
+ IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(0) { }
/// getPassName - Return the friendly name for the pass, never returns null
///
@@ -129,8 +129,50 @@ private:
PassInfo(const PassInfo &); // do not implement
};
+#define CALL_ONCE_INITIALIZATION(function) \
+ static volatile sys::cas_flag initialized = 0; \
+ sys::cas_flag old_val = sys::CompareAndSwap(&initialized, 1, 0); \
+ if (old_val == 0) { \
+ function(Registry); \
+ sys::MemoryFence(); \
+ initialized = 2; \
+ } else { \
+ sys::cas_flag tmp = initialized; \
+ sys::MemoryFence(); \
+ while (tmp != 2) { \
+ tmp = initialized; \
+ sys::MemoryFence(); \
+ } \
+ }
+
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis) \
- static RegisterPass<passName> passName ## _info(arg, name, cfg, analysis)
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ return PI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) {
+
+#define INITIALIZE_PASS_DEPENDENCY(depName) \
+ initialize##depName##Pass(Registry);
+#define INITIALIZE_AG_DEPENDENCY(depName) \
+ initialize##depName##AnalysisGroup(Registry);
+
+#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis) \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ return PI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
template<typename PassName>
Pass *callDefaultCtor() { return new PassName(); }
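CALL_ONCE_INITIALIZATION is a fence-guarded once-latch: the thread that wins the compare-and-swap runs the initializer and publishes 2, while losers spin until they observe it. A hypothetical pass with one dependency then registers itself with the BEGIN/DEPENDENCY/END triple (MyPass and the DominatorTree dependency are illustrative assumptions):

  // In MyPass.cpp:
  INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My example pass",
                        /*cfg=*/false, /*analysis=*/false)
  INITIALIZE_PASS_DEPENDENCY(DominatorTree)
  INITIALIZE_PASS_END(MyPass, "my-pass", "My example pass",
                      /*cfg=*/false, /*analysis=*/false)

  // Clients force registration before building a PassManager:
  //   initializeMyPassPass(*PassRegistry::getPassRegistry());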
@@ -161,7 +203,7 @@ struct RegisterPass : public PassInfo {
: PassInfo(Name, PassArg, &passName::ID,
PassInfo::NormalCtor_t(callDefaultCtor<passName>),
CFGOnly, is_analysis) {
-
+ PassRegistry::getPassRegistry()->registerPass(*this);
}
};
@@ -186,7 +228,7 @@ struct RegisterPass : public PassInfo {
/// a nice name with the interface.
///
class RegisterAGBase : public PassInfo {
-protected:
+public:
RegisterAGBase(const char *Name,
const void *InterfaceID,
const void *PassID = 0,
@@ -206,9 +248,52 @@ struct RegisterAnalysisGroup : public RegisterAGBase {
}
};
+#define INITIALIZE_ANALYSIS_GROUP(agName, name, defaultPass) \
+ static void* initialize##agName##AnalysisGroupOnce(PassRegistry &Registry) { \
+ initialize##defaultPass##Pass(Registry); \
+ PassInfo *AI = new PassInfo(name, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, 0, *AI, false, true); \
+ return AI; \
+ } \
+ void llvm::initialize##agName##AnalysisGroup(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##agName##AnalysisGroupOnce) \
+ }
+
+
#define INITIALIZE_AG_PASS(passName, agName, arg, name, cfg, analysis, def) \
- static RegisterPass<passName> passName ## _info(arg, name, cfg, analysis); \
- static RegisterAnalysisGroup<agName, def> passName ## _ag(passName ## _info)
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ if (!def) initialize##agName##AnalysisGroup(Registry); \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ \
+ PassInfo *AI = new PassInfo(name, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, & passName ::ID, \
+ *AI, def, true); \
+ return AI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+
+#define INITIALIZE_AG_PASS_BEGIN(passName, agName, arg, n, cfg, analysis, def) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ if (!def) initialize##agName##AnalysisGroup(Registry);
+
+#define INITIALIZE_AG_PASS_END(passName, agName, arg, n, cfg, analysis, def) \
+ PassInfo *PI = new PassInfo(n, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ \
+ PassInfo *AI = new PassInfo(n, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, & passName ::ID, \
+ *AI, def, true); \
+ return AI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
//===---------------------------------------------------------------------------
/// PassRegistrationListener class - This class is meant to be derived from by
diff --git a/contrib/llvm/include/llvm/System/AIXDataTypesFix.h b/contrib/llvm/include/llvm/Support/AIXDataTypesFix.h
index 8dbf02f..a9a9147 100644
--- a/contrib/llvm/include/llvm/System/AIXDataTypesFix.h
+++ b/contrib/llvm/include/llvm/Support/AIXDataTypesFix.h
@@ -1,4 +1,4 @@
-//===-- llvm/System/AIXDataTypesFix.h - Fix datatype defs ------*- C++ -*-===//
+//===-- llvm/Support/AIXDataTypesFix.h - Fix datatype defs ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/Support/AlignOf.h b/contrib/llvm/include/llvm/Support/AlignOf.h
index 6a7a1a6..cebfa79 100644
--- a/contrib/llvm/include/llvm/Support/AlignOf.h
+++ b/contrib/llvm/include/llvm/Support/AlignOf.h
@@ -49,12 +49,12 @@ struct AlignOf {
};
-/// alignof - A templated function that returns the mininum alignment of
+/// alignOf - A templated function that returns the minimum alignment
/// of a type. This provides no extra functionality beyond the AlignOf
/// class besides some cosmetic cleanliness. Example usage:
-/// alignof<int>() returns the alignment of an int.
+/// alignOf<int>() returns the alignment of an int.
template <typename T>
-static inline unsigned alignof() { return AlignOf<T>::Alignment; }
+static inline unsigned alignOf() { return AlignOf<T>::Alignment; }
} // end namespace llvm
#endif
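The rename from alignof to alignOf steers clear of the C++0x alignof keyword; usage is unchanged apart from the capital O:

  #include "llvm/Support/AlignOf.h"

  struct Packet { char Tag; double Payload; };

  unsigned A = llvm::alignOf<Packet>(); // minimum alignment of Packet
  unsigned B = llvm::alignOf<int>();    // minimum alignment of int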
diff --git a/contrib/llvm/include/llvm/Support/Allocator.h b/contrib/llvm/include/llvm/Support/Allocator.h
index 4a7251f..c680709 100644
--- a/contrib/llvm/include/llvm/Support/Allocator.h
+++ b/contrib/llvm/include/llvm/Support/Allocator.h
@@ -16,13 +16,15 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstddef>
namespace llvm {
+template <typename T> struct ReferenceAdder { typedef T& result; };
+template <typename T> struct ReferenceAdder<T&> { typedef T result; };
class MallocAllocator {
public:
@@ -201,7 +203,7 @@ public:
char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr :
(char *)Slab + Slab->Size;
for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += sizeof(T)) {
- Ptr = Allocator.AlignPtr(Ptr, alignof<T>());
+ Ptr = Allocator.AlignPtr(Ptr, alignOf<T>());
if (Ptr + sizeof(T) <= End)
reinterpret_cast<T*>(Ptr)->~T();
}
@@ -221,16 +223,12 @@ public:
inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
struct S {
char c;
-#ifdef __GNUC__
- char x __attribute__((aligned));
-#else
union {
double D;
long double LD;
long long L;
void *P;
} x;
-#endif
};
return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
offsetof(S, x)));
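The operator new above lets clients placement-allocate from a BumpPtrAllocator with a conservative alignment derived from the S struct; Node below is a hypothetical payload:

  #include "llvm/Support/Allocator.h"

  struct Node { unsigned Key; Node *Next; };

  llvm::BumpPtrAllocator Alloc;
  Node *N = new (Alloc) Node(); // routed through the operator new above
  // There is no matching delete; memory is reclaimed wholesale when Alloc
  // is destroyed (or Reset), so Node must not rely on its destructor.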
diff --git a/contrib/llvm/include/llvm/System/Atomic.h b/contrib/llvm/include/llvm/Support/Atomic.h
index fc19369..1a6c606 100644
--- a/contrib/llvm/include/llvm/System/Atomic.h
+++ b/contrib/llvm/include/llvm/Support/Atomic.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
+//===- llvm/Support/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_ATOMIC_H
#define LLVM_SYSTEM_ATOMIC_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
namespace sys {
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
index 78254ae..6739255 100644
--- a/contrib/llvm/include/llvm/Support/COFF.h
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -11,7 +11,7 @@
//
// Structures and enums defined within this file were created using
// information from Microsoft's publicly available PE/COFF format document:
-//
+//
// Microsoft Portable Executable and Common Object File Format Specification
// Revision 8.1 - February 15, 2008
//
@@ -23,7 +23,7 @@
#ifndef LLVM_SUPPORT_WIN_COFF_H
#define LLVM_SUPPORT_WIN_COFF_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cstring>
namespace llvm {
@@ -69,7 +69,7 @@ namespace COFF {
SF_ClassMask = 0x00FF0000,
SF_ClassShift = 16,
- SF_WeakReference = 0x01000000
+ SF_WeakExternal = 0x01000000
};
enum SymbolSectionNumber {
@@ -133,13 +133,13 @@ namespace COFF {
};
enum SymbolComplexType {
- IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable.
+ IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable.
IMAGE_SYM_DTYPE_POINTER = 1, ///< A pointer to base type.
IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
IMAGE_SYM_DTYPE_ARRAY = 3, ///< An array of base type.
-
+
/// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
- SCT_COMPLEX_TYPE_SHIFT = 8
+ SCT_COMPLEX_TYPE_SHIFT = 4
};
struct section {
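The SCT_COMPLEX_TYPE_SHIFT correction (8 to 4) matters because the PE/COFF spec packs the base type into the low 4 bits with the derived type directly above it. Forming a function symbol's type then looks like this (the zero base type is written literally rather than assuming an enum name):

  using namespace llvm::COFF;

  // base + (derived << SCT_COMPLEX_TYPE_SHIFT); with the shift at 4 this
  // yields 0x20, the value the Microsoft spec gives for a plain function.
  uint16_t Ty = 0 | (IMAGE_SYM_DTYPE_FUNCTION << SCT_COMPLEX_TYPE_SHIFT);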
diff --git a/contrib/llvm/include/llvm/Support/CallSite.h b/contrib/llvm/include/llvm/Support/CallSite.h
index 9b6a409..8a998a8 100644
--- a/contrib/llvm/include/llvm/Support/CallSite.h
+++ b/contrib/llvm/include/llvm/Support/CallSite.h
@@ -52,12 +52,7 @@ public:
CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
CallSiteBase(ValTy *II) { *this = get(II); }
- CallSiteBase(InstrTy *II) {
- assert(II && "Null instruction given?");
- *this = get(II);
- assert(I.getPointer() && "Not a call?");
- }
-
+protected:
/// CallSiteBase::get - This static method is sort of like a constructor. It
/// will create an appropriate call site for a Call or Invoke instruction, but
/// it can also create a null initialized CallSiteBase object for something
@@ -72,7 +67,7 @@ public:
}
return CallSiteBase();
}
-
+public:
/// isCall - true if a CallInst is enclosed.
/// Note that !isCall() does not mean it is an InvokeInst enclosed,
/// it also could signify a NULL Instruction pointer.
@@ -282,16 +277,6 @@ public:
bool operator==(const CallSite &CS) const { return I == CS.I; }
bool operator!=(const CallSite &CS) const { return I != CS.I; }
-
- /// CallSite::get - This static method is sort of like a constructor. It will
- /// create an appropriate call site for a Call or Invoke instruction, but it
- /// can also create a null initialized CallSite object for something which is
- /// NOT a call site.
- ///
- static CallSite get(Value *V) {
- return Base::get(V);
- }
-
bool operator<(const CallSite &CS) const {
return getInstruction() < CS.getInstruction();
}
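With the InstrTy* constructor and the public get() gone from the base class, call sites get built from known CallInst/InvokeInst pointers (construction from a bare Value stays available through the base's ValTy* constructor). A sketch, with CI assumed to be a CallInst*:

  CallSite CS(CI);
  if (CS.isCall()) {
    // ... inspect arguments, attributes, and so on.
  }

  // operator< orders by instruction pointer, so CallSite works as a map key:
  std::map<CallSite, unsigned> VisitCount;
  ++VisitCount[CS];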
diff --git a/contrib/llvm/include/llvm/Support/Casting.h b/contrib/llvm/include/llvm/Support/Casting.h
index c589171..6bb9806 100644
--- a/contrib/llvm/include/llvm/Support/Casting.h
+++ b/contrib/llvm/include/llvm/Support/Casting.h
@@ -232,8 +232,8 @@ inline typename cast_retty<X, Y>::ret_type dyn_cast(const Y &Val) {
// value is accepted.
//
template <class X, class Y>
-inline typename cast_retty<X, Y>::ret_type dyn_cast_or_null(const Y &Val) {
- return (Val && isa<X>(Val)) ? cast<X, Y>(Val) : 0;
+inline typename cast_retty<X, Y*>::ret_type dyn_cast_or_null(Y *Val) {
+ return (Val && isa<X>(Val)) ? cast<X>(Val) : 0;
}
} // End llvm namespace
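dyn_cast_or_null now takes the pointer directly, mirroring plain dyn_cast while still tolerating null input; a sketch:

  Value *V = /* ... possibly null ... */;
  if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
    // V was non-null and is an Instruction.
  }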
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
index 14b36f8..67f0fd7 100644
--- a/contrib/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -15,48 +15,50 @@
#ifndef LLVM_SUPPORT_COMPILER_H
#define LLVM_SUPPORT_COMPILER_H
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
/// functions, making them private to any shared library they are linked into.
-
-/// LLVM_GLOBAL_VISIBILITY - If a class marked with this attribute is linked
-/// into a shared library, then the class will be accessible from outside the
-/// the library. Can also be used to mark variables and functions, making them
-/// accessible from outside any shared library they are linked into.
-#if defined(__MINGW32__) || defined(__CYGWIN__)
-#define LLVM_LIBRARY_VISIBILITY
-#define LLVM_GLOBAL_VISIBILITY __declspec(dllexport)
-#elif (__GNUC__ >= 4)
+#if (__GNUC__ >= 4) && !defined(__MINGW32__) && !defined(__CYGWIN__)
#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
-#define LLVM_GLOBAL_VISIBILITY __attribute__ ((visibility("default")))
#else
#define LLVM_LIBRARY_VISIBILITY
-#define LLVM_GLOBAL_VISIBILITY
#endif
#if (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
-#define ATTRIBUTE_USED __attribute__((__used__))
+#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
#else
-#define ATTRIBUTE_USED
+#define LLVM_ATTRIBUTE_USED
#endif
+// Some compilers warn about unused functions. When a function is sometimes
+// used or not depending on build settings (e.g. a function only called from
+// within "assert"), this attribute can be used to suppress such warnings.
+//
+// However, it shouldn't be used for unused *variables*, as those have a much
+// more portable solution:
+// (void)unused_var_name;
+// Prefer cast-to-void wherever it is sufficient.
#if (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
-#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
-#define ATTRIBUTE_UNUSED
+#define LLVM_ATTRIBUTE_UNUSED
#endif
#ifdef __GNUC__ // aka 'ATTRIBUTE_CONST' but following LLVM Conventions.
-#define ATTRIBUTE_READNONE __attribute__((__const__))
+#define LLVM_ATTRIBUTE_READNONE __attribute__((__const__))
#else
-#define ATTRIBUTE_READNONE
+#define LLVM_ATTRIBUTE_READNONE
#endif
#ifdef __GNUC__ // aka 'ATTRIBUTE_PURE' but following LLVM Conventions.
-#define ATTRIBUTE_READONLY __attribute__((__pure__))
+#define LLVM_ATTRIBUTE_READONLY __attribute__((__pure__))
#else
-#define ATTRIBUTE_READONLY
+#define LLVM_ATTRIBUTE_READONLY
#endif
#if (__GNUC__ >= 4)
@@ -78,34 +80,50 @@
#define TEMPLATE_INSTANTIATION(X)
#endif
-// DISABLE_INLINE - On compilers where we have a directive to do so, mark a
-// method "not for inlining".
+// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
+// mark a method "not for inlining".
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-#define DISABLE_INLINE __attribute__((noinline))
+#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
-#define DISABLE_INLINE __declspec(noinline)
+#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
-#define DISABLE_INLINE
+#define LLVM_ATTRIBUTE_NOINLINE
#endif
-// ALWAYS_INLINE - On compilers where we have a directive to do so, mark a
-// method "always inline" because it is performance sensitive.
-// GCC 3.4 supported this but is buggy in various cases and produces
-// unimplemented errors, just use it in GCC 4.0 and later.
+// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
+// so, mark a method "always inline" because it is performance sensitive. GCC
+// 3.4 supported this but was buggy in various cases and produced
+// "unimplemented" errors; only use it with GCC 4.0 and later.
#if __GNUC__ > 3
-#define ALWAYS_INLINE __attribute__((always_inline))
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
-// TODO: No idea how to do this with MSVC.
-#define ALWAYS_INLINE
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE
#endif
#ifdef __GNUC__
-#define NORETURN __attribute__((noreturn))
+#define LLVM_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define LLVM_ATTRIBUTE_NORETURN
+#endif
+
+// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
+#if __has_feature(attribute_deprecated_with_message)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated(message)))
+#elif defined(__GNUC__)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated))
#elif defined(_MSC_VER)
-#define NORETURN __declspec(noreturn)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ __declspec(deprecated(message)) decl
#else
-#define NORETURN
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl
#endif
#endif
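
A usage sketch for the new deprecation macro (not part of the patch;
OldCountUses and the message are hypothetical):

  #include "llvm/Support/Compiler.h"

  // Expands to the compiler-appropriate attribute; the message is attached
  // only where the compiler supports one.
  LLVM_ATTRIBUTE_DEPRECATED(unsigned OldCountUses(void *V),
                            "use countUses() instead");
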
diff --git a/contrib/llvm/include/llvm/Support/ConstantFolder.h b/contrib/llvm/include/llvm/Support/ConstantFolder.h
index ea6c5fd..bd3765d 100644
--- a/contrib/llvm/include/llvm/Support/ConstantFolder.h
+++ b/contrib/llvm/include/llvm/Support/ConstantFolder.h
@@ -33,50 +33,34 @@ public:
// Binary Operators
//===--------------------------------------------------------------------===//
- Constant *CreateAdd(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getAdd(LHS, RHS);
- }
- Constant *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNSWAdd(LHS, RHS);
- }
- Constant *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNUWAdd(LHS, RHS);
+ Constant *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFAdd(LHS, RHS);
}
- Constant *CreateSub(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getSub(LHS, RHS);
- }
- Constant *CreateNSWSub(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNSWSub(LHS, RHS);
- }
- Constant *CreateNUWSub(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNUWSub(LHS, RHS);
+ Constant *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFSub(LHS, RHS);
}
- Constant *CreateMul(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getMul(LHS, RHS);
- }
- Constant *CreateNSWMul(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNSWMul(LHS, RHS);
- }
- Constant *CreateNUWMul(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getNUWMul(LHS, RHS);
+ Constant *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFMul(LHS, RHS);
}
- Constant *CreateUDiv(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getUDiv(LHS, RHS);
- }
- Constant *CreateSDiv(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getSDiv(LHS, RHS);
+ Constant *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getUDiv(LHS, RHS, isExact);
}
- Constant *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getExactSDiv(LHS, RHS);
+ Constant *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getSDiv(LHS, RHS, isExact);
}
Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFDiv(LHS, RHS);
@@ -90,14 +74,17 @@ public:
Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getFRem(LHS, RHS);
}
- Constant *CreateShl(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getShl(LHS, RHS);
+ Constant *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
}
- Constant *CreateLShr(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getLShr(LHS, RHS);
+ Constant *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getLShr(LHS, RHS, isExact);
}
- Constant *CreateAShr(Constant *LHS, Constant *RHS) const {
- return ConstantExpr::getAShr(LHS, RHS);
+ Constant *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getAShr(LHS, RHS, isExact);
}
Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getAnd(LHS, RHS);
@@ -118,14 +105,9 @@ public:
// Unary Operators
//===--------------------------------------------------------------------===//
- Constant *CreateNeg(Constant *C) const {
- return ConstantExpr::getNeg(C);
- }
- Constant *CreateNSWNeg(Constant *C) const {
- return ConstantExpr::getNSWNeg(C);
- }
- Constant *CreateNUWNeg(Constant *C) const {
- return ConstantExpr::getNUWNeg(C);
+ Constant *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getNeg(C, HasNUW, HasNSW);
}
Constant *CreateFNeg(Constant *C) const {
return ConstantExpr::getFNeg(C);
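
The separate NSW/NUW/Exact entry points collapse into boolean parameters on
the plain creators. A sketch of the new call style (illustrative only):

  #include "llvm/Support/ConstantFolder.h"
  using namespace llvm;

  Constant *foldNSWAdd(const ConstantFolder &F, Constant *L, Constant *R) {
    // Formerly F.CreateNSWAdd(L, R); the overflow flags are now arguments.
    return F.CreateAdd(L, R, /*HasNUW=*/false, /*HasNSW=*/true);
  }
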
diff --git a/contrib/llvm/include/llvm/Support/ConstantRange.h b/contrib/llvm/include/llvm/Support/ConstantRange.h
index 29086b2..ced3a2c 100644
--- a/contrib/llvm/include/llvm/Support/ConstantRange.h
+++ b/contrib/llvm/include/llvm/Support/ConstantRange.h
@@ -33,7 +33,7 @@
#define LLVM_SUPPORT_CONSTANT_RANGE_H
#include "llvm/ADT/APInt.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -54,7 +54,7 @@ public:
/// @brief Initialize a range of values explicitly. This will assert out if
/// Lower==Upper and Lower != Min or Max value for its type. It will also
/// assert out if the two APInt's are not the same bit width.
- ConstantRange(const APInt& Lower, const APInt& Upper);
+ ConstantRange(const APInt &Lower, const APInt &Upper);
/// makeICmpRegion - Produce the smallest range that contains all values that
/// might satisfy the comparison specified by Pred when compared to any value
@@ -92,6 +92,11 @@ public:
///
bool isWrappedSet() const;
+ /// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
+ /// its bitwidth, for example: i8 [120, 140).
+ ///
+ bool isSignWrappedSet() const;
+
/// contains - Return true if the specified value is in the set.
///
bool contains(const APInt &Val) const;
@@ -219,6 +224,14 @@ public:
/// \p Other.
ConstantRange udiv(const ConstantRange &Other) const;
+ /// binaryAnd - return a new range representing the possible values resulting
+ /// from a binary-and of a value in this range by a value in \p Other.
+ ConstantRange binaryAnd(const ConstantRange &Other) const;
+
+ /// binaryOr - return a new range representing the possible values resulting
+ /// from a binary-or of a value in this range by a value in \p Other.
+ ConstantRange binaryOr(const ConstantRange &Other) const;
+
/// shl - Return a new range representing the possible values resulting
/// from a left shift of a value in this range by a value in \p Other.
/// TODO: This isn't fully implemented yet.
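
The i8 [120, 140) example from the new doc comment, spelled out as a sketch
(not part of the patch):

  #include "llvm/Support/ConstantRange.h"
  using namespace llvm;

  bool demoSignWrap() {
    // [120, 140) is contiguous unsigned, but signed it runs +120..+127 and
    // then wraps through INT8_MIN down to -116 (140 as a signed 8-bit value).
    ConstantRange R(APInt(8, 120), APInt(8, 140));
    return R.isSignWrappedSet();   // expected: true
  }
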
diff --git a/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h b/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h
index d66609f..2e9b5d4 100644
--- a/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h
+++ b/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h
@@ -67,6 +67,14 @@ public:
/// the backtrace of the crash on failures.
bool RunSafely(void (*Fn)(void*), void *UserData);
+ /// \brief Execute the provided callback function (with the given arguments) in
+ /// a protected context which is run in another thread (optionally with a
+ /// requested stack size).
+ ///
+ /// See RunSafely() and llvm_execute_on_thread().
+ bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
+ unsigned RequestedStackSize = 0);
+
/// \brief Explicitly trigger a crash recovery in the current process, and
/// return failure from RunSafely(). This function does not return.
void HandleCrash();
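
A sketch of the threaded variant (illustrative only; DoWork and Payload are
hypothetical):

  #include "llvm/Support/CrashRecoveryContext.h"
  using namespace llvm;

  static void DoWork(void *Opaque) {
    // ... stack-hungry or crash-prone work on *Opaque ...
  }

  bool runGuarded(void *Payload) {
    CrashRecoveryContext CRC;
    // Run DoWork on its own thread with an 8 MiB stack; returns false if
    // the callback crashed and recovery was engaged.
    return CRC.RunSafelyOnThread(DoWork, Payload, 8 << 20);
  }
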
diff --git a/contrib/llvm/include/llvm/Support/DataTypes.h.in b/contrib/llvm/include/llvm/Support/DataTypes.h.in
new file mode 100644
index 0000000..5965e8c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DataTypes.h.in
@@ -0,0 +1,111 @@
+/*===-- include/Support/DataTypes.h - Define fixed size types ----*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file contains definitions to figure out the size of _HOST_ data types.*|
+|* This file is important because different host OS's define different macros,*|
+|* which makes portability tough. This file exports the following *|
+|* definitions: *|
+|* *|
+|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
+|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
+|* *|
+|* No library is required when using these functions. *|
+|* *|
+|*===----------------------------------------------------------------------===*/
+
+/* Please leave this file C-compatible. */
+
+#ifndef SUPPORT_DATATYPES_H
+#define SUPPORT_DATATYPES_H
+
+#undef HAVE_SYS_TYPES_H
+#undef HAVE_INTTYPES_H
+#undef HAVE_STDINT_H
+#undef HAVE_UINT64_T
+#undef HAVE_U_INT64_T
+
+#ifdef __cplusplus
+#include <cmath>
+#else
+#include <math.h>
+#endif
+
+/* Note that this header's correct operation depends on __STDC_LIMIT_MACROS
+ being defined. We would define it here, but in order to prevent Bad Things
+ happening when system headers or C++ STL headers include stdint.h before we
+ define it here, we define it on the g++ command line (in Makefile.rules). */
+#if !defined(__STDC_LIMIT_MACROS)
+# error "Must #define __STDC_LIMIT_MACROS before #including System/DataTypes.h"
+#endif
+
+#if !defined(__STDC_CONSTANT_MACROS)
+# error "Must #define __STDC_CONSTANT_MACROS before " \
+ "#including System/DataTypes.h"
+#endif
+
+/* Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. */
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+
+#ifdef _AIX
+#include "llvm/Support/AIXDataTypesFix.h"
+#endif
+
+/* Handle incorrect definition of uint64_t as u_int64_t */
+#ifndef HAVE_UINT64_T
+#ifdef HAVE_U_INT64_T
+typedef u_int64_t uint64_t;
+#else
+# error "Don't have a definition for uint64_t on this platform"
+#endif
+#endif
+
+#ifdef _OpenBSD_
+#define INT8_MAX 127
+#define INT8_MIN -128
+#define UINT8_MAX 255
+#define INT16_MAX 32767
+#define INT16_MIN -32768
+#define UINT16_MAX 65535
+#define INT32_MAX 2147483647
+#define INT32_MIN -2147483648
+#define UINT32_MAX 4294967295U
+#endif
+
+/* Set defaults for constants which we cannot find. */
+#if !defined(INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+#endif
+#if !defined(INT64_MIN)
+# define INT64_MIN ((-INT64_MAX)-1)
+#endif
+#if !defined(UINT64_MAX)
+# define UINT64_MAX 0xffffffffffffffffULL
+#endif
+
+#if __GNUC__ > 3
+#define END_WITH_NULL __attribute__((sentinel))
+#else
+#define END_WITH_NULL
+#endif
+
+#ifndef HUGE_VALF
+#define HUGE_VALF (float)HUGE_VAL
+#endif
+
+#endif /* SUPPORT_DATATYPES_H */
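
A sketch of the macro discipline the header enforces (the definitions
normally come from Makefile.rules rather than from source files):

  /* Either pass the macros on the command line:
   *   c++ -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS file.cpp
   * or define them before the first include that drags in stdint.h: */
  #define __STDC_LIMIT_MACROS
  #define __STDC_CONSTANT_MACROS
  #include "llvm/Support/DataTypes.h"
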
diff --git a/contrib/llvm/include/llvm/System/Disassembler.h b/contrib/llvm/include/llvm/Support/Disassembler.h
index e11e792..6d1cc0f 100644
--- a/contrib/llvm/include/llvm/System/Disassembler.h
+++ b/contrib/llvm/include/llvm/Support/Disassembler.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SYSTEM_DISASSEMBLER_H
#define LLVM_SYSTEM_DISASSEMBLER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.h b/contrib/llvm/include/llvm/Support/Dwarf.h
index 3ca8d96..5d0b5a9 100644
--- a/contrib/llvm/include/llvm/Support/Dwarf.h
+++ b/contrib/llvm/include/llvm/Support/Dwarf.h
@@ -22,7 +22,8 @@ namespace llvm {
// Debug info constants.
enum {
- LLVMDebugVersion = (8 << 16), // Current version of debug information.
+ LLVMDebugVersion = (9 << 16), // Current version of debug information.
+ LLVMDebugVersion8 = (8 << 16), // Constant for version 8.
LLVMDebugVersion7 = (7 << 16), // Constant for version 7.
LLVMDebugVersion6 = (6 << 16), // Constant for version 6.
LLVMDebugVersion5 = (5 << 16), // Constant for version 5.
@@ -44,11 +45,9 @@ enum llvm_dwarf_constants {
// llvm mock tags
DW_TAG_invalid = ~0U, // Tag for invalid results.
- DW_TAG_anchor = 0, // Tag for descriptor anchors.
DW_TAG_auto_variable = 0x100, // Tag for local (auto) variables.
DW_TAG_arg_variable = 0x101, // Tag for argument variables.
DW_TAG_return_variable = 0x102, // Tag for return variables.
-
DW_TAG_vector_type = 0x103, // Tag for vector types.
DW_TAG_user_base = 0x1000, // Recommended base for user tags.
@@ -118,6 +117,7 @@ enum dwarf_constants {
DW_TAG_imported_unit = 0x3d,
DW_TAG_condition = 0x3f,
DW_TAG_shared_type = 0x40,
+ DW_TAG_rvalue_reference_type = 0x41,
DW_TAG_lo_user = 0x4080,
DW_TAG_hi_user = 0xffff,
@@ -509,6 +509,7 @@ enum dwarf_constants {
DW_DSC_range = 0x01,
// Line Number Standard Opcode Encodings
+ DW_LNS_extended_op = 0x00,
DW_LNS_copy = 0x01,
DW_LNS_advance_pc = 0x02,
DW_LNS_advance_line = 0x03,
diff --git a/contrib/llvm/include/llvm/System/DynamicLibrary.h b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
index 745b8f8..e6d9ff5 100644
--- a/contrib/llvm/include/llvm/System/DynamicLibrary.h
+++ b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
@@ -1,4 +1,4 @@
-//===-- llvm/System/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
+//===-- llvm/Support/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/Support/DynamicLinker.h b/contrib/llvm/include/llvm/Support/DynamicLinker.h
deleted file mode 100644
index b60ffa8..0000000
--- a/contrib/llvm/include/llvm/Support/DynamicLinker.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- llvm/Support/DynamicLinker.h - Portable Dynamic Linker --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Lightweight interface to dynamic library linking and loading, and dynamic
-// symbol lookup functionality, in whatever form the operating system
-// provides it.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_DYNAMICLINKER_H
-#define LLVM_SUPPORT_DYNAMICLINKER_H
-
-#include <string>
-
-namespace llvm {
-
-/// LinkDynamicObject - Load the named file as a dynamic library
-/// and link it with the currently running process. Returns false
-/// on success, true if there is an error (and sets ErrorMessage
-/// if it is not NULL). Analogous to dlopen().
-///
-bool LinkDynamicObject (const char *filename, std::string *ErrorMessage);
-
-/// GetAddressOfSymbol - Returns the address of the named symbol in
-/// the currently running process, as reported by the dynamic linker,
-/// or NULL if the symbol does not exist or some other error has
-/// occurred.
-///
-void *GetAddressOfSymbol (const char *symbolName);
-void *GetAddressOfSymbol (const std::string &symbolName);
-
-} // End llvm namespace
-
-#endif // SUPPORT_DYNAMICLINKER_H
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
index 83478b7..cc72bd5 100644
--- a/contrib/llvm/include/llvm/Support/ELF.h
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -20,7 +20,7 @@
#ifndef LLVM_SUPPORT_ELF_H
#define LLVM_SUPPORT_ELF_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cstring>
namespace llvm {
@@ -126,25 +126,27 @@ enum {
// Machine architectures
enum {
- EM_NONE = 0, // No machine
- EM_M32 = 1, // AT&T WE 32100
- EM_SPARC = 2, // SPARC
- EM_386 = 3, // Intel 386
- EM_68K = 4, // Motorola 68000
- EM_88K = 5, // Motorola 88000
- EM_486 = 6, // Intel 486 (deprecated)
- EM_860 = 7, // Intel 80860
- EM_MIPS = 8, // MIPS R3000
- EM_PPC = 20, // PowerPC
- EM_PPC64 = 21, // PowerPC64
- EM_ARM = 40, // ARM
- EM_ALPHA = 41, // DEC Alpha
- EM_SPARCV9 = 43, // SPARC V9
- EM_X86_64 = 62 // AMD64
+ EM_NONE = 0, // No machine
+ EM_M32 = 1, // AT&T WE 32100
+ EM_SPARC = 2, // SPARC
+ EM_386 = 3, // Intel 386
+ EM_68K = 4, // Motorola 68000
+ EM_88K = 5, // Motorola 88000
+ EM_486 = 6, // Intel 486 (deprecated)
+ EM_860 = 7, // Intel 80860
+ EM_MIPS = 8, // MIPS R3000
+ EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
+ EM_ARM = 40, // ARM
+ EM_ALPHA = 41, // DEC Alpha
+ EM_SPARCV9 = 43, // SPARC V9
+ EM_X86_64 = 62, // AMD64
+ EM_MBLAZE = 47787 // Xilinx MicroBlaze
};
// Object file classes.
enum {
+ ELFCLASSNONE = 0,
ELFCLASS32 = 1, // 32-bit object file
ELFCLASS64 = 2 // 64-bit object file
};
@@ -231,12 +233,206 @@ enum {
R_386_GOTOFF = 9,
R_386_GOTPC = 10,
R_386_32PLT = 11,
+ R_386_TLS_TPOFF = 14,
+ R_386_TLS_IE = 15,
+ R_386_TLS_GOTIE = 16,
+ R_386_TLS_LE = 17,
+ R_386_TLS_GD = 18,
+ R_386_TLS_LDM = 19,
R_386_16 = 20,
R_386_PC16 = 21,
R_386_8 = 22,
- R_386_PC8 = 23
+ R_386_PC8 = 23,
+ R_386_TLS_GD_32 = 24,
+ R_386_TLS_GD_PUSH = 25,
+ R_386_TLS_GD_CALL = 26,
+ R_386_TLS_GD_POP = 27,
+ R_386_TLS_LDM_32 = 28,
+ R_386_TLS_LDM_PUSH = 29,
+ R_386_TLS_LDM_CALL = 30,
+ R_386_TLS_LDM_POP = 31,
+ R_386_TLS_LDO_32 = 32,
+ R_386_TLS_IE_32 = 33,
+ R_386_TLS_LE_32 = 34,
+ R_386_TLS_DTPMOD32 = 35,
+ R_386_TLS_DTPOFF32 = 36,
+ R_386_TLS_TPOFF32 = 37,
+ R_386_TLS_GOTDESC = 39,
+ R_386_TLS_DESC_CALL = 40,
+ R_386_TLS_DESC = 41,
+ R_386_IRELATIVE = 42,
+ R_386_NUM = 43
+};
+
+// MBlaze relocations.
+enum {
+ R_MICROBLAZE_NONE = 0,
+ R_MICROBLAZE_32 = 1,
+ R_MICROBLAZE_32_PCREL = 2,
+ R_MICROBLAZE_64_PCREL = 3,
+ R_MICROBLAZE_32_PCREL_LO = 4,
+ R_MICROBLAZE_64 = 5,
+ R_MICROBLAZE_32_LO = 6,
+ R_MICROBLAZE_SRO32 = 7,
+ R_MICROBLAZE_SRW32 = 8,
+ R_MICROBLAZE_64_NONE = 9,
+ R_MICROBLAZE_32_SYM_OP_SYM = 10,
+ R_MICROBLAZE_GNU_VTINHERIT = 11,
+ R_MICROBLAZE_GNU_VTENTRY = 12,
+ R_MICROBLAZE_GOTPC_64 = 13,
+ R_MICROBLAZE_GOT_64 = 14,
+ R_MICROBLAZE_PLT_64 = 15,
+ R_MICROBLAZE_REL = 16,
+ R_MICROBLAZE_JUMP_SLOT = 17,
+ R_MICROBLAZE_GLOB_DAT = 18,
+ R_MICROBLAZE_GOTOFF_64 = 19,
+ R_MICROBLAZE_GOTOFF_32 = 20,
+ R_MICROBLAZE_COPY = 21
+};
+
+
+// ARM Specific e_flags
+enum { EF_ARM_EABIMASK = 0xFF000000U };
+
+// ELF Relocation types for ARM
+// Meets 2.08 ABI Specs.
+
+enum {
+ R_ARM_NONE = 0x00,
+ R_ARM_PC24 = 0x01,
+ R_ARM_ABS32 = 0x02,
+ R_ARM_REL32 = 0x03,
+ R_ARM_LDR_PC_G0 = 0x04,
+ R_ARM_ABS16 = 0x05,
+ R_ARM_ABS12 = 0x06,
+ R_ARM_THM_ABS5 = 0x07,
+ R_ARM_ABS8 = 0x08,
+ R_ARM_SBREL32 = 0x09,
+ R_ARM_THM_CALL = 0x0a,
+ R_ARM_THM_PC8 = 0x0b,
+ R_ARM_BREL_ADJ = 0x0c,
+ R_ARM_TLS_DESC = 0x0d,
+ R_ARM_THM_SWI8 = 0x0e,
+ R_ARM_XPC25 = 0x0f,
+ R_ARM_THM_XPC22 = 0x10,
+ R_ARM_TLS_DTPMOD32 = 0x11,
+ R_ARM_TLS_DTPOFF32 = 0x12,
+ R_ARM_TLS_TPOFF32 = 0x13,
+ R_ARM_COPY = 0x14,
+ R_ARM_GLOB_DAT = 0x15,
+ R_ARM_JUMP_SLOT = 0x16,
+ R_ARM_RELATIVE = 0x17,
+ R_ARM_GOTOFF32 = 0x18,
+ R_ARM_BASE_PREL = 0x19,
+ R_ARM_GOT_BREL = 0x1a,
+ R_ARM_PLT32 = 0x1b,
+ R_ARM_CALL = 0x1c,
+ R_ARM_JUMP24 = 0x1d,
+ R_ARM_THM_JUMP24 = 0x1e,
+ R_ARM_BASE_ABS = 0x1f,
+ R_ARM_ALU_PCREL_7_0 = 0x20,
+ R_ARM_ALU_PCREL_15_8 = 0x21,
+ R_ARM_ALU_PCREL_23_15 = 0x22,
+ R_ARM_LDR_SBREL_11_0_NC = 0x23,
+ R_ARM_ALU_SBREL_19_12_NC = 0x24,
+ R_ARM_ALU_SBREL_27_20_CK = 0x25,
+ R_ARM_TARGET1 = 0x26,
+ R_ARM_SBREL31 = 0x27,
+ R_ARM_V4BX = 0x28,
+ R_ARM_TARGET2 = 0x29,
+ R_ARM_PREL31 = 0x2a,
+ R_ARM_MOVW_ABS_NC = 0x2b,
+ R_ARM_MOVT_ABS = 0x2c,
+ R_ARM_MOVW_PREL_NC = 0x2d,
+ R_ARM_MOVT_PREL = 0x2e,
+ R_ARM_THM_MOVW_ABS_NC = 0x2f,
+ R_ARM_THM_MOVT_ABS = 0x30,
+ R_ARM_THM_MOVW_PREL_NC = 0x31,
+ R_ARM_THM_MOVT_PREL = 0x32,
+ R_ARM_THM_JUMP19 = 0x33,
+ R_ARM_THM_JUMP6 = 0x34,
+ R_ARM_THM_ALU_PREL_11_0 = 0x35,
+ R_ARM_THM_PC12 = 0x36,
+ R_ARM_ABS32_NOI = 0x37,
+ R_ARM_REL32_NOI = 0x38,
+ R_ARM_ALU_PC_G0_NC = 0x39,
+ R_ARM_ALU_PC_G0 = 0x3a,
+ R_ARM_ALU_PC_G1_NC = 0x3b,
+ R_ARM_ALU_PC_G1 = 0x3c,
+ R_ARM_ALU_PC_G2 = 0x3d,
+ R_ARM_LDR_PC_G1 = 0x3e,
+ R_ARM_LDR_PC_G2 = 0x3f,
+ R_ARM_LDRS_PC_G0 = 0x40,
+ R_ARM_LDRS_PC_G1 = 0x41,
+ R_ARM_LDRS_PC_G2 = 0x42,
+ R_ARM_LDC_PC_G0 = 0x43,
+ R_ARM_LDC_PC_G1 = 0x44,
+ R_ARM_LDC_PC_G2 = 0x45,
+ R_ARM_ALU_SB_G0_NC = 0x46,
+ R_ARM_ALU_SB_G0 = 0x47,
+ R_ARM_ALU_SB_G1_NC = 0x48,
+ R_ARM_ALU_SB_G1 = 0x49,
+ R_ARM_ALU_SB_G2 = 0x4a,
+ R_ARM_LDR_SB_G0 = 0x4b,
+ R_ARM_LDR_SB_G1 = 0x4c,
+ R_ARM_LDR_SB_G2 = 0x4d,
+ R_ARM_LDRS_SB_G0 = 0x4e,
+ R_ARM_LDRS_SB_G1 = 0x4f,
+ R_ARM_LDRS_SB_G2 = 0x50,
+ R_ARM_LDC_SB_G0 = 0x51,
+ R_ARM_LDC_SB_G1 = 0x52,
+ R_ARM_LDC_SB_G2 = 0x53,
+ R_ARM_MOVW_BREL_NC = 0x54,
+ R_ARM_MOVT_BREL = 0x55,
+ R_ARM_MOVW_BREL = 0x56,
+ R_ARM_THM_MOVW_BREL_NC = 0x57,
+ R_ARM_THM_MOVT_BREL = 0x58,
+ R_ARM_THM_MOVW_BREL = 0x59,
+ R_ARM_TLS_GOTDESC = 0x5a,
+ R_ARM_TLS_CALL = 0x5b,
+ R_ARM_TLS_DESCSEQ = 0x5c,
+ R_ARM_THM_TLS_CALL = 0x5d,
+ R_ARM_PLT32_ABS = 0x5e,
+ R_ARM_GOT_ABS = 0x5f,
+ R_ARM_GOT_PREL = 0x60,
+ R_ARM_GOT_BREL12 = 0x61,
+ R_ARM_GOTOFF12 = 0x62,
+ R_ARM_GOTRELAX = 0x63,
+ R_ARM_GNU_VTENTRY = 0x64,
+ R_ARM_GNU_VTINHERIT = 0x65,
+ R_ARM_THM_JUMP11 = 0x66,
+ R_ARM_THM_JUMP8 = 0x67,
+ R_ARM_TLS_GD32 = 0x68,
+ R_ARM_TLS_LDM32 = 0x69,
+ R_ARM_TLS_LDO32 = 0x6a,
+ R_ARM_TLS_IE32 = 0x6b,
+ R_ARM_TLS_LE32 = 0x6c,
+ R_ARM_TLS_LDO12 = 0x6d,
+ R_ARM_TLS_LE12 = 0x6e,
+ R_ARM_TLS_IE12GP = 0x6f,
+ R_ARM_PRIVATE_0 = 0x70,
+ R_ARM_PRIVATE_1 = 0x71,
+ R_ARM_PRIVATE_2 = 0x72,
+ R_ARM_PRIVATE_3 = 0x73,
+ R_ARM_PRIVATE_4 = 0x74,
+ R_ARM_PRIVATE_5 = 0x75,
+ R_ARM_PRIVATE_6 = 0x76,
+ R_ARM_PRIVATE_7 = 0x77,
+ R_ARM_PRIVATE_8 = 0x78,
+ R_ARM_PRIVATE_9 = 0x79,
+ R_ARM_PRIVATE_10 = 0x7a,
+ R_ARM_PRIVATE_11 = 0x7b,
+ R_ARM_PRIVATE_12 = 0x7c,
+ R_ARM_PRIVATE_13 = 0x7d,
+ R_ARM_PRIVATE_14 = 0x7e,
+ R_ARM_PRIVATE_15 = 0x7f,
+ R_ARM_ME_TOO = 0x80,
+ R_ARM_THM_TLS_DESCSEQ16 = 0x81,
+ R_ARM_THM_TLS_DESCSEQ32 = 0x82
};
+
+
// Section header.
struct Elf32_Shdr {
Elf32_Word sh_name; // Section name (index into string table)
@@ -273,6 +469,7 @@ enum {
SHN_HIPROC = 0xff1f, // Highest processor-specific index
SHN_ABS = 0xfff1, // Symbol has absolute value; does not need relocation
SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables
+ SHN_XINDEX = 0xffff, // Mark that the index is >= SHN_LORESERVE
SHN_HIRESERVE = 0xffff // Highest reserved index
};
@@ -298,6 +495,18 @@ enum {
SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
SHT_LOPROC = 0x70000000, // Lowest processor architecture-specific type.
+ // FIXME: All this is duplicated in MCSectionELF. Why??
+ // Exception Index table
+ SHT_ARM_EXIDX = 0x70000001U,
+ // BPABI DLL dynamic linking pre-emption map
+ SHT_ARM_PREEMPTMAP = 0x70000002U,
+ // Object file compatibility attributes
+ SHT_ARM_ATTRIBUTES = 0x70000003U,
+ SHT_ARM_DEBUGOVERLAY = 0x70000004U,
+ SHT_ARM_OVERLAYSECTION = 0x70000005U,
+
+ SHT_X86_64_UNWIND = 0x70000001, // Unwind information
+
SHT_HIPROC = 0x7fffffff, // Highest processor architecture-specific type.
SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
@@ -305,10 +514,58 @@ enum {
// Section flags.
enum {
- SHF_WRITE = 0x1, // Section data should be writable during execution.
- SHF_ALLOC = 0x2, // Section occupies memory during program execution.
- SHF_EXECINSTR = 0x4, // Section contains executable machine instructions.
- SHF_MASKPROC = 0xf0000000 // Bits indicating processor-specific flags.
+ // Section data should be writable during execution.
+ SHF_WRITE = 0x1,
+
+ // Section occupies memory during program execution.
+ SHF_ALLOC = 0x2,
+
+ // Section contains executable machine instructions.
+ SHF_EXECINSTR = 0x4,
+
+ // The data in this section may be merged.
+ SHF_MERGE = 0x10,
+
+ // The data in this section is null-terminated strings.
+ SHF_STRINGS = 0x20,
+
+ // A field in this section holds a section header table index.
+ SHF_INFO_LINK = 0x40U,
+
+ // Adds special ordering requirements for link editors.
+ SHF_LINK_ORDER = 0x80U,
+
+ // This section requires special OS-specific processing to avoid incorrect
+ // behavior.
+ SHF_OS_NONCONFORMING = 0x100U,
+
+ // This section is a member of a section group.
+ SHF_GROUP = 0x200U,
+
+ // This section holds Thread-Local Storage.
+ SHF_TLS = 0x400U,
+
+ // Start of target-specific flags.
+
+ /// XCORE_SHF_CP_SECTION - All sections with the "c" flag are grouped
+ /// together by the linker to form the constant pool and the cp register is
+ /// set to the start of the constant pool by the boot code.
+ XCORE_SHF_CP_SECTION = 0x800U,
+
+ /// XCORE_SHF_DP_SECTION - All sections with the "d" flag are grouped
+ /// together by the linker to form the data section and the dp register is
+ /// set to the start of the section by the boot code.
+ XCORE_SHF_DP_SECTION = 0x1000U,
+
+ // Bits indicating processor-specific flags.
+ SHF_MASKPROC = 0xf0000000
+};
+
+// Section Group Flags
+enum {
+ GRP_COMDAT = 0x1,
+ GRP_MASKOS = 0x0ff00000,
+ GRP_MASKPROC = 0xf0000000
};
// Symbol table entries for ELF32.
diff --git a/contrib/llvm/include/llvm/Support/Endian.h b/contrib/llvm/include/llvm/Support/Endian.h
new file mode 100644
index 0000000..f62eab0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Endian.h
@@ -0,0 +1,213 @@
+//===- Endian.h - Utilities for IO with endian specific data ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic functions to read and write endian specific data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ENDIAN_H
+#define LLVM_SUPPORT_ENDIAN_H
+
+#include "llvm/Config/config.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+
+namespace llvm {
+namespace support {
+
+enum endianness {big, little};
+enum alignment {unaligned, aligned};
+
+namespace detail {
+
+template<typename value_type, alignment align>
+struct alignment_access_helper;
+
+template<typename value_type>
+struct alignment_access_helper<value_type, aligned>
+{
+ value_type val;
+};
+
+// Provides unaligned loads and stores.
+#pragma pack(push)
+#pragma pack(1)
+template<typename value_type>
+struct alignment_access_helper<value_type, unaligned>
+{
+ value_type val;
+};
+#pragma pack(pop)
+
+} // end namespace detail
+
+namespace endian {
+ template<typename value_type, alignment align>
+ static value_type read_le(const void *memory) {
+ value_type t =
+ reinterpret_cast<const detail::alignment_access_helper
+ <value_type, align> *>(memory)->val;
+ if (sys::isBigEndianHost())
+ return sys::SwapByteOrder(t);
+ return t;
+ }
+
+ template<typename value_type, alignment align>
+ static void write_le(void *memory, value_type value) {
+ if (sys::isBigEndianHost())
+ value = sys::SwapByteOrder(value);
+ reinterpret_cast<detail::alignment_access_helper<value_type, align> *>
+ (memory)->val = value;
+ }
+
+ template<typename value_type, alignment align>
+ static value_type read_be(const void *memory) {
+ value_type t =
+ reinterpret_cast<const detail::alignment_access_helper
+ <value_type, align> *>(memory)->val;
+ if (sys::isLittleEndianHost())
+ return sys::SwapByteOrder(t);
+ return t;
+ }
+
+ template<typename value_type, alignment align>
+ static void write_be(void *memory, value_type value) {
+ if (sys::isLittleEndianHost())
+ value = sys::SwapByteOrder(value);
+ reinterpret_cast<detail::alignment_access_helper<value_type, align> *>
+ (memory)->val = value;
+ }
+}
+
+namespace detail {
+
+template<typename value_type,
+ endianness endian,
+ alignment align>
+class packed_endian_specific_integral;
+
+template<typename value_type>
+class packed_endian_specific_integral<value_type, little, unaligned> {
+public:
+ operator value_type() const {
+ return endian::read_le<value_type, unaligned>(Value);
+ }
+private:
+ uint8_t Value[sizeof(value_type)];
+};
+
+template<typename value_type>
+class packed_endian_specific_integral<value_type, big, unaligned> {
+public:
+ operator value_type() const {
+ return endian::read_be<value_type, unaligned>(Value);
+ }
+private:
+ uint8_t Value[sizeof(value_type)];
+};
+
+template<typename value_type>
+class packed_endian_specific_integral<value_type, little, aligned> {
+public:
+ operator value_type() const {
+ return endian::read_le<value_type, aligned>(&Value);
+ }
+private:
+ value_type Value;
+};
+
+template<typename value_type>
+class packed_endian_specific_integral<value_type, big, aligned> {
+public:
+ operator value_type() const {
+ return endian::read_be<value_type, aligned>(&Value);
+ }
+private:
+ value_type Value;
+};
+
+} // end namespace detail
+
+typedef detail::packed_endian_specific_integral
+ <uint8_t, little, unaligned> ulittle8_t;
+typedef detail::packed_endian_specific_integral
+ <uint16_t, little, unaligned> ulittle16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, little, unaligned> ulittle32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, little, unaligned> ulittle64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int8_t, little, unaligned> little8_t;
+typedef detail::packed_endian_specific_integral
+ <int16_t, little, unaligned> little16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, little, unaligned> little32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, little, unaligned> little64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint8_t, little, aligned> aligned_ulittle8_t;
+typedef detail::packed_endian_specific_integral
+ <uint16_t, little, aligned> aligned_ulittle16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, little, aligned> aligned_ulittle32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, little, aligned> aligned_ulittle64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int8_t, little, aligned> aligned_little8_t;
+typedef detail::packed_endian_specific_integral
+ <int16_t, little, aligned> aligned_little16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, little, aligned> aligned_little32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, little, aligned> aligned_little64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint8_t, big, unaligned> ubig8_t;
+typedef detail::packed_endian_specific_integral
+ <uint16_t, big, unaligned> ubig16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, big, unaligned> ubig32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, big, unaligned> ubig64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int8_t, big, unaligned> big8_t;
+typedef detail::packed_endian_specific_integral
+ <int16_t, big, unaligned> big16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, big, unaligned> big32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, big, unaligned> big64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint8_t, big, aligned> aligned_ubig8_t;
+typedef detail::packed_endian_specific_integral
+ <uint16_t, big, aligned> aligned_ubig16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, big, aligned> aligned_ubig32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, big, aligned> aligned_ubig64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int8_t, big, aligned> aligned_big8_t;
+typedef detail::packed_endian_specific_integral
+ <int16_t, big, aligned> aligned_big16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, big, aligned> aligned_big32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, big, aligned> aligned_big64_t;
+
+} // end namespace support
+} // end namespace llvm
+
+#endif
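
A sketch of the new helpers in use, reading a little-endian u32 from a
possibly-unaligned buffer both ways (not part of the patch):

  #include "llvm/Support/Endian.h"
  using namespace llvm::support;

  uint32_t readLE32(const void *Buf) {
    // Explicit form: unaligned little-endian load, swapped on BE hosts.
    return endian::read_le<uint32_t, unaligned>(Buf);
  }

  uint32_t readMagic(const char *Hdr) {
    // Typedef form: ulittle32_t converts implicitly on read.
    return *reinterpret_cast<const ulittle32_t *>(Hdr);
  }
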
diff --git a/contrib/llvm/include/llvm/System/Errno.h b/contrib/llvm/include/llvm/Support/Errno.h
index 6e292ba..150bdb7 100644
--- a/contrib/llvm/include/llvm/System/Errno.h
+++ b/contrib/llvm/include/llvm/Support/Errno.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Errno.h - Portable+convenient errno handling -*- C++ -*-===//
+//===- llvm/Support/Errno.h - Portable+convenient errno handling -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/Support/ErrorHandling.h b/contrib/llvm/include/llvm/Support/ErrorHandling.h
index 9854657..5eca438 100644
--- a/contrib/llvm/include/llvm/Support/ErrorHandling.h
+++ b/contrib/llvm/include/llvm/Support/ErrorHandling.h
@@ -16,6 +16,7 @@
#define LLVM_SUPPORT_ERRORHANDLING_H
#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/StringRef.h"
#include <string>
namespace llvm {
@@ -72,15 +73,17 @@ namespace llvm {
/// standard error, followed by a newline.
/// After the error handler is called this function will call exit(1), it
/// does not return.
- NORETURN void report_fatal_error(const char *reason);
- NORETURN void report_fatal_error(const std::string &reason);
- NORETURN void report_fatal_error(const Twine &reason);
+ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason);
+ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const std::string &reason);
+ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(StringRef reason);
+ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const Twine &reason);
/// This function calls abort(), and prints the optional message to stderr.
/// Use the llvm_unreachable macro (that adds location info), instead of
/// calling this function directly.
- NORETURN void llvm_unreachable_internal(const char *msg=0,
- const char *file=0, unsigned line=0);
+ LLVM_ATTRIBUTE_NORETURN void llvm_unreachable_internal(const char *msg=0,
+ const char *file=0,
+ unsigned line=0);
}
/// Prints the message and location info to stderr in !NDEBUG builds.
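
A sketch of the renamed attribute in use through report_fatal_error
(illustrative only; openOrDie is hypothetical):

  #include "llvm/ADT/Twine.h"
  #include "llvm/Support/ErrorHandling.h"
  using namespace llvm;

  void openOrDie(const char *Path, bool Opened) {
    if (!Opened)
      // Never returns: calls the installed error handler, then exit(1).
      report_fatal_error(Twine("cannot open ") + Path);
  }
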
diff --git a/contrib/llvm/include/llvm/Support/FEnv.h b/contrib/llvm/include/llvm/Support/FEnv.h
new file mode 100644
index 0000000..f6f4333
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FEnv.h
@@ -0,0 +1,56 @@
+//===- llvm/Support/FEnv.h - Host floating-point exceptions ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an operating-system-independent interface to the
+// host's floating-point exception state.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FENV_H
+#define LLVM_SUPPORT_FENV_H
+
+#include "llvm/Config/config.h"
+#include <cerrno>
+#ifdef HAVE_FENV_H
+#include <fenv.h>
+#endif
+
+// FIXME: Clang's #include handling apparently doesn't work for libstdc++'s
+// fenv.h; see PR6907 for details.
+#if defined(__clang__) && defined(_GLIBCXX_FENV_H)
+#undef HAVE_FENV_H
+#endif
+
+namespace llvm {
+namespace sys {
+
+/// llvm_fenv_clearexcept - Clear the floating-point exception state.
+static inline void llvm_fenv_clearexcept() {
+#ifdef HAVE_FENV_H
+ feclearexcept(FE_ALL_EXCEPT);
+#endif
+ errno = 0;
+}
+
+/// llvm_fenv_testexcept - Test if a floating-point exception was raised.
+static inline bool llvm_fenv_testexcept() {
+ int errno_val = errno;
+ if (errno_val == ERANGE || errno_val == EDOM)
+ return true;
+#ifdef HAVE_FENV_H
+ if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
+ return true;
+#endif
+ return false;
+}
+
+} // End sys namespace
+} // End llvm namespace
+
+#endif
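
The intended pattern is clear/evaluate/test (a sketch modeled on how constant
folding guards host floating-point math; not part of the patch):

  #include "llvm/Support/FEnv.h"
  #include <cmath>

  bool safeSqrt(double V, double &Result) {
    llvm::sys::llvm_fenv_clearexcept();  // reset errno and FP exception flags
    Result = std::sqrt(V);               // host floating-point operation
    // Reject the result if the host raised ERANGE/EDOM or any FP exception
    // other than inexact.
    return !llvm::sys::llvm_fenv_testexcept();
  }
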
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
new file mode 100644
index 0000000..4001bf0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -0,0 +1,690 @@
+//===- llvm/Support/FileSystem.h - File System OS Concept -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::fs namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+// All functions return an error_code and deliver their actual result via the
+// last out argument. The out argument is defined if and only if errc::success
+// is returned. A function may return any error code in the generic or system
+// category. However, it shall be equivalent to any error conditions listed
+// in each function's respective documentation if the condition applies. [ note:
+// this does not guarantee that error_code will be in the set of explicitly
+// listed codes, but it does guarantee that if any of the explicitly listed
+// errors occur, the correct error_code will be used ]. All functions may
+// return errc::not_enough_memory if there is not enough memory to complete the
+// operation.
+//
+//===----------------------------------------------------------------------===//
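
A sketch of the calling convention this describes: test the returned
error_code, and read the out argument only on success (not part of the patch):

  #include "llvm/Support/FileSystem.h"
  using namespace llvm;

  bool fileExists(const Twine &Path) {
    bool Result;
    // 'Result' is defined if and only if no error is returned.
    if (error_code EC = sys::fs::exists(Path, Result))
      return false;        // EC describes the failure; Result is undefined
    return Result;
  }
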
+
+#ifndef LLVM_SUPPORT_FILE_SYSTEM_H
+#define LLVM_SUPPORT_FILE_SYSTEM_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/PathV1.h"
+#include "llvm/Support/system_error.h"
+#include <ctime>
+#include <iterator>
+#include <string>
+
+namespace llvm {
+namespace sys {
+namespace fs {
+
+/// file_type - An "enum class" enumeration for the file system's view of the
+/// type.
+struct file_type {
+ enum _ {
+ status_error,
+ file_not_found,
+ regular_file,
+ directory_file,
+ symlink_file,
+ block_file,
+ character_file,
+ fifo_file,
+ socket_file,
+ type_unknown
+ };
+
+ file_type(_ v) : v_(v) {}
+ explicit file_type(int v) : v_(_(v)) {}
+ operator int() const {return v_;}
+
+private:
+ int v_;
+};
+
+/// copy_option - An "enum class" enumeration of copy semantics for copy
+/// operations.
+struct copy_option {
+ enum _ {
+ fail_if_exists,
+ overwrite_if_exists
+ };
+
+ copy_option(_ v) : v_(v) {}
+ explicit copy_option(int v) : v_(_(v)) {}
+ operator int() const {return v_;}
+
+private:
+ int v_;
+};
+
+/// space_info - Disk space information: capacity, free, and available bytes.
+struct space_info {
+ uint64_t capacity;
+ uint64_t free;
+ uint64_t available;
+};
+
+/// file_status - Represents the result of a call to stat and friends. It has
+/// a platform specific member to store the result.
+class file_status
+{
+ // implementation defined status field.
+ file_type Type;
+public:
+ explicit file_status(file_type v=file_type::status_error)
+ : Type(v) {}
+
+ file_type type() const { return Type; }
+ void type(file_type v) { Type = v; }
+};
+
+/// @}
+/// @name Physical Operators
+/// @{
+
+/// @brief Make \a path an absolute path.
+///
+/// Makes \a path absolute using the current directory if it is not already. An
+/// empty \a path will result in the current directory.
+///
+/// /absolute/path => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+/// @returns errc::success if \a path has been made absolute, otherwise a
+/// platform specific error_code.
+error_code make_absolute(SmallVectorImpl<char> &path);
+
+/// @brief Copy the file at \a from to the path \a to.
+///
+/// @param from The path to copy the file from.
+/// @param to The path to copy the file to.
+/// @param copt Behavior if \a to already exists.
+/// @returns errc::success if the file has been successfully copied.
+/// errc::file_exists if \a to already exists and \a copt ==
+/// copy_option::fail_if_exists. Otherwise a platform specific
+/// error_code.
+error_code copy_file(const Twine &from, const Twine &to,
+ copy_option copt = copy_option::fail_if_exists);
+
+/// @brief Create all the non-existent directories in path.
+///
+/// @param path Directories to create.
+/// @param existed Set to true if \a path already existed, false otherwise.
+/// @returns errc::success if is_directory(path) and existed have been set,
+/// otherwise a platform specific error_code.
+error_code create_directories(const Twine &path, bool &existed);
+
+/// @brief Create the directory in path.
+///
+/// @param path Directory to create.
+/// @param existed Set to true if \a path already existed, false otherwise.
+/// @returns errc::success if is_directory(path) and existed have been set,
+/// otherwise a platform specific error_code.
+error_code create_directory(const Twine &path, bool &existed);
+
+/// @brief Create a hard link from \a from to \a to.
+///
+/// @param to The path to hard link to.
+/// @param from The path to hard link from. This is created.
+/// @returns errc::success if exists(to) && exists(from) && equivalent(to, from)
+/// , otherwise a platform specific error_code.
+error_code create_hard_link(const Twine &to, const Twine &from);
+
+/// @brief Create a symbolic link from \a from to \a to.
+///
+/// @param to The path to symbolically link to.
+/// @param from The path to symbolically link from. This is created.
+/// @returns errc::success if exists(to) && exists(from) && is_symlink(from),
+/// otherwise a platform specific error_code.
+error_code create_symlink(const Twine &to, const Twine &from);
+
+/// @brief Get the current path.
+///
+/// @param result Holds the current path on return.
+/// @results errc::success if the current path has been stored in result,
+/// otherwise a platform specific error_code.
+error_code current_path(SmallVectorImpl<char> &result);
+
+/// @brief Remove path. Equivalent to POSIX remove().
+///
+/// @param path Input path.
+/// @param existed Set to true if \a path existed, false if it did not.
+/// undefined otherwise.
+/// @results errc::success if path has been removed and existed has been
+/// successfully set, otherwise a platform specific error_code.
+error_code remove(const Twine &path, bool &existed);
+
+/// @brief Recursively remove all files below \a path, then \a path. Files are
+/// removed as if by POSIX remove().
+///
+/// @param path Input path.
+/// @param num_removed Number of files removed.
+/// @results errc::success if path has been removed and num_removed has been
+/// successfully set, otherwise a platform specific error_code.
+error_code remove_all(const Twine &path, uint32_t &num_removed);
+
+/// @brief Rename \a from to \a to. Files are renamed as if by POSIX rename().
+///
+/// @param from The path to rename from.
+/// @param to The path to rename to. This is created.
+error_code rename(const Twine &from, const Twine &to);
+
+/// @brief Resize path to size. File is resized as if by POSIX truncate().
+///
+/// @param path Input path.
+/// @param size Size to resize to.
+/// @returns errc::success if \a path has been resized to \a size, otherwise a
+/// platform specific error_code.
+error_code resize_file(const Twine &path, uint64_t size);
+
+/// @brief Make file readable.
+///
+/// @param path Input path.
+/// @param value If true, make readable, else, make unreadable.
+/// @results errc::success if readability has been successfully set, otherwise a
+/// platform specific error_code.
+error_code set_read(const Twine &path, bool value);
+
+/// @brief Make file writeable.
+///
+/// @param path Input path.
+/// @param value If true, make writeable, else, make unwriteable.
+/// @results errc::success if writeability has been successfully set, otherwise
+/// a platform specific error_code.
+error_code set_write(const Twine &path, bool value);
+
+/// @brief Make file executable.
+///
+/// @param path Input path.
+/// @param value If true, make executable, else, make unexecutable.
+/// @results errc::success if executability has been successfully set, otherwise
+/// a platform specific error_code.
+error_code set_execute(const Twine &path, bool value);
+
+/// @}
+/// @name Physical Observers
+/// @{
+
+/// @brief Does file exist?
+///
+/// @param status A file_status previously returned from stat.
+/// @results True if the file represented by status exists, false if it does
+/// not.
+bool exists(file_status status);
+
+/// @brief Does file exist?
+///
+/// @param path Input path.
+/// @param result Set to true if the file represented by status exists, false if
+/// it does not. Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code exists(const Twine &path, bool &result);
+
+/// @brief Do file_status objects represent the same thing?
+///
+/// @param A Input file_status.
+/// @param B Input file_status.
+///
+/// assert(status_known(A) || status_known(B));
+///
+/// @results True if A and B both represent the same file system entity, false
+/// otherwise.
+bool equivalent(file_status A, file_status B);
+
+/// @brief Do paths represent the same thing?
+///
+/// @param A Input path A.
+/// @param B Input path B.
+/// @param result Set to true if stat(A) and stat(B) have the same device and
+/// inode (or equivalent).
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code equivalent(const Twine &A, const Twine &B, bool &result);
+
+/// @brief Get file size.
+///
+/// @param path Input path.
+/// @param result Set to the size of the file in \a path.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code file_size(const Twine &path, uint64_t &result);
+
+/// @brief Does status represent a directory?
+///
+/// @param status A file_status previously returned from status.
+/// @results status.type() == file_type::directory_file.
+bool is_directory(file_status status);
+
+/// @brief Is path a directory?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a directory, false if it is not.
+/// Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_directory(const Twine &path, bool &result);
+
+/// @brief Is path an empty file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is an empty file, false if it is not.
+/// Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_empty(const Twine &path, bool &result);
+
+/// @brief Does status represent a regular file?
+///
+/// @param status A file_status previously returned from status.
+/// @results status_known(status) && status.type() == file_type::regular_file.
+bool is_regular_file(file_status status);
+
+/// @brief Is path a regular file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a regular file, false if it is not.
+/// Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_regular_file(const Twine &path, bool &result);
+
+/// @brief Does this status represent something that exists but is not a
+/// directory, regular file, or symlink?
+///
+/// @param status A file_status previously returned from status.
+/// @results exists(s) && !is_regular_file(s) && !is_directory(s) &&
+/// !is_symlink(s)
+bool is_other(file_status status);
+
+/// @brief Is path something that exists but is not a directory,
+/// regular file, or symlink?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path exists, but is not a directory, regular
+/// file, or a symlink, false if it does not. Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_other(const Twine &path, bool &result);
+
+/// @brief Does status represent a symlink?
+///
+/// @param status A file_status previously returned from stat.
+/// @results status.type() == file_type::symlink_file.
+bool is_symlink(file_status status);
+
+/// @brief Is path a symlink?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a symlink, false if it is not.
+/// Undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_symlink(const Twine &path, bool &result);
+
+/// @brief Get last write time without changing it.
+///
+/// @param path Input path.
+/// @param result Set to the last write time (UNIX time) of \a path if it
+/// exists.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code last_write_time(const Twine &path, std::time_t &result);
+
+/// @brief Set last write time.
+///
+/// @param path Input path.
+/// @param value Time to set (UNIX time) \a path's last write time to.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code set_last_write_time(const Twine &path, std::time_t value);
+
+/// @brief Read a symlink's value.
+///
+/// @param path Input path.
+/// @param result Set to the value of the symbolic link \a path.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code read_symlink(const Twine &path, SmallVectorImpl<char> &result);
+
+/// @brief Get disk space usage information.
+///
+/// @param path Input path.
+/// @param result Set to the capacity, free, and available space on the device
+/// \a path is on.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code disk_space(const Twine &path, space_info &result);
+
+/// @brief Get file status as if by POSIX stat().
+///
+/// @param path Input path.
+/// @param result Set to the file status.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code status(const Twine &path, file_status &result);
+
+/// @brief Is status available?
+///
+/// @param s Input file_status.
+/// @results True if s.type() != file_type::status_error.
+bool status_known(file_status s);
+
+/// @brief Is status available?
+///
+/// @param path Input path.
+/// @param result Set to true if status() != status_error.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code status_known(const Twine &path, bool &result);
+
+/// @brief Get file status as if by POSIX lstat().
+///
+/// Does not resolve symlinks.
+///
+/// @param path Input path.
+/// @param result Set to the file status.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code symlink_status(const Twine &path, file_status &result);
+
+/// @brief Generate a unique path and open it as a file.
+///
+/// Generates a unique path suitable for a temporary file and then opens it as a
+/// file. The name is based on \a model with '%' replaced by a random char in
+/// [0-9a-f]. If \a model is not an absolute path, a suitable temporary
+/// directory will be prepended.
+///
+/// This is an atomic operation. Either the file is created and opened, or the
+/// file system is left untouched.
+///
+/// clang-%%-%%-%%-%%-%%.s => /tmp/clang-a0-b1-c2-d3-e4.s
+///
+/// @param model Name to base unique path off of.
+/// @param result_fd Set to the opened file's file descriptor.
+/// @param result_path Set to the opened file's absolute path.
+/// @results errc::success if result_{fd,path} have been successfully set,
+/// otherwise a platform specific error_code.
+error_code unique_file(const Twine &model, int &result_fd,
+ SmallVectorImpl<char> &result_path);
+
+/// @brief Canonicalize path.
+///
+/// Sets result to the file system's idea of what path is. The result is always
+/// absolute and has the same capitalization as the file system.
+///
+/// @param path Input path.
+/// @param result Set to the canonicalized version of \a path.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code canonicalize(const Twine &path, SmallVectorImpl<char> &result);
+
+/// @brief Are \a path's first bytes \a magic?
+///
+/// @param path Input path.
+/// @param magic Byte sequence to compare \a path's first len(magic) bytes to.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code has_magic(const Twine &path, const Twine &magic, bool &result);
+
+/// @brief Get \a path's first \a len bytes.
+///
+/// @param path Input path.
+/// @param len Number of magic bytes to get.
+/// @param result Set to the first \a len bytes in the file pointed to by
+/// \a path. Or the entire file if file_size(path) < len, in which
+/// case result.size() returns the size of the file.
+/// @results errc::success if result has been successfully set,
+///          errc::value_too_large if len is larger than the file pointed to by
+/// \a path, otherwise a platform specific error_code.
+error_code get_magic(const Twine &path, uint32_t len,
+ SmallVectorImpl<char> &result);
+
+/// @brief Get and identify \a path's type based on its content.
+///
+/// @param path Input path.
+/// @param result Set to the type of file, or LLVMFileType::Unknown_FileType.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code identify_magic(const Twine &path, LLVMFileType &result);
+
+/// @brief Is file bitcode?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a bitcode file, false if it is not,
+/// undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_bitcode(const Twine &path, bool &result);
+
+/// @brief Is file a dynamic library?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a dynamic library, false if it is
+/// not, undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_dynamic_library(const Twine &path, bool &result);
+
+/// @brief Is an object file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is an object file, false if it is not,
+/// undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code is_object_file(const Twine &path, bool &result);
+
+/// @brief Can file be read?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is readable, false if it is not,
+/// undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code can_read(const Twine &path, bool &result);
+
+/// @brief Can file be written?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is writeable, false if it is not,
+/// undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code can_write(const Twine &path, bool &result);
+
+/// @brief Can file be executed?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is executable, false if it is not,
+/// undefined otherwise.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code can_execute(const Twine &path, bool &result);
+
+/// @brief Get library paths the system linker uses.
+///
+/// @param result Set to the list of system library paths.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code GetSystemLibraryPaths(SmallVectorImpl<std::string> &result);
+
+/// @brief Get bitcode library paths the system linker uses, plus
+///        LLVM_LIB_SEARCH_PATH and LLVM_LIBDIR.
+///
+/// @param result Set to the list of bitcode library paths.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code GetBitcodeLibraryPaths(SmallVectorImpl<std::string> &result);
+
+/// @brief Find a library.
+///
+/// Find the path to a library using its short name. Use the system
+/// dependent library paths to locate the library.
+///
+/// c => /usr/lib/libc.so
+///
+/// @param short_name Library name one would give to the system linker.
+/// @param result Set to the absolute path \a short_name represents.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code FindLibrary(const Twine &short_name, SmallVectorImpl<char> &result);
+
+/// @brief Get absolute path of main executable.
+///
+/// @param argv0 The program name as it was spelled on the command line.
+/// @param MainAddr Address of some symbol in the executable (not in a library).
+/// @param result Set to the absolute path of the current executable.
+/// @results errc::success if result has been successfully set, otherwise a
+/// platform specific error_code.
+error_code GetMainExecutable(const char *argv0, void *MainAddr,
+ SmallVectorImpl<char> &result);
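
All of these helpers share one shape: outputs come back through reference
parameters and the return value is an error_code. A minimal usage sketch
(assuming this header plus SmallString.h and raw_ostream.h; the names are
illustrative):

    using namespace llvm;
    SmallString<128> TempPath;
    int TempFD;
    // Atomically create and open a /tmp/clang-a0-b1-c2-d3-e4.s style file.
    if (error_code ec = sys::fs::unique_file("clang-%%-%%-%%-%%-%%.s",
                                             TempFD, TempPath))
      errs() << "unique_file: " << ec.message() << '\n';
    sys::fs::file_status St;
    if (error_code ec = sys::fs::status(TempPath.str(), St))
      errs() << "status: " << ec.message() << '\n';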
+
+/// @}
+/// @name Iterators
+/// @{
+
+/// directory_entry - A single entry in a directory. Caches the status either
+/// from the result of the iteration syscall, or the first time status or
+/// symlink_status is called.
+class directory_entry {
+ std::string Path;
+ mutable file_status Status;
+ mutable file_status SymlinkStatus;
+
+public:
+ explicit directory_entry(const Twine &path, file_status st = file_status(),
+ file_status symlink_st = file_status())
+ : Path(path.str())
+ , Status(st)
+ , SymlinkStatus(symlink_st) {}
+
+ directory_entry() {}
+
+ void assign(const Twine &path, file_status st = file_status(),
+ file_status symlink_st = file_status()) {
+ Path = path.str();
+ Status = st;
+ SymlinkStatus = symlink_st;
+ }
+
+ void replace_filename(const Twine &filename, file_status st = file_status(),
+ file_status symlink_st = file_status());
+
+ StringRef path() const { return Path; }
+ error_code status(file_status &result) const;
+ error_code symlink_status(file_status &result) const;
+
+ bool operator==(const directory_entry& rhs) const { return Path == rhs.Path; }
+ bool operator!=(const directory_entry& rhs) const { return !(*this == rhs); }
+ bool operator< (const directory_entry& rhs) const;
+ bool operator<=(const directory_entry& rhs) const;
+ bool operator> (const directory_entry& rhs) const;
+ bool operator>=(const directory_entry& rhs) const;
+};
+
+/// directory_iterator - Iterates through the entries in path. There is no
+/// operator++ because we need an error_code. If it's really needed we can make
+/// it call report_fatal_error on error.
+class directory_iterator {
+ intptr_t IterationHandle;
+ directory_entry CurrentEntry;
+
+ // Platform implementations implement these functions to handle iteration.
+ friend error_code directory_iterator_construct(directory_iterator &it,
+ StringRef path);
+ friend error_code directory_iterator_increment(directory_iterator &it);
+ friend error_code directory_iterator_destruct(directory_iterator &it);
+
+public:
+ explicit directory_iterator(const Twine &path, error_code &ec)
+ : IterationHandle(0) {
+ SmallString<128> path_storage;
+ ec = directory_iterator_construct(*this, path.toStringRef(path_storage));
+ }
+
+ /// Construct end iterator.
+ directory_iterator() : IterationHandle(0) {}
+
+ ~directory_iterator() {
+ directory_iterator_destruct(*this);
+ }
+
+ // No operator++ because we need error_code.
+ directory_iterator &increment(error_code &ec) {
+ ec = directory_iterator_increment(*this);
+ return *this;
+ }
+
+ const directory_entry &operator*() const { return CurrentEntry; }
+ const directory_entry *operator->() const { return &CurrentEntry; }
+
+ bool operator!=(const directory_iterator &RHS) const {
+ return CurrentEntry != RHS.CurrentEntry;
+ }
+ // Other members as required by
+ // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
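
Since iteration reports failures through an error_code instead of throwing,
a loop threads ec alongside the iterator. A sketch, assuming raw_ostream.h
for output:

    using namespace llvm;
    error_code ec;
    for (sys::fs::directory_iterator I("/tmp", ec), E; !ec && I != E;
         I.increment(ec)) {
      // path() is cheap; status()/symlink_status() are cached on first use.
      outs() << I->path() << '\n';
    }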
+
+/// recursive_directory_iterator - Same as directory_iterator except that it
+/// recurses down into child directories.
+class recursive_directory_iterator {
+ uint16_t Level;
+ bool HasNoPushRequest;
+ // implementation directory iterator status
+
+public:
+ explicit recursive_directory_iterator(const Twine &path, error_code &ec);
+ // No operator++ because we need error_code.
+ directory_iterator &increment(error_code &ec);
+
+ const directory_entry &operator*() const;
+ const directory_entry *operator->() const;
+
+ // observers
+ /// Gets the current level. path is at level 0.
+ int level() const;
+ /// Returns true if no_push has been called for this directory_entry.
+ bool no_push_request() const;
+
+ // modifiers
+ /// Goes up one level if Level > 0.
+ void pop();
+ /// Does not go down into the current directory_entry.
+ void no_push();
+
+ // Other members as required by
+ // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
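
The recursive variant adds depth control on top of the same protocol. A
sketch, assuming the input-iterator comparison members promised by the
comment above are present:

    llvm::error_code ec;
    llvm::sys::fs::recursive_directory_iterator I("src", ec), E;
    while (!ec && I != E) {
      if (I.level() >= 2)
        I.no_push();          // don't descend below two directory levels
      I.increment(ec);
    }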
+
+/// @}
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FileUtilities.h b/contrib/llvm/include/llvm/Support/FileUtilities.h
index d0dd4a7..748ce7c 100644
--- a/contrib/llvm/include/llvm/Support/FileUtilities.h
+++ b/contrib/llvm/include/llvm/Support/FileUtilities.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SUPPORT_FILEUTILITIES_H
#define LLVM_SUPPORT_FILEUTILITIES_H
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
index 287c5ba..7573ef0 100644
--- a/contrib/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -26,7 +26,7 @@
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <vector>
#include <cassert>
@@ -89,14 +89,28 @@ class GraphWriter {
public:
GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
- DTraits = DOTTraits(SN);
-}
+ DTraits = DOTTraits(SN);
+ }
+
+ void writeGraph(const std::string &Title = "") {
+ // Output the header for the graph...
+ writeHeader(Title);
+
+ // Emit all of the nodes in the graph...
+ writeNodes();
+
+ // Output any customizations on the graph
+ DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);
- void writeHeader(const std::string &Name) {
+ // Output the end of the graph
+ writeFooter();
+ }
+
+ void writeHeader(const std::string &Title) {
std::string GraphName = DTraits.getGraphName(G);
- if (!Name.empty())
- O << "digraph \"" << DOT::EscapeString(Name) << "\" {\n";
+ if (!Title.empty())
+ O << "digraph \"" << DOT::EscapeString(Title) << "\" {\n";
else if (!GraphName.empty())
O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n";
else
@@ -105,8 +119,8 @@ public:
if (DTraits.renderGraphFromBottomUp())
O << "\trankdir=\"BT\";\n";
- if (!Name.empty())
- O << "\tlabel=\"" << DOT::EscapeString(Name) << "\";\n";
+ if (!Title.empty())
+ O << "\tlabel=\"" << DOT::EscapeString(Title) << "\";\n";
else if (!GraphName.empty())
O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n";
O << DTraits.getGraphProperties(G);
@@ -282,22 +296,13 @@ public:
template<typename GraphType>
raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
bool ShortNames = false,
- const std::string &Name = "",
const std::string &Title = "") {
// Start the graph emission process...
GraphWriter<GraphType> W(O, G, ShortNames);
- // Output the header for the graph...
- W.writeHeader(Title);
-
- // Emit all of the nodes in the graph...
- W.writeNodes();
-
- // Output any customizations on the graph
- DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, W);
+ // Emit the graph.
+ W.writeGraph(Title);
- // Output the end of the graph
- W.writeFooter();
return O;
}
@@ -322,7 +327,7 @@ sys::Path WriteGraph(const GraphType &G, const std::string &Name,
raw_fd_ostream O(Filename.c_str(), ErrorInfo);
if (ErrorInfo.empty()) {
- llvm::WriteGraph(O, G, ShortNames, Name, Title);
+ llvm::WriteGraph(O, G, ShortNames, Title);
errs() << " done. \n";
} else {
errs() << "error opening file '" << Filename.str() << "' for writing!\n";
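
With the emission steps folded into writeGraph, callers of the free function
now pass only a title. A hedged sketch, where G is any graph value with the
usual GraphTraits/DOTGraphTraits specializations:

    // Emit G as a DOT digraph titled "my graph" on standard error.
    llvm::WriteGraph(llvm::errs(), G, /*ShortNames=*/false, "my graph");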
diff --git a/contrib/llvm/include/llvm/System/Host.h b/contrib/llvm/include/llvm/Support/Host.h
index 4fbf5c1..f77d4c1 100644
--- a/contrib/llvm/include/llvm/System/Host.h
+++ b/contrib/llvm/include/llvm/Support/Host.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Host.h - Host machine characteristics --------*- C++ -*-===//
+//===- llvm/Support/Host.h - Host machine characteristics --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/Support/IRBuilder.h b/contrib/llvm/include/llvm/Support/IRBuilder.h
index c827cce..2394a59 100644
--- a/contrib/llvm/include/llvm/Support/IRBuilder.h
+++ b/contrib/llvm/include/llvm/Support/IRBuilder.h
@@ -46,32 +46,39 @@ protected:
BasicBlock::iterator InsertPt;
LLVMContext &Context;
public:
-
+
IRBuilderBase(LLVMContext &context)
: Context(context) {
ClearInsertionPoint();
}
-
+
//===--------------------------------------------------------------------===//
// Builder configuration methods
//===--------------------------------------------------------------------===//
-
+
/// ClearInsertionPoint - Clear the insertion point: created instructions will
/// not be inserted into a block.
void ClearInsertionPoint() {
BB = 0;
}
-
+
BasicBlock *GetInsertBlock() const { return BB; }
BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
LLVMContext &getContext() const { return Context; }
-
+
/// SetInsertPoint - This specifies that created instructions should be
/// appended to the end of the specified block.
void SetInsertPoint(BasicBlock *TheBB) {
BB = TheBB;
InsertPt = BB->end();
}
+
+ /// SetInsertPoint - This specifies that created instructions should be
+ /// inserted before the specified instruction.
+ void SetInsertPoint(Instruction *I) {
+ BB = I->getParent();
+ InsertPt = I;
+ }
/// SetInsertPoint - This specifies that created instructions should be
/// inserted at the specified point.
@@ -79,17 +86,17 @@ public:
BB = TheBB;
InsertPt = IP;
}
-
+
/// SetCurrentDebugLocation - Set location information used by debugging
/// information.
void SetCurrentDebugLocation(const DebugLoc &L) {
CurDbgLocation = L;
}
-
+
/// getCurrentDebugLocation - Get location information used by debugging
/// information.
const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
-
+
/// SetInstDebugLocation - If this builder has a current debug location, set
/// it on the specified instruction.
void SetInstDebugLocation(Instruction *I) const {
@@ -142,7 +149,7 @@ public:
//===--------------------------------------------------------------------===//
// Miscellaneous creation methods.
//===--------------------------------------------------------------------===//
-
+
/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array of i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
@@ -178,65 +185,100 @@ public:
ConstantInt *getInt32(uint32_t C) {
return ConstantInt::get(getInt32Ty(), C);
}
-
+
/// getInt64 - Get a constant 64-bit value.
ConstantInt *getInt64(uint64_t C) {
return ConstantInt::get(getInt64Ty(), C);
}
-
+
//===--------------------------------------------------------------------===//
// Type creation methods
//===--------------------------------------------------------------------===//
-
+
/// getInt1Ty - Fetch the type representing a single bit
const IntegerType *getInt1Ty() {
return Type::getInt1Ty(Context);
}
-
+
/// getInt8Ty - Fetch the type representing an 8-bit integer.
const IntegerType *getInt8Ty() {
return Type::getInt8Ty(Context);
}
-
+
/// getInt16Ty - Fetch the type representing a 16-bit integer.
const IntegerType *getInt16Ty() {
return Type::getInt16Ty(Context);
}
-
+
/// getInt32Ty - Fetch the type representing a 32-bit integer.
const IntegerType *getInt32Ty() {
return Type::getInt32Ty(Context);
}
-
+
/// getInt64Ty - Fetch the type representing a 64-bit integer.
const IntegerType *getInt64Ty() {
return Type::getInt64Ty(Context);
}
-
+
/// getFloatTy - Fetch the type representing a 32-bit floating point value.
const Type *getFloatTy() {
return Type::getFloatTy(Context);
}
-
+
/// getDoubleTy - Fetch the type representing a 64-bit floating point value.
const Type *getDoubleTy() {
return Type::getDoubleTy(Context);
}
-
+
/// getVoidTy - Fetch the type representing void.
const Type *getVoidTy() {
return Type::getVoidTy(Context);
}
-
- const PointerType *getInt8PtrTy() {
- return Type::getInt8PtrTy(Context);
+
+ const PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
+ return Type::getInt8PtrTy(Context, AddrSpace);
}
-
+
/// getCurrentFunctionReturnType - Get the return type of the current function
/// that we're emitting into.
const Type *getCurrentFunctionReturnType() const;
-};
+ /// CreateMemSet - Create and insert a memset to the specified pointer and the
+ /// specified value. If the pointer isn't an i8*, it will be converted. If a
+ /// TBAA tag is specified, it will be added to the instruction.
+ CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0) {
+ return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, TBAATag);
+ }
+
+ CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0);
+
+ /// CreateMemCpy - Create and insert a memcpy between the specified pointers.
+ /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
+ /// specified, it will be added to the instruction.
+ CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0) {
+ return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
+ }
+
+ CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0);
+
+ /// CreateMemMove - Create and insert a memmove between the specified
+ /// pointers. If the pointers aren't i8*, they will be converted. If a TBAA
+ /// tag is specified, it will be added to the instruction.
+ CallInst *CreateMemMove(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0) {
+ return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
+ }
+
+ CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = 0);
+private:
+ Value *getCastedInt8PtrValue(Value *Ptr);
+};
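
The new memory-intrinsic helpers bitcast their pointer operands to i8* when
needed. A sketch, assuming Builder is a live IRBuilder<> and Dst/Src are
pointer Values already in scope:

    // memset(Dst, 0, 64) with 8-byte alignment.
    Builder.CreateMemSet(Dst, ConstantInt::get(Builder.getInt8Ty(), 0),
                         /*Size=*/64, /*Align=*/8);
    // Volatile memcpy(Dst, Src, 64); a TBAATag of 0 attaches no metadata.
    Builder.CreateMemCpy(Dst, Src, /*Size=*/64, /*Align=*/8,
                         /*isVolatile=*/true, /*TBAATag=*/0);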
+
/// IRBuilder - This provides a uniform API for creating instructions and
/// inserting them into a basic block: either at the end of a BasicBlock, or
/// at a specific iterator location in a block.
@@ -258,25 +300,30 @@ public:
IRBuilder(LLVMContext &C, const T &F, const Inserter &I = Inserter())
: IRBuilderBase(C), Inserter(I), Folder(F) {
}
-
+
explicit IRBuilder(LLVMContext &C) : IRBuilderBase(C), Folder(C) {
}
-
+
explicit IRBuilder(BasicBlock *TheBB, const T &F)
: IRBuilderBase(TheBB->getContext()), Folder(F) {
SetInsertPoint(TheBB);
}
-
+
explicit IRBuilder(BasicBlock *TheBB)
: IRBuilderBase(TheBB->getContext()), Folder(Context) {
SetInsertPoint(TheBB);
}
+
+ explicit IRBuilder(Instruction *IP)
+ : IRBuilderBase(IP->getContext()), Folder(Context) {
+ SetInsertPoint(IP);
+ }
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F)
: IRBuilderBase(TheBB->getContext()), Folder(F) {
SetInsertPoint(TheBB, IP);
}
-
+
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP)
: IRBuilderBase(TheBB->getContext()), Folder(Context) {
SetInsertPoint(TheBB, IP);
@@ -288,7 +335,7 @@ public:
/// isNamePreserving - Return true if this builder is configured to actually
/// add the requested names to IR created through it.
bool isNamePreserving() const { return preserveNames; }
-
+
/// Insert - Insert and return the specified instruction.
template<typename InstTy>
InstTy *Insert(InstTy *I, const Twine &Name = "") const {
@@ -298,6 +345,11 @@ public:
return I;
}
+ /// Insert - No-op overload to handle constants.
+ Constant *Insert(Constant *C, const Twine& = "") const {
+ return C;
+ }
+
//===--------------------------------------------------------------------===//
// Instruction creation methods: Terminators
//===--------------------------------------------------------------------===//
@@ -313,7 +365,7 @@ public:
ReturnInst *CreateRet(Value *V) {
return Insert(ReturnInst::Create(Context, V));
}
-
+
/// CreateAggregateRet - Create a sequence of N insertvalue instructions,
/// with one Value from the retVals array each, that build an aggregate
/// return value one value at a time, and a ret instruction to return
@@ -375,10 +427,12 @@ public:
Args+3), Name);
}
/// CreateInvoke - Create an invoke instruction.
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
- BasicBlock *UnwindDest, InputIterator ArgBegin,
- InputIterator ArgEnd, const Twine &Name = "") {
+ BasicBlock *UnwindDest,
+ RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd,
+ const Twine &Name = "") {
return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest,
ArgBegin, ArgEnd), Name);
}
@@ -394,177 +448,179 @@ public:
//===--------------------------------------------------------------------===//
// Instruction creation methods: Binary Operators
//===--------------------------------------------------------------------===//
-
- Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
+private:
+ BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
+ Value *LHS, Value *RHS,
+ const Twine &Name,
+ bool HasNUW, bool HasNSW) {
+ BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+public:
+ Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateAdd(LC, RC);
- return Insert(BinaryOperator::CreateAdd(LHS, RHS), Name);
+ return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
+ HasNUW, HasNSW);
}
Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNSWAdd(LC, RC);
- return Insert(BinaryOperator::CreateNSWAdd(LHS, RHS), Name);
+ return CreateAdd(LHS, RHS, Name, false, true);
}
Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNUWAdd(LC, RC);
- return Insert(BinaryOperator::CreateNUWAdd(LHS, RHS), Name);
+ return CreateAdd(LHS, RHS, Name, true, false);
}
Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFAdd(LC, RC);
+ return Insert(Folder.CreateFAdd(LC, RC), Name);
return Insert(BinaryOperator::CreateFAdd(LHS, RHS), Name);
}
- Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateSub(LC, RC);
- return Insert(BinaryOperator::CreateSub(LHS, RHS), Name);
+ return Insert(Folder.CreateSub(LC, RC), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
+ HasNUW, HasNSW);
}
Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNSWSub(LC, RC);
- return Insert(BinaryOperator::CreateNSWSub(LHS, RHS), Name);
+ return CreateSub(LHS, RHS, Name, false, true);
}
Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNUWSub(LC, RC);
- return Insert(BinaryOperator::CreateNUWSub(LHS, RHS), Name);
+ return CreateSub(LHS, RHS, Name, true, false);
}
Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFSub(LC, RC);
+ return Insert(Folder.CreateFSub(LC, RC), Name);
return Insert(BinaryOperator::CreateFSub(LHS, RHS), Name);
}
- Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateMul(LC, RC);
- return Insert(BinaryOperator::CreateMul(LHS, RHS), Name);
+ return Insert(Folder.CreateMul(LC, RC), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
+ HasNUW, HasNSW);
}
Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNSWMul(LC, RC);
- return Insert(BinaryOperator::CreateNSWMul(LHS, RHS), Name);
+ return CreateMul(LHS, RHS, Name, false, true);
}
Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateNUWMul(LC, RC);
- return Insert(BinaryOperator::CreateNUWMul(LHS, RHS), Name);
+ return CreateMul(LHS, RHS, Name, true, false);
}
Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFMul(LC, RC);
+ return Insert(Folder.CreateFMul(LC, RC), Name);
return Insert(BinaryOperator::CreateFMul(LHS, RHS), Name);
}
- Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateUDiv(LC, RC);
- return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
+ return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
}
- Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
- if (Constant *LC = dyn_cast<Constant>(LHS))
- if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateSDiv(LC, RC);
- return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
+ Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateUDiv(LHS, RHS, Name, true);
}
- Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateExactSDiv(LC, RC);
+ return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
}
+ Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateSDiv(LHS, RHS, Name, true);
+ }
Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFDiv(LC, RC);
+ return Insert(Folder.CreateFDiv(LC, RC), Name);
return Insert(BinaryOperator::CreateFDiv(LHS, RHS), Name);
}
Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateURem(LC, RC);
+ return Insert(Folder.CreateURem(LC, RC), Name);
return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
}
Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateSRem(LC, RC);
+ return Insert(Folder.CreateSRem(LC, RC), Name);
return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
}
Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFRem(LC, RC);
+ return Insert(Folder.CreateFRem(LC, RC), Name);
return Insert(BinaryOperator::CreateFRem(LHS, RHS), Name);
}
- Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateShl(LC, RC);
- return Insert(BinaryOperator::CreateShl(LHS, RHS), Name);
+ return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
+ HasNUW, HasNSW);
}
- Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateShl(LC, RHSC);
- return Insert(BinaryOperator::CreateShl(LHS, RHSC), Name);
+ Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+ HasNUW, HasNSW);
}
- Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateShl(LC, RHSC);
- return Insert(BinaryOperator::CreateShl(LHS, RHSC), Name);
+ Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+ HasNUW, HasNSW);
}
- Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateLShr(LC, RC);
- return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
+ return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
}
- Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateLShr(LC, RHSC);
- return Insert(BinaryOperator::CreateLShr(LHS, RHSC), Name);
+ Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}
- Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateLShr(LC, RHSC);
- return Insert(BinaryOperator::CreateLShr(LHS, RHSC), Name);
+ Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}
- Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateAShr(LC, RC);
- return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
+ return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
}
- Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateAShr(LC, RHSC);
- return Insert(BinaryOperator::CreateAShr(LHS, RHSC), Name);
+ Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}
- Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateAShr(LC, RHSC);
- return Insert(BinaryOperator::CreateAShr(LHS, RHSC), Name);
+ Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}
Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
@@ -572,21 +628,15 @@ public:
if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue())
return LHS; // LHS & -1 -> LHS
if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateAnd(LC, RC);
+ return Insert(Folder.CreateAnd(LC, RC), Name);
}
return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
}
Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateAnd(LC, RHSC);
- return Insert(BinaryOperator::CreateAnd(LHS, RHSC), Name);
+ return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateAnd(LC, RHSC);
- return Insert(BinaryOperator::CreateAnd(LHS, RHSC), Name);
+ return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
@@ -594,73 +644,61 @@ public:
if (RC->isNullValue())
return LHS; // LHS | 0 -> LHS
if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateOr(LC, RC);
+ return Insert(Folder.CreateOr(LC, RC), Name);
}
return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
}
Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateOr(LC, RHSC);
- return Insert(BinaryOperator::CreateOr(LHS, RHSC), Name);
+ return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateOr(LC, RHSC);
- return Insert(BinaryOperator::CreateOr(LHS, RHSC), Name);
+ return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateXor(LC, RC);
+ return Insert(Folder.CreateXor(LC, RC), Name);
return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
}
Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateXor(LC, RHSC);
- return Insert(BinaryOperator::CreateXor(LHS, RHSC), Name);
+ return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
- Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
- if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateXor(LC, RHSC);
- return Insert(BinaryOperator::CreateXor(LHS, RHSC), Name);
+ return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
Value *CreateBinOp(Instruction::BinaryOps Opc,
Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateBinOp(Opc, LC, RC);
+ return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
return Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
}
- Value *CreateNeg(Value *V, const Twine &Name = "") {
+ Value *CreateNeg(Value *V, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateNeg(VC);
- return Insert(BinaryOperator::CreateNeg(V), Name);
+ return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
+ BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
}
Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
- if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateNSWNeg(VC);
- return Insert(BinaryOperator::CreateNSWNeg(V), Name);
+ return CreateNeg(V, Name, false, true);
}
Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
- if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateNUWNeg(VC);
- return Insert(BinaryOperator::CreateNUWNeg(V), Name);
+ return CreateNeg(V, Name, true, false);
}
Value *CreateFNeg(Value *V, const Twine &Name = "") {
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateFNeg(VC);
+ return Insert(Folder.CreateFNeg(VC), Name);
return Insert(BinaryOperator::CreateFNeg(V), Name);
}
Value *CreateNot(Value *V, const Twine &Name = "") {
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateNot(VC);
+ return Insert(Folder.CreateNot(VC), Name);
return Insert(BinaryOperator::CreateNot(V), Name);
}
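
After this rewrite the wrap flags and exactness become ordinary parameters,
and the NSW/NUW/Exact entry points are one-line wrappers over them. A sketch
with A and B assumed to be integer-typed Values:

    Value *S = Builder.CreateAdd(A, B, "s", /*HasNUW=*/false, /*HasNSW=*/true);
    Value *Q = Builder.CreateSDiv(A, B, "q", /*isExact=*/true);
    // Equivalent spellings through the wrappers:
    Value *S2 = Builder.CreateNSWAdd(A, B, "s2");
    Value *Q2 = Builder.CreateExactSDiv(A, B, "q2");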
@@ -686,33 +724,39 @@ public:
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
return Insert(new StoreInst(Val, Ptr, isVolatile));
}
- template<typename InputIterator>
- Value *CreateGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
+ template<typename RandomAccessIterator>
+ Value *CreateGEP(Value *Ptr,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
- InputIterator i;
+ RandomAccessIterator i;
for (i = IdxBegin; i < IdxEnd; ++i)
if (!isa<Constant>(*i))
break;
if (i == IdxEnd)
- return Folder.CreateGetElementPtr(PC, &IdxBegin[0], IdxEnd - IdxBegin);
+ return Insert(Folder.CreateGetElementPtr(PC, &IdxBegin[0],
+ IdxEnd - IdxBegin),
+ Name);
}
return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name);
}
- template<typename InputIterator>
- Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin,
- InputIterator IdxEnd, const Twine &Name = "") {
+ template<typename RandomAccessIterator>
+ Value *CreateInBoundsGEP(Value *Ptr, RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
+ const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
- InputIterator i;
+ RandomAccessIterator i;
for (i = IdxBegin; i < IdxEnd; ++i)
if (!isa<Constant>(*i))
break;
if (i == IdxEnd)
- return Folder.CreateInBoundsGetElementPtr(PC,
- &IdxBegin[0],
- IdxEnd - IdxBegin);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC,
+ &IdxBegin[0],
+ IdxEnd - IdxBegin),
+ Name);
}
return Insert(GetElementPtrInst::CreateInBounds(Ptr, IdxBegin, IdxEnd),
Name);
@@ -720,33 +764,33 @@ public:
Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr))
if (Constant *IC = dyn_cast<Constant>(Idx))
- return Folder.CreateGetElementPtr(PC, &IC, 1);
+ return Insert(Folder.CreateGetElementPtr(PC, &IC, 1), Name);
return Insert(GetElementPtrInst::Create(Ptr, Idx), Name);
}
Value *CreateInBoundsGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr))
if (Constant *IC = dyn_cast<Constant>(Idx))
- return Folder.CreateInBoundsGetElementPtr(PC, &IC, 1);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC, &IC, 1), Name);
return Insert(GetElementPtrInst::CreateInBounds(Ptr, Idx), Name);
}
Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateGetElementPtr(PC, &Idx, 1);
+ return Insert(Folder.CreateGetElementPtr(PC, &Idx, 1), Name);
- return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name);
+ return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name);
}
Value *CreateConstInBoundsGEP1_32(Value *Ptr, unsigned Idx0,
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateInBoundsGetElementPtr(PC, &Idx, 1);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC, &Idx, 1), Name);
return Insert(GetElementPtrInst::CreateInBounds(Ptr, &Idx, &Idx+1), Name);
}
- Value *CreateConstGEP2_32(Value *Ptr, unsigned Idx0, unsigned Idx1,
+ Value *CreateConstGEP2_32(Value *Ptr, unsigned Idx0, unsigned Idx1,
const Twine &Name = "") {
Value *Idxs[] = {
ConstantInt::get(Type::getInt32Ty(Context), Idx0),
@@ -754,9 +798,9 @@ public:
};
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateGetElementPtr(PC, Idxs, 2);
+ return Insert(Folder.CreateGetElementPtr(PC, Idxs, 2), Name);
- return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name);
+ return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name);
}
Value *CreateConstInBoundsGEP2_32(Value *Ptr, unsigned Idx0, unsigned Idx1,
const Twine &Name = "") {
@@ -766,7 +810,7 @@ public:
};
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateInBoundsGetElementPtr(PC, Idxs, 2);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC, Idxs, 2), Name);
return Insert(GetElementPtrInst::CreateInBounds(Ptr, Idxs, Idxs+2), Name);
}
@@ -774,16 +818,16 @@ public:
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateGetElementPtr(PC, &Idx, 1);
+ return Insert(Folder.CreateGetElementPtr(PC, &Idx, 1), Name);
- return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name);
+ return Insert(GetElementPtrInst::Create(Ptr, &Idx, &Idx+1), Name);
}
Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
const Twine &Name = "") {
Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateInBoundsGetElementPtr(PC, &Idx, 1);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC, &Idx, 1), Name);
return Insert(GetElementPtrInst::CreateInBounds(Ptr, &Idx, &Idx+1), Name);
}
@@ -795,9 +839,9 @@ public:
};
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateGetElementPtr(PC, Idxs, 2);
+ return Insert(Folder.CreateGetElementPtr(PC, Idxs, 2), Name);
- return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name);
+ return Insert(GetElementPtrInst::Create(Ptr, Idxs, Idxs+2), Name);
}
Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
const Twine &Name = "") {
@@ -807,14 +851,14 @@ public:
};
if (Constant *PC = dyn_cast<Constant>(Ptr))
- return Folder.CreateInBoundsGetElementPtr(PC, Idxs, 2);
+ return Insert(Folder.CreateInBoundsGetElementPtr(PC, Idxs, 2), Name);
return Insert(GetElementPtrInst::CreateInBounds(Ptr, Idxs, Idxs+2), Name);
}
Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
return CreateConstInBoundsGEP2_32(Ptr, 0, Idx, Name);
}
-
+
/// CreateGlobalStringPtr - Same as CreateGlobalString, but returns a pointer
/// with "i8*" type instead of a pointer to array of i8.
Value *CreateGlobalStringPtr(const char *Str = "", const Twine &Name = "") {
@@ -823,7 +867,7 @@ public:
Value *Args[] = { zero, zero };
return CreateInBoundsGEP(gv, Args, Args+2, Name);
}
-
+
//===--------------------------------------------------------------------===//
// Instruction creation methods: Cast/Conversion Operators
//===--------------------------------------------------------------------===//
@@ -873,7 +917,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateZExtOrBitCast(VC, DestTy);
+ return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
}
Value *CreateSExtOrBitCast(Value *V, const Type *DestTy,
@@ -881,7 +925,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateSExtOrBitCast(VC, DestTy);
+ return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
}
Value *CreateTruncOrBitCast(Value *V, const Type *DestTy,
@@ -889,7 +933,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateTruncOrBitCast(VC, DestTy);
+ return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
}
Value *CreateCast(Instruction::CastOps Op, Value *V, const Type *DestTy,
@@ -897,7 +941,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateCast(Op, VC, DestTy);
+ return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
return Insert(CastInst::Create(Op, V, DestTy), Name);
}
Value *CreatePointerCast(Value *V, const Type *DestTy,
@@ -905,7 +949,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreatePointerCast(VC, DestTy);
+ return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
}
Value *CreateIntCast(Value *V, const Type *DestTy, bool isSigned,
@@ -913,7 +957,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateIntCast(VC, DestTy, isSigned);
+ return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
}
private:
@@ -925,7 +969,7 @@ public:
if (V->getType() == DestTy)
return V;
if (Constant *VC = dyn_cast<Constant>(V))
- return Folder.CreateFPCast(VC, DestTy);
+ return Insert(Folder.CreateFPCast(VC, DestTy), Name);
return Insert(CastInst::CreateFPCast(V, DestTy), Name);
}
@@ -1011,14 +1055,14 @@ public:
const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateICmp(P, LC, RC);
+ return Insert(Folder.CreateICmp(P, LC, RC), Name);
return Insert(new ICmpInst(P, LHS, RHS), Name);
}
Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Folder.CreateFCmp(P, LC, RC);
+ return Insert(Folder.CreateFCmp(P, LC, RC), Name);
return Insert(new FCmpInst(P, LHS, RHS), Name);
}
@@ -1057,9 +1101,9 @@ public:
return Insert(CallInst::Create(Callee, Args, Args+5), Name);
}
- template<typename InputIterator>
- CallInst *CreateCall(Value *Callee, InputIterator ArgBegin,
- InputIterator ArgEnd, const Twine &Name = "") {
+ template<typename RandomAccessIterator>
+ CallInst *CreateCall(Value *Callee, RandomAccessIterator ArgBegin,
+ RandomAccessIterator ArgEnd, const Twine &Name = "") {
return Insert(CallInst::Create(Callee, ArgBegin, ArgEnd), Name);
}
@@ -1068,7 +1112,7 @@ public:
if (Constant *CC = dyn_cast<Constant>(C))
if (Constant *TC = dyn_cast<Constant>(True))
if (Constant *FC = dyn_cast<Constant>(False))
- return Folder.CreateSelect(CC, TC, FC);
+ return Insert(Folder.CreateSelect(CC, TC, FC), Name);
return Insert(SelectInst::Create(C, True, False), Name);
}
@@ -1080,7 +1124,7 @@ public:
const Twine &Name = "") {
if (Constant *VC = dyn_cast<Constant>(Vec))
if (Constant *IC = dyn_cast<Constant>(Idx))
- return Folder.CreateExtractElement(VC, IC);
+ return Insert(Folder.CreateExtractElement(VC, IC), Name);
return Insert(ExtractElementInst::Create(Vec, Idx), Name);
}
@@ -1089,7 +1133,7 @@ public:
if (Constant *VC = dyn_cast<Constant>(Vec))
if (Constant *NC = dyn_cast<Constant>(NewElt))
if (Constant *IC = dyn_cast<Constant>(Idx))
- return Folder.CreateInsertElement(VC, NC, IC);
+ return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
}
@@ -1098,24 +1142,25 @@ public:
if (Constant *V1C = dyn_cast<Constant>(V1))
if (Constant *V2C = dyn_cast<Constant>(V2))
if (Constant *MC = dyn_cast<Constant>(Mask))
- return Folder.CreateShuffleVector(V1C, V2C, MC);
+ return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
}
Value *CreateExtractValue(Value *Agg, unsigned Idx,
const Twine &Name = "") {
if (Constant *AggC = dyn_cast<Constant>(Agg))
- return Folder.CreateExtractValue(AggC, &Idx, 1);
+ return Insert(Folder.CreateExtractValue(AggC, &Idx, 1), Name);
return Insert(ExtractValueInst::Create(Agg, Idx), Name);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
Value *CreateExtractValue(Value *Agg,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &Name = "") {
if (Constant *AggC = dyn_cast<Constant>(Agg))
- return Folder.CreateExtractValue(AggC, IdxBegin, IdxEnd - IdxBegin);
+ return Insert(Folder.CreateExtractValue(AggC, IdxBegin, IdxEnd-IdxBegin),
+ Name);
return Insert(ExtractValueInst::Create(Agg, IdxBegin, IdxEnd), Name);
}
@@ -1123,18 +1168,20 @@ public:
const Twine &Name = "") {
if (Constant *AggC = dyn_cast<Constant>(Agg))
if (Constant *ValC = dyn_cast<Constant>(Val))
- return Folder.CreateInsertValue(AggC, ValC, &Idx, 1);
+ return Insert(Folder.CreateInsertValue(AggC, ValC, &Idx, 1), Name);
return Insert(InsertValueInst::Create(Agg, Val, Idx), Name);
}
- template<typename InputIterator>
+ template<typename RandomAccessIterator>
Value *CreateInsertValue(Value *Agg, Value *Val,
- InputIterator IdxBegin,
- InputIterator IdxEnd,
+ RandomAccessIterator IdxBegin,
+ RandomAccessIterator IdxEnd,
const Twine &Name = "") {
if (Constant *AggC = dyn_cast<Constant>(Agg))
if (Constant *ValC = dyn_cast<Constant>(Val))
- return Folder.CreateInsertValue(AggC, ValC, IdxBegin, IdxEnd-IdxBegin);
+ return Insert(Folder.CreateInsertValue(AggC, ValC, IdxBegin,
+ IdxEnd - IdxBegin),
+ Name);
return Insert(InsertValueInst::Create(Agg, Val, IdxBegin, IdxEnd), Name);
}
diff --git a/contrib/llvm/include/llvm/Support/IRReader.h b/contrib/llvm/include/llvm/Support/IRReader.h
index a44da52..292c001 100644
--- a/contrib/llvm/include/llvm/Support/IRReader.h
+++ b/contrib/llvm/include/llvm/Support/IRReader.h
@@ -19,10 +19,12 @@
#ifndef LLVM_SUPPORT_IRREADER_H
#define LLVM_SUPPORT_IRREADER_H
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Assembly/Parser.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/system_error.h"
namespace llvm {
@@ -56,15 +58,14 @@ namespace llvm {
inline Module *getLazyIRFileModule(const std::string &Filename,
SMDiagnostic &Err,
LLVMContext &Context) {
- std::string ErrMsg;
- MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
- if (F == 0) {
- Err = SMDiagnostic(Filename,
- "Could not open input file: " + ErrMsg);
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
+ Err = SMDiagnostic(Filename,
+ "Could not open input file: " + ec.message());
return 0;
}
- return getLazyIRModule(F, Err, Context);
+ return getLazyIRModule(File.take(), Err, Context);
}
/// If the given MemoryBuffer holds a bitcode image, return a Module
@@ -94,15 +95,14 @@ namespace llvm {
inline Module *ParseIRFile(const std::string &Filename,
SMDiagnostic &Err,
LLVMContext &Context) {
- std::string ErrMsg;
- MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
- if (F == 0) {
- Err = SMDiagnostic(Filename,
- "Could not open input file: " + ErrMsg);
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
+ Err = SMDiagnostic(Filename,
+ "Could not open input file: " + ec.message());
return 0;
}
- return ParseIR(F, Err, Context);
+ return ParseIR(File.take(), Err, Context);
}
}
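
A typical driver-side use of the reworked reader under the new error_code
plumbing (file name and tool name are illustrative, and SMDiagnostic::Print
is assumed available as elsewhere in LLVM):

    llvm::LLVMContext Context;
    llvm::SMDiagnostic Err;
    llvm::Module *M = llvm::ParseIRFile("input.ll", Err, Context);
    if (!M)
      Err.Print("mytool", llvm::errs()); // reports the open/parse failure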
diff --git a/contrib/llvm/include/llvm/System/IncludeFile.h b/contrib/llvm/include/llvm/Support/IncludeFile.h
index 3268ea2..a931972 100644
--- a/contrib/llvm/include/llvm/System/IncludeFile.h
+++ b/contrib/llvm/include/llvm/Support/IncludeFile.h
@@ -1,4 +1,4 @@
-//===- llvm/System/IncludeFile.h - Ensure Linking Of Library ---*- C++ -*-===//
+//===- llvm/Support/IncludeFile.h - Ensure Linking Of Library ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,14 +16,14 @@
#define LLVM_SYSTEM_INCLUDEFILE_H
/// This macro is the public interface that IncludeFile.h exports. This gives
-/// us the option to implement the "link the definition" capability in any
+/// us the option to implement the "link the definition" capability in any
/// manner that we choose. All header files that depend on a specific .cpp
/// file being linked at run time should use this macro instead of the
-/// IncludeFile class directly.
-///
+/// IncludeFile class directly.
+///
/// For example, foo.h would use:<br/>
/// <tt>FORCE_DEFINING_FILE_TO_BE_LINKED(foo)</tt><br/>
-///
+///
/// And, foo.cpp would use:<br/>
/// <tt>DEFINING_FILE_FOR(foo)</tt><br/>
#ifdef __GNUC__
@@ -34,14 +34,14 @@
extern const char name ## LinkVar; \
__attribute__((used)) static const char *const name ## LinkObj = \
&name ## LinkVar; \
- }
+ }
#else
// Otherwise use a constructor call.
#define FORCE_DEFINING_FILE_TO_BE_LINKED(name) \
namespace llvm { \
extern const char name ## LinkVar; \
static const IncludeFile name ## LinkObj ( &name ## LinkVar ); \
- }
+ }
#endif
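
Concretely, the pairing described in the comment above looks like this, with
foo as a placeholder name:

    // In foo.h:
    FORCE_DEFINING_FILE_TO_BE_LINKED(foo)
    // In foo.cpp:
    DEFINING_FILE_FOR(foo)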
/// This macro is the counterpart to FORCE_DEFINING_FILE_TO_BE_LINKED. It should
@@ -53,9 +53,9 @@
namespace llvm {
/// This class is used in the implementation of FORCE_DEFINING_FILE_TO_BE_LINKED
-/// macro to make sure that the implementation of a header file is included
-/// into a tool that uses the header. This is solely
-/// to overcome problems linking .a files and not getting the implementation
+/// macro to make sure that the implementation of a header file is included
+/// into a tool that uses the header. This is solely
+/// to overcome problems linking .a files and not getting the implementation
/// of compilation units we need. This is commonly an issue with the various
/// Passes but also occurs elsewhere in LLVM. We like to use .a files because
/// they link faster and provide the smallest executables. However, sometimes
diff --git a/contrib/llvm/include/llvm/Support/MachO.h b/contrib/llvm/include/llvm/Support/MachO.h
index 4c13177..6841a0f 100644
--- a/contrib/llvm/include/llvm/Support/MachO.h
+++ b/contrib/llvm/include/llvm/Support/MachO.h
@@ -14,7 +14,7 @@
#ifndef LLVM_SUPPORT_MACHO_H
#define LLVM_SUPPORT_MACHO_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
// NOTE: The enums in this file are intentionally named to be different from those
// in the headers in /usr/include/mach (on darwin systems) to avoid conflicts
@@ -23,7 +23,7 @@ namespace llvm {
namespace MachO {
// Enums from <mach-o/loader.h>
enum {
- // Constants for the "magic" field in llvm::MachO::mach_header and
+ // Constants for the "magic" field in llvm::MachO::mach_header and
// llvm::MachO::mach_header_64
HeaderMagic32 = 0xFEEDFACEu, // MH_MAGIC
HeaderMagic32Swapped = 0xCEFAEDFEu, // MH_CIGAM
@@ -71,7 +71,7 @@ namespace llvm {
HeaderFlagBitNoReexportedDylibs = 0x00100000u, // MH_NO_REEXPORTED_DYLIBS
HeaderFlagBitPIE = 0x00200000u, // MH_PIE
HeaderFlagBitDeadStrippableDylib = 0x00400000u, // MH_DEAD_STRIPPABLE_DYLIB
-
+
// Constants for the "cmd" field in llvm::MachO::load_command
LoadCommandDynamicLinkerRequired = 0x80000000u, // LC_REQ_DYLD
LoadCommandSegment32 = 0x00000001u, // LC_SEGMENT
@@ -110,7 +110,7 @@ namespace llvm {
LoadCommandDynamicLinkerInfo = 0x00000022u, // LC_DYLD_INFO
LoadCommandDynamicLinkerInfoOnly = 0x80000022u, // LC_DYLD_INFO_ONLY
LoadCommandDylibLoadUpward = 0x80000023u, // LC_LOAD_UPWARD_DYLIB
-
+
// Constant bits for the "flags" field in llvm::MachO::segment_command
SegmentCommandFlagBitHighVM = 0x1u, // SG_HIGHVM
SegmentCommandFlagBitFixedVMLibrary = 0x2u, // SG_FVMLIB
@@ -243,20 +243,20 @@ namespace llvm {
StabFunction = 0x24u, // N_FUN
StabStaticSymbol = 0x26u, // N_STSYM
StabLocalCommon = 0x28u, // N_LCSYM
- StabBeginSymbol = 0x2Eu, // N_BNSYM
+ StabBeginSymbol = 0x2Eu, // N_BNSYM
StabSourceFileOptions = 0x3Cu, // N_OPT
StabRegisterSymbol = 0x40u, // N_RSYM
StabSourceLine = 0x44u, // N_SLINE
- StabEndSymbol = 0x4Eu, // N_ENSYM
+ StabEndSymbol = 0x4Eu, // N_ENSYM
StabStructureType = 0x60u, // N_SSYM
StabSourceFileName = 0x64u, // N_SO
StabObjectFileName = 0x66u, // N_OSO
StabLocalSymbol = 0x80u, // N_LSYM
StabBeginIncludeFileName = 0x82u, // N_BINCL
StabIncludeFileName = 0x84u, // N_SOL
- StabCompilerParameters = 0x86u, // N_PARAMS
- StabCompilerVersion = 0x88u, // N_VERSION
- StabCompilerOptLevel = 0x8Au, // N_OLEVEL
+ StabCompilerParameters = 0x86u, // N_PARAMS
+ StabCompilerVersion = 0x88u, // N_VERSION
+ StabCompilerOptLevel = 0x8Au, // N_OLEVEL
StabParameter = 0xA0u, // N_PSYM
StabEndIncludeFile = 0xA2u, // N_EINCL
StabAlternateEntry = 0xA4u, // N_ENTRY
@@ -269,9 +269,9 @@ namespace llvm {
StabLength = 0xFEu // N_LENG
};
-
+
// Structs from <mach-o/loader.h>
-
+
struct mach_header {
uint32_t magic;
uint32_t cputype;
@@ -636,12 +636,12 @@ namespace llvm {
};
// Get/Set functions from <mach-o/nlist.h>
-
+
static inline uint16_t GET_LIBRARY_ORDINAL(uint16_t n_desc)
{
return (((n_desc) >> 8u) & 0xffu);
}
-
+
static inline void SET_LIBRARY_ORDINAL(uint16_t &n_desc, uint8_t ordinal)
{
n_desc = (((n_desc) & 0x00ff) | (((ordinal) & 0xff) << 8));
@@ -651,7 +651,7 @@ namespace llvm {
{
return (n_desc >> 8u) & 0x0fu;
}
-
+
static inline void SET_COMM_ALIGN (uint16_t &n_desc, uint8_t align)
{
n_desc = ((n_desc & 0xf0ffu) | ((align & 0x0fu) << 8u));
@@ -662,7 +662,7 @@ namespace llvm {
// Capability bits used in the definition of cpu_type.
CPUArchMask = 0xff000000, // Mask for architecture bits
CPUArchABI64 = 0x01000000, // 64 bit ABI
-
+
// Constants for the cputype field.
CPUTypeI386 = 7,
CPUTypeX86_64 = CPUTypeI386 | CPUArchABI64,
@@ -673,19 +673,21 @@ namespace llvm {
// Constants for the cpusubtype field.
-
+
// X86
CPUSubType_I386_ALL = 3,
CPUSubType_X86_64_ALL = 3,
-
+
// ARM
CPUSubType_ARM_ALL = 0,
CPUSubType_ARM_V4T = 5,
+ CPUSubType_ARM_V5 = 7,
CPUSubType_ARM_V6 = 6,
+ CPUSubType_ARM_V7 = 9,
// PowerPC
CPUSubType_POWERPC_ALL = 0,
-
+
CPUSubType_SPARC_ALL = 0
};
} // end namespace MachO
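
A small sanity check of the nlist helpers above, showing the n_desc bit layout they encode (the function name is illustrative):

    #include "llvm/Support/MachO.h"
    #include <cassert>

    void checkOrdinal() {
      uint16_t n_desc = 0;
      // SET stores the ordinal in the high byte; GET reads it back.
      llvm::MachO::SET_LIBRARY_ORDINAL(n_desc, 3);  // n_desc == 0x0300
      assert(llvm::MachO::GET_LIBRARY_ORDINAL(n_desc) == 3);
    }
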
diff --git a/contrib/llvm/include/llvm/Support/ManagedStatic.h b/contrib/llvm/include/llvm/Support/ManagedStatic.h
index b8e2235..53e73ad 100644
--- a/contrib/llvm/include/llvm/Support/ManagedStatic.h
+++ b/contrib/llvm/include/llvm/Support/ManagedStatic.h
@@ -14,8 +14,8 @@
#ifndef LLVM_SUPPORT_MANAGED_STATIC_H
#define LLVM_SUPPORT_MANAGED_STATIC_H
-#include "llvm/System/Atomic.h"
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Threading.h"
namespace llvm {
@@ -91,12 +91,6 @@ public:
}
};
-template<void (*CleanupFn)(void*)>
-class ManagedCleanup : public ManagedStaticBase {
-public:
- void Register() { RegisterManagedStatic(0, CleanupFn); }
-};
-
/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
void llvm_shutdown();
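
With ManagedCleanup removed, the remaining pattern is ManagedStatic plus llvm_shutdown; a minimal sketch (the vector payload is arbitrary):

    #include "llvm/Support/ManagedStatic.h"
    #include <vector>

    // Constructed lazily on first dereference; destroyed by llvm_shutdown().
    static llvm::ManagedStatic<std::vector<int> > Registry;

    int main() {
      Registry->push_back(42);   // first use triggers construction
      llvm::llvm_shutdown();     // tears down every live ManagedStatic
      return 0;
    }
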
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index 982813f..4627557 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -14,7 +14,7 @@
#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/SwapByteOrder.h"
namespace llvm {
@@ -70,6 +70,18 @@ inline bool isUInt<32>(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
+/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
+/// bit width.
+inline bool isUIntN(unsigned N, uint64_t x) {
+ return x == (x & (~0ULL >> (64 - N)));
+}
+
+/// isIntN - Checks if a signed integer fits into the given (dynamic)
+/// bit width.
+inline bool isIntN(unsigned N, int64_t x) {
+ return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+
/// isMask_32 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (32 bit
/// version). Ex. isMask_32(0x0000FFFFU) == true.
@@ -112,47 +124,19 @@ inline bool isPowerOf2_64(uint64_t Value) {
/// ByteSwap_16 - This function returns a byte-swapped representation of the
/// 16-bit argument, Value.
inline uint16_t ByteSwap_16(uint16_t Value) {
-#if defined(_MSC_VER) && !defined(_DEBUG)
- // The DLL version of the runtime lacks these functions (bug!?), but in a
- // release build they're replaced with BSWAP instructions anyway.
- return _byteswap_ushort(Value);
-#else
- uint16_t Hi = Value << 8;
- uint16_t Lo = Value >> 8;
- return Hi | Lo;
-#endif
+ return sys::SwapByteOrder_16(Value);
}
/// ByteSwap_32 - This function returns a byte-swapped representation of the
/// 32-bit argument, Value.
inline uint32_t ByteSwap_32(uint32_t Value) {
-#if defined(__llvm__) || \
- (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
- return __builtin_bswap32(Value);
-#elif defined(_MSC_VER) && !defined(_DEBUG)
- return _byteswap_ulong(Value);
-#else
- uint32_t Byte0 = Value & 0x000000FF;
- uint32_t Byte1 = Value & 0x0000FF00;
- uint32_t Byte2 = Value & 0x00FF0000;
- uint32_t Byte3 = Value & 0xFF000000;
- return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
-#endif
+ return sys::SwapByteOrder_32(Value);
}
/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
-#if defined(__llvm__) || \
- (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
- return __builtin_bswap64(Value);
-#elif defined(_MSC_VER) && !defined(_DEBUG)
- return _byteswap_uint64(Value);
-#else
- uint64_t Hi = ByteSwap_32(uint32_t(Value));
- uint32_t Lo = ByteSwap_32(uint32_t(Value >> 32));
- return (Hi << 32) | Lo;
-#endif
+ return sys::SwapByteOrder_64(Value);
}
/// CountLeadingZeros_32 - this function performs the platform optimal form of
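
A few spot checks of the new dynamic-width predicates and the thinned-out byte-swap wrappers (values chosen arbitrarily):

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void spotChecks() {
      assert(llvm::isUIntN(8, 255));   // 255 fits in 8 unsigned bits
      assert(!llvm::isUIntN(8, 256));  // 256 does not
      assert(llvm::isIntN(8, -128));   // INT8_MIN fits in 8 signed bits
      assert(!llvm::isIntN(8, 128));   // 128 does not
      assert(llvm::ByteSwap_16(0x1234) == 0x3412);
    }
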
diff --git a/contrib/llvm/include/llvm/System/Memory.h b/contrib/llvm/include/llvm/Support/Memory.h
index 2dd36e8..9c3f85b 100644
--- a/contrib/llvm/include/llvm/System/Memory.h
+++ b/contrib/llvm/include/llvm/Support/Memory.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Memory.h - Memory Support --------------------*- C++ -*-===//
+//===- llvm/Support/Memory.h - Memory Support --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_MEMORY_H
#define LLVM_SYSTEM_MEMORY_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
namespace llvm {
@@ -51,7 +51,7 @@ namespace sys {
///
/// On success, this returns a non-null memory block, otherwise it returns
/// a null memory block and fills in *ErrMsg.
- ///
+ ///
/// @brief Allocate Read/Write/Execute memory.
static MemoryBlock AllocateRWX(size_t NumBytes,
const MemoryBlock *NearBlock,
@@ -65,8 +65,8 @@ namespace sys {
/// in *ErrMsg.
/// @brief Release Read/Write/Execute memory.
static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0);
-
-
+
+
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
@@ -82,11 +82,11 @@ namespace sys {
/// boundaries, and the JIT internal allocations are not page aligned.
static bool setWritable (MemoryBlock &M, std::string *ErrMsg = 0);
- /// setRangeExecutable - Mark the page containing a range of addresses
+ /// setRangeExecutable - Mark the page containing a range of addresses
/// as executable.
static bool setRangeExecutable(const void *Addr, size_t Size);
- /// setRangeWritable - Mark the page containing a range of addresses
+ /// setRangeWritable - Mark the page containing a range of addresses
/// as writable.
static bool setRangeWritable(const void *Addr, size_t Size);
};
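
A hedged sketch of the allocate/flush/release cycle a JIT client would drive through this interface, assuming the usual LLVM true-on-error return convention for ReleaseRWX (emitStub is illustrative):

    #include "llvm/Support/Memory.h"
    #include <string>

    bool emitStub() {
      std::string Err;
      llvm::sys::MemoryBlock MB =
          llvm::sys::Memory::AllocateRWX(4096, 0, &Err);
      if (MB.base() == 0)
        return false;                 // Err carries the reason
      // ... copy generated machine code into MB.base() here ...
      llvm::sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
      return !llvm::sys::Memory::ReleaseRWX(MB, &Err); // true on error
    }
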
diff --git a/contrib/llvm/include/llvm/Support/MemoryBuffer.h b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
index 8a41aa5..b6243b7 100644
--- a/contrib/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
@@ -15,12 +15,13 @@
#define LLVM_SUPPORT_MEMORYBUFFER_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
-#include <string>
-#include <sys/stat.h>
+#include "llvm/Support/DataTypes.h"
namespace llvm {
+class error_code;
+template<class T> class OwningPtr;
+
/// MemoryBuffer - This interface provides simple read-only access to a block
/// of memory, and provides simple methods for reading files and standard input
/// into a memory buffer. In addition to basic access to the characters in the
@@ -47,8 +48,8 @@ public:
const char *getBufferEnd() const { return BufferEnd; }
size_t getBufferSize() const { return BufferEnd-BufferStart; }
- StringRef getBuffer() const {
- return StringRef(BufferStart, getBufferSize());
+ StringRef getBuffer() const {
+ return StringRef(BufferStart, getBufferSize());
}
/// getBufferIdentifier - Return an identifier for this buffer, typically the
@@ -61,23 +62,26 @@ public:
/// MemoryBuffer if successful, otherwise returning null. If FileSize is
/// specified, this means that the client knows that the file exists and that
/// it has the specified size.
- static MemoryBuffer *getFile(StringRef Filename,
- std::string *ErrStr = 0,
- int64_t FileSize = -1,
- struct stat *FileInfo = 0);
- static MemoryBuffer *getFile(const char *Filename,
- std::string *ErrStr = 0,
- int64_t FileSize = -1,
- struct stat *FileInfo = 0);
+ static error_code getFile(StringRef Filename, OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize = -1);
+ static error_code getFile(const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize = -1);
+
+ /// getOpenFile - Given an already-open file descriptor, read the file and
+ /// return a MemoryBuffer.
+ static error_code getOpenFile(int FD, const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize = -1);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
- /// that EndPtr[0] must be a null byte and be accessible!
+ /// that InputData must be null terminated.
static MemoryBuffer *getMemBuffer(StringRef InputData,
StringRef BufferName = "");
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
- /// copying the contents and taking ownership of it. This has no requirements
- /// on EndPtr[0].
+ /// copying the contents and taking ownership of it. InputData does not
+ /// have to be null terminated.
static MemoryBuffer *getMemBufferCopy(StringRef InputData,
StringRef BufferName = "");
@@ -95,21 +99,19 @@ public:
StringRef BufferName = "");
/// getSTDIN - Read all of stdin into a file buffer, and return it.
- /// If an error occurs, this returns null and fills in *ErrStr with a reason.
- static MemoryBuffer *getSTDIN(std::string *ErrStr = 0);
+ /// If an error occurs, this returns null and sets ec.
+ static error_code getSTDIN(OwningPtr<MemoryBuffer> &result);
/// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin
- /// if the Filename is "-". If an error occurs, this returns null and fills
- /// in *ErrStr with a reason.
- static MemoryBuffer *getFileOrSTDIN(StringRef Filename,
- std::string *ErrStr = 0,
- int64_t FileSize = -1,
- struct stat *FileInfo = 0);
- static MemoryBuffer *getFileOrSTDIN(const char *Filename,
- std::string *ErrStr = 0,
- int64_t FileSize = -1,
- struct stat *FileInfo = 0);
+ /// if the Filename is "-". If an error occurs, this returns null and sets
+ /// ec.
+ static error_code getFileOrSTDIN(StringRef Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize = -1);
+ static error_code getFileOrSTDIN(const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize = -1);
};
} // end namespace llvm
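
Callers migrate from the old null-or-buffer returns to the error_code/OwningPtr pair; a minimal sketch of the new shape (openAndSize is illustrative):

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/system_error.h"

    size_t openAndSize(const char *Filename) {
      llvm::OwningPtr<llvm::MemoryBuffer> Buf;
      if (llvm::error_code ec = llvm::MemoryBuffer::getFile(Filename, Buf))
        return 0;                      // ec.message() describes the failure
      return Buf->getBufferSize();
    }
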
diff --git a/contrib/llvm/include/llvm/Support/MemoryObject.h b/contrib/llvm/include/llvm/Support/MemoryObject.h
index e193ca2..dec0f13 100644
--- a/contrib/llvm/include/llvm/Support/MemoryObject.h
+++ b/contrib/llvm/include/llvm/Support/MemoryObject.h
@@ -10,7 +10,7 @@
#ifndef MEMORYOBJECT_H
#define MEMORYOBJECT_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/System/Mutex.h b/contrib/llvm/include/llvm/Support/Mutex.h
index 71d1006..42ea630 100644
--- a/contrib/llvm/include/llvm/System/Mutex.h
+++ b/contrib/llvm/include/llvm/Support/Mutex.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
+//===- llvm/Support/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_MUTEX_H
#define LLVM_SYSTEM_MUTEX_H
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Threading.h"
#include <cassert>
namespace llvm
@@ -79,9 +79,9 @@ namespace llvm
void operator=(const MutexImpl &);
/// @}
};
-
-
- /// SmartMutex - A mutex with a compile time constant parameter that
+
+
+ /// SmartMutex - A mutex with a compile time constant parameter that
/// indicates whether this mutex should become a no-op when we're not
/// running in multithreaded mode.
template<bool mt_only>
@@ -91,7 +91,7 @@ namespace llvm
public:
explicit SmartMutex(bool rec = true) :
MutexImpl(rec), acquired(0), recursive(rec) { }
-
+
bool acquire() {
if (!mt_only || llvm_is_multithreaded()) {
return MutexImpl::acquire();
@@ -124,29 +124,29 @@ namespace llvm
return MutexImpl::tryacquire();
else return true;
}
-
+
private:
SmartMutex(const SmartMutex<mt_only> & original);
void operator=(const SmartMutex<mt_only> &);
};
-
+
/// Mutex - A standard, always enforced mutex.
typedef SmartMutex<false> Mutex;
-
+
template<bool mt_only>
class SmartScopedLock {
SmartMutex<mt_only>& mtx;
-
+
public:
SmartScopedLock(SmartMutex<mt_only>& m) : mtx(m) {
mtx.acquire();
}
-
+
~SmartScopedLock() {
mtx.release();
}
};
-
+
typedef SmartScopedLock<false> ScopedLock;
}
}
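
At the call site the SmartMutex/SmartScopedLock pair reduces to plain RAII locking; for example:

    #include "llvm/Support/Mutex.h"

    static llvm::sys::Mutex CounterLock;
    static unsigned Counter = 0;

    unsigned nextID() {
      llvm::sys::ScopedLock Guard(CounterLock); // acquire() in the ctor
      return ++Counter;                         // release() when Guard dies
    }
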
diff --git a/contrib/llvm/include/llvm/Support/MutexGuard.h b/contrib/llvm/include/llvm/Support/MutexGuard.h
index 9958b97..cd13bfe 100644
--- a/contrib/llvm/include/llvm/Support/MutexGuard.h
+++ b/contrib/llvm/include/llvm/Support/MutexGuard.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SUPPORT_MUTEXGUARD_H
#define LLVM_SUPPORT_MUTEXGUARD_H
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
namespace llvm {
/// Instances of this class acquire a given Mutex Lock when constructed and
diff --git a/contrib/llvm/include/llvm/Support/NoFolder.h b/contrib/llvm/include/llvm/Support/NoFolder.h
index 01256e1..d7b5b42 100644
--- a/contrib/llvm/include/llvm/Support/NoFolder.h
+++ b/contrib/llvm/include/llvm/Support/NoFolder.h
@@ -15,8 +15,7 @@
// llvm/Analysis/ConstantFolding.h.
//
// Note: since it is not actually possible to create unfolded constants, this
-// class returns values rather than constants. The values do not have names,
-// even if names were provided to IRBuilder, which may be confusing.
+// class returns instructions rather than constants.
//
//===----------------------------------------------------------------------===//
@@ -30,7 +29,7 @@ namespace llvm {
class LLVMContext;
-/// NoFolder - Create "constants" (actually, values) with no folding.
+/// NoFolder - Create "constants" (actually, instructions) with no folding.
class NoFolder {
public:
explicit NoFolder(LLVMContext &) {}
@@ -39,84 +38,87 @@ public:
// Binary Operators
//===--------------------------------------------------------------------===//
- Value *CreateAdd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateAdd(LHS, RHS);
}
- Value *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWAdd(LHS, RHS);
}
- Value *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWAdd(LHS, RHS);
}
- Value *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFAdd(LHS, RHS);
}
- Value *CreateSub(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateSub(LHS, RHS);
}
- Value *CreateNSWSub(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWSub(LHS, RHS);
}
- Value *CreateNUWSub(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWSub(LHS, RHS);
}
- Value *CreateFSub(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFSub(LHS, RHS);
}
- Value *CreateMul(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateMul(LHS, RHS);
}
- Value *CreateNSWMul(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNSWMul(LHS, RHS);
}
- Value *CreateNUWMul(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateNUWMul(LHS, RHS);
}
- Value *CreateFMul(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFMul(LHS, RHS);
}
- Value *CreateUDiv(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateUDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateUDiv(LHS, RHS);
}
- Value *CreateSDiv(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateExactUDiv(LHS, RHS);
+ }
+ Instruction *CreateSDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateSDiv(LHS, RHS);
}
- Value *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateExactSDiv(LHS, RHS);
}
- Value *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFDiv(LHS, RHS);
}
- Value *CreateURem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateURem(LHS, RHS);
}
- Value *CreateSRem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateSRem(LHS, RHS);
}
- Value *CreateFRem(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateFRem(LHS, RHS);
}
- Value *CreateShl(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateShl(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateShl(LHS, RHS);
}
- Value *CreateLShr(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateLShr(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateLShr(LHS, RHS);
}
- Value *CreateAShr(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateAShr(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateAShr(LHS, RHS);
}
- Value *CreateAnd(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateAnd(LHS, RHS);
}
- Value *CreateOr(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateOr(LHS, RHS);
}
- Value *CreateXor(Constant *LHS, Constant *RHS) const {
+ Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
return BinaryOperator::CreateXor(LHS, RHS);
}
- Value *CreateBinOp(Instruction::BinaryOps Opc,
- Constant *LHS, Constant *RHS) const {
+ Instruction *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
return BinaryOperator::Create(Opc, LHS, RHS);
}
@@ -124,16 +126,19 @@ public:
// Unary Operators
//===--------------------------------------------------------------------===//
- Value *CreateNeg(Constant *C) const {
+ Instruction *CreateNeg(Constant *C) const {
return BinaryOperator::CreateNeg(C);
}
- Value *CreateNSWNeg(Constant *C) const {
+ Instruction *CreateNSWNeg(Constant *C) const {
return BinaryOperator::CreateNSWNeg(C);
}
- Value *CreateNUWNeg(Constant *C) const {
+ Instruction *CreateNUWNeg(Constant *C) const {
return BinaryOperator::CreateNUWNeg(C);
}
- Value *CreateNot(Constant *C) const {
+ Instruction *CreateFNeg(Constant *C) const {
+ return BinaryOperator::CreateFNeg(C);
+ }
+ Instruction *CreateNot(Constant *C) const {
return BinaryOperator::CreateNot(C);
}
@@ -145,8 +150,8 @@ public:
unsigned NumIdx) const {
return ConstantExpr::getGetElementPtr(C, IdxList, NumIdx);
}
- Value *CreateGetElementPtr(Constant *C, Value* const *IdxList,
- unsigned NumIdx) const {
+ Instruction *CreateGetElementPtr(Constant *C, Value* const *IdxList,
+ unsigned NumIdx) const {
return GetElementPtrInst::Create(C, IdxList, IdxList+NumIdx);
}
@@ -154,8 +159,8 @@ public:
unsigned NumIdx) const {
return ConstantExpr::getInBoundsGetElementPtr(C, IdxList, NumIdx);
}
- Value *CreateInBoundsGetElementPtr(Constant *C, Value* const *IdxList,
- unsigned NumIdx) const {
+ Instruction *CreateInBoundsGetElementPtr(Constant *C, Value* const *IdxList,
+ unsigned NumIdx) const {
return GetElementPtrInst::CreateInBounds(C, IdxList, IdxList+NumIdx);
}
@@ -163,23 +168,51 @@ public:
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
- Value *CreateCast(Instruction::CastOps Op, Constant *C,
+ Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
const Type *DestTy) const {
return CastInst::Create(Op, C, DestTy);
}
- Value *CreateIntCast(Constant *C, const Type *DestTy,
+ Instruction *CreatePointerCast(Constant *C, const Type *DestTy) const {
+ return CastInst::CreatePointerCast(C, DestTy);
+ }
+ Instruction *CreateIntCast(Constant *C, const Type *DestTy,
bool isSigned) const {
return CastInst::CreateIntegerCast(C, DestTy, isSigned);
}
+ Instruction *CreateFPCast(Constant *C, const Type *DestTy) const {
+ return CastInst::CreateFPCast(C, DestTy);
+ }
+
+ Instruction *CreateBitCast(Constant *C, const Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Instruction *CreateIntToPtr(Constant *C, const Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Instruction *CreatePtrToInt(Constant *C, const Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Instruction *CreateZExtOrBitCast(Constant *C, const Type *DestTy) const {
+ return CastInst::CreateZExtOrBitCast(C, DestTy);
+ }
+ Instruction *CreateSExtOrBitCast(Constant *C, const Type *DestTy) const {
+ return CastInst::CreateSExtOrBitCast(C, DestTy);
+ }
+
+ Instruction *CreateTruncOrBitCast(Constant *C, const Type *DestTy) const {
+ return CastInst::CreateTruncOrBitCast(C, DestTy);
+ }
//===--------------------------------------------------------------------===//
// Compare Instructions
//===--------------------------------------------------------------------===//
- Value *CreateICmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const {
+ Instruction *CreateICmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
return new ICmpInst(P, LHS, RHS);
}
- Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const {
+ Instruction *CreateFCmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
return new FCmpInst(P, LHS, RHS);
}
@@ -187,30 +220,33 @@ public:
// Other Instructions
//===--------------------------------------------------------------------===//
- Value *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+ Instruction *CreateSelect(Constant *C,
+ Constant *True, Constant *False) const {
return SelectInst::Create(C, True, False);
}
- Value *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const {
return ExtractElementInst::Create(Vec, Idx);
}
- Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
- Constant *Idx) const {
+ Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
return InsertElementInst::Create(Vec, NewElt, Idx);
}
- Value *CreateShuffleVector(Constant *V1, Constant *V2, Constant *Mask) const {
+ Instruction *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
return new ShuffleVectorInst(V1, V2, Mask);
}
- Value *CreateExtractValue(Constant *Agg, const unsigned *IdxList,
- unsigned NumIdx) const {
+ Instruction *CreateExtractValue(Constant *Agg, const unsigned *IdxList,
+ unsigned NumIdx) const {
return ExtractValueInst::Create(Agg, IdxList, IdxList+NumIdx);
}
- Value *CreateInsertValue(Constant *Agg, Constant *Val,
- const unsigned *IdxList, unsigned NumIdx) const {
+ Instruction *CreateInsertValue(Constant *Agg, Constant *Val,
+ const unsigned *IdxList,
+ unsigned NumIdx) const {
return InsertValueInst::Create(Agg, Val, IdxList, IdxList+NumIdx);
}
};
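
A sketch of what the Instruction-returning folder buys a client: plugged into IRBuilder, constant operands come out as real instructions instead of folded ConstantExprs (header paths as of this tree; emitUnfolded and the operand values are illustrative):

    #include "llvm/Support/IRBuilder.h"
    #include "llvm/Support/NoFolder.h"

    // With the default ConstantFolder, 2 + 3 folds to the constant 5.
    // With NoFolder it is emitted as an explicit 'add' instruction.
    void emitUnfolded(llvm::BasicBlock *BB) {
      llvm::IRBuilder<true, llvm::NoFolder> Builder(BB);
      llvm::Value *Sum =
          Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3), "sum");
      (void)Sum;
    }
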
diff --git a/contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp b/contrib/llvm/include/llvm/Support/Path.h
index d11a997..196eecc 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ManagerRegistry.cpp
+++ b/contrib/llvm/include/llvm/Support/Path.h
@@ -1,4 +1,4 @@
-//===- ManagerRegistry.cpp - Pluggble Analyzer module creators --*- C++ -*-===//
+//===- llvm/Support/Path.h - Path Operating System Concept ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,14 +7,10 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the pluggable analyzer module creators.
+// This file currently includes both PathV1 and PathV2 to facilitate moving
+// clients over to the new interface.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/ManagerRegistry.h"
-
-using namespace clang;
-
-StoreManagerCreator ManagerRegistry::StoreMgrCreator = 0;
-
-ConstraintManagerCreator ManagerRegistry::ConstraintMgrCreator = 0;
+#include "llvm/Support/PathV1.h"
+#include "llvm/Support/PathV2.h"
diff --git a/contrib/llvm/include/llvm/System/Path.h b/contrib/llvm/include/llvm/Support/PathV1.h
index 23b18d4..a1c3f6a 100644
--- a/contrib/llvm/include/llvm/System/Path.h
+++ b/contrib/llvm/include/llvm/Support/PathV1.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Path.h - Path Operating System Concept -------*- C++ -*-===//
+//===- llvm/Support/PathV1.h - Path Operating System Concept ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,11 +15,17 @@
#define LLVM_SYSTEM_PATH_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/TimeValue.h"
#include <set>
#include <string>
#include <vector>
+#define LLVM_PATH_DEPRECATED_MSG(replacement) \
+ "PathV1 has been deprecated and will be removed as soon as all LLVM and" \
+ " Clang clients have been moved over to PathV2. Please use `" #replacement \
+ "` from PathV2 instead."
+
namespace llvm {
namespace sys {
@@ -89,10 +95,11 @@ namespace sys {
/// Construct a path to the root directory of the file system. The root
/// directory is a top level directory above which there are no more
/// directories. For example, on UNIX, the root directory is /. On Windows
- /// it is C:\. Other operating systems may have different notions of
+ /// it is file:///. Other operating systems may have different notions of
/// what the root directory is or none at all. In that case, a consistent
/// default root directory will be used.
- static Path GetRootDirectory();
+ LLVM_ATTRIBUTE_DEPRECATED(static Path GetRootDirectory(),
+ LLVM_PATH_DEPRECATED_MSG(NOTHING));
/// Construct a path to a unique temporary directory that is created in
/// a "standard" place for the operating system. The directory is
@@ -100,14 +107,12 @@ namespace sys {
/// cannot be created, the function will throw an exception.
/// @returns an invalid path (empty) on error
/// @param ErrMsg Optional place for an error message if an error occurs
- /// @brief Constrct a path to an new, unique, existing temporary
+ /// @brief Construct a path to a new, unique, existing temporary
/// directory.
static Path GetTemporaryDirectory(std::string* ErrMsg = 0);
/// Construct a vector of sys::Path that contains the "standard" system
- /// library paths suitable for linking into programs. This function *must*
- /// return the value of LLVM_LIB_SEARCH_PATH as the first item in \p Paths
- /// if that environment variable is set and it references a directory.
+ /// library paths suitable for linking into programs.
/// @brief Construct a path to the system library directory
static void GetSystemLibraryPaths(std::vector<sys::Path>& Paths);
@@ -154,6 +159,12 @@ namespace sys {
/// @brief Returns the current working directory.
static Path GetCurrentDirectory();
+ /// Return the suffix commonly used on file names that contain an
+ /// executable.
+ /// @returns The executable file suffix for the current platform.
+ /// @brief Return the executable file suffix.
+ static StringRef GetEXESuffix();
+
/// Return the suffix commonly used on file names that contain a shared
/// object, shared archive, or dynamic link library. Such files are
/// linked at runtime into a process and their code images are shared
@@ -250,23 +261,27 @@ namespace sys {
bool isEmpty() const { return path.empty(); }
/// This function returns the last component of the path name. The last
- /// component is the file or directory name occuring after the last
+ /// component is the file or directory name occurring after the last
/// directory separator. If no directory separator is present, the entire
/// path name is returned (i.e. same as toString).
/// @returns StringRef containing the last component of the path name.
/// @brief Returns the last component of the path name.
- StringRef getLast() const;
+ LLVM_ATTRIBUTE_DEPRECATED(
+ StringRef getLast() const,
+ LLVM_PATH_DEPRECATED_MSG(path::filename));
/// This function strips off the path and suffix of the file or directory
/// name and returns just the basename. For example /a/foo.bar would cause
/// this function to return "foo".
/// @returns StringRef containing the basename of the path
/// @brief Get the base name of the path
- StringRef getBasename() const;
+ LLVM_ATTRIBUTE_DEPRECATED(StringRef getBasename() const,
+ LLVM_PATH_DEPRECATED_MSG(path::stem));
/// This function strips off the suffix of the path beginning with the
/// path separator ('/' on Unix, '\' on Windows) and returns the result.
- StringRef getDirname() const;
+ LLVM_ATTRIBUTE_DEPRECATED(StringRef getDirname() const,
+ LLVM_PATH_DEPRECATED_MSG(path::parent_path));
/// This function strips off the path and basename(up to and
/// including the last dot) of the file or directory name and
@@ -274,7 +289,8 @@ namespace sys {
/// this function to return "bar".
/// @returns StringRef containing the suffix of the path
/// @brief Get the suffix of the path
- StringRef getSuffix() const;
+ LLVM_ATTRIBUTE_DEPRECATED(StringRef getSuffix() const,
+ LLVM_PATH_DEPRECATED_MSG(path::extension));
/// Obtain a 'C' string for the path name.
/// @returns a 'C' string containing the path name.
@@ -296,12 +312,16 @@ namespace sys {
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
+//FIXME: LLVM_ATTRIBUTE_DEPRECATED(
bool isAbsolute() const;
+//FIXME: LLVM_PATH_DEPRECATED_MSG(path::is_absolute));
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
- static bool isAbsolute(const char *NameStart, unsigned NameLen);
+ LLVM_ATTRIBUTE_DEPRECATED(
+ static bool isAbsolute(const char *NameStart, unsigned NameLen),
+ LLVM_PATH_DEPRECATED_MSG(path::is_absolute));
/// This function opens the file associated with the path name provided by
/// the Path object and reads its magic number. If the magic number at the
@@ -309,7 +329,8 @@ namespace sys {
/// cases (file not found, file not accessible, etc.) it returns false.
/// @returns true if the magic number of the file matches \p magic.
/// @brief Determine if file has a specific magic number
- bool hasMagicNumber(StringRef magic) const;
+ LLVM_ATTRIBUTE_DEPRECATED(bool hasMagicNumber(StringRef magic) const,
+ LLVM_PATH_DEPRECATED_MSG(fs::has_magic));
/// This function retrieves the first \p len bytes of the file associated
/// with \p this. These bytes are returned as the "magic number" in the
@@ -342,19 +363,39 @@ namespace sys {
/// @brief Determine if the path references a dynamic library.
bool isDynamicLibrary() const;
+ /// This function determines if the path name in the object references a
+ /// native object file by looking at its magic number. The term object
+ /// file is defined as "an organized collection of separate, named
+ /// sequences of binary data." This covers the obvious file formats such
+ /// as COFF and ELF, but it also includes llvm ir bitcode, archives,
+ /// libraries, etc...
+ /// @returns true if the file starts with the magic number for an object
+ /// file.
+ /// @brief Determine if the path references an object file.
+ bool isObjectFile() const;
+
/// This function determines if the path name references an existing file
/// or directory in the file system.
/// @returns true if the pathname references an existing file or
/// directory.
/// @brief Determines if the path is a file or directory in
/// the file system.
- bool exists() const;
+ LLVM_ATTRIBUTE_DEPRECATED(bool exists() const,
+ LLVM_PATH_DEPRECATED_MSG(fs::exists));
- /// This function determines if the path name refences an
+ /// This function determines if the path name references an
/// existing directory.
/// @returns true if the pathname references an existing directory.
- /// @brief Determins if the path is a directory in the file system.
- bool isDirectory() const;
+ /// @brief Determines if the path is a directory in the file system.
+ LLVM_ATTRIBUTE_DEPRECATED(bool isDirectory() const,
+ LLVM_PATH_DEPRECATED_MSG(fs::is_directory));
+
+ /// This function determines if the path name references an
+ /// existing symbolic link.
+ /// @returns true if the pathname references an existing symlink.
+ /// @brief Determines if the path is a symlink in the file system.
+ LLVM_ATTRIBUTE_DEPRECATED(bool isSymLink() const,
+ LLVM_PATH_DEPRECATED_MSG(fs::is_symlink));
/// This function determines if the path name references a readable file
/// or directory in the file system. This function checks for
@@ -374,9 +415,9 @@ namespace sys {
/// in the file system.
bool canWrite() const;
- /// This function checks that what we're trying to work only on a regular file.
- /// Check for things like /dev/null, any block special file,
- /// or other things that aren't "regular" regular files.
+ /// This function checks that we are trying to work only on a regular
+ /// file. Check for things like /dev/null, any block special file, or
+ /// other things that aren't "regular" regular files.
/// @returns true if the file is S_ISREG.
/// @brief Determines if the file is a regular file
bool isRegularFile() const;
@@ -434,13 +475,9 @@ namespace sys {
bool appendComponent(StringRef component);
/// A period and the \p suffix are appended to the end of the pathname.
- /// The precondition for this function is that the Path reference a file
- /// name (i.e. isFile() returns true). If the Path is not a file, no
- /// action is taken and the function returns false. If the path would
- /// become invalid for the host operating system, false is returned.
- /// @returns false if the suffix could not be added, true if it was.
+ /// When the \p suffix is empty, no action is performed.
/// @brief Adds a period and the \p suffix to the end of the pathname.
- bool appendSuffix(StringRef suffix);
+ void appendSuffix(StringRef suffix);
/// The suffix of the filename is erased. The suffix begins with and
/// includes the last . character in the filename after the last directory
@@ -462,7 +499,9 @@ namespace sys {
/// The current Path name is made absolute by prepending the
/// current working directory if necessary.
- void makeAbsolute();
+ LLVM_ATTRIBUTE_DEPRECATED(
+ void makeAbsolute(),
+ LLVM_PATH_DEPRECATED_MSG(fs::make_absolute));
/// @}
/// @name Disk Mutators
@@ -594,7 +633,7 @@ namespace sys {
/// efficiency. First, the file status requires additional space and the space
/// is incorporated directly into PathWithStatus without an additional malloc.
/// Second, obtaining status information is an expensive operation on most
- /// operating systems so we want to be careful and explicity about where we
+ /// operating systems so we want to be careful and explicit about where we
/// allow this operation in LLVM.
/// @brief Path with file status class.
class PathWithStatus : public Path {
@@ -688,7 +727,7 @@ namespace sys {
Mach_O_Executable_FileType, ///< Mach-O Executable
Mach_O_FixedVirtualMemorySharedLib_FileType, ///< Mach-O Shared Lib, FVM
Mach_O_Core_FileType, ///< Mach-O Core File
- Mach_O_PreloadExectuable_FileType, ///< Mach-O Preloaded Executable
+ Mach_O_PreloadExecutable_FileType, ///< Mach-O Preloaded Executable
Mach_O_DynamicallyLinkedSharedLib_FileType, ///< Mach-O dynlinked shared lib
Mach_O_DynamicLinker_FileType, ///< The Mach-O dynamic linker
Mach_O_Bundle_FileType, ///< Mach-O Bundle file
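
The deprecation messages name the PathV2 replacement for each accessor; a migration sketch for the string-slicing ones (the path literal is arbitrary):

    #include "llvm/Support/Path.h" // brings in both PathV1 and PathV2

    void migrated() {
      // Deprecated PathV1 spellings: getBasename(), getSuffix(), getLast().
      // PathV2 equivalents per the deprecation messages:
      llvm::StringRef Stem = llvm::sys::path::stem("/a/foo.bar");      // "foo"
      llvm::StringRef Ext  = llvm::sys::path::extension("/a/foo.bar"); // ".bar"
      llvm::StringRef Last = llvm::sys::path::filename("/a/foo.bar");  // "foo.bar"
      (void)Stem; (void)Ext; (void)Last;
    }
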
diff --git a/contrib/llvm/include/llvm/Support/PathV2.h b/contrib/llvm/include/llvm/Support/PathV2.h
new file mode 100644
index 0000000..2515633
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/PathV2.h
@@ -0,0 +1,347 @@
+//===- llvm/Support/PathV2.h - Path Operating System Concept ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::path namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PATHV2_H
+#define LLVM_SUPPORT_PATHV2_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+namespace sys {
+namespace path {
+
+/// @name Lexical Component Iterator
+/// @{
+
+/// @brief Path iterator.
+///
+/// This is a bidirectional iterator that iterates over the individual
+/// components in \a path. The forward traversal order is as follows:
+/// * The root-name element, if present.
+/// * The root-directory element, if present.
+/// * Each successive filename element, if present.
+/// * Dot, if one or more trailing non-root slash characters are present.
+/// The backwards traversal order is the reverse of forward traversal.
+///
+/// Iteration examples. Each component is separated by ',':
+/// / => /
+/// /foo => /,foo
+/// foo/ => foo,.
+/// /foo/bar => /,foo,bar
+/// ../ => ..,.
+/// C:\foo\bar => C:,/,foo,bar
+///
+class const_iterator {
+ StringRef Path; //< The entire path.
+ StringRef Component; //< The current component. Not necessarily in Path.
+ size_t Position; //< The iterator's current position within Path.
+
+ // An end iterator has Position = Path.size() + 1.
+ friend const_iterator begin(StringRef path);
+ friend const_iterator end(StringRef path);
+
+public:
+ typedef const StringRef value_type;
+ typedef ptrdiff_t difference_type;
+ typedef value_type &reference;
+ typedef value_type *pointer;
+ typedef std::bidirectional_iterator_tag iterator_category;
+
+ reference operator*() const { return Component; }
+ pointer operator->() const { return &Component; }
+ const_iterator &operator++(); // preincrement
+ const_iterator &operator++(int); // postincrement
+ const_iterator &operator--(); // predecrement
+ const_iterator &operator--(int); // postdecrement
+ bool operator==(const const_iterator &RHS) const;
+ bool operator!=(const const_iterator &RHS) const;
+
+ /// @brief Difference in bytes between this and RHS.
+ ptrdiff_t operator-(const const_iterator &RHS) const;
+};
+
+typedef std::reverse_iterator<const_iterator> reverse_iterator;
+
+/// @brief Get begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first component of \a path.
+const_iterator begin(StringRef path);
+
+/// @brief Get end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the end of \a path.
+const_iterator end(StringRef path);
+
+/// @brief Get reverse begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first reverse component of \a path.
+inline reverse_iterator rbegin(StringRef path) {
+ return reverse_iterator(end(path));
+}
+
+/// @brief Get reverse end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the reverse end of \a path.
+inline reverse_iterator rend(StringRef path) {
+ return reverse_iterator(begin(path));
+}
+
+/// @}
+/// @name Lexical Modifiers
+/// @{
+
+/// @brief Remove the last component from \a path unless it is the root dir.
+///
+/// directory/filename.cpp => directory/
+/// directory/ => directory
+/// / => /
+///
+/// @param path A path that is modified to not have a file component.
+void remove_filename(SmallVectorImpl<char> &path);
+
+/// @brief Replace the file extension of \a path with \a extension.
+///
+/// ./filename.cpp => ./filename.extension
+/// ./filename => ./filename.extension
+/// ./ => ./.extension
+///
+/// @param path A path that has its extension replaced with \a extension.
+/// @param extension The extension to be added. It may be empty. It may also
+/// optionally start with a '.', if it does not, one will be
+/// prepended.
+void replace_extension(SmallVectorImpl<char> &path, const Twine &extension);
+
+/// @brief Append to path.
+///
+/// /foo + bar/f => /foo/bar/f
+/// /foo/ + bar/f => /foo/bar/f
+/// foo + bar/f => foo/bar/f
+///
+/// @param path Set to \a path + \a component.
+/// @param component The component to be appended to \a path.
+void append(SmallVectorImpl<char> &path, const Twine &a,
+ const Twine &b = "",
+ const Twine &c = "",
+ const Twine &d = "");
+
+/// @brief Append to path.
+///
+/// /foo + [bar,f] => /foo/bar/f
+/// /foo/ + [bar,f] => /foo/bar/f
+/// foo + [bar,f] => foo/bar/f
+///
+/// @param path Set to \a path + [\a begin, \a end).
+/// @param begin Start of components to append.
+/// @param end One past the end of components to append.
+void append(SmallVectorImpl<char> &path,
+ const_iterator begin, const_iterator end);
+
+/// @}
+/// @name Transforms (or some other better name)
+/// @{
+
+/// Convert path to the native form. This is used to give paths to users and
+/// operating system calls in the platform's normal way. For example, on Windows
+/// all '/' are converted to '\'.
+///
+/// @param path A path that is transformed to native format.
+/// @param result Holds the result of the transformation.
+void native(const Twine &path, SmallVectorImpl<char> &result);
+
+/// @}
+/// @name Lexical Observers
+/// @{
+
+/// @brief Get root name.
+///
+/// //net/hello => //net
+/// c:/hello => c: (on Windows, on other platforms nothing)
+/// /hello => <empty>
+///
+/// @param path Input path.
+/// @result The root name of \a path if it has one, otherwise "".
+const StringRef root_name(StringRef path);
+
+/// @brief Get root directory.
+///
+/// /goo/hello => /
+/// c:/hello => /
+/// d/file.txt => <empty>
+///
+/// @param path Input path.
+/// @result The root directory of \a path if it has one, otherwise
+/// "".
+const StringRef root_directory(StringRef path);
+
+/// @brief Get root path.
+///
+/// Equivalent to root_name + root_directory.
+///
+/// @param path Input path.
+/// @result The root path of \a path if it has one, otherwise "".
+const StringRef root_path(StringRef path);
+
+/// @brief Get relative path.
+///
+/// C:\hello\world => hello\world
+/// foo/bar => foo/bar
+/// /foo/bar => foo/bar
+///
+/// @param path Input path.
+/// @result The path starting after root_path if one exists, otherwise "".
+const StringRef relative_path(StringRef path);
+
+/// @brief Get parent path.
+///
+/// / => <empty>
+/// /foo => /
+/// foo/../bar => foo/..
+///
+/// @param path Input path.
+/// @result The parent path of \a path if one exists, otherwise "".
+const StringRef parent_path(StringRef path);
+
+/// @brief Get filename.
+///
+/// /foo.txt => foo.txt
+/// . => .
+/// .. => ..
+/// / => /
+///
+/// @param path Input path.
+/// @result The filename part of \a path. This is defined as the last component
+/// of \a path.
+const StringRef filename(StringRef path);
+
+/// @brief Get stem.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename ending at (but not including) the last dot. Otherwise
+/// it is filename.
+///
+/// /foo/bar.txt => bar
+/// /foo/bar => bar
+/// /foo/.txt => <empty>
+/// /foo/. => .
+/// /foo/.. => ..
+///
+/// @param path Input path.
+/// @result The stem of \a path.
+const StringRef stem(StringRef path);
+
+/// @brief Get extension.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename starting at (and including) the last dot, and ending
+/// at the end of \a path. Otherwise "".
+///
+/// /foo/bar.txt => .txt
+/// /foo/bar => <empty>
+/// /foo/.txt => .txt
+///
+/// @param path Input path.
+/// @result The extension of \a path.
+const StringRef extension(StringRef path);
+
+/// @brief Check whether the given char is a path separator on the host OS.
+///
+/// @param value a character
+/// @result true if \a value is a path separator character on the host OS
+bool is_separator(char value);
+
+/// @brief Has root name?
+///
+/// root_name != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root name, false otherwise.
+bool has_root_name(const Twine &path);
+
+/// @brief Has root directory?
+///
+/// root_directory != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root directory, false otherwise.
+bool has_root_directory(const Twine &path);
+
+/// @brief Has root path?
+///
+/// root_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root path, false otherwise.
+bool has_root_path(const Twine &path);
+
+/// @brief Has relative path?
+///
+/// relative_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a relative path, false otherwise.
+bool has_relative_path(const Twine &path);
+
+/// @brief Has parent path?
+///
+/// parent_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a parent path, false otherwise.
+bool has_parent_path(const Twine &path);
+
+/// @brief Has filename?
+///
+/// filename != ""
+///
+/// @param path Input path.
+/// @result True if the path has a filename, false otherwise.
+bool has_filename(const Twine &path);
+
+/// @brief Has stem?
+///
+/// stem != ""
+///
+/// @param path Input path.
+/// @result True if the path has a stem, false otherwise.
+bool has_stem(const Twine &path);
+
+/// @brief Has extension?
+///
+/// extension != ""
+///
+/// @param path Input path.
+/// @result True if the path has an extension, false otherwise.
+bool has_extension(const Twine &path);
+
+/// @brief Is path absolute?
+///
+/// @param path Input path.
+/// @result True if the path is absolute, false if it is not.
+bool is_absolute(const Twine &path);
+
+/// @brief Is path relative?
+///
+/// @param path Input path.
+/// @result True if the path is relative, false if it is not.
+bool is_relative(const Twine &path);
+
+} // end namespace path
+} // end namespace sys
+} // end namespace llvm
+
+#endif
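
A short tour of the lexical API above, matching the iteration table in the header comment (demo is illustrative):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/PathV2.h"
    #include "llvm/Support/raw_ostream.h"

    void demo() {
      llvm::StringRef P = "/foo/bar";
      // Prints the components "/", "foo", "bar", one per line.
      for (llvm::sys::path::const_iterator I = llvm::sys::path::begin(P),
                                           E = llvm::sys::path::end(P);
           I != E; ++I)
        llvm::outs() << *I << "\n";

      // Lexical modifiers build paths in place.
      llvm::SmallString<128> Out;
      llvm::sys::path::append(Out, "/tmp", "out.o");  // /tmp/out.o
      llvm::sys::path::replace_extension(Out, "bc");  // /tmp/out.bc
    }
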
diff --git a/contrib/llvm/include/llvm/Support/PatternMatch.h b/contrib/llvm/include/llvm/Support/PatternMatch.h
index bee6768..948ae51 100644
--- a/contrib/llvm/include/llvm/Support/PatternMatch.h
+++ b/contrib/llvm/include/llvm/Support/PatternMatch.h
@@ -41,18 +41,62 @@ bool match(Val *V, const Pattern &P) {
}
template<typename Class>
-struct leaf_ty {
+struct class_match {
template<typename ITy>
bool match(ITy *V) { return isa<Class>(V); }
};
/// m_Value() - Match an arbitrary value and ignore it.
-inline leaf_ty<Value> m_Value() { return leaf_ty<Value>(); }
+inline class_match<Value> m_Value() { return class_match<Value>(); }
/// m_ConstantInt() - Match an arbitrary ConstantInt and ignore it.
-inline leaf_ty<ConstantInt> m_ConstantInt() { return leaf_ty<ConstantInt>(); }
+inline class_match<ConstantInt> m_ConstantInt() {
+ return class_match<ConstantInt>();
+}
+/// m_Undef() - Match an arbitrary undef constant.
+inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
+inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
+
+struct match_zero {
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const Constant *C = dyn_cast<Constant>(V))
+ return C->isNullValue();
+ return false;
+ }
+};
+
+/// m_Zero() - Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers.
+inline match_zero m_Zero() { return match_zero(); }
+
+
+struct apint_match {
+ const APInt *&Res;
+ apint_match(const APInt *&R) : Res(R) {}
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
+ if (ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ return false;
+ }
+};
+
+/// m_APInt - Match a ConstantInt or splatted ConstantVector, binding the
+/// specified pointer to the contained APInt.
+inline apint_match m_APInt(const APInt *&Res) { return Res; }
+
+
template<int64_t Val>
-struct constantint_ty {
+struct constantint_match {
template<typename ITy>
bool match(ITy *V) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
@@ -68,37 +112,82 @@ struct constantint_ty {
}
};
-/// m_ConstantInt(int64_t) - Match a ConstantInt with a specific value
-/// and ignore it.
+/// m_ConstantInt<int64_t> - Match a ConstantInt with a specific value.
template<int64_t Val>
-inline constantint_ty<Val> m_ConstantInt() {
- return constantint_ty<Val>();
+inline constantint_match<Val> m_ConstantInt() {
+ return constantint_match<Val>();
}
-struct zero_ty {
+/// cst_pred_ty - This helper class is used to match scalar and vector constants
+/// that satisfy a specified predicate.
+template<typename Predicate>
+struct cst_pred_ty : public Predicate {
template<typename ITy>
bool match(ITy *V) {
- if (const Constant *C = dyn_cast<Constant>(V))
- return C->isNullValue();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ return this->isValue(CI->getValue());
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+ return this->isValue(CI->getValue());
return false;
}
};
-
-/// m_Zero() - Match an arbitrary zero/null constant.
-inline zero_ty m_Zero() { return zero_ty(); }
-
-struct one_ty {
+
+/// api_pred_ty - This helper class is used to match scalar and vector constants
+/// that satisfy a specified predicate, and bind them to an APInt.
+template<typename Predicate>
+struct api_pred_ty : public Predicate {
+ const APInt *&Res;
+ api_pred_ty(const APInt *&R) : Res(R) {}
template<typename ITy>
bool match(ITy *V) {
- if (const ConstantInt *C = dyn_cast<ConstantInt>(V))
- return C->isOne();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
return false;
}
};
+
+
+struct is_one {
+ bool isValue(const APInt &C) { return C == 1; }
+};
-/// m_One() - Match a an integer 1.
-inline one_ty m_One() { return one_ty(); }
+/// m_One() - Match an integer 1 or a vector with all elements equal to 1.
+inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
+inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }
+
+struct is_all_ones {
+ bool isValue(const APInt &C) { return C.isAllOnesValue(); }
+};
+/// m_AllOnes() - Match an integer or vector with all bits set to true.
+inline cst_pred_ty<is_all_ones> m_AllOnes() {return cst_pred_ty<is_all_ones>();}
+inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
+
+struct is_sign_bit {
+ bool isValue(const APInt &C) { return C.isSignBit(); }
+};
+
+/// m_SignBit() - Match an integer or vector with only the sign bit(s) set.
+inline cst_pred_ty<is_sign_bit> m_SignBit() {return cst_pred_ty<is_sign_bit>();}
+inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; }
+
+struct is_power2 {
+ bool isValue(const APInt &C) { return C.isPowerOf2(); }
+};
+
+/// m_Power2() - Match an integer or vector power of 2.
+inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
+inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }
template<typename Class>
struct bind_ty {
@@ -121,6 +210,9 @@ inline bind_ty<Value> m_Value(Value *&V) { return V; }
/// m_ConstantInt - Match a ConstantInt, capturing the value if we match.
inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
+/// m_Constant - Match a Constant, capturing the value if we match.
+inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
+
/// specificval_ty - Match a specified Value*.
struct specificval_ty {
const Value *Val;
@@ -140,8 +232,7 @@ inline specificval_ty m_Specific(const Value *V) { return V; }
// Matchers for specific binary operators.
//
-template<typename LHS_t, typename RHS_t,
- unsigned Opcode, typename ConcreteTy = BinaryOperator>
+template<typename LHS_t, typename RHS_t, unsigned Opcode>
struct BinaryOp_match {
LHS_t L;
RHS_t R;
@@ -151,9 +242,8 @@ struct BinaryOp_match {
template<typename OpTy>
bool match(OpTy *V) {
if (V->getValueID() == Value::InstructionVal + Opcode) {
- ConcreteTy *I = cast<ConcreteTy>(V);
- return I->getOpcode() == Opcode && L.match(I->getOperand(0)) &&
- R.match(I->getOperand(1));
+ BinaryOperator *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
}
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) &&
@@ -163,193 +253,156 @@ struct BinaryOp_match {
};
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Add>
+m_Add(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::FAdd>
+m_FAdd(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Sub>
+m_Sub(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::FSub>
+m_FSub(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Mul>
+m_Mul(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::FMul>
+m_FMul(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::UDiv>
+m_UDiv(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::SDiv>
+m_SDiv(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::FDiv>
+m_FDiv(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::URem>
+m_URem(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::SRem>
+m_SRem(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::FRem>
+m_FRem(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::And>
+m_And(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Or>
+m_Or(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Xor>
+m_Xor(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::Shl>
+m_Shl(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::LShr>
+m_LShr(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
}
template<typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
- const RHS &R) {
+inline BinaryOp_match<LHS, RHS, Instruction::AShr>
+m_AShr(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
}
//===----------------------------------------------------------------------===//
-// Matchers for either AShr or LShr .. for convenience
+// Class that matches two different binary ops.
//
-template<typename LHS_t, typename RHS_t, typename ConcreteTy = BinaryOperator>
-struct Shr_match {
+template<typename LHS_t, typename RHS_t, unsigned Opc1, unsigned Opc2>
+struct BinOp2_match {
LHS_t L;
RHS_t R;
- Shr_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+ BinOp2_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
template<typename OpTy>
bool match(OpTy *V) {
- if (V->getValueID() == Value::InstructionVal + Instruction::LShr ||
- V->getValueID() == Value::InstructionVal + Instruction::AShr) {
- ConcreteTy *I = cast<ConcreteTy>(V);
- return (I->getOpcode() == Instruction::AShr ||
- I->getOpcode() == Instruction::LShr) &&
- L.match(I->getOperand(0)) &&
- R.match(I->getOperand(1));
+ if (V->getValueID() == Value::InstructionVal + Opc1 ||
+ V->getValueID() == Value::InstructionVal + Opc2) {
+ BinaryOperator *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
}
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- return (CE->getOpcode() == Instruction::LShr ||
- CE->getOpcode() == Instruction::AShr) &&
- L.match(CE->getOperand(0)) &&
- R.match(CE->getOperand(1));
+ return (CE->getOpcode() == Opc1 || CE->getOpcode() == Opc2) &&
+ L.match(CE->getOperand(0)) && R.match(CE->getOperand(1));
return false;
}
};
+/// m_Shr - Matches LShr or AShr.
template<typename LHS, typename RHS>
-inline Shr_match<LHS, RHS> m_Shr(const LHS &L, const RHS &R) {
- return Shr_match<LHS, RHS>(L, R);
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>
+m_Shr(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>(L, R);
}
-//===----------------------------------------------------------------------===//
-// Matchers for binary classes
-//
-
-template<typename LHS_t, typename RHS_t, typename Class, typename OpcType>
-struct BinaryOpClass_match {
- OpcType *Opcode;
- LHS_t L;
- RHS_t R;
-
- BinaryOpClass_match(OpcType &Op, const LHS_t &LHS,
- const RHS_t &RHS)
- : Opcode(&Op), L(LHS), R(RHS) {}
- BinaryOpClass_match(const LHS_t &LHS, const RHS_t &RHS)
- : Opcode(0), L(LHS), R(RHS) {}
-
- template<typename OpTy>
- bool match(OpTy *V) {
- if (Class *I = dyn_cast<Class>(V))
- if (L.match(I->getOperand(0)) &&
- R.match(I->getOperand(1))) {
- if (Opcode)
- *Opcode = I->getOpcode();
- return true;
- }
-#if 0 // Doesn't handle constantexprs yet!
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) &&
- R.match(CE->getOperand(1));
-#endif
- return false;
- }
-};
-
+/// m_LogicalShift - Matches LShr or Shl.
template<typename LHS, typename RHS>
-inline BinaryOpClass_match<LHS, RHS, BinaryOperator, Instruction::BinaryOps>
-m_Shift(Instruction::BinaryOps &Op, const LHS &L, const RHS &R) {
- return BinaryOpClass_match<LHS, RHS,
- BinaryOperator, Instruction::BinaryOps>(Op, L, R);
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>
+m_LogicalShift(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>(L, R);
}
+/// m_IDiv - Matches UDiv and SDiv.
template<typename LHS, typename RHS>
-inline BinaryOpClass_match<LHS, RHS, BinaryOperator, Instruction::BinaryOps>
-m_Shift(const LHS &L, const RHS &R) {
- return BinaryOpClass_match<LHS, RHS,
- BinaryOperator, Instruction::BinaryOps>(L, R);
+inline BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>
+m_IDiv(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>(L, R);
}
//===----------------------------------------------------------------------===//
@@ -362,15 +415,13 @@ struct CmpClass_match {
LHS_t L;
RHS_t R;
- CmpClass_match(PredicateTy &Pred, const LHS_t &LHS,
- const RHS_t &RHS)
+ CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
: Predicate(Pred), L(LHS), R(RHS) {}
template<typename OpTy>
bool match(OpTy *V) {
if (Class *I = dyn_cast<Class>(V))
- if (L.match(I->getOperand(0)) &&
- R.match(I->getOperand(1))) {
+ if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
Predicate = I->getPredicate();
return true;
}
@@ -425,11 +476,9 @@ m_Select(const Cond &C, const LHS &L, const RHS &R) {
/// m_SelectCst - This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template<int64_t L, int64_t R, typename Cond>
-inline SelectClass_match<Cond, constantint_ty<L>, constantint_ty<R> >
+inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R> >
m_SelectCst(const Cond &C) {
- return SelectClass_match<Cond, constantint_ty<L>,
- constantint_ty<R> >(C, m_ConstantInt<L>(),
- m_ConstantInt<R>());
+ return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}
@@ -507,20 +556,14 @@ struct not_match {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::Xor)
return matchIfNot(CE->getOperand(0), CE->getOperand(1));
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
- return L.match(ConstantExpr::getNot(CI));
return false;
}
private:
bool matchIfNot(Value *LHS, Value *RHS) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
return CI->isAllOnesValue() && L.match(LHS);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(LHS))
- return CI->isAllOnesValue() && L.match(RHS);
if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS))
return CV->isAllOnesValue() && L.match(LHS);
- if (ConstantVector *CV = dyn_cast<ConstantVector>(LHS))
- return CV->isAllOnesValue() && L.match(RHS);
return false;
}
};
@@ -543,17 +586,17 @@ struct neg_match {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::Sub)
return matchIfNeg(CE->getOperand(0), CE->getOperand(1));
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
- return L.match(ConstantExpr::getNeg(CI));
return false;
}
private:
bool matchIfNeg(Value *LHS, Value *RHS) {
- return LHS == ConstantFP::getZeroValueForNegation(LHS->getType()) &&
- L.match(RHS);
+ if (ConstantInt *C = dyn_cast<ConstantInt>(LHS))
+ return C->isZero() && L.match(RHS);
+ return false;
}
};
+/// m_Neg - Match an integer negate.
template<typename LHS>
inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
@@ -572,23 +615,23 @@ struct fneg_match {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::FSub)
return matchIfFNeg(CE->getOperand(0), CE->getOperand(1));
- if (ConstantFP *CF = dyn_cast<ConstantFP>(V))
- return L.match(ConstantExpr::getFNeg(CF));
return false;
}
private:
bool matchIfFNeg(Value *LHS, Value *RHS) {
- return LHS == ConstantFP::getZeroValueForNegation(LHS->getType()) &&
- L.match(RHS);
+ if (ConstantFP *C = dyn_cast<ConstantFP>(LHS))
+ return C->isNegativeZeroValue() && L.match(RHS);
+ return false;
}
};
+/// m_FNeg - Match a floating point negate.
template<typename LHS>
inline fneg_match<LHS> m_FNeg(const LHS &L) { return L; }
//===----------------------------------------------------------------------===//
-// Matchers for control flow
+// Matchers for control flow.
//
template<typename Cond_t>
@@ -602,12 +645,10 @@ struct brc_match {
template<typename OpTy>
bool match(OpTy *V) {
if (BranchInst *BI = dyn_cast<BranchInst>(V))
- if (BI->isConditional()) {
- if (Cond.match(BI->getCondition())) {
- T = BI->getSuccessor(0);
- F = BI->getSuccessor(1);
- return true;
- }
+ if (BI->isConditional() && Cond.match(BI->getCondition())) {
+ T = BI->getSuccessor(0);
+ F = BI->getSuccessor(1);
+ return true;
}
return false;
}
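
For readers tracking the PatternMatch.h changes above, here is a minimal sketch of how client code might combine the new predicates; the function isDivByPowerOfTwo and its caller are hypothetical, not part of this commit.

    #include "llvm/Support/PatternMatch.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    // Recognize (X udiv C) or (X sdiv C) where C has exactly one bit set,
    // using the new m_IDiv and m_Power2 matchers.
    static bool isDivByPowerOfTwo(Value *V) {
      Value *X;
      const APInt *C;  // m_Power2 binds the divisor's value here on a match
      return match(V, m_IDiv(m_Value(X), m_Power2(C)));
    }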
diff --git a/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h b/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h
index b851404..8370821 100644
--- a/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h
+++ b/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/System/Process.h b/contrib/llvm/include/llvm/Support/Process.h
index 41bcd69..3379922 100644
--- a/contrib/llvm/include/llvm/System/Process.h
+++ b/contrib/llvm/include/llvm/Support/Process.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Process.h ------------------------------------*- C++ -*-===//
+//===- llvm/Support/Process.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_PROCESS_H
#define LLVM_SYSTEM_PROCESS_H
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/TimeValue.h"
namespace llvm {
namespace sys {
diff --git a/contrib/llvm/include/llvm/System/Program.h b/contrib/llvm/include/llvm/Support/Program.h
index 7017305..78a495e 100644
--- a/contrib/llvm/include/llvm/System/Program.h
+++ b/contrib/llvm/include/llvm/Support/Program.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Program.h ------------------------------------*- C++ -*-===//
+//===- llvm/Support/Program.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_PROGRAM_H
#define LLVM_SYSTEM_PROGRAM_H
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
namespace llvm {
namespace sys {
@@ -90,12 +90,13 @@ namespace sys {
/// @see Execute
/// @brief Waits for the program to exit.
int Wait
- ( unsigned secondsToWait = 0, ///< If non-zero, this specifies the amount
+ ( const Path& path, ///< The path to the child process executable.
+ unsigned secondsToWait, ///< If non-zero, this specifies the amount
///< of time to wait for the child process to exit. If the time
///< expires, the child is killed and this call returns. If zero,
///< this function will wait until the child finishes or forever if
///< it doesn't.
- std::string* ErrMsg = 0 ///< If non-zero, provides a pointer to a string
+ std::string* ErrMsg ///< If non-zero, provides a pointer to a string
///< instance in which error messages will be returned. If the string
///< is non-empty upon return an error occurred while waiting.
);
@@ -113,7 +114,8 @@ namespace sys {
/// This static constructor (factory) will attempt to locate a program in
/// the operating system's file system using some pre-determined set of
- /// locations to search (e.g. the PATH on Unix).
+ /// locations to search (e.g. the PATH on Unix). Paths with slashes are
+ /// returned unmodified.
/// @returns A Path object initialized to the path of the program or a
/// Path object that is empty (invalid) if the program could not be found.
/// @brief Construct a Program by finding it by name.
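
A hedged sketch of driving the relocated Program.h; it assumes the 2.9-era static helpers Program::FindProgramByName and Program::ExecuteAndWait keep the signatures this import ships, and "echo" is an arbitrary example program.

    #include "llvm/Support/Program.h"
    using namespace llvm;

    int runEcho(std::string *ErrMsg) {
      sys::Path Echo = sys::Program::FindProgramByName("echo");
      if (Echo.isEmpty())
        return -1;                       // not found on PATH
      const char *Args[] = { "echo", "hi", 0 };
      return sys::Program::ExecuteAndWait(Echo, Args, /*env=*/0,
                                          /*redirects=*/0, /*secondsToWait=*/0,
                                          /*memoryLimit=*/0, ErrMsg);
    }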
diff --git a/contrib/llvm/include/llvm/System/RWMutex.h b/contrib/llvm/include/llvm/Support/RWMutex.h
index 3a28818..0d4cb81 100644
--- a/contrib/llvm/include/llvm/System/RWMutex.h
+++ b/contrib/llvm/include/llvm/Support/RWMutex.h
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_RWMUTEX_H
#define LLVM_SYSTEM_RWMUTEX_H
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Threading.h"
#include <cassert>
namespace llvm
@@ -43,7 +43,7 @@ namespace llvm
/// Attempts to unconditionally acquire the lock in reader mode. If the
/// lock is held by a writer, this method will wait until it can acquire
- /// the lock.
+ /// the lock.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally acquire the lock in reader mode.
bool reader_acquire();
@@ -55,7 +55,7 @@ namespace llvm
/// Attempts to unconditionally acquire the lock in reader mode. If the
/// lock is held by any readers, this method will wait until it can
- /// acquire the lock.
+ /// acquire the lock.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally acquire the lock in writer mode.
bool writer_acquire();
@@ -79,8 +79,8 @@ namespace llvm
void operator=(const RWMutexImpl &);
/// @}
};
-
- /// SmartMutex - An R/W mutex with a compile time constant parameter that
+
+  /// SmartRWMutex - An R/W mutex with a compile-time constant parameter that
/// indicates whether this mutex should become a no-op when we're not
/// running in multithreaded mode.
template<bool mt_only>
@@ -88,80 +88,80 @@ namespace llvm
unsigned readers, writers;
public:
explicit SmartRWMutex() : RWMutexImpl(), readers(0), writers(0) { }
-
+
bool reader_acquire() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::reader_acquire();
-
+
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides no sanity checks in single-threaded mode.
++readers;
return true;
}
-
+
bool reader_release() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::reader_release();
-
+
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides no sanity checks in single-threaded mode.
assert(readers > 0 && "Reader lock not acquired before release!");
--readers;
return true;
}
-
+
bool writer_acquire() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::writer_acquire();
-
+
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides no sanity checks in single-threaded mode.
assert(writers == 0 && "Writer lock already acquired!");
++writers;
return true;
}
-
+
bool writer_release() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::writer_release();
-
+
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides no sanity checks in single-threaded mode.
assert(writers == 1 && "Writer lock not acquired before release!");
--writers;
return true;
}
-
+
private:
SmartRWMutex(const SmartRWMutex<mt_only> & original);
void operator=(const SmartRWMutex<mt_only> &);
};
typedef SmartRWMutex<false> RWMutex;
-
+
/// ScopedReader - RAII acquisition of a reader lock
template<bool mt_only>
struct SmartScopedReader {
SmartRWMutex<mt_only>& mutex;
-
+
explicit SmartScopedReader(SmartRWMutex<mt_only>& m) : mutex(m) {
mutex.reader_acquire();
}
-
+
~SmartScopedReader() {
mutex.reader_release();
}
};
typedef SmartScopedReader<false> ScopedReader;
-
+
/// ScopedWriter - RAII acquisition of a writer lock
template<bool mt_only>
struct SmartScopedWriter {
SmartRWMutex<mt_only>& mutex;
-
+
explicit SmartScopedWriter(SmartRWMutex<mt_only>& m) : mutex(m) {
mutex.writer_acquire();
}
-
+
~SmartScopedWriter() {
mutex.writer_release();
}
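
A minimal sketch of the RAII helpers kept by this hunk; the shared counter and its lock are hypothetical.

    #include "llvm/Support/RWMutex.h"
    using namespace llvm::sys;

    static RWMutex CounterLock;   // hypothetical shared lock
    static unsigned Counter;      // hypothetical shared state

    unsigned readCounter() {
      ScopedReader Guard(CounterLock);  // reader_acquire() in the ctor
      return Counter;                   // reader_release() on scope exit
    }

    void bumpCounter() {
      ScopedWriter Guard(CounterLock);  // writer_acquire() in the ctor
      ++Counter;
    }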
diff --git a/contrib/llvm/include/llvm/System/Signals.h b/contrib/llvm/include/llvm/Support/Signals.h
index 7f1c87c..9a84df6 100644
--- a/contrib/llvm/include/llvm/System/Signals.h
+++ b/contrib/llvm/include/llvm/Support/Signals.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Signals.h - Signal Handling support ----------*- C++ -*-===//
+//===- llvm/Support/Signals.h - Signal Handling support ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,7 +15,7 @@
#ifndef LLVM_SYSTEM_SIGNALS_H
#define LLVM_SYSTEM_SIGNALS_H
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
namespace llvm {
namespace sys {
diff --git a/contrib/llvm/include/llvm/System/Solaris.h b/contrib/llvm/include/llvm/Support/Solaris.h
index 15adb74..57eee2c 100644
--- a/contrib/llvm/include/llvm/System/Solaris.h
+++ b/contrib/llvm/include/llvm/Support/Solaris.h
@@ -1,4 +1,4 @@
-/*===- llvm/System/Solaris.h ------------------------------------*- C++ -*-===*
+/*===- llvm/Support/Solaris.h ------------------------------------*- C++ -*-===*
*
* The LLVM Compiler Infrastructure
*
diff --git a/contrib/llvm/include/llvm/Support/SourceMgr.h b/contrib/llvm/include/llvm/Support/SourceMgr.h
index 270ab2b..a41a633 100644
--- a/contrib/llvm/include/llvm/Support/SourceMgr.h
+++ b/contrib/llvm/include/llvm/Support/SourceMgr.h
@@ -26,6 +26,7 @@ namespace llvm {
class MemoryBuffer;
class SourceMgr;
class SMDiagnostic;
+ class Twine;
class raw_ostream;
/// SourceMgr - This owns the files read by a parser, handles include stacks,
@@ -35,8 +36,7 @@ public:
/// DiagHandlerTy - Clients that want to handle their own diagnostics in a
/// custom way can register a function pointer+context as a diagnostic
/// handler. It gets called each time PrintMessage is invoked.
- typedef void (*DiagHandlerTy)(const SMDiagnostic&, void *Context,
- unsigned LocCookie);
+ typedef void (*DiagHandlerTy)(const SMDiagnostic&, void *Context);
private:
struct SrcBuffer {
/// Buffer - The memory buffer for the file.
@@ -60,7 +60,6 @@ private:
DiagHandlerTy DiagHandler;
void *DiagContext;
- unsigned DiagLocCookie;
SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT
void operator=(const SourceMgr&); // DO NOT IMPLEMENT
@@ -73,12 +72,10 @@ public:
}
/// setDiagHandler - Specify a diagnostic handler to be invoked every time
- /// PrintMessage is called. Ctx and Cookie are passed into the handler when
- /// it is invoked.
- void setDiagHandler(DiagHandlerTy DH, void *Ctx = 0, unsigned Cookie = 0) {
+ /// PrintMessage is called. Ctx is passed into the handler when it is invoked.
+ void setDiagHandler(DiagHandlerTy DH, void *Ctx = 0) {
DiagHandler = DH;
DiagContext = Ctx;
- DiagLocCookie = Cookie;
}
const SrcBuffer &getBufferInfo(unsigned i) const {
@@ -125,7 +122,7 @@ public:
/// @param Type - If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
/// @param ShowLine - Should the diagnostic show the source line.
- void PrintMessage(SMLoc Loc, const std::string &Msg, const char *Type,
+ void PrintMessage(SMLoc Loc, const Twine &Msg, const char *Type,
bool ShowLine = true) const;
@@ -136,7 +133,7 @@ public:
/// prefixed to the message.
/// @param ShowLine - Should the diagnostic show the source line.
SMDiagnostic GetMessage(SMLoc Loc,
- const std::string &Msg, const char *Type,
+ const Twine &Msg, const char *Type,
bool ShowLine = true) const;
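
A sketch of the revised SourceMgr interface, assuming only what the hunk shows: the handler loses its LocCookie parameter, and PrintMessage now takes a Twine, so messages can be composed without building a std::string first. The handler and reporting function are hypothetical.

    #include "llvm/ADT/Twine.h"
    #include "llvm/Support/SourceMgr.h"

    static void MyDiagHandler(const llvm::SMDiagnostic &D, void *Context) {
      // Context is whatever was registered via setDiagHandler(); the
      // LocCookie parameter is gone.
    }

    void report(llvm::SourceMgr &SM, llvm::SMLoc Loc, llvm::StringRef Tok) {
      SM.setDiagHandler(MyDiagHandler, /*Ctx=*/0);
      SM.PrintMessage(Loc, "unexpected token '" + llvm::Twine(Tok) + "'",
                      "error");
    }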
diff --git a/contrib/llvm/include/llvm/Support/StableBasicBlockNumbering.h b/contrib/llvm/include/llvm/Support/StableBasicBlockNumbering.h
deleted file mode 100644
index 5e0f87e..0000000
--- a/contrib/llvm/include/llvm/Support/StableBasicBlockNumbering.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===- StableBasicBlockNumbering.h - Provide BB identifiers -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class provides a *stable* numbering of basic blocks that does not depend
-// on their address in memory (which is nondeterministic). When requested, this
-// class simply provides a unique ID for each basic block in the function
-// specified and the inverse mapping.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_STABLEBASICBLOCKNUMBERING_H
-#define LLVM_SUPPORT_STABLEBASICBLOCKNUMBERING_H
-
-#include "llvm/Function.h"
-#include "llvm/ADT/UniqueVector.h"
-
-namespace llvm {
- class StableBasicBlockNumbering {
- // BBNumbering - Holds the numbering.
- UniqueVector<BasicBlock*> BBNumbering;
- public:
- StableBasicBlockNumbering(Function *F = 0) {
- if (F) compute(*F);
- }
-
- /// compute - If we have not computed a numbering for the function yet, do
- /// so.
- void compute(Function &F) {
- if (BBNumbering.empty()) {
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
- BBNumbering.insert(I);
- }
- }
-
- /// getNumber - Return the ID number for the specified BasicBlock.
- ///
- unsigned getNumber(BasicBlock *BB) const {
- unsigned Idx = BBNumbering.idFor(BB);
- assert(Idx && "Invalid basic block or numbering not computed!");
- return Idx-1;
- }
-
- /// getBlock - Return the BasicBlock corresponding to a particular ID.
- ///
- BasicBlock *getBlock(unsigned N) const {
- assert(N < BBNumbering.size() &&
- "Block ID out of range or numbering not computed!");
- return BBNumbering[N+1];
- }
- };
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/Support/StandardPasses.h b/contrib/llvm/include/llvm/Support/StandardPasses.h
index bb3bddd..d774faf 100644
--- a/contrib/llvm/include/llvm/Support/StandardPasses.h
+++ b/contrib/llvm/include/llvm/Support/StandardPasses.h
@@ -20,20 +20,35 @@
#define LLVM_SUPPORT_STANDARDPASSES_H
#include "llvm/PassManager.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/IPO.h"
namespace llvm {
+
+ static inline void createStandardAliasAnalysisPasses(PassManagerBase *PM) {
+ // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
+ // BasicAliasAnalysis wins if they disagree. This is intended to help
+ // support "obvious" type-punning idioms.
+ PM->add(createTypeBasedAliasAnalysisPass());
+ PM->add(createBasicAliasAnalysisPass());
+ }
+
/// createStandardFunctionPasses - Add the standard list of function passes to
/// the provided pass manager.
///
/// \arg OptimizationLevel - The optimization level, corresponding to -O0,
/// -O1, etc.
static inline void createStandardFunctionPasses(PassManagerBase *PM,
- unsigned OptimizationLevel);
+ unsigned OptimizationLevel) {
+ if (OptimizationLevel > 0) {
+ createStandardAliasAnalysisPasses(PM);
+ PM->add(createCFGSimplificationPass());
+ PM->add(createScalarReplAggregatesPass());
+ PM->add(createEarlyCSEPass());
+ }
+ }
/// createStandardModulePasses - Add the standard list of module passes to the
/// provided pass manager.
@@ -54,43 +69,9 @@ namespace llvm {
bool UnrollLoops,
bool SimplifyLibCalls,
bool HaveExceptions,
- Pass *InliningPass);
-
- /// createStandardLTOPasses - Add the standard list of module passes suitable
- /// for link time optimization.
- ///
- /// Internalize - Run the internalize pass.
- /// RunInliner - Use a function inlining pass.
- /// VerifyEach - Run the verifier after each pass.
- static inline void createStandardLTOPasses(PassManagerBase *PM,
- bool Internalize,
- bool RunInliner,
- bool VerifyEach);
-
- // Implementations
-
- static inline void createStandardFunctionPasses(PassManagerBase *PM,
- unsigned OptimizationLevel) {
- if (OptimizationLevel > 0) {
- PM->add(createCFGSimplificationPass());
- if (OptimizationLevel == 1)
- PM->add(createPromoteMemoryToRegisterPass());
- else
- PM->add(createScalarReplAggregatesPass());
- PM->add(createInstructionCombiningPass());
- }
- }
-
- /// createStandardModulePasses - Add the standard module passes. This is
- /// expected to be run after the standard function passes.
- static inline void createStandardModulePasses(PassManagerBase *PM,
- unsigned OptimizationLevel,
- bool OptimizeSize,
- bool UnitAtATime,
- bool UnrollLoops,
- bool SimplifyLibCalls,
- bool HaveExceptions,
Pass *InliningPass) {
+ createStandardAliasAnalysisPasses(PM);
+
if (OptimizationLevel == 0) {
if (InliningPass)
PM->add(InliningPass);
@@ -108,7 +89,7 @@ namespace llvm {
// Start of CallGraph SCC passes.
if (UnitAtATime && HaveExceptions)
- PM->add(createPruneEHPass()); // Remove dead EH info
+ PM->add(createPruneEHPass()); // Remove dead EH info
if (InliningPass)
PM->add(InliningPass);
if (UnitAtATime)
@@ -117,11 +98,13 @@ namespace llvm {
PM->add(createArgumentPromotionPass()); // Scalarize uninlined fn args
// Start of function pass.
- PM->add(createScalarReplAggregatesPass()); // Break up aggregate allocas
+ // Break up aggregate allocas, using SSAUpdater.
+ PM->add(createScalarReplAggregatesPass(-1, false));
+ PM->add(createEarlyCSEPass()); // Catch trivial redundancies
if (SimplifyLibCalls)
PM->add(createSimplifyLibCallsPass()); // Library Call Optimizations
- PM->add(createInstructionCombiningPass()); // Cleanup for scalarrepl.
PM->add(createJumpThreadingPass()); // Thread jumps.
+ PM->add(createCorrelatedValuePropagationPass()); // Propagate conditionals
PM->add(createCFGSimplificationPass()); // Merge & remove BBs
PM->add(createInstructionCombiningPass()); // Combine silly seq's
@@ -133,6 +116,7 @@ namespace llvm {
PM->add(createLoopUnswitchPass(OptimizeSize || OptimizationLevel < 3));
PM->add(createInstructionCombiningPass());
PM->add(createIndVarSimplifyPass()); // Canonicalize indvars
+ PM->add(createLoopIdiomPass()); // Recognize idioms like memset.
PM->add(createLoopDeletionPass()); // Delete dead loops
if (UnrollLoops)
PM->add(createLoopUnrollPass()); // Unroll small loops
@@ -172,10 +156,19 @@ namespace llvm {
PM->add(createVerifierPass());
}
+ /// createStandardLTOPasses - Add the standard list of module passes suitable
+ /// for link time optimization.
+ ///
+ /// Internalize - Run the internalize pass.
+ /// RunInliner - Use a function inlining pass.
+ /// VerifyEach - Run the verifier after each pass.
static inline void createStandardLTOPasses(PassManagerBase *PM,
bool Internalize,
bool RunInliner,
bool VerifyEach) {
+ // Provide AliasAnalysis services for optimizations.
+ createStandardAliasAnalysisPasses(PM);
+
// Now that composite has been compiled, scan through the module, looking
// for a main function. If main is defined, mark all other functions
// internal.
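
A minimal sketch of a caller of the updated StandardPasses helpers; note that alias-analysis passes no longer need to be added by hand, since createStandardAliasAnalysisPasses now runs inside these functions. The module and inliner choice are hypothetical.

    #include "llvm/PassManager.h"
    #include "llvm/Support/StandardPasses.h"
    using namespace llvm;

    void optimizeModule(Module &M) {
      PassManager PM;
      createStandardModulePasses(&PM, /*OptimizationLevel=*/2,
                                 /*OptimizeSize=*/false, /*UnitAtATime=*/true,
                                 /*UnrollLoops=*/true,
                                 /*SimplifyLibCalls=*/true,
                                 /*HaveExceptions=*/true,
                                 createFunctionInliningPass());
      PM.run(M);
    }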
diff --git a/contrib/llvm/include/llvm/Support/SwapByteOrder.h b/contrib/llvm/include/llvm/Support/SwapByteOrder.h
new file mode 100644
index 0000000..6c0592c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SwapByteOrder.h
@@ -0,0 +1,101 @@
+//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic and optimized functions to swap the byte order of
+// an integral type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_SWAP_BYTE_ORDER_H
+#define LLVM_SYSTEM_SWAP_BYTE_ORDER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cstddef>
+#include <limits>
+
+namespace llvm {
+namespace sys {
+
+/// SwapByteOrder_16 - This function returns a byte-swapped representation of
+/// the 16-bit argument.
+inline uint16_t SwapByteOrder_16(uint16_t value) {
+#if defined(_MSC_VER) && !defined(_DEBUG)
+ // The DLL version of the runtime lacks these functions (bug!?), but in a
+ // release build they're replaced with BSWAP instructions anyway.
+ return _byteswap_ushort(value);
+#else
+ uint16_t Hi = value << 8;
+ uint16_t Lo = value >> 8;
+ return Hi | Lo;
+#endif
+}
+
+/// SwapByteOrder_32 - This function returns a byte-swapped representation of
+/// the 32-bit argument.
+inline uint32_t SwapByteOrder_32(uint32_t value) {
+#if defined(__llvm__) || \
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
+ return __builtin_bswap32(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_ulong(value);
+#else
+ uint32_t Byte0 = value & 0x000000FF;
+ uint32_t Byte1 = value & 0x0000FF00;
+ uint32_t Byte2 = value & 0x00FF0000;
+ uint32_t Byte3 = value & 0xFF000000;
+ return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
+#endif
+}
+
+/// SwapByteOrder_64 - This function returns a byte-swapped representation of
+/// the 64-bit argument.
+inline uint64_t SwapByteOrder_64(uint64_t value) {
+#if defined(__llvm__) || \
+(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
+ return __builtin_bswap64(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_uint64(value);
+#else
+ uint64_t Hi = SwapByteOrder_32(uint32_t(value));
+ uint32_t Lo = SwapByteOrder_32(uint32_t(value >> 32));
+ return (Hi << 32) | Lo;
+#endif
+}
+
+inline unsigned char SwapByteOrder(unsigned char C) { return C; }
+inline signed char SwapByteOrder(signed char C) { return C; }
+inline char SwapByteOrder(char C) { return C; }
+
+inline unsigned short SwapByteOrder(unsigned short C) { return SwapByteOrder_16(C); }
+inline signed short SwapByteOrder( signed short C) { return SwapByteOrder_16(C); }
+
+inline unsigned int SwapByteOrder(unsigned int C) { return SwapByteOrder_32(C); }
+inline signed int SwapByteOrder( signed int C) { return SwapByteOrder_32(C); }
+
+#if __LONG_MAX__ == __INT_MAX__
+inline unsigned long SwapByteOrder(unsigned long C) { return SwapByteOrder_32(C); }
+inline signed long SwapByteOrder( signed long C) { return SwapByteOrder_32(C); }
+#elif __LONG_MAX__ == __LONG_LONG_MAX__
+inline unsigned long SwapByteOrder(unsigned long C) { return SwapByteOrder_64(C); }
+inline signed long SwapByteOrder( signed long C) { return SwapByteOrder_64(C); }
+#else
+#error "Unknown long size!"
+#endif
+
+inline unsigned long long SwapByteOrder(unsigned long long C) {
+ return SwapByteOrder_64(C);
+}
+inline signed long long SwapByteOrder(signed long long C) {
+ return SwapByteOrder_64(C);
+}
+
+} // end namespace sys
+} // end namespace llvm
+
+#endif
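
A quick self-check of the new header's behavior, as a sketch:

    #include "llvm/Support/SwapByteOrder.h"
    #include <cassert>

    void testSwaps() {
      assert(llvm::sys::SwapByteOrder_16(0x1234) == 0x3412);
      assert(llvm::sys::SwapByteOrder_32(0x12345678U) == 0x78563412U);
      // The overload set picks the right width from the argument type.
      assert(llvm::sys::SwapByteOrder(0x0102030405060708ULL)
             == 0x0807060504030201ULL);
    }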
diff --git a/contrib/llvm/include/llvm/Support/SystemUtils.h b/contrib/llvm/include/llvm/Support/SystemUtils.h
index 3c182c1..399aee5 100644
--- a/contrib/llvm/include/llvm/Support/SystemUtils.h
+++ b/contrib/llvm/include/llvm/Support/SystemUtils.h
@@ -30,13 +30,14 @@ bool CheckBitcodeOutputToConsole(
bool print_warning = true ///< Control whether warnings are printed
);
-/// FindExecutable - Find a named executable, giving the argv[0] of program
-/// being executed. This allows us to find another LLVM tool if it is built in
-/// the same directory. If the executable cannot be found, return an
-/// empty string.
+/// PrependMainExecutablePath - Prepend the path to the program being executed
+/// to \p ExeName, given the value of argv[0] and the address of main()
+/// itself. This allows us to find another LLVM tool if it is built in the same
+/// directory. An empty string is returned on error; note that this function
+/// just manipulates the path and doesn't check for executability.
/// @brief Find a named executable.
-sys::Path FindExecutable(const std::string &ExeName,
- const char *Argv0, void *MainAddr);
+sys::Path PrependMainExecutablePath(const std::string &ExeName,
+ const char *Argv0, void *MainAddr);
} // End llvm namespace
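
A hedged sketch of the renamed helper; the tool name "llc" is hypothetical, and any address inside the running image works for the MainAddr argument.

    #include "llvm/Support/DataTypes.h"
    #include "llvm/Support/SystemUtils.h"
    using namespace llvm;

    sys::Path findSiblingTool(const char *Argv0) {
      // Resolves a path like <dir-of-current-binary>/llc; per the new
      // comment, no executability check is performed.
      void *MainAddr = (void*)(intptr_t)findSiblingTool;
      return PrependMainExecutablePath("llc", Argv0, MainAddr);
    }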
diff --git a/contrib/llvm/include/llvm/Support/TargetFolder.h b/contrib/llvm/include/llvm/Support/TargetFolder.h
index d34f35f..20ca557 100644
--- a/contrib/llvm/include/llvm/Support/TargetFolder.h
+++ b/contrib/llvm/include/llvm/Support/TargetFolder.h
@@ -46,50 +46,32 @@ public:
// Binary Operators
//===--------------------------------------------------------------------===//
- Constant *CreateAdd(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getAdd(LHS, RHS));
- }
- Constant *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNSWAdd(LHS, RHS));
- }
- Constant *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNUWAdd(LHS, RHS));
+ Constant *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFAdd(LHS, RHS));
}
- Constant *CreateSub(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getSub(LHS, RHS));
- }
- Constant *CreateNSWSub(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNSWSub(LHS, RHS));
- }
- Constant *CreateNUWSub(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNUWSub(LHS, RHS));
+ Constant *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFSub(LHS, RHS));
}
- Constant *CreateMul(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getMul(LHS, RHS));
- }
- Constant *CreateNSWMul(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNSWMul(LHS, RHS));
- }
- Constant *CreateNUWMul(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getNUWMul(LHS, RHS));
+ Constant *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFMul(LHS, RHS));
}
- Constant *CreateUDiv(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getUDiv(LHS, RHS));
- }
- Constant *CreateSDiv(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getSDiv(LHS, RHS));
+ Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact));
}
- Constant *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getExactSDiv(LHS, RHS));
+ Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact));
}
Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFDiv(LHS, RHS));
@@ -103,14 +85,15 @@ public:
Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFRem(LHS, RHS));
}
- Constant *CreateShl(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getShl(LHS, RHS));
+ Constant *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW));
}
- Constant *CreateLShr(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getLShr(LHS, RHS));
+ Constant *CreateLShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getLShr(LHS, RHS, isExact));
}
- Constant *CreateAShr(Constant *LHS, Constant *RHS) const {
- return Fold(ConstantExpr::getAShr(LHS, RHS));
+ Constant *CreateAShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getAShr(LHS, RHS, isExact));
}
Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getAnd(LHS, RHS));
@@ -131,14 +114,9 @@ public:
// Unary Operators
//===--------------------------------------------------------------------===//
- Constant *CreateNeg(Constant *C) const {
- return Fold(ConstantExpr::getNeg(C));
- }
- Constant *CreateNSWNeg(Constant *C) const {
- return Fold(ConstantExpr::getNSWNeg(C));
- }
- Constant *CreateNUWNeg(Constant *C) const {
- return Fold(ConstantExpr::getNUWNeg(C));
+ Constant *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getNeg(C, HasNUW, HasNSW));
}
Constant *CreateFNeg(Constant *C) const {
return Fold(ConstantExpr::getFNeg(C));
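
The per-flag TargetFolder entry points collapse into flag arguments; a minimal sketch, with TD, L and R hypothetical:

    #include "llvm/Support/TargetFolder.h"
    using namespace llvm;

    Constant *foldNSWAdd(const TargetData *TD, Constant *L, Constant *R) {
      TargetFolder Folder(TD);
      // Formerly CreateNSWAdd(L, R); now spelled with flag parameters.
      return Folder.CreateAdd(L, R, /*HasNUW=*/false, /*HasNSW=*/true);
    }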
diff --git a/contrib/llvm/include/llvm/System/ThreadLocal.h b/contrib/llvm/include/llvm/Support/ThreadLocal.h
index e6edd79..15350a7 100644
--- a/contrib/llvm/include/llvm/System/ThreadLocal.h
+++ b/contrib/llvm/include/llvm/Support/ThreadLocal.h
@@ -1,4 +1,4 @@
-//===- llvm/System/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===//
+//===- llvm/Support/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_SYSTEM_THREAD_LOCAL_H
#define LLVM_SYSTEM_THREAD_LOCAL_H
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Threading.h"
#include <cassert>
namespace llvm {
@@ -30,21 +30,21 @@ namespace llvm {
const void* getInstance();
void removeInstance();
};
-
+
/// ThreadLocal - A class used to abstract thread-local storage. It holds,
/// for each thread, a pointer to a single object of type T.
template<class T>
class ThreadLocal : public ThreadLocalImpl {
public:
ThreadLocal() : ThreadLocalImpl() { }
-
+
/// get - Fetches a pointer to the object associated with the current
/// thread. If no object has yet been associated, it returns NULL.
T* get() { return static_cast<T*>(getInstance()); }
-
+
// set - Associates a pointer to an object with the current thread.
void set(T* d) { setInstance(d); }
-
+
// erase - Removes the pointer associated with the current thread.
void erase() { removeInstance(); }
};
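
A minimal sketch of the relocated ThreadLocal; the payload struct is hypothetical.

    #include "llvm/Support/ThreadLocal.h"
    using namespace llvm::sys;

    struct PerThreadState { unsigned Depth; };  // hypothetical payload
    static ThreadLocal<PerThreadState> State;

    PerThreadState *getState() {
      if (PerThreadState *S = State.get())
        return S;                 // already associated with this thread
      PerThreadState *S = new PerThreadState();
      State.set(S);               // associate; erase() would remove it
      return S;
    }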
diff --git a/contrib/llvm/include/llvm/System/Threading.h b/contrib/llvm/include/llvm/Support/Threading.h
index 42d2f89..c0e842c 100644
--- a/contrib/llvm/include/llvm/System/Threading.h
+++ b/contrib/llvm/include/llvm/Support/Threading.h
@@ -1,4 +1,4 @@
-//===-- llvm/System/Threading.h - Control multithreading mode --*- C++ -*-===//
+//===-- llvm/Support/Threading.h - Control multithreading mode --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,28 +18,42 @@ namespace llvm {
/// llvm_start_multithreaded - Allocate and initialize structures needed to
/// make LLVM safe for multithreading. The return value indicates whether
/// multithreaded initialization succeeded. LLVM will still be operational
- /// on "failed" return, and will still be safe for hosting threading
+ /// on "failed" return, and will still be safe for hosting threading
/// applications in the JIT, but will not be safe for concurrent calls to the
/// LLVM APIs.
/// THIS MUST EXECUTE IN ISOLATION FROM ALL OTHER LLVM API CALLS.
bool llvm_start_multithreaded();
-
+
/// llvm_stop_multithreaded - Deallocate structures necessary to make LLVM
/// safe for multithreading.
/// THIS MUST EXECUTE IN ISOLATION FROM ALL OTHER LLVM API CALLS.
void llvm_stop_multithreaded();
-
+
/// llvm_is_multithreaded - Check whether LLVM is executing in thread-safe
/// mode or not.
bool llvm_is_multithreaded();
-
+
/// acquire_global_lock - Acquire the global lock. This is a no-op if called
/// before llvm_start_multithreaded().
void llvm_acquire_global_lock();
-
+
/// release_global_lock - Release the global lock. This is a no-op if called
/// before llvm_start_multithreaded().
void llvm_release_global_lock();
+
+ /// llvm_execute_on_thread - Execute the given \arg UserFn on a separate
+ /// thread, passing it the provided \arg UserData.
+ ///
+ /// This function does not guarantee that the code will actually be executed
+  /// on a separate thread or honor the requested stack size, but tries to do
+ /// so where system support is available.
+ ///
+ /// \param UserFn - The callback to execute.
+ /// \param UserData - An argument to pass to the callback function.
+ /// \param RequestedStackSize - If non-zero, a requested size (in bytes) for
+ /// the thread stack.
+ void llvm_execute_on_thread(void (*UserFn)(void*), void *UserData,
+ unsigned RequestedStackSize = 0);
}
#endif
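
A sketch of the new llvm_execute_on_thread entry point; it assumes, per the comment above, that the callback may run either on a fresh thread or inline when thread support is unavailable, so the caller should not rely on concurrency.

    #include "llvm/Support/Threading.h"

    static void Worker(void *UserData) {
      unsigned *Out = static_cast<unsigned*>(UserData);
      *Out = 42;  // hypothetical stack-hungry work
    }

    void runWorker() {
      unsigned Result = 0;
      llvm::llvm_execute_on_thread(Worker, &Result,
                                   /*RequestedStackSize=*/8 << 20);
    }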
diff --git a/contrib/llvm/include/llvm/System/TimeValue.h b/contrib/llvm/include/llvm/Support/TimeValue.h
index b82647f..e122711 100644
--- a/contrib/llvm/include/llvm/System/TimeValue.h
+++ b/contrib/llvm/include/llvm/Support/TimeValue.h
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
#ifndef LLVM_SYSTEM_TIMEVALUE_H
diff --git a/contrib/llvm/include/llvm/Support/Timer.h b/contrib/llvm/include/llvm/Support/Timer.h
index f959136..404cb6d 100644
--- a/contrib/llvm/include/llvm/Support/Timer.h
+++ b/contrib/llvm/include/llvm/Support/Timer.h
@@ -15,7 +15,7 @@
#ifndef LLVM_SUPPORT_TIMER_H
#define LLVM_SUPPORT_TIMER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
diff --git a/contrib/llvm/include/llvm/Support/ToolOutputFile.h b/contrib/llvm/include/llvm/Support/ToolOutputFile.h
new file mode 100644
index 0000000..65b182a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ToolOutputFile.h
@@ -0,0 +1,62 @@
+//===- ToolOutputFile.h - Output files for compiler-like tools -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the tool_output_file class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TOOL_OUTPUT_FILE_H
+#define LLVM_SUPPORT_TOOL_OUTPUT_FILE_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// tool_output_file - This class contains a raw_fd_ostream and adds a
+/// few extra features commonly needed for compiler-like tool output files:
+/// - The file is automatically deleted if the process is killed.
+/// - The file is automatically deleted when the tool_output_file
+/// object is destroyed unless the client calls keep().
+class tool_output_file {
+ /// Installer - This class is declared before the raw_fd_ostream so that
+ /// it is constructed before the raw_fd_ostream is constructed and
+ /// destructed after the raw_fd_ostream is destructed. It installs
+ /// cleanups in its constructor and uninstalls them in its destructor.
+ class CleanupInstaller {
+ /// Filename - The name of the file.
+ std::string Filename;
+ public:
+ /// Keep - The flag which indicates whether we should not delete the file.
+ bool Keep;
+
+ explicit CleanupInstaller(const char *filename);
+ ~CleanupInstaller();
+ } Installer;
+
+ /// OS - The contained stream. This is intentionally declared after
+ /// Installer.
+ raw_fd_ostream OS;
+
+public:
+ /// tool_output_file - This constructor's arguments are passed to
+  /// raw_fd_ostream's constructor.
+ tool_output_file(const char *filename, std::string &ErrorInfo,
+ unsigned Flags = 0);
+
+ /// os - Return the contained raw_fd_ostream.
+ raw_fd_ostream &os() { return OS; }
+
+ /// keep - Indicate that the tool's job wrt this output file has been
+ /// successful and the file should not be deleted.
+ void keep() { Installer.Keep = true; }
+};
+
+} // end llvm namespace
+
+#endif
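
A minimal sketch of the class in its new home; the filename is hypothetical.

    #include "llvm/Support/ToolOutputFile.h"
    using namespace llvm;

    bool emitOutput(const char *Filename) {
      std::string ErrorInfo;
      tool_output_file Out(Filename, ErrorInfo);  // deleted on signal or
                                                  // destruction unless kept
      if (!ErrorInfo.empty())
        return false;                             // could not open the file
      Out.os() << "result\n";
      Out.keep();                                 // success: keep the file
      return true;
    }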
diff --git a/contrib/llvm/include/llvm/Support/TypeBuilder.h b/contrib/llvm/include/llvm/Support/TypeBuilder.h
index 81c2747..ea63da0 100644
--- a/contrib/llvm/include/llvm/Support/TypeBuilder.h
+++ b/contrib/llvm/include/llvm/Support/TypeBuilder.h
@@ -88,6 +88,8 @@ class ieee_double {};
class x86_fp80 {};
class fp128 {};
class ppc_fp128 {};
+// X86 MMX.
+class x86_mmx {};
} // namespace types
// LLVM doesn't have const or volatile types.
@@ -219,6 +221,10 @@ template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
public:
static const Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
};
+template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
+public:
+ static const Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
+};
template<bool cross> class TypeBuilder<void, cross> {
public:
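
The new specialization makes the MMX type reachable through TypeBuilder; a one-line sketch:

    #include "llvm/Support/TypeBuilder.h"
    using namespace llvm;

    const Type *getMMX(LLVMContext &C) {
      // Equivalent to Type::getX86_MMXTy(C).
      return TypeBuilder<types::x86_mmx, false>::get(C);
    }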
diff --git a/contrib/llvm/include/llvm/System/Valgrind.h b/contrib/llvm/include/llvm/Support/Valgrind.h
index 5ec79c3..7662eaa 100644
--- a/contrib/llvm/include/llvm/System/Valgrind.h
+++ b/contrib/llvm/include/llvm/Support/Valgrind.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Valgrind.h - Communication with Valgrind -----*- C++ -*-===//
+//===- llvm/Support/Valgrind.h - Communication with Valgrind -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
index 39bdbd8..6bfae5e 100644
--- a/contrib/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -15,7 +15,7 @@
#define LLVM_SUPPORT_RAW_OSTREAM_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class format_object_base;
@@ -165,7 +165,7 @@ public:
}
raw_ostream &operator<<(const char *Str) {
- // Inline fast path, particulary for constant strings where a sufficiently
+ // Inline fast path, particularly for constant strings where a sufficiently
// smart compiler will simplify strlen.
return this->operator<<(StringRef(Str));
@@ -196,7 +196,7 @@ public:
/// write_escaped - Output \arg Str, turning '\\', '\t', '\n', '"', and
/// anything that doesn't satisfy std::isprint into an escape sequence.
- raw_ostream &write_escaped(StringRef Str);
+ raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);
raw_ostream &write(unsigned char C);
raw_ostream &write(const char *Ptr, size_t Size);
@@ -301,6 +301,10 @@ class raw_fd_ostream : public raw_ostream {
///
bool Error;
+ /// Controls whether the stream should attempt to use atomic writes, when
+ /// possible.
+ bool UseAtomicWrites;
+
uint64_t pos;
/// write_impl - See raw_ostream::write_impl.
@@ -349,10 +353,7 @@ public:
/// raw_fd_ostream ctor - FD is the file descriptor that this writes to. If
/// ShouldClose is true, this closes the file when the stream is destroyed.
- raw_fd_ostream(int fd, bool shouldClose,
- bool unbuffered=false) : raw_ostream(unbuffered), FD(fd),
- ShouldClose(shouldClose),
- Error(false) {}
+ raw_fd_ostream(int fd, bool shouldClose, bool unbuffered=false);
~raw_fd_ostream();
@@ -361,9 +362,19 @@ public:
void close();
/// seek - Flushes the stream and repositions the underlying file descriptor
- /// positition to the offset specified from the beginning of the file.
+ /// position to the offset specified from the beginning of the file.
uint64_t seek(uint64_t off);
+ /// SetUseAtomicWrite - Set the stream to attempt to use atomic writes for
+ /// individual output routines where possible.
+ ///
+ /// Note that because raw_ostream's are typically buffered, this flag is only
+ /// sensible when used on unbuffered streams which will flush their output
+ /// immediately.
+ void SetUseAtomicWrites(bool Value) {
+ UseAtomicWrites = Value;
+ }
+
virtual raw_ostream &changeColor(enum Colors colors, bool bold=false,
bool bg=false);
virtual raw_ostream &resetColor();
@@ -475,45 +486,6 @@ public:
~raw_null_ostream();
};
-/// tool_output_file - This class contains a raw_fd_ostream and adds a
-/// few extra features commonly needed for compiler-like tool output files:
-/// - The file is automatically deleted if the process is killed.
-/// - The file is automatically deleted when the tool_output_file
-/// object is destroyed unless the client calls keep().
-class tool_output_file {
- /// Installer - This class is declared before the raw_fd_ostream so that
- /// it is constructed before the raw_fd_ostream is constructed and
- /// destructed after the raw_fd_ostream is destructed. It installs
- /// cleanups in its constructor and uninstalls them in its destructor.
- class CleanupInstaller {
- /// Filename - The name of the file.
- std::string Filename;
- public:
- /// Keep - The flag which indicates whether we should not delete the file.
- bool Keep;
-
- explicit CleanupInstaller(const char *filename);
- ~CleanupInstaller();
- } Installer;
-
- /// OS - The contained stream. This is intentionally declared after
- /// Installer.
- raw_fd_ostream OS;
-
-public:
- /// tool_output_file - This constructor's arguments are passed to
- /// to raw_fd_ostream's constructor.
- tool_output_file(const char *filename, std::string &ErrorInfo,
- unsigned Flags = 0);
-
- /// os - Return the contained raw_fd_ostream.
- raw_fd_ostream &os() { return OS; }
-
- /// keep - Indicate that the tool's job wrt this output file has been
- /// successful and the file should not be deleted.
- void keep() { Installer.Keep = true; }
-};
-
} // end llvm namespace
#endif
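
A sketch combining the two raw_ostream additions above; since atomic writes only make sense on unbuffered streams (per the new comment), the stream is switched to unbuffered first.

    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void logLine(raw_fd_ostream &OS, StringRef S) {
      OS.SetUnbuffered();
      OS.SetUseAtomicWrites(true);             // best effort only
      OS.write_escaped(S, /*UseHexEscapes=*/true) << '\n';
    }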
diff --git a/contrib/llvm/include/llvm/Support/system_error.h b/contrib/llvm/include/llvm/Support/system_error.h
new file mode 100644
index 0000000..e5306ec
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/system_error.h
@@ -0,0 +1,910 @@
+//===---------------------------- system_error ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This was lifted from libc++ and modified for C++03. This is called
+// system_error even though it does not define that class because that's what
+// it's called in C++0x. We don't define system_error because it is only used
+// for exception handling, which we don't use in LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_SYSTEM_ERROR_H
+#define LLVM_SYSTEM_SYSTEM_ERROR_H
+
+/*
+ system_error synopsis
+
+namespace std
+{
+
+class error_category
+{
+public:
+ virtual ~error_category();
+
+ error_category(const error_category&) = delete;
+ error_category& operator=(const error_category&) = delete;
+
+ virtual const char* name() const = 0;
+ virtual error_condition default_error_condition(int ev) const;
+ virtual bool equivalent(int code, const error_condition& condition) const;
+ virtual bool equivalent(const error_code& code, int condition) const;
+ virtual std::string message(int ev) const = 0;
+
+ bool operator==(const error_category& rhs) const;
+ bool operator!=(const error_category& rhs) const;
+ bool operator<(const error_category& rhs) const;
+};
+
+const error_category& generic_category();
+const error_category& system_category();
+
+template <class T> struct is_error_code_enum
+ : public false_type {};
+
+template <class T> struct is_error_condition_enum
+ : public false_type {};
+
+class error_code
+{
+public:
+ // constructors:
+ error_code();
+ error_code(int val, const error_category& cat);
+ template <class ErrorCodeEnum>
+ error_code(ErrorCodeEnum e);
+
+ // modifiers:
+ void assign(int val, const error_category& cat);
+ template <class ErrorCodeEnum>
+ error_code& operator=(ErrorCodeEnum e);
+ void clear();
+
+ // observers:
+ int value() const;
+ const error_category& category() const;
+ error_condition default_error_condition() const;
+ std::string message() const;
+ explicit operator bool() const;
+};
+
+// non-member functions:
+bool operator<(const error_code& lhs, const error_code& rhs);
+template <class charT, class traits>
+ basic_ostream<charT,traits>&
+ operator<<(basic_ostream<charT,traits>& os, const error_code& ec);
+
+class error_condition
+{
+public:
+ // constructors:
+ error_condition();
+ error_condition(int val, const error_category& cat);
+ template <class ErrorConditionEnum>
+ error_condition(ErrorConditionEnum e);
+
+ // modifiers:
+ void assign(int val, const error_category& cat);
+ template <class ErrorConditionEnum>
+ error_condition& operator=(ErrorConditionEnum e);
+ void clear();
+
+ // observers:
+ int value() const;
+ const error_category& category() const;
+ std::string message() const;
+ explicit operator bool() const;
+};
+
+bool operator<(const error_condition& lhs, const error_condition& rhs);
+
+class system_error
+ : public runtime_error
+{
+public:
+ system_error(error_code ec, const std::string& what_arg);
+ system_error(error_code ec, const char* what_arg);
+ system_error(error_code ec);
+ system_error(int ev, const error_category& ecat, const std::string& what_arg);
+ system_error(int ev, const error_category& ecat, const char* what_arg);
+ system_error(int ev, const error_category& ecat);
+
+ const error_code& code() const throw();
+ const char* what() const throw();
+};
+
+enum class errc
+{
+ address_family_not_supported, // EAFNOSUPPORT
+ address_in_use, // EADDRINUSE
+ address_not_available, // EADDRNOTAVAIL
+ already_connected, // EISCONN
+ argument_list_too_long, // E2BIG
+ argument_out_of_domain, // EDOM
+ bad_address, // EFAULT
+ bad_file_descriptor, // EBADF
+ bad_message, // EBADMSG
+ broken_pipe, // EPIPE
+ connection_aborted, // ECONNABORTED
+ connection_already_in_progress, // EALREADY
+ connection_refused, // ECONNREFUSED
+ connection_reset, // ECONNRESET
+ cross_device_link, // EXDEV
+ destination_address_required, // EDESTADDRREQ
+ device_or_resource_busy, // EBUSY
+ directory_not_empty, // ENOTEMPTY
+ executable_format_error, // ENOEXEC
+ file_exists, // EEXIST
+ file_too_large, // EFBIG
+ filename_too_long, // ENAMETOOLONG
+ function_not_supported, // ENOSYS
+ host_unreachable, // EHOSTUNREACH
+ identifier_removed, // EIDRM
+ illegal_byte_sequence, // EILSEQ
+ inappropriate_io_control_operation, // ENOTTY
+ interrupted, // EINTR
+ invalid_argument, // EINVAL
+ invalid_seek, // ESPIPE
+ io_error, // EIO
+ is_a_directory, // EISDIR
+ message_size, // EMSGSIZE
+ network_down, // ENETDOWN
+ network_reset, // ENETRESET
+ network_unreachable, // ENETUNREACH
+ no_buffer_space, // ENOBUFS
+ no_child_process, // ECHILD
+ no_link, // ENOLINK
+ no_lock_available, // ENOLCK
+ no_message_available, // ENODATA
+ no_message, // ENOMSG
+ no_protocol_option, // ENOPROTOOPT
+ no_space_on_device, // ENOSPC
+ no_stream_resources, // ENOSR
+ no_such_device_or_address, // ENXIO
+ no_such_device, // ENODEV
+ no_such_file_or_directory, // ENOENT
+ no_such_process, // ESRCH
+ not_a_directory, // ENOTDIR
+ not_a_socket, // ENOTSOCK
+ not_a_stream, // ENOSTR
+ not_connected, // ENOTCONN
+ not_enough_memory, // ENOMEM
+ not_supported, // ENOTSUP
+ operation_canceled, // ECANCELED
+ operation_in_progress, // EINPROGRESS
+ operation_not_permitted, // EPERM
+ operation_not_supported, // EOPNOTSUPP
+ operation_would_block, // EWOULDBLOCK
+ owner_dead, // EOWNERDEAD
+ permission_denied, // EACCES
+ protocol_error, // EPROTO
+ protocol_not_supported, // EPROTONOSUPPORT
+ read_only_file_system, // EROFS
+ resource_deadlock_would_occur, // EDEADLK
+ resource_unavailable_try_again, // EAGAIN
+ result_out_of_range, // ERANGE
+ state_not_recoverable, // ENOTRECOVERABLE
+ stream_timeout, // ETIME
+ text_file_busy, // ETXTBSY
+ timed_out, // ETIMEDOUT
+ too_many_files_open_in_system, // ENFILE
+ too_many_files_open, // EMFILE
+ too_many_links, // EMLINK
+ too_many_symbolic_link_levels, // ELOOP
+ value_too_large, // EOVERFLOW
+ wrong_protocol_type // EPROTOTYPE
+};
+
+template <> struct is_error_condition_enum<errc> : true_type { };
+
+error_code make_error_code(errc e);
+error_condition make_error_condition(errc e);
+
+// Comparison operators:
+bool operator==(const error_code& lhs, const error_code& rhs);
+bool operator==(const error_code& lhs, const error_condition& rhs);
+bool operator==(const error_condition& lhs, const error_code& rhs);
+bool operator==(const error_condition& lhs, const error_condition& rhs);
+bool operator!=(const error_code& lhs, const error_code& rhs);
+bool operator!=(const error_code& lhs, const error_condition& rhs);
+bool operator!=(const error_condition& lhs, const error_code& rhs);
+bool operator!=(const error_condition& lhs, const error_condition& rhs);
+
+template <> struct hash<std::error_code>;
+
+} // std
+
+*/
+
+#include "llvm/Config/config.h"
+#include "llvm/Support/type_traits.h"
+#include <cerrno>
+#include <string>
+
+// This must be here instead of a .inc file because it is used in the definition
+// of the enum values below.
+#ifdef LLVM_ON_WIN32
+
+ // The following numbers were taken from VS2010.
+# ifndef EAFNOSUPPORT
+# define EAFNOSUPPORT 102
+# endif
+# ifndef EADDRINUSE
+# define EADDRINUSE 100
+# endif
+# ifndef EADDRNOTAVAIL
+# define EADDRNOTAVAIL 101
+# endif
+# ifndef EISCONN
+# define EISCONN 113
+# endif
+# ifndef E2BIG
+# define E2BIG 7
+# endif
+# ifndef EDOM
+# define EDOM 33
+# endif
+# ifndef EFAULT
+# define EFAULT 14
+# endif
+# ifndef EBADF
+# define EBADF 9
+# endif
+# ifndef EBADMSG
+# define EBADMSG 104
+# endif
+# ifndef EPIPE
+# define EPIPE 32
+# endif
+# ifndef ECONNABORTED
+# define ECONNABORTED 106
+# endif
+# ifndef EALREADY
+# define EALREADY 103
+# endif
+# ifndef ECONNREFUSED
+# define ECONNREFUSED 107
+# endif
+# ifndef ECONNRESET
+# define ECONNRESET 108
+# endif
+# ifndef EXDEV
+# define EXDEV 18
+# endif
+# ifndef EDESTADDRREQ
+# define EDESTADDRREQ 109
+# endif
+# ifndef EBUSY
+# define EBUSY 16
+# endif
+# ifndef ENOTEMPTY
+# define ENOTEMPTY 41
+# endif
+# ifndef ENOEXEC
+# define ENOEXEC 8
+# endif
+# ifndef EEXIST
+# define EEXIST 17
+# endif
+# ifndef EFBIG
+# define EFBIG 27
+# endif
+# ifndef ENAMETOOLONG
+# define ENAMETOOLONG 38
+# endif
+# ifndef ENOSYS
+# define ENOSYS 40
+# endif
+# ifndef EHOSTUNREACH
+# define EHOSTUNREACH 110
+# endif
+# ifndef EIDRM
+# define EIDRM 111
+# endif
+# ifndef EILSEQ
+# define EILSEQ 42
+# endif
+# ifndef ENOTTY
+# define ENOTTY 25
+# endif
+# ifndef EINTR
+# define EINTR 4
+# endif
+# ifndef EINVAL
+# define EINVAL 22
+# endif
+# ifndef ESPIPE
+# define ESPIPE 29
+# endif
+# ifndef EIO
+# define EIO 5
+# endif
+# ifndef EISDIR
+# define EISDIR 21
+# endif
+# ifndef EMSGSIZE
+# define EMSGSIZE 115
+# endif
+# ifndef ENETDOWN
+# define ENETDOWN 116
+# endif
+# ifndef ENETRESET
+# define ENETRESET 117
+# endif
+# ifndef ENETUNREACH
+# define ENETUNREACH 118
+# endif
+# ifndef ENOBUFS
+# define ENOBUFS 119
+# endif
+# ifndef ECHILD
+# define ECHILD 10
+# endif
+# ifndef ENOLINK
+# define ENOLINK 121
+# endif
+# ifndef ENOLCK
+# define ENOLCK 39
+# endif
+# ifndef ENODATA
+# define ENODATA 120
+# endif
+# ifndef ENOMSG
+# define ENOMSG 122
+# endif
+# ifndef ENOPROTOOPT
+# define ENOPROTOOPT 123
+# endif
+# ifndef ENOSPC
+# define ENOSPC 28
+# endif
+# ifndef ENOSR
+# define ENOSR 124
+# endif
+# ifndef ENXIO
+# define ENXIO 6
+# endif
+# ifndef ENODEV
+# define ENODEV 19
+# endif
+# ifndef ENOENT
+# define ENOENT 2
+# endif
+# ifndef ESRCH
+# define ESRCH 3
+# endif
+# ifndef ENOTDIR
+# define ENOTDIR 20
+# endif
+# ifndef ENOTSOCK
+# define ENOTSOCK 128
+# endif
+# ifndef ENOSTR
+# define ENOSTR 125
+# endif
+# ifndef ENOTCONN
+# define ENOTCONN 126
+# endif
+# ifndef ENOMEM
+# define ENOMEM 12
+# endif
+# ifndef ENOTSUP
+# define ENOTSUP 129
+# endif
+# ifndef ECANCELED
+# define ECANCELED 105
+# endif
+# ifndef EINPROGRESS
+# define EINPROGRESS 112
+# endif
+# ifndef EPERM
+# define EPERM 1
+# endif
+# ifndef EOPNOTSUPP
+# define EOPNOTSUPP 130
+# endif
+# ifndef EWOULDBLOCK
+# define EWOULDBLOCK 140
+# endif
+# ifndef EOWNERDEAD
+# define EOWNERDEAD 133
+# endif
+# ifndef EACCES
+# define EACCES 13
+# endif
+# ifndef EPROTO
+# define EPROTO 134
+# endif
+# ifndef EPROTONOSUPPORT
+# define EPROTONOSUPPORT 135
+# endif
+# ifndef EROFS
+# define EROFS 30
+# endif
+# ifndef EDEADLK
+# define EDEADLK 36
+# endif
+# ifndef EAGAIN
+# define EAGAIN 11
+# endif
+# ifndef ERANGE
+# define ERANGE 34
+# endif
+# ifndef ENOTRECOVERABLE
+# define ENOTRECOVERABLE 127
+# endif
+# ifndef ETIME
+# define ETIME 137
+# endif
+# ifndef ETXTBSY
+# define ETXTBSY 139
+# endif
+# ifndef ETIMEDOUT
+# define ETIMEDOUT 138
+# endif
+# ifndef ENFILE
+# define ENFILE 23
+# endif
+# ifndef EMFILE
+# define EMFILE 24
+# endif
+# ifndef EMLINK
+# define EMLINK 31
+# endif
+# ifndef ELOOP
+# define ELOOP 114
+# endif
+# ifndef EOVERFLOW
+# define EOVERFLOW 132
+# endif
+# ifndef EPROTOTYPE
+# define EPROTOTYPE 136
+# endif
+#endif
+
+namespace llvm {
+
+template <class T, T v>
+struct integral_constant {
+ typedef T value_type;
+ static const value_type value = v;
+ typedef integral_constant<T,v> type;
+ operator value_type() { return value; }
+};
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+// is_error_code_enum
+
+template <class Tp> struct is_error_code_enum : public false_type {};
+
+// is_error_condition_enum
+
+template <class Tp> struct is_error_condition_enum : public false_type {};
+
+// Some error codes are not present on all platforms, so we provide equivalents
+// for them:
+
+//enum class errc
+struct errc {
+enum _ {
+ success = 0,
+ address_family_not_supported = EAFNOSUPPORT,
+ address_in_use = EADDRINUSE,
+ address_not_available = EADDRNOTAVAIL,
+ already_connected = EISCONN,
+ argument_list_too_long = E2BIG,
+ argument_out_of_domain = EDOM,
+ bad_address = EFAULT,
+ bad_file_descriptor = EBADF,
+#ifdef EBADMSG
+ bad_message = EBADMSG,
+#else
+ bad_message = EINVAL,
+#endif
+ broken_pipe = EPIPE,
+ connection_aborted = ECONNABORTED,
+ connection_already_in_progress = EALREADY,
+ connection_refused = ECONNREFUSED,
+ connection_reset = ECONNRESET,
+ cross_device_link = EXDEV,
+ destination_address_required = EDESTADDRREQ,
+ device_or_resource_busy = EBUSY,
+ directory_not_empty = ENOTEMPTY,
+ executable_format_error = ENOEXEC,
+ file_exists = EEXIST,
+ file_too_large = EFBIG,
+ filename_too_long = ENAMETOOLONG,
+ function_not_supported = ENOSYS,
+ host_unreachable = EHOSTUNREACH,
+ identifier_removed = EIDRM,
+ illegal_byte_sequence = EILSEQ,
+ inappropriate_io_control_operation = ENOTTY,
+ interrupted = EINTR,
+ invalid_argument = EINVAL,
+ invalid_seek = ESPIPE,
+ io_error = EIO,
+ is_a_directory = EISDIR,
+ message_size = EMSGSIZE,
+ network_down = ENETDOWN,
+ network_reset = ENETRESET,
+ network_unreachable = ENETUNREACH,
+ no_buffer_space = ENOBUFS,
+ no_child_process = ECHILD,
+#ifdef ENOLINK
+ no_link = ENOLINK,
+#else
+ no_link = EINVAL,
+#endif
+ no_lock_available = ENOLCK,
+#ifdef ENODATA
+ no_message_available = ENODATA,
+#else
+ no_message_available = ENOMSG,
+#endif
+ no_message = ENOMSG,
+ no_protocol_option = ENOPROTOOPT,
+ no_space_on_device = ENOSPC,
+#ifdef ENOSR
+ no_stream_resources = ENOSR,
+#else
+ no_stream_resources = ENOMEM,
+#endif
+ no_such_device_or_address = ENXIO,
+ no_such_device = ENODEV,
+ no_such_file_or_directory = ENOENT,
+ no_such_process = ESRCH,
+ not_a_directory = ENOTDIR,
+ not_a_socket = ENOTSOCK,
+#ifdef ENOSTR
+ not_a_stream = ENOSTR,
+#else
+ not_a_stream = EINVAL,
+#endif
+ not_connected = ENOTCONN,
+ not_enough_memory = ENOMEM,
+ not_supported = ENOTSUP,
+#ifdef ECANCELED
+ operation_canceled = ECANCELED,
+#else
+ operation_canceled = EINVAL,
+#endif
+ operation_in_progress = EINPROGRESS,
+ operation_not_permitted = EPERM,
+ operation_not_supported = EOPNOTSUPP,
+ operation_would_block = EWOULDBLOCK,
+#ifdef EOWNERDEAD
+ owner_dead = EOWNERDEAD,
+#else
+ owner_dead = EINVAL,
+#endif
+ permission_denied = EACCES,
+#ifdef EPROTO
+ protocol_error = EPROTO,
+#else
+ protocol_error = EINVAL,
+#endif
+ protocol_not_supported = EPROTONOSUPPORT,
+ read_only_file_system = EROFS,
+ resource_deadlock_would_occur = EDEADLK,
+ resource_unavailable_try_again = EAGAIN,
+ result_out_of_range = ERANGE,
+#ifdef ENOTRECOVERABLE
+ state_not_recoverable = ENOTRECOVERABLE,
+#else
+ state_not_recoverable = EINVAL,
+#endif
+#ifdef ETIME
+ stream_timeout = ETIME,
+#else
+ stream_timeout = ETIMEDOUT,
+#endif
+ text_file_busy = ETXTBSY,
+ timed_out = ETIMEDOUT,
+ too_many_files_open_in_system = ENFILE,
+ too_many_files_open = EMFILE,
+ too_many_links = EMLINK,
+ too_many_symbolic_link_levels = ELOOP,
+ value_too_large = EOVERFLOW,
+ wrong_protocol_type = EPROTOTYPE
+};
+
+ _ v_;
+
+ errc(_ v) : v_(v) {}
+ operator int() const {return v_;}
+};
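+
+// The struct-plus-nested-enum pattern above emulates a C++0x "enum class" in
+// C++03: callers must write errc::file_exists and so on, while operator int()
+// still lets a value act as a plain error number. A usage sketch (relying on
+// make_error_condition below):
+//
+//   error_condition c = make_error_condition(errc::file_exists);
+//   assert(c.value() == EEXIST);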
+
+template <> struct is_error_condition_enum<errc> : true_type { };
+
+template <> struct is_error_condition_enum<errc::_> : true_type { };
+
+class error_condition;
+class error_code;
+
+// class error_category
+
+class _do_message;
+
+class error_category
+{
+public:
+ virtual ~error_category();
+
+private:
+ error_category();
+ error_category(const error_category&);// = delete;
+ error_category& operator=(const error_category&);// = delete;
+
+public:
+ virtual const char* name() const = 0;
+ virtual error_condition default_error_condition(int _ev) const;
+ virtual bool equivalent(int _code, const error_condition& _condition) const;
+ virtual bool equivalent(const error_code& _code, int _condition) const;
+ virtual std::string message(int _ev) const = 0;
+
+ bool operator==(const error_category& _rhs) const {return this == &_rhs;}
+
+ bool operator!=(const error_category& _rhs) const {return !(*this == _rhs);}
+
+ bool operator< (const error_category& _rhs) const {return this < &_rhs;}
+
+ friend class _do_message;
+};
+
+class _do_message : public error_category
+{
+public:
+ virtual std::string message(int ev) const;
+};
+
+const error_category& generic_category();
+const error_category& system_category();
+
+/// Get the error_category used for errno values from POSIX functions. This is
+/// the same as the system_category on POSIX systems, but is the same as the
+/// generic_category on Windows.
+const error_category& posix_category();
+
+class error_condition
+{
+ int _val_;
+ const error_category* _cat_;
+public:
+ error_condition() : _val_(0), _cat_(&generic_category()) {}
+
+ error_condition(int _val, const error_category& _cat)
+ : _val_(_val), _cat_(&_cat) {}
+
+ template <class E>
+ error_condition(E _e, typename enable_if_c<
+ is_error_condition_enum<E>::value
+ >::type* = 0)
+ {*this = make_error_condition(_e);}
+
+ void assign(int _val, const error_category& _cat) {
+ _val_ = _val;
+ _cat_ = &_cat;
+ }
+
+ template <class E>
+ typename enable_if_c
+ <
+ is_error_condition_enum<E>::value,
+ error_condition&
+ >::type
+ operator=(E _e)
+ {*this = make_error_condition(_e); return *this;}
+
+ void clear() {
+ _val_ = 0;
+ _cat_ = &generic_category();
+ }
+
+ int value() const {return _val_;}
+
+ const error_category& category() const {return *_cat_;}
+ std::string message() const;
+
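+ // The next two members implement the C++03 "safe bool" idiom, standing in
+ // for the "explicit operator bool()" of the C++0x synopsis above: the
+ // conversion target is a function-pointer type, so the object tests true
+ // in boolean contexts exactly when it holds a non-zero error, without
+ // permitting accidental arithmetic or comparisons.
+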
+ typedef void (*unspecified_bool_type)();
+ static void unspecified_bool_true() {}
+
+ operator unspecified_bool_type() const { // true if error
+ return _val_ == 0 ? 0 : unspecified_bool_true;
+ }
+};
+
+inline error_condition make_error_condition(errc _e) {
+ return error_condition(static_cast<int>(_e), generic_category());
+}
+
+inline bool operator<(const error_condition& _x, const error_condition& _y) {
+ return _x.category() < _y.category()
+ || (_x.category() == _y.category() && _x.value() < _y.value());
+}
+
+// error_code
+
+class error_code {
+ int _val_;
+ const error_category* _cat_;
+public:
+ error_code() : _val_(0), _cat_(&system_category()) {}
+
+ error_code(int _val, const error_category& _cat)
+ : _val_(_val), _cat_(&_cat) {}
+
+ template <class E>
+ error_code(E _e, typename enable_if_c<
+ is_error_code_enum<E>::value
+ >::type* = 0) {
+ *this = make_error_code(_e);
+ }
+
+ void assign(int _val, const error_category& _cat) {
+ _val_ = _val;
+ _cat_ = &_cat;
+ }
+
+ template <class E>
+ typename enable_if_c
+ <
+ is_error_code_enum<E>::value,
+ error_code&
+ >::type
+ operator=(E _e)
+ {*this = make_error_code(_e); return *this;}
+
+ void clear() {
+ _val_ = 0;
+ _cat_ = &system_category();
+ }
+
+ int value() const {return _val_;}
+
+ const error_category& category() const {return *_cat_;}
+
+ error_condition default_error_condition() const
+ {return _cat_->default_error_condition(_val_);}
+
+ std::string message() const;
+
+ typedef void (*unspecified_bool_type)();
+ static void unspecified_bool_true() {}
+
+ operator unspecified_bool_type() const { // true if error
+ return _val_ == 0 ? 0 : unspecified_bool_true;
+ }
+};
+
+inline error_code make_error_code(errc _e) {
+ return error_code(static_cast<int>(_e), generic_category());
+}
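+
+// Usage sketch (the failing POSIX call is hypothetical, illustration only):
+// wrap errno and test it against a portable errc condition; the mixed
+// operator==(error_code, error_condition) below routes the comparison
+// through the categories' equivalent() methods.
+//
+//   if (::open("/no/such/file", O_RDONLY) < 0) {
+//     error_code ec(errno, posix_category());
+//     if (ec == errc::no_such_file_or_directory)
+//       ...
+//   }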
+
+inline bool operator<(const error_code& _x, const error_code& _y) {
+ return _x.category() < _y.category()
+ || (_x.category() == _y.category() && _x.value() < _y.value());
+}
+
+inline bool operator==(const error_code& _x, const error_code& _y) {
+ return _x.category() == _y.category() && _x.value() == _y.value();
+}
+
+inline bool operator==(const error_code& _x, const error_condition& _y) {
+ return _x.category().equivalent(_x.value(), _y)
+ || _y.category().equivalent(_x, _y.value());
+}
+
+inline bool operator==(const error_condition& _x, const error_code& _y) {
+ return _y == _x;
+}
+
+inline bool operator==(const error_condition& _x, const error_condition& _y) {
+ return _x.category() == _y.category() && _x.value() == _y.value();
+}
+
+inline bool operator!=(const error_code& _x, const error_code& _y) {
+ return !(_x == _y);
+}
+
+inline bool operator!=(const error_code& _x, const error_condition& _y) {
+ return !(_x == _y);
+}
+
+inline bool operator!=(const error_condition& _x, const error_code& _y) {
+ return !(_x == _y);
+}
+
+inline bool operator!=(const error_condition& _x, const error_condition& _y) {
+ return !(_x == _y);
+}
+
+// Windows errors.
+
+// To construct an error_code after an API error:
+//
+// error_code( ::GetLastError(), system_category() )
+struct windows_error {
+enum _ {
+ success = 0,
+ // These names and values are based on Windows WinError.h
+ // This is not a complete list. Add to this list if you need to explicitly
+ // check for it.
+ invalid_function = 1, // ERROR_INVALID_FUNCTION,
+ file_not_found = 2, // ERROR_FILE_NOT_FOUND,
+ path_not_found = 3, // ERROR_PATH_NOT_FOUND,
+ too_many_open_files = 4, // ERROR_TOO_MANY_OPEN_FILES,
+ access_denied = 5, // ERROR_ACCESS_DENIED,
+ invalid_handle = 6, // ERROR_INVALID_HANDLE,
+ arena_trashed = 7, // ERROR_ARENA_TRASHED,
+ not_enough_memory = 8, // ERROR_NOT_ENOUGH_MEMORY,
+ invalid_block = 9, // ERROR_INVALID_BLOCK,
+ bad_environment = 10, // ERROR_BAD_ENVIRONMENT,
+ bad_format = 11, // ERROR_BAD_FORMAT,
+ invalid_access = 12, // ERROR_INVALID_ACCESS,
+ outofmemory = 14, // ERROR_OUTOFMEMORY,
+ invalid_drive = 15, // ERROR_INVALID_DRIVE,
+ current_directory = 16, // ERROR_CURRENT_DIRECTORY,
+ not_same_device = 17, // ERROR_NOT_SAME_DEVICE,
+ no_more_files = 18, // ERROR_NO_MORE_FILES,
+ write_protect = 19, // ERROR_WRITE_PROTECT,
+ bad_unit = 20, // ERROR_BAD_UNIT,
+ not_ready = 21, // ERROR_NOT_READY,
+ bad_command = 22, // ERROR_BAD_COMMAND,
+ crc = 23, // ERROR_CRC,
+ bad_length = 24, // ERROR_BAD_LENGTH,
+ seek = 25, // ERROR_SEEK,
+ not_dos_disk = 26, // ERROR_NOT_DOS_DISK,
+ sector_not_found = 27, // ERROR_SECTOR_NOT_FOUND,
+ out_of_paper = 28, // ERROR_OUT_OF_PAPER,
+ write_fault = 29, // ERROR_WRITE_FAULT,
+ read_fault = 30, // ERROR_READ_FAULT,
+ gen_failure = 31, // ERROR_GEN_FAILURE,
+ sharing_violation = 32, // ERROR_SHARING_VIOLATION,
+ lock_violation = 33, // ERROR_LOCK_VIOLATION,
+ wrong_disk = 34, // ERROR_WRONG_DISK,
+ sharing_buffer_exceeded = 36, // ERROR_SHARING_BUFFER_EXCEEDED,
+ handle_eof = 38, // ERROR_HANDLE_EOF,
+ handle_disk_full = 39, // ERROR_HANDLE_DISK_FULL,
+ rem_not_list = 51, // ERROR_REM_NOT_LIST,
+ dup_name = 52, // ERROR_DUP_NAME,
+ bad_net_path = 53, // ERROR_BAD_NETPATH,
+ network_busy = 54, // ERROR_NETWORK_BUSY,
+ file_exists = 80, // ERROR_FILE_EXISTS,
+ cannot_make = 82, // ERROR_CANNOT_MAKE,
+ broken_pipe = 109, // ERROR_BROKEN_PIPE,
+ open_failed = 110, // ERROR_OPEN_FAILED,
+ buffer_overflow = 111, // ERROR_BUFFER_OVERFLOW,
+ disk_full = 112, // ERROR_DISK_FULL,
+ insufficient_buffer = 122, // ERROR_INSUFFICIENT_BUFFER,
+ lock_failed = 167, // ERROR_LOCK_FAILED,
+ busy = 170, // ERROR_BUSY,
+ cancel_violation = 173, // ERROR_CANCEL_VIOLATION,
+ already_exists = 183 // ERROR_ALREADY_EXISTS
+};
+ _ v_;
+
+ windows_error(_ v) : v_(v) {}
+ explicit windows_error(int v) : v_(_(v)) {}
+ operator int() const {return v_;}
+};
+
+
+template <> struct is_error_code_enum<windows_error> : true_type { };
+
+template <> struct is_error_code_enum<windows_error::_> : true_type { };
+
+inline error_code make_error_code(windows_error e) {
+ return error_code(static_cast<int>(e), system_category());
+}
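+
+// So, after a failed Win32 call (sketch; ::GetLastError() is the usual Win32
+// source of these values, per the comment above):
+//
+//   error_code ec(::GetLastError(), system_category());
+//   if (ec == windows_error::file_not_found)
+//     ...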
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/System/Alarm.h b/contrib/llvm/include/llvm/System/Alarm.h
deleted file mode 100644
index 7c28416..0000000
--- a/contrib/llvm/include/llvm/System/Alarm.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//===- llvm/System/Alarm.h - Alarm Generation support ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides an operating system independent interface to alarm(2)
-// type functionality. The Alarm class allows a one-shot alarm to be set up
-// at some number of seconds in the future. When the alarm triggers, a method
-// is called to process the event
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SYSTEM_ALARM_H
-#define LLVM_SYSTEM_ALARM_H
-
-namespace llvm {
-namespace sys {
-
- /// This function registers an alarm to trigger some number of \p seconds in
- /// the future. When that time arrives, the AlarmStatus function will begin
- /// to return 1 instead of 0. The user must poll the status of the alarm by
- /// making occasional calls to AlarmStatus. If the user sends an interrupt
- /// signal, AlarmStatus will begin returning -1, even if the alarm event
- /// occurred.
- /// @returns nothing
- void SetupAlarm(
- unsigned seconds ///< Number of seconds in future when alarm arrives
- );
-
- /// This function terminates the alarm previously set up
- /// @returns nothing
- void TerminateAlarm();
-
- /// This function acquires the status of the alarm.
- /// @returns -1=cancelled, 0=untriggered, 1=triggered
- int AlarmStatus();
-
- /// Sleep for n seconds. Warning: mixing calls to Sleep() and other *Alarm
- /// calls may be a bad idea on some platforms (source: Linux man page).
- /// @returns nothing.
- void Sleep(unsigned n);
-
-
-} // End sys namespace
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/Target/Mangler.h b/contrib/llvm/include/llvm/Target/Mangler.h
index a9f3576..c1c118b 100644
--- a/contrib/llvm/include/llvm/Target/Mangler.h
+++ b/contrib/llvm/include/llvm/Target/Mangler.h
@@ -15,7 +15,6 @@
#define LLVM_SUPPORT_MANGLER_H
#include "llvm/ADT/DenseMap.h"
-#include <string>
namespace llvm {
class StringRef;
@@ -69,12 +68,6 @@ public:
/// empty.
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const Twine &GVName,
ManglerPrefixTy PrefixTy = Mangler::Default);
-
- /// getNameWithPrefix - Return the name of the appropriate prefix
- /// and the specified global variable's name. If the global variable doesn't
- /// have a name, this fills in a unique name for the global.
- std::string getNameWithPrefix(const GlobalValue *GV,
- bool isImplicitlyPrivate = false);
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Target/SubtargetFeature.h b/contrib/llvm/include/llvm/Target/SubtargetFeature.h
index 4546871..6c21ae9 100644
--- a/contrib/llvm/include/llvm/Target/SubtargetFeature.h
+++ b/contrib/llvm/include/llvm/Target/SubtargetFeature.h
@@ -22,7 +22,7 @@
#include <vector>
#include <cstring>
#include "llvm/ADT/Triple.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class raw_ostream;
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index b141a77..0f7e6aa 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -1,10 +1,10 @@
//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces which should be
@@ -47,7 +47,7 @@ class Register<string n> {
// modification of this register can potentially read or modify the aliased
// registers.
list<Register> Aliases = [];
-
+
// SubRegs - A list of registers that are parts of this register. Note these
// are "immediate" sub-registers and the registers within the list do not
// themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX],
@@ -84,7 +84,7 @@ class Register<string n> {
// need to specify sub-registers.
// List "subregs" specifies which registers are sub-registers to this one. This
// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc.
-// This allows the code generator to be careful not to put two values with
+// This allows the code generator to be careful not to put two values with
// overlapping live ranges into registers which alias.
class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
let SubRegs = subregs;
@@ -101,7 +101,7 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// RegType - Specify the list ValueType of the registers in this register
// class. Note that all registers in a register class must have the same
- // ValueTypes. This is a list because some targets permit storing different
+ // ValueTypes. This is a list because some targets permit storing different
// types in same register, for example vector values with 128-bit total size,
// but different count/size of items, like SSE on x86.
//
@@ -127,13 +127,13 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// allocation used by the register allocator.
//
list<Register> MemberList = regList;
-
+
// SubRegClasses - Specify the register class of subregisters as a list of
// dags: (RegClass SubRegIndex, SubRegindex, ...)
list<dag> SubRegClasses = [];
// MethodProtos/MethodBodies - These members can be used to insert arbitrary
- // code into a generated register class. The normal usage of this is to
+ // code into a generated register class. The normal usage of this is to
// overload virtual methods.
code MethodProtos = [{}];
code MethodBodies = [{}];
@@ -150,8 +150,8 @@ class DwarfRegNum<list<int> Numbers> {
// These values can be determined by locating the <target>.h file in the
// directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
// order of these names correspond to the enumeration used by gcc. A value of
- // -1 indicates that the gcc number is undefined and -2 that register number is
- // invalid for this mode/flavour.
+ // -1 indicates that the gcc number is undefined and -2 that register number
+ // is invalid for this mode/flavour.
list<int> DwarfNumbers = Numbers;
}
@@ -199,6 +199,7 @@ class Instruction {
bit isBranch = 0; // Is this instruction a branch instruction?
bit isIndirectBranch = 0; // Is this instruction an indirect branch?
bit isCompare = 0; // Is this instruction a comparison instruction?
+ bit isMoveImm = 0; // Is this instruction a move immediate instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
@@ -243,14 +244,29 @@ class Instruction {
/// be encoded into the output machineinstr.
string DisableEncoding = "";
+ string PostEncoderMethod = "";
+ string DecoderMethod = "";
+
/// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
bits<64> TSFlags = 0;
+
+ ///@name Assembler Parser Support
+ ///@{
+
+ string AsmMatchConverter = "";
+
+ ///@}
}
/// Predicates - These are extra conditionals which are turned into instruction
/// selector matching code. Currently each predicate is just a string.
class Predicate<string cond> {
string CondString = cond;
+
+ /// AssemblerMatcherPredicate - If this feature can be used by the assembler
+ /// matcher, this is true. Targets should set this by inheriting their
+ /// feature from the AssemblerPredicate class in addition to Predicate.
+ bit AssemblerMatcherPredicate = 0;
}
/// NoHonorSignDependentRounding - This predicate is true if support for
@@ -262,9 +278,9 @@ class Requires<list<Predicate> preds> {
list<Predicate> Predicates = preds;
}
-/// ops definition - This is just a simple marker used to identify the operands
-/// list for an instruction. outs and ins are identical both syntatically and
-/// semantically, they are used to define def operands and use operands to
+/// ops definition - This is just a simple marker used to identify the operand
+/// list for an instruction. outs and ins are identical both syntactically and
+/// semantically; they are used to define def operands and use operands to
/// improve readability. This should be used like this:
/// (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar.
def ops;
@@ -326,18 +342,26 @@ class AsmOperandClass {
/// signature should be:
/// void addFooOperands(MCInst &Inst, unsigned N) const;
string RenderMethod = ?;
+
+ /// The name of the method on the target-specific operand to call for custom
+ /// handling of operand parsing. This is useful when the operands do not
+ /// relate to immediates or registers and are very instruction specific (such
+ /// as flags to set in a processor register, a coprocessor number, ...).
+ string ParserMethod = ?;
}
def ImmAsmOperand : AsmOperandClass {
let Name = "Imm";
}
-
+
/// Operand Types - These provide the built-in operand types that may be used
/// by a target. Targets can optionally provide their own operand types as
/// needed, though this should not be needed for RISC targets.
class Operand<ValueType ty> {
ValueType Type = ty;
string PrintMethod = "printOperand";
+ string EncoderMethod = "";
+ string DecoderMethod = "";
string AsmOperandLowerMethod = ?;
dag MIOperandInfo = (ops);
@@ -409,6 +433,7 @@ def INLINEASM : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
+ let neverHasSideEffects = 1; // Note side effect is encoded in an operand.
}
def PROLOG_LABEL : Instruction {
let OutOperandList = (outs);
@@ -475,7 +500,7 @@ def DBG_VALUE : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "DBG_VALUE";
- let isAsCheapAsAMove = 1;
+ let neverHasSideEffects = 1;
}
def REG_SEQUENCE : Instruction {
let OutOperandList = (outs unknown:$dst);
@@ -506,9 +531,9 @@ class AsmParser {
// name.
string AsmParserClassName = "AsmParser";
- // AsmParserInstCleanup - If non-empty, this is the name of a custom function on the
- // AsmParser class to call on every matched instruction. This can be used to
- // perform target specific instruction post-processing.
+ // AsmParserInstCleanup - If non-empty, this is the name of a custom member
+ // function of the AsmParser class to call on every matched instruction.
+ // This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
// Variant - AsmParsers can be of multiple different variants. Variants are
@@ -529,6 +554,49 @@ class AsmParser {
}
def DefaultAsmParser : AsmParser;
+/// AssemblerPredicate - This is a Predicate that can be used when the assembler
+/// matches instructions and aliases.
+class AssemblerPredicate {
+ bit AssemblerMatcherPredicate = 1;
+}
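+
+// For example, a target could mark a mode predicate as usable by the
+// assembler matcher (hypothetical condition string, sketch only):
+//
+//   def In64BitMode : Predicate<"Subtarget->is64Bit()">, AssemblerPredicate;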
+
+
+
+/// MnemonicAlias - This class allows targets to define assembler mnemonic
+/// aliases. This should be used when all forms of one mnemonic are accepted
+/// with a different mnemonic. For example, X86 allows:
+/// sal %al, 1 -> shl %al, 1
+/// sal %ax, %cl -> shl %ax, %cl
+/// sal %eax, %cl -> shl %eax, %cl
+/// etc. Though "sal" is accepted with many forms, all of them are directly
+/// translated to a shl, so it can be handled with (in the case of X86, it
+/// actually has one for each suffix as well):
+/// def : MnemonicAlias<"sal", "shl">;
+///
+/// Mnemonic aliases are mapped before any other translation in the match phase,
+/// and do allow Requires predicates, e.g.:
+///
+/// def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+/// def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+///
+class MnemonicAlias<string From, string To> {
+ string FromMnemonic = From;
+ string ToMnemonic = To;
+
+ // Predicates - Predicates that must be true for this remapping to happen.
+ list<Predicate> Predicates = [];
+}
+
+/// InstAlias - This defines an alternate assembly syntax that is allowed to
+/// match an instruction that has a different (more canonical) assembly
+/// representation.
+class InstAlias<string Asm, dag Result> {
+ string AsmString = Asm; // The .s format to match the instruction with.
+ dag ResultInst = Result; // The MCInst to generate.
+
+ // Predicates - Predicates that must be true for this to match.
+ list<Predicate> Predicates = [];
+}
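+
+// For example (hypothetical register class and instruction names, sketch
+// only), an alternate spelling can be mapped onto a canonical instruction:
+//
+//   def : InstAlias<"clr $dst", (XOR32rr GR32:$dst, GR32:$dst, GR32:$dst)>;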
//===----------------------------------------------------------------------===//
// AsmWriter - This class can be implemented by targets that need to customize
@@ -543,10 +611,6 @@ class AsmWriter {
// name.
string AsmWriterClassName = "AsmPrinter";
- // InstFormatName - AsmWriters can specify the name of the format string to
- // print instructions with.
- string InstFormatName = "AsmString";
-
// Variant - AsmWriters can be of multiple different variants. Variants are
// used to support targets that need to emit assembly code in ways that are
// mostly the same for different targets, but have minor differences in
@@ -554,17 +618,22 @@ class AsmWriter {
// will specify which alternative to use. For example "{x|y|z}" with Variant
// == 1, will expand to "y".
int Variant = 0;
-
-
+
+
// FirstOperandColumn/OperandSpacing - If the assembler syntax uses a columnar
// layout, the asmwriter can actually generate output in these columns (in
// verbose-asm mode). These two values indicate the width of the first column
// (the "opcode" area) and the width to reserve for subsequent operands. When
// verbose asm mode is enabled, operands will be indented to respect this.
int FirstOperandColumn = -1;
-
+
// OperandSpacing - Space between operand columns.
int OperandSpacing = -1;
+
+ // isMCAsmWriter - Is this assembly writer for an MC emitter? This controls
+ // generation of the printInstruction() method. For MC printers, it takes
+ // an MCInstr* operand, otherwise it takes a MachineInstr*.
+ bit isMCAsmWriter = 0;
}
def DefaultAsmWriter : AsmWriter;
@@ -592,15 +661,15 @@ class SubtargetFeature<string n, string a, string v, string d,
// appropriate target chip.
//
string Name = n;
-
+
// Attribute - Attribute to be set by feature.
//
string Attribute = a;
-
+
// Value - Value the attribute to be set to by feature.
//
string Value = v;
-
+
// Desc - Feature description. Used by command line (-mattr=) to display help
// information.
//
@@ -622,12 +691,12 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
// appropriate target chip.
//
string Name = n;
-
+
// ProcItin - The scheduling information for the target processor.
//
ProcessorItineraries ProcItin = pi;
-
- // Features - list of
+
+ // Features - list of subtarget features for this processor.
list<SubtargetFeature> Features = f;
}
diff --git a/contrib/llvm/include/llvm/Target/TargetAsmBackend.h b/contrib/llvm/include/llvm/Target/TargetAsmBackend.h
index 979595a..7527298 100644
--- a/contrib/llvm/include/llvm/Target/TargetAsmBackend.h
+++ b/contrib/llvm/include/llvm/Target/TargetAsmBackend.h
@@ -10,17 +10,18 @@
#ifndef LLVM_TARGET_TARGETASMBACKEND_H
#define LLVM_TARGET_TARGETASMBACKEND_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
-class MCDataFragment;
class MCFixup;
class MCInst;
class MCObjectWriter;
class MCSection;
template<typename T>
class SmallVectorImpl;
-class Target;
class raw_ostream;
/// TargetAsmBackend - Generic interface to target specific assembler backends.
@@ -28,37 +29,17 @@ class TargetAsmBackend {
TargetAsmBackend(const TargetAsmBackend &); // DO NOT IMPLEMENT
void operator=(const TargetAsmBackend &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
- TargetAsmBackend(const Target &);
+ TargetAsmBackend();
- /// TheTarget - The Target that this machine was created for.
- const Target &TheTarget;
-
- unsigned HasAbsolutizedSet : 1;
unsigned HasReliableSymbolDifference : 1;
- unsigned HasScatteredSymbols : 1;
public:
virtual ~TargetAsmBackend();
- const Target &getTarget() const { return TheTarget; }
-
/// createObjectWriter - Create a new MCObjectWriter instance for use by the
/// assembler backend to emit the final object file.
virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const = 0;
- /// hasAbsolutizedSet - Check whether this target "absolutizes"
- /// assignments. That is, given code like:
- /// a:
- /// ...
- /// b:
- /// tmp = a - b
- /// .long tmp
- /// will the value of 'tmp' be a relocatable expression, or the assembly time
- /// value of L0 - L1. This distinction is only relevant for platforms that
- /// support scattered symbols, since in the absence of scattered symbols (a -
- /// b) cannot change after assembly.
- bool hasAbsolutizedSet() const { return HasAbsolutizedSet; }
-
/// hasReliableSymbolDifference - Check whether this target implements
/// accurate relocations for differences between symbols. If not, differences
/// between symbols will always be relocatable expressions and any references
@@ -68,21 +49,11 @@ public:
/// This should always be true (since it results in fewer relocations with no
/// loss of functionality), but is currently supported as a way to maintain
/// exact object compatibility with Darwin 'as' (on non-x86_64). It should
- /// eventually should be eliminated. See also \see hasAbsolutizedSet.
+ /// eventually be eliminated.
bool hasReliableSymbolDifference() const {
return HasReliableSymbolDifference;
}
- /// hasScatteredSymbols - Check whether this target supports scattered
- /// symbols. If so, the assembler should assume that atoms can be scattered by
- /// the linker. In particular, this means that the offsets between symbols
- /// which are in distinct atoms is not known at link time, and the assembler
- /// must generate fixups and relocations appropriately.
- ///
- /// Note that the assembler currently does not reason about atoms, instead it
- /// assumes all temporary symbols reside in the "current atom".
- bool hasScatteredSymbols() const { return HasScatteredSymbols; }
-
/// doesSectionRequireSymbols - Check whether the given section requires that
/// all symbols (even temporaries) have symbol table entries.
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -97,16 +68,28 @@ public:
return true;
}
- /// isVirtualSection - Check whether the given section is "virtual", that is
- /// has no actual object file contents.
- virtual bool isVirtualSection(const MCSection &Section) const = 0;
+ /// @name Target Fixup Interfaces
+ /// @{
+
+ /// getNumFixupKinds - Get the number of target specific fixup kinds.
+ virtual unsigned getNumFixupKinds() const = 0;
+
+ /// getFixupKindInfo - Get information on a fixup kind.
+ virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
+
+ /// @}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
- virtual void ApplyFixup(const MCFixup &Fixup, MCDataFragment &Fragment,
+ virtual void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const = 0;
+ /// @}
+
+ /// @name Target Relaxation Interfaces
+ /// @{
+
/// MayNeedRelaxation - Check whether the given instruction may need
/// relaxation.
///
@@ -121,12 +104,18 @@ public:
/// \param Res [output] - On return, the relaxed instruction.
virtual void RelaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
+ /// @}
+
/// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
/// output. If the target cannot generate such a sequence, it should return an
/// error.
///
/// \return - True on success.
virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
+
+ /// HandleAssemblerFlag - Handle any target-specific assembler flags.
+ /// By default, do nothing.
+ virtual void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Target/TargetAsmInfo.h b/contrib/llvm/include/llvm/Target/TargetAsmInfo.h
new file mode 100644
index 0000000..98aab14
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetAsmInfo.h
@@ -0,0 +1,75 @@
+//===-- llvm/Target/TargetAsmInfo.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to provide the information necessary for producing assembly files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETASMINFO_H
+#define LLVM_TARGET_TARGETASMINFO_H
+
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class MCSection;
+ class MCContext;
+ class TargetMachine;
+ class TargetLoweringObjectFile;
+
+class TargetAsmInfo {
+ unsigned PointerSize;
+ bool IsLittleEndian;
+ TargetFrameLowering::StackDirection StackDir;
+ const TargetRegisterInfo *TRI;
+ std::vector<MachineMove> InitialFrameState;
+ const TargetLoweringObjectFile *TLOF;
+
+public:
+ explicit TargetAsmInfo(const TargetMachine &TM);
+
+ /// getPointerSize - Get the pointer size in bytes.
+ unsigned getPointerSize() const {
+ return PointerSize;
+ }
+
+ /// isLittleEndian - True if the target is little endian.
+ bool isLittleEndian() const {
+ return IsLittleEndian;
+ }
+
+ TargetFrameLowering::StackDirection getStackGrowthDirection() const {
+ return StackDir;
+ }
+
+ const MCSection *getDwarfLineSection() const {
+ return TLOF->getDwarfLineSection();
+ }
+
+ const MCSection *getEHFrameSection() const {
+ return TLOF->getEHFrameSection();
+ }
+
+ unsigned getDwarfRARegNum(bool isEH) const {
+ return TRI->getDwarfRegNum(TRI->getRARegister(), isEH);
+ }
+
+ const std::vector<MachineMove> &getInitialFrameState() const {
+ return InitialFrameState;
+ }
+
+ int getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ return TRI->getDwarfRegNum(RegNum, isEH);
+ }
+};
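+
+// Usage sketch (TM is a hypothetical TargetMachine reference):
+//
+//   TargetAsmInfo TAI(TM);
+//   if (TAI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown)
+//     ... // e.g. frame offsets are negative from the entry stack pointer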
+
+}
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetAsmParser.h b/contrib/llvm/include/llvm/Target/TargetAsmParser.h
index 5830d1f..9ff50cb 100644
--- a/contrib/llvm/include/llvm/Target/TargetAsmParser.h
+++ b/contrib/llvm/include/llvm/Target/TargetAsmParser.h
@@ -13,7 +13,7 @@
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
namespace llvm {
-class MCInst;
+class MCStreamer;
class StringRef;
class Target;
class SMLoc;
@@ -42,6 +42,8 @@ public:
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) = 0;
+
/// ParseInstruction - Parse one assembly instruction.
///
/// The parser is positioned following the instruction name. The target
@@ -70,16 +72,16 @@ public:
/// \param DirectiveID - the identifier token of the directive.
virtual bool ParseDirective(AsmToken DirectiveID) = 0;
- /// MatchInstruction - Recognize a series of operands of a parsed instruction
- /// as an actual MCInst. This returns false and fills in Inst on success and
- /// returns true on failure to match.
+ /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
+ /// instruction as an actual MCInst and emit it to the specified MCStreamer.
+ /// This returns false on success and returns true on failure to match.
///
/// On failure, the target parser is responsible for emitting a diagnostic
/// explaining the match failure.
virtual bool
- MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst) = 0;
+ MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) = 0;
};
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
index f368a2e..275957e 100644
--- a/contrib/llvm/include/llvm/Target/TargetCallingConv.h
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -106,14 +106,13 @@ namespace ISD {
///
struct InputArg {
ArgFlagsTy Flags;
- EVT VT;
+ MVT VT;
bool Used;
InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), VT(vt), Used(used) {
- assert(VT.isSimple() &&
- "InputArg value type must be Simple!");
+ : Flags(flags), Used(used) {
+ VT = vt.getSimpleVT();
}
};
@@ -123,16 +122,15 @@ namespace ISD {
///
struct OutputArg {
ArgFlagsTy Flags;
- EVT VT;
+ MVT VT;
/// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
bool IsFixed;
OutputArg() : IsFixed(false) {}
OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
- : Flags(flags), VT(vt), IsFixed(isfixed) {
- assert(VT.isSimple() &&
- "OutputArg value type must be Simple!");
+ : Flags(flags), IsFixed(isfixed) {
+ VT = vt.getSimpleVT();
}
};
}
diff --git a/contrib/llvm/include/llvm/Target/TargetData.h b/contrib/llvm/include/llvm/Target/TargetData.h
index b89cbe0..25065d3 100644
--- a/contrib/llvm/include/llvm/Target/TargetData.h
+++ b/contrib/llvm/include/llvm/Target/TargetData.h
@@ -22,6 +22,7 @@
#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -143,7 +144,7 @@ public:
std::string getStringRepresentation() const;
/// isLegalInteger - This function returns true if the specified type is
- /// known tobe a native integer type supported by the CPU. For example,
+ /// known to be a native integer type supported by the CPU. For example,
/// i64 is not native on most 32-bit CPUs and i37 is not native on any known
/// one. This returns false if the integer width is not legal.
///
diff --git a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
index 7cb6931..b97f3e2 100644
--- a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
@@ -28,7 +28,6 @@ namespace llvm {
// EMachine - This field is the target specific value to emit as the
// e_machine member of the ELF header.
unsigned short EMachine;
- TargetMachine &TM;
bool is64Bit, isLittleEndian;
public:
@@ -62,7 +61,7 @@ namespace llvm {
ELFDATA2MSB = 2 // Big-endian object file
};
- explicit TargetELFWriterInfo(TargetMachine &tm);
+ explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
virtual ~TargetELFWriterInfo();
unsigned short getEMachine() const { return EMachine; }
diff --git a/contrib/llvm/include/llvm/Target/TargetFrameInfo.h b/contrib/llvm/include/llvm/Target/TargetFrameInfo.h
deleted file mode 100644
index 975d156..0000000
--- a/contrib/llvm/include/llvm/Target/TargetFrameInfo.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- llvm/Target/TargetFrameInfo.h ---------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Interface to describe the layout of a stack frame on the target machine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETFRAMEINFO_H
-#define LLVM_TARGET_TARGETFRAMEINFO_H
-
-#include <utility>
-
-namespace llvm {
-
-/// Information about stack frame layout on the target. It holds the direction
-/// of stack growth, the known stack alignment on entry to each function, and
-/// the offset to the locals area.
-///
-/// The offset to the local area is the offset from the stack pointer on
-/// function entry to the first location where function data (local variables,
-/// spill locations) can be stored.
-class TargetFrameInfo {
-public:
- enum StackDirection {
- StackGrowsUp, // Adding to the stack increases the stack address
- StackGrowsDown // Adding to the stack decreases the stack address
- };
-
- // Maps a callee saved register to a stack slot with a fixed offset.
- struct SpillSlot {
- unsigned Reg;
- int Offset; // Offset relative to stack pointer on function entry.
- };
-private:
- StackDirection StackDir;
- unsigned StackAlignment;
- unsigned TransientStackAlignment;
- int LocalAreaOffset;
-public:
- TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO,
- unsigned TransAl = 1)
- : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
- LocalAreaOffset(LAO) {}
-
- virtual ~TargetFrameInfo();
-
- // These methods return information that describes the abstract stack layout
- // of the target machine.
-
- /// getStackGrowthDirection - Return the direction the stack grows
- ///
- StackDirection getStackGrowthDirection() const { return StackDir; }
-
- /// getStackAlignment - This method returns the number of bytes to which the
- /// stack pointer must be aligned on entry to a function. Typically, this
- /// is the largest alignment for any data object in the target.
- ///
- unsigned getStackAlignment() const { return StackAlignment; }
-
- /// getTransientStackAlignment - This method returns the number of bytes to
- /// which the stack pointer must be aligned at all times, even between
- /// calls.
- ///
- unsigned getTransientStackAlignment() const {
- return TransientStackAlignment;
- }
-
- /// getOffsetOfLocalArea - This method returns the offset of the local area
- /// from the stack pointer on entrance to a function.
- ///
- int getOffsetOfLocalArea() const { return LocalAreaOffset; }
-
- /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
- /// pairs, that contains an entry for each callee saved register that must be
- /// spilled to a particular stack location if it is spilled.
- ///
- /// Each entry in this array contains a <register,offset> pair, indicating the
- /// fixed offset from the incoming stack pointer that each register should be
- /// spilled at. If a register is not listed here, the code generator is
- /// allowed to spill it anywhere it chooses.
- ///
- virtual const SpillSlot *
- getCalleeSavedSpillSlots(unsigned &NumEntries) const {
- NumEntries = 0;
- return 0;
- }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetFrameLowering.h b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
new file mode 100644
index 0000000..e104b16
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
@@ -0,0 +1,196 @@
+//===-- llvm/Target/TargetFrameLowering.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H
+#define LLVM_TARGET_TARGETFRAMELOWERING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class CalleeSavedInfo;
+ class MachineFunction;
+ class MachineBasicBlock;
+ class MachineMove;
+ class RegScavenger;
+
+/// Information about stack frame layout on the target. It holds the direction
+/// of stack growth, the known stack alignment on entry to each function, and
+/// the offset to the locals area.
+///
+/// The offset to the local area is the offset from the stack pointer on
+/// function entry to the first location where function data (local variables,
+/// spill locations) can be stored.
+class TargetFrameLowering {
+public:
+ enum StackDirection {
+ StackGrowsUp, // Adding to the stack increases the stack address
+ StackGrowsDown // Adding to the stack decreases the stack address
+ };
+
+ // Maps a callee saved register to a stack slot with a fixed offset.
+ struct SpillSlot {
+ unsigned Reg;
+ int Offset; // Offset relative to stack pointer on function entry.
+ };
+private:
+ StackDirection StackDir;
+ unsigned StackAlignment;
+ unsigned TransientStackAlignment;
+ int LocalAreaOffset;
+public:
+ TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
+ unsigned TransAl = 1)
+ : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
+ LocalAreaOffset(LAO) {}
+
+ virtual ~TargetFrameLowering();
+
+ // These methods return information that describes the abstract stack layout
+ // of the target machine.
+
+ /// getStackGrowthDirection - Return the direction the stack grows
+ ///
+ StackDirection getStackGrowthDirection() const { return StackDir; }
+
+ /// getStackAlignment - This method returns the number of bytes to which the
+ /// stack pointer must be aligned on entry to a function. Typically, this
+ /// is the largest alignment for any data object in the target.
+ ///
+ unsigned getStackAlignment() const { return StackAlignment; }
+
+ /// getTransientStackAlignment - This method returns the number of bytes to
+ /// which the stack pointer must be aligned at all times, even between
+ /// calls.
+ ///
+ unsigned getTransientStackAlignment() const {
+ return TransientStackAlignment;
+ }
+
+ /// getOffsetOfLocalArea - This method returns the offset of the local area
+ /// from the stack pointer on entrance to a function.
+ ///
+ int getOffsetOfLocalArea() const { return LocalAreaOffset; }
+
+ /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
+ /// pairs, that contains an entry for each callee saved register that must be
+ /// spilled to a particular stack location if it is spilled.
+ ///
+ /// Each entry in this array contains a <register,offset> pair, indicating the
+ /// fixed offset from the incoming stack pointer that each register should be
+ /// spilled at. If a register is not listed here, the code generator is
+ /// allowed to spill it anywhere it chooses.
+ ///
+ virtual const SpillSlot *
+ getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+ NumEntries = 0;
+ return 0;
+ }
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ virtual bool targetHandlesStackFrameRounding() const {
+ return false;
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ virtual void emitPrologue(MachineFunction &MF) const = 0;
+ virtual void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const = 0;
+
+ /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of store instructions via
+ /// storeRegToStackSlot(). Returns false otherwise.
+ virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of load instructions via loadRegToStackSlot().
+ /// Returns false otherwise.
+ virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// hasFP - Return true if the specified function should have a dedicated
+ /// frame pointer register. For most targets this is true only if the function
+ /// has variable sized allocas or if frame pointer elimination is disabled.
+ virtual bool hasFP(const MachineFunction &MF) const = 0;
+
+ /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+ /// not required, we reserve argument space for call sites in the function
+ /// immediately on entry to the current function. This eliminates the need for
+ /// add/sub sp brackets around call sites. Returns true if the call frame is
+ /// included as part of the stack frame.
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
+ return !hasFP(MF);
+ }
+
+ /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
+ /// call frame pseudo ops before doing frame index elimination. This is
+ /// possible only when frame index references between the pseudos won't
+ /// need adjusting for the call frame adjustments. Normally, that's true
+ /// if the function has a reserved call frame or a frame pointer. Some
+ /// targets (Thumb2, for example) may have more complicated criteria,
+ /// however, and can override this behavior.
+ virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+ return hasReservedCallFrame(MF) || hasFP(MF);
+ }
+
+ /// getInitialFrameState - Returns a list of machine moves that are assumed
+ /// on entry to all functions. Note that LabelID is ignored (assumed to be
+ /// the beginning of the function.)
+ virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+
+ /// getFrameIndexOffset - Returns the displacement from the frame register to
+ /// the stack frame of the specified index.
+ virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+
+ /// getFrameIndexReference - This method should return the base register
+ /// and offset used to reference a frame index location. The offset is
+ /// returned directly, and the base register is returned via FrameReg.
+ virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const;
+
+ /// processFunctionBeforeCalleeSavedScan - This method is called immediately
+ /// before PrologEpilogInserter scans the physical registers used to determine
+ /// what callee saved registers should be spilled. This method is optional.
+ virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const {
+
+ }
+
+ /// processFunctionBeforeFrameFinalized - This method is called immediately
+ /// before the specified function's frame layout (MF.getFrameInfo()) is
+ /// finalized. Once the frame is finalized, MO_FrameIndex operands are
+ /// replaced with direct constants. This method is optional.
+ ///
+ virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
+ }
+};
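+
+// A minimal subclass sketch (hypothetical target, illustration only): a
+// backend supplies the stack direction, alignment, and local-area offset
+// through the base constructor and implements the pure virtual hooks.
+//
+//   class MyTargetFrameLowering : public TargetFrameLowering {
+//   public:
+//     MyTargetFrameLowering() // stack grows down, 8-byte aligned, LAO 0
+//       : TargetFrameLowering(StackGrowsDown, 8, 0) {}
+//     void emitPrologue(MachineFunction &MF) const;
+//     void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+//     bool hasFP(const MachineFunction &MF) const;
+//   };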
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrDesc.h b/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
index a127aed..8823d5a 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
@@ -15,7 +15,7 @@
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -103,13 +103,14 @@ namespace TID {
Terminator,
Branch,
IndirectBranch,
- Predicable,
- NotDuplicable,
Compare,
+ MoveImm,
DelaySlot,
FoldableAsLoad,
MayLoad,
MayStore,
+ Predicable,
+ NotDuplicable,
UnmodeledSideEffects,
Commutable,
ConvertibleTo3Addr,
@@ -352,6 +353,12 @@ public:
return Flags & (1 << TID::Compare);
}
+ /// isMoveImmediate - Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate() const {
+ return Flags & (1 << TID::MoveImm);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
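
isMoveImmediate() follows the same pattern as the other TID predicates: each property is one bit in Flags, indexed by its position in the enum, which is also why reordering the enum (as this hunk does for Predicable and NotDuplicable) is safe only because TableGen regenerates the flag words. A standalone illustration with invented bit positions:

    #include <cassert>

    // Standalone illustration of the one-bit-per-property encoding used by
    // TargetInstrDesc; the bit positions here are invented, not TID's real ones.
    enum { MoveImm = 5, Predicable = 9 };

    static bool hasFlag(unsigned Flags, unsigned Bit) {
      return (Flags & (1u << Bit)) != 0;
    }

    int main() {
      unsigned Flags = (1u << MoveImm);  // a move-immediate instruction
      assert(hasFlag(Flags, MoveImm));
      assert(!hasFlag(Flags, Predicable));
      return 0;
    }
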
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
index 520c41b..fc7b51e 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -19,16 +19,17 @@
namespace llvm {
-class CalleeSavedInfo;
class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
+class MachineRegisterInfo;
class MDNode;
class MCInst;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
+class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -134,7 +135,7 @@ public:
int &FrameIndex) const {
return 0;
}
-
+
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
@@ -227,9 +228,12 @@ public:
/// produceSameValue - Return true if two machine instructions would produce
/// identical values. By default, this is only true when the two instructions
- /// are deemed identical except for defs.
+ /// are deemed identical except for defs. If this function is called when the
+ /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
+ /// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const = 0;
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI = 0) const = 0;
/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
@@ -267,7 +271,7 @@ public:
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
- assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
+ assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
return 0;
}
@@ -285,7 +289,7 @@ public:
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const {
- assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
+ assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
@@ -303,31 +307,45 @@ public:
return true;
}
- /// isProfitableToIfCvt - Return true if it's profitable to first "NumInstrs"
- /// of the specified basic block.
+ /// isProfitableToIfCvt - Return true if it's profitable to predicate
+ /// instructions with accumulated instruction latency of "NumCycles"
+ /// of the specified basic block, where the probability of the instructions
+ /// being executed is given by Probability, and Confidence is a measure
+ /// of our confidence that it will be properly predicted.
virtual
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ float Probability, float Confidence) const {
return false;
}
-
+
/// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
/// checks for the case where two basic blocks from the true and false paths
/// of an if-then-else (diamond) are predicated on mutually exclusive
- /// predicates.
+ /// predicates, where the probability of the true path being taken is given
+ /// by Probability, and Confidence is a measure of our confidence that it
+ /// will be properly predicted.
virtual bool
- isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
- MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+ isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ float Probability, float Confidence) const {
return false;
}
/// isProfitableToDupForIfCvt - Return true if it's profitable for
- /// if-converter to duplicate a specific number of instructions in the
- /// specified MBB to enable if-conversion.
+ /// if-converter to duplicate instructions of the specified accumulated
+ /// instruction latency in the specified MBB to enable if-conversion.
+ /// The probability of the instructions being executed is given by
+ /// Probability, and Confidence is a measure of our confidence that it
+ /// will be properly predicted.
virtual bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs) const {
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ float Probability, float Confidence) const {
return false;
}
-
+
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -360,29 +378,7 @@ public:
const TargetRegisterInfo *TRI) const {
assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
}
-
- /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
- /// saved registers and returns true if it isn't possible / profitable to do
- /// so by issuing a series of store instructions via
- /// storeRegToStackSlot(). Returns false otherwise.
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
- /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
- /// saved registers and returns true if it isn't possible / profitable to do
- /// so by issuing a series of load instructions via loadRegToStackSlot().
- /// Returns false otherwise.
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
-
/// emitFrameIndexDebugValue - Emit a target-dependent form of
/// DBG_VALUE encoding the address of a frame index. Addresses would
/// normally be lowered the same way as other addresses on the target,
@@ -493,7 +489,7 @@ public:
unsigned NumLoads) const {
return false;
}
-
+
/// ReverseBranchCondition - Reverses the branch condition of the specified
/// condition list, returning false on success and true if it cannot be
/// reversed.
@@ -501,19 +497,19 @@ public:
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return true;
}
-
+
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
- virtual void insertNoop(MachineBasicBlock &MBB,
+ virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
-
-
+
+
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
virtual void getNoopForMachoTarget(MCInst &NopInst) const {
// Default to just using 'nop' string.
}
-
-
+
+
/// isPredicated - Returns true if the instruction is already predicated.
///
virtual bool isPredicated(const MachineInstr *MI) const {
@@ -571,26 +567,98 @@ public:
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
- /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer
- /// to use for this target when scheduling the machine instructions after
+ /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to
+ /// use for this target when scheduling the machine instructions before
/// register allocation.
virtual ScheduleHazardRecognizer*
- CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
+ CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const = 0;
+
+ /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
+ /// recognizer to use for this target when scheduling the machine instructions
+ /// after register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG *DAG) const = 0;
/// AnalyzeCompare - For a comparison instruction, return the source register
/// in SrcReg and the mask and value it compares against in Mask and Value.
/// Return true if the comparison instruction can be analyzed.
virtual bool AnalyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, int &CmpValue) const {
+ unsigned &SrcReg, int &Mask, int &Value) const {
+ return false;
+ }
+
+ /// OptimizeCompareInstr - See if the comparison instruction can be converted
+ /// into something more efficient. E.g., on ARM most instructions can set the
+ /// flags register, obviating the need for a separate CMP.
+ virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, int Mask, int Value,
+ const MachineRegisterInfo *MRI) const {
return false;
}
- /// ConvertToSetZeroFlag - Convert the instruction to set the zero flag so
- /// that we can remove a "comparison with zero".
- virtual bool ConvertToSetZeroFlag(MachineInstr *Instr,
- MachineInstr *CmpInstr) const {
+ /// FoldImmediate - 'Reg' is known to be defined by a move immediate
+ /// instruction; try to fold the immediate into the use instruction.
+ virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
+
+ /// getNumMicroOps - Return the number of micro-operations the given machine
+ /// instruction will be decoded to on the target CPU.
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
+
+ /// isZeroCost - Return true for pseudo instructions that don't consume any
+ /// machine resources in their current form. These are common cases that the
+ /// scheduler should consider free, rather than conservatively handling them
+ /// as instructions with no itinerary.
+ bool isZeroCost(unsigned Opcode) const {
+ return Opcode <= TargetOpcode::COPY;
+ }
+
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// pair of def and use.
+ /// In most cases, the static scheduling itinerary is enough to determine the
+ /// operand latency, but it may not be for instructions with a variable
+ /// number of defs / uses.
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+
+ /// getInstrLatency - Compute the instruction latency of a given instruction.
+ /// If the instruction has a higher cost when predicated, it's returned via
+ /// PredCost.
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const;
+
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
+
+ /// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
+ /// and a use in the current loop; return true if the target considers
+ /// it 'high'. This is used by optimization passes such as machine LICM to
+ /// determine whether it makes sense to hoist an instruction out even in a
+ /// high register pressure situation.
+ virtual
+ bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ return false;
+ }
+
+ /// hasLowDefLatency - Compute the operand latency of a def of 'Reg'; return
+ /// true if the target considers it 'low'.
+ virtual
+ bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -620,13 +688,20 @@ public:
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const;
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const;
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
+ bool usePreRAHazardRecognizer() const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;
+
virtual ScheduleHazardRecognizer *
- CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG*) const;
};
} // End llvm namespace
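
The reworked isProfitableToIfCvt() hooks now receive cycle counts plus Probability and Confidence instead of raw instruction counts, so a target can weigh predicated latency against expected branch cost. The sketch below is one invented heuristic to show how the parameters can interact; it is not ARM's or any in-tree target's actual policy:

    #include <cassert>

    // Invented heuristic, for illustration only: predication is profitable when
    // the predicated latency does not exceed the expected cost of the branchy
    // version, given the branch probability and a misprediction penalty.
    static bool profitableToIfCvt(unsigned NumCycles, unsigned ExtraPredCycles,
                                  float Probability, float Confidence) {
      const float MispredictPenalty = 10.0f; // invented constant
      float PredicatedCost = NumCycles + ExtraPredCycles;
      float BranchyCost = Probability * NumCycles +
                          (1.0f - Confidence) * MispredictPenalty;
      return PredicatedCost <= BranchyCost;
    }

    int main() {
      // A short, poorly predicted block is worth predicating...
      assert(profitableToIfCvt(2, 1, 0.5f, 0.5f));
      // ...a long block that is almost always skipped is not.
      assert(!profitableToIfCvt(20, 2, 0.1f, 0.99f));
      return 0;
    }
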
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h b/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
index 39648c2..a95b70f 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
@@ -95,6 +95,7 @@ struct InstrStage {
/// operands are read and written.
///
struct InstrItinerary {
+ unsigned NumMicroOps; ///< # of micro-ops, 0 means it's variable
unsigned FirstStage; ///< Index of first stage in itinerary
unsigned LastStage; ///< Index of last + 1 stage in itinerary
unsigned FirstOperandCycle; ///< Index of first operand rd/wr
@@ -110,38 +111,42 @@ class InstrItineraryData {
public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
- const InstrItinerary *Itineratries; ///< Array of itineraries selected
+ const unsigned *Forwardings; ///< Array of pipeline forwarding paths
+ const InstrItinerary *Itineraries; ///< Array of itineraries selected
+ unsigned IssueWidth; ///< Max issue per cycle. 0=Unknown.
/// Ctors.
///
- InstrItineraryData() : Stages(0), OperandCycles(0), Itineratries(0) {}
+ InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
+ Itineraries(0), IssueWidth(0) {}
+
InstrItineraryData(const InstrStage *S, const unsigned *OS,
- const InstrItinerary *I)
- : Stages(S), OperandCycles(OS), Itineratries(I) {}
-
+ const unsigned *F, const InstrItinerary *I)
+ : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
+
/// isEmpty - Returns true if there are no itineraries.
///
- bool isEmpty() const { return Itineratries == 0; }
+ bool isEmpty() const { return Itineraries == 0; }
/// isEndMarker - Returns true if the index is for the end marker
/// itinerary.
///
bool isEndMarker(unsigned ItinClassIndx) const {
- return ((Itineratries[ItinClassIndx].FirstStage == ~0U) &&
- (Itineratries[ItinClassIndx].LastStage == ~0U));
+ return ((Itineraries[ItinClassIndx].FirstStage == ~0U) &&
+ (Itineraries[ItinClassIndx].LastStage == ~0U));
}
/// beginStage - Return the first stage of the itinerary.
- ///
+ ///
const InstrStage *beginStage(unsigned ItinClassIndx) const {
- unsigned StageIdx = Itineratries[ItinClassIndx].FirstStage;
+ unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
return Stages + StageIdx;
}
/// endStage - Return the last+1 stage of the itinerary.
- ///
+ ///
const InstrStage *endStage(unsigned ItinClassIndx) const {
- unsigned StageIdx = Itineratries[ItinClassIndx].LastStage;
+ unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
return Stages + StageIdx;
}
@@ -173,13 +178,68 @@ public:
if (isEmpty())
return -1;
- unsigned FirstIdx = Itineratries[ItinClassIndx].FirstOperandCycle;
- unsigned LastIdx = Itineratries[ItinClassIndx].LastOperandCycle;
+ unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
+ unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
if ((FirstIdx + OperandIdx) >= LastIdx)
return -1;
return (int)OperandCycles[FirstIdx + OperandIdx];
}
+
+ /// hasPipelineForwarding - Return true if there is a pipeline forwarding
+ /// between instructions of itinerary classes DefClass and UseClass so that
+ /// the value produced by an instruction of itinerary class DefClass, operand
+ /// index DefIdx can be bypassed when it's read by an instruction of
+ /// itinerary class UseClass, operand index UseIdx.
+ bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
+ unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
+ if ((FirstDefIdx + DefIdx) >= LastDefIdx)
+ return false;
+ if (Forwardings[FirstDefIdx + DefIdx] == 0)
+ return false;
+
+ unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
+ unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
+ if ((FirstUseIdx + UseIdx) >= LastUseIdx)
+ return false;
+
+ return Forwardings[FirstDefIdx + DefIdx] ==
+ Forwardings[FirstUseIdx + UseIdx];
+ }
+
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// itinerary class and operand index if the value is produced by an
+ /// instruction of the specified itinerary class and def operand index.
+ int getOperandLatency(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ if (isEmpty())
+ return -1;
+
+ int DefCycle = getOperandCycle(DefClass, DefIdx);
+ if (DefCycle == -1)
+ return -1;
+
+ int UseCycle = getOperandCycle(UseClass, UseIdx);
+ if (UseCycle == -1)
+ return -1;
+
+ UseCycle = DefCycle - UseCycle + 1;
+ if (UseCycle > 0 &&
+ hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
+ // FIXME: This assumes one cycle benefit for every pipeline forwarding.
+ --UseCycle;
+ return UseCycle;
+ }
+
+ /// isMicroCoded - Return true if the instructions in the given class decode
+ /// to more than one micro-op.
+ bool isMicroCoded(unsigned ItinClassIndx) const {
+ if (isEmpty())
+ return false;
+ return Itineraries[ItinClassIndx].NumMicroOps != 1;
+ }
};
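
getOperandLatency() above subtracts the use operand's read cycle from the def operand's ready cycle, adds one, and then credits a single cycle when hasPipelineForwarding() matches (the FIXME notes that a one-cycle benefit is assumed). A standalone rehearsal of that arithmetic with invented cycle values:

    #include <cassert>

    // Standalone rehearsal of the arithmetic in getOperandLatency above,
    // with invented cycle values.
    static int operandLatency(int DefCycle, int UseCycle, bool HasForwarding) {
      if (DefCycle == -1 || UseCycle == -1)
        return -1;                  // unknown
      int Latency = DefCycle - UseCycle + 1;
      if (Latency > 0 && HasForwarding)
        --Latency;                  // assumed one-cycle forwarding benefit
      return Latency;
    }

    int main() {
      // Result ready in cycle 3, operand read in cycle 1: 3 cycles apart...
      assert(operandLatency(3, 1, false) == 3);
      // ...a forwarding path shaves one cycle off.
      assert(operandLatency(3, 1, true) == 2);
      return 0;
    }
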
diff --git a/contrib/llvm/include/llvm/Target/TargetJITInfo.h b/contrib/llvm/include/llvm/Target/TargetJITInfo.h
index 7208a8d..b198eb6 100644
--- a/contrib/llvm/include/llvm/Target/TargetJITInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetJITInfo.h
@@ -19,7 +19,7 @@
#include <cassert>
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class Function;
diff --git a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
new file mode 100644
index 0000000..bdd214b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
@@ -0,0 +1,66 @@
+//===-- llvm/Target/TargetLibraryInfo.h - Library information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETLIBRARYINFO_H
+#define LLVM_TARGET_TARGETLIBRARYINFO_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+ class Triple;
+
+ namespace LibFunc {
+ enum Func {
+ /// void *memset(void *b, int c, size_t len);
+ memset,
+
+ /// void *memcpy(void *s1, const void *s2, size_t n);
+ memcpy,
+
+ /// void memset_pattern16(void *b, const void *pattern16, size_t len);
+ memset_pattern16,
+
+ NumLibFuncs
+ };
+ }
+
+/// TargetLibraryInfo - This immutable pass captures information about what
+/// library functions are available for the current target, and allows a
+/// frontend to disable optimizations through -fno-builtin etc.
+class TargetLibraryInfo : public ImmutablePass {
+ unsigned char AvailableArray[(LibFunc::NumLibFuncs+7)/8];
+public:
+ static char ID;
+ TargetLibraryInfo();
+ TargetLibraryInfo(const Triple &T);
+
+ /// has - This function is used by optimizations that want to match on or form
+ /// a given library function.
+ bool has(LibFunc::Func F) const {
+ return (AvailableArray[F/8] & (1 << (F&7))) != 0;
+ }
+
+ /// setUnavailable - This can be used by whatever sets up TargetLibraryInfo to
+ /// ban use of specific library functions.
+ void setUnavailable(LibFunc::Func F) {
+ AvailableArray[F/8] &= ~(1 << (F&7));
+ }
+
+ void setAvailable(LibFunc::Func F) {
+ AvailableArray[F/8] |= 1 << (F&7);
+ }
+
+ /// disableAllFunctions - This disables all builtins, which is used for
+ /// options like -fno-builtin.
+ void disableAllFunctions();
+};
+
+} // end namespace llvm
+
+#endif
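
AvailableArray packs one availability bit per LibFunc, eight to a byte; has(), setAvailable() and setUnavailable() are straight bit tests and masks. A standalone copy of the packing, with invented indices standing in for LibFunc::Func:

    #include <cassert>

    // Standalone copy of the bit packing used by TargetLibraryInfo above;
    // three invented function indices stand in for LibFunc::Func.
    enum Func { memset_fn, memcpy_fn, memset_pattern16_fn, NumFuncs };

    static unsigned char Available[(NumFuncs + 7) / 8];

    static bool has(Func F)            { return (Available[F/8] &  (1 << (F&7))) != 0; }
    static void setAvailable(Func F)   {         Available[F/8] |=  (1 << (F&7)); }
    static void setUnavailable(Func F) {         Available[F/8] &= ~(1 << (F&7)); }

    int main() {
      setAvailable(memset_fn);
      setAvailable(memset_pattern16_fn);
      setUnavailable(memset_pattern16_fn);   // e.g. not on this OS
      assert(has(memset_fn) && !has(memset_pattern16_fn) && !has(memcpy_fn));
      return 0;
    }
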
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 29de994..5141b7b 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -25,13 +25,9 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
@@ -41,10 +37,12 @@
namespace llvm {
class AllocaInst;
+ class APFloat;
class CallInst;
class Function;
class FastISel;
class FunctionLoweringInfo;
+ class ImmutableCallSite;
class MachineBasicBlock;
class MachineFunction;
class MachineFrameInfo;
@@ -55,6 +53,7 @@ namespace llvm {
class SDNode;
class SDValue;
class SelectionDAG;
+ template<typename T> class SmallVectorImpl;
class TargetData;
class TargetMachine;
class TargetRegisterClass;
@@ -126,6 +125,10 @@ public:
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
+ /// isJumpExpensive() - Return true if flow control is an expensive operation
+ /// that should be avoided.
+ bool isJumpExpensive() const { return JumpIsExpensive; }
+
/// getSetCCResultType - Return the ValueType of the result of SETCC
/// operations. Also used to obtain the target's preferred type for
/// the condition operand of SELECT and BRCOND nodes. In the case of
@@ -203,13 +206,6 @@ public:
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
}
- /// isTypeSynthesizable - Return true if it's OK for the compiler to create
- /// new operations of this type. All Legal types are synthesizable except
- /// MMX vector types on X86. Non-Legal types are not synthesizable.
- bool isTypeSynthesizable(EVT VT) const {
- return isTypeLegal(VT) && Synthesizable[VT.getSimpleVT().SimpleTy];
- }
-
class ValueTypeActionImpl {
/// ValueTypeActions - For each value type, keep a LegalizeAction enum
/// that indicates how instruction selection should deal with the type.
@@ -441,7 +437,7 @@ public:
/// for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
}
@@ -459,8 +455,8 @@ public:
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
- assert((unsigned)ValVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
[MemVT.getSimpleVT().SimpleTy];
@@ -480,8 +476,8 @@ public:
/// for it.
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
@@ -501,8 +497,8 @@ public:
/// for it.
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
@@ -646,21 +642,30 @@ public:
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memset. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memset
- unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }
+ unsigned getMaxStoresPerMemset(bool OptSize) const {
+ return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset;
+ }
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memcpy. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memcpy
- unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }
+ unsigned getMaxStoresPerMemcpy(bool OptSize) const {
+ return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
+ }
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memmove. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memmove
- unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }
+ unsigned getMaxStoresPerMemmove(bool OptSize) const {
+ return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
+ }
/// This function returns true if the target allows unaligned memory accesses
/// of the specified type. This is used, for example, in situations where an
@@ -958,6 +963,13 @@ public:
return isTypeLegal(VT);
}
+ /// isDesirableToTransformToIntegerOp - Return true if it is profitable for the
+ /// dag combiner to transform a floating point op of the specified opcode to an
+ /// equivalent op of an integer type. e.g. f32 load -> i32 load can be
+ /// profitable on ARM.
+ virtual bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const {
+ return false;
+ }
+
/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
@@ -1021,7 +1033,16 @@ protected:
/// SelectIsExpensive - Tells the code generator not to expand operations
/// into sequences that use the select operations if possible.
- void setSelectIsExpensive() { SelectIsExpensive = true; }
+ void setSelectIsExpensive(bool isExpensive = true) {
+ SelectIsExpensive = isExpensive;
+ }
+
+ /// JumpIsExpensive - Tells the code generator not to expand a sequence of
+ /// operations into separate sequences that increase the amount of
+ /// flow control.
+ void setJumpIsExpensive(bool isExpensive = true) {
+ JumpIsExpensive = isExpensive;
+ }
/// setIntDivIsCheap - Tells the code generator that integer divide is
/// expensive, and if possible, should be replaced by an alternate sequence
@@ -1036,12 +1057,10 @@ protected:
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
- void addRegisterClass(EVT VT, TargetRegisterClass *RC,
- bool isSynthesizable = true) {
+ void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
- Synthesizable[VT.getSimpleVT().SimpleTy] = isSynthesizable;
}
/// findRepresentativeClass - Return the largest legal super-reg register class
@@ -1065,8 +1084,7 @@ protected:
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
LegalizeAction Action) {
- assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
@@ -1075,8 +1093,7 @@ protected:
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
- assert((unsigned)ValVT.SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
}
@@ -1087,10 +1104,8 @@ protected:
/// TargetLowering.cpp
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Load action are kept in the upper half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
@@ -1102,10 +1117,8 @@ protected:
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Store action are kept in the lower half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
@@ -1115,7 +1128,7 @@ protected:
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
@@ -1261,6 +1274,13 @@ public:
return SDValue(); // this is here to silence compiler errors
}
+ /// isUsedByReturnOnly - Return true if the result of the specified node is
+ /// used by a return node only. This is used to determine whether it is
+ /// possible to codegen a libcall as a tail call at legalization time.
+ virtual bool isUsedByReturnOnly(SDNode *N) const {
+ return false;
+ }
+
/// LowerOperationWrapper - This callback is invoked by the type legalizer
/// to legalize nodes with an illegal operand type but legal result types.
/// It replaces the LowerOperation callback in the type Legalizer.
@@ -1328,6 +1348,22 @@ public:
C_Unknown // Unsupported constraint.
};
+ enum ConstraintWeight {
+ // Generic weights.
+ CW_Invalid = -1, // No match.
+ CW_Okay = 0, // Acceptable.
+ CW_Good = 1, // Good weight.
+ CW_Better = 2, // Better weight.
+ CW_Best = 3, // Best weight.
+
+ // Well-known weights.
+ CW_SpecificReg = CW_Okay, // Specific register operands.
+ CW_Register = CW_Good, // Register operands.
+ CW_Memory = CW_Better, // Memory operands.
+ CW_Constant = CW_Best, // Constant operand.
+ CW_Default = CW_Okay // Default or don't know type.
+ };
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1356,12 +1392,41 @@ public:
/// returns the output operand it matches.
unsigned getMatchedOperand() const;
+ /// Copy constructor for copying from an AsmOperandInfo.
+ AsmOperandInfo(const AsmOperandInfo &info)
+ : InlineAsm::ConstraintInfo(info),
+ ConstraintCode(info.ConstraintCode),
+ ConstraintType(info.ConstraintType),
+ CallOperandVal(info.CallOperandVal),
+ ConstraintVT(info.ConstraintVT) {
+ }
+
+ /// Copy constructor for copying from a ConstraintInfo.
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
};
+
+ typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
+
+ /// ParseConstraints - Split up the constraint string from the inline
+ /// assembly value into the specific constraints and their prefixes,
+ /// and also tie in the associated operand values.
+ /// If this returns an empty vector, and if the constraint string itself
+ /// isn't empty, there was an error parsing.
+ virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
+
+ /// Examine constraint type and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getMultipleConstraintMatchWeight(
+ AsmOperandInfo &info, int maIndex) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
@@ -1568,6 +1633,11 @@ private:
/// it.
bool Pow2DivIsCheap;
+ /// JumpIsExpensive - Tells the code generator that it shouldn't generate
+ /// extra flow control instructions and should attempt to combine flow
+ /// control instructions via predication.
+ bool JumpIsExpensive;
+
/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp. Defaults to false.
bool UseUnderscoreSetJmp;
@@ -1643,11 +1713,6 @@ private:
/// approximate register pressure.
uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
- /// Synthesizable indicates whether it is OK for the compiler to create new
- /// operations using this type. All Legal types are Synthesizable except
- /// MMX types on X86. Non-Legal types are not Synthesizable.
- bool Synthesizable[MVT::LAST_VALUETYPE];
-
/// TransformToType - For any value types we are promoting or expanding, this
/// contains the value type that we are changing to. For Expanded types, this
/// contains one step of the expand (e.g. i64 -> i32), even if there are
@@ -1727,6 +1792,10 @@ protected:
/// @brief Specify maximum number of store instructions per memset call.
unsigned maxStoresPerMemset;
+ /// Maximum number of store operations that may be substituted for the call
+ /// to memset, used for functions with OptSize attribute.
+ unsigned maxStoresPerMemsetOptSize;
+
/// When lowering \@llvm.memcpy this field specifies the maximum number of
/// store operations that may be substituted for a call to memcpy. Targets
/// must set this value based on the cost threshold for that target. Targets
@@ -1739,6 +1808,10 @@ protected:
/// @brief Specify maximum number of store instructions per memcpy call.
unsigned maxStoresPerMemcpy;
+ /// Maximum number of store operations that may be substituted for a call
+ /// to memcpy, used for functions with OptSize attribute.
+ unsigned maxStoresPerMemcpyOptSize;
+
/// When lowering \@llvm.memmove this field specifies the maximum number of
/// store instructions that may be substituted for a call to memmove. Targets
/// must set this value based on the cost threshold for that target. Targets
@@ -1750,6 +1823,10 @@ protected:
/// @brief Specify maximum number of store instructions per memmove call.
unsigned maxStoresPerMemmove;
+ /// Maximum number of store instructions that may be substituted for a call
+ /// to memmove, used for functions with OptSize attribute.
+ unsigned maxStoresPerMemmoveOptSize;
+
/// This field specifies whether the target can benefit from code placement
/// optimization.
bool benefitFromCodePlacementOpt;
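
With the OptSize variants above, inline expansion of memset/memcpy/memmove can use a smaller store budget in size-optimized functions. A standalone sketch of how a lowering decision might consult such a pair of thresholds (all numbers invented, not any target's real limits):

    #include <cassert>

    // Invented thresholds, mirroring maxStoresPerMemset / ...OptSize above.
    static const unsigned MaxStoresPerMemset = 16;
    static const unsigned MaxStoresPerMemsetOptSize = 4;

    static unsigned maxStores(bool OptSize) {
      return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
    }

    // Expand an N-byte memset inline only if it fits the budget of
    // StoreSize-byte stores; otherwise keep the libcall.
    static bool expandInline(unsigned Bytes, unsigned StoreSize, bool OptSize) {
      unsigned NumStores = (Bytes + StoreSize - 1) / StoreSize;
      return NumStores <= maxStores(OptSize);
    }

    int main() {
      // 64 bytes as 8-byte stores: 8 stores, fine at -O2 but not at -Os.
      assert(expandInline(64, 8, /*OptSize=*/false));
      assert(!expandInline(64, 8, /*OptSize=*/true));
      return 0;
    }
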
diff --git a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index 819709f..34bf271 100644
--- a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -69,10 +69,6 @@ protected:
/// the section the Language Specific Data Area information is emitted to.
const MCSection *LSDASection;
- /// EHFrameSection - If exception handling is supported by the target, this is
- /// the section the EH Frame is emitted to.
- const MCSection *EHFrameSection;
-
// Dwarf sections for debug info. If a target supports debug info, these must
// be set.
const MCSection *DwarfAbbrevSection;
@@ -92,6 +88,11 @@ protected:
// information for a TLS variable, it'll go here.
const MCSection *TLSExtraDataSection;
+ /// CommDirectiveSupportsAlignment - True if .comm supports alignment. This
+ /// is a hack for as long as we support 10.4 Tiger, whose assembler doesn't
+ /// support alignment on comm.
+ bool CommDirectiveSupportsAlignment;
+
/// SupportsWeakEmptyEHFrame - True if target object file supports a
/// weak_definition of constant 0 for an omitted EH frame.
bool SupportsWeakOmittedEHFrame;
@@ -128,13 +129,17 @@ public:
return SupportsWeakOmittedEHFrame;
}
+ bool getCommDirectiveSupportsAlignment() const {
+ return CommDirectiveSupportsAlignment;
+ }
+
const MCSection *getTextSection() const { return TextSection; }
const MCSection *getDataSection() const { return DataSection; }
const MCSection *getBSSSection() const { return BSSSection; }
const MCSection *getStaticCtorSection() const { return StaticCtorSection; }
const MCSection *getStaticDtorSection() const { return StaticDtorSection; }
const MCSection *getLSDASection() const { return LSDASection; }
- const MCSection *getEHFrameSection() const { return EHFrameSection; }
+ virtual const MCSection *getEHFrameSection() const = 0;
const MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
const MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
const MCSection *getDwarfLineSection() const { return DwarfLineSection; }
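
Making getEHFrameSection() pure virtual pushes the choice of EH frame section down to each object-file flavor instead of a cached member. A standalone mock of the shape of that override (Section and the classes below are invented stand-ins for MCSection and TargetLoweringObjectFile):

    #include <cassert>

    // Invented stand-ins, only to show the pure-virtual override introduced above.
    struct Section { const char *Name; };

    struct ObjectFileLowering {
      virtual ~ObjectFileLowering() {}
      virtual const Section *getEHFrameSection() const = 0; // now per-flavor
    };

    struct ELFLowering : ObjectFileLowering {
      Section EHFrame;
      ELFLowering() { EHFrame.Name = ".eh_frame"; }
      virtual const Section *getEHFrameSection() const { return &EHFrame; }
    };

    int main() {
      ELFLowering L;
      assert(L.getEHFrameSection() != 0);
      return 0;
    }
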
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
index 42e99e0..030bf5b 100644
--- a/contrib/llvm/include/llvm/Target/TargetMachine.h
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -29,7 +29,7 @@ class TargetIntrinsicInfo;
class TargetJITInfo;
class TargetLowering;
class TargetSelectionDAGInfo;
-class TargetFrameInfo;
+class TargetFrameLowering;
class JITCodeEmitter;
class MCContext;
class TargetRegisterInfo;
@@ -98,12 +98,14 @@ protected: // Can only create subclasses.
/// TheTarget - The Target that this machine was created for.
const Target &TheTarget;
-
+
/// AsmInfo - Contains target specific asm information.
///
const MCAsmInfo *AsmInfo;
unsigned MCRelaxAll : 1;
+ unsigned MCNoExecStack : 1;
+ unsigned MCUseLoc : 1;
public:
virtual ~TargetMachine();
@@ -116,16 +118,16 @@ public:
// -- Stack frame information
// -- Selection DAG lowering information
//
- virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
- virtual const TargetFrameInfo *getFrameInfo() const { return 0; }
+ virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
+ virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const TargetData *getTargetData() const { return 0; }
-
+ virtual const TargetData *getTargetData() const { return 0; }
+
/// getMCAsmInfo - Return target specific asm information.
///
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
-
+
/// getSubtarget - This method returns a pointer to the specified type of
/// TargetSubtarget. In debug builds, it verifies that the object being
/// returned is of the correct type.
@@ -138,7 +140,7 @@ public:
/// details of graph coloring register allocation removed from it.
///
virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
-
+
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
///
@@ -148,17 +150,17 @@ public:
/// otherwise return null.
///
virtual TargetJITInfo *getJITInfo() { return 0; }
-
+
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
///
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItineraryData();
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return 0;
}
/// getELFWriterInfo - If this target supports an ELF writer, return
/// information for it, otherwise return null.
- ///
+ ///
virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
/// hasMCRelaxAll - Check whether all machine code instructions should be
@@ -169,6 +171,18 @@ public:
/// relaxed.
void setMCRelaxAll(bool Value) { MCRelaxAll = Value; }
+ /// hasMCNoExecStack - Check whether an executable stack is not needed.
+ bool hasMCNoExecStack() const { return MCNoExecStack; }
+
+ /// setMCNoExecStack - Set whether an executable stack is not needed.
+ void setMCNoExecStack(bool Value) { MCNoExecStack = Value; }
+
+ /// hasMCUseLoc - Check whether we should use dwarf's .loc directive.
+ bool hasMCUseLoc() const { return MCUseLoc; }
+
+ /// setMCUseLoc - Set whether we should use dwarf's .loc directive.
+ void setMCUseLoc(bool Value) { MCUseLoc = Value; }
+
/// getRelocationModel - Returns the code generation relocation model. The
/// choices are static, PIC, dynamic-no-pic, and target default.
static Reloc::Model getRelocationModel();
@@ -267,7 +281,7 @@ class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, const std::string &TargetTriple);
-
+
private:
/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for
/// both emitting to assembly files or machine code output.
@@ -277,9 +291,11 @@ private:
virtual void setCodeModelForJIT();
virtual void setCodeModelForStatic();
-
+
public:
-
+
+ const std::string &getTargetTriple() const { return TargetTriple; }
+
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
/// specified file emitted. Typically this will involve several steps of code
/// generation. If OptLevel is None, the code generator should emit code as
@@ -289,7 +305,7 @@ public:
CodeGenFileType FileType,
CodeGenOpt::Level,
bool DisableVerify = true);
-
+
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
/// get machine code emitted. This uses a JITCodeEmitter object to handle
/// actually outputting the machine code and resolving things like the address
@@ -310,7 +326,7 @@ public:
MCContext *&Ctx,
CodeGenOpt::Level OptLevel,
bool DisableVerify = true);
-
+
/// Target-Independent Code Generator Pass Configuration Options.
/// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
@@ -347,15 +363,15 @@ public:
virtual bool addPreSched2(PassManagerBase &, CodeGenOpt::Level) {
return false;
}
-
+
/// addPreEmitPass - This pass may be implemented by targets that want to run
/// passes immediately before machine code is emitted. This should return
/// true if -print-machineinstrs should print out the code after the passes.
virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) {
return false;
}
-
-
+
+
/// addCodeEmitter - This pass should be overridden by the target to add a
/// code emitter, if supported. If this is not supported, 'true' should be
/// returned.
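
getInstrItineraryData() now returns a pointer, with null meaning "no itineraries" where an empty by-value object was used before, so callers gain a null check. A standalone sketch of the calling-convention change (invented types):

    #include <cassert>

    // Invented stand-ins: the point is only the by-value -> pointer change.
    struct Itineraries { bool Empty; };

    struct MachineWithItins {
      Itineraries Itins;
      const Itineraries *getInstrItineraryData() const { return &Itins; }
    };

    struct MachineWithout {
      const Itineraries *getInstrItineraryData() const { return 0; }
    };

    template <class TM> static bool hasItineraries(const TM &M) {
      const Itineraries *I = M.getInstrItineraryData();
      return I != 0 && !I->Empty;   // callers must now null-check
    }

    int main() {
      MachineWithItins A; A.Itins.Empty = false;
      MachineWithout B;
      assert(hasItineraries(A) && !hasItineraries(B));
      return 0;
    }
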
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
index 81dec3e..121091c 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -29,21 +29,21 @@ class MachineFunction;
class MachineMove;
class RegScavenger;
template<class T> class SmallVectorImpl;
+class raw_ostream;
/// TargetRegisterDesc - This record contains all of the information known about
-/// a particular register. The AliasSet field (if not null) contains a pointer
-/// to a Zero terminated array of registers that this register aliases. This is
-/// needed for architectures like X86 which have AL alias AX alias EAX.
-/// Registers that this does not apply to simply should set this to null.
-/// The SubRegs field is a zero terminated array of registers that are
-/// sub-registers of the specific register, e.g. AL, AH are sub-registers of AX.
-/// The SuperRegs field is a zero terminated array of registers that are
+/// a particular register. The Overlaps field contains a pointer to a zero
+/// terminated array of registers that this register aliases, starting with
+/// itself. This is needed for architectures like X86 which have AL alias AX
+/// alias EAX. The SubRegs field is a zero terminated array of registers that
+/// are sub-registers of the specific register, e.g. AL, AH are sub-registers of
+/// AX. The SuperRegs field is a zero terminated array of registers that are
/// super-registers of the specific register, e.g. RAX, EAX, are super-registers
/// of AX.
///
struct TargetRegisterDesc {
const char *Name; // Printable name for the reg (for debugging)
- const unsigned *AliasSet; // Register Alias Set, described above
+ const unsigned *Overlaps; // Overlapping registers, described above
const unsigned *SubRegs; // Sub-register set, described above
const unsigned *SuperRegs; // Super-register set, described above
};
@@ -123,7 +123,7 @@ public:
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
- for(int i = 0; VTs[i].getSimpleVT().SimpleTy != MVT::Other; ++i)
+ for(int i = 0; VTs[i] != MVT::Other; ++i)
if (VTs[i] == vt)
return true;
return false;
@@ -137,7 +137,7 @@ public:
vt_iterator vt_end() const {
vt_iterator I = VTs;
- while (I->getSimpleVT().SimpleTy != MVT::Other) ++I;
+ while (*I != MVT::Other) ++I;
return I;
}
@@ -227,9 +227,12 @@ public:
/// cheaper to allocate caller saved registers.
///
/// These methods take a MachineFunction argument, which can be used to tune
- /// the allocatable registers based on the characteristics of the function.
- /// One simple example is that the frame pointer register can be used if
- /// frame-pointer-elimination is performed.
+ /// the allocatable registers based on the characteristics of the function,
+ /// subtarget, or other criteria.
+ ///
+ /// Register allocators should account for the fact that an allocation
+ /// order iterator may return a reserved register and always check
+ /// if the register is allocatable (getAllocatableSet()) before using it.
///
/// By default, these methods return all registers in the class.
///
@@ -292,30 +295,68 @@ protected:
virtual ~TargetRegisterInfo();
public:
- enum { // Define some target independent constants
- /// NoRegister - This physical register is not a real target register. It
- /// is useful as a sentinal.
- NoRegister = 0,
+ // Register numbers can represent physical registers, virtual registers, and
+ // sometimes stack slots. The unsigned values are divided into these ranges:
+ //
+ // 0 Not a register, can be used as a sentinel.
+ // [1;2^30) Physical registers assigned by TableGen.
+ // [2^30;2^31) Stack slots. (Rarely used.)
+ // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
+ //
+ // Further sentinels can be allocated from the small negative integers.
+ // DenseMapInfo<unsigned> uses -1u and -2u.
- /// FirstVirtualRegister - This is the first register number that is
- /// considered to be a 'virtual' register, which is part of the SSA
- /// namespace. This must be the same for all targets, which means that each
- /// target is limited to this fixed number of registers.
- FirstVirtualRegister = 16384
- };
+ /// isStackSlot - Sometimes it is useful to be able to store a non-negative
+ /// frame index in a variable that normally holds a register. isStackSlot()
+ /// returns true if Reg is in the range used for stack slots.
+ ///
+ /// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
+ /// slots, so if a variable may contain a stack slot, always check
+ /// isStackSlot() first.
+ ///
+ static bool isStackSlot(unsigned Reg) {
+ return int(Reg) >= (1 << 30);
+ }
+
+ /// stackSlot2Index - Compute the frame index from a register value
+ /// representing a stack slot.
+ static int stackSlot2Index(unsigned Reg) {
+ assert(isStackSlot(Reg) && "Not a stack slot");
+ return int(Reg - (1u << 30));
+ }
+
+ /// index2StackSlot - Convert a non-negative frame index to a stack slot
+ /// register value.
+ static unsigned index2StackSlot(int FI) {
+ assert(FI >= 0 && "Cannot hold a negative frame index.");
+ return FI + (1u << 30);
+ }
/// isPhysicalRegister - Return true if the specified register number is in
/// the physical register namespace.
static bool isPhysicalRegister(unsigned Reg) {
- assert(Reg && "this is not a register!");
- return Reg < FirstVirtualRegister;
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) > 0;
}
/// isVirtualRegister - Return true if the specified register number is in
/// the virtual register namespace.
static bool isVirtualRegister(unsigned Reg) {
- assert(Reg && "this is not a register!");
- return Reg >= FirstVirtualRegister;
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) < 0;
+ }
+
+ /// virtReg2Index - Convert a virtual register number to a 0-based index.
+ /// The first virtual register in a function will get the index 0.
+ static unsigned virtReg2Index(unsigned Reg) {
+ assert(isVirtualRegister(Reg) && "Not a virtual register");
+ return Reg - (1u << 31);
+ }
+
+ /// index2VirtReg - Convert a 0-based index to a virtual register number.
+ /// This is the inverse operation of VirtReg2IndexFunctor below.
+ static unsigned index2VirtReg(unsigned Index) {
+ return Index + (1u << 31);
}
/// getMinimalPhysRegClass - Returns the Register Class of a physical
@@ -348,7 +389,17 @@ public:
/// terminated.
///
const unsigned *getAliasSet(unsigned RegNo) const {
- return get(RegNo).AliasSet;
+ // The Overlaps set always begins with Reg itself.
+ return get(RegNo).Overlaps + 1;
+ }
+
+ /// getOverlaps - Return a list of registers that overlap Reg, including
+ /// itself. This is the same as the alias set except Reg is included in the
+ /// list.
+ /// These are exactly the registers in { x | regsOverlap(x, Reg) }.
+ ///
+ const unsigned *getOverlaps(unsigned RegNo) const {
+ return get(RegNo).Overlaps;
}
/// getSubRegisters - Return the list of registers that are sub-registers of
@@ -574,13 +625,6 @@ public:
// Do nothing.
}
- /// targetHandlesStackFrameRounding - Returns true if the target is
- /// responsible for rounding up the stack frame (probably at emitPrologue
- /// time).
- virtual bool targetHandlesStackFrameRounding() const {
- return false;
- }
-
/// requiresRegisterScavenging - returns true if the target requires (and can
/// make use of) the register scavenger.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
@@ -600,31 +644,6 @@ public:
return false;
}
- /// hasFP - Return true if the specified function should have a dedicated
- /// frame pointer register. For most targets this is true only if the function
- /// has variable sized allocas or if frame pointer elimination is disabled.
- virtual bool hasFP(const MachineFunction &MF) const = 0;
-
- /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
- /// not required, we reserve argument space for call sites in the function
- /// immediately on entry to the current function. This eliminates the need for
- /// add/sub sp brackets around call sites. Returns true if the call frame is
- /// included as part of the stack frame.
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
- return !hasFP(MF);
- }
-
- /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
- /// call frame pseudo ops before doing frame index elimination. This is
- /// possible only when frame index references between the pseudos won't
- /// need adjusting for the call frame adjustments. Normally, that's true
- /// if the function has a reserved call frame or a frame pointer. Some
- /// targets (Thumb2, for example) may have more complicated criteria,
- /// however, and can override this behavior.
- virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
- return hasReservedCallFrame(MF) || hasFP(MF);
- }
-
/// hasReservedSpillSlot - Return true if target has reserved a spill slot in
/// the stack frame of the given function for the specified register. e.g. On
/// x86, if the frame register is required, the first fixed stack object is
@@ -644,7 +663,7 @@ public:
}
/// getFrameIndexInstrOffset - Get the offset from the referenced frame
- /// index in the instruction, if the is one.
+ /// index in the instruction, if there is one.
virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
int Idx) const {
return 0;
@@ -660,7 +679,7 @@ public:
/// materializeFrameBaseRegister - Insert defining instruction(s) for
/// BaseReg to be a pointer to FrameIdx before insertion point I.
- virtual void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+ virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
int64_t Offset) const {
assert(0 && "materializeFrameBaseRegister does not exist on this target");
@@ -707,21 +726,6 @@ public:
assert(0 && "Call Frame Pseudo Instructions do not exist on this target!");
}
- /// processFunctionBeforeCalleeSavedScan - This method is called immediately
- /// before PrologEpilogInserter scans the physical registers used to determine
- /// what callee saved registers should be spilled. This method is optional.
- virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const {
-
- }
-
- /// processFunctionBeforeFrameFinalized - This method is called immediately
- /// before the specified function's frame layout (MF.getFrameInfo()) is
- /// finalized. Once the frame is finalized, MO_FrameIndex operands are
- /// replaced with direct constants. This method is optional.
- ///
- virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
- }
/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true if the register was spilled, false
@@ -746,12 +750,6 @@ public:
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, RegScavenger *RS=NULL) const = 0;
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- virtual void emitPrologue(MachineFunction &MF) const = 0;
- virtual void emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const = 0;
-
//===--------------------------------------------------------------------===//
/// Debug information queries.
@@ -765,37 +763,16 @@ public:
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
- /// getFrameIndexOffset - Returns the displacement from the frame register to
- /// the stack frame of the specified index.
- virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-
- /// getFrameIndexReference - This method should return the base register
- /// and offset used to reference a frame index location. The offset is
- /// returned directly, and the base register is returned via FrameReg.
- virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const {
- // By default, assume all frame indices are referenced via whatever
- // getFrameRegister() says. The target can override this if it's doing
- // something different.
- FrameReg = getFrameRegister(MF);
- return getFrameIndexOffset(MF, FI);
- }
-
/// getRARegister - This method should return the register where the return
/// address can be found.
virtual unsigned getRARegister() const = 0;
-
- /// getInitialFrameState - Returns a list of machine moves that are assumed
- /// on entry to all functions. Note that LabelID is ignored (assumed to be
- /// the beginning of the function.)
- virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const;
};
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
unsigned operator()(unsigned Reg) const {
- return Reg - TargetRegisterInfo::FirstVirtualRegister;
+ return TargetRegisterInfo::virtReg2Index(Reg);
}
};
@@ -804,6 +781,33 @@ struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
const TargetRegisterClass *getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B);
+/// PrintReg - Helper class for printing registers on a raw_ostream.
+/// Prints virtual and physical registers with or without a TRI instance.
+///
+/// The format is:
+/// %noreg - NoRegister
+/// %vreg5 - a virtual register.
+/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
+///   %EAX          - a physical register.
+///   %physreg17    - a physical register when no TRI instance is given.
+///
+/// Usage: OS << PrintReg(Reg, TRI) << '\n';
+///
+class PrintReg {
+ const TargetRegisterInfo *TRI;
+ unsigned Reg;
+ unsigned SubIdx;
+public:
+ PrintReg(unsigned reg, const TargetRegisterInfo *tri = 0, unsigned subidx = 0)
+ : TRI(tri), Reg(reg), SubIdx(subidx) {}
+ void print(raw_ostream&) const;
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS, const PrintReg &PR) {
+ PR.print(OS);
+ return OS;
+}
+
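A brief usage sketch for PrintReg, matching the constructor and operator<<
declared above; the dumpReg helper and its arguments are illustrative, not
part of this patch:

    #include "llvm/Target/TargetRegisterInfo.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print a register in the documented %vreg5:sub_8bit / %EAX style.
    void dumpReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo *TRI) {
      // With a TRI instance, virtual registers print with their sub-register
      // index; without one, physical registers fall back to %physregNN.
      errs() << PrintReg(Reg, TRI, SubIdx) << '\n';
    }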
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetRegistry.h b/contrib/llvm/include/llvm/Target/TargetRegistry.h
index 2817b0c..f851ad0 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegistry.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegistry.h
@@ -39,6 +39,15 @@ namespace llvm {
class TargetAsmParser;
class TargetMachine;
class raw_ostream;
+ class formatted_raw_ostream;
+
+ MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst);
/// Target - Wrapper for Target specific information.
///
@@ -80,7 +89,16 @@ namespace llvm {
TargetAsmBackend &TAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
- bool RelaxAll);
+ bool RelaxAll,
+ bool NoExecStack);
+ typedef MCStreamer *(*AsmStreamerCtorTy)(MCContext &Ctx,
+ formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst);
private:
/// Next - The next registered target in the linked list, maintained by the
@@ -138,7 +156,13 @@ namespace llvm {
/// ObjectStreamer, if registered.
ObjectStreamerCtorTy ObjectStreamerCtorFn;
+ /// AsmStreamerCtorFn - Construction function for this target's
+ /// AsmStreamer, if registered (default = llvm::createAsmStreamer).
+ AsmStreamerCtorTy AsmStreamerCtorFn;
+
public:
+ Target() : AsmStreamerCtorFn(llvm::createAsmStreamer) {}
+
/// @name Target Information
/// @{
@@ -185,6 +209,9 @@ namespace llvm {
/// hasObjectStreamer - Check if this target supports streaming to files.
bool hasObjectStreamer() const { return ObjectStreamerCtorFn != 0; }
+    /// hasAsmStreamer - Check if this target supports streaming assembly to files.
+ bool hasAsmStreamer() const { return AsmStreamerCtorFn != 0; }
+
/// @}
/// @name Feature Constructors
/// @{
@@ -282,14 +309,31 @@ namespace llvm {
/// \arg _OS - The stream object.
/// \arg _Emitter - The target independent assembler object.Takes ownership.
/// \arg RelaxAll - Relax all fixups?
+    /// \arg NoExecStack - Mark the file as not needing an executable stack.
MCStreamer *createObjectStreamer(const std::string &TT, MCContext &Ctx,
TargetAsmBackend &TAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
- bool RelaxAll) const {
+ bool RelaxAll,
+ bool NoExecStack) const {
if (!ObjectStreamerCtorFn)
return 0;
- return ObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, RelaxAll);
+ return ObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, RelaxAll,
+ NoExecStack);
+ }
+
+    /// createAsmStreamer - Create a target-specific assembly MCStreamer.
+ MCStreamer *createAsmStreamer(MCContext &Ctx,
+ formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst) const {
+      // AsmStreamerCtorFn defaults to llvm::createAsmStreamer.
+ return AsmStreamerCtorFn(Ctx, OS, isVerboseAsm, useLoc,
+ InstPrint, CE, TAB, ShowInst);
}
/// @}
@@ -513,7 +557,7 @@ namespace llvm {
T.CodeEmitterCtorFn = Fn;
}
- /// RegisterObjectStreamer - Register an MCStreamer implementation
+    /// RegisterObjectStreamer - Register an object code MCStreamer implementation
/// for the given target.
///
/// Clients are responsible for ensuring that registration doesn't occur
@@ -527,6 +571,20 @@ namespace llvm {
T.ObjectStreamerCtorFn = Fn;
}
+ /// RegisterAsmStreamer - Register an assembly MCStreamer implementation
+ /// for the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCStreamer for the target.
+ static void RegisterAsmStreamer(Target &T, Target::AsmStreamerCtorTy Fn) {
+ if (T.AsmStreamerCtorFn == createAsmStreamer)
+ T.AsmStreamerCtorFn = Fn;
+ }
+
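A hedged registration sketch following the AsmStreamerCtorTy signature above;
the Foo target, its TheFooTarget instance, and the registration hook are
hypothetical names for illustration only:

    // Factory matching Target::AsmStreamerCtorTy; a real target would build
    // its own MCStreamer subclass rather than delegating to the default.
    static MCStreamer *createFooAsmStreamer(MCContext &Ctx,
                                            formatted_raw_ostream &OS,
                                            bool isVerboseAsm, bool useLoc,
                                            MCInstPrinter *InstPrint,
                                            MCCodeEmitter *CE,
                                            TargetAsmBackend *TAB,
                                            bool ShowInst) {
      return llvm::createAsmStreamer(Ctx, OS, isVerboseAsm, useLoc,
                                     InstPrint, CE, TAB, ShowInst);
    }

    void registerFooStreamers() {                       // hypothetical hook
      TargetRegistry::RegisterAsmStreamer(TheFooTarget, // hypothetical Target
                                          createFooAsmStreamer);
    }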
/// @}
};
diff --git a/contrib/llvm/include/llvm/Target/TargetSchedule.td b/contrib/llvm/include/llvm/Target/TargetSchedule.td
index 96c8367..97ea82a 100644
--- a/contrib/llvm/include/llvm/Target/TargetSchedule.td
+++ b/contrib/llvm/include/llvm/Target/TargetSchedule.td
@@ -22,6 +22,13 @@
//
class FuncUnit;
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
class ReservationKind<bits<1> val> {
int Value = val;
}
@@ -66,30 +73,58 @@ class InstrStage<int cycles, list<FuncUnit> units,
// across all chip sets. Thus a new chip set can be added without modifying
// instruction information.
//
-class InstrItinClass;
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded to. If the number is zero, it means the instruction
+// decodes into a variable number of micro-ops, and the count must be
+// determined dynamically.
+//
+class InstrItinClass<int ops = 1> {
+ int NumMicroOps = ops;
+}
def NoItinerary : InstrItinClass;
//===----------------------------------------------------------------------===//
// Instruction itinerary data - These values provide a runtime map of an
// instruction itinerary class (name) to its itinerary data.
//
+// OperandCycles are optional "cycle counts". They specify, for each operand
+// index, the cycle after instruction issue at which the corresponding value
+// is defined or read. Bypasses are optional "pipeline forwarding paths": if
+// a def by an instruction is available on a specific bypass and the use can
+// read from the same bypass, then the operand use latency is reduced by one.
+//
+// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+// InstrStage<1, [A9_AGU]>],
+// [3, 1], [A9_LdBypass]>,
+// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+// [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, instructions of the IIC_iLoad_i class read their input on
+// cycle 1 (after issue) and the result of the load is available on cycle 3.
+// The result is available via the forwarding path A9_LdBypass. If it is used
+// by the first source operand of instructions of the IIC_iMVNr class, then
+// the operand latency is reduced by 1.
class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
- list<int> operandcycles = []> {
+ list<int> operandcycles = [],
+ list<Bypass> bypasses = []> {
InstrItinClass TheClass = Class;
list<InstrStage> Stages = stages;
list<int> OperandCycles = operandcycles;
+ list<Bypass> Bypasses = bypasses;
}
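The bypass rule described in the comments above can be modeled in a few lines
of C++; this is an illustrative sketch of the latency computation, not code
from this patch:

    // An operand's effective use latency is the def's operand cycle, reduced
    // by one when the def and the use share a forwarding bypass.
    static int effectiveLatency(int DefCycle, int DefBypass, int UseBypass,
                                int NoBypass) {
      if (DefBypass != NoBypass && DefBypass == UseBypass)
        return DefCycle - 1; // value forwarded on the shared bypass
      return DefCycle;
    }
    // E.g. a load whose result is defined on cycle 3 via A9_LdBypass, read by
    // an operand that also reads A9_LdBypass, has effective latency 3 - 1 = 2.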
//===----------------------------------------------------------------------===//
// Processor itineraries - These values represent the set of all itinerary
// classes for a given chip set.
//
-class ProcessorItineraries<list<FuncUnit> fu, list<InstrItinData> iid> {
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+ list<InstrItinData> iid> {
list<FuncUnit> FU = fu;
+ list<Bypass> BP = bp;
list<InstrItinData> IID = iid;
}
// NoItineraries - A marker that can be used by processors without schedule
// info.
-def NoItineraries : ProcessorItineraries<[], []>;
+def NoItineraries : ProcessorItineraries<[], [], []>;
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
index 58ccfba..c9be40d 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -1,10 +1,10 @@
//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces used by SelectionDAG
@@ -61,6 +61,13 @@ class SDTCisEltOfVec<int ThisOp, int OtherOp>
int OtherOpNum = OtherOp;
}
+/// SDTCisSubVecOfVec - This indicates that ThisOp is a vector type
+/// with length less than that of OtherOp, which is a vector type.
+class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
+ : SDTypeConstraint<ThisOp> {
+ int OtherOpNum = OtherOp;
+}
+
//===----------------------------------------------------------------------===//
// Selection DAG Type Profile definitions.
//
@@ -123,10 +130,10 @@ def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround
def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend
SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
-def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
+def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
SDTCisFP<0>, SDTCisInt<1>
]>;
-def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
+def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
SDTCisInt<0>, SDTCisFP<1>
]>;
def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
@@ -138,7 +145,7 @@ def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>;
-def SDTSelect : SDTypeProfile<1, 3, [ // select
+def SDTSelect : SDTypeProfile<1, 3, [ // select
SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
]>;
@@ -162,11 +169,11 @@ def SDTBrind : SDTypeProfile<0, 1, [ // brind
def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap
def SDTLoad : SDTypeProfile<1, 1, [ // load
- SDTCisPtrTy<1>
+ SDTCisPtrTy<1>
]>;
def SDTStore : SDTypeProfile<0, 2, [ // store
- SDTCisPtrTy<1>
+ SDTCisPtrTy<1>
]>;
def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
@@ -183,18 +190,25 @@ def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert
SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
]>;
-def STDPrefetch : SDTypeProfile<0, 3, [ // prefetch
+def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract
+ SDTCisSubVecOfVec<0,1>, SDTCisInt<2>
+]>;
+def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
+ SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
+]>;
+
+def SDTPrefetch : SDTypeProfile<0, 3, [ // prefetch
SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>
]>;
-def STDMemBarrier : SDTypeProfile<0, 5, [ // memory barier
+def SDTMemBarrier : SDTypeProfile<0, 5, [   // memory barrier
SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
SDTCisInt<0>
]>;
-def STDAtomic3 : SDTypeProfile<1, 3, [
+def SDTAtomic3 : SDTypeProfile<1, 3, [
SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
-def STDAtomic2 : SDTypeProfile<1, 2, [
+def SDTAtomic2 : SDTypeProfile<1, 2, [
SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
@@ -216,20 +230,27 @@ class SDNodeProperty;
def SDNPCommutative : SDNodeProperty; // X op Y == Y op X
def SDNPAssociative : SDNodeProperty; // (X op Y) op Z == X op (Y op Z)
def SDNPHasChain : SDNodeProperty; // R/W chain operand and result
-def SDNPOutFlag : SDNodeProperty; // Write a flag result
-def SDNPInFlag : SDNodeProperty; // Read a flag operand
-def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand
+def SDNPOutGlue : SDNodeProperty; // Write a flag result
+def SDNPInGlue : SDNodeProperty; // Read a flag operand
+def SDNPOptInGlue : SDNodeProperty; // Optionally read a flag operand
def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
def SDNPVariadic : SDNodeProperty; // Node has variable arguments.
+def SDNPWantRoot : SDNodeProperty; // ComplexPattern gets the root of match
+def SDNPWantParent : SDNodeProperty; // ComplexPattern gets the parent
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Operations
+class SDPatternOperator;
//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
//
class SDNode<string opcode, SDTypeProfile typeprof,
- list<SDNodeProperty> props = [], string sdclass = "SDNode"> {
+ list<SDNodeProperty> props = [], string sdclass = "SDNode">
+ : SDPatternOperator {
string Opcode = opcode;
string SDClass = sdclass;
list<SDNodeProperty> Properties = props;
@@ -305,14 +326,14 @@ def or : SDNode<"ISD::OR" , SDTIntBinOp,
def xor : SDNode<"ISD::XOR" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def addc : SDNode<"ISD::ADDC" , SDTIntBinOp,
- [SDNPCommutative, SDNPOutFlag]>;
+ [SDNPCommutative, SDNPOutGlue]>;
def adde : SDNode<"ISD::ADDE" , SDTIntBinOp,
- [SDNPCommutative, SDNPOutFlag, SDNPInFlag]>;
+ [SDNPCommutative, SDNPOutGlue, SDNPInGlue]>;
def subc : SDNode<"ISD::SUBC" , SDTIntBinOp,
- [SDNPOutFlag]>;
+ [SDNPOutGlue]>;
def sube : SDNode<"ISD::SUBE" , SDTIntBinOp,
- [SDNPOutFlag, SDNPInFlag]>;
-
+ [SDNPOutGlue, SDNPInGlue]>;
+
def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
@@ -322,11 +343,11 @@ def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
-def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>;
+def bitconvert : SDNode<"ISD::BITCAST" , SDTUnaryOp>;
def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
-
+
def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
@@ -367,35 +388,36 @@ def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
-def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch,
- [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
+def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
-def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
+def membarrier : SDNode<"ISD::MEMBARRIER" , SDTMemBarrier,
[SDNPHasChain, SDNPSideEffect]>;
-def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , SDTAtomic3,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
@@ -415,16 +437,26 @@ def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
-
+
+// This operator does not do subvector type checking. The ARM
+// backend, at least, needs it.
+def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
+ SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
+ []>;
+
+// These operators do subvector type checking.
+def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
+def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
+
// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
// these internally. Don't reference these directly.
-def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
+def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
[SDNPHasChain]>;
-def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
+def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
[SDNPHasChain]>;
-def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
+def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
// Do not use cvt directly. Use cvt forms below
@@ -469,10 +501,10 @@ def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>;
//
/// PatFrag - Represents a pattern fragment. This can match something on the
-/// DAG, frame a single node to multiply nested other fragments.
+/// DAG, from a single node to multiple nested other fragments.
///
class PatFrag<dag ops, dag frag, code pred = [{}],
- SDNodeXForm xform = NOOP_SDNodeXForm> {
+ SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
dag Operands = ops;
dag Fragment = frag;
code Predicate = pred;
@@ -822,7 +854,7 @@ def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
//===----------------------------------------------------------------------===//
// Selection DAG Pattern Support.
//
-// Patterns are what are actually matched against the target-flavored
+// Patterns are what are actually matched against by the target-flavored
// instruction selection DAG. Instructions defined by the target implicitly
// define patterns in most cases, but patterns can also be explicitly added when
// an operation is defined by a sequence of instructions (e.g. loading a large
@@ -834,7 +866,7 @@ class Pattern<dag patternToMatch, list<dag> resultInstrs> {
dag PatternToMatch = patternToMatch;
list<dag> ResultInstrs = resultInstrs;
list<Predicate> Predicates = []; // See class Instruction in Target.td.
- int AddedComplexity = 0; // See class Instruction in Target.td.
+ int AddedComplexity = 0; // See class Instruction in Target.td.
}
// Pat - A simple (but common) form of a pattern, which produces a simple result
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
index 2be1834..c9ca722 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -59,8 +59,8 @@ public:
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@@ -75,8 +75,8 @@ public:
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@@ -91,7 +91,7 @@ public:
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
- const Value *DstSV, uint64_t DstOff) const {
+ MachinePointerInfo DstPtrInfo) const {
return SDValue();
}
};
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
index 0de1003..1239881 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -99,6 +99,8 @@ ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
/// createFunctionInliningPass - Return a new pass object that uses a heuristic
/// to inline direct function calls to small functions.
///
+/// The -inline-threshold command line option takes precedence over the
+/// threshold given here.
Pass *createFunctionInliningPass();
Pass *createFunctionInliningPass(int Threshold);
@@ -187,12 +189,6 @@ ModulePass *createBlockExtractorPass();
ModulePass *createStripDeadPrototypesPass();
//===----------------------------------------------------------------------===//
-/// createPartialSpecializationPass - This pass specializes functions for
-/// constant arguments.
-///
-ModulePass *createPartialSpecializationPass();
-
-//===----------------------------------------------------------------------===//
/// createFunctionAttrsPass - This pass discovers functions that do not access
/// memory, or only read memory, and gives them the readnone/readonly attribute.
/// It also discovers function arguments that are not captured by the function
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index 9c579ac..aa9873f 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -25,6 +25,9 @@ ModulePass *createEdgeProfilerPass();
// Insert optimal edge profiling instrumentation
ModulePass *createOptimalEdgeProfilerPass();
+// Insert path profiling instrumentation
+ModulePass *createPathProfilerPass();
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/RSProfiling.h b/contrib/llvm/include/llvm/Transforms/RSProfiling.h
deleted file mode 100644
index 02439e8..0000000
--- a/contrib/llvm/include/llvm/Transforms/RSProfiling.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//===- RSProfiling.cpp - Various profiling using random sampling ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the abstract interface that a profiler must implement to
-// support the random profiling transform.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_RSPROFILING_H
-#define LLVM_TRANSFORMS_RSPROFILING_H
-
-#include "llvm/Pass.h"
-
-namespace llvm {
- class Value;
-
- //===--------------------------------------------------------------------===//
- /// RSProfilers - The basic Random Sampling Profiler Interface Any profiler
- /// that implements this interface can be transformed by the random sampling
- /// pass to be sample based rather than always on.
- ///
- /// The only exposed function can be queried to find out if an instruction
- /// was original or if it was inserted by the profiler. Implementations of
- /// this interface are expected to chain to other implementations, such that
- /// multiple profilers can be support simultaniously.
- struct RSProfilers : public ModulePass {
- static char ID; // Pass identification, replacement for typeinfo
- RSProfilers() : ModulePass(&ID) {}
-
- /// isProfiling - This method returns true if the value passed it was
- /// inserted by the profiler.
- virtual bool isProfiling(Value* v) = 0;
- };
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
index 0c35d7e..6f2a38e 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -73,7 +73,8 @@ FunctionPass *createAggressiveDCEPass();
// ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas
// if possible.
//
-FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1);
+FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1,
+ bool UseDomTree = true);
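A minimal sketch of creating the pass with an explicit threshold; the pass
manager setup and the 128-byte value are illustrative assumptions:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Scalar.h"
    using namespace llvm;

    void addSROA(FunctionPassManager &FPM) {
      // UseDomTree selects the dominator-tree-based promotion path.
      FPM.add(createScalarReplAggregatesPass(128, /*UseDomTree=*/true));
    }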
//===----------------------------------------------------------------------===//
//
@@ -119,6 +120,12 @@ Pass *createLoopUnswitchPass(bool OptimizeForSize = false);
//===----------------------------------------------------------------------===//
//
+// LoopInstSimplify - This pass simplifies instructions in a loop's body.
+//
+Pass *createLoopInstSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
// LoopUnroll - This pass is a simple loop unrolling pass.
//
Pass *createLoopUnrollPass();
@@ -131,11 +138,10 @@ Pass *createLoopRotatePass();
//===----------------------------------------------------------------------===//
//
-// LoopIndexSplit - This pass divides loop's iteration range by spliting loop
-// such that each individual loop is executed efficiently.
+// LoopIdiom - This pass recognizes and replaces idioms in loops.
//
-Pass *createLoopIndexSplitPass();
-
+Pass *createLoopIdiomPass();
+
//===----------------------------------------------------------------------===//
//
// PromoteMemoryToRegister - This pass is used to promote memory references to
@@ -261,6 +267,13 @@ extern char &LCSSAID;
//===----------------------------------------------------------------------===//
//
+// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
+// tree.
+//
+FunctionPass *createEarlyCSEPass();
+
+//===----------------------------------------------------------------------===//
+//
// GVN - This pass performs global value numbering and redundant load
// elimination cotemporaneously.
//
@@ -329,6 +342,13 @@ Pass *createLowerAtomicPass();
//
Pass *createCorrelatedValuePropagationPass();
+//===----------------------------------------------------------------------===//
+//
+// InstructionSimplifier - Remove redundant instructions.
+//
+FunctionPass *createInstructionSimplifierPass();
+extern char &InstructionSimplifierID;
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h b/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
index be601e2..0678ecc 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/AddrModeMatcher.h
@@ -39,6 +39,12 @@ struct ExtAddrMode : public TargetLowering::AddrMode {
ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
void print(raw_ostream &OS) const;
void dump() const;
+
+ bool operator==(const ExtAddrMode& O) const {
+ return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
+ (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
+ (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
+ }
};
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
@@ -84,7 +90,7 @@ public:
bool Success =
AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
MemoryInst, Result).MatchAddr(V, 0);
- Success = Success; assert(Success && "Couldn't select *anything*?");
+ (void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
}
private:
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 0f54450..5335860 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -22,9 +22,10 @@
namespace llvm {
+class AliasAnalysis;
class Instruction;
class Pass;
-class AliasAnalysis;
+class ReturnInst;
/// DeleteDeadBlock - Delete the specified block, which must have no
/// predecessors.
@@ -35,7 +36,7 @@ void DeleteDeadBlock(BasicBlock *BB);
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
-void FoldSingleEntryPHINodes(BasicBlock *BB);
+void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0);
/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
/// is dead. Also recursively delete any operands that become dead as
@@ -46,7 +47,7 @@ bool DeleteDeadPHIs(BasicBlock *BB);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
-bool MergeBlockIntoPredecessor(BasicBlock* BB, Pass* P = 0);
+bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = 0);
// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
@@ -75,15 +76,6 @@ void FindFunctionBackedges(const Function &F,
SmallVectorImpl<std::pair<const BasicBlock*,const BasicBlock*> > &Result);
-// RemoveSuccessor - Change the specified terminator instruction such that its
-// successor #SuccNum no longer exists. Because this reduces the outgoing
-// degree of the current basic block, the actual terminator instruction itself
-// may have to be changed. In the case where the last successor of the block is
-// deleted, a return instruction is inserted in its place which can cause a
-// suprising change in program behavior if it is not expected.
-//
-void RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum);
-
/// GetSuccessorNumber - Search for the specified successor of basic block BB
/// and return its position in the terminator instruction's list of
/// successors. It is an error to call this with a block that is not a
@@ -180,7 +172,15 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
BasicBlock *SplitBlockPredecessors(BasicBlock *BB, BasicBlock *const *Preds,
unsigned NumPreds, const char *Suffix,
Pass *P = 0);
-
+
+/// FoldReturnIntoUncondBranch - This method duplicates the specified return
+/// instruction into a predecessor which ends in an unconditional branch. If
+/// the return instruction returns a value defined by a PHI, propagate the
+/// right value into the return. It returns the new return instruction in the
+/// predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+ BasicBlock *Pred);
+
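An illustrative call site, assuming RI is a return in BB and Pred ends in an
unconditional branch into BB:

    // Duplicate the return into the branching predecessor.
    ReturnInst *NewRet = FoldReturnIntoUncondBranch(RI, BB, Pred);
    // NewRet now terminates Pred; if RI returned a PHI-defined value, the
    // incoming value for Pred has been propagated into the new return.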
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index c75c142..e825938 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -47,11 +47,6 @@ namespace llvm {
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const TargetData *TD, StringRef Name = "strncpy");
-
- /// EmitMemCpy - Emit a call to the memcpy function to the builder. This
- /// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
- Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
- bool isVolatile, IRBuilder<> &B, const TargetData *TD);
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
@@ -59,11 +54,6 @@ namespace llvm {
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const TargetData *TD);
- /// EmitMemMove - Emit a call to the memmove function to the builder. This
- /// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
- Value *EmitMemMove(Value *Dst, Value *Src, Value *Len, unsigned Align,
- bool isVolatile, IRBuilder<> &B, const TargetData *TD);
-
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
@@ -73,10 +63,6 @@ namespace llvm {
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const TargetData *TD);
- /// EmitMemSet - Emit a call to the memset function
- Value *EmitMemSet(Value *Dst, Value *Val, Value *Len, bool isVolatile,
- IRBuilder<> &B, const TargetData *TD);
-
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
/// 'Op' and returns one value with the same type. If 'Op' is a long double,
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
index 62bf92a..24ebb10 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
namespace llvm {
@@ -46,7 +47,7 @@ class AllocaInst;
/// CloneModule - Return an exact copy of the specified module
///
Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
+Module *CloneModule(const Module *M, ValueToValueMapTy &VMap);
/// ClonedCodeInfo - This struct can be used to capture information about code
/// being cloned, while it is being cloned.
@@ -102,7 +103,7 @@ struct ClonedCodeInfo {
/// parameter.
///
BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- ValueMap<const Value*, Value*> &VMap,
+ ValueToValueMapTy &VMap,
const Twine &NameSuffix = "", Function *F = 0,
ClonedCodeInfo *CodeInfo = 0);
@@ -110,7 +111,7 @@ BasicBlock *CloneBasicBlock(const BasicBlock *BB,
/// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
/// VMap using old blocks to new blocks mapping.
Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI,
- ValueMap<const Value *, Value *> &VMap, Pass *P);
+ ValueToValueMapTy &VMap, Pass *P);
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
@@ -125,14 +126,14 @@ Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI,
/// mappings.
///
Function *CloneFunction(const Function *F,
- ValueMap<const Value*, Value*> &VMap,
+ ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = 0);
/// CloneFunction - Version of the function that doesn't need the VMap.
///
inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- ValueMap<const Value*, Value*> VMap;
+ ValueToValueMapTy VMap;
return CloneFunction(F, VMap, CodeInfo);
}
@@ -146,7 +147,7 @@ inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
/// mappings.
///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueMap<const Value*, Value*> &VMap,
+ ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
@@ -164,7 +165,7 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// mappings.
///
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueMap<const Value*, Value*> &VMap,
+ ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
index caae27f..26b5dd8 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -69,6 +69,10 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN);
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
+///
+/// WARNING: Do not use this function on unreachable blocks, as recursive
+/// simplification is not able to handle corner-case scenarios that can
+/// arise in them.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
//===----------------------------------------------------------------------===//
@@ -141,6 +145,18 @@ AllocaInst *DemoteRegToStack(Instruction &X,
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
+/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
+/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
+/// and it is more than the alignment of the ultimate object, see if we can
+/// increase the alignment of the ultimate object, making this check succeed.
+unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+ const TargetData *TD = 0);
+
+/// getKnownAlignment - Try to infer an alignment for the specified pointer.
+static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
+ return getOrEnforceKnownAlignment(V, 0, TD);
+}
+
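For example, a hedged sketch where Ptr and TD stand for a pointer value and
target data already in scope:

    // Alignment we can prove for Ptr, and a second query that additionally
    // tries to raise the underlying object's alignment to 16 if needed.
    unsigned Known = getKnownAlignment(Ptr, TD);
    unsigned AtLeast16 = getOrEnforceKnownAlignment(Ptr, 16, TD);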
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 35cfadd..98d51a2 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -38,8 +38,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
/// made to the IR.
///
void PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
- DominatorTree &DT, DominanceFrontier &DF,
- AliasSetTracker *AST = 0);
+ DominatorTree &DT, AliasSetTracker *AST = 0);
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
index e50a6b1..b4048b9 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -108,6 +108,55 @@ private:
void operator=(const SSAUpdater&); // DO NOT IMPLEMENT
SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT
};
+
+/// LoadAndStorePromoter - This little helper class provides a convenient way to
+/// promote a collection of loads and stores into SSA Form using the SSAUpdater.
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
+///
+class LoadAndStorePromoter {
+protected:
+ SSAUpdater &SSA;
+public:
+ LoadAndStorePromoter(const SmallVectorImpl<Instruction*> &Insts,
+ SSAUpdater &S, StringRef Name = StringRef());
+ virtual ~LoadAndStorePromoter() {}
+
+ /// run - This does the promotion. Insts is a list of loads and stores to
+ /// promote, and Name is the basename for the PHIs to insert. After this is
+ /// complete, the loads and stores are removed from the code.
+ void run(const SmallVectorImpl<Instruction*> &Insts) const;
+
+
+ /// Return true if the specified instruction is in the Inst list (which was
+ /// passed into the run method). Clients should implement this with a more
+ /// efficient version if possible.
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const {
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+ if (Insts[i] == I)
+ return true;
+ return false;
+ }
+
+  /// doExtraRewritesBeforeFinalDeletion - This hook is invoked after all the
+  /// stores are found and inserted as available values, but before the loads
+  /// and stores are removed from the code.
+ virtual void doExtraRewritesBeforeFinalDeletion() const {
+ }
+
+ /// replaceLoadWithValue - Clients can choose to implement this to get
+  /// notified right before a load is RAUW'd with another value.
+ virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
+ }
+
+ /// This is called before each instruction is deleted.
+ virtual void instructionDeleted(Instruction *I) const {
+ }
+
+};
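A minimal subclass sketch using only the hooks declared above; the tracing is
an illustrative stand-in for real client bookkeeping:

    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    class TracingPromoter : public LoadAndStorePromoter {
    public:
      TracingPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S)
        : LoadAndStorePromoter(Insts, S, "promoted") {}
      virtual void instructionDeleted(Instruction *I) const {
        errs() << "deleted: " << *I << '\n'; // observe each removed load/store
      }
    };

    // Usage: collect the loads/stores into Insts, then
    //   SSAUpdater SSA;
    //   TracingPromoter(Insts, SSA).run(Insts);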
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index a5060e6..54506cf 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -27,7 +27,9 @@ struct UnifyFunctionExitNodes : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
UnifyFunctionExitNodes() : FunctionPass(ID),
- ReturnBlock(0), UnwindBlock(0) {}
+ ReturnBlock(0), UnwindBlock(0) {
+ initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+ }
// We can preserve non-critical-edgeness when we unify function exit nodes
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
index 5274112..d612213 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -20,12 +20,31 @@
namespace llvm {
class Value;
class Instruction;
- typedef ValueMap<const Value *, Value *> ValueToValueMapTy;
+ typedef ValueMap<const Value *, TrackingVH<Value> > ValueToValueMapTy;
+ /// RemapFlags - These are flags that the value mapping APIs allow.
+ enum RemapFlags {
+ RF_None = 0,
+
+ /// RF_NoModuleLevelChanges - If this flag is set, the remapper knows that
+ /// only local values within a function (such as an instruction or argument)
+ /// are mapped, not global values like functions and global metadata.
+ RF_NoModuleLevelChanges = 1,
+
+    /// RF_IgnoreMissingEntries - If this flag is set, the remapper ignores
+    /// entries that are not in the value map. If it is unset, the remapper
+    /// aborts when asked to remap an operand that has no entry in the mapping.
+ RF_IgnoreMissingEntries = 2
+ };
+
+ static inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+ return RemapFlags(unsigned(LHS)|unsigned(RHS));
+ }
+
Value *MapValue(const Value *V, ValueToValueMapTy &VM,
- bool ModuleLevelChanges);
+ RemapFlags Flags = RF_None);
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
- bool ModuleLevelChanges);
+ RemapFlags Flags = RF_None);
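Combining the flags with the operator| defined above might look like this,
where I and VMap are assumed to be in scope:

    // Remap I's operands, staying function-local and tolerating values that
    // have no entry in the map.
    RemapInstruction(I, VMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);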
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Type.h b/contrib/llvm/include/llvm/Type.h
index f7d6fd5..0939d67 100644
--- a/contrib/llvm/include/llvm/Type.h
+++ b/contrib/llvm/include/llvm/Type.h
@@ -12,7 +12,6 @@
#include "llvm/AbstractTypeUser.h"
#include "llvm/Support/Casting.h"
-#include "llvm/System/DataTypes.h"
#include "llvm/ADT/GraphTraits.h"
#include <string>
#include <vector>
@@ -76,19 +75,20 @@ public:
PPC_FP128TyID, ///< 5: 128 bit floating point type (two 64-bits)
LabelTyID, ///< 6: Labels
MetadataTyID, ///< 7: Metadata
+ X86_MMXTyID, ///< 8: MMX vectors (64 bits)
// Derived types... see DerivedTypes.h file...
// Make sure FirstDerivedTyID stays up to date!!!
- IntegerTyID, ///< 8: Arbitrary bit width integers
- FunctionTyID, ///< 9: Functions
- StructTyID, ///< 10: Structures
- ArrayTyID, ///< 11: Arrays
- PointerTyID, ///< 12: Pointers
- OpaqueTyID, ///< 13: Opaque: type with unknown structure
- VectorTyID, ///< 14: SIMD 'packed' format, or other vector type
+ IntegerTyID, ///< 9: Arbitrary bit width integers
+ FunctionTyID, ///< 10: Functions
+ StructTyID, ///< 11: Structures
+ ArrayTyID, ///< 12: Arrays
+ PointerTyID, ///< 13: Pointers
+ OpaqueTyID, ///< 14: Opaque: type with unknown structure
+ VectorTyID, ///< 15: SIMD 'packed' format, or other vector type
NumTypeIDs, // Must remain as last defined ID
- LastPrimitiveTyID = MetadataTyID,
+ LastPrimitiveTyID = X86_MMXTyID,
FirstDerivedTyID = IntegerTyID
};
@@ -212,6 +212,9 @@ public:
bool isFloatingPointTy() const { return ID == FloatTyID || ID == DoubleTyID ||
ID == X86_FP80TyID || ID == FP128TyID || ID == PPC_FP128TyID; }
+ /// isX86_MMXTy - Return true if this is X86 MMX.
+ bool isX86_MMXTy() const { return ID == X86_MMXTyID; }
+
/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
///
bool isFPOrFPVectorTy() const;
@@ -310,7 +313,8 @@ public:
///
bool isSized() const {
// If it's a primitive, it is always sized.
- if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID)
+ if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID ||
+ ID == X86_MMXTyID)
return true;
// If it is not something that can have a size (e.g. a function or label),
// it doesn't have a size.
@@ -400,6 +404,7 @@ public:
static const Type *getX86_FP80Ty(LLVMContext &C);
static const Type *getFP128Ty(LLVMContext &C);
static const Type *getPPC_FP128Ty(LLVMContext &C);
+ static const Type *getX86_MMXTy(LLVMContext &C);
static const IntegerType *getIntNTy(LLVMContext &C, unsigned N);
static const IntegerType *getInt1Ty(LLVMContext &C);
static const IntegerType *getInt8Ty(LLVMContext &C);
@@ -416,6 +421,7 @@ public:
static const PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
+ static const PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getIntNPtrTy(LLVMContext &C, unsigned N,
unsigned AS = 0);
static const PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
diff --git a/contrib/llvm/include/llvm/TypeSymbolTable.h b/contrib/llvm/include/llvm/TypeSymbolTable.h
index 26b1dbf..9fdcb98 100644
--- a/contrib/llvm/include/llvm/TypeSymbolTable.h
+++ b/contrib/llvm/include/llvm/TypeSymbolTable.h
@@ -16,12 +16,11 @@
#include "llvm/Type.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
#include <map>
namespace llvm {
-class StringRef;
-
/// This class provides a symbol table of name/type pairs with operations to
/// support constructing, searching and iterating over the symbol table. The
/// class derives from AbstractTypeUser so that the contents of the symbol
diff --git a/contrib/llvm/include/llvm/Use.h b/contrib/llvm/include/llvm/Use.h
index e1ebc6a..ccbdd7f 100644
--- a/contrib/llvm/include/llvm/Use.h
+++ b/contrib/llvm/include/llvm/Use.h
@@ -25,7 +25,6 @@
#ifndef LLVM_USE_H
#define LLVM_USE_H
-#include "llvm/Support/Casting.h"
#include "llvm/ADT/PointerIntPair.h"
#include <cstddef>
#include <iterator>
@@ -35,9 +34,8 @@ namespace llvm {
class Value;
class User;
class Use;
-
-/// Tag - generic tag type for (at least 32 bit) pointers
-enum Tag { noTag, tagOne, tagTwo, tagThree };
+template<typename>
+struct simplify_type;
// Use** is only 4-byte aligned.
template<>
@@ -67,17 +65,19 @@ private:
Use(const Use &U);
/// Destructor - Only for zap()
- inline ~Use() {
+ ~Use() {
if (Val) removeFromList();
}
- /// Default ctor - This leaves the Use completely uninitialized. The only
- /// thing that is valid to do with this use is to call the "init" method.
- inline Use() {}
- enum PrevPtrTag { zeroDigitTag = noTag
- , oneDigitTag = tagOne
- , stopTag = tagTwo
- , fullStopTag = tagThree };
+ enum PrevPtrTag { zeroDigitTag
+ , oneDigitTag
+ , stopTag
+ , fullStopTag };
+
+ /// Constructor
+ Use(PrevPtrTag tag) : Val(0) {
+ Prev.setInt(tag);
+ }
public:
/// Normally Use will just implicitly convert to a Value* that it holds.
@@ -112,11 +112,9 @@ public:
/// a User changes.
static void zap(Use *Start, const Use *Stop, bool del = false);
- /// getPrefix - Return deletable pointer if appropriate
- Use *getPrefix();
private:
const Use* getImpliedUser() const;
- static Use *initTags(Use *Start, Use *Stop, ptrdiff_t Done = 0);
+ static Use *initTags(Use *Start, Use *Stop);
Value *Val;
Use *Next;
@@ -210,6 +208,15 @@ public:
unsigned getOperandNo() const;
};
+//===----------------------------------------------------------------------===//
+// AugmentedUse layout struct
+//===----------------------------------------------------------------------===//
+
+struct AugmentedUse : public Use {
+ PointerIntPair<User*, 1, unsigned> ref;
+ AugmentedUse(); // not implemented
+};
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/User.h b/contrib/llvm/include/llvm/User.h
index f827795..1363495 100644
--- a/contrib/llvm/include/llvm/User.h
+++ b/contrib/llvm/include/llvm/User.h
@@ -29,20 +29,6 @@ namespace llvm {
template <class>
struct OperandTraits;
-class User;
-
-/// OperandTraits<User> - specialization to User
-template <>
-struct OperandTraits<User> {
- static inline Use *op_begin(User*);
- static inline Use *op_end(User*);
- static inline unsigned operands(const User*);
- template <class U>
- struct Layout {
- typedef U overlay;
- };
-};
-
class User : public Value {
User(const User &); // Do not implement
void *operator new(size_t); // Do not implement
@@ -61,21 +47,18 @@ protected:
unsigned NumOperands;
void *operator new(size_t s, unsigned Us);
- void *operator new(size_t s, unsigned Us, bool Prefix);
User(const Type *ty, unsigned vty, Use *OpList, unsigned NumOps)
: Value(ty, vty), OperandList(OpList), NumOperands(NumOps) {}
Use *allocHungoffUses(unsigned) const;
- void dropHungoffUses(Use *U) {
- if (OperandList == U) {
- OperandList = 0;
- NumOperands = 0;
- }
- Use::zap(U, U->getImpliedUser(), true);
+ void dropHungoffUses() {
+ Use::zap(OperandList, OperandList + NumOperands, true);
+ OperandList = 0;
+ // Reset NumOperands so User::operator delete() does the right thing.
+ NumOperands = 0;
}
public:
~User() {
- if ((intptr_t(OperandList) & 1) == 0)
- Use::zap(OperandList, OperandList + NumOperands);
+ Use::zap(OperandList, OperandList + NumOperands);
}
/// operator delete - free memory allocated for User and Use objects
void operator delete(void *Usr);
@@ -158,18 +141,6 @@ public:
}
};
-inline Use *OperandTraits<User>::op_begin(User *U) {
- return U->op_begin();
-}
-
-inline Use *OperandTraits<User>::op_end(User *U) {
- return U->op_end();
-}
-
-inline unsigned OperandTraits<User>::operands(const User *U) {
- return U->getNumOperands();
-}
-
template<> struct simplify_type<User::op_iterator> {
typedef Value* SimpleType;
diff --git a/contrib/llvm/include/llvm/Value.h b/contrib/llvm/include/llvm/Value.h
index 8740f35..130e273 100644
--- a/contrib/llvm/include/llvm/Value.h
+++ b/contrib/llvm/include/llvm/Value.h
@@ -252,6 +252,12 @@ public:
return SubclassOptionalData;
}
+ /// clearSubclassOptionalData - Clear the optional flags contained in
+ /// this value.
+ void clearSubclassOptionalData() {
+ SubclassOptionalData = 0;
+ }
+
/// hasSameSubclassOptionalData - Test whether the optional flags contained
/// in this value are equal to the optional flags in the given value.
bool hasSameSubclassOptionalData(const Value *V) const {
@@ -285,15 +291,9 @@ public:
return const_cast<Value*>(this)->stripPointerCasts();
}
- /// getUnderlyingObject - This method strips off any GEP address adjustments
- /// and pointer casts from the specified value, returning the original object
- /// being addressed. Note that the returned value has pointer type if the
- /// specified value does. If the MaxLookup value is non-zero, it limits the
- /// number of instructions to be stripped off.
- Value *getUnderlyingObject(unsigned MaxLookup = 6);
- const Value *getUnderlyingObject(unsigned MaxLookup = 6) const {
- return const_cast<Value*>(this)->getUnderlyingObject(MaxLookup);
- }
+ /// isDereferenceablePointer - Test if this value is always a pointer to
+ /// allocated and suitably aligned memory for a simple load or store.
+ bool isDereferenceablePointer() const;
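For instance, a transformation might gate speculation on this predicate; a
one-line sketch with Ptr assumed in scope:

    // Only speculate the access if the address is provably dereferenceable.
    bool CanSpeculate = Ptr->isDereferenceablePointer();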
/// DoPHITranslation - If this value is a PHI node with CurBB as its parent,
/// return the value in the PHI node corresponding to PredBB. If not, return
diff --git a/contrib/llvm/include/llvm/ValueSymbolTable.h b/contrib/llvm/include/llvm/ValueSymbolTable.h
index 35fc97b..1738cc4 100644
--- a/contrib/llvm/include/llvm/ValueSymbolTable.h
+++ b/contrib/llvm/include/llvm/ValueSymbolTable.h
@@ -16,7 +16,7 @@
#include "llvm/Value.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
template<typename ValueSubClass, typename ItemParentClass>
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 1f2528f..be02ddb 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -30,12 +30,13 @@
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<AliasAnalysis> Z("Alias Analysis");
+INITIALIZE_ANALYSIS_GROUP(AliasAnalysis, "Alias Analysis", NoAA)
char AliasAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
@@ -43,15 +44,15 @@ char AliasAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
AliasAnalysis::AliasResult
-AliasAnalysis::alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+AliasAnalysis::alias(const Location &LocA, const Location &LocB) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->alias(V1, V1Size, V2, V2Size);
+ return AA->alias(LocA, LocB);
}
-bool AliasAnalysis::pointsToConstantMemory(const Value *P) {
+bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
+ bool OrLocal) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->pointsToConstantMemory(P);
+ return AA->pointsToConstantMemory(Loc, OrLocal);
}
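Callers now describe memory by Location rather than by a raw pointer/size
pair; a hedged sketch (PtrA, PtrB, the sizes, and the two-argument Location
construction are assumptions for illustration):

    AliasAnalysis::Location LocA(PtrA, SizeA);
    AliasAnalysis::Location LocB(PtrB, SizeB);
    if (AA.alias(LocA, LocB) == AliasAnalysis::NoAlias) {
      // The two accesses provably do not overlap.
    }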
void AliasAnalysis::deleteValue(Value *V) {
@@ -64,49 +65,55 @@ void AliasAnalysis::copyValue(Value *From, Value *To) {
AA->copyValue(From, To);
}
+void AliasAnalysis::addEscapingUse(Use &U) {
+ assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+ AA->addEscapingUse(U);
+}
+
+
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
- // Don't assert AA because BasicAA calls us in order to make use of the
- // logic here.
+ const Location &Loc) {
+ assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior MRB = getModRefBehavior(CS);
if (MRB == DoesNotAccessMemory)
return NoModRef;
ModRefResult Mask = ModRef;
- if (MRB == OnlyReadsMemory)
+ if (onlyReadsMemory(MRB))
Mask = Ref;
- else if (MRB == AliasAnalysis::AccessesArguments) {
+
+ if (onlyAccessesArgPointees(MRB)) {
bool doesAlias = false;
- for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
- AI != AE; ++AI)
- if (!isNoAlias(*AI, ~0U, P, Size)) {
- doesAlias = true;
- break;
- }
+ if (doesAccessArgPointees(MRB))
+ for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ AI != AE; ++AI)
+ if (!isNoAlias(Location(*AI), Loc)) {
+ doesAlias = true;
+ break;
+ }
if (!doesAlias)
return NoModRef;
}
- // If P points to a constant memory location, the call definitely could not
+ // If Loc is a constant memory location, the call definitely could not
// modify the memory location.
- if ((Mask & Mod) && pointsToConstantMemory(P))
+ if ((Mask & Mod) && pointsToConstantMemory(Loc))
Mask = ModRefResult(Mask & ~Mod);
- // If this is BasicAA, don't forward.
+ // If this is the end of the chain, don't forward.
if (!AA) return Mask;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any mask we've managed to compute.
- return ModRefResult(AA->getModRefInfo(CS, P, Size) & Mask);
+ return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
- // Don't assert AA because BasicAA calls us in order to make use of the
- // logic here.
+ assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
// If CS1 or CS2 are readnone, they don't interact.
ModRefBehavior CS1B = getModRefBehavior(CS1);
@@ -116,45 +123,47 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
if (CS2B == DoesNotAccessMemory) return NoModRef;
// If they both only read from memory, there is no dependence.
- if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
+ if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
return NoModRef;
AliasAnalysis::ModRefResult Mask = ModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
- if (CS1B == OnlyReadsMemory)
+ if (onlyReadsMemory(CS1B))
Mask = ModRefResult(Mask & Ref);
// If CS2 only access memory through arguments, accumulate the mod/ref
// information from CS1's references to the memory referenced by
// CS2's arguments.
- if (CS2B == AccessesArguments) {
+ if (onlyAccessesArgPointees(CS2B)) {
AliasAnalysis::ModRefResult R = NoModRef;
- for (ImmutableCallSite::arg_iterator
- I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
- R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
- if (R == Mask)
- break;
- }
+ if (doesAccessArgPointees(CS2B))
+ for (ImmutableCallSite::arg_iterator
+ I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
+ R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
+ if (R == Mask)
+ break;
+ }
return R;
}
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
- if (CS1B == AccessesArguments) {
+ if (onlyAccessesArgPointees(CS1B)) {
AliasAnalysis::ModRefResult R = NoModRef;
- for (ImmutableCallSite::arg_iterator
- I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
- if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
- R = Mask;
- break;
- }
+ if (doesAccessArgPointees(CS1B))
+ for (ImmutableCallSite::arg_iterator
+ I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
+ if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
+ R = Mask;
+ break;
+ }
if (R == NoModRef)
return R;
}
- // If this is BasicAA, don't forward.
+ // If this is the end of the chain, don't forward.
if (!AA) return Mask;
// Otherwise, fall back to the next AA in the chain. But we can merge
@@ -164,8 +173,7 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
AliasAnalysis::ModRefBehavior
AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
- // Don't assert AA because BasicAA calls us in order to make use of the
- // logic here.
+ assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior Min = UnknownModRefBehavior;
@@ -174,12 +182,12 @@ AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
if (const Function *F = CS.getCalledFunction())
Min = getModRefBehavior(F);
- // If this is BasicAA, don't forward.
+ // If this is the end of the chain, don't forward.
if (!AA) return Min;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any result we've managed to compute.
- return std::min(AA->getModRefBehavior(CS), Min);
+ return ModRefBehavior(AA->getModRefBehavior(CS) & Min);
}
AliasAnalysis::ModRefBehavior
@@ -188,20 +196,66 @@ AliasAnalysis::getModRefBehavior(const Function *F) {
return AA->getModRefBehavior(F);
}
-
//===----------------------------------------------------------------------===//
// AliasAnalysis non-virtual helper method implementation
//===----------------------------------------------------------------------===//
+AliasAnalysis::Location AliasAnalysis::getLocation(const LoadInst *LI) {
+ return Location(LI->getPointerOperand(),
+ getTypeStoreSize(LI->getType()),
+ LI->getMetadata(LLVMContext::MD_tbaa));
+}
+
+AliasAnalysis::Location AliasAnalysis::getLocation(const StoreInst *SI) {
+ return Location(SI->getPointerOperand(),
+ getTypeStoreSize(SI->getValueOperand()->getType()),
+ SI->getMetadata(LLVMContext::MD_tbaa));
+}
+
+AliasAnalysis::Location AliasAnalysis::getLocation(const VAArgInst *VI) {
+ return Location(VI->getPointerOperand(),
+ UnknownSize,
+ VI->getMetadata(LLVMContext::MD_tbaa));
+}
+
+
+AliasAnalysis::Location
+AliasAnalysis::getLocationForSource(const MemTransferInst *MTI) {
+ uint64_t Size = UnknownSize;
+ if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
+ Size = C->getValue().getZExtValue();
+
+ // memcpy/memmove can have TBAA tags. For memcpy, they apply
+ // to both the source and the destination.
+ MDNode *TBAATag = MTI->getMetadata(LLVMContext::MD_tbaa);
+
+ return Location(MTI->getRawSource(), Size, TBAATag);
+}
+
+AliasAnalysis::Location
+AliasAnalysis::getLocationForDest(const MemIntrinsic *MTI) {
+ uint64_t Size = UnknownSize;
+ if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
+ Size = C->getValue().getZExtValue();
+
+ // memcpy/memmove can have TBAA tags. For memcpy, they apply
+ // to both the source and the destination.
+ MDNode *TBAATag = MTI->getMetadata(LLVMContext::MD_tbaa);
+
+ return Location(MTI->getRawDest(), Size, TBAATag);
+}
+
+
+
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const LoadInst *L, const Value *P, unsigned Size) {
+AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
// Be conservative in the face of volatile.
if (L->isVolatile())
return ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
- if (!alias(L->getOperand(0), getTypeStoreSize(L->getType()), P, Size))
+ if (!alias(getLocation(L), Loc))
return NoModRef;
// Otherwise, a load just reads.
@@ -209,20 +263,19 @@ AliasAnalysis::getModRefInfo(const LoadInst *L, const Value *P, unsigned Size) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const StoreInst *S, const Value *P, unsigned Size) {
+AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
// Be conservative in the face of volatile.
if (S->isVolatile())
return ModRef;
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
- if (!alias(S->getOperand(1),
- getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
+ if (!alias(getLocation(S), Loc))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have been
// modified by this store.
- if (pointsToConstantMemory(P))
+ if (pointsToConstantMemory(Loc))
return NoModRef;
// Otherwise, a store just writes.
@@ -230,29 +283,21 @@ AliasAnalysis::getModRefInfo(const StoreInst *S, const Value *P, unsigned Size)
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const VAArgInst *V, const Value *P, unsigned Size) {
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
- if (!alias(V->getOperand(0), UnknownSize, P, Size))
+ if (!alias(getLocation(V), Loc))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have been
// modified by this va_arg.
- if (pointsToConstantMemory(P))
+ if (pointsToConstantMemory(Loc))
return NoModRef;
// Otherwise, a va_arg reads and writes.
return ModRef;
}
-
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getIntrinsicModRefBehavior(unsigned iid) {
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
-}
-
// AliasAnalysis destructor: DO NOT move this to the header file for
// AliasAnalysis or else clients of the AliasAnalysis class may not depend on
// the AliasAnalysis.o file in the current .a file, causing alias analysis
@@ -277,16 +322,16 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
-unsigned AliasAnalysis::getTypeStoreSize(const Type *Ty) {
- return TD ? TD->getTypeStoreSize(Ty) : ~0u;
+uint64_t AliasAnalysis::getTypeStoreSize(const Type *Ty) {
+ return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the value pointed to by Ptr.
///
bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
- const Value *Ptr, unsigned Size) {
- return canInstructionRangeModify(BB.front(), BB.back(), Ptr, Size);
+ const Location &Loc) {
+ return canInstructionRangeModify(BB.front(), BB.back(), Loc);
}
/// canInstructionRangeModify - Return true if it is possible for the execution
@@ -296,7 +341,7 @@ bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
///
bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
const Instruction &I2,
- const Value *Ptr, unsigned Size) {
+ const Location &Loc) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
BasicBlock::const_iterator I = &I1;
@@ -304,7 +349,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
- if (getModRefInfo(I, Ptr, Size) & Mod)
+ if (getModRefInfo(I, Loc) & Mod)
return true;
return false;
}
@@ -336,9 +381,3 @@ bool llvm::isIdentifiedObject(const Value *V) {
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
}
-
-// Because of the way .a files work, we must force the BasicAA implementation to
-// be pulled in if the AliasAnalysis classes are pulled in. Otherwise we run
-// the risk of AliasAnalysis being used, but the default implementation not
-// being linked into the tool that uses it.
-DEFINING_FILE_FOR(AliasAnalysis)
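The interface change running through this file replaces (Value*, unsigned Size)
pairs with AliasAnalysis::Location, which bundles the pointer, a 64-bit size,
and a TBAA tag. A minimal client sketch using the getLocation() helper added
above; storeClobbersLoad is a hypothetical name for illustration:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Ask whether a store may modify the memory a load reads: one Location
    // now carries the pointer, the store size, and the !tbaa tag together.
    static bool storeClobbersLoad(AliasAnalysis &AA,
                                  const LoadInst *LI, const StoreInst *SI) {
      AliasAnalysis::Location LoadLoc = AA.getLocation(LI);
      return (AA.getModRefInfo(SI, LoadLoc) & AliasAnalysis::Mod) != 0;
    }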
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index b178041..d947220 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -29,13 +29,14 @@ PrintAllFailures("count-aa-print-all-failed-queries", cl::ReallyHidden);
namespace {
class AliasAnalysisCounter : public ModulePass, public AliasAnalysis {
- unsigned No, May, Must;
+ unsigned No, May, Partial, Must;
unsigned NoMR, JustRef, JustMod, MR;
Module *M;
public:
static char ID; // Class identification, replacement for typeinfo
AliasAnalysisCounter() : ModulePass(ID) {
- No = May = Must = 0;
+ initializeAliasAnalysisCounterPass(*PassRegistry::getPassRegistry());
+ No = May = Partial = Must = 0;
NoMR = JustRef = JustMod = MR = 0;
}
@@ -44,7 +45,7 @@ namespace {
<< Val*100/Sum << "%)\n";
}
~AliasAnalysisCounter() {
- unsigned AASum = No+May+Must;
+ unsigned AASum = No+May+Partial+Must;
unsigned MRSum = NoMR+JustRef+JustMod+MR;
if (AASum + MRSum) { // Print a report if any counted queries occurred...
errs() << "\n===== Alias Analysis Counter Report =====\n"
@@ -53,9 +54,12 @@ namespace {
if (AASum) {
printLine("no alias", No, AASum);
printLine("may alias", May, AASum);
+ printLine("partial alias", Partial, AASum);
printLine("must alias", Must, AASum);
errs() << " Alias Analysis Counter Summary: " << No*100/AASum << "%/"
- << May*100/AASum << "%/" << Must*100/AASum<<"%\n\n";
+ << May*100/AASum << "%/"
+ << Partial*100/AASum << "%/"
+ << Must*100/AASum << "%\n\n";
}
errs() << " " << MRSum << " Total Mod/Ref Queries Performed\n";
@@ -94,17 +98,16 @@ namespace {
}
// FIXME: We could count these too...
- bool pointsToConstantMemory(const Value *P) {
- return getAnalysis<AliasAnalysis>().pointsToConstantMemory(P);
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+ return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc, OrLocal);
}
// Forwarding functions: just delegate to a real AA implementation, counting
// the number of responses...
- AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ AliasResult alias(const Location &LocA, const Location &LocB);
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
@@ -114,32 +117,32 @@ namespace {
char AliasAnalysisCounter::ID = 0;
INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
- "Count Alias Analysis Query Responses", false, true, false);
+ "Count Alias Analysis Query Responses", false, true, false)
ModulePass *llvm::createAliasAnalysisCounterPass() {
return new AliasAnalysisCounter();
}
AliasAnalysis::AliasResult
-AliasAnalysisCounter::alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
- AliasResult R = getAnalysis<AliasAnalysis>().alias(V1, V1Size, V2, V2Size);
+AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
+ AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
const char *AliasString;
switch (R) {
default: llvm_unreachable("Unknown alias type!");
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
+ case PartialAlias: Partial++; AliasString = "Partial alias"; break;
case MustAlias: Must++; AliasString = "Must alias"; break;
}
if (PrintAll || (PrintAllFailures && R == MayAlias)) {
errs() << AliasString << ":\t";
- errs() << "[" << V1Size << "B] ";
- WriteAsOperand(errs(), V1, true, M);
+ errs() << "[" << LocA.Size << "B] ";
+ WriteAsOperand(errs(), LocA.Ptr, true, M);
errs() << ", ";
- errs() << "[" << V2Size << "B] ";
- WriteAsOperand(errs(), V2, true, M);
+ errs() << "[" << LocB.Size << "B] ";
+ WriteAsOperand(errs(), LocB.Ptr, true, M);
errs() << "\n";
}
@@ -148,8 +151,8 @@ AliasAnalysisCounter::alias(const Value *V1, unsigned V1Size,
AliasAnalysis::ModRefResult
AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
- ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, P, Size);
+ const Location &Loc) {
+ ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
const char *MRString;
switch (R) {
@@ -162,8 +165,8 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
if (PrintAll || (PrintAllFailures && R == ModRef)) {
errs() << MRString << ": Ptr: ";
- errs() << "[" << Size << "B] ";
- WriteAsOperand(errs(), P, true, M);
+ errs() << "[" << Loc.Size << "B] ";
+ WriteAsOperand(errs(), Loc.Ptr, true, M);
errs() << "\t<->" << *CS.getInstruction() << '\n';
}
return R;
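The counter shows what every AliasResult consumer must now absorb:
PartialAlias is a fourth possible answer, so previously exhaustive switches
need a new case. A small sketch of the pattern (aliasName is illustrative):

    #include "llvm/Analysis/AliasAnalysis.h"
    using namespace llvm;

    static const char *aliasName(AliasAnalysis::AliasResult R) {
      switch (R) {
      case AliasAnalysis::NoAlias:      return "no alias";
      case AliasAnalysis::MayAlias:     return "may alias";
      case AliasAnalysis::PartialAlias: return "partial alias"; // new result
      case AliasAnalysis::MustAlias:    return "must alias";
      }
      return "unknown";
    }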
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index ce363cb..1afc1b7 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -36,6 +36,7 @@ static cl::opt<bool> PrintAll("print-all-alias-modref-info", cl::ReallyHidden);
static cl::opt<bool> PrintNoAlias("print-no-aliases", cl::ReallyHidden);
static cl::opt<bool> PrintMayAlias("print-may-aliases", cl::ReallyHidden);
+static cl::opt<bool> PrintPartialAlias("print-partial-aliases", cl::ReallyHidden);
static cl::opt<bool> PrintMustAlias("print-must-aliases", cl::ReallyHidden);
static cl::opt<bool> PrintNoModRef("print-no-modref", cl::ReallyHidden);
@@ -45,12 +46,14 @@ static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden);
namespace {
class AAEval : public FunctionPass {
- unsigned NoAlias, MayAlias, MustAlias;
+ unsigned NoAlias, MayAlias, PartialAlias, MustAlias;
unsigned NoModRef, Mod, Ref, ModRef;
public:
static char ID; // Pass identification, replacement for typeid
- AAEval() : FunctionPass(ID) {}
+ AAEval() : FunctionPass(ID) {
+ initializeAAEvalPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
@@ -58,11 +61,12 @@ namespace {
}
bool doInitialization(Module &M) {
- NoAlias = MayAlias = MustAlias = 0;
+ NoAlias = MayAlias = PartialAlias = MustAlias = 0;
NoModRef = Mod = Ref = ModRef = 0;
if (PrintAll) {
- PrintNoAlias = PrintMayAlias = PrintMustAlias = true;
+ PrintNoAlias = PrintMayAlias = true;
+ PrintPartialAlias = PrintMustAlias = true;
PrintNoModRef = PrintMod = PrintRef = PrintModRef = true;
}
return false;
@@ -74,8 +78,11 @@ namespace {
}
char AAEval::ID = 0;
-INITIALIZE_PASS(AAEval, "aa-eval",
- "Exhaustive Alias Analysis Precision Evaluator", false, true);
+INITIALIZE_PASS_BEGIN(AAEval, "aa-eval",
+ "Exhaustive Alias Analysis Precision Evaluator", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(AAEval, "aa-eval",
+ "Exhaustive Alias Analysis Precision Evaluator", false, true)
FunctionPass *llvm::createAAEvalPass() { return new AAEval(); }
@@ -155,7 +162,7 @@ bool AAEval::runOnFunction(Function &F) {
}
}
- if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
+ if (PrintNoAlias || PrintMayAlias || PrintPartialAlias || PrintMustAlias ||
PrintNoModRef || PrintMod || PrintRef || PrintModRef)
errs() << "Function: " << F.getName() << ": " << Pointers.size()
<< " pointers, " << CallSites.size() << " call sites\n";
@@ -163,12 +170,12 @@ bool AAEval::runOnFunction(Function &F) {
// iterate over the worklist, and run the full (n^2)/2 disambiguations
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
- unsigned I1Size = ~0u;
+ uint64_t I1Size = AliasAnalysis::UnknownSize;
const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
- unsigned I2Size = ~0u;
+ uint64_t I2Size = AliasAnalysis::UnknownSize;
const Type *I2ElTy = cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
@@ -179,6 +186,10 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::MayAlias:
PrintResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent());
++MayAlias; break;
+ case AliasAnalysis::PartialAlias:
+ PrintResults("PartialAlias", PrintPartialAlias, *I1, *I2,
+ F.getParent());
+ ++PartialAlias; break;
case AliasAnalysis::MustAlias:
PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
++MustAlias; break;
@@ -195,7 +206,7 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
- unsigned Size = ~0u;
+ uint64_t Size = AliasAnalysis::UnknownSize;
const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
@@ -250,7 +261,7 @@ static void PrintPercent(unsigned Num, unsigned Sum) {
}
bool AAEval::doFinalization(Module &M) {
- unsigned AliasSum = NoAlias + MayAlias + MustAlias;
+ unsigned AliasSum = NoAlias + MayAlias + PartialAlias + MustAlias;
errs() << "===== Alias Analysis Evaluator Report =====\n";
if (AliasSum == 0) {
errs() << " Alias Analysis Evaluator Summary: No pointers!\n";
@@ -260,10 +271,13 @@ bool AAEval::doFinalization(Module &M) {
PrintPercent(NoAlias, AliasSum);
errs() << " " << MayAlias << " may alias responses ";
PrintPercent(MayAlias, AliasSum);
+ errs() << " " << PartialAlias << " partial alias responses ";
+ PrintPercent(PartialAlias, AliasSum);
errs() << " " << MustAlias << " must alias responses ";
PrintPercent(MustAlias, AliasSum);
errs() << " Alias Analysis Evaluator Pointer Alias Summary: "
<< NoAlias*100/AliasSum << "%/" << MayAlias*100/AliasSum << "%/"
+ << PartialAlias*100/AliasSum << "%/"
<< MustAlias*100/AliasSum << "%\n";
}
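The evaluator's loop is the classic all-pairs disambiguation: for n pointers it
issues n*(n-1)/2 alias queries by pairing each pointer only with its
predecessors. A stripped-down sketch of that iteration shape with the new
64-bit sizes; countNoAlias and its containers are hypothetical:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include <vector>
    using namespace llvm;

    static unsigned countNoAlias(AliasAnalysis &AA,
                                 const std::vector<const Value *> &Ptrs,
                                 const std::vector<uint64_t> &Sizes) {
      unsigned NoAliasCount = 0;
      for (unsigned i = 0, n = Ptrs.size(); i != n; ++i)
        for (unsigned j = 0; j != i; ++j)   // each unordered pair once
          if (AA.alias(AliasAnalysis::Location(Ptrs[i], Sizes[i]),
                       AliasAnalysis::Location(Ptrs[j], Sizes[j]))
                == AliasAnalysis::NoAlias)
            ++NoAliasCount;
      return NoAliasCount;
    }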
diff --git a/contrib/llvm/lib/Analysis/AliasDebugger.cpp b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
index b9fe646..f15c051 100644
--- a/contrib/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
@@ -39,7 +39,9 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- AliasDebugger() : ModulePass(ID) {}
+ AliasDebugger() : ModulePass(ID) {
+ initializeAliasDebuggerPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
@@ -92,17 +94,18 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
- assert(Vals.find(V1) != Vals.end() && "Never seen value in AA before");
- assert(Vals.find(V2) != Vals.end() && "Never seen value in AA before");
- return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
+ AliasResult alias(const Location &LocA, const Location &LocB) {
+ assert(Vals.find(LocA.Ptr) != Vals.end() &&
+ "Never seen value in AA before");
+ assert(Vals.find(LocB.Ptr) != Vals.end() &&
+ "Never seen value in AA before");
+ return AliasAnalysis::alias(LocA, LocB);
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
- assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
- return AliasAnalysis::getModRefInfo(CS, P, Size);
+ const Location &Loc) {
+ assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
+ return AliasAnalysis::getModRefInfo(CS, Loc);
}
ModRefResult getModRefInfo(ImmutableCallSite CS1,
@@ -110,9 +113,9 @@ namespace {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
- bool pointsToConstantMemory(const Value *P) {
- assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
- return AliasAnalysis::pointsToConstantMemory(P);
+ bool pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+ assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
virtual void deleteValue(Value *V) {
@@ -129,7 +132,7 @@ namespace {
char AliasDebugger::ID = 0;
INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
- "AA use debugger", false, true, false);
+ "AA use debugger", false, true, false)
Pass *llvm::createAliasDebugger() { return new AliasDebugger(); }
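AliasDebugger also illustrates the shape every chaining analysis takes after
this patch: override the Location-based virtuals, do local work, then defer to
the base-class implementation so the query continues down the AA chain. A
hedged sketch of that skeleton; CheckingAA is hypothetical and the
registration macros are omitted:

    #include "llvm/Pass.h"
    #include "llvm/Analysis/AliasAnalysis.h"
    using namespace llvm;

    namespace {
    struct CheckingAA : public ImmutablePass, public AliasAnalysis {
      static char ID;
      CheckingAA() : ImmutablePass(ID) {}

      virtual void initializePass() {
        InitializeAliasAnalysis(this);   // hook up the next AA in the chain
      }

      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AliasAnalysis::getAnalysisUsage(AU);  // declares the chained AA
      }

      virtual AliasResult alias(const Location &LocA, const Location &LocB) {
        assert(LocA.Ptr && LocB.Ptr && "null pointer in alias query");
        return AliasAnalysis::alias(LocA, LocB);  // forward down the chain
      }

      // Needed because the pass implements AliasAnalysis via multiple
      // inheritance, the same idiom as the passes in this patch.
      virtual void *getAdjustedAnalysisPointer(const void *ID) {
        if (ID == &AliasAnalysis::ID)
          return (AliasAnalysis*)this;
        return this;
      }
    };
    }
    char CheckingAA::ID = 0;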
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index e74543b..3a46976d 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -15,6 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
@@ -45,7 +46,12 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
PointerRec *R = AS.getSomePointer();
// If the pointers are not a must-alias pair, this set becomes a may alias.
- if (AA.alias(L->getValue(), L->getSize(), R->getValue(), R->getSize())
+ if (AA.alias(AliasAnalysis::Location(L->getValue(),
+ L->getSize(),
+ L->getTBAAInfo()),
+ AliasAnalysis::Location(R->getValue(),
+ R->getSize(),
+ R->getTBAAInfo()))
!= AliasAnalysis::MustAlias)
AliasTy = MayAlias;
}
@@ -87,7 +93,8 @@ void AliasSet::removeFromTracker(AliasSetTracker &AST) {
}
void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
- unsigned Size, bool KnownMustAlias) {
+ uint64_t Size, const MDNode *TBAAInfo,
+ bool KnownMustAlias) {
assert(!Entry.hasAliasSet() && "Entry already in set!");
// Check to see if we have to downgrade to _may_ alias.
@@ -95,16 +102,18 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
if (PointerRec *P = getSomePointer()) {
AliasAnalysis &AA = AST.getAliasAnalysis();
AliasAnalysis::AliasResult Result =
- AA.alias(P->getValue(), P->getSize(), Entry.getValue(), Size);
- if (Result == AliasAnalysis::MayAlias)
+ AA.alias(AliasAnalysis::Location(P->getValue(), P->getSize(),
+ P->getTBAAInfo()),
+ AliasAnalysis::Location(Entry.getValue(), Size, TBAAInfo));
+ if (Result != AliasAnalysis::MustAlias)
AliasTy = MayAlias;
else // First entry of must alias must have maximum size!
- P->updateSize(Size);
+ P->updateSizeAndTBAAInfo(Size, TBAAInfo);
assert(Result != AliasAnalysis::NoAlias && "Cannot be part of must set!");
}
Entry.setAliasSet(this);
- Entry.updateSize(Size);
+ Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
// Add it to the end of the list...
assert(*PtrListEnd == 0 && "End of list is not null?");
@@ -120,7 +129,7 @@ void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
AliasAnalysis::ModRefBehavior Behavior = AA.getModRefBehavior(CS);
if (Behavior == AliasAnalysis::DoesNotAccessMemory)
return;
- else if (Behavior == AliasAnalysis::OnlyReadsMemory) {
+ if (AliasAnalysis::onlyReadsMemory(Behavior)) {
AliasTy = MayAlias;
AccessTy |= Refs;
return;
@@ -134,7 +143,8 @@ void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
/// aliasesPointer - Return true if the specified pointer "may" (or must)
/// alias one of the members in the set.
///
-bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
+bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
+ const MDNode *TBAAInfo,
AliasAnalysis &AA) const {
if (AliasTy == MustAlias) {
assert(CallSites.empty() && "Illegal must alias set!");
@@ -143,19 +153,26 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
// SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
- return AA.alias(SomePtr->getValue(), SomePtr->getSize(), Ptr, Size);
+ return AA.alias(AliasAnalysis::Location(SomePtr->getValue(),
+ SomePtr->getSize(),
+ SomePtr->getTBAAInfo()),
+ AliasAnalysis::Location(Ptr, Size, TBAAInfo));
}
// If this is a may-alias set, we have to check all of the pointers in the set
// to be sure it doesn't alias the set...
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.alias(Ptr, Size, I.getPointer(), I.getSize()))
+ if (AA.alias(AliasAnalysis::Location(Ptr, Size, TBAAInfo),
+ AliasAnalysis::Location(I.getPointer(), I.getSize(),
+ I.getTBAAInfo())))
return true;
// Check the call sites list and invoke list...
if (!CallSites.empty()) {
for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
- if (AA.getModRefInfo(CallSites[i], Ptr, Size) != AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(CallSites[i],
+ AliasAnalysis::Location(Ptr, Size, TBAAInfo)) !=
+ AliasAnalysis::NoModRef)
return true;
}
@@ -198,10 +215,11 @@ void AliasSetTracker::clear() {
/// that may alias the pointer, merge them together and return the unified set.
///
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
- unsigned Size) {
+ uint64_t Size,
+ const MDNode *TBAAInfo) {
AliasSet *FoundSet = 0;
for (iterator I = begin(), E = end(); I != E; ++I) {
- if (I->Forward || !I->aliasesPointer(Ptr, Size, AA)) continue;
+ if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
if (FoundSet == 0) { // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
@@ -216,9 +234,10 @@ AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
/// containsPointer - Return true if the specified location is represented by
/// this alias set, false otherwise. This does not modify the AST object or
/// alias sets.
-bool AliasSetTracker::containsPointer(Value *Ptr, unsigned Size) const {
+bool AliasSetTracker::containsPointer(Value *Ptr, uint64_t Size,
+ const MDNode *TBAAInfo) const {
for (const_iterator I = begin(), E = end(); I != E; ++I)
- if (!I->Forward && I->aliasesPointer(Ptr, Size, AA))
+ if (!I->Forward && I->aliasesPointer(Ptr, Size, TBAAInfo, AA))
return true;
return false;
}
@@ -244,33 +263,34 @@ AliasSet *AliasSetTracker::findAliasSetForCallSite(CallSite CS) {
/// getAliasSetForPointer - Return the alias set that the specified pointer
/// lives in.
-AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, unsigned Size,
+AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, uint64_t Size,
+ const MDNode *TBAAInfo,
bool *New) {
AliasSet::PointerRec &Entry = getEntryFor(Pointer);
// Check to see if the pointer is already known.
if (Entry.hasAliasSet()) {
- Entry.updateSize(Size);
+ Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
}
- if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
+ if (AliasSet *AS = findAliasSetForPointer(Pointer, Size, TBAAInfo)) {
// Add it to the alias set it aliases.
- AS->addPointer(*this, Entry, Size);
+ AS->addPointer(*this, Entry, Size, TBAAInfo);
return *AS;
}
if (New) *New = true;
// Otherwise create a new alias set to hold the loaded pointer.
AliasSets.push_back(new AliasSet());
- AliasSets.back().addPointer(*this, Entry, Size);
+ AliasSets.back().addPointer(*this, Entry, Size, TBAAInfo);
return AliasSets.back();
}
-bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
+bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo) {
bool NewPtr;
- addPointer(Ptr, Size, AliasSet::NoModRef, NewPtr);
+ addPointer(Ptr, Size, TBAAInfo, AliasSet::NoModRef, NewPtr);
return NewPtr;
}
@@ -279,6 +299,7 @@ bool AliasSetTracker::add(LoadInst *LI) {
bool NewPtr;
AliasSet &AS = addPointer(LI->getOperand(0),
AA.getTypeStoreSize(LI->getType()),
+ LI->getMetadata(LLVMContext::MD_tbaa),
AliasSet::Refs, NewPtr);
if (LI->isVolatile()) AS.setVolatile();
return NewPtr;
@@ -289,6 +310,7 @@ bool AliasSetTracker::add(StoreInst *SI) {
Value *Val = SI->getOperand(0);
AliasSet &AS = addPointer(SI->getOperand(1),
AA.getTypeStoreSize(Val->getType()),
+ SI->getMetadata(LLVMContext::MD_tbaa),
AliasSet::Mods, NewPtr);
if (SI->isVolatile()) AS.setVolatile();
return NewPtr;
@@ -296,7 +318,9 @@ bool AliasSetTracker::add(StoreInst *SI) {
bool AliasSetTracker::add(VAArgInst *VAAI) {
bool NewPtr;
- addPointer(VAAI->getOperand(0), ~0, AliasSet::ModRef, NewPtr);
+ addPointer(VAAI->getOperand(0), AliasAnalysis::UnknownSize,
+ VAAI->getMetadata(LLVMContext::MD_tbaa),
+ AliasSet::ModRef, NewPtr);
return NewPtr;
}
@@ -358,6 +382,7 @@ void AliasSetTracker::add(const AliasSetTracker &AST) {
bool X;
for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
+ ASI.getTBAAInfo(),
(AliasSet::AccessType)AS.AccessTy, X);
if (AS.isVolatile()) NewAS.setVolatile();
}
@@ -393,31 +418,36 @@ void AliasSetTracker::remove(AliasSet &AS) {
AS.removeFromTracker(*this);
}
-bool AliasSetTracker::remove(Value *Ptr, unsigned Size) {
- AliasSet *AS = findAliasSetForPointer(Ptr, Size);
+bool
+AliasSetTracker::remove(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo) {
+ AliasSet *AS = findAliasSetForPointer(Ptr, Size, TBAAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(LoadInst *LI) {
- unsigned Size = AA.getTypeStoreSize(LI->getType());
- AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size);
+ uint64_t Size = AA.getTypeStoreSize(LI->getType());
+ const MDNode *TBAAInfo = LI->getMetadata(LLVMContext::MD_tbaa);
+ AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size, TBAAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(StoreInst *SI) {
- unsigned Size = AA.getTypeStoreSize(SI->getOperand(0)->getType());
- AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size);
+ uint64_t Size = AA.getTypeStoreSize(SI->getOperand(0)->getType());
+ const MDNode *TBAAInfo = SI->getMetadata(LLVMContext::MD_tbaa);
+ AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size, TBAAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(VAArgInst *VAAI) {
- AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0), ~0);
+ AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0),
+ AliasAnalysis::UnknownSize,
+ VAAI->getMetadata(LLVMContext::MD_tbaa));
if (!AS) return false;
remove(*AS);
return true;
@@ -507,7 +537,9 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
// Add it to the alias set it aliases...
I = PointerMap.find(From);
AliasSet *AS = I->second->getAliasSet(*this);
- AS->addPointer(*this, Entry, I->second->getSize(), true);
+ AS->addPointer(*this, Entry, I->second->getSize(),
+ I->second->getTBAAInfo(),
+ true);
}
@@ -587,7 +619,9 @@ namespace {
AliasSetTracker *Tracker;
public:
static char ID; // Pass identification, replacement for typeid
- AliasSetPrinter() : FunctionPass(ID) {}
+ AliasSetPrinter() : FunctionPass(ID) {
+ initializeAliasSetPrinterPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -607,5 +641,8 @@ namespace {
}
char AliasSetPrinter::ID = 0;
-INITIALIZE_PASS(AliasSetPrinter, "print-alias-sets",
- "Alias Set Printer", false, true);
+INITIALIZE_PASS_BEGIN(AliasSetPrinter, "print-alias-sets",
+ "Alias Set Printer", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(AliasSetPrinter, "print-alias-sets",
+ "Alias Set Printer", false, true)
diff --git a/contrib/llvm/lib/Analysis/Analysis.cpp b/contrib/llvm/lib/Analysis/Analysis.cpp
index 398dec7..1af1c35 100644
--- a/contrib/llvm/lib/Analysis/Analysis.cpp
+++ b/contrib/llvm/lib/Analysis/Analysis.cpp
@@ -8,22 +8,83 @@
//===----------------------------------------------------------------------===//
#include "llvm-c/Analysis.h"
+#include "llvm/InitializePasses.h"
#include "llvm/Analysis/Verifier.h"
#include <cstring>
using namespace llvm;
+/// initializeAnalysis - Initialize all passes linked into the Analysis library.
+void llvm::initializeAnalysis(PassRegistry &Registry) {
+ initializeAliasAnalysisAnalysisGroup(Registry);
+ initializeAliasAnalysisCounterPass(Registry);
+ initializeAAEvalPass(Registry);
+ initializeAliasDebuggerPass(Registry);
+ initializeAliasSetPrinterPass(Registry);
+ initializeNoAAPass(Registry);
+ initializeBasicAliasAnalysisPass(Registry);
+ initializeCFGViewerPass(Registry);
+ initializeCFGPrinterPass(Registry);
+ initializeCFGOnlyViewerPass(Registry);
+ initializeCFGOnlyPrinterPass(Registry);
+ initializePrintDbgInfoPass(Registry);
+ initializeDominanceFrontierPass(Registry);
+ initializeDomViewerPass(Registry);
+ initializeDomPrinterPass(Registry);
+ initializeDomOnlyViewerPass(Registry);
+ initializePostDomViewerPass(Registry);
+ initializeDomOnlyPrinterPass(Registry);
+ initializePostDomPrinterPass(Registry);
+ initializePostDomOnlyViewerPass(Registry);
+ initializePostDomOnlyPrinterPass(Registry);
+ initializeIVUsersPass(Registry);
+ initializeInstCountPass(Registry);
+ initializeIntervalPartitionPass(Registry);
+ initializeLazyValueInfoPass(Registry);
+ initializeLibCallAliasAnalysisPass(Registry);
+ initializeLintPass(Registry);
+ initializeLiveValuesPass(Registry);
+ initializeLoopDependenceAnalysisPass(Registry);
+ initializeLoopInfoPass(Registry);
+ initializeMemDepPrinterPass(Registry);
+ initializeMemoryDependenceAnalysisPass(Registry);
+ initializeModuleDebugInfoPrinterPass(Registry);
+ initializePostDominatorTreePass(Registry);
+ initializePostDominanceFrontierPass(Registry);
+ initializeProfileEstimatorPassPass(Registry);
+ initializeNoProfileInfoPass(Registry);
+ initializeNoPathProfileInfoPass(Registry);
+ initializeProfileInfoAnalysisGroup(Registry);
+ initializePathProfileInfoAnalysisGroup(Registry);
+ initializeLoaderPassPass(Registry);
+ initializePathProfileLoaderPassPass(Registry);
+ initializeProfileVerifierPassPass(Registry);
+ initializePathProfileVerifierPass(Registry);
+ initializeRegionInfoPass(Registry);
+ initializeRegionViewerPass(Registry);
+ initializeRegionPrinterPass(Registry);
+ initializeRegionOnlyViewerPass(Registry);
+ initializeRegionOnlyPrinterPass(Registry);
+ initializeScalarEvolutionPass(Registry);
+ initializeScalarEvolutionAliasAnalysisPass(Registry);
+ initializeTypeBasedAliasAnalysisPass(Registry);
+}
+
+void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
+ initializeAnalysis(*unwrap(R));
+}
+
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessages) {
std::string Messages;
-
+
LLVMBool Result = verifyModule(*unwrap(M),
static_cast<VerifierFailureAction>(Action),
OutMessages? &Messages : 0);
-
+
if (OutMessages)
*OutMessages = strdup(Messages.c_str());
-
+
return Result;
}
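With initializeAnalysis() in place, a tool initializes pass metadata once at
startup rather than relying on static RegisterPass constructors; the C entry
point LLVMInitializeAnalysis above does the same for bindings. A minimal
sketch of the C++ side, assuming the declarations live in the headers the
patch includes:

    #include "llvm/InitializePasses.h"
    #include "llvm/PassRegistry.h"
    using namespace llvm;

    int main() {
      PassRegistry &Registry = *PassRegistry::getPassRegistry();
      initializeAnalysis(Registry);  // register all Analysis-library passes
      // ... build PassManagers and run as usual ...
      return 0;
    }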
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 113c72b..f7bcd9e 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1,4 +1,4 @@
-//===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl -----------------===//
+//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the default implementation of the Alias Analysis interface
-// that simply implements a few identities (two different globals cannot alias,
-// etc), but otherwise does no analysis.
+// This file defines the primary stateless implementation of the
+// Alias Analysis interface that implements identities (two different
+// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//
@@ -22,10 +22,12 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -95,104 +97,54 @@ static bool isEscapeSource(const Value *V) {
return false;
}
-/// isObjectSmallerThan - Return true if we can prove that the object specified
-/// by V is smaller than Size.
-static bool isObjectSmallerThan(const Value *V, unsigned Size,
- const TargetData &TD) {
+/// getObjectSize - Return the size of the object specified by V, or
+/// UnknownSize if unknown.
+static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
const Type *AccessTy;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ if (!GV->hasDefinitiveInitializer())
+ return AliasAnalysis::UnknownSize;
AccessTy = GV->getType()->getElementType();
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
if (!AI->isArrayAllocation())
AccessTy = AI->getType()->getElementType();
else
- return false;
+ return AliasAnalysis::UnknownSize;
} else if (const CallInst* CI = extractMallocCall(V)) {
if (!isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
- return (C->getZExtValue() < Size);
- return false;
+ return C->getZExtValue();
+ return AliasAnalysis::UnknownSize;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
if (A->hasByValAttr())
AccessTy = cast<PointerType>(A->getType())->getElementType();
else
- return false;
+ return AliasAnalysis::UnknownSize;
} else {
- return false;
+ return AliasAnalysis::UnknownSize;
}
if (AccessTy->isSized())
- return TD.getTypeAllocSize(AccessTy) < Size;
- return false;
+ return TD.getTypeAllocSize(AccessTy);
+ return AliasAnalysis::UnknownSize;
}
-//===----------------------------------------------------------------------===//
-// NoAA Pass
-//===----------------------------------------------------------------------===//
-
-namespace {
- /// NoAA - This class implements the -no-aa pass, which always returns "I
- /// don't know" for alias queries. NoAA is unlike other alias analysis
- /// implementations, in that it does not chain to a previous analysis. As
- /// such it doesn't follow many of the rules that other alias analyses must.
- ///
- struct NoAA : public ImmutablePass, public AliasAnalysis {
- static char ID; // Class identification, replacement for typeinfo
- NoAA() : ImmutablePass(ID) {}
- explicit NoAA(char &PID) : ImmutablePass(PID) { }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- }
-
- virtual void initializePass() {
- TD = getAnalysisIfAvailable<TargetData>();
- }
-
- virtual AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
- return MayAlias;
- }
-
- virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
- return UnknownModRefBehavior;
- }
- virtual ModRefBehavior getModRefBehavior(const Function *F) {
- return UnknownModRefBehavior;
- }
-
- virtual bool pointsToConstantMemory(const Value *P) { return false; }
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
- return ModRef;
- }
- virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
- ImmutableCallSite CS2) {
- return ModRef;
- }
-
- virtual void deleteValue(Value *V) {}
- virtual void copyValue(Value *From, Value *To) {}
-
- /// getAdjustedAnalysisPointer - This method is used when a pass implements
- /// an analysis interface through multiple inheritance. If needed, it
- /// should override this to adjust the this pointer as needed for the
- /// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
- if (ID == &AliasAnalysis::ID)
- return (AliasAnalysis*)this;
- return this;
- }
- };
-} // End of anonymous namespace
-
-// Register this pass...
-char NoAA::ID = 0;
-INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
- "No Alias Analysis (always returns 'may' alias)",
- true, true, false);
+/// isObjectSmallerThan - Return true if we can prove that the object specified
+/// by V is smaller than Size.
+static bool isObjectSmallerThan(const Value *V, uint64_t Size,
+ const TargetData &TD) {
+ uint64_t ObjectSize = getObjectSize(V, TD);
+ return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
+}
-ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
+/// isObjectSize - Return true if we can prove that the object specified
+/// by V has size Size.
+static bool isObjectSize(const Value *V, uint64_t Size,
+ const TargetData &TD) {
+ uint64_t ObjectSize = getObjectSize(V, TD);
+ return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
+}
//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
@@ -272,14 +224,14 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
Value *CastOp = cast<CastInst>(V)->getOperand(0);
unsigned OldWidth = Scale.getBitWidth();
unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
- Scale.trunc(SmallWidth);
- Offset.trunc(SmallWidth);
+ Scale = Scale.trunc(SmallWidth);
+ Offset = Offset.trunc(SmallWidth);
Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
TD, Depth+1);
- Scale.zext(OldWidth);
- Offset.zext(OldWidth);
+ Scale = Scale.zext(OldWidth);
+ Offset = Offset.zext(OldWidth);
return Result;
}
@@ -299,7 +251,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When TargetData is around, this function is capable of analyzing everything
-/// that Value::getUnderlyingObject() can look through. When not, it just looks
+/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
@@ -328,6 +280,14 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
V = Op->getOperand(0);
continue;
}
+
+ if (const Instruction *I = dyn_cast<Instruction>(V))
+ // TODO: Get a DominatorTree and use it here.
+ if (const Value *Simplified =
+ SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
+ V = Simplified;
+ continue;
+ }
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (GEPOp == 0)
@@ -386,8 +346,8 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
- BaseOffs += IndexOffset.getZExtValue()*Scale;
- Scale *= IndexScale.getZExtValue();
+ BaseOffs += IndexOffset.getSExtValue()*Scale;
+ Scale *= IndexScale.getSExtValue();
// If we already had an occurrence of this index variable, merge this
@@ -407,7 +367,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// pointer size.
if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
Scale <<= ShiftBits;
- Scale >>= ShiftBits;
+ Scale = (int64_t)Scale >> ShiftBits;
}
if (Scale) {
@@ -485,25 +445,34 @@ static bool notDifferentParent(const Value *O1, const Value *O2) {
#endif
namespace {
- /// BasicAliasAnalysis - This is the default alias analysis implementation.
- /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
- /// derives from the NoAA class.
- struct BasicAliasAnalysis : public NoAA {
+ /// BasicAliasAnalysis - This is the primary alias analysis implementation.
+ struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : NoAA(ID) {}
+ BasicAliasAnalysis() : ImmutablePass(ID) {
+ initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void initializePass() {
+ InitializeAliasAnalysis(this);
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AliasAnalysis>();
+ }
- virtual AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+ virtual AliasResult alias(const Location &LocA,
+ const Location &LocB) {
assert(Visited.empty() && "Visited must be cleared after use!");
- assert(notDifferentParent(V1, V2) &&
+ assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
- AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
+ AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
+ LocB.Ptr, LocB.Size, LocB.TBAATag);
Visited.clear();
return Alias;
}
virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
@@ -513,7 +482,7 @@ namespace {
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- virtual bool pointsToConstantMemory(const Value *P);
+ virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
/// getModRefBehavior - Return the behavior when calling the given
/// call site.
@@ -539,46 +508,102 @@ namespace {
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
- AliasResult aliasGEP(const GEPOperator *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size,
+ AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
// instruction against another.
- AliasResult aliasPHI(const PHINode *PN, unsigned PNSize,
- const Value *V2, unsigned V2Size);
+ AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+ const MDNode *PNTBAAInfo,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo);
/// aliasSelect - Disambiguate a Select instruction against another value.
- AliasResult aliasSelect(const SelectInst *SI, unsigned SISize,
- const Value *V2, unsigned V2Size);
-
- AliasResult aliasCheck(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+ const MDNode *SITBAAInfo,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo);
+
+ AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
+ const MDNode *V1TBAATag,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAATag);
};
} // End of anonymous namespace
// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS(BasicAliasAnalysis, AliasAnalysis, "basicaa",
- "Basic Alias Analysis (default AA impl)",
- false, true, true);
+ "Basic Alias Analysis (stateless AA impl)",
+ false, true, false)
ImmutablePass *llvm::createBasicAliasAnalysisPass() {
return new BasicAliasAnalysis();
}
+/// pointsToConstantMemory - Returns whether the given pointer value
+/// points to memory that is local to the function, with global constants being
+/// considered local to all functions.
+bool
+BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+ assert(Visited.empty() && "Visited must be cleared after use!");
+
+ unsigned MaxLookup = 8;
+ SmallVector<const Value *, 16> Worklist;
+ Worklist.push_back(Loc.Ptr);
+ do {
+ const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+ if (!Visited.insert(V)) {
+ Visited.clear();
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
+ }
+
+ // An alloca instruction defines local memory.
+ if (OrLocal && isa<AllocaInst>(V))
+ continue;
+
+ // A global constant counts as local memory for our purposes.
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ // Note: this doesn't require GV to be "ODR" because it isn't legal for a
+ // global to be marked constant in some modules and non-constant in
+ // others. GV may even be a declaration, not a definition.
+ if (!GV->isConstant()) {
+ Visited.clear();
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
+ }
+ continue;
+ }
+
+ // If both select values point to local memory, then so does the select.
+ if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+
+ // If all values incoming to a phi node point to local memory, then so does
+ // the phi.
+ if (const PHINode *PN = dyn_cast<PHINode>(V)) {
+ // Don't bother inspecting phi nodes with many operands.
+ if (PN->getNumIncomingValues() > MaxLookup) {
+ Visited.clear();
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
+ }
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ Worklist.push_back(PN->getIncomingValue(i));
+ continue;
+ }
-/// pointsToConstantMemory - Chase pointers until we find a (constant
-/// global) or not.
-bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
- if (const GlobalVariable *GV =
- dyn_cast<GlobalVariable>(P->getUnderlyingObject()))
- // Note: this doesn't require GV to be "ODR" because it isn't legal for a
- // global to be marked constant in some modules and non-constant in others.
- // GV may even be a declaration, not a definition.
- return GV->isConstant();
+ // Otherwise be conservative.
+ Visited.clear();
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
- return NoAA::pointsToConstantMemory(P);
+ } while (!Worklist.empty() && --MaxLookup);
+
+ Visited.clear();
+ return Worklist.empty();
}
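The new pointsToConstantMemory is a bounded worklist saturation: each popped
value is stripped to its underlying object; allocas and constant globals are
accepted locally, selects and phis fan their operands back into the worklist,
and anything else (or exhausting the budget) falls back to the chained AA. A
shape-only sketch of that control flow, with the LLVM specifics abstracted
into assumed callbacks; strip and expand are not LLVM API:

    #include <set>
    #include <vector>

    // strip() normalizes a node; expand() either accepts it locally or
    // queues its operands, returning false to bail out conservatively.
    template <typename StripFn, typename ExpandFn>
    bool provedOverGraph(int Root, StripFn strip, ExpandFn expand,
                         unsigned Budget) {
      std::vector<int> Worklist(1, Root);
      std::set<int> Visited;
      do {
        int V = strip(Worklist.back());
        Worklist.pop_back();
        if (!Visited.insert(V).second)
          return false;            // revisited a node: give up, as above
        if (!expand(V, Worklist))
          return false;            // node neither local nor expandable
      } while (!Worklist.empty() && --Budget);
      return Worklist.empty();     // proven only if fully explored in budget
    }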
/// getModRefBehavior - Return the behavior when calling the given call site.
@@ -596,22 +621,32 @@ BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
Min = OnlyReadsMemory;
// The AliasAnalysis base class has some smarts, let's use them.
- return std::min(AliasAnalysis::getModRefBehavior(CS), Min);
+ return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
+ // If the function declares it doesn't access memory, we can't do better.
if (F->doesNotAccessMemory())
- // Can't do better than this.
return DoesNotAccessMemory;
+
+ // For intrinsics, we can check the table.
+ if (unsigned iid = F->getIntrinsicID()) {
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#include "llvm/Intrinsics.gen"
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
+ }
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // If the function declares it only reads memory, go with that.
if (F->onlyReadsMemory())
- return OnlyReadsMemory;
- if (unsigned id = F->getIntrinsicID())
- return getIntrinsicModRefBehavior(id);
+ Min = OnlyReadsMemory;
- return NoAA::getModRefBehavior(F);
+ // Otherwise be conservative.
+ return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
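The std::min-to-bitwise-AND change here and in AliasAnalysis.cpp reflects
ModRefBehavior becoming a bitmask rather than an ordered enum: each analysis
clears the effect bits it can rule out, and intersecting masks keeps only the
effects no analysis excluded. For example, "only reads" combined with "only
accesses argument pointees" composes into "only reads argument pointees",
which min() over a total order could not express. In sketch form:

    #include "llvm/Analysis/AliasAnalysis.h"
    using namespace llvm;

    // Combine two conservative behavior summaries by intersection.
    static AliasAnalysis::ModRefBehavior
    combineBehavior(AliasAnalysis::ModRefBehavior A,
                    AliasAnalysis::ModRefBehavior B) {
      return AliasAnalysis::ModRefBehavior(A & B);
    }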
/// getModRefInfo - Check to see if the specified callsite can clobber the
@@ -620,13 +655,13 @@ BasicAliasAnalysis::getModRefBehavior(const Function *F) {
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
- assert(notDifferentParent(CS.getInstruction(), P) &&
+ const Location &Loc) {
+ assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = P->getUnderlyingObject();
+ const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
- // If this is a tail call and P points to a stack location, we know that
+ // If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
// We cannot exclude byval arguments here; these belong to the caller of
// the current function not to the current function, and a tail callee
@@ -650,11 +685,11 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
continue;
- // If this is a no-capture pointer argument, see if we can tell that it
+ // If this is a no-capture pointer argument, see if we can tell that it
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (!isNoAlias(cast<Value>(CI), UnknownSize, P, UnknownSize)) {
+ if (!isNoAlias(Location(cast<Value>(CI)), Loc)) {
PassedAsArg = true;
break;
}
@@ -664,6 +699,8 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
return NoModRef;
}
+ ModRefResult Min = ModRef;
+
// Finally, handle specific knowledge of intrinsics.
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
if (II != 0)
@@ -671,15 +708,20 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
default: break;
case Intrinsic::memcpy:
case Intrinsic::memmove: {
- unsigned Len = UnknownSize;
+ uint64_t Len = UnknownSize;
if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
Len = LenCI->getZExtValue();
Value *Dest = II->getArgOperand(0);
Value *Src = II->getArgOperand(1);
- if (isNoAlias(Dest, Len, P, Size)) {
- if (isNoAlias(Src, Len, P, Size))
+ // If Loc can't overlap either the source or the dest, the call doesn't modref it.
+ if (isNoAlias(Location(Dest, Len), Loc)) {
+ if (isNoAlias(Location(Src, Len), Loc))
return NoModRef;
- return Ref;
+ // If it can't overlap the dest, then worst case it reads the loc.
+ Min = Ref;
+ } else if (isNoAlias(Location(Src, Len), Loc)) {
+ // If it can't overlap the source, then worst case it mutates the loc.
+ Min = Mod;
}
break;
}
@@ -687,11 +729,13 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// Since memset is 'accesses arguments' only, the AliasAnalysis base class
// will handle it for the variable length case.
if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
- unsigned Len = LenCI->getZExtValue();
+ uint64_t Len = LenCI->getZExtValue();
Value *Dest = II->getArgOperand(0);
- if (isNoAlias(Dest, Len, P, Size))
+ if (isNoAlias(Location(Dest, Len), Loc))
return NoModRef;
}
+ // We know that memset doesn't load anything.
+ Min = Mod;
break;
case Intrinsic::atomic_cmp_swap:
case Intrinsic::atomic_swap:
@@ -707,42 +751,49 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
case Intrinsic::atomic_load_umin:
if (TD) {
Value *Op1 = II->getArgOperand(0);
- unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
- if (isNoAlias(Op1, Op1Size, P, Size))
+ uint64_t Op1Size = TD->getTypeStoreSize(Op1->getType());
+ MDNode *Tag = II->getMetadata(LLVMContext::MD_tbaa);
+ if (isNoAlias(Location(Op1, Op1Size, Tag), Loc))
return NoModRef;
}
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start: {
- unsigned PtrSize =
+ uint64_t PtrSize =
cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
+ if (isNoAlias(Location(II->getArgOperand(1),
+ PtrSize,
+ II->getMetadata(LLVMContext::MD_tbaa)),
+ Loc))
return NoModRef;
break;
}
case Intrinsic::invariant_end: {
- unsigned PtrSize =
+ uint64_t PtrSize =
cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
+ if (isNoAlias(Location(II->getArgOperand(2),
+ PtrSize,
+ II->getMetadata(LLVMContext::MD_tbaa)),
+ Loc))
return NoModRef;
break;
}
}
// The AliasAnalysis base class has some smarts, let's use them.
- return AliasAnalysis::getModRefInfo(CS, P, Size);
+ return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
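
The memcpy/memmove case above now degrades gracefully rather than all-or-nothing: clearing only the destination still caps the answer at Ref, and clearing only the source caps it at Mod. Its decision table, restated as a plain C++ sketch with no LLVM types:

    enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };

    // memcpy(Dest, Src, Len) versus a queried location Loc, given
    // independently proven no-alias facts (illustrative only).
    ModRefResult memcpyModRef(bool LocMayAliasDest, bool LocMayAliasSrc) {
      if (!LocMayAliasDest && !LocMayAliasSrc)
        return NoModRef;  // the copy never touches Loc
      if (!LocMayAliasDest)
        return Ref;       // at worst Loc is read as the source
      if (!LocMayAliasSrc)
        return Mod;       // at worst Loc is written as the destination
      return ModRef;      // Loc could be either end of the copy
    }
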
-
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
-/// anything about V2. UnderlyingV1 is GEP1->getUnderlyingObject(),
+/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
- const Value *V2, unsigned V2Size,
+BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo,
const Value *UnderlyingV1,
const Value *UnderlyingV2) {
// If this GEP has been visited before, we're on a use-def cycle.
@@ -759,8 +810,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// out if the indexes to the GEP tell us anything about the derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize,
- UnderlyingV2, UnknownSize);
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
+ UnderlyingV2, UnknownSize, 0);
// If we get a No or May, then return it immediately, no amount of analysis
// will improve this situation.
@@ -782,7 +833,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// to handle without it.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 &&
- "DecomposeGEPExpression and getUnderlyingObject disagree!");
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -800,7 +851,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
if (V1Size == UnknownSize && V2Size == UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, V2, V2Size);
+ AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
+ V2, V2Size, V2TBAAInfo);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
@@ -817,7 +869,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// to handle without it.
if (GEP1BasePtr != UnderlyingV1) {
assert(TD == 0 &&
- "DecomposeGEPExpression and getUnderlyingObject disagree!");
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
}
@@ -831,6 +883,17 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
return MustAlias;
+ // If there is a difference between the pointers, but the difference is
+ // less than the size of the associated memory object, then we know
+ // that the objects are partially overlapping.
+ if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
+ if (GEP1BaseOffset >= 0 ?
+ (V2Size != UnknownSize && (uint64_t)GEP1BaseOffset < V2Size) :
+ (V1Size != UnknownSize && -(uint64_t)GEP1BaseOffset < V1Size &&
+ GEP1BaseOffset != INT64_MIN))
+ return PartialAlias;
+ }
+
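
Concretely, the new PartialAlias case fires when two accesses into the same object start at different offsets but their byte ranges still intersect. A runnable restatement of the predicate with fixed numbers (both sizes assumed known, i.e. not UnknownSize):

    #include <cstdint>
    #include <cstdio>

    // Off is the offset of the GEP'd pointer relative to the other pointer
    // (GEP1BaseOffset above); V1Size/V2Size are the two access sizes.
    bool partiallyOverlap(int64_t Off, uint64_t V1Size, uint64_t V2Size) {
      return Off >= 0 ? (uint64_t)Off < V2Size
                      : (Off != INT64_MIN && (uint64_t)-Off < V1Size);
    }

    int main() {
      // &Buf[4] vs &Buf[0], both 8-byte accesses: [4,12) overlaps [0,8).
      std::printf("%d\n", partiallyOverlap(4, 8, 8)); // 1 -> PartialAlias
      // &Buf[8] vs &Buf[0]: [8,16) and [0,8) are disjoint.
      std::printf("%d\n", partiallyOverlap(8, 8, 8)); // 0 -> NoAlias path
    }
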
// If we have a known constant offset, see if this offset is larger than the
// access size being queried. If so, and if no variable indices can remove
// pieces of this constant, then we know we have a no-alias. For example,
@@ -850,8 +913,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// If our known offset is bigger than the access size, we know we don't have
// an alias.
if (GEP1BaseOffset) {
- if (GEP1BaseOffset >= (int64_t)V2Size ||
- GEP1BaseOffset <= -(int64_t)V1Size)
+ if (GEP1BaseOffset >= 0 ?
+ (V2Size != UnknownSize && (uint64_t)GEP1BaseOffset >= V2Size) :
+ (V1Size != UnknownSize && -(uint64_t)GEP1BaseOffset >= V1Size &&
+ GEP1BaseOffset != INT64_MIN))
return NoAlias;
}
@@ -861,8 +926,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
- const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
+ const MDNode *SITBAAInfo,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo) {
// If this select has been visited before, we're on a use-def cycle.
// Such cycles are only valid when PHI nodes are involved or in unreachable
// code. The visitPHI function catches cycles containing PHIs, but there
@@ -875,13 +942,13 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
if (SI->getCondition() == SI2->getCondition()) {
AliasResult Alias =
- aliasCheck(SI->getTrueValue(), SISize,
- SI2->getTrueValue(), V2Size);
+ aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
+ SI2->getTrueValue(), V2Size, V2TBAAInfo);
if (Alias == MayAlias)
return MayAlias;
AliasResult ThisAlias =
- aliasCheck(SI->getFalseValue(), SISize,
- SI2->getFalseValue(), V2Size);
+ aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
+ SI2->getFalseValue(), V2Size, V2TBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
@@ -890,7 +957,7 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
// If both arms of the Select node NoAlias or MustAlias V2, then returns
// NoAlias / MustAlias. Otherwise, returns MayAlias.
AliasResult Alias =
- aliasCheck(V2, V2Size, SI->getTrueValue(), SISize);
+ aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
if (Alias == MayAlias)
return MayAlias;
@@ -900,7 +967,7 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
Visited.erase(V2);
AliasResult ThisAlias =
- aliasCheck(V2, V2Size, SI->getFalseValue(), SISize);
+ aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
@@ -909,8 +976,10 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
- const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
+ const MDNode *PNTBAAInfo,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo) {
// The PHI node has already been visited, avoid recursion any further.
if (!Visited.insert(PN))
return MayAlias;
@@ -921,16 +990,16 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
AliasResult Alias =
- aliasCheck(PN->getIncomingValue(0), PNSize,
+ aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
- V2Size);
+ V2Size, V2TBAAInfo);
if (Alias == MayAlias)
return MayAlias;
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
AliasResult ThisAlias =
- aliasCheck(PN->getIncomingValue(i), PNSize,
+ aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
- V2Size);
+ V2Size, V2TBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
}
@@ -951,7 +1020,8 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
V1Srcs.push_back(PV1);
}
- AliasResult Alias = aliasCheck(V2, V2Size, V1Srcs[0], PNSize);
+ AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
+ V1Srcs[0], PNSize, PNTBAAInfo);
// Early exit if the check of the first PHI source against V2 is MayAlias.
// Other results are not possible.
if (Alias == MayAlias)
@@ -967,7 +1037,8 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
// don't need to assume that V2 is being visited recursively.
Visited.erase(V2);
- AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
+ AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
+ V, PNSize, PNTBAAInfo);
if (ThisAlias != Alias || ThisAlias == MayAlias)
return MayAlias;
}
@@ -979,8 +1050,10 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
// such as array references.
//
AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
+ const Value *V2, uint64_t V2Size,
+ const MDNode *V2TBAAInfo) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are.
if (V1Size == 0 || V2Size == 0)
@@ -997,8 +1070,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
return NoAlias; // Scalars cannot alias each other
// Figure out what objects these things are pointing to if we can.
- const Value *O1 = V1->getUnderlyingObject();
- const Value *O2 = V2->getUnderlyingObject();
+ const Value *O1 = GetUnderlyingObject(V1, TD);
+ const Value *O2 = GetUnderlyingObject(V2, TD);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
@@ -1059,25 +1132,39 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
std::swap(V1Size, V2Size);
std::swap(O1, O2);
}
- if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1))
- return aliasGEP(GV1, V1Size, V2, V2Size, O1, O2);
+ if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
+ AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
+ if (Result != MayAlias) return Result;
+ }
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
}
- if (const PHINode *PN = dyn_cast<PHINode>(V1))
- return aliasPHI(PN, V1Size, V2, V2Size);
+ if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
+ AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
+ V2, V2Size, V2TBAAInfo);
+ if (Result != MayAlias) return Result;
+ }
if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
}
- if (const SelectInst *S1 = dyn_cast<SelectInst>(V1))
- return aliasSelect(S1, V1Size, V2, V2Size);
+ if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
+ AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
+ V2, V2Size, V2TBAAInfo);
+ if (Result != MayAlias) return Result;
+ }
- return NoAA::alias(V1, V1Size, V2, V2Size);
-}
+ // If both pointers are pointing into the same object and one of them
+ // is accessing the entire object, then the accesses must
+ // overlap in some way.
+ if (TD && O1 == O2)
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
+ return PartialAlias;
-// Make sure that anything that uses AliasAnalysis pulls in this file.
-DEFINING_FILE_FOR(BasicAliasAnalysis)
+ return AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
+ Location(V2, V2Size, V2TBAAInfo));
+}
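
aliasCheck and its helpers now thread TBAA metadata alongside pointer and size, and callers hand over a Location rather than a bare pointer/size pair. A hypothetical caller sketch (the helper name accessesAreIndependent is made up; the LLVM calls are from this revision's API):

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static bool accessesAreIndependent(AliasAnalysis &AA, LoadInst *LI,
                                       StoreInst *SI) {
      // Bundle pointer, access size, and TBAA tag into one Location each.
      AliasAnalysis::Location LoadLoc(LI->getPointerOperand(),
                                      AA.getTypeStoreSize(LI->getType()),
                                      LI->getMetadata(LLVMContext::MD_tbaa));
      AliasAnalysis::Location StoreLoc(
          SI->getPointerOperand(),
          AA.getTypeStoreSize(SI->getValueOperand()->getType()),
          SI->getMetadata(LLVMContext::MD_tbaa));
      return AA.alias(LoadLoc, StoreLoc) == AliasAnalysis::NoAlias;
    }
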
diff --git a/contrib/llvm/lib/Analysis/CFGPrinter.cpp b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
index 617a362..7bb063f 100644
--- a/contrib/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
@@ -25,7 +25,9 @@ using namespace llvm;
namespace {
struct CFGViewer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGViewer() : FunctionPass(ID) {}
+ CFGViewer() : FunctionPass(ID) {
+ initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F) {
F.viewCFG();
@@ -41,12 +43,14 @@ namespace {
}
char CFGViewer::ID = 0;
-INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true);
+INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true)
namespace {
struct CFGOnlyViewer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGOnlyViewer() : FunctionPass(ID) {}
+ CFGOnlyViewer() : FunctionPass(ID) {
+ initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F) {
F.viewCFGOnly();
@@ -63,13 +67,14 @@ namespace {
char CFGOnlyViewer::ID = 0;
INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
- "View CFG of function (with no function bodies)", false, true);
+ "View CFG of function (with no function bodies)", false, true)
namespace {
struct CFGPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGPrinter() : FunctionPass(ID) {}
- explicit CFGPrinter(char &pid) : FunctionPass(pid) {}
+ CFGPrinter() : FunctionPass(ID) {
+ initializeCFGPrinterPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
@@ -96,13 +101,15 @@ namespace {
char CFGPrinter::ID = 0;
INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file",
- false, true);
+ false, true)
namespace {
struct CFGOnlyPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGOnlyPrinter() : FunctionPass(ID) {}
- explicit CFGOnlyPrinter(char &pid) : FunctionPass(pid) {}
+ CFGOnlyPrinter() : FunctionPass(ID) {
+ initializeCFGOnlyPrinterPass(*PassRegistry::getPassRegistry());
+ }
+
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
errs() << "Writing '" << Filename << "'...";
@@ -128,7 +135,7 @@ namespace {
char CFGOnlyPrinter::ID = 0;
INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
"Print CFG of function to 'dot' file (with no function bodies)",
- false, true);
+ false, true)
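
Two related mechanics recur in every hunk of this file: INITIALIZE_PASS now expands to definitions (hence no trailing semicolon), and each constructor eagerly self-registers via the generated initialize function. A sketch of the pattern with a made-up pass (MyPrinter, "my-printer", and the description string are hypothetical):

    #include "llvm/Pass.h"
    #include "llvm/PassRegistry.h"
    using namespace llvm;

    namespace llvm { void initializeMyPrinterPass(PassRegistry &); }

    namespace {
      struct MyPrinter : public FunctionPass {
        static char ID;
        MyPrinter() : FunctionPass(ID) {
          // Eager self-registration, matching the constructors above.
          initializeMyPrinterPass(*PassRegistry::getPassRegistry());
        }
        virtual bool runOnFunction(Function &F) { return false; }
      };
    }

    char MyPrinter::ID = 0;
    // Expands to the definition of initializeMyPrinterPass; no semicolon.
    INITIALIZE_PASS(MyPrinter, "my-printer", "Example do-nothing pass",
                    false, true)
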
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index 90eae20..42a54d9 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -95,6 +95,9 @@ bool llvm::PointerMayBeCaptured(const Value *V,
case Instruction::Load:
// Loading from a pointer does not cause it to be captured.
break;
+ case Instruction::VAArg:
+ // "va-arg" from a pointer does not cause it to be captured.
+ break;
case Instruction::Ret:
if (ReturnCaptures)
return true;
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 0bf7967..cd8d52c 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -30,6 +30,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/FEnv.h"
#include <cerrno>
#include <cmath>
using namespace llvm;
@@ -53,7 +54,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
- return FoldBitCast(ConstantVector::get(&Ops, 1), DestTy, TD);
+ return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
// If this is a bitcast from constant vector -> vector, fold it.
@@ -166,7 +167,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
}
}
- return ConstantVector::get(Result.data(), Result.size());
+ return ConstantVector::get(Result);
}
@@ -339,6 +340,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
return true;
}
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (CE->getOpcode() == Instruction::IntToPtr &&
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ BytesLeft, TD);
+ }
+
// Otherwise, unknown initializer type.
return false;
}
@@ -466,7 +474,8 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getUnderlyingObject())){
+ if (GlobalVariable *GV =
+ dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
const Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
@@ -537,7 +546,7 @@ static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
for (unsigned i = 1; i != NumOps; ++i) {
if ((i == 1 ||
!isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
- reinterpret_cast<Value *const *>(Ops+1),
+ reinterpret_cast<Value *const *>(Ops+1),
i-1))) &&
Ops[i]->getType() != IntPtrTy) {
Any = true;
@@ -567,16 +576,35 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;
-
- unsigned BitWidth =
- TD->getTypeSizeInBits(TD->getIntPtrType(Ptr->getContext()));
+
+ const Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1; i != NumOps; ++i)
- if (!isa<ConstantInt>(Ops[i]))
+ if (!isa<ConstantInt>(Ops[i])) {
+
+ // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
+ // "inttoptr (sub (ptrtoint Ptr), V)"
+ if (NumOps == 2 &&
+ cast<PointerType>(ResultTy)->getElementType()->isIntegerTy(8)) {
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
+ assert((CE == 0 || CE->getType() == IntPtrTy) &&
+ "CastGEPIndices didn't canonicalize index types!");
+ if (CE && CE->getOpcode() == Instruction::Sub &&
+ CE->getOperand(0)->isNullValue()) {
+ Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
+ Res = ConstantExpr::getSub(Res, CE->getOperand(1));
+ Res = ConstantExpr::getIntToPtr(Res, ResultTy);
+ if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
+ Res = ConstantFoldConstantExpression(ResCE, TD);
+ return Res;
+ }
+ }
return 0;
+ }
+ unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
APInt Offset = APInt(BitWidth,
TD->getIndexedOffset(Ptr->getType(),
(Value**)Ops+1, NumOps-1));
@@ -609,10 +637,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
APInt BasePtr(BitWidth, 0);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
if (CE->getOpcode() == Instruction::IntToPtr)
- if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
- BasePtr = Base->getValue();
- BasePtr.zextOrTrunc(BitWidth);
- }
+ if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
+ BasePtr = Base->getValue().zextOrTrunc(BitWidth);
if (Ptr->isNullValue() || BasePtr != 0) {
Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
return ConstantExpr::getIntToPtr(C, ResultTy);
@@ -638,12 +664,19 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
+ const IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
if (ElemSize == 0)
- return 0;
- APInt NewIdx = Offset.udiv(ElemSize);
- Offset -= NewIdx * ElemSize;
- NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Ty->getContext()),
- NewIdx));
+ // The element size is 0. This may be [0 x Ty]*, so just use a zero
+ // index for this level and proceed to the next level to see if it can
+ // accommodate the offset.
+ NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
+ else {
+ // The element size is non-zero; divide the offset by the element
+ // size (rounding down), to compute the index at this level.
+ APInt NewIdx = Offset.udiv(ElemSize);
+ Offset -= NewIdx * ElemSize;
+ NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
+ }
Ty = ATy->getElementType();
} else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
@@ -687,27 +720,34 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
-
-/// ConstantFoldInstruction - Attempt to constant fold the specified
-/// instruction. If successful, the constant result is returned, if not, null
-/// is returned. Note that this function can only fail when attempting to fold
-/// instructions like loads and stores, which have no constant expression form.
-///
+/// ConstantFoldInstruction - Try to constant fold the specified instruction.
+/// If successful, the constant result is returned, if not, null is returned.
+/// Note that this fails if not all of the operands are constant. Otherwise,
+/// this function can only fail when attempting to fold instructions like loads
+/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
+ // Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
- if (PN->getNumIncomingValues() == 0)
- return UndefValue::get(PN->getType());
-
- Constant *Result = dyn_cast<Constant>(PN->getIncomingValue(0));
- if (Result == 0) return 0;
-
- // Handle PHI nodes specially here...
- for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) != Result && PN->getIncomingValue(i) != PN)
- return 0; // Not all the same incoming constants...
+ Constant *CommonValue = 0;
+
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming = PN->getIncomingValue(i);
+ // If the incoming value is undef then skip it. Note that while we could
+ // skip the value if it is equal to the phi node itself, we choose not to
+ // because that would break the rule that constant folding only applies if
+ // all operands are constants.
+ if (isa<UndefValue>(Incoming))
+ continue;
+ // If the incoming value is not a constant, or is a different constant to
+ // the one we saw previously, then give up.
+ Constant *C = dyn_cast<Constant>(Incoming);
+ if (!C || (CommonValue && C != CommonValue))
+ return 0;
+ CommonValue = C;
+ }
- // If we reach here, all incoming values are the same constant.
- return Result;
+ // If we reach here, all incoming values are the same constant or undef.
+ return CommonValue ? CommonValue : UndefValue::get(PN->getType());
}
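
The rewritten PHI case accepts undef incoming values and requires every remaining value to be one common constant. The control flow, restated as a standalone sketch (int stands in for Constant*; -1 plays the role of undef and -2 means not foldable):

    #include <vector>

    struct In { bool IsUndef; bool IsConst; int Val; };

    int foldPHI(const std::vector<In> &Ins) {
      bool Seen = false;
      int Common = 0;
      for (size_t i = 0; i != Ins.size(); ++i) {
        if (Ins[i].IsUndef) continue;                  // undef matches anything
        if (!Ins[i].IsConst) return -2;                // non-constant: give up
        if (Seen && Ins[i].Val != Common) return -2;   // conflicting constants
        Common = Ins[i].Val;
        Seen = true;
      }
      return Seen ? Common : -1;  // all-undef folds to undef (-1 here)
    }
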
// Scan the operand list, checking to see if they are all constants, if so,
@@ -725,7 +765,18 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
-
+
+ if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I))
+ return ConstantExpr::getInsertValue(
+ cast<Constant>(IVI->getAggregateOperand()),
+ cast<Constant>(IVI->getInsertedValueOperand()),
+ IVI->idx_begin(), IVI->getNumIndices());
+
+ if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I))
+ return ConstantExpr::getExtractValue(
+ cast<Constant>(EVI->getAggregateOperand()),
+ EVI->idx_begin(), EVI->getNumIndices());
+
return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
Ops.data(), Ops.size(), TD);
}
@@ -736,7 +787,8 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
const TargetData *TD) {
SmallVector<Constant*, 8> Ops;
- for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i) {
+ for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
+ i != e; ++i) {
Constant *NewC = cast<Constant>(*i);
// Recursively fold the ConstantExpr's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
@@ -1000,8 +1052,17 @@ llvm::canConstantFoldCallTo(const Function *F) {
case Intrinsic::usub_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
+ case Intrinsic::smul_with_overflow:
case Intrinsic::convert_from_fp16:
case Intrinsic::convert_to_fp16:
+ case Intrinsic::x86_sse_cvtss2si:
+ case Intrinsic::x86_sse_cvtss2si64:
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse2_cvtsd2si:
+ case Intrinsic::x86_sse2_cvtsd2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64:
return true;
default:
return false;
@@ -1039,10 +1100,10 @@ llvm::canConstantFoldCallTo(const Function *F) {
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
const Type *Ty) {
- errno = 0;
+ sys::llvm_fenv_clearexcept();
V = NativeFP(V);
- if (errno != 0) {
- errno = 0;
+ if (sys::llvm_fenv_testexcept()) {
+ sys::llvm_fenv_clearexcept();
return 0;
}
@@ -1056,10 +1117,10 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
double V, double W, const Type *Ty) {
- errno = 0;
+ sys::llvm_fenv_clearexcept();
V = NativeFP(V, W);
- if (errno != 0) {
- errno = 0;
+ if (sys::llvm_fenv_testexcept()) {
+ sys::llvm_fenv_clearexcept();
return 0;
}
@@ -1071,6 +1132,36 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
return 0; // dummy return to suppress warning
}
+/// ConstantFoldConvertToInt - Attempt an SSE floating point to integer
+/// conversion of a constant floating point value. If roundTowardZero is false, the
+/// default IEEE rounding is used (toward nearest, ties to even). This matches
+/// the behavior of the non-truncating SSE instructions in the default rounding
+/// mode. The desired integer type Ty is used to select how many bits are
+/// available for the result. Returns null if the conversion cannot be
+/// performed, otherwise returns the Constant value resulting from the
+/// conversion.
+static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
+ const Type *Ty) {
+ assert(Op && "Called with NULL operand");
+ APFloat Val(Op->getValueAPF());
+
+ // All of these conversion intrinsics form an integer of at most 64 bits.
+ unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
+ assert(ResultWidth <= 64 &&
+ "Can only constant fold conversions to 64 and 32 bit ints");
+
+ uint64_t UIntVal;
+ bool isExact = false;
+ APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
+ : APFloat::rmNearestTiesToEven;
+ APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
+ /*isSigned=*/true, mode,
+ &isExact);
+ if (status != APFloat::opOK && status != APFloat::opInexact)
+ return 0;
+ return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
+}
+
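
The two rounding modes map directly onto the truncating (cvtt*) and non-truncating (cvt*) intrinsic families. A small self-contained demonstration of the underlying APFloat call (values chosen for illustration):

    #include "llvm/ADT/APFloat.h"
    #include <cstdio>
    using namespace llvm;

    int main() {
      APFloat Val(2.7);
      uint64_t UIntVal;
      bool isExact = false;
      // rmTowardZero is the cvtt* behavior; rmNearestTiesToEven is cvt*.
      APFloat::opStatus S =
          Val.convertToInteger(&UIntVal, 64, /*isSigned=*/true,
                               APFloat::rmTowardZero, &isExact);
      if (S == APFloat::opOK || S == APFloat::opInexact)
        std::printf("%lld\n", (long long)UIntVal); // prints 2
      return 0;
    }
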
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
@@ -1082,7 +1173,7 @@ llvm::ConstantFoldCall(Function *F,
const Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
- if (Name == "llvm.convert.to.fp16") {
+ if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
APFloat Val(Op->getValueAPF());
bool lost = false;
@@ -1093,6 +1184,13 @@ llvm::ConstantFoldCall(Function *F,
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
+
+ /// We only fold functions with finite arguments. Folding NaN and inf is
+ /// likely to be aborted with an exception anyway, and some host libms
+ /// have known errors raising exceptions.
+ if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
+ return 0;
+
/// Currently APFloat versions of these functions do not exist, so we use
/// the host native double versions. Float versions are not called
/// directly but for all these it is true (float)(f((double)arg)) ==
@@ -1133,8 +1231,8 @@ llvm::ConstantFoldCall(Function *F,
return ConstantFoldFP(log, V, Ty);
else if (Name == "log10" && V > 0)
return ConstantFoldFP(log10, V, Ty);
- else if (Name == "llvm.sqrt.f32" ||
- Name == "llvm.sqrt.f64") {
+ else if (F->getIntrinsicID() == Intrinsic::sqrt &&
+ (Ty->isFloatTy() || Ty->isDoubleTy())) {
if (V >= -0.0)
return ConstantFoldFP(sqrt, V, Ty);
else // Undefined
@@ -1164,18 +1262,18 @@ llvm::ConstantFoldCall(Function *F,
}
return 0;
}
-
-
+
if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
- if (Name.startswith("llvm.bswap"))
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::bswap:
return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
- else if (Name.startswith("llvm.ctpop"))
+ case Intrinsic::ctpop:
return ConstantInt::get(Ty, Op->getValue().countPopulation());
- else if (Name.startswith("llvm.cttz"))
+ case Intrinsic::cttz:
return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
- else if (Name.startswith("llvm.ctlz"))
+ case Intrinsic::ctlz:
return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
- else if (Name == "llvm.convert.from.fp16") {
+ case Intrinsic::convert_from_fp16: {
APFloat Val(Op->getValue());
bool lost = false;
@@ -1183,24 +1281,44 @@ llvm::ConstantFoldCall(Function *F,
Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
// Conversion is always precise.
- status = status;
+ (void)status;
assert(status == APFloat::opOK && !lost &&
"Precision lost during fp16 constfolding");
return ConstantFP::get(F->getContext(), Val);
}
- return 0;
+ default:
+ return 0;
+ }
}
-
+
+ if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
+ switch (F->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::x86_sse_cvtss2si:
+ case Intrinsic::x86_sse_cvtss2si64:
+ case Intrinsic::x86_sse2_cvtsd2si:
+ case Intrinsic::x86_sse2_cvtsd2si64:
+ if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
+ return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64:
+ if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
+ return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
+ }
+ }
+
if (isa<UndefValue>(Operands[0])) {
- if (Name.startswith("llvm.bswap"))
+ if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
return 0;
}
return 0;
}
-
+
if (NumOperands == 2) {
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
@@ -1223,11 +1341,11 @@ llvm::ConstantFoldCall(Function *F,
if (Name == "atan2")
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
- if (Name == "llvm.powi.f32")
+ if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
return ConstantFP::get(F->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
- if (Name == "llvm.powi.f64")
+ if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
return ConstantFP::get(F->getContext(),
APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
@@ -1240,42 +1358,37 @@ llvm::ConstantFoldCall(Function *F,
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
default: break;
- case Intrinsic::uadd_with_overflow: {
- Constant *Res = ConstantExpr::getAdd(Op1, Op2); // result.
- Constant *Ops[] = {
- Res, ConstantExpr::getICmp(CmpInst::ICMP_ULT, Res, Op1) // overflow.
- };
- return ConstantStruct::get(F->getContext(), Ops, 2, false);
- }
- case Intrinsic::usub_with_overflow: {
- Constant *Res = ConstantExpr::getSub(Op1, Op2); // result.
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::smul_with_overflow: {
+ APInt Res;
+ bool Overflow;
+ switch (F->getIntrinsicID()) {
+ default: assert(0 && "Invalid case");
+ case Intrinsic::sadd_with_overflow:
+ Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
+ break;
+ case Intrinsic::uadd_with_overflow:
+ Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
+ break;
+ case Intrinsic::ssub_with_overflow:
+ Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
+ break;
+ case Intrinsic::usub_with_overflow:
+ Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
+ break;
+ case Intrinsic::smul_with_overflow:
+ Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
+ break;
+ }
Constant *Ops[] = {
- Res, ConstantExpr::getICmp(CmpInst::ICMP_UGT, Res, Op1) // overflow.
+ ConstantInt::get(F->getContext(), Res),
+ ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
};
return ConstantStruct::get(F->getContext(), Ops, 2, false);
}
- case Intrinsic::sadd_with_overflow: {
- Constant *Res = ConstantExpr::getAdd(Op1, Op2); // result.
- Constant *Overflow = ConstantExpr::getSelect(
- ConstantExpr::getICmp(CmpInst::ICMP_SGT,
- ConstantInt::get(Op1->getType(), 0), Op1),
- ConstantExpr::getICmp(CmpInst::ICMP_SGT, Res, Op2),
- ConstantExpr::getICmp(CmpInst::ICMP_SLT, Res, Op2)); // overflow.
-
- Constant *Ops[] = { Res, Overflow };
- return ConstantStruct::get(F->getContext(), Ops, 2, false);
- }
- case Intrinsic::ssub_with_overflow: {
- Constant *Res = ConstantExpr::getSub(Op1, Op2); // result.
- Constant *Overflow = ConstantExpr::getSelect(
- ConstantExpr::getICmp(CmpInst::ICMP_SGT,
- ConstantInt::get(Op2->getType(), 0), Op2),
- ConstantExpr::getICmp(CmpInst::ICMP_SLT, Res, Op1),
- ConstantExpr::getICmp(CmpInst::ICMP_SGT, Res, Op1)); // overflow.
-
- Constant *Ops[] = { Res, Overflow };
- return ConstantStruct::get(F->getContext(), Ops, 2, false);
- }
}
}
@@ -1285,4 +1398,3 @@ llvm::ConstantFoldCall(Function *F,
}
return 0;
}
-
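
The replacement collapses four hand-rolled icmp/select expansions into one switch over APInt's checked-arithmetic helpers, which return the result and the overflow flag together. A minimal demonstration of one of those helpers (standalone, values illustrative):

    #include "llvm/ADT/APInt.h"
    #include <cstdio>
    using namespace llvm;

    int main() {
      APInt A = APInt::getSignedMaxValue(8);  // 127 as an i8
      APInt B(8, 1);
      bool Overflow = false;
      APInt Res = A.sadd_ov(B, Overflow);     // wraps to -128, Overflow set
      std::printf("%ld %d\n", (long)Res.getSExtValue(), (int)Overflow);
      return 0;
    }
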
diff --git a/contrib/llvm/lib/Analysis/DIBuilder.cpp b/contrib/llvm/lib/Analysis/DIBuilder.cpp
new file mode 100644
index 0000000..c1072df
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/DIBuilder.cpp
@@ -0,0 +1,801 @@
+//===--- DIBuilder.cpp - Debug Information Builder ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DIBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/DIBuilder.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Dwarf.h"
+
+using namespace llvm;
+using namespace llvm::dwarf;
+
+static Constant *GetTagConstant(LLVMContext &VMContext, unsigned Tag) {
+ assert((Tag & LLVMDebugVersionMask) == 0 &&
+ "Tag too large for debug encoding!");
+ return ConstantInt::get(Type::getInt32Ty(VMContext), Tag | LLVMDebugVersion);
+}
+
+DIBuilder::DIBuilder(Module &m)
+ : M(m), VMContext(M.getContext()), TheCU(0), DeclareFn(0), ValueFn(0) {}
+
+/// CreateCompileUnit - A CompileUnit provides an anchor for all debugging
+/// information generated during this instance of compilation.
+void DIBuilder::CreateCompileUnit(unsigned Lang, StringRef Filename,
+ StringRef Directory, StringRef Producer,
+ bool isOptimized, StringRef Flags,
+ unsigned RunTimeVer) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_compile_unit),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Lang),
+ MDString::get(VMContext, Filename),
+ MDString::get(VMContext, Directory),
+ MDString::get(VMContext, Producer),
+ // Deprecate isMain field.
+ ConstantInt::get(Type::getInt1Ty(VMContext), true), // isMain
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ MDString::get(VMContext, Flags),
+ ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeVer)
+ };
+ TheCU = DICompileUnit(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateFile - Create a file descriptor to hold debugging information
+/// for a file.
+DIFile DIBuilder::CreateFile(StringRef Filename, StringRef Directory) {
+ assert(TheCU && "Unable to create DW_TAG_file_type without CompileUnit");
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_file_type),
+ MDString::get(VMContext, Filename),
+ MDString::get(VMContext, Directory),
+ TheCU
+ };
+ return DIFile(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateEnumerator - Create a single enumerator value.
+DIEnumerator DIBuilder::CreateEnumerator(StringRef Name, uint64_t Val) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_enumerator),
+ MDString::get(VMContext, Name),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Val)
+ };
+ return DIEnumerator(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateBasicType - Create debugging information entry for a basic
+/// type, e.g 'char'.
+DIType DIBuilder::CreateBasicType(StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits,
+ unsigned Encoding) {
+ // Basic types are encoded in DIBasicType format. Line number, filename,
+ // offset and flags are always empty here.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_base_type),
+ TheCU,
+ MDString::get(VMContext, Name),
+ NULL, // Filename
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags;
+ ConstantInt::get(Type::getInt32Ty(VMContext), Encoding)
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
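
Taken together, the entry points above are meant to be driven in sequence: the compile unit anchors everything, then files and types hang off it. A hypothetical front-end snippet (file names, producer string, and the helper name are invented):

    #include "llvm/Analysis/DIBuilder.h"
    #include "llvm/Analysis/DebugInfo.h"
    #include "llvm/Module.h"
    #include "llvm/Support/Dwarf.h"
    using namespace llvm;

    void emitDebugSkeleton(Module &M) {
      DIBuilder DIB(M);
      // The compile unit must exist first; CreateFile asserts on TheCU.
      DIB.CreateCompileUnit(dwarf::DW_LANG_C99, "demo.c", "/tmp", "demo-fe",
                            /*isOptimized=*/false, /*Flags=*/"",
                            /*RunTimeVer=*/0);
      DIFile File = DIB.CreateFile("demo.c", "/tmp");
      DIType IntTy = DIB.CreateBasicType("int", 32, 32, dwarf::DW_ATE_signed);
      (void)File; (void)IntTy;
    }
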
+/// CreateQualifiedType - Create debugging information entry for a qualified
+/// type, e.g. 'const int'.
+DIType DIBuilder::CreateQualifiedType(unsigned Tag, DIType FromTy) {
+ // Qualified types are encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ TheCU,
+ MDString::get(VMContext, StringRef()), // Empty name.
+ NULL, // Filename
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ FromTy
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreatePointerType - Create debugging information entry for a pointer.
+DIType DIBuilder::CreatePointerType(DIType PointeeTy, uint64_t SizeInBits,
+ uint64_t AlignInBits, StringRef Name) {
+ // Pointer types are encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_pointer_type),
+ TheCU,
+ MDString::get(VMContext, Name),
+ NULL, // Filename
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ PointeeTy
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateReferenceType - Create debugging information entry for a reference.
+DIType DIBuilder::CreateReferenceType(DIType RTy) {
+ // References are encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_reference_type),
+ TheCU,
+ NULL, // Name
+ NULL, // Filename
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ RTy
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateTypedef - Create debugging information entry for a typedef.
+DIType DIBuilder::CreateTypedef(DIType Ty, StringRef Name, DIFile File,
+ unsigned LineNo) {
+ // typedefs are encoded in DIDerivedType format.
+ assert(Ty.Verify() && "Invalid typedef type!");
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_typedef),
+ Ty.getContext(),
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ Ty
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateFriend - Create debugging information entry for a 'friend'.
+DIType DIBuilder::CreateFriend(DIType Ty, DIType FriendTy) {
+ // friend declarations are encoded in DIDerivedType format.
+ assert(Ty.Verify() && "Invalid type!");
+ assert(FriendTy.Verify() && "Invalid friend type!");
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_friend),
+ Ty,
+ NULL, // Name
+ Ty.getFile(),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ FriendTy
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateInheritance - Create debugging information entry to establish
+/// an inheritance relationship between two types.
+DIType DIBuilder::CreateInheritance(DIType Ty, DIType BaseTy,
+ uint64_t BaseOffset, unsigned Flags) {
+ // TAG_inheritance is encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_inheritance),
+ Ty,
+ NULL, // Name
+ Ty.getFile(),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Line
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), BaseOffset),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ BaseTy
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateMemberType - Create debugging information entry for a member.
+DIType DIBuilder::CreateMemberType(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType Ty) {
+ // TAG_member is encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_member),
+ File, // Or TheCU ? Ty ?
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ Ty
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateClassType - Create debugging information entry for a class.
+DIType DIBuilder::CreateClassType(DIDescriptor Context, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType DerivedFrom, DIArray Elements,
+ MDNode *VTableHoder, MDNode *TemplateParams) {
+ // TAG_class_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_class_type),
+ Context,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom,
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ VTableHoder,
+ TemplateParams
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateTemplateTypeParameter - Create debugging information for template
+/// type parameter.
+DITemplateTypeParameter
+DIBuilder::CreateTemplateTypeParameter(DIDescriptor Context, StringRef Name,
+ DIType Ty, MDNode *File, unsigned LineNo,
+ unsigned ColumnNo) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_template_type_parameter),
+ Context,
+ MDString::get(VMContext, Name),
+ Ty,
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo)
+ };
+ return DITemplateTypeParameter(MDNode::get(VMContext, &Elts[0],
+ array_lengthof(Elts)));
+}
+
+/// CreateTemplateValueParameter - Create debugging information for template
+/// value parameter.
+DITemplateValueParameter
+DIBuilder::CreateTemplateValueParameter(DIDescriptor Context, StringRef Name,
+ DIType Ty, uint64_t Val,
+ MDNode *File, unsigned LineNo,
+ unsigned ColumnNo) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_template_value_parameter),
+ Context,
+ MDString::get(VMContext, Name),
+ Ty,
+ ConstantInt::get(Type::getInt64Ty(VMContext), Val),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo)
+ };
+ return DITemplateValueParameter(MDNode::get(VMContext, &Elts[0],
+ array_lengthof(Elts)));
+}
+
+/// CreateStructType - Create debugging information entry for a struct.
+DIType DIBuilder::CreateStructType(DIDescriptor Context, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ unsigned Flags, DIArray Elements,
+ unsigned RunTimeLang) {
+ // TAG_structure_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_structure_type),
+ Context,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateUnionType - Create debugging information entry for a union.
+DIType DIBuilder::CreateUnionType(DIDescriptor Scope, StringRef Name,
+ DIFile File,
+ unsigned LineNumber, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Flags,
+ DIArray Elements, unsigned RunTimeLang) {
+ // TAG_union_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_union_type),
+ Scope,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateSubroutineType - Create subroutine type.
+DIType DIBuilder::CreateSubroutineType(DIFile File, DIArray ParameterTypes) {
+ // TAG_subroutine_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_subroutine_type),
+ File,
+ MDString::get(VMContext, ""),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ ParameterTypes,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateEnumerationType - Create debugging information entry for an
+/// enumeration.
+DIType DIBuilder::CreateEnumerationType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits, DIArray Elements) {
+ // TAG_enumeration_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type),
+ Scope,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+ NMD->addOperand(Node);
+ return DIType(Node);
+}
+
+/// CreateArrayType - Create debugging information entry for an array.
+DIType DIBuilder::CreateArrayType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts) {
+ // TAG_array_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_array_type),
+ TheCU,
+ MDString::get(VMContext, ""),
+ TheCU,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Size),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ Ty,
+ Subscripts,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateVectorType - Create debugging information entry for a vector.
+DIType DIBuilder::CreateVectorType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts) {
+ // TAG_vector_type is encoded in DICompositeType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_vector_type),
+ TheCU,
+ MDString::get(VMContext, ""),
+ TheCU,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Size),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ Ty,
+ Subscripts,
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ };
+ return DIType(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// CreateArtificialType - Create a new DIType with "artificial" flag set.
+DIType DIBuilder::CreateArtificialType(DIType Ty) {
+ if (Ty.isArtificial())
+ return Ty;
+
+ SmallVector<Value *, 9> Elts;
+ MDNode *N = Ty;
+ assert (N && "Unexpected input DIType!");
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ if (Value *V = N->getOperand(i))
+ Elts.push_back(V);
+ else
+ Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext)));
+ }
+
+ unsigned CurFlags = Ty.getFlags();
+ CurFlags = CurFlags | DIType::FlagArtificial;
+
+ // Flags are stored at this slot.
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags);
+
+ return DIType(MDNode::get(VMContext, Elts.data(), Elts.size()));
+}
+
+/// RetainType - Retain DIType in a module even if it is not referenced
+/// through debug info anchors.
+void DIBuilder::RetainType(DIType T) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.ty");
+ NMD->addOperand(T);
+}
+
+/// CreateUnspecifiedParameter - Create unspecified type descriptor
+/// for the subroutine type.
+DIDescriptor DIBuilder::CreateUnspecifiedParameter() {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_unspecified_parameters)
+ };
+ return DIDescriptor(MDNode::get(VMContext, &Elts[0], 1));
+}
+
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIBuilder::CreateTemporaryType() {
+ // Give the temporary MDNode a tag. It doesn't matter what tag we
+ // use here as long as DIType accepts it.
+ Value *Elts[] = { GetTagConstant(VMContext, DW_TAG_base_type) };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+ return DIType(Node);
+}
+
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIBuilder::CreateTemporaryType(DIFile F) {
+ // Give the temporary MDNode a tag. It doesn't matter what tag we
+ // use here as long as DIType accepts it.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, DW_TAG_base_type),
+ F.getCompileUnit(),
+ NULL,
+ F
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+ return DIType(Node);
+}
+
+/// GetOrCreateArray - Get a DIArray, create one if required.
+DIArray DIBuilder::GetOrCreateArray(Value *const *Elements, unsigned NumElements) {
+ if (NumElements == 0) {
+ Value *Null = llvm::Constant::getNullValue(Type::getInt32Ty(VMContext));
+ return DIArray(MDNode::get(VMContext, &Null, 1));
+ }
+ return DIArray(MDNode::get(VMContext, Elements, NumElements));
+}
+
+/// GetOrCreateSubrange - Create a descriptor for a value range. This
+/// implicitly uniques the values returned.
+DISubrange DIBuilder::GetOrCreateSubrange(int64_t Lo, int64_t Hi) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_subrange_type),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Lo),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Hi)
+ };
+
+ return DISubrange(MDNode::get(VMContext, &Elts[0], 3));
+}
+
+/// CreateGlobalVariable - Create a new descriptor for the specified global.
+DIGlobalVariable DIBuilder::
+CreateGlobalVariable(StringRef Name, DIFile F, unsigned LineNumber,
+ DIType Ty, bool isLocalToUnit, llvm::Value *Val) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ TheCU,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, Name),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ Ty,
+ ConstantInt::get(Type::getInt32Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 1), /* isDefinition*/
+ Val
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(Node);
+ return DIGlobalVariable(Node);
+}
+
+/// CreateStaticVariable - Create a new descriptor for the specified static
+/// variable.
+DIGlobalVariable DIBuilder::
+CreateStaticVariable(DIDescriptor Context, StringRef Name,
+ StringRef LinkageName, DIFile F, unsigned LineNumber,
+ DIType Ty, bool isLocalToUnit, llvm::Value *Val) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, LinkageName),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ Ty,
+ ConstantInt::get(Type::getInt32Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 1), /* isDefinition */
+ Val
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(Node);
+ return DIGlobalVariable(Node);
+}
+
+/// CreateVariable - Create a new descriptor for the specified variable.
+DIVariable DIBuilder::CreateLocalVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name, DIFile File,
+ unsigned LineNo, DIType Ty,
+ bool AlwaysPreserve, unsigned Flags) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ Scope,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ Ty,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags)
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+ if (AlwaysPreserve) {
+ // The optimizer may remove local variables. If there is an interest
+ // in preserving variable info in such a situation then stash it in a
+ // named mdnode.
+ DISubprogram Fn(getDISubprogram(Scope));
+ StringRef FName = "fn";
+ if (Fn.getFunction())
+ FName = Fn.getFunction()->getName();
+ char One = '\1';
+ if (FName.startswith(StringRef(&One, 1)))
+ FName = FName.substr(1);
+ NamedMDNode *FnLocals = getOrInsertFnSpecificMDNode(M, FName);
+ FnLocals->addOperand(Node);
+ }
+ return DIVariable(Node);
+}
+
+/// CreateComplexVariable - Create a new descriptor for the specified variable
+/// which has a complex address expression for its address.
+DIVariable DIBuilder::CreateComplexVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name, DIFile F,
+ unsigned LineNo,
+ DIType Ty, Value *const *Addr,
+ unsigned NumAddr) {
+ SmallVector<Value *, 15> Elts;
+ Elts.push_back(GetTagConstant(VMContext, Tag));
+ Elts.push_back(Scope);
+ Elts.push_back(MDString::get(VMContext, Name));
+ Elts.push_back(F);
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), LineNo));
+ Elts.push_back(Ty);
+ Elts.append(Addr, Addr+NumAddr);
+
+ return DIVariable(MDNode::get(VMContext, Elts.data(), Elts.size()));
+}
+
+/// CreateFunction - Create a new descriptor for the specified function.
+DISubprogram DIBuilder::CreateFunction(DIDescriptor Context,
+ StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DIType Ty,
+ bool isLocalToUnit, bool isDefinition,
+ unsigned Flags, bool isOptimized,
+ Function *Fn) {
+
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_subprogram),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, LinkageName),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ Fn
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
+}
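A hedged example of how a front end might invoke this; the builder DIB, compile unit CU, file File, subroutine type FnTy, and llvm::Function FooFn are assumed to have been created earlier:

    DISubprogram SP =
      DIB.CreateFunction(CU, "foo", /*LinkageName=*/"foo", File,
                         /*LineNo=*/42, FnTy,
                         /*isLocalToUnit=*/false, /*isDefinition=*/true,
                         /*Flags=*/0, /*isOptimized=*/false, FooFn);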
+
+/// CreateMethod - Create a new descriptor for the specified C++ method.
+DISubprogram DIBuilder::CreateMethod(DIDescriptor Context,
+ StringRef Name,
+ StringRef LinkageName,
+ DIFile F,
+ unsigned LineNo, DIType Ty,
+ bool isLocalToUnit,
+ bool isDefinition,
+ unsigned VK, unsigned VIndex,
+ MDNode *VTableHolder,
+ unsigned Flags,
+ bool isOptimized,
+ Function *Fn) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_subprogram),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, LinkageName),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
+ ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
+ VTableHolder,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ Fn
+ };
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], array_lengthof(Elts));
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
+}
+
+/// CreateNameSpace - This creates a new descriptor for a namespace
+/// with the specified parent scope.
+DINameSpace DIBuilder::CreateNameSpace(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNo) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_namespace),
+ Scope,
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
+ };
+ return DINameSpace(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+DILexicalBlock DIBuilder::CreateLexicalBlock(DIDescriptor Scope, DIFile File,
+ unsigned Line, unsigned Col) {
+ // Defeat MDNode uniquing for lexical blocks by using a unique id.
+ static unsigned int unique_id = 0;
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block),
+ Scope,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Line),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Col),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++)
+ };
+ return DILexicalBlock(MDNode::get(VMContext, &Elts[0], array_lengthof(Elts)));
+}
+
+/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+Instruction *DIBuilder::InsertDeclare(Value *Storage, DIVariable VarInfo,
+ Instruction *InsertBefore) {
+ assert(Storage && "no storage passed to dbg.declare");
+ assert(VarInfo.Verify() && "empty DIVariable passed to dbg.declare");
+ if (!DeclareFn)
+ DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
+
+ Value *Args[] = { MDNode::get(Storage->getContext(), &Storage, 1), VarInfo };
+ return CallInst::Create(DeclareFn, Args, Args+2, "", InsertBefore);
+}
+
+/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+Instruction *DIBuilder::InsertDeclare(Value *Storage, DIVariable VarInfo,
+ BasicBlock *InsertAtEnd) {
+ assert(Storage && "no storage passed to dbg.declare");
+ assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.declare");
+ if (!DeclareFn)
+ DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
+
+ Value *Args[] = { MDNode::get(Storage->getContext(), &Storage, 1), VarInfo };
+
+ // If this block already has a terminator then insert this intrinsic
+ // before the terminator.
+ if (TerminatorInst *T = InsertAtEnd->getTerminator())
+ return CallInst::Create(DeclareFn, Args, Args+2, "", T);
+ else
+ return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd);
+}
+
+/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+Instruction *DIBuilder::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
+ DIVariable VarInfo,
+ Instruction *InsertBefore) {
+ assert(V && "no value passed to dbg.value");
+ assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.value");
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+
+ Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
+ ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
+ VarInfo };
+ return CallInst::Create(ValueFn, Args, Args+3, "", InsertBefore);
+}
+
+/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+Instruction *DIBuilder::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
+ DIVariable VarInfo,
+ BasicBlock *InsertAtEnd) {
+ assert(V && "no value passed to dbg.value");
+ assert(VarInfo.Verify() && "invalid DIVariable passed to dbg.value");
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+
+ Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
+ ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
+ VarInfo };
+ return CallInst::Create(ValueFn, Args, Args+3, "", InsertAtEnd);
+}
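Taken together, a typical front-end sequence for an automatic variable might look like the following sketch. The IRBuilder Builder, DIBuilder DIB, enclosing scope SP, file File, and type IntDITy are assumptions for illustration:

    AllocaInst *Slot = Builder.CreateAlloca(Builder.getInt32Ty(), 0, "x");
    DIVariable DbgX =
      DIB.CreateLocalVariable(dwarf::DW_TAG_auto_variable, SP, "x", File,
                              /*LineNo=*/7, IntDITy,
                              /*AlwaysPreserve=*/true, /*Flags=*/0);
    DIB.InsertDeclare(Slot, DbgX, Builder.GetInsertBlock());
    // If the alloca is later promoted to SSA form, the value would instead
    // be tracked with DIB.InsertDbgValueIntrinsic(NewVal, 0, DbgX, InsertPt).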
+
diff --git a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
index 0567750..b23c351 100644
--- a/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DbgInfoPrinter.cpp
@@ -20,6 +20,7 @@
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
+#include "llvm/Module.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Passes.h"
@@ -40,7 +41,9 @@ namespace {
void printVariableDeclaration(const Value *V);
public:
static char ID; // Pass identification
- PrintDbgInfo() : FunctionPass(ID), Out(errs()) {}
+ PrintDbgInfo() : FunctionPass(ID), Out(errs()) {
+ initializePrintDbgInfoPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -48,12 +51,124 @@ namespace {
}
};
char PrintDbgInfo::ID = 0;
- INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
- "Print debug info in human readable form", false, false);
}
+INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
+ "Print debug info in human readable form", false, false)
+
FunctionPass *llvm::createDbgInfoPrinterPass() { return new PrintDbgInfo(); }
+/// Find the debug info descriptor corresponding to this global variable.
+static Value *findDbgGlobalDeclare(GlobalVariable *V) {
+ const Module *M = V->getParent();
+ NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.gv");
+ if (!NMD)
+ return 0;
+
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ DIDescriptor DIG(cast<MDNode>(NMD->getOperand(i)));
+ if (!DIG.isGlobalVariable())
+ continue;
+ if (DIGlobalVariable(DIG).getGlobal() == V)
+ return DIG;
+ }
+ return 0;
+}
+
+/// Find the debug info descriptor corresponding to this function.
+static Value *findDbgSubprogramDeclare(Function *V) {
+ const Module *M = V->getParent();
+ NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.sp");
+ if (!NMD)
+ return 0;
+
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ DIDescriptor DIG(cast<MDNode>(NMD->getOperand(i)));
+ if (!DIG.isSubprogram())
+ continue;
+ if (DISubprogram(DIG).getFunction() == V)
+ return DIG;
+ }
+ return 0;
+}
+
+/// Finds the llvm.dbg.declare intrinsic corresponding to this value if any.
+/// It looks through pointer casts too.
+static const DbgDeclareInst *findDbgDeclare(const Value *V) {
+ V = V->stripPointerCasts();
+
+ if (!isa<Instruction>(V) && !isa<Argument>(V))
+ return 0;
+
+ const Function *F = NULL;
+ if (const Instruction *I = dyn_cast<Instruction>(V))
+ F = I->getParent()->getParent();
+ else if (const Argument *A = dyn_cast<Argument>(V))
+ F = A->getParent();
+
+ for (Function::const_iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
+ for (BasicBlock::const_iterator BI = (*FI).begin(), BE = (*FI).end();
+ BI != BE; ++BI)
+ if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
+ if (DDI->getAddress() == V)
+ return DDI;
+
+ return 0;
+}
+
+static bool getLocationInfo(const Value *V, std::string &DisplayName,
+ std::string &Type, unsigned &LineNo,
+ std::string &File, std::string &Dir) {
+ DICompileUnit Unit;
+ DIType TypeD;
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(const_cast<Value*>(V))) {
+ Value *DIGV = findDbgGlobalDeclare(GV);
+ if (!DIGV) return false;
+ DIGlobalVariable Var(cast<MDNode>(DIGV));
+
+ StringRef D = Var.getDisplayName();
+ if (!D.empty())
+ DisplayName = D;
+ LineNo = Var.getLineNumber();
+ Unit = Var.getCompileUnit();
+ TypeD = Var.getType();
+ } else if (Function *F = dyn_cast<Function>(const_cast<Value*>(V))){
+ Value *DIF = findDbgSubprogramDeclare(F);
+ if (!DIF) return false;
+ DISubprogram Var(cast<MDNode>(DIF));
+
+ StringRef D = Var.getDisplayName();
+ if (!D.empty())
+ DisplayName = D;
+ LineNo = Var.getLineNumber();
+ Unit = Var.getCompileUnit();
+ TypeD = Var.getType();
+ } else {
+ const DbgDeclareInst *DDI = findDbgDeclare(V);
+ if (!DDI) return false;
+ DIVariable Var(cast<MDNode>(DDI->getVariable()));
+
+ StringRef D = Var.getName();
+ if (!D.empty())
+ DisplayName = D;
+ LineNo = Var.getLineNumber();
+ Unit = Var.getCompileUnit();
+ TypeD = Var.getType();
+ }
+
+ StringRef T = TypeD.getName();
+ if (!T.empty())
+ Type = T;
+ StringRef F = Unit.getFilename();
+ if (!F.empty())
+ File = F;
+ StringRef D = Unit.getDirectory();
+ if (!D.empty())
+ Dir = D;
+ return true;
+}
+
void PrintDbgInfo::printVariableDeclaration(const Value *V) {
std::string DisplayName, File, Directory, Type;
unsigned LineNo;
@@ -63,8 +178,12 @@ void PrintDbgInfo::printVariableDeclaration(const Value *V) {
Out << "; ";
WriteAsOperand(Out, V, false, 0);
- Out << " is variable " << DisplayName
- << " of type " << Type << " declared at ";
+ if (isa<Function>(V))
+ Out << " is function " << DisplayName
+ << " of type " << Type << " declared at ";
+ else
+ Out << " is variable " << DisplayName
+ << " of type " << Type << " declared at ";
if (PrintDirectory)
Out << Directory << "/";
diff --git a/contrib/llvm/lib/Analysis/DebugInfo.cpp b/contrib/llvm/lib/Analysis/DebugInfo.cpp
index 5ca89c6..9db1456 100644
--- a/contrib/llvm/lib/Analysis/DebugInfo.cpp
+++ b/contrib/llvm/lib/Analysis/DebugInfo.cpp
@@ -109,7 +109,9 @@ Function *DIDescriptor::getFunctionField(unsigned Elt) const {
}
unsigned DIVariable::getNumAddrElements() const {
- return DbgNode->getNumOperands()-6;
+ if (getVersion() <= llvm::LLVMDebugVersion8)
+ return DbgNode->getNumOperands()-6;
+ return DbgNode->getNumOperands()-7;
}
@@ -197,6 +199,12 @@ bool DIDescriptor::isGlobal() const {
return isGlobalVariable();
}
+/// isUnspecifiedParameter - Return true if the specified tag is
+/// DW_TAG_unspecified_parameters.
+bool DIDescriptor::isUnspecifiedParameter() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_unspecified_parameters;
+}
+
/// isScope - Return true if the specified tag is one of the scope
/// related tag.
bool DIDescriptor::isScope() const {
@@ -213,6 +221,18 @@ bool DIDescriptor::isScope() const {
return false;
}
+/// isTemplateTypeParameter - Return true if the specified tag is
+/// DW_TAG_template_type_parameter.
+bool DIDescriptor::isTemplateTypeParameter() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_template_type_parameter;
+}
+
+/// isTemplateValueParameter - Return true if the specified tag is
+/// DW_TAG_template_value_parameter.
+bool DIDescriptor::isTemplateValueParameter() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_template_value_parameter;
+}
+
/// isCompileUnit - Return true if the specified tag is DW_TAG_compile_unit.
bool DIDescriptor::isCompileUnit() const {
return DbgNode && getTag() == dwarf::DW_TAG_compile_unit;
@@ -280,6 +300,26 @@ void DIType::replaceAllUsesWith(DIDescriptor &D) {
}
}
+/// replaceAllUsesWith - Replace all uses of debug info referenced by
+/// this descriptor.
+void DIType::replaceAllUsesWith(MDNode *D) {
+ if (!DbgNode)
+ return;
+
+ // Since we use a TrackingVH for the node, it's easy for clients to
+ // manufacture legitimate situations where they want to replaceAllUsesWith()
+ // on something which, due to uniquing, has merged with the source. We shield
+ // clients from this detail by allowing a node to be "replaced" with itself,
+ // which is treated as a no-op.
+ if (DbgNode != D) {
+ MDNode *Node = const_cast<MDNode*>(DbgNode);
+ const MDNode *DN = D;
+ const Value *V = cast_or_null<Value>(DN);
+ Node->replaceAllUsesWith(const_cast<Value*>(V));
+ MDNode::deleteTemporary(Node);
+ }
+}
+
/// Verify - Verify that a compile unit is well formed.
bool DICompileUnit::Verify() const {
if (!DbgNode)
@@ -297,9 +337,13 @@ bool DIType::Verify() const {
return false;
if (!getContext().Verify())
return false;
-
- DICompileUnit CU = getCompileUnit();
- if (!CU.Verify())
+ unsigned Tag = getTag();
+ if (!isBasicType() && Tag != dwarf::DW_TAG_const_type &&
+ Tag != dwarf::DW_TAG_volatile_type && Tag != dwarf::DW_TAG_pointer_type &&
+ Tag != dwarf::DW_TAG_reference_type && Tag != dwarf::DW_TAG_restrict_type
+ && Tag != dwarf::DW_TAG_vector_type && Tag != dwarf::DW_TAG_array_type
+ && Tag != dwarf::DW_TAG_enumeration_type
+ && getFilename().empty())
return false;
return true;
}
@@ -701,15 +745,13 @@ Constant *DIFactory::GetTagConstant(unsigned TAG) {
/// GetOrCreateArray - Create an descriptor for an array of descriptors.
/// This implicitly uniques the arrays created.
DIArray DIFactory::GetOrCreateArray(DIDescriptor *Tys, unsigned NumTys) {
- SmallVector<Value*, 16> Elts;
-
- if (NumTys == 0)
- Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
- else
- for (unsigned i = 0; i != NumTys; ++i)
- Elts.push_back(Tys[i]);
+ if (NumTys == 0) {
+ Value *Null = llvm::Constant::getNullValue(Type::getInt32Ty(VMContext));
+ return DIArray(MDNode::get(VMContext, &Null, 1));
+ }
- return DIArray(MDNode::get(VMContext,Elts.data(), Elts.size()));
+ SmallVector<Value *, 16> Elts(Tys, Tys+NumTys);
+ return DIArray(MDNode::get(VMContext, Elts.data(), Elts.size()));
}
/// GetOrCreateSubrange - Create a descriptor for a value range. This
@@ -724,7 +766,14 @@ DISubrange DIFactory::GetOrCreateSubrange(int64_t Lo, int64_t Hi) {
return DISubrange(MDNode::get(VMContext, &Elts[0], 3));
}
-
+/// CreateUnspecifiedParameter - Create an unspecified parameter type
+/// descriptor for the subroutine type.
+DIDescriptor DIFactory::CreateUnspecifiedParameter() {
+ Value *Elts[] = {
+ GetTagConstant(dwarf::DW_TAG_unspecified_parameters)
+ };
+ return DIDescriptor(MDNode::get(VMContext, &Elts[0], 1));
+}
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit. Note that this does not unique compile units within the module.
@@ -946,7 +995,6 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
return DICompositeType(Node);
}
-
/// CreateTemporaryType - Create a temporary forward-declared type.
DIType DIFactory::CreateTemporaryType() {
// Give the temporary MDNode a tag. It doesn't matter what tag we
@@ -958,6 +1006,19 @@ DIType DIFactory::CreateTemporaryType() {
return DIType(Node);
}
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIFactory::CreateTemporaryType(DIFile F) {
+ // Give the temporary MDNode a tag. It doesn't matter what tag we
+ // use here as long as DIType accepts it.
+ Value *Elts[] = {
+ GetTagConstant(DW_TAG_base_type),
+ F.getCompileUnit(),
+ NULL,
+ F
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+ return DIType(Node);
+}
/// CreateCompositeType - Create a composite type like array, struct, etc.
DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
@@ -1011,7 +1072,7 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
bool isDefinition,
unsigned VK, unsigned VIndex,
DIType ContainingType,
- bool isArtificial,
+ unsigned Flags,
bool isOptimized,
Function *Fn) {
@@ -1030,7 +1091,7 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
ContainingType,
- ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
Fn
};
@@ -1064,7 +1125,7 @@ DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration){
DeclNode->getOperand(11), // Virtuality
DeclNode->getOperand(12), // VIndex
DeclNode->getOperand(13), // Containting Type
- DeclNode->getOperand(14), // isArtificial
+ DeclNode->getOperand(14), // Flags
DeclNode->getOperand(15), // isOptimized
SPDeclaration.getFunction()
};
@@ -1142,12 +1203,47 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
return DIGlobalVariable(Node);
}
+/// fixupObjcLikeName - Replace the special characters used in typical
+/// Objective-C names with '.' in the given string.
+ for (size_t i = 0, e = Str.size(); i < e; ++i) {
+ char C = Str[i];
+ if (C == '[' || C == ']' || C == ' ' || C == ':' || C == '+' ||
+ C == '(' || C == ')')
+ Str[i] = '.';
+ }
+}
+
+/// getOrInsertFnSpecificMDNode - Return a NamedMDNode that is suitable
+/// to hold function specific information.
+NamedMDNode *llvm::getOrInsertFnSpecificMDNode(Module &M, StringRef FuncName) {
+ SmallString<32> Out;
+ if (FuncName.find('[') == StringRef::npos)
+ return M.getOrInsertNamedMetadata(Twine("llvm.dbg.lv.", FuncName)
+ .toStringRef(Out));
+ std::string Name = FuncName;
+ fixupObjcLikeName(Name);
+ return M.getOrInsertNamedMetadata(Twine("llvm.dbg.lv.", Name)
+ .toStringRef(Out));
+}
+
+/// getFnSpecificMDNode - Return a NamedMDNode, if available, that is
+/// suitable to hold function specific information.
+NamedMDNode *llvm::getFnSpecificMDNode(const Module &M, StringRef FuncName) {
+ if (FuncName.find('[') == StringRef::npos)
+ return M.getNamedMetadata(Twine("llvm.dbg.lv.", FuncName));
+ std::string Name = FuncName;
+ fixupObjcLikeName(Name);
+ return M.getNamedMetadata(Twine("llvm.dbg.lv.", Name));
+}
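For example, an Objective-C method name passes through the fixup before being used as a metadata name (the input value is illustrative):

    std::string Name = "-[NSString initWithCString:]";
    fixupObjcLikeName(Name);
    // Name is now "-.NSString.initWithCString.."
    // so the per-function node is named
    // "llvm.dbg.lv.-.NSString.initWithCString.."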
+
/// CreateVariable - Create a new descriptor for the specified variable.
DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
StringRef Name,
DIFile F,
unsigned LineNo,
- DIType Ty, bool AlwaysPreserve) {
+ DIType Ty, bool AlwaysPreserve,
+ unsigned Flags) {
Value *Elts[] = {
GetTagConstant(Tag),
Context,
@@ -1155,8 +1251,9 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
Ty,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags)
};
- MDNode *Node = MDNode::get(VMContext, &Elts[0], 6);
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 7);
if (AlwaysPreserve) {
// The optimizer may remove local variable. If there is an interest
// to preserve variable info in such situation then stash it in a
@@ -1169,9 +1266,8 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
if (FName.startswith(StringRef(&One, 1)))
FName = FName.substr(1);
- SmallString<32> Out;
- NamedMDNode *FnLocals =
- M.getOrInsertNamedMetadata(Twine("llvm.dbg.lv.", FName).toStringRef(Out));
+
+ NamedMDNode *FnLocals = getOrInsertFnSpecificMDNode(M, FName);
FnLocals->addOperand(Node);
}
return DIVariable(Node);
@@ -1181,21 +1277,20 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
/// CreateComplexVariable - Create a new descriptor for the specified variable
/// which has a complex address expression for its address.
DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
- const std::string &Name,
- DIFile F,
+ StringRef Name, DIFile F,
unsigned LineNo,
- DIType Ty,
- SmallVector<Value *, 9> &addr) {
- SmallVector<Value *, 9> Elts;
+ DIType Ty, Value *const *Addr,
+ unsigned NumAddr) {
+ SmallVector<Value *, 15> Elts;
Elts.push_back(GetTagConstant(Tag));
Elts.push_back(Context);
Elts.push_back(MDString::get(VMContext, Name));
Elts.push_back(F);
Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), LineNo));
Elts.push_back(Ty);
- Elts.insert(Elts.end(), addr.begin(), addr.end());
+ Elts.append(Addr, Addr+NumAddr);
- return DIVariable(MDNode::get(VMContext, &Elts[0], 6+addr.size()));
+ return DIVariable(MDNode::get(VMContext, Elts.data(), Elts.size()));
}
@@ -1309,6 +1404,14 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
return CallInst::Create(ValueFn, Args, Args+3, "", InsertAtEnd);
}
+// RecordType - Record DIType in a module such that it is not lost even if
+// it is not referenced through debug info anchors.
+void DIFactory::RecordType(DIType T) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.ty");
+ NMD->addOperand(T);
+}
+
+
//===----------------------------------------------------------------------===//
// DebugInfoFinder implementations.
//===----------------------------------------------------------------------===//
@@ -1472,89 +1575,6 @@ bool DebugInfoFinder::addSubprogram(DISubprogram SP) {
return true;
}
-/// Find the debug info descriptor corresponding to this global variable.
-static Value *findDbgGlobalDeclare(GlobalVariable *V) {
- const Module *M = V->getParent();
- NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.gv");
- if (!NMD)
- return 0;
-
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- DIDescriptor DIG(cast<MDNode>(NMD->getOperand(i)));
- if (!DIG.isGlobalVariable())
- continue;
- if (DIGlobalVariable(DIG).getGlobal() == V)
- return DIG;
- }
- return 0;
-}
-
-/// Finds the llvm.dbg.declare intrinsic corresponding to this value if any.
-/// It looks through pointer casts too.
-static const DbgDeclareInst *findDbgDeclare(const Value *V) {
- V = V->stripPointerCasts();
-
- if (!isa<Instruction>(V) && !isa<Argument>(V))
- return 0;
-
- const Function *F = NULL;
- if (const Instruction *I = dyn_cast<Instruction>(V))
- F = I->getParent()->getParent();
- else if (const Argument *A = dyn_cast<Argument>(V))
- F = A->getParent();
-
- for (Function::const_iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
- for (BasicBlock::const_iterator BI = (*FI).begin(), BE = (*FI).end();
- BI != BE; ++BI)
- if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
- if (DDI->getAddress() == V)
- return DDI;
-
- return 0;
-}
-
-bool llvm::getLocationInfo(const Value *V, std::string &DisplayName,
- std::string &Type, unsigned &LineNo,
- std::string &File, std::string &Dir) {
- DICompileUnit Unit;
- DIType TypeD;
-
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(const_cast<Value*>(V))) {
- Value *DIGV = findDbgGlobalDeclare(GV);
- if (!DIGV) return false;
- DIGlobalVariable Var(cast<MDNode>(DIGV));
-
- StringRef D = Var.getDisplayName();
- if (!D.empty())
- DisplayName = D;
- LineNo = Var.getLineNumber();
- Unit = Var.getCompileUnit();
- TypeD = Var.getType();
- } else {
- const DbgDeclareInst *DDI = findDbgDeclare(V);
- if (!DDI) return false;
- DIVariable Var(cast<MDNode>(DDI->getVariable()));
-
- StringRef D = Var.getName();
- if (!D.empty())
- DisplayName = D;
- LineNo = Var.getLineNumber();
- Unit = Var.getCompileUnit();
- TypeD = Var.getType();
- }
-
- StringRef T = TypeD.getName();
- if (!T.empty())
- Type = T;
- StringRef F = Unit.getFilename();
- if (!F.empty())
- File = F;
- StringRef D = Unit.getDirectory();
- if (!D.empty())
- Dir = D;
- return true;
-}
-
/// getDISubprogram - Find subprogram that is enclosing this scope.
DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
DIDescriptor D(Scope);
diff --git a/contrib/llvm/lib/Analysis/DomPrinter.cpp b/contrib/llvm/lib/Analysis/DomPrinter.cpp
index 9f34094..cde4314 100644
--- a/contrib/llvm/lib/Analysis/DomPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DomPrinter.cpp
@@ -19,8 +19,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DomPrinter.h"
-
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/DOTGraphTraitsPass.h"
#include "llvm/Analysis/PostDominators.h"
@@ -86,74 +84,90 @@ namespace {
struct DomViewer
: public DOTGraphTraitsViewer<DominatorTree, false> {
static char ID;
- DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){}
+ DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){
+ initializeDomViewerPass(*PassRegistry::getPassRegistry());
+ }
};
struct DomOnlyViewer
: public DOTGraphTraitsViewer<DominatorTree, true> {
static char ID;
- DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){}
+ DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){
+ initializeDomOnlyViewerPass(*PassRegistry::getPassRegistry());
+ }
};
struct PostDomViewer
: public DOTGraphTraitsViewer<PostDominatorTree, false> {
static char ID;
PostDomViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){
+ initializePostDomViewerPass(*PassRegistry::getPassRegistry());
+ }
};
struct PostDomOnlyViewer
: public DOTGraphTraitsViewer<PostDominatorTree, true> {
static char ID;
PostDomOnlyViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){
+ initializePostDomOnlyViewerPass(*PassRegistry::getPassRegistry());
+ }
};
} // end anonymous namespace
char DomViewer::ID = 0;
INITIALIZE_PASS(DomViewer, "view-dom",
- "View dominance tree of function", false, false);
+ "View dominance tree of function", false, false)
char DomOnlyViewer::ID = 0;
INITIALIZE_PASS(DomOnlyViewer, "view-dom-only",
"View dominance tree of function (with no function bodies)",
- false, false);
+ false, false)
char PostDomViewer::ID = 0;
INITIALIZE_PASS(PostDomViewer, "view-postdom",
- "View postdominance tree of function", false, false);
+ "View postdominance tree of function", false, false)
char PostDomOnlyViewer::ID = 0;
INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
"View postdominance tree of function "
"(with no function bodies)",
- false, false);
+ false, false)
namespace {
struct DomPrinter
: public DOTGraphTraitsPrinter<DominatorTree, false> {
static char ID;
- DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {}
+ DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {
+ initializeDomPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
struct DomOnlyPrinter
: public DOTGraphTraitsPrinter<DominatorTree, true> {
static char ID;
- DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {}
+ DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {
+ initializeDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
struct PostDomPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, false> {
static char ID;
PostDomPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {
+ initializePostDomPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
struct PostDomOnlyPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, true> {
static char ID;
PostDomOnlyPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {
+ initializePostDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
} // end anonymous namespace
@@ -162,24 +176,24 @@ struct PostDomOnlyPrinter
char DomPrinter::ID = 0;
INITIALIZE_PASS(DomPrinter, "dot-dom",
"Print dominance tree of function to 'dot' file",
- false, false);
+ false, false)
char DomOnlyPrinter::ID = 0;
INITIALIZE_PASS(DomOnlyPrinter, "dot-dom-only",
"Print dominance tree of function to 'dot' file "
"(with no function bodies)",
- false, false);
+ false, false)
char PostDomPrinter::ID = 0;
INITIALIZE_PASS(PostDomPrinter, "dot-postdom",
"Print postdominance tree of function to 'dot' file",
- false, false);
+ false, false)
char PostDomOnlyPrinter::ID = 0;
INITIALIZE_PASS(PostDomOnlyPrinter, "dot-postdom-only",
"Print postdominance tree of function to 'dot' file "
"(with no function bodies)",
- false, false);
+ false, false)
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
diff --git a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
new file mode 100644
index 0000000..6de4e1e
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -0,0 +1,137 @@
+//===- DominanceFrontier.cpp - Dominance Frontier Calculation -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+char DominanceFrontier::ID = 0;
+INITIALIZE_PASS_BEGIN(DominanceFrontier, "domfrontier",
+ "Dominance Frontier Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(DominanceFrontier, "domfrontier",
+ "Dominance Frontier Construction", true, true)
+
+namespace {
+ class DFCalculateWorkObject {
+ public:
+ DFCalculateWorkObject(BasicBlock *B, BasicBlock *P,
+ const DomTreeNode *N,
+ const DomTreeNode *PN)
+ : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
+ BasicBlock *currentBB;
+ BasicBlock *parentBB;
+ const DomTreeNode *Node;
+ const DomTreeNode *parentNode;
+ };
+}
+
+const DominanceFrontier::DomSetType &
+DominanceFrontier::calculate(const DominatorTree &DT,
+ const DomTreeNode *Node) {
+ BasicBlock *BB = Node->getBlock();
+ DomSetType *Result = NULL;
+
+ std::vector<DFCalculateWorkObject> workList;
+ SmallPtrSet<BasicBlock *, 32> visited;
+
+ workList.push_back(DFCalculateWorkObject(BB, NULL, Node, NULL));
+ do {
+ DFCalculateWorkObject *currentW = &workList.back();
+ assert (currentW && "Missing work object.");
+
+ BasicBlock *currentBB = currentW->currentBB;
+ BasicBlock *parentBB = currentW->parentBB;
+ const DomTreeNode *currentNode = currentW->Node;
+ const DomTreeNode *parentNode = currentW->parentNode;
+ assert (currentBB && "Invalid work object. Missing current Basic Block");
+ assert (currentNode && "Invalid work object. Missing current Node");
+ DomSetType &S = Frontiers[currentBB];
+
+ // Visit each block only once.
+ if (visited.count(currentBB) == 0) {
+ visited.insert(currentBB);
+
+ // Loop over CFG successors to calculate DFlocal[currentNode]
+ for (succ_iterator SI = succ_begin(currentBB), SE = succ_end(currentBB);
+ SI != SE; ++SI) {
+ // Does Node immediately dominate this successor?
+ if (DT[*SI]->getIDom() != currentNode)
+ S.insert(*SI);
+ }
+ }
+
+ // At this point, S is DFlocal. Now we union in DFup's of our children...
+ // Loop through and visit the nodes that Node immediately dominates (Node's
+ // children in the IDomTree)
+ bool visitChild = false;
+ for (DomTreeNode::const_iterator NI = currentNode->begin(),
+ NE = currentNode->end(); NI != NE; ++NI) {
+ DomTreeNode *IDominee = *NI;
+ BasicBlock *childBB = IDominee->getBlock();
+ if (visited.count(childBB) == 0) {
+ workList.push_back(DFCalculateWorkObject(childBB, currentBB,
+ IDominee, currentNode));
+ visitChild = true;
+ }
+ }
+
+ // If no unvisited children remain, this block's frontier is complete;
+ // merge it into the parent's frontier and pop it from the workList.
+ if (!visitChild) {
+
+ if (!parentBB) {
+ Result = &S;
+ break;
+ }
+
+ DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
+ DomSetType &parentSet = Frontiers[parentBB];
+ for (; CDFI != CDFE; ++CDFI) {
+ if (!DT.properlyDominates(parentNode, DT[*CDFI]))
+ parentSet.insert(*CDFI);
+ }
+ workList.pop_back();
+ }
+
+ } while (!workList.empty());
+
+ return *Result;
+}
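A consumer would typically request the analysis through the pass manager. A minimal sketch follows; the pass name and body are hypothetical, and registration boilerplate is omitted:

    struct DFUser : public FunctionPass {
      static char ID;
      DFUser() : FunctionPass(ID) {}
      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.addRequired<DominanceFrontier>();
        AU.setPreservesAll();
      }
      virtual bool runOnFunction(Function &F) {
        DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
        // For a diamond CFG entry->{B,C}->exit, DF(B) == DF(C) == {exit}.
        DominanceFrontier::iterator I = DF.find(&F.getEntryBlock());
        (void)I; // the entry block's frontier is always empty
        return false;
      }
    };
    char DFUser::ID = 0;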
+
+void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ OS << " DomFrontier for BB ";
+ if (I->first)
+ WriteAsOperand(OS, I->first, false);
+ else
+ OS << " <<exit node>>";
+ OS << " is:\t";
+
+ const std::set<BasicBlock*> &BBs = I->second;
+
+ for (std::set<BasicBlock*>::const_iterator I = BBs.begin(), E = BBs.end();
+ I != E; ++I) {
+ OS << ' ';
+ if (*I)
+ WriteAsOperand(OS, *I, false);
+ else
+ OS << "<<exit node>>";
+ }
+ OS << "\n";
+ }
+}
+
+void DominanceFrontierBase::dump() const {
+ print(dbgs());
+}
+
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index b363528..690c4b4 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -43,7 +43,9 @@ class BasicCallGraph : public ModulePass, public CallGraph {
public:
static char ID; // Class identification, replacement for typeinfo
BasicCallGraph() : ModulePass(ID), Root(0),
- ExternalCallingNode(0), CallsExternalNode(0) {}
+ ExternalCallingNode(0), CallsExternalNode(0) {
+ initializeBasicCallGraphPass(*PassRegistry::getPassRegistry());
+ }
// runOnModule - Compute the call graph for the specified module.
virtual bool runOnModule(Module &M) {
@@ -171,9 +173,9 @@ private:
} //End anonymous namespace
-static RegisterAnalysisGroup<CallGraph> X("Call Graph");
+INITIALIZE_ANALYSIS_GROUP(CallGraph, "Call Graph", BasicCallGraph)
INITIALIZE_AG_PASS(BasicCallGraph, CallGraph, "basiccg",
- "Basic CallGraph Construction", false, true, true);
+ "Basic CallGraph Construction", false, true, true)
char CallGraph::ID = 0;
char BasicCallGraph::ID = 0;
@@ -228,6 +230,21 @@ Function *CallGraph::removeFunctionFromModule(CallGraphNode *CGN) {
return F;
}
+/// spliceFunction - Replace the function represented by this node by another.
+/// This does not rescan the body of the function, so it is suitable when
+/// splicing the body of the old function to the new while also updating all
+/// callers from old to new.
+///
+void CallGraph::spliceFunction(const Function *From, const Function *To) {
+ assert(FunctionMap.count(From) && "No CallGraphNode for function!");
+ assert(!FunctionMap.count(To) &&
+ "Pointing CallGraphNode at a function that already exists");
+ FunctionMapTy::iterator I = FunctionMap.find(From);
+ I->second->F = const_cast<Function*>(To);
+ FunctionMap[To] = I->second;
+ FunctionMap.erase(I);
+}
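A sketch of the intended caller, modeled on transforms that rebuild a function with a new signature; NewFnTy, the CallGraph reference CG, and the call-site rewriting are assumed or elided:

    Function *NF = Function::Create(NewFnTy, F->getLinkage(), F->getName());
    F->getParent()->getFunctionList().insert(F, NF);  // place NF next to F
    NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());
    CG.spliceFunction(F, NF);  // the old CallGraphNode now represents NF
    // ... rewrite call sites to use NF, then erase the now-empty F ...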
+
// getOrInsertFunction - This method is identical to calling operator[], but
// it will insert a new CallGraphNode for the specified function if one does
// not already exist.
@@ -274,7 +291,6 @@ void CallGraphNode::removeCallEdgeFor(CallSite CS) {
}
}
-
// removeAnyCallEdgeTo - This method removes any call edges from this node to
// the specified callee function. This takes more time to execute than
// removeCallEdgeTo, so it should not be used unless necessary.
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
index b7a27cb..725ab72 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -582,7 +582,6 @@ namespace {
public:
static char ID;
- PrintCallGraphPass() : CallGraphSCCPass(ID), Out(dbgs()) {}
PrintCallGraphPass(const std::string &B, raw_ostream &o)
: CallGraphSCCPass(ID), Banner(B), Out(o) {}
diff --git a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
index 8eed9d6..06ae34c 100644
--- a/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -24,7 +24,7 @@ using namespace llvm;
char FindUsedTypes::ID = 0;
INITIALIZE_PASS(FindUsedTypes, "print-used-types",
- "Find Used Types", false, true);
+ "Find Used Types", false, true)
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 6759b0a..116aaf4 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -24,6 +24,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -88,7 +89,9 @@ namespace {
public:
static char ID;
- GlobalsModRef() : ModulePass(ID) {}
+ GlobalsModRef() : ModulePass(ID) {
+ initializeGlobalsModRefPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
@@ -106,10 +109,9 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ AliasResult alias(const Location &LocA, const Location &LocB);
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size);
+ const Location &Loc);
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1, CS2);
@@ -119,32 +121,38 @@ namespace {
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
ModRefBehavior getModRefBehavior(const Function *F) {
+ ModRefBehavior Min = UnknownModRefBehavior;
+
if (FunctionRecord *FR = getFunctionInfo(F)) {
if (FR->FunctionEffect == 0)
- return DoesNotAccessMemory;
+ Min = DoesNotAccessMemory;
else if ((FR->FunctionEffect & Mod) == 0)
- return OnlyReadsMemory;
+ Min = OnlyReadsMemory;
}
- return AliasAnalysis::getModRefBehavior(F);
+
+ return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
- const Function* F = CS.getCalledFunction();
- if (!F) return AliasAnalysis::getModRefBehavior(CS);
- if (FunctionRecord *FR = getFunctionInfo(F)) {
- if (FR->FunctionEffect == 0)
- return DoesNotAccessMemory;
- else if ((FR->FunctionEffect & Mod) == 0)
- return OnlyReadsMemory;
- }
- return AliasAnalysis::getModRefBehavior(CS);
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ if (const Function* F = CS.getCalledFunction())
+ if (FunctionRecord *FR = getFunctionInfo(F)) {
+ if (FR->FunctionEffect == 0)
+ Min = DoesNotAccessMemory;
+ else if ((FR->FunctionEffect & Mod) == 0)
+ Min = OnlyReadsMemory;
+ }
+
+ return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
virtual void deleteValue(Value *V);
virtual void copyValue(Value *From, Value *To);
+ virtual void addEscapingUse(Use &U);
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -177,9 +185,13 @@ namespace {
}
char GlobalsModRef::ID = 0;
-INITIALIZE_AG_PASS(GlobalsModRef, AliasAnalysis,
+INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis,
"globalsmodref-aa", "Simple mod/ref analysis for globals",
- false, true, false);
+ false, true, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_AG_PASS_END(GlobalsModRef, AliasAnalysis,
+ "globalsmodref-aa", "Simple mod/ref analysis for globals",
+ false, true, false)
Pass *llvm::createGlobalsModRefPass() { return new GlobalsModRef(); }
@@ -314,7 +326,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
continue;
// Check the value being stored.
- Value *Ptr = SI->getOperand(0)->getUnderlyingObject();
+ Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
if (isMalloc(Ptr)) {
// Okay, easy case.
@@ -476,11 +488,11 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
/// other is some random pointer, we know there cannot be an alias, because the
/// address of the global isn't taken.
AliasAnalysis::AliasResult
-GlobalsModRef::alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
+GlobalsModRef::alias(const Location &LocA,
+ const Location &LocB) {
// Get the base object these pointers point to.
- const Value *UV1 = V1->getUnderlyingObject();
- const Value *UV2 = V2->getUnderlyingObject();
+ const Value *UV1 = GetUnderlyingObject(LocA.Ptr);
+ const Value *UV2 = GetUnderlyingObject(LocB.Ptr);
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
@@ -528,17 +540,18 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
if ((GV1 || GV2) && GV1 != GV2)
return NoAlias;
- return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
+ return AliasAnalysis::alias(LocA, LocB);
}
AliasAnalysis::ModRefResult
GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
+ const Location &Loc) {
unsigned Known = ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
+ if (const GlobalValue *GV =
+ dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr)))
if (GV->hasLocalLinkage())
if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))
@@ -547,7 +560,7 @@ GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
if (Known == NoModRef)
return NoModRef; // No need to query other mod/ref analyses
- return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, P, Size));
+ return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, Loc));
}
@@ -584,3 +597,13 @@ void GlobalsModRef::deleteValue(Value *V) {
void GlobalsModRef::copyValue(Value *From, Value *To) {
AliasAnalysis::copyValue(From, To);
}
+
+void GlobalsModRef::addEscapingUse(Use &U) {
+ // For the purposes of this analysis, it is conservatively correct to treat
+ // a newly escaping value equivalently to a deleted one. We could perhaps
+ // be more precise by processing the new use and attempting to update our
+ // saved analysis results to accommodate it.
+ deleteValue(U);
+
+ AliasAnalysis::addEscapingUse(U);
+}
diff --git a/contrib/llvm/lib/Analysis/IPA/IPA.cpp b/contrib/llvm/lib/Analysis/IPA/IPA.cpp
new file mode 100644
index 0000000..0ba2e04
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/IPA/IPA.cpp
@@ -0,0 +1,29 @@
+//===-- IPA.cpp -----------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the common initialization routines for the IPA library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InitializePasses.h"
+#include "llvm-c/Initialization.h"
+
+using namespace llvm;
+
+/// initializeIPA - Initialize all passes linked into the IPA library.
+void llvm::initializeIPA(PassRegistry &Registry) {
+ initializeBasicCallGraphPass(Registry);
+ initializeCallGraphAnalysisGroup(Registry);
+ initializeFindUsedTypesPass(Registry);
+ initializeGlobalsModRefPass(Registry);
+}
+
+void LLVMInitializeIPA(LLVMPassRegistryRef R) {
+ initializeIPA(*unwrap(R));
+}
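A host tool that constructs IPA passes by hand would run this once before creating them; a minimal sketch:

    PassRegistry &Registry = *PassRegistry::getPassRegistry();
    initializeIPA(Registry);  // safe to call before building any IPA pass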
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index cdf667a..c838218 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -28,7 +29,13 @@
using namespace llvm;
char IVUsers::ID = 0;
-INITIALIZE_PASS(IVUsers, "iv-users", "Induction Variable Users", false, true);
+INITIALIZE_PASS_BEGIN(IVUsers, "iv-users",
+ "Induction Variable Users", false, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(IVUsers, "iv-users",
+ "Induction Variable Users", false, true)
Pass *llvm::createIVUsersPass() {
return new IVUsers();
@@ -143,7 +150,8 @@ IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
}
IVUsers::IVUsers()
- : LoopPass(ID) {
+ : LoopPass(ID) {
+ initializeIVUsersPass(*PassRegistry::getPassRegistry());
}
void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index 3e550f3..47f91cf 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -16,97 +16,8 @@
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallPtrSet.h"
-using namespace llvm;
-
-// CountCodeReductionForConstant - Figure out an approximation for how many
-// instructions will be constant folded if the specified value is constant.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
-CountCodeReductionForConstant(Value *V) {
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- User *U = *UI;
- if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
- // We will be able to eliminate all but one of the successors.
- const TerminatorInst &TI = cast<TerminatorInst>(*U);
- const unsigned NumSucc = TI.getNumSuccessors();
- unsigned Instrs = 0;
- for (unsigned I = 0; I != NumSucc; ++I)
- Instrs += Metrics.NumBBInsts[TI.getSuccessor(I)];
- // We don't know which blocks will be eliminated, so use the average size.
- Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
- } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
- // Turning an indirect call into a direct call is a BIG win
- if (CI->getCalledValue() == V)
- Reduction += InlineConstants::IndirectCallBonus;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
- // Turning an indirect call into a direct call is a BIG win
- if (II->getCalledValue() == V)
- Reduction += InlineConstants::IndirectCallBonus;
- } else {
- // Figure out if this instruction will be removed due to simple constant
- // propagation.
- Instruction &Inst = cast<Instruction>(*U);
-
- // We can't constant propagate instructions which have effects or
- // read memory.
- //
- // FIXME: It would be nice to capture the fact that a load from a
- // pointer-to-constant-global is actually a *really* good thing to zap.
- // Unfortunately, we don't know the pointer that may get propagated here,
- // so we can't make this decision.
- if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
- isa<AllocaInst>(Inst))
- continue;
-
- bool AllOperandsConstant = true;
- for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
- if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
- AllOperandsConstant = false;
- break;
- }
- if (AllOperandsConstant) {
- // We will get to remove this instruction...
- Reduction += InlineConstants::InstrCost;
-
- // And any other instructions that use it which become constants
- // themselves.
- Reduction += CountCodeReductionForConstant(&Inst);
- }
- }
- }
- return Reduction;
-}
-
-// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
-// the function will be if it is inlined into a context where an argument
-// becomes an alloca.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
- CountCodeReductionForAlloca(Value *V) {
- if (!V->getType()->isPointerTy()) return 0; // Not a pointer
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *I = cast<Instruction>(*UI);
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += InlineConstants::InstrCost;
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has variable indices, we won't be able to do much with it.
- if (GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP);
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
- // Track pointer through bitcasts.
- Reduction += CountCodeReductionForAlloca(BCI);
- } else {
- // If there is some other strange instruction, we're not going to be able
- // to do much if we inline this.
- return 0;
- }
- }
-
- return Reduction;
-}
+using namespace llvm;
/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small return true.
@@ -160,6 +71,12 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
if (const Function *F = CS.getCalledFunction()) {
+ // If a function is both internal and has a single use, then it is
+ // extremely likely to get inlined in the future (it was probably
+ // exposed by an interleaved devirtualization pass).
+ if (F->hasInternalLinkage() && F->hasOneUse())
+ ++NumInlineCandidates;
+
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
callsSetJmp = true;
@@ -226,6 +143,86 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
+// CountCodeReductionForConstant - Figure out an approximation for how many
+// instructions will be constant folded if the specified value is constant.
+//
+unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
+ unsigned Reduction = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ User *U = *UI;
+ if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
+ // We will be able to eliminate all but one of the successors.
+ const TerminatorInst &TI = cast<TerminatorInst>(*U);
+ const unsigned NumSucc = TI.getNumSuccessors();
+ unsigned Instrs = 0;
+ for (unsigned I = 0; I != NumSucc; ++I)
+ Instrs += NumBBInsts[TI.getSuccessor(I)];
+ // We don't know which blocks will be eliminated, so use the average size.
+ Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
+ } else {
+ // Figure out if this instruction will be removed due to simple constant
+ // propagation.
+ Instruction &Inst = cast<Instruction>(*U);
+
+ // We can't constant propagate instructions which have effects or
+ // read memory.
+ //
+ // FIXME: It would be nice to capture the fact that a load from a
+ // pointer-to-constant-global is actually a *really* good thing to zap.
+ // Unfortunately, we don't know the pointer that may get propagated here,
+ // so we can't make this decision.
+ if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+ isa<AllocaInst>(Inst))
+ continue;
+
+ bool AllOperandsConstant = true;
+ for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+ if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+ AllOperandsConstant = false;
+ break;
+ }
+
+ if (AllOperandsConstant) {
+ // We will get to remove this instruction...
+ Reduction += InlineConstants::InstrCost;
+
+ // And any other instructions that use it which become constants
+ // themselves.
+ Reduction += CountCodeReductionForConstant(&Inst);
+ }
+ }
+ }
+ return Reduction;
+}
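To make the averaging step concrete: for a two-way branch whose successors hold 12 and 8 instructions, and assuming an InlineConstants::InstrCost of 5 (the value is an assumption for this illustration):

    // Instrs = 12 + 8 = 20, NumSucc = 2
    // Reduction += 5 * 20 * (2 - 1) / 2 = 50
    // i.e. on average, folding the branch erases one of the two successors.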
+
+// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
+// the function will be if it is inlined into a context where an argument
+// becomes an alloca.
+//
+unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
+ if (!V->getType()->isPointerTy()) return 0; // Not a pointer
+ unsigned Reduction = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ Instruction *I = cast<Instruction>(*UI);
+ if (isa<LoadInst>(I) || isa<StoreInst>(I))
+ Reduction += InlineConstants::InstrCost;
+ else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has variable indices, we won't be able to do much with it.
+ if (GEP->hasAllConstantIndices())
+ Reduction += CountCodeReductionForAlloca(GEP);
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+ // Track pointer through bitcasts.
+ Reduction += CountCodeReductionForAlloca(BCI);
+ } else {
+ // If there is some other strange instruction, we're not going to be able
+ // to do much if we inline this.
+ return 0;
+ }
+ }
+
+ return Reduction;
+}
+
/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F) {
@@ -245,76 +242,246 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
if (Metrics.NumRets==1)
--Metrics.NumInsts;
- // Don't bother calculating argument weights if we are never going to inline
- // the function anyway.
- if (NeverInline())
- return;
-
// Check out all of the arguments to the function, figuring out how much
// code can be eliminated if one of the arguments is a constant.
ArgumentWeights.reserve(F->arg_size());
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
- ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
- CountCodeReductionForAlloca(I)));
+ ArgumentWeights.push_back(ArgInfo(Metrics.CountCodeReductionForConstant(I),
+ Metrics.CountCodeReductionForAlloca(I)));
}
/// NeverInline - returns true if the function should never be inlined into
/// any caller
-bool InlineCostAnalyzer::FunctionInfo::NeverInline()
-{
+bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
return (Metrics.callsSetJmp || Metrics.isRecursive ||
Metrics.containsIndirectBr);
+}
+// getSpecializationBonus - The heuristic used to determine the per-call
+// performance boost for using a specialization of Callee with the arguments
+// listed in SpecializedArgNos replaced by constants.
+int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
+ SmallVectorImpl<unsigned> &SpecializedArgNos) {
+ if (Callee->mayBeOverridden())
+ return 0;
+
+ int Bonus = 0;
+ // If this function uses the coldcc calling convention, prefer not to
+ // specialize it.
+ if (Callee->getCallingConv() == CallingConv::Cold)
+ Bonus -= InlineConstants::ColdccPenalty;
+
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
+ unsigned ArgNo = 0;
+ unsigned i = 0;
+ for (Function::arg_iterator I = Callee->arg_begin(), E = Callee->arg_end();
+ I != E; ++I, ++ArgNo)
+    // Don't run off the end of SpecializedArgNos once every specialized
+    // argument has been seen.
+    if (i < SpecializedArgNos.size() && ArgNo == SpecializedArgNos[i]) {
+ ++i;
+ Bonus += CountBonusForConstant(I);
+ }
+
+ // Calls usually take a long time, so they make the specialization gain
+ // smaller.
+ Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+
+ return Bonus;
}
-// getInlineCost - The heuristic used to determine if we should inline the
-// function call or not.
-//
-InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- SmallPtrSet<const Function*, 16> &NeverInline) {
- return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
+
+// ConstantFunctionBonus - Figure out how much of a bonus we can get for
+// possibly devirtualizing a function. We add the inline size of the function
+// we may wish to inline to the (negative) indirect call bonus, which limits
+// growth. The bonus is capped at 0 so that declining to reward
+// devirtualization never turns into a penalty against inlining.
+int InlineCostAnalyzer::ConstantFunctionBonus(CallSite CS, Constant *C) {
+
+ // This could just be NULL.
+ if (!C) return 0;
+
+ Function *F = dyn_cast<Function>(C);
+ if (!F) return 0;
+
+ int Bonus = InlineConstants::IndirectCallBonus + getInlineSize(CS, F);
+ return (Bonus > 0) ? 0 : Bonus;
}
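+// Worked example with illustrative numbers: if IndirectCallBonus were -100
+// and getInlineSize returned 60, the net bonus would be -40; were the size
+// 150 instead, the positive result would be clamped to 0 rather than
+// becoming a penalty.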
-InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- Function *Callee,
- SmallPtrSet<const Function*, 16> &NeverInline) {
- Instruction *TheCall = CS.getInstruction();
- Function *Caller = TheCall->getParent()->getParent();
- bool isDirectCall = CS.getCalledFunction() == Callee;
+// CountBonusForConstant - Figure out an approximation for how much per-call
+// performance boost we can expect if the specified value is constant.
+int InlineCostAnalyzer::CountBonusForConstant(Value *V, Constant *C) {
+ unsigned Bonus = 0;
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ User *U = *UI;
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
+ // Turning an indirect call into a direct call is a BIG win
+ if (CI->getCalledValue() == V)
+ Bonus += ConstantFunctionBonus(CallSite(CI), C);
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+ // Turning an indirect call into a direct call is a BIG win
+ if (II->getCalledValue() == V)
+ Bonus += ConstantFunctionBonus(CallSite(II), C);
+ }
+ // FIXME: Eliminating conditional branches and switches should
+ // also yield a per-call performance boost.
+ else {
+      // Figure out the bonuses that will accrue due to simple constant
+ // propagation.
+ Instruction &Inst = cast<Instruction>(*U);
- // Don't inline functions which can be redefined at link-time to mean
- // something else. Don't inline functions marked noinline or call sites
- // marked noinline.
- if (Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
- CS.isNoInline())
- return llvm::InlineCost::getNever();
+ // We can't constant propagate instructions which have effects or
+ // read memory.
+ //
+ // FIXME: It would be nice to capture the fact that a load from a
+ // pointer-to-constant-global is actually a *really* good thing to zap.
+ // Unfortunately, we don't know the pointer that may get propagated here,
+ // so we can't make this decision.
+ if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+ isa<AllocaInst>(Inst))
+ continue;
+ bool AllOperandsConstant = true;
+ for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+ if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+ AllOperandsConstant = false;
+ break;
+ }
+
+ if (AllOperandsConstant)
+ Bonus += CountBonusForConstant(&Inst);
+ }
+ }
+
+ return Bonus;
+}
+
+int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
+
// InlineCost - This value measures how good of an inline candidate this call
  // site is to inline. A lower inline cost makes it more likely for the call to
// be inlined. This value may go negative.
//
int InlineCost = 0;
+ // Compute any size reductions we can expect due to arguments being passed into
+ // the function.
+ //
+ unsigned ArgNo = 0;
+ CallSite::arg_iterator I = CS.arg_begin();
+ for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
+ FI != FE; ++I, ++FI, ++ArgNo) {
+
+ // If an alloca is passed in, inlining this function is likely to allow
+ // significant future optimization possibilities (like scalar promotion, and
+ // scalarization), so encourage the inlining of the function.
+ //
+ if (isa<AllocaInst>(I))
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
+
+ // If this is a constant being passed into the function, use the argument
+ // weights calculated for the callee to determine how much will be folded
+ // away with this information.
+ else if (isa<Constant>(I))
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
+ }
+
+ // Each argument passed in has a cost at both the caller and the callee
+ // sides. Measurements show that each argument costs about the same as an
+ // instruction.
+ InlineCost -= (CS.arg_size() * InlineConstants::InstrCost);
+
+ // Now that we have considered all of the factors that make the call site more
+ // likely to be inlined, look at factors that make us not want to inline it.
+
+ // Calls usually take a long time, so they make the inlining gain smaller.
+ InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+
+ // Look at the size of the callee. Each instruction counts as 5.
+ InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
+
+ return InlineCost;
+}
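+// A rough worked example: a callee of 20 instructions that makes 2 calls and
+// takes 2 arguments, one a constant whose ConstantWeight is 30, costs
+// -30 - 2*InstrCost + 2*CallPenalty + 20*InstrCost, i.e. 60 + 2*CallPenalty
+// at InstrCost = 5. Alloca arguments would lower this further.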
+
+int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
+
+ bool isDirectCall = CS.getCalledFunction() == Callee;
+ Instruction *TheCall = CS.getInstruction();
+ int Bonus = 0;
+
// If there is only one call of the function, and it has internal linkage,
// make it almost guaranteed to be inlined.
//
if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
- InlineCost += InlineConstants::LastCallToStaticBonus;
-
- // If this function uses the coldcc calling convention, prefer not to inline
- // it.
- if (Callee->getCallingConv() == CallingConv::Cold)
- InlineCost += InlineConstants::ColdccPenalty;
+ Bonus += InlineConstants::LastCallToStaticBonus;
// If the instruction after the call, or if the normal destination of the
// invoke is an unreachable instruction, the function is noreturn. As such,
// there is little point in inlining this.
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
if (isa<UnreachableInst>(II->getNormalDest()->begin()))
- InlineCost += InlineConstants::NoreturnPenalty;
+ Bonus += InlineConstants::NoreturnPenalty;
} else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
- InlineCost += InlineConstants::NoreturnPenalty;
+ Bonus += InlineConstants::NoreturnPenalty;
+
+ // If this function uses the coldcc calling convention, prefer not to inline
+ // it.
+ if (Callee->getCallingConv() == CallingConv::Cold)
+ Bonus += InlineConstants::ColdccPenalty;
+ // Add to the inline quality for properties that make the call valuable to
+ // inline. This includes factors that indicate that the result of inlining
+ // the function will be optimizable. Currently this just looks at arguments
+ // passed into the function.
+ //
+ CallSite::arg_iterator I = CS.arg_begin();
+ for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
+ FI != FE; ++I, ++FI)
+ // Compute any constant bonus due to inlining we want to give here.
+ if (isa<Constant>(I))
+ Bonus += CountBonusForConstant(FI, cast<Constant>(I));
+
+ return Bonus;
+}
+
+// getInlineCost - The heuristic used to determine if we should inline the
+// function call or not.
+//
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
+ SmallPtrSet<const Function*, 16> &NeverInline) {
+ return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
+}
+
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
+ Function *Callee,
+ SmallPtrSet<const Function*, 16> &NeverInline) {
+ Instruction *TheCall = CS.getInstruction();
+ Function *Caller = TheCall->getParent()->getParent();
+
+ // Don't inline functions which can be redefined at link-time to mean
+ // something else. Don't inline functions marked noinline or call sites
+ // marked noinline.
+ if (Callee->mayBeOverridden() ||
+ Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
+ CS.isNoInline())
+ return llvm::InlineCost::getNever();
+
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
@@ -353,46 +520,45 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
return InlineCost::getNever();
}
- // Add to the inline quality for properties that make the call valuable to
- // inline. This includes factors that indicate that the result of inlining
- // the function will be optimizable. Currently this just looks at arguments
- // passed into the function.
+ // InlineCost - This value measures how good of an inline candidate this call
+  // site is to inline. A lower inline cost makes it more likely for the call
+  // to be inlined. This value may go negative, since bonuses are negative
+  // numbers.
//
- unsigned ArgNo = 0;
- for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
- I != E; ++I, ++ArgNo) {
- // Each argument passed in has a cost at both the caller and the callee
- // sides. Measurements show that each argument costs about the same as an
- // instruction.
- InlineCost -= InlineConstants::InstrCost;
+ int InlineCost = getInlineSize(CS, Callee) + getInlineBonuses(CS, Callee);
+ return llvm::InlineCost::get(InlineCost);
+}
- // If an alloca is passed in, inlining this function is likely to allow
- // significant future optimization possibilities (like scalar promotion, and
- // scalarization), so encourage the inlining of the function.
- //
- if (isa<AllocaInst>(I)) {
- if (ArgNo < CalleeFI->ArgumentWeights.size())
- InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
-
- // If this is a constant being passed into the function, use the argument
- // weights calculated for the callee to determine how much will be folded
- // away with this information.
- } else if (isa<Constant>(I)) {
- if (ArgNo < CalleeFI->ArgumentWeights.size())
- InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
- }
- }
+// getSpecializationCost - The heuristic used to determine the code-size
+// impact of creating a specialized version of Callee with argument
+// SpecializedArgNo replaced by a constant.
+InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
+ SmallVectorImpl<unsigned> &SpecializedArgNos) {
+ // Don't specialize functions which can be redefined at link-time to mean
+ // something else.
+ if (Callee->mayBeOverridden())
+ return llvm::InlineCost::getNever();
- // Now that we have considered all of the factors that make the call site more
- // likely to be inlined, look at factors that make us not want to inline it.
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+
+ // If we haven't calculated this information yet, do so now.
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
- // Calls usually take a long time, so they make the inlining gain smaller.
- InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+ int Cost = 0;
+
+  // Look at the original size of the callee. Each instruction counts as 5.
+ Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
- // Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
+ // Offset that with the amount of code that can be constant-folded
+ // away with the given arguments replaced by constants.
+ for (SmallVectorImpl<unsigned>::iterator an = SpecializedArgNos.begin(),
+ ae = SpecializedArgNos.end(); an != ae; ++an)
+ Cost -= CalleeFI->ArgumentWeights[*an].ConstantWeight;
- return llvm::InlineCost::get(InlineCost);
+ return llvm::InlineCost::get(Cost);
}
// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
diff --git a/contrib/llvm/lib/Analysis/InstCount.cpp b/contrib/llvm/lib/Analysis/InstCount.cpp
index dcbcac0..3b385d2 100644
--- a/contrib/llvm/lib/Analysis/InstCount.cpp
+++ b/contrib/llvm/lib/Analysis/InstCount.cpp
@@ -51,7 +51,9 @@ namespace {
}
public:
static char ID; // Pass identification, replacement for typeid
- InstCount() : FunctionPass(ID) {}
+ InstCount() : FunctionPass(ID) {
+ initializeInstCountPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -65,7 +67,7 @@ namespace {
char InstCount::ID = 0;
INITIALIZE_PASS(InstCount, "instcount",
- "Counts the various types of Instructions", false, true);
+ "Counts the various types of Instructions", false, true)
FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 24cd343..a2f9862 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -8,179 +8,1267 @@
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
-// that do not require creating new instructions. For example, this does
-// constant folding, and can handle identities like (X&0)->0.
+// that do not require creating new instructions. This does constant folding
+// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
+// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
+// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
+// simplified: This is usually true and assuming it simplifies the logic (if
+// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "instsimplify"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Support/ValueHandle.h"
-#include "llvm/Instructions.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/PatternMatch.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Target/TargetData.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+enum { RecursionLimit = 3 };
+
+STATISTIC(NumExpand, "Number of expansions");
+STATISTIC(NumFactor, "Number of factorizations");
+STATISTIC(NumReassoc, "Number of reassociations");
+
+static Value *SimplifyAndInst(Value *, Value *, const TargetData *,
+ const DominatorTree *, unsigned);
+static Value *SimplifyBinOp(unsigned, Value *, Value *, const TargetData *,
+ const DominatorTree *, unsigned);
+static Value *SimplifyCmpInst(unsigned, Value *, Value *, const TargetData *,
+ const DominatorTree *, unsigned);
+static Value *SimplifyOrInst(Value *, Value *, const TargetData *,
+ const DominatorTree *, unsigned);
+static Value *SimplifyXorInst(Value *, Value *, const TargetData *,
+ const DominatorTree *, unsigned);
+
+/// ValueDominatesPHI - Does the given value dominate the specified phi node?
+static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (!I)
+ // Arguments and constants dominate all instructions.
+ return true;
+
+ // If we have a DominatorTree then do a precise test.
+ if (DT)
+ return DT->dominates(I, P);
+
+ // Otherwise, if the instruction is in the entry block, and is not an invoke,
+ // then it obviously dominates all phi nodes.
+ if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
+ !isa<InvokeInst>(I))
+ return true;
+
+ return false;
+}
+
+/// ExpandBinOp - Simplify "A op (B op' C)" by distributing op over op', turning
+/// it into "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
+/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
+/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
+/// Returns the simplified value, or null if no simplification was performed.
+static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ unsigned OpcToExpand, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ // Check whether the expression has the form "(A op' B) op C".
+ if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
+ if (Op0->getOpcode() == OpcodeToExpand) {
+ // It does! Try turning it into "(A op C) op' (B op C)".
+ Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
+ // Do "A op C" and "B op C" both simplify?
+ if (Value *L = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ // They do! Return "L op' R" if it simplifies or is already available.
+ // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
+ if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
+ && L == B && R == A)) {
+ ++NumExpand;
+ return LHS;
+ }
+ // Otherwise return "L op' R" if it simplifies.
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
+ MaxRecurse)) {
+ ++NumExpand;
+ return V;
+ }
+ }
+ }
+
+ // Check whether the expression has the form "A op (B op' C)".
+ if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
+ if (Op1->getOpcode() == OpcodeToExpand) {
+ // It does! Try turning it into "(A op B) op' (A op C)".
+ Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
+ // Do "A op B" and "A op C" both simplify?
+ if (Value *L = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse)) {
+ // They do! Return "L op' R" if it simplifies or is already available.
+ // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
+ if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
+ && L == C && R == B)) {
+ ++NumExpand;
+ return RHS;
+ }
+ // Otherwise return "L op' R" if it simplifies.
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
+ MaxRecurse)) {
+ ++NumExpand;
+ return V;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
+/// using the operation OpCodeToExtract. For example, when Opcode is Add and
+/// OpCodeToExtract is Mul then this tries to turn "(A*B)+(A*C)" into "A*(B+C)".
+/// Returns the simplified value, or null if no simplification was performed.
+static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ unsigned OpcToExtract, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
+ BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
+
+ if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
+ !Op1 || Op1->getOpcode() != OpcodeToExtract)
+ return 0;
+
+ // The expression has the form "(A op' B) op (C op' D)".
+ Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
+ Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
+
+ // Use left distributivity, i.e. "X op' (Y op Z) = (X op' Y) op (X op' Z)".
+ // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
+ // commutative case, "(A op' B) op (C op' A)"?
+ if (A == C || (Instruction::isCommutative(OpcodeToExtract) && A == D)) {
+ Value *DD = A == C ? D : C;
+ // Form "A op' (B op DD)" if it simplifies completely.
+ // Does "B op DD" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, B, DD, TD, DT, MaxRecurse)) {
+ // It does! Return "A op' V" if it simplifies or is already available.
+ // If V equals B then "A op' V" is just the LHS. If V equals DD then
+ // "A op' V" is just the RHS.
+ if (V == B || V == DD) {
+ ++NumFactor;
+ return V == B ? LHS : RHS;
+ }
+ // Otherwise return "A op' V" if it simplifies.
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, TD, DT, MaxRecurse)) {
+ ++NumFactor;
+ return W;
+ }
+ }
+ }
+
+ // Use right distributivity, i.e. "(X op Y) op' Z = (X op' Z) op (Y op' Z)".
+ // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
+ // commutative case, "(A op' B) op (B op' D)"?
+ if (B == D || (Instruction::isCommutative(OpcodeToExtract) && B == C)) {
+ Value *CC = B == D ? C : D;
+ // Form "(A op CC) op' B" if it simplifies completely..
+ // Does "A op CC" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, A, CC, TD, DT, MaxRecurse)) {
+ // It does! Return "V op' B" if it simplifies or is already available.
+ // If V equals A then "V op' B" is just the LHS. If V equals CC then
+ // "V op' B" is just the RHS.
+ if (V == A || V == CC) {
+ ++NumFactor;
+ return V == A ? LHS : RHS;
+ }
+ // Otherwise return "V op' B" if it simplifies.
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, TD, DT, MaxRecurse)) {
+ ++NumFactor;
+ return W;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
+/// operations. Returns the simpler value, or null if none was found.
+static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
+ const TargetData *TD,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
+ assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
+
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
+ BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
+
+ // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
+ if (Op0 && Op0->getOpcode() == Opcode) {
+ Value *A = Op0->getOperand(0);
+ Value *B = Op0->getOperand(1);
+ Value *C = RHS;
+
+ // Does "B op C" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ // It does! Return "A op V" if it simplifies or is already available.
+ // If V equals B then "A op V" is just the LHS.
+ if (V == B) return LHS;
+ // Otherwise return "A op V" if it simplifies.
+ if (Value *W = SimplifyBinOp(Opcode, A, V, TD, DT, MaxRecurse)) {
+ ++NumReassoc;
+ return W;
+ }
+ }
+ }
+
+ // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
+ if (Op1 && Op1->getOpcode() == Opcode) {
+ Value *A = LHS;
+ Value *B = Op1->getOperand(0);
+ Value *C = Op1->getOperand(1);
+
+ // Does "A op B" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse)) {
+ // It does! Return "V op C" if it simplifies or is already available.
+ // If V equals B then "V op C" is just the RHS.
+ if (V == B) return RHS;
+ // Otherwise return "V op C" if it simplifies.
+ if (Value *W = SimplifyBinOp(Opcode, V, C, TD, DT, MaxRecurse)) {
+ ++NumReassoc;
+ return W;
+ }
+ }
+ }
+
+ // The remaining transforms require commutativity as well as associativity.
+ if (!Instruction::isCommutative(Opcode))
+ return 0;
+
+ // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
+ if (Op0 && Op0->getOpcode() == Opcode) {
+ Value *A = Op0->getOperand(0);
+ Value *B = Op0->getOperand(1);
+ Value *C = RHS;
+
+ // Does "C op A" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ // It does! Return "V op B" if it simplifies or is already available.
+ // If V equals A then "V op B" is just the LHS.
+ if (V == A) return LHS;
+ // Otherwise return "V op B" if it simplifies.
+ if (Value *W = SimplifyBinOp(Opcode, V, B, TD, DT, MaxRecurse)) {
+ ++NumReassoc;
+ return W;
+ }
+ }
+ }
+
+ // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
+ if (Op1 && Op1->getOpcode() == Opcode) {
+ Value *A = LHS;
+ Value *B = Op1->getOperand(0);
+ Value *C = Op1->getOperand(1);
+
+ // Does "C op A" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ // It does! Return "B op V" if it simplifies or is already available.
+ // If V equals C then "B op V" is just the RHS.
+ if (V == C) return RHS;
+ // Otherwise return "B op V" if it simplifies.
+ if (Value *W = SimplifyBinOp(Opcode, B, V, TD, DT, MaxRecurse)) {
+ ++NumReassoc;
+ return W;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/// ThreadBinOpOverSelect - In the case of a binary operation with a select
+/// instruction as an operand, try to simplify the binop by seeing whether
+/// evaluating it on both branches of the select results in the same value.
+/// Returns the common value if so, otherwise returns null.
+static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
+ const TargetData *TD,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ SelectInst *SI;
+ if (isa<SelectInst>(LHS)) {
+ SI = cast<SelectInst>(LHS);
+ } else {
+ assert(isa<SelectInst>(RHS) && "No select instruction operand!");
+ SI = cast<SelectInst>(RHS);
+ }
+
+ // Evaluate the BinOp on the true and false branches of the select.
+ Value *TV;
+ Value *FV;
+ if (SI == LHS) {
+ TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, TD, DT, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, TD, DT, MaxRecurse);
+ } else {
+ TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), TD, DT, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), TD, DT, MaxRecurse);
+ }
+
+ // If they simplified to the same value, then return the common value.
+ // If they both failed to simplify then return null.
+ if (TV == FV)
+ return TV;
+
+ // If one branch simplified to undef, return the other one.
+ if (TV && isa<UndefValue>(TV))
+ return FV;
+ if (FV && isa<UndefValue>(FV))
+ return TV;
+
+ // If applying the operation did not change the true and false select values,
+ // then the result of the binop is the select itself.
+ if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
+ return SI;
+
+ // If one branch simplified and the other did not, and the simplified
+ // value is equal to the unsimplified one, return the simplified value.
+ // For example, select (cond, X, X & Z) & Z -> X & Z.
+ if ((FV && !TV) || (TV && !FV)) {
+ // Check that the simplified value has the form "X op Y" where "op" is the
+ // same as the original operation.
+ Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
+ if (Simplified && Simplified->getOpcode() == Opcode) {
+ // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
+ // We already know that "op" is the same as for the simplified value. See
+ // if the operands match too. If so, return the simplified value.
+ Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
+ Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
+ Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
+ if (Simplified->getOperand(0) == UnsimplifiedLHS &&
+ Simplified->getOperand(1) == UnsimplifiedRHS)
+ return Simplified;
+ if (Simplified->isCommutative() &&
+ Simplified->getOperand(1) == UnsimplifiedLHS &&
+ Simplified->getOperand(0) == UnsimplifiedRHS)
+ return Simplified;
+ }
+ }
+
+ return 0;
+}
+
+/// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
+/// try to simplify the comparison by seeing whether both branches of the select
+/// result in the same value. Returns the common value if so, otherwise returns
+/// null.
+static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
+ Value *RHS, const TargetData *TD,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ // Make sure the select is on the LHS.
+ if (!isa<SelectInst>(LHS)) {
+ std::swap(LHS, RHS);
+ Pred = CmpInst::getSwappedPredicate(Pred);
+ }
+ assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
+ SelectInst *SI = cast<SelectInst>(LHS);
+
+ // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
+ // Does "cmp TV, RHS" simplify?
+ if (Value *TCmp = SimplifyCmpInst(Pred, SI->getTrueValue(), RHS, TD, DT,
+ MaxRecurse)) {
+ // It does! Does "cmp FV, RHS" simplify?
+ if (Value *FCmp = SimplifyCmpInst(Pred, SI->getFalseValue(), RHS, TD, DT,
+ MaxRecurse)) {
+ // It does! If they simplified to the same value, then use it as the
+ // result of the original comparison.
+ if (TCmp == FCmp)
+ return TCmp;
+ Value *Cond = SI->getCondition();
+ // If the false value simplified to false, then the result of the compare
+ // is equal to "Cond && TCmp". This also catches the case when the false
+ // value simplified to false and the true value to true, returning "Cond".
+ if (match(FCmp, m_Zero()))
+ if (Value *V = SimplifyAndInst(Cond, TCmp, TD, DT, MaxRecurse))
+ return V;
+ // If the true value simplified to true, then the result of the compare
+ // is equal to "Cond || FCmp".
+ if (match(TCmp, m_One()))
+ if (Value *V = SimplifyOrInst(Cond, FCmp, TD, DT, MaxRecurse))
+ return V;
+ // Finally, if the false value simplified to true and the true value to
+ // false, then the result of the compare is equal to "!Cond".
+ if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
+ if (Value *V =
+ SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
+ TD, DT, MaxRecurse))
+ return V;
+ }
+ }
+
+ return 0;
+}
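+// For example, "icmp ult (select i1 %c, i32 1, i32 2), i32 4" simplifies:
+// both "icmp ult 1, 4" and "icmp ult 2, 4" fold to true, so the compare is
+// true regardless of %c.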
+
+/// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
+/// is a PHI instruction, try to simplify the binop by seeing whether evaluating
+/// it on the incoming phi values yields the same result for every value. If so,
+/// returns the common value; otherwise returns null.
+static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ PHINode *PI;
+ if (isa<PHINode>(LHS)) {
+ PI = cast<PHINode>(LHS);
+ // Bail out if RHS and the phi may be mutually interdependent due to a loop.
+ if (!ValueDominatesPHI(RHS, PI, DT))
+ return 0;
+ } else {
+ assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
+ PI = cast<PHINode>(RHS);
+ // Bail out if LHS and the phi may be mutually interdependent due to a loop.
+ if (!ValueDominatesPHI(LHS, PI, DT))
+ return 0;
+ }
+
+ // Evaluate the BinOp on the incoming phi values.
+ Value *CommonValue = 0;
+ for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming = PI->getIncomingValue(i);
+ // If the incoming value is the phi node itself, it can safely be skipped.
+ if (Incoming == PI) continue;
+ Value *V = PI == LHS ?
+ SimplifyBinOp(Opcode, Incoming, RHS, TD, DT, MaxRecurse) :
+ SimplifyBinOp(Opcode, LHS, Incoming, TD, DT, MaxRecurse);
+ // If the operation failed to simplify, or simplified to a different value
+    // from the one seen previously, then give up.
+ if (!V || (CommonValue && V != CommonValue))
+ return 0;
+ CommonValue = V;
+ }
+
+ return CommonValue;
+}
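+// For example, given %p = phi i32 [ 0, %a ], [ undef, %b ], the operation
+// "mul i32 %p, %x" folds to 0 for both incoming values (X * 0 and X * undef
+// are both 0), so the whole binop simplifies to 0.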
+
+/// ThreadCmpOverPHI - In the case of a comparison with a PHI instruction, try
+/// to simplify the comparison by seeing whether comparing with all of the
+/// incoming phi values yields the same result every time. If so, returns the
+/// common result; otherwise returns null.
+static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ // Recursion is always used, so bail out at once if we already hit the limit.
+ if (!MaxRecurse--)
+ return 0;
+
+ // Make sure the phi is on the LHS.
+ if (!isa<PHINode>(LHS)) {
+ std::swap(LHS, RHS);
+ Pred = CmpInst::getSwappedPredicate(Pred);
+ }
+ assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
+ PHINode *PI = cast<PHINode>(LHS);
+
+ // Bail out if RHS and the phi may be mutually interdependent due to a loop.
+ if (!ValueDominatesPHI(RHS, PI, DT))
+ return 0;
+
+  // Evaluate the comparison on the incoming phi values.
+ Value *CommonValue = 0;
+ for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming = PI->getIncomingValue(i);
+ // If the incoming value is the phi node itself, it can safely be skipped.
+ if (Incoming == PI) continue;
+ Value *V = SimplifyCmpInst(Pred, Incoming, RHS, TD, DT, MaxRecurse);
+ // If the operation failed to simplify, or simplified to a different value
+    // from the one seen previously, then give up.
+ if (!V || (CommonValue && V != CommonValue))
+ return 0;
+ CommonValue = V;
+ }
+
+ return CommonValue;
+}
+
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD) {
+static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(),
Ops, 2, TD);
}
-
+
+ // Canonicalize the constant to the RHS.
+ std::swap(Op0, Op1);
+ }
+
+ // X + undef -> undef
+ if (match(Op1, m_Undef()))
+ return Op1;
+
+ // X + 0 -> X
+ if (match(Op1, m_Zero()))
+ return Op0;
+
+ // X + (Y - X) -> Y
+ // (Y - X) + X -> Y
+  // E.g., X + -X -> 0
+ Value *Y = 0;
+ if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
+ match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
+ return Y;
+
+ // X + ~X -> -1 since ~X = -X-1
+ if (match(Op0, m_Not(m_Specific(Op1))) ||
+ match(Op1, m_Not(m_Specific(Op0))))
+ return Constant::getAllOnesValue(Op0->getType());
+
+  // i1 add -> xor.
+ if (MaxRecurse && Op0->getType()->isIntegerTy(1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ return V;
+
+ // Try some generic simplifications for associative operations.
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // Mul distributes over Add. Try some generic simplifications based on this.
+ if (Value *V = FactorizeBinOp(Instruction::Add, Op0, Op1, Instruction::Mul,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // Threading Add over selects and phi nodes is pointless, so don't bother.
+ // Threading over the select in "A + select(cond, B, C)" means evaluating
+ // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
+ // only if B and C are equal. If B and C are equal then (since we assume
+ // that operands have already been simplified) "select(cond, B, C)" should
+ // have been simplified to the common value of B and C already. Analysing
+ // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
+ // for threading over phi nodes.
+
+ return 0;
+}
+
+Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+}
+
+/// SimplifySubInst - Given operands for a Sub, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Constant *CLHS = dyn_cast<Constant>(Op0))
+ if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { CLHS, CRHS };
+ return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
+ Ops, 2, TD);
+ }
+
+ // X - undef -> undef
+ // undef - X -> undef
+ if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
+ return UndefValue::get(Op0->getType());
+
+ // X - 0 -> X
+ if (match(Op1, m_Zero()))
+ return Op0;
+
+ // X - X -> 0
+ if (Op0 == Op1)
+ return Constant::getNullValue(Op0->getType());
+
+ // (X*2) - X -> X
+ // (X<<1) - X -> X
+ Value *X = 0;
+ if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
+ match(Op0, m_Shl(m_Specific(Op1), m_One())))
+ return Op1;
+
+ // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
+ // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
+ Value *Y = 0, *Z = Op1;
+ if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
+ // See if "V === Y - Z" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, TD, DT, MaxRecurse-1))
+ // It does! Now see if "X + V" simplifies.
+ if (Value *W = SimplifyBinOp(Instruction::Add, X, V, TD, DT,
+ MaxRecurse-1)) {
+ // It does, we successfully reassociated!
+ ++NumReassoc;
+ return W;
+ }
+ // See if "V === X - Z" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ // It does! Now see if "Y + V" simplifies.
+ if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, TD, DT,
+ MaxRecurse-1)) {
+ // It does, we successfully reassociated!
+ ++NumReassoc;
+ return W;
+ }
+ }
+
+ // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
+ // For example, X - (X + 1) -> -1
+ X = Op0;
+ if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
+ // See if "V === X - Y" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, TD, DT, MaxRecurse-1))
+ // It does! Now see if "V - Z" simplifies.
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, TD, DT,
+ MaxRecurse-1)) {
+ // It does, we successfully reassociated!
+ ++NumReassoc;
+ return W;
+ }
+ // See if "V === X - Z" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ // It does! Now see if "V - Y" simplifies.
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, TD, DT,
+ MaxRecurse-1)) {
+ // It does, we successfully reassociated!
+ ++NumReassoc;
+ return W;
+ }
+ }
+
+ // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
+ // For example, X - (X - Y) -> Y.
+ Z = Op0;
+ if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
+ // See if "V === Z - X" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, TD, DT, MaxRecurse-1))
+ // It does! Now see if "V + Y" simplifies.
+ if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, TD, DT,
+ MaxRecurse-1)) {
+ // It does, we successfully reassociated!
+ ++NumReassoc;
+ return W;
+ }
+
+ // Mul distributes over Sub. Try some generic simplifications based on this.
+ if (Value *V = FactorizeBinOp(Instruction::Sub, Op0, Op1, Instruction::Mul,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // i1 sub -> xor.
+ if (MaxRecurse && Op0->getType()->isIntegerTy(1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ return V;
+
+ // Threading Sub over selects and phi nodes is pointless, so don't bother.
+ // Threading over the select in "A - select(cond, B, C)" means evaluating
+ // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
+ // only if B and C are equal. If B and C are equal then (since we assume
+ // that operands have already been simplified) "select(cond, B, C)" should
+ // have been simplified to the common value of B and C already. Analysing
+ // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
+ // for threading over phi nodes.
+
+ return 0;
+}
+
+Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+}
+
+/// SimplifyMulInst - Given operands for a Mul, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
+ if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { CLHS, CRHS };
+ return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
+ Ops, 2, TD);
+ }
+
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
-
- if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
- // X + undef -> undef
- if (isa<UndefValue>(Op1C))
- return Op1C;
-
- // X + 0 --> X
- if (Op1C->isNullValue())
- return Op0;
- }
-
- // FIXME: Could pull several more out of instcombine.
+
+ // X * undef -> 0
+ if (match(Op1, m_Undef()))
+ return Constant::getNullValue(Op0->getType());
+
+ // X * 0 -> 0
+ if (match(Op1, m_Zero()))
+ return Op1;
+
+ // X * 1 -> X
+ if (match(Op1, m_One()))
+ return Op0;
+
+ // (X / Y) * Y -> X if the division is exact.
+ Value *X = 0, *Y = 0;
+ if ((match(Op0, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op1) || // (X / Y) * Y
+ (match(Op1, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op0)) { // Y * (X / Y)
+ BinaryOperator *Div = cast<BinaryOperator>(Y == Op1 ? Op0 : Op1);
+ if (Div->isExact())
+ return X;
+ }
+
+ // i1 mul -> and.
+ if (MaxRecurse && Op0->getType()->isIntegerTy(1))
+ if (Value *V = SimplifyAndInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ return V;
+
+ // Try some generic simplifications for associative operations.
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // Mul distributes over Add. Try some generic simplifications based on this.
+ if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ return 0;
+}
+
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyMulInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
+/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Constant *C0 = dyn_cast<Constant>(Op0)) {
+ if (Constant *C1 = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { C0, C1 };
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, 2, TD);
+ }
+ }
+
+ bool isSigned = Opcode == Instruction::SDiv;
+
+ // X / undef -> undef
+ if (match(Op1, m_Undef()))
+ return Op1;
+
+ // undef / X -> 0
+ if (match(Op0, m_Undef()))
+ return Constant::getNullValue(Op0->getType());
+
+ // 0 / X -> 0, we don't need to preserve faults!
+ if (match(Op0, m_Zero()))
+ return Op0;
+
+ // X / 1 -> X
+ if (match(Op1, m_One()))
+ return Op0;
+
+ if (Op0->getType()->isIntegerTy(1))
+ // It can't be division by zero, hence it must be division by one.
+ return Op0;
+
+ // X / X -> 1
+ if (Op0 == Op1)
+ return ConstantInt::get(Op0->getType(), 1);
+
+ // (X * Y) / Y -> X if the multiplication does not overflow.
+ Value *X = 0, *Y = 0;
+ if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
+ if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
+ BinaryOperator *Mul = cast<BinaryOperator>(Op0);
+ // If the Mul knows it does not overflow, then we are good to go.
+ if ((isSigned && Mul->hasNoSignedWrap()) ||
+ (!isSigned && Mul->hasNoUnsignedWrap()))
+ return X;
+ // If X has the form X = A / Y then X * Y cannot overflow.
+ if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
+ if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
+ return X;
+ }
+
+ // (X rem Y) / Y -> 0
+ if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
+ (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
+ return Constant::getNullValue(Op0->getType());
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ return 0;
+}
+
+/// SimplifySDivInst - Given operands for an SDiv, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ return 0;
+}
+
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifySDivInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
+/// SimplifyUDivInst - Given operands for a UDiv, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ return 0;
+}
+
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyUDivInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
+static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *,
+ const DominatorTree *, unsigned) {
+ // undef / X -> undef (the undef could be a snan).
+ if (match(Op0, m_Undef()))
+ return Op0;
+
+ // X / undef -> undef
+ if (match(Op1, m_Undef()))
+ return Op1;
+
+ return 0;
+}
+
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyFDivInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
+/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Constant *C0 = dyn_cast<Constant>(Op0)) {
+ if (Constant *C1 = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { C0, C1 };
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, 2, TD);
+ }
+ }
+
+ // 0 shift by X -> 0
+ if (match(Op0, m_Zero()))
+ return Op0;
+
+ // X shift by 0 -> X
+ if (match(Op1, m_Zero()))
+ return Op0;
+
+ // X shift by undef -> undef because it may shift by the bitwidth.
+ if (match(Op1, m_Undef()))
+ return Op1;
+
+ // Shifting by the bitwidth or more is undefined.
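+  // For example, "shl i32 %x, 32" can be folded to undef.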
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1))
+ if (CI->getValue().getLimitedValue() >=
+ Op0->getType()->getScalarSizeInBits())
+ return UndefValue::get(Op0->getType());
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ return 0;
+}
+
+/// SimplifyShlInst - Given operands for an Shl, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ // undef << X -> 0
+ if (match(Op0, m_Undef()))
+ return Constant::getNullValue(Op0->getType());
+
+ // (X >> A) << A -> X
+ Value *X;
+ if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1))) &&
+ cast<PossiblyExactOperator>(Op0)->isExact())
+ return X;
+ return 0;
+}
+
+Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+}
+
+/// SimplifyLShrInst - Given operands for an LShr, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ // undef >>l X -> 0
+ if (match(Op0, m_Undef()))
+ return Constant::getNullValue(Op0->getType());
+
+ // (X << A) >> A -> X
+ Value *X;
+ if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1))) &&
+ cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap())
+ return X;
+
return 0;
}
+
+Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyLShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+}
+
+/// SimplifyAShrInst - Given operands for an AShr, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, TD, DT, MaxRecurse))
+ return V;
+
+ // all ones >>a X -> all ones
+ if (match(Op0, m_AllOnes()))
+ return Op0;
+
+ // undef >>a X -> all ones
+ if (match(Op0, m_Undef()))
+ return Constant::getAllOnesValue(Op0->getType());
+
+ // (X << A) >> A -> X
+ Value *X;
+ if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1))) &&
+ cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
+ return X;
+
+ return 0;
+}
+
+Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyAShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+}
+
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD) {
+static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
Ops, 2, TD);
}
-
+
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
-
+
// X & undef -> 0
- if (isa<UndefValue>(Op1))
+ if (match(Op1, m_Undef()))
return Constant::getNullValue(Op0->getType());
-
+
// X & X = X
if (Op0 == Op1)
return Op0;
-
- // X & <0,0> = <0,0>
- if (isa<ConstantAggregateZero>(Op1))
+
+ // X & 0 = 0
+ if (match(Op1, m_Zero()))
return Op1;
-
- // X & <-1,-1> = X
- if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1))
- if (CP->isAllOnesValue())
- return Op0;
-
- if (ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1)) {
- // X & 0 = 0
- if (Op1CI->isZero())
- return Op1CI;
- // X & -1 = X
- if (Op1CI->isAllOnesValue())
- return Op0;
- }
-
+
+ // X & -1 = X
+ if (match(Op1, m_AllOnes()))
+ return Op0;
+
// A & ~A = ~A & A = 0
- Value *A, *B;
- if ((match(Op0, m_Not(m_Value(A))) && A == Op1) ||
- (match(Op1, m_Not(m_Value(A))) && A == Op0))
+ if (match(Op0, m_Not(m_Specific(Op1))) ||
+ match(Op1, m_Not(m_Specific(Op0))))
return Constant::getNullValue(Op0->getType());
-
+
// (A | ?) & A = A
+ Value *A = 0, *B = 0;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
-
+
// A & (A | ?) = A
if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
return Op0;
-
+
+ // Try some generic simplifications for associative operations.
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // And distributes over Or. Try some generic simplifications based on this.
+ if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // And distributes over Xor. Try some generic simplifications based on this.
+ if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // Or distributes over And. Try some generic simplifications based on this.
+ if (Value *V = FactorizeBinOp(Instruction::And, Op0, Op1, Instruction::Or,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
+ if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
+ if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
return 0;
}
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyAndInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD) {
+static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
Ops, 2, TD);
}
-
+
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
-
+
// X | undef -> -1
- if (isa<UndefValue>(Op1))
+ if (match(Op1, m_Undef()))
return Constant::getAllOnesValue(Op0->getType());
-
+
// X | X = X
if (Op0 == Op1)
return Op0;
- // X | <0,0> = X
- if (isa<ConstantAggregateZero>(Op1))
+ // X | 0 = X
+ if (match(Op1, m_Zero()))
return Op0;
-
- // X | <-1,-1> = <-1,-1>
- if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1))
- if (CP->isAllOnesValue())
- return Op1;
-
- if (ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1)) {
- // X | 0 = X
- if (Op1CI->isZero())
- return Op0;
- // X | -1 = -1
- if (Op1CI->isAllOnesValue())
- return Op1CI;
- }
-
+
+ // X | -1 = -1
+ if (match(Op1, m_AllOnes()))
+ return Op1;
+
// A | ~A = ~A | A = -1
- Value *A, *B;
- if ((match(Op0, m_Not(m_Value(A))) && A == Op1) ||
- (match(Op1, m_Not(m_Value(A))) && A == Op0))
+ if (match(Op0, m_Not(m_Specific(Op1))) ||
+ match(Op1, m_Not(m_Specific(Op0))))
return Constant::getAllOnesValue(Op0->getType());
-
+
// (A & ?) | A = A
+ Value *A = 0, *B = 0;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
-
+
// A | (A & ?) = A
if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
return Op0;
-
+
+ // Try some generic simplifications for associative operations.
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // Or distributes over And. Try some generic simplifications based on this.
+ if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // And distributes over Or. Try some generic simplifications based on this.
+ if (Value *V = FactorizeBinOp(Instruction::Or, Op0, Op1, Instruction::And,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
return 0;
}
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyOrInst(Op0, Op1, TD, DT, RecursionLimit);
+}
+
+/// SimplifyXorInst - Given operands for a Xor, see if we can
+/// fold the result. If not, this returns null.
+static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
+ if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
+ Constant *Ops[] = { CLHS, CRHS };
+ return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
+ Ops, 2, TD);
+ }
+
+ // Canonicalize the constant to the RHS.
+ std::swap(Op0, Op1);
+ }
+
+ // A ^ undef -> undef
+ if (match(Op1, m_Undef()))
+ return Op1;
+
+ // A ^ 0 = A
+ if (match(Op1, m_Zero()))
+ return Op0;
+
+ // A ^ A = 0
+ if (Op0 == Op1)
+ return Constant::getNullValue(Op0->getType());
+
+ // A ^ ~A = ~A ^ A = -1
+ if (match(Op0, m_Not(m_Specific(Op1))) ||
+ match(Op1, m_Not(m_Specific(Op0))))
+ return Constant::getAllOnesValue(Op0->getType());
+
+ // Try some generic simplifications for associative operations.
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // And distributes over Xor. Try some generic simplifications based on this.
+ if (Value *V = FactorizeBinOp(Instruction::Xor, Op0, Op1, Instruction::And,
+ TD, DT, MaxRecurse))
+ return V;
+
+ // Threading Xor over selects and phi nodes is pointless, so don't bother.
+ // Threading over the select in "A ^ select(cond, B, C)" means evaluating
+ // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
+ // only if B and C are equal. If B and C are equal then (since we assume
+ // that operands have already been simplified) "select(cond, B, C)" should
+ // have been simplified to the common value of B and C already. Analysing
+ // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
+ // for threading over phi nodes.
+
+ return 0;
+}
+
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const DominatorTree *DT) {
+ return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
+}
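Aside: every simplifier in this patch follows the same shape -- a file-local worker that threads a MaxRecurse budget, plus a thin public wrapper that seeds it with RecursionLimit. A generic, hypothetical sketch of that discipline (the limit value is assumed):

    // The worker spends one unit of budget per recursive step and gives up
    // (returning its "no simplification" result) once the budget hits zero,
    // so folds chained over selects and phis cannot recurse unboundedly.
    static const unsigned RecursionLimit = 3; // illustrative value

    static int simplifyImpl(int N, unsigned MaxRecurse) {
      if (!MaxRecurse) return 0;                  // budget exhausted: give up
      if (N <= 1) return N;                       // base case: already simple
      return simplifyImpl(N - 1, MaxRecurse - 1); // recurse on a smaller case
    }

    int simplifyEntry(int N) { // the public entry point seeds the budget
      return simplifyImpl(N, RecursionLimit);
    }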
static const Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
-
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD) {
+static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
-
+
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
@@ -189,70 +1277,400 @@ Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
-
- // ITy - This is the return type of the compare we're considering.
- const Type *ITy = GetCompareTy(LHS);
-
+
+ const Type *ITy = GetCompareTy(LHS); // The return type.
+ const Type *OpTy = LHS->getType(); // The operand type.
+
// icmp X, X -> true/false
// X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
// because X could be 0.
if (LHS == RHS || isa<UndefValue>(RHS))
return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
-
- // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
- // addresses never equal each other! We already know that Op0 != Op1.
- if ((isa<GlobalValue>(LHS) || isa<AllocaInst>(LHS) ||
- isa<ConstantPointerNull>(LHS)) &&
- (isa<GlobalValue>(RHS) || isa<AllocaInst>(RHS) ||
- isa<ConstantPointerNull>(RHS)))
- return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
-
- // See if we are doing a comparison with a constant.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
- // If we have an icmp le or icmp ge instruction, turn it into the
- // appropriate icmp lt or icmp gt instruction. This allows us to rely on
- // them being folded in the code below.
+
+ // Special case logic when the operands have i1 type.
+ if (OpTy->isIntegerTy(1) || (OpTy->isVectorTy() &&
+ cast<VectorType>(OpTy)->getElementType()->isIntegerTy(1))) {
switch (Pred) {
default: break;
+ case ICmpInst::ICMP_EQ:
+ // X == 1 -> X
+ if (match(RHS, m_One()))
+ return LHS;
+ break;
+ case ICmpInst::ICMP_NE:
+ // X != 0 -> X
+ if (match(RHS, m_Zero()))
+ return LHS;
+ break;
+ case ICmpInst::ICMP_UGT:
+ // X >u 0 -> X
+ if (match(RHS, m_Zero()))
+ return LHS;
+ break;
+ case ICmpInst::ICMP_UGE:
+ // X >=u 1 -> X
+ if (match(RHS, m_One()))
+ return LHS;
+ break;
+ case ICmpInst::ICMP_SLT:
+ // X <s 0 -> X
+ if (match(RHS, m_Zero()))
+ return LHS;
+ break;
+ case ICmpInst::ICMP_SLE:
+ // X <=s -1 -> X
+ if (match(RHS, m_One()))
+ return LHS;
+ break;
+ }
+ }
+
+ // icmp <alloca*>, <global/alloca*/null> - Different stack variables have
+ // different addresses, and what's more the address of a stack variable is
+ // never null or equal to the address of a global. Note that generalizing
+ // to the case where LHS is a global variable address or null is pointless,
+ // since if both LHS and RHS are constants then we already constant folded
+ // the compare, and if only one of them is then we moved it to RHS already.
+ if (isa<AllocaInst>(LHS) && (isa<GlobalValue>(RHS) || isa<AllocaInst>(RHS) ||
+ isa<ConstantPointerNull>(RHS)))
+ // We already know that LHS != RHS.
+ return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+
+ // If we are comparing with zero then try hard since this is a common case.
+ if (match(RHS, m_Zero())) {
+ bool LHSKnownNonNegative, LHSKnownNegative;
+ switch (Pred) {
+ default:
+ assert(false && "Unknown ICmp predicate!");
+ case ICmpInst::ICMP_ULT:
+ return ConstantInt::getFalse(LHS->getContext());
+ case ICmpInst::ICMP_UGE:
+ return ConstantInt::getTrue(LHS->getContext());
+ case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (CI->isMaxValue(false)) // A <=u MAX -> TRUE
- return ConstantInt::getTrue(CI->getContext());
+ if (isKnownNonZero(LHS, TD))
+ return ConstantInt::getFalse(LHS->getContext());
+ break;
+ case ICmpInst::ICMP_NE:
+ case ICmpInst::ICMP_UGT:
+ if (isKnownNonZero(LHS, TD))
+ return ConstantInt::getTrue(LHS->getContext());
+ break;
+ case ICmpInst::ICMP_SLT:
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ if (LHSKnownNegative)
+ return ConstantInt::getTrue(LHS->getContext());
+ if (LHSKnownNonNegative)
+ return ConstantInt::getFalse(LHS->getContext());
break;
case ICmpInst::ICMP_SLE:
- if (CI->isMaxValue(true)) // A <=s MAX -> TRUE
- return ConstantInt::getTrue(CI->getContext());
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ if (LHSKnownNegative)
+ return ConstantInt::getTrue(LHS->getContext());
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
+ return ConstantInt::getFalse(LHS->getContext());
+ break;
+ case ICmpInst::ICMP_SGE:
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ if (LHSKnownNegative)
+ return ConstantInt::getFalse(LHS->getContext());
+ if (LHSKnownNonNegative)
+ return ConstantInt::getTrue(LHS->getContext());
+ break;
+ case ICmpInst::ICMP_SGT:
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ if (LHSKnownNegative)
+ return ConstantInt::getFalse(LHS->getContext());
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
+ return ConstantInt::getTrue(LHS->getContext());
+ break;
+ }
+ }
+
+ // See if we are doing a comparison with a constant integer.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ switch (Pred) {
+ default: break;
+ case ICmpInst::ICMP_UGT:
+ if (CI->isMaxValue(false)) // A >u MAX -> FALSE
+ return ConstantInt::getFalse(CI->getContext());
break;
case ICmpInst::ICMP_UGE:
if (CI->isMinValue(false)) // A >=u MIN -> TRUE
return ConstantInt::getTrue(CI->getContext());
break;
+ case ICmpInst::ICMP_ULT:
+ if (CI->isMinValue(false)) // A <u MIN -> FALSE
+ return ConstantInt::getFalse(CI->getContext());
+ break;
+ case ICmpInst::ICMP_ULE:
+ if (CI->isMaxValue(false)) // A <=u MAX -> TRUE
+ return ConstantInt::getTrue(CI->getContext());
+ break;
+ case ICmpInst::ICMP_SGT:
+ if (CI->isMaxValue(true)) // A >s MAX -> FALSE
+ return ConstantInt::getFalse(CI->getContext());
+ break;
case ICmpInst::ICMP_SGE:
if (CI->isMinValue(true)) // A >=s MIN -> TRUE
return ConstantInt::getTrue(CI->getContext());
break;
+ case ICmpInst::ICMP_SLT:
+ if (CI->isMinValue(true)) // A <s MIN -> FALSE
+ return ConstantInt::getFalse(CI->getContext());
+ break;
+ case ICmpInst::ICMP_SLE:
+ if (CI->isMaxValue(true)) // A <=s MAX -> TRUE
+ return ConstantInt::getTrue(CI->getContext());
+ break;
+ }
+ }
+
+ // Compare of cast, for example (zext X) != 0 -> X != 0
+ if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
+ Instruction *LI = cast<CastInst>(LHS);
+ Value *SrcOp = LI->getOperand(0);
+ const Type *SrcTy = SrcOp->getType();
+ const Type *DstTy = LI->getType();
+
+ // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
+ // if the integer type is the same size as the pointer type.
+ if (MaxRecurse && TD && isa<PtrToIntInst>(LI) &&
+ TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
+ if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
+ // Transfer the cast to the constant.
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp,
+ ConstantExpr::getIntToPtr(RHSC, SrcTy),
+ TD, DT, MaxRecurse-1))
+ return V;
+ } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
+ if (RI->getOperand(0)->getType() == SrcTy)
+ // Compare without the cast.
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
+ TD, DT, MaxRecurse-1))
+ return V;
+ }
+ }
+
+ if (isa<ZExtInst>(LHS)) {
+ // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
+ // same type.
+ if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
+ if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
+ // Compare X and Y. Note that signed predicates become unsigned.
+ if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
+ SrcOp, RI->getOperand(0), TD, DT,
+ MaxRecurse-1))
+ return V;
+ }
+ // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
+ // too. If not, then try to deduce the result of the comparison.
+ else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ // Compute the constant that would happen if we truncated to SrcTy then
+ // reextended to DstTy.
+ Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
+ Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
+
+ // If the re-extended constant didn't change then this is effectively
+ // also a case of comparing two zero-extended values.
+ if (RExt == CI && MaxRecurse)
+ if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
+ SrcOp, Trunc, TD, DT, MaxRecurse-1))
+ return V;
+
+ // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
+ // there. Use this to work out the result of the comparison.
+ if (RExt != CI) {
+ switch (Pred) {
+ default:
+ assert(false && "Unknown ICmp predicate!");
+ // LHS <u RHS.
+ case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE:
+ return ConstantInt::getFalse(CI->getContext());
+
+ case ICmpInst::ICMP_NE:
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE:
+ return ConstantInt::getTrue(CI->getContext());
+
+ // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
+ // is non-negative then LHS <s RHS.
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE:
+ return CI->getValue().isNegative() ?
+ ConstantInt::getTrue(CI->getContext()) :
+ ConstantInt::getFalse(CI->getContext());
+
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLE:
+ return CI->getValue().isNegative() ?
+ ConstantInt::getFalse(CI->getContext()) :
+ ConstantInt::getTrue(CI->getContext());
+ }
+ }
+ }
+ }
+
+ if (isa<SExtInst>(LHS)) {
+ // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
+ // same type.
+ if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
+ if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
+ // Compare X and Y. Note that the predicate does not change.
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
+ TD, DT, MaxRecurse-1))
+ return V;
+ }
+ // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
+ // too. If not, then try to deduce the result of the comparison.
+ else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ // Compute the constant that would happen if we truncated to SrcTy then
+ // reextended to DstTy.
+ Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
+ Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
+
+ // If the re-extended constant didn't change then this is effectively
+ // also a case of comparing two sign-extended values.
+ if (RExt == CI && MaxRecurse)
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, TD, DT,
+ MaxRecurse-1))
+ return V;
+
+ // Otherwise the upper bits of LHS are all equal, while RHS has varying
+ // bits there. Use this to work out the result of the comparison.
+ if (RExt != CI) {
+ switch (Pred) {
+ default:
+ assert(false && "Unknown ICmp predicate!");
+ case ICmpInst::ICMP_EQ:
+ return ConstantInt::getFalse(CI->getContext());
+ case ICmpInst::ICMP_NE:
+ return ConstantInt::getTrue(CI->getContext());
+
+ // If RHS is non-negative then LHS <s RHS. If RHS is negative then
+ // LHS >s RHS.
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE:
+ return CI->getValue().isNegative() ?
+ ConstantInt::getTrue(CI->getContext()) :
+ ConstantInt::getFalse(CI->getContext());
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLE:
+ return CI->getValue().isNegative() ?
+ ConstantInt::getFalse(CI->getContext()) :
+ ConstantInt::getTrue(CI->getContext());
+
+ // If LHS is non-negative then LHS <u RHS. If LHS is negative then
+ // LHS >u RHS.
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE:
+ // Comparison is true iff the LHS <s 0.
+ if (MaxRecurse)
+ if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
+ Constant::getNullValue(SrcTy),
+ TD, DT, MaxRecurse-1))
+ return V;
+ break;
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE:
+ // Comparison is true iff the LHS >=s 0.
+ if (MaxRecurse)
+ if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
+ Constant::getNullValue(SrcTy),
+ TD, DT, MaxRecurse-1))
+ return V;
+ break;
+ }
+ }
+ }
}
}
-
-
+
+ // Special logic for binary operators.
+ BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
+ BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
+ if (MaxRecurse && (LBO || RBO)) {
+ // Analyze the case when either LHS or RHS is an add instruction.
+ Value *A = 0, *B = 0, *C = 0, *D = 0;
+ // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
+ bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
+ if (LBO && LBO->getOpcode() == Instruction::Add) {
+ A = LBO->getOperand(0); B = LBO->getOperand(1);
+ NoLHSWrapProblem = ICmpInst::isEquality(Pred) ||
+ (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
+ (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
+ }
+ if (RBO && RBO->getOpcode() == Instruction::Add) {
+ C = RBO->getOperand(0); D = RBO->getOperand(1);
+ NoRHSWrapProblem = ICmpInst::isEquality(Pred) ||
+ (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
+ (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
+ }
+
+ // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
+ if ((A == RHS || B == RHS) && NoLHSWrapProblem)
+ if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
+ Constant::getNullValue(RHS->getType()),
+ TD, DT, MaxRecurse-1))
+ return V;
+
+ // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
+ if ((C == LHS || D == LHS) && NoRHSWrapProblem)
+ if (Value *V = SimplifyICmpInst(Pred,
+ Constant::getNullValue(LHS->getType()),
+ C == LHS ? D : C, TD, DT, MaxRecurse-1))
+ return V;
+
+ // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
+ if (A && C && (A == C || A == D || B == C || B == D) &&
+ NoLHSWrapProblem && NoRHSWrapProblem) {
+ // Determine Y and Z in the form icmp (X+Y), (X+Z).
+ Value *Y = (A == C || A == D) ? B : A;
+ Value *Z = (C == A || C == B) ? D : C;
+ if (Value *V = SimplifyICmpInst(Pred, Y, Z, TD, DT, MaxRecurse-1))
+ return V;
+ }
+ }
+
+ // If the comparison is with the result of a select instruction, check whether
+ // comparing with either branch of the select always yields the same value.
+ if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ return V;
+
+ // If the comparison is with the result of a phi instruction, check whether
+ // doing the compare with each incoming phi value yields a common result.
+ if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ return V;
+
return 0;
}
+Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+}
+
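Aside (hypothetical client code): the compare simplifier can now be queried directly. For instance "icmp ult %x, 0" folds to the i1 false constant via the zero-compare logic above, no matter what %x is, since nothing is unsigned-less-than zero.

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    // Returns a value the compare is known to equal, or null if no fold
    // applies; the predicate enum converts implicitly to unsigned.
    static Value *foldCmp(ICmpInst *Cmp, const TargetData *TD,
                          const DominatorTree *DT) {
      return SimplifyICmpInst(Cmp->getPredicate(), Cmp->getOperand(0),
                              Cmp->getOperand(1), TD, DT);
    }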
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD) {
+static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
-
+
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
-
+
// Fold trivial predicates.
if (Pred == FCmpInst::FCMP_FALSE)
return ConstantInt::get(GetCompareTy(LHS), 0);
@@ -269,7 +1687,7 @@ Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (CmpInst::isFalseWhenEqual(Pred))
return ConstantInt::get(GetCompareTy(LHS), 0);
}
-
+
// Handle fcmp with constant RHS
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// If the constant is a nan, see if we can fold the comparison based on it.
@@ -310,23 +1728,40 @@ Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
}
-
+
+ // If the comparison is with the result of a select instruction, check whether
+ // comparing with either branch of the select always yields the same value.
+ if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ return V;
+
+ // If the comparison is with the result of a phi instruction, check whether
+ // doing the compare with each incoming phi value yields a common result.
+ if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ return V;
+
return 0;
}
+Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+}
+
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
- const TargetData *TD) {
+ const TargetData *TD, const DominatorTree *) {
// select true, X, Y -> X
// select false, X, Y -> Y
if (ConstantInt *CB = dyn_cast<ConstantInt>(CondVal))
return CB->getZExtValue() ? TrueVal : FalseVal;
-
+
// select C, X, X -> X
if (TrueVal == FalseVal)
return TrueVal;
-
+
if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
return FalseVal;
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
@@ -336,98 +1771,249 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
return TrueVal;
return FalseVal;
}
-
-
-
+
return 0;
}
-
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
- const TargetData *TD) {
+ const TargetData *TD, const DominatorTree *) {
+ // The type of the GEP pointer operand.
+ const PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
+
// getelementptr P -> P.
if (NumOps == 1)
return Ops[0];
- // TODO.
- //if (isa<UndefValue>(Ops[0]))
- // return UndefValue::get(GEP.getType());
+ if (isa<UndefValue>(Ops[0])) {
+ // Compute the (pointer) type returned by the GEP instruction.
+ const Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
+ NumOps-1);
+ const Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
+ return UndefValue::get(GEPTy);
+ }
- // getelementptr P, 0 -> P.
- if (NumOps == 2)
+ if (NumOps == 2) {
+ // getelementptr P, 0 -> P.
if (ConstantInt *C = dyn_cast<ConstantInt>(Ops[1]))
if (C->isZero())
return Ops[0];
-
+ // getelementptr P, N -> P if P points to a type of zero size.
+ if (TD) {
+ const Type *Ty = PtrTy->getElementType();
+ if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
+ return Ops[0];
+ }
+ }
+
// Check to see if this is constant foldable.
for (unsigned i = 0; i != NumOps; ++i)
if (!isa<Constant>(Ops[i]))
return 0;
-
+
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]),
(Constant *const*)Ops+1, NumOps-1);
}
+/// SimplifyPHINode - See if we can fold the given phi. If not, returns null.
+static Value *SimplifyPHINode(PHINode *PN, const DominatorTree *DT) {
+ // If all of the PHI's incoming values are the same then replace the PHI node
+ // with the common value.
+ Value *CommonValue = 0;
+ bool HasUndefInput = false;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming = PN->getIncomingValue(i);
+ // If the incoming value is the phi node itself, it can safely be skipped.
+ if (Incoming == PN) continue;
+ if (isa<UndefValue>(Incoming)) {
+ // Remember that we saw an undef value, but otherwise ignore them.
+ HasUndefInput = true;
+ continue;
+ }
+ if (CommonValue && Incoming != CommonValue)
+ return 0; // Not the same, bail out.
+ CommonValue = Incoming;
+ }
+
+ // If CommonValue is null then all of the incoming values were either undef or
+ // equal to the phi node itself.
+ if (!CommonValue)
+ return UndefValue::get(PN->getType());
+
+ // If we have a PHI node like phi(X, undef, X), where X is defined by some
+ // instruction, we cannot return X as the result of the PHI node unless it
+ // dominates the PHI block.
+ if (HasUndefInput)
+ return ValueDominatesPHI(CommonValue, PN, DT) ? CommonValue : 0;
+
+ return CommonValue;
+}
+
//=== Helper functions for higher up the class hierarchy.
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD) {
+static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
switch (Opcode) {
- case Instruction::And: return SimplifyAndInst(LHS, RHS, TD);
- case Instruction::Or: return SimplifyOrInst(LHS, RHS, TD);
+ case Instruction::Add:
+ return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
+ TD, DT, MaxRecurse);
+ case Instruction::Sub:
+ return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
+ TD, DT, MaxRecurse);
+ case Instruction::Mul: return SimplifyMulInst (LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::Shl:
+ return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
+ TD, DT, MaxRecurse);
+ case Instruction::LShr:
+ return SimplifyLShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
+ case Instruction::AShr:
+ return SimplifyAShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
+ case Instruction::And: return SimplifyAndInst(LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::Or: return SimplifyOrInst (LHS, RHS, TD, DT, MaxRecurse);
+ case Instruction::Xor: return SimplifyXorInst(LHS, RHS, TD, DT, MaxRecurse);
default:
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, 2, TD);
}
+
+ // If the operation is associative, try some generic simplifications.
+ if (Instruction::isAssociative(Opcode))
+ if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a select instruction, check whether
+ // operating on either branch of the select always yields the same value.
+ if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, TD, DT,
+ MaxRecurse))
+ return V;
+
+ // If the operation is with the result of a phi instruction, check whether
+ // operating on all incoming values of the phi always yields the same value.
+ if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, TD, DT, MaxRecurse))
+ return V;
+
return 0;
}
}
+Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyBinOp(Opcode, LHS, RHS, TD, DT, RecursionLimit);
+}
+
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
-Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD) {
+static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT,
+ unsigned MaxRecurse) {
if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
- return SimplifyICmpInst(Predicate, LHS, RHS, TD);
- return SimplifyFCmpInst(Predicate, LHS, RHS, TD);
+ return SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
+ return SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
}
+Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const TargetData *TD, const DominatorTree *DT) {
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+}
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) {
+Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+ const DominatorTree *DT) {
+ Value *Result;
+
switch (I->getOpcode()) {
default:
- return ConstantFoldInstruction(I, TD);
+ Result = ConstantFoldInstruction(I, TD);
+ break;
case Instruction::Add:
- return SimplifyAddInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->hasNoSignedWrap(),
- cast<BinaryOperator>(I)->hasNoUnsignedWrap(), TD);
+ Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->hasNoSignedWrap(),
+ cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
+ TD, DT);
+ break;
+ case Instruction::Sub:
+ Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->hasNoSignedWrap(),
+ cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
+ TD, DT);
+ break;
+ case Instruction::Mul:
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
+ case Instruction::SDiv:
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
+ case Instruction::UDiv:
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
+ case Instruction::FDiv:
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
+ case Instruction::Shl:
+ Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->hasNoSignedWrap(),
+ cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
+ TD, DT);
+ break;
+ case Instruction::LShr:
+ Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->isExact(),
+ TD, DT);
+ break;
+ case Instruction::AShr:
+ Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
+ cast<BinaryOperator>(I)->isExact(),
+ TD, DT);
+ break;
case Instruction::And:
- return SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
case Instruction::Or:
- return SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
+ case Instruction::Xor:
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
case Instruction::ICmp:
- return SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD);
+ Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
+ I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
case Instruction::FCmp:
- return SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD);
+ Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
+ I->getOperand(0), I->getOperand(1), TD, DT);
+ break;
case Instruction::Select:
- return SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), TD);
+ Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
+ I->getOperand(2), TD, DT);
+ break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
- return SimplifyGEPInst(&Ops[0], Ops.size(), TD);
+ Result = SimplifyGEPInst(&Ops[0], Ops.size(), TD, DT);
+ break;
}
+ case Instruction::PHI:
+ Result = SimplifyPHINode(cast<PHINode>(I), DT);
+ break;
}
+
+ /// If called on unreachable code, the above logic may report that the
+ /// instruction simplified to itself. Make life easier for users by
+ /// detecting that case here, returning a safe value instead.
+ return Result == I ? UndefValue::get(I->getType()) : Result;
}
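Aside: a sketch of the canonical driver loop for this entry point (the loop itself is assumed, not part of the patch). Because the code above maps an instruction that "simplifies to itself" to undef, the returned value is never the instruction, so the replace-and-erase below cannot cycle.

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/Function.h"

    using namespace llvm;

    static bool simplifyFunction(Function &F, const TargetData *TD,
                                 const DominatorTree *DT) {
      bool Changed = false;
      for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
        for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
          Instruction *Inst = I++;      // advance first: Inst may be erased
          if (Value *V = SimplifyInstruction(Inst, TD, DT)) {
            Inst->replaceAllUsesWith(V);
            Inst->eraseFromParent();
            Changed = true;
          }
        }
      return Changed;
    }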
/// ReplaceAndSimplifyAllUses - Perform From->replaceAllUsesWith(To) and then
@@ -437,15 +2023,16 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) {
/// simplifies and deletes scalar operations, it does not change the CFG.
///
void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
- const TargetData *TD) {
+ const TargetData *TD,
+ const DominatorTree *DT) {
assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!");
-
+
// FromHandle/ToHandle - This keeps a WeakVH on the from/to values so that
// we can know if it gets deleted out from under us or replaced in a
// recursive simplification.
WeakVH FromHandle(From);
WeakVH ToHandle(To);
-
+
while (!From->use_empty()) {
// Update the instruction to use the new value.
Use &TheUse = From->use_begin().getUse();
@@ -460,27 +2047,26 @@ void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
// Sanity check to make sure 'User' doesn't dangle across
// SimplifyInstruction.
AssertingVH<> UserHandle(User);
-
- SimplifiedVal = SimplifyInstruction(User, TD);
+
+ SimplifiedVal = SimplifyInstruction(User, TD, DT);
if (SimplifiedVal == 0) continue;
}
-
+
// Recursively simplify this user to the new value.
- ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD);
+ ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD, DT);
From = dyn_cast_or_null<Instruction>((Value*)FromHandle);
To = ToHandle;
-
+
assert(ToHandle && "To value deleted by recursive simplification?");
-
+
// If the recursive simplification ended up revisiting and deleting
// 'From' then we're done.
if (From == 0)
return;
}
-
+
// If 'From' has value handles referring to it, do a real RAUW to update them.
From->replaceAllUsesWith(To);
-
+
From->eraseFromParent();
}
-
diff --git a/contrib/llvm/lib/Analysis/IntervalPartition.cpp b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
index 1c9e148..2e259b1 100644
--- a/contrib/llvm/lib/Analysis/IntervalPartition.cpp
+++ b/contrib/llvm/lib/Analysis/IntervalPartition.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
char IntervalPartition::ID = 0;
INITIALIZE_PASS(IntervalPartition, "intervals",
- "Interval Partition Construction", true, true);
+ "Interval Partition Construction", true, true)
//===----------------------------------------------------------------------===//
// IntervalPartition Implementation
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index e32dbc4..9e7da6c 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -14,8 +14,10 @@
#define DEBUG_TYPE "lazy-value-info"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
@@ -26,11 +28,14 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
+#include <map>
+#include <set>
+#include <stack>
using namespace llvm;
char LazyValueInfo::ID = 0;
INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
- "Lazy Value Information Analysis", false, true);
+ "Lazy Value Information Analysis", false, true)
namespace llvm {
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); }
@@ -50,18 +55,18 @@ namespace llvm {
namespace {
class LVILatticeVal {
enum LatticeValueTy {
- /// undefined - This LLVM Value has no known value yet.
+ /// undefined - This Value has no known value yet.
undefined,
- /// constant - This LLVM Value has a specific constant value.
+ /// constant - This Value has a specific constant value.
constant,
- /// notconstant - This LLVM value is known to not have the specified value.
+ /// notconstant - This Value is known to not have the specified value.
notconstant,
- /// constantrange
+ /// constantrange - The Value falls within this range.
constantrange,
- /// overdefined - This instruction is not known to be constant, and we know
+ /// overdefined - This value is not known to be constant, and we know that
/// it has a value.
overdefined
};
@@ -77,17 +82,13 @@ public:
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
- Res.markConstantRange(ConstantRange(CI->getValue(), CI->getValue()+1));
- else if (!isa<UndefValue>(C))
+ if (!isa<UndefValue>(C))
Res.markConstant(C);
return Res;
}
static LVILatticeVal getNot(Constant *C) {
LVILatticeVal Res;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
- Res.markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
- else
+ if (!isa<UndefValue>(C))
Res.markNotConstant(C);
return Res;
}
@@ -129,32 +130,34 @@ public:
/// markConstant - Return true if this is a change in status.
bool markConstant(Constant *V) {
- if (isConstant()) {
- assert(getConstant() == V && "Marking constant with different value");
+ assert(V && "Marking constant with NULL");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ return markConstantRange(ConstantRange(CI->getValue()));
+ if (isa<UndefValue>(V))
return false;
- }
-
+
+ assert((!isConstant() || getConstant() == V) &&
+ "Marking constant with different value");
assert(isUndefined());
Tag = constant;
- assert(V && "Marking constant with NULL");
Val = V;
return true;
}
/// markNotConstant - Return true if this is a change in status.
bool markNotConstant(Constant *V) {
- if (isNotConstant()) {
- assert(getNotConstant() == V && "Marking !constant with different value");
+ assert(V && "Marking !constant with NULL");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ return markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
+ if (isa<UndefValue>(V))
return false;
- }
-
- if (isConstant())
- assert(getConstant() != V && "Marking not constant with different value");
- else
- assert(isUndefined());
+ assert((!isConstant() || getConstant() != V) &&
+ "Marking constant !constant with same value");
+ assert((!isNotConstant() || getNotConstant() == V) &&
+ "Marking !constant with different value");
+ assert(isUndefined() || isConstant());
Tag = notconstant;
- assert(V && "Marking constant with NULL");
Val = V;
return true;
}
@@ -185,63 +188,81 @@ public:
if (RHS.isUndefined() || isOverdefined()) return false;
if (RHS.isOverdefined()) return markOverdefined();
- if (RHS.isNotConstant()) {
- if (isNotConstant()) {
- if (getNotConstant() != RHS.getNotConstant() ||
- isa<ConstantExpr>(getNotConstant()) ||
- isa<ConstantExpr>(RHS.getNotConstant()))
- return markOverdefined();
- return false;
- } else if (isConstant()) {
- if (getConstant() == RHS.getNotConstant() ||
- isa<ConstantExpr>(RHS.getNotConstant()) ||
- isa<ConstantExpr>(getConstant()))
+ if (isUndefined()) {
+ Tag = RHS.Tag;
+ Val = RHS.Val;
+ Range = RHS.Range;
+ return true;
+ }
+
+ if (isConstant()) {
+ if (RHS.isConstant()) {
+ if (Val == RHS.Val)
+ return false;
+ return markOverdefined();
+ }
+
+ if (RHS.isNotConstant()) {
+ if (Val == RHS.Val)
return markOverdefined();
- return markNotConstant(RHS.getNotConstant());
- } else if (isConstantRange()) {
+
+ // Unless we can prove that the two Constants are different, we must
+ // move to overdefined.
+ // FIXME: use TargetData for smarter constant folding.
+ if (ConstantInt *Res = dyn_cast<ConstantInt>(
+ ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
+ getConstant(),
+ RHS.getNotConstant())))
+ if (Res->isOne())
+ return markNotConstant(RHS.getNotConstant());
+
return markOverdefined();
}
-
- assert(isUndefined() && "Unexpected lattice");
- return markNotConstant(RHS.getNotConstant());
+
+ // RHS is a ConstantRange, LHS is a non-integer Constant.
+
+ // FIXME: consider the case where RHS is a range [1, 0) and LHS is
+ // a function. The correct result is to pick up RHS.
+
+ return markOverdefined();
}
-
- if (RHS.isConstantRange()) {
- if (isConstantRange()) {
- ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
- if (NewR.isFullSet())
+
+ if (isNotConstant()) {
+ if (RHS.isConstant()) {
+ if (Val == RHS.Val)
return markOverdefined();
- else
- return markConstantRange(NewR);
- } else if (!isUndefined()) {
+
+ // Unless we can prove that the two Constants are different, we must
+ // move to overdefined.
+ // FIXME: use TargetData for smarter constant folding.
+ if (ConstantInt *Res = dyn_cast<ConstantInt>(
+ ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
+ getNotConstant(),
+ RHS.getConstant())))
+ if (Res->isOne())
+ return false;
+
return markOverdefined();
}
-
- assert(isUndefined() && "Unexpected lattice");
- return markConstantRange(RHS.getConstantRange());
- }
-
- // RHS must be a constant, we must be undef, constant, or notconstant.
- assert(!isConstantRange() &&
- "Constant and ConstantRange cannot be merged.");
-
- if (isUndefined())
- return markConstant(RHS.getConstant());
-
- if (isConstant()) {
- if (getConstant() != RHS.getConstant())
+
+ if (RHS.isNotConstant()) {
+ if (Val == RHS.Val)
+ return false;
return markOverdefined();
- return false;
+ }
+
+ return markOverdefined();
}
- // If we are known "!=4" and RHS is "==5", stay at "!=4".
- if (getNotConstant() == RHS.getConstant() ||
- isa<ConstantExpr>(getNotConstant()) ||
- isa<ConstantExpr>(RHS.getConstant()))
+ assert(isConstantRange() && "New LVILattice type?");
+ if (!RHS.isConstantRange())
return markOverdefined();
- return false;
+
+ ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
+ if (NewR.isFullSet())
+ return markOverdefined();
+ return markConstantRange(NewR);
}
-
};
} // end anonymous namespace.
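Aside: the rewritten mergeIn above is a join on the lattice undefined < {constant, notconstant, constantrange} < overdefined. A self-contained toy model of the core rules (editorial; it deliberately omits the constant-folding refinements and range unions, and LVILatticeVal itself is file-local):

    #include <cassert>

    enum Tag { Undefined, Constant, NotConstant, Overdefined };
    struct Val { Tag T; int Id; }; // Id stands in for the Constant* identity

    static Val merge(Val L, Val R) {
      if (R.T == Undefined || L.T == Overdefined) return L;
      if (R.T == Overdefined || L.T == Undefined) return R;
      if (L.T == R.T && L.Id == R.Id) return L; // agreement: no change
      // constant(4) vs notconstant(4), constant(4) vs constant(5), and so
      // on all collapse to overdefined absent a constant-folding proof.
      Val O = { Overdefined, 0 };
      return O;
    }

    int main() {
      Val C4 = { Constant, 4 }, N4 = { NotConstant, 4 }, U = { Undefined, 0 };
      assert(merge(U, C4).T == Constant);     // undefined is the identity
      assert(merge(C4, C4).T == Constant);    // agreeing constants survive
      assert(merge(C4, N4).T == Overdefined); // disagreement collapses
      return 0;
    }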
@@ -267,49 +288,136 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
//===----------------------------------------------------------------------===//
namespace {
+ /// LVIValueHandle - A callback value handle that updates the cache when
+ /// values are erased.
+ class LazyValueInfoCache;
+ struct LVIValueHandle : public CallbackVH {
+ LazyValueInfoCache *Parent;
+
+ LVIValueHandle(Value *V, LazyValueInfoCache *P)
+ : CallbackVH(V), Parent(P) { }
+
+ void deleted();
+ void allUsesReplacedWith(Value *V) {
+ deleted();
+ }
+ };
+}
+
+namespace llvm {
+ template<>
+ struct DenseMapInfo<LVIValueHandle> {
+ typedef DenseMapInfo<Value*> PointerInfo;
+ static inline LVIValueHandle getEmptyKey() {
+ return LVIValueHandle(PointerInfo::getEmptyKey(),
+ static_cast<LazyValueInfoCache*>(0));
+ }
+ static inline LVIValueHandle getTombstoneKey() {
+ return LVIValueHandle(PointerInfo::getTombstoneKey(),
+ static_cast<LazyValueInfoCache*>(0));
+ }
+ static unsigned getHashValue(const LVIValueHandle &Val) {
+ return PointerInfo::getHashValue(Val);
+ }
+ static bool isEqual(const LVIValueHandle &LHS, const LVIValueHandle &RHS) {
+ return LHS == RHS;
+ }
+ };
+
+ template<>
+ struct DenseMapInfo<std::pair<AssertingVH<BasicBlock>, Value*> > {
+ typedef std::pair<AssertingVH<BasicBlock>, Value*> PairTy;
+ typedef DenseMapInfo<AssertingVH<BasicBlock> > APointerInfo;
+ typedef DenseMapInfo<Value*> BPointerInfo;
+ static inline PairTy getEmptyKey() {
+ return std::make_pair(APointerInfo::getEmptyKey(),
+ BPointerInfo::getEmptyKey());
+ }
+ static inline PairTy getTombstoneKey() {
+ return std::make_pair(APointerInfo::getTombstoneKey(),
+ BPointerInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const PairTy &Val) {
+ return APointerInfo::getHashValue(Val.first) ^
+ BPointerInfo::getHashValue(Val.second);
+ }
+ static bool isEqual(const PairTy &LHS, const PairTy &RHS) {
+ return APointerInfo::isEqual(LHS.first, RHS.first) &&
+ BPointerInfo::isEqual(LHS.second, RHS.second);
+ }
+ };
+}
+
+namespace {
/// LazyValueInfoCache - This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
class LazyValueInfoCache {
- public:
- /// BlockCacheEntryTy - This is a computed lattice value at the end of the
- /// specified basic block for a Value* that depends on context.
- typedef std::pair<AssertingVH<BasicBlock>, LVILatticeVal> BlockCacheEntryTy;
-
/// ValueCacheEntryTy - This is all of the cached block information for
/// exactly one Value*. The entries are sorted by the BasicBlock* of the
/// entries, allowing us to do a lookup with a binary search.
typedef std::map<AssertingVH<BasicBlock>, LVILatticeVal> ValueCacheEntryTy;
- private:
- /// LVIValueHandle - A callback value handle update the cache when
- /// values are erased.
- struct LVIValueHandle : public CallbackVH {
+ /// ValueCache - This is all of the cached information for all values,
+ /// mapped from Value* to key information.
+ DenseMap<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+
+ /// OverDefinedCache - This tracks, on a per-block basis, the set of
+ /// values that are over-defined at the end of that block. This is required
+ /// for cache updating.
+ typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
+ DenseSet<OverDefinedPairTy> OverDefinedCache;
+
+ /// BlockValueStack - This stack holds the state of the value solver
+ /// during a query. It basically emulates the callstack of the naive
+ /// recursive value lookup process.
+ std::stack<std::pair<BasicBlock*, Value*> > BlockValueStack;
+
+ friend struct LVIValueHandle;
+
+ /// OverDefinedCacheUpdater - A helper object that ensures that the
+ /// OverDefinedCache is updated whenever solveBlockValue returns.
+ struct OverDefinedCacheUpdater {
LazyValueInfoCache *Parent;
+ Value *Val;
+ BasicBlock *BB;
+ LVILatticeVal &BBLV;
- LVIValueHandle(Value *V, LazyValueInfoCache *P)
- : CallbackVH(V), Parent(P) { }
+ OverDefinedCacheUpdater(Value *V, BasicBlock *B, LVILatticeVal &LV,
+ LazyValueInfoCache *P)
+ : Parent(P), Val(V), BB(B), BBLV(LV) { }
- void deleted();
- void allUsesReplacedWith(Value* V) {
- deleted();
- }
-
- LVIValueHandle &operator=(Value *V) {
- return *this = LVIValueHandle(V, Parent);
+ bool markResult(bool changed) {
+ if (changed && BBLV.isOverdefined())
+ Parent->OverDefinedCache.insert(std::make_pair(BB, Val));
+ return changed;
}
};
+
- /// ValueCache - This is all of the cached information for all values,
- /// mapped from Value* to key information.
- std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+
+ LVILatticeVal getBlockValue(Value *Val, BasicBlock *BB);
+ bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
+ LVILatticeVal &Result);
+ bool hasBlockValue(Value *Val, BasicBlock *BB);
+
+ // These methods process one work item and may add more. A false value
+ // returned means that the work item was not completely processed and must
+ // be revisited after going through the new items.
+ bool solveBlockValue(Value *Val, BasicBlock *BB);
+ bool solveBlockValueNonLocal(LVILatticeVal &BBLV,
+ Value *Val, BasicBlock *BB);
+ bool solveBlockValuePHINode(LVILatticeVal &BBLV,
+ PHINode *PN, BasicBlock *BB);
+ bool solveBlockValueConstantRange(LVILatticeVal &BBLV,
+ Instruction *BBI, BasicBlock *BB);
+
+ void solve();
- /// OverDefinedCache - This tracks, on a per-block basis, the set of
- /// values that are over-defined at the end of that block. This is required
- /// for cache updating.
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> > OverDefinedCache;
+ ValueCacheEntryTy &lookup(Value *V) {
+ return ValueCache[LVIValueHandle(V, this)];
+ }
public:
-
/// getValueInBlock - This is the query interface to determine the lattice
/// value for the specified Value* at the end of the specified block.
LVILatticeVal getValueInBlock(Value *V, BasicBlock *BB);
@@ -335,199 +443,112 @@ namespace {
};
} // end anonymous namespace
-//===----------------------------------------------------------------------===//
-// LVIQuery Impl
-//===----------------------------------------------------------------------===//
-
-namespace {
- /// LVIQuery - This is a transient object that exists while a query is
- /// being performed.
- ///
- /// TODO: Reuse LVIQuery instead of recreating it for every query, this avoids
- /// reallocation of the densemap on every query.
- class LVIQuery {
- typedef LazyValueInfoCache::BlockCacheEntryTy BlockCacheEntryTy;
- typedef LazyValueInfoCache::ValueCacheEntryTy ValueCacheEntryTy;
-
- /// This is the current value being queried for.
- Value *Val;
-
- /// This is a pointer to the owning cache, for recursive queries.
- LazyValueInfoCache &Parent;
-
- /// This is all of the cached information about this value.
- ValueCacheEntryTy &Cache;
-
- /// This tracks, for each block, what values are overdefined.
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &OverDefinedCache;
-
- /// NewBlocks - This is a mapping of the new BasicBlocks which have been
- /// added to cache but that are not in sorted order.
- DenseSet<BasicBlock*> NewBlockInfo;
-
- public:
-
- LVIQuery(Value *V, LazyValueInfoCache &P,
- ValueCacheEntryTy &VC,
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &ODC)
- : Val(V), Parent(P), Cache(VC), OverDefinedCache(ODC) {
- }
-
- ~LVIQuery() {
- // When the query is done, insert the newly discovered facts into the
- // cache in sorted order.
- if (NewBlockInfo.empty()) return;
-
- for (DenseSet<BasicBlock*>::iterator I = NewBlockInfo.begin(),
- E = NewBlockInfo.end(); I != E; ++I) {
- if (Cache[*I].isOverdefined())
- OverDefinedCache.insert(std::make_pair(*I, Val));
- }
- }
-
- LVILatticeVal getBlockValue(BasicBlock *BB);
- LVILatticeVal getEdgeValue(BasicBlock *FromBB, BasicBlock *ToBB);
-
- private:
- LVILatticeVal getCachedEntryForBlock(BasicBlock *BB);
- };
-} // end anonymous namespace
-
-void LazyValueInfoCache::LVIValueHandle::deleted() {
- for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+void LVIValueHandle::deleted() {
+ typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
+
+ SmallVector<OverDefinedPairTy, 4> ToErase;
+ for (DenseSet<OverDefinedPairTy>::iterator
I = Parent->OverDefinedCache.begin(),
E = Parent->OverDefinedCache.end();
- I != E; ) {
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
- ++I;
- if (tmp->second == getValPtr())
- Parent->OverDefinedCache.erase(tmp);
+ I != E; ++I) {
+ if (I->second == getValPtr())
+ ToErase.push_back(*I);
}
+ for (SmallVector<OverDefinedPairTy, 4>::iterator I = ToErase.begin(),
+ E = ToErase.end(); I != E; ++I)
+ Parent->OverDefinedCache.erase(*I);
+
// This erasure deallocates *this, so it MUST happen after we're done
// using any and all members of *this.
Parent->ValueCache.erase(*this);
}
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
- for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
- I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ) {
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
- ++I;
- if (tmp->first == BB)
- OverDefinedCache.erase(tmp);
+ SmallVector<OverDefinedPairTy, 4> ToErase;
+ for (DenseSet<OverDefinedPairTy>::iterator I = OverDefinedCache.begin(),
+ E = OverDefinedCache.end(); I != E; ++I) {
+ if (I->first == BB)
+ ToErase.push_back(*I);
}
+
+ for (SmallVector<OverDefinedPairTy, 4>::iterator I = ToErase.begin(),
+ E = ToErase.end(); I != E; ++I)
+ OverDefinedCache.erase(*I);
- for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
+ for (DenseMap<LVIValueHandle, ValueCacheEntryTy>::iterator
I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
I->second.erase(BB);
}
-/// getCachedEntryForBlock - See if we already have a value for this block. If
-/// so, return it, otherwise create a new entry in the Cache map to use.
-LVILatticeVal LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
- NewBlockInfo.insert(BB);
- return Cache[BB];
+void LazyValueInfoCache::solve() {
+ while (!BlockValueStack.empty()) {
+ std::pair<BasicBlock*, Value*> &e = BlockValueStack.top();
+ if (solveBlockValue(e.second, e.first))
+ BlockValueStack.pop();
+ }
+}
+
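Aside: solve() above implements a retry-on-top worklist -- an item stays on the stack until solveBlockValue reports it fully processed, so freshly pushed dependencies get serviced first. A generic, hypothetical sketch of the same discipline:

    #include <stack>

    // Invariant: process(Top, Work) returns true only when it fully handled
    // the item AND pushed nothing new; otherwise it pushes the missing
    // dependencies onto Work and returns false, leaving Top to be retried
    // once they have been solved.
    template <typename Item, typename Fn>
    void solveAll(std::stack<Item> &Work, Fn process) {
      while (!Work.empty()) {
        Item Top = Work.top();   // copy: process may push new items
        if (process(Top, Work))
          Work.pop();            // safe: nothing was pushed above Top
      }
    }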
+bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) {
+ // If already a constant, there is nothing to compute.
+ if (isa<Constant>(Val))
+ return true;
+
+ LVIValueHandle ValHandle(Val, this);
+ if (!ValueCache.count(ValHandle)) return false;
+ return ValueCache[ValHandle].count(BB);
+}
+
+LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
+ // If already a constant, there is nothing to compute.
+ if (Constant *VC = dyn_cast<Constant>(Val))
+ return LVILatticeVal::get(VC);
+
+ return lookup(Val)[BB];
}
-LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
- // See if we already have a value for this block.
- LVILatticeVal BBLV = getCachedEntryForBlock(BB);
+bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
+ if (isa<Constant>(Val))
+ return true;
+
+ ValueCacheEntryTy &Cache = lookup(Val);
+ LVILatticeVal &BBLV = Cache[BB];
+ // OverDefinedCacheUpdater is a helper object that will update
+ // the OverDefinedCache for us when this method exits. Make sure to
+ // call markResult on it as we exit, passing a bool to indicate if the
+ // cache needs updating, i.e. whether we have solved a new value or not.
+ OverDefinedCacheUpdater ODCacheUpdater(Val, BB, BBLV, this);
+
// If we've already computed this block's value, return it.
if (!BBLV.isUndefined()) {
DEBUG(dbgs() << " reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n');
- return BBLV;
+
+ // Since we're reusing a cached value here, we don't need to update the
+ // OverDefinedCache. The cache will have been properly updated
+ // whenever the cached value was inserted.
+ ODCacheUpdater.markResult(false);
+ return true;
}
// Otherwise, this is the first time we're seeing this block. Reset the
// lattice value to overdefined, so that cycles will terminate and be
// conservatively correct.
BBLV.markOverdefined();
- Cache[BB] = BBLV;
Instruction *BBI = dyn_cast<Instruction>(Val);
if (BBI == 0 || BBI->getParent() != BB) {
- LVILatticeVal Result; // Start Undefined.
-
- // If this is a pointer, and there's a load from that pointer in this BB,
- // then we know that the pointer can't be NULL.
- bool NotNull = false;
- if (Val->getType()->isPointerTy()) {
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
- LoadInst *L = dyn_cast<LoadInst>(BI);
- if (L && L->getPointerAddressSpace() == 0 &&
- L->getPointerOperand()->getUnderlyingObject() ==
- Val->getUnderlyingObject()) {
- NotNull = true;
- break;
- }
- }
- }
-
- unsigned NumPreds = 0;
- // Loop over all of our predecessors, merging what we know from them into
- // result.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- Result.mergeIn(getEdgeValue(*PI, BB));
-
- // If we hit overdefined, exit early. The BlockVals entry is already set
- // to overdefined.
- if (Result.isOverdefined()) {
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because of pred.\n");
- // If we previously determined that this is a pointer that can't be null
- // then return that rather than giving up entirely.
- if (NotNull) {
- const PointerType *PTy = cast<PointerType>(Val->getType());
- Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
- }
-
- return Result;
- }
- ++NumPreds;
- }
-
-
- // If this is the entry block, we must be asking about an argument. The
- // value is overdefined.
- if (NumPreds == 0 && BB == &BB->getParent()->front()) {
- assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
- Result.markOverdefined();
- return Result;
- }
-
- // Return the merged value, which is more precise than 'overdefined'.
- assert(!Result.isOverdefined());
- return Cache[BB] = Result;
+ return ODCacheUpdater.markResult(solveBlockValueNonLocal(BBLV, Val, BB));
}
-
- // If this value is defined by an instruction in this block, we have to
- // process it here somehow or return overdefined.
+
if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
- LVILatticeVal Result; // Start Undefined.
-
- // Loop over all of our predecessors, merging what we know from them into
- // result.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- Value* PhiVal = PN->getIncomingValueForBlock(*PI);
- Result.mergeIn(Parent.getValueOnEdge(PhiVal, *PI, BB));
-
- // If we hit overdefined, exit early. The BlockVals entry is already set
- // to overdefined.
- if (Result.isOverdefined()) {
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because of pred.\n");
- return Result;
- }
- }
-
- // Return the merged value, which is more precise than 'overdefined'.
- assert(!Result.isOverdefined());
- return Cache[BB] = Result;
+ return ODCacheUpdater.markResult(solveBlockValuePHINode(BBLV, PN, BB));
}
- assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(BBI)) {
+ BBLV = LVILatticeVal::getNot(ConstantPointerNull::get(AI->getType()));
+ return ODCacheUpdater.markResult(true);
+ }
// We can only analyze the definitions of certain classes of instructions
// (integral binops and casts at the moment), so bail if this isn't one.
@@ -536,10 +557,10 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
!BBI->getType()->isIntegerTy()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because inst def found.\n");
- Result.markOverdefined();
- return Result;
+ BBLV.markOverdefined();
+ return ODCacheUpdater.markResult(true);
}
-
+
// FIXME: We're currently limited to binops with a constant RHS. This should
// be improved.
BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
@@ -547,34 +568,177 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because inst def found.\n");
- Result.markOverdefined();
- return Result;
- }
+ BBLV.markOverdefined();
+ return ODCacheUpdater.markResult(true);
+ }
+
+ return ODCacheUpdater.markResult(solveBlockValueConstantRange(BBLV, BBI, BB));
+}
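
The ODCacheUpdater object threaded through solveBlockValue above is not
shown in these hunks. Judging by how markResult is called, it is a small
scope-guard that commits the solved lattice value to the cache exactly once
on the way out. A self-contained sketch of that idiom, with hypothetical
names standing in for the real LazyValueInfo types:

    // Hypothetical sketch of the "mark result, commit on scope exit" idiom;
    // the real helper in LazyValueInfo.cpp may differ in members and naming.
    template <typename Cache, typename Key, typename Lattice>
    class ResultMarker {
      Cache &C; Key K; Lattice &Val; bool Solved;
    public:
      ResultMarker(Cache &C, Key K, Lattice &Val)
          : C(C), K(K), Val(Val), Solved(false) {}
      bool markResult(bool S) { Solved = S; return S; }  // record outcome
      ~ResultMarker() { if (Solved) C[K] = Val; }        // commit once
    };
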
+
+static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
+ if (LoadInst *L = dyn_cast<LoadInst>(I)) {
+ return L->getPointerAddressSpace() == 0 &&
+ GetUnderlyingObject(L->getPointerOperand()) ==
+ GetUnderlyingObject(Ptr);
+ }
+ if (StoreInst *S = dyn_cast<StoreInst>(I)) {
+ return S->getPointerAddressSpace() == 0 &&
+ GetUnderlyingObject(S->getPointerOperand()) ==
+ GetUnderlyingObject(Ptr);
+ }
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+ if (MI->isVolatile()) return false;
+ if (MI->getAddressSpace() != 0) return false;
+
+ // FIXME: check whether it has a value range that excludes zero?
+ ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
+ if (!Len || Len->isZero()) return false;
+
+ if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
+ return true;
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+ return MTI->getRawSource() == Ptr || MTI->getSource() == Ptr;
+ }
+ return false;
+}
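
InstructionDereferencesPointer feeds a simple inference: in address space 0,
a successful load, store, or non-volatile memory intrinsic through a pointer
implies the pointer was non-null, so later null tests on the same path can
be folded. A C++ analogue of the reasoning, for intuition only (not LLVM
code):

    // After *p executes, an optimizer using this analysis may treat
    // (p == nullptr) as false on every path through the dereference.
    int demo(int *p) {
      int x = *p;          // dereference: p cannot be null past this point
      if (p == nullptr)    // LVI-style reasoning folds this test to false
        return -1;
      return x;
    }
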
+
+bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
+ Value *Val, BasicBlock *BB) {
+ LVILatticeVal Result; // Start Undefined.
+
+ // If this is a pointer, and there's a dereference of that pointer in this
+ // BB, then we know that the pointer can't be NULL.
+ bool NotNull = false;
+ if (Val->getType()->isPointerTy()) {
+ if (isa<AllocaInst>(Val)) {
+ NotNull = true;
+ } else {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ if (InstructionDereferencesPointer(BI, Val)) {
+ NotNull = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // If this is the entry block, we must be asking about an argument. The
+ // value is overdefined.
+ if (BB == &BB->getParent()->getEntryBlock()) {
+ assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
+ if (NotNull) {
+ const PointerType *PTy = cast<PointerType>(Val->getType());
+ Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
+ } else {
+ Result.markOverdefined();
+ }
+ BBLV = Result;
+ return true;
+ }
+
+ // Loop over all of our predecessors, merging what we know from them into
+ // result.
+ bool EdgesMissing = false;
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ LVILatticeVal EdgeResult;
+ EdgesMissing |= !getEdgeValue(Val, *PI, BB, EdgeResult);
+ if (EdgesMissing)
+ continue;
+ Result.mergeIn(EdgeResult);
+
+ // If we hit overdefined, exit early. The BlockVals entry is already set
+ // to overdefined.
+ if (Result.isOverdefined()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred.\n");
+ // If we previously determined that this is a pointer that can't be null
+ // then return that rather than giving up entirely.
+ if (NotNull) {
+ const PointerType *PTy = cast<PointerType>(Val->getType());
+ Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
+ }
+
+ BBLV = Result;
+ return true;
+ }
+ }
+ if (EdgesMissing)
+ return false;
+
+ // Return the merged value, which is more precise than 'overdefined'.
+ assert(!Result.isOverdefined());
+ BBLV = Result;
+ return true;
+}
+
+bool LazyValueInfoCache::solveBlockValuePHINode(LVILatticeVal &BBLV,
+ PHINode *PN, BasicBlock *BB) {
+ LVILatticeVal Result; // Start Undefined.
+
+ // Loop over all of our predecessors, merging what we know from them into
+ // result.
+ bool EdgesMissing = false;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ BasicBlock *PhiBB = PN->getIncomingBlock(i);
+ Value *PhiVal = PN->getIncomingValue(i);
+ LVILatticeVal EdgeResult;
+ EdgesMissing |= !getEdgeValue(PhiVal, PhiBB, BB, EdgeResult);
+ if (EdgesMissing)
+ continue;
+
+ Result.mergeIn(EdgeResult);
+
+ // If we hit overdefined, exit early. The BlockVals entry is already set
+ // to overdefined.
+ if (Result.isOverdefined()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred.\n");
+
+ BBLV = Result;
+ return true;
+ }
+ }
+ if (EdgesMissing)
+ return false;
+
+ // Return the merged value, which is more precise than 'overdefined'.
+ assert(!Result.isOverdefined() && "Possible PHI in entry block?");
+ BBLV = Result;
+ return true;
+}
+
+bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,
+ Instruction *BBI,
+ BasicBlock *BB) {
// Figure out the range of the LHS. If that fails, bail.
- LVILatticeVal LHSVal = Parent.getValueInBlock(BBI->getOperand(0), BB);
+ if (!hasBlockValue(BBI->getOperand(0), BB)) {
+ BlockValueStack.push(std::make_pair(BB, BBI->getOperand(0)));
+ return false;
+ }
+
+ LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
if (!LHSVal.isConstantRange()) {
- Result.markOverdefined();
- return Result;
+ BBLV.markOverdefined();
+ return true;
}
- ConstantInt *RHS = 0;
ConstantRange LHSRange = LHSVal.getConstantRange();
ConstantRange RHSRange(1);
const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
if (isa<BinaryOperator>(BBI)) {
- RHS = dyn_cast<ConstantInt>(BBI->getOperand(1));
- if (!RHS) {
- Result.markOverdefined();
- return Result;
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(BBI->getOperand(1))) {
+ RHSRange = ConstantRange(RHS->getValue());
+ } else {
+ BBLV.markOverdefined();
+ return true;
}
-
- RHSRange = ConstantRange(RHS->getValue(), RHS->getValue()+1);
}
-
+
// NOTE: We're currently limited by the set of operations that ConstantRange
// can evaluate symbolically. Enhancing that set will allow us to analyze
// more definitions.
+ LVILatticeVal Result;
switch (BBI->getOpcode()) {
case Instruction::Add:
Result.markConstantRange(LHSRange.add(RHSRange));
@@ -606,6 +770,12 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
case Instruction::BitCast:
Result.markConstantRange(LHSRange);
break;
+ case Instruction::And:
+ Result.markConstantRange(LHSRange.binaryAnd(RHSRange));
+ break;
+ case Instruction::Or:
+ Result.markConstantRange(LHSRange.binaryOr(RHSRange));
+ break;
// Unhandled instructions are overdefined.
default:
@@ -615,12 +785,19 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
break;
}
- return Cache[BB] = Result;
+ BBLV = Result;
+ return true;
}
-
/// getEdgeValue - This method attempts to infer more complex, edge-specific
/// information about the value along the edge from BBFrom to BBTo.
-LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
+bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
+ BasicBlock *BBTo, LVILatticeVal &Result) {
+ // If already a constant, there is nothing to compute.
+ if (Constant *VC = dyn_cast<Constant>(Val)) {
+ Result = LVILatticeVal::get(VC);
+ return true;
+ }
+
// TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
// know that v != 0.
if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
@@ -634,9 +811,11 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
// If V is the condition of the branch itself, then we know exactly what
// it is.
- if (BI->getCondition() == Val)
- return LVILatticeVal::get(ConstantInt::get(
+ if (BI->getCondition() == Val) {
+ Result = LVILatticeVal::get(ConstantInt::get(
Type::getInt1Ty(Val->getContext()), isTrueDest));
+ return true;
+ }
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
@@ -647,30 +826,40 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
- return LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
- return LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
+ Result = LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
+ else
+ Result = LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
+ return true;
}
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
// Calculate the range of values that would satisfy the comparison.
ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
ConstantRange TrueValues =
ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
-
+
// If we're interested in the false dest, invert the condition.
if (!isTrueDest) TrueValues = TrueValues.inverse();
// Figure out the possible values of the query BEFORE this branch.
- LVILatticeVal InBlock = getBlockValue(BBFrom);
- if (!InBlock.isConstantRange())
- return LVILatticeVal::getRange(TrueValues);
-
+ if (!hasBlockValue(Val, BBFrom)) {
+ BlockValueStack.push(std::make_pair(BBFrom, Val));
+ return false;
+ }
+
+ LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
+ if (!InBlock.isConstantRange()) {
+ Result = LVILatticeVal::getRange(TrueValues);
+ return true;
+ }
+
// Find all potential values that satisfy both the input and output
// conditions.
ConstantRange PossibleValues =
TrueValues.intersectWith(InBlock.getConstantRange());
-
- return LVILatticeVal::getRange(PossibleValues);
+
+ Result = LVILatticeVal::getRange(PossibleValues);
+ return true;
}
}
}
@@ -682,9 +871,8 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
if (SI->getCondition() == Val) {
// We don't know anything in the default case.
if (SI->getDefaultDest() == BBTo) {
- LVILatticeVal Result;
Result.markOverdefined();
- return Result;
+ return true;
}
// We only know something if there is exactly one value that goes from
@@ -697,51 +885,48 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
EdgeVal = SI->getCaseValue(i);
}
assert(EdgeVal && "Missing successor?");
- if (NumEdges == 1)
- return LVILatticeVal::get(EdgeVal);
+ if (NumEdges == 1) {
+ Result = LVILatticeVal::get(EdgeVal);
+ return true;
+ }
}
}
// Otherwise see if the value is known in the block.
- return getBlockValue(BBFrom);
+ if (hasBlockValue(Val, BBFrom)) {
+ Result = getBlockValue(Val, BBFrom);
+ return true;
+ }
+ BlockValueStack.push(std::make_pair(BBFrom, Val));
+ return false;
}
-
-//===----------------------------------------------------------------------===//
-// LazyValueInfoCache Impl
-//===----------------------------------------------------------------------===//
-
LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) {
- // If already a constant, there is nothing to compute.
- if (Constant *VC = dyn_cast<Constant>(V))
- return LVILatticeVal::get(VC);
-
DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
<< BB->getName() << "'\n");
- LVILatticeVal Result = LVIQuery(V, *this,
- ValueCache[LVIValueHandle(V, this)],
- OverDefinedCache).getBlockValue(BB);
-
+ BlockValueStack.push(std::make_pair(BB, V));
+ solve();
+ LVILatticeVal Result = getBlockValue(V, BB);
+
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
LVILatticeVal LazyValueInfoCache::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB) {
- // If already a constant, there is nothing to compute.
- if (Constant *VC = dyn_cast<Constant>(V))
- return LVILatticeVal::get(VC);
-
DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
<< FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
- LVILatticeVal Result =
- LVIQuery(V, *this, ValueCache[LVIValueHandle(V, this)],
- OverDefinedCache).getEdgeValue(FromBB, ToBB);
-
+ LVILatticeVal Result;
+ if (!getEdgeValue(V, FromBB, ToBB, Result)) {
+ solve();
+ bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result);
+ (void)WasFastQuery;
+ assert(WasFastQuery && "More work to do after problem solved?");
+ }
+
DEBUG(dbgs() << " Result = " << Result << "\n");
-
return Result;
}
@@ -761,8 +946,8 @@ void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
worklist.push_back(OldSucc);
DenseSet<Value*> ClearSet;
- for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
- I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ++I) {
+ for (DenseSet<OverDefinedPairTy>::iterator I = OverDefinedCache.begin(),
+ E = OverDefinedCache.end(); I != E; ++I) {
if (I->first == OldSucc)
ClearSet.insert(I->second);
}
@@ -779,17 +964,17 @@ void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
if (ToUpdate == NewSucc) continue;
bool changed = false;
- for (DenseSet<Value*>::iterator I = ClearSet.begin(),E = ClearSet.end();
+ for (DenseSet<Value*>::iterator I = ClearSet.begin(), E = ClearSet.end();
I != E; ++I) {
// If a value was marked overdefined in OldSucc, and is here too...
- std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator OI =
+ DenseSet<OverDefinedPairTy>::iterator OI =
OverDefinedCache.find(std::make_pair(ToUpdate, *I));
if (OI == OverDefinedCache.end()) continue;
// Remove it from the caches.
ValueCacheEntryTy &Entry = ValueCache[LVIValueHandle(*I, this)];
ValueCacheEntryTy::iterator CI = Entry.find(ToUpdate);
-
+
assert(CI != Entry.end() && "Couldn't find entry to update?");
Entry.erase(CI);
OverDefinedCache.erase(OI);
@@ -798,7 +983,7 @@ void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
// blocks successors too.
changed = true;
}
-
+
if (!changed) continue;
worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
@@ -838,7 +1023,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) {
if (Result.isConstant())
return Result.getConstant();
- else if (Result.isConstantRange()) {
+ if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
@@ -854,7 +1039,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
- else if (Result.isConstantRange()) {
+ if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
@@ -874,7 +1059,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
Constant *Res = 0;
if (Result.isConstant()) {
Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD);
- if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Res))
+ if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
return Unknown;
}
@@ -899,13 +1084,12 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
}
// Handle more complex predicates.
- ConstantRange RHS(CI->getValue(), CI->getValue()+1);
- ConstantRange TrueValues = ConstantRange::makeICmpRegion(Pred, RHS);
- if (CR.intersectWith(TrueValues).isEmptySet())
- return False;
- else if (TrueValues.contains(CR))
+ ConstantRange TrueValues =
+ ICmpInst::makeConstantRange((ICmpInst::Predicate)Pred, CI->getValue());
+ if (TrueValues.contains(CR))
return True;
-
+ if (TrueValues.inverse().contains(CR))
+ return False;
return Unknown;
}
@@ -932,7 +1116,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
}
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
- BasicBlock* NewSucc) {
+ BasicBlock *NewSucc) {
if (PImpl) getCache(PImpl).threadEdge(PredBB, OldSucc, NewSucc);
}
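
The structural change in this file is from a recursive LVIQuery to an
iterative solver: a query that cannot be answered yet pushes its missing
(BasicBlock, Value) dependencies onto BlockValueStack and returns false, and
a solve() loop (not visible in these hunks) re-runs stack entries until
everything is resolved. A self-contained sketch of that pattern, with
placeholder names rather than the actual LazyValueInfoCache members:

    #include <stack>

    // Generic shape of the solve() loop implied by the pushes above. trySolve
    // returns false after pushing the entry's unsolved dependencies; the entry
    // stays on the stack and is re-examined once those dependencies resolve.
    template <typename Key>
    void solveSketch(std::stack<Key> &Work,
                     bool (*trySolve)(const Key &, std::stack<Key> &)) {
      while (!Work.empty()) {
        Key K = Work.top();
        if (trySolve(K, Work))
          Work.pop();
      }
    }
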
diff --git a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
index 7f51202..efb722b 100644
--- a/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -21,7 +21,7 @@ using namespace llvm;
// Register this pass...
char LibCallAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS(LibCallAliasAnalysis, AliasAnalysis, "libcall-aa",
- "LibCall Alias Analysis", false, true, false);
+ "LibCall Alias Analysis", false, true, false)
FunctionPass *llvm::createLibCallAliasAnalysisPass(LibCallInfo *LCI) {
return new LibCallAliasAnalysis(LCI);
@@ -43,8 +43,8 @@ void LibCallAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// vs the specified pointer/size.
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
- ImmutableCallSite CS, const Value *P,
- unsigned Size) {
+ ImmutableCallSite CS,
+ const Location &Loc) {
// If we have a function, check to see what kind of mod/ref effects it
// has. Start by including any info globally known about the function.
AliasAnalysis::ModRefResult MRInfo = FI->UniversalBehavior;
@@ -64,9 +64,9 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
if (FI->DetailsType == LibCallFunctionInfo::DoesNot) {
// Find out if the pointer refers to a known location.
for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) {
- const LibCallLocationInfo &Loc =
+ const LibCallLocationInfo &LocInfo =
LCI->getLocationInfo(Details[i].LocationID);
- LibCallLocationInfo::LocResult Res = Loc.isLocation(CS, P, Size);
+ LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc);
if (Res != LibCallLocationInfo::Yes) continue;
// If we find a match against a location that we 'do not' interact with,
@@ -85,9 +85,9 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
// Find out if the pointer refers to a known location.
bool NoneMatch = true;
for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) {
- const LibCallLocationInfo &Loc =
+ const LibCallLocationInfo &LocInfo =
LCI->getLocationInfo(Details[i].LocationID);
- LibCallLocationInfo::LocResult Res = Loc.isLocation(CS, P, Size);
+ LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc);
if (Res == LibCallLocationInfo::No) continue;
// If we don't know if this pointer points to the location, then we have to
@@ -118,7 +118,7 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
//
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Value *P, unsigned Size) {
+ const Location &Loc) {
ModRefResult MRInfo = ModRef;
// If this is a direct call to a function that LCI knows about, get the
@@ -126,12 +126,12 @@ LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
if (LCI) {
if (const Function *F = CS.getCalledFunction()) {
if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) {
- MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, P, Size));
+ MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, Loc));
if (MRInfo == NoModRef) return NoModRef;
}
}
}
// The AliasAnalysis base class has some smarts, lets use them.
- return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, P, Size));
+ return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, Loc));
}
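
The signature changes in this file follow an LLVM-wide migration from raw
(const Value *P, unsigned Size) pairs to AliasAnalysis::Location, which also
carries a 64-bit size and TBAA metadata. Its approximate shape at this
revision (consult include/llvm/Analysis/AliasAnalysis.h for the
authoritative definition):

    // Approximate sketch of AliasAnalysis::Location; the field names match
    // the usage visible in this patch, details may differ in the header.
    struct Location {
      const Value *Ptr;      // address being accessed
      uint64_t Size;         // byte size, or UnknownSize when not known
      const MDNode *TBAATag; // type-based alias metadata, may be null
    };
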
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index a9d9724..fc7edc0 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -70,7 +70,7 @@ namespace {
void visitCallSite(CallSite CS);
void visitMemoryReference(Instruction &I, Value *Ptr,
- unsigned Size, unsigned Align,
+ uint64_t Size, unsigned Align,
const Type *Ty, unsigned Flags);
void visitCallInst(CallInst &I);
@@ -108,7 +108,9 @@ namespace {
raw_string_ostream MessagesStr;
static char ID; // Pass identification, replacement for typeid
- Lint() : FunctionPass(ID), MessagesStr(Messages) {}
+ Lint() : FunctionPass(ID), MessagesStr(Messages) {
+ initializeLintPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -129,12 +131,6 @@ namespace {
}
}
- void WriteType(const Type *T) {
- if (!T) return;
- MessagesStr << ' ';
- WriteTypeSymbolic(MessagesStr, T, Mod);
- }
-
// CheckFailed - A check failed, so print out the condition and the message
// that failed. This provides a nice place to put a breakpoint if you want
// to see why something is not correct.
@@ -147,27 +143,16 @@ namespace {
WriteValue(V3);
WriteValue(V4);
}
-
- void CheckFailed(const Twine &Message, const Value *V1,
- const Type *T2, const Value *V3 = 0) {
- MessagesStr << Message.str() << "\n";
- WriteValue(V1);
- WriteType(T2);
- WriteValue(V3);
- }
-
- void CheckFailed(const Twine &Message, const Type *T1,
- const Type *T2 = 0, const Type *T3 = 0) {
- MessagesStr << Message.str() << "\n";
- WriteType(T1);
- WriteType(T2);
- WriteType(T3);
- }
};
}
char Lint::ID = 0;
-INITIALIZE_PASS(Lint, "lint", "Statically lint-checks LLVM IR", false, true);
+INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
+ false, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
+ false, true)
// Assert - We know that cond should be true; if not, print an error message.
#define Assert(C, M) \
@@ -208,7 +193,8 @@ void Lint::visitCallSite(CallSite CS) {
Instruction &I = *CS.getInstruction();
Value *Callee = CS.getCalledValue();
- visitMemoryReference(I, Callee, ~0u, 0, 0, MemRef::Callee);
+ visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
Assert1(CS.getCallingConv() == F->getCallingConv(),
@@ -240,15 +226,17 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: Call argument type mismatches "
"callee parameter type", &I);
- // Check that noalias arguments don't alias other arguments. The
- // AliasAnalysis API isn't expressive enough for what we really want
- // to do. Known partial overlap is not distinguished from the case
- // where nothing is known.
+ // Check that noalias arguments don't alias other arguments. This is
+ // not fully precise because we don't know the sizes of the dereferenced
+ // memory regions.
if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
- for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) {
- Assert1(AI == BI || AA->alias(*AI, *BI) != AliasAnalysis::MustAlias,
- "Unusual: noalias argument aliases another argument", &I);
- }
+ for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI)
+ if (AI != BI && (*BI)->getType()->isPointerTy()) {
+ AliasAnalysis::AliasResult Result = AA->alias(*AI, *BI);
+ Assert1(Result != AliasAnalysis::MustAlias &&
+ Result != AliasAnalysis::PartialAlias,
+ "Unusual: noalias argument aliases another argument", &I);
+ }
// Check that an sret argument points to valid memory.
if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
@@ -281,15 +269,17 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memcpy: {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MCI->getDest(), ~0u, MCI->getAlignment(), 0,
+ visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
+ MCI->getAlignment(), 0,
MemRef::Write);
- visitMemoryReference(I, MCI->getSource(), ~0u, MCI->getAlignment(), 0,
+ visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
+ MCI->getAlignment(), 0,
MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
// isn't expressive enough for what we really want to do. Known partial
// overlap is not distinguished from the case where nothing is known.
- unsigned Size = 0;
+ uint64_t Size = 0;
if (const ConstantInt *Len =
dyn_cast<ConstantInt>(findValue(MCI->getLength(),
/*OffsetOk=*/false)))
@@ -303,16 +293,19 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memmove: {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MMI->getDest(), ~0u, MMI->getAlignment(), 0,
+ visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
+ MMI->getAlignment(), 0,
MemRef::Write);
- visitMemoryReference(I, MMI->getSource(), ~0u, MMI->getAlignment(), 0,
+ visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
+ MMI->getAlignment(), 0,
MemRef::Read);
break;
}
case Intrinsic::memset: {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MSI->getDest(), ~0u, MSI->getAlignment(), 0,
+ visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
+ MSI->getAlignment(), 0,
MemRef::Write);
break;
}
@@ -322,24 +315,26 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: va_start called in a non-varargs function",
&I);
- visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
- MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
- visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0, MemRef::Write);
- visitMemoryReference(I, CS.getArgument(1), ~0u, 0, 0, MemRef::Read);
+ visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Read);
break;
case Intrinsic::vaend:
- visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
- MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
// Stackrestore doesn't read or write memory, but it sets the
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
- visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
- MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+ 0, 0, MemRef::Read | MemRef::Write);
break;
}
}
@@ -368,7 +363,7 @@ void Lint::visitReturnInst(ReturnInst &I) {
// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
- Value *Ptr, unsigned Size, unsigned Align,
+ Value *Ptr, uint64_t Size, unsigned Align,
const Type *Ty, unsigned Flags) {
// If no memory is being referenced, it doesn't matter if the pointer
// is valid.
@@ -512,12 +507,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), ~0u, 0, 0,
+ visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), ~0u, 0, 0, MemRef::Branchee);
+ visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
+ MemRef::Branchee);
Assert1(I.getNumDestinations() != 0,
"Undefined behavior: indirectbr with no destinations", &I);
@@ -571,7 +567,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// TODO: Look through eliminable cast pairs.
// TODO: Look through calls with unique return values.
// TODO: Look through vector insert/extract/shuffle.
- V = OffsetOk ? V->getUnderlyingObject() : V->stripPointerCasts();
+ V = OffsetOk ? GetUnderlyingObject(V, TD) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
BasicBlock::iterator BBI = L;
BasicBlock *BB = L->getParent();
@@ -587,8 +583,9 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
BBI = BB->end();
}
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
- if (Value *W = PN->hasConstantValue(DT))
- return findValueImpl(W, OffsetOk, Visited);
+ if (Value *W = PN->hasConstantValue())
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
Type::getInt64Ty(V->getContext())))
@@ -620,9 +617,8 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, TD))
- if (W != Inst)
- return findValueImpl(W, OffsetOk, Visited);
+ if (Value *W = SimplifyInstruction(Inst, TD, DT))
+ return findValueImpl(W, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (Value *W = ConstantFoldConstantExpression(CE, TD))
if (W != V)
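
Widening the size parameters from unsigned to uint64_t also changes what the
sentinel must be: ~0u zero-extends to 0xFFFFFFFF in a 64-bit context, which
is merely a very large size, not "unknown". Hence the systematic switch to
AliasAnalysis::UnknownSize. A two-line illustration:

    uint64_t a = ~0u;   // 0x00000000FFFFFFFF: a huge size, not a sentinel
    uint64_t b = ~0ULL; // 0xFFFFFFFFFFFFFFFF: what UnknownSize denotes,
                        // assuming it is defined as ~UINT64_C(0)
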
diff --git a/contrib/llvm/lib/Analysis/LiveValues.cpp b/contrib/llvm/lib/Analysis/LiveValues.cpp
index 0225f4f..a0e6034 100644
--- a/contrib/llvm/lib/Analysis/LiveValues.cpp
+++ b/contrib/llvm/lib/Analysis/LiveValues.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LiveValues.h"
+#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;
@@ -22,10 +23,16 @@ namespace llvm {
}
char LiveValues::ID = 0;
-INITIALIZE_PASS(LiveValues, "live-values",
- "Value Liveness Analysis", false, true);
-
-LiveValues::LiveValues() : FunctionPass(ID) {}
+INITIALIZE_PASS_BEGIN(LiveValues, "live-values",
+ "Value Liveness Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LiveValues, "live-values",
+ "Value Liveness Analysis", false, true)
+
+LiveValues::LiveValues() : FunctionPass(ID) {
+ initializeLiveValuesPass(*PassRegistry::getPassRegistry());
+}
void LiveValues::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
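
The same two-part recipe recurs across these files: the single
INITIALIZE_PASS macro becomes an INITIALIZE_PASS_BEGIN / _DEPENDENCY / _END
group so that a pass's required analyses are initialized along with it, and
the constructor calls initialize<Name>Pass so lazily created passes register
themselves. For a hypothetical pass Foo the pattern reads:

    // Hypothetical pass "Foo" following the registration recipe used above.
    char Foo::ID = 0;
    INITIALIZE_PASS_BEGIN(Foo, "foo", "Foo Analysis", false, true)
    INITIALIZE_PASS_DEPENDENCY(DominatorTree)  // required passes go here
    INITIALIZE_AG_DEPENDENCY(AliasAnalysis)    // analysis groups use _AG_
    INITIALIZE_PASS_END(Foo, "foo", "Foo Analysis", false, true)

    Foo::Foo() : FunctionPass(ID) {
      initializeFooPass(*PassRegistry::getPassRegistry());
    }
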
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
index 2ba1d86..2ea27fb 100644
--- a/contrib/llvm/lib/Analysis/Loads.cpp
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -49,7 +49,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
/// bitcasts to get back to the underlying object being addressed, keeping
/// track of the offset in bytes from the GEPs relative to the result.
-/// This is closely related to Value::getUnderlyingObject but is located
+/// This is closely related to GetUnderlyingObject but is located
/// here to avoid making VMCore depend on TargetData.
static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
uint64_t &ByteOffset,
@@ -166,7 +166,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
// If we're using alias analysis to disambiguate get the size of *Ptr.
- unsigned AccessSize = 0;
+ uint64_t AccessSize = 0;
if (AA) {
const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
AccessSize = AA->getTypeStoreSize(AccessTy);
diff --git a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
index 82c02dc..c1afe8f 100644
--- a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -27,6 +27,8 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Instructions.h"
#include "llvm/Operator.h"
#include "llvm/Support/Allocator.h"
@@ -46,8 +48,12 @@ LoopPass *llvm::createLoopDependenceAnalysisPass() {
return new LoopDependenceAnalysis();
}
-INITIALIZE_PASS(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true);
+INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda",
+ "Loop Dependence Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda",
+ "Loop Dependence Analysis", false, true)
char LoopDependenceAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
@@ -86,8 +92,8 @@ static Value *GetPointerOperand(Value *I) {
static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
const Value *A,
const Value *B) {
- const Value *aObj = A->getUnderlyingObject();
- const Value *bObj = B->getUnderlyingObject();
+ const Value *aObj = GetUnderlyingObject(A);
+ const Value *bObj = GetUnderlyingObject(B);
return AA->alias(aObj, AA->getTypeStoreSize(aObj->getType()),
bObj, AA->getTypeStoreSize(bObj->getType()));
}
@@ -128,7 +134,7 @@ void LoopDependenceAnalysis::getLoops(const SCEV *S,
DenseSet<const Loop*>* Loops) const {
// Refactor this into an SCEVVisitor, if efficiency becomes a concern.
for (const Loop *L = this->L; L != 0; L = L->getParentLoop())
- if (!S->isLoopInvariant(L))
+ if (!SE->isLoopInvariant(S, L))
Loops->insert(L);
}
@@ -217,6 +223,7 @@ LoopDependenceAnalysis::analysePair(DependencePair *P) const {
switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) {
case AliasAnalysis::MayAlias:
+ case AliasAnalysis::PartialAlias:
// We cannot analyse objects if we do not know about their aliasing.
DEBUG(dbgs() << "---> [?] may alias\n");
return Unknown;
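
Another recurring substitution: the Value method V->getUnderlyingObject()
becomes the free function GetUnderlyingObject(V, TD) from
llvm/Analysis/ValueTracking.h, moving the logic out of VMCore and letting it
consult TargetData when one is available. Call sites change mechanically:

    #include "llvm/Analysis/ValueTracking.h"

    // Before: const Value *Obj = Ptr->getUnderlyingObject();
    // After (the TargetData argument is optional and may be null, as the
    // one-argument calls elsewhere in this patch show):
    const Value *Obj = GetUnderlyingObject(Ptr, TD);
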
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 46219d1..0583140 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -38,7 +38,9 @@ VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
cl::desc("Verify loop info (time consuming)"));
char LoopInfo::ID = 0;
-INITIALIZE_PASS(LoopInfo, "loops", "Natural Loop Information", true, true);
+INITIALIZE_PASS_BEGIN(LoopInfo, "loops", "Natural Loop Information", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(LoopInfo, "loops", "Natural Loop Information", true, true)
//===----------------------------------------------------------------------===//
// Loop implementation
@@ -48,15 +50,18 @@ INITIALIZE_PASS(LoopInfo, "loops", "Natural Loop Information", true, true);
///
bool Loop::isLoopInvariant(Value *V) const {
if (Instruction *I = dyn_cast<Instruction>(V))
- return isLoopInvariant(I);
+ return !contains(I);
return true; // All non-instructions are loop invariant
}
-/// isLoopInvariant - Return true if the specified instruction is
-/// loop-invariant.
-///
-bool Loop::isLoopInvariant(Instruction *I) const {
- return !contains(I);
+/// hasLoopInvariantOperands - Return true if all the operands of the
+/// specified instruction are loop invariant.
+bool Loop::hasLoopInvariantOperands(Instruction *I) const {
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
+ if (!isLoopInvariant(I->getOperand(i)))
+ return false;
+
+ return true;
}
/// makeLoopInvariant - If the given value is an instruction inside of the
@@ -105,6 +110,7 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (!makeLoopInvariant(I->getOperand(i), Changed, InsertPt))
return false;
+
// Hoist.
I->moveBefore(InsertPt);
Changed = true;
@@ -192,7 +198,7 @@ Value *Loop::getTripCount() const {
/// getSmallConstantTripCount - Returns the trip count of this loop as a
/// normal unsigned value, if possible. Returns 0 if the trip count is unknown
-/// of not constant. Will also return 0 if the trip count is very large
+/// or not constant. Will also return 0 if the trip count is very large
/// (>= 2^32)
unsigned Loop::getSmallConstantTripCount() const {
Value* TripCount = this->getTripCount();
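
hasLoopInvariantOperands gives hoisting code a one-call test before moving
an instruction out of the loop. A hedged usage sketch, with the safety
checks a real transform would need reduced to a single side-effect test:

    // LICM-style guard using the new helper; L is a Loop*, I an Instruction*.
    // A real transform would also verify that a preheader exists.
    if (L->contains(I) && L->hasLoopInvariantOperands(I) &&
        !I->mayHaveSideEffects())
      I->moveBefore(L->getLoopPreheader()->getTerminator());
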
diff --git a/contrib/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm/lib/Analysis/LoopPass.cpp
index 15d4db8..8e1a7bf 100644
--- a/contrib/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm/lib/Analysis/LoopPass.cpp
@@ -30,7 +30,6 @@ private:
public:
static char ID;
- PrintLoopPass() : LoopPass(ID), Out(dbgs()) {}
PrintLoopPass(const std::string &B, raw_ostream &o)
: LoopPass(ID), Banner(B), Out(o) {}
diff --git a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
new file mode 100644
index 0000000..64d215c
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
@@ -0,0 +1,167 @@
+//===- MemDepPrinter.cpp - Printer for MemoryDependenceAnalysis -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/InstIterator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SetVector.h"
+using namespace llvm;
+
+namespace {
+ struct MemDepPrinter : public FunctionPass {
+ const Function *F;
+
+ typedef PointerIntPair<const Instruction *, 1> InstAndClobberFlag;
+ typedef std::pair<InstAndClobberFlag, const BasicBlock *> Dep;
+ typedef SmallSetVector<Dep, 4> DepSet;
+ typedef DenseMap<const Instruction *, DepSet> DepSetMap;
+ DepSetMap Deps;
+
+ static char ID; // Pass identification, replacement for typeid
+ MemDepPrinter() : FunctionPass(ID) {
+ initializeMemDepPrinterPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnFunction(Function &F);
+
+ void print(raw_ostream &OS, const Module * = 0) const;
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredTransitive<AliasAnalysis>();
+ AU.addRequiredTransitive<MemoryDependenceAnalysis>();
+ AU.setPreservesAll();
+ }
+
+ virtual void releaseMemory() {
+ Deps.clear();
+ F = 0;
+ }
+ };
+}
+
+char MemDepPrinter::ID = 0;
+INITIALIZE_PASS_BEGIN(MemDepPrinter, "print-memdeps",
+ "Print MemDeps of function", false, true)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_PASS_END(MemDepPrinter, "print-memdeps",
+ "Print MemDeps of function", false, true)
+
+FunctionPass *llvm::createMemDepPrinter() {
+ return new MemDepPrinter();
+}
+
+bool MemDepPrinter::runOnFunction(Function &F) {
+ this->F = &F;
+ AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
+ MemoryDependenceAnalysis &MDA = getAnalysis<MemoryDependenceAnalysis>();
+
+ // All this code uses non-const interfaces because MemDep is not
+ // const-friendly, though nothing is actually modified.
+ for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+ Instruction *Inst = &*I;
+
+ if (!Inst->mayReadFromMemory() && !Inst->mayWriteToMemory())
+ continue;
+
+ MemDepResult Res = MDA.getDependency(Inst);
+ if (!Res.isNonLocal()) {
+ assert(Res.isClobber() != Res.isDef() &&
+ "Local dep should be def or clobber!");
+ Deps[Inst].insert(std::make_pair(InstAndClobberFlag(Res.getInst(),
+ Res.isClobber()),
+ static_cast<BasicBlock *>(0)));
+ } else if (CallSite CS = cast<Value>(Inst)) {
+ const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
+ MDA.getNonLocalCallDependency(CS);
+
+ DepSet &InstDeps = Deps[Inst];
+ for (MemoryDependenceAnalysis::NonLocalDepInfo::const_iterator
+ I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
+ const MemDepResult &Res = I->getResult();
+ assert(Res.isClobber() != Res.isDef() &&
+ "Resolved non-local call dep should be def or clobber!");
+ InstDeps.insert(std::make_pair(InstAndClobberFlag(Res.getInst(),
+ Res.isClobber()),
+ I->getBB()));
+ }
+ } else {
+ SmallVector<NonLocalDepResult, 4> NLDI;
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ // FIXME: Volatile is not handled properly here.
+ AliasAnalysis::Location Loc = AA.getLocation(LI);
+ MDA.getNonLocalPointerDependency(Loc, !LI->isVolatile(),
+ LI->getParent(), NLDI);
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // FIXME: Volatile is not handled properly here.
+ AliasAnalysis::Location Loc = AA.getLocation(SI);
+ MDA.getNonLocalPointerDependency(Loc, false, SI->getParent(), NLDI);
+ } else if (VAArgInst *VI = dyn_cast<VAArgInst>(Inst)) {
+ AliasAnalysis::Location Loc = AA.getLocation(VI);
+ MDA.getNonLocalPointerDependency(Loc, false, VI->getParent(), NLDI);
+ } else {
+ llvm_unreachable("Unknown memory instruction!");
+ }
+
+ DepSet &InstDeps = Deps[Inst];
+ for (SmallVectorImpl<NonLocalDepResult>::const_iterator
+ I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
+ const MemDepResult &Res = I->getResult();
+ assert(Res.isClobber() != Res.isDef() &&
+ "Resolved non-local pointer dep should be def or clobber!");
+ InstDeps.insert(std::make_pair(InstAndClobberFlag(Res.getInst(),
+ Res.isClobber()),
+ I->getBB()));
+ }
+ }
+ }
+
+ return false;
+}
+
+void MemDepPrinter::print(raw_ostream &OS, const Module *M) const {
+ for (const_inst_iterator I = inst_begin(*F), E = inst_end(*F); I != E; ++I) {
+ const Instruction *Inst = &*I;
+
+ DepSetMap::const_iterator DI = Deps.find(Inst);
+ if (DI == Deps.end())
+ continue;
+
+ const DepSet &InstDeps = DI->second;
+
+ for (DepSet::const_iterator I = InstDeps.begin(), E = InstDeps.end();
+ I != E; ++I) {
+ const Instruction *DepInst = I->first.getPointer();
+ bool isClobber = I->first.getInt();
+ const BasicBlock *DepBB = I->second;
+
+ OS << " " << (isClobber ? "Clobber" : " Def");
+ if (DepBB) {
+ OS << " in block ";
+ WriteAsOperand(OS, DepBB, /*PrintType=*/false, M);
+ }
+ OS << " from: ";
+ if (DepInst == Inst)
+ OS << "<unspecified>";
+ else
+ DepInst->print(OS);
+ OS << "\n";
+ }
+
+ Inst->print(OS);
+ OS << "\n\n";
+ }
+}
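
The new printer is registered under the command-line name "print-memdeps",
so it should be reachable through opt's usual analysis-printing path
(opt -print-memdeps -analyze input.bc), or programmatically:

    #include "llvm/PassManager.h"
    #include "llvm/Analysis/Passes.h"

    // Programmatic use of the printer; createMemDepPrinter is the factory
    // defined above. M is an llvm::Module already parsed by the caller.
    void printMemDeps(llvm::Module &M) {
      llvm::PassManager PM;
      PM.add(llvm::createMemDepPrinter());
      PM.run(M);
    }
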
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index d18d5ce..35043bd 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -19,15 +19,18 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetData.h"
using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@@ -46,11 +49,15 @@ STATISTIC(NumCacheCompleteNonLocalPtr,
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
-INITIALIZE_PASS(MemoryDependenceAnalysis, "memdep",
- "Memory Dependence Analysis", false, true);
+INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true)
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID), PredCache(0) {
+ initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
@@ -77,6 +84,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
+ TD = getAnalysisIfAvailable<TargetData>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
return false;
@@ -92,11 +100,79 @@ static void RemoveFromReverseMap(DenseMap<Instruction*,
InstIt = ReverseMap.find(Inst);
assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
bool Found = InstIt->second.erase(Val);
- assert(Found && "Invalid reverse map!"); Found=Found;
+ assert(Found && "Invalid reverse map!"); (void)Found;
if (InstIt->second.empty())
ReverseMap.erase(InstIt);
}
+/// GetLocation - If the given instruction references a specific memory
+/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
+/// Return a ModRefInfo value describing the general behavior of the
+/// instruction.
+static
+AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
+ AliasAnalysis::Location &Loc,
+ AliasAnalysis *AA) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ if (LI->isVolatile()) {
+ Loc = AliasAnalysis::Location();
+ return AliasAnalysis::ModRef;
+ }
+ Loc = AA->getLocation(LI);
+ return AliasAnalysis::Ref;
+ }
+
+ if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ if (SI->isVolatile()) {
+ Loc = AliasAnalysis::Location();
+ return AliasAnalysis::ModRef;
+ }
+ Loc = AA->getLocation(SI);
+ return AliasAnalysis::Mod;
+ }
+
+ if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
+ Loc = AA->getLocation(V);
+ return AliasAnalysis::ModRef;
+ }
+
+ if (const CallInst *CI = isFreeCall(Inst)) {
+ // calls to free() deallocate the entire structure
+ Loc = AliasAnalysis::Location(CI->getArgOperand(0));
+ return AliasAnalysis::Mod;
+ }
+
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ Loc = AliasAnalysis::Location(II->getArgOperand(1),
+ cast<ConstantInt>(II->getArgOperand(0))
+ ->getZExtValue(),
+ II->getMetadata(LLVMContext::MD_tbaa));
+ // These intrinsics don't really modify the memory, but returning Mod
+ // will allow them to be handled conservatively.
+ return AliasAnalysis::Mod;
+ case Intrinsic::invariant_end:
+ Loc = AliasAnalysis::Location(II->getArgOperand(2),
+ cast<ConstantInt>(II->getArgOperand(1))
+ ->getZExtValue(),
+ II->getMetadata(LLVMContext::MD_tbaa));
+ // These intrinsics don't really modify the memory, but returning Mod
+ // will allow them to be handled conservatively.
+ return AliasAnalysis::Mod;
+ default:
+ break;
+ }
+
+ // Otherwise, just do the coarse-grained thing that always works.
+ if (Inst->mayWriteToMemory())
+ return AliasAnalysis::ModRef;
+ if (Inst->mayReadFromMemory())
+ return AliasAnalysis::Ref;
+ return AliasAnalysis::NoModRef;
+}
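
With GetLocation centralizing the instruction-to-Location mapping, a local
dependence query reduces to building a location and scanning backward. A
hedged sketch for a load, assuming AA, MDA, and a LoadInst *LI are already
in scope:

    // Local pointer-dependence query via the new Location-based interface.
    AliasAnalysis::Location Loc = AA.getLocation(LI); // ptr, size, TBAA tag
    MemDepResult Dep =
        MDA.getPointerDependencyFrom(Loc, /*isLoad=*/true,
                                     BasicBlock::iterator(LI),
                                     LI->getParent());
    if (Instruction *DepInst = Dep.getInst())
      DepInst->dump(); // the local def or clobber this load depends on
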
/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
@@ -108,19 +184,16 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
- Value *Pointer = 0;
- uint64_t PointerSize = 0;
- if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
- Pointer = S->getPointerOperand();
- PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType());
- } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
- Pointer = V->getOperand(0);
- PointerSize = AA->getTypeStoreSize(V->getType());
- } else if (const CallInst *CI = isFreeCall(Inst)) {
- Pointer = CI->getArgOperand(0);
- // calls to free() erase the entire structure
- PointerSize = ~0ULL;
- } else if (CallSite InstCS = cast<Value>(Inst)) {
+ AliasAnalysis::Location Loc;
+ AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
+ if (Loc.Ptr) {
+ // A simple instruction.
+ if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
+ return MemDepResult::getClobber(Inst);
+ continue;
+ }
+
+ if (CallSite InstCS = cast<Value>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
// If these two calls do not interfere, look past it.
@@ -128,23 +201,17 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
case AliasAnalysis::NoModRef:
// If the two calls are the same, return InstCS as a Def, so that
// CS can be found redundant and eliminated.
- if (isReadOnlyCall && InstCS.onlyReadsMemory() &&
+ if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
CS.getInstruction()->isIdenticalToWhenDefined(Inst))
return MemDepResult::getDef(Inst);
// Otherwise if the two calls don't interact (e.g. InstCS is readnone)
// keep scanning.
- continue;
+ break;
default:
return MemDepResult::getClobber(Inst);
}
- } else {
- // Non-memory instruction.
- continue;
}
-
- if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
- return MemDepResult::getClobber(Inst);
}
// No dependence found. If this is the entry block of the function, it is a
@@ -155,10 +222,11 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
}
/// getPointerDependencyFrom - Return the instruction on which a memory
-/// location depends. If isLoad is true, this routine ignore may-aliases with
-/// read-only operations.
+/// location depends. If isLoad is true, this routine ignores may-aliases with
+/// read-only operations. If isLoad is false, this routine ignores may-aliases
+/// with reads from read-only locations.
MemDepResult MemoryDependenceAnalysis::
-getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
+getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
Value *InvariantTag = 0;
@@ -175,8 +243,8 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
}
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
- // Debug intrinsics don't cause dependences.
- if (isa<DbgInfoIntrinsic>(Inst)) continue;
+ // Debug intrinsics don't (and can't) cause dependences.
+ if (isa<DbgInfoIntrinsic>(II)) continue;
// If we pass an invariant-end marker, then we've just entered an
// invariant region and can start ignoring dependencies.
@@ -184,43 +252,53 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(2), MemPtr);
- if (R == AliasAnalysis::MustAlias) {
+ AliasAnalysis::AliasResult R =
+ AA->alias(AliasAnalysis::Location(II->getArgOperand(2)), MemLoc);
+ if (R == AliasAnalysis::MustAlias)
InvariantTag = II->getArgOperand(0);
- continue;
- }
-
+
+ continue;
+ }
+
// If we reach a lifetime begin or end marker, then the query ends here
// because the value is undefined.
- } else if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(1), MemPtr);
+ AliasAnalysis::AliasResult R =
+ AA->alias(AliasAnalysis::Location(II->getArgOperand(1)), MemLoc);
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(II);
+ continue;
}
}
// If we're querying on a load and we're in an invariant region, we're done
// at this point. Nothing a load depends on can live in an invariant region.
+ //
+ // FIXME: this will prevent us from returning load/load must-aliases, so GVN
+ // won't remove redundant loads.
if (isLoad && InvariantTag) continue;
// Values depend on loads if the pointers are must aliased. This means that
// a load depends on another must aliased load from the same value.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- Value *Pointer = LI->getPointerOperand();
- uint64_t PointerSize = AA->getTypeStoreSize(LI->getType());
+ AliasAnalysis::Location LoadLoc = AA->getLocation(LI);
// If we found a pointer, check if it could be the same as our pointer.
- AliasAnalysis::AliasResult R =
- AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+ AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);
if (R == AliasAnalysis::NoAlias)
continue;
// May-alias loads don't depend on each other without a dependence.
- if (isLoad && R == AliasAnalysis::MayAlias)
+ if (isLoad && R != AliasAnalysis::MustAlias)
continue;
+
+ // Stores don't alias loads from read-only memory.
+ if (!isLoad && AA->pointsToConstantMemory(LoadLoc))
+ continue;
+
// Stores depend on may and must aliased loads, loads depend on must-alias
// loads.
return MemDepResult::getDef(Inst);
@@ -234,23 +312,21 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
// the query pointer points to constant memory etc.
- if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
+ if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
continue;
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
- Value *Pointer = SI->getPointerOperand();
- uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
+ AliasAnalysis::Location StoreLoc = AA->getLocation(SI);
// If we found a pointer, check if it could be the same as our pointer.
- AliasAnalysis::AliasResult R =
- AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+ AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);
if (R == AliasAnalysis::NoAlias)
continue;
- if (R == AliasAnalysis::MayAlias)
- return MemDepResult::getClobber(Inst);
- return MemDepResult::getDef(Inst);
+ if (R == AliasAnalysis::MustAlias)
+ return MemDepResult::getDef(Inst);
+ return MemDepResult::getClobber(Inst);
}
// If this is an allocation, and if we know that the accessed pointer is to
@@ -263,7 +339,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// need to continue scanning until the malloc call.
if (isa<AllocaInst>(Inst) ||
(isa<CallInst>(Inst) && extractMallocCall(Inst))) {
- Value *AccessPtr = MemPtr->getUnderlyingObject();
+ const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst ||
AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
@@ -272,7 +348,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
- switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
+ switch (AA->getModRefInfo(Inst, MemLoc)) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
@@ -322,9 +398,6 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
BasicBlock *QueryParent = QueryInst->getParent();
- Value *MemPtr = 0;
- uint64_t MemSize = 0;
-
// Do the scan.
if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
// No dependence found. If this is the entry block of the function, it is a
@@ -333,65 +406,25 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
LocalCache = MemDepResult::getNonLocal();
else
LocalCache = MemDepResult::getClobber(QueryInst);
- } else if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) {
- // If this is a volatile store, don't mess around with it. Just return the
- // previous instruction as a clobber.
- if (SI->isVolatile())
- LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
- else {
- MemPtr = SI->getPointerOperand();
- MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
- }
- } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
- // If this is a volatile load, don't mess around with it. Just return the
- // previous instruction as a clobber.
- if (LI->isVolatile())
- LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
- else {
- MemPtr = LI->getPointerOperand();
- MemSize = AA->getTypeStoreSize(LI->getType());
- }
- } else if (const CallInst *CI = isFreeCall(QueryInst)) {
- MemPtr = CI->getArgOperand(0);
- // calls to free() erase the entire structure, not just a field.
- MemSize = ~0UL;
- } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
- int IntrinsicID = 0; // Intrinsic IDs start at 1.
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
- if (II)
- IntrinsicID = II->getIntrinsicID();
-
- switch (IntrinsicID) {
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start:
- MemPtr = II->getArgOperand(1);
- MemSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- break;
- case Intrinsic::invariant_end:
- MemPtr = II->getArgOperand(2);
- MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- break;
- default:
+ } else {
+ AliasAnalysis::Location MemLoc;
+ AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
+ if (MemLoc.Ptr) {
+ // If we can do a pointer scan, make it happen.
+ bool isLoad = !(MR & AliasAnalysis::Mod);
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
+ isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
+
+ LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
+ QueryParent);
+ } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
CallSite QueryCS(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
QueryParent);
- break;
- }
- } else {
- // Non-memory instruction.
- LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
- }
-
- // If we need to do a pointer scan, make it happen.
- if (MemPtr) {
- bool isLoad = !QueryInst->mayWriteToMemory();
- if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
- isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
- }
- LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos,
- QueryParent);
+ } else
+ // Non-memory instruction.
+ LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
}
// Remember the result!
@@ -565,31 +598,27 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
/// own block.
///
void MemoryDependenceAnalysis::
-getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
+getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
+ BasicBlock *FromBB,
SmallVectorImpl<NonLocalDepResult> &Result) {
- assert(Pointer->getType()->isPointerTy() &&
+ assert(Loc.Ptr->getType()->isPointerTy() &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
- // We know that the pointer value is live into FromBB find the def/clobbers
- // from presecessors.
- const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
- uint64_t PointeeSize = AA->getTypeStoreSize(EltTy);
-
- PHITransAddr Address(Pointer, TD);
+ PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
// a block with multiple different pointers. This can happen during PHI
// translation.
DenseMap<BasicBlock*, Value*> Visited;
- if (!getNonLocalPointerDepFromBB(Address, PointeeSize, isLoad, FromBB,
+ if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
Result, Visited, true))
return;
Result.clear();
Result.push_back(NonLocalDepResult(FromBB,
MemDepResult::getClobber(FromBB->begin()),
- Pointer));
+ const_cast<Value *>(Loc.Ptr)));
}
/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
@@ -597,7 +626,7 @@ getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
-GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
+GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *BB,
NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
@@ -631,15 +660,14 @@ GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
ScanPos = ExistingResult->getResult().getInst();
// Eliminating the dirty entry from 'Cache', so update the reverse info.
- ValueIsLoadPair CacheKey(Pointer, isLoad);
+ ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
} else {
++NumUncacheNonLocalPtr;
}
// Scan the block for the dependency.
- MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad,
- ScanPos, BB);
+ MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
@@ -658,7 +686,7 @@ GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
// update MemDep when we remove instructions.
Instruction *Inst = Dep.getInst();
assert(Inst && "Didn't depend on anything?");
- ValueIsLoadPair CacheKey(Pointer, isLoad);
+ ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
return Dep;
}
@@ -712,7 +740,8 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
-getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
+getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
+ const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *StartBB,
SmallVectorImpl<NonLocalDepResult> &Result,
DenseMap<BasicBlock*, Value*> &Visited,
@@ -720,14 +749,68 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// Look up the cached info for Pointer.
ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
-
- std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo =
- &NonLocalPointerDeps[CacheKey];
- NonLocalDepInfo *Cache = &CacheInfo->second;
+
+ // Set up a temporary NLPI value. If the map doesn't yet have an entry for
+ // CacheKey, this value will be inserted as the associated value. Otherwise,
+ // it'll be ignored, and we'll have to check to see if the cached size and
+ // tbaa tag are consistent with the current query.
+ NonLocalPointerInfo InitialNLPI;
+ InitialNLPI.Size = Loc.Size;
+ InitialNLPI.TBAATag = Loc.TBAATag;
+
+ // Get the NLPI for CacheKey, inserting one into the map if it doesn't
+ // already have one.
+ std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
+ NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
+ NonLocalPointerInfo *CacheInfo = &Pair.first->second;
+
+ // If we already have a cache entry for this CacheKey, we may need to do some
+ // work to reconcile the cache entry and the current query.
+ if (!Pair.second) {
+ if (CacheInfo->Size < Loc.Size) {
+ // The query's Size is greater than the cached one. Throw out the
+ // cached data and proceed with the query at the greater size.
+ CacheInfo->Pair = BBSkipFirstBlockPair();
+ CacheInfo->Size = Loc.Size;
+ for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
+ DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
+ if (Instruction *Inst = DI->getResult().getInst())
+ RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
+ CacheInfo->NonLocalDeps.clear();
+ } else if (CacheInfo->Size > Loc.Size) {
+ // This query's Size is less than the cached one. Conservatively restart
+ // the query using the greater size.
+ return getNonLocalPointerDepFromBB(Pointer,
+ Loc.getWithNewSize(CacheInfo->Size),
+ isLoad, StartBB, Result, Visited,
+ SkipFirstBlock);
+ }
+
+ // If the query's TBAATag is inconsistent with the cached one,
+ // conservatively throw out the cached data and restart the query with
+ // no tag if needed.
+ if (CacheInfo->TBAATag != Loc.TBAATag) {
+ if (CacheInfo->TBAATag) {
+ CacheInfo->Pair = BBSkipFirstBlockPair();
+ CacheInfo->TBAATag = 0;
+ for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
+ DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
+ if (Instruction *Inst = DI->getResult().getInst())
+ RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
+ CacheInfo->NonLocalDeps.clear();
+ }
+ if (Loc.TBAATag)
+ return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
+ isLoad, StartBB, Result, Visited,
+ SkipFirstBlock);
+ }
+ }
+
+ NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
// If we have valid cached information for exactly the block we are
// investigating, just return it with no recomputation.
- if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
+ if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
// If we have a fully cached result for this query then we can just return the
// cached results and populate the visited set. However, we have to verify
// that we don't already have conflicting results for these blocks. Check
@@ -763,9 +846,9 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// than its valid cache info. If empty, the result will be valid cache info,
// otherwise it isn't.
if (Cache->empty())
- CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
+ CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
else
- CacheInfo->first = BBSkipFirstBlockPair();
+ CacheInfo->Pair = BBSkipFirstBlockPair();
SmallVector<BasicBlock*, 32> Worklist;
Worklist.push_back(StartBB);
@@ -790,8 +873,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// Get the dependency info for Pointer in BB. If we have cached
// information, we will use it, otherwise we compute it.
DEBUG(AssertSorted(*Cache, NumSortedEntries));
- MemDepResult Dep = GetNonLocalInfoForBlock(Pointer.getAddr(), PointeeSize,
- isLoad, BB, Cache,
+ MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
@@ -888,7 +970,8 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// queries. Mark this in NonLocalPointerDeps by setting the
// BBSkipFirstBlockPair pointer to null. This requires reuse of the
// cached value to do more work but not miss the phi trans failure.
- NonLocalPointerDeps[CacheKey].first = BBSkipFirstBlockPair();
+ NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
+ NLPI.Pair = BBSkipFirstBlockPair();
continue;
}
@@ -899,21 +982,23 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// If we have a problem phi translating, fall through to the code below
// to handle the failure condition.
- if (getNonLocalPointerDepFromBB(PredPointer, PointeeSize, isLoad, Pred,
+ if (getNonLocalPointerDepFromBB(PredPointer,
+ Loc.getWithNewPtr(PredPointer.getAddr()),
+ isLoad, Pred,
Result, Visited))
goto PredTranslationFailure;
}
// Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
- Cache = &CacheInfo->second;
+ Cache = &CacheInfo->NonLocalDeps;
NumSortedEntries = Cache->size();
// Since we did phi translation, the "Cache" set won't contain all of the
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
// results from the set". Clear out the indicator for this.
- CacheInfo->first = BBSkipFirstBlockPair();
+ CacheInfo->Pair = BBSkipFirstBlockPair();
SkipFirstBlock = false;
continue;
@@ -922,7 +1007,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
if (Cache == 0) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
- Cache = &CacheInfo->second;
+ Cache = &CacheInfo->NonLocalDeps;
NumSortedEntries = Cache->size();
}
@@ -930,7 +1015,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
// results from the set". Clear out the indicator for this.
- CacheInfo->first = BBSkipFirstBlockPair();
+ CacheInfo->Pair = BBSkipFirstBlockPair();
// If *nothing* works, mark the pointer as being clobbered by the first
// instruction in this block.
@@ -972,7 +1057,7 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
// Remove all of the entries in the BB->val map. This involves removing
// instructions from the reverse map.
- NonLocalDepInfo &PInfo = It->second.second;
+ NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Instruction *Target = PInfo[i].getResult().getInst();
@@ -1143,10 +1228,10 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
assert(P.getPointer() != RemInst &&
"Already removed NonLocalPointerDeps info for RemInst");
- NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;
+ NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
// The cache is not valid for any specific block anymore.
- NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();
+ NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
// Update any entries for RemInst to use the instruction after it.
for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
@@ -1192,7 +1277,7 @@ void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
E = NonLocalPointerDeps.end(); I != E; ++I) {
assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
- const NonLocalDepInfo &Val = I->second.second;
+ const NonLocalDepInfo &Val = I->second.NonLocalDeps;
for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
II != E; ++II)
assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
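The MemoryDependenceAnalysis hunks above replace the loose (pointer, size) query pair with AliasAnalysis::Location, which also carries the TBAA tag, and teach the non-local cache to reconcile entries made at a different size or with a different tag (widening the cached entry, or conservatively restarting the query). A minimal sketch of how a client might assemble such a query, assuming the headers as imported in this commit; the helper name is illustrative:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

// Illustrative helper: pointer, size, and TBAA tag now travel together in
// one Location, which is what the non-local cache keys its entries on.
static MemDepResult queryLoadDep(MemoryDependenceAnalysis &MD,
                                 AliasAnalysis &AA, LoadInst *LI) {
  AliasAnalysis::Location Loc(LI->getPointerOperand(),
                              AA.getTypeStoreSize(LI->getType()),
                              LI->getMetadata(LLVMContext::MD_tbaa));
  (void)Loc; // getDependency() builds an equivalent Location internally
  return MD.getDependency(LI);
}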
diff --git a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
index 2cc1c2a..e7e999c 100644
--- a/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -30,7 +30,9 @@ namespace {
DebugInfoFinder Finder;
public:
static char ID; // Pass identification, replacement for typeid
- ModuleDebugInfoPrinter() : ModulePass(ID) {}
+ ModuleDebugInfoPrinter() : ModulePass(ID) {
+ initializeModuleDebugInfoPrinterPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
@@ -43,7 +45,7 @@ namespace {
char ModuleDebugInfoPrinter::ID = 0;
INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
- "Decodes module-level debug info", false, true);
+ "Decodes module-level debug info", false, true)
ModulePass *llvm::createModuleDebugInfoPrinterPass() {
return new ModuleDebugInfoPrinter();
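The ModuleDebugInfoPrinter hunks follow a pattern applied throughout this import: INITIALIZE_PASS loses its trailing semicolon, and the constructor now registers the pass explicitly via the PassRegistry. A sketch of the idiom, with the pass name purely illustrative (INITIALIZE_PASS generates the matching initialize<Name>Pass function):

namespace {
  // Illustrative pass using the explicit-initialization idiom above.
  struct ExamplePrinter : public ModulePass {
    static char ID;
    ExamplePrinter() : ModulePass(ID) {
      initializeExamplePrinterPass(*PassRegistry::getPassRegistry());
    }
    virtual bool runOnModule(Module &M) { return false; }
  };
}
char ExamplePrinter::ID = 0;
INITIALIZE_PASS(ExamplePrinter, "example-printer",
                "Example pass registration", false, true)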
diff --git a/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
new file mode 100644
index 0000000..101c2d5
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -0,0 +1,88 @@
+//===- NoAliasAnalysis.cpp - Minimal Alias Analysis Impl ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the default implementation of the Alias Analysis interface
+// that simply returns "I don't know" for all queries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetData.h"
+using namespace llvm;
+
+namespace {
+ /// NoAA - This class implements the -no-aa pass, which always returns "I
+ /// don't know" for alias queries. NoAA is unlike other alias analysis
+ /// implementations, in that it does not chain to a previous analysis. As
+ /// such it doesn't follow many of the rules that other alias analyses must.
+ ///
+ struct NoAA : public ImmutablePass, public AliasAnalysis {
+ static char ID; // Class identification, replacement for typeinfo
+ NoAA() : ImmutablePass(ID) {
+ initializeNoAAPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ }
+
+ virtual void initializePass() {
+ // Note: NoAA does not call InitializeAliasAnalysis because it's
+ // special and does not support chaining.
+ TD = getAnalysisIfAvailable<TargetData>();
+ }
+
+ virtual AliasResult alias(const Location &LocA, const Location &LocB) {
+ return MayAlias;
+ }
+
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ return UnknownModRefBehavior;
+ }
+ virtual ModRefBehavior getModRefBehavior(const Function *F) {
+ return UnknownModRefBehavior;
+ }
+
+ virtual bool pointsToConstantMemory(const Location &Loc,
+ bool OrLocal) {
+ return false;
+ }
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) {
+ return ModRef;
+ }
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ return ModRef;
+ }
+
+ virtual void deleteValue(Value *V) {}
+ virtual void copyValue(Value *From, Value *To) {}
+ virtual void addEscapingUse(Use &U) {}
+
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &AliasAnalysis::ID)
+ return (AliasAnalysis*)this;
+ return this;
+ }
+ };
+} // End of anonymous namespace
+
+// Register this pass...
+char NoAA::ID = 0;
+INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
+ "No Alias Analysis (always returns 'may' alias)",
+ true, true, true)
+
+ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
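NoAA registers as the default implementation of the AliasAnalysis analysis group (the final 'true' argument to INITIALIZE_AG_PASS), so clients querying the group always get well-defined, maximally conservative answers even when no real analysis is scheduled. A hedged sketch of what that means for a caller:

// Illustrative client check: under -no-aa this can never return true,
// because NoAA::alias() answers MayAlias for every pair of locations.
static bool provablyNoAlias(AliasAnalysis &AA, const Value *A, const Value *B) {
  return AA.alias(AliasAnalysis::Location(A),
                  AliasAnalysis::Location(B)) == AliasAnalysis::NoAlias;
}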
diff --git a/contrib/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
index 8e4fa03..93da5a4 100644
--- a/contrib/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
@@ -12,22 +12,27 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static bool CanPHITrans(Instruction *Inst) {
if (isa<PHINode>(Inst) ||
- isa<BitCastInst>(Inst) ||
isa<GetElementPtrInst>(Inst))
return true;
-
+
+ if (isa<CastInst>(Inst) &&
+ Inst->isSafeToSpeculativelyExecute())
+ return true;
+
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1)))
return true;
-
+
// cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
// if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
// cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
@@ -50,7 +55,7 @@ static bool VerifySubExpr(Value *Expr,
// If this is a non-instruction value, there is nothing to do.
Instruction *I = dyn_cast<Instruction>(Expr);
if (I == 0) return true;
-
+
// If it's an instruction, it is either in Tmp or its operands recursively
// are.
SmallVectorImpl<Instruction*>::iterator Entry =
@@ -59,16 +64,17 @@ static bool VerifySubExpr(Value *Expr,
InstInputs.erase(Entry);
return true;
}
-
+
// If it isn't in the InstInputs list it is a subexpr incorporated into the
// address. Sanity check that it is phi translatable.
if (!CanPHITrans(I)) {
- errs() << "Non phi translatable instruction found in PHITransAddr, either "
- "something is missing from InstInputs or CanPHITrans is wrong:\n";
+ errs() << "Non phi translatable instruction found in PHITransAddr:\n";
errs() << *I << '\n';
+ llvm_unreachable("Either something is missing from InstInputs or "
+ "CanPHITrans is wrong.");
return false;
}
-
+
// Validate the operands of the instruction.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (!VerifySubExpr(I->getOperand(i), InstInputs))
@@ -82,19 +88,20 @@ static bool VerifySubExpr(Value *Expr,
/// returns false.
bool PHITransAddr::Verify() const {
if (Addr == 0) return true;
-
- SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end());
-
+
+ SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end());
+
if (!VerifySubExpr(Addr, Tmp))
return false;
-
+
if (!Tmp.empty()) {
- errs() << "PHITransAddr inconsistent, contains extra instructions:\n";
+ errs() << "PHITransAddr contains extra instructions:\n";
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
errs() << " InstInput #" << i << " is " << *InstInputs[i] << "\n";
+ llvm_unreachable("This is unexpected.");
return false;
}
-
+
// a-ok.
return true;
}
@@ -111,11 +118,11 @@ bool PHITransAddr::IsPotentiallyPHITranslatable() const {
}
-static void RemoveInstInputs(Value *V,
+static void RemoveInstInputs(Value *V,
SmallVectorImpl<Instruction*> &InstInputs) {
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return;
-
+
// If the instruction is in the InstInputs list, remove it.
SmallVectorImpl<Instruction*>::iterator Entry =
std::find(InstInputs.begin(), InstInputs.end(), I);
@@ -123,9 +130,9 @@ static void RemoveInstInputs(Value *V,
InstInputs.erase(Entry);
return;
}
-
+
assert(!isa<PHINode>(I) && "Error, removing something that isn't an input");
-
+
// Otherwise, it must have instruction inputs itself. Zap them recursively.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
if (Instruction *Op = dyn_cast<Instruction>(I->getOperand(i)))
@@ -139,7 +146,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// If this is a non-instruction value, it can't require PHI translation.
Instruction *Inst = dyn_cast<Instruction>(V);
if (Inst == 0) return V;
-
+
// Determine whether 'Inst' is an input to our PHI translatable expression.
bool isInput = std::count(InstInputs.begin(), InstInputs.end(), Inst);
@@ -156,16 +163,16 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// In either case, the instruction itself isn't an input any longer.
InstInputs.erase(std::find(InstInputs.begin(), InstInputs.end(), Inst));
-
+
// If this is a PHI, go ahead and translate it.
if (PHINode *PN = dyn_cast<PHINode>(Inst))
return AddAsInput(PN->getIncomingValueForBlock(PredBB));
-
+
// If this is a non-phi value, and it is analyzable, we can incorporate it
// into the expression by making all instruction operands be inputs.
if (!CanPHITrans(Inst))
return 0;
-
+
// All instruction operands are now inputs (and of course, they may also be
// defined in this block, so they may need to be phi translated themselves).
for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
@@ -176,31 +183,34 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// Ok, it must be an intermediate result (either because it started that way
// or because we just incorporated it into the expression). See if its
// operands need to be phi translated, and if so, reconstruct it.
-
- if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
- Value *PHIIn = PHITranslateSubExpr(BC->getOperand(0), CurBB, PredBB, DT);
+
+ if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
+ if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
if (PHIIn == 0) return 0;
- if (PHIIn == BC->getOperand(0))
- return BC;
-
+ if (PHIIn == Cast->getOperand(0))
+ return Cast;
+
// Find an available version of this cast.
-
+
// Constants are trivial to find.
if (Constant *C = dyn_cast<Constant>(PHIIn))
- return AddAsInput(ConstantExpr::getBitCast(C, BC->getType()));
-
- // Otherwise we have to see if a bitcasted version of the incoming pointer
+ return AddAsInput(ConstantExpr::getCast(Cast->getOpcode(),
+ C, Cast->getType()));
+
+ // Otherwise we have to see if a casted version of the incoming pointer
// is available. If so, we can use it, otherwise we have to fail.
for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
UI != E; ++UI) {
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI))
- if (BCI->getType() == BC->getType() &&
- (!DT || DT->dominates(BCI->getParent(), PredBB)))
- return BCI;
+ if (CastInst *CastI = dyn_cast<CastInst>(*UI))
+ if (CastI->getOpcode() == Cast->getOpcode() &&
+ CastI->getType() == Cast->getType() &&
+ (!DT || DT->dominates(CastI->getParent(), PredBB)))
+ return CastI;
}
return 0;
}
-
+
// Handle getelementptr with at least one PHI translatable operand.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
SmallVector<Value*, 8> GEPOps;
@@ -208,22 +218,22 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT);
if (GEPOp == 0) return 0;
-
+
AnyChanged |= GEPOp != GEP->getOperand(i);
GEPOps.push_back(GEPOp);
}
-
+
if (!AnyChanged)
return GEP;
-
+
// Simplify the GEP to handle 'gep x, 0' -> x etc.
- if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD)) {
+ if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD, DT)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
-
+
return AddAsInput(V);
}
-
+
// Scan to see if we have this GEP available.
Value *APHIOp = GEPOps[0];
for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
@@ -245,7 +255,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
return 0;
}
-
+
// Handle add with a constant RHS.
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
@@ -253,10 +263,10 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
Constant *RHS = cast<ConstantInt>(Inst->getOperand(1));
bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap();
bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
-
+
Value *LHS = PHITranslateSubExpr(Inst->getOperand(0), CurBB, PredBB, DT);
if (LHS == 0) return 0;
-
+
// If the PHI translated LHS is an add of a constant, fold the immediates.
if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
if (BOp->getOpcode() == Instruction::Add)
@@ -264,16 +274,16 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
LHS = BOp->getOperand(0);
RHS = ConstantExpr::getAdd(RHS, CI);
isNSW = isNUW = false;
-
+
// If the old 'LHS' was an input, add the new 'LHS' as an input.
if (std::count(InstInputs.begin(), InstInputs.end(), BOp)) {
RemoveInstInputs(BOp, InstInputs);
AddAsInput(LHS);
}
}
-
+
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -283,7 +293,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// If we didn't modify the add, just return it.
if (LHS == Inst->getOperand(0) && RHS == Inst->getOperand(1))
return Inst;
-
+
// Otherwise, see if we have this add available somewhere.
for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
UI != E; ++UI) {
@@ -294,10 +304,10 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
(!DT || DT->dominates(BO->getParent(), PredBB)))
return BO;
}
-
+
return 0;
}
-
+
// Otherwise, we failed.
return 0;
}
@@ -335,13 +345,13 @@ PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
const DominatorTree &DT,
SmallVectorImpl<Instruction*> &NewInsts) {
unsigned NISize = NewInsts.size();
-
+
// Attempt to PHI translate with insertion.
Addr = InsertPHITranslatedSubExpr(Addr, CurBB, PredBB, DT, NewInsts);
-
+
// If successful, return the new value.
if (Addr) return Addr;
-
+
// If not, destroy any intermediate instructions inserted.
while (NewInsts.size() != NISize)
NewInsts.pop_back_val()->eraseFromParent();
@@ -367,21 +377,23 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
// If we don't have an available version of this value, it must be an
// instruction.
Instruction *Inst = cast<Instruction>(InVal);
-
- // Handle bitcast of PHI translatable value.
- if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
- Value *OpVal = InsertPHITranslatedSubExpr(BC->getOperand(0),
+
+ // Handle cast of PHI translatable value.
+ if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
+ if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
CurBB, PredBB, DT, NewInsts);
if (OpVal == 0) return 0;
-
- // Otherwise insert a bitcast at the end of PredBB.
- BitCastInst *New = new BitCastInst(OpVal, InVal->getType(),
- InVal->getName()+".phi.trans.insert",
- PredBB->getTerminator());
+
+ // Otherwise insert a cast at the end of PredBB.
+ CastInst *New = CastInst::Create(Cast->getOpcode(),
+ OpVal, InVal->getType(),
+ InVal->getName()+".phi.trans.insert",
+ PredBB->getTerminator());
NewInsts.push_back(New);
return New;
}
-
+
// Handle getelementptr with at least one PHI operand.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
SmallVector<Value*, 8> GEPOps;
@@ -392,8 +404,8 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
if (OpVal == 0) return 0;
GEPOps.push_back(OpVal);
}
-
- GetElementPtrInst *Result =
+
+ GetElementPtrInst *Result =
GetElementPtrInst::Create(GEPOps[0], GEPOps.begin()+1, GEPOps.end(),
InVal->getName()+".phi.trans.insert",
PredBB->getTerminator());
@@ -401,12 +413,12 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
NewInsts.push_back(Result);
return Result;
}
-
+
#if 0
// FIXME: This code works, but it is unclear that we actually want to insert
// a big chain of computation in order to make a value available in a block.
// This needs to be evaluated carefully to consider its cost trade offs.
-
+
// Handle add with a constant RHS.
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
@@ -414,7 +426,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
Value *OpVal = InsertPHITranslatedSubExpr(Inst->getOperand(0),
CurBB, PredBB, DT, NewInsts);
if (OpVal == 0) return 0;
-
+
BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1),
InVal->getName()+".phi.trans.insert",
PredBB->getTerminator());
@@ -424,6 +436,6 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
return Res;
}
#endif
-
+
return 0;
}
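The PHITransAddr hunks generalize the old bitcast-only handling to any cast that is safe to speculatively execute: PHI translation may effectively re-evaluate the cast in a predecessor block, which is only sound for non-trapping instructions. A condensed restatement of the gate, mirroring CanPHITrans after this patch:

static bool canTranslate(Instruction *Inst) {
  if (isa<PHINode>(Inst) || isa<GetElementPtrInst>(Inst))
    return true;
  // Any cast qualifies as long as re-executing it in a predecessor cannot
  // trap or otherwise misbehave; bitcast is just the common case.
  if (isa<CastInst>(Inst) && Inst->isSafeToSpeculativelyExecute())
    return true;
  // Add-with-constant remains translatable, as before.
  return Inst->getOpcode() == Instruction::Add &&
         isa<ConstantInt>(Inst->getOperand(1));
}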
diff --git a/contrib/llvm/lib/Analysis/PathNumbering.cpp b/contrib/llvm/lib/Analysis/PathNumbering.cpp
new file mode 100644
index 0000000..5d3f6bb
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/PathNumbering.cpp
@@ -0,0 +1,525 @@
+//===- PathNumbering.cpp --------------------------------------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Ball-Larus path numbers uniquely identify paths through a directed acyclic
+// graph (DAG) [Ball96]. For a CFG backedges are removed and replaced by phony
+// edges to obtain a DAG, and thus the unique path numbers [Ball96].
+//
+// The purpose of this analysis is to enumerate the edges in a CFG in order
+// to obtain paths from path numbers in a convenient manner. As described in
+// [Ball96], edges can be numbered so that, given a path number, the path is
+// recovered by walking the CFG and updating the number at each edge taken.
+//
+// [Ball96]
+// T. Ball and J. R. Larus. "Efficient Path Profiling."
+// International Symposium on Microarchitecture, pages 46-57, 1996.
+// http://portal.acm.org/citation.cfm?id=243857
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "ball-larus-numbering"
+
+#include "llvm/Analysis/PathNumbering.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TypeBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <map>
+#include <queue>
+#include <set>
+#include <stack>
+#include <string>
+#include <utility>
+#include <vector>
+#include <sstream>
+
+using namespace llvm;
+
+// Are we enabling early termination?
+static cl::opt<bool> ProcessEarlyTermination(
+ "path-profile-early-termination", cl::Hidden,
+ cl::desc("In path profiling, insert extra instrumentation to account for "
+ "unexpected function termination."));
+
+// Returns the basic block for the BallLarusNode
+BasicBlock* BallLarusNode::getBlock() {
+ return(_basicBlock);
+}
+
+// Returns the number of paths to the exit starting at the node.
+unsigned BallLarusNode::getNumberPaths() {
+ return(_numberPaths);
+}
+
+// Sets the number of paths to the exit starting at the node.
+void BallLarusNode::setNumberPaths(unsigned numberPaths) {
+ _numberPaths = numberPaths;
+}
+
+// Gets the NodeColor used in graph algorithms.
+BallLarusNode::NodeColor BallLarusNode::getColor() {
+ return(_color);
+}
+
+// Sets the NodeColor used in graph algorithms.
+void BallLarusNode::setColor(BallLarusNode::NodeColor color) {
+ _color = color;
+}
+
+// Returns an iterator over predecessor edges. Includes phony and
+// backedges.
+BLEdgeIterator BallLarusNode::predBegin() {
+ return(_predEdges.begin());
+}
+
+// Returns the end sentinel for the predecessor iterator.
+BLEdgeIterator BallLarusNode::predEnd() {
+ return(_predEdges.end());
+}
+
+// Returns the number of predecessor edges. Includes phony and
+// backedges.
+unsigned BallLarusNode::getNumberPredEdges() {
+ return(_predEdges.size());
+}
+
+// Returns an iterator over successor edges. Includes phony and
+// backedges.
+BLEdgeIterator BallLarusNode::succBegin() {
+ return(_succEdges.begin());
+}
+
+// Returns the end sentinel for the successor iterator.
+BLEdgeIterator BallLarusNode::succEnd() {
+ return(_succEdges.end());
+}
+
+// Returns the number of successor edges. Includes phony and
+// backedges.
+unsigned BallLarusNode::getNumberSuccEdges() {
+ return(_succEdges.size());
+}
+
+// Add an edge to the predecessor list.
+void BallLarusNode::addPredEdge(BallLarusEdge* edge) {
+ _predEdges.push_back(edge);
+}
+
+// Remove an edge from the predecessor list.
+void BallLarusNode::removePredEdge(BallLarusEdge* edge) {
+ removeEdge(_predEdges, edge);
+}
+
+// Add an edge to the successor list.
+void BallLarusNode::addSuccEdge(BallLarusEdge* edge) {
+ _succEdges.push_back(edge);
+}
+
+// Remove an edge from the successor list.
+void BallLarusNode::removeSuccEdge(BallLarusEdge* edge) {
+ removeEdge(_succEdges, edge);
+}
+
+// Returns the name of the BasicBlock being represented. If BasicBlock
+// is null then returns "<null>". If BasicBlock has no name, then
+// "<unnamed>" is returned. Intended for use with debug output.
+std::string BallLarusNode::getName() {
+ std::stringstream name;
+
+ if(getBlock() != NULL) {
+ if(getBlock()->hasName()) {
+ std::string tempName(getBlock()->getName());
+ name << tempName.c_str() << " (" << _uid << ")";
+ } else
+ name << "<unnamed> (" << _uid << ")";
+ } else
+ name << "<null> (" << _uid << ")";
+
+ return name.str();
+}
+
+// Removes an edge from an edgeVector. Used by removePredEdge and
+// removeSuccEdge.
+void BallLarusNode::removeEdge(BLEdgeVector& v, BallLarusEdge* e) {
+ // TODO: Avoid linear scan by using a set instead
+ for(BLEdgeIterator i = v.begin(),
+ end = v.end();
+ i != end;
+ ++i) {
+ if((*i) == e) {
+ v.erase(i);
+ break;
+ }
+ }
+}
+
+// Returns the source node of this edge.
+BallLarusNode* BallLarusEdge::getSource() const {
+ return(_source);
+}
+
+// Returns the target node of this edge.
+BallLarusNode* BallLarusEdge::getTarget() const {
+ return(_target);
+}
+
+// Gets the type of the edge.
+BallLarusEdge::EdgeType BallLarusEdge::getType() const {
+ return _edgeType;
+}
+
+// Sets the type of the edge.
+void BallLarusEdge::setType(EdgeType type) {
+ _edgeType = type;
+}
+
+// Returns the weight of this edge. Used to decode path numbers to sequences
+// of basic blocks.
+unsigned BallLarusEdge::getWeight() {
+ return(_weight);
+}
+
+// Sets the weight of the edge. Used during path numbering.
+void BallLarusEdge::setWeight(unsigned weight) {
+ _weight = weight;
+}
+
+// Gets the phony edge originating at the root.
+BallLarusEdge* BallLarusEdge::getPhonyRoot() {
+ return _phonyRoot;
+}
+
+// Sets the phony edge originating at the root.
+void BallLarusEdge::setPhonyRoot(BallLarusEdge* phonyRoot) {
+ _phonyRoot = phonyRoot;
+}
+
+// Gets the phony edge terminating at the exit.
+BallLarusEdge* BallLarusEdge::getPhonyExit() {
+ return _phonyExit;
+}
+
+// Sets the phony edge terminating at the exit.
+void BallLarusEdge::setPhonyExit(BallLarusEdge* phonyExit) {
+ _phonyExit = phonyExit;
+}
+
+// Gets the associated real edge if this is a phony edge.
+BallLarusEdge* BallLarusEdge::getRealEdge() {
+ return _realEdge;
+}
+
+// Sets the associated real edge if this is a phony edge.
+void BallLarusEdge::setRealEdge(BallLarusEdge* realEdge) {
+ _realEdge = realEdge;
+}
+
+// Returns the duplicate number of the edge.
+unsigned BallLarusEdge::getDuplicateNumber() {
+ return(_duplicateNumber);
+}
+
+// Initialization that requires virtual functions, which are not fully
+// functional in the constructor.
+void BallLarusDag::init() {
+ BLBlockNodeMap inDag;
+ std::stack<BallLarusNode*> dfsStack;
+
+ _root = addNode(&(_function.getEntryBlock()));
+ _exit = addNode(NULL);
+
+ // start search from root
+ dfsStack.push(getRoot());
+
+ // dfs to add each bb into the dag
+ while(dfsStack.size())
+ buildNode(inDag, dfsStack);
+
+ // put in the final edge
+ addEdge(getExit(),getRoot(),0);
+}
+
+// Frees all memory associated with the DAG.
+BallLarusDag::~BallLarusDag() {
+ for(BLEdgeIterator edge = _edges.begin(), end = _edges.end(); edge != end;
+ ++edge)
+ delete (*edge);
+
+ for(BLNodeIterator node = _nodes.begin(), end = _nodes.end(); node != end;
+ ++node)
+ delete (*node);
+}
+
+// Calculate the path numbers by assigning edge increments as prescribed
+// in Ball-Larus path profiling.
+void BallLarusDag::calculatePathNumbers() {
+ BallLarusNode* node;
+ std::queue<BallLarusNode*> bfsQueue;
+ bfsQueue.push(getExit());
+
+ while(bfsQueue.size() > 0) {
+ node = bfsQueue.front();
+
+ DEBUG(dbgs() << "calculatePathNumbers on " << node->getName() << "\n");
+
+ bfsQueue.pop();
+ unsigned prevPathNumber = node->getNumberPaths();
+ calculatePathNumbersFrom(node);
+
+ // Check for DAG splitting
+ if( node->getNumberPaths() > 100000000 && node != getRoot() ) {
+ // Add new phony edge from the split-node to the DAG's exit
+ BallLarusEdge* exitEdge = addEdge(node, getExit(), 0);
+ exitEdge->setType(BallLarusEdge::SPLITEDGE_PHONY);
+
+ // Counters to handle the possibility of a multi-graph
+ BasicBlock* oldTarget = 0;
+ unsigned duplicateNumber = 0;
+
+ // Iterate through each successor edge, adding phony edges
+ for( BLEdgeIterator succ = node->succBegin(), end = node->succEnd();
+ succ != end; oldTarget = (*succ)->getTarget()->getBlock(), succ++ ) {
+
+ if( (*succ)->getType() == BallLarusEdge::NORMAL ) {
+ // is this edge a duplicate?
+ if( oldTarget != (*succ)->getTarget()->getBlock() )
+ duplicateNumber = 0;
+
+ // create the new phony edge: root -> succ
+ BallLarusEdge* rootEdge =
+ addEdge(getRoot(), (*succ)->getTarget(), duplicateNumber++);
+ rootEdge->setType(BallLarusEdge::SPLITEDGE_PHONY);
+ rootEdge->setRealEdge(*succ);
+
+ // split on this edge and reference its exit/root phony edges
+ (*succ)->setType(BallLarusEdge::SPLITEDGE);
+ (*succ)->setPhonyRoot(rootEdge);
+ (*succ)->setPhonyExit(exitEdge);
+ (*succ)->setWeight(0);
+ }
+ }
+
+ calculatePathNumbersFrom(node);
+ }
+
+ DEBUG(dbgs() << "prev, new number paths " << prevPathNumber << ", "
+ << node->getNumberPaths() << ".\n");
+
+ if(prevPathNumber == 0 && node->getNumberPaths() != 0) {
+ DEBUG(dbgs() << "node ready : " << node->getName() << "\n");
+ for(BLEdgeIterator pred = node->predBegin(), end = node->predEnd();
+ pred != end; pred++) {
+ if( (*pred)->getType() == BallLarusEdge::BACKEDGE ||
+ (*pred)->getType() == BallLarusEdge::SPLITEDGE )
+ continue;
+
+ BallLarusNode* nextNode = (*pred)->getSource();
+ // not yet visited?
+ if(nextNode->getNumberPaths() == 0)
+ bfsQueue.push(nextNode);
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "\tNumber of paths: " << getRoot()->getNumberPaths() << "\n");
+}
+
+// Returns the number of paths for the Dag.
+unsigned BallLarusDag::getNumberOfPaths() {
+ return(getRoot()->getNumberPaths());
+}
+
+// Returns the root (i.e. entry) node for the DAG.
+BallLarusNode* BallLarusDag::getRoot() {
+ return _root;
+}
+
+// Returns the exit node for the DAG.
+BallLarusNode* BallLarusDag::getExit() {
+ return _exit;
+}
+
+// Returns the function for the DAG.
+Function& BallLarusDag::getFunction() {
+ return(_function);
+}
+
+// Clears the node colors.
+void BallLarusDag::clearColors(BallLarusNode::NodeColor color) {
+ for (BLNodeIterator nodeIt = _nodes.begin(); nodeIt != _nodes.end(); nodeIt++)
+ (*nodeIt)->setColor(color);
+}
+
+// Processes one node and its immediate edges for building the DAG.
+void BallLarusDag::buildNode(BLBlockNodeMap& inDag, BLNodeStack& dfsStack) {
+ BallLarusNode* currentNode = dfsStack.top();
+ BasicBlock* currentBlock = currentNode->getBlock();
+
+ if(currentNode->getColor() != BallLarusNode::WHITE) {
+ // we have already visited this node
+ dfsStack.pop();
+ currentNode->setColor(BallLarusNode::BLACK);
+ } else {
+ // are there any external procedure calls?
+ if( ProcessEarlyTermination ) {
+ for( BasicBlock::iterator bbCurrent = currentNode->getBlock()->begin(),
+ bbEnd = currentNode->getBlock()->end(); bbCurrent != bbEnd;
+ bbCurrent++ ) {
+ Instruction& instr = *bbCurrent;
+ if( instr.getOpcode() == Instruction::Call ) {
+ BallLarusEdge* callEdge = addEdge(currentNode, getExit(), 0);
+ callEdge->setType(BallLarusEdge::CALLEDGE_PHONY);
+ break;
+ }
+ }
+ }
+
+ TerminatorInst* terminator = currentNode->getBlock()->getTerminator();
+ if(isa<ReturnInst>(terminator) || isa<UnreachableInst>(terminator)
+ || isa<UnwindInst>(terminator))
+ addEdge(currentNode, getExit(),0);
+
+ currentNode->setColor(BallLarusNode::GRAY);
+ inDag[currentBlock] = currentNode;
+
+ BasicBlock* oldSuccessor = 0;
+ unsigned duplicateNumber = 0;
+
+ // iterate through this node's successors
+ for(succ_iterator successor = succ_begin(currentBlock),
+ succEnd = succ_end(currentBlock); successor != succEnd;
+ oldSuccessor = *successor, ++successor ) {
+ BasicBlock* succBB = *successor;
+
+ // is this edge a duplicate?
+ if (oldSuccessor == succBB)
+ duplicateNumber++;
+ else
+ duplicateNumber = 0;
+
+ buildEdge(inDag, dfsStack, currentNode, succBB, duplicateNumber);
+ }
+ }
+}
+
+// Process an edge in the CFG for DAG building.
+void BallLarusDag::buildEdge(BLBlockNodeMap& inDag, std::stack<BallLarusNode*>&
+ dfsStack, BallLarusNode* currentNode,
+ BasicBlock* succBB, unsigned duplicateCount) {
+ BallLarusNode* succNode = inDag[succBB];
+
+ if(succNode && succNode->getColor() == BallLarusNode::BLACK) {
+ // visited node and forward edge
+ addEdge(currentNode, succNode, duplicateCount);
+ } else if(succNode && succNode->getColor() == BallLarusNode::GRAY) {
+ // visited node and back edge
+ DEBUG(dbgs() << "Backedge detected.\n");
+ addBackedge(currentNode, succNode, duplicateCount);
+ } else {
+ BallLarusNode* childNode;
+ // not visited node and forward edge
+ if(succNode) // an unvisited node that is a child of a gray node
+ childNode = succNode;
+ else { // an unvisited node that is a child of an unvisited node
+ childNode = addNode(succBB);
+ inDag[succBB] = childNode;
+ }
+ addEdge(currentNode, childNode, duplicateCount);
+ dfsStack.push(childNode);
+ }
+}
+
+// The weight on each edge is the increment required along any path that
+// contains that edge.
+void BallLarusDag::calculatePathNumbersFrom(BallLarusNode* node) {
+ if(node == getExit())
+ // The Exit node must be the base case
+ node->setNumberPaths(1);
+ else {
+ unsigned sumPaths = 0;
+ BallLarusNode* succNode;
+
+ for(BLEdgeIterator succ = node->succBegin(), end = node->succEnd();
+ succ != end; succ++) {
+ if( (*succ)->getType() == BallLarusEdge::BACKEDGE ||
+ (*succ)->getType() == BallLarusEdge::SPLITEDGE )
+ continue;
+
+ (*succ)->setWeight(sumPaths);
+ succNode = (*succ)->getTarget();
+
+ if( !succNode->getNumberPaths() )
+ return;
+ sumPaths += succNode->getNumberPaths();
+ }
+
+ node->setNumberPaths(sumPaths);
+ }
+}
+
+// Allows subclasses to determine which type of Node is created.
+// Override this method to produce subclasses of BallLarusNode if
+// necessary. The destructor of BallLarusDag will call free on each
+// pointer created.
+BallLarusNode* BallLarusDag::createNode(BasicBlock* BB) {
+ return( new BallLarusNode(BB) );
+}
+
+// Allows subclasses to determine which type of Edge is created.
+// Override this method to produce subclasses of BallLarusEdge if
+// necessary. The destructor of BallLarusDag will call free on each
+// pointer created.
+BallLarusEdge* BallLarusDag::createEdge(BallLarusNode* source,
+ BallLarusNode* target,
+ unsigned duplicateCount) {
+ return( new BallLarusEdge(source, target, duplicateCount) );
+}
+
+// Proxy to node's constructor. Updates the DAG state.
+BallLarusNode* BallLarusDag::addNode(BasicBlock* BB) {
+ BallLarusNode* newNode = createNode(BB);
+ _nodes.push_back(newNode);
+ return( newNode );
+}
+
+// Proxy to edge's constructor. Updates the DAG state.
+BallLarusEdge* BallLarusDag::addEdge(BallLarusNode* source,
+ BallLarusNode* target,
+ unsigned duplicateCount) {
+ BallLarusEdge* newEdge = createEdge(source, target, duplicateCount);
+ _edges.push_back(newEdge);
+ source->addSuccEdge(newEdge);
+ target->addPredEdge(newEdge);
+ return(newEdge);
+}
+
+// Adds a backedge with its phony edges. Updates the DAG state.
+void BallLarusDag::addBackedge(BallLarusNode* source, BallLarusNode* target,
+ unsigned duplicateCount) {
+ BallLarusEdge* childEdge = addEdge(source, target, duplicateCount);
+ childEdge->setType(BallLarusEdge::BACKEDGE);
+
+ childEdge->setPhonyRoot(addEdge(getRoot(), target,0));
+ childEdge->setPhonyExit(addEdge(source, getExit(),0));
+
+ childEdge->getPhonyRoot()->setRealEdge(childEdge);
+ childEdge->getPhonyRoot()->setType(BallLarusEdge::BACKEDGE_PHONY);
+
+ childEdge->getPhonyExit()->setRealEdge(childEdge);
+ childEdge->getPhonyExit()->setType(BallLarusEdge::BACKEDGE_PHONY);
+ _backEdges.push_back(childEdge);
+}
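calculatePathNumbersFrom() gives each successor edge a weight equal to the running sum of path counts seen so far, which is exactly what makes path numbers decodable later. A self-contained toy (deliberately not the classes above, and ignoring the worklist/readiness machinery the real code needs for shared subgraphs) that reproduces the numbering on a diamond CFG A -> {B, C} -> D:

#include <utility>
#include <vector>

struct ToyNode {
  std::vector<std::pair<ToyNode*, unsigned> > Succs; // (target, edge weight)
  unsigned NumPaths;
  ToyNode() : NumPaths(0) {}
};

// Exit has exactly one (empty) path; otherwise each outgoing edge's weight
// is the prefix sum of the successors' path counts, and the node's count
// is the total.
static unsigned numberPaths(ToyNode *N) {
  if (N->Succs.empty())
    return N->NumPaths = 1;
  unsigned Sum = 0;
  for (unsigned i = 0; i != N->Succs.size(); ++i) {
    N->Succs[i].second = Sum;               // edge increment
    Sum += numberPaths(N->Succs[i].first);
  }
  return N->NumPaths = Sum;
}

// On the diamond, numberPaths(&A) == 2: A->B keeps weight 0, A->C gets
// weight 1, so path number 0 decodes to A,B,D and 1 to A,C,D.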
diff --git a/contrib/llvm/lib/Analysis/PathProfileInfo.cpp b/contrib/llvm/lib/Analysis/PathProfileInfo.cpp
new file mode 100644
index 0000000..b361d3f
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/PathProfileInfo.cpp
@@ -0,0 +1,434 @@
+//===- PathProfileInfo.cpp ------------------------------------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface used by optimizers to load path profiles,
+// and provides a loader pass which reads a path profile file.
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "path-profile-info"
+
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ProfileInfoTypes.h"
+#include "llvm/Analysis/PathProfileInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cstdio>
+
+using namespace llvm;
+
+// command line option for loading path profiles
+static cl::opt<std::string>
+PathProfileInfoFilename("path-profile-loader-file", cl::init("llvmprof.out"),
+ cl::value_desc("filename"),
+ cl::desc("Path profile file loaded by -path-profile-loader"), cl::Hidden);
+
+namespace {
+ class PathProfileLoaderPass : public ModulePass, public PathProfileInfo {
+ public:
+ PathProfileLoaderPass() : ModulePass(ID) { }
+ ~PathProfileLoaderPass();
+
+ // this pass doesn't change anything (only loads information)
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ // the full name of the loader pass
+ virtual const char* getPassName() const {
+ return "Path Profiling Information Loader";
+ }
+
+ // required since this pass implements multiple inheritance
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &PathProfileInfo::ID)
+ return (PathProfileInfo*)this;
+ return this;
+ }
+
+ // entry point to run the pass
+ bool runOnModule(Module &M);
+
+ // pass identification
+ static char ID;
+
+ private:
+ // make a reference table to refer to function by number
+ void buildFunctionRefs(Module &M);
+
+ // process argument info of a program from the input file
+ void handleArgumentInfo();
+
+ // process path number information from the input file
+ void handlePathInfo();
+
+ // array of references to the functions in the module
+ std::vector<Function*> _functions;
+
+ // path profile file handle
+ FILE* _file;
+
+ // path profile file name
+ std::string _filename;
+ };
+}
+
+// register PathLoader
+char PathProfileLoaderPass::ID = 0;
+
+INITIALIZE_ANALYSIS_GROUP(PathProfileInfo, "Path Profile Information",
+ NoPathProfileInfo)
+INITIALIZE_AG_PASS(PathProfileLoaderPass, PathProfileInfo,
+ "path-profile-loader",
+ "Load path profile information from file",
+ false, true, false)
+
+char &llvm::PathProfileLoaderPassID = PathProfileLoaderPass::ID;
+
+// link PathLoader as a pass, and make it available as an optimisation
+ModulePass *llvm::createPathProfileLoaderPass() {
+ return new PathProfileLoaderPass;
+}
+
+// ----------------------------------------------------------------------------
+// PathEdge implementation
+//
+ProfilePathEdge::ProfilePathEdge (BasicBlock* source, BasicBlock* target,
+ unsigned duplicateNumber)
+ : _source(source), _target(target), _duplicateNumber(duplicateNumber) {}
+
+// ----------------------------------------------------------------------------
+// Path implementation
+//
+
+ProfilePath::ProfilePath (unsigned int number, unsigned int count,
+ double countStdDev, PathProfileInfo* ppi)
+ : _number(number) , _count(count), _countStdDev(countStdDev), _ppi(ppi) {}
+
+double ProfilePath::getFrequency() const {
+ return 100 * double(_count) /
+ double(_ppi->_functionPathCounts[_ppi->_currentFunction]);
+}
+
+static BallLarusEdge* getNextEdge (BallLarusNode* node,
+ unsigned int pathNumber) {
+ BallLarusEdge* best = 0;
+
+ for( BLEdgeIterator next = node->succBegin(),
+ end = node->succEnd(); next != end; next++ ) {
+ if( (*next)->getType() != BallLarusEdge::BACKEDGE && // no backedges
+ (*next)->getType() != BallLarusEdge::SPLITEDGE && // no split edges
+ (*next)->getWeight() <= pathNumber && // weight must be <= pathNumber
+ (!best || (best->getWeight() < (*next)->getWeight())) ) // best one?
+ best = *next;
+ }
+
+ return best;
+}
+
+ProfilePathEdgeVector* ProfilePath::getPathEdges() const {
+ BallLarusNode* currentNode = _ppi->_currentDag->getRoot ();
+ unsigned int increment = _number;
+ ProfilePathEdgeVector* pev = new ProfilePathEdgeVector;
+
+ while (currentNode != _ppi->_currentDag->getExit()) {
+ BallLarusEdge* next = getNextEdge(currentNode, increment);
+
+ increment -= next->getWeight();
+
+ if( next->getType() != BallLarusEdge::BACKEDGE_PHONY &&
+ next->getType() != BallLarusEdge::SPLITEDGE_PHONY &&
+ next->getTarget() != _ppi->_currentDag->getExit() )
+ pev->push_back(ProfilePathEdge(
+ next->getSource()->getBlock(),
+ next->getTarget()->getBlock(),
+ next->getDuplicateNumber()));
+
+ if( next->getType() == BallLarusEdge::BACKEDGE_PHONY &&
+ next->getTarget() == _ppi->_currentDag->getExit() )
+ pev->push_back(ProfilePathEdge(
+ next->getRealEdge()->getSource()->getBlock(),
+ next->getRealEdge()->getTarget()->getBlock(),
+ next->getDuplicateNumber()));
+
+ if( next->getType() == BallLarusEdge::SPLITEDGE_PHONY &&
+ next->getSource() == _ppi->_currentDag->getRoot() )
+ pev->push_back(ProfilePathEdge(
+ next->getRealEdge()->getSource()->getBlock(),
+ next->getRealEdge()->getTarget()->getBlock(),
+ next->getDuplicateNumber()));
+
+ // set the new node
+ currentNode = next->getTarget();
+ }
+
+ return pev;
+}
+
+ProfilePathBlockVector* ProfilePath::getPathBlocks() const {
+ BallLarusNode* currentNode = _ppi->_currentDag->getRoot ();
+ unsigned int increment = _number;
+ ProfilePathBlockVector* pbv = new ProfilePathBlockVector;
+
+ while (currentNode != _ppi->_currentDag->getExit()) {
+ BallLarusEdge* next = getNextEdge(currentNode, increment);
+ increment -= next->getWeight();
+
+ // add block to the block list if it is a real edge
+ if( next->getType() == BallLarusEdge::NORMAL)
+ pbv->push_back (currentNode->getBlock());
+ // make the back edge the last edge since we are at the end
+ else if( next->getTarget() == _ppi->_currentDag->getExit() ) {
+ pbv->push_back (currentNode->getBlock());
+ pbv->push_back (next->getRealEdge()->getTarget()->getBlock());
+ }
+
+ // set the new node
+ currentNode = next->getTarget();
+ }
+
+ return pbv;
+}
+
+BasicBlock* ProfilePath::getFirstBlockInPath() const {
+ BallLarusNode* root = _ppi->_currentDag->getRoot();
+ BallLarusEdge* edge = getNextEdge(root, _number);
+
+ if( edge && (edge->getType() == BallLarusEdge::BACKEDGE_PHONY ||
+ edge->getType() == BallLarusEdge::SPLITEDGE_PHONY) )
+ return edge->getTarget()->getBlock();
+
+ return root->getBlock();
+}
+
+// ----------------------------------------------------------------------------
+// PathProfileInfo implementation
+//
+
+// Pass identification
+char llvm::PathProfileInfo::ID = 0;
+
+PathProfileInfo::PathProfileInfo () : _currentDag(0) , _currentFunction(0) {
+}
+
+PathProfileInfo::~PathProfileInfo() {
+ if (_currentDag)
+ delete _currentDag;
+}
+
+// set the function for which paths are currently being processed
+void PathProfileInfo::setCurrentFunction(Function* F) {
+ // Make sure it exists
+ if (!F) return;
+
+ if (_currentDag)
+ delete _currentDag;
+
+ _currentFunction = F;
+ _currentDag = new BallLarusDag(*F);
+ _currentDag->init();
+ _currentDag->calculatePathNumbers();
+}
+
+// get the function for which paths are currently being processed
+Function* PathProfileInfo::getCurrentFunction() const {
+ return _currentFunction;
+}
+
+// get the entry block of the function
+BasicBlock* PathProfileInfo::getCurrentFunctionEntry() {
+ return _currentDag->getRoot()->getBlock();
+}
+
+// return the path based on its number
+ProfilePath* PathProfileInfo::getPath(unsigned int number) {
+ return _functionPaths[_currentFunction][number];
+}
+
+// return the number of paths which a function may potentially execute
+unsigned int PathProfileInfo::getPotentialPathCount() {
+ return _currentDag ? _currentDag->getNumberOfPaths() : 0;
+}
+
+// return an iterator for the beginning of a functions executed paths
+ProfilePathIterator PathProfileInfo::pathBegin() {
+ return _functionPaths[_currentFunction].begin();
+}
+
+// return an iterator for the end of a functions executed paths
+ProfilePathIterator PathProfileInfo::pathEnd() {
+ return _functionPaths[_currentFunction].end();
+}
+
+// returns the total number of paths run in the function
+unsigned int PathProfileInfo::pathsRun() {
+ return _currentFunction ? _functionPaths[_currentFunction].size() : 0;
+}
+
+// ----------------------------------------------------------------------------
+// PathLoader implementation
+//
+
+// remove all generated paths
+PathProfileLoaderPass::~PathProfileLoaderPass() {
+ for( FunctionPathIterator funcNext = _functionPaths.begin(),
+ funcEnd = _functionPaths.end(); funcNext != funcEnd; funcNext++)
+ for( ProfilePathIterator pathNext = funcNext->second.begin(),
+ pathEnd = funcNext->second.end(); pathNext != pathEnd; pathNext++)
+ delete pathNext->second;
+}
+
+// entry point of the pass; this loads and parses a file
+bool PathProfileLoaderPass::runOnModule(Module &M) {
+ // get the filename and setup the module's function references
+ _filename = PathProfileInfoFilename;
+ buildFunctionRefs (M);
+
+ if (!(_file = fopen(_filename.c_str(), "rb"))) {
+ errs () << "error: input '" << _filename << "' file does not exist.\n";
+ return false;
+ }
+
+ ProfilingType profType;
+
+ while( fread(&profType, sizeof(ProfilingType), 1, _file) ) {
+ switch (profType) {
+ case ArgumentInfo:
+ handleArgumentInfo ();
+ break;
+ case PathInfo:
+ handlePathInfo ();
+ break;
+ default:
+ errs () << "error: bad path profiling file syntax, " << profType << "\n";
+ fclose (_file);
+ return false;
+ }
+ }
+
+ fclose (_file);
+
+ return true;
+}
+
+// create a reference table for functions defined in the path profile file
+void PathProfileLoaderPass::buildFunctionRefs (Module &M) {
+ _functions.push_back(0); // make the 0 index a null pointer
+
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; F++) {
+ if (F->isDeclaration())
+ continue;
+ _functions.push_back(F);
+ }
+}
+
+// handle command-line argument info in the output file
+void PathProfileLoaderPass::handleArgumentInfo() {
+ // get the argument list's length
+ unsigned savedArgsLength;
+ if( fread(&savedArgsLength, sizeof(unsigned), 1, _file) != 1 ) {
+ errs() << "warning: argument info header/data mismatch\n";
+ return;
+ }
+
+ // allocate a buffer, and get the arguments
+ char* args = new char[savedArgsLength+1];
+ if( fread(args, 1, savedArgsLength, _file) != savedArgsLength )
+ errs() << "warning: argument info header/data mismatch\n";
+
+ args[savedArgsLength] = '\0';
+ argList = std::string(args);
+ delete [] args; // cleanup dynamic string
+
+ // realign the stream to a 4-byte boundary
+ if (savedArgsLength & 3)
+ fseek(_file, 4-(savedArgsLength&3), SEEK_CUR);
+}
+
+// Handle path profile information in the output file
+void PathProfileLoaderPass::handlePathInfo () {
+ // get the number of functions in this profile
+ unsigned functionCount;
+ if( fread(&functionCount, sizeof(functionCount), 1, _file) != 1 ) {
+ errs() << "warning: path info header/data mismatch\n";
+ return;
+ }
+
+ // gather path information for each function
+ for (unsigned i = 0; i < functionCount; i++) {
+ PathProfileHeader pathHeader;
+ if( fread(&pathHeader, sizeof(pathHeader), 1, _file) != 1 ) {
+ errs() << "warning: bad header for path function info\n";
+ break;
+ }
+
+ Function* f = _functions[pathHeader.fnNumber];
+
+ // dynamically allocate a table to store path numbers
+ PathProfileTableEntry* pathTable =
+ new PathProfileTableEntry[pathHeader.numEntries];
+
+ if( fread(pathTable, sizeof(PathProfileTableEntry),
+ pathHeader.numEntries, _file) != pathHeader.numEntries) {
+ delete [] pathTable;
+ errs() << "warning: path function info header/data mismatch\n";
+ return;
+ }
+
+ // Build a new path for the current function
+ unsigned int totalPaths = 0;
+ for (unsigned int j = 0; j < pathHeader.numEntries; j++) {
+ totalPaths += pathTable[j].pathCounter;
+ _functionPaths[f][pathTable[j].pathNumber]
+ = new ProfilePath(pathTable[j].pathNumber, pathTable[j].pathCounter,
+ 0, this);
+ }
+
+ _functionPathCounts[f] = totalPaths;
+
+ delete [] pathTable;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// NoProfile PathProfileInfo implementation
+//
+
+namespace {
+ struct NoPathProfileInfo : public ImmutablePass, public PathProfileInfo {
+ static char ID; // Class identification, replacement for typeinfo
+ NoPathProfileInfo() : ImmutablePass(ID) {
+ initializeNoPathProfileInfoPass(*PassRegistry::getPassRegistry());
+ }
+
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &PathProfileInfo::ID)
+ return (PathProfileInfo*)this;
+ return this;
+ }
+
+ virtual const char *getPassName() const {
+ return "NoPathProfileInfo";
+ }
+ };
+} // End of anonymous namespace
+
+char NoPathProfileInfo::ID = 0;
+// Register this pass...
+INITIALIZE_AG_PASS(NoPathProfileInfo, PathProfileInfo, "no-path-profile",
+ "No Path Profile Information", false, true, true)
+
+ImmutablePass *llvm::createNoPathProfileInfoPass() { return new NoPathProfileInfo(); }
diff --git a/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp b/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp
new file mode 100644
index 0000000..c549773
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp
@@ -0,0 +1,207 @@
+//===- PathProfileVerifier.cpp --------------------------------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This verifier derives an edge profile file from current path profile
+// information
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "path-profile-verifier"
+
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ProfileInfoTypes.h"
+#include "llvm/Analysis/PathProfileInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <stdio.h>
+
+using namespace llvm;
+
+namespace {
+ class PathProfileVerifier : public ModulePass {
+ private:
+ bool runOnModule(Module &M);
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ PathProfileVerifier() : ModulePass(ID) {
+ initializePathProfileVerifierPass(*PassRegistry::getPassRegistry());
+ }
+
+
+ virtual const char *getPassName() const {
+ return "Path Profiler Verifier";
+ }
+
+ // The verifier requires the path profile and edge profile.
+ virtual void getAnalysisUsage(AnalysisUsage& AU) const;
+ };
+}
+
+static cl::opt<std::string>
+EdgeProfileFilename("path-profile-verifier-file",
+ cl::init("edgefrompath.llvmprof.out"),
+ cl::value_desc("filename"),
+ cl::desc("Edge profile file generated by -path-profile-verifier"),
+ cl::Hidden);
+
+char PathProfileVerifier::ID = 0;
+INITIALIZE_PASS(PathProfileVerifier, "path-profile-verifier",
+ "Compare the path profile derived edge profile against the "
+ "edge profile.", true, true)
+
+ModulePass *llvm::createPathProfileVerifierPass() {
+ return new PathProfileVerifier();
+}
+
+// The verifier requires the path profile and edge profile.
+void PathProfileVerifier::getAnalysisUsage(AnalysisUsage& AU) const {
+ AU.addRequired<PathProfileInfo>();
+ AU.addPreserved<PathProfileInfo>();
+}
+
+typedef std::map<unsigned, unsigned> DuplicateToIndexMap;
+typedef std::map<BasicBlock*,DuplicateToIndexMap> BlockToDuplicateMap;
+typedef std::map<BasicBlock*,BlockToDuplicateMap> NestedBlockToIndexMap;
+
+// the verifier iterates through each path to accumulate the total
+// frequency of each edge
+bool PathProfileVerifier::runOnModule (Module &M) {
+ PathProfileInfo& pathProfileInfo = getAnalysis<PathProfileInfo>();
+
+ // set up a data structure that maps each path edge to an index into
+ // an array of edge counters
+ NestedBlockToIndexMap arrayMap;
+ unsigned i = 0;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+
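+ // reserve a counter for the synthetic entry edge (null source -> entry block)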
+ arrayMap[0][F->begin()][0] = i++;
+
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ TerminatorInst *TI = BB->getTerminator();
+
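+ // consecutive successor edges to the same target (e.g. a switch with
+ // several cases branching to one block) are distinguished by a
+ // "duplicate" number: 0 for the first such edge, 1 for the next, and so on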
+ unsigned duplicate = 0;
+ BasicBlock* prev = 0;
+ for (unsigned s = 0, e = TI->getNumSuccessors(); s != e;
+ prev = TI->getSuccessor(s), ++s) {
+ if (prev == TI->getSuccessor(s))
+ duplicate++;
+ else duplicate = 0;
+
+ arrayMap[BB][TI->getSuccessor(s)][duplicate] = i++;
+ }
+ }
+ }
+
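+ // one counter per (source, target, duplicate) triple enumerated above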
+ std::vector<unsigned> edgeArray(i);
+
+ // iterate through each path and increment the edge counters as needed
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+
+ pathProfileInfo.setCurrentFunction(F);
+
+ DEBUG(dbgs() << "function '" << F->getName() << "' ran "
+ << pathProfileInfo.pathsRun()
+ << "/" << pathProfileInfo.getPotentialPathCount()
+ << " potential paths\n");
+
+ for( ProfilePathIterator nextPath = pathProfileInfo.pathBegin(),
+ endPath = pathProfileInfo.pathEnd();
+ nextPath != endPath; nextPath++ ) {
+ ProfilePath* currentPath = nextPath->second;
+
+ ProfilePathEdgeVector* pev = currentPath->getPathEdges();
+ DEBUG(dbgs () << "path #" << currentPath->getNumber() << ": "
+ << currentPath->getCount() << "\n");
+ // set up the entry edge (normally path profiling doesn't care about this)
+ if (currentPath->getFirstBlockInPath() == &F->getEntryBlock())
+ edgeArray[arrayMap[0][currentPath->getFirstBlockInPath()][0]]
+ += currentPath->getCount();
+
+ for( ProfilePathEdgeIterator nextEdge = pev->begin(),
+ endEdge = pev->end(); nextEdge != endEdge; nextEdge++ ) {
+ if (nextEdge != pev->begin())
+ DEBUG(dbgs() << " :: ");
+
+ BasicBlock* source = nextEdge->getSource();
+ BasicBlock* target = nextEdge->getTarget();
+ unsigned duplicateNumber = nextEdge->getDuplicateNumber();
+ DEBUG(dbgs () << source->getNameStr() << " --{" << duplicateNumber
+ << "}--> " << target->getNameStr());
+
+ // Ensure all the referenced edges exist
+ // TODO: make this a separate function
+ if( !arrayMap.count(source) ) {
+ errs() << " error [" << F->getNameStr() << "()]: source '"
+ << source->getNameStr()
+ << "' does not exist in the array map.\n";
+ } else if( !arrayMap[source].count(target) ) {
+ errs() << " error [" << F->getNameStr() << "()]: target '"
+ << target->getNameStr()
+ << "' does not exist in the array map.\n";
+ } else if( !arrayMap[source][target].count(duplicateNumber) ) {
+ errs() << " error [" << F->getNameStr() << "()]: edge "
+ << source->getNameStr() << " -> " << target->getNameStr()
+ << " duplicate number " << duplicateNumber
+ << " does not exist in the array map.\n";
+ } else {
+ edgeArray[arrayMap[source][target][duplicateNumber]]
+ += currentPath->getCount();
+ }
+ }
+
+ DEBUG(errs() << "\n");
+
+ delete pev;
+ }
+ }
+
+ std::string errorInfo;
+ std::string filename = EdgeProfileFilename;
+
+ // Open a handle to the file
+ FILE* edgeFile = fopen(filename.c_str(),"wb");
+
+ if (!edgeFile) {
+ errs() << "error: unable to open file '" << filename << "' for output.\n";
+ return false;
+ }
+
+ errs() << "Generating edge profile '" << filename << "' ...\n";
+
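+ // each record is a tag word followed by a length word and the payload,
+ // zero-padded to a 4-byte boundary:
+ //   [ArgumentInfo][length][argv bytes...][padding]
+ //   [EdgeInfo][count][counter 0][counter 1]...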
+ // write argument info
+ unsigned type = ArgumentInfo;
+ unsigned num = pathProfileInfo.argList.size();
+ int zeros = 0;
+
+ fwrite(&type,sizeof(unsigned),1,edgeFile);
+ fwrite(&num,sizeof(unsigned),1,edgeFile);
+ fwrite(pathProfileInfo.argList.c_str(),1,num,edgeFile);
+ if (num&3)
+ fwrite(&zeros, 1, 4-(num&3), edgeFile);
+
+ type = EdgeInfo;
+ num = edgeArray.size();
+ fwrite(&type,sizeof(unsigned),1,edgeFile);
+ fwrite(&num,sizeof(unsigned),1,edgeFile);
+
+ // write each edge to the file
+ for( std::vector<unsigned>::iterator s = edgeArray.begin(),
+ e = edgeArray.end(); s != e; s++)
+ fwrite(&*s, sizeof (unsigned), 1, edgeFile);
+
+ fclose (edgeFile);
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Analysis/PointerTracking.cpp b/contrib/llvm/lib/Analysis/PointerTracking.cpp
deleted file mode 100644
index 07f4682..0000000
--- a/contrib/llvm/lib/Analysis/PointerTracking.cpp
+++ /dev/null
@@ -1,316 +0,0 @@
-//===- PointerTracking.cpp - Pointer Bounds Tracking ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements tracking of pointer bounds.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Analysis/PointerTracking.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/Value.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-using namespace llvm;
-
-char PointerTracking::ID = 0;
-PointerTracking::PointerTracking() : FunctionPass(ID) {}
-
-bool PointerTracking::runOnFunction(Function &F) {
- predCache.clear();
- assert(analyzing.empty());
- FF = &F;
- TD = getAnalysisIfAvailable<TargetData>();
- SE = &getAnalysis<ScalarEvolution>();
- LI = &getAnalysis<LoopInfo>();
- DT = &getAnalysis<DominatorTree>();
- return false;
-}
-
-void PointerTracking::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequiredTransitive<DominatorTree>();
- AU.addRequiredTransitive<LoopInfo>();
- AU.addRequiredTransitive<ScalarEvolution>();
- AU.setPreservesAll();
-}
-
-bool PointerTracking::doInitialization(Module &M) {
- const Type *PTy = Type::getInt8PtrTy(M.getContext());
-
- // Find calloc(i64, i64) or calloc(i32, i32).
- callocFunc = M.getFunction("calloc");
- if (callocFunc) {
- const FunctionType *Ty = callocFunc->getFunctionType();
-
- std::vector<const Type*> args, args2;
- args.push_back(Type::getInt64Ty(M.getContext()));
- args.push_back(Type::getInt64Ty(M.getContext()));
- args2.push_back(Type::getInt32Ty(M.getContext()));
- args2.push_back(Type::getInt32Ty(M.getContext()));
- const FunctionType *Calloc1Type =
- FunctionType::get(PTy, args, false);
- const FunctionType *Calloc2Type =
- FunctionType::get(PTy, args2, false);
- if (Ty != Calloc1Type && Ty != Calloc2Type)
- callocFunc = 0; // Give up
- }
-
- // Find realloc(i8*, i64) or realloc(i8*, i32).
- reallocFunc = M.getFunction("realloc");
- if (reallocFunc) {
- const FunctionType *Ty = reallocFunc->getFunctionType();
- std::vector<const Type*> args, args2;
- args.push_back(PTy);
- args.push_back(Type::getInt64Ty(M.getContext()));
- args2.push_back(PTy);
- args2.push_back(Type::getInt32Ty(M.getContext()));
-
- const FunctionType *Realloc1Type =
- FunctionType::get(PTy, args, false);
- const FunctionType *Realloc2Type =
- FunctionType::get(PTy, args2, false);
- if (Ty != Realloc1Type && Ty != Realloc2Type)
- reallocFunc = 0; // Give up
- }
- return false;
-}
-
-// Calculates the number of elements allocated for pointer P,
-// the type of the element is stored in Ty.
-const SCEV *PointerTracking::computeAllocationCount(Value *P,
- const Type *&Ty) const {
- Value *V = P->stripPointerCasts();
- if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- Value *arraySize = AI->getArraySize();
- Ty = AI->getAllocatedType();
- // arraySize elements of type Ty.
- return SE->getSCEV(arraySize);
- }
-
- if (CallInst *CI = extractMallocCall(V)) {
- Value *arraySize = getMallocArraySize(CI, TD);
- const Type* AllocTy = getMallocAllocatedType(CI);
- if (!AllocTy || !arraySize) return SE->getCouldNotCompute();
- Ty = AllocTy;
- // arraySize elements of type Ty.
- return SE->getSCEV(arraySize);
- }
-
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
- Ty = ATy->getElementType();
- return SE->getConstant(Type::getInt32Ty(P->getContext()),
- ATy->getNumElements());
- }
- }
- Ty = GV->getType();
- return SE->getConstant(Type::getInt32Ty(P->getContext()), 1);
- //TODO: implement more tracking for globals
- }
-
- if (CallInst *CI = dyn_cast<CallInst>(V)) {
- CallSite CS(CI);
- Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
- const Loop *L = LI->getLoopFor(CI->getParent());
- if (F == callocFunc) {
- Ty = Type::getInt8Ty(P->getContext());
- // calloc allocates arg0*arg1 bytes.
- return SE->getSCEVAtScope(SE->getMulExpr(SE->getSCEV(CS.getArgument(0)),
- SE->getSCEV(CS.getArgument(1))),
- L);
- } else if (F == reallocFunc) {
- Ty = Type::getInt8Ty(P->getContext());
- // realloc allocates arg1 bytes.
- return SE->getSCEVAtScope(CS.getArgument(1), L);
- }
- }
-
- return SE->getCouldNotCompute();
-}
-
-Value *PointerTracking::computeAllocationCountValue(Value *P, const Type *&Ty) const
-{
- Value *V = P->stripPointerCasts();
- if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- Ty = AI->getAllocatedType();
- // arraySize elements of type Ty.
- return AI->getArraySize();
- }
-
- if (CallInst *CI = extractMallocCall(V)) {
- Ty = getMallocAllocatedType(CI);
- if (!Ty)
- return 0;
- Value *arraySize = getMallocArraySize(CI, TD);
- if (!arraySize) {
- Ty = Type::getInt8Ty(P->getContext());
- return CI->getArgOperand(0);
- }
- // arraySize elements of type Ty.
- return arraySize;
- }
-
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
- Ty = ATy->getElementType();
- return ConstantInt::get(Type::getInt32Ty(P->getContext()),
- ATy->getNumElements());
- }
- }
- Ty = cast<PointerType>(GV->getType())->getElementType();
- return ConstantInt::get(Type::getInt32Ty(P->getContext()), 1);
- //TODO: implement more tracking for globals
- }
-
- if (CallInst *CI = dyn_cast<CallInst>(V)) {
- CallSite CS(CI);
- Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
- if (F == reallocFunc) {
- Ty = Type::getInt8Ty(P->getContext());
- // realloc allocates arg1 bytes.
- return CS.getArgument(1);
- }
- }
-
- return 0;
-}
-
-// Calculates the number of elements of type Ty allocated for P.
-const SCEV *PointerTracking::computeAllocationCountForType(Value *P,
- const Type *Ty)
- const {
- const Type *elementTy;
- const SCEV *Count = computeAllocationCount(P, elementTy);
- if (isa<SCEVCouldNotCompute>(Count))
- return Count;
- if (elementTy == Ty)
- return Count;
-
- if (!TD) // need TargetData from this point forward
- return SE->getCouldNotCompute();
-
- uint64_t elementSize = TD->getTypeAllocSize(elementTy);
- uint64_t wantSize = TD->getTypeAllocSize(Ty);
- if (elementSize == wantSize)
- return Count;
- if (elementSize % wantSize) //fractional counts not possible
- return SE->getCouldNotCompute();
- return SE->getMulExpr(Count, SE->getConstant(Count->getType(),
- elementSize/wantSize));
-}
-
-const SCEV *PointerTracking::getAllocationElementCount(Value *V) const {
- // We only deal with pointers.
- const PointerType *PTy = cast<PointerType>(V->getType());
- return computeAllocationCountForType(V, PTy->getElementType());
-}
-
-const SCEV *PointerTracking::getAllocationSizeInBytes(Value *V) const {
- return computeAllocationCountForType(V, Type::getInt8Ty(V->getContext()));
-}
-
-// Helper for isLoopGuardedBy that checks the swapped and inverted predicate too
-enum SolverResult PointerTracking::isLoopGuardedBy(const Loop *L,
- Predicate Pred,
- const SCEV *A,
- const SCEV *B) const {
- if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
- return AlwaysTrue;
- Pred = ICmpInst::getSwappedPredicate(Pred);
- if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
- return AlwaysTrue;
-
- Pred = ICmpInst::getInversePredicate(Pred);
- if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
- return AlwaysFalse;
- Pred = ICmpInst::getSwappedPredicate(Pred);
- if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
- return AlwaysTrue;
- return Unknown;
-}
-
-enum SolverResult PointerTracking::checkLimits(const SCEV *Offset,
- const SCEV *Limit,
- BasicBlock *BB)
-{
- //FIXME: merge implementation
- return Unknown;
-}
-
-void PointerTracking::getPointerOffset(Value *Pointer, Value *&Base,
- const SCEV *&Limit,
- const SCEV *&Offset) const
-{
- Pointer = Pointer->stripPointerCasts();
- Base = Pointer->getUnderlyingObject();
- Limit = getAllocationSizeInBytes(Base);
- if (isa<SCEVCouldNotCompute>(Limit)) {
- Base = 0;
- Offset = Limit;
- return;
- }
-
- Offset = SE->getMinusSCEV(SE->getSCEV(Pointer), SE->getSCEV(Base));
- if (isa<SCEVCouldNotCompute>(Offset)) {
- Base = 0;
- Limit = Offset;
- }
-}
-
-void PointerTracking::print(raw_ostream &OS, const Module* M) const {
- // Calling some PT methods may cause caches to be updated, however
- // this should be safe for the same reason it's safe for SCEV.
- PointerTracking &PT = *const_cast<PointerTracking*>(this);
- for (inst_iterator I=inst_begin(*FF), E=inst_end(*FF); I != E; ++I) {
- if (!I->getType()->isPointerTy())
- continue;
- Value *Base;
- const SCEV *Limit, *Offset;
- getPointerOffset(&*I, Base, Limit, Offset);
- if (!Base)
- continue;
-
- if (Base == &*I) {
- const SCEV *S = getAllocationElementCount(Base);
- OS << *Base << " ==> " << *S << " elements, ";
- OS << *Limit << " bytes allocated\n";
- continue;
- }
- OS << &*I << " -- base: " << *Base;
- OS << " offset: " << *Offset;
-
- enum SolverResult res = PT.checkLimits(Offset, Limit, I->getParent());
- switch (res) {
- case AlwaysTrue:
- OS << " always safe\n";
- break;
- case AlwaysFalse:
- OS << " always unsafe\n";
- break;
- case Unknown:
- OS << " <<unknown>>\n";
- break;
- }
- }
-}
-
-INITIALIZE_PASS(PointerTracking, "pointertracking",
- "Track pointer bounds", false, true);
diff --git a/contrib/llvm/lib/Analysis/PostDominators.cpp b/contrib/llvm/lib/Analysis/PostDominators.cpp
index cbe8d18..3f0deab 100644
--- a/contrib/llvm/lib/Analysis/PostDominators.cpp
+++ b/contrib/llvm/lib/Analysis/PostDominators.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Analysis/DominatorInternals.h"
using namespace llvm;
@@ -29,7 +30,7 @@ using namespace llvm;
char PostDominatorTree::ID = 0;
char PostDominanceFrontier::ID = 0;
INITIALIZE_PASS(PostDominatorTree, "postdomtree",
- "Post-Dominator Tree Construction", true, true);
+ "Post-Dominator Tree Construction", true, true)
bool PostDominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
@@ -53,8 +54,11 @@ FunctionPass* llvm::createPostDomTree() {
// PostDominanceFrontier Implementation
//===----------------------------------------------------------------------===//
-INITIALIZE_PASS(PostDominanceFrontier, "postdomfrontier",
- "Post-Dominance Frontier Construction", true, true);
+INITIALIZE_PASS_BEGIN(PostDominanceFrontier, "postdomfrontier",
+ "Post-Dominance Frontier Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_END(PostDominanceFrontier, "postdomfrontier",
+ "Post-Dominance Frontier Construction", true, true)
const DominanceFrontier::DomSetType &
PostDominanceFrontier::calculate(const PostDominatorTree &DT,
diff --git a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index ecc0a18..667ee1c 100644
--- a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -39,7 +39,8 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
explicit ProfileEstimatorPass(const double execcount = 0)
- : FunctionPass(ID), ExecCount(execcount) {
+ : FunctionPass(ID), ExecCount(execcount) {
+ initializeProfileEstimatorPassPass(*PassRegistry::getPassRegistry());
if (execcount == 0) ExecCount = LoopWeight;
}
@@ -72,8 +73,11 @@ namespace {
} // End of anonymous namespace
char ProfileEstimatorPass::ID = 0;
-INITIALIZE_AG_PASS(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
- "Estimate profiling information", false, true, false);
+INITIALIZE_AG_PASS_BEGIN(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+ "Estimate profiling information", false, true, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_AG_PASS_END(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+ "Estimate profiling information", false, true, false)
namespace llvm {
char &ProfileEstimatorPassID = ProfileEstimatorPass::ID;
@@ -319,6 +323,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) {
FunctionInformation.erase(&F);
BlockInformation[&F].clear();
EdgeInformation[&F].clear();
+ BBToVisit.clear();
// Mark all blocks as to visit.
for (Function::iterator bi = F.begin(), be = F.end(); bi != be; ++bi)
diff --git a/contrib/llvm/lib/Analysis/ProfileInfo.cpp b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
index fc7f286..36f211e 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
@@ -24,8 +24,12 @@
#include <limits>
using namespace llvm;
+namespace llvm {
+ template<> char ProfileInfoT<Function,BasicBlock>::ID = 0;
+}
+
// Register the ProfileInfo interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<ProfileInfo> Z("Profile Information");
+INITIALIZE_ANALYSIS_GROUP(ProfileInfo, "Profile Information", NoProfileInfo)
namespace llvm {
@@ -44,9 +48,6 @@ ProfileInfoT<Function, BasicBlock>::~ProfileInfoT() {
}
template<>
-char ProfileInfoT<Function,BasicBlock>::ID = 0;
-
-template<>
char ProfileInfoT<MachineFunction, MachineBasicBlock>::ID = 0;
template<>
@@ -888,7 +889,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
FI = Unvisited.begin(), FE = Unvisited.end();
while(FI != FE && !FoundPath) {
const BasicBlock *BB = *FI; ++FI;
- const BasicBlock *Dest;
+ const BasicBlock *Dest = 0;
Path P;
bool BackEdgeFound = false;
for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
@@ -1076,7 +1077,9 @@ raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, con
namespace {
struct NoProfileInfo : public ImmutablePass, public ProfileInfo {
static char ID; // Class identification, replacement for typeinfo
- NoProfileInfo() : ImmutablePass(ID) {}
+ NoProfileInfo() : ImmutablePass(ID) {
+ initializeNoProfileInfoPass(*PassRegistry::getPassRegistry());
+ }
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -1097,6 +1100,6 @@ namespace {
char NoProfileInfo::ID = 0;
// Register this pass...
INITIALIZE_AG_PASS(NoProfileInfo, ProfileInfo, "no-profile",
- "No Profile Information", false, true, true);
+ "No Profile Information", false, true, true)
ImmutablePass *llvm::createNoProfileInfoPass() { return new NoProfileInfo(); }
diff --git a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
index d325b57..098079b 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -46,6 +46,7 @@ namespace {
static char ID; // Class identification, replacement for typeinfo
explicit LoaderPass(const std::string &filename = "")
: ModulePass(ID), Filename(filename) {
+ initializeLoaderPassPass(*PassRegistry::getPassRegistry());
if (filename.empty()) Filename = ProfileInfoFilename;
}
@@ -80,7 +81,7 @@ namespace {
char LoaderPass::ID = 0;
INITIALIZE_AG_PASS(LoaderPass, ProfileInfo, "profile-loader",
- "Load profile information from llvmprof.out", false, true, false);
+ "Load profile information from llvmprof.out", false, true, false)
char &llvm::ProfileLoaderPassID = LoaderPass::ID;
diff --git a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
index 3f01b2d..a017518 100644
--- a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
@@ -60,10 +60,12 @@ namespace llvm {
static char ID; // Class identification, replacement for typeinfo
explicit ProfileVerifierPassT () : FunctionPass(ID) {
+ initializeProfileVerifierPassPass(*PassRegistry::getPassRegistry());
DisableAssertions = ProfileVerifierDisableAssertions;
}
explicit ProfileVerifierPassT (bool da) : FunctionPass(ID),
DisableAssertions(da) {
+ initializeProfileVerifierPassPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -287,7 +289,7 @@ namespace llvm {
i != ie; ++i) {
if (const CallInst *CI = dyn_cast<CallInst>(&*i)) {
FType *F = CI->getCalledFunction();
- if (F && (F->getNameStr() == "_setjmp")) {
+ if (F && (F->getName() == "_setjmp")) {
isSetJmpTarget = true; break;
}
}
@@ -366,8 +368,11 @@ namespace llvm {
char ProfileVerifierPassT<FType, BType>::ID = 0;
}
-INITIALIZE_PASS(ProfileVerifierPass, "profile-verifier",
- "Verify profiling information", false, true);
+INITIALIZE_PASS_BEGIN(ProfileVerifierPass, "profile-verifier",
+ "Verify profiling information", false, true)
+INITIALIZE_AG_DEPENDENCY(ProfileInfo)
+INITIALIZE_PASS_END(ProfileVerifierPass, "profile-verifier",
+ "Verify profiling information", false, true)
namespace llvm {
FunctionPass *createProfileVerifierPass() {
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index abc057a..e2f6a8b 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -16,8 +16,8 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Assembly/Writer.h"
#define DEBUG_TYPE "region"
#include "llvm/Support/Debug.h"
@@ -45,7 +45,7 @@ STATISTIC(numSimpleRegions, "The # of simple regions");
/// PrintStyle - Print regions in different ways.
enum PrintStyle { PrintNone, PrintBB, PrintRN };
-cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
+static cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
cl::desc("style of printing regions"),
cl::values(
clEnumValN(PrintNone, "none", "print no details"),
@@ -72,6 +72,15 @@ Region::~Region() {
delete *I;
}
+void Region::replaceEntry(BasicBlock *BB) {
+ entry.setPointer(BB);
+}
+
+void Region::replaceExit(BasicBlock *BB) {
+ assert(exit && "No exit to replace!");
+ exit = BB;
+}
+
bool Region::contains(const BasicBlock *B) const {
BasicBlock *BB = const_cast<BasicBlock*>(B);
@@ -125,41 +134,49 @@ Loop *Region::outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const {
return outermostLoopInRegion(L);
}
-bool Region::isSimple() const {
- bool isSimple = true;
- bool found = false;
-
- BasicBlock *entry = getEntry(), *exit = getExit();
-
- // TopLevelRegion
- if (!exit)
- return false;
+BasicBlock *Region::getEnteringBlock() const {
+ BasicBlock *entry = getEntry();
+ BasicBlock *Pred;
+ BasicBlock *enteringBlock = 0;
for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
++PI) {
- BasicBlock *Pred = *PI;
+ Pred = *PI;
if (DT->getNode(Pred) && !contains(Pred)) {
- if (found) {
- isSimple = false;
- break;
- }
- found = true;
+ if (enteringBlock)
+ return 0;
+
+ enteringBlock = Pred;
}
}
- found = false;
+ return enteringBlock;
+}
+
+BasicBlock *Region::getExitingBlock() const {
+ BasicBlock *exit = getExit();
+ BasicBlock *Pred;
+ BasicBlock *exitingBlock = 0;
+
+ if (!exit)
+ return 0;
for (pred_iterator PI = pred_begin(exit), PE = pred_end(exit); PI != PE;
- ++PI)
- if (contains(*PI)) {
- if (found) {
- isSimple = false;
- break;
- }
- found = true;
+ ++PI) {
+ Pred = *PI;
+ if (contains(Pred)) {
+ if (exitingBlock)
+ return 0;
+
+ exitingBlock = Pred;
}
+ }
- return isSimple;
+ return exitingBlock;
+}
+
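+// A region is simple iff it is not the top-level region and has exactly
+// one entering and one exiting block.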
+bool Region::isSimple() const {
+ return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
}
std::string Region::getNameStr() const {
@@ -311,13 +328,38 @@ void Region::transferChildrenTo(Region *To) {
children.clear();
}
-void Region::addSubRegion(Region *SubRegion) {
+void Region::addSubRegion(Region *SubRegion, bool moveChildren) {
assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
+ assert(std::find(begin(), end(), SubRegion) == children.end()
+ && "Subregion already exists!");
+
SubRegion->parent = this;
- // Set up the region node.
- assert(std::find(children.begin(), children.end(), SubRegion) == children.end()
- && "Node already exist!");
children.push_back(SubRegion);
+
+ if (!moveChildren)
+ return;
+
+ assert(SubRegion->children.size() == 0
+ && "SubRegions that contain children are not supported");
+
+ for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
+ if (!(*I)->isSubRegion()) {
+ BasicBlock *BB = (*I)->getNodeAs<BasicBlock>();
+
+ if (SubRegion->contains(BB))
+ RI->setRegionFor(BB, SubRegion);
+ }
+
+ std::vector<Region*> Keep;
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (SubRegion->contains(*I) && *I != SubRegion) {
+ SubRegion->children.push_back(*I);
+ (*I)->parent = SubRegion;
+ } else
+ Keep.push_back(*I);
+
+ children.clear();
+ children.insert(children.begin(), Keep.begin(), Keep.end());
}
@@ -339,6 +381,38 @@ unsigned Region::getDepth() const {
return Depth;
}
+Region *Region::getExpandedRegion() const {
+ unsigned NumSuccessors = exit->getTerminator()->getNumSuccessors();
+
+ if (NumSuccessors == 0)
+ return NULL;
+
+ for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
+ PI != PE; ++PI)
+ if (!DT->dominates(getEntry(), *PI))
+ return NULL;
+
+ Region *R = RI->getRegionFor(exit);
+
+ if (R->getEntry() != exit) {
+ if (exit->getTerminator()->getNumSuccessors() == 1)
+ return new Region(getEntry(), *succ_begin(exit), RI, DT);
+ else
+ return NULL;
+ }
+
+ while (R->getParent() && R->getParent()->getEntry() == exit)
+ R = R->getParent();
+
+ if (!DT->dominates(getEntry(), R->getExit()))
+ for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
+ PI != PE; ++PI)
+ if (!DT->dominates(R->getExit(), *PI))
+ return NULL;
+
+ return new Region(getEntry(), R->getExit(), RI, DT);
+}
+
void Region::print(raw_ostream &OS, bool print_tree, unsigned level) const {
if (print_tree)
OS.indent(level*2) << "[" << level << "] " << getNameStr();
@@ -376,6 +450,11 @@ void Region::dump() const {
}
void Region::clearNodeCache() {
+ // Free the cached nodes.
+ for (BBNodeMapT::iterator I = BBNodeMap.begin(),
+ IE = BBNodeMap.end(); I != IE; ++I)
+ delete I->second;
+
BBNodeMap.clear();
for (Region::iterator RI = begin(), RE = end(); RI != RE; ++RI)
(*RI)->clearNodeCache();
@@ -592,6 +671,7 @@ void RegionInfo::releaseMemory() {
}
RegionInfo::RegionInfo() : FunctionPass(ID) {
+ initializeRegionInfoPass(*PassRegistry::getPassRegistry());
TopLevelRegion = 0;
}
@@ -654,11 +734,14 @@ Region *RegionInfo::getRegionFor(BasicBlock *BB) const {
return I != BBtoRegion.end() ? I->second : 0;
}
+void RegionInfo::setRegionFor(BasicBlock *BB, Region *R) {
+ BBtoRegion[BB] = R;
+}
+
Region *RegionInfo::operator[](BasicBlock *BB) const {
return getRegionFor(BB);
}
-
BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
BasicBlock *Exit = NULL;
@@ -733,9 +816,28 @@ RegionInfo::getCommonRegion(SmallVectorImpl<BasicBlock*> &BBs) const {
return ret;
}
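+// Update the region tree after a block split placed NewBB in front of
+// OldBB: NewBB replaces OldBB as the entry of every region that
+// previously started at OldBB.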
+void RegionInfo::splitBlock(BasicBlock* NewBB, BasicBlock *OldBB)
+{
+ Region *R = getRegionFor(OldBB);
+
+ setRegionFor(NewBB, R);
+
+ while (R->getEntry() == OldBB && !R->isTopLevelRegion()) {
+ R->replaceEntry(NewBB);
+ R = R->getParent();
+ }
+
+ setRegionFor(OldBB, R);
+}
+
char RegionInfo::ID = 0;
-INITIALIZE_PASS(RegionInfo, "regions",
- "Detect single entry single exit regions", true, true);
+INITIALIZE_PASS_BEGIN(RegionInfo, "regions",
+ "Detect single entry single exit regions", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominanceFrontier)
+INITIALIZE_PASS_END(RegionInfo, "regions",
+ "Detect single entry single exit regions", true, true)
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
diff --git a/contrib/llvm/lib/Analysis/RegionPass.cpp b/contrib/llvm/lib/Analysis/RegionPass.cpp
new file mode 100644
index 0000000..3269dcc
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/RegionPass.cpp
@@ -0,0 +1,275 @@
+//===- RegionPass.cpp - Region Pass and Region Pass Manager ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements RegionPass and RGPassManager. All region optimization
+// and transformation passes are derived from RegionPass. RGPassManager is
+// responsible for managing RegionPasses.
+// Most of this code was copied from LoopPass.cpp.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/RegionPass.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Support/Timer.h"
+
+#define DEBUG_TYPE "regionpassmgr"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// RGPassManager
+//
+
+char RGPassManager::ID = 0;
+
+RGPassManager::RGPassManager(int Depth)
+ : FunctionPass(ID), PMDataManager(Depth) {
+ skipThisRegion = false;
+ redoThisRegion = false;
+ RI = NULL;
+ CurrentRegion = NULL;
+}
+
+// Recurse through all subregions, appending each region to RQ.
+static void addRegionIntoQueue(Region *R, std::deque<Region *> &RQ) {
+ RQ.push_back(R);
+ for (Region::iterator I = R->begin(), E = R->end(); I != E; ++I)
+ addRegionIntoQueue(*I, RQ);
+}
+
+/// Pass Manager itself does not invalidate any analysis info.
+void RGPassManager::getAnalysisUsage(AnalysisUsage &Info) const {
+ Info.addRequired<RegionInfo>();
+ Info.setPreservesAll();
+}
+
+/// run - Execute all of the passes scheduled for execution. Keep track of
+/// whether any of the passes modifies the function, and if so, return true.
+bool RGPassManager::runOnFunction(Function &F) {
+ RI = &getAnalysis<RegionInfo>();
+ bool Changed = false;
+
+ // Collect inherited analysis from Module level pass manager.
+ populateInheritedAnalysis(TPM->activeStack);
+
+ addRegionIntoQueue(RI->getTopLevelRegion(), RQ);
+
+ if (RQ.empty()) // No regions, skip calling finalizers
+ return false;
+
+ // Initialization
+ for (std::deque<Region *>::const_iterator I = RQ.begin(), E = RQ.end();
+ I != E; ++I) {
+ Region *R = *I;
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ RegionPass *RP = (RegionPass *)getContainedPass(Index);
+ Changed |= RP->doInitialization(R, *this);
+ }
+ }
+
+ // Walk Regions
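+ // RQ is consumed from the back; because parents were enqueued before
+ // their children, the innermost regions are processed first.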
+ while (!RQ.empty()) {
+
+ CurrentRegion = RQ.back();
+ skipThisRegion = false;
+ redoThisRegion = false;
+
+ // Run all passes on the current Region.
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ RegionPass *P = (RegionPass*)getContainedPass(Index);
+
+ dumpPassInfo(P, EXECUTION_MSG, ON_REGION_MSG,
+ CurrentRegion->getNameStr());
+ dumpRequiredSet(P);
+
+ initializeAnalysisImpl(P);
+
+ {
+ PassManagerPrettyStackEntry X(P, *CurrentRegion->getEntry());
+
+ TimeRegion PassTimer(getPassTimer(P));
+ Changed |= P->runOnRegion(CurrentRegion, *this);
+ }
+
+ if (Changed)
+ dumpPassInfo(P, MODIFICATION_MSG, ON_REGION_MSG,
+ skipThisRegion ? "<deleted>" :
+ CurrentRegion->getNameStr());
+ dumpPreservedSet(P);
+
+ if (!skipThisRegion) {
+ // Manually check that this region is still healthy. This is done
+ // instead of relying on RegionInfo::verifyRegion since RegionInfo
+ // is a function pass and it's really expensive to verify every
+ // Region in the function every time. That level of checking can be
+ // enabled with the -verify-region-info option.
+ {
+ TimeRegion PassTimer(getPassTimer(P));
+ CurrentRegion->verifyRegion();
+ }
+
+ // Then call the regular verifyAnalysis functions.
+ verifyPreservedAnalysis(P);
+ }
+
+ removeNotPreservedAnalysis(P);
+ recordAvailableAnalysis(P);
+ removeDeadPasses(P,
+ skipThisRegion ? "<deleted>" :
+ CurrentRegion->getNameStr(),
+ ON_REGION_MSG);
+
+ if (skipThisRegion)
+ // Do not run other passes on this region.
+ break;
+ }
+
+ // If the region was deleted, release all the region passes. This frees up
+ // some memory, and avoids trouble with the pass manager trying to call
+ // verifyAnalysis on them.
+ if (skipThisRegion)
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ Pass *P = getContainedPass(Index);
+ freePass(P, "<deleted>", ON_REGION_MSG);
+ }
+
+ // Pop the region from queue after running all passes.
+ RQ.pop_back();
+
+ if (redoThisRegion)
+ RQ.push_back(CurrentRegion);
+
+ // Free all region nodes created in region passes.
+ RI->clearNodeCache();
+ }
+
+ // Finalization
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ RegionPass *P = (RegionPass*)getContainedPass(Index);
+ Changed |= P->doFinalization();
+ }
+
+ // Print the region tree after all passes have run.
+ DEBUG(
+ dbgs() << "\nRegion tree of function " << F.getName()
+ << " after all region passes:\n";
+ RI->dump();
+ dbgs() << "\n";
+ );
+
+ return Changed;
+}
+
+/// Print passes managed by this manager
+void RGPassManager::dumpPassStructure(unsigned Offset) {
+ errs().indent(Offset*2) << "Region Pass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ Pass *P = getContainedPass(Index);
+ P->dumpPassStructure(Offset + 1);
+ dumpLastUses(P, Offset+1);
+ }
+}
+
+namespace {
+//===----------------------------------------------------------------------===//
+// PrintRegionPass
+class PrintRegionPass : public RegionPass {
+private:
+ std::string Banner;
+ raw_ostream &Out; // raw_ostream to print on.
+
+public:
+ static char ID;
+ PrintRegionPass() : RegionPass(ID), Out(dbgs()) {}
+ PrintRegionPass(const std::string &B, raw_ostream &o)
+ : RegionPass(ID), Banner(B), Out(o) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
+ Out << Banner;
+ for (Region::block_iterator I = R->block_begin(), E = R->block_end();
+ I != E; ++I)
+ (*I)->getEntry()->print(Out);
+
+ return false;
+ }
+};
+
+char PrintRegionPass::ID = 0;
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RegionPass
+
+// Check if this pass is suitable for the current RGPassManager, if
+// available. This pass P is not suitable for an RGPassManager if P
+// does not preserve higher-level analysis info used by other
+// RGPassManager passes. In that case, pop the RGPassManager off the
+// stack, forcing assignPassManager() to create a new
+// RGPassManager as expected.
+void RegionPass::preparePassManager(PMStack &PMS) {
+
+ // Find RGPassManager
+ while (!PMS.empty() &&
+ PMS.top()->getPassManagerType() > PMT_RegionPassManager)
+ PMS.pop();
+
+
+ // If this pass destroys high-level information used by other passes
+ // managed by the current RGPassManager, do not insert this pass into
+ // it. Use a new RGPassManager instead.
+ if (PMS.top()->getPassManagerType() == PMT_RegionPassManager &&
+ !PMS.top()->preserveHigherLevelAnalysis(this))
+ PMS.pop();
+}
+
+/// Assign pass manager to manage this pass.
+void RegionPass::assignPassManager(PMStack &PMS,
+ PassManagerType PreferredType) {
+ // Find RGPassManager
+ while (!PMS.empty() &&
+ PMS.top()->getPassManagerType() > PMT_RegionPassManager)
+ PMS.pop();
+
+ RGPassManager *RGPM;
+
+ // Create new Region Pass Manager if it does not exist.
+ if (PMS.top()->getPassManagerType() == PMT_RegionPassManager)
+ RGPM = (RGPassManager*)PMS.top();
+ else {
+
+ assert (!PMS.empty() && "Unable to create Region Pass Manager");
+ PMDataManager *PMD = PMS.top();
+
+ // [1] Create new Region Pass Manager
+ RGPM = new RGPassManager(PMD->getDepth() + 1);
+ RGPM->populateInheritedAnalysis(PMS);
+
+ // [2] Set up new manager's top level manager
+ PMTopLevelManager *TPM = PMD->getTopLevelManager();
+ TPM->addIndirectPassManager(RGPM);
+
+ // [3] Assign manager to manage this new manager. This may create
+ // and push new managers into PMS
+ TPM->schedulePass(RGPM);
+
+ // [4] Push new manager into PMS
+ PMS.push(RGPM);
+ }
+
+ RGPM->add(this);
+}
+
+/// Get the printer pass
+Pass *RegionPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return new PrintRegionPass(Banner, O);
+}
diff --git a/contrib/llvm/lib/Analysis/RegionPrinter.cpp b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
index fee5c1b..0cf0f90 100644
--- a/contrib/llvm/lib/Analysis/RegionPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/RegionPrinter.cpp
@@ -121,35 +121,41 @@ namespace {
struct RegionViewer
: public DOTGraphTraitsViewer<RegionInfo, false> {
static char ID;
- RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){}
+ RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){
+ initializeRegionViewerPass(*PassRegistry::getPassRegistry());
+ }
};
-
char RegionViewer::ID = 0;
-INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
- true, true);
struct RegionOnlyViewer
: public DOTGraphTraitsViewer<RegionInfo, true> {
static char ID;
- RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID){}
+ RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID) {
+ initializeRegionOnlyViewerPass(*PassRegistry::getPassRegistry());
+ }
};
-
char RegionOnlyViewer::ID = 0;
-INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
- "View regions of function (with no function bodies)",
- true, true);
struct RegionPrinter
: public DOTGraphTraitsPrinter<RegionInfo, false> {
static char ID;
RegionPrinter() :
- DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {}
+ DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {
+ initializeRegionPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
+char RegionPrinter::ID = 0;
} //end anonymous namespace
-char RegionPrinter::ID = 0;
INITIALIZE_PASS(RegionPrinter, "dot-regions",
- "Print regions of function to 'dot' file", true, true);
+ "Print regions of function to 'dot' file", true, true)
+
+INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
+ true, true)
+
+INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
+ "View regions of function (with no function bodies)",
+ true, true)
namespace {
@@ -157,7 +163,9 @@ struct RegionOnlyPrinter
: public DOTGraphTraitsPrinter<RegionInfo, true> {
static char ID;
RegionOnlyPrinter() :
- DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {}
+ DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {
+ initializeRegionOnlyPrinterPass(*PassRegistry::getPassRegistry());
+ }
};
}
@@ -166,7 +174,7 @@ char RegionOnlyPrinter::ID = 0;
INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
"Print regions of function to 'dot' file "
"(with no function bodies)",
- true, true);
+ true, true)
FunctionPass* llvm::createRegionViewerPass() {
return new RegionViewer();
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index b892d85..62244cc 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -69,6 +69,7 @@
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
@@ -103,8 +104,12 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
"derived loop"),
cl::init(100));
-INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
- "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
@@ -115,13 +120,139 @@ char ScalarEvolution::ID = 0;
// Implementation of the SCEV class.
//
-SCEV::~SCEV() {}
-
void SCEV::dump() const {
print(dbgs());
dbgs() << '\n';
}
+void SCEV::print(raw_ostream &OS) const {
+ switch (getSCEVType()) {
+ case scConstant:
+ WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
+ return;
+ case scTruncate: {
+ const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
+ const SCEV *Op = Trunc->getOperand();
+ OS << "(trunc " << *Op->getType() << " " << *Op << " to "
+ << *Trunc->getType() << ")";
+ return;
+ }
+ case scZeroExtend: {
+ const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
+ const SCEV *Op = ZExt->getOperand();
+ OS << "(zext " << *Op->getType() << " " << *Op << " to "
+ << *ZExt->getType() << ")";
+ return;
+ }
+ case scSignExtend: {
+ const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
+ const SCEV *Op = SExt->getOperand();
+ OS << "(sext " << *Op->getType() << " " << *Op << " to "
+ << *SExt->getType() << ")";
+ return;
+ }
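+  // add recurrences print as {start,+,step,...}<flags><loop header>,
+  // e.g. {0,+,4}<nuw><%for.body> steps through 0, 4, 8, ... in %for.body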
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
+ OS << "{" << *AR->getOperand(0);
+ for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
+ OS << ",+," << *AR->getOperand(i);
+ OS << "}<";
+ if (AR->hasNoUnsignedWrap())
+ OS << "nuw><";
+ if (AR->hasNoSignedWrap())
+ OS << "nsw><";
+ WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
+ OS << ">";
+ return;
+ }
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
+ const char *OpStr = 0;
+ switch (NAry->getSCEVType()) {
+ case scAddExpr: OpStr = " + "; break;
+ case scMulExpr: OpStr = " * "; break;
+ case scUMaxExpr: OpStr = " umax "; break;
+ case scSMaxExpr: OpStr = " smax "; break;
+ }
+ OS << "(";
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ OS << **I;
+ if (llvm::next(I) != E)
+ OS << OpStr;
+ }
+ OS << ")";
+ return;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
+ OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
+ return;
+ }
+ case scUnknown: {
+ const SCEVUnknown *U = cast<SCEVUnknown>(this);
+ const Type *AllocTy;
+ if (U->isSizeOf(AllocTy)) {
+ OS << "sizeof(" << *AllocTy << ")";
+ return;
+ }
+ if (U->isAlignOf(AllocTy)) {
+ OS << "alignof(" << *AllocTy << ")";
+ return;
+ }
+
+ const Type *CTy;
+ Constant *FieldNo;
+ if (U->isOffsetOf(CTy, FieldNo)) {
+ OS << "offsetof(" << *CTy << ", ";
+ WriteAsOperand(OS, FieldNo, false);
+ OS << ")";
+ return;
+ }
+
+ // Otherwise just print it normally.
+ WriteAsOperand(OS, U->getValue(), false);
+ return;
+ }
+ case scCouldNotCompute:
+ OS << "***COULDNOTCOMPUTE***";
+ return;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+}
+
+const Type *SCEV::getType() const {
+ switch (getSCEVType()) {
+ case scConstant:
+ return cast<SCEVConstant>(this)->getType();
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return cast<SCEVCastExpr>(this)->getType();
+ case scAddRecExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr:
+ return cast<SCEVNAryExpr>(this)->getType();
+ case scAddExpr:
+ return cast<SCEVAddExpr>(this)->getType();
+ case scUDivExpr:
+ return cast<SCEVUDivExpr>(this)->getType();
+ case scUnknown:
+ return cast<SCEVUnknown>(this)->getType();
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return 0;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return 0;
+}
+
bool SCEV::isZero() const {
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
return SC->getValue()->isZero();
@@ -143,30 +274,6 @@ bool SCEV::isAllOnesValue() const {
SCEVCouldNotCompute::SCEVCouldNotCompute() :
SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
-bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-const Type *SCEVCouldNotCompute::getType() const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return 0;
-}
-
-bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-void SCEVCouldNotCompute::print(raw_ostream &OS) const {
- OS << "***COULDNOTCOMPUTE***";
-}
-
bool SCEVCouldNotCompute::classof(const SCEV *S) {
return S->getSCEVType() == scCouldNotCompute;
}
@@ -192,24 +299,10 @@ ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
-const Type *SCEVConstant::getType() const { return V->getType(); }
-
-void SCEVConstant::print(raw_ostream &OS) const {
- WriteAsOperand(OS, V, false);
-}
-
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
-bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return Op->dominates(BB, DT);
-}
-
-bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return Op->properlyDominates(BB, DT);
-}
-
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
@@ -218,10 +311,6 @@ SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
"Cannot truncate non-integer value!");
}
-void SCEVTruncateExpr::print(raw_ostream &OS) const {
- OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
@@ -230,10 +319,6 @@ SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
"Cannot zero extend non-integer value!");
}
-void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
- OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
@@ -242,139 +327,9 @@ SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
"Cannot sign extend non-integer value!");
}
-void SCEVSignExtendExpr::print(raw_ostream &OS) const {
- OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
-void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- const char *OpStr = getOperationStr();
- OS << "(";
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- OS << **I;
- if (llvm::next(I) != E)
- OS << OpStr;
- }
- OS << ")";
-}
-
-bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->dominates(BB, DT))
- return false;
- return true;
-}
-
-bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->properlyDominates(BB, DT))
- return false;
- return true;
-}
-
-bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->isLoopInvariant(L))
- return false;
- return true;
-}
-
-// hasComputableLoopEvolution - N-ary expressions have computable loop
-// evolutions iff they have at least one operand that varies with the loop,
-// but that all varying operands are computable.
-bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
- bool HasVarying = false;
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- const SCEV *S = *I;
- if (!S->isLoopInvariant(L)) {
- if (S->hasComputableLoopEvolution(L))
- HasVarying = true;
- else
- return false;
- }
- }
- return HasVarying;
-}
-
-bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- const SCEV *S = *I;
- if (O == S || S->hasOperand(O))
- return true;
- }
- return false;
-}
-
-bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
-}
-
-bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
-}
-
-void SCEVUDivExpr::print(raw_ostream &OS) const {
- OS << "(" << *LHS << " /u " << *RHS << ")";
-}
-
-const Type *SCEVUDivExpr::getType() const {
- // In most cases the types of LHS and RHS will be the same, but in some
- // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
- // depend on the type for correctness, but handling types carefully can
- // avoid extra casts in the SCEVExpander. The LHS is more likely to be
- // a pointer type than the RHS, so use the RHS' type here.
- return RHS->getType();
-}
-
-bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
- // Add recurrences are never invariant in the function-body (null loop).
- if (!QueryLoop)
- return false;
-
- // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
- if (QueryLoop->contains(L))
- return false;
-
- // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
- if (L->contains(QueryLoop))
- return true;
-
- // This recurrence is variant w.r.t. QueryLoop if any of its operands
- // are variant.
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->isLoopInvariant(QueryLoop))
- return false;
-
- // Otherwise it's loop-invariant.
- return true;
-}
-
-bool
-SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return DT->dominates(L->getHeader(), BB) &&
- SCEVNAryExpr::dominates(BB, DT);
-}
-
-bool
-SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- // This uses a "dominates" query instead of "properly dominates" query because
- // the instruction which produces the addrec's value is a PHI, and a PHI
- // effectively properly dominates its entire containing block.
- return DT->dominates(L->getHeader(), BB) &&
- SCEVNAryExpr::properlyDominates(BB, DT);
-}
-
-void SCEVAddRecExpr::print(raw_ostream &OS) const {
- OS << "{" << *Operands[0];
- for (unsigned i = 1, e = NumOperands; i != e; ++i)
- OS << ",+," << *Operands[i];
- OS << "}<";
- WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
- OS << ">";
-}
-
void SCEVUnknown::deleted() {
- // Clear this SCEVUnknown from ValuesAtScopes.
- SE->ValuesAtScopes.erase(this);
+ // Clear this SCEVUnknown from various maps.
+ SE->forgetMemoizedResults(this);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -384,8 +339,8 @@ void SCEVUnknown::deleted() {
}
void SCEVUnknown::allUsesReplacedWith(Value *New) {
- // Clear this SCEVUnknown from ValuesAtScopes.
- SE->ValuesAtScopes.erase(this);
+ // Clear this SCEVUnknown from various maps.
+ SE->forgetMemoizedResults(this);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
@@ -396,32 +351,6 @@ void SCEVUnknown::allUsesReplacedWith(Value *New) {
setValPtr(New);
}
-bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
- // All non-instruction values are loop invariant. All instructions are loop
- // invariant if they are not contained in the specified loop.
- // Instructions are never considered invariant in the function body
- // (null loop) because they are defined within the "loop".
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return L && !L->contains(I);
- return true;
-}
-
-bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return DT->dominates(I->getParent(), BB);
- return true;
-}
-
-bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return DT->properlyDominates(I->getParent(), BB);
- return true;
-}
-
-const Type *SCEVUnknown::getType() const {
- return getValue()->getType();
-}
-
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
@@ -486,30 +415,6 @@ bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
return false;
}
-void SCEVUnknown::print(raw_ostream &OS) const {
- const Type *AllocTy;
- if (isSizeOf(AllocTy)) {
- OS << "sizeof(" << *AllocTy << ")";
- return;
- }
- if (isAlignOf(AllocTy)) {
- OS << "alignof(" << *AllocTy << ")";
- return;
- }
-
- const Type *CTy;
- Constant *FieldNo;
- if (isOffsetOf(CTy, FieldNo)) {
- OS << "offsetof(" << *CTy << ", ";
- WriteAsOperand(OS, FieldNo, false);
- OS << ")";
- return;
- }
-
- // Otherwise just print it normally.
- WriteAsOperand(OS, getValue(), false);
-}
-
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//
@@ -914,6 +819,36 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
+ // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
+ // eliminate all the truncates.
+ if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
+ SmallVector<const SCEV *, 4> Operands;
+ bool hasTrunc = false;
+ for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
+ const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
+ hasTrunc = isa<SCEVTruncateExpr>(S);
+ Operands.push_back(S);
+ }
+ if (!hasTrunc)
+ return getAddExpr(Operands, false, false);
+ UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
+ }
+
+ // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
+ // eliminate all the truncates.
+ if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
+ SmallVector<const SCEV *, 4> Operands;
+ bool hasTrunc = false;
+ for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
+ const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
+ hasTrunc = isa<SCEVTruncateExpr>(S);
+ Operands.push_back(S);
+ }
+ if (!hasTrunc)
+ return getMulExpr(Operands, false, false);
+ UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
+ }
+
// If the input value is a chrec scev, truncate the chrec's operands.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
SmallVector<const SCEV *, 4> Operands;
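The trunc-over-add and trunc-over-mul folds added above are sound because
truncation distributes over two's-complement addition and multiplication:
the low bits of a sum or product depend only on the low bits of the
operands. A minimal standalone check of that identity, in plain C++ with
arbitrary illustrative values rather than the LLVM API:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t x = 0x12345678, y = 0xDEADBEEF;
    // trunc(x + y) == trunc(x) + trunc(y), here truncating i32 to i8.
    assert(uint8_t(x + y) == uint8_t(uint8_t(x) + uint8_t(y)));
    // The same holds for multiplication.
    assert(uint8_t(x * y) == uint8_t(uint8_t(x) * uint8_t(y)));
    return 0;
  }

The fold is still only profitable when every truncated operand simplifies,
which is why the code bails out as soon as one operand remains a
SCEVTruncateExpr.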
@@ -965,6 +900,19 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // zext(trunc(x)) --> zext(x) or x or trunc(x)
+ if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
+ // It's possible the bits taken off by the truncate were all zero bits. If
+ // so, we should be able to simplify this further.
+ const SCEV *X = ST->getOperand();
+ ConstantRange CR = getUnsignedRange(X);
+ unsigned TruncBits = getTypeSizeInBits(ST->getType());
+ unsigned NewBits = getTypeSizeInBits(Ty);
+ if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
+ CR.zextOrTrunc(NewBits)))
+ return getTruncateOrZeroExtend(X, Ty);
+ }
+
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can zero extend all of the
// operands (often constants). This allows analysis of something like
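The ConstantRange test above asks whether truncating x and zero-extending
it back provably reproduces x, i.e. whether the bits dropped by the
truncate are known zero. A small plain-C++ illustration of when
zext(trunc(x)) is the identity, using made-up values:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t fits = 200;  // unsigned value representable in 8 bits
    uint32_t wide = 300;  // needs more than 8 bits
    assert(uint16_t(uint8_t(fits)) == fits);  // zext(trunc(x)) == x
    assert(uint16_t(uint8_t(wide)) != wide);  // fold would be wrong here
    return 0;
  }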
@@ -1089,6 +1037,10 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
return getSignExtendExpr(SS->getOperand(), Ty);
+ // sext(zext(x)) --> zext(x)
+ if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
+ return getZeroExtendExpr(SZ->getOperand(), Ty);
+
// Before doing any expensive analysis, check to see if we've already
// computed a SCEV for this Op and Ty.
FoldingSetNodeID ID;
@@ -1098,6 +1050,23 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // If the input value is provably positive, build a zext instead.
+ if (isKnownNonNegative(Op))
+ return getZeroExtendExpr(Op, Ty);
+
+ // sext(trunc(x)) --> sext(x) or x or trunc(x)
+ if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
+ // It's possible the bits taken off by the truncate were all sign bits. If
+ // so, we should be able to simplify this further.
+ const SCEV *X = ST->getOperand();
+ ConstantRange CR = getSignedRange(X);
+ unsigned TruncBits = getTypeSizeInBits(ST->getType());
+ unsigned NewBits = getTypeSizeInBits(Ty);
+ if (CR.truncate(TruncBits).signExtend(NewBits).contains(
+ CR.sextOrTrunc(NewBits)))
+ return getTruncateOrSignExtend(X, Ty);
+ }
+
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can sign extend all of the
// operands (often constants). This allows analysis of something like
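The signed case mirrors the unsigned one: sext(trunc(x)) is the identity
exactly when the bits removed by the truncate are all copies of the sign
bit. A matching plain-C++ sketch, with illustrative values and
two's-complement narrowing assumed:

  #include <cassert>
  #include <cstdint>

  int main() {
    int16_t fits = -100;  // representable in i8
    int16_t wide = 1000;  // not representable in i8
    assert(int16_t(int8_t(fits)) == fits);  // sext(trunc(x)) == x
    assert(int16_t(int8_t(wide)) != wide);
    return 0;
  }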
@@ -1639,7 +1608,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRecLoop)) {
+ if (isLoopInvariant(Ops[i], AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1711,7 +1680,6 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scAddExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
@@ -1846,7 +1814,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRecLoop)) {
+ if (isLoopInvariant(Ops[i], AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1917,7 +1885,6 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scMulExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
@@ -2066,6 +2033,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
+ for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+ assert(isLoopInvariant(Operands[i], L) &&
+ "SCEVAddRecExpr operand is not loop-invariant!");
#endif
if (Operands.back()->isZero()) {
@@ -2106,7 +2076,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// requirement.
bool AllInvariant = true;
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
- if (!Operands[i]->isLoopInvariant(L)) {
+ if (!isLoopInvariant(Operands[i], L)) {
AllInvariant = false;
break;
}
@@ -2114,7 +2084,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
NestedOperands[0] = getAddRecExpr(Operands, L);
AllInvariant = true;
for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
- if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
+ if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
AllInvariant = false;
break;
}
@@ -2131,7 +2101,6 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
- ID.AddInteger(Operands.size());
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
ID.AddPointer(Operands[i]);
ID.AddPointer(L);
@@ -2242,7 +2211,6 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scSMaxExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
@@ -2347,7 +2315,6 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scUMaxExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
@@ -2543,24 +2510,24 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
return getMinusSCEV(AllOnes, V);
}
-/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
-///
-const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
- const SCEV *RHS) {
+/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1,
+/// and thus the HasNUW and HasNSW bits apply to the resultant add, not to
+/// whether the sub would have overflowed.
+const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ bool HasNUW, bool HasNSW) {
// Fast path: X - X --> 0.
if (LHS == RHS)
return getConstant(LHS->getType(), 0);
// X - Y --> X + -Y
- return getAddExpr(LHS, getNegativeSCEV(RHS));
+ return getAddExpr(LHS, getNegativeSCEV(RHS), HasNUW, HasNSW);
}
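The doc comment's caveat matters in practice: an unsigned subtraction that
never wraps can still correspond to a wrapping add of the negation. A
quick numeric illustration in plain C++ (the values are arbitrary):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t a = 5, b = 3;
    // a - b computed as a + (-b), widened so the wrap is visible.
    uint64_t sum = uint64_t(a) + uint64_t(uint32_t(0) - b);
    assert(uint32_t(sum) == a - b);  // same low 32 bits as the sub
    assert(sum > UINT32_MAX);        // but the add itself wrapped 2^32
    return 0;
  }

So a caller passing HasNUW here asserts no wrap on the A+B*-1 form, which
is a different claim than "the subtraction does not underflow".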
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
-ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
- const Type *Ty) {
+ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -2714,9 +2681,11 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
ValueExprMapType::iterator It =
ValueExprMap.find(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
+ const SCEV *Old = It->second;
+
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
- if (It->second != SymName && !It->second->hasOperand(SymName))
+ if (Old != SymName && !hasOperand(Old, SymName))
continue;
// SCEVUnknown for a PHI either means that it has an unrecognized
@@ -2727,9 +2696,9 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
// updates on its own when it gets to that point. In the third, we do
// want to forget the SCEVUnknown.
if (!isa<PHINode>(I) ||
- !isa<SCEVUnknown>(It->second) ||
- (I != PN && It->second == SymName)) {
- ValuesAtScopes.erase(It->second);
+ !isa<SCEVUnknown>(Old) ||
+ (I != PN && Old == SymName)) {
+ forgetMemoizedResults(Old);
ValueExprMap.erase(It);
}
}
@@ -2801,7 +2770,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// This is not a valid addrec if the step amount is varying each
// loop iteration, but is not itself an addrec in this loop.
- if (Accum->isLoopInvariant(L) ||
+ if (isLoopInvariant(Accum, L) ||
(isa<SCEVAddRecExpr>(Accum) &&
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
bool HasNUW = false;
@@ -2814,6 +2783,23 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
HasNUW = true;
if (OBO->hasNoSignedWrap())
HasNSW = true;
+ } else if (const GEPOperator *GEP =
+ dyn_cast<GEPOperator>(BEValueV)) {
+ // If the increment is a GEP, then we know it won't perform a
+ // signed overflow, because the address space cannot be
+ // wrapped around.
+ //
+ // NOTE: This isn't strictly true, because you could have an
+ // object straddling the 2G address boundary in a 32-bit address
+ // space (for example). We really want to model this as a "has
+ // no signed/unsigned wrap" where the base pointer is treated as
+ // unsigned and the increment is known to not have signed
+ // wrapping.
+ //
+ // This is a highly theoretical concern though, and this is good
+ // enough for all cases we know of at this point. :)
+ //
+ HasNSW |= GEP->isInBounds();
}
const SCEV *StartVal = getSCEV(StartValueV);
@@ -2822,7 +2808,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// Since the no-wrap flags are on the increment, they apply to the
// post-incremented value as well.
- if (Accum->isLoopInvariant(L))
+ if (isLoopInvariant(Accum, L))
(void)getAddRecExpr(getAddExpr(StartVal, Accum),
Accum, L, HasNUW, HasNSW);
@@ -2867,17 +2853,9 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = PN->hasConstantValue(DT)) {
- bool AllSameLoop = true;
- Loop *PNLoop = LI->getLoopFor(PN->getParent());
- for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
- AllSameLoop = false;
- break;
- }
- if (AllSameLoop)
+ if (Value *V = SimplifyInstruction(PN, TD, DT))
+ if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
- }
// If it's not a loop phi, we can't handle it yet.
return getUnknown(PN);
@@ -2892,6 +2870,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// Add expression, because the Instruction may be guarded by control flow
// and the no-overflow bits may not be valid for the expression in any
// context.
+ bool isInBounds = GEP->isInBounds();
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
@@ -2920,7 +2899,8 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
// Multiply the index by the element size to compute the element offset.
- const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+ const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, /*NUW*/ false,
+ /*NSW*/ isInBounds);
// Add the element offset to the running total offset.
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
@@ -2931,7 +2911,8 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
const SCEV *BaseS = getSCEV(Base);
// Add the total offset from all the GEP indices to the base.
- return getAddExpr(BaseS, TotalOffset);
+ return getAddExpr(BaseS, TotalOffset, /*NUW*/ false,
+ /*NSW*/ isInBounds);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
@@ -3019,9 +3000,13 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {
+ // See if we've computed this range already.
+ DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
+ if (I != UnsignedRanges.end())
+ return I->second;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
- return ConstantRange(C->getValue()->getValue());
+ return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
unsigned BitWidth = getTypeSizeInBits(S->getType());
ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
@@ -3038,49 +3023,52 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
ConstantRange X = getUnsignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getUnsignedRange(Add->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getUnsignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getUnsignedRange(SMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getUnsignedRange(UMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getUnsignedRange(UDiv->getLHS());
ConstantRange Y = getUnsignedRange(UDiv->getRHS());
- return ConservativeResult.intersectWith(X.udiv(Y));
+ return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(ZExt->getOperand());
- return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
+ return setUnsignedRange(ZExt,
+ ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(SExt->getOperand());
- return ConservativeResult.intersectWith(X.signExtend(BitWidth));
+ return setUnsignedRange(SExt,
+ ConservativeResult.intersectWith(X.signExtend(BitWidth)));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getUnsignedRange(Trunc->getOperand());
- return ConservativeResult.intersectWith(X.truncate(BitWidth));
+ return setUnsignedRange(Trunc,
+ ConservativeResult.intersectWith(X.truncate(BitWidth)));
}
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
@@ -3120,19 +3108,20 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
ExtEndRange)
- return ConservativeResult;
+ return setUnsignedRange(AddRec, ConservativeResult);
APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
EndRange.getUnsignedMin());
APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
EndRange.getUnsignedMax());
if (Min.isMinValue() && Max.isMaxValue())
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
+ return setUnsignedRange(AddRec, ConservativeResult);
+ return setUnsignedRange(AddRec,
+ ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
}
}
- return ConservativeResult;
+ return setUnsignedRange(AddRec, ConservativeResult);
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
@@ -3141,20 +3130,25 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
if (Ones == ~Zeros + 1)
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
+ return setUnsignedRange(U, ConservativeResult);
+ return setUnsignedRange(U,
+ ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
}
- return ConservativeResult;
+ return setUnsignedRange(S, ConservativeResult);
}
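The rewrite above threads every return through setUnsignedRange, so a
computed range is cached exactly once and the early lookup can serve all
repeated queries. A minimal sketch of the idiom with illustrative stand-in
types (not the LLVM classes):

  #include <map>

  typedef int Key;    // stands in for const SCEV *
  typedef int Range;  // stands in for ConstantRange

  struct Memo {
    std::map<Key, Range> Cache;

    // Mirrors setUnsignedRange: record the result and return it, so
    // every exit path caches and yields the same value.
    Range set(Key K, Range R) {
      Cache[K] = R;
      return R;
    }

    Range get(Key K) {
      std::map<Key, Range>::iterator I = Cache.find(K);
      if (I != Cache.end())
        return I->second;   // served from the memo map
      Range R = K * K;      // placeholder for the real range analysis
      return set(K, R);
    }
  };

  int main() {
    Memo M;
    return M.get(7) == M.get(7) ? 0 : 1;  // second call hits the cache
  }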
/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {
+ // See if we've computed this range already.
+ DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
+ if (I != SignedRanges.end())
+ return I->second;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
- return ConstantRange(C->getValue()->getValue());
+ return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
unsigned BitWidth = getTypeSizeInBits(S->getType());
ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
@@ -3171,49 +3165,52 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
ConstantRange X = getSignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getSignedRange(Add->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(Add, ConservativeResult.intersectWith(X));
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getSignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getSignedRange(Mul->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(Mul, ConservativeResult.intersectWith(X));
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getSignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getSignedRange(SMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(SMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getSignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getSignedRange(UMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(UMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getSignedRange(UDiv->getLHS());
ConstantRange Y = getSignedRange(UDiv->getRHS());
- return ConservativeResult.intersectWith(X.udiv(Y));
+ return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getSignedRange(ZExt->getOperand());
- return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
+ return setSignedRange(ZExt,
+ ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getSignedRange(SExt->getOperand());
- return ConservativeResult.intersectWith(X.signExtend(BitWidth));
+ return setSignedRange(SExt,
+ ConservativeResult.intersectWith(X.signExtend(BitWidth)));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getSignedRange(Trunc->getOperand());
- return ConservativeResult.intersectWith(X.truncate(BitWidth));
+ return setSignedRange(Trunc,
+ ConservativeResult.intersectWith(X.truncate(BitWidth)));
}
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
@@ -3263,34 +3260,35 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
ExtEndRange)
- return ConservativeResult;
+ return setSignedRange(AddRec, ConservativeResult);
APInt Min = APIntOps::smin(StartRange.getSignedMin(),
EndRange.getSignedMin());
APInt Max = APIntOps::smax(StartRange.getSignedMax(),
EndRange.getSignedMax());
if (Min.isMinSignedValue() && Max.isMaxSignedValue())
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
+ return setSignedRange(AddRec, ConservativeResult);
+ return setSignedRange(AddRec,
+ ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
}
}
- return ConservativeResult;
+ return setSignedRange(AddRec, ConservativeResult);
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
if (!U->getValue()->getType()->isIntegerTy() && !TD)
- return ConservativeResult;
+ return setSignedRange(U, ConservativeResult);
unsigned NS = ComputeNumSignBits(U->getValue(), TD);
if (NS == 1)
- return ConservativeResult;
- return ConservativeResult.intersectWith(
+ return setSignedRange(U, ConservativeResult);
+ return setSignedRange(U, ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
- APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
+ APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
}
- return ConservativeResult;
+ return setSignedRange(S, ConservativeResult);
}
/// createSCEV - We know that there is no SCEV for the specified value.
@@ -3458,8 +3456,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// If C is a single bit, it may be in the sign-bit position
// before the zero-extend. In this case, represent the xor
// using an add, which is equivalent, and re-apply the zext.
- APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
- if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
+ APInt Trunc = CI->getValue().trunc(Z0TySize);
+ if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
Trunc.isSignBit())
return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
UTy);
@@ -3699,58 +3697,61 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// backedge-taken count, which could result in infinite recursion.
std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
- if (Pair.second) {
- BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
- if (BECount.Exact != getCouldNotCompute()) {
- assert(BECount.Exact->isLoopInvariant(L) &&
- BECount.Max->isLoopInvariant(L) &&
- "Computed backedge-taken count isn't loop invariant for loop!");
- ++NumTripCountsComputed;
+ if (!Pair.second)
+ return Pair.first->second;
+ BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
+ if (BECount.Exact != getCouldNotCompute()) {
+ assert(isLoopInvariant(BECount.Exact, L) &&
+ isLoopInvariant(BECount.Max, L) &&
+ "Computed backedge-taken count isn't loop invariant for loop!");
+ ++NumTripCountsComputed;
+
+ // Update the value in the map.
+ Pair.first->second = BECount;
+ } else {
+ if (BECount.Max != getCouldNotCompute())
// Update the value in the map.
Pair.first->second = BECount;
- } else {
- if (BECount.Max != getCouldNotCompute())
- // Update the value in the map.
- Pair.first->second = BECount;
- if (isa<PHINode>(L->getHeader()->begin()))
- // Only count loops that have phi nodes as not being computable.
- ++NumTripCountsNotComputed;
- }
-
- // Now that we know more about the trip count for this loop, forget any
- // existing SCEV values for PHI nodes in this loop since they are only
- // conservative estimates made without the benefit of trip count
- // information. This is similar to the code in forgetLoop, except that
- // it handles SCEVUnknown PHI nodes specially.
- if (BECount.hasAnyInfo()) {
- SmallVector<Instruction *, 16> Worklist;
- PushLoopPHIs(L, Worklist);
-
- SmallPtrSet<Instruction *, 8> Visited;
- while (!Worklist.empty()) {
- Instruction *I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
-
- ValueExprMapType::iterator It =
- ValueExprMap.find(static_cast<Value *>(I));
- if (It != ValueExprMap.end()) {
- // SCEVUnknown for a PHI either means that it has an unrecognized
- // structure, or it's a PHI that's in the progress of being computed
- // by createNodeForPHI. In the former case, additional loop trip
- // count information isn't going to change anything. In the later
- // case, createNodeForPHI will perform the necessary updates on its
- // own when it gets to that point.
- if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
- ValuesAtScopes.erase(It->second);
- ValueExprMap.erase(It);
- }
- if (PHINode *PN = dyn_cast<PHINode>(I))
- ConstantEvolutionLoopExitValue.erase(PN);
+ if (isa<PHINode>(L->getHeader()->begin()))
+ // Only count loops that have phi nodes as not being computable.
+ ++NumTripCountsNotComputed;
+ }
+
+ // Now that we know more about the trip count for this loop, forget any
+ // existing SCEV values for PHI nodes in this loop since they are only
+ // conservative estimates made without the benefit of trip count
+ // information. This is similar to the code in forgetLoop, except that
+ // it handles SCEVUnknown PHI nodes specially.
+ if (BECount.hasAnyInfo()) {
+ SmallVector<Instruction *, 16> Worklist;
+ PushLoopPHIs(L, Worklist);
+
+ SmallPtrSet<Instruction *, 8> Visited;
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+ if (!Visited.insert(I)) continue;
+
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
+ const SCEV *Old = It->second;
+
+ // SCEVUnknown for a PHI either means that it has an unrecognized
+ // structure, or it's a PHI that's in the process of being computed
+ // by createNodeForPHI. In the former case, additional loop trip
+ // count information isn't going to change anything. In the latter
+ // case, createNodeForPHI will perform the necessary updates on its
+ // own when it gets to that point.
+ if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
+ forgetMemoizedResults(Old);
+ ValueExprMap.erase(It);
}
-
- PushDefUseChildren(I, Worklist);
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ ConstantEvolutionLoopExitValue.erase(PN);
}
+
+ PushDefUseChildren(I, Worklist);
}
}
return Pair.first->second;
@@ -3774,7 +3775,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
- ValuesAtScopes.erase(It->second);
+ forgetMemoizedResults(It->second);
ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
@@ -3782,6 +3783,11 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
PushDefUseChildren(I, Worklist);
}
+
+ // Forget all contained loops too, to avoid dangling entries in the
+ // ValuesAtScopes map.
+ for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ forgetLoop(*I);
}
/// forgetValue - This method should be called by the client when it has
@@ -3802,7 +3808,7 @@ void ScalarEvolution::forgetValue(Value *V) {
ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
if (It != ValueExprMap.end()) {
- ValuesAtScopes.erase(It->second);
+ forgetMemoizedResults(It->second);
ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
@@ -4016,6 +4022,105 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}
+static const SCEVAddRecExpr *
+isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) {
+ const SCEVAddRecExpr *SA = dyn_cast<SCEVAddRecExpr>(S);
+
+ // The SCEV must be an addrec of this loop.
+ if (!SA || SA->getLoop() != L || !SA->isAffine())
+ return 0;
+
+ // The SCEV must be known to not wrap in some way to be interesting.
+ if (!SA->hasNoUnsignedWrap() && !SA->hasNoSignedWrap())
+ return 0;
+
+ // The stride must be a constant so that we know if it is striding up or down.
+ if (!isa<SCEVConstant>(SA->getOperand(1)))
+ return 0;
+ return SA;
+}
+
+/// getMinusSCEVForExitTest - When considering an exit test for a loop with a
+/// "x != y" exit test, we turn this into a computation that evaluates x-y != 0,
+/// and this function returns the expression to use for x-y. We know and take
+/// advantage of the fact that this subtraction is only ever used in a
+/// comparison against zero.
+///
+static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
+ const Loop *L, ScalarEvolution &SE) {
+ // If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
+ // wrap (either NSW or NUW), then we know that the value will either become
+ // the other one (and thus the loop terminates), that the loop will terminate
+ // through some other exit condition first, or that the loop has undefined
+ // behavior. This information is useful when the addrec has a stride that is
+ // != 1 or -1, because it means we can't "miss" the exit value.
+ //
+ // In any of these three cases, it is safe to turn the exit condition into a
+ // "counting down" AddRec (to zero) by subtracting the two inputs as normal,
+ // but since we know that the "end cannot be missed" we can force the
+ // resulting AddRec to be a NUW addrec. Since it is counting down, this means
+ // that the AddRec *cannot* pass zero.
+
+ // See if LHS and RHS are addrec's we can handle.
+ const SCEVAddRecExpr *LHSA = isSimpleUnwrappingAddRec(LHS, L);
+ const SCEVAddRecExpr *RHSA = isSimpleUnwrappingAddRec(RHS, L);
+
+ // If neither addrec is interesting, just return a minus.
+ if (RHSA == 0 && LHSA == 0)
+ return SE.getMinusSCEV(LHS, RHS);
+
+ // If only one of LHS and RHS are an AddRec of this loop, make sure it is LHS.
+ if (RHSA && LHSA == 0) {
+ // Safe because a-b === b-a for comparisons against zero.
+ std::swap(LHS, RHS);
+ std::swap(LHSA, RHSA);
+ }
+
+ // Handle the case when only one is advancing in a non-overflowing way.
+ if (RHSA == 0) {
+ // If RHS is loop varying, then we can't predict when LHS will cross it.
+ if (!SE.isLoopInvariant(RHS, L))
+ return SE.getMinusSCEV(LHS, RHS);
+
+ // If LHS has a positive stride, then we compute RHS-LHS, because the loop
+ // is counting up until it crosses RHS (which must be larger than LHS). If
+ // it is negative, we compute LHS-RHS because we're counting down to RHS.
+ const ConstantInt *Stride =
+ cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
+ if (Stride->getValue().isNegative())
+ std::swap(LHS, RHS);
+
+ return SE.getMinusSCEV(RHS, LHS, true /*HasNUW*/);
+ }
+
+ // If both LHS and RHS are interesting, we have something like:
+ // a+i*4 != b+i*8.
+ const ConstantInt *LHSStride =
+ cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
+ const ConstantInt *RHSStride =
+ cast<SCEVConstant>(RHSA->getOperand(1))->getValue();
+
+ // If the strides are equal, then this is just a (complex) loop invariant
+ // comparison of a and b.
+ if (LHSStride == RHSStride)
+ return SE.getMinusSCEV(LHSA->getStart(), RHSA->getStart());
+
+ // If the signs of the strides differ, then the negative stride is counting
+ // down to the positive stride.
+ if (LHSStride->getValue().isNegative() != RHSStride->getValue().isNegative()) {
+ if (RHSStride->getValue().isNegative())
+ std::swap(LHS, RHS);
+ } else {
+ // If LHS's stride is smaller than RHS's stride, then "b" must be less than
+ // "a", and RHS ("b") is counting up (catching up) to LHS. This is true
+ // whether the strides are positive or negative.
+ if (RHSStride->getValue().slt(LHSStride->getValue()))
+ std::swap(LHS, RHS);
+ }
+
+ return SE.getMinusSCEV(LHS, RHS, true /*HasNUW*/);
+}
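The payoff of forcing NUW on the difference: a no-wrap count-down can only
leave the loop by reaching zero exactly, so HowFarToZero (below) can
divide instead of giving up. A tiny plain-C++ simulation of the "x != y"
exit with a hypothetical stride of 4:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t diff = 20;    // LHS - RHS at loop entry
    uint32_t stride = 4;   // per-iteration step, cannot be "missed"
    uint32_t trips = 0;
    while (diff != 0) {    // the x != y exit test, rewritten as diff != 0
      diff -= stride;
      ++trips;
    }
    assert(trips == 20 / 4);  // start / -stride, an exact division
    return 0;
  }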
+
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
@@ -4050,7 +4155,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
// At this point, we would like to compute how many iterations of the
// loop the predicate will return true for these inputs.
- if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
+ if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
// If there is a loop-invariant, force it into the RHS.
std::swap(LHS, RHS);
Cond = ICmpInst::getSwappedPredicate(Cond);
@@ -4075,7 +4180,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
switch (Cond) {
case ICmpInst::ICMP_NE: { // while (X != Y)
// Convert to: while (X-Y != 0)
- BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
+ BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEVForExitTest(LHS, RHS, L,
+ *this), L);
if (BTI.hasAnyInfo()) return BTI;
break;
}
@@ -4212,7 +4318,7 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
// We can only recognize very limited forms of loop index expressions, in
// particular, only affine AddRec's like {C1,+,C2}.
const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
- if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
+ if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
!isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
!isa<SCEVConstant>(IdxExpr->getOperand(1)))
return getCouldNotCompute();
@@ -4686,7 +4792,7 @@ static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
// bit width during computations.
APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
APInt Mod(BW + 1, 0);
- Mod.set(BW - Mult2); // Mod = N / D
+ Mod.setBit(BW - Mult2); // Mod = N / D
APInt I = AD.multiplicativeInverse(Mod);
// 4. Compute the minimum unsigned root of the equation:
@@ -4778,58 +4884,26 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
if (!AddRec || AddRec->getLoop() != L)
return getCouldNotCompute();
- if (AddRec->isAffine()) {
- // If this is an affine expression, the execution count of this branch is
- // the minimum unsigned root of the following equation:
- //
- // Start + Step*N = 0 (mod 2^BW)
- //
- // equivalent to:
- //
- // Step*N = -Start (mod 2^BW)
- //
- // where BW is the common bit width of Start and Step.
-
- // Get the initial value for the loop.
- const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
- L->getParentLoop());
- const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
- L->getParentLoop());
-
- if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
- // For now we handle only constant steps.
-
- // First, handle unitary steps.
- if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
- return getNegativeSCEV(Start); // N = -Start (as unsigned)
- if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
- return Start; // N = Start (as unsigned)
-
- // Then, try to solve the above equation provided that Start is constant.
- if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
- return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
- -StartC->getValue()->getValue(),
- *this);
- }
- } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
- // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
- // the quadratic equation to solve it.
- std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
- *this);
+ // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
+ // the quadratic equation to solve it.
+ if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
+ std::pair<const SCEV *,const SCEV *> Roots =
+ SolveQuadraticEquation(AddRec, *this);
const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
- if (R1) {
+ if (R1 && R2) {
#if 0
dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
<< " sol#2: " << *R2 << "\n";
#endif
// Pick the smallest positive root value.
if (ConstantInt *CB =
- dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
- R1->getValue(), R2->getValue()))) {
+ dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
+ R1->getValue(),
+ R2->getValue()))) {
if (CB->getZExtValue() == false)
std::swap(R1, R2); // R1 is the minimum root now.
-
+
// We can only use this value if the chrec ends up with an exact zero
// value at this index. When solving for "X*X != 5", for example, we
// should not accept a root of 2.
@@ -4838,8 +4912,54 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
return R1; // We found a quadratic root!
}
}
+ return getCouldNotCompute();
}
+ // Otherwise we can only handle this if it is affine.
+ if (!AddRec->isAffine())
+ return getCouldNotCompute();
+
+ // If this is an affine expression, the execution count of this branch is
+ // the minimum unsigned root of the following equation:
+ //
+ // Start + Step*N = 0 (mod 2^BW)
+ //
+ // equivalent to:
+ //
+ // Step*N = -Start (mod 2^BW)
+ //
+ // where BW is the common bit width of Start and Step.
+
+ // Get the initial value for the loop.
+ const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
+ const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
+
+ // If the AddRec is NUW, then (in an unsigned sense) it cannot be counting up
+ // to wrap to 0; it must be counting down to equal 0. Also, while counting
+ // down, it cannot "miss" 0 (which would cause it to wrap), regardless of what
+ // the stride is. As such, NUW addrec's will always become zero in
+ // "start / -stride" steps, and we know that the division is exact.
+ if (AddRec->hasNoUnsignedWrap())
+ // FIXME: We really want an "isexact" bit for udiv.
+ return getUDivExpr(Start, getNegativeSCEV(Step));
+
+ // For now we handle only constant steps.
+ const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
+ if (StepC == 0)
+ return getCouldNotCompute();
+
+ // First, handle unitary steps.
+ if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
+ return getNegativeSCEV(Start); // N = -Start (as unsigned)
+
+ if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
+ return Start; // N = Start (as unsigned)
+
+ // Then, try to solve the above equation provided that Start is constant.
+ if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
+ return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
+ -StartC->getValue()->getValue(),
+ *this);
return getCouldNotCompute();
}
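For an odd constant step, the congruence Step*N = -Start (mod 2^BW) always
has a unique solution, obtained from the multiplicative inverse of the odd
part; SolveLinEquationWithOverflow does this with APInt. A worked 8-bit
example in plain C++, brute-forcing the inverse that
APInt::multiplicativeInverse would compute:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint8_t step = 3, target = 7;  // solve 3*N == 7 (mod 256)
    uint8_t inv = 0;
    for (unsigned i = 0; i < 256; ++i)
      if (uint8_t(step * i) == 1) { inv = uint8_t(i); break; }
    assert(inv == 171);                 // 3 * 171 == 513 == 1 (mod 256)
    uint8_t n = uint8_t(inv * target);  // N = Step^-1 * target, here 173
    assert(uint8_t(step * n) == target);
    return 0;
  }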
@@ -4939,7 +5059,7 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
// as both operands could be addrecs loop-invariant in each other's loop.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
const Loop *L = AR->getLoop();
- if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
+ if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
std::swap(LHS, RHS);
Pred = ICmpInst::getSwappedPredicate(Pred);
Changed = true;
@@ -5159,13 +5279,13 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
trivially_true:
// Return 0 == 0.
- LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Pred = ICmpInst::ICMP_EQ;
return true;
trivially_false:
// Return 0 != 0.
- LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Pred = ICmpInst::ICMP_NE;
return true;
}
@@ -5556,7 +5676,7 @@ ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned) {
// Only handle: "ADDREC < LoopInvariant".
- if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
+ if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
if (!AddRec || AddRec->getLoop() != L)
@@ -5836,6 +5956,7 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
ScalarEvolution::ScalarEvolution()
: FunctionPass(ID), FirstUnknown(0) {
+ initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolution::runOnFunction(Function &F) {
@@ -5857,6 +5978,10 @@ void ScalarEvolution::releaseMemory() {
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
ValuesAtScopes.clear();
+ LoopDispositions.clear();
+ BlockDispositions.clear();
+ UnsignedRanges.clear();
+ SignedRanges.clear();
UniqueSCEVs.clear();
SCEVAllocator.Reset();
}
@@ -5936,7 +6061,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
if (L) {
OS << "\t\t" "Exits: ";
const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
- if (!ExitValue->isLoopInvariant(L)) {
+ if (!SE.isLoopInvariant(ExitValue, L)) {
OS << "<<Unknown>>";
} else {
OS << *ExitValue;
@@ -5953,3 +6078,240 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
PrintLoopInfo(OS, &SE, *I);
}
+ScalarEvolution::LoopDisposition
+ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
+ std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
+ std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
+ Values.insert(std::make_pair(L, LoopVariant));
+ if (!Pair.second)
+ return Pair.first->second;
+
+ LoopDisposition D = computeLoopDisposition(S, L);
+ return LoopDispositions[S][L] = D;
+}
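getLoopDisposition seeds the cache with LoopVariant before computing, so a
re-entrant query for the same (S, L) pair terminates with the conservative
answer instead of recursing forever; the real disposition then overwrites
the seed. A reduced sketch of that recursion-safe memoization, with
illustrative names rather than the LLVM API:

  #include <map>
  #include <utility>

  enum Disposition { Variant, Invariant };

  std::map<int, Disposition> Cache;
  Disposition compute(int Key);

  Disposition getDisposition(int Key) {
    // Seed conservatively; a cycle sees Variant and stops.
    std::pair<std::map<int, Disposition>::iterator, bool> P =
        Cache.insert(std::make_pair(Key, Variant));
    if (!P.second)
      return P.first->second;
    Disposition D = compute(Key);
    return Cache[Key] = D;  // overwrite the seed with the real answer
  }

  Disposition compute(int Key) {
    // Placeholder: the real code recurses over the SCEV's operands.
    return Key % 2 ? Invariant : Variant;
  }

  int main() { return getDisposition(3) == Invariant ? 0 : 1; }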
+
+ScalarEvolution::LoopDisposition
+ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return LoopInvariant;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+
+ // If L is the addrec's loop, it's computable.
+ if (AR->getLoop() == L)
+ return LoopComputable;
+
+ // Add recurrences are never invariant in the function-body (null loop).
+ if (!L)
+ return LoopVariant;
+
+ // This recurrence is variant w.r.t. L if L contains AR's loop.
+ if (L->contains(AR->getLoop()))
+ return LoopVariant;
+
+ // This recurrence is invariant w.r.t. L if AR's loop contains L.
+ if (AR->getLoop()->contains(L))
+ return LoopInvariant;
+
+ // This recurrence is variant w.r.t. L if any of its operands
+ // are variant.
+ for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I)
+ if (!isLoopInvariant(*I, L))
+ return LoopVariant;
+
+ // Otherwise it's loop-invariant.
+ return LoopInvariant;
+ }
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ bool HasVarying = false;
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ LoopDisposition D = getLoopDisposition(*I, L);
+ if (D == LoopVariant)
+ return LoopVariant;
+ if (D == LoopComputable)
+ HasVarying = true;
+ }
+ return HasVarying ? LoopComputable : LoopInvariant;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
+ if (LD == LoopVariant)
+ return LoopVariant;
+ LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
+ if (RD == LoopVariant)
+ return LoopVariant;
+ return (LD == LoopInvariant && RD == LoopInvariant) ?
+ LoopInvariant : LoopComputable;
+ }
+ case scUnknown:
+ // All non-instruction values are loop invariant. All instructions are loop
+ // invariant if they are not contained in the specified loop.
+ // Instructions are never considered invariant in the function body
+ // (null loop) because they are defined within the "loop".
+ if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
+ return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
+ return LoopInvariant;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return LoopVariant;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return LoopVariant;
+}
+
+bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
+ return getLoopDisposition(S, L) == LoopInvariant;
+}
+
+bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
+ return getLoopDisposition(S, L) == LoopComputable;
+}
+
+ScalarEvolution::BlockDisposition
+ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
+ std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
+ Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
+ if (!Pair.second)
+ return Pair.first->second;
+
+ BlockDisposition D = computeBlockDisposition(S, BB);
+ return BlockDispositions[S][BB] = D;
+}
+
+ScalarEvolution::BlockDisposition
+ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return ProperlyDominatesBlock;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
+ case scAddRecExpr: {
+ // This uses a "dominates" query instead of "properly dominates" query
+ // to test for proper dominance too, because the instruction which
+ // produces the addrec's value is a PHI, and a PHI effectively properly
+ // dominates its entire containing block.
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+ if (!DT->dominates(AR->getLoop()->getHeader(), BB))
+ return DoesNotDominateBlock;
+ }
+ // FALL THROUGH into SCEVNAryExpr handling.
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ bool Proper = true;
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ BlockDisposition D = getBlockDisposition(*I, BB);
+ if (D == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ if (D == DominatesBlock)
+ Proper = false;
+ }
+ return Proper ? ProperlyDominatesBlock : DominatesBlock;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
+ BlockDisposition LD = getBlockDisposition(LHS, BB);
+ if (LD == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ BlockDisposition RD = getBlockDisposition(RHS, BB);
+ if (RD == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
+ ProperlyDominatesBlock : DominatesBlock;
+ }
+ case scUnknown:
+ if (Instruction *I =
+ dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
+ if (I->getParent() == BB)
+ return DominatesBlock;
+ if (DT->properlyDominates(I->getParent(), BB))
+ return ProperlyDominatesBlock;
+ return DoesNotDominateBlock;
+ }
+ return ProperlyDominatesBlock;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return DoesNotDominateBlock;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return DoesNotDominateBlock;
+}
+
+bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
+ return getBlockDisposition(S, BB) >= DominatesBlock;
+}
+
+bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
+ return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
+}
+
+bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return false;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend: {
+ const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
+ const SCEV *CastOp = Cast->getOperand();
+ return Op == CastOp || hasOperand(CastOp, Op);
+ }
+ case scAddRecExpr:
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ const SCEV *NAryOp = *I;
+ if (NAryOp == Op || hasOperand(NAryOp, Op))
+ return true;
+ }
+ return false;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
+ return LHS == Op || hasOperand(LHS, Op) ||
+ RHS == Op || hasOperand(RHS, Op);
+ }
+ case scUnknown:
+ return false;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return false;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return false;
+}
+
+void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
+ ValuesAtScopes.erase(S);
+ LoopDispositions.erase(S);
+ BlockDispositions.erase(S);
+ UnsignedRanges.erase(S);
+ SignedRanges.erase(S);
+}
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 93b2a8b..e9edb3e 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -34,7 +34,10 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {}
+ ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {
+ initializeScalarEvolutionAliasAnalysisPass(
+ *PassRegistry::getPassRegistry());
+ }
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -49,8 +52,7 @@ namespace {
private:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual bool runOnFunction(Function &F);
- virtual AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
+ virtual AliasResult alias(const Location &LocA, const Location &LocB);
Value *GetBaseValue(const SCEV *S);
};
@@ -58,8 +60,11 @@ namespace {
// Register this pass...
char ScalarEvolutionAliasAnalysis::ID = 0;
-INITIALIZE_AG_PASS(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
- "ScalarEvolution-based Alias Analysis", false, true, false);
+INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+ "ScalarEvolution-based Alias Analysis", false, true, false)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+ "ScalarEvolution-based Alias Analysis", false, true, false)
FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
return new ScalarEvolutionAliasAnalysis();
@@ -101,17 +106,17 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
}
AliasAnalysis::AliasResult
-ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
- const Value *B, unsigned BSize) {
+ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
+ const Location &LocB) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are. This allows the code below to ignore this special
// case.
- if (ASize == 0 || BSize == 0)
+ if (LocA.Size == 0 || LocB.Size == 0)
return NoAlias;
// This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
- const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
- const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
+ const SCEV *AS = SE->getSCEV(const_cast<Value *>(LocA.Ptr));
+ const SCEV *BS = SE->getSCEV(const_cast<Value *>(LocB.Ptr));
// If they evaluate to the same expression, it's a MustAlias.
if (AS == BS) return MustAlias;
@@ -121,8 +126,8 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
if (SE->getEffectiveSCEVType(AS->getType()) ==
SE->getEffectiveSCEVType(BS->getType())) {
unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
- APInt ASizeInt(BitWidth, ASize);
- APInt BSizeInt(BitWidth, BSize);
+ APInt ASizeInt(BitWidth, LocA.Size);
+ APInt BSizeInt(BitWidth, LocB.Size);
// Compute the difference between the two pointers.
const SCEV *BA = SE->getMinusSCEV(BS, AS);
@@ -154,11 +159,15 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
// inttoptr and ptrtoint operators.
Value *AO = GetBaseValue(AS);
Value *BO = GetBaseValue(BS);
- if ((AO && AO != A) || (BO && BO != B))
- if (alias(AO ? AO : A, AO ? UnknownSize : ASize,
- BO ? BO : B, BO ? UnknownSize : BSize) == NoAlias)
+ if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
+ if (alias(Location(AO ? AO : LocA.Ptr,
+ AO ? +UnknownSize : LocA.Size,
+ AO ? 0 : LocA.TBAATag),
+ Location(BO ? BO : LocB.Ptr,
+ BO ? +UnknownSize : LocB.Size,
+ BO ? 0 : LocB.TBAATag)) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
- return AliasAnalysis::alias(A, ASize, B, BSize);
+ return AliasAnalysis::alias(LocA, LocB);
}
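The core SCEV-AA idea used above: form the symbolic difference BA = B - A
and, when the access sizes bound it away from both access ranges, conclude
NoAlias. A concrete plain-C++ instance of the inequality test, with
hypothetical offsets and sizes:

  #include <cassert>
  #include <cstdint>

  int main() {
    char buf[64];
    char *a = buf + 8, *b = buf + 20;
    int64_t ba = b - a;  // the "BA" difference, here 12
    int64_t asize = 4, bsize = 4;
    // [A, A+ASize) and [B, B+BSize) are disjoint if BA >= ASize
    // or -BA >= BSize.
    bool noAlias = ba >= asize || -ba >= bsize;
    assert(noAlias);  // bytes [8,12) and [20,24) cannot overlap
    return 0;
  }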
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 66a06ae..b7c110f 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -608,15 +608,22 @@ static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
return A; // Arbitrarily break the tie.
}
-/// GetRelevantLoop - Get the most relevant loop associated with the given
+/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
-static const Loop *GetRelevantLoop(const SCEV *S, LoopInfo &LI,
- DominatorTree &DT) {
+const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
+ // Test whether we've already computed the most relevant loop for this SCEV.
+ std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
+ RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
+ if (!Pair.second)
+ return Pair.first->second;
+
if (isa<SCEVConstant>(S))
+ // A constant has no relevant loops.
return 0;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
- return LI.getLoopFor(I->getParent());
+ return Pair.first->second = SE.LI->getLoopFor(I->getParent());
+ // A non-instruction has no relevant loops.
return 0;
}
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
@@ -625,16 +632,22 @@ static const Loop *GetRelevantLoop(const SCEV *S, LoopInfo &LI,
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
I != E; ++I)
- L = PickMostRelevantLoop(L, GetRelevantLoop(*I, LI, DT), DT);
- return L;
+ L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
+ return RelevantLoops[N] = L;
+ }
+ if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
+ const Loop *Result = getRelevantLoop(C->getOperand());
+ return RelevantLoops[C] = Result;
+ }
+ if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
+ const Loop *Result =
+ PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
+ getRelevantLoop(D->getRHS()),
+ *SE.DT);
+ return RelevantLoops[D] = Result;
}
- if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
- return GetRelevantLoop(C->getOperand(), LI, DT);
- if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S))
- return PickMostRelevantLoop(GetRelevantLoop(D->getLHS(), LI, DT),
- GetRelevantLoop(D->getRHS(), LI, DT),
- DT);
llvm_unreachable("Unexpected SCEV type!");
+ return 0;
}
namespace {
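
The new getRelevantLoop caches its answer in the RelevantLoops DenseMap using the insert-a-placeholder-first idiom, and it writes results back through operator[] rather than the saved iterator because the recursive calls may grow the map and DenseMap iterators do not survive that. A self-contained sketch of the same idiom, with std::map used only for brevity:

#include <iostream>
#include <map>

static std::map<unsigned, unsigned long long> Cache;

static unsigned long long fib(unsigned N) {
  // Insert a placeholder; if the key already exists, insert() reports it
  // via the bool and we return the memoized value without recomputing.
  std::pair<std::map<unsigned, unsigned long long>::iterator, bool> P =
      Cache.insert(std::make_pair(N, 0ULL));
  if (!P.second)
    return P.first->second;
  unsigned long long R = N < 2 ? N : fib(N - 1) + fib(N - 2);
  // Store through operator[], not P.first: with a DenseMap the recursive
  // inserts could have invalidated the saved iterator.
  return Cache[N] = R;
}

int main() { std::cout << fib(90) << "\n"; }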
@@ -682,8 +695,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
- OpsAndLoops.push_back(std::make_pair(GetRelevantLoop(*I, *SE.LI, *SE.DT),
- *I));
+ OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants and
// pointer operands precede non-pointer operands.
@@ -752,8 +764,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
- OpsAndLoops.push_back(std::make_pair(GetRelevantLoop(*I, *SE.LI, *SE.DT),
- *I));
+ OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants.
std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
@@ -990,7 +1001,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Strip off any non-loop-dominating component from the addrec start.
const SCEV *Start = Normalized->getStart();
const SCEV *PostLoopOffset = 0;
- if (!Start->properlyDominates(L->getHeader(), SE.DT)) {
+ if (!SE.properlyDominates(Start, L->getHeader())) {
PostLoopOffset = Start;
Start = SE.getConstant(Normalized->getType(), 0);
Normalized =
@@ -1002,7 +1013,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
const SCEV *PostLoopScale = 0;
- if (!Step->dominates(L->getHeader(), SE.DT)) {
+ if (!SE.dominates(Step, L->getHeader())) {
PostLoopScale = Step;
Step = SE.getConstant(Normalized->getType(), 1);
Normalized =
@@ -1278,7 +1289,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
Instruction *InsertPt = Builder.GetInsertPoint();
for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
L = L->getParentLoop())
- if (S->isLoopInvariant(L)) {
+ if (SE.isLoopInvariant(S, L)) {
if (!L) break;
if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
@@ -1286,7 +1297,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
- if (L && S->hasComputableLoopEvolution(L) && !PostIncLoops.count(L))
+ if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstNonPHI();
while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
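
The expand() change above keeps the same hoisting walk but routes the invariance test through ScalarEvolution: climb from the innermost enclosing loop outward while the expression stays invariant, moving the insertion point into each loop's preheader. A toy sketch of that walk, with a hypothetical invariance test based on definition depth standing in for SE.isLoopInvariant:

#include <iostream>
#include <string>

struct Loop {
  std::string Name;
  Loop *Parent;        // null when there is no enclosing loop
  bool HasPreheader;
  unsigned Depth;      // nesting depth, 1 = outermost
};

// Hypothetical stand-in: an expression defined at depth DefDepth is
// invariant in any loop nested more deeply than that.
static bool invariantIn(unsigned DefDepth, const Loop *L) {
  return L == nullptr || L->Depth > DefDepth;
}

int main() {
  Loop Outer = {"outer", nullptr, true, 1};
  Loop Inner = {"inner", &Outer, true, 2};
  std::string InsertPt = "inner.body";
  unsigned DefDepth = 1;                  // expression defined in "outer"
  for (Loop *L = &Inner; ; L = L->Parent) {
    if (!invariantIn(DefDepth, L))
      break;                              // first loop it varies in: stop
    if (!L)
      break;                              // invariant everywhere
    if (L->HasPreheader)
      InsertPt = L->Name + ".preheader";  // hoist out of this loop
  }
  std::cout << "insert at: " << InsertPt << "\n";  // inner.preheader
}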
diff --git a/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index bbfdcec..40e18ab 100644
--- a/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -12,29 +12,65 @@
//
// In LLVM IR, memory does not have types, so LLVM's own type system is not
// suitable for doing TBAA. Instead, metadata is added to the IR to describe
-// a type system of a higher level language.
+// a type system of a higher-level language. This can be used to implement
+// typical C/C++ TBAA, but it can also be used to implement custom alias
+// analysis behavior for other languages.
//
-// This pass is language-independent. The type system is encoded in
-// metadata. This allows this pass to support typical C and C++ TBAA, but
-// it can also support custom aliasing behavior for other languages.
+// The current metadata format is very simple. TBAA MDNodes have up to
+// three fields, e.g.:
+// !0 = metadata !{ metadata !"an example type tree" }
+// !1 = metadata !{ metadata !"int", metadata !0 }
+// !2 = metadata !{ metadata !"float", metadata !0 }
+// !3 = metadata !{ metadata !"const float", metadata !2, i64 1 }
//
-// This is a work-in-progress. It doesn't work yet, and the metadata
-// format isn't stable.
+// The first field is an identity field. It can be any value, usually
+// an MDString, which uniquely identifies the type. The most important
+// name in the tree is the name of the root node. Two trees with
+// different root node names are entirely disjoint, even if they
+// have leaves with common names.
//
-// TODO: getModRefBehavior. The AliasAnalysis infrastructure will need to
-// be extended.
-// TODO: AA chaining
-// TODO: struct fields
+// The second field identifies the type's parent node in the tree, or
+// is null or omitted for a root node. A type is considered to alias
+// all of its descendants and all of its ancestors in the tree. Also,
+// a type is considered to alias all types in other trees, so that
+// bitcode produced from multiple front-ends is handled conservatively.
+//
+// If the third field is present, it's an integer which, if equal to 1,
+// indicates that the type is "constant" (meaning pointsToConstantMemory
+// should return true; see
+// http://llvm.org/docs/AliasAnalysis.html#OtherItfs).
+//
+// TODO: The current metadata format doesn't support struct
+// fields. For example:
+// struct X {
+// double d;
+// int i;
+// };
+// void foo(struct X *x, struct X *y, double *p) {
+// *x = *y;
+// *p = 0.0;
+// }
+// Struct X has a double member, so the store to *x can alias the store to *p.
+// Currently it's not possible to precisely describe all the things struct X
+// aliases, so struct assignments must use conservative TBAA nodes. There's
+// no scheme for attaching metadata to @llvm.memcpy yet either.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Metadata.h"
#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
+// A handy option for disabling TBAA functionality. The same effect can also be
+// achieved by stripping the !tbaa tags from IR, but this option is sometimes
+// more convenient.
+static cl::opt<bool> EnableTBAA("enable-tbaa", cl::init(true));
+
namespace {
/// TBAANode - This is a simple wrapper around an MDNode which provides a
/// higher-level interface by hiding the details of how alias analysis
@@ -44,16 +80,16 @@ namespace {
public:
TBAANode() : Node(0) {}
- explicit TBAANode(MDNode *N) : Node(N) {}
+ explicit TBAANode(const MDNode *N) : Node(N) {}
/// getNode - Get the MDNode for this TBAANode.
const MDNode *getNode() const { return Node; }
- /// getParent - Get this TBAANode's Alias DAG parent.
+ /// getParent - Get this TBAANode's Alias tree parent.
TBAANode getParent() const {
if (Node->getNumOperands() < 2)
return TBAANode();
- MDNode *P = dyn_cast<MDNode>(Node->getOperand(1));
+ MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1));
if (!P)
return TBAANode();
// Ok, this node has a valid parent. Return it.
@@ -69,8 +105,7 @@ namespace {
ConstantInt *CI = dyn_cast<ConstantInt>(Node->getOperand(2));
if (!CI)
return false;
- // TODO: Think about the encoding.
- return CI->isOne();
+ return CI->getValue()[0];
}
};
}
@@ -82,7 +117,13 @@ namespace {
public AliasAnalysis {
public:
static char ID; // Class identification, replacement for typeinfo
- TypeBasedAliasAnalysis() : ImmutablePass(ID) {}
+ TypeBasedAliasAnalysis() : ImmutablePass(ID) {
+ initializeTypeBasedAliasAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void initializePass() {
+ InitializeAliasAnalysis(this);
+ }
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -94,18 +135,25 @@ namespace {
return this;
}
+ bool Aliases(const MDNode *A, const MDNode *B) const;
+
private:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size);
- virtual bool pointsToConstantMemory(const Value *P);
+ virtual AliasResult alias(const Location &LocA, const Location &LocB);
+ virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ virtual ModRefBehavior getModRefBehavior(const Function *F);
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc);
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2);
};
} // End of anonymous namespace
// Register this pass...
char TypeBasedAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS(TypeBasedAliasAnalysis, AliasAnalysis, "tbaa",
- "Type-Based Alias Analysis", false, true, false);
+ "Type-Based Alias Analysis", false, true, false)
ImmutablePass *llvm::createTypeBasedAliasAnalysisPass() {
return new TypeBasedAliasAnalysis();
@@ -117,34 +165,19 @@ TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AliasAnalysis::getAnalysisUsage(AU);
}
-AliasAnalysis::AliasResult
-TypeBasedAliasAnalysis::alias(const Value *A, unsigned ASize,
- const Value *B, unsigned BSize) {
- // Currently, metadata can only be attached to Instructions.
- const Instruction *AI = dyn_cast<Instruction>(A);
- if (!AI) return MayAlias;
- const Instruction *BI = dyn_cast<Instruction>(B);
- if (!BI) return MayAlias;
-
- // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must
- // be conservative.
- MDNode *AM =
- AI->getMetadata(AI->getParent()->getParent()->getParent()
- ->getMDKindID("tbaa"));
- if (!AM) return MayAlias;
- MDNode *BM =
- BI->getMetadata(BI->getParent()->getParent()->getParent()
- ->getMDKindID("tbaa"));
- if (!BM) return MayAlias;
-
+/// Aliases - Test whether the type represented by A may alias the
+/// type represented by B.
+bool
+TypeBasedAliasAnalysis::Aliases(const MDNode *A,
+ const MDNode *B) const {
// Keep track of the root node for A and B.
TBAANode RootA, RootB;
- // Climb the DAG from A to see if we reach B.
- for (TBAANode T(AM); ; ) {
- if (T.getNode() == BM)
+ // Climb the tree from A to see if we reach B.
+ for (TBAANode T(A); ; ) {
+ if (T.getNode() == B)
// B is an ancestor of A.
- return MayAlias;
+ return true;
RootA = T;
T = T.getParent();
@@ -152,11 +185,11 @@ TypeBasedAliasAnalysis::alias(const Value *A, unsigned ASize,
break;
}
- // Climb the DAG from B to see if we reach A.
- for (TBAANode T(BM); ; ) {
- if (T.getNode() == AM)
+ // Climb the tree from B to see if we reach A.
+ for (TBAANode T(B); ; ) {
+ if (T.getNode() == A)
// A is an ancestor of B.
- return MayAlias;
+ return true;
RootB = T;
T = T.getParent();
@@ -166,26 +199,101 @@ TypeBasedAliasAnalysis::alias(const Value *A, unsigned ASize,
// Neither node is an ancestor of the other.
- // If they have the same root, then we've proved there's no alias.
- if (RootA.getNode() == RootB.getNode())
- return NoAlias;
-
// If they have different roots, they're part of different potentially
// unrelated type systems, so we must be conservative.
- return MayAlias;
+ if (RootA.getNode() != RootB.getNode())
+ return true;
+
+ // If they have the same root, then we've proved there's no alias.
+ return false;
+}
+
+AliasAnalysis::AliasResult
+TypeBasedAliasAnalysis::alias(const Location &LocA,
+ const Location &LocB) {
+ if (!EnableTBAA)
+ return AliasAnalysis::alias(LocA, LocB);
+
+ // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must
+ // be conservative.
+ const MDNode *AM = LocA.TBAATag;
+ if (!AM) return AliasAnalysis::alias(LocA, LocB);
+ const MDNode *BM = LocB.TBAATag;
+ if (!BM) return AliasAnalysis::alias(LocA, LocB);
+
+ // If they may alias, chain to the next AliasAnalysis.
+ if (Aliases(AM, BM))
+ return AliasAnalysis::alias(LocA, LocB);
+
+ // Otherwise return a definitive result.
+ return NoAlias;
}
-bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Value *P) {
- // Currently, metadata can only be attached to Instructions.
- const Instruction *I = dyn_cast<Instruction>(P);
- if (!I) return false;
+bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Location &Loc,
+ bool OrLocal) {
+ if (!EnableTBAA)
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
- MDNode *M =
- I->getMetadata(I->getParent()->getParent()->getParent()
- ->getMDKindID("tbaa"));
- if (!M) return false;
+ const MDNode *M = Loc.TBAATag;
+ if (!M) return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
// If this is an "immutable" type, we can assume the pointer is pointing
// to constant memory.
- return TBAANode(M).TypeIsImmutable();
+ if (TBAANode(M).TypeIsImmutable())
+ return true;
+
+ return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
+}
+
+AliasAnalysis::ModRefBehavior
+TypeBasedAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+ if (!EnableTBAA)
+ return AliasAnalysis::getModRefBehavior(CS);
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // If this is an "immutable" type, we can assume the call doesn't write
+ // to memory.
+ if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+ if (TBAANode(M).TypeIsImmutable())
+ Min = OnlyReadsMemory;
+
+ return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
+}
+
+AliasAnalysis::ModRefBehavior
+TypeBasedAliasAnalysis::getModRefBehavior(const Function *F) {
+ // Functions don't have metadata. Just chain to the next implementation.
+ return AliasAnalysis::getModRefBehavior(F);
+}
+
+AliasAnalysis::ModRefResult
+TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Location &Loc) {
+ if (!EnableTBAA)
+ return AliasAnalysis::getModRefInfo(CS, Loc);
+
+ if (const MDNode *L = Loc.TBAATag)
+ if (const MDNode *M =
+ CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+ if (!Aliases(L, M))
+ return NoModRef;
+
+ return AliasAnalysis::getModRefInfo(CS, Loc);
+}
+
+AliasAnalysis::ModRefResult
+TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ if (!EnableTBAA)
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
+
+ if (const MDNode *M1 =
+ CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+ if (const MDNode *M2 =
+ CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
+ if (!Aliases(M1, M2))
+ return NoModRef;
+
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
}
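
A self-contained sketch of the tree walk behind Aliases(): plain parent pointers stand in for the MDNode operands, but the three-way outcome (ancestor, same root, different roots) is the same as in the code above.

#include <iostream>

struct TBAANode {
  const char *Name;
  const TBAANode *Parent;  // null for a root node
};

static bool aliases(const TBAANode *A, const TBAANode *B) {
  const TBAANode *RootA = A, *RootB = B;
  for (const TBAANode *T = A; T; T = T->Parent) {
    if (T == B) return true;  // B is an ancestor of A
    RootA = T;
  }
  for (const TBAANode *T = B; T; T = T->Parent) {
    if (T == A) return true;  // A is an ancestor of B
    RootB = T;
  }
  // Different roots: potentially unrelated type systems, be conservative.
  if (RootA != RootB) return true;
  // Same root, neither an ancestor of the other: provably no alias.
  return false;
}

int main() {
  TBAANode Root = {"example type tree", nullptr};
  TBAANode Int = {"int", &Root};
  TBAANode Float = {"float", &Root};
  TBAANode ConstFloat = {"const float", &Float};
  std::cout << aliases(&Int, &Float) << "\n";        // 0: disjoint siblings
  std::cout << aliases(&Float, &ConstFloat) << "\n"; // 1: ancestor relation
}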
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 181c9b0..1060bc5 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
@@ -23,9 +24,22 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <cstring>
using namespace llvm;
+using namespace llvm::PatternMatch;
+
+const unsigned MaxDepth = 6;
+
+/// getBitWidth - Returns the bitwidth of the given scalar or pointer type
+/// (returns 0 if unknown). For vector types, returns the element type's
+/// bitwidth.
+static unsigned getBitWidth(const Type *Ty, const TargetData *TD) {
+ if (unsigned BitWidth = Ty->getScalarSizeInBits())
+ return BitWidth;
+ assert(isa<PointerType>(Ty) && "Expected a pointer type!");
+ return TD ? TD->getPointerSizeInBits() : 0;
+}
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
@@ -46,7 +60,6 @@ using namespace llvm;
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt &KnownZero, APInt &KnownOne,
const TargetData *TD, unsigned Depth) {
- const unsigned MaxDepth = 6;
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Mask.getBitWidth();
@@ -69,14 +82,14 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) ||
isa<ConstantAggregateZero>(V)) {
- KnownOne.clear();
+ KnownOne.clearAllBits();
KnownZero = Mask;
return;
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- KnownZero.set(); KnownOne.set();
+ KnownZero.setAllBits(); KnownOne.setAllBits();
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
@@ -103,15 +116,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
CountTrailingZeros_32(Align));
else
- KnownZero.clear();
- KnownOne.clear();
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
return;
}
// A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
// the bits of its aliasee.
if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (GA->mayBeOverridden()) {
- KnownZero.clear(); KnownOne.clear();
+ KnownZero.clearAllBits(); KnownOne.clearAllBits();
} else {
ComputeMaskedBits(GA->getAliasee(), Mask, KnownZero, KnownOne,
TD, Depth+1);
@@ -119,7 +132,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
- KnownZero.clear(); KnownOne.clear(); // Start out not knowing anything.
+ KnownZero.clearAllBits(); KnownOne.clearAllBits(); // Start out not knowing anything.
if (Depth == MaxDepth || Mask == 0)
return; // Limit search depth.
@@ -185,7 +198,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
      // Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- KnownOne.clear();
+ KnownOne.clearAllBits();
unsigned TrailZ = KnownZero.countTrailingOnes() +
KnownZero2.countTrailingOnes();
unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
@@ -208,8 +221,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
- KnownOne2.clear();
- KnownZero2.clear();
+ KnownOne2.clearAllBits();
+ KnownZero2.clearAllBits();
ComputeMaskedBits(I->getOperand(1),
AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
@@ -255,14 +268,13 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
else
SrcBitWidth = SrcTy->getScalarSizeInBits();
- APInt MaskIn(Mask);
- MaskIn.zextOrTrunc(SrcBitWidth);
- KnownZero.zextOrTrunc(SrcBitWidth);
- KnownOne.zextOrTrunc(SrcBitWidth);
+ APInt MaskIn = Mask.zextOrTrunc(SrcBitWidth);
+ KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
+ KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
Depth+1);
- KnownZero.zextOrTrunc(BitWidth);
- KnownOne.zextOrTrunc(BitWidth);
+ KnownZero = KnownZero.zextOrTrunc(BitWidth);
+ KnownOne = KnownOne.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
@@ -284,15 +296,14 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
- APInt MaskIn(Mask);
- MaskIn.trunc(SrcBitWidth);
- KnownZero.trunc(SrcBitWidth);
- KnownOne.trunc(SrcBitWidth);
+ APInt MaskIn = Mask.trunc(SrcBitWidth);
+ KnownZero = KnownZero.trunc(SrcBitWidth);
+ KnownOne = KnownOne.trunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
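
The same APInt migration repeats throughout this diff: trunc, zext, sext, and zextOrTrunc now return a fresh value instead of resizing in place, so every call site assigns the result back. A minimal illustration, assuming LLVM's APInt header of this vintage:

#include "llvm/ADT/APInt.h"
using llvm::APInt;

// Narrow to SrcBits, then widen back to DstBits with zero extension.
APInt narrowThenWiden(APInt X, unsigned SrcBits, unsigned DstBits) {
  X = X.trunc(SrcBits);  // older code wrote X.trunc(SrcBits) for its effect
  X = X.zext(DstBits);   // likewise for zext/sext/zextOrTrunc
  return X;
}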
@@ -338,7 +349,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
// Compute the new bits that are at the top now.
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
+ uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Signed shift right.
APInt Mask2(Mask.shl(ShiftAmt));
@@ -474,7 +485,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
- KnownOne.clear();
+ KnownOne.clearAllBits();
KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
break;
}
@@ -579,6 +590,10 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
}
+ // Unreachable blocks may have zero-operand PHI nodes.
+ if (P->getNumIncomingValues() == 0)
+ return;
+
// Otherwise take the unions of the known bit sets of the operands,
// taking conservative care to avoid excessive recursion.
if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
@@ -621,6 +636,156 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
}
+/// ComputeSignBit - Determine whether the sign bit is known to be zero or
+/// one. Convenience wrapper around ComputeMaskedBits.
+void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+ const TargetData *TD, unsigned Depth) {
+ unsigned BitWidth = getBitWidth(V->getType(), TD);
+ if (!BitWidth) {
+ KnownZero = false;
+ KnownOne = false;
+ return;
+ }
+ APInt ZeroBits(BitWidth, 0);
+ APInt OneBits(BitWidth, 0);
+ ComputeMaskedBits(V, APInt::getSignBit(BitWidth), ZeroBits, OneBits, TD,
+ Depth);
+ KnownOne = OneBits[BitWidth - 1];
+ KnownZero = ZeroBits[BitWidth - 1];
+}
+
+/// isPowerOfTwo - Return true if the given value is known to have exactly one
+/// bit set when defined. For vectors return true if every element is known to
+/// be a power of two when defined. Supports values with integer or pointer
+/// types and vectors of integers.
+bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ return CI->getValue().isPowerOf2();
+ // TODO: Handle vector constants.
+
+ // 1 << X is clearly a power of two if the one is not shifted off the end. If
+ // it is shifted off the end then the result is undefined.
+ if (match(V, m_Shl(m_One(), m_Value())))
+ return true;
+
+ // (signbit) >>l X is clearly a power of two if the one is not shifted off the
+ // bottom. If it is shifted off the bottom then the result is undefined.
+ if (match(V, m_LShr(m_SignBit(), m_Value())))
+ return true;
+
+ // The remaining tests are all recursive, so bail out if we hit the limit.
+ if (Depth++ == MaxDepth)
+ return false;
+
+ if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
+ return isPowerOfTwo(ZI->getOperand(0), TD, Depth);
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(V))
+ return isPowerOfTwo(SI->getTrueValue(), TD, Depth) &&
+ isPowerOfTwo(SI->getFalseValue(), TD, Depth);
+
+ return false;
+}
+
+/// isKnownNonZero - Return true if the given value is known to be non-zero
+/// when defined. For vectors return true if every element is known to be
+/// non-zero when defined. Supports values with integer or pointer type and
+/// vectors of integers.
+bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ if (C->isNullValue())
+ return false;
+ if (isa<ConstantInt>(C))
+ // Must be non-zero due to null test above.
+ return true;
+ // TODO: Handle vectors
+ return false;
+ }
+
+ // The remaining tests are all recursive, so bail out if we hit the limit.
+ if (Depth++ == MaxDepth)
+ return false;
+
+ unsigned BitWidth = getBitWidth(V->getType(), TD);
+
+ // X | Y != 0 if X != 0 or Y != 0.
+ Value *X = 0, *Y = 0;
+ if (match(V, m_Or(m_Value(X), m_Value(Y))))
+ return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);
+
+ // ext X != 0 if X != 0.
+ if (isa<SExtInst>(V) || isa<ZExtInst>(V))
+ return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);
+
+ // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
+ // if the lowest bit is shifted off the end.
+ if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ ComputeMaskedBits(X, APInt(BitWidth, 1), KnownZero, KnownOne, TD, Depth);
+ if (KnownOne[0])
+ return true;
+ }
+ // shr X, Y != 0 if X is negative. Note that the value of the shift is not
+ // defined if the sign bit is shifted off the end.
+ else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
+ bool XKnownNonNegative, XKnownNegative;
+ ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
+ if (XKnownNegative)
+ return true;
+ }
+ // X + Y.
+ else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
+ bool XKnownNonNegative, XKnownNegative;
+ bool YKnownNonNegative, YKnownNegative;
+ ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
+ ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);
+
+ // If X and Y are both non-negative (as signed values) then their sum is not
+ // zero unless both X and Y are zero.
+ if (XKnownNonNegative && YKnownNonNegative)
+ if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
+ return true;
+
+ // If X and Y are both negative (as signed values) then their sum is not
+ // zero unless both X and Y equal INT_MIN.
+ if (BitWidth && XKnownNegative && YKnownNegative) {
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ APInt Mask = APInt::getSignedMaxValue(BitWidth);
+ // The sign bit of X is set. If some other bit is set then X is not equal
+ // to INT_MIN.
+ ComputeMaskedBits(X, Mask, KnownZero, KnownOne, TD, Depth);
+ if ((KnownOne & Mask) != 0)
+ return true;
+ // The sign bit of Y is set. If some other bit is set then Y is not equal
+ // to INT_MIN.
+ ComputeMaskedBits(Y, Mask, KnownZero, KnownOne, TD, Depth);
+ if ((KnownOne & Mask) != 0)
+ return true;
+ }
+
+ // The sum of a non-negative number and a power of two is not zero.
+ if (XKnownNonNegative && isPowerOfTwo(Y, TD, Depth))
+ return true;
+ if (YKnownNonNegative && isPowerOfTwo(X, TD, Depth))
+ return true;
+ }
+ // (C ? X : Y) != 0 if X != 0 and Y != 0.
+ else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
+ if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
+ isKnownNonZero(SI->getFalseValue(), TD, Depth))
+ return true;
+ }
+
+ if (!BitWidth) return false;
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ ComputeMaskedBits(V, APInt::getAllOnesValue(BitWidth), KnownZero, KnownOne,
+ TD, Depth);
+ return KnownOne != 0;
+}
+
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
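
One of the isKnownNonZero rules above, that a non-negative value plus a power of two cannot be zero, is easy to sanity-check exhaustively at a small bit width:

#include <cstdint>
#include <iostream>

int main() {
  for (int X = 0; X < 256; ++X)
    for (int Y = 0; Y < 256; ++Y) {
      bool XNonNeg = (int8_t)X >= 0;                // non-negative as signed
      bool YPow2 = Y != 0 && (Y & (Y - 1)) == 0;    // power of two
      uint8_t Sum = (uint8_t)(X + Y);               // wrapping 8-bit add
      if (XNonNeg && YPow2 && Sum == 0) {
        std::cout << "counterexample: " << X << " + " << Y << "\n";
        return 1;
      }
    }
  std::cout << "rule holds for all 8-bit pairs\n";
}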
@@ -679,6 +844,13 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
Tmp += C->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
+ // vector ashr X, <C, C, C, C> -> adds C sign bits
+ if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
+ Tmp += CI->getZExtValue();
+ if (Tmp > TyBits) Tmp = TyBits;
+ }
+ }
return Tmp;
case Instruction::Shl:
if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
@@ -875,8 +1047,9 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
// Turn Op0 << Op1 into Op0 * 2^Op1
APInt Op1Int = Op1CI->getValue();
uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
- Op1 = ConstantInt::get(V->getContext(),
- APInt(Op1Int.getBitWidth(), 0).set(BitToSet));
+ APInt API(Op1Int.getBitWidth(), 0);
+ API.setBit(BitToSet);
+ Op1 = ConstantInt::get(V->getContext(), API);
}
Value *Mul0 = NULL;
@@ -982,6 +1155,80 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
return false;
}
+/// isBytewiseValue - If the specified value can be set by repeating the same
+/// byte in memory, return the i8 value that it is represented with. This is
+/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
+/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
+/// byte store (e.g. i16 0x1234), return null.
+Value *llvm::isBytewiseValue(Value *V) {
+ // All byte-wide stores are splatable, even of arbitrary variables.
+ if (V->getType()->isIntegerTy(8)) return V;
+
+  // Handle 'null' ConstantAggregateZero etc.
+ if (Constant *C = dyn_cast<Constant>(V))
+ if (C->isNullValue())
+ return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
+
+ // Constant float and double values can be handled as integer values if the
+ // corresponding integer value is "byteable". An important case is 0.0.
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+ if (CFP->getType()->isFloatTy())
+ V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
+ if (CFP->getType()->isDoubleTy())
+ V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
+ // Don't handle long double formats, which have strange constraints.
+ }
+
+ // We can handle constant integers that are power of two in size and a
+ // multiple of 8 bits.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ unsigned Width = CI->getBitWidth();
+ if (isPowerOf2_32(Width) && Width > 8) {
+ // We can handle this value if the recursive binary decomposition is the
+ // same at all levels.
+ APInt Val = CI->getValue();
+ APInt Val2;
+ while (Val.getBitWidth() != 8) {
+ unsigned NextWidth = Val.getBitWidth()/2;
+ Val2 = Val.lshr(NextWidth);
+ Val2 = Val2.trunc(Val.getBitWidth()/2);
+ Val = Val.trunc(Val.getBitWidth()/2);
+
+ // If the top/bottom halves aren't the same, reject it.
+ if (Val != Val2)
+ return 0;
+ }
+ return ConstantInt::get(V->getContext(), Val);
+ }
+ }
+
+ // A ConstantArray is splatable if all its members are equal and also
+ // splatable.
+ if (ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
+ if (CA->getNumOperands() == 0)
+ return 0;
+
+ Value *Val = isBytewiseValue(CA->getOperand(0));
+ if (!Val)
+ return 0;
+
+ for (unsigned I = 1, E = CA->getNumOperands(); I != E; ++I)
+ if (CA->getOperand(I-1) != CA->getOperand(I))
+ return 0;
+
+ return Val;
+ }
+
+ // Conceptually, we could handle things like:
+ // %a = zext i8 %X to i16
+ // %b = shl i16 %a, 8
+ // %c = or i16 %a, %b
+ // but until there is an example that actually needs this, it doesn't seem
+ // worth worrying about.
+ return 0;
+}
+
+
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
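
The halving loop in isBytewiseValue above is a standard splat test: keep comparing the top and bottom halves until 8 bits remain. A standalone version over uint64_t:

#include <cstdint>
#include <iostream>

// Returns true and sets Byte if V, taken as a Width-bit integer (Width a
// power of two in [8, 64]), is the same byte repeated.
static bool byteSplat(uint64_t V, unsigned Width, uint8_t &Byte) {
  while (Width != 8) {
    unsigned Half = Width / 2;            // Half <= 32, so the shift is safe
    uint64_t Mask = (1ULL << Half) - 1;
    uint64_t Lo = V & Mask, Hi = (V >> Half) & Mask;
    if (Lo != Hi)
      return false;                       // halves differ: not a splat
    V = Lo;
    Width = Half;
  }
  Byte = (uint8_t)V;
  return true;
}

int main() {
  uint8_t B;
  std::cout << byteSplat(0xF0F0, 16, B) << "\n";  // 1 (repeated byte 0xF0)
  std::cout << byteSplat(0x1234, 16, B) << "\n";  // 0
}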
@@ -1159,6 +1406,47 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
return 0;
}
+/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
+/// it can be expressed as a base pointer plus a constant offset. Return the
+/// base and offset to the caller.
+Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
+ const TargetData &TD) {
+ Operator *PtrOp = dyn_cast<Operator>(Ptr);
+ if (PtrOp == 0) return Ptr;
+
+ // Just look through bitcasts.
+ if (PtrOp->getOpcode() == Instruction::BitCast)
+ return GetPointerBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);
+
+ // If this is a GEP with constant indices, we can look through it.
+ GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
+ if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
+ ++I, ++GTI) {
+ ConstantInt *OpC = cast<ConstantInt>(*I);
+ if (OpC->isZero()) continue;
+
+    // Handle struct and array indices, which add their offset to the pointer.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+ } else {
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ Offset += OpC->getSExtValue()*Size;
+ }
+ }
+
+ // Re-sign extend from the pointer size if needed to get overflow edge cases
+ // right.
+ unsigned PtrSize = TD.getPointerSizeInBits();
+ if (PtrSize < 64)
+ Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
+
+ return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
+}
+
+
/// GetConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
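
The shift pair at the end of GetPointerBaseWithConstantOffset is the classic sign-extend-from-N-bits trick; isolating it makes the wraparound behavior easy to see. The unsigned cast in this sketch sidesteps the signed-left-shift pitfall that the original code tolerates.

#include <cstdint>
#include <iostream>

// Reinterpret the low PtrSize bits of Offset as a signed quantity.
static int64_t signExtendFrom(int64_t Offset, unsigned PtrSize) {
  if (PtrSize < 64)
    Offset = (int64_t)((uint64_t)Offset << (64 - PtrSize)) >> (64 - PtrSize);
  return Offset;
}

int main() {
  // On a hypothetical 32-bit target an accumulated 0xFFFFFFFF means -1.
  std::cout << signExtendFrom(0xFFFFFFFFLL, 32) << "\n";  // -1
  std::cout << signExtendFrom(1, 32) << "\n";             // 1
}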
@@ -1386,3 +1674,32 @@ uint64_t llvm::GetStringLength(Value *V) {
// an empty string as a length.
return Len == ~0ULL ? 1 : Len;
}
+
+Value *
+llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
+ if (!V->getType()->isPointerTy())
+ return V;
+ for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ return V;
+ V = GA->getAliasee();
+ } else {
+ // See if InstructionSimplify knows any relevant tricks.
+ if (Instruction *I = dyn_cast<Instruction>(V))
+      // TODO: Acquire a DominatorTree and use it.
+ if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
+ V = Simplified;
+ continue;
+ }
+
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ }
+ return V;
+}
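
GetUnderlyingObject is a bounded fixed-point loop: strip one wrapper per iteration until nothing strips or the lookup budget runs out, with a MaxLookup of 0 meaning unlimited. The same shape, with a hypothetical string-based stripper standing in for GEPs, bitcasts, and aliases:

#include <iostream>
#include <string>

// Hypothetical one-step stripper: returns its input when nothing strips.
static std::string stripOnce(const std::string &V) {
  if (V.size() > 4 && V.compare(0, 4, "gep:") == 0) return V.substr(4);
  if (V.size() > 5 && V.compare(0, 5, "cast:") == 0) return V.substr(5);
  return V;
}

static std::string underlyingObject(std::string V, unsigned MaxLookup) {
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    std::string Next = stripOnce(V);
    if (Next == V) return V;  // fixed point: nothing else to strip
    V = Next;
  }
  return V;                   // lookup budget exhausted
}

int main() {
  std::cout << underlyingObject("gep:cast:gep:base", 0) << "\n";  // base
  std::cout << underlyingObject("gep:cast:gep:base", 2) << "\n";  // gep:base
}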
diff --git a/contrib/llvm/lib/Archive/Archive.cpp b/contrib/llvm/lib/Archive/Archive.cpp
index 54c715c..1eab27d 100644
--- a/contrib/llvm/lib/Archive/Archive.cpp
+++ b/contrib/llvm/lib/Archive/Archive.cpp
@@ -15,8 +15,10 @@
#include "ArchiveInternals.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Module.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/System/Process.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/system_error.h"
#include <memory>
#include <cstring>
using namespace llvm;
@@ -65,8 +67,9 @@ ArchiveMember::ArchiveMember(Archive* PAR)
// different file, presumably as an update to the member. It also makes sure
// the flags are reset correctly.
bool ArchiveMember::replaceWith(const sys::Path& newFile, std::string* ErrMsg) {
- if (!newFile.exists()) {
- if (ErrMsg)
+ bool Exists;
+ if (sys::fs::exists(newFile.str(), Exists) || !Exists) {
+ if (ErrMsg)
*ErrMsg = "Can not replace an archive member with a non-existent file";
return true;
}
@@ -113,11 +116,10 @@ bool ArchiveMember::replaceWith(const sys::Path& newFile, std::string* ErrMsg) {
// Get the signature and status info
const char* signature = (const char*) data;
- std::string magic;
+ SmallString<4> magic;
if (!signature) {
- path.getMagicNumber(magic,4);
+ sys::fs::get_magic(path.str(), magic.capacity(), magic);
signature = magic.c_str();
- std::string err;
const sys::FileStatus *FSinfo = path.getFileStatus(false, ErrMsg);
if (FSinfo)
info = *FSinfo;
@@ -147,9 +149,13 @@ Archive::Archive(const sys::Path& filename, LLVMContext& C)
bool
Archive::mapToMemory(std::string* ErrMsg) {
- mapfile = MemoryBuffer::getFile(archPath.c_str(), ErrMsg);
- if (mapfile == 0)
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFile(archPath.c_str(), File)) {
+ if (ErrMsg)
+ *ErrMsg = ec.message();
return true;
+ }
+ mapfile = File.take();
base = mapfile->getBufferStart();
return false;
}
@@ -159,19 +165,19 @@ void Archive::cleanUpMemory() {
delete mapfile;
mapfile = 0;
base = 0;
-
+
// Forget the entire symbol table
symTab.clear();
symTabSize = 0;
-
+
firstFileOffset = 0;
-
+
// Free the foreign symbol table member
if (foreignST) {
delete foreignST;
foreignST = 0;
}
-
+
// Delete any Modules and ArchiveMember's we've allocated as a result of
// symbol table searches.
for (ModuleMap::iterator I=modules.begin(), E=modules.end(); I != E; ++I ) {
@@ -193,7 +199,7 @@ static void getSymbols(Module*M, std::vector<std::string>& symbols) {
if (!GI->isDeclaration() && !GI->hasLocalLinkage())
if (!GI->getName().empty())
symbols.push_back(GI->getName());
-
+
// Loop over functions
for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
if (!FI->isDeclaration() && !FI->hasLocalLinkage())
@@ -213,20 +219,20 @@ bool llvm::GetBitcodeSymbols(const sys::Path& fName,
LLVMContext& Context,
std::vector<std::string>& symbols,
std::string* ErrMsg) {
- std::auto_ptr<MemoryBuffer> Buffer(
- MemoryBuffer::getFileOrSTDIN(fName.c_str()));
- if (!Buffer.get()) {
- if (ErrMsg) *ErrMsg = "Could not open file '" + fName.str() + "'";
+ OwningPtr<MemoryBuffer> Buffer;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(fName.c_str(), Buffer)) {
+ if (ErrMsg) *ErrMsg = "Could not open file '" + fName.str() + "'" + ": "
+ + ec.message();
return true;
}
-
+
Module *M = ParseBitcodeFile(Buffer.get(), Context, ErrMsg);
if (!M)
return true;
-
+
// Get the symbols
getSymbols(M, symbols);
-
+
// Done with the module.
delete M;
return true;
@@ -239,16 +245,16 @@ llvm::GetBitcodeSymbols(const char *BufPtr, unsigned Length,
std::vector<std::string>& symbols,
std::string* ErrMsg) {
// Get the module.
- std::auto_ptr<MemoryBuffer> Buffer(
+ OwningPtr<MemoryBuffer> Buffer(
MemoryBuffer::getMemBufferCopy(StringRef(BufPtr, Length),ModuleID.c_str()));
-
+
Module *M = ParseBitcodeFile(Buffer.get(), Context, ErrMsg);
if (!M)
return 0;
-
+
// Get the symbols
getSymbols(M, symbols);
-
+
// Done with the module. Note that it's the caller's responsibility to delete
// the Module.
return M;
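
The MemoryBuffer::getFile migration visible above follows one pattern everywhere: failure comes back as an error_code, the buffer arrives through an OwningPtr out-parameter, and this codebase's convention of returning true on error is preserved. A sketch of the idiom, using only calls that appear in this diff:

#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/system_error.h"
#include <string>
using namespace llvm;

static bool readFile(const char *Path, OwningPtr<MemoryBuffer> &File,
                     std::string *ErrMsg) {
  if (error_code ec = MemoryBuffer::getFile(Path, File)) {
    if (ErrMsg)
      *ErrMsg = ec.message();
    return true;   // true signals failure, matching Archive's convention
  }
  return false;
}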
diff --git a/contrib/llvm/lib/Archive/ArchiveInternals.h b/contrib/llvm/lib/Archive/ArchiveInternals.h
index 08f20e7..55684f7 100644
--- a/contrib/llvm/lib/Archive/ArchiveInternals.h
+++ b/contrib/llvm/lib/Archive/ArchiveInternals.h
@@ -15,7 +15,7 @@
#define LIB_ARCHIVE_ARCHIVEINTERNALS_H
#include "llvm/Bitcode/Archive.h"
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/ADT/StringExtras.h"
#include <cstring>
diff --git a/contrib/llvm/lib/Archive/ArchiveWriter.cpp b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
index 7eeeb59..c5ad5fc 100644
--- a/contrib/llvm/lib/Archive/ArchiveWriter.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
@@ -15,9 +15,12 @@
#include "llvm/Module.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/System/Process.h"
-#include "llvm/System/Signals.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
#include <fstream>
#include <ostream>
#include <iomanip>
@@ -25,7 +28,7 @@ using namespace llvm;
// Write an integer using variable bit rate encoding. This saves a few bytes
// per entry in the symbol table.
-static inline void writeInteger(unsigned num, std::ofstream& ARFile) {
+static inline void writeInteger(unsigned num, raw_ostream& ARFile) {
while (1) {
if (num < 0x80) { // done?
ARFile << (unsigned char)num;
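
writeInteger emits 7 payload bits per byte, setting the high bit on every byte except the last; pairing it with a matching decoder makes the format concrete. A standalone sketch (the vector buffer is illustrative; the real code writes to a raw_ostream):

#include <cstdint>
#include <iostream>
#include <vector>

static void writeInteger(unsigned Num, std::vector<uint8_t> &Out) {
  while (true) {
    if (Num < 0x80) {                               // fits in 7 bits: done
      Out.push_back((uint8_t)Num);
      return;
    }
    Out.push_back((uint8_t)(0x80 | (Num & 0x7F)));  // more bytes follow
    Num >>= 7;
  }
}

static unsigned readInteger(const std::vector<uint8_t> &In, size_t &Pos) {
  unsigned Result = 0, Shift = 0;
  while (In[Pos] & 0x80) {
    Result |= (unsigned)(In[Pos++] & 0x7F) << Shift;
    Shift += 7;
  }
  return Result | ((unsigned)In[Pos++] << Shift);
}

int main() {
  std::vector<uint8_t> Buf;
  writeInteger(300, Buf);                      // encodes as 0xAC 0x02
  size_t Pos = 0;
  std::cout << readInteger(Buf, Pos) << "\n";  // 300
}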
@@ -153,9 +156,10 @@ Archive::fillHeader(const ArchiveMember &mbr, ArchiveMemberHeader& hdr,
// Insert a file into the archive before some other member. This also takes care
// of extracting the necessary flags and information from the file.
bool
-Archive::addFileBefore(const sys::Path& filePath, iterator where,
+Archive::addFileBefore(const sys::Path& filePath, iterator where,
std::string* ErrMsg) {
- if (!filePath.exists()) {
+ bool Exists;
+ if (sys::fs::exists(filePath.str(), Exists) || !Exists) {
if (ErrMsg)
*ErrMsg = "Can not add a non-existent file to archive";
return true;
@@ -178,9 +182,11 @@ Archive::addFileBefore(const sys::Path& filePath, iterator where,
flags |= ArchiveMember::HasPathFlag;
if (hasSlash || filePath.str().length() > 15)
flags |= ArchiveMember::HasLongFilenameFlag;
- std::string magic;
- mbr->path.getMagicNumber(magic,4);
- switch (sys::IdentifyFileType(magic.c_str(),4)) {
+
+ sys::LLVMFileType type;
+ if (sys::fs::identify_magic(mbr->path.str(), type))
+ type = sys::Unknown_FileType;
+ switch (type) {
case sys::Bitcode_FileType:
flags |= ArchiveMember::BitcodeFlag;
break;
@@ -196,14 +202,14 @@ Archive::addFileBefore(const sys::Path& filePath, iterator where,
bool
Archive::writeMember(
const ArchiveMember& member,
- std::ofstream& ARFile,
+ raw_ostream& ARFile,
bool CreateSymbolTable,
bool TruncateNames,
bool ShouldCompress,
std::string* ErrMsg
) {
- unsigned filepos = ARFile.tellp();
+ unsigned filepos = ARFile.tell();
filepos -= 8;
// Get the data and its size either from the
@@ -212,9 +218,13 @@ Archive::writeMember(
const char *data = (const char*)member.getData();
MemoryBuffer *mFile = 0;
if (!data) {
- mFile = MemoryBuffer::getFile(member.getPath().c_str(), ErrMsg);
- if (mFile == 0)
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFile(member.getPath().c_str(), File)) {
+ if (ErrMsg)
+ *ErrMsg = ec.message();
return true;
+ }
+ mFile = File.take();
data = mFile->getBufferStart();
fSize = mFile->getBufferSize();
}
@@ -225,7 +235,7 @@ Archive::writeMember(
std::vector<std::string> symbols;
std::string FullMemberName = archPath.str() + "(" + member.getPath().str()
+ ")";
- Module* M =
+ Module* M =
GetBitcodeSymbols(data, fSize, FullMemberName, Context, symbols, ErrMsg);
// If the bitcode parsed successfully
@@ -272,7 +282,7 @@ Archive::writeMember(
ARFile.write(data,fSize);
// Make sure the member is an even length
- if ((ARFile.tellp() & 1) == 1)
+ if ((ARFile.tell() & 1) == 1)
ARFile << ARFILE_PAD;
// Close the mapped file if it was opened
@@ -282,7 +292,7 @@ Archive::writeMember(
// Write out the LLVM symbol table as an archive member to the file.
void
-Archive::writeSymbolTable(std::ofstream& ARFile) {
+Archive::writeSymbolTable(raw_ostream& ARFile) {
// Construct the symbol table's header
ArchiveMemberHeader Hdr;
@@ -306,7 +316,7 @@ Archive::writeSymbolTable(std::ofstream& ARFile) {
#ifndef NDEBUG
// Save the starting position of the symbol tables data content.
- unsigned startpos = ARFile.tellp();
+ unsigned startpos = ARFile.tell();
#endif
// Write out the symbols sequentially
@@ -323,7 +333,7 @@ Archive::writeSymbolTable(std::ofstream& ARFile) {
#ifndef NDEBUG
// Now that we're done with the symbol table, get the ending file position
- unsigned endpos = ARFile.tellp();
+ unsigned endpos = ARFile.tell();
#endif
// Make sure that the amount we wrote is what we pre-computed. This is
@@ -352,25 +362,20 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
}
// Create a temporary file to store the archive in
- sys::Path TmpArchive = archPath;
- if (TmpArchive.createTemporaryFileOnDisk(ErrMsg))
+ SmallString<128> TempArchivePath;
+ int ArchFD;
+ if (error_code ec =
+ sys::fs::unique_file("%%-%%-%%-%%-" + sys::path::filename(archPath.str()),
+ ArchFD, TempArchivePath)) {
+ if (ErrMsg) *ErrMsg = ec.message();
return true;
+ }
// Make sure the temporary gets removed if we crash
- sys::RemoveFileOnSignal(TmpArchive);
+ sys::RemoveFileOnSignal(sys::Path(TempArchivePath.str()));
// Create archive file for output.
- std::ios::openmode io_mode = std::ios::out | std::ios::trunc |
- std::ios::binary;
- std::ofstream ArchiveFile(TmpArchive.c_str(), io_mode);
-
- // Check for errors opening or creating archive file.
- if (!ArchiveFile.is_open() || ArchiveFile.bad()) {
- TmpArchive.eraseFromDisk();
- if (ErrMsg)
- *ErrMsg = "Error opening archive file: " + archPath.str();
- return true;
- }
+ raw_fd_ostream ArchiveFile(ArchFD, true);
// If we're creating a symbol table, reset it now
if (CreateSymbolTable) {
@@ -386,8 +391,9 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
for (MembersList::iterator I = begin(), E = end(); I != E; ++I) {
if (writeMember(*I, ArchiveFile, CreateSymbolTable,
TruncateNames, Compress, ErrMsg)) {
- TmpArchive.eraseFromDisk();
ArchiveFile.close();
+ bool existed;
+ sys::fs::remove(TempArchivePath.str(), existed);
return true;
}
}
@@ -402,27 +408,29 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
// ensure compatibility with other archivers we need to put the symbol
// table first in the file. Unfortunately, this means mapping the file
// we just wrote back in and copying it to the destination file.
- sys::Path FinalFilePath = archPath;
+ SmallString<128> TempArchiveWithSymbolTablePath;
// Map in the archive we just wrote.
{
- OwningPtr<MemoryBuffer> arch(MemoryBuffer::getFile(TmpArchive.c_str()));
- if (arch == 0) return true;
+ OwningPtr<MemoryBuffer> arch;
+ if (error_code ec = MemoryBuffer::getFile(TempArchivePath.c_str(), arch)) {
+ if (ErrMsg)
+ *ErrMsg = ec.message();
+ return true;
+ }
const char* base = arch->getBufferStart();
- // Open another temporary file in order to avoid invalidating the
+ // Open another temporary file in order to avoid invalidating the
// mmapped data
- if (FinalFilePath.createTemporaryFileOnDisk(ErrMsg))
- return true;
- sys::RemoveFileOnSignal(FinalFilePath);
-
- std::ofstream FinalFile(FinalFilePath.c_str(), io_mode);
- if (!FinalFile.is_open() || FinalFile.bad()) {
- TmpArchive.eraseFromDisk();
- if (ErrMsg)
- *ErrMsg = "Error opening archive file: " + FinalFilePath.str();
+ if (error_code ec =
+ sys::fs::unique_file("%%-%%-%%-%%-" + sys::path::filename(archPath.str()),
+ ArchFD, TempArchiveWithSymbolTablePath)) {
+ if (ErrMsg) *ErrMsg = ec.message();
return true;
}
+ sys::RemoveFileOnSignal(sys::Path(TempArchiveWithSymbolTablePath.str()));
+
+ raw_fd_ostream FinalFile(ArchFD, true);
// Write the file magic number
FinalFile << ARFILE_MAGIC;
@@ -435,7 +443,8 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
if (foreignST) {
if (writeMember(*foreignST, FinalFile, false, false, false, ErrMsg)) {
FinalFile.close();
- TmpArchive.eraseFromDisk();
+ bool existed;
+ sys::fs::remove(TempArchiveWithSymbolTablePath.str(), existed);
return true;
}
}
@@ -451,19 +460,25 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
// Close up shop
FinalFile.close();
} // free arch.
-
+
// Move the final file over top of TmpArchive
- if (FinalFilePath.renamePathOnDisk(TmpArchive, ErrMsg))
+ if (error_code ec = sys::fs::rename(TempArchiveWithSymbolTablePath.str(),
+ TempArchivePath.str())) {
+ if (ErrMsg) *ErrMsg = ec.message();
return true;
+ }
}
-
+
// Before we replace the actual archive, we need to forget all the
// members, since they point to data in that old archive. We need to do
// this because we cannot replace an open file on Windows.
cleanUpMemory();
-
- if (TmpArchive.renamePathOnDisk(archPath, ErrMsg))
+
+ if (error_code ec = sys::fs::rename(TempArchivePath.str(),
+ archPath.str())) {
+ if (ErrMsg) *ErrMsg = ec.message();
return true;
+ }
// Set correct read and write permissions after temporary file is moved
// to final destination path.
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index 032753a..857fa1e 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -15,18 +15,20 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instruction.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Assembly/Parser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Assembly/Parser.h"
+#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
using namespace llvm;
-bool LLLexer::Error(LocTy ErrorLoc, const std::string &Msg) const {
+bool LLLexer::Error(LocTy ErrorLoc, const Twine &Msg) const {
ErrorInfo = SM.GetMessage(ErrorLoc, Msg, "error");
return true;
}
@@ -507,6 +509,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(default);
KEYWORD(hidden);
KEYWORD(protected);
+ KEYWORD(unnamed_addr);
KEYWORD(extern_weak);
KEYWORD(external);
KEYWORD(thread_local);
@@ -544,6 +547,8 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(arm_aapcscc);
KEYWORD(arm_aapcs_vfpcc);
KEYWORD(msp430_intrcc);
+ KEYWORD(ptx_kernel);
+ KEYWORD(ptx_device);
KEYWORD(cc);
KEYWORD(c);
@@ -570,6 +575,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(noredzone);
KEYWORD(noimplicitfloat);
KEYWORD(naked);
+ KEYWORD(hotpatch);
KEYWORD(type);
KEYWORD(opaque);
@@ -595,6 +601,7 @@ lltok::Kind LLLexer::LexIdentifier() {
TYPEKEYWORD("ppc_fp128", Type::getPPC_FP128Ty(Context));
TYPEKEYWORD("label", Type::getLabelTy(Context));
TYPEKEYWORD("metadata", Type::getMetadataTy(Context));
+ TYPEKEYWORD("x86_mmx", Type::getX86_MMXTy(Context));
#undef TYPEKEYWORD
// Handle special forms for autoupgrading. Drop these in LLVM 3.0. This is
@@ -677,7 +684,7 @@ lltok::Kind LLLexer::LexIdentifier() {
APInt Tmp(bits, StringRef(TokStart+3, len), 16);
uint32_t activeBits = Tmp.getActiveBits();
if (activeBits > 0 && activeBits < bits)
- Tmp.trunc(activeBits);
+ Tmp = Tmp.trunc(activeBits);
APSIntVal = APSInt(Tmp, TokStart[0] == 'u');
return lltok::APSInt;
}
@@ -804,12 +811,12 @@ lltok::Kind LLLexer::LexDigitOrNegative() {
if (TokStart[0] == '-') {
uint32_t minBits = Tmp.getMinSignedBits();
if (minBits > 0 && minBits < numBits)
- Tmp.trunc(minBits);
+ Tmp = Tmp.trunc(minBits);
APSIntVal = APSInt(Tmp, false);
} else {
uint32_t activeBits = Tmp.getActiveBits();
if (activeBits > 0 && activeBits < numBits)
- Tmp.trunc(activeBits);
+ Tmp = Tmp.trunc(activeBits);
APSIntVal = APSInt(Tmp, true);
}
return lltok::APSInt;
@@ -828,7 +835,7 @@ lltok::Kind LLLexer::LexDigitOrNegative() {
}
}
- APFloatVal = APFloat(atof(TokStart));
+ APFloatVal = APFloat(std::atof(TokStart));
return lltok::APFloat;
}
@@ -862,6 +869,6 @@ lltok::Kind LLLexer::LexPositive() {
}
}
- APFloatVal = APFloat(atof(TokStart));
+ APFloatVal = APFloat(std::atof(TokStart));
return lltok::APFloat;
}
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.h b/contrib/llvm/lib/AsmParser/LLLexer.h
index 70f1cfd..09ae801 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.h
+++ b/contrib/llvm/lib/AsmParser/LLLexer.h
@@ -62,8 +62,8 @@ namespace llvm {
const APFloat &getAPFloatVal() const { return APFloatVal; }
- bool Error(LocTy L, const std::string &Msg) const;
- bool Error(const std::string &Msg) const { return Error(getLoc(), Msg); }
+ bool Error(LocTy L, const Twine &Msg) const;
+ bool Error(const Twine &Msg) const { return Error(getLoc(), Msg); }
std::string getFilename() const;
private:
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index f21a065..cdfacbe 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -22,7 +22,6 @@
#include "llvm/Operator.h"
#include "llvm/ValueSymbolTable.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -52,7 +51,7 @@ bool LLParser::ValidateEndOfModule() {
if (SlotNo >= NumberedMetadata.size() || NumberedMetadata[SlotNo] == 0)
return Error(MDList[i].Loc, "use of undefined metadata '!" +
- utostr(SlotNo) + "'");
+ Twine(SlotNo) + "'");
Inst->setMetadata(MDList[i].MDKind, NumberedMetadata[SlotNo]);
}
}
@@ -109,7 +108,7 @@ bool LLParser::ValidateEndOfModule() {
if (!ForwardRefTypeIDs.empty())
return Error(ForwardRefTypeIDs.begin()->second.second,
"use of undefined type '%" +
- utostr(ForwardRefTypeIDs.begin()->first) + "'");
+ Twine(ForwardRefTypeIDs.begin()->first) + "'");
if (!ForwardRefVals.empty())
return Error(ForwardRefVals.begin()->second.second,
@@ -119,12 +118,12 @@ bool LLParser::ValidateEndOfModule() {
if (!ForwardRefValIDs.empty())
return Error(ForwardRefValIDs.begin()->second.second,
"use of undefined value '@" +
- utostr(ForwardRefValIDs.begin()->first) + "'");
+ Twine(ForwardRefValIDs.begin()->first) + "'");
if (!ForwardRefMDNodes.empty())
return Error(ForwardRefMDNodes.begin()->second.second,
"use of undefined metadata '!" +
- utostr(ForwardRefMDNodes.begin()->first) + "'");
+ Twine(ForwardRefMDNodes.begin()->first) + "'");
// Look for intrinsic functions and CallInst that need to be upgraded
@@ -195,7 +194,8 @@ bool LLParser::ParseTopLevelEntities() {
// The Global variable production with no name can have many different
// optional leading prefixes, the production is:
// GlobalVar ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
- // OptionalAddrSpace ('constant'|'global') ...
+  //               OptionalAddrSpace OptionalUnnamedAddr
+ // ('constant'|'global') ...
case lltok::kw_private: // OptionalLinkage
case lltok::kw_linker_private: // OptionalLinkage
case lltok::kw_linker_private_weak: // OptionalLinkage
@@ -317,7 +317,7 @@ bool LLParser::ParseUnnamedType() {
if (Lex.getKind() == lltok::LocalVarID) {
if (Lex.getUIntVal() != TypeID)
return Error(Lex.getLoc(), "type expected to be numbered '%" +
- utostr(TypeID) + "'");
+ Twine(TypeID) + "'");
Lex.Lex(); // eat LocalVarID;
if (ParseToken(lltok::equal, "expected '=' after name"))
@@ -444,7 +444,7 @@ bool LLParser::ParseUnnamedGlobal() {
if (Lex.getKind() == lltok::GlobalID) {
if (Lex.getUIntVal() != VarID)
return Error(Lex.getLoc(), "variable expected to be numbered '%" +
- utostr(VarID) + "'");
+ Twine(VarID) + "'");
Lex.Lex(); // eat GlobalID;
if (ParseToken(lltok::equal, "expected '=' after name"))
@@ -676,16 +676,16 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
// Insert into the module, we know its name won't collide now.
M->getAliasList().push_back(GA);
- assert(GA->getNameStr() == Name && "Should not be a name conflict!");
+ assert(GA->getName() == Name && "Should not be a name conflict!");
return false;
}
/// ParseGlobal
/// ::= GlobalVar '=' OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace GlobalType Type Const
+///         OptionalAddrSpace OptionalUnnamedAddr GlobalType Type Const
/// ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace GlobalType Type Const
+///         OptionalAddrSpace OptionalUnnamedAddr GlobalType Type Const
///
/// Everything through visibility has been parsed already.
///
@@ -693,12 +693,15 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
unsigned Linkage, bool HasLinkage,
unsigned Visibility) {
unsigned AddrSpace;
- bool ThreadLocal, IsConstant;
+ bool ThreadLocal, IsConstant, UnnamedAddr;
+ LocTy UnnamedAddrLoc;
LocTy TyLoc;
PATypeHolder Ty(Type::getVoidTy(Context));
if (ParseOptionalToken(lltok::kw_thread_local, ThreadLocal) ||
ParseOptionalAddrSpace(AddrSpace) ||
+ ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
+ &UnnamedAddrLoc) ||
ParseGlobalType(IsConstant) ||
ParseType(Ty, TyLoc))
return true;
@@ -756,6 +759,7 @@ bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
GV->setLinkage((GlobalValue::LinkageTypes)Linkage);
GV->setVisibility((GlobalValue::VisibilityTypes)Visibility);
GV->setThreadLocal(ThreadLocal);
+ GV->setUnnamedAddr(UnnamedAddr);
// Parse attributes on the global.
while (Lex.getKind() == lltok::comma) {
@@ -855,7 +859,7 @@ GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {
// If we have the value in the symbol table or fwd-ref table, return it.
if (Val) {
if (Val->getType() == Ty) return Val;
- Error(Loc, "'@" + utostr(ID) + "' defined with type '" +
+ Error(Loc, "'@" + Twine(ID) + "' defined with type '" +
Val->getType()->getDescription() + "'");
return 0;
}
@@ -983,6 +987,7 @@ bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
case lltok::kw_noredzone: Attrs |= Attribute::NoRedZone; break;
case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
case lltok::kw_naked: Attrs |= Attribute::Naked; break;
+ case lltok::kw_hotpatch: Attrs |= Attribute::Hotpatch; break;
case lltok::kw_alignstack: {
unsigned Alignment;
@@ -1084,6 +1089,8 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
/// ::= 'arm_aapcscc'
/// ::= 'arm_aapcs_vfpcc'
/// ::= 'msp430_intrcc'
+/// ::= 'ptx_kernel'
+/// ::= 'ptx_device'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
@@ -1099,6 +1106,8 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
case lltok::kw_arm_aapcscc: CC = CallingConv::ARM_AAPCS; break;
case lltok::kw_arm_aapcs_vfpcc:CC = CallingConv::ARM_AAPCS_VFP; break;
case lltok::kw_msp430_intrcc: CC = CallingConv::MSP430_INTR; break;
+ case lltok::kw_ptx_kernel: CC = CallingConv::PTX_Kernel; break;
+ case lltok::kw_ptx_device: CC = CallingConv::PTX_Device; break;
case lltok::kw_cc: {
unsigned ArbitraryCC;
Lex.Lex();
@@ -1128,7 +1137,6 @@ bool LLParser::ParseInstructionMetadata(Instruction *Inst,
Lex.Lex();
MDNode *Node;
- unsigned NodeID;
SMLoc Loc = Lex.getLoc();
if (ParseToken(lltok::exclaim, "expected '!' here"))
@@ -1145,6 +1153,7 @@ bool LLParser::ParseInstructionMetadata(Instruction *Inst,
assert(ID.Kind == ValID::t_MDNode);
Inst->setMetadata(MDK, ID.MDNodeVal);
} else {
+ unsigned NodeID = 0;
if (ParseMDNodeID(Node, NodeID))
return true;
if (Node) {
@@ -1196,8 +1205,7 @@ bool LLParser::ParseOptionalCommaAlign(unsigned &Alignment,
if (Lex.getKind() != lltok::kw_align)
return Error(Lex.getLoc(), "expected metadata or 'align'");
-
- LocTy AlignLoc = Lex.getLoc();
+
if (ParseOptionalAlignment(Alignment)) return true;
}
@@ -1245,7 +1253,7 @@ bool LLParser::ParseIndexList(SmallVectorImpl<unsigned> &Indices,
AteExtraComma = true;
return false;
}
- unsigned Idx;
+ unsigned Idx = 0;
if (ParseUInt32(Idx)) return true;
Indices.push_back(Idx);
}
@@ -1778,7 +1786,7 @@ bool LLParser::PerFunctionState::FinishFunction() {
if (!ForwardRefValIDs.empty())
return P.Error(ForwardRefValIDs.begin()->second.second,
"use of undefined value '%" +
- utostr(ForwardRefValIDs.begin()->first) + "'");
+ Twine(ForwardRefValIDs.begin()->first) + "'");
return false;
}
@@ -1846,9 +1854,9 @@ Value *LLParser::PerFunctionState::GetVal(unsigned ID, const Type *Ty,
if (Val) {
if (Val->getType() == Ty) return Val;
if (Ty->isLabelTy())
- P.Error(Loc, "'%" + utostr(ID) + "' is not a basic block");
+ P.Error(Loc, "'%" + Twine(ID) + "' is not a basic block");
else
- P.Error(Loc, "'%" + utostr(ID) + "' defined with type '" +
+ P.Error(Loc, "'%" + Twine(ID) + "' defined with type '" +
Val->getType()->getDescription() + "'");
return 0;
}
@@ -1890,7 +1898,7 @@ bool LLParser::PerFunctionState::SetInstName(int NameID,
if (unsigned(NameID) != NumberedVals.size())
return P.Error(NameLoc, "instruction expected to be numbered '%" +
- utostr(NumberedVals.size()) + "'");
+ Twine(NumberedVals.size()) + "'");
std::map<unsigned, std::pair<Value*, LocTy> >::iterator FI =
ForwardRefValIDs.find(NameID);
@@ -1922,7 +1930,7 @@ bool LLParser::PerFunctionState::SetInstName(int NameID,
// Set the name on the instruction.
Inst->setName(NameStr);
- if (Inst->getNameStr() != NameStr)
+ if (Inst->getName() != NameStr)
return P.Error(NameLoc, "multiple definition of local value named '" +
NameStr + "'");
return false;
@@ -2068,10 +2076,10 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
for (unsigned i = 1, e = Elts.size(); i != e; ++i)
if (Elts[i]->getType() != Elts[0]->getType())
return Error(FirstEltLoc,
- "vector element #" + utostr(i) +
+ "vector element #" + Twine(i) +
" is not of type '" + Elts[0]->getType()->getDescription());
- ID.ConstantVal = ConstantVector::get(Elts.data(), Elts.size());
+ ID.ConstantVal = ConstantVector::get(Elts);
ID.Kind = ValID::t_Constant;
return false;
}
@@ -2101,7 +2109,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
if (Elts[i]->getType() != Elts[0]->getType())
return Error(FirstEltLoc,
- "array element #" + utostr(i) +
+ "array element #" + Twine(i) +
" is not of type '" +Elts[0]->getType()->getDescription());
}
@@ -2278,7 +2286,10 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
case lltok::kw_fdiv:
case lltok::kw_urem:
case lltok::kw_srem:
- case lltok::kw_frem: {
+ case lltok::kw_frem:
+ case lltok::kw_shl:
+ case lltok::kw_lshr:
+ case lltok::kw_ashr: {
bool NUW = false;
bool NSW = false;
bool Exact = false;
@@ -2286,9 +2297,8 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
Constant *Val0, *Val1;
Lex.Lex();
LocTy ModifierLoc = Lex.getLoc();
- if (Opc == Instruction::Add ||
- Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
+ if (Opc == Instruction::Add || Opc == Instruction::Sub ||
+ Opc == Instruction::Mul || Opc == Instruction::Shl) {
if (EatIfPresent(lltok::kw_nuw))
NUW = true;
if (EatIfPresent(lltok::kw_nsw)) {
@@ -2296,7 +2306,8 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
if (EatIfPresent(lltok::kw_nuw))
NUW = true;
}
- } else if (Opc == Instruction::SDiv) {
+ } else if (Opc == Instruction::SDiv || Opc == Instruction::UDiv ||
+ Opc == Instruction::LShr || Opc == Instruction::AShr) {
if (EatIfPresent(lltok::kw_exact))
Exact = true;
}
@@ -2323,6 +2334,9 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
case Instruction::SDiv:
case Instruction::URem:
case Instruction::SRem:
+ case Instruction::Shl:
+ case Instruction::AShr:
+ case Instruction::LShr:
if (!Val0->getType()->isIntOrIntVectorTy())
return Error(ID.Loc, "constexpr requires integer operands");
break;
@@ -2339,7 +2353,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
unsigned Flags = 0;
if (NUW) Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
if (NSW) Flags |= OverflowingBinaryOperator::NoSignedWrap;
- if (Exact) Flags |= SDivOperator::IsExact;
+ if (Exact) Flags |= PossiblyExactOperator::IsExact;
Constant *C = ConstantExpr::get(Opc, Val0, Val1, Flags);
ID.ConstantVal = C;
ID.Kind = ValID::t_Constant;
@@ -2347,9 +2361,6 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
}
// Logical Operations
- case lltok::kw_shl:
- case lltok::kw_lshr:
- case lltok::kw_ashr:
case lltok::kw_and:
case lltok::kw_or:
case lltok::kw_xor: {
@@ -2572,7 +2583,7 @@ bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
case ValID::t_APSInt:
if (!Ty->isIntegerTy())
return Error(ID.Loc, "integer constant must have integer type");
- ID.APSIntVal.extOrTrunc(Ty->getPrimitiveSizeInBits());
+ ID.APSIntVal = ID.APSIntVal.extOrTrunc(Ty->getPrimitiveSizeInBits());
V = ConstantInt::get(Context, ID.APSIntVal);
return false;
case ValID::t_APFloat:
@@ -2654,7 +2665,7 @@ bool LLParser::ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
/// FunctionHeader
/// ::= OptionalLinkage OptionalVisibility OptionalCallingConv OptRetAttrs
-/// Type GlobalName '(' ArgList ')' OptFuncAttrs OptSection
+/// OptUnnamedAddr Type GlobalName '(' ArgList ')' OptFuncAttrs OptSection
/// OptionalAlign OptGC
bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
// Parse the linkage.
@@ -2714,7 +2725,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
if (NameID != NumberedVals.size())
return TokError("function expected to be numbered '%" +
- utostr(NumberedVals.size()) + "'");
+ Twine(NumberedVals.size()) + "'");
} else {
return TokError("expected function name");
}
@@ -2730,8 +2741,12 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
std::string Section;
unsigned Alignment;
std::string GC;
+ bool UnnamedAddr;
+ LocTy UnnamedAddrLoc;
if (ParseArgumentList(ArgList, isVarArg, false) ||
+ ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
+ &UnnamedAddrLoc) ||
ParseOptionalAttrs(FuncAttrs, 2) ||
(EatIfPresent(lltok::kw_section) &&
ParseStringConstant(Section)) ||
@@ -2821,7 +2836,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
Fn = cast<Function>(I->second.first);
if (Fn->getType() != PFT)
return Error(NameLoc, "type of definition and forward reference of '@" +
- utostr(NumberedVals.size()) +"' disagree");
+ Twine(NumberedVals.size()) + "' disagree");
ForwardRefValIDs.erase(I);
}
}
@@ -2838,6 +2853,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
Fn->setVisibility((GlobalValue::VisibilityTypes)Visibility);
Fn->setCallingConv(CC);
Fn->setAttributes(PAL);
+ Fn->setUnnamedAddr(UnnamedAddr);
Fn->setAlignment(Alignment);
Fn->setSection(Section);
if (!GC.empty()) Fn->setGC(GC.c_str());
@@ -2855,7 +2871,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
// Set the name, if it conflicted, it will be auto-renamed.
ArgIt->setName(ArgList[i].Name);
- if (ArgIt->getNameStr() != ArgList[i].Name)
+ if (ArgIt->getName() != ArgList[i].Name)
return Error(ArgList[i].Loc, "redefinition of argument '%" +
ArgList[i].Name + "'");
}
@@ -2989,55 +3005,38 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
// Binary Operators.
case lltok::kw_add:
case lltok::kw_sub:
- case lltok::kw_mul: {
- bool NUW = false;
- bool NSW = false;
+ case lltok::kw_mul:
+ case lltok::kw_shl: {
LocTy ModifierLoc = Lex.getLoc();
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- if (EatIfPresent(lltok::kw_nsw)) {
- NSW = true;
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- }
- bool Result = ParseArithmetic(Inst, PFS, KeywordVal, 1);
- if (!Result) {
- if (!Inst->getType()->isIntOrIntVectorTy()) {
- if (NUW)
- return Error(ModifierLoc, "nuw only applies to integer operations");
- if (NSW)
- return Error(ModifierLoc, "nsw only applies to integer operations");
- }
- if (NUW)
- cast<BinaryOperator>(Inst)->setHasNoUnsignedWrap(true);
- if (NSW)
- cast<BinaryOperator>(Inst)->setHasNoSignedWrap(true);
- }
- return Result;
+ bool NUW = EatIfPresent(lltok::kw_nuw);
+ bool NSW = EatIfPresent(lltok::kw_nsw);
+ if (!NUW) NUW = EatIfPresent(lltok::kw_nuw);
+
+ if (ParseArithmetic(Inst, PFS, KeywordVal, 1)) return true;
+
+ if (NUW) cast<BinaryOperator>(Inst)->setHasNoUnsignedWrap(true);
+ if (NSW) cast<BinaryOperator>(Inst)->setHasNoSignedWrap(true);
+ return false;
}
case lltok::kw_fadd:
case lltok::kw_fsub:
case lltok::kw_fmul: return ParseArithmetic(Inst, PFS, KeywordVal, 2);
- case lltok::kw_sdiv: {
- bool Exact = false;
- if (EatIfPresent(lltok::kw_exact))
- Exact = true;
- bool Result = ParseArithmetic(Inst, PFS, KeywordVal, 1);
- if (!Result)
- if (Exact)
- cast<BinaryOperator>(Inst)->setIsExact(true);
- return Result;
+ case lltok::kw_sdiv:
+ case lltok::kw_udiv:
+ case lltok::kw_lshr:
+ case lltok::kw_ashr: {
+ bool Exact = EatIfPresent(lltok::kw_exact);
+
+ if (ParseArithmetic(Inst, PFS, KeywordVal, 1)) return true;
+ if (Exact) cast<BinaryOperator>(Inst)->setIsExact(true);
+ return false;
}
- case lltok::kw_udiv:
case lltok::kw_urem:
case lltok::kw_srem: return ParseArithmetic(Inst, PFS, KeywordVal, 1);
case lltok::kw_fdiv:
case lltok::kw_frem: return ParseArithmetic(Inst, PFS, KeywordVal, 2);
- case lltok::kw_shl:
- case lltok::kw_lshr:
- case lltok::kw_ashr:
case lltok::kw_and:
case lltok::kw_or:
case lltok::kw_xor: return ParseLogical(Inst, PFS, KeywordVal);
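
The recurring utostr-to-Twine swaps above are enabled by the Error/TokError signature change in LLParser.h below. A minimal standalone sketch of the idiom (the helper name is hypothetical): a Twine chain records the concatenation lazily and is only rendered when something consumes it, so no intermediate std::string is built per '+'.

    #include "llvm/ADT/Twine.h"
    #include <string>

    // Hypothetical helper showing the idiom used throughout the parser:
    // the chain is rendered exactly once, by .str() (or by a Twine-taking
    // sink such as Error()), instead of once per utostr()/operator+.
    static std::string undefinedValueMsg(unsigned SlotNo) {
      return ("use of undefined value '@" + llvm::Twine(SlotNo) + "'").str();
    }
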
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index 404cec3..93e7f77 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -142,10 +142,10 @@ namespace llvm {
private:
- bool Error(LocTy L, const std::string &Msg) const {
+ bool Error(LocTy L, const Twine &Msg) const {
return Lex.Error(L, Msg);
}
- bool TokError(const std::string &Msg) const {
+ bool TokError(const Twine &Msg) const {
return Error(Lex.getLoc(), Msg);
}
@@ -162,10 +162,12 @@ namespace llvm {
Lex.Lex();
return true;
}
- bool ParseOptionalToken(lltok::Kind T, bool &Present) {
+ bool ParseOptionalToken(lltok::Kind T, bool &Present, LocTy *Loc = 0) {
if (Lex.getKind() != T) {
Present = false;
} else {
+ if (Loc)
+ *Loc = Lex.getLoc();
Lex.Lex();
Present = true;
}
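
Existing two-argument callers compile unchanged because the new Loc parameter defaults to null. A sketch of a call site that wants the location (inside an LLParser member; LocTy is the parser's SMLoc typedef):

    bool UnnamedAddr;
    LocTy UnnamedAddrLoc;        // filled in only when the token is present
    if (ParseOptionalToken(lltok::kw_unnamed_addr, UnnamedAddr,
                           &UnnamedAddrLoc))
      return true;               // matches the ||-chained error idiom above
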
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index 61f93a4..576da19 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -42,6 +42,7 @@ namespace lltok {
kw_linkonce, kw_linkonce_odr, kw_weak, kw_weak_odr, kw_appending,
kw_dllimport, kw_dllexport, kw_common, kw_available_externally,
kw_default, kw_hidden, kw_protected,
+ kw_unnamed_addr,
kw_extern_weak,
kw_external, kw_thread_local,
kw_zeroinitializer,
@@ -72,6 +73,7 @@ namespace lltok {
kw_x86_stdcallcc, kw_x86_fastcallcc, kw_x86_thiscallcc,
kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc,
kw_msp430_intrcc,
+ kw_ptx_kernel, kw_ptx_device,
kw_signext,
kw_zeroext,
@@ -95,6 +97,7 @@ namespace lltok {
kw_noredzone,
kw_noimplicitfloat,
kw_naked,
+ kw_hotpatch,
kw_type,
kw_opaque,
diff --git a/contrib/llvm/lib/AsmParser/Parser.cpp b/contrib/llvm/lib/AsmParser/Parser.cpp
index e7cef9b..59fb471 100644
--- a/contrib/llvm/lib/AsmParser/Parser.cpp
+++ b/contrib/llvm/lib/AsmParser/Parser.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
#include <cstring>
using namespace llvm;
@@ -41,15 +42,14 @@ Module *llvm::ParseAssembly(MemoryBuffer *F,
Module *llvm::ParseAssemblyFile(const std::string &Filename, SMDiagnostic &Err,
LLVMContext &Context) {
- std::string ErrorStr;
- MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrorStr);
- if (F == 0) {
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
Err = SMDiagnostic(Filename,
- "Could not open input file: " + ErrorStr);
+ "Could not open input file: " + ec.message());
return 0;
}
- return ParseAssembly(F, 0, Err, Context);
+ return ParseAssembly(File.take(), 0, Err, Context);
}
Module *llvm::ParseAssemblyString(const char *AsmString, Module *M,
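
The Parser.cpp hunk is an instance of the then-new error_code convention for file loading. A self-contained sketch of the same idiom (function name hypothetical; API as used in the hunk above):

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/system_error.h"
    #include <string>

    // Hypothetical loader mirroring ParseAssemblyFile's new error path:
    // a non-zero error_code means failure, and the OwningPtr keeps the
    // buffer from leaking on every early return.
    static llvm::MemoryBuffer *loadOrNull(const char *Path,
                                          std::string &ErrMsg) {
      llvm::OwningPtr<llvm::MemoryBuffer> File;
      if (llvm::error_code ec =
              llvm::MemoryBuffer::getFileOrSTDIN(Path, File)) {
        ErrMsg = "Could not open input file: " + ec.message();
        return 0;
      }
      return File.take();   // caller now owns the buffer
    }
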
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 830c79a..dbf8da0 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -136,7 +136,6 @@ namespace {
/// @brief A class for maintaining the slot number definition
/// as a placeholder for the actual definition for forward constants defs.
class ConstantPlaceHolder : public ConstantExpr {
- ConstantPlaceHolder(); // DO NOT IMPLEMENT
void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT
public:
// allocate space for exactly one operand
@@ -149,7 +148,7 @@ namespace {
}
/// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const ConstantPlaceHolder *) { return true; }
+ //static inline bool classof(const ConstantPlaceHolder *) { return true; }
static bool classof(const Value *V) {
return isa<ConstantExpr>(V) &&
cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
@@ -163,7 +162,8 @@ namespace {
// FIXME: can we inherit this from ConstantExpr?
template <>
-struct OperandTraits<ConstantPlaceHolder> : public FixedNumOperandTraits<1> {
+struct OperandTraits<ConstantPlaceHolder> :
+ public FixedNumOperandTraits<ConstantPlaceHolder, 1> {
};
}
@@ -298,7 +298,7 @@ void BitcodeReaderValueList::ResolveConstantForwardRefs() {
NewC = ConstantStruct::get(Context, &NewOps[0], NewOps.size(),
UserCS->getType()->isPacked());
} else if (isa<ConstantVector>(UserC)) {
- NewC = ConstantVector::get(&NewOps[0], NewOps.size());
+ NewC = ConstantVector::get(NewOps);
} else {
assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr.");
NewC = cast<ConstantExpr>(UserC)->getWithOperands(&NewOps[0],
@@ -550,6 +550,9 @@ bool BitcodeReader::ParseTypeTable() {
case bitc::TYPE_CODE_METADATA: // METADATA
ResultTy = Type::getMetadataTy(Context);
break;
+ case bitc::TYPE_CODE_X86_MMX: // X86_MMX
+ ResultTy = Type::getX86_MMXTy(Context);
+ break;
case bitc::TYPE_CODE_INTEGER: // INTEGER: [width]
if (Record.size() < 1)
return Error("Invalid Integer type record");
@@ -794,7 +797,7 @@ bool BitcodeReader::ParseMetadata() {
if (NextBitCode == bitc::METADATA_NAMED_NODE) {
LLVM2_7MetadataDetected = true;
} else if (NextBitCode != bitc::METADATA_NAMED_NODE2)
- assert ( 0 && "Inavlid Named Metadata record");
+ assert ( 0 && "Invalid Named Metadata record");
// Read named metadata elements.
unsigned Size = Record.size();
@@ -832,7 +835,8 @@ bool BitcodeReader::ParseMetadata() {
unsigned Size = Record.size();
SmallVector<Value*, 8> Elts;
for (unsigned i = 0; i != Size; i += 2) {
- const Type *Ty = getTypeByID(Record[i], false);
+ const Type *Ty = getTypeByID(Record[i]);
+ if (!Ty) return Error("Invalid METADATA_NODE2 record");
if (Ty->isMetadataTy())
Elts.push_back(MDValueList.getValueFwdRef(Record[i+1]));
else if (!Ty->isVoidTy())
@@ -1081,13 +1085,17 @@ bool BitcodeReader::ParseConstants() {
if (Record.size() >= 4) {
if (Opc == Instruction::Add ||
Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
+ Opc == Instruction::Mul ||
+ Opc == Instruction::Shl) {
if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP))
Flags |= OverflowingBinaryOperator::NoSignedWrap;
if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
- } else if (Opc == Instruction::SDiv) {
- if (Record[3] & (1 << bitc::SDIV_EXACT))
+ } else if (Opc == Instruction::SDiv ||
+ Opc == Instruction::UDiv ||
+ Opc == Instruction::LShr ||
+ Opc == Instruction::AShr) {
+ if (Record[3] & (1 << bitc::PEO_EXACT))
Flags |= SDivOperator::IsExact;
}
}
@@ -1167,7 +1175,8 @@ bool BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
const VectorType *RTy = dyn_cast<VectorType>(CurTy);
- const VectorType *OpTy = dyn_cast<VectorType>(getTypeByID(Record[0]));
+ const VectorType *OpTy =
+ dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (Record.size() < 4 || RTy == 0 || OpTy == 0)
return Error("Invalid CE_SHUFVEC_EX record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
@@ -1418,11 +1427,13 @@ bool BitcodeReader::ParseModule() {
break;
}
// GLOBALVAR: [pointer type, isconst, initid,
- // linkage, alignment, section, visibility, threadlocal]
+ // linkage, alignment, section, visibility, threadlocal,
+ // unnamed_addr]
case bitc::MODULE_CODE_GLOBALVAR: {
if (Record.size() < 6)
return Error("Invalid MODULE_CODE_GLOBALVAR record");
const Type *Ty = getTypeByID(Record[0]);
+ if (!Ty) return Error("Invalid MODULE_CODE_GLOBALVAR record");
if (!Ty->isPointerTy())
return Error("Global not a pointer type!");
unsigned AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
@@ -1444,6 +1455,10 @@ bool BitcodeReader::ParseModule() {
if (Record.size() > 7)
isThreadLocal = Record[7];
+ bool UnnamedAddr = false;
+ if (Record.size() > 8)
+ UnnamedAddr = Record[8];
+
GlobalVariable *NewGV =
new GlobalVariable(*TheModule, Ty, isConstant, Linkage, 0, "", 0,
isThreadLocal, AddressSpace);
@@ -1452,6 +1467,7 @@ bool BitcodeReader::ParseModule() {
NewGV->setSection(Section);
NewGV->setVisibility(Visibility);
NewGV->setThreadLocal(isThreadLocal);
+ NewGV->setUnnamedAddr(UnnamedAddr);
ValueList.push_back(NewGV);
@@ -1461,11 +1477,12 @@ bool BitcodeReader::ParseModule() {
break;
}
// FUNCTION: [type, callingconv, isproto, linkage, paramattr,
- // alignment, section, visibility, gc]
+ // alignment, section, visibility, gc, unnamed_addr]
case bitc::MODULE_CODE_FUNCTION: {
if (Record.size() < 8)
return Error("Invalid MODULE_CODE_FUNCTION record");
const Type *Ty = getTypeByID(Record[0]);
+ if (!Ty) return Error("Invalid MODULE_CODE_FUNCTION record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
const FunctionType *FTy =
@@ -1493,6 +1510,10 @@ bool BitcodeReader::ParseModule() {
return Error("Invalid GC ID");
Func->setGC(GCTable[Record[8]-1].c_str());
}
+ bool UnnamedAddr = false;
+ if (Record.size() > 9)
+ UnnamedAddr = Record[9];
+ Func->setUnnamedAddr(UnnamedAddr);
ValueList.push_back(Func);
// If this is a function with a body, remember the prototype we are
@@ -1507,6 +1528,7 @@ bool BitcodeReader::ParseModule() {
if (Record.size() < 3)
return Error("Invalid MODULE_ALIAS record");
const Type *Ty = getTypeByID(Record[0]);
+ if (!Ty) return Error("Invalid MODULE_ALIAS record");
if (!Ty->isPointerTy())
return Error("Function not a pointer type!");
@@ -1598,6 +1620,112 @@ bool BitcodeReader::ParseBitcodeInto(Module *M) {
return false;
}
+bool BitcodeReader::ParseModuleTriple(std::string &Triple) {
+ if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+ return Error("Malformed block record");
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Read all the records for this module.
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+ if (Code == bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd())
+ return Error("Error at end of module block");
+
+ return false;
+ }
+
+ if (Code == bitc::ENTER_SUBBLOCK) {
+ switch (Stream.ReadSubBlockID()) {
+ default: // Skip unknown content.
+ if (Stream.SkipBlock())
+ return Error("Malformed block record");
+ break;
+ }
+ continue;
+ }
+
+ if (Code == bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ switch (Stream.ReadRecord(Code, Record)) {
+ default: break; // Default behavior, ignore unknown content.
+ case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
+ if (Record.size() < 1)
+ return Error("Malformed MODULE_CODE_VERSION");
+ // Only version #0 is supported so far.
+ if (Record[0] != 0)
+ return Error("Unknown bitstream version!");
+ break;
+ case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
+ std::string S;
+ if (ConvertToString(Record, 0, S))
+ return Error("Invalid MODULE_CODE_TRIPLE record");
+ Triple = S;
+ break;
+ }
+ }
+ Record.clear();
+ }
+
+ return Error("Premature end of bitstream");
+}
+
+bool BitcodeReader::ParseTriple(std::string &Triple) {
+ if (Buffer->getBufferSize() & 3)
+ return Error("Bitcode stream should be a multiple of 4 bytes in length");
+
+ unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
+ unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
+
+ // If we have a wrapper header, parse it and ignore the non-bc file contents.
+ // The magic number is 0x0B17C0DE stored in little endian.
+ if (isBitcodeWrapper(BufPtr, BufEnd))
+ if (SkipBitcodeWrapperHeader(BufPtr, BufEnd))
+ return Error("Invalid bitcode wrapper header");
+
+ StreamFile.init(BufPtr, BufEnd);
+ Stream.init(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'B' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(4) != 0x0 ||
+ Stream.Read(4) != 0xC ||
+ Stream.Read(4) != 0xE ||
+ Stream.Read(4) != 0xD)
+ return Error("Invalid bitcode signature");
+
+ // We expect a number of well-defined blocks, though we don't necessarily
+ // need to understand them all.
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code != bitc::ENTER_SUBBLOCK)
+ return Error("Invalid record at top-level");
+
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the MODULE subblock ID.
+ switch (BlockID) {
+ case bitc::MODULE_BLOCK_ID:
+ if (ParseModuleTriple(Triple))
+ return true;
+ break;
+ default:
+ if (Stream.SkipBlock())
+ return Error("Malformed block record");
+ break;
+ }
+ }
+
+ return false;
+}
+
/// ParseMetadataAttachment - Parse metadata attachments.
bool BitcodeReader::ParseMetadataAttachment() {
if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
@@ -1776,13 +1904,17 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
if (OpNum < Record.size()) {
if (Opc == Instruction::Add ||
Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
+ Opc == Instruction::Mul ||
+ Opc == Instruction::Shl) {
if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP))
cast<BinaryOperator>(I)->setHasNoSignedWrap(true);
if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true);
- } else if (Opc == Instruction::SDiv) {
- if (Record[OpNum] & (1 << bitc::SDIV_EXACT))
+ } else if (Opc == Instruction::SDiv ||
+ Opc == Instruction::UDiv ||
+ Opc == Instruction::LShr ||
+ Opc == Instruction::AShr) {
+ if (Record[OpNum] & (1 << bitc::PEO_EXACT))
cast<BinaryOperator>(I)->setIsExact(true);
}
}
@@ -2535,7 +2667,24 @@ Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
// Read in the entire module, and destroy the BitcodeReader.
if (M->MaterializeAllPermanently(ErrMsg)) {
delete M;
- return NULL;
+ return 0;
}
+
return M;
}
+
+std::string llvm::getBitcodeTargetTriple(MemoryBuffer *Buffer,
+ LLVMContext& Context,
+ std::string *ErrMsg) {
+ BitcodeReader *R = new BitcodeReader(Buffer, Context);
+ // Don't let the BitcodeReader dtor delete 'Buffer'.
+ R->setBufferOwned(false);
+
+ std::string Triple("");
+ if (R->ParseTriple(Triple))
+ if (ErrMsg)
+ *ErrMsg = R->getErrorString();
+
+ delete R;
+ return Triple;
+}
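
A hypothetical driver use of the new entry point. An empty result with a non-empty ErrMsg signals a parse failure; an empty result alone just means the module recorded no triple.

    #include "llvm/Bitcode/ReaderWriter.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"

    // Buffer and Context are assumed to be set up elsewhere; sketch only.
    void printTriple(llvm::MemoryBuffer *Buffer, llvm::LLVMContext &Context) {
      std::string ErrMsg;
      std::string Triple =
          llvm::getBitcodeTargetTriple(Buffer, Context, &ErrMsg);
      if (Triple.empty())
        llvm::errs() << "no triple: " << ErrMsg << "\n";
      else
        llvm::errs() << "target triple: " << Triple << "\n";
    }
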
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
index 053121b..f8fc079 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
@@ -212,6 +212,10 @@ public:
/// @brief Main interface to parsing a bitcode buffer.
/// @returns true if an error occurred.
bool ParseBitcodeInto(Module *M);
+
+ /// @brief Cheap mechanism to extract just the module triple.
+ /// @returns true if an error occurred.
+ bool ParseTriple(std::string &Triple);
private:
const Type *getTypeByID(unsigned ID, bool isTypeTable = false);
Value *getFnValueByID(unsigned ID, const Type *Ty) {
@@ -270,6 +274,7 @@ private:
bool ResolveGlobalAndAliasInits();
bool ParseMetadata();
bool ParseMetadataAttachment();
+ bool ParseModuleTriple(std::string &Triple);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 7b6fc6c..f8ef8c6 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -26,7 +26,8 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Program.h"
+#include <cctype>
using namespace llvm;
/// These are manifest constants used by the bitcode writer. They do not need to
@@ -211,6 +212,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
case Type::LabelTyID: Code = bitc::TYPE_CODE_LABEL; break;
case Type::OpaqueTyID: Code = bitc::TYPE_CODE_OPAQUE; break;
case Type::MetadataTyID: Code = bitc::TYPE_CODE_METADATA; break;
+ case Type::X86_MMXTyID: Code = bitc::TYPE_CODE_X86_MMX; break;
case Type::IntegerTyID:
// INTEGER: [width]
Code = bitc::TYPE_CODE_INTEGER;
@@ -402,7 +404,8 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
unsigned AbbrevToUse = 0;
// GLOBALVAR: [type, isconst, initid,
- // linkage, alignment, section, visibility, threadlocal]
+ // linkage, alignment, section, visibility, threadlocal,
+ // unnamed_addr]
Vals.push_back(VE.getTypeID(GV->getType()));
Vals.push_back(GV->isConstant());
Vals.push_back(GV->isDeclaration() ? 0 :
@@ -411,9 +414,11 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
Vals.push_back(Log2_32(GV->getAlignment())+1);
Vals.push_back(GV->hasSection() ? SectionMap[GV->getSection()] : 0);
if (GV->isThreadLocal() ||
- GV->getVisibility() != GlobalValue::DefaultVisibility) {
+ GV->getVisibility() != GlobalValue::DefaultVisibility ||
+ GV->hasUnnamedAddr()) {
Vals.push_back(getEncodedVisibility(GV));
Vals.push_back(GV->isThreadLocal());
+ Vals.push_back(GV->hasUnnamedAddr());
} else {
AbbrevToUse = SimpleGVarAbbrev;
}
@@ -425,7 +430,7 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
// Emit the function proto information.
for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
// FUNCTION: [type, callingconv, isproto, paramattr,
- // linkage, alignment, section, visibility, gc]
+ // linkage, alignment, section, visibility, gc, unnamed_addr]
Vals.push_back(VE.getTypeID(F->getType()));
Vals.push_back(F->getCallingConv());
Vals.push_back(F->isDeclaration());
@@ -435,6 +440,7 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
Vals.push_back(F->hasSection() ? SectionMap[F->getSection()] : 0);
Vals.push_back(getEncodedVisibility(F));
Vals.push_back(F->hasGC() ? GCMap[F->getGC()] : 0);
+ Vals.push_back(F->hasUnnamedAddr());
unsigned AbbrevToUse = 0;
Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
@@ -464,9 +470,10 @@ static uint64_t GetOptimizationFlags(const Value *V) {
Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
if (OBO->hasNoUnsignedWrap())
Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
- } else if (const SDivOperator *Div = dyn_cast<SDivOperator>(V)) {
- if (Div->isExact())
- Flags |= 1 << bitc::SDIV_EXACT;
+ } else if (const PossiblyExactOperator *PEO =
+ dyn_cast<PossiblyExactOperator>(V)) {
+ if (PEO->isExact())
+ Flags |= 1 << bitc::PEO_EXACT;
}
return Flags;
@@ -1641,9 +1648,12 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out) {
/// WriteBitcodeToStream - Write the specified module to the specified output
/// stream.
void llvm::WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream) {
- // If this is darwin, emit a file header and trailer if needed.
- bool isDarwin = M->getTargetTriple().find("-darwin") != std::string::npos;
- if (isDarwin)
+ // If this is darwin or another generic macho target, emit a file header and
+ // trailer if needed.
+ bool isMacho =
+ M->getTargetTriple().find("-darwin") != std::string::npos ||
+ M->getTargetTriple().find("-macho") != std::string::npos;
+ if (isMacho)
EmitDarwinBCHeader(Stream, M->getTargetTriple());
// Emit the file header.
@@ -1657,6 +1667,6 @@ void llvm::WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream) {
// Emit the module.
WriteModule(M, Stream);
- if (isDarwin)
+ if (isMacho)
EmitDarwinBCTrailer(Stream, Stream.getBuffer().size());
}
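
Both the GLOBALVAR and FUNCTION records grow by appending the unnamed_addr field at the end (index 8 and index 9 respectively); the reader defaults the field when it is absent, which is what keeps old bitcode loadable. The guard pattern, in isolation:

    // Reader-side sketch of the append-only record convention: an older
    // producer simply wrote a shorter record, so default the new field.
    bool UnnamedAddr = false;      // default for pre-unnamed_addr bitcode
    if (Record.size() > 8)         // GLOBALVAR: field 8 is unnamed_addr
      UnnamedAddr = Record[8];
    NewGV->setUnnamedAddr(UnnamedAddr);
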
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 5a634d6..b520d8f 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -155,16 +155,11 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
- unsigned Reg = *I;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- State->UnionGroups(AliasReg, 0);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
+ for (const unsigned *Alias = TRI->getOverlaps(*I);
+ unsigned Reg = *Alias; ++Alias) {
+ State->UnionGroups(Reg, 0);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
}
}
}
@@ -176,16 +171,11 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- State->UnionGroups(AliasReg, 0);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
+ for (const unsigned *Alias = TRI->getOverlaps(*I);
+ unsigned Reg = *Alias; ++Alias) {
+ State->UnionGroups(Reg, 0);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
}
}
@@ -197,12 +187,8 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
+ for (const unsigned *Alias = TRI->getOverlaps(Reg);
+ unsigned AliasReg = *Alias; ++Alias) {
State->UnionGroups(AliasReg, 0);
KillIndices[AliasReg] = BB->size();
DefIndices[AliasReg] = ~0u;
@@ -435,12 +421,9 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
continue;
// Update def for Reg and aliases.
- DefIndices[Reg] = Count;
- for (const unsigned *Alias = TRI->getAliasSet(Reg);
- *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
+ for (const unsigned *Alias = TRI->getOverlaps(Reg);
+ unsigned AliasReg = *Alias; ++Alias)
DefIndices[AliasReg] = Count;
- }
}
}
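
The three hunks above are the same mechanical rewrite: getOverlaps(Reg) yields a zero-terminated list that already contains Reg itself, so the separate "handle Reg, then each alias" code collapses into one loop. In isolation (assuming this era's TargetRegisterInfo):

    // One loop covers Reg and every register overlapping it; register
    // number 0 terminates the list and therefore the for-condition.
    for (const unsigned *AI = TRI->getOverlaps(Reg); unsigned R = *AI; ++AI) {
      State->UnionGroups(R, 0);
      KillIndices[R] = BB->size();
      DefIndices[R] = ~0u;
    }
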
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
new file mode 100644
index 0000000..20c7625
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
@@ -0,0 +1,68 @@
+//===-- llvm/CodeGen/AllocationOrder.cpp - Allocation Order ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an allocation order for virtual registers.
+//
+// The preferred allocation order for a virtual register depends on allocation
+// hints and target hooks. The AllocationOrder class encapsulates all of that.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AllocationOrder.h"
+#include "VirtRegMap.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+// Compare VirtRegMap::getRegAllocPref().
+AllocationOrder::AllocationOrder(unsigned VirtReg,
+ const VirtRegMap &VRM,
+ const BitVector &ReservedRegs)
+ : Pos(0), Reserved(ReservedRegs) {
+ const TargetRegisterClass *RC = VRM.getRegInfo().getRegClass(VirtReg);
+ std::pair<unsigned, unsigned> HintPair =
+ VRM.getRegInfo().getRegAllocationHint(VirtReg);
+
+ // HintPair.second is a register, phys or virt.
+ Hint = HintPair.second;
+
+ // Translate to physreg, or 0 if not assigned yet.
+ if (TargetRegisterInfo::isVirtualRegister(Hint))
+ Hint = VRM.getPhys(Hint);
+
+ // The remaining allocation order may depend on the hint.
+ tie(Begin, End) = VRM.getTargetRegInfo()
+ .getAllocationOrder(RC, HintPair.first, Hint, VRM.getMachineFunction());
+
+ // Target-dependent hints require resolution.
+ if (HintPair.first)
+ Hint = VRM.getTargetRegInfo().ResolveRegAllocHint(HintPair.first, Hint,
+ VRM.getMachineFunction());
+
+ // The hint must be a valid physreg for allocation.
+ if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
+ !RC->contains(Hint) || ReservedRegs.test(Hint)))
+ Hint = 0;
+}
+
+unsigned AllocationOrder::next() {
+ // First take the hint.
+ if (!Pos) {
+ Pos = Begin;
+ if (Hint)
+ return Hint;
+ }
+ // Then look at the order from TRI.
+ while (Pos != End) {
+ unsigned Reg = *Pos++;
+ if (Reg != Hint && !Reserved.test(Reg))
+ return Reg;
+ }
+ return 0;
+}
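
A register allocator would drive the new class roughly like this (tryAssign is a hypothetical helper; VRM and ReservedRegs come from the pass, as documented in the header below):

    // Sketch, inside the allocator's assignment routine: walk the
    // hint-first order until a physical register can be assigned.
    AllocationOrder Order(VirtReg, VRM, ReservedRegs);
    while (unsigned PhysReg = Order.next()) {   // 0 terminates the order
      if (tryAssign(VirtReg, PhysReg))          // hypothetical
        break;
    }
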
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.h b/contrib/llvm/lib/CodeGen/AllocationOrder.h
new file mode 100644
index 0000000..3db4b69
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.h
@@ -0,0 +1,54 @@
+//===-- llvm/CodeGen/AllocationOrder.h - Allocation Order -*- C++ -*-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an allocation order for virtual registers.
+//
+// The preferred allocation order for a virtual register depends on allocation
+// hints and target hooks. The AllocationOrder class encapsulates all of that.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ALLOCATIONORDER_H
+#define LLVM_CODEGEN_ALLOCATIONORDER_H
+
+namespace llvm {
+
+class BitVector;
+class VirtRegMap;
+
+class AllocationOrder {
+ const unsigned *Begin;
+ const unsigned *End;
+ const unsigned *Pos;
+ const BitVector &Reserved;
+ unsigned Hint;
+public:
+
+ /// AllocationOrder - Create a new AllocationOrder for VirtReg.
+ /// @param VirtReg Virtual register to allocate for.
+ /// @param VRM Virtual register map for function.
+ /// @param ReservedRegs Set of reserved registers as returned by
+ /// TargetRegisterInfo::getReservedRegs().
+ AllocationOrder(unsigned VirtReg,
+ const VirtRegMap &VRM,
+ const BitVector &ReservedRegs);
+
+ /// next - Return the next physical register in the allocation order, or 0.
+ /// It is safe to call next again after it returned 0.
+ /// It will keep returning 0 until rewind() is called.
+ unsigned next();
+
+ /// rewind - Start over from the beginning.
+ void rewind() { Pos = 0; }
+
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index e3dd646..36638c3 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -19,6 +19,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
@@ -30,7 +31,7 @@ using namespace llvm;
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
-unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+unsigned llvm::ComputeLinearIndex(const Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex) {
@@ -45,8 +46,8 @@ unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
EE = STy->element_end();
EI != EE; ++EI) {
if (Indices && *Indices == unsigned(EI - EB))
- return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
+ return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
+ CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
}
return CurIndex;
}
@@ -55,8 +56,8 @@ unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
const Type *EltTy = ATy->getElementType();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
- return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
+ return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
+ CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
}
return CurIndex;
}
@@ -125,7 +126,7 @@ GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
-llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
+llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
const TargetLowering &TLI) {
for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
InlineAsm::ConstraintInfo &CI = CInfos[i];
@@ -283,3 +284,20 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
return true;
}
+bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
+ const TargetLowering &TLI) {
+ const Function *F = DAG.getMachineFunction().getFunction();
+
+ // Conservatively require the attributes of the call to match those of
+ // the return. Ignore noalias because it doesn't affect the call sequence.
+ unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ if (CallerRetAttr & ~Attribute::NoAlias)
+ return false;
+
+ // It's not safe to eliminate the sign / zero extension of the return value.
+ if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ return false;
+
+ // Check if the only use is a function return node.
+ return TLI.isUsedByReturnOnly(Node);
+}
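
Dropping the unused TargetLowering parameter makes ComputeLinearIndex a pure IR-type computation. A worked example, assuming the updated signature with an explicit starting index:

    // STy is assumed to be the IR type { i32, { float, float }, i8 }.
    // Its leaves number i32=0, float=1, float=2, i8=3, so the member at
    // indices {1,1} -- the second float -- has linear index 2.
    unsigned Indices[] = { 1, 1 };
    unsigned Linear =
        llvm::ComputeLinearIndex(STy, Indices, Indices + 2, 0);  // == 2
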
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d358ab2..43e8990 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -38,6 +38,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/ErrorHandling.h"
@@ -178,16 +179,24 @@ bool AsmPrinter::doInitialization(Module &M) {
if (!M.getModuleInlineAsm().empty()) {
OutStreamer.AddComment("Start of file scope inline assembly");
OutStreamer.AddBlankLine();
- EmitInlineAsm(M.getModuleInlineAsm()+"\n", 0/*no loc cookie*/);
+ EmitInlineAsm(M.getModuleInlineAsm()+"\n");
OutStreamer.AddComment("End of file scope inline assembly");
OutStreamer.AddBlankLine();
}
if (MAI->doesSupportDebugInformation())
DD = new DwarfDebug(this, &M);
-
+
if (MAI->doesSupportExceptionHandling())
- DE = new DwarfException(this);
+ switch (MAI->getExceptionHandlingType()) {
+ default:
+ case ExceptionHandling::DwarfTable:
+ DE = new DwarfTableException(this);
+ break;
+ case ExceptionHandling::DwarfCFI:
+ DE = new DwarfCFIException(this);
+ break;
+ }
return false;
}
@@ -282,8 +291,12 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// Handle common symbols.
if (GVKind.isCommon()) {
+ unsigned Align = 1 << AlignLog;
+ if (!getObjFileLowering().getCommDirectiveSupportsAlignment())
+ Align = 0;
+
// .comm _foo, 42, 4
- OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
+ OutStreamer.EmitCommonSymbol(GVSym, Size, Align);
return;
}
@@ -301,11 +314,15 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
OutStreamer.EmitLocalCommonSymbol(GVSym, Size);
return;
}
+
+ unsigned Align = 1 << AlignLog;
+ if (!getObjFileLowering().getCommDirectiveSupportsAlignment())
+ Align = 0;
// .local _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Local);
// .comm _foo, 42, 4
- OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
+ OutStreamer.EmitCommonSymbol(GVSym, Size, Align);
return;
}
@@ -327,6 +344,13 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// Handle thread local data for mach-o which requires us to output an
// additional structure of data and mangle the original symbol so that we
// can reference it later.
+ //
+ // TODO: This should become an "emit thread local global" method on TLOF.
+ // All of this macho specific stuff should be sunk down into TLOFMachO and
+ // stuff like "TLSExtraDataSection" should no longer be part of the parent
+ // TLOF class. This will also make it more obvious that stuff like
+ // MCStreamer::EmitTBSSSymbol is macho specific and only called from macho
+ // specific code.
if (GVKind.isThreadLocal() && MAI->hasMachoTBSSDirective()) {
// Emit the .tbss symbol
MCSymbol *MangSym =
@@ -623,7 +647,7 @@ void AsmPrinter::EmitFunctionBody() {
if (ShouldPrintDebugScopes) {
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->beginScope(II);
+ DD->beginInstruction(II);
}
if (isVerbose())
@@ -657,7 +681,7 @@ void AsmPrinter::EmitFunctionBody() {
if (ShouldPrintDebugScopes) {
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
- DD->endScope(II);
+ DD->endInstruction(II);
}
}
}
@@ -729,7 +753,20 @@ bool AsmPrinter::doFinalization(Module &M) {
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I)
EmitGlobalVariable(I);
-
+
+ // Emit visibility info for declarations
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ const Function &F = *I;
+ if (!F.isDeclaration())
+ continue;
+ GlobalValue::VisibilityTypes V = F.getVisibility();
+ if (V == GlobalValue::DefaultVisibility)
+ continue;
+
+ MCSymbol *Name = Mang->getSymbol(&F);
+ EmitVisibility(Name, V);
+ }
+
// Finalize debug and EH information.
if (DE) {
{
@@ -905,14 +942,6 @@ void AsmPrinter::EmitConstantPool() {
const Type *Ty = CPE.getType();
Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
-
- // Emit the label with a comment on it.
- if (isVerbose()) {
- OutStreamer.GetCommentOS() << "constant pool ";
- WriteTypeSymbolic(OutStreamer.GetCommentOS(), CPE.getType(),
- MF->getFunction()->getParent());
- OutStreamer.GetCommentOS() << '\n';
- }
OutStreamer.EmitLabel(GetCPISymbol(CPI));
if (CPE.isMachineConstantPoolEntry())
@@ -983,7 +1012,7 @@ void AsmPrinter::EmitJumpTableInfo() {
}
}
- // On some targets (e.g. Darwin) we want to emit two consequtive labels
+ // On some targets (e.g. Darwin) we want to emit two consecutive labels
// before each jump table. The first label is never referenced, but tells
// the assembler and linker the extents of the jump table object. The
// second label is actually referenced by the code.
@@ -1004,6 +1033,7 @@ void AsmPrinter::EmitJumpTableInfo() {
void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned UID) const {
+ assert(MBB && MBB->getNumber() >= 0 && "Invalid basic block");
const MCExpr *Value = 0;
switch (MJTI->getEntryKind()) {
case MachineJumpTableInfo::EK_Inline:
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index ce4519c..98a1bf2 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -19,7 +19,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -36,9 +36,8 @@ void AsmPrinter::EmitSLEB128(int Value, const char *Desc) const {
if (isVerbose() && Desc)
OutStreamer.AddComment(Desc);
- if (MAI->hasLEB128() && OutStreamer.hasRawTextSupport()) {
- // FIXME: MCize.
- OutStreamer.EmitRawText("\t.sleb128\t" + Twine(Value));
+ if (MAI->hasLEB128()) {
+ OutStreamer.EmitSLEB128IntValue(Value);
return;
}
@@ -60,10 +59,10 @@ void AsmPrinter::EmitULEB128(unsigned Value, const char *Desc,
unsigned PadTo) const {
if (isVerbose() && Desc)
OutStreamer.AddComment(Desc);
-
- if (MAI->hasLEB128() && PadTo == 0 && OutStreamer.hasRawTextSupport()) {
- // FIXME: MCize.
- OutStreamer.EmitRawText("\t.uleb128\t" + Twine(Value));
+
+ // FIXME: Should we add a PadTo option to the streamer?
+ if (MAI->hasLEB128() && PadTo == 0) {
+ OutStreamer.EmitULEB128IntValue(Value);
return;
}
@@ -157,7 +156,7 @@ void AsmPrinter::EmitReference(const MCSymbol *Sym, unsigned Encoding) const {
const MCExpr *Exp =
TLOF.getExprForDwarfReference(Sym, Mang, MMI, Encoding, OutStreamer);
- OutStreamer.EmitValue(Exp, GetSizeOfEncodedValue(Encoding), /*addrspace*/0);
+ OutStreamer.EmitAbsValue(Exp, GetSizeOfEncodedValue(Encoding));
}
void AsmPrinter::EmitReference(const GlobalValue *GV, unsigned Encoding)const{
@@ -215,8 +214,8 @@ void AsmPrinter::EmitFrameMoves(const std::vector<MachineMove> &Moves,
const TargetRegisterInfo *RI = TM.getRegisterInfo();
int stackGrowth = TM.getTargetData()->getPointerSize();
- if (TM.getFrameInfo()->getStackGrowthDirection() !=
- TargetFrameInfo::StackGrowsUp)
+ if (TM.getFrameLowering()->getStackGrowthDirection() !=
+ TargetFrameLowering::StackGrowsUp)
stackGrowth *= -1;
for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
@@ -277,3 +276,43 @@ void AsmPrinter::EmitFrameMoves(const std::vector<MachineMove> &Moves,
}
}
}
+
+/// EmitCFIFrameMoves - Emit CFI frame instructions to describe the layout
+/// of the frame.
+void AsmPrinter::EmitCFIFrameMoves(const std::vector<MachineMove> &Moves) const {
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+
+ int stackGrowth = TM.getTargetData()->getPointerSize();
+ if (TM.getFrameLowering()->getStackGrowthDirection() !=
+ TargetFrameLowering::StackGrowsUp)
+ stackGrowth *= -1;
+
+ for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+ const MachineMove &Move = Moves[i];
+ MCSymbol *Label = Move.getLabel();
+ // Throw out move if the label is invalid.
+ if (Label && !Label->isDefined()) continue; // Not emitted, in dead code.
+
+ const MachineLocation &Dst = Move.getDestination();
+ const MachineLocation &Src = Move.getSource();
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ assert(!Src.isReg() && "Machine move not supported yet.");
+
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ OutStreamer.EmitCFIDefCfaOffset(-Src.getOffset());
+ } else {
+ assert("Machine move not supported yet");
+ // Reg + Offset
+ }
+ } else if (Src.isReg() && Src.getReg() == MachineLocation::VirtualFP) {
+ assert(Dst.isReg() && "Machine move not supported yet.");
+ OutStreamer.EmitCFIDefCfaRegister(RI->getDwarfRegNum(Dst.getReg(), true));
+ } else {
+ assert(!Dst.isReg() && "Machine move not supported yet.");
+ OutStreamer.EmitCFIOffset(RI->getDwarfRegNum(Src.getReg(), true),
+ Dst.getOffset());
+ }
+ }
+}
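
EmitSLEB128IntValue/EmitULEB128IntValue move the byte-level encoding into the streamer instead of printing raw ".uleb128" text. For reference, ULEB128 is the standard base-128 varint; a sketch of the encoding (not the streamer's code):

    #include "llvm/Support/raw_ostream.h"
    #include <stdint.h>

    // Emit Value as ULEB128: 7 data bits per byte, high bit set on all
    // but the last byte.
    static void encodeULEB128(uint64_t Value, llvm::raw_ostream &OS) {
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80;            // more bytes follow
        OS << char(Byte);
      } while (Value != 0);
    }
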
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index df03168..c6166e2 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -34,15 +34,47 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+namespace {
+ struct SrcMgrDiagInfo {
+ const MDNode *LocInfo;
+ LLVMContext::InlineAsmDiagHandlerTy DiagHandler;
+ void *DiagContext;
+ };
+}
+
+/// SrcMgrDiagHandler - This callback is invoked when the SourceMgr for an
+/// inline asm has an error in it. diagInfo is a pointer to the SrcMgrDiagInfo
+/// struct above.
+static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
+ SrcMgrDiagInfo *DiagInfo = static_cast<SrcMgrDiagInfo *>(diagInfo);
+ assert(DiagInfo && "Diagnostic context not passed down?");
+
+ // If the inline asm had metadata associated with it, pull out a location
+ // cookie corresponding to which line the error occurred on.
+ unsigned LocCookie = 0;
+ if (const MDNode *LocInfo = DiagInfo->LocInfo) {
+ unsigned ErrorLine = Diag.getLineNo()-1;
+ if (ErrorLine >= LocInfo->getNumOperands())
+ ErrorLine = 0;
+
+ if (LocInfo->getNumOperands() != 0)
+ if (const ConstantInt *CI =
+ dyn_cast<ConstantInt>(LocInfo->getOperand(ErrorLine)))
+ LocCookie = CI->getZExtValue();
+ }
+
+ DiagInfo->DiagHandler(Diag, DiagInfo->DiagContext, LocCookie);
+}
+
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
-void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
+void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
assert(!Str.empty() && "Can't emit empty inline asm block");
-
+
// Remember if the buffer is nul terminated or not so we can avoid a copy.
bool isNullTerminated = Str.back() == 0;
if (isNullTerminated)
Str = Str.substr(0, Str.size()-1);
-
+
// If the output streamer is actually a .s file, just emit the blob textually.
// This is useful in case the asm parser doesn't handle something but the
// system assembler does.
@@ -50,18 +82,23 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
OutStreamer.EmitRawText(Str);
return;
}
-
+
SourceMgr SrcMgr;
-
+ SrcMgrDiagInfo DiagInfo;
+
// If the current LLVMContext has an inline asm handler, set it in SourceMgr.
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
bool HasDiagHandler = false;
- if (void *DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler()) {
- SrcMgr.setDiagHandler((SourceMgr::DiagHandlerTy)(intptr_t)DiagHandler,
- LLVMCtx.getInlineAsmDiagnosticContext(), LocCookie);
+ if (LLVMCtx.getInlineAsmDiagnosticHandler() != 0) {
+ // If the source manager has an issue, we arrange for SrcMgrDiagHandler
+ // to be invoked, getting DiagInfo passed into it.
+ DiagInfo.LocInfo = LocMDNode;
+ DiagInfo.DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler();
+ DiagInfo.DiagContext = LLVMCtx.getInlineAsmDiagnosticContext();
+ SrcMgr.setDiagHandler(SrcMgrDiagHandler, &DiagInfo);
HasDiagHandler = true;
}
-
+
MemoryBuffer *Buffer;
if (isNullTerminated)
Buffer = MemoryBuffer::getMemBuffer(Str, "<inline asm>");
@@ -70,7 +107,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
// Tell SrcMgr about this buffer, it takes ownership of the buffer.
SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
-
+
OwningPtr<MCAsmParser> Parser(createMCAsmParser(TM.getTarget(), SrcMgr,
OutContext, OutStreamer,
*MAI));
@@ -92,15 +129,15 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
/// instruction that is an inline asm.
void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
-
+
unsigned NumOperands = MI->getNumOperands();
-
+
// Count the number of register definitions to find the asm string.
unsigned NumDefs = 0;
for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
++NumDefs)
assert(NumDefs != NumOperands-2 && "No asm string?");
-
+
assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
// Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
@@ -128,22 +165,23 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
// Get the !srcloc metadata node if we have it, and decode the loc cookie from
// it.
unsigned LocCookie = 0;
+ const MDNode *LocMD = 0;
for (unsigned i = MI->getNumOperands(); i != 0; --i) {
- if (MI->getOperand(i-1).isMetadata())
- if (const MDNode *SrcLoc = MI->getOperand(i-1).getMetadata())
- if (SrcLoc->getNumOperands() != 0)
- if (const ConstantInt *CI =
- dyn_cast<ConstantInt>(SrcLoc->getOperand(0))) {
- LocCookie = CI->getZExtValue();
- break;
- }
+ if (MI->getOperand(i-1).isMetadata() &&
+ (LocMD = MI->getOperand(i-1).getMetadata()) &&
+ LocMD->getNumOperands() != 0) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
+ LocCookie = CI->getZExtValue();
+ break;
+ }
+ }
}
-
+
// Emit the inline asm to a temporary string so we can emit it through
// EmitInlineAsm.
SmallString<256> StringData;
raw_svector_ostream OS(StringData);
-
+
OS << '\t';
// The variant of the current asmprinter.
@@ -151,7 +189,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
int CurVariant = -1; // The number of the {.|.|.} region we are in.
const char *LastEmitted = AsmStr; // One past the last character emitted.
-
+
while (*LastEmitted) {
switch (*LastEmitted) {
default: {
@@ -199,18 +237,18 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
++LastEmitted; // consume ')' character.
if (CurVariant == -1)
OS << '}'; // this is gcc's behavior for } outside a variant
- else
+ else
CurVariant = -1;
break;
}
if (Done) break;
-
+
bool HasCurlyBraces = false;
if (*LastEmitted == '{') { // ${variable}
++LastEmitted; // Consume '{' character.
HasCurlyBraces = true;
}
-
+
// If we have ${:foo}, then this is not a real operand reference, it is a
// "magic" string reference, just like in .td files. Arrange to call
// PrintSpecial.
@@ -221,25 +259,25 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
if (StrEnd == 0)
report_fatal_error("Unterminated ${:foo} operand in inline asm"
" string: '" + Twine(AsmStr) + "'");
-
+
std::string Val(StrStart, StrEnd);
PrintSpecial(MI, OS, Val.c_str());
LastEmitted = StrEnd+1;
break;
}
-
+
const char *IDStart = LastEmitted;
const char *IDEnd = IDStart;
- while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
-
+ while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
+
unsigned Val;
if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
report_fatal_error("Bad $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
LastEmitted = IDEnd;
-
+
char Modifier[2] = { 0, 0 };
-
+
if (HasCurlyBraces) {
// If we have curly braces, check for a modifier character. This
      // supports syntax like ${0:u}, which corresponds to "%u0" in GCC asm.
@@ -248,25 +286,25 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
if (*LastEmitted == 0)
report_fatal_error("Bad ${:} expression in inline asm string: '" +
Twine(AsmStr) + "'");
-
+
Modifier[0] = *LastEmitted;
++LastEmitted; // Consume modifier character.
}
-
+
if (*LastEmitted != '}')
report_fatal_error("Bad ${} expression in inline asm string: '" +
Twine(AsmStr) + "'");
++LastEmitted; // Consume '}' character.
}
-
+
if (Val >= NumOperands-1)
report_fatal_error("Invalid $ operand number in inline asm string: '" +
Twine(AsmStr) + "'");
-
+
// Okay, we finally have a value number. Ask the target to print this
// operand!
if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
- unsigned OpNo = 2;
+ unsigned OpNo = InlineAsm::MIOp_FirstOperand;
bool Error = false;
@@ -310,8 +348,8 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
}
}
OS << '\n' << (char)0; // null terminate string.
- EmitInlineAsm(OS.str(), LocCookie);
-
+ EmitInlineAsm(OS.str(), LocMD);
+
// Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't
// enabled, so we use EmitRawText.
if (OutStreamer.hasRawTextSupport())
@@ -335,7 +373,7 @@ void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
} else if (!strcmp(Code, "uid")) {
// Comparing the address of MI isn't sufficient, because machineinstrs may
// be allocated to the same address across functions.
-
+
// If this is a new LastFn instruction, bump the counter.
if (LastMI != MI || LastFn != getFunctionNumber()) {
++Counter;
@@ -349,7 +387,7 @@ void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
Msg << "Unknown special formatter '" << Code
<< "' for machine instr: " << *MI;
report_fatal_error(Msg.str());
- }
+ }
}
/// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
new file mode 100644
index 0000000..68be2ee
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -0,0 +1,138 @@
+//===-- CodeGen/AsmPrinter/DwarfException.cpp - Dwarf Exception Impl ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing DWARF exception info into asm files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DwarfException.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+using namespace llvm;
+
+DwarfCFIException::DwarfCFIException(AsmPrinter *A)
+ : DwarfException(A),
+ shouldEmitTable(false), shouldEmitMoves(false), shouldEmitTableModule(false)
+ {}
+
+DwarfCFIException::~DwarfCFIException() {}
+
+/// EndModule - Emit all exception information that should come after the
+/// content.
+void DwarfCFIException::EndModule() {
+ if (!Asm->MAI->isExceptionHandlingDwarf())
+ return;
+
+ if (!shouldEmitTableModule)
+ return;
+
+ const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+ unsigned PerEncoding = TLOF.getPersonalityEncoding();
+
+ // Begin eh frame section.
+ Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
+
+ // Emit references to all used personality functions
+ const std::vector<const Function*> &Personalities = MMI->getPersonalities();
+ for (size_t i = 0, e = Personalities.size(); i != e; ++i) {
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("personality", i));
+ Asm->EmitReference(Personalities[i], PerEncoding);
+ }
+}
+
+/// BeginFunction - Gather pre-function exception information. Assumes it's
+/// being emitted immediately after the function entry point.
+void DwarfCFIException::BeginFunction(const MachineFunction *MF) {
+ shouldEmitTable = shouldEmitMoves = false;
+
+ // If any landing pads survive, we need an EH table.
+ shouldEmitTable = !MMI->getLandingPads().empty();
+
+ // See if we need frame move info.
+ shouldEmitMoves =
+ !Asm->MF->getFunction()->doesNotThrow() || UnwindTablesMandatory;
+
+ if (shouldEmitMoves || shouldEmitTable)
+    // Assumes we are in the correct section after the entry point.
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
+ Asm->getFunctionNumber()));
+
+ shouldEmitTableModule |= shouldEmitTable;
+
+ if (shouldEmitMoves) {
+ const TargetFrameLowering *TFL = Asm->TM.getFrameLowering();
+ Asm->OutStreamer.EmitCFIStartProc();
+
+ // Indicate locations of general callee saved registers in frame.
+ std::vector<MachineMove> Moves;
+ TFL->getInitialFrameState(Moves);
+ Asm->EmitCFIFrameMoves(Moves);
+ Asm->EmitCFIFrameMoves(MMI->getFrameMoves());
+ }
+
+ if (!shouldEmitTable)
+ return;
+
+ const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+
+ // Provide LSDA information.
+ unsigned LSDAEncoding = TLOF.getLSDAEncoding();
+ if (LSDAEncoding != dwarf::DW_EH_PE_omit)
+ Asm->OutStreamer.EmitCFILsda(Asm->GetTempSymbol("exception",
+ Asm->getFunctionNumber()),
+ LSDAEncoding);
+
+ // Indicate personality routine, if any.
+ unsigned PerEncoding = TLOF.getPersonalityEncoding();
+ if (PerEncoding != dwarf::DW_EH_PE_omit &&
+ MMI->getPersonalities()[MMI->getPersonalityIndex()])
+ Asm->OutStreamer.EmitCFIPersonality(Asm->GetTempSymbol("personality",
+ MMI->getPersonalityIndex()),
+ PerEncoding);
+}
+
+/// EndFunction - Gather and emit post-function exception information.
+///
+void DwarfCFIException::EndFunction() {
+ if (!shouldEmitMoves && !shouldEmitTable) return;
+
+ if (shouldEmitMoves)
+ Asm->OutStreamer.EmitCFIEndProc();
+
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_end",
+ Asm->getFunctionNumber()));
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ if (shouldEmitTable)
+ EmitExceptionTable();
+}
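
The new DwarfCFIException path above leans on MCStreamer's CFI interface
instead of emitting frame moves by hand. Below is a minimal toy model of the
bracketing contract it follows: frame moves may only appear between a
start/end pair, and a function that needs neither moves nor a table is
skipped entirely. MockStreamer, FunctionInfo, and emitFunction are
illustrative names, not LLVM API; this is a sketch of the control flow, not
the implementation.

#include <cassert>
#include <cstdio>

// Stand-in for the slice of MCStreamer used by DwarfCFIException.
struct MockStreamer {
  bool InProc = false;
  void EmitCFIStartProc() { assert(!InProc); InProc = true; }
  void EmitCFIFrameMoves() { assert(InProc && "moves outside start/endproc"); }
  void EmitCFIEndProc() { assert(InProc); InProc = false; }
};

struct FunctionInfo {
  bool DoesNotThrow;    // function provably cannot throw
  bool HasLandingPads;  // landing pads survived codegen
};

// Mirrors the BeginFunction/EndFunction decisions: moves are needed unless
// the function cannot throw (or unwind tables are mandatory); a table is
// needed only if landing pads survived.
void emitFunction(MockStreamer &S, const FunctionInfo &F,
                  bool UnwindTablesMandatory) {
  bool ShouldEmitTable = F.HasLandingPads;
  bool ShouldEmitMoves = !F.DoesNotThrow || UnwindTablesMandatory;
  if (!ShouldEmitMoves && !ShouldEmitTable)
    return;                    // nothing to emit for this function
  if (ShouldEmitMoves) {
    S.EmitCFIStartProc();
    S.EmitCFIFrameMoves();     // initial frame state + per-function moves
    S.EmitCFIEndProc();
  }
  if (ShouldEmitTable)
    std::printf("emit exception table\n");
}

int main() {
  MockStreamer S;
  emitFunction(S, {/*DoesNotThrow=*/true, /*HasLandingPads=*/false}, false);
  emitFunction(S, {/*DoesNotThrow=*/false, /*HasLandingPads=*/true}, false);
  return 0;
}
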
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index c886a5e..5106d57 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -16,6 +16,7 @@
#include "DIE.h"
#include "llvm/Constants.h"
#include "llvm/Module.h"
+#include "llvm/Instructions.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -24,12 +25,13 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
@@ -38,7 +40,7 @@
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Timer.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
using namespace llvm;
static cl::opt<bool> PrintDbgScope("print-dbgscope", cl::Hidden,
@@ -52,6 +54,10 @@ static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden,
cl::desc("Make an absense of debug location information explicit."),
cl::init(false));
+#ifndef NDEBUG
+STATISTIC(BlocksWithoutLineNo, "Number of blocks without any line number");
+#endif
+
namespace {
const char *DWARFGroupName = "DWARF Emission";
const char *DbgTimerName = "DWARF Debug Writer";
@@ -507,8 +513,9 @@ void DwarfDebug::addSourceLine(DIE *Die, DIVariable V) {
return;
unsigned Line = V.getLineNumber();
- unsigned FileID = GetOrCreateSourceID(V.getContext().getDirectory(),
- V.getContext().getFilename());
+ if (Line == 0)
+ return;
+ unsigned FileID = GetOrCreateSourceID(V.getContext().getFilename());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -522,8 +529,9 @@ void DwarfDebug::addSourceLine(DIE *Die, DIGlobalVariable G) {
return;
unsigned Line = G.getLineNumber();
- unsigned FileID = GetOrCreateSourceID(G.getContext().getDirectory(),
- G.getContext().getFilename());
+ if (Line == 0)
+ return;
+ unsigned FileID = GetOrCreateSourceID(G.getContext().getFilename());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -542,8 +550,7 @@ void DwarfDebug::addSourceLine(DIE *Die, DISubprogram SP) {
unsigned Line = SP.getLineNumber();
if (!SP.getContext().Verify())
return;
- unsigned FileID = GetOrCreateSourceID(SP.getDirectory(),
- SP.getFilename());
+ unsigned FileID = GetOrCreateSourceID(SP.getFilename());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -557,10 +564,9 @@ void DwarfDebug::addSourceLine(DIE *Die, DIType Ty) {
return;
unsigned Line = Ty.getLineNumber();
- if (!Ty.getContext().Verify())
+ if (Line == 0 || !Ty.getContext().Verify())
return;
- unsigned FileID = GetOrCreateSourceID(Ty.getContext().getDirectory(),
- Ty.getContext().getFilename());
+ unsigned FileID = GetOrCreateSourceID(Ty.getFilename());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -574,10 +580,11 @@ void DwarfDebug::addSourceLine(DIE *Die, DINameSpace NS) {
return;
unsigned Line = NS.getLineNumber();
+ if (Line == 0)
+ return;
StringRef FN = NS.getFilename();
- StringRef Dir = NS.getDirectory();
- unsigned FileID = GetOrCreateSourceID(Dir, FN);
+ unsigned FileID = GetOrCreateSourceID(FN);
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -588,8 +595,8 @@ void DwarfDebug::addSourceLine(DIE *Die, DINameSpace NS) {
void DwarfDebug::addVariableAddress(DbgVariable *&DV, DIE *Die, int64_t FI) {
MachineLocation Location;
unsigned FrameReg;
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
- int Offset = RI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
+ const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
+ int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
Location.set(FrameReg, Offset);
if (DV->variableHasComplexAddress())
@@ -620,8 +627,7 @@ void DwarfDebug::addComplexAddress(DbgVariable *&DV, DIE *Die,
if (Reg < 32) {
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
} else {
- Reg = Reg - dwarf::DW_OP_reg0;
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
}
} else {
@@ -760,8 +766,7 @@ void DwarfDebug::addBlockByrefAddress(DbgVariable *&DV, DIE *Die,
if (Reg < 32)
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
else {
- Reg = Reg - dwarf::DW_OP_reg0;
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
}
} else {
@@ -812,6 +817,15 @@ void DwarfDebug::addAddress(DIE *Die, unsigned Attribute,
unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+ if (RI->getFrameRegister(*Asm->MF) == Location.getReg()
+ && Location.getOffset()) {
+    // If the variable offset is based on the frame register, use fbreg.
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_fbreg);
+ addSInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
+ addBlock(Die, Attribute, 0, Block);
+ return;
+ }
+
if (Location.isReg()) {
if (Reg < 32) {
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
@@ -834,35 +848,28 @@ void DwarfDebug::addAddress(DIE *Die, unsigned Attribute,
}
/// addRegisterAddress - Add register location entry in variable DIE.
-bool DwarfDebug::addRegisterAddress(DIE *Die, const MCSymbol *VS,
- const MachineOperand &MO) {
+bool DwarfDebug::addRegisterAddress(DIE *Die, const MachineOperand &MO) {
assert (MO.isReg() && "Invalid machine operand!");
if (!MO.getReg())
return false;
MachineLocation Location;
Location.set(MO.getReg());
addAddress(Die, dwarf::DW_AT_location, Location);
- if (VS)
- addLabel(Die, dwarf::DW_AT_start_scope, dwarf::DW_FORM_addr, VS);
return true;
}
/// addConstantValue - Add constant value entry in variable DIE.
-bool DwarfDebug::addConstantValue(DIE *Die, const MCSymbol *VS,
- const MachineOperand &MO) {
+bool DwarfDebug::addConstantValue(DIE *Die, const MachineOperand &MO) {
assert (MO.isImm() && "Invalid machine operand!");
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
unsigned Imm = MO.getImm();
addUInt(Block, 0, dwarf::DW_FORM_udata, Imm);
addBlock(Die, dwarf::DW_AT_const_value, 0, Block);
- if (VS)
- addLabel(Die, dwarf::DW_AT_start_scope, dwarf::DW_FORM_addr, VS);
return true;
}
/// addConstantFPValue - Add constant value entry in variable DIE.
-bool DwarfDebug::addConstantFPValue(DIE *Die, const MCSymbol *VS,
- const MachineOperand &MO) {
+bool DwarfDebug::addConstantFPValue(DIE *Die, const MachineOperand &MO) {
assert (MO.isFPImm() && "Invalid machine operand!");
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
APFloat FPImm = MO.getFPImm()->getValueAPF();
@@ -883,11 +890,42 @@ bool DwarfDebug::addConstantFPValue(DIE *Die, const MCSymbol *VS,
(unsigned char)0xFF & FltPtr[Start]);
addBlock(Die, dwarf::DW_AT_const_value, 0, Block);
- if (VS)
- addLabel(Die, dwarf::DW_AT_start_scope, dwarf::DW_FORM_addr, VS);
return true;
}
+/// addConstantValue - Add constant value entry in variable DIE.
+bool DwarfDebug::addConstantValue(DIE *Die, ConstantInt *CI,
+ bool Unsigned) {
+ if (CI->getBitWidth() <= 64) {
+ if (Unsigned)
+ addUInt(Die, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
+ CI->getZExtValue());
+ else
+ addSInt(Die, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
+ CI->getSExtValue());
+ return true;
+ }
+
+ DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+
+ // Get the raw data form of the large APInt.
+ const APInt Val = CI->getValue();
+ const char *Ptr = (const char*)Val.getRawData();
+
+ int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
+ bool LittleEndian = Asm->getTargetData().isLittleEndian();
+ int Incr = (LittleEndian ? 1 : -1);
+ int Start = (LittleEndian ? 0 : NumBytes - 1);
+ int Stop = (LittleEndian ? NumBytes : -1);
+
+ // Output the constant to DWARF one byte at a time.
+ for (; Start != Stop; Start += Incr)
+ addUInt(Block, 0, dwarf::DW_FORM_data1,
+ (unsigned char)0xFF & Ptr[Start]);
+
+ addBlock(Die, dwarf::DW_AT_const_value, 0, Block);
+ return true;
+}
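
The loop just above emits a wide constant one byte at a time, walking the
APInt's raw 64-bit words forward for little-endian targets and backward for
big-endian ones. A standalone sketch of that traversal follows (plain C++
with illustrative names; like the loop above, it implicitly assumes the word
buffer carries host byte order, i.e. a little-endian host):

#include <cstdint>
#include <cstdio>

// Walk NumBytes bytes of a raw word buffer in the target's byte order,
// mirroring the Incr/Start/Stop computation in addConstantValue above.
void emitBytes(const uint64_t *Words, int NumBytes, bool TargetLittleEndian) {
  const char *Ptr = reinterpret_cast<const char *>(Words);
  int Incr  = TargetLittleEndian ? 1 : -1;
  int Start = TargetLittleEndian ? 0 : NumBytes - 1;
  int Stop  = TargetLittleEndian ? NumBytes : -1;
  for (; Start != Stop; Start += Incr)
    std::printf("%02x ", unsigned(0xFF & Ptr[Start]));
  std::printf("\n");
}

int main() {
  // A 128-bit value as two 64-bit words, least-significant word first.
  uint64_t Val[2] = { 0x1122334455667788ULL, 0x99AABBCCDDEEFF00ULL };
  emitBytes(Val, 16, true);   // target little-endian
  emitBytes(Val, 16, false);  // target big-endian
  return 0;
}
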
/// addToContextOwner - Add Die into the list of its context owner's children.
void DwarfDebug::addToContextOwner(DIE *Die, DIDescriptor Context) {
@@ -898,8 +936,7 @@ void DwarfDebug::addToContextOwner(DIE *Die, DIDescriptor Context) {
DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context));
ContextDIE->addChild(Die);
} else if (Context.isSubprogram()) {
- DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context),
- /*MakeDecl=*/false);
+ DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context));
ContextDIE->addChild(Die);
} else if (DIE *ContextDIE = getCompileUnit(Context)->getDIE(Context))
ContextDIE->addChild(Die);
@@ -1033,16 +1070,23 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIDescriptor RTy = Elements.getElement(0);
addType(&Buffer, DIType(RTy));
- // Add prototype flag.
- addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
-
+ bool isPrototyped = true;
// Add arguments.
for (unsigned i = 1, N = Elements.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
DIDescriptor Ty = Elements.getElement(i);
- addType(Arg, DIType(Ty));
- Buffer.addChild(Arg);
+ if (Ty.isUnspecifiedParameter()) {
+ DIE *Arg = new DIE(dwarf::DW_TAG_unspecified_parameters);
+ Buffer.addChild(Arg);
+ isPrototyped = false;
+ } else {
+ DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
+ addType(Arg, DIType(Ty));
+ Buffer.addChild(Arg);
+ }
}
+ // Add prototype flag.
+ if (isPrototyped)
+ addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
}
break;
case dwarf::DW_TAG_structure_type:
@@ -1060,8 +1104,21 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
for (unsigned i = 0; i < N; ++i) {
DIDescriptor Element = Elements.getElement(i);
DIE *ElemDie = NULL;
- if (Element.isSubprogram())
+ if (Element.isSubprogram()) {
+ DISubprogram SP(Element);
ElemDie = createSubprogramDIE(DISubprogram(Element));
+ if (SP.isProtected())
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ dwarf::DW_ACCESS_protected);
+ else if (SP.isPrivate())
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ dwarf::DW_ACCESS_private);
+ else
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ dwarf::DW_ACCESS_public);
+ if (SP.isExplicit())
+ addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1);
+ }
else if (Element.isVariable()) {
DIVariable DV(Element);
ElemDie = new DIE(dwarf::DW_TAG_variable);
@@ -1094,6 +1151,21 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIDescriptor Context = CTy.getContext();
addToContextOwner(&Buffer, Context);
}
+
+ if (Tag == dwarf::DW_TAG_class_type) {
+ DIArray TParams = CTy.getTemplateParams();
+ unsigned N = TParams.getNumElements();
+ // Add template parameters.
+ for (unsigned i = 0; i < N; ++i) {
+ DIDescriptor Element = TParams.getElement(i);
+ if (Element.isTemplateTypeParameter())
+ Buffer.addChild(getOrCreateTemplateTypeParameterDIE(
+ DITemplateTypeParameter(Element)));
+ else if (Element.isTemplateValueParameter())
+ Buffer.addChild(getOrCreateTemplateValueParameterDIE(
+ DITemplateValueParameter(Element)));
+ }
+ }
break;
}
default:
@@ -1124,6 +1196,38 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
}
}
+/// getOrCreateTemplateTypeParameterDIE - Find existing DIE or create new DIE
+/// for the given DITemplateTypeParameter.
+DIE *
+DwarfDebug::getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP) {
+ CompileUnit *TypeCU = getCompileUnit(TP);
+ DIE *ParamDIE = TypeCU->getDIE(TP);
+ if (ParamDIE)
+ return ParamDIE;
+
+ ParamDIE = new DIE(dwarf::DW_TAG_template_type_parameter);
+ addType(ParamDIE, TP.getType());
+ addString(ParamDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string, TP.getName());
+ return ParamDIE;
+}
+
+/// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE
+/// for the given DITemplateValueParameter.
+DIE *
+DwarfDebug::getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TPV) {
+ CompileUnit *TVCU = getCompileUnit(TPV);
+ DIE *ParamDIE = TVCU->getDIE(TPV);
+ if (ParamDIE)
+ return ParamDIE;
+
+ ParamDIE = new DIE(dwarf::DW_TAG_template_value_parameter);
+ addType(ParamDIE, TPV.getType());
+ addString(ParamDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string, TPV.getName());
+ addUInt(ParamDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
+ TPV.getValue());
+ return ParamDIE;
+}
+
/// constructSubrangeDIE - Construct subrange DIE from DISubrange.
void DwarfDebug::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy){
int64_t L = SR.getLo();
@@ -1258,7 +1362,8 @@ DIE *DwarfDebug::createMemberDIE(DIDerivedType DT) {
else if (DT.isPrivate())
addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
dwarf::DW_ACCESS_private);
- else if (DT.getTag() == dwarf::DW_TAG_inheritance)
+  // Otherwise, C++ members and base classes are considered public.
+ else if (DT.getCompileUnit().getLanguage() == dwarf::DW_LANG_C_plus_plus)
addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
dwarf::DW_ACCESS_public);
if (DT.isVirtual())
@@ -1268,7 +1373,7 @@ DIE *DwarfDebug::createMemberDIE(DIDerivedType DT) {
}
/// createSubprogramDIE - Create new DIE using SP.
-DIE *DwarfDebug::createSubprogramDIE(DISubprogram SP, bool MakeDecl) {
+DIE *DwarfDebug::createSubprogramDIE(DISubprogram SP) {
CompileUnit *SPCU = getCompileUnit(SP);
DIE *SPDie = SPCU->getDIE(SP);
if (SPDie)
@@ -1286,10 +1391,7 @@ DIE *DwarfDebug::createSubprogramDIE(DISubprogram SP, bool MakeDecl) {
addSourceLine(SPDie, SP);
- // Add prototyped tag, if C or ObjC.
- unsigned Lang = SP.getCompileUnit().getLanguage();
- if (Lang == dwarf::DW_LANG_C99 || Lang == dwarf::DW_LANG_C89 ||
- Lang == dwarf::DW_LANG_ObjC)
+ if (SP.isPrototyped())
addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
// Add Return Type.
@@ -1307,13 +1409,13 @@ DIE *DwarfDebug::createSubprogramDIE(DISubprogram SP, bool MakeDecl) {
addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_flag, VK);
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(Block, 0, dwarf::DW_FORM_data1, SP.getVirtualIndex());
+ addUInt(Block, 0, dwarf::DW_FORM_udata, SP.getVirtualIndex());
addBlock(SPDie, dwarf::DW_AT_vtable_elem_location, 0, Block);
ContainingTypeMap.insert(std::make_pair(SPDie,
SP.getContainingType()));
}
- if (MakeDecl || !SP.isDefinition()) {
+ if (!SP.isDefinition()) {
addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
// Add arguments. Do not add arguments for subprogram definition. They will
@@ -1603,6 +1705,8 @@ DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
if (Tag == dwarf::DW_TAG_formal_parameter && DV->getType().isArtificial())
addUInt(VariableDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+ else if (DIVariable(DV->getVariable()).isArtificial())
+ addUInt(VariableDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
if (Scope->isAbstractScope()) {
DV->setDIE(VariableDie);
@@ -1625,7 +1729,6 @@ DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
DbgVariableToDbgInstMap.find(DV);
if (DVI != DbgVariableToDbgInstMap.end()) {
const MachineInstr *DVInsn = DVI->second;
- const MCSymbol *DVLabel = findVariableLabel(DV);
bool updated = false;
// FIXME : Handle getNumOperands != 3
if (DVInsn->getNumOperands() == 3) {
@@ -1637,20 +1740,17 @@ DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
addVariableAddress(DV, VariableDie, DVInsn->getOperand(1).getImm());
updated = true;
} else
- updated = addRegisterAddress(VariableDie, DVLabel, RegOp);
+ updated = addRegisterAddress(VariableDie, RegOp);
}
else if (DVInsn->getOperand(0).isImm())
- updated = addConstantValue(VariableDie, DVLabel, DVInsn->getOperand(0));
+ updated = addConstantValue(VariableDie, DVInsn->getOperand(0));
else if (DVInsn->getOperand(0).isFPImm())
updated =
- addConstantFPValue(VariableDie, DVLabel, DVInsn->getOperand(0));
+ addConstantFPValue(VariableDie, DVInsn->getOperand(0));
} else {
MachineLocation Location = Asm->getDebugValueLocation(DVInsn);
if (Location.getReg()) {
addAddress(VariableDie, dwarf::DW_AT_location, Location);
- if (DVLabel)
- addLabel(VariableDie, dwarf::DW_AT_start_scope, dwarf::DW_FORM_addr,
- DVLabel);
updated = true;
}
}
@@ -1700,6 +1800,16 @@ DIE *DwarfDebug::constructScopeDIE(DbgScope *Scope) {
if (!Scope || !Scope->getScopeNode())
return NULL;
+ SmallVector <DIE *, 8> Children;
+  // Collect lexical scope children first.
+ const SmallVector<DbgVariable *, 8> &Variables = Scope->getDbgVariables();
+ for (unsigned i = 0, N = Variables.size(); i < N; ++i)
+ if (DIE *Variable = constructVariableDIE(Variables[i], Scope))
+ Children.push_back(Variable);
+ const SmallVector<DbgScope *, 4> &Scopes = Scope->getScopes();
+ for (unsigned j = 0, M = Scopes.size(); j < M; ++j)
+ if (DIE *Nested = constructScopeDIE(Scopes[j]))
+ Children.push_back(Nested);
DIScope DS(Scope->getScopeNode());
DIE *ScopeDIE = NULL;
if (Scope->getInlinedAt())
@@ -1715,26 +1825,19 @@ DIE *DwarfDebug::constructScopeDIE(DbgScope *Scope) {
else
ScopeDIE = updateSubprogramScopeDIE(DS);
}
- else
+ else {
+    // There is no need to emit an empty lexical block DIE.
+ if (Children.empty())
+ return NULL;
ScopeDIE = constructLexicalScopeDIE(Scope);
- if (!ScopeDIE) return NULL;
-
- // Add variables to scope.
- const SmallVector<DbgVariable *, 8> &Variables = Scope->getDbgVariables();
- for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
- DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
- if (VariableDIE)
- ScopeDIE->addChild(VariableDIE);
}
+
+ if (!ScopeDIE) return NULL;
- // Add nested scopes.
- const SmallVector<DbgScope *, 4> &Scopes = Scope->getScopes();
- for (unsigned j = 0, M = Scopes.size(); j < M; ++j) {
- // Define the Scope debug information entry.
- DIE *NestedDIE = constructScopeDIE(Scopes[j]);
- if (NestedDIE)
- ScopeDIE->addChild(NestedDIE);
- }
+ // Add children
+ for (SmallVector<DIE *, 8>::iterator I = Children.begin(),
+ E = Children.end(); I != E; ++I)
+ ScopeDIE->addChild(*I);
if (DS.isSubprogram())
addPubTypes(DISubprogram(DS));
@@ -1746,37 +1849,21 @@ DIE *DwarfDebug::constructScopeDIE(DbgScope *Scope) {
/// source file names. If none currently exists, create a new id and insert it
/// in the SourceIds map. This can update DirectoryNames and SourceFileNames
/// maps as well.
-unsigned DwarfDebug::GetOrCreateSourceID(StringRef DirName, StringRef FileName){
- unsigned DId;
- assert (DirName.empty() == false && "Invalid directory name!");
- StringMap<unsigned>::iterator DI = DirectoryIdMap.find(DirName);
- if (DI != DirectoryIdMap.end()) {
- DId = DI->getValue();
- } else {
- DId = DirectoryNames.size() + 1;
- DirectoryIdMap[DirName] = DId;
- DirectoryNames.push_back(DirName);
- }
+unsigned DwarfDebug::GetOrCreateSourceID(StringRef FileName){
+ // If FE did not provide a file name, then assume stdin.
+ if (FileName.empty())
+ return GetOrCreateSourceID("<stdin>");
- unsigned FId;
- StringMap<unsigned>::iterator FI = SourceFileIdMap.find(FileName);
- if (FI != SourceFileIdMap.end()) {
- FId = FI->getValue();
- } else {
- FId = SourceFileNames.size() + 1;
- SourceFileIdMap[FileName] = FId;
- SourceFileNames.push_back(FileName);
- }
+ StringMapEntry<unsigned> &Entry = SourceIdMap.GetOrCreateValue(FileName);
+ if (Entry.getValue())
+ return Entry.getValue();
- DenseMap<std::pair<unsigned, unsigned>, unsigned>::iterator SI =
- SourceIdMap.find(std::make_pair(DId, FId));
- if (SI != SourceIdMap.end())
- return SI->second;
+ unsigned SrcId = SourceIdMap.size();
+ Entry.setValue(SrcId);
- unsigned SrcId = SourceIds.size() + 1; // DW_AT_decl_file cannot be 0.
- SourceIdMap[std::make_pair(DId, FId)] = SrcId;
- SourceIds.push_back(std::make_pair(DId, FId));
+ // Print out a .file directive to specify files for .loc directives.
+ Asm->OutStreamer.EmitDwarfFileDirective(SrcId, FileName);
return SrcId;
}
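
The rewritten GetOrCreateSourceID above collapses the old directory-id plus
file-id bookkeeping into a single map keyed by file name. A self-contained
sketch of the id scheme (std::map standing in for LLVM's StringMap; the
behavior, not the container, is the point): the entry is inserted before
size() is read, so the first id handed out is 1, preserving the old
invariant that DW_AT_decl_file is never 0.

#include <cstdio>
#include <map>
#include <string>

static std::map<std::string, unsigned> SourceIdMap;

unsigned getOrCreateSourceID(std::string FileName) {
  // If the front end did not provide a file name, assume stdin.
  if (FileName.empty())
    FileName = "<stdin>";
  unsigned &Id = SourceIdMap[FileName];  // inserts a 0 entry on first lookup
  if (Id == 0)
    Id = SourceIdMap.size();             // first id handed out is 1
  return Id;
}

int main() {
  unsigned A1 = getOrCreateSourceID("a.c");  // 1
  unsigned B  = getOrCreateSourceID("b.c");  // 2
  unsigned A2 = getOrCreateSourceID("a.c");  // 1 again
  std::printf("%u %u %u\n", A1, B, A2);
  return 0;
}
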
@@ -1802,7 +1889,7 @@ void DwarfDebug::constructCompileUnit(const MDNode *N) {
DICompileUnit DIUnit(N);
StringRef FN = DIUnit.getFilename();
StringRef Dir = DIUnit.getDirectory();
- unsigned ID = GetOrCreateSourceID(Dir, FN);
+ unsigned ID = GetOrCreateSourceID(FN);
DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
addString(Die, dwarf::DW_AT_producer, dwarf::DW_FORM_string,
@@ -1886,6 +1973,32 @@ static bool isUnsignedDIType(DIType Ty) {
return false;
}
+// Return const expression if value is a GEP to access merged global
+// constant, e.g.
+// i8* getelementptr ({ i8, i8, i8, i8 }* @_MergedGlobals, i32 0, i32 0)
+static const ConstantExpr *getMergedGlobalExpr(const Value *V) {
+ const ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(V);
+ if (!CE || CE->getNumOperands() != 3 ||
+ CE->getOpcode() != Instruction::GetElementPtr)
+ return NULL;
+
+ // First operand points to a global value.
+ if (!isa<GlobalValue>(CE->getOperand(0)))
+ return NULL;
+
+ // Second operand is zero.
+ const ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(CE->getOperand(1));
+ if (!CI || !CI->isZero())
+ return NULL;
+
+ // Third operand is offset.
+ if (!isa<ConstantInt>(CE->getOperand(2)))
+ return NULL;
+
+ return CE;
+}
+
/// constructGlobalVariableDIE - Construct global variable DIE.
void DwarfDebug::constructGlobalVariableDIE(const MDNode *N) {
DIGlobalVariable GV(N);
@@ -1952,16 +2065,22 @@ void DwarfDebug::constructGlobalVariableDIE(const MDNode *N) {
} else {
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
}
- } else if (Constant *C = GV.getConstant()) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
- if (isUnsignedDIType(GTy))
- addUInt(VariableDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
- CI->getZExtValue());
- else
- addSInt(VariableDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
- CI->getSExtValue());
- }
+ } else if (ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(GV.getConstant()))
+ addConstantValue(VariableDIE, CI, isUnsignedDIType(GTy));
+ else if (const ConstantExpr *CE = getMergedGlobalExpr(N->getOperand(11))) {
+ // GV is a merged global.
+ DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
+ addLabel(Block, 0, dwarf::DW_FORM_udata,
+ Asm->Mang->getSymbol(cast<GlobalValue>(CE->getOperand(0))));
+ ConstantInt *CII = cast<ConstantInt>(CE->getOperand(2));
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
+ addUInt(Block, 0, dwarf::DW_FORM_udata, CII->getZExtValue());
+ addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
+ addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
}
+
return;
}
@@ -2043,25 +2162,12 @@ void DwarfDebug::beginModule(Module *M) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
getOrCreateTypeDIE(DIType(NMD->getOperand(i)));
+ if (NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.ty"))
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ getOrCreateTypeDIE(DIType(NMD->getOperand(i)));
+
// Prime section data.
SectionMap.insert(Asm->getObjFileLowering().getTextSection());
-
- // Print out .file directives to specify files for .loc directives. These are
- // printed out early so that they precede any .loc directives.
- if (Asm->MAI->hasDotLocAndDotFile()) {
- for (unsigned i = 1, e = getNumSourceIds()+1; i != e; ++i) {
- // Remember source id starts at 1.
- std::pair<unsigned, unsigned> Id = getSourceDirectoryAndFileIds(i);
- // FIXME: don't use sys::path for this! This should not depend on the
- // host.
- sys::Path FullPath(getSourceDirectoryName(Id.first));
- bool AppendOk =
- FullPath.appendComponent(getSourceFileName(Id.second));
- assert(AppendOk && "Could not append filename to directory!");
- AppendOk = false;
- Asm->OutStreamer.EmitDwarfFileDirective(i, FullPath.str());
- }
- }
}
/// endModule - Emit all Dwarf sections that should come after the content.
@@ -2081,8 +2187,7 @@ void DwarfDebug::endModule() {
StringRef FName = SP.getLinkageName();
if (FName.empty())
FName = SP.getName();
- NamedMDNode *NMD =
- M->getNamedMetadata(Twine("llvm.dbg.lv.", getRealLinkageName(FName)));
+ NamedMDNode *NMD = getFnSpecificMDNode(*(MMI->getModule()), FName);
if (!NMD) continue;
unsigned E = NMD->getNumOperands();
if (!E) continue;
@@ -2152,9 +2257,6 @@ void DwarfDebug::endModule() {
// Corresponding abbreviations into a abbrev section.
emitAbbreviations();
- // Emit source line correspondence into a debug line section.
- emitDebugLines();
-
// Emit info into a debug pubnames section.
emitDebugPubNames();
@@ -2242,15 +2344,6 @@ DwarfDebug::collectVariableInfoFromMMITable(const MachineFunction * MF,
}
}
-/// isDbgValueInUndefinedReg - Return true if debug value, encoded by
-/// DBG_VALUE instruction, is in undefined reg.
-static bool isDbgValueInUndefinedReg(const MachineInstr *MI) {
- assert (MI->isDebugValue() && "Invalid DBG_VALUE machine instruction!");
- if (MI->getOperand(0).isReg() && !MI->getOperand(0).getReg())
- return true;
- return false;
-}
-
/// isDbgValueInDefinedReg - Return true if debug value, encoded by
/// DBG_VALUE instruction, is in a defined reg.
static bool isDbgValueInDefinedReg(const MachineInstr *MI) {
@@ -2275,7 +2368,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
II != IE; ++II) {
const MachineInstr *MInsn = II;
- if (!MInsn->isDebugValue() || isDbgValueInUndefinedReg(MInsn))
+ if (!MInsn->isDebugValue())
continue;
DbgValues.push_back(MInsn);
}
@@ -2297,19 +2390,18 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
ME = DbgValues.end(); MI != ME; ++MI) {
const MDNode *Var =
(*MI)->getOperand((*MI)->getNumOperands()-1).getMetadata();
- if (Var == DV && isDbgValueInDefinedReg(*MI) &&
+ if (Var == DV &&
!PrevMI->isIdenticalTo(*MI))
MultipleValues.push_back(*MI);
PrevMI = *MI;
}
- DbgScope *Scope = findDbgScope(MInsn);
- bool CurFnArg = false;
+ DbgScope *Scope = NULL;
if (DV.getTag() == dwarf::DW_TAG_arg_variable &&
DISubprogram(DV.getContext()).describes(MF->getFunction()))
- CurFnArg = true;
- if (!Scope && CurFnArg)
Scope = CurrentFnDbgScope;
+ else
+ Scope = findDbgScope(MInsn);
// If variable scope is not found then skip this variable.
if (!Scope)
continue;
@@ -2317,8 +2409,6 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
Processed.insert(DV);
DbgVariable *RegVar = new DbgVariable(DV);
Scope->addVariable(RegVar);
- if (!CurFnArg)
- DbgVariableLabelsMap[RegVar] = getLabelBeforeInsn(MInsn);
if (DbgVariable *AbsVar = findAbstractVariable(DV, MInsn->getDebugLoc())) {
DbgVariableToDbgInstMap[AbsVar] = MInsn;
VarToAbstractVarMap[RegVar] = AbsVar;
@@ -2375,10 +2465,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
// Collect info for variables that were optimized out.
const Function *F = MF->getFunction();
- const Module *M = F->getParent();
- if (NamedMDNode *NMD =
- M->getNamedMetadata(Twine("llvm.dbg.lv.",
- getRealLinkageName(F->getName())))) {
+ if (NamedMDNode *NMD = getFnSpecificMDNode(*(F->getParent()), F->getName())) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
DIVariable DV(cast<MDNode>(NMD->getOperand(i)));
if (!DV || !Processed.insert(DV))
@@ -2409,8 +2496,8 @@ const MCSymbol *DwarfDebug::getLabelAfterInsn(const MachineInstr *MI) {
return I->second;
}
-/// beginScope - Process beginning of a scope.
-void DwarfDebug::beginScope(const MachineInstr *MI) {
+/// beginInstruction - Process beginning of an instruction.
+void DwarfDebug::beginInstruction(const MachineInstr *MI) {
if (InsnNeedsLabel.count(MI) == 0) {
LabelsBeforeInsn[MI] = PrevLabel;
return;
@@ -2444,8 +2531,8 @@ void DwarfDebug::beginScope(const MachineInstr *MI) {
assert (0 && "Instruction is not processed!");
}
-/// endScope - Process end of a scope.
-void DwarfDebug::endScope(const MachineInstr *MI) {
+/// endInstruction - Process end of an instruction.
+void DwarfDebug::endInstruction(const MachineInstr *MI) {
if (InsnsEndScopeSet.count(MI) != 0) {
// Emit a label if this instruction ends a scope.
MCSymbol *Label = MMI->getContext().CreateTempSymbol();
@@ -2624,6 +2711,10 @@ bool DwarfDebug::extractScopeInformation() {
continue;
}
+    // Ignore DBG_VALUE; it does not contribute any instruction to the output.
+ if (MInsn->isDebugValue())
+ continue;
+
if (RangeBeginMI) {
      // If we have already seen a beginning of an instruction range and
// current instruction scope does not match scope of first instruction
@@ -2727,12 +2818,37 @@ static DebugLoc FindFirstDebugLoc(const MachineFunction *MF) {
return DebugLoc();
}
+#ifndef NDEBUG
+/// CheckLineNumbers - Count basic blocks whose instructions do not have any
+/// line number information.
+static void CheckLineNumbers(const MachineFunction *MF) {
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ bool FoundLineNo = false;
+ for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
+ II != IE; ++II) {
+ const MachineInstr *MI = II;
+ if (!MI->getDebugLoc().isUnknown()) {
+ FoundLineNo = true;
+ break;
+ }
+ }
+ if (!FoundLineNo && I->size())
+ ++BlocksWithoutLineNo;
+ }
+}
+#endif
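
CheckLineNumbers above is a debug-build sanity pass: it bumps the
BlocksWithoutLineNo statistic for every non-empty block in which no
instruction carries a known source location. The same walk over a toy
representation (Inst and Block here are illustrative stand-ins, not LLVM
types):

#include <cstdio>
#include <vector>

struct Inst { bool HasLoc; };          // stand-in for MachineInstr
typedef std::vector<Inst> Block;       // stand-in for MachineBasicBlock

// Count non-empty blocks in which no instruction has a debug location.
unsigned countBlocksWithoutLineNo(const std::vector<Block> &Fn) {
  unsigned N = 0;
  for (size_t I = 0, E = Fn.size(); I != E; ++I) {
    bool FoundLineNo = false;
    for (size_t J = 0, JE = Fn[I].size(); J != JE; ++J)
      if (Fn[I][J].HasLoc) { FoundLineNo = true; break; }
    if (!FoundLineNo && !Fn[I].empty())
      ++N;
  }
  return N;
}

int main() {
  std::vector<Block> Fn(3);
  Fn[0].push_back(Inst{true});   // block 0 has a located instruction
  Fn[1].push_back(Inst{false});  // block 1 does not: counted
  // block 2 is empty: not counted
  std::printf("%u\n", countBlocksWithoutLineNo(Fn));  // prints 1
  return 0;
}
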
+
/// beginFunction - Gather pre-function debug information. Assumes being
/// emitted immediately after the function entry point.
void DwarfDebug::beginFunction(const MachineFunction *MF) {
if (!MMI->hasDebugInfo()) return;
if (!extractScopeInformation()) return;
+#ifndef NDEBUG
+ CheckLineNumbers(MF);
+#endif
+
FunctionBeginSym = Asm->GetTempSymbol("func_begin",
Asm->getFunctionNumber());
// Assumes in correct section after the entry point.
@@ -2775,16 +2891,14 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
DIVariable DV(MI->getOperand(MI->getNumOperands() - 1).getMetadata());
if (!DV.Verify()) continue;
// If DBG_VALUE is for a local variable then it needs a label.
- if (DV.getTag() != dwarf::DW_TAG_arg_variable
- && isDbgValueInUndefinedReg(MI) == false)
+ if (DV.getTag() != dwarf::DW_TAG_arg_variable)
InsnNeedsLabel.insert(MI);
// DBG_VALUE for inlined functions argument needs a label.
else if (!DISubprogram(getDISubprogram(DV.getContext())).
describes(MF->getFunction()))
InsnNeedsLabel.insert(MI);
// DBG_VALUE indicating argument location change needs a label.
- else if (isDbgValueInUndefinedReg(MI) == false
- && !ProcessedArgs.insert(DV))
+ else if (!ProcessedArgs.insert(DV))
InsnNeedsLabel.insert(MI);
} else {
// If location is unknown then instruction needs a location only if
@@ -2820,17 +2934,6 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
SmallPtrSet<const MDNode *, 16> ProcessedVars;
collectVariableInfo(MF, ProcessedVars);
- // Get function line info.
- if (!Lines.empty()) {
- // Get section line info.
- unsigned ID = SectionMap.insert(Asm->getCurrentSection());
- if (SectionSourceLines.size() < ID) SectionSourceLines.resize(ID);
- std::vector<SrcLineInfo> &SectionLineInfos = SectionSourceLines[ID-1];
- // Append the function info to section info.
- SectionLineInfos.insert(SectionLineInfos.end(),
- Lines.begin(), Lines.end());
- }
-
// Construct abstract scopes.
for (SmallVector<DbgScope *, 4>::iterator AI = AbstractScopesList.begin(),
AE = AbstractScopesList.end(); AI != AE; ++AI) {
@@ -2840,10 +2943,8 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
StringRef FName = SP.getLinkageName();
if (FName.empty())
FName = SP.getName();
- const Module *M = MF->getFunction()->getParent();
- if (NamedMDNode *NMD =
- M->getNamedMetadata(Twine("llvm.dbg.lv.",
- getRealLinkageName(FName)))) {
+ if (NamedMDNode *NMD =
+ getFnSpecificMDNode(*(MF->getFunction()->getParent()), FName)) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
DIVariable DV(cast<MDNode>(NMD->getOperand(i)));
if (!DV || !ProcessedVars.insert(DV))
@@ -2875,7 +2976,6 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
DbgVariableToFrameIndexMap.clear();
VarToAbstractVarMap.clear();
DbgVariableToDbgInstMap.clear();
- DbgVariableLabelsMap.clear();
DeleteContainerSeconds(DbgScopeMap);
InsnsEndScopeSet.clear();
ConcreteScopes.clear();
@@ -2884,7 +2984,6 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
AbstractVariables.clear();
LabelsBeforeInsn.clear();
LabelsAfterInsn.clear();
- Lines.clear();
PrevLabel = NULL;
}
@@ -2906,15 +3005,6 @@ bool DwarfDebug::findVariableFrameIndex(const DbgVariable *V, int *FI) {
return true;
}
-/// findVariableLabel - Find MCSymbol for the variable.
-const MCSymbol *DwarfDebug::findVariableLabel(const DbgVariable *V) {
- DenseMap<const DbgVariable *, const MCSymbol *>::iterator I
- = DbgVariableLabelsMap.find(V);
- if (I == DbgVariableLabelsMap.end())
- return NULL;
- else return I->second;
-}
-
/// findDbgScope - Find DbgScope for the debug loc attached with an
/// instruction.
DbgScope *DwarfDebug::findDbgScope(const MachineInstr *MInsn) {
@@ -2940,7 +3030,6 @@ DbgScope *DwarfDebug::findDbgScope(const MachineInstr *MInsn) {
/// the source line list.
MCSymbol *DwarfDebug::recordSourceLine(unsigned Line, unsigned Col,
const MDNode *S) {
- StringRef Dir;
StringRef Fn;
unsigned Src = 1;
@@ -2949,25 +3038,26 @@ MCSymbol *DwarfDebug::recordSourceLine(unsigned Line, unsigned Col,
if (Scope.isCompileUnit()) {
DICompileUnit CU(S);
- Dir = CU.getDirectory();
Fn = CU.getFilename();
+ } else if (Scope.isFile()) {
+ DIFile F(S);
+ Fn = F.getFilename();
} else if (Scope.isSubprogram()) {
DISubprogram SP(S);
- Dir = SP.getDirectory();
Fn = SP.getFilename();
} else if (Scope.isLexicalBlock()) {
DILexicalBlock DB(S);
- Dir = DB.getDirectory();
Fn = DB.getFilename();
} else
assert(0 && "Unexpected scope info");
- Src = GetOrCreateSourceID(Dir, Fn);
+ Src = GetOrCreateSourceID(Fn);
}
- MCSymbol *Label = MMI->getContext().CreateTempSymbol();
- Lines.push_back(SrcLineInfo(Line, Col, Src, Label));
+ Asm->OutStreamer.EmitDwarfLocDirective(Src, Line, Col, DWARF2_FLAG_IS_STMT,
+ 0, 0);
+ MCSymbol *Label = MMI->getContext().CreateTempSymbol();
Asm->OutStreamer.EmitLabel(Label);
return Label;
}
@@ -3151,6 +3241,14 @@ void DwarfDebug::emitDIE(DIE *Die) {
Values[i]->EmitValue(Asm, Form);
break;
}
+ case dwarf::DW_AT_accessibility: {
+ if (Asm->isVerbose()) {
+ DIEInteger *V = cast<DIEInteger>(Values[i]);
+ Asm->OutStreamer.AddComment(dwarf::AccessibilityString(V->getValue()));
+ }
+ Values[i]->EmitValue(Asm, Form);
+ break;
+ }
default:
// Emit an attribute using the defined form.
Values[i]->EmitValue(Asm, Form);
@@ -3270,185 +3368,6 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->EmitInt8(1);
}
-/// emitDebugLines - Emit source line information.
-///
-void DwarfDebug::emitDebugLines() {
- // If the target is using .loc/.file, the assembler will be emitting the
- // .debug_line table automatically.
- if (Asm->MAI->hasDotLocAndDotFile())
- return;
-
- // Minimum line delta, thus ranging from -10..(255-10).
- const int MinLineDelta = -(dwarf::DW_LNS_fixed_advance_pc + 1);
- // Maximum line delta, thus ranging from -10..(255-10).
- const int MaxLineDelta = 255 + MinLineDelta;
-
- // Start the dwarf line section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfLineSection());
-
- // Construct the section header.
- Asm->OutStreamer.AddComment("Length of Source Line Info");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("line_end"),
- Asm->GetTempSymbol("line_begin"), 4);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("line_begin"));
-
- Asm->OutStreamer.AddComment("DWARF version number");
- Asm->EmitInt16(dwarf::DWARF_VERSION);
-
- Asm->OutStreamer.AddComment("Prolog Length");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("line_prolog_end"),
- Asm->GetTempSymbol("line_prolog_begin"), 4);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("line_prolog_begin"));
-
- Asm->OutStreamer.AddComment("Minimum Instruction Length");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("Default is_stmt_start flag");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("Line Base Value (Special Opcodes)");
- Asm->EmitInt8(MinLineDelta);
- Asm->OutStreamer.AddComment("Line Range Value (Special Opcodes)");
- Asm->EmitInt8(MaxLineDelta);
- Asm->OutStreamer.AddComment("Special Opcode Base");
- Asm->EmitInt8(-MinLineDelta);
-
- // Line number standard opcode encodings argument count
- Asm->OutStreamer.AddComment("DW_LNS_copy arg count");
- Asm->EmitInt8(0);
- Asm->OutStreamer.AddComment("DW_LNS_advance_pc arg count");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("DW_LNS_advance_line arg count");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("DW_LNS_set_file arg count");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("DW_LNS_set_column arg count");
- Asm->EmitInt8(1);
- Asm->OutStreamer.AddComment("DW_LNS_negate_stmt arg count");
- Asm->EmitInt8(0);
- Asm->OutStreamer.AddComment("DW_LNS_set_basic_block arg count");
- Asm->EmitInt8(0);
- Asm->OutStreamer.AddComment("DW_LNS_const_add_pc arg count");
- Asm->EmitInt8(0);
- Asm->OutStreamer.AddComment("DW_LNS_fixed_advance_pc arg count");
- Asm->EmitInt8(1);
-
- // Emit directories.
- for (unsigned DI = 1, DE = getNumSourceDirectories()+1; DI != DE; ++DI) {
- const std::string &Dir = getSourceDirectoryName(DI);
- if (Asm->isVerbose()) Asm->OutStreamer.AddComment("Directory");
- Asm->OutStreamer.EmitBytes(StringRef(Dir.c_str(), Dir.size()+1), 0);
- }
-
- Asm->OutStreamer.AddComment("End of directories");
- Asm->EmitInt8(0);
-
- // Emit files.
- for (unsigned SI = 1, SE = getNumSourceIds()+1; SI != SE; ++SI) {
- // Remember source id starts at 1.
- std::pair<unsigned, unsigned> Id = getSourceDirectoryAndFileIds(SI);
- const std::string &FN = getSourceFileName(Id.second);
- if (Asm->isVerbose()) Asm->OutStreamer.AddComment("Source");
- Asm->OutStreamer.EmitBytes(StringRef(FN.c_str(), FN.size()+1), 0);
-
- Asm->EmitULEB128(Id.first, "Directory #");
- Asm->EmitULEB128(0, "Mod date");
- Asm->EmitULEB128(0, "File size");
- }
-
- Asm->OutStreamer.AddComment("End of files");
- Asm->EmitInt8(0);
-
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("line_prolog_end"));
-
- // A sequence for each text section.
- unsigned SecSrcLinesSize = SectionSourceLines.size();
-
- for (unsigned j = 0; j < SecSrcLinesSize; ++j) {
- // Isolate current sections line info.
- const std::vector<SrcLineInfo> &LineInfos = SectionSourceLines[j];
-
- // Dwarf assumes we start with first line of first source file.
- unsigned Source = 1;
- unsigned Line = 1;
-
- // Construct rows of the address, source, line, column matrix.
- for (unsigned i = 0, N = LineInfos.size(); i < N; ++i) {
- const SrcLineInfo &LineInfo = LineInfos[i];
- MCSymbol *Label = LineInfo.getLabel();
- if (!Label->isDefined()) continue; // Not emitted, in dead code.
-
- if (Asm->isVerbose()) {
- std::pair<unsigned, unsigned> SrcID =
- getSourceDirectoryAndFileIds(LineInfo.getSourceID());
- Asm->OutStreamer.AddComment(Twine(getSourceDirectoryName(SrcID.first)) +
- "/" +
- Twine(getSourceFileName(SrcID.second)) +
- ":" + Twine(LineInfo.getLine()));
- }
-
- // Define the line address.
- Asm->OutStreamer.AddComment("Extended Op");
- Asm->EmitInt8(0);
- Asm->OutStreamer.AddComment("Op size");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1);
-
- Asm->OutStreamer.AddComment("DW_LNE_set_address");
- Asm->EmitInt8(dwarf::DW_LNE_set_address);
-
- Asm->OutStreamer.AddComment("Location label");
- Asm->OutStreamer.EmitSymbolValue(Label,
- Asm->getTargetData().getPointerSize(),
- 0/*AddrSpace*/);
-
- // If change of source, then switch to the new source.
- if (Source != LineInfo.getSourceID()) {
- Source = LineInfo.getSourceID();
- Asm->OutStreamer.AddComment("DW_LNS_set_file");
- Asm->EmitInt8(dwarf::DW_LNS_set_file);
- Asm->EmitULEB128(Source, "New Source");
- }
-
- // If change of line.
- if (Line != LineInfo.getLine()) {
- // Determine offset.
- int Offset = LineInfo.getLine() - Line;
- int Delta = Offset - MinLineDelta;
-
- // Update line.
- Line = LineInfo.getLine();
-
- // If delta is small enough and in range...
- if (Delta >= 0 && Delta < (MaxLineDelta - 1)) {
- // ... then use fast opcode.
- Asm->OutStreamer.AddComment("Line Delta");
- Asm->EmitInt8(Delta - MinLineDelta);
- } else {
- // ... otherwise use long hand.
- Asm->OutStreamer.AddComment("DW_LNS_advance_line");
- Asm->EmitInt8(dwarf::DW_LNS_advance_line);
- Asm->EmitSLEB128(Offset, "Line Offset");
- Asm->OutStreamer.AddComment("DW_LNS_copy");
- Asm->EmitInt8(dwarf::DW_LNS_copy);
- }
- } else {
- // Copy the previous row (different address or source)
- Asm->OutStreamer.AddComment("DW_LNS_copy");
- Asm->EmitInt8(dwarf::DW_LNS_copy);
- }
- }
-
- emitEndOfLineMatrix(j + 1);
- }
-
- if (SecSrcLinesSize == 0)
- // Because we're emitting a debug_line section, we still need a line
- // table. The linker and friends expect it to exist. If there's nothing to
- // put into it, emit an empty table.
- emitEndOfLineMatrix(1);
-
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("line_end"));
-}
-
/// emitCommonDebugFrame - Emit common frame info into a debug frame section.
///
void DwarfDebug::emitCommonDebugFrame() {
@@ -3456,8 +3375,8 @@ void DwarfDebug::emitCommonDebugFrame() {
return;
int stackGrowth = Asm->getTargetData().getPointerSize();
- if (Asm->TM.getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsDown)
+ if (Asm->TM.getFrameLowering()->getStackGrowthDirection() ==
+ TargetFrameLowering::StackGrowsDown)
stackGrowth *= -1;
// Start the dwarf frame section.
@@ -3480,10 +3399,11 @@ void DwarfDebug::emitCommonDebugFrame() {
Asm->EmitSLEB128(stackGrowth, "CIE Data Alignment Factor");
Asm->OutStreamer.AddComment("CIE RA Column");
const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
Asm->EmitInt8(RI->getDwarfRegNum(RI->getRARegister(), false));
std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
+ TFI->getInitialFrameState(Moves);
Asm->EmitFrameMoves(Moves, 0, false);
@@ -3667,6 +3587,14 @@ void DwarfDebug::emitDebugLoc() {
if (DotDebugLocEntries.empty())
return;
+ for (SmallVector<DotDebugLocEntry, 4>::iterator
+ I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end();
+ I != E; ++I) {
+ DotDebugLocEntry &Entry = *I;
+ if (I + 1 != DotDebugLocEntries.end())
+ Entry.Merge(I+1);
+ }
+
// Start the dwarf loc section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
@@ -3676,7 +3604,8 @@ void DwarfDebug::emitDebugLoc() {
for (SmallVector<DotDebugLocEntry, 4>::iterator
I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end();
I != E; ++I, ++index) {
- DotDebugLocEntry Entry = *I;
+ DotDebugLocEntry &Entry = *I;
+ if (Entry.isMerged()) continue;
if (Entry.isEmpty()) {
Asm->OutStreamer.EmitIntValue(0, Size, /*addrspace*/0);
Asm->OutStreamer.EmitIntValue(0, Size, /*addrspace*/0);
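
The pre-pass added to emitDebugLoc above coalesces adjacent .debug_loc
entries: when one entry's range flows directly into the next and both
describe the same location, the ranges are fused and the earlier entry is
skipped at emission time. A self-contained sketch of that merge, with
symbols and locations reduced to plain ints (the real Merge, shown in the
header diff below, additionally refuses to touch the empty marker entries):

#include <cstdio>
#include <vector>

struct LocEntry {
  int Begin, End;  // stand-ins for the Begin/End MCSymbols
  int Loc;         // stand-in for MachineLocation
  bool Merged;
  // Mirrors DotDebugLocEntry::Merge: if this entry flows directly into
  // Next with the same location, extend Next backwards and mark this
  // entry merged so emission skips it.
  void mergeInto(LocEntry &Next) {
    if (!(Loc == Next.Loc && End == Next.Begin))
      return;
    Next.Begin = Begin;
    Merged = true;
  }
};

int main() {
  std::vector<LocEntry> Entries = { {0, 4, 7, false},
                                    {4, 9, 7, false},
                                    {9, 12, 3, false} };
  for (size_t I = 0; I + 1 < Entries.size(); ++I)
    Entries[I].mergeInto(Entries[I + 1]);
  for (size_t I = 0; I != Entries.size(); ++I)
    if (!Entries[I].Merged)
      std::printf("[%d,%d) loc=%d\n", Entries[I].Begin, Entries[I].End,
                  Entries[I].Loc);
  // Prints [0,9) loc=7 and [9,12) loc=3: the first two ranges coalesced.
  return 0;
}
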
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index f0ff3bc..7df0510 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DebugLoc.h"
namespace llvm {
@@ -51,6 +52,8 @@ class DIType;
class DINameSpace;
class DISubrange;
class DICompositeType;
+class DITemplateTypeParameter;
+class DITemplateValueParameter;
//===----------------------------------------------------------------------===//
/// SrcLineInfo - This class is used to record source line correspondence.
@@ -71,6 +74,28 @@ public:
MCSymbol *getLabel() const { return Label; }
};
+/// DotDebugLocEntry - This struct describes location entries emitted in
+/// .debug_loc section.
+typedef struct DotDebugLocEntry {
+ const MCSymbol *Begin;
+ const MCSymbol *End;
+ MachineLocation Loc;
+ bool Merged;
+ DotDebugLocEntry() : Begin(0), End(0), Merged(false) {}
+ DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, MachineLocation &L)
+ : Begin(B), End(E), Loc(L), Merged(false) {}
+  /// Empty entries are also used as a trigger to emit a temp label. Such
+  /// labels are referenced to find the debug_loc offset for a given DIE.
+ bool isEmpty() { return Begin == 0 && End == 0; }
+ bool isMerged() { return Merged; }
+ void Merge(DotDebugLocEntry *Next) {
+ if (!(Begin && Loc == Next->Loc && End == Next->Begin))
+ return;
+ Next->Begin = Begin;
+ Merged = true;
+ }
+} DotDebugLocEntry;
+
class DwarfDebug {
/// Asm - Target of Dwarf emission.
AsmPrinter *Asm;
@@ -93,30 +118,9 @@ class DwarfDebug {
///
std::vector<DIEAbbrev *> Abbreviations;
- /// DirectoryIdMap - Directory name to directory id map.
- ///
- StringMap<unsigned> DirectoryIdMap;
-
- /// DirectoryNames - A list of directory names.
- SmallVector<std::string, 8> DirectoryNames;
-
- /// SourceFileIdMap - Source file name to source file id map.
- ///
- StringMap<unsigned> SourceFileIdMap;
-
- /// SourceFileNames - A list of source file names.
- SmallVector<std::string, 8> SourceFileNames;
-
/// SourceIdMap - Source id map, i.e. pair of directory id and source file
/// id mapped to a unique id.
- DenseMap<std::pair<unsigned, unsigned>, unsigned> SourceIdMap;
-
- /// SourceIds - Reverse map from source id to directory id + file id pair.
- ///
- SmallVector<std::pair<unsigned, unsigned>, 8> SourceIds;
-
- /// Lines - List of source line correspondence.
- std::vector<SrcLineInfo> Lines;
+ StringMap<unsigned> SourceIdMap;
/// DIEBlocks - A list of all the DIEBlocks in use.
std::vector<DIEBlock *> DIEBlocks;
@@ -135,10 +139,6 @@ class DwarfDebug {
///
UniqueVector<const MCSection*> SectionMap;
- /// SectionSourceLines - Tracks line numbers per text section.
- ///
- std::vector<std::vector<SrcLineInfo> > SectionSourceLines;
-
// CurrentFnDbgScope - Top level scope for the current function.
//
DbgScope *CurrentFnDbgScope;
@@ -175,23 +175,6 @@ class DwarfDebug {
/// machine instruction.
DenseMap<const DbgVariable *, const MachineInstr *> DbgVariableToDbgInstMap;
- /// DbgVariableLabelsMap - Maps DbgVariable to corresponding MCSymbol.
- DenseMap<const DbgVariable *, const MCSymbol *> DbgVariableLabelsMap;
-
- /// DotDebugLocEntry - This struct describes location entries emitted in
- /// .debug_loc section.
- typedef struct DotDebugLocEntry {
- const MCSymbol *Begin;
- const MCSymbol *End;
- MachineLocation Loc;
- DotDebugLocEntry() : Begin(0), End(0) {}
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E,
- MachineLocation &L) : Begin(B), End(E), Loc(L) {}
- /// Empty entries are also used as a trigger to emit temp label. Such
- /// labels are referenced is used to find debug_loc offset for a given DIE.
- bool isEmpty() { return Begin == 0 && End == 0; }
- } DotDebugLocEntry;
-
/// DotDebugLocEntries - Collection of DotDebugLocEntry.
SmallVector<DotDebugLocEntry, 4> DotDebugLocEntries;
@@ -265,35 +248,10 @@ class DwarfDebug {
DIEInteger *DIEIntegerOne;
private:
-
- /// getSourceDirectoryAndFileIds - Return the directory and file ids that
- /// maps to the source id. Source id starts at 1.
- std::pair<unsigned, unsigned>
- getSourceDirectoryAndFileIds(unsigned SId) const {
- return SourceIds[SId-1];
- }
-
- /// getNumSourceDirectories - Return the number of source directories in the
- /// debug info.
- unsigned getNumSourceDirectories() const {
- return DirectoryNames.size();
- }
-
- /// getSourceDirectoryName - Return the name of the directory corresponding
- /// to the id.
- const std::string &getSourceDirectoryName(unsigned Id) const {
- return DirectoryNames[Id - 1];
- }
-
- /// getSourceFileName - Return the name of the source file corresponding
- /// to the id.
- const std::string &getSourceFileName(unsigned Id) const {
- return SourceFileNames[Id - 1];
- }
/// getNumSourceIds - Return the number of unique source ids.
unsigned getNumSourceIds() const {
- return SourceIds.size();
+ return SourceIdMap.size();
}
/// assignAbbrevNumber - Define a unique number for the abbreviation.
@@ -349,13 +307,14 @@ private:
const MachineLocation &Location);
/// addRegisterAddress - Add register location entry in variable DIE.
- bool addRegisterAddress(DIE *Die, const MCSymbol *VS, const MachineOperand &MO);
+ bool addRegisterAddress(DIE *Die, const MachineOperand &MO);
/// addConstantValue - Add constant value entry in variable DIE.
- bool addConstantValue(DIE *Die, const MCSymbol *VS, const MachineOperand &MO);
+ bool addConstantValue(DIE *Die, const MachineOperand &MO);
+ bool addConstantValue(DIE *Die, ConstantInt *CI, bool Unsigned);
/// addConstantFPValue - Add constant value entry in variable DIE.
- bool addConstantFPValue(DIE *Die, const MCSymbol *VS, const MachineOperand &MO);
+ bool addConstantFPValue(DIE *Die, const MachineOperand &MO);
/// addComplexAddress - Start with the address based on the location provided,
/// and generate the DWARF information necessary to find the actual variable
@@ -393,6 +352,14 @@ private:
/// given DIType.
DIE *getOrCreateTypeDIE(DIType Ty);
+ /// getOrCreateTemplateTypeParameterDIE - Find existing DIE or create new DIE
+ /// for the given DITemplateTypeParameter.
+ DIE *getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP);
+
+ /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE
+ /// for the given DITemplateValueParameter.
+ DIE *getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TVP);
+
void addPubTypes(DISubprogram SP);
/// constructTypeDIE - Construct basic type die from DIBasicType.
@@ -421,7 +388,7 @@ private:
DIE *createMemberDIE(DIDerivedType DT);
/// createSubprogramDIE - Create new DIE using SP.
- DIE *createSubprogramDIE(DISubprogram SP, bool MakeDecl = false);
+ DIE *createSubprogramDIE(DISubprogram SP);
/// getOrCreateDbgScope - Create DbgScope for the scope.
DbgScope *getOrCreateDbgScope(const MDNode *Scope, const MDNode *InlinedAt);
@@ -481,10 +448,6 @@ private:
///
void emitEndOfLineMatrix(unsigned SectionEnd);
- /// emitDebugLines - Emit source line information.
- ///
- void emitDebugLines();
-
/// emitCommonDebugFrame - Emit common frame info into a debug frame section.
///
void emitCommonDebugFrame();
@@ -543,9 +506,8 @@ private:
/// GetOrCreateSourceID - Look up the source id with the given directory and
/// source file names. If none currently exists, create a new id and insert it
- /// in the SourceIds map. This can update DirectoryNames and SourceFileNames
- /// maps as well.
- unsigned GetOrCreateSourceID(StringRef DirName, StringRef FileName);
+ /// in the SourceIds map.
+ unsigned GetOrCreateSourceID(StringRef FullName);
/// constructCompileUnit - Create new CompileUnit for the given
/// metadata node with tag DW_TAG_compile_unit.
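Collapsing the separate (directory, file) keys into a single full-name key makes ID assignment a one-lookup operation. A minimal sketch of the pattern using standard containers (illustrative; the real code uses LLVM's StringMap and this SourceIdMap member):

#include <map>
#include <string>

// Assign stable, 1-based ids to unique source names; looking up a
// name that already has an id returns the existing one.
unsigned getOrCreateSourceId(std::map<std::string, unsigned> &Ids,
                             const std::string &FullName) {
  std::map<std::string, unsigned>::iterator I = Ids.find(FullName);
  if (I != Ids.end())
    return I->second;
  unsigned Id = Ids.size() + 1; // id 0 is commonly reserved
  Ids[FullName] = Id;
  return Id;
}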
@@ -565,12 +527,6 @@ private:
/// the source line list.
MCSymbol *recordSourceLine(unsigned Line, unsigned Col, const MDNode *Scope);
- /// getSourceLineCount - Return the number of source lines in the debug
- /// info.
- unsigned getSourceLineCount() const {
- return Lines.size();
- }
-
/// recordVariableFrameIndex - Record a variable's index.
void recordVariableFrameIndex(const DbgVariable *V, int Index);
@@ -578,9 +534,6 @@ private:
/// is found. Update FI to hold value of the index.
bool findVariableFrameIndex(const DbgVariable *V, int *FI);
- /// findVariableLabel - Find MCSymbol for the variable.
- const MCSymbol *findVariableLabel(const DbgVariable *V);
-
/// findDbgScope - Find DbgScope for the debug loc attached with an
/// instruction.
DbgScope *findDbgScope(const MachineInstr *MI);
@@ -630,11 +583,11 @@ public:
/// getLabelAfterInsn - Return Label immediately following the instruction.
const MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
- /// beginScope - Process beginning of a scope.
- void beginScope(const MachineInstr *MI);
+ /// beginInstruction - Process beginning of an instruction.
+ void beginInstruction(const MachineInstr *MI);
- /// endScope - Prcess end of a scope.
- void endScope(const MachineInstr *MI);
+ /// endInstruction - Process end of an instruction.
+ void endInstruction(const MachineInstr *MI);
};
} // End of namespace llvm
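The beginScope/endScope pair becomes per-instruction hooks. A hypothetical caller sketching how an emitter might drive them (the struct and loop are illustrative, not LLVM's actual AsmPrinter):

struct InstrSketch { int Opcode; };

struct DebugInfoSketch {
  void beginInstruction(const InstrSketch *) { /* label before insn */ }
  void endInstruction(const InstrSketch *)   { /* label after insn  */ }
};

void emitAll(DebugInfoSketch &DD, const InstrSketch *Insns, unsigned N) {
  for (unsigned i = 0; i != N; ++i) {
    DD.beginInstruction(&Insns[i]);
    // ... print the instruction itself ...
    DD.endInstruction(&Insns[i]);
  }
}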
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
index 86a3688..967a278 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
@@ -26,7 +26,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -39,238 +39,10 @@
using namespace llvm;
DwarfException::DwarfException(AsmPrinter *A)
- : Asm(A), MMI(Asm->MMI), shouldEmitTable(false), shouldEmitMoves(false),
- shouldEmitTableModule(false), shouldEmitMovesModule(false) {}
+ : Asm(A), MMI(Asm->MMI) {}
DwarfException::~DwarfException() {}
-/// EmitCIE - Emit a Common Information Entry (CIE). This holds information that
-/// is shared among many Frame Description Entries. There is at least one CIE
-/// in every non-empty .debug_frame section.
-void DwarfException::EmitCIE(const Function *PersonalityFn, unsigned Index) {
- // Size and sign of stack growth.
- int stackGrowth = Asm->getTargetData().getPointerSize();
- if (Asm->TM.getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsDown)
- stackGrowth *= -1;
-
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- // Begin eh frame section.
- Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
-
- MCSymbol *EHFrameSym;
- if (TLOF.isFunctionEHFrameSymbolPrivate())
- EHFrameSym = Asm->GetTempSymbol("EH_frame", Index);
- else
- EHFrameSym = Asm->OutContext.GetOrCreateSymbol(Twine("EH_frame") +
- Twine(Index));
- Asm->OutStreamer.EmitLabel(EHFrameSym);
-
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_eh_frame", Index));
-
- // Define base labels.
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common", Index));
-
- // Define the eh frame length.
- Asm->OutStreamer.AddComment("Length of Common Information Entry");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("eh_frame_common_end", Index),
- Asm->GetTempSymbol("eh_frame_common_begin", Index),
- 4);
-
- // EH frame header.
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common_begin",Index));
- Asm->OutStreamer.AddComment("CIE Identifier Tag");
- Asm->OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
- Asm->OutStreamer.AddComment("DW_CIE_VERSION");
- Asm->OutStreamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1/*size*/, 0/*addr*/);
-
- // The personality presence indicates that language specific information will
- // show up in the eh frame. Find out how we are supposed to lower the
- // personality function reference:
-
- unsigned LSDAEncoding = TLOF.getLSDAEncoding();
- unsigned FDEEncoding = TLOF.getFDEEncoding();
- unsigned PerEncoding = TLOF.getPersonalityEncoding();
-
- char Augmentation[6] = { 0 };
- unsigned AugmentationSize = 0;
- char *APtr = Augmentation + 1;
-
- if (PersonalityFn) {
- // There is a personality function.
- *APtr++ = 'P';
- AugmentationSize += 1 + Asm->GetSizeOfEncodedValue(PerEncoding);
- }
-
- if (UsesLSDA[Index]) {
- // An LSDA pointer is in the FDE augmentation.
- *APtr++ = 'L';
- ++AugmentationSize;
- }
-
- if (FDEEncoding != dwarf::DW_EH_PE_absptr) {
- // A non-default pointer encoding for the FDE.
- *APtr++ = 'R';
- ++AugmentationSize;
- }
-
- if (APtr != Augmentation + 1)
- Augmentation[0] = 'z';
-
- Asm->OutStreamer.AddComment("CIE Augmentation");
- Asm->OutStreamer.EmitBytes(StringRef(Augmentation, strlen(Augmentation)+1),0);
-
- // Round out reader.
- Asm->EmitULEB128(1, "CIE Code Alignment Factor");
- Asm->EmitSLEB128(stackGrowth, "CIE Data Alignment Factor");
- Asm->OutStreamer.AddComment("CIE Return Address Column");
-
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
- Asm->EmitInt8(RI->getDwarfRegNum(RI->getRARegister(), true));
-
- if (Augmentation[0]) {
- Asm->EmitULEB128(AugmentationSize, "Augmentation Size");
-
- // If there is a personality, we need to indicate the function's location.
- if (PersonalityFn) {
- Asm->EmitEncodingByte(PerEncoding, "Personality");
- Asm->OutStreamer.AddComment("Personality");
- Asm->EmitReference(PersonalityFn, PerEncoding);
- }
- if (UsesLSDA[Index])
- Asm->EmitEncodingByte(LSDAEncoding, "LSDA");
- if (FDEEncoding != dwarf::DW_EH_PE_absptr)
- Asm->EmitEncodingByte(FDEEncoding, "FDE");
- }
-
- // Indicate locations of general callee saved registers in frame.
- std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
- Asm->EmitFrameMoves(Moves, 0, true);
-
- // On Darwin the linker honors the alignment of eh_frame, which means it must
- // be 8-byte on 64-bit targets to match what gcc does. Otherwise you get
- // holes which confuse readers of eh_frame.
- Asm->EmitAlignment(Asm->getTargetData().getPointerSize() == 4 ? 2 : 3);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common_end", Index));
-}
-
-/// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
-void DwarfException::EmitFDE(const FunctionEHFrameInfo &EHFrameInfo) {
- assert(!EHFrameInfo.function->hasAvailableExternallyLinkage() &&
- "Should not emit 'available externally' functions at all");
-
- const Function *TheFunc = EHFrameInfo.function;
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- unsigned LSDAEncoding = TLOF.getLSDAEncoding();
- unsigned FDEEncoding = TLOF.getFDEEncoding();
-
- Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
-
- // Externally visible entry into the functions eh frame info. If the
- // corresponding function is static, this should not be externally visible.
- if (!TheFunc->hasLocalLinkage() && TLOF.isFunctionEHSymbolGlobal())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,MCSA_Global);
-
- // If corresponding function is weak definition, this should be too.
- if (TheFunc->isWeakForLinker() && Asm->MAI->getWeakDefDirective())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- MCSA_WeakDefinition);
-
- // If corresponding function is hidden, this should be too.
- if (TheFunc->hasHiddenVisibility())
- if (MCSymbolAttr HiddenAttr = Asm->MAI->getHiddenVisibilityAttr())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- HiddenAttr);
-
- // If there are no calls then you can't unwind. This may mean we can omit the
- // EH Frame, but some environments do not handle weak absolute symbols. If
- // UnwindTablesMandatory is set we cannot do this optimization; the unwind
- // info is to be available for non-EH uses.
- if (!EHFrameInfo.adjustsStack && !UnwindTablesMandatory &&
- (!TheFunc->isWeakForLinker() ||
- !Asm->MAI->getWeakDefDirective() ||
- TLOF.getSupportsWeakOmittedEHFrame())) {
- Asm->OutStreamer.EmitAssignment(EHFrameInfo.FunctionEHSym,
- MCConstantExpr::Create(0, Asm->OutContext));
- // This name has no connection to the function, so it might get
- // dead-stripped when the function is not, erroneously. Prohibit
- // dead-stripping unconditionally.
- if (Asm->MAI->hasNoDeadStrip())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- MCSA_NoDeadStrip);
- } else {
- Asm->OutStreamer.EmitLabel(EHFrameInfo.FunctionEHSym);
-
- // EH frame header.
- Asm->OutStreamer.AddComment("Length of Frame Information Entry");
- Asm->EmitLabelDifference(
- Asm->GetTempSymbol("eh_frame_end", EHFrameInfo.Number),
- Asm->GetTempSymbol("eh_frame_begin", EHFrameInfo.Number), 4);
-
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_begin",
- EHFrameInfo.Number));
-
- Asm->OutStreamer.AddComment("FDE CIE offset");
- Asm->EmitLabelDifference(
- Asm->GetTempSymbol("eh_frame_begin", EHFrameInfo.Number),
- Asm->GetTempSymbol("eh_frame_common",
- EHFrameInfo.PersonalityIndex), 4);
-
- MCSymbol *EHFuncBeginSym =
- Asm->GetTempSymbol("eh_func_begin", EHFrameInfo.Number);
-
- Asm->OutStreamer.AddComment("FDE initial location");
- Asm->EmitReference(EHFuncBeginSym, FDEEncoding);
-
- Asm->OutStreamer.AddComment("FDE address range");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("eh_func_end",
- EHFrameInfo.Number),
- EHFuncBeginSym,
- Asm->GetSizeOfEncodedValue(FDEEncoding));
-
- // If there is a personality and landing pads then point to the language
- // specific data area in the exception table.
- if (MMI->getPersonalities()[0] != NULL) {
- unsigned Size = Asm->GetSizeOfEncodedValue(LSDAEncoding);
-
- Asm->EmitULEB128(Size, "Augmentation size");
- Asm->OutStreamer.AddComment("Language Specific Data Area");
- if (EHFrameInfo.hasLandingPads)
- Asm->EmitReference(Asm->GetTempSymbol("exception", EHFrameInfo.Number),
- LSDAEncoding);
- else
- Asm->OutStreamer.EmitIntValue(0, Size/*size*/, 0/*addrspace*/);
-
- } else {
- Asm->EmitULEB128(0, "Augmentation size");
- }
-
- // Indicate locations of function specific callee saved registers in frame.
- Asm->EmitFrameMoves(EHFrameInfo.Moves, EHFuncBeginSym, true);
-
- // On Darwin the linker honors the alignment of eh_frame, which means it
- // must be 8-byte on 64-bit targets to match what gcc does. Otherwise you
- // get holes which confuse readers of eh_frame.
- Asm->EmitAlignment(Asm->getTargetData().getPointerSize() == 4 ? 2 : 3);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_end",
- EHFrameInfo.Number));
-
- // If the function is marked used, this table should be also. We cannot
- // make the mark unconditional in this case, since retaining the table also
- // retains the function in this case, and there is code around that depends
- // on unused functions (calling undefined externals) being dead-stripped to
- // link correctly. Yes, there really is.
- if (MMI->isUsedFunction(EHFrameInfo.function))
- if (Asm->MAI->hasNoDeadStrip())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- MCSA_NoDeadStrip);
- }
- Asm->OutStreamer.AddBlankLine();
-}
-
/// SharedTypeIds - How many leading type ids two landing pads have in common.
unsigned DwarfException::SharedTypeIds(const LandingPadInfo *L,
const LandingPadInfo *R) {
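SharedTypeIds computes the length of the common prefix of two landing pads' type-id lists. A self-contained sketch of that idea, with vectors standing in for LandingPadInfo::TypeIds:

#include <cstddef>
#include <vector>

std::size_t sharedPrefixLength(const std::vector<int> &L,
                               const std::vector<int> &R) {
  std::size_t MinSize = L.size() < R.size() ? L.size() : R.size();
  std::size_t Count = 0;
  while (Count != MinSize && L[Count] == R[Count])
    ++Count;
  return Count;
}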
@@ -422,7 +194,7 @@ bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
const MachineOperand &MO = MI->getOperand(I);
if (!MO.isGlobal()) continue;
-
+
const Function *F = dyn_cast<Function>(MO.getGlobal());
if (F == 0) continue;
@@ -430,7 +202,7 @@ bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
// Be conservative. If we have more than one function operand for this
// call, then we can't make the assumption that it's the callee and
// not a parameter to the call.
- //
+ //
// FIXME: Determine if there's a way to say that `F' is the callee or
// parameter.
MarkedNoUnwind = false;
@@ -497,8 +269,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
// instruction between the previous try-range and this one may throw,
// create a call-site entry with no landing pad for the region between the
// try-ranges.
- if (SawPotentiallyThrowing &&
- Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
+ if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
CallSiteEntry Site = { LastLabel, BeginLabel, 0, 0 };
CallSites.push_back(Site);
PreviousIsInvoke = false;
@@ -520,8 +291,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
};
// Try to merge with the previous call-site. SJLJ doesn't do this
- if (PreviousIsInvoke &&
- Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
+ if (PreviousIsInvoke && Asm->MAI->isExceptionHandlingDwarf()) {
CallSiteEntry &Prev = CallSites.back();
if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
// Extend the range of the previous entry.
@@ -531,7 +301,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
}
// Otherwise, create a new call-site.
- if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf)
+ if (Asm->MAI->isExceptionHandlingDwarf())
CallSites.push_back(Site);
else {
// SjLj EH must maintain the call sites in the order assigned
@@ -549,8 +319,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
// If some instruction between the previous try-range and the end of the
// function may throw, create a call-site entry with no landing pad for the
// region following the try-range.
- if (SawPotentiallyThrowing &&
- Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
+ if (SawPotentiallyThrowing && Asm->MAI->isExceptionHandlingDwarf()) {
CallSiteEntry Site = { LastLabel, 0, 0, 0 };
CallSites.push_back(Site);
}
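The zero-filled entries above are the interesting case. A sketch of the record shape these hunks manipulate, with field meanings as suggested by the surrounding comments (illustrative; the real CallSiteEntry is declared in DwarfException.h):

struct CallSiteEntrySketch {
  const void *BeginLabel; // null: region starts at function entry
  const void *EndLabel;   // null: region runs to function end
  const void *PadLabel;   // null: may throw, but has no landing pad
  unsigned    Action;     // action-table index; its meaning belongs
                          // to the personality routine
};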
@@ -620,7 +389,7 @@ void DwarfException::EmitExceptionTable() {
// Call sites.
bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
bool HaveTTData = IsSJLJ ? (!TypeInfos.empty() || !FilterIds.empty()) : true;
-
+
unsigned CallSiteTableLength;
if (IsSJLJ)
CallSiteTableLength = 0;
@@ -628,7 +397,7 @@ void DwarfException::EmitExceptionTable() {
unsigned SiteStartSize = 4; // dwarf::DW_EH_PE_udata4
unsigned SiteLengthSize = 4; // dwarf::DW_EH_PE_udata4
unsigned LandingPadSize = 4; // dwarf::DW_EH_PE_udata4
- CallSiteTableLength =
+ CallSiteTableLength =
CallSites.size() * (SiteStartSize + SiteLengthSize + LandingPadSize);
}
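For the DWARF case the size above is pure arithmetic; a worked form of the same computation (SJLJ keeps the length at 0 because its call sites are implicit):

unsigned callSiteTableLength(unsigned NumSites) {
  const unsigned SiteStartSize  = 4; // dwarf::DW_EH_PE_udata4
  const unsigned SiteLengthSize = 4; // dwarf::DW_EH_PE_udata4
  const unsigned LandingPadSize = 4; // dwarf::DW_EH_PE_udata4
  return NumSites * (SiteStartSize + SiteLengthSize + LandingPadSize);
}
// e.g. callSiteTableLength(3) == 36 bytes, before the variable-length
// ULEB128 action indices that follow each record are accounted for.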
@@ -656,15 +425,15 @@ void DwarfException::EmitExceptionTable() {
// mode, this reference will require a relocation by the dynamic linker.
//
// Because of this, we have a couple of options:
- //
+ //
// 1) If we are in -static mode, we can always use an absolute reference
// from the LSDA, because the static linker will resolve it.
- //
+ //
// 2) Otherwise, if the LSDA section is writable, we can output the direct
// reference to the typeinfo and allow the dynamic linker to relocate
// it. Since it is in a writable section, the dynamic linker won't
// have a problem.
- //
+ //
// 3) Finally, if we're in PIC mode and the LDSA section isn't writable,
// we need to use some form of indirection. For example, on Darwin,
// we can output a statically-relocatable reference to a dyld stub. The
@@ -682,11 +451,14 @@ void DwarfException::EmitExceptionTable() {
}
// Begin the exception table.
- Asm->OutStreamer.SwitchSection(LSDASection);
+ // Sometimes we do not want to emit the data into a separate section (e.g. ARM
+ // EHABI). In this case LSDASection will be NULL.
+ if (LSDASection)
+ Asm->OutStreamer.SwitchSection(LSDASection);
Asm->EmitAlignment(2);
// Emit the LSDA.
- MCSymbol *GCCETSym =
+ MCSymbol *GCCETSym =
Asm->OutContext.GetOrCreateSymbol(Twine("GCC_except_table")+
Twine(Asm->getFunctionNumber()));
Asm->OutStreamer.EmitLabel(GCCETSym);
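A note on the guard just added: targets that carry the LSDA inline in their unwind tables signal that with a null LSDASection, so emission simply stays in the current section. A condensed sketch of the decision, under that assumption:

// Hypothetical shape of the control flow above.
// if (LSDASection)                      // normal case: dedicated section
//   switch to LSDASection;
// else                                  // ARM-EHABI-style target:
//   keep emitting into whatever section is current.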
@@ -764,7 +536,7 @@ void DwarfException::EmitExceptionTable() {
}
} else {
// DWARF Exception handling
- assert(Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf);
+ assert(Asm->MAI->isExceptionHandlingDwarf());
// The call-site table is a list of all call sites that may throw an
// exception (including C++ 'throw' statements) in the procedure
@@ -793,23 +565,23 @@ void DwarfException::EmitExceptionTable() {
for (SmallVectorImpl<CallSiteEntry>::const_iterator
I = CallSites.begin(), E = CallSites.end(); I != E; ++I) {
const CallSiteEntry &S = *I;
-
+
MCSymbol *EHFuncBeginSym =
Asm->GetTempSymbol("eh_func_begin", Asm->getFunctionNumber());
-
+
MCSymbol *BeginLabel = S.BeginLabel;
if (BeginLabel == 0)
BeginLabel = EHFuncBeginSym;
MCSymbol *EndLabel = S.EndLabel;
if (EndLabel == 0)
EndLabel = Asm->GetTempSymbol("eh_func_end", Asm->getFunctionNumber());
-
+
// Offset of the call site relative to the previous call site, counted in
// number of 16-byte bundles. The first call site is counted relative to
// the start of the procedure fragment.
Asm->OutStreamer.AddComment("Region start");
Asm->EmitLabelDifference(BeginLabel, EHFuncBeginSym, 4);
-
+
Asm->OutStreamer.AddComment("Region length");
Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
@@ -834,7 +606,7 @@ void DwarfException::EmitExceptionTable() {
Asm->OutStreamer.AddComment("-- Action Record Table --");
Asm->OutStreamer.AddBlankLine();
}
-
+
for (SmallVectorImpl<ActionEntry>::const_iterator
I = Actions.begin(), E = Actions.end(); I != E; ++I) {
const ActionEntry &Action = *I;
@@ -888,73 +660,17 @@ void DwarfException::EmitExceptionTable() {
/// EndModule - Emit all exception information that should come after the
/// content.
void DwarfException::EndModule() {
- if (Asm->MAI->getExceptionHandlingType() != ExceptionHandling::Dwarf)
- return;
-
- if (!shouldEmitMovesModule && !shouldEmitTableModule)
- return;
-
- const std::vector<const Function*> &Personalities = MMI->getPersonalities();
-
- for (unsigned I = 0, E = Personalities.size(); I < E; ++I)
- EmitCIE(Personalities[I], I);
-
- for (std::vector<FunctionEHFrameInfo>::iterator
- I = EHFrames.begin(), E = EHFrames.end(); I != E; ++I)
- EmitFDE(*I);
+ assert(0 && "Should be implemented");
}
/// BeginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
void DwarfException::BeginFunction(const MachineFunction *MF) {
- shouldEmitTable = shouldEmitMoves = false;
-
- // If any landing pads survive, we need an EH table.
- shouldEmitTable = !MMI->getLandingPads().empty();
-
- // See if we need frame move info.
- shouldEmitMoves =
- !Asm->MF->getFunction()->doesNotThrow() || UnwindTablesMandatory;
-
- if (shouldEmitMoves || shouldEmitTable)
- // Assumes in correct section after the entry point.
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
- Asm->getFunctionNumber()));
-
- shouldEmitTableModule |= shouldEmitTable;
- shouldEmitMovesModule |= shouldEmitMoves;
+ assert(0 && "Should be implemented");
}
/// EndFunction - Gather and emit post-function exception information.
///
void DwarfException::EndFunction() {
- if (!shouldEmitMoves && !shouldEmitTable) return;
-
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_end",
- Asm->getFunctionNumber()));
-
- // Record if this personality index uses a landing pad.
- bool HasLandingPad = !MMI->getLandingPads().empty();
- UsesLSDA[MMI->getPersonalityIndex()] |= HasLandingPad;
-
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
-
- if (HasLandingPad)
- EmitExceptionTable();
-
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
- MCSymbol *FunctionEHSym =
- Asm->GetSymbolWithGlobalValueBase(Asm->MF->getFunction(), ".eh",
- TLOF.isFunctionEHFrameSymbolPrivate());
-
- // Save EH frame information
- EHFrames.
- push_back(FunctionEHFrameInfo(FunctionEHSym,
- Asm->getFunctionNumber(),
- MMI->getPersonalityIndex(),
- Asm->MF->getFrameInfo()->adjustsStack(),
- !MMI->getLandingPads().empty(),
- MMI->getFrameMoves(),
- Asm->MF->getFunction()));
+ assert(0 && "Should be implemented");
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
index bc311e6..a172e53 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -35,60 +35,13 @@ class AsmPrinter;
/// DwarfException - Emits Dwarf exception handling directives.
///
class DwarfException {
+protected:
/// Asm - Target of Dwarf emission.
AsmPrinter *Asm;
/// MMI - Collected machine module information.
MachineModuleInfo *MMI;
- struct FunctionEHFrameInfo {
- MCSymbol *FunctionEHSym; // L_foo.eh
- unsigned Number;
- unsigned PersonalityIndex;
- bool adjustsStack;
- bool hasLandingPads;
- std::vector<MachineMove> Moves;
- const Function *function;
-
- FunctionEHFrameInfo(MCSymbol *EHSym, unsigned Num, unsigned P,
- bool hC, bool hL,
- const std::vector<MachineMove> &M,
- const Function *f):
- FunctionEHSym(EHSym), Number(Num), PersonalityIndex(P),
- adjustsStack(hC), hasLandingPads(hL), Moves(M), function (f) { }
- };
-
- std::vector<FunctionEHFrameInfo> EHFrames;
-
- /// UsesLSDA - Indicates whether an FDE that uses the CIE at the given index
- /// uses an LSDA. If so, then we need to encode that information in the CIE's
- /// augmentation.
- DenseMap<unsigned, bool> UsesLSDA;
-
- /// shouldEmitTable - Per-function flag to indicate if EH tables should
- /// be emitted.
- bool shouldEmitTable;
-
- /// shouldEmitMoves - Per-function flag to indicate if frame moves info
- /// should be emitted.
- bool shouldEmitMoves;
-
- /// shouldEmitTableModule - Per-module flag to indicate if EH tables
- /// should be emitted.
- bool shouldEmitTableModule;
-
- /// shouldEmitFrameModule - Per-module flag to indicate if frame moves
- /// should be emitted.
- bool shouldEmitMovesModule;
-
- /// EmitCIE - Emit a Common Information Entry (CIE). This holds information
- /// that is shared among many Frame Description Entries. There is at least
- /// one CIE in every non-empty .debug_frame section.
- void EmitCIE(const Function *Personality, unsigned Index);
-
- /// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
- void EmitFDE(const FunctionEHFrameInfo &EHFrameInfo);
-
/// EmitExceptionTable - Emit landing pads and actions.
///
/// The general organization of the table is complex, but the basic concepts
@@ -172,18 +125,116 @@ public:
// Main entry points.
//
DwarfException(AsmPrinter *A);
- ~DwarfException();
+ virtual ~DwarfException();
+
+ /// EndModule - Emit all exception information that should come after the
+ /// content.
+ virtual void EndModule();
+
+ /// BeginFunction - Gather pre-function exception information. Assumes it's
+ /// being emitted immediately after the function entry point.
+ virtual void BeginFunction(const MachineFunction *MF);
+
+ /// EndFunction - Gather and emit post-function exception information.
+ virtual void EndFunction();
+};
+
+class DwarfCFIException : public DwarfException {
+ /// shouldEmitTable - Per-function flag to indicate if EH tables should
+ /// be emitted.
+ bool shouldEmitTable;
+
+ /// shouldEmitMoves - Per-function flag to indicate if frame moves info
+ /// should be emitted.
+ bool shouldEmitMoves;
+
+ /// shouldEmitTableModule - Per-module flag to indicate if EH tables
+ /// should be emitted.
+ bool shouldEmitTableModule;
+public:
+ //===--------------------------------------------------------------------===//
+ // Main entry points.
+ //
+ DwarfCFIException(AsmPrinter *A);
+ virtual ~DwarfCFIException();
+
+ /// EndModule - Emit all exception information that should come after the
+ /// content.
+ virtual void EndModule();
+
+ /// BeginFunction - Gather pre-function exception information. Assumes it's
+ /// being emitted immediately after the function entry point.
+ virtual void BeginFunction(const MachineFunction *MF);
+
+ /// EndFunction - Gather and emit post-function exception information.
+ virtual void EndFunction();
+};
+
+class DwarfTableException : public DwarfException {
+ /// shouldEmitTable - Per-function flag to indicate if EH tables should
+ /// be emitted.
+ bool shouldEmitTable;
+
+ /// shouldEmitMoves - Per-function flag to indicate if frame moves info
+ /// should be emitted.
+ bool shouldEmitMoves;
+
+ /// shouldEmitTableModule - Per-module flag to indicate if EH tables
+ /// should be emitted.
+ bool shouldEmitTableModule;
+
+ /// shouldEmitMovesModule - Per-module flag to indicate if frame moves
+ /// should be emitted.
+ bool shouldEmitMovesModule;
+
+ struct FunctionEHFrameInfo {
+ MCSymbol *FunctionEHSym; // L_foo.eh
+ unsigned Number;
+ unsigned PersonalityIndex;
+ bool adjustsStack;
+ bool hasLandingPads;
+ std::vector<MachineMove> Moves;
+ const Function *function;
+
+ FunctionEHFrameInfo(MCSymbol *EHSym, unsigned Num, unsigned P,
+ bool hC, bool hL,
+ const std::vector<MachineMove> &M,
+ const Function *f):
+ FunctionEHSym(EHSym), Number(Num), PersonalityIndex(P),
+ adjustsStack(hC), hasLandingPads(hL), Moves(M), function (f) { }
+ };
+
+ std::vector<FunctionEHFrameInfo> EHFrames;
+
+ /// UsesLSDA - Indicates whether an FDE that uses the CIE at the given index
+ /// uses an LSDA. If so, then we need to encode that information in the CIE's
+ /// augmentation.
+ DenseMap<unsigned, bool> UsesLSDA;
+
+ /// EmitCIE - Emit a Common Information Entry (CIE). This holds information
+ /// that is shared among many Frame Description Entries. There is at least
+ /// one CIE in every non-empty .debug_frame section.
+ void EmitCIE(const Function *Personality, unsigned Index);
+
+ /// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
+ void EmitFDE(const FunctionEHFrameInfo &EHFrameInfo);
+public:
+ //===--------------------------------------------------------------------===//
+ // Main entry points.
+ //
+ DwarfTableException(AsmPrinter *A);
+ virtual ~DwarfTableException();
/// EndModule - Emit all exception information that should come after the
/// content.
- void EndModule();
+ virtual void EndModule();
/// BeginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
- void BeginFunction(const MachineFunction *MF);
+ virtual void BeginFunction(const MachineFunction *MF);
/// EndFunction - Gather and emit post-function exception information.
- void EndFunction();
+ virtual void EndFunction();
};
} // End of namespace llvm
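With the hierarchy split, an emitter can be chosen by EH style. A hypothetical factory illustrating the intent (the selection site and flag are a sketch, not the actual construction code):

DwarfException *createEHEmitter(AsmPrinter *A, bool UseCFI) {
  if (UseCFI)
    return new DwarfCFIException(A);   // emits via CFI directives
  return new DwarfTableException(A);   // emits the tables "by hand"
}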
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfTableException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfTableException.cpp
new file mode 100644
index 0000000..7519011
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfTableException.cpp
@@ -0,0 +1,349 @@
+//===-- CodeGen/AsmPrinter/DwarfTableException.cpp - Dwarf Exception Impl --==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing DWARF exception info into asm files.
+// The implementation emits all the necessary tables "by hand".
+//
+//===----------------------------------------------------------------------===//
+
+#include "DwarfException.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+using namespace llvm;
+
+DwarfTableException::DwarfTableException(AsmPrinter *A)
+ : DwarfException(A),
+ shouldEmitTable(false), shouldEmitMoves(false),
+ shouldEmitTableModule(false), shouldEmitMovesModule(false) {}
+
+DwarfTableException::~DwarfTableException() {}
+
+/// EmitCIE - Emit a Common Information Entry (CIE). This holds information that
+/// is shared among many Frame Description Entries. There is at least one CIE
+/// in every non-empty .debug_frame section.
+void DwarfTableException::EmitCIE(const Function *PersonalityFn, unsigned Index) {
+ // Size and sign of stack growth.
+ int stackGrowth = Asm->getTargetData().getPointerSize();
+ if (Asm->TM.getFrameLowering()->getStackGrowthDirection() ==
+ TargetFrameLowering::StackGrowsDown)
+ stackGrowth *= -1;
+
+ const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+
+ // Begin eh frame section.
+ Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
+
+ MCSymbol *EHFrameSym;
+ if (TLOF.isFunctionEHFrameSymbolPrivate())
+ EHFrameSym = Asm->GetTempSymbol("EH_frame", Index);
+ else
+ EHFrameSym = Asm->OutContext.GetOrCreateSymbol(Twine("EH_frame") +
+ Twine(Index));
+ Asm->OutStreamer.EmitLabel(EHFrameSym);
+
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_eh_frame", Index));
+
+ // Define base labels.
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common", Index));
+
+ // Define the eh frame length.
+ Asm->OutStreamer.AddComment("Length of Common Information Entry");
+ Asm->EmitLabelDifference(Asm->GetTempSymbol("eh_frame_common_end", Index),
+ Asm->GetTempSymbol("eh_frame_common_begin", Index),
+ 4);
+
+ // EH frame header.
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common_begin",Index));
+ Asm->OutStreamer.AddComment("CIE Identifier Tag");
+ Asm->OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
+ Asm->OutStreamer.AddComment("DW_CIE_VERSION");
+ Asm->OutStreamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1/*size*/, 0/*addr*/);
+
+ // The personality presence indicates that language specific information will
+ // show up in the eh frame. Find out how we are supposed to lower the
+ // personality function reference:
+
+ unsigned LSDAEncoding = TLOF.getLSDAEncoding();
+ unsigned FDEEncoding = TLOF.getFDEEncoding();
+ unsigned PerEncoding = TLOF.getPersonalityEncoding();
+
+ char Augmentation[6] = { 0 };
+ unsigned AugmentationSize = 0;
+ char *APtr = Augmentation + 1;
+
+ if (PersonalityFn) {
+ // There is a personality function.
+ *APtr++ = 'P';
+ AugmentationSize += 1 + Asm->GetSizeOfEncodedValue(PerEncoding);
+ }
+
+ if (UsesLSDA[Index]) {
+ // An LSDA pointer is in the FDE augmentation.
+ *APtr++ = 'L';
+ ++AugmentationSize;
+ }
+
+ if (FDEEncoding != dwarf::DW_EH_PE_absptr) {
+ // A non-default pointer encoding for the FDE.
+ *APtr++ = 'R';
+ ++AugmentationSize;
+ }
+
+ if (APtr != Augmentation + 1)
+ Augmentation[0] = 'z';
+
+ Asm->OutStreamer.AddComment("CIE Augmentation");
+ Asm->OutStreamer.EmitBytes(StringRef(Augmentation, strlen(Augmentation)+1),0);
+
+ // Round out reader.
+ Asm->EmitULEB128(1, "CIE Code Alignment Factor");
+ Asm->EmitSLEB128(stackGrowth, "CIE Data Alignment Factor");
+ Asm->OutStreamer.AddComment("CIE Return Address Column");
+
+ const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
+ Asm->EmitInt8(RI->getDwarfRegNum(RI->getRARegister(), true));
+
+ if (Augmentation[0]) {
+ Asm->EmitULEB128(AugmentationSize, "Augmentation Size");
+
+ // If there is a personality, we need to indicate the function's location.
+ if (PersonalityFn) {
+ Asm->EmitEncodingByte(PerEncoding, "Personality");
+ Asm->OutStreamer.AddComment("Personality");
+ Asm->EmitReference(PersonalityFn, PerEncoding);
+ }
+ if (UsesLSDA[Index])
+ Asm->EmitEncodingByte(LSDAEncoding, "LSDA");
+ if (FDEEncoding != dwarf::DW_EH_PE_absptr)
+ Asm->EmitEncodingByte(FDEEncoding, "FDE");
+ }
+
+ // Indicate locations of general callee saved registers in frame.
+ std::vector<MachineMove> Moves;
+ TFI->getInitialFrameState(Moves);
+ Asm->EmitFrameMoves(Moves, 0, true);
+
+ // On Darwin the linker honors the alignment of eh_frame, which means it must
+ // be 8-byte on 64-bit targets to match what gcc does. Otherwise you get
+ // holes which confuse readers of eh_frame.
+ Asm->EmitAlignment(Asm->getTargetData().getPointerSize() == 4 ? 2 : 3);
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_common_end", Index));
+}
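A worked example of the augmentation-string construction in EmitCIE above, assuming a personality function, an LSDA, and a non-absptr FDE encoding are all present:

char Augmentation[6] = { 0 };
char *APtr = Augmentation + 1;
*APtr++ = 'P';               // personality function reference follows
*APtr++ = 'L';               // FDEs carry an LSDA pointer
*APtr++ = 'R';               // FDEs use a non-default pointer encoding
if (APtr != Augmentation + 1)
  Augmentation[0] = 'z';     // augmentation data is size-prefixed
// The CIE now advertises "zPLR".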
+
+/// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
+void DwarfTableException::EmitFDE(const FunctionEHFrameInfo &EHFrameInfo) {
+ assert(!EHFrameInfo.function->hasAvailableExternallyLinkage() &&
+ "Should not emit 'available externally' functions at all");
+
+ const Function *TheFunc = EHFrameInfo.function;
+ const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+
+ unsigned LSDAEncoding = TLOF.getLSDAEncoding();
+ unsigned FDEEncoding = TLOF.getFDEEncoding();
+
+ Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
+
+ // Externally visible entry into the function's eh frame info. If the
+ // corresponding function is static, this should not be externally visible.
+ if (!TheFunc->hasLocalLinkage() && TLOF.isFunctionEHSymbolGlobal())
+ Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,MCSA_Global);
+
+ // If corresponding function is weak definition, this should be too.
+ if (TheFunc->isWeakForLinker() && Asm->MAI->getWeakDefDirective())
+ Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
+ MCSA_WeakDefinition);
+
+ // If corresponding function is hidden, this should be too.
+ if (TheFunc->hasHiddenVisibility())
+ if (MCSymbolAttr HiddenAttr = Asm->MAI->getHiddenVisibilityAttr())
+ Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
+ HiddenAttr);
+
+ // If there are no calls then you can't unwind. This may mean we can omit the
+ // EH Frame, but some environments do not handle weak absolute symbols. If
+ // UnwindTablesMandatory is set we cannot do this optimization; the unwind
+ // info is to be available for non-EH uses.
+ if (!EHFrameInfo.adjustsStack && !UnwindTablesMandatory &&
+ (!TheFunc->isWeakForLinker() ||
+ !Asm->MAI->getWeakDefDirective() ||
+ TLOF.getSupportsWeakOmittedEHFrame())) {
+ Asm->OutStreamer.EmitAssignment(EHFrameInfo.FunctionEHSym,
+ MCConstantExpr::Create(0, Asm->OutContext));
+ // This name has no connection to the function, so it might get
+ // dead-stripped when the function is not, erroneously. Prohibit
+ // dead-stripping unconditionally.
+ if (Asm->MAI->hasNoDeadStrip())
+ Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
+ MCSA_NoDeadStrip);
+ } else {
+ Asm->OutStreamer.EmitLabel(EHFrameInfo.FunctionEHSym);
+
+ // EH frame header.
+ Asm->OutStreamer.AddComment("Length of Frame Information Entry");
+ Asm->EmitLabelDifference(
+ Asm->GetTempSymbol("eh_frame_end", EHFrameInfo.Number),
+ Asm->GetTempSymbol("eh_frame_begin", EHFrameInfo.Number), 4);
+
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_begin",
+ EHFrameInfo.Number));
+
+ Asm->OutStreamer.AddComment("FDE CIE offset");
+ Asm->EmitLabelDifference(
+ Asm->GetTempSymbol("eh_frame_begin", EHFrameInfo.Number),
+ Asm->GetTempSymbol("eh_frame_common",
+ EHFrameInfo.PersonalityIndex), 4);
+
+ MCSymbol *EHFuncBeginSym =
+ Asm->GetTempSymbol("eh_func_begin", EHFrameInfo.Number);
+
+ Asm->OutStreamer.AddComment("FDE initial location");
+ Asm->EmitReference(EHFuncBeginSym, FDEEncoding);
+
+ Asm->OutStreamer.AddComment("FDE address range");
+ Asm->EmitLabelDifference(Asm->GetTempSymbol("eh_func_end",
+ EHFrameInfo.Number),
+ EHFuncBeginSym,
+ Asm->GetSizeOfEncodedValue(FDEEncoding));
+
+ // If there is a personality and landing pads then point to the language
+ // specific data area in the exception table.
+ if (MMI->getPersonalities()[0] != NULL) {
+ unsigned Size = Asm->GetSizeOfEncodedValue(LSDAEncoding);
+
+ Asm->EmitULEB128(Size, "Augmentation size");
+ Asm->OutStreamer.AddComment("Language Specific Data Area");
+ if (EHFrameInfo.hasLandingPads)
+ Asm->EmitReference(Asm->GetTempSymbol("exception", EHFrameInfo.Number),
+ LSDAEncoding);
+ else
+ Asm->OutStreamer.EmitIntValue(0, Size/*size*/, 0/*addrspace*/);
+
+ } else {
+ Asm->EmitULEB128(0, "Augmentation size");
+ }
+
+ // Indicate locations of function specific callee saved registers in frame.
+ Asm->EmitFrameMoves(EHFrameInfo.Moves, EHFuncBeginSym, true);
+
+ // On Darwin the linker honors the alignment of eh_frame, which means it
+ // must be 8-byte on 64-bit targets to match what gcc does. Otherwise you
+ // get holes which confuse readers of eh_frame.
+ Asm->EmitAlignment(Asm->getTargetData().getPointerSize() == 4 ? 2 : 3);
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_frame_end",
+ EHFrameInfo.Number));
+
+ // If the function is marked used, this table should be also. We cannot
+ // make the mark unconditional in this case, since retaining the table also
+ // retains the function in this case, and there is code around that depends
+ // on unused functions (calling undefined externals) being dead-stripped to
+ // link correctly. Yes, there really is.
+ if (MMI->isUsedFunction(EHFrameInfo.function))
+ if (Asm->MAI->hasNoDeadStrip())
+ Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
+ MCSA_NoDeadStrip);
+ }
+ Asm->OutStreamer.AddBlankLine();
+}
+
+/// EndModule - Emit all exception information that should come after the
+/// content.
+void DwarfTableException::EndModule() {
+ if (!Asm->MAI->isExceptionHandlingDwarf())
+ return;
+
+ if (!shouldEmitMovesModule && !shouldEmitTableModule)
+ return;
+
+ const std::vector<const Function*> &Personalities = MMI->getPersonalities();
+
+ for (unsigned I = 0, E = Personalities.size(); I < E; ++I)
+ EmitCIE(Personalities[I], I);
+
+ for (std::vector<FunctionEHFrameInfo>::iterator
+ I = EHFrames.begin(), E = EHFrames.end(); I != E; ++I)
+ EmitFDE(*I);
+}
+
+/// BeginFunction - Gather pre-function exception information. Assumes it's
+/// being emitted immediately after the function entry point.
+void DwarfTableException::BeginFunction(const MachineFunction *MF) {
+ shouldEmitTable = shouldEmitMoves = false;
+
+ // If any landing pads survive, we need an EH table.
+ shouldEmitTable = !MMI->getLandingPads().empty();
+
+ // See if we need frame move info.
+ shouldEmitMoves =
+ !Asm->MF->getFunction()->doesNotThrow() || UnwindTablesMandatory;
+
+ if (shouldEmitMoves || shouldEmitTable)
+ // Assumes we are in the correct section after the entry point.
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_begin",
+ Asm->getFunctionNumber()));
+
+ shouldEmitTableModule |= shouldEmitTable;
+ shouldEmitMovesModule |= shouldEmitMoves;
+}
+
+/// EndFunction - Gather and emit post-function exception information.
+///
+void DwarfTableException::EndFunction() {
+ if (!shouldEmitMoves && !shouldEmitTable) return;
+
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_end",
+ Asm->getFunctionNumber()));
+
+ // Record if this personality index uses a landing pad.
+ bool HasLandingPad = !MMI->getLandingPads().empty();
+ UsesLSDA[MMI->getPersonalityIndex()] |= HasLandingPad;
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ if (HasLandingPad)
+ EmitExceptionTable();
+
+ const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
+ MCSymbol *FunctionEHSym =
+ Asm->GetSymbolWithGlobalValueBase(Asm->MF->getFunction(), ".eh",
+ TLOF.isFunctionEHFrameSymbolPrivate());
+
+ // Save EH frame information
+ EHFrames.
+ push_back(FunctionEHFrameInfo(FunctionEHSym,
+ Asm->getFunctionNumber(),
+ MMI->getPersonalityIndex(),
+ Asm->MF->getFrameInfo()->adjustsStack(),
+ !MMI->getLandingPads().empty(),
+ MMI->getFrameMoves(),
+ Asm->MF->getFunction()));
+}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index c8a63cf..1153817 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
+#include <cctype>
using namespace llvm;
namespace {
diff --git a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
index 1b7e08a..76bb3d1 100644
--- a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -25,8 +25,12 @@
using namespace llvm;
char CalculateSpillWeights::ID = 0;
-INITIALIZE_PASS(CalculateSpillWeights, "calcspillweights",
- "Calculate spill weights", false, false);
+INITIALIZE_PASS_BEGIN(CalculateSpillWeights, "calcspillweights",
+ "Calculate spill weights", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(CalculateSpillWeights, "calcspillweights",
+ "Calculate spill weights", false, false)
void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
au.addRequired<LiveIntervals>();
@@ -170,8 +174,7 @@ void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
totalWeight *= 0.5F;
}
- li.weight = totalWeight;
- lis_.normalizeSpillWeight(li);
+ li.weight = normalizeSpillWeight(totalWeight, li.getSize());
}
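The replaced call suggests normalization is now a pure function of the raw weight and the interval's size. A guess at its shape (assumption: simple scaling by size; the real normalizeSpillWeight is defined in LLVM's headers):

static float normalizeSpillWeightSketch(float TotalWeight, unsigned Size) {
  // Assumed: larger live ranges are proportionally de-weighted so a
  // huge interval isn't preferred for spilling on raw use count alone.
  return TotalWeight / (float)Size;
}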
void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
@@ -218,7 +221,7 @@ void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
if (rc == orc)
return;
- DEBUG(dbgs() << "Inflating " << orc->getName() << ":%reg" << reg << " to "
- << rc->getName() <<".\n");
+ DEBUG(dbgs() << "Inflating " << orc->getName() << ':' << PrintReg(reg)
+ << " to " << rc->getName() <<".\n");
mri.setRegClass(reg, rc);
}
diff --git a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index 62ad817..2ad80b4 100644
--- a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -34,8 +34,8 @@ CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm,
// HandleByVal - Allocate a stack slot large enough to pass an argument by
// value. The size and alignment information of the argument is encoded in its
// parameter attribute.
-void CCState::HandleByVal(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
+void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign,
ISD::ArgFlagsTy ArgFlags) {
unsigned Align = ArgFlags.getByValAlign();
@@ -51,11 +51,9 @@ void CCState::HandleByVal(unsigned ValNo, EVT ValVT,
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
- UsedRegs[Reg/32] |= 1 << (Reg&31);
-
- if (const unsigned *RegAliases = TRI.getAliasSet(Reg))
- for (; (Reg = *RegAliases); ++RegAliases)
- UsedRegs[Reg/32] |= 1 << (Reg&31);
+ for (const unsigned *Alias = TRI.getOverlaps(Reg);
+ unsigned Reg = *Alias; ++Alias)
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
}
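The bit addressing in MarkAllocated is worth a worked example; for register 35:

unsigned UsedRegs[8] = { 0 };
unsigned Reg = 35;
UsedRegs[Reg / 32] |= 1u << (Reg & 31); // word 35/32 == 1, bit 35&31 == 3
// UsedRegs[1] is now 0x8.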
/// AnalyzeFormalArguments - Analyze an array of argument values,
@@ -66,12 +64,12 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
unsigned NumArgs = Ins.size();
for (unsigned i = 0; i != NumArgs; ++i) {
- EVT ArgVT = Ins[i].VT;
+ MVT ArgVT = Ins[i].VT;
ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Formal argument #" << i << " has unhandled type "
- << ArgVT.getEVTString();
+ << EVT(ArgVT).getEVTString();
#endif
llvm_unreachable(0);
}
@@ -84,7 +82,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
+ MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
return false;
@@ -98,12 +96,12 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
+ MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Return operand #" << i << " has unhandled type "
- << VT.getEVTString();
+ << EVT(VT).getEVTString();
#endif
llvm_unreachable(0);
}
@@ -116,12 +114,12 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
unsigned NumOps = Outs.size();
for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = Outs[i].VT;
+ MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString();
+ << EVT(ArgVT).getEVTString();
#endif
llvm_unreachable(0);
}
@@ -130,17 +128,17 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
/// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags.
-void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) {
unsigned NumOps = ArgVTs.size();
for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = ArgVTs[i];
+ MVT ArgVT = ArgVTs[i];
ISD::ArgFlagsTy ArgFlags = Flags[i];
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString();
+ << EVT(ArgVT).getEVTString();
#endif
llvm_unreachable(0);
}
@@ -152,12 +150,12 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- EVT VT = Ins[i].VT;
+ MVT VT = Ins[i].VT;
ISD::ArgFlagsTy Flags = Ins[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
dbgs() << "Call result #" << i << " has unhandled type "
- << VT.getEVTString();
+ << EVT(VT).getEVTString();
#endif
llvm_unreachable(0);
}
@@ -166,11 +164,11 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
-void CCState::AnalyzeCallResult(EVT VT, CCAssignFn Fn) {
+void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
dbgs() << "Call result has unhandled type "
- << VT.getEVTString();
+ << EVT(VT).getEVTString();
#endif
llvm_unreachable(0);
}
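The pattern repeated through these hunks: calling-convention analysis now traffics in MVT, LLVM's simple target-legal machine value type, and only the diagnostic path widens back to EVT for printing. A sketch of that idiom, assuming the usual definitions in llvm/CodeGen/ValueTypes.h:

#include "llvm/CodeGen/ValueTypes.h"
#include <string>

std::string printArgType(llvm::MVT ArgVT) {
  // EVT wraps an MVT so the richer EVT printing interface can be
  // reused unchanged, as in the #ifndef NDEBUG blocks above.
  return llvm::EVT(ArgVT).getEVTString();
}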
diff --git a/contrib/llvm/lib/CodeGen/CodeGen.cpp b/contrib/llvm/lib/CodeGen/CodeGen.cpp
new file mode 100644
index 0000000..515e6f9
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/CodeGen.cpp
@@ -0,0 +1,61 @@
+//===-- CodeGen.cpp -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the common initialization routines for the
+// CodeGen library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InitializePasses.h"
+#include "llvm-c/Initialization.h"
+
+using namespace llvm;
+
+/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
+void llvm::initializeCodeGen(PassRegistry &Registry) {
+ initializeCalculateSpillWeightsPass(Registry);
+ initializeDeadMachineInstructionElimPass(Registry);
+ initializeGCModuleInfoPass(Registry);
+ initializeIfConverterPass(Registry);
+ initializeLiveDebugVariablesPass(Registry);
+ initializeLiveIntervalsPass(Registry);
+ initializeLiveStacksPass(Registry);
+ initializeLiveVariablesPass(Registry);
+ initializeMachineCSEPass(Registry);
+ initializeMachineDominatorTreePass(Registry);
+ initializeMachineLICMPass(Registry);
+ initializeMachineLoopInfoPass(Registry);
+ initializeMachineModuleInfoPass(Registry);
+ initializeMachineSinkingPass(Registry);
+ initializeMachineVerifierPassPass(Registry);
+ initializeOptimizePHIsPass(Registry);
+ initializePHIEliminationPass(Registry);
+ initializePeepholeOptimizerPass(Registry);
+ initializePreAllocSplittingPass(Registry);
+ initializeProcessImplicitDefsPass(Registry);
+ initializePEIPass(Registry);
+ initializeRALinScanPass(Registry);
+ initializeRegisterCoalescerAnalysisGroup(Registry);
+ initializeRenderMachineFunctionPass(Registry);
+ initializeSimpleRegisterCoalescingPass(Registry);
+ initializeSlotIndexesPass(Registry);
+ initializeLoopSplitterPass(Registry);
+ initializeStackProtectorPass(Registry);
+ initializeStackSlotColoringPass(Registry);
+ initializeStrongPHIEliminationPass(Registry);
+ initializeTwoAddressInstructionPassPass(Registry);
+ initializeUnreachableBlockElimPass(Registry);
+ initializeUnreachableMachineBlockElimPass(Registry);
+ initializeVirtRegMapPass(Registry);
+ initializeLowerIntrinsicsPass(Registry);
+}
+
+void LLVMInitializeCodeGen(LLVMPassRegistryRef R) {
+ initializeCodeGen(*unwrap(R));
+}
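A hypothetical use from a standalone tool, mirroring the C API shim above (the PassManager plumbing is elided):

#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"

int main() {
  llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
  llvm::initializeCodeGen(Registry); // one-time pass registration
  // ... construct a PassManager and add CodeGen passes ...
  return 0;
}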
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 335d2d8..f79598d 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -130,21 +130,25 @@ void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
return;
assert(Count < InsertPosIndex && "Instruction index out of expected range!");
- // Any register which was defined within the previous scheduling region
- // may have been rescheduled and its lifetime may overlap with registers
- // in ways not reflected in our current liveness state. For each such
- // register, adjust the liveness state to be conservatively correct.
- for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg)
- if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
- assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
-
- // Mark this register to be non-renamable.
+ for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
+ if (KillIndices[Reg] != ~0u) {
+ // If Reg is currently live, then mark that it can't be renamed as
+ // we don't know the extent of its live-range anymore (now that it
+ // has been scheduled).
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = Count;
+ } else if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
+ // Any register which was defined within the previous scheduling region
+ // may have been rescheduled and its lifetime may overlap with registers
+ // in ways not reflected in our current liveness state. For each such
+ // register, adjust the liveness state to be conservatively correct.
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Move the def index to the end of the previous region, to reflect
// that the def could theoretically have been scheduled at the end.
DefIndices[Reg] = InsertPosIndex;
}
+ }
PrescanInstruction(MI);
ScanInstruction(MI, Count);
@@ -177,7 +181,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
// that have special allocation requirements. Also assume all registers
// used in a call must not be changed (ABI).
// FIXME: The issue with predicated instruction is more complex. We are being
- // conservatively here because the kill markers cannot be trusted after
+ // conservative here because the kill markers cannot be trusted after
// if-conversion:
// %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
@@ -321,8 +325,62 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
}
}
+// Check all machine operands that reference the antidependent register and must
+// be replaced by NewReg. Return true if any of their parent instructions may
+// clobber the new register.
+//
+// Note: AntiDepReg may be referenced by a two-address instruction such that
+// its use operand is tied to a def operand. We guard against the case in which
+// the two-address instruction also defines NewReg, as may happen with
+// pre/postincrement loads. In this case, both the use and def operands are in
+// RegRefs because the def is inserted by PrescanInstruction and not erased
+// during ScanInstruction. So checking for an instruction with definitions of
+// both NewReg and AntiDepReg covers it.
+bool
+CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
+ RegRefIter RegRefEnd,
+ unsigned NewReg)
+{
+ for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I ) {
+ MachineOperand *RefOper = I->second;
+
+ // Don't allow the instruction defining AntiDepReg to earlyclobber its
+ // operands, in case they may be assigned to NewReg. In this case antidep
+ // breaking must fail, but it's too rare to bother optimizing.
+ if (RefOper->isDef() && RefOper->isEarlyClobber())
+ return true;
+
+ // Handle cases in which this instruction defines NewReg.
+ MachineInstr *MI = RefOper->getParent();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &CheckOper = MI->getOperand(i);
+
+ if (!CheckOper.isReg() || !CheckOper.isDef() ||
+ CheckOper.getReg() != NewReg)
+ continue;
+
+ // Don't allow the instruction to define NewReg and AntiDepReg.
+ // When AntiDepReg is renamed it will be an illegal op.
+ if (RefOper->isDef())
+ return true;
+
+ // Don't allow an instruction using AntiDepReg to be earlyclobbered by
+ // NewReg
+ if (CheckOper.isEarlyClobber())
+ return true;
+
+ // Don't allow inline asm to define NewReg at all. Who knows what it's
+ // doing with it.
+ if (MI->isInlineAsm())
+ return true;
+ }
+ }
+ return false;
+}
+
unsigned
-CriticalAntiDepBreaker::findSuitableFreeRegister(MachineInstr *MI,
+CriticalAntiDepBreaker::findSuitableFreeRegister(RegRefIter RegRefBegin,
+ RegRefIter RegRefEnd,
unsigned AntiDepReg,
unsigned LastNewReg,
const TargetRegisterClass *RC)
@@ -338,10 +396,10 @@ CriticalAntiDepBreaker::findSuitableFreeRegister(MachineInstr *MI,
// an anti-dependence with this AntiDepReg, because that would
// re-introduce that anti-dependence.
if (NewReg == LastNewReg) continue;
- // If the instruction already has a def of the NewReg, it's not suitable.
- // For example, Instruction with multiple definitions can result in this
- // condition.
- if (MI->modifiesRegister(NewReg, TRI)) continue;
+ // If any instructions that define AntiDepReg also define NewReg, it's
+ // not suitable. For example, an instruction with multiple definitions can
+ // result in this condition.
+ if (isNewRegClobberedByRefs(RegRefBegin, RegRefEnd, NewReg)) continue;
// If NewReg is dead and NewReg's most recent def is not before
// AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u))
@@ -548,7 +606,11 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// TODO: Instead of picking the first free register, consider which might
// be the best.
if (AntiDepReg != 0) {
- if (unsigned NewReg = findSuitableFreeRegister(MI, AntiDepReg,
+ std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
+ std::multimap<unsigned, MachineOperand *>::iterator>
+ Range = RegRefs.equal_range(AntiDepReg);
+ if (unsigned NewReg = findSuitableFreeRegister(Range.first, Range.second,
+ AntiDepReg,
LastNewReg[AntiDepReg],
RC)) {
DEBUG(dbgs() << "Breaking anti-dependence edge on "
@@ -558,9 +620,6 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// Update the references to the old register to refer to the new
// register.
- std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
- std::multimap<unsigned, MachineOperand *>::iterator>
- Range = RegRefs.equal_range(AntiDepReg);
for (std::multimap<unsigned, MachineOperand *>::iterator
Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second->setReg(NewReg);
@@ -580,7 +639,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
}
// We just went back in time and modified history; the
- // liveness information for the anti-depenence reg is now
+ // liveness information for the anti-dependence reg is now
// inconsistent. Set the state as if it were dead.
Classes[NewReg] = Classes[AntiDepReg];
DefIndices[NewReg] = DefIndices[AntiDepReg];
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index 0ed7c35..0daaef2 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -48,8 +48,10 @@ class TargetRegisterInfo;
/// pointer.
std::vector<const TargetRegisterClass*> Classes;
- /// RegRegs - Map registers to all their references within a live range.
+ /// RegRefs - Map registers to all their references within a live range.
std::multimap<unsigned, MachineOperand *> RegRefs;
+ typedef std::multimap<unsigned, MachineOperand *>::const_iterator
+ RegRefIter;
/// KillIndices - The index of the most recent kill (preceding bottom-up),
/// or ~0u if the register is not live.
@@ -90,10 +92,14 @@ class TargetRegisterInfo;
private:
void PrescanInstruction(MachineInstr *MI);
void ScanInstruction(MachineInstr *MI, unsigned Count);
- unsigned findSuitableFreeRegister(MachineInstr *MI,
+ bool isNewRegClobberedByRefs(RegRefIter RegRefBegin,
+ RegRefIter RegRefEnd,
+ unsigned NewReg);
+ unsigned findSuitableFreeRegister(RegRefIter RegRefBegin,
+ RegRefIter RegRefEnd,
unsigned AntiDepReg,
unsigned LastNewReg,
- const TargetRegisterClass *);
+ const TargetRegisterClass *RC);
};
}
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 318d922..fdc1d91 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -36,7 +36,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- DeadMachineInstructionElim() : MachineFunctionPass(ID) {}
+ DeadMachineInstructionElim() : MachineFunctionPass(ID) {
+ initializeDeadMachineInstructionElimPass(*PassRegistry::getPassRegistry());
+ }
private:
bool isDead(const MachineInstr *MI) const;
@@ -45,13 +47,19 @@ namespace {
char DeadMachineInstructionElim::ID = 0;
INITIALIZE_PASS(DeadMachineInstructionElim, "dead-mi-elimination",
- "Remove dead machine instructions", false, false);
+ "Remove dead machine instructions", false, false)
FunctionPass *llvm::createDeadMachineInstructionElimPass() {
return new DeadMachineInstructionElim();
}
bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
+ // Technically speaking, inline asm with no side effects and no defs can
+ // still be deleted. But there is so much bad inline asm code out there,
+ // we should let it be.
+ if (MI->isInlineAsm())
+ return false;
+
// Don't delete instructions with side effects.
bool SawStore = false;
if (!MI->isSafeToMove(TII, 0, SawStore) && !MI->isPHI())
@@ -151,7 +159,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
- if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
LivePhysRegs.reset(Reg);
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
@@ -168,7 +176,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
- if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
LivePhysRegs.set(Reg);
for (const unsigned *AliasSet = TRI->getAliasSet(Reg);
*AliasSet; ++AliasSet)
diff --git a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index 550fd3e..0ebb5b0 100644
--- a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -43,7 +43,7 @@ namespace {
// The eh.selector intrinsic.
Function *SelectorIntrinsic;
- // _Unwind_Resume_or_Rethrow call.
+ // _Unwind_Resume_or_Rethrow or _Unwind_SjLj_Resume call.
Constant *URoR;
// The EH language-specific catch-all type.
@@ -82,11 +82,11 @@ namespace {
/// FindAllURoRInvokes - Find all URoR invokes in the function.
void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
- /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow"
- /// calls. The "unwind" part of these invokes jump to a landing pad within
- /// the current function. This is a candidate to merge the selector
- /// associated with the URoR invoke with the one from the URoR's landing
- /// pad.
+ /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" or
+ /// "_Unwind_SjLj_Resume" calls. The "unwind" part of these invokes jumps to
+ /// a landing pad within the current function. This is a candidate for
+ /// merging the selector associated with the URoR invoke with the one from
+ /// the URoR's landing pad.
bool HandleURoRInvokes();
/// FindSelectorAndURoR - Find the eh.selector call and URoR call associated
@@ -100,7 +100,9 @@ namespace {
DwarfEHPrepare(const TargetMachine *tm) :
FunctionPass(ID), TM(tm), TLI(TM->getTargetLowering()),
ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
- URoR(0), EHCatchAllValue(0), RewindFunction(0) {}
+ URoR(0), EHCatchAllValue(0), RewindFunction(0) {
+ initializeDominatorTreePass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &Fn);
@@ -224,10 +226,11 @@ DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
return Changed;
}
-/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" calls. The
-/// "unwind" part of these invokes jump to a landing pad within the current
-/// function. This is a candidate to merge the selector associated with the URoR
-/// invoke with the one from the URoR's landing pad.
+/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" or
+/// "_Unwind_SjLj_Resume" calls. The "unwind" part of these invokes jumps to a
+/// landing pad within the current function. This is a candidate for merging
+/// the selector associated with the URoR invoke with the one from the URoR's
+/// landing pad.
bool DwarfEHPrepare::HandleURoRInvokes() {
if (!EHCatchAllValue) {
EHCatchAllValue =
@@ -247,7 +250,10 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
if (!URoR) {
URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
- if (!URoR) return CleanupSelectors(CatchAllSels);
+ if (!URoR) {
+ URoR = F->getParent()->getFunction("_Unwind_SjLj_Resume");
+ if (!URoR) return CleanupSelectors(CatchAllSels);
+ }
}
SmallPtrSet<InvokeInst*, 32> URoRInvokes;
diff --git a/contrib/llvm/lib/CodeGen/ELF.h b/contrib/llvm/lib/CodeGen/ELF.h
index fb884c9..e08feeb 100644
--- a/contrib/llvm/lib/CodeGen/ELF.h
+++ b/contrib/llvm/lib/CodeGen/ELF.h
@@ -23,7 +23,7 @@
#include "llvm/CodeGen/BinaryObject.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/Support/ELF.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class GlobalValue;
diff --git a/contrib/llvm/lib/CodeGen/ELFWriter.cpp b/contrib/llvm/lib/CodeGen/ELFWriter.cpp
index d14728d..0fd1e8e 100644
--- a/contrib/llvm/lib/CodeGen/ELFWriter.cpp
+++ b/contrib/llvm/lib/CodeGen/ELFWriter.cpp
@@ -45,6 +45,7 @@
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetELFWriterInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -64,7 +65,7 @@ char ELFWriter::ID = 0;
ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm)
: MachineFunctionPass(ID), O(o), TM(tm),
- OutContext(*new MCContext(*TM.getMCAsmInfo())),
+ OutContext(*new MCContext(*TM.getMCAsmInfo(), new TargetAsmInfo(tm))),
TLOF(TM.getTargetLowering()->getObjFileLowering()),
is64Bit(TM.getTargetData()->getPointerSizeInBits() == 64),
isLittleEndian(TM.getTargetData()->isLittleEndian()),
@@ -327,6 +328,18 @@ void ELFWriter::AddToSymbolList(ELFSym *GblSym) {
}
}
+/// HasCommonSymbols - True if this section holds common symbols; this is
+/// indicated in the ELF object file by a symbol with the SHN_COMMON section
+/// header index.
+static bool HasCommonSymbols(const MCSectionELF &S) {
+ // FIXME: this is wrong, a common symbol can be in .data for example.
+ if (StringRef(S.getSectionName()).startswith(".gnu.linkonce."))
+ return true;
+
+ return false;
+}
+
+
// EmitGlobal - Choose the right section for global and emit it
void ELFWriter::EmitGlobal(const GlobalValue *GV) {
@@ -363,7 +376,7 @@ void ELFWriter::EmitGlobal(const GlobalValue *GV) {
unsigned Size = TD->getTypeAllocSize(GVar->getInitializer()->getType());
GblSym->Size = Size;
- if (S->HasCommonSymbols()) { // Symbol must go to a common section
+ if (HasCommonSymbols(*S)) { // Symbol must go to a common section
GblSym->SectionIdx = ELF::SHN_COMMON;
// A new linkonce section is created for each global in the
diff --git a/contrib/llvm/lib/CodeGen/EdgeBundles.cpp b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
new file mode 100644
index 0000000..aed8bc9
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
@@ -0,0 +1,86 @@
+//===-------- EdgeBundles.cpp - Bundles of CFG edges ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the implementation of the EdgeBundles analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/EdgeBundles.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+ViewEdgeBundles("view-edge-bundles", cl::Hidden,
+ cl::desc("Pop up a window to show edge bundle graphs"));
+
+char EdgeBundles::ID = 0;
+
+INITIALIZE_PASS(EdgeBundles, "edge-bundles", "Bundle Machine CFG Edges",
+ /* cfg = */ true, /* analysis = */ true)
+
+char &llvm::EdgeBundlesID = EdgeBundles::ID;
+
+void EdgeBundles::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool EdgeBundles::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ EC.clear();
+ EC.grow(2 * MF->size());
+
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
+ ++I) {
+ const MachineBasicBlock &MBB = *I;
+ unsigned OutE = 2 * MBB.getNumber() + 1;
+ // Join the outgoing bundle with the ingoing bundles of all successors.
+ for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
+ SE = MBB.succ_end(); SI != SE; ++SI)
+ EC.join(OutE, 2 * (*SI)->getNumber());
+ }
+ EC.compress();
+ if (ViewEdgeBundles)
+ view();
+ return false;
+}
+
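The bundle numbering convention is that block number n has ingoing bundle 2*n and outgoing bundle 2*n+1; joining each outgoing bundle with its successors' ingoing bundles partitions all CFG edges into equivalence classes. A small self-contained sketch of the same computation, with a plain union-find standing in for llvm::IntEqClasses and a hypothetical three-block CFG:

    #include <cstdio>
    #include <vector>

    // Tiny union-find standing in for llvm::IntEqClasses.
    struct EquivClasses {
      std::vector<unsigned> Parent;
      explicit EquivClasses(unsigned N) : Parent(N) {
        for (unsigned i = 0; i != N; ++i) Parent[i] = i;
      }
      unsigned find(unsigned X) {
        while (Parent[X] != X) X = Parent[X] = Parent[Parent[X]];
        return X;
      }
      void join(unsigned A, unsigned B) { Parent[find(A)] = find(B); }
    };

    int main() {
      // Hypothetical CFG: BB0 -> BB1, BB0 -> BB2, BB1 -> BB2.
      const unsigned NumBlocks = 3;
      const unsigned Edges[3][2] = { {0, 1}, {0, 2}, {1, 2} };

      // Ingoing bundle of block n is 2*n, outgoing bundle is 2*n+1.
      EquivClasses EC(2 * NumBlocks);
      for (unsigned i = 0; i != 3; ++i)
        EC.join(2 * Edges[i][0] + 1, 2 * Edges[i][1]);

      // All three edges land in a single bundle here; the entry block's
      // ingoing bundle and the exit block's outgoing bundle stay alone.
      for (unsigned BB = 0; BB != NumBlocks; ++BB)
        std::printf("BB%u: in-bundle %u, out-bundle %u\n", BB,
                    EC.find(2 * BB), EC.find(2 * BB + 1));
      return 0;
    }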
+/// view - Visualize the annotated bipartite CFG with Graphviz.
+void EdgeBundles::view() const {
+ ViewGraph(*this, "EdgeBundles");
+}
+
+/// Specialize WriteGraph, the standard implementation won't work.
+raw_ostream &llvm::WriteGraph(raw_ostream &O, const EdgeBundles &G,
+ bool ShortNames,
+ const std::string &Title) {
+ const MachineFunction *MF = G.getMachineFunction();
+
+ O << "digraph {\n";
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ unsigned BB = I->getNumber();
+ O << "\t\"BB#" << BB << "\" [ shape=box ]\n"
+ << '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n"
+ << "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n';
+ for (MachineBasicBlock::const_succ_iterator SI = I->succ_begin(),
+ SE = I->succ_end(); SI != SE; ++SI)
+ O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber()
+ << "\" [ color=lightgray ]\n";
+ }
+ O << "}\n";
+ return O;
+}
+
+
diff --git a/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
new file mode 100644
index 0000000..b5ec303
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
@@ -0,0 +1,82 @@
+//===-- llvm/CodeGen/ExpandISelPseudos.cpp ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Expand pseudo-instructions produced by ISel. These usually exist to allow
+// the expansion to contain control flow, such as a conditional move
+// implemented with a conditional branch and a phi, or an atomic operation
+// implemented with a loop.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "expand-isel-pseudos"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+namespace {
+ class ExpandISelPseudos : public MachineFunctionPass {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ ExpandISelPseudos() : MachineFunctionPass(ID) {}
+
+ private:
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const {
+ return "Expand ISel Pseudo-instructions";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+} // end anonymous namespace
+
+char ExpandISelPseudos::ID = 0;
+INITIALIZE_PASS(ExpandISelPseudos, "expand-isel-pseudos",
+ "Expand CodeGen Pseudo-instructions", false, false)
+
+FunctionPass *llvm::createExpandISelPseudosPass() {
+ return new ExpandISelPseudos();
+}
+
+bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ const TargetLowering *TLI = MF.getTarget().getTargetLowering();
+
+ // Iterate through each instruction in the function, looking for pseudos.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = I;
+ for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
+ MBBI != MBBE; ) {
+ MachineInstr *MI = MBBI++;
+
+ // If MI is a pseudo, expand it.
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (TID.usesCustomInsertionHook()) {
+ Changed = true;
+ MachineBasicBlock *NewMBB =
+ TLI->EmitInstrWithCustomInserter(MI, MBB);
+ // The expansion may involve new basic blocks.
+ if (NewMBB != MBB) {
+ MBB = NewMBB;
+ I = NewMBB;
+ MBBI = NewMBB->begin();
+ MBBE = NewMBB->end();
+ }
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/CodeGen/GCMetadata.cpp b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
index 0f6e882..d757cf4 100644
--- a/contrib/llvm/lib/CodeGen/GCMetadata.cpp
+++ b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
@@ -30,7 +30,6 @@ namespace {
raw_ostream &OS;
public:
- Printer() : FunctionPass(ID), OS(errs()) {}
explicit Printer(raw_ostream &OS) : FunctionPass(ID), OS(OS) {}
@@ -56,7 +55,7 @@ namespace {
}
INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
- "Create Garbage Collector Module Metadata", false, false);
+ "Create Garbage Collector Module Metadata", false, false)
// -----------------------------------------------------------------------------
@@ -70,7 +69,9 @@ GCFunctionInfo::~GCFunctionInfo() {}
char GCModuleInfo::ID = 0;
GCModuleInfo::GCModuleInfo()
- : ImmutablePass(ID) {}
+ : ImmutablePass(ID) {
+ initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
+}
GCModuleInfo::~GCModuleInfo() {
clear();
diff --git a/contrib/llvm/lib/CodeGen/GCStrategy.cpp b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
index 719fa19..766c6ee 100644
--- a/contrib/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
@@ -19,11 +19,12 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -123,6 +124,11 @@ GCFunctionInfo *GCStrategy::insertFunctionInfo(const Function &F) {
// -----------------------------------------------------------------------------
+INITIALIZE_PASS_BEGIN(LowerIntrinsics, "gc-lowering", "GC Lowering",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(GCModuleInfo)
+INITIALIZE_PASS_END(LowerIntrinsics, "gc-lowering", "GC Lowering", false, false)
+
FunctionPass *llvm::createGCLoweringPass() {
return new LowerIntrinsics();
}
@@ -130,7 +136,9 @@ FunctionPass *llvm::createGCLoweringPass() {
char LowerIntrinsics::ID = 0;
LowerIntrinsics::LowerIntrinsics()
- : FunctionPass(ID) {}
+ : FunctionPass(ID) {
+ initializeLowerIntrinsicsPass(*PassRegistry::getPassRegistry());
+ }
const char *LowerIntrinsics::getPassName() const {
return "Lower Garbage Collection Instructions";
@@ -139,6 +147,7 @@ const char *LowerIntrinsics::getPassName() const {
void LowerIntrinsics::getAnalysisUsage(AnalysisUsage &AU) const {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<GCModuleInfo>();
+ AU.addPreserved<DominatorTree>();
}
/// doInitialization - If this module uses the GC intrinsics, find them now.
@@ -249,9 +258,16 @@ bool LowerIntrinsics::runOnFunction(Function &F) {
if (NeedsDefaultLoweringPass(S))
MadeChange |= PerformDefaultLowering(F, S);
- if (NeedsCustomLoweringPass(S))
+ bool UseCustomLoweringPass = NeedsCustomLoweringPass(S);
+ if (UseCustomLoweringPass)
MadeChange |= S.performCustomLowering(F);
-
+
+ // Custom lowering may modify the CFG, so dominators must be recomputed.
+ if (UseCustomLoweringPass) {
+ if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
+ DT->DT->recalculate(F);
+ }
+
return MadeChange;
}
@@ -345,13 +361,15 @@ void MachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
MachineBasicBlock::iterator RAI = CI;
++RAI;
- if (FI->getStrategy().needsSafePoint(GC::PreCall))
- FI->addSafePoint(GC::PreCall, InsertLabel(*CI->getParent(), CI,
- CI->getDebugLoc()));
+ if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
+ MCSymbol* Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
+ FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
+ }
- if (FI->getStrategy().needsSafePoint(GC::PostCall))
- FI->addSafePoint(GC::PostCall, InsertLabel(*CI->getParent(), RAI,
- CI->getDebugLoc()));
+ if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
+ MCSymbol* Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
+ FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
+ }
}
void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
@@ -364,12 +382,12 @@ void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
}
void MachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
- const TargetRegisterInfo *TRI = TM->getRegisterInfo();
- assert(TRI && "TargetRegisterInfo not available!");
+ const TargetFrameLowering *TFI = TM->getFrameLowering();
+ assert(TFI && "TargetFrameLowering not available!");
for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(),
RE = FI->roots_end(); RI != RE; ++RI)
- RI->StackOffset = TRI->getFrameIndexOffset(MF, RI->Num);
+ RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
}
bool MachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index 0ea30d7..db53b04 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -17,7 +17,9 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetInstrItineraries.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -26,6 +28,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
@@ -91,6 +94,8 @@ namespace {
/// ClobbersPred - True if BB could modify predicates (e.g. has
/// cmp, call, etc.)
/// NonPredSize - Number of non-predicated instructions.
+ /// ExtraCost - Extra cost for multi-cycle instructions.
+ /// ExtraCost2 - Extra cost of instructions that are slower when predicated.
/// BB - Corresponding MachineBasicBlock.
/// TrueBB / FalseBB- See AnalyzeBranch().
/// BrCond - Conditions for end of block conditional branches.
@@ -106,6 +111,8 @@ namespace {
bool CannotBeCopied : 1;
bool ClobbersPred : 1;
unsigned NonPredSize;
+ unsigned ExtraCost;
+ unsigned ExtraCost2;
MachineBasicBlock *BB;
MachineBasicBlock *TrueBB;
MachineBasicBlock *FalseBB;
@@ -115,7 +122,7 @@ namespace {
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
HasFallThrough(false), IsUnpredicable(false),
CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
- BB(0), TrueBB(0), FalseBB(0) {}
+ ExtraCost(0), ExtraCost2(0), BB(0), TrueBB(0), FalseBB(0) {}
};
/// IfcvtToken - Record information about pending if-conversions to attempt:
@@ -150,20 +157,31 @@ namespace {
const TargetLowering *TLI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const InstrItineraryData *InstrItins;
+ const MachineLoopInfo *MLI;
bool MadeChange;
int FnNum;
public:
static char ID;
- IfConverter() : MachineFunctionPass(ID), FnNum(-1) {}
+ IfConverter() : MachineFunctionPass(ID), FnNum(-1) {
+ initializeIfConverterPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const { return "If Converter"; }
private:
bool ReverseBranchCondition(BBInfo &BBI);
- bool ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const;
+ bool ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
+ float Prediction, float Confidence) const;
bool ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
- bool FalseBranch, unsigned &Dups) const;
+ bool FalseBranch, unsigned &Dups,
+ float Prediction, float Confidence) const;
bool ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
unsigned &Dups1, unsigned &Dups2) const;
void ScanInstructions(BBInfo &BBI);
@@ -188,14 +206,21 @@ namespace {
bool IgnoreBr = false);
void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
- bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size) const {
- return Size > 0 && TII->isProfitableToIfCvt(BB, Size);
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &BB,
+ unsigned Cycle, unsigned Extra,
+ float Prediction, float Confidence) const {
+ return Cycle > 0 && TII->isProfitableToIfCvt(BB, Cycle, Extra,
+ Prediction, Confidence);
}
- bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
- MachineBasicBlock &FBB, unsigned FSize) const {
- return TSize > 0 && FSize > 0 &&
- TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize);
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB,
+ unsigned TCycle, unsigned TExtra,
+ MachineBasicBlock &FBB,
+ unsigned FCycle, unsigned FExtra,
+ float Prediction, float Confidence) const {
+ return TCycle > 0 && FCycle > 0 &&
+ TII->isProfitableToIfCvt(TBB, TCycle, TExtra, FBB, FCycle, FExtra,
+ Prediction, Confidence);
}
// blockAlwaysFallThrough - Block ends without a terminator.
@@ -230,7 +255,9 @@ namespace {
char IfConverter::ID = 0;
}
-INITIALIZE_PASS(IfConverter, "if-converter", "If Converter", false, false);
+INITIALIZE_PASS_BEGIN(IfConverter, "if-converter", "If Converter", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(IfConverter, "if-converter", "If Converter", false, false)
FunctionPass *llvm::createIfConverterPass() { return new IfConverter(); }
@@ -238,6 +265,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getTarget().getTargetLowering();
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
+ MLI = &getAnalysis<MachineLoopInfo>();
+ InstrItins = MF.getTarget().getInstrItineraryData();
if (!TII) return false;
// Tail merge tends to expose more if-conversion opportunities.
@@ -431,7 +460,8 @@ static inline MachineBasicBlock *getNextBlock(MachineBasicBlock *BB) {
/// predecessor) forms a valid simple shape for ifcvt. It also returns the
/// number of instructions that the ifcvt would need to duplicate if performed
/// in Dups.
-bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
+bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
+ float Prediction, float Confidence) const {
Dups = 0;
if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
return false;
@@ -441,7 +471,8 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
if (TrueBBI.BB->pred_size() > 1) {
if (TrueBBI.CannotBeCopied ||
- !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize))
+ !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize,
+ Prediction, Confidence))
return false;
Dups = TrueBBI.NonPredSize;
}
@@ -456,7 +487,8 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
/// returns the number of instructions that the ifcvt would need to duplicate
/// if performed in 'Dups'.
bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
- bool FalseBranch, unsigned &Dups) const {
+ bool FalseBranch, unsigned &Dups,
+ float Prediction, float Confidence) const {
Dups = 0;
if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
return false;
@@ -478,7 +510,8 @@ bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
++Size;
}
}
- if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size))
+ if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size,
+ Prediction, Confidence))
return false;
Dups = Size;
}
@@ -493,18 +526,6 @@ bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
return TExit && TExit == FalseBBI.BB;
}
-static
-MachineBasicBlock::iterator firstNonBranchInst(MachineBasicBlock *BB,
- const TargetInstrInfo *TII) {
- MachineBasicBlock::iterator I = BB->end();
- while (I != BB->begin()) {
- --I;
- if (!I->getDesc().isBranch())
- break;
- }
- return I;
-}
-
/// ValidDiamond - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) forms a valid diamond shape for ifcvt.
bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
@@ -533,64 +554,70 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
(TrueBBI.ClobbersPred && FalseBBI.ClobbersPred))
return false;
- MachineBasicBlock::iterator TI = TrueBBI.BB->begin();
- MachineBasicBlock::iterator FI = FalseBBI.BB->begin();
+ // Count duplicate instructions at the beginning of the true and false blocks.
+ MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
+ MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
MachineBasicBlock::iterator TIE = TrueBBI.BB->end();
MachineBasicBlock::iterator FIE = FalseBBI.BB->end();
- // Skip dbg_value instructions
- while (TI != TIE && TI->isDebugValue())
- ++TI;
- while (FI != FIE && FI->isDebugValue())
- ++FI;
- while (TI != TIE && FI != FIE) {
+ while (TIB != TIE && FIB != FIE) {
// Skip dbg_value instructions. These do not count.
- if (TI->isDebugValue()) {
- while (TI != TIE && TI->isDebugValue())
- ++TI;
- if (TI == TIE)
+ if (TIB->isDebugValue()) {
+ while (TIB != TIE && TIB->isDebugValue())
+ ++TIB;
+ if (TIB == TIE)
break;
}
- if (FI->isDebugValue()) {
- while (FI != FIE && FI->isDebugValue())
- ++FI;
- if (FI == FIE)
+ if (FIB->isDebugValue()) {
+ while (FIB != FIE && FIB->isDebugValue())
+ ++FIB;
+ if (FIB == FIE)
break;
}
- if (!TI->isIdenticalTo(FI))
+ if (!TIB->isIdenticalTo(FIB))
break;
++Dups1;
- ++TI;
- ++FI;
+ ++TIB;
+ ++FIB;
}
- TI = firstNonBranchInst(TrueBBI.BB, TII);
- FI = firstNonBranchInst(FalseBBI.BB, TII);
- MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
- MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
- // Skip dbg_value instructions at end of the bb's.
- while (TI != TIB && TI->isDebugValue())
- --TI;
- while (FI != FIB && FI->isDebugValue())
- --FI;
- while (TI != TIB && FI != FIB) {
+ // Now, in preparation for counting duplicate instructions at the ends of the
+ // blocks, move the end iterators up past any branch instructions.
+ while (TIE != TIB) {
+ --TIE;
+ if (!TIE->getDesc().isBranch())
+ break;
+ }
+ while (FIE != FIB) {
+ --FIE;
+ if (!FIE->getDesc().isBranch())
+ break;
+ }
+
+ // If Dups1 includes all of a block, then don't count duplicate
+ // instructions at the end of the blocks.
+ if (TIB == TIE || FIB == FIE)
+ return true;
+
+ // Count duplicate instructions at the ends of the blocks.
+ while (TIE != TIB && FIE != FIB) {
// Skip dbg_value instructions. These do not count.
- if (TI->isDebugValue()) {
- while (TI != TIB && TI->isDebugValue())
- --TI;
- if (TI == TIB)
+ if (TIE->isDebugValue()) {
+ while (TIE != TIB && TIE->isDebugValue())
+ --TIE;
+ if (TIE == TIB)
break;
}
- if (FI->isDebugValue()) {
- while (FI != FIB && FI->isDebugValue())
- --FI;
- if (FI == FIB)
+ if (FIE->isDebugValue()) {
+ while (FIE != FIB && FIE->isDebugValue())
+ --FIE;
+ if (FIE == FIB)
break;
}
- if (!TI->isIdenticalTo(FI))
+ if (!TIE->isIdenticalTo(FIE))
break;
++Dups2;
- --TI;
- --FI;
+ --TIE;
+ --FIE;
}
return true;
@@ -627,6 +654,8 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
// Then scan all the instructions.
BBI.NonPredSize = 0;
+ BBI.ExtraCost = 0;
+ BBI.ExtraCost2 = 0;
BBI.ClobbersPred = false;
for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
I != E; ++I) {
@@ -641,9 +670,15 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
bool isCondBr = BBI.IsBrAnalyzable && TID.isConditionalBranch();
if (!isCondBr) {
- if (!isPredicated)
+ if (!isPredicated) {
BBI.NonPredSize++;
- else if (!AlreadyPredicated) {
+ unsigned ExtraPredCost = 0;
+ unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I,
+ &ExtraPredCost);
+ if (NumCycles > 1)
+ BBI.ExtraCost += NumCycles-1;
+ BBI.ExtraCost2 += ExtraPredCost;
+ } else if (!AlreadyPredicated) {
// FIXME: This instruction is already predicated before the
// if-conversion pass. It's probably something like a conditional move.
// Mark this block unpredicable for now.
@@ -765,9 +800,35 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
bool TNeedSub = TrueBBI.Predicate.size() > 0;
bool FNeedSub = FalseBBI.Predicate.size() > 0;
bool Enqueued = false;
+
+ // Try to predict the branch, using loop info to guide us.
+ // General heuristics are:
+ // - backedge -> 90% taken
+ // - early exit -> 20% taken
+ // - branch predictor confidence -> 90%
+ float Prediction = 0.5f;
+ float Confidence = 0.9f;
+ MachineLoop *Loop = MLI->getLoopFor(BB);
+ if (Loop) {
+ if (TrueBBI.BB == Loop->getHeader())
+ Prediction = 0.9f;
+ else if (FalseBBI.BB == Loop->getHeader())
+ Prediction = 0.1f;
+
+ MachineLoop *TrueLoop = MLI->getLoopFor(TrueBBI.BB);
+ MachineLoop *FalseLoop = MLI->getLoopFor(FalseBBI.BB);
+ if (!TrueLoop || TrueLoop->getParentLoop() == Loop)
+ Prediction = 0.2f;
+ else if (!FalseLoop || FalseLoop->getParentLoop() == Loop)
+ Prediction = 0.8f;
+ }
+
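Reading the heuristic as a small pure function may help; this sketch mirrors the numbers from the comment above (simplified: it collapses the loop-structure queries into boolean inputs, so it is an illustration of the idea, not the pass's exact logic):

    // Estimated probability that the branch takes the true side.
    // Backedges are assumed ~90% taken, early exits ~20% taken.
    static float predictBranch(bool TrueIsHeader, bool FalseIsHeader,
                               bool TrueLeavesLoop, bool FalseLeavesLoop) {
      float Prediction = 0.5f;   // no information: a coin flip
      if (TrueIsHeader)
        Prediction = 0.9f;       // backedge on the true side
      else if (FalseIsHeader)
        Prediction = 0.1f;       // backedge on the false side
      if (TrueLeavesLoop)
        Prediction = 0.2f;       // early exit on the true side
      else if (FalseLeavesLoop)
        Prediction = 0.8f;       // early exit on the false side
      return Prediction;
    }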
if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
- MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize - (Dups + Dups2),
- *FalseBBI.BB, FalseBBI.NonPredSize - (Dups + Dups2)) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, (TrueBBI.NonPredSize - (Dups + Dups2) +
+ TrueBBI.ExtraCost), TrueBBI.ExtraCost2,
+ *FalseBBI.BB, (FalseBBI.NonPredSize - (Dups + Dups2) +
+ FalseBBI.ExtraCost), FalseBBI.ExtraCost2,
+ Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
// Diamond:
@@ -783,8 +844,9 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
Enqueued = true;
}
- if (ValidTriangle(TrueBBI, FalseBBI, false, Dups) &&
- MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+ if (ValidTriangle(TrueBBI, FalseBBI, false, Dups, Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
// Triangle:
// EBB
@@ -797,15 +859,17 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
Enqueued = true;
}
- if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
- MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+ if (ValidTriangle(TrueBBI, FalseBBI, true, Dups, Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
Enqueued = true;
}
- if (ValidSimple(TrueBBI, Dups) &&
- MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+ if (ValidSimple(TrueBBI, Dups, Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
// Simple (split, no rejoin):
// EBB
@@ -820,22 +884,30 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
if (CanRevCond) {
// Try the other path...
- if (ValidTriangle(FalseBBI, TrueBBI, false, Dups) &&
- MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+ if (ValidTriangle(FalseBBI, TrueBBI, false, Dups,
+ 1.0-Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB,
+ FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
Enqueued = true;
}
- if (ValidTriangle(FalseBBI, TrueBBI, true, Dups) &&
- MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+ if (ValidTriangle(FalseBBI, TrueBBI, true, Dups,
+ 1.0-Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB,
+ FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
Enqueued = true;
}
- if (ValidSimple(FalseBBI, Dups) &&
- MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+ if (ValidSimple(FalseBBI, Dups, 1.0-Prediction, Confidence) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB,
+ FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
Enqueued = true;
@@ -1365,6 +1437,11 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
+ unsigned ExtraPredCost = 0;
+ unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I, &ExtraPredCost);
+ if (NumCycles > 1)
+ ToBBI.ExtraCost += NumCycles-1;
+ ToBBI.ExtraCost2 += ExtraPredCost;
if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
@@ -1438,7 +1515,11 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
FromBBI.Predicate.clear();
ToBBI.NonPredSize += FromBBI.NonPredSize;
+ ToBBI.ExtraCost += FromBBI.ExtraCost;
+ ToBBI.ExtraCost2 += FromBBI.ExtraCost2;
FromBBI.NonPredSize = 0;
+ FromBBI.ExtraCost = 0;
+ FromBBI.ExtraCost2 = 0;
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.HasFallThrough = FromBBI.HasFallThrough;
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index b965bfd..a1bd972 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -12,28 +12,34 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "spiller"
+#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
-#include "SplitKit.h"
+#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static cl::opt<bool>
+VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));
+
namespace {
class InlineSpiller : public Spiller {
MachineFunctionPass &pass_;
MachineFunction &mf_;
LiveIntervals &lis_;
- MachineLoopInfo &loops_;
+ LiveStacks &lss_;
+ AliasAnalysis *aa_;
VirtRegMap &vrm_;
MachineFrameInfo &mfi_;
MachineRegisterInfo &mri_;
@@ -41,19 +47,12 @@ class InlineSpiller : public Spiller {
const TargetRegisterInfo &tri_;
const BitVector reserved_;
- SplitAnalysis splitAnalysis_;
-
// Variables that are valid during spill(), but used by multiple methods.
- LiveInterval *li_;
- SmallVectorImpl<LiveInterval*> *newIntervals_;
+ LiveRangeEdit *edit_;
const TargetRegisterClass *rc_;
int stackSlot_;
- const SmallVectorImpl<LiveInterval*> *spillIs_;
- // Values of the current interval that can potentially remat.
- SmallPtrSet<VNInfo*, 8> reMattable_;
-
- // Values in reMattable_ that failed to remat at some point.
+ // Values that failed to remat at some point.
SmallPtrSet<VNInfo*, 8> usedValues_;
~InlineSpiller() {}
@@ -65,30 +64,29 @@ public:
: pass_(pass),
mf_(mf),
lis_(pass.getAnalysis<LiveIntervals>()),
- loops_(pass.getAnalysis<MachineLoopInfo>()),
+ lss_(pass.getAnalysis<LiveStacks>()),
+ aa_(&pass.getAnalysis<AliasAnalysis>()),
vrm_(vrm),
mfi_(*mf.getFrameInfo()),
mri_(mf.getRegInfo()),
tii_(*mf.getTarget().getInstrInfo()),
tri_(*mf.getTarget().getRegisterInfo()),
- reserved_(tri_.getReservedRegs(mf_)),
- splitAnalysis_(mf, lis_, loops_) {}
+ reserved_(tri_.getReservedRegs(mf_)) {}
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &spillIs);
+ const SmallVectorImpl<LiveInterval*> &spillIs);
-private:
- bool split();
+ void spill(LiveRangeEdit &);
- bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
- SlotIndex UseIdx);
+private:
bool reMaterializeFor(MachineBasicBlock::iterator MI);
void reMaterializeAll();
bool coalesceStackAccess(MachineInstr *MI);
bool foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops);
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI = 0);
void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
@@ -98,106 +96,41 @@ namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm) {
+ if (VerifySpills)
+ mf.verify(&pass, "When creating inline spiller");
return new InlineSpiller(pass, mf, vrm);
}
}
-/// split - try splitting the current interval into pieces that may allocate
-/// separately. Return true if successful.
-bool InlineSpiller::split() {
- splitAnalysis_.analyze(li_);
-
- if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
- // We can split, but li_ may be left intact with fewer uses.
- if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
- .splitAroundLoop(loop))
- return true;
- }
-
- // Try splitting into single block intervals.
- SplitAnalysis::BlockPtrSet blocks;
- if (splitAnalysis_.getMultiUseBlocks(blocks)) {
- if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
- .splitSingleBlocks(blocks))
- return true;
- }
-
- // Try splitting inside a basic block.
- if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
- if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
- .splitInsideBlock(MBB))
- return true;
- }
-
- // We may have been able to split out some uses, but the original interval is
- // intact, and it should still be spilled.
- return false;
-}
-
-/// allUsesAvailableAt - Return true if all registers used by OrigMI at
-/// OrigIdx are also available with the same value at UseIdx.
-bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
- SlotIndex OrigIdx,
- SlotIndex UseIdx) {
- OrigIdx = OrigIdx.getUseIndex();
- UseIdx = UseIdx.getUseIndex();
- for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = OrigMI->getOperand(i);
- if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
- continue;
- // Reserved registers are OK.
- if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
- continue;
- // We don't want to move any defs.
- if (MO.isDef())
- return false;
- // We cannot depend on virtual registers in spillIs_. They will be spilled.
- for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
- if ((*spillIs_)[si]->reg == MO.getReg())
- return false;
-
- LiveInterval &LI = lis_.getInterval(MO.getReg());
- const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
- if (!OVNI)
- continue;
- if (OVNI != LI.getVNInfoAt(UseIdx))
- return false;
- }
- return true;
-}
-
-/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
+/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
- VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
+ VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);
+
if (!OrigVNI) {
DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
+ if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
MO.setIsUndef();
}
DEBUG(dbgs() << UseIdx << '\t' << *MI);
return true;
}
- if (!reMattable_.count(OrigVNI)) {
- DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
- << UseIdx << '\t' << *MI);
- return false;
- }
- MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
- if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
+
+ LiveRangeEdit::Remat RM(OrigVNI);
+ if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
usedValues_.insert(OrigVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
}
- // If the instruction also writes li_->reg, it had better not require the same
- // register for uses and defs.
+ // If the instruction also writes edit_->getReg(), it had better not require
+ // the same register for uses and defs.
bool Reads, Writes;
SmallVector<unsigned, 8> Ops;
- tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
if (Writes) {
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i]);
@@ -209,62 +142,57 @@ bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
}
}
+ // Before rematerializing into a register for a single instruction, try to
+ // fold a load into the instruction. That avoids allocating a new register.
+ if (RM.OrigMI->getDesc().canFoldAsLoad() &&
+ foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+ edit_->markRematerialized(RM.ParentVNI);
+ return true;
+ }
+
+ // Allocate a new register for the remat.
- unsigned NewVReg = mri_.createVirtualRegister(rc_);
- vrm_.grow();
- LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
NewLI.markNotSpillable();
- newIntervals_->push_back(&NewLI);
+
+ // Rematting for a copy: Set allocation hint to be the destination register.
+ if (MI->isCopy())
+ mri_.setRegAllocationHint(NewLI.reg, 0, MI->getOperand(0).getReg());
// Finally we can rematerialize OrigMI before MI.
- MachineBasicBlock &MBB = *MI->getParent();
- tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
- MachineBasicBlock::iterator RematMI = MI;
- SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
- DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);
+ SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
+ lis_, tii_, tri_);
+ DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
+ << *lis_.getInstructionFromIndex(DefIdx));
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i]);
- if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
- MO.setReg(NewVReg);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
+ MO.setReg(NewLI.reg);
MO.setIsKill();
}
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);
- VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
- lis_.getVNInfoAllocator());
+ VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
return true;
}
-/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
+/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
// Do a quick scan of the interval values to find if any are remattable.
- reMattable_.clear();
- usedValues_.clear();
- for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
- E = li_->vni_end(); I != E; ++I) {
- VNInfo *VNI = *I;
- if (VNI->isUnused() || !VNI->isDefAccurate())
- continue;
- MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
- if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
- continue;
- reMattable_.insert(VNI);
- }
-
- // Often, no defs are remattable.
- if (reMattable_.empty())
+ if (!edit_->anyRematerializable(lis_, tii_, aa_))
return;
- // Try to remat before all uses of li_->reg.
+ usedValues_.clear();
+
+ // Try to remat before all uses of edit_->getReg().
bool anyRemat = false;
for (MachineRegisterInfo::use_nodbg_iterator
- RI = mri_.use_nodbg_begin(li_->reg);
+ RI = mri_.use_nodbg_begin(edit_->getReg());
MachineInstr *MI = RI.skipInstruction();)
anyRemat |= reMaterializeFor(MI);
@@ -273,33 +201,35 @@ void InlineSpiller::reMaterializeAll() {
// Remove any values that were completely rematted.
bool anyRemoved = false;
- for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
- E = reMattable_.end(); I != E; ++I) {
+ for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
+ E = edit_->getParent().vni_end(); I != E; ++I) {
VNInfo *VNI = *I;
- if (VNI->hasPHIKill() || usedValues_.count(VNI))
+ if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
+ usedValues_.count(VNI))
continue;
MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
lis_.RemoveMachineInstrFromMaps(DefMI);
vrm_.RemoveMachineInstrFromMaps(DefMI);
DefMI->eraseFromParent();
- VNI->setIsDefAccurate(false);
+ VNI->def = SlotIndex();
anyRemoved = true;
}
if (!anyRemoved)
return;
- // Removing values may cause debug uses where li_ is not live.
- for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
+ // Removing values may cause debug uses where parent is not live.
+ for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
MachineInstr *MI = RI.skipInstruction();) {
if (!MI->isDebugValue())
continue;
- // Try to preserve the debug value if li_ is live immediately after it.
+ // Try to preserve the debug value if parent is live immediately after it.
MachineBasicBlock::iterator NextMI = MI;
++NextMI;
if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
- VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
+ SlotIndex Idx = lis_.getInstructionIndex(NextMI);
+ VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
continue;
}
@@ -317,7 +247,7 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
return false;
// We have a stack access. Is it the right register and slot?
- if (reg != li_->reg || FI != stackSlot_)
+ if (reg != edit_->getReg() || FI != stackSlot_)
return false;
DEBUG(dbgs() << "Coalescing stack access: " << *MI);
@@ -327,9 +257,13 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
}
/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// Return true on success, and MI will be erased.
+/// @param MI Instruction using or defining the current register.
+/// @param Ops Operand indices from readsWritesVirtualRegister().
+/// @param LoadMI Load instruction to use instead of stack slot when non-null.
+/// @return True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops) {
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI) {
// TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
// operands.
SmallVector<unsigned, 8> FoldOps;
@@ -341,16 +275,22 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
// FIXME: Teach targets to deal with subregs.
if (MO.getSubReg())
return false;
+ // We cannot fold a load instruction into a def.
+ if (LoadMI && MO.isDef())
+ return false;
// Tied use operands should not be passed to foldMemoryOperand.
if (!MI->isRegTiedToDefOperand(Idx))
FoldOps.push_back(Idx);
}
- MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
+ MachineInstr *FoldMI =
+ LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
+ : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
if (!FoldMI)
return false;
lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
- vrm_.addSpillSlotUse(stackSlot_, FoldMI);
+ if (!LoadMI)
+ vrm_.addSpillSlotUse(stackSlot_, FoldMI);
MI->eraseFromParent();
DEBUG(dbgs() << "\tfolded: " << *FoldMI);
return true;
@@ -366,7 +306,7 @@ void InlineSpiller::insertReload(LiveInterval &NewLI,
SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
vrm_.addSpillSlotUse(stackSlot_, MI);
DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
- VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
+ VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
lis_.getVNInfoAllocator());
NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}
@@ -375,44 +315,58 @@ void InlineSpiller::insertReload(LiveInterval &NewLI,
void InlineSpiller::insertSpill(LiveInterval &NewLI,
MachineBasicBlock::iterator MI) {
MachineBasicBlock &MBB = *MI->getParent();
+
+ // Get the defined value. It could be an early clobber so keep the def index.
SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
+ VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
+ assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
+ Idx = VNI->def;
+
tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
--MI; // Point to store instruction.
SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
vrm_.addSpillSlotUse(stackSlot_, MI);
DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
- VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
- lis_.getVNInfoAllocator());
+ VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}
void InlineSpiller::spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &spillIs) {
- DEBUG(dbgs() << "Inline spilling " << *li << "\n");
- assert(li->isSpillable() && "Attempting to spill already spilled value.");
- assert(!li->isStackSlot() && "Trying to spill a stack slot.");
-
- li_ = li;
- newIntervals_ = &newIntervals;
- rc_ = mri_.getRegClass(li->reg);
- spillIs_ = &spillIs;
+ const SmallVectorImpl<LiveInterval*> &spillIs) {
+ LiveRangeEdit edit(*li, newIntervals, spillIs);
+ spill(edit);
+ if (VerifySpills)
+ mf_.verify(&pass_, "After inline spill");
+}
- if (split())
- return;
+void InlineSpiller::spill(LiveRangeEdit &edit) {
+ edit_ = &edit;
+ assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
+ && "Trying to spill a stack slot.");
+ DEBUG(dbgs() << "Inline spilling "
+ << mri_.getRegClass(edit.getReg())->getName()
+ << ':' << edit.getParent() << "\n");
+ assert(edit.getParent().isSpillable() &&
+ "Attempting to spill already spilled value.");
reMaterializeAll();
// Remat may handle everything.
- if (li_->empty())
+ if (edit_->getParent().empty())
return;
- stackSlot_ = vrm_.getStackSlot(li->reg);
- if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
- stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
+ rc_ = mri_.getRegClass(edit.getReg());
+ stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());
+
+ // Update LiveStacks now that we are committed to spilling.
+ LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
+ assert(stacklvr.empty() && "Just created stack slot not empty");
+ stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
+ stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));
// Iterate over instructions using register.
- for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
+ for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
MachineInstr *MI = RI.skipInstruction();) {
// Debug values are not allowed to affect codegen.
@@ -440,7 +394,7 @@ void InlineSpiller::spill(LiveInterval *li,
// Analyze instruction.
bool Reads, Writes;
SmallVector<unsigned, 8> Ops;
- tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);
// Attempt to fold memory ops.
if (foldMemoryOperand(MI, Ops))
@@ -448,9 +402,7 @@ void InlineSpiller::spill(LiveInterval *li,
// Allocate interval around instruction.
// FIXME: Infer regclass from instruction alone.
- unsigned NewVReg = mri_.createVirtualRegister(rc_);
- vrm_.grow();
- LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
NewLI.markNotSpillable();
if (Reads)
@@ -460,7 +412,7 @@ void InlineSpiller::spill(LiveInterval *li,
bool hasLiveDef = false;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i]);
- MO.setReg(NewVReg);
+ MO.setReg(NewLI.reg);
if (MO.isUse()) {
if (!MI->isRegTiedToDefOperand(Ops[i]))
MO.setIsKill();
@@ -475,6 +427,5 @@ void InlineSpiller::spill(LiveInterval *li,
insertSpill(NewLI, MI);
DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
- newIntervals.push_back(&NewLI);
}
}
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 3852eba..3861dda 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -85,9 +85,11 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
}
// VisualStudio defines setjmp as _setjmp
-#if defined(_MSC_VER) && defined(setjmp)
-#define setjmp_undefined_for_visual_studio
-#undef setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+ !defined(setjmp_undefined_for_msvc)
+# pragma push_macro("setjmp")
+# undef setjmp
+# define setjmp_undefined_for_msvc
#endif
void IntrinsicLowering::AddPrototypes(Module &M) {
@@ -536,3 +538,27 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
"Lowering should have eliminated any uses of the intrinsic call!");
CI->eraseFromParent();
}
+
+bool IntrinsicLowering::LowerToByteSwap(CallInst *CI) {
+ // Verify this is a simple bswap.
+ if (CI->getNumArgOperands() != 1 ||
+ CI->getType() != CI->getArgOperand(0)->getType() ||
+ !CI->getType()->isIntegerTy())
+ return false;
+
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty)
+ return false;
+
+ // Okay, we can do this xform, do so now.
+ const Type *Tys[] = { Ty };
+ Module *M = CI->getParent()->getParent()->getParent();
+ Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
+
+ Value *Op = CI->getArgOperand(0);
+ Op = CallInst::Create(Int, Op, CI->getName(), CI);
+
+ CI->replaceAllUsesWith(Op);
+ CI->eraseFromParent();
+ return true;
+}
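The LowerToByteSwap helper added above only accepts a single-operand call whose result type equals its integer argument type; anything else is rejected so the caller can fall back to other lowering. A minimal caller-side sketch, assuming a frontend-declared _byteswap_ulong and the 2.9-era use-list API (the driver function and symbol name are illustrative, not part of this patch):

    // Hypothetical driver: reroute calls to an assumed _byteswap_ulong
    // declaration through IntrinsicLowering::LowerToByteSwap.
    static void lowerByteSwapCalls(Module &M) {
      Function *BSwap = M.getFunction("_byteswap_ulong");
      if (!BSwap)
        return;
      for (Value::use_iterator UI = BSwap->use_begin(), UE = BSwap->use_end();
           UI != UE; /* incremented in loop */) {
        User *U = *UI++;  // Advance first; a lowered call is erased.
        if (CallInst *CI = dyn_cast<CallInst>(U))
          IntrinsicLowering::LowerToByteSwap(CI);  // false => not a simple bswap
      }
    }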
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index 3603802..80dfc76 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -20,9 +20,11 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Transforms/Scalar.h"
@@ -30,6 +32,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/StandardPasses.h"
using namespace llvm;
namespace llvm {
@@ -140,13 +143,19 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = 0;
- if (ShowMCEncoding)
+ TargetAsmBackend *TAB = 0;
+ if (ShowMCEncoding) {
MCE = getTarget().createCodeEmitter(*this, *Context);
-
- AsmStreamer.reset(createAsmStreamer(*Context, Out,
- getTargetData()->isLittleEndian(),
- getVerboseAsm(), InstPrinter,
- MCE, ShowMCInst));
+ TAB = getTarget().createAsmBackend(TargetTriple);
+ }
+
+ MCStreamer *S = getTarget().createAsmStreamer(*Context, Out,
+ getVerboseAsm(),
+ hasMCUseLoc(),
+ InstPrinter,
+ MCE, TAB,
+ ShowMCInst);
+ AsmStreamer.reset(S);
break;
}
case CGFT_ObjectFile: {
@@ -159,7 +168,9 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
AsmStreamer.reset(getTarget().createObjectStreamer(TargetTriple, *Context,
*TAB, Out, MCE,
- hasMCRelaxAll()));
+ hasMCRelaxAll(),
+ hasMCNoExecStack()));
+ AsmStreamer.get()->InitSections();
break;
}
case CGFT_Null:
@@ -241,7 +252,7 @@ static void printAndVerify(PassManagerBase &PM,
PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
if (VerifyMachineCode)
- PM.add(createMachineVerifierPass());
+ PM.add(createMachineVerifierPass(Banner));
}
/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for both
@@ -253,6 +264,9 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
MCContext *&OutContext) {
// Standard LLVM-Level Passes.
+ // Basic AliasAnalysis support.
+ createStandardAliasAnalysisPasses(&PM);
+
// Before running any passes, run the verifier to determine if the input
// coming from the front-end and/or optimizer is valid.
if (!DisableVerify)
@@ -288,7 +302,8 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
// edge from elsewhere.
PM.add(createSjLjEHPass(getTargetLowering()));
// FALLTHROUGH
- case ExceptionHandling::Dwarf:
+ case ExceptionHandling::DwarfCFI:
+ case ExceptionHandling::DwarfTable:
PM.add(createDwarfEHPass(this));
break;
case ExceptionHandling::None:
@@ -320,7 +335,8 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
// Install a MachineModuleInfo class, which is an immutable pass that holds
// all the per-module stuff we're generating, including MCContext.
- MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo());
+ TargetAsmInfo *TAI = new TargetAsmInfo(*this);
+ MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo(), TAI);
PM.add(MMI);
OutContext = &MMI->getContext(); // Return the MCContext specifically by-ref.
@@ -339,6 +355,9 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
// Print the instruction selected machine code...
printAndVerify(PM, "After Instruction Selection");
+ // Expand pseudo-instructions emitted by ISel.
+ PM.add(createExpandISelPseudosPass());
+
// Optimize PHIs before DCE: removing dead PHI cycles may make more
// instructions dead.
if (OptLevel != CodeGenOpt::None)
@@ -356,13 +375,15 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
PM.add(createDeadMachineInstructionElimPass());
printAndVerify(PM, "After codegen DCE pass");
- PM.add(createPeepholeOptimizerPass());
if (!DisableMachineLICM)
PM.add(createMachineLICMPass());
PM.add(createMachineCSEPass());
if (!DisableMachineSink)
PM.add(createMachineSinkingPass());
printAndVerify(PM, "After Machine LICM, CSE and Sinking passes");
+
+ PM.add(createPeepholeOptimizerPass());
+ printAndVerify(PM, "After codegen peephole optimization pass");
}
// Pre-ra tail duplication.
diff --git a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
index b9527fa..0eb009d 100644
--- a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -16,6 +16,7 @@
#define DEBUG_TYPE "scheduler"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
@@ -35,14 +36,14 @@ bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
unsigned RHSLatency = PQ->getLatency(RHSNum);
if (LHSLatency < RHSLatency) return true;
if (LHSLatency > RHSLatency) return false;
-
+
// After that, if two nodes have identical latencies, look to see if one will
// unblock more other nodes than the other.
unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
if (LHSBlocked < RHSBlocked) return true;
if (LHSBlocked > RHSBlocked) return false;
-
+
// Finally, just to provide a stable ordering, use the node number as a
// deciding factor.
return LHSNum < RHSNum;
@@ -64,7 +65,7 @@ SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
OnlyAvailablePred = &Pred;
}
}
-
+
return OnlyAvailablePred;
}
@@ -78,7 +79,7 @@ void LatencyPriorityQueue::push(SUnit *SU) {
++NumNodesBlocking;
}
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
-
+
Queue.push_back(SU);
}
@@ -102,10 +103,10 @@ void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
-
+
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
-
+
// Okay, we found a single predecessor that is available, but not scheduled.
// Since it is available, it must be in the priority queue. First remove it.
remove(OnlyAvailablePred);
@@ -136,3 +137,16 @@ void LatencyPriorityQueue::remove(SUnit *SU) {
std::swap(*I, Queue.back());
Queue.pop_back();
}
+
+#ifdef NDEBUG
+void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {}
+#else
+void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {
+ LatencyPriorityQueue q = *this;
+ while (!q.empty()) {
+ SUnit *su = q.pop();
+ dbgs() << "Height " << su->getHeight() << ": ";
+ su->dump(DAG);
+ }
+}
+#endif
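For context, latency_sort above compares ready nodes on three tiers: scheduled latency first, then how many nodes each candidate solely blocks, then the node number as a stable tie-breaker. A condensed sketch of the same scheme with the priority-queue lookups replaced by plain fields (an assumed simplification, not the scheduler's real types):

    // Returns true when L has lower priority than R, mirroring the three
    // comparison tiers of latency_sort.
    struct Key { unsigned Latency, SolelyBlocked, NodeNum; };
    static bool lowerPriority(const Key &L, const Key &R) {
      if (L.Latency != R.Latency)              // 1. prefer the longer path
        return L.Latency < R.Latency;
      if (L.SolelyBlocked != R.SolelyBlocked)  // 2. prefer unblocking more
        return L.SolelyBlocked < R.SolelyBlocked;
      return L.NodeNum < R.NodeNum;            // 3. stable order by number
    }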
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
new file mode 100644
index 0000000..853ec1a
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -0,0 +1,711 @@
+//===- LiveDebugVariables.cpp - Tracking debug info variables -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveDebugVariables analysis.
+//
+// Remove all DBG_VALUE instructions referencing virtual registers and replace
+// them with a data structure tracking where live user variables are kept - in a
+// virtual register or in a stack slot.
+//
+// Allow the data structure to be updated during register allocation when values
+// are moved between registers and stack slots. Finally emit new DBG_VALUE
+// instructions after register allocation is complete.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "livedebug"
+#include "LiveDebugVariables.h"
+#include "VirtRegMap.h"
+#include "llvm/Constants.h"
+#include "llvm/Metadata.h"
+#include "llvm/Value.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+EnableLDV("live-debug-variables", cl::init(true),
+ cl::desc("Enable the live debug variables pass"), cl::Hidden);
+
+char LiveDebugVariables::ID = 0;
+
+INITIALIZE_PASS_BEGIN(LiveDebugVariables, "livedebugvars",
+ "Debug Variable Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(LiveDebugVariables, "livedebugvars",
+ "Debug Variable Analysis", false, false)
+
+void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequiredTransitive<LiveIntervals>();
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(0) {
+ initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
+}
+
+/// LocMap - Map of where a user value is live, and its location.
+typedef IntervalMap<SlotIndex, unsigned, 4> LocMap;
+
+/// UserValue - A user value is a part of a debug info user variable.
+///
+/// A DBG_VALUE instruction notes that (a sub-register of) a virtual register
+/// holds part of a user variable. The part is identified by a byte offset.
+///
+/// UserValues are grouped into equivalence classes for easier searching. Two
+/// user values are related if they refer to the same variable, or if they are
+/// held by the same virtual register. The equivalence class is the transitive
+/// closure of that relation.
+namespace {
+class UserValue {
+ const MDNode *variable; ///< The debug info variable we are part of.
+ unsigned offset; ///< Byte offset into variable.
+  DebugLoc dl;            ///< The debug location for the variable. This is
+                          ///< used by the DWARF writer to find the lexical scope.
+ UserValue *leader; ///< Equivalence class leader.
+ UserValue *next; ///< Next value in equivalence class, or null.
+
+ /// Numbered locations referenced by locmap.
+ SmallVector<MachineOperand, 4> locations;
+
+ /// Map of slot indices where this value is live.
+ LocMap locInts;
+
+ /// coalesceLocation - After LocNo was changed, check if it has become
+ /// identical to another location, and coalesce them. This may cause LocNo or
+ /// a later location to be erased, but no earlier location will be erased.
+ void coalesceLocation(unsigned LocNo);
+
+ /// insertDebugValue - Insert a DBG_VALUE into MBB at Idx for LocNo.
+ void insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx, unsigned LocNo,
+ LiveIntervals &LIS, const TargetInstrInfo &TII);
+
+ /// insertDebugKill - Insert an undef DBG_VALUE into MBB at Idx.
+ void insertDebugKill(MachineBasicBlock *MBB, SlotIndex Idx,
+ LiveIntervals &LIS, const TargetInstrInfo &TII);
+
+public:
+ /// UserValue - Create a new UserValue.
+ UserValue(const MDNode *var, unsigned o, DebugLoc L,
+ LocMap::Allocator &alloc)
+ : variable(var), offset(o), dl(L), leader(this), next(0), locInts(alloc)
+ {}
+
+ /// getLeader - Get the leader of this value's equivalence class.
+ UserValue *getLeader() {
+ UserValue *l = leader;
+ while (l != l->leader)
+ l = l->leader;
+ return leader = l;
+ }
+
+ /// getNext - Return the next UserValue in the equivalence class.
+ UserValue *getNext() const { return next; }
+
+  /// match - Does this UserValue match the parameters?
+ bool match(const MDNode *Var, unsigned Offset) const {
+ return Var == variable && Offset == offset;
+ }
+
+ /// merge - Merge equivalence classes.
+ static UserValue *merge(UserValue *L1, UserValue *L2) {
+ L2 = L2->getLeader();
+ if (!L1)
+ return L2;
+ L1 = L1->getLeader();
+ if (L1 == L2)
+ return L1;
+ // Splice L2 before L1's members.
+ UserValue *End = L2;
+ while (End->next)
+ End->leader = L1, End = End->next;
+ End->leader = L1;
+ End->next = L1->next;
+ L1->next = L2;
+ return L1;
+ }
+
+ /// getLocationNo - Return the location number that matches Loc.
+ unsigned getLocationNo(const MachineOperand &LocMO) {
+ if (LocMO.isReg() && LocMO.getReg() == 0)
+ return ~0u;
+ for (unsigned i = 0, e = locations.size(); i != e; ++i)
+ if (LocMO.isIdenticalTo(locations[i]))
+ return i;
+ locations.push_back(LocMO);
+ // We are storing a MachineOperand outside a MachineInstr.
+ locations.back().clearParent();
+ return locations.size() - 1;
+ }
+
+ /// addDef - Add a definition point to this value.
+ void addDef(SlotIndex Idx, const MachineOperand &LocMO) {
+ // Add a singular (Idx,Idx) -> Loc mapping.
+ LocMap::iterator I = locInts.find(Idx);
+ if (!I.valid() || I.start() != Idx)
+ I.insert(Idx, Idx.getNextSlot(), getLocationNo(LocMO));
+ }
+
+ /// extendDef - Extend the current definition as far as possible down the
+ /// dominator tree. Stop when meeting an existing def or when leaving the live
+ /// range of VNI.
+ /// @param Idx Starting point for the definition.
+ /// @param LocNo Location number to propagate.
+ /// @param LI Restrict liveness to where LI has the value VNI. May be null.
+ /// @param VNI When LI is not null, this is the value to restrict to.
+ /// @param LIS Live intervals analysis.
+ /// @param MDT Dominator tree.
+ void extendDef(SlotIndex Idx, unsigned LocNo,
+ LiveInterval *LI, const VNInfo *VNI,
+ LiveIntervals &LIS, MachineDominatorTree &MDT);
+
+ /// computeIntervals - Compute the live intervals of all locations after
+ /// collecting all their def points.
+ void computeIntervals(LiveIntervals &LIS, MachineDominatorTree &MDT);
+
+ /// renameRegister - Update locations to rewrite OldReg as NewReg:SubIdx.
+ void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx,
+ const TargetRegisterInfo *TRI);
+
+ /// rewriteLocations - Rewrite virtual register locations according to the
+ /// provided virtual register map.
+ void rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI);
+
+  /// emitDebugValues - Recreate DBG_VALUE instructions from data structures.
+  void emitDebugValues(VirtRegMap *VRM,
+                       LiveIntervals &LIS, const TargetInstrInfo &TII);
+
+  /// findDebugLoc - Return the DebugLoc used for this DBG_VALUE instruction. A
+  /// variable may have more than one corresponding DBG_VALUE instruction.
+  /// Only the first one needs a DebugLoc to identify the variable's lexical
+  /// scope in the source file.
+ DebugLoc findDebugLoc();
+ void print(raw_ostream&, const TargetRegisterInfo*);
+};
+} // namespace
+
+/// LDVImpl - Implementation of the LiveDebugVariables pass.
+namespace {
+class LDVImpl {
+ LiveDebugVariables &pass;
+ LocMap::Allocator allocator;
+ MachineFunction *MF;
+ LiveIntervals *LIS;
+ MachineDominatorTree *MDT;
+ const TargetRegisterInfo *TRI;
+
+ /// userValues - All allocated UserValue instances.
+ SmallVector<UserValue*, 8> userValues;
+
+ /// Map virtual register to eq class leader.
+ typedef DenseMap<unsigned, UserValue*> VRMap;
+ VRMap virtRegToEqClass;
+
+ /// Map user variable to eq class leader.
+ typedef DenseMap<const MDNode *, UserValue*> UVMap;
+ UVMap userVarMap;
+
+ /// getUserValue - Find or create a UserValue.
+ UserValue *getUserValue(const MDNode *Var, unsigned Offset, DebugLoc DL);
+
+ /// lookupVirtReg - Find the EC leader for VirtReg or null.
+ UserValue *lookupVirtReg(unsigned VirtReg);
+
+ /// mapVirtReg - Map virtual register to an equivalence class.
+ void mapVirtReg(unsigned VirtReg, UserValue *EC);
+
+ /// handleDebugValue - Add DBG_VALUE instruction to our maps.
+ /// @param MI DBG_VALUE instruction
+  /// @param Idx Last valid SlotIndex before instruction.
+ /// @return True if the DBG_VALUE instruction should be deleted.
+ bool handleDebugValue(MachineInstr *MI, SlotIndex Idx);
+
+ /// collectDebugValues - Collect and erase all DBG_VALUE instructions, adding
+ /// a UserValue def for each instruction.
+ /// @param mf MachineFunction to be scanned.
+ /// @return True if any debug values were found.
+ bool collectDebugValues(MachineFunction &mf);
+
+ /// computeIntervals - Compute the live intervals of all user values after
+ /// collecting all their def points.
+ void computeIntervals();
+
+public:
+ LDVImpl(LiveDebugVariables *ps) : pass(*ps) {}
+ bool runOnMachineFunction(MachineFunction &mf);
+
+  /// clear - Release all memory.
+ void clear() {
+ DeleteContainerPointers(userValues);
+ userValues.clear();
+ virtRegToEqClass.clear();
+ userVarMap.clear();
+ }
+
+  /// renameRegister - Replace all references to OldReg with NewReg:SubIdx.
+ void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx);
+
+  /// emitDebugValues - Recreate DBG_VALUE instructions from data structures.
+ void emitDebugValues(VirtRegMap *VRM);
+
+ void print(raw_ostream&);
+};
+} // namespace
+
+void UserValue::print(raw_ostream &OS, const TargetRegisterInfo *TRI) {
+ if (const MDString *MDS = dyn_cast<MDString>(variable->getOperand(2)))
+ OS << "!\"" << MDS->getString() << "\"\t";
+ if (offset)
+ OS << '+' << offset;
+ for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I) {
+ OS << " [" << I.start() << ';' << I.stop() << "):";
+ if (I.value() == ~0u)
+ OS << "undef";
+ else
+ OS << I.value();
+ }
+ for (unsigned i = 0, e = locations.size(); i != e; ++i)
+ OS << " Loc" << i << '=' << locations[i];
+ OS << '\n';
+}
+
+void LDVImpl::print(raw_ostream &OS) {
+ OS << "********** DEBUG VARIABLES **********\n";
+ for (unsigned i = 0, e = userValues.size(); i != e; ++i)
+ userValues[i]->print(OS, TRI);
+}
+
+void UserValue::coalesceLocation(unsigned LocNo) {
+ unsigned KeepLoc = 0;
+ for (unsigned e = locations.size(); KeepLoc != e; ++KeepLoc) {
+ if (KeepLoc == LocNo)
+ continue;
+ if (locations[KeepLoc].isIdenticalTo(locations[LocNo]))
+ break;
+ }
+ // No matches.
+ if (KeepLoc == locations.size())
+ return;
+
+ // Keep the smaller location, erase the larger one.
+ unsigned EraseLoc = LocNo;
+ if (KeepLoc > EraseLoc)
+ std::swap(KeepLoc, EraseLoc);
+ locations.erase(locations.begin() + EraseLoc);
+
+ // Rewrite values.
+ for (LocMap::iterator I = locInts.begin(); I.valid(); ++I) {
+ unsigned v = I.value();
+ if (v == EraseLoc)
+ I.setValue(KeepLoc); // Coalesce when possible.
+ else if (v > EraseLoc)
+ I.setValueUnchecked(v-1); // Avoid coalescing with untransformed values.
+ }
+}
+
+UserValue *LDVImpl::getUserValue(const MDNode *Var, unsigned Offset,
+ DebugLoc DL) {
+ UserValue *&Leader = userVarMap[Var];
+ if (Leader) {
+ UserValue *UV = Leader->getLeader();
+ Leader = UV;
+ for (; UV; UV = UV->getNext())
+ if (UV->match(Var, Offset))
+ return UV;
+ }
+
+ UserValue *UV = new UserValue(Var, Offset, DL, allocator);
+ userValues.push_back(UV);
+ Leader = UserValue::merge(Leader, UV);
+ return UV;
+}
+
+void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Only map VirtRegs");
+ UserValue *&Leader = virtRegToEqClass[VirtReg];
+ Leader = UserValue::merge(Leader, EC);
+}
+
+UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
+ if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
+ return UV->getLeader();
+ return 0;
+}
+
+bool LDVImpl::handleDebugValue(MachineInstr *MI, SlotIndex Idx) {
+ // DBG_VALUE loc, offset, variable
+ if (MI->getNumOperands() != 3 ||
+ !MI->getOperand(1).isImm() || !MI->getOperand(2).isMetadata()) {
+ DEBUG(dbgs() << "Can't handle " << *MI);
+ return false;
+ }
+
+ // Get or create the UserValue for (variable,offset).
+ unsigned Offset = MI->getOperand(1).getImm();
+ const MDNode *Var = MI->getOperand(2).getMetadata();
+ UserValue *UV = getUserValue(Var, Offset, MI->getDebugLoc());
+
+ // If the location is a virtual register, make sure it is mapped.
+ if (MI->getOperand(0).isReg()) {
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ mapVirtReg(Reg, UV);
+ }
+
+ UV->addDef(Idx, MI->getOperand(0));
+ return true;
+}
+
+bool LDVImpl::collectDebugValues(MachineFunction &mf) {
+ bool Changed = false;
+ for (MachineFunction::iterator MFI = mf.begin(), MFE = mf.end(); MFI != MFE;
+ ++MFI) {
+ MachineBasicBlock *MBB = MFI;
+ for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
+ MBBI != MBBE;) {
+ if (!MBBI->isDebugValue()) {
+ ++MBBI;
+ continue;
+ }
+ // DBG_VALUE has no slot index, use the previous instruction instead.
+ SlotIndex Idx = MBBI == MBB->begin() ?
+ LIS->getMBBStartIdx(MBB) :
+ LIS->getInstructionIndex(llvm::prior(MBBI)).getDefIndex();
+ // Handle consecutive DBG_VALUE instructions with the same slot index.
+ do {
+ if (handleDebugValue(MBBI, Idx)) {
+ MBBI = MBB->erase(MBBI);
+ Changed = true;
+ } else
+ ++MBBI;
+ } while (MBBI != MBBE && MBBI->isDebugValue());
+ }
+ }
+ return Changed;
+}
+
+void UserValue::extendDef(SlotIndex Idx, unsigned LocNo,
+ LiveInterval *LI, const VNInfo *VNI,
+ LiveIntervals &LIS, MachineDominatorTree &MDT) {
+ SmallVector<SlotIndex, 16> Todo;
+ Todo.push_back(Idx);
+
+ do {
+ SlotIndex Start = Todo.pop_back_val();
+ MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start);
+ SlotIndex Stop = LIS.getMBBEndIdx(MBB);
+ LocMap::iterator I = locInts.find(Start);
+
+ // Limit to VNI's live range.
+ bool ToEnd = true;
+ if (LI && VNI) {
+ LiveRange *Range = LI->getLiveRangeContaining(Start);
+ if (!Range || Range->valno != VNI)
+ continue;
+ if (Range->end < Stop)
+ Stop = Range->end, ToEnd = false;
+ }
+
+ // There could already be a short def at Start.
+ if (I.valid() && I.start() <= Start) {
+ // Stop when meeting a different location or an already extended interval.
+ Start = Start.getNextSlot();
+ if (I.value() != LocNo || I.stop() != Start)
+ continue;
+ // This is a one-slot placeholder. Just skip it.
+ ++I;
+ }
+
+ // Limited by the next def.
+ if (I.valid() && I.start() < Stop)
+ Stop = I.start(), ToEnd = false;
+
+ if (Start >= Stop)
+ continue;
+
+ I.insert(Start, Stop, LocNo);
+
+ // If we extended to the MBB end, propagate down the dominator tree.
+ if (!ToEnd)
+ continue;
+ const std::vector<MachineDomTreeNode*> &Children =
+ MDT.getNode(MBB)->getChildren();
+ for (unsigned i = 0, e = Children.size(); i != e; ++i)
+ Todo.push_back(LIS.getMBBStartIdx(Children[i]->getBlock()));
+ } while (!Todo.empty());
+}
+
+void
+UserValue::computeIntervals(LiveIntervals &LIS, MachineDominatorTree &MDT) {
+ SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs;
+
+  // Collect all defs to be extended (skipping undefs).
+ for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I)
+ if (I.value() != ~0u)
+ Defs.push_back(std::make_pair(I.start(), I.value()));
+
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ SlotIndex Idx = Defs[i].first;
+ unsigned LocNo = Defs[i].second;
+ const MachineOperand &Loc = locations[LocNo];
+
+ // Register locations are constrained to where the register value is live.
+ if (Loc.isReg() && LIS.hasInterval(Loc.getReg())) {
+ LiveInterval *LI = &LIS.getInterval(Loc.getReg());
+ const VNInfo *VNI = LI->getVNInfoAt(Idx);
+ extendDef(Idx, LocNo, LI, VNI, LIS, MDT);
+ } else
+ extendDef(Idx, LocNo, 0, 0, LIS, MDT);
+ }
+
+ // Finally, erase all the undefs.
+ for (LocMap::iterator I = locInts.begin(); I.valid();)
+ if (I.value() == ~0u)
+ I.erase();
+ else
+ ++I;
+}
+
+void LDVImpl::computeIntervals() {
+ for (unsigned i = 0, e = userValues.size(); i != e; ++i)
+ userValues[i]->computeIntervals(*LIS, *MDT);
+}
+
+bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ LIS = &pass.getAnalysis<LiveIntervals>();
+ MDT = &pass.getAnalysis<MachineDominatorTree>();
+ TRI = mf.getTarget().getRegisterInfo();
+ clear();
+ DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
+ << ((Value*)mf.getFunction())->getName()
+ << " **********\n");
+
+ bool Changed = collectDebugValues(mf);
+ computeIntervals();
+ DEBUG(print(dbgs()));
+ return Changed;
+}
+
+bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
+ if (!EnableLDV)
+ return false;
+ if (!pImpl)
+ pImpl = new LDVImpl(this);
+ return static_cast<LDVImpl*>(pImpl)->runOnMachineFunction(mf);
+}
+
+void LiveDebugVariables::releaseMemory() {
+ if (pImpl)
+ static_cast<LDVImpl*>(pImpl)->clear();
+}
+
+LiveDebugVariables::~LiveDebugVariables() {
+ if (pImpl)
+ delete static_cast<LDVImpl*>(pImpl);
+}
+
+void UserValue::
+renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx,
+ const TargetRegisterInfo *TRI) {
+ for (unsigned i = locations.size(); i; --i) {
+ unsigned LocNo = i - 1;
+ MachineOperand &Loc = locations[LocNo];
+ if (!Loc.isReg() || Loc.getReg() != OldReg)
+ continue;
+ if (TargetRegisterInfo::isPhysicalRegister(NewReg))
+ Loc.substPhysReg(NewReg, *TRI);
+ else
+ Loc.substVirtReg(NewReg, SubIdx, *TRI);
+ coalesceLocation(LocNo);
+ }
+}
+
+void LDVImpl::
+renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx) {
+ UserValue *UV = lookupVirtReg(OldReg);
+ if (!UV)
+ return;
+
+ if (TargetRegisterInfo::isVirtualRegister(NewReg))
+ mapVirtReg(NewReg, UV);
+ virtRegToEqClass.erase(OldReg);
+
+ do {
+ UV->renameRegister(OldReg, NewReg, SubIdx, TRI);
+ UV = UV->getNext();
+ } while (UV);
+}
+
+void LiveDebugVariables::
+renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx) {
+ if (pImpl)
+ static_cast<LDVImpl*>(pImpl)->renameRegister(OldReg, NewReg, SubIdx);
+}
+
+void
+UserValue::rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI) {
+  // Iterating over locations in reverse makes it easier to handle coalescing.
+  for (unsigned i = locations.size(); i; --i) {
+ unsigned LocNo = i-1;
+ MachineOperand &Loc = locations[LocNo];
+ // Only virtual registers are rewritten.
+ if (!Loc.isReg() || !Loc.getReg() ||
+ !TargetRegisterInfo::isVirtualRegister(Loc.getReg()))
+ continue;
+ unsigned VirtReg = Loc.getReg();
+ if (VRM.isAssignedReg(VirtReg) &&
+ TargetRegisterInfo::isPhysicalRegister(VRM.getPhys(VirtReg))) {
+ Loc.substPhysReg(VRM.getPhys(VirtReg), TRI);
+ } else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT &&
+ VRM.isSpillSlotUsed(VRM.getStackSlot(VirtReg))) {
+ // FIXME: Translate SubIdx to a stackslot offset.
+ Loc = MachineOperand::CreateFI(VRM.getStackSlot(VirtReg));
+ } else {
+ Loc.setReg(0);
+ Loc.setSubReg(0);
+ }
+ coalesceLocation(LocNo);
+ }
+ DEBUG(print(dbgs(), &TRI));
+}
+
+/// findInsertLocation - Find an iterator for inserting a DBG_VALUE
+/// instruction.
+static MachineBasicBlock::iterator
+findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
+ LiveIntervals &LIS) {
+ SlotIndex Start = LIS.getMBBStartIdx(MBB);
+ Idx = Idx.getBaseIndex();
+
+ // Try to find an insert location by going backwards from Idx.
+ MachineInstr *MI;
+ while (!(MI = LIS.getInstructionFromIndex(Idx))) {
+ // We've reached the beginning of MBB.
+ if (Idx == Start) {
+ MachineBasicBlock::iterator I = MBB->SkipPHIsAndLabels(MBB->begin());
+ return I;
+ }
+ Idx = Idx.getPrevIndex();
+ }
+
+ // Don't insert anything after the first terminator, though.
+ return MI->getDesc().isTerminator() ? MBB->getFirstTerminator() :
+ llvm::next(MachineBasicBlock::iterator(MI));
+}
+
+DebugLoc UserValue::findDebugLoc() {
+ DebugLoc D = dl;
+ dl = DebugLoc();
+ return D;
+}
+void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
+ unsigned LocNo,
+ LiveIntervals &LIS,
+ const TargetInstrInfo &TII) {
+ MachineBasicBlock::iterator I = findInsertLocation(MBB, Idx, LIS);
+ MachineOperand &Loc = locations[LocNo];
+
+ // Frame index locations may require a target callback.
+ if (Loc.isFI()) {
+ MachineInstr *MI = TII.emitFrameIndexDebugValue(*MBB->getParent(),
+ Loc.getIndex(), offset, variable,
+ findDebugLoc());
+ if (MI) {
+ MBB->insert(I, MI);
+ return;
+ }
+ }
+ // This is not a frame index, or the target is happy with a standard FI.
+ BuildMI(*MBB, I, findDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
+ .addOperand(Loc).addImm(offset).addMetadata(variable);
+}
+
+void UserValue::insertDebugKill(MachineBasicBlock *MBB, SlotIndex Idx,
+ LiveIntervals &LIS, const TargetInstrInfo &TII) {
+ MachineBasicBlock::iterator I = findInsertLocation(MBB, Idx, LIS);
+ BuildMI(*MBB, I, findDebugLoc(), TII.get(TargetOpcode::DBG_VALUE)).addReg(0)
+ .addImm(offset).addMetadata(variable);
+}
+
+void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
+ const TargetInstrInfo &TII) {
+ MachineFunction::iterator MFEnd = VRM->getMachineFunction().end();
+
+ for (LocMap::const_iterator I = locInts.begin(); I.valid();) {
+ SlotIndex Start = I.start();
+ SlotIndex Stop = I.stop();
+ unsigned LocNo = I.value();
+ DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << LocNo);
+ MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start);
+ SlotIndex MBBEnd = LIS.getMBBEndIdx(MBB);
+
+ DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
+ insertDebugValue(MBB, Start, LocNo, LIS, TII);
+
+ // This interval may span multiple basic blocks.
+ // Insert a DBG_VALUE into each one.
+    while (Stop > MBBEnd) {
+ // Move to the next block.
+ Start = MBBEnd;
+ if (++MBB == MFEnd)
+ break;
+ MBBEnd = LIS.getMBBEndIdx(MBB);
+ DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd);
+ insertDebugValue(MBB, Start, LocNo, LIS, TII);
+ }
+ DEBUG(dbgs() << '\n');
+ if (MBB == MFEnd)
+ break;
+
+ ++I;
+ if (Stop == MBBEnd)
+ continue;
+ // The current interval ends before MBB.
+ // Insert a kill if there is a gap.
+ if (!I.valid() || I.start() > Stop)
+ insertDebugKill(MBB, Stop, LIS, TII);
+ }
+}
+
+void LDVImpl::emitDebugValues(VirtRegMap *VRM) {
+ DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
+ userValues[i]->rewriteLocations(*VRM, *TRI);
+ userValues[i]->emitDebugValues(VRM, *LIS, *TII);
+ }
+}
+
+void LiveDebugVariables::emitDebugValues(VirtRegMap *VRM) {
+ if (pImpl)
+ static_cast<LDVImpl*>(pImpl)->emitDebugValues(VRM);
+}
+
+
+#ifndef NDEBUG
+void LiveDebugVariables::dump() {
+ if (pImpl)
+ static_cast<LDVImpl*>(pImpl)->print(dbgs());
+}
+#endif
+
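The UserValue equivalence classes above rest on two primitives: getLeader(), which follows and compresses leader pointers, and merge(), which splices one class's member list into the other. A self-contained sketch of just that machinery, with the debug-info payload stripped out (a simplification for illustration, not the pass's actual types):

    // Leader-pointer equivalence classes: each node points at a leader and
    // at the next member of its class.
    struct EqNode {
      EqNode *leader, *next;
      EqNode() : leader(this), next(0) {}
      EqNode *getLeader() {
        EqNode *l = leader;
        while (l != l->leader)
          l = l->leader;        // Walk to the root...
        return leader = l;      // ...and compress the path.
      }
    };
    static EqNode *merge(EqNode *A, EqNode *B) {
      A = A->getLeader();
      B = B->getLeader();
      if (A == B)
        return A;
      // Splice B's members before A's members, repointing their leaders.
      EqNode *End = B;
      while (End->next) {
        End->leader = A;
        End = End->next;
      }
      End->leader = A;
      End->next = A->next;
      A->next = B;
      return A;
    }
    // After merge(&a, &b) and merge(&b, &c): a.getLeader() == c.getLeader().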
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.h b/contrib/llvm/lib/CodeGen/LiveDebugVariables.h
new file mode 100644
index 0000000..a6e40a1
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.h
@@ -0,0 +1,63 @@
+//===- LiveDebugVariables.h - Tracking debug info variables ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface to the LiveDebugVariables analysis.
+//
+// The analysis removes DBG_VALUE instructions for virtual registers and tracks
+// live user variables in a data structure that can be updated during register
+// allocation.
+//
+// After register allocation new DBG_VALUE instructions are emitted to reflect
+// the new locations of user variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEDEBUGVARIABLES_H
+#define LLVM_CODEGEN_LIVEDEBUGVARIABLES_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class VirtRegMap;
+
+class LiveDebugVariables : public MachineFunctionPass {
+ void *pImpl;
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ LiveDebugVariables();
+ ~LiveDebugVariables();
+
+ /// renameRegister - Move any user variables in OldReg to NewReg:SubIdx.
+ /// @param OldReg Old virtual register that is going away.
+ /// @param NewReg New register holding the user variables.
+ /// @param SubIdx If NewReg is a virtual register, SubIdx may indicate a sub-
+ /// register.
+ void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx);
+
+ /// emitDebugValues - Emit new DBG_VALUE instructions reflecting the changes
+ /// that happened during register allocation.
+ /// @param VRM Rename virtual registers according to map.
+ void emitDebugValues(VirtRegMap *VRM);
+
+ /// dump - Print data structures to dbgs().
+ void dump();
+
+private:
+
+ virtual bool runOnMachineFunction(MachineFunction &);
+ virtual void releaseMemory();
+ virtual void getAnalysisUsage(AnalysisUsage &) const;
+
+};
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEDEBUGVARIABLES_H
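Because the implementation hides behind the pImpl pointer, a register allocator interacts with this pass only through the two hooks above. A hypothetical call sequence, where OldReg, NewReg, SubIdx, and VRM stand in for allocator-side state:

    // Inside a register allocator pass (sketch; surrounding code assumed):
    LiveDebugVariables *LDV = &getAnalysis<LiveDebugVariables>();
    // ...whenever a virtual register is coalesced or split:
    LDV->renameRegister(OldReg, NewReg, SubIdx);
    // ...once all assignments are final, rewrite and re-emit DBG_VALUEs:
    LDV->emitDebugValues(VRM);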
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index 59f380a..c2dbd6a 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -30,58 +30,19 @@
#include <algorithm>
using namespace llvm;
-// An example for liveAt():
-//
-// this = [1,4), liveAt(0) will return false. The instruction defining this
-// spans slots [0,3]. The interval belongs to an spilled definition of the
-// variable it represents. This is because slot 1 is used (def slot) and spans
-// up to slot 3 (store slot).
-//
-bool LiveInterval::liveAt(SlotIndex I) const {
- Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
-
- if (r == ranges.begin())
- return false;
-
- --r;
- return r->contains(I);
-}
-
-// liveBeforeAndAt - Check if the interval is live at the index and the index
-// just before it. If index is liveAt, check if it starts a new live range.
-// If it does, then check if the previous live range ends at index-1.
-bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
- Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
-
- if (r == ranges.begin())
- return false;
-
- --r;
- if (!r->contains(I))
- return false;
- if (I != r->start)
- return true;
- // I is the start of a live range. Check if the previous live range ends
- // at I-1.
- if (r == ranges.begin())
- return false;
- return r->end == I;
+// CompEnd - Compare LiveRange ends.
+namespace {
+struct CompEnd {
+ bool operator()(const LiveRange &A, const LiveRange &B) const {
+ return A.end < B.end;
+ }
+};
}
-/// killedAt - Return true if a live range ends at index. Note that the kill
-/// point is not contained in the half-open live range. It is usually the
-/// getDefIndex() slot following its last use.
-bool LiveInterval::killedAt(SlotIndex I) const {
- Ranges::const_iterator r = std::lower_bound(ranges.begin(), ranges.end(), I);
-
- // Now r points to the first interval with start >= I, or ranges.end().
- if (r == ranges.begin())
- return false;
-
- --r;
- // Now r points to the last interval with end <= I.
- // r->end is the kill point.
- return r->end == I;
+LiveInterval::iterator LiveInterval::find(SlotIndex Pos) {
+ assert(Pos.isValid() && "Cannot search for an invalid index");
+ return std::upper_bound(begin(), end(), LiveRange(SlotIndex(), Pos, 0),
+ CompEnd());
}
/// killedInRange - Return true if the interval has kills in [Start,End).
@@ -330,25 +291,14 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
return ranges.insert(it, LR);
}
-/// isInOneLiveRange - Return true if the range specified is entirely in
-/// a single LiveRange of the live interval.
-bool LiveInterval::isInOneLiveRange(SlotIndex Start, SlotIndex End) {
- Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
- if (I == ranges.begin())
- return false;
- --I;
- return I->containsRange(Start, End);
-}
-
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
bool RemoveDeadValNo) {
// Find the LiveRange containing this span.
- Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
- assert(I != ranges.begin() && "Range is not in interval!");
- --I;
+ Ranges::iterator I = find(Start);
+ assert(I != ranges.end() && "Range is not in interval!");
assert(I->containsRange(Start, End) && "Range is not entirely in interval!");
// If the span we are removing is at the start of the LiveRange, adjust it.
@@ -405,32 +355,6 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
markValNoForDeletion(ValNo);
}
-/// getLiveRangeContaining - Return the live range that contains the
-/// specified index, or null if there is none.
-LiveInterval::const_iterator
-LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
- const_iterator It = std::upper_bound(begin(), end(), Idx);
- if (It != ranges.begin()) {
- --It;
- if (It->contains(Idx))
- return It;
- }
-
- return end();
-}
-
-LiveInterval::iterator
-LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
- iterator It = std::upper_bound(begin(), end(), Idx);
- if (It != begin()) {
- --It;
- if (It->contains(Idx))
- return It;
- }
-
- return end();
-}
-
/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// index (register interval).
VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
@@ -443,17 +367,6 @@ VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
return 0;
}
-/// findDefinedVNInfo - Find the VNInfo defined by the specified
-/// register (stack inteval).
-VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
- for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
- i != e; ++i) {
- if ((*i)->getReg() == reg)
- return *i;
- }
- return 0;
-}
-
/// join - Join two live intervals (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
/// the intervals are not joinable, this aborts.
@@ -616,103 +529,6 @@ void LiveInterval::MergeValueInAsValue(
}
-/// MergeInClobberRanges - For any live ranges that are not defined in the
-/// current interval, but are defined in the Clobbers interval, mark them
-/// used with an unknown definition value.
-void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
- const LiveInterval &Clobbers,
- VNInfo::Allocator &VNInfoAllocator) {
- if (Clobbers.empty()) return;
-
- DenseMap<VNInfo*, VNInfo*> ValNoMaps;
- VNInfo *UnusedValNo = 0;
- iterator IP = begin();
- for (const_iterator I = Clobbers.begin(), E = Clobbers.end(); I != E; ++I) {
- // For every val# in the Clobbers interval, create a new "unknown" val#.
- VNInfo *ClobberValNo = 0;
- DenseMap<VNInfo*, VNInfo*>::iterator VI = ValNoMaps.find(I->valno);
- if (VI != ValNoMaps.end())
- ClobberValNo = VI->second;
- else if (UnusedValNo)
- ClobberValNo = UnusedValNo;
- else {
- UnusedValNo = ClobberValNo =
- getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
- ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
- }
-
- bool Done = false;
- SlotIndex Start = I->start, End = I->end;
- // If a clobber range starts before an existing range and ends after
- // it, the clobber range will need to be split into multiple ranges.
- // Loop until the entire clobber range is handled.
- while (!Done) {
- Done = true;
- IP = std::upper_bound(IP, end(), Start);
- SlotIndex SubRangeStart = Start;
- SlotIndex SubRangeEnd = End;
-
- // If the start of this range overlaps with an existing liverange, trim it.
- if (IP != begin() && IP[-1].end > SubRangeStart) {
- SubRangeStart = IP[-1].end;
- // Trimmed away the whole range?
- if (SubRangeStart >= SubRangeEnd) continue;
- }
- // If the end of this range overlaps with an existing liverange, trim it.
- if (IP != end() && SubRangeEnd > IP->start) {
- // If the clobber live range extends beyond the existing live range,
- // it'll need at least another live range, so set the flag to keep
- // iterating.
- if (SubRangeEnd > IP->end) {
- Start = IP->end;
- Done = false;
- }
- SubRangeEnd = IP->start;
- // If this trimmed away the whole range, ignore it.
- if (SubRangeStart == SubRangeEnd) continue;
- }
-
- // Insert the clobber interval.
- IP = addRangeFrom(LiveRange(SubRangeStart, SubRangeEnd, ClobberValNo),
- IP);
- UnusedValNo = 0;
- }
- }
-
- if (UnusedValNo) {
- // Delete the last unused val#.
- valnos.pop_back();
- }
-}
-
-void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
- SlotIndex Start,
- SlotIndex End,
- VNInfo::Allocator &VNInfoAllocator) {
- // Find a value # to use for the clobber ranges. If there is already a value#
- // for unknown values, use it.
- VNInfo *ClobberValNo =
- getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
-
- iterator IP = begin();
- IP = std::upper_bound(IP, end(), Start);
-
- // If the start of this range overlaps with an existing liverange, trim it.
- if (IP != begin() && IP[-1].end > Start) {
- Start = IP[-1].end;
- // Trimmed away the whole range?
- if (Start >= End) return;
- }
- // If the end of this range overlaps with an existing liverange, trim it.
- if (IP != end() && End > IP->start) {
- End = IP->start;
- // If this trimmed away the whole range, ignore it.
- if (Start == End) return;
- }
-
- // Insert the clobber interval.
- addRangeFrom(LiveRange(Start, End, ClobberValNo), IP);
-}
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
@@ -767,6 +583,9 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
}
}
+ // Merge the relevant flags.
+ V2->mergeFlags(V1);
+
// Now that V1 is dead, remove it.
markValNoForDeletion(V1);
@@ -831,14 +650,9 @@ void LiveRange::dump() const {
}
void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
- if (isStackSlot())
- OS << "SS#" << getStackSlotIndex();
- else if (TRI && TargetRegisterInfo::isPhysicalRegister(reg))
- OS << TRI->getName(reg);
- else
- OS << "%reg" << reg;
-
- OS << ',' << weight;
+ OS << PrintReg(reg, TRI);
+ if (weight != 0)
+ OS << ',' << weight;
if (empty())
OS << " EMPTY";
@@ -863,10 +677,9 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
if (vni->isUnused()) {
OS << "x";
} else {
- if (!vni->isDefAccurate() && !vni->isPHIDef())
- OS << "?";
- else
- OS << vni->def;
+ OS << vni->def;
+ if (vni->isPHIDef())
+ OS << "-phidef";
if (vni->hasPHIKill())
OS << "-phikill";
if (vni->hasRedefByEC())
@@ -884,3 +697,84 @@ void LiveInterval::dump() const {
void LiveRange::print(raw_ostream &os) const {
os << *this;
}
+
+unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
+ // Create initial equivalence classes.
+ eqClass_.clear();
+ eqClass_.grow(LI->getNumValNums());
+
+ const VNInfo *used = 0, *unused = 0;
+
+ // Determine connections.
+ for (LiveInterval::const_vni_iterator I = LI->vni_begin(), E = LI->vni_end();
+ I != E; ++I) {
+ const VNInfo *VNI = *I;
+ // Group all unused values into one class.
+ if (VNI->isUnused()) {
+ if (unused)
+ eqClass_.join(unused->id, VNI->id);
+ unused = VNI;
+ continue;
+ }
+ used = VNI;
+ if (VNI->isPHIDef()) {
+ const MachineBasicBlock *MBB = lis_.getMBBFromIndex(VNI->def);
+ assert(MBB && "Phi-def has no defining MBB");
+ // Connect to values live out of predecessors.
+ for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI)
+ if (const VNInfo *PVNI =
+ LI->getVNInfoAt(lis_.getMBBEndIdx(*PI).getPrevSlot()))
+ eqClass_.join(VNI->id, PVNI->id);
+ } else {
+ // Normal value defined by an instruction. Check for two-addr redef.
+ // FIXME: This could be coincidental. Should we really check for a tied
+ // operand constraint?
+ // Note that VNI->def may be a use slot for an early clobber def.
+ if (const VNInfo *UVNI = LI->getVNInfoAt(VNI->def.getPrevSlot()))
+ eqClass_.join(VNI->id, UVNI->id);
+ }
+ }
+
+ // Lump all the unused values in with the last used value.
+ if (used && unused)
+ eqClass_.join(used->id, unused->id);
+
+ eqClass_.compress();
+ return eqClass_.getNumClasses();
+}
+
+void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[]) {
+ assert(LIV[0] && "LIV[0] must be set");
+ LiveInterval &LI = *LIV[0];
+
+ // First move runs to new intervals.
+ LiveInterval::iterator J = LI.begin(), E = LI.end();
+ while (J != E && eqClass_[J->valno->id] == 0)
+ ++J;
+ for (LiveInterval::iterator I = J; I != E; ++I) {
+ if (unsigned eq = eqClass_[I->valno->id]) {
+ assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
+ "New intervals should be empty");
+ LIV[eq]->ranges.push_back(*I);
+ } else
+ *J++ = *I;
+ }
+ LI.ranges.erase(J, E);
+
+ // Transfer VNInfos to their new owners and renumber them.
+ unsigned j = 0, e = LI.getNumValNums();
+ while (j != e && eqClass_[j] == 0)
+ ++j;
+ for (unsigned i = j; i != e; ++i) {
+ VNInfo *VNI = LI.getValNumInfo(i);
+ if (unsigned eq = eqClass_[i]) {
+ VNI->id = LIV[eq]->getNumValNums();
+ LIV[eq]->valnos.push_back(VNI);
+ } else {
+ VNI->id = j;
+ LI.valnos[j++] = VNI;
+ }
+ }
+ LI.valnos.resize(j);
+}
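The new LiveInterval::find() above exploits the invariant that live ranges are sorted and non-overlapping: the first range whose end is greater than Pos is the only one that can contain Pos. A standalone sketch of that search over a plain array (an assumed simplification of the std::upper_bound call with CompEnd):

    // Binary search on range end points; half-open ranges [start,end).
    struct Range { unsigned start, end; };
    static const Range *findRange(const Range *B, const Range *E,
                                  unsigned Pos) {
      while (B != E) {
        const Range *Mid = B + (E - B) / 2;
        if (Mid->end <= Pos)
          B = Mid + 1;  // Pos is past Mid; search the upper half.
        else
          E = Mid;      // Mid ends after Pos and may contain it.
      }
      return B;  // Contains Pos iff B != array end and B->start <= Pos.
    }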
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index 2726fc3..aef5b5f 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -20,6 +20,7 @@
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -55,8 +56,17 @@ STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
char LiveIntervals::ID = 0;
-INITIALIZE_PASS(LiveIntervals, "liveintervals",
- "Live Interval Analysis", false, false);
+INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
+ "Live Interval Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(PHIElimination)
+INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
+INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
+ "Live Interval Analysis", false, false)
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -132,19 +142,7 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
void LiveIntervals::printInstrs(raw_ostream &OS) const {
OS << "********** MACHINEINSTRS **********\n";
-
- for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
- mbbi != mbbe; ++mbbi) {
- OS << "BB#" << mbbi->getNumber()
- << ":\t\t# derived from " << mbbi->getName() << "\n";
- for (MachineBasicBlock::iterator mii = mbbi->begin(),
- mie = mbbi->end(); mii != mie; ++mii) {
- if (mii->isDebugValue())
- OS << " \t" << *mii;
- else
- OS << getInstructionIndex(mii) << '\t' << *mii;
- }
- }
+ mf_->print(OS, indexes_);
}
void LiveIntervals::dumpInstrs() const {
@@ -248,15 +246,6 @@ bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
return false;
}
-#ifndef NDEBUG
-static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
- if (TargetRegisterInfo::isPhysicalRegister(reg))
- dbgs() << tri_->getName(reg);
- else
- dbgs() << "%reg" << reg;
-}
-#endif
-
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
unsigned Reg = MI.getOperand(MOIdx).getReg();
@@ -285,8 +274,8 @@ bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
SlotIndex RedefIndex = MIIdx.getDefIndex();
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
- if (OldLR->valno->isDefAccurate()) {
- MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+ MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+ if (DefMI != 0) {
return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
}
return false;
@@ -298,10 +287,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineOperand& MO,
unsigned MOIdx,
LiveInterval &interval) {
- DEBUG({
- dbgs() << "\t\tregister: ";
- printRegName(interval.reg, tri_);
- });
+ DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
// Virtual registers may be defined multiple times (due to phi
// elimination and 2-addr elimination). Much of what we do only has to be
@@ -326,8 +312,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
CopyMI = mi;
}
- VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
- VNInfoAllocator);
+ VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
// Loop over all of the blocks that the vreg is defined in. There are
@@ -393,8 +378,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// Create interval with one of a NEW value number. Note that this value
// number isn't actually defined by an instruction, weird huh? :)
if (PHIJoin) {
- ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
- VNInfoAllocator);
+ assert(getInstructionFromIndex(Start) == 0 &&
+ "PHI def index points at actual instruction.");
+ ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
ValNo->setIsPHIDef(true);
}
LiveRange LR(Start, killIdx, ValNo);
@@ -440,10 +426,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
- VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
- false, // update at *
- VNInfoAllocator);
- ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
+ VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
// Value#0 is now defined by the 2-addr instruction.
OldValNo->def = RedefIndex;
@@ -481,7 +464,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineInstr *CopyMI = NULL;
if (mi->isCopyLike())
CopyMI = mi;
- ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
+ ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
@@ -504,10 +487,7 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineInstr *CopyMI) {
// A physical register cannot be live across basic block, so its
// lifetime must end somewhere in its defining basic block.
- DEBUG({
- dbgs() << "\t\tregister: ";
- printRegName(interval.reg, tri_);
- });
+ DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
SlotIndex baseIndex = MIIdx;
SlotIndex start = baseIndex.getDefIndex();
@@ -573,11 +553,11 @@ exit:
assert(start < end && "did not find end of interval?");
// Already exists? Extend old live interval.
- LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
- bool Extend = OldLR != interval.end();
- VNInfo *ValNo = Extend
- ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
- if (MO.isEarlyClobber() && Extend)
+ VNInfo *ValNo = interval.getVNInfoAt(start);
+ bool Extend = ValNo != 0;
+ if (!Extend)
+ ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
+ if (Extend && MO.isEarlyClobber())
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
@@ -611,10 +591,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
SlotIndex MIIdx,
LiveInterval &interval, bool isAlias) {
- DEBUG({
- dbgs() << "\t\tlivein register: ";
- printRegName(interval.reg, tri_);
- });
+ DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
@@ -672,9 +649,11 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
}
}
+ SlotIndex defIdx = getMBBStartIdx(MBB);
+ assert(getInstructionFromIndex(defIdx) == 0 &&
+ "PHI def index points at actual instruction.");
VNInfo *vni =
- interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
- 0, false, VNInfoAllocator);
+ interval.getNextValue(defIdx, 0, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
@@ -764,10 +743,177 @@ LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
return NewLI;
}
+/// shrinkToUses - After removing some uses of a register, shrink its live
+/// range to just the remaining uses. This method does not compute reaching
+/// defs for new uses, and it doesn't remove dead defs.
+void LiveIntervals::shrinkToUses(LiveInterval *li) {
+ DEBUG(dbgs() << "Shrink: " << *li << '\n');
+  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
+         && "Can only shrink virtual registers");
+ // Find all the values used, including PHI kills.
+ SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
+
+ // Visit all instructions reading li->reg.
+ for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
+ MachineInstr *UseMI = I.skipInstruction();) {
+ if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
+ continue;
+ SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
+ VNInfo *VNI = li->getVNInfoAt(Idx);
+ assert(VNI && "Live interval not live into reading instruction");
+ if (VNI->def == Idx) {
+ // Special case: An early-clobber tied operand reads and writes the
+ // register one slot early.
+ Idx = Idx.getPrevSlot();
+ VNI = li->getVNInfoAt(Idx);
+ assert(VNI && "Early-clobber tied value not available");
+ }
+ WorkList.push_back(std::make_pair(Idx, VNI));
+ }
+
+ // Create a new live interval with only minimal live segments per def.
+ LiveInterval NewLI(li->reg, 0);
+ for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
+ I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->isUnused())
+ continue;
+ NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
+ }
+
+ // Extend intervals to reach all uses in WorkList.
+ while (!WorkList.empty()) {
+ SlotIndex Idx = WorkList.back().first;
+ VNInfo *VNI = WorkList.back().second;
+ WorkList.pop_back();
+
+ // Extend the live range for VNI to be live at Idx.
+ LiveInterval::iterator I = NewLI.find(Idx);
+
+ // Already got it?
+ if (I != NewLI.end() && I->start <= Idx) {
+ assert(I->valno == VNI && "Unexpected existing value number");
+ continue;
+ }
+
+ // Is there already a live range in the block containing Idx?
+ const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
+ SlotIndex BlockStart = getMBBStartIdx(MBB);
+ DEBUG(dbgs() << "Shrink: Use val#" << VNI->id << " at " << Idx
+ << " in BB#" << MBB->getNumber() << '@' << BlockStart);
+ if (I != NewLI.begin() && (--I)->end > BlockStart) {
+ assert(I->valno == VNI && "Wrong reaching def");
+ DEBUG(dbgs() << " extend [" << I->start << ';' << I->end << ")\n");
+ // Is this the first use of a PHIDef in its defining block?
+ if (VNI->isPHIDef() && I->end == VNI->def.getNextSlot()) {
+ // The PHI is live, make sure the predecessors are live-out.
+ for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI) {
+ SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
+ VNInfo *PVNI = li->getVNInfoAt(Stop);
+ // A predecessor is not required to have a live-out value for a PHI.
+ if (PVNI) {
+ assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
+ WorkList.push_back(std::make_pair(Stop, PVNI));
+ }
+ }
+ }
+
+ // Extend the live range in the block to include Idx.
+ NewLI.addRange(LiveRange(I->end, Idx.getNextSlot(), VNI));
+ continue;
+ }
+
+ // VNI is live-in to MBB.
+ DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
+ NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
+
+ // Make sure VNI is live-out from the predecessors.
+ for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI) {
+ SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
+ assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
+ WorkList.push_back(std::make_pair(Stop, VNI));
+ }
+ }
+
+ // Handle dead values.
+ for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
+ I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->isUnused())
+ continue;
+ LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
+ assert(LII != NewLI.end() && "Missing live range for PHI");
+ if (LII->end != VNI->def.getNextSlot())
+ continue;
+ if (VNI->isPHIDef()) {
+ // This is a dead PHI. Remove it.
+ VNI->setIsUnused(true);
+ NewLI.removeRange(*LII);
+ } else {
+ // This is a dead def. Make sure the instruction knows.
+ MachineInstr *MI = getInstructionFromIndex(VNI->def);
+ assert(MI && "No instruction defining live value");
+ MI->addRegisterDead(li->reg, tri_);
+ }
+ }
+
+ // Move the trimmed ranges back.
+ li->ranges.swap(NewLI.ranges);
+ DEBUG(dbgs() << "Shrink: " << *li << '\n');
+}
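+// A minimal usage sketch, assuming LIS is the LiveIntervals analysis and Reg
+// a virtual register whose uses were just deleted (hypothetical names):
+//   LIS->RemoveMachineInstrFromMaps(UseMI);
+//   UseMI->eraseFromParent();
+//   LIS->shrinkToUses(&LIS->getInterval(Reg));
+// Dead defs are only flagged via addRegisterDead(); erasing the defining
+// instructions is left to the caller.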
+
+
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
+MachineBasicBlock::iterator
+LiveIntervals::getLastSplitPoint(const LiveInterval &li,
+ MachineBasicBlock *mbb) const {
+ const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
+
+ // If li is not live into a landing pad, we can insert spill code before the
+ // first terminator.
+ if (!lpad || !isLiveInToMBB(li, lpad))
+ return mbb->getFirstTerminator();
+
+ // When there is a landing pad, spill code must go before the call instruction
+ // that can throw.
+ MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
+ while (I != B) {
+ --I;
+ if (I->getDesc().isCall())
+ return I;
+ }
+ // The block contains no calls that can throw, so use the first terminator.
+ return mbb->getFirstTerminator();
+}
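+// Usage sketch, assuming LIS and a block MBB (hypothetical names): spill code
+// for li must be inserted strictly before the returned iterator, e.g.
+//   MachineBasicBlock::iterator IP = LIS->getLastSplitPoint(li, MBB);
+// so that a value live into a landing pad is defined before the throwing call.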
+
+void LiveIntervals::addKillFlags() {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ unsigned Reg = I->first;
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ if (mri_->reg_nodbg_empty(Reg))
+ continue;
+ LiveInterval *LI = I->second;
+
+ // Every instruction that kills Reg corresponds to a live range end point.
+ for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
+ ++RI) {
+ // A LOAD index indicates an MBB edge.
+ if (RI->end.isLoad())
+ continue;
+ MachineInstr *MI = getInstructionFromIndex(RI->end);
+ if (!MI)
+ continue;
+ MI->addRegisterKilled(Reg, NULL);
+ }
+ }
+}
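+// Usage sketch (hypothetical call site): once liveness is final, e.g. at the
+// end of allocation, kill markers can be recomputed in a single pass:
+//   LIS->addKillFlags();
+// Range ends at LOAD indexes are block edges, so they are skipped above: no
+// single instruction kills the register there.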
+
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
@@ -800,18 +946,17 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
SlotIndex UseIdx) const {
- SlotIndex Index = getInstructionIndex(MI);
- VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
- LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
- return UI != li.end() && UI->valno == ValNo;
+ VNInfo *UValNo = li.getVNInfoAt(UseIdx);
+ return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
- const VNInfo *ValNo, MachineInstr *MI,
- SmallVectorImpl<LiveInterval*> &SpillIs,
- bool &isLoad) {
+bool
+LiveIntervals::isReMaterializable(const LiveInterval &li,
+ const VNInfo *ValNo, MachineInstr *MI,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
+ bool &isLoad) {
if (DisableReMat)
return false;
@@ -829,7 +974,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
SlotIndex UseIdx = getInstructionIndex(UseMI);
- if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
+ if (li.getVNInfoAt(UseIdx) != ValNo)
continue;
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
return false;
@@ -855,9 +1000,10 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
- SmallVectorImpl<LiveInterval*> &SpillIs,
- bool &isLoad) {
+bool
+LiveIntervals::isReMaterializable(const LiveInterval &li,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
+ bool &isLoad) {
isLoad = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
@@ -865,9 +1011,9 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
- if (!VNI->isDefAccurate())
- return false;
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
+ if (!ReMatDefMI)
+ return false;
bool DefIsLoad = false;
if (!ReMatDefMI ||
!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
@@ -1010,7 +1156,7 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (!vrm.isReMaterialized(Reg))
continue;
@@ -1044,7 +1190,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (!mop.isReg())
continue;
unsigned Reg = mop.getReg();
- if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (Reg != li.reg)
continue;
@@ -1140,11 +1286,14 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
rewriteImplicitOps(li, MI, NewVReg, vrm);
// Reuse NewVReg for other reads.
+ bool HasEarlyClobber = false;
for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
MachineOperand &mopj = MI->getOperand(Ops[j]);
mopj.setReg(NewVReg);
if (mopj.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
+ if (mopj.isEarlyClobber())
+ HasEarlyClobber = true;
}
if (CreatedNewVReg) {
@@ -1190,7 +1339,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (HasUse) {
if (CreatedNewVReg) {
LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
- nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+ nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
DEBUG(dbgs() << " +" << LR);
nI.addRange(LR);
} else {
@@ -1203,8 +1352,12 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
}
}
if (HasDef) {
- LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
- nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+ // An early clobber starts at the use slot, except for an early clobber
+ // tied to a use operand (yes, that is a thing).
+ LiveRange LR(HasEarlyClobber && !HasUse ?
+ index.getUseIndex() : index.getDefIndex(),
+ index.getStoreIndex(),
+ nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
DEBUG(dbgs() << " +" << LR);
nI.addRange(LR);
}
@@ -1554,15 +1707,15 @@ LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
return (isDef + isUse) * lc;
}
-void
-LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
+static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
- normalizeSpillWeight(*NewLIs[i]);
+ NewLIs[i]->weight =
+ normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
- SmallVectorImpl<LiveInterval*> &SpillIs,
+ const SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
assert(li.isSpillable() && "attempt to spill already spilled interval!");
@@ -1653,8 +1806,7 @@ addIntervalsForSpills(const LiveInterval &li,
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
- MachineInstr *ReMatDefMI = VNI->isDefAccurate()
- ? getInstructionFromIndex(VNI->def) : 0;
+ MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
bool dummy;
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
// Remember how to remat the def of this val#.
@@ -1926,6 +2078,9 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
unsigned PhysReg, VirtRegMap &vrm) {
unsigned SpillReg = getRepresentativeReg(PhysReg);
+ DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
+ << " represented by " << tri_->getName(SpillReg) << '\n');
+
for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
// If there are registers which alias PhysReg, but which are not a
// sub-register of the chosen representative super register. Assert
@@ -1937,15 +2092,16 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
SmallVector<unsigned, 4> PRegs;
if (hasInterval(SpillReg))
PRegs.push_back(SpillReg);
- else {
- SmallSet<unsigned, 4> Added;
- for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
- if (Added.insert(*AS) && hasInterval(*AS)) {
- PRegs.push_back(*AS);
- for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
- Added.insert(*ASS);
- }
- }
+ for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
+ if (hasInterval(*SR))
+ PRegs.push_back(*SR);
+
+ DEBUG({
+ dbgs() << "Trying to spill:";
+ for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
+ dbgs() << ' ' << tri_->getName(PRegs[i]);
+ dbgs() << '\n';
+ });
SmallPtrSet<MachineInstr*, 8> SeenMIs;
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
@@ -1956,18 +2112,16 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
continue;
SeenMIs.insert(MI);
SlotIndex Index = getInstructionIndex(MI);
+ bool LiveReg = false;
for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
unsigned PReg = PRegs[i];
LiveInterval &pli = getInterval(PReg);
if (!pli.liveAt(Index))
continue;
- vrm.addEmergencySpill(PReg, MI);
+ LiveReg = true;
SlotIndex StartIdx = Index.getLoadIndex();
SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
- if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
- pli.removeRange(StartIdx, EndIdx);
- Cut = true;
- } else {
+ if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Ran out of registers during register allocation!";
@@ -1978,15 +2132,14 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
}
report_fatal_error(Msg.str());
}
- for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
- if (!hasInterval(*AS))
- continue;
- LiveInterval &spli = getInterval(*AS);
- if (spli.liveAt(Index))
- spli.removeRange(Index.getLoadIndex(),
- Index.getNextIndex().getBaseIndex());
- }
+ pli.removeRange(StartIdx, EndIdx);
}
+ if (!LiveReg)
+ continue;
+ DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
+ vrm.addEmergencySpill(SpillReg, MI);
+ Cut = true;
}
return Cut;
}
@@ -1996,7 +2149,7 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
LiveInterval& Interval = getOrCreateInterval(reg);
VNInfo* VN = Interval.getNextValue(
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
- startInst, true, getVNInfoAllocator());
+ startInst, getVNInfoAllocator());
VN->setHasPHIKill(true);
LiveRange LR(
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
new file mode 100644
index 0000000..205f28a
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
@@ -0,0 +1,315 @@
+//===-- LiveIntervalUnion.cpp - Live interval union data structure --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LiveIntervalUnion represents a coalesced set of live intervals. This may be
+// used during coalescing to represent a congruence class, or during register
+// allocation to model liveness of a physical register.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "LiveIntervalUnion.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/CodeGen/MachineLoopRanges.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+
+// Merge a LiveInterval's segments. Guarantee no overlaps.
+void LiveIntervalUnion::unify(LiveInterval &VirtReg) {
+ if (VirtReg.empty())
+ return;
+ ++Tag;
+
+ // Insert each of the virtual register's live segments into the map.
+ LiveInterval::iterator RegPos = VirtReg.begin();
+ LiveInterval::iterator RegEnd = VirtReg.end();
+ SegmentIter SegPos = Segments.find(RegPos->start);
+
+ for (;;) {
+ SegPos.insert(RegPos->start, RegPos->end, &VirtReg);
+ if (++RegPos == RegEnd)
+ return;
+ SegPos.advanceTo(RegPos->start);
+ }
+}
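+// Usage sketch, assuming LIU is the LiveIntervalUnion of an assigned physreg
+// and VirtRegLI the candidate interval (hypothetical names):
+//   LIU.unify(VirtRegLI);    // assign: merge every segment
+//   LIU.extract(VirtRegLI);  // evict: remove them again
+// Both calls bump the tag, invalidating any cached Query results.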
+
+// Remove a live virtual register's segments from this union.
+void LiveIntervalUnion::extract(LiveInterval &VirtReg) {
+ if (VirtReg.empty())
+ return;
+ ++Tag;
+
+ // Remove each of the virtual register's live segments from the map.
+ LiveInterval::iterator RegPos = VirtReg.begin();
+ LiveInterval::iterator RegEnd = VirtReg.end();
+ SegmentIter SegPos = Segments.find(RegPos->start);
+
+ for (;;) {
+ assert(SegPos.value() == &VirtReg && "Inconsistent LiveInterval");
+ SegPos.erase();
+ if (!SegPos.valid())
+ return;
+
+ // Skip all segments that may have been coalesced.
+ RegPos = VirtReg.advanceTo(RegPos, SegPos.start());
+ if (RegPos == RegEnd)
+ return;
+
+ SegPos.advanceTo(RegPos->start);
+ }
+}
+
+void
+LiveIntervalUnion::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
+ OS << "LIU " << PrintReg(RepReg, TRI);
+ if (empty()) {
+ OS << " empty\n";
+ return;
+ }
+ for (LiveSegments::const_iterator SI = Segments.begin(); SI.valid(); ++SI) {
+ OS << " [" << SI.start() << ' ' << SI.stop() << "):"
+ << PrintReg(SI.value()->reg, TRI);
+ }
+ OS << '\n';
+}
+
+void LiveIntervalUnion::InterferenceResult::print(raw_ostream &OS,
+ const TargetRegisterInfo *TRI) const {
+ OS << '[' << start() << ';' << stop() << "):"
+ << PrintReg(interference()->reg, TRI);
+}
+
+void LiveIntervalUnion::Query::print(raw_ostream &OS,
+ const TargetRegisterInfo *TRI) {
+ OS << "Interferences with ";
+ LiveUnion->print(OS, TRI);
+ InterferenceResult IR = firstInterference();
+ while (isInterference(IR)) {
+ OS << " ";
+ IR.print(OS, TRI);
+ OS << '\n';
+ nextInterference(IR);
+ }
+}
+
+#ifndef NDEBUG
+// Verify the live intervals in this union and add them to the visited set.
+void LiveIntervalUnion::verify(LiveVirtRegBitSet& VisitedVRegs) {
+ for (SegmentIter SI = Segments.begin(); SI.valid(); ++SI)
+ VisitedVRegs.set(SI.value()->reg);
+}
+#endif //!NDEBUG
+
+// Private interface accessed by Query.
+//
+// Find a pair of segments that intersect, one in the live virtual register
+// (LiveInterval), and the other in this LiveIntervalUnion. The caller (Query)
+// is responsible for advancing the LiveIntervalUnion segments to find a
+// "notable" intersection, which requires query-specific logic.
+//
+// This design assumes only a fast mechanism for intersecting a single live
+// virtual register segment with a set of LiveIntervalUnion segments. This may
+// be ok since most virtual registers have very few segments. If we had a data
+// structure that optimizd MxN intersection of segments, then we would bypass
+// the loop that advances within the LiveInterval.
+//
+// If no intersection exists, set VirtRegI = VirtRegEnd, and set SI to the first
+// segment whose start point is greater than LiveInterval's end point.
+//
+// Assumes that segments are sorted by start position in both
+// LiveInterval and LiveSegments.
+void LiveIntervalUnion::Query::findIntersection(InterferenceResult &IR) const {
+ // Search until reaching the end of the LiveUnion segments.
+ LiveInterval::iterator VirtRegEnd = VirtReg->end();
+ if (IR.VirtRegI == VirtRegEnd)
+ return;
+ while (IR.LiveUnionI.valid()) {
+ // Slowly advance the live virtual reg iterator until we surpass the next
+ // segment in LiveUnion.
+ //
+ // Note: If this is ever used for coalescing of fixed registers and we have
+ // a live vreg with thousands of segments, then change this code to use
+ // upperBound instead.
+ IR.VirtRegI = VirtReg->advanceTo(IR.VirtRegI, IR.LiveUnionI.start());
+ if (IR.VirtRegI == VirtRegEnd)
+ break; // Retain current (nonoverlapping) LiveUnionI
+
+ // VirtRegI may have advanced far beyond LiveUnionI, catch up.
+ IR.LiveUnionI.advanceTo(IR.VirtRegI->start);
+
+ // Check if no LiveUnionI exists with VirtRegI->start < LiveUnionI.stop()
+ if (!IR.LiveUnionI.valid())
+ break;
+ if (IR.LiveUnionI.start() < IR.VirtRegI->end) {
+ assert(overlap(*IR.VirtRegI, IR.LiveUnionI) &&
+ "upperBound postcondition");
+ break;
+ }
+ }
+ if (!IR.LiveUnionI.valid())
+ IR.VirtRegI = VirtRegEnd;
+}
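+// Worked example of the leapfrogging above: with VirtReg segments [4;8) and
+// [16;24) and union segments [0;2) and [20;32), advanceTo(0) keeps [4;8),
+// LiveUnionI.advanceTo(4) lands on [20;32), 20 < 8 fails, the vreg advances
+// to [16;24), and 20 < 24 reports the intersection [20;24).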
+
+// Find the first intersection, and cache interference info
+// (retain segment iterators into both VirtReg and LiveUnion).
+const LiveIntervalUnion::InterferenceResult &
+LiveIntervalUnion::Query::firstInterference() {
+ if (CheckedFirstInterference)
+ return FirstInterference;
+ CheckedFirstInterference = true;
+ InterferenceResult &IR = FirstInterference;
+
+ // Quickly skip interference check for empty sets.
+ if (VirtReg->empty() || LiveUnion->empty()) {
+ IR.VirtRegI = VirtReg->end();
+ } else if (VirtReg->beginIndex() < LiveUnion->startIndex()) {
+ // VirtReg starts first, perform double binary search.
+ IR.VirtRegI = VirtReg->find(LiveUnion->startIndex());
+ if (IR.VirtRegI != VirtReg->end())
+ IR.LiveUnionI = LiveUnion->find(IR.VirtRegI->start);
+ } else {
+ // LiveUnion starts first, perform double binary search.
+ IR.LiveUnionI = LiveUnion->find(VirtReg->beginIndex());
+ if (IR.LiveUnionI.valid())
+ IR.VirtRegI = VirtReg->find(IR.LiveUnionI.start());
+ else
+ IR.VirtRegI = VirtReg->end();
+ }
+ findIntersection(FirstInterference);
+ assert((IR.VirtRegI == VirtReg->end() || IR.LiveUnionI.valid())
+ && "Uninitialized iterator");
+ return FirstInterference;
+}
+
+// Treat the result as an iterator and advance to the next interfering pair
+// of segments. This is a plain iterator with no filter.
+bool LiveIntervalUnion::Query::nextInterference(InterferenceResult &IR) const {
+ assert(isInterference(IR) && "iteration past end of interferences");
+
+ // Advance either the VirtReg or LiveUnion segment to ensure that we visit all
+ // unique overlapping pairs.
+ if (IR.VirtRegI->end < IR.LiveUnionI.stop()) {
+ if (++IR.VirtRegI == VirtReg->end())
+ return false;
+ }
+ else {
+ if (!(++IR.LiveUnionI).valid()) {
+ IR.VirtRegI = VirtReg->end();
+ return false;
+ }
+ }
+ // Short-circuit findIntersection() if possible.
+ if (overlap(*IR.VirtRegI, IR.LiveUnionI))
+ return true;
+
+ // Find the next intersection.
+ findIntersection(IR);
+ return isInterference(IR);
+}
+
+// Scan the vector of interfering virtual registers in this union. Assume it's
+// quite small.
+bool LiveIntervalUnion::Query::isSeenInterference(LiveInterval *VirtReg) const {
+ SmallVectorImpl<LiveInterval*>::const_iterator I =
+ std::find(InterferingVRegs.begin(), InterferingVRegs.end(), VirtReg);
+ return I != InterferingVRegs.end();
+}
+
+// Count the number of virtual registers in this union that interfere with this
+// query's live virtual register.
+//
+// The number of times that we either advance IR.VirtRegI or call
+// LiveUnion.upperBound() will be no more than the number of holes in
+// VirtReg. So each invocation of collectInterferingVRegs() takes
+// time proportional to |VirtReg Holes| * time(LiveUnion.upperBound()).
+//
+// For comments on how to speed it up, see Query::findIntersection().
+unsigned LiveIntervalUnion::Query::
+collectInterferingVRegs(unsigned MaxInterferingRegs) {
+ InterferenceResult IR = firstInterference();
+ LiveInterval::iterator VirtRegEnd = VirtReg->end();
+ LiveInterval *RecentInterferingVReg = NULL;
+ if (IR.VirtRegI != VirtRegEnd) while (IR.LiveUnionI.valid()) {
+ // Advance the union's iterator to reach an unseen interfering vreg.
+ do {
+ if (IR.LiveUnionI.value() == RecentInterferingVReg)
+ continue;
+
+ if (!isSeenInterference(IR.LiveUnionI.value()))
+ break;
+
+ // Cache the most recent interfering vreg to bypass isSeenInterference.
+ RecentInterferingVReg = IR.LiveUnionI.value();
+
+ } while ((++IR.LiveUnionI).valid());
+ if (!IR.LiveUnionI.valid())
+ break;
+
+ // Advance the VirtReg iterator until surpassing the next segment in
+ // LiveUnion.
+ IR.VirtRegI = VirtReg->advanceTo(IR.VirtRegI, IR.LiveUnionI.start());
+ if (IR.VirtRegI == VirtRegEnd)
+ break;
+
+ // Check for intersection with the union's segment.
+ if (overlap(*IR.VirtRegI, IR.LiveUnionI)) {
+
+ if (!IR.LiveUnionI.value()->isSpillable())
+ SeenUnspillableVReg = true;
+
+ if (InterferingVRegs.size() == MaxInterferingRegs)
+ // Leave SeenAllInterferences set to false to indicate that at least one
+ // interference exists beyond those we collected.
+ return MaxInterferingRegs;
+
+ InterferingVRegs.push_back(IR.LiveUnionI.value());
+
+ // Cache the most recent interfering vreg to bypass isSeenInterference.
+ RecentInterferingVReg = IR.LiveUnionI.value();
+ ++IR.LiveUnionI;
+ continue;
+ }
+ // VirtRegI may have advanced far beyond LiveUnionI,
+ // do a fast intersection test to "catch up"
+ IR.LiveUnionI.advanceTo(IR.VirtRegI->start);
+ }
+ SeenAllInterferences = true;
+ return InterferingVRegs.size();
+}
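+// Usage sketch with hypothetical names: a caller that only needs to know
+// whether more than N vregs interfere can bound the scan:
+//   LiveIntervalUnion::Query Q(&VirtRegLI, &LIU);
+//   if (Q.collectInterferingVRegs(N) == N && !Q.seenAllInterferences())
+//     ; // at least N+1 interfering vregs exist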
+
+bool LiveIntervalUnion::Query::checkLoopInterference(MachineLoopRange *Loop) {
+ // VirtReg is likely live throughout the loop, so start by checking LIU-Loop
+ // overlaps.
+ IntervalMapOverlaps<LiveIntervalUnion::Map, MachineLoopRange::Map>
+ Overlaps(LiveUnion->getMap(), Loop->getMap());
+ if (!Overlaps.valid())
+ return false;
+
+ // The loop is overlapping an LIU assignment. Check VirtReg as well.
+ LiveInterval::iterator VRI = VirtReg->find(Overlaps.start());
+
+ for (;;) {
+ if (VRI == VirtReg->end())
+ return false;
+ if (VRI->start < Overlaps.stop())
+ return true;
+
+ Overlaps.advanceTo(VRI->start);
+ if (!Overlaps.valid())
+ return false;
+ if (Overlaps.start() < VRI->end)
+ return true;
+
+ VRI = VirtReg->advanceTo(VRI, Overlaps.start());
+ }
+}
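+// Usage sketch, assuming Loop is the MachineLoopRange of a candidate loop
+// (hypothetical name):
+//   if (Q.checkLoopInterference(Loop))
+//     ; // the tentative assignment conflicts somewhere inside the loop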
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
new file mode 100644
index 0000000..6f9c5f4
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
@@ -0,0 +1,258 @@
+//===-- LiveIntervalUnion.h - Live interval union data struct --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LiveIntervalUnion is a union of live segments across multiple live virtual
+// registers. This may be used during coalescing to represent a congruence
+// class, or during register allocation to model liveness of a physical
+// register.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALUNION
+#define LLVM_CODEGEN_LIVEINTERVALUNION
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/CodeGen/LiveInterval.h"
+
+#include <algorithm>
+
+namespace llvm {
+
+class MachineLoopRange;
+class TargetRegisterInfo;
+
+#ifndef NDEBUG
+// forward declaration
+template <unsigned Element> class SparseBitVector;
+typedef SparseBitVector<128> LiveVirtRegBitSet;
+#endif
+
+/// Compare a live virtual register segment to a LiveIntervalUnion segment.
+inline bool
+overlap(const LiveRange &VRSeg,
+ const IntervalMap<SlotIndex, LiveInterval*>::const_iterator &LUSeg) {
+ return VRSeg.start < LUSeg.stop() && LUSeg.start() < VRSeg.end;
+}
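+// For instance, segments [8;16) and [16;24) do not overlap, since both end
+// points are exclusive, while [8;17) and [16;24) do.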
+
+/// Union of live intervals that are strong candidates for coalescing into a
+/// single register (either physical or virtual depending on the context). We
+/// expect the constituent live intervals to be disjoint, although we may
+/// eventually make exceptions to handle value-based interference.
+class LiveIntervalUnion {
+ // A set of live virtual register segments that supports fast insertion,
+ // intersection, and removal.
+ // Mapping SlotIndex intervals to virtual register numbers.
+ typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
+
+public:
+ // SegmentIter can advance to the next segment ordered by starting position
+ // which may belong to a different live virtual register. We must also be able
+ // to reach the current segment's containing virtual register.
+ typedef LiveSegments::iterator SegmentIter;
+
+ // LiveIntervalUnions share an external allocator.
+ typedef LiveSegments::Allocator Allocator;
+
+ class InterferenceResult;
+ class Query;
+
+private:
+ const unsigned RepReg; // representative register number
+ unsigned Tag; // unique tag for current contents.
+ LiveSegments Segments; // union of virtual reg segments
+
+public:
+ LiveIntervalUnion(unsigned r, Allocator &a) : RepReg(r), Tag(0), Segments(a)
+ {}
+
+ // Iterate over all segments in the union of live virtual registers ordered
+ // by their starting position.
+ SegmentIter begin() { return Segments.begin(); }
+ SegmentIter end() { return Segments.end(); }
+ SegmentIter find(SlotIndex x) { return Segments.find(x); }
+ bool empty() const { return Segments.empty(); }
+ SlotIndex startIndex() const { return Segments.start(); }
+
+ // Provide public access to the underlying map to allow overlap iteration.
+ typedef LiveSegments Map;
+ const Map &getMap() { return Segments; }
+
+ /// getTag - Return an opaque tag representing the current state of the union.
+ unsigned getTag() const { return Tag; }
+
+ /// changedSince - Return true if the union has changed since getTag returned
+ /// tag.
+ bool changedSince(unsigned tag) const { return tag != Tag; }
+
+ // Add a live virtual register to this union and merge its segments.
+ void unify(LiveInterval &VirtReg);
+
+ // Remove a live virtual register's segments from this union.
+ void extract(LiveInterval &VirtReg);
+
+ // Print union, using TRI to translate register names
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
+
+#ifndef NDEBUG
+ // Verify the live intervals in this union and add them to the visited set.
+ void verify(LiveVirtRegBitSet& VisitedVRegs);
+#endif
+
+ /// Cache a single interference test result in the form of two intersecting
+ /// segments. This allows efficiently iterating over the interferences. The
+ /// iteration logic is handled by LiveIntervalUnion::Query which may
+ /// filter interferences depending on the type of query.
+ class InterferenceResult {
+ friend class Query;
+
+ LiveInterval::iterator VirtRegI; // current position in VirtReg
+ SegmentIter LiveUnionI; // current position in LiveUnion
+
+ // Internal ctor.
+ InterferenceResult(LiveInterval::iterator VRegI, SegmentIter UnionI)
+ : VirtRegI(VRegI), LiveUnionI(UnionI) {}
+
+ public:
+ // Public default ctor.
+ InterferenceResult(): VirtRegI(), LiveUnionI() {}
+
+ /// start - Return the start of the current overlap.
+ SlotIndex start() const {
+ return std::max(VirtRegI->start, LiveUnionI.start());
+ }
+
+ /// stop - Return the end of the current overlap.
+ SlotIndex stop() const {
+ return std::min(VirtRegI->end, LiveUnionI.stop());
+ }
+
+ /// interference - Return the register that is interfering here.
+ LiveInterval *interference() const { return LiveUnionI.value(); }
+
+ // Note: this interface provides raw access to the iterators because the
+ // result has no way to tell if it's valid to dereference them.
+
+ // Access the VirtReg segment.
+ LiveInterval::iterator virtRegPos() const { return VirtRegI; }
+
+ // Access the LiveUnion segment.
+ const SegmentIter &liveUnionPos() const { return LiveUnionI; }
+
+ bool operator==(const InterferenceResult &IR) const {
+ return VirtRegI == IR.VirtRegI && LiveUnionI == IR.LiveUnionI;
+ }
+ bool operator!=(const InterferenceResult &IR) const {
+ return !operator==(IR);
+ }
+
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
+ };
+
+ /// Query interferences between a single live virtual register and a live
+ /// interval union.
+ class Query {
+ LiveIntervalUnion *LiveUnion;
+ LiveInterval *VirtReg;
+ InterferenceResult FirstInterference;
+ SmallVector<LiveInterval*,4> InterferingVRegs;
+ bool CheckedFirstInterference;
+ bool SeenAllInterferences;
+ bool SeenUnspillableVReg;
+ unsigned Tag;
+
+ public:
+ Query(): LiveUnion(), VirtReg() {}
+
+ Query(LiveInterval *VReg, LiveIntervalUnion *LIU):
+ LiveUnion(LIU), VirtReg(VReg), CheckedFirstInterference(false),
+ SeenAllInterferences(false), SeenUnspillableVReg(false)
+ {}
+
+ void clear() {
+ LiveUnion = NULL;
+ VirtReg = NULL;
+ InterferingVRegs.clear();
+ CheckedFirstInterference = false;
+ SeenAllInterferences = false;
+ SeenUnspillableVReg = false;
+ Tag = 0;
+ }
+
+ void init(LiveInterval *VReg, LiveIntervalUnion *LIU) {
+ assert(VReg && LIU && "Invalid arguments");
+ if (VirtReg == VReg && LiveUnion == LIU && !LIU->changedSince(Tag)) {
+ // Retain cached results, e.g. firstInterference.
+ return;
+ }
+ clear();
+ LiveUnion = LIU;
+ VirtReg = VReg;
+ Tag = LIU->getTag();
+ }
+
+ LiveInterval &virtReg() const {
+ assert(VirtReg && "uninitialized");
+ return *VirtReg;
+ }
+
+ bool isInterference(const InterferenceResult &IR) const {
+ if (IR.VirtRegI != VirtReg->end()) {
+ assert(overlap(*IR.VirtRegI, IR.LiveUnionI) &&
+ "invalid segment iterators");
+ return true;
+ }
+ return false;
+ }
+
+ // Does this live virtual register interfere with the union?
+ bool checkInterference() { return isInterference(firstInterference()); }
+
+ // Get the first pair of interfering segments, or a noninterfering result.
+ // This initializes the firstInterference_ cache.
+ const InterferenceResult &firstInterference();
+
+ // Treat the result as an iterator and advance to the next interfering pair
+ // of segments. Visiting each unique interfering pair means that the same
+ // VirtReg or LiveUnion segment may be visited multiple times.
+ bool nextInterference(InterferenceResult &IR) const;
+
+ // Count the virtual registers in this union that interfere with this
+ // query's live virtual register, up to maxInterferingRegs.
+ unsigned collectInterferingVRegs(unsigned MaxInterferingRegs = UINT_MAX);
+
+ // Was this virtual register visited during collectInterferingVRegs?
+ bool isSeenInterference(LiveInterval *VReg) const;
+
+ // Did collectInterferingVRegs collect all interferences?
+ bool seenAllInterferences() const { return SeenAllInterferences; }
+
+ // Did collectInterferingVRegs encounter an unspillable vreg?
+ bool seenUnspillableVReg() const { return SeenUnspillableVReg; }
+
+ // Vector generated by collectInterferingVRegs.
+ const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
+ return InterferingVRegs;
+ }
+
+ /// checkLoopInterference - Return true if there is interference overlapping
+ /// Loop.
+ bool checkLoopInterference(MachineLoopRange*);
+
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI);
+ private:
+ Query(const Query&); // DO NOT IMPLEMENT
+ void operator=(const Query&); // DO NOT IMPLEMENT
+
+ // Private interface for queries
+ void findIntersection(InterferenceResult &IR) const;
+ };
+};
+
+} // end namespace llvm
+
+#endif // !defined(LLVM_CODEGEN_LIVEINTERVALUNION)
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
new file mode 100644
index 0000000..3bbda1c
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -0,0 +1,129 @@
+//===--- LiveRangeEdit.cpp - Basic tools for editing a register live range --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//===----------------------------------------------------------------------===//
+
+#include "LiveRangeEdit.h"
+#include "VirtRegMap.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+LiveInterval &LiveRangeEdit::create(MachineRegisterInfo &mri,
+ LiveIntervals &lis,
+ VirtRegMap &vrm) {
+ const TargetRegisterClass *RC = mri.getRegClass(getReg());
+ unsigned VReg = mri.createVirtualRegister(RC);
+ vrm.grow();
+ vrm.setIsSplitFromReg(VReg, vrm.getOriginal(getReg()));
+ LiveInterval &li = lis.getOrCreateInterval(VReg);
+ newRegs_.push_back(&li);
+ return li;
+}
+
+void LiveRangeEdit::scanRemattable(LiveIntervals &lis,
+ const TargetInstrInfo &tii,
+ AliasAnalysis *aa) {
+ for (LiveInterval::vni_iterator I = parent_.vni_begin(),
+ E = parent_.vni_end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->isUnused())
+ continue;
+ MachineInstr *DefMI = lis.getInstructionFromIndex(VNI->def);
+ if (!DefMI)
+ continue;
+ if (tii.isTriviallyReMaterializable(DefMI, aa))
+ remattable_.insert(VNI);
+ }
+ scannedRemattable_ = true;
+}
+
+bool LiveRangeEdit::anyRematerializable(LiveIntervals &lis,
+ const TargetInstrInfo &tii,
+ AliasAnalysis *aa) {
+ if (!scannedRemattable_)
+ scanRemattable(lis, tii, aa);
+ return !remattable_.empty();
+}
+
+/// allUsesAvailableAt - Return true if all registers used by OrigMI at
+/// OrigIdx are also available with the same value at UseIdx.
+bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
+ SlotIndex OrigIdx,
+ SlotIndex UseIdx,
+ LiveIntervals &lis) {
+ OrigIdx = OrigIdx.getUseIndex();
+ UseIdx = UseIdx.getUseIndex();
+ for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = OrigMI->getOperand(i);
+ if (!MO.isReg() || !MO.getReg() || MO.getReg() == getReg())
+ continue;
+ // Reserved registers are OK.
+ if (MO.isUndef() || !lis.hasInterval(MO.getReg()))
+ continue;
+ // We don't want to move any defs.
+ if (MO.isDef())
+ return false;
+ // We cannot depend on virtual registers in uselessRegs_.
+ for (unsigned ui = 0, ue = uselessRegs_.size(); ui != ue; ++ui)
+ if (uselessRegs_[ui]->reg == MO.getReg())
+ return false;
+
+ LiveInterval &li = lis.getInterval(MO.getReg());
+ const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
+ if (!OVNI)
+ continue;
+ if (OVNI != li.getVNInfoAt(UseIdx))
+ return false;
+ }
+ return true;
+}
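+// For example, if OrigMI is "%v1 = add %v2, 8" (hypothetical), rematting it at
+// UseIdx is rejected unless %v2 carries the same VNInfo at OrigIdx and UseIdx,
+// i.e. %v2 is not redefined in between.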
+
+bool LiveRangeEdit::canRematerializeAt(Remat &RM,
+ SlotIndex UseIdx,
+ bool cheapAsAMove,
+ LiveIntervals &lis) {
+ assert(scannedRemattable_ && "Call anyRematerializable first");
+
+ // Use scanRemattable info.
+ if (!remattable_.count(RM.ParentVNI))
+ return false;
+
+ // No defining instruction.
+ RM.OrigMI = lis.getInstructionFromIndex(RM.ParentVNI->def);
+ assert(RM.OrigMI && "Defining instruction for remattable value disappeared");
+
+ // If only cheap remats were requested, bail out early.
+ if (cheapAsAMove && !RM.OrigMI->getDesc().isAsCheapAsAMove())
+ return false;
+
+ // Verify that all used registers are available with the same values.
+ if (!allUsesAvailableAt(RM.OrigMI, RM.ParentVNI->def, UseIdx, lis))
+ return false;
+
+ return true;
+}
+
+SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg,
+ const Remat &RM,
+ LiveIntervals &lis,
+ const TargetInstrInfo &tii,
+ const TargetRegisterInfo &tri) {
+ assert(RM.OrigMI && "Invalid remat");
+ tii.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
+ rematted_.insert(RM.ParentVNI);
+ return lis.InsertMachineInstrInMaps(--MI).getDefIndex();
+}
+
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.h b/contrib/llvm/lib/CodeGen/LiveRangeEdit.h
new file mode 100644
index 0000000..73f69ed
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.h
@@ -0,0 +1,135 @@
+//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//
+// The parent register is never changed. Instead, a number of new virtual
+// registers are created and added to the newRegs vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
+#define LLVM_CODEGEN_LIVERANGEEDIT_H
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class LiveIntervals;
+class MachineRegisterInfo;
+class VirtRegMap;
+
+class LiveRangeEdit {
+ LiveInterval &parent_;
+ SmallVectorImpl<LiveInterval*> &newRegs_;
+ const SmallVectorImpl<LiveInterval*> &uselessRegs_;
+
+ /// firstNew_ - Index of the first register added to newRegs_.
+ const unsigned firstNew_;
+
+ /// scannedRemattable_ - true when remattable values have been identified.
+ bool scannedRemattable_;
+
+ /// remattable_ - Values defined by remattable instructions as identified by
+ /// tii.isTriviallyReMaterializable().
+ SmallPtrSet<VNInfo*,4> remattable_;
+
+ /// rematted_ - Values that were actually rematted, and so need to have their
+ /// live range trimmed or entirely removed.
+ SmallPtrSet<VNInfo*,4> rematted_;
+
+ /// scanRemattable - Identify the parent_ values that may rematerialize.
+ void scanRemattable(LiveIntervals &lis,
+ const TargetInstrInfo &tii,
+ AliasAnalysis *aa);
+
+ /// allUsesAvailableAt - Return true if all registers used by OrigMI at
+ /// OrigIdx are also available with the same value at UseIdx.
+ bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+ SlotIndex UseIdx, LiveIntervals &lis);
+
+public:
+ /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
+ /// @param parent The register being spilled or split.
+ /// @param newRegs List to receive any new registers created. This needn't be
+ /// empty initially; any existing registers are ignored.
+ /// @param uselessRegs List of registers that can't be used when
+ /// rematerializing values because they are about to be removed.
+ LiveRangeEdit(LiveInterval &parent,
+ SmallVectorImpl<LiveInterval*> &newRegs,
+ const SmallVectorImpl<LiveInterval*> &uselessRegs)
+ : parent_(parent), newRegs_(newRegs), uselessRegs_(uselessRegs),
+ firstNew_(newRegs.size()), scannedRemattable_(false) {}
+
+ LiveInterval &getParent() const { return parent_; }
+ unsigned getReg() const { return parent_.reg; }
+
+ /// Iterator for accessing the new registers added by this edit.
+ typedef SmallVectorImpl<LiveInterval*>::const_iterator iterator;
+ iterator begin() const { return newRegs_.begin()+firstNew_; }
+ iterator end() const { return newRegs_.end(); }
+ unsigned size() const { return newRegs_.size()-firstNew_; }
+ bool empty() const { return size() == 0; }
+ LiveInterval *get(unsigned idx) const { return newRegs_[idx+firstNew_]; }
+
+ /// create - Create a new register with the same class and stack slot as
+ /// parent.
+ LiveInterval &create(MachineRegisterInfo&, LiveIntervals&, VirtRegMap&);
+
+ /// anyRematerializable - Return true if any parent values may be
+ /// rematerializable.
+ /// This function must be called before any rematerialization is attempted.
+ bool anyRematerializable(LiveIntervals&, const TargetInstrInfo&,
+ AliasAnalysis*);
+
+ /// Remat - Information needed to rematerialize at a specific location.
+ struct Remat {
+ VNInfo *ParentVNI; // parent_'s value at the remat location.
+ MachineInstr *OrigMI; // Instruction defining ParentVNI.
+ explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(0) {}
+ };
+
+ /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
+ /// UseIdx. It is assumed that parent_.getVNInfoAt(UseIdx) == ParentVNI.
+ /// When cheapAsAMove is set, only cheap remats are allowed.
+ bool canRematerializeAt(Remat &RM,
+ SlotIndex UseIdx,
+ bool cheapAsAMove,
+ LiveIntervals &lis);
+
+ /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
+ /// instruction into MBB before MI. The new instruction is mapped, but
+ /// liveness is not updated.
+ /// Return the SlotIndex of the new instruction.
+ SlotIndex rematerializeAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg,
+ const Remat &RM,
+ LiveIntervals&,
+ const TargetInstrInfo&,
+ const TargetRegisterInfo&);
+
+ /// markRematerialized - Explicitly mark a value as rematerialized after doing
+ /// it manually.
+ void markRematerialized(VNInfo *ParentVNI) {
+ rematted_.insert(ParentVNI);
+ }
+
+ /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
+ bool didRematerialize(VNInfo *ParentVNI) const {
+ return rematted_.count(ParentVNI);
+ }
+};
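+// A minimal sketch of driving the remat interface, with Edit, LIS, TII, TRI,
+// ParentVNI, UseIdx, MBB, MI and NewReg as hypothetical locals:
+//   if (Edit.anyRematerializable(LIS, TII, 0)) {
+//     LiveRangeEdit::Remat RM(ParentVNI);
+//     if (Edit.canRematerializeAt(RM, UseIdx, false, LIS))
+//       Edit.rematerializeAt(*MBB, MI, NewReg, RM, LIS, TII, TRI);
+//   }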
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index b5c385f..c75196a 100644
--- a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -26,7 +26,9 @@ using namespace llvm;
char LiveStacks::ID = 0;
INITIALIZE_PASS(LiveStacks, "livestacks",
- "Live Stack Slot Analysis", false, false);
+ "Live Stack Slot Analysis", false, false)
+
+char &llvm::LiveStacksID = LiveStacks::ID;
void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -48,6 +50,22 @@ bool LiveStacks::runOnMachineFunction(MachineFunction &) {
return false;
}
+LiveInterval &
+LiveStacks::getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
+ assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::iterator I = S2IMap.find(Slot);
+ if (I == S2IMap.end()) {
+ I = S2IMap.insert(I, std::make_pair(Slot,
+ LiveInterval(TargetRegisterInfo::index2StackSlot(Slot), 0.0F)));
+ S2RCMap.insert(std::make_pair(Slot, RC));
+ } else {
+ // Use the largest common subclass register class.
+ const TargetRegisterClass *OldRC = S2RCMap[Slot];
+ S2RCMap[Slot] = getCommonSubClass(OldRC, RC);
+ }
+ return I->second;
+}
+
/// print - Implement the dump method.
void LiveStacks::print(raw_ostream &OS, const Module*) const {
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index 375307b..dd43ef2 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -31,7 +31,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/DepthFirstIterator.h"
@@ -42,8 +41,11 @@
using namespace llvm;
char LiveVariables::ID = 0;
-INITIALIZE_PASS(LiveVariables, "livevars",
- "Live Variable Analysis", false, false);
+INITIALIZE_PASS_BEGIN(LiveVariables, "livevars",
+ "Live Variable Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(UnreachableMachineBlockElim)
+INITIALIZE_PASS_END(LiveVariables, "livevars",
+ "Live Variable Analysis", false, false)
void LiveVariables::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -79,13 +81,7 @@ void LiveVariables::VarInfo::dump() const {
LiveVariables::VarInfo &LiveVariables::getVarInfo(unsigned RegIdx) {
assert(TargetRegisterInfo::isVirtualRegister(RegIdx) &&
"getVarInfo: not a virtual register!");
- RegIdx -= TargetRegisterInfo::FirstVirtualRegister;
- if (RegIdx >= VirtRegInfo.size()) {
- if (RegIdx >= 2*VirtRegInfo.size())
- VirtRegInfo.resize(RegIdx*2);
- else
- VirtRegInfo.resize(2*VirtRegInfo.size());
- }
+ VirtRegInfo.grow(RegIdx);
return VirtRegInfo[RegIdx];
}
@@ -498,9 +494,6 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
std::fill(PhysRegUse, PhysRegUse + NumRegs, (MachineInstr*)0);
PHIJoins.clear();
- /// Get some space for a respectable number of registers.
- VirtRegInfo.resize(64);
-
analyzePHINodes(mf);
// Calculate live variable information in depth first order on the CFG of the
@@ -628,19 +621,14 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Convert and transfer the dead / killed information we have gathered into
// VirtRegInfo onto MI's.
- for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i)
- for (unsigned j = 0, e2 = VirtRegInfo[i].Kills.size(); j != e2; ++j)
- if (VirtRegInfo[i].Kills[j] ==
- MRI->getVRegDef(i + TargetRegisterInfo::FirstVirtualRegister))
- VirtRegInfo[i]
- .Kills[j]->addRegisterDead(i +
- TargetRegisterInfo::FirstVirtualRegister,
- TRI);
+ for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
+ const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
+ if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
+ VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
else
- VirtRegInfo[i]
- .Kills[j]->addRegisterKilled(i +
- TargetRegisterInfo::FirstVirtualRegister,
- TRI);
+ VirtRegInfo[Reg].Kills[j]->addRegisterKilled(Reg, TRI);
+ }
// Check to make sure there are no unreachable blocks in the MC CFG for the
// function. If so, it is due to a bug in the instruction selector or some
@@ -775,8 +763,8 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB,
getVarInfo(BBI->getOperand(i).getReg()).AliveBlocks.set(NumNew);
// Update info for all live variables
- for (unsigned Reg = TargetRegisterInfo::FirstVirtualRegister,
- E = MRI->getLastVirtReg()+1; Reg != E; ++Reg) {
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
VarInfo &VI = getVarInfo(Reg);
if (!VI.AliveBlocks.test(NumNew) && VI.isLiveIn(*SuccBB, Reg, *MRI))
VI.AliveBlocks.set(NumNew);
diff --git a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index 7e366f0..1318d62 100644
--- a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -9,7 +9,7 @@
//
// This pass assigns local frame indices to stack slots relative to one another
// and allocates additional base registers to access them when the target
-// estimates the are likely to be out of range of stack pointer and frame
+// estimates they are likely to be out of range of stack pointer and frame
// pointer relative addressing.
//
//===----------------------------------------------------------------------===//
@@ -34,7 +34,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
using namespace llvm;
@@ -152,9 +152,9 @@ void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo *MFI,
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
- const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
+ const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
bool StackGrowsDown =
- TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
+ TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
unsigned MaxAlign = 0;
@@ -227,27 +227,28 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
- const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
+ const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
bool StackGrowsDown =
- TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
- MachineBasicBlock::iterator InsertionPt = Fn.begin()->begin();
+ TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Collect all of the instructions in the block that reference
// a frame index. Also store the frame index referenced to ease later
// lookup. (For any insn that has more than one FI reference, we arbitrarily
// choose the first one).
SmallVector<FrameRef, 64> FrameReferenceInsns;
- // A base register definition is a register+offset pair.
- SmallVector<std::pair<unsigned, int64_t>, 8> BaseRegisters;
+ // A base register definition is a register + offset pair.
+ SmallVector<std::pair<unsigned, int64_t>, 8> BaseRegisters;
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
MachineInstr *MI = I;
+
// Debug value instructions can't be out of range, so they don't need
// any updates.
if (MI->isDebugValue())
continue;
+
// For now, allocate the base register(s) within the basic block
// where they're used, and don't try to keep them around outside
// of that. It may be beneficial to try sharing them more broadly
@@ -268,11 +269,13 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
}
}
}
+
// Sort the frame references by local offset
array_pod_sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
+ MachineBasicBlock *Entry = Fn.begin();
- // Loop throught the frame references and allocate for them as necessary
+ // Loop through the frame references and allocate for them as necessary.
for (int ref = 0, e = FrameReferenceInsns.size(); ref < e ; ++ref) {
MachineBasicBlock::iterator I =
FrameReferenceInsns[ref].getMachineInstr();
@@ -321,10 +324,12 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
DEBUG(dbgs() << " Materializing base register " << BaseReg <<
" at frame local offset " <<
LocalOffsets[FrameIdx] + InstrOffset << "\n");
+
// Tell the target to insert the instruction to initialize
// the base register.
- TRI->materializeFrameBaseRegister(InsertionPt, BaseReg,
- FrameIdx, InstrOffset);
+ // MachineBasicBlock::iterator InsertionPt = Entry->begin();
+ TRI->materializeFrameBaseRegister(Entry, BaseReg, FrameIdx,
+ InstrOffset);
// The base register already includes any offset specified
// by the instruction, so account for that so it doesn't get
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 50f3f67..ccbff0a 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -146,27 +147,46 @@ MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
return I;
}
+MachineBasicBlock::iterator
+MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {
+ while (I != end() && (I->isPHI() || I->isLabel() || I->isDebugValue()))
+ ++I;
+ return I;
+}
+
MachineBasicBlock::iterator MachineBasicBlock::getFirstTerminator() {
iterator I = end();
- while (I != begin() && (--I)->getDesc().isTerminator())
+ while (I != begin() && ((--I)->getDesc().isTerminator() || I->isDebugValue()))
; /*noop */
- if (I != end() && !I->getDesc().isTerminator()) ++I;
+ while (I != end() && !I->getDesc().isTerminator())
+ ++I;
return I;
}
-void MachineBasicBlock::dump() const {
- print(dbgs());
+MachineBasicBlock::iterator MachineBasicBlock::getLastNonDebugInstr() {
+ iterator B = begin(), I = end();
+ while (I != B) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+ return I;
+ }
+ // The block is all debug values.
+ return end();
+}
+
+const MachineBasicBlock *MachineBasicBlock::getLandingPadSuccessor() const {
+ // A block with a landing pad successor only has one other successor.
+ if (succ_size() > 2)
+ return 0;
+ for (const_succ_iterator I = succ_begin(), E = succ_end(); I != E; ++I)
+ if ((*I)->isLandingPad())
+ return *I;
+ return 0;
}
-static inline void OutputReg(raw_ostream &os, unsigned RegNo,
- const TargetRegisterInfo *TRI = 0) {
- if (RegNo != 0 && TargetRegisterInfo::isPhysicalRegister(RegNo)) {
- if (TRI)
- os << " %" << TRI->get(RegNo).Name;
- else
- os << " %physreg" << RegNo;
- } else
- os << " %reg" << RegNo;
+void MachineBasicBlock::dump() const {
+ print(dbgs());
}
StringRef MachineBasicBlock::getName() const {
@@ -176,7 +196,7 @@ StringRef MachineBasicBlock::getName() const {
return "(null)";
}
-void MachineBasicBlock::print(raw_ostream &OS) const {
+void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
const MachineFunction *MF = getParent();
if (!MF) {
OS << "Can't print out MachineBasicBlock because parent MachineFunction"
@@ -186,6 +206,9 @@ void MachineBasicBlock::print(raw_ostream &OS) const {
if (Alignment) { OS << "Alignment " << Alignment << "\n"; }
+ if (Indexes)
+ OS << Indexes->getMBBStartIdx(this) << '\t';
+
OS << "BB#" << getNumber() << ": ";
const char *Comma = "";
@@ -198,28 +221,36 @@ void MachineBasicBlock::print(raw_ostream &OS) const {
if (hasAddressTaken()) { OS << Comma << "ADDRESS TAKEN"; Comma = ", "; }
OS << '\n';
- const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
if (!livein_empty()) {
+ if (Indexes) OS << '\t';
OS << " Live Ins:";
for (livein_iterator I = livein_begin(),E = livein_end(); I != E; ++I)
- OutputReg(OS, *I, TRI);
+ OS << ' ' << PrintReg(*I, TRI);
OS << '\n';
}
// Print the preds of this block according to the CFG.
if (!pred_empty()) {
+ if (Indexes) OS << '\t';
OS << " Predecessors according to CFG:";
for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI)
OS << " BB#" << (*PI)->getNumber();
OS << '\n';
}
-
+
for (const_iterator I = begin(); I != end(); ++I) {
+ if (Indexes) {
+ if (Indexes->hasIndex(I))
+ OS << Indexes->getInstructionIndex(I);
+ OS << '\t';
+ }
OS << '\t';
I->print(OS, &getParent()->getTarget());
}
// Print the successors of this block according to the CFG.
if (!succ_empty()) {
+ if (Indexes) OS << '\t';
OS << " Successors according to CFG:";
for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI)
OS << " BB#" << (*SI)->getNumber();
@@ -431,14 +462,24 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
- // We may need to update this's terminator, but we can't do that if AnalyzeBranch
- // fails. If this uses a jump table, we won't touch it.
+ // We may need to update this's terminator, but we can't do that if
+ // AnalyzeBranch fails. If this uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
return NULL;
+ // Avoid bugpoint weirdness: A block may end with a conditional branch but
+ // jump to the same MBB in either case. We have duplicate CFG edges in that
+ // case that we can't handle. Since this never happens in properly optimized
+ // code, just skip those edges.
+ if (TBB && TBB == FBB) {
+ DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
+ << getNumber() << '\n');
+ return NULL;
+ }
+
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index 272b54d..07a7d27 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -22,15 +22,18 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/RecyclingAllocator.h"
using namespace llvm;
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs, "Number of common subexpression eliminated");
-STATISTIC(NumPhysCSEs, "Number of phyreg defining common subexpr eliminated");
+STATISTIC(NumPhysCSEs,
+ "Number of physreg referencing common subexpr eliminated");
+STATISTIC(NumCommutes, "Number of copies coalesced after commuting");
namespace {
class MachineCSE : public MachineFunctionPass {
@@ -41,7 +44,9 @@ namespace {
MachineRegisterInfo *MRI;
public:
static char ID; // Pass identification
- MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {}
+ MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
+ initializeMachineCSEPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -61,10 +66,13 @@ namespace {
private:
const unsigned LookAheadLimit;
- typedef ScopedHashTableScope<MachineInstr*, unsigned,
- MachineInstrExpressionTrait> ScopeType;
+ typedef RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
+ typedef ScopedHashTable<MachineInstr*, unsigned,
+ MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
+ typedef ScopedHTType::ScopeTy ScopeType;
DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
- ScopedHashTable<MachineInstr*, unsigned, MachineInstrExpressionTrait> VNT;
+ ScopedHTType VNT;
SmallVector<MachineInstr*, 64> Exps;
unsigned CurrVN;
@@ -72,11 +80,11 @@ namespace {
bool isPhysDefTriviallyDead(unsigned Reg,
MachineBasicBlock::const_iterator I,
MachineBasicBlock::const_iterator E) const ;
- bool hasLivePhysRegDefUse(const MachineInstr *MI,
- const MachineBasicBlock *MBB,
- unsigned &PhysDef) const;
- bool PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
- unsigned PhysDef) const;
+ bool hasLivePhysRegDefUses(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ SmallSet<unsigned,8> &PhysRefs) const;
+ bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
+ SmallSet<unsigned,8> &PhysRefs) const;
bool isCSECandidate(MachineInstr *MI);
bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI);
@@ -91,8 +99,12 @@ namespace {
} // end anonymous namespace
char MachineCSE::ID = 0;
-INITIALIZE_PASS(MachineCSE, "machine-cse",
- "Machine Common Subexpression Elimination", false, false);
+INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
+ "Machine Common Subexpression Elimination", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineCSE, "machine-cse",
+ "Machine Common Subexpression Elimination", false, false)
FunctionPass *llvm::createMachineCSEPass() { return new MachineCSE(); }
@@ -104,7 +116,7 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (!MRI->hasOneNonDBGUse(Reg))
      // Only coalesce single-use copies. This ensures the copy will be
@@ -120,17 +132,12 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
continue;
if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
continue;
- const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
- const TargetRegisterClass *RC = MRI->getRegClass(Reg);
- const TargetRegisterClass *NewRC = getCommonSubClass(RC, SRC);
- if (!NewRC)
+ if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
continue;
DEBUG(dbgs() << "Coalescing: " << *DefMI);
- DEBUG(dbgs() << "*** to: " << *MI);
+ DEBUG(dbgs() << "*** to: " << *MI);
MO.setReg(SrcReg);
MRI->clearKillFlags(SrcReg);
- if (NewRC != SRC)
- MRI->setRegClass(SrcReg, NewRC);
DefMI->eraseFromParent();
++NumCoalesces;
Changed = true;
@@ -176,14 +183,14 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
return false;
}
-/// hasLivePhysRegDefUse - Return true if the specified instruction read / write
+/// hasLivePhysRegDefUses - Return true if the specified instruction reads or
/// writes physical registers (except for dead defs of physical registers).
/// The physical registers it references, together with their aliases, are
/// collected in PhysRefs.
-bool MachineCSE::hasLivePhysRegDefUse(const MachineInstr *MI,
- const MachineBasicBlock *MBB,
- unsigned &PhysDef) const {
- PhysDef = 0;
+bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ SmallSet<unsigned,8> &PhysRefs) const {
+ MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
@@ -193,35 +200,22 @@ bool MachineCSE::hasLivePhysRegDefUse(const MachineInstr *MI,
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- if (MO.isUse()) {
- // Can't touch anything to read a physical register.
- PhysDef = 0;
- return true;
- }
- if (MO.isDead())
- // If the def is dead, it's ok.
- continue;
- // Ok, this is a physical register def that's not marked "dead". That's
+    // If the def is dead, it's ok. But the def may not be marked "dead". That's
// common since this pass is run before livevariables. We can scan
// forward a few instructions and check if it is obviously dead.
- if (PhysDef) {
- // Multiple physical register defs. These are rare, forget about it.
- PhysDef = 0;
- return true;
- }
- PhysDef = Reg;
+ if (MO.isDef() &&
+ (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
+ continue;
+ PhysRefs.insert(Reg);
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
+ PhysRefs.insert(*Alias);
}
- if (PhysDef) {
- MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
- if (!isPhysDefTriviallyDead(PhysDef, I, MBB->end()))
- return true;
- }
- return false;
+ return !PhysRefs.empty();
}
-bool MachineCSE::PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
- unsigned PhysDef) const {
+bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
+ SmallSet<unsigned,8> &PhysRefs) const {
// For now conservatively returns false if the common subexpression is
// not in the same basic block as the given instruction.
MachineBasicBlock *MBB = MI->getParent();
@@ -237,8 +231,17 @@ bool MachineCSE::PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
if (I == E)
return true;
- if (I->modifiesRegister(PhysDef, TRI))
- return false;
+
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = I->getOperand(i);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(MOReg))
+ continue;
+ if (PhysRefs.count(MOReg))
+ return false;
+ }
--LookAheadLeft;
++I;
@@ -259,7 +262,7 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
// Ignore stuff that we obviously can't move.
const TargetInstrDesc &TID = MI->getDesc();
if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
- TID.hasUnmodeledSideEffects())
+ MI->hasUnmodeledSideEffects())
return false;
if (TID.mayLoad()) {
@@ -281,14 +284,13 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristics that work around the lack of live range splitting.
- // Heuristics #1: Don't cse "cheap" computating if the def is not local or in an
- // immediate predecessor. We don't want to increase register pressure and end up
- // causing other computation to be spilled.
+ // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
+ // an immediate predecessor. We don't want to increase register pressure and
+ // end up causing other computation to be spilled.
if (MI->getDesc().isAsCheapAsAMove()) {
MachineBasicBlock *CSBB = CSMI->getParent();
MachineBasicBlock *BB = MI->getParent();
- if (CSBB != BB &&
- find(CSBB->succ_begin(), CSBB->succ_end(), BB) == CSBB->succ_end())
+ if (CSBB != BB && !CSBB->isSuccessor(BB))
return false;
}
@@ -297,7 +299,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
bool HasVRegUse = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isUse() && MO.getReg() &&
+ if (MO.isReg() && MO.isUse() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
HasVRegUse = true;
break;
@@ -359,7 +361,6 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
if (!isCSECandidate(MI))
continue;
- bool DefPhys = false;
bool FoundCSE = VNT.count(MI);
if (!FoundCSE) {
// Look for trivial copy coalescing opportunities.
@@ -370,24 +371,37 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
FoundCSE = VNT.count(MI);
}
}
- // FIXME: commute commutable instructions?
- // If the instruction defines a physical register and the value *may* be
+ // Commute commutable instructions.
+ bool Commuted = false;
+ if (!FoundCSE && MI->getDesc().isCommutable()) {
+ MachineInstr *NewMI = TII->commuteInstruction(MI);
+ if (NewMI) {
+ Commuted = true;
+ FoundCSE = VNT.count(NewMI);
+ if (NewMI != MI)
+ // New instruction. It doesn't need to be kept.
+ NewMI->eraseFromParent();
+ else if (!FoundCSE)
+ // MI was changed but it didn't help, commute it back!
+ (void)TII->commuteInstruction(MI);
+ }
+ }
+
+ // If the instruction defines physical registers and the values *may* be
// used, then it's not safe to replace it with a common subexpression.
- unsigned PhysDef = 0;
- if (FoundCSE && hasLivePhysRegDefUse(MI, MBB, PhysDef)) {
+ // It's also not safe if the instruction uses physical registers.
+ SmallSet<unsigned,8> PhysRefs;
+ if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs)) {
FoundCSE = false;
// ... Unless the CS is local and it also defines the physical register
- // which is not clobbered in between.
- if (PhysDef) {
- unsigned CSVN = VNT.lookup(MI);
- MachineInstr *CSMI = Exps[CSVN];
- if (PhysRegDefReaches(CSMI, MI, PhysDef)) {
- FoundCSE = true;
- DefPhys = true;
- }
- }
+    // which is not clobbered in between, and the physical register uses were
+    // not clobbered either.
+ unsigned CSVN = VNT.lookup(MI);
+ MachineInstr *CSMI = Exps[CSVN];
+ if (PhysRegDefsReach(CSMI, MI, PhysRefs))
+ FoundCSE = true;
}
if (!FoundCSE) {
@@ -432,8 +446,10 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
}
MI->eraseFromParent();
++NumCSEs;
- if (DefPhys)
+ if (!PhysRefs.empty())
++NumPhysCSEs;
+ if (Commuted)
+ ++NumCommutes;
} else {
DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
VNT.insert(MI, CurrVN++);
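The MachineCSE hunks above add two mechanisms: value numbers now live in a ScopedHashTable backed by a RecyclingAllocator, and a commutable instruction that misses in the table is retried in commuted form, then commuted back if that also misses. A standalone sketch of the retry flow, with a std::map standing in for ScopedHashTable and a toy Expr for MachineInstr (all names here are illustrative):

    #include <map>
    #include <string>
    #include <utility>

    struct Expr { std::string LHS, RHS; bool Commutable; };

    static std::string key(const Expr &E) { return E.LHS + "+" + E.RHS; }

    // Mirrors the FoundCSE/Commuted flow: look up, commute, look up again,
    // and undo the commute when the commuted form misses as well.
    bool findCSE(Expr &E, const std::map<std::string, unsigned> &VNT) {
      if (VNT.count(key(E)))
        return true;
      if (!E.Commutable)
        return false;
      std::swap(E.LHS, E.RHS);   // commuteInstruction analogue
      if (VNT.count(key(E)))
        return true;             // keep the commuted form
      std::swap(E.LHS, E.RHS);   // "commute it back!"
      return false;
    }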
diff --git a/contrib/llvm/lib/CodeGen/MachineDominators.cpp b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
index 3c67478..04c8ecb 100644
--- a/contrib/llvm/lib/CodeGen/MachineDominators.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
@@ -25,7 +25,7 @@ TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>);
char MachineDominatorTree::ID = 0;
INITIALIZE_PASS(MachineDominatorTree, "machinedomtree",
- "MachineDominator Tree Construction", true, true);
+ "MachineDominator Tree Construction", true, true)
char &llvm::MachineDominatorsID = MachineDominatorTree::ID;
@@ -42,6 +42,7 @@ bool MachineDominatorTree::runOnMachineFunction(MachineFunction &F) {
MachineDominatorTree::MachineDominatorTree()
: MachineFunctionPass(ID) {
+ initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<MachineBasicBlock>(false);
}
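This two-part pattern repeats across the whole import: the INITIALIZE_PASS* macros lose their trailing semicolons, and each pass constructor calls its generated initialize*Pass() hook so registration happens once, on first construction. A rough standalone sketch of the idiom under those assumptions (the registry and pass types below are stand-ins, not the LLVM classes):

    #include <cstdio>

    struct PassRegistry {
      static PassRegistry &get() { static PassRegistry R; return R; }
    };

    // Idempotent initializer, as the INITIALIZE_PASS* macros generate.
    static void initializeSketchPass(PassRegistry &) {
      static bool Done = false;
      if (!Done) { Done = true; std::puts("pass registered once"); }
    }

    struct SketchPass {
      SketchPass() { initializeSketchPass(PassRegistry::get()); }
    };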
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index 0171700..8553240 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -33,7 +33,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/GraphWriter.h"
@@ -52,14 +52,15 @@ void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
}
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
- unsigned FunctionNum, MachineModuleInfo &mmi)
- : Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi) {
+ unsigned FunctionNum, MachineModuleInfo &mmi,
+ GCModuleInfo* gmi)
+ : Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi), GMI(gmi) {
if (TM.getRegisterInfo())
RegInfo = new (Allocator) MachineRegisterInfo(*TM.getRegisterInfo());
else
RegInfo = 0;
MFInfo = 0;
- FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameInfo());
+ FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering());
if (Fn->hasFnAttr(Attribute::StackAlignment))
FrameInfo->setMaxAlignment(Attribute::getStackAlignmentFromAttrs(
Fn->getAttributes().getFnAttributes()));
@@ -190,20 +191,21 @@ MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
}
MachineMemOperand *
-MachineFunction::getMachineMemOperand(const Value *v, unsigned f,
- int64_t o, uint64_t s,
- unsigned base_alignment) {
- return new (Allocator) MachineMemOperand(v, f, o, s, base_alignment);
+MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
+ uint64_t s, unsigned base_alignment,
+ const MDNode *TBAAInfo) {
+ return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
+ TBAAInfo);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
return new (Allocator)
- MachineMemOperand(MMO->getValue(), MMO->getFlags(),
- int64_t(uint64_t(MMO->getOffset()) +
- uint64_t(Offset)),
- Size, MMO->getBaseAlignment());
+ MachineMemOperand(MachinePointerInfo(MMO->getValue(),
+ MMO->getOffset()+Offset),
+ MMO->getFlags(), Size,
+ MMO->getBaseAlignment(), 0);
}
MachineInstr::mmo_iterator
@@ -231,10 +233,10 @@ MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
else {
// Clone the MMO and unset the store flag.
MachineMemOperand *JustLoad =
- getMachineMemOperand((*I)->getValue(),
+ getMachineMemOperand((*I)->getPointerInfo(),
(*I)->getFlags() & ~MachineMemOperand::MOStore,
- (*I)->getOffset(), (*I)->getSize(),
- (*I)->getBaseAlignment());
+ (*I)->getSize(), (*I)->getBaseAlignment(),
+ (*I)->getTBAAInfo());
Result[Index] = JustLoad;
}
++Index;
@@ -263,10 +265,10 @@ MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
else {
// Clone the MMO and unset the load flag.
MachineMemOperand *JustStore =
- getMachineMemOperand((*I)->getValue(),
+ getMachineMemOperand((*I)->getPointerInfo(),
(*I)->getFlags() & ~MachineMemOperand::MOLoad,
- (*I)->getOffset(), (*I)->getSize(),
- (*I)->getBaseAlignment());
+ (*I)->getSize(), (*I)->getBaseAlignment(),
+ (*I)->getTBAAInfo());
Result[Index] = JustStore;
}
++Index;
@@ -279,7 +281,7 @@ void MachineFunction::dump() const {
print(dbgs());
}
-void MachineFunction::print(raw_ostream &OS) const {
+void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << "# Machine code for function " << Fn->getName() << ":\n";
// Print Frame Information
@@ -328,7 +330,7 @@ void MachineFunction::print(raw_ostream &OS) const {
for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
OS << '\n';
- BB->print(OS);
+ BB->print(OS, Indexes);
}
OS << "\n# End machine code for function " << Fn->getName() << ".\n\n";
@@ -346,17 +348,15 @@ namespace llvm {
std::string getNodeLabel(const MachineBasicBlock *Node,
const MachineFunction *Graph) {
- if (isSimple () && Node->getBasicBlock() &&
- !Node->getBasicBlock()->getName().empty())
- return Node->getBasicBlock()->getNameStr() + ":";
-
std::string OutStr;
{
raw_string_ostream OSS(OutStr);
-
- if (isSimple())
- OSS << Node->getNumber() << ':';
- else
+
+ if (isSimple()) {
+ OSS << "BB#" << Node->getNumber();
+ if (const BasicBlock *BB = Node->getBasicBlock())
+ OSS << ": " << BB->getName();
+ } else
Node->print(OSS);
}
@@ -396,7 +396,8 @@ void MachineFunction::viewCFGOnly() const
/// addLiveIn - Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
- const TargetRegisterClass *RC) {
+ const TargetRegisterClass *RC,
+ DebugLoc DL) {
MachineRegisterInfo &MRI = getRegInfo();
unsigned VReg = MRI.getLiveInVirtReg(PReg);
if (VReg) {
@@ -405,6 +406,7 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
}
VReg = MRI.createVirtualRegister(RC);
MRI.addLiveIn(PReg, VReg);
+ MRI.addLiveInLoc(VReg, DL);
return VReg;
}
@@ -426,6 +428,13 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
return Ctx.GetOrCreateSymbol(Name.str());
}
+/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
+/// base.
+MCSymbol *MachineFunction::getPICBaseSymbol() const {
+ const MCAsmInfo &MAI = *Target.getMCAsmInfo();
+ return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
+ Twine(getFunctionNumber())+"$pb");
+}
//===----------------------------------------------------------------------===//
// MachineFrameInfo implementation
@@ -485,7 +494,7 @@ MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;
- const TargetFrameInfo *FI = MF.getTarget().getFrameInfo();
+ const TargetFrameLowering *FI = MF.getTarget().getFrameLowering();
int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
OS << "Frame Objects:\n";
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
index 4f84b95..054c750 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -12,22 +12,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
using namespace llvm;
-// Register this pass with PassInfo directly to avoid having to define
-// a default constructor.
-static PassInfo
-X("Machine Function Analysis", "machine-function-analysis",
- &MachineFunctionAnalysis::ID, 0,
- /*CFGOnly=*/false, /*is_analysis=*/true);
-
char MachineFunctionAnalysis::ID = 0;
MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm,
CodeGenOpt::Level OL) :
FunctionPass(ID), TM(tm), OptLevel(OL), MF(0) {
+ initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
MachineFunctionAnalysis::~MachineFunctionAnalysis() {
@@ -52,7 +47,8 @@ bool MachineFunctionAnalysis::doInitialization(Module &M) {
bool MachineFunctionAnalysis::runOnFunction(Function &F) {
assert(!MF && "MachineFunctionAnalysis already initialized!");
MF = new MachineFunction(&F, TM, NextFnNum++,
- getAnalysis<MachineModuleInfo>());
+ getAnalysis<MachineModuleInfo>(),
+ getAnalysisIfAvailable<GCModuleInfo>());
return false;
}
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index 446e461..aa9ea61 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -102,13 +102,13 @@ void MachineOperand::setReg(unsigned Reg) {
if (MachineBasicBlock *MBB = MI->getParent())
if (MachineFunction *MF = MBB->getParent()) {
RemoveRegOperandFromRegInfo();
- Contents.Reg.RegNo = Reg;
+ SmallContents.RegNo = Reg;
AddRegOperandToRegInfo(&MF->getRegInfo());
return;
}
// Otherwise, just change the register, no problem. :)
- Contents.Reg.RegNo = Reg;
+ SmallContents.RegNo = Reg;
}
void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
@@ -159,7 +159,7 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
} else {
// Otherwise, change this to a register and set the reg#.
OpKind = MO_Register;
- Contents.Reg.RegNo = Reg;
+ SmallContents.RegNo = Reg;
// If this operand is embedded in a function, add the operand to the
// register's use/def list.
@@ -227,24 +227,11 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
if (const MachineBasicBlock *MBB = MI->getParent())
if (const MachineFunction *MF = MBB->getParent())
TM = &MF->getTarget();
+ const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : 0;
switch (getType()) {
case MachineOperand::MO_Register:
- if (getReg() == 0 || TargetRegisterInfo::isVirtualRegister(getReg())) {
- OS << "%reg" << getReg();
- } else {
- if (TM)
- OS << "%" << TM->getRegisterInfo()->get(getReg()).Name;
- else
- OS << "%physreg" << getReg();
- }
-
- if (getSubReg() != 0) {
- if (TM)
- OS << ':' << TM->getRegisterInfo()->getSubRegIndexName(getSubReg());
- else
- OS << ':' << getSubReg();
- }
+ OS << PrintReg(getReg(), TRI, getSubReg());
if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
isEarlyClobber()) {
@@ -335,10 +322,45 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
// MachineMemOperand Implementation
//===----------------------------------------------------------------------===//
-MachineMemOperand::MachineMemOperand(const Value *v, unsigned int f,
- int64_t o, uint64_t s, unsigned int a)
- : Offset(o), Size(s), V(v),
- Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)) {
+/// getAddrSpace - Return the LLVM IR address space number that this pointer
+/// points into.
+unsigned MachinePointerInfo::getAddrSpace() const {
+ if (V == 0) return 0;
+ return cast<PointerType>(V->getType())->getAddressSpace();
+}
+
+/// getConstantPool - Return a MachinePointerInfo record that refers to the
+/// constant pool.
+MachinePointerInfo MachinePointerInfo::getConstantPool() {
+ return MachinePointerInfo(PseudoSourceValue::getConstantPool());
+}
+
+/// getFixedStack - Return a MachinePointerInfo record that refers to the
+/// specified FrameIndex.
+MachinePointerInfo MachinePointerInfo::getFixedStack(int FI, int64_t offset) {
+ return MachinePointerInfo(PseudoSourceValue::getFixedStack(FI), offset);
+}
+
+MachinePointerInfo MachinePointerInfo::getJumpTable() {
+ return MachinePointerInfo(PseudoSourceValue::getJumpTable());
+}
+
+MachinePointerInfo MachinePointerInfo::getGOT() {
+ return MachinePointerInfo(PseudoSourceValue::getGOT());
+}
+
+MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
+ return MachinePointerInfo(PseudoSourceValue::getStack(), Offset);
+}
+
+MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
+ uint64_t s, unsigned int a,
+ const MDNode *TBAAInfo)
+ : PtrInfo(ptrinfo), Size(s),
+ Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
+ TBAAInfo(TBAAInfo) {
+ assert((PtrInfo.V == 0 || isa<PointerType>(PtrInfo.V->getType())) &&
+ "invalid pointer value");
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
}
@@ -346,9 +368,9 @@ MachineMemOperand::MachineMemOperand(const Value *v, unsigned int f,
/// Profile - Gather unique data for the object.
///
void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
- ID.AddInteger(Offset);
+ ID.AddInteger(getOffset());
ID.AddInteger(Size);
- ID.AddPointer(V);
+ ID.AddPointer(getValue());
ID.AddInteger(Flags);
}
@@ -364,8 +386,7 @@ void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
((Log2_32(MMO->getBaseAlignment()) + 1) << MOMaxBits);
// Also update the base and offset, because the new alignment may
// not be applicable with the old ones.
- V = MMO->getValue();
- Offset = MMO->getOffset();
+ PtrInfo = MMO->PtrInfo;
}
}
@@ -410,6 +431,16 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
MMO.getBaseAlignment() != MMO.getSize())
OS << "(align=" << MMO.getAlignment() << ")";
+ // Print TBAA info.
+ if (const MDNode *TBAAInfo = MMO.getTBAAInfo()) {
+ OS << "(tbaa=";
+ if (TBAAInfo->getNumOperands() > 0)
+ WriteAsOperand(OS, TBAAInfo->getOperand(0), /*PrintType=*/false);
+ else
+ OS << "<unknown>";
+ OS << ")";
+ }
+
return OS;
}
@@ -782,6 +813,14 @@ unsigned MachineInstr::getNumExplicitOperands() const {
return NumOperands;
}
+bool MachineInstr::isStackAligningInlineAsm() const {
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+ return true;
+ }
+ return false;
+}
/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
/// the specific register or -1 if it is not found. It further tightens
@@ -881,14 +920,15 @@ int MachineInstr::findFirstPredOperandIdx() const {
bool MachineInstr::
isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
if (isInlineAsm()) {
- assert(DefOpIdx >= 3);
+ assert(DefOpIdx > InlineAsm::MIOp_FirstOperand);
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
return false;
// Determine the actual operand index that corresponds to this index.
unsigned DefNo = 0;
unsigned DefPart = 0;
- for (unsigned i = 2, e = getNumOperands(); i < e; ) {
+ for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands();
+ i < e; ) {
const MachineOperand &FMO = getOperand(i);
// After the normal asm operands there may be additional imp-def regs.
if (!FMO.isImm())
@@ -903,7 +943,8 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
}
++DefNo;
}
- for (unsigned i = 2, e = getNumOperands(); i != e; ++i) {
+ for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands();
+ i != e; ++i) {
const MachineOperand &FMO = getOperand(i);
if (!FMO.isImm())
continue;
@@ -946,7 +987,8 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
// Find the flag operand corresponding to UseOpIdx
unsigned FlagIdx, NumOps=0;
- for (FlagIdx = 2; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
+ for (FlagIdx = InlineAsm::MIOp_FirstOperand;
+ FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
const MachineOperand &UFMO = getOperand(FlagIdx);
// After the normal asm operands there may be additional imp-def regs.
if (!UFMO.isImm())
@@ -964,9 +1006,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
if (!DefOpIdx)
return true;
- unsigned DefIdx = 2;
+ unsigned DefIdx = InlineAsm::MIOp_FirstOperand;
// Remember to adjust the index. First operand is asm string, second is
- // the AlignStack bit, then there is a flag for each.
+ // the HasSideEffects and AlignStack bits, then there is a flag for each.
while (DefNo) {
const MachineOperand &FMO = getOperand(DefIdx);
assert(FMO.isImm());
@@ -1071,7 +1113,9 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
SawStore = true;
return false;
}
- if (TID->isTerminator() || TID->hasUnmodeledSideEffects())
+
+ if (isLabel() || isDebugValue() ||
+ TID->isTerminator() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
@@ -1122,7 +1166,7 @@ bool MachineInstr::hasVolatileMemoryRef() const {
if (!TID->mayStore() &&
!TID->mayLoad() &&
!TID->isCall() &&
- !TID->hasUnmodeledSideEffects())
+ !hasUnmodeledSideEffects())
return false;
// Otherwise, if the instruction has no memory reference information,
@@ -1166,7 +1210,9 @@ bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
if (PSV->isConstant(MFI))
continue;
// If we have an AliasAnalysis, ask it whether the memory is constant.
- if (AA && AA->pointsToConstantMemory(V))
+ if (AA && AA->pointsToConstantMemory(
+ AliasAnalysis::Location(V, (*I)->getSize(),
+ (*I)->getTBAAInfo())))
continue;
}
@@ -1194,6 +1240,18 @@ unsigned MachineInstr::isConstantValuePHI() const {
return Reg;
}
+bool MachineInstr::hasUnmodeledSideEffects() const {
+ if (getDesc().hasUnmodeledSideEffects())
+ return true;
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+ return true;
+ }
+
+ return false;
+}
+
/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool MachineInstr::allDefsAreDead() const {
@@ -1207,6 +1265,17 @@ bool MachineInstr::allDefsAreDead() const {
return true;
}
+/// copyImplicitOps - Copy implicit register operands from the specified
+/// instruction to this instruction.
+void MachineInstr::copyImplicitOps(const MachineInstr *MI) {
+ for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
+ i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isImplicit())
+ addOperand(MO);
+ }
+}
+
void MachineInstr::dump() const {
dbgs() << " " << *this;
}
@@ -1257,7 +1326,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (StartOp != 0) OS << ", ";
getOperand(StartOp).print(OS, TM);
unsigned Reg = getOperand(StartOp).getReg();
- if (Reg && TargetRegisterInfo::isVirtualRegister(Reg))
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
VirtRegs.push_back(Reg);
}
@@ -1270,11 +1339,28 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
// Print the rest of the operands.
bool OmittedAnyCallClobbers = false;
bool FirstOp = true;
+
+ if (isInlineAsm()) {
+ // Print asm string.
+ OS << " ";
+ getOperand(InlineAsm::MIOp_AsmString).print(OS, TM);
+
+ // Print HasSideEffects, IsAlignStack
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+ OS << " [sideeffect]";
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+ OS << " [alignstack]";
+
+ StartOp = InlineAsm::MIOp_FirstOperand;
+ FirstOp = false;
+ }
+
+
for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
- if (MO.isReg() && MO.getReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
VirtRegs.push_back(MO.getReg());
// Omit call-clobbered registers which aren't used anywhere. This makes
@@ -1284,7 +1370,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (MF && getDesc().isCall() &&
MO.isReg() && MO.isImplicit() && MO.isDef()) {
unsigned Reg = MO.getReg();
- if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (MRI.use_empty(Reg) && !MRI.isLiveOut(Reg)) {
bool HasAliasLive = false;
@@ -1348,14 +1434,14 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (!HaveSemi) OS << ";"; HaveSemi = true;
for (unsigned i = 0; i != VirtRegs.size(); ++i) {
const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
- OS << " " << RC->getName() << ":%reg" << VirtRegs[i];
+ OS << " " << RC->getName() << ':' << PrintReg(VirtRegs[i]);
for (unsigned j = i+1; j != VirtRegs.size();) {
if (MRI->getRegClass(VirtRegs[j]) != RC) {
++j;
continue;
}
if (VirtRegs[i] != VirtRegs[j])
- OS << "," << VirtRegs[j];
+ OS << "," << PrintReg(VirtRegs[j]);
VirtRegs.erase(VirtRegs.begin()+j);
}
}
@@ -1533,8 +1619,7 @@ MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
switch (MO.getType()) {
default: break;
case MachineOperand::MO_Register:
- if (MO.isDef() && MO.getReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ if (MO.isDef() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
Key |= MO.getReg();
break;
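Several of the MachineInstr changes hinge on the INLINEASM operand layout: an ExtraInfo immediate now carries the HasSideEffects and IsAlignStack bits that print above as [sideeffect] and [alignstack]. A standalone sketch of the bit tests; the flag values are assumptions for illustration, not the real InlineAsm encoding:

    #include <cstdint>

    // Assumed bit layout, for this sketch only.
    enum : uint32_t {
      Extra_HasSideEffects = 1u << 0,
      Extra_IsAlignStack   = 1u << 1,
    };

    // Mirrors MachineInstr::hasUnmodeledSideEffects(): inline asm is treated
    // as side-effecting when the descriptor says so or the bit is set.
    bool hasUnmodeledSideEffects(bool DescSaysSo, bool IsInlineAsm,
                                 uint32_t ExtraInfo) {
      if (DescSaysSo)
        return true;
      return IsInlineAsm && (ExtraInfo & Extra_HasSideEffects) != 0;
    }

    // Mirrors MachineInstr::isStackAligningInlineAsm().
    bool isStackAligningInlineAsm(bool IsInlineAsm, uint32_t ExtraInfo) {
      return IsInlineAsm && (ExtraInfo & Extra_IsAlignStack) != 0;
    }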
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index 1a74b74..443fc2d 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -28,8 +28,10 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/DenseMap.h"
@@ -40,8 +42,14 @@
using namespace llvm;
-STATISTIC(NumHoisted, "Number of machine instructions hoisted out of loops");
-STATISTIC(NumCSEed, "Number of hoisted machine instructions CSEed");
+STATISTIC(NumHoisted,
+ "Number of machine instructions hoisted out of loops");
+STATISTIC(NumLowRP,
+ "Number of instructions hoisted in low reg pressure situation");
+STATISTIC(NumHighLatency,
+ "Number of high latency instructions hoisted");
+STATISTIC(NumCSEed,
+ "Number of hoisted machine instructions CSEed");
STATISTIC(NumPostRAHoisted,
"Number of machine instructions hoisted out of loops post regalloc");
@@ -51,9 +59,11 @@ namespace {
const TargetMachine *TM;
const TargetInstrInfo *TII;
+ const TargetLowering *TLI;
const TargetRegisterInfo *TRI;
const MachineFrameInfo *MFI;
- MachineRegisterInfo *RegInfo;
+ MachineRegisterInfo *MRI;
+ const InstrItineraryData *InstrItins;
// Various analyses that we use...
AliasAnalysis *AA; // Alias analysis info.
@@ -68,23 +78,37 @@ namespace {
BitVector AllocatableSet;
+ // Track 'estimated' register pressure.
+ SmallSet<unsigned, 32> RegSeen;
+ SmallVector<unsigned, 8> RegPressure;
+
+ // Register pressure "limit" per register class. If the pressure
+ // is higher than the limit, then it's considered high.
+ SmallVector<unsigned, 8> RegLimit;
+
+ // Register pressure on path leading from loop preheader to current BB.
+ SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
+
// For each opcode, keep a list of potential CSE instructions.
DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
public:
static char ID; // Pass identification, replacement for typeid
MachineLICM() :
- MachineFunctionPass(ID), PreRegAlloc(true) {}
+ MachineFunctionPass(ID), PreRegAlloc(true) {
+ initializeMachineLICMPass(*PassRegistry::getPassRegistry());
+ }
explicit MachineLICM(bool PreRA) :
- MachineFunctionPass(ID), PreRegAlloc(PreRA) {}
+ MachineFunctionPass(ID), PreRegAlloc(PreRA) {
+ initializeMachineLICMPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
const char *getPassName() const { return "Machine Instruction LICM"; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addRequired<AliasAnalysis>();
@@ -94,6 +118,13 @@ namespace {
}
virtual void releaseMemory() {
+ RegSeen.clear();
+ RegPressure.clear();
+ RegLimit.clear();
+ BackTrace.clear();
+ for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
+ CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
+ CI->second.clear();
CSEMap.clear();
}
@@ -138,6 +169,24 @@ namespace {
///
bool IsLoopInvariantInst(MachineInstr &I);
+    /// HasHighOperandLatency - Compute the operand latency between a def of
+    /// 'Reg' and a use in the current loop; return true if the target
+    /// considers it 'high'.
+ bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
+ unsigned Reg) const;
+
+ bool IsCheapInstruction(MachineInstr &MI) const;
+
+ /// CanCauseHighRegPressure - Visit BBs from header to current BB,
+ /// check if hoisting an instruction of the given cost matrix can cause high
+ /// register pressure.
+ bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
+
+ /// UpdateBackTraceRegPressure - Traverse the back trace from header to
+ /// the current block and update their register pressures to reflect the
+ /// effect of hoisting MI from the current block to the preheader.
+ void UpdateBackTraceRegPressure(const MachineInstr *MI);
+
/// IsProfitableToHoist - Return true if it is potentially profitable to
/// hoist the given loop invariant.
bool IsProfitableToHoist(MachineInstr &MI);
@@ -148,11 +197,16 @@ namespace {
/// visit definitions before uses, allowing us to hoist a loop body in one
/// pass without iteration.
///
- void HoistRegion(MachineDomTreeNode *N);
+ void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
+
+ /// InitRegPressure - Find all virtual register references that are liveout
+ /// of the preheader to initialize the starting "register pressure". Note
+ /// this does not count live through (livein but not used) registers.
+ void InitRegPressure(MachineBasicBlock *BB);
- /// isLoadFromConstantMemory - Return true if the given instruction is a
- /// load from constant memory.
- bool isLoadFromConstantMemory(MachineInstr *MI);
+ /// UpdateRegPressure - Update estimate of register pressure after the
+ /// specified instruction.
+ void UpdateRegPressure(const MachineInstr *MI);
/// ExtractHoistableLoad - Unfold a load from the given machineinstr if
/// the load itself could be hoisted. Return the unfolded and hoistable
@@ -174,8 +228,8 @@ namespace {
/// Hoist - When an instruction is found to only use loop invariant operands
/// that is safe to hoist, this instruction is called to do the dirty work.
- ///
- void Hoist(MachineInstr *MI);
+ /// It returns true if the instruction is hoisted.
+ bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
/// InitCSEMap - Initialize the CSE map with instructions that are in the
/// current loop preheader that may become duplicates of instructions that
@@ -189,8 +243,13 @@ namespace {
} // end anonymous namespace
char MachineLICM::ID = 0;
-INITIALIZE_PASS(MachineLICM, "machinelicm",
- "Machine Loop Invariant Code Motion", false, false);
+INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
+ "Machine Loop Invariant Code Motion", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineLICM, "machinelicm",
+ "Machine Loop Invariant Code Motion", false, false)
FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
return new MachineLICM(PreRegAlloc);
@@ -212,18 +271,32 @@ static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
if (PreRegAlloc)
- DEBUG(dbgs() << "******** Pre-regalloc Machine LICM ********\n");
+ DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
else
- DEBUG(dbgs() << "******** Post-regalloc Machine LICM ********\n");
+ DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
+ DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
Changed = FirstInLoop = false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
+ TLI = TM->getTargetLowering();
TRI = TM->getRegisterInfo();
MFI = MF.getFrameInfo();
- RegInfo = &MF.getRegInfo();
+ MRI = &MF.getRegInfo();
+ InstrItins = TM->getInstrItineraryData();
AllocatableSet = TRI->getAllocatableSet(MF);
+ if (PreRegAlloc) {
+ // Estimate register pressure during pre-regalloc pass.
+ unsigned NumRC = TRI->getNumRegClasses();
+ RegPressure.resize(NumRC);
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ RegLimit.resize(NumRC);
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I)
+ RegLimit[(*I)->getID()] = TLI->getRegPressureLimit(*I, MF);
+ }
+
// Get our Loop information...
MLI = &getAnalysis<MachineLoopInfo>();
DT = &getAnalysis<MachineDominatorTree>();
@@ -248,7 +321,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
// being hoisted.
MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
FirstInLoop = true;
- HoistRegion(N);
+ HoistRegion(N, true);
CSEMap.clear();
}
}
@@ -474,17 +547,33 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
/// first order w.r.t the DominatorTree. This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
///
-void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
+void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
assert(N != 0 && "Null dominator tree node?");
MachineBasicBlock *BB = N->getBlock();
// If this subregion is not in the top level loop at all, exit.
if (!CurLoop->contains(BB)) return;
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader)
+ return;
+
+ if (IsHeader) {
+ // Compute registers which are livein into the loop headers.
+ RegSeen.clear();
+ BackTrace.clear();
+ InitRegPressure(Preheader);
+ }
+
+ // Remember livein register pressure.
+ BackTrace.push_back(RegPressure);
+
for (MachineBasicBlock::iterator
MII = BB->begin(), E = BB->end(); MII != E; ) {
MachineBasicBlock::iterator NextMII = MII; ++NextMII;
- Hoist(&*MII);
+ MachineInstr *MI = &*MII;
+ if (!Hoist(MI, Preheader))
+ UpdateRegPressure(MI);
MII = NextMII;
}
@@ -496,6 +585,99 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
for (unsigned I = 0, E = Children.size(); I != E; ++I)
HoistRegion(Children[I]);
}
+
+ BackTrace.pop_back();
+}
+
+static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
+ return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
+}
+
+/// InitRegPressure - Find all virtual register references that are liveout of
+/// the preheader to initialize the starting "register pressure". Note this
+/// does not count live through (livein but not used) registers.
+void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+
+ // If the preheader has only a single predecessor and it ends with a
+ // fallthrough or an unconditional branch, then scan its predecessor for live
+ // defs as well. This happens whenever the preheader is created by splitting
+ // the critical edge from the loop predecessor to the loop header.
+ if (BB->pred_size() == 1) {
+ MachineBasicBlock *TBB = 0, *FBB = 0;
+ SmallVector<MachineOperand, 4> Cond;
+ if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
+ InitRegPressure(*BB->pred_begin());
+ }
+
+ for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
+ MII != E; ++MII) {
+ MachineInstr *MI = &*MII;
+ for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isImplicit())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+
+ bool isNew = RegSeen.insert(Reg);
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (MO.isDef())
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ else {
+ bool isKill = isOperandKill(MO, MRI);
+ if (isNew && !isKill)
+ // Haven't seen this, it must be a livein.
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ else if (!isNew && isKill)
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
+ }
+}
+
+/// UpdateRegPressure - Update estimate of register pressure after the
+/// specified instruction.
+void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
+ if (MI->isImplicitDef())
+ return;
+
+ SmallVector<unsigned, 4> Defs;
+ for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isImplicit())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+
+ bool isNew = RegSeen.insert(Reg);
+ if (MO.isDef())
+ Defs.push_back(Reg);
+ else if (!isNew && isOperandKill(MO, MRI)) {
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+
+ if (RCCost > RegPressure[RCId])
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= RCCost;
+ }
+ }
+
+ while (!Defs.empty()) {
+ unsigned Reg = Defs.pop_back_val();
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+ RegPressure[RCId] += RCCost;
+ }
}
/// IsLICMCandidate - Returns true if the instruction may be a suitable
@@ -535,14 +717,14 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!RegInfo->def_empty(Reg))
+ if (!MRI->def_empty(Reg))
return false;
if (AllocatableSet.test(Reg))
return false;
// Check for a def among the register's aliases too.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
- if (!RegInfo->def_empty(AliasReg))
+ if (!MRI->def_empty(AliasReg))
return false;
if (AllocatableSet.test(AliasReg))
return false;
@@ -562,12 +744,12 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
if (!MO.isUse())
continue;
- assert(RegInfo->getVRegDef(Reg) &&
+ assert(MRI->getVRegDef(Reg) &&
"Machine instr not mapped for this vreg?!");
// If the loop contains the definition of an operand, then the instruction
// isn't loop invariant.
- if (CurLoop->contains(RegInfo->getVRegDef(Reg)))
+ if (CurLoop->contains(MRI->getVRegDef(Reg)))
return false;
}
@@ -577,9 +759,9 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
/// HasPHIUses - Return true if the specified register has any PHI use.
-static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *RegInfo) {
- for (MachineRegisterInfo::use_iterator UI = RegInfo->use_begin(Reg),
- UE = RegInfo->use_end(); UI != UE; ++UI) {
+static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *MRI) {
+ for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
+ UE = MRI->use_end(); UI != UE; ++UI) {
MachineInstr *UseMI = &*UI;
if (UseMI->isPHI())
return true;
@@ -587,37 +769,210 @@ static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *RegInfo) {
return false;
}
-/// isLoadFromConstantMemory - Return true if the given instruction is a
-/// load from constant memory. Machine LICM will hoist these even if they are
-/// not re-materializable.
-bool MachineLICM::isLoadFromConstantMemory(MachineInstr *MI) {
- if (!MI->getDesc().mayLoad()) return false;
- if (!MI->hasOneMemOperand()) return false;
- MachineMemOperand *MMO = *MI->memoperands_begin();
- if (MMO->isVolatile()) return false;
- if (!MMO->getValue()) return false;
- const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(MMO->getValue());
- if (PSV) {
- MachineFunction &MF = *MI->getParent()->getParent();
- return PSV->isConstant(MF.getFrameInfo());
- } else {
- return AA->pointsToConstantMemory(MMO->getValue());
+
+/// HasHighOperandLatency - Compute the operand latency between a def of
+/// 'Reg' and a use in the current loop; return true if the target considers
+/// it 'high'.
+bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
+ unsigned DefIdx, unsigned Reg) const {
+ if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
+ return false;
+
+ for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
+ E = MRI->use_nodbg_end(); I != E; ++I) {
+ MachineInstr *UseMI = &*I;
+ if (UseMI->isCopyLike())
+ continue;
+ if (!CurLoop->contains(UseMI->getParent()))
+ continue;
+ for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = UseMI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (MOReg != Reg)
+ continue;
+
+ if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
+ return true;
+ }
+
+    // Only look at the first in-loop use.
+ break;
+ }
+
+ return false;
+}
+
+/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
+/// the operand latency between its def and a use is one or less.
+bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
+ if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
+ return true;
+ if (!InstrItins || InstrItins->isEmpty())
+ return false;
+
+ bool isCheap = false;
+ unsigned NumDefs = MI.getDesc().getNumDefs();
+ for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
+ MachineOperand &DefMO = MI.getOperand(i);
+ if (!DefMO.isReg() || !DefMO.isDef())
+ continue;
+ --NumDefs;
+ unsigned Reg = DefMO.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+
+ if (!TII->hasLowDefLatency(InstrItins, &MI, i))
+ return false;
+ isCheap = true;
+ }
+
+ return isCheap;
+}
+
+/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
+/// if hoisting an instruction of the given cost matrix can cause high
+/// register pressure.
+bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
+ for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
+ CI != CE; ++CI) {
+ if (CI->second <= 0)
+ continue;
+
+ unsigned RCId = CI->first;
+ for (unsigned i = BackTrace.size(); i != 0; --i) {
+ SmallVector<unsigned, 8> &RP = BackTrace[i-1];
+ if (RP[RCId] + CI->second >= RegLimit[RCId])
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
+/// current block and update their register pressures to reflect the effect
+/// of hoisting MI from the current block to the preheader.
+void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
+ if (MI->isImplicitDef())
+ return;
+
+ // First compute the 'cost' of the instruction, i.e. its contribution
+ // to register pressure.
+ DenseMap<unsigned, int> Cost;
+ for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isImplicit())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+ if (MO.isDef()) {
+ DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+ if (CI != Cost.end())
+ CI->second += RCCost;
+ else
+ Cost.insert(std::make_pair(RCId, RCCost));
+ } else if (isOperandKill(MO, MRI)) {
+ DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+ if (CI != Cost.end())
+ CI->second -= RCCost;
+ else
+ Cost.insert(std::make_pair(RCId, -RCCost));
+ }
+ }
+
+ // Update register pressure of blocks from loop header to current block.
+ for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
+ SmallVector<unsigned, 8> &RP = BackTrace[i];
+ for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
+ CI != CE; ++CI) {
+ unsigned RCId = CI->first;
+ RP[RCId] += CI->second;
+ }
}
}
/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
/// the given loop invariant.
bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
- // FIXME: For now, only hoist re-materilizable instructions. LICM will
- // increase register pressure. We want to make sure it doesn't increase
- // spilling.
+ if (MI.isImplicitDef())
+ return true;
+
+  // If the instruction is cheap, only hoist if it is re-materializable. LICM
+ // will increase register pressure. It's probably not worth it if the
+ // instruction is cheap.
// Also hoist loads from constant memory, e.g. load from stubs, GOT. Hoisting
  // these tends to help performance in low register pressure situations. The
  // trade-off is that it may cause spills in high pressure situations. It ends up
// adding a store in the loop preheader. But the reload is no more expensive.
// The side benefit is these loads are frequently CSE'ed.
- if (!TII->isTriviallyReMaterializable(&MI, AA)) {
- if (!isLoadFromConstantMemory(&MI))
+ if (IsCheapInstruction(MI)) {
+ if (!TII->isTriviallyReMaterializable(&MI, AA))
+ return false;
+ } else {
+ // Estimate register pressure to determine whether to LICM the instruction.
+    // In low register pressure situations, we can be more aggressive about
+    // hoisting. Also, favor hoisting long-latency instructions even in
+    // moderately high pressure situations.
+ // FIXME: If there are long latency loop-invariant instructions inside the
+ // loop at this point, why didn't the optimizer's LICM hoist them?
+ DenseMap<unsigned, int> Cost;
+ for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || MO.isImplicit())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ if (MO.isDef()) {
+ if (HasHighOperandLatency(MI, i, Reg)) {
+ ++NumHighLatency;
+ return true;
+ }
+
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+ DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+ if (CI != Cost.end())
+ CI->second += RCCost;
+ else
+ Cost.insert(std::make_pair(RCId, RCCost));
+ } else if (isOperandKill(MO, MRI)) {
+        // If a virtual register use is a kill, hoisting it out of the loop
+ // may actually reduce register pressure or be register pressure
+ // neutral.
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ EVT VT = *RC->vt_begin();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+ DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+ if (CI != Cost.end())
+ CI->second -= RCCost;
+ else
+ Cost.insert(std::make_pair(RCId, -RCCost));
+ }
+ }
+
+    // Visit BBs from header to current BB; if hoisting this doesn't cause
+    // high register pressure, then it's safe to proceed.
+ if (!CanCauseHighRegPressure(Cost)) {
+ ++NumLowRP;
+ return true;
+ }
+
+    // In a high register pressure situation, only hoist if the instruction
+    // is going to be remat'ed.
+ if (!TII->isTriviallyReMaterializable(&MI, AA) &&
+ !MI.isInvariantLoad(AA))
return false;
}
@@ -628,7 +983,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
- if (HasPHIUses(MO.getReg(), RegInfo))
+ if (HasPHIUses(MO.getReg(), MRI))
return false;
}
@@ -636,10 +991,14 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
}
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
+ // Don't unfold simple loads.
+ if (MI->getDesc().canFoldAsLoad())
+ return 0;
+
// If not, we may be able to unfold a load and hoist that.
// First test whether the instruction is loading from an amenable
// memory location.
- if (!isLoadFromConstantMemory(MI))
+ if (!MI->isInvariantLoad(AA))
return 0;
// Next determine the register class for a temporary register.
@@ -654,7 +1013,7 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
if (TID.getNumDefs() != 1) return 0;
const TargetRegisterClass *RC = TID.OpInfo[LoadRegIndex].getRegClass(TRI);
// Ok, we're unfolding. Create a temporary register and do the unfold.
- unsigned Reg = RegInfo->createVirtualRegister(RC);
+ unsigned Reg = MRI->createVirtualRegister(RC);
MachineFunction &MF = *MI->getParent()->getParent();
SmallVector<MachineInstr *, 2> NewMIs;
@@ -678,6 +1037,10 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
NewMIs[1]->eraseFromParent();
return 0;
}
+
+ // Update register pressure for the unfolded instruction.
+ UpdateRegPressure(NewMIs[1]);
+
// Otherwise we successfully unfolded a load that we can hoist.
MI->eraseFromParent();
return NewMIs[0];
@@ -686,20 +1049,15 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
const MachineInstr *MI = &*I;
- // FIXME: For now, only hoist re-materilizable instructions. LICM will
- // increase register pressure. We want to make sure it doesn't increase
- // spilling.
- if (TII->isTriviallyReMaterializable(MI, AA)) {
- unsigned Opcode = MI->getOpcode();
- DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
- CI = CSEMap.find(Opcode);
- if (CI != CSEMap.end())
- CI->second.push_back(MI);
- else {
- std::vector<const MachineInstr*> CSEMIs;
- CSEMIs.push_back(MI);
- CSEMap.insert(std::make_pair(Opcode, CSEMIs));
- }
+ unsigned Opcode = MI->getOpcode();
+ DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+ CI = CSEMap.find(Opcode);
+ if (CI != CSEMap.end())
+ CI->second.push_back(MI);
+ else {
+ std::vector<const MachineInstr*> CSEMIs;
+ CSEMIs.push_back(MI);
+ CSEMap.insert(std::make_pair(Opcode, CSEMIs));
}
}
}
@@ -709,7 +1067,7 @@ MachineLICM::LookForDuplicate(const MachineInstr *MI,
std::vector<const MachineInstr*> &PrevMIs) {
for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
const MachineInstr *PrevMI = PrevMIs[i];
- if (TII->produceSameValue(MI, PrevMI))
+ if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
return PrevMI;
}
return 0;
@@ -738,8 +1096,8 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
if (MO.isReg() && MO.isDef() &&
!TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- RegInfo->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
- RegInfo->clearKillFlags(Dup->getOperand(i).getReg());
+ MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
+ MRI->clearKillFlags(Dup->getOperand(i).getReg());
}
}
MI->eraseFromParent();
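// Illustrative sketch (not part of the patch): InitCSEMap/LookForDuplicate
// above bucket candidate instructions by opcode so a duplicate can be found
// with one map lookup plus a short linear scan. The same idea over a
// hypothetical, simplified instruction type:
#include <string>
#include <unordered_map>
#include <vector>

struct Instr {                       // toy stand-in for MachineInstr
  unsigned Opcode;
  std::vector<std::string> Operands;
};

// Return a previously seen instruction producing the same value, or null.
const Instr *lookForDuplicate(const Instr &MI,
                              const std::vector<const Instr *> &PrevMIs) {
  for (const Instr *Prev : PrevMIs)
    if (Prev->Opcode == MI.Opcode && Prev->Operands == MI.Operands)
      return Prev;
  return nullptr;
}

struct CSEMap {
  std::unordered_map<unsigned, std::vector<const Instr *>> Buckets;
  void add(const Instr *MI) { Buckets[MI->Opcode].push_back(MI); }
  const Instr *findDuplicate(const Instr &MI) const {
    auto It = Buckets.find(MI.Opcode);
    return It == Buckets.end() ? nullptr : lookForDuplicate(MI, It->second);
  }
};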
@@ -752,15 +1110,12 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
/// Hoist - When an instruction is found to use only loop invariant operands
/// that are safe to hoist, this instruction is called to do the dirty work.
///
-void MachineLICM::Hoist(MachineInstr *MI) {
- MachineBasicBlock *Preheader = getCurPreheader();
- if (!Preheader) return;
-
+bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
// First check whether we should hoist this instruction.
if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
// If not, try unfolding a hoistable load.
MI = ExtractHoistableLoad(MI);
- if (!MI) return;
+ if (!MI) return false;
}
// Now move the instruction to the predecessor, inserting it before any
@@ -791,13 +1146,16 @@ void MachineLICM::Hoist(MachineInstr *MI) {
// Otherwise, splice the instruction to the preheader.
Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
+ // Update register pressure for BBs from header to this block.
+ UpdateBackTraceRegPressure(MI);
+
// Clear the kill flags of any register this instruction defines,
// since they may need to be live throughout the entire loop
// rather than just live for part of it.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef() && !MO.isDead())
- RegInfo->clearKillFlags(MO.getReg());
+ MRI->clearKillFlags(MO.getReg());
}
// Add to the CSE map.
@@ -812,6 +1170,8 @@ void MachineLICM::Hoist(MachineInstr *MI) {
++NumHoisted;
Changed = true;
+
+ return true;
}
MachineBasicBlock *MachineLICM::getCurPreheader() {
diff --git a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
index bca4b0c..189cb2b 100644
--- a/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -30,8 +30,11 @@ TEMPLATE_INSTANTIATION(MLIB);
}
char MachineLoopInfo::ID = 0;
-INITIALIZE_PASS(MachineLoopInfo, "machine-loops",
- "Machine Natural Loop Construction", true, true);
+INITIALIZE_PASS_BEGIN(MachineLoopInfo, "machine-loops",
+ "Machine Natural Loop Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(MachineLoopInfo, "machine-loops",
+ "Machine Natural Loop Construction", true, true)
char &llvm::MachineLoopInfoID = MachineLoopInfo::ID;
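// Illustrative sketch (not part of the patch): the INITIALIZE_PASS_BEGIN/
// INITIALIZE_PASS_DEPENDENCY/INITIALIZE_PASS_END macros replace the old
// one-shot INITIALIZE_PASS so that a pass's dependencies are registered
// before the pass itself. Roughly, they expand to a once-only init function
// along these lines (types simplified and hypothetical):
#include <cstdio>

struct PassRegistry {
  void registerPass(const char *Arg, const char *Name) {
    std::printf("registered %s (%s)\n", Arg, Name);
  }
};

void initializeMachineDominatorTreePass(PassRegistry &R) {
  static bool Done = false;
  if (Done) return;
  Done = true;
  R.registerPass("machinedomtree", "MachineDominator Tree Construction");
}

void initializeMachineLoopInfoPass(PassRegistry &R) {
  static bool Done = false;
  if (Done) return;
  Done = true;
  initializeMachineDominatorTreePass(R);  // dependency registered first
  R.registerPass("machine-loops", "Machine Natural Loop Construction");
}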
diff --git a/contrib/llvm/lib/CodeGen/MachineLoopRanges.cpp b/contrib/llvm/lib/CodeGen/MachineLoopRanges.cpp
new file mode 100644
index 0000000..17fe67f
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineLoopRanges.cpp
@@ -0,0 +1,116 @@
+//===- MachineLoopRanges.cpp - Ranges of machine loops --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the implementation of the MachineLoopRanges analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineLoopRanges.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
+
+using namespace llvm;
+
+char MachineLoopRanges::ID = 0;
+INITIALIZE_PASS_BEGIN(MachineLoopRanges, "machine-loop-ranges",
+ "Machine Loop Ranges", true, true)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(MachineLoopRanges, "machine-loop-ranges",
+ "Machine Loop Ranges", true, true)
+
+char &llvm::MachineLoopRangesID = MachineLoopRanges::ID;
+
+void MachineLoopRanges::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<SlotIndexes>();
+ AU.addRequiredTransitive<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+/// runOnMachineFunction - Don't do much; loop ranges are computed on demand.
+bool MachineLoopRanges::runOnMachineFunction(MachineFunction &) {
+ releaseMemory();
+ Indexes = &getAnalysis<SlotIndexes>();
+ return false;
+}
+
+void MachineLoopRanges::releaseMemory() {
+ DeleteContainerSeconds(Cache);
+ Cache.clear();
+}
+
+MachineLoopRange *MachineLoopRanges::getLoopRange(const MachineLoop *Loop) {
+ MachineLoopRange *&Range = Cache[Loop];
+ if (!Range)
+ Range = new MachineLoopRange(Loop, Allocator, *Indexes);
+ return Range;
+}
+
+/// Create a MachineLoopRange, only accessible to MachineLoopRanges.
+MachineLoopRange::MachineLoopRange(const MachineLoop *loop,
+ MachineLoopRange::Allocator &alloc,
+ SlotIndexes &Indexes)
+ : Loop(loop), Intervals(alloc), Area(0) {
+ // Compute loop coverage.
+ for (MachineLoop::block_iterator I = Loop->block_begin(),
+ E = Loop->block_end(); I != E; ++I) {
+ const std::pair<SlotIndex, SlotIndex> &Range = Indexes.getMBBRange(*I);
+ Intervals.insert(Range.first, Range.second, 1u);
+ Area += Range.first.distance(Range.second);
+ }
+}
+
+/// overlaps - Return true if this loop overlaps the given range of machine
+/// instructions.
+bool MachineLoopRange::overlaps(SlotIndex Start, SlotIndex Stop) {
+ Map::const_iterator I = Intervals.find(Start);
+ return I.valid() && Stop > I.start();
+}
+
+unsigned MachineLoopRange::getNumber() const {
+ return Loop->getHeader()->getNumber();
+}
+
+/// byNumber - Comparator for array_pod_sort that sorts a list of
+/// MachineLoopRange pointers by number.
+int MachineLoopRange::byNumber(const void *pa, const void *pb) {
+ const MachineLoopRange *a = *static_cast<MachineLoopRange *const *>(pa);
+ const MachineLoopRange *b = *static_cast<MachineLoopRange *const *>(pb);
+ unsigned na = a->getNumber();
+ unsigned nb = b->getNumber();
+ if (na < nb)
+ return -1;
+ if (na > nb)
+ return 1;
+ return 0;
+}
+
+/// byAreaDesc - Comparator for array_pod_sort that sorts a list of
+/// MachineLoopRange pointers by:
+/// 1. Descending area.
+/// 2. Ascending number.
+int MachineLoopRange::byAreaDesc(const void *pa, const void *pb) {
+ const MachineLoopRange *a = *static_cast<MachineLoopRange *const *>(pa);
+ const MachineLoopRange *b = *static_cast<MachineLoopRange *const *>(pb);
+ if (a->getArea() != b->getArea())
+ return a->getArea() > b->getArea() ? -1 : 1;
+ return byNumber(pa, pb);
+}
+
+void MachineLoopRange::print(raw_ostream &OS) const {
+ OS << "Loop#" << getNumber() << " =";
+ for (Map::const_iterator I = Intervals.begin(); I.valid(); ++I)
+ OS << " [" << I.start() << ';' << I.stop() << ')';
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineLoopRange &MLR) {
+ MLR.print(OS);
+ return OS;
+}
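// Illustrative sketch (not part of the patch): MachineLoopRange records the
// slot-index span of every block in the loop and answers overlap queries
// against that coverage. The same interval logic over plain unsigned
// indices, using std::map in place of IntervalMap:
#include <iterator>
#include <map>

struct LoopCoverage {
  std::map<unsigned, unsigned> Intervals; // half-open [start, stop), by start
  unsigned Area = 0;                      // total covered slots

  // Assumes inserted ranges never overlap, as basic block ranges do not.
  void insert(unsigned Start, unsigned Stop) {
    Intervals[Start] = Stop;
    Area += Stop - Start;
  }

  // True if [Start, Stop) intersects any covered interval.
  bool overlaps(unsigned Start, unsigned Stop) const {
    auto It = Intervals.lower_bound(Start);  // first interval at/after Start
    if (It != Intervals.end() && It->first < Stop)
      return true;
    if (It != Intervals.begin())
      return std::prev(It)->second > Start;  // earlier interval runs past Start
    return false;
  }
};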
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index b647a4d..fadc594 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -29,7 +29,7 @@ using namespace llvm::dwarf;
// Handle the Pass registration stuff necessary to use TargetData's.
INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
- "Machine Module Information", false, false);
+ "Machine Module Information", false, false)
char MachineModuleInfo::ID = 0;
// Out of line virtual method.
@@ -41,30 +41,30 @@ class MMIAddrLabelMapCallbackPtr : CallbackVH {
public:
MMIAddrLabelMapCallbackPtr() : Map(0) {}
MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(0) {}
-
+
void setPtr(BasicBlock *BB) {
ValueHandleBase::operator=(BB);
}
-
+
void setMap(MMIAddrLabelMap *map) { Map = map; }
-
+
virtual void deleted();
virtual void allUsesReplacedWith(Value *V2);
};
-
+
class MMIAddrLabelMap {
MCContext &Context;
struct AddrLabelSymEntry {
/// Symbols - The symbols for the label. This is a pointer union that is
/// either one symbol (the common case) or a list of symbols.
PointerUnion<MCSymbol *, std::vector<MCSymbol*>*> Symbols;
-
+
Function *Fn; // The containing function of the BasicBlock.
unsigned Index; // The index in BBCallbacks for the BasicBlock.
};
-
+
DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry> AddrLabelSymbols;
-
+
/// BBCallbacks - Callbacks for the BasicBlocks that we have entries for. We
/// use this so we get notified if a block is deleted or RAUWd.
std::vector<MMIAddrLabelMapCallbackPtr> BBCallbacks;
@@ -76,23 +76,23 @@ class MMIAddrLabelMap {
DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >
DeletedAddrLabelsNeedingEmission;
public:
-
+
MMIAddrLabelMap(MCContext &context) : Context(context) {}
~MMIAddrLabelMap() {
assert(DeletedAddrLabelsNeedingEmission.empty() &&
"Some labels for deleted blocks never got emitted");
-
+
// Deallocate any of the 'list of symbols' case.
for (DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry>::iterator
I = AddrLabelSymbols.begin(), E = AddrLabelSymbols.end(); I != E; ++I)
if (I->second.Symbols.is<std::vector<MCSymbol*>*>())
delete I->second.Symbols.get<std::vector<MCSymbol*>*>();
}
-
+
MCSymbol *getAddrLabelSymbol(BasicBlock *BB);
std::vector<MCSymbol*> getAddrLabelSymbolToEmit(BasicBlock *BB);
- void takeDeletedSymbolsForFunction(Function *F,
+ void takeDeletedSymbolsForFunction(Function *F,
std::vector<MCSymbol*> &Result);
void UpdateForDeletedBlock(BasicBlock *BB);
@@ -104,7 +104,7 @@ MCSymbol *MMIAddrLabelMap::getAddrLabelSymbol(BasicBlock *BB) {
assert(BB->hasAddressTaken() &&
"Shouldn't get label for block without address taken");
AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
-
+
// If we already had an entry for this block, just return it.
if (!Entry.Symbols.isNull()) {
assert(BB->getParent() == Entry.Fn && "Parent changed");
@@ -112,7 +112,7 @@ MCSymbol *MMIAddrLabelMap::getAddrLabelSymbol(BasicBlock *BB) {
return Entry.Symbols.get<MCSymbol*>();
return (*Entry.Symbols.get<std::vector<MCSymbol*>*>())[0];
}
-
+
// Otherwise, this is a new entry, create a new symbol for it and add an
// entry to BBCallbacks so we can be notified if the BB is deleted or RAUWd.
BBCallbacks.push_back(BB);
@@ -129,9 +129,9 @@ MMIAddrLabelMap::getAddrLabelSymbolToEmit(BasicBlock *BB) {
assert(BB->hasAddressTaken() &&
"Shouldn't get label for block without address taken");
AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
-
+
std::vector<MCSymbol*> Result;
-
+
// If we already had an entry for this block, just return it.
if (Entry.Symbols.isNull())
Result.push_back(getAddrLabelSymbol(BB));
@@ -152,7 +152,7 @@ takeDeletedSymbolsForFunction(Function *F, std::vector<MCSymbol*> &Result) {
// If there are no entries for the function, just return.
if (I == DeletedAddrLabelsNeedingEmission.end()) return;
-
+
// Otherwise, take the list.
std::swap(Result, I->second);
DeletedAddrLabelsNeedingEmission.erase(I);
@@ -175,7 +175,7 @@ void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
if (MCSymbol *Sym = Entry.Symbols.dyn_cast<MCSymbol*>()) {
if (Sym->isDefined())
return;
-
+
// If the block is not yet defined, we need to emit it at the end of the
// function. Add the symbol to the DeletedAddrLabelsNeedingEmission list
// for the containing Function. Since the block is being deleted, its
@@ -187,7 +187,7 @@ void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
for (unsigned i = 0, e = Syms->size(); i != e; ++i) {
MCSymbol *Sym = (*Syms)[i];
if (Sym->isDefined()) continue; // Ignore already emitted labels.
-
+
// If the block is not yet defined, we need to emit it at the end of the
// function. Add the symbol to the DeletedAddrLabelsNeedingEmission list
// for the containing Function. Since the block is being deleted, its
@@ -195,7 +195,7 @@ void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
// 'Entry'.
DeletedAddrLabelsNeedingEmission[Entry.Fn].push_back(Sym);
}
-
+
// The entry is deleted, free the memory associated with the symbol list.
delete Syms;
}
@@ -225,7 +225,7 @@ void MMIAddrLabelMap::UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New) {
SymList->push_back(PrevSym);
NewEntry.Symbols = SymList;
}
-
+
std::vector<MCSymbol*> *SymList =
NewEntry.Symbols.get<std::vector<MCSymbol*>*>();
@@ -234,7 +234,7 @@ void MMIAddrLabelMap::UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New) {
SymList->push_back(Sym);
return;
}
-
+
// Otherwise, concatenate the list.
std::vector<MCSymbol*> *Syms = OldEntry.Symbols.get<std::vector<MCSymbol*>*>();
SymList->insert(SymList->end(), Syms->begin(), Syms->end());
@@ -253,10 +253,13 @@ void MMIAddrLabelMapCallbackPtr::allUsesReplacedWith(Value *V2) {
//===----------------------------------------------------------------------===//
-MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI)
-: ImmutablePass(ID), Context(MAI),
+MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI,
+ const TargetAsmInfo *TAI)
+: ImmutablePass(ID), Context(MAI, TAI),
ObjFileMMI(0),
- CurCallSite(0), CallsEHReturn(0), CallsUnwindInit(0), DbgInfoAvailable(false){
+ CurCallSite(0), CallsEHReturn(0), CallsUnwindInit(0), DbgInfoAvailable(false),
+ CallsExternalVAFunctionWithFloatingPointArguments(false) {
+ initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
// Always emit some info, by default "no personality" info.
Personalities.push_back(NULL);
AddrLabelSymbols = 0;
@@ -264,7 +267,7 @@ MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI)
}
MachineModuleInfo::MachineModuleInfo()
-: ImmutablePass(ID), Context(*(MCAsmInfo*)0) {
+: ImmutablePass(ID), Context(*(MCAsmInfo*)0, NULL) {
assert(0 && "This MachineModuleInfo constructor should never be called, MMI "
"should always be explicitly constructed by LLVMTargetMachine");
abort();
@@ -272,7 +275,7 @@ MachineModuleInfo::MachineModuleInfo()
MachineModuleInfo::~MachineModuleInfo() {
delete ObjFileMMI;
-
+
// FIXME: Why isn't doFinalization being called??
//assert(AddrLabelSymbols == 0 && "doFinalization not called");
delete AddrLabelSymbols;
@@ -472,7 +475,7 @@ void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
(LPMap && (*LPMap)[BeginLabel] != 0)) &&
(EndLabel->isDefined() ||
(LPMap && (*LPMap)[EndLabel] != 0))) continue;
-
+
LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
--j, --e;
@@ -562,20 +565,3 @@ unsigned MachineModuleInfo::getPersonalityIndex() const {
// in the zero index.
return 0;
}
-
-namespace {
- /// VariableDebugSorter - Comparison to sort the VariableDbgInfo map
- /// by source location, to avoid depending on the arbitrary order that
- /// instruction selection visits variables in.
- struct VariableDebugSorter {
- bool operator()(const MachineModuleInfo::VariableDbgInfoMapTy::value_type &A,
- const MachineModuleInfo::VariableDbgInfoMapTy::value_type &B)
- const {
- if (A.second.second.getLine() != B.second.second.getLine())
- return A.second.second.getLine() < B.second.second.getLine();
- if (A.second.second.getCol() != B.second.second.getCol())
- return A.second.second.getCol() < B.second.second.getCol();
- return false;
- }
- };
-}
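// Illustrative sketch (not part of the patch): AddrLabelSymEntry above packs
// "one symbol" (the common case) or "a list of symbols" into a single
// PointerUnion-typed field. The same small-case optimization sketched with
// std::variant and a string stand-in for MCSymbol*:
#include <string>
#include <utility>
#include <variant>
#include <vector>

using Symbol = std::string; // toy stand-in for MCSymbol*

struct AddrLabelEntry {
  std::variant<Symbol, std::vector<Symbol>> Symbols;

  explicit AddrLabelEntry(Symbol First) : Symbols(std::move(First)) {}

  void add(Symbol S) {
    if (auto *One = std::get_if<Symbol>(&Symbols)) {
      // Second symbol arrives: grow from the inline case to a vector.
      std::vector<Symbol> List;
      List.push_back(std::move(*One));
      List.push_back(std::move(S));
      Symbols = std::move(List);
    } else {
      std::get<std::vector<Symbol>>(Symbols).push_back(std::move(S));
    }
  }

  const Symbol &front() const {
    if (auto *One = std::get_if<Symbol>(&Symbols))
      return *One;
    return std::get<std::vector<Symbol>>(Symbols).front();
  }
};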
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 5d852f2..b3fb337 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -30,8 +30,9 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
MachineRegisterInfo::~MachineRegisterInfo() {
#ifndef NDEBUG
- for (unsigned i = 0, e = VRegInfo.size(); i != e; ++i)
- assert(VRegInfo[i].second == 0 && "Vreg use list non-empty still?");
+ for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i)
+ assert(VRegInfo[TargetRegisterInfo::index2VirtReg(i)].second == 0 &&
+ "Vreg use list non-empty still?");
for (unsigned i = 0, e = UsedPhysRegs.size(); i != e; ++i)
assert(!PhysRegUseDefLists[i] &&
"PhysRegUseDefLists has entries after all instructions are deleted");
@@ -44,20 +45,32 @@ MachineRegisterInfo::~MachineRegisterInfo() {
///
void
MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
- unsigned VR = Reg;
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- assert(Reg < VRegInfo.size() && "Invalid vreg!");
const TargetRegisterClass *OldRC = VRegInfo[Reg].first;
VRegInfo[Reg].first = RC;
// Remove from old register class's vregs list. This may be slow but
// fortunately this operation is rarely needed.
std::vector<unsigned> &VRegs = RegClass2VRegMap[OldRC->getID()];
- std::vector<unsigned>::iterator I = std::find(VRegs.begin(), VRegs.end(), VR);
+ std::vector<unsigned>::iterator I =
+ std::find(VRegs.begin(), VRegs.end(), Reg);
VRegs.erase(I);
// Add to new register class's vregs list.
- RegClass2VRegMap[RC->getID()].push_back(VR);
+ RegClass2VRegMap[RC->getID()].push_back(Reg);
+}
+
+const TargetRegisterClass *
+MachineRegisterInfo::constrainRegClass(unsigned Reg,
+ const TargetRegisterClass *RC) {
+ const TargetRegisterClass *OldRC = getRegClass(Reg);
+ if (OldRC == RC)
+ return RC;
+ const TargetRegisterClass *NewRC = getCommonSubClass(OldRC, RC);
+ if (!NewRC)
+ return 0;
+ if (NewRC != OldRC)
+ setRegClass(Reg, NewRC);
+ return NewRC;
}
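// Illustrative sketch (not part of the patch): constrainRegClass above
// narrows a register's class to the largest class compatible with both
// constraints, or fails. If register classes are modeled, very loosely, as
// bit sets of allowed physical registers, the common subclass is just the
// intersection:
#include <cstdint>

using RegClassMask = std::uint64_t;  // bit i set => physreg i allowed

// Returns the narrowed class, or 0 when the constraints are incompatible
// (playing the role of the null TargetRegisterClass result above).
inline RegClassMask constrainRegClassMask(RegClassMask Old, RegClassMask RC) {
  return Old & RC;
}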
/// createVirtualRegister - Create and return a new virtual register in the
@@ -66,17 +79,22 @@ MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
unsigned
MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
assert(RegClass && "Cannot create register without RegClass!");
+
+ // New virtual register number.
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
+
// Add a reg, but keep track of whether the vector reallocated or not.
- void *ArrayBase = VRegInfo.empty() ? 0 : &VRegInfo[0];
- VRegInfo.push_back(std::make_pair(RegClass, (MachineOperand*)0));
- RegAllocHints.push_back(std::make_pair(0, 0));
+ const unsigned FirstVirtReg = TargetRegisterInfo::index2VirtReg(0);
+ void *ArrayBase = getNumVirtRegs() == 0 ? 0 : &VRegInfo[FirstVirtReg];
+ VRegInfo.grow(Reg);
+ VRegInfo[Reg].first = RegClass;
+ RegAllocHints.grow(Reg);
- if (!((&VRegInfo[0] == ArrayBase || VRegInfo.size() == 1)))
+ if (ArrayBase && &VRegInfo[FirstVirtReg] != ArrayBase)
// The vector reallocated, handle this now.
HandleVRegListReallocation();
- unsigned VR = getLastVirtReg();
- RegClass2VRegMap[RegClass->getID()].push_back(VR);
- return VR;
+ RegClass2VRegMap[RegClass->getID()].push_back(Reg);
+ return Reg;
}
/// HandleVRegListReallocation - We just added a virtual register to the
@@ -85,11 +103,12 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
void MachineRegisterInfo::HandleVRegListReallocation() {
// The back pointers for the vreg lists point into the previous vector.
// Update them to point to their correct slots.
- for (unsigned i = 0, e = VRegInfo.size(); i != e; ++i) {
- MachineOperand *List = VRegInfo[i].second;
+ for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ MachineOperand *List = VRegInfo[Reg].second;
if (!List) continue;
// Update the back-pointer to be accurate once more.
- List->Contents.Reg.Prev = &VRegInfo[i].second;
+ List->Contents.Reg.Prev = &VRegInfo[Reg].second;
}
}
@@ -112,8 +131,6 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
- assert(Reg-TargetRegisterInfo::FirstVirtualRegister < VRegInfo.size() &&
- "Invalid vreg!");
// Since we are in SSA form, we can use the first definition.
if (!def_empty(Reg))
return &*def_begin(Reg);
@@ -193,8 +210,15 @@ MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
LiveIns.erase(LiveIns.begin() + i);
--i; --e;
} else {
+ DebugLoc DL;
+ // If there is a location for this live-in, then use it.
+ DenseMap<unsigned, DebugLoc>::iterator DLI =
+ LiveInLocs.find(LiveIns[i].second);
+ if (DLI != LiveInLocs.end())
+ DL = DLI->second;
+
// Emit a copy.
- BuildMI(*EntryMBB, EntryMBB->begin(), DebugLoc(),
+ BuildMI(*EntryMBB, EntryMBB->begin(), DL,
TII.get(TargetOpcode::COPY), LiveIns[i].second)
.addReg(LiveIns[i].first);
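// Illustrative sketch (not part of the patch): the changes above drop the
// old "vreg number = index + FirstVirtualRegister" arithmetic in favor of
// index2VirtReg/getNumVirtRegs. The assumed representation, simplified:
// virtual register numbers carry the top bit, so index and number convert
// with cheap bit operations.
#include <cassert>

inline bool isVirtualReg(unsigned Reg) {
  return (Reg & (1u << 31)) != 0;    // top bit set => virtual
}
inline unsigned index2VirtReg(unsigned Index) { return Index | (1u << 31); }
inline unsigned virtReg2Index(unsigned Reg) {
  assert(isVirtualReg(Reg) && "expected a virtual register");
  return Reg & ~(1u << 31);
}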
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index c8f8faf..8a93a24 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -25,6 +25,7 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -34,27 +35,31 @@ using namespace llvm;
static cl::opt<bool>
SplitEdges("machine-sink-split",
cl::desc("Split critical edges during machine sinking"),
- cl::init(false), cl::Hidden);
-static cl::opt<unsigned>
-SplitLimit("split-limit",
- cl::init(~0u), cl::Hidden);
+ cl::init(true), cl::Hidden);
-STATISTIC(NumSunk, "Number of machine instructions sunk");
-STATISTIC(NumSplit, "Number of critical edges split");
+STATISTIC(NumSunk, "Number of machine instructions sunk");
+STATISTIC(NumSplit, "Number of critical edges split");
+STATISTIC(NumCoalesces, "Number of copies coalesced");
namespace {
class MachineSinking : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- MachineRegisterInfo *RegInfo; // Machine register information
+ MachineRegisterInfo *MRI; // Machine register information
MachineDominatorTree *DT; // Machine dominator tree
MachineLoopInfo *LI;
AliasAnalysis *AA;
BitVector AllocatableSet; // Which physregs are allocatable?
+ // Remember which edges have been considered for breaking.
+ SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
+ CEBCandidates;
+
public:
static char ID; // Pass identification
- MachineSinking() : MachineFunctionPass(ID) {}
+ MachineSinking() : MachineFunctionPass(ID) {
+ initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -67,43 +72,125 @@ namespace {
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
}
+
+ virtual void releaseMemory() {
+ CEBCandidates.clear();
+ }
+
private:
bool ProcessBlock(MachineBasicBlock &MBB);
- MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *From,
- MachineBasicBlock *To);
+ bool isWorthBreakingCriticalEdge(MachineInstr *MI,
+ MachineBasicBlock *From,
+ MachineBasicBlock *To);
+ MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
+ MachineBasicBlock *From,
+ MachineBasicBlock *To,
+ bool BreakPHIEdge);
bool SinkInstruction(MachineInstr *MI, bool &SawStore);
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
- MachineBasicBlock *DefMBB, bool &LocalUse) const;
+ MachineBasicBlock *DefMBB,
+ bool &BreakPHIEdge, bool &LocalUse) const;
+ bool PerformTrivialForwardCoalescing(MachineInstr *MI,
+ MachineBasicBlock *MBB);
};
} // end anonymous namespace
char MachineSinking::ID = 0;
-INITIALIZE_PASS(MachineSinking, "machine-sink",
- "Machine code sinking", false, false);
+INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
+ "Machine code sinking", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineSinking, "machine-sink",
+ "Machine code sinking", false, false)
FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
+bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ if (!MI->isCopy())
+ return false;
+
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ unsigned DstReg = MI->getOperand(0).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ !TargetRegisterInfo::isVirtualRegister(DstReg) ||
+ !MRI->hasOneNonDBGUse(SrcReg))
+ return false;
+
+ const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
+ const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
+ if (SRC != DRC)
+ return false;
+
+ MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+ if (DefMI->isCopyLike())
+ return false;
+ DEBUG(dbgs() << "Coalescing: " << *DefMI);
+ DEBUG(dbgs() << "*** to: " << *MI);
+ MRI->replaceRegWith(DstReg, SrcReg);
+ MI->eraseFromParent();
+ ++NumCoalesces;
+ return true;
+}
+
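// Illustrative sketch (not part of the patch): PerformTrivialForwardCoalescing
// above replaces a copy's destination with its source everywhere and deletes
// the copy, provided the source is a virtual register with a single use and
// the register classes match. The same rewrite over a toy instruction list:
#include <cstddef>
#include <vector>

struct TinyInstr {
  bool IsCopy = false;
  unsigned Dst = 0, Src = 0;
};

static void replaceRegWith(std::vector<TinyInstr> &Body, unsigned From,
                           unsigned To) {
  for (TinyInstr &I : Body) {
    if (I.Dst == From) I.Dst = To;
    if (I.Src == From) I.Src = To;
  }
}

static unsigned useCount(const std::vector<TinyInstr> &Body, unsigned Reg) {
  unsigned N = 0;
  for (const TinyInstr &I : Body)
    N += (I.Src == Reg);
  return N;
}

// Coalesce Body[Idx] if it is a forwardable copy; returns true on success.
static bool coalesceCopy(std::vector<TinyInstr> &Body, std::size_t Idx) {
  TinyInstr MI = Body[Idx];
  if (!MI.IsCopy || useCount(Body, MI.Src) != 1)
    return false;                     // the copy must be the only use of Src
  Body.erase(Body.begin() + Idx);     // the copy becomes dead
  replaceRegWith(Body, MI.Dst, MI.Src);
  return true;
}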
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
-bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
- MachineBasicBlock *MBB,
- MachineBasicBlock *DefMBB,
- bool &LocalUse) const {
+bool
+MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *DefMBB,
+ bool &BreakPHIEdge,
+ bool &LocalUse) const {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
+
+ if (MRI->use_nodbg_empty(Reg))
+ return true;
+
// Ignoring debug uses is necessary so debug info doesn't affect the code.
// This may leave a referencing dbg_value in the original block, before
// the definition of the vreg. The DWARF generator handles this, although the
// user might not get the right info at runtime.
+
+ // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
+ // into and they are all PHI nodes. In this case, machine-sink must break
+ // the critical edge first. e.g.
+ //
+ // BB#1: derived from LLVM BB %bb4.preheader
+ // Predecessors according to CFG: BB#0
+ // ...
+ // %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
+ // ...
+ // JE_4 <BB#37>, %EFLAGS<imp-use>
+ // Successors according to CFG: BB#37 BB#2
+ //
+ // BB#2: derived from LLVM BB %bb.nph
+ // Predecessors according to CFG: BB#0 BB#1
+ // %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
+ BreakPHIEdge = true;
for (MachineRegisterInfo::use_nodbg_iterator
- I = RegInfo->use_nodbg_begin(Reg), E = RegInfo->use_nodbg_end();
+ I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
I != E; ++I) {
- // Determine the block of the use.
MachineInstr *UseInst = &*I;
MachineBasicBlock *UseBlock = UseInst->getParent();
+ if (!(UseBlock == MBB && UseInst->isPHI() &&
+ UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
+ BreakPHIEdge = false;
+ break;
+ }
+ }
+ if (BreakPHIEdge)
+ return true;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
+ I != E; ++I) {
+ // Determine the block of the use.
+ MachineInstr *UseInst = &*I;
+ MachineBasicBlock *UseBlock = UseInst->getParent();
if (UseInst->isPHI()) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
@@ -127,7 +214,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
const TargetMachine &TM = MF.getTarget();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
- RegInfo = &MF.getRegInfo();
+ MRI = &MF.getRegInfo();
DT = &getAnalysis<MachineDominatorTree>();
LI = &getAnalysis<MachineLoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
@@ -139,6 +226,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
bool MadeChange = false;
// Process all basic blocks.
+ CEBCandidates.clear();
for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I)
MadeChange |= ProcessBlock(*I);
@@ -177,6 +265,9 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
if (MI->isDebugValue())
continue;
+ if (PerformTrivialForwardCoalescing(MI, &MBB))
+ continue;
+
if (SinkInstruction(MI, SawStore))
++NumSunk, MadeChange = true;
@@ -186,51 +277,92 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
return MadeChange;
}
-MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineBasicBlock *FromBB,
- MachineBasicBlock *ToBB) {
+bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
+ MachineBasicBlock *From,
+ MachineBasicBlock *To) {
+ // FIXME: Need much better heuristics.
+
+ // If the pass has already considered breaking this edge (during this pass
+ // through the function), then let's go ahead and break it. This means
+ // sinking multiple "cheap" instructions into the same block.
+ if (!CEBCandidates.insert(std::make_pair(From, To)))
+ return true;
+
+ if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove())
+ return true;
+
+ // MI is cheap, so we probably don't want to break the critical edge for it.
+ // However, if this would allow some definitions of its source operands
+ // to be sunk then it's probably worth it.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+ if (MRI->hasOneNonDBGUse(Reg))
+ return true;
+ }
+
+ return false;
+}
+
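// Illustrative sketch (not part of the patch): CEBCandidates above memoizes
// edges already considered during this run, so when a second cheap
// instruction asks about the same edge the answer is "yes" and several cheap
// instructions can sink into one split block. The memoization, sketched with
// a toy cost predicate:
#include <set>
#include <utility>

using Edge = std::pair<int, int>;    // (FromBB number, ToBB number)

bool isWorthBreaking(std::set<Edge> &Candidates, Edge E, bool InstrIsCheap) {
  // insert().second is false when the edge was already considered.
  if (!Candidates.insert(E).second)
    return true;                     // seen before: go ahead and break it
  return !InstrIsCheap;              // first ask: only worth it when costly
}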
+MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
+ MachineBasicBlock *FromBB,
+ MachineBasicBlock *ToBB,
+ bool BreakPHIEdge) {
+ if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
+ return 0;
+
// Avoid breaking the back edge. From == To means a backedge in a single-BB loop.
- if (!SplitEdges || NumSplit == SplitLimit || FromBB == ToBB)
+ if (!SplitEdges || FromBB == ToBB)
+ return 0;
+
+ // Check for backedges of more "complex" loops.
+ if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
+ LI->isLoopHeader(ToBB))
return 0;
- // Check for more "complex" loops.
- if (LI->getLoopFor(FromBB) != LI->getLoopFor(ToBB) ||
- !LI->isLoopHeader(ToBB)) {
- // It's not always legal to break critical edges and sink the computation
- // to the edge.
- //
- // BB#1:
- // v1024
- // Beq BB#3
- // <fallthrough>
- // BB#2:
- // ... no uses of v1024
- // <fallthrough>
- // BB#3:
- // ...
- // = v1024
- //
- // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
- //
- // BB#1:
- // ...
- // Bne BB#2
- // BB#4:
- // v1024 =
- // B BB#3
- // BB#2:
- // ... no uses of v1024
- // <fallthrough>
- // BB#3:
- // ...
- // = v1024
- //
- // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
- // flow. We need to ensure the new basic block where the computation is
- // sunk to dominates all the uses.
- // It's only legal to break critical edge and sink the computation to the
- // new block if all the predecessors of "To", except for "From", are
- // not dominated by "From". Given SSA property, this means these
- // predecessors are dominated by "To".
+ // It's not always legal to break critical edges and sink the computation
+ // to the edge.
+ //
+ // BB#1:
+ // v1024
+ // Beq BB#3
+ // <fallthrough>
+ // BB#2:
+ // ... no uses of v1024
+ // <fallthrough>
+ // BB#3:
+ // ...
+ // = v1024
+ //
+ // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
+ //
+ // BB#1:
+ // ...
+ // Bne BB#2
+ // BB#4:
+ // v1024 =
+ // B BB#3
+ // BB#2:
+ // ... no uses of v1024
+ // <fallthrough>
+ // BB#3:
+ // ...
+ // = v1024
+ //
+ // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
+ // flow. We need to ensure the new basic block where the computation is
+ // sunk to dominates all the uses.
+ // It's only legal to break critical edge and sink the computation to the
+ // new block if all the predecessors of "To", except for "From", are
+ // not dominated by "From". Given SSA property, this means these
+ // predecessors are dominated by "To".
+ //
+ // There is no need to do this check if all the uses are PHI nodes. PHI
+ // sources are only defined on the specific predecessor edges.
+ if (!BreakPHIEdge) {
for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
E = ToBB->pred_end(); PI != E; ++PI) {
if (*PI == FromBB)
@@ -238,17 +370,23 @@ MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineBasicBlock *FromBB,
if (!DT->dominates(ToBB, *PI))
return 0;
}
-
- // FIXME: Determine if it's cost effective to break this edge.
- return FromBB->SplitCriticalEdge(ToBB, this);
}
- return 0;
+ return FromBB->SplitCriticalEdge(ToBB, this);
+}
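// Illustrative sketch (not part of the patch): the legality rule from the
// long comment above — splitting From->To and sinking into the new block is
// only safe when every predecessor of To other than From is dominated by To.
// A sketch with dominance supplied by the caller (hypothetical oracle):
#include <functional>
#include <vector>

struct Block {
  std::vector<const Block *> Preds;
};

static bool canSinkAlongSplitEdge(
    const Block *From, const Block *To,
    const std::function<bool(const Block *, const Block *)> &Dominates) {
  for (const Block *P : To->Preds) {
    if (P == From)
      continue;
    if (!Dominates(To, P))  // some path reaches a use without the new block
      return false;
  }
  return true;
}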
+
+static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
+ return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
+ // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
+ // be close to the source to make it easier to coalesce.
+ if (AvoidsSinking(MI, MRI))
+ return false;
+
// Check if it's safe to move the instruction.
if (!MI->isSafeToMove(TII, AA, SawStore))
return false;
@@ -269,6 +407,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// decide.
MachineBasicBlock *SuccToSinkTo = 0;
+ bool BreakPHIEdge = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
@@ -281,7 +420,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!RegInfo->def_empty(Reg))
+ if (!MRI->def_empty(Reg))
return false;
if (AllocatableSet.test(Reg))
@@ -290,7 +429,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// Check for a def among the register's aliases too.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
- if (!RegInfo->def_empty(AliasReg))
+ if (!MRI->def_empty(AliasReg))
return false;
if (AllocatableSet.test(AliasReg))
@@ -305,7 +444,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
if (MO.isUse()) continue;
// If it's not safe to move defs of the register class, then abort.
- if (!TII->isSafeToMoveRegClassDefs(RegInfo->getRegClass(Reg)))
+ if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
return false;
// FIXME: This picks a successor to sink into based on having one
@@ -327,7 +466,8 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If a previous operand picked a block to sink to, then this operand
// must be sinkable to the same block.
bool LocalUse = false;
- if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock, LocalUse))
+ if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
+ BreakPHIEdge, LocalUse))
return false;
continue;
@@ -338,7 +478,8 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
E = ParentBlock->succ_end(); SI != E; ++SI) {
bool LocalUse = false;
- if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock, LocalUse)) {
+ if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock,
+ BreakPHIEdge, LocalUse)) {
SuccToSinkTo = *SI;
break;
}
@@ -384,7 +525,6 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If the block has multiple predecessors, this would introduce computation on
// a path where it doesn't already exist. We could split the critical edge,
// but for now we just punt.
- // FIXME: Split critical edges if not backedges.
if (SuccToSinkTo->pred_size() > 1) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
@@ -412,10 +552,11 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
if (!TryBreak)
DEBUG(dbgs() << "Sinking along critical edge.\n");
else {
- MachineBasicBlock *NewSucc = SplitCriticalEdge(ParentBlock, SuccToSinkTo);
+ MachineBasicBlock *NewSucc =
+ SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
if (!NewSucc) {
- DEBUG(dbgs() <<
- " *** PUNTING: Not legal or profitable to break critical edge\n");
+ DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+ "break critical edge\n");
return false;
} else {
DEBUG(dbgs() << " *** Splitting critical edge:"
@@ -424,10 +565,31 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
<< " -- BB#" << SuccToSinkTo->getNumber() << '\n');
SuccToSinkTo = NewSucc;
++NumSplit;
+ BreakPHIEdge = false;
}
}
}
+ if (BreakPHIEdge) {
+ // BreakPHIEdge is true if all the uses are in the successor MBB being
+ // sunk into and they are all PHI nodes. In this case, machine-sink must
+ // break the critical edge first.
+ MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
+ SuccToSinkTo, BreakPHIEdge);
+ if (!NewSucc) {
+ DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+ "break critical edge\n");
+ return false;
+ }
+
+ DEBUG(dbgs() << " *** Splitting critical edge:"
+ " BB#" << ParentBlock->getNumber()
+ << " -- BB#" << NewSucc->getNumber()
+ << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
+ SuccToSinkTo = NewSucc;
+ ++NumSplit;
+ }
+
// Determine where to insert into. Skip phi nodes.
MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index 1e88562..7351119 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -26,6 +26,7 @@
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
@@ -45,14 +46,16 @@ using namespace llvm;
namespace {
struct MachineVerifier {
- MachineVerifier(Pass *pass) :
+ MachineVerifier(Pass *pass, const char *b) :
PASS(pass),
+ Banner(b),
OutFileName(getenv("LLVM_VERIFY_MACHINEINSTRS"))
{}
bool runOnMachineFunction(MachineFunction &MF);
Pass *const PASS;
+ const char *Banner;
const char *const OutFileName;
raw_ostream *OS;
const MachineFunction *MF;
@@ -71,6 +74,8 @@ namespace {
RegVector regsDefined, regsDead, regsKilled;
RegSet regsLiveInButUnused;
+ SlotIndex lastIndex;
+
// Add Reg and any sub-registers to RV
void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
RV.push_back(Reg);
@@ -167,7 +172,9 @@ namespace {
// Analysis information if available
LiveVariables *LiveVars;
- const LiveIntervals *LiveInts;
+ LiveIntervals *LiveInts;
+ LiveStacks *LiveStks;
+ SlotIndexes *Indexes;
void visitMachineFunctionBefore();
void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
@@ -193,9 +200,12 @@ namespace {
struct MachineVerifierPass : public MachineFunctionPass {
static char ID; // Pass ID, replacement for typeid
+ const char *const Banner;
- MachineVerifierPass()
- : MachineFunctionPass(ID) {}
+ MachineVerifierPass(const char *b = 0)
+ : MachineFunctionPass(ID), Banner(b) {
+ initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
+ }
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -203,7 +213,7 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &MF) {
- MF.verify(this);
+ MF.verify(this, Banner);
return false;
}
};
@@ -212,14 +222,15 @@ namespace {
char MachineVerifierPass::ID = 0;
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
- "Verify generated machine code", false, false);
+ "Verify generated machine code", false, false)
-FunctionPass *llvm::createMachineVerifierPass() {
- return new MachineVerifierPass();
+FunctionPass *llvm::createMachineVerifierPass(const char *Banner) {
+ return new MachineVerifierPass(Banner);
}
-void MachineFunction::verify(Pass *p) const {
- MachineVerifier(p).runOnMachineFunction(const_cast<MachineFunction&>(*this));
+void MachineFunction::verify(Pass *p, const char *Banner) const {
+ MachineVerifier(p, Banner)
+ .runOnMachineFunction(const_cast<MachineFunction&>(*this));
}
bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
@@ -247,11 +258,15 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
LiveVars = NULL;
LiveInts = NULL;
+ LiveStks = NULL;
+ Indexes = NULL;
if (PASS) {
LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
// We don't want to verify LiveVariables if LiveIntervals is available.
if (!LiveInts)
LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
+ LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
+ Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
}
visitMachineFunctionBefore();
@@ -260,6 +275,11 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
visitMachineBasicBlockBefore(MFI);
for (MachineBasicBlock::const_iterator MBBI = MFI->begin(),
MBBE = MFI->end(); MBBI != MBBE; ++MBBI) {
+ if (MBBI->getParent() != MFI) {
+ report("Bad instruction parent pointer", MFI);
+ *OS << "Instruction: " << *MBBI;
+ continue;
+ }
visitMachineInstrBefore(MBBI);
for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I)
visitMachineOperand(&MBBI->getOperand(I), I);
@@ -288,8 +308,11 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
assert(MF);
*OS << '\n';
- if (!foundErrors++)
- MF->print(*OS);
+ if (!foundErrors++) {
+ if (Banner)
+ *OS << "# " << Banner << '\n';
+ MF->print(*OS, Indexes);
+ }
*OS << "*** Bad machine code: " << msg << " ***\n"
<< "- function: " << MF->getFunction()->getNameStr() << "\n";
}
@@ -299,13 +322,19 @@ void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
report(msg, MBB->getParent());
*OS << "- basic block: " << MBB->getName()
<< " " << (void*)MBB
- << " (BB#" << MBB->getNumber() << ")\n";
+ << " (BB#" << MBB->getNumber() << ")";
+ if (Indexes)
+ *OS << " [" << Indexes->getMBBStartIdx(MBB)
+ << ';' << Indexes->getMBBEndIdx(MBB) << ')';
+ *OS << '\n';
}
void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
assert(MI);
report(msg, MI->getParent());
*OS << "- instruction: ";
+ if (Indexes && Indexes->hasIndex(MI))
+ *OS << Indexes->getInstructionIndex(MI) << '\t';
MI->print(*OS, TM);
}
@@ -329,6 +358,7 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
}
void MachineVerifier::visitMachineFunctionBefore() {
+ lastIndex = SlotIndex();
regsReserved = TRI->getReservedRegs(*MF);
// A sub-register of a reserved register is also reserved
@@ -357,6 +387,16 @@ void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ // Count the number of landing pad successors.
+ SmallPtrSet<MachineBasicBlock*, 4> LandingPadSuccs;
+ for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end(); I != E; ++I) {
+ if ((*I)->isLandingPad())
+ LandingPadSuccs.insert(*I);
+ }
+ if (LandingPadSuccs.size() > 1)
+ report("MBB has more than one landing pad successor", MBB);
+
// Call AnalyzeBranch. If it succeeds, there are several more conditions to check.
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
@@ -372,14 +412,14 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
// It's possible that the block legitimately ends with a noreturn
// call or an unreachable, in which case it won't actually fall
// out the bottom of the function.
- } else if (MBB->succ_empty()) {
+ } else if (MBB->succ_size() == LandingPadSuccs.size()) {
// It's possible that the block legitimately ends with a noreturn
// call or an unreachable, in which case it won't actually fall
// out of the block.
- } else if (MBB->succ_size() != 1) {
+ } else if (MBB->succ_size() != 1+LandingPadSuccs.size()) {
report("MBB exits via unconditional fall-through but doesn't have "
"exactly one CFG successor!", MBB);
- } else if (MBB->succ_begin()[0] != MBBI) {
+ } else if (!MBB->isSuccessor(MBBI)) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
@@ -394,10 +434,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
}
} else if (TBB && !FBB && Cond.empty()) {
// Block unconditionally branches somewhere.
- if (MBB->succ_size() != 1) {
+ if (MBB->succ_size() != 1+LandingPadSuccs.size()) {
report("MBB exits via unconditional branch but doesn't have "
"exactly one CFG successor!", MBB);
- } else if (MBB->succ_begin()[0] != TBB) {
+ } else if (!MBB->isSuccessor(TBB)) {
report("MBB exits via unconditional branch but the CFG "
"successor doesn't match the actual successor!", MBB);
}
@@ -487,6 +527,9 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
regsKilled.clear();
regsDefined.clear();
+
+ if (Indexes)
+ lastIndex = Indexes->getMBBStartIdx(MBB);
}
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
@@ -525,6 +568,7 @@ void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const TargetInstrDesc &TI = MI->getDesc();
+ const TargetOperandInfo &TOI = TI.OpInfo[MONum];
// The first TI.NumDefs operands must be explicit register defines
if (MONum < TI.getNumDefs()) {
@@ -535,9 +579,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < TI.getNumOperands()) {
- if (MO->isReg()) {
- if (MO->isDef())
- report("Explicit operand marked as def", MO, MONum);
+ // Don't check if it's the last operand in a variadic instruction. See,
+ // e.g., LDM_RET in the ARM back end.
+ if (MO->isReg() && !(TI.isVariadic() && MONum == TI.getNumOperands()-1)) {
+ if (MO->isDef() && !TOI.isOptionalDef())
+ report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
@@ -554,7 +600,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
return;
// Check Live Variables.
- if (MO->isUndef()) {
+ if (MI->isDebugValue()) {
+ // Liveness checks are not valid for debug values.
+ } else if (MO->isUndef()) {
// An <undef> doesn't refer to any register, so just skip it.
} else if (MO->isUse()) {
regsLiveInButUnused.erase(Reg);
@@ -566,7 +614,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
unsigned DefReg = MI->getOperand(defIdx).getReg();
if (Reg == DefReg) {
isKill = true;
- // ANd in that case an explicit kill flag is not allowed.
+ // And in that case an explicit kill flag is not allowed.
if (MO->isKill())
report("Illegal kill flag on two-address instruction operand",
MO, MONum);
@@ -590,7 +638,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
// Check LiveInts liveness and kill.
- if (LiveInts && !LiveInts->isNotInMIMap(MI)) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ LiveInts && !LiveInts->isNotInMIMap(MI)) {
SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getUseIndex();
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
@@ -598,8 +647,13 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
report("No live range at use", MO, MONum);
*OS << UseIdx << " is not live in " << LI << '\n';
}
- // TODO: Verify isKill == LI.killedAt.
- } else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ // Check for extra kill flags.
+ // Note that we allow missing kill flags for now.
+ if (MO->isKill() && !LI.killedAt(UseIdx.getDefIndex())) {
+ report("Live range continues after kill flag", MO, MONum);
+ *OS << "Live range: " << LI << '\n';
+ }
+ } else {
report("Virtual register has no Live interval", MO, MONum);
}
}
@@ -636,11 +690,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getDefIndex();
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
- if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx)) {
- assert(LR->valno && "NULL valno is not allowed");
- if (LR->valno->def != DefIdx) {
+ if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
+ assert(VNI && "NULL valno is not allowed");
+ if (VNI->def != DefIdx && !MO->isEarlyClobber()) {
report("Inconsistent valno->def", MO, MONum);
- *OS << "Valno " << LR->valno->id << " is not defined at "
+ *OS << "Valno " << VNI->id << " is not defined at "
<< DefIdx << " in " << LI << '\n';
}
} else {
@@ -655,7 +709,6 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// Check register classes.
if (MONum < TI.getNumOperands() && !MO->isImplicit()) {
- const TargetOperandInfo &TOI = TI.OpInfo[MONum];
unsigned SubIdx = MO->getSubReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
@@ -706,6 +759,22 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
report("PHI operand is not in the CFG", MO, MONum);
break;
+ case MachineOperand::MO_FrameIndex:
+ if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
+ LiveInts && !LiveInts->isNotInMIMap(MI)) {
+ LiveInterval &LI = LiveStks->getInterval(MO->getIndex());
+ SlotIndex Idx = LiveInts->getInstructionIndex(MI);
+ if (TI.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
+ report("Instruction loads from dead spill slot", MO, MONum);
+ *OS << "Live stack: " << LI << '\n';
+ }
+ if (TI.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
+ report("Instruction stores to dead spill slot", MO, MONum);
+ *OS << "Live stack: " << LI << '\n';
+ }
+ }
+ break;
+
default:
break;
}
@@ -717,12 +786,31 @@ void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
set_subtract(regsLive, regsKilled); regsKilled.clear();
set_subtract(regsLive, regsDead); regsDead.clear();
set_union(regsLive, regsDefined); regsDefined.clear();
+
+ if (Indexes && Indexes->hasIndex(MI)) {
+ SlotIndex idx = Indexes->getInstructionIndex(MI);
+ if (!(idx > lastIndex)) {
+ report("Instruction index out of order", MI);
+ *OS << "Last instruction was at " << lastIndex << '\n';
+ }
+ lastIndex = idx;
+ }
}
void
MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
MBBInfoMap[MBB].regsLiveOut = regsLive;
regsLive.clear();
+
+ if (Indexes) {
+ SlotIndex stop = Indexes->getMBBEndIdx(MBB);
+ if (!(stop > lastIndex)) {
+ report("Block ends before last instruction index", MBB);
+ *OS << "Block ends at " << stop
+ << " last instruction was at " << lastIndex << '\n';
+ }
+ lastIndex = stop;
+ }
}
// Calculate the largest possible vregsPassed sets. These are the registers that
@@ -854,8 +942,8 @@ void MachineVerifier::visitMachineFunctionAfter() {
void MachineVerifier::verifyLiveVariables() {
assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
- for (unsigned Reg = TargetRegisterInfo::FirstVirtualRegister,
- RegE = MRI->getLastVirtReg()-1; Reg != RegE; ++Reg) {
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
MFI != MFE; ++MFI) {
@@ -865,13 +953,13 @@ void MachineVerifier::verifyLiveVariables() {
if (MInfo.vregsRequired.count(Reg)) {
if (!VI.AliveBlocks.test(MFI->getNumber())) {
report("LiveVariables: Block missing from AliveBlocks", MFI);
- *OS << "Virtual register %reg" << Reg
+ *OS << "Virtual register " << PrintReg(Reg)
<< " must be live through the block.\n";
}
} else {
if (VI.AliveBlocks.test(MFI->getNumber())) {
report("LiveVariables: Block should not be in AliveBlocks", MFI);
- *OS << "Virtual register %reg" << Reg
+ *OS << "Virtual register " << PrintReg(Reg)
<< " is not needed live through the block.\n";
}
}
@@ -884,14 +972,24 @@ void MachineVerifier::verifyLiveIntervals() {
for (LiveIntervals::const_iterator LVI = LiveInts->begin(),
LVE = LiveInts->end(); LVI != LVE; ++LVI) {
const LiveInterval &LI = *LVI->second;
+
+ // Spilling and splitting may leave unused registers around. Skip them.
+ if (MRI->use_empty(LI.reg))
+ continue;
+
+ // Physical registers have much weirdness going on, mostly from coalescing.
+ // We should probably fix it, but for now just ignore them.
+ if (TargetRegisterInfo::isPhysicalRegister(LI.reg))
+ continue;
+
assert(LVI->first == LI.reg && "Invalid reg to interval mapping");
for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
I!=E; ++I) {
VNInfo *VNI = *I;
- const LiveRange *DefLR = LI.getLiveRangeContaining(VNI->def);
+ const VNInfo *DefVNI = LI.getVNInfoAt(VNI->def);
- if (!DefLR) {
+ if (!DefVNI) {
if (!VNI->isUnused()) {
report("Valno not live at def and not marked unused", MF);
*OS << "Valno #" << VNI->id << " in " << LI << '\n';
@@ -902,31 +1000,216 @@ void MachineVerifier::verifyLiveIntervals() {
if (VNI->isUnused())
continue;
- if (DefLR->valno != VNI) {
+ if (DefVNI != VNI) {
report("Live range at def has different valno", MF);
- DefLR->print(*OS);
- *OS << " should use valno #" << VNI->id << " in " << LI << '\n';
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " where valno #" << DefVNI->id << " is live in " << LI << '\n';
+ continue;
}
+ const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
+ if (!MBB) {
+ report("Invalid definition index", MF);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " in " << LI << '\n';
+ continue;
+ }
+
+ if (VNI->isPHIDef()) {
+ if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
+ report("PHIDef value is not defined at MBB start", MF);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << ", not at the beginning of BB#" << MBB->getNumber()
+ << " in " << LI << '\n';
+ }
+ } else {
+ // Non-PHI def.
+ const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
+ if (!MI) {
+ report("No instruction at def index", MF);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " in " << LI << '\n';
+ } else if (!MI->modifiesRegister(LI.reg, TRI)) {
+ report("Defining instruction does not modify register", MI);
+ *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+ }
+
+ bool isEarlyClobber = false;
+ if (MI) {
+ for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end(); MOI != MOE; ++MOI) {
+ if (MOI->isReg() && MOI->getReg() == LI.reg && MOI->isDef() &&
+ MOI->isEarlyClobber()) {
+ isEarlyClobber = true;
+ break;
+ }
+ }
+ }
+
+ // Early clobber defs begin at USE slots, but other defs must begin at
+ // DEF slots.
+ if (isEarlyClobber) {
+ if (!VNI->def.isUse()) {
+ report("Early clobber def must be at a USE slot", MF);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " in " << LI << '\n';
+ }
+ } else if (!VNI->def.isDef()) {
+ report("Non-PHI, non-early clobber def must be at a DEF slot", MF);
+ *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+ << " in " << LI << '\n';
+ }
+ }
}
for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I!=E; ++I) {
- const LiveRange &LR = *I;
- assert(LR.valno && "Live range has no valno");
+ const VNInfo *VNI = I->valno;
+ assert(VNI && "Live range has no valno");
- if (LR.valno->id >= LI.getNumValNums() ||
- LR.valno != LI.getValNumInfo(LR.valno->id)) {
+ if (VNI->id >= LI.getNumValNums() || VNI != LI.getValNumInfo(VNI->id)) {
report("Foreign valno in live range", MF);
- LR.print(*OS);
+ I->print(*OS);
*OS << " has a valno not in " << LI << '\n';
}
- if (LR.valno->isUnused()) {
+ if (VNI->isUnused()) {
report("Live range valno is marked unused", MF);
- LR.print(*OS);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+
+ const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(I->start);
+ if (!MBB) {
+ report("Bad start of live segment, no basic block", MF);
+ I->print(*OS);
*OS << " in " << LI << '\n';
+ continue;
+ }
+ SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
+ if (I->start != MBBStartIdx && I->start != VNI->def) {
+ report("Live segment must begin at MBB entry or valno def", MBB);
+ I->print(*OS);
+ *OS << " in " << LI << '\n' << "Basic block starts at "
+ << MBBStartIdx << '\n';
+ }
+
+ const MachineBasicBlock *EndMBB =
+ LiveInts->getMBBFromIndex(I->end.getPrevSlot());
+ if (!EndMBB) {
+ report("Bad end of live segment, no basic block", MF);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ continue;
+ }
+ if (I->end != LiveInts->getMBBEndIdx(EndMBB)) {
+ // The live segment is ending inside EndMBB
+ const MachineInstr *MI =
+ LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
+ if (!MI) {
+ report("Live segment doesn't end at a valid instruction", EndMBB);
+ I->print(*OS);
+ *OS << " in " << LI << '\n' << "Basic block starts at "
+ << MBBStartIdx << '\n';
+ } else if (TargetRegisterInfo::isVirtualRegister(LI.reg) &&
+ !MI->readsVirtualRegister(LI.reg)) {
+ // A live range can end with either a redefinition, a kill flag on a
+ // use, or a dead flag on a def.
+ // FIXME: Should we check for each of these?
+ bool hasDeadDef = false;
+ for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end(); MOI != MOE; ++MOI) {
+ if (MOI->isReg() && MOI->getReg() == LI.reg && MOI->isDef() && MOI->isDead()) {
+ hasDeadDef = true;
+ break;
+ }
+ }
+
+ if (!hasDeadDef) {
+ report("Instruction killing live segment neither defines nor reads "
+ "register", MI);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+ }
+ }
+
+ // Now check all the basic blocks in this live segment.
+ MachineFunction::const_iterator MFI = MBB;
+ // Is this live range the beginning of a non-PHIDef VN?
+ if (I->start == VNI->def && !VNI->isPHIDef()) {
+ // Not live-in to any blocks.
+ if (MBB == EndMBB)
+ continue;
+ // Skip this block.
+ ++MFI;
+ }
+ for (;;) {
+ assert(LiveInts->isLiveInToMBB(LI, MFI));
+ // We don't know how to track physregs into a landing pad.
+ if (TargetRegisterInfo::isPhysicalRegister(LI.reg) &&
+ MFI->isLandingPad()) {
+ if (&*MFI == EndMBB)
+ break;
+ ++MFI;
+ continue;
+ }
+ // Check that VNI is live-out of all predecessors.
+ for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
+ PE = MFI->pred_end(); PI != PE; ++PI) {
+ SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI).getPrevSlot();
+ const VNInfo *PVNI = LI.getVNInfoAt(PEnd);
+
+ if (VNI->isPHIDef() && VNI->def == LiveInts->getMBBStartIdx(MFI)) {
+ if (PVNI && !PVNI->hasPHIKill()) {
+ report("Value live out of predecessor doesn't have PHIKill", MF);
+ *OS << "Valno #" << PVNI->id << " live out of BB#"
+ << (*PI)->getNumber() << '@' << PEnd
+ << " doesn't have PHIKill, but Valno #" << VNI->id
+ << " is PHIDef and defined at the beginning of BB#"
+ << MFI->getNumber() << '@' << LiveInts->getMBBStartIdx(MFI)
+ << " in " << LI << '\n';
+ }
+ continue;
+ }
+
+ if (!PVNI) {
+ report("Register not marked live out of predecessor", *PI);
+ *OS << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
+ << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live at "
+ << PEnd << " in " << LI << '\n';
+ continue;
+ }
+
+ if (PVNI != VNI) {
+ report("Different value live out of predecessor", *PI);
+ *OS << "Valno #" << PVNI->id << " live out of BB#"
+ << (*PI)->getNumber() << '@' << PEnd
+ << "\nValno #" << VNI->id << " live into BB#" << MFI->getNumber()
+ << '@' << LiveInts->getMBBStartIdx(MFI) << " in " << LI << '\n';
+ }
+ }
+ if (&*MFI == EndMBB)
+ break;
+ ++MFI;
}
+ }
+ // Check the LI only has one connected component.
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ ConnectedVNInfoEqClasses ConEQ(*LiveInts);
+ unsigned NumComp = ConEQ.Classify(&LI);
+ if (NumComp > 1) {
+ report("Multiple connected components in live interval", MF);
+ *OS << NumComp << " components in " << LI << '\n';
+ for (unsigned comp = 0; comp != NumComp; ++comp) {
+ *OS << comp << ": valnos";
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
+ E = LI.vni_end(); I!=E; ++I)
+ if (comp == ConEQ.getEqClass(*I))
+ *OS << ' ' << (*I)->id;
+ *OS << '\n';
+ }
+ }
}
}
}
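Editorial note on the connected-component check above: ConnectedVNInfoEqClasses partitions a live interval's value numbers into classes connected by live segments. A minimal sketch of the same query outside the verifier, assuming the usual LiveIntervalAnalysis.h and Support/Debug.h includes; the helper itself is illustrative, only Classify and getEqClass come from the code above:

    // Report the equivalence class of every value number in LI, mirroring
    // the verifier's multiple-component diagnostic above.
    static void reportComponents(const LiveInterval &LI, LiveIntervals &LIS) {
      ConnectedVNInfoEqClasses ConEQ(LIS);
      unsigned NumComp = ConEQ.Classify(&LI);
      if (NumComp <= 1)
        return; // A well-formed virtual register interval lands here.
      for (LiveInterval::const_vni_iterator I = LI.vni_begin(),
           E = LI.vni_end(); I != E; ++I)
        dbgs() << "valno " << (*I)->id << " -> component "
               << ConEQ.getEqClass(*I) << '\n';
    }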
diff --git a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
index edb4eea..c05be13 100644
--- a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
+++ b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
@@ -33,7 +33,9 @@ namespace {
public:
static char ID; // Pass identification
- OptimizePHIs() : MachineFunctionPass(ID) {}
+ OptimizePHIs() : MachineFunctionPass(ID) {
+ initializeOptimizePHIsPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -55,7 +57,7 @@ namespace {
char OptimizePHIs::ID = 0;
INITIALIZE_PASS(OptimizePHIs, "opt-phis",
- "Optimize machine instruction PHIs", false, false);
+ "Optimize machine instruction PHIs", false, false)
FunctionPass *llvm::createOptimizePHIsPass() { return new OptimizePHIs(); }
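The constructor change above is one instance of a registration idiom applied throughout this import: each pass eagerly initializes itself against the PassRegistry instead of relying on static construction order. A hedged sketch of the complete pattern for a hypothetical pass; all names are illustrative, and initializeMyPassPass is the declaration that the INITIALIZE_PASS machinery pairs with:

    namespace {
      class MyPass : public MachineFunctionPass {
      public:
        static char ID; // Pass identification
        MyPass() : MachineFunctionPass(ID) {
          // Eager registration: pass info and dependencies are resolved
          // up front rather than at first use.
          initializeMyPassPass(*PassRegistry::getPassRegistry());
        }
        virtual bool runOnMachineFunction(MachineFunction &MF) { return false; }
      };
    }
    char MyPass::ID = 0;
    INITIALIZE_PASS(MyPass, "my-pass", "Example pass", false, false)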
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.cpp b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
index d4df4c5..5f7cf58 100644
--- a/contrib/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
@@ -14,7 +14,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "phielim"
-#include "PHIElimination.h"
+#include "PHIEliminationUtils.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -34,23 +34,72 @@
#include <map>
using namespace llvm;
+namespace {
+ class PHIElimination : public MachineFunctionPass {
+ MachineRegisterInfo *MRI; // Machine register information
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ PHIElimination() : MachineFunctionPass(ID) {
+ initializePHIEliminationPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &Fn);
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ private:
+ /// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions
+ /// in predecessor basic blocks.
+ ///
+ bool EliminatePHINodes(MachineFunction &MF, MachineBasicBlock &MBB);
+ void LowerAtomicPHINode(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator AfterPHIsIt);
+
+    /// analyzePHINodes - Gather information about the PHI nodes in this
+    /// function. In particular, we count the uses of each virtual register
+    /// appearing in a PHI node, keyed by the BB the vreg is coming from.
+    /// This is used later to determine when the vreg is killed in the BB.
+ ///
+ void analyzePHINodes(const MachineFunction& Fn);
+
+ /// Split critical edges where necessary for good coalescer performance.
+ bool SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB,
+ LiveVariables &LV, MachineLoopInfo *MLI);
+
+ typedef std::pair<unsigned, unsigned> BBVRegPair;
+ typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse;
+
+ VRegPHIUse VRegPHIUseCount;
+
+ // Defs of PHI sources which are implicit_def.
+ SmallPtrSet<MachineInstr*, 4> ImpDefs;
+
+ // Map reusable lowered PHI node -> incoming join register.
+ typedef DenseMap<MachineInstr*, unsigned,
+ MachineInstrExpressionTrait> LoweredPHIMap;
+ LoweredPHIMap LoweredPHIs;
+ };
+}
+
STATISTIC(NumAtomic, "Number of atomic phis lowered");
+STATISTIC(NumCriticalEdgesSplit, "Number of critical edges split");
STATISTIC(NumReused, "Number of reused lowered phis");
char PHIElimination::ID = 0;
INITIALIZE_PASS(PHIElimination, "phi-node-elimination",
- "Eliminate PHI nodes for register allocation", false, false);
+ "Eliminate PHI nodes for register allocation", false, false)
-char &llvm::PHIEliminationID = PHIElimination::ID;
+char& llvm::PHIEliminationID = PHIElimination::ID;
-void llvm::PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
+void PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<LiveVariables>();
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &MF) {
+bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
bool Changed = false;
@@ -93,14 +142,14 @@ bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &MF) {
/// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions in
/// predecessor basic blocks.
///
-bool llvm::PHIElimination::EliminatePHINodes(MachineFunction &MF,
+bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
MachineBasicBlock &MBB) {
if (MBB.empty() || !MBB.front().isPHI())
return false; // Quick exit for basic blocks without PHIs.
// Get an iterator to the first instruction after the last PHI node (this may
// also be the end of the basic block).
- MachineBasicBlock::iterator AfterPHIsIt = SkipPHIsAndLabels(MBB, MBB.begin());
+ MachineBasicBlock::iterator AfterPHIsIt = MBB.SkipPHIsAndLabels(MBB.begin());
while (MBB.front().isPHI())
LowerAtomicPHINode(MBB, AfterPHIsIt);
@@ -121,58 +170,14 @@ static bool isSourceDefinedByImplicitDef(const MachineInstr *MPhi,
return true;
}
-// FindCopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg
-// when following the CFG edge to SuccMBB. This needs to be after any def of
-// SrcReg, but before any subsequent point where control flow might jump out of
-// the basic block.
-MachineBasicBlock::iterator
-llvm::PHIElimination::FindCopyInsertPoint(MachineBasicBlock &MBB,
- MachineBasicBlock &SuccMBB,
- unsigned SrcReg) {
- // Handle the trivial case trivially.
- if (MBB.empty())
- return MBB.begin();
-
- // Usually, we just want to insert the copy before the first terminator
- // instruction. However, for the edge going to a landing pad, we must insert
- // the copy before the call/invoke instruction.
- if (!SuccMBB.isLandingPad())
- return MBB.getFirstTerminator();
-
- // Discover any defs/uses in this basic block.
- SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
- for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
- RE = MRI->reg_end(); RI != RE; ++RI) {
- MachineInstr *DefUseMI = &*RI;
- if (DefUseMI->getParent() == &MBB)
- DefUsesInMBB.insert(DefUseMI);
- }
- MachineBasicBlock::iterator InsertPoint;
- if (DefUsesInMBB.empty()) {
- // No defs. Insert the copy at the start of the basic block.
- InsertPoint = MBB.begin();
- } else if (DefUsesInMBB.size() == 1) {
- // Insert the copy immediately after the def/use.
- InsertPoint = *DefUsesInMBB.begin();
- ++InsertPoint;
- } else {
- // Insert the copy immediately after the last def/use.
- InsertPoint = MBB.end();
- while (!DefUsesInMBB.count(&*--InsertPoint)) {}
- ++InsertPoint;
- }
-
- // Make sure the copy goes after any phi nodes however.
- return SkipPHIsAndLabels(MBB, InsertPoint);
-}
/// LowerAtomicPHINode - Lower the PHI node at the top of the specified block,
/// under the assumption that it needs to be lowered in a way that supports
/// atomic execution of PHIs. This lowering method is always correct.
///
-void llvm::PHIElimination::LowerAtomicPHINode(
+void PHIElimination::LowerAtomicPHINode(
MachineBasicBlock &MBB,
MachineBasicBlock::iterator AfterPHIsIt) {
++NumAtomic;
@@ -207,7 +212,7 @@ void llvm::PHIElimination::LowerAtomicPHINode(
IncomingReg = entry;
reusedIncoming = true;
++NumReused;
- DEBUG(dbgs() << "Reusing %reg" << IncomingReg << " for " << *MPhi);
+ DEBUG(dbgs() << "Reusing " << PrintReg(IncomingReg) << " for " << *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -294,7 +299,7 @@ void llvm::PHIElimination::LowerAtomicPHINode(
// Find a safe location to insert the copy, this may be the first terminator
// in the block (or end()).
MachineBasicBlock::iterator InsertPos =
- FindCopyInsertPoint(opBlock, MBB, SrcReg);
+ findPHICopyInsertPoint(&opBlock, &MBB, SrcReg);
// Insert the copy.
if (!reusedIncoming && IncomingReg)
@@ -335,6 +340,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
#ifndef NDEBUG
for (MachineBasicBlock::iterator TI = llvm::next(Term);
TI != opBlock.end(); ++TI) {
+ if (TI->isDebugValue())
+ continue;
assert(!TI->readsRegister(SrcReg) &&
"Terminator instructions cannot use virtual registers unless"
"they are the first terminator in a block!");
@@ -343,9 +350,13 @@ void llvm::PHIElimination::LowerAtomicPHINode(
} else if (reusedIncoming || !IncomingReg) {
// We may have to rewind a bit if we didn't insert a copy this time.
KillInst = Term;
- while (KillInst != opBlock.begin())
- if ((--KillInst)->readsRegister(SrcReg))
+ while (KillInst != opBlock.begin()) {
+ --KillInst;
+ if (KillInst->isDebugValue())
+ continue;
+ if (KillInst->readsRegister(SrcReg))
break;
+ }
} else {
// We just inserted this copy.
KillInst = prior(InsertPos);
@@ -371,7 +382,7 @@ void llvm::PHIElimination::LowerAtomicPHINode(
/// used in a PHI node, keyed by the BB the vreg is coming from. This is
/// used later to determine when the vreg is killed in the BB.
///
-void llvm::PHIElimination::analyzePHINodes(const MachineFunction& MF) {
+void PHIElimination::analyzePHINodes(const MachineFunction& MF) {
for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
I != E; ++I)
for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end();
@@ -381,10 +392,10 @@ void llvm::PHIElimination::analyzePHINodes(const MachineFunction& MF) {
BBI->getOperand(i).getReg())];
}
-bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
- MachineBasicBlock &MBB,
- LiveVariables &LV,
- MachineLoopInfo *MLI) {
+bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ LiveVariables &LV,
+ MachineLoopInfo *MLI) {
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
@@ -403,10 +414,14 @@ bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
!LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB)) {
if (!MLI ||
!(MLI->getLoopFor(PreMBB) == MLI->getLoopFor(&MBB) &&
- MLI->isLoopHeader(&MBB)))
- Changed |= PreMBB->SplitCriticalEdge(&MBB, this) != 0;
+ MLI->isLoopHeader(&MBB))) {
+ if (PreMBB->SplitCriticalEdge(&MBB, this)) {
+ Changed = true;
+ ++NumCriticalEdgesSplit;
+ }
+ }
}
}
}
- return true;
+ return Changed;
}
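For orientation, SplitPHIEdges above only splits an edge that is critical, and skips back-edges into a loop header to avoid hurting the coalescer. The criticality test itself reduces to a sketch like the following; this helper is illustrative, not part of the diff:

    // An edge PreMBB -> MBB is critical when the source has several
    // successors and the destination has several predecessors. A PHI copy
    // placed in PreMBB would otherwise execute on unrelated paths, so the
    // edge gets its own block first.
    static bool isCriticalEdge(const MachineBasicBlock *PreMBB,
                               const MachineBasicBlock *MBB) {
      return PreMBB->succ_size() > 1 && MBB->pred_size() > 1;
    }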
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.h b/contrib/llvm/lib/CodeGen/PHIElimination.h
deleted file mode 100644
index 45a9718..0000000
--- a/contrib/llvm/lib/CodeGen/PHIElimination.h
+++ /dev/null
@@ -1,115 +0,0 @@
-//===-- lib/CodeGen/PHIElimination.h ----------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_PHIELIMINATION_HPP
-#define LLVM_CODEGEN_PHIELIMINATION_HPP
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-
-namespace llvm {
- class LiveVariables;
- class MachineRegisterInfo;
- class MachineLoopInfo;
-
- /// Lower PHI instructions to copies.
- class PHIElimination : public MachineFunctionPass {
- MachineRegisterInfo *MRI; // Machine register information
-
- public:
- static char ID; // Pass identification, replacement for typeid
- PHIElimination() : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
- private:
- /// EliminatePHINodes - Eliminate phi nodes by inserting copy instructions
- /// in predecessor basic blocks.
- ///
- bool EliminatePHINodes(MachineFunction &MF, MachineBasicBlock &MBB);
- void LowerAtomicPHINode(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator AfterPHIsIt);
-
- /// analyzePHINodes - Gather information about the PHI nodes in
- /// here. In particular, we want to map the number of uses of a virtual
- /// register which is used in a PHI node. We map that to the BB the
- /// vreg is coming from. This is used later to determine when the vreg
- /// is killed in the BB.
- ///
- void analyzePHINodes(const MachineFunction& Fn);
-
- /// Split critical edges where necessary for good coalescer performance.
- bool SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB,
- LiveVariables &LV, MachineLoopInfo *MLI);
-
- /// SplitCriticalEdge - Split a critical edge from A to B by
- /// inserting a new MBB. Update branches in A and PHI instructions
- /// in B. Return the new block.
- MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *A,
- MachineBasicBlock *B);
-
- /// FindCopyInsertPoint - Find a safe place in MBB to insert a copy from
- /// SrcReg when following the CFG edge to SuccMBB. This needs to be after
- /// any def of SrcReg, but before any subsequent point where control flow
- /// might jump out of the basic block.
- MachineBasicBlock::iterator FindCopyInsertPoint(MachineBasicBlock &MBB,
- MachineBasicBlock &SuccMBB,
- unsigned SrcReg);
-
- // SkipPHIsAndLabels - Copies need to be inserted after phi nodes and
- // also after any exception handling labels: in landing pads execution
- // starts at the label, so any copies placed before it won't be executed!
- // We also deal with DBG_VALUEs, which are a bit tricky:
- // PHI
- // DBG_VALUE
- // LABEL
- // Here the DBG_VALUE needs to be skipped, and if it refers to a PHI it
- // needs to be annulled or, better, moved to follow the label, as well.
- // PHI
- // DBG_VALUE
- // no label
- // Here it is not a good idea to skip the DBG_VALUE.
- // FIXME: For now we skip and annul all DBG_VALUEs, maximally simple and
- // maximally stupid.
- MachineBasicBlock::iterator SkipPHIsAndLabels(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) {
- // Rather than assuming that EH labels come before other kinds of labels,
- // just skip all labels.
- while (I != MBB.end() &&
- (I->isPHI() || I->isLabel() || I->isDebugValue())) {
- if (I->isDebugValue() && I->getNumOperands()==3 &&
- I->getOperand(0).isReg())
- I->getOperand(0).setReg(0U);
- ++I;
- }
- return I;
- }
-
- typedef std::pair<unsigned, unsigned> BBVRegPair;
- typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse;
-
- VRegPHIUse VRegPHIUseCount;
-
- // Defs of PHI sources which are implicit_def.
- SmallPtrSet<MachineInstr*, 4> ImpDefs;
-
- // Map reusable lowered PHI node -> incoming join register.
- typedef DenseMap<MachineInstr*, unsigned,
- MachineInstrExpressionTrait> LoweredPHIMap;
- LoweredPHIMap LoweredPHIs;
- };
-
-}
-
-#endif /* LLVM_CODEGEN_PHIELIMINATION_HPP */
diff --git a/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp
new file mode 100644
index 0000000..10bfdcc
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.cpp
@@ -0,0 +1,61 @@
+//===-- PHIEliminationUtils.cpp - Helper functions for PHI elimination ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PHIEliminationUtils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
+using namespace llvm;
+
+// findPHICopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg
+// when following the CFG edge to SuccMBB. This needs to be after any def of
+// SrcReg, but before any subsequent point where control flow might jump out of
+// the basic block.
+MachineBasicBlock::iterator
+llvm::findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
+ unsigned SrcReg) {
+ // Handle the trivial case trivially.
+ if (MBB->empty())
+ return MBB->begin();
+
+ // Usually, we just want to insert the copy before the first terminator
+ // instruction. However, for the edge going to a landing pad, we must insert
+ // the copy before the call/invoke instruction.
+ if (!SuccMBB->isLandingPad())
+ return MBB->getFirstTerminator();
+
+ // Discover any defs/uses in this basic block.
+ SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
+ MachineRegisterInfo& MRI = MBB->getParent()->getRegInfo();
+ for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(SrcReg),
+ RE = MRI.reg_end(); RI != RE; ++RI) {
+ MachineInstr* DefUseMI = &*RI;
+ if (DefUseMI->getParent() == MBB)
+ DefUsesInMBB.insert(DefUseMI);
+ }
+
+ MachineBasicBlock::iterator InsertPoint;
+ if (DefUsesInMBB.empty()) {
+ // No defs. Insert the copy at the start of the basic block.
+ InsertPoint = MBB->begin();
+ } else if (DefUsesInMBB.size() == 1) {
+ // Insert the copy immediately after the def/use.
+ InsertPoint = *DefUsesInMBB.begin();
+ ++InsertPoint;
+ } else {
+ // Insert the copy immediately after the last def/use.
+ InsertPoint = MBB->end();
+ while (!DefUsesInMBB.count(&*--InsertPoint)) {}
+ ++InsertPoint;
+ }
+
+  // Make sure the copy goes after any phi nodes, however.
+ return MBB->SkipPHIsAndLabels(InsertPoint);
+}
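A hedged usage sketch matching the call site PHIElimination.cpp now uses; opBlock, MBB, SrcReg, IncomingReg, MPhi and TII stand for the caller's locals, and the COPY emission is schematic:

    // Place the copy for a PHI source on the opBlock -> MBB edge, then
    // emit it at the computed point.
    MachineBasicBlock::iterator InsertPos =
        findPHICopyInsertPoint(&opBlock, &MBB, SrcReg);
    BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
            TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg);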
diff --git a/contrib/llvm/lib/CodeGen/PHIEliminationUtils.h b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.h
new file mode 100644
index 0000000..9ac47fb4
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/PHIEliminationUtils.h
@@ -0,0 +1,25 @@
+//=- PHIEliminationUtils.h - Helper functions for PHI elimination *- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PHIELIMINATIONUTILS_H
+#define LLVM_CODEGEN_PHIELIMINATIONUTILS_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+ /// findPHICopyInsertPoint - Find a safe place in MBB to insert a copy from
+ /// SrcReg when following the CFG edge to SuccMBB. This needs to be after
+ /// any def of SrcReg, but before any subsequent point where control flow
+ /// might jump out of the basic block.
+ MachineBasicBlock::iterator
+ findPHICopyInsertPoint(MachineBasicBlock* MBB, MachineBasicBlock* SuccMBB,
+ unsigned SrcReg);
+}
+
+#endif
diff --git a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 17cee46..5d7123c 100644
--- a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -41,7 +41,9 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -50,8 +52,13 @@ static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
cl::desc("Aggressive extension optimization"));
+static cl::opt<bool>
+DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
+ cl::desc("Disable the peephole optimizer"));
+
STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumEliminated, "Number of compares eliminated");
+STATISTIC(NumImmFold, "Number of move immediates folded");
namespace {
class PeepholeOptimizer : public MachineFunctionPass {
@@ -62,7 +69,9 @@ namespace {
public:
static char ID; // Pass identification
- PeepholeOptimizer() : MachineFunctionPass(ID) {}
+ PeepholeOptimizer() : MachineFunctionPass(ID) {
+ initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -79,12 +88,21 @@ namespace {
bool OptimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
bool OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &LocalMIs);
+ bool isMoveImmediate(MachineInstr *MI,
+ SmallSet<unsigned, 4> &ImmDefRegs,
+ DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
+ bool FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
+ SmallSet<unsigned, 4> &ImmDefRegs,
+ DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
};
}
char PeepholeOptimizer::ID = 0;
-INITIALIZE_PASS(PeepholeOptimizer, "peephole-opts",
- "Peephole Optimizations", false, false);
+INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
+ "Peephole Optimizations", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
+ "Peephole Optimizations", false, false)
FunctionPass *llvm::createPeepholeOptimizerPass() {
return new PeepholeOptimizer();
@@ -102,12 +120,10 @@ FunctionPass *llvm::createPeepholeOptimizerPass() {
bool PeepholeOptimizer::
OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
- LocalMIs.insert(MI);
-
unsigned SrcReg, DstReg, SubIdx;
if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
return false;
-
+
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
@@ -232,22 +248,17 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
/// set) the same flag as the compare, then we can remove the comparison and use
/// the flag from the previous instruction.
bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
- MachineBasicBlock *MBB) {
+ MachineBasicBlock *MBB){
// If this instruction is a comparison against zero and isn't comparing a
// physical register, we can try to optimize it.
unsigned SrcReg;
- int CmpValue;
- if (!TII->AnalyzeCompare(MI, SrcReg, CmpValue) ||
- TargetRegisterInfo::isPhysicalRegister(SrcReg) || CmpValue != 0)
- return false;
-
- MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
- if (llvm::next(DI) != MRI->def_end())
- // Only support one definition.
+ int CmpMask, CmpValue;
+ if (!TII->AnalyzeCompare(MI, SrcReg, CmpMask, CmpValue) ||
+ TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
- // Attempt to convert the defining instruction to set the "zero" flag.
- if (TII->ConvertToSetZeroFlag(&*DI, MI)) {
+ // Attempt to optimize the comparison instruction.
+ if (TII->OptimizeCompareInstr(MI, SrcReg, CmpMask, CmpValue, MRI)) {
++NumEliminated;
return true;
}
@@ -255,7 +266,53 @@ bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
return false;
}
+bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
+ SmallSet<unsigned, 4> &ImmDefRegs,
+ DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (!TID.isMoveImmediate())
+ return false;
+ if (TID.getNumDefs() != 1)
+ return false;
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ ImmDefMIs.insert(std::make_pair(Reg, MI));
+ ImmDefRegs.insert(Reg);
+ return true;
+ }
+
+ return false;
+}
+
+/// FoldImmediate - Try folding register operands that are defined by move
+/// immediate instructions, i.e. a trivial constant folding optimization, if
+/// and only if the def and use are in the same BB.
+bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
+ SmallSet<unsigned, 4> &ImmDefRegs,
+ DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
+ for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ if (ImmDefRegs.count(Reg) == 0)
+ continue;
+ DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
+ assert(II != ImmDefMIs.end());
+ if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
+ ++NumImmFold;
+ return true;
+ }
+ }
+ return false;
+}
+
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
+ if (DisablePeephole)
+ return false;
+
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
@@ -264,22 +321,50 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
SmallPtrSet<MachineInstr*, 8> LocalMIs;
+ SmallSet<unsigned, 4> ImmDefRegs;
+ DenseMap<unsigned, MachineInstr*> ImmDefMIs;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
+
+ bool SeenMoveImm = false;
LocalMIs.clear();
+ ImmDefRegs.clear();
+ ImmDefMIs.clear();
+ bool First = true;
+ MachineBasicBlock::iterator PMII;
for (MachineBasicBlock::iterator
- MII = I->begin(), ME = I->end(); MII != ME; ) {
+ MII = I->begin(), MIE = I->end(); MII != MIE; ) {
MachineInstr *MI = &*MII;
+ LocalMIs.insert(MI);
- if (MI->getDesc().isCompare() &&
- !MI->getDesc().hasUnmodeledSideEffects()) {
- ++MII; // The iterator may become invalid if the compare is deleted.
- Changed |= OptimizeCmpInstr(MI, MBB);
+ if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
+ MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
+ MI->hasUnmodeledSideEffects()) {
+ ++MII;
+ continue;
+ }
+
+ if (MI->getDesc().isCompare()) {
+ if (OptimizeCmpInstr(MI, MBB)) {
+ // MI is deleted.
+ Changed = true;
+ MII = First ? I->begin() : llvm::next(PMII);
+ continue;
+ }
+ }
+
+ if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
+ SeenMoveImm = true;
} else {
Changed |= OptimizeExtInstr(MI, MBB, LocalMIs);
- ++MII;
+ if (SeenMoveImm)
+ Changed |= FoldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
}
+
+ First = false;
+ PMII = MII;
+ ++MII;
}
}
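The First/PMII bookkeeping introduced above is a deletion-safe iteration idiom: OptimizeCmpInstr may erase the current instruction, which invalidates MII. A stripped-down sketch of the idiom; maybeErase is a hypothetical transform that may delete MI:

    bool First = true;
    MachineBasicBlock::iterator PMII, MII = MBB->begin();
    while (MII != MBB->end()) {
      MachineInstr *MI = &*MII;
      if (maybeErase(MI)) {
        // MI is gone: resume right after the last surviving instruction,
        // or from the block start if nothing preceded it.
        MII = First ? MBB->begin() : llvm::next(PMII);
        continue;
      }
      First = false;
      PMII = MII;
      ++MII;
    }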
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index f0bd6d1..60c24b7 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -133,18 +133,12 @@ namespace {
std::vector<unsigned> KillIndices;
public:
- SchedulePostRATDList(MachineFunction &MF,
- const MachineLoopInfo &MLI,
- const MachineDominatorTree &MDT,
- ScheduleHazardRecognizer *HR,
- AntiDepBreaker *ADB,
- AliasAnalysis *aa)
- : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
- HazardRec(HR), AntiDepBreak(ADB), AA(aa),
- KillIndices(TRI->getNumRegs()) {}
-
- ~SchedulePostRATDList() {
- }
+ SchedulePostRATDList(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+ AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
+ SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);
+
+ ~SchedulePostRATDList();
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
@@ -183,9 +177,34 @@ namespace {
};
}
+SchedulePostRATDList::SchedulePostRATDList(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+ AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
+ SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
+ : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
+ KillIndices(TRI->getNumRegs())
+{
+ const TargetMachine &TM = MF.getTarget();
+ const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
+ HazardRec =
+ TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
+ AntiDepBreak =
+ ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
+ (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, CriticalPathRCs) :
+ ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
+ (AntiDepBreaker *)new CriticalAntiDepBreaker(MF) : NULL));
+}
+
+SchedulePostRATDList::~SchedulePostRATDList() {
+ delete HazardRec;
+ delete AntiDepBreak;
+}
+
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
- AA = &getAnalysis<AliasAnalysis>();
TII = Fn.getTarget().getInstrInfo();
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
// Check for explicit enable/disable of post-ra scheduling.
TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
@@ -195,6 +214,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
return false;
} else {
// Check that post-RA scheduling is enabled for this target.
+ // This may upgrade the AntiDepMode.
const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
return false;
@@ -210,19 +230,8 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
DEBUG(dbgs() << "PostRAScheduler\n");
- const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
- const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
- const TargetMachine &TM = Fn.getTarget();
- const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
- ScheduleHazardRecognizer *HR =
- TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
- AntiDepBreaker *ADB =
- ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
- (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
- ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
- (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
-
- SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);
+ SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, AntiDepMode,
+ CriticalPathRCs);
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
@@ -270,9 +279,6 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
Scheduler.FixupKills(MBB);
}
- delete HR;
- delete ADB;
-
return true;
}
@@ -617,13 +623,7 @@ void SchedulePostRATDList::ListScheduleTopDown() {
MinDepth = PendingQueue[i]->getDepth();
}
- DEBUG(dbgs() << "\n*** Examining Available\n";
- LatencyPriorityQueue q = AvailableQueue;
- while (!q.empty()) {
- SUnit *su = q.pop();
- dbgs() << "Height " << su->getHeight() << ": ";
- su->dump(this);
- });
+ DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
SUnit *FoundSUnit = 0;
bool HasNoopHazards = false;
@@ -631,7 +631,7 @@ void SchedulePostRATDList::ListScheduleTopDown() {
SUnit *CurSUnit = AvailableQueue.pop();
ScheduleHazardRecognizer::HazardType HT =
- HazardRec->getHazardType(CurSUnit);
+ HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
if (HT == ScheduleHazardRecognizer::NoHazard) {
FoundSUnit = CurSUnit;
break;
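The constructor above folds helper creation into the scheduler, pairing it with the destructor so ownership is explicit. Rewriting its nested ternary as a switch shows the mode selection more plainly; a behavior-preserving sketch, where NULL means no anti-dependency breaking:

    static AntiDepBreaker *
    createAntiDepBreaker(MachineFunction &MF,
                         TargetSubtarget::AntiDepBreakMode Mode,
                         SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs) {
      switch (Mode) {
      case TargetSubtarget::ANTIDEP_ALL:
        return new AggressiveAntiDepBreaker(MF, CriticalPathRCs);
      case TargetSubtarget::ANTIDEP_CRITICAL:
        return new CriticalAntiDepBreaker(MF);
      default:
        return NULL; // ANTIDEP_NONE and anything unrecognized.
      }
    }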
diff --git a/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp b/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
index cd9d83e..d6e31da 100644
--- a/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
+++ b/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
@@ -91,8 +91,9 @@ namespace {
public:
static char ID;
- PreAllocSplitting()
- : MachineFunctionPass(ID) {}
+ PreAllocSplitting() : MachineFunctionPass(ID) {
+ initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -106,10 +107,8 @@ namespace {
AU.addPreserved<LiveStacks>();
AU.addPreserved<RegisterCoalescer>();
AU.addPreserved<CalculateSpillWeights>();
- if (StrongPHIElim)
- AU.addPreservedID(StrongPHIEliminationID);
- else
- AU.addPreservedID(PHIEliminationID);
+ AU.addPreservedID(StrongPHIEliminationID);
+ AU.addPreservedID(PHIEliminationID);
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
@@ -203,9 +202,18 @@ namespace {
char PreAllocSplitting::ID = 0;
-INITIALIZE_PASS(PreAllocSplitting, "pre-alloc-splitting",
+INITIALIZE_PASS_BEGIN(PreAllocSplitting, "pre-alloc-splitting",
+ "Pre-Register Allocation Live Interval Splitting",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(PreAllocSplitting, "pre-alloc-splitting",
"Pre-Register Allocation Live Interval Splitting",
- false, false);
+ false, false)
char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;
@@ -324,7 +332,7 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
+ CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
LSs->getVNInfoAllocator());
return SS;
}
@@ -585,7 +593,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
VNInfo *RetVNI = Phis[MBB] =
- LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
+ LI->getNextValue(SlotIndex(), /*FIXME*/ 0,
LIs->getVNInfoAllocator());
if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
@@ -674,7 +682,7 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
DefIdx = DefIdx.getDefIndex();
assert(!DI->isPHI() && "PHI instr in code during pre-alloc splitting.");
- VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
+ VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);
// If the def is a move, set the copy field.
if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
@@ -807,7 +815,7 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
MachineBasicBlock& MBB = *RestorePt->getParent();
MachineBasicBlock::iterator KillPt = BarrierMBB->end();
- if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
+ if (!DefMI || DefMI->getParent() == BarrierMBB)
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
else
KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
@@ -872,7 +880,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
+ CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
LSs->getVNInfoAllocator());
}
@@ -967,8 +975,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
- MachineInstr *DefMI = ValNo->isDefAccurate()
- ? LIs->getInstructionFromIndex(ValNo->def) : NULL;
+ MachineInstr *DefMI = LIs->getInstructionFromIndex(ValNo->def);
// If this would create a new join point, do not split.
if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
@@ -1005,7 +1012,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
SlotIndex SpillIndex;
MachineInstr *SpillMI = NULL;
int SS = -1;
- if (!ValNo->isDefAccurate()) {
+ if (!DefMI) {
// If we don't know where the def is we must split just before the barrier.
if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
BarrierMBB, SS, RefsInMBB))) {
@@ -1199,12 +1206,12 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
// We also don't try to handle the results of PHI joins, since there's
// no defining instruction to analyze.
- if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
+ MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
+ if (!DefMI || CurrVN->isUnused()) continue;
// We're only interested in eliminating cruft introduced by the splitter,
    // which is of the form load-use or load-use-store. First, check that the
// definition is a load, and remember what stack slot we loaded it from.
- MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
int FrameIndex;
if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
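The recurring substitution in this file is that, with VNInfo::isDefAccurate() gone, a null result from getInstructionFromIndex now plays the role the flag used to: PHI joins and unknown defs simply have no defining instruction. A one-line helper capturing the new convention, hypothetical and for illustration only:

    static MachineInstr *getDefiningInstr(const VNInfo *VNI,
                                          LiveIntervals *LIs) {
      // Everything except a PHI def can be recovered from its def slot.
      return VNI->isPHIDef() ? NULL : LIs->getInstructionFromIndex(VNI->def);
    }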
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index b8831db..9cd9941 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -26,8 +26,11 @@
using namespace llvm;
char ProcessImplicitDefs::ID = 0;
-INITIALIZE_PASS(ProcessImplicitDefs, "processimpdefs",
- "Process Implicit Definitions.", false, false);
+INITIALIZE_PASS_BEGIN(ProcessImplicitDefs, "processimpdefs",
+ "Process Implicit Definitions", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_END(ProcessImplicitDefs, "processimpdefs",
+ "Process Implicit Definitions", false, false)
void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
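The INITIALIZE_PASS_BEGIN/DEPENDENCY/END chain above is expected to mirror what the pass requests in getAnalysisUsage. A hedged sketch of the pairing for a hypothetical pass requiring LiveVariables:

    INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "Example pass", false, false)
    INITIALIZE_PASS_DEPENDENCY(LiveVariables)
    INITIALIZE_PASS_END(MyPass, "my-pass", "Example pass", false, false)

    void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LiveVariables>(); // matches the DEPENDENCY line above
      MachineFunctionPass::getAnalysisUsage(AU);
    }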
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index e2802c1..ad7b6e4 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -21,6 +21,7 @@
#define DEBUG_TYPE "pei"
#include "PrologEpilogInserter.h"
+#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -29,7 +30,7 @@
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
@@ -44,8 +45,12 @@ using namespace llvm;
char PEI::ID = 0;
-INITIALIZE_PASS(PEI, "prologepilog",
- "Prologue/Epilogue Insertion", false, false);
+INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion", false, false)
STATISTIC(NumVirtualFrameRegs, "Number of virtual frame regs encountered");
STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
@@ -61,6 +66,8 @@ FunctionPass *llvm::createPrologEpilogCodeInserter() { return new PEI(); }
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
const Function* F = Fn.getFunction();
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
+ const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
+
RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : NULL;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
@@ -71,7 +78,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// Allow the target machine to make some adjustments to the function
// e.g. UsedPhysRegs before calculateCalleeSavedRegisters.
- TRI->processFunctionBeforeCalleeSavedScan(Fn, RS);
+ TFI->processFunctionBeforeCalleeSavedScan(Fn, RS);
// Scan the function for modified callee saved registers and insert spill code
// for any callee saved registers that are modified.
@@ -91,7 +98,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// Allow the target machine to make final modifications to the function
// before the frame layout is finalized.
- TRI->processFunctionBeforeFrameFinalized(Fn);
+ TFI->processFunctionBeforeFrameFinalized(Fn);
// Calculate actual frame offsets for all abstract stack objects...
calculateFrameObjectOffsets(Fn);
@@ -138,6 +145,7 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
+ const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
MachineFrameInfo *MFI = Fn.getFrameInfo();
unsigned MaxCallFrameSize = 0;
@@ -165,7 +173,8 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
FrameSDOps.push_back(I);
} else if (I->isInlineAsm()) {
// Some inline asm's need a stack frame, as indicated by operand 1.
- if (I->getOperand(1).getImm())
+ unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
AdjustsStack = true;
}
@@ -180,7 +189,7 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
// the target doesn't indicate otherwise, remove the call frame pseudos
// here. The sub/add sp instruction pairs are still inserted, but we don't
// need to track the SP adjustment for frame index elimination.
- if (RegInfo->canSimplifyCallFramePseudos(Fn))
+ if (TFI->canSimplifyCallFramePseudos(Fn))
RegInfo->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
}
}
@@ -190,7 +199,7 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
/// registers.
void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
- const TargetFrameInfo *TFI = Fn.getTarget().getFrameInfo();
+ const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
MachineFrameInfo *MFI = Fn.getFrameInfo();
// Get the callee saved register list...
@@ -229,7 +238,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
return; // Early exit if no callee saved registers are modified!
unsigned NumFixedSpillSlots;
- const TargetFrameInfo::SpillSlot *FixedSpillSlots =
+ const TargetFrameLowering::SpillSlot *FixedSpillSlots =
TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
// Now that we know which registers need to be saved and restored, allocate
@@ -247,7 +256,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
// Check to see if this physreg must be spilled to a particular stack slot
// on this target.
- const TargetFrameInfo::SpillSlot *FixedSlot = FixedSpillSlots;
+ const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
while (FixedSlot != FixedSpillSlots+NumFixedSpillSlots &&
FixedSlot->Reg != Reg)
++FixedSlot;
@@ -290,13 +299,14 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
return;
const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
+ const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
MachineBasicBlock::iterator I;
if (! ShrinkWrapThisFunction) {
// Spill using target interface.
I = EntryBlock->begin();
- if (!TII.spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
+ if (!TFI->spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in.
// It's killed at the spill.
@@ -328,7 +338,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Restore all registers immediately before the return and any
   // terminators that precede it.
- if (!TII.restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
+ if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
@@ -480,10 +490,10 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
- const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
+ const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
bool StackGrowsDown =
- TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
+ TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
@@ -549,7 +559,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Make sure the special register scavenging spill slot is closest to the
// frame pointer if a frame pointer is required.
const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
- if (RS && RegInfo->hasFP(Fn) && !RegInfo->needsStackRealignment(Fn)) {
+ if (RS && TFI.hasFP(Fn) && !RegInfo->needsStackRealignment(Fn)) {
int SFI = RS->getScavengingFrameIndex();
if (SFI >= 0)
AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
@@ -631,17 +641,17 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Make sure the special register scavenging spill slot is closest to the
// stack pointer.
- if (RS && (!RegInfo->hasFP(Fn) || RegInfo->needsStackRealignment(Fn))) {
+ if (RS && (!TFI.hasFP(Fn) || RegInfo->needsStackRealignment(Fn))) {
int SFI = RS->getScavengingFrameIndex();
if (SFI >= 0)
AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
}
- if (!RegInfo->targetHandlesStackFrameRounding()) {
+ if (!TFI.targetHandlesStackFrameRounding()) {
// If we have reserved argument space for call sites in the function
// immediately on entry to the current function, count it as part of the
// overall stack size.
- if (MFI->adjustsStack() && RegInfo->hasReservedCallFrame(Fn))
+ if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
Offset += MFI->getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
@@ -672,16 +682,16 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
- const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
+ const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
// Add prologue to the function...
- TRI->emitPrologue(Fn);
+ TFI.emitPrologue(Fn);
// Add epilogue to restore the callee-save registers in each exiting block
for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
// If last instruction is a return instruction, add an epilogue
if (!I->empty() && I->back().getDesc().isReturn())
- TRI->emitEpilogue(Fn, *I);
+ TFI.emitEpilogue(Fn, *I);
}
}
@@ -694,9 +704,9 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
const TargetMachine &TM = Fn.getTarget();
assert(TM.getRegisterInfo() && "TM::getRegisterInfo() must be implemented!");
const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- const TargetFrameInfo *TFI = TM.getFrameInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
bool StackGrowsDown =
- TFI->getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
+ TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int FrameSetupOpcode = TRI.getCallFrameSetupOpcode();
int FrameDestroyOpcode = TRI.getCallFrameDestroyOpcode();
@@ -755,8 +765,8 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
// If this instruction has a FrameIndex operand, we need to
// use that target machine register info object to eliminate
// it.
- TRI.eliminateFrameIndex(MI, SPAdj,
- FrameIndexVirtualScavenging ? NULL : RS);
+ TRI.eliminateFrameIndex(MI, SPAdj,
+ FrameIndexVirtualScavenging ? NULL : RS);
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
@@ -825,7 +835,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
ScratchReg = RS->scavengeRegister(RC, I, SPAdj);
++NumScavengedRegs;
}
- // replace this reference to the virtual register with the
+ // Replace this reference to the virtual register with the
// scratch register.
assert (ScratchReg && "Missing scratch register!");
MI->getOperand(i).setReg(ScratchReg);
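The thread running through this file is the move of frame hooks from TargetRegisterInfo/TargetFrameInfo onto TargetFrameLowering. A sketch of the target-side class the new call sites assume; constructor arguments and hook signatures are an approximation of this era's API, not taken from the diff:

    class MyTargetFrameLowering : public TargetFrameLowering {
    public:
      MyTargetFrameLowering()
        : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
                              /*StackAlignment=*/8, /*LocalAreaOffset=*/0) {}
      // Hooks PEI now reaches through TM.getFrameLowering():
      virtual void emitPrologue(MachineFunction &MF) const;
      virtual void emitEpilogue(MachineFunction &MF,
                                MachineBasicBlock &MBB) const;
      virtual bool hasFP(const MachineFunction &MF) const;
    };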
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
index d575124..e239159 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
@@ -36,7 +36,9 @@ namespace llvm {
class PEI : public MachineFunctionPass {
public:
static char ID;
- PEI() : MachineFunctionPass(ID) {}
+ PEI() : MachineFunctionPass(ID) {
+ initializePEIPass(*PassRegistry::getPassRegistry());
+ }
const char *getPassName() const {
return "Prolog/Epilog Insertion & Frame Finalization";
diff --git a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
index 5e86e5a..73b66d8 100644
--- a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
+++ b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
@@ -18,7 +18,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <map>
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.h b/contrib/llvm/lib/CodeGen/RegAllocBase.h
new file mode 100644
index 0000000..8c7e5f5
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.h
@@ -0,0 +1,181 @@
+//===-- RegAllocBase.h - basic regalloc interface and driver --*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegAllocBase class, which is the skeleton of a basic
+// register allocation algorithm and interface for extending it. It provides the
+// building blocks on which to construct other experimental allocators and test
+// the validity of two principles:
+//
+// - If virtual and physical register liveness is modeled using intervals, then
+// on-the-fly interference checking is cheap. Furthermore, interferences can be
+// lazily cached and reused.
+//
+// - Register allocation complexity and generated code performance are
+// determined by the effectiveness of live range splitting rather than optimal
+// coloring.
+//
+// Following the first principle, interference checking revolves around the
+// LiveIntervalUnion data structure.
+//
+// To fulfill the second principle, the basic allocator provides a driver for
+// incremental splitting. It essentially punts on the problem of register
+// coloring, instead driving the assignment of virtual to physical registers by
+// the cost of splitting. The basic allocator allows for heuristic reassignment
+// of registers, if a more sophisticated allocator chooses to do that.
+//
+// This framework provides a way to engineer the compile time vs. code
+// quality trade-off without relying on a particular theoretical solver.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCBASE
+#define LLVM_CODEGEN_REGALLOCBASE
+
+#include "llvm/ADT/OwningPtr.h"
+#include "LiveIntervalUnion.h"
+#include <queue>
+
+namespace llvm {
+
+template<typename T> class SmallVectorImpl;
+class TargetRegisterInfo;
+class VirtRegMap;
+class LiveIntervals;
+class Spiller;
+
+// Forward declare a priority queue of live virtual registers. If an
+// implementation needs to prioritize by anything other than spill weight, then
+// this will become an abstract base class with virtual calls to push/get.
+class LiveVirtRegQueue;
+
+/// RegAllocBase provides the register allocation driver and interface that can
+/// be extended to add interesting heuristics.
+///
+/// Register allocators must override the selectOrSplit() method to implement
+/// live range splitting. They may also override getPriority() which otherwise
+/// defaults to the spill weight computed by CalculateSpillWeights.
+class RegAllocBase {
+ LiveIntervalUnion::Allocator UnionAllocator;
+protected:
+ // Array of LiveIntervalUnions indexed by physical register.
+ class LiveUnionArray {
+ unsigned NumRegs;
+ LiveIntervalUnion *Array;
+ public:
+ LiveUnionArray(): NumRegs(0), Array(0) {}
+ ~LiveUnionArray() { clear(); }
+
+ unsigned numRegs() const { return NumRegs; }
+
+ void init(LiveIntervalUnion::Allocator &, unsigned NRegs);
+
+ void clear();
+
+ LiveIntervalUnion& operator[](unsigned PhysReg) {
+ assert(PhysReg < NumRegs && "physReg out of bounds");
+ return Array[PhysReg];
+ }
+ };
+
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+ VirtRegMap *VRM;
+ LiveIntervals *LIS;
+ LiveUnionArray PhysReg2LiveUnion;
+
+ // Current queries, one per physreg. They must be reinitialized each time we
+ // query on a new live virtual register.
+ OwningArrayPtr<LiveIntervalUnion::Query> Queries;
+
+ RegAllocBase(): TRI(0), MRI(0), VRM(0), LIS(0) {}
+
+ virtual ~RegAllocBase() {}
+
+ // A RegAlloc pass should call this before allocatePhysRegs.
+ void init(VirtRegMap &vrm, LiveIntervals &lis);
+
+  // Get an initialized query to check interferences between VirtReg and PhysReg. Note
+ // that Query::init must be called at least once for each physical register
+ // before querying a new live virtual register. This ties Queries and
+ // PhysReg2LiveUnion together.
+ LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned PhysReg) {
+ Queries[PhysReg].init(&VirtReg, &PhysReg2LiveUnion[PhysReg]);
+ return Queries[PhysReg];
+ }
+
+  // The top-level driver. The output is a VirtRegMap that is updated with
+ // physical register assignments.
+ //
+ // If an implementation wants to override the LiveInterval comparator, we
+ // should modify this interface to allow passing in an instance derived from
+ // LiveVirtRegQueue.
+ void allocatePhysRegs();
+
+ // Get a temporary reference to a Spiller instance.
+ virtual Spiller &spiller() = 0;
+
+ // getPriority - Calculate the allocation priority for VirtReg.
+ // Virtual registers with higher priorities are allocated first.
+ virtual float getPriority(LiveInterval *LI) = 0;
+
+ // A RegAlloc pass should override this to provide the allocation heuristics.
+  // Each call must guarantee forward progress by returning an available PhysReg
+ // or new set of split live virtual registers. It is up to the splitter to
+ // converge quickly toward fully spilled live ranges.
+ virtual unsigned selectOrSplit(LiveInterval &VirtReg,
+ SmallVectorImpl<LiveInterval*> &splitLVRs) = 0;
+
+ // A RegAlloc pass should call this when PassManager releases its memory.
+ virtual void releaseMemory();
+
+ // Helper for checking interference between a live virtual register and a
+ // physical register, including all its register aliases. If an interference
+  // exists, return the interfering register, which may be PhysReg or an alias.
+ unsigned checkPhysRegInterference(LiveInterval& VirtReg, unsigned PhysReg);
+
+ /// assign - Assign VirtReg to PhysReg.
+ /// This should not be called from selectOrSplit for the current register.
+ void assign(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// unassign - Undo a previous assignment of VirtReg to PhysReg.
+ /// This can be invoked from selectOrSplit, but be careful to guarantee that
+ /// allocation is making progress.
+ void unassign(LiveInterval &VirtReg, unsigned PhysReg);
+
+  // Helper for spilling all live virtual registers currently unified under
+  // PhysReg that interfere with VirtReg. Return true if spilling was
+  // successful, and append any new spilled/split intervals to SplitVRegs.
+ bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs);
+
+ /// addMBBLiveIns - Add physreg liveins to basic blocks.
+ void addMBBLiveIns(MachineFunction *);
+
+#ifndef NDEBUG
+ // Verify each LiveIntervalUnion.
+ void verify();
+#endif
+
+ // Use this group name for NamedRegionTimer.
+ static const char *TimerGroupName;
+
+public:
+ /// VerifyEnabled - True when -verify-regalloc is given.
+ static bool VerifyEnabled;
+
+private:
+ void seedLiveVirtRegs(std::priority_queue<std::pair<float, unsigned> >&);
+
+ void spillReg(LiveInterval &VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs);
+};
+
+} // end namespace llvm
+
+#endif // !defined(LLVM_CODEGEN_REGALLOCBASE)
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
new file mode 100644
index 0000000..045c8db
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
@@ -0,0 +1,523 @@
+//===-- RegAllocBasic.cpp - basic register allocator ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RABasic function pass, which provides a minimal
+// implementation of the basic register allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "LiveIntervalUnion.h"
+#include "RegAllocBase.h"
+#include "RenderMachineFunction.h"
+#include "Spiller.h"
+#include "VirtRegMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Function.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/RegisterCoalescer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#ifndef NDEBUG
+#include "llvm/ADT/SparseBitVector.h"
+#endif
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
+
+#include <cstdlib>
+
+using namespace llvm;
+
+STATISTIC(NumAssigned , "Number of registers assigned");
+STATISTIC(NumUnassigned , "Number of registers unassigned");
+STATISTIC(NumNewQueued , "Number of new live ranges queued");
+
+static RegisterRegAlloc basicRegAlloc("basic", "basic register allocator",
+ createBasicRegisterAllocator);
+
+// Temporary verification option until we can put verification inside
+// MachineVerifier.
+static cl::opt<bool, true>
+VerifyRegAlloc("verify-regalloc", cl::location(RegAllocBase::VerifyEnabled),
+ cl::desc("Verify during register allocation"));
+
+const char *RegAllocBase::TimerGroupName = "Register Allocation";
+bool RegAllocBase::VerifyEnabled = false;
+
+namespace {
+/// RABasic provides a minimal implementation of the basic register allocation
+/// algorithm. It prioritizes live virtual registers by spill weight and spills
+/// whenever a register is unavailable. This is not practical in production but
+/// provides a useful baseline both for measuring other allocators and comparing
+/// the speed of the basic algorithm against other styles of allocators.
+class RABasic : public MachineFunctionPass, public RegAllocBase
+{
+ // context
+ MachineFunction *MF;
+ BitVector ReservedRegs;
+
+ // analyses
+ LiveStacks *LS;
+ RenderMachineFunction *RMF;
+
+ // state
+ std::auto_ptr<Spiller> SpillerInstance;
+
+public:
+ RABasic();
+
+ /// Return the pass name.
+ virtual const char* getPassName() const {
+ return "Basic Register Allocator";
+ }
+
+ /// RABasic analysis usage.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual void releaseMemory();
+
+ virtual Spiller &spiller() { return *SpillerInstance; }
+
+ virtual float getPriority(LiveInterval *LI) { return LI->weight; }
+
+ virtual unsigned selectOrSplit(LiveInterval &VirtReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs);
+
+ /// Perform register allocation.
+ virtual bool runOnMachineFunction(MachineFunction &mf);
+
+ static char ID;
+};
+
+char RABasic::ID = 0;
+
+} // end anonymous namespace
+
+RABasic::RABasic(): MachineFunctionPass(ID) {
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+ initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+ initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
+ initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+ initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+ initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
+}
+
+void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
+ if (StrongPHIElim)
+ AU.addRequiredID(StrongPHIEliminationID);
+ AU.addRequiredTransitive<RegisterCoalescer>();
+ AU.addRequired<CalculateSpillWeights>();
+ AU.addRequired<LiveStacks>();
+ AU.addPreserved<LiveStacks>();
+ AU.addRequiredID(MachineDominatorsID);
+ AU.addPreservedID(MachineDominatorsID);
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<VirtRegMap>();
+ AU.addPreserved<VirtRegMap>();
+ DEBUG(AU.addRequired<RenderMachineFunction>());
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void RABasic::releaseMemory() {
+ SpillerInstance.reset(0);
+ RegAllocBase::releaseMemory();
+}
+
+#ifndef NDEBUG
+// Verify each LiveIntervalUnion.
+void RegAllocBase::verify() {
+ LiveVirtRegBitSet VisitedVRegs;
+ OwningArrayPtr<LiveVirtRegBitSet>
+ unionVRegs(new LiveVirtRegBitSet[PhysReg2LiveUnion.numRegs()]);
+
+ // Verify disjoint unions.
+ for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
+ DEBUG(PhysReg2LiveUnion[PhysReg].print(dbgs(), TRI));
+ LiveVirtRegBitSet &VRegs = unionVRegs[PhysReg];
+ PhysReg2LiveUnion[PhysReg].verify(VRegs);
+ // Union + intersection test could be done efficiently in one pass, but
+ // don't add a method to SparseBitVector unless we really need it.
+ assert(!VisitedVRegs.intersects(VRegs) && "vreg in multiple unions");
+ VisitedVRegs |= VRegs;
+ }
+
+ // Verify vreg coverage.
+ for (LiveIntervals::iterator liItr = LIS->begin(), liEnd = LIS->end();
+ liItr != liEnd; ++liItr) {
+ unsigned reg = liItr->first;
+ if (TargetRegisterInfo::isPhysicalRegister(reg)) continue;
+ if (!VRM->hasPhys(reg)) continue; // spilled?
+ unsigned PhysReg = VRM->getPhys(reg);
+ if (!unionVRegs[PhysReg].test(reg)) {
+ dbgs() << "LiveVirtReg " << reg << " not in union " <<
+ TRI->getName(PhysReg) << "\n";
+ llvm_unreachable("unallocated live vreg");
+ }
+ }
+ // FIXME: I'm not sure how to verify spilled intervals.
+}
+#endif //!NDEBUG
+
+//===----------------------------------------------------------------------===//
+// RegAllocBase Implementation
+//===----------------------------------------------------------------------===//
+
+// Instantiate a LiveIntervalUnion for each physical register.
+void RegAllocBase::LiveUnionArray::init(LiveIntervalUnion::Allocator &allocator,
+ unsigned NRegs) {
+ NumRegs = NRegs;
+ Array =
+ static_cast<LiveIntervalUnion*>(malloc(sizeof(LiveIntervalUnion)*NRegs));
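+ // Raw malloc'd storage: each union is constructed in place below, so clear()
+ // must invoke the destructors manually before freeing the buffer.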
+ for (unsigned r = 0; r != NRegs; ++r)
+ new(Array + r) LiveIntervalUnion(r, allocator);
+}
+
+void RegAllocBase::init(VirtRegMap &vrm, LiveIntervals &lis) {
+ NamedRegionTimer T("Initialize", TimerGroupName, TimePassesIsEnabled);
+ TRI = &vrm.getTargetRegInfo();
+ MRI = &vrm.getRegInfo();
+ VRM = &vrm;
+ LIS = &lis;
+ PhysReg2LiveUnion.init(UnionAllocator, TRI->getNumRegs());
+ // Cache an interference query for each physical reg.
+ Queries.reset(new LiveIntervalUnion::Query[PhysReg2LiveUnion.numRegs()]);
+}
+
+void RegAllocBase::LiveUnionArray::clear() {
+ if (!Array)
+ return;
+ for (unsigned r = 0; r != NumRegs; ++r)
+ Array[r].~LiveIntervalUnion();
+ free(Array);
+ NumRegs = 0;
+ Array = 0;
+}
+
+void RegAllocBase::releaseMemory() {
+ PhysReg2LiveUnion.clear();
+}
+
+// Visit all the live virtual registers. If they are already assigned to a
+// physical register, unify them with the corresponding LiveIntervalUnion,
+// otherwise push them on the priority queue for later assignment.
+void RegAllocBase::
+seedLiveVirtRegs(std::priority_queue<std::pair<float, unsigned> > &VirtRegQ) {
+ for (LiveIntervals::iterator I = LIS->begin(), E = LIS->end(); I != E; ++I) {
+ unsigned RegNum = I->first;
+ LiveInterval &VirtReg = *I->second;
+ if (TargetRegisterInfo::isPhysicalRegister(RegNum))
+ PhysReg2LiveUnion[RegNum].unify(VirtReg);
+ else
+ VirtRegQ.push(std::make_pair(getPriority(&VirtReg), RegNum));
+ }
+}
+
+void RegAllocBase::assign(LiveInterval &VirtReg, unsigned PhysReg) {
+ DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
+ << " to " << PrintReg(PhysReg, TRI) << '\n');
+ assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
+ VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
+ PhysReg2LiveUnion[PhysReg].unify(VirtReg);
+ ++NumAssigned;
+}
+
+void RegAllocBase::unassign(LiveInterval &VirtReg, unsigned PhysReg) {
+ DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
+ << " from " << PrintReg(PhysReg, TRI) << '\n');
+ assert(VRM->getPhys(VirtReg.reg) == PhysReg && "Inconsistent unassign");
+ PhysReg2LiveUnion[PhysReg].extract(VirtReg);
+ VRM->clearVirt(VirtReg.reg);
+ ++NumUnassigned;
+}
+
+// Top-level driver to manage the queue of unassigned VirtRegs and call the
+// selectOrSplit implementation.
+void RegAllocBase::allocatePhysRegs() {
+
+ // Push each vreg onto a queue or "precolor" by adding it to a physreg union.
+ std::priority_queue<std::pair<float, unsigned> > VirtRegQ;
+ seedLiveVirtRegs(VirtRegQ);
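+
+ // Note: std::pair orders lexicographically, so the max-heap pops the entry
+ // with the highest priority first (ties broken toward higher vreg numbers).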
+
+ // Continue assigning vregs one at a time to available physical registers.
+ while (!VirtRegQ.empty()) {
+ // Pop the highest priority vreg.
+ LiveInterval &VirtReg = LIS->getInterval(VirtRegQ.top().second);
+ VirtRegQ.pop();
+
+ // selectOrSplit requests the allocator to return an available physical
+ // register if possible and populate a list of new live intervals that
+ // result from splitting.
+ DEBUG(dbgs() << "\nselectOrSplit " << MRI->getRegClass(VirtReg.reg)->getName()
+ << ':' << VirtReg << '\n');
+ typedef SmallVector<LiveInterval*, 4> VirtRegVec;
+ VirtRegVec SplitVRegs;
+ unsigned AvailablePhysReg = selectOrSplit(VirtReg, SplitVRegs);
+
+ if (AvailablePhysReg)
+ assign(VirtReg, AvailablePhysReg);
+
+ for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
+ I != E; ++I) {
+ LiveInterval* SplitVirtReg = *I;
+ if (SplitVirtReg->empty()) continue;
+ DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
+ assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
+ "expect split value in virtual register");
+ VirtRegQ.push(std::make_pair(getPriority(SplitVirtReg),
+ SplitVirtReg->reg));
+ ++NumNewQueued;
+ }
+ }
+}
+
+// Check if this live virtual register interferes with a physical register. If
+// not, then check for interference on each register that aliases with the
+// physical register. Return the interfering register.
+unsigned RegAllocBase::checkPhysRegInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
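+ // TRI->getOverlaps(PhysReg) yields a zero-terminated list that includes
+ // PhysReg itself, so the register and all of its aliases are checked.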
+ for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
+ if (query(VirtReg, *AliasI).checkInterference())
+ return *AliasI;
+ return 0;
+}
+
+// Helper for spillInterferences() that spills all interfering vregs currently
+// assigned to this physical register.
+void RegAllocBase::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs) {
+ LiveIntervalUnion::Query &Q = query(VirtReg, PhysReg);
+ assert(Q.seenAllInterferences() && "need collectInterferingVRegs()");
+ const SmallVectorImpl<LiveInterval*> &PendingSpills = Q.interferingVRegs();
+
+ for (SmallVectorImpl<LiveInterval*>::const_iterator I = PendingSpills.begin(),
+ E = PendingSpills.end(); I != E; ++I) {
+ LiveInterval &SpilledVReg = **I;
+ DEBUG(dbgs() << "extracting from " <<
+ TRI->getName(PhysReg) << " " << SpilledVReg << '\n');
+
+ // Deallocate the interfering vreg by removing it from the union.
+ // A LiveInterval instance may not be in a union during modification!
+ unassign(SpilledVReg, PhysReg);
+
+ // Spill the extracted interval.
+ spiller().spill(&SpilledVReg, SplitVRegs, PendingSpills);
+ }
+ // After extracting segments, the query's results are invalid. But keep the
+ // contents valid until we're done accessing PendingSpills.
+ Q.clear();
+}
+
+// Spill or split all live virtual registers currently unified under PhysReg
+// that interfere with VirtReg. The newly spilled or split live intervals are
+// returned by appending them to SplitVRegs.
+bool
+RegAllocBase::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs) {
+ // Record each interference and determine if all are spillable before mutating
+ // either the union or live intervals.
+ unsigned NumInterferences = 0;
+ // Collect interferences assigned to any alias of the physical register.
+ for (const unsigned *asI = TRI->getOverlaps(PhysReg); *asI; ++asI) {
+ LiveIntervalUnion::Query &QAlias = query(VirtReg, *asI);
+ NumInterferences += QAlias.collectInterferingVRegs();
+ if (QAlias.seenUnspillableVReg()) {
+ return false;
+ }
+ }
+ DEBUG(dbgs() << "spilling " << TRI->getName(PhysReg) <<
+ " interferences with " << VirtReg << "\n");
+ assert(NumInterferences > 0 && "expect interference");
+
+ // Spill each interfering vreg allocated to PhysReg or an alias.
+ for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
+ spillReg(VirtReg, *AliasI, SplitVRegs);
+ return true;
+}
+
+// Add newly allocated physical registers to the MBB live in sets.
+void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
+ NamedRegionTimer T("MBB Live Ins", TimerGroupName, TimePassesIsEnabled);
+ typedef SmallVector<MachineBasicBlock*, 8> MBBVec;
+ MBBVec liveInMBBs;
+ MachineBasicBlock &entryMBB = *MF->begin();
+
+ for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
+ LiveIntervalUnion &LiveUnion = PhysReg2LiveUnion[PhysReg];
+ if (LiveUnion.empty())
+ continue;
+ for (LiveIntervalUnion::SegmentIter SI = LiveUnion.begin(); SI.valid();
+ ++SI) {
+
+ // Find the set of basic blocks which this range is live into...
+ liveInMBBs.clear();
+ if (!LIS->findLiveInMBBs(SI.start(), SI.stop(), liveInMBBs)) continue;
+
+ // And add the physreg for this interval to their live-in sets.
+ for (MBBVec::iterator I = liveInMBBs.begin(), E = liveInMBBs.end();
+ I != E; ++I) {
+ MachineBasicBlock *MBB = *I;
+ if (MBB == &entryMBB) continue;
+ if (MBB->isLiveIn(PhysReg)) continue;
+ MBB->addLiveIn(PhysReg);
+ }
+ }
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// RABasic Implementation
+//===----------------------------------------------------------------------===//
+
+// Driver for the register assignment and splitting heuristics.
+// Manages iteration over the LiveIntervalUnions.
+//
+// This is a minimal implementation of register assignment and splitting that
+// spills whenever we run out of registers.
+//
+// selectOrSplit can only be called once per live virtual register. We then do
+// a single interference test for each register of the correct class until we
+// find an available register. So, the number of interference tests in the
+// worst case is |vregs| * |machineregs|. And since the number of interference
+// tests is minimal, there is no value in caching them outside the scope of
+// selectOrSplit().
+unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs) {
+ // Populate a list of physical register spill candidates.
+ SmallVector<unsigned, 8> PhysRegSpillCands;
+
+ // Check for an available register in this class.
+ const TargetRegisterClass *TRC = MRI->getRegClass(VirtReg.reg);
+
+ for (TargetRegisterClass::iterator I = TRC->allocation_order_begin(*MF),
+ E = TRC->allocation_order_end(*MF);
+ I != E; ++I) {
+
+ unsigned PhysReg = *I;
+ if (ReservedRegs.test(PhysReg)) continue;
+
+ // Check interference and, as a side effect, initialize queries for this
+ // VirtReg and its aliases.
+ unsigned interfReg = checkPhysRegInterference(VirtReg, PhysReg);
+ if (interfReg == 0) {
+ // Found an available register.
+ return PhysReg;
+ }
+ LiveInterval *interferingVirtReg =
+ Queries[interfReg].firstInterference().liveUnionPos().value();
+
+ // The current VirtReg must either be spillable, or one of its interferences
+ // must have less spill weight.
+ if (interferingVirtReg->weight < VirtReg.weight) {
+ PhysRegSpillCands.push_back(PhysReg);
+ }
+ }
+ // Try to spill another interfering reg with less spill weight.
+ for (SmallVectorImpl<unsigned>::iterator PhysRegI = PhysRegSpillCands.begin(),
+ PhysRegE = PhysRegSpillCands.end(); PhysRegI != PhysRegE; ++PhysRegI) {
+
+ if (!spillInterferences(VirtReg, *PhysRegI, SplitVRegs)) continue;
+
+ assert(checkPhysRegInterference(VirtReg, *PhysRegI) == 0 &&
+ "Interference after spill.");
+ // Tell the caller to allocate to this newly freed physical register.
+ return *PhysRegI;
+ }
+ // No other spill candidates were found, so spill the current VirtReg.
+ DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
+ SmallVector<LiveInterval*, 1> pendingSpills;
+
+ spiller().spill(&VirtReg, SplitVRegs, pendingSpills);
+
+ // The live virtual register requesting allocation was spilled, so tell
+ // the caller not to allocate anything during this round.
+ return 0;
+}
+
+bool RABasic::runOnMachineFunction(MachineFunction &mf) {
+ DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n"
+ << "********** Function: "
+ << ((Value*)mf.getFunction())->getName() << '\n');
+
+ MF = &mf;
+ DEBUG(RMF = &getAnalysis<RenderMachineFunction>());
+
+ RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
+
+ ReservedRegs = TRI->getReservedRegs(*MF);
+
+ SpillerInstance.reset(createSpiller(*this, *MF, *VRM));
+
+ allocatePhysRegs();
+
+ addMBBLiveIns(MF);
+
+ // Diagnostic output before rewriting
+ DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");
+
+ // optional HTML output
+ DEBUG(RMF->renderMachineFunction("After basic register allocation.", VRM));
+
+ // FIXME: Verification currently must run before VirtRegRewriter. We should
+ // make the rewriter a separate pass and override verifyAnalysis instead. When
+ // that happens, verification naturally falls under VerifyMachineCode.
+#ifndef NDEBUG
+ if (VerifyEnabled) {
+ // Verify accuracy of LiveIntervals. The standard machine code verifier
+ // ensures that each LiveIntervals covers all uses of the virtual reg.
+
+ // FIXME: MachineVerifier is badly broken when using the standard
+ // spiller. Always use -spiller=inline with -verify-regalloc. Even with the
+ // inline spiller, some tests fail to verify because the coalescer does not
+ // always generate verifiable code.
+ MF->verify(this, "In RABasic::verify");
+
+ // Verify that LiveIntervals are partitioned into unions and disjoint within
+ // the unions.
+ verify();
+ }
+#endif // !NDEBUG
+
+ // Run rewriter
+ VRM->rewrite(LIS->getSlotIndexes());
+
+ // The pass output is in VirtRegMap. Release all the transient data.
+ releaseMemory();
+
+ return true;
+}
+
+FunctionPass* llvm::createBasicRegisterAllocator()
+{
+ return new RABasic();
+}
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index fc150d5..15036e3 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -48,7 +48,10 @@ namespace {
public:
static char ID;
RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1),
- isBulkSpilling(false) {}
+ isBulkSpilling(false) {
+ initializePHIEliminationPass(*PassRegistry::getPassRegistry());
+ initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
+ }
private:
const TargetMachine *TM;
MachineFunction *MF;
@@ -259,8 +262,8 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
// instruction, not on the spill.
bool SpillKill = LR.LastUse != MI;
LR.Dirty = false;
- DEBUG(dbgs() << "Spilling %reg" << LRI->first
- << " in " << TRI->getName(LR.PhysReg));
+ DEBUG(dbgs() << "Spilling " << PrintReg(LRI->first, TRI)
+ << " in " << PrintReg(LR.PhysReg, TRI));
const TargetRegisterClass *RC = MRI->getRegClass(LRI->first);
int FI = getStackSpaceFor(LRI->first, RC);
DEBUG(dbgs() << " to stack slot #" << FI << "\n");
@@ -331,7 +334,7 @@ void RAFast::usePhysReg(MachineOperand &MO) {
MO.setIsKill();
return;
default:
- // The physreg was allocated to a virtual register. That means to value we
+ // The physreg was allocated to a virtual register. That means the value we
// wanted has been clobbered.
llvm_unreachable("Instruction uses an allocated register");
}
@@ -458,8 +461,8 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
/// register must not be used for anything else when this is called.
///
void RAFast::assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg) {
- DEBUG(dbgs() << "Assigning %reg" << LRE.first << " to "
- << TRI->getName(PhysReg) << "\n");
+ DEBUG(dbgs() << "Assigning " << PrintReg(LRE.first, TRI) << " to "
+ << PrintReg(PhysReg, TRI) << "\n");
PhysRegState[PhysReg] = LRE.first;
assert(!LRE.second.PhysReg && "Already assigned a physreg");
LRE.second.PhysReg = PhysReg;
@@ -503,8 +506,8 @@ void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
return assignVirtToPhysReg(LRE, PhysReg);
}
- DEBUG(dbgs() << "Allocating %reg" << VirtReg << " from " << RC->getName()
- << "\n");
+ DEBUG(dbgs() << "Allocating " << PrintReg(VirtReg) << " from "
+ << RC->getName() << "\n");
unsigned BestReg = 0, BestCost = spillImpossible;
for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) {
@@ -584,8 +587,8 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
allocVirtReg(MI, *LRI, Hint);
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
int FrameIndex = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << "Reloading %reg" << VirtReg << " into "
- << TRI->getName(LR.PhysReg) << "\n");
+ DEBUG(dbgs() << "Reloading " << PrintReg(VirtReg, TRI) << " into "
+ << PrintReg(LR.PhysReg, TRI) << "\n");
TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FrameIndex, RC, TRI);
++NumLoads;
} else if (LR.Dirty) {
@@ -653,11 +656,12 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
(MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
if (ThroughRegs.insert(Reg))
- DEBUG(dbgs() << " %reg" << Reg);
+ DEBUG(dbgs() << ' ' << PrintReg(Reg));
}
}
@@ -685,7 +689,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
unsigned DefIdx = 0;
if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
@@ -731,6 +735,27 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
void RAFast::AllocateBasicBlock() {
DEBUG(dbgs() << "\nAllocating " << *MBB);
+ // FIXME: This should probably be added by instruction selection instead?
+ // If the last instruction in the block is a return, make sure to mark it as
+ // using all of the live-out values in the function. Things marked both call
+ // and return are tail calls; do not do this for them. The tail callee need
+ // not take the same registers as input that it produces as output, and there
+ // are dependencies for its input registers elsewhere.
+ if (!MBB->empty() && MBB->back().getDesc().isReturn() &&
+ !MBB->back().getDesc().isCall()) {
+ MachineInstr *Ret = &MBB->back();
+
+ for (MachineRegisterInfo::liveout_iterator
+ I = MF->getRegInfo().liveout_begin(),
+ E = MF->getRegInfo().liveout_end(); I != E; ++I) {
+ assert(TargetRegisterInfo::isPhysicalRegister(*I) &&
+ "Cannot have a live-out virtual register.");
+
+ // Add live-out registers as implicit uses.
+ Ret->addRegisterKilled(*I, TRI, true);
+ }
+ }
+
PhysRegState.assign(TRI->getNumRegs(), regDisabled);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
@@ -761,7 +786,7 @@ void RAFast::AllocateBasicBlock() {
dbgs() << "*";
break;
default:
- dbgs() << "=%reg" << PhysRegState[Reg];
+ dbgs() << '=' << PrintReg(PhysRegState[Reg]);
if (LiveVirtRegs[PhysRegState[Reg]].Dirty)
dbgs() << "*";
assert(LiveVirtRegs[PhysRegState[Reg]].PhysReg == Reg &&
@@ -791,16 +816,18 @@ void RAFast::AllocateBasicBlock() {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
LiveDbgValueMap[Reg] = MI;
LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
if (LRI != LiveVirtRegs.end())
setPhysReg(MI, i, LRI->second.PhysReg);
else {
int SS = StackSlotForVirtReg[Reg];
- if (SS == -1)
+ if (SS == -1) {
// We can't allocate a physreg for a DebugValue, sorry!
+ DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
MO.setReg(0);
+ }
else {
// Modify DBG_VALUE now that the value is in a spill slot.
int64_t Offset = MI->getOperand(1).getImm();
@@ -817,9 +844,11 @@ void RAFast::AllocateBasicBlock() {
MI = NewDV;
ScanDbgValue = true;
break;
- } else
+ } else {
// We can't allocate a physreg for a DebugValue; sorry!
+ DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
MO.setReg(0);
+ }
}
}
}
@@ -902,7 +931,7 @@ void RAFast::AllocateBasicBlock() {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, CopyDst);
unsigned PhysReg = LRI->second.PhysReg;
@@ -1017,8 +1046,7 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
// initialize the virtual->physical register map to have a 'null'
// mapping for all virtual registers
- unsigned LastVirtReg = MRI->getLastVirtReg();
- StackSlotForVirtReg.grow(LastVirtReg);
+ StackSlotForVirtReg.resize(MRI->getNumVirtRegs());
// Loop over all of the basic blocks, eliminating virtual register references
for (MachineFunction::iterator MBBi = Fn.begin(), MBBe = Fn.end();
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
new file mode 100644
index 0000000..c1372cd
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -0,0 +1,1285 @@
+//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RAGreedy function pass for register allocation in
+// optimized builds.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "AllocationOrder.h"
+#include "LiveIntervalUnion.h"
+#include "LiveRangeEdit.h"
+#include "RegAllocBase.h"
+#include "Spiller.h"
+#include "SpillPlacement.h"
+#include "SplitKit.h"
+#include "VirtRegMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Function.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/EdgeBundles.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineLoopRanges.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/RegisterCoalescer.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
+
+using namespace llvm;
+
+STATISTIC(NumGlobalSplits, "Number of split global live ranges");
+STATISTIC(NumLocalSplits, "Number of split local live ranges");
+STATISTIC(NumReassigned, "Number of interferences reassigned");
+STATISTIC(NumEvicted, "Number of interferences evicted");
+
+static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
+ createGreedyRegisterAllocator);
+
+namespace {
+class RAGreedy : public MachineFunctionPass, public RegAllocBase {
+ // context
+ MachineFunction *MF;
+ BitVector ReservedRegs;
+
+ // analyses
+ SlotIndexes *Indexes;
+ LiveStacks *LS;
+ MachineDominatorTree *DomTree;
+ MachineLoopInfo *Loops;
+ MachineLoopRanges *LoopRanges;
+ EdgeBundles *Bundles;
+ SpillPlacement *SpillPlacer;
+
+ // state
+ std::auto_ptr<Spiller> SpillerInstance;
+ std::auto_ptr<SplitAnalysis> SA;
+
+ // splitting state.
+
+ /// All basic blocks where the current register is live.
+ SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;
+
+ /// For every instruction in SA->UseSlots, store the previous non-copy
+ /// instruction.
+ SmallVector<SlotIndex, 8> PrevSlot;
+
+public:
+ RAGreedy();
+
+ /// Return the pass name.
+ virtual const char* getPassName() const {
+ return "Greedy Register Allocator";
+ }
+
+ /// RAGreedy analysis usage.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual void releaseMemory();
+
+ virtual Spiller &spiller() { return *SpillerInstance; }
+
+ virtual float getPriority(LiveInterval *LI);
+
+ virtual unsigned selectOrSplit(LiveInterval&,
+ SmallVectorImpl<LiveInterval*>&);
+
+ /// Perform register allocation.
+ virtual bool runOnMachineFunction(MachineFunction &mf);
+
+ static char ID;
+
+private:
+ bool checkUncachedInterference(LiveInterval&, unsigned);
+ LiveInterval *getSingleInterference(LiveInterval&, unsigned);
+ bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
+ float calcInterferenceWeight(LiveInterval&, unsigned);
+ float calcInterferenceInfo(LiveInterval&, unsigned);
+ float calcGlobalSplitCost(const BitVector&);
+ void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
+ SmallVectorImpl<LiveInterval*>&);
+ void calcGapWeights(unsigned, SmallVectorImpl<float>&);
+ SlotIndex getPrevMappedIndex(const MachineInstr*);
+ void calcPrevSlots();
+ unsigned nextSplitPoint(unsigned);
+
+ unsigned tryReassignOrEvict(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
+ unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
+ unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
+ unsigned trySplit(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
+ unsigned trySpillInterferences(LiveInterval&, AllocationOrder&,
+ SmallVectorImpl<LiveInterval*>&);
+};
+} // end anonymous namespace
+
+char RAGreedy::ID = 0;
+
+FunctionPass* llvm::createGreedyRegisterAllocator() {
+ return new RAGreedy();
+}
+
+RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
+ initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+ initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
+ initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+ initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
+ initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+ initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
+ initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
+}
+
+void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
+ AU.addRequired<LiveIntervals>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ if (StrongPHIElim)
+ AU.addRequiredID(StrongPHIEliminationID);
+ AU.addRequiredTransitive<RegisterCoalescer>();
+ AU.addRequired<CalculateSpillWeights>();
+ AU.addRequired<LiveStacks>();
+ AU.addPreserved<LiveStacks>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<MachineLoopRanges>();
+ AU.addPreserved<MachineLoopRanges>();
+ AU.addRequired<VirtRegMap>();
+ AU.addPreserved<VirtRegMap>();
+ AU.addRequired<EdgeBundles>();
+ AU.addRequired<SpillPlacement>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void RAGreedy::releaseMemory() {
+ SpillerInstance.reset(0);
+ RegAllocBase::releaseMemory();
+}
+
+float RAGreedy::getPriority(LiveInterval *LI) {
+ float Priority = LI->weight;
+
+ // Prioritize hinted registers so they are allocated first.
+ std::pair<unsigned, unsigned> Hint;
+ if (Hint.first || Hint.second) {
+ // The hint can be target specific, a virtual register, or a physreg.
+ Priority *= 2;
+
+ // Prefer physreg hints above anything else.
+ if (Hint.first == 0 && TargetRegisterInfo::isPhysicalRegister(Hint.second))
+ Priority *= 2;
+ }
+ return Priority;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Register Reassignment
+//===----------------------------------------------------------------------===//
+
+// Check interference without using the cache.
+bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
+ for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+ LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
+ if (subQ.checkInterference())
+ return true;
+ }
+ return false;
+}
+
+/// getSingleInterference - Return the single interfering virtual register
+/// assigned to PhysReg. Return 0 if more than one virtual register is
+/// interfering.
+LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
+ // Check physreg and aliases.
+ LiveInterval *Interference = 0;
+ for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+ LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
+ if (Q.checkInterference()) {
+ if (Interference)
+ return 0;
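+      // Collect at most one interfering vreg; if the query has not then seen
+      // all interferences, more than one vreg interferes on this alias.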
+ Q.collectInterferingVRegs(1);
+ if (!Q.seenAllInterferences())
+ return 0;
+ Interference = Q.interferingVRegs().front();
+ }
+ }
+ return Interference;
+}
+
+// Attempt to reassign this virtual register to a different physical register.
+//
+// FIXME: we are not yet caching these "second-level" interferences discovered
+// in the sub-queries. These interferences can change with each call to
+// selectOrSplit. However, we could implement a "may-interfere" cache that
+// could be conservatively dirtied when we reassign or split.
+//
+// FIXME: This may result in a lot of alias queries. We could summarize alias
+// live intervals in their parent register's live union, but it's messy.
+bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
+ unsigned WantedPhysReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
+ "Can only reassign virtual registers");
+ assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
+ "inconsistent phys reg assigment");
+
+ AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
+ while (unsigned PhysReg = Order.next()) {
+ // Don't reassign to a WantedPhysReg alias.
+ if (TRI->regsOverlap(PhysReg, WantedPhysReg))
+ continue;
+
+ if (checkUncachedInterference(InterferingVReg, PhysReg))
+ continue;
+
+ // Reassign the interfering virtual reg to this physical reg.
+ unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
+ DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
+ TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
+ unassign(InterferingVReg, OldAssign);
+ assign(InterferingVReg, PhysReg);
+ ++NumReassigned;
+ return true;
+ }
+ return false;
+}
+
+/// tryReassignOrEvict - Try to reassign a single interference to a different
+/// physreg, or evict a single interference with a lower spill weight.
+/// @param VirtReg Currently unassigned virtual register.
+/// @param Order Physregs to try.
+/// @return Physreg to assign VirtReg, or 0.
+unsigned RAGreedy::tryReassignOrEvict(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs){
+ NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);
+
+ // Keep track of the lightest single interference seen so far.
+ float BestWeight = VirtReg.weight;
+ LiveInterval *BestVirt = 0;
+ unsigned BestPhys = 0;
+
+ Order.rewind();
+ while (unsigned PhysReg = Order.next()) {
+ LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
+ if (!InterferingVReg)
+ continue;
+ if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
+ continue;
+ if (reassignVReg(*InterferingVReg, PhysReg))
+ return PhysReg;
+
+ // Cannot reassign, is this an eviction candidate?
+ if (InterferingVReg->weight < BestWeight) {
+ BestVirt = InterferingVReg;
+ BestPhys = PhysReg;
+ BestWeight = InterferingVReg->weight;
+ }
+ }
+
+ // Nothing reassigned, can we evict a lighter single interference?
+ if (BestVirt) {
+ DEBUG(dbgs() << "evicting lighter " << *BestVirt << '\n');
+ unassign(*BestVirt, VRM->getPhys(BestVirt->reg));
+ ++NumEvicted;
+ NewVRegs.push_back(BestVirt);
+ return BestPhys;
+ }
+
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Region Splitting
+//===----------------------------------------------------------------------===//
+
+/// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
+/// when considering interference from PhysReg. Also compute an optimistic local
+/// cost of this interference pattern.
+///
+/// The final cost of a split is the local cost + global cost of preferences
+/// broken by SpillPlacement.
+///
+float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
+ // Reset interference dependent info.
+ SpillConstraints.resize(SA->LiveBlocks.size());
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
+ BC.Number = BI.MBB->getNumber();
+ BC.Entry = (BI.Uses && BI.LiveIn) ?
+ SpillPlacement::PrefReg : SpillPlacement::DontCare;
+ BC.Exit = (BI.Uses && BI.LiveOut) ?
+ SpillPlacement::PrefReg : SpillPlacement::DontCare;
+ BI.OverlapEntry = BI.OverlapExit = false;
+ }
+
+ // Add interference info from each PhysReg alias.
+ for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
+ if (!query(VirtReg, *AI).checkInterference())
+ continue;
+ LiveIntervalUnion::SegmentIter IntI =
+ PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
+ if (!IntI.valid())
+ continue;
+
+ // Determine which blocks have interference live in or after the last split
+ // point.
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
+
+ // Skip interference-free blocks.
+ if (IntI.start() >= Stop)
+ continue;
+
+ // Is the interference live-in?
+ if (BI.LiveIn) {
+ IntI.advanceTo(Start);
+ if (!IntI.valid())
+ break;
+ if (IntI.start() <= Start)
+ BC.Entry = SpillPlacement::MustSpill;
+ }
+
+ // Is the interference overlapping the last split point?
+ if (BI.LiveOut) {
+ if (IntI.stop() < BI.LastSplitPoint)
+ IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
+ if (!IntI.valid())
+ break;
+ if (IntI.start() < Stop)
+ BC.Exit = SpillPlacement::MustSpill;
+ }
+ }
+
+ // Rewind iterator and check other interferences.
+ IntI.find(VirtReg.beginIndex());
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
+
+ // Skip interference-free blocks.
+ if (IntI.start() >= Stop)
+ continue;
+
+ // Handle transparent blocks with interference separately.
+ // Transparent blocks never incur any fixed cost.
+ if (BI.LiveThrough && !BI.Uses) {
+ IntI.advanceTo(Start);
+ if (!IntI.valid())
+ break;
+ if (IntI.start() >= Stop)
+ continue;
+
+ if (BC.Entry != SpillPlacement::MustSpill)
+ BC.Entry = SpillPlacement::PrefSpill;
+ if (BC.Exit != SpillPlacement::MustSpill)
+ BC.Exit = SpillPlacement::PrefSpill;
+ continue;
+ }
+
+ // Now we only have blocks with uses left.
+ // Check if the interference overlaps the uses.
+ assert(BI.Uses && "Non-transparent block without any uses");
+
+ // Check interference on entry.
+ if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
+ IntI.advanceTo(Start);
+ if (!IntI.valid())
+ break;
+ // Not live in, but before the first use.
+ if (IntI.start() < BI.FirstUse)
+ BC.Entry = SpillPlacement::PrefSpill;
+ }
+
+ // Does interference overlap the uses in the entry segment
+ // [FirstUse;Kill)?
+ if (BI.LiveIn && !BI.OverlapEntry) {
+ IntI.advanceTo(BI.FirstUse);
+ if (!IntI.valid())
+ break;
+ // A live-through interval has no kill.
+ // Check [FirstUse;LastUse) instead.
+ if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
+ BI.OverlapEntry = true;
+ }
+
+ // Does interference overlap the uses in the exit segment [Def;LastUse)?
+ if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
+ IntI.advanceTo(BI.Def);
+ if (!IntI.valid())
+ break;
+ if (IntI.start() < BI.LastUse)
+ BI.OverlapExit = true;
+ }
+
+ // Check interference on exit.
+ if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
+ // Check interference between LastUse and Stop.
+ if (BC.Exit != SpillPlacement::PrefSpill) {
+ IntI.advanceTo(BI.LastUse);
+ if (!IntI.valid())
+ break;
+ if (IntI.start() < Stop)
+ BC.Exit = SpillPlacement::PrefSpill;
+ }
+ }
+ }
+ }
+
+ // Accumulate a local cost of this interference pattern.
+ float LocalCost = 0;
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ if (!BI.Uses)
+ continue;
+ SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
+ unsigned Inserts = 0;
+
+ // Do we need spill code for the entry segment?
+ if (BI.LiveIn)
+ Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;
+
+ // For the exit segment?
+ if (BI.LiveOut)
+ Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;
+
+ // The local cost of spill code in this block is the block frequency times
+ // the number of spill instructions inserted.
+ if (Inserts)
+ LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
+ }
+ DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
+ << LocalCost << '\n');
+ return LocalCost;
+}
+
+/// calcGlobalSplitCost - Return the global split cost of following the split
+/// pattern in LiveBundles. This cost should be added to the local cost of the
+/// interference pattern in SpillConstraints.
+///
+float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
+ float GlobalCost = 0;
+ for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
+ SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
+ unsigned Inserts = 0;
+ // Broken entry preference?
+ Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
+ (BC.Entry == SpillPlacement::PrefReg);
+ // Broken exit preference?
+ Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
+ (BC.Exit == SpillPlacement::PrefReg);
+ if (Inserts)
+ GlobalCost +=
+ Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
+ }
+ DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
+ return GlobalCost;
+}
+
+/// splitAroundRegion - Split VirtReg around the region determined by
+/// LiveBundles. Make an effort to avoid interference from PhysReg.
+///
+/// The 'register' interval is going to contain as many uses as possible while
+/// avoiding interference. The 'stack' interval is the complement constructed by
+/// SplitEditor. It will contain the rest.
+///
+void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
+ const BitVector &LiveBundles,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ DEBUG({
+ dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
+ << " with bundles";
+ for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
+ dbgs() << " EB#" << i;
+ dbgs() << ".\n";
+ });
+
+ // First compute interference ranges in the live blocks.
+ typedef std::pair<SlotIndex, SlotIndex> IndexPair;
+ SmallVector<IndexPair, 8> InterferenceRanges;
+ InterferenceRanges.resize(SA->LiveBlocks.size());
+ for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
+ if (!query(VirtReg, *AI).checkInterference())
+ continue;
+ LiveIntervalUnion::SegmentIter IntI =
+ PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
+ if (!IntI.valid())
+ continue;
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ IndexPair &IP = InterferenceRanges[i];
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
+ // Skip interference-free blocks.
+ if (IntI.start() >= Stop)
+ continue;
+
+ // First interference in block.
+ if (BI.LiveIn) {
+ IntI.advanceTo(Start);
+ if (!IntI.valid())
+ break;
+ if (IntI.start() >= Stop)
+ continue;
+ if (!IP.first.isValid() || IntI.start() < IP.first)
+ IP.first = IntI.start();
+ }
+
+ // Last interference in block.
+ if (BI.LiveOut) {
+ IntI.advanceTo(Stop);
+ if (!IntI.valid() || IntI.start() >= Stop)
+ --IntI;
+ if (IntI.stop() <= Start)
+ continue;
+ if (!IP.second.isValid() || IntI.stop() > IP.second)
+ IP.second = IntI.stop();
+ }
+ }
+ }
+
+ SmallVector<LiveInterval*, 4> SpillRegs;
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
+ SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
+
+ // Create the main cross-block interval.
+ SE.openIntv();
+
+ // First add all defs that are live out of a block.
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
+ bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
+
+ // Should the register be live out?
+ if (!BI.LiveOut || !RegOut)
+ continue;
+
+ IndexPair &IP = InterferenceRanges[i];
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
+
+ DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
+ << Bundles->getBundle(BI.MBB->getNumber(), 1)
+ << " intf [" << IP.first << ';' << IP.second << ')');
+
+ // The interference interval should either be invalid or overlap MBB.
+ assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
+ assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");
+
+ // Check interference leaving the block.
+ if (!IP.second.isValid()) {
+ // Block is interference-free.
+ DEBUG(dbgs() << ", no interference");
+ if (!BI.Uses) {
+ assert(BI.LiveThrough && "No uses, but not live through block?");
+ // Block is live-through without interference.
+ DEBUG(dbgs() << ", no uses"
+ << (RegIn ? ", live-through.\n" : ", stack in.\n"));
+ if (!RegIn)
+ SE.enterIntvAtEnd(*BI.MBB);
+ continue;
+ }
+ if (!BI.LiveThrough) {
+ DEBUG(dbgs() << ", not live-through.\n");
+ SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
+ continue;
+ }
+ if (!RegIn) {
+ // Block is live-through, but entry bundle is on the stack.
+ // Reload just before the first use.
+ DEBUG(dbgs() << ", not live-in, enter before first use.\n");
+ SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
+ continue;
+ }
+ DEBUG(dbgs() << ", live-through.\n");
+ continue;
+ }
+
+ // Block has interference.
+ DEBUG(dbgs() << ", interference to " << IP.second);
+
+ if (!BI.LiveThrough && IP.second <= BI.Def) {
+ // The interference doesn't reach the outgoing segment.
+ DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
+ SE.useIntv(BI.Def, Stop);
+ continue;
+ }
+
+
+ if (!BI.Uses) {
+ // No uses in block, avoid interference by reloading as late as possible.
+ DEBUG(dbgs() << ", no uses.\n");
+ SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
+ assert(SegStart >= IP.second && "Couldn't avoid interference");
+ continue;
+ }
+
+ if (IP.second.getBoundaryIndex() < BI.LastUse) {
+ // There are interference-free uses at the end of the block.
+ // Find the first use that can get the live-out register.
+ SmallVectorImpl<SlotIndex>::const_iterator UI =
+ std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
+ IP.second.getBoundaryIndex());
+ assert(UI != SA->UseSlots.end() && "Couldn't find last use");
+ SlotIndex Use = *UI;
+ assert(Use <= BI.LastUse && "Couldn't find last use");
+ // Only attempt a split before the last split point.
+ if (Use.getBaseIndex() <= BI.LastSplitPoint) {
+ DEBUG(dbgs() << ", free use at " << Use << ".\n");
+ SlotIndex SegStart = SE.enterIntvBefore(Use);
+ assert(SegStart >= IP.second && "Couldn't avoid interference");
+ assert(SegStart < BI.LastSplitPoint && "Impossible split point");
+ SE.useIntv(SegStart, Stop);
+ continue;
+ }
+ }
+
+ // Interference is after the last use.
+ DEBUG(dbgs() << " after last use.\n");
+ SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
+ assert(SegStart >= IP.second && "Couldn't avoid interference");
+ }
+
+ // Now all defs leading to live bundles are handled, do everything else.
+ for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
+ SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
+ bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
+ bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
+
+ // Is the register live-in?
+ if (!BI.LiveIn || !RegIn)
+ continue;
+
+ // We have an incoming register. Check for interference.
+ IndexPair &IP = InterferenceRanges[i];
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
+
+ DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
+ << " -> BB#" << BI.MBB->getNumber());
+
+ // Check interference entering the block.
+ if (!IP.first.isValid()) {
+ // Block is interference-free.
+ DEBUG(dbgs() << ", no interference");
+ if (!BI.Uses) {
+ assert(BI.LiveThrough && "No uses, but not live through block?");
+ // Block is live-through without interference.
+ if (RegOut) {
+ DEBUG(dbgs() << ", no uses, live-through.\n");
+ SE.useIntv(Start, Stop);
+ } else {
+ DEBUG(dbgs() << ", no uses, stack-out.\n");
+ SE.leaveIntvAtTop(*BI.MBB);
+ }
+ continue;
+ }
+ if (!BI.LiveThrough) {
+ DEBUG(dbgs() << ", killed in block.\n");
+ SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
+ continue;
+ }
+ if (!RegOut) {
+ // Block is live-through, but exit bundle is on the stack.
+ // Spill immediately after the last use.
+ if (BI.LastUse < BI.LastSplitPoint) {
+ DEBUG(dbgs() << ", uses, stack-out.\n");
+ SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
+ continue;
+ }
+ // The last use is after the last split point; it is probably an
+ // indirect jump.
+ DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
+ << BI.LastSplitPoint << ", stack-out.\n");
+ SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
+ SE.useIntv(Start, SegEnd);
+ // Run a double interval from the split to the last use.
+ // This makes it possible to spill the complement without affecting the
+ // indirect branch.
+ SE.overlapIntv(SegEnd, BI.LastUse);
+ continue;
+ }
+ // Register is live-through.
+ DEBUG(dbgs() << ", uses, live-through.\n");
+ SE.useIntv(Start, Stop);
+ continue;
+ }
+
+ // Block has interference.
+ DEBUG(dbgs() << ", interference from " << IP.first);
+
+ if (!BI.LiveThrough && IP.first >= BI.Kill) {
+ // The interference doesn't reach the outgoing segment.
+ DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
+ SE.useIntv(Start, BI.Kill);
+ continue;
+ }
+
+ if (!BI.Uses) {
+ // No uses in block, avoid interference by spilling as soon as possible.
+ DEBUG(dbgs() << ", no uses.\n");
+ SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
+ assert(SegEnd <= IP.first && "Couldn't avoid interference");
+ continue;
+ }
+ if (IP.first.getBaseIndex() > BI.FirstUse) {
+ // There are interference-free uses at the beginning of the block.
+ // Find the last use that can get the register.
+ SmallVectorImpl<SlotIndex>::const_iterator UI =
+ std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
+ IP.first.getBaseIndex());
+ assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
+ SlotIndex Use = (--UI)->getBoundaryIndex();
+ DEBUG(dbgs() << ", free use at " << *UI << ".\n");
+ SlotIndex SegEnd = SE.leaveIntvAfter(Use);
+ assert(SegEnd <= IP.first && "Couldn't avoid interference");
+ SE.useIntv(Start, SegEnd);
+ continue;
+ }
+
+ // Interference is before the first use.
+ DEBUG(dbgs() << " before first use.\n");
+ SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
+ assert(SegEnd <= IP.first && "Couldn't avoid interference");
+ }
+
+ SE.closeIntv();
+
+ // FIXME: Should we be more aggressive about splitting the stack region into
+ // per-block segments? The current approach allows the stack region to
+ // separate into connected components. Some components may be allocatable.
+ SE.finish();
+ ++NumGlobalSplits;
+
+ if (VerifyEnabled) {
+ MF->verify(this, "After splitting live range around region");
+
+#ifndef NDEBUG
+ // Make sure that at least one of the new intervals can allocate to PhysReg.
+ // That was the whole point of splitting the live range.
+ bool found = false;
+ for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
+ ++I)
+ if (!checkUncachedInterference(**I, PhysReg)) {
+ found = true;
+ break;
+ }
+ assert(found && "No allocatable intervals after pointless splitting");
+#endif
+ }
+}
+
+unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ BitVector LiveBundles, BestBundles;
+ float BestCost = 0;
+ unsigned BestReg = 0;
+ Order.rewind();
+ while (unsigned PhysReg = Order.next()) {
+ float Cost = calcInterferenceInfo(VirtReg, PhysReg);
+ if (BestReg && Cost >= BestCost)
+ continue;
+
+ SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
+ // No live bundles, defer to splitSingleBlocks().
+ if (!LiveBundles.any())
+ continue;
+
+ Cost += calcGlobalSplitCost(LiveBundles);
+ if (!BestReg || Cost < BestCost) {
+ BestReg = PhysReg;
+ BestCost = Cost;
+ BestBundles.swap(LiveBundles);
+ }
+ }
+
+ if (!BestReg)
+ return 0;
+
+ splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
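+ // The split products were appended to NewVRegs; returning 0 tells the driver
+ // to requeue them rather than assign VirtReg itself this round.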
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Local Splitting
+//===----------------------------------------------------------------------===//
+
+
+/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
+/// in order to use PhysReg between two entries in SA->UseSlots.
+///
+/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
+///
+void RAGreedy::calcGapWeights(unsigned PhysReg,
+ SmallVectorImpl<float> &GapWeight) {
+ assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
+ const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
+ const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ const unsigned NumGaps = Uses.size()-1;
+
+ // Start and end points for the interference check.
+ SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
+ SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
+
+ GapWeight.assign(NumGaps, 0.0f);
+
+ // Add interference from each overlapping register.
+ for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
+ if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
+ .checkInterference())
+ continue;
+
+ // We know that VirtReg is a continuous interval from FirstUse to LastUse,
+ // so we don't need InterferenceQuery.
+ //
+ // Interference that overlaps an instruction is counted in both gaps
+ // surrounding the instruction. The exception is interference before
+ // StartIdx and after StopIdx.
+ //
+ LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
+ for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
+ // Skip the gaps before IntI.
+ while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
+ if (++Gap == NumGaps)
+ break;
+ if (Gap == NumGaps)
+ break;
+
+ // Update the gaps covered by IntI.
+ const float weight = IntI.value()->weight;
+ for (; Gap != NumGaps; ++Gap) {
+ GapWeight[Gap] = std::max(GapWeight[Gap], weight);
+ if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
+ break;
+ }
+ if (Gap == NumGaps)
+ break;
+ }
+ }
+}
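In other words, GapWeight ends up holding, for each gap between consecutive
use slots, the weight of the heaviest interfering live range overlapping that
gap. A minimal standalone sketch of the same computation, with a quadratic
scan in place of the merged segment sweep; Segment is a hypothetical stand-in
for a LiveIntervalUnion segment:

#include <algorithm>
#include <vector>

struct Segment { unsigned Start, Stop; float Weight; };  // [Start, Stop)

std::vector<float> gapWeights(const std::vector<unsigned> &Uses,
                              const std::vector<Segment> &Interference) {
  if (Uses.size() < 2)
    return {};
  std::vector<float> GapWeight(Uses.size() - 1, 0.0f);
  for (const Segment &S : Interference)
    for (size_t Gap = 0; Gap != GapWeight.size(); ++Gap) {
      // S touches gap Gap when it overlaps (Uses[Gap], Uses[Gap+1]).
      if (S.Stop <= Uses[Gap] || S.Start >= Uses[Gap + 1])
        continue;
      GapWeight[Gap] = std::max(GapWeight[Gap], S.Weight);
    }
  return GapWeight;
}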
+
+/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
+/// before MI that has a slot index. If MI is the first mapped instruction in
+/// its block, return the block start index instead.
+///
+SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
+ assert(MI && "Missing MachineInstr");
+ const MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
+ while (I != B)
+ if (!(--I)->isDebugValue() && !I->isCopy())
+ return Indexes->getInstructionIndex(I);
+ return Indexes->getMBBStartIdx(MBB);
+}
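A minimal standalone sketch of the same backwards walk; Instr and its fields
are hypothetical stand-ins for the MachineInstr queries, and Index models the
slot index assigned to a mapped instruction:

#include <vector>

struct Instr { bool IsDebug; bool IsCopy; unsigned Index; };

// Return the index of the last real (non-debug, non-copy) instruction before
// position Pos in Block, or BlockStart when there is none.
unsigned prevMappedIndex(const std::vector<Instr> &Block, unsigned Pos,
                         unsigned BlockStart) {
  while (Pos != 0) {
    const Instr &I = Block[--Pos];
    if (!I.IsDebug && !I.IsCopy)
      return I.Index;
  }
  return BlockStart;
}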
+
+/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
+/// real non-copy instruction for each instruction in SA->UseSlots.
+///
+void RAGreedy::calcPrevSlots() {
+ const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ PrevSlot.clear();
+ PrevSlot.reserve(Uses.size());
+ for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
+ const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
+ PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
+ }
+}
+
+/// nextSplitPoint - Find the next index j > i into SA->UseSlots such that it
+/// may be beneficial to split before UseSlots[j].
+///
+/// 0 is always a valid split point
+unsigned RAGreedy::nextSplitPoint(unsigned i) {
+ const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ const unsigned Size = Uses.size();
+ assert(i != Size && "No split points after the end");
+ // Allow split before i when Uses[i] is not adjacent to the previous use.
+ while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
+ ;
+ return i;
+}
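The effect is that a use only qualifies as a split point when some real
instruction separates it from the previous use; back-to-back uses leave no
room for the copies a split would have to insert. A minimal standalone
sketch, with plain unsigned indices standing in for SlotIndex:

#include <cassert>
#include <vector>

unsigned nextSplit(const std::vector<unsigned> &Uses,
                   const std::vector<unsigned> &PrevSlot, unsigned i) {
  const unsigned Size = Uses.size();
  assert(i != Size && "No split points after the end");
  // PrevSlot[i] is the last real instruction before Uses[i]; while it does
  // not fall strictly after Uses[i-1], the two uses are adjacent.
  while (++i != Size && PrevSlot[i] <= Uses[i - 1])
    ;
  return i;
}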
+
+/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
+/// basic block.
+///
+unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
+ const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
+
+ // Note that it is possible to have an interval that is live-in or live-out
+ // while only covering a single block - A phi-def can use undef values from
+ // predecessors, and the block could be a single-block loop.
+ // We don't bother doing anything clever about such a case; we simply assume
+ // that the interval is continuous from FirstUse to LastUse. We should make
+ // sure that we don't do anything illegal to such an interval, though.
+
+ const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ if (Uses.size() <= 2)
+ return 0;
+ const unsigned NumGaps = Uses.size()-1;
+
+ DEBUG({
+ dbgs() << "tryLocalSplit: ";
+ for (unsigned i = 0, e = Uses.size(); i != e; ++i)
+ dbgs() << ' ' << SA->UseSlots[i];
+ dbgs() << '\n';
+ });
+
+ // For every use, find the previous mapped non-copy instruction.
+ // We use this to detect valid split points, and to estimate new interval
+ // sizes.
+ calcPrevSlots();
+
+ unsigned BestBefore = NumGaps;
+ unsigned BestAfter = 0;
+ float BestDiff = 0;
+
+ const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB);
+ SmallVector<float, 8> GapWeight;
+
+ Order.rewind();
+ while (unsigned PhysReg = Order.next()) {
+ // Keep track of the largest spill weight that would need to be evicted in
+ // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
+ calcGapWeights(PhysReg, GapWeight);
+
+ // Try to find the best sequence of gaps to close.
+ // The new spill weight must be larger than any gap interference.
+
+ // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
+ unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
+
+ // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
+ // It is the spill weight that needs to be evicted.
+ float MaxGap = GapWeight[0];
+ for (unsigned i = 1; i != SplitAfter; ++i)
+ MaxGap = std::max(MaxGap, GapWeight[i]);
+
+ for (;;) {
+ // Live before/after split?
+ const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
+ const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
+
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
+ << Uses[SplitBefore] << '-' << Uses[SplitAfter]
+ << " i=" << MaxGap);
+
+ // Stop before the interval gets so big we wouldn't be making progress.
+ if (!LiveBefore && !LiveAfter) {
+ DEBUG(dbgs() << " all\n");
+ break;
+ }
+ // Should the interval be extended or shrunk?
+ bool Shrink = true;
+ if (MaxGap < HUGE_VALF) {
+ // Estimate the new spill weight.
+ //
+ // Each instruction reads and writes the register, except the first
+ // instr doesn't read when !FirstLive, and the last instr doesn't write
+ // when !LastLive.
+ //
+ // We will be inserting copies before and after, so the total number of
+ // reads and writes is 2 * EstUses.
+ //
+ const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
+ 2*(LiveBefore + LiveAfter);
+
+ // Try to guess the size of the new interval. This should be trivial,
+ // but the slot index of an inserted copy can be a lot smaller than the
+ // instruction it is inserted before if there are many dead indexes
+ // between them.
+ //
+ // We measure the distance from the instruction before SplitBefore to
+ // get a conservative estimate.
+ //
+ // The final distance can still be different if inserting copies
+ // triggers a slot index renumbering.
+ //
+ const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
+ PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
+ // Would this split be possible to allocate?
+ // Never allocate all gaps, we wouldn't be making progress.
+ float Diff = EstWeight - MaxGap;
+ DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
+ if (Diff > 0) {
+ Shrink = false;
+ if (Diff > BestDiff) {
+ DEBUG(dbgs() << " (best)");
+ BestDiff = Diff;
+ BestBefore = SplitBefore;
+ BestAfter = SplitAfter;
+ }
+ }
+ }
+
+ // Try to shrink.
+ if (Shrink) {
+ SplitBefore = nextSplitPoint(SplitBefore);
+ if (SplitBefore < SplitAfter) {
+ DEBUG(dbgs() << " shrink\n");
+ // Recompute the max when necessary.
+ if (GapWeight[SplitBefore - 1] >= MaxGap) {
+ MaxGap = GapWeight[SplitBefore];
+ for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
+ MaxGap = std::max(MaxGap, GapWeight[i]);
+ }
+ continue;
+ }
+ MaxGap = 0;
+ }
+
+ // Try to extend the interval.
+ if (SplitAfter >= NumGaps) {
+ DEBUG(dbgs() << " end\n");
+ break;
+ }
+
+ DEBUG(dbgs() << " extend\n");
+ for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
+ SplitAfter != e; ++SplitAfter)
+ MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
+ continue;
+ }
+ }
+
+ // Didn't find any candidates?
+ if (BestBefore == NumGaps)
+ return 0;
+
+ DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
+ << '-' << Uses[BestAfter] << ", " << BestDiff
+ << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
+
+ SmallVector<LiveInterval*, 4> SpillRegs;
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
+ SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
+
+ SE.openIntv();
+ SlotIndex SegStart = SE.enterIntvBefore(Uses[BestBefore]);
+ SlotIndex SegStop = SE.leaveIntvAfter(Uses[BestAfter]);
+ SE.useIntv(SegStart, SegStop);
+ SE.closeIntv();
+ SE.finish();
+ ++NumLocalSplits;
+
+ return 0;
+}
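The grow/shrink loop above is driven by one comparison: the estimated spill
weight of the candidate interval must exceed MaxGap, the heaviest
interference it would have to evict. A minimal standalone sketch of that
accept test; normalize() is a hypothetical stand-in for normalizeSpillWeight
(only the property that weight shrinks as the interval grows matters here),
and it folds the Diff > 0 and Diff > BestDiff checks into one since BestDiff
starts at zero:

float normalize(float UseDefFreq, unsigned Size) {
  return UseDefFreq / Size;  // assumption: frequency-scaled uses per size
}

// True when [SplitBefore, SplitAfter] is the best candidate seen so far;
// false means the window should shrink or extend instead.
bool acceptWindow(float BlockFreq, unsigned EstUses, unsigned Distance,
                  float MaxGap, float &BestDiff) {
  const float EstWeight = normalize(BlockFreq * EstUses, Distance);
  const float Diff = EstWeight - MaxGap;  // > 0: split would be allocatable
  if (Diff <= BestDiff)
    return false;
  BestDiff = Diff;
  return true;
}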
+
+//===----------------------------------------------------------------------===//
+// Live Range Splitting
+//===----------------------------------------------------------------------===//
+
+/// trySplit - Try to split VirtReg or one of its interferences, making it
+/// assignable.
+/// @return a PhysReg when VirtReg may be assigned, and/or new live intervals
+/// in NewVRegs.
+unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*>&NewVRegs) {
+ SA->analyze(&VirtReg);
+
+ // Local intervals are handled separately.
+ if (LIS->intervalIsInOneMBB(VirtReg)) {
+ NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
+ return tryLocalSplit(VirtReg, Order, NewVRegs);
+ }
+
+ NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
+
+ // First try to split around a region spanning multiple blocks.
+ unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
+ if (PhysReg || !NewVRegs.empty())
+ return PhysReg;
+
+ // Then isolate blocks with multiple uses.
+ SplitAnalysis::BlockPtrSet Blocks;
+ if (SA->getMultiUseBlocks(Blocks)) {
+ SmallVector<LiveInterval*, 4> SpillRegs;
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
+ SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
+ if (VerifyEnabled)
+ MF->verify(this, "After splitting live range around basic blocks");
+ }
+
+ // Don't assign any physregs.
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Spilling
+//===----------------------------------------------------------------------===//
+
+/// calcInterferenceWeight - Calculate the combined spill weight of
+/// interferences when assigning VirtReg to PhysReg.
+float RAGreedy::calcInterferenceWeight(LiveInterval &VirtReg, unsigned PhysReg){
+ float Sum = 0;
+ for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
+ LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
+ Q.collectInterferingVRegs();
+ if (Q.seenUnspillableVReg())
+ return HUGE_VALF;
+ for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i)
+ Sum += Q.interferingVRegs()[i]->weight;
+ }
+ return Sum;
+}
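A minimal standalone sketch of the same accumulation; VReg is a hypothetical
stand-in for an interfering LiveInterval, with HUGE_VALF marking the
unspillable case exactly as above:

#include <cmath>
#include <vector>

struct VReg { float Weight; bool Spillable; };

float interferenceWeight(const std::vector<VReg> &Interfering) {
  float Sum = 0;
  for (const VReg &R : Interfering) {
    if (!R.Spillable)
      return HUGE_VALF;  // never worth evicting an unspillable interval
    Sum += R.Weight;
  }
  return Sum;
}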
+
+/// trySpillInterferences - Try to spill interfering registers instead of the
+/// current one. Only do it if the accumulated spill weight is smaller than the
+/// current spill weight.
+unsigned RAGreedy::trySpillInterferences(LiveInterval &VirtReg,
+ AllocationOrder &Order,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ NamedRegionTimer T("Spill Interference", TimerGroupName, TimePassesIsEnabled);
+ unsigned BestPhys = 0;
+ float BestWeight = 0;
+
+ Order.rewind();
+ while (unsigned PhysReg = Order.next()) {
+ float Weight = calcInterferenceWeight(VirtReg, PhysReg);
+ if (Weight == HUGE_VALF || Weight >= VirtReg.weight)
+ continue;
+ if (!BestPhys || Weight < BestWeight)
+ BestPhys = PhysReg, BestWeight = Weight;
+ }
+
+ // No candidates found.
+ if (!BestPhys)
+ return 0;
+
+ // Collect all interfering registers.
+ SmallVector<LiveInterval*, 8> Spills;
+ for (const unsigned *AI = TRI->getOverlaps(BestPhys); *AI; ++AI) {
+ LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
+ Spills.append(Q.interferingVRegs().begin(), Q.interferingVRegs().end());
+ for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
+ LiveInterval *VReg = Q.interferingVRegs()[i];
+ unassign(*VReg, *AI);
+ }
+ }
+
+ // Spill them all.
+ DEBUG(dbgs() << "spilling " << Spills.size() << " interferences with weight "
+ << BestWeight << '\n');
+ for (unsigned i = 0, e = Spills.size(); i != e; ++i)
+ spiller().spill(Spills[i], NewVRegs, Spills);
+ return BestPhys;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Main Entry Point
+//===----------------------------------------------------------------------===//
+
+unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
+ SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ // First try assigning a free register.
+ AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
+ while (unsigned PhysReg = Order.next()) {
+ if (!checkPhysRegInterference(VirtReg, PhysReg))
+ return PhysReg;
+ }
+
+ // Try to reassign interferences.
+ if (unsigned PhysReg = tryReassignOrEvict(VirtReg, Order, NewVRegs))
+ return PhysReg;
+
+ assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
+
+ // Try splitting VirtReg or interferences.
+ unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
+ if (PhysReg || !NewVRegs.empty())
+ return PhysReg;
+
+ // Try to spill another interfering reg with less spill weight.
+ PhysReg = trySpillInterferences(VirtReg, Order, NewVRegs);
+ if (PhysReg)
+ return PhysReg;
+
+ // Finally spill VirtReg itself.
+ NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
+ SmallVector<LiveInterval*, 1> pendingSpills;
+ spiller().spill(&VirtReg, NewVRegs, pendingSpills);
+
+ // The live virtual register requesting allocation was spilled, so tell
+ // the caller not to allocate anything during this round.
+ return 0;
+}
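selectOrSplit is thus a cascade of strategies, cheapest first: direct
assignment, reassignment/eviction, splitting, spilling interferences, and
finally spilling VirtReg itself. A minimal standalone sketch of the control
flow; the std::function stages are hypothetical stand-ins for the calls
above, and returning 0 with NewVRegs non-empty defers work to a later round:

#include <functional>
#include <vector>

unsigned runCascade(const std::vector<std::function<unsigned()>> &Stages,
                    const std::vector<unsigned> &NewVRegs) {
  for (const auto &Stage : Stages) {
    unsigned PhysReg = Stage();
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;  // assigned now, or re-queued via NewVRegs
  }
  return 0;  // the final stage spilled the register itself
}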
+
+bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
+ DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
+ << "********** Function: "
+ << ((Value*)mf.getFunction())->getName() << '\n');
+
+ MF = &mf;
+ if (VerifyEnabled)
+ MF->verify(this, "Before greedy register allocator");
+
+ RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
+ Indexes = &getAnalysis<SlotIndexes>();
+ DomTree = &getAnalysis<MachineDominatorTree>();
+ ReservedRegs = TRI->getReservedRegs(*MF);
+ SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
+ Loops = &getAnalysis<MachineLoopInfo>();
+ LoopRanges = &getAnalysis<MachineLoopRanges>();
+ Bundles = &getAnalysis<EdgeBundles>();
+ SpillPlacer = &getAnalysis<SpillPlacement>();
+
+ SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
+
+ allocatePhysRegs();
+ addMBBLiveIns(MF);
+ LIS->addKillFlags();
+
+ // Run rewriter
+ {
+ NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
+ VRM->rewrite(Indexes);
+ }
+
+ // The pass output is in VirtRegMap. Release all the transient data.
+ releaseMemory();
+
+ return true;
+}
diff --git a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp b/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
index 5c62354..b959878 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
@@ -12,13 +12,14 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
+#include "LiveDebugVariables.h"
#include "VirtRegMap.h"
#include "VirtRegRewriter.h"
#include "Spiller.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
@@ -91,6 +92,19 @@ namespace {
struct RALinScan : public MachineFunctionPass {
static char ID;
RALinScan() : MachineFunctionPass(ID) {
+ initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+ initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
+ initializeRegisterCoalescerAnalysisGroup(
+ *PassRegistry::getPassRegistry());
+ initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+ initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+
// Initialize the queue to record recently-used registers.
if (NumRecentlyUsedRegs > 0)
RecentRegs.resize(NumRecentlyUsedRegs, 0);
@@ -127,7 +141,6 @@ namespace {
BitVector allocatableRegs_;
BitVector reservedRegs_;
LiveIntervals* li_;
- LiveStacks* ls_;
MachineLoopInfo *loopInfo;
/// handled_ - Intervals are added to the handled_ set in the order of their
@@ -183,6 +196,8 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<SlotIndexes>();
if (StrongPHIElim)
@@ -193,12 +208,15 @@ namespace {
AU.addRequired<CalculateSpillWeights>();
if (PreSplitIntervals)
AU.addRequiredID(PreAllocSplittingID);
- AU.addRequired<LiveStacks>();
- AU.addPreserved<LiveStacks>();
+ AU.addRequiredID(LiveStacksID);
+ AU.addPreservedID(LiveStacksID);
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
+ AU.addRequired<LiveDebugVariables>();
+ AU.addPreserved<LiveDebugVariables>();
+ AU.addRequiredID(MachineDominatorsID);
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -370,8 +388,19 @@ namespace {
char RALinScan::ID = 0;
}
-INITIALIZE_PASS(RALinScan, "linearscan-regalloc",
- "Linear Scan Register Allocator", false, false);
+INITIALIZE_PASS_BEGIN(RALinScan, "linearscan-regalloc",
+ "Linear Scan Register Allocator", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
+INITIALIZE_PASS_DEPENDENCY(CalculateSpillWeights)
+INITIALIZE_PASS_DEPENDENCY(PreAllocSplitting)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_AG_DEPENDENCY(RegisterCoalescer)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(RALinScan, "linearscan-regalloc",
+ "Linear Scan Register Allocator", false, false)
void RALinScan::ComputeRelatedRegClasses() {
// First pass, add all reg classes to the union, and determine at least one
@@ -402,8 +431,12 @@ void RALinScan::ComputeRelatedRegClasses() {
for (DenseMap<unsigned, const TargetRegisterClass*>::iterator
I = OneClassForEachPhysReg.begin(), E = OneClassForEachPhysReg.end();
I != E; ++I)
- for (const unsigned *AS = tri_->getAliasSet(I->first); *AS; ++AS)
- RelatedRegClasses.unionSets(I->second, OneClassForEachPhysReg[*AS]);
+ for (const unsigned *AS = tri_->getAliasSet(I->first); *AS; ++AS) {
+ const TargetRegisterClass *AliasClass =
+ OneClassForEachPhysReg.lookup(*AS);
+ if (AliasClass)
+ RelatedRegClasses.unionSets(I->second, AliasClass);
+ }
}
/// attemptTrivialCoalescing - If a simple interval is defined by a copy, try
@@ -431,8 +464,7 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
unsigned CandReg;
{
MachineInstr *CopyMI;
- if (vni->def != SlotIndex() && vni->isDefAccurate() &&
- (CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
+ if ((CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
// Defined by a copy, try to extend SrcReg forward
CandReg = CopyMI->getOperand(1).getReg();
else if (TrivCoalesceEnds &&
@@ -442,6 +474,10 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
CandReg = CopyMI->getOperand(0).getReg();
else
return Reg;
+
+ // If the target of the copy is a sub-register then don't coalesce.
+ if(CopyMI->getOperand(0).getSubReg())
+ return Reg;
}
if (TargetRegisterInfo::isVirtualRegister(CandReg)) {
@@ -478,7 +514,6 @@ bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
allocatableRegs_ = tri_->getAllocatableSet(fn);
reservedRegs_ = tri_->getReservedRegs(fn);
li_ = &getAnalysis<LiveIntervals>();
- ls_ = &getAnalysis<LiveStacks>();
loopInfo = &getAnalysis<MachineLoopInfo>();
// We don't run the coalescer here because we have no reason to
@@ -505,6 +540,9 @@ bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
// Rewrite spill code and update the PhysRegsUsed set.
rewriter_->runOnMachineFunction(*mf_, *vrm_, li_);
+ // Write out new DBG_VALUE instructions.
+ getAnalysis<LiveDebugVariables>().emitDebugValues(vrm_);
+
assert(unhandled_.empty() && "Unhandled live intervals remain!");
finalizeRegUses();
@@ -638,8 +676,6 @@ void RALinScan::linearScan() {
// Look for physical registers that end up not being allocated even though
// register allocator had to spill other registers in its register class.
- if (ls_->getNumIntervals() == 0)
- return;
if (!vrm_->FindUnusedRegisters(li_))
return;
}
@@ -784,30 +820,6 @@ static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V,
}
}
-/// addStackInterval - Create a LiveInterval for stack if the specified live
-/// interval has been spilled.
-static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
- LiveIntervals *li_,
- MachineRegisterInfo* mri_, VirtRegMap &vrm_) {
- int SS = vrm_.getStackSlot(cur->reg);
- if (SS == VirtRegMap::NO_STACK_SLOT)
- return;
-
- const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
- LiveInterval &SI = ls_->getOrCreateInterval(SS, RC);
-
- VNInfo *VNI;
- if (SI.hasAtLeastOneValue())
- VNI = SI.getValNumInfo(0);
- else
- VNI = SI.getNextValue(SlotIndex(), 0, false,
- ls_->getVNInfoAllocator());
-
- LiveInterval &RI = li_->getInterval(cur->reg);
- // FIXME: This may be overly conservative.
- SI.MergeRangesInAsValue(RI, VNI);
-}
-
/// getConflictWeight - Return the number of conflicts between cur
/// live interval and defs and uses of Reg weighted by loop depths.
static
@@ -925,13 +937,9 @@ LiveInterval *RALinScan::hasNextReloadInterval(LiveInterval *cur) {
}
void RALinScan::DowngradeRegister(LiveInterval *li, unsigned Reg) {
- bool isNew = DowngradedRegs.insert(Reg);
- isNew = isNew; // Silence compiler warning.
- assert(isNew && "Multiple reloads holding the same register?");
- DowngradeMap.insert(std::make_pair(li->reg, Reg));
- for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS) {
- isNew = DowngradedRegs.insert(*AS);
- isNew = isNew; // Silence compiler warning.
+ for (const unsigned *AS = tri_->getOverlaps(Reg); *AS; ++AS) {
+ bool isNew = DowngradedRegs.insert(*AS);
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Multiple reloads holding the same register?");
DowngradeMap.insert(std::make_pair(li->reg, *AS));
}
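The rewrite above relies on getOverlaps(Reg) enumerating Reg itself followed
by its aliases, whereas getAliasSet(Reg) lists only the aliases; that is why
the separate insert of Reg disappears. A minimal standalone model of the
relationship, with the alias list supplied by the caller:

#include <vector>

std::vector<unsigned> overlaps(unsigned Reg,
                               const std::vector<unsigned> &Aliases) {
  std::vector<unsigned> R;
  R.reserve(Aliases.size() + 1);
  R.push_back(Reg);                          // Reg itself comes first
  R.insert(R.end(), Aliases.begin(), Aliases.end());
  return R;
}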
@@ -957,10 +965,11 @@ namespace {
/// assignRegOrStackSlotAtInterval - assign a register if one is available, or
/// spill.
void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
- DEBUG(dbgs() << "\tallocating current interval: ");
+ const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
+ DEBUG(dbgs() << "\tallocating current interval from "
+ << RC->getName() << ": ");
// This is an implicitly defined live interval, just assign any register.
- const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
if (cur->empty()) {
unsigned physReg = vrm_->getRegAllocPref(cur->reg);
if (!physReg)
@@ -984,8 +993,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
VNInfo *vni = cur->begin()->valno;
- if ((vni->def != SlotIndex()) && !vni->isUnused() &&
- vni->isDefAccurate()) {
+ if (!vni->isUnused()) {
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
if (CopyMI && CopyMI->isCopy()) {
unsigned DstSubReg = CopyMI->getOperand(0).getSubReg();
@@ -1225,7 +1233,6 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
spiller_->spill(cur, added, spillIs);
std::sort(added.begin(), added.end(), LISorter());
- addStackInterval(cur, ls_, li_, mri_, *vrm_);
if (added.empty())
return; // Early exit if all spills were folded.
@@ -1300,7 +1307,6 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
if (sli->beginIndex() < earliestStart)
earliestStart = sli->beginIndex();
spiller_->spill(sli, added, spillIs);
- addStackInterval(sli, ls_, li_, mri_, *vrm_);
spilled.insert(sli->reg);
}
@@ -1419,8 +1425,7 @@ unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg);
// Resolve second part of the hint (if possible) given the current allocation.
unsigned physReg = Hint.second;
- if (physReg &&
- TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
+ if (TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
physReg = vrm_->getPhys(physReg);
TargetRegisterClass::iterator I, E;
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 61f337b..ea0d1fe 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -31,9 +31,6 @@
#define DEBUG_TYPE "regalloc"
-#include "PBQP/HeuristicSolver.h"
-#include "PBQP/Graph.h"
-#include "PBQP/Heuristics/Briggs.h"
#include "RenderMachineFunction.h"
#include "Splitter.h"
#include "VirtRegMap.h"
@@ -41,9 +38,13 @@
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/RegAllocPBQP.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PBQP/HeuristicSolver.h"
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Heuristics/Briggs.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Support/Debug.h"
@@ -51,7 +52,6 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <limits>
-#include <map>
#include <memory>
#include <set>
#include <vector>
@@ -60,7 +60,7 @@ using namespace llvm;
static RegisterRegAlloc
registerPBQPRepAlloc("pbqp", "PBQP register allocator",
- llvm::createPBQPRegisterAllocator);
+ createDefaultPBQPRegisterAllocator);
static cl::opt<bool>
pbqpCoalescing("pbqp-coalescing",
@@ -69,698 +69,471 @@ pbqpCoalescing("pbqp-coalescing",
static cl::opt<bool>
pbqpPreSplitting("pbqp-pre-splitting",
- cl::desc("Pre-splite before PBQP register allocation."),
+ cl::desc("Pre-split before PBQP register allocation."),
cl::init(false), cl::Hidden);
namespace {
- ///
- /// PBQP based allocators solve the register allocation problem by mapping
- /// register allocation problems to Partitioned Boolean Quadratic
- /// Programming problems.
- class PBQPRegAlloc : public MachineFunctionPass {
- public:
+///
+/// PBQP based allocators solve the register allocation problem by mapping
+/// register allocation problems to Partitioned Boolean Quadratic
+/// Programming problems.
+class RegAllocPBQP : public MachineFunctionPass {
+public:
+
+ static char ID;
+
+ /// Construct a PBQP register allocator.
+ RegAllocPBQP(std::auto_ptr<PBQPBuilder> b)
+ : MachineFunctionPass(ID), builder(b) {
+ initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+ initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+ initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+ initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ initializeLoopSplitterPass(*PassRegistry::getPassRegistry());
+ initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+ initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
+ }
- static char ID;
+ /// Return the pass name.
+ virtual const char* getPassName() const {
+ return "PBQP Register Allocator";
+ }
- /// Construct a PBQP register allocator.
- PBQPRegAlloc() : MachineFunctionPass(ID) {}
+ /// PBQP analysis usage.
+ virtual void getAnalysisUsage(AnalysisUsage &au) const;
- /// Return the pass name.
- virtual const char* getPassName() const {
- return "PBQP Register Allocator";
- }
+ /// Perform register allocation
+ virtual bool runOnMachineFunction(MachineFunction &MF);
- /// PBQP analysis usage.
- virtual void getAnalysisUsage(AnalysisUsage &au) const {
- au.addRequired<SlotIndexes>();
- au.addPreserved<SlotIndexes>();
- au.addRequired<LiveIntervals>();
- //au.addRequiredID(SplitCriticalEdgesID);
- au.addRequired<RegisterCoalescer>();
- au.addRequired<CalculateSpillWeights>();
- au.addRequired<LiveStacks>();
- au.addPreserved<LiveStacks>();
- au.addRequired<MachineLoopInfo>();
- au.addPreserved<MachineLoopInfo>();
- if (pbqpPreSplitting)
- au.addRequired<LoopSplitter>();
- au.addRequired<VirtRegMap>();
- au.addRequired<RenderMachineFunction>();
- MachineFunctionPass::getAnalysisUsage(au);
- }
+private:
- /// Perform register allocation
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ typedef std::map<const LiveInterval*, unsigned> LI2NodeMap;
+ typedef std::vector<const LiveInterval*> Node2LIMap;
+ typedef std::vector<unsigned> AllowedSet;
+ typedef std::vector<AllowedSet> AllowedSetMap;
+ typedef std::pair<unsigned, unsigned> RegPair;
+ typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
+ typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
+ typedef std::set<unsigned> RegSet;
- private:
- class LIOrdering {
- public:
- bool operator()(const LiveInterval *li1, const LiveInterval *li2) const {
- return li1->reg < li2->reg;
- }
- };
-
- typedef std::map<const LiveInterval*, unsigned, LIOrdering> LI2NodeMap;
- typedef std::vector<const LiveInterval*> Node2LIMap;
- typedef std::vector<unsigned> AllowedSet;
- typedef std::vector<AllowedSet> AllowedSetMap;
- typedef std::set<unsigned> RegSet;
- typedef std::pair<unsigned, unsigned> RegPair;
- typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
-
- typedef std::set<LiveInterval*, LIOrdering> LiveIntervalSet;
-
- typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
-
- MachineFunction *mf;
- const TargetMachine *tm;
- const TargetRegisterInfo *tri;
- const TargetInstrInfo *tii;
- const MachineLoopInfo *loopInfo;
- MachineRegisterInfo *mri;
- RenderMachineFunction *rmf;
-
- LiveIntervals *lis;
- LiveStacks *lss;
- VirtRegMap *vrm;
-
- LI2NodeMap li2Node;
- Node2LIMap node2LI;
- AllowedSetMap allowedSets;
- LiveIntervalSet vregIntervalsToAlloc,
- emptyVRegIntervals;
- NodeVector problemNodes;
-
-
- /// Builds a PBQP cost vector.
- template <typename RegContainer>
- PBQP::Vector buildCostVector(unsigned vReg,
- const RegContainer &allowed,
- const CoalesceMap &cealesces,
- PBQP::PBQPNum spillCost) const;
-
- /// \brief Builds a PBQP interference matrix.
- ///
- /// @return Either a pointer to a non-zero PBQP matrix representing the
- /// allocation option costs, or a null pointer for a zero matrix.
- ///
- /// Expects allowed sets for two interfering LiveIntervals. These allowed
- /// sets should contain only allocable registers from the LiveInterval's
- /// register class, with any interfering pre-colored registers removed.
- template <typename RegContainer>
- PBQP::Matrix* buildInterferenceMatrix(const RegContainer &allowed1,
- const RegContainer &allowed2) const;
-
- ///
- /// Expects allowed sets for two potentially coalescable LiveIntervals,
- /// and an estimated benefit due to coalescing. The allowed sets should
- /// contain only allocable registers from the LiveInterval's register
- /// classes, with any interfering pre-colored registers removed.
- template <typename RegContainer>
- PBQP::Matrix* buildCoalescingMatrix(const RegContainer &allowed1,
- const RegContainer &allowed2,
- PBQP::PBQPNum cBenefit) const;
-
- /// \brief Finds coalescing opportunities and returns them as a map.
- ///
- /// Any entries in the map are guaranteed coalescable, even if their
- /// corresponding live intervals overlap.
- CoalesceMap findCoalesces();
-
- /// \brief Finds the initial set of vreg intervals to allocate.
- void findVRegIntervalsToAlloc();
-
- /// \brief Constructs a PBQP problem representation of the register
- /// allocation problem for this function.
- ///
- /// @return a PBQP solver object for the register allocation problem.
- PBQP::Graph constructPBQPProblem();
-
- /// \brief Adds a stack interval if the given live interval has been
- /// spilled. Used to support stack slot coloring.
- void addStackInterval(const LiveInterval *spilled,MachineRegisterInfo* mri);
-
- /// \brief Given a solved PBQP problem maps this solution back to a register
- /// assignment.
- bool mapPBQPToRegAlloc(const PBQP::Solution &solution);
-
- /// \brief Postprocessing before final spilling. Sets basic block "live in"
- /// variables.
- void finalizeAlloc() const;
-
- };
-
- char PBQPRegAlloc::ID = 0;
-}
+ std::auto_ptr<PBQPBuilder> builder;
+ MachineFunction *mf;
+ const TargetMachine *tm;
+ const TargetRegisterInfo *tri;
+ const TargetInstrInfo *tii;
+ const MachineLoopInfo *loopInfo;
+ MachineRegisterInfo *mri;
+ RenderMachineFunction *rmf;
-template <typename RegContainer>
-PBQP::Vector PBQPRegAlloc::buildCostVector(unsigned vReg,
- const RegContainer &allowed,
- const CoalesceMap &coalesces,
- PBQP::PBQPNum spillCost) const {
+ LiveIntervals *lis;
+ LiveStacks *lss;
+ VirtRegMap *vrm;
- typedef typename RegContainer::const_iterator AllowedItr;
+ RegSet vregsToAlloc, emptyIntervalVRegs;
- // Allocate vector. Additional element (0th) used for spill option
- PBQP::Vector v(allowed.size() + 1, 0);
+ /// \brief Finds the initial set of vreg intervals to allocate.
+ void findVRegIntervalsToAlloc();
- v[0] = spillCost;
+ /// \brief Adds a stack interval if the given live interval has been
+ /// spilled. Used to support stack slot coloring.
+ void addStackInterval(const LiveInterval *spilled,MachineRegisterInfo* mri);
- // Iterate over the allowed registers inserting coalesce benefits if there
- // are any.
- unsigned ai = 0;
- for (AllowedItr itr = allowed.begin(), end = allowed.end();
- itr != end; ++itr, ++ai) {
+ /// \brief Given a solved PBQP problem maps this solution back to a register
+ /// assignment.
+ bool mapPBQPToRegAlloc(const PBQPRAProblem &problem,
+ const PBQP::Solution &solution);
- unsigned pReg = *itr;
+ /// \brief Postprocessing before final spilling. Sets basic block "live in"
+ /// variables.
+ void finalizeAlloc() const;
- CoalesceMap::const_iterator cmItr =
- coalesces.find(RegPair(vReg, pReg));
+};
- // No coalesce - on to the next preg.
- if (cmItr == coalesces.end())
- continue;
+char RegAllocPBQP::ID = 0;
- // We have a coalesce - insert the benefit.
- v[ai + 1] = -cmItr->second;
- }
+} // End anonymous namespace.
- return v;
+unsigned PBQPRAProblem::getVRegForNode(PBQP::Graph::ConstNodeItr node) const {
+ Node2VReg::const_iterator vregItr = node2VReg.find(node);
+ assert(vregItr != node2VReg.end() && "No vreg for node.");
+ return vregItr->second;
}
-template <typename RegContainer>
-PBQP::Matrix* PBQPRegAlloc::buildInterferenceMatrix(
- const RegContainer &allowed1, const RegContainer &allowed2) const {
-
- typedef typename RegContainer::const_iterator RegContainerIterator;
-
- // Construct a PBQP matrix representing the cost of allocation options. The
- // rows and columns correspond to the allocation options for the two live
- // intervals. Elements will be infinite where corresponding registers alias,
- // since we cannot allocate aliasing registers to interfering live intervals.
- // All other elements (non-aliasing combinations) will have zero cost. Note
- // that the spill option (element 0,0) has zero cost, since we can allocate
- // both intervals to memory safely (the cost for each individual allocation
- // to memory is accounted for by the cost vectors for each live interval).
- PBQP::Matrix *m =
- new PBQP::Matrix(allowed1.size() + 1, allowed2.size() + 1, 0);
-
- // Assume this is a zero matrix until proven otherwise. Zero matrices occur
- // between interfering live ranges with non-overlapping register sets (e.g.
- // non-overlapping reg classes, or disjoint sets of allowed regs within the
- // same class). The term "overlapping" is used advisedly: sets which do not
- // intersect, but contain registers which alias, will have non-zero matrices.
- // We optimize zero matrices away to improve solver speed.
- bool isZeroMatrix = true;
-
-
- // Row index. Starts at 1, since the 0th row is for the spill option, which
- // is always zero.
- unsigned ri = 1;
-
- // Iterate over allowed sets, insert infinities where required.
- for (RegContainerIterator a1Itr = allowed1.begin(), a1End = allowed1.end();
- a1Itr != a1End; ++a1Itr) {
-
- // Column index, starts at 1 as for row index.
- unsigned ci = 1;
- unsigned reg1 = *a1Itr;
-
- for (RegContainerIterator a2Itr = allowed2.begin(), a2End = allowed2.end();
- a2Itr != a2End; ++a2Itr) {
-
- unsigned reg2 = *a2Itr;
-
- // If the row/column regs are identical or alias insert an infinity.
- if (tri->regsOverlap(reg1, reg2)) {
- (*m)[ri][ci] = std::numeric_limits<PBQP::PBQPNum>::infinity();
- isZeroMatrix = false;
- }
-
- ++ci;
- }
-
- ++ri;
- }
-
- // If this turns out to be a zero matrix...
- if (isZeroMatrix) {
- // free it and return null.
- delete m;
- return 0;
- }
-
- // ...otherwise return the cost matrix.
- return m;
+PBQP::Graph::NodeItr PBQPRAProblem::getNodeForVReg(unsigned vreg) const {
+ VReg2Node::const_iterator nodeItr = vreg2Node.find(vreg);
+ assert(nodeItr != vreg2Node.end() && "No node for vreg.");
+ return nodeItr->second;
+
}
-template <typename RegContainer>
-PBQP::Matrix* PBQPRegAlloc::buildCoalescingMatrix(
- const RegContainer &allowed1, const RegContainer &allowed2,
- PBQP::PBQPNum cBenefit) const {
-
- typedef typename RegContainer::const_iterator RegContainerIterator;
-
- // Construct a PBQP Matrix representing the benefits of coalescing. As with
- // interference matrices the rows and columns represent allowed registers
- // for the LiveIntervals which are (potentially) to be coalesced. The amount
- // -cBenefit will be placed in any element representing the same register
- // for both intervals.
- PBQP::Matrix *m =
- new PBQP::Matrix(allowed1.size() + 1, allowed2.size() + 1, 0);
-
- // Reset costs to zero.
- m->reset(0);
-
- // Assume the matrix is zero till proven otherwise. Zero matrices will be
- // optimized away as in the interference case.
- bool isZeroMatrix = true;
-
- // Row index. Starts at 1, since the 0th row is for the spill option, which
- // is always zero.
- unsigned ri = 1;
-
- // Iterate over the allowed sets, insert coalescing benefits where
- // appropriate.
- for (RegContainerIterator a1Itr = allowed1.begin(), a1End = allowed1.end();
- a1Itr != a1End; ++a1Itr) {
-
- // Column index, starts at 1 as for row index.
- unsigned ci = 1;
- unsigned reg1 = *a1Itr;
-
- for (RegContainerIterator a2Itr = allowed2.begin(), a2End = allowed2.end();
- a2Itr != a2End; ++a2Itr) {
-
- // If the row and column represent the same register insert a beneficial
- // cost to preference this allocation - it would allow us to eliminate a
- // move instruction.
- if (reg1 == *a2Itr) {
- (*m)[ri][ci] = -cBenefit;
- isZeroMatrix = false;
- }
-
- ++ci;
- }
-
- ++ri;
- }
-
- // If this turns out to be a zero matrix...
- if (isZeroMatrix) {
- // ...free it and return null.
- delete m;
- return 0;
- }
-
- return m;
+const PBQPRAProblem::AllowedSet&
+ PBQPRAProblem::getAllowedSet(unsigned vreg) const {
+ AllowedSetMap::const_iterator allowedSetItr = allowedSets.find(vreg);
+ assert(allowedSetItr != allowedSets.end() && "No pregs for vreg.");
+ const AllowedSet &allowedSet = allowedSetItr->second;
+ return allowedSet;
}
-PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
-
- typedef MachineFunction::const_iterator MFIterator;
- typedef MachineBasicBlock::const_iterator MBBIterator;
- typedef LiveInterval::const_vni_iterator VNIIterator;
+unsigned PBQPRAProblem::getPRegForOption(unsigned vreg, unsigned option) const {
+ assert(isPRegOption(vreg, option) && "Not a preg option.");
- CoalesceMap coalescesFound;
+ const AllowedSet& allowedSet = getAllowedSet(vreg);
+ assert(option <= allowedSet.size() && "Option outside allowed set.");
+ return allowedSet[option - 1];
+}
- // To find coalesces we need to iterate over the function looking for
- // copy instructions.
- for (MFIterator bbItr = mf->begin(), bbEnd = mf->end();
- bbItr != bbEnd; ++bbItr) {
+std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
+ const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs) {
- const MachineBasicBlock *mbb = &*bbItr;
+ typedef std::vector<const LiveInterval*> LIVector;
- for (MBBIterator iItr = mbb->begin(), iEnd = mbb->end();
- iItr != iEnd; ++iItr) {
+ MachineRegisterInfo *mri = &mf->getRegInfo();
+ const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
- const MachineInstr *instr = &*iItr;
+ std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem());
+ PBQP::Graph &g = p->getGraph();
+ RegSet pregs;
- // If this isn't a copy then continue to the next instruction.
- if (!instr->isCopy())
- continue;
-
- unsigned srcReg = instr->getOperand(1).getReg();
- unsigned dstReg = instr->getOperand(0).getReg();
+ // Collect the set of preg intervals, record that they're used in the MF.
+ for (LiveIntervals::const_iterator itr = lis->begin(), end = lis->end();
+ itr != end; ++itr) {
+ if (TargetRegisterInfo::isPhysicalRegister(itr->first)) {
+ pregs.insert(itr->first);
+ mri->setPhysRegUsed(itr->first);
+ }
+ }
- // If the registers are already the same our job is nice and easy.
- if (dstReg == srcReg)
- continue;
+ BitVector reservedRegs = tri->getReservedRegs(*mf);
+
+ // Iterate over vregs.
+ for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
+ vregItr != vregEnd; ++vregItr) {
+ unsigned vreg = *vregItr;
+ const TargetRegisterClass *trc = mri->getRegClass(vreg);
+ const LiveInterval *vregLI = &lis->getInterval(vreg);
+
+ // Compute an initial allowed set for the current vreg.
+ typedef std::vector<unsigned> VRAllowed;
+ VRAllowed vrAllowed;
+ for (TargetRegisterClass::iterator aoItr = trc->allocation_order_begin(*mf),
+ aoEnd = trc->allocation_order_end(*mf);
+ aoItr != aoEnd; ++aoItr) {
+ unsigned preg = *aoItr;
+ if (!reservedRegs.test(preg)) {
+ vrAllowed.push_back(preg);
+ }
+ }
- bool srcRegIsPhysical = TargetRegisterInfo::isPhysicalRegister(srcReg),
- dstRegIsPhysical = TargetRegisterInfo::isPhysicalRegister(dstReg);
+ // Remove any physical registers which overlap.
+ for (RegSet::const_iterator pregItr = pregs.begin(),
+ pregEnd = pregs.end();
+ pregItr != pregEnd; ++pregItr) {
+ unsigned preg = *pregItr;
+ const LiveInterval *pregLI = &lis->getInterval(preg);
- // If both registers are physical then we can't coalesce.
- if (srcRegIsPhysical && dstRegIsPhysical)
+ if (pregLI->empty()) {
continue;
+ }
- // If it's a copy that includes two virtual register but the source and
- // destination classes differ then we can't coalesce.
- if (!srcRegIsPhysical && !dstRegIsPhysical &&
- mri->getRegClass(srcReg) != mri->getRegClass(dstReg))
+ if (!vregLI->overlaps(*pregLI)) {
continue;
-
- // If one is physical and one is virtual, check that the physical is
- // allocatable in the class of the virtual.
- if (srcRegIsPhysical && !dstRegIsPhysical) {
- const TargetRegisterClass *dstRegClass = mri->getRegClass(dstReg);
- if (std::find(dstRegClass->allocation_order_begin(*mf),
- dstRegClass->allocation_order_end(*mf), srcReg) ==
- dstRegClass->allocation_order_end(*mf))
- continue;
}
- if (!srcRegIsPhysical && dstRegIsPhysical) {
- const TargetRegisterClass *srcRegClass = mri->getRegClass(srcReg);
- if (std::find(srcRegClass->allocation_order_begin(*mf),
- srcRegClass->allocation_order_end(*mf), dstReg) ==
- srcRegClass->allocation_order_end(*mf))
- continue;
- }
-
- // If we've made it here we have a copy with compatible register classes.
- // We can probably coalesce, but we need to consider overlap.
- const LiveInterval *srcLI = &lis->getInterval(srcReg),
- *dstLI = &lis->getInterval(dstReg);
- if (srcLI->overlaps(*dstLI)) {
- // Even in the case of an overlap we might still be able to coalesce,
- // but we need to make sure that no definition of either range occurs
- // while the other range is live.
+ // Remove the register from the allowed set.
+ VRAllowed::iterator eraseItr =
+ std::find(vrAllowed.begin(), vrAllowed.end(), preg);
- // Otherwise start by assuming we're ok.
- bool badDef = false;
-
- // Test all defs of the source range.
- for (VNIIterator
- vniItr = srcLI->vni_begin(), vniEnd = srcLI->vni_end();
- vniItr != vniEnd; ++vniItr) {
+ if (eraseItr != vrAllowed.end()) {
+ vrAllowed.erase(eraseItr);
+ }
- // If we find a poorly defined def we err on the side of caution.
- if (!(*vniItr)->def.isValid()) {
- badDef = true;
- break;
- }
+ // Also remove any aliases.
+ const unsigned *aliasItr = tri->getAliasSet(preg);
+ if (aliasItr != 0) {
+ for (; *aliasItr != 0; ++aliasItr) {
+ VRAllowed::iterator eraseItr =
+ std::find(vrAllowed.begin(), vrAllowed.end(), *aliasItr);
- // If we find a def that kills the coalescing opportunity then
- // record it and break from the loop.
- if (dstLI->liveAt((*vniItr)->def)) {
- badDef = true;
- break;
+ if (eraseItr != vrAllowed.end()) {
+ vrAllowed.erase(eraseItr);
}
}
+ }
+ }
- // If we have a bad def give up, continue to the next instruction.
- if (badDef)
- continue;
-
- // Otherwise test definitions of the destination range.
- for (VNIIterator
- vniItr = dstLI->vni_begin(), vniEnd = dstLI->vni_end();
- vniItr != vniEnd; ++vniItr) {
+ // Construct the node.
+ PBQP::Graph::NodeItr node =
+ g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));
- // We want to make sure we skip the copy instruction itself.
- if ((*vniItr)->getCopy() == instr)
- continue;
+ // Record the mapping and allowed set in the problem.
+ p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end());
- if (!(*vniItr)->def.isValid()) {
- badDef = true;
- break;
- }
+ PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ?
+ vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min();
- if (srcLI->liveAt((*vniItr)->def)) {
- badDef = true;
- break;
- }
- }
+ addSpillCosts(g.getNodeCosts(node), spillCost);
+ }
- // As before a bad def we give up and continue to the next instr.
- if (badDef)
- continue;
+ for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end();
+ vr1Itr != vrEnd; ++vr1Itr) {
+ unsigned vr1 = *vr1Itr;
+ const LiveInterval &l1 = lis->getInterval(vr1);
+ const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);
+
+ for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
+ vr2Itr != vrEnd; ++vr2Itr) {
+ unsigned vr2 = *vr2Itr;
+ const LiveInterval &l2 = lis->getInterval(vr2);
+ const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);
+
+ assert(!l2.empty() && "Empty interval in vreg set?");
+ if (l1.overlaps(l2)) {
+ PBQP::Graph::EdgeItr edge =
+ g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
+ PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0));
+
+ addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri);
}
-
- // If we make it to here then either the ranges didn't overlap, or they
- // did, but none of their definitions would prevent us from coalescing.
- // We're good to go with the coalesce.
-
- float cBenefit = std::pow(10.0f, (float)loopInfo->getLoopDepth(mbb)) / 5.0;
-
- coalescesFound[RegPair(srcReg, dstReg)] = cBenefit;
- coalescesFound[RegPair(dstReg, srcReg)] = cBenefit;
}
-
}
- return coalescesFound;
+ return p;
}
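Each node's cost vector follows a fixed shape: element 0 is the spill option,
elements 1..N correspond to the allowed physical registers. A minimal
standalone sketch of that construction, mirroring the epsilon spill cost used
above for zero-weight intervals:

#include <limits>
#include <vector>

std::vector<float> nodeCosts(unsigned NumAllowed, float Weight) {
  // A zero-weight interval still gets a tiny positive spill cost so the
  // solver never treats spilling as free.
  const float SpillCost =
      (Weight != 0.0f) ? Weight : std::numeric_limits<float>::min();
  std::vector<float> V(NumAllowed + 1, 0.0f);
  V[0] = SpillCost;
  return V;
}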
-void PBQPRegAlloc::findVRegIntervalsToAlloc() {
-
- // Iterate over all live ranges.
- for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
- itr != end; ++itr) {
-
- // Ignore physical ones.
- if (TargetRegisterInfo::isPhysicalRegister(itr->first))
- continue;
-
- LiveInterval *li = itr->second;
-
- // If this live interval is non-empty we will use pbqp to allocate it.
- // Empty intervals we allocate in a simple post-processing stage in
- // finalizeAlloc.
- if (!li->empty()) {
- vregIntervalsToAlloc.insert(li);
- }
- else {
- emptyVRegIntervals.insert(li);
- }
- }
+void PBQPBuilder::addSpillCosts(PBQP::Vector &costVec,
+ PBQP::PBQPNum spillCost) {
+ costVec[0] = spillCost;
}
-PBQP::Graph PBQPRegAlloc::constructPBQPProblem() {
-
- typedef std::vector<const LiveInterval*> LIVector;
- typedef std::vector<unsigned> RegVector;
+void PBQPBuilder::addInterferenceCosts(
+ PBQP::Matrix &costMat,
+ const PBQPRAProblem::AllowedSet &vr1Allowed,
+ const PBQPRAProblem::AllowedSet &vr2Allowed,
+ const TargetRegisterInfo *tri) {
+ assert(costMat.getRows() == vr1Allowed.size() + 1 &&
+ "Matrix height mismatch.");
+ assert(costMat.getCols() == vr2Allowed.size() + 1 &&
+ "Matrix width mismatch.");
- // This will store the physical intervals for easy reference.
- LIVector physIntervals;
+ for (unsigned i = 0; i != vr1Allowed.size(); ++i) {
+ unsigned preg1 = vr1Allowed[i];
- // Start by clearing the old node <-> live interval mappings & allowed sets
- li2Node.clear();
- node2LI.clear();
- allowedSets.clear();
-
- // Populate physIntervals, update preg use:
- for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
- itr != end; ++itr) {
+ for (unsigned j = 0; j != vr2Allowed.size(); ++j) {
+ unsigned preg2 = vr2Allowed[j];
- if (TargetRegisterInfo::isPhysicalRegister(itr->first)) {
- physIntervals.push_back(itr->second);
- mri->setPhysRegUsed(itr->second->reg);
+ if (tri->regsOverlap(preg1, preg2)) {
+ costMat[i + 1][j + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
+ }
}
}
+}
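The interference matrix has the matching shape: row and column 0 are the
spill options (zero cost, since two spilled intervals never conflict), and
entry (i+1, j+1) is infinite whenever the i-th and j-th allowed registers
overlap. A minimal standalone sketch; the Overlap predicate is a hypothetical
stand-in for TargetRegisterInfo::regsOverlap:

#include <limits>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

Matrix interferenceMatrix(const std::vector<unsigned> &Allowed1,
                          const std::vector<unsigned> &Allowed2,
                          bool (*Overlap)(unsigned, unsigned)) {
  const float Inf = std::numeric_limits<float>::infinity();
  Matrix M(Allowed1.size() + 1,
           std::vector<float>(Allowed2.size() + 1, 0.0f));
  for (size_t i = 0; i != Allowed1.size(); ++i)
    for (size_t j = 0; j != Allowed2.size(); ++j)
      if (Overlap(Allowed1[i], Allowed2[j]))
        M[i + 1][j + 1] = Inf;   // interfering vregs cannot share these
  return M;
}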
- // Iterate over vreg intervals, construct live interval <-> node number
- // mappings.
- for (LiveIntervalSet::const_iterator
- itr = vregIntervalsToAlloc.begin(), end = vregIntervalsToAlloc.end();
- itr != end; ++itr) {
- const LiveInterval *li = *itr;
-
- li2Node[li] = node2LI.size();
- node2LI.push_back(li);
- }
-
- // Get the set of potential coalesces.
- CoalesceMap coalesces;
-
- if (pbqpCoalescing) {
- coalesces = findCoalesces();
- }
-
- // Construct a PBQP solver for this problem
- PBQP::Graph problem;
- problemNodes.resize(vregIntervalsToAlloc.size());
-
- // Resize allowedSets container appropriately.
- allowedSets.resize(vregIntervalsToAlloc.size());
-
- BitVector ReservedRegs = tri->getReservedRegs(*mf);
-
- // Iterate over virtual register intervals to compute allowed sets...
- for (unsigned node = 0; node < node2LI.size(); ++node) {
-
- // Grab pointers to the interval and its register class.
- const LiveInterval *li = node2LI[node];
- const TargetRegisterClass *liRC = mri->getRegClass(li->reg);
+std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
+ MachineFunction *mf,
+ const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs) {
- // Start by assuming all allocable registers in the class are allowed...
- RegVector liAllowed;
- TargetRegisterClass::iterator aob = liRC->allocation_order_begin(*mf);
- TargetRegisterClass::iterator aoe = liRC->allocation_order_end(*mf);
- for (TargetRegisterClass::iterator it = aob; it != aoe; ++it)
- if (!ReservedRegs.test(*it))
- liAllowed.push_back(*it);
+ std::auto_ptr<PBQPRAProblem> p = PBQPBuilder::build(mf, lis, loopInfo, vregs);
+ PBQP::Graph &g = p->getGraph();
- // Eliminate the physical registers which overlap with this range, along
- // with all their aliases.
- for (LIVector::iterator pItr = physIntervals.begin(),
- pEnd = physIntervals.end(); pItr != pEnd; ++pItr) {
+ const TargetMachine &tm = mf->getTarget();
+ CoalescerPair cp(*tm.getInstrInfo(), *tm.getRegisterInfo());
- if (!li->overlaps(**pItr))
- continue;
+ // Scan the machine function and add a coalescing cost whenever CoalescerPair
+ // gives the OK.
+ for (MachineFunction::const_iterator mbbItr = mf->begin(),
+ mbbEnd = mf->end();
+ mbbItr != mbbEnd; ++mbbItr) {
+ const MachineBasicBlock *mbb = &*mbbItr;
- unsigned pReg = (*pItr)->reg;
-
- // If we get here then the live intervals overlap, but we're still ok
- // if they're coalescable.
- if (coalesces.find(RegPair(li->reg, pReg)) != coalesces.end())
- continue;
+ for (MachineBasicBlock::const_iterator miItr = mbb->begin(),
+ miEnd = mbb->end();
+ miItr != miEnd; ++miItr) {
+ const MachineInstr *mi = &*miItr;
- // If we get here then we have a genuine exclusion.
+ if (!cp.setRegisters(mi)) {
+ continue; // Not coalescable.
+ }
- // Remove the overlapping reg...
- RegVector::iterator eraseItr =
- std::find(liAllowed.begin(), liAllowed.end(), pReg);
+ if (cp.getSrcReg() == cp.getDstReg()) {
+ continue; // Already coalesced.
+ }
- if (eraseItr != liAllowed.end())
- liAllowed.erase(eraseItr);
+ unsigned dst = cp.getDstReg(),
+ src = cp.getSrcReg();
- const unsigned *aliasItr = tri->getAliasSet(pReg);
+ const float copyFactor = 0.5; // Cost of copy relative to load. Current
+ // value plucked randomly out of the air.
+
+ PBQP::PBQPNum cBenefit =
+ copyFactor * LiveIntervals::getSpillWeight(false, true,
+ loopInfo->getLoopDepth(mbb));
- if (aliasItr != 0) {
- // ...and its aliases.
- for (; *aliasItr != 0; ++aliasItr) {
- RegVector::iterator eraseItr =
- std::find(liAllowed.begin(), liAllowed.end(), *aliasItr);
+ if (cp.isPhys()) {
+ if (!lis->isAllocatable(dst)) {
+ continue;
+ }
- if (eraseItr != liAllowed.end()) {
- liAllowed.erase(eraseItr);
+ const PBQPRAProblem::AllowedSet &allowed = p->getAllowedSet(src);
+ unsigned pregOpt = 0;
+ while (pregOpt < allowed.size() && allowed[pregOpt] != dst) {
+ ++pregOpt;
+ }
+ if (pregOpt < allowed.size()) {
+ ++pregOpt; // +1 to account for spill option.
+ PBQP::Graph::NodeItr node = p->getNodeForVReg(src);
+ addPhysRegCoalesce(g.getNodeCosts(node), pregOpt, cBenefit);
+ }
+ } else {
+ const PBQPRAProblem::AllowedSet *allowed1 = &p->getAllowedSet(dst);
+ const PBQPRAProblem::AllowedSet *allowed2 = &p->getAllowedSet(src);
+ PBQP::Graph::NodeItr node1 = p->getNodeForVReg(dst);
+ PBQP::Graph::NodeItr node2 = p->getNodeForVReg(src);
+ PBQP::Graph::EdgeItr edge = g.findEdge(node1, node2);
+ if (edge == g.edgesEnd()) {
+ edge = g.addEdge(node1, node2, PBQP::Matrix(allowed1->size() + 1,
+ allowed2->size() + 1,
+ 0));
+ } else {
+ if (g.getEdgeNode1(edge) == node2) {
+ std::swap(node1, node2);
+ std::swap(allowed1, allowed2);
}
}
+
+ addVirtRegCoalesce(g.getEdgeCosts(edge), *allowed1, *allowed2,
+ cBenefit);
}
}
+ }
- // Copy the allowed set into a member vector for use when constructing cost
- // vectors & matrices, and mapping PBQP solutions back to assignments.
- allowedSets[node] = AllowedSet(liAllowed.begin(), liAllowed.end());
+ return p;
+}
- // Set the spill cost to the interval weight, or epsilon if the
- // interval weight is zero
- PBQP::PBQPNum spillCost = (li->weight != 0.0) ?
- li->weight : std::numeric_limits<PBQP::PBQPNum>::min();
+void PBQPBuilderWithCoalescing::addPhysRegCoalesce(PBQP::Vector &costVec,
+ unsigned pregOption,
+ PBQP::PBQPNum benefit) {
+ costVec[pregOption] += -benefit;
+}
- // Build a cost vector for this interval.
- problemNodes[node] =
- problem.addNode(
- buildCostVector(li->reg, allowedSets[node], coalesces, spillCost));
+void PBQPBuilderWithCoalescing::addVirtRegCoalesce(
+ PBQP::Matrix &costMat,
+ const PBQPRAProblem::AllowedSet &vr1Allowed,
+ const PBQPRAProblem::AllowedSet &vr2Allowed,
+ PBQP::PBQPNum benefit) {
- }
+ assert(costMat.getRows() == vr1Allowed.size() + 1 && "Size mismatch.");
+ assert(costMat.getCols() == vr2Allowed.size() + 1 && "Size mismatch.");
+ for (unsigned i = 0; i != vr1Allowed.size(); ++i) {
+ unsigned preg1 = vr1Allowed[i];
+ for (unsigned j = 0; j != vr2Allowed.size(); ++j) {
+ unsigned preg2 = vr2Allowed[j];
+
+ if (preg1 == preg2) {
+ costMat[i + 1][j + 1] += -benefit;
+ }
+ }
+ }
+}
- // Now add the cost matrices...
- for (unsigned node1 = 0; node1 < node2LI.size(); ++node1) {
- const LiveInterval *li = node2LI[node1];
- // Test for live range overlaps and insert interference matrices.
- for (unsigned node2 = node1 + 1; node2 < node2LI.size(); ++node2) {
- const LiveInterval *li2 = node2LI[node2];
+void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
+ au.addRequired<SlotIndexes>();
+ au.addPreserved<SlotIndexes>();
+ au.addRequired<LiveIntervals>();
+ //au.addRequiredID(SplitCriticalEdgesID);
+ au.addRequired<RegisterCoalescer>();
+ au.addRequired<CalculateSpillWeights>();
+ au.addRequired<LiveStacks>();
+ au.addPreserved<LiveStacks>();
+ au.addRequired<MachineLoopInfo>();
+ au.addPreserved<MachineLoopInfo>();
+ if (pbqpPreSplitting)
+ au.addRequired<LoopSplitter>();
+ au.addRequired<VirtRegMap>();
+ au.addRequired<RenderMachineFunction>();
+ MachineFunctionPass::getAnalysisUsage(au);
+}
- CoalesceMap::const_iterator cmItr =
- coalesces.find(RegPair(li->reg, li2->reg));
+void RegAllocPBQP::findVRegIntervalsToAlloc() {
- PBQP::Matrix *m = 0;
+ // Iterate over all live ranges.
+ for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
+ itr != end; ++itr) {
- if (cmItr != coalesces.end()) {
- m = buildCoalescingMatrix(allowedSets[node1], allowedSets[node2],
- cmItr->second);
- }
- else if (li->overlaps(*li2)) {
- m = buildInterferenceMatrix(allowedSets[node1], allowedSets[node2]);
- }
+ // Ignore physical registers.
+ if (TargetRegisterInfo::isPhysicalRegister(itr->first))
+ continue;
- if (m != 0) {
- problem.addEdge(problemNodes[node1],
- problemNodes[node2],
- *m);
+ LiveInterval *li = itr->second;
- delete m;
- }
+ // If this live interval is non-empty, we will use PBQP to allocate it.
+ // Empty intervals are allocated in a simple post-processing stage in
+ // finalizeAlloc.
+ if (!li->empty()) {
+ vregsToAlloc.insert(li->reg);
+ } else {
+ emptyIntervalVRegs.insert(li->reg);
}
}
-
- assert(problem.getNumNodes() == allowedSets.size());
-/*
- std::cerr << "Allocating for " << problem.getNumNodes() << " nodes, "
- << problem.getNumEdges() << " edges.\n";
-
- problem.printDot(std::cerr);
-*/
- // We're done, PBQP problem constructed - return it.
- return problem;
}
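
The partition above is worth spelling out: empty intervals still get a physical register eventually (finalizeAlloc handles them first), they just add no constraints to the PBQP problem, so they are deferred. A rough sketch of the same split, with hypothetical stand-in types:

#include <map>
#include <set>

struct IntervalInfo { bool empty; };

void partitionVRegs(const std::map<unsigned, IntervalInfo> &intervals,
                    std::set<unsigned> &vregsToAlloc,
                    std::set<unsigned> &emptyIntervalVRegs) {
  for (std::map<unsigned, IntervalInfo>::const_iterator i = intervals.begin(),
       e = intervals.end(); i != e; ++i) {
    if (!i->second.empty)
      vregsToAlloc.insert(i->first);       // solved by PBQP
    else
      emptyIntervalVRegs.insert(i->first); // trivially assigned later
  }
}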
-void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
+void RegAllocPBQP::addStackInterval(const LiveInterval *spilled,
MachineRegisterInfo* mri) {
int stackSlot = vrm->getStackSlot(spilled->reg);
- if (stackSlot == VirtRegMap::NO_STACK_SLOT)
+ if (stackSlot == VirtRegMap::NO_STACK_SLOT) {
return;
+ }
const TargetRegisterClass *RC = mri->getRegClass(spilled->reg);
LiveInterval &stackInterval = lss->getOrCreateInterval(stackSlot, RC);
VNInfo *vni;
- if (stackInterval.getNumValNums() != 0)
+ if (stackInterval.getNumValNums() != 0) {
vni = stackInterval.getValNumInfo(0);
- else
+ } else {
vni = stackInterval.getNextValue(
- SlotIndex(), 0, false, lss->getVNInfoAllocator());
+ SlotIndex(), 0, lss->getVNInfoAllocator());
+ }
LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
stackInterval.MergeRangesInAsValue(rhsInterval, vni);
}
-bool PBQPRegAlloc::mapPBQPToRegAlloc(const PBQP::Solution &solution) {
-
+bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
+ const PBQP::Solution &solution) {
// Set to true if we have any spills
bool anotherRoundNeeded = false;
// Clear the existing allocation.
vrm->clearAllVirt();
- // Iterate over the nodes mapping the PBQP solution to a register assignment.
- for (unsigned node = 0; node < node2LI.size(); ++node) {
- unsigned virtReg = node2LI[node]->reg,
- allocSelection = solution.getSelection(problemNodes[node]);
-
-
- // If the PBQP solution is non-zero it's a physical register...
- if (allocSelection != 0) {
- // Get the physical reg, subtracting 1 to account for the spill option.
- unsigned physReg = allowedSets[node][allocSelection - 1];
-
- DEBUG(dbgs() << "VREG " << virtReg << " -> "
- << tri->getName(physReg) << "\n");
-
- assert(physReg != 0);
-
- // Add to the virt reg map and update the used phys regs.
- vrm->assignVirt2Phys(virtReg, physReg);
- }
- // ...Otherwise it's a spill.
- else {
-
- // Make sure we ignore this virtual reg on the next round
- // of allocation
- vregIntervalsToAlloc.erase(&lis->getInterval(virtReg));
-
- // Insert spill ranges for this live range
- const LiveInterval *spillInterval = node2LI[node];
- double oldSpillWeight = spillInterval->weight;
+ const PBQP::Graph &g = problem.getGraph();
+ // Iterate over the nodes mapping the PBQP solution to a register
+ // assignment.
+ for (PBQP::Graph::ConstNodeItr node = g.nodesBegin(),
+ nodeEnd = g.nodesEnd();
+ node != nodeEnd; ++node) {
+ unsigned vreg = problem.getVRegForNode(node);
+ unsigned alloc = solution.getSelection(node);
+
+ if (problem.isPRegOption(vreg, alloc)) {
+ unsigned preg = problem.getPRegForOption(vreg, alloc);
+ DEBUG(dbgs() << "VREG " << vreg << " -> " << tri->getName(preg) << "\n");
+ assert(preg != 0 && "Invalid preg selected.");
+ vrm->assignVirt2Phys(vreg, preg);
+ } else if (problem.isSpillOption(vreg, alloc)) {
+ vregsToAlloc.erase(vreg);
+ const LiveInterval* spillInterval = &lis->getInterval(vreg);
+ double oldWeight = spillInterval->weight;
SmallVector<LiveInterval*, 8> spillIs;
rmf->rememberUseDefs(spillInterval);
std::vector<LiveInterval*> newSpills =
@@ -768,42 +541,42 @@ bool PBQPRegAlloc::mapPBQPToRegAlloc(const PBQP::Solution &solution) {
addStackInterval(spillInterval, mri);
rmf->rememberSpills(spillInterval, newSpills);
- (void) oldSpillWeight;
- DEBUG(dbgs() << "VREG " << virtReg << " -> SPILLED (Cost: "
- << oldSpillWeight << ", New vregs: ");
+ (void) oldWeight;
+ DEBUG(dbgs() << "VREG " << vreg << " -> SPILLED (Cost: "
+ << oldWeight << ", New vregs: ");
// Copy any newly inserted live intervals into the list of regs to
// allocate.
for (std::vector<LiveInterval*>::const_iterator
itr = newSpills.begin(), end = newSpills.end();
itr != end; ++itr) {
-
assert(!(*itr)->empty() && "Empty spill range.");
-
DEBUG(dbgs() << (*itr)->reg << " ");
-
- vregIntervalsToAlloc.insert(*itr);
+ vregsToAlloc.insert((*itr)->reg);
}
DEBUG(dbgs() << ")\n");
// We need another round if spill intervals were added.
anotherRoundNeeded |= !newSpills.empty();
+ } else {
+ assert(false && "Unknown allocation option.");
}
}
return !anotherRoundNeeded;
}
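
Underneath the isPRegOption/isSpillOption queries, the convention is unchanged from the removed code: selection 0 is the spill option, and selection k > 0 maps to the (k-1)th entry of the node's allowed set, the same +1 offset the builder applied. A hedged sketch of the decode step, with hypothetical names:

#include <cstdio>
#include <vector>

// Decode one node's PBQP selection: 0 = spill, k > 0 = allowedSet[k - 1].
// Returns false when a spill means another allocation round is needed.
bool decodeSelection(unsigned vreg, unsigned selection,
                     const std::vector<unsigned> &allowedSet) {
  if (selection == 0) {
    std::printf("VREG %u -> SPILLED\n", vreg);
    return false; // spill ranges were inserted: allocate again
  }
  unsigned preg = allowedSet[selection - 1];
  std::printf("VREG %u -> preg %u\n", vreg, preg);
  return true;
}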
-void PBQPRegAlloc::finalizeAlloc() const {
+
+void RegAllocPBQP::finalizeAlloc() const {
typedef LiveIntervals::iterator LIIterator;
typedef LiveInterval::Ranges::const_iterator LRIterator;
// First allocate registers for the empty intervals.
- for (LiveIntervalSet::const_iterator
- itr = emptyVRegIntervals.begin(), end = emptyVRegIntervals.end();
+ for (RegSet::const_iterator
+ itr = emptyIntervalVRegs.begin(), end = emptyIntervalVRegs.end();
itr != end; ++itr) {
- LiveInterval *li = *itr;
+ LiveInterval *li = &lis->getInterval(*itr);
unsigned physReg = vrm->getRegAllocPref(li->reg);
@@ -828,11 +601,9 @@ void PBQPRegAlloc::finalizeAlloc() const {
// Get the physical register for this interval
if (TargetRegisterInfo::isPhysicalRegister(li->reg)) {
reg = li->reg;
- }
- else if (vrm->isAssignedReg(li->reg)) {
+ } else if (vrm->isAssignedReg(li->reg)) {
reg = vrm->getPhys(li->reg);
- }
- else {
+ } else {
// Ranges which are assigned a stack slot only are ignored.
continue;
}
@@ -849,7 +620,7 @@ void PBQPRegAlloc::finalizeAlloc() const {
// Find the set of basic blocks which this range is live into...
if (lis->findLiveInMBBs(lrItr->start, lrItr->end, liveInMBBs)) {
// And add the physreg for this interval to their live-in sets.
- for (unsigned i = 0; i < liveInMBBs.size(); ++i) {
+ for (unsigned i = 0; i != liveInMBBs.size(); ++i) {
if (liveInMBBs[i] != entryMBB) {
if (!liveInMBBs[i]->isLiveIn(reg)) {
liveInMBBs[i]->addLiveIn(reg);
@@ -863,7 +634,7 @@ void PBQPRegAlloc::finalizeAlloc() const {
}
-bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
+bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
mf = &MF;
tm = &mf->getTarget();
@@ -894,7 +665,7 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
findVRegIntervalsToAlloc();
// If there are non-empty intervals allocate them using pbqp.
- if (!vregIntervalsToAlloc.empty()) {
+ if (!vregsToAlloc.empty()) {
bool pbqpAllocComplete = false;
unsigned round = 0;
@@ -902,11 +673,13 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
while (!pbqpAllocComplete) {
DEBUG(dbgs() << " PBQP Regalloc round " << round << ":\n");
- PBQP::Graph problem = constructPBQPProblem();
+ std::auto_ptr<PBQPRAProblem> problem =
+ builder->build(mf, lis, loopInfo, vregsToAlloc);
PBQP::Solution solution =
- PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(problem);
+ PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(
+ problem->getGraph());
- pbqpAllocComplete = mapPBQPToRegAlloc(solution);
+ pbqpAllocComplete = mapPBQPToRegAlloc(*problem, solution);
++round;
}
@@ -917,12 +690,8 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
rmf->renderMachineFunction("After PBQP register allocation.", vrm);
- vregIntervalsToAlloc.clear();
- emptyVRegIntervals.clear();
- li2Node.clear();
- node2LI.clear();
- allowedSets.clear();
- problemNodes.clear();
+ vregsToAlloc.clear();
+ emptyIntervalVRegs.clear();
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *vrm << "\n");
@@ -934,9 +703,18 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
return true;
}
-FunctionPass* llvm::createPBQPRegisterAllocator() {
- return new PBQPRegAlloc();
+FunctionPass* llvm::createPBQPRegisterAllocator(
+ std::auto_ptr<PBQPBuilder> builder) {
+ return new RegAllocPBQP(builder);
}
+FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
+ if (pbqpCoalescing) {
+ return createPBQPRegisterAllocator(
+ std::auto_ptr<PBQPBuilder>(new PBQPBuilderWithCoalescing()));
+ } // else
+ return createPBQPRegisterAllocator(
+ std::auto_ptr<PBQPBuilder>(new PBQPBuilder()));
+}
#undef DEBUG_TYPE
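
The factory change is an ownership-transfer pattern: the caller hands the pass a builder object via std::auto_ptr (the pre-C++11 transfer-on-copy smart pointer that std::unique_ptr later replaced), so alternative PBQP constructions can be injected without subclassing the allocator. A minimal sketch of the shape, with hypothetical names:

#include <memory>

struct BuilderBase { virtual ~BuilderBase() {} };
struct CoalescingBuilder : BuilderBase {};

struct AllocatorPass {
  // Copying an auto_ptr transfers ownership into the pass.
  explicit AllocatorPass(std::auto_ptr<BuilderBase> b) : builder(b) {}
  std::auto_ptr<BuilderBase> builder;
};

AllocatorPass *createAllocator(bool coalescing) {
  std::auto_ptr<BuilderBase> b(coalescing
                                   ? (BuilderBase *)new CoalescingBuilder()
                                   : new BuilderBase());
  return new AllocatorPass(b); // ownership moves into the pass
}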
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 02b5539..407559a 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -24,7 +24,8 @@
using namespace llvm;
// Register the RegisterCoalescer interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<RegisterCoalescer> Z("Register Coalescer");
+INITIALIZE_ANALYSIS_GROUP(RegisterCoalescer, "Register Coalescer",
+ SimpleRegisterCoalescing)
char RegisterCoalescer::ID = 0;
// RegisterCoalescer destructor: DO NOT move this to the header file
diff --git a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp b/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
index 93426ee..cbfd5a2 100644
--- a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
@@ -30,9 +30,14 @@
using namespace llvm;
char RenderMachineFunction::ID = 0;
-INITIALIZE_PASS(RenderMachineFunction, "rendermf",
+INITIALIZE_PASS_BEGIN(RenderMachineFunction, "rendermf",
"Render machine functions (and related info) to HTML pages",
- false, false);
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(RenderMachineFunction, "rendermf",
+ "Render machine functions (and related info) to HTML pages",
+ false, false)
static cl::opt<std::string>
outputFileSuffix("rmf-file-suffix",
@@ -458,14 +463,9 @@ namespace llvm {
liItr != liEnd; ++liItr) {
LiveInterval *li = liItr->second;
- const TargetRegisterClass *liTRC;
-
if (TargetRegisterInfo::isPhysicalRegister(li->reg))
continue;
- liTRC = mri->getRegClass(li->reg);
-
-
// For all ranges in the current interval.
for (LiveInterval::iterator lrItr = li->begin(),
lrEnd = li->end();
diff --git a/contrib/llvm/lib/CodeGen/RenderMachineFunction.h b/contrib/llvm/lib/CodeGen/RenderMachineFunction.h
index 8d56a82..8571992 100644
--- a/contrib/llvm/lib/CodeGen/RenderMachineFunction.h
+++ b/contrib/llvm/lib/CodeGen/RenderMachineFunction.h
@@ -202,7 +202,9 @@ namespace llvm {
public:
static char ID;
- RenderMachineFunction() : MachineFunctionPass(ID) {}
+ RenderMachineFunction() : MachineFunctionPass(ID) {
+ initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &au) const;
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index 7d39dc4..3388889 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -33,6 +34,12 @@ ScheduleDAG::ScheduleDAG(MachineFunction &mf)
ScheduleDAG::~ScheduleDAG() {}
+/// getInstrDesc helper to handle SDNodes.
+const TargetInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
+ if (!Node || !Node->isMachineOpcode()) return NULL;
+ return &TII->get(Node->getMachineOpcode());
+}
+
/// dump - dump the schedule.
void ScheduleDAG::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
@@ -68,12 +75,12 @@ void ScheduleDAG::Run(MachineBasicBlock *bb,
/// addPred - This adds the specified edge as a pred of the current node if
/// not already. It also adds the current node as a successor of the
/// specified node.
-void SUnit::addPred(const SDep &D) {
+bool SUnit::addPred(const SDep &D) {
// If this node already has this dependence, don't add a redundant one.
for (SmallVector<SDep, 4>::const_iterator I = Preds.begin(), E = Preds.end();
I != E; ++I)
if (*I == D)
- return;
+ return false;
// Now add a corresponding succ to N.
SDep P = D;
P.setSUnit(this);
@@ -99,6 +106,7 @@ void SUnit::addPred(const SDep &D) {
this->setDepthDirty();
N->setHeightDirty();
}
+ return true;
}
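
Changing addPred to return bool gives callers a cheap way to distinguish a newly inserted dependence from a redundant one, so reference counting and depth/height invalidation run only once per real edge. The idiom in isolation (simplified types, not the actual SDep):

#include <algorithm>
#include <vector>

struct Edge {
  int to;
  bool operator==(const Edge &o) const { return to == o.to; }
};

struct Node {
  std::vector<Edge> preds;
  // Returns false if this dependence already exists, true if it was added.
  bool addPred(const Edge &e) {
    if (std::find(preds.begin(), preds.end(), e) != preds.end())
      return false; // redundant: skip the bookkeeping entirely
    preds.push_back(e);
    return true; // caller may now update counts, depth, height, ...
  }
};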
/// removePred - This removes the specified edge as a pred of the current
@@ -278,6 +286,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
dbgs() << " # preds left : " << NumPredsLeft << "\n";
dbgs() << " # succs left : " << NumSuccsLeft << "\n";
+ dbgs() << " # rdefs left : " << NumRegDefsLeft << "\n";
dbgs() << " Latency : " << Latency << "\n";
dbgs() << " Depth : " << Depth << "\n";
dbgs() << " Height : " << Height << "\n";
@@ -492,7 +501,7 @@ void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
- bool& HasLoop) {
+ bool &HasLoop) {
std::vector<const SUnit*> WorkList;
WorkList.reserve(SUnits.size());
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
index 0a2fb37..6b7a8c64 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
@@ -57,7 +57,7 @@ void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
assert(I->getReg() && "Unknown physical register!");
unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
.addReg(I->getReg());
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index ea93dd5..f17023e 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -16,6 +16,7 @@
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -32,9 +33,9 @@ using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt)
- : ScheduleDAG(mf), MLI(mli), MDT(mdt), Defs(TRI->getNumRegs()),
- Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
- MFI = mf.getFrameInfo();
+ : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
+ InstrItins(mf.getTarget().getInstrItineraryData()),
+ Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
DbgValueVec.clear();
}
@@ -78,12 +79,12 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
} while (1);
}
-/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
+/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
// First just call Value::getUnderlyingObject to let it do what it does.
do {
- V = V->getUnderlyingObject();
+ V = GetUnderlyingObject(V);
// If it found an inttoptr, use special code to continue climbing.
if (Operator::getOpcode(V) != Instruction::IntToPtr)
break;
@@ -141,6 +142,46 @@ void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
}
}
+/// AddSchedBarrierDeps - Add dependencies from instructions in the current
+/// list of instructions being scheduled to the scheduling barrier by adding
+/// the exit SU to the register defs and uses list. This ensures that
+/// instructions which define registers that are either used by the
+/// terminator or are live-out are properly scheduled. This is especially
+/// important when the definition latency of the return value(s) is too high
+/// to be hidden by the branch or when the live-out registers are used by
+/// instructions in the fallthrough block.
+void ScheduleDAGInstrs::AddSchedBarrierDeps() {
+ MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
+ ExitSU.setInstr(ExitMI);
+ bool AllDepKnown = ExitMI &&
+ (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+ if (ExitMI && AllDepKnown) {
+ // If it's a call or a barrier, add dependencies on the defs and uses of
+ // instruction.
+ for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = ExitMI->getOperand(i);
+ if (!MO.isReg() || MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+
+ assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
+ Uses[Reg].push_back(&ExitSU);
+ }
+ } else {
+ // For others, e.g. fallthrough, conditional branch, assume the exit
+ // uses all the registers that are livein to the successor blocks.
+ SmallSet<unsigned, 8> Seen;
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI)
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ E = (*SI)->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ if (Seen.insert(Reg))
+ Uses[Reg].push_back(&ExitSU);
+ }
+ }
+}
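
The fallback branch is the interesting part: when the block exit is not a call or barrier, the exact uses are unknown, so the exit SU conservatively becomes a user of every register live into any successor, deduplicated per register. A condensed sketch of just that walk (container names hypothetical):

#include <map>
#include <set>
#include <vector>

struct SUnit;

void addExitUsesSketch(SUnit *exitSU,
                       const std::vector<std::vector<unsigned> > &succLiveIns,
                       std::map<unsigned, std::vector<SUnit *> > &uses) {
  std::set<unsigned> seen;
  for (size_t s = 0; s != succLiveIns.size(); ++s)
    for (size_t i = 0; i != succLiveIns[s].size(); ++i) {
      unsigned reg = succLiveIns[s][i];
      if (seen.insert(reg).second) // record each register only once
        uses[reg].push_back(exitSU);
    }
}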
+
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// We'll be allocating one SUnit for each instruction, plus one for
// the region exit node.
@@ -175,6 +216,10 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// without emitting the info from the previous call.
DbgValueVec.clear();
+ // Model data dependencies between instructions being scheduled and the
+ // ExitSU.
+ AddSchedBarrierDeps();
+
// Walk the list of instructions, from bottom moving up.
for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
MII != MIE; --MII) {
@@ -194,6 +239,8 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
"Cannot schedule terminators or labels!");
// Create the SUnit for this MI.
SUnit *SU = NewSUnit(MI);
+ SU->isCall = TID.isCall();
+ SU->isCommutable = TID.isCommutable();
// Assign the Latency field of SU using target-provided information.
if (UnitLatencies)
@@ -228,6 +275,8 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
SUnit *DefSU = DefList[i];
+ if (DefSU == &ExitSU)
+ continue;
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(Reg)))
@@ -237,6 +286,8 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
std::vector<SUnit *> &DefList = Defs[*Alias];
for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
SUnit *DefSU = DefList[i];
+ if (DefSU == &ExitSU)
+ continue;
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
!DefSU->getInstr()->registerDefIsDead(*Alias)))
@@ -258,12 +309,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// TODO: Perhaps we should get rid of
// SpecialAddressLatency and just move this into
// adjustSchedDependency for the targets that care about it.
- if (SpecialAddressLatency != 0 && !UnitLatencies) {
+ if (SpecialAddressLatency != 0 && !UnitLatencies &&
+ UseSU != &ExitSU) {
MachineInstr *UseMI = UseSU->getInstr();
const TargetInstrDesc &UseTID = UseMI->getDesc();
int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
- if ((UseTID.mayLoad() || UseTID.mayStore()) &&
+ if (RegUseIndex >= 0 &&
+ (UseTID.mayLoad() || UseTID.mayStore()) &&
(unsigned)RegUseIndex < UseTID.getNumOperands() &&
UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
@@ -357,7 +410,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
unsigned TrueMemOrderLatency = 0;
- if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
+ if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
(MI->hasVolatileMemoryRef() &&
(!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
// Be conservative with these and add dependencies on all memory
@@ -446,6 +499,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// Treat all other stores conservatively.
goto new_alias_chain;
}
+
+ if (!ExitSU.isPred(SU))
+ // Push stores up a bit to avoid them getting in between cmps
+ // and branches.
+ ExitSU.addPred(SDep(SU, SDep::Order, 0,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
} else if (TID.mayLoad()) {
bool MayAlias = true;
TrueMemOrderLatency = 0;
@@ -498,23 +559,22 @@ void ScheduleDAGInstrs::FinishBlock() {
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
- const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
-
// Compute the latency for the node.
- SU->Latency =
- InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());
+ if (!InstrItins || InstrItins->isEmpty()) {
+ SU->Latency = 1;
- // Simplistic target-independent heuristic: assume that loads take
- // extra time.
- if (InstrItins.isEmpty())
+ // Simplistic target-independent heuristic: assume that loads take
+ // extra time.
if (SU->getInstr()->getDesc().mayLoad())
SU->Latency += 2;
+ } else {
+ SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
+ }
}
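
The rewritten ComputeLatency prefers the target hook and keeps the old heuristic only as a fallback. As a plain decision table (a sketch; the caller-supplied itinerary latency stands in for TII->getInstrLatency):

unsigned computeLatencySketch(bool haveItineraries, bool isLoad,
                              unsigned itineraryLatency) {
  if (!haveItineraries) {
    unsigned latency = 1;      // default unit latency
    if (isLoad)
      latency += 2;            // assume loads take extra time
    return latency;
  }
  return itineraryLatency;     // target-provided latency
}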
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const {
- const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
- if (InstrItins.isEmpty())
+ if (!InstrItins || InstrItins->isEmpty())
return;
// For a data dependency with a known register...
@@ -528,14 +588,21 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
MachineInstr *DefMI = Def->getInstr();
int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
if (DefIdx != -1) {
- int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
- DefIdx);
- if (DefCycle >= 0) {
- MachineInstr *UseMI = Use->getInstr();
- const unsigned UseClass = UseMI->getDesc().getSchedClass();
-
- // For all uses of the register, calculate the maxmimum latency
- int Latency = -1;
+ const MachineOperand &MO = DefMI->getOperand(DefIdx);
+ if (MO.isReg() && MO.isImplicit() &&
+ DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
+ // This is an implicit def, getOperandLatency() won't return the correct
+ // latency. e.g.
+ // %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
+ // %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
+ // What we want is to compute latency between def of %D6/%D7 and use of
+ // %Q3 instead.
+ DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ }
+ MachineInstr *UseMI = Use->getInstr();
+ // For all uses of the register, calculate the maximum latency
+ int Latency = -1;
+ if (UseMI) {
for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = UseMI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
@@ -544,15 +611,21 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
if (MOReg != Reg)
continue;
- int UseCycle = InstrItins.getOperandCycle(UseClass, i);
- if (UseCycle >= 0)
- Latency = std::max(Latency, DefCycle - UseCycle + 1);
+ int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
+ UseMI, i);
+ Latency = std::max(Latency, UseCycle);
}
-
- // If we found a latency, then replace the existing dependence latency.
- if (Latency >= 0)
- dep.setLatency(Latency);
+ } else {
+ // If UseMI is null, it must be a scheduling barrier.
+ if (!InstrItins || InstrItins->isEmpty())
+ return;
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
}
+
+ // If we found a latency, then replace the existing dependence latency.
+ if (Latency >= 0)
+ dep.setLatency(Latency);
}
}
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
index c8f543f..c878287 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
@@ -101,6 +101,7 @@ namespace llvm {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
const MachineFrameInfo *MFI;
+ const InstrItineraryData *InstrItins;
/// Defs, Uses - Remember where defs and uses of each physical register
/// are as we iterate upward through the instructions. This is allocated
@@ -163,6 +164,15 @@ namespace llvm {
/// input.
virtual void BuildSchedGraph(AliasAnalysis *AA);
+ /// AddSchedBarrierDeps - Add dependencies from instructions in the current
+ /// list of instructions being scheduled to the scheduling barrier. We want
+ /// to make sure instructions which define registers that are either used by
+ /// the terminator or are live-out are properly scheduled. This is
+ /// especially important when the definition latency of the return value(s)
+ /// is too high to be hidden by the branch or when the live-out registers
+ /// are used by instructions in the fallthrough block.
+ void AddSchedBarrierDeps();
+
/// ComputeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU);
diff --git a/contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index cbde2b0..e6d7ded 100644
--- a/contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -1,4 +1,4 @@
-//===----- PostRAHazardRecognizer.cpp - hazard recognizer -------- ---------===//
+//===----- ScoreboardHazardRecognizer.cpp - Scheduler Support -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,56 +7,81 @@
//
//===----------------------------------------------------------------------===//
//
-// This implements a hazard recognizer using the instructions itineraries
-// defined for the current target.
+// This file implements the ScoreboardHazardRecognizer class, which
+// encapsulates hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "post-RA-sched"
-#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+#define DEBUG_TYPE ::llvm::ScoreboardHazardRecognizer::DebugType
+#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetInstrItineraries.h"
using namespace llvm;
-PostRAHazardRecognizer::
-PostRAHazardRecognizer(const InstrItineraryData &LItinData) :
- ScheduleHazardRecognizer(), ItinData(LItinData) {
+#ifndef NDEBUG
+const char *ScoreboardHazardRecognizer::DebugType = "";
+#endif
+
+ScoreboardHazardRecognizer::
+ScoreboardHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *SchedDAG,
+ const char *ParentDebugType) :
+ ScheduleHazardRecognizer(), ItinData(II), DAG(SchedDAG), IssueWidth(0),
+ IssueCount(0) {
+
+#ifndef NDEBUG
+ DebugType = ParentDebugType;
+#endif
+
// Determine the maximum depth of any itinerary. This determines the
// depth of the scoreboard. We always make the scoreboard at least 1
// cycle deep to avoid dealing with the boundary condition.
unsigned ScoreboardDepth = 1;
- if (!ItinData.isEmpty()) {
+ if (ItinData && !ItinData->isEmpty()) {
+ IssueWidth = ItinData->IssueWidth;
+
for (unsigned idx = 0; ; ++idx) {
- if (ItinData.isEndMarker(idx))
+ if (ItinData->isEndMarker(idx))
break;
- const InstrStage *IS = ItinData.beginStage(idx);
- const InstrStage *E = ItinData.endStage(idx);
+ const InstrStage *IS = ItinData->beginStage(idx);
+ const InstrStage *E = ItinData->endStage(idx);
+ unsigned CurCycle = 0;
unsigned ItinDepth = 0;
- for (; IS != E; ++IS)
- ItinDepth += IS->getCycles();
+ for (; IS != E; ++IS) {
+ unsigned StageDepth = CurCycle + IS->getCycles();
+ if (ItinDepth < StageDepth) ItinDepth = StageDepth;
+ CurCycle += IS->getNextCycles();
+ }
- ScoreboardDepth = std::max(ScoreboardDepth, ItinDepth);
+ // Find the next power-of-2 >= ItinDepth
+ while (ItinDepth > ScoreboardDepth) {
+ ScoreboardDepth *= 2;
+ }
}
+ MaxLookAhead = ScoreboardDepth;
}
ReservedScoreboard.reset(ScoreboardDepth);
RequiredScoreboard.reset(ScoreboardDepth);
- DEBUG(dbgs() << "Using post-ra hazard recognizer: ScoreboardDepth = "
+ DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
<< ScoreboardDepth << '\n');
}
-void PostRAHazardRecognizer::Reset() {
+void ScoreboardHazardRecognizer::Reset() {
+ IssueCount = 0;
RequiredScoreboard.reset();
ReservedScoreboard.reset();
}
-void PostRAHazardRecognizer::ScoreBoard::dump() const {
+void ScoreboardHazardRecognizer::Scoreboard::dump() const {
dbgs() << "Scoreboard:\n";
unsigned last = Depth - 1;
@@ -72,24 +97,46 @@ void PostRAHazardRecognizer::ScoreBoard::dump() const {
}
}
+bool ScoreboardHazardRecognizer::atIssueLimit() const {
+ if (IssueWidth == 0)
+ return false;
+
+ return IssueCount == IssueWidth;
+}
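
An IssueWidth of 0 is the "no itinerary data" case and deliberately disables the limit, so the comparison only fires for targets that declare a width. How a scheduler loop might consult it (a sketch, not the real interface):

struct IssueGate {
  unsigned IssueWidth;  // 0 means no per-cycle limit is known
  unsigned IssueCount;
  bool atIssueLimit() const {
    if (IssueWidth == 0)
      return false;
    return IssueCount == IssueWidth;
  }
  void emitted()      { ++IssueCount; }   // one more instruction this cycle
  void advanceCycle() { IssueCount = 0; } // reset at each cycle boundary
};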
+
ScheduleHazardRecognizer::HazardType
-PostRAHazardRecognizer::getHazardType(SUnit *SU) {
- if (ItinData.isEmpty())
+ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+ if (!ItinData || ItinData->isEmpty())
return NoHazard;
- unsigned cycle = 0;
+ // Note that stalls will be negative for bottom-up scheduling.
+ int cycle = Stalls;
// Use the itinerary for the underlying instruction to check for
// free FU's in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
+
+ const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
+ if (TID == NULL) {
+ // Don't check hazards for non-machineinstr Nodes.
+ return NoHazard;
+ }
+ unsigned idx = TID->getSchedClass();
+ for (const InstrStage *IS = ItinData->beginStage(idx),
+ *E = ItinData->endStage(idx); IS != E; ++IS) {
// We must find one of the stage's units free for every cycle the
// stage is occupied. FIXME it would be more accurate to find the
// same unit free in all the cycles.
for (unsigned int i = 0; i < IS->getCycles(); ++i) {
- assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
- "Scoreboard depth exceeded!");
+ int StageCycle = cycle + (int)i;
+ if (StageCycle < 0)
+ continue;
+
+ if (StageCycle >= (int)RequiredScoreboard.getDepth()) {
+ assert((StageCycle - Stalls) < (int)RequiredScoreboard.getDepth() &&
+ "Scoreboard depth exceeded!");
+ // This stage was stalled beyond pipeline depth, so cannot conflict.
+ break;
+ }
unsigned freeUnits = IS->getUnits();
switch (IS->getReservationKind()) {
@@ -97,18 +144,18 @@ PostRAHazardRecognizer::getHazardType(SUnit *SU) {
assert(0 && "Invalid FU reservation");
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
- freeUnits &= ~ReservedScoreboard[cycle + i];
+ freeUnits &= ~ReservedScoreboard[StageCycle];
// FALLTHROUGH
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
- freeUnits &= ~RequiredScoreboard[cycle + i];
+ freeUnits &= ~RequiredScoreboard[StageCycle];
break;
}
if (!freeUnits) {
DEBUG(dbgs() << "*** Hazard in cycle " << (cycle + i) << ", ");
DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
- DEBUG(SU->getInstr()->dump());
+ DEBUG(DAG->dumpNode(SU));
return Hazard;
}
}
@@ -120,17 +167,24 @@ PostRAHazardRecognizer::getHazardType(SUnit *SU) {
return NoHazard;
}
-void PostRAHazardRecognizer::EmitInstruction(SUnit *SU) {
- if (ItinData.isEmpty())
+void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
+ if (!ItinData || ItinData->isEmpty())
return;
- unsigned cycle = 0;
-
// Use the itinerary for the underlying instruction to reserve FU's
// in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
+ const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
+ assert(TID && "The scheduler must filter non-machineinstrs");
+ if (DAG->TII->isZeroCost(TID->Opcode))
+ return;
+
+ ++IssueCount;
+
+ unsigned cycle = 0;
+
+ unsigned idx = TID->getSchedClass();
+ for (const InstrStage *IS = ItinData->beginStage(idx),
+ *E = ItinData->endStage(idx); IS != E; ++IS) {
// We must reserve one of the stage's units for every cycle the
// stage is occupied. FIXME it would be more accurate to reserve
// the same unit free in all the cycles.
@@ -174,7 +228,16 @@ void PostRAHazardRecognizer::EmitInstruction(SUnit *SU) {
DEBUG(RequiredScoreboard.dump());
}
-void PostRAHazardRecognizer::AdvanceCycle() {
+void ScoreboardHazardRecognizer::AdvanceCycle() {
+ IssueCount = 0;
ReservedScoreboard[0] = 0; ReservedScoreboard.advance();
RequiredScoreboard[0] = 0; RequiredScoreboard.advance();
}
+
+void ScoreboardHazardRecognizer::RecedeCycle() {
+ IssueCount = 0;
+ ReservedScoreboard[ReservedScoreboard.getDepth()-1] = 0;
+ ReservedScoreboard.recede();
+ RequiredScoreboard[RequiredScoreboard.getDepth()-1] = 0;
+ RequiredScoreboard.recede();
+}
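
AdvanceCycle and RecedeCycle are mirror images because the scoreboard is a cyclic buffer indexed relative to the current cycle; its depth is rounded up to a power of two above precisely so the wrap is cheap. A toy ring illustrating both directions (using % where the real code can use a mask):

#include <vector>

struct RingScoreboard {
  std::vector<unsigned> data;
  size_t head;
  explicit RingScoreboard(size_t depth) : data(depth, 0), head(0) {}
  unsigned &at(size_t cycle) { return data[(head + cycle) % data.size()]; }
  // Top-down scheduling moves forward; clear the slot that wraps around.
  void advance() { at(0) = 0; head = (head + 1) % data.size(); }
  // Bottom-up scheduling moves backward; clear the opposite end first.
  void recede() {
    at(data.size() - 1) = 0;
    head = (head + data.size() - 1) % data.size();
  }
};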
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c9c4d91..9035602 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -25,7 +25,6 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -43,6 +42,7 @@ STATISTIC(NodesCombined , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed , "Number of load/op/store narrowed");
+STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int");
namespace {
static cl::opt<bool>
@@ -185,7 +185,7 @@ namespace {
SDValue visitANY_EXTEND(SDNode *N);
SDValue visitSIGN_EXTEND_INREG(SDNode *N);
SDValue visitTRUNCATE(SDNode *N);
- SDValue visitBIT_CONVERT(SDNode *N);
+ SDValue visitBITCAST(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N);
SDValue visitFADD(SDNode *N);
SDValue visitFSUB(SDNode *N);
@@ -229,12 +229,13 @@ namespace {
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
- SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, EVT);
+ SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
+ SDValue TransformFPLoadStorePair(SDNode *N);
SDValue GetDemandedBits(SDValue V, const APInt &Mask);
@@ -248,16 +249,19 @@ namespace {
bool isAlias(SDValue Ptr1, int64_t Size1,
const Value *SrcValue1, int SrcValueOffset1,
unsigned SrcValueAlign1,
+ const MDNode *TBAAInfo1,
SDValue Ptr2, int64_t Size2,
const Value *SrcValue2, int SrcValueOffset2,
- unsigned SrcValueAlign2) const;
+ unsigned SrcValueAlign2,
+ const MDNode *TBAAInfo2) const;
/// FindAliasInfo - Extracts the relevant alias information from the memory
/// node. Returns true if the operand was a load.
bool FindAliasInfo(SDNode *N,
SDValue &Ptr, int64_t &Size,
const Value *&SrcValue, int &SrcValueOffset,
- unsigned &SrcValueAlignment) const;
+ unsigned &SrcValueAlignment,
+ const MDNode *&TBAAInfo) const;
/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
/// looking for a better chain (aliasing node.)
@@ -270,15 +274,15 @@ namespace {
/// Run - runs the dag combiner on all nodes in the work list
void Run(CombineLevel AtLevel);
-
+
SelectionDAG &getDAG() const { return DAG; }
-
+
/// getShiftAmountTy - Returns a type large enough to hold any valid
/// shift amount - before type legalization these can be huge.
EVT getShiftAmountTy() {
return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
}
-
+
/// isTypeLegal - This method returns true if we are running before type
/// legalization or if the specified VT is legal.
bool isTypeLegal(const EVT &VT) {
@@ -631,7 +635,7 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
// Replace the old value with the new one.
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.2 ";
+ DEBUG(dbgs() << "\nReplacing.2 ";
TLO.Old.getNode()->dump(&DAG);
dbgs() << "\nWith: ";
TLO.New.getNode()->dump(&DAG);
@@ -666,12 +670,13 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
- ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
+ ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
+ : ISD::EXTLOAD)
: LD->getExtensionType();
Replace = true;
- return DAG.getExtLoad(ExtType, PVT, dl,
+ return DAG.getExtLoad(ExtType, dl, PVT,
LD->getChain(), LD->getBasePtr(),
- LD->getSrcValue(), LD->getSrcValueOffset(),
+ LD->getPointerInfo(),
MemVT, LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
}
@@ -691,7 +696,7 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
unsigned ExtOpc =
Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
return DAG.getNode(ExtOpc, dl, PVT, Op);
- }
+ }
}
if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
@@ -889,11 +894,12 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
- ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
+ ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
+ : ISD::EXTLOAD)
: LD->getExtensionType();
- SDValue NewLD = DAG.getExtLoad(ExtType, PVT, dl,
+ SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
LD->getChain(), LD->getBasePtr(),
- LD->getSrcValue(), LD->getSrcValueOffset(),
+ LD->getPointerInfo(),
MemVT, LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
@@ -975,7 +981,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!");
- DEBUG(dbgs() << "\nReplacing.3 ";
+ DEBUG(dbgs() << "\nReplacing.3 ";
N->dump(&DAG);
dbgs() << "\nWith: ";
RV.getNode()->dump(&DAG);
@@ -1054,7 +1060,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
case ISD::TRUNCATE: return visitTRUNCATE(N);
- case ISD::BIT_CONVERT: return visitBIT_CONVERT(N);
+ case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
case ISD::FADD: return visitFADD(N);
case ISD::FSUB: return visitFSUB(N);
@@ -1225,7 +1231,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
}
}
}
-
+
SDValue Result;
// If we've change things around then replace token factor.
@@ -1424,6 +1430,29 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
N0.getOperand(0).getOperand(1),
N0.getOperand(1)));
+ if (N1.getOpcode() == ISD::AND) {
+ SDValue AndOp0 = N1.getOperand(0);
+ ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
+ unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
+ unsigned DestBits = VT.getScalarType().getSizeInBits();
+
+ // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
+ // and similar xforms where the inner op is either ~0 or 0.
+ if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
+ DebugLoc DL = N->getDebugLoc();
+ return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
+ }
+ }
+
+ // add (sext i1), X -> sub X, (zext i1)
+ if (N0.getOpcode() == ISD::SIGN_EXTEND &&
+ N0.getOperand(0).getValueType() == MVT::i1 &&
+ !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
+ DebugLoc DL = N->getDebugLoc();
+ SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
+ return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
+ }
+
return SDValue();
}
@@ -1438,7 +1467,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
if (N->hasNUsesOfValue(0, 1))
return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
DAG.getNode(ISD::CARRY_FALSE,
- N->getDebugLoc(), MVT::Flag));
+ N->getDebugLoc(), MVT::Glue));
// canonicalize constant to RHS.
if (N0C && !N1C)
@@ -1447,7 +1476,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
// fold (addc x, 0) -> x + no carry out
if (N1C && N1C->isNullValue())
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
- N->getDebugLoc(), MVT::Flag));
+ N->getDebugLoc(), MVT::Glue));
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
APInt LHSZero, LHSOne;
@@ -1464,7 +1493,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
(LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
- N->getDebugLoc(), MVT::Flag));
+ N->getDebugLoc(), MVT::Glue));
}
return SDValue();
@@ -1489,6 +1518,22 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
return SDValue();
}
+// Since it may not be valid to emit a fold to zero for vector initializers
+// check if we can before folding.
+static SDValue tryFoldToZero(DebugLoc DL, const TargetLowering &TLI, EVT VT,
+ SelectionDAG &DAG, bool LegalOperations) {
+ if (!VT.isVector()) {
+ return DAG.getConstant(0, VT);
+ } else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
+ // Produce a vector of zeros.
+ SDValue El = DAG.getConstant(0, VT.getVectorElementType());
+ std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT,
+ &Ops[0], Ops.size());
+ }
+ return SDValue();
+}
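
tryFoldToZero centralizes what visitXOR used to open-code and visitSUB now reuses: x - x and x ^ x are identically zero, but for vector types the combiner may only fold when a zero vector (a BUILD_VECTOR of zero elements) is legal to materialize. A scalar/vector analogue of the identity and the splat:

#include <cassert>
#include <vector>

int subFold(int x) { return x - x; } // always 0: safe to fold to a constant
int xorFold(int x) { return x ^ x; } // likewise

// The vector case needs a buildable zero: one zero element in every lane,
// which is what the BUILD_VECTOR of zeros above models.
std::vector<int> zeroVector(size_t lanes) {
  return std::vector<int>(lanes, 0);
}

int main() {
  assert(subFold(42) == 0 && xorFold(42) == 0);
  assert(zeroVector(4) == std::vector<int>(4, 0));
  return 0;
}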
+
SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -1503,8 +1548,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
}
// fold (sub x, x) -> 0
+ // FIXME: Refactor this and xor and other similar operations together.
if (N0 == N1)
- return DAG.getConstant(0, N->getValueType(0));
+ return tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations);
// fold (sub c1, c2) -> c1-c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
@@ -1515,6 +1561,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
if (N0C && N0C->isAllOnesValue())
return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
+ // fold A-(A-B) -> B
+ if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
+ return N1.getOperand(1);
// fold (A+B)-A -> B
if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
return N0.getOperand(1);
@@ -1897,6 +1946,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N->getValueType(0);
+ DebugLoc DL = N->getDebugLoc();
// fold (mulhs x, 0) -> 0
if (N1C && N1C->isNullValue())
@@ -1910,6 +1960,22 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, VT);
+ // If the type twice as wide is legal, transform the mulhs to a wider multiply
+ // plus a shift.
+ if (VT.isSimple() && !VT.isVector()) {
+ MVT Simple = VT.getSimpleVT();
+ unsigned SimpleSize = Simple.getSizeInBits();
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
+ if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
+ N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
+ N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
+ N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
+ N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
+ DAG.getConstant(SimpleSize, getShiftAmountTy()));
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
+ }
+ }
+
return SDValue();
}
@@ -1918,6 +1984,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N->getValueType(0);
+ DebugLoc DL = N->getDebugLoc();
// fold (mulhu x, 0) -> 0
if (N1C && N1C->isNullValue())
@@ -1929,6 +1996,22 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, VT);
+ // If the type twice as wide is legal, transform the mulhu to a wider multiply
+ // plus a shift.
+ if (VT.isSimple() && !VT.isVector()) {
+ MVT Simple = VT.getSimpleVT();
+ unsigned SimpleSize = Simple.getSizeInBits();
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
+ if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
+ N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
+ N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
+ N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
+ N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
+ DAG.getConstant(SimpleSize, getShiftAmountTy()));
+ return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
+ }
+ }
+
return SDValue();
}
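
Both widening transforms rest on the identity mulh(x, y) == trunc((ext(x) * ext(y)) >> n), with the extension matching the signedness. A self-contained check at 16 bits, a sketch rather than anything from the patch (note that the arithmetic right shift of a negative signed product is the usual target behavior, though implementation-defined in older C++):

#include <cassert>
#include <cstdint>

int16_t mulhs16(int16_t x, int16_t y) {
  // Signed high half: sign-extend, multiply wide, shift down, truncate.
  return (int16_t)(((int32_t)x * (int32_t)y) >> 16);
}

uint16_t mulhu16(uint16_t x, uint16_t y) {
  // Unsigned high half: zero-extend instead.
  return (uint16_t)(((uint32_t)x * (uint32_t)y) >> 16);
}

int main() {
  // 30000 * 30000 = 900000000; its high 16 bits are 13732.
  assert(mulhs16(30000, 30000) == 13732);
  // 60000 * 60000 = 3600000000; its high 16 bits are 54931.
  assert(mulhu16(60000, 60000) == 54931);
  return 0;
}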
@@ -1992,6 +2075,29 @@ SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
if (Res.getNode()) return Res;
+ EVT VT = N->getValueType(0);
+ DebugLoc DL = N->getDebugLoc();
+
+ // If the type twice as wide is legal, transform the smul_lohi to a wider multiply
+ // plus a shift.
+ if (VT.isSimple() && !VT.isVector()) {
+ MVT Simple = VT.getSimpleVT();
+ unsigned SimpleSize = Simple.getSizeInBits();
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
+ if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
+ SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
+ SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
+ Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
+ // Compute the high part as N1.
+ Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
+ DAG.getConstant(SimpleSize, getShiftAmountTy()));
+ Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
+ // Compute the low part as N0.
+ Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
+ return CombineTo(N, Lo, Hi);
+ }
+ }
+
return SDValue();
}
@@ -1999,6 +2105,29 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
if (Res.getNode()) return Res;
+ EVT VT = N->getValueType(0);
+ DebugLoc DL = N->getDebugLoc();
+
+ // If the type twice as wide is legal, transform the umul_lohi to a wider multiply
+ // plus a shift.
+ if (VT.isSimple() && !VT.isVector()) {
+ MVT Simple = VT.getSimpleVT();
+ unsigned SimpleSize = Simple.getSizeInBits();
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
+ if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
+ SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
+ SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
+ Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
+ // Compute the high part as N1.
+ Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
+ DAG.getConstant(SimpleSize, getShiftAmountTy()));
+ Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
+ // Compute the low part as N0.
+ Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
+ return CombineTo(N, Lo, Hi);
+ }
+ }
+
return SDValue();
}
@@ -2116,7 +2245,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N0Op0 = N0.getOperand(0);
APInt Mask = ~N1C->getAPIntValue();
- Mask.trunc(N0Op0.getValueSizeInBits());
+ Mask = Mask.trunc(N0Op0.getValueSizeInBits());
if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(),
N0.getValueType(), N0Op0);
@@ -2198,10 +2327,9 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
LN0->getChain(), LN0->getBasePtr(),
- LN0->getSrcValue(),
- LN0->getSrcValueOffset(), MemVT,
+ LN0->getPointerInfo(), MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
AddToWorkList(N);
@@ -2221,10 +2349,10 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), MemVT,
+ LN0->getBasePtr(), LN0->getPointerInfo(),
+ MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
AddToWorkList(N);
@@ -2253,18 +2381,18 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (ExtVT == LoadedVT &&
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
-
- SDValue NewLoad =
- DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
+
+ SDValue NewLoad =
+ DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
LN0->getChain(), LN0->getBasePtr(),
- LN0->getSrcValue(), LN0->getSrcValueOffset(),
+ LN0->getPointerInfo(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
AddToWorkList(N);
CombineTo(LN0, NewLoad, NewLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
-
+
// Do not change the width of a volatile load.
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
@@ -2288,12 +2416,12 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
}
AddToWorkList(NewPtr.getNode());
-
+
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue Load =
- DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
+ DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
LN0->getChain(), NewPtr,
- LN0->getSrcValue(), LN0->getSrcValueOffset(),
+ LN0->getPointerInfo(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
Alignment);
AddToWorkList(N);
@@ -2722,17 +2850,8 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
N01C->getAPIntValue(), VT));
}
// fold (xor x, x) -> 0
- if (N0 == N1) {
- if (!VT.isVector()) {
- return DAG.getConstant(0, VT);
- } else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)){
- // Produce a vector of zeros.
- SDValue El = DAG.getConstant(0, VT.getVectorElementType());
- std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
- return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
- &Ops[0], Ops.size());
- }
- }
+ if (N0 == N1)
+ return tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations);
// Simplify: xor (op x...), (op y...) -> (op (xor x, y))
if (N0.getOpcode() == N1.getOpcode()) {
@@ -2810,7 +2929,8 @@ SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
LHS->getOperand(1), N->getOperand(1));
// Create the new shift.
- SDValue NewShift = DAG.getNode(N->getOpcode(), LHS->getOperand(0).getDebugLoc(),
+ SDValue NewShift = DAG.getNode(N->getOpcode(),
+ LHS->getOperand(0).getDebugLoc(),
VT, LHS->getOperand(0), N->getOperand(1));
// Create the new binop.
@@ -2850,7 +2970,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
- TruncC.trunc(TruncVT.getSizeInBits());
+ TruncC = TruncC.trunc(TruncVT.getSizeInBits());
return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
DAG.getNode(ISD::TRUNCATE,
@@ -2868,11 +2988,37 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
N0.getOperand(1).getOpcode() == ISD::Constant) {
uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
- if (c1 + c2 > OpSizeInBits)
+ if (c1 + c2 >= OpSizeInBits)
return DAG.getConstant(0, VT);
return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, N1.getValueType()));
}
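
The switch from > to >= in the guard above matters: a combined shift amount equal to the bit width must fold to the constant zero, because emitting a single shift by >= width is undefined in C terms and target-unspecified in general. A scalar sketch of the fold and its boundary case:

#include <cassert>
#include <cstdint>

// Fold (x << c1) << c2 into one shift, or into the known constant 0 when the
// combined amount reaches the bit width.
uint32_t foldShlShl(uint32_t x, unsigned c1, unsigned c2) {
  if (c1 + c2 >= 32)
    return 0;              // never emit a shift by >= 32
  return x << (c1 + c2);
}

int main() {
  // (x << 16) << 16 is 0 for every 32-bit x; c1 + c2 == 32 hits the guard.
  assert(foldShlShl(0xdeadbeefu, 16, 16) == 0);
  assert(foldShlShl(1u, 3, 4) == (1u << 7));
  return 0;
}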
+
+ // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
+ // For this to be valid, the second form must not preserve any of the bits
+ // that are shifted out by the inner shift in the first form. This means
+ // the outer shift size must be >= the number of bits added by the ext.
+ // As a corollary, we don't care what kind of ext it is.
+ if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
+ N0.getOpcode() == ISD::ANY_EXTEND ||
+ N0.getOpcode() == ISD::SIGN_EXTEND) &&
+ N0.getOperand(0).getOpcode() == ISD::SHL &&
+ isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
+ uint64_t c1 =
+ cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
+ uint64_t c2 = N1C->getZExtValue();
+ EVT InnerShiftVT = N0.getOperand(0).getValueType();
+ uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
+ if (c2 >= OpSizeInBits - InnerShiftSize) {
+ if (c1 + c2 >= OpSizeInBits)
+ return DAG.getConstant(0, VT);
+ return DAG.getNode(ISD::SHL, N0->getDebugLoc(), VT,
+ DAG.getNode(N0.getOpcode(), N0->getDebugLoc(), VT,
+ N0.getOperand(0)->getOperand(0)),
+ DAG.getConstant(c1 + c2, N1.getValueType()));
+ }
+ }
+
// fold (shl (srl x, c1), c2) -> (shl (and x, (shl -1, c1)), (sub c2, c1)) or
// (srl (and x, (shl -1, c1)), (sub c1, c2))
if (N1C && N0.getOpcode() == ISD::SRL &&
@@ -2973,7 +3119,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (N01C && N1C) {
// Determine what the truncate's result bitsize and type would be.
EVT TruncVT =
- EVT::getIntegerVT(*DAG.getContext(), OpSizeInBits - N1C->getZExtValue());
+ EVT::getIntegerVT(*DAG.getContext(),
+ OpSizeInBits - N1C->getZExtValue());
// Determine the residual right-shift amount.
signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
@@ -3006,7 +3153,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
- TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
+ TruncC = TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
DAG.getNode(ISD::AND, N->getDebugLoc(),
TruncVT,
@@ -3017,6 +3164,29 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
}
}
+ // fold (sra (trunc (srl/sra x, c1)), c2) -> (trunc (sra x, c1+c2))
+ // if c1 is equal to the number of bits the trunc removes
+ if (N0.getOpcode() == ISD::TRUNCATE &&
+ (N0.getOperand(0).getOpcode() == ISD::SRL ||
+ N0.getOperand(0).getOpcode() == ISD::SRA) &&
+ N0.getOperand(0).hasOneUse() &&
+ N0.getOperand(0).getOperand(1).hasOneUse() &&
+ N1C && isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
+ EVT LargeVT = N0.getOperand(0).getValueType();
+ ConstantSDNode *LargeShiftAmt =
+ cast<ConstantSDNode>(N0.getOperand(0).getOperand(1));
+
+ if (LargeVT.getScalarType().getSizeInBits() - OpSizeInBits ==
+ LargeShiftAmt->getZExtValue()) {
+ SDValue Amt =
+ DAG.getConstant(LargeShiftAmt->getZExtValue() + N1C->getZExtValue(),
+ getShiftAmountTy());
+ SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), LargeVT,
+ N0.getOperand(0).getOperand(0), Amt);
+ return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, SRA);
+ }
+ }
+
// Simplify, based on bits shifted out of the LHS.
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
@@ -3065,12 +3235,33 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
N0.getOperand(1).getOpcode() == ISD::Constant) {
uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
- if (c1 + c2 > OpSizeInBits)
+ if (c1 + c2 >= OpSizeInBits)
return DAG.getConstant(0, VT);
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, N1.getValueType()));
}
-
+
+ // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
+ if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
+ N0.getOperand(0).getOpcode() == ISD::SRL &&
+ isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
+ uint64_t c1 =
+ cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
+ uint64_t c2 = N1C->getZExtValue();
+ EVT InnerShiftVT = N0.getOperand(0).getValueType();
+ EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
+ uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
+    // This is only valid if OpSizeInBits + c1 equals the size of the inner shift.
+ if (c1 + OpSizeInBits == InnerShiftSize) {
+ if (c1 + c2 >= InnerShiftSize)
+ return DAG.getConstant(0, VT);
+ return DAG.getNode(ISD::TRUNCATE, N0->getDebugLoc(), VT,
+ DAG.getNode(ISD::SRL, N0->getDebugLoc(), InnerShiftVT,
+ N0.getOperand(0)->getOperand(0),
+ DAG.getConstant(c1 + c2, ShiftCountVT)));
+ }
+ }
+
// fold (srl (shl x, c), c) -> (and x, cst2)
if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
N0.getValueSizeInBits() <= 64) {
@@ -3078,7 +3269,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(~0ULL >> ShAmt, VT));
}
-
+
// fold (srl (anyextend x), c) -> (anyextend (srl x, c))
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
@@ -3147,7 +3338,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
- TruncC.trunc(TruncVT.getSizeInBits());
+ TruncC = TruncC.trunc(TruncVT.getSizeInBits());
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
DAG.getNode(ISD::AND, N->getDebugLoc(),
TruncVT,
@@ -3182,7 +3373,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// brcond i32 %c ...
//
// into
- //
+ //
// %a = ...
// %b = and %a, 2
// %c = setcc eq %b, 0
@@ -3422,7 +3613,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
}
if (BothLiveOut)
// Both unextended and extended values are live out. There had better be
- // good a reason for the transformation.
+ // a good reason for the transformation.
return ExtendNodes.size();
}
return true;
@@ -3503,10 +3694,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(),
+ LN0->getBasePtr(), LN0->getPointerInfo(),
N0.getValueType(),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
@@ -3547,10 +3737,10 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), MemVT,
+ LN0->getBasePtr(), LN0->getPointerInfo(),
+ MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
CombineTo(N, ExtLoad);
@@ -3611,7 +3801,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
N0.getOperand(0), N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get()),
NegOne, DAG.getConstant(0, VT));
- }
+ }
// fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
@@ -3652,6 +3842,20 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE &&
(!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
+
+ // fold (zext (truncate (load x))) -> (zext (smaller load x))
+ // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
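+      // e.g. (zext (truncate (load i32 p):i8):i32) becomes
+      // (zext (load i8 p'):i32), where p' addresses the byte the
+      // truncate keeps, so only that byte is actually loaded.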
+ SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
+ if (NarrowLoad.getNode()) {
+ SDNode* oye = N0.getNode()->getOperand(0).getNode();
+ if (NarrowLoad.getNode() != N0.getNode()) {
+ CombineTo(N0.getNode(), NarrowLoad);
+ // CombineTo deleted the truncate, if needed, but not what's under it.
+ AddToWorkList(oye);
+ }
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
+
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
@@ -3677,7 +3881,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
}
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- Mask.zext(VT.getSizeInBits());
+ Mask = Mask.zext(VT.getSizeInBits());
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
X, DAG.getConstant(Mask, VT));
}
@@ -3692,10 +3896,9 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(),
+ LN0->getBasePtr(), LN0->getPointerInfo(),
N0.getValueType(),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
@@ -3736,10 +3939,10 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), MemVT,
+ LN0->getBasePtr(), LN0->getPointerInfo(),
+ MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
CombineTo(N, ExtLoad);
@@ -3805,21 +4008,27 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
N0.hasOneUse()) {
+ SDValue ShAmt = N0.getOperand(1);
+ unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue();
if (N0.getOpcode() == ISD::SHL) {
+ SDValue InnerZExt = N0.getOperand(0);
// If the original shl may be shifting out bits, do not perform this
// transformation.
- unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- unsigned KnownZeroBits = N0.getOperand(0).getValueType().getSizeInBits() -
- N0.getOperand(0).getOperand(0).getValueType().getSizeInBits();
- if (ShAmt > KnownZeroBits)
+ unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() -
+ InnerZExt.getOperand(0).getValueType().getSizeInBits();
+ if (ShAmtVal > KnownZeroBits)
return SDValue();
}
- DebugLoc dl = N->getDebugLoc();
- return DAG.getNode(N0.getOpcode(), dl, VT,
- DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0.getOperand(0)),
- DAG.getNode(ISD::ZERO_EXTEND, dl,
- N0.getOperand(1).getValueType(),
- N0.getOperand(1)));
+
+ DebugLoc DL = N->getDebugLoc();
+
+ // Ensure that the shift amount is wide enough for the shifted value.
+ if (VT.getSizeInBits() >= 256)
+ ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
+
+ return DAG.getNode(N0.getOpcode(), DL, VT,
+ DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
+ ShAmt);
}
return SDValue();
@@ -3879,7 +4088,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X);
}
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- Mask.zext(VT.getSizeInBits());
+ Mask = Mask.zext(VT.getSizeInBits());
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
X, DAG.getConstant(Mask, VT));
}
@@ -3894,10 +4103,9 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(),
+ LN0->getBasePtr(), LN0->getPointerInfo(),
N0.getValueType(),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
@@ -3938,11 +4146,9 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
- SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
- N->getDebugLoc(),
- LN0->getChain(), LN0->getBasePtr(),
- LN0->getSrcValue(),
- LN0->getSrcValueOffset(), MemVT,
+ SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
+ VT, LN0->getChain(), LN0->getBasePtr(),
+ LN0->getPointerInfo(), MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
CombineTo(N, ExtLoad);
@@ -4053,11 +4259,8 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
- return SDValue();
} else if (Opc == ISD::SRL) {
- // Annother special-case: SRL is basically zero-extending a narrower
- // value.
+ // Another special-case: SRL is basically zero-extending a narrower value.
ExtType = ISD::ZEXTLOAD;
N0 = SDValue(N, 0);
ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
@@ -4065,10 +4268,18 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
ExtVT = EVT::getIntegerVT(*DAG.getContext(),
VT.getSizeInBits() - N01->getZExtValue());
}
+ if (LegalOperations && !TLI.isLoadExtLegal(ExtType, ExtVT))
+ return SDValue();
unsigned EVTBits = ExtVT.getSizeInBits();
+
+ // Do not generate loads of non-round integer types since these can
+ // be expensive (and would be wrong if the type is not byte sized).
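+  // e.g. narrowing to an i24 load would have to be split into multiple
+  // memory operations.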
+ if (!ExtVT.isRound())
+ return SDValue();
+
unsigned ShAmt = 0;
- if (N0.getOpcode() == ISD::SRL && N0.hasOneUse() && ExtVT.isRound()) {
+ if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
ShAmt = N01->getZExtValue();
// Is the shift amount a multiple of size of VT?
@@ -4078,52 +4289,88 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
+
+ // At this point, we must have a load or else we can't do the transform.
+ if (!isa<LoadSDNode>(N0)) return SDValue();
+
+ // If the shift amount is larger than the input type then we're not
+ // accessing any of the loaded bytes. If the load was a zextload/extload
+ // then the result of the shift+trunc is zero/undef (handled elsewhere).
+ // If the load was a sextload then the result is a splat of the sign bit
+ // of the extended byte. This is not worth optimizing for.
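+    // e.g. (srl (sextload i8 p):i32, 8) produces nothing but copies of
+    // the loaded byte's sign bit (plus the zeros shifted in).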
+ if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits())
+ return SDValue();
}
}
- // Do not generate loads of non-round integer types since these can
- // be expensive (and would be wrong if the type is not byte sized).
- if (isa<LoadSDNode>(N0) && N0.hasOneUse() && ExtVT.isRound() &&
- cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() >= EVTBits &&
- // Do not change the width of a volatile load.
- !cast<LoadSDNode>(N0)->isVolatile()) {
- LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- EVT PtrType = N0.getOperand(1).getValueType();
-
- // For big endian targets, we need to adjust the offset to the pointer to
- // load the correct bytes.
- if (TLI.isBigEndian()) {
- unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
- unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
- ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
- }
-
- uint64_t PtrOff = ShAmt / 8;
- unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
- SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(),
- PtrType, LN0->getBasePtr(),
- DAG.getConstant(PtrOff, PtrType));
- AddToWorkList(NewPtr.getNode());
-
- SDValue Load = (ExtType == ISD::NON_EXTLOAD)
- ? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
- LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
- LN0->isVolatile(), LN0->isNonTemporal(), NewAlign)
- : DAG.getExtLoad(ExtType, VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
- LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
- ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
- NewAlign);
-
- // Replace the old load's chain with the new load's chain.
- WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
- &DeadNodes);
+ // If the load is shifted left (and the result isn't shifted back right),
+ // we can fold the truncate through the shift.
+ unsigned ShLeftAmt = 0;
+ if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
+ ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
+ if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
+ ShLeftAmt = N01->getZExtValue();
+ N0 = N0.getOperand(0);
+ }
+ }
+
+ // If we haven't found a load, we can't narrow it. Don't transform one with
+  // multiple uses; this would require adding a new load.
+ if (!isa<LoadSDNode>(N0) || !N0.hasOneUse() ||
+ // Don't change the width of a volatile load.
+ cast<LoadSDNode>(N0)->isVolatile())
+ return SDValue();
+
+ // Verify that we are actually reducing a load width here.
+ if (cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() < EVTBits)
+ return SDValue();
+
+ LoadSDNode *LN0 = cast<LoadSDNode>(N0);
+ EVT PtrType = N0.getOperand(1).getValueType();
+
+ // For big endian targets, we need to adjust the offset to the pointer to
+ // load the correct bytes.
+ if (TLI.isBigEndian()) {
+ unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
+ unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
+ ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
+ }
+
+ uint64_t PtrOff = ShAmt / 8;
+ unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
+ SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(),
+ PtrType, LN0->getBasePtr(),
+ DAG.getConstant(PtrOff, PtrType));
+ AddToWorkList(NewPtr.getNode());
+
+ SDValue Load;
+ if (ExtType == ISD::NON_EXTLOAD)
+ Load = DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
+ LN0->getPointerInfo().getWithOffset(PtrOff),
+ LN0->isVolatile(), LN0->isNonTemporal(), NewAlign);
+ else
+ Load = DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(),NewPtr,
+ LN0->getPointerInfo().getWithOffset(PtrOff),
+ ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
+ NewAlign);
+
+ // Replace the old load's chain with the new load's chain.
+ WorkListRemover DeadNodes(*this);
+ DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1),
+ &DeadNodes);
- // Return the new loaded value.
- return Load;
+ // Shift the result left, if we've swallowed a left shift.
+ SDValue Result = Load;
+ if (ShLeftAmt != 0) {
+ EVT ShImmTy = getShiftAmountTy();
+ if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
+ ShImmTy = VT;
+ Result = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT,
+ Result, DAG.getConstant(ShLeftAmt, ShImmTy));
}
- return SDValue();
+ // Return the new loaded value.
+ return Result;
}
SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
@@ -4196,10 +4443,10 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getBasePtr(), LN0->getPointerInfo(),
+ EVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
CombineTo(N, ExtLoad);
@@ -4213,10 +4460,10 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getBasePtr(), LN0->getPointerInfo(),
+ EVT,
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
CombineTo(N, ExtLoad);
@@ -4295,7 +4542,9 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
- if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
+ if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
+ LD1->getPointerInfo().getAddrSpace() !=
+ LD2->getPointerInfo().getAddrSpace())
return SDValue();
EVT LD1VT = LD1->getValueType(0);
@@ -4313,14 +4562,14 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
- LD1->getBasePtr(), LD1->getSrcValue(),
- LD1->getSrcValueOffset(), false, false, Align);
+ LD1->getBasePtr(), LD1->getPointerInfo(),
+ false, false, Align);
}
return SDValue();
}
-SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
+SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -4344,12 +4593,12 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
assert(!DestEltVT.isVector() &&
"Element type of vector ValueType must not be vector!");
if (isSimple)
- return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.getNode(), DestEltVT);
+ return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
}
// If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
- SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0);
+ SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
if (Res.getNode() != N) {
if (!LegalOperations ||
TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
@@ -4365,8 +4614,8 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
}
// (conv (conv x, t1), t2) -> (conv x, t2)
- if (N0.getOpcode() == ISD::BIT_CONVERT)
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT,
+ if (N0.getOpcode() == ISD::BITCAST)
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
N0.getOperand(0));
// fold (conv (load x)) -> (load (conv*)x)
@@ -4382,13 +4631,12 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
if (Align <= OrigAlign) {
SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
- LN0->getBasePtr(),
- LN0->getSrcValue(), LN0->getSrcValueOffset(),
+ LN0->getBasePtr(), LN0->getPointerInfo(),
LN0->isVolatile(), LN0->isNonTemporal(),
OrigAlign);
AddToWorkList(N);
CombineTo(N0.getNode(),
- DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
+ DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
N0.getValueType(), Load),
Load.getValue(1));
return Load;
@@ -4400,7 +4648,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
// This often reduces constant pool loads.
if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
- SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), VT,
+ SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
N0.getOperand(0));
AddToWorkList(NewConv.getNode());
@@ -4423,7 +4671,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (isTypeLegal(IntXVT)) {
- SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
+ SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
IntXVT, N0.getOperand(1));
AddToWorkList(X.getNode());
@@ -4448,7 +4696,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
X, DAG.getConstant(SignBit, VT));
AddToWorkList(X.getNode());
- SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
+ SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
Cst, DAG.getConstant(~SignBit, VT));
@@ -4473,11 +4721,11 @@ SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
return CombineConsecutiveLoads(N, VT);
}
-/// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector
+/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
/// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
/// destination element value type.
SDValue DAGCombiner::
-ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
+ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
// If this is already the right type, we're done.
@@ -4495,10 +4743,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// Due to the FP element handling below calling this routine recursively,
// we can end up with a scalar-to-vector node here.
if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
- DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
+ DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
DstEltVT, BV->getOperand(0)));
-
+
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDValue Op = BV->getOperand(i);
@@ -4506,7 +4754,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT)
Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
- Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
+ Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
DstEltVT, Op));
AddToWorkList(Ops.back().getNode());
}
@@ -4522,7 +4770,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// same sizes.
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
- BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
+ BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT;
}
@@ -4531,10 +4779,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
if (DstEltVT.isFloatingPoint()) {
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
- SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
+ SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
// Next, convert to FP elements of the same size.
- return ConstantFoldBIT_CONVERTofBUILD_VECTOR(Tmp, DstEltVT);
+ return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
}
// Okay, we know the src/dst types are both integers of differing types.
@@ -4556,7 +4804,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
if (Op.getOpcode() == ISD::UNDEF) continue;
EltIsUndef = false;
- NewBits |= APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).
+ NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
zextOrTrunc(SrcBitSize).zext(DstBitSize);
}
@@ -4586,13 +4834,13 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
continue;
}
- APInt OpVal = APInt(cast<ConstantSDNode>(BV->getOperand(i))->
- getAPIntValue()).zextOrTrunc(SrcBitSize);
+ APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
+ getAPIntValue().zextOrTrunc(SrcBitSize);
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
- APInt ThisVal = APInt(OpVal).trunc(DstBitSize);
+ APInt ThisVal = OpVal.trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
- if (isS2V && i == 0 && j == 0 && APInt(ThisVal).zext(SrcBitSize) == OpVal)
+ if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
// Simply turn this into a SCALAR_TO_VECTOR of the new type.
return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
Ops[0]);
@@ -4984,10 +5232,9 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
- LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(),
+ LN0->getBasePtr(), LN0->getPointerInfo(),
N0.getValueType(),
LN0->isVolatile(), LN0->isNonTemporal(),
LN0->getAlignment());
@@ -5011,7 +5258,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
// constant pool values.
- if (N0.getOpcode() == ISD::BIT_CONVERT &&
+ if (N0.getOpcode() == ISD::BITCAST &&
!VT.isVector() &&
N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger()) {
@@ -5021,7 +5268,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
AddToWorkList(Int.getNode());
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
VT, Int);
}
}
@@ -5047,7 +5294,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
// constant pool values.
- if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() &&
+ if (N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) {
SDValue Int = N0.getOperand(0);
@@ -5056,7 +5303,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
AddToWorkList(Int.getNode());
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Int);
}
}
@@ -5084,14 +5331,17 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
N1.getOperand(0), N1.getOperand(1), N2);
}
- SDNode *Trunc = 0;
- if (N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) {
- // Look past truncate.
- Trunc = N1.getNode();
- N1 = N1.getOperand(0);
- }
+ if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
+ ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
+ (N1.getOperand(0).hasOneUse() &&
+ N1.getOperand(0).getOpcode() == ISD::SRL))) {
+ SDNode *Trunc = 0;
+ if (N1.getOpcode() == ISD::TRUNCATE) {
+      // Look past the truncate.
+ Trunc = N1.getNode();
+ N1 = N1.getOperand(0);
+ }
- if (N1.hasOneUse() && N1.getOpcode() == ISD::SRL) {
// Match this pattern so that we can generate simpler code:
//
// %a = ...
@@ -5100,7 +5350,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
// brcond i32 %c ...
//
// into
- //
+ //
// %a = ...
// %b = and i32 %a, 2
// %c = setcc eq %b, 0
@@ -5146,8 +5396,12 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
}
}
}
+
+ if (Trunc)
+ // Restore N1 if the above transformation doesn't match.
+ N1 = N->getOperand(1);
}
-
+
// Transform br(xor(x, y)) -> br(x != y)
// Transform br(xor(xor(x,y), 1)) -> br (x == y)
if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
@@ -5181,9 +5435,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
Equal = true;
}
- SDValue NodeToReplace = Trunc ? SDValue(Trunc, 0) : N1;
-
- EVT SetCCVT = NodeToReplace.getValueType();
+ EVT SetCCVT = N1.getValueType();
if (LegalTypes)
SetCCVT = TLI.getSetCCResultType(SetCCVT);
SDValue SetCC = DAG.getSetCC(TheXor->getDebugLoc(),
@@ -5192,9 +5444,9 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
Equal ? ISD::SETEQ : ISD::SETNE);
// Replace the uses of XOR with SETCC
WorkListRemover DeadNodes(*this);
- DAG.ReplaceAllUsesOfValueWith(NodeToReplace, SetCC, &DeadNodes);
- removeFromWorkList(NodeToReplace.getNode());
- DAG.DeleteNode(NodeToReplace.getNode());
+ DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
+ removeFromWorkList(N1.getNode());
+ DAG.DeleteNode(N1.getNode());
return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
MVT::Other, Chain, SetCC, N2);
}
@@ -5568,10 +5820,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > LD->getAlignment())
- return DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
- N->getDebugLoc(),
- Chain, Ptr, LD->getSrcValue(),
- LD->getSrcValueOffset(), LD->getMemoryVT(),
+ return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
+ LD->getValueType(0),
+ Chain, Ptr, LD->getPointerInfo(),
+ LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), Align);
}
}
@@ -5587,15 +5839,13 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
    // Replace the chain to avoid dependency.
if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
- BetterChain, Ptr,
- LD->getSrcValue(), LD->getSrcValueOffset(),
+ BetterChain, Ptr, LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment());
} else {
- ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
- LD->getDebugLoc(),
- BetterChain, Ptr, LD->getSrcValue(),
- LD->getSrcValueOffset(),
+ ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
+ LD->getValueType(0),
+ BetterChain, Ptr, LD->getPointerInfo(),
LD->getMemoryVT(),
LD->isVolatile(),
LD->isNonTemporal(),
@@ -5605,10 +5855,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
MVT::Other, Chain, ReplLoad.getValue(1));
-
+
// Make sure the new and old chains are cleaned up.
AddToWorkList(Token.getNode());
-
+
// Replace uses with load result and token factor. Don't add users
// to work list.
return CombineTo(N, ReplLoad.getValue(0), Token, false);
@@ -5628,17 +5878,17 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
std::pair<unsigned, unsigned> Result(0, 0);
-
+
// Check for the structure we're looking for.
if (V->getOpcode() != ISD::AND ||
!isa<ConstantSDNode>(V->getOperand(1)) ||
!ISD::isNormalLoad(V->getOperand(0).getNode()))
return Result;
-
+
// Check the chain and pointer.
LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
-
+
// The store should be chained directly to the load or be an operand of a
// tokenfactor.
if (LD == Chain.getNode())
@@ -5654,7 +5904,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
}
if (!isOk) return Result;
}
-
+
// This only handles simple types.
if (V.getValueType() != MVT::i16 &&
V.getValueType() != MVT::i32 &&
@@ -5670,7 +5920,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
if (NotMaskLZ == 64) return Result; // All zero mask.
-
+
// See if we have a continuous run of bits. If so, we have 0*1+0*
if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
return Result;
@@ -5678,19 +5928,19 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
// Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
if (V.getValueType() != MVT::i64 && NotMaskLZ)
NotMaskLZ -= 64-V.getValueSizeInBits();
-
+
unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
switch (MaskedBytes) {
- case 1:
- case 2:
+ case 1:
+ case 2:
case 4: break;
default: return Result; // All one mask, or 5-byte mask.
}
-
+
// Verify that the first bit starts at a multiple of mask so that the access
// is aligned the same as the access width.
if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
-
+
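+  // e.g. for a 32-bit store where V is (and (load P), 0xFFFF00FF), NotMask
+  // is 0x0000FF00, so Result is (1, 1): one masked byte, starting one byte
+  // into the value, which the caller may replace with a single byte store.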
Result.first = MaskedBytes;
Result.second = NotMaskTZ/8;
return Result;
@@ -5707,20 +5957,20 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
unsigned NumBytes = MaskInfo.first;
unsigned ByteShift = MaskInfo.second;
SelectionDAG &DAG = DC->getDAG();
-
+
// Check to see if IVal is all zeros in the part being masked in by the 'or'
// that uses this. If not, this is not a replacement.
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
ByteShift*8, (ByteShift+NumBytes)*8);
if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
-
+
// Check that it is legal on the target to do this. It is legal if the new
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type
// legalization.
MVT VT = MVT::getIntegerVT(NumBytes*8);
if (!DC->isTypeLegal(VT))
return 0;
-
+
// Okay, we can do this! Replace the 'St' store with a store of IVal that is
// shifted by ByteShift and truncated down to NumBytes.
if (ByteShift)
@@ -5735,20 +5985,20 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
StOffset = ByteShift;
else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
-
+
SDValue Ptr = St->getBasePtr();
if (StOffset) {
Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
NewAlign = MinAlign(NewAlign, StOffset);
}
-
+
// Truncate down to the new size.
IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
-
+
++OpsNarrowed;
- return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
- St->getSrcValue(), St->getSrcValueOffset()+StOffset,
+ return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
+ St->getPointerInfo().getWithOffset(StOffset),
false, false, NewAlign).getNode();
}
@@ -5771,7 +6021,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
return SDValue();
unsigned Opc = Value.getOpcode();
-
+
// If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
// is a byte mask indicating a consecutive number of bytes, check to see if
// Y is known to provide just those bytes. If so, we try to replace the
@@ -5784,7 +6034,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(1), ST,this))
return SDValue(NewST, 0);
-
+
// Or is commutative, so try swapping X and Y.
MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
if (MaskedLoad.first)
@@ -5792,7 +6042,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
Value.getOperand(0), ST,this))
return SDValue(NewST, 0);
}
-
+
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
@@ -5801,7 +6051,9 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
Chain == SDValue(N0.getNode(), 1)) {
LoadSDNode *LD = cast<LoadSDNode>(N0);
- if (LD->getBasePtr() != Ptr)
+ if (LD->getBasePtr() != Ptr ||
+ LD->getPointerInfo().getAddrSpace() !=
+ ST->getPointerInfo().getAddrSpace())
return SDValue();
// Find the type to narrow it the load / op / store to.
@@ -5850,14 +6102,14 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
DAG.getConstant(PtrOff, Ptr.getValueType()));
SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
LD->getChain(), NewPtr,
- LD->getSrcValue(), LD->getSrcValueOffset(),
+ LD->getPointerInfo().getWithOffset(PtrOff),
LD->isVolatile(), LD->isNonTemporal(),
NewAlign);
SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
DAG.getConstant(NewImm, NewVT));
SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
NewVal, NewPtr,
- ST->getSrcValue(), ST->getSrcValueOffset(),
+ ST->getPointerInfo().getWithOffset(PtrOff),
false, false, NewAlign);
AddToWorkList(NewPtr.getNode());
@@ -5874,6 +6126,63 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
return SDValue();
}
+/// TransformFPLoadStorePair - For a given floating point load / store pair,
+/// if the load value isn't used by any other operations, then consider
+/// transforming the pair to integer load / store operations if the target
+/// deems the transformation profitable.
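+/// e.g. an f64 value that is merely loaded and stored back can travel
+/// through an i64 register instead, sparing an FP register round-trip on
+/// targets whose hooks report that as the cheaper option.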
+SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ SDValue Chain = ST->getChain();
+ SDValue Value = ST->getValue();
+ if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
+ Value.hasOneUse() &&
+ Chain == SDValue(Value.getNode(), 1)) {
+ LoadSDNode *LD = cast<LoadSDNode>(Value);
+ EVT VT = LD->getMemoryVT();
+ if (!VT.isFloatingPoint() ||
+ VT != ST->getMemoryVT() ||
+ LD->isNonTemporal() ||
+ ST->isNonTemporal() ||
+ LD->getPointerInfo().getAddrSpace() != 0 ||
+ ST->getPointerInfo().getAddrSpace() != 0)
+ return SDValue();
+
+ EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
+ if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
+ !TLI.isOperationLegal(ISD::STORE, IntVT) ||
+ !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
+ !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
+ return SDValue();
+
+ unsigned LDAlign = LD->getAlignment();
+ unsigned STAlign = ST->getAlignment();
+ const Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
+ if (LDAlign < ABIAlign || STAlign < ABIAlign)
+ return SDValue();
+
+ SDValue NewLD = DAG.getLoad(IntVT, Value.getDebugLoc(),
+ LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(),
+ false, false, LDAlign);
+
+ SDValue NewST = DAG.getStore(NewLD.getValue(1), N->getDebugLoc(),
+ NewLD, ST->getBasePtr(),
+ ST->getPointerInfo(),
+ false, false, STAlign);
+
+ AddToWorkList(NewLD.getNode());
+ AddToWorkList(NewST.getNode());
+ WorkListRemover DeadNodes(*this);
+ DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1),
+ &DeadNodes);
+ ++LdStFP2Int;
+ return NewST;
+ }
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitSTORE(SDNode *N) {
StoreSDNode *ST = cast<StoreSDNode>(N);
SDValue Chain = ST->getChain();
@@ -5882,7 +6191,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original.
- if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
+ if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType();
@@ -5892,8 +6201,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->isVolatile(),
+ Ptr, ST->getPointerInfo(), ST->isVolatile(),
ST->isNonTemporal(), OrigAlign);
}
@@ -5917,8 +6225,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), MVT::i32);
return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->isVolatile(),
+ Ptr, ST->getPointerInfo(), ST->isVolatile(),
ST->isNonTemporal(), ST->getAlignment());
}
break;
@@ -5929,8 +6236,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
getZExtValue(), MVT::i64);
return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->isVolatile(),
+ Ptr, ST->getPointerInfo(), ST->isVolatile(),
ST->isNonTemporal(), ST->getAlignment());
} else if (!ST->isVolatile() &&
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
@@ -5942,23 +6248,20 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
if (TLI.isBigEndian()) std::swap(Lo, Hi);
- int SVOffset = ST->getSrcValueOffset();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(),
+ Ptr, ST->getPointerInfo(),
isVolatile, isNonTemporal,
ST->getAlignment());
Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
DAG.getConstant(4, Ptr.getValueType()));
- SVOffset += 4;
Alignment = MinAlign(Alignment, 4U);
SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
- Ptr, ST->getSrcValue(),
- SVOffset, isVolatile, isNonTemporal,
+ Ptr, ST->getPointerInfo().getWithOffset(4),
+ isVolatile, isNonTemporal,
Alignment);
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
St0, St1);
@@ -5974,12 +6277,17 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > ST->getAlignment())
return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->getMemoryVT(),
+ Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
ST->isVolatile(), ST->isNonTemporal(), Align);
}
}
+  // Try transforming a pair of floating point load / store ops to integer
+ // load / store ops.
+ SDValue NewST = TransformFPLoadStorePair(N);
+ if (NewST.getNode())
+ return NewST;
+
if (CombinerAA) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
@@ -5991,12 +6299,12 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// Replace the chain to avoid dependency.
if (ST->isTruncatingStore()) {
ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
- ST->getSrcValue(),ST->getSrcValueOffset(),
+ ST->getPointerInfo(),
ST->getMemoryVT(), ST->isVolatile(),
ST->isNonTemporal(), ST->getAlignment());
} else {
ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
- ST->getSrcValue(), ST->getSrcValueOffset(),
+ ST->getPointerInfo(),
ST->isVolatile(), ST->isNonTemporal(),
ST->getAlignment());
}
@@ -6030,17 +6338,16 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
AddToWorkList(Value.getNode());
if (Shorter.getNode())
return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->getMemoryVT(),
+ Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
ST->isVolatile(), ST->isNonTemporal(),
ST->getAlignment());
// Otherwise, see if we can simplify the operation with
// SimplifyDemandedBits, which only works if the value has a single use.
if (SimplifyDemandedBits(Value,
- APInt::getLowBitsSet(
- Value.getValueType().getScalarType().getSizeInBits(),
- ST->getMemoryVT().getScalarType().getSizeInBits())))
+ APInt::getLowBitsSet(
+ Value.getValueType().getScalarType().getSizeInBits(),
+ ST->getMemoryVT().getScalarType().getSizeInBits())))
return SDValue(N, 0);
}
@@ -6064,8 +6371,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
ST->getMemoryVT())) {
return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->getMemoryVT(),
+ Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
ST->isVolatile(), ST->isNonTemporal(),
ST->getAlignment());
}
@@ -6082,6 +6388,12 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (InVal.getOpcode() == ISD::UNDEF)
return InVec;
+ EVT VT = InVec.getValueType();
+
+ // If we can't generate a legal BUILD_VECTOR, exit
+ if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
+ return SDValue();
+
// If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
// vector with the inserted element.
if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
@@ -6091,13 +6403,12 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (Elt < Ops.size())
Ops[Elt] = InVal;
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
- InVec.getValueType(), &Ops[0], Ops.size());
+ VT, &Ops[0], Ops.size());
}
- // If the invec is an UNDEF and if EltNo is a constant, create a new
+ // If the invec is an UNDEF and if EltNo is a constant, create a new
// BUILD_VECTOR with undef elements and the inserted element.
- if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
+ if (InVec.getOpcode() == ISD::UNDEF &&
isa<ConstantSDNode>(EltNo)) {
- EVT VT = InVec.getValueType();
EVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
SmallVector<SDValue, 8> Ops(NElts, DAG.getUNDEF(EltVT));
@@ -6106,7 +6417,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (Elt < Ops.size())
Ops[Elt] = InVal;
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
- InVec.getValueType(), &Ops[0], Ops.size());
+ VT, &Ops[0], Ops.size());
}
return SDValue();
}
@@ -6138,14 +6449,14 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue EltNo = N->getOperand(1);
if (isa<ConstantSDNode>(EltNo)) {
- unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
bool NewLoad = false;
bool BCNumEltsChanged = false;
EVT VT = InVec.getValueType();
EVT ExtVT = VT.getVectorElementType();
EVT LVT = ExtVT;
- if (InVec.getOpcode() == ISD::BIT_CONVERT) {
+ if (InVec.getOpcode() == ISD::BITCAST) {
EVT BCVT = InVec.getOperand(0).getValueType();
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue();
@@ -6176,10 +6487,10 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Select the input vector, guarding against out of range extract vector.
unsigned NumElems = VT.getVectorNumElements();
- int Idx = (Elt > NumElems) ? -1 : SVN->getMaskElt(Elt);
+    int Idx = (Elt >= (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
- if (InVec.getOpcode() == ISD::BIT_CONVERT)
+ if (InVec.getOpcode() == ISD::BITCAST)
InVec = InVec.getOperand(0);
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
@@ -6190,12 +6501,17 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile())
return SDValue();
+ // If Idx was -1 above, Elt is going to be -1, so just return undef.
+ if (Elt == -1)
+      return DAG.getUNDEF(LVT);
+
unsigned Align = LN0->getAlignment();
if (NewLoad) {
// Check the resultant load doesn't need a higher alignment than the
// original load.
unsigned NewAlign =
- TLI.getTargetData()->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
+ TLI.getTargetData()
+ ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
return SDValue();
@@ -6204,8 +6520,10 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
}
SDValue NewPtr = LN0->getBasePtr();
+ unsigned PtrOff = 0;
+
if (Elt) {
- unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
+ PtrOff = LVT.getSizeInBits() * Elt / 8;
EVT PtrType = NewPtr.getValueType();
if (TLI.isBigEndian())
PtrOff = VT.getSizeInBits() / 8 - PtrOff;
@@ -6214,7 +6532,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
}
return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
- LN0->getSrcValue(), LN0->getSrcValueOffset(),
+ LN0->getPointerInfo().getWithOffset(PtrOff),
LN0->isVolatile(), LN0->isNonTemporal(), Align);
}
@@ -6280,7 +6598,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
if (ExtIndex > VT.getVectorNumElements())
return SDValue();
-
+
Mask.push_back(ExtIndex);
continue;
}
@@ -6328,15 +6646,16 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// FIXME: implement canonicalizations from DAG.getVectorShuffle()
- // If it is a splat, check if the argument vector is a build_vector with
- // all scalar elements the same.
- if (cast<ShuffleVectorSDNode>(N)->isSplat()) {
+ // If it is a splat, check if the argument vector is another splat or a
+ // build_vector with all scalar elements the same.
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
+ if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
SDNode *V = N0.getNode();
// If this is a bit convert that changes the element type of the vector but
// not the number of vector elements, look through it. Be careful not to
// look though conversions that change things like v4f32 to v2f64.
- if (V->getOpcode() == ISD::BIT_CONVERT) {
+ if (V->getOpcode() == ISD::BITCAST) {
SDValue ConvInput = V->getOperand(0);
if (ConvInput.getValueType().isVector() &&
ConvInput.getValueType().getVectorNumElements() == NumElts)
@@ -6344,30 +6663,28 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
}
if (V->getOpcode() == ISD::BUILD_VECTOR) {
- unsigned NumElems = V->getNumOperands();
- unsigned BaseIdx = cast<ShuffleVectorSDNode>(N)->getSplatIndex();
- if (NumElems > BaseIdx) {
- SDValue Base;
- bool AllSame = true;
- for (unsigned i = 0; i != NumElems; ++i) {
- if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
- Base = V->getOperand(i);
- break;
- }
+ assert(V->getNumOperands() == NumElts &&
+ "BUILD_VECTOR has wrong number of operands");
+ SDValue Base;
+ bool AllSame = true;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
+ Base = V->getOperand(i);
+ break;
}
- // Splat of <u, u, u, u>, return <u, u, u, u>
- if (!Base.getNode())
- return N0;
- for (unsigned i = 0; i != NumElems; ++i) {
- if (V->getOperand(i) != Base) {
- AllSame = false;
- break;
- }
+ }
+ // Splat of <u, u, u, u>, return <u, u, u, u>
+ if (!Base.getNode())
+ return N0;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (V->getOperand(i) != Base) {
+ AllSame = false;
+ break;
}
- // Splat of <x, x, x, x>, return <x, x, x, x>
- if (AllSame)
- return N0;
}
+ // Splat of <x, x, x, x>, return <x, x, x, x>
+ if (AllSame)
+ return N0;
}
}
return SDValue();
@@ -6436,7 +6753,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (N->getOpcode() == ISD::AND) {
- if (RHS.getOpcode() == ISD::BIT_CONVERT)
+ if (RHS.getOpcode() == ISD::BITCAST)
RHS = RHS.getOperand(0);
if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<int, 8> Indices;
@@ -6464,9 +6781,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
DAG.getConstant(0, EltVT));
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
RVT, &ZeroOps[0], ZeroOps.size());
- LHS = DAG.getNode(ISD::BIT_CONVERT, dl, RVT, LHS);
+ LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuf);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
}
}
@@ -6480,10 +6797,9 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
// things. Simplifying them may result in a loss of legality.
if (LegalOperations) return SDValue();
- EVT VT = N->getValueType(0);
- assert(VT.isVector() && "SimplifyVBinOp only works on vectors!");
+ assert(N->getValueType(0).isVector() &&
+ "SimplifyVBinOp only works on vectors!");
- EVT EltType = VT.getVectorElementType();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Shuffle = XformToShuffleWithZero(N);
@@ -6516,14 +6832,10 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
break;
}
- // If the vector element type is not legal, the BUILD_VECTOR operands
- // are promoted and implicitly truncated. Make that explicit here.
- if (LHSOp.getValueType() != EltType)
- LHSOp = DAG.getNode(ISD::TRUNCATE, LHS.getDebugLoc(), EltType, LHSOp);
- if (RHSOp.getValueType() != EltType)
- RHSOp = DAG.getNode(ISD::TRUNCATE, RHS.getDebugLoc(), EltType, RHSOp);
-
- SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), EltType,
+ EVT VT = LHSOp.getValueType();
+ assert(RHSOp.getValueType() == VT &&
+ "SimplifyVBinOp with different BUILD_VECTOR element types");
+ SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), VT,
LHSOp, RHSOp);
if (FoldOp.getOpcode() != ISD::UNDEF &&
FoldOp.getOpcode() != ISD::Constant &&
@@ -6533,11 +6845,9 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
AddToWorkList(FoldOp.getNode());
}
- if (Ops.size() == LHS.getNumOperands()) {
- EVT VT = LHS.getValueType();
- return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
- &Ops[0], Ops.size());
- }
+ if (Ops.size() == LHS.getNumOperands())
+ return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
+ LHS.getValueType(), &Ops[0], Ops.size());
}
return SDValue();
@@ -6580,103 +6890,101 @@ SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
SDValue RHS) {
+ // Cannot simplify select with vector condition
+ if (TheSelect->getOperand(0).getValueType().isVector()) return false;
+
// If this is a select from two identical things, try to pull the operation
// through the select.
- if (LHS.getOpcode() == RHS.getOpcode() && LHS.hasOneUse() && RHS.hasOneUse()){
- // If this is a load and the token chain is identical, replace the select
- // of two loads with a load through a select of the address to load from.
- // This triggers in things like "select bool X, 10.0, 123.0" after the FP
- // constants have been dropped into the constant pool.
- if (LHS.getOpcode() == ISD::LOAD &&
+ if (LHS.getOpcode() != RHS.getOpcode() ||
+ !LHS.hasOneUse() || !RHS.hasOneUse())
+ return false;
+
+ // If this is a load and the token chain is identical, replace the select
+ // of two loads with a load through a select of the address to load from.
+ // This triggers in things like "select bool X, 10.0, 123.0" after the FP
+ // constants have been dropped into the constant pool.
+ if (LHS.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LLD = cast<LoadSDNode>(LHS);
+ LoadSDNode *RLD = cast<LoadSDNode>(RHS);
+
+ // Token chains must be identical.
+ if (LHS.getOperand(0) != RHS.getOperand(0) ||
// Do not let this transformation reduce the number of volatile loads.
- !cast<LoadSDNode>(LHS)->isVolatile() &&
- !cast<LoadSDNode>(RHS)->isVolatile() &&
- // Token chains must be identical.
- LHS.getOperand(0) == RHS.getOperand(0)) {
- LoadSDNode *LLD = cast<LoadSDNode>(LHS);
- LoadSDNode *RLD = cast<LoadSDNode>(RHS);
-
- // If this is an EXTLOAD, the VT's must match.
- if (LLD->getMemoryVT() == RLD->getMemoryVT()) {
+ LLD->isVolatile() || RLD->isVolatile() ||
+ // If this is an EXTLOAD, the VT's must match.
+ LLD->getMemoryVT() != RLD->getMemoryVT() ||
+ // If this is an EXTLOAD, the kind of extension must match.
+ (LLD->getExtensionType() != RLD->getExtensionType() &&
+ // The only exception is if one of the extensions is anyext.
+ LLD->getExtensionType() != ISD::EXTLOAD &&
+ RLD->getExtensionType() != ISD::EXTLOAD) ||
// FIXME: this discards src value information. This is
// over-conservative. It would be beneficial to be able to remember
// both potential memory locations. Since we are discarding
// src value info, don't do the transformation if the memory
// locations are not in the default address space.
- unsigned LLDAddrSpace = 0, RLDAddrSpace = 0;
- if (const Value *LLDVal = LLD->getMemOperand()->getValue()) {
- if (const PointerType *PT = dyn_cast<PointerType>(LLDVal->getType()))
- LLDAddrSpace = PT->getAddressSpace();
- }
- if (const Value *RLDVal = RLD->getMemOperand()->getValue()) {
- if (const PointerType *PT = dyn_cast<PointerType>(RLDVal->getType()))
- RLDAddrSpace = PT->getAddressSpace();
- }
- SDValue Addr;
- if (LLDAddrSpace == 0 && RLDAddrSpace == 0) {
- if (TheSelect->getOpcode() == ISD::SELECT) {
- // Check that the condition doesn't reach either load. If so, folding
- // this will induce a cycle into the DAG.
- if ((!LLD->hasAnyUseOfValue(1) ||
- !LLD->isPredecessorOf(TheSelect->getOperand(0).getNode())) &&
- (!RLD->hasAnyUseOfValue(1) ||
- !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()))) {
- Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
- LLD->getBasePtr().getValueType(),
- TheSelect->getOperand(0), LLD->getBasePtr(),
- RLD->getBasePtr());
- }
- } else {
- // Check that the condition doesn't reach either load. If so, folding
- // this will induce a cycle into the DAG.
- if ((!LLD->hasAnyUseOfValue(1) ||
- (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
- !LLD->isPredecessorOf(TheSelect->getOperand(1).getNode()))) &&
- (!RLD->hasAnyUseOfValue(1) ||
- (!RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
- !RLD->isPredecessorOf(TheSelect->getOperand(1).getNode())))) {
- Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
- LLD->getBasePtr().getValueType(),
- TheSelect->getOperand(0),
- TheSelect->getOperand(1),
- LLD->getBasePtr(), RLD->getBasePtr(),
- TheSelect->getOperand(4));
- }
- }
- }
-
- if (Addr.getNode()) {
- SDValue Load;
- if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
- Load = DAG.getLoad(TheSelect->getValueType(0),
- TheSelect->getDebugLoc(),
- LLD->getChain(),
- Addr, 0, 0,
- LLD->isVolatile(),
- LLD->isNonTemporal(),
- LLD->getAlignment());
- } else {
- Load = DAG.getExtLoad(LLD->getExtensionType(),
- TheSelect->getValueType(0),
- TheSelect->getDebugLoc(),
- LLD->getChain(), Addr, 0, 0,
- LLD->getMemoryVT(),
- LLD->isVolatile(),
- LLD->isNonTemporal(),
- LLD->getAlignment());
- }
+ LLD->getPointerInfo().getAddrSpace() != 0 ||
+ RLD->getPointerInfo().getAddrSpace() != 0)
+ return false;
- // Users of the select now use the result of the load.
- CombineTo(TheSelect, Load);
+ // Check that the select condition doesn't reach either load. If so,
+ // folding this will induce a cycle into the DAG. If not, this is safe to
+ // xform, so create a select of the addresses.
+ SDValue Addr;
+ if (TheSelect->getOpcode() == ISD::SELECT) {
+ SDNode *CondNode = TheSelect->getOperand(0).getNode();
+ if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
+ (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
+ return false;
+ Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
+ LLD->getBasePtr().getValueType(),
+ TheSelect->getOperand(0), LLD->getBasePtr(),
+ RLD->getBasePtr());
+ } else { // Otherwise SELECT_CC
+ SDNode *CondLHS = TheSelect->getOperand(0).getNode();
+ SDNode *CondRHS = TheSelect->getOperand(1).getNode();
+
+ if ((LLD->hasAnyUseOfValue(1) &&
+ (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
+ (RLD->hasAnyUseOfValue(1) &&
+ (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
+ return false;
- // Users of the old loads now use the new load's chain. We know the
- // old-load value is dead now.
- CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
- CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
- return true;
- }
- }
- }
+ Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
+ LLD->getBasePtr().getValueType(),
+ TheSelect->getOperand(0),
+ TheSelect->getOperand(1),
+ LLD->getBasePtr(), RLD->getBasePtr(),
+ TheSelect->getOperand(4));
+ }
+
+ SDValue Load;
+ if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
+ Load = DAG.getLoad(TheSelect->getValueType(0),
+ TheSelect->getDebugLoc(),
+ // FIXME: Discards pointer info.
+ LLD->getChain(), Addr, MachinePointerInfo(),
+ LLD->isVolatile(), LLD->isNonTemporal(),
+ LLD->getAlignment());
+ } else {
+ Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
+ RLD->getExtensionType() : LLD->getExtensionType(),
+ TheSelect->getDebugLoc(),
+ TheSelect->getValueType(0),
+ // FIXME: Discards pointer info.
+ LLD->getChain(), Addr, MachinePointerInfo(),
+ LLD->getMemoryVT(), LLD->isVolatile(),
+ LLD->isNonTemporal(), LLD->getAlignment());
+ }
+
+ // Users of the select now use the result of the load.
+ CombineTo(TheSelect, Load);
+
+ // Users of the old loads now use the new load's chain. We know the
+ // old-load value is dead now.
+ CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
+ CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
+ return true;
}
return false;
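A minimal standalone C++ sketch (illustrative, not part of the patch) of the source pattern this fold targets: both FP constants land in the constant pool, so the select of two constant-pool loads collapses into one load from a selected address.

float pickConstant(bool X) {
  // After the FP constants are pooled: select X, (load @cp0), (load @cp1)
  // becomes: load (select X, @cp0, @cp1)
  return X ? 10.0f : 123.0f;
}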
@@ -6689,7 +6997,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
ISD::CondCode CC, bool NotExtCompare) {
// (x ? y : y) -> y.
if (N2 == N3) return N2;
-
+
EVT VT = N2.getValueType();
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
@@ -6725,7 +7033,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
return DAG.getNode(ISD::FABS, DL, VT, N3);
}
}
-
+
// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
// in it. This is a win when the constant is not otherwise available because
@@ -6748,7 +7056,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
};
const Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData();
-
+
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
@@ -6760,7 +7068,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue Zero = DAG.getIntPtrConstant(0);
unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
SDValue One = DAG.getIntPtrConstant(EltSize);
-
+
SDValue Cond = DAG.getSetCC(DL,
TLI.getSetCCResultType(N0.getValueType()),
N0, N1, CC);
@@ -6769,11 +7077,11 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
CstOffset);
return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0, false,
+ MachinePointerInfo::getConstantPool(), false,
false, Alignment);
}
- }
+ }
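A hedged sketch of the constant-pool trick above in plain C++ (Pool and selectViaPool are illustrative names): instead of selecting between two FP values, index a two-element pool with the condition.

static const float Pool[2] = {1.0f, 2.0f}; // the ConstantArray in the pool
float selectViaPool(int a, int b) {
  return Pool[(a < b) ? 0 : 1]; // offset 0 or EltSize (= 4) bytes
}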
// Check to see if we can perform the "gzip trick", transforming
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1)), A)
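The formula above at the C level, as a minimal sketch (assumes 32-bit X and arithmetic right shift of signed values, as on all mainstream targets):

#include <cstdint>
uint32_t gzipTrick(int32_t X, uint32_t A) {
  // sra X, size(X)-1 smears the sign bit into an all-ones/all-zero mask.
  return (uint32_t)(X >> 31) & A; // equals (X < 0) ? A : 0
}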
@@ -6818,6 +7126,35 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
}
}
+ // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x)) A)
+ // where y has a single bit set.
+ // In plain terms: we can turn the SELECT_CC into an AND when the condition
+ // can be materialized as an all-ones register. Any single bit-test can be
+ // materialized as an all-ones register with shift-left and
+ // shift-right-arith.
+ if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
+ N0->getValueType(0) == VT &&
+ N1C && N1C->isNullValue() &&
+ N2C && N2C->isNullValue()) {
+ SDValue AndLHS = N0->getOperand(0);
+ ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+ if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
+ // Shift the tested bit over the sign bit.
+ APInt AndMask = ConstAndRHS->getAPIntValue();
+ SDValue ShlAmt =
+ DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
+ SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
+
+ // Now arithmetic right shift it all the way over, so the result is either
+ // all-ones, or zero.
+ SDValue ShrAmt =
+ DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
+ SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
+
+ return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
+ }
+ }
+
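A minimal sketch of the fold just added, assuming 32-bit operands, a single-bit mask y, and the GCC/Clang __builtin_clz intrinsic:

#include <cstdint>
uint32_t bitTestFold(uint32_t x, uint32_t y, uint32_t A) {
  unsigned ShlAmt = __builtin_clz(y);             // countLeadingZeros(AndMask)
  uint32_t Shl = x << ShlAmt;                     // tested bit moves to the sign bit
  uint32_t Mask = (uint32_t)((int32_t)Shl >> 31); // all-ones or zero
  return Mask & A;                                // (x & y) == 0 ? 0 : A
}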
// fold select C, 16, 0 -> shl C, 4
if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent) {
@@ -6971,7 +7308,8 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
}
/// FindBaseOffset - Return true if base is a frame index, which is known not
-// to alias with anything but itself. Provides base object and offset as results.
+// to alias with anything but itself. Provides base object and offset as
+// results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
const GlobalValue *&GV, void *&CV) {
// Assume it is a primitive operation.
@@ -6984,7 +7322,7 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
Offset += C->getZExtValue();
}
}
-
+
// Return the underlying GlobalValue, and update the Offset. Return false
// for GlobalAddressSDNode since the same GlobalAddress may be represented
// by multiple nodes with different offsets.
@@ -7012,9 +7350,11 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
const Value *SrcValue1, int SrcValueOffset1,
unsigned SrcValueAlign1,
+ const MDNode *TBAAInfo1,
SDValue Ptr2, int64_t Size2,
const Value *SrcValue2, int SrcValueOffset2,
- unsigned SrcValueAlign2) const {
+ unsigned SrcValueAlign2,
+ const MDNode *TBAAInfo2) const {
// If they are the same then they must be aliases.
if (Ptr1 == Ptr2) return true;
@@ -7030,8 +7370,19 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
- // If we know what the bases are, and they aren't identical, then we know they
- // cannot alias.
+ // It is possible for different frame indices to alias each other, mostly
+ // when tail call optimization reuses return address slots for arguments.
+ // To catch this case, look up the actual index of frame indices to compute
+ // the real alias relationship.
+ if (isFrameIndex1 && isFrameIndex2) {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
+ Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
+ return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
+ }
+
+ // Otherwise, if we know what the bases are, and they aren't identical, then
+ // we know they cannot alias.
if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
return false;
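The disjointness test used in both branches above, extracted as a sketch: two accesses [Off, Off+Size) alias exactly when their half-open intervals overlap.

#include <cstdint>
bool accessesOverlap(int64_t Off1, int64_t Size1,
                     int64_t Off2, int64_t Size2) {
  return !((Off1 + Size1) <= Off2 || (Off2 + Size2) <= Off1);
}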
@@ -7044,20 +7395,21 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
(Size1 == Size2) && (SrcValueAlign1 > Size1)) {
int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
-
+
// There is no overlap between these relatively aligned accesses of similar
// size, return no alias.
if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
return false;
}
-
+
if (CombinerGlobalAA) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
AliasAnalysis::AliasResult AAResult =
- AA.alias(SrcValue1, Overlap1, SrcValue2, Overlap2);
+ AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1),
+ AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2));
if (AAResult == AliasAnalysis::NoAlias)
return false;
}
@@ -7070,15 +7422,17 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
/// node. Returns true if the operand was a load.
bool DAGCombiner::FindAliasInfo(SDNode *N,
SDValue &Ptr, int64_t &Size,
- const Value *&SrcValue,
+ const Value *&SrcValue,
int &SrcValueOffset,
- unsigned &SrcValueAlign) const {
+ unsigned &SrcValueAlign,
+ const MDNode *&TBAAInfo) const {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
Ptr = LD->getBasePtr();
Size = LD->getMemoryVT().getSizeInBits() >> 3;
SrcValue = LD->getSrcValue();
SrcValueOffset = LD->getSrcValueOffset();
SrcValueAlign = LD->getOriginalAlignment();
+ TBAAInfo = LD->getTBAAInfo();
return true;
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
Ptr = ST->getBasePtr();
@@ -7086,6 +7440,7 @@ bool DAGCombiner::FindAliasInfo(SDNode *N,
SrcValue = ST->getSrcValue();
SrcValueOffset = ST->getSrcValueOffset();
SrcValueAlign = ST->getOriginalAlignment();
+ TBAAInfo = ST->getTBAAInfo();
} else {
llvm_unreachable("FindAliasInfo expected a memory operand");
}
@@ -7106,26 +7461,27 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
const Value *SrcValue;
int SrcValueOffset;
unsigned SrcValueAlign;
- bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
- SrcValueAlign);
+ const MDNode *SrcTBAAInfo;
+ bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
+ SrcValueAlign, SrcTBAAInfo);
// Starting off.
Chains.push_back(OriginalChain);
unsigned Depth = 0;
-
+
// Look at each chain and determine if it is an alias. If so, add it to the
// aliases list. If not, then continue up the chain looking for the next
// candidate.
while (!Chains.empty()) {
SDValue Chain = Chains.back();
Chains.pop_back();
-
- // For TokenFactor nodes, look at each operand and only continue up the
- // chain until we find two aliases. If we've seen two aliases, assume we'll
+
+ // For TokenFactor nodes, look at each operand and only continue up the
+ // chain until we find two aliases. If we've seen two aliases, assume we'll
// find more and revert to original chain since the xform is unlikely to be
// profitable.
- //
- // FIXME: The depth check could be made to return the last non-aliasing
+ //
+ // FIXME: The depth check could be made to return the last non-aliasing
// chain we found before we hit a tokenfactor rather than the original
// chain.
if (Depth > 6 || Aliases.size() == 2) {
@@ -7151,15 +7507,18 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
const Value *OpSrcValue;
int OpSrcValueOffset;
unsigned OpSrcValueAlign;
+ const MDNode *OpSrcTBAAInfo;
bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
OpSrcValue, OpSrcValueOffset,
- OpSrcValueAlign);
+ OpSrcValueAlign,
+ OpSrcTBAAInfo);
// If chain is alias then stop here.
if (!(IsLoad && IsOpLoad) &&
isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign,
+ SrcTBAAInfo,
OpPtr, OpSize, OpSrcValue, OpSrcValueOffset,
- OpSrcValueAlign)) {
+ OpSrcValueAlign, OpSrcTBAAInfo)) {
Aliases.push_back(Chain);
} else {
// Look further up the chain.
@@ -7206,9 +7565,9 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
// If a single operand then chain to it. We don't need to revisit it.
return Aliases[0];
}
-
+
// Construct a custom tailored token factor.
- return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
+ return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
&Aliases[0], Aliases.size());
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index a4eed71..490b857 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -55,6 +55,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Debug.h"
using namespace llvm;
/// startNewBlock - Set the current block to which generated machine
@@ -197,12 +198,12 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
-
+
// If target-independent code couldn't handle the value, give target-specific
// code a try.
if (!Reg && isa<Constant>(V))
Reg = TargetMaterializeConstant(cast<Constant>(V));
-
+
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
if (Reg != 0) {
@@ -234,7 +235,7 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
LocalValueMap[I] = Reg;
return Reg;
}
-
+
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
// Use the new register.
@@ -414,7 +415,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
- uint64_t Offs =
+ uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
if (N == 0)
@@ -423,7 +424,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
NIsKill = true;
continue;
}
-
+
// N = N + Idx * ElementSize;
uint64_t ElementSize = TD.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
@@ -467,16 +468,28 @@ bool FastISel::SelectCall(const User *I) {
return true;
const Value *Address = DI->getAddress();
- if (!Address)
+ if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
return true;
- if (isa<UndefValue>(Address))
- return true;
- const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
- // Don't handle byval struct arguments or VLAs, for example.
- if (!AI)
- // Building the map above is target independent. Generating DBG_VALUE
- // inline is target dependent; do this now.
- (void)TargetSelectInstruction(cast<Instruction>(I));
+
+ unsigned Reg = 0;
+ unsigned Offset = 0;
+ if (const Argument *Arg = dyn_cast<Argument>(Address)) {
+ if (Arg->hasByValAttr()) {
+ // Byval arguments' frame index is recorded during argument lowering.
+ // Use this info directly.
+ Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
+ if (Offset)
+ Reg = TRI.getFrameRegister(*FuncInfo.MF);
+ }
+ }
+ if (!Reg)
+ Reg = getRegForValue(Address);
+
+ if (Reg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::DBG_VALUE))
+ .addReg(Reg, RegState::Debug).addImm(Offset)
+ .addMetadata(DI->getVariable());
return true;
}
case Intrinsic::dbg_value: {
@@ -505,11 +518,8 @@ bool FastISel::SelectCall(const User *I) {
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
- // Insert an undef so we can see what we dropped.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(0U).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
- }
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
+ }
return true;
}
case Intrinsic::eh_exception: {
@@ -582,12 +592,12 @@ bool FastISel::SelectCall(const User *I) {
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
-
+
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
-
+
// Check if the destination type is legal. Or as a special case,
// it may be i1 if we're doing a truncate because that's
// easy and somewhat common.
@@ -629,7 +639,7 @@ bool FastISel::SelectCast(const User *I, unsigned Opcode) {
InputReg, InputRegIsKill);
if (!ResultReg)
return false;
-
+
UpdateValueMap(I, ResultReg);
return true;
}
@@ -644,23 +654,23 @@ bool FastISel::SelectBitCast(const User *I) {
return true;
}
- // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
+ // Bitcasts of other values become reg-reg copies or BITCAST operators.
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
-
+
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple() ||
!TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
// Unhandled type. Halt "fast" selection and bail.
return false;
-
+
unsigned Op0 = getRegForValue(I->getOperand(0));
if (Op0 == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
-
+
// First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0;
if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
@@ -673,15 +683,15 @@ bool FastISel::SelectBitCast(const User *I) {
ResultReg).addReg(Op0);
}
}
-
- // If the reg-reg copy failed, select a BIT_CONVERT opcode.
+
+ // If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg)
ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
- ISD::BIT_CONVERT, Op0, Op0IsKill);
-
+ ISD::BITCAST, Op0, Op0IsKill);
+
if (!ResultReg)
return false;
-
+
UpdateValueMap(I, ResultReg);
return true;
}
@@ -753,7 +763,7 @@ FastISel::SelectFNeg(const User *I) {
return false;
unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
- ISD::BIT_CONVERT, OpReg, OpRegIsKill);
+ ISD::BITCAST, OpReg, OpRegIsKill);
if (IntReg == 0)
return false;
@@ -765,7 +775,7 @@ FastISel::SelectFNeg(const User *I) {
return false;
ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
- ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
+ ISD::BITCAST, IntResultReg, /*Kill=*/true);
if (ResultReg == 0)
return false;
@@ -845,10 +855,10 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
// Dynamic-sized alloca is not handled yet.
return false;
-
+
case Instruction::Call:
return SelectCall(I);
-
+
case Instruction::BitCast:
return SelectBitCast(I);
@@ -911,7 +921,7 @@ unsigned FastISel::FastEmit_r(MVT, MVT,
return 0;
}
-unsigned FastISel::FastEmit_rr(MVT, MVT,
+unsigned FastISel::FastEmit_rr(MVT, MVT,
unsigned,
unsigned /*Op0*/, bool /*Op0IsKill*/,
unsigned /*Op1*/, bool /*Op1IsKill*/) {
@@ -1139,7 +1149,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
-
+
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
else {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 5ef6404..98582ba 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -29,7 +29,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 61c2a90..e309def 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -31,11 +31,11 @@
using namespace llvm;
/// CountResults - The results of target nodes have register or immediate
-/// operands first, then an optional chain, and optional flag operands (which do
+/// operands first, then an optional chain, and optional glue operands (which do
/// not go into the resulting MachineInstr).
unsigned InstrEmitter::CountResults(SDNode *Node) {
unsigned N = Node->getNumValues();
- while (N && Node->getValueType(N - 1) == MVT::Flag)
+ while (N && Node->getValueType(N - 1) == MVT::Glue)
--N;
if (N && Node->getValueType(N - 1) == MVT::Other)
--N; // Skip over chain result.
@@ -43,12 +43,12 @@ unsigned InstrEmitter::CountResults(SDNode *Node) {
}
/// CountOperands - The inputs to target nodes have any actual inputs first,
-/// followed by an optional chain operand, then an optional flag operand.
+/// followed by an optional chain operand, then an optional glue operand.
/// Compute the number of actual operands that will go into the resulting
/// MachineInstr.
unsigned InstrEmitter::CountOperands(SDNode *Node) {
unsigned N = Node->getNumOperands();
- while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
+ while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
--N;
if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
--N; // Ignore chain if it exists.
@@ -67,7 +67,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
return;
}
@@ -96,7 +96,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
if (Op.getNode() != Node || Op.getResNo() != ResNo)
continue;
EVT VT = Node->getValueType(Op.getResNo());
- if (VT == MVT::Other || VT == MVT::Flag)
+ if (VT == MVT::Other || VT == MVT::Glue)
continue;
Match = false;
if (User->isMachineOpcode()) {
@@ -150,7 +150,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
@@ -224,7 +224,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
if (IsClone)
VRBaseMap.erase(Op);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
}
@@ -264,8 +264,8 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned) {
assert(Op.getValueType() != MVT::Other &&
- Op.getValueType() != MVT::Flag &&
- "Chain and flag operands should occur at end of operand list!");
+ Op.getValueType() != MVT::Glue &&
+ "Chain and glue operands should occur at end of operand list!");
// Get/emit the operand.
unsigned VReg = getVR(Op, VRBaseMap);
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
@@ -377,8 +377,8 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
BA->getTargetFlags()));
} else {
assert(Op.getValueType() != MVT::Other &&
- Op.getValueType() != MVT::Flag &&
- "Chain and flag operands should occur at end of operand list!");
+ Op.getValueType() != MVT::Glue &&
+ "Chain and glue operands should occur at end of operand list!");
AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap,
IsDebug, IsClone, IsCloned);
}
@@ -428,31 +428,47 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
// Figure out the register class to create for the destreg.
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
- const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
- const TargetRegisterClass *SRC = TRC->getSubRegisterRegClass(SubIdx);
- assert(SRC && "Invalid subregister index in EXTRACT_SUBREG");
-
- // Figure out the register class to create for the destreg.
- // Note that if we're going to directly use an existing register,
- // it must be precisely the required class, and not a subclass
- // thereof.
- if (VRBase == 0 || SRC != MRI->getRegClass(VRBase)) {
- // Create the reg
- assert(SRC && "Couldn't find source register class");
- VRBase = MRI->createVirtualRegister(SRC);
- }
+ MachineInstr *DefMI = MRI->getVRegDef(VReg);
+ unsigned SrcReg, DstReg, DefSubIdx;
+ if (DefMI &&
+ TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
+ SubIdx == DefSubIdx) {
+ // Optimize these:
+ // r1025 = s/zext r1024, 4
+ // r1026 = extract_subreg r1025, 4
+ // to a copy
+ // r1026 = copy r1024
+ const TargetRegisterClass *TRC = MRI->getRegClass(SrcReg);
+ VRBase = MRI->createVirtualRegister(TRC);
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
+ } else {
+ const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
+ const TargetRegisterClass *SRC = TRC->getSubRegisterRegClass(SubIdx);
+ assert(SRC && "Invalid subregister index in EXTRACT_SUBREG");
+
+ // Figure out the register class to create for the destreg.
+ // Note that if we're going to directly use an existing register,
+ // it must be precisely the required class, and not a subclass
+ // thereof.
+ if (VRBase == 0 || SRC != MRI->getRegClass(VRBase)) {
+ // Create the reg
+ assert(SRC && "Couldn't find source register class");
+ VRBase = MRI->createVirtualRegister(SRC);
+ }
- // Create the extract_subreg machine instruction.
- MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
- TII->get(TargetOpcode::COPY), VRBase);
+ // Create the extract_subreg machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), VRBase);
- // Add source, and subreg index
- AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap, /*IsDebug=*/false,
- IsClone, IsCloned);
- assert(TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg()) &&
- "Cannot yet extract from physregs");
- MI->getOperand(1).setSubReg(SubIdx);
- MBB->insert(InsertPos, MI);
+ // Add source, and subreg index
+ AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap, /*IsDebug=*/false,
+ IsClone, IsCloned);
+ assert(TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg())&&
+ "Cannot yet extract from physregs");
+ MI->getOperand(1).setSubReg(SubIdx);
+ MBB->insert(InsertPos, MI);
+ }
} else if (Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
SDValue N0 = Node->getOperand(0);
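Why the copy emitted in the branch above is sound, sketched in standalone C++ (illustrative only): extracting the low 32 bits of a value that was just sign- or zero-extended from 32 bits returns the original value, so r1026 can simply copy r1024.

#include <cstdint>
uint32_t extractLowAfterSext(uint32_t r1024) {
  int64_t r1025 = (int64_t)(int32_t)r1024; // r1025 = sext r1024
  return (uint32_t)r1025;                  // extract_subreg r1025 == r1024
}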
@@ -496,7 +512,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
@@ -518,7 +534,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
@@ -543,9 +559,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
- if (!SRC)
- llvm_unreachable("Invalid subregister index in REG_SEQUENCE");
- if (SRC != RC) {
+ if (SRC && SRC != RC) {
MRI->setRegClass(NewVReg, SRC);
RC = SRC;
}
@@ -557,7 +571,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
MBB->insert(InsertPos, MI);
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
- isNew = isNew; // Silence compiler warning.
+ (void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
}
@@ -673,10 +687,10 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// The MachineInstr constructor adds implicit-def operands. Scan through
// these to determine which are dead.
if (MI->getNumOperands() != 0 &&
- Node->getValueType(Node->getNumValues()-1) == MVT::Flag) {
+ Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
// First, collect all used registers.
SmallVector<unsigned, 8> UsedRegs;
- for (SDNode *F = Node->getFlaggedUser(); F; F = F->getFlaggedUser())
+ for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser())
if (F->getOpcode() == ISD::CopyFromReg)
UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
else {
@@ -689,7 +703,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
unsigned Reg = R->getReg();
- if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
UsedRegs.push_back(Reg);
}
}
@@ -721,20 +735,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// hook knows where in the block to insert the replacement code.
MBB->insert(InsertPos, MI);
- if (II.usesCustomInsertionHook()) {
- // Insert this instruction into the basic block using a target
- // specific inserter which may returns a new basic block.
- bool AtEnd = InsertPos == MBB->end();
- MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
- if (NewMBB != MBB) {
- if (AtEnd)
- InsertPos = NewMBB->end();
- MBB = NewMBB;
- }
- return;
- }
-
- // Additional results must be an physical register def.
+ // Additional results must be physical register defs.
if (HasPhysRegOuts) {
for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
@@ -742,17 +743,17 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
// If there are no uses, mark the register as dead now, so that
// MachineLICM/Sink can see that it's dead. Don't do this if the
- // node has a Flag value, for the benefit of targets still using
- // Flag for values in physregs.
- else if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
+ // node has a Glue value, for the benefit of targets still using
+ // Glue for values in physregs.
+ else if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
MI->addRegisterDead(Reg, TRI);
}
}
// If the instruction has implicit defs and the node doesn't, mark the
- // implicit def as dead. If the node has any flag outputs, we don't do this
- // because we don't know what implicit defs are being used by flagged nodes.
- if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
+ // implicit def as dead. If the node has any glue outputs, we don't do this
+ // because we don't know what implicit defs are being used by glued nodes.
+ if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
if (const unsigned *IDList = II.getImplicitDefs()) {
for (unsigned i = NumResults, e = II.getNumDefs()+II.getNumImplicitDefs();
i != e; ++i)
@@ -808,8 +809,8 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
- --NumOps; // Ignore the flag operand.
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
+ --NumOps; // Ignore the glue operand.
// Create the inline asm machine instruction.
MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
@@ -820,11 +821,11 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
MI->addOperand(MachineOperand::CreateES(AsmStr));
- // Add the isAlignStack bit.
- int64_t isAlignStack =
- cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_IsAlignStack))->
+ // Add the HasSideEffect and isAlignStack bits.
+ int64_t ExtraInfo =
+ cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
getZExtValue();
- MI->addOperand(MachineOperand::CreateImm(isAlignStack));
+ MI->addOperand(MachineOperand::CreateImm(ExtraInfo));
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2981cd3..49c862c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -11,14 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
@@ -65,11 +66,6 @@ class SelectionDAGLegalize {
/// against each other, including inserted libcalls.
SDValue LastCALLSEQ_END;
- /// IsLegalizingCall - This member is used *only* for purposes of providing
- /// helpful assertions that a libcall isn't created while another call is
- /// being legalized (which could lead to non-serialized call sequences).
- bool IsLegalizingCall;
-
enum LegalizeAction {
Legal, // The target natively supports this operation.
Promote, // This operation should be executed in a larger type.
@@ -91,6 +87,9 @@ class SelectionDAGLegalize {
// If someone requests legalization of the new node, return itself.
if (From != To)
LegalizedNodes.insert(std::make_pair(To, To));
+
+ // Transfer SDDbgValues.
+ DAG.TransferDbgValues(From, To);
}
public:
@@ -172,6 +171,7 @@ private:
SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
+ SDValue ExpandInsertToVectorThroughStack(SDValue Op);
SDValue ExpandVectorBuildThroughStack(SDNode* Node);
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
@@ -224,7 +224,6 @@ SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
void SelectionDAGLegalize::LegalizeDAG() {
LastCALLSEQ_END = DAG.getEntryNode();
- IsLegalizingCall = false;
// The legalize process is inherently a bottom-up recursive process (users
// legalize their uses before themselves). Given infinite stack space, we
@@ -251,9 +250,16 @@ void SelectionDAGLegalize::LegalizeDAG() {
/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
-static SDNode *FindCallEndFromCallStart(SDNode *Node) {
- if (Node->getOpcode() == ISD::CALLSEQ_END)
- return Node;
+static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) {
+ // Nested CALLSEQ_START/END constructs aren't yet legal,
+ // but we can DTRT and handle them correctly here.
+ if (Node->getOpcode() == ISD::CALLSEQ_START)
+ depth++;
+ else if (Node->getOpcode() == ISD::CALLSEQ_END) {
+ depth--;
+ if (depth == 0)
+ return Node;
+ }
if (Node->use_empty())
return 0; // No CallSeqEnd
@@ -283,7 +289,7 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) {
SDNode *User = *UI;
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
if (User->getOperand(i) == TheChain)
- if (SDNode *Result = FindCallEndFromCallStart(User))
+ if (SDNode *Result = FindCallEndFromCallStart(User, depth))
return Result;
}
return 0;
@@ -292,12 +298,26 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) {
/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
+ int nested = 0;
assert(Node && "Didn't find callseq_start for a call??");
- if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
-
- assert(Node->getOperand(0).getValueType() == MVT::Other &&
- "Node doesn't have a token chain argument!");
- return FindCallStartFromCallEnd(Node->getOperand(0).getNode());
+ while (Node->getOpcode() != ISD::CALLSEQ_START || nested) {
+ Node = Node->getOperand(0).getNode();
+ assert(Node->getOperand(0).getValueType() == MVT::Other &&
+ "Node doesn't have a token chain argument!");
+ switch (Node->getOpcode()) {
+ default:
+ break;
+ case ISD::CALLSEQ_START:
+ if (!nested)
+ return Node;
+ nested--;
+ break;
+ case ISD::CALLSEQ_END:
+ nested++;
+ break;
+ }
+ }
+ return 0;
}
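A minimal sketch of the depth discipline both call-sequence walkers now share, using '(' for CALLSEQ_START and ')' for CALLSEQ_END (illustrative only): walking backwards from an END, each nested END pushes a level and each START pops one; the match is the START reached at depth zero.

#include <vector>
int findMatchingStart(const std::vector<char> &Seq, int EndIdx) {
  int Nested = 0;
  for (int i = EndIdx - 1; i >= 0; --i) {
    if (Seq[i] == ')')
      ++Nested;            // a nested call sequence ends here
    else if (Seq[i] == '(') {
      if (Nested == 0)
        return i;          // the CALLSEQ_START we were looking for
      --Nested;
    }
  }
  return -1;               // unmatched, mirroring the "return 0" above
}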
/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
@@ -377,12 +397,12 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
if (Extend)
- return DAG.getExtLoad(ISD::EXTLOAD, OrigVT, dl,
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
DAG.getEntryNode(),
- CPIdx, PseudoSourceValue::getConstantPool(),
- 0, VT, false, false, Alignment);
+ CPIdx, MachinePointerInfo::getConstantPool(),
+ VT, false, false, Alignment);
return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0, false, false,
+ MachinePointerInfo::getConstantPool(), false, false,
Alignment);
}
@@ -395,7 +415,6 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
SDValue Val = ST->getValue();
EVT VT = Val.getValueType();
int Alignment = ST->getAlignment();
- int SVOffset = ST->getSrcValueOffset();
DebugLoc dl = ST->getDebugLoc();
if (ST->getMemoryVT().isFloatingPoint() ||
ST->getMemoryVT().isVector()) {
@@ -404,10 +423,9 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Expand to a bitconvert of the value to the integer type of the
// same size, then a (misaligned) int store.
// FIXME: Does not handle truncating floating point stores!
- SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val);
- return DAG.getStore(Chain, dl, Result, Ptr, ST->getSrcValue(),
- SVOffset, ST->isVolatile(), ST->isNonTemporal(),
- Alignment);
+ SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
+ return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
+ ST->isVolatile(), ST->isNonTemporal(), Alignment);
} else {
// Do an (aligned) store to a stack slot, then copy from the stack slot
// to the final destination using (unaligned) integer loads and stores.
@@ -425,8 +443,8 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Perform the original store, only redirected to the stack slot.
SDValue Store = DAG.getTruncStore(Chain, dl,
- Val, StackPtr, NULL, 0, StoredVT,
- false, false, 0);
+ Val, StackPtr, MachinePointerInfo(),
+ StoredVT, false, false, 0);
SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
SmallVector<SDValue, 8> Stores;
unsigned Offset = 0;
@@ -434,11 +452,12 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Do all but one of the copies using the full register width.
for (unsigned i = 1; i < NumRegs; i++) {
// Load one integer register's worth from the stack slot.
- SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, NULL, 0,
+ SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
+ MachinePointerInfo(),
false, false, 0);
// Store it to the final location. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
- ST->getSrcValue(), SVOffset + Offset,
+ ST->getPointerInfo().getWithOffset(Offset),
ST->isVolatile(), ST->isNonTemporal(),
MinAlign(ST->getAlignment(), Offset)));
// Increment the pointers.
@@ -455,11 +474,13 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
8 * (StoredBytes - Offset));
// Load from the stack slot.
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Store, StackPtr,
- NULL, 0, MemVT, false, false, 0);
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
+ MachinePointerInfo(),
+ MemVT, false, false, 0);
Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
- ST->getSrcValue(), SVOffset + Offset,
+ ST->getPointerInfo()
+ .getWithOffset(Offset),
MemVT, ST->isVolatile(),
ST->isNonTemporal(),
MinAlign(ST->getAlignment(), Offset)));
@@ -484,13 +505,13 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Store the two parts
SDValue Store1, Store2;
Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
- ST->getSrcValue(), SVOffset, NewStoredVT,
+ ST->getPointerInfo(), NewStoredVT,
ST->isVolatile(), ST->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
Alignment = MinAlign(Alignment, IncrementSize);
Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
- ST->getSrcValue(), SVOffset + IncrementSize,
+ ST->getPointerInfo().getWithOffset(IncrementSize),
NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
Alignment);
@@ -501,7 +522,6 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
static
SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
const TargetLowering &TLI) {
- int SVOffset = LD->getSrcValueOffset();
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
EVT VT = LD->getValueType(0);
@@ -512,74 +532,75 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
if (TLI.isTypeLegal(intVT)) {
// Expand to a (misaligned) integer load of the same size,
// then bitconvert to floating point or vector.
- SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset, LD->isVolatile(),
+ SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
+ LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
- SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad);
+ SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
if (VT.isFloatingPoint() && LoadedVT != VT)
Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, dl);
- } else {
- // Copy the value to a (aligned) stack slot using (unaligned) integer
- // loads and stores, then do a (aligned) load from the stack slot.
- EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
- unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
- unsigned RegBytes = RegVT.getSizeInBits() / 8;
- unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
-
- // Make sure the stack slot is also aligned for the register type.
- SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
-
- SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
- SmallVector<SDValue, 8> Stores;
- SDValue StackPtr = StackBase;
- unsigned Offset = 0;
-
- // Do all but one copies using the full register width.
- for (unsigned i = 1; i < NumRegs; i++) {
- // Load one integer register's worth from the original location.
- SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset + Offset, LD->isVolatile(),
- LD->isNonTemporal(),
- MinAlign(LD->getAlignment(), Offset));
- // Follow the load with a store to the stack slot. Remember the store.
- Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
- NULL, 0, false, false, 0));
- // Increment the pointers.
- Offset += RegBytes;
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
- StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
- Increment);
- }
+ }
- // The last copy may be partial. Do an extending load.
- EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
- 8 * (LoadedBytes - Offset));
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Chain, Ptr,
- LD->getSrcValue(), SVOffset + Offset,
- MemVT, LD->isVolatile(),
- LD->isNonTemporal(),
- MinAlign(LD->getAlignment(), Offset));
+ // Copy the value to an (aligned) stack slot using (unaligned) integer
+ // loads and stores, then do an (aligned) load from the stack slot.
+ EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
+ unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
+ unsigned RegBytes = RegVT.getSizeInBits() / 8;
+ unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
+
+ // Make sure the stack slot is also aligned for the register type.
+ SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
+
+ SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
+ SmallVector<SDValue, 8> Stores;
+ SDValue StackPtr = StackBase;
+ unsigned Offset = 0;
+
+ // Do all but one of the copies using the full register width.
+ for (unsigned i = 1; i < NumRegs; i++) {
+ // Load one integer register's worth from the original location.
+ SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(Offset),
+ LD->isVolatile(), LD->isNonTemporal(),
+ MinAlign(LD->getAlignment(), Offset));
// Follow the load with a store to the stack slot. Remember the store.
- // On big-endian machines this requires a truncating store to ensure
- // that the bits end up in the right place.
- Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
- NULL, 0, MemVT, false, false, 0));
-
- // The order of the stores doesn't matter - say it with a TokenFactor.
- SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
- Stores.size());
-
- // Finally, perform the original load only redirected to the stack slot.
- Load = DAG.getExtLoad(LD->getExtensionType(), VT, dl, TF, StackBase,
- NULL, 0, LoadedVT, false, false, 0);
-
- // Callers expect a MERGE_VALUES node.
- SDValue Ops[] = { Load, TF };
- return DAG.getMergeValues(Ops, 2, dl);
+ Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
+ MachinePointerInfo(), false, false, 0));
+ // Increment the pointers.
+ Offset += RegBytes;
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
+ StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
+ Increment);
}
+
+ // The last copy may be partial. Do an extending load.
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
+ 8 * (LoadedBytes - Offset));
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(Offset),
+ MemVT, LD->isVolatile(),
+ LD->isNonTemporal(),
+ MinAlign(LD->getAlignment(), Offset));
+ // Follow the load with a store to the stack slot. Remember the store.
+ // On big-endian machines this requires a truncating store to ensure
+ // that the bits end up in the right place.
+ Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
+ MachinePointerInfo(), MemVT,
+ false, false, 0));
+
+ // The order of the stores doesn't matter - say it with a TokenFactor.
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
+ Stores.size());
+
+ // Finally, perform the original load only redirected to the stack slot.
+ Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
+ MachinePointerInfo(), LoadedVT, false, false, 0);
+
+ // Callers expect a MERGE_VALUES node.
+ SDValue Ops[] = { Load, TF };
+ return DAG.getMergeValues(Ops, 2, dl);
}
assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
"Unaligned load of unsupported type.");
@@ -602,22 +623,24 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Load the value in two parts
SDValue Lo, Hi;
if (TLI.isLittleEndian()) {
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset, NewLoadedVT, LD->isVolatile(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
+ NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
+ Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
} else {
- Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset, NewLoadedVT, LD->isVolatile(),
+ Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
+ NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
}
@@ -660,7 +683,7 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
// Store the vector.
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
- PseudoSourceValue::getFixedStack(SPFI), 0,
+ MachinePointerInfo::getFixedStack(SPFI),
false, false, 0);
// Truncate or zero extend offset to target pointer type.
@@ -671,13 +694,11 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
// Store the scalar value.
- Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2,
- PseudoSourceValue::getFixedStack(SPFI), 0, EltVT,
+ Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
false, false, 0);
// Load the updated vector.
return DAG.getLoad(VT, dl, Ch, StackPtr,
- PseudoSourceValue::getFixedStack(SPFI), 0,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
}
@@ -719,7 +740,6 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
SDValue Tmp1 = ST->getChain();
SDValue Tmp2 = ST->getBasePtr();
SDValue Tmp3;
- int SVOffset = ST->getSrcValueOffset();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
@@ -730,29 +750,34 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
Tmp3 = DAG.getConstant(CFP->getValueAPF().
bitcastToAPInt().zextOrTrunc(32),
MVT::i32);
- return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, isVolatile, isNonTemporal, Alignment);
- } else if (CFP->getValueType(0) == MVT::f64) {
+ return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
+ }
+
+ if (CFP->getValueType(0) == MVT::f64) {
// If this target supports 64-bit registers, do a single 64-bit store.
if (getTypeAction(MVT::i64) == Legal) {
Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
zextOrTrunc(64), MVT::i64);
- return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, isVolatile, isNonTemporal, Alignment);
- } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
+ return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
+ }
+
+ if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
// Otherwise, if the target supports 32-bit registers, use 2 32-bit
// stores. If the target supports neither 32- nor 64-bits, this
// xform is certainly not worth it.
const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
- SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32);
+ SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
if (TLI.isBigEndian()) std::swap(Lo, Hi);
- Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(),
- SVOffset, isVolatile, isNonTemporal, Alignment);
+ Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
+ isNonTemporal, Alignment);
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(4));
- Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4,
+ Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
+ ST->getPointerInfo().getWithOffset(4),
isVolatile, isNonTemporal, MinAlign(Alignment, 4U));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
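A sketch of the two-store expansion above (little-endian assumed; names are illustrative): the f64 constant's bit pattern is stored as two i32 halves, Lo first.

#include <cstdint>
#include <cstring>
void storeF64AsTwoInts(double V, uint32_t *P) {
  uint64_t Bits;
  std::memcpy(&Bits, &V, sizeof(Bits)); // bitcastToAPInt()
  P[0] = (uint32_t)Bits;                // Lo = IntVal.trunc(32)
  P[1] = (uint32_t)(Bits >> 32);        // Hi = IntVal.lshr(32).trunc(32)
}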
@@ -792,7 +817,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
bool isCustom = false;
// Figure out the correct action; the way to query this varies by opcode
- TargetLowering::LegalizeAction Action;
+ TargetLowering::LegalizeAction Action = TargetLowering::Legal;
bool SimpleFinishLegalizing = true;
switch (Node->getOpcode()) {
case ISD::INTRINSIC_W_CHAIN:
@@ -860,6 +885,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::FRAME_TO_ARGS_OFFSET:
case ISD::EH_SJLJ_SETJMP:
case ISD::EH_SJLJ_LONGJMP:
+ case ISD::EH_SJLJ_DISPATCHSETUP:
// These operations lie about being legal: when they claim to be legal,
// they should actually be expanded.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -996,6 +1022,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
}
break;
case ISD::CALLSEQ_START: {
+ static int depth = 0;
SDNode *CallEnd = FindCallEndFromCallStart(Node);
// Recursively Legalize all of the inputs of the call end that do not lead
@@ -1013,7 +1040,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Merge in the last call to ensure that this call starts after the last
// call ended.
- if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
+ if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken && depth == 0) {
Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Tmp1, LastCALLSEQ_END);
Tmp1 = LegalizeOp(Tmp1);
@@ -1036,14 +1063,18 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// sequence have been legalized, legalize the call itself. During this
// process, no libcalls can/will be inserted, guaranteeing that no calls
// can overlap.
- assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
+
+ SDValue Saved_LastCALLSEQ_END = LastCALLSEQ_END;
// Note that we are selecting this call!
LastCALLSEQ_END = SDValue(CallEnd, 0);
- IsLegalizingCall = true;
+ depth++;
// Legalize the call, starting from the CALLSEQ_END.
LegalizeOp(LastCALLSEQ_END);
- assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!");
+ depth--;
+ assert(depth >= 0 && "Unmatched CALLSEQ_START?");
+ if (depth > 0)
+ LastCALLSEQ_END = Saved_LastCALLSEQ_END;
return Result;
}
case ISD::CALLSEQ_END:
@@ -1062,7 +1093,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
// Do not try to legalize the target-specific arguments (#1+), except for
// an optional flag input.
- if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){
+ if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
@@ -1082,10 +1113,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Result.getResNo());
}
}
- assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
// This finishes up call legalization.
- IsLegalizingCall = false;
-
// If the CALLSEQ_END node has a flag, remember that we legalized it.
AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
if (Node->getNumValues() == 2)
@@ -1136,11 +1164,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Change base type to a different vector type.
EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
- Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(),
+ Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment());
- Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1));
+ Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
Tmp4 = LegalizeOp(Tmp1.getValue(1));
break;
}
@@ -1150,227 +1177,224 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
AddLegalizedOperand(SDValue(Node, 0), Tmp3);
AddLegalizedOperand(SDValue(Node, 1), Tmp4);
return Op.getResNo() ? Tmp4 : Tmp3;
- } else {
- EVT SrcVT = LD->getMemoryVT();
- unsigned SrcWidth = SrcVT.getSizeInBits();
- int SVOffset = LD->getSrcValueOffset();
- unsigned Alignment = LD->getAlignment();
- bool isVolatile = LD->isVolatile();
- bool isNonTemporal = LD->isNonTemporal();
-
- if (SrcWidth != SrcVT.getStoreSizeInBits() &&
- // Some targets pretend to have an i1 loading operation, and actually
- // load an i8. This trick is correct for ZEXTLOAD because the top 7
- // bits are guaranteed to be zero; it helps the optimizers understand
- // that these bits are zero. It is also useful for EXTLOAD, since it
- // tells the optimizers that those bits are undefined. It would be
- // nice to have an effective generic way of getting these benefits...
- // Until such a way is found, don't insist on promoting i1 here.
- (SrcVT != MVT::i1 ||
- TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
- // Promote to a byte-sized load if not loading an integral number of
- // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
- unsigned NewWidth = SrcVT.getStoreSizeInBits();
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
- SDValue Ch;
-
- // The extra bits are guaranteed to be zero, since we stored them that
- // way. A zext load from NVT thus automatically gives zext from SrcVT.
-
- ISD::LoadExtType NewExtType =
- ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
-
- Result = DAG.getExtLoad(NewExtType, Node->getValueType(0), dl,
- Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
- NVT, isVolatile, isNonTemporal, Alignment);
-
- Ch = Result.getValue(1); // The chain.
-
- if (ExtType == ISD::SEXTLOAD)
- // Having the top bits zero doesn't help when sign extending.
- Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
- Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
- // All the top bits are guaranteed to be zero - inform the optimizers.
- Result = DAG.getNode(ISD::AssertZext, dl,
- Result.getValueType(), Result,
- DAG.getValueType(SrcVT));
-
- Tmp1 = LegalizeOp(Result);
- Tmp2 = LegalizeOp(Ch);
- } else if (SrcWidth & (SrcWidth - 1)) {
- // If not loading a power-of-2 number of bits, expand as two loads.
- assert(!SrcVT.isVector() && "Unsupported extload!");
- unsigned RoundWidth = 1 << Log2_32(SrcWidth);
- assert(RoundWidth < SrcWidth);
- unsigned ExtraWidth = SrcWidth - RoundWidth;
- assert(ExtraWidth < RoundWidth);
- assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
- "Load size not an integral number of bytes!");
- EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
- EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
- SDValue Lo, Hi, Ch;
- unsigned IncrementSize;
+ }
- if (TLI.isLittleEndian()) {
- // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
- // Load the bottom RoundWidth bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), dl,
- Tmp1, Tmp2,
- LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
- isNonTemporal, Alignment);
-
- // Load the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
- LD->getSrcValue(), SVOffset + IncrementSize,
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
-
- // Build a factor node to remember that this load is independent of
- // the other one.
- Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
- Hi.getValue(1));
-
- // Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
+ EVT SrcVT = LD->getMemoryVT();
+ unsigned SrcWidth = SrcVT.getSizeInBits();
+ unsigned Alignment = LD->getAlignment();
+ bool isVolatile = LD->isVolatile();
+ bool isNonTemporal = LD->isNonTemporal();
+
+ if (SrcWidth != SrcVT.getStoreSizeInBits() &&
+ // Some targets pretend to have an i1 loading operation, and actually
+ // load an i8. This trick is correct for ZEXTLOAD because the top 7
+ // bits are guaranteed to be zero; it helps the optimizers understand
+ // that these bits are zero. It is also useful for EXTLOAD, since it
+ // tells the optimizers that those bits are undefined. It would be
+ // nice to have an effective generic way of getting these benefits...
+ // Until such a way is found, don't insist on promoting i1 here.
+ (SrcVT != MVT::i1 ||
+ TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+ // Promote to a byte-sized load if not loading an integral number of
+ // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
+ unsigned NewWidth = SrcVT.getStoreSizeInBits();
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
+ SDValue Ch;
+
+ // The extra bits are guaranteed to be zero, since we stored them that
+ // way. A zext load from NVT thus automatically gives zext from SrcVT.
+
+ ISD::LoadExtType NewExtType =
+ ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
+
+ Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
+
+ Ch = Result.getValue(1); // The chain.
+
+ if (ExtType == ISD::SEXTLOAD)
+ // Having the top bits zero doesn't help when sign extending.
+ Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
+ Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
+ // All the top bits are guaranteed to be zero - inform the optimizers.
+ Result = DAG.getNode(ISD::AssertZext, dl,
+ Result.getValueType(), Result,
+ DAG.getValueType(SrcVT));
+
+ Tmp1 = LegalizeOp(Result);
+ Tmp2 = LegalizeOp(Ch);
+ } else if (SrcWidth & (SrcWidth - 1)) {
+ // If not loading a power-of-2 number of bits, expand as two loads.
+ assert(!SrcVT.isVector() && "Unsupported extload!");
+ unsigned RoundWidth = 1 << Log2_32(SrcWidth);
+ assert(RoundWidth < SrcWidth);
+ unsigned ExtraWidth = SrcWidth - RoundWidth;
+ assert(ExtraWidth < RoundWidth);
+ assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+ "Load size not an integral number of bytes!");
+ EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
+ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
+ SDValue Lo, Hi, Ch;
+ unsigned IncrementSize;
+
+ if (TLI.isLittleEndian()) {
+ // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
+ // Load the bottom RoundWidth bits.
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
+ Tmp1, Tmp2,
+ LD->getPointerInfo(), RoundVT, isVolatile,
+ isNonTemporal, Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of
+ // the other one.
+ Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
+
+ // Join the hi and lo parts.
+ Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ } else {
+ // Big endian - avoid unaligned loads.
+ // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
+ // Load the top RoundWidth bits.
+ Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getPointerInfo(), RoundVT, isVolatile,
+ isNonTemporal, Alignment);
+
+ // Load the remaining ExtraWidth bits.
+ IncrementSize = RoundWidth / 8;
+ Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
+ DAG.getIntPtrConstant(IncrementSize));
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
+ dl, Node->getValueType(0), Tmp1, Tmp2,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+
+ // Build a factor node to remember that this load is independent of
+ // the other one.
+ Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
+ Hi.getValue(1));
+
+ // Move the top bits to the right place.
+ Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
+
+ // Join the hi and lo parts.
+ Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ }
- // Join the hi and lo parts.
- Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ Tmp1 = LegalizeOp(Result);
+ Tmp2 = LegalizeOp(Ch);
+ } else {
+ switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
+ default: assert(0 && "This action is not supported yet!");
+ case TargetLowering::Custom:
+ isCustom = true;
+ // FALLTHROUGH
+ case TargetLowering::Legal:
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
+ Tmp1 = Result.getValue(0);
+ Tmp2 = Result.getValue(1);
+
+ if (isCustom) {
+ Tmp3 = TLI.LowerOperation(Result, DAG);
+ if (Tmp3.getNode()) {
+ Tmp1 = LegalizeOp(Tmp3);
+ Tmp2 = LegalizeOp(Tmp3.getValue(1));
+ }
} else {
- // Big endian - avoid unaligned loads.
- // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
- // Load the top RoundWidth bits.
- Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
- LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
- isNonTemporal, Alignment);
-
- // Load the remaining ExtraWidth bits.
- IncrementSize = RoundWidth / 8;
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
- Node->getValueType(0), dl, Tmp1, Tmp2,
- LD->getSrcValue(), SVOffset + IncrementSize,
- ExtraVT, isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
-
- // Build a factor node to remember that this load is independent of
- // the other one.
- Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
- Hi.getValue(1));
-
- // Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
-
- // Join the hi and lo parts.
- Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
- }
-
- Tmp1 = LegalizeOp(Result);
- Tmp2 = LegalizeOp(Ch);
- } else {
- switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
- default: assert(0 && "This action is not supported yet!");
- case TargetLowering::Custom:
- isCustom = true;
- // FALLTHROUGH
- case TargetLowering::Legal:
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- Tmp1, Tmp2, LD->getOffset()),
- Result.getResNo());
- Tmp1 = Result.getValue(0);
- Tmp2 = Result.getValue(1);
-
- if (isCustom) {
- Tmp3 = TLI.LowerOperation(Result, DAG);
- if (Tmp3.getNode()) {
- Tmp1 = LegalizeOp(Tmp3);
- Tmp2 = LegalizeOp(Tmp3.getValue(1));
- }
- } else {
- // If this is an unaligned load and the target doesn't support it,
- // expand it.
- if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty =
- LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment =
- TLI.getTargetData()->getABITypeAlignment(Ty);
- if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
- DAG, TLI);
- Tmp1 = Result.getOperand(0);
- Tmp2 = Result.getOperand(1);
- Tmp1 = LegalizeOp(Tmp1);
- Tmp2 = LegalizeOp(Tmp2);
- }
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ const Type *Ty =
+ LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (LD->getAlignment() < ABIAlignment){
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ DAG, TLI);
+ Tmp1 = Result.getOperand(0);
+ Tmp2 = Result.getOperand(1);
+ Tmp1 = LegalizeOp(Tmp1);
+ Tmp2 = LegalizeOp(Tmp2);
}
}
- break;
- case TargetLowering::Expand:
- if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
- SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- unsigned ExtendOp;
- switch (ExtType) {
- case ISD::EXTLOAD:
- ExtendOp = (SrcVT.isFloatingPoint() ?
- ISD::FP_EXTEND : ISD::ANY_EXTEND);
- break;
- case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
- case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
- default: llvm_unreachable("Unexpected extend load type!");
- }
- Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
- Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Load.getValue(1));
+ }
+ break;
+ case TargetLowering::Expand:
+ if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
+ SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
+ LD->getPointerInfo(),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->getAlignment());
+ unsigned ExtendOp;
+ switch (ExtType) {
+ case ISD::EXTLOAD:
+ ExtendOp = (SrcVT.isFloatingPoint() ?
+ ISD::FP_EXTEND : ISD::ANY_EXTEND);
break;
+ case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
+ case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
+ default: llvm_unreachable("Unexpected extend load type!");
}
- // FIXME: This does not work for vectors on most targets. Sign- and
- // zero-extend operations are currently folded into extending loads,
- // whether they are legal or not, and then we end up here without any
- // support for legalizing them.
- assert(ExtType != ISD::EXTLOAD &&
- "EXTLOAD should always be supported!");
- // Turn the unsupported load into an EXTLOAD followed by an explicit
- // zero/sign extend inreg.
- Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0), dl,
- Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(), SrcVT,
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- SDValue ValRes;
- if (ExtType == ISD::SEXTLOAD)
- ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
- Result.getValueType(),
- Result, DAG.getValueType(SrcVT));
- else
- ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT);
- Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes.
+ Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
+ Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
+ Tmp2 = LegalizeOp(Load.getValue(1));
break;
}
+ // FIXME: This does not work for vectors on most targets. Sign- and
+ // zero-extend operations are currently folded into extending loads,
+ // whether they are legal or not, and then we end up here without any
+ // support for legalizing them.
+ assert(ExtType != ISD::EXTLOAD &&
+ "EXTLOAD should always be supported!");
+ // Turn the unsupported load into an EXTLOAD followed by an explicit
+ // zero/sign extend inreg.
+ Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->getAlignment());
+ SDValue ValRes;
+ if (ExtType == ISD::SEXTLOAD)
+ ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
+ Result.getValueType(),
+ Result, DAG.getValueType(SrcVT));
+ else
+ ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
+ Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
+ Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes.
+ break;
}
-
- // Since loads produce two values, make sure to remember that we legalized
- // both of them.
- AddLegalizedOperand(SDValue(Node, 0), Tmp1);
- AddLegalizedOperand(SDValue(Node, 1), Tmp2);
- return Op.getResNo() ? Tmp2 : Tmp1;
}
+
+ // Since loads produce two values, make sure to remember that we legalized
+ // both of them.
+ AddLegalizedOperand(SDValue(Node, 0), Tmp1);
+ AddLegalizedOperand(SDValue(Node, 1), Tmp2);
+ return Op.getResNo() ? Tmp2 : Tmp1;
}
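As a rough C illustration (not part of the patch), the little-endian branch
of the non-power-of-two load expansion above, instantiated for EXTLOAD:i24
(RoundWidth = 16, ExtraWidth = 8; the function name is made up):

#include <stdint.h>
#include <string.h>

/* Illustrative only: EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) */
uint32_t extload_i24_le(const uint8_t *p) {
  uint16_t lo;
  memcpy(&lo, p, sizeof lo);                   /* bottom RoundWidth bits */
  uint8_t hi = p[2];                           /* remaining ExtraWidth   */
  return (uint32_t)lo | ((uint32_t)hi << 16);  /* shift hi up and join   */
}
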
case ISD::STORE: {
StoreSDNode *ST = cast<StoreSDNode>(Node);
Tmp1 = LegalizeOp(ST->getChain()); // Legalize the chain.
Tmp2 = LegalizeOp(ST->getBasePtr()); // Legalize the pointer.
- int SVOffset = ST->getSrcValueOffset();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
@@ -1408,10 +1432,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
case TargetLowering::Promote:
assert(VT.isVector() && "Unknown legal promote case!");
- Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl,
+ Tmp3 = DAG.getNode(ISD::BITCAST, dl,
TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getSrcValue(), SVOffset, isVolatile,
+ ST->getPointerInfo(), isVolatile,
isNonTemporal, Alignment);
break;
}
@@ -1430,9 +1454,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
StVT.getStoreSizeInBits());
Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
- Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, NVT, isVolatile, isNonTemporal,
- Alignment);
+ Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
} else if (StWidth & (StWidth - 1)) {
// If not storing a power-of-2 number of bits, expand as two stores.
assert(!StVT.isVector() && "Unsupported truncstore!");
@@ -1450,8 +1473,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (TLI.isLittleEndian()) {
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
// Store the bottom RoundWidth bits.
- Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, RoundVT,
+ Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ RoundVT,
isVolatile, isNonTemporal, Alignment);
// Store the remaining ExtraWidth bits.
@@ -1460,9 +1483,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
DAG.getIntPtrConstant(IncrementSize));
Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
- Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
- SVOffset + IncrementSize, ExtraVT, isVolatile,
- isNonTemporal,
+ Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2,
+ ST->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
} else {
// Big endian - avoid unaligned stores.
@@ -1470,17 +1493,16 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Store the top RoundWidth bits.
Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
- Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
- SVOffset, RoundVT, isVolatile, isNonTemporal,
- Alignment);
+ Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(),
+ RoundVT, isVolatile, isNonTemporal, Alignment);
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset + IncrementSize, ExtraVT, isVolatile,
- isNonTemporal,
+ Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
+ ST->getPointerInfo().getWithOffset(IncrementSize),
+ ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
}
@@ -1514,9 +1536,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// TRUNCSTORE:i16 i32 -> STORE i16
assert(isTypeLegal(StVT) && "Do not know how to expand this store!");
Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3);
- Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
- SVOffset, isVolatile, isNonTemporal,
- Alignment);
+ Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
break;
}
}
@@ -1543,8 +1564,8 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
DebugLoc dl = Op.getDebugLoc();
// Store the value to a temporary stack slot, then LOAD the returned part.
SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
- SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0,
- false, false, 0);
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Add the offset to the index.
unsigned EltSize =
@@ -1560,12 +1581,56 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
if (Op.getValueType().isVector())
- return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0,
+    return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, MachinePointerInfo(),
false, false, 0);
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
+ MachinePointerInfo(),
+ Vec.getValueType().getVectorElementType(),
+ false, false, 0);
+}
+
+SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
+ assert(Op.getValueType().isVector() && "Non-vector insert subvector!");
+
+ SDValue Vec = Op.getOperand(0);
+ SDValue Part = Op.getOperand(1);
+ SDValue Idx = Op.getOperand(2);
+ DebugLoc dl = Op.getDebugLoc();
+
+ // Store the value to a temporary stack slot, then LOAD the returned part.
+
+ SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
+ int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
+
+ // First store the whole vector.
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
+ false, false, 0);
+
+ // Then store the inserted part.
+
+ // Add the offset to the index.
+ unsigned EltSize =
+ Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+
+ Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
+ DAG.getConstant(EltSize, Idx.getValueType()));
+
+ if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
+ Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
else
- return DAG.getExtLoad(ISD::EXTLOAD, Op.getValueType(), dl, Ch, StackPtr,
- NULL, 0, Vec.getValueType().getVectorElementType(),
- false, false, 0);
+ Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);
+
+ SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
+ StackPtr);
+
+ // Store the subvector.
+ Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr,
+ MachinePointerInfo(), false, false, 0);
+
+ // Finally, load the updated vector.
+ return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
+ false, false, 0);
}
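A conceptual C analogue of ExpandInsertToVectorThroughStack (element type and
buffer size are assumptions for illustration): spill the whole vector,
overwrite the inserted part at the scaled index, reload the result.

#include <string.h>

/* Sketch only: insert `part` (m floats) into `vec` (n floats, n <= 64)
   at element index `idx` by going through a temporary stack slot. */
void insert_subvector_f32(float *vec, unsigned n,
                          const float *part, unsigned m, unsigned idx) {
  float slot[64];                               /* the stack temporary    */
  memcpy(slot, vec, n * sizeof(float));         /* first store the vector */
  memcpy(slot + idx, part, m * sizeof(float));  /* then the inserted part */
  memcpy(vec, slot, n * sizeof(float));         /* finally reload         */
}
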
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
@@ -1578,7 +1643,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
DebugLoc dl = Node->getDebugLoc();
SDValue FIPtr = DAG.CreateStackTemporary(VT);
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(FI);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);
// Emit a store of each element to the stack slot.
SmallVector<SDValue, 8> Stores;
@@ -1597,11 +1662,13 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
// element type, only store the bits necessary.
if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) {
Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
- Node->getOperand(i), Idx, SV, Offset,
+ Node->getOperand(i), Idx,
+ PtrInfo.getWithOffset(Offset),
EltVT, false, false, 0));
} else
Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
- Node->getOperand(i), Idx, SV, Offset,
+ Node->getOperand(i), Idx,
+ PtrInfo.getWithOffset(Offset),
false, false, 0));
}
@@ -1613,7 +1680,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
StoreChain = DAG.getEntryNode();
// Result is a load from the stack slot.
- return DAG.getLoad(VT, dl, StoreChain, FIPtr, SV, 0, false, false, 0);
+ return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0);
}
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
@@ -1628,7 +1695,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
if (isTypeLegal(IVT)) {
// Convert to an integer with the same sign bit.
- SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
+ SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
} else {
// Store the float to memory, then load the sign part out as an integer.
MVT LoadTy = TLI.getPointerTy();
@@ -1636,12 +1703,13 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
// Then store the float to it.
SDValue Ch =
- DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, NULL, 0,
+ DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
false, false, 0);
if (TLI.isBigEndian()) {
assert(FloatVT.isByteSized() && "Unsupported floating point type!");
// Load out a legal integer with the same sign bit as the float.
- SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, NULL, 0, false, false, 0);
+ SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
+ false, false, 0);
} else { // Little endian
SDValue LoadPtr = StackPtr;
// The float may be wider than the integer we are going to load. Advance
@@ -1651,7 +1719,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(),
LoadPtr, DAG.getIntPtrConstant(ByteOffset));
// Load a legal integer containing the sign bit.
- SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, NULL, 0, false, false, 0);
+ SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
+ false, false, 0);
// Move the sign bit to the top bit of the loaded integer.
unsigned BitShift = LoadTy.getSizeInBits() -
(FloatVT.getSizeInBits() - 8 * ByteOffset);
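The integer path of ExpandFCOPYSIGN amounts to splicing one value's sign bit
onto another value's magnitude; a hedged C sketch for 32-bit IEEE-754 floats
(function name illustrative):

#include <stdint.h>
#include <string.h>

/* Grab the sign of `sgn` through an i32 bitcast and apply it to `mag`. */
float copysign_f32(float mag, float sgn) {
  uint32_t m, s;
  memcpy(&m, &mag, sizeof m);
  memcpy(&s, &sgn, sizeof s);
  m = (m & 0x7fffffffu) | (s & 0x80000000u);
  memcpy(&mag, &m, sizeof mag);
  return mag;
}
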
@@ -1694,7 +1763,7 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- unsigned StackAlign = TM.getFrameInfo()->getStackAlignment();
+ unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
if (Align > StackAlign)
SP = DAG.getNode(ISD::AND, dl, VT, SP,
DAG.getConstant(-(uint64_t)Align, VT));
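The AND with -(uint64_t)Align built above is the usual power-of-two
round-down (stacks grow toward lower addresses); in C terms:

#include <stdint.h>

/* sp & -align == sp & ~(align - 1) when align is a power of two. */
uintptr_t align_sp_down(uintptr_t sp, uintptr_t align) {
  return sp & ~(align - 1);
}
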
@@ -1768,7 +1837,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
int SPFI = StackPtrFI->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(SPFI);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
@@ -1782,21 +1851,21 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
if (SrcSize > SlotSize)
Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
- SV, 0, SlotVT, false, false, SrcAlign);
+ PtrInfo, SlotVT, false, false, SrcAlign);
else {
assert(SrcSize == SlotSize && "Invalid store");
Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
- SV, 0, false, false, SrcAlign);
+ PtrInfo, false, false, SrcAlign);
}
// Result is a load from the stack slot.
if (SlotSize == DestSize)
- return DAG.getLoad(DestVT, dl, Store, FIPtr, SV, 0, false, false,
- DestAlign);
+ return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
+ false, false, DestAlign);
assert(SlotSize < DestSize && "Unknown extension!");
- return DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl, Store, FIPtr, SV, 0, SlotVT,
- false, false, DestAlign);
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
+ PtrInfo, SlotVT, false, false, DestAlign);
}
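EmitStackConvert reinterprets a value by storing it with one type and loading
it back with another; the equal-size case reduces to a memcpy-style bitcast
(sketch, not the DAG code):

#include <stdint.h>
#include <string.h>

/* Store an i32 into the slot, load the same bytes back as f32. */
float stack_convert_i32_to_f32(uint32_t src) {
  unsigned char slot[sizeof(float)];   /* the stack temporary        */
  memcpy(slot, &src, sizeof slot);     /* store with source type     */
  float dst;
  memcpy(&dst, slot, sizeof dst);      /* load with destination type */
  return dst;
}
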
SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
@@ -1810,11 +1879,11 @@ SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
StackPtr,
- PseudoSourceValue::getFixedStack(SPFI), 0,
+ MachinePointerInfo::getFixedStack(SPFI),
Node->getValueType(0).getVectorElementType(),
false, false, 0);
return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
- PseudoSourceValue::getFixedStack(SPFI), 0,
+ MachinePointerInfo::getFixedStack(SPFI),
false, false, 0);
}
@@ -1888,7 +1957,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, Alignment);
}
@@ -1924,7 +1993,6 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
bool isSigned) {
- assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
// The input chain to this libcall is the entry node of the function.
// Legalizing the call will automatically add the previous call to the
// dependence.
@@ -1945,12 +2013,20 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
// Splice the libcall in wherever FindInputOutputChains tells us to.
const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+
+  // isTailCall may be true since the callee does not reference the caller's
+  // stack frame. Check that the call really is in a tail-call position.
+ bool isTailCall = isInTailCallPosition(DAG, Node, TLI);
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), false,
+ 0, TLI.getLibcallCallingConv(LC), isTailCall,
/*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
+ if (!CallInfo.second.getNode())
+ // It's a tailcall, return the chain (which is the DAG root).
+ return DAG.getRoot();
+
// Legalize the call sequence, starting with the chain. This will advance
// the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
// was added by LowerCallTo (guaranteeing proper serialization of calls).
@@ -1964,7 +2040,6 @@ std::pair<SDValue, SDValue>
SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node,
bool isSigned) {
- assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
SDValue InChain = Node->getOperand(0);
TargetLowering::ArgListTy Args;
@@ -1985,7 +2060,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), false,
+ 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
/*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
@@ -2064,16 +2139,17 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
}
// store the lo of the constructed double - based on integer input
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
- Op0Mapped, Lo, NULL, 0,
+ Op0Mapped, Lo, MachinePointerInfo(),
false, false, 0);
// initial hi portion of constructed double
SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
// store the hi of the constructed double - biased exponent
- SDValue Store2=DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0,
- false, false, 0);
+ SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi,
+ MachinePointerInfo(),
+ false, false, 0);
// load the constructed double
- SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0,
- false, false, 0);
+ SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot,
+ MachinePointerInfo(), false, false, 0);
// FP constant to bias correct the final result
SDValue Bias = DAG.getConstantFP(isSigned ?
BitsToDouble(0x4330000080000000ULL) :
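For reference, the two stores and the f64 load in this hunk build the value
2^52 + x (high word 0x43300000 is the biased exponent of 2^52) and then
subtract the bias. A hedged C sketch of the unsigned i32 case; the signed
case maps the input with XOR 0x80000000 and uses the 0x4330000080000000 bias
shown above:

#include <stdint.h>
#include <string.h>

/* 2^52 + x is exact for any 32-bit x, so the subtraction recovers x. */
double u32_to_f64(uint32_t x) {
  uint64_t bits = (0x43300000ull << 32) | x;  /* hi: biased exponent, lo: x */
  double d;
  memcpy(&d, &bits, sizeof d);
  return d - 4503599627370496.0;              /* subtract 2^52 */
}
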
@@ -2116,17 +2192,40 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
DAG.getConstant(32, MVT::i64));
SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
- SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr);
- SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr);
+ SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
+ SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
TwoP84PlusTwoP52);
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
}
- // Implementation of unsigned i64 to f32. This implementation has the
- // advantage of performing rounding correctly.
+ // Implementation of unsigned i64 to f32.
// TODO: Generalize this for use with other types.
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
+ // For unsigned conversions, convert them to signed conversions using the
+ // algorithm from the x86_64 __floatundidf in compiler_rt.
+ if (!isSigned) {
+ SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
+
+ SDValue ShiftConst = DAG.getConstant(1, TLI.getShiftAmountTy());
+ SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
+ SDValue AndConst = DAG.getConstant(1, MVT::i64);
+ SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
+ SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
+
+ SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
+ SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
+
+ // TODO: This really should be implemented using a branch rather than a
+ // select. We happen to get lucky and machinesink does the right
+ // thing most of the time. This would be a good candidate for a
+  // pseudo-op, or, even better, for whole-function isel.
+ SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
+ Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
+ return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast);
+ }
+
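The Fast/Slow select added here follows compiler-rt's __floatundisf scheme:
when the sign bit is set, halve with the low bit OR'ed back in (round-to-odd)
so that the final doubling rounds correctly. A C sketch, illustrative only:

#include <stdint.h>

float u64_to_f32(uint64_t x) {
  if ((int64_t)x >= 0)                     /* sign bit clear: Fast path */
    return (float)(int64_t)x;
  uint64_t halved = (x >> 1) | (x & 1);    /* Shr, And, Or              */
  float f = (float)(int64_t)halved;        /* SignCvt                   */
  return f + f;                            /* Slow = SignCvt + SignCvt  */
}
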
+ // Otherwise, implement the fully general conversion.
EVT SHVT = TLI.getShiftAmountTy();
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
@@ -2140,7 +2239,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0);
SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
- ISD::SETUGE);
+ ISD::SETUGE);
SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
@@ -2155,7 +2254,6 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2);
return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
DAG.getIntPtrConstant(0));
-
}
SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
@@ -2189,13 +2287,13 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue FudgeInReg;
if (DestVT == MVT::f32)
FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, Alignment);
else {
FudgeInReg =
- LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl,
+ LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
MVT::f32, false, false, Alignment));
}
@@ -2332,6 +2430,18 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
}
}
+/// SplatByte - Distribute ByteVal over NumBits bits.
+// FIXME: Move this helper to a common place.
+static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) {
+ APInt Val = APInt(NumBits, ByteVal);
+ unsigned Shift = 8;
+ for (unsigned i = NumBits; i > 8; i >>= 1) {
+ Val = (Val << Shift) | Val;
+ Shift <<= 1;
+ }
+ return Val;
+}
+
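SplatByte doubles the populated width on each iteration, so
SplatByte(32, 0x55) yields 0x55555555. An equivalent C version for widths up
to 64 bits (sketch):

#include <stdint.h>

uint64_t splat_byte(unsigned num_bits, uint8_t byte_val) {
  uint64_t val = byte_val;
  for (unsigned shift = 8; shift < num_bits; shift <<= 1)
    val |= val << shift;        /* 0x55 -> 0x5555 -> 0x55555555 -> ... */
  return val;
}
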
/// ExpandBitCount - Expand the specified bitcount instruction into operations.
///
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
@@ -2339,26 +2449,45 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
switch (Opc) {
default: assert(0 && "Cannot expand this yet!");
case ISD::CTPOP: {
- static const uint64_t mask[6] = {
- 0x5555555555555555ULL, 0x3333333333333333ULL,
- 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
- 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
- };
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy();
- unsigned len = VT.getSizeInBits();
- for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
- //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
- unsigned EltSize = VT.isVector() ?
- VT.getVectorElementType().getSizeInBits() : len;
- SDValue Tmp2 = DAG.getConstant(APInt(EltSize, mask[i]), VT);
- SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
- Op = DAG.getNode(ISD::ADD, dl, VT,
- DAG.getNode(ISD::AND, dl, VT, Op, Tmp2),
- DAG.getNode(ISD::AND, dl, VT,
- DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3),
- Tmp2));
- }
+ unsigned Len = VT.getSizeInBits();
+
+ assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
+ "CTPOP not implemented for this type.");
+
+ // This is the "best" algorithm from
+ // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+
+ SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT);
+ SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT);
+ SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT);
+ SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT);
+
+ // v = v - ((v >> 1) & 0x55555555...)
+ Op = DAG.getNode(ISD::SUB, dl, VT, Op,
+ DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::SRL, dl, VT, Op,
+ DAG.getConstant(1, ShVT)),
+ Mask55));
+ // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
+ Op = DAG.getNode(ISD::ADD, dl, VT,
+ DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
+ DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::SRL, dl, VT, Op,
+ DAG.getConstant(2, ShVT)),
+ Mask33));
+ // v = (v + (v >> 4)) & 0x0F0F0F0F...
+ Op = DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::ADD, dl, VT, Op,
+ DAG.getNode(ISD::SRL, dl, VT, Op,
+ DAG.getConstant(4, ShVT))),
+ Mask0F);
+ // v = (v * 0x01010101...) >> (Len - 8)
+ Op = DAG.getNode(ISD::SRL, dl, VT,
+ DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
+ DAG.getConstant(Len - 8, ShVT));
+
return Op;
}
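Instantiated for Len = 32, the four steps above are the familiar parallel
popcount (the final shift is Len - 8 = 24):

#include <stdint.h>

uint32_t popcount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                 /* 2-bit sums    */
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); /* 4-bit sums    */
  v = (v + (v >> 4)) & 0x0f0f0f0fu;                 /* byte sums     */
  return (v * 0x01010101u) >> 24;                   /* add the bytes */
}
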
case ISD::CTLZ: {
@@ -2516,9 +2645,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::PREFETCH:
case ISD::VAEND:
case ISD::EH_SJLJ_LONGJMP:
+ case ISD::EH_SJLJ_DISPATCHSETUP:
+ // If the target didn't expand these, there's nothing to do, so just
+ // preserve the chain and be done.
Results.push_back(Node->getOperand(0));
break;
case ISD::EH_SJLJ_SETJMP:
+ // If the target didn't expand this, just return 'zero' and preserve the
+ // chain.
Results.push_back(DAG.getConstant(0, MVT::i32));
Results.push_back(Node->getOperand(0));
break;
@@ -2527,7 +2661,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TargetLowering::ArgListTy Args;
std::pair<SDValue, SDValue> CallResult =
TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C, false,
+ false, false, false, false, 0, CallingConv::C,
+ /*isTailCall=*/false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__sync_synchronize",
TLI.getPointerTy()),
@@ -2538,7 +2673,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
// By default, atomic intrinsics are marked Legal and lowered. Targets
// which don't support them directly, however, may want libcalls, in which
// case they mark them Expand, and we get here.
- // FIXME: Unimplemented for now. Add libcalls.
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
@@ -2578,7 +2712,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TargetLowering::ArgListTy Args;
std::pair<SDValue, SDValue> CallResult =
TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C, false,
+ false, false, false, false, 0, CallingConv::C,
+ /*isTailCall=*/false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG, dl);
@@ -2586,7 +2721,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::FP_ROUND:
- case ISD::BIT_CONVERT:
+ case ISD::BITCAST:
Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
Node->getValueType(0), dl);
Results.push_back(Tmp1);
@@ -2637,8 +2772,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SDValue True, False;
EVT VT = Node->getOperand(0).getValueType();
EVT NVT = Node->getValueType(0);
- const uint64_t zero[] = {0, 0};
- APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
+ APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
APInt x = APInt::getSignBit(NVT.getSizeInBits());
(void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
Tmp1 = DAG.getConstantFP(apf, VT);
@@ -2662,8 +2796,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Tmp2 = Node->getOperand(1);
unsigned Align = Node->getConstantOperandVal(3);
- SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
- false, false, 0);
+ SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
+ MachinePointerInfo(V), false, false, 0);
SDValue VAList = VAListLoad;
if (Align > TLI.getMinStackArgumentAlignment()) {
@@ -2674,7 +2808,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TLI.getPointerTy()));
VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
- DAG.getConstant(-Align,
+ DAG.getConstant(-(int64_t)Align,
TLI.getPointerTy()));
}
@@ -2684,10 +2818,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, V, 0,
- false, false, 0);
+ Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
+ MachinePointerInfo(V), false, false, 0);
// Load the actual argument out of the pointer VAList
- Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0,
+ Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
false, false, 0));
Results.push_back(Results[0].getValue(1));
break;
@@ -2698,16 +2832,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
- Node->getOperand(2), VS, 0, false, false, 0);
- Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), VD, 0,
- false, false, 0);
+ Node->getOperand(2), MachinePointerInfo(VS),
+ false, false, 0);
+ Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
+ MachinePointerInfo(VD), false, false, 0);
Results.push_back(Tmp1);
break;
}
case ISD::EXTRACT_VECTOR_ELT:
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
// This must be an access of the only element. Return it.
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
+ Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
Node->getOperand(0));
else
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
@@ -2716,6 +2851,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EXTRACT_SUBVECTOR:
Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
break;
+ case ISD::INSERT_SUBVECTOR:
+ Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
+ break;
case ISD::CONCAT_VECTORS: {
Results.push_back(ExpandVectorBuildThroughStack(Node));
break;
@@ -3094,14 +3232,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
- } else {
- // FIXME: We should be able to fall back to a libcall with an illegal
- // type in some cases.
- // Also, we can fall back to a division in some cases, but that's a big
- // performance hit in the general case.
- assert(TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
- VT.getSizeInBits() * 2)) &&
- "Don't know how to expand this operation yet!");
+ } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits() * 2))) {
EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
@@ -3110,6 +3242,30 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
DAG.getIntPtrConstant(0));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(1));
+ } else {
+ // We can fall back to a libcall with an illegal type for the MUL if we
+ // have a libcall big enough.
+ // Also, we can fall back to a division in some cases, but that's a big
+ // performance hit in the general case.
+ EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ if (WideVT == MVT::i16)
+ LC = RTLIB::MUL_I16;
+ else if (WideVT == MVT::i32)
+ LC = RTLIB::MUL_I32;
+ else if (WideVT == MVT::i64)
+ LC = RTLIB::MUL_I64;
+ else if (WideVT == MVT::i128)
+ LC = RTLIB::MUL_I128;
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
+ LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
+ RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
+
+ SDValue Ret = ExpandLibCall(LC, Node, isSigned);
+ BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Ret);
+ TopHalf = DAG.getNode(ISD::SRL, dl, Ret.getValueType(), Ret,
+ DAG.getConstant(VT.getSizeInBits(), TLI.getPointerTy()));
+ TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, TopHalf);
}
if (isSigned) {
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
@@ -3165,8 +3321,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
- SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, PTy, dl, Chain, Addr,
- PseudoSourceValue::getJumpTable(), 0, MemVT,
+ SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
+ MachinePointerInfo::getJumpTable(), MemVT,
false, false, 0);
Addr = LD;
if (TM.getRelocationModel() == Reloc::PIC_) {
@@ -3329,8 +3485,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
case ISD::XOR: {
unsigned ExtOp, TruncOp;
if (OVT.isVector()) {
- ExtOp = ISD::BIT_CONVERT;
- TruncOp = ISD::BIT_CONVERT;
+ ExtOp = ISD::BITCAST;
+ TruncOp = ISD::BITCAST;
} else {
assert(OVT.isInteger() && "Cannot promote logic operation");
ExtOp = ISD::ANY_EXTEND;
@@ -3347,8 +3503,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
case ISD::SELECT: {
unsigned ExtOp, TruncOp;
if (Node->getValueType(0).isVector()) {
- ExtOp = ISD::BIT_CONVERT;
- TruncOp = ISD::BIT_CONVERT;
+ ExtOp = ISD::BITCAST;
+ TruncOp = ISD::BITCAST;
} else if (Node->getValueType(0).isInteger()) {
ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE;
@@ -3375,12 +3531,12 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
// Cast the two input vectors.
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
- Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));
+ Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));
// Convert the shuffle mask to the right # elements.
Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1);
+ Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
Results.push_back(Tmp1);
break;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 650ee5a..2775212 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -55,7 +55,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
#endif
llvm_unreachable("Do not know how to soften the result of this operator!");
- case ISD::BIT_CONVERT: R = SoftenFloatRes_BIT_CONVERT(N); break;
+ case ISD::BITCAST: R = SoftenFloatRes_BITCAST(N); break;
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
case ISD::ConstantFP:
R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N));
@@ -102,7 +102,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
SetSoftenedFloat(SDValue(N, ResNo), R);
}
-SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N) {
return BitConvertToInteger(N->getOperand(0));
}
@@ -133,8 +133,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FABS(SDNode *N) {
unsigned Size = NVT.getSizeInBits();
// Mask = ~(1 << (Size-1))
- SDValue Mask = DAG.getConstant(APInt::getAllOnesValue(Size).clear(Size-1),
- NVT);
+ APInt API = APInt::getAllOnesValue(Size);
+ API.clearBit(Size-1);
+ SDValue Mask = DAG.getConstant(API, NVT);
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return DAG.getNode(ISD::AND, N->getDebugLoc(), NVT, Op, Mask);
}
@@ -455,7 +456,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
- L->getSrcValue(), L->getSrcValueOffset(), NVT,
+ L->getPointerInfo(), NVT,
L->isVolatile(), L->isNonTemporal(), L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -466,8 +467,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
// Do a non-extending load followed by FP_EXTEND.
NewL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
L->getMemoryVT(), dl, L->getChain(),
- L->getBasePtr(), L->getOffset(),
- L->getSrcValue(), L->getSrcValueOffset(),
+ L->getBasePtr(), L->getOffset(), L->getPointerInfo(),
L->getMemoryVT(), L->isVolatile(),
L->isNonTemporal(), L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
@@ -558,7 +558,7 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to soften this operator's operand!");
- case ISD::BIT_CONVERT: Res = SoftenFloatOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
@@ -670,8 +670,8 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
}
}
-SDValue DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) {
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0),
+SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
GetSoftenedFloat(N->getOperand(0)));
}
@@ -780,7 +780,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
Val = GetSoftenedFloat(Val);
return DAG.getStore(ST->getChain(), dl, Val, ST->getBasePtr(),
- ST->getSrcValue(), ST->getSrcValueOffset(),
+ ST->getPointerInfo(),
ST->isVolatile(), ST->isNonTemporal(),
ST->getAlignment());
}
@@ -816,7 +816,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
- case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
+ case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
@@ -1110,9 +1110,8 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
- Hi = DAG.getExtLoad(LD->getExtensionType(), NVT, dl, Chain, Ptr,
- LD->getSrcValue(), LD->getSrcValueOffset(),
- LD->getMemoryVT(), LD->isVolatile(),
+ Hi = DAG.getExtLoad(LD->getExtensionType(), dl, NVT, Chain, Ptr,
+ LD->getPointerInfo(), LD->getMemoryVT(), LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
// Remember the chain.
@@ -1222,7 +1221,7 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to expand this operator's operand!");
- case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
@@ -1421,7 +1420,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
GetExpandedOp(ST->getValue(), Lo, Hi);
return DAG.getTruncStore(Chain, N->getDebugLoc(), Hi, Ptr,
- ST->getSrcValue(), ST->getSrcValueOffset(),
+ ST->getPointerInfo(),
ST->getMemoryVT(), ST->isVolatile(),
ST->isNonTemporal(), ST->getAlignment());
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index f8c5890..f0752df 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -49,7 +49,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
llvm_unreachable("Do not know how to promote this operator!");
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
- case ISD::BIT_CONVERT: Res = PromoteIntRes_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = PromoteIntRes_BITCAST(N); break;
case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break;
case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break;
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
@@ -143,7 +143,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) {
SDValue Res = DAG.getAtomic(N->getOpcode(), N->getDebugLoc(),
N->getMemoryVT(),
N->getChain(), N->getBasePtr(),
- Op2, N->getSrcValue(), N->getAlignment());
+ Op2, N->getMemOperand());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
@@ -155,14 +155,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
SDValue Op3 = GetPromotedInteger(N->getOperand(3));
SDValue Res = DAG.getAtomic(N->getOpcode(), N->getDebugLoc(),
N->getMemoryVT(), N->getChain(), N->getBasePtr(),
- Op2, Op3, N->getSrcValue(), N->getAlignment());
+ Op2, Op3, N->getMemOperand());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
return Res;
}
-SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
@@ -179,8 +179,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
case PromoteInteger:
if (NOutVT.bitsEq(NInVT))
// The input promotes to the same size. Convert the promoted value.
- return DAG.getNode(ISD::BIT_CONVERT, dl,
- NOutVT, GetPromotedInteger(InOp));
+ return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetPromotedInteger(InOp));
break;
case SoftenFloat:
// Promote the integer operand by hand.
@@ -193,7 +192,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
BitConvertToInteger(GetScalarizedVector(InOp)));
case SplitVector: {
- // For example, i32 = BIT_CONVERT v2i16 on alpha. Convert the split
+ // For example, i32 = BITCAST v2i16 on alpha. Convert the split
// pieces of the input into integers and reassemble in the final type.
SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi);
@@ -207,12 +206,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
EVT::getIntegerVT(*DAG.getContext(),
NOutVT.getSizeInBits()),
JoinIntegers(Lo, Hi));
- return DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, InOp);
+ return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
}
case WidenVector:
if (OutVT.bitsEq(NInVT))
// The input is widened to the same size. Convert to the widened value.
- return DAG.getNode(ISD::BIT_CONVERT, dl, OutVT, GetWidenedVector(InOp));
+ return DAG.getNode(ISD::BITCAST, dl, OutVT, GetWidenedVector(InOp));
}
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
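The SplitVector case above converts each half of the split vector into an integer and glues them back together with JoinIntegers before the final bitcast. A minimal sketch of that reassembly in plain C++ (join_halves is an illustrative name, not an LLVM API):

    #include <cstdint>

    // Reassemble a 32-bit integer from two 16-bit halves, as the
    // JoinIntegers call above does conceptually. On big-endian targets
    // the legalizer swaps Lo and Hi first, so this layout still holds.
    uint32_t join_halves(uint16_t lo, uint16_t hi) {
      return (uint32_t)lo | ((uint32_t)hi << 16);
    }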
@@ -293,7 +292,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
// value was zero. This can be handled by setting the bit just off
// the top of the original type.
APInt TopBit(NVT.getSizeInBits(), 0);
- TopBit.set(OVT.getSizeInBits());
+ TopBit.setBit(OVT.getSizeInBits());
Op = DAG.getNode(ISD::OR, dl, NVT, Op, DAG.getConstant(TopBit, NVT));
return DAG.getNode(ISD::CTTZ, dl, NVT, Op);
}
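The setBit call above is also a good place to see why the guard bit exists: after promotion the zero-extended input has extra high zero bits, so CTTZ of 0 would report NVT bits instead of OVT bits. Setting the bit just off the top of the original type caps the count. A sketch, assuming the GCC/Clang __builtin_ctz builtin is available:

    #include <cstdint>

    // Count trailing zeros of a 16-bit value with a 32-bit cttz.
    unsigned cttz16_via_32(uint16_t x) {
      uint32_t wide = (uint32_t)x | (1u << 16); // guard bit just off the top
      return __builtin_ctz(wide);               // yields 16 when x == 0
    }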
@@ -371,8 +370,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
ISD::LoadExtType ExtType =
ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
DebugLoc dl = N->getDebugLoc();
- SDValue Res = DAG.getExtLoad(ExtType, NVT, dl, N->getChain(), N->getBasePtr(),
- N->getSrcValue(), N->getSrcValueOffset(),
+ SDValue Res = DAG.getExtLoad(ExtType, dl, NVT, N->getChain(), N->getBasePtr(),
+ N->getPointerInfo(),
N->getMemoryVT(), N->isVolatile(),
N->isNonTemporal(), N->getAlignment());
@@ -549,6 +548,48 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
return Res;
}
+SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
+ // Promote the overflow bit trivially.
+ if (ResNo == 1)
+ return PromoteIntRes_Overflow(N);
+
+ SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
+ DebugLoc DL = N->getDebugLoc();
+ EVT SmallVT = LHS.getValueType();
+
+ // To determine whether the result overflowed, we extend the inputs to the
+ // larger type, do the multiply there, and then check the high bits of the
+ // result to see whether overflow happened.
+ if (N->getOpcode() == ISD::SMULO) {
+ LHS = SExtPromotedInteger(LHS);
+ RHS = SExtPromotedInteger(RHS);
+ } else {
+ LHS = ZExtPromotedInteger(LHS);
+ RHS = ZExtPromotedInteger(RHS);
+ }
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS);
+
+ // Overflow occurred iff the high part of the result does not zero/sign-extend
+ // the low part.
+ SDValue Overflow;
+ if (N->getOpcode() == ISD::UMULO) {
+ // Unsigned overflow occurred iff the high part is non-zero.
+ SDValue Hi = DAG.getNode(ISD::SRL, DL, Mul.getValueType(), Mul,
+ DAG.getIntPtrConstant(SmallVT.getSizeInBits()));
+ Overflow = DAG.getSetCC(DL, N->getValueType(1), Hi,
+ DAG.getConstant(0, Hi.getValueType()), ISD::SETNE);
+ } else {
+ // Signed overflow occurred iff the high part does not sign extend the low.
+ SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Mul.getValueType(),
+ Mul, DAG.getValueType(SmallVT));
+ Overflow = DAG.getSetCC(DL, N->getValueType(1), SExt, Mul, ISD::SETNE);
+ }
+
+ // Use the calculated overflow everywhere.
+ ReplaceValueWith(SDValue(N, 1), Overflow);
+ return Mul;
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) {
// Zero extend the input.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
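The new PromoteIntRes_XMULO performs the multiply in the promoted type and recovers the overflow flag from the high bits, rather than only promoting the boolean as the deleted version further down did. The same check in standalone C++, for an i32 multiply done in i64 (illustrative code, not the LLVM API):

    #include <cstdint>

    // Unsigned: overflow iff the high half of the wide product is non-zero.
    bool umulo32(uint32_t a, uint32_t b, uint32_t &lo) {
      uint64_t wide = (uint64_t)a * (uint64_t)b; // zero-extended operands
      lo = (uint32_t)wide;
      return (wide >> 32) != 0;
    }

    // Signed: overflow iff the wide product is not the sign extension of
    // its low half (the SIGN_EXTEND_INREG + SETNE pair above).
    bool smulo32(int32_t a, int32_t b, int32_t &lo) {
      int64_t wide = (int64_t)a * (int64_t)b;    // sign-extended operands
      lo = (int32_t)(uint32_t)wide;
      return wide != (int64_t)lo;
    }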
@@ -602,11 +643,6 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
return Res;
}
-SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
- assert(ResNo == 1 && "Only boolean result promotion currently supported!");
- return PromoteIntRes_Overflow(N);
-}
-
//===----------------------------------------------------------------------===//
// Integer Operand Promotion
//===----------------------------------------------------------------------===//
@@ -631,7 +667,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
llvm_unreachable("Do not know how to promote this operator's operand!");
case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break;
- case ISD::BIT_CONVERT: Res = PromoteIntOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = PromoteIntOp_BITCAST(N); break;
case ISD::BR_CC: Res = PromoteIntOp_BR_CC(N, OpNo); break;
case ISD::BRCOND: Res = PromoteIntOp_BRCOND(N, OpNo); break;
case ISD::BUILD_PAIR: Res = PromoteIntOp_BUILD_PAIR(N); break;
@@ -713,7 +749,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), N->getValueType(0), Op);
}
-SDValue DAGTypeLegalizer::PromoteIntOp_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntOp_BITCAST(SDNode *N) {
// This should only occur in unusual situations like bitcasting to an
// x86_fp80, so just turn it into a store+load
return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
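PromoteIntOp_BITCAST falls back to a stack store followed by a load at the destination type. The portable C++ equivalent of that round trip is memcpy-based type punning (bits_to_double is an illustrative name):

    #include <cstring>
    #include <cstdint>

    // Reinterpret an i64 bit pattern as f64 by going through memory,
    // which is what CreateStackStoreLoad does with a stack slot.
    double bits_to_double(uint64_t bits) {
      double d;
      std::memcpy(&d, &bits, sizeof d); // the store and the load
      return d;
    }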
@@ -889,7 +925,6 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SINT_TO_FP(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
SDValue Ch = N->getChain(), Ptr = N->getBasePtr();
- int SVOffset = N->getSrcValueOffset();
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
@@ -898,8 +933,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
SDValue Val = GetPromotedInteger(N->getValue()); // Get promoted value.
// Truncate the value and store the result.
- return DAG.getTruncStore(Ch, dl, Val, Ptr, N->getSrcValue(),
- SVOffset, N->getMemoryVT(),
+ return DAG.getTruncStore(Ch, dl, Val, Ptr, N->getPointerInfo(),
+ N->getMemoryVT(),
isVolatile, isNonTemporal, Alignment);
}
@@ -951,7 +986,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
- case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
+ case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
@@ -978,6 +1013,23 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::UREM: ExpandIntRes_UREM(N, Lo, Hi); break;
case ISD::ZERO_EXTEND: ExpandIntRes_ZERO_EXTEND(N, Lo, Hi); break;
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_SWAP: {
+ std::pair<SDValue, SDValue> Tmp = ExpandAtomic(N);
+ SplitInteger(Tmp.first, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 1), Tmp.second);
+ break;
+ }
+
case ISD::AND:
case ISD::OR:
case ISD::XOR: ExpandIntRes_Logical(N, Lo, Hi); break;
@@ -999,6 +1051,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SSUBO: ExpandIntRes_SADDSUBO(N, Lo, Hi); break;
case ISD::UADDO:
case ISD::USUBO: ExpandIntRes_UADDSUBO(N, Lo, Hi); break;
+ case ISD::UMULO:
+ case ISD::SMULO: ExpandIntRes_UMULSMULO(N, Lo, Hi); break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -1006,11 +1060,98 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
SetExpandedInteger(SDValue(N, ResNo), Lo, Hi);
}
+/// Lower an atomic node to the appropriate builtin call.
+std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
+ unsigned Opc = Node->getOpcode();
+ MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
+ RTLIB::Libcall LC;
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unhandled atomic intrinsic Expand!");
+ break;
+ case ISD::ATOMIC_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
+ }
+ break;
+ case ISD::ATOMIC_CMP_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_ADD:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_NAND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
+ }
+ break;
+ }
+
+ return ExpandChainLibCall(LC, Node, false);
+}
+
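ExpandAtomic selects the libcall purely from the opcode and the memory width; the chosen entry points are the __sync_* routines that GCC and Clang also expose as builtins. For example, the RTLIB::SYNC_FETCH_AND_ADD_4 case corresponds to:

    #include <cstdint>

    // Atomically add v to *p and return the value *p held before the add,
    // via the GCC/Clang __sync builtin (assumed available on the target).
    uint32_t atomic_fetch_add32(uint32_t *p, uint32_t v) {
      return __sync_fetch_and_add(p, v);
    }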
/// ExpandShiftByConstant - N is a shift by a value that needs to be expanded,
/// and the shift amount is a constant 'Amt'. Expand the operation.
void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue &Lo, SDValue &Hi) {
- DebugLoc dl = N->getDebugLoc();
+ DebugLoc DL = N->getDebugLoc();
// Expand the incoming operand to be shifted, so that we have its parts
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
@@ -1025,8 +1166,8 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
Lo = Hi = DAG.getConstant(0, NVT);
} else if (Amt > NVTBits) {
Lo = DAG.getConstant(0, NVT);
- Hi = DAG.getNode(ISD::SHL, dl,
- NVT, InL, DAG.getConstant(Amt-NVTBits,ShTy));
+ Hi = DAG.getNode(ISD::SHL, DL,
+ NVT, InL, DAG.getConstant(Amt-NVTBits, ShTy));
} else if (Amt == NVTBits) {
Lo = DAG.getConstant(0, NVT);
Hi = InL;
@@ -1034,17 +1175,17 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
TLI.isOperationLegalOrCustom(ISD::ADDC,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT))) {
// Emit this X << 1 as X+X.
- SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
+ SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
SDValue LoOps[2] = { InL, InL };
- Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
+ Lo = DAG.getNode(ISD::ADDC, DL, VTList, LoOps, 2);
SDValue HiOps[3] = { InH, InH, Lo.getValue(1) };
- Hi = DAG.getNode(ISD::ADDE, dl, VTList, HiOps, 3);
+ Hi = DAG.getNode(ISD::ADDE, DL, VTList, HiOps, 3);
} else {
- Lo = DAG.getNode(ISD::SHL, dl, NVT, InL, DAG.getConstant(Amt, ShTy));
- Hi = DAG.getNode(ISD::OR, dl, NVT,
- DAG.getNode(ISD::SHL, dl, NVT, InH,
+ Lo = DAG.getNode(ISD::SHL, DL, NVT, InL, DAG.getConstant(Amt, ShTy));
+ Hi = DAG.getNode(ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(Amt, ShTy)),
- DAG.getNode(ISD::SRL, dl, NVT, InL,
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(NVTBits-Amt, ShTy)));
}
return;
@@ -1055,43 +1196,43 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
Lo = DAG.getConstant(0, NVT);
Hi = DAG.getConstant(0, NVT);
} else if (Amt > NVTBits) {
- Lo = DAG.getNode(ISD::SRL, dl,
+ Lo = DAG.getNode(ISD::SRL, DL,
NVT, InH, DAG.getConstant(Amt-NVTBits,ShTy));
Hi = DAG.getConstant(0, NVT);
} else if (Amt == NVTBits) {
Lo = InH;
Hi = DAG.getConstant(0, NVT);
} else {
- Lo = DAG.getNode(ISD::OR, dl, NVT,
- DAG.getNode(ISD::SRL, dl, NVT, InL,
+ Lo = DAG.getNode(ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, ShTy)),
- DAG.getNode(ISD::SHL, dl, NVT, InH,
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(NVTBits-Amt, ShTy)));
- Hi = DAG.getNode(ISD::SRL, dl, NVT, InH, DAG.getConstant(Amt, ShTy));
+ Hi = DAG.getNode(ISD::SRL, DL, NVT, InH, DAG.getConstant(Amt, ShTy));
}
return;
}
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
if (Amt > VTBits) {
- Hi = Lo = DAG.getNode(ISD::SRA, dl, NVT, InH,
+ Hi = Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits-1, ShTy));
} else if (Amt > NVTBits) {
- Lo = DAG.getNode(ISD::SRA, dl, NVT, InH,
+ Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(Amt-NVTBits, ShTy));
- Hi = DAG.getNode(ISD::SRA, dl, NVT, InH,
+ Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits-1, ShTy));
} else if (Amt == NVTBits) {
Lo = InH;
- Hi = DAG.getNode(ISD::SRA, dl, NVT, InH,
+ Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits-1, ShTy));
} else {
- Lo = DAG.getNode(ISD::OR, dl, NVT,
- DAG.getNode(ISD::SRL, dl, NVT, InL,
+ Lo = DAG.getNode(ISD::OR, DL, NVT,
+ DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, ShTy)),
- DAG.getNode(ISD::SHL, dl, NVT, InH,
+ DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(NVTBits-Amt, ShTy)));
- Hi = DAG.getNode(ISD::SRA, dl, NVT, InH, DAG.getConstant(Amt, ShTy));
+ Hi = DAG.getNode(ISD::SRA, DL, NVT, InH, DAG.getConstant(Amt, ShTy));
}
}
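ExpandShiftByConstant reduces a shift of a 2*NVT-bit value to NVT-bit operations on its (Lo, Hi) halves, with separate arms for amounts above, equal to, and below NVTBits. A sketch of the SHL case for a 128-bit value held as two 64-bit words (plain C++; amt is assumed to lie in 1..127, matching the constant amount the caller has already range-checked):

    #include <cstdint>

    void shl128(uint64_t &lo, uint64_t &hi, unsigned amt) {
      if (amt >= 64) {        // the Amt >= NVTBits arms: Lo goes to zero
        hi = lo << (amt - 64);
        lo = 0;
      } else {                // general arm: OR both contributions into Hi
        hi = (hi << amt) | (lo >> (64 - amt));
        lo <<= amt;
      }
    }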
@@ -1269,7 +1410,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
// Do not generate ADDC/ADDE or SUBC/SUBE if the target does not support
// them. TODO: Teach operation legalization how to expand unsupported
// ADDC/ADDE/SUBC/SUBE. The problem is that these operations generate
- // a carry of type MVT::Flag, but there doesn't seem to be any way to
+ // a carry of type MVT::Glue, but there doesn't seem to be any way to
// generate a value of this type in the expanded code sequence.
bool hasCarry =
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
@@ -1277,7 +1418,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
if (hasCarry) {
- SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
+ SDVTList VTList = DAG.getVTList(NVT, MVT::Glue);
if (N->getOpcode() == ISD::ADD) {
Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
HiOps[2] = Lo.getValue(1);
@@ -1287,31 +1428,32 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
HiOps[2] = Lo.getValue(1);
Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
}
+ return;
+ }
+
+ if (N->getOpcode() == ISD::ADD) {
+ Lo = DAG.getNode(ISD::ADD, dl, NVT, LoOps, 2);
+ Hi = DAG.getNode(ISD::ADD, dl, NVT, HiOps, 2);
+ SDValue Cmp1 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo, LoOps[0],
+ ISD::SETULT);
+ SDValue Carry1 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp1,
+ DAG.getConstant(1, NVT),
+ DAG.getConstant(0, NVT));
+ SDValue Cmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo, LoOps[1],
+ ISD::SETULT);
+ SDValue Carry2 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp2,
+ DAG.getConstant(1, NVT), Carry1);
+ Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry2);
} else {
- if (N->getOpcode() == ISD::ADD) {
- Lo = DAG.getNode(ISD::ADD, dl, NVT, LoOps, 2);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, HiOps, 2);
- SDValue Cmp1 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo, LoOps[0],
- ISD::SETULT);
- SDValue Carry1 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp1,
- DAG.getConstant(1, NVT),
- DAG.getConstant(0, NVT));
- SDValue Cmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo, LoOps[1],
- ISD::SETULT);
- SDValue Carry2 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp2,
- DAG.getConstant(1, NVT), Carry1);
- Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry2);
- } else {
- Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps, 2);
- Hi = DAG.getNode(ISD::SUB, dl, NVT, HiOps, 2);
- SDValue Cmp =
- DAG.getSetCC(dl, TLI.getSetCCResultType(LoOps[0].getValueType()),
- LoOps[0], LoOps[1], ISD::SETULT);
- SDValue Borrow = DAG.getNode(ISD::SELECT, dl, NVT, Cmp,
- DAG.getConstant(1, NVT),
- DAG.getConstant(0, NVT));
- Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
- }
+ Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps, 2);
+ Hi = DAG.getNode(ISD::SUB, dl, NVT, HiOps, 2);
+ SDValue Cmp =
+ DAG.getSetCC(dl, TLI.getSetCCResultType(LoOps[0].getValueType()),
+ LoOps[0], LoOps[1], ISD::SETULT);
+ SDValue Borrow = DAG.getNode(ISD::SELECT, dl, NVT, Cmp,
+ DAG.getConstant(1, NVT),
+ DAG.getConstant(0, NVT));
+ Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
}
}
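When ADDC/ADDE are not legal, the rewritten block computes the carry explicitly: the low sum wrapped iff it compares unsigned-less-than an input (the code checks both inputs, though for a two-operand add one compare already decides it), and a subtract borrows iff the low operands compare unsigned-less-than. The same arithmetic in plain C++ for 128-bit values as 64-bit pairs:

    #include <cstdint>

    void add128(uint64_t &lo, uint64_t &hi, uint64_t alo, uint64_t ahi,
                uint64_t blo, uint64_t bhi) {
      lo = alo + blo;
      uint64_t carry = (lo < alo) ? 1 : 0;   // SETULT against an input
      hi = ahi + bhi + carry;
    }

    void sub128(uint64_t &lo, uint64_t &hi, uint64_t alo, uint64_t ahi,
                uint64_t blo, uint64_t bhi) {
      uint64_t borrow = (alo < blo) ? 1 : 0; // LoOps[0] SETULT LoOps[1]
      lo = alo - blo;
      hi = ahi - bhi - borrow;
    }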
@@ -1322,7 +1464,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N,
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
- SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
+ SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Glue);
SDValue LoOps[2] = { LHSL, RHSL };
SDValue HiOps[3] = { LHSH, RHSH };
@@ -1348,7 +1490,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
- SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
+ SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Glue);
SDValue LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
SDValue HiOps[3] = { LHSH, RHSH };
@@ -1437,7 +1579,7 @@ void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NBitWidth = NVT.getSizeInBits();
const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue();
- Lo = DAG.getConstant(APInt(Cst).trunc(NBitWidth), NVT);
+ Lo = DAG.getConstant(Cst.trunc(NBitWidth), NVT);
Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT);
}
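The constant case simply truncates for Lo and shifts-then-truncates for Hi (the change avoids copying the APInt before truncating). The split for a 128-bit constant into 64-bit parts, assuming the GCC/Clang unsigned __int128 extension:

    #include <cstdint>

    void split_const(unsigned __int128 c, uint64_t &lo, uint64_t &hi) {
      lo = (uint64_t)c;          // Cst.trunc(NBitWidth)
      hi = (uint64_t)(c >> 64);  // Cst.lshr(NBitWidth).trunc(NBitWidth)
    }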
@@ -1524,7 +1666,6 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
ISD::LoadExtType ExtType = N->getExtensionType();
- int SVOffset = N->getSrcValueOffset();
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
@@ -1535,7 +1676,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
if (N->getMemoryVT().bitsLE(NVT)) {
EVT MemVT = N->getMemoryVT();
- Lo = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getPointerInfo(),
MemVT, isVolatile, isNonTemporal, Alignment);
// Remember the chain.
@@ -1557,7 +1698,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
}
} else if (TLI.isLittleEndian()) {
// Little-endian - low bits are at low addresses.
- Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
unsigned ExcessBits =
@@ -1568,8 +1709,8 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(),
- SVOffset+IncrementSize, NEVT,
+ Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize), NEVT,
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
@@ -1586,7 +1727,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned ExcessBits = (EBytes - IncrementSize)*8;
// Load both the high bits and maybe some of the low bits.
- Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getPointerInfo(),
EVT::getIntegerVT(*DAG.getContext(),
MemVT.getSizeInBits() - ExcessBits),
isVolatile, isNonTemporal, Alignment);
@@ -1595,8 +1736,8 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
// Load the rest of the low bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, dl, Ch, Ptr, N->getSrcValue(),
- SVOffset+IncrementSize,
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize),
EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
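On the little-endian path the expanded load becomes two loads, the second at Ptr plus IncrementSize with its MachinePointerInfo rebased via getWithOffset and its alignment reduced by MinAlign. The resulting memory access pattern, sketched with memcpy standing in for the loads:

    #include <cstring>
    #include <cstdint>

    // Little-endian: low half at the base address, high half 8 bytes on.
    void load128_le(const unsigned char *p, uint64_t &lo, uint64_t &hi) {
      std::memcpy(&lo, p, 8);     // Lo = load at Ptr
      std::memcpy(&hi, p + 8, 8); // Hi = load at Ptr + IncrementSize
    }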
@@ -1987,6 +2128,31 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
ReplaceValueWith(SDValue(N, 1), Ofl);
}
+void DAGTypeLegalizer::ExpandIntRes_UMULSMULO(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+ EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() / 2);
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Ret = DAG.getNode(ISD::MUL, dl, LHS.getValueType(), LHS, RHS);
+ SplitInteger(Ret, Lo, Hi);
+
+ // Now calculate overflow.
+ SDValue Ofl;
+ if (N->getOpcode() == ISD::UMULO)
+ Ofl = DAG.getSetCC(dl, N->getValueType(1), Hi,
+ DAG.getConstant(0, VT), ISD::SETNE);
+ else {
+ SDValue Tmp = DAG.getConstant(VT.getSizeInBits() - 1, HalfVT);
+ Tmp = DAG.getNode(ISD::SRA, dl, HalfVT, Lo, Tmp);
+ Ofl = DAG.getSetCC(dl, N->getValueType(1), Hi, Tmp, ISD::SETNE);
+ }
+ ReplaceValueWith(SDValue(N, 1), Ofl);
+}
+
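ExpandIntRes_UMULSMULO does the multiply with a plain MUL, splits the product, and derives only the flag: unsigned overflow iff the high half is non-zero, signed overflow iff the high half is not the sign extension of the low half. The two checks for a 64-bit multiply split into 32-bit halves (plain C++; right-shifting a negative value is assumed to be arithmetic, as on mainstream compilers):

    #include <cstdint>

    bool umulo_ovf(uint32_t hi) {
      return hi != 0;                                // any high bits overflow
    }

    bool smulo_ovf(uint32_t lo, uint32_t hi) {
      uint32_t sign = (uint32_t)((int32_t)lo >> 31); // 0 or all-ones
      return hi != sign;                             // Hi must sign-extend Lo
    }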
void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
@@ -2078,7 +2244,7 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to expand this operator's operand!");
- case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
@@ -2308,7 +2474,6 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
- int SVOffset = N->getSrcValueOffset();
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
@@ -2319,14 +2484,16 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
if (N->getMemoryVT().bitsLE(NVT)) {
GetExpandedInteger(N->getValue(), Lo, Hi);
- return DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getSrcValue(), SVOffset,
+ return DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getPointerInfo(),
N->getMemoryVT(), isVolatile, isNonTemporal,
Alignment);
- } else if (TLI.isLittleEndian()) {
+ }
+
+ if (TLI.isLittleEndian()) {
// Little-endian - low bits are at low addresses.
GetExpandedInteger(N->getValue(), Lo, Hi);
- Lo = DAG.getStore(Ch, dl, Lo, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getStore(Ch, dl, Lo, Ptr, N->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
unsigned ExcessBits =
@@ -2337,50 +2504,49 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getSrcValue(),
- SVOffset+IncrementSize, NEVT,
- isVolatile, isNonTemporal,
+ Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize),
+ NEVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
- } else {
- // Big-endian - high bits are at low addresses. Favor aligned stores at
- // the cost of some bit-fiddling.
- GetExpandedInteger(N->getValue(), Lo, Hi);
-
- EVT ExtVT = N->getMemoryVT();
- unsigned EBytes = ExtVT.getStoreSize();
- unsigned IncrementSize = NVT.getSizeInBits()/8;
- unsigned ExcessBits = (EBytes - IncrementSize)*8;
- EVT HiVT = EVT::getIntegerVT(*DAG.getContext(),
- ExtVT.getSizeInBits() - ExcessBits);
+ }
- if (ExcessBits < NVT.getSizeInBits()) {
- // Transfer high bits from the top of Lo to the bottom of Hi.
- Hi = DAG.getNode(ISD::SHL, dl, NVT, Hi,
- DAG.getConstant(NVT.getSizeInBits() - ExcessBits,
- TLI.getPointerTy()));
- Hi = DAG.getNode(ISD::OR, dl, NVT, Hi,
- DAG.getNode(ISD::SRL, dl, NVT, Lo,
- DAG.getConstant(ExcessBits,
- TLI.getPointerTy())));
- }
+ // Big-endian - high bits are at low addresses. Favor aligned stores at
+ // the cost of some bit-fiddling.
+ GetExpandedInteger(N->getValue(), Lo, Hi);
+
+ EVT ExtVT = N->getMemoryVT();
+ unsigned EBytes = ExtVT.getStoreSize();
+ unsigned IncrementSize = NVT.getSizeInBits()/8;
+ unsigned ExcessBits = (EBytes - IncrementSize)*8;
+ EVT HiVT = EVT::getIntegerVT(*DAG.getContext(),
+ ExtVT.getSizeInBits() - ExcessBits);
+
+ if (ExcessBits < NVT.getSizeInBits()) {
+ // Transfer high bits from the top of Lo to the bottom of Hi.
+ Hi = DAG.getNode(ISD::SHL, dl, NVT, Hi,
+ DAG.getConstant(NVT.getSizeInBits() - ExcessBits,
+ TLI.getPointerTy()));
+ Hi = DAG.getNode(ISD::OR, dl, NVT, Hi,
+ DAG.getNode(ISD::SRL, dl, NVT, Lo,
+ DAG.getConstant(ExcessBits,
+ TLI.getPointerTy())));
+ }
- // Store both the high bits and maybe some of the low bits.
- Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getSrcValue(),
- SVOffset, HiVT, isVolatile, isNonTemporal,
- Alignment);
+ // Store both the high bits and maybe some of the low bits.
+ Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getPointerInfo(),
+ HiVT, isVolatile, isNonTemporal, Alignment);
- // Increment the pointer to the other half.
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
- DAG.getIntPtrConstant(IncrementSize));
- // Store the lowest ExcessBits bits in the second half.
- Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getSrcValue(),
- SVOffset+IncrementSize,
- EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
- isVolatile, isNonTemporal,
- MinAlign(Alignment, IncrementSize));
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
- }
+ // Increment the pointer to the other half.
+ Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ DAG.getIntPtrConstant(IncrementSize));
+ // Store the lowest ExcessBits bits in the second half.
+ Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize),
+ EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
+ isVolatile, isNonTemporal,
+ MinAlign(Alignment, IncrementSize));
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
SDValue DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) {
@@ -2460,8 +2626,10 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
// Load the value out, extending it from f32 to the destination float type.
// FIXME: Avoid the extend by constructing the right constant pool?
- SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, DstVT, dl, DAG.getEntryNode(),
- FudgePtr, NULL, 0, MVT::f32,
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, DstVT, DAG.getEntryNode(),
+ FudgePtr,
+ MachinePointerInfo::getConstantPool(),
+ MVT::f32,
false, false, Alignment);
return DAG.getNode(ISD::FADD, dl, DstVT, SignedConv, Fudge);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 6e56c98..cedda7e 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -714,6 +714,11 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
if (M->getNodeId() == Processed)
RemapValue(NewVal);
DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ // OldVal may be a target of the ReplacedValues map which was marked
+ // NewNode to force reanalysis because it was updated. Ensure that
+ // anything that ReplacedValues mapped to OldVal will now be mapped
+ // all the way to NewVal.
+ ReplacedValues[OldVal] = NewVal;
}
// The original node continues to exist in the DAG, marked NewNode.
}
@@ -858,7 +863,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
/// BitConvertToInteger - Convert to an integer of the same size.
SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
unsigned BitWidth = Op.getValueType().getSizeInBits();
- return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
}
@@ -869,7 +874,7 @@ SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements();
- return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
}
@@ -880,10 +885,11 @@ SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
// the source and destination types.
SDValue StackPtr = DAG.CreateStackTemporary(Op.getValueType(), DestVT);
// Emit a store to the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op, StackPtr, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Result is a load from the stack slot.
- return DAG.getLoad(DestVT, dl, Store, StackPtr, NULL, 0, false, false, 0);
+ return DAG.getLoad(DestVT, dl, Store, StackPtr, MachinePointerInfo(),
+ false, false, 0);
}
/// CustomLowerNode - Replace the node's results with custom code provided
@@ -1049,6 +1055,39 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
return CallInfo.first;
}
+// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
+// ExpandLibCall except that the first operand is the in-chain.
+std::pair<SDValue, SDValue>
+DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node,
+ bool isSigned) {
+ SDValue InChain = Node->getOperand(0);
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Node->getOperand(i).getValueType();
+ const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Node = Node->getOperand(i);
+ Entry.Ty = ArgTy;
+ Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+ SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
+ TLI.getPointerTy());
+
+ // Splice the libcall in wherever FindInputOutputChains tells us to.
+ const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ std::pair<SDValue, SDValue> CallInfo =
+ TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
+ /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, Node->getDebugLoc());
+
+ return CallInfo;
+}
+
/// PromoteTargetBoolean - Promote the given target boolean to a target boolean
/// of the given type. A target boolean is an integer value, not necessarily of
/// type i1, the bits of which conform to getBooleanContents.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index d560292..3f81bbb 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -99,7 +99,7 @@ private:
return SoftenFloat;
return ExpandFloat;
}
-
+
if (VT.getVectorNumElements() == 1)
return ScalarizeVector;
return SplitVector;
@@ -192,6 +192,10 @@ private:
SDValue MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps, bool isSigned,
DebugLoc dl);
+ std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node, bool isSigned);
+ std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
+
SDValue PromoteTargetBoolean(SDValue Bool, EVT VT);
void ReplaceValueWith(SDValue From, SDValue To);
void SplitInteger(SDValue Op, SDValue &Lo, SDValue &Hi);
@@ -244,7 +248,7 @@ private:
SDValue PromoteIntRes_AssertZext(SDNode *N);
SDValue PromoteIntRes_Atomic1(AtomicSDNode *N);
SDValue PromoteIntRes_Atomic2(AtomicSDNode *N);
- SDValue PromoteIntRes_BIT_CONVERT(SDNode *N);
+ SDValue PromoteIntRes_BITCAST(SDNode *N);
SDValue PromoteIntRes_BSWAP(SDNode *N);
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
SDValue PromoteIntRes_Constant(SDNode *N);
@@ -278,7 +282,7 @@ private:
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo);
SDValue PromoteIntOp_ANY_EXTEND(SDNode *N);
- SDValue PromoteIntOp_BIT_CONVERT(SDNode *N);
+ SDValue PromoteIntOp_BITCAST(SDNode *N);
SDValue PromoteIntOp_BUILD_PAIR(SDNode *N);
SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo);
@@ -344,6 +348,7 @@ private:
void ExpandIntRes_SADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_UMULSMULO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue &Lo, SDValue &Hi);
@@ -352,7 +357,7 @@ private:
// Integer Operand Expansion.
bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
- SDValue ExpandIntOp_BIT_CONVERT(SDNode *N);
+ SDValue ExpandIntOp_BITCAST(SDNode *N);
SDValue ExpandIntOp_BR_CC(SDNode *N);
SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N);
SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
@@ -387,7 +392,7 @@ private:
// Result Float to Integer Conversion.
void SoftenFloatResult(SDNode *N, unsigned OpNo);
- SDValue SoftenFloatRes_BIT_CONVERT(SDNode *N);
+ SDValue SoftenFloatRes_BITCAST(SDNode *N);
SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N);
SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N);
SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
@@ -426,7 +431,7 @@ private:
// Operand Float to Integer Conversion.
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
- SDValue SoftenFloatOp_BIT_CONVERT(SDNode *N);
+ SDValue SoftenFloatOp_BITCAST(SDNode *N);
SDValue SoftenFloatOp_BR_CC(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
@@ -515,7 +520,7 @@ private:
SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
SDValue ScalarizeVecRes_InregOp(SDNode *N);
- SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N);
+ SDValue ScalarizeVecRes_BITCAST(SDNode *N);
SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue ScalarizeVecRes_FPOWI(SDNode *N);
@@ -532,7 +537,7 @@ private:
// Vector Operand Scalarization: <1 x ty> -> ty.
bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo);
- SDValue ScalarizeVecOp_BIT_CONVERT(SDNode *N);
+ SDValue ScalarizeVecOp_BITCAST(SDNode *N);
SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N);
SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
@@ -557,7 +562,7 @@ private:
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
- void SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -577,11 +582,12 @@ private:
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_UnaryOp(SDNode *N);
- SDValue SplitVecOp_BIT_CONVERT(SDNode *N);
+ SDValue SplitVecOp_BITCAST(SDNode *N);
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N);
+ SDValue SplitVecOp_FP_ROUND(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Widening Support: LegalizeVectorTypes.cpp
@@ -603,7 +609,7 @@ private:
// Widen Vector Result Promotion.
void WidenVectorResult(SDNode *N, unsigned ResNo);
- SDValue WidenVecRes_BIT_CONVERT(SDNode* N);
+ SDValue WidenVecRes_BITCAST(SDNode* N);
SDValue WidenVecRes_BUILD_VECTOR(SDNode* N);
SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N);
SDValue WidenVecRes_CONVERT_RNDSAT(SDNode* N);
@@ -628,7 +634,7 @@ private:
// Widen Vector Operand.
bool WidenVectorOperand(SDNode *N, unsigned ResNo);
- SDValue WidenVecOp_BIT_CONVERT(SDNode *N);
+ SDValue WidenVecOp_BITCAST(SDNode *N);
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
@@ -721,7 +727,7 @@ private:
}
// Generic Result Expansion.
- void ExpandRes_BIT_CONVERT (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandRes_BITCAST (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -729,7 +735,7 @@ private:
void ExpandRes_VAARG (SDNode *N, SDValue &Lo, SDValue &Hi);
// Generic Operand Expansion.
- SDValue ExpandOp_BIT_CONVERT (SDNode *N);
+ SDValue ExpandOp_BITCAST (SDNode *N);
SDValue ExpandOp_BUILD_VECTOR (SDNode *N);
SDValue ExpandOp_EXTRACT_ELEMENT (SDNode *N);
SDValue ExpandOp_INSERT_VECTOR_ELT(SDNode *N);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 9c2b1d9..a75ae87 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -32,8 +32,7 @@ using namespace llvm;
// little/big-endian machines, followed by the Hi/Lo part. This means that
// they cannot be used as is on vectors, for which Lo is always stored first.
-void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
+void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
SDValue InOp = N->getOperand(0);
@@ -50,31 +49,31 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
case SoftenFloat:
// Convert the integer operand instead.
SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case ExpandInteger:
case ExpandFloat:
// Convert the expanded pieces of the input.
GetExpandedOp(InOp, Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case SplitVector:
GetSplitVector(InOp, Lo, Hi);
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case ScalarizeVector:
// Convert the element instead.
SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case WidenVector: {
- assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BIT_CONVERT");
+ assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BITCAST");
InOp = GetWidenedVector(InOp);
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
InVT.getVectorNumElements()/2);
@@ -84,19 +83,19 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
}
}
if (InVT.isVector() && OutVT.isInteger()) {
- // Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand
+ // Handle cases like i64 = BITCAST v1i64 on x86, where the operand
// is legal but the result is not.
EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2);
if (isTypeLegal(NVT)) {
- SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp);
+ SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp);
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
DAG.getIntPtrConstant(0));
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
@@ -119,14 +118,14 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(SPFI);
+ MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
// Emit a store to the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, StackPtr, SV, 0,
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, StackPtr, PtrInfo,
false, false, 0);
// Load the first half from the stack slot.
- Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, SV, 0, false, false, 0);
+ Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo, false, false, 0);
// Increment the pointer to the other half.
unsigned IncrementSize = NOutVT.getSizeInBits() / 8;
@@ -134,7 +133,8 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
DAG.getIntPtrConstant(IncrementSize));
// Load the second half from the stack slot.
- Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr, SV, IncrementSize, false,
+ Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr,
+ PtrInfo.getWithOffset(IncrementSize), false,
false, MinAlign(Alignment, IncrementSize));
// Handle endianness of the load.
@@ -172,7 +172,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT OldVT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
- SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
+ SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
EVT::getVectorVT(*DAG.getContext(),
NewVT, 2*OldElts),
OldVec);
@@ -204,22 +204,21 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
- int SVOffset = LD->getSrcValueOffset();
unsigned Alignment = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
assert(NVT.isByteSized() && "Expanded type not byte sized!");
- Lo = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getSrcValue(), SVOffset,
+ Lo = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits() / 8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getSrcValue(),
- SVOffset+IncrementSize,
+ Hi = DAG.getLoad(NVT, dl, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(IncrementSize),
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
@@ -262,14 +261,14 @@ void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Generic Operand Expansion.
//===--------------------------------------------------------------------===//
-SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::ExpandOp_BITCAST(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
if (N->getValueType(0).isVector()) {
// An illegal expanding type is being converted to a legal vector type.
// Make a two element vector out of the expanded parts and convert that
// instead, but only if the new vector type is legal (otherwise there
// is no point, and it might create expansion loops). For example, on
- // x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
+ // x86 this turns v1i64 = BITCAST i64 into v1i64 = BITCAST v2i32.
EVT OVT = N->getOperand(0).getValueType();
EVT NVT = EVT::getVectorVT(*DAG.getContext(),
TLI.getTypeToTransformTo(*DAG.getContext(), OVT),
@@ -283,7 +282,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
std::swap(Parts[0], Parts[1]);
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Parts, 2);
- return DAG.getNode(ISD::BIT_CONVERT, dl, N->getValueType(0), Vec);
+ return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), Vec);
}
}
@@ -322,7 +321,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
&NewElts[0], NewElts.size());
// Convert the new vector to the old vector type.
- return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
+ return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
}
SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
@@ -347,7 +346,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
// Bitconvert to a vector of twice the length with elements of the expanded
// type, insert the expanded vector elements, and then convert back.
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2);
- SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
+ SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
NewVecVT, N->getOperand(0));
SDValue Lo, Hi;
@@ -363,7 +362,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx);
// Convert the new vector to the old vector type.
- return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
+ return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
}
SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
@@ -390,7 +389,6 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
St->getValue().getValueType());
SDValue Chain = St->getChain();
SDValue Ptr = St->getBasePtr();
- int SVOffset = St->getSrcValueOffset();
unsigned Alignment = St->getAlignment();
bool isVolatile = St->isVolatile();
bool isNonTemporal = St->isNonTemporal();
@@ -404,14 +402,14 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Lo = DAG.getStore(Chain, dl, Lo, Ptr, St->getSrcValue(), SVOffset,
+ Lo = DAG.getStore(Chain, dl, Lo, Ptr, St->getPointerInfo(),
isVolatile, isNonTemporal, Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
assert(isTypeLegal(Ptr.getValueType()) && "Pointers must be legal!");
- Hi = DAG.getStore(Chain, dl, Hi, Ptr, St->getSrcValue(),
- SVOffset + IncrementSize,
+ Hi = DAG.getStore(Chain, dl, Hi, Ptr,
+ St->getPointerInfo().getWithOffset(IncrementSize),
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 621c087..167dbe0 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -241,14 +241,14 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
if (Op.getOperand(j).getValueType().isVector())
- Operands[j] = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Op.getOperand(j));
+ Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
else
Operands[j] = Op.getOperand(j);
}
Op = DAG.getNode(Op.getOpcode(), dl, NVT, &Operands[0], Operands.size());
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Op);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 93bc2d0..182f8fc 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -46,7 +46,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
#endif
llvm_unreachable("Do not know how to scalarize the result of this operator!");
- case ISD::BIT_CONVERT: R = ScalarizeVecRes_BIT_CONVERT(N); break;
+ case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
@@ -122,9 +122,9 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
LHS.getValueType(), LHS, RHS);
}
-SDValue DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType();
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
NewVT, N->getOperand(0));
}
@@ -171,7 +171,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
N->getDebugLoc(),
N->getChain(), N->getBasePtr(),
DAG.getUNDEF(N->getBasePtr().getValueType()),
- N->getSrcValue(), N->getSrcValueOffset(),
+ N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
N->isVolatile(), N->isNonTemporal(),
N->getOriginalAlignment());
@@ -296,8 +296,8 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
dbgs() << "\n";
#endif
llvm_unreachable("Do not know how to scalarize this operator's operand!");
- case ISD::BIT_CONVERT:
- Res = ScalarizeVecOp_BIT_CONVERT(N);
+ case ISD::BITCAST:
+ Res = ScalarizeVecOp_BITCAST(N);
break;
case ISD::CONCAT_VECTORS:
Res = ScalarizeVecOp_CONCAT_VECTORS(N);
@@ -326,11 +326,11 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
return false;
}
-/// ScalarizeVecOp_BIT_CONVERT - If the value to convert is a vector that needs
+/// ScalarizeVecOp_BITCAST - If the value to convert is a vector that needs
/// to be scalarized, it must be <1 x ty>. Convert the element instead.
-SDValue DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
SDValue Elt = GetScalarizedVector(N->getOperand(0));
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Elt);
}
@@ -365,14 +365,13 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
if (N->isTruncatingStore())
return DAG.getTruncStore(N->getChain(), dl,
GetScalarizedVector(N->getOperand(1)),
- N->getBasePtr(),
- N->getSrcValue(), N->getSrcValueOffset(),
+ N->getBasePtr(), N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
N->isVolatile(), N->isNonTemporal(),
N->getAlignment());
return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
- N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(),
+ N->getBasePtr(), N->getPointerInfo(),
N->isVolatile(), N->isNonTemporal(),
N->getOriginalAlignment());
}
@@ -407,7 +406,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
- case ISD::BIT_CONVERT: SplitVecRes_BIT_CONVERT(N, Lo, Hi); break;
+ case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
case ISD::CONVERT_RNDSAT: SplitVecRes_CONVERT_RNDSAT(N, Lo, Hi); break;
@@ -497,8 +496,8 @@ void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi);
}
-void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
+void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
// We know the result is a vector. The input may be either a vector or a
// scalar value.
EVT LoVT, HiVT;
@@ -526,8 +525,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
GetExpandedOp(InOp, Lo, Hi);
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return;
}
break;
@@ -535,8 +534,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
// If the input is a vector that needs to be split, convert each split
// piece of the input now.
GetSplitVector(InOp, Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return;
}
@@ -550,8 +549,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
+ Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
+ Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
}
void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
@@ -626,9 +625,9 @@ void DAGTypeLegalizer::SplitVecRes_CONVERT_RNDSAT(SDNode *N, SDValue &Lo,
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
VLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0));
VHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
break;
}
}
@@ -646,16 +645,15 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
- EVT IdxVT = Idx.getValueType();
DebugLoc dl = N->getDebugLoc();
EVT LoVT, HiVT;
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
- Idx = DAG.getNode(ISD::ADD, dl, IdxVT, Idx,
- DAG.getConstant(LoVT.getVectorNumElements(), IdxVT));
- Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec, Idx);
+ uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
+ DAG.getIntPtrConstant(IdxVal + LoVT.getVectorNumElements()));
}
void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo,
@@ -705,8 +703,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
@@ -714,11 +712,11 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
const Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(VecType);
- Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, NULL, 0, EltVT,
+ Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
false, false, 0);
// Load the Lo part from the stack slot.
- Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, NULL, 0,
+ Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
false, false, 0);
// Increment the pointer to the other part.
@@ -727,8 +725,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
DAG.getIntPtrConstant(IncrementSize));
// Load the Hi part from the stack slot.
- Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, NULL, 0, false,
- false, MinAlign(Alignment, IncrementSize));
+ Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
+ false, false, MinAlign(Alignment, IncrementSize));
}
void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
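[Illustration, not part of the patch: the alignment math behind the MinAlign(Alignment, IncrementSize) call above, assuming LLVM's MinAlign returns the largest power of two dividing both arguments (the lowest set bit of A | B). Sketch only.]

#include <cassert>
#include <cstdint>

static uint64_t minAlign(uint64_t A, uint64_t B) {
  uint64_t Both = A | B;
  return Both & (~Both + 1); // isolate the lowest set bit
}

int main() {
  assert(minAlign(16, 8) == 8);  // Hi half 8 bytes in: at best 8-byte aligned
  assert(minAlign(16, 12) == 4); // a 12-byte offset degrades 16 to 4
}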
@@ -751,8 +749,6 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
SDValue Ch = LD->getChain();
SDValue Ptr = LD->getBasePtr();
SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
- const Value *SV = LD->getSrcValue();
- int SVOffset = LD->getSrcValueOffset();
EVT MemoryVT = LD->getMemoryVT();
unsigned Alignment = LD->getOriginalAlignment();
bool isVolatile = LD->isVolatile();
@@ -762,14 +758,15 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
- SV, SVOffset, LoMemVT, isVolatile, isNonTemporal, Alignment);
+ LD->getPointerInfo(), LoMemVT, isVolatile, isNonTemporal,
+ Alignment);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- SVOffset += IncrementSize;
Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
- SV, SVOffset, HiMemVT, isVolatile, isNonTemporal, Alignment);
+ LD->getPointerInfo().getWithOffset(IncrementSize),
+ HiMemVT, isVolatile, isNonTemporal, Alignment);
// Build a factor node to remember that this load is independent of the
// other one.
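[Illustration, not part of the patch: the offset bookkeeping that replaces SVOffset. The Hi half of a split load reads IncrementSize bytes past the original pointer info; the real code calls MachinePointerInfo::getWithOffset, modeled here by a hypothetical struct.]

#include <cstdio>

struct PointerInfo { long Offset; }; // stand-in for MachinePointerInfo
static PointerInfo getWithOffset(PointerInfo PI, long Delta) {
  return PointerInfo{PI.Offset + Delta};
}

int main() {
  unsigned LoMemBits = 128;               // e.g. the v4i32 half of a v8i32 load
  unsigned IncrementSize = LoMemBits / 8; // 16 bytes
  PointerInfo Lo{0}, Hi = getWithOffset(Lo, IncrementSize);
  std::printf("Hi half starts at byte %ld\n", Hi.Offset); // 16
}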
@@ -980,10 +977,11 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to split this operator's operand!");
- case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
+ case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
case ISD::STORE:
Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
@@ -995,6 +993,8 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
+ case ISD::FP_EXTEND:
+ case ISD::FTRUNC:
case ISD::TRUNCATE:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
@@ -1036,8 +1036,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
-SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
- // For example, i64 = BIT_CONVERT v4i16 on alpha. Typically the vector will
+SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
+ // For example, i64 = BITCAST v4i16 on alpha. Typically the vector will
// end up being split all the way down to individual components. Convert the
// split pieces into integers and reassemble.
SDValue Lo, Hi;
@@ -1048,13 +1048,12 @@ SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
JoinIntegers(Lo, Hi));
}
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
- // We know that the extracted result type is legal. For now, assume the index
- // is a constant.
+ // We know that the extracted result type is legal.
EVT SubVT = N->getValueType(0);
SDValue Idx = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
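[Illustration, not part of the patch: a standalone sketch of what the BITCAST-via-JoinIntegers path above computes for i64 = BITCAST v4i16 once the halves are expanded: glue two 32-bit pieces into one 64-bit value with Hi in the upper bits. On a big-endian target the legalizer swaps Lo and Hi first, so the byte image of the original vector is preserved either way.]

#include <cassert>
#include <cstdint>

static uint64_t joinIntegers(uint32_t Lo, uint32_t Hi) {
  return (uint64_t(Hi) << 32) | Lo;
}

int main() {
  assert(joinIntegers(0x11223344u, 0xAABBCCDDu) == 0xAABBCCDD11223344ULL);
}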
@@ -1099,15 +1098,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT EltVT = VecVT.getVectorElementType();
DebugLoc dl = N->getDebugLoc();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
- int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(SPFI);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, SV, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Load back the required element.
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- return DAG.getExtLoad(ISD::EXTLOAD, N->getValueType(0), dl, Store, StackPtr,
- SV, 0, EltVT, false, false, 0);
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
+ MachinePointerInfo(), EltVT, false, false, 0);
}
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
@@ -1118,7 +1115,6 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
bool isTruncating = N->isTruncatingStore();
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
- int SVOffset = N->getSrcValueOffset();
EVT MemoryVT = N->getMemoryVT();
unsigned Alignment = N->getOriginalAlignment();
bool isVol = N->isVolatile();
@@ -1132,22 +1128,23 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
if (isTruncating)
- Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(),
LoMemVT, isVol, isNT, Alignment);
else
- Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(),
isVol, isNT, Alignment);
// Increment the pointer to the other half.
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- SVOffset += IncrementSize;
if (isTruncating)
- Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize),
HiMemVT, isVol, isNT, Alignment);
else
- Hi = DAG.getStore(Ch, DL, Hi, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getStore(Ch, DL, Hi, Ptr,
+ N->getPointerInfo().getWithOffset(IncrementSize),
isVol, isNT, Alignment);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
@@ -1155,7 +1152,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
DebugLoc DL = N->getDebugLoc();
-
+
// The input operands all must have the same type, and we know the result
// type is valid. Convert this to a buildvector which extracts all the
// input elements.
@@ -1172,11 +1169,29 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
}
}
-
+
return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0),
&Elts[0], Elts.size());
}
+SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
+ // The result has a legal vector type, but the input needs splitting.
+ EVT ResVT = N->getValueType(0);
+ SDValue Lo, Hi;
+ DebugLoc DL = N->getDebugLoc();
+ GetSplitVector(N->getOperand(0), Lo, Hi);
+ EVT InVT = Lo.getValueType();
+
+ EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
+ InVT.getVectorNumElements());
+
+ Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
+ Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
+}
+
+
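[Illustration, not part of the patch: the new SplitVecOp_FP_ROUND path in plain C++ over arrays. Each half is rounded and the results are concatenated; the real code emits ISD::FP_ROUND on each half and joins them with ISD::CONCAT_VECTORS.]

#include <cassert>
#include <vector>

static std::vector<float> fpRoundSplit(const std::vector<double> &In) {
  size_t Half = In.size() / 2;
  std::vector<float> Out;
  Out.reserve(In.size());
  for (size_t i = 0; i != Half; ++i)         // Lo half
    Out.push_back(static_cast<float>(In[i]));
  for (size_t i = Half; i != In.size(); ++i) // Hi half
    Out.push_back(static_cast<float>(In[i]));
  return Out; // concat(Lo, Hi)
}

int main() {
  std::vector<float> Out = fpRoundSplit({1.0, 2.0, 3.0, 4.0});
  assert(Out.size() == 4 && Out[2] == 3.0f);
}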
//===----------------------------------------------------------------------===//
// Result Vector Widening
@@ -1201,7 +1216,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
#endif
llvm_unreachable("Do not know how to widen the result of this operator!");
- case ISD::BIT_CONVERT: Res = WidenVecRes_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break;
@@ -1297,7 +1312,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
EVT WidenEltVT = WidenVT.getVectorElementType();
EVT VT = WidenVT;
unsigned NumElts = VT.getVectorNumElements();
- while (!TLI.isTypeSynthesizable(VT) && NumElts != 1) {
+ while (!TLI.isTypeLegal(VT) && NumElts != 1) {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
}
@@ -1308,11 +1323,11 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
}
-
+
// No legal vector version so unroll the vector operation and then widen.
if (NumElts == 1)
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
-
+
// Since the operation can trap, apply operation on the original vector.
EVT MaxVT = VT;
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
@@ -1323,7 +1338,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
unsigned ConcatEnd = 0; // Current ConcatOps index.
int Idx = 0; // Current Idx into input vectors.
- // NumElts := greatest synthesizable vector size (at most WidenVT)
+ // NumElts := greatest legal vector size (at most WidenVT)
// while (orig. vector has unhandled elements) {
// take munches of size NumElts from the beginning and add to ConcatOps
// NumElts := next smaller supported vector size or 1
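[Illustration, not part of the patch: an executable rendering of the munching loop described in the pseudocode above, assuming a hypothetical set of legal vector lengths {1, 2, 4} (target-dependent in reality).]

#include <cstdio>

static bool isLegalNumElts(unsigned N) { return N == 1 || N == 2 || N == 4; }

int main() {
  unsigned Remaining = 7; // elements of the original vector left to handle
  unsigned NumElts = 4;   // greatest legal size (at most the widened VT)
  while (Remaining > 0) {
    while (!isLegalNumElts(NumElts) || NumElts > Remaining)
      NumElts = NumElts / 2 ? NumElts / 2 : 1;
    std::printf("munch %u elements\n", NumElts); // prints 4, then 2, then 1
    Remaining -= NumElts;
  }
}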
@@ -1341,13 +1356,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
do {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
- } while (!TLI.isTypeSynthesizable(VT) && NumElts != 1);
+ } while (!TLI.isTypeLegal(VT) && NumElts != 1);
if (NumElts == 1) {
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
+ SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp1, DAG.getIntPtrConstant(Idx));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
+ SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp2, DAG.getIntPtrConstant(Idx));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
EOp1, EOp2);
@@ -1378,7 +1393,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
do {
NextSize *= 2;
NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
- } while (!TLI.isTypeSynthesizable(NextVT));
+ } while (!TLI.isTypeLegal(NextVT));
if (!VT.isVector()) {
// Scalar type, create an INSERT_VECTOR_ELEMENT of type NextVT
@@ -1415,7 +1430,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
if (VT == WidenVT)
return ConcatOps[0];
}
-
+
// add undefs of size MaxVT until ConcatOps grows to length of WidenVT
unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
if (NumOps != ConcatEnd ) {
@@ -1428,7 +1443,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
SDValue InOp = N->getOperand(0);
- DebugLoc dl = N->getDebugLoc();
+ DebugLoc DL = N->getDebugLoc();
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
@@ -1444,11 +1459,14 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
InOp = GetWidenedVector(N->getOperand(0));
InVT = InOp.getValueType();
InVTNumElts = InVT.getVectorNumElements();
- if (InVTNumElts == WidenNumElts)
- return DAG.getNode(Opcode, dl, WidenVT, InOp);
+ if (InVTNumElts == WidenNumElts) {
+ if (N->getNumOperands() == 1)
+ return DAG.getNode(Opcode, DL, WidenVT, InOp);
+ return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1));
+ }
}
- if (TLI.isTypeSynthesizable(InWidenVT)) {
+ if (TLI.isTypeLegal(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1462,16 +1480,20 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(InVT);
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(Opcode, dl, WidenVT,
- DAG.getNode(ISD::CONCAT_VECTORS, dl, InWidenVT,
- &Ops[0], NumConcat));
+ SDValue InVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InWidenVT,
+ &Ops[0], NumConcat);
+ if (N->getNumOperands() == 1)
+ return DAG.getNode(Opcode, DL, WidenVT, InVec);
+ return DAG.getNode(Opcode, DL, WidenVT, InVec, N->getOperand(1));
}
if (InVTNumElts % WidenNumElts == 0) {
+ SDValue InVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InWidenVT,
+ InOp, DAG.getIntPtrConstant(0));
// Extract the input and convert the shorten input vector.
- return DAG.getNode(Opcode, dl, WidenVT,
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InWidenVT,
- InOp, DAG.getIntPtrConstant(0)));
+ if (N->getNumOperands() == 1)
+ return DAG.getNode(Opcode, DL, WidenVT, InVal);
+ return DAG.getNode(Opcode, DL, WidenVT, InVal, N->getOperand(1));
}
}
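[Illustration, not part of the patch: the concat-with-undef strategy used above, sketched in plain C++. The input is padded out to the widened length (padding lanes are undef; zeros stand in for DAG.getUNDEF here), the conversion runs once over the widened vector, and the tail lanes are ignored.]

#include <cassert>
#include <vector>

static std::vector<int> convertWidened(const std::vector<float> &In,
                                       unsigned WidenNumElts) {
  std::vector<float> Padded(In);
  Padded.resize(WidenNumElts, 0.0f); // stand-in for undef lanes
  std::vector<int> Out;
  for (float F : Padded)
    Out.push_back(static_cast<int>(F));
  return Out; // only the first In.size() lanes are meaningful
}

int main() {
  std::vector<int> R = convertWidened({1.5f, 2.5f}, 4);
  assert(R.size() == 4 && R[0] == 1 && R[1] == 2);
}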
@@ -1480,16 +1502,20 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
EVT EltVT = WidenVT.getVectorElementType();
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
- for (i=0; i < MinElts; ++i)
- Ops[i] = DAG.getNode(Opcode, dl, EltVT,
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
- DAG.getIntPtrConstant(i)));
+ for (i=0; i < MinElts; ++i) {
+ SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp,
+ DAG.getIntPtrConstant(i));
+ if (N->getNumOperands() == 1)
+ Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
+ else
+ Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val, N->getOperand(1));
+ }
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
- return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, WidenVT, &Ops[0], WidenNumElts);
}
SDValue DAGTypeLegalizer::WidenVecRes_POWI(SDNode *N) {
@@ -1536,7 +1562,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
WidenVT, WidenLHS, DAG.getValueType(ExtVT));
}
-SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType();
EVT VT = N->getValueType(0);
@@ -1555,7 +1581,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
InOp = GetPromotedInteger(InOp);
InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT))
- return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp);
+ return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break;
case SoftenFloat:
case ExpandInteger:
@@ -1570,13 +1596,14 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT))
// The input widens to the same size. Convert to the widened value.
- return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp);
+ return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break;
}
unsigned WidenSize = WidenVT.getSizeInBits();
unsigned InSize = InVT.getSizeInBits();
- if (WidenSize % InSize == 0) {
+ // x86mmx is not an acceptable vector element type, so don't try.
+ if (WidenSize % InSize == 0 && InVT != MVT::x86mmx) {
// Determine new input vector type. The new input vector type will use
// the same element type (if it's a vector) or use the input type as a
// vector. It is the same size as the type to widen to.
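[Illustration, not part of the patch: the size check guarding the widened bitcast above, as a standalone predicate. Widening is only attempted when the widened size is a whole multiple of the input size, and never for x86mmx, which cannot be a vector element type.]

#include <cassert>

static bool canBuildNewInVT(unsigned WidenSize, unsigned InSize,
                            bool InputIsX86MMX) {
  return WidenSize % InSize == 0 && !InputIsX86MMX;
}

int main() {
  assert(canBuildNewInVT(256, 64, false));  // 64-bit input tiles 4x into 256
  assert(!canBuildNewInVT(256, 96, false)); // 96 does not divide 256
  assert(!canBuildNewInVT(256, 64, true));  // x86mmx is rejected outright
}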
@@ -1590,7 +1617,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
- if (TLI.isTypeSynthesizable(NewInVT)) {
+ if (TLI.isTypeLegal(NewInVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1609,7 +1636,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
else
NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
NewInVT, &Ops[0], NewNumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, NewVec);
+ return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
}
}
@@ -1730,7 +1757,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SatOp, CvtCode);
}
- if (TLI.isTypeSynthesizable(InWidenVT)) {
+ if (TLI.isTypeLegal(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1794,39 +1821,25 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT InVT = InOp.getValueType();
- ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx);
- if (CIdx) {
- unsigned IdxVal = CIdx->getZExtValue();
- // Check if we can just return the input vector after widening.
- if (IdxVal == 0 && InVT == WidenVT)
- return InOp;
-
- // Check if we can extract from the vector.
- unsigned InNumElts = InVT.getVectorNumElements();
- if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, WidenVT, InOp, Idx);
- }
+ // Check if we can just return the input vector after widening.
+ uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ if (IdxVal == 0 && InVT == WidenVT)
+ return InOp;
+
+ // Check if we can extract from the vector.
+ unsigned InNumElts = InVT.getVectorNumElements();
+ if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, WidenVT, InOp, Idx);
// We could try widening the input to the right length but for now, extract
// the original elements, fill the rest with undefs and build a vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
EVT EltVT = VT.getVectorElementType();
- EVT IdxVT = Idx.getValueType();
unsigned NumElts = VT.getVectorNumElements();
unsigned i;
- if (CIdx) {
- unsigned IdxVal = CIdx->getZExtValue();
- for (i=0; i < NumElts; ++i)
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getConstant(IdxVal+i, IdxVT));
- } else {
- Ops[0] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp, Idx);
- for (i=1; i < NumElts; ++i) {
- SDValue NewIdx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
- DAG.getConstant(i, IdxVT));
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp, NewIdx);
- }
- }
+ for (i=0; i < NumElts; ++i)
+ Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getIntPtrConstant(IdxVal+i));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
@@ -1985,7 +1998,7 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
#endif
llvm_unreachable("Do not know how to widen this operator's operand!");
- case ISD::BIT_CONVERT: Res = WidenVecOp_BIT_CONVERT(N); break;
+ case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
@@ -2044,7 +2057,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
}
-SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue InOp = GetWidenedVector(N->getOperand(0));
EVT InWidenVT = InOp.getValueType();
@@ -2053,11 +2066,12 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
// Check if we can convert between two legal vector types and extract.
unsigned InWidenSize = InWidenVT.getSizeInBits();
unsigned Size = VT.getSizeInBits();
- if (InWidenSize % Size == 0 && !VT.isVector()) {
+ // x86mmx is not an acceptable vector element type, so don't try.
+ if (InWidenSize % Size == 0 && !VT.isVector() && VT != MVT::x86mmx) {
unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
- if (TLI.isTypeSynthesizable(NewVT)) {
- SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
+ if (TLI.isTypeLegal(NewVT)) {
+ SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getIntPtrConstant(0));
}
@@ -2146,7 +2160,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
if (Width == WidenEltWidth)
return RetVT;
- // See if there is larger legal integer than the element type to load/store
+ // See if there is larger legal integer than the element type to load/store
unsigned VT;
for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE;
VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) {
@@ -2154,7 +2168,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
unsigned MemVTWidth = MemVT.getSizeInBits();
if (MemVT.getSizeInBits() <= WidenEltWidth)
break;
- if (TLI.isTypeSynthesizable(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
+ if (TLI.isTypeLegal(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
RetVT = MemVT;
@@ -2168,7 +2182,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) {
EVT MemVT = (MVT::SimpleValueType) VT;
unsigned MemVTWidth = MemVT.getSizeInBits();
- if (TLI.isTypeSynthesizable(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
+ if (TLI.isTypeLegal(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
(WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
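[Illustration, not part of the patch: a standalone sketch of FindMemType's integer search, walking candidate widths from widest to narrowest and taking the first one that divides the widened width and fits the remaining bits. The legal-width set here is an assumption; the real code queries TLI.isTypeLegal over MVT integer types.]

#include <cassert>
#include <vector>

static unsigned findIntMemWidth(unsigned Width, unsigned WidenWidth) {
  const std::vector<unsigned> LegalWidths = {64, 32, 16, 8}; // hypothetical
  for (unsigned W : LegalWidths)
    if (WidenWidth % W == 0 && W <= Width)
      return W;
  return 8; // fall back to the narrowest unit
}

int main() {
  assert(findIntMemWidth(96, 256) == 64); // 64 fits in 96 and divides 256
  assert(findIntMemWidth(48, 256) == 32); // 64 is too wide, so 32 wins
}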
@@ -2201,7 +2215,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
if (NewLdTy != LdTy) {
NumElts = Width / NewLdTy.getSizeInBits();
NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts);
- VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, VecOp);
+ VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp);
// Readjust position and vector position based on new load type
Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
LdTy = NewLdTy;
@@ -2209,11 +2223,11 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
DAG.getIntPtrConstant(Idx++));
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, VecTy, VecOp);
+ return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
-SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
- LoadSDNode * LD) {
+SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
+ LoadSDNode *LD) {
// The strategy assumes that we can efficiently load powers of two widths.
// The routine chops the vector into the largest vector loads with the same
// element type or scalar loads and then recombines it to the widened vector
@@ -2228,11 +2242,9 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// Load information
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
- int SVOffset = LD->getSrcValueOffset();
unsigned Align = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
- const Value *SV = LD->getSrcValue();
int LdWidth = LdVT.getSizeInBits();
int WidthDiff = WidenWidth - LdWidth; // Difference
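[Illustration, not part of the patch: the load-chopping strategy of GenWidenVectorLoads, repeatedly emitting the widest load that still fits the remaining width while advancing the byte offset fed to getWithOffset. The available widths here are illustrative, not what FindMemType would actually return.]

#include <cstdio>

int main() {
  int LdWidth = 160;   // bits left to load (e.g. a v5i32 value)
  unsigned Offset = 0; // byte offset passed to getWithOffset
  while (LdWidth > 0) {
    unsigned NewVTWidth = LdWidth >= 128 ? 128 : LdWidth >= 32 ? 32 : 8;
    std::printf("load %u bits at byte offset %u\n", NewVTWidth, Offset);
    Offset += NewVTWidth / 8; // prints: 128 @ 0, then 32 @ 16
    LdWidth -= NewVTWidth;
  }
}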
@@ -2241,7 +2253,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// Find the vector type that can load from.
EVT NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
int NewVTWidth = NewVT.getSizeInBits();
- SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, SV, SVOffset,
+ SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, LD->getPointerInfo(),
isVolatile, isNonTemporal, Align);
LdChain.push_back(LdOp.getValue(1));
@@ -2251,7 +2263,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
unsigned NumElts = WidenWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
- return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, VecOp);
+ return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
}
if (NewVT == WidenVT)
return LdOp;
@@ -2286,8 +2298,9 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
NewVTWidth = NewVT.getSizeInBits();
}
- SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, SV,
- SVOffset+Offset, isVolatile,
+ SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr,
+ LD->getPointerInfo().getWithOffset(Offset),
+ isVolatile,
isNonTemporal, MinAlign(Align, Increment));
LdChain.push_back(LdOp.getValue(1));
LdOps.push_back(LdOp);
@@ -2300,7 +2313,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
if (!LdOps[0].getValueType().isVector())
// All the loads are scalar loads.
return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
-
+
// If the load contains vectors, build the vector using concat vector.
// All of the vectors used to loads are power of 2 and the scalars load
// can be combined to make a power of 2 vector.
@@ -2362,11 +2375,9 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
// Load information
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
- int SVOffset = LD->getSrcValueOffset();
unsigned Align = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
- const Value *SV = LD->getSrcValue();
EVT EltVT = WidenVT.getVectorElementType();
EVT LdEltVT = LdVT.getVectorElementType();
@@ -2376,16 +2387,17 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Increment = LdEltVT.getSizeInBits() / 8;
- Ops[0] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, BasePtr, SV, SVOffset,
+ Ops[0] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
+ LD->getPointerInfo(),
LdEltVT, isVolatile, isNonTemporal, Align);
LdChain.push_back(Ops[0].getValue(1));
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr, DAG.getIntPtrConstant(Offset));
- Ops[i] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, NewBasePtr, SV,
- SVOffset + Offset, LdEltVT, isVolatile,
- isNonTemporal, Align);
+ Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
+ LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
+ isVolatile, isNonTemporal, Align);
LdChain.push_back(Ops[i].getValue(1));
}
@@ -2405,8 +2417,6 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
// element type or scalar stores.
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
- const Value *SV = ST->getSrcValue();
- int SVOffset = ST->getSrcValueOffset();
unsigned Align = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
@@ -2433,9 +2443,9 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
do {
SDValue EOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
DAG.getIntPtrConstant(Idx));
- StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr, SV,
- SVOffset + Offset, isVolatile,
- isNonTemporal,
+ StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
+ ST->getPointerInfo().getWithOffset(Offset),
+ isVolatile, isNonTemporal,
MinAlign(Align, Offset)));
StWidth -= NewVTWidth;
Offset += Increment;
@@ -2447,15 +2457,16 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
// Cast the vector to the scalar type we can store
unsigned NumElts = ValWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
- SDValue VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, ValOp);
+ SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
// Readjust index position based on new vector type
Idx = Idx * ValEltWidth / NewVTWidth;
do {
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
DAG.getIntPtrConstant(Idx++));
- StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr, SV,
- SVOffset + Offset, isVolatile,
- isNonTemporal, MinAlign(Align, Offset)));
+ StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
+ ST->getPointerInfo().getWithOffset(Offset),
+ isVolatile, isNonTemporal,
+ MinAlign(Align, Offset)));
StWidth -= NewVTWidth;
Offset += Increment;
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
@@ -2474,14 +2485,12 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
// and then store it. Instead, we extract each element and then store it.
SDValue Chain = ST->getChain();
SDValue BasePtr = ST->getBasePtr();
- const Value *SV = ST->getSrcValue();
- int SVOffset = ST->getSrcValueOffset();
unsigned Align = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
SDValue ValOp = GetWidenedVector(ST->getValue());
DebugLoc dl = ST->getDebugLoc();
-
+
EVT StVT = ST->getMemoryVT();
EVT ValVT = ValOp.getValueType();
@@ -2499,8 +2508,8 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
unsigned NumElts = StVT.getVectorNumElements();
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
DAG.getIntPtrConstant(0));
- StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, BasePtr, SV,
- SVOffset, StEltVT,
+ StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, BasePtr,
+ ST->getPointerInfo(), StEltVT,
isVolatile, isNonTemporal, Align));
unsigned Offset = Increment;
for (unsigned i=1; i < NumElts; ++i, Offset += Increment) {
@@ -2508,9 +2517,9 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
BasePtr, DAG.getIntPtrConstant(Offset));
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
DAG.getIntPtrConstant(0));
- StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, NewBasePtr, SV,
- SVOffset + Offset, StEltVT,
- isVolatile, isNonTemporal,
+ StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, NewBasePtr,
+ ST->getPointerInfo().getWithOffset(Offset),
+ StEltVT, isVolatile, isNonTemporal,
MinAlign(Align, Offset)));
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
index ac2d338..2dcb229 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
@@ -16,7 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DebugLoc.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index fae2729..e3da208 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -205,7 +205,7 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
- if (SU->getNode()->getFlaggedNode())
+ if (SU->getNode()->getGluedNode())
return NULL;
SDNode *N = SU->getNode();
@@ -216,7 +216,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
- if (VT == MVT::Flag)
+ if (VT == MVT::Glue)
return NULL;
else if (VT == MVT::Other)
TryUnfold = true;
@@ -224,7 +224,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
- if (VT == MVT::Flag)
+ if (VT == MVT::Glue)
return NULL;
}
@@ -476,12 +476,12 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
}
}
- for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
+ for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
if (Node->getOpcode() == ISD::INLINEASM) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
- --NumOps; // Ignore the flag operand.
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
+ --NumOps; // Ignore the glue operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
index 56f5ded..430283d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@@ -40,7 +40,7 @@ STATISTIC(NumStalls, "Number of pipeline stalls");
static RegisterScheduler
tdListDAGScheduler("list-td", "Top-down list scheduler",
createTDListDAGScheduler);
-
+
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGList - The actual list scheduler implementation. This supports
@@ -51,7 +51,7 @@ private:
/// AvailableQueue - The priority queue to use for the available SUnits.
///
SchedulingPriorityQueue *AvailableQueue;
-
+
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands become available, the instruction is
@@ -63,11 +63,12 @@ private:
public:
ScheduleDAGList(MachineFunction &mf,
- SchedulingPriorityQueue *availqueue,
- ScheduleHazardRecognizer *HR)
- : ScheduleDAGSDNodes(mf),
- AvailableQueue(availqueue), HazardRec(HR) {
- }
+ SchedulingPriorityQueue *availqueue)
+ : ScheduleDAGSDNodes(mf), AvailableQueue(availqueue) {
+
+ const TargetMachine &tm = mf.getTarget();
+ HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
+ }
~ScheduleDAGList() {
delete HazardRec;
@@ -87,14 +88,14 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGList::Schedule() {
DEBUG(dbgs() << "********** List Scheduling **********\n");
-
+
// Build the scheduling graph.
BuildSchedGraph(NULL);
AvailableQueue->initNodes(SUnits);
-
+
ListScheduleTopDown();
-
+
AvailableQueue->releaseState();
}
@@ -118,7 +119,7 @@ void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
--SuccSU->NumPredsLeft;
SuccSU->setDepthToAtLeast(SU->getDepth() + D.getLatency());
-
+
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
@@ -142,7 +143,7 @@ void ScheduleDAGList::ReleaseSuccessors(SUnit *SU) {
void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
-
+
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
@@ -168,7 +169,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
SUnits[i].isAvailable = true;
}
}
-
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
@@ -187,7 +188,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
assert(PendingQueue[i]->getDepth() > CurCycle && "Negative latency?");
}
}
-
+
// If there are no instructions available, don't try to issue anything, and
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
@@ -196,24 +197,24 @@ void ScheduleDAGList::ListScheduleTopDown() {
}
SUnit *FoundSUnit = 0;
-
+
bool HasNoopHazards = false;
while (!AvailableQueue->empty()) {
SUnit *CurSUnit = AvailableQueue->pop();
-
+
ScheduleHazardRecognizer::HazardType HT =
- HazardRec->getHazardType(CurSUnit);
+ HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
if (HT == ScheduleHazardRecognizer::NoHazard) {
FoundSUnit = CurSUnit;
break;
}
-
+
// Remember if this is a noop hazard.
HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
-
+
NotReady.push_back(CurSUnit);
}
-
+
// Add the nodes that aren't ready back onto the available list.
if (!NotReady.empty()) {
AvailableQueue->push_all(NotReady);
@@ -228,7 +229,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
// If this is a pseudo-op node, we don't want to increment the current
// cycle.
if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
+ ++CurCycle;
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem, just advance
// the current cycle and try again.
@@ -257,12 +258,8 @@ void ScheduleDAGList::ListScheduleTopDown() {
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-/// createTDListDAGScheduler - This creates a top-down list scheduler with a
-/// new hazard recognizer. This scheduler takes ownership of the hazard
-/// recognizer and deletes it when done.
+/// createTDListDAGScheduler - This creates a top-down list scheduler.
ScheduleDAGSDNodes *
llvm::createTDListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
- return new ScheduleDAGList(*IS->MF,
- new LatencyPriorityQueue(),
- IS->CreateTargetHazardRecognizer());
+ return new ScheduleDAGList(*IS->MF, new LatencyPriorityQueue());
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 4c3e4e3..0b548b2 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -20,6 +20,7 @@
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
@@ -65,6 +66,10 @@ static RegisterScheduler
"which tries to balance ILP and register pressure",
createILPListDAGScheduler);
+static cl::opt<bool> DisableSchedCycles(
+ "disable-sched-cycles", cl::Hidden, cl::init(false),
+ cl::desc("Disable cycle-level precision during preRA scheduling"));
+
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
@@ -83,31 +88,56 @@ private:
/// AvailableQueue - The priority queue to use for the available SUnits.
SchedulingPriorityQueue *AvailableQueue;
+ /// PendingQueue - This contains all of the instructions whose operands have
+ /// been issued, but their results are not ready yet (due to the latency of
+ /// the operation). Once the operands becomes available, the instruction is
+ /// added to the AvailableQueue.
+ std::vector<SUnit*> PendingQueue;
+
+ /// HazardRec - The hazard recognizer to use.
+ ScheduleHazardRecognizer *HazardRec;
+
+ /// CurCycle - The current scheduler state corresponds to this cycle.
+ unsigned CurCycle;
+
+ /// MinAvailableCycle - Cycle of the soonest available instruction.
+ unsigned MinAvailableCycle;
+
/// LiveRegDefs - A set of physical registers and their definitions
/// that are "live". These nodes must be scheduled before any other nodes that
/// modify the registers can be scheduled.
unsigned NumLiveRegs;
std::vector<SUnit*> LiveRegDefs;
- std::vector<unsigned> LiveRegCycles;
+ std::vector<SUnit*> LiveRegGens;
/// Topo - A topological ordering for SUnits which permits fast IsReachable
/// and similar queries.
ScheduleDAGTopologicalSort Topo;
public:
- ScheduleDAGRRList(MachineFunction &mf,
- bool isbottomup, bool needlatency,
- SchedulingPriorityQueue *availqueue)
- : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup), NeedLatency(needlatency),
- AvailableQueue(availqueue), Topo(SUnits) {
- }
+ ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
+ SchedulingPriorityQueue *availqueue,
+ CodeGenOpt::Level OptLevel)
+ : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
+ NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
+ Topo(SUnits) {
+
+ const TargetMachine &tm = mf.getTarget();
+ if (DisableSchedCycles || !NeedLatency)
+ HazardRec = new ScheduleHazardRecognizer();
+ else
+ HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
+ }
~ScheduleDAGRRList() {
+ delete HazardRec;
delete AvailableQueue;
}
void Schedule();
+ ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
+
/// IsReachable - Checks if SU is reachable from TargetSU.
bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
return Topo.IsReachable(SU, TargetSU);
@@ -136,24 +166,37 @@ public:
}
private:
+ bool isReady(SUnit *SU) {
+ return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
+ AvailableQueue->isReady(SU);
+ }
+
void ReleasePred(SUnit *SU, const SDep *PredEdge);
- void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
+ void ReleasePredecessors(SUnit *SU);
void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
void ReleaseSuccessors(SUnit *SU);
+ void ReleasePending();
+ void AdvanceToCycle(unsigned NextCycle);
+ void AdvancePastStalls(SUnit *SU);
+ void EmitNode(SUnit *SU);
+ void ScheduleNodeBottomUp(SUnit*);
void CapturePred(SDep *PredEdge);
- void ScheduleNodeBottomUp(SUnit*, unsigned);
- void ScheduleNodeTopDown(SUnit*, unsigned);
void UnscheduleNodeBottomUp(SUnit*);
- void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
+ void RestoreHazardCheckerBottomUp();
+ void BacktrackBottomUp(SUnit*, SUnit*);
SUnit *CopyAndMoveSuccessors(SUnit*);
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
SmallVector<SUnit*, 2>&);
bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
- void ListScheduleTopDown();
+
+ SUnit *PickNodeToScheduleBottomUp();
void ListScheduleBottomUp();
+ void ScheduleNodeTopDown(SUnit*);
+ void ListScheduleTopDown();
+
/// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
/// Updates the topological ordering if required.
@@ -190,11 +233,13 @@ private:
void ScheduleDAGRRList::Schedule() {
DEBUG(dbgs()
<< "********** List Scheduling BB#" << BB->getNumber()
- << " **********\n");
+ << " '" << BB->getName() << "' **********\n");
+ CurCycle = 0;
+ MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
NumLiveRegs = 0;
- LiveRegDefs.resize(TRI->getNumRegs(), NULL);
- LiveRegCycles.resize(TRI->getNumRegs(), 0);
+ LiveRegDefs.resize(TRI->getNumRegs(), NULL);
+ LiveRegGens.resize(TRI->getNumRegs(), NULL);
// Build the scheduling graph.
BuildSchedGraph(NULL);
@@ -204,13 +249,15 @@ void ScheduleDAGRRList::Schedule() {
Topo.InitDAGTopologicalSorting();
AvailableQueue->initNodes(SUnits);
-
+
+ HazardRec->Reset();
+
// Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
if (isBottomUp)
ListScheduleBottomUp();
else
ListScheduleTopDown();
-
+
AvailableQueue->releaseState();
}
@@ -243,33 +290,197 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
- AvailableQueue->push(PredSU);
+
+ unsigned Height = PredSU->getHeight();
+ if (Height < MinAvailableCycle)
+ MinAvailableCycle = Height;
+
+ if (isReady(SU)) {
+ AvailableQueue->push(PredSU);
+ }
+ // CapturePred and others may have left the node in the pending queue; avoid
+ // adding it twice.
+ else if (!PredSU->isPending) {
+ PredSU->isPending = true;
+ PendingQueue.push_back(PredSU);
+ }
}
}
-void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
+/// Call ReleasePred for each predecessor, then update register live def/gen.
+/// Always update LiveRegDefs for a register dependence even if the current SU
+/// also defines the register. This effectively creates one large live range
+/// across a sequence of two-address nodes. This is important because the
+/// entire chain must be scheduled together. Example:
+///
+/// flags = (3) add
+/// flags = (2) addc flags
+/// flags = (1) addc flags
+///
+/// results in
+///
+/// LiveRegDefs[flags] = 3
+/// LiveRegGens[flags] = 1
+///
+/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
+/// interference on flags.
+void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
- // expensive to copy the register. Make sure nothing that can
+ // expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- if (!LiveRegDefs[I->getReg()]) {
+ SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
+ assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
+ "interference on register dependence");
+ LiveRegDefs[I->getReg()] = I->getSUnit();
+ if (!LiveRegGens[I->getReg()]) {
++NumLiveRegs;
- LiveRegDefs[I->getReg()] = I->getSUnit();
- LiveRegCycles[I->getReg()] = CurCycle;
+ LiveRegGens[I->getReg()] = SU;
}
}
}
}
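[Illustration, not part of the patch: a toy rendering of the LiveRegDefs/LiveRegGens bookkeeping described in the doc comment above, using its flags/addc example. Integers stand in for SUnits; scheduling bottom-up, each ReleasePredecessors call moves the def up to the predecessor while the gen (the first consumer) sticks.]

#include <cassert>
#include <map>

int main() {
  std::map<unsigned, int> LiveRegDefs, LiveRegGens;
  const unsigned Flags = 1;
  // Schedule (1) addc, then (2) addc; each has predecessor SU + 1.
  for (int SU : {1, 2}) {
    int Pred = SU + 1;
    LiveRegDefs[Flags] = Pred;      // always move the def upward
    if (!LiveRegGens.count(Flags))
      LiveRegGens[Flags] = SU;      // first user stays the gen
  }
  assert(LiveRegDefs[Flags] == 3 && LiveRegGens[Flags] == 1);
}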
+/// Check to see if any of the pending instructions are ready to issue. If
+/// so, add them to the available queue.
+void ScheduleDAGRRList::ReleasePending() {
+ if (DisableSchedCycles) {
+ assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
+ return;
+ }
+
+ // If the available queue is empty, it is safe to reset MinAvailableCycle.
+ if (AvailableQueue->empty())
+ MinAvailableCycle = UINT_MAX;
+
+ // Check to see if any of the pending instructions are ready to issue. If
+ // so, add them to the available queue.
+ for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
+ unsigned ReadyCycle =
+ isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
+ if (ReadyCycle < MinAvailableCycle)
+ MinAvailableCycle = ReadyCycle;
+
+ if (PendingQueue[i]->isAvailable) {
+ if (!isReady(PendingQueue[i]))
+ continue;
+ AvailableQueue->push(PendingQueue[i]);
+ }
+ PendingQueue[i]->isPending = false;
+ PendingQueue[i] = PendingQueue.back();
+ PendingQueue.pop_back();
+ --i; --e;
+ }
+}
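[Illustration, not part of the patch: the PendingQueue scan above removes elements mid-iteration with the classic swap-with-back erase idiom. Here it is in isolation, removing even numbers from a vector; note that element order is not preserved, which is fine for a queue that gets re-prioritized anyway.]

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Q = {1, 2, 3, 4, 5};
  for (unsigned i = 0, e = Q.size(); i != e; ++i) {
    if (Q[i] % 2 != 0)
      continue;        // not ready: leave it pending
    Q[i] = Q.back();   // overwrite with the last element
    Q.pop_back();      // shrink
    --i; --e;          // re-examine the swapped-in element
  }
  assert(Q.size() == 3); // {1, 5, 3} remain
}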
+
+/// Move the scheduler state forward by the specified number of Cycles.
+void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
+ if (NextCycle <= CurCycle)
+ return;
+
+ AvailableQueue->setCurCycle(NextCycle);
+ if (!HazardRec->isEnabled()) {
+ // Bypass lots of virtual calls in case of long latency.
+ CurCycle = NextCycle;
+ }
+ else {
+ for (; CurCycle != NextCycle; ++CurCycle) {
+ if (isBottomUp)
+ HazardRec->RecedeCycle();
+ else
+ HazardRec->AdvanceCycle();
+ }
+ }
+ // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
+ // available Q to release pending nodes at least once before popping.
+ ReleasePending();
+}
+
+/// Move the scheduler state forward until the specified node's dependents are
+/// ready and can be scheduled with no resource conflicts.
+void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
+ if (DisableSchedCycles)
+ return;
+
+ unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();
+
+ // Bump CurCycle to account for latency. We assume the latency of other
+ // available instructions may be hidden by the stall (not a full pipe stall).
+ // This updates the hazard recognizer's cycle before reserving resources for
+ // this instruction.
+ AdvanceToCycle(ReadyCycle);
+
+ // Calls are scheduled in their preceding cycle, so don't conflict with
+ // hazards from instructions after the call. EmitNode will reset the
+ // scoreboard state before emitting the call.
+ if (isBottomUp && SU->isCall)
+ return;
+
+ // FIXME: For resource conflicts in very long non-pipelined stages, we
+ // should probably skip ahead here to avoid useless scoreboard checks.
+ int Stalls = 0;
+ while (true) {
+ ScheduleHazardRecognizer::HazardType HT =
+ HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);
+
+ if (HT == ScheduleHazardRecognizer::NoHazard)
+ break;
+
+ ++Stalls;
+ }
+ AdvanceToCycle(CurCycle + Stalls);
+}
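[Illustration, not part of the patch: the stall-probing loop above, reduced to its shape. The recognizer is a stand-in that reports a hazard for a fixed number of cycles; the real code calls getHazardType(SU, Stalls) until it returns NoHazard, then advances the cycle once.]

#include <cassert>

static unsigned stallsBefore(unsigned BusyCycles) {
  unsigned Stalls = 0;
  while (Stalls < BusyCycles) // stand-in for getHazardType(...) != NoHazard
    ++Stalls;
  return Stalls;
}

int main() {
  assert(stallsBefore(3) == 3); // caller does AdvanceToCycle(CurCycle + 3)
}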
+
+/// Record this SUnit in the HazardRecognizer.
+/// Does not update CurCycle.
+void ScheduleDAGRRList::EmitNode(SUnit *SU) {
+ if (!HazardRec->isEnabled())
+ return;
+
+ // Check for phys reg copy.
+ if (!SU->getNode())
+ return;
+
+ switch (SU->getNode()->getOpcode()) {
+ default:
+ assert(SU->getNode()->isMachineOpcode() &&
+ "This target-independent node should not be scheduled.");
+ break;
+ case ISD::MERGE_VALUES:
+ case ISD::TokenFactor:
+ case ISD::CopyToReg:
+ case ISD::CopyFromReg:
+ case ISD::EH_LABEL:
+ // Noops don't affect the scoreboard state. Copies are likely to be
+ // removed.
+ return;
+ case ISD::INLINEASM:
+ // For inline asm, clear the pipeline state.
+ HazardRec->Reset();
+ return;
+ }
+ if (isBottomUp && SU->isCall) {
+ // Calls are scheduled with their preceding instructions. For bottom-up
+ // scheduling, clear the pipeline state before emitting.
+ HazardRec->Reset();
+ }
+
+ HazardRec->EmitInstruction(SU);
+
+ if (!isBottomUp && SU->isCall) {
+ HazardRec->Reset();
+ }
+}
+
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
-void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
+void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
@@ -278,36 +489,51 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << " Height [" << SU->getHeight() << "] pipeline stall!\n");
#endif
- // FIXME: Handle noop hazard.
+ // FIXME: Do not modify node height. It may interfere with
+ // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
+ // node its ready cycle can aid heuristics, and after scheduling it can
+ // indicate the scheduled cycle.
SU->setHeightToAtLeast(CurCycle);
+
+ // Reserve resources for the scheduled instruction.
+ EmitNode(SU);
+
Sequence.push_back(SU);
AvailableQueue->ScheduledNode(SU);
- ReleasePredecessors(SU, CurCycle);
+ // Update liveness of predecessors before successors to avoid treating a
+ // two-address node as a live range def.
+ ReleasePredecessors(SU);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->isAssignedRegDep()) {
- if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
- assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->getReg()] == SU &&
- "Physical register dependency violated?");
- --NumLiveRegs;
- LiveRegDefs[I->getReg()] = NULL;
- LiveRegCycles[I->getReg()] = 0;
- }
+ // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
+ if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
+ assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
+ --NumLiveRegs;
+ LiveRegDefs[I->getReg()] = NULL;
+ LiveRegGens[I->getReg()] = NULL;
}
}
SU->isScheduled = true;
+
+ // Conditions under which the scheduler should eagerly advance the cycle:
+ // (1) No available instructions
+ // (2) All pipelines full, so available instructions must have hazards.
+ //
+ // If HazardRec is disabled, count each inst as one cycle.
+ if (!HazardRec->isEnabled() || HazardRec->atIssueLimit()
+ || AvailableQueue->empty())
+ AdvanceToCycle(CurCycle + 1);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
-void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
+void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
if (PredSU->isAvailable) {
PredSU->isAvailable = false;
@@ -328,59 +554,98 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
CapturePred(&*I);
- if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]){
+ if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = NULL;
- LiveRegCycles[I->getReg()] = 0;
+ LiveRegGens[I->getReg()] = NULL;
}
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
+ // This becomes the nearest def. Note that an earlier def may still be
+ // pending if this is a two-address node.
+ LiveRegDefs[I->getReg()] = SU;
if (!LiveRegDefs[I->getReg()]) {
- LiveRegDefs[I->getReg()] = SU;
++NumLiveRegs;
}
- if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
- LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
+ if (LiveRegGens[I->getReg()] == NULL ||
+ I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
+ LiveRegGens[I->getReg()] = I->getSUnit();
}
}
+ if (SU->getHeight() < MinAvailableCycle)
+ MinAvailableCycle = SU->getHeight();
SU->setHeightDirty();
SU->isScheduled = false;
SU->isAvailable = true;
- AvailableQueue->push(SU);
+ if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
+ // Don't make available until backtracking is complete.
+ SU->isPending = true;
+ PendingQueue.push_back(SU);
+ }
+ else {
+ AvailableQueue->push(SU);
+ }
AvailableQueue->UnscheduledNode(SU);
}
+/// After backtracking, the hazard checker needs to be restored to a state
+/// corresponding to the current cycle.
+void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
+ HazardRec->Reset();
+
+ unsigned LookAhead = std::min((unsigned)Sequence.size(),
+ HazardRec->getMaxLookAhead());
+ if (LookAhead == 0)
+ return;
+
+ std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
+ unsigned HazardCycle = (*I)->getHeight();
+ for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
+ SUnit *SU = *I;
+ for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
+ HazardRec->RecedeCycle();
+ }
+ EmitNode(SU);
+ }
+}
+
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BTCycle in order to schedule a specific node.
-void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
- unsigned &CurCycle) {
- SUnit *OldSU = NULL;
- while (CurCycle > BtCycle) {
- OldSU = Sequence.back();
+void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
+ SUnit *OldSU = Sequence.back();
+ while (true) {
Sequence.pop_back();
if (SU->isSucc(OldSU))
// Don't try to remove SU from AvailableQueue.
SU->isAvailable = false;
+ // FIXME: use ready cycle instead of height
+ CurCycle = OldSU->getHeight();
UnscheduleNodeBottomUp(OldSU);
- --CurCycle;
AvailableQueue->setCurCycle(CurCycle);
+ if (OldSU == BtSU)
+ break;
+ OldSU = Sequence.back();
}
assert(!SU->isSucc(OldSU) && "Something is wrong!");
+ RestoreHazardCheckerBottomUp();
+
+ ReleasePending();
+
++NumBacktracks;
}
static bool isOperandOf(const SUnit *SU, SDNode *N) {
for (const SDNode *SUNode = SU->getNode(); SUNode;
- SUNode = SUNode->getFlaggedNode()) {
+ SUNode = SUNode->getGluedNode()) {
if (SUNode->isOperandOf(N))
return true;
}
@@ -390,18 +655,18 @@ static bool isOperandOf(const SUnit *SU, SDNode *N) {
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
- if (SU->getNode()->getFlaggedNode())
- return NULL;
-
SDNode *N = SU->getNode();
if (!N)
return NULL;
+ if (SU->getNode()->getGluedNode())
+ return NULL;
+
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
- if (VT == MVT::Flag)
+ if (VT == MVT::Glue)
return NULL;
else if (VT == MVT::Other)
TryUnfold = true;
@@ -409,7 +674,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
- if (VT == MVT::Flag)
+ if (VT == MVT::Glue)
return NULL;
}
@@ -441,13 +706,15 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
} else {
LoadSU = CreateNewSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
+
+ InitNumRegDefsLeft(LoadSU);
ComputeLatency(LoadSU);
}
SUnit *NewSU = CreateNewSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
-
+
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
@@ -457,6 +724,8 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
}
if (TID.isCommutable())
NewSU->isCommutable = true;
+
+ InitNumRegDefsLeft(NewSU);
ComputeLatency(NewSU);
// Record all the edges to and from the old SU, by category.
@@ -507,6 +776,10 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
RemovePred(SuccDep, D);
D.setSUnit(NewSU);
AddPred(SuccDep, D);
+ // Balance register pressure.
+ if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
+ && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
+ --NewSU->NumRegDefsLeft;
}
for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
SDep D = ChainSuccs[i];
@@ -517,7 +790,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
D.setSUnit(LoadSU);
AddPred(SuccDep, D);
}
- }
+ }
// Add a data dependency to reflect that NewSU reads the value defined
// by LoadSU.
@@ -633,52 +906,52 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
-/// CheckForLiveRegDef - Return true and update live register vector if the
-/// specified register def of the specified SUnit clobbers any "live" registers.
-static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
+/// CheckForLiveRegDef - Update the live register vector if the specified
+/// register def of the specified SUnit clobbers any "live" registers.
+static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
SmallVector<unsigned, 4> &LRegs,
const TargetRegisterInfo *TRI) {
- bool Added = false;
- if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
- if (RegAdded.insert(Reg)) {
+  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
+
+    // Check if this overlapping register is live.
+    if (!LiveRegDefs[*AliasI]) continue;
+
+    // Allow multiple uses of the same def.
+    if (LiveRegDefs[*AliasI] == SU) continue;
+
+    // Add the overlapping register to the set of interfering live regs.
+    if (RegAdded.insert(*AliasI))
-      LRegs.push_back(Reg);
+      LRegs.push_back(*AliasI);
- Added = true;
- }
}
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
- if (RegAdded.insert(*Alias)) {
- LRegs.push_back(*Alias);
- Added = true;
- }
- }
- return Added;
}
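+
+// Example (illustrative, X86): getOverlaps(X86::AX) visits AX itself plus its
+// aliases AL, AH, EAX (and RAX in 64-bit mode), so a live EAX def belonging
+// to another SUnit is reported as interference for a node that defines AX.
+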
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
-bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
- SmallVector<unsigned, 4> &LRegs){
+bool ScheduleDAGRRList::
+DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
if (NumLiveRegs == 0)
return false;
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
+ //
+ // If SU is the currently live definition of the same register that it uses,
+ // then we are free to schedule it.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- if (I->isAssignedRegDep())
+ if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
- for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
+ for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
if (Node->getOpcode() == ISD::INLINEASM) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
- --NumOps; // Ignore the flag operand.
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
+ --NumOps; // Ignore the glue operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
@@ -708,17 +981,151 @@ bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
+
return !LRegs.empty();
}
+/// Return a node that can be scheduled in this cycle. Requirements:
+/// (1) Ready: latency has been satisfied
+/// (2) No Hazards: resources are available
+/// (3) No Interferences: may unschedule to break register interferences.
+SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
+ SmallVector<SUnit*, 4> Interferences;
+ DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
+
+ SUnit *CurSU = AvailableQueue->pop();
+ while (CurSU) {
+ SmallVector<unsigned, 4> LRegs;
+ if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
+ break;
+ LRegsMap.insert(std::make_pair(CurSU, LRegs));
+
+ CurSU->isPending = true; // This SU is not in AvailableQueue right now.
+ Interferences.push_back(CurSU);
+ CurSU = AvailableQueue->pop();
+ }
+ if (CurSU) {
+ // Add the nodes that aren't ready back onto the available list.
+ for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
+ Interferences[i]->isPending = false;
+ assert(Interferences[i]->isAvailable && "must still be available");
+ AvailableQueue->push(Interferences[i]);
+ }
+ return CurSU;
+ }
+
+ // All candidates are delayed due to live physical reg dependencies.
+ // Try backtracking, code duplication, or inserting cross class copies
+ // to resolve it.
+ for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
+ SUnit *TrySU = Interferences[i];
+ SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+
+ // Try unscheduling up to the point where it's safe to schedule
+ // this node.
+ SUnit *BtSU = NULL;
+ unsigned LiveCycle = UINT_MAX;
+ for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
+ unsigned Reg = LRegs[j];
+ if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
+ BtSU = LiveRegGens[Reg];
+ LiveCycle = BtSU->getHeight();
+ }
+ }
+ if (!WillCreateCycle(TrySU, BtSU)) {
+ BacktrackBottomUp(TrySU, BtSU);
+
+ // Force the current node to be scheduled before the node that
+ // requires the physical reg dep.
+ if (BtSU->isAvailable) {
+ BtSU->isAvailable = false;
+ if (!BtSU->isPending)
+ AvailableQueue->remove(BtSU);
+ }
+ AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false, /*isArtificial=*/true));
+
+      // If one or more successors have been unscheduled, then the current
+      // node is no longer available. Schedule a successor that's now
+      // available instead.
+ if (!TrySU->isAvailable) {
+ CurSU = AvailableQueue->pop();
+ }
+ else {
+ CurSU = TrySU;
+ TrySU->isPending = false;
+ Interferences.erase(Interferences.begin()+i);
+ }
+ break;
+ }
+ }
+
+ if (!CurSU) {
+    // Can't backtrack. If it's too expensive to copy the value, then try
+    // duplicating the nodes that produce these "too expensive to copy"
+    // values to break the dependency. In case even that doesn't work,
+    // insert cross class copies.
+    // If it's not too expensive, i.e. cost != -1, issue copies.
+ SUnit *TrySU = Interferences[0];
+ SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ assert(LRegs.size() == 1 && "Can't handle this yet!");
+ unsigned Reg = LRegs[0];
+ SUnit *LRDef = LiveRegDefs[Reg];
+ EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
+ const TargetRegisterClass *RC =
+ TRI->getMinimalPhysRegClass(Reg, VT);
+ const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
+
+    // If the cross copy register class is null, then it must be possible to
+    // copy the value directly. Do not try to duplicate the def.
+ SUnit *NewDef = 0;
+ if (DestRC)
+ NewDef = CopyAndMoveSuccessors(LRDef);
+ else
+ DestRC = RC;
+ if (!NewDef) {
+ // Issue copies, these can be expensive cross register class copies.
+ SmallVector<SUnit*, 2> Copies;
+ InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
+ DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
+ AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
+ NewDef = Copies.back();
+ }
+
+ DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
+ LiveRegDefs[Reg] = NewDef;
+ AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
+ TrySU->isAvailable = false;
+ CurSU = NewDef;
+ }
+
+ assert(CurSU && "Unable to resolve live physical register dependencies!");
+
+ // Add the nodes that aren't ready back onto the available list.
+ for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
+ Interferences[i]->isPending = false;
+ // May no longer be available due to backtracking.
+ if (Interferences[i]->isAvailable) {
+ AvailableQueue->push(Interferences[i]);
+ }
+ }
+ return CurSU;
+}
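+
+// Sketch of the strategy above: interference on a live physical register is
+// resolved by, in order of preference, (1) backtracking to unschedule up to
+// the register's already-scheduled user (BtSU), (2) cloning the def with
+// CopyAndMoveSuccessors, or (3) inserting cross-register-class copies, with
+// an artificial edge added in each case to pin the required order.
+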
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
- unsigned CurCycle = 0;
-
// Release any predecessors of the special Exit node.
- ReleasePredecessors(&ExitSU, CurCycle);
+ ReleasePredecessors(&ExitSU);
// Add root to Available queue.
if (!SUnits.empty()) {
@@ -730,135 +1137,29 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
- SmallVector<SUnit*, 4> NotReady;
- DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty()) {
- bool Delayed = false;
- LRegsMap.clear();
- SUnit *CurSU = AvailableQueue->pop();
- while (CurSU) {
- SmallVector<unsigned, 4> LRegs;
- if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
- break;
- Delayed = true;
- LRegsMap.insert(std::make_pair(CurSU, LRegs));
+ DEBUG(dbgs() << "\n*** Examining Available\n";
+ AvailableQueue->dump(this));
- CurSU->isPending = true; // This SU is not in AvailableQueue right now.
- NotReady.push_back(CurSU);
- CurSU = AvailableQueue->pop();
- }
+ // Pick the best node to schedule taking all constraints into
+ // consideration.
+ SUnit *SU = PickNodeToScheduleBottomUp();
- // All candidates are delayed due to live physical reg dependencies.
- // Try backtracking, code duplication, or inserting cross class copies
- // to resolve it.
- if (Delayed && !CurSU) {
- for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
- SUnit *TrySU = NotReady[i];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
-
- // Try unscheduling up to the point where it's safe to schedule
- // this node.
- unsigned LiveCycle = CurCycle;
- for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
- unsigned Reg = LRegs[j];
- unsigned LCycle = LiveRegCycles[Reg];
- LiveCycle = std::min(LiveCycle, LCycle);
- }
- SUnit *OldSU = Sequence[LiveCycle];
- if (!WillCreateCycle(TrySU, OldSU)) {
- BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
- // Force the current node to be scheduled before the node that
- // requires the physical reg dep.
- if (OldSU->isAvailable) {
- OldSU->isAvailable = false;
- AvailableQueue->remove(OldSU);
- }
- AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false, /*isArtificial=*/true));
- // If one or more successors has been unscheduled, then the current
- // node is no longer avaialable. Schedule a successor that's now
- // available instead.
- if (!TrySU->isAvailable)
- CurSU = AvailableQueue->pop();
- else {
- CurSU = TrySU;
- TrySU->isPending = false;
- NotReady.erase(NotReady.begin()+i);
- }
- break;
- }
- }
+ AdvancePastStalls(SU);
- if (!CurSU) {
- // Can't backtrack. If it's too expensive to copy the value, then try
- // duplicate the nodes that produces these "too expensive to copy"
- // values to break the dependency. In case even that doesn't work,
- // insert cross class copies.
- // If it's not too expensive, i.e. cost != -1, issue copies.
- SUnit *TrySU = NotReady[0];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
- assert(LRegs.size() == 1 && "Can't handle this yet!");
- unsigned Reg = LRegs[0];
- SUnit *LRDef = LiveRegDefs[Reg];
- EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
- const TargetRegisterClass *RC =
- TRI->getMinimalPhysRegClass(Reg, VT);
- const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
-
- // If cross copy register class is null, then it must be possible copy
- // the value directly. Do not try duplicate the def.
- SUnit *NewDef = 0;
- if (DestRC)
- NewDef = CopyAndMoveSuccessors(LRDef);
- else
- DestRC = RC;
- if (!NewDef) {
- // Issue copies, these can be expensive cross register class copies.
- SmallVector<SUnit*, 2> Copies;
- InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n");
- AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
- NewDef = Copies.back();
- }
+ ScheduleNodeBottomUp(SU);
- DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n");
- LiveRegDefs[Reg] = NewDef;
- AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
- TrySU->isAvailable = false;
- CurSU = NewDef;
- }
-
- assert(CurSU && "Unable to resolve live physical register dependencies!");
- }
-
- // Add the nodes that aren't ready back onto the available list.
- for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
- NotReady[i]->isPending = false;
- // May no longer be available due to backtracking.
- if (NotReady[i]->isAvailable)
- AvailableQueue->push(NotReady[i]);
+ while (AvailableQueue->empty() && !PendingQueue.empty()) {
+ // Advance the cycle to free resources. Skip ahead to the next ready SU.
+ assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
+ AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
}
- NotReady.clear();
-
- if (CurSU)
- ScheduleNodeBottomUp(CurSU, CurCycle);
- ++CurCycle;
- AvailableQueue->setCurCycle(CurCycle);
}
// Reverse the order if it is bottom up.
std::reverse(Sequence.begin(), Sequence.end());
-
+
#ifndef NDEBUG
VerifySchedule(isBottomUp);
#endif
@@ -905,7 +1206,7 @@ void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
-void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
+void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
@@ -921,7 +1222,6 @@ void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
- unsigned CurCycle = 0;
AvailableQueue->setCurCycle(CurCycle);
// Release any successors of the special Entry node.
@@ -935,19 +1235,19 @@ void ScheduleDAGRRList::ListScheduleTopDown() {
SUnits[i].isAvailable = true;
}
}
-
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty()) {
SUnit *CurSU = AvailableQueue->pop();
-
+
if (CurSU)
- ScheduleNodeTopDown(CurSU, CurCycle);
+ ScheduleNodeTopDown(CurSU);
++CurCycle;
AvailableQueue->setCurCycle(CurCycle);
}
-
+
#ifndef NDEBUG
VerifySchedule(isBottomUp);
#endif
@@ -955,70 +1255,288 @@ void ScheduleDAGRRList::ListScheduleTopDown() {
//===----------------------------------------------------------------------===//
-// RegReductionPriorityQueue Implementation
+// RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
-//
+//
namespace {
- template<class SF>
- class RegReductionPriorityQueue;
-
- /// bu_ls_rr_sort - Priority function for bottom up register pressure
- // reduction scheduler.
- struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
- RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
- bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
- bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
-
- bool operator()(const SUnit* left, const SUnit* right) const;
+class RegReductionPQBase;
+
+struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
+};
+
+/// bu_ls_rr_sort - Priority function for bottom up register pressure
+// reduction scheduler.
+struct bu_ls_rr_sort : public queue_sort {
+ enum {
+ IsBottomUp = true,
+ HasReadyFilter = false
};
- // td_ls_rr_sort - Priority function for top down register pressure reduction
- // scheduler.
- struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
- RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
- td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
- td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
-
- bool operator()(const SUnit* left, const SUnit* right) const;
+ RegReductionPQBase *SPQ;
+ bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+ bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
+
+ bool operator()(SUnit* left, SUnit* right) const;
+};
+
+// td_ls_rr_sort - Priority function for top down register pressure reduction
+// scheduler.
+struct td_ls_rr_sort : public queue_sort {
+ enum {
+ IsBottomUp = false,
+ HasReadyFilter = false
};
- // src_ls_rr_sort - Priority function for source order scheduler.
- struct src_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
- RegReductionPriorityQueue<src_ls_rr_sort> *SPQ;
- src_ls_rr_sort(RegReductionPriorityQueue<src_ls_rr_sort> *spq)
- : SPQ(spq) {}
- src_ls_rr_sort(const src_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
-
- bool operator()(const SUnit* left, const SUnit* right) const;
+ RegReductionPQBase *SPQ;
+ td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
+ td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+};
+
+// src_ls_rr_sort - Priority function for source order scheduler.
+struct src_ls_rr_sort : public queue_sort {
+ enum {
+ IsBottomUp = true,
+ HasReadyFilter = false
};
- // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
- struct hybrid_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
- RegReductionPriorityQueue<hybrid_ls_rr_sort> *SPQ;
- hybrid_ls_rr_sort(RegReductionPriorityQueue<hybrid_ls_rr_sort> *spq)
- : SPQ(spq) {}
- hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
+ RegReductionPQBase *SPQ;
+ src_ls_rr_sort(RegReductionPQBase *spq)
+ : SPQ(spq) {}
+ src_ls_rr_sort(const src_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool operator()(SUnit* left, SUnit* right) const;
+};
- bool operator()(const SUnit* left, const SUnit* right) const;
+// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
+struct hybrid_ls_rr_sort : public queue_sort {
+ enum {
+ IsBottomUp = true,
+ HasReadyFilter = true
};
- // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
- // scheduler.
- struct ilp_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
- RegReductionPriorityQueue<ilp_ls_rr_sort> *SPQ;
- ilp_ls_rr_sort(RegReductionPriorityQueue<ilp_ls_rr_sort> *spq)
- : SPQ(spq) {}
- ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
- : SPQ(RHS.SPQ) {}
+ RegReductionPQBase *SPQ;
+ hybrid_ls_rr_sort(RegReductionPQBase *spq)
+ : SPQ(spq) {}
+ hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool isReady(SUnit *SU, unsigned CurCycle) const;
- bool operator()(const SUnit* left, const SUnit* right) const;
+ bool operator()(SUnit* left, SUnit* right) const;
+};
+
+// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
+// scheduler.
+struct ilp_ls_rr_sort : public queue_sort {
+ enum {
+ IsBottomUp = true,
+ HasReadyFilter = true
};
-} // end anonymous namespace
+
+ RegReductionPQBase *SPQ;
+ ilp_ls_rr_sort(RegReductionPQBase *spq)
+ : SPQ(spq) {}
+ ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool isReady(SUnit *SU, unsigned CurCycle) const;
+
+ bool operator()(SUnit* left, SUnit* right) const;
+};
+
+class RegReductionPQBase : public SchedulingPriorityQueue {
+protected:
+ std::vector<SUnit*> Queue;
+ unsigned CurQueueId;
+ bool TracksRegPressure;
+
+ // SUnits - The SUnits for the current graph.
+ std::vector<SUnit> *SUnits;
+
+ MachineFunction &MF;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
+ ScheduleDAGRRList *scheduleDAG;
+
+ // SethiUllmanNumbers - The SethiUllman number for each node.
+ std::vector<unsigned> SethiUllmanNumbers;
+
+ /// RegPressure - Tracking current reg pressure per register class.
+ ///
+ std::vector<unsigned> RegPressure;
+
+ /// RegLimit - Tracking the number of allocatable registers per register
+ /// class.
+ std::vector<unsigned> RegLimit;
+
+public:
+ RegReductionPQBase(MachineFunction &mf,
+ bool hasReadyFilter,
+ bool tracksrp,
+ const TargetInstrInfo *tii,
+ const TargetRegisterInfo *tri,
+ const TargetLowering *tli)
+ : SchedulingPriorityQueue(hasReadyFilter),
+ CurQueueId(0), TracksRegPressure(tracksrp),
+ MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
+ if (TracksRegPressure) {
+ unsigned NumRC = TRI->getNumRegClasses();
+ RegLimit.resize(NumRC);
+ RegPressure.resize(NumRC);
+ std::fill(RegLimit.begin(), RegLimit.end(), 0);
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I)
+ RegLimit[(*I)->getID()] = tli->getRegPressureLimit(*I, MF);
+ }
+ }
+
+ void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
+ scheduleDAG = scheduleDag;
+ }
+
+ ScheduleHazardRecognizer* getHazardRec() {
+ return scheduleDAG->getHazardRec();
+ }
+
+ void initNodes(std::vector<SUnit> &sunits);
+
+ void addNode(const SUnit *SU);
+
+ void updateNode(const SUnit *SU);
+
+ void releaseState() {
+ SUnits = 0;
+ SethiUllmanNumbers.clear();
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ }
+
+ unsigned getNodePriority(const SUnit *SU) const;
+
+ unsigned getNodeOrdering(const SUnit *SU) const {
+ return scheduleDAG->DAG->GetOrdering(SU->getNode());
+ }
+
+ bool empty() const { return Queue.empty(); }
+
+ void push(SUnit *U) {
+ assert(!U->NodeQueueId && "Node in the queue already");
+ U->NodeQueueId = ++CurQueueId;
+ Queue.push_back(U);
+ }
+
+ void remove(SUnit *SU) {
+ assert(!Queue.empty() && "Queue is empty!");
+ assert(SU->NodeQueueId != 0 && "Not in queue!");
+ std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
+ SU);
+ if (I != prior(Queue.end()))
+ std::swap(*I, Queue.back());
+ Queue.pop_back();
+ SU->NodeQueueId = 0;
+ }
+
+ bool tracksRegPressure() const { return TracksRegPressure; }
+
+ void dumpRegPressure() const;
+
+ bool HighRegPressure(const SUnit *SU) const;
+
+ bool MayReduceRegPressure(SUnit *SU);
+
+ void ScheduledNode(SUnit *SU);
+
+ void UnscheduledNode(SUnit *SU);
+
+protected:
+ bool canClobber(const SUnit *SU, const SUnit *Op);
+ void AddPseudoTwoAddrDeps();
+ void PrescheduleNodesWithMultipleUses();
+ void CalculateSethiUllmanNumbers();
+};
+
+template<class SF>
+class RegReductionPriorityQueue : public RegReductionPQBase {
+ static SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker) {
+ std::vector<SUnit *>::iterator Best = Q.begin();
+ for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
+ E = Q.end(); I != E; ++I)
+ if (Picker(*Best, *I))
+ Best = I;
+ SUnit *V = *Best;
+ if (Best != prior(Q.end()))
+ std::swap(*Best, Q.back());
+ Q.pop_back();
+ return V;
+ }
+
+ SF Picker;
+
+public:
+ RegReductionPriorityQueue(MachineFunction &mf,
+ bool tracksrp,
+ const TargetInstrInfo *tii,
+ const TargetRegisterInfo *tri,
+ const TargetLowering *tli)
+ : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
+ Picker(this) {}
+
+ bool isBottomUp() const { return SF::IsBottomUp; }
+
+ bool isReady(SUnit *U) const {
+ return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
+ }
+
+ SUnit *pop() {
+ if (Queue.empty()) return NULL;
+
+ SUnit *V = popFromQueue(Queue, Picker);
+ V->NodeQueueId = 0;
+ return V;
+ }
+
+ void dump(ScheduleDAG *DAG) const {
+ // Emulate pop() without clobbering NodeQueueIds.
+ std::vector<SUnit*> DumpQueue = Queue;
+ SF DumpPicker = Picker;
+ while (!DumpQueue.empty()) {
+ SUnit *SU = popFromQueue(DumpQueue, DumpPicker);
+ if (isBottomUp())
+ dbgs() << "Height " << SU->getHeight() << ": ";
+ else
+ dbgs() << "Depth " << SU->getDepth() << ": ";
+ SU->dump(DAG);
+ }
+ }
+};
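+
+// Implementation note (sketch): popFromQueue keeps the queue as an unordered
+// vector and selects the best element with a linear scan through Picker. The
+// O(n) pop allows node priorities to change while nodes are queued; a
+// heap-based std::priority_queue would keep returning stale priorities.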
+
+typedef RegReductionPriorityQueue<bu_ls_rr_sort>
+BURegReductionPriorityQueue;
+
+typedef RegReductionPriorityQueue<td_ls_rr_sort>
+TDRegReductionPriorityQueue;
+
+typedef RegReductionPriorityQueue<src_ls_rr_sort>
+SrcRegReductionPriorityQueue;
+
+typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
+HybridBURRPriorityQueue;
+
+typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
+ILPBURRPriorityQueue;
+} // end anonymous namespace
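+
+// Usage sketch (illustrative; the actual factory functions live elsewhere in
+// this file): a scheduler constructs one of the typedefs above, e.g.
+//   BURegReductionPriorityQueue *PQ =
+//     new BURegReductionPriorityQueue(MF, /*tracksrp=*/false, TII, TRI, 0);
+// and the owning ScheduleDAGRRList registers itself via PQ->setScheduleDAG().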
+
+//===----------------------------------------------------------------------===//
+// Static Node Priority for Register Pressure Reduction
+//===----------------------------------------------------------------------===//
/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
@@ -1045,413 +1563,283 @@ CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
if (SethiUllmanNumber == 0)
SethiUllmanNumber = 1;
-
+
return SethiUllmanNumber;
}
-namespace {
- template<class SF>
- class RegReductionPriorityQueue : public SchedulingPriorityQueue {
- std::vector<SUnit*> Queue;
- SF Picker;
- unsigned CurQueueId;
- bool TracksRegPressure;
-
- protected:
- // SUnits - The SUnits for the current graph.
- std::vector<SUnit> *SUnits;
-
- MachineFunction &MF;
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- const TargetLowering *TLI;
- ScheduleDAGRRList *scheduleDAG;
-
- // SethiUllmanNumbers - The SethiUllman number for each node.
- std::vector<unsigned> SethiUllmanNumbers;
-
- /// RegPressure - Tracking current reg pressure per register class.
- ///
- std::vector<unsigned> RegPressure;
-
- /// RegLimit - Tracking the number of allocatable registers per register
- /// class.
- std::vector<unsigned> RegLimit;
-
- public:
- RegReductionPriorityQueue(MachineFunction &mf,
- bool tracksrp,
- const TargetInstrInfo *tii,
- const TargetRegisterInfo *tri,
- const TargetLowering *tli)
- : Picker(this), CurQueueId(0), TracksRegPressure(tracksrp),
- MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
- if (TracksRegPressure) {
- unsigned NumRC = TRI->getNumRegClasses();
- RegLimit.resize(NumRC);
- RegPressure.resize(NumRC);
- std::fill(RegLimit.begin(), RegLimit.end(), 0);
- std::fill(RegPressure.begin(), RegPressure.end(), 0);
- for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
- E = TRI->regclass_end(); I != E; ++I)
- RegLimit[(*I)->getID()] = tli->getRegPressureLimit(*I, MF);
- }
- }
-
- void initNodes(std::vector<SUnit> &sunits) {
- SUnits = &sunits;
- // Add pseudo dependency edges for two-address nodes.
- AddPseudoTwoAddrDeps();
- // Reroute edges to nodes with multiple uses.
- PrescheduleNodesWithMultipleUses();
- // Calculate node priorities.
- CalculateSethiUllmanNumbers();
- }
-
- void addNode(const SUnit *SU) {
- unsigned SUSize = SethiUllmanNumbers.size();
- if (SUnits->size() > SUSize)
- SethiUllmanNumbers.resize(SUSize*2, 0);
- CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
- }
-
- void updateNode(const SUnit *SU) {
- SethiUllmanNumbers[SU->NodeNum] = 0;
- CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
- }
+/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
+/// scheduling units.
+void RegReductionPQBase::CalculateSethiUllmanNumbers() {
+ SethiUllmanNumbers.assign(SUnits->size(), 0);
- void releaseState() {
- SUnits = 0;
- SethiUllmanNumbers.clear();
- std::fill(RegPressure.begin(), RegPressure.end(), 0);
- }
+ for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
+ CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
+}
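+
+// Textbook illustration (not from the source; the computation above also
+// special-cases certain nodes): for an expression tree (a+b)*(c+d), each leaf
+// gets Sethi-Ullman number 1, each add gets 2, and the multiply gets 3, i.e.
+// the number of registers needed to evaluate the subtree without spilling.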
- unsigned getNodePriority(const SUnit *SU) const {
- assert(SU->NodeNum < SethiUllmanNumbers.size());
- unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
- if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
- // CopyToReg should be close to its uses to facilitate coalescing and
- // avoid spilling.
- return 0;
- if (Opc == TargetOpcode::EXTRACT_SUBREG ||
- Opc == TargetOpcode::SUBREG_TO_REG ||
- Opc == TargetOpcode::INSERT_SUBREG)
- // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
- // close to their uses to facilitate coalescing.
- return 0;
- if (SU->NumSuccs == 0 && SU->NumPreds != 0)
- // If SU does not have a register use, i.e. it doesn't produce a value
- // that would be consumed (e.g. store), then it terminates a chain of
- // computation. Give it a large SethiUllman number so it will be
- // scheduled right before its predecessors that it doesn't lengthen
- // their live ranges.
- return 0xffff;
- if (SU->NumPreds == 0 && SU->NumSuccs != 0)
- // If SU does not have a register def, schedule it close to its uses
- // because it does not lengthen any live ranges.
- return 0;
- return SethiUllmanNumbers[SU->NodeNum];
- }
+void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
+ SUnits = &sunits;
+ // Add pseudo dependency edges for two-address nodes.
+ AddPseudoTwoAddrDeps();
+ // Reroute edges to nodes with multiple uses.
+ if (!TracksRegPressure)
+ PrescheduleNodesWithMultipleUses();
+ // Calculate node priorities.
+ CalculateSethiUllmanNumbers();
+}
- unsigned getNodeOrdering(const SUnit *SU) const {
- return scheduleDAG->DAG->GetOrdering(SU->getNode());
- }
+void RegReductionPQBase::addNode(const SUnit *SU) {
+ unsigned SUSize = SethiUllmanNumbers.size();
+ if (SUnits->size() > SUSize)
+ SethiUllmanNumbers.resize(SUSize*2, 0);
+ CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
+}
- bool empty() const { return Queue.empty(); }
-
- void push(SUnit *U) {
- assert(!U->NodeQueueId && "Node in the queue already");
- U->NodeQueueId = ++CurQueueId;
- Queue.push_back(U);
- }
+void RegReductionPQBase::updateNode(const SUnit *SU) {
+ SethiUllmanNumbers[SU->NodeNum] = 0;
+ CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
+}
- SUnit *pop() {
- if (empty()) return NULL;
- std::vector<SUnit *>::iterator Best = Queue.begin();
- for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
- E = Queue.end(); I != E; ++I)
- if (Picker(*Best, *I))
- Best = I;
- SUnit *V = *Best;
- if (Best != prior(Queue.end()))
- std::swap(*Best, Queue.back());
- Queue.pop_back();
- V->NodeQueueId = 0;
- return V;
- }
+// Lower priority here means schedule further down, toward the end of the
+// block. For bottom-up scheduling, lower priority SUs are therefore popped
+// and scheduled before higher priority SUs.
+unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
+ assert(SU->NodeNum < SethiUllmanNumbers.size());
+ unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
+ if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
+ // CopyToReg should be close to its uses to facilitate coalescing and
+ // avoid spilling.
+ return 0;
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::INSERT_SUBREG)
+ // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
+ // close to their uses to facilitate coalescing.
+ return 0;
+ if (SU->NumSuccs == 0 && SU->NumPreds != 0)
+ // If SU does not have a register use, i.e. it doesn't produce a value
+ // that would be consumed (e.g. store), then it terminates a chain of
+ // computation. Give it a large SethiUllman number so it will be
+ // scheduled right before its predecessors that it doesn't lengthen
+ // their live ranges.
+ return 0xffff;
+ if (SU->NumPreds == 0 && SU->NumSuccs != 0)
+ // If SU does not have a register def, schedule it close to its uses
+ // because it does not lengthen any live ranges.
+ return 0;
+ return SethiUllmanNumbers[SU->NodeNum];
+}
- void remove(SUnit *SU) {
- assert(!Queue.empty() && "Queue is empty!");
- assert(SU->NodeQueueId != 0 && "Not in queue!");
- std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
- SU);
- if (I != prior(Queue.end()))
- std::swap(*I, Queue.back());
- Queue.pop_back();
- SU->NodeQueueId = 0;
- }
+//===----------------------------------------------------------------------===//
+// Register Pressure Tracking
+//===----------------------------------------------------------------------===//
- bool HighRegPressure(const SUnit *SU) const {
- if (!TLI)
- return false;
+void RegReductionPQBase::dumpRegPressure() const {
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I) {
+ const TargetRegisterClass *RC = *I;
+ unsigned Id = RC->getID();
+ unsigned RP = RegPressure[Id];
+ if (!RP) continue;
+ DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
+ << '\n');
+ }
+}
- for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
- continue;
- SUnit *PredSU = I->getSUnit();
- const SDNode *PN = PredSU->getNode();
- if (!PN->isMachineOpcode()) {
- if (PN->getOpcode() == ISD::CopyFromReg) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- unsigned Cost = TLI->getRepRegClassCostFor(VT);
- if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
- return true;
- }
- continue;
- }
- unsigned POpc = PN->getMachineOpcode();
- if (POpc == TargetOpcode::IMPLICIT_DEF)
- continue;
- if (POpc == TargetOpcode::EXTRACT_SUBREG) {
- EVT VT = PN->getOperand(0).getValueType();
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- unsigned Cost = TLI->getRepRegClassCostFor(VT);
- // Check if this increases register pressure of the specific register
- // class to the point where it would cause spills.
- if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
- return true;
- continue;
- } else if (POpc == TargetOpcode::INSERT_SUBREG ||
- POpc == TargetOpcode::SUBREG_TO_REG) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- unsigned Cost = TLI->getRepRegClassCostFor(VT);
- // Check if this increases register pressure of the specific register
- // class to the point where it would cause spills.
- if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
- return true;
- continue;
- }
- unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
- for (unsigned i = 0; i != NumDefs; ++i) {
- EVT VT = PN->getValueType(i);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- if (RegPressure[RCId] >= RegLimit[RCId])
- return true; // Reg pressure already high.
- unsigned Cost = TLI->getRepRegClassCostFor(VT);
- if (!PN->hasAnyUseOfValue(i))
- continue;
- // Check if this increases register pressure of the specific register
- // class to the point where it would cause spills.
- if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
- return true;
- }
- }
+bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
+ if (!TLI)
+ return false;
- return false;
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ // NumRegDefsLeft is zero when enough uses of this node have been scheduled
+ // to cover the number of registers defined (they are all live).
+ if (PredSU->NumRegDefsLeft == 0) {
+ continue;
+ }
+ for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
+ RegDefPos.IsValid(); RegDefPos.Advance()) {
+ EVT VT = RegDefPos.GetValue();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
}
+ }
+ return false;
+}
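+
+// Worked example (illustrative numbers): if RegLimit for a class is 6 and
+// RegPressure is currently 5, a predecessor with a remaining def of cost 1 in
+// that class gives 5 + 1 >= 6, so the candidate is reported as high pressure.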
- void ScheduledNode(SUnit *SU) {
- if (!TracksRegPressure)
- return;
-
- const SDNode *N = SU->getNode();
- if (!N->isMachineOpcode()) {
- if (N->getOpcode() != ISD::CopyToReg)
- return;
- } else {
- unsigned Opc = N->getMachineOpcode();
- if (Opc == TargetOpcode::EXTRACT_SUBREG ||
- Opc == TargetOpcode::INSERT_SUBREG ||
- Opc == TargetOpcode::SUBREG_TO_REG ||
- Opc == TargetOpcode::REG_SEQUENCE ||
- Opc == TargetOpcode::IMPLICIT_DEF)
- return;
- }
+bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) {
+ const SDNode *N = SU->getNode();
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
- continue;
- SUnit *PredSU = I->getSUnit();
- if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
- continue;
- const SDNode *PN = PredSU->getNode();
- if (!PN->isMachineOpcode()) {
- if (PN->getOpcode() == ISD::CopyFromReg) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- }
- continue;
- }
- unsigned POpc = PN->getMachineOpcode();
- if (POpc == TargetOpcode::IMPLICIT_DEF)
- continue;
- if (POpc == TargetOpcode::EXTRACT_SUBREG) {
- EVT VT = PN->getOperand(0).getValueType();
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- continue;
- } else if (POpc == TargetOpcode::INSERT_SUBREG ||
- POpc == TargetOpcode::SUBREG_TO_REG) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- continue;
- }
- unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
- for (unsigned i = 0; i != NumDefs; ++i) {
- EVT VT = PN->getValueType(i);
- if (!PN->hasAnyUseOfValue(i))
- continue;
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- }
- }
+ if (!N->isMachineOpcode() || !SU->NumSuccs)
+ return false;
- // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
- // may transfer data dependencies to CopyToReg.
- if (SU->NumSuccs && N->isMachineOpcode()) {
- unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
- for (unsigned i = 0; i != NumDefs; ++i) {
- EVT VT = N->getValueType(i);
- if (!N->hasAnyUseOfValue(i))
- continue;
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
- // Register pressure tracking is imprecise. This can happen.
- RegPressure[RCId] = 0;
- else
- RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
- }
- }
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = N->getValueType(i);
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] >= RegLimit[RCId])
+ return true;
+ }
+ return false;
+}
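+
+// Example (illustrative): a machine node defining a value in a class already
+// at its pressure limit returns true here; hybrid_ls_rr_sort::isReady() below
+// uses this to admit such nodes early in the hope of reducing pressure.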
+
+void RegReductionPQBase::ScheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
- dumpRegPressure();
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ // NumRegDefsLeft is zero when enough uses of this node have been scheduled
+ // to cover the number of registers defined (they are all live).
+ if (PredSU->NumRegDefsLeft == 0) {
+ continue;
+ }
+ // FIXME: The ScheduleDAG currently loses information about which of a
+ // node's values is consumed by each dependence. Consequently, if the node
+ // defines multiple register classes, we don't know which to pressurize
+ // here. Instead the following loop consumes the register defs in an
+ // arbitrary order. At least it handles the common case of clustered loads
+ // to the same class. For precise liveness, each SDep needs to indicate the
+ // result number. But that tightly couples the ScheduleDAG with the
+ // SelectionDAG making updates tricky. A simpler hack would be to attach a
+ // value type or register class to SDep.
+ //
+ // The most important aspect of register tracking is balancing the increase
+ // here with the reduction further below. Note that this SU may use multiple
+  // defs in PredSU. They can't be determined here, but we've already
+ // compensated by reducing NumRegDefsLeft in PredSU during
+ // ScheduleDAGSDNodes::AddSchedEdges.
+ --PredSU->NumRegDefsLeft;
+ unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
+ for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
+ RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
+ if (SkipRegDefs)
+ continue;
+ EVT VT = RegDefPos.GetValue();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ break;
}
+ }
- void UnscheduledNode(SUnit *SU) {
- if (!TracksRegPressure)
- return;
-
- const SDNode *N = SU->getNode();
- if (!N->isMachineOpcode()) {
- if (N->getOpcode() != ISD::CopyToReg)
- return;
- } else {
- unsigned Opc = N->getMachineOpcode();
- if (Opc == TargetOpcode::EXTRACT_SUBREG ||
- Opc == TargetOpcode::INSERT_SUBREG ||
- Opc == TargetOpcode::SUBREG_TO_REG ||
- Opc == TargetOpcode::REG_SEQUENCE ||
- Opc == TargetOpcode::IMPLICIT_DEF)
- return;
- }
+ // We should have this assert, but there may be dead SDNodes that never
+ // materialize as SUnits, so they don't appear to generate liveness.
+ //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
+ int SkipRegDefs = (int)SU->NumRegDefsLeft;
+ for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
+ RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
+ if (SkipRegDefs > 0)
+ continue;
+ EVT VT = RegDefPos.GetValue();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) {
+ // Register pressure tracking is imprecise. This can happen. But we try
+ // hard not to let it happen because it likely results in poor scheduling.
+ DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
+ RegPressure[RCId] = 0;
+ }
+ else {
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
+ dumpRegPressure();
+}
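+
+// Bookkeeping sketch: in bottom-up order, scheduling SU makes its operands
+// (predecessor defs) live, raising RegPressure, and retires SU's own defs,
+// whose uses were all scheduled earlier, lowering it. UnscheduledNode below
+// applies the inverse adjustment when the scheduler backtracks.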
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
- continue;
- SUnit *PredSU = I->getSUnit();
- if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
- continue;
- const SDNode *PN = PredSU->getNode();
- if (!PN->isMachineOpcode()) {
- if (PN->getOpcode() == ISD::CopyFromReg) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- }
- continue;
- }
- unsigned POpc = PN->getMachineOpcode();
- if (POpc == TargetOpcode::IMPLICIT_DEF)
- continue;
- if (POpc == TargetOpcode::EXTRACT_SUBREG) {
- EVT VT = PN->getOperand(0).getValueType();
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- continue;
- } else if (POpc == TargetOpcode::INSERT_SUBREG ||
- POpc == TargetOpcode::SUBREG_TO_REG) {
- EVT VT = PN->getValueType(0);
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- continue;
- }
- unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
- for (unsigned i = 0; i != NumDefs; ++i) {
- EVT VT = PN->getValueType(i);
- if (!PN->hasAnyUseOfValue(i))
- continue;
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
- // Register pressure tracking is imprecise. This can happen.
- RegPressure[RCId] = 0;
- else
- RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
- }
- }
+void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
+
+ const SDNode *N = SU->getNode();
+ if (!N->isMachineOpcode()) {
+ if (N->getOpcode() != ISD::CopyToReg)
+ return;
+ } else {
+ unsigned Opc = N->getMachineOpcode();
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::REG_SEQUENCE ||
+ Opc == TargetOpcode::IMPLICIT_DEF)
+ return;
+ }
- // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
- // may transfer data dependencies to CopyToReg.
- if (SU->NumSuccs && N->isMachineOpcode()) {
- unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
- for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
- EVT VT = N->getValueType(i);
- if (VT == MVT::Flag || VT == MVT::Other)
- continue;
- if (!N->hasAnyUseOfValue(i))
- continue;
- unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
- RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
- }
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
+ // counts data deps.
+ if (PredSU->NumSuccsLeft != PredSU->Succs.size())
+ continue;
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
}
-
- dumpRegPressure();
+ continue;
}
-
- void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
- scheduleDAG = scheduleDag;
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
}
-
- void dumpRegPressure() const {
- for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
- E = TRI->regclass_end(); I != E; ++I) {
- const TargetRegisterClass *RC = *I;
- unsigned Id = RC->getID();
- unsigned RP = RegPressure[Id];
- if (!RP) continue;
- DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
- << '\n');
- }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
+ // Register pressure tracking is imprecise. This can happen.
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
}
+ }
- protected:
- bool canClobber(const SUnit *SU, const SUnit *Op);
- void AddPseudoTwoAddrDeps();
- void PrescheduleNodesWithMultipleUses();
- void CalculateSethiUllmanNumbers();
- };
-
- typedef RegReductionPriorityQueue<bu_ls_rr_sort>
- BURegReductionPriorityQueue;
-
- typedef RegReductionPriorityQueue<td_ls_rr_sort>
- TDRegReductionPriorityQueue;
-
- typedef RegReductionPriorityQueue<src_ls_rr_sort>
- SrcRegReductionPriorityQueue;
-
- typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
- HybridBURRPriorityQueue;
+ // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+ // may transfer data dependencies to CopyToReg.
+ if (SU->NumSuccs && N->isMachineOpcode()) {
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
+ EVT VT = N->getValueType(i);
+ if (VT == MVT::Glue || VT == MVT::Other)
+ continue;
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ }
- typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
- ILPBURRPriorityQueue;
+ dumpRegPressure();
}
+//===----------------------------------------------------------------------===//
+// Dynamic Node Priority for Register Pressure Reduction
+//===----------------------------------------------------------------------===//
+
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
@@ -1483,9 +1871,123 @@ static unsigned calcMaxScratches(const SUnit *SU) {
return Scratches;
}
-template <typename RRSort>
-static bool BURRSort(const SUnit *left, const SUnit *right,
- const RegReductionPriorityQueue<RRSort> *SPQ) {
+/// hasOnlyLiveOutUses - Return true if SU has a single value successor that is a
+/// CopyToReg to a virtual register. This SU def is probably a liveout and
+/// it has no other use. It should be scheduled closer to the terminator.
+static bool hasOnlyLiveOutUses(const SUnit *SU) {
+ bool RetVal = false;
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isCtrl()) continue;
+ const SUnit *SuccSU = I->getSUnit();
+ if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
+ unsigned Reg =
+ cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ RetVal = true;
+ continue;
+ }
+ }
+ return false;
+ }
+ return RetVal;
+}
+
+/// UnitsSharePred - Return true if the two scheduling units share a common
+/// data predecessor.
+static bool UnitsSharePred(const SUnit *left, const SUnit *right) {
+ SmallSet<const SUnit*, 4> Preds;
+  for (SUnit::const_pred_iterator I = left->Preds.begin(),
+       E = left->Preds.end(); I != E; ++I) {
+ if (I->isCtrl()) continue; // ignore chain preds
+ Preds.insert(I->getSUnit());
+ }
+  for (SUnit::const_pred_iterator I = right->Preds.begin(),
+       E = right->Preds.end(); I != E; ++I) {
+ if (I->isCtrl()) continue; // ignore chain preds
+ if (Preds.count(I->getSUnit()))
+ return true;
+ }
+ return false;
+}
+
+// Check for either a dependence (latency) or resource (hazard) stall.
+//
+// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
+static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
+ if ((int)SPQ->getCurCycle() < Height) return true;
+ if (SPQ->getHazardRec()->getHazardType(SU, 0)
+ != ScheduleHazardRecognizer::NoHazard)
+ return true;
+ return false;
+}
+
+// Return -1 if left has higher priority, 1 if right has higher priority.
+// Return 0 if latency-based priority is equivalent.
+static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
+ RegReductionPQBase *SPQ) {
+ // If the two nodes share an operand and one of them has a single
+ // use that is a live out copy, favor the one that is live out. Otherwise
+ // it will be difficult to eliminate the copy if the instruction is a
+ // loop induction variable update. e.g.
+ // BB:
+ // sub r1, r3, #1
+ // str r0, [r2, r3]
+ // mov r3, r1
+ // cmp
+ // bne BB
+ bool SharePred = UnitsSharePred(left, right);
+ // FIXME: Only adjust if BB is a loop back edge.
+ // FIXME: What's the cost of a copy?
+ int LBonus = (SharePred && hasOnlyLiveOutUses(left)) ? 1 : 0;
+ int RBonus = (SharePred && hasOnlyLiveOutUses(right)) ? 1 : 0;
+ int LHeight = (int)left->getHeight() - LBonus;
+ int RHeight = (int)right->getHeight() - RBonus;
+
+ bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
+ BUHasStall(left, LHeight, SPQ);
+ bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
+ BUHasStall(right, RHeight, SPQ);
+
+  // If scheduling one of the nodes will cause a pipeline stall, delay it.
+  // If scheduling both will cause pipeline stalls, sort them according to
+  // their height.
+ if (LStall) {
+ if (!RStall)
+ return 1;
+ if (LHeight != RHeight)
+ return LHeight > RHeight ? 1 : -1;
+ } else if (RStall)
+ return -1;
+
+ // If either node is scheduling for latency, sort them by height/depth
+ // and latency.
+ if (!checkPref || (left->SchedulingPref == Sched::Latency ||
+ right->SchedulingPref == Sched::Latency)) {
+ if (DisableSchedCycles) {
+ if (LHeight != RHeight)
+ return LHeight > RHeight ? 1 : -1;
+ }
+ else {
+      // If neither instruction stalls (!LStall && !RStall) then its height is
+      // already covered, so only its depth matters. We also reach this point
+      // if both stall but have the same height.
+ unsigned LDepth = left->getDepth();
+ unsigned RDepth = right->getDepth();
+ if (LDepth != RDepth) {
+ DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
+ << ") depth " << LDepth << " vs SU (" << right->NodeNum
+ << ") depth " << RDepth << "\n");
+ return LDepth < RDepth ? 1 : -1;
+ }
+ }
+ if (left->Latency != right->Latency)
+ return left->Latency > right->Latency ? 1 : -1;
+ }
+ return 0;
+}
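+
+// Illustrative example (checkPref == false, no hazards or live-out bonuses):
+// at CurCycle 4, a left node of height 7 stalls while a right node of height
+// 3 does not, so the function returns 1 and right is scheduled first; if both
+// stall, the taller node is deferred the same way.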
+
+static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
unsigned LPriority = SPQ->getNodePriority(left);
unsigned RPriority = SPQ->getNodePriority(right);
if (LPriority != RPriority)
@@ -1519,24 +2021,31 @@ static bool BURRSort(const SUnit *left, const SUnit *right,
if (LScratch != RScratch)
return LScratch > RScratch;
- if (left->getHeight() != right->getHeight())
- return left->getHeight() > right->getHeight();
-
- if (left->getDepth() != right->getDepth())
- return left->getDepth() < right->getDepth();
+ if (!DisableSchedCycles) {
+ int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
+ if (result != 0)
+ return result > 0;
+ }
+ else {
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
- assert(left->NodeQueueId && right->NodeQueueId &&
+ if (left->getDepth() != right->getDepth())
+ return left->getDepth() < right->getDepth();
+ }
+
+ assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
return (left->NodeQueueId > right->NodeQueueId);
}
// Bottom up
-bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
+bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
return BURRSort(left, right, SPQ);
}
// Source order, otherwise bottom up.
-bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
+bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
unsigned LOrder = SPQ->getNodeOrdering(left);
unsigned ROrder = SPQ->getNodeOrdering(right);
@@ -1548,49 +2057,69 @@ bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
return BURRSort(left, right, SPQ);
}
-bool hybrid_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
+// If the time between now and when the instruction will be ready can cover
+// the spill code, then avoid adding it to the ready queue. This gives long
+// stalls highest priority and allows hoisting across calls. It should also
+// speed up processing the available queue.
+bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
+ static const unsigned ReadyDelay = 3;
+
+ if (SPQ->MayReduceRegPressure(SU)) return true;
+
+ if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
+
+ if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
+ != ScheduleHazardRecognizer::NoHazard)
+ return false;
+
+ return true;
+}
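+
+// Example (illustrative, assuming no pressure reduction): with ReadyDelay = 3
+// and CurCycle = 10, an SU of height 14 stays pending (14 > 10 + 3), while an
+// SU of height 12 becomes available if the hazard recognizer sees NoHazard.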
+
+// Return true if right should be scheduled with higher priority than left.
+bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
+ if (left->isCall || right->isCall)
+ // No way to compute latency of calls.
+ return BURRSort(left, right, SPQ);
+
bool LHigh = SPQ->HighRegPressure(left);
bool RHigh = SPQ->HighRegPressure(right);
// Avoid causing spills. If register pressure is high, schedule for
// register pressure reduction.
- if (LHigh && !RHigh)
+ if (LHigh && !RHigh) {
+ DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
+ << right->NodeNum << ")\n");
return true;
- else if (!LHigh && RHigh)
+ }
+ else if (!LHigh && RHigh) {
+ DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
+ << left->NodeNum << ")\n");
return false;
+ }
else if (!LHigh && !RHigh) {
- // Low register pressure situation, schedule for latency if possible.
- bool LStall = left->SchedulingPref == Sched::Latency &&
- SPQ->getCurCycle() < left->getHeight();
- bool RStall = right->SchedulingPref == Sched::Latency &&
- SPQ->getCurCycle() < right->getHeight();
- // If scheduling one of the node will cause a pipeline stall, delay it.
- // If scheduling either one of the node will cause a pipeline stall, sort
- // them according to their height.
- // If neither will cause a pipeline stall, try to reduce register pressure.
- if (LStall) {
- if (!RStall)
- return true;
- if (left->getHeight() != right->getHeight())
- return left->getHeight() > right->getHeight();
- } else if (RStall)
- return false;
-
- // If either node is scheduling for latency, sort them by height and latency
- // first.
- if (left->SchedulingPref == Sched::Latency ||
- right->SchedulingPref == Sched::Latency) {
- if (left->getHeight() != right->getHeight())
- return left->getHeight() > right->getHeight();
- if (left->Latency != right->Latency)
- return left->Latency > right->Latency;
- }
+ int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
+ if (result != 0)
+ return result > 0;
}
-
return BURRSort(left, right, SPQ);
}
-bool ilp_ls_rr_sort::operator()(const SUnit *left,
- const SUnit *right) const {
+// Schedule as many instructions in each cycle as possible. So don't make an
+// instruction available unless it is ready in the current cycle.
+bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
+ if (SU->getHeight() > CurCycle) return false;
+
+ if (SPQ->getHazardRec()->getHazardType(SU, 0)
+ != ScheduleHazardRecognizer::NoHazard)
+ return false;
+
+  return true;
+}
+
+bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
+ if (left->isCall || right->isCall)
+ // No way to compute latency of calls.
+ return BURRSort(left, right, SPQ);
+
bool LHigh = SPQ->HighRegPressure(left);
bool RHigh = SPQ->HighRegPressure(right);
// Avoid causing spills. If register pressure is high, schedule for
@@ -1611,9 +2140,11 @@ bool ilp_ls_rr_sort::operator()(const SUnit *left,
return BURRSort(left, right, SPQ);
}
-template<class SF>
-bool
-RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
+//===----------------------------------------------------------------------===//
+// Preschedule for Register Pressure
+//===----------------------------------------------------------------------===//
+
+bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
if (SU->isTwoAddress) {
unsigned Opc = SU->getNode()->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
@@ -1631,19 +2162,6 @@ RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
return false;
}
-/// hasCopyToRegUse - Return true if SU has a value successor that is a
-/// CopyToReg node.
-static bool hasCopyToRegUse(const SUnit *SU) {
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl()) continue;
- const SUnit *SuccSU = I->getSUnit();
- if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
- return true;
- }
- return false;
-}
-
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
@@ -1654,7 +2172,7 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
assert(ImpDefs && "Caller should check hasPhysRegDefs");
for (const SDNode *SUNode = SU->getNode(); SUNode;
- SUNode = SUNode->getFlaggedNode()) {
+ SUNode = SUNode->getGluedNode()) {
if (!SUNode->isMachineOpcode())
continue;
const unsigned *SUImpDefs =
@@ -1663,7 +2181,7 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
return false;
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
- if (VT == MVT::Flag || VT == MVT::Other)
+ if (VT == MVT::Glue || VT == MVT::Other)
continue;
if (!N->hasAnyUseOfValue(i))
continue;
@@ -1709,8 +2227,7 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
-template<class SF>
-void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
+void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// Visit all the nodes in topological order, working top-down.
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
@@ -1748,7 +2265,7 @@ void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
if (PredSU->NumSuccs == 1)
continue;
// Avoid prescheduling to copies from virtual registers, which don't behave
- // like other nodes from the perspective of scheduling // heuristics.
+ // like other nodes from the perspective of scheduling heuristics.
if (SDNode *N = SU->getNode())
if (N->getOpcode() == ISD::CopyFromReg &&
TargetRegisterInfo::isVirtualRegister
@@ -1802,17 +2319,17 @@ void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
-template<class SF>
-void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
+void RegReductionPQBase::AddPseudoTwoAddrDeps() {
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
if (!SU->isTwoAddress)
continue;
SDNode *Node = SU->getNode();
- if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
+ if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
continue;
+ bool isLiveOut = hasOnlyLiveOutUses(SU);
unsigned Opc = Node->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
unsigned NumRes = TID.getNumDefs();
@@ -1862,7 +2379,7 @@ void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
SuccOpc == TargetOpcode::SUBREG_TO_REG)
continue;
if ((!canClobber(SuccSU, DUSU) ||
- (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
+ (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
(!SU->isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, SU)) {
DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
@@ -1877,20 +2394,10 @@ void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
}
}
-/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
-/// scheduling units.
-template<class SF>
-void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
- SethiUllmanNumbers.assign(SUnits->size(), 0);
-
- for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
- CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
-}
-
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
-static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
+static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
unsigned Limit) {
unsigned Sum = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
@@ -1942,7 +2449,7 @@ bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
if (left->NumSuccsLeft != right->NumSuccsLeft)
return left->NumSuccsLeft > right->NumSuccsLeft;
- assert(left->NodeQueueId && right->NodeQueueId &&
+ assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
return (left->NodeQueueId > right->NodeQueueId);
}
@@ -1952,68 +2459,74 @@ bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
-llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-
+
BURegReductionPriorityQueue *PQ =
new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
- return SD;
+ return SD;
}
llvm::ScheduleDAGSDNodes *
-llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-
+
TDRegReductionPriorityQueue *PQ =
new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, false, PQ);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
}
llvm::ScheduleDAGSDNodes *
-llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-
+
SrcRegReductionPriorityQueue *PQ =
new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
- return SD;
+ return SD;
}
llvm::ScheduleDAGSDNodes *
-llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
const TargetLowering *TLI = &IS->getTargetLowering();
-
+
HybridBURRPriorityQueue *PQ =
new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
+
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
- return SD;
+ return SD;
}
llvm::ScheduleDAGSDNodes *
-llvm::createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
const TargetLowering *TLI = &IS->getTargetLowering();
-
+
ILPBURRPriorityQueue *PQ =
new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
- return SD;
+ return SD;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index f1bf82a..477c1ff 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -34,8 +34,8 @@ using namespace llvm;
STATISTIC(LoadsClustered, "Number of loads clustered together");
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
- : ScheduleDAG(mf) {
-}
+ : ScheduleDAG(mf),
+ InstrItins(mf.getTarget().getInstrItineraryData()) {}
/// Run - perform scheduling.
///
@@ -72,6 +72,7 @@ SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = NewSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
+ SU->isCall = Old->isCall;
SU->isTwoAddress = Old->isTwoAddress;
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
@@ -85,7 +86,7 @@ SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
- const TargetRegisterInfo *TRI,
+ const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII,
unsigned &PhysReg, int &Cost) {
if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
@@ -108,29 +109,28 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
}
}
-static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
- SelectionDAG *DAG) {
+static void AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
SmallVector<EVT, 4> VTs;
- SDNode *FlagDestNode = Flag.getNode();
+ SDNode *GlueDestNode = Glue.getNode();
- // Don't add a flag from a node to itself.
- if (FlagDestNode == N) return;
+ // Don't add glue from a node to itself.
+ if (GlueDestNode == N) return;
- // Don't add a flag to something which already has a flag.
- if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;
+ // Don't add glue to something which already has glue.
+ if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return;
for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
VTs.push_back(N->getValueType(I));
- if (AddFlag)
- VTs.push_back(MVT::Flag);
+ if (AddGlue)
+ VTs.push_back(MVT::Glue);
SmallVector<SDValue, 4> Ops;
for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
Ops.push_back(N->getOperand(I));
- if (FlagDestNode)
- Ops.push_back(Flag);
+ if (GlueDestNode)
+ Ops.push_back(Glue);
SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
MachineSDNode::mmo_iterator Begin = 0, End = 0;
@@ -149,9 +149,9 @@ static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
MN->setMemRefs(Begin, End);
}
-/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
+/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
-/// offsets are not far apart (target specific), it add MVT::Flag inputs and
+/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
@@ -213,20 +213,20 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
if (NumLoads == 0)
return;
- // Cluster loads by adding MVT::Flag outputs and inputs. This also
+ // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
SDNode *Lead = Loads[0];
- AddFlags(Lead, SDValue(0, 0), true, DAG);
+ AddGlue(Lead, SDValue(0, 0), true, DAG);
- SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
+ SDValue InGlue = SDValue(Lead, Lead->getNumValues() - 1);
for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
- bool OutFlag = I < E - 1;
+ bool OutGlue = I < E - 1;
SDNode *Load = Loads[I];
- AddFlags(Load, InFlag, OutFlag, DAG);
+ AddGlue(Load, InGlue, OutGlue, DAG);
- if (OutFlag)
- InFlag = SDValue(Load, Load->getNumValues() - 1);
+ if (OutGlue)
+ InGlue = SDValue(Load, Load->getNumValues() - 1);
++LoadsClustered;
}
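The clustering loop above builds a simple glue chain: the lead load produces a glue output, each later load consumes the previous load's glue, and every load except the last produces a new one. A small sketch of that topology over plain strings (hypothetical names, standing in for SDNodes):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<const char *> Loads = {"ld0", "ld1", "ld2"};
  const char *InGlue = nullptr;
  for (std::size_t I = 0; I < Loads.size(); ++I) {
    bool OutGlue = I + 1 < Loads.size();   // last node needs no glue output
    std::printf("%s: consumes %s, produces %s\n", Loads[I],
                InGlue ? InGlue : "nothing", OutGlue ? "glue" : "nothing");
    if (OutGlue)
      InGlue = Loads[I];                   // next load hangs off this one
  }
  return 0;
}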
@@ -266,68 +266,75 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// FIXME: Multiply by 2 because we may clone nodes during scheduling.
// This is a temporary workaround.
SUnits.reserve(NumNodes * 2);
-
+
// Add all nodes in depth first order.
SmallVector<SDNode*, 64> Worklist;
SmallPtrSet<SDNode*, 64> Visited;
Worklist.push_back(DAG->getRoot().getNode());
Visited.insert(DAG->getRoot().getNode());
-
+
while (!Worklist.empty()) {
SDNode *NI = Worklist.pop_back_val();
-
+
// Add all operands to the worklist unless they've already been added.
for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
if (Visited.insert(NI->getOperand(i).getNode()))
Worklist.push_back(NI->getOperand(i).getNode());
-
+
if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
continue;
-
+
// If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue;
-
+
SUnit *NodeSUnit = NewSUnit(NI);
-
- // See if anything is flagged to this node, if so, add them to flagged
- // nodes. Nodes can have at most one flag input and one flag output. Flags
- // are required to be the last operand and result of a node.
-
- // Scan up to find flagged preds.
+
+ // See if anything is glued to this node, if so, add them to glued
+ // nodes. Nodes can have at most one glue input and one glue output. Glue
+ // is required to be the last operand and result of a node.
+
+ // Scan up to find glued preds.
SDNode *N = NI;
while (N->getNumOperands() &&
- N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
+ N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
N = N->getOperand(N->getNumOperands()-1).getNode();
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
+ if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
+ NodeSUnit->isCall = true;
}
-
- // Scan down to find any flagged succs.
+
+ // Scan down to find any glued succs.
N = NI;
- while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
- SDValue FlagVal(N, N->getNumValues()-1);
-
- // There are either zero or one users of the Flag result.
- bool HasFlagUse = false;
- for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
+ while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
+ SDValue GlueVal(N, N->getNumValues()-1);
+
+ // There are either zero or one users of the Glue result.
+ bool HasGlueUse = false;
+ for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
UI != E; ++UI)
- if (FlagVal.isOperandOf(*UI)) {
- HasFlagUse = true;
+ if (GlueVal.isOperandOf(*UI)) {
+ HasGlueUse = true;
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
N = *UI;
+ if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
+ NodeSUnit->isCall = true;
break;
}
- if (!HasFlagUse) break;
+ if (!HasGlueUse) break;
}
-
- // If there are flag operands involved, N is now the bottom-most node
- // of the sequence of nodes that are flagged together.
+
+ // If there are glue operands involved, N is now the bottom-most node
+ // of the sequence of nodes that are glued together.
// Update the SUnit.
NodeSUnit->setNode(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
+ // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
+ InitNumRegDefsLeft(NodeSUnit);
+
// Assign the Latency field of NodeSUnit using target-provided information.
ComputeLatency(NodeSUnit);
}
@@ -343,7 +350,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
SUnit *SU = &SUnits[su];
SDNode *MainNode = SU->getNode();
-
+
if (MainNode->isMachineOpcode()) {
unsigned Opc = MainNode->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
@@ -356,9 +363,9 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (TID.isCommutable())
SU->isCommutable = true;
}
-
+
// Find all predecessors and successors of the group.
- for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
+ for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
if (N->isMachineOpcode() &&
TII->get(N->getMachineOpcode()).getImplicitDefs()) {
SU->hasPhysRegClobbers = true;
@@ -368,7 +375,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
SU->hasPhysRegDefs = true;
}
-
+
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *OpN = N->getOperand(i).getNode();
if (isPassiveNode(OpN)) continue; // Not scheduled.
@@ -377,7 +384,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (OpSU == SU) continue; // In the same group.
EVT OpVT = N->getOperand(i).getValueType();
- assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
+ assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
unsigned PhysReg = 0;
@@ -403,7 +410,13 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
}
- SU->addPred(dep);
+ if (!SU->addPred(dep) && !dep.isCtrl() && OpSU->NumRegDefsLeft > 0) {
+ // Multiple register uses are combined in the same SUnit. For example,
+ // we could have a set of glued nodes with all their defs consumed by
+ // another set of glued nodes. Register pressure tracking sees this as
+ // a single use, so to keep pressure balanced we reduce the defs.
+ --OpSU->NumRegDefsLeft;
+ }
}
}
}
@@ -412,7 +425,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
-/// flagged together nodes with a single SUnit.
+/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
// Cluster certain nodes which should be scheduled together.
ClusterNodes();
@@ -422,6 +435,69 @@ void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
AddSchedEdges();
}
+// Initialize NumNodeDefs for the current Node's opcode.
+void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
+ if (!Node->isMachineOpcode()) {
+ if (Node->getOpcode() == ISD::CopyFromReg)
+ NodeNumDefs = 1;
+ else
+ NodeNumDefs = 0;
+ return;
+ }
+ unsigned POpc = Node->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF) {
+ // No register need be allocated for this.
+ NodeNumDefs = 0;
+ return;
+ }
+ unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
+ // Some instructions define regs that are not represented in the selection DAG
+ // (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
+ NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
+ DefIdx = 0;
+}
+
+// Construct a RegDefIter for this SUnit and find the first valid value.
+ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
+ const ScheduleDAGSDNodes *SD)
+ : SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
+ InitNodeNumDefs();
+ Advance();
+}
+
+// Advance to the next valid value defined by the SUnit.
+void ScheduleDAGSDNodes::RegDefIter::Advance() {
+ for (;Node;) { // Visit all glued nodes.
+ for (;DefIdx < NodeNumDefs; ++DefIdx) {
+ if (!Node->hasAnyUseOfValue(DefIdx))
+ continue;
+ if (Node->isMachineOpcode() &&
+ Node->getMachineOpcode() == TargetOpcode::EXTRACT_SUBREG) {
+ // Propagate the incoming (full-register) type. I doubt it's needed.
+ ValueType = Node->getOperand(0).getValueType();
+ }
+ else {
+ ValueType = Node->getValueType(DefIdx);
+ }
+ ++DefIdx;
+ return; // Found a normal regdef.
+ }
+ Node = Node->getGluedNode();
+ if (Node == NULL) {
+ return; // No values left to visit.
+ }
+ InitNodeNumDefs();
+ }
+}
+
+void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
+ assert(SU->NumRegDefsLeft == 0 && "expect a new node");
+ for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
+ assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
+ ++SU->NumRegDefsLeft;
+ }
+}
+
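InitNumRegDefsLeft above is the canonical use of RegDefIter: construct, test IsValid, Advance. The same in-place iteration pattern, reduced to a minimal analogue that skips invalid entries in a plain array (a sketch, not LLVM API):

#include <cassert>

class SkipIter {
  const int *Cur, *End;
  void advanceToValid() { while (Cur != End && *Cur == 0) ++Cur; }
public:
  SkipIter(const int *B, const int *E) : Cur(B), End(E) { advanceToValid(); }
  bool IsValid() const { return Cur != End; }
  int GetValue() const { assert(IsValid() && "bad iterator"); return *Cur; }
  void Advance() { ++Cur; advanceToValid(); }
};

int main() {
  int Defs[] = {0, 3, 0, 7};   // zeros model values with no uses
  int Count = 0;
  for (SkipIter I(Defs, Defs + 4); I.IsValid(); I.Advance())
    ++Count;                   // visits 3 and 7, skipping the unused slots
  assert(Count == 2);
  return 0;
}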
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
// Check to see if the scheduler cares about latencies.
if (ForceUnitLatencies()) {
@@ -429,20 +505,17 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
return;
}
- const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
- if (InstrItins.isEmpty()) {
+ if (!InstrItins || InstrItins->isEmpty()) {
SU->Latency = 1;
return;
}
-
+
// Compute the latency for the node. We use the sum of the latencies for
- // all nodes flagged together into this SUnit.
+ // all nodes glued together into this SUnit.
SU->Latency = 0;
- for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
- if (N->isMachineOpcode()) {
- SU->Latency += InstrItins.
- getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
- }
+ for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
+ if (N->isMachineOpcode())
+ SU->Latency += TII->getInstrLatency(InstrItins, N);
}
void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
@@ -451,32 +524,25 @@ void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
if (ForceUnitLatencies())
return;
- const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
- if (InstrItins.isEmpty())
- return;
-
if (dep.getKind() != SDep::Data)
return;
unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
- if (Def->isMachineOpcode()) {
- const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
- if (DefIdx >= II.getNumDefs())
- return;
- int DefCycle = InstrItins.getOperandCycle(II.getSchedClass(), DefIdx);
- if (DefCycle < 0)
- return;
- int UseCycle = 1;
- if (Use->isMachineOpcode()) {
- const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
- UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
- }
- if (UseCycle >= 0) {
- int Latency = DefCycle - UseCycle + 1;
- if (Latency >= 0)
- dep.setLatency(Latency);
- }
+ if (Use->isMachineOpcode())
+ // Adjust the use operand index by num of defs.
+ OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
+ int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
+ if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
+ !BB->succ_empty()) {
+ unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ // This copy is a liveout value. It is likely coalesced, so reduce the
+      // latency so as not to penalize the def.
+ // FIXME: need target specific adjustment here?
+ Latency = (Latency > 1) ? Latency - 1 : 1;
}
+ if (Latency >= 0)
+ dep.setLatency(Latency);
}
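A condensed sketch of the liveout-latency tweak just above, assuming the copy feeds a virtual register that is live out of the block (names are illustrative):

#include <cassert>

static int adjustLiveOutLatency(int Latency, bool IsLiveOutVirtCopy) {
  if (Latency > 1 && IsLiveOutVirtCopy)
    Latency = Latency - 1;   // likely coalesced away: soften the penalty
  return Latency;
}

int main() {
  assert(adjustLiveOutLatency(3, true) == 2);   // trimmed by one cycle
  assert(adjustLiveOutLatency(1, true) == 1);   // never drops below one
  assert(adjustLiveOutLatency(3, false) == 3);  // ordinary copies unchanged
  return 0;
}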
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
@@ -487,14 +553,14 @@ void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
SU->getNode()->dump(DAG);
dbgs() << "\n";
- SmallVector<SDNode *, 4> FlaggedNodes;
- for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
- FlaggedNodes.push_back(N);
- while (!FlaggedNodes.empty()) {
+ SmallVector<SDNode *, 4> GluedNodes;
+ for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
+ GluedNodes.push_back(N);
+ while (!GluedNodes.empty()) {
dbgs() << " ";
- FlaggedNodes.back()->dump(DAG);
+ GluedNodes.back()->dump(DAG);
dbgs() << "\n";
- FlaggedNodes.pop_back();
+ GluedNodes.pop_back();
}
}
@@ -507,37 +573,25 @@ namespace {
};
}
-// ProcessSourceNode - Process nodes with source order numbers. These are added
-// to a vector which EmitSchedule uses to determine how to insert dbg_value
-// instructions in the right order.
-static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
- InstrEmitter &Emitter,
- DenseMap<SDValue, unsigned> &VRBaseMap,
+/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
+static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
+ InstrEmitter &Emitter,
SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
- SmallSet<unsigned, 8> &Seen) {
- unsigned Order = DAG->GetOrdering(N);
- if (!Order || !Seen.insert(Order))
- return;
-
- MachineBasicBlock *BB = Emitter.getBlock();
- if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
- // Did not insert any instruction.
- Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
- return;
- }
-
- Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ unsigned Order) {
if (!N->getHasDebugValue())
return;
+
// Opportunistically insert immediate dbg_value uses, i.e. those with source
// order number right after the N.
+ MachineBasicBlock *BB = Emitter.getBlock();
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
if (DVs[i]->isInvalidated())
continue;
unsigned DVOrder = DVs[i]->getOrder();
- if (DVOrder == ++Order) {
+ if (!Order || DVOrder == ++Order) {
MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
if (DbgMI) {
Orders.push_back(std::make_pair(DVOrder, DbgMI));
@@ -548,6 +602,33 @@ static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
}
}
+// ProcessSourceNode - Process nodes with source order numbers. These are added
+// to a vector which EmitSchedule uses to determine how to insert dbg_value
+// instructions in the right order.
+static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
+ InstrEmitter &Emitter,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
+ SmallSet<unsigned, 8> &Seen) {
+ unsigned Order = DAG->GetOrdering(N);
+ if (!Order || !Seen.insert(Order)) {
+    // Process any valid SDDbgValues even if the node does not have any order
+ // assigned.
+ ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
+ return;
+ }
+
+ MachineBasicBlock *BB = Emitter.getBlock();
+ if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
+ // Did not insert any instruction.
+ Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
+ return;
+ }
+
+ Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
+ ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
+}
+
/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
@@ -578,25 +659,25 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
}
// For pre-regalloc scheduling, create instructions corresponding to the
- // SDNode and any flagged SDNodes and append them to the block.
+ // SDNode and any glued SDNodes and append them to the block.
if (!SU->getNode()) {
// Emit a copy.
EmitPhysRegCopy(SU, CopyVRBaseMap);
continue;
}
- SmallVector<SDNode *, 4> FlaggedNodes;
- for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
- N = N->getFlaggedNode())
- FlaggedNodes.push_back(N);
- while (!FlaggedNodes.empty()) {
- SDNode *N = FlaggedNodes.back();
- Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
+ SmallVector<SDNode *, 4> GluedNodes;
+ for (SDNode *N = SU->getNode()->getGluedNode(); N;
+ N = N->getGluedNode())
+ GluedNodes.push_back(N);
+ while (!GluedNodes.empty()) {
+ SDNode *N = GluedNodes.back();
+ Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
VRBaseMap);
// Remember the source order of the inserted instruction.
if (HasDbg)
ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
- FlaggedNodes.pop_back();
+ GluedNodes.pop_back();
}
Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
VRBaseMap);
@@ -625,16 +706,8 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// Insert all SDDbgValue's whose order(s) are before "Order".
if (!MI)
continue;
-#ifndef NDEBUG
- unsigned LastDIOrder = 0;
-#endif
for (; DI != DE &&
(*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
-#ifndef NDEBUG
- assert((*DI)->getOrder() >= LastDIOrder &&
- "SDDbgValue nodes must be in source order!");
- LastDIOrder = (*DI)->getOrder();
-#endif
if ((*DI)->isInvalidated())
continue;
MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 842fc8c..cc7310e 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -20,13 +20,13 @@
namespace llvm {
/// ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
- ///
+ ///
/// Edges between SUnits are initially based on edges in the SelectionDAG,
/// and additional edges can be added by the schedulers as heuristics.
/// SDNodes such as Constants, Registers, and a few others that are not
/// interesting to schedulers are not allocated SUnits.
///
- /// SDNodes with MVT::Flag operands are grouped along with the flagged
+  /// SDNodes with MVT::Glue operands are grouped along with the glued
/// nodes into a single SUnit so that they are scheduled together.
///
/// SDNode-based scheduling graphs do not use SDep::Anti or SDep::Output
@@ -36,6 +36,7 @@ namespace llvm {
class ScheduleDAGSDNodes : public ScheduleDAG {
public:
SelectionDAG *DAG; // DAG of the current basic block
+ const InstrItineraryData *InstrItins;
explicit ScheduleDAGSDNodes(MachineFunction &mf);
@@ -72,13 +73,17 @@ namespace llvm {
/// predecessors / successors info nor the temporary scheduling states.
///
SUnit *Clone(SUnit *N);
-
+
/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
    /// glued together nodes with a single SUnit.
virtual void BuildSchedGraph(AliasAnalysis *AA);
+ /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
+ ///
+ void InitNumRegDefsLeft(SUnit *SU);
+
/// ComputeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU);
@@ -105,6 +110,30 @@ namespace llvm {
virtual void getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const;
+ /// RegDefIter - In place iteration over the values defined by an
+ /// SUnit. This does not need copies of the iterator or any other STLisms.
+ /// The iterator creates itself, rather than being provided by the SchedDAG.
+ class RegDefIter {
+ const ScheduleDAGSDNodes *SchedDAG;
+ const SDNode *Node;
+ unsigned DefIdx;
+ unsigned NodeNumDefs;
+ EVT ValueType;
+ public:
+ RegDefIter(const SUnit *SU, const ScheduleDAGSDNodes *SD);
+
+ bool IsValid() const { return Node != NULL; }
+
+ EVT GetValue() const {
+ assert(IsValid() && "bad iterator");
+ return ValueType;
+ }
+
+ void Advance();
+ private:
+ void InitNodeNumDefs();
+ };
+
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index ad06ebd..2fb2f2d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -31,7 +31,6 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetOptions.h"
@@ -44,7 +43,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
@@ -111,7 +110,7 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
// Look through a bit convert.
- if (N->getOpcode() == ISD::BIT_CONVERT)
+ if (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
@@ -152,7 +151,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
// Look through a bit convert.
- if (N->getOpcode() == ISD::BIT_CONVERT)
+ if (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
@@ -199,6 +198,8 @@ bool ISD::isScalarToVector(const SDNode *N) {
if (N->getOperand(0).getOpcode() == ISD::UNDEF)
return false;
unsigned NumElems = N->getNumOperands();
+ if (NumElems == 1)
+ return false;
for (unsigned i = 1; i < NumElems; ++i) {
SDValue V = N->getOperand(i);
if (V.getOpcode() != ISD::UNDEF)
@@ -489,7 +490,7 @@ encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
- if (N->getValueType(0) == MVT::Flag)
+ if (N->getValueType(0) == MVT::Glue)
return true; // Never CSE anything that produces a flag.
switch (N->getOpcode()) {
@@ -501,7 +502,7 @@ static bool doNotCSE(SDNode *N) {
// Check that remaining values produced are not flags.
for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
- if (N->getValueType(i) == MVT::Flag)
+ if (N->getValueType(i) == MVT::Glue)
return true; // Never CSE anything that produces a flag.
return false;
@@ -609,9 +610,6 @@ void SelectionDAG::DeallocateNode(SDNode *N) {
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
bool Erased = false;
switch (N->getOpcode()) {
- case ISD::EntryToken:
- llvm_unreachable("EntryToken should not be in CSEMaps!");
- return false;
case ISD::HANDLENODE: return false; // noop.
case ISD::CONDCODE:
assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
@@ -641,6 +639,8 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
}
default:
// Remove it from the CSE Map.
+ assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
+ assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
Erased = CSEMap.RemoveNode(N);
break;
}
@@ -648,7 +648,7 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
// Verify that the node was actually in one of the CSE maps, unless it has a
// flag result (which cannot be CSE'd) or is one of the special cases that are
// not subject to CSE.
- if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
+ if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
!N->isMachineOpcode() && !doNotCSE(N)) {
N->dump(this);
dbgs() << "\n";
@@ -743,8 +743,9 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
return Node;
}
-/// VerifyNode - Sanity check the given node. Aborts if it is invalid.
-void SelectionDAG::VerifyNode(SDNode *N) {
+#ifndef NDEBUG
+/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
+static void VerifyNodeCommon(SDNode *N) {
switch (N->getOpcode()) {
default:
break;
@@ -778,6 +779,44 @@ void SelectionDAG::VerifyNode(SDNode *N) {
}
}
+/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
+static void VerifySDNode(SDNode *N) {
+ // The SDNode allocators cannot be used to allocate nodes with fields that are
+ // not present in an SDNode!
+ assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
+ assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
+ assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
+ assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
+ assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
+ assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
+ assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
+ assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
+ assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
+ assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
+ assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
+ assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
+ assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
+ assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
+ assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
+ assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
+ assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
+ assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
+ assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");
+
+ VerifyNodeCommon(N);
+}
+
+/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
+/// invalid.
+static void VerifyMachineNode(SDNode *N) {
+ // The MachineNode allocators cannot be used to allocate nodes with fields
+ // that are not present in a MachineNode!
+ // Currently there are no such nodes.
+
+ VerifyNodeCommon(N);
+}
+#endif // NDEBUG
+
/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
@@ -1315,7 +1354,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
-
+
SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
@@ -1365,11 +1404,11 @@ SDValue SelectionDAG::getMDNode(const MDNode *MD) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
ID.AddPointer(MD);
-
+
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
-
+
SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
@@ -1613,7 +1652,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
    // Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- KnownOne.clear();
+ KnownOne.clearAllBits();
unsigned TrailZ = KnownZero.countTrailingOnes() +
KnownZero2.countTrailingOnes();
unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
@@ -1636,8 +1675,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
AllOnes, KnownZero2, KnownOne2, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
- KnownOne2.clear();
- KnownZero2.clear();
+ KnownOne2.clearAllBits();
+ KnownZero2.clearAllBits();
ComputeMaskedBits(Op.getOperand(1),
AllOnes, KnownZero2, KnownOne2, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
@@ -1765,7 +1804,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// If the sign extended bits are demanded, we know that the sign
// bit is demanded.
- InSignBit.zext(BitWidth);
+ InSignBit = InSignBit.zext(BitWidth);
if (NewBits.getBoolValue())
InputDemandedBits |= InSignBit;
@@ -1792,7 +1831,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::CTPOP: {
unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
- KnownOne.clear();
+ KnownOne.clearAllBits();
return;
}
case ISD::LOAD: {
@@ -1808,13 +1847,12 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
- APInt InMask = Mask;
- InMask.trunc(InBits);
- KnownZero.trunc(InBits);
- KnownOne.trunc(InBits);
+ APInt InMask = Mask.trunc(InBits);
+ KnownZero = KnownZero.trunc(InBits);
+ KnownOne = KnownOne.trunc(InBits);
ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
return;
}
@@ -1823,16 +1861,15 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt InSignBit = APInt::getSignBit(InBits);
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
- APInt InMask = Mask;
- InMask.trunc(InBits);
+ APInt InMask = Mask.trunc(InBits);
// If any of the sign extended bits are demanded, we know that the sign
// bit is demanded. Temporarily set this bit in the mask for our callee.
if (NewBits.getBoolValue())
InMask |= InSignBit;
- KnownZero.trunc(InBits);
- KnownOne.trunc(InBits);
+ KnownZero = KnownZero.trunc(InBits);
+ KnownOne = KnownOne.trunc(InBits);
ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
// Note if the sign bit is known to be zero or one.
@@ -1844,13 +1881,12 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// If the sign bit wasn't actually demanded by our caller, we don't
// want it set in the KnownZero and KnownOne result values. Reset the
// mask and reapply it to the result values.
- InMask = Mask;
- InMask.trunc(InBits);
+ InMask = Mask.trunc(InBits);
KnownZero &= InMask;
KnownOne &= InMask;
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
// If the sign bit is known zero or one, the top bits match.
if (SignBitKnownZero)
@@ -1862,26 +1898,24 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
- APInt InMask = Mask;
- InMask.trunc(InBits);
- KnownZero.trunc(InBits);
- KnownOne.trunc(InBits);
+ APInt InMask = Mask.trunc(InBits);
+ KnownZero = KnownZero.trunc(InBits);
+ KnownOne = KnownOne.trunc(InBits);
ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
return;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
- APInt InMask = Mask;
- InMask.zext(InBits);
- KnownZero.zext(InBits);
- KnownOne.zext(InBits);
+ APInt InMask = Mask.zext(InBits);
+ KnownZero = KnownZero.zext(InBits);
+ KnownOne = KnownOne.zext(InBits);
ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero.trunc(BitWidth);
- KnownOne.trunc(BitWidth);
+ KnownZero = KnownZero.trunc(BitWidth);
+ KnownOne = KnownOne.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
@@ -1921,7 +1955,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
}
}
// fall through
- case ISD::ADD: {
+ case ISD::ADD:
+ case ISD::ADDE: {
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
@@ -1936,7 +1971,17 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
KnownZeroOut = std::min(KnownZeroOut,
KnownZero2.countTrailingOnes());
- KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
+ if (Op.getOpcode() == ISD::ADD) {
+ KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
+ return;
+ }
+
+ // With ADDE, a carry bit may be added in, so we can only use this
+ // information if we know (at least) that the low two bits are clear. We
+ // then return to the caller that the low bit is unknown but that other bits
+ // are known zero.
+ if (KnownZeroOut >= 2) // ADDE
+ KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
return;
}
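A quick numeric check of the ADDE reasoning above: if both addends have their low four bits known clear, a carry-in of 0 or 1 can only disturb bit 0, so bits 1..3 of the sum stay known zero.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x1230, b = 0x0560;   // low 4 bits clear in both addends
  for (uint32_t carry = 0; carry <= 1; ++carry) {
    uint32_t sum = a + b + carry;
    assert((sum & 0x0E) == 0);       // bits 1..3 remain known zero
  }                                  // bit 0 equals the carry-in here
  return 0;
}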
case ISD::SREM:
@@ -1991,10 +2036,19 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
- KnownOne.clear();
+ KnownOne.clearAllBits();
KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
return;
}
+ case ISD::FrameIndex:
+ case ISD::TargetFrameIndex:
+ if (unsigned Align = InferPtrAlignment(Op)) {
+ // The low bits are known zero if the pointer is aligned.
+ KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
+ return;
+ }
+ break;
+
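For example, a 16-byte aligned frame address has Log2_32(16) == 4 low bits guaranteed zero; a tiny check with a hypothetical aligned address:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Addr = 0x7ffc1230;          // hypothetical 16-byte aligned slot
  unsigned Align = 16;
  assert(Addr % Align == 0);
  assert((Addr & (Align - 1)) == 0);   // low 4 bits are zero
  return 0;
}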
default:
// Allow the target to implement this method for its nodes.
if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
@@ -2234,6 +2288,25 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
+/// isBaseWithConstantOffset - Return true if the specified operand is an
+/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
+/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
+/// semantics as an ADD. This handles the equivalence:
+/// X|Cst == X+Cst iff X&Cst = 0.
+bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
+ if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
+ !isa<ConstantSDNode>(Op.getOperand(1)))
+ return false;
+
+ if (Op.getOpcode() == ISD::OR &&
+ !MaskedValueIsZero(Op.getOperand(0),
+ cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
+ return false;
+
+ return true;
+}
+
+
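The equivalence stated in the comment is easy to verify numerically: when X and Cst share no set bits, OR and ADD agree, and they diverge as soon as a bit overlaps. A sanity check (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xABCD0000u;          // low 16 bits known zero
  uint32_t Cst = 0x1235u;            // fits entirely in those zero bits
  assert((X & Cst) == 0);
  assert((X | Cst) == (X + Cst));    // no carries possible, so OR == ADD
  uint32_t Y = 0xABCD0001u;          // overlaps bit 0 with Cst
  assert((Y & Cst) != 0);
  assert((Y | Cst) != (Y + Cst));    // ADD carries, OR does not
  return 0;
}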
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
// If we're told that NaNs won't happen, assume they won't.
if (NoNaNsFPMath)
@@ -2295,7 +2368,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -2308,23 +2381,22 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
switch (Opcode) {
default: break;
case ISD::SIGN_EXTEND:
- return getConstant(APInt(Val).sextOrTrunc(VT.getSizeInBits()), VT);
+ return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::TRUNCATE:
- return getConstant(APInt(Val).zextOrTrunc(VT.getSizeInBits()), VT);
+ return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
- const uint64_t zero[] = {0, 0};
// No compile time operations on ppcf128.
if (VT == MVT::ppcf128) break;
- APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
+ APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
(void)apf.convertFromAPInt(Val,
Opcode==ISD::SINT_TO_FP,
APFloat::rmNearestTiesToEven);
return getConstantFP(apf, VT);
}
- case ISD::BIT_CONVERT:
+ case ISD::BITCAST:
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
return getConstantFP(Val.bitsToFloat(), VT);
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
@@ -2375,7 +2447,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
APInt api(VT.getSizeInBits(), 2, x);
return getConstant(api, VT);
}
- case ISD::BIT_CONVERT:
+ case ISD::BITCAST:
if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
@@ -2477,13 +2549,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return Operand.getNode()->getOperand(0);
}
break;
- case ISD::BIT_CONVERT:
+ case ISD::BITCAST:
// Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
- && "Cannot BIT_CONVERT between types of different sizes!");
+ && "Cannot BITCAST between types of different sizes!");
if (VT == Operand.getValueType()) return Operand; // noop conversion.
- if (OpOpcode == ISD::BIT_CONVERT) // bitconv(bitconv(x)) -> bitconv(x)
- return getNode(ISD::BIT_CONVERT, DL, VT, Operand.getOperand(0));
+ if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
+ return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
@@ -2519,7 +2591,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
SDNode *N;
SDVTList VTs = getVTList(VT);
- if (VT != MVT::Flag) { // Don't CSE flag producing nodes
+ if (VT != MVT::Glue) { // Don't CSE flag producing nodes
FoldingSetNodeID ID;
SDValue Ops[1] = { Operand };
AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
@@ -2535,7 +2607,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -2676,6 +2748,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
"Shift operators return type must be the same as their first arg");
assert(VT.isInteger() && N2.getValueType().isInteger() &&
"Shifts only work on integers");
+    // Verify that the shift amount VT is big enough to hold valid shift
+ // amounts. This catches things like trying to shift an i1024 value by an
+ // i8, which is easy to fall into in generic code that uses
+ // TLI.getShiftAmount().
+ assert(N2.getValueType().getSizeInBits() >=
+ Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
+ "Invalid use of small shift amount with oversized value!");
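Concretely, an i1024 value admits shift amounts up to 1023, which need Log2_32_Ceil(1024) == 10 bits, so an i8 amount type trips this assert while i16 passes. A sketch with a stand-in for Log2_32_Ceil:

#include <cassert>

static unsigned log2Ceil(unsigned N) {   // stand-in for Log2_32_Ceil
  unsigned Bits = 0;
  while ((1u << Bits) < N) ++Bits;
  return Bits;
}

int main() {
  assert(log2Ceil(1024) == 10);
  assert(!(8 >= log2Ceil(1024)));   // i8 shift amount: assert would fire
  assert(16 >= log2Ceil(1024));     // i16 shift amount: fine
  return 0;
}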
// Always fold shifts of i1 values so the code generator doesn't need to
// handle them. Since we know the size of the shift has to be less than the
@@ -2820,11 +2899,30 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
return getConstant(ShiftedVal.trunc(ElementSize), VT);
}
break;
- case ISD::EXTRACT_SUBVECTOR:
- if (N1.getValueType() == VT) // Trivial extraction.
- return N1;
+ case ISD::EXTRACT_SUBVECTOR: {
+ SDValue Index = N2;
+ if (VT.isSimple() && N1.getValueType().isSimple()) {
+ assert(VT.isVector() && N1.getValueType().isVector() &&
+             "Extract subvector VTs must be vectors!");
+ assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType() &&
+ "Extract subvector VTs must have the same element type!");
+ assert(VT.getSimpleVT() <= N1.getValueType().getSimpleVT() &&
+ "Extract subvector must be from larger vector to smaller vector!");
+
+ if (isa<ConstantSDNode>(Index.getNode())) {
+ assert((VT.getVectorNumElements() +
+ cast<ConstantSDNode>(Index.getNode())->getZExtValue()
+ <= N1.getValueType().getVectorNumElements())
+ && "Extract subvector overflow!");
+ }
+
+ // Trivial extraction.
+ if (VT.getSimpleVT() == N1.getValueType().getSimpleVT())
+ return N1;
+ }
break;
}
+ }
if (N1C) {
if (N2C) {
@@ -2961,7 +3059,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Memoize this node if possible.
SDNode *N;
SDVTList VTs = getVTList(VT);
- if (VT != MVT::Flag) {
+ if (VT != MVT::Glue) {
SDValue Ops[] = { N1, N2 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
@@ -2977,7 +3075,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -3019,7 +3117,31 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!");
break;
- case ISD::BIT_CONVERT:
+ case ISD::INSERT_SUBVECTOR: {
+ SDValue Index = N3;
+ if (VT.isSimple() && N1.getValueType().isSimple()
+ && N2.getValueType().isSimple()) {
+ assert(VT.isVector() && N1.getValueType().isVector() &&
+ N2.getValueType().isVector() &&
+             "Insert subvector VTs must be vectors");
+ assert(VT == N1.getValueType() &&
+ "Dest and insert subvector source types must match!");
+ assert(N2.getValueType().getSimpleVT() <= N1.getValueType().getSimpleVT() &&
+ "Insert subvector must be from smaller vector to larger vector!");
+ if (isa<ConstantSDNode>(Index.getNode())) {
+ assert((N2.getValueType().getVectorNumElements() +
+ cast<ConstantSDNode>(Index.getNode())->getZExtValue()
+ <= VT.getVectorNumElements())
+ && "Insert subvector overflow!");
+ }
+
+ // Trivial insertion.
+ if (VT.getSimpleVT() == N2.getValueType().getSimpleVT())
+ return N2;
+ }
+ break;
+ }
+ case ISD::BITCAST:
    // Fold bitcast nodes from a type to themselves.
if (N1.getValueType() == VT)
return N1;
@@ -3029,7 +3151,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Memoize node if it doesn't produce a flag.
SDNode *N;
SDVTList VTs = getVTList(VT);
- if (VT != MVT::Flag) {
+ if (VT != MVT::Glue) {
SDValue Ops[] = { N1, N2, N3 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
@@ -3045,7 +3167,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -3087,6 +3209,17 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
&ArgChains[0], ArgChains.size());
}
+/// SplatByte - Distribute ByteVal over NumBits bits.
+static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) {
+ APInt Val = APInt(NumBits, ByteVal);
+ unsigned Shift = 8;
+ for (unsigned i = NumBits; i > 8; i >>= 1) {
+ Val = (Val << Shift) | Val;
+ Shift <<= 1;
+ }
+ return Val;
+}
+
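The doubling loop halves the remaining work each pass, so a 64-bit splat needs only three shift-or steps (8, 16, then 32 bits). The same logic over a plain uint64_t:

#include <cassert>
#include <cstdint>

static uint64_t splatByte(unsigned NumBits, uint8_t ByteVal) {
  uint64_t Val = ByteVal;
  unsigned Shift = 8;
  for (unsigned i = NumBits; i > 8; i >>= 1) {
    Val = (Val << Shift) | Val;   // double the replicated span each pass
    Shift <<= 1;
  }
  return Val;
}

int main() {
  assert(splatByte(32, 0xAB) == 0xABABABABu);
  assert(splatByte(64, 0x01) == 0x0101010101010101ull);
  return 0;
}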
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
@@ -3095,27 +3228,18 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
unsigned NumBits = VT.getScalarType().getSizeInBits();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
- APInt Val = APInt(NumBits, C->getZExtValue() & 255);
- unsigned Shift = 8;
- for (unsigned i = NumBits; i > 8; i >>= 1) {
- Val = (Val << Shift) | Val;
- Shift <<= 1;
- }
+ APInt Val = SplatByte(NumBits, C->getZExtValue() & 255);
if (VT.isInteger())
return DAG.getConstant(Val, VT);
return DAG.getConstantFP(APFloat(Val), VT);
}
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
- unsigned Shift = 8;
- for (unsigned i = NumBits; i > 8; i >>= 1) {
- Value = DAG.getNode(ISD::OR, dl, VT,
- DAG.getNode(ISD::SHL, dl, VT, Value,
- DAG.getConstant(Shift,
- TLI.getShiftAmountTy())),
- Value);
- Shift <<= 1;
+ if (NumBits > 8) {
+ // Use a multiplication with 0x010101... to extend the input to the
+ // required length.
+ APInt Magic = SplatByte(NumBits, 0x01);
+ Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
}
return Value;
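The multiply works because 0x0101... acts as a byte broadcast: multiplying a zero-extended byte by it places one copy of the byte at each byte position, with no carries since the value fits in eight bits. For instance:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0x5A;                       // the zero-extended memset byte
  assert(x * 0x01010101u == 0x5A5A5A5Au);  // one MUL replaces the shift-or chain
  return 0;
}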
@@ -3131,13 +3255,12 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
if (Str.empty()) {
if (VT.isInteger())
return DAG.getConstant(0, VT);
- else if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
- VT.getSimpleVT().SimpleTy == MVT::f64)
+ else if (VT == MVT::f32 || VT == MVT::f64)
return DAG.getConstantFP(0.0, VT);
else if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
EltVT, NumElts)));
} else
@@ -3234,15 +3357,6 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
if (VT.bitsGT(LVT))
VT = LVT;
}
-
- // If we're optimizing for size, and there is a limit, bump the maximum number
- // of operations inserted down to 4. This is a wild guess that approximates
- // the size of a call to memcpy or memset (3 arguments + call).
- if (Limit != ~0U) {
- const Function *F = DAG.getMachineFunction().getFunction();
- if (F->hasFnAttr(Attribute::OptimizeForSize))
- Limit = 4;
- }
unsigned NumMemOps = 0;
while (Size != 0) {
@@ -3276,18 +3390,22 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff) {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) {
// Turn a memcpy of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
// Expand memcpy to a series of load and store ops if the size operand falls
// below a certain threshold.
+ // TODO: In the AlwaysInline case, if the size is big, generate a loop
+ // rather than a potentially huge number of loads and stores.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
@@ -3297,8 +3415,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
std::string Str;
bool CopyFromStr = isMemSrcFromString(Src, Str);
bool isZeroStr = CopyFromStr && Str.empty();
- unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy();
-
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
+
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign),
@@ -3334,7 +3452,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, Align);
+ DstPtrInfo.getWithOffset(DstOff), isVol,
+ false, Align);
} else {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
@@ -3343,14 +3462,14 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// FIXME does the case above also need this?
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
assert(NVT.bitsGE(VT));
- Value = DAG.getExtLoad(ISD::EXTLOAD, NVT, dl, Chain,
+ Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, VT, isVol, false,
+ SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
MinAlign(SrcAlign, SrcOff));
Store = DAG.getTruncStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, VT, isVol, false,
- Align);
+ DstPtrInfo.getWithOffset(DstOff), VT, isVol,
+ false, Align);
}
OutChains.push_back(Store);
SrcOff += VTSize;
@@ -3366,8 +3485,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff) {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) {
// Turn a memmove of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
@@ -3377,14 +3496,16 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
- unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove();
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
@@ -3414,7 +3535,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, isVol, false, SrcAlign);
+ SrcPtrInfo.getWithOffset(SrcOff), isVol,
+ false, SrcAlign);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -3429,7 +3551,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Store = DAG.getStore(Chain, dl, LoadValues[i],
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, Align);
+ DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3442,7 +3564,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size,
unsigned Align, bool isVol,
- const Value *DstSV, uint64_t DstSVOff) {
+ MachinePointerInfo DstPtrInfo) {
// Turn a memset of undef to nop.
if (Src.getOpcode() == ISD::UNDEF)
return Chain;
@@ -3452,13 +3574,15 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
bool DstAlignCanChange = false;
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
bool NonScalarIntSafe =
isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
- if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(),
+ if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
Size, (DstAlignCanChange ? 0 : Align), 0,
NonScalarIntSafe, false, DAG, TLI))
return SDValue();
@@ -3477,15 +3601,34 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SmallVector<SDValue, 8> OutChains;
uint64_t DstOff = 0;
unsigned NumMemOps = MemOps.size();
+
+ // Find the largest store and generate the bit pattern for it.
+ EVT LargestVT = MemOps[0];
+ for (unsigned i = 1; i < NumMemOps; i++)
+ if (MemOps[i].bitsGT(LargestVT))
+ LargestVT = MemOps[i];
+ SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
+
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
- unsigned VTSize = VT.getSizeInBits() / 8;
- SDValue Value = getMemsetValue(Src, VT, DAG, dl);
+
+ // If this store is smaller than the largest store, see whether we can get
+ // the smaller value for free with a truncate.
+ SDValue Value = MemSetValue;
+ if (VT.bitsLT(LargestVT)) {
+ if (!LargestVT.isVector() && !VT.isVector() &&
+ TLI.isTruncateFree(LargestVT, VT))
+ Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
+ else
+ Value = getMemsetValue(Src, VT, DAG, dl);
+ }
+ assert(Value.getValueType() == VT && "Value with wrong type.");
SDValue Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, isVol, false, 0);
+ DstPtrInfo.getWithOffset(DstOff),
+ isVol, false, Align);
OutChains.push_back(Store);
- DstOff += VTSize;
+ DstOff += VT.getSizeInBits() / 8;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
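The reworked memset loop materializes the splat once at the widest store type and, when the target reports the truncate as free, derives every narrower store value from it instead of recomputing the splat. Numerically the reuse is just a truncating cast; a minimal illustration, assuming a 12-byte memset lowered to one i64 store plus one i32 store:

    #include <cassert>
    #include <cstdint>

    int main() {
      // memset(dst, 0xAB, 12): splat built once for the largest type (i64).
      uint64_t Splat64 = 0xABABABABABABABABull;
      uint32_t Splat32 = (uint32_t)Splat64;   // "free" truncate reuses it
      assert(Splat32 == 0xABABABABu);         // no second splat computation
      return 0;
    }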
@@ -3495,8 +3638,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol, bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff) {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) {
// Check to see if we should lower the memcpy to loads and stores first.
// For cases within the target-specified limits, this is the best choice.
@@ -3508,7 +3651,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(),Align,
- isVol, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ isVol, false, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
@@ -3518,7 +3661,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
isVol, AlwaysInline,
- DstSV, DstSVOff, SrcSV, SrcSVOff);
+ DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
@@ -3528,7 +3671,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(), Align, isVol,
- true, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ true, DstPtrInfo, SrcPtrInfo);
}
// FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
@@ -3559,8 +3702,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff) {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) {
// Check to see if we should lower the memmove to loads and stores first.
// For cases within the target-specified limits, this is the best choice.
@@ -3573,7 +3716,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(), Align, isVol,
- false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ false, DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
}
@@ -3582,7 +3725,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// code. If the target chooses to do this, this is the next best.
SDValue Result =
TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
- DstSV, DstSVOff, SrcSV, SrcSVOff);
+ DstPtrInfo, SrcPtrInfo);
if (Result.getNode())
return Result;
@@ -3611,7 +3754,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol,
- const Value *DstSV, uint64_t DstSVOff) {
+ MachinePointerInfo DstPtrInfo) {
// Check to see if we should lower the memset to stores first.
// For cases within the target-specified limits, this is the best choice.
@@ -3623,7 +3766,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
- Align, isVol, DstSV, DstSVOff);
+ Align, isVol, DstPtrInfo);
if (Result.getNode())
return Result;
@@ -3633,11 +3776,11 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
// code. If the target chooses to do this, this is the next best.
SDValue Result =
TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
- DstSV, DstSVOff);
+ DstPtrInfo);
if (Result.getNode())
return Result;
- // Emit a library call.
+ // Emit a library call.
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -3669,19 +3812,12 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
}
SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
- SDValue Chain,
- SDValue Ptr, SDValue Cmp,
- SDValue Swp, const Value* PtrVal,
+ SDValue Chain, SDValue Ptr, SDValue Cmp,
+ SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
- // Check if the memory reference references a frame index
- if (!PtrVal)
- if (const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
- PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
-
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
@@ -3689,8 +3825,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PtrVal, Flags, 0,
- MemVT.getStoreSize(), Alignment);
+ MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO);
}
@@ -3729,12 +3864,6 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
- // Check if the memory reference references a frame index
- if (!PtrVal)
- if (const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
- PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
-
MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
@@ -3742,7 +3871,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PtrVal, Flags, 0,
+ MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
MemVT.getStoreSize(), Alignment);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
@@ -3785,7 +3914,6 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
}
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
-/// Allowed to return something different (and simpler) if Simplify is true.
SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
DebugLoc dl) {
if (NumOps == 1)
@@ -3803,18 +3931,18 @@ SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
const EVT *VTs, unsigned NumVTs,
const SDValue *Ops, unsigned NumOps,
- EVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
- MemVT, srcValue, SVOff, Align, Vol,
+ MemVT, PtrInfo, Align, Vol,
ReadMem, WriteMem);
}
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
const SDValue *Ops, unsigned NumOps,
- EVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
if (Align == 0) // Ensure that codegen never sees alignment 0
@@ -3829,8 +3957,7 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
if (Vol)
Flags |= MachineMemOperand::MOVolatile;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(srcValue, Flags, SVOff,
- MemVT.getStoreSize(), Align);
+ MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
}
@@ -3841,13 +3968,14 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
EVT MemVT, MachineMemOperand *MMO) {
assert((Opcode == ISD::INTRINSIC_VOID ||
Opcode == ISD::INTRINSIC_W_CHAIN ||
+ Opcode == ISD::PREFETCH ||
(Opcode <= INT_MAX &&
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
"Opcode is not a memory-accessing opcode!");
// Memoize the node unless it returns a flag.
MemIntrinsicSDNode *N;
- if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
+ if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@@ -3867,36 +3995,70 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
return SDValue(N, 0);
}
+/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
+/// MachinePointerInfo record from it. This is particularly useful because the
+/// code generator has many cases where it doesn't bother passing in a
+/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
+static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
+ // If this is FI+Offset, we can model it.
+ if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
+ return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
+
+ // If this is (FI+Offset1)+Offset2, we can model it.
+ if (Ptr.getOpcode() != ISD::ADD ||
+ !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
+ !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
+ return MachinePointerInfo();
+
+ int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
+ return MachinePointerInfo::getFixedStack(FI, Offset+
+ cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
+}
+
+/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
+/// MachinePointerInfo record from it. This is particularly useful because the
+/// code generator has many cases where it doesn't bother passing in a
+/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
+static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
+ // If the 'Offset' value isn't a constant, we can't handle this.
+ if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
+ return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
+ if (OffsetOp.getOpcode() == ISD::UNDEF)
+ return InferPointerInfo(Ptr);
+ return MachinePointerInfo();
+}
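InferPointerInfo recognizes exactly two shapes, FI and (ADD FI, Cst); everything else yields an empty MachinePointerInfo. A toy model of that pattern match, with simplified node kinds standing in for the SDNode classes (illustrative only, not the LLVM API):

    #include <cstdint>
    #include <optional>

    // Simplified stand-ins for FrameIndexSDNode / ConstantSDNode / ISD::ADD.
    struct Node {
      enum Kind { FrameIndex, Constant, Add, Other } K;
      int FI = 0;                           // valid when K == FrameIndex
      int64_t Cst = 0;                      // valid when K == Constant
      Node *Op0 = nullptr, *Op1 = nullptr;  // valid when K == Add
    };

    struct PtrInfo { int FI; int64_t Offset; };

    // Mirrors the two cases handled above; anything else is "unknown".
    static std::optional<PtrInfo> inferPointerInfo(const Node *Ptr,
                                                   int64_t Off = 0) {
      if (Ptr->K == Node::FrameIndex)
        return PtrInfo{Ptr->FI, Off};
      if (Ptr->K == Node::Add && Ptr->Op0->K == Node::FrameIndex &&
          Ptr->Op1->K == Node::Constant)
        return PtrInfo{Ptr->Op0->FI, Off + Ptr->Op1->Cst};
      return std::nullopt;  // not FI or FI+Cst: no pointer info inferred
    }

    int main() {
      Node FI{Node::FrameIndex}; FI.FI = 3;
      Node C{Node::Constant};    C.Cst = 8;
      Node Add{Node::Add};       Add.Op0 = &FI; Add.Op1 = &C;
      auto PI = inferPointerInfo(&Add);
      return (PI && PI->FI == 3 && PI->Offset == 8) ? 0 : 1;
    }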
+
+
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset,
- const Value *SV, int SVOffset, EVT MemVT,
+ MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal,
- unsigned Alignment) {
+ unsigned Alignment, const MDNode *TBAAInfo) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
- // Check if the memory reference references a frame index
- if (!SV)
- if (const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
- SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
- MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
+
+ // If we don't have a PtrInfo, infer the trivial frame index case to simplify
+ // clients.
+ if (PtrInfo.V == 0)
+ PtrInfo = InferPointerInfo(Ptr, Offset);
+
+ MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(SV, Flags, SVOffset,
- MemVT.getStoreSize(), Alignment);
+ MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
+ TBAAInfo);
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}
SDValue
-SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) {
@@ -3943,25 +4105,26 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
- const Value *SV, int SVOffset,
+ MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
- unsigned Alignment) {
+ unsigned Alignment, const MDNode *TBAAInfo) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
- SV, SVOffset, VT, isVolatile, isNonTemporal, Alignment);
+ PtrInfo, VT, isVolatile, isNonTemporal, Alignment, TBAAInfo);
}
-SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
+SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
SDValue Chain, SDValue Ptr,
- const Value *SV,
- int SVOffset, EVT MemVT,
+ MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal,
- unsigned Alignment) {
+ unsigned Alignment, const MDNode *TBAAInfo) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
- SV, SVOffset, MemVT, isVolatile, isNonTemporal, Alignment);
+ PtrInfo, MemVT, isVolatile, isNonTemporal, Alignment,
+ TBAAInfo);
}
+
SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM) {
@@ -3969,33 +4132,32 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
"Load is already a indexed load!");
return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
- LD->getChain(), Base, Offset, LD->getSrcValue(),
- LD->getSrcValueOffset(), LD->getMemoryVT(),
+ LD->getChain(), Base, Offset, LD->getPointerInfo(),
+ LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment());
}
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
- SDValue Ptr, const Value *SV, int SVOffset,
+ SDValue Ptr, MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
- unsigned Alignment) {
+ unsigned Alignment, const MDNode *TBAAInfo) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(Val.getValueType());
- // Check if the memory reference references a frame index
- if (!SV)
- if (const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
- SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
- MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
+
+ if (PtrInfo.V == 0)
+ PtrInfo = InferPointerInfo(Ptr);
+
+ MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(SV, Flags, SVOffset,
- Val.getValueType().getStoreSize(), Alignment);
+ MF.getMachineMemOperand(PtrInfo, Flags,
+ Val.getValueType().getStoreSize(), Alignment,
+ TBAAInfo);
return getStore(Chain, dl, Val, Ptr, MMO);
}
@@ -4024,27 +4186,26 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
}
SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
- SDValue Ptr, const Value *SV,
- int SVOffset, EVT SVT,
- bool isVolatile, bool isNonTemporal,
- unsigned Alignment) {
+ SDValue Ptr, MachinePointerInfo PtrInfo,
+ EVT SVT, bool isVolatile, bool isNonTemporal,
+ unsigned Alignment,
+ const MDNode *TBAAInfo) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(SVT);
- // Check if the memory reference references a frame index
- if (!SV)
- if (const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
- SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
- MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOStore;
if (isVolatile)
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
+
+ if (PtrInfo.V == 0)
+ PtrInfo = InferPointerInfo(Ptr);
+
+ MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(SV, Flags, SVOffset, SVT.getStoreSize(), Alignment);
+ MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
+ TBAAInfo);
return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}
@@ -4170,7 +4331,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDNode *N;
SDVTList VTs = getVTList(VT);
- if (VT != MVT::Flag) {
+ if (VT != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
void *IP = 0;
@@ -4186,7 +4347,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -4236,7 +4397,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
// Memoize the node unless it returns a flag.
SDNode *N;
- if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
+ if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@@ -4268,7 +4429,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
}
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifySDNode(N);
#endif
return SDValue(N, 0);
}
@@ -4645,7 +4806,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
unsigned NumOps) {
// If an identical node already exists, use it.
void *IP = 0;
- if (VTs.VTs[VTs.NumVTs-1] != MVT::Flag) {
+ if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
@@ -4845,9 +5006,9 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
const SDValue *Ops, unsigned NumOps) {
- bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Flag;
+ bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
MachineSDNode *N;
- void *IP;
+ void *IP = 0;
if (DoCSE) {
FoldingSetNodeID ID;
@@ -4876,7 +5037,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
AllNodes.push_back(N);
#ifndef NDEBUG
- VerifyNode(N);
+ VerifyMachineNode(N);
#endif
return N;
}
@@ -4907,7 +5068,7 @@ SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
const SDValue *Ops, unsigned NumOps) {
- if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
+ if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@@ -5340,6 +5501,29 @@ void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
SD->setHasDebugValue(true);
}
+/// TransferDbgValues - Transfer SDDbgValues.
+void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
+ if (From == To || !From.getNode()->getHasDebugValue())
+ return;
+ SDNode *FromNode = From.getNode();
+ SDNode *ToNode = To.getNode();
+ SmallVector<SDDbgValue *, 2> &DVs = GetDbgValues(FromNode);
+ SmallVector<SDDbgValue *, 2> ClonedDVs;
+ for (SmallVector<SDDbgValue *, 2>::iterator I = DVs.begin(), E = DVs.end();
+ I != E; ++I) {
+ SDDbgValue *Dbg = *I;
+ if (Dbg->getKind() == SDDbgValue::SDNODE) {
+ SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
+ Dbg->getOffset(), Dbg->getDebugLoc(),
+ Dbg->getOrder());
+ ClonedDVs.push_back(Clone);
+ }
+ }
+ for (SmallVector<SDDbgValue *, 2>::iterator I = ClonedDVs.begin(),
+ E = ClonedDVs.end(); I != E; ++I)
+ AddDbgValue(*I, ToNode, false);
+}
+
//===----------------------------------------------------------------------===//
// SDNode Class
//===----------------------------------------------------------------------===//
@@ -5367,7 +5551,7 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
}
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps, EVT memvt,
+ const SDValue *Ops, unsigned NumOps, EVT memvt,
MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs, Ops, NumOps),
MemoryVT(memvt), MMO(mmo) {
@@ -5386,7 +5570,7 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
namespace {
struct EVTArray {
std::vector<EVT> VTs;
-
+
EVTArray() {
VTs.reserve(MVT::LAST_VALUETYPE);
for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
@@ -5406,7 +5590,7 @@ const EVT *SDNode::getValueTypeList(EVT VT) {
sys::SmartScopedLock<true> Lock(*VTMutex);
return &(*EVTs->insert(VT).first);
} else {
- assert(VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Value type out of range!");
return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
@@ -5478,9 +5662,9 @@ bool SDNode::isOperandOf(SDNode *N) const {
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
-/// side-effecting instructions. In practice, this looks through token
-/// factors and non-volatile loads. In order to remain efficient, this only
-/// looks a couple of nodes in, it does not do an exhaustive search.
+/// side-effecting instructions on any chain path. In practice, this looks
+/// through token factors and non-volatile loads. In order to remain efficient,
+/// this only looks a couple of nodes in; it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
unsigned Depth) const {
if (*this == Dest) return true;
@@ -5490,12 +5674,12 @@ bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
if (Depth == 0) return false;
// If this is a token factor, all inputs to the TF happen in parallel. If any
- // of the operands of the TF reach dest, then we can do the xform.
+ // of the operands of the TF does not reach dest, then we cannot do the xform.
if (getOpcode() == ISD::TokenFactor) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
- return true;
- return false;
+ if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
+ return false;
+ return true;
}
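The TokenFactor fix above changes the quantifier from "any operand reaches Dest" to "all operands reach Dest", which is the correct condition: a TokenFactor's operands are parallel chain paths, and a side effect on any one of them lies between the two points. A toy chain-graph model of the corrected check (not the SDNode API; non-TokenFactor nodes are assumed to have a single chain operand):

    #include <vector>

    struct ChainNode {
      bool IsTokenFactor = false;
      bool HasSideEffect = false;       // e.g. a store or a volatile load
      std::vector<ChainNode*> Ops;      // chain operands
    };

    static bool reachesWithoutSideEffects(ChainNode *N, ChainNode *Dest,
                                          unsigned Depth = 6) {
      if (N == Dest) return true;
      if (Depth == 0 || N->HasSideEffect) return false;
      if (N->IsTokenFactor) {
        // All parallel paths must be clean, not just one of them.
        for (ChainNode *Op : N->Ops)
          if (!reachesWithoutSideEffects(Op, Dest, Depth - 1))
            return false;
        return !N->Ops.empty();
      }
      return !N->Ops.empty() &&
             reachesWithoutSideEffects(N->Ops[0], Dest, Depth - 1);
    }

    int main() {
      ChainNode Entry, Store, TF;
      Store.HasSideEffect = true;
      Store.Ops = {&Entry};
      TF.IsTokenFactor = true;
      TF.Ops = {&Entry, &Store};   // one clean path, one dirty path
      // The old "any" logic answered true via the clean path; the
      // corrected "all" logic answers false because of the store.
      return reachesWithoutSideEffects(&TF, &Entry) ? 1 : 0;
    }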
// Loads don't have side effects, look through them.
@@ -5600,6 +5784,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::EH_RETURN: return "EH_RETURN";
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
+ case ISD::EH_SJLJ_DISPATCHSETUP: return "EH_SJLJ_DISPATCHSETUP";
case ISD::ConstantPool: return "ConstantPool";
case ISD::ExternalSymbol: return "ExternalSymbol";
case ISD::BlockAddress: return "BlockAddress";
@@ -5690,6 +5875,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INSERT_VECTOR_ELT: return "insert_vector_elt";
case ISD::EXTRACT_VECTOR_ELT: return "extract_vector_elt";
case ISD::CONCAT_VECTORS: return "concat_vectors";
+ case ISD::INSERT_SUBVECTOR: return "insert_subvector";
case ISD::EXTRACT_SUBVECTOR: return "extract_subvector";
case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";
case ISD::VECTOR_SHUFFLE: return "vector_shuffle";
@@ -5723,7 +5909,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::UINT_TO_FP: return "uint_to_fp";
case ISD::FP_TO_SINT: return "fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint";
- case ISD::BIT_CONVERT: return "bit_convert";
+ case ISD::BITCAST: return "bit_convert";
case ISD::FP16_TO_FP32: return "fp16_to_fp32";
case ISD::FP32_TO_FP16: return "fp32_to_fp16";
@@ -5935,12 +6121,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << LBB->getName() << " ";
OS << (const void*)BBDN->getBasicBlock() << ">";
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
- if (G && R->getReg() &&
- TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
- OS << " %" << G->getTarget().getRegisterInfo()->getName(R->getReg());
- } else {
- OS << " %reg" << R->getReg();
- }
+ OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :0);
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(this)) {
OS << "'" << ES->getSymbol() << "'";
@@ -5986,7 +6167,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
const char *AM = getIndexedModeName(ST->getAddressingMode());
if (*AM)
OS << ", " << AM;
-
+
OS << ">";
} else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
OS << "<" << *M->getMemOperand() << ">";
@@ -6037,7 +6218,7 @@ void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
const SelectionDAG *G, unsigned depth,
- unsigned indent)
+ unsigned indent)
{
if (depth == 0)
return;
@@ -6058,7 +6239,7 @@ static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
unsigned depth) const {
printrWithDepthHelper(OS, this, G, depth, 0);
-}
+}
void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
// Don't print impossibly deep things.
@@ -6072,7 +6253,7 @@ void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
void SDNode::dumprFull(const SelectionDAG *G) const {
// Don't print impossibly deep things.
dumprWithDepth(G, 100);
-}
+}
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
@@ -6156,10 +6337,10 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
}
-/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
-/// location that is 'Dist' units away from the location that the 'Base' load
+/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
+/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
-bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
+bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
unsigned Bytes, int Dist) const {
if (LD->getChain() != Base->getChain())
return false;
@@ -6180,11 +6361,11 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
if (FS != BFS || FS != (int)Bytes) return false;
return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
}
- if (Loc.getOpcode() == ISD::ADD && Loc.getOperand(0) == BaseLoc) {
- ConstantSDNode *V = dyn_cast<ConstantSDNode>(Loc.getOperand(1));
- if (V && (V->getSExtValue() == Dist*Bytes))
- return true;
- }
+
+ // Handle X+C
+ if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
+ cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
+ return true;
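The rewritten X+C case funnels through isBaseWithConstantOffset, which is also meant to catch an (or x, c) with disjoint bits used as an add. The numeric condition being tested is simply that the second load starts Dist * Bytes past the first; illustrative only:

    #include <cassert>
    #include <cstdint>

    // LD is consecutive with Base iff it begins exactly Dist * Bytes past
    // Base's address, e.g. the second half of a split 16-byte load.
    static bool isConsecutive(int64_t BaseAddr, int64_t LocAddr,
                              unsigned Bytes, int Dist) {
      return LocAddr == BaseAddr + (int64_t)Dist * Bytes;
    }

    int main() {
      assert(isConsecutive(/*Base=*/64, /*Loc=*/72, /*Bytes=*/8, /*Dist=*/1));
      assert(!isConsecutive(64, 76, 8, 1));
      return 0;
    }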
const GlobalValue *GV1 = NULL;
const GlobalValue *GV2 = NULL;
@@ -6225,15 +6406,14 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
int64_t FrameOffset = 0;
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
FrameIdx = FI->getIndex();
- } else if (Ptr.getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(Ptr.getOperand(1)) &&
+ } else if (isBaseWithConstantOffset(Ptr) &&
isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
+ // Handle FI+Cst
FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
FrameOffset = Ptr.getConstantOperandVal(1);
}
if (FrameIdx != (1 << 31)) {
- // FIXME: Handle FI+CST.
const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
FrameOffset);
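For the FI+Cst case the usable alignment is MinAlign(objectAlignment, offset), i.e. the largest power of two dividing both. A sketch of that behavior, assuming MinAlign is the usual lowest-set-bit-of-(A|B) formula:

    #include <cassert>
    #include <cstdint>

    // Largest power of two dividing both A and B (0 acting as
    // "infinitely aligned"): the lowest set bit of A | B.
    static uint64_t minAlign(uint64_t A, uint64_t B) {
      return (A | B) & (~(A | B) + 1);
    }

    int main() {
      assert(minAlign(16, 4) == 4);    // 16-byte slot, offset 4 -> 4-aligned
      assert(minAlign(16, 0) == 16);   // offset 0 keeps the full alignment
      assert(minAlign(8, 6) == 2);
      return 0;
    }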
@@ -6354,7 +6534,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
if (OpVal.getOpcode() == ISD::UNDEF)
SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
- SplatValue |= APInt(CN->getAPIntValue()).zextOrTrunc(EltBitSize).
+ SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
zextOrTrunc(sz) << BitPos;
else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
@@ -6369,10 +6549,10 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
while (sz > 8) {
unsigned HalfSize = sz / 2;
- APInt HighValue = APInt(SplatValue).lshr(HalfSize).trunc(HalfSize);
- APInt LowValue = APInt(SplatValue).trunc(HalfSize);
- APInt HighUndef = APInt(SplatUndef).lshr(HalfSize).trunc(HalfSize);
- APInt LowUndef = APInt(SplatUndef).trunc(HalfSize);
+ APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
+ APInt LowValue = SplatValue.trunc(HalfSize);
+ APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
+ APInt LowUndef = SplatUndef.trunc(HalfSize);
// If the two halves do not match (ignoring undef bits), stop here.
if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
@@ -6412,7 +6592,7 @@ static void checkForCyclesHelper(const SDNode *N,
// If this node has already been checked, don't check it again.
if (Checked.count(N))
return;
-
+
// If a node has already been visited on this depth-first walk, reject it as
// a cycle.
if (!Visited.insert(N)) {
@@ -6421,10 +6601,10 @@ static void checkForCyclesHelper(const SDNode *N,
errs() << "Detected cycle in SelectionDAG\n";
abort();
}
-
+
for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
-
+
Checked.insert(N);
Visited.erase(N);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index e657445..452f561 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -15,6 +15,7 @@
#include "SDNodeDbgValue.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
@@ -43,9 +44,8 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -70,10 +70,28 @@ LimitFPPrecision("limit-float-precision",
cl::location(LimitFloatPrecision),
cl::init(0));
+// Limit the width of DAG chains. This is important in general to prevent
+// DAG-based analysis from blowing up. For example, alias analysis and
+// load clustering may not complete in reasonable time. It is difficult to
+// recognize and avoid this situation within each individual analysis, and
+// future analyses are likely to have the same behavior. Limiting DAG width is
+// the safe approach, and will be especially important with global DAGs.
+//
+// MaxParallelChains default is arbitrarily high to avoid affecting
+// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
+// sequence over this should have been converted to llvm.memcpy by the
+// frontend. It is easy to induce this behavior with .ll code such as:
+// %buffer = alloca [4096 x i8]
+// %data = load [4096 x i8]* %argPtr
+// store [4096 x i8] %data, [4096 x i8]* %buffer
+static cl::opt<unsigned>
+MaxParallelChains("dag-chain-limit", cl::desc("Max parallel isel dag chains"),
+ cl::init(64), cl::Hidden);
+
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
const SDValue *Parts, unsigned NumParts,
EVT PartVT, EVT ValueVT);
-
+
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger then ValueVT then AssertOp can be used to specify whether the extra
@@ -85,7 +103,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT);
-
+
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
@@ -112,8 +130,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
RoundParts / 2, PartVT, HalfVT);
} else {
- Lo = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[1]);
+ Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
+ Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
}
if (TLI.isBigEndian())
@@ -145,8 +163,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
"Unexpected split");
SDValue Lo, Hi;
- Lo = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[1]);
+ Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
+ Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
if (TLI.isBigEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
@@ -188,7 +206,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
}
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
- return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
+ return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
llvm_unreachable("Unknown mismatch!");
return SDValue();
@@ -206,7 +224,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
-
+
// Handle a multi-element vector.
if (NumParts > 1) {
EVT IntermediateVT, RegisterVT;
@@ -219,7 +237,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
assert(RegisterVT == Parts[0].getValueType() &&
"Part type doesn't match part!");
-
+
// Assemble the parts into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
if (NumIntermediates == NumParts) {
@@ -238,20 +256,20 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
PartVT, IntermediateVT);
}
-
+
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
Val = DAG.getNode(IntermediateVT.isVector() ?
ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
ValueVT, &Ops[0], NumIntermediates);
}
-
+
// There is now one part, held in Val. Correct it to match ValueVT.
PartVT = Val.getValueType();
-
+
if (PartVT == ValueVT)
return Val;
-
+
if (PartVT.isVector()) {
// If the element type of the source/dest vectors are the same, but the
// parts vector has more elements than the value vector, then we have a
@@ -262,12 +280,12 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
"Cannot narrow, it would be a lossy transformation");
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
DAG.getIntPtrConstant(0));
- }
-
+ }
+
// Vector/Vector bitcast.
- return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
+ return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
}
-
+
assert(ValueVT.getVectorElementType() == PartVT &&
ValueVT.getVectorNumElements() == 1 &&
"Only trivial scalar-to-vector conversions should get here!");
@@ -280,7 +298,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
EVT PartVT);
-
+
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
@@ -289,11 +307,11 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
EVT PartVT,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
EVT ValueVT = Val.getValueType();
-
+
// Handle the vector case separately.
if (ValueVT.isVector())
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT);
-
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
@@ -316,14 +334,14 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
} else {
assert(PartVT.isInteger() && ValueVT.isInteger() &&
- "Unknown mismatch!");
+ "Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
assert(NumParts == 1 && PartVT != ValueVT);
- Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover less bits than value has, truncate the value.
assert(PartVT.isInteger() && ValueVT.isInteger() &&
@@ -366,7 +384,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
- Parts[0] = DAG.getNode(ISD::BIT_CONVERT, DL,
+ Parts[0] = DAG.getNode(ISD::BITCAST, DL,
EVT::getIntegerVT(*DAG.getContext(),
ValueVT.getSizeInBits()),
Val);
@@ -384,8 +402,8 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
ThisVT, Part0, DAG.getIntPtrConstant(0));
if (ThisBits == PartBits && ThisVT != PartVT) {
- Part0 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part0);
- Part1 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part1);
+ Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
+ Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
}
}
}
@@ -403,13 +421,13 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
+
if (NumParts == 1) {
if (PartVT == ValueVT) {
// Nothing to do.
} else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
// Bitconvert vector->vector case.
- Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (PartVT.isVector() &&
PartVT.getVectorElementType() == ValueVT.getVectorElementType()&&
PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
@@ -420,7 +438,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
ElementVT, Val, DAG.getIntPtrConstant(i)));
-
+
for (unsigned i = ValueVT.getVectorNumElements(),
e = PartVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getUNDEF(ElementVT));
@@ -428,7 +446,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());
// FIXME: Use CONCAT for 2x -> 4x.
-
+
//SDValue UndefElts = DAG.getUNDEF(VectorTy);
//Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
} else {
@@ -439,11 +457,11 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
PartVT, Val, DAG.getIntPtrConstant(0));
}
-
+
Parts[0] = Val;
return;
}
-
+
// Handle a multi-element vector.
EVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
@@ -451,11 +469,11 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
IntermediateVT,
NumIntermediates, RegisterVT);
unsigned NumElements = ValueVT.getVectorNumElements();
-
+
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
-
+
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
@@ -467,7 +485,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
IntermediateVT, Val, DAG.getIntPtrConstant(i));
}
-
+
// Split the intermediate operands into legal parts.
if (NumParts == NumIntermediates) {
// If the register was not expanded, promote or copy the value,
@@ -618,48 +636,49 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
}
Chain = P.getValue(1);
+ Parts[i] = P;
// If the source register was virtual and if we know something about it,
// add an assert node.
- if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
- RegisterVT.isInteger() && !RegisterVT.isVector()) {
- unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
- if (FuncInfo.LiveOutRegInfo.size() > SlotNo) {
- const FunctionLoweringInfo::LiveOutInfo &LOI =
- FuncInfo.LiveOutRegInfo[SlotNo];
-
- unsigned RegSize = RegisterVT.getSizeInBits();
- unsigned NumSignBits = LOI.NumSignBits;
- unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
-
- // FIXME: We capture more information than the dag can represent. For
- // now, just use the tightest assertzext/assertsext possible.
- bool isSExt = true;
- EVT FromVT(MVT::Other);
- if (NumSignBits == RegSize)
- isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
- else if (NumZeroBits >= RegSize-1)
- isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
- else if (NumSignBits > RegSize-8)
- isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
- else if (NumZeroBits >= RegSize-8)
- isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
- else if (NumSignBits > RegSize-16)
- isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
- else if (NumZeroBits >= RegSize-16)
- isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
- else if (NumSignBits > RegSize-32)
- isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
- else if (NumZeroBits >= RegSize-32)
- isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
-
- if (FromVT != MVT::Other)
- P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
- RegisterVT, P, DAG.getValueType(FromVT));
- }
- }
+ if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
+ !RegisterVT.isInteger() || RegisterVT.isVector() ||
+ !FuncInfo.LiveOutRegInfo.inBounds(Regs[Part+i]))
+ continue;
+
+ const FunctionLoweringInfo::LiveOutInfo &LOI =
+ FuncInfo.LiveOutRegInfo[Regs[Part+i]];
+
+ unsigned RegSize = RegisterVT.getSizeInBits();
+ unsigned NumSignBits = LOI.NumSignBits;
+ unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
+
+ // FIXME: We capture more information than the dag can represent. For
+ // now, just use the tightest assertzext/assertsext possible.
+ bool isSExt = true;
+ EVT FromVT(MVT::Other);
+ if (NumSignBits == RegSize)
+ isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
+ else if (NumZeroBits >= RegSize-1)
+ isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
+ else if (NumSignBits > RegSize-8)
+ isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
+ else if (NumZeroBits >= RegSize-8)
+ isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
+ else if (NumSignBits > RegSize-16)
+ isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
+ else if (NumZeroBits >= RegSize-16)
+ isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
+ else if (NumSignBits > RegSize-32)
+ isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
+ else if (NumZeroBits >= RegSize-32)
+ isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
+ else
+ continue;
- Parts[i] = P;
+ // Add an assertion node.
+ assert(FromVT != MVT::Other);
+ Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
+ RegisterVT, P, DAG.getValueType(FromVT));
}
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
@@ -889,11 +908,8 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
Val.getResNo(), Offset, dl, DbgSDNodeOrder);
DAG.AddDbgValue(SDV, Val.getNode(), false);
}
- } else {
- SDV = DAG.getDbgValue(Variable, UndefValue::get(V->getType()),
- Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
- }
+ } else
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
DanglingDebugInfoMap[V] = DanglingDebugInfo();
}
}
@@ -913,7 +929,9 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
unsigned InReg = It->second;
RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
- return N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain,NULL);
+ N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain,NULL);
+ resolveDanglingDebugInfo(V, N);
+ return N;
}
// Otherwise create a new SDValue and remember it.
@@ -1088,7 +1106,8 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
Chains[i] =
DAG.getStore(Chain, getCurDebugLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + i),
- Add, NULL, Offsets[i], false, false, 0);
+ // FIXME: better loc info would be nice.
+ Add, MachinePointerInfo(), false, false, 0);
}
Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
@@ -1347,7 +1366,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
return false;
}
-
+
return true;
}
@@ -1383,6 +1402,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// If this is a series of conditions that are or'd or and'd together, emit
// this as a sequence of branches instead of setcc's with and/or operations.
+ // As long as jumps are not expensive, this should improve performance.
// For example, instead of something like:
// cmp A, B
// C = seteq
@@ -1397,7 +1417,8 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// jle foo
//
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
- if (BOp->hasOneUse() &&
+ if (!TLI.isJumpExpensive() &&
+ BOp->hasOneUse() &&
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
@@ -1502,10 +1523,11 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(CB.TrueBB));
- // Insert the false branch.
- if (CB.FalseBB != NextBlock)
- BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
- DAG.getBasicBlock(CB.FalseBB));
+ // Insert the false branch. Do this even if it's a fall-through branch;
+ // this makes it easier to do DAG optimizations which require inverting
+ // the branch condition.
+ BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
+ DAG.getBasicBlock(CB.FalseBB));
DAG.setRoot(BrCond);
}
@@ -1592,12 +1614,28 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
Sub, DAG.getConstant(B.Range, VT),
ISD::SETUGT);
- SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
- TLI.getPointerTy());
+ // Determine the type of the test operands.
+ bool UsePtrType = false;
+ if (!TLI.isTypeLegal(VT))
+ UsePtrType = true;
+ else {
+ for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
+ if ((uint64_t)((int64_t)B.Cases[i].Mask >> VT.getSizeInBits()) + 1 >= 2) {
+ // Switch table case ranges are encoded into a series of masks.
+ // Just use pointer type, it's guaranteed to fit.
+ UsePtrType = true;
+ break;
+ }
+ }
+ if (UsePtrType) {
+ VT = TLI.getPointerTy();
+ Sub = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), VT);
+ }
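The fits-in-VT test above is the usual sign-extension-survives trick: arithmetically shifting the mask right by the type width yields 0 or -1 exactly when the mask is representable in VT, so adding 1 and comparing against 2 flags everything else. Checking the predicate directly (a sketch; assumes arithmetic right shift on signed types):

    #include <cassert>
    #include <cstdint>

    static bool maskTooWide(int64_t Mask, unsigned Bits) {
      // High part must be all-zeros or all-ones for the mask to fit.
      return (uint64_t)(Mask >> Bits) + 1 >= 2;
    }

    int main() {
      assert(!maskTooWide(0x00FF, 16));   // fits in i16
      assert(maskTooWide(0x1FFFF, 16));   // bit 16 set: needs a wider type
      return 0;
    }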
- B.Reg = FuncInfo.CreateReg(TLI.getPointerTy());
+ B.RegVT = VT;
+ B.Reg = FuncInfo.CreateReg(VT);
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
- B.Reg, ShiftOp);
+ B.Reg, Sub);
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
@@ -1623,36 +1661,34 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
}
/// visitBitTestCase - this function produces one "bit test"
-void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
+void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
+ MachineBasicBlock* NextMBB,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB) {
- SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
- TLI.getPointerTy());
+ EVT VT = BB.RegVT;
+ SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
+ Reg, VT);
SDValue Cmp;
if (CountPopulation_64(B.Mask) == 1) {
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
Cmp = DAG.getSetCC(getCurDebugLoc(),
- TLI.getSetCCResultType(ShiftOp.getValueType()),
+ TLI.getSetCCResultType(VT),
ShiftOp,
- DAG.getConstant(CountTrailingZeros_64(B.Mask),
- TLI.getPointerTy()),
+ DAG.getConstant(CountTrailingZeros_64(B.Mask), VT),
ISD::SETEQ);
} else {
// Make desired shift
- SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
- TLI.getPointerTy(),
- DAG.getConstant(1, TLI.getPointerTy()),
- ShiftOp);
+ SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(), VT,
+ DAG.getConstant(1, VT), ShiftOp);
// Emit bit tests and jumps
SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
- TLI.getPointerTy(), SwitchVal,
- DAG.getConstant(B.Mask, TLI.getPointerTy()));
+ VT, SwitchVal, DAG.getConstant(B.Mask, VT));
Cmp = DAG.getSetCC(getCurDebugLoc(),
- TLI.getSetCCResultType(AndOp.getValueType()),
- AndOp, DAG.getConstant(0, TLI.getPointerTy()),
+ TLI.getSetCCResultType(VT),
+ AndOp, DAG.getConstant(0, VT),
ISD::SETNE);
}
@@ -1732,10 +1768,56 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
- // TODO: If any two of the cases has the same destination, and if one value
+ // If any two of the cases have the same destination, and if one value
// is the same as the other, but has one bit unset that the other has set,
// use bit manipulation to do two compares at once. For example:
// "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
+ // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
+ // TODO: Handle cases where CR.CaseBB != SwitchBB.
+ if (Size == 2 && CR.CaseBB == SwitchBB) {
+ Case &Small = *CR.Range.first;
+ Case &Big = *(CR.Range.second-1);
+
+ if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
+ const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
+ const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
+
+ // Check that there is only one bit different.
+ if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
+ (SmallValue | BigValue) == BigValue) {
+ // Isolate the common bit.
+ APInt CommonBit = BigValue & ~SmallValue;
+ assert((SmallValue | CommonBit) == BigValue &&
+ CommonBit.countPopulation() == 1 && "Not a common bit?");
+
+ SDValue CondLHS = getValue(SV);
+ EVT VT = CondLHS.getValueType();
+ DebugLoc DL = getCurDebugLoc();
+
+ SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
+ DAG.getConstant(CommonBit, VT));
+ SDValue Cond = DAG.getSetCC(DL, MVT::i1,
+ Or, DAG.getConstant(BigValue, VT),
+ ISD::SETEQ);
+
+ // Update successor info.
+ SwitchBB->addSuccessor(Small.BB);
+ SwitchBB->addSuccessor(Default);
+
+ // Insert the true branch.
+ SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
+ getControlRoot(), Cond,
+ DAG.getBasicBlock(Small.BB));
+
+ // Insert the false branch.
+ BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
+ DAG.getBasicBlock(Default));
+
+ DAG.setRoot(BrCond);
+ return true;
+ }
+ }
+ }
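A minimal sketch of the one-bit-different test above, with plain uint64_t standing in for APInt and a hypothetical canMergeCases helper (GCC/Clang __builtin_popcountll assumed):

  #include <cassert>
  #include <cstdint>

  static bool canMergeCases(uint64_t Small, uint64_t Big, uint64_t &CommonBit) {
    if (__builtin_popcountll(Big) != __builtin_popcountll(Small) + 1 ||
        (Small | Big) != Big)
      return false;
    CommonBit = Big & ~Small;             // exactly one bit set
    assert((Small | CommonBit) == Big && "Not a common bit?");
    return true;
  }

  int main() {
    uint64_t CB;
    assert(canMergeCases(4, 6, CB) && CB == 2);  // (X==6||X==4) -> ((X|2)==6)
    assert(!canMergeCases(3, 6, CB));            // two differing bits: no merge
  }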
// Rearrange the case blocks so that the last one falls through if possible.
if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
@@ -1800,9 +1882,8 @@ static inline bool areJTsAllowed(const TargetLowering &TLI) {
}
static APInt ComputeRange(const APInt &First, const APInt &Last) {
- APInt LastExt(Last), FirstExt(First);
uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
- LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
+ APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth);
return (LastExt - FirstExt + 1ULL);
}
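The widening by one bit matters because Last - First + 1 can overflow the common width; a small illustration of the same idea on fixed-width host integers:

  #include <cassert>
  #include <cstdint>

  int main() {
    int8_t First = -128, Last = 127;
    // Subtracting in the original 8-bit width would wrap; sign-extend by
    // (at least) one bit first, exactly as ComputeRange does.
    int16_t Range = int16_t(Last) - int16_t(First) + 1;
    assert(Range == 256);   // needs 9 bits
  }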
@@ -2151,7 +2232,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
}
BitTestBlock BTB(lowBound, cmpRange, SV,
- -1U, (CR.CaseBB == SwitchBB),
+ -1U, MVT::Other, (CR.CaseBB == SwitchBB),
CR.CaseBB, Default, BTC);
if (CR.CaseBB == SwitchBB)
@@ -2180,7 +2261,8 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
if (Cases.size() >= 2)
// Must recompute end() each iteration because it may be
// invalidated by erase if we hold on to it
- for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
+ for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin());
+ J != Cases.end(); ) {
const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
MachineBasicBlock* nextBB = J->BB;
@@ -2205,6 +2287,19 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
return numCmps;
}
+void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
+ MachineBasicBlock *Last) {
+ // Update JTCases.
+ for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
+ if (JTCases[i].first.HeaderBB == First)
+ JTCases[i].first.HeaderBB = Last;
+
+ // Update BitTestCases.
+ for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
+ if (BitTestCases[i].Parent == First)
+ BitTestCases[i].Parent = Last;
+}
+
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
@@ -2292,30 +2387,14 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
const Type *Ty = I.getType();
- if (Ty->isVectorTy()) {
- if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
- const VectorType *DestTy = cast<VectorType>(I.getType());
- const Type *ElTy = DestTy->getElementType();
- unsigned VL = DestTy->getNumElements();
- std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
- Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
- if (CV == CNZ) {
- SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
- Op2.getValueType(), Op2));
- return;
- }
- }
+ if (isa<Constant>(I.getOperand(0)) &&
+ I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
+ SDValue Op2 = getValue(I.getOperand(1));
+ setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
+ Op2.getValueType(), Op2));
+ return;
}
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
- if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
- SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
- Op2.getValueType(), Op2));
- return;
- }
-
visitBinary(I, ISD::FSUB);
}
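The rewritten check folds only a negative-zero left-hand side into fneg. That restriction matters at X == +0.0, as this host-side sketch shows:

  #include <cassert>
  #include <cmath>

  int main() {
    double X = 0.0;
    // -0.0 - X matches fneg even at X == +0.0 ...
    assert(std::signbit(-0.0 - X) && std::signbit(-X));
    // ... while 0.0 - X does not, so a positive-zero LHS must not be folded.
    assert(!std::signbit(0.0 - X));
  }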
@@ -2329,31 +2408,29 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- if (!I.getType()->isVectorTy() &&
- Op2.getValueType() != TLI.getShiftAmountTy()) {
+
+ MVT ShiftTy = TLI.getShiftAmountTy();
+
+ // Coerce the shift amount to the right type if we can.
+ if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
+ unsigned ShiftSize = ShiftTy.getSizeInBits();
+ unsigned Op2Size = Op2.getValueType().getSizeInBits();
+ DebugLoc DL = getCurDebugLoc();
+
// If the operand is smaller than the shift count type, promote it.
- EVT PTy = TLI.getPointerTy();
- EVT STy = TLI.getShiftAmountTy();
- if (STy.bitsGT(Op2.getValueType()))
- Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
- TLI.getShiftAmountTy(), Op2);
+ if (ShiftSize > Op2Size)
+ Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
+
// If the operand is larger than the shift count type but the shift
// count type has enough bits to represent any shift value, truncate
// it now. This is a common case and it exposes the truncate to
// optimization early.
- else if (STy.getSizeInBits() >=
- Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
- Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- TLI.getShiftAmountTy(), Op2);
- // Otherwise we'll need to temporarily settle for some other
- // convenient type; type legalization will make adjustments as
- // needed.
- else if (PTy.bitsLT(Op2.getValueType()))
- Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- TLI.getPointerTy(), Op2);
- else if (PTy.bitsGT(Op2.getValueType()))
- Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
- TLI.getPointerTy(), Op2);
+ else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
+ Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
+ // Otherwise we'll need to temporarily settle for some other convenient
+ // type. Type legalization will make adjustments once the shiftee is split.
+ else
+ Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
}
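A self-contained model of the three coercion rules above, with plain bit widths standing in for EVTs; classify and log2ceil are hypothetical names, and log2ceil(n) plays the role of Log2_32_Ceil(n), the number of bits needed to encode any shift amount of an n-bit value:

  #include <cassert>

  enum class Coerce { ZExt, Trunc, ZExtOrTruncToI32 };

  static Coerce classify(unsigned ShiftSize, unsigned Op2Size) {
    auto log2ceil = [](unsigned n) {
      unsigned b = 0;
      while ((1u << b) < n) ++b;
      return b;
    };
    if (ShiftSize > Op2Size) return Coerce::ZExt;              // promote
    if (ShiftSize >= log2ceil(Op2Size)) return Coerce::Trunc;  // safe to shrink
    return Coerce::ZExtOrTruncToI32;                           // settle for i32
  }

  int main() {
    assert(classify(32, 8) == Coerce::ZExt);    // small amount gets widened
    assert(classify(8, 64) == Coerce::Trunc);   // 6 bits index any i64 shift
    assert(classify(8, 1024) == Coerce::ZExtOrTruncToI32);
  }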
setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
@@ -2499,9 +2576,9 @@ void SelectionDAGBuilder::visitBitCast(const User &I) {
EVT DestVT = TLI.getValueType(I.getType());
// BitCast assures us that source and destination are the same size so this is
- // either a BIT_CONVERT or a no-op.
+ // either a BITCAST or a no-op.
if (DestVT != N.getValueType())
- setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
DestVT, N)); // convert types.
else
setValue(&I, N); // noop cast.
@@ -2650,7 +2727,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
} else {
StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
- StartIdx[Input] + MaskNumElts < SrcNumElts)
+ StartIdx[Input] + MaskNumElts <= SrcNumElts)
RangeUse[Input] = 1; // Extract from a multiple of the mask length.
}
}
@@ -2726,8 +2803,7 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);
- unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
- I.idx_begin(), I.idx_end());
+ unsigned LinearIndex = ComputeLinearIndex(AggTy, I.idx_begin(), I.idx_end());
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, AggTy, AggValueVTs);
@@ -2765,8 +2841,7 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);
- unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
- I.idx_begin(), I.idx_end());
+ unsigned LinearIndex = ComputeLinearIndex(AggTy, I.idx_begin(), I.idx_end());
SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(TLI, ValTy, ValValueVTs);
@@ -2884,7 +2959,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
// the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
- unsigned StackAlign = TM.getFrameInfo()->getStackAlignment();
+ unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
@@ -2920,6 +2995,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
unsigned Alignment = I.getAlignment();
+ const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
@@ -2930,10 +3006,11 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
SDValue Root;
bool ConstantMemory = false;
- if (I.isVolatile())
+ if (I.isVolatile() || NumValues > MaxParallelChains)
// Serialize volatile loads with other side effects.
Root = getRoot();
- else if (AA->pointsToConstantMemory(SV)) {
+ else if (AA->pointsToConstantMemory(
+ AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
@@ -2943,23 +3020,38 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}
SmallVector<SDValue, 4> Values(NumValues);
- SmallVector<SDValue, 4> Chains(NumValues);
+ SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
+ NumValues));
EVT PtrVT = Ptr.getValueType();
- for (unsigned i = 0; i != NumValues; ++i) {
+ unsigned ChainI = 0;
+ for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
+ // Serializing loads here may result in excessive register pressure, and
+ // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
+ // could recover a bit by hoisting nodes upward in the chain by recognizing
+ // they are side-effect free or do not alias. The optimizer should really
+ // avoid this case by converting large object/array copies to llvm.memcpy
+ // (MaxParallelChains should always remain as a failsafe).
+ if (ChainI == MaxParallelChains) {
+ assert(PendingLoads.empty() && "PendingLoads must be serialized first");
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
+ MVT::Other, &Chains[0], ChainI);
+ Root = Chain;
+ ChainI = 0;
+ }
SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
PtrVT, Ptr,
DAG.getConstant(Offsets[i], PtrVT));
SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
- A, SV, Offsets[i], isVolatile,
- isNonTemporal, Alignment);
+ A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
+ isNonTemporal, Alignment, TBAAInfo);
Values[i] = L;
- Chains[i] = L.getValue(1);
+ Chains[ChainI] = L.getValue(1);
}
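The ChainI bookkeeping is a plain batching pattern; here is a toy model with ints standing in for SDValues (emitOneLoad and tokenFactor are hypothetical stand-ins):

  #include <vector>

  static int emitOneLoad(int Root, unsigned i) { return Root ^ int(i + 1); }

  static int tokenFactor(const std::vector<int> &Chains) {
    int T = 0;
    for (int C : Chains) T ^= C;   // any combining op will do for the sketch
    return T;
  }

  static int lowerLoads(unsigned NumValues, unsigned MaxParallelChains) {
    std::vector<int> Chains;
    int Root = 0;
    for (unsigned i = 0; i != NumValues; ++i) {
      if (Chains.size() == MaxParallelChains) {
        Root = tokenFactor(Chains);   // flush: the batch becomes the new root
        Chains.clear();
      }
      Chains.push_back(emitOneLoad(Root, i));
    }
    return tokenFactor(Chains);       // final TokenFactor over the tail
  }

  int main() { (void)lowerLoads(100, 16); }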
if (!ConstantMemory) {
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
- MVT::Other, &Chains[0], NumValues);
+ MVT::Other, &Chains[0], ChainI);
if (isVolatile)
DAG.setRoot(Chain);
else
@@ -2989,23 +3081,37 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SDValue Ptr = getValue(PtrV);
SDValue Root = getRoot();
- SmallVector<SDValue, 4> Chains(NumValues);
+ SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
+ NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
unsigned Alignment = I.getAlignment();
-
- for (unsigned i = 0; i != NumValues; ++i) {
+ const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
+
+ unsigned ChainI = 0;
+ for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
+ // See visitLoad comments.
+ if (ChainI == MaxParallelChains) {
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
+ MVT::Other, &Chains[0], ChainI);
+ Root = Chain;
+ ChainI = 0;
+ }
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
DAG.getConstant(Offsets[i], PtrVT));
- Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
- SDValue(Src.getNode(), Src.getResNo() + i),
- Add, PtrV, Offsets[i], isVolatile,
- isNonTemporal, Alignment);
- }
-
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
- MVT::Other, &Chains[0], NumValues));
+ SDValue St = DAG.getStore(Root, getCurDebugLoc(),
+ SDValue(Src.getNode(), Src.getResNo() + i),
+ Add, MachinePointerInfo(PtrV, Offsets[i]),
+ isVolatile, isNonTemporal, Alignment, TBAAInfo);
+ Chains[ChainI] = St;
+ }
+
+ SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
+ MVT::Other, &Chains[0], ChainI);
+ ++SDNodeOrder;
+ AssignOrderingToNode(StoreNode.getNode());
+ DAG.setRoot(StoreNode);
}
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
@@ -3031,7 +3137,8 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
- if (!IsTgtIntrinsic)
+ if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
+ Info.opc == ISD::INTRINSIC_W_CHAIN)
Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
// Add all operands of the call to the operand list.
@@ -3062,7 +3169,8 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
// This is target intrinsic that touches memory
Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
VTs, &Ops[0], Ops.size(),
- Info.memVT, Info.ptrVal, Info.offset,
+ Info.memVT,
+ MachinePointerInfo(Info.ptrVal, Info.offset),
Info.align, Info.vol,
Info.readMem, Info.writeMem);
} else if (!HasChain) {
@@ -3087,7 +3195,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (!I.getType()->isVoidTy()) {
if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(PTy);
- Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
+ Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
}
setValue(&I, Result);
@@ -3106,7 +3214,7 @@ GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
DAG.getConstant(0x007fffff, MVT::i32));
SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
DAG.getConstant(0x3f800000, MVT::i32));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}
/// GetExponent - Get the exponent:
@@ -3205,13 +3313,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e));
- SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
+ SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t5);
// Add the exponent into the result in integer domain.
SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
+ result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t6);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
//
@@ -3231,13 +3339,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd));
- SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
+ SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t7);
// Add the exponent into the result in integer domain.
SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
+ result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t8);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
//
@@ -3269,14 +3377,14 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000));
- SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
+ SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,
MVT::i32, t13);
// Add the exponent into the result in integer domain.
SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
+ result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t14);
}
} else {
// No special expansion.
@@ -3298,7 +3406,7 @@ SelectionDAGBuilder::visitLog(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
@@ -3408,7 +3516,7 @@ SelectionDAGBuilder::visitLog2(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Get the exponent.
SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
@@ -3517,7 +3625,7 @@ SelectionDAGBuilder::visitLog10(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
@@ -3645,11 +3753,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e));
- SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
+ SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
@@ -3670,11 +3778,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd));
- SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
+ SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
@@ -3706,11 +3814,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000));
- SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
+ SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
}
} else {
@@ -3778,11 +3886,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e));
- SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
+ SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
@@ -3803,11 +3911,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd));
- SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
+ SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
@@ -3839,11 +3947,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000));
- SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
+ SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX);
}
} else {
@@ -3915,13 +4023,16 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
/// At the end of instruction selection, they will be inserted to the entry BB.
bool
SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
- int64_t Offset,
+ int64_t Offset,
const SDValue &N) {
const Argument *Arg = dyn_cast<Argument>(V);
if (!Arg)
return false;
MachineFunction &MF = DAG.getMachineFunction();
+ const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
+
// Ignore inlined function arguments here.
DIVariable DV(Variable);
if (DV.isInlinedFnArgument(MF.getFunction()))
@@ -3935,14 +4046,16 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
if (Arg->hasByValAttr()) {
// Byval arguments' frame index is recorded during argument lowering.
// Use this info directly.
- const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
Reg = TRI->getFrameRegister(MF);
Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
+ // If the byval argument offset is not recorded, ignore this.
+ if (!Offset)
+ Reg = 0;
}
if (N.getNode() && N.getOpcode() == ISD::CopyFromReg) {
Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
- if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
MachineRegisterInfo &RegInfo = MF.getRegInfo();
unsigned PR = RegInfo.getLiveInPhysReg(Reg);
if (PR)
@@ -3951,13 +4064,25 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
}
if (!Reg) {
+ // Check if ValueMap has reg number.
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
- if (VMI == FuncInfo.ValueMap.end())
- return false;
- Reg = VMI->second;
+ if (VMI != FuncInfo.ValueMap.end())
+ Reg = VMI->second;
}
- const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
+ if (!Reg && N.getNode()) {
+ // Check if frame index is available.
+ if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
+ if (FrameIndexSDNode *FINode =
+ dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
+ Reg = TRI->getFrameRegister(MF);
+ Offset = FINode->getIndex();
+ }
+ }
+
+ if (!Reg)
+ return false;
+
MachineInstrBuilder MIB = BuildMI(MF, getCurDebugLoc(),
TII->get(TargetOpcode::DBG_VALUE))
.addReg(Reg, RegState::Debug).addImm(Offset).addMetadata(Variable);
@@ -3966,9 +4091,11 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
}
// VisualStudio defines setjmp as _setjmp
-#if defined(_MSC_VER) && defined(setjmp)
-#define setjmp_undefined_for_visual_studio
-#undef setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+ !defined(setjmp_undefined_for_msvc)
+# pragma push_macro("setjmp")
+# undef setjmp
+# define setjmp_undefined_for_msvc
#endif
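push_macro is safer than a bare #undef because the header's definition can be restored later. The pattern in isolation (the matching pop is assumed to live at the end of the file):

  #include <csetjmp>               // may define setjmp as a macro (e.g. MSVC)
  #pragma push_macro("setjmp")
  #undef setjmp
  // ... code that needs 'setjmp' as a plain identifier ...
  #pragma pop_macro("setjmp")      // restore whatever the header defined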
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
@@ -4013,7 +4140,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
- I.getArgOperand(0), 0, I.getArgOperand(1), 0));
+ MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1))));
return 0;
}
case Intrinsic::memset: {
@@ -4028,7 +4156,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- I.getArgOperand(0), 0));
+ MachinePointerInfo(I.getArgOperand(0))));
return 0;
}
case Intrinsic::memmove: {
@@ -4044,22 +4172,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Op3 = getValue(I.getArgOperand(2));
unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
-
- // If the source and destination are known to not be aliases, we can
- // lower memmove as memcpy.
- uint64_t Size = -1ULL;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
- Size = C->getZExtValue();
- if (AA->alias(I.getArgOperand(0), Size, I.getArgOperand(1), Size) ==
- AliasAnalysis::NoAlias) {
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- false, I.getArgOperand(0), 0,
- I.getArgOperand(1), 0));
- return 0;
- }
-
DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- I.getArgOperand(0), 0, I.getArgOperand(1), 0));
+ MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1))));
return 0;
}
case Intrinsic::dbg_declare: {
@@ -4078,10 +4193,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Check if address has undef value.
if (isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
- SDDbgValue*SDV =
- DAG.getDbgValue(Variable, UndefValue::get(Address->getType()),
- 0, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
return 0;
}
@@ -4092,7 +4204,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDDbgValue *SDV;
if (N.getNode()) {
// Parameters are handled specially.
- bool isParameter =
+ bool isParameter =
DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable;
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
@@ -4104,25 +4216,40 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Byval parameter. We have a frame index at this point.
SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
0, dl, SDNodeOrder);
- else
+ else {
// Can't do anything with other non-AI cases yet. This might be a
// parameter of a callee function that got inlined, for example.
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
return 0;
+ }
} else if (AI)
SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
0, dl, SDNodeOrder);
- else
+ else {
// Can't do anything with other non-AI cases yet.
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
return 0;
+ }
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
} else {
- // If Address is an arugment then try to emits its dbg value using
- // virtual register info from the FuncInfo.ValueMap. Otherwise add undef
- // to help track missing debug info.
+ // If Address is an argument then try to emit its dbg value using
+ // virtual register info from the FuncInfo.ValueMap.
if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
- SDV = DAG.getDbgValue(Variable, UndefValue::get(Address->getType()),
- 0, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
+ // If the variable is pinned by an alloca in a dominating block then
+ // use the StaticAllocaMap.
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
+ if (AI->getParent() != DI.getParent()) {
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ SDV = DAG.getDbgValue(Variable, SI->second,
+ 0, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
+ return 0;
+ }
+ }
+ }
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
}
}
return 0;
@@ -4160,17 +4287,15 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
N.getResNo(), Offset, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, N.getNode(), false);
}
- } else if (isa<PHINode>(V) && !V->use_empty() ) {
+ } else if (!V->use_empty() ) {
// Do not call getValue(V) yet, as we don't want to generate code.
// Remember it for later.
DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
DanglingDebugInfoMap[V] = DDI;
} else {
// We may expand this to cover more cases. One case where we have no
- // data available is an unreferenced parameter; we need this fallback.
- SDV = DAG.getDbgValue(Variable, UndefValue::get(V->getType()),
- Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, 0, false);
+ // data available is an unreferenced parameter.
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
}
}
@@ -4186,7 +4311,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (SI == FuncInfo.StaticAllocaMap.end())
return 0; // VLAs.
int FI = SI->second;
-
+
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
@@ -4282,11 +4407,75 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_sjlj_longjmp: {
DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
- getRoot(),
- getValue(I.getArgOperand(0))));
+ getRoot(), getValue(I.getArgOperand(0))));
+ return 0;
+ }
+ case Intrinsic::eh_sjlj_dispatch_setup: {
+ DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
+ getRoot(), getValue(I.getArgOperand(0))));
return 0;
}
+ case Intrinsic::x86_mmx_pslli_w:
+ case Intrinsic::x86_mmx_pslli_d:
+ case Intrinsic::x86_mmx_pslli_q:
+ case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psrli_d:
+ case Intrinsic::x86_mmx_psrli_q:
+ case Intrinsic::x86_mmx_psrai_w:
+ case Intrinsic::x86_mmx_psrai_d: {
+ SDValue ShAmt = getValue(I.getArgOperand(1));
+ if (isa<ConstantSDNode>(ShAmt)) {
+ visitTargetIntrinsic(I, Intrinsic);
+ return 0;
+ }
+ unsigned NewIntrinsic = 0;
+ EVT ShAmtVT = MVT::v2i32;
+ switch (Intrinsic) {
+ case Intrinsic::x86_mmx_pslli_w:
+ NewIntrinsic = Intrinsic::x86_mmx_psll_w;
+ break;
+ case Intrinsic::x86_mmx_pslli_d:
+ NewIntrinsic = Intrinsic::x86_mmx_psll_d;
+ break;
+ case Intrinsic::x86_mmx_pslli_q:
+ NewIntrinsic = Intrinsic::x86_mmx_psll_q;
+ break;
+ case Intrinsic::x86_mmx_psrli_w:
+ NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
+ break;
+ case Intrinsic::x86_mmx_psrli_d:
+ NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
+ break;
+ case Intrinsic::x86_mmx_psrli_q:
+ NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
+ break;
+ case Intrinsic::x86_mmx_psrai_w:
+ NewIntrinsic = Intrinsic::x86_mmx_psra_w;
+ break;
+ case Intrinsic::x86_mmx_psrai_d:
+ NewIntrinsic = Intrinsic::x86_mmx_psra_d;
+ break;
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ }
+
+ // The vector shift intrinsics with scalar amounts use 32-bit shift values,
+ // but the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
+ // to be zero.
+ // We must do this early because v2i32 is not a legal type.
+ DebugLoc dl = getCurDebugLoc();
+ SDValue ShOps[2];
+ ShOps[0] = ShAmt;
+ ShOps[1] = DAG.getConstant(0, MVT::i32);
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
+ EVT DestVT = TLI.getValueType(I.getType());
+ ShAmt = DAG.getNode(ISD::BITCAST, dl, DestVT, ShAmt);
+ Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
+ DAG.getConstant(NewIntrinsic, MVT::i32),
+ getValue(I.getArgOperand(0)), ShAmt);
+ setValue(&I, Res);
+ return 0;
+ }
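A host-side model of the {ShAmt, 0} packing above, assuming the little-endian lane layout of x86:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    uint32_t Amt = 5;
    uint32_t Lanes[2] = {Amt, 0};   // BUILD_VECTOR v2i32 {ShAmt, 0}
    uint64_t Wide;
    static_assert(sizeof(Wide) == sizeof(Lanes), "lane/word size mismatch");
    std::memcpy(&Wide, Lanes, sizeof(Wide));   // the BITCAST to the MMX type
    assert(Wide == Amt);            // upper 32 bits are zero by construction
  }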
case Intrinsic::convertff:
case Intrinsic::convertfsi:
case Intrinsic::convertfui:
@@ -4430,8 +4619,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Store the stack protector onto the stack.
Res = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
- PseudoSourceValue::getFixedStack(FI),
- 0, true, false, 0);
+ MachinePointerInfo::getFixedStack(FI),
+ true, false, 0);
setValue(&I, Res);
DAG.setRoot(Res);
return 0;
@@ -4510,14 +4699,22 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::prefetch: {
SDValue Ops[4];
+ unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
Ops[2] = getValue(I.getArgOperand(1));
Ops[3] = getValue(I.getArgOperand(2));
- DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
+ DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl,
+ DAG.getVTList(MVT::Other),
+ &Ops[0], 4,
+ EVT::getIntegerVT(*Context, 8),
+ MachinePointerInfo(I.getArgOperand(0)),
+ 0, /* align */
+ false, /* volatile */
+ rw==0, /* read */
+ rw==1)); /* write */
return 0;
}
-
case Intrinsic::memory_barrier: {
SDValue Ops[6];
Ops[0] = getRoot();
@@ -4536,7 +4733,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2)),
- I.getArgOperand(0));
+ MachinePointerInfo(I.getArgOperand(0)));
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
@@ -4599,6 +4796,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
FTy->isVarArg(), Outs, FTy->getContext());
SDValue DemoteStackSlot;
+ int DemoteStackIdx = -100;
if (!CanLowerReturn) {
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
@@ -4606,10 +4804,10 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
- int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
+ DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
- DemoteStackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
+ DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
Entry.Node = DemoteStackSlot;
Entry.Ty = StackSlotPtrType;
Entry.isSExt = false;
@@ -4703,7 +4901,9 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
DemoteStackSlot,
DAG.getConstant(Offsets[i], PtrVT));
SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
- Add, NULL, Offsets[i], false, false, 1);
+ Add,
+ MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
+ false, false, 1);
Values[i] = L;
Chains[i] = L.getValue(1);
}
@@ -4711,7 +4911,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
MVT::Other, &Chains[0], NumValues);
PendingLoads.push_back(Chain);
-
+
// Collect the legal value parts into potentially illegal values
// that correspond to the original function's return values.
SmallVector<EVT, 4> RetTys;
@@ -4724,7 +4924,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
EVT VT = RetTys[I];
EVT RegisterVT = TLI.getRegisterType(RetTy->getContext(), VT);
unsigned NumRegs = TLI.getNumRegisters(RetTy->getContext(), VT);
-
+
SDValue ReturnValue =
getCopyFromParts(DAG, getCurDebugLoc(), &Values[CurReg], NumRegs,
RegisterVT, VT, AssertOp);
@@ -4806,7 +5006,7 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
SDValue Ptr = Builder.getValue(PtrVal);
SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
- Ptr, PtrVal /*SrcValue*/, 0/*SVOffset*/,
+ Ptr, MachinePointerInfo(PtrVal),
false /*volatile*/,
false /*nontemporal*/, 1 /* align=1 */);
@@ -4902,7 +5102,25 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
visitInlineAsm(&I);
return;
}
-
+
+ // See if any floating point values are being passed to this function. This is
+ // used to emit an undefined reference to fltused on Windows.
+ const FunctionType *FT =
+ cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
+ MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
+ if (FT->isVarArg() &&
+ !MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ const Type* T = I.getArgOperand(i)->getType();
+ for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
+ i != e; ++i) {
+ if (!i->isFloatingPointTy()) continue;
+ MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
+ break;
+ }
+ }
+ }
+
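A simplified model of that scan: recurse through the contained types and stop at the first floating-point leaf. The real code walks the type graph with a po_iterator, which also copes with the cyclic types this naive recursion would not:

  #include <cassert>
  #include <vector>

  struct Ty {
    bool isFP;
    std::vector<const Ty *> contained;   // element/field types
  };

  static bool containsFP(const Ty *T) {
    if (T->isFP) return true;
    for (const Ty *C : T->contained)
      if (containsFP(C)) return true;    // assumes an acyclic type graph
    return false;
  }

  int main() {
    Ty F{true, {}}, I{false, {}};
    Ty S{false, {&I, &F}};               // struct { int; float; }
    assert(containsFP(&S) && !containsFP(&I));
  }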
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
@@ -4980,7 +5198,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
}
}
}
-
+
SDValue Callee;
if (!RenameFn)
Callee = getValue(I.getCalledValue());
@@ -5008,7 +5226,7 @@ public:
/// contains the set of register corresponding to the operand.
RegsForValue AssignedRegs;
- explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
+ explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
: TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
}
@@ -5083,6 +5301,8 @@ private:
}
};
+typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
+
} // end llvm namespace.
/// isAllocatableRegister - If the specified register is safe to allocate,
@@ -5192,7 +5412,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// vector types).
EVT RegVT = *PhysReg.second->vt_begin();
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
- OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
+ OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
@@ -5202,7 +5422,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// machine.
RegVT = EVT::getIntegerVT(Context,
OpInfo.ConstraintVT.getSizeInBits());
- OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
+ OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
}
@@ -5320,30 +5540,17 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
/// ConstraintOperands - Information about all of the constraints.
- std::vector<SDISelAsmOperandInfo> ConstraintOperands;
+ SDISelAsmOperandInfoVector ConstraintOperands;
std::set<unsigned> OutputRegs, InputRegs;
- // Do a prepass over the constraints, canonicalizing them, and building up the
- // ConstraintOperands list.
- std::vector<InlineAsm::ConstraintInfo>
- ConstraintInfos = IA->ParseConstraints();
-
- bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
-
- SDValue Chain, Flag;
-
- // We won't need to flush pending loads if this asm doesn't touch
- // memory and is nonvolatile.
- if (hasMemory || IA->hasSideEffects())
- Chain = getRoot();
- else
- Chain = DAG.getRoot();
+ TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(CS);
+ bool hasMemory = false;
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
unsigned ResNo = 0; // ResNo - The result number of the next output.
- for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
- ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
+ for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
+ ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
EVT OpVT = MVT::Other;
@@ -5380,9 +5587,6 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// If this is an input or an indirect output, process the call argument.
// BasicBlocks are labels, currently appearing only in asm's.
if (OpInfo.CallOperandVal) {
- // Strip bitcasts, if any. This mostly comes up for functions.
- OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
-
if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
@@ -5393,11 +5597,33 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
OpInfo.ConstraintVT = OpVT;
+
+ // Indirect operand accesses access memory.
+ if (OpInfo.isIndirect)
+ hasMemory = true;
+ else {
+ for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
+ TargetLowering::ConstraintType CType = TLI.getConstraintType(OpInfo.Codes[j]);
+ if (CType == TargetLowering::C_Memory) {
+ hasMemory = true;
+ break;
+ }
+ }
+ }
}
+ SDValue Chain, Flag;
+
+ // We won't need to flush pending loads if this asm doesn't touch
+ // memory and is nonvolatile.
+ if (hasMemory || IA->hasSideEffects())
+ Chain = getRoot();
+ else
+ Chain = DAG.getRoot();
+
// Second pass over the constraints: compute which constraint option to use
// and assign registers to constraints that want a specific physreg.
- for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
+ for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
// If this is an output operand with a matching input operand, look up the
@@ -5406,7 +5632,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// error.
if (OpInfo.hasMatchingInput()) {
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
-
+
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
@@ -5427,7 +5653,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// need to provide an address for the memory input.
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
!OpInfo.isIndirect) {
- assert(OpInfo.Type == InlineAsm::isInput &&
+ assert((OpInfo.isMultipleAlternative || (OpInfo.Type == InlineAsm::isInput)) &&
"Can only indirectify direct input operands!");
// Memory operands really want the address of the value. If we don't have
@@ -5451,7 +5677,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
Chain = DAG.getStore(Chain, getCurDebugLoc(),
- OpInfo.CallOperand, StackSlot, NULL, 0,
+ OpInfo.CallOperand, StackSlot,
+ MachinePointerInfo::getFixedStack(SSFI),
false, false, 0);
OpInfo.CallOperand = StackSlot;
}
@@ -5469,8 +5696,6 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
}
- ConstraintInfos.clear();
-
// Second pass - Loop over all of the operands, assigning virtual or physregs
// to register class operands.
for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
@@ -5495,9 +5720,14 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
- // Remember the AlignStack bit as operand 3.
- AsmNodeOperands.push_back(DAG.getTargetConstant(IA->isAlignStack() ? 1 : 0,
- MVT::i1));
+ // Remember the HasSideEffect and AlignStack bits as operand 3.
+ unsigned ExtraInfo = 0;
+ if (IA->hasSideEffects())
+ ExtraInfo |= InlineAsm::Extra_HasSideEffects;
+ if (IA->isAlignStack())
+ ExtraInfo |= InlineAsm::Extra_IsAlignStack;
+ AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
+ TLI.getPointerTy()));
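A sketch of the flag packing, assuming the enumerators mirror InlineAsm::Extra_HasSideEffects (1) and InlineAsm::Extra_IsAlignStack (2); consumers later test single bits, as the isStackAligningInlineAsm() check in the SelectionDAGISel hunk below does:

  enum { Extra_HasSideEffects = 1, Extra_IsAlignStack = 2 };

  static unsigned packExtraInfo(bool SideEffects, bool AlignStack) {
    unsigned ExtraInfo = 0;
    if (SideEffects) ExtraInfo |= Extra_HasSideEffects;
    if (AlignStack)  ExtraInfo |= Extra_IsAlignStack;
    return ExtraInfo;   // stored as operand 3 of the INLINEASM node
  }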
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
@@ -5588,7 +5818,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
" don't know how to handle tied "
"indirect register inputs");
}
-
+
RegsForValue MatchedRegs;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
@@ -5607,7 +5837,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
DAG, AsmNodeOperands);
break;
}
-
+
assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
"Unexpected number of operands");
@@ -5622,8 +5852,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
// Treat indirect 'X' constraint as memory.
- if (OpInfo.ConstraintType == TargetLowering::C_Other &&
- OpInfo.isIndirect)
+ if (OpInfo.ConstraintType == TargetLowering::C_Other &&
+ OpInfo.isIndirect)
OpInfo.ConstraintType = TargetLowering::C_Memory;
if (OpInfo.ConstraintType == TargetLowering::C_Other) {
@@ -5642,7 +5872,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
}
-
+
if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
@@ -5693,7 +5923,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
- DAG.getVTList(MVT::Other, MVT::Flag),
+ DAG.getVTList(MVT::Other, MVT::Glue),
&AsmNodeOperands[0], AsmNodeOperands.size());
Flag = Chain.getValue(1);
@@ -5713,7 +5943,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// not have the same VT as was expected. Convert it to the right type
// with bit_convert.
if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
- Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
+ Val = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
ResultType, Val);
} else if (ResultType != Val.getValueType() &&
@@ -5751,7 +5981,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
StoresToEmit[i].first,
getValue(StoresToEmit[i].second),
- StoresToEmit[i].second, 0,
+ MachinePointerInfo(StoresToEmit[i].second),
false, false, 0);
OutChains.push_back(Val);
}
@@ -5888,7 +6118,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
- MyFlags.VT = RegisterVT;
+ MyFlags.VT = RegisterVT.getSimpleVT();
MyFlags.Used = isReturnValueUsed;
if (RetSExt)
MyFlags.Flags.setSExt();
@@ -5924,7 +6154,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
- assert(Ins[i].VT == InVals[i].getValueType() &&
+ assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
"LowerCall emitted a value with the wrong type!");
});
@@ -6085,7 +6315,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
assert(InVals[i].getNode() &&
"LowerFormalArguments emitted a null value!");
- assert(Ins[i].VT == InVals[i].getValueType() &&
+ assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
"LowerFormalArguments emitted a value with the wrong type!");
}
});
@@ -6154,7 +6384,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// Note down frame index for byval arguments.
if (I->hasByValAttr() && !ArgValues.empty())
- if (FrameIndexSDNode *FI =
+ if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
FuncInfo->setByValArgumentFrameIndex(I, FI->getIndex());
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 5f400e9..a1a70c3 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -258,15 +258,16 @@ private:
struct BitTestBlock {
BitTestBlock(APInt F, APInt R, const Value* SV,
- unsigned Rg, bool E,
+ unsigned Rg, EVT RgVT, bool E,
MachineBasicBlock* P, MachineBasicBlock* D,
const BitTestInfo& C):
- First(F), Range(R), SValue(SV), Reg(Rg), Emitted(E),
+ First(F), Range(R), SValue(SV), Reg(Rg), RegVT(RgVT), Emitted(E),
Parent(P), Default(D), Cases(C) { }
APInt First;
APInt Range;
const Value *SValue;
unsigned Reg;
+ EVT RegVT;
bool Emitted;
MachineBasicBlock *Parent;
MachineBasicBlock *Default;
@@ -347,7 +348,7 @@ public:
SDValue getControlRoot();
DebugLoc getCurDebugLoc() const { return CurDebugLoc; }
-
+ void setCurDebugLoc(DebugLoc dl){ CurDebugLoc = dl; }
unsigned getSDNodeOrder() const { return SDNodeOrder; }
void CopyValueToVirtualRegister(const Value *V, unsigned Reg);
@@ -398,6 +399,10 @@ public:
void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
MachineBasicBlock *LandingPad = NULL);
+ /// UpdateSplitBlock - When an MBB was split during scheduling, update the
+ /// references that need to refer to the last resulting block.
+ void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);
+
private:
// Terminator instructions.
void visitRet(const ReturnInst &I);
@@ -431,7 +436,8 @@ public:
void visitSwitchCase(CaseBlock &CB,
MachineBasicBlock *SwitchBB);
void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
- void visitBitTestCase(MachineBasicBlock* NextMBB,
+ void visitBitTestCase(BitTestBlock &BB,
+ MachineBasicBlock* NextMBB,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 66cb5ce..62ebc81 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -43,6 +43,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -53,8 +54,17 @@
using namespace llvm;
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
+STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
+STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
+#ifndef NDEBUG
+STATISTIC(NumBBWithOutOfOrderLineInfo,
+ "Number of blocks with out of order line number info");
+STATISTIC(NumMBBWithOutOfOrderLineInfo,
+ "Number of machine blocks with out of order line number info");
+#endif
+
static cl::opt<bool>
EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
cl::desc("Enable verbose messages in the \"fast\" "
@@ -170,15 +180,18 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
-SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm, CodeGenOpt::Level OL) :
+SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
+ CodeGenOpt::Level OL) :
MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
FuncInfo(new FunctionLoweringInfo(TLI)),
CurDAG(new SelectionDAG(tm)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
GFI(),
OptLevel(OL),
- DAGSize(0)
-{}
+ DAGSize(0) {
+ initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
+ initializeAliasAnalysisAnalysisGroup(*PassRegistry::getPassRegistry());
+ }
SelectionDAGISel::~SelectionDAGISel() {
delete SDB;
@@ -202,6 +215,7 @@ void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
static bool FunctionCallsSetJmp(const Function *F) {
const Module *M = F->getParent();
static const char *ReturnsTwiceFns[] = {
+ "_setjmp",
"setjmp",
"sigsetjmp",
"setjmp_syscall",
@@ -227,6 +241,44 @@ static bool FunctionCallsSetJmp(const Function *F) {
#undef NUM_RETURNS_TWICE_FNS
}
+/// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
+/// may trap on it. In this case we have to split the edge so that the path
+/// through the predecessor block that doesn't go to the phi block doesn't
+/// execute the possibly trapping instruction.
+///
+/// This is required for correctness, so it must be done at -O0.
+///
+static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
+ // Loop over blocks with phi nodes.
+ for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
+ PHINode *PN = dyn_cast<PHINode>(BB->begin());
+ if (PN == 0) continue;
+
+ ReprocessBlock:
+ // For each block with a PHI node, check to see if any of the input values
+ // are potentially trapping constant expressions. Constant expressions are
+ // the only potentially trapping value that can occur as the argument to a
+ // PHI.
+ for (BasicBlock::iterator I = BB->begin(); (PN = dyn_cast<PHINode>(I)); ++I)
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
+ if (CE == 0 || !CE->canTrap()) continue;
+
+ // The only case we have to worry about is when the edge is critical.
+ // Since this block has a PHI Node, we assume it has multiple input
+ // edges: check to see if the pred has multiple successors.
+ BasicBlock *Pred = PN->getIncomingBlock(i);
+ if (Pred->getTerminator()->getNumSuccessors() == 1)
+ continue;
+
+ // Okay, we have to split this edge.
+ SplitCriticalEdge(Pred->getTerminator(),
+ GetSuccessorNumber(Pred, BB), SDISel, true);
+ goto ReprocessBlock;
+ }
+ }
+}
+
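A toy model of the edge test behind the split above; isCriticalEdge is a hypothetical helper, and the real code only needs the multiple-successors half because the PHI already implies multiple predecessors:

  #include <cassert>
  #include <vector>

  struct Block { std::vector<int> succs, preds; };

  static bool isCriticalEdge(const std::vector<Block> &CFG, int From, int To) {
    return CFG[From].succs.size() > 1 && CFG[To].preds.size() > 1;
  }

  int main() {
    // 0 -> {1, 2}, 1 -> {2}; block 2 holds the PHI.
    std::vector<Block> CFG(3);
    CFG[0].succs = {1, 2}; CFG[1].succs = {2};
    CFG[1].preds = {0};    CFG[2].preds = {0, 1};
    assert(isCriticalEdge(CFG, 0, 2));   // must get a new block on the edge
    assert(!isCriticalEdge(CFG, 1, 2));  // sole successor: nothing to split
  }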
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Do some sanity-checking on the command-line options.
assert((!EnableFastISelVerbose || EnableFastISel) &&
@@ -245,6 +297,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
+ SplitCriticalSideEffectEdges(const_cast<Function&>(Fn), this);
+
CurDAG->init(*MF);
FuncInfo->set(Fn, *MF);
SDB->init(GFI, *AA);
@@ -261,7 +315,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (!FuncInfo->ArgDbgValues.empty())
for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
E = RegInfo->livein_end(); LI != E; ++LI)
- if (LI->second)
+ if (LI->second)
LiveInMap.insert(std::make_pair(LI->first, LI->second));
// Insert DBG_VALUE instructions for function arguments to the entry block.
@@ -282,14 +336,37 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (LDI != LiveInMap.end()) {
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
MachineBasicBlock::iterator InsertPos = Def;
- const MDNode *Variable =
+ const MDNode *Variable =
MI->getOperand(MI->getNumOperands()-1).getMetadata();
unsigned Offset = MI->getOperand(1).getImm();
// Def is never a terminator here, so it is ok to increment InsertPos.
- BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
+ BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
TII.get(TargetOpcode::DBG_VALUE))
.addReg(LDI->second, RegState::Debug)
.addImm(Offset).addMetadata(Variable);
+
+ // If this vreg is directly copied into an exported register, then
+ // that COPY instruction also needs a DBG_VALUE, provided it is the
+ // only user of LDI->second.
+ MachineInstr *CopyUseMI = NULL;
+ for (MachineRegisterInfo::use_iterator
+ UI = RegInfo->use_begin(LDI->second);
+ MachineInstr *UseMI = UI.skipInstruction();) {
+ if (UseMI->isDebugValue()) continue;
+ if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
+ CopyUseMI = UseMI; continue;
+ }
+ // Otherwise this is another use or second copy use.
+ CopyUseMI = NULL; break;
+ }
+ if (CopyUseMI) {
+ MachineInstr *NewMI =
+ BuildMI(*MF, CopyUseMI->getDebugLoc(),
+ TII.get(TargetOpcode::DBG_VALUE))
+ .addReg(CopyUseMI->getOperand(0).getReg(), RegState::Debug)
+ .addImm(Offset).addMetadata(Variable);
+ EntryMBB->insertAfter(CopyUseMI, NewMI);
+ }
}
}
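
The scan above tolerates any number of DBG_VALUE users but at most one real user, and that user must be a COPY in the entry block; anything else clears CopyUseMI and gives up. A rough standalone model of the classification, using hypothetical stand-in types rather than MachineInstr:

    #include <cstddef>
    #include <vector>

    // Hypothetical simplified view of a machine instruction.
    struct MInstr {
      bool IsDebugValue;
      bool IsCopy;
      bool InEntryBlock;
    };

    // Return the single acceptable COPY user of a vreg, or null if the
    // vreg has any other kind of non-debug use (mirrors the CopyUseMI loop).
    static MInstr *findSoleCopyUse(const std::vector<MInstr*> &Uses) {
      MInstr *Copy = 0;
      for (std::size_t i = 0; i != Uses.size(); ++i) {
        MInstr *U = Uses[i];
        if (U->IsDebugValue)
          continue;                             // DBG_VALUE uses don't count
        if (U->IsCopy && !Copy && U->InEntryBlock) {
          Copy = U;                             // first (and only) copy use
          continue;
        }
        return 0;                               // second use, or a non-copy use
      }
      return Copy;
    }
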
@@ -303,10 +380,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
- // Operand 1 of an inline asm instruction indicates whether the asm
- // needs stack or not.
- if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
- (TID.isCall() && !TID.isReturn())) {
+ if ((TID.isCall() && !TID.isReturn()) ||
+ II->isStackAligningInlineAsm()) {
MFI->setHasCalls(true);
goto done;
}
@@ -362,6 +437,7 @@ SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
// Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG();
+ return;
}
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
@@ -406,9 +482,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
// Only install this information if it tells us something.
if (NumSignBits != 1 || KnownZero != 0 || KnownOne != 0) {
- DestReg -= TargetRegisterInfo::FirstVirtualRegister;
- if (DestReg >= FuncInfo->LiveOutRegInfo.size())
- FuncInfo->LiveOutRegInfo.resize(DestReg+1);
+ FuncInfo->LiveOutRegInfo.grow(DestReg);
FunctionLoweringInfo::LiveOutInfo &LOI =
FuncInfo->LiveOutRegInfo[DestReg];
LOI.NumSignBits = NumSignBits;
@@ -541,13 +615,19 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
+ MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
{
NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
- FuncInfo->MBB = Scheduler->EmitSchedule();
+ LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule();
FuncInfo->InsertPt = Scheduler->InsertPos;
}
+ // If the block was split, make sure we update any references that are used to
+ // update PHI nodes later on.
+ if (FirstMBB != LastMBB)
+ SDB->UpdateSplitBlock(FirstMBB, LastMBB);
+
// Free the scheduler state.
{
NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
@@ -563,19 +643,19 @@ void SelectionDAGISel::DoInstructionSelection() {
DEBUG(errs() << "===== Instruction selection begins:\n");
PreprocessISelDAG();
-
+
// Select target instructions for the DAG.
{
// Number all nodes with a topological order and set DAGSize.
DAGSize = CurDAG->AssignTopologicalOrder();
-
+
// Create a dummy node (which is not added to allnodes), that adds
// a reference to the root node, preventing it from being deleted,
// and tracking any changes of the root.
HandleSDNode Dummy(CurDAG->getRoot());
ISelPosition = SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode());
++ISelPosition;
-
+
// The AllNodes list is now topological-sorted. Visit the
// nodes by starting at the end of the list (the root of the
// graph) and preceding back toward the beginning (the entry
@@ -587,19 +667,19 @@ void SelectionDAGISel::DoInstructionSelection() {
// makes it theoretically possible to disable the DAGCombiner.
if (Node->use_empty())
continue;
-
+
SDNode *ResNode = Select(Node);
-
+
// FIXME: This is pretty gross. 'Select' should be changed to not return
// anything at all and this code should be nuked with a tactical strike.
-
+
// If node should not be replaced, continue with the next one.
if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE)
continue;
// Replace node.
if (ResNode)
ReplaceUses(Node, ResNode);
-
+
// If after the replacement this node is not used any more,
// remove this dead node.
if (Node->use_empty()) { // Don't delete EntryToken, etc.
@@ -607,9 +687,9 @@ void SelectionDAGISel::DoInstructionSelection() {
CurDAG->RemoveDeadNode(Node, &ISU);
}
}
-
+
CurDAG->setRoot(Dummy.getValue());
- }
+ }
DEBUG(errs() << "===== Instruction selection ends:\n");
@@ -661,6 +741,90 @@ void SelectionDAGISel::PrepareEHLandingPad() {
}
}
+
+
+
+bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
+ FastISel *FastIS) {
+ // Don't try to fold volatile loads. Target has to deal with alignment
+ // constraints.
+ if (LI->isVolatile()) return false;
+
+ // Figure out which vreg this is going into.
+ unsigned LoadReg = FastIS->getRegForValue(LI);
+ assert(LoadReg && "Load isn't already assigned a vreg?");
+
+ // Check to see what the uses of this vreg are. If it has no uses, or more
+ // than one use (at the machine instr level), then we can't fold it.
+ MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(LoadReg);
+ if (RI == RegInfo->reg_end())
+ return false;
+
+ // See if there is exactly one use of the vreg. If there are multiple uses,
+ // then the instruction got lowered to multiple machine instructions or the
+ // use of the loaded value ended up being multiple operands of the result;
+ // in either case, we can't fold this.
+ MachineRegisterInfo::reg_iterator PostRI = RI; ++PostRI;
+ if (PostRI != RegInfo->reg_end())
+ return false;
+
+ assert(RI.getOperand().isUse() &&
+ "The only use of the vreg must be a use, we haven't emitted the def!");
+
+ MachineInstr *User = &*RI;
+
+ // Set the insertion point properly. Folding the load can cause generation of
+ // other random instructions (like sign extends) for addressing modes; make
+ // sure they get inserted in a logical place before the new instruction.
+ FuncInfo->InsertPt = User;
+ FuncInfo->MBB = User->getParent();
+
+ // Ask the target to try folding the load.
+ return FastIS->TryToFoldLoad(User, RI.getOperandNo(), LI);
+}
+
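
The single-use test above is the usual iterator idiom: take the first use, step once, and require that this reaches the end. The same idiom over an ordinary iterator range, as a self-contained sketch:

    // True iff the range [Begin, End) contains exactly one element: the
    // same two-step test applied to the vreg's machine-level use list.
    template <typename Iter>
    static bool hasSingleElement(Iter Begin, Iter End) {
      if (Begin == End)
        return false;        // no uses at all
      ++Begin;               // step past the first use
      return Begin == End;   // a second use disqualifies folding
    }
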
+#ifndef NDEBUG
+/// CheckLineNumbers - Check if basic block instructions follow source order
+/// or not.
+static void CheckLineNumbers(const BasicBlock *BB) {
+ unsigned Line = 0;
+ unsigned Col = 0;
+ for (BasicBlock::const_iterator BI = BB->begin(),
+ BE = BB->end(); BI != BE; ++BI) {
+ const DebugLoc DL = BI->getDebugLoc();
+ if (DL.isUnknown()) continue;
+ unsigned L = DL.getLine();
+ unsigned C = DL.getCol();
+ if (L < Line || (L == Line && C < Col)) {
+ ++NumBBWithOutOfOrderLineInfo;
+ return;
+ }
+ Line = L;
+ Col = C;
+ }
+}
+
+/// CheckLineNumbers - Check if machine basic block instructions follow source
+/// order or not.
+static void CheckLineNumbers(const MachineBasicBlock *MBB) {
+ unsigned Line = 0;
+ unsigned Col = 0;
+ for (MachineBasicBlock::const_iterator MBI = MBB->begin(),
+ MBE = MBB->end(); MBI != MBE; ++MBI) {
+ const DebugLoc DL = MBI->getDebugLoc();
+ if (DL.isUnknown()) continue;
+ unsigned L = DL.getLine();
+ unsigned C = DL.getCol();
+ if (L < Line || (L == Line && C < Col)) {
+ ++NumMBBWithOutOfOrderLineInfo;
+ return;
+ }
+ Line = L;
+ Col = C;
+ }
+}
+#endif
+
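
Both checkers reduce to one question: is the sequence of known (line, column) pairs non-decreasing in lexicographic order? A self-contained version of that test on plain pairs (the real code additionally skips instructions whose location is unknown, and bumps a statistic instead of returning a flag):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // True if the (line, column) locations never go backwards, mirroring
    // the two CheckLineNumbers checkers above.
    static bool locationsInSourceOrder(
        const std::vector<std::pair<unsigned, unsigned> > &Locs) {
      unsigned Line = 0, Col = 0;
      for (std::size_t i = 0, e = Locs.size(); i != e; ++i) {
        unsigned L = Locs[i].first, C = Locs[i].second;
        if (L < Line || (L == Line && C < Col))
          return false;      // this location went backwards
        Line = L;
        Col = C;
      }
      return true;
    }
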
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
@@ -670,6 +834,9 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Iterate over all basic blocks in the function.
for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
const BasicBlock *LLVMBB = &*I;
+#ifndef NDEBUG
+ CheckLineNumbers(LLVMBB);
+#endif
FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
@@ -682,10 +849,19 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Setup an EH landing-pad block.
if (FuncInfo->MBB->isLandingPad())
PrepareEHLandingPad();
-
+
// Lower any arguments needed in this block if this is the entry block.
- if (LLVMBB == &Fn.getEntryBlock())
+ if (LLVMBB == &Fn.getEntryBlock()) {
+ for (BasicBlock::const_iterator DBI = LLVMBB->begin(), DBE = LLVMBB->end();
+ DBI != DBE; ++DBI) {
+ if (const DbgInfoIntrinsic *DI = dyn_cast<DbgInfoIntrinsic>(DBI)) {
+ const DebugLoc DL = DI->getDebugLoc();
+ SDB->setCurDebugLoc(DL);
+ break;
+ }
+ }
LowerArguments(LLVMBB);
+ }
// Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) {
@@ -723,8 +899,19 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastIS->recomputeInsertPt();
// Try to select the instruction with FastISel.
- if (FastIS->SelectInstruction(Inst))
+ if (FastIS->SelectInstruction(Inst)) {
+ // If fast isel succeeded, check to see if there is a single-use
+ // non-volatile load right before the selected instruction, and see if
+ // the load is used by the instruction. If so, try to fold it.
+ const Instruction *BeforeInst = 0;
+ if (Inst != Begin)
+ BeforeInst = llvm::prior(llvm::prior(BI));
+ if (BeforeInst && isa<LoadInst>(BeforeInst) &&
+ BeforeInst->hasOneUse() && *BeforeInst->use_begin() == Inst &&
+ TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), FastIS))
+ --BI; // If we succeeded, don't re-select the load.
continue;
+ }
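
Note why prior is applied twice: this loop walks the block bottom-up (the --BI above skips the just-folded load), with BI sitting one past the instruction being selected, so one step back is Inst itself and two steps back is the candidate load in front of it. A sketch of the same stepping with standard iterators, std::prev playing the role of llvm::prior:

    #include <iterator>

    // BI points one past the instruction that was just selected; recover
    // the instruction immediately before the selected one.
    template <typename Iter>
    static Iter instructionBeforeSelected(Iter BI) {
      // std::prev(BI) is the selected instruction itself; one more step
      // back is the potential single-use load to fold.
      return std::prev(std::prev(BI));
    }
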
// Then handle certain instructions as single-LLVM-Instruction blocks.
if (isa<CallInst>(Inst)) {
@@ -771,6 +958,11 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastIS->recomputeInsertPt();
}
+ if (Begin != BI)
+ ++NumDAGBlocks;
+ else
+ ++NumFastIselBlocks;
+
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
@@ -782,6 +974,11 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
delete FastIS;
+#ifndef NDEBUG
+ for (MachineFunction::const_iterator MBI = MF->begin(), MBE = MF->end();
+ MBI != MBE; ++MBI)
+ CheckLineNumbers(MBI);
+#endif
}
void
@@ -831,12 +1028,14 @@ SelectionDAGISel::FinishBasicBlock() {
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
if (j+1 != ej)
- SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
+ SDB->visitBitTestCase(SDB->BitTestCases[i],
+ SDB->BitTestCases[i].Cases[j+1].ThisBB,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
else
- SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
+ SDB->visitBitTestCase(SDB->BitTestCases[i],
+ SDB->BitTestCases[i].Default,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
FuncInfo->MBB);
@@ -951,7 +1150,7 @@ SelectionDAGISel::FinishBasicBlock() {
// additional DAGs necessary.
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
- MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
+ FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Determine the unique successors.
@@ -960,13 +1159,15 @@ SelectionDAGISel::FinishBasicBlock() {
if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
Succs.push_back(SDB->SwitchCases[i].FalseBB);
- // Emit the code. Note that this could result in ThisBB being split, so
- // we need to check for updates.
+ // Emit the code. Note that this could result in FuncInfo->MBB being split.
SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
- ThisBB = FuncInfo->MBB;
+
+ // Remember the last block, now that any splitting is done, for use in
+ // populating PHI nodes in successors.
+ MachineBasicBlock *ThisBB = FuncInfo->MBB;
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
@@ -1016,10 +1217,6 @@ ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
return Ctor(this, OptLevel);
}
-ScheduleHazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
- return new ScheduleHazardRecognizer();
-}
-
//===----------------------------------------------------------------------===//
// Helper functions used by the generated instruction selector.
//===----------------------------------------------------------------------===//
@@ -1099,11 +1296,11 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
- Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]); // 3
+ Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
- if (InOps[e-1].getValueType() == MVT::Flag)
- --e; // Don't process a flag operand if it is here.
+ if (InOps[e-1].getValueType() == MVT::Glue)
+ --e; // Don't process a glue operand if it is here.
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
@@ -1130,15 +1327,15 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
}
}
- // Add the flag input back if present.
+ // Add the glue input back if present.
if (e != InOps.size())
Ops.push_back(InOps.back());
}
-/// findFlagUse - Return use of EVT::Flag value produced by the specified
+/// findGlueUse - Return use of MVT::Glue value produced by the specified
/// SDNode.
///
-static SDNode *findFlagUse(SDNode *N) {
+static SDNode *findGlueUse(SDNode *N) {
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDUse &Use = I.getUse();
@@ -1160,11 +1357,11 @@ static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
// never find it.
//
// The Use may be -1 (unassigned) if it is a newly allocated node. This can
- // happen because we scan down to newly selected nodes in the case of flag
+ // happen because we scan down to newly selected nodes in the case of glue
// uses.
if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
return false;
-
+
// Don't revisit nodes if we already scanned it and didn't fail, we know we
// won't fail if we scan it again.
if (!Visited.insert(Use))
@@ -1174,7 +1371,7 @@ static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
// Ignore chain uses, they are validated by HandleMergeInputChains.
if (Use->getOperand(i).getValueType() == MVT::Other && IgnoreChains)
continue;
-
+
SDNode *N = Use->getOperand(i).getNode();
if (N == Def) {
if (Use == ImmedUse || Use == Root)
@@ -1221,8 +1418,8 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
//
// * indicates nodes to be folded together.
//
- // If Root produces a flag, then it gets (even more) interesting. Since it
- // will be "glued" together with its flag use in the scheduler, we need to
+ // If Root produces glue, then it gets (even more) interesting. Since it
+ // will be "glued" together with its glue use in the scheduler, we need to
// check if it might reach N.
//
// [N*] //
@@ -1240,30 +1437,30 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
// ^ / //
// f / //
// | / //
- // [FU] //
+ // [GU] //
//
- // If FU (flag use) indirectly reaches N (the load), and Root folds N
- // (call it Fold), then X is a predecessor of FU and a successor of
- // Fold. But since Fold and FU are flagged together, this will create
+ // If GU (glue use) indirectly reaches N (the load), and Root folds N
+ // (call it Fold), then X is a predecessor of GU and a successor of
+ // Fold. But since Fold and GU are glued together, this will create
// a cycle in the scheduling graph.
- // If the node has flags, walk down the graph to the "lowest" node in the
- // flagged set.
+ // If the node has glue, walk down the graph to the "lowest" node in the
+ // glued set.
EVT VT = Root->getValueType(Root->getNumValues()-1);
- while (VT == MVT::Flag) {
- SDNode *FU = findFlagUse(Root);
- if (FU == NULL)
+ while (VT == MVT::Glue) {
+ SDNode *GU = findGlueUse(Root);
+ if (GU == NULL)
break;
- Root = FU;
+ Root = GU;
VT = Root->getValueType(Root->getNumValues()-1);
-
- // If our query node has a flag result with a use, we've walked up it. If
+
+ // If our query node has a glue result with a use, we've walked up it. If
// the user (which has already been selected) has a chain or indirectly uses
// the chain, our WalkChainUsers predicate will not consider it. Because of
// this, we cannot ignore chains in this predicate.
IgnoreChains = false;
}
-
+
SmallPtrSet<SDNode*, 16> Visited;
return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
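
Stripped of the glue walking and the node-ID pruning, the legality test is a reachability question on the DAG: folding is illegal if the load can be reached from the root along some path other than the use being folded, because the folded node would then sit both above and below another node. A plain DFS sketch of that core question, omitting the refinements described above:

    #include <cstddef>
    #include <set>
    #include <vector>

    struct Node {
      std::vector<Node*> Operands;   // edges point from user to operand
    };

    // Plain depth-first reachability: can Target be reached from From by
    // walking operand edges?  findNonImmUse is this walk plus two
    // refinements: the immediate use being folded is exempt, and node IDs
    // prune subgraphs that cannot contain the definition.
    static bool reaches(Node *From, Node *Target, std::set<Node*> &Visited) {
      if (From == Target)
        return true;
      if (!Visited.insert(From).second)
        return false;                          // already scanned this node
      for (std::size_t i = 0; i != From->Operands.size(); ++i)
        if (reaches(From->Operands[i], Target, Visited))
          return true;
      return false;
    }
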
@@ -1272,10 +1469,10 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
std::vector<SDValue> Ops(N->op_begin(), N->op_end());
SelectInlineAsmMemoryOperands(Ops);
-
+
std::vector<EVT> VTs;
VTs.push_back(MVT::Other);
- VTs.push_back(MVT::Flag);
+ VTs.push_back(MVT::Glue);
SDValue New = CurDAG->getNode(ISD::INLINEASM, N->getDebugLoc(),
VTs, &Ops[0], Ops.size());
New->setNodeId(-1);
@@ -1287,11 +1484,11 @@ SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
}
/// GetVBR - decode a vbr encoding whose top bit is set.
-ALWAYS_INLINE static uint64_t
+LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
assert(Val >= 128 && "Not a VBR");
Val &= 127; // Remove first vbr bit.
-
+
unsigned Shift = 7;
uint64_t NextBits;
do {
@@ -1299,25 +1496,25 @@ GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
Val |= (NextBits&127) << Shift;
Shift += 7;
} while (NextBits & 128);
-
+
return Val;
}
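
For reference, the inverse operation emits the value in little-endian 7-bit groups with the continuation bit (128) set on every byte except the last. A small encoder sketch matching the decoder above, with a worked example; the actual table emitter lives in TableGen and is not part of this patch:

    #include <cassert>
    #include <stdint.h>
    #include <vector>

    // Encode Val in the variable-bit-rate format that GetVBR decodes.
    static std::vector<uint8_t> encodeVBR(uint64_t Val) {
      std::vector<uint8_t> Out;
      do {
        uint8_t Byte = Val & 127;    // low 7 bits
        Val >>= 7;
        if (Val)
          Byte |= 128;               // more bytes follow
        Out.push_back(Byte);
      } while (Val);
      return Out;
    }

    int main() {
      // 300 = 0b100101100: the low 7 bits are 44 (0x2C), so the first byte
      // is 0xAC with the continuation bit, and the remaining bits are 2.
      std::vector<uint8_t> V = encodeVBR(300);
      assert(V.size() == 2 && V[0] == 0xAC && V[1] == 0x02);
      return 0;
    }
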
-/// UpdateChainsAndFlags - When a match is complete, this method updates uses of
-/// interior flag and chain results to use the new flag and chain results.
+/// UpdateChainsAndGlue - When a match is complete, this method updates uses of
+/// interior glue and chain results to use the new glue and chain results.
void SelectionDAGISel::
-UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
- const SmallVectorImpl<SDNode*> &ChainNodesMatched,
- SDValue InputFlag,
- const SmallVectorImpl<SDNode*> &FlagResultNodesMatched,
- bool isMorphNodeTo) {
+UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
+ const SmallVectorImpl<SDNode*> &ChainNodesMatched,
+ SDValue InputGlue,
+ const SmallVectorImpl<SDNode*> &GlueResultNodesMatched,
+ bool isMorphNodeTo) {
SmallVector<SDNode*, 4> NowDeadNodes;
-
+
ISelUpdater ISU(ISelPosition);
// Now that all the normal results are replaced, we replace the chain and
- // flag results if present.
+ // glue results if present.
if (!ChainNodesMatched.empty()) {
assert(InputChain.getNode() != 0 &&
"Matched input chains but didn't produce a chain");
@@ -1325,55 +1522,55 @@ UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
// Replace all the chain results with the final chain we ended up with.
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
SDNode *ChainNode = ChainNodesMatched[i];
-
+
// If this node was already deleted, don't look at it.
if (ChainNode->getOpcode() == ISD::DELETED_NODE)
continue;
-
+
// Don't replace the results of the root node if we're doing a
// MorphNodeTo.
if (ChainNode == NodeToMatch && isMorphNodeTo)
continue;
-
+
SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
- if (ChainVal.getValueType() == MVT::Flag)
+ if (ChainVal.getValueType() == MVT::Glue)
ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
-
+
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
-
- // If the result produces a flag, update any flag results in the matched
- // pattern with the flag result.
- if (InputFlag.getNode() != 0) {
+
+ // If the result produces glue, update any glue results in the matched
+ // pattern with the glue result.
+ if (InputGlue.getNode() != 0) {
// Handle any interior nodes explicitly marked.
- for (unsigned i = 0, e = FlagResultNodesMatched.size(); i != e; ++i) {
- SDNode *FRN = FlagResultNodesMatched[i];
-
+ for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) {
+ SDNode *FRN = GlueResultNodesMatched[i];
+
// If this node was already deleted, don't look at it.
if (FRN->getOpcode() == ISD::DELETED_NODE)
continue;
-
- assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Flag &&
- "Doesn't have a flag result");
+
+ assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue &&
+ "Doesn't have a glue result");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
- InputFlag, &ISU);
-
+ InputGlue, &ISU);
+
// If the node became dead and we haven't already seen it, delete it.
if (FRN->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
NowDeadNodes.push_back(FRN);
}
}
-
+
if (!NowDeadNodes.empty())
CurDAG->RemoveDeadNodes(NowDeadNodes, &ISU);
-
+
DEBUG(errs() << "ISEL: Match complete!\n");
}
@@ -1392,17 +1589,17 @@ enum ChainResult {
///
/// The walk we do here is guaranteed to be small because we quickly get down to
/// already selected nodes "below" us.
-static ChainResult
+static ChainResult
WalkChainUsers(SDNode *ChainedNode,
SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
ChainResult Result = CR_Simple;
-
+
for (SDNode::use_iterator UI = ChainedNode->use_begin(),
E = ChainedNode->use_end(); UI != E; ++UI) {
// Make sure the use is of the chain, not some other value we produce.
if (UI.getUse().getValueType() != MVT::Other) continue;
-
+
SDNode *User = *UI;
// If we see an already-selected machine node, then we've gone beyond the
@@ -1411,7 +1608,7 @@ WalkChainUsers(SDNode *ChainedNode,
if (User->isMachineOpcode() ||
User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
continue;
-
+
if (User->getOpcode() == ISD::CopyToReg ||
User->getOpcode() == ISD::CopyFromReg ||
User->getOpcode() == ISD::INLINEASM ||
@@ -1437,7 +1634,7 @@ WalkChainUsers(SDNode *ChainedNode,
if (!std::count(ChainedNodesInPattern.begin(),
ChainedNodesInPattern.end(), User))
return CR_InducesCycle;
-
+
// Otherwise we found a node that is part of our pattern. For example in:
// x = load ptr
// y = x+4
@@ -1449,7 +1646,7 @@ WalkChainUsers(SDNode *ChainedNode,
InteriorChainedNodes.push_back(User);
continue;
}
-
+
// If we found a TokenFactor, there are two cases to consider: first, if the
// TokenFactor is just hanging "below" the pattern we're matching (i.e. no
// uses of the TF are in our pattern), we just want to ignore it. Second,
@@ -1486,7 +1683,7 @@ WalkChainUsers(SDNode *ChainedNode,
case CR_LeadsToInteriorNode:
break; // Otherwise, keep processing.
}
-
+
// Okay, we know we're in the interesting interior case. The TokenFactor
// is now going to be considered part of the pattern so that we rewrite its
// uses (it may have uses that are not part of the pattern) with the
@@ -1497,7 +1694,7 @@ WalkChainUsers(SDNode *ChainedNode,
InteriorChainedNodes.push_back(User);
continue;
}
-
+
return Result;
}
@@ -1519,7 +1716,7 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
InteriorChainedNodes) == CR_InducesCycle)
return SDValue(); // Would induce a cycle.
}
-
+
// Okay, we have walked all the matched nodes and collected TokenFactor nodes
// that we are interested in. Form our input TokenFactor node.
SmallVector<SDValue, 3> InputChains;
@@ -1530,14 +1727,14 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
if (N->getOpcode() != ISD::TokenFactor) {
if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
continue;
-
+
// Otherwise, add the input chain.
SDValue InChain = ChainNodesMatched[i]->getOperand(0);
assert(InChain.getValueType() == MVT::Other && "Not a chain");
InputChains.push_back(InChain);
continue;
}
-
+
// If we have a token factor, we want to add all inputs of the token factor
// that are not part of the pattern we're matching.
for (unsigned op = 0, e = N->getNumOperands(); op != e; ++op) {
@@ -1546,13 +1743,13 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
InputChains.push_back(N->getOperand(op));
}
}
-
+
SDValue Res;
if (InputChains.size() == 1)
return InputChains[0];
return CurDAG->getNode(ISD::TokenFactor, ChainNodesMatched[0]->getDebugLoc(),
MVT::Other, &InputChains[0], InputChains.size());
-}
+}
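
In outline, the merge gathers every chain edge that enters the matched pattern from outside, looking through TokenFactors that are themselves part of the pattern, and joins the survivors with one new TokenFactor. A simplified container-level sketch of the gathering step; the real code also skips interior chained nodes entirely, which this sketch glosses over:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct ChainNode {
      std::vector<ChainNode*> ChainInputs;   // incoming chain operands
    };

    // Gather the chain edges entering the matched set from outside; these
    // are the inputs the new TokenFactor must merge.
    static std::vector<ChainNode*>
    collectExternalChains(const std::vector<ChainNode*> &Matched) {
      std::vector<ChainNode*> Inputs;
      for (std::size_t i = 0; i != Matched.size(); ++i) {
        ChainNode *N = Matched[i];
        for (std::size_t op = 0; op != N->ChainInputs.size(); ++op) {
          ChainNode *In = N->ChainInputs[op];
          if (std::find(Matched.begin(), Matched.end(), In) == Matched.end())
            Inputs.push_back(In);   // external chain: feed it to the merge
        }
      }
      return Inputs;
    }
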
/// MorphNode - Handle morphing a node in place for the selector.
SDNode *SelectionDAGISel::
@@ -1560,15 +1757,15 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo) {
// It is possible we're using MorphNodeTo to replace a node with no
// normal results with one that has a normal result (or we could be
- // adding a chain) and the input could have flags and chains as well.
+ // adding a chain) and the input could have glue and chains as well.
// In this case we need to shift the operands down.
// FIXME: This is a horrible hack and broken in obscure cases, no worse
// than the old isel though.
- int OldFlagResultNo = -1, OldChainResultNo = -1;
+ int OldGlueResultNo = -1, OldChainResultNo = -1;
unsigned NTMNumResults = Node->getNumValues();
- if (Node->getValueType(NTMNumResults-1) == MVT::Flag) {
- OldFlagResultNo = NTMNumResults-1;
+ if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
+ OldGlueResultNo = NTMNumResults-1;
if (NTMNumResults != 1 &&
Node->getValueType(NTMNumResults-2) == MVT::Other)
OldChainResultNo = NTMNumResults-2;
@@ -1589,54 +1786,55 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
}
unsigned ResNumResults = Res->getNumValues();
- // Move the flag if needed.
- if ((EmitNodeInfo & OPFL_FlagOutput) && OldFlagResultNo != -1 &&
- (unsigned)OldFlagResultNo != ResNumResults-1)
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldFlagResultNo),
+ // Move the glue if needed.
+ if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
+ (unsigned)OldGlueResultNo != ResNumResults-1)
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
SDValue(Res, ResNumResults-1));
- if ((EmitNodeInfo & OPFL_FlagOutput) != 0)
+ if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
--ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
(unsigned)OldChainResultNo != ResNumResults-1)
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
SDValue(Res, ResNumResults-1));
// Otherwise, no replacement happened because the node already exists. Replace
// Uses of the old node with the new one.
if (Res != Node)
CurDAG->ReplaceAllUsesWith(Node, Res);
-
+
return Res;
}
/// CheckSame - Implements OP_CheckSame.
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, const SmallVectorImpl<SDValue> &RecordedNodes) {
+ SDValue N,
+ const SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
// Accept if it is exactly the same as a previously recorded node.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- return N == RecordedNodes[RecNo];
+ return N == RecordedNodes[RecNo].first;
}
-
+
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SelectionDAGISel &SDISel) {
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
}
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SelectionDAGISel &SDISel, SDNode *N) {
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDNode *N) {
uint16_t Opc = MatcherTable[MatcherIndex++];
@@ -1644,17 +1842,17 @@ CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return N->getOpcode() == Opc;
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering &TLI) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (N.getValueType() == VT) return true;
-
+
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && N.getValueType() == TLI.getPointerTy();
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering &TLI,
unsigned ChildNo) {
@@ -1664,57 +1862,57 @@ CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
return cast<CondCodeSDNode>(N)->get() ==
(ISD::CondCode)MatcherTable[MatcherIndex++];
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering &TLI) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (cast<VTSDNode>(N)->getVT() == VT)
return true;
-
+
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI.getPointerTy();
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
return C != 0 && C->getSExtValue() == Val;
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
if (N->getOpcode() != ISD::AND) return false;
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C != 0 && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
-ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
if (N->getOpcode() != ISD::OR) return false;
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C != 0 && SDISel.CheckOrMask(N.getOperand(0), C, Val);
}
@@ -1724,11 +1922,11 @@ CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
/// fail, set Result=true and return anything. If the current predicate is
/// known to pass, set Result=false and return the MatcherIndex to continue
/// with. If the current predicate is unknown, set Result=false and return the
-/// MatcherIndex to continue with.
+/// MatcherIndex to continue with.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
unsigned Index, SDValue N,
bool &Result, SelectionDAGISel &SDISel,
- SmallVectorImpl<SDValue> &RecordedNodes){
+ SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes) {
switch (Table[Index++]) {
default:
Result = false;
@@ -1782,21 +1980,21 @@ namespace {
struct MatchScope {
/// FailIndex - If this match fails, this is the index to continue with.
unsigned FailIndex;
-
+
/// NodeStack - The node stack when the scope was formed.
SmallVector<SDValue, 4> NodeStack;
-
+
/// NumRecordedNodes - The number of recorded nodes when the scope was formed.
unsigned NumRecordedNodes;
-
+
/// NumMatchedMemRefs - The number of matched memref entries.
unsigned NumMatchedMemRefs;
-
- /// InputChain/InputFlag - The current chain/flag
- SDValue InputChain, InputFlag;
+
+ /// InputChain/InputGlue - The current chain/glue
+ SDValue InputChain, InputGlue;
/// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
- bool HasChainNodesMatched, HasFlagResultNodesMatched;
+ bool HasChainNodesMatched, HasGlueResultNodesMatched;
};
}
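
Everything a MatchScope records is a checkpoint: when one child of a Scope fails, the interpreter restores these fields and resumes at FailIndex with the next child. A toy illustration of that save-and-restore discipline, far simpler than the real byte-coded table but with the same rollback shape:

    #include <vector>

    // Checkpointed matcher state. Only the recorded-node count is kept
    // here; the real MatchScope also snapshots chains, glue and memrefs.
    struct ToyScope {
      unsigned FailIndex;    // where matching resumes if this child fails
      unsigned NumRecorded;  // recorded-node count at scope entry
    };

    // Try each alternative in turn; on failure, roll Recorded back to the
    // checkpoint and continue at the scope's fail index.
    static bool matchWithBacktracking(const std::vector<bool> &Alternatives,
                                      std::vector<int> &Recorded) {
      unsigned Index = 0;
      while (Index != Alternatives.size()) {
        ToyScope Scope;
        Scope.FailIndex = Index + 1;            // the next alternative
        Scope.NumRecorded = unsigned(Recorded.size());
        Recorded.push_back(int(Index));         // speculative match state
        if (Alternatives[Index])
          return true;                          // matched: keep the state
        Recorded.resize(Scope.NumRecorded);     // failed: restore checkpoint
        Index = Scope.FailIndex;                // and try the next child
      }
      return false;
    }
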
@@ -1838,7 +2036,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
}
-
+
assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
// Set up the node stack with NodeToMatch as the only node on the stack.
@@ -1849,37 +2047,38 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// MatchScopes - Scopes used when matching, if a match failure happens, this
// indicates where to continue checking.
SmallVector<MatchScope, 8> MatchScopes;
-
+
// RecordedNodes - This is the set of nodes that have been recorded by the
- // state machine.
- SmallVector<SDValue, 8> RecordedNodes;
-
+ // state machine. The second value is the parent of the node, or null if the
+ // root is recorded.
+ SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
+
// MatchedMemRefs - This is the set of MemRef's we've seen in the input
// pattern.
SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
-
- // These are the current input chain and flag for use when generating nodes.
+
+ // These are the current input chain and glue for use when generating nodes.
// Various Emit operations change these. For example, emitting a copytoreg
// uses and updates these.
- SDValue InputChain, InputFlag;
-
+ SDValue InputChain, InputGlue;
+
// ChainNodesMatched - If a pattern matches nodes that have input/output
// chains, the OPC_EmitMergeInputChains operation is emitted which indicates
// which ones they are. The result is captured into this list so that we can
// update the chain results when the pattern is complete.
SmallVector<SDNode*, 3> ChainNodesMatched;
- SmallVector<SDNode*, 3> FlagResultNodesMatched;
-
+ SmallVector<SDNode*, 3> GlueResultNodesMatched;
+
DEBUG(errs() << "ISEL: Starting pattern match on root node: ";
NodeToMatch->dump(CurDAG);
errs() << '\n');
-
+
// Determine where to start the interpreter. Normally we start at opcode #0,
// but if the state machine starts with an OPC_SwitchOpcode, then we
// accelerate the first lookup (which is guaranteed to be hot) with the
// OpcodeOffset table.
unsigned MatcherIndex = 0;
-
+
if (!OpcodeOffset.empty()) {
// Already computed the OpcodeOffset table, just index into it.
if (N.getOpcode() < OpcodeOffset.size())
@@ -1911,7 +2110,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
}
-
+
while (1) {
assert(MatcherIndex < TableSize && "Invalid index");
#ifndef NDEBUG
@@ -1926,7 +2125,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// determine immediately that the first check (or first several) will
// immediately fail, don't even bother pushing a scope for them.
unsigned FailIndex;
-
+
while (1) {
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
@@ -1936,12 +2135,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
FailIndex = 0;
break;
}
-
+
FailIndex = MatcherIndex+NumToSkip;
-
+
unsigned MatcherIndexOfPredicate = MatcherIndex;
(void)MatcherIndexOfPredicate; // silence warning.
-
+
// If we can't evaluate this predicate without pushing a scope (e.g. if
// it is a 'MoveParent') or if the predicate succeeds on this node, we
// push the scope and evaluate the full predicate chain.
@@ -1950,20 +2149,20 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
Result, *this, RecordedNodes);
if (!Result)
break;
-
+
DEBUG(errs() << " Skipped scope entry (due to false predicate) at "
<< "index " << MatcherIndexOfPredicate
<< ", continuing at " << FailIndex << "\n");
++NumDAGIselRetries;
-
+
// Otherwise, we know that this case of the Scope is guaranteed to fail,
// move to the next case.
MatcherIndex = FailIndex;
}
-
+
// If the whole scope failed to match, bail.
if (FailIndex == 0) break;
-
+
// Push a MatchScope which indicates where to go if the first child fails
// to match.
MatchScope NewEntry;
@@ -1972,17 +2171,21 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NewEntry.NumRecordedNodes = RecordedNodes.size();
NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
NewEntry.InputChain = InputChain;
- NewEntry.InputFlag = InputFlag;
+ NewEntry.InputGlue = InputGlue;
NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
- NewEntry.HasFlagResultNodesMatched = !FlagResultNodesMatched.empty();
+ NewEntry.HasGlueResultNodesMatched = !GlueResultNodesMatched.empty();
MatchScopes.push_back(NewEntry);
continue;
}
- case OPC_RecordNode:
+ case OPC_RecordNode: {
// Remember this node, it may end up being an operand in the pattern.
- RecordedNodes.push_back(N);
+ SDNode *Parent = 0;
+ if (NodeStack.size() > 1)
+ Parent = NodeStack[NodeStack.size()-2].getNode();
+ RecordedNodes.push_back(std::make_pair(N, Parent));
continue;
-
+ }
+
case OPC_RecordChild0: case OPC_RecordChild1:
case OPC_RecordChild2: case OPC_RecordChild3:
case OPC_RecordChild4: case OPC_RecordChild5:
@@ -1991,20 +2194,21 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
- RecordedNodes.push_back(N->getOperand(ChildNo));
+ RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
+ N.getNode()));
continue;
}
case OPC_RecordMemRef:
MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
continue;
-
- case OPC_CaptureFlagInput:
- // If the current node has an input flag, capture it in InputFlag.
+
+ case OPC_CaptureGlueInput:
+ // If the current node has an input glue, capture it in InputGlue.
if (N->getNumOperands() != 0 &&
- N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag)
- InputFlag = N->getOperand(N->getNumOperands()-1);
+ N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
+ InputGlue = N->getOperand(N->getNumOperands()-1);
continue;
-
+
case OPC_MoveChild: {
unsigned ChildNo = MatcherTable[MatcherIndex++];
if (ChildNo >= N.getNumOperands())
@@ -2013,14 +2217,14 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeStack.push_back(N);
continue;
}
-
+
case OPC_MoveParent:
// Pop the current node off the NodeStack.
NodeStack.pop_back();
assert(!NodeStack.empty() && "Node stack imbalance!");
- N = NodeStack.back();
+ N = NodeStack.back();
continue;
-
+
case OPC_CheckSame:
if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
continue;
@@ -2036,7 +2240,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned CPNum = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
- if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo], CPNum,
+ if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
+ RecordedNodes[RecNo].first, CPNum,
RecordedNodes))
break;
continue;
@@ -2044,11 +2249,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckOpcode:
if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
continue;
-
+
case OPC_CheckType:
if (!::CheckType(MatcherTable, MatcherIndex, N, TLI)) break;
continue;
-
+
case OPC_SwitchOpcode: {
unsigned CurNodeOpcode = N.getOpcode();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
@@ -2066,22 +2271,22 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// If the opcode matches, then we will execute this case.
if (CurNodeOpcode == Opc)
break;
-
+
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
-
+
// If no cases matched, bail out.
if (CaseSize == 0) break;
-
+
// Otherwise, execute the case we found.
DEBUG(errs() << " OpcodeSwitch from " << SwitchStart
<< " to " << MatcherIndex << "\n");
continue;
}
-
+
case OPC_SwitchType: {
- MVT::SimpleValueType CurNodeVT = N.getValueType().getSimpleVT().SimpleTy;
+ MVT CurNodeVT = N.getValueType().getSimpleVT();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (1) {
@@ -2090,23 +2295,22 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
-
- MVT::SimpleValueType CaseVT =
- (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
+
+ MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
- CaseVT = TLI.getPointerTy().SimpleTy;
-
+ CaseVT = TLI.getPointerTy();
+
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
break;
-
+
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
-
+
// If no cases matched, bail out.
if (CaseSize == 0) break;
-
+
// Otherwise, execute the case we found.
DEBUG(errs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
<< "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
@@ -2135,7 +2339,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckOrImm:
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
-
+
case OPC_CheckFoldableChainNode: {
assert(NodeStack.size() != 1 && "No parent node");
// Verify that all intermediate nodes between the root and this one have
@@ -2156,7 +2360,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeToMatch, OptLevel,
true/*We validate our own chains*/))
break;
-
+
continue;
}
case OPC_EmitInteger: {
@@ -2165,22 +2369,24 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
- RecordedNodes.push_back(CurDAG->getTargetConstant(Val, VT));
+ RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
+ CurDAG->getTargetConstant(Val, VT), (SDNode*)0));
continue;
}
case OPC_EmitRegister: {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
- RecordedNodes.push_back(CurDAG->getRegister(RegNo, VT));
+ RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
+ CurDAG->getRegister(RegNo, VT), (SDNode*)0));
continue;
}
-
+
case OPC_EmitConvertToTarget: {
// Convert from IMM/FPIMM to target version.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- SDValue Imm = RecordedNodes[RecNo];
+ SDValue Imm = RecordedNodes[RecNo].first;
if (Imm->getOpcode() == ISD::Constant) {
int64_t Val = cast<ConstantSDNode>(Imm)->getZExtValue();
@@ -2189,11 +2395,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
Imm = CurDAG->getTargetConstantFP(*Val, Imm.getValueType());
}
-
- RecordedNodes.push_back(Imm);
+
+ RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
continue;
}
-
+
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
// These are space-optimized forms of OPC_EmitMergeInputChains.
@@ -2201,28 +2407,28 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
"EmitMergeInputChains should be the first chain producing node");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
-
+
// Read all of the chained nodes.
unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- ChainNodesMatched.push_back(RecordedNodes[RecNo].getNode());
-
+ ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
+
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
- !RecordedNodes[RecNo].hasOneUse()) {
+ !RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
-
+
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
-
+
if (InputChain.getNode() == 0)
break; // Failed to merge.
continue;
}
-
+
case OPC_EmitMergeInputChains: {
assert(InputChain.getNode() == 0 &&
"EmitMergeInputChains should be the first chain producing node");
@@ -2242,54 +2448,55 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
for (unsigned i = 0; i != NumChains; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- ChainNodesMatched.push_back(RecordedNodes[RecNo].getNode());
-
+ ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
+
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
- !RecordedNodes[RecNo].hasOneUse()) {
+ !RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
}
-
+
// If the inner loop broke out, the match fails.
if (ChainNodesMatched.empty())
break;
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
-
+
if (InputChain.getNode() == 0)
break; // Failed to merge.
continue;
}
-
+
case OPC_EmitCopyToReg: {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
-
+
if (InputChain.getNode() == 0)
InputChain = CurDAG->getEntryNode();
-
+
InputChain = CurDAG->getCopyToReg(InputChain, NodeToMatch->getDebugLoc(),
- DestPhysReg, RecordedNodes[RecNo],
- InputFlag);
-
- InputFlag = InputChain.getValue(1);
+ DestPhysReg, RecordedNodes[RecNo].first,
+ InputGlue);
+
+ InputGlue = InputChain.getValue(1);
continue;
}
-
+
case OPC_EmitNodeXForm: {
unsigned XFormNo = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- RecordedNodes.push_back(RunSDNodeXForm(RecordedNodes[RecNo], XFormNo));
+ SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
+ RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, (SDNode*) 0));
continue;
}
-
+
case OPC_EmitNode:
case OPC_MorphNodeTo: {
uint16_t TargetOpc = MatcherTable[MatcherIndex++];
@@ -2304,12 +2511,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (VT == MVT::iPTR) VT = TLI.getPointerTy().SimpleTy;
VTs.push_back(VT);
}
-
+
if (EmitNodeInfo & OPFL_Chain)
VTs.push_back(MVT::Other);
- if (EmitNodeInfo & OPFL_FlagOutput)
- VTs.push_back(MVT::Flag);
-
+ if (EmitNodeInfo & OPFL_GlueOutput)
+ VTs.push_back(MVT::Glue);
+
// This is hot code, so optimize the two most common cases of 1 and 2
// results.
SDVTList VTList;
@@ -2327,11 +2534,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
-
+
assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
- Ops.push_back(RecordedNodes[RecNo]);
+ Ops.push_back(RecordedNodes[RecNo].first);
}
-
+
// If there are variadic operands to add, handle them now.
if (EmitNodeInfo & OPFL_VariadicInfo) {
// Determine the start index to copy from.
@@ -2339,22 +2546,22 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
"Invalid variadic node");
- // Copy all of the variadic operands, not including a potential flag
+ // Copy all of the variadic operands, not including a potential glue
// input.
for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
i != e; ++i) {
SDValue V = NodeToMatch->getOperand(i);
- if (V.getValueType() == MVT::Flag) break;
+ if (V.getValueType() == MVT::Glue) break;
Ops.push_back(V);
}
}
-
- // If this has chain/flag inputs, add them.
+
+ // If this has chain/glue inputs, add them.
if (EmitNodeInfo & OPFL_Chain)
Ops.push_back(InputChain);
- if ((EmitNodeInfo & OPFL_FlagInput) && InputFlag.getNode() != 0)
- Ops.push_back(InputFlag);
-
+ if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != 0)
+ Ops.push_back(InputGlue);
+
// Create the node.
SDNode *Res = 0;
if (Opcode != OPC_MorphNodeTo) {
@@ -2362,28 +2569,29 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// add the results to the RecordedNodes list.
Res = CurDAG->getMachineNode(TargetOpc, NodeToMatch->getDebugLoc(),
VTList, Ops.data(), Ops.size());
-
- // Add all the non-flag/non-chain results to the RecordedNodes list.
+
+ // Add all the non-glue/non-chain results to the RecordedNodes list.
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- if (VTs[i] == MVT::Other || VTs[i] == MVT::Flag) break;
- RecordedNodes.push_back(SDValue(Res, i));
+ if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
+ RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
+ (SDNode*) 0));
}
-
+
} else {
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
EmitNodeInfo);
}
-
- // If the node had chain/flag results, update our notion of the current
- // chain and flag.
- if (EmitNodeInfo & OPFL_FlagOutput) {
- InputFlag = SDValue(Res, VTs.size()-1);
+
+ // If the node had chain/glue results, update our notion of the current
+ // chain and glue.
+ if (EmitNodeInfo & OPFL_GlueOutput) {
+ InputGlue = SDValue(Res, VTs.size()-1);
if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-2);
} else if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-1);
- // If the OPFL_MemRefs flag is set on this node, slap all of the
+ // If the OPFL_MemRefs bit is set on this node, slap all of the
// accumulated memrefs onto it.
//
// FIXME: This is vastly incorrect for patterns with multiple outputs
@@ -2396,37 +2604,37 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
cast<MachineSDNode>(Res)
->setMemRefs(MemRefs, MemRefs + MatchedMemRefs.size());
}
-
+
DEBUG(errs() << " "
<< (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created")
<< " node: "; Res->dump(CurDAG); errs() << "\n");
-
+
// If this was a MorphNodeTo then we're completely done!
if (Opcode == OPC_MorphNodeTo) {
- // Update chain and flag uses.
- UpdateChainsAndFlags(NodeToMatch, InputChain, ChainNodesMatched,
- InputFlag, FlagResultNodesMatched, true);
+ // Update chain and glue uses.
+ UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
+ InputGlue, GlueResultNodesMatched, true);
return Res;
}
-
+
continue;
}
-
- case OPC_MarkFlagResults: {
+
+ case OPC_MarkGlueResults: {
unsigned NumNodes = MatcherTable[MatcherIndex++];
-
- // Read and remember all the flag-result nodes.
+
+ // Read and remember all the glue-result nodes.
for (unsigned i = 0; i != NumNodes; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
- FlagResultNodesMatched.push_back(RecordedNodes[RecNo].getNode());
+ GlueResultNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
}
continue;
}
-
+
case OPC_CompleteMatch: {
// The match has been completed, and any new nodes (if any) have been
// created. Patch up references to the matched dag to use the newly
@@ -2437,13 +2645,13 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned ResSlot = MatcherTable[MatcherIndex++];
if (ResSlot & 128)
ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
-
+
assert(ResSlot < RecordedNodes.size() && "Invalid CheckSame");
- SDValue Res = RecordedNodes[ResSlot];
-
+ SDValue Res = RecordedNodes[ResSlot].first;
+
assert(i < NodeToMatch->getNumValues() &&
NodeToMatch->getValueType(i) != MVT::Other &&
- NodeToMatch->getValueType(i) != MVT::Flag &&
+ NodeToMatch->getValueType(i) != MVT::Glue &&
"Invalid number of results to complete!");
assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
NodeToMatch->getValueType(i) == MVT::iPTR ||
@@ -2454,24 +2662,23 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
}
- // If the root node defines a flag, add it to the flag nodes to update
- // list.
- if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Flag)
- FlagResultNodesMatched.push_back(NodeToMatch);
-
- // Update chain and flag uses.
- UpdateChainsAndFlags(NodeToMatch, InputChain, ChainNodesMatched,
- InputFlag, FlagResultNodesMatched, false);
-
+ // If the root node defines glue, add it to the glue nodes to update list.
+ if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Glue)
+ GlueResultNodesMatched.push_back(NodeToMatch);
+
+ // Update chain and glue uses.
+ UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
+ InputGlue, GlueResultNodesMatched, false);
+
assert(NodeToMatch->use_empty() &&
"Didn't replace all uses of the node?");
-
+
// FIXME: We just return here, which interacts correctly with SelectRoot
// above. We should fix this to not return an SDNode* anymore.
return 0;
}
}
-
+
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
@@ -2494,15 +2701,15 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
-
+
DEBUG(errs() << " Continuing at " << MatcherIndex << "\n");
-
+
InputChain = LastScope.InputChain;
- InputFlag = LastScope.InputFlag;
+ InputGlue = LastScope.InputGlue;
if (!LastScope.HasChainNodesMatched)
ChainNodesMatched.clear();
- if (!LastScope.HasFlagResultNodesMatched)
- FlagResultNodesMatched.clear();
+ if (!LastScope.HasGlueResultNodesMatched)
+ GlueResultNodesMatched.clear();
// Check to see what the offset is at the new MatcherIndex. If it is zero
// we have reached the end of this scope, otherwise we have another child
@@ -2517,21 +2724,21 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
LastScope.FailIndex = MatcherIndex+NumToSkip;
break;
}
-
+
// End of this scope, pop it and try the next child in the containing
// scope.
MatchScopes.pop_back();
}
}
}
-
+
void SelectionDAGISel::CannotYetSelect(SDNode *N) {
std::string msg;
raw_string_ostream Msg(msg);
- Msg << "Cannot yet select: ";
-
+ Msg << "Cannot select: ";
+
if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 8313de5..76eb945 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -93,7 +93,7 @@ namespace llvm {
static std::string getEdgeAttributes(const void *Node, EdgeIter EI) {
SDValue Op = EI.getNode()->getOperand(EI.getOperand());
EVT VT = Op.getValueType();
- if (VT == MVT::Flag)
+ if (VT == MVT::Glue)
return "color=red,style=bold";
else if (VT == MVT::Other)
return "color=blue,style=dashed";
@@ -273,14 +273,14 @@ std::string ScheduleDAGSDNodes::getGraphNodeLabel(const SUnit *SU) const {
raw_string_ostream O(s);
O << "SU(" << SU->NodeNum << "): ";
if (SU->getNode()) {
- SmallVector<SDNode *, 4> FlaggedNodes;
- for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
- FlaggedNodes.push_back(N);
- while (!FlaggedNodes.empty()) {
+ SmallVector<SDNode *, 4> GluedNodes;
+ for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
+ GluedNodes.push_back(N);
+ while (!GluedNodes.empty()) {
O << DOTGraphTraits<SelectionDAG*>
- ::getSimpleNodeLabel(FlaggedNodes.back(), DAG);
- FlaggedNodes.pop_back();
- if (!FlaggedNodes.empty())
+ ::getSimpleNodeLabel(GluedNodes.back(), DAG);
+ GluedNodes.pop_back();
+ if (!GluedNodes.empty())
O << "\n ";
}
} else {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index b74f600..691390e 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -28,6 +28,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include <cctype>
using namespace llvm;
namespace llvm {
@@ -530,7 +531,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
}
-
+
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
@@ -538,8 +539,8 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
// Most targets ignore the @llvm.prefetch intrinsic.
setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
-
- // ConstantFP nodes default to expand. Targets can either change this to
+
+ // ConstantFP nodes default to expand. Targets can either change this to
// Legal, in which case all fp constants are legal, or use isFPImmLegal()
// to optimize expansions for certain constants.
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
@@ -560,18 +561,21 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
-
+
IsLittleEndian = TD->isLittleEndian();
ShiftAmountTy = PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
+ maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize
+ = maxStoresPerMemmoveOptSize = 4;
benefitFromCodePlacementOpt = false;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
SelectIsExpensive = false;
IntDivIsCheap = false;
Pow2DivIsCheap = false;
+ JumpIsExpensive = false;
StackPointerRegisterToSaveRestore = 0;
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
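
The constructor hunk above introduces separate caps on inlined memset/memcpy/memmove expansion when optimizing for size (default 4, versus 8 at full optimization) and a JumpIsExpensive flag defaulting to false. A self-contained sketch, with a stand-in struct rather than the real TargetLowering constructor, of how a backend might tune these:

    // Stand-in for the new TargetLowering fields; not the real class. A
    // hypothetical backend with cheap wide stores raises the -O2 caps but
    // leaves the new -Os caps at their conservative default of 4.
    struct LoweringDefaults {
      unsigned maxStoresPerMemset  = 8, maxStoresPerMemsetOptSize  = 4;
      unsigned maxStoresPerMemcpy  = 8, maxStoresPerMemcpyOptSize  = 4;
      unsigned maxStoresPerMemmove = 8, maxStoresPerMemmoveOptSize = 4;
      bool JumpIsExpensive = false;  // favor predication over branches if true
    };

    LoweringDefaults makeWideStoreTarget() {
      LoweringDefaults D;
      D.maxStoresPerMemset = D.maxStoresPerMemcpy = 16;
      return D;
    }
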
@@ -617,16 +621,16 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
MVT EltTy = VT.getVectorElementType();
-
+
unsigned NumVectorRegs = 1;
-
- // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
+
+ // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
// could break down into LHS/RHS like LegalizeDAG does.
if (!isPowerOf2_32(NumElts)) {
NumVectorRegs = NumElts;
NumElts = 1;
}
-
+
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
@@ -635,7 +639,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
}
NumIntermediates = NumVectorRegs;
-
+
MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
if (!TLI->isTypeLegal(NewVT))
NewVT = EltTy;
@@ -645,7 +649,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
RegisterVT = DestVT;
if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
-
+
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
return NumVectorRegs;
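
getVectorTypeBreakdownMVT halves the lane count until the vector type becomes legal, doubling the register count at each step (non-power-of-2 vectors are fully scalarized first, per the FIXME above). A compact model with a hardcoded legality predicate in place of TLI->isTypeLegal:

    // Model of the halving loop above; legalLanes() stands in for the
    // target's isTypeLegal query (here: at most 4 lanes are legal).
    bool legalLanes(unsigned n) { return n <= 4; }

    // e.g. 16 lanes on a 4-lane machine: 16 -> 8 -> 4, register count
    // doubling each time, so the result is 4 registers of 4 lanes.
    unsigned breakdown(unsigned NumElts, unsigned &NumRegs) {
      NumRegs = 1;
      while (NumElts > 1 && !legalLanes(NumElts)) {
        NumElts >>= 1;
        NumRegs <<= 1;
      }
      return NumElts;  // lanes per intermediate register
    }
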
@@ -750,7 +754,7 @@ void TargetLowering::computeRegisterProperties() {
RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
TransformToType[MVT::ppcf128] = MVT::f64;
ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
- }
+ }
// Decide how to handle f64. If the target does not have native f64 support,
// expand it to i64 and we will be generating soft float library calls.
@@ -776,13 +780,13 @@ void TargetLowering::computeRegisterProperties() {
ValueTypeActions.setTypeAction(MVT::f32, Expand);
}
}
-
+
// Loop over all of the vector value types to see which need transformations.
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType)i;
if (isTypeLegal(VT)) continue;
-
+
// Determine if there is a legal wider type. If so, we should promote to
// that wider vector type.
EVT EltVT = VT.getVectorElementType();
@@ -792,8 +796,8 @@ void TargetLowering::computeRegisterProperties() {
for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
EVT SVT = (MVT::SimpleValueType)nVT;
if (SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts &&
- isTypeSynthesizable(SVT)) {
+ SVT.getVectorNumElements() > NElts &&
+ isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
NumRegistersForVT[i] = 1;
@@ -804,7 +808,7 @@ void TargetLowering::computeRegisterProperties() {
}
if (IsLegalWiderType) continue;
}
-
+
MVT IntermediateVT;
EVT RegisterVT;
unsigned NumIntermediates;
@@ -812,7 +816,7 @@ void TargetLowering::computeRegisterProperties() {
getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
RegisterVT, this);
RegisterTypeForVT[i] = RegisterVT;
-
+
EVT NVT = VT.getPow2VectorType();
if (NVT == VT) {
// Type is already a power of 2. The default action is to split.
@@ -865,7 +869,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
unsigned &NumIntermediates,
EVT &RegisterVT) const {
unsigned NumElts = VT.getVectorNumElements();
-
+
// If there is a wider vector type with the same element type as this one,
// we should widen to that legal vector type. This handles things like
// <2 x float> -> <4 x float>.
@@ -877,19 +881,19 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
return 1;
}
}
-
+
// Figure out the right, legal destination reg to copy into.
EVT EltTy = VT.getVectorElementType();
-
+
unsigned NumVectorRegs = 1;
-
- // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
+
+ // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
// could break down into LHS/RHS like LegalizeDAG does.
if (!isPowerOf2_32(NumElts)) {
NumVectorRegs = NumElts;
NumElts = 1;
}
-
+
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
while (NumElts > 1 && !isTypeLegal(
@@ -899,7 +903,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
}
NumIntermediates = NumVectorRegs;
-
+
EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
if (!isTypeLegal(NewVT))
NewVT = EltTy;
@@ -909,13 +913,13 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
RegisterVT = DestVT;
if (DestVT.bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
-
+
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
return NumVectorRegs;
}
-/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
@@ -988,11 +992,11 @@ unsigned TargetLowering::getJumpTableEncoding() const {
// In non-pic modes, just use the address of a block.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return MachineJumpTableInfo::EK_BlockAddress;
-
+
// In PIC mode, if the target supports a GPRel32 directive, use it.
if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != 0)
return MachineJumpTableInfo::EK_GPRel32BlockAddress;
-
+
// Otherwise, use a label difference.
return MachineJumpTableInfo::EK_LabelDifference32;
}
@@ -1036,11 +1040,11 @@ TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// Optimization Methods
//===----------------------------------------------------------------------===//
-/// ShrinkDemandedConstant - Check to see if the specified operand of the
+/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
-bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
+bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
const APInt &Demanded) {
DebugLoc dl = Op.getDebugLoc();
@@ -1062,7 +1066,7 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
EVT VT = Op.getValueType();
SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
DAG.getConstant(Demanded &
- C->getAPIntValue(),
+ C->getAPIntValue(),
VT));
return CombineTo(Op, New);
}
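
ShrinkDemandedConstant's rule is simple: if a binop's constant operand sets bits that no user demands, rebuild the node with the masked constant. The same logic on plain 64-bit masks instead of APInt:

    // Miniature ShrinkDemandedConstant: returns true (and shrinks C) when
    // the constant carries bits outside the demanded mask.
    #include <cstdint>

    bool shrinkConstant(uint64_t &C, uint64_t Demanded) {
      if ((C & ~Demanded) == 0)
        return false;          // no undemanded bits set; keep the node
      C &= Demanded;           // e.g. C=0xFF00FF, Demanded=0xFF -> C=0xFF
      return true;
    }
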
@@ -1139,9 +1143,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownZero = KnownOne = APInt(BitWidth, 0);
// Other users may use these bits.
- if (!Op.getNode()->hasOneUse()) {
+ if (!Op.getNode()->hasOneUse()) {
if (Depth != 0) {
- // If not at the root, Just compute the KnownZero/KnownOne bits to
+ // If not at the root, just compute the KnownZero/KnownOne bits to
// simplify things downstream.
TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
return false;
@@ -1149,7 +1153,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If this is the root being simplified, allow it to have multiple uses,
// just set the NewMask to all bits.
NewMask = APInt::getAllOnesValue(BitWidth);
- } else if (DemandedMask == 0) {
+ } else if (DemandedMask == 0) {
// Not demanding any bits from Op.
if (Op.getOpcode() != ISD::UNDEF)
return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
@@ -1172,8 +1176,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// the RHS.
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
APInt LHSZero, LHSOne;
+ // Do not increment Depth here; that can cause an infinite loop.
TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
- LHSZero, LHSOne, Depth+1);
+ LHSZero, LHSOne, Depth);
// If the LHS already has zeros where RHSC does, this and is dead.
if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
@@ -1182,16 +1187,16 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
return true;
}
-
+
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// If all of the demanded bits are known one on one side, return the other.
// These bits cannot contribute to the result of the 'and'.
if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
@@ -1214,15 +1219,15 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownZero |= KnownZero2;
break;
case ISD::OR:
- if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'or'.
if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
@@ -1248,15 +1253,15 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownOne |= KnownOne2;
break;
case ISD::XOR:
- if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'xor'.
if ((KnownZero & NewMask) == NewMask)
@@ -1274,12 +1279,12 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
Op.getOperand(0),
Op.getOperand(1)));
-
+
// Output known-0 bits are known if clear or set in both the LHS & RHS.
KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
-
+
// If all of the demanded bits on one side are known, and all of the set
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
@@ -1288,11 +1293,11 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if ((KnownOne & KnownOne2) == KnownOne) {
EVT VT = Op.getValueType();
SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
Op.getOperand(0), ANDC));
}
}
-
+
// If the RHS is a constant, see if we can simplify it.
// for XOR, we prefer to force bits to 1 if they will make a -1.
// if we can't force bits, try to shrink constant
@@ -1317,37 +1322,37 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownOne = KnownOneOut;
break;
case ISD::SELECT:
- if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// If the operands are constants, see if we can simplify them.
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
-
+
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
break;
case ISD::SELECT_CC:
- if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// If the operands are constants, see if we can simplify them.
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
-
+
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
KnownZero &= KnownZero2;
@@ -1373,16 +1378,16 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Diff < 0) {
Diff = -Diff;
Opc = ISD::SRL;
- }
-
- SDValue NewSA =
+ }
+
+ SDValue NewSA =
TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
EVT VT = Op.getValueType();
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
InOp.getOperand(0), NewSA));
}
- }
-
+ }
+
if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
@@ -1421,7 +1426,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
unsigned ShAmt = SA->getZExtValue();
unsigned VTSize = VT.getSizeInBits();
SDValue InOp = Op.getOperand(0);
-
+
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
@@ -1438,20 +1443,20 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Diff < 0) {
Diff = -Diff;
Opc = ISD::SHL;
- }
-
+ }
+
SDValue NewSA =
TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
InOp.getOperand(0), NewSA));
}
- }
-
+ }
+
// Compute the new bits that are at the top now.
if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
@@ -1472,7 +1477,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
EVT VT = Op.getValueType();
unsigned ShAmt = SA->getZExtValue();
-
+
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
break;
@@ -1484,21 +1489,21 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
if (HighBits.intersects(NewMask))
InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());
-
+
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
-
+
// Handle the sign bit, adjusted to where it is now in the mask.
APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
-
+
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT,
Op.getOperand(0),
Op.getOperand(1)));
} else if (KnownOne.intersects(SignBit)) { // New bits are known one.
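
The SRA case above converts an arithmetic shift into a logical one whenever the (shifted) sign bit is known zero or none of the sign-smeared high bits are demanded, since the two shifts then agree on every demanded bit. A simplified predicate over 32-bit masks, where knownZero describes the shift's input:

    // Simplified form of the SRA -> SRL test; shAmt must be in [1, 31].
    #include <cstdint>

    bool sraFoldsToSrl(uint32_t knownZero, uint32_t demanded, unsigned shAmt) {
      uint32_t signBit  = 0x80000000u;
      uint32_t highBits = ~0u << (32 - shAmt);  // bits SRA fills from the sign
      return (knownZero & signBit) != 0         // input sign known zero, or
          || (demanded & highBits) == 0;        // smeared bits never demanded
    }
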
@@ -1509,23 +1514,23 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
case ISD::SIGN_EXTEND_INREG: {
EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- // Sign extension. Compute the demanded bits in the result that are not
+ // Sign extension. Compute the demanded bits in the result that are not
// present in the input.
APInt NewBits =
APInt::getHighBitsSet(BitWidth,
BitWidth - EVT.getScalarType().getSizeInBits());
-
+
// If none of the extended bits are demanded, eliminate the sextinreg.
if ((NewBits & NewMask) == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
- APInt InSignBit = APInt::getSignBit(EVT.getScalarType().getSizeInBits());
- InSignBit.zext(BitWidth);
+ APInt InSignBit =
+ APInt::getSignBit(EVT.getScalarType().getSizeInBits()).zext(BitWidth);
APInt InputDemandedBits =
APInt::getLowBitsSet(BitWidth,
EVT.getScalarType().getSizeInBits()) &
NewMask;
-
+
// Since the sign extended bits are demanded, we know that the sign
// bit is demanded.
InputDemandedBits |= InSignBit;
@@ -1533,16 +1538,16 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
-
+
// If the input sign bit is known zero, convert this into a zero extension.
if (KnownZero.intersects(InSignBit))
- return TLO.CombineTo(Op,
+ return TLO.CombineTo(Op,
TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT));
-
+
if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
@@ -1555,23 +1560,22 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
case ISD::ZERO_EXTEND: {
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
- APInt InMask = NewMask;
- InMask.trunc(OperandBitWidth);
-
+ APInt InMask = NewMask.trunc(OperandBitWidth);
+
// If none of the top bits are demanded, convert this into an any_extend.
APInt NewBits =
APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
if (!NewBits.intersects(NewMask))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
- Op.getValueType(),
+ Op.getValueType(),
Op.getOperand(0)));
-
+
if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
break;
}
@@ -1581,31 +1585,31 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
APInt NewBits = ~InMask & NewMask;
-
+
// If none of the top bits are demanded, convert this into an any_extend.
if (NewBits == 0)
return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl,
Op.getValueType(),
Op.getOperand(0)));
-
+
// Since some of the sign extended bits are demanded, we know that the sign
// bit is demanded.
APInt InDemandedBits = InMask & NewMask;
InDemandedBits |= InSignBit;
- InDemandedBits.trunc(InBits);
-
- if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
+ InDemandedBits = InDemandedBits.trunc(InBits);
+
+ if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
-
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
+
// If the sign bit is known zero, convert this to a zero extend.
if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl,
- Op.getValueType(),
+ Op.getValueType(),
Op.getOperand(0)));
-
+
// If the sign bit is known one, the top bits match.
if (KnownOne.intersects(InSignBit)) {
KnownOne |= NewBits;
@@ -1619,14 +1623,13 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
case ISD::ANY_EXTEND: {
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
- APInt InMask = NewMask;
- InMask.trunc(OperandBitWidth);
+ APInt InMask = NewMask.trunc(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
@@ -1634,14 +1637,13 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// zero/one bits live out.
unsigned OperandBitWidth =
Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
- APInt TruncMask = NewMask;
- TruncMask.zext(OperandBitWidth);
+ APInt TruncMask = NewMask.zext(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- KnownZero.trunc(BitWidth);
- KnownOne.trunc(BitWidth);
-
+ KnownZero = KnownZero.trunc(BitWidth);
+ KnownOne = KnownOne.trunc(BitWidth);
+
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
if (Op.getOperand(0).getNode()->hasOneUse()) {
@@ -1661,25 +1663,24 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
OperandBitWidth - BitWidth);
- HighBits = HighBits.lshr(ShAmt->getZExtValue());
- HighBits.trunc(BitWidth);
+ HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
// None of the shifted in bits are needed. Add a truncate of the
// shift input, then shift it.
SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
- Op.getValueType(),
+ Op.getValueType(),
In.getOperand(0));
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
Op.getValueType(),
- NewTrunc,
+ NewTrunc,
In.getOperand(1)));
}
break;
}
}
-
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
break;
}
case ISD::AssertZext: {
@@ -1689,7 +1690,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth,
@@ -1697,7 +1698,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownZero |= ~InMask & NewMask;
break;
}
- case ISD::BIT_CONVERT:
+ case ISD::BITCAST:
#if 0
// If this is an FP->Int bitcast and if the sign bit is the only thing that
// is demanded, turn this into a FGETSIGN.
@@ -1709,7 +1710,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
// Make a FGETSIGN + SHL to move the sign bit into the appropriate
// place. We expect the SHL to be eliminated by other optimizations.
- SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
+ SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
Op.getOperand(0));
unsigned ShVal = Op.getValueType().getSizeInBits()-1;
SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
@@ -1742,21 +1743,21 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
break;
}
-
+
// If we know the value of all of the demanded bits, return this as a
// constant.
if ((NewMask & (KnownZero|KnownOne)) == NewMask)
return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));
-
+
return false;
}
-/// computeMaskedBitsForTargetNode - Determine which of the bits specified
-/// in Mask are known to be either zero or one and return them in the
+/// computeMaskedBitsForTargetNode - Determine which of the bits specified
+/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
-void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
+void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
@@ -1817,7 +1818,7 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
(KnownOne.countPopulation() == 1);
}
-/// SimplifySetCC - Try to simplify a setcc built with the specified operands
+/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
@@ -1869,6 +1870,30 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
+ SDValue CTPOP = N0;
+ // Look through truncs that don't change the value of a ctpop.
+ if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE)
+ CTPOP = N0.getOperand(0);
+
+ if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP &&
+ (N0 == CTPOP || N0.getValueType().getSizeInBits() >
+ Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) {
+ EVT CTVT = CTPOP.getValueType();
+ SDValue CTOp = CTPOP.getOperand(0);
+
+ // (ctpop x) u< 2 -> (x & x-1) == 0
+ // (ctpop x) u> 1 -> (x & x-1) != 0
+ if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp,
+ DAG.getConstant(1, CTVT));
+ SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub);
+ ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
+ return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, CTVT), CC);
+ }
+
+ // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal.
+ }
+
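
The new fold leans on the classic clear-lowest-set-bit identity: x & (x - 1) is zero exactly when x has at most one bit set, so an unsigned popcount compare against 1 or 2 never needs a real CTPOP. The arithmetic, outside of any DAG:

    // (ctpop x) u< 2  <=>  (x & (x-1)) == 0; u> 1 is the negation.
    #include <cstdint>

    bool popcountAtMostOne(uint32_t x) {
      return (x & (x - 1)) == 0;  // clearing the lowest set bit leaves 0
    }
    // popcountAtMostOne(0) and popcountAtMostOne(8) hold; popcountAtMostOne(6)
    // does not, matching ctpop values 0, 1, and 2.
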
// If the LHS is '(and load, const)', the RHS is 0,
// the test is for equality or unsigned, and all 1 bits of the const are
// in the same partial word, see if we can shorten the load.
@@ -1884,7 +1909,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (!Lod->isVolatile() && Lod->isUnindexed()) {
unsigned origWidth = N0.getValueType().getSizeInBits();
unsigned maskWidth = origWidth;
- // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
+ // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
// 8 bits, but have to be careful...
if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
origWidth = Lod->getMemoryVT().getSizeInBits();
@@ -1916,10 +1941,9 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
DAG.getConstant(bestOffset, PtrType));
unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
- Lod->getSrcValue(),
- Lod->getSrcValueOffset() + bestOffset,
+ Lod->getPointerInfo().getWithOffset(bestOffset),
false, false, NewAlign);
- return DAG.getSetCC(dl, VT,
+ return DAG.getSetCC(dl, VT,
DAG.getNode(ISD::AND, dl, newVT, NewLoad,
DAG.getConstant(bestMask.trunc(bestWidth),
newVT)),
@@ -1969,7 +1993,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
(isOperationLegal(ISD::SETCC, newVT) &&
getCondCodeAction(Cond, newVT)==Legal))
return DAG.getSetCC(dl, VT, N0.getOperand(0),
- DAG.getConstant(APInt(C1).trunc(InSize), newVT),
+ DAG.getConstant(C1.trunc(InSize), newVT),
Cond);
break;
}
@@ -1987,7 +2011,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// the sign extension, it is impossible for both sides to be equal.
if (C1.getMinSignedBits() > ExtSrcTyBits)
return DAG.getConstant(Cond == ISD::SETNE, VT);
-
+
SDValue ZextOp;
EVT Op0Ty = N0.getOperand(0).getValueType();
if (Op0Ty == ExtSrcTy) {
@@ -2000,10 +2024,10 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(ZextOp.getNode());
// Otherwise, make this a use of a zext.
- return DAG.getSetCC(dl, VT, ZextOp,
+ return DAG.getSetCC(dl, VT, ZextOp,
DAG.getConstant(C1 & APInt::getLowBitsSet(
ExtDstTyBits,
- ExtSrcTyBits),
+ ExtSrcTyBits),
ExtDstTy),
Cond);
} else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
@@ -2013,16 +2037,16 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) {
bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1);
if (TrueWhenTrue)
- return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
+ return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
// Invert the condition.
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
- CC = ISD::getSetCCInverse(CC,
+ CC = ISD::getSetCCInverse(CC,
N0.getOperand(0).getValueType().isInteger());
return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
}
if ((N0.getOpcode() == ISD::XOR ||
- (N0.getOpcode() == ISD::AND &&
+ (N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::XOR &&
N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
@@ -2038,7 +2062,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (N0.getOpcode() == ISD::XOR)
Val = N0.getOperand(0);
else {
- assert(N0.getOpcode() == ISD::AND &&
+ assert(N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::XOR);
// ((X^1)&1)^1 -> X & 1
Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
@@ -2082,7 +2106,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
}
-
+
APInt MinVal, MaxVal;
unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
if (ISD::isSignedIntSetCC(Cond)) {
@@ -2097,7 +2121,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true
// X >= C0 --> X > (C0-1)
- return DAG.getSetCC(dl, VT, N0,
+ return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(C1-1, N1.getValueType()),
(Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
}
@@ -2105,7 +2129,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true
// X <= C0 --> X < (C0+1)
- return DAG.getSetCC(dl, VT, N0,
+ return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(C1+1, N1.getValueType()),
(Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
}
@@ -2128,12 +2152,12 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// If we have setult X, 1, turn it into seteq X, 0
if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(MinVal, N0.getValueType()),
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(MinVal, N0.getValueType()),
ISD::SETEQ);
// If we have setugt X, Max-1, turn it into seteq X, Max
else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
- return DAG.getSetCC(dl, VT, N0,
+ return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(MaxVal, N0.getValueType()),
ISD::SETEQ);
@@ -2141,9 +2165,9 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// by changing cc.
// SETUGT X, SINTMAX -> SETLT X, 0
- if (Cond == ISD::SETUGT &&
+ if (Cond == ISD::SETUGT &&
C1 == APInt::getSignedMaxValue(OperandBitSize))
- return DAG.getSetCC(dl, VT, N0,
+ return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(0, N1.getValueType()),
ISD::SETLT);
@@ -2203,7 +2227,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
return DAG.getUNDEF(VT);
}
}
-
+
// Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
// constant if knowing that the operand is non-nan is enough. We prefer to
// have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
@@ -2278,14 +2302,14 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (DAG.isCommutativeBinOp(N0.getOpcode())) {
// If X op Y == Y op X, try other combinations.
if (N0.getOperand(0) == N1.getOperand(1))
- return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
+ return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
Cond);
if (N0.getOperand(1) == N1.getOperand(0))
- return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
+ return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
Cond);
}
}
-
+
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
// Turn (X+C1) == C2 --> X == C2-C1
@@ -2295,7 +2319,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
LHSR->getAPIntValue(),
N0.getValueType()), Cond);
}
-
+
// Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
if (N0.getOpcode() == ISD::XOR)
// If we know that all of the inverted bits are zero, don't bother
@@ -2308,7 +2332,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
N0.getValueType()),
Cond);
}
-
+
// Turn (C1-X) == C2 --> X == C1-C2
if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
@@ -2319,7 +2343,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
N0.getValueType()),
Cond);
}
- }
+ }
}
// Simplify (X+Z) == X --> Z == 0
@@ -2334,7 +2358,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
// (Z-X) == X --> Z == X<<1
SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(),
- N1,
+ N1,
DAG.getConstant(1, getShiftAmountTy()));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
@@ -2356,7 +2380,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
} else if (N1.getNode()->hasOneUse()) {
assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
// X == (Z-X) --> X<<1 == Z
- SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
+ SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
DAG.getConstant(1, getShiftAmountTy()));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
@@ -2443,7 +2467,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
-bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
+bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
int64_t &Offset) const {
if (isa<GlobalAddressSDNode>(N)) {
GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
@@ -2469,6 +2493,7 @@ bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
}
}
}
+
return false;
}
@@ -2497,7 +2522,10 @@ TargetLowering::getConstraintType(const std::string &Constraint) const {
return C_Memory;
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
+ case 'E': // Floating Point Constant
+ case 'F': // Floating Point Constant
case 's': // Relocatable Constant
+ case 'p': // Address.
case 'X': // Allow ANY value.
case 'I': // Target registers.
case 'J':
@@ -2507,11 +2535,13 @@ TargetLowering::getConstraintType(const std::string &Constraint) const {
case 'N':
case 'O':
case 'P':
+ case '<':
+ case '>':
return C_Other;
}
}
-
- if (Constraint.size() > 1 && Constraint[0] == '{' &&
+
+ if (Constraint.size() > 1 && Constraint[0] == '{' &&
Constraint[Constraint.size()-1] == '}')
return C_Register;
return C_Unknown;
@@ -2550,7 +2580,7 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// is possible and fine if either GV or C is missing.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
-
+
// If we have "(add GV, C)", pull out GV/C
if (Op.getOpcode() == ISD::ADD) {
C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
@@ -2562,14 +2592,14 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (C == 0 || GA == 0)
C = 0, GA = 0;
}
-
+
// If we find a valid operand, map to the TargetXXX version so that the
// value itself doesn't get selected.
if (GA) { // Either &GV or &GV+C
if (ConstraintLetter != 'n') {
int64_t Offs = GA->getOffset();
if (C) Offs += C->getZExtValue();
- Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+ Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
C ? C->getDebugLoc() : DebugLoc(),
Op.getValueType(), Offs));
return;
@@ -2613,8 +2643,8 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
E = RI->regclass_end(); RCI != E; ++RCI) {
const TargetRegisterClass *RC = *RCI;
-
- // If none of the value types for this register class are valid, we
+
+ // If none of the value types for this register class are valid, we
// can't use it. For example, 64-bit reg classes on 32-bit targets.
bool isLegal = false;
for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
@@ -2624,16 +2654,16 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
break;
}
}
-
+
if (!isLegal) continue;
-
- for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+
+ for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
if (RegName.equals_lower(RI->getName(*I)))
return std::make_pair(*I, RC);
}
}
-
+
return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
}
@@ -2655,6 +2685,186 @@ unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
}
+/// ParseConstraints - Split up the constraint string from the inline
+/// assembly value into the specific constraints and their prefixes,
+/// and also tie in the associated operand values.
+/// If this returns an empty vector, and if the constraint string itself
+/// isn't empty, there was an error parsing.
+TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
+ ImmutableCallSite CS) const {
+ /// ConstraintOperands - Information about all of the constraints.
+ AsmOperandInfoVector ConstraintOperands;
+ const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+ unsigned maCount = 0; // Largest number of multiple alternative constraints.
+
+ // Do a prepass over the constraints, canonicalizing them, and building up the
+ // ConstraintOperands list.
+ InlineAsm::ConstraintInfoVector
+ ConstraintInfos = IA->ParseConstraints();
+
+ unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
+ unsigned ResNo = 0; // ResNo - The result number of the next output.
+
+ for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
+ ConstraintOperands.push_back(AsmOperandInfo(ConstraintInfos[i]));
+ AsmOperandInfo &OpInfo = ConstraintOperands.back();
+
+ // Update multiple alternative constraint count.
+ if (OpInfo.multipleAlternatives.size() > maCount)
+ maCount = OpInfo.multipleAlternatives.size();
+
+ OpInfo.ConstraintVT = MVT::Other;
+
+ // Compute the value type for each operand.
+ switch (OpInfo.Type) {
+ case InlineAsm::isOutput:
+ // Indirect outputs just consume an argument.
+ if (OpInfo.isIndirect) {
+ OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
+ break;
+ }
+
+ // The return value of the call is this value. As such, there is no
+ // corresponding argument.
+ assert(!CS.getType()->isVoidTy() &&
+ "Bad inline asm!");
+ if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+ OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo));
+ } else {
+ assert(ResNo == 0 && "Asm only has one result!");
+ OpInfo.ConstraintVT = getValueType(CS.getType());
+ }
+ ++ResNo;
+ break;
+ case InlineAsm::isInput:
+ OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
+ break;
+ case InlineAsm::isClobber:
+ // Nothing to do.
+ break;
+ }
+
+ if (OpInfo.CallOperandVal) {
+ const llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
+ if (OpInfo.isIndirect) {
+ const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ if (!PtrTy)
+ report_fatal_error("Indirect operand for inline asm not a pointer!");
+ OpTy = PtrTy->getElementType();
+ }
+ // If OpTy is not a single value, it may be a struct/union that we
+ // can tile with integers.
+ if (!OpTy->isSingleValueType() && OpTy->isSized()) {
+ unsigned BitSize = TD->getTypeSizeInBits(OpTy);
+ switch (BitSize) {
+ default: break;
+ case 1:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ OpInfo.ConstraintVT =
+ EVT::getEVT(IntegerType::get(OpTy->getContext(), BitSize), true);
+ break;
+ }
+ } else if (dyn_cast<PointerType>(OpTy)) {
+ OpInfo.ConstraintVT = MVT::getIntegerVT(8*TD->getPointerSize());
+ } else {
+ OpInfo.ConstraintVT = EVT::getEVT(OpTy, true);
+ }
+ }
+ }
+
+ // If we have multiple alternative constraints, select the best alternative.
+ if (ConstraintInfos.size()) {
+ if (maCount) {
+ unsigned bestMAIndex = 0;
+ int bestWeight = -1;
+ // weight: -1 = invalid match; 0 = so-so match up to 5 = good match.
+ int weight = -1;
+ unsigned maIndex;
+ // Compute the sums of the weights for each alternative, keeping track
+ // of the best (highest weight) one so far.
+ for (maIndex = 0; maIndex < maCount; ++maIndex) {
+ int weightSum = 0;
+ for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
+ cIndex != eIndex; ++cIndex) {
+ AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
+ if (OpInfo.Type == InlineAsm::isClobber)
+ continue;
+
+ // If this is an output operand with a matching input operand,
+ // look up the matching input. If their types mismatch, e.g. one
+ // is an integer, the other is floating point, or their sizes are
+ // different, flag it as maCantMatch.
+ if (OpInfo.hasMatchingInput()) {
+ AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
+ if (OpInfo.ConstraintVT != Input.ConstraintVT) {
+ if ((OpInfo.ConstraintVT.isInteger() !=
+ Input.ConstraintVT.isInteger()) ||
+ (OpInfo.ConstraintVT.getSizeInBits() !=
+ Input.ConstraintVT.getSizeInBits())) {
+ weightSum = -1; // Can't match.
+ break;
+ }
+ }
+ }
+ weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
+ if (weight == -1) {
+ weightSum = -1;
+ break;
+ }
+ weightSum += weight;
+ }
+ // Update best.
+ if (weightSum > bestWeight) {
+ bestWeight = weightSum;
+ bestMAIndex = maIndex;
+ }
+ }
+
+ // Now select chosen alternative in each constraint.
+ for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
+ cIndex != eIndex; ++cIndex) {
+ AsmOperandInfo& cInfo = ConstraintOperands[cIndex];
+ if (cInfo.Type == InlineAsm::isClobber)
+ continue;
+ cInfo.selectAlternative(bestMAIndex);
+ }
+ }
+ }
+
+ // Check and hook up tied operands, choose constraint code to use.
+ for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
+ cIndex != eIndex; ++cIndex) {
+ AsmOperandInfo& OpInfo = ConstraintOperands[cIndex];
+
+ // If this is an output operand with a matching input operand, look up the
+ // matching input. If their types mismatch, e.g. one is an integer, the
+ // other is floating point, or their sizes are different, flag it as an
+ // error.
+ if (OpInfo.hasMatchingInput()) {
+ AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
+
+ if (OpInfo.ConstraintVT != Input.ConstraintVT) {
+ if ((OpInfo.ConstraintVT.isInteger() !=
+ Input.ConstraintVT.isInteger()) ||
+ (OpInfo.ConstraintVT.getSizeInBits() !=
+ Input.ConstraintVT.getSizeInBits())) {
+ report_fatal_error("Unsupported asm: input constraint"
+ " with a matching output constraint of"
+ " incompatible type!");
+ }
+ }
+
+ }
+ }
+
+ return ConstraintOperands;
+}
+
+
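
When an inline-asm operand is an aggregate, ParseConstraints above models it as a single integer if its size is one of the handful of tileable widths; anything else is left as MVT::Other. That switch as a standalone helper, with a stand-in enum (the real code produces an EVT from an IntegerType):

    enum class TileVT { Other, i1, i8, i16, i32, i64, i128 };

    TileVT tileAggregate(unsigned bitSize) {
      switch (bitSize) {
      case 1:   return TileVT::i1;
      case 8:   return TileVT::i8;
      case 16:  return TileVT::i16;
      case 32:  return TileVT::i32;
      case 64:  return TileVT::i64;
      case 128: return TileVT::i128;
      default:  return TileVT::Other;  // not tileable with one integer
      }
    }
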
/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
@@ -2672,6 +2882,79 @@ static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
}
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+ TargetLowering::getMultipleConstraintMatchWeight(
+ AsmOperandInfo &info, int maIndex) const {
+ InlineAsm::ConstraintCodeVector *rCodes;
+ if (maIndex >= (int)info.multipleAlternatives.size())
+ rCodes = &info.Codes;
+ else
+ rCodes = &info.multipleAlternatives[maIndex].Codes;
+ ConstraintWeight BestWeight = CW_Invalid;
+
+ // Loop over the options, keeping track of the most general one.
+ for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
+ ConstraintWeight weight =
+ getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
+ if (weight > BestWeight)
+ BestWeight = weight;
+ }
+
+ return BestWeight;
+}
+
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+ TargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ // Look at the constraint type.
+ switch (*constraint) {
+ case 'i': // immediate integer.
+ case 'n': // immediate integer with a known value.
+ if (isa<ConstantInt>(CallOperandVal))
+ weight = CW_Constant;
+ break;
+ case 's': // non-explicit integral immediate.
+ if (isa<GlobalValue>(CallOperandVal))
+ weight = CW_Constant;
+ break;
+ case 'E': // immediate float if host format.
+ case 'F': // immediate float.
+ if (isa<ConstantFP>(CallOperandVal))
+ weight = CW_Constant;
+ break;
+ case '<': // memory operand with autodecrement.
+ case '>': // memory operand with autoincrement.
+ case 'm': // memory operand.
+ case 'o': // offsettable memory operand
+ case 'V': // non-offsettable memory operand
+ weight = CW_Memory;
+ break;
+ case 'r': // general register.
+ case 'g': // general register, memory operand or immediate integer.
+ // note: Clang converts "g" to "imr".
+ if (CallOperandVal->getType()->isIntegerTy())
+ weight = CW_Register;
+ break;
+ case 'X': // any operand.
+ default:
+ weight = CW_Default;
+ break;
+ }
+ return weight;
+}
+
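
getMultipleConstraintMatchWeight simply takes the best per-letter weight across one alternative, using the classification getSingleConstraintMatchWeight applies above. A condensed model; the weight values are illustrative, not the real CW_* enumerators:

    #include <algorithm>
    #include <string>

    enum Weight { Invalid = -1, Default = 0, Register = 1,
                  Memory = 2, Constant = 3 };

    Weight weightOf(char c) {
      switch (c) {
      case 'i': case 'n': case 's': case 'E': case 'F': return Constant;
      case '<': case '>': case 'm': case 'o': case 'V': return Memory;
      case 'r': case 'g':                               return Register;
      default:                                          return Default;
      }
    }

    Weight bestWeight(const std::string &codes) {
      Weight best = Invalid;
      for (char c : codes)            // e.g. bestWeight("imr") == Constant
        best = std::max(best, weightOf(c));
      return best;
    }
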
/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
@@ -2721,12 +3004,12 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
break;
}
}
-
+
// Things with matching constraints can only be registers, per gcc
// documentation. This mainly affects "g" constraints.
if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
continue;
-
+
// This constraint letter is more general than the previous one, use it.
int Generality = getConstraintGenerality(CType);
if (Generality > BestGenerality) {
@@ -2735,7 +3018,7 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
BestGenerality = Generality;
}
}
-
+
OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
OpInfo.ConstraintType = BestType;
}
@@ -2744,10 +3027,10 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
- SDValue Op,
+ SDValue Op,
SelectionDAG *DAG) const {
assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
-
+
// Single-letter constraints ('r') are very common.
if (OpInfo.Codes.size() == 1) {
OpInfo.ConstraintCode = OpInfo.Codes[0];
@@ -2755,7 +3038,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
} else {
ChooseConstraint(OpInfo, *this, Op, DAG);
}
-
+
// 'X' matches anything.
if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
// Labels and constants are handled elsewhere ('X' is the only thing
@@ -2766,7 +3049,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
OpInfo.CallOperandVal = v;
return;
}
-
+
// Otherwise, try to resolve it to something we know about by looking at
// the actual operand type.
if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
@@ -2782,7 +3065,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
-bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
+bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
// The default implementation of this implements a conservative RISCy, r+r and
// r+i addr mode.
@@ -2790,12 +3073,12 @@ bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
// Allows a sign-extended 16-bit immediate field.
if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
return false;
-
+
// No global is ever allowed as a base.
if (AM.BaseGV)
return false;
-
- // Only support r+r,
+
+ // Only support r+r,
switch (AM.Scale) {
case 0: // "r+i" or just "i", depending on HasBaseReg.
break;
@@ -2810,7 +3093,7 @@ bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
// Allow 2*r as r+r.
break;
}
-
+
return true;
}
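
The default isLegalAddressingMode accepts r+i with a sign-extended 16-bit offset, forbids global bases, and allows scales of 0, 1, or 2 (2*r folds to r+r). A compact model with a stand-in struct; the real hook additionally rejects the r+r+i combination at scale 1:

    #include <cstdint>

    struct Mode { int64_t BaseOffs; bool HasBaseGV; int64_t Scale; };

    bool defaultLegalAddrMode(const Mode &AM) {
      if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
        return false;                // offset must fit a sext-16 field
      if (AM.HasBaseGV)
        return false;                // no global symbol as the base
      return AM.Scale == 0 || AM.Scale == 1 || AM.Scale == 2;
    }
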
@@ -2818,19 +3101,19 @@ bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
-SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
+SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const {
EVT VT = N->getValueType(0);
DebugLoc dl= N->getDebugLoc();
-
+
// Check to see if we can do this.
// FIXME: We should be more aggressive here.
if (!isTypeLegal(VT))
return SDValue();
-
+
APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
APInt::ms magics = d.magic();
-
+
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
SDValue Q;
@@ -2844,7 +3127,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
else
return SDValue(); // No mulhs or equivalent
// If d > 0 and m < 0, add the numerator
- if (d.isStrictlyPositive() && magics.m.isNegative()) {
+ if (d.isStrictlyPositive() && magics.m.isNegative()) {
Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
if (Created)
Created->push_back(Q.getNode());
@@ -2857,7 +3140,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
}
// Shift right algebraic if shift value is nonzero
if (magics.s > 0) {
- Q = DAG.getNode(ISD::SRA, dl, VT, Q,
+ Q = DAG.getNode(ISD::SRA, dl, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
if (Created)
Created->push_back(Q.getNode());
@@ -2908,20 +3191,20 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
if (magics.a == 0) {
assert(magics.s < N1C->getAPIntValue().getBitWidth() &&
"We shouldn't generate an undefined shift!");
- return DAG.getNode(ISD::SRL, dl, VT, Q,
+ return DAG.getNode(ISD::SRL, dl, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
} else {
SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
if (Created)
Created->push_back(NPQ.getNode());
- NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
+ NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
DAG.getConstant(1, getShiftAmountTy()));
if (Created)
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
if (Created)
Created->push_back(NPQ.getNode());
- return DAG.getNode(ISD::SRL, dl, VT, NPQ,
+ return DAG.getNode(ISD::SRL, dl, VT, NPQ,
DAG.getConstant(magics.s-1, getShiftAmountTy()));
}
}
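
BuildSDIV and BuildUDIV replace division by a constant with a multiply-high plus shifts, using APInt::magic()/magicu() for the multiplier. The pattern in plain integers, for an unsigned divide by 5 on 32 bits (magic multiplier 0xCCCCCCCD, shift 2, per Hacker's Delight; no LLVM API involved):

    #include <cstdint>

    // n / 5 for every uint32_t n, with no divide instruction: MULHU by the
    // magic constant, then a logical shift right by the magic shift amount.
    uint32_t udiv5(uint32_t n) {
      uint64_t prod = (uint64_t)n * 0xCCCCCCCDull;  // 32x32 -> 64 widening mul
      return (uint32_t)(prod >> 32) >> 2;           // high half, then srl 2
    }
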
diff --git a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
index aeaa38b..7b5bca4 100644
--- a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
+++ b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
@@ -226,7 +226,7 @@ bool PEI::calcAnticInOut(MachineBasicBlock* MBB) {
// AnticIn[MBB] = UNION(CSRUsed[MBB], AnticOut[MBB]);
CSRegSet prevAnticIn = AnticIn[MBB];
AnticIn[MBB] = CSRUsed[MBB] | AnticOut[MBB];
- if (prevAnticIn |= AnticIn[MBB])
+ if (prevAnticIn != AnticIn[MBB])
changed = true;
return changed;
}
@@ -264,7 +264,7 @@ bool PEI::calcAvailInOut(MachineBasicBlock* MBB) {
// AvailOut[MBB] = UNION(CSRUsed[MBB], AvailIn[MBB]);
CSRegSet prevAvailOut = AvailOut[MBB];
AvailOut[MBB] = CSRUsed[MBB] | AvailIn[MBB];
- if (prevAvailOut |= AvailOut[MBB])
+ if (prevAvailOut != AvailOut[MBB])
changed = true;
return changed;
}
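
The two one-line ShrinkWrapping hunks fix a genuine bug, not style: SparseBitVector's operator|= returns whether the union added bits to its left operand, so `if (prevAnticIn |= AnticIn[MBB])` both mutates the saved copy and misses the case where the recomputed set shrank; the dataflow loop wants plain inequality to detect any change. The corrected step in miniature, with std::set standing in for CSRegSet:

    #include <set>

    // Returns true iff the recomputed dataflow fact differs at all from the
    // previous one -- additions or removals -- then commits it.
    bool fixedPointStep(std::set<int> &fact, const std::set<int> &recomputed) {
      bool changed = (fact != recomputed);
      fact = recomputed;
      return changed;
    }
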
diff --git a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
index b29ea19..2843c1a 100644
--- a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "regcoalescing"
#include "SimpleRegisterCoalescing.h"
#include "VirtRegMap.h"
+#include "LiveDebugVariables.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -64,9 +65,25 @@ DisablePhysicalJoin("disable-physical-join",
cl::desc("Avoid coalescing physical register copies"),
cl::init(false), cl::Hidden);
-INITIALIZE_AG_PASS(SimpleRegisterCoalescing, RegisterCoalescer,
+static cl::opt<bool>
+VerifyCoalescing("verify-coalescing",
+ cl::desc("Verify machine instrs before and after register coalescing"),
+ cl::Hidden);
+
+INITIALIZE_AG_PASS_BEGIN(SimpleRegisterCoalescing, RegisterCoalescer,
"simple-register-coalescing", "Simple Register Coalescing",
- false, false, true);
+ false, false, true)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
+INITIALIZE_PASS_DEPENDENCY(PHIElimination)
+INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_AG_PASS_END(SimpleRegisterCoalescing, RegisterCoalescer,
+ "simple-register-coalescing", "Simple Register Coalescing",
+ false, false, true)
char &llvm::SimpleRegisterCoalescingID = SimpleRegisterCoalescing::ID;
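Splitting the registration into INITIALIZE_AG_PASS_BEGIN / *_DEPENDENCY /
*_END means initializing the coalescer also initializes every analysis it
uses, which the new constructor call to initializeSimpleRegisterCoalescingPass
(see the header diff below) relies on. The general shape of the pattern, with
a hypothetical MyPass — a sketch that is only meaningful inside the LLVM tree,
where these macros are defined in PassSupport.h:

INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)   // initialized before MyPass
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)     // analysis-group dependency
INITIALIZE_PASS_END(MyPass, "my-pass", "My Pass", false, false)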
@@ -75,14 +92,14 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
+ AU.addRequired<LiveDebugVariables>();
+ AU.addPreserved<LiveDebugVariables>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
- if (StrongPHIElim)
- AU.addPreservedID(StrongPHIEliminationID);
- else
- AU.addPreservedID(PHIEliminationID);
+ AU.addPreservedID(StrongPHIEliminationID);
+ AU.addPreservedID(PHIEliminationID);
AU.addPreservedID(TwoAddressInstructionPassID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -124,7 +141,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
// Get the location that B is defined at. Two options: either this value has
// an unknown definition point or it is defined at CopyIdx. If unknown, we
// can't process it.
- if (!BValNo->getCopy()) return false;
+ if (!BValNo->isDefByCopy()) return false;
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
// AValNo is the value number in A that defines the copy, A3 in the example.
@@ -218,7 +235,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
continue;
LiveInterval &SRLI = li_->getInterval(*SR);
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
- SRLI.getNextValue(FillerStart, 0, true,
+ SRLI.getNextValue(FillerStart, 0,
li_->getVNInfoAllocator())));
}
}
@@ -266,9 +283,6 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
if (BI->valno == BValNo)
continue;
- // When BValNo is null, we're looking for a dummy clobber-value for a subreg.
- if (!BValNo && !BI->valno->isDefAccurate() && !BI->valno->getCopy())
- continue;
if (BI->start <= AI->start && BI->end > AI->start)
return true;
if (BI->start > AI->start && BI->start < AI->end)
@@ -278,16 +292,6 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
return false;
}
-static void
-TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
- for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
- i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isImplicit())
- NewMI->addOperand(MO);
- }
-}
-
/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
/// IntA being the source and IntB being the dest, thus this defines a value
/// number in IntB. If the source value number (in IntA) is defined by a
@@ -324,8 +328,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
if (!li_->hasInterval(CP.getDstReg()))
return false;
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
+ SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
LiveInterval &IntA =
li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
@@ -334,27 +337,19 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
- LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
- if (BLR == IntB.end()) return false;
- VNInfo *BValNo = BLR->valno;
+ VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
+ if (!BValNo || !BValNo->isDefByCopy())
+ return false;
- // Get the location that B is defined at. Two options: either this value has
- // an unknown definition point or it is defined at CopyIdx. If unknown, we
- // can't process it.
- if (!BValNo->getCopy()) return false;
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
// AValNo is the value number in A that defines the copy, A3 in the example.
- LiveInterval::iterator ALR =
- IntA.FindLiveRangeContaining(CopyIdx.getUseIndex()); //
+ VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
+ assert(AValNo && "COPY source not live");
- assert(ALR != IntA.end() && "Live range not found!");
- VNInfo *AValNo = ALR->valno;
// If other defs can reach uses of this def, then it's not safe to perform
- // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
- // tested?
- if (AValNo->isPHIDef() || !AValNo->isDefAccurate() ||
- AValNo->isUnused() || AValNo->hasPHIKill())
+ // the optimization.
+ if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
if (!DefMI)
@@ -411,7 +406,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
return false;
}
- DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << *DefMI);
+ DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
+ << *DefMI);
// At this point we have decided that it is legal to do this
// transformation. Start by commuting the instruction.
@@ -427,10 +423,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
NewMI->getOperand(OpIdx).setIsKill();
- bool BHasPHIKill = BValNo->hasPHIKill();
- SmallVector<VNInfo*, 4> BDeadValNos;
- std::map<SlotIndex, SlotIndex> BExtend;
-
// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
// A = or A, B
// ...
@@ -439,9 +431,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// C = A<kill>
// ...
// = B
- bool Extended = BLR->end > ALR->end && ALR->end != ALR->start;
- if (Extended)
- BExtend[ALR->end] = BLR->end;
// Update uses of IntA of the specific Val# with IntB.
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
@@ -467,52 +456,24 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
UseMO.setReg(NewReg);
if (UseMI == CopyMI)
continue;
- if (UseMO.isKill()) {
- if (Extended)
- UseMO.setIsKill(false);
- }
if (!UseMI->isCopy())
continue;
if (UseMI->getOperand(0).getReg() != IntB.reg ||
UseMI->getOperand(0).getSubReg())
continue;
- // This copy will become a noop. If it's defining a new val#,
- // remove that val# as well. However this live range is being
- // extended to the end of the existing live range defined by the copy.
+ // This copy will become a noop. If it's defining a new val#, merge it into
+ // BValNo.
SlotIndex DefIdx = UseIdx.getDefIndex();
- const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
- if (!DLR)
+ VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
+ if (!DVNI)
continue;
- BHasPHIKill |= DLR->valno->hasPHIKill();
- assert(DLR->valno->def == DefIdx);
- BDeadValNos.push_back(DLR->valno);
- BExtend[DLR->start] = DLR->end;
+ DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
+ assert(DVNI->def == DefIdx);
+ BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
JoinedCopies.insert(UseMI);
}
- // We need to insert a new liverange: [ALR.start, LastUse). It may be we can
- // simply extend BLR if CopyMI doesn't end the range.
- DEBUG({
- dbgs() << "Extending: ";
- IntB.print(dbgs(), tri_);
- });
-
- // Remove val#'s defined by copies that will be coalesced away.
- for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i) {
- VNInfo *DeadVNI = BDeadValNos[i];
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
- for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS) {
- if (!li_->hasInterval(*AS))
- continue;
- LiveInterval &ASLI = li_->getInterval(*AS);
- if (const LiveRange *ASLR = ASLI.getLiveRangeContaining(DeadVNI->def))
- ASLI.removeValNo(ASLR->valno);
- }
- }
- IntB.removeValNo(BDeadValNos[i]);
- }
-
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
// is updated.
VNInfo *ValNo = BValNo;
@@ -521,30 +482,12 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
- SlotIndex End = AI->end;
- std::map<SlotIndex, SlotIndex>::iterator
- EI = BExtend.find(End);
- if (EI != BExtend.end())
- End = EI->second;
- IntB.addRange(LiveRange(AI->start, End, ValNo));
+ IntB.addRange(LiveRange(AI->start, AI->end, ValNo));
}
- ValNo->setHasPHIKill(BHasPHIKill);
-
- DEBUG({
- dbgs() << " result = ";
- IntB.print(dbgs(), tri_);
- dbgs() << "\nShortening: ";
- IntA.print(dbgs(), tri_);
- });
+ DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
IntA.removeValNo(AValNo);
-
- DEBUG({
- dbgs() << " result = ";
- IntA.print(dbgs(), tri_);
- dbgs() << '\n';
- });
-
+ DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
++numCommutes;
return true;
}
@@ -644,6 +587,7 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
/// computation, replace the copy by rematerializing the definition.
bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
+ bool preserveSrcInt,
unsigned DstReg,
unsigned DstSubIdx,
MachineInstr *CopyMI) {
@@ -652,12 +596,12 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
assert(SrcLR != SrcInt.end() && "Live range not found!");
VNInfo *ValNo = SrcLR->valno;
// If other defs can reach uses of this def, then it's not safe to perform
- // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
- // tested?
- if (ValNo->isPHIDef() || !ValNo->isDefAccurate() ||
- ValNo->isUnused() || ValNo->hasPHIKill())
+ // the optimization.
+ if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+ if (!DefMI)
+ return false;
assert(DefMI && "Defining instruction disappeared");
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isAsCheapAsAMove())
@@ -681,8 +625,8 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}
- // If destination register has a sub-register index on it, make sure it mtches
- // the instruction register class.
+ // If destination register has a sub-register index on it, make sure it
+ // matches the instruction register class.
if (DstSubIdx) {
const TargetInstrDesc &TID = DefMI->getDesc();
if (TID.getNumDefs() != 1)
@@ -699,30 +643,12 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
RemoveCopyFlag(DstReg, CopyMI);
- // If copy kills the source register, find the last use and propagate
- // kill.
- bool checkForDeadDef = false;
MachineBasicBlock *MBB = CopyMI->getParent();
- if (SrcLR->end == CopyIdx.getDefIndex())
- if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) {
- checkForDeadDef = true;
- }
-
MachineBasicBlock::iterator MII =
llvm::next(MachineBasicBlock::iterator(CopyMI));
tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
MachineInstr *NewMI = prior(MII);
- if (checkForDeadDef) {
- // PR4090 fix: Trim interval failed because there was no use of the
- // source interval in this MBB. If the def is in this MBB too then we
- // should mark it dead:
- if (DefMI->getParent() == MBB) {
- DefMI->addRegisterDead(SrcInt.reg, tri_);
- SrcLR->end = SrcLR->start.getNextSlot();
- }
- }
-
// CopyMI may have implicit operands, transfer them over to the newly
// rematerialized instruction. And update implicit def interval valnos.
for (unsigned i = CopyMI->getDesc().getNumOperands(),
@@ -734,13 +660,18 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
RemoveCopyFlag(MO.getReg(), CopyMI);
}
- TransferImplicitOps(CopyMI, NewMI);
+ NewMI->copyImplicitOps(CopyMI);
li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
CopyMI->eraseFromParent();
ReMatCopies.insert(CopyMI);
ReMatDefs.insert(DefMI);
DEBUG(dbgs() << "Remat: " << *NewMI);
++NumReMats;
+
+ // The source interval can become smaller because we removed a use.
+ if (preserveSrcInt)
+ li_->shrinkToUses(&SrcInt);
+
return true;
}
@@ -756,6 +687,9 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
unsigned DstReg = CP.getDstReg();
unsigned SubIdx = CP.getSubIdx();
+ // Update LiveDebugVariables.
+ ldv_->renameRegister(SrcReg, DstReg, SubIdx);
+
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
MachineInstr *UseMI = I.skipInstruction();) {
// A PhysReg copy that won't be coalesced can perhaps be rematerialized
@@ -768,7 +702,7 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
UseMI->getOperand(0).getReg() != SrcReg &&
UseMI->getOperand(0).getReg() != DstReg &&
!JoinedCopies.count(UseMI) &&
- ReMaterializeTrivialDef(li_->getInterval(SrcReg),
+ ReMaterializeTrivialDef(li_->getInterval(SrcReg), false,
UseMI->getOperand(0).getReg(), 0, UseMI))
continue;
}
@@ -874,7 +808,7 @@ void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
if (li_->hasInterval(DstReg)) {
LiveInterval &LI = li_->getInterval(DstReg);
if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
- if (LR->valno->getCopy() == CopyMI)
+ if (LR->valno->def == DefIdx)
LR->valno->setCopy(0);
}
if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
@@ -884,7 +818,7 @@ void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
continue;
LiveInterval &LI = li_->getInterval(*AS);
if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
- if (LR->valno->getCopy() == CopyMI)
+ if (LR->valno->def == DefIdx)
LR->valno->setCopy(0);
}
}
@@ -1044,23 +978,19 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
return false;
}
- DEBUG(dbgs() << "\tConsidering merging %reg" << CP.getSrcReg());
+ DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_));
// Enforce policies.
if (CP.isPhys()) {
- DEBUG(dbgs() <<" with physreg %" << tri_->getName(CP.getDstReg()) << "\n");
+ DEBUG(dbgs() <<" with physreg " << PrintReg(CP.getDstReg(), tri_) << "\n");
// Only coalesce to allocatable physreg.
if (!li_->isAllocatable(CP.getDstReg())) {
DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
return false; // Not coalescable.
}
} else {
- DEBUG({
- dbgs() << " with reg%" << CP.getDstReg();
- if (CP.getSubIdx())
- dbgs() << ":" << tri_->getSubRegIndexName(CP.getSubIdx());
- dbgs() << " to " << CP.getNewRC()->getName() << "\n";
- });
+ DEBUG(dbgs() << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
+ << " to " << CP.getNewRC()->getName() << "\n");
// Avoid constraining virtual register regclass too much.
if (CP.isCrossClass()) {
@@ -1114,7 +1044,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// Before giving up coalescing, if definition of source is defined by
// trivial computation, try rematerializing it.
if (!CP.isFlipped() &&
- ReMaterializeTrivialDef(JoinVInt, CP.getDstReg(), 0, CopyMI))
+ ReMaterializeTrivialDef(JoinVInt, true, CP.getDstReg(), 0, CopyMI))
return true;
++numAborts;
@@ -1134,7 +1064,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// If definition of source is defined by trivial computation, try
// rematerializing it.
if (!CP.isFlipped() &&
- ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()),
+ ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
CP.getDstReg(), 0, CopyMI))
return true;
@@ -1317,7 +1247,7 @@ bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
continue;
// Never join with a register that has EarlyClobber redefs.
@@ -1341,7 +1271,7 @@ bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
continue;
// Never join with a register that has EarlyClobber redefs.
@@ -1495,9 +1425,9 @@ void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
std::vector<CopyRec> &TryAgain) {
DEBUG(dbgs() << MBB->getName() << ":\n");
- std::vector<CopyRec> VirtCopies;
- std::vector<CopyRec> PhysCopies;
- std::vector<CopyRec> ImpDefCopies;
+ SmallVector<CopyRec, 8> VirtCopies;
+ SmallVector<CopyRec, 8> PhysCopies;
+ SmallVector<CopyRec, 8> ImpDefCopies;
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E;) {
MachineInstr *Inst = MII++;
@@ -1690,6 +1620,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
tri_ = tm_->getRegisterInfo();
tii_ = tm_->getInstrInfo();
li_ = &getAnalysis<LiveIntervals>();
+ ldv_ = &getAnalysis<LiveDebugVariables>();
AA = &getAnalysis<AliasAnalysis>();
loopInfo = &getAnalysis<MachineLoopInfo>();
@@ -1697,6 +1628,9 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
+ if (VerifyCoalescing)
+ mf_->verify(this, "Before register coalescing");
+
for (TargetRegisterInfo::regclass_iterator I = tri_->regclass_begin(),
E = tri_->regclass_end(); I != E; ++I)
allocatableRCRegs_.insert(std::make_pair(*I,
@@ -1739,9 +1673,11 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
DoDelete = false;
if (MI->allDefsAreDead()) {
- LiveInterval &li = li_->getInterval(SrcReg);
- if (!ShortenDeadCopySrcLiveRange(li, MI))
- ShortenDeadCopyLiveRange(li, MI);
+ if (li_->hasInterval(SrcReg)) {
+ LiveInterval &li = li_->getInterval(SrcReg);
+ if (!ShortenDeadCopySrcLiveRange(li, MI))
+ ShortenDeadCopyLiveRange(li, MI);
+ }
DoDelete = true;
}
if (!DoDelete) {
@@ -1821,13 +1757,26 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
if (!MO.isReg() || !MO.isKill()) continue;
unsigned reg = MO.getReg();
if (!reg || !li_->hasInterval(reg)) continue;
- if (!li_->getInterval(reg).killedAt(DefIdx))
+ if (!li_->getInterval(reg).killedAt(DefIdx)) {
MO.setIsKill(false);
+ continue;
+ }
+ // When leaving a kill flag on a physreg, check if any subregs should
+ // remain alive.
+ if (!TargetRegisterInfo::isPhysicalRegister(reg))
+ continue;
+ for (const unsigned *SR = tri_->getSubRegisters(reg);
+ unsigned S = *SR; ++SR)
+ if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
+ MI->addRegisterDefined(S, tri_);
}
}
}
DEBUG(dump());
+ DEBUG(ldv_->dump());
+ if (VerifyCoalescing)
+ mf_->verify(this, "After register coalescing");
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
index 855bdb9..56703df 100644
--- a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -21,7 +21,7 @@
namespace llvm {
class SimpleRegisterCoalescing;
- class LiveVariables;
+ class LiveDebugVariables;
class TargetRegisterInfo;
class TargetInstrInfo;
class VirtRegMap;
@@ -44,6 +44,7 @@ namespace llvm {
const TargetRegisterInfo* tri_;
const TargetInstrInfo* tii_;
LiveIntervals *li_;
+ LiveDebugVariables *ldv_;
const MachineLoopInfo* loopInfo;
AliasAnalysis *AA;
@@ -63,7 +64,9 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- SimpleRegisterCoalescing() : MachineFunctionPass(ID) {}
+ SimpleRegisterCoalescing() : MachineFunctionPass(ID) {
+ initializeSimpleRegisterCoalescingPass(*PassRegistry::getPassRegistry());
+ }
struct InstrSlots {
enum {
@@ -140,8 +143,10 @@ namespace llvm {
/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
/// computation, replace the copy by rematerializing the definition.
- bool ReMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
- unsigned DstSubIdx, MachineInstr *CopyMI);
+ /// If PreserveSrcInt is true, make sure SrcInt is valid after the call.
+ bool ReMaterializeTrivialDef(LiveInterval &SrcInt, bool PreserveSrcInt,
+ unsigned DstReg, unsigned DstSubIdx,
+ MachineInstr *CopyMI);
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index b637980..13e1454 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -21,15 +21,14 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <set>
using namespace llvm;
STATISTIC(NumInvokes, "Number of invokes replaced");
@@ -53,6 +52,7 @@ namespace {
Constant *SelectorFn;
Constant *ExceptionFn;
Constant *CallSiteFn;
+ Constant *DispatchSetupFn;
Value *CallSite;
public:
@@ -116,6 +116,8 @@ bool SjLjEHPass::doInitialization(Module &M) {
SelectorFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_selector);
ExceptionFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_exception);
CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
+ DispatchSetupFn
+ = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_dispatch_setup);
PersonalityFn = 0;
return true;
@@ -317,8 +319,12 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
Unwinds.push_back(UI);
}
}
- // If we don't have any invokes or unwinds, there's nothing to do.
- if (Unwinds.empty() && Invokes.empty()) return false;
+
+ NumInvokes += Invokes.size();
+ NumUnwinds += Unwinds.size();
+
+ // If we don't have any invokes, there's nothing to do.
+ if (Invokes.empty()) return false;
// Find the eh.selector.*, eh.exception and alloca calls.
//
@@ -332,6 +338,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
SmallVector<CallInst*,16> EH_Selectors;
SmallVector<CallInst*,16> EH_Exceptions;
SmallVector<Instruction*,16> JmpbufUpdatePoints;
+
// Note: Skip the entry block since there's nothing there that interests
// us. eh.selector and eh.exception shouldn't ever be there, and we
// want to disregard any allocas that are there.
@@ -351,228 +358,231 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
}
}
}
+
// If we don't have any eh.selector calls, we can't determine the personality
// function. Without a personality function, we can't process exceptions.
if (!PersonalityFn) return false;
- NumInvokes += Invokes.size();
- NumUnwinds += Unwinds.size();
+ // We have invokes, so we need to add register/unregister calls to get this
+ // function onto the global unwind stack.
+ //
+ // First thing we need to do is scan the whole function for values that are
+ // live across unwind edges. Each value that is live across an unwind edge we
+ // spill into a stack location, guaranteeing that there is nothing live across
+ // the unwind edge. This process also splits all critical edges coming out of
+ // invokes.
+ splitLiveRangesAcrossInvokes(Invokes);
+
+ BasicBlock *EntryBB = F.begin();
+ // Create an alloca for the incoming jump buffer ptr and the new jump buffer
+ // that needs to be restored on all exits from the function. This is an
+ // alloca because the value needs to be added to the global context list.
+ unsigned Align = 4; // FIXME: Should be a TLI check?
+ AllocaInst *FunctionContext =
+ new AllocaInst(FunctionContextTy, 0, Align,
+ "fcn_context", F.begin()->begin());
+
+ Value *Idxs[2];
+ const Type *Int32Ty = Type::getInt32Ty(F.getContext());
+ Value *Zero = ConstantInt::get(Int32Ty, 0);
+ // We need to also keep around a reference to the call_site field
+ Idxs[0] = Zero;
+ Idxs[1] = ConstantInt::get(Int32Ty, 1);
+ CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
+ "call_site",
+ EntryBB->getTerminator());
+
+ // The exception selector comes back in context->data[1]
+ Idxs[1] = ConstantInt::get(Int32Ty, 2);
+ Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
+ "fc_data",
+ EntryBB->getTerminator());
+ Idxs[1] = ConstantInt::get(Int32Ty, 1);
+ Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
+ "exc_selector_gep",
+ EntryBB->getTerminator());
+ // The exception value comes back in context->data[0]
+ Idxs[1] = Zero;
+ Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
+ "exception_gep",
+ EntryBB->getTerminator());
+
+ // The result of the eh.selector call will be replaced with a reference to
+ // the selector value returned in the function context. We leave the selector
+ // itself so the EH analysis later can use it.
+ for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
+ CallInst *I = EH_Selectors[i];
+ Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
+ I->replaceAllUsesWith(SelectorVal);
+ }
- if (!Invokes.empty()) {
- // We have invokes, so we need to add register/unregister calls to get
- // this function onto the global unwind stack.
- //
- // First thing we need to do is scan the whole function for values that are
- // live across unwind edges. Each value that is live across an unwind edge
- // we spill into a stack location, guaranteeing that there is nothing live
- // across the unwind edge. This process also splits all critical edges
- // coming out of invoke's.
- splitLiveRangesAcrossInvokes(Invokes);
-
- BasicBlock *EntryBB = F.begin();
- // Create an alloca for the incoming jump buffer ptr and the new jump buffer
- // that needs to be restored on all exits from the function. This is an
- // alloca because the value needs to be added to the global context list.
- unsigned Align = 4; // FIXME: Should be a TLI check?
- AllocaInst *FunctionContext =
- new AllocaInst(FunctionContextTy, 0, Align,
- "fcn_context", F.begin()->begin());
-
- Value *Idxs[2];
- const Type *Int32Ty = Type::getInt32Ty(F.getContext());
- Value *Zero = ConstantInt::get(Int32Ty, 0);
- // We need to also keep around a reference to the call_site field
- Idxs[0] = Zero;
- Idxs[1] = ConstantInt::get(Int32Ty, 1);
- CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
- "call_site",
- EntryBB->getTerminator());
-
- // The exception selector comes back in context->data[1]
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
- "fc_data",
- EntryBB->getTerminator());
- Idxs[1] = ConstantInt::get(Int32Ty, 1);
- Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
- "exc_selector_gep",
- EntryBB->getTerminator());
- // The exception value comes back in context->data[0]
- Idxs[1] = Zero;
- Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
- "exception_gep",
- EntryBB->getTerminator());
-
- // The result of the eh.selector call will be replaced with a
- // a reference to the selector value returned in the function
- // context. We leave the selector itself so the EH analysis later
- // can use it.
- for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
- CallInst *I = EH_Selectors[i];
- Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
- I->replaceAllUsesWith(SelectorVal);
- }
- // eh.exception calls are replaced with references to the proper
- // location in the context. Unlike eh.selector, the eh.exception
- // calls are removed entirely.
- for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
- CallInst *I = EH_Exceptions[i];
- // Possible for there to be duplicates, so check to make sure
- // the instruction hasn't already been removed.
- if (!I->getParent()) continue;
- Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
- const Type *Ty = Type::getInt8PtrTy(F.getContext());
- Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);
-
- I->replaceAllUsesWith(Val);
- I->eraseFromParent();
- }
+ // eh.exception calls are replaced with references to the proper location in
+ // the context. Unlike eh.selector, the eh.exception calls are removed
+ // entirely.
+ for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
+ CallInst *I = EH_Exceptions[i];
+ // Possible for there to be duplicates, so check to make sure the
+ // instruction hasn't already been removed.
+ if (!I->getParent()) continue;
+ Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
+ const Type *Ty = Type::getInt8PtrTy(F.getContext());
+ Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);
+
+ I->replaceAllUsesWith(Val);
+ I->eraseFromParent();
+ }
- // The entry block changes to have the eh.sjlj.setjmp, with a conditional
- // branch to a dispatch block for non-zero returns. If we return normally,
- // we're not handling an exception and just register the function context
- // and continue.
-
- // Create the dispatch block. The dispatch block is basically a big switch
- // statement that goes to all of the invoke landing pads.
- BasicBlock *DispatchBlock =
- BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);
-
- // Insert a load in the Catch block, and a switch on its value. By default,
- // we go to a block that just does an unwind (which is the correct action
- // for a standard call).
- BasicBlock *UnwindBlock =
- BasicBlock::Create(F.getContext(), "unwindbb", &F);
- Unwinds.push_back(new UnwindInst(F.getContext(), UnwindBlock));
-
- Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
- DispatchBlock);
- SwitchInst *DispatchSwitch =
- SwitchInst::Create(DispatchLoad, UnwindBlock, Invokes.size(),
- DispatchBlock);
- // Split the entry block to insert the conditional branch for the setjmp.
- BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
- "eh.sjlj.setjmp.cont");
-
- // Populate the Function Context
- // 1. LSDA address
- // 2. Personality function address
- // 3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)
-
- // LSDA address
- Idxs[0] = Zero;
- Idxs[1] = ConstantInt::get(Int32Ty, 4);
- Value *LSDAFieldPtr =
- GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
- "lsda_gep",
- EntryBB->getTerminator());
- Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
- EntryBB->getTerminator());
- new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
-
- Idxs[1] = ConstantInt::get(Int32Ty, 3);
- Value *PersonalityFieldPtr =
- GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
- "lsda_gep",
+ // The entry block changes to have the eh.sjlj.setjmp, with a conditional
+ // branch to a dispatch block for non-zero returns. If we return normally,
+ // we're not handling an exception and just register the function context and
+ // continue.
+
+ // Create the dispatch block. The dispatch block is basically a big switch
+ // statement that goes to all of the invoke landing pads.
+ BasicBlock *DispatchBlock =
+ BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);
+
+ // Add a call to dispatch_setup at the start of the dispatch block. This is
+ // expanded to any target-specific setup that needs to be done.
+ Value *SetupArg =
+ CastInst::Create(Instruction::BitCast, FunctionContext,
+ Type::getInt8PtrTy(F.getContext()), "",
+ DispatchBlock);
+ CallInst::Create(DispatchSetupFn, SetupArg, "", DispatchBlock);
+
+ // Insert a load of the callsite in the dispatch block, and a switch on its
+ // value. By default, we go to a block that just does an unwind (which is the
+ // correct action for a standard call).
+ BasicBlock *UnwindBlock =
+ BasicBlock::Create(F.getContext(), "unwindbb", &F);
+ Unwinds.push_back(new UnwindInst(F.getContext(), UnwindBlock));
+
+ Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
+ DispatchBlock);
+ SwitchInst *DispatchSwitch =
+ SwitchInst::Create(DispatchLoad, UnwindBlock, Invokes.size(),
+ DispatchBlock);
+ // Split the entry block to insert the conditional branch for the setjmp.
+ BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
+ "eh.sjlj.setjmp.cont");
+
+ // Populate the Function Context
+ // 1. LSDA address
+ // 2. Personality function address
+ // 3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)
+
+ // LSDA address
+ Idxs[0] = Zero;
+ Idxs[1] = ConstantInt::get(Int32Ty, 4);
+ Value *LSDAFieldPtr =
+ GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
+ "lsda_gep",
+ EntryBB->getTerminator());
+ Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
+ EntryBB->getTerminator());
+ new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
+
+ Idxs[1] = ConstantInt::get(Int32Ty, 3);
+ Value *PersonalityFieldPtr =
+ GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
+ "lsda_gep",
+ EntryBB->getTerminator());
+ new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
+ EntryBB->getTerminator());
+
+ // Save the frame pointer.
+ Idxs[1] = ConstantInt::get(Int32Ty, 5);
+ Value *JBufPtr
+ = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
+ "jbuf_gep",
EntryBB->getTerminator());
- new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
- EntryBB->getTerminator());
-
- // Save the frame pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 5);
- Value *JBufPtr
- = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
- "jbuf_gep",
- EntryBB->getTerminator());
- Idxs[1] = ConstantInt::get(Int32Ty, 0);
- Value *FramePtr =
- GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_fp_gep",
+ Idxs[1] = ConstantInt::get(Int32Ty, 0);
+ Value *FramePtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_fp_gep",
+ EntryBB->getTerminator());
+
+ Value *Val = CallInst::Create(FrameAddrFn,
+ ConstantInt::get(Int32Ty, 0),
+ "fp",
EntryBB->getTerminator());
+ new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
+
+ // Save the stack pointer.
+ Idxs[1] = ConstantInt::get(Int32Ty, 2);
+ Value *StackPtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_sp_gep",
+ EntryBB->getTerminator());
+
+ Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
+ new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
+
+ // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
+ Value *SetjmpArg =
+ CastInst::Create(Instruction::BitCast, JBufPtr,
+ Type::getInt8PtrTy(F.getContext()), "",
+ EntryBB->getTerminator());
+ Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
+ "dispatch",
+ EntryBB->getTerminator());
+ // check the return value of the setjmp. non-zero goes to dispatcher.
+ Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
+ ICmpInst::ICMP_EQ, DispatchVal, Zero,
+ "notunwind");
+ // Nuke the uncond branch.
+ EntryBB->getTerminator()->eraseFromParent();
+
+ // Put in a new condbranch in its place.
+ BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);
+
+ // Register the function context and make sure it's known to not throw
+ CallInst *Register =
+ CallInst::Create(RegisterFn, FunctionContext, "",
+ ContBlock->getTerminator());
+ Register->setDoesNotThrow();
+
+ // At this point, we are all set up, update the invoke instructions to mark
+ // their call_site values, and fill in the dispatch switch accordingly.
+ for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
+ markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);
+
+ // Mark call instructions that aren't nounwind as no-action (call_site ==
+ // -1). Skip the entry block, as prior to then, no function context has been
+ // created for this function and any unexpected exceptions thrown will go
+ // directly to the caller's context, which is what we want anyway, so no need
+ // to do anything here.
+ for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
+ for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
+ if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ // Ignore calls to the EH builtins (eh.selector, eh.exception)
+ Constant *Callee = CI->getCalledFunction();
+ if (Callee != SelectorFn && Callee != ExceptionFn
+ && !CI->doesNotThrow())
+ insertCallSiteStore(CI, -1, CallSite);
+ }
+ }
- Value *Val = CallInst::Create(FrameAddrFn,
- ConstantInt::get(Int32Ty, 0),
- "fp",
- EntryBB->getTerminator());
- new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
-
- // Save the stack pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *StackPtr =
- GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_sp_gep",
- EntryBB->getTerminator());
-
- Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
- new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
-
- // Call the setjmp instrinsic. It fills in the rest of the jmpbuf.
- Value *SetjmpArg =
- CastInst::Create(Instruction::BitCast, JBufPtr,
- Type::getInt8PtrTy(F.getContext()), "",
- EntryBB->getTerminator());
- Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
- "dispatch",
- EntryBB->getTerminator());
- // check the return value of the setjmp. non-zero goes to dispatcher.
- Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
- ICmpInst::ICMP_EQ, DispatchVal, Zero,
- "notunwind");
- // Nuke the uncond branch.
- EntryBB->getTerminator()->eraseFromParent();
-
- // Put in a new condbranch in its place.
- BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);
-
- // Register the function context and make sure it's known to not throw
- CallInst *Register =
- CallInst::Create(RegisterFn, FunctionContext, "",
- ContBlock->getTerminator());
- Register->setDoesNotThrow();
-
- // At this point, we are all set up, update the invoke instructions
- // to mark their call_site values, and fill in the dispatch switch
- // accordingly.
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
- markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);
-
- // Mark call instructions that aren't nounwind as no-action
- // (call_site == -1). Skip the entry block, as prior to then, no function
- // context has been created for this function and any unexpected exceptions
- // thrown will go directly to the caller's context, which is what we want
- // anyway, so no need to do anything here.
- for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
- for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- // Ignore calls to the EH builtins (eh.selector, eh.exception)
- Constant *Callee = CI->getCalledFunction();
- if (Callee != SelectorFn && Callee != ExceptionFn
- && !CI->doesNotThrow())
- insertCallSiteStore(CI, -1, CallSite);
- }
- }
-
- // Replace all unwinds with a branch to the unwind handler.
- // ??? Should this ever happen with sjlj exceptions?
- for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
- BranchInst::Create(UnwindBlock, Unwinds[i]);
- Unwinds[i]->eraseFromParent();
- }
-
- // Following any allocas not in the entry block, update the saved SP
- // in the jmpbuf to the new value.
- for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
- Instruction *AI = JmpbufUpdatePoints[i];
- Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
- StackAddr->insertAfter(AI);
- Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
- StoreStackAddr->insertAfter(StackAddr);
- }
+ // Replace all unwinds with a branch to the unwind handler.
+ // ??? Should this ever happen with sjlj exceptions?
+ for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
+ BranchInst::Create(UnwindBlock, Unwinds[i]);
+ Unwinds[i]->eraseFromParent();
+ }
- // Finally, for any returns from this function, if this function contains an
- // invoke, add a call to unregister the function context.
- for (unsigned i = 0, e = Returns.size(); i != e; ++i)
- CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);
+ // Following any allocas not in the entry block, update the saved SP in the
+ // jmpbuf to the new value.
+ for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
+ Instruction *AI = JmpbufUpdatePoints[i];
+ Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
+ StackAddr->insertAfter(AI);
+ Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
+ StoreStackAddr->insertAfter(StackAddr);
}
+ // Finally, for any returns from this function, if this function contains an
+ // invoke, add a call to unregister the function context.
+ for (unsigned i = 0, e = Returns.size(); i != e; ++i)
+ CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);
+
return true;
}
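Most of the SjLjEHPrepare rewrite above is re-indentation: the early
`if (Invokes.empty()) return false;` replaces the old `if (!Invokes.empty())`
wrapper, and the one functional addition is the eh.sjlj.dispatch_setup call at
the top of the dispatch block. The control flow the pass builds, as a
self-contained C++ sketch — the context layout and the register/unregister
hooks are stand-ins for the real runtime functions and intrinsics, not the
exact IR:

#include <csetjmp>

struct FcnContext { int call_site; std::jmp_buf jbuf; } fcx;

static void registerCtx(FcnContext *)   {} // stands in for RegisterFn
static void unregisterCtx(FcnContext *) {} // stands in for UnregisterFn
static void mayThrow() {} // a former invoke; would longjmp(fcx.jbuf, 1)

int demo() {
  if (setjmp(fcx.jbuf) != 0) {
    // Dispatch block: target-specific dispatch_setup, then a switch on the
    // call_site recorded before the call that unwound.
    switch (fcx.call_site) {
    case 1:  goto lpad1;
    default: return -2;  // unwindbb: keep unwinding
    }
  }
  registerCtx(&fcx);     // normal path: push context on the unwind stack
  fcx.call_site = 1;     // stored before each former invoke
  mayThrow();
  unregisterCtx(&fcx);   // before every return
  return 0;
lpad1:
  return -1;             // landing pad for invoke #1
}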
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index 1bc148f..6e3fa90 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -41,7 +41,7 @@ namespace {
char SlotIndexes::ID = 0;
INITIALIZE_PASS(SlotIndexes, "slotindexes",
- "Slot index numbering", false, false);
+ "Slot index numbering", false, false)
IndexListEntry* IndexListEntry::getEmptyKeyEntry() {
return &*IndexListEntryEmptyKey;
@@ -61,7 +61,6 @@ void SlotIndexes::releaseMemory() {
mi2iMap.clear();
mbb2IdxMap.clear();
idx2MBBMap.clear();
- terminatorGaps.clear();
clearList();
}
@@ -112,13 +111,6 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
if (mi->isDebugValue())
continue;
- if (miItr == mbb->getFirstTerminator()) {
- push_back(createEntry(0, index));
- terminatorGaps.insert(
- std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
- index += SlotIndex::NUM;
- }
-
// Insert a store index for the instr.
push_back(createEntry(mi, index));
@@ -135,15 +127,12 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
index += (Slots + 1) * SlotIndex::NUM;
}
- if (mbb->getFirstTerminator() == mbb->end()) {
- push_back(createEntry(0, index));
- terminatorGaps.insert(
- std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
- index += SlotIndex::NUM;
- }
+ // We insert two blank instructions between basic blocks.
+ // One to represent live-out registers and one to represent live-ins.
+ push_back(createEntry(0, index));
+ index += SlotIndex::NUM;
- // One blank instruction at the end.
- push_back(createEntry(0, index));
+ push_back(createEntry(0, index));
SlotIndex blockEndIndex(back(), SlotIndex::LOAD);
mbb2IdxMap.insert(
@@ -169,6 +158,7 @@ void SlotIndexes::renumberIndexes() {
// resulting numbering will match what would have been generated by the
// pass during the initial numbering of the function if the new instructions
// had been present.
+ DEBUG(dbgs() << "\n*** Renumbering SlotIndexes ***\n");
functionSize = 0;
unsigned index = 0;
@@ -179,7 +169,7 @@ void SlotIndexes::renumberIndexes() {
curEntry->setIndex(index);
if (curEntry->getInstr() == 0) {
- // MBB start entry or terminator gap. Just step index by 1.
+ // MBB start entry. Just step index by 1.
index += SlotIndex::NUM;
}
else {
@@ -214,11 +204,10 @@ void SlotIndexes::dump() const {
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
- os << entry().getIndex();
- if (isPHI())
- os << "*";
+ if (isValid())
+ os << entry().getIndex() << "LudS"[getSlot()];
else
- os << "LudS"[getSlot()];
+ os << "invalid";
}
// Dump a SlotIndex to stderr.
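With terminator gaps gone, a SlotIndex now prints as its entry number followed
by one of "LudS" for the Load, use, def and Store sub-slots, or as "invalid"
for a default-constructed index. A tiny decoder mirroring that format,
assuming four sub-slots per instruction index:

#include <cstdio>

void printSlot(unsigned raw) {  // raw = entry index + sub-slot
  std::printf("%u%c\n", raw & ~3u, "LudS"[raw & 3]);
}

int main() {
  printSlot(16 | 2);  // prints "16d": the def slot of entry 16
}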
diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
new file mode 100644
index 0000000..9c0bf16
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp
@@ -0,0 +1,330 @@
+//===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the spill code placement analysis.
+//
+// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
+// basic blocks are weighted by the block frequency and added to become the node
+// bias.
+//
+// Transparent basic blocks have the variable live through, but don't care if it
+// is spilled or in a register. These blocks become connections in the Hopfield
+// network, again weighted by block frequency.
+//
+// The Hopfield network minimizes (possibly locally) its energy function:
+//
+// E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
+//
+// The energy function represents the expected spill code execution frequency,
+// or the cost of spilling. This is a Lyapunov function which never increases
+// when a node is updated. It is guaranteed to converge to a local minimum.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "spillplacement"
+#include "SpillPlacement.h"
+#include "llvm/CodeGen/EdgeBundles.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+
+using namespace llvm;
+
+char SpillPlacement::ID = 0;
+INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
+ "Spill Code Placement Analysis", true, true)
+INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
+ "Spill Code Placement Analysis", true, true)
+
+char &llvm::SpillPlacementID = SpillPlacement::ID;
+
+void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<EdgeBundles>();
+ AU.addRequiredTransitive<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+/// Node - Each edge bundle corresponds to a Hopfield node.
+///
+/// The node contains precomputed frequency data that only depends on the CFG,
+/// but Bias and Links are computed each time placeSpills is called.
+///
+/// The node Value is positive when the variable should be in a register. The
+/// value can change when linked nodes change, but convergence is very fast
+/// because all weights are positive.
+///
+struct SpillPlacement::Node {
+ /// Frequency - Total block frequency feeding into[0] or out of[1] the bundle.
+ /// Ideally, these two numbers should be identical, but inaccuracies in the
+ /// block frequency estimates mean that we need to normalize ingoing and
+ /// outgoing frequencies separately so they are commensurate.
+ float Frequency[2];
+
+ /// Bias - Normalized contributions from non-transparent blocks.
+ /// A bundle connected to a MustSpill block has a huge negative bias,
+ /// otherwise it is a number in the range [-2;2].
+ float Bias;
+
+ /// Value - Output value of this node computed from the Bias and links.
+ /// This is always in the range [-1;1]. A positive number means the variable
+ /// should go in a register through this bundle.
+ float Value;
+
+ typedef SmallVector<std::pair<float, unsigned>, 4> LinkVector;
+
+ /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
+ /// bundles. The weights are all positive and add up to at most 2, weights
+ /// from ingoing and outgoing nodes separately add up to at most 1. The weight
+ /// sum can be less than 2 when the variable is not live into / out of some
+ /// connected basic blocks.
+ LinkVector Links;
+
+ /// preferReg - Return true when this node prefers to be in a register.
+ bool preferReg() const {
+ // Undecided nodes (Value==0) go on the stack.
+ return Value > 0;
+ }
+
+ /// mustSpill - Return true if this node is so biased that it must spill.
+ bool mustSpill() const {
+ // Actually, we must spill if Bias < sum(weights).
+ // It may be worth it to compute the weight sum here?
+ return Bias < -2.0f;
+ }
+
+ /// Node - Create a blank Node.
+ Node() {
+ Frequency[0] = Frequency[1] = 0;
+ }
+
+ /// clear - Reset per-query data, but preserve frequencies that only depend on
+ /// the CFG.
+ void clear() {
+ Bias = Value = 0;
+ Links.clear();
+ }
+
+ /// addLink - Add a link to bundle b with weight w.
+ /// out=0 for an ingoing link, and 1 for an outgoing link.
+ void addLink(unsigned b, float w, bool out) {
+ // Normalize w relative to all connected blocks from that direction.
+ w /= Frequency[out];
+
+ // There can be multiple links to the same bundle, add them up.
+ for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
+ if (I->second == b) {
+ I->first += w;
+ return;
+ }
+ // This must be the first link to b.
+ Links.push_back(std::make_pair(w, b));
+ }
+
+ /// addBias - Bias this node from an ingoing[0] or outgoing[1] link.
+ void addBias(float w, bool out) {
+ // Normalize w relative to all connected blocks from that direction.
+ w /= Frequency[out];
+ Bias += w;
+ }
+
+ /// update - Recompute Value from Bias and Links. Return true when node
+ /// preference changes.
+ bool update(const Node nodes[]) {
+ // Compute the weighted sum of inputs.
+ float Sum = Bias;
+ for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
+ Sum += I->first * nodes[I->second].Value;
+
+ // The weighted sum is going to be in the range [-2;2]. Ideally, we should
+ // simply set Value = sign(Sum), but we will add a dead zone around 0 for
+ // two reasons:
+ // 1. It avoids arbitrary bias when all links are 0 as is possible during
+ // initial iterations.
+ // 2. It helps tame rounding errors when the links nominally sum to 0.
+ const float Thres = 1e-4f;
+ bool Before = preferReg();
+ if (Sum < -Thres)
+ Value = -1;
+ else if (Sum > Thres)
+ Value = 1;
+ else
+ Value = 0;
+ return Before != preferReg();
+ }
+};
+
+bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ bundles = &getAnalysis<EdgeBundles>();
+ loops = &getAnalysis<MachineLoopInfo>();
+
+ assert(!nodes && "Leaking node array");
+ nodes = new Node[bundles->getNumBundles()];
+
+ // Compute total ingoing and outgoing block frequencies for all bundles.
+ for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
+ float Freq = getBlockFrequency(I);
+ unsigned Num = I->getNumber();
+ nodes[bundles->getBundle(Num, 1)].Frequency[0] += Freq;
+ nodes[bundles->getBundle(Num, 0)].Frequency[1] += Freq;
+ }
+
+ // We never change the function.
+ return false;
+}
+
+void SpillPlacement::releaseMemory() {
+ delete[] nodes;
+ nodes = 0;
+}
+
+/// activate - mark node n as active if it wasn't already.
+void SpillPlacement::activate(unsigned n) {
+ if (ActiveNodes->test(n))
+ return;
+ ActiveNodes->set(n);
+ nodes[n].clear();
+}
+
+
+/// prepareNodes - Compute node biases and weights from a set of constraints.
+/// Set a bit in NodeMask for each active node.
+void SpillPlacement::
+prepareNodes(const SmallVectorImpl<BlockConstraint> &LiveBlocks) {
+ for (SmallVectorImpl<BlockConstraint>::const_iterator I = LiveBlocks.begin(),
+ E = LiveBlocks.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = MF->getBlockNumbered(I->Number);
+ float Freq = getBlockFrequency(MBB);
+
+ // Is this a transparent block? Link ingoing and outgoing bundles.
+ if (I->Entry == DontCare && I->Exit == DontCare) {
+ unsigned ib = bundles->getBundle(I->Number, 0);
+ unsigned ob = bundles->getBundle(I->Number, 1);
+
+ // Ignore self-loops.
+ if (ib == ob)
+ continue;
+ activate(ib);
+ activate(ob);
+ nodes[ib].addLink(ob, Freq, 1);
+ nodes[ob].addLink(ib, Freq, 0);
+ continue;
+ }
+
+ // This block is not transparent, but it can still add bias.
+ const float Bias[] = {
+ 0, // DontCare,
+ 1, // PrefReg,
+ -1, // PrefSpill
+ -HUGE_VALF // MustSpill
+ };
+
+ // Live-in to block?
+ if (I->Entry != DontCare) {
+ unsigned ib = bundles->getBundle(I->Number, 0);
+ activate(ib);
+ nodes[ib].addBias(Freq * Bias[I->Entry], 1);
+ }
+
+ // Live-out from block?
+ if (I->Exit != DontCare) {
+ unsigned ob = bundles->getBundle(I->Number, 1);
+ activate(ob);
+ nodes[ob].addBias(Freq * Bias[I->Exit], 0);
+ }
+ }
+}
+
+/// iterate - Repeatedly update the Hopfield nodes until stability or the
+/// maximum number of iterations is reached.
+/// @param Linked - Numbers of linked nodes that need updating.
+void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) {
+ if (Linked.empty())
+ return;
+
+ // Run up to 10 iterations. The edge bundle numbering is closely related to
+ // basic block numbering, so there is a strong tendency towards chains of
+ // linked nodes with sequential numbers. By scanning the linked nodes
+ // backwards and forwards, we make it very likely that a single node can
+ // affect the entire network in a single iteration. That means very fast
+ // convergence, usually in a single iteration.
+ for (unsigned iteration = 0; iteration != 10; ++iteration) {
+ // Scan backwards, skipping the last node which was just updated.
+ bool Changed = false;
+ for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
+ llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
+ unsigned n = *I;
+ bool C = nodes[n].update(nodes);
+ Changed |= C;
+ }
+ if (!Changed)
+ return;
+
+ // Scan forwards, skipping the first node which was just updated.
+ Changed = false;
+ for (SmallVectorImpl<unsigned>::const_iterator I =
+ llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
+ unsigned n = *I;
+ bool C = nodes[n].update(nodes);
+ Changed |= C;
+ }
+ if (!Changed)
+ return;
+ }
+}
+
+bool
+SpillPlacement::placeSpills(const SmallVectorImpl<BlockConstraint> &LiveBlocks,
+ BitVector &RegBundles) {
+ // Reuse RegBundles as our ActiveNodes vector.
+ ActiveNodes = &RegBundles;
+ ActiveNodes->clear();
+ ActiveNodes->resize(bundles->getNumBundles());
+
+ // Compute active nodes, links and biases.
+ prepareNodes(LiveBlocks);
+
+ // Update all active nodes, and find the ones that are actually linked to
+ // something so their value may change when iterating.
+ SmallVector<unsigned, 8> Linked;
+ for (int n = RegBundles.find_first(); n>=0; n = RegBundles.find_next(n)) {
+ nodes[n].update(nodes);
+ // A node that must spill, or a node without any links is not going to
+ // change its value ever again, so exclude it from iterations.
+ if (!nodes[n].Links.empty() && !nodes[n].mustSpill())
+ Linked.push_back(n);
+ }
+
+ // Iterate the network to convergence.
+ iterate(Linked);
+
+ // Write preferences back to RegBundles.
+ bool Perfect = true;
+ for (int n = RegBundles.find_first(); n>=0; n = RegBundles.find_next(n))
+ if (!nodes[n].preferReg()) {
+ RegBundles.reset(n);
+ Perfect = false;
+ }
+ return Perfect;
+}
+
+/// getBlockFrequency - Return our best estimate of the block frequency, which is
+/// the expected number of block executions per function invocation.
+float SpillPlacement::getBlockFrequency(const MachineBasicBlock *MBB) {
+ // Use the unnormalized spill weight for real block frequencies.
+ return LiveIntervals::getSpillWeight(true, false, loops->getLoopDepth(MBB));
+}
+
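Node::update above is the whole Hopfield step: a node's value becomes the sign
of its bias plus the link-weighted values of its neighbours, with a small dead
zone around zero. A two-node toy run showing how a strongly register-biased
node pulls a weakly spill-biased neighbour along — the weights are invented
for illustration:

#include <cassert>

// One update step for a node with a single link of weight w.
float value(float bias, float w, float neighbor) {
  const float Sum = bias + w * neighbor;
  const float Thres = 1e-4f;  // dead zone, as in Node::update()
  return Sum < -Thres ? -1.f : (Sum > Thres ? 1.f : 0.f);
}

int main() {
  // Node 0 prefers a register (bias +0.6); node 1 is lightly biased toward
  // spilling (-0.1) but linked to node 0 with weight 0.5.
  float v0 = 0, v1 = 0;
  for (int i = 0; i != 10; ++i) {
    v0 = value(0.6f, 0.5f, v1);
    v1 = value(-0.1f, 0.5f, v0);
  }
  assert(v0 == 1.f && v1 == 1.f);  // the strong neighbor pulls node 1 along
}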
diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.h b/contrib/llvm/lib/CodeGen/SpillPlacement.h
new file mode 100644
index 0000000..ef2d516
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/SpillPlacement.h
@@ -0,0 +1,108 @@
+//===-- SpillPlacement.h - Optimal Spill Code Placement --------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This analysis computes the optimal spill code placement between basic blocks.
+//
+// The runOnMachineFunction() method only precomputes some profiling information
+// about the CFG. The real work is done by placeSpills() which is called by the
+// register allocator.
+//
+// Given a variable that is live across multiple basic blocks, and given
+// constraints on the basic blocks where the variable is live, determine which
+// edge bundles should have the variable in a register and which edge bundles
+// should have the variable in a stack slot.
+//
+// The returned bit vector can be used to place optimal spill code at basic
+// block entries and exits. Spill code placement inside a basic block is not
+// considered.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SPILLPLACEMENT_H
+#define LLVM_CODEGEN_SPILLPLACEMENT_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class BitVector;
+class EdgeBundles;
+class MachineBasicBlock;
+class MachineLoopInfo;
+template <typename> class SmallVectorImpl;
+
+class SpillPlacement : public MachineFunctionPass {
+ struct Node;
+ const MachineFunction *MF;
+ const EdgeBundles *bundles;
+ const MachineLoopInfo *loops;
+ Node *nodes;
+
+ // Nodes that are active in the current computation. Owned by the placeSpills
+ // caller.
+ BitVector *ActiveNodes;
+
+public:
+ static char ID; // Pass identification, replacement for typeid.
+
+ SpillPlacement() : MachineFunctionPass(ID), nodes(0) {}
+ ~SpillPlacement() { releaseMemory(); }
+
+ /// BorderConstraint - A basic block has separate constraints for entry and
+ /// exit.
+ enum BorderConstraint {
+ DontCare, ///< Block doesn't care / variable not live.
+ PrefReg, ///< Block entry/exit prefers a register.
+ PrefSpill, ///< Block entry/exit prefers a stack slot.
+ MustSpill ///< A register is impossible, variable must be spilled.
+ };
+
+ /// BlockConstraint - Entry and exit constraints for a basic block.
+ struct BlockConstraint {
+ unsigned Number; ///< Basic block number (from MBB::getNumber()).
+ BorderConstraint Entry : 8; ///< Constraint on block entry.
+ BorderConstraint Exit : 8; ///< Constraint on block exit.
+ };
+
+ /// placeSpills - Compute the optimal spill code placement given the
+ /// constraints. No MustSpill constraints will be violated, and the smallest
+ /// possible number of PrefX constraints will be violated, weighted by
+ /// expected execution frequencies.
+ /// @param LiveBlocks Constraints for blocks that have the variable live in or
+ /// live out. DontCare/DontCare means the variable is live
+ /// through the block. DontCare/X means the variable is live
+ /// out, but not live in.
+ /// @param RegBundles Bit vector to receive the edge bundles where the
+ /// variable should be kept in a register. Each bit
+ /// corresponds to an edge bundle, a set bit means the
+ /// variable should be kept in a register through the
+ /// bundle. A clear bit means the variable should be
+ /// spilled.
+ /// @return True if a perfect solution was found, allowing the variable to be
+ /// in a register through all relevant bundles.
+ bool placeSpills(const SmallVectorImpl<BlockConstraint> &LiveBlocks,
+ BitVector &RegBundles);
+
+ /// getBlockFrequency - Return the estimated block execution frequency per
+ /// function invocation.
+ float getBlockFrequency(const MachineBasicBlock*);
+
+private:
+ virtual bool runOnMachineFunction(MachineFunction&);
+ virtual void getAnalysisUsage(AnalysisUsage&) const;
+ virtual void releaseMemory();
+
+ void activate(unsigned);
+ void prepareNodes(const SmallVectorImpl<BlockConstraint>&);
+ void iterate(const SmallVectorImpl<unsigned>&);
+};
+
+} // end namespace llvm
+
+#endif
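
As a usage illustration, here is a hedged sketch of how a register allocator
might consult this analysis. The enclosing pass, the helper name, the block
number, and the constraints are invented for the example; only the
SpillPlacement API itself comes from the header above.

    // Sketch only: assumes an enclosing MachineFunctionPass that required
    // SpillPlacement in its getAnalysisUsage().
    #include "SpillPlacement.h"
    #include "llvm/ADT/BitVector.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    static bool wantsRegister(MachineFunctionPass &Pass, unsigned MBBNum) {
      SmallVector<SpillPlacement::BlockConstraint, 8> LiveBlocks;
      SpillPlacement::BlockConstraint BC;
      BC.Number = MBBNum;                 // hypothetical block with a use
      BC.Entry = SpillPlacement::PrefReg; // prefer live-in in a register
      BC.Exit = SpillPlacement::DontCare; // not live out of the block
      LiveBlocks.push_back(BC);

      BitVector RegBundles;
      SpillPlacement &SP = Pass.getAnalysis<SpillPlacement>();
      bool Perfect = SP.placeSpills(LiveBlocks, RegBundles);
      (void)Perfect;           // true when no PrefX constraint was violated
      return RegBundles.any(); // some bundle wants the value in a register
    }
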
diff --git a/contrib/llvm/lib/CodeGen/Spiller.cpp b/contrib/llvm/lib/CodeGen/Spiller.cpp
index 59d5ab3..fd38582 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.cpp
+++ b/contrib/llvm/lib/CodeGen/Spiller.cpp
@@ -12,6 +12,7 @@
#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -28,7 +29,7 @@
using namespace llvm;
namespace {
- enum SpillerName { trivial, standard, splitting, inline_ };
+ enum SpillerName { trivial, standard, inline_ };
}
static cl::opt<SpillerName>
@@ -37,7 +38,6 @@ spillerOpt("spiller",
cl::Prefix,
cl::values(clEnumVal(trivial, "trivial spiller"),
clEnumVal(standard, "default spiller"),
- clEnumVal(splitting, "splitting spiller"),
clEnumValN(inline_, "inline", "inline spiller"),
clEnumValEnd),
cl::init(standard));
@@ -80,7 +80,7 @@ protected:
assert(li->weight != HUGE_VALF &&
"Attempting to spill already spilled value.");
- assert(!li->isStackSlot() &&
+ assert(!TargetRegisterInfo::isStackSlot(li->reg) &&
"Trying to spill a stack slot.");
DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
@@ -144,7 +144,7 @@ protected:
vrm->addSpillSlotUse(ss, loadInstr);
SlotIndex endIndex = loadIndex.getNextIndex();
VNInfo *loadVNI =
- newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
+ newLI->getNextValue(loadIndex, 0, lis->getVNInfoAllocator());
newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
}
@@ -158,7 +158,7 @@ protected:
vrm->addSpillSlotUse(ss, storeInstr);
SlotIndex beginIndex = storeIndex.getPrevIndex();
VNInfo *storeVNI =
- newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
+ newLI->getNextValue(beginIndex, 0, lis->getVNInfoAllocator());
newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
}
@@ -182,7 +182,7 @@ public:
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &) {
+ const SmallVectorImpl<LiveInterval*> &) {
// Ignore spillIs - we don't use it.
trivialSpillEverywhere(li, newIntervals);
}
@@ -195,315 +195,42 @@ namespace {
/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
protected:
+ MachineFunction *mf;
LiveIntervals *lis;
+ LiveStacks *lss;
MachineLoopInfo *loopInfo;
VirtRegMap *vrm;
public:
StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
VirtRegMap &vrm)
- : lis(&pass.getAnalysis<LiveIntervals>()),
+ : mf(&mf),
+ lis(&pass.getAnalysis<LiveIntervals>()),
+ lss(&pass.getAnalysis<LiveStacks>()),
loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
vrm(&vrm) {}
/// Falls back on LiveIntervals::addIntervalsForSpills.
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &spillIs) {
+ const SmallVectorImpl<LiveInterval*> &spillIs) {
std::vector<LiveInterval*> added =
lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
newIntervals.insert(newIntervals.end(), added.begin(), added.end());
- }
-};
-
-} // end anonymous namespace
-
-namespace {
-
-/// When a call to spill is placed this spiller will first try to break the
-/// interval up into its component values (one new interval per value).
-/// If this fails, or if a call is placed to spill a previously split interval
-/// then the spiller falls back on the standard spilling mechanism.
-class SplittingSpiller : public StandardSpiller {
-public:
- SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
- VirtRegMap &vrm)
- : StandardSpiller(pass, mf, vrm) {
- mri = &mf.getRegInfo();
- tii = mf.getTarget().getInstrInfo();
- tri = mf.getTarget().getRegisterInfo();
- }
- void spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &spillIs) {
- if (worthTryingToSplit(li))
- tryVNISplit(li);
- else
- StandardSpiller::spill(li, newIntervals, spillIs);
+ // Update LiveStacks.
+ int SS = vrm->getStackSlot(li->reg);
+ if (SS == VirtRegMap::NO_STACK_SLOT)
+ return;
+ const TargetRegisterClass *RC = mf->getRegInfo().getRegClass(li->reg);
+ LiveInterval &SI = lss->getOrCreateInterval(SS, RC);
+ if (!SI.hasAtLeastOneValue())
+ SI.getNextValue(SlotIndex(), 0, lss->getVNInfoAllocator());
+ SI.MergeRangesInAsValue(*li, SI.getValNumInfo(0));
}
-
-private:
-
- MachineRegisterInfo *mri;
- const TargetInstrInfo *tii;
- const TargetRegisterInfo *tri;
- DenseSet<LiveInterval*> alreadySplit;
-
- bool worthTryingToSplit(LiveInterval *li) const {
- return (!alreadySplit.count(li) && li->getNumValNums() > 1);
- }
-
- /// Try to break a LiveInterval into its component values.
- std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {
-
- DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
-
- std::vector<LiveInterval*> added;
- SmallVector<VNInfo*, 4> vnis;
-
- std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
-
- for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
- vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
- VNInfo *vni = *vniItr;
-
- // Skip unused VNIs.
- if (vni->isUnused())
- continue;
-
- DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
- LiveInterval *splitInterval = extractVNI(li, vni);
-
- if (splitInterval != 0) {
- DEBUG(dbgs() << *splitInterval << "\n");
- added.push_back(splitInterval);
- alreadySplit.insert(splitInterval);
- } else {
- DEBUG(dbgs() << "0\n");
- }
- }
-
- DEBUG(dbgs() << "Original LI: " << *li << "\n");
-
- // If there original interval still contains some live ranges
- // add it to added and alreadySplit.
- if (!li->empty()) {
- added.push_back(li);
- alreadySplit.insert(li);
- }
-
- return added;
- }
-
- /// Extract the given value number from the interval.
- LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
- assert(vni->isDefAccurate() || vni->isPHIDef());
-
- // Create a new vreg and live interval, copy VNI ranges over.
- const TargetRegisterClass *trc = mri->getRegClass(li->reg);
- unsigned newVReg = mri->createVirtualRegister(trc);
- vrm->grow();
- LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
- VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
-
- // Start by copying all live ranges in the VN to the new interval.
- for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
- rItr != rEnd; ++rItr) {
- if (rItr->valno == vni) {
- newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
- }
- }
-
- // Erase the old VNI & ranges.
- li->removeValNo(vni);
-
- // Collect all current uses of the register belonging to the given VNI.
- // We'll use this to rename the register after we've dealt with the def.
- std::set<MachineInstr*> uses;
- for (MachineRegisterInfo::use_iterator
- useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
- useItr != useEnd; ++useItr) {
- uses.insert(&*useItr);
- }
-
- // Process the def instruction for this VNI.
- if (newVNI->isPHIDef()) {
- // Insert a copy at the start of the MBB. The range proceeding the
- // copy will be attached to the original LiveInterval.
- MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
- MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
- tii->get(TargetOpcode::COPY), newVReg)
- .addReg(li->reg, RegState::Kill);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
- 0, false, lis->getVNInfoAllocator());
- phiDefVNI->setIsPHIDef(true);
- li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
- LiveRange *oldPHIDefRange =
- newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
-
- // If the old phi def starts in the middle of the range chop it up.
- if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
- LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
- oldPHIDefRange->valno);
- oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
- newLI->addRange(oldPHIDefRange2);
- } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
- // Otherwise if it's at the start of the range just trim it.
- oldPHIDefRange->start = copyIdx.getDefIndex();
- } else {
- assert(false && "PHI def range doesn't cover PHI def?");
- }
-
- newVNI->def = copyIdx.getDefIndex();
- newVNI->setCopy(copyMI);
- newVNI->setIsPHIDef(false); // not a PHI def anymore.
- newVNI->setIsDefAccurate(true);
- } else {
- // non-PHI def. Rename the def. If it's two-addr that means renaming the
- // use and inserting a new copy too.
- MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
- // We'll rename this now, so we can remove it from uses.
- uses.erase(defInst);
- unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
- bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
- twoAddrUseIsUndef = false;
-
- for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
- MachineOperand &mo = defInst->getOperand(i);
- if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
- mo.setReg(newVReg);
- if (isTwoAddr && mo.isUse() && mo.isUndef())
- twoAddrUseIsUndef = true;
- }
- }
-
- SlotIndex defIdx = lis->getInstructionIndex(defInst);
- newVNI->def = defIdx.getDefIndex();
-
- if (isTwoAddr && !twoAddrUseIsUndef) {
- MachineBasicBlock *defMBB = defInst->getParent();
- MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
- tii->get(TargetOpcode::COPY), newVReg)
- .addReg(li->reg, RegState::Kill);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- LiveRange *origUseRange =
- li->getLiveRangeContaining(newVNI->def.getUseIndex());
- origUseRange->end = copyIdx.getDefIndex();
- VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
- true, lis->getVNInfoAllocator());
- LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
- newLI->addRange(copyRange);
- }
- }
-
- for (std::set<MachineInstr*>::iterator
- usesItr = uses.begin(), usesEnd = uses.end();
- usesItr != usesEnd; ++usesItr) {
- MachineInstr *useInst = *usesItr;
- SlotIndex useIdx = lis->getInstructionIndex(useInst);
- LiveRange *useRange =
- newLI->getLiveRangeContaining(useIdx.getUseIndex());
-
- // If this use doesn't belong to the new interval skip it.
- if (useRange == 0)
- continue;
-
- // This use doesn't belong to the VNI, skip it.
- if (useRange->valno != newVNI)
- continue;
-
- // Check if this instr is two address.
- unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
- bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
-
- // Rename uses (and defs for two-address instrs).
- for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
- MachineOperand &mo = useInst->getOperand(i);
- if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
- (mo.getReg() == li->reg)) {
- mo.setReg(newVReg);
- }
- }
-
- // If this is a two address instruction we've got some extra work to do.
- if (isTwoAddress) {
- // We modified the def operand, so we need to copy back to the original
- // reg.
- MachineBasicBlock *useMBB = useInst->getParent();
- MachineBasicBlock::iterator useItr(useInst);
- MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
- tii->get(TargetOpcode::COPY), newVReg)
- .addReg(li->reg, RegState::Kill);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
-
- // Change the old two-address defined range & vni to start at
- // (and be defined by) the copy.
- LiveRange *origDefRange =
- li->getLiveRangeContaining(useIdx.getDefIndex());
- origDefRange->start = copyIdx.getDefIndex();
- origDefRange->valno->def = copyIdx.getDefIndex();
- origDefRange->valno->setCopy(copyMI);
-
- // Insert a new range & vni for the two-address-to-copy value. This
- // will be attached to the new live interval.
- VNInfo *copyVNI =
- newLI->getNextValue(useIdx.getDefIndex(), 0, true,
- lis->getVNInfoAllocator());
- LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
- newLI->addRange(copyRange);
- }
- }
-
- // Iterate over any PHI kills - we'll need to insert new copies for them.
- for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
- LRI != LRE; ++LRI) {
- if (LRI->valno != newVNI || LRI->end.isPHI())
- continue;
- SlotIndex killIdx = LRI->end;
- MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
- MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
- DebugLoc(), tii->get(TargetOpcode::COPY),
- li->reg)
- .addReg(newVReg, RegState::Kill);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
-
- // Save the current end. We may need it to add a new range if the
- // current range runs of the end of the MBB.
- SlotIndex newKillRangeEnd = LRI->end;
- LRI->end = copyIdx.getDefIndex();
-
- if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
- assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
- "PHI kill range doesn't reach kill-block end. Not sane.");
- newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
- newKillRangeEnd, newVNI));
- }
-
- VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
- copyMI, true,
- lis->getVNInfoAllocator());
- newKillVNI->setHasPHIKill(true);
- li->addRange(LiveRange(copyIdx.getDefIndex(),
- lis->getMBBEndIdx(killMBB),
- newKillVNI));
- }
- newVNI->setHasPHIKill(false);
-
- return newLI;
- }
-
};
} // end anonymous namespace
-
-namespace llvm {
-Spiller *createInlineSpiller(MachineFunctionPass &pass,
- MachineFunction &mf,
- VirtRegMap &vrm);
-}
-
llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm) {
@@ -511,7 +238,6 @@ llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
default: assert(0 && "unknown spiller");
case trivial: return new TrivialSpiller(pass, mf, vrm);
case standard: return new StandardSpiller(pass, mf, vrm);
- case splitting: return new SplittingSpiller(pass, mf, vrm);
case inline_: return createInlineSpiller(pass, mf, vrm);
}
}
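
For context, a hedged sketch of how an allocator might drive the factory
above. The helper name and its callers are invented; the factory honours the
-spiller= command-line option declared earlier in this file.

    // Sketch only: spill one interval through whichever implementation the
    // -spiller= option selected (trivial, standard, or inline).
    #include "Spiller.h"
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    static void spillOne(MachineFunctionPass &Pass, MachineFunction &MF,
                         VirtRegMap &VRM, LiveInterval &LI) {
      OwningPtr<Spiller> S(createSpiller(Pass, MF, VRM));
      SmallVector<LiveInterval*, 8> NewIntervals; // filled in by the spiller
      SmallVector<LiveInterval*, 8> SpillIs;      // sibling spills, none here
      S->spill(&LI, NewIntervals, SpillIs);
      // A real caller would queue NewIntervals for another allocation round.
    }
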
diff --git a/contrib/llvm/lib/CodeGen/Spiller.h b/contrib/llvm/lib/CodeGen/Spiller.h
index 59bc0ec..f017583 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.h
+++ b/contrib/llvm/lib/CodeGen/Spiller.h
@@ -10,14 +10,13 @@
#ifndef LLVM_CODEGEN_SPILLER_H
#define LLVM_CODEGEN_SPILLER_H
-#include "llvm/ADT/SmallVector.h"
-
namespace llvm {
class LiveInterval;
class MachineFunction;
class MachineFunctionPass;
class SlotIndex;
+ template <typename T> class SmallVectorImpl;
class VirtRegMap;
/// Spiller interface.
@@ -37,7 +36,7 @@ namespace llvm {
/// @param newIntervals The newly created intervals will be appended here.
virtual void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
- SmallVectorImpl<LiveInterval*> &spillIs) = 0;
+ const SmallVectorImpl<LiveInterval*> &spillIs) = 0;
};
@@ -45,6 +44,13 @@ namespace llvm {
Spiller* createSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm);
+
+ /// Create and return a spiller that will insert spill code directly instead
+ /// of deferring through VirtRegMap.
+ Spiller *createInlineSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm);
+
}
#endif
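
To show the shape of the interface, here is a hypothetical do-nothing
implementation. It is not part of the tree; real spillers must actually
rewrite the code, as the classes in Spiller.cpp do.

    // Hypothetical subclass for illustration only: satisfies the interface
    // but spills nothing, so it would be useless in a real allocator.
    #include "Spiller.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/LiveInterval.h"
    #include "llvm/Support/Debug.h"

    namespace {
    class NullSpiller : public llvm::Spiller {
    public:
      void spill(llvm::LiveInterval *li,
                 llvm::SmallVectorImpl<llvm::LiveInterval*> &newIntervals,
                 const llvm::SmallVectorImpl<llvm::LiveInterval*> &spillIs) {
        (void)newIntervals;
        (void)spillIs;
        llvm::dbgs() << "spill requested for reg" << li->reg << "\n";
      }
    };
    } // end anonymous namespace
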
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.cpp b/contrib/llvm/lib/CodeGen/SplitKit.cpp
index 29474f0..5663936 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.cpp
+++ b/contrib/llvm/lib/CodeGen/SplitKit.cpp
@@ -12,13 +12,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "splitter"
+#define DEBUG_TYPE "regalloc"
#include "SplitKit.h"
+#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -36,371 +37,231 @@ AllowSplit("spiller-splits-edges",
// Split Analysis
//===----------------------------------------------------------------------===//
-SplitAnalysis::SplitAnalysis(const MachineFunction &mf,
+SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm,
const LiveIntervals &lis,
const MachineLoopInfo &mli)
- : mf_(mf),
- lis_(lis),
- loops_(mli),
- tii_(*mf.getTarget().getInstrInfo()),
- curli_(0) {}
+ : MF(vrm.getMachineFunction()),
+ VRM(vrm),
+ LIS(lis),
+ Loops(mli),
+ TII(*MF.getTarget().getInstrInfo()),
+ CurLI(0) {}
void SplitAnalysis::clear() {
- usingInstrs_.clear();
- usingBlocks_.clear();
- usingLoops_.clear();
- curli_ = 0;
+ UseSlots.clear();
+ UsingInstrs.clear();
+ UsingBlocks.clear();
+ LiveBlocks.clear();
+ CurLI = 0;
}
bool SplitAnalysis::canAnalyzeBranch(const MachineBasicBlock *MBB) {
MachineBasicBlock *T, *F;
SmallVector<MachineOperand, 4> Cond;
- return !tii_.AnalyzeBranch(const_cast<MachineBasicBlock&>(*MBB), T, F, Cond);
+ return !TII.AnalyzeBranch(const_cast<MachineBasicBlock&>(*MBB), T, F, Cond);
}
-/// analyzeUses - Count instructions, basic blocks, and loops using curli.
+/// analyzeUses - Count instructions and basic blocks using CurLI.
void SplitAnalysis::analyzeUses() {
- const MachineRegisterInfo &MRI = mf_.getRegInfo();
- for (MachineRegisterInfo::reg_iterator I = MRI.reg_begin(curli_->reg);
- MachineInstr *MI = I.skipInstruction();) {
- if (MI->isDebugValue() || !usingInstrs_.insert(MI))
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ for (MachineRegisterInfo::reg_iterator I = MRI.reg_begin(CurLI->reg),
+ E = MRI.reg_end(); I != E; ++I) {
+ MachineOperand &MO = I.getOperand();
+ if (MO.isUse() && MO.isUndef())
continue;
- MachineBasicBlock *MBB = MI->getParent();
- if (usingBlocks_[MBB]++)
+ MachineInstr *MI = MO.getParent();
+ if (MI->isDebugValue() || !UsingInstrs.insert(MI))
continue;
- if (MachineLoop *Loop = loops_.getLoopFor(MBB))
- usingLoops_[Loop]++;
+ UseSlots.push_back(LIS.getInstructionIndex(MI).getDefIndex());
+ MachineBasicBlock *MBB = MI->getParent();
+ UsingBlocks[MBB]++;
}
+ array_pod_sort(UseSlots.begin(), UseSlots.end());
+ calcLiveBlockInfo();
DEBUG(dbgs() << " counted "
- << usingInstrs_.size() << " instrs, "
- << usingBlocks_.size() << " blocks, "
- << usingLoops_.size() << " loops.\n");
+ << UsingInstrs.size() << " instrs, "
+ << UsingBlocks.size() << " blocks.\n");
}
-/// removeUse - Update statistics by noting that MI no longer uses curli.
-void SplitAnalysis::removeUse(const MachineInstr *MI) {
- if (!usingInstrs_.erase(MI))
+/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
+/// where CurLI is live.
+void SplitAnalysis::calcLiveBlockInfo() {
+ if (CurLI->empty())
return;
- // Decrement MBB count.
- const MachineBasicBlock *MBB = MI->getParent();
- BlockCountMap::iterator bi = usingBlocks_.find(MBB);
- assert(bi != usingBlocks_.end() && "MBB missing");
- assert(bi->second && "0 count in map");
- if (--bi->second)
- return;
- // No more uses in MBB.
- usingBlocks_.erase(bi);
+ LiveInterval::const_iterator LVI = CurLI->begin();
+ LiveInterval::const_iterator LVE = CurLI->end();
+
+ SmallVectorImpl<SlotIndex>::const_iterator UseI, UseE;
+ UseI = UseSlots.begin();
+ UseE = UseSlots.end();
+
+ // Loop over basic blocks where CurLI is live.
+ MachineFunction::iterator MFI = LIS.getMBBFromIndex(LVI->start);
+ for (;;) {
+ BlockInfo BI;
+ BI.MBB = MFI;
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
+
+ // The last split point is the latest possible insertion point that dominates
+ // all successor blocks. If interference reaches LastSplitPoint, it is not
+ // possible to insert a split or reload that makes CurLI live in the
+ // outgoing bundle.
+ MachineBasicBlock::iterator LSP = LIS.getLastSplitPoint(*CurLI, BI.MBB);
+ if (LSP == BI.MBB->end())
+ BI.LastSplitPoint = Stop;
+ else
+ BI.LastSplitPoint = LIS.getInstructionIndex(LSP);
+
+ // LVI is the first live segment overlapping MBB.
+ BI.LiveIn = LVI->start <= Start;
+ if (!BI.LiveIn)
+ BI.Def = LVI->start;
+
+ // Find the first and last uses in the block.
+ BI.Uses = hasUses(MFI);
+ if (BI.Uses && UseI != UseE) {
+ BI.FirstUse = *UseI;
+ assert(BI.FirstUse >= Start);
+ do ++UseI;
+ while (UseI != UseE && *UseI < Stop);
+ BI.LastUse = UseI[-1];
+ assert(BI.LastUse < Stop);
+ }
- // Decrement loop count.
- MachineLoop *Loop = loops_.getLoopFor(MBB);
- if (!Loop)
- return;
- LoopCountMap::iterator li = usingLoops_.find(Loop);
- assert(li != usingLoops_.end() && "Loop missing");
- assert(li->second && "0 count in map");
- if (--li->second)
- return;
- // No more blocks in Loop.
- usingLoops_.erase(li);
-}
+ // Look for gaps in the live range.
+ bool hasGap = false;
+ BI.LiveOut = true;
+ while (LVI->end < Stop) {
+ SlotIndex LastStop = LVI->end;
+ if (++LVI == LVE || LVI->start >= Stop) {
+ BI.Kill = LastStop;
+ BI.LiveOut = false;
+ break;
+ }
+ if (LastStop < LVI->start) {
+ hasGap = true;
+ BI.Kill = LastStop;
+ BI.Def = LVI->start;
+ }
+ }
-// Get three sets of basic blocks surrounding a loop: Blocks inside the loop,
-// predecessor blocks, and exit blocks.
-void SplitAnalysis::getLoopBlocks(const MachineLoop *Loop, LoopBlocks &Blocks) {
- Blocks.clear();
-
- // Blocks in the loop.
- Blocks.Loop.insert(Loop->block_begin(), Loop->block_end());
-
- // Predecessor blocks.
- const MachineBasicBlock *Header = Loop->getHeader();
- for (MachineBasicBlock::const_pred_iterator I = Header->pred_begin(),
- E = Header->pred_end(); I != E; ++I)
- if (!Blocks.Loop.count(*I))
- Blocks.Preds.insert(*I);
-
- // Exit blocks.
- for (MachineLoop::block_iterator I = Loop->block_begin(),
- E = Loop->block_end(); I != E; ++I) {
- const MachineBasicBlock *MBB = *I;
- for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI)
- if (!Blocks.Loop.count(*SI))
- Blocks.Exits.insert(*SI);
- }
-}
+ // Don't set LiveThrough when the block has a gap.
+ BI.LiveThrough = !hasGap && BI.LiveIn && BI.LiveOut;
+ LiveBlocks.push_back(BI);
-/// analyzeLoopPeripheralUse - Return an enum describing how curli_ is used in
-/// and around the Loop.
-SplitAnalysis::LoopPeripheralUse SplitAnalysis::
-analyzeLoopPeripheralUse(const SplitAnalysis::LoopBlocks &Blocks) {
- LoopPeripheralUse use = ContainedInLoop;
- for (BlockCountMap::iterator I = usingBlocks_.begin(), E = usingBlocks_.end();
- I != E; ++I) {
- const MachineBasicBlock *MBB = I->first;
- // Is this a peripheral block?
- if (use < MultiPeripheral &&
- (Blocks.Preds.count(MBB) || Blocks.Exits.count(MBB))) {
- if (I->second > 1) use = MultiPeripheral;
- else use = SinglePeripheral;
- continue;
- }
- // Is it a loop block?
- if (Blocks.Loop.count(MBB))
- continue;
- // It must be an unrelated block.
- return OutsideLoop;
- }
- return use;
-}
+ // LVI is now at LVE or LVI->end >= Stop.
+ if (LVI == LVE)
+ break;
-/// getCriticalExits - It may be necessary to partially break critical edges
-/// leaving the loop if an exit block has phi uses of curli. Collect the exit
-/// blocks that need special treatment into CriticalExits.
-void SplitAnalysis::getCriticalExits(const SplitAnalysis::LoopBlocks &Blocks,
- BlockPtrSet &CriticalExits) {
- CriticalExits.clear();
-
- // A critical exit block contains a phi def of curli, and has a predecessor
- // that is not in the loop nor a loop predecessor.
- // For such an exit block, the edges carrying the new variable must be moved
- // to a new pre-exit block.
- for (BlockPtrSet::iterator I = Blocks.Exits.begin(), E = Blocks.Exits.end();
- I != E; ++I) {
- const MachineBasicBlock *Succ = *I;
- SlotIndex SuccIdx = lis_.getMBBStartIdx(Succ);
- VNInfo *SuccVNI = curli_->getVNInfoAt(SuccIdx);
- // This exit may not have curli live in at all. No need to split.
- if (!SuccVNI)
- continue;
- // If this is not a PHI def, it is either using a value from before the
- // loop, or a value defined inside the loop. Both are safe.
- if (!SuccVNI->isPHIDef() || SuccVNI->def.getBaseIndex() != SuccIdx)
- continue;
- // This exit block does have a PHI. Does it also have a predecessor that is
- // not a loop block or loop predecessor?
- for (MachineBasicBlock::const_pred_iterator PI = Succ->pred_begin(),
- PE = Succ->pred_end(); PI != PE; ++PI) {
- const MachineBasicBlock *Pred = *PI;
- if (Blocks.Loop.count(Pred) || Blocks.Preds.count(Pred))
- continue;
- // This is a critical exit block, and we need to split the exit edge.
- CriticalExits.insert(Succ);
+ // Live segment ends exactly at Stop. Move to the next segment.
+ if (LVI->end == Stop && ++LVI == LVE)
break;
- }
+
+ // Pick the next basic block.
+ if (LVI->start < Stop)
+ ++MFI;
+ else
+ MFI = LIS.getMBBFromIndex(LVI->start);
}
}
-/// canSplitCriticalExits - Return true if it is possible to insert new exit
-/// blocks before the blocks in CriticalExits.
-bool
-SplitAnalysis::canSplitCriticalExits(const SplitAnalysis::LoopBlocks &Blocks,
- BlockPtrSet &CriticalExits) {
- // If we don't allow critical edge splitting, require no critical exits.
- if (!AllowSplit)
- return CriticalExits.empty();
-
- for (BlockPtrSet::iterator I = CriticalExits.begin(), E = CriticalExits.end();
- I != E; ++I) {
- const MachineBasicBlock *Succ = *I;
- // We want to insert a new pre-exit MBB before Succ, and change all the
- // in-loop blocks to branch to the pre-exit instead of Succ.
- // Check that all the in-loop predecessors can be changed.
- for (MachineBasicBlock::const_pred_iterator PI = Succ->pred_begin(),
- PE = Succ->pred_end(); PI != PE; ++PI) {
- const MachineBasicBlock *Pred = *PI;
- // The external predecessors won't be altered.
- if (!Blocks.Loop.count(Pred) && !Blocks.Preds.count(Pred))
- continue;
- if (!canAnalyzeBranch(Pred))
- return false;
- }
-
- // If Succ's layout predecessor falls through, that too must be analyzable.
- // We need to insert the pre-exit block in the gap.
- MachineFunction::const_iterator MFI = Succ;
- if (MFI == mf_.begin())
- continue;
- if (!canAnalyzeBranch(--MFI))
- return false;
+void SplitAnalysis::print(const BlockPtrSet &B, raw_ostream &OS) const {
+ for (BlockPtrSet::const_iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ unsigned count = UsingBlocks.lookup(*I);
+ OS << " BB#" << (*I)->getNumber();
+ if (count)
+ OS << '(' << count << ')';
}
- // No problems found.
- return true;
}
void SplitAnalysis::analyze(const LiveInterval *li) {
clear();
- curli_ = li;
+ CurLI = li;
analyzeUses();
}
-const MachineLoop *SplitAnalysis::getBestSplitLoop() {
- assert(curli_ && "Call analyze() before getBestSplitLoop");
- if (usingLoops_.empty())
- return 0;
-
- LoopPtrSet Loops, SecondLoops;
- LoopBlocks Blocks;
- BlockPtrSet CriticalExits;
-
- // Find first-class and second class candidate loops.
- // We prefer to split around loops where curli is used outside the periphery.
- for (LoopCountMap::const_iterator I = usingLoops_.begin(),
- E = usingLoops_.end(); I != E; ++I) {
- const MachineLoop *Loop = I->first;
- getLoopBlocks(Loop, Blocks);
-
- // FIXME: We need an SSA updater to properly handle multiple exit blocks.
- if (Blocks.Exits.size() > 1) {
- DEBUG(dbgs() << " multiple exits from " << *Loop);
- continue;
- }
-
- LoopPtrSet *LPS = 0;
- switch(analyzeLoopPeripheralUse(Blocks)) {
- case OutsideLoop:
- LPS = &Loops;
- break;
- case MultiPeripheral:
- LPS = &SecondLoops;
- break;
- case ContainedInLoop:
- DEBUG(dbgs() << " contained in " << *Loop);
- continue;
- case SinglePeripheral:
- DEBUG(dbgs() << " single peripheral use in " << *Loop);
- continue;
- }
- // Will it be possible to split around this loop?
- getCriticalExits(Blocks, CriticalExits);
- DEBUG(dbgs() << " " << CriticalExits.size() << " critical exits from "
- << *Loop);
- if (!canSplitCriticalExits(Blocks, CriticalExits))
- continue;
- // This is a possible split.
- assert(LPS);
- LPS->insert(Loop);
- }
-
- DEBUG(dbgs() << " getBestSplitLoop found " << Loops.size() << " + "
- << SecondLoops.size() << " candidate loops.\n");
-
- // If there are no first class loops available, look at second class loops.
- if (Loops.empty())
- Loops = SecondLoops;
- if (Loops.empty())
- return 0;
+//===----------------------------------------------------------------------===//
+// LiveIntervalMap
+//===----------------------------------------------------------------------===//
- // Pick the earliest loop.
- // FIXME: Are there other heuristics to consider?
- const MachineLoop *Best = 0;
- SlotIndex BestIdx;
- for (LoopPtrSet::const_iterator I = Loops.begin(), E = Loops.end(); I != E;
- ++I) {
- SlotIndex Idx = lis_.getMBBStartIdx((*I)->getHeader());
- if (!Best || Idx < BestIdx)
- Best = *I, BestIdx = Idx;
- }
- DEBUG(dbgs() << " getBestSplitLoop found " << *Best);
- return Best;
+// Work around the fact that the std::pair constructors are broken for pointer
+// pairs in some implementations. makeVV(x, 0) works.
+static inline std::pair<const VNInfo*, VNInfo*>
+makeVV(const VNInfo *a, VNInfo *b) {
+ return std::make_pair(a, b);
}
-/// getMultiUseBlocks - if curli has more than one use in a basic block, it
-/// may be an advantage to split curli for the duration of the block.
-bool SplitAnalysis::getMultiUseBlocks(BlockPtrSet &Blocks) {
- // If curli is local to one block, there is no point to splitting it.
- if (usingBlocks_.size() <= 1)
- return false;
- // Add blocks with multiple uses.
- for (BlockCountMap::iterator I = usingBlocks_.begin(), E = usingBlocks_.end();
- I != E; ++I)
- switch (I->second) {
- case 0:
- case 1:
- continue;
- case 2: {
- // It doesn't pay to split a 2-instr block if it redefines curli.
- VNInfo *VN1 = curli_->getVNInfoAt(lis_.getMBBStartIdx(I->first));
- VNInfo *VN2 =
- curli_->getVNInfoAt(lis_.getMBBEndIdx(I->first).getPrevIndex());
- // live-in and live-out with a different value.
- if (VN1 && VN2 && VN1 != VN2)
- continue;
- } // Fall through.
- default:
- Blocks.insert(I->first);
- }
- return !Blocks.empty();
+void LiveIntervalMap::reset(LiveInterval *li) {
+ LI = li;
+ Values.clear();
+ LiveOutCache.clear();
}
-//===----------------------------------------------------------------------===//
-// LiveIntervalMap
-//===----------------------------------------------------------------------===//
+bool LiveIntervalMap::isComplexMapped(const VNInfo *ParentVNI) const {
+ ValueMap::const_iterator i = Values.find(ParentVNI);
+ return i != Values.end() && i->second == 0;
+}
-// defValue - Introduce a li_ def for ParentVNI that could be later than
+// defValue - Introduce a LI def for ParentVNI that could be later than
// ParentVNI->def.
VNInfo *LiveIntervalMap::defValue(const VNInfo *ParentVNI, SlotIndex Idx) {
+ assert(LI && "call reset first");
assert(ParentVNI && "Mapping NULL value");
assert(Idx.isValid() && "Invalid SlotIndex");
- assert(parentli_.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
-
- // Is this a simple 1-1 mapping? Not likely.
- if (Idx == ParentVNI->def)
- return mapValue(ParentVNI, Idx);
-
- // This is a complex def. Mark with a NULL in valueMap.
- VNInfo *OldVNI =
- valueMap_.insert(
- ValueMap::value_type(ParentVNI, static_cast<VNInfo *>(0))).first->second;
- // The static_cast<VNInfo *> is only needed to work around a bug in an
- // old version of the C++0x standard which the following compilers
- // implemented and have yet to fix:
- //
- // Microsoft Visual Studio 2010 Version 10.0.30319.1 RTMRel
- // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01
- //
- // If/When we move to C++0x, this can be replaced by nullptr.
- (void)OldVNI;
- assert(OldVNI == 0 && "Simple/Complex values mixed");
-
- // Should we insert a minimal snippet of VNI LiveRange, or can we count on
- // callers to do that? We need it for lookups of complex values.
- VNInfo *VNI = li_.getNextValue(Idx, 0, true, lis_.getVNInfoAllocator());
+ assert(ParentLI.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
+
+ // Create a new value.
+ VNInfo *VNI = LI->getNextValue(Idx, 0, LIS.getVNInfoAllocator());
+
+ // Preserve the PHIDef bit.
+ if (ParentVNI->isPHIDef() && Idx == ParentVNI->def)
+ VNI->setIsPHIDef(true);
+
+ // Use insert for lookup, so we can add missing values with a second lookup.
+ std::pair<ValueMap::iterator,bool> InsP =
+ Values.insert(makeVV(ParentVNI, Idx == ParentVNI->def ? VNI : 0));
+
+ // This is now a complex def. Mark with a NULL in Values.
+ if (!InsP.second)
+ InsP.first->second = 0;
+
return VNI;
}
+
// mapValue - Find the mapped value for ParentVNI at Idx.
// Potentially create phi-def values.
-VNInfo *LiveIntervalMap::mapValue(const VNInfo *ParentVNI, SlotIndex Idx) {
+VNInfo *LiveIntervalMap::mapValue(const VNInfo *ParentVNI, SlotIndex Idx,
+ bool *simple) {
+ assert(LI && "call reset first");
assert(ParentVNI && "Mapping NULL value");
assert(Idx.isValid() && "Invalid SlotIndex");
- assert(parentli_.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
+ assert(ParentLI.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
// Use insert for lookup, so we can add missing values with a second lookup.
std::pair<ValueMap::iterator,bool> InsP =
- valueMap_.insert(ValueMap::value_type(ParentVNI, static_cast<VNInfo *>(0)));
- // The static_cast<VNInfo *> is only needed to work around a bug in an
- // old version of the C++0x standard which the following compilers
- // implemented and have yet to fix:
- //
- // Microsoft Visual Studio 2010 Version 10.0.30319.1 RTMRel
- // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01
- //
- // If/When we move to C++0x, this can be replaced by nullptr.
+ Values.insert(makeVV(ParentVNI, 0));
// This was an unknown value. Create a simple mapping.
- if (InsP.second)
- return InsP.first->second = li_.createValueCopy(ParentVNI,
- lis_.getVNInfoAllocator());
+ if (InsP.second) {
+ if (simple) *simple = true;
+ return InsP.first->second = LI->createValueCopy(ParentVNI,
+ LIS.getVNInfoAllocator());
+ }
+
// This was a simple mapped value.
- if (InsP.first->second)
+ if (InsP.first->second) {
+ if (simple) *simple = true;
return InsP.first->second;
+ }
// This is a complex mapped value. There may be multiple defs, and we may need
// to create phi-defs.
- MachineBasicBlock *IdxMBB = lis_.getMBBFromIndex(Idx);
+ if (simple) *simple = false;
+ MachineBasicBlock *IdxMBB = LIS.getMBBFromIndex(Idx);
assert(IdxMBB && "No MBB at Idx");
// Is there a def in the same MBB we can extend?
@@ -409,157 +270,260 @@ VNInfo *LiveIntervalMap::mapValue(const VNInfo *ParentVNI, SlotIndex Idx) {
// Now for the fun part. We know that ParentVNI potentially has multiple defs,
// and we may need to create even more phi-defs to preserve VNInfo SSA form.
- // Perform a depth-first search for predecessor blocks where we know the
- // dominating VNInfo. Insert phi-def VNInfos along the path back to IdxMBB.
-
- // Track MBBs where we have created or learned the dominating value.
- // This may change during the DFS as we create new phi-defs.
- typedef DenseMap<MachineBasicBlock*, VNInfo*> MBBValueMap;
- MBBValueMap DomValue;
-
- for (idf_iterator<MachineBasicBlock*>
- IDFI = idf_begin(IdxMBB),
- IDFE = idf_end(IdxMBB); IDFI != IDFE;) {
- MachineBasicBlock *MBB = *IDFI;
- SlotIndex End = lis_.getMBBEndIdx(MBB);
-
- // We are operating on the restricted CFG where ParentVNI is live.
- if (parentli_.getVNInfoAt(End.getPrevSlot()) != ParentVNI) {
- IDFI.skipChildren();
- continue;
- }
-
- // Do we have a dominating value in this block?
- VNInfo *VNI = extendTo(MBB, End);
- if (!VNI) {
- ++IDFI;
- continue;
+ // Perform a search for all predecessor blocks where we know the dominating
+ // VNInfo. Insert phi-def VNInfos along the path back to IdxMBB.
+ DEBUG(dbgs() << "\n Reaching defs for BB#" << IdxMBB->getNumber()
+ << " at " << Idx << " in " << *LI << '\n');
+
+ // Blocks where LI should be live-in.
+ SmallVector<MachineDomTreeNode*, 16> LiveIn;
+ LiveIn.push_back(MDT[IdxMBB]);
+
+ // Using LiveOutCache as a visited set, perform a BFS for all reaching defs.
+ for (unsigned i = 0; i != LiveIn.size(); ++i) {
+ MachineBasicBlock *MBB = LiveIn[i]->getBlock();
+ for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI) {
+ MachineBasicBlock *Pred = *PI;
+ // Is this a known live-out block?
+ std::pair<LiveOutMap::iterator,bool> LOIP =
+ LiveOutCache.insert(std::make_pair(Pred, LiveOutPair()));
+ // Yes, we have been here before.
+ if (!LOIP.second) {
+ DEBUG(if (VNInfo *VNI = LOIP.first->second.first)
+ dbgs() << " known valno #" << VNI->id
+ << " at BB#" << Pred->getNumber() << '\n');
+ continue;
+ }
+
+ // Does Pred provide a live-out value?
+ SlotIndex Last = LIS.getMBBEndIdx(Pred).getPrevSlot();
+ if (VNInfo *VNI = extendTo(Pred, Last)) {
+ MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(VNI->def);
+ DEBUG(dbgs() << " found valno #" << VNI->id
+ << " from BB#" << DefMBB->getNumber()
+ << " at BB#" << Pred->getNumber() << '\n');
+ LiveOutPair &LOP = LOIP.first->second;
+ LOP.first = VNI;
+ LOP.second = MDT[DefMBB];
+ continue;
+ }
+ // No, we need a live-in value for Pred as well.
+ if (Pred != IdxMBB)
+ LiveIn.push_back(MDT[Pred]);
}
+ }
- // Yes, VNI dominates MBB. Track the path back to IdxMBB, creating phi-defs
- // as needed along the way.
- for (unsigned PI = IDFI.getPathLength()-1; PI != 0; --PI) {
- // Start from MBB's immediate successor. End at IdxMBB.
- MachineBasicBlock *Succ = IDFI.getPath(PI-1);
- std::pair<MBBValueMap::iterator, bool> InsP =
- DomValue.insert(MBBValueMap::value_type(Succ, VNI));
-
- // This is the first time we backtrack to Succ.
- if (InsP.second)
- continue;
-
- // We reached Succ again with the same VNI. Nothing is going to change.
- VNInfo *OVNI = InsP.first->second;
- if (OVNI == VNI)
- break;
+ // We may need to add phi-def values to preserve the SSA form.
+ // This is essentially the same iterative algorithm that SSAUpdater uses,
+ // except we already have a dominator tree, so we don't have to recompute it.
+ VNInfo *IdxVNI = 0;
+ unsigned Changes;
+ do {
+ Changes = 0;
+ DEBUG(dbgs() << " Iterating over " << LiveIn.size() << " blocks.\n");
+ // Propagate live-out values down the dominator tree, inserting phi-defs when
+ // necessary. Since LiveIn was created by a BFS, going backwards makes it more
+ // likely for us to visit immediate dominators before their children.
+ for (unsigned i = LiveIn.size(); i; --i) {
+ MachineDomTreeNode *Node = LiveIn[i-1];
+ MachineBasicBlock *MBB = Node->getBlock();
+ MachineDomTreeNode *IDom = Node->getIDom();
+ LiveOutPair IDomValue;
+ // We need a live-in value to a block with no immediate dominator?
+ // This is probably an unreachable block that has survived somehow.
+ bool needPHI = !IDom;
+
+ // Get the IDom live-out value.
+ if (!needPHI) {
+ LiveOutMap::iterator I = LiveOutCache.find(IDom->getBlock());
+ if (I != LiveOutCache.end())
+ IDomValue = I->second;
+ else
+ // If IDom is outside our set of live-out blocks, there must be new
+ // defs, and we need a phi-def here.
+ needPHI = true;
+ }
- // Succ already has a phi-def. No need to continue.
- SlotIndex Start = lis_.getMBBStartIdx(Succ);
- if (OVNI->def == Start)
- break;
+ // IDom dominates all of our predecessors, but it may not be the immediate
+ // dominator. Check if any of them have live-out values that are properly
+ // dominated by IDom. If so, we need a phi-def here.
+ if (!needPHI) {
+ for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI) {
+ LiveOutPair Value = LiveOutCache[*PI];
+ if (!Value.first || Value.first == IDomValue.first)
+ continue;
+ // This predecessor is carrying something other than IDomValue.
+ // It could be because IDomValue hasn't propagated yet, or it could be
+ // because MBB is in the dominance frontier of that value.
+ if (MDT.dominates(IDom, Value.second)) {
+ needPHI = true;
+ break;
+ }
+ }
+ }
- // We have a collision between the old and new VNI at Succ. That means
- // neither dominates and we need a new phi-def.
- VNI = li_.getNextValue(Start, 0, true, lis_.getVNInfoAllocator());
- VNI->setIsPHIDef(true);
- InsP.first->second = VNI;
-
- // Replace OVNI with VNI in the remaining path.
- for (; PI > 1 ; --PI) {
- MBBValueMap::iterator I = DomValue.find(IDFI.getPath(PI-2));
- if (I == DomValue.end() || I->second != OVNI)
- break;
- I->second = VNI;
+ // Create a phi-def if required.
+ if (needPHI) {
+ ++Changes;
+ SlotIndex Start = LIS.getMBBStartIdx(MBB);
+ VNInfo *VNI = LI->getNextValue(Start, 0, LIS.getVNInfoAllocator());
+ VNI->setIsPHIDef(true);
+ DEBUG(dbgs() << " - BB#" << MBB->getNumber()
+ << " phi-def #" << VNI->id << " at " << Start << '\n');
+ // We no longer need LI to be live-in.
+ LiveIn.erase(LiveIn.begin()+(i-1));
+ // Blocks in LiveIn are either IdxMBB, or have a value live-through.
+ if (MBB == IdxMBB)
+ IdxVNI = VNI;
+ // Check if we need to update live-out info.
+ LiveOutMap::iterator I = LiveOutCache.find(MBB);
+ if (I == LiveOutCache.end() || I->second.second == Node) {
+ // We already have a live-out defined in MBB, so this must be IdxMBB.
+ assert(MBB == IdxMBB && "Adding phi-def to known live-out");
+ LI->addRange(LiveRange(Start, Idx.getNextSlot(), VNI));
+ } else {
+ // This phi-def is also live-out, so color the whole block.
+ LI->addRange(LiveRange(Start, LIS.getMBBEndIdx(MBB), VNI));
+ I->second = LiveOutPair(VNI, Node);
+ }
+ } else if (IDomValue.first) {
+ // No phi-def here. Remember incoming value for IdxMBB.
+ if (MBB == IdxMBB)
+ IdxVNI = IDomValue.first;
+ // Propagate IDomValue if needed:
+ // MBB is live-out and doesn't define its own value.
+ LiveOutMap::iterator I = LiveOutCache.find(MBB);
+ if (I != LiveOutCache.end() && I->second.second != Node &&
+ I->second.first != IDomValue.first) {
+ ++Changes;
+ I->second = IDomValue;
+ DEBUG(dbgs() << " - BB#" << MBB->getNumber()
+ << " idom valno #" << IDomValue.first->id
+ << " from BB#" << IDom->getBlock()->getNumber() << '\n');
+ }
}
}
+ DEBUG(dbgs() << " - made " << Changes << " changes.\n");
+ } while (Changes);
- // No need to search the children, we found a dominating value.
- IDFI.skipChildren();
- }
+ assert(IdxVNI && "Didn't find value for Idx");
- // The search should at least find a dominating value for IdxMBB.
- assert(!DomValue.empty() && "Couldn't find a reaching definition");
+#ifndef NDEBUG
+ // Check the LiveOutCache invariants.
+ for (LiveOutMap::iterator I = LiveOutCache.begin(), E = LiveOutCache.end();
+ I != E; ++I) {
+ assert(I->first && "Null MBB entry in cache");
+ assert(I->second.first && "Null VNInfo in cache");
+ assert(I->second.second && "Null DomTreeNode in cache");
+ if (I->second.second->getBlock() == I->first)
+ continue;
+ for (MachineBasicBlock::pred_iterator PI = I->first->pred_begin(),
+ PE = I->first->pred_end(); PI != PE; ++PI)
+ assert(LiveOutCache.lookup(*PI) == I->second && "Bad invariant");
+ }
+#endif
- // Since we went through the trouble of a full DFS visiting all reaching defs,
- // the values in DomValue are now accurate. No more phi-defs are needed for
- // these blocks, so we can color the live ranges.
+ // Since we went through the trouble of a full BFS visiting all reaching defs,
+ // the values in LiveIn are now accurate. No more phi-defs are needed
+ // for these blocks, so we can color the live ranges.
// This makes the next mapValue call much faster.
- VNInfo *IdxVNI = 0;
- for (MBBValueMap::iterator I = DomValue.begin(), E = DomValue.end(); I != E;
- ++I) {
- MachineBasicBlock *MBB = I->first;
- VNInfo *VNI = I->second;
- SlotIndex Start = lis_.getMBBStartIdx(MBB);
- if (MBB == IdxMBB) {
- // Don't add full liveness to IdxMBB, stop at Idx.
- if (Start != Idx)
- li_.addRange(LiveRange(Start, Idx, VNI));
- // The caller had better add some liveness to IdxVNI, or it leaks.
- IdxVNI = VNI;
- } else
- li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB), VNI));
+ for (unsigned i = 0, e = LiveIn.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = LiveIn[i]->getBlock();
+ SlotIndex Start = LIS.getMBBStartIdx(MBB);
+ VNInfo *VNI = LiveOutCache.lookup(MBB).first;
+
+ // Anything in LiveIn other than IdxMBB is live-through.
+ // In IdxMBB, we should stop at Idx unless the same value is live-out.
+ if (MBB == IdxMBB && IdxVNI != VNI)
+ LI->addRange(LiveRange(Start, Idx.getNextSlot(), IdxVNI));
+ else
+ LI->addRange(LiveRange(Start, LIS.getMBBEndIdx(MBB), VNI));
}
- assert(IdxVNI && "Didn't find value for Idx");
return IdxVNI;
}
-// extendTo - Find the last li_ value defined in MBB at or before Idx. The
-// parentli_ is assumed to be live at Idx. Extend the live range to Idx.
+#ifndef NDEBUG
+void LiveIntervalMap::dumpCache() {
+ for (LiveOutMap::iterator I = LiveOutCache.begin(), E = LiveOutCache.end();
+ I != E; ++I) {
+ assert(I->first && "Null MBB entry in cache");
+ assert(I->second.first && "Null VNInfo in cache");
+ assert(I->second.second && "Null DomTreeNode in cache");
+ dbgs() << " cache: BB#" << I->first->getNumber()
+ << " has valno #" << I->second.first->id << " from BB#"
+ << I->second.second->getBlock()->getNumber() << ", preds";
+ for (MachineBasicBlock::pred_iterator PI = I->first->pred_begin(),
+ PE = I->first->pred_end(); PI != PE; ++PI)
+ dbgs() << " BB#" << (*PI)->getNumber();
+ dbgs() << '\n';
+ }
+ dbgs() << " cache: " << LiveOutCache.size() << " entries.\n";
+}
+#endif
+
+// extendTo - Find the last LI value defined in MBB at or before Idx. The
+// ParentLI is assumed to be live at Idx. Extend the live range to Idx.
// Return the found VNInfo, or NULL.
-VNInfo *LiveIntervalMap::extendTo(MachineBasicBlock *MBB, SlotIndex Idx) {
- LiveInterval::iterator I = std::upper_bound(li_.begin(), li_.end(), Idx);
- if (I == li_.begin())
+VNInfo *LiveIntervalMap::extendTo(const MachineBasicBlock *MBB, SlotIndex Idx) {
+ assert(LI && "call reset first");
+ LiveInterval::iterator I = std::upper_bound(LI->begin(), LI->end(), Idx);
+ if (I == LI->begin())
return 0;
--I;
- if (I->start < lis_.getMBBStartIdx(MBB))
+ if (I->end <= LIS.getMBBStartIdx(MBB))
return 0;
- if (I->end < Idx)
- I->end = Idx;
+ if (I->end <= Idx)
+ I->end = Idx.getNextSlot();
return I->valno;
}
-// addSimpleRange - Add a simple range from parentli_ to li_.
+// addSimpleRange - Add a simple range from ParentLI to LI.
// ParentVNI must be live in the [Start;End) interval.
void LiveIntervalMap::addSimpleRange(SlotIndex Start, SlotIndex End,
const VNInfo *ParentVNI) {
- VNInfo *VNI = mapValue(ParentVNI, Start);
- // A simple mappoing is easy.
- if (VNI->def == ParentVNI->def) {
- li_.addRange(LiveRange(Start, End, VNI));
+ assert(LI && "call reset first");
+ bool simple;
+ VNInfo *VNI = mapValue(ParentVNI, Start, &simple);
+ // A simple mapping is easy.
+ if (simple) {
+ LI->addRange(LiveRange(Start, End, VNI));
return;
}
// ParentVNI is a complex value. We must map per MBB.
- MachineFunction::iterator MBB = lis_.getMBBFromIndex(Start);
- MachineFunction::iterator MBBE = lis_.getMBBFromIndex(End);
+ MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start);
+ MachineFunction::iterator MBBE = LIS.getMBBFromIndex(End.getPrevSlot());
if (MBB == MBBE) {
- li_.addRange(LiveRange(Start, End, VNI));
+ LI->addRange(LiveRange(Start, End, VNI));
return;
}
// First block.
- li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB), VNI));
+ LI->addRange(LiveRange(Start, LIS.getMBBEndIdx(MBB), VNI));
// Run sequence of full blocks.
for (++MBB; MBB != MBBE; ++MBB) {
- Start = lis_.getMBBStartIdx(MBB);
- li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB),
- mapValue(ParentVNI, Start)));
+ Start = LIS.getMBBStartIdx(MBB);
+ LI->addRange(LiveRange(Start, LIS.getMBBEndIdx(MBB),
+ mapValue(ParentVNI, Start)));
}
// Final block.
- Start = lis_.getMBBStartIdx(MBB);
+ Start = LIS.getMBBStartIdx(MBB);
if (Start != End)
- li_.addRange(LiveRange(Start, End, mapValue(ParentVNI, Start)));
+ LI->addRange(LiveRange(Start, End, mapValue(ParentVNI, Start)));
}
-/// addRange - Add live ranges to li_ where [Start;End) intersects parentli_.
+/// addRange - Add live ranges to LI where [Start;End) intersects ParentLI.
/// All needed values whose def is not inside [Start;End) must be defined
/// beforehand so mapValue will work.
void LiveIntervalMap::addRange(SlotIndex Start, SlotIndex End) {
- LiveInterval::const_iterator B = parentli_.begin(), E = parentli_.end();
+ assert(LI && "call reset first");
+ LiveInterval::const_iterator B = ParentLI.begin(), E = ParentLI.end();
LiveInterval::const_iterator I = std::lower_bound(B, E, Start);
// Check if --I begins before Start and overlaps.
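
The per-block walk in addSimpleRange above follows a common decomposition:
a partial first block, whole middle blocks, then a partial final block. A
generic sketch of that walk, with a hypothetical BlockEnd helper standing in
for LiveIntervals::getMBBEndIdx:

    // Split the half-open range [Start;End) at block boundaries and visit
    // each piece. BlockEnd(Pos) is assumed to return the end index of the
    // block containing Pos, which is always greater than Pos.
    static void forEachBlockPiece(unsigned Start, unsigned End,
                                  unsigned (*BlockEnd)(unsigned),
                                  void (*Visit)(unsigned From, unsigned To)) {
      unsigned S = Start;
      while (S < End) {
        unsigned E = BlockEnd(S);     // end of the block containing S
        Visit(S, E < End ? E : End);  // clamp the final piece at End
        S = E;                        // the next block starts here
      }
    }
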
@@ -575,403 +539,374 @@ void LiveIntervalMap::addRange(SlotIndex Start, SlotIndex End) {
addSimpleRange(I->start, std::min(End, I->end), I->valno);
}
+
//===----------------------------------------------------------------------===//
// Split Editor
//===----------------------------------------------------------------------===//
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
-SplitEditor::SplitEditor(SplitAnalysis &sa, LiveIntervals &lis, VirtRegMap &vrm,
- SmallVectorImpl<LiveInterval*> &intervals)
- : sa_(sa), lis_(lis), vrm_(vrm),
- mri_(vrm.getMachineFunction().getRegInfo()),
- tii_(*vrm.getMachineFunction().getTarget().getInstrInfo()),
- curli_(sa_.getCurLI()),
- dupli_(0), openli_(0),
- intervals_(intervals),
- firstInterval(intervals_.size())
+SplitEditor::SplitEditor(SplitAnalysis &sa,
+ LiveIntervals &lis,
+ VirtRegMap &vrm,
+ MachineDominatorTree &mdt,
+ LiveRangeEdit &edit)
+ : SA(sa), LIS(lis), VRM(vrm),
+ MRI(vrm.getMachineFunction().getRegInfo()),
+ MDT(mdt),
+ TII(*vrm.getMachineFunction().getTarget().getInstrInfo()),
+ TRI(*vrm.getMachineFunction().getTarget().getRegisterInfo()),
+ Edit(edit),
+ OpenIdx(0),
+ RegAssign(Allocator)
{
- assert(curli_ && "SplitEditor created from empty SplitAnalysis");
-
- // Make sure curli_ is assigned a stack slot, so all our intervals get the
- // same slot as curli_.
- if (vrm_.getStackSlot(curli_->reg) == VirtRegMap::NO_STACK_SLOT)
- vrm_.assignVirt2StackSlot(curli_->reg);
-
+ // We don't need an AliasAnalysis since we will only be performing
+ // cheap-as-a-copy remats anyway.
+ Edit.anyRematerializable(LIS, TII, 0);
}
-LiveInterval *SplitEditor::createInterval() {
- unsigned curli = sa_.getCurLI()->reg;
- unsigned Reg = mri_.createVirtualRegister(mri_.getRegClass(curli));
- LiveInterval &Intv = lis_.getOrCreateInterval(Reg);
- vrm_.grow();
- vrm_.assignVirt2StackSlot(Reg, vrm_.getStackSlot(curli));
- return &Intv;
+void SplitEditor::dump() const {
+ if (RegAssign.empty()) {
+ dbgs() << " empty\n";
+ return;
+ }
+
+ for (RegAssignMap::const_iterator I = RegAssign.begin(); I.valid(); ++I)
+ dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value();
+ dbgs() << '\n';
}
-LiveInterval *SplitEditor::getDupLI() {
- if (!dupli_) {
- // Create an interval for dupli that is a copy of curli.
- dupli_ = createInterval();
- dupli_->Copy(*curli_, &mri_, lis_.getVNInfoAllocator());
+VNInfo *SplitEditor::defFromParent(unsigned RegIdx,
+ VNInfo *ParentVNI,
+ SlotIndex UseIdx,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ MachineInstr *CopyMI = 0;
+ SlotIndex Def;
+ LiveInterval *LI = Edit.get(RegIdx);
+
+ // Attempt cheap-as-a-copy rematerialization.
+ LiveRangeEdit::Remat RM(ParentVNI);
+ if (Edit.canRematerializeAt(RM, UseIdx, true, LIS)) {
+ Def = Edit.rematerializeAt(MBB, I, LI->reg, RM, LIS, TII, TRI);
+ } else {
+ // Can't remat, just insert a copy from parent.
+ CopyMI = BuildMI(MBB, I, DebugLoc(), TII.get(TargetOpcode::COPY), LI->reg)
+ .addReg(Edit.getReg());
+ Def = LIS.InsertMachineInstrInMaps(CopyMI).getDefIndex();
}
- return dupli_;
-}
-VNInfo *SplitEditor::mapValue(const VNInfo *curliVNI) {
- VNInfo *&VNI = valueMap_[curliVNI];
- if (!VNI)
- VNI = openli_->createValueCopy(curliVNI, lis_.getVNInfoAllocator());
- return VNI;
-}
+ // Define the value in Reg.
+ VNInfo *VNI = LIMappers[RegIdx].defValue(ParentVNI, Def);
+ VNI->setCopy(CopyMI);
-/// Insert a COPY instruction curli -> li. Allocate a new value from li
-/// defined by the COPY. Note that rewrite() will deal with the curli
-/// register, so this function can be used to copy from any interval - openli,
-/// curli, or dupli.
-VNInfo *SplitEditor::insertCopy(LiveInterval &LI,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) {
- MachineInstr *MI = BuildMI(MBB, I, DebugLoc(), tii_.get(TargetOpcode::COPY),
- LI.reg).addReg(curli_->reg);
- SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
- return LI.getNextValue(DefIdx, MI, true, lis_.getVNInfoAllocator());
+ // Add minimal liveness for the new value.
+ Edit.get(RegIdx)->addRange(LiveRange(Def, Def.getNextSlot(), VNI));
+ return VNI;
}
/// Create a new virtual register and live interval.
void SplitEditor::openIntv() {
- assert(!openli_ && "Previous LI not closed before openIntv");
- openli_ = createInterval();
- intervals_.push_back(openli_);
- liveThrough_ = false;
-}
+ assert(!OpenIdx && "Previous LI not closed before openIntv");
-/// enterIntvBefore - Enter openli before the instruction at Idx. If curli is
-/// not live before Idx, a COPY is not inserted.
-void SplitEditor::enterIntvBefore(SlotIndex Idx) {
- assert(openli_ && "openIntv not called before enterIntvBefore");
-
- // Copy from curli_ if it is live.
- if (VNInfo *CurVNI = curli_->getVNInfoAt(Idx.getUseIndex())) {
- MachineInstr *MI = lis_.getInstructionFromIndex(Idx);
- assert(MI && "enterIntvBefore called with invalid index");
- VNInfo *VNI = insertCopy(*openli_, *MI->getParent(), MI);
- openli_->addRange(LiveRange(VNI->def, Idx.getDefIndex(), VNI));
-
- // Make sure CurVNI is properly mapped.
- VNInfo *&mapVNI = valueMap_[CurVNI];
- // We dont have SSA update yet, so only one entry per value is allowed.
- assert(!mapVNI && "enterIntvBefore called more than once for the same value");
- mapVNI = VNI;
+ // Create the complement as index 0.
+ if (Edit.empty()) {
+ Edit.create(MRI, LIS, VRM);
+ LIMappers.push_back(LiveIntervalMap(LIS, MDT, Edit.getParent()));
+ LIMappers.back().reset(Edit.get(0));
}
- DEBUG(dbgs() << " enterIntvBefore " << Idx << ": " << *openli_ << '\n');
-}
-/// enterIntvAtEnd - Enter openli at the end of MBB.
-/// PhiMBB is a successor inside openli where a PHI value is created.
-/// Currently, all entries must share the same PhiMBB.
-void SplitEditor::enterIntvAtEnd(MachineBasicBlock &A, MachineBasicBlock &B) {
- assert(openli_ && "openIntv not called before enterIntvAtEnd");
-
- SlotIndex EndA = lis_.getMBBEndIdx(&A);
- VNInfo *CurVNIA = curli_->getVNInfoAt(EndA.getPrevIndex());
- if (!CurVNIA) {
- DEBUG(dbgs() << " enterIntvAtEnd, curli not live out of BB#"
- << A.getNumber() << ".\n");
- return;
- }
+ // Create the open interval.
+ OpenIdx = Edit.size();
+ Edit.create(MRI, LIS, VRM);
+ LIMappers.push_back(LiveIntervalMap(LIS, MDT, Edit.getParent()));
+ LIMappers[OpenIdx].reset(Edit.get(OpenIdx));
+}
- // Add a phi kill value and live range out of A.
- VNInfo *VNIA = insertCopy(*openli_, A, A.getFirstTerminator());
- openli_->addRange(LiveRange(VNIA->def, EndA, VNIA));
-
- // FIXME: If this is the only entry edge, we don't need the extra PHI value.
- // FIXME: If there are multiple entry blocks (so not a loop), we need proper
- // SSA update.
-
- // Now look at the start of B.
- SlotIndex StartB = lis_.getMBBStartIdx(&B);
- SlotIndex EndB = lis_.getMBBEndIdx(&B);
- const LiveRange *CurB = curli_->getLiveRangeContaining(StartB);
- if (!CurB) {
- DEBUG(dbgs() << " enterIntvAtEnd: curli not live in to BB#"
- << B.getNumber() << ".\n");
- return;
+SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
+ assert(OpenIdx && "openIntv not called before enterIntvBefore");
+ DEBUG(dbgs() << " enterIntvBefore " << Idx);
+ Idx = Idx.getBaseIndex();
+ VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Idx);
+ if (!ParentVNI) {
+ DEBUG(dbgs() << ": not live\n");
+ return Idx;
}
+ DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+ MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
+ assert(MI && "enterIntvBefore called with invalid index");
- VNInfo *VNIB = openli_->getVNInfoAt(StartB);
- if (!VNIB) {
- // Create a phi value.
- VNIB = openli_->getNextValue(SlotIndex(StartB, true), 0, false,
- lis_.getVNInfoAllocator());
- VNIB->setIsPHIDef(true);
- VNInfo *&mapVNI = valueMap_[CurB->valno];
- if (mapVNI) {
- // Multiple copies - must create PHI value.
- abort();
- } else {
- // This is the first copy of dupLR. Mark the mapping.
- mapVNI = VNIB;
- }
+ VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(), MI);
+ return VNI->def;
+}
+SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
+ assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
+ SlotIndex End = LIS.getMBBEndIdx(&MBB);
+ SlotIndex Last = End.getPrevSlot();
+ DEBUG(dbgs() << " enterIntvAtEnd BB#" << MBB.getNumber() << ", " << Last);
+ VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Last);
+ if (!ParentVNI) {
+ DEBUG(dbgs() << ": not live\n");
+ return End;
}
-
- DEBUG(dbgs() << " enterIntvAtEnd: " << *openli_ << '\n');
+ DEBUG(dbgs() << ": valno " << ParentVNI->id);
+ VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
+ LIS.getLastSplitPoint(Edit.getParent(), &MBB));
+ RegAssign.insert(VNI->def, End, OpenIdx);
+ DEBUG(dump());
+ return VNI->def;
}
-/// useIntv - indicate that all instructions in MBB should use openli.
+/// useIntv - indicate that all instructions in MBB should use OpenLI.
void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
- useIntv(lis_.getMBBStartIdx(&MBB), lis_.getMBBEndIdx(&MBB));
+ useIntv(LIS.getMBBStartIdx(&MBB), LIS.getMBBEndIdx(&MBB));
}
void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
- assert(openli_ && "openIntv not called before useIntv");
+ assert(OpenIdx && "openIntv not called before useIntv");
+ DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
+ RegAssign.insert(Start, End, OpenIdx);
+ DEBUG(dump());
+}
- // Map the curli values from the interval into openli_
- LiveInterval::const_iterator B = curli_->begin(), E = curli_->end();
- LiveInterval::const_iterator I = std::lower_bound(B, E, Start);
+SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
+ assert(OpenIdx && "openIntv not called before leaveIntvAfter");
+ DEBUG(dbgs() << " leaveIntvAfter " << Idx);
- if (I != B) {
- --I;
- // I begins before Start, but overlaps.
- if (I->end > Start)
- openli_->addRange(LiveRange(Start, std::min(End, I->end),
- mapValue(I->valno)));
- ++I;
+ // The interval must be live beyond the instruction at Idx.
+ Idx = Idx.getBoundaryIndex();
+ VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Idx);
+ if (!ParentVNI) {
+ DEBUG(dbgs() << ": not live\n");
+ return Idx.getNextSlot();
}
+ DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
- // The remaining ranges begin after Start.
- for (;I != E && I->start < End; ++I)
- openli_->addRange(LiveRange(I->start, std::min(End, I->end),
- mapValue(I->valno)));
- DEBUG(dbgs() << " use [" << Start << ';' << End << "): " << *openli_
- << '\n');
+ MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
+ assert(MI && "No instruction at index");
+ VNInfo *VNI = defFromParent(0, ParentVNI, Idx, *MI->getParent(),
+ llvm::next(MachineBasicBlock::iterator(MI)));
+ return VNI->def;
}
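
The getBaseIndex/getUseIndex/getDefIndex/getBoundaryIndex calls in these enter/leave functions select distinct slots inside one instruction's index block; SlotIndexes of this vintage reserve four slots per instruction (load, use, def, store). A toy of that layout, showing why a use and a def at the same instruction can fall into different RegAssign ranges:

#include <cstdio>

enum Slot { LOAD = 0, USE = 1, DEF = 2, STORE = 3 }; // four slots per instr

static unsigned getBaseIndex(unsigned Idx)     { return (Idx & ~3u) + LOAD; }
static unsigned getUseIndex(unsigned Idx)      { return (Idx & ~3u) + USE; }
static unsigned getDefIndex(unsigned Idx)      { return (Idx & ~3u) + DEF; }
static unsigned getBoundaryIndex(unsigned Idx) { return (Idx & ~3u) + STORE; }

int main() {
  unsigned Instr = 7 * 4;  // index block of instruction #7
  std::printf("base %u use %u def %u boundary %u\n",
              getBaseIndex(Instr), getUseIndex(Instr),
              getDefIndex(Instr), getBoundaryIndex(Instr));
  // A RegAssign range starting at getDefIndex(I) covers the def but not the
  // use, so one instruction can read one register and define another.
  return 0;
}
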
-/// leaveIntvAfter - Leave openli after the instruction at Idx.
-void SplitEditor::leaveIntvAfter(SlotIndex Idx) {
- assert(openli_ && "openIntv not called before leaveIntvAfter");
+SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
+ assert(OpenIdx && "openIntv not called before leaveIntvBefore");
+ DEBUG(dbgs() << " leaveIntvBefore " << Idx);
- const LiveRange *CurLR = curli_->getLiveRangeContaining(Idx.getDefIndex());
- if (!CurLR || CurLR->end <= Idx.getBoundaryIndex()) {
- DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": not live\n");
- return;
+ // The interval must be live into the instruction at Idx.
+ Idx = Idx.getBoundaryIndex();
+ VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Idx);
+ if (!ParentVNI) {
+ DEBUG(dbgs() << ": not live\n");
+ return Idx.getNextSlot();
}
+ DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
- // Was this value of curli live through openli?
- if (!openli_->liveAt(CurLR->valno->def)) {
- DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": using external value\n");
- liveThrough_ = true;
- return;
- }
-
- // We are going to insert a back copy, so we must have a dupli_.
- LiveRange *DupLR = getDupLI()->getLiveRangeContaining(Idx.getDefIndex());
- assert(DupLR && "dupli not live into black, but curli is?");
-
- // Insert the COPY instruction.
- MachineBasicBlock::iterator I = lis_.getInstructionFromIndex(Idx);
- MachineInstr *MI = BuildMI(*I->getParent(), llvm::next(I), I->getDebugLoc(),
- tii_.get(TargetOpcode::COPY), dupli_->reg)
- .addReg(openli_->reg);
- SlotIndex CopyIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
- openli_->addRange(LiveRange(Idx.getDefIndex(), CopyIdx,
- mapValue(CurLR->valno)));
- DupLR->valno->def = CopyIdx;
- DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": " << *openli_ << '\n');
+ MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
+ assert(MI && "No instruction at index");
+ VNInfo *VNI = defFromParent(0, ParentVNI, Idx, *MI->getParent(), MI);
+ return VNI->def;
}
-/// leaveIntvAtTop - Leave the interval at the top of MBB.
-/// Currently, only one value can leave the interval.
-void SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
- assert(openli_ && "openIntv not called before leaveIntvAtTop");
-
- SlotIndex Start = lis_.getMBBStartIdx(&MBB);
- const LiveRange *CurLR = curli_->getLiveRangeContaining(Start);
-
- // Is curli even live-in to MBB?
- if (!CurLR) {
- DEBUG(dbgs() << " leaveIntvAtTop at " << Start << ": not live\n");
- return;
- }
-
- // Is curli defined by PHI at the beginning of MBB?
- bool isPHIDef = CurLR->valno->isPHIDef() &&
- CurLR->valno->def.getBaseIndex() == Start;
+SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
+ assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
+ SlotIndex Start = LIS.getMBBStartIdx(&MBB);
+ DEBUG(dbgs() << " leaveIntvAtTop BB#" << MBB.getNumber() << ", " << Start);
- // If MBB is using a value of curli that was defined outside the openli range,
- // we don't want to copy it back here.
- if (!isPHIDef && !openli_->liveAt(CurLR->valno->def)) {
- DEBUG(dbgs() << " leaveIntvAtTop at " << Start
- << ": using external value\n");
- liveThrough_ = true;
- return;
+ VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Start);
+ if (!ParentVNI) {
+ DEBUG(dbgs() << ": not live\n");
+ return Start;
}
- // We are going to insert a back copy, so we must have a dupli_.
- LiveRange *DupLR = getDupLI()->getLiveRangeContaining(Start);
- assert(DupLR && "dupli not live into black, but curli is?");
-
- // Insert the COPY instruction.
- MachineInstr *MI = BuildMI(MBB, MBB.begin(), DebugLoc(),
- tii_.get(TargetOpcode::COPY), dupli_->reg)
- .addReg(openli_->reg);
- SlotIndex Idx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
-
- // Adjust dupli and openli values.
- if (isPHIDef) {
- // dupli was already a PHI on entry to MBB. Simply insert an openli PHI,
- // and shift the dupli def down to the COPY.
- VNInfo *VNI = openli_->getNextValue(SlotIndex(Start, true), 0, false,
- lis_.getVNInfoAllocator());
- VNI->setIsPHIDef(true);
- openli_->addRange(LiveRange(VNI->def, Idx, VNI));
-
- dupli_->removeRange(Start, Idx);
- DupLR->valno->def = Idx;
- DupLR->valno->setIsPHIDef(false);
- } else {
- // The dupli value was defined somewhere inside the openli range.
- DEBUG(dbgs() << " leaveIntvAtTop source value defined at "
- << DupLR->valno->def << "\n");
- // FIXME: We may not need a PHI here if all predecessors have the same
- // value.
- VNInfo *VNI = openli_->getNextValue(SlotIndex(Start, true), 0, false,
- lis_.getVNInfoAllocator());
- VNI->setIsPHIDef(true);
- openli_->addRange(LiveRange(VNI->def, Idx, VNI));
-
- // FIXME: What if DupLR->valno is used by multiple exits? SSA Update.
-
- // closeIntv is going to remove the superfluous live ranges.
- DupLR->valno->def = Idx;
- DupLR->valno->setIsPHIDef(false);
- }
+ VNInfo *VNI = defFromParent(0, ParentVNI, Start, MBB,
+ MBB.SkipPHIsAndLabels(MBB.begin()));
+ RegAssign.insert(Start, VNI->def, OpenIdx);
+ DEBUG(dump());
+ return VNI->def;
+}
- DEBUG(dbgs() << " leaveIntvAtTop at " << Idx << ": " << *openli_ << '\n');
+void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
+ assert(OpenIdx && "openIntv not called before overlapIntv");
+ assert(Edit.getParent().getVNInfoAt(Start) ==
+ Edit.getParent().getVNInfoAt(End.getPrevSlot()) &&
+ "Parent changes value in extended range");
+ assert(Edit.get(0)->getVNInfoAt(Start) && "Start must come from leaveIntv*");
+ assert(LIS.getMBBFromIndex(Start) == LIS.getMBBFromIndex(End) &&
+ "Range cannot span basic blocks");
+
+ // Treat this as useIntv() for now. The complement interval will be extended
+ // as needed by mapValue().
+ DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
+ RegAssign.insert(Start, End, OpenIdx);
+ DEBUG(dump());
}
/// closeIntv - Indicate that we are done editing the currently open
/// LiveInterval, and ranges can be trimmed.
void SplitEditor::closeIntv() {
- assert(openli_ && "openIntv not called before closeIntv");
-
- DEBUG(dbgs() << " closeIntv cleaning up\n");
- DEBUG(dbgs() << " open " << *openli_ << '\n');
-
- if (liveThrough_) {
- DEBUG(dbgs() << " value live through region, leaving dupli as is.\n");
- } else {
- // live out with copies inserted, or killed by region. Either way we need to
- // remove the overlapping region from dupli.
- getDupLI();
- for (LiveInterval::iterator I = openli_->begin(), E = openli_->end();
- I != E; ++I) {
- dupli_->removeRange(I->start, I->end);
- }
- // FIXME: A block branching to the entry block may also branch elsewhere
- // curli is live. We need both openli and curli to be live in that case.
- DEBUG(dbgs() << " dup2 " << *dupli_ << '\n');
- }
- openli_ = 0;
- valueMap_.clear();
+ assert(OpenIdx && "openIntv not called before closeIntv");
+ OpenIdx = 0;
}
-/// rewrite - after all the new live ranges have been created, rewrite
-/// instructions using curli to use the new intervals.
-void SplitEditor::rewrite() {
- assert(!openli_ && "Previous LI not closed before rewrite");
- const LiveInterval *curli = sa_.getCurLI();
- for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(curli->reg),
- RE = mri_.reg_end(); RI != RE;) {
+/// rewriteAssigned - Rewrite all uses of Edit.getReg().
+void SplitEditor::rewriteAssigned() {
+ for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit.getReg()),
+ RE = MRI.reg_end(); RI != RE;) {
MachineOperand &MO = RI.getOperand();
MachineInstr *MI = MO.getParent();
++RI;
+ // LiveDebugVariables should have handled all DBG_VALUE instructions.
if (MI->isDebugValue()) {
DEBUG(dbgs() << "Zapping " << *MI);
- // FIXME: We can do much better with debug values.
MO.setReg(0);
continue;
}
- SlotIndex Idx = lis_.getInstructionIndex(MI);
- Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
- LiveInterval *LI = dupli_;
- for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
- LiveInterval *testli = intervals_[i];
- if (testli->liveAt(Idx)) {
- LI = testli;
- break;
- }
- }
- if (LI) {
- MO.setReg(LI->reg);
- sa_.removeUse(MI);
- DEBUG(dbgs() << " rewrite " << Idx << '\t' << *MI);
- }
- }
- // dupli_ goes in last, after rewriting.
- if (dupli_) {
- if (dupli_->empty()) {
- DEBUG(dbgs() << " dupli became empty?\n");
- lis_.removeInterval(dupli_->reg);
- dupli_ = 0;
- } else {
- dupli_->RenumberValues(lis_);
- intervals_.push_back(dupli_);
+ // <undef> operands don't really read the register, so just assign them to
+ // the complement.
+ if (MO.isUse() && MO.isUndef()) {
+ MO.setReg(Edit.get(0)->reg);
+ continue;
}
+
+ SlotIndex Idx = LIS.getInstructionIndex(MI);
+ Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
+
+ // Rewrite to the mapped register at Idx.
+ unsigned RegIdx = RegAssign.lookup(Idx);
+ MO.setReg(Edit.get(RegIdx)->reg);
+ DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t'
+ << Idx << ':' << RegIdx << '\t' << *MI);
+
+ // Extend liveness to Idx.
+ const VNInfo *ParentVNI = Edit.getParent().getVNInfoAt(Idx);
+ LIMappers[RegIdx].mapValue(ParentVNI, Idx);
}
+}
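
rewriteAssigned() retargets each operand with a point lookup into RegAssign at the operand's slot. A toy of the same shape, modeling the interval map with a std::map of half-open ranges; all names and numbers are illustrative:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct Operand { unsigned Slot; unsigned Reg; };

int main() {
  // start -> (end, register): [0;20) -> r100 (complement), [20;40) -> r101.
  typedef std::map<unsigned, std::pair<unsigned, unsigned> > Assignment;
  Assignment Assign;
  Assign[0]  = std::make_pair(20u, 100u);
  Assign[20] = std::make_pair(40u, 101u);

  Operand Init[] = { {5, 99}, {26, 99}, {38, 99} };
  std::vector<Operand> Ops(Init, Init + 3);

  for (size_t i = 0; i != Ops.size(); ++i) {
    // Find the range whose start is at or before the operand's slot.
    Assignment::iterator I = Assign.upper_bound(Ops[i].Slot);
    --I;
    if (Ops[i].Slot < I->second.first)  // inside [start;end)
      Ops[i].Reg = I->second.second;    // retarget the operand
    std::printf("slot %u -> r%u\n", Ops[i].Slot, Ops[i].Reg);
  }
  return 0;
}
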
- // Calculate spill weight and allocation hints for new intervals.
- VirtRegAuxInfo vrai(vrm_.getMachineFunction(), lis_, sa_.loops_);
- for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
- LiveInterval &li = *intervals_[i];
- vrai.CalculateRegClass(li.reg);
- vrai.CalculateWeightAndHint(li);
- DEBUG(dbgs() << " new interval " << mri_.getRegClass(li.reg)->getName()
- << ":" << li << '\n');
+/// rewriteComponents - Rewrite uses of Intvs[0] according to the ConEq mapping.
+void SplitEditor::rewriteComponents(const SmallVectorImpl<LiveInterval*> &Intvs,
+ const ConnectedVNInfoEqClasses &ConEq) {
+ for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Intvs[0]->reg),
+ RE = MRI.reg_end(); RI != RE;) {
+ MachineOperand &MO = RI.getOperand();
+ MachineInstr *MI = MO.getParent();
+ ++RI;
+ if (MO.isUse() && MO.isUndef())
+ continue;
+ // DBG_VALUE instructions should have been eliminated earlier.
+ SlotIndex Idx = LIS.getInstructionIndex(MI);
+ Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
+ DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t'
+ << Idx << ':');
+ const VNInfo *VNI = Intvs[0]->getVNInfoAt(Idx);
+ assert(VNI && "Interval not live at use.");
+ MO.setReg(Intvs[ConEq.getEqClass(VNI)]->reg);
+ DEBUG(dbgs() << VNI->id << '\t' << *MI);
}
}
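
rewriteComponents() and the Classify/Distribute calls in finish() below lean on ConnectedVNInfoEqClasses, which partitions an interval's values into connected components so each component can move to its own virtual register. A toy union-find showing the component count; how values become connected (phi joins, overlapping ranges) is elided:

#include <cstdio>
#include <vector>

// Plain union-find over value numbers.
struct EqClasses {
  std::vector<unsigned> Parent;
  explicit EqClasses(unsigned N) : Parent(N) {
    for (unsigned i = 0; i != N; ++i) Parent[i] = i;
  }
  unsigned find(unsigned X) {
    while (Parent[X] != X) X = Parent[X] = Parent[Parent[X]]; // path halving
    return X;
  }
  void join(unsigned A, unsigned B) { Parent[find(A)] = find(B); }
};

int main() {
  EqClasses EQ(4); // four values in one live interval
  EQ.join(0, 1);   // values 0 and 1 reach each other (e.g. via a phi)
  unsigned NumComp = 0;
  for (unsigned i = 0; i != 4; ++i)
    if (EQ.find(i) == i) ++NumComp;
  // {0,1} {2} {3}: three components, so the interval would be distributed
  // over three registers.
  std::printf("%u components\n", NumComp);
  return 0;
}
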
+void SplitEditor::finish() {
+ assert(OpenIdx == 0 && "Previous LI not closed before rewrite");
-//===----------------------------------------------------------------------===//
-// Loop Splitting
-//===----------------------------------------------------------------------===//
+ // At this point, the live intervals in Edit contain VNInfos corresponding to
+ // the inserted copies.
-bool SplitEditor::splitAroundLoop(const MachineLoop *Loop) {
- SplitAnalysis::LoopBlocks Blocks;
- sa_.getLoopBlocks(Loop, Blocks);
+ // Add the original defs from the parent interval.
+ for (LiveInterval::const_vni_iterator I = Edit.getParent().vni_begin(),
+ E = Edit.getParent().vni_end(); I != E; ++I) {
+ const VNInfo *ParentVNI = *I;
+ if (ParentVNI->isUnused())
+ continue;
+ LiveIntervalMap &LIM = LIMappers[RegAssign.lookup(ParentVNI->def)];
+ VNInfo *VNI = LIM.defValue(ParentVNI, ParentVNI->def);
+ LIM.getLI()->addRange(LiveRange(ParentVNI->def,
+ ParentVNI->def.getNextSlot(), VNI));
+ // Mark all values as complex to force liveness computation.
+ // This should really only be necessary for remat victims, but we are lazy.
+ LIM.markComplexMapped(ParentVNI);
+ }
- // Break critical edges as needed.
- SplitAnalysis::BlockPtrSet CriticalExits;
- sa_.getCriticalExits(Blocks, CriticalExits);
- assert(CriticalExits.empty() && "Cannot break critical exits yet");
+#ifndef NDEBUG
+ // Every new interval must have a def by now, otherwise the split is bogus.
+ for (LiveRangeEdit::iterator I = Edit.begin(), E = Edit.end(); I != E; ++I)
+ assert((*I)->hasAtLeastOneValue() && "Split interval has no value");
+#endif
+
+ // FIXME: Don't recompute the liveness of all values, infer it from the
+ // overlaps between the parent live interval and RegAssign.
+ // The mapValue algorithm is only necessary when:
+ // - The parent value maps to multiple defs, and new phis are needed, or
+ // - The value has been rematerialized before some uses, and we want to
+ // minimize the live range so it only reaches the remaining uses.
+ // All other values have simple liveness that can be computed from RegAssign
+ // and the parent live interval.
+
+ // Extend live ranges to be live-out for successor PHI values.
+ for (LiveInterval::const_vni_iterator I = Edit.getParent().vni_begin(),
+ E = Edit.getParent().vni_end(); I != E; ++I) {
+ const VNInfo *PHIVNI = *I;
+ if (PHIVNI->isUnused() || !PHIVNI->isPHIDef())
+ continue;
+ unsigned RegIdx = RegAssign.lookup(PHIVNI->def);
+ LiveIntervalMap &LIM = LIMappers[RegIdx];
+ MachineBasicBlock *MBB = LIS.getMBBFromIndex(PHIVNI->def);
+ DEBUG(dbgs() << " map phi in BB#" << MBB->getNumber() << '@' << PHIVNI->def
+ << " -> " << RegIdx << '\n');
+ for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
+ PE = MBB->pred_end(); PI != PE; ++PI) {
+ SlotIndex End = LIS.getMBBEndIdx(*PI).getPrevSlot();
+ DEBUG(dbgs() << " pred BB#" << (*PI)->getNumber() << '@' << End);
+ // The predecessor may not have a live-out value. That is OK, like an
+ // undef PHI operand.
+ if (VNInfo *VNI = Edit.getParent().getVNInfoAt(End)) {
+ DEBUG(dbgs() << " has parent valno #" << VNI->id << " live out\n");
+ assert(RegAssign.lookup(End) == RegIdx &&
+ "Different register assignment in phi predecessor");
+ LIM.mapValue(VNI, End);
+ }
+ else
+ DEBUG(dbgs() << " is not live-out\n");
+ }
+ DEBUG(dbgs() << " " << *LIM.getLI() << '\n');
+ }
- // Create new live interval for the loop.
- openIntv();
+ // Rewrite instructions.
+ rewriteAssigned();
- // Insert copies in the predecessors.
- for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Preds.begin(),
- E = Blocks.Preds.end(); I != E; ++I) {
- MachineBasicBlock &MBB = const_cast<MachineBasicBlock&>(**I);
- enterIntvAtEnd(MBB, *Loop->getHeader());
- }
+ // FIXME: Delete defs that were rematted everywhere.
- // Switch all loop blocks.
- for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Loop.begin(),
- E = Blocks.Loop.end(); I != E; ++I)
- useIntv(**I);
+ // Get rid of unused values and set phi-kill flags.
+ for (LiveRangeEdit::iterator I = Edit.begin(), E = Edit.end(); I != E; ++I)
+ (*I)->RenumberValues(LIS);
- // Insert back copies in the exit blocks.
- for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Exits.begin(),
- E = Blocks.Exits.end(); I != E; ++I) {
- MachineBasicBlock &MBB = const_cast<MachineBasicBlock&>(**I);
- leaveIntvAtTop(MBB);
+ // Now check if any registers were separated into multiple components.
+ ConnectedVNInfoEqClasses ConEQ(LIS);
+ for (unsigned i = 0, e = Edit.size(); i != e; ++i) {
+ // Don't use iterators, they are invalidated by create() below.
+ LiveInterval *li = Edit.get(i);
+ unsigned NumComp = ConEQ.Classify(li);
+ if (NumComp <= 1)
+ continue;
+ DEBUG(dbgs() << " " << NumComp << " components: " << *li << '\n');
+ SmallVector<LiveInterval*, 8> dups;
+ dups.push_back(li);
+ for (unsigned i = 1; i != NumComp; ++i)
+ dups.push_back(&Edit.create(MRI, LIS, VRM));
+ rewriteComponents(dups, ConEQ);
+ ConEQ.Distribute(&dups[0]);
}
- // Done.
- closeIntv();
- rewrite();
- return dupli_;
+ // Calculate spill weight and allocation hints for new intervals.
+ VirtRegAuxInfo vrai(VRM.getMachineFunction(), LIS, SA.Loops);
+ for (LiveRangeEdit::iterator I = Edit.begin(), E = Edit.end(); I != E; ++I){
+ LiveInterval &li = **I;
+ vrai.CalculateRegClass(li.reg);
+ vrai.CalculateWeightAndHint(li);
+ DEBUG(dbgs() << " new interval " << MRI.getRegClass(li.reg)->getName()
+ << ":" << li << '\n');
+ }
}
@@ -979,45 +914,50 @@ bool SplitEditor::splitAroundLoop(const MachineLoop *Loop) {
// Single Block Splitting
//===----------------------------------------------------------------------===//
-/// splitSingleBlocks - Split curli into a separate live interval inside each
-/// basic block in Blocks. Return true if curli has been completely replaced,
-/// false if curli is still intact, and needs to be spilled or split further.
-bool SplitEditor::splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks) {
- DEBUG(dbgs() << " splitSingleBlocks for " << Blocks.size() << " blocks.\n");
- // Determine the first and last instruction using curli in each block.
- typedef std::pair<SlotIndex,SlotIndex> IndexPair;
- typedef DenseMap<const MachineBasicBlock*,IndexPair> IndexPairMap;
- IndexPairMap MBBRange;
- for (SplitAnalysis::InstrPtrSet::const_iterator I = sa_.usingInstrs_.begin(),
- E = sa_.usingInstrs_.end(); I != E; ++I) {
- const MachineBasicBlock *MBB = (*I)->getParent();
- if (!Blocks.count(MBB))
+/// getMultiUseBlocks - if CurLI has more than one use in a basic block, it
+/// may be advantageous to split CurLI for the duration of the block.
+bool SplitAnalysis::getMultiUseBlocks(BlockPtrSet &Blocks) {
+ // If CurLI is local to one block, there is no point to splitting it.
+ if (LiveBlocks.size() <= 1)
+ return false;
+ // Add blocks with multiple uses.
+ for (unsigned i = 0, e = LiveBlocks.size(); i != e; ++i) {
+ const BlockInfo &BI = LiveBlocks[i];
+ if (!BI.Uses)
continue;
- SlotIndex Idx = lis_.getInstructionIndex(*I);
- DEBUG(dbgs() << " BB#" << MBB->getNumber() << '\t' << Idx << '\t' << **I);
- IndexPair &IP = MBBRange[MBB];
- if (!IP.first.isValid() || Idx < IP.first)
- IP.first = Idx;
- if (!IP.second.isValid() || Idx > IP.second)
- IP.second = Idx;
+ unsigned Instrs = UsingBlocks.lookup(BI.MBB);
+ if (Instrs <= 1)
+ continue;
+ if (Instrs == 2 && BI.LiveIn && BI.LiveOut && !BI.LiveThrough)
+ continue;
+ Blocks.insert(BI.MBB);
}
+ return !Blocks.empty();
+}
+
+/// splitSingleBlocks - Split CurLI into a separate live interval inside each
+/// basic block in Blocks.
+void SplitEditor::splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks) {
+ DEBUG(dbgs() << " splitSingleBlocks for " << Blocks.size() << " blocks.\n");
- // Create a new interval for each block.
- for (SplitAnalysis::BlockPtrSet::const_iterator I = Blocks.begin(),
- E = Blocks.end(); I != E; ++I) {
- IndexPair &IP = MBBRange[*I];
- DEBUG(dbgs() << " splitting for BB#" << (*I)->getNumber() << ": ["
- << IP.first << ';' << IP.second << ")\n");
- assert(IP.first.isValid() && IP.second.isValid());
+ for (unsigned i = 0, e = SA.LiveBlocks.size(); i != e; ++i) {
+ const SplitAnalysis::BlockInfo &BI = SA.LiveBlocks[i];
+ if (!BI.Uses || !Blocks.count(BI.MBB))
+ continue;
openIntv();
- enterIntvBefore(IP.first);
- useIntv(IP.first.getBaseIndex(), IP.second.getBoundaryIndex());
- leaveIntvAfter(IP.second);
+ SlotIndex SegStart = enterIntvBefore(BI.FirstUse);
+ if (BI.LastUse < BI.LastSplitPoint) {
+ useIntv(SegStart, leaveIntvAfter(BI.LastUse));
+ } else {
+ // The last use is after the last valid split point.
+ SlotIndex SegStop = leaveIntvBefore(BI.LastSplitPoint);
+ useIntv(SegStart, SegStop);
+ overlapIntv(SegStop, BI.LastUse);
+ }
closeIntv();
}
- rewrite();
- return dupli_;
+ finish();
}
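
The new branch in splitSingleBlocks() handles a use that falls after the block's last valid split point (for example an operand of the terminator): the open interval is ended at the split point and overlapIntv() keeps both registers live up to the last use, at the cost of doubled register pressure. A toy with plain slot numbers:

#include <cstdio>

int main() {
  // Illustrative slot numbers within one basic block.
  unsigned FirstUse = 10, LastUse = 46, LastSplitPoint = 40;
  unsigned SegStart = FirstUse;              // enterIntvBefore(FirstUse)
  if (LastUse < LastSplitPoint) {
    std::printf("useIntv [%u;%u), leave after last use\n", SegStart, LastUse);
  } else {
    // The last use is after the last valid split point: end the new interval
    // at the split point, then keep both registers live up to the last use.
    unsigned SegStop = LastSplitPoint;       // leaveIntvBefore(LastSplitPoint)
    std::printf("useIntv [%u;%u), overlapIntv [%u;%u)\n",
                SegStart, SegStop, SegStop, LastUse);
  }
  return 0;
}
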
@@ -1025,31 +965,29 @@ bool SplitEditor::splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks) {
// Sub Block Splitting
//===----------------------------------------------------------------------===//
-/// getBlockForInsideSplit - If curli is contained inside a single basic block,
+/// getBlockForInsideSplit - If CurLI is contained inside a single basic block,
/// and it would pay to subdivide the interval inside that block, return it.
/// Otherwise return NULL. The returned block can be passed to
/// SplitEditor::splitInsideBlock.
const MachineBasicBlock *SplitAnalysis::getBlockForInsideSplit() {
// The interval must be exclusive to one block.
- if (usingBlocks_.size() != 1)
+ if (UsingBlocks.size() != 1)
return 0;
// Don't do this for less than 4 instructions. We want to be sure that
// splitting actually reduces the instruction count per interval.
- if (usingInstrs_.size() < 4)
+ if (UsingInstrs.size() < 4)
return 0;
- return usingBlocks_.begin()->first;
+ return UsingBlocks.begin()->first;
}
-/// splitInsideBlock - Split curli into multiple intervals inside MBB. Return
-/// true if curli has been completely replaced, false if curli is still
-/// intact, and needs to be spilled or split further.
-bool SplitEditor::splitInsideBlock(const MachineBasicBlock *MBB) {
+/// splitInsideBlock - Split CurLI into multiple intervals inside MBB.
+void SplitEditor::splitInsideBlock(const MachineBasicBlock *MBB) {
SmallVector<SlotIndex, 32> Uses;
- Uses.reserve(sa_.usingInstrs_.size());
- for (SplitAnalysis::InstrPtrSet::const_iterator I = sa_.usingInstrs_.begin(),
- E = sa_.usingInstrs_.end(); I != E; ++I)
+ Uses.reserve(SA.UsingInstrs.size());
+ for (SplitAnalysis::InstrPtrSet::const_iterator I = SA.UsingInstrs.begin(),
+ E = SA.UsingInstrs.end(); I != E; ++I)
if ((*I)->getParent() == MBB)
- Uses.push_back(lis_.getInstructionIndex(*I));
+ Uses.push_back(LIS.getInstructionIndex(*I));
DEBUG(dbgs() << " splitInsideBlock BB#" << MBB->getNumber() << " for "
<< Uses.size() << " instructions.\n");
assert(Uses.size() >= 3 && "Need at least 3 instructions");
@@ -1077,21 +1015,16 @@ bool SplitEditor::splitInsideBlock(const MachineBasicBlock *MBB) {
// First interval before the gap. Don't create single-instr intervals.
if (bestPos > 1) {
openIntv();
- enterIntvBefore(Uses.front());
- useIntv(Uses.front().getBaseIndex(), Uses[bestPos-1].getBoundaryIndex());
- leaveIntvAfter(Uses[bestPos-1]);
+ useIntv(enterIntvBefore(Uses.front()), leaveIntvAfter(Uses[bestPos-1]));
closeIntv();
}
// Second interval after the gap.
if (bestPos < Uses.size()-1) {
openIntv();
- enterIntvBefore(Uses[bestPos]);
- useIntv(Uses[bestPos].getBaseIndex(), Uses.back().getBoundaryIndex());
- leaveIntvAfter(Uses.back());
+ useIntv(enterIntvBefore(Uses[bestPos]), leaveIntvAfter(Uses.back()));
closeIntv();
}
- rewrite();
- return dupli_;
+ finish();
}
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.h b/contrib/llvm/lib/CodeGen/SplitKit.h
index ddef746..5c34afd 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.h
+++ b/contrib/llvm/lib/CodeGen/SplitKit.h
@@ -1,4 +1,4 @@
-//===---------- SplitKit.cpp - Toolkit for splitting live ranges ----------===//
+//===-------- SplitKit.h - Toolkit for splitting live ranges ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,125 +12,132 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/SlotIndexes.h"
namespace llvm {
+class ConnectedVNInfoEqClasses;
class LiveInterval;
class LiveIntervals;
+class LiveRangeEdit;
class MachineInstr;
-class MachineLoop;
class MachineLoopInfo;
class MachineRegisterInfo;
class TargetInstrInfo;
+class TargetRegisterInfo;
class VirtRegMap;
class VNInfo;
+class raw_ostream;
+
+/// At some point we should just include MachineDominators.h:
+class MachineDominatorTree;
+template <class NodeT> class DomTreeNodeBase;
+typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
+
/// SplitAnalysis - Analyze a LiveInterval, looking for live range splitting
/// opportunities.
class SplitAnalysis {
public:
- const MachineFunction &mf_;
- const LiveIntervals &lis_;
- const MachineLoopInfo &loops_;
- const TargetInstrInfo &tii_;
+ const MachineFunction &MF;
+ const VirtRegMap &VRM;
+ const LiveIntervals &LIS;
+ const MachineLoopInfo &Loops;
+ const TargetInstrInfo &TII;
// Instructions using the current register.
typedef SmallPtrSet<const MachineInstr*, 16> InstrPtrSet;
- InstrPtrSet usingInstrs_;
+ InstrPtrSet UsingInstrs;
+
+ // Sorted slot indexes of using instructions.
+ SmallVector<SlotIndex, 8> UseSlots;
- // The number of instructions using curli in each basic block.
+ // The number of instructions using CurLI in each basic block.
typedef DenseMap<const MachineBasicBlock*, unsigned> BlockCountMap;
- BlockCountMap usingBlocks_;
+ BlockCountMap UsingBlocks;
+
+ /// Additional information about basic blocks where the current variable is
+ /// live. Such a block will look like one of these templates:
+ ///
+ /// 1. | o---x | Internal to block. Variable is only live in this block.
+ /// 2. |---x | Live-in, kill.
+ /// 3. | o---| Def, live-out.
+ /// 4. |---x o---| Live-in, kill, def, live-out.
+ /// 5. |---o---o---| Live-through with uses or defs.
+ /// 6. |-----------| Live-through without uses. Transparent.
+ ///
+ struct BlockInfo {
+ MachineBasicBlock *MBB;
+ SlotIndex FirstUse; ///< First instr using current reg.
+ SlotIndex LastUse; ///< Last instr using current reg.
+ SlotIndex Kill; ///< Interval end point inside block.
+ SlotIndex Def; ///< Interval start point inside block.
+ /// Last possible point for splitting live ranges.
+ SlotIndex LastSplitPoint;
+ bool Uses; ///< Current reg has uses or defs in block.
+ bool LiveThrough; ///< Live in whole block (Templ 5. or 6. above).
+ bool LiveIn; ///< Current reg is live in.
+ bool LiveOut; ///< Current reg is live out.
+
+ // Per-interference pattern scratch data.
+ bool OverlapEntry; ///< Interference overlaps entering interval.
+ bool OverlapExit; ///< Interference overlaps exiting interval.
+ };
- // The number of basic block using curli in each loop.
- typedef DenseMap<const MachineLoop*, unsigned> LoopCountMap;
- LoopCountMap usingLoops_;
+ /// Basic blocks where var is live. This array is parallel to
+ /// SpillConstraints.
+ SmallVector<BlockInfo, 8> LiveBlocks;
private:
// Current live interval.
- const LiveInterval *curli_;
+ const LiveInterval *CurLI;
- // Sumarize statistics by counting instructions using curli_.
+ // Summarize statistics by counting instructions using CurLI.
void analyzeUses();
+ /// calcLiveBlockInfo - Compute per-block information about CurLI.
+ void calcLiveBlockInfo();
+
/// canAnalyzeBranch - Return true if MBB ends in a branch that can be
/// analyzed.
bool canAnalyzeBranch(const MachineBasicBlock *MBB);
public:
- SplitAnalysis(const MachineFunction &mf, const LiveIntervals &lis,
+ SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
const MachineLoopInfo &mli);
- /// analyze - set curli to the specified interval, and analyze how it may be
+ /// analyze - set CurLI to the specified interval, and analyze how it may be
/// split.
void analyze(const LiveInterval *li);
- /// removeUse - Update statistics by noting that mi no longer uses curli.
- void removeUse(const MachineInstr *mi);
-
- const LiveInterval *getCurLI() { return curli_; }
-
/// clear - clear all data structures so SplitAnalysis is ready to analyze a
/// new interval.
void clear();
- typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;
- typedef SmallPtrSet<const MachineLoop*, 16> LoopPtrSet;
-
- // Sets of basic blocks surrounding a machine loop.
- struct LoopBlocks {
- BlockPtrSet Loop; // Blocks in the loop.
- BlockPtrSet Preds; // Loop predecessor blocks.
- BlockPtrSet Exits; // Loop exit blocks.
-
- void clear() {
- Loop.clear();
- Preds.clear();
- Exits.clear();
- }
- };
-
- // Calculate the block sets surrounding the loop.
- void getLoopBlocks(const MachineLoop *Loop, LoopBlocks &Blocks);
-
- /// LoopPeripheralUse - how is a variable used in and around a loop?
- /// Peripheral blocks are the loop predecessors and exit blocks.
- enum LoopPeripheralUse {
- ContainedInLoop, // All uses are inside the loop.
- SinglePeripheral, // At most one instruction per peripheral block.
- MultiPeripheral, // Multiple instructions in some peripheral blocks.
- OutsideLoop // Uses outside loop periphery.
- };
-
- /// analyzeLoopPeripheralUse - Return an enum describing how curli_ is used in
- /// and around the Loop.
- LoopPeripheralUse analyzeLoopPeripheralUse(const LoopBlocks&);
+ /// getParent - Return the last analyzed interval.
+ const LiveInterval &getParent() const { return *CurLI; }
- /// getCriticalExits - It may be necessary to partially break critical edges
- /// leaving the loop if an exit block has phi uses of curli. Collect the exit
- /// blocks that need special treatment into CriticalExits.
- void getCriticalExits(const LoopBlocks &Blocks, BlockPtrSet &CriticalExits);
+ /// hasUses - Return true if MBB has any uses of CurLI.
+ bool hasUses(const MachineBasicBlock *MBB) const {
+ return UsingBlocks.lookup(MBB);
+ }
- /// canSplitCriticalExits - Return true if it is possible to insert new exit
- /// blocks before the blocks in CriticalExits.
- bool canSplitCriticalExits(const LoopBlocks &Blocks,
- BlockPtrSet &CriticalExits);
+ typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;
- /// getBestSplitLoop - Return the loop where curli may best be split to a
- /// separate register, or NULL.
- const MachineLoop *getBestSplitLoop();
+ // Print a set of blocks with use counts.
+ void print(const BlockPtrSet&, raw_ostream&) const;
/// getMultiUseBlocks - Add basic blocks to Blocks that may benefit from
- /// having curli split to a new live interval. Return true if Blocks can be
+ /// having CurLI split to a new live interval. Return true if Blocks can be
/// passed to SplitEditor::splitSingleBlocks.
bool getMultiUseBlocks(BlockPtrSet &Blocks);
- /// getBlockForInsideSplit - If curli is contained inside a single basic block,
- /// and it wou pay to subdivide the interval inside that block, return it.
- /// Otherwise return NULL. The returned block can be passed to
+ /// getBlockForInsideSplit - If CurLI is contained inside a single basic
+ /// block, and it would pay to subdivide the interval inside that block,
+ /// return it. Otherwise return NULL. The returned block can be passed to
/// SplitEditor::splitInsideBlock.
const MachineBasicBlock *getBlockForInsideSplit();
};
@@ -140,58 +147,102 @@ public:
/// interval that is a subset. Insert phi-def values as needed. This class is
/// used by SplitEditor to create new smaller LiveIntervals.
///
-/// parentli_ is the larger interval, li_ is the subset interval. Every value
-/// in li_ corresponds to exactly one value in parentli_, and the live range
-/// of the value is contained within the live range of the parentli_ value.
-/// Values in parentli_ may map to any number of openli_ values, including 0.
+/// ParentLI is the larger interval, LI is the subset interval. Every value
+/// in LI corresponds to exactly one value in ParentLI, and the live range
+/// of the value is contained within the live range of the ParentLI value.
+/// Values in ParentLI may map to any number of OpenLI values, including 0.
class LiveIntervalMap {
- LiveIntervals &lis_;
+ LiveIntervals &LIS;
+ MachineDominatorTree &MDT;
// The parent interval is never changed.
- const LiveInterval &parentli_;
+ const LiveInterval &ParentLI;
- // The child interval's values are fully contained inside parentli_ values.
- LiveInterval &li_;
+ // The child interval's values are fully contained inside ParentLI values.
+ LiveInterval *LI;
typedef DenseMap<const VNInfo*, VNInfo*> ValueMap;
- // Map parentli_ values to simple values in li_ that are defined at the same
- // SlotIndex, or NULL for parentli_ values that have complex li_ defs.
+ // Map ParentLI values to simple values in LI that are defined at the same
+ // SlotIndex, or NULL for ParentLI values that have complex LI defs.
// Note there is a difference between values mapping to NULL (complex), and
// values not present (unknown/unmapped).
- ValueMap valueMap_;
-
- // extendTo - Find the last li_ value defined in MBB at or before Idx. The
- // parentli_ is assumed to be live at Idx. Extend the live range to Idx.
- // Return the found VNInfo, or NULL.
- VNInfo *extendTo(MachineBasicBlock *MBB, SlotIndex Idx);
-
- // addSimpleRange - Add a simple range from parentli_ to li_.
- // ParentVNI must be live in the [Start;End) interval.
- void addSimpleRange(SlotIndex Start, SlotIndex End, const VNInfo *ParentVNI);
+ ValueMap Values;
+
+ typedef std::pair<VNInfo*, MachineDomTreeNode*> LiveOutPair;
+ typedef DenseMap<MachineBasicBlock*,LiveOutPair> LiveOutMap;
+
+ // LiveOutCache - Map each basic block where LI is live out to the live-out
+ // value and its defining block. One of these conditions shall be true:
+ //
+ // 1. !LiveOutCache.count(MBB)
+ // 2. LiveOutCache[MBB].second.getNode() == MBB
+ // 3. forall P in preds(MBB): LiveOutCache[P] == LiveOutCache[MBB]
+ //
+ // This is only a cache, the values can be computed as:
+ //
+ // VNI = LI->getVNInfoAt(LIS.getMBBEndIdx(MBB))
+ // Node = MDT[LIS.getMBBFromIndex(VNI->def)]
+ //
+ // The cache is also used as a visited set by mapValue().
+ LiveOutMap LiveOutCache;
+
+ // Dump the live-out cache to dbgs().
+ void dumpCache();
public:
LiveIntervalMap(LiveIntervals &lis,
- const LiveInterval &parentli,
- LiveInterval &li)
- : lis_(lis), parentli_(parentli), li_(li) {}
+ MachineDominatorTree &mdt,
+ const LiveInterval &parentli)
+ : LIS(lis), MDT(mdt), ParentLI(parentli), LI(0) {}
+
+ /// reset - clear all data structures and start a new live interval.
+ void reset(LiveInterval *);
+
+ /// getLI - return the current live interval.
+ LiveInterval *getLI() const { return LI; }
- /// defValue - define a value in li_ from the parentli_ value VNI and Idx.
+ /// defValue - define a value in LI from the ParentLI value VNI and Idx.
/// Idx does not have to be ParentVNI->def, but it must be contained within
- /// ParentVNI's live range in parentli_.
- /// Return the new li_ value.
+ /// ParentVNI's live range in ParentLI.
+ /// Return the new LI value.
VNInfo *defValue(const VNInfo *ParentVNI, SlotIndex Idx);
- /// mapValue - map ParentVNI to the corresponding li_ value at Idx. It is
+ /// mapValue - map ParentVNI to the corresponding LI value at Idx. It is
/// assumed that ParentVNI is live at Idx.
/// If ParentVNI has not been defined by defValue, it is assumed that
/// ParentVNI->def dominates Idx.
/// If ParentVNI has been defined by defValue one or more times, a value that
/// dominates Idx will be returned. This may require creating extra phi-def
- /// values and adding live ranges to li_.
- VNInfo *mapValue(const VNInfo *ParentVNI, SlotIndex Idx);
+ /// values and adding live ranges to LI.
+ /// If simple is not NULL, *simple will indicate if ParentVNI is a simply
+ /// mapped value.
+ VNInfo *mapValue(const VNInfo *ParentVNI, SlotIndex Idx, bool *simple = 0);
+
+ // extendTo - Find the last LI value defined in MBB at or before Idx. The
+ // ParentLI is assumed to be live at Idx. Extend the live range to include
+ // Idx. Return the found VNInfo, or NULL.
+ VNInfo *extendTo(const MachineBasicBlock *MBB, SlotIndex Idx);
+
+ /// isMapped - Return true if ParentVNI is a known mapped value. It may be a
+ /// simple 1-1 mapping or a complex mapping to later defs.
+ bool isMapped(const VNInfo *ParentVNI) const {
+ return Values.count(ParentVNI);
+ }
+
+ /// isComplexMapped - Return true if ParentVNI has received new definitions
+ /// with defValue.
+ bool isComplexMapped(const VNInfo *ParentVNI) const;
+
+ /// markComplexMapped - Mark ParentVNI as complex mapped regardless of the
+ /// number of definitions.
+ void markComplexMapped(const VNInfo *ParentVNI) { Values[ParentVNI] = 0; }
+
+ // addSimpleRange - Add a simple range from ParentLI to LI.
+ // ParentVNI must be live in the [Start;End) interval.
+ void addSimpleRange(SlotIndex Start, SlotIndex End, const VNInfo *ParentVNI);
- /// addRange - Add live ranges to li_ where [Start;End) intersects parentli_.
+ /// addRange - Add live ranges to LI where [Start;End) intersects ParentLI.
/// All needed values whose def is not inside [Start;End) must be defined
/// beforehand so mapValue will work.
void addRange(SlotIndex Start, SlotIndex End);
@@ -207,115 +258,129 @@ public:
/// - Mark the ranges where the new interval is used with useIntv*
/// - Mark the places where the interval is exited with exitIntv*.
/// - Finish the current interval with closeIntv and repeat from 2.
-/// - Rewrite instructions with rewrite().
+/// - Rewrite instructions with finish().
///
class SplitEditor {
- SplitAnalysis &sa_;
- LiveIntervals &lis_;
- VirtRegMap &vrm_;
- MachineRegisterInfo &mri_;
- const TargetInstrInfo &tii_;
-
- /// curli_ - The immutable interval we are currently splitting.
- const LiveInterval *const curli_;
-
- /// dupli_ - Created as a copy of curli_, ranges are carved out as new
- /// intervals get added through openIntv / closeIntv. This is used to avoid
- /// editing curli_.
- LiveInterval *dupli_;
-
- /// Currently open LiveInterval.
- LiveInterval *openli_;
-
- /// createInterval - Create a new virtual register and LiveInterval with same
- /// register class and spill slot as curli.
- LiveInterval *createInterval();
-
- /// getDupLI - Ensure dupli is created and return it.
- LiveInterval *getDupLI();
-
- /// valueMap_ - Map values in cupli to values in openli. These are direct 1-1
- /// mappings, and do not include values created by inserted copies.
- DenseMap<const VNInfo*, VNInfo*> valueMap_;
-
- /// mapValue - Return the openIntv value that corresponds to the given curli
- /// value.
- VNInfo *mapValue(const VNInfo *curliVNI);
-
- /// A dupli value is live through openIntv.
- bool liveThrough_;
-
- /// All the new intervals created for this split are added to intervals_.
- SmallVectorImpl<LiveInterval*> &intervals_;
-
- /// The index into intervals_ of the first interval we added. There may be
- /// others from before we got it.
- unsigned firstInterval;
-
- /// Insert a COPY instruction curli -> li. Allocate a new value from li
- /// defined by the COPY
- VNInfo *insertCopy(LiveInterval &LI,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I);
+ SplitAnalysis &SA;
+ LiveIntervals &LIS;
+ VirtRegMap &VRM;
+ MachineRegisterInfo &MRI;
+ MachineDominatorTree &MDT;
+ const TargetInstrInfo &TII;
+ const TargetRegisterInfo &TRI;
+
+ /// Edit - The current parent register and new intervals created.
+ LiveRangeEdit &Edit;
+
+ /// Index into Edit of the currently open interval.
+ /// The index 0 is used for the complement, so the first interval started by
+ /// openIntv will be 1.
+ unsigned OpenIdx;
+
+ typedef IntervalMap<SlotIndex, unsigned> RegAssignMap;
+
+ /// Allocator for the interval map. This will eventually be shared with
+ /// SlotIndexes and LiveIntervals.
+ RegAssignMap::Allocator Allocator;
+
+ /// RegAssign - Map of the assigned register indexes.
+ /// Edit.get(RegAssign.lookup(Idx)) is the register that should be live at
+ /// Idx.
+ RegAssignMap RegAssign;
+
+ /// LIMappers - One LiveIntervalMap for each interval in Edit.
+ SmallVector<LiveIntervalMap, 4> LIMappers;
+
+ /// defFromParent - Define Reg from ParentVNI at UseIdx using either
+ /// rematerialization or a COPY from parent. Return the new value.
+ VNInfo *defFromParent(unsigned RegIdx,
+ VNInfo *ParentVNI,
+ SlotIndex UseIdx,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I);
+
+ /// rewriteAssigned - Rewrite all uses of Edit.getReg() to assigned registers.
+ void rewriteAssigned();
+
+ /// rewriteComponents - Rewrite all uses of Intvs[0] according to the eq
+ /// classes in ConEQ.
+ /// This must be done when Intvs[0] is still live at all uses, before calling
+ /// ConEq.Distribute().
+ void rewriteComponents(const SmallVectorImpl<LiveInterval*> &Intvs,
+ const ConnectedVNInfoEqClasses &ConEq);
public:
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
/// Newly created intervals will be appended to newIntervals.
SplitEditor(SplitAnalysis &SA, LiveIntervals&, VirtRegMap&,
- SmallVectorImpl<LiveInterval*> &newIntervals);
+ MachineDominatorTree&, LiveRangeEdit&);
/// getAnalysis - Get the corresponding analysis.
- SplitAnalysis &getAnalysis() { return sa_; }
+ SplitAnalysis &getAnalysis() { return SA; }
/// Create a new virtual register and live interval.
void openIntv();
- /// enterIntvBefore - Enter openli before the instruction at Idx. If curli is
- /// not live before Idx, a COPY is not inserted.
- void enterIntvBefore(SlotIndex Idx);
+ /// enterIntvBefore - Enter the open interval before the instruction at Idx.
+ /// If the parent interval is not live before Idx, a COPY is not inserted.
+ /// Return the beginning of the new live range.
+ SlotIndex enterIntvBefore(SlotIndex Idx);
- /// enterIntvAtEnd - Enter openli at the end of MBB.
- /// PhiMBB is a successor inside openli where a PHI value is created.
- /// Currently, all entries must share the same PhiMBB.
- void enterIntvAtEnd(MachineBasicBlock &MBB, MachineBasicBlock &PhiMBB);
+ /// enterIntvAtEnd - Enter the open interval at the end of MBB.
+ /// Use the open interval from the inserted copy to the MBB end.
+ /// Return the beginning of the new live range.
+ SlotIndex enterIntvAtEnd(MachineBasicBlock &MBB);
- /// useIntv - indicate that all instructions in MBB should use openli.
+ /// useIntv - indicate that all instructions in MBB should use OpenLI.
void useIntv(const MachineBasicBlock &MBB);
- /// useIntv - indicate that all instructions in range should use openli.
+ /// useIntv - indicate that all instructions in range should use OpenLI.
void useIntv(SlotIndex Start, SlotIndex End);
- /// leaveIntvAfter - Leave openli after the instruction at Idx.
- void leaveIntvAfter(SlotIndex Idx);
+ /// leaveIntvAfter - Leave the open interval after the instruction at Idx.
+ /// Return the end of the live range.
+ SlotIndex leaveIntvAfter(SlotIndex Idx);
+
+ /// leaveIntvBefore - Leave the open interval before the instruction at Idx.
+ /// Return the end of the live range.
+ SlotIndex leaveIntvBefore(SlotIndex Idx);
/// leaveIntvAtTop - Leave the interval at the top of MBB.
- /// Currently, only one value can leave the interval.
- void leaveIntvAtTop(MachineBasicBlock &MBB);
+ /// Add liveness from the MBB top to the copy.
+ /// Return the end of the live range.
+ SlotIndex leaveIntvAtTop(MachineBasicBlock &MBB);
+
+ /// overlapIntv - Indicate that all instructions in range should use the open
+ /// interval, but also let the complement interval be live.
+ ///
+ /// This doubles the register pressure, but is sometimes required to deal with
+ /// register uses after the last valid split point.
+ ///
+ /// The Start index should be a return value from a leaveIntv* call, and End
+ /// should be in the same basic block. The parent interval must have the same
+ /// value across the range.
+ ///
+ void overlapIntv(SlotIndex Start, SlotIndex End);
/// closeIntv - Indicate that we are done editing the currently open
/// LiveInterval, and ranges can be trimmed.
void closeIntv();
- /// rewrite - after all the new live ranges have been created, rewrite
- /// instructions using curli to use the new intervals.
- void rewrite();
+ /// finish - after all the new live ranges have been created, compute the
+ /// remaining live range, and rewrite instructions to use the new registers.
+ void finish();
- // ===--- High level methods ---===
+ /// dump - print the current interval mapping to dbgs().
+ void dump() const;
- /// splitAroundLoop - Split curli into a separate live interval inside
- /// the loop. Return true if curli has been completely replaced, false if
- /// curli is still intact, and needs to be spilled or split further.
- bool splitAroundLoop(const MachineLoop*);
+ // ===--- High level methods ---===
- /// splitSingleBlocks - Split curli into a separate live interval inside each
- /// basic block in Blocks. Return true if curli has been completely replaced,
- /// false if curli is still intact, and needs to be spilled or split further.
- bool splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks);
+ /// splitSingleBlocks - Split CurLI into a separate live interval inside each
+ /// basic block in Blocks.
+ void splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks);
- /// splitInsideBlock - Split curli into multiple intervals inside MBB. Return
- /// true if curli has been completely replaced, false if curli is still
- /// intact, and needs to be spilled or split further.
- bool splitInsideBlock(const MachineBasicBlock *);
+ /// splitInsideBlock - Split CurLI into multiple intervals inside MBB.
+ void splitInsideBlock(const MachineBasicBlock *);
};
}
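
The LiveOutCache invariants documented in the class above amount to a memoized live-out query: a cached entry is returned as-is, a block that defines the value is its own witness, and any other block must agree with its predecessors. A toy memoization over an acyclic single-predecessor chain; the real mapValue() also handles loops and inserts phi-defs, which this omits:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct Block {
  int Id;
  std::vector<Block*> Preds;
  int LocalDef; // value defined in this block, or -1
};

typedef std::map<const Block*, std::pair<int, const Block*> > LiveOutMap;

// Return (value, defining block) live out of B, caching the answer.
static std::pair<int, const Block*> liveOut(const Block *B, LiveOutMap &Cache) {
  LiveOutMap::iterator I = Cache.find(B);
  if (I != Cache.end())
    return I->second;                       // 1. already cached
  if (B->LocalDef != -1)
    return Cache[B] = std::make_pair(B->LocalDef, B); // 2. own witness
  return Cache[B] = liveOut(B->Preds.at(0), Cache);   // 3. agree with preds
}

int main() {
  Block A = { 0, std::vector<Block*>(), 7 };
  Block B = { 1, std::vector<Block*>(1, &A), -1 };
  Block C = { 2, std::vector<Block*>(1, &B), -1 };
  LiveOutMap Cache;
  std::pair<int, const Block*> P = liveOut(&C, Cache);
  std::printf("live-out of BB#%d: value %d defined in BB#%d\n",
              C.Id, P.first, P.second->Id);
  return 0;
}
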
diff --git a/contrib/llvm/lib/CodeGen/Splitter.cpp b/contrib/llvm/lib/CodeGen/Splitter.cpp
index 38f3b1f..08aee82 100644
--- a/contrib/llvm/lib/CodeGen/Splitter.cpp
+++ b/contrib/llvm/lib/CodeGen/Splitter.cpp
@@ -29,8 +29,14 @@
using namespace llvm;
char LoopSplitter::ID = 0;
-INITIALIZE_PASS(LoopSplitter, "loop-splitting",
- "Split virtual regists across loop boundaries.", false, false);
+INITIALIZE_PASS_BEGIN(LoopSplitter, "loop-splitting",
+ "Split virtual regists across loop boundaries.", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(LoopSplitter, "loop-splitting",
+ "Split virtual regists across loop boundaries.", false, false)
namespace llvm {
@@ -140,7 +146,6 @@ namespace llvm {
VNInfo *newVal = getNewVNI(preHeaderRange->valno);
newVal->def = copyDefIdx;
newVal->setCopy(copy);
- newVal->setIsDefAccurate(true);
li.removeRange(copyDefIdx, ls.lis->getMBBEndIdx(preHeader), true);
getNewLI()->addRange(LiveRange(copyDefIdx,
@@ -174,13 +179,13 @@ namespace llvm {
// Blow away output range definition.
outRange->valno->def = ls.lis->getInvalidIndex();
- outRange->valno->setIsDefAccurate(false);
li.removeRange(ls.lis->getMBBStartIdx(outBlock), copyDefIdx);
+ SlotIndex newDefIdx = ls.lis->getMBBStartIdx(outBlock);
+ assert(ls.lis->getInstructionFromIndex(newDefIdx) == 0 &&
+ "PHI def index points at actual instruction.");
VNInfo *newVal =
- getNewLI()->getNextValue(SlotIndex(ls.lis->getMBBStartIdx(outBlock),
- true),
- 0, false, ls.lis->getVNInfoAllocator());
+ getNewLI()->getNextValue(newDefIdx, 0, ls.lis->getVNInfoAllocator());
getNewLI()->addRange(LiveRange(ls.lis->getMBBStartIdx(outBlock),
copyDefIdx, newVal));
@@ -514,8 +519,10 @@ namespace llvm {
if (!insertRange)
continue;
- VNInfo *newVal = li.getNextValue(lis->getMBBStartIdx(preHeader),
- 0, false, lis->getVNInfoAllocator());
+ SlotIndex newDefIdx = lis->getMBBStartIdx(preHeader);
+ assert(lis->getInstructionFromIndex(newDefIdx) == 0 &&
+ "PHI def index points at actual instruction.");
+ VNInfo *newVal = li.getNextValue(newDefIdx, 0, lis->getVNInfoAllocator());
li.addRange(LiveRange(lis->getMBBStartIdx(preHeader),
lis->getMBBEndIdx(preHeader),
newVal));
@@ -612,8 +619,11 @@ namespace llvm {
lis->getMBBEndIdx(splitBlock), true);
}
} else if (intersects) {
- VNInfo *newVal = li.getNextValue(lis->getMBBStartIdx(splitBlock),
- 0, false, lis->getVNInfoAllocator());
+ SlotIndex newDefIdx = lis->getMBBStartIdx(splitBlock);
+ assert(lis->getInstructionFromIndex(newDefIdx) == 0 &&
+ "PHI def index points at actual instruction.");
+ VNInfo *newVal = li.getNextValue(newDefIdx, 0,
+ lis->getVNInfoAllocator());
li.addRange(LiveRange(lis->getMBBStartIdx(splitBlock),
lis->getMBBEndIdx(splitBlock),
newVal));
diff --git a/contrib/llvm/lib/CodeGen/Splitter.h b/contrib/llvm/lib/CodeGen/Splitter.h
index a726a7b..9fb1b8b 100644
--- a/contrib/llvm/lib/CodeGen/Splitter.h
+++ b/contrib/llvm/lib/CodeGen/Splitter.h
@@ -36,7 +36,9 @@ namespace llvm {
public:
static char ID;
- LoopSplitter() : MachineFunctionPass(ID) {}
+ LoopSplitter() : MachineFunctionPass(ID) {
+ initializeLoopSplitterPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &au) const;
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 9f51778..fcaee42 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -16,6 +16,7 @@
#define DEBUG_TYPE "stack-protector"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Attributes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -45,6 +46,8 @@ namespace {
Function *F;
Module *M;
+ DominatorTree* DT;
+
/// InsertStackProtectors - Insert code into the prologue and epilogue of
/// the function.
///
@@ -62,9 +65,17 @@ namespace {
bool RequiresStackProtector() const;
public:
static char ID; // Pass identification, replacement for typeid.
- StackProtector() : FunctionPass(ID), TLI(0) {}
+ StackProtector() : FunctionPass(ID), TLI(0) {
+ initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+ }
StackProtector(const TargetLowering *tli)
- : FunctionPass(ID), TLI(tli) {}
+ : FunctionPass(ID), TLI(tli) {
+ initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<DominatorTree>();
+ }
virtual bool runOnFunction(Function &Fn);
};
@@ -72,7 +83,7 @@ namespace {
char StackProtector::ID = 0;
INITIALIZE_PASS(StackProtector, "stack-protector",
- "Insert stack protectors", false, false);
+ "Insert stack protectors", false, false)
FunctionPass *llvm::createStackProtectorPass(const TargetLowering *tli) {
return new StackProtector(tli);
@@ -81,6 +92,7 @@ FunctionPass *llvm::createStackProtectorPass(const TargetLowering *tli) {
bool StackProtector::runOnFunction(Function &Fn) {
F = &Fn;
M = F->getParent();
+ DT = getAnalysisIfAvailable<DominatorTree>();
if (!RequiresStackProtector()) return false;
@@ -135,6 +147,7 @@ bool StackProtector::RequiresStackProtector() const {
/// value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
BasicBlock *FailBB = 0; // The basic block to jump to if check fails.
+ BasicBlock *FailBBDom = 0; // FailBB's dominator.
AllocaInst *AI = 0; // Place on stack that stores the stack guard.
Value *StackGuardVar = 0; // The stack guard variable.
@@ -178,6 +191,8 @@ bool StackProtector::InsertStackProtectors() {
// Create the basic block to jump to when the guard check fails.
FailBB = CreateFailBB();
+ if (DT)
+ FailBBDom = DT->isReachableFromEntry(BB) ? BB : 0;
}
// For each block with a return instruction, convert this:
@@ -204,6 +219,10 @@ bool StackProtector::InsertStackProtectors() {
// Split the basic block before the return instruction.
BasicBlock *NewBB = BB->splitBasicBlock(RI, "SP_return");
+ if (DT) {
+ DT->addNewBlock(NewBB, DT->isReachableFromEntry(BB) ? BB : 0);
+ FailBBDom = DT->findNearestCommonDominator(FailBBDom, BB);
+ }
// Remove default branch instruction to the new BB.
BB->getTerminator()->eraseFromParent();
@@ -223,6 +242,9 @@ bool StackProtector::InsertStackProtectors() {
// statements in the function.
if (!FailBB) return false;
+ if (DT)
+ DT->addNewBlock(FailBB, FailBBDom);
+
return true;
}
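
The StackProtector change keeps the DominatorTree current by folding every return block into FailBBDom via findNearestCommonDominator before finally attaching FailBB. A toy of that walk over immediate-dominator parent pointers with cached depths; node names are illustrative:

#include <cstdio>

struct Node { const char *Name; Node *IDom; int Depth; };

static Node *nearestCommonDominator(Node *A, Node *B) {
  if (!A) return B;                 // first block folded in
  while (A->Depth > B->Depth) A = A->IDom;
  while (B->Depth > A->Depth) B = B->IDom;
  while (A != B) { A = A->IDom; B = B->IDom; }
  return A;
}

int main() {
  Node Entry = { "entry", 0, 0 };
  Node L = { "left", &Entry, 1 }, R = { "right", &Entry, 1 };
  Node *FailBBDom = 0;
  FailBBDom = nearestCommonDominator(FailBBDom, &L);
  FailBBDom = nearestCommonDominator(FailBBDom, &R);
  std::printf("FailBB dominator: %s\n", FailBBDom->Name); // entry
  return 0;
}
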
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 8d57ae9..01f5b56 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -95,9 +95,13 @@ namespace {
public:
static char ID; // Pass identification
StackSlotColoring() :
- MachineFunctionPass(ID), ColorWithRegs(false), NextColor(-1) {}
+ MachineFunctionPass(ID), ColorWithRegs(false), NextColor(-1) {
+ initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
+ }
StackSlotColoring(bool RegColor) :
- MachineFunctionPass(ID), ColorWithRegs(RegColor), NextColor(-1) {}
+ MachineFunctionPass(ID), ColorWithRegs(RegColor), NextColor(-1) {
+ initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -145,8 +149,14 @@ namespace {
char StackSlotColoring::ID = 0;
-INITIALIZE_PASS(StackSlotColoring, "stack-slot-coloring",
- "Stack Slot Coloring", false, false);
+INITIALIZE_PASS_BEGIN(StackSlotColoring, "stack-slot-coloring",
+ "Stack Slot Coloring", false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(StackSlotColoring, "stack-slot-coloring",
+ "Stack Slot Coloring", false, false)
FunctionPass *llvm::createStackSlotColoringPass(bool RegColor) {
return new StackSlotColoring(RegColor);
@@ -208,7 +218,7 @@ void StackSlotColoring::InitializeSlots() {
for (LiveStacks::iterator i = LS->begin(), e = LS->end(); i != e; ++i) {
LiveInterval &li = i->second;
DEBUG(li.dump());
- int FI = li.getStackSlotIndex();
+ int FI = TargetRegisterInfo::stackSlot2Index(li.reg);
if (MFI->isDeadObjectIndex(FI))
continue;
SSIntervals.push_back(&li);
@@ -251,7 +261,7 @@ StackSlotColoring::ColorSlotsWithFreeRegs(SmallVector<int, 16> &SlotMapping,
DEBUG(dbgs() << "Assigning unused registers to spill slots:\n");
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
- int SS = li->getStackSlotIndex();
+ int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
if (!UsedColors[SS] || li->weight < 20)
// If the weight is < 20, i.e. two references in a loop with depth 1,
// don't bother with it.
@@ -340,7 +350,7 @@ int StackSlotColoring::ColorSlot(LiveInterval *li) {
// Record the assignment.
Assignments[Color].push_back(li);
- int FI = li->getStackSlotIndex();
+ int FI = TargetRegisterInfo::stackSlot2Index(li->reg);
DEBUG(dbgs() << "Assigning fi#" << FI << " to fi#" << Color << "\n");
// Change size and alignment of the allocated slot. If there are multiple
@@ -369,7 +379,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
bool Changed = false;
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
- int SS = li->getStackSlotIndex();
+ int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
int NewSS = ColorSlot(li);
assert(NewSS >= 0 && "Stack coloring failed?");
SlotMapping[SS] = NewSS;
@@ -382,7 +392,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
DEBUG(dbgs() << "\nSpill slots after coloring:\n");
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
- int SS = li->getStackSlotIndex();
+ int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
li->weight = SlotWeights[SS];
}
// Sort them by new weight.
@@ -636,7 +646,7 @@ StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
} else {
SmallVector<MachineInstr*, 4> NewMIs;
bool Success = TII->unfoldMemoryOperand(MF, MI, Reg, false, false, NewMIs);
- Success = Success; // Silence compiler warning.
+ (void)Success; // Silence compiler warning.
assert(Success && "Failed to unfold!");
MachineInstr *NewMI = NewMIs[0];
MBB->insert(MI, NewMI);
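The li.getStackSlotIndex() calls were replaced because spill-slot intervals now reuse LiveInterval::reg to carry a frame index encoded into the same unsigned namespace as registers, and the static helpers on TargetRegisterInfo decode it. A small illustrative wrapper (not part of the commit) showing the intended round trip:

    #include <cassert>
    #include "llvm/CodeGen/LiveInterval.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // Recover the frame index of a spill-slot interval.
    static int frameIndexOf(const LiveInterval &LI) {
      assert(TargetRegisterInfo::isStackSlot(LI.reg) &&
             "interval does not describe a spill slot");
      return TargetRegisterInfo::stackSlot2Index(LI.reg);
    }

    // The inverse mapping, used when such intervals are created.
    static unsigned regForFrameIndex(int FI) {
      return TargetRegisterInfo::index2StackSlot(FI);
    }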
diff --git a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
index 894dbfa..ec7829e 100644
--- a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -1,4 +1,4 @@
-//===- StrongPhiElimination.cpp - Eliminate PHI nodes by inserting copies -===//
+//===- StrongPHIElimination.cpp - Eliminate PHI nodes by inserting copies -===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,1039 +7,823 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass eliminates machine instruction PHI nodes by inserting copy
-// instructions, using an intelligent copy-folding technique based on
-// dominator information. This is technique is derived from:
+// This pass eliminates PHI instructions by aggressively coalescing the copies
+// that would be inserted by a naive algorithm and only inserting the copies
+// that are necessary. The coalescing technique initially assumes that all
+// registers appearing in a PHI instruction do not interfere. It then eliminates
+// proven interferences, using dominators to only perform a linear number of
+// interference tests instead of the quadratic number of interference tests
+// that this would naively require. This is a technique derived from:
//
// Budimlic, et al. Fast copy coalescing and live-range identification.
// In Proceedings of the ACM SIGPLAN 2002 Conference on Programming Language
// Design and Implementation (Berlin, Germany, June 17 - 19, 2002).
// PLDI '02. ACM, New York, NY, 25-32.
-// DOI= http://doi.acm.org/10.1145/512529.512534
+//
+// The original implementation constructs a data structure they call a dominance
+// forest for this purpose. The dominance forest was shown to be unnecessary,
+// as it is possible to emulate the creation and traversal of a dominance forest
+// by directly using the dominator tree, rather than actually constructing the
+// dominance forest. This technique is explained in:
+//
+// Boissinot, et al. Revisiting Out-of-SSA Translation for Correctness, Code
+// Quality and Efficiency,
+// In Proceedings of the 7th annual IEEE/ACM International Symposium on Code
+// Generation and Optimization (Seattle, Washington, March 22 - 25, 2009).
+// CGO '09. IEEE, Washington, DC, 114-125.
+//
+// Careful implementation allows for all of the dominance forest interference
+// checks to be performed at once in a single depth-first traversal of the
+// dominator tree, which is what is implemented here.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "strongphielim"
+#include "PHIEliminationUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
namespace {
- struct StrongPHIElimination : public MachineFunctionPass {
+ class StrongPHIElimination : public MachineFunctionPass {
+ public:
static char ID; // Pass identification, replacement for typeid
- StrongPHIElimination() : MachineFunctionPass(ID) {}
-
- // Waiting stores, for each MBB, the set of copies that need to
- // be inserted into that MBB
- DenseMap<MachineBasicBlock*,
- std::multimap<unsigned, unsigned> > Waiting;
-
- // Stacks holds the renaming stack for each register
- std::map<unsigned, std::vector<unsigned> > Stacks;
-
- // Registers in UsedByAnother are PHI nodes that are themselves
- // used as operands to another PHI node
- std::set<unsigned> UsedByAnother;
-
- // RenameSets are the is a map from a PHI-defined register
- // to the input registers to be coalesced along with the
- // predecessor block for those input registers.
- std::map<unsigned, std::map<unsigned, MachineBasicBlock*> > RenameSets;
-
- // PhiValueNumber holds the ID numbers of the VNs for each phi that we're
- // eliminating, indexed by the register defined by that phi.
- std::map<unsigned, unsigned> PhiValueNumber;
-
- // Store the DFS-in number of each block
- DenseMap<MachineBasicBlock*, unsigned> preorder;
-
- // Store the DFS-out number of each block
- DenseMap<MachineBasicBlock*, unsigned> maxpreorder;
-
- bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<MachineDominatorTree>();
- AU.addRequired<SlotIndexes>();
- AU.addPreserved<SlotIndexes>();
- AU.addRequired<LiveIntervals>();
-
- // TODO: Actually make this true.
- AU.addPreserved<LiveIntervals>();
- AU.addPreserved<RegisterCoalescer>();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- virtual void releaseMemory() {
- preorder.clear();
- maxpreorder.clear();
-
- Waiting.clear();
- Stacks.clear();
- UsedByAnother.clear();
- RenameSets.clear();
+ StrongPHIElimination() : MachineFunctionPass(ID) {
+ initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
}
+ virtual void getAnalysisUsage(AnalysisUsage&) const;
+ bool runOnMachineFunction(MachineFunction&);
+
private:
-
- /// DomForestNode - Represents a node in the "dominator forest". This is
- /// a forest in which the nodes represent registers and the edges
- /// represent a dominance relation in the block defining those registers.
- struct DomForestNode {
- private:
- // Store references to our children
- std::vector<DomForestNode*> children;
- // The register we represent
- unsigned reg;
-
- // Add another node as our child
- void addChild(DomForestNode* DFN) { children.push_back(DFN); }
-
- public:
- typedef std::vector<DomForestNode*>::iterator iterator;
-
- // Create a DomForestNode by providing the register it represents, and
- // the node to be its parent. The virtual root node has register 0
- // and a null parent.
- DomForestNode(unsigned r, DomForestNode* parent) : reg(r) {
- if (parent)
- parent->addChild(this);
- }
-
- ~DomForestNode() {
- for (iterator I = begin(), E = end(); I != E; ++I)
- delete *I;
- }
-
- /// getReg - Return the regiser that this node represents
- inline unsigned getReg() { return reg; }
-
- // Provide iterator access to our children
- inline DomForestNode::iterator begin() { return children.begin(); }
- inline DomForestNode::iterator end() { return children.end(); }
+ /// This struct represents a single node in the union-find data structure
+ /// representing the variable congruence classes. There is one difference
+ /// from a normal union-find data structure. We steal two bits from the parent
+ /// pointer. One of these bits is used to represent whether the register
+ /// itself has been isolated, and the other is used to represent whether the
+ /// PHI with that register as its destination has been isolated.
+ ///
+ /// Note that this leads to the strange situation where the leader of a
+ /// congruence class may no longer logically be a member, due to being
+ /// isolated.
+ struct Node {
+ enum Flags {
+ kRegisterIsolatedFlag = 1,
+ kPHIIsolatedFlag = 2
+ };
+ Node(unsigned v) : value(v), rank(0) { parent.setPointer(this); }
+
+ Node *getLeader();
+
+ PointerIntPair<Node*, 2> parent;
+ unsigned value;
+ unsigned rank;
};
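The two stolen bits rely on PointerIntPair, which packs a small integer into the spare low bits of a sufficiently aligned pointer. A self-contained sketch of the same union-find-with-flags idea, with illustrative names; the pass's getLeader() below performs the equivalent compression iteratively by splicing grandparents:

    #include "llvm/ADT/PointerIntPair.h"

    struct UFNode {
      enum { kIsolatedFlag = 1 };
      llvm::PointerIntPair<UFNode*, 2> Parent; // pointer + 2 spare bits
      unsigned Rank;

      UFNode() : Rank(0) { Parent.setPointer(this); }

      // Find with path compression; setPointer leaves the flag bits alone.
      UFNode *leader() {
        UFNode *P = Parent.getPointer();
        if (P != this)
          Parent.setPointer(P = P->leader());
        return P;
      }

      bool isIsolated() const { return Parent.getInt() & kIsolatedFlag; }
      void isolate() { Parent.setInt(Parent.getInt() | kIsolatedFlag); }
    };

    // Union by rank; on equal ranks the left argument's leader survives,
    // matching the bias documented for unionRegs above.
    static void unionNodes(UFNode *A, UFNode *B) {
      A = A->leader();
      B = B->leader();
      if (A == B) return;
      if (A->Rank < B->Rank)
        A->Parent.setPointer(B);
      else {
        B->Parent.setPointer(A);
        if (A->Rank == B->Rank) ++A->Rank;
      }
    }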
-
- void computeDFS(MachineFunction& MF);
- void processBlock(MachineBasicBlock* MBB);
-
- std::vector<DomForestNode*> computeDomForest(
- std::map<unsigned, MachineBasicBlock*>& instrs,
- MachineRegisterInfo& MRI);
- void processPHIUnion(MachineInstr* Inst,
- std::map<unsigned, MachineBasicBlock*>& PHIUnion,
- std::vector<StrongPHIElimination::DomForestNode*>& DF,
- std::vector<std::pair<unsigned, unsigned> >& locals);
- void ScheduleCopies(MachineBasicBlock* MBB, std::set<unsigned>& pushed);
- void InsertCopies(MachineDomTreeNode* MBB,
- SmallPtrSet<MachineBasicBlock*, 16>& v);
- bool mergeLiveIntervals(unsigned primary, unsigned secondary);
- };
-}
-char StrongPHIElimination::ID = 0;
-INITIALIZE_PASS(StrongPHIElimination, "strong-phi-node-elimination",
- "Eliminate PHI nodes for register allocation, intelligently", false, false);
+ /// Add a register, placing it in a new congruence class containing only
+ /// itself.
+ void addReg(unsigned);
-char &llvm::StrongPHIEliminationID = StrongPHIElimination::ID;
+ /// Join the congruence classes of two registers. This function is biased
+ /// towards the left argument, i.e. after
+ ///
+ /// addReg(r2);
+ /// unionRegs(r1, r2);
+ ///
+ /// the leader of the unioned congruence class is the same as the leader of
+ /// r1's congruence class prior to the union. This is actually relied upon
+ /// in the copy insertion code.
+ void unionRegs(unsigned, unsigned);
-/// computeDFS - Computes the DFS-in and DFS-out numbers of the dominator tree
-/// of the given MachineFunction. These numbers are then used in other parts
-/// of the PHI elimination process.
-void StrongPHIElimination::computeDFS(MachineFunction& MF) {
- SmallPtrSet<MachineDomTreeNode*, 8> frontier;
- SmallPtrSet<MachineDomTreeNode*, 8> visited;
-
- unsigned time = 0;
-
- MachineDominatorTree& DT = getAnalysis<MachineDominatorTree>();
-
- MachineDomTreeNode* node = DT.getRootNode();
-
- std::vector<MachineDomTreeNode*> worklist;
- worklist.push_back(node);
-
- while (!worklist.empty()) {
- MachineDomTreeNode* currNode = worklist.back();
-
- if (!frontier.count(currNode)) {
- frontier.insert(currNode);
- ++time;
- preorder.insert(std::make_pair(currNode->getBlock(), time));
- }
-
- bool inserted = false;
- for (MachineDomTreeNode::iterator I = currNode->begin(), E = currNode->end();
- I != E; ++I)
- if (!frontier.count(*I) && !visited.count(*I)) {
- worklist.push_back(*I);
- inserted = true;
- break;
- }
-
- if (!inserted) {
- frontier.erase(currNode);
- visited.insert(currNode);
- maxpreorder.insert(std::make_pair(currNode->getBlock(), time));
-
- worklist.pop_back();
+ /// Get the color of a register. The color is 0 if the register has been
+ /// isolated.
+ unsigned getRegColor(unsigned);
+
+ // Isolate a register.
+ void isolateReg(unsigned);
+
+ /// Get the color of a PHI. The color of a PHI is 0 if the PHI has been
+ /// isolated. Otherwise, it is the original color of its destination and
+ /// all of its operands (before they were isolated, if they were).
+ unsigned getPHIColor(MachineInstr*);
+
+ /// Isolate a PHI.
+ void isolatePHI(MachineInstr*);
+
+ /// Traverses a basic block, splitting any interferences found between
+ /// registers in the same congruence class. It takes two DenseMaps as
+ /// arguments that it also updates: CurrentDominatingParent, which maps
+ /// a color to the register in that congruence class whose definition was
+ /// most recently seen, and ImmediateDominatingParent, which maps a register
+ /// to the register in the same congruence class that most immediately
+ /// dominates it.
+ ///
+ /// This function assumes that it is being called in a depth-first traversal
+ /// of the dominator tree.
+ void SplitInterferencesForBasicBlock(
+ MachineBasicBlock&,
+ DenseMap<unsigned, unsigned> &CurrentDominatingParent,
+ DenseMap<unsigned, unsigned> &ImmediateDominatingParent);
+
+ // Lowers a PHI instruction, inserting copies of the source and destination
+ // registers as necessary.
+ void InsertCopiesForPHI(MachineInstr*, MachineBasicBlock*);
+
+ // Merges the live interval of Reg into NewReg and renames Reg to NewReg
+ // everywhere that Reg appears. Requires Reg and NewReg to have non-
+ // overlapping lifetimes.
+ void MergeLIsAndRename(unsigned Reg, unsigned NewReg);
+
+ MachineRegisterInfo *MRI;
+ const TargetInstrInfo *TII;
+ MachineDominatorTree *DT;
+ LiveIntervals *LI;
+
+ BumpPtrAllocator Allocator;
+
+ DenseMap<unsigned, Node*> RegNodeMap;
+
+ // Maps a basic block to a list of its defs of registers that appear as PHI
+ // sources.
+ DenseMap<MachineBasicBlock*, std::vector<MachineInstr*> > PHISrcDefs;
+
+ // Maps a color to a pair of a PHI instruction and the virtual register
+ // that is that PHI's operand corresponding to the current basic block.
+ DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > CurrentPHIForColor;
+
+ // FIXME: Can these two data structures be combined? Would a std::multimap
+ // be any better?
+
+ // Stores pairs of predecessor basic blocks and the source registers of
+ // inserted copy instructions.
+ typedef DenseSet<std::pair<MachineBasicBlock*, unsigned> > SrcCopySet;
+ SrcCopySet InsertedSrcCopySet;
+
+ // Maps pairs of predecessor basic blocks and colors to their defining copy
+ // instructions.
+ typedef DenseMap<std::pair<MachineBasicBlock*, unsigned>, MachineInstr*>
+ SrcCopyMap;
+ SrcCopyMap InsertedSrcCopyMap;
+
+ // Maps inserted destination copy registers to their defining copy
+ // instructions.
+ typedef DenseMap<unsigned, MachineInstr*> DestCopyMap;
+ DestCopyMap InsertedDestCopies;
+ };
+
+ struct MIIndexCompare {
+ MIIndexCompare(LiveIntervals *LiveIntervals) : LI(LiveIntervals) { }
+
+ bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
+ return LI->getInstructionIndex(LHS) < LI->getInstructionIndex(RHS);
}
- }
-}
-namespace {
+ LiveIntervals *LI;
+ };
+} // namespace
-/// PreorderSorter - a helper class that is used to sort registers
-/// according to the preorder number of their defining blocks
-class PreorderSorter {
-private:
- DenseMap<MachineBasicBlock*, unsigned>& preorder;
- MachineRegisterInfo& MRI;
-
-public:
- PreorderSorter(DenseMap<MachineBasicBlock*, unsigned>& p,
- MachineRegisterInfo& M) : preorder(p), MRI(M) { }
-
- bool operator()(unsigned A, unsigned B) {
- if (A == B)
- return false;
-
- MachineBasicBlock* ABlock = MRI.getVRegDef(A)->getParent();
- MachineBasicBlock* BBlock = MRI.getVRegDef(B)->getParent();
-
- if (preorder[ABlock] < preorder[BBlock])
- return true;
- else if (preorder[ABlock] > preorder[BBlock])
- return false;
-
- return false;
- }
-};
+STATISTIC(NumPHIsLowered, "Number of PHIs lowered");
+STATISTIC(NumDestCopiesInserted, "Number of destination copies inserted");
+STATISTIC(NumSrcCopiesInserted, "Number of source copies inserted");
+char StrongPHIElimination::ID = 0;
+INITIALIZE_PASS_BEGIN(StrongPHIElimination, "strong-phi-node-elimination",
+ "Eliminate PHI nodes for register allocation, intelligently", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(StrongPHIElimination, "strong-phi-node-elimination",
+ "Eliminate PHI nodes for register allocation, intelligently", false, false)
+
+char &llvm::StrongPHIEliminationID = StrongPHIElimination::ID;
+
+void StrongPHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ MachineFunctionPass::getAnalysisUsage(AU);
}
-/// computeDomForest - compute the subforest of the DomTree corresponding
-/// to the defining blocks of the registers in question
-std::vector<StrongPHIElimination::DomForestNode*>
-StrongPHIElimination::computeDomForest(
- std::map<unsigned, MachineBasicBlock*>& regs,
- MachineRegisterInfo& MRI) {
- // Begin by creating a virtual root node, since the actual results
- // may well be a forest. Assume this node has maximum DFS-out number.
- DomForestNode* VirtualRoot = new DomForestNode(0, 0);
- maxpreorder.insert(std::make_pair((MachineBasicBlock*)0, ~0UL));
-
- // Populate a worklist with the registers
- std::vector<unsigned> worklist;
- worklist.reserve(regs.size());
- for (std::map<unsigned, MachineBasicBlock*>::iterator I = regs.begin(),
- E = regs.end(); I != E; ++I)
- worklist.push_back(I->first);
-
- // Sort the registers by the DFS-in number of their defining block
- PreorderSorter PS(preorder, MRI);
- std::sort(worklist.begin(), worklist.end(), PS);
-
- // Create a "current parent" stack, and put the virtual root on top of it
- DomForestNode* CurrentParent = VirtualRoot;
- std::vector<DomForestNode*> stack;
- stack.push_back(VirtualRoot);
-
- // Iterate over all the registers in the previously computed order
- for (std::vector<unsigned>::iterator I = worklist.begin(), E = worklist.end();
- I != E; ++I) {
- unsigned pre = preorder[MRI.getVRegDef(*I)->getParent()];
- MachineBasicBlock* parentBlock = CurrentParent->getReg() ?
- MRI.getVRegDef(CurrentParent->getReg())->getParent() :
- 0;
-
- // If the DFS-in number of the register is greater than the DFS-out number
- // of the current parent, repeatedly pop the parent stack until it isn't.
- while (pre > maxpreorder[parentBlock]) {
- stack.pop_back();
- CurrentParent = stack.back();
-
- parentBlock = CurrentParent->getReg() ?
- MRI.getVRegDef(CurrentParent->getReg())->getParent() :
- 0;
+static MachineOperand *findLastUse(MachineBasicBlock *MBB, unsigned Reg) {
+ // FIXME: This only needs to check from the first terminator, as only the
+ // first terminator can use a virtual register.
+ for (MachineBasicBlock::reverse_iterator RI = MBB->rbegin(); ; ++RI) {
+ assert (RI != MBB->rend());
+ MachineInstr *MI = &*RI;
+
+ for (MachineInstr::mop_iterator OI = MI->operands_begin(),
+ OE = MI->operands_end(); OI != OE; ++OI) {
+ MachineOperand &MO = *OI;
+ if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
+ return &MO;
}
-
- // Now that we've found the appropriate parent, create a DomForestNode for
- // this register and attach it to the forest
- DomForestNode* child = new DomForestNode(*I, CurrentParent);
-
- // Push this new node on the "current parent" stack
- stack.push_back(child);
- CurrentParent = child;
}
-
- // Return a vector containing the children of the virtual root node
- std::vector<DomForestNode*> ret;
- ret.insert(ret.end(), VirtualRoot->begin(), VirtualRoot->end());
- return ret;
+ return NULL;
}
-/// isLiveIn - helper method that determines, from a regno, if a register
-/// is live into a block
-static bool isLiveIn(unsigned r, MachineBasicBlock* MBB,
- LiveIntervals& LI) {
- LiveInterval& I = LI.getOrCreateInterval(r);
- SlotIndex idx = LI.getMBBStartIdx(MBB);
- return I.liveAt(idx);
-}
+bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
+ MRI = &MF.getRegInfo();
+ TII = MF.getTarget().getInstrInfo();
+ DT = &getAnalysis<MachineDominatorTree>();
+ LI = &getAnalysis<LiveIntervals>();
-/// isLiveOut - help method that determines, from a regno, if a register is
-/// live out of a block.
-static bool isLiveOut(unsigned r, MachineBasicBlock* MBB,
- LiveIntervals& LI) {
- for (MachineBasicBlock::succ_iterator PI = MBB->succ_begin(),
- E = MBB->succ_end(); PI != E; ++PI)
- if (isLiveIn(r, *PI, LI))
- return true;
-
- return false;
-}
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ I != E; ++I) {
+ for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
+ BBI != BBE && BBI->isPHI(); ++BBI) {
+ unsigned DestReg = BBI->getOperand(0).getReg();
+ addReg(DestReg);
+ PHISrcDefs[I].push_back(BBI);
-/// interferes - checks for local interferences by scanning a block. The only
-/// trick parameter is 'mode' which tells it the relationship of the two
-/// registers. 0 - defined in the same block, 1 - first properly dominates
-/// second, 2 - second properly dominates first
-static bool interferes(unsigned a, unsigned b, MachineBasicBlock* scan,
- LiveIntervals& LV, unsigned mode) {
- MachineInstr* def = 0;
- MachineInstr* kill = 0;
-
- // The code is still in SSA form at this point, so there is only one
- // definition per VReg. Thus we can safely use MRI->getVRegDef().
- const MachineRegisterInfo* MRI = &scan->getParent()->getRegInfo();
-
- bool interference = false;
-
- // Wallk the block, checking for interferences
- for (MachineBasicBlock::iterator MBI = scan->begin(), MBE = scan->end();
- MBI != MBE; ++MBI) {
- MachineInstr* curr = MBI;
-
- // Same defining block...
- if (mode == 0) {
- if (curr == MRI->getVRegDef(a)) {
- // If we find our first definition, save it
- if (!def) {
- def = curr;
- // If there's already an unkilled definition, then
- // this is an interference
- } else if (!kill) {
- interference = true;
- break;
- // If there's a definition followed by a KillInst, then
- // they can't interfere
- } else {
- interference = false;
- break;
- }
- // Symmetric with the above
- } else if (curr == MRI->getVRegDef(b)) {
- if (!def) {
- def = curr;
- } else if (!kill) {
- interference = true;
- break;
- } else {
- interference = false;
- break;
- }
- // Store KillInsts if they match up with the definition
- } else if (curr->killsRegister(a)) {
- if (def == MRI->getVRegDef(a)) {
- kill = curr;
- } else if (curr->killsRegister(b)) {
- if (def == MRI->getVRegDef(b)) {
- kill = curr;
- }
- }
- }
- // First properly dominates second...
- } else if (mode == 1) {
- if (curr == MRI->getVRegDef(b)) {
- // Definition of second without kill of first is an interference
- if (!kill) {
- interference = true;
- break;
- // Definition after a kill is a non-interference
- } else {
- interference = false;
- break;
- }
- // Save KillInsts of First
- } else if (curr->killsRegister(a)) {
- kill = curr;
- }
- // Symmetric with the above
- } else if (mode == 2) {
- if (curr == MRI->getVRegDef(a)) {
- if (!kill) {
- interference = true;
- break;
- } else {
- interference = false;
- break;
- }
- } else if (curr->killsRegister(b)) {
- kill = curr;
+ for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
+ MachineOperand &SrcMO = BBI->getOperand(i);
+ unsigned SrcReg = SrcMO.getReg();
+ addReg(SrcReg);
+ unionRegs(DestReg, SrcReg);
+
+ MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+ if (DefMI)
+ PHISrcDefs[DefMI->getParent()].push_back(DefMI);
}
}
}
-
- return interference;
-}
-/// processBlock - Determine how to break up PHIs in the current block. Each
-/// PHI is broken up by some combination of renaming its operands and inserting
-/// copies. This method is responsible for determining which operands receive
-/// which treatment.
-void StrongPHIElimination::processBlock(MachineBasicBlock* MBB) {
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
- MachineRegisterInfo& MRI = MBB->getParent()->getRegInfo();
-
- // Holds names that have been added to a set in any PHI within this block
- // before the current one.
- std::set<unsigned> ProcessedNames;
-
- // Iterate over all the PHI nodes in this block
- MachineBasicBlock::iterator P = MBB->begin();
- while (P != MBB->end() && P->isPHI()) {
- unsigned DestReg = P->getOperand(0).getReg();
-
- // Don't both doing PHI elimination for dead PHI's.
- if (P->registerDefIsDead(DestReg)) {
- ++P;
- continue;
- }
+ // Perform a depth-first traversal of the dominator tree, splitting
+ // interferences amongst PHI-congruence classes.
+ DenseMap<unsigned, unsigned> CurrentDominatingParent;
+ DenseMap<unsigned, unsigned> ImmediateDominatingParent;
+ for (df_iterator<MachineDomTreeNode*> DI = df_begin(DT->getRootNode()),
+ DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
+ SplitInterferencesForBasicBlock(*DI->getBlock(),
+ CurrentDominatingParent,
+ ImmediateDominatingParent);
+ }
- LiveInterval& PI = LI.getOrCreateInterval(DestReg);
- SlotIndex pIdx = LI.getInstructionIndex(P).getDefIndex();
- VNInfo* PVN = PI.getLiveRangeContaining(pIdx)->valno;
- PhiValueNumber.insert(std::make_pair(DestReg, PVN->id));
-
- // PHIUnion is the set of incoming registers to the PHI node that
- // are going to be renames rather than having copies inserted. This set
- // is refinded over the course of this function. UnionedBlocks is the set
- // of corresponding MBBs.
- std::map<unsigned, MachineBasicBlock*> PHIUnion;
- SmallPtrSet<MachineBasicBlock*, 8> UnionedBlocks;
-
- // Iterate over the operands of the PHI node
- for (int i = P->getNumOperands() - 1; i >= 2; i-=2) {
- unsigned SrcReg = P->getOperand(i-1).getReg();
-
- // Don't need to try to coalesce a register with itself.
- if (SrcReg == DestReg) {
- ProcessedNames.insert(SrcReg);
- continue;
- }
-
- // We don't need to insert copies for implicit_defs.
- MachineInstr* DefMI = MRI.getVRegDef(SrcReg);
- if (DefMI->isImplicitDef())
- ProcessedNames.insert(SrcReg);
-
- // Check for trivial interferences via liveness information, allowing us
- // to avoid extra work later. Any registers that interfere cannot both
- // be in the renaming set, so choose one and add copies for it instead.
- // The conditions are:
- // 1) if the operand is live into the PHI node's block OR
- // 2) if the PHI node is live out of the operand's defining block OR
- // 3) if the operand is itself a PHI node and the original PHI is
- // live into the operand's defining block OR
- // 4) if the operand is already being renamed for another PHI node
- // in this block OR
- // 5) if any two operands are defined in the same block, insert copies
- // for one of them
- if (isLiveIn(SrcReg, P->getParent(), LI) ||
- isLiveOut(P->getOperand(0).getReg(),
- MRI.getVRegDef(SrcReg)->getParent(), LI) ||
- ( MRI.getVRegDef(SrcReg)->isPHI() &&
- isLiveIn(P->getOperand(0).getReg(),
- MRI.getVRegDef(SrcReg)->getParent(), LI) ) ||
- ProcessedNames.count(SrcReg) ||
- UnionedBlocks.count(MRI.getVRegDef(SrcReg)->getParent())) {
-
- // Add a copy for the selected register
- MachineBasicBlock* From = P->getOperand(i).getMBB();
- Waiting[From].insert(std::make_pair(SrcReg, DestReg));
- UsedByAnother.insert(SrcReg);
- } else {
- // Otherwise, add it to the renaming set
- PHIUnion.insert(std::make_pair(SrcReg,P->getOperand(i).getMBB()));
- UnionedBlocks.insert(MRI.getVRegDef(SrcReg)->getParent());
- }
+ // Insert copies for all PHI source and destination registers.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ I != E; ++I) {
+ for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
+ BBI != BBE && BBI->isPHI(); ++BBI) {
+ InsertCopiesForPHI(BBI, I);
}
-
- // Compute the dominator forest for the renaming set. This is a forest
- // where the nodes are the registers and the edges represent dominance
- // relations between the defining blocks of the registers
- std::vector<StrongPHIElimination::DomForestNode*> DF =
- computeDomForest(PHIUnion, MRI);
-
- // Walk DomForest to resolve interferences at an inter-block level. This
- // will remove registers from the renaming set (and insert copies for them)
- // if interferences are found.
- std::vector<std::pair<unsigned, unsigned> > localInterferences;
- processPHIUnion(P, PHIUnion, DF, localInterferences);
-
- // If one of the inputs is defined in the same block as the current PHI
- // then we need to check for a local interference between that input and
- // the PHI.
- for (std::map<unsigned, MachineBasicBlock*>::iterator I = PHIUnion.begin(),
- E = PHIUnion.end(); I != E; ++I)
- if (MRI.getVRegDef(I->first)->getParent() == P->getParent())
- localInterferences.push_back(std::make_pair(I->first,
- P->getOperand(0).getReg()));
-
- // The dominator forest walk may have returned some register pairs whose
- // interference cannot be determined from dominator analysis. We now
- // examine these pairs for local interferences.
- for (std::vector<std::pair<unsigned, unsigned> >::iterator I =
- localInterferences.begin(), E = localInterferences.end(); I != E; ++I) {
- std::pair<unsigned, unsigned> p = *I;
-
- MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
-
- // Determine the block we need to scan and the relationship between
- // the two registers
- MachineBasicBlock* scan = 0;
- unsigned mode = 0;
- if (MRI.getVRegDef(p.first)->getParent() ==
- MRI.getVRegDef(p.second)->getParent()) {
- scan = MRI.getVRegDef(p.first)->getParent();
- mode = 0; // Same block
- } else if (MDT.dominates(MRI.getVRegDef(p.first)->getParent(),
- MRI.getVRegDef(p.second)->getParent())) {
- scan = MRI.getVRegDef(p.second)->getParent();
- mode = 1; // First dominates second
- } else {
- scan = MRI.getVRegDef(p.first)->getParent();
- mode = 2; // Second dominates first
- }
-
- // If there's an interference, we need to insert copies
- if (interferes(p.first, p.second, scan, LI, mode)) {
- // Insert copies for First
- for (int i = P->getNumOperands() - 1; i >= 2; i-=2) {
- if (P->getOperand(i-1).getReg() == p.first) {
- unsigned SrcReg = p.first;
- MachineBasicBlock* From = P->getOperand(i).getMBB();
-
- Waiting[From].insert(std::make_pair(SrcReg,
- P->getOperand(0).getReg()));
- UsedByAnother.insert(SrcReg);
-
- PHIUnion.erase(SrcReg);
- }
- }
+ }
+
+ // FIXME: Preserve the equivalence classes during copy insertion and use
+ // the preserved equivalence classes instead of recomputing them.
+ RegNodeMap.clear();
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ I != E; ++I) {
+ for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
+ BBI != BBE && BBI->isPHI(); ++BBI) {
+ unsigned DestReg = BBI->getOperand(0).getReg();
+ addReg(DestReg);
+
+ for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
+ unsigned SrcReg = BBI->getOperand(i).getReg();
+ addReg(SrcReg);
+ unionRegs(DestReg, SrcReg);
}
}
-
- // Add the renaming set for this PHI node to our overall renaming information
- for (std::map<unsigned, MachineBasicBlock*>::iterator QI = PHIUnion.begin(),
- QE = PHIUnion.end(); QI != QE; ++QI) {
- DEBUG(dbgs() << "Adding Renaming: " << QI->first << " -> "
- << P->getOperand(0).getReg() << "\n");
- }
-
- RenameSets.insert(std::make_pair(P->getOperand(0).getReg(), PHIUnion));
-
- // Remember which registers are already renamed, so that we don't try to
- // rename them for another PHI node in this block
- for (std::map<unsigned, MachineBasicBlock*>::iterator I = PHIUnion.begin(),
- E = PHIUnion.end(); I != E; ++I)
- ProcessedNames.insert(I->first);
-
- ++P;
}
-}
-/// processPHIUnion - Take a set of candidate registers to be coalesced when
-/// decomposing the PHI instruction. Use the DominanceForest to remove the ones
-/// that are known to interfere, and flag others that need to be checked for
-/// local interferences.
-void StrongPHIElimination::processPHIUnion(MachineInstr* Inst,
- std::map<unsigned, MachineBasicBlock*>& PHIUnion,
- std::vector<StrongPHIElimination::DomForestNode*>& DF,
- std::vector<std::pair<unsigned, unsigned> >& locals) {
-
- std::vector<DomForestNode*> worklist(DF.begin(), DF.end());
- SmallPtrSet<DomForestNode*, 4> visited;
-
- // Code is still in SSA form, so we can use MRI::getVRegDef()
- MachineRegisterInfo& MRI = Inst->getParent()->getParent()->getRegInfo();
-
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
- unsigned DestReg = Inst->getOperand(0).getReg();
-
- // DF walk on the DomForest
- while (!worklist.empty()) {
- DomForestNode* DFNode = worklist.back();
-
- visited.insert(DFNode);
-
- bool inserted = false;
- for (DomForestNode::iterator CI = DFNode->begin(), CE = DFNode->end();
- CI != CE; ++CI) {
- DomForestNode* child = *CI;
-
- // If the current node is live-out of the defining block of one of its
- // children, insert a copy for it. NOTE: The paper actually calls for
- // a more elaborate heuristic for determining whether to insert copies
- // for the child or the parent. In the interest of simplicity, we're
- // just always choosing the parent.
- if (isLiveOut(DFNode->getReg(),
- MRI.getVRegDef(child->getReg())->getParent(), LI)) {
- // Insert copies for parent
- for (int i = Inst->getNumOperands() - 1; i >= 2; i-=2) {
- if (Inst->getOperand(i-1).getReg() == DFNode->getReg()) {
- unsigned SrcReg = DFNode->getReg();
- MachineBasicBlock* From = Inst->getOperand(i).getMBB();
-
- Waiting[From].insert(std::make_pair(SrcReg, DestReg));
- UsedByAnother.insert(SrcReg);
-
- PHIUnion.erase(SrcReg);
- }
- }
-
- // If a node is live-in to the defining block of one of its children, but
- // not live-out, then we need to scan that block for local interferences.
- } else if (isLiveIn(DFNode->getReg(),
- MRI.getVRegDef(child->getReg())->getParent(), LI) ||
- MRI.getVRegDef(DFNode->getReg())->getParent() ==
- MRI.getVRegDef(child->getReg())->getParent()) {
- // Add (p, c) to possible local interferences
- locals.push_back(std::make_pair(DFNode->getReg(), child->getReg()));
+ DenseMap<unsigned, unsigned> RegRenamingMap;
+ bool Changed = false;
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ I != E; ++I) {
+ MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
+ while (BBI != BBE && BBI->isPHI()) {
+ MachineInstr *PHI = BBI;
+
+ assert(PHI->getNumOperands() > 0);
+
+ unsigned SrcReg = PHI->getOperand(1).getReg();
+ unsigned SrcColor = getRegColor(SrcReg);
+ unsigned NewReg = RegRenamingMap[SrcColor];
+ if (!NewReg) {
+ NewReg = SrcReg;
+ RegRenamingMap[SrcColor] = SrcReg;
}
-
- if (!visited.count(child)) {
- worklist.push_back(child);
- inserted = true;
+ MergeLIsAndRename(SrcReg, NewReg);
+
+ unsigned DestReg = PHI->getOperand(0).getReg();
+ if (!InsertedDestCopies.count(DestReg))
+ MergeLIsAndRename(DestReg, NewReg);
+
+ for (unsigned i = 3; i < PHI->getNumOperands(); i += 2) {
+ unsigned SrcReg = PHI->getOperand(i).getReg();
+ MergeLIsAndRename(SrcReg, NewReg);
}
+
+ ++BBI;
+ LI->RemoveMachineInstrFromMaps(PHI);
+ PHI->eraseFromParent();
+ Changed = true;
}
-
- if (!inserted) worklist.pop_back();
}
-}
-/// ScheduleCopies - Insert copies into predecessor blocks, scheduling
-/// them properly so as to avoid the 'lost copy' and the 'virtual swap'
-/// problems.
-///
-/// Based on "Practical Improvements to the Construction and Destruction
-/// of Static Single Assignment Form" by Briggs, et al.
-void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
- std::set<unsigned>& pushed) {
- // FIXME: This function needs to update LiveIntervals
- std::multimap<unsigned, unsigned>& copy_set= Waiting[MBB];
-
- std::multimap<unsigned, unsigned> worklist;
- std::map<unsigned, unsigned> map;
-
- // Setup worklist of initial copies
- for (std::multimap<unsigned, unsigned>::iterator I = copy_set.begin(),
- E = copy_set.end(); I != E; ) {
- map.insert(std::make_pair(I->first, I->first));
- map.insert(std::make_pair(I->second, I->second));
-
- if (!UsedByAnother.count(I->second)) {
- worklist.insert(*I);
-
- // Avoid iterator invalidation
- std::multimap<unsigned, unsigned>::iterator OI = I;
- ++I;
- copy_set.erase(OI);
- } else {
- ++I;
+ // Due to the insertion of copies to split live ranges, the live intervals are
+ // guaranteed to not overlap, except in one case: an original PHI source and a
+ // PHI destination copy. In this case, they have the same value and thus don't
+ // truly intersect, so we merge them into the value live at that point.
+ // FIXME: Is there some better way we can handle this?
+ for (DestCopyMap::iterator I = InsertedDestCopies.begin(),
+ E = InsertedDestCopies.end(); I != E; ++I) {
+ unsigned DestReg = I->first;
+ unsigned DestColor = getRegColor(DestReg);
+ unsigned NewReg = RegRenamingMap[DestColor];
+
+ LiveInterval &DestLI = LI->getInterval(DestReg);
+ LiveInterval &NewLI = LI->getInterval(NewReg);
+
+ assert(DestLI.ranges.size() == 1
+ && "PHI destination copy's live interval should be a single live "
+ "range from the beginning of the BB to the copy instruction.");
+ LiveRange *DestLR = DestLI.begin();
+ VNInfo *NewVNI = NewLI.getVNInfoAt(DestLR->start);
+ if (!NewVNI) {
+ NewVNI = NewLI.createValueCopy(DestLR->valno, LI->getVNInfoAllocator());
+ MachineInstr *CopyInstr = I->second;
+ CopyInstr->getOperand(1).setIsKill(true);
}
+
+ LiveRange NewLR(DestLR->start, DestLR->end, NewVNI);
+ NewLI.addRange(NewLR);
+
+ LI->removeInterval(DestReg);
+ MRI->replaceRegWith(DestReg, NewReg);
}
-
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
- MachineFunction* MF = MBB->getParent();
- MachineRegisterInfo& MRI = MF->getRegInfo();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
- SmallVector<std::pair<unsigned, MachineInstr*>, 4> InsertedPHIDests;
-
- // Iterate over the worklist, inserting copies
- while (!worklist.empty() || !copy_set.empty()) {
- while (!worklist.empty()) {
- std::multimap<unsigned, unsigned>::iterator WI = worklist.begin();
- std::pair<unsigned, unsigned> curr = *WI;
- worklist.erase(WI);
-
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(curr.first);
-
- if (isLiveOut(curr.second, MBB, LI)) {
- // Create a temporary
- unsigned t = MF->getRegInfo().createVirtualRegister(RC);
-
- // Insert copy from curr.second to a temporary at
- // the Phi defining curr.second
- MachineBasicBlock::iterator PI = MRI.getVRegDef(curr.second);
- BuildMI(*PI->getParent(), PI, DebugLoc(), TII->get(TargetOpcode::COPY),
- t).addReg(curr.second);
- DEBUG(dbgs() << "Inserted copy from " << curr.second << " to " << t
- << "\n");
-
- // Push temporary on Stacks
- Stacks[curr.second].push_back(t);
-
- // Insert curr.second in pushed
- pushed.insert(curr.second);
-
- // Create a live interval for this temporary
- InsertedPHIDests.push_back(std::make_pair(t, --PI));
- }
-
- // Insert copy from map[curr.first] to curr.second
- BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
- TII->get(TargetOpcode::COPY), curr.second).addReg(map[curr.first]);
- map[curr.first] = curr.second;
- DEBUG(dbgs() << "Inserted copy from " << curr.first << " to "
- << curr.second << "\n");
-
- // Push this copy onto InsertedPHICopies so we can
- // update LiveIntervals with it.
- MachineBasicBlock::iterator MI = MBB->getFirstTerminator();
- InsertedPHIDests.push_back(std::make_pair(curr.second, --MI));
-
- // If curr.first is a destination in copy_set...
- for (std::multimap<unsigned, unsigned>::iterator I = copy_set.begin(),
- E = copy_set.end(); I != E; )
- if (curr.first == I->second) {
- std::pair<unsigned, unsigned> temp = *I;
- worklist.insert(temp);
-
- // Avoid iterator invalidation
- std::multimap<unsigned, unsigned>::iterator OI = I;
- ++I;
- copy_set.erase(OI);
-
- break;
- } else {
- ++I;
- }
- }
-
- if (!copy_set.empty()) {
- std::multimap<unsigned, unsigned>::iterator CI = copy_set.begin();
- std::pair<unsigned, unsigned> curr = *CI;
- worklist.insert(curr);
- copy_set.erase(CI);
-
- LiveInterval& I = LI.getInterval(curr.second);
- MachineBasicBlock::iterator term = MBB->getFirstTerminator();
- SlotIndex endIdx = SlotIndex();
- if (term != MBB->end())
- endIdx = LI.getInstructionIndex(term);
- else
- endIdx = LI.getMBBEndIdx(MBB);
-
- if (I.liveAt(endIdx)) {
- const TargetRegisterClass *RC =
- MF->getRegInfo().getRegClass(curr.first);
-
- // Insert a copy from dest to a new temporary t at the end of b
- unsigned t = MF->getRegInfo().createVirtualRegister(RC);
- BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
- TII->get(TargetOpcode::COPY), t).addReg(curr.second);
- map[curr.second] = t;
-
- MachineBasicBlock::iterator TI = MBB->getFirstTerminator();
- InsertedPHIDests.push_back(std::make_pair(t, --TI));
+
+ // Adjust the live intervals of all PHI source registers to handle the case
+ // where the PHIs in successor blocks were the only later uses of the source
+ // register.
+ for (SrcCopySet::iterator I = InsertedSrcCopySet.begin(),
+ E = InsertedSrcCopySet.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = I->first;
+ unsigned SrcReg = I->second;
+ if (unsigned RenamedRegister = RegRenamingMap[getRegColor(SrcReg)])
+ SrcReg = RenamedRegister;
+
+ LiveInterval &SrcLI = LI->getInterval(SrcReg);
+
+ bool isLiveOut = false;
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI) {
+ if (SrcLI.liveAt(LI->getMBBStartIdx(*SI))) {
+ isLiveOut = true;
+ break;
}
}
+
+ if (isLiveOut)
+ continue;
+
+ MachineOperand *LastUse = findLastUse(MBB, SrcReg);
+ assert(LastUse);
+ SlotIndex LastUseIndex = LI->getInstructionIndex(LastUse->getParent());
+ SrcLI.removeRange(LastUseIndex.getDefIndex(), LI->getMBBEndIdx(MBB));
+ LastUse->setIsKill(true);
}
-
- // Renumber the instructions so that we can perform the index computations
- // needed to create new live intervals.
- LI.renumber();
-
- // For copies that we inserted at the ends of predecessors, we construct
- // live intervals. This is pretty easy, since we know that the destination
- // register cannot have be in live at that point previously. We just have
- // to make sure that, for registers that serve as inputs to more than one
- // PHI, we don't create multiple overlapping live intervals.
- std::set<unsigned> RegHandled;
- for (SmallVector<std::pair<unsigned, MachineInstr*>, 4>::iterator I =
- InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I) {
- if (RegHandled.insert(I->first).second) {
- LiveInterval& Int = LI.getOrCreateInterval(I->first);
- SlotIndex instrIdx = LI.getInstructionIndex(I->second);
- if (Int.liveAt(instrIdx.getDefIndex()))
- Int.removeRange(instrIdx.getDefIndex(),
- LI.getMBBEndIdx(I->second->getParent()).getNextSlot(),
- true);
-
- LiveRange R = LI.addLiveRangeToEndOfBlock(I->first, I->second);
- R.valno->setCopy(I->second);
- R.valno->def = LI.getInstructionIndex(I->second).getDefIndex();
- }
+
+ LI->renumber();
+
+ Allocator.Reset();
+ RegNodeMap.clear();
+ PHISrcDefs.clear();
+ InsertedSrcCopySet.clear();
+ InsertedSrcCopyMap.clear();
+ InsertedDestCopies.clear();
+
+ return Changed;
+}
+
+void StrongPHIElimination::addReg(unsigned Reg) {
+ if (RegNodeMap.count(Reg))
+ return;
+ RegNodeMap[Reg] = new (Allocator) Node(Reg);
+}
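Nodes are placement-new'ed into the pass's BumpPtrAllocator, so they are never deleted individually; the Allocator.Reset() call at the end of runOnMachineFunction reclaims the whole arena at once (Node has no destructor worth running). The idiom in isolation, as a sketch:

    #include "llvm/Support/Allocator.h"

    struct Tiny { unsigned Value; explicit Tiny(unsigned V) : Value(V) {} };

    void allocatorDemo() {
      llvm::BumpPtrAllocator Alloc;
      // operator new(size_t, BumpPtrAllocator&) comes from Allocator.h.
      Tiny *T = new (Alloc) Tiny(42);
      (void)T;       // never deleted individually...
      Alloc.Reset(); // ...the whole arena is recycled in one call.
    }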
+
+StrongPHIElimination::Node*
+StrongPHIElimination::Node::getLeader() {
+ Node *N = this;
+ Node *Parent = parent.getPointer();
+ Node *Grandparent = Parent->parent.getPointer();
+
+ while (Parent != Grandparent) {
+ N->parent.setPointer(Grandparent);
+ N = Grandparent;
+ Parent = Parent->parent.getPointer();
+ Grandparent = Parent->parent.getPointer();
}
+
+ return Parent;
}
-/// InsertCopies - insert copies into MBB and all of its successors
-void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
- SmallPtrSet<MachineBasicBlock*, 16>& visited) {
- MachineBasicBlock* MBB = MDTN->getBlock();
- visited.insert(MBB);
-
- std::set<unsigned> pushed;
-
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
- // Rewrite register uses from Stacks
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- if (I->isPHI())
- continue;
-
- for (unsigned i = 0; i < I->getNumOperands(); ++i)
- if (I->getOperand(i).isReg() &&
- Stacks[I->getOperand(i).getReg()].size()) {
- // Remove the live range for the old vreg.
- LiveInterval& OldInt = LI.getInterval(I->getOperand(i).getReg());
- LiveInterval::iterator OldLR =
- OldInt.FindLiveRangeContaining(LI.getInstructionIndex(I).getUseIndex());
- if (OldLR != OldInt.end())
- OldInt.removeRange(*OldLR, true);
-
- // Change the register
- I->getOperand(i).setReg(Stacks[I->getOperand(i).getReg()].back());
-
- // Add a live range for the new vreg
- LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
- VNInfo* FirstVN = *Int.vni_begin();
- FirstVN->setHasPHIKill(false);
- LiveRange LR (LI.getMBBStartIdx(I->getParent()),
- LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
- FirstVN);
-
- Int.addRange(LR);
- }
- }
-
- // Schedule the copies for this block
- ScheduleCopies(MBB, pushed);
-
- // Recur down the dominator tree.
- for (MachineDomTreeNode::iterator I = MDTN->begin(),
- E = MDTN->end(); I != E; ++I)
- if (!visited.count((*I)->getBlock()))
- InsertCopies(*I, visited);
-
- // As we exit this block, pop the names we pushed while processing it
- for (std::set<unsigned>::iterator I = pushed.begin(),
- E = pushed.end(); I != E; ++I)
- Stacks[*I].pop_back();
+unsigned StrongPHIElimination::getRegColor(unsigned Reg) {
+ DenseMap<unsigned, Node*>::iterator RI = RegNodeMap.find(Reg);
+ if (RI == RegNodeMap.end())
+ return 0;
+ Node *Node = RI->second;
+ if (Node->parent.getInt() & Node::kRegisterIsolatedFlag)
+ return 0;
+ return Node->getLeader()->value;
}
-bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
- unsigned secondary) {
-
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
- LiveInterval& LHS = LI.getOrCreateInterval(primary);
- LiveInterval& RHS = LI.getOrCreateInterval(secondary);
-
- LI.renumber();
-
- DenseMap<VNInfo*, VNInfo*> VNMap;
- for (LiveInterval::iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
- LiveRange R = *I;
-
- SlotIndex Start = R.start;
- SlotIndex End = R.end;
- if (LHS.getLiveRangeContaining(Start))
- return false;
-
- if (LHS.getLiveRangeContaining(End))
- return false;
-
- LiveInterval::iterator RI = std::upper_bound(LHS.begin(), LHS.end(), R);
- if (RI != LHS.end() && RI->start < End)
- return false;
+void StrongPHIElimination::unionRegs(unsigned Reg1, unsigned Reg2) {
+ Node *Node1 = RegNodeMap[Reg1]->getLeader();
+ Node *Node2 = RegNodeMap[Reg2]->getLeader();
+
+ if (Node1->rank > Node2->rank) {
+ Node2->parent.setPointer(Node1->getLeader());
+ } else if (Node1->rank < Node2->rank) {
+ Node1->parent.setPointer(Node2->getLeader());
+ } else if (Node1 != Node2) {
+ Node2->parent.setPointer(Node1->getLeader());
+ Node1->rank++;
}
-
- for (LiveInterval::iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
- LiveRange R = *I;
- VNInfo* OldVN = R.valno;
- VNInfo*& NewVN = VNMap[OldVN];
- if (!NewVN) {
- NewVN = LHS.createValueCopy(OldVN, LI.getVNInfoAllocator());
- }
-
- LiveRange LR (R.start, R.end, NewVN);
- LHS.addRange(LR);
+}
+
+void StrongPHIElimination::isolateReg(unsigned Reg) {
+ Node *Node = RegNodeMap[Reg];
+ Node->parent.setInt(Node->parent.getInt() | Node::kRegisterIsolatedFlag);
+}
+
+unsigned StrongPHIElimination::getPHIColor(MachineInstr *PHI) {
+ assert(PHI->isPHI());
+
+ unsigned DestReg = PHI->getOperand(0).getReg();
+ Node *DestNode = RegNodeMap[DestReg];
+ if (DestNode->parent.getInt() & Node::kPHIIsolatedFlag)
+ return 0;
+
+ for (unsigned i = 1; i < PHI->getNumOperands(); i += 2) {
+ unsigned SrcColor = getRegColor(PHI->getOperand(i).getReg());
+ if (SrcColor)
+ return SrcColor;
}
-
- LI.removeInterval(RHS.reg);
-
- return true;
+ return 0;
}
-bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
- LiveIntervals& LI = getAnalysis<LiveIntervals>();
-
- // Compute DFS numbers of each block
- computeDFS(Fn);
-
- // Determine which phi node operands need copies
- for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
- if (!I->empty() && I->begin()->isPHI())
- processBlock(I);
-
- // Break interferences where two different phis want to coalesce
- // in the same register.
- std::set<unsigned> seen;
- typedef std::map<unsigned, std::map<unsigned, MachineBasicBlock*> >
- RenameSetType;
- for (RenameSetType::iterator I = RenameSets.begin(), E = RenameSets.end();
- I != E; ++I) {
- for (std::map<unsigned, MachineBasicBlock*>::iterator
- OI = I->second.begin(), OE = I->second.end(); OI != OE; ) {
- if (!seen.count(OI->first)) {
- seen.insert(OI->first);
- ++OI;
+void StrongPHIElimination::isolatePHI(MachineInstr *PHI) {
+ assert(PHI->isPHI());
+ Node *Node = RegNodeMap[PHI->getOperand(0).getReg()];
+ Node->parent.setInt(Node->parent.getInt() | Node::kPHIIsolatedFlag);
+}
+
+/// SplitInterferencesForBasicBlock - traverses a basic block, splitting any
+/// interferences found between registers in the same congruence class. It
+/// takes two DenseMaps as arguments that it also updates:
+///
+/// 1) CurrentDominatingParent, which maps a color to the register in that
+/// congruence class whose definition was most recently seen.
+///
+/// 2) ImmediateDominatingParent, which maps a register to the register in the
+/// same congruence class that most immediately dominates it.
+///
+/// This function assumes that it is being called in a depth-first traversal
+/// of the dominator tree.
+///
+/// The algorithm used here is a generalization of the dominance-based SSA test
+/// for two variables. If there are variables a_1, ..., a_n such that
+///
+/// def(a_1) dom ... dom def(a_n),
+///
+/// then we can test for an interference between any two a_i by only using O(n)
+/// interference tests between pairs of variables. If i < j and a_i and a_j
+/// interfere, then a_i is alive at def(a_j), so it is also alive at def(a_i+1).
+/// Thus, in order to test for an interference involving a_i, we need only check
+/// for a potential interference with a_i+1.
+///
+/// This method can be generalized to arbitrary sets of variables by performing
+/// a depth-first traversal of the dominator tree. As we traverse down a branch
+/// of the dominator tree, we keep track of the current dominating variable and
+/// only perform an interference test with that variable. However, when we go to
+/// another branch of the dominator tree, the definition of the current dominating
+/// variable may no longer dominate the current block. In order to correct this,
+/// we need to use a stack of past choices of the current dominating variable
+/// and pop from this stack until we find a variable whose definition actually
+/// dominates the current block.
+///
+/// There will be one push on this stack for each variable that has become the
+/// current dominating variable, so instead of using an explicit stack we can
+/// simply associate the previous choice for a current dominating variable with
+/// the new choice. This works better in our implementation, where we test for
+/// interference in multiple distinct sets at once.
+void
+StrongPHIElimination::SplitInterferencesForBasicBlock(
+ MachineBasicBlock &MBB,
+ DenseMap<unsigned, unsigned> &CurrentDominatingParent,
+ DenseMap<unsigned, unsigned> &ImmediateDominatingParent) {
+ // Sort defs by their order in the original basic block, as the code below
+ // assumes that it is processing definitions in dominance order.
+ std::vector<MachineInstr*> &DefInstrs = PHISrcDefs[&MBB];
+ std::sort(DefInstrs.begin(), DefInstrs.end(), MIIndexCompare(LI));
+
+ for (std::vector<MachineInstr*>::const_iterator BBI = DefInstrs.begin(),
+ BBE = DefInstrs.end(); BBI != BBE; ++BBI) {
+ for (MachineInstr::const_mop_iterator I = (*BBI)->operands_begin(),
+ E = (*BBI)->operands_end(); I != E; ++I) {
+ const MachineOperand &MO = *I;
+
+ // FIXME: This would be faster if it were possible to bail out of checking
+ // an instruction's operands after the explicit defs, but this is incorrect
+ // for variadic instructions, which may appear before register allocation
+ // in the future.
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+
+ unsigned DestReg = MO.getReg();
+ if (!DestReg || !TargetRegisterInfo::isVirtualRegister(DestReg))
+ continue;
+
+ // If the virtual register being defined is not used in any PHI or has
+ // already been isolated, then there are no more interferences to check.
+ unsigned DestColor = getRegColor(DestReg);
+ if (!DestColor)
+ continue;
+
+ // The input to this pass sometimes is not in SSA form in every basic
+ // block, as some virtual registers have redefinitions. We could eliminate
+ // this by fixing the passes that generate the non-SSA code, or we could
+ // handle it here by tracking defining machine instructions rather than
+ // virtual registers. For now, we just handle the situation conservatively
+ // in a way that will possibly lead to false interferences.
+ unsigned &CurrentParent = CurrentDominatingParent[DestColor];
+ unsigned NewParent = CurrentParent;
+ if (NewParent == DestReg)
+ continue;
+
+ // Pop registers from the stack represented by ImmediateDominatingParent
+ // until we find a parent that dominates the current instruction.
+ while (NewParent && (!DT->dominates(MRI->getVRegDef(NewParent), *BBI)
+ || !getRegColor(NewParent)))
+ NewParent = ImmediateDominatingParent[NewParent];
+
+ // If NewParent is nonzero, then its definition dominates the current
+ // instruction, so it is only necessary to check for the liveness of
+ // NewParent in order to check for an interference.
+ if (NewParent
+ && LI->getInterval(NewParent).liveAt(LI->getInstructionIndex(*BBI))) {
+ // If there is an interference, always isolate the new register. This
+ // could be improved by using a heuristic that decides which of the two
+ // registers to isolate.
+ isolateReg(DestReg);
+ CurrentParent = NewParent;
} else {
- Waiting[OI->second].insert(std::make_pair(OI->first, I->first));
- unsigned reg = OI->first;
- ++OI;
- I->second.erase(reg);
- DEBUG(dbgs() << "Removing Renaming: " << reg << " -> " << I->first
- << "\n");
+ // If there is no interference, update ImmediateDominatingParent and set
+ // the CurrentDominatingParent for this color to the current register.
+ ImmediateDominatingParent[DestReg] = NewParent;
+ CurrentParent = DestReg;
}
}
}
-
- // Insert copies
- // FIXME: This process should probably preserve LiveIntervals
- SmallPtrSet<MachineBasicBlock*, 16> visited;
- MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
- InsertCopies(MDT.getRootNode(), visited);
-
- // Perform renaming
- for (RenameSetType::iterator I = RenameSets.begin(), E = RenameSets.end();
- I != E; ++I)
- while (I->second.size()) {
- std::map<unsigned, MachineBasicBlock*>::iterator SI = I->second.begin();
-
- DEBUG(dbgs() << "Renaming: " << SI->first << " -> " << I->first << "\n");
-
- if (SI->first != I->first) {
- if (mergeLiveIntervals(I->first, SI->first)) {
- Fn.getRegInfo().replaceRegWith(SI->first, I->first);
-
- if (RenameSets.count(SI->first)) {
- I->second.insert(RenameSets[SI->first].begin(),
- RenameSets[SI->first].end());
- RenameSets.erase(SI->first);
- }
- } else {
- // Insert a last-minute copy if a conflict was detected.
- const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
- BuildMI(*SI->second, SI->second->getFirstTerminator(), DebugLoc(),
- TII->get(TargetOpcode::COPY), I->first).addReg(SI->first);
-
- LI.renumber();
-
- LiveInterval& Int = LI.getOrCreateInterval(I->first);
- SlotIndex instrIdx =
- LI.getInstructionIndex(--SI->second->getFirstTerminator());
- if (Int.liveAt(instrIdx.getDefIndex()))
- Int.removeRange(instrIdx.getDefIndex(),
- LI.getMBBEndIdx(SI->second).getNextSlot(), true);
-
- LiveRange R = LI.addLiveRangeToEndOfBlock(I->first,
- --SI->second->getFirstTerminator());
- R.valno->setCopy(--SI->second->getFirstTerminator());
- R.valno->def = instrIdx.getDefIndex();
-
- DEBUG(dbgs() << "Renaming failed: " << SI->first << " -> "
- << I->first << "\n");
- }
+
+ // We now walk the PHIs in successor blocks and check for interferences. This
+ // is necessary because the uses of a PHI's operands are logically contained in
+ // the predecessor block. The def of a PHI's destination register is processed
+ // along with the other defs in a basic block.
+
+ CurrentPHIForColor.clear();
+
+ for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
+ SE = MBB.succ_end(); SI != SE; ++SI) {
+ for (MachineBasicBlock::iterator BBI = (*SI)->begin(), BBE = (*SI)->end();
+ BBI != BBE && BBI->isPHI(); ++BBI) {
+ MachineInstr *PHI = BBI;
+
+ // If a PHI is already isolated, either by being isolated directly or
+ // having all of its operands isolated, ignore it.
+ unsigned Color = getPHIColor(PHI);
+ if (!Color)
+ continue;
+
+ // Find the index of the PHI operand that corresponds to this basic block.
+ unsigned PredIndex;
+ for (PredIndex = 1; PredIndex < PHI->getNumOperands(); PredIndex += 2) {
+ if (PHI->getOperand(PredIndex + 1).getMBB() == &MBB)
+ break;
}
-
- LiveInterval& Int = LI.getOrCreateInterval(I->first);
- const LiveRange* LR =
- Int.getLiveRangeContaining(LI.getMBBEndIdx(SI->second));
- LR->valno->setHasPHIKill(true);
-
- I->second.erase(SI->first);
+ assert(PredIndex < PHI->getNumOperands());
+ unsigned PredOperandReg = PHI->getOperand(PredIndex).getReg();
+
+ // Pop registers from the stack represented by ImmediateDominatingParent
+ // until we find a parent that dominates the current instruction.
+ unsigned &CurrentParent = CurrentDominatingParent[Color];
+ unsigned NewParent = CurrentParent;
+ while (NewParent
+ && (!DT->dominates(MRI->getVRegDef(NewParent)->getParent(), &MBB)
+ || !getRegColor(NewParent)))
+ NewParent = ImmediateDominatingParent[NewParent];
+ CurrentParent = NewParent;
+
+ // If there is an interference with a register, always isolate the
+ // register rather than the PHI. It is also possible to isolate the
+ // PHI, but that introduces copies for all of the registers involved
+ // in that PHI.
+ if (NewParent && LI->isLiveOutOfMBB(LI->getInterval(NewParent), &MBB)
+ && NewParent != PredOperandReg)
+ isolateReg(NewParent);
+
+ std::pair<MachineInstr*, unsigned>
+ &CurrentPHI = CurrentPHIForColor[Color];
+
+ // If two PHIs have the same operand from every shared predecessor, then
+ // they don't actually interfere. Otherwise, isolate the current PHI. This
+ // could possibly be improved, e.g. we could isolate the PHI with the
+ // fewest operands.
+ if (CurrentPHI.first && CurrentPHI.second != PredOperandReg)
+ isolatePHI(PHI);
+ else
+ CurrentPHI = std::make_pair(PHI, PredOperandReg);
}
-
- // Remove PHIs
- std::vector<MachineInstr*> phis;
- for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
- for (MachineBasicBlock::iterator BI = I->begin(), BE = I->end();
- BI != BE; ++BI)
- if (BI->isPHI())
- phis.push_back(BI);
}
-
- for (std::vector<MachineInstr*>::iterator I = phis.begin(), E = phis.end();
- I != E; ) {
- MachineInstr* PInstr = *(I++);
-
- // If this is a dead PHI node, then remove it from LiveIntervals.
- unsigned DestReg = PInstr->getOperand(0).getReg();
- LiveInterval& PI = LI.getInterval(DestReg);
- if (PInstr->registerDefIsDead(DestReg)) {
- if (PI.containsOneValue()) {
- LI.removeInterval(DestReg);
+}
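
Aside: a minimal standalone sketch of the interference walk above, assuming a single straight-line block so that "dominates" collapses to "defined earlier" and liveness is just a last-use position. Every name below is a toy stand-in echoing the code, not LLVM API:

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Def { int Pos, Color, LastUse; bool Isolated = false; };

    int main() {
      // Defs in program (= dominance) order: {position, color, last use}.
      std::vector<Def> Defs = {{0, 1, 5}, {2, 1, 3}, {6, 1, 8}};
      std::map<int, int> CurrentParent;              // color -> top-of-stack def
      std::map<int, int> ImmediateDominatingParent;  // def -> next def down
      for (int I = 0; I < (int)Defs.size(); ++I) {
        int Parent = CurrentParent.count(Defs[I].Color)
                         ? CurrentParent[Defs[I].Color] : -1;
        // Pop defs that lost their color (were isolated), mirroring the
        // !getRegColor(NewParent) half of the pop loop above.
        while (Parent != -1 && Defs[Parent].Isolated)
          Parent = ImmediateDominatingParent.count(Parent)
                       ? ImmediateDominatingParent[Parent] : -1;
        if (Parent != -1 && Defs[Parent].LastUse > Defs[I].Pos) {
          Defs[I].Isolated = true;               // parent live here: interfere
          CurrentParent[Defs[I].Color] = Parent;
        } else {
          ImmediateDominatingParent[I] = Parent; // push the new def
          CurrentParent[Defs[I].Color] = I;
        }
      }
      for (const Def &D : Defs)
        std::printf("def@%d %s\n", D.Pos, D.Isolated ? "isolated" : "kept");
    }
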
+
+void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
+ MachineBasicBlock *MBB) {
+ assert(PHI->isPHI());
+ ++NumPHIsLowered;
+ unsigned PHIColor = getPHIColor(PHI);
+
+ for (unsigned i = 1; i < PHI->getNumOperands(); i += 2) {
+ MachineOperand &SrcMO = PHI->getOperand(i);
+
+ // If a source is defined by an implicit def, there is no need to insert a
+ // copy in the predecessor.
+ if (SrcMO.isUndef())
+ continue;
+
+ unsigned SrcReg = SrcMO.getReg();
+ assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
+ "Machine PHI Operands must all be virtual registers!");
+
+ MachineBasicBlock *PredBB = PHI->getOperand(i + 1).getMBB();
+ unsigned SrcColor = getRegColor(SrcReg);
+
+ // If neither the PHI nor the operand were isolated, then we only need to
+ // set the phi-kill flag on the VNInfo at this PHI.
+ if (PHIColor && SrcColor == PHIColor) {
+ LiveInterval &SrcInterval = LI->getInterval(SrcReg);
+ SlotIndex PredIndex = LI->getMBBEndIdx(PredBB);
+ VNInfo *SrcVNI = SrcInterval.getVNInfoAt(PredIndex.getPrevIndex());
+ assert(SrcVNI);
+ SrcVNI->setHasPHIKill(true);
+ continue;
+ }
+
+ unsigned CopyReg = 0;
+ if (PHIColor) {
+ SrcCopyMap::const_iterator I
+ = InsertedSrcCopyMap.find(std::make_pair(PredBB, PHIColor));
+ CopyReg
+ = I != InsertedSrcCopyMap.end() ? I->second->getOperand(0).getReg() : 0;
+ }
+
+ if (!CopyReg) {
+ const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
+ CopyReg = MRI->createVirtualRegister(RC);
+
+ MachineBasicBlock::iterator
+ CopyInsertPoint = findPHICopyInsertPoint(PredBB, MBB, SrcReg);
+ unsigned SrcSubReg = SrcMO.getSubReg();
+ MachineInstr *CopyInstr = BuildMI(*PredBB,
+ CopyInsertPoint,
+ PHI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ CopyReg).addReg(SrcReg, 0, SrcSubReg);
+ LI->InsertMachineInstrInMaps(CopyInstr);
+ ++NumSrcCopiesInserted;
+
+ // addLiveRangeToEndOfBlock() also adds the phi-kill flag to the VNInfo for
+ // the newly added range.
+ LI->addLiveRangeToEndOfBlock(CopyReg, CopyInstr);
+ InsertedSrcCopySet.insert(std::make_pair(PredBB, SrcReg));
+
+ addReg(CopyReg);
+ if (PHIColor) {
+ unionRegs(PHIColor, CopyReg);
+ assert(getRegColor(CopyReg) != CopyReg);
} else {
- SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
- PI.removeRange(*PI.getLiveRangeContaining(idx), true);
- }
- } else {
- // Trim live intervals of input registers. They are no longer live into
- // this block if they died after the PHI. If they lived after it, don't
- // trim them because they might have other legitimate uses.
- for (unsigned i = 1; i < PInstr->getNumOperands(); i += 2) {
- unsigned reg = PInstr->getOperand(i).getReg();
-
- MachineBasicBlock* MBB = PInstr->getOperand(i+1).getMBB();
- LiveInterval& InputI = LI.getInterval(reg);
- if (MBB != PInstr->getParent() &&
- InputI.liveAt(LI.getMBBStartIdx(PInstr->getParent())) &&
- InputI.expiredAt(LI.getInstructionIndex(PInstr).getNextIndex()))
- InputI.removeRange(LI.getMBBStartIdx(PInstr->getParent()),
- LI.getInstructionIndex(PInstr),
- true);
+ PHIColor = CopyReg;
+ assert(getRegColor(CopyReg) == CopyReg);
}
-
- // If the PHI is not dead, then the valno defined by the PHI
- // now has an unknown def.
- SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
- const LiveRange* PLR = PI.getLiveRangeContaining(idx);
- PLR->valno->setIsPHIDef(true);
- LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
- PLR->start, PLR->valno);
- PI.addRange(R);
+
+ if (!InsertedSrcCopyMap.count(std::make_pair(PredBB, PHIColor)))
+ InsertedSrcCopyMap[std::make_pair(PredBB, PHIColor)] = CopyInstr;
}
-
- LI.RemoveMachineInstrFromMaps(PInstr);
- PInstr->eraseFromParent();
+
+ SrcMO.setReg(CopyReg);
+
+ // If SrcReg is not live beyond the PHI, trim its interval so that it is no
+ // longer live-in to MBB. Note that SrcReg may appear in other PHIs that are
+ // processed later, but this is still correct to do at this point because we
+ // never rely on LiveIntervals being correct while inserting copies.
+ // FIXME: Should this just count uses at PHIs like the normal PHIElimination
+ // pass does?
+ LiveInterval &SrcLI = LI->getInterval(SrcReg);
+ SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
+ SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
+ SlotIndex NextInstrIndex = PHIIndex.getNextIndex();
+ if (SrcLI.liveAt(MBBStartIndex) && SrcLI.expiredAt(NextInstrIndex))
+ SrcLI.removeRange(MBBStartIndex, PHIIndex, true);
}
-
- LI.renumber();
-
- return true;
+
+ unsigned DestReg = PHI->getOperand(0).getReg();
+ unsigned DestColor = getRegColor(DestReg);
+
+ if (PHIColor && DestColor == PHIColor) {
+ LiveInterval &DestLI = LI->getInterval(DestReg);
+
+ // Set the phi-def flag for the VN at this PHI.
+ SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
+ VNInfo *DestVNI = DestLI.getVNInfoAt(PHIIndex.getDefIndex());
+ assert(DestVNI);
+ DestVNI->setIsPHIDef(true);
+
+ // Prior to PHI elimination, the live ranges of PHIs begin at their defining
+ // instruction. After PHI elimination, PHI instructions are replaced by VNs
+ // with the phi-def flag set, and the live ranges of these VNs start at the
+ // beginning of the basic block.
+ SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
+ DestVNI->def = MBBStartIndex;
+ DestLI.addRange(LiveRange(MBBStartIndex,
+ PHIIndex.getDefIndex(),
+ DestVNI));
+ return;
+ }
+
+ const TargetRegisterClass *RC = MRI->getRegClass(DestReg);
+ unsigned CopyReg = MRI->createVirtualRegister(RC);
+
+ MachineInstr *CopyInstr = BuildMI(*MBB,
+ MBB->SkipPHIsAndLabels(MBB->begin()),
+ PHI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ DestReg).addReg(CopyReg);
+ LI->InsertMachineInstrInMaps(CopyInstr);
+ PHI->getOperand(0).setReg(CopyReg);
+ ++NumDestCopiesInserted;
+
+ // Add the region from the beginning of MBB to the copy instruction to
+ // CopyReg's live interval, and give the VNInfo the phi-def flag.
+ LiveInterval &CopyLI = LI->getOrCreateInterval(CopyReg);
+ SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
+ SlotIndex DestCopyIndex = LI->getInstructionIndex(CopyInstr);
+ VNInfo *CopyVNI = CopyLI.getNextValue(MBBStartIndex,
+ CopyInstr,
+ LI->getVNInfoAllocator());
+ CopyVNI->setIsPHIDef(true);
+ CopyLI.addRange(LiveRange(MBBStartIndex,
+ DestCopyIndex.getDefIndex(),
+ CopyVNI));
+
+ // Adjust DestReg's live interval to account for its new definition at
+ // CopyInstr.
+ LiveInterval &DestLI = LI->getOrCreateInterval(DestReg);
+ SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
+ DestLI.removeRange(PHIIndex.getDefIndex(), DestCopyIndex.getDefIndex());
+
+ VNInfo *DestVNI = DestLI.getVNInfoAt(DestCopyIndex.getDefIndex());
+ assert(DestVNI);
+ DestVNI->def = DestCopyIndex.getDefIndex();
+
+ InsertedDestCopies[CopyReg] = CopyInstr;
+}
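
Aside: with the live-interval bookkeeping stripped away, InsertCopiesForPHI is classic PHI lowering — give each source a fresh register copied into at the end of its predecessor, then give the destination one copy at the top of the merge block. A standalone sketch on a toy model (the register numbers and printed pseudo-MIR are invented for illustration):

    #include <cstdio>
    #include <utility>
    #include <vector>

    static int NextReg = 100;  // fresh virtual register counter

    struct Phi {
      int DestReg;
      std::vector<std::pair<int, int>> Srcs;  // (pred block id, src reg)
    };

    int main() {
      Phi P{7, {{1, 3}, {2, 4}}};
      // Insert a copy in each predecessor and rewrite the PHI operand.
      for (auto &S : P.Srcs) {
        int CopyReg = NextReg++;
        std::printf("bb%d: reg%d = COPY reg%d  ; before the terminator\n",
                    S.first, CopyReg, S.second);
        S.second = CopyReg;
      }
      // Give the destination its own copy so the PHI itself can be erased.
      int CopyReg = NextReg++;
      std::printf("merge: reg%d = COPY reg%d ; after the PHIs\n",
                  P.DestReg, CopyReg);
      P.DestReg = CopyReg;
    }
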
+
+void StrongPHIElimination::MergeLIsAndRename(unsigned Reg, unsigned NewReg) {
+ if (Reg == NewReg)
+ return;
+
+ LiveInterval &OldLI = LI->getInterval(Reg);
+ LiveInterval &NewLI = LI->getInterval(NewReg);
+
+ // Merge the live ranges of the two registers.
+ DenseMap<VNInfo*, VNInfo*> VNMap;
+ for (LiveInterval::iterator LRI = OldLI.begin(), LRE = OldLI.end();
+ LRI != LRE; ++LRI) {
+ LiveRange OldLR = *LRI;
+ VNInfo *OldVN = OldLR.valno;
+
+ VNInfo *&NewVN = VNMap[OldVN];
+ if (!NewVN) {
+ NewVN = NewLI.createValueCopy(OldVN, LI->getVNInfoAllocator());
+ }
+
+ LiveRange LR(OldLR.start, OldLR.end, NewVN);
+ NewLI.addRange(LR);
+ }
+
+ // Remove the LiveInterval for the register being renamed and replace all
+ // of its defs and uses with the new register.
+ LI->removeInterval(Reg);
+ MRI->replaceRegWith(Reg, NewReg);
}
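
Aside: the value-number remapping above is the whole trick — ranges of the old interval that shared one VNInfo must share exactly one counterpart in the new interval. A standalone sketch with toy types (Range and integer value numbers stand in for LiveRange/VNInfo):

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Range { int Start, End, ValNo; };

    int main() {
      std::vector<Range> Old = {{0, 4, 0}, {6, 9, 0}, {10, 12, 1}};
      std::vector<Range> New;
      int NextValNo = 0;
      std::map<int, int> VNMap;  // old value number -> new value number
      for (const Range &R : Old) {
        auto It = VNMap.find(R.ValNo);
        if (It == VNMap.end())               // first range with this value
          It = VNMap.emplace(R.ValNo, NextValNo++).first;
        New.push_back({R.Start, R.End, It->second});
      }
      for (const Range &R : New)
        std::printf("[%d,%d):%d\n", R.Start, R.End, R.ValNo);
    }
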
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index a815b36..04d3d31 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -350,7 +350,7 @@ void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
- if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isDef()) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
@@ -459,15 +459,19 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
// duplicate only one, because one branch instruction can be eliminated to
// compensate for the duplication.
unsigned MaxDuplicateCount;
- if (MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ if (TailDuplicateSize.getNumOccurrences() == 0 &&
+ MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
MaxDuplicateCount = 1;
else
MaxDuplicateCount = TailDuplicateSize;
if (PreRegAlloc) {
- // Pre-regalloc tail duplication hurts compile time and doesn't help
- // much except for indirect branches.
- if (TailBB->empty() || !TailBB->back().getDesc().isIndirectBranch())
+ if (TailBB->empty())
+ return false;
+ const TargetInstrDesc &TID = TailBB->back().getDesc();
+ // Pre-regalloc tail duplication hurts compile time and doesn't help
+ // much except for indirect branches and returns.
+ if (!TID.isIndirectBranch() && !TID.isReturn())
return false;
// If the target has hardware branch prediction that can handle indirect
// branches, duplicating them can often make them predictable when there
@@ -500,9 +504,10 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
if (!I->isPHI() && !I->isDebugValue())
InstrCount += 1;
}
- // Heuristically, don't tail-duplicate calls if it would expand code size,
- // as it's less likely to be worth the extra cost.
- if (InstrCount > 1 && HasCall)
+ // Don't tail-duplicate calls before register allocation. Calls present a
+ // barrier to register allocation so duplicating them may end up increasing
+ // spills.
+ if (InstrCount > 1 && (PreRegAlloc && HasCall))
return false;
DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');
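
Aside: the gating logic of this hunk, restated as one standalone function. The parameter names are invented; DefaultSize plays the role of TailDuplicateSize's default and FlagGiven the role of getNumOccurrences() != 0:

    #include <cstdio>

    struct Block { int Instrs; bool EndsIndirectOrReturn; bool HasCall; };

    static bool shouldTailDuplicate(const Block &B, bool PreRegAlloc,
                                    bool OptForSize, int FlagValue,
                                    bool FlagGiven, int DefaultSize = 2) {
      // An explicit -tail-dup-size now overrides the optsize clamp.
      int MaxCount = (!FlagGiven && OptForSize) ? 1
                   : (FlagGiven ? FlagValue : DefaultSize);
      if (PreRegAlloc && !B.EndsIndirectOrReturn)
        return false;               // pre-RA: only indirect branches/returns
      if (B.Instrs > MaxCount)
        return false;
      if (B.Instrs > 1 && PreRegAlloc && B.HasCall)
        return false;               // calls block the register allocator
      return true;
    }

    int main() {
      Block B{2, true, false};
      std::printf("%d\n", shouldTailDuplicate(B, true, true, 0, false));
    }
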
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index 6e4a0d8..15340a3 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -22,13 +22,18 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static cl::opt<bool> DisableHazardRecognizer(
+ "disable-sched-hazard", cl::Hidden, cl::init(false),
+ cl::desc("Disable hazard detection during preRA scheduling"));
+
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
@@ -135,7 +140,7 @@ bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isPredicable())
return false;
-
+
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (TID.OpInfo[i].isPredicate()) {
MachineOperand &MO = MI->getOperand(i);
@@ -166,8 +171,10 @@ void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
-bool TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const {
+bool
+TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const {
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
@@ -252,9 +259,9 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- Flags, /*Offset=*/0,
- MFI.getObjectSize(FI),
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ Flags, MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
@@ -329,8 +336,13 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
const TargetInstrDesc &TID = MI->getDesc();
// Avoid instructions obviously unsafe for remat.
- if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
- TID.mayStore())
+ if (TID.isNotDuplicable() || TID.mayStore() ||
+ MI->hasUnmodeledSideEffects())
+ return false;
+
+ // Don't remat inline asm. We have no idea how expensive it is
+ // even if it's side effect free.
+ if (MI->isInlineAsm())
return false;
// Avoid instructions which load from potentially varying memory.
@@ -414,8 +426,24 @@ bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
return false;
}
+// Provide a global flag for disabling the PreRA hazard recognizer that targets
+// may choose to honor.
+bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
+ return !DisableHazardRecognizer;
+}
+
+// Default implementation of CreateTargetHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const {
+ // Dummy hazard recognizer allows all instructions to issue.
+ return new ScheduleHazardRecognizer();
+}
+
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
-CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
- return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
+ return (ScheduleHazardRecognizer *)
+ new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
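
Aside: the seam being introduced here is a recognizer whose default answer is "no hazard", with targets free to substitute a scoreboard. A standalone sketch of that shape (the interface names and the two-cycle functional unit are invented purely for the demo):

    #include <cstdio>

    struct Instr { int Opcode; };

    class HazardRecognizer {
    public:
      enum HazardType { NoHazard, Hazard };
      virtual ~HazardRecognizer() = default;
      // Default implementation lets every instruction issue immediately,
      // which is exactly the role of the dummy recognizer returned above.
      virtual HazardType getHazardType(const Instr &) { return NoHazard; }
      virtual void emitInstruction(const Instr &) {}
    };

    class ScoreboardRecognizer : public HazardRecognizer {
      int BusyUntil = 0, Cycle = 0;
    public:
      HazardType getHazardType(const Instr &) override {
        return Cycle < BusyUntil ? Hazard : NoHazard;  // unit still busy?
      }
      void emitInstruction(const Instr &) override {
        BusyUntil = Cycle + 2;  // pretend a 2-cycle functional unit
        ++Cycle;
      }
    };

    int main() {
      ScoreboardRecognizer SR;
      Instr I{0};
      SR.emitInstruction(I);
      std::printf("%s\n", SR.getHazardType(I) == HazardRecognizer::Hazard
                              ? "hazard" : "ok");
    }
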
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index f1e10ee..0b7bd98 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -29,10 +29,12 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
using namespace llvm;
using namespace dwarf;
@@ -45,81 +47,81 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
TargetLoweringObjectFile::Initialize(Ctx, TM);
BSSSection =
- getContext().getELFSection(".bss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".bss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getBSS());
TextSection =
- getContext().getELFSection(".text", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_EXECINSTR |
- MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".text", ELF::SHT_PROGBITS,
+ ELF::SHF_EXECINSTR |
+ ELF::SHF_ALLOC,
SectionKind::getText());
DataSection =
- getContext().getELFSection(".data", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".data", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getDataRel());
ReadOnlySection =
- getContext().getELFSection(".rodata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC,
SectionKind::getReadOnly());
TLSDataSection =
- getContext().getELFSection(".tdata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
- MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".tdata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_TLS |
+ ELF::SHF_WRITE,
SectionKind::getThreadData());
TLSBSSSection =
- getContext().getELFSection(".tbss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
- MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".tbss", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_TLS |
+ ELF::SHF_WRITE,
SectionKind::getThreadBSS());
DataRelSection =
- getContext().getELFSection(".data.rel", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".data.rel", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getDataRel());
DataRelLocalSection =
- getContext().getELFSection(".data.rel.local", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".data.rel.local", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getDataRelLocal());
DataRelROSection =
- getContext().getELFSection(".data.rel.ro", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".data.rel.ro", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getReadOnlyWithRel());
DataRelROLocalSection =
- getContext().getELFSection(".data.rel.ro.local", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".data.rel.ro.local", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getReadOnlyWithRelLocal());
MergeableConst4Section =
- getContext().getELFSection(".rodata.cst4", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ getContext().getELFSection(".rodata.cst4", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_MERGE,
SectionKind::getMergeableConst4());
MergeableConst8Section =
- getContext().getELFSection(".rodata.cst8", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ getContext().getELFSection(".rodata.cst8", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_MERGE,
SectionKind::getMergeableConst8());
MergeableConst16Section =
- getContext().getELFSection(".rodata.cst16", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ getContext().getELFSection(".rodata.cst16", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_MERGE,
SectionKind::getMergeableConst16());
StaticCtorSection =
- getContext().getELFSection(".ctors", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".ctors", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getDataRel());
StaticDtorSection =
- getContext().getELFSection(".dtors", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ getContext().getELFSection(".dtors", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
SectionKind::getDataRel());
// Exception Handling Sections.
@@ -129,50 +131,50 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
// runtime hit for C++ apps. Either the contents of the LSDA need to be
// adjusted or this should be a data section.
LSDASection =
- getContext().getELFSection(".gcc_except_table", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".gcc_except_table", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC,
SectionKind::getReadOnly());
- EHFrameSection =
- getContext().getELFSection(".eh_frame", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
- SectionKind::getDataRel());
-
// Debug Info Sections.
DwarfAbbrevSection =
- getContext().getELFSection(".debug_abbrev", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_abbrev", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfInfoSection =
- getContext().getELFSection(".debug_info", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_info", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfLineSection =
- getContext().getELFSection(".debug_line", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_line", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfFrameSection =
- getContext().getELFSection(".debug_frame", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_frame", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfPubNamesSection =
- getContext().getELFSection(".debug_pubnames", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_pubnames", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfPubTypesSection =
- getContext().getELFSection(".debug_pubtypes", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_pubtypes", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfStrSection =
- getContext().getELFSection(".debug_str", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_str", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfLocSection =
- getContext().getELFSection(".debug_loc", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_loc", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfARangesSection =
- getContext().getELFSection(".debug_aranges", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_aranges", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfRangesSection =
- getContext().getELFSection(".debug_ranges", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_ranges", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfMacroInfoSection =
- getContext().getELFSection(".debug_macinfo", MCSectionELF::SHT_PROGBITS, 0,
+ getContext().getELFSection(".debug_macinfo", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
}
+const MCSection *TargetLoweringObjectFileELF::getEHFrameSection() const {
+ return getContext().getELFSection(".eh_frame", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+}
static SectionKind
getELFKindForNamedSection(StringRef Name, SectionKind K) {
@@ -208,18 +210,18 @@ getELFKindForNamedSection(StringRef Name, SectionKind K) {
static unsigned getELFSectionType(StringRef Name, SectionKind K) {
if (Name == ".init_array")
- return MCSectionELF::SHT_INIT_ARRAY;
+ return ELF::SHT_INIT_ARRAY;
if (Name == ".fini_array")
- return MCSectionELF::SHT_FINI_ARRAY;
+ return ELF::SHT_FINI_ARRAY;
if (Name == ".preinit_array")
- return MCSectionELF::SHT_PREINIT_ARRAY;
+ return ELF::SHT_PREINIT_ARRAY;
if (K.isBSS() || K.isThreadBSS())
- return MCSectionELF::SHT_NOBITS;
+ return ELF::SHT_NOBITS;
- return MCSectionELF::SHT_PROGBITS;
+ return ELF::SHT_PROGBITS;
}
@@ -228,24 +230,24 @@ getELFSectionFlags(SectionKind K) {
unsigned Flags = 0;
if (!K.isMetadata())
- Flags |= MCSectionELF::SHF_ALLOC;
+ Flags |= ELF::SHF_ALLOC;
if (K.isText())
- Flags |= MCSectionELF::SHF_EXECINSTR;
+ Flags |= ELF::SHF_EXECINSTR;
if (K.isWriteable())
- Flags |= MCSectionELF::SHF_WRITE;
+ Flags |= ELF::SHF_WRITE;
if (K.isThreadLocal())
- Flags |= MCSectionELF::SHF_TLS;
+ Flags |= ELF::SHF_TLS;
// K.isMergeableConst() is left out to honour PR4650
if (K.isMergeableCString() || K.isMergeableConst4() ||
K.isMergeableConst8() || K.isMergeableConst16())
- Flags |= MCSectionELF::SHF_MERGE;
+ Flags |= ELF::SHF_MERGE;
if (K.isMergeableCString())
- Flags |= MCSectionELF::SHF_STRINGS;
+ Flags |= ELF::SHF_STRINGS;
return Flags;
}
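
Aside: getELFSectionFlags above compiles down to a handful of bit tests. A standalone rendering using the spec's SHF_* values, with the mergeable-const4/8/16 distinction collapsed into one query for brevity:

    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
      SHF_WRITE = 0x1, SHF_ALLOC = 0x2, SHF_EXECINSTR = 0x4,
      SHF_MERGE = 0x10, SHF_STRINGS = 0x20, SHF_TLS = 0x400,
    };

    struct Kind {
      bool Metadata, Text, Writeable, ThreadLocal, MergeableCString,
           MergeableConst;
    };

    static uint32_t elfSectionFlags(const Kind &K) {
      uint32_t Flags = 0;
      if (!K.Metadata)   Flags |= SHF_ALLOC;   // metadata is not loaded
      if (K.Text)        Flags |= SHF_EXECINSTR;
      if (K.Writeable)   Flags |= SHF_WRITE;
      if (K.ThreadLocal) Flags |= SHF_TLS;
      if (K.MergeableCString || K.MergeableConst)
        Flags |= SHF_MERGE;
      if (K.MergeableCString)
        Flags |= SHF_STRINGS;
      return Flags;
    }

    int main() {
      Kind Text{false, true, false, false, false, false};
      std::printf(".text flags = 0x%x\n", (unsigned)elfSectionFlags(Text));
    }
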
@@ -261,23 +263,7 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
return getContext().getELFSection(SectionName,
getELFSectionType(SectionName, Kind),
- getELFSectionFlags(Kind), Kind, true);
-}
-
-static const char *getSectionPrefixForUniqueGlobal(SectionKind Kind) {
- if (Kind.isText()) return ".gnu.linkonce.t.";
- if (Kind.isReadOnly()) return ".gnu.linkonce.r.";
-
- if (Kind.isThreadData()) return ".gnu.linkonce.td.";
- if (Kind.isThreadBSS()) return ".gnu.linkonce.tb.";
-
- if (Kind.isDataNoRel()) return ".gnu.linkonce.d.";
- if (Kind.isDataRelLocal()) return ".gnu.linkonce.d.rel.local.";
- if (Kind.isDataRel()) return ".gnu.linkonce.d.rel.";
- if (Kind.isReadOnlyWithRelLocal()) return ".gnu.linkonce.d.rel.ro.local.";
-
- assert(Kind.isReadOnlyWithRel() && "Unknown section kind");
- return ".gnu.linkonce.d.rel.ro.";
+ getELFSectionFlags(Kind), Kind);
}
/// getSectionPrefixForGlobal - Return the section prefix name used by options
@@ -307,7 +293,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
bool EmitUniquedSection;
if (Kind.isText())
EmitUniquedSection = TM.getFunctionSections();
- else
+ else
EmitUniquedSection = TM.getDataSections();
// If this global is linkonce/weak and the target handles this by emitting it
@@ -315,19 +301,21 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
if ((GV->isWeakForLinker() || EmitUniquedSection) &&
!Kind.isCommon() && !Kind.isBSS()) {
const char *Prefix;
- if (GV->isWeakForLinker())
- Prefix = getSectionPrefixForUniqueGlobal(Kind);
- else {
- assert(EmitUniquedSection);
- Prefix = getSectionPrefixForGlobal(Kind);
- }
+ Prefix = getSectionPrefixForGlobal(Kind);
SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
MCSymbol *Sym = Mang->getSymbol(GV);
Name.append(Sym->getName().begin(), Sym->getName().end());
+ StringRef Group = "";
+ unsigned Flags = getELFSectionFlags(Kind);
+ if (GV->isWeakForLinker()) {
+ Group = Sym->getName();
+ Flags |= ELF::SHF_GROUP;
+ }
+
return getContext().getELFSection(Name.str(),
getELFSectionType(Name.str(), Kind),
- getELFSectionFlags(Kind), Kind);
+ Flags, Kind, 0, Group);
}
if (Kind.isText()) return TextSection;
@@ -352,10 +340,10 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
std::string Name = SizeSpec + utostr(Align);
- return getContext().getELFSection(Name, MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_MERGE |
- MCSectionELF::SHF_STRINGS,
+ return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_MERGE |
+ ELF::SHF_STRINGS,
Kind);
}
@@ -450,7 +438,16 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
IsFunctionEHSymbolGlobal = true;
IsFunctionEHFrameSymbolPrivate = false;
SupportsWeakOmittedEHFrame = false;
-
+
+ Triple T(((LLVMTargetMachine&)TM).getTargetTriple());
+ if (T.getOS() == Triple::Darwin) {
+ unsigned MajNum = T.getDarwinMajorNumber();
+ if (MajNum == 7 || MajNum == 8) // 10.3 Panther, 10.4 Tiger
+ CommDirectiveSupportsAlignment = false;
+ if (MajNum > 9) // 10.6 SnowLeopard
+ IsFunctionEHSymbolGlobal = false;
+ }
+
TargetLoweringObjectFile::Initialize(Ctx, TM);
TextSection // .text
@@ -469,20 +466,20 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
= getContext().getMachOSection("__DATA", "__thread_bss",
MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
SectionKind::getThreadBSS());
-
+
// TODO: Verify datarel below.
TLSTLVSection // .tlv
= getContext().getMachOSection("__DATA", "__thread_vars",
MCSectionMachO::S_THREAD_LOCAL_VARIABLES,
SectionKind::getDataRel());
-
+
TLSThreadInitSection
= getContext().getMachOSection("__DATA", "__thread_init",
MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
SectionKind::getDataRel());
-
+
CStringSection // .cstring
- = getContext().getMachOSection("__TEXT", "__cstring",
+ = getContext().getMachOSection("__TEXT", "__cstring",
MCSectionMachO::S_CSTRING_LITERALS,
SectionKind::getMergeable1ByteCString());
UStringSection
@@ -493,7 +490,7 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
MCSectionMachO::S_4BYTE_LITERALS,
SectionKind::getMergeableConst4());
EightByteConstantSection // .literal8
- = getContext().getMachOSection("__TEXT", "__literal8",
+ = getContext().getMachOSection("__TEXT", "__literal8",
MCSectionMachO::S_8BYTE_LITERALS,
SectionKind::getMergeableConst8());
@@ -517,14 +514,14 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
SectionKind::getText());
ConstTextCoalSection
- = getContext().getMachOSection("__TEXT", "__const_coal",
+ = getContext().getMachOSection("__TEXT", "__const_coal",
MCSectionMachO::S_COALESCED,
SectionKind::getReadOnly());
ConstDataSection // .const_data
= getContext().getMachOSection("__DATA", "__const", 0,
SectionKind::getReadOnlyWithRel());
DataCoalSection
- = getContext().getMachOSection("__DATA","__datacoal_nt",
+ = getContext().getMachOSection("__DATA","__datacoal_nt",
MCSectionMachO::S_COALESCED,
SectionKind::getDataRel());
DataCommonSection
@@ -534,7 +531,7 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
DataBSSSection
= getContext().getMachOSection("__DATA","__bss", MCSectionMachO::S_ZEROFILL,
SectionKind::getBSS());
-
+
LazySymbolPointerSection
= getContext().getMachOSection("__DATA", "__la_symbol_ptr",
@@ -566,17 +563,9 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
// Exception Handling.
LSDASection = getContext().getMachOSection("__TEXT", "__gcc_except_tab", 0,
SectionKind::getReadOnlyWithRel());
- EHFrameSection =
- getContext().getMachOSection("__TEXT", "__eh_frame",
- MCSectionMachO::S_COALESCED |
- MCSectionMachO::S_ATTR_NO_TOC |
- MCSectionMachO::S_ATTR_STRIP_STATIC_SYMS |
- MCSectionMachO::S_ATTR_LIVE_SUPPORT,
- SectionKind::getReadOnly());
-
// Debug Information.
DwarfAbbrevSection =
- getContext().getMachOSection("__DWARF", "__debug_abbrev",
+ getContext().getMachOSection("__DWARF", "__debug_abbrev",
MCSectionMachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
DwarfInfoSection =
@@ -623,10 +612,19 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
getContext().getMachOSection("__DWARF", "__debug_inlined",
MCSectionMachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
-
+
TLSExtraDataSection = TLSTLVSection;
}
+const MCSection *TargetLoweringObjectFileMachO::getEHFrameSection() const {
+ return getContext().getMachOSection("__TEXT", "__eh_frame",
+ MCSectionMachO::S_COALESCED |
+ MCSectionMachO::S_ATTR_NO_TOC |
+ MCSectionMachO::S_ATTR_STRIP_STATIC_SYMS |
+ MCSectionMachO::S_ATTR_LIVE_SUPPORT,
+ SectionKind::getReadOnly());
+}
+
const MCSection *TargetLoweringObjectFileMachO::
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
@@ -665,7 +663,7 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
const MCSection *TargetLoweringObjectFileMachO::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
-
+
// Handle thread local data.
if (Kind.isThreadBSS()) return TLSBSSSection;
if (Kind.isThreadData()) return TLSDataSection;
@@ -685,7 +683,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
if (Kind.isMergeable1ByteCString() &&
TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return CStringSection;
-
+
// Do not put 16-bit arrays in the UString section if they have an
// externally visible label, this runs into issues with certain linker
// versions.
@@ -721,7 +719,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// with the .zerofill directive (aka .lcomm).
if (Kind.isBSSLocal())
return DataBSSSection;
-
+
// Otherwise, just drop the variable in the normal data section.
return DataSection;
}
@@ -858,13 +856,6 @@ void TargetLoweringObjectFileCOFF::Initialize(MCContext &Ctx,
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
- EHFrameSection =
- getContext().getCOFFSection(".eh_frame",
- COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
- SectionKind::getDataRel());
-
// Debug info.
DwarfAbbrevSection =
getContext().getCOFFSection(".debug_abbrev",
@@ -928,6 +919,15 @@ void TargetLoweringObjectFileCOFF::Initialize(MCContext &Ctx,
SectionKind::getMetadata());
}
+const MCSection *TargetLoweringObjectFileCOFF::getEHFrameSection() const {
+ return getContext().getCOFFSection(".eh_frame",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+}
+
+
static unsigned
getCOFFSectionFlags(SectionKind K) {
unsigned Flags = 0;
@@ -938,6 +938,7 @@ getCOFFSectionFlags(SectionKind K) {
else if (K.isText())
Flags |=
COFF::IMAGE_SCN_MEM_EXECUTE |
+ COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_CNT_CODE;
else if (K.isBSS ())
Flags |=
@@ -967,12 +968,12 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
static const char *getCOFFSectionPrefixForUniqueGlobal(SectionKind Kind) {
if (Kind.isText())
- return ".text$linkonce";
+ return ".text$";
if (Kind.isBSS ())
- return ".bss$linkonce";
+ return ".bss$";
if (Kind.isWriteable())
- return ".data$linkonce";
- return ".rdata$linkonce";
+ return ".data$";
+ return ".rdata$";
}
@@ -987,14 +988,14 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
const char *Prefix = getCOFFSectionPrefixForUniqueGlobal(Kind);
SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
MCSymbol *Sym = Mang->getSymbol(GV);
- Name.append(Sym->getName().begin(), Sym->getName().end());
+ Name.append(Sym->getName().begin() + 1, Sym->getName().end());
unsigned Characteristics = getCOFFSectionFlags(Kind);
Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
return getContext().getCOFFSection(Name.str(), Characteristics,
- COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
+ COFF::IMAGE_COMDAT_SELECT_ANY, Kind);
}
if (Kind.isText())
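
Aside: the net effect of this hunk is that a linkonce text symbol _foo now lands in a ".text$foo" COMDAT section selected with IMAGE_COMDAT_SELECT_ANY, instead of the old ".text$linkonce" scheme. A standalone sketch of just the naming (the bool parameters abbreviate the SectionKind queries):

    #include <cstdio>
    #include <string>

    static std::string comdatSectionName(bool Text, bool BSS, bool Writeable,
                                         const std::string &MangledSym) {
      const char *Prefix = Text ? ".text$"
                         : BSS ? ".bss$"
                         : Writeable ? ".data$" : ".rdata$";
      // Skip the leading '_' the mangler added, as the hunk above does
      // with Sym->getName().begin() + 1.
      return std::string(Prefix) + MangledSym.substr(1);
    }

    int main() {
      std::printf("%s\n",
                  comdatSectionName(true, false, false, "_foo").c_str());
      // prints: .text$foo
    }
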
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 78989c5..b3120b8 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -110,7 +110,7 @@ namespace {
bool ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
MachineFunction::iterator &mbbi,
- unsigned RegB, unsigned Dist);
+ unsigned RegA, unsigned RegB, unsigned Dist);
typedef std::pair<std::pair<unsigned, bool>, MachineInstr*> NewKill;
bool canUpdateDeletedKills(SmallVector<unsigned, 4> &Kills,
@@ -138,7 +138,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- TwoAddressInstructionPass() : MachineFunctionPass(ID) {}
+ TwoAddressInstructionPass() : MachineFunctionPass(ID) {
+ initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -146,10 +148,7 @@ namespace {
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
- if (StrongPHIElim)
- AU.addPreservedID(StrongPHIEliminationID);
- else
- AU.addPreservedID(PHIEliminationID);
+ AU.addPreservedID(PHIEliminationID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -159,8 +158,11 @@ namespace {
}
char TwoAddressInstructionPass::ID = 0;
-INITIALIZE_PASS(TwoAddressInstructionPass, "twoaddressinstruction",
- "Two-Address instruction pass", false, false);
+INITIALIZE_PASS_BEGIN(TwoAddressInstructionPass, "twoaddressinstruction",
+ "Two-Address instruction pass", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(TwoAddressInstructionPass, "twoaddressinstruction",
+ "Two-Address instruction pass", false, false)
char &llvm::TwoAddressInstructionPassID = TwoAddressInstructionPass::ID;
@@ -548,8 +550,9 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regB, unsigned regC,
unsigned FromRegC = getMappedReg(regC, SrcRegMap);
unsigned ToRegB = getMappedReg(regB, DstRegMap);
unsigned ToRegC = getMappedReg(regC, DstRegMap);
- if (!regsAreCompatible(FromRegB, ToRegB, TRI) &&
- (regsAreCompatible(FromRegB, ToRegC, TRI) ||
+ if ((FromRegB && ToRegB && !regsAreCompatible(FromRegB, ToRegB, TRI)) &&
+ ((!FromRegC && !ToRegC) ||
+ regsAreCompatible(FromRegB, ToRegC, TRI) ||
regsAreCompatible(FromRegC, ToRegB, TRI)))
return true;
@@ -630,7 +633,8 @@ bool
TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
MachineFunction::iterator &mbbi,
- unsigned RegB, unsigned Dist) {
+ unsigned RegA, unsigned RegB,
+ unsigned Dist) {
MachineInstr *NewMI = TII->convertToThreeAddress(mbbi, mi, LV);
if (NewMI) {
DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
@@ -650,6 +654,10 @@ TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
mi = NewMI;
nmi = llvm::next(mi);
}
+
+ // Update source and destination register maps.
+ SrcRegMap.erase(RegA);
+ DstRegMap.erase(RegB);
return true;
}
@@ -740,7 +748,7 @@ static bool isSafeToDelete(MachineInstr *MI,
const TargetInstrDesc &TID = MI->getDesc();
if (TID.mayStore() || TID.isCall())
return false;
- if (TID.isTerminator() || TID.hasUnmodeledSideEffects())
+ if (TID.isTerminator() || MI->hasUnmodeledSideEffects())
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -884,7 +892,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// three-address instruction. Check if it is profitable.
if (!regBKilled || isProfitableToConv3Addr(regA)) {
// Try to convert it.
- if (ConvertInstTo3Addr(mi, nmi, mbbi, regB, Dist)) {
+ if (ConvertInstTo3Addr(mi, nmi, mbbi, regA, regB, Dist)) {
++NumConvertedTo3Addr;
return true; // Done with this instruction.
}
@@ -951,7 +959,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
if (LV) {
for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
MachineOperand &MO = mi->getOperand(i);
- if (MO.isReg() && MO.getReg() != 0 &&
+ if (MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isUse()) {
if (MO.isKill()) {
@@ -1013,8 +1021,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
<< MF.getFunction()->getName() << '\n');
// ReMatRegs - Keep track of the registers whose def's are remat'ed.
- BitVector ReMatRegs;
- ReMatRegs.resize(MRI->getLastVirtReg()+1);
+ BitVector ReMatRegs(MRI->getNumVirtRegs());
typedef DenseMap<unsigned, SmallVector<std::pair<unsigned, unsigned>, 4> >
TiedOperandMap;
@@ -1143,7 +1150,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
unsigned regASubIdx = mi->getOperand(DstIdx).getSubReg();
TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, *TRI);
- ReMatRegs.set(regB);
+ ReMatRegs.set(TargetRegisterInfo::virtReg2Index(regB));
++NumReMats;
} else {
BuildMI(*mbbi, mi, mi->getDebugLoc(), TII->get(TargetOpcode::COPY),
@@ -1229,13 +1236,12 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
}
// Some remat'ed instructions are dead.
- int VReg = ReMatRegs.find_first();
- while (VReg != -1) {
+ for (int i = ReMatRegs.find_first(); i != -1; i = ReMatRegs.find_next(i)) {
+ unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->use_nodbg_empty(VReg)) {
MachineInstr *DefMI = MRI->getVRegDef(VReg);
DefMI->eraseFromParent();
}
- VReg = ReMatRegs.find_next(VReg);
}
// Eliminate REG_SEQUENCE instructions. Their whole purpose was to preserve
@@ -1346,7 +1352,6 @@ TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
continue;
// Insert a copy to replace the original.
- MachineBasicBlock::iterator InsertLoc = SomeMI;
MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
SomeMI->getDebugLoc(),
TII->get(TargetOpcode::COPY))
@@ -1412,6 +1417,7 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
SmallSet<unsigned, 4> Seen;
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
unsigned SrcReg = MI->getOperand(i).getReg();
+ unsigned SubIdx = MI->getOperand(i+1).getImm();
if (MI->getOperand(i).getSubReg() ||
TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
@@ -1431,7 +1437,9 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
bool isKill = MI->getOperand(i).isKill();
if (!Seen.insert(SrcReg) || MI->getParent() != DefMI->getParent() ||
- !isKill || HasOtherRegSequenceUses(SrcReg, MI, MRI)) {
+ !isKill || HasOtherRegSequenceUses(SrcReg, MI, MRI) ||
+ !TRI->getMatchingSuperRegClass(MRI->getRegClass(DstReg),
+ MRI->getRegClass(SrcReg), SubIdx)) {
// REG_SEQUENCE cannot have duplicated operands, add a copy.
// Also add a copy if the source is live into the block. We don't want
// to end up with a partial-redef of a livein, e.g.
@@ -1460,7 +1468,7 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
MachineBasicBlock::iterator InsertLoc = MI;
MachineInstr *CopyMI = BuildMI(*MI->getParent(), InsertLoc,
MI->getDebugLoc(), TII->get(TargetOpcode::COPY))
- .addReg(DstReg, RegState::Define, MI->getOperand(i+1).getImm())
+ .addReg(DstReg, RegState::Define, SubIdx)
.addReg(SrcReg, getKillRegState(isKill));
MI->getOperand(i).setReg(0);
if (LV && isKill)
diff --git a/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp b/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
index 6dd3333..48d8ab1 100644
--- a/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/contrib/llvm/lib/CodeGen/UnreachableBlockElim.cpp
@@ -26,6 +26,7 @@
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -43,16 +44,19 @@ namespace {
virtual bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- UnreachableBlockElim() : FunctionPass(ID) {}
+ UnreachableBlockElim() : FunctionPass(ID) {
+ initializeUnreachableBlockElimPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<DominatorTree>();
AU.addPreserved<ProfileInfo>();
}
};
}
char UnreachableBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableBlockElim, "unreachableblockelim",
- "Remove unreachable blocks from the CFG", false, false);
+ "Remove unreachable blocks from the CFG", false, false)
FunctionPass *llvm::createUnreachableBlockEliminationPass() {
return new UnreachableBlockElim();
@@ -106,7 +110,7 @@ namespace {
char UnreachableMachineBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableMachineBlockElim, "unreachable-mbb-elimination",
- "Remove unreachable machine basic blocks", false, false);
+ "Remove unreachable machine basic blocks", false, false)
char &llvm::UnreachableMachineBlockElimID = UnreachableMachineBlockElim::ID;
@@ -118,6 +122,7 @@ void UnreachableMachineBlockElim::getAnalysisUsage(AnalysisUsage &AU) const {
bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
SmallPtrSet<MachineBasicBlock*, 8> Reachable;
+ bool ModifiedPHI = false;
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
MachineDominatorTree *MDT = getAnalysisIfAvailable<MachineDominatorTree>();
@@ -179,6 +184,7 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
if (!preds.count(phi->getOperand(i).getMBB())) {
phi->RemoveOperand(i);
phi->RemoveOperand(i-1);
+ ModifiedPHI = true;
}
if (phi->getNumOperands() == 3) {
@@ -188,6 +194,7 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
MachineInstr* temp = phi;
++phi;
temp->eraseFromParent();
+ ModifiedPHI = true;
if (Input != Output)
F.getRegInfo().replaceRegWith(Output, Input);
@@ -201,5 +208,5 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
F.RenumberBlocks();
- return DeadBlocks.size();
+ return (DeadBlocks.size() || ModifiedPHI);
}
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index 20ffcff..734b87e 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -48,7 +49,7 @@ STATISTIC(NumSpills , "Number of register spills");
char VirtRegMap::ID = 0;
-INITIALIZE_PASS(VirtRegMap, "virtregmap", "Virtual Register Map", false, false);
+INITIALIZE_PASS(VirtRegMap, "virtregmap", "Virtual Register Map", false, false)
bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
@@ -74,8 +75,7 @@ bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
EmergencySpillSlots.clear();
SpillSlotToUsesMap.resize(8);
- ImplicitDefed.resize(MF->getRegInfo().getLastVirtReg()+1-
- TargetRegisterInfo::FirstVirtualRegister);
+ ImplicitDefed.resize(MF->getRegInfo().getNumVirtRegs());
allocatableRCRegs.clear();
for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
@@ -89,24 +89,37 @@ bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
}
void VirtRegMap::grow() {
- unsigned LastVirtReg = MF->getRegInfo().getLastVirtReg();
- Virt2PhysMap.grow(LastVirtReg);
- Virt2StackSlotMap.grow(LastVirtReg);
- Virt2ReMatIdMap.grow(LastVirtReg);
- Virt2SplitMap.grow(LastVirtReg);
- Virt2SplitKillMap.grow(LastVirtReg);
- ReMatMap.grow(LastVirtReg);
- ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
+ unsigned NumRegs = MF->getRegInfo().getNumVirtRegs();
+ Virt2PhysMap.resize(NumRegs);
+ Virt2StackSlotMap.resize(NumRegs);
+ Virt2ReMatIdMap.resize(NumRegs);
+ Virt2SplitMap.resize(NumRegs);
+ Virt2SplitKillMap.resize(NumRegs);
+ ReMatMap.resize(NumRegs);
+ ImplicitDefed.resize(NumRegs);
+}
+
+unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
+ int SS = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
+ RC->getAlignment());
+ if (LowSpillSlot == NO_STACK_SLOT)
+ LowSpillSlot = SS;
+ if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
+ HighSpillSlot = SS;
+ assert(SS >= LowSpillSlot && "Unexpected low spill slot");
+ unsigned Idx = SS-LowSpillSlot;
+ while (Idx >= SpillSlotToUsesMap.size())
+ SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
+ return SS;
}
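
Aside: createSpillSlot centralizes three pieces of bookkeeping: the low and high slot watermarks and the doubling growth of the per-slot use table. A standalone sketch where a plain counter stands in for CreateSpillStackObject:

    #include <cstdio>
    #include <vector>

    struct SpillSlots {
      static const int NO_STACK_SLOT = -1;
      int Low = NO_STACK_SLOT, High = NO_STACK_SLOT, Next = 0;
      std::vector<int> UsesPerSlot = std::vector<int>(8);  // starts at 8

      int create() {
        int SS = Next++;  // stand-in for CreateSpillStackObject
        if (Low == NO_STACK_SLOT) Low = SS;
        if (High == NO_STACK_SLOT || SS > High) High = SS;
        unsigned Idx = SS - Low;
        while (Idx >= UsesPerSlot.size())
          UsesPerSlot.resize(UsesPerSlot.size() * 2);  // amortized doubling
        return SS;
      }
    };

    int main() {
      SpillSlots S;
      for (int i = 0; i < 20; ++i) S.create();
      std::printf("slots [%d,%d], table size %zu\n", S.Low, S.High,
                  S.UsesPerSlot.size());  // slots [0,19], table size 32
    }
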
unsigned VirtRegMap::getRegAllocPref(unsigned virtReg) {
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(virtReg);
unsigned physReg = Hint.second;
- if (physReg &&
- TargetRegisterInfo::isVirtualRegister(physReg) && hasPhys(physReg))
+ if (TargetRegisterInfo::isVirtualRegister(physReg) && hasPhys(physReg))
physReg = getPhys(physReg);
if (Hint.first == 0)
- return (physReg && TargetRegisterInfo::isPhysicalRegister(physReg))
+ return (TargetRegisterInfo::isPhysicalRegister(physReg))
? physReg : 0;
return TRI->ResolveRegAllocHint(Hint.first, physReg, *MF);
}
@@ -116,18 +129,8 @@ int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(virtReg);
- int SS = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
- if (LowSpillSlot == NO_STACK_SLOT)
- LowSpillSlot = SS;
- if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
- HighSpillSlot = SS;
- unsigned Idx = SS-LowSpillSlot;
- while (Idx >= SpillSlotToUsesMap.size())
- SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
- Virt2StackSlotMap[virtReg] = SS;
++NumSpills;
- return SS;
+ return Virt2StackSlotMap[virtReg] = createSpillSlot(RC);
}
void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
@@ -160,14 +163,7 @@ int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
EmergencySpillSlots.find(RC);
if (I != EmergencySpillSlots.end())
return I->second;
- int SS = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
- if (LowSpillSlot == NO_STACK_SLOT)
- LowSpillSlot = SS;
- if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
- HighSpillSlot = SS;
- EmergencySpillSlots[RC] = SS;
- return SS;
+ return EmergencySpillSlots[RC] = createSpillSlot(RC);
}
void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
@@ -232,10 +228,11 @@ bool VirtRegMap::FindUnusedRegisters(LiveIntervals* LIs) {
UnusedRegs.resize(NumRegs);
BitVector Used(NumRegs);
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i)
- if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
- Used.set(Virt2PhysMap[i]);
+ for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG)
+ Used.set(Virt2PhysMap[Reg]);
+ }
BitVector Allocatable = TRI->getAllocatableSet(*MF);
bool AnyUnused = false;
@@ -258,23 +255,97 @@ bool VirtRegMap::FindUnusedRegisters(LiveIntervals* LIs) {
return AnyUnused;
}
+void VirtRegMap::rewrite(SlotIndexes *Indexes) {
+ DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
+ << "********** Function: "
+ << MF->getFunction()->getName() << '\n');
+
+ SmallVector<unsigned, 8> SuperKills;
+
+ for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
+ MBBI != MBBE; ++MBBI) {
+ DEBUG(MBBI->print(dbgs(), Indexes));
+ for (MachineBasicBlock::iterator MII = MBBI->begin(), MIE = MBBI->end();
+ MII != MIE;) {
+ MachineInstr *MI = MII;
+ ++MII;
+
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end(); MOI != MOE; ++MOI) {
+ MachineOperand &MO = *MOI;
+ if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ continue;
+ unsigned VirtReg = MO.getReg();
+ unsigned PhysReg = getPhys(VirtReg);
+ assert(PhysReg != NO_PHYS_REG && "Instruction uses unmapped VirtReg");
+
+ // Preserve semantics of sub-register operands.
+ if (MO.getSubReg()) {
+ // A virtual register kill refers to the whole register, so we may
+ // have to add <imp-use,kill> operands for the super-register.
+ if (MO.isUse() && MO.isKill() && !MO.isUndef())
+ SuperKills.push_back(PhysReg);
+
+ // We don't have to deal with sub-register defs because
+ // LiveIntervalAnalysis already added the necessary <imp-def>
+ // operands.
+
+ // PhysReg operands cannot have subregister indexes.
+ PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
+ assert(PhysReg && "Invalid SubReg for physical register");
+ MO.setSubReg(0);
+ }
+ // Rewrite. Note we could have used MachineOperand::substPhysReg(), but
+ // we need the inlining here.
+ MO.setReg(PhysReg);
+ }
+
+ // Add any missing super-register kills after rewriting the whole
+ // instruction.
+ while (!SuperKills.empty())
+ MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);
+
+ DEBUG(dbgs() << "> " << *MI);
+
+ // Finally, remove any identity copies.
+ if (MI->isIdentityCopy()) {
+ DEBUG(dbgs() << "Deleting identity copy.\n");
+ RemoveMachineInstrFromMaps(MI);
+ if (Indexes)
+ Indexes->removeMachineInstrFromMaps(MI);
+ // It's safe to erase MI because MII has already been incremented.
+ MI->eraseFromParent();
+ }
+ }
+ }
+
+ // Tell MRI about physical registers in use.
+ for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg)
+ if (!MRI->reg_nodbg_empty(Reg))
+ MRI->setPhysRegUsed(Reg);
+}
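
Aside: the rewrite loop above in miniature — map every virtual operand to its physical assignment, fold any sub-register index into the physical register, then erase copies that have become identities. A standalone sketch on a toy operand model (the >= 1000 virtual-register convention and the arithmetic getSubReg are fabrications for the demo):

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Operand { int Reg; int SubIdx; };   // SubIdx 0 = whole register
    struct Instr { bool IsCopy; std::vector<Operand> Ops; };

    static bool isVirtual(int Reg) { return Reg >= 1000; }  // toy convention

    int main() {
      std::map<int, int> VRM = {{1000, 5}, {1001, 5}};  // vreg -> physreg
      // A real TRI->getSubReg consults a table; fake it arithmetically.
      auto SubReg = [](int Phys, int Idx) { return Phys + Idx; };

      std::vector<Instr> Block = {
          {true, {{1000, 0}, {1001, 0}}},  // %1000 = COPY %1001
          {false, {{1000, 1}}},            // some use of a sub-register
      };
      for (auto It = Block.begin(); It != Block.end();) {
        for (Operand &MO : It->Ops)
          if (isVirtual(MO.Reg)) {
            int Phys = VRM.at(MO.Reg);
            if (MO.SubIdx) {               // resolve subreg on the physreg
              Phys = SubReg(Phys, MO.SubIdx);
              MO.SubIdx = 0;               // physregs carry no subreg index
            }
            MO.Reg = Phys;
          }
        bool Identity = It->IsCopy && It->Ops.size() == 2 &&
                        It->Ops[0].Reg == It->Ops[1].Reg;
        std::printf("%s r%d\n", Identity ? "erase identity copy at" : "keep",
                    It->Ops[0].Reg);
        if (Identity)
          It = Block.erase(It);  // safe: erase returns the next iterator
        else
          ++It;
      }
    }
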
+
void VirtRegMap::print(raw_ostream &OS, const Module* M) const {
const TargetRegisterInfo* TRI = MF->getTarget().getRegisterInfo();
const MachineRegisterInfo &MRI = MF->getRegInfo();
OS << "********** REGISTER MAP **********\n";
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i) {
- if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
- OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
- << "] " << MRI.getRegClass(i)->getName() << "\n";
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG) {
+ OS << '[' << PrintReg(Reg, TRI) << " -> "
+ << PrintReg(Virt2PhysMap[Reg], TRI) << "] "
+ << MRI.getRegClass(Reg)->getName() << "\n";
+ }
}
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i)
- if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
- OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i]
- << "] " << MRI.getRegClass(i)->getName() << "\n";
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (Virt2StackSlotMap[Reg] != VirtRegMap::NO_STACK_SLOT) {
+ OS << '[' << PrintReg(Reg, TRI) << " -> fi#" << Virt2StackSlotMap[Reg]
+ << "] " << MRI.getRegClass(Reg)->getName() << "\n";
+ }
+ }
OS << '\n';
}
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.h b/contrib/llvm/lib/CodeGen/VirtRegMap.h
index 8b6082d..ba50f4e 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.h
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.h
@@ -35,6 +35,7 @@ namespace llvm {
class TargetInstrInfo;
class TargetRegisterInfo;
class raw_ostream;
+ class SlotIndexes;
class VirtRegMap : public MachineFunctionPass {
public:
@@ -80,7 +81,7 @@ namespace llvm {
/// Virt2SplitKillMap - This maps a split virtual register to its last
/// use (kill) index.
- IndexedMap<SlotIndex> Virt2SplitKillMap;
+ IndexedMap<SlotIndex, VirtReg2IndexFunctor> Virt2SplitKillMap;
/// ReMatMap - This maps a virtual register to its re-materialized
/// instruction. Each virtual register whose definition is going to be
@@ -134,6 +135,9 @@ namespace llvm {
/// UnusedRegs - A list of physical registers that have not been used.
BitVector UnusedRegs;
+ /// createSpillSlot - Allocate a spill slot for RC from MFI.
+ unsigned createSpillSlot(const TargetRegisterClass *RC);
+
VirtRegMap(const VirtRegMap&); // DO NOT IMPLEMENT
void operator=(const VirtRegMap&); // DO NOT IMPLEMENT
@@ -153,10 +157,13 @@ namespace llvm {
}
MachineFunction &getMachineFunction() const {
- assert(MF && "getMachineFunction called before runOnMAchineFunction");
+ assert(MF && "getMachineFunction called before runOnMachineFunction");
return *MF;
}
+ MachineRegisterInfo &getRegInfo() const { return *MRI; }
+ const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }
+
void grow();
/// @brief returns true if the specified virtual register is
@@ -207,10 +214,19 @@ namespace llvm {
}
/// @brief returns the live interval virtReg is split from.
- unsigned getPreSplitReg(unsigned virtReg) {
+ unsigned getPreSplitReg(unsigned virtReg) const {
return Virt2SplitMap[virtReg];
}
+ /// getOriginal - Return the original virtual register that VirtReg descends
+ /// from through splitting.
+ /// A register that was not created by splitting is its own original.
+ /// This operation is idempotent.
+ unsigned getOriginal(unsigned VirtReg) const {
+ unsigned Orig = getPreSplitReg(VirtReg);
+ return Orig ? Orig : VirtReg;
+ }
+
/// @brief returns true if the specified virtual register is not
/// mapped to a stack slot or rematerialized.
bool isAssignedReg(unsigned virtReg) const {
@@ -426,12 +442,12 @@ namespace llvm {
/// @brief Mark the specified register as being implicitly defined.
void setIsImplicitlyDefined(unsigned VirtReg) {
- ImplicitDefed.set(VirtReg-TargetRegisterInfo::FirstVirtualRegister);
+ ImplicitDefed.set(TargetRegisterInfo::virtReg2Index(VirtReg));
}
/// @brief Returns true if the virtual register is implicitly defined.
bool isImplicitlyDefined(unsigned VirtReg) const {
- return ImplicitDefed[VirtReg-TargetRegisterInfo::FirstVirtualRegister];
+ return ImplicitDefed[TargetRegisterInfo::virtReg2Index(VirtReg)];
}
/// @brief Updates information about the specified virtual register's value
@@ -487,6 +503,13 @@ namespace llvm {
return 0;
}
+ /// rewrite - Rewrite all instructions in MF to use only physical registers
+ /// by mapping all virtual register operands to their assigned physical
+ /// registers.
+ ///
+ /// @param Indexes Optionally remove deleted instructions from indexes.
+ void rewrite(SlotIndexes *Indexes);
+
void print(raw_ostream &OS, const Module* M = 0) const;
void dump() const;
};
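
A hedged usage sketch for the getOriginal() accessor added above; the helper name is invented and the include assumes the internal lib/CodeGen header is reachable:

    #include <cassert>
    #include "VirtRegMap.h" // internal header, only visible inside lib/CodeGen

    // getPreSplitReg(Split) returns the original register, while
    // getPreSplitReg(Orig) returns 0; getOriginal() folds both cases.
    unsigned canonicalVReg(const llvm::VirtRegMap &VRM, unsigned VirtReg) {
      unsigned Orig = VRM.getOriginal(VirtReg); // VirtReg itself if never split
      assert(VRM.getOriginal(Orig) == Orig && "idempotent by construction");
      return Orig;
    }
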
diff --git a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp b/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
index 240d28c..458a213 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
@@ -22,8 +22,8 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
-#include <algorithm>
using namespace llvm;
STATISTIC(NumDSE , "Number of dead stores elided");
@@ -216,7 +216,8 @@ public:
<< SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
else
DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
- DEBUG(dbgs() << " in physreg " << TRI->getName(Reg) << "\n");
+ DEBUG(dbgs() << " in physreg " << TRI->getName(Reg)
+ << (CanClobber ? " canclobber" : "") << "\n");
}
/// canClobberPhysRegForSS - Return true if the spiller is allowed to change
@@ -297,7 +298,7 @@ ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
const TargetLowering *TL = MF.getTarget().getTargetLowering();
if (!TL->isTypeLegal(TL->getPointerTy()))
- // Believe it or not, this is true on PIC16.
+ // Believe it or not, this is true on 16-bit targets like PIC16.
return InsertLoc;
const TargetRegisterClass *ptrRegClass =
@@ -462,25 +463,70 @@ static void findSinglePredSuccessor(MachineBasicBlock *MBB,
}
}
-/// InvalidateKill - Invalidate register kill information for a specific
-/// register. This also unsets the kills marker on the last kill operand.
-static void InvalidateKill(unsigned Reg,
- const TargetRegisterInfo* TRI,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- if (RegKills[Reg]) {
- KillOps[Reg]->setIsKill(false);
- // KillOps[Reg] might be a def of a super-register.
- unsigned KReg = KillOps[Reg]->getReg();
- KillOps[KReg] = NULL;
- RegKills.reset(KReg);
- for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
- if (RegKills[*SR]) {
- KillOps[*SR]->setIsKill(false);
- KillOps[*SR] = NULL;
- RegKills.reset(*SR);
- }
- }
+/// ResurrectConfirmedKill - Helper for ResurrectKill. This register is killed
+/// but not re-defined and it's being reused. Remove the kill flag for the
+/// register and unset the kill's marker and last kill operand.
+static void ResurrectConfirmedKill(unsigned Reg, const TargetRegisterInfo* TRI,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ DEBUG(dbgs() << "Resurrect " << TRI->getName(Reg) << "\n");
+
+ MachineOperand *KillOp = KillOps[Reg];
+ KillOp->setIsKill(false);
+ // KillOps[Reg] might be a def of a super-register.
+ unsigned KReg = KillOp->getReg();
+ if (!RegKills[KReg])
+ return;
+
+ assert(KillOps[KReg] == KillOp && "invalid superreg kill flags");
+ KillOps[KReg] = NULL;
+ RegKills.reset(KReg);
+
+ // If it's a def of a super-register, its other sub-registers are no
+ // longer killed as well.
+ for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
+ DEBUG(dbgs() << " Resurrect subreg " << TRI->getName(*SR) << "\n");
+
+ assert(KillOps[*SR] == KillOp && "invalid subreg kill flags");
+ KillOps[*SR] = NULL;
+ RegKills.reset(*SR);
+ }
+}
+
+/// ResurrectKill - Invalidate kill info associated with a previous MI. An
+/// optimization may have decided that it's safe to reuse a previously killed
+/// register. If we fail to erase the invalid kill flags, then the register
+/// scavenger may later clobber the register used by this MI. Note that this
+/// must be done even if this MI is being deleted! Consider:
+///
+/// USE $r1 (vreg1) <kill>
+/// ...
+/// $r1(vreg3) = COPY $r1 (vreg2)
+///
+/// RegAlloc has smartly assigned all three vregs to the same physreg. Initially
+/// vreg1's only use is a kill. The rewriter doesn't know it should be live
+/// until it rewrites vreg2. At that point it sees that the copy is dead and
+/// deletes it. However, deleting the copy implicitly forwards liveness of $r1
+/// (it's copy coalescing). We must resurrect $r1 by removing the kill flag at
+/// vreg1 before deleting the copy.
+static void ResurrectKill(MachineInstr &MI, unsigned Reg,
+ const TargetRegisterInfo* TRI, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
+ ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
+ return;
+ }
+ // No previous kill for this reg. Check for subreg kills as well.
+ // d4 =
+ // store d4, fi#0
+ // ...
+ // = s8<kill>
+ // ...
+ // = d4 <avoiding reload>
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
+ unsigned SReg = *SR;
+ if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI)
+ ResurrectConfirmedKill(SReg, TRI, RegKills, KillOps);
}
}
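
The two helpers above rely on one invariant: when a register is killed, the same MachineOperand is recorded in KillOps for the killed register and every one of its sub-registers, so clearing a kill must clear the whole family at once. A standalone model of that bookkeeping, using plain STL stand-ins rather than the real types:

    #include <vector>

    struct Op { bool IsKill; }; // stand-in for MachineOperand

    static void clearKillFamily(unsigned KReg, const unsigned *SubRegs,
                                std::vector<bool> &RegKills,
                                std::vector<Op*> &KillOps) {
      KillOps[KReg]->IsKill = false; // drop the flag on the operand itself
      KillOps[KReg] = 0;             // forget the super-register kill
      RegKills[KReg] = false;
      // SubRegs is a 0-terminated list, mirroring TRI->getSubRegisters().
      for (const unsigned *SR = SubRegs; *SR; ++SR) {
        KillOps[*SR] = 0;
        RegKills[*SR] = false;
      }
    }
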
@@ -502,15 +548,22 @@ static void InvalidateKills(MachineInstr &MI,
KillRegs->push_back(Reg);
assert(Reg < KillOps.size());
if (KillOps[Reg] == &MO) {
+ // This operand was the kill, now no longer.
KillOps[Reg] = NULL;
RegKills.reset(Reg);
for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
if (RegKills[*SR]) {
+ assert(KillOps[*SR] == &MO && "bad subreg kill flags");
KillOps[*SR] = NULL;
RegKills.reset(*SR);
}
}
}
+ else {
+ // This operand may have reused a previously killed reg. Keep it live in
+ // case it continues to be used after erasing this instruction.
+ ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
+ }
}
}
@@ -578,44 +631,8 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
if (Reg == 0)
continue;
- if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
- // That can't be right. Register is killed but not re-defined and it's
- // being reused. Let's fix that.
- KillOps[Reg]->setIsKill(false);
- // KillOps[Reg] might be a def of a super-register.
- unsigned KReg = KillOps[Reg]->getReg();
- KillOps[KReg] = NULL;
- RegKills.reset(KReg);
-
- // Must be a def of a super-register. Its other sub-regsters are no
- // longer killed as well.
- for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
- KillOps[*SR] = NULL;
- RegKills.reset(*SR);
- }
- } else {
- // Check for subreg kills as well.
- // d4 =
- // store d4, fi#0
- // ...
- // = s8<kill>
- // ...
- // = d4 <avoiding reload>
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
- unsigned SReg = *SR;
- if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI) {
- KillOps[SReg]->setIsKill(false);
- unsigned KReg = KillOps[SReg]->getReg();
- KillOps[KReg] = NULL;
- RegKills.reset(KReg);
-
- for (const unsigned *SSR = TRI->getSubRegisters(KReg); *SSR; ++SSR) {
- KillOps[*SSR] = NULL;
- RegKills.reset(*SSR);
- }
- }
- }
- }
+ // This operand may have reused a previously killed reg. Keep it live.
+ ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
if (MO.isKill()) {
RegKills.set(Reg);
@@ -770,7 +787,8 @@ void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
NotAvailable.insert(Reg);
else {
MBB.addLiveIn(Reg);
- InvalidateKill(Reg, TRI, RegKills, KillOps);
+ if (RegKills[Reg])
+ ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
}
// Skip over the same register.
@@ -1056,6 +1074,7 @@ class LocalRewriter : public VirtRegRewriter {
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
VirtRegMap *VRM;
+ LiveIntervals *LIs;
BitVector AllocatableRegs;
DenseMap<MachineInstr*, unsigned> DistanceMap;
DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;
@@ -1068,6 +1087,11 @@ public:
LiveIntervals* LIs);
private:
+ void EraseInstr(MachineInstr *MI) {
+ VRM->RemoveMachineInstrFromMaps(MI);
+ LIs->RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ }
bool OptimizeByUnfold2(unsigned VirtReg, int SS,
MachineBasicBlock::iterator &MII,
@@ -1110,6 +1134,12 @@ private:
bool InsertSpills(MachineInstr *MI);
+ void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
+ std::vector<MachineInstr*> &MaybeDeadStores,
+ BitVector &RegKills,
+ ReuseInfo &ReusedOperands,
+ std::vector<MachineOperand*> &KillOps);
+
void RewriteMBB(LiveIntervals *LIs,
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps);
@@ -1117,17 +1147,18 @@ private:
}
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
- LiveIntervals* LIs) {
+ LiveIntervals* lis) {
MRI = &MF.getRegInfo();
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
VRM = &vrm;
+ LIs = lis;
AllocatableRegs = TRI->getAllocatableSet(MF);
DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
<< MF.getFunction()->getName() << "':\n");
DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
" reloads!) ****\n");
- DEBUG(MF.dump());
+ DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
// Spills - Keep track of which spilled values are available in physregs
// so that we can choose to reuse the physregs instead of emitting
@@ -1178,7 +1209,7 @@ bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
}
DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
- DEBUG(MF.dump());
+ DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
// Mark unused spill slots.
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -1190,10 +1221,8 @@ bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
MFI->RemoveStackObject(SS);
for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
MachineInstr *DVMI = DbgValues[j];
- MachineBasicBlock *DVMBB = DVMI->getParent();
DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
- VRM->RemoveMachineInstrFromMaps(DVMI);
- DVMBB->erase(DVMI);
+ EraseInstr(DVMI);
}
++NumDSS;
}
@@ -1273,8 +1302,7 @@ OptimizeByUnfold2(unsigned VirtReg, int SS,
VRM->transferRestorePts(&MI, NewMIs[0]);
MII = MBB->insert(MII, NewMIs[0]);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
++NumModRefUnfold;
// Unfold next instructions that fold the same SS.
@@ -1289,8 +1317,7 @@ OptimizeByUnfold2(unsigned VirtReg, int SS,
VRM->transferRestorePts(&NextMI, NewMIs[0]);
MBB->insert(NextMII, NewMIs[0]);
InvalidateKills(NextMI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&NextMI);
- MBB->erase(&NextMI);
+ EraseInstr(&NextMI);
++NumModRefUnfold;
// Skip over dbg_value instructions.
while (NextMII != MBB->end() && NextMII->isDebugValue())
@@ -1417,8 +1444,7 @@ OptimizeByUnfold(MachineBasicBlock::iterator &MII,
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
MII = FoldedMI;
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
return true;
}
}
@@ -1524,14 +1550,11 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(ReloadMI);
- MBB->erase(ReloadMI);
+ EraseInstr(ReloadMI);
InvalidateKills(*DefMI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(DefMI);
- MBB->erase(DefMI);
+ EraseInstr(DefMI);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
// If NewReg was previously holding value of some SS, it's now clobbered.
// This has to be done now because it's a physical register. When this
@@ -1574,8 +1597,7 @@ SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
bool CheckDef = PrevMII != MBB->begin();
if (CheckDef)
--PrevMII;
- VRM->RemoveMachineInstrFromMaps(LastStore);
- MBB->erase(LastStore);
+ EraseInstr(LastStore);
if (CheckDef) {
// Look at defs of killed registers on the store. Mark the defs
// as dead since the store has been deleted and they aren't
@@ -1586,8 +1608,7 @@ SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
MachineInstr *DeadDef = PrevMII;
if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
// FIXME: This assumes a remat def does not have side effects.
- VRM->RemoveMachineInstrFromMaps(DeadDef);
- MBB->erase(DeadDef);
+ EraseInstr(DeadDef);
++NumDRM;
}
}
@@ -1612,10 +1633,18 @@ SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
- if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
+ if (TID.mayLoad() || TID.mayStore() || TID.isTerminator() ||
TID.isCall() || TID.isBarrier() || TID.isReturn() ||
- TID.hasUnmodeledSideEffects())
+ MI.isLabel() || MI.isDebugValue() ||
+ MI.hasUnmodeledSideEffects())
return false;
+
+ // Technically speaking inline asm without side effects and no defs can still
+ // be deleted. But there is so much bad inline asm code out there, we should
+ // let them be.
+ if (MI.isInlineAsm())
+ return false;
+
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.getReg())
@@ -1675,8 +1704,7 @@ TransferDeadness(unsigned Reg, BitVector &RegKills,
LastUD->setIsDead();
break;
}
- VRM->RemoveMachineInstrFromMaps(LastUDMI);
- MBB->erase(LastUDMI);
+ EraseInstr(LastUDMI);
} else {
LastUD->setIsKill();
RegKills.set(Reg);
@@ -1764,6 +1792,10 @@ bool LocalRewriter::InsertRestores(MachineInstr *MI,
<< TRI->getName(InReg) << " for vreg"
<< VirtReg <<" instead of reloading into physreg "
<< TRI->getName(Phys) << '\n');
+
+ // Reusing a physreg may resurrect it. But we expect ProcessUses to update
+ // the kill flags for the current instruction after processing it.
+
++NumOmitted;
continue;
} else if (InReg && InReg != Phys) {
@@ -1828,7 +1860,7 @@ bool LocalRewriter::InsertRestores(MachineInstr *MI,
return true;
}
-/// InsertEmergencySpills - Insert spills after MI if requested by VRM. Return
+/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
if (!VRM->isSpillPt(MI))
@@ -1856,6 +1888,349 @@ bool LocalRewriter::InsertSpills(MachineInstr *MI) {
}
+/// ProcessUses - Process all of MI's spilled operands and all available
+/// operands.
+void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
+ std::vector<MachineInstr*> &MaybeDeadStores,
+ BitVector &RegKills,
+ ReuseInfo &ReusedOperands,
+ std::vector<MachineOperand*> &KillOps) {
+ // Clear kill info.
+ SmallSet<unsigned, 2> KilledMIRegs;
+ SmallVector<unsigned, 4> VirtUseOps;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue; // Ignore non-register operands.
+
+ unsigned VirtReg = MO.getReg();
+
+ if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
+ // Ignore physregs for spilling, but remember that it is used by this
+ // function.
+ MRI->setPhysRegUsed(VirtReg);
+ continue;
+ }
+
+ // We want to process implicit virtual register uses first.
+ if (MO.isImplicit())
+ // If the virtual register is implicitly defined, emit an implicit_def
+ // before so the scavenger knows it's "defined".
+ // FIXME: This is a horrible hack done by the register allocator to
+ // remat a definition with virtual register operand.
+ VirtUseOps.insert(VirtUseOps.begin(), i);
+ else
+ VirtUseOps.push_back(i);
+
+ // A partial def causes problems because the same operand both reads and
+ // writes the register. This rewriter is designed to rewrite uses and defs
+ // separately, so a partial def would already have been rewritten to a
+ // physreg by the time we get to processing defs.
+ // Add an implicit use operand to model the partial def.
+ if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
+ MI.findRegisterUseOperandIdx(VirtReg) == -1) {
+ VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
+ MI.addOperand(MachineOperand::CreateReg(VirtReg,
+ false, // isDef
+ true)); // isImplicit
+ DEBUG(dbgs() << "Partial redef: " << MI);
+ }
+ }
+
+ // Process all of the spilled uses and all non spilled reg references.
+ SmallVector<int, 2> PotentialDeadStoreSlots;
+ KilledMIRegs.clear();
+ for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
+ unsigned i = VirtUseOps[j];
+ unsigned VirtReg = MI.getOperand(i).getReg();
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Not a virtual register?");
+
+ unsigned SubIdx = MI.getOperand(i).getSubReg();
+ if (VRM->isAssignedReg(VirtReg)) {
+ // This virtual register was assigned a physreg!
+ unsigned Phys = VRM->getPhys(VirtReg);
+ MRI->setPhysRegUsed(Phys);
+ if (MI.getOperand(i).isDef())
+ ReusedOperands.markClobbered(Phys);
+ substitutePhysReg(MI.getOperand(i), Phys, *TRI);
+ if (VRM->isImplicitlyDefined(VirtReg))
+ // FIXME: Is this needed?
+ BuildMI(*MBB, &MI, MI.getDebugLoc(),
+ TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
+ continue;
+ }
+
+ // This virtual register is now known to be a spilled value.
+ if (!MI.getOperand(i).isUse())
+ continue; // Handle defs in the loop below (handle use&def here though)
+
+ bool AvoidReload = MI.getOperand(i).isUndef();
+ // Check if it is defined by an implicit def. It should not be spilled.
+ // Note, this is for correctness reasons, e.g.
+ // 8 %reg1024<def> = IMPLICIT_DEF
+ // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
+ // The live range [12, 14) is not part of the r1024 live interval since
+ // it's defined by an implicit def. It will not conflict with the live
+ // interval of r1025. Now suppose both registers are spilled, you can
+ // easily see a situation where both registers are reloaded before
+ // the INSERT_SUBREG and both target registers that would overlap.
+ bool DoReMat = VRM->isReMaterialized(VirtReg);
+ int SSorRMId = DoReMat
+ ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
+ int ReuseSlot = SSorRMId;
+
+ // Check to see if this stack slot is available.
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
+
+ // If this is a sub-register use, make sure the reuse register is in the
+ // right register class. For example, for x86 not all of the 32-bit
+ // registers have accessible sub-registers.
+ // Similarly so for EXTRACT_SUBREG. Consider this:
+ // EDI = op
+ // MOV32_mr fi#1, EDI
+ // ...
+ // = EXTRACT_SUBREG fi#1
+ // fi#1 is available in EDI, but it cannot be reused because it's not in
+ // the right register file.
+ if (PhysReg && !AvoidReload && SubIdx) {
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ if (!RC->contains(PhysReg))
+ PhysReg = 0;
+ }
+
+ if (PhysReg && !AvoidReload) {
+ // This spilled operand might be part of a two-address operand. If this
+ // is the case, then changing it will necessarily require changing the
+ // def part of the instruction as well. However, in some cases, we
+ // aren't allowed to modify the reused register. If none of these cases
+ // apply, reuse it.
+ bool CanReuse = true;
+ bool isTied = MI.isRegTiedToDefOperand(i);
+ if (isTied) {
+ // Okay, we have a two address operand. We can reuse this physreg as
+ // long as we are allowed to clobber the value and there isn't an
+ // earlier def that has already clobbered the physreg.
+ CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
+ Spills.canClobberPhysReg(PhysReg);
+ }
+ // If this is an asm, and a PhysReg alias is used elsewhere as an
+ // earlyclobber operand, we can't also use it as an input.
+ if (MI.isInlineAsm()) {
+ for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
+ MachineOperand &MOk = MI.getOperand(k);
+ if (MOk.isReg() && MOk.isEarlyClobber() &&
+ TRI->regsOverlap(MOk.getReg(), PhysReg)) {
+ CanReuse = false;
+ DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
+ << " for vreg" << VirtReg << ": " << MOk << '\n');
+ break;
+ }
+ }
+ }
+
+ if (CanReuse) {
+ // If this stack slot value is already available, reuse it!
+ if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
+ DEBUG(dbgs() << "Reusing RM#"
+ << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
+ DEBUG(dbgs() << " from physreg "
+ << TRI->getName(PhysReg) << " for vreg"
+ << VirtReg <<" instead of reloading into physreg "
+ << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+
+ // Reusing a physreg may resurrect it. But we expect ProcessUses to
+ // update the kill flags for the current instr after processing it.
+
+ // The only technical detail we have is that we don't know that
+ // PhysReg won't be clobbered by a reloaded stack slot that occurs
+ // later in the instruction. In particular, consider 'op V1, V2'.
+ // If V1 is available in physreg R0, we would choose to reuse it
+ // here, instead of reloading it into the register the allocator
+ // indicated (say R1). However, V2 might have to be reloaded
+ // later, and it might indicate that it needs to live in R0. When
+ // this occurs, we need to have information available that
+ // indicates it is safe to use R1 for the reload instead of R0.
+ //
+ // To further complicate matters, we might conflict with an alias,
+ // or R0 and R1 might not be compatible with each other. In this
+ // case, we actually insert a reload for V1 in R1, ensuring that
+ // we can get at R0 or its alias.
+ ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
+ VRM->getPhys(VirtReg), VirtReg);
+ if (isTied)
+ // Only mark it clobbered if this is a use&def operand.
+ ReusedOperands.markClobbered(PhysReg);
+ ++NumReused;
+
+ if (MI.getOperand(i).isKill() &&
+ ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
+
+ // The store of this spilled value is potentially dead, but we
+ // won't know for certain until we've confirmed that the re-use
+ // above is valid, which means waiting until the other operands
+ // are processed. For now we just track the spill slot; we'll
+ // remove it after the other operands are processed if valid.
+
+ PotentialDeadStoreSlots.push_back(ReuseSlot);
+ }
+
+ // Mark it isKill if there are no other uses of the same virtual
+ // register and it's not a two-address operand. IsKill will be
+ // unset if reg is reused.
+ if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
+ MI.getOperand(i).setIsKill();
+ KilledMIRegs.insert(VirtReg);
+ }
+ continue;
+ } // CanReuse
+
+ // Otherwise we have a situation where we have a two-address instruction
+ // whose mod/ref operand needs to be reloaded. This reload is already
+ // available in some register "PhysReg", but if we used PhysReg as the
+ // operand to our 2-addr instruction, the instruction would modify
+ // PhysReg. This isn't cool if something later uses PhysReg and expects
+ // to get its initial value.
+ //
+ // To avoid this problem, and to avoid doing a load right after a store,
+ // we emit a copy from PhysReg into the designated register for this
+ // operand.
+ //
+ // This case also applies to an earlyclobber'd PhysReg.
+ unsigned DesignatedReg = VRM->getPhys(VirtReg);
+ assert(DesignatedReg && "Must map virtreg to physreg!");
+
+ // Note that, if we reused a register for a previous operand, the
+ // register we want to reload into might not actually be
+ // available. If this occurs, use the register indicated by the
+ // reuser.
+ if (ReusedOperands.hasReuses())
+ DesignatedReg = ReusedOperands.
+ GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
+ MaybeDeadStores, RegKills, KillOps, *VRM);
+
+ // If the mapped designated register is actually the physreg we have
+ // incoming, we don't need to insert a dead copy.
+ if (DesignatedReg == PhysReg) {
+ // If this stack slot value is already available, reuse it!
+ if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
+ DEBUG(dbgs() << "Reusing RM#"
+ << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
+ DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
+ << " for vreg" << VirtReg
+ << " instead of reloading into same physreg.\n");
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+ ReusedOperands.markClobbered(RReg);
+ ++NumReused;
+ continue;
+ }
+
+ MRI->setPhysRegUsed(DesignatedReg);
+ ReusedOperands.markClobbered(DesignatedReg);
+
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
+ SSorRMId, TII, *MBB->getParent());
+ MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ DesignatedReg).addReg(PhysReg);
+ CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
+ UpdateKills(*CopyMI, TRI, RegKills, KillOps);
+
+ // This invalidates DesignatedReg.
+ Spills.ClobberPhysReg(DesignatedReg);
+
+ Spills.addAvailable(ReuseSlot, DesignatedReg);
+ unsigned RReg =
+ SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+ DEBUG(dbgs() << '\t' << *prior(InsertLoc));
+ ++NumReused;
+ continue;
+ } // if (PhysReg)
+
+ // Otherwise, reload it and remember that we have it.
+ PhysReg = VRM->getPhys(VirtReg);
+ assert(PhysReg && "Must map virtreg to physreg!");
+
+ // Note that, if we reused a register for a previous operand, the
+ // register we want to reload into might not actually be
+ // available. If this occurs, use the register indicated by the
+ // reuser.
+ if (ReusedOperands.hasReuses())
+ PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
+ Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
+
+ MRI->setPhysRegUsed(PhysReg);
+ ReusedOperands.markClobbered(PhysReg);
+ if (AvoidReload)
+ ++NumAvoided;
+ else {
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
+ SSorRMId, TII, *MBB->getParent());
+
+ if (DoReMat) {
+ ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
+ } else {
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
+ MachineInstr *LoadMI = prior(InsertLoc);
+ VRM->addSpillSlotUse(SSorRMId, LoadMI);
+ ++NumLoads;
+ DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
+ }
+ // This invalidates PhysReg.
+ Spills.ClobberPhysReg(PhysReg);
+
+ // Any stores to this stack slot are not dead anymore.
+ if (!DoReMat)
+ MaybeDeadStores[SSorRMId] = NULL;
+ Spills.addAvailable(SSorRMId, PhysReg);
+ // Assumes this is the last use. IsKill will be unset if reg is reused
+ // unless it's a two-address operand.
+ if (!MI.isRegTiedToDefOperand(i) &&
+ KilledMIRegs.count(VirtReg) == 0) {
+ MI.getOperand(i).setIsKill();
+ KilledMIRegs.insert(VirtReg);
+ }
+
+ UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
+ DEBUG(dbgs() << '\t' << *prior(InsertLoc));
+ }
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+ }
+
+ // Ok - now we can remove stores that have been confirmed dead.
+ for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
+ // This was the last use and the spilled value is still available
+ // for reuse. That means the spill was unnecessary!
+ int PDSSlot = PotentialDeadStoreSlots[j];
+ MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
+ if (DeadStore) {
+ DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
+ InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
+ EraseInstr(DeadStore);
+ MaybeDeadStores[PDSSlot] = NULL;
+ ++NumDSE;
+ }
+ }
+}
+
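
For a spilled use, the body of ProcessUses above reduces to three outcomes, and for tied (two-address) operands the choice between them hinges on a single predicate; restated in isolation with the same names:

    // Outcomes for a spilled use, in order of preference:
    //   1. reuse the cached PhysReg in place (no new instructions)
    //   2. COPY PhysReg into the designated register (tied/earlyclobber)
    //   3. reload or rematerialize into the designated register
    bool CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
                    Spills.canClobberPhysReg(PhysReg);
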
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
@@ -1880,9 +2255,6 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// ReMatDefs - These are rematerializable def MIs which are not deleted.
SmallSet<MachineInstr*, 4> ReMatDefs;
- // Clear kill info.
- SmallSet<unsigned, 2> KilledMIRegs;
-
// Keep track of the registers we have already spilled in case there are
// multiple defs of the same register in MI.
SmallSet<unsigned, 8> SpilledMIRegs;
@@ -1918,323 +2290,8 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
/// ReusedOperands - Keep track of operand reuse in case we need to undo
/// reuse.
ReuseInfo ReusedOperands(MI, TRI);
- SmallVector<unsigned, 4> VirtUseOps;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue; // Ignore non-register operands.
-
- unsigned VirtReg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
- // Ignore physregs for spilling, but remember that it is used by this
- // function.
- MRI->setPhysRegUsed(VirtReg);
- continue;
- }
-
- // We want to process implicit virtual register uses first.
- if (MO.isImplicit())
- // If the virtual register is implicitly defined, emit a implicit_def
- // before so scavenger knows it's "defined".
- // FIXME: This is a horrible hack done the by register allocator to
- // remat a definition with virtual register operand.
- VirtUseOps.insert(VirtUseOps.begin(), i);
- else
- VirtUseOps.push_back(i);
- }
-
- // Process all of the spilled uses and all non spilled reg references.
- SmallVector<int, 2> PotentialDeadStoreSlots;
- KilledMIRegs.clear();
- for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
- unsigned i = VirtUseOps[j];
- unsigned VirtReg = MI.getOperand(i).getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "Not a virtual register?");
-
- unsigned SubIdx = MI.getOperand(i).getSubReg();
- if (VRM->isAssignedReg(VirtReg)) {
- // This virtual register was assigned a physreg!
- unsigned Phys = VRM->getPhys(VirtReg);
- MRI->setPhysRegUsed(Phys);
- if (MI.getOperand(i).isDef())
- ReusedOperands.markClobbered(Phys);
- substitutePhysReg(MI.getOperand(i), Phys, *TRI);
- if (VRM->isImplicitlyDefined(VirtReg))
- // FIXME: Is this needed?
- BuildMI(*MBB, &MI, MI.getDebugLoc(),
- TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
- continue;
- }
-
- // This virtual register is now known to be a spilled value.
- if (!MI.getOperand(i).isUse())
- continue; // Handle defs in the loop below (handle use&def here though)
-
- bool AvoidReload = MI.getOperand(i).isUndef();
- // Check if it is defined by an implicit def. It should not be spilled.
- // Note, this is for correctness reason. e.g.
- // 8 %reg1024<def> = IMPLICIT_DEF
- // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
- // The live range [12, 14) are not part of the r1024 live interval since
- // it's defined by an implicit def. It will not conflicts with live
- // interval of r1025. Now suppose both registers are spilled, you can
- // easily see a situation where both registers are reloaded before
- // the INSERT_SUBREG and both target registers that would overlap.
- bool DoReMat = VRM->isReMaterialized(VirtReg);
- int SSorRMId = DoReMat
- ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
- int ReuseSlot = SSorRMId;
-
- // Check to see if this stack slot is available.
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
-
- // If this is a sub-register use, make sure the reuse register is in the
- // right register class. For example, for x86 not all of the 32-bit
- // registers have accessible sub-registers.
- // Similarly so for EXTRACT_SUBREG. Consider this:
- // EDI = op
- // MOV32_mr fi#1, EDI
- // ...
- // = EXTRACT_SUBREG fi#1
- // fi#1 is available in EDI, but it cannot be reused because it's not in
- // the right register file.
- if (PhysReg && !AvoidReload && SubIdx) {
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- if (!RC->contains(PhysReg))
- PhysReg = 0;
- }
-
- if (PhysReg && !AvoidReload) {
- // This spilled operand might be part of a two-address operand. If this
- // is the case, then changing it will necessarily require changing the
- // def part of the instruction as well. However, in some cases, we
- // aren't allowed to modify the reused register. If none of these cases
- // apply, reuse it.
- bool CanReuse = true;
- bool isTied = MI.isRegTiedToDefOperand(i);
- if (isTied) {
- // Okay, we have a two address operand. We can reuse this physreg as
- // long as we are allowed to clobber the value and there isn't an
- // earlier def that has already clobbered the physreg.
- CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
- Spills.canClobberPhysReg(PhysReg);
- }
- // If this is an asm, and a PhysReg alias is used elsewhere as an
- // earlyclobber operand, we can't also use it as an input.
- if (MI.isInlineAsm()) {
- for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
- MachineOperand &MOk = MI.getOperand(k);
- if (MOk.isReg() && MOk.isEarlyClobber() &&
- TRI->regsOverlap(MOk.getReg(), PhysReg)) {
- CanReuse = false;
- DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
- << " for vreg" << VirtReg << ": " << MOk << '\n');
- break;
- }
- }
- }
-
- if (CanReuse) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(PhysReg) << " for vreg"
- << VirtReg <<" instead of reloading into physreg "
- << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
-
- // The only technical detail we have is that we don't know that
- // PhysReg won't be clobbered by a reloaded stack slot that occurs
- // later in the instruction. In particular, consider 'op V1, V2'.
- // If V1 is available in physreg R0, we would choose to reuse it
- // here, instead of reloading it into the register the allocator
- // indicated (say R1). However, V2 might have to be reloaded
- // later, and it might indicate that it needs to live in R0. When
- // this occurs, we need to have information available that
- // indicates it is safe to use R1 for the reload instead of R0.
- //
- // To further complicate matters, we might conflict with an alias,
- // or R0 and R1 might not be compatible with each other. In this
- // case, we actually insert a reload for V1 in R1, ensuring that
- // we can get at R0 or its alias.
- ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
- VRM->getPhys(VirtReg), VirtReg);
- if (isTied)
- // Only mark it clobbered if this is a use&def operand.
- ReusedOperands.markClobbered(PhysReg);
- ++NumReused;
-
- if (MI.getOperand(i).isKill() &&
- ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
-
- // The store of this spilled value is potentially dead, but we
- // won't know for certain until we've confirmed that the re-use
- // above is valid, which means waiting until the other operands
- // are processed. For now we just track the spill slot, we'll
- // remove it after the other operands are processed if valid.
-
- PotentialDeadStoreSlots.push_back(ReuseSlot);
- }
-
- // Mark is isKill if it's there no other uses of the same virtual
- // register and it's not a two-address operand. IsKill will be
- // unset if reg is reused.
- if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
- }
-
- continue;
- } // CanReuse
-
- // Otherwise we have a situation where we have a two-address instruction
- // whose mod/ref operand needs to be reloaded. This reload is already
- // available in some register "PhysReg", but if we used PhysReg as the
- // operand to our 2-addr instruction, the instruction would modify
- // PhysReg. This isn't cool if something later uses PhysReg and expects
- // to get its initial value.
- //
- // To avoid this problem, and to avoid doing a load right after a store,
- // we emit a copy from PhysReg into the designated register for this
- // operand.
- //
- // This case also applies to an earlyclobber'd PhysReg.
- unsigned DesignatedReg = VRM->getPhys(VirtReg);
- assert(DesignatedReg && "Must map virtreg to physreg!");
-
- // Note that, if we reused a register for a previous operand, the
- // register we want to reload into might not actually be
- // available. If this occurs, use the register indicated by the
- // reuser.
- if (ReusedOperands.hasReuses())
- DesignatedReg = ReusedOperands.
- GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
- MaybeDeadStores, RegKills, KillOps, *VRM);
-
- // If the mapped designated register is actually the physreg we have
- // incoming, we don't need to inserted a dead copy.
- if (DesignatedReg == PhysReg) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
- << " for vreg" << VirtReg
- << " instead of reloading into same physreg.\n");
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- ReusedOperands.markClobbered(RReg);
- ++NumReused;
- continue;
- }
-
- MRI->setPhysRegUsed(DesignatedReg);
- ReusedOperands.markClobbered(DesignatedReg);
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, MF);
- MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- DesignatedReg).addReg(PhysReg);
- CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- UpdateKills(*CopyMI, TRI, RegKills, KillOps);
-
- // This invalidates DesignatedReg.
- Spills.ClobberPhysReg(DesignatedReg);
-
- Spills.addAvailable(ReuseSlot, DesignatedReg);
- unsigned RReg =
- SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- DEBUG(dbgs() << '\t' << *prior(MII));
- ++NumReused;
- continue;
- } // if (PhysReg)
-
- // Otherwise, reload it and remember that we have it.
- PhysReg = VRM->getPhys(VirtReg);
- assert(PhysReg && "Must map virtreg to physreg!");
-
- // Note that, if we reused a register for a previous operand, the
- // register we want to reload into might not actually be
- // available. If this occurs, use the register indicated by the
- // reuser.
- if (ReusedOperands.hasReuses())
- PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
-
- MRI->setPhysRegUsed(PhysReg);
- ReusedOperands.markClobbered(PhysReg);
- if (AvoidReload)
- ++NumAvoided;
- else {
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, MF);
-
- if (DoReMat) {
- ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
- } else {
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM->addSpillSlotUse(SSorRMId, LoadMI);
- ++NumLoads;
- DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
- }
- // This invalidates PhysReg.
- Spills.ClobberPhysReg(PhysReg);
-
- // Any stores to this stack slot are not dead anymore.
- if (!DoReMat)
- MaybeDeadStores[SSorRMId] = NULL;
- Spills.addAvailable(SSorRMId, PhysReg);
- // Assumes this is the last use. IsKill will be unset if reg is reused
- // unless it's a two-address operand.
- if (!MI.isRegTiedToDefOperand(i) &&
- KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
- }
-
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(InsertLoc));
- }
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- }
-
- // Ok - now we can remove stores that have been confirmed dead.
- for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
- // This was the last use and the spilled value is still available
- // for reuse. That means the spill was unnecessary!
- int PDSSlot = PotentialDeadStoreSlots[j];
- MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
- if (DeadStore) {
- DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
- InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(DeadStore);
- MBB->erase(DeadStore);
- MaybeDeadStores[PDSSlot] = NULL;
- ++NumDSE;
- }
- }
+ ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);
DEBUG(dbgs() << '\t' << MI);
@@ -2288,14 +2345,13 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
BackTracked = true;
} else {
DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- // Unset last kill since it's being reused.
- InvalidateKill(InReg, TRI, RegKills, KillOps);
+ // InvalidateKills resurrects any prior kill of the copy's source,
+ // allowing the source reg to be reused in place of the copy.
Spills.disallowClobberPhysReg(InReg);
}
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
goto ProcessNextInst;
}
@@ -2306,8 +2362,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
MBB->insert(MII, NewMIs[0]);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
@@ -2343,8 +2398,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
MBB->insert(MII, NewStore);
VRM->addSpillSlotUse(SS, NewStore);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
--NextMII;
--NextMII; // backtrack to the unfolded instruction.
@@ -2359,8 +2413,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// If we get here, the store is dead, nuke it now.
DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(DeadStore);
- MBB->erase(DeadStore);
+ EraseInstr(DeadStore);
if (!NewStore)
++NumDSE;
}
@@ -2437,8 +2490,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// Last def is now dead.
TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
}
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
Spills.disallowClobberPhysReg(VirtReg);
goto ProcessNextInst;
@@ -2514,8 +2566,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
++NumDCE;
DEBUG(dbgs() << "Removing now-noop copy: " << MI);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
UpdateKills(*LastStore, TRI, RegKills, KillOps);
goto ProcessNextInst;
@@ -2526,8 +2577,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// Delete dead instructions without side effects.
if (!Erased && !BackTracked && isSafeToDelete(MI)) {
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM->RemoveMachineInstrFromMaps(&MI);
- MBB->erase(&MI);
+ EraseInstr(&MI);
Erased = true;
}
if (!Erased)
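
Nearly every deletion site in this file now funnels through the EraseInstr helper introduced above; the point is that the three steps must never be separated. The helper again, annotated:

    void LocalRewriter::EraseInstr(MachineInstr *MI) {
      VRM->RemoveMachineInstrFromMaps(MI); // drop spill/restore bookkeeping
      LIs->RemoveMachineInstrFromMaps(MI); // drop the slot-index entry
      MI->eraseFromParent();               // only then unlink and delete
    }
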
diff --git a/contrib/llvm/lib/CompilerDriver/Action.cpp b/contrib/llvm/lib/CompilerDriver/Action.cpp
index 0be8049..a8d625c 100644
--- a/contrib/llvm/lib/CompilerDriver/Action.cpp
+++ b/contrib/llvm/lib/CompilerDriver/Action.cpp
@@ -14,11 +14,12 @@
#include "llvm/CompilerDriver/Action.h"
#include "llvm/CompilerDriver/BuiltinOptions.h"
#include "llvm/CompilerDriver/Error.h"
+#include "llvm/CompilerDriver/Main.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SystemUtils.h"
-#include "llvm/System/Program.h"
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/TimeValue.h"
#include <stdexcept>
#include <string>
@@ -28,7 +29,6 @@ using namespace llvmc;
namespace llvmc {
-extern int Main(int argc, char** argv);
extern const char* ProgramName;
}
@@ -53,15 +53,19 @@ namespace {
#endif
}
- int ExecuteProgram (const std::string& name,
- const StrVector& args) {
- sys::Path prog = sys::Program::FindProgramByName(name);
+ int ExecuteProgram (const std::string& name, const StrVector& args) {
+ sys::Path prog(name);
- if (prog.isEmpty()) {
- prog = FindExecutable(name, ProgramName, (void *)(intptr_t)&Main);
- if (prog.isEmpty()) {
- PrintError("Can't find program '" + name + "'");
- return -1;
+ if (sys::path::is_relative(prog.str())) {
+ prog = PrependMainExecutablePath(name, ProgramName,
+ (void *)(intptr_t)&Main);
+
+ if (!prog.canExecute()) {
+ prog = sys::Program::FindProgramByName(name);
+ if (prog.isEmpty()) {
+ PrintError("Can't find program '" + name + "'");
+ return -1;
+ }
}
}
if (!prog.canExecute()) {
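
The rewritten ExecuteProgram changes how tool binaries are located; a hedged restatement of the lookup order the new code implements (helper name invented):

    sys::Path resolveTool(const std::string &Name) {
      sys::Path Prog(Name);                     // 1. absolute paths win
      if (sys::path::is_relative(Prog.str())) {
        // 2. Prefer a copy installed next to the driver binary itself.
        Prog = PrependMainExecutablePath(Name, ProgramName,
                                         (void *)(intptr_t)&Main);
        if (!Prog.canExecute())
          // 3. Fall back to a PATH search only if that is not executable.
          Prog = sys::Program::FindProgramByName(Name);
      }
      return Prog; // caller still checks canExecute()/isEmpty()
    }
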
diff --git a/contrib/llvm/lib/CompilerDriver/CompilationGraph.cpp b/contrib/llvm/lib/CompilerDriver/CompilationGraph.cpp
index d0c0e15..33c6566 100644
--- a/contrib/llvm/lib/CompilerDriver/CompilationGraph.cpp
+++ b/contrib/llvm/lib/CompilerDriver/CompilationGraph.cpp
@@ -32,7 +32,8 @@ using namespace llvmc;
namespace llvmc {
const std::string* LanguageMap::GetLanguage(const sys::Path& File) const {
- StringRef suf = File.getSuffix();
+ // Remove the '.'.
+ StringRef suf = sys::path::extension(File.str()).substr(1);
LanguageMap::const_iterator Lang =
this->find(suf.empty() ? "*empty*" : suf);
if (Lang == this->end()) {
@@ -218,10 +219,11 @@ FindToolChain(const sys::Path& In, const std::string* ForceLanguage,
InputLanguagesSet& InLangs, const LanguageMap& LangMap) const {
// Determine the input language.
- const std::string* InLang = LangMap.GetLanguage(In);
+ const std::string* InLang = (ForceLanguage ? ForceLanguage
+ : LangMap.GetLanguage(In));
if (InLang == 0)
return 0;
- const std::string& InLanguage = (ForceLanguage ? *ForceLanguage : *InLang);
+ const std::string& InLanguage = *InLang;
// Add the current input language to the input language set.
InLangs.insert(InLanguage);
@@ -439,13 +441,17 @@ int CompilationGraph::CheckLanguageNames() const {
continue;
}
- const char* OutLang = N1.ToolPtr->OutputLanguage();
+ const char** OutLangs = N1.ToolPtr->OutputLanguages();
const char** InLangs = N2->ToolPtr->InputLanguages();
bool eq = false;
- for (;*InLangs; ++InLangs) {
- if (std::strcmp(OutLang, *InLangs) == 0) {
- eq = true;
- break;
+ const char* OutLang = 0;
+ for (;*OutLangs; ++OutLangs) {
+ OutLang = *OutLangs;
+ for (;*InLangs; ++InLangs) {
+ if (std::strcmp(OutLang, *InLangs) == 0) {
+ eq = true;
+ break;
+ }
}
}
@@ -480,7 +486,7 @@ int CompilationGraph::CheckMultipleDefaultEdges() const {
for (const_nodes_iterator B = this->NodesMap.begin(),
E = this->NodesMap.end(); B != E; ++B) {
const Node& N = B->second;
- int MaxWeight = 0;
+ int MaxWeight = -1024;
// Ignore the root node.
if (!N.ToolPtr)
@@ -572,6 +578,26 @@ int CompilationGraph::Check () {
// Code related to graph visualization.
+namespace {
+
+std::string SquashStrArray (const char** StrArr) {
+ std::string ret;
+
+ for (; *StrArr; ++StrArr) {
+ if (*(StrArr + 1)) {
+ ret += *StrArr;
+ ret += ", ";
+ }
+ else {
+ ret += *StrArr;
+ }
+ }
+
+ return ret;
+}
+
+} // End anonymous namespace.
+
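
A quick usage note for the SquashStrArray helper above (array contents illustrative):

    // Joins a NULL-terminated C-string array with ", ":
    const char *Langs[] = { "c", "c++", "objective-c", 0 };
    std::string S = SquashStrArray(Langs); // "c, c++, objective-c"
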
namespace llvm {
template <>
struct DOTGraphTraits<llvmc::CompilationGraph*>
@@ -586,7 +612,8 @@ namespace llvm {
if (N->ToolPtr->IsJoin())
return N->Name() + "\n (join" +
(N->HasChildren() ? ")"
- : std::string(": ") + N->ToolPtr->OutputLanguage() + ')');
+ : std::string(": ") +
+ SquashStrArray(N->ToolPtr->OutputLanguages()) + ')');
else
return N->Name();
else
@@ -596,28 +623,15 @@ namespace llvm {
template<typename EdgeIter>
static std::string getEdgeSourceLabel(const Node* N, EdgeIter I) {
if (N->ToolPtr) {
- return N->ToolPtr->OutputLanguage();
+ return SquashStrArray(N->ToolPtr->OutputLanguages());
}
else {
- const char** InLangs = I->ToolPtr->InputLanguages();
- std::string ret;
-
- for (; *InLangs; ++InLangs) {
- if (*(InLangs + 1)) {
- ret += *InLangs;
- ret += ", ";
- }
- else {
- ret += *InLangs;
- }
- }
-
- return ret;
+ return SquashStrArray(I->ToolPtr->InputLanguages());
}
}
};
-}
+} // End namespace llvm
int CompilationGraph::writeGraph(const std::string& OutputFilename) {
std::string ErrorInfo;
diff --git a/contrib/llvm/lib/CompilerDriver/Main.cpp b/contrib/llvm/lib/CompilerDriver/Main.cpp
index 0a6613a..7120027 100644
--- a/contrib/llvm/lib/CompilerDriver/Main.cpp
+++ b/contrib/llvm/lib/CompilerDriver/Main.cpp
@@ -16,8 +16,9 @@
#include "llvm/CompilerDriver/CompilationGraph.h"
#include "llvm/CompilerDriver/Error.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <sstream>
#include <string>
@@ -43,15 +44,15 @@ namespace {
return 0;
}
else if (SaveTemps == SaveTempsEnum::Obj && !OutputFilename.empty()) {
- tempDir = OutputFilename;
- tempDir = tempDir.getDirname();
+ tempDir = sys::path::parent_path(OutputFilename);
}
else {
// SaveTemps == Cwd --> use current dir (leave tempDir empty).
return 0;
}
- if (!tempDir.exists()) {
+ bool Exists;
+ if (llvm::sys::fs::exists(tempDir.str(), Exists) || !Exists) {
std::string ErrMsg;
if (tempDir.createDirectoryOnDisk(true, &ErrMsg)) {
PrintError(ErrMsg);
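
The tempDir check above moves to the newer llvm::sys::fs API, whose shape differs from the old Path predicate; the calling convention this hunk relies on:

    // exists() reports I/O failure through its error_code return value and
    // the actual answer through an out-parameter, so a failed query and a
    // genuinely missing directory both take the creation branch:
    bool Exists;
    if (llvm::sys::fs::exists(tempDir.str(), Exists) || !Exists) {
      // ... createDirectoryOnDisk(true, &ErrMsg) as above ...
    }
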
diff --git a/contrib/llvm/lib/CompilerDriver/Tool.cpp b/contrib/llvm/lib/CompilerDriver/Tool.cpp
index c8488b2..876759a 100644
--- a/contrib/llvm/lib/CompilerDriver/Tool.cpp
+++ b/contrib/llvm/lib/CompilerDriver/Tool.cpp
@@ -15,7 +15,7 @@
#include "llvm/CompilerDriver/Tool.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <algorithm>
@@ -61,7 +61,7 @@ sys::Path Tool::OutFilename(const sys::Path& In,
Out.appendSuffix(OutputSuffix);
}
else {
- Out.set(In.getBasename());
+ Out.set(sys::path::stem(In.str()));
Out.appendSuffix(OutputSuffix);
}
}
@@ -69,7 +69,7 @@ sys::Path Tool::OutFilename(const sys::Path& In,
if (IsJoin())
Out = MakeTempFile(TempDir, "tmp", OutputSuffix);
else
- Out = MakeTempFile(TempDir, In.getBasename(), OutputSuffix);
+ Out = MakeTempFile(TempDir, sys::path::stem(In.str()), OutputSuffix);
}
return Out;
}
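
Both CompilationGraph.cpp and Tool.cpp above switch from sys::Path member helpers to the free functions in llvm::sys::path; the semantics assumed, on an illustrative value:

    llvm::StringRef In = "dir/archive.tar.gz";
    llvm::sys::path::stem(In);      // "archive.tar" (drops the last extension)
    llvm::sys::path::extension(In); // ".gz" (keeps the leading dot, hence the
                                    //  .substr(1) in GetLanguage above)
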
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index be7f1f5..f286975 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -19,14 +19,15 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/DynamicLibrary.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Host.h"
#include "llvm/Target/TargetData.h"
#include <cmath>
#include <cstring>
@@ -45,14 +46,24 @@ ExecutionEngine *(*ExecutionEngine::JITCtor)(
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs) = 0;
+ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
+ Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) = 0;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
std::string *ErrorStr) = 0;
-ExecutionEngine::EERegisterFn ExecutionEngine::ExceptionTableRegister = 0;
-
ExecutionEngine::ExecutionEngine(Module *M)
: EEState(*this),
- LazyFunctionCreator(0) {
+ LazyFunctionCreator(0),
+ ExceptionTableRegister(0),
+ ExceptionTableDeregister(0) {
CompilingLazily = false;
GVCompilationDisabled = false;
SymbolSearchingDisabled = false;
@@ -66,16 +77,25 @@ ExecutionEngine::~ExecutionEngine() {
delete Modules[i];
}
+void ExecutionEngine::DeregisterAllTables() {
+ if (ExceptionTableDeregister) {
+ for (std::vector<void*>::iterator it = AllExceptionTables.begin(),
+ ie = AllExceptionTables.end(); it != ie; ++it)
+ ExceptionTableDeregister(*it);
+ AllExceptionTables.clear();
+ }
+}
+
namespace {
-// This class automatically deletes the memory block when the GlobalVariable is
-// destroyed.
+/// \brief Helper class which uses a value handle to automatically delete the
+/// memory block when the GlobalVariable is destroyed.
class GVMemoryBlock : public CallbackVH {
GVMemoryBlock(const GlobalVariable *GV)
: CallbackVH(const_cast<GlobalVariable*>(GV)) {}
public:
- // Returns the address the GlobalVariable should be written into. The
- // GVMemoryBlock object prefixes that.
+ /// \brief Returns the address the GlobalVariable should be written into. The
+ /// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const TargetData& TD) {
const Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
@@ -97,13 +117,12 @@ public:
};
} // anonymous namespace
-char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
+char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
return GVMemoryBlock::Create(GV, *getTargetData());
}
-/// removeModule - Remove a Module from the list of modules.
bool ExecutionEngine::removeModule(Module *M) {
- for(SmallVector<Module *, 1>::iterator I = Modules.begin(),
+ for(SmallVector<Module *, 1>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
Module *Found = *I;
if (Found == M) {
@@ -115,9 +134,6 @@ bool ExecutionEngine::removeModule(Module *M) {
return false;
}
-/// FindFunctionNamed - Search all of the active modules to find the one that
-/// defines FnName. This is very slow operation and shouldn't be used for
-/// general code.
Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
if (Function *F = Modules[i]->getFunction(FnName))
@@ -127,10 +143,13 @@ Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
}
-void *ExecutionEngineState::RemoveMapping(
- const MutexGuard &, const GlobalValue *ToUnmap) {
+void *ExecutionEngineState::RemoveMapping(const MutexGuard &,
+ const GlobalValue *ToUnmap) {
GlobalAddressMapTy::iterator I = GlobalAddressMap.find(ToUnmap);
void *OldVal;
+
+ // FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
+ // GlobalAddressMap.
if (I == GlobalAddressMap.end())
OldVal = 0;
else {
@@ -142,21 +161,16 @@ void *ExecutionEngineState::RemoveMapping(
return OldVal;
}
-/// addGlobalMapping - Tell the execution engine that the specified global is
-/// at the specified location. This is used internally as functions are JIT'd
-/// and as global variables are laid out in memory. It can and should also be
-/// used by clients of the EE that want to have an LLVM global overlay
-/// existing data in memory.
void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
MutexGuard locked(lock);
- DEBUG(dbgs() << "JIT: Map \'" << GV->getName()
+ DEBUG(dbgs() << "JIT: Map \'" << GV->getName()
<< "\' to [" << Addr << "]\n";);
void *&CurVal = EEState.getGlobalAddressMap(locked)[GV];
assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
CurVal = Addr;
-
- // If we are using the reverse mapping, add it too
+
+ // If we are using the reverse mapping, add it too.
if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
AssertingVH<const GlobalValue> &V =
EEState.getGlobalAddressReverseMap(locked)[Addr];
@@ -165,32 +179,23 @@ void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
}
}
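The client-side overlay use case described by the doc comment this hunk
drops (pointing an LLVM global at memory the host already owns) looks
roughly like this; "counter" is a hypothetical global name:

    static int HostCounter = 0;

    void overlayCounter(llvm::ExecutionEngine &EE, llvm::Module &M) {
      if (llvm::GlobalVariable *GV = M.getNamedGlobal("counter"))
        EE.addGlobalMapping(GV, &HostCounter);  // JIT'd code now reads and
                                                // writes the host's counter
    }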
-/// clearAllGlobalMappings - Clear all global mappings and start over again
-/// use in dynamic compilation scenarios when you want to move globals
void ExecutionEngine::clearAllGlobalMappings() {
MutexGuard locked(lock);
-
+
EEState.getGlobalAddressMap(locked).clear();
EEState.getGlobalAddressReverseMap(locked).clear();
}
-/// clearGlobalMappingsFromModule - Clear all global mappings that came from a
-/// particular module, because it has been removed from the JIT.
void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
MutexGuard locked(lock);
-
- for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) {
+
+ for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
EEState.RemoveMapping(locked, FI);
- }
- for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
- GI != GE; ++GI) {
+ for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
+ GI != GE; ++GI)
EEState.RemoveMapping(locked, GI);
- }
}
-/// updateGlobalMapping - Replace an existing mapping for GV with a new
-/// address. This updates both maps as required. If "Addr" is null, the
-/// entry for the global is removed from the mappings.
void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
MutexGuard locked(lock);
@@ -198,18 +203,17 @@ void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
EEState.getGlobalAddressMap(locked);
// Deleting from the mapping?
- if (Addr == 0) {
+ if (Addr == 0)
return EEState.RemoveMapping(locked, GV);
- }
-
+
void *&CurVal = Map[GV];
void *OldVal = CurVal;
if (CurVal && !EEState.getGlobalAddressReverseMap(locked).empty())
EEState.getGlobalAddressReverseMap(locked).erase(CurVal);
CurVal = Addr;
-
- // If we are using the reverse mapping, add it too
+
+ // If we are using the reverse mapping, add it too.
if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
AssertingVH<const GlobalValue> &V =
EEState.getGlobalAddressReverseMap(locked)[Addr];
@@ -219,20 +223,14 @@ void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
return OldVal;
}
-/// getPointerToGlobalIfAvailable - This returns the address of the specified
-/// global value if it is has already been codegen'd, otherwise it returns null.
-///
void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
MutexGuard locked(lock);
-
+
ExecutionEngineState::GlobalAddressMapTy::iterator I =
EEState.getGlobalAddressMap(locked).find(GV);
return I != EEState.getGlobalAddressMap(locked).end() ? I->second : 0;
}
-/// getGlobalValueAtAddress - Return the LLVM global value object that starts
-/// at the specified address.
-///
const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
MutexGuard locked(lock);
@@ -241,8 +239,8 @@ const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
for (ExecutionEngineState::GlobalAddressMapTy::iterator
I = EEState.getGlobalAddressMap(locked).begin(),
E = EEState.getGlobalAddressMap(locked).end(); I != E; ++I)
- EEState.getGlobalAddressReverseMap(locked).insert(std::make_pair(I->second,
- I->first));
+ EEState.getGlobalAddressReverseMap(locked).insert(std::make_pair(
+ I->second, I->first));
}
std::map<void *, AssertingVH<const GlobalValue> >::iterator I =
@@ -301,54 +299,50 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
return Array;
}
-
-/// runStaticConstructorsDestructors - This method is used to execute all of
-/// the static constructors or destructors for a module, depending on the
-/// value of isDtors.
void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
bool isDtors) {
const char *Name = isDtors ? "llvm.global_dtors" : "llvm.global_ctors";
-
- // Execute global ctors/dtors for each module in the program.
-
- GlobalVariable *GV = module->getNamedGlobal(Name);
-
- // If this global has internal linkage, or if it has a use, then it must be
- // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
- // this is the case, don't execute any of the global ctors, __main will do
- // it.
- if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
-
- // Should be an array of '{ int, void ()* }' structs. The first value is
- // the init priority, which we ignore.
- ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
- if (!InitList) return;
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (ConstantStruct *CS =
- dyn_cast<ConstantStruct>(InitList->getOperand(i))) {
- if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
-
- Constant *FP = CS->getOperand(1);
- if (FP->isNullValue())
- break; // Found a null terminator, exit.
-
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
- if (CE->isCast())
- FP = CE->getOperand(0);
- if (Function *F = dyn_cast<Function>(FP)) {
- // Execute the ctor/dtor function!
- runFunction(F, std::vector<GenericValue>());
- }
- }
+ GlobalVariable *GV = module->getNamedGlobal(Name);
+
+ // If this global has internal linkage, or if it has a use, then it must be
+ // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
+ // this is the case, don't execute any of the global ctors, __main will do
+ // it.
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
+
+ // Should be an array of '{ int, void ()* }' structs. The first value is
+ // the init priority, which we ignore.
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList) return;
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
+ ConstantStruct *CS =
+ dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ if (!CS) continue;
+ if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
+
+ Constant *FP = CS->getOperand(1);
+ if (FP->isNullValue())
+ break; // Found a null terminator, exit.
+
+ // Strip off constant expression casts.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+
+ // Execute the ctor/dtor function!
+ if (Function *F = dyn_cast<Function>(FP))
+ runFunction(F, std::vector<GenericValue>());
+
+ // FIXME: It is marginally lame that we just do nothing here if we see an
+ // entry we don't recognize. It might not be unreasonable for the verifier
+ // to not even allow this and just assert here.
+ }
}
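For reference, each llvm.global_ctors element walked above has this shape,
written as the equivalent C struct (a sketch):

    struct GlobalCtorEntry {
      int Priority;   // init priority; ignored by the loop above
      void (*Fn)();   // ctor/dtor to run; a null entry terminates the list
    };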
-/// runStaticConstructorsDestructors - This method is used to execute all of
-/// the static constructors or destructors for a program, depending on the
-/// value of isDtors.
void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
// Execute global ctors/dtors for each module in the program.
- for (unsigned m = 0, e = Modules.size(); m != e; ++m)
- runStaticConstructorsDestructors(Modules[m], isDtors);
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i)
+ runStaticConstructorsDestructors(Modules[i], isDtors);
}
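Typical client ordering for these two entry points, sketched with EE and the
main-function lookup assumed to be set up elsewhere:

    EE->runStaticConstructorsDestructors(false);  // ctors before main
    // ... EE->runFunctionAsMain(MainFn, Args, envp) or runFunction ...
    EE->runStaticConstructorsDestructors(true);   // dtors afterwards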
#ifndef NDEBUG
@@ -362,9 +356,6 @@ static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
}
#endif
-/// runFunctionAsMain - This is a helper function which wraps runFunction to
-/// handle the common task of starting up main with the specified argc, argv,
-/// and envp parameters.
int ExecutionEngine::runFunctionAsMain(Function *Fn,
const std::vector<std::string> &argv,
const char * const * envp) {
@@ -376,32 +367,20 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
unsigned NumArgs = Fn->getFunctionType()->getNumParams();
const FunctionType *FTy = Fn->getFunctionType();
const Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
- switch (NumArgs) {
- case 3:
- if (FTy->getParamType(2) != PPInt8Ty) {
- report_fatal_error("Invalid type for third argument of main() supplied");
- }
- // FALLS THROUGH
- case 2:
- if (FTy->getParamType(1) != PPInt8Ty) {
- report_fatal_error("Invalid type for second argument of main() supplied");
- }
- // FALLS THROUGH
- case 1:
- if (!FTy->getParamType(0)->isIntegerTy(32)) {
- report_fatal_error("Invalid type for first argument of main() supplied");
- }
- // FALLS THROUGH
- case 0:
- if (!FTy->getReturnType()->isIntegerTy() &&
- !FTy->getReturnType()->isVoidTy()) {
- report_fatal_error("Invalid return type of main() supplied");
- }
- break;
- default:
- report_fatal_error("Invalid number of arguments of main() supplied");
- }
-
+
+ // Check the argument types.
+ if (NumArgs > 3)
+ report_fatal_error("Invalid number of arguments of main() supplied");
+ if (NumArgs >= 3 && FTy->getParamType(2) != PPInt8Ty)
+ report_fatal_error("Invalid type for third argument of main() supplied");
+ if (NumArgs >= 2 && FTy->getParamType(1) != PPInt8Ty)
+ report_fatal_error("Invalid type for second argument of main() supplied");
+ if (NumArgs >= 1 && !FTy->getParamType(0)->isIntegerTy(32))
+ report_fatal_error("Invalid type for first argument of main() supplied");
+ if (!FTy->getReturnType()->isIntegerTy() &&
+ !FTy->getReturnType()->isVoidTy())
+ report_fatal_error("Invalid return type of main() supplied");
+
ArgvArray CArgv;
ArgvArray CEnv;
if (NumArgs) {
@@ -420,13 +399,10 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
}
}
}
+
return runFunction(Fn, GVArgs).IntVal.getZExtValue();
}
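The checks above accept exactly the following prototypes (i8** corresponds
to char**); the call sketch uses placeholder MainFn/envp values:

    // Accepted forms of main():
    //   int main();
    //   int main(int argc);
    //   int main(int argc, char **argv);
    //   int main(int argc, char **argv, char **envp);

    std::vector<std::string> Args;
    Args.push_back("prog");  // becomes argv[0]
    int rc = EE->runFunctionAsMain(MainFn, Args, envp);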
-/// If possible, create a JIT, unless the caller specifically requests an
-/// Interpreter or there's an error. If even an Interpreter cannot be created,
-/// NULL is returned.
-///
ExecutionEngine *ExecutionEngine::create(Module *M,
bool ForceInterpreter,
std::string *ErrorStr,
@@ -464,7 +440,13 @@ ExecutionEngine *EngineBuilder::create() {
// Unless the interpreter was explicitly selected or the JIT is not linked,
// try making a JIT.
if (WhichEngine & EngineKind::JIT) {
- if (ExecutionEngine::JITCtor) {
+ if (UseMCJIT && ExecutionEngine::MCJITCtor) {
+ ExecutionEngine *EE =
+ ExecutionEngine::MCJITCtor(M, ErrorStr, JMM, OptLevel,
+ AllocateGVsWithCode, CMModel,
+ MArch, MCPU, MAttrs);
+ if (EE) return EE;
+ } else if (ExecutionEngine::JITCtor) {
ExecutionEngine *EE =
ExecutionEngine::JITCtor(M, ErrorStr, JMM, OptLevel,
AllocateGVsWithCode, CMModel,
@@ -486,21 +468,18 @@ ExecutionEngine *EngineBuilder::create() {
if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
- }
+ }
+
return 0;
}
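A sketch of the builder path this function backs, assuming the usual
EngineBuilder setters:

    std::string Err;
    llvm::ExecutionEngine *EE = llvm::EngineBuilder(M)
        .setEngineKind(llvm::EngineKind::JIT)  // or EngineKind::Interpreter
        .setErrorStr(&Err)
        .create();
    if (!EE)
      llvm::errs() << "engine creation failed: " << Err << "\n";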
-/// getPointerToGlobal - This returns the address of the specified global
-/// value. This may involve code generation if it's a function.
-///
void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
return getPointerToFunction(F);
MutexGuard locked(lock);
- void *p = EEState.getGlobalAddressMap(locked)[GV];
- if (p)
- return p;
+ if (void *P = EEState.getGlobalAddressMap(locked)[GV])
+ return P;
// Global variable might have been added since interpreter started.
if (GlobalVariable *GVar =
@@ -508,12 +487,12 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
EmitGlobalVariable(GVar);
else
llvm_unreachable("Global hasn't had an address allocated yet!");
+
return EEState.getGlobalAddressMap(locked)[GV];
}
-/// This function converts a Constant* into a GenericValue. The interesting
-/// part is if C is a ConstantExpr.
-/// @brief Get a GenericValue for a Constant*
+/// \brief Converts a Constant* into a GenericValue, including handling of
+/// ConstantExpr values.
GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
  // If it's undefined, return the garbage.
if (isa<UndefValue>(C)) {
@@ -533,12 +512,12 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
return Result;
}
- // If the value is a ConstantExpr
+ // Otherwise, if the value is a ConstantExpr...
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
Constant *Op0 = CE->getOperand(0);
switch (CE->getOpcode()) {
case Instruction::GetElementPtr: {
- // Compute the index
+ // Compute the index
GenericValue Result = getConstantValue(Op0);
SmallVector<Value*, 8> Indices(CE->op_begin()+1, CE->op_end());
uint64_t Offset =
@@ -585,9 +564,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
else if (CE->getType()->isDoubleTy())
GV.DoubleVal = GV.IntVal.roundToDouble();
else if (CE->getType()->isX86_FP80Ty()) {
- const uint64_t zero[] = {0, 0};
- APFloat apf = APFloat(APInt(80, 2, zero));
- (void)apf.convertFromAPInt(GV.IntVal,
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended);
+ (void)apf.convertFromAPInt(GV.IntVal,
false,
APFloat::rmNearestTiesToEven);
GV.IntVal = apf.bitcastToAPInt();
@@ -601,9 +579,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
else if (CE->getType()->isDoubleTy())
GV.DoubleVal = GV.IntVal.signedRoundToDouble();
else if (CE->getType()->isX86_FP80Ty()) {
- const uint64_t zero[] = { 0, 0};
- APFloat apf = APFloat(APInt(80, 2, zero));
- (void)apf.convertFromAPInt(GV.IntVal,
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended);
+ (void)apf.convertFromAPInt(GV.IntVal,
true,
APFloat::rmNearestTiesToEven);
GV.IntVal = apf.bitcastToAPInt();
@@ -623,7 +600,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
uint64_t v;
bool ignored;
(void)apf.convertToInteger(&v, BitWidth,
- CE->getOpcode()==Instruction::FPToSI,
+ CE->getOpcode()==Instruction::FPToSI,
APFloat::rmTowardZero, &ignored);
GV.IntVal = v; // endian?
}
@@ -656,13 +633,13 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
else if (DestTy->isDoubleTy())
GV.DoubleVal = GV.IntVal.bitsToDouble();
break;
- case Type::FloatTyID:
+ case Type::FloatTyID:
assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
- GV.IntVal.floatToBits(GV.FloatVal);
+ GV.IntVal = APInt::floatToBits(GV.FloatVal);
break;
case Type::DoubleTyID:
assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
- GV.IntVal.doubleToBits(GV.DoubleVal);
+ GV.IntVal = APInt::doubleToBits(GV.DoubleVal);
break;
case Type::PointerTyID:
assert(DestTy->isPointerTy() && "Invalid bitcast");
@@ -712,9 +689,9 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
case Instruction::FMul:
GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
- case Instruction::FDiv:
+ case Instruction::FDiv:
GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
- case Instruction::FRem:
+ case Instruction::FRem:
GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
}
break;
@@ -727,9 +704,9 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
case Instruction::FMul:
GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
- case Instruction::FDiv:
+ case Instruction::FDiv:
GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
- case Instruction::FRem:
+ case Instruction::FRem:
GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
}
break;
@@ -738,7 +715,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Type::FP128TyID: {
APFloat apfLHS = APFloat(LHS.IntVal);
switch (CE->getOpcode()) {
- default: llvm_unreachable("Invalid long double opcode");llvm_unreachable(0);
+ default: llvm_unreachable("Invalid long double opcode");
case Instruction::FAdd:
apfLHS.add(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
@@ -751,11 +728,11 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
apfLHS.multiply(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
- case Instruction::FDiv:
+ case Instruction::FDiv:
apfLHS.divide(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
- case Instruction::FRem:
+ case Instruction::FRem:
apfLHS.mod(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
@@ -768,16 +745,18 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
default:
break;
}
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "ConstantExpr not handled: " << *CE;
- report_fatal_error(Msg.str());
+
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ConstantExpr not handled: " << *CE;
+ report_fatal_error(OS.str());
}
+ // Otherwise, we have a simple constant.
GenericValue Result;
switch (C->getType()->getTypeID()) {
- case Type::FloatTyID:
- Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
+ case Type::FloatTyID:
+ Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
break;
case Type::DoubleTyID:
Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
@@ -804,11 +783,12 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
llvm_unreachable("Unknown constant pointer type!");
break;
default:
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
- report_fatal_error(Msg.str());
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ERROR: Constant unimplemented for type: " << *C->getType();
+ report_fatal_error(OS.str());
}
+
return Result;
}
@@ -819,11 +799,11 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
uint8_t *Src = (uint8_t *)IntVal.getRawData();
- if (sys::isLittleEndianHost())
+ if (sys::isLittleEndianHost()) {
// Little-endian host - the source is ordered from LSB to MSB. Order the
// destination from LSB to MSB: Do a straight copy.
memcpy(Dst, Src, StoreBytes);
- else {
+ } else {
// Big-endian host - the source is an array of 64 bit words ordered from
// LSW to MSW. Each word is ordered from MSB to LSB. Order the destination
// from MSB to LSB: Reverse the word order, but not the bytes in a word.
@@ -838,10 +818,6 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
}
}
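The little/big-endian split above hinges on sys::isLittleEndianHost(); an
equivalent standalone probe, for illustration:

    #include <cstdint>
    #include <cstring>

    static bool hostIsLittleEndian() {
      uint16_t Probe = 1;
      uint8_t FirstByte;
      std::memcpy(&FirstByte, &Probe, 1);
      return FirstByte == 1;  // LSB stored at the lowest address
    }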
-/// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr. Ptr
-/// is the address of the memory at which to store Val, cast to GenericValue *.
-/// It is not a pointer to a GenericValue containing the address at which to
-/// store Val.
void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
GenericValue *Ptr, const Type *Ty) {
const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);
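The doc comment removed here made a point that is easy to get wrong: Ptr is
the destination address itself, merely typed as GenericValue*. A sketch, with
DoubleTy standing in for Type::getDoubleTy(Ctx):

    double Slot;
    llvm::GenericValue V;
    V.DoubleVal = 3.14;
    // Pass the raw destination address, cast to GenericValue*.
    EE->StoreValueToMemory(V, reinterpret_cast<llvm::GenericValue*>(&Slot),
                           DoubleTy);  // Slot now holds 3.14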
@@ -932,16 +908,13 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
break;
}
default:
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Cannot load value of type " << *Ty << "!";
- report_fatal_error(Msg.str());
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "Cannot load value of type " << *Ty << "!";
+ report_fatal_error(OS.str());
}
}
-// InitializeMemory - Recursive function to apply a Constant value into the
-// specified memory location...
-//
void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
DEBUG(Init->dump());
@@ -974,20 +947,17 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
return;
}
- dbgs() << "Bad Type: " << *Init->getType() << "\n";
+ DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
llvm_unreachable("Unknown constant type to initialize memory with!");
}
/// EmitGlobals - Emit all of the global variables to memory, storing their
/// addresses into GlobalAddress. This must make sure to copy the contents of
/// their initializers into the memory.
-///
void ExecutionEngine::emitGlobals() {
-
// Loop over all of the global variables in the program, allocating the memory
// to hold them. If there is more than one module, do a prepass over globals
// to figure out how the different modules should link together.
- //
std::map<std::pair<std::string, const Type*>,
const GlobalValue*> LinkedGlobalsMap;
@@ -1000,8 +970,8 @@ void ExecutionEngine::emitGlobals() {
if (GV->hasLocalLinkage() || GV->isDeclaration() ||
GV->hasAppendingLinkage() || !GV->hasName())
continue;// Ignore external globals and globals with internal linkage.
-
- const GlobalValue *&GVEntry =
+
+ const GlobalValue *&GVEntry =
LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
// If this is the first time we've seen this global, it is the canonical
@@ -1010,13 +980,13 @@ void ExecutionEngine::emitGlobals() {
GVEntry = GV;
continue;
}
-
+
// If the existing global is strong, never replace it.
if (GVEntry->hasExternalLinkage() ||
GVEntry->hasDLLImportLinkage() ||
GVEntry->hasDLLExportLinkage())
continue;
-
+
// Otherwise, we know it's linkonce/weak, replace it if this is a strong
// symbol. FIXME is this right for common?
if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
@@ -1024,7 +994,7 @@ void ExecutionEngine::emitGlobals() {
}
}
}
-
+
std::vector<const GlobalValue*> NonCanonicalGlobals;
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
@@ -1032,7 +1002,7 @@ void ExecutionEngine::emitGlobals() {
I != E; ++I) {
// In the multi-module case, see what this global maps to.
if (!LinkedGlobalsMap.empty()) {
- if (const GlobalValue *GVEntry =
+ if (const GlobalValue *GVEntry =
LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) {
// If something else is the canonical global, ignore this one.
if (GVEntry != &*I) {
@@ -1041,7 +1011,7 @@ void ExecutionEngine::emitGlobals() {
}
}
}
-
+
if (!I->isDeclaration()) {
addGlobalMapping(I, getMemoryForGV(I));
} else {
@@ -1056,7 +1026,7 @@ void ExecutionEngine::emitGlobals() {
}
}
}
-
+
// If there are multiple modules, map the non-canonical globals to their
// canonical location.
if (!NonCanonicalGlobals.empty()) {
@@ -1069,14 +1039,14 @@ void ExecutionEngine::emitGlobals() {
addGlobalMapping(GV, Ptr);
}
}
-
- // Now that all of the globals are set up in memory, loop through them all
+
+ // Now that all of the globals are set up in memory, loop through them all
// and initialize their contents.
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
if (!I->isDeclaration()) {
if (!LinkedGlobalsMap.empty()) {
- if (const GlobalValue *GVEntry =
+ if (const GlobalValue *GVEntry =
LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())])
if (GVEntry != &*I) // Not the canonical variable.
continue;
@@ -1098,11 +1068,11 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
GA = getMemoryForGV(GV);
addGlobalMapping(GV, GA);
}
-
+
// Don't initialize if it's thread local, let the client do it.
if (!GV->isThreadLocal())
InitializeMemory(GV->getInitializer(), GA);
-
+
const Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
@@ -1113,18 +1083,20 @@ ExecutionEngineState::ExecutionEngineState(ExecutionEngine &EE)
: EE(EE), GlobalAddressMap(this) {
}
-sys::Mutex *ExecutionEngineState::AddressMapConfig::getMutex(
- ExecutionEngineState *EES) {
+sys::Mutex *
+ExecutionEngineState::AddressMapConfig::getMutex(ExecutionEngineState *EES) {
return &EES->EE.lock;
}
-void ExecutionEngineState::AddressMapConfig::onDelete(
- ExecutionEngineState *EES, const GlobalValue *Old) {
+
+void ExecutionEngineState::AddressMapConfig::onDelete(ExecutionEngineState *EES,
+ const GlobalValue *Old) {
void *OldVal = EES->GlobalAddressMap.lookup(Old);
EES->GlobalAddressReverseMap.erase(OldVal);
}
-void ExecutionEngineState::AddressMapConfig::onRAUW(
- ExecutionEngineState *, const GlobalValue *, const GlobalValue *) {
+void ExecutionEngineState::AddressMapConfig::onRAUW(ExecutionEngineState *,
+ const GlobalValue *,
+ const GlobalValue *) {
assert(false && "The ExecutionEngine doesn't know how to handle a"
" RAUW on a value it has a global mapping for.");
}
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 59ebe6e..498063b 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1060,11 +1060,9 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
Dest.PointerVal = Src.PointerVal;
} else if (DstTy->isIntegerTy()) {
if (SrcTy->isFloatTy()) {
- Dest.IntVal.zext(sizeof(Src.FloatVal) * CHAR_BIT);
- Dest.IntVal.floatToBits(Src.FloatVal);
+ Dest.IntVal = APInt::floatToBits(Src.FloatVal);
} else if (SrcTy->isDoubleTy()) {
- Dest.IntVal.zext(sizeof(Src.DoubleVal) * CHAR_BIT);
- Dest.IntVal.doubleToBits(Src.DoubleVal);
+ Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
} else if (SrcTy->isIntegerTy()) {
Dest.IntVal = Src.IntVal;
} else
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 57d1260..062256a 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -24,10 +24,10 @@
#include "llvm/Module.h"
#include "llvm/Config/config.h" // Detect libffi
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <csignal>
#include <cstdio>
#include <map>
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
index 564e9ab..bfebe3d 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -19,7 +19,7 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
index 274f816..169e1ba 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
@@ -17,7 +17,7 @@
#include "JIT.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Config/config.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 63125b7..cc76b13 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -30,7 +30,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Config/config.h"
using namespace llvm;
@@ -66,8 +66,15 @@ static struct RegisterJIT {
extern "C" void LLVMLinkInJIT() {
}
+// Determine whether we can register EH tables.
+#if (defined(__GNUC__) && !defined(__ARM_EABI__) && \
+ !defined(__USING_SJLJ_EXCEPTIONS__))
+#define HAVE_EHTABLE_SUPPORT 1
+#else
+#define HAVE_EHTABLE_SUPPORT 0
+#endif
-#if defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+#if HAVE_EHTABLE_SUPPORT
// libgcc defines the __register_frame function to dynamically register new
// dwarf frames for exception handling. This functionality is not portable
@@ -87,6 +94,7 @@ extern "C" void LLVMLinkInJIT() {
// values of an opaque key, used by libgcc to find dwarf tables.
extern "C" void __register_frame(void*);
+extern "C" void __deregister_frame(void*);
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED <= 1050
# define USE_KEYMGR 1
@@ -190,7 +198,7 @@ void DarwinRegisterFrame(void* FrameBegin) {
}
#endif // __APPLE__
-#endif // __GNUC__
+#endif // HAVE_EHTABLE_SUPPORT
/// createJIT - This is the factory method for creating a JIT for the current
/// machine, it does not fall back to the interpreter. This takes ownership
@@ -306,7 +314,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
}
// Register routine for informing unwinding runtime about new EH frames
-#if defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+#if HAVE_EHTABLE_SUPPORT
#if USE_KEYMGR
struct LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
_keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
@@ -318,16 +326,21 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
LOI = (LibgccObjectInfo*)calloc(sizeof(struct LibgccObjectInfo), 1);
_keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, LOI);
InstallExceptionTableRegister(DarwinRegisterFrame);
+ // Not sure about how to deregister on Darwin.
#else
InstallExceptionTableRegister(__register_frame);
+ InstallExceptionTableDeregister(__deregister_frame);
#endif // __APPLE__
-#endif // __GNUC__
+#endif // HAVE_EHTABLE_SUPPORT
// Initialize passes.
PM.doInitialization();
}
JIT::~JIT() {
+ // Unregister all exception tables registered by this JIT.
+ DeregisterAllTables();
+ // Cleanup.
AllJits->Remove(this);
delete jitstate;
delete JCE;
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
index 6e11a3c..3b5acb7 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
@@ -25,7 +25,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <string>
#include <vector>
@@ -35,7 +35,7 @@ namespace llvm {
extern "C" {
  // Debuggers put a breakpoint in this function.
- DISABLE_INLINE void __jit_debug_register_code() { }
+ LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { }
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
index 7e53d78..dce506b 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
@@ -16,7 +16,7 @@
#define LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
// This must be kept in sync with gdb/gdb/jit.h .
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 1105bcc..f54ccca 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -26,7 +26,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;
@@ -43,8 +43,9 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
const TargetMachine& TM = F.getTarget();
TD = TM.getTargetData();
- stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
+ stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection();
RI = TM.getRegisterInfo();
+ TFI = TM.getFrameLowering();
JCE = &jce;
unsigned char* ExceptionTable = EmitExceptionTable(&F, StartFunction,
@@ -66,7 +67,7 @@ void
JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
const std::vector<MachineMove> &Moves) const {
unsigned PointerSize = TD->getPointerSize();
- int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
PointerSize : -PointerSize;
MCSymbol *BaseLabel = 0;
@@ -481,7 +482,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
unsigned char*
JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
unsigned PointerSize = TD->getPointerSize();
- int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
PointerSize : -PointerSize;
unsigned char* StartCommonPtr = (unsigned char*)JCE->getCurrentPCValue();
@@ -523,7 +524,7 @@ JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
}
std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
+ TFI->getInitialFrameState(Moves);
EmitFrameMoves(0, Moves);
JCE->emitAlignmentWithFill(PointerSize, dwarf::DW_CFA_nop);
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
index 3095682..9495697 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
@@ -23,6 +23,7 @@ class MachineFunction;
class MachineModuleInfo;
class MachineMove;
class TargetData;
+class TargetFrameLowering;
class TargetMachine;
class TargetRegisterInfo;
@@ -30,6 +31,7 @@ class JITDwarfEmitter {
const TargetData* TD;
JITCodeEmitter* JCE;
const TargetRegisterInfo* RI;
+ const TargetFrameLowering *TFI;
MachineModuleInfo* MMI;
JIT& Jit;
bool stackGrowthDirection;
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 4c0d078..4cd8757 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -42,8 +42,8 @@
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Disassembler.h"
-#include "llvm/System/Memory.h"
+#include "llvm/Support/Disassembler.h"
+#include "llvm/Support/Memory.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index 653e6f1..eec23ce 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Memory.h"
+#include "llvm/Support/Memory.h"
#include <vector>
#include <cassert>
#include <climits>
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
index 1ca084b..670fa7d 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
@@ -26,7 +26,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Errno.h"
+#include "llvm/Support/Errno.h"
#include "llvm/Config/config.h"
#include <stddef.h>
using namespace llvm;
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp
index 3349c33..6b7173c 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp
@@ -18,7 +18,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/CMakeLists.txt b/contrib/llvm/lib/ExecutionEngine/MCJIT/CMakeLists.txt
new file mode 100644
index 0000000..f7ed176
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_llvm_library(LLVMMCJIT
+ MCJIT.cpp
+ TargetSelect.cpp
+ )
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
new file mode 100644
index 0000000..f1e9dab
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -0,0 +1,92 @@
+//===-- MCJIT.cpp - MC-based Just-in-Time Compiler ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJIT.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+using namespace llvm;
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { MCJIT::Register(); }
+} JITRegistrator;
+
+}
+
+extern "C" void LLVMLinkInMCJIT() {
+}
+
+ExecutionEngine *MCJIT::createJIT(Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) {
+ // Try to register the program as a source of symbols to resolve against.
+ //
+ // FIXME: Don't do this here.
+ sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
+
+ // Pick a target either via -march or by guessing the native arch.
+ //
+ // FIXME: This should be lifted out of here, it isn't something which should
+ // be part of the JIT policy, rather the burden for this selection should be
+ // pushed to clients.
+ TargetMachine *TM = MCJIT::selectTarget(M, MArch, MCPU, MAttrs, ErrorStr);
+ if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
+ TM->setCodeModel(CMM);
+
+ // If the target supports JIT code generation, create the JIT.
+ if (TargetJITInfo *TJ = TM->getJITInfo())
+ return new MCJIT(M, *TM, *TJ, JMM, OptLevel, GVsWithCode);
+
+ if (ErrorStr)
+ *ErrorStr = "target does not support JIT code generation";
+ return 0;
+}
+
+MCJIT::MCJIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
+ bool AllocateGVsWithCode)
+ : ExecutionEngine(M) {
+}
+
+MCJIT::~MCJIT() {
+}
+
+void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
+ report_fatal_error("not yet implemented");
+ return 0;
+}
+
+void *MCJIT::getPointerToFunction(Function *F) {
+ report_fatal_error("not yet implemented");
+ return 0;
+}
+
+void *MCJIT::recompileAndRelinkFunction(Function *F) {
+ report_fatal_error("not yet implemented");
+}
+
+void MCJIT::freeMachineCodeForFunction(Function *F) {
+ report_fatal_error("not yet implemented");
+}
+
+GenericValue MCJIT::runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) {
+ report_fatal_error("not yet implemented");
+ return GenericValue();
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
new file mode 100644
index 0000000..cd1f989
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -0,0 +1,68 @@
+//===-- MCJIT.h - Class definition for the MCJIT ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_H
+#define LLVM_LIB_EXECUTIONENGINE_MCJIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+
+namespace llvm {
+
+class MCJIT : public ExecutionEngine {
+ MCJIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
+ bool AllocateGVsWithCode);
+public:
+ ~MCJIT();
+
+ /// @name ExecutionEngine interface implementation
+ /// @{
+
+ virtual void *getPointerToBasicBlock(BasicBlock *BB);
+
+ virtual void *getPointerToFunction(Function *F);
+
+ virtual void *recompileAndRelinkFunction(Function *F);
+
+ virtual void freeMachineCodeForFunction(Function *F);
+
+ virtual GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues);
+
+ /// @}
+ /// @name (Private) Registration Interfaces
+ /// @{
+
+ static void Register() {
+ MCJITCtor = createJIT;
+ }
+
+ // FIXME: This routine is scheduled for termination. Do not use it.
+ static TargetMachine *selectTarget(Module *M,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs,
+ std::string *Err);
+
+ static ExecutionEngine *createJIT(Module *M,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel,
+ bool GVsWithCode,
+ CodeModel::Model CMM,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
+
+ // @}
+};
+
+} // End llvm namespace
+
+#endif
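Selecting this engine from a client, sketched against the UseMCJIT plumbing
added to EngineBuilder above (setUseMCJIT is assumed to be the matching
builder hook):

    LLVMLinkInMCJIT();  // pull in the static MCJIT registrar
    llvm::ExecutionEngine *EE =
        llvm::EngineBuilder(M).setUseMCJIT(true).create();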
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/Makefile b/contrib/llvm/lib/ExecutionEngine/MCJIT/Makefile
new file mode 100644
index 0000000..967efbc
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/Makefile
@@ -0,0 +1,13 @@
+##===- lib/ExecutionEngine/MCJIT/Makefile ------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMMCJIT
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/TargetSelect.cpp
new file mode 100644
index 0000000..50f6593
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/TargetSelect.cpp
@@ -0,0 +1,91 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetRegistry for the appropriate JIT to use, and allows
+// the user to specify a specific one on the commandline with -march=x. Clients
+// should initialize targets prior to calling createJIT.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJIT.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace llvm;
+
+/// selectTarget - Pick a target either via -march or by guessing the native
+/// arch. Add any CPU features specified via -mcpu or -mattr.
+TargetMachine *MCJIT::selectTarget(Module *Mod,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs,
+ std::string *ErrorStr) {
+ Triple TheTriple(Mod->getTargetTriple());
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getHostTriple());
+
+ // Adjust the triple to match what the user requested.
+ const Target *TheTarget = 0;
+ if (!MArch.empty()) {
+ for (TargetRegistry::iterator it = TargetRegistry::begin(),
+ ie = TargetRegistry::end(); it != ie; ++it) {
+ if (MArch == it->getName()) {
+ TheTarget = &*it;
+ break;
+ }
+ }
+
+    if (!TheTarget) {
+      if (ErrorStr)
+        *ErrorStr = "No available targets are compatible with this -march, "
+                    "see -version for the available targets.\n";
+      return 0;
+    }
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // module/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (TheTarget == 0) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return 0;
+ }
+ }
+
+ if (!TheTarget->hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host you are"
+ << " running. If bad things happen, please choose a different "
+ << "-march switch.\n";
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MCPU.empty() || !MAttrs.empty()) {
+ SubtargetFeatures Features;
+ Features.setCPU(MCPU);
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ // Allocate a target...
+ TargetMachine *Target =
+ TheTarget->createTargetMachine(TheTriple.getTriple(), FeaturesStr);
+ assert(Target && "Could not allocate target machine!");
+ return Target;
+}
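The feature string assembled above uses SubtargetFeatures' comma-separated
form; a sketch with hypothetical x86 names:

    llvm::SubtargetFeatures Features;
    Features.setCPU("core2");
    Features.AddFeature("sse41");  // recorded as "+sse41"
    std::string FeaturesStr = Features.getString();  // e.g. "core2,+sse41"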
diff --git a/contrib/llvm/lib/Linker/LinkItems.cpp b/contrib/llvm/lib/Linker/LinkItems.cpp
index 1be2bec..52a0d17 100644
--- a/contrib/llvm/lib/Linker/LinkItems.cpp
+++ b/contrib/llvm/lib/Linker/LinkItems.cpp
@@ -15,9 +15,10 @@
#include "llvm/Linker.h"
#include "llvm/Module.h"
#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/system_error.h"
using namespace llvm;
// LinkItems - This function is the main entry point into linking. It takes a
@@ -160,19 +161,19 @@ bool Linker::LinkInFile(const sys::Path &File, bool &is_native) {
// Check for a file of name "-", which means "read standard input"
if (File.str() == "-") {
std::auto_ptr<Module> M;
- if (MemoryBuffer *Buffer = MemoryBuffer::getSTDIN(&Error)) {
+ OwningPtr<MemoryBuffer> Buffer;
+ error_code ec;
+ if (!(ec = MemoryBuffer::getSTDIN(Buffer))) {
if (!Buffer->getBufferSize()) {
- delete Buffer;
Error = "standard input is empty";
} else {
- M.reset(ParseBitcodeFile(Buffer, Context, &Error));
- delete Buffer;
+ M.reset(ParseBitcodeFile(Buffer.get(), Context, &Error));
if (M.get())
if (!LinkInModule(M.get(), &Error))
return false;
}
}
- return error("Cannot link stdin: " + Error);
+ return error("Cannot link stdin: " + ec.message());
}
// Determine what variety of file it is.
diff --git a/contrib/llvm/lib/Linker/LinkModules.cpp b/contrib/llvm/lib/Linker/LinkModules.cpp
index 7e8245a..5aa06ab 100644
--- a/contrib/llvm/lib/Linker/LinkModules.cpp
+++ b/contrib/llvm/lib/Linker/LinkModules.cpp
@@ -28,7 +28,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/ADT/DenseMap.h"
using namespace llvm;
@@ -434,8 +434,10 @@ static bool GetLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
}
// Check visibility
- if (Dest && Src->getVisibility() != Dest->getVisibility())
- if (!Src->isDeclaration() && !Dest->isDeclaration())
+ if (Dest && Src->getVisibility() != Dest->getVisibility() &&
+ !Src->isDeclaration() && !Dest->isDeclaration() &&
+ !Src->hasAvailableExternallyLinkage() &&
+ !Dest->hasAvailableExternallyLinkage())
return Error(Err, "Linking globals named '" + Src->getName() +
"': symbols have different visibilities!");
return false;
@@ -449,10 +451,9 @@ static void LinkNamedMDNodes(Module *Dest, Module *Src,
const NamedMDNode *SrcNMD = I;
NamedMDNode *DestNMD = Dest->getOrInsertNamedMetadata(SrcNMD->getName());
// Add Src elements into Dest node.
- for (unsigned i = 0, e = SrcNMD->getNumOperands(); i != e; ++i)
+ for (unsigned i = 0, e = SrcNMD->getNumOperands(); i != e; ++i)
DestNMD->addOperand(cast<MDNode>(MapValue(SrcNMD->getOperand(i),
- ValueMap,
- true)));
+ ValueMap)));
}
}
@@ -520,6 +521,8 @@ static bool LinkGlobals(Module *Dest, const Module *Src,
continue;
}
+ bool HasUnnamedAddr = SGV->hasUnnamedAddr() && DGV->hasUnnamedAddr();
+
// If the visibilities of the symbols disagree and the destination is a
// prototype, take the visibility of its input.
if (DGV->isDeclaration())
@@ -559,14 +562,17 @@ static bool LinkGlobals(Module *Dest, const Module *Src,
// we are replacing may be a function (if a prototype, weak, etc) or a
// global variable.
GlobalVariable *NewDGV =
- new GlobalVariable(*Dest, SGV->getType()->getElementType(),
- SGV->isConstant(), NewLinkage, /*init*/0,
+ new GlobalVariable(*Dest, SGV->getType()->getElementType(),
+ SGV->isConstant(), NewLinkage, /*init*/0,
DGV->getName(), 0, false,
SGV->getType()->getAddressSpace());
+ // Set the unnamed_addr.
+ NewDGV->setUnnamedAddr(HasUnnamedAddr);
+
// Propagate alignment, section, and visibility info.
CopyGVAttributes(NewDGV, SGV);
- DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDGV,
+ DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDGV,
DGV->getType()));
// DGV will conflict with NewDGV because they both had the same
@@ -608,8 +614,9 @@ static bool LinkGlobals(Module *Dest, const Module *Src,
"': symbol multiple defined");
}
- // Set calculated linkage
+ // Set calculated linkage and unnamed_addr
DGV->setLinkage(NewLinkage);
+ DGV->setUnnamedAddr(HasUnnamedAddr);
// Make sure to remember this mapping...
ValueMap[SGV] = ConstantExpr::getBitCast(DGV, SGV->getType());
@@ -668,6 +675,13 @@ static bool LinkAlias(Module *Dest, const Module *Src,
GlobalValue* DAliasee = cast<GlobalValue>(VMI->second);
GlobalValue* DGV = NULL;
+ // Fixup aliases to bitcasts. Note that aliases to GEPs are still broken
+  // by this, but aliases to GEPs are broken by a lot of other things, so
+ // it's less important.
+ Constant *DAliaseeConst = DAliasee;
+ if (SGA->getType() != DAliasee->getType())
+ DAliaseeConst = ConstantExpr::getBitCast(DAliasee, SGA->getType());
+
// Try to find something 'similar' to SGA in destination module.
if (!DGV && !SGA->hasLocalLinkage()) {
DGV = Dest->getNamedAlias(SGA->getName());
@@ -721,7 +735,7 @@ static bool LinkAlias(Module *Dest, const Module *Src,
"': aliasee is not global variable");
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
- SGA->getName(), DAliasee, Dest);
+ SGA->getName(), DAliaseeConst, Dest);
CopyGVAttributes(NewGA, SGA);
// Any uses of DGV need to change to NewGA, with cast, if needed.
@@ -750,7 +764,7 @@ static bool LinkAlias(Module *Dest, const Module *Src,
"': aliasee is not function");
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
- SGA->getName(), DAliasee, Dest);
+ SGA->getName(), DAliaseeConst, Dest);
CopyGVAttributes(NewGA, SGA);
// Any uses of DF need to change to NewGA, with cast, if needed.
@@ -772,14 +786,8 @@ static bool LinkAlias(Module *Dest, const Module *Src,
} else {
// No linking to be performed, simply create an identical version of the
// alias over in the dest module...
- Constant *Aliasee = DAliasee;
- // Fixup aliases to bitcasts. Note that aliases to GEPs are still broken
- // by this, but aliases to GEPs are broken to a lot of other things, so
- // it's less important.
- if (SGA->getType() != DAliasee->getType())
- Aliasee = ConstantExpr::getBitCast(DAliasee, SGA->getType());
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
- SGA->getName(), Aliasee, Dest);
+ SGA->getName(), DAliaseeConst, Dest);
CopyGVAttributes(NewGA, SGA);
// Proceed to 'common' steps
@@ -813,9 +821,9 @@ static bool LinkGlobalInits(Module *Dest, const Module *Src,
const GlobalVariable *SGV = I;
if (SGV->hasInitializer()) { // Only process initialized GV's
- // Figure out what the initializer looks like in the dest module...
+ // Figure out what the initializer looks like in the dest module.
Constant *SInit =
- cast<Constant>(MapValue(SGV->getInitializer(), ValueMap, true));
+ cast<Constant>(MapValue(SGV->getInitializer(), ValueMap));
// Grab destination global variable or alias.
GlobalValue *DGV = cast<GlobalValue>(ValueMap[SGV]->stripPointerCasts());
@@ -927,7 +935,7 @@ static bool LinkFunctionProtos(Module *Dest, const Module *Src,
CopyGVAttributes(NewDF, SF);
// Any uses of DF need to change to NewDF, with cast
- DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDF,
+ DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDF,
DGV->getType()));
// DF will conflict with NewDF because they both had the same. We must
@@ -995,32 +1003,10 @@ static bool LinkFunctionBody(Function *Dest, Function *Src,
// At this point, all of the instructions and values of the function are now
// copied over. The only problem is that they are still referencing values in
// the Source function as operands. Loop through all of the operands of the
- // functions and patch them up to point to the local versions...
- //
- // This is the same as RemapInstruction, except that it avoids remapping
- // instruction and basic block operands.
- //
+ // functions and patch them up to point to the local versions.
for (Function::iterator BB = Dest->begin(), BE = Dest->end(); BB != BE; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- // Remap operands.
- for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
- OI != OE; ++OI)
- if (!isa<Instruction>(*OI) && !isa<BasicBlock>(*OI))
- *OI = MapValue(*OI, ValueMap, true);
-
- // Remap attached metadata.
- SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
- I->getAllMetadata(MDs);
- for (SmallVectorImpl<std::pair<unsigned, MDNode *> >::iterator
- MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI) {
- Value *Old = MI->second;
- if (!isa<Instruction>(Old) && !isa<BasicBlock>(Old)) {
- Value *New = MapValue(Old, ValueMap, true);
- if (New != Old)
- I->setMetadata(MI->first, cast<MDNode>(New));
- }
- }
- }
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+ RemapInstruction(I, ValueMap, RF_IgnoreMissingEntries);
// There is no need to map the arguments anymore.
for (Function::arg_iterator I = Src->arg_begin(), E = Src->arg_end();
@@ -1099,7 +1085,7 @@ static bool LinkAppendingVars(Module *M,
"Appending variables with different section name need to be linked!");
unsigned NewSize = T1->getNumElements() + T2->getNumElements();
- ArrayType *NewType = ArrayType::get(T1->getElementType(),
+ ArrayType *NewType = ArrayType::get(T1->getElementType(),
NewSize);
G1->setName(""); // Clear G1's name in case of a conflict!
@@ -1143,7 +1129,7 @@ static bool LinkAppendingVars(Module *M,
// getelementptr instructions to not use the Cast!
G1->replaceAllUsesWith(ConstantExpr::getBitCast(NG,
G1->getType()));
- G2->replaceAllUsesWith(ConstantExpr::getBitCast(NG,
+ G2->replaceAllUsesWith(ConstantExpr::getBitCast(NG,
G2->getType()));
// Remove the two globals from the module now...
@@ -1217,8 +1203,13 @@ Linker::LinkModules(Module *Dest, Module *Src, std::string *ErrorMsg) {
Src->getDataLayout() != Dest->getDataLayout())
errs() << "WARNING: Linking two modules of different data layouts!\n";
if (!Src->getTargetTriple().empty() &&
- Dest->getTargetTriple() != Src->getTargetTriple())
- errs() << "WARNING: Linking two modules of different target triples!\n";
+ Dest->getTargetTriple() != Src->getTargetTriple()) {
+ errs() << "WARNING: Linking two modules of different target triples: ";
+ if (!Src->getModuleIdentifier().empty())
+ errs() << Src->getModuleIdentifier() << ": ";
+ errs() << "'" << Src->getTargetTriple() << "' and '"
+ << Dest->getTargetTriple() << "'\n";
+ }
// Append the module inline asm string.
if (!Src->getModuleInlineAsm().empty()) {
@@ -1300,10 +1291,9 @@ Linker::LinkModules(Module *Dest, Module *Src, std::string *ErrorMsg) {
// If the source library's module id is in the dependent library list of the
// destination library, remove it since that module is now linked in.
- sys::Path modId;
- modId.set(Src->getModuleIdentifier());
- if (!modId.isEmpty())
- Dest->removeLibrary(modId.getBasename());
+ const std::string &modId = Src->getModuleIdentifier();
+ if (!modId.empty())
+ Dest->removeLibrary(sys::path::stem(modId));
return false;
}
diff --git a/contrib/llvm/lib/Linker/Linker.cpp b/contrib/llvm/lib/Linker/Linker.cpp
index 32aa0f9..fba91da 100644
--- a/contrib/llvm/lib/Linker/Linker.cpp
+++ b/contrib/llvm/lib/Linker/Linker.cpp
@@ -14,10 +14,11 @@
#include "llvm/Linker.h"
#include "llvm/Module.h"
#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/system_error.h"
using namespace llvm;
Linker::Linker(StringRef progname, StringRef modname,
@@ -97,13 +98,14 @@ std::auto_ptr<Module>
Linker::LoadObject(const sys::Path &FN) {
std::string ParseErrorMessage;
Module *Result = 0;
-
- std::auto_ptr<MemoryBuffer> Buffer(MemoryBuffer::getFileOrSTDIN(FN.c_str()));
- if (Buffer.get())
- Result = ParseBitcodeFile(Buffer.get(), Context, &ParseErrorMessage);
+
+ OwningPtr<MemoryBuffer> Buffer;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(FN.c_str(), Buffer))
+ ParseErrorMessage = "Error reading file '" + FN.str() + "'" + ": "
+ + ec.message();
else
- ParseErrorMessage = "Error reading file '" + FN.str() + "'";
-
+ Result = ParseBitcodeFile(Buffer.get(), Context, &ParseErrorMessage);
+
if (Result)
return std::auto_ptr<Module>(Result);
Error = "Bitcode file '" + FN.str() + "' could not be loaded";
@@ -133,7 +135,7 @@ static inline sys::Path IsLibrary(StringRef Name,
// Try the libX.so (or .dylib) form
FullPath.eraseSuffix();
- FullPath.appendSuffix(&(LTDL_SHLIB_EXT[1]));
+ FullPath.appendSuffix(sys::Path::GetDLLSuffix());
if (FullPath.isDynamicLibrary()) // Native shared library?
return FullPath;
if (FullPath.isBitcodeFile()) // .so file containing bitcode?
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
index cf35b45..8a00a16 100644
--- a/contrib/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
@@ -11,7 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/ELFObjectWriter.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
@@ -20,6 +21,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFSymbolFlags.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
@@ -28,27 +30,76 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/ADT/StringSwitch.h"
#include "../Target/X86/X86FixupKinds.h"
+#include "../Target/ARM/ARMFixupKinds.h"
#include <vector>
using namespace llvm;
-namespace {
+static unsigned GetType(const MCSymbolData &SD) {
+ uint32_t Type = (SD.getFlags() & (0xf << ELF_STT_Shift)) >> ELF_STT_Shift;
+ assert(Type == ELF::STT_NOTYPE || Type == ELF::STT_OBJECT ||
+ Type == ELF::STT_FUNC || Type == ELF::STT_SECTION ||
+ Type == ELF::STT_FILE || Type == ELF::STT_COMMON ||
+ Type == ELF::STT_TLS);
+ return Type;
+}
- class ELFObjectWriterImpl {
- static bool isFixupKindX86PCRel(unsigned Kind) {
- switch (Kind) {
- default:
- return false;
- case X86::reloc_pcrel_1byte:
- case X86::reloc_pcrel_4byte:
- case X86::reloc_riprel_4byte:
- case X86::reloc_riprel_4byte_movq_load:
- return true;
- }
- }
+static unsigned GetBinding(const MCSymbolData &SD) {
+ uint32_t Binding = (SD.getFlags() & (0xf << ELF_STB_Shift)) >> ELF_STB_Shift;
+ assert(Binding == ELF::STB_LOCAL || Binding == ELF::STB_GLOBAL ||
+ Binding == ELF::STB_WEAK);
+ return Binding;
+}
+
+static void SetBinding(MCSymbolData &SD, unsigned Binding) {
+ assert(Binding == ELF::STB_LOCAL || Binding == ELF::STB_GLOBAL ||
+ Binding == ELF::STB_WEAK);
+ uint32_t OtherFlags = SD.getFlags() & ~(0xf << ELF_STB_Shift);
+ SD.setFlags(OtherFlags | (Binding << ELF_STB_Shift));
+}
+static unsigned GetVisibility(MCSymbolData &SD) {
+ unsigned Visibility =
+ (SD.getFlags() & (0xf << ELF_STV_Shift)) >> ELF_STV_Shift;
+ assert(Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_INTERNAL ||
+ Visibility == ELF::STV_HIDDEN || Visibility == ELF::STV_PROTECTED);
+ return Visibility;
+}
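
GetBinding/SetBinding/GetVisibility above assume MCSymbolData's 32-bit flag word keeps the ELF binding, type, and visibility nibbles at fixed shifts; the real ELF_ST*_Shift constants live in MCELFSymbolFlags.h. A sketch of the packing arithmetic with hypothetical shift values:

    // Shift values hypothetical, for illustration only.
    enum { STT_Shift = 0, STB_Shift = 4 };
    uint32_t Flags   = (ELF::STB_WEAK << STB_Shift) |
                       (ELF::STT_FUNC << STT_Shift);  // pack
    unsigned Binding = (Flags >> STB_Shift) & 0xf;    // unpack: STB_WEAK
    unsigned Type    = (Flags >> STT_Shift) & 0xf;    // unpack: STT_FUNC
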
+
+
+static bool RelocNeedsGOT(MCSymbolRefExpr::VariantKind Variant) {
+ switch (Variant) {
+ default:
+ return false;
+ case MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_PLT:
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ case MCSymbolRefExpr::VK_TPOFF:
+ case MCSymbolRefExpr::VK_TLSGD:
+ case MCSymbolRefExpr::VK_GOTTPOFF:
+ case MCSymbolRefExpr::VK_INDNTPOFF:
+ case MCSymbolRefExpr::VK_NTPOFF:
+ case MCSymbolRefExpr::VK_GOTNTPOFF:
+ case MCSymbolRefExpr::VK_TLSLDM:
+ case MCSymbolRefExpr::VK_DTPOFF:
+ case MCSymbolRefExpr::VK_TLSLD:
+ return true;
+ }
+}
+
+static bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
+ const MCFixupKindInfo &FKI =
+ Asm.getBackend().getFixupKindInfo((MCFixupKind) Kind);
+
+ return FKI.Flags & MCFixupKindInfo::FKF_IsPCRel;
+}
+
+namespace {
+ class ELFObjectWriter : public MCObjectWriter {
+ protected:
/*static bool isFixupKindX86RIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
Kind == X86::reloc_riprel_4byte_movq_load;
@@ -64,6 +115,10 @@ namespace {
// Support lexicographic sorting.
bool operator<(const ELFSymbolData &RHS) const {
+ if (GetType(*SymbolData) == ELF::STT_FILE)
+ return true;
+ if (GetType(*RHS.SymbolData) == ELF::STT_FILE)
+ return false;
return SymbolData->getSymbol().getName() <
RHS.SymbolData->getSymbol().getName();
}
@@ -75,15 +130,33 @@ namespace {
struct ELFRelocationEntry {
// Make these big enough for both 32-bit and 64-bit
uint64_t r_offset;
- uint64_t r_info;
+ int Index;
+ unsigned Type;
+ const MCSymbol *Symbol;
uint64_t r_addend;
+ ELFRelocationEntry()
+ : r_offset(0), Index(0), Type(0), Symbol(0), r_addend(0) {}
+
+ ELFRelocationEntry(uint64_t RelocOffset, int Idx,
+ unsigned RelType, const MCSymbol *Sym,
+ uint64_t Addend)
+ : r_offset(RelocOffset), Index(Idx), Type(RelType),
+ Symbol(Sym), r_addend(Addend) {}
+
// Support lexicographic sorting.
bool operator<(const ELFRelocationEntry &RE) const {
return RE.r_offset < r_offset;
}
};
+ /// The target specific ELF writer instance.
+ llvm::OwningPtr<MCELFObjectTargetWriter> TargetObjectWriter;
+
+ SmallPtrSet<const MCSymbol *, 16> UsedInReloc;
+ SmallPtrSet<const MCSymbol *, 16> WeakrefUsedInReloc;
+ DenseMap<const MCSymbol *, const MCSymbol *> Renames;
+
llvm::DenseMap<const MCSectionData*,
std::vector<ELFRelocationEntry> > Relocations;
DenseMap<const MCSection*, uint64_t> SectionStringTableIndex;
@@ -99,49 +172,52 @@ namespace {
/// @}
- ELFObjectWriter *Writer;
-
- raw_ostream &OS;
-
- // This holds the current offset into the object file.
- size_t FileOff;
-
- unsigned Is64Bit : 1;
+ bool NeedsGOT;
- bool HasRelocationAddend;
+ bool NeedsSymtabShndx;
// This holds the symbol table index of the last local symbol.
unsigned LastLocalSymbolIndex;
// This holds the .strtab section index.
unsigned StringTableIndex;
+ // This holds the .symtab section index.
+ unsigned SymbolTableIndex;
unsigned ShstrtabIndex;
+
+ const MCSymbol *SymbolToReloc(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F) const;
+
+ // For arch-specific emission of explicit reloc symbol
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ bool IsBSS) const {
+ return NULL;
+ }
+
+ bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+ bool hasRelocationAddend() const {
+ return TargetObjectWriter->hasRelocationAddend();
+ }
+
public:
- ELFObjectWriterImpl(ELFObjectWriter *_Writer, bool _Is64Bit,
- bool _HasRelAddend)
- : Writer(_Writer), OS(Writer->getStream()), FileOff(0),
- Is64Bit(_Is64Bit), HasRelocationAddend(_HasRelAddend) {
+ ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS, bool IsLittleEndian)
+ : MCObjectWriter(_OS, IsLittleEndian),
+ TargetObjectWriter(MOTW),
+ NeedsGOT(false), NeedsSymtabShndx(false){
}
- void Write8(uint8_t Value) { Writer->Write8(Value); }
- void Write16(uint16_t Value) { Writer->Write16(Value); }
- void Write32(uint32_t Value) { Writer->Write32(Value); }
- //void Write64(uint64_t Value) { Writer->Write64(Value); }
- void WriteZeros(unsigned N) { Writer->WriteZeros(N); }
- //void WriteBytes(StringRef Str, unsigned ZeroFillSize = 0) {
- // Writer->WriteBytes(Str, ZeroFillSize);
- //}
+ virtual ~ELFObjectWriter();
void WriteWord(uint64_t W) {
- if (Is64Bit)
- Writer->Write64(W);
+ if (is64Bit())
+ Write64(W);
else
- Writer->Write32(W);
- }
-
- void String8(char *buf, uint8_t Value) {
- buf[0] = Value;
+ Write32(W);
}
void StringLE16(char *buf, uint16_t Value) {
@@ -174,86 +250,191 @@ namespace {
StringBE32(buf + 4, uint32_t(Value >> 0));
}
- void String16(char *buf, uint16_t Value) {
- if (Writer->isLittleEndian())
+ void String8(MCDataFragment &F, uint8_t Value) {
+ char buf[1];
+ buf[0] = Value;
+ F.getContents() += StringRef(buf, 1);
+ }
+
+ void String16(MCDataFragment &F, uint16_t Value) {
+ char buf[2];
+ if (isLittleEndian())
StringLE16(buf, Value);
else
StringBE16(buf, Value);
+ F.getContents() += StringRef(buf, 2);
}
- void String32(char *buf, uint32_t Value) {
- if (Writer->isLittleEndian())
+ void String32(MCDataFragment &F, uint32_t Value) {
+ char buf[4];
+ if (isLittleEndian())
StringLE32(buf, Value);
else
StringBE32(buf, Value);
+ F.getContents() += StringRef(buf, 4);
}
- void String64(char *buf, uint64_t Value) {
- if (Writer->isLittleEndian())
+ void String64(MCDataFragment &F, uint64_t Value) {
+ char buf[8];
+ if (isLittleEndian())
StringLE64(buf, Value);
else
StringBE64(buf, Value);
+ F.getContents() += StringRef(buf, 8);
}
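
The String16/32/64 wrappers above defer byte ordering to the StringLE*/StringBE* primitives defined earlier in this file; those are plain byte shuffles, roughly (16-bit pair shown as a sketch):

    static void StringLE16(char *buf, uint16_t Value) {
      buf[0] = char(Value >> 0);  // least significant byte first
      buf[1] = char(Value >> 8);
    }
    static void StringBE16(char *buf, uint16_t Value) {
      buf[0] = char(Value >> 8);  // most significant byte first
      buf[1] = char(Value >> 0);
    }
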
- void WriteHeader(uint64_t SectionDataSize, unsigned NumberOfSections);
+ virtual void WriteHeader(uint64_t SectionDataSize, unsigned NumberOfSections);
+
+ /// Default e_flags = 0
+ virtual void WriteEFlags() { Write32(0); }
- void WriteSymbolEntry(MCDataFragment *F, uint64_t name, uint8_t info,
+ virtual void WriteSymbolEntry(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
+ uint64_t name, uint8_t info,
uint64_t value, uint64_t size,
- uint8_t other, uint16_t shndx);
+ uint8_t other, uint32_t shndx,
+ bool Reserved);
- void WriteSymbol(MCDataFragment *F, ELFSymbolData &MSD,
+ virtual void WriteSymbol(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
+ ELFSymbolData &MSD,
const MCAsmLayout &Layout);
- void WriteSymbolTable(MCDataFragment *F, const MCAssembler &Asm,
- const MCAsmLayout &Layout);
+ typedef DenseMap<const MCSectionELF*, uint32_t> SectionIndexMapTy;
+ virtual void WriteSymbolTable(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap);
- void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue);
+ virtual void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
- uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
+ virtual uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
const MCSymbol *S);
+ // Map from a group section to the signature symbol
+ typedef DenseMap<const MCSectionELF*, const MCSymbol*> GroupMapTy;
+ // Map from a signature symbol to the group section
+ typedef DenseMap<const MCSymbol*, const MCSectionELF*> RevGroupMapTy;
+
/// ComputeSymbolTable - Compute the symbol table data
///
/// \param StringTable [out] - The string table data.
/// \param StringIndexMap [out] - Map from symbol names to offsets in the
/// string table.
- void ComputeSymbolTable(MCAssembler &Asm);
+ virtual void ComputeSymbolTable(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ RevGroupMapTy RevGroupMap);
- void WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
+ virtual void ComputeIndexMap(MCAssembler &Asm,
+ SectionIndexMapTy &SectionIndexMap);
+
+ virtual void WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
const MCSectionData &SD);
- void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout) {
+ virtual void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout) {
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
WriteRelocation(Asm, Layout, *it);
}
}
- void CreateMetadataSections(MCAssembler &Asm, MCAsmLayout &Layout);
+ virtual void CreateMetadataSections(MCAssembler &Asm, MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap);
- void ExecutePostLayoutBinding(MCAssembler &Asm) {
- // Compute symbol table information.
- ComputeSymbolTable(Asm);
- }
+ // Create the sections that show up in the symbol table. Currently
+ // those are the .note.GNU-stack section and the group sections.
+ virtual void CreateIndexedSections(MCAssembler &Asm, MCAsmLayout &Layout,
+ GroupMapTy &GroupMap,
+ RevGroupMapTy &RevGroupMap);
- void WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout);
+
+ virtual void WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
uint64_t Address, uint64_t Offset,
uint64_t Size, uint32_t Link, uint32_t Info,
uint64_t Alignment, uint64_t EntrySize);
- void WriteRelocationsFragment(const MCAssembler &Asm, MCDataFragment *F,
- const MCSectionData *SD);
+ virtual void WriteRelocationsFragment(const MCAssembler &Asm,
+ MCDataFragment *F,
+ const MCSectionData *SD);
+
+ virtual void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+ virtual void WriteSection(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ uint32_t GroupSymbolIndex,
+ uint64_t Offset, uint64_t Size, uint64_t Alignment,
+ const MCSectionELF &Section);
+
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) = 0;
+ };
+
+ //===- X86ELFObjectWriter -------------------------------------------===//
+
+ class X86ELFObjectWriter : public ELFObjectWriter {
+ public:
+ X86ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian);
+
+ virtual ~X86ELFObjectWriter();
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend);
+ };
+
+
+ //===- ARMELFObjectWriter -------------------------------------------===//
+
+ class ARMELFObjectWriter : public ELFObjectWriter {
+ public:
+ // FIXME: MCAssembler can't yet return the Subtarget.
+ enum { DefaultEABIVersion = 0x05000000U };
+
+ ARMELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian);
- void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+ virtual ~ARMELFObjectWriter();
+
+ virtual void WriteEFlags();
+ protected:
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ bool IsBSS) const;
+
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend);
};
+ //===- MBlazeELFObjectWriter -------------------------------------------===//
+
+ class MBlazeELFObjectWriter : public ELFObjectWriter {
+ public:
+ MBlazeELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian);
+
+ virtual ~MBlazeELFObjectWriter();
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend);
+ };
}
+ELFObjectWriter::~ELFObjectWriter()
+{}
+
// Emit the ELF header.
-void ELFObjectWriterImpl::WriteHeader(uint64_t SectionDataSize,
- unsigned NumberOfSections) {
+void ELFObjectWriter::WriteHeader(uint64_t SectionDataSize,
+ unsigned NumberOfSections) {
// ELF Header
// ----------
//
@@ -267,140 +448,193 @@ void ELFObjectWriterImpl::WriteHeader(uint64_t SectionDataSize,
Write8('L'); // e_ident[EI_MAG2]
Write8('F'); // e_ident[EI_MAG3]
- Write8(Is64Bit ? ELF::ELFCLASS64 : ELF::ELFCLASS32); // e_ident[EI_CLASS]
+ Write8(is64Bit() ? ELF::ELFCLASS64 : ELF::ELFCLASS32); // e_ident[EI_CLASS]
// e_ident[EI_DATA]
- Write8(Writer->isLittleEndian() ? ELF::ELFDATA2LSB : ELF::ELFDATA2MSB);
+ Write8(isLittleEndian() ? ELF::ELFDATA2LSB : ELF::ELFDATA2MSB);
Write8(ELF::EV_CURRENT); // e_ident[EI_VERSION]
- Write8(ELF::ELFOSABI_LINUX); // e_ident[EI_OSABI]
+ // e_ident[EI_OSABI]
+ switch (TargetObjectWriter->getOSType()) {
+ case Triple::FreeBSD: Write8(ELF::ELFOSABI_FREEBSD); break;
+ case Triple::Linux: Write8(ELF::ELFOSABI_LINUX); break;
+ default: Write8(ELF::ELFOSABI_NONE); break;
+ }
Write8(0); // e_ident[EI_ABIVERSION]
WriteZeros(ELF::EI_NIDENT - ELF::EI_PAD);
Write16(ELF::ET_REL); // e_type
- // FIXME: Make this configurable
- Write16(Is64Bit ? ELF::EM_X86_64 : ELF::EM_386); // e_machine = target
+ Write16(TargetObjectWriter->getEMachine()); // e_machine = target
Write32(ELF::EV_CURRENT); // e_version
WriteWord(0); // e_entry, no entry point in .o file
WriteWord(0); // e_phoff, no program header for .o
- WriteWord(SectionDataSize + (Is64Bit ? sizeof(ELF::Elf64_Ehdr) :
+ WriteWord(SectionDataSize + (is64Bit() ? sizeof(ELF::Elf64_Ehdr) :
sizeof(ELF::Elf32_Ehdr))); // e_shoff = sec hdr table off in bytes
- // FIXME: Make this configurable.
- Write32(0); // e_flags = whatever the target wants
+ // e_flags = whatever the target wants
+ WriteEFlags();
// e_ehsize = ELF header size
- Write16(Is64Bit ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr));
+ Write16(is64Bit() ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr));
Write16(0); // e_phentsize = prog header entry size
Write16(0); // e_phnum = # prog header entries = 0
// e_shentsize = Section header entry size
- Write16(Is64Bit ? sizeof(ELF::Elf64_Shdr) : sizeof(ELF::Elf32_Shdr));
+ Write16(is64Bit() ? sizeof(ELF::Elf64_Shdr) : sizeof(ELF::Elf32_Shdr));
// e_shnum = # of section header ents
- Write16(NumberOfSections);
+ if (NumberOfSections >= ELF::SHN_LORESERVE)
+ Write16(0);
+ else
+ Write16(NumberOfSections);
// e_shstrndx = Section # of '.shstrtab'
- Write16(ShstrtabIndex);
+ if (NumberOfSections >= ELF::SHN_LORESERVE)
+ Write16(ELF::SHN_XINDEX);
+ else
+ Write16(ShstrtabIndex);
}
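
The SHN_LORESERVE cases follow the ELF gABI escape hatch: with 0xff00 or more sections, e_shnum is written as 0 and e_shstrndx as SHN_XINDEX, and the real values are parked in section header 0 (see the FirstSectionSize/FirstSectionLink writes in WriteObject below). Reader-side recovery, sketched against hypothetical parsed headers:

    // Ehdr/Shdr0 are the parsed ELF header and the null section's header
    // (hypothetical variables); recovery rule per the gABI.
    static uint64_t realSectionCount(const ELF::Elf64_Ehdr &Ehdr,
                                     const ELF::Elf64_Shdr &Shdr0) {
      return Ehdr.e_shnum ? Ehdr.e_shnum : Shdr0.sh_size;
    }
    static uint32_t realShstrndx(const ELF::Elf64_Ehdr &Ehdr,
                                 const ELF::Elf64_Shdr &Shdr0) {
      return Ehdr.e_shstrndx == ELF::SHN_XINDEX ? Shdr0.sh_link
                                                : Ehdr.e_shstrndx;
    }
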
-void ELFObjectWriterImpl::WriteSymbolEntry(MCDataFragment *F, uint64_t name,
- uint8_t info, uint64_t value,
- uint64_t size, uint8_t other,
- uint16_t shndx) {
- if (Is64Bit) {
- char buf[8];
+void ELFObjectWriter::WriteSymbolEntry(MCDataFragment *SymtabF,
+ MCDataFragment *ShndxF,
+ uint64_t name,
+ uint8_t info, uint64_t value,
+ uint64_t size, uint8_t other,
+ uint32_t shndx,
+ bool Reserved) {
+ if (ShndxF) {
+ if (shndx >= ELF::SHN_LORESERVE && !Reserved)
+ String32(*ShndxF, shndx);
+ else
+ String32(*ShndxF, 0);
+ }
- String32(buf, name);
- F->getContents() += StringRef(buf, 4); // st_name
+ uint16_t Index = (shndx >= ELF::SHN_LORESERVE && !Reserved) ?
+ uint16_t(ELF::SHN_XINDEX) : shndx;
- String8(buf, info);
- F->getContents() += StringRef(buf, 1); // st_info
+ if (is64Bit()) {
+ String32(*SymtabF, name); // st_name
+ String8(*SymtabF, info); // st_info
+ String8(*SymtabF, other); // st_other
+ String16(*SymtabF, Index); // st_shndx
+ String64(*SymtabF, value); // st_value
+ String64(*SymtabF, size); // st_size
+ } else {
+ String32(*SymtabF, name); // st_name
+ String32(*SymtabF, value); // st_value
+ String32(*SymtabF, size); // st_size
+ String8(*SymtabF, info); // st_info
+ String8(*SymtabF, other); // st_other
+ String16(*SymtabF, Index); // st_shndx
+ }
+}
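
The two branches above serialize the standard symbol-entry layouts; the 64-bit format reorders the fields so the fixed-width info bytes precede the value and size (per the ELF spec):

    // Field order written above (per the ELF spec; field names standard):
    struct Elf32_Sym_Layout {
      uint32_t st_name;  uint32_t st_value; uint32_t st_size;
      uint8_t  st_info;  uint8_t  st_other; uint16_t st_shndx;
    };
    struct Elf64_Sym_Layout {
      uint32_t st_name;  uint8_t  st_info;  uint8_t  st_other;
      uint16_t st_shndx; uint64_t st_value; uint64_t st_size;
    };
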
- String8(buf, other);
- F->getContents() += StringRef(buf, 1); // st_other
+static uint64_t SymbolValue(MCSymbolData &Data, const MCAsmLayout &Layout) {
+ if (Data.isCommon() && Data.isExternal())
+ return Data.getCommonAlignment();
- String16(buf, shndx);
- F->getContents() += StringRef(buf, 2); // st_shndx
+ const MCSymbol &Symbol = Data.getSymbol();
- String64(buf, value);
- F->getContents() += StringRef(buf, 8); // st_value
+ if (Symbol.isAbsolute() && Symbol.isVariable()) {
+ if (const MCExpr *Value = Symbol.getVariableValue()) {
+ int64_t IntValue;
+ if (Value->EvaluateAsAbsolute(IntValue, Layout))
+ return (uint64_t)IntValue;
+ }
+ }
- String64(buf, size);
- F->getContents() += StringRef(buf, 8); // st_size
- } else {
- char buf[4];
+ if (!Symbol.isInSection())
+ return 0;
+
+ if (Data.getFragment())
+ return Layout.getSymbolOffset(&Data);
+
+ return 0;
+}
- String32(buf, name);
- F->getContents() += StringRef(buf, 4); // st_name
+void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ // The presence of symbol versions causes undefined symbols and
+ // versions declared with @@@ to be renamed.
- String32(buf, value);
- F->getContents() += StringRef(buf, 4); // st_value
+ for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+ ie = Asm.symbol_end(); it != ie; ++it) {
+ const MCSymbol &Alias = it->getSymbol();
+ const MCSymbol &Symbol = Alias.AliasedSymbol();
+ MCSymbolData &SD = Asm.getSymbolData(Symbol);
+
+ // Not an alias.
+ if (&Symbol == &Alias)
+ continue;
+
+ StringRef AliasName = Alias.getName();
+ size_t Pos = AliasName.find('@');
+ if (Pos == StringRef::npos)
+ continue;
- String32(buf, size);
- F->getContents() += StringRef(buf, 4); // st_size
+ // Aliases defined with .symver copy the binding from the symbol they alias.
+ // This is the first place we are able to copy this information.
+ it->setExternal(SD.isExternal());
+ SetBinding(*it, GetBinding(SD));
- String8(buf, info);
- F->getContents() += StringRef(buf, 1); // st_info
+ StringRef Rest = AliasName.substr(Pos);
+ if (!Symbol.isUndefined() && !Rest.startswith("@@@"))
+ continue;
- String8(buf, other);
- F->getContents() += StringRef(buf, 1); // st_other
+ // FIXME: produce a better error message.
+ if (Symbol.isUndefined() && Rest.startswith("@@") &&
+ !Rest.startswith("@@@"))
+ report_fatal_error("A @@ version cannot be undefined");
- String16(buf, shndx);
- F->getContents() += StringRef(buf, 2); // st_shndx
+ Renames.insert(std::make_pair(&Symbol, &Alias));
}
}
-void ELFObjectWriterImpl::WriteSymbol(MCDataFragment *F, ELFSymbolData &MSD,
- const MCAsmLayout &Layout) {
- MCSymbolData &Data = *MSD.SymbolData;
- uint8_t Info = (Data.getFlags() & 0xff);
- uint8_t Other = ((Data.getFlags() & 0xf00) >> ELF_STV_Shift);
- uint64_t Value = 0;
+void ELFObjectWriter::WriteSymbol(MCDataFragment *SymtabF,
+ MCDataFragment *ShndxF,
+ ELFSymbolData &MSD,
+ const MCAsmLayout &Layout) {
+ MCSymbolData &OrigData = *MSD.SymbolData;
+ MCSymbolData &Data =
+ Layout.getAssembler().getSymbolData(OrigData.getSymbol().AliasedSymbol());
+
+ bool IsReserved = Data.isCommon() || Data.getSymbol().isAbsolute() ||
+ Data.getSymbol().isVariable();
+
+ uint8_t Binding = GetBinding(OrigData);
+ uint8_t Visibility = GetVisibility(OrigData);
+ uint8_t Type = GetType(Data);
+
+ uint8_t Info = (Binding << ELF_STB_Shift) | (Type << ELF_STT_Shift);
+ uint8_t Other = Visibility;
+
+ uint64_t Value = SymbolValue(Data, Layout);
uint64_t Size = 0;
- const MCExpr *ESize;
- if (Data.isCommon() && Data.isExternal())
- Value = Data.getCommonAlignment();
-
- if (!Data.isCommon())
- if (MCFragment *FF = Data.getFragment())
- Value = Layout.getSymbolAddress(&Data) -
- Layout.getSectionAddress(FF->getParent());
-
- ESize = Data.getSize();
- if (Data.getSize()) {
- MCValue Res;
- if (ESize->getKind() == MCExpr::Binary) {
- const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(ESize);
-
- if (BE->EvaluateAsRelocatable(Res, &Layout)) {
- MCSymbolData &A =
- Layout.getAssembler().getSymbolData(Res.getSymA()->getSymbol());
- MCSymbolData &B =
- Layout.getAssembler().getSymbolData(Res.getSymB()->getSymbol());
-
- Size = Layout.getSymbolAddress(&A) - Layout.getSymbolAddress(&B);
- }
- } else if (ESize->getKind() == MCExpr::Constant) {
- Size = static_cast<const MCConstantExpr *>(ESize)->getValue();
- } else {
- assert(0 && "Unsupported size expression");
- }
+ assert(!(Data.isCommon() && !Data.isExternal()));
+
+ const MCExpr *ESize = Data.getSize();
+ if (ESize) {
+ int64_t Res;
+ if (!ESize->EvaluateAsAbsolute(Res, Layout))
+ report_fatal_error("Size expression must be absolute.");
+ Size = Res;
}
// Write out the symbol table entry
- WriteSymbolEntry(F, MSD.StringIndex, Info, Value,
- Size, Other, MSD.SectionIndex);
+ WriteSymbolEntry(SymtabF, ShndxF, MSD.StringIndex, Info, Value,
+ Size, Other, MSD.SectionIndex, IsReserved);
}
-void ELFObjectWriterImpl::WriteSymbolTable(MCDataFragment *F,
- const MCAssembler &Asm,
- const MCAsmLayout &Layout) {
+void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
+ MCDataFragment *ShndxF,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap) {
// The string table must be emitted first because we need the index
// into the string table for all the symbol names.
assert(StringTable.size() && "Missing string table");
@@ -408,258 +642,343 @@ void ELFObjectWriterImpl::WriteSymbolTable(MCDataFragment *F,
// FIXME: Make sure the start of the symbol table is aligned.
// The first entry is the undefined symbol entry.
- unsigned EntrySize = Is64Bit ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
- F->getContents().append(EntrySize, '\x00');
+ WriteSymbolEntry(SymtabF, ShndxF, 0, 0, 0, 0, 0, 0, false);
// Write the symbol table entries.
LastLocalSymbolIndex = LocalSymbolData.size() + 1;
for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i) {
ELFSymbolData &MSD = LocalSymbolData[i];
- WriteSymbol(F, MSD, Layout);
+ WriteSymbol(SymtabF, ShndxF, MSD, Layout);
}
- // Write out a symbol table entry for each section.
- // leaving out the just added .symtab which is at
- // the very end
- unsigned Index = 1;
- for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it, ++Index) {
+ // Write out a symbol table entry for each regular section.
+ for (MCAssembler::const_iterator i = Asm.begin(), e = Asm.end(); i != e;
+ ++i) {
const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(it->getSection());
- // Leave out relocations so we don't have indexes within
- // the relocations messed up
- if (Section.getType() == ELF::SHT_RELA || Section.getType() == ELF::SHT_REL)
- continue;
- if (Index == Asm.size())
+ static_cast<const MCSectionELF&>(i->getSection());
+ if (Section.getType() == ELF::SHT_RELA ||
+ Section.getType() == ELF::SHT_REL ||
+ Section.getType() == ELF::SHT_STRTAB ||
+ Section.getType() == ELF::SHT_SYMTAB)
continue;
- WriteSymbolEntry(F, 0, ELF::STT_SECTION, 0, 0, ELF::STV_DEFAULT, Index);
+ WriteSymbolEntry(SymtabF, ShndxF, 0, ELF::STT_SECTION, 0, 0,
+ ELF::STV_DEFAULT, SectionIndexMap.lookup(&Section), false);
LastLocalSymbolIndex++;
}
for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i) {
ELFSymbolData &MSD = ExternalSymbolData[i];
MCSymbolData &Data = *MSD.SymbolData;
- assert((Data.getFlags() & ELF_STB_Global) &&
- "External symbol requires STB_GLOBAL flag");
- WriteSymbol(F, MSD, Layout);
- if (Data.getFlags() & ELF_STB_Local)
+ assert(((Data.getFlags() & ELF_STB_Global) ||
+ (Data.getFlags() & ELF_STB_Weak)) &&
+ "External symbol requires STB_GLOBAL or STB_WEAK flag");
+ WriteSymbol(SymtabF, ShndxF, MSD, Layout);
+ if (GetBinding(Data) == ELF::STB_LOCAL)
LastLocalSymbolIndex++;
}
for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i) {
ELFSymbolData &MSD = UndefinedSymbolData[i];
MCSymbolData &Data = *MSD.SymbolData;
- Data.setFlags(Data.getFlags() | ELF_STB_Global);
- WriteSymbol(F, MSD, Layout);
- if (Data.getFlags() & ELF_STB_Local)
+ WriteSymbol(SymtabF, ShndxF, MSD, Layout);
+ if (GetBinding(Data) == ELF::STB_LOCAL)
LastLocalSymbolIndex++;
}
}
-// FIXME: this is currently X86/X86_64 only
-void ELFObjectWriterImpl::RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
- uint64_t &FixedValue) {
+const MCSymbol *ELFObjectWriter::SymbolToReloc(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F) const {
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol();
+ const MCSymbol &ASymbol = Symbol.AliasedSymbol();
+ const MCSymbol *Renamed = Renames.lookup(&Symbol);
+ const MCSymbolData &SD = Asm.getSymbolData(Symbol);
+
+ if (ASymbol.isUndefined()) {
+ if (Renamed)
+ return Renamed;
+ return &ASymbol;
+ }
+
+ if (SD.isExternal()) {
+ if (Renamed)
+ return Renamed;
+ return &Symbol;
+ }
+
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(ASymbol.getSection());
+ const SectionKind secKind = Section.getKind();
+
+ if (secKind.isBSS())
+ return ExplicitRelSym(Asm, Target, F, true);
+
+ if (secKind.isThreadLocal()) {
+ if (Renamed)
+ return Renamed;
+ return &Symbol;
+ }
+
+ MCSymbolRefExpr::VariantKind Kind = Target.getSymA()->getKind();
+ const MCSectionELF &Sec2 =
+ static_cast<const MCSectionELF&>(F.getParent()->getSection());
+
+ if (&Sec2 != &Section &&
+ (Kind == MCSymbolRefExpr::VK_PLT ||
+ Kind == MCSymbolRefExpr::VK_GOTPCREL ||
+ Kind == MCSymbolRefExpr::VK_GOTOFF)) {
+ if (Renamed)
+ return Renamed;
+ return &Symbol;
+ }
+
+ if (Section.getFlags() & ELF::SHF_MERGE) {
+ if (Target.getConstant() == 0)
+ return NULL;
+ if (Renamed)
+ return Renamed;
+ return &Symbol;
+ }
+
+ return ExplicitRelSym(Asm, Target, F, false);
+}
+
+
+void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue) {
int64_t Addend = 0;
- unsigned Index = 0;
+ int Index = 0;
int64_t Value = Target.getConstant();
+ const MCSymbol *RelocSymbol = NULL;
+ bool IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
if (!Target.isAbsolute()) {
- const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
- MCSymbolData &SD = Asm.getSymbolData(*Symbol);
- const MCSymbolData *Base = Asm.getAtom(Layout, &SD);
- MCFragment *F = SD.getFragment();
-
- if (Base) {
- if (F && (!Symbol->isInSection() || SD.isCommon()) && !SD.isExternal()) {
- Index = F->getParent()->getOrdinal() + LocalSymbolData.size() + 1;
- Value += Layout.getSymbolAddress(&SD);
- } else
- Index = getSymbolIndexInSymbolTable(Asm, Symbol);
- if (Base != &SD)
- Value += Layout.getSymbolAddress(&SD) - Layout.getSymbolAddress(Base);
- Addend = Value;
- // Compensate for the addend on i386.
- if (Is64Bit)
- Value = 0;
- } else {
- if (F) {
- // Index of the section in .symtab against this symbol
- // is being relocated + 2 (empty section + abs. symbols).
- Index = F->getParent()->getOrdinal() + LocalSymbolData.size() + 1;
-
- MCSectionData *FSD = F->getParent();
- // Offset of the symbol in the section
- Addend = Layout.getSymbolAddress(&SD) - Layout.getSectionAddress(FSD);
- } else {
- FixedValue = Value;
- return;
- }
- }
- }
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol();
+ const MCSymbol &ASymbol = Symbol.AliasedSymbol();
+ RelocSymbol = SymbolToReloc(Asm, Target, *Fragment);
- FixedValue = Value;
+ if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
+ const MCSymbol &SymbolB = RefB->getSymbol();
+ MCSymbolData &SDB = Asm.getSymbolData(SymbolB);
+ IsPCRel = true;
- // determine the type of the relocation
- bool IsPCRel = isFixupKindX86PCRel(Fixup.getKind());
- unsigned Type;
- if (Is64Bit) {
- if (IsPCRel) {
- Type = ELF::R_X86_64_PC32;
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_8: Type = ELF::R_X86_64_64; break;
- case X86::reloc_pcrel_4byte:
- case FK_Data_4:
- // check that the offset fits within a signed long
- if (isInt<32>(Target.getConstant()))
- Type = ELF::R_X86_64_32S;
- else
- Type = ELF::R_X86_64_32;
- break;
- case FK_Data_2: Type = ELF::R_X86_64_16; break;
- case X86::reloc_pcrel_1byte:
- case FK_Data_1: Type = ELF::R_X86_64_8; break;
- }
+ // Offset of the symbol in the section
+ int64_t a = Layout.getSymbolOffset(&SDB);
+
+ // Offset of the relocation in the section
+ int64_t b = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+ Value += b - a;
}
- } else {
- if (IsPCRel) {
- Type = ELF::R_386_PC32;
+
+ if (!RelocSymbol) {
+ MCSymbolData &SD = Asm.getSymbolData(ASymbol);
+ MCFragment *F = SD.getFragment();
+
+ Index = F->getParent()->getOrdinal() + 1;
+
+ // Offset of the symbol in the section
+ Value += Layout.getSymbolOffset(&SD);
} else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case X86::reloc_pcrel_4byte:
- case FK_Data_4: Type = ELF::R_386_32; break;
- case FK_Data_2: Type = ELF::R_386_16; break;
- case X86::reloc_pcrel_1byte:
- case FK_Data_1: Type = ELF::R_386_8; break;
- }
+ if (Asm.getSymbolData(Symbol).getFlags() & ELF_Other_Weakref)
+ WeakrefUsedInReloc.insert(RelocSymbol);
+ else
+ UsedInReloc.insert(RelocSymbol);
+ Index = -1;
}
+ Addend = Value;
+ // Compensate for the addend on i386.
+ if (is64Bit())
+ Value = 0;
}
- ELFRelocationEntry ERE;
+ FixedValue = Value;
+ unsigned Type = GetRelocType(Target, Fixup, IsPCRel,
+ (RelocSymbol != 0), Addend);
+
+ uint64_t RelocOffset = Layout.getFragmentOffset(Fragment) +
+ Fixup.getOffset();
+
+ if (!hasRelocationAddend())
+ Addend = 0;
+ ELFRelocationEntry ERE(RelocOffset, Index, Type, RelocSymbol, Addend);
+ Relocations[Fragment->getParent()].push_back(ERE);
+}
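
At this point Index is only a placeholder -- 0 means no symbol, a negative value means "resolve against the symbol table later", and a positive value is a section ordinal; WriteRelocationsFragment fixes it up and lets setSymbolAndType pack r_info. The packing itself is the standard ELF encoding:

    // Standard r_info encodings (per the ELF spec):
    static uint64_t packRInfo64(uint32_t Sym, uint32_t Type) {
      return (uint64_t(Sym) << 32) | Type;  // ELF64_R_INFO(sym, type)
    }
    static uint32_t packRInfo32(uint32_t Sym, uint8_t Type) {
      return (Sym << 8) | Type;             // ELF32_R_INFO(sym, type)
    }
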
- if (Is64Bit) {
- struct ELF::Elf64_Rela ERE64;
- ERE64.setSymbolAndType(Index, Type);
- ERE.r_info = ERE64.r_info;
- } else {
- struct ELF::Elf32_Rela ERE32;
- ERE32.setSymbolAndType(Index, Type);
- ERE.r_info = ERE32.r_info;
- }
- ERE.r_offset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+uint64_t
+ELFObjectWriter::getSymbolIndexInSymbolTable(const MCAssembler &Asm,
+ const MCSymbol *S) {
+ MCSymbolData &SD = Asm.getSymbolData(*S);
+ return SD.getIndex();
+}
- if (HasRelocationAddend)
- ERE.r_addend = Addend;
- else
- ERE.r_addend = 0; // Silence compiler warning.
+static bool isInSymtab(const MCAssembler &Asm, const MCSymbolData &Data,
+ bool Used, bool Renamed) {
+ if (Data.getFlags() & ELF_Other_Weakref)
+ return false;
- Relocations[Fragment->getParent()].push_back(ERE);
+ if (Used)
+ return true;
+
+ if (Renamed)
+ return false;
+
+ const MCSymbol &Symbol = Data.getSymbol();
+
+ if (Symbol.getName() == "_GLOBAL_OFFSET_TABLE_")
+ return true;
+
+ const MCSymbol &A = Symbol.AliasedSymbol();
+ if (!A.isVariable() && A.isUndefined() && !Data.isCommon())
+ return false;
+
+ if (!Asm.isSymbolLinkerVisible(Symbol) && !Symbol.isUndefined())
+ return false;
+
+ if (Symbol.isTemporary())
+ return false;
+
+ return true;
}
-uint64_t
-ELFObjectWriterImpl::getSymbolIndexInSymbolTable(const MCAssembler &Asm,
- const MCSymbol *S) {
- MCSymbolData &SD = Asm.getSymbolData(*S);
+static bool isLocal(const MCSymbolData &Data, bool isSignature,
+ bool isUsedInReloc) {
+ if (Data.isExternal())
+ return false;
+
+ const MCSymbol &Symbol = Data.getSymbol();
+ const MCSymbol &RefSymbol = Symbol.AliasedSymbol();
- // Local symbol.
- if (!SD.isExternal() && !S->isUndefined())
- return SD.getIndex() + /* empty symbol */ 1;
+ if (RefSymbol.isUndefined() && !RefSymbol.isVariable()) {
+ if (isSignature && !isUsedInReloc)
+ return true;
- // External or undefined symbol.
- return SD.getIndex() + Asm.size() + /* empty symbol */ 1;
+ return false;
+ }
+
+ return true;
}
-void ELFObjectWriterImpl::ComputeSymbolTable(MCAssembler &Asm) {
- // Build section lookup table.
- DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+void ELFObjectWriter::ComputeIndexMap(MCAssembler &Asm,
+ SectionIndexMapTy &SectionIndexMap) {
unsigned Index = 1;
for (MCAssembler::iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it, ++Index)
- SectionIndexMap[&it->getSection()] = Index;
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF &>(it->getSection());
+ if (Section.getType() != ELF::SHT_GROUP)
+ continue;
+ SectionIndexMap[&Section] = Index++;
+ }
+
+ for (MCAssembler::iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF &>(it->getSection());
+ if (Section.getType() == ELF::SHT_GROUP)
+ continue;
+ SectionIndexMap[&Section] = Index++;
+ }
+}
+
+void ELFObjectWriter::ComputeSymbolTable(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ RevGroupMapTy RevGroupMap) {
+ // FIXME: Is this the correct place to do this?
+ if (NeedsGOT) {
+ llvm::StringRef Name = "_GLOBAL_OFFSET_TABLE_";
+ MCSymbol *Sym = Asm.getContext().GetOrCreateSymbol(Name);
+ MCSymbolData &Data = Asm.getOrCreateSymbolData(*Sym);
+ Data.setExternal(true);
+ SetBinding(Data, ELF::STB_GLOBAL);
+ }
+
+ // Build section lookup table.
+ int NumRegularSections = Asm.size();
// Index 0 is always the empty string.
StringMap<uint64_t> StringIndexMap;
StringTable += '\x00';
- // Add the data for local symbols.
+ // Add the data for the symbols.
for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
ie = Asm.symbol_end(); it != ie; ++it) {
const MCSymbol &Symbol = it->getSymbol();
- // Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(Symbol))
- continue;
+ bool Used = UsedInReloc.count(&Symbol);
+ bool WeakrefUsed = WeakrefUsedInReloc.count(&Symbol);
+ bool isSignature = RevGroupMap.count(&Symbol);
- if (it->isExternal() || Symbol.isUndefined())
+ if (!isInSymtab(Asm, *it,
+ Used || WeakrefUsed || isSignature,
+ Renames.count(&Symbol)))
continue;
- uint64_t &Entry = StringIndexMap[Symbol.getName()];
- if (!Entry) {
- Entry = StringTable.size();
- StringTable += Symbol.getName();
- StringTable += '\x00';
- }
-
ELFSymbolData MSD;
MSD.SymbolData = it;
- MSD.StringIndex = Entry;
+ const MCSymbol &RefSymbol = Symbol.AliasedSymbol();
+
+ // Undefined symbols are global, but this is the first place we
+ // are able to set it.
+ bool Local = isLocal(*it, isSignature, Used);
+ if (!Local && GetBinding(*it) == ELF::STB_LOCAL) {
+ MCSymbolData &SD = Asm.getSymbolData(RefSymbol);
+ SetBinding(*it, ELF::STB_GLOBAL);
+ SetBinding(SD, ELF::STB_GLOBAL);
+ }
+
+ if (RefSymbol.isUndefined() && !Used && WeakrefUsed)
+ SetBinding(*it, ELF::STB_WEAK);
- if (Symbol.isAbsolute()) {
+ if (it->isCommon()) {
+ assert(!Local);
+ MSD.SectionIndex = ELF::SHN_COMMON;
+ } else if (Symbol.isAbsolute() || RefSymbol.isVariable()) {
MSD.SectionIndex = ELF::SHN_ABS;
- LocalSymbolData.push_back(MSD);
+ } else if (RefSymbol.isUndefined()) {
+ if (isSignature && !Used)
+ MSD.SectionIndex = SectionIndexMap.lookup(RevGroupMap[&Symbol]);
+ else
+ MSD.SectionIndex = ELF::SHN_UNDEF;
} else {
- MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(RefSymbol.getSection());
+ MSD.SectionIndex = SectionIndexMap.lookup(&Section);
+ if (MSD.SectionIndex >= ELF::SHN_LORESERVE)
+ NeedsSymtabShndx = true;
assert(MSD.SectionIndex && "Invalid section index!");
- LocalSymbolData.push_back(MSD);
}
- }
-
- // Now add non-local symbols.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
- // Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(Symbol))
- continue;
-
- if (!it->isExternal() && !Symbol.isUndefined())
- continue;
+ // The @@@ in a symbol version is replaced with @ in undefined symbols and
+ // @@ in defined ones.
+ StringRef Name = Symbol.getName();
+ SmallString<32> Buf;
+
+ size_t Pos = Name.find("@@@");
+ if (Pos != StringRef::npos) {
+ Buf += Name.substr(0, Pos);
+ unsigned Skip = MSD.SectionIndex == ELF::SHN_UNDEF ? 2 : 1;
+ Buf += Name.substr(Pos + Skip);
+ Name = Buf;
+ }
- uint64_t &Entry = StringIndexMap[Symbol.getName()];
+ uint64_t &Entry = StringIndexMap[Name];
if (!Entry) {
Entry = StringTable.size();
- StringTable += Symbol.getName();
+ StringTable += Name;
StringTable += '\x00';
}
-
- ELFSymbolData MSD;
- MSD.SymbolData = it;
MSD.StringIndex = Entry;
-
- if (Symbol.isUndefined()) {
- MSD.SectionIndex = ELF::SHN_UNDEF;
- // XXX: for some reason we dont Emit* this
- it->setFlags(it->getFlags() | ELF_STB_Global);
+ if (MSD.SectionIndex == ELF::SHN_UNDEF)
UndefinedSymbolData.push_back(MSD);
- } else if (Symbol.isAbsolute()) {
- MSD.SectionIndex = ELF::SHN_ABS;
- ExternalSymbolData.push_back(MSD);
- } else if (it->isCommon()) {
- MSD.SectionIndex = ELF::SHN_COMMON;
- ExternalSymbolData.push_back(MSD);
- } else {
- MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
- assert(MSD.SectionIndex && "Invalid section index!");
+ else if (Local)
+ LocalSymbolData.push_back(MSD);
+ else
ExternalSymbolData.push_back(MSD);
- }
}
// Symbols are required to be in lexicographic order.
@@ -669,55 +988,56 @@ void ELFObjectWriterImpl::ComputeSymbolTable(MCAssembler &Asm) {
// Set the symbol indices. Local symbols must come before all other
// symbols with non-local bindings.
- Index = 0;
+ unsigned Index = 1;
for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
LocalSymbolData[i].SymbolData->setIndex(Index++);
+
+ Index += NumRegularSections;
+
for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
ExternalSymbolData[i].SymbolData->setIndex(Index++);
for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
UndefinedSymbolData[i].SymbolData->setIndex(Index++);
}
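
The '@@@' rewriting above implements the rule from the comment: one '@' survives for undefined symbols, two for defined ones. As a standalone sketch (hypothetical helper name, StringRef semantics per llvm/ADT):

    // "foo@@@VER" -> "foo@VER"  when the symbol is undefined,
    //                "foo@@VER" when it is defined.
    static std::string stripTripleAt(StringRef Name, bool IsUndefined) {
      size_t Pos = Name.find("@@@");
      if (Pos == StringRef::npos)
        return Name.str();
      unsigned Skip = IsUndefined ? 2 : 1;
      return Name.substr(0, Pos).str() + Name.substr(Pos + Skip).str();
    }
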
-void ELFObjectWriterImpl::WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
- const MCSectionData &SD) {
+void ELFObjectWriter::WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
+ const MCSectionData &SD) {
if (!Relocations[&SD].empty()) {
MCContext &Ctx = Asm.getContext();
- const MCSection *RelaSection;
+ const MCSectionELF *RelaSection;
const MCSectionELF &Section =
static_cast<const MCSectionELF&>(SD.getSection());
const StringRef SectionName = Section.getSectionName();
- std::string RelaSectionName = HasRelocationAddend ? ".rela" : ".rel";
+ std::string RelaSectionName = hasRelocationAddend() ? ".rela" : ".rel";
RelaSectionName += SectionName;
unsigned EntrySize;
- if (HasRelocationAddend)
- EntrySize = Is64Bit ? sizeof(ELF::Elf64_Rela) : sizeof(ELF::Elf32_Rela);
+ if (hasRelocationAddend())
+ EntrySize = is64Bit() ? sizeof(ELF::Elf64_Rela) : sizeof(ELF::Elf32_Rela);
else
- EntrySize = Is64Bit ? sizeof(ELF::Elf64_Rel) : sizeof(ELF::Elf32_Rel);
+ EntrySize = is64Bit() ? sizeof(ELF::Elf64_Rel) : sizeof(ELF::Elf32_Rel);
- RelaSection = Ctx.getELFSection(RelaSectionName, HasRelocationAddend ?
+ RelaSection = Ctx.getELFSection(RelaSectionName, hasRelocationAddend() ?
ELF::SHT_RELA : ELF::SHT_REL, 0,
SectionKind::getReadOnly(),
- false, EntrySize);
+ EntrySize, "");
MCSectionData &RelaSD = Asm.getOrCreateSectionData(*RelaSection);
- RelaSD.setAlignment(1);
+ RelaSD.setAlignment(is64Bit() ? 8 : 4);
MCDataFragment *F = new MCDataFragment(&RelaSD);
WriteRelocationsFragment(Asm, F, &SD);
-
- Asm.AddSectionToTheEnd(RelaSD, Layout);
}
}
-void ELFObjectWriterImpl::WriteSecHdrEntry(uint32_t Name, uint32_t Type,
- uint64_t Flags, uint64_t Address,
- uint64_t Offset, uint64_t Size,
- uint32_t Link, uint32_t Info,
- uint64_t Alignment,
- uint64_t EntrySize) {
+void ELFObjectWriter::WriteSecHdrEntry(uint32_t Name, uint32_t Type,
+ uint64_t Flags, uint64_t Address,
+ uint64_t Offset, uint64_t Size,
+ uint32_t Link, uint32_t Info,
+ uint64_t Alignment,
+ uint64_t EntrySize) {
Write32(Name); // sh_name: index into string table
Write32(Type); // sh_type
WriteWord(Flags); // sh_flags
@@ -730,9 +1050,9 @@ void ELFObjectWriterImpl::WriteSecHdrEntry(uint32_t Name, uint32_t Type,
WriteWord(EntrySize); // sh_entsize
}
-void ELFObjectWriterImpl::WriteRelocationsFragment(const MCAssembler &Asm,
- MCDataFragment *F,
- const MCSectionData *SD) {
+void ELFObjectWriter::WriteRelocationsFragment(const MCAssembler &Asm,
+ MCDataFragment *F,
+ const MCSectionData *SD) {
std::vector<ELFRelocationEntry> &Relocs = Relocations[SD];
// sort by the r_offset just like gnu as does
array_pod_sort(Relocs.begin(), Relocs.end());
@@ -740,67 +1060,90 @@ void ELFObjectWriterImpl::WriteRelocationsFragment(const MCAssembler &Asm,
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
ELFRelocationEntry entry = Relocs[e - i - 1];
- unsigned WordSize = Is64Bit ? 8 : 4;
- F->getContents() += StringRef((const char *)&entry.r_offset, WordSize);
- F->getContents() += StringRef((const char *)&entry.r_info, WordSize);
+ if (!entry.Index)
+ ;
+ else if (entry.Index < 0)
+ entry.Index = getSymbolIndexInSymbolTable(Asm, entry.Symbol);
+ else
+ entry.Index += LocalSymbolData.size();
+ if (is64Bit()) {
+ String64(*F, entry.r_offset);
+
+ struct ELF::Elf64_Rela ERE64;
+ ERE64.setSymbolAndType(entry.Index, entry.Type);
+ String64(*F, ERE64.r_info);
- if (HasRelocationAddend)
- F->getContents() += StringRef((const char *)&entry.r_addend, WordSize);
+ if (hasRelocationAddend())
+ String64(*F, entry.r_addend);
+ } else {
+ String32(*F, entry.r_offset);
+
+ struct ELF::Elf32_Rela ERE32;
+ ERE32.setSymbolAndType(entry.Index, entry.Type);
+ String32(*F, ERE32.r_info);
+
+ if (hasRelocationAddend())
+ String32(*F, entry.r_addend);
+ }
}
}
-void ELFObjectWriterImpl::CreateMetadataSections(MCAssembler &Asm,
- MCAsmLayout &Layout) {
+void ELFObjectWriter::CreateMetadataSections(MCAssembler &Asm,
+ MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap) {
MCContext &Ctx = Asm.getContext();
MCDataFragment *F;
- WriteRelocations(Asm, Layout);
-
- const MCSection *SymtabSection;
- unsigned EntrySize = Is64Bit ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
+ unsigned EntrySize = is64Bit() ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
- SymtabSection = Ctx.getELFSection(".symtab", ELF::SHT_SYMTAB, 0,
- SectionKind::getReadOnly(),
- false, EntrySize);
+ // We construct .shstrtab, .symtab and .strtab in this order to match gnu as.
+ const MCSectionELF *ShstrtabSection =
+ Ctx.getELFSection(".shstrtab", ELF::SHT_STRTAB, 0,
+ SectionKind::getReadOnly());
+ MCSectionData &ShstrtabSD = Asm.getOrCreateSectionData(*ShstrtabSection);
+ ShstrtabSD.setAlignment(1);
+ ShstrtabIndex = Asm.size();
+ const MCSectionELF *SymtabSection =
+ Ctx.getELFSection(".symtab", ELF::SHT_SYMTAB, 0,
+ SectionKind::getReadOnly(),
+ EntrySize, "");
MCSectionData &SymtabSD = Asm.getOrCreateSectionData(*SymtabSection);
+ SymtabSD.setAlignment(is64Bit() ? 8 : 4);
+ SymbolTableIndex = Asm.size();
- SymtabSD.setAlignment(Is64Bit ? 8 : 4);
+ MCSectionData *SymtabShndxSD = NULL;
- F = new MCDataFragment(&SymtabSD);
-
- // Symbol table
- WriteSymbolTable(F, Asm, Layout);
- Asm.AddSectionToTheEnd(SymtabSD, Layout);
+ if (NeedsSymtabShndx) {
+ const MCSectionELF *SymtabShndxSection =
+ Ctx.getELFSection(".symtab_shndx", ELF::SHT_SYMTAB_SHNDX, 0,
+ SectionKind::getReadOnly(), 4, "");
+ SymtabShndxSD = &Asm.getOrCreateSectionData(*SymtabShndxSection);
+ SymtabShndxSD->setAlignment(4);
+ }
const MCSection *StrtabSection;
StrtabSection = Ctx.getELFSection(".strtab", ELF::SHT_STRTAB, 0,
- SectionKind::getReadOnly(), false);
-
+ SectionKind::getReadOnly());
MCSectionData &StrtabSD = Asm.getOrCreateSectionData(*StrtabSection);
StrtabSD.setAlignment(1);
-
- // FIXME: This isn't right. If the sections get rearranged this will
- // be wrong. We need a proper lookup.
StringTableIndex = Asm.size();
- F = new MCDataFragment(&StrtabSD);
- F->getContents().append(StringTable.begin(), StringTable.end());
- Asm.AddSectionToTheEnd(StrtabSD, Layout);
+ WriteRelocations(Asm, Layout);
- const MCSection *ShstrtabSection;
- ShstrtabSection = Ctx.getELFSection(".shstrtab", ELF::SHT_STRTAB, 0,
- SectionKind::getReadOnly(), false);
+ // Symbol table
+ F = new MCDataFragment(&SymtabSD);
+ MCDataFragment *ShndxF = NULL;
+ if (NeedsSymtabShndx) {
+ ShndxF = new MCDataFragment(SymtabShndxSD);
+ }
+ WriteSymbolTable(F, ShndxF, Asm, Layout, SectionIndexMap);
- MCSectionData &ShstrtabSD = Asm.getOrCreateSectionData(*ShstrtabSection);
- ShstrtabSD.setAlignment(1);
+ F = new MCDataFragment(&StrtabSD);
+ F->getContents().append(StringTable.begin(), StringTable.end());
F = new MCDataFragment(&ShstrtabSD);
- // FIXME: This isn't right. If the sections get rearranged this will
- // be wrong. We need a proper lookup.
- ShstrtabIndex = Asm.size();
-
// Section header string table.
//
// The first entry of a string table holds a null character so skip
@@ -808,166 +1151,691 @@ void ELFObjectWriterImpl::CreateMetadataSections(MCAssembler &Asm,
uint64_t Index = 1;
F->getContents() += '\x00';
+ StringMap<uint64_t> SecStringMap;
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
const MCSectionELF &Section =
static_cast<const MCSectionELF&>(it->getSection());
+ // FIXME: We could merge suffixes like in .text and .rela.text.
+ StringRef Name = Section.getSectionName();
+ if (SecStringMap.count(Name)) {
+ SectionStringTableIndex[&Section] = SecStringMap[Name];
+ continue;
+ }
// Remember the index into the string table so we can write it
// into the sh_name field of the section header table.
- SectionStringTableIndex[&it->getSection()] = Index;
+ SectionStringTableIndex[&Section] = Index;
+ SecStringMap[Name] = Index;
- Index += Section.getSectionName().size() + 1;
- F->getContents() += Section.getSectionName();
+ Index += Name.size() + 1;
+ F->getContents() += Name;
F->getContents() += '\x00';
}
+}
+
+void ELFObjectWriter::CreateIndexedSections(MCAssembler &Asm,
+ MCAsmLayout &Layout,
+ GroupMapTy &GroupMap,
+ RevGroupMapTy &RevGroupMap) {
+ // Create the .note.GNU-stack section if needed.
+ MCContext &Ctx = Asm.getContext();
+ if (Asm.getNoExecStack()) {
+ const MCSectionELF *GnuStackSection =
+ Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS, 0,
+ SectionKind::getReadOnly());
+ Asm.getOrCreateSectionData(*GnuStackSection);
+ }
+
+ // Build the groups
+ for (MCAssembler::const_iterator it = Asm.begin(), ie = Asm.end();
+ it != ie; ++it) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(it->getSection());
+ if (!(Section.getFlags() & ELF::SHF_GROUP))
+ continue;
+
+ const MCSymbol *SignatureSymbol = Section.getGroup();
+ Asm.getOrCreateSymbolData(*SignatureSymbol);
+ const MCSectionELF *&Group = RevGroupMap[SignatureSymbol];
+ if (!Group) {
+ Group = Ctx.CreateELFGroupSection();
+ MCSectionData &Data = Asm.getOrCreateSectionData(*Group);
+ Data.setAlignment(4);
+ MCDataFragment *F = new MCDataFragment(&Data);
+ String32(*F, ELF::GRP_COMDAT);
+ }
+ GroupMap[Group] = SignatureSymbol;
+ }
+
+ // Add sections to the groups
+ unsigned Index = 1;
+ unsigned NumGroups = RevGroupMap.size();
+ for (MCAssembler::const_iterator it = Asm.begin(), ie = Asm.end();
+ it != ie; ++it, ++Index) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(it->getSection());
+ if (!(Section.getFlags() & ELF::SHF_GROUP))
+ continue;
+ const MCSectionELF *Group = RevGroupMap[Section.getGroup()];
+ MCSectionData &Data = Asm.getOrCreateSectionData(*Group);
+ // FIXME: we could use the previous fragment
+ MCDataFragment *F = new MCDataFragment(&Data);
+ String32(*F, NumGroups + Index);
+ }
+}
+
+void ELFObjectWriter::WriteSection(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ uint32_t GroupSymbolIndex,
+ uint64_t Offset, uint64_t Size,
+ uint64_t Alignment,
+ const MCSectionELF &Section) {
+ uint64_t sh_link = 0;
+ uint64_t sh_info = 0;
+
+ switch(Section.getType()) {
+ case ELF::SHT_DYNAMIC:
+ sh_link = SectionStringTableIndex[&Section];
+ sh_info = 0;
+ break;
+
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA: {
+ const MCSectionELF *SymtabSection;
+ const MCSectionELF *InfoSection;
+ SymtabSection = Asm.getContext().getELFSection(".symtab", ELF::SHT_SYMTAB,
+ 0,
+ SectionKind::getReadOnly());
+ sh_link = SectionIndexMap.lookup(SymtabSection);
+ assert(sh_link && ".symtab not found");
+
+ // Remove ".rel" and ".rela" prefixes.
+ unsigned SecNameLen = (Section.getType() == ELF::SHT_REL) ? 4 : 5;
+ StringRef SectionName = Section.getSectionName().substr(SecNameLen);
+
+ InfoSection = Asm.getContext().getELFSection(SectionName,
+ ELF::SHT_PROGBITS, 0,
+ SectionKind::getReadOnly());
+ sh_info = SectionIndexMap.lookup(InfoSection);
+ break;
+ }
+
+ case ELF::SHT_SYMTAB:
+ case ELF::SHT_DYNSYM:
+ sh_link = StringTableIndex;
+ sh_info = LastLocalSymbolIndex;
+ break;
+
+ case ELF::SHT_SYMTAB_SHNDX:
+ sh_link = SymbolTableIndex;
+ break;
+
+ case ELF::SHT_PROGBITS:
+ case ELF::SHT_STRTAB:
+ case ELF::SHT_NOBITS:
+ case ELF::SHT_NOTE:
+ case ELF::SHT_NULL:
+ case ELF::SHT_ARM_ATTRIBUTES:
+ case ELF::SHT_INIT_ARRAY:
+ case ELF::SHT_FINI_ARRAY:
+ case ELF::SHT_PREINIT_ARRAY:
+ case ELF::SHT_X86_64_UNWIND:
+ // Nothing to do.
+ break;
+
+ case ELF::SHT_GROUP: {
+ sh_link = SymbolTableIndex;
+ sh_info = GroupSymbolIndex;
+ break;
+ }
+
+ default:
+ assert(0 && "FIXME: sh_type value not supported!");
+ break;
+ }
- Asm.AddSectionToTheEnd(ShstrtabSD, Layout);
+ WriteSecHdrEntry(SectionStringTableIndex[&Section], Section.getType(),
+ Section.getFlags(), 0, Offset, Size, sh_link, sh_info,
+ Alignment, Section.getEntrySize());
}
-void ELFObjectWriterImpl::WriteObject(const MCAssembler &Asm,
- const MCAsmLayout &Layout) {
+static bool IsELFMetaDataSection(const MCSectionData &SD) {
+ return SD.getOrdinal() == ~UINT32_C(0) &&
+ !SD.getSection().isVirtualSection();
+}
+
+static uint64_t DataSectionSize(const MCSectionData &SD) {
+ uint64_t Ret = 0;
+ for (MCSectionData::const_iterator i = SD.begin(), e = SD.end(); i != e;
+ ++i) {
+ const MCFragment &F = *i;
+ assert(F.getKind() == MCFragment::FT_Data);
+ Ret += cast<MCDataFragment>(F).getContents().size();
+ }
+ return Ret;
+}
+
+static uint64_t GetSectionFileSize(const MCAsmLayout &Layout,
+ const MCSectionData &SD) {
+ if (IsELFMetaDataSection(SD))
+ return DataSectionSize(SD);
+ return Layout.getSectionFileSize(&SD);
+}
+
+static uint64_t GetSectionAddressSize(const MCAsmLayout &Layout,
+ const MCSectionData &SD) {
+ if (IsELFMetaDataSection(SD))
+ return DataSectionSize(SD);
+ return Layout.getSectionAddressSize(&SD);
+}
+
+static void WriteDataSectionData(ELFObjectWriter *W, const MCSectionData &SD) {
+ for (MCSectionData::const_iterator i = SD.begin(), e = SD.end(); i != e;
+ ++i) {
+ const MCFragment &F = *i;
+ assert(F.getKind() == MCFragment::FT_Data);
+ W->WriteBytes(cast<MCDataFragment>(F).getContents().str());
+ }
+}
+
+void ELFObjectWriter::WriteObject(MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ GroupMapTy GroupMap;
+ RevGroupMapTy RevGroupMap;
+ CreateIndexedSections(Asm, const_cast<MCAsmLayout&>(Layout), GroupMap,
+ RevGroupMap);
+
+ SectionIndexMapTy SectionIndexMap;
+
+ ComputeIndexMap(Asm, SectionIndexMap);
+
+ // Compute symbol table information.
+ ComputeSymbolTable(Asm, SectionIndexMap, RevGroupMap);
+
CreateMetadataSections(const_cast<MCAssembler&>(Asm),
- const_cast<MCAsmLayout&>(Layout));
+ const_cast<MCAsmLayout&>(Layout),
+ SectionIndexMap);
+
+ // Update to include the metadata sections.
+ ComputeIndexMap(Asm, SectionIndexMap);
// Add 1 for the null section.
unsigned NumSections = Asm.size() + 1;
+ uint64_t NaturalAlignment = is64Bit() ? 8 : 4;
+ uint64_t HeaderSize = is64Bit() ? sizeof(ELF::Elf64_Ehdr) :
+ sizeof(ELF::Elf32_Ehdr);
+ uint64_t FileOff = HeaderSize;
+
+ std::vector<const MCSectionELF*> Sections;
+ Sections.resize(NumSections);
+
+ for (SectionIndexMapTy::const_iterator i=
+ SectionIndexMap.begin(), e = SectionIndexMap.end(); i != e; ++i) {
+ const std::pair<const MCSectionELF*, uint32_t> &p = *i;
+ Sections[p.second] = p.first;
+ }
- uint64_t SectionDataSize = 0;
+ for (unsigned i = 1; i < NumSections; ++i) {
+ const MCSectionELF &Section = *Sections[i];
+ const MCSectionData &SD = Asm.getOrCreateSectionData(Section);
- for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
- const MCSectionData &SD = *it;
+ FileOff = RoundUpToAlignment(FileOff, SD.getAlignment());
// Get the size of the section in the output file (including padding).
- uint64_t Size = Layout.getSectionFileSize(&SD);
- SectionDataSize += Size;
+ FileOff += GetSectionFileSize(Layout, SD);
}
+ FileOff = RoundUpToAlignment(FileOff, NaturalAlignment);
+
// Write out the ELF header ...
- WriteHeader(SectionDataSize, NumSections);
- FileOff = Is64Bit ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr);
+ WriteHeader(FileOff - HeaderSize, NumSections);
+
+ FileOff = HeaderSize;
// ... then all of the sections ...
DenseMap<const MCSection*, uint64_t> SectionOffsetMap;
- DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+ for (unsigned i = 1; i < NumSections; ++i) {
+ const MCSectionELF &Section = *Sections[i];
+ const MCSectionData &SD = Asm.getOrCreateSectionData(Section);
- unsigned Index = 1;
- for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
- // Remember the offset into the file for this section.
- SectionOffsetMap[&it->getSection()] = FileOff;
+ uint64_t Padding = OffsetToAlignment(FileOff, SD.getAlignment());
+ WriteZeros(Padding);
+ FileOff += Padding;
- SectionIndexMap[&it->getSection()] = Index++;
+ // Remember the offset into the file for this section.
+ SectionOffsetMap[&Section] = FileOff;
- const MCSectionData &SD = *it;
- FileOff += Layout.getSectionFileSize(&SD);
+ FileOff += GetSectionFileSize(Layout, SD);
- Asm.WriteSectionData(it, Layout, Writer);
+ if (IsELFMetaDataSection(SD))
+ WriteDataSectionData(this, SD);
+ else
+ Asm.WriteSectionData(&SD, Layout);
}
+ uint64_t Padding = OffsetToAlignment(FileOff, NaturalAlignment);
+ WriteZeros(Padding);
+ FileOff += Padding;
+
// ... and then the section header table.
// Should we align the section header table?
//
// Null section first.
- WriteSecHdrEntry(0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ uint64_t FirstSectionSize =
+ NumSections >= ELF::SHN_LORESERVE ? NumSections : 0;
+ uint32_t FirstSectionLink =
+ ShstrtabIndex >= ELF::SHN_LORESERVE ? ShstrtabIndex : 0;
+ WriteSecHdrEntry(0, 0, 0, 0, 0, FirstSectionSize, FirstSectionLink, 0, 0, 0);
+
+ for (unsigned i = 1; i < NumSections; ++i) {
+ const MCSectionELF &Section = *Sections[i];
+ const MCSectionData &SD = Asm.getOrCreateSectionData(Section);
+ uint32_t GroupSymbolIndex;
+ if (Section.getType() != ELF::SHT_GROUP)
+ GroupSymbolIndex = 0;
+ else
+ GroupSymbolIndex = getSymbolIndexInSymbolTable(Asm, GroupMap[&Section]);
- for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
- const MCSectionData &SD = *it;
- const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(SD.getSection());
+ uint64_t Size = GetSectionAddressSize(Layout, SD);
- uint64_t sh_link = 0;
- uint64_t sh_info = 0;
+ WriteSection(Asm, SectionIndexMap, GroupSymbolIndex,
+ SectionOffsetMap[&Section], Size,
+ SD.getAlignment(), Section);
+ }
+}
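
WriteObject now sizes the file in a first pass (so the header can record where the section header table will land) and then re-walks the sections, emitting zero padding up to each section's alignment. A minimal standalone sketch of that arithmetic with made-up section sizes; RoundUpToAlignment and OffsetToAlignment here mirror the helpers in llvm/Support/MathExtras.h:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors llvm::RoundUpToAlignment: smallest multiple of Align >= Value.
static uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}

// Mirrors llvm::OffsetToAlignment: padding needed to reach Align from Value.
static uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  return RoundUpToAlignment(Value, Align) - Value;
}

int main() {
  const uint64_t HeaderSize = 64; // sizeof(Elf64_Ehdr)
  struct Sec { uint64_t Size, Align; };
  const std::vector<Sec> Secs = {{13, 4}, {7, 16}}; // hypothetical sections

  // Pass 1: size the body so the header can locate the section header table.
  uint64_t FileOff = HeaderSize;
  for (const Sec &S : Secs) {
    FileOff = RoundUpToAlignment(FileOff, S.Align);
    FileOff += S.Size;
  }
  FileOff = RoundUpToAlignment(FileOff, 8); // natural alignment for ELF64

  // Pass 2: emit, writing explicit zero padding before each section.
  uint64_t Off = HeaderSize;
  for (const Sec &S : Secs) {
    Off += OffsetToAlignment(Off, S.Align); // WriteZeros(Padding)
    std::printf("section data at offset %llu\n", (unsigned long long)Off);
    Off += S.Size;
  }
  std::printf("section header table at %llu\n", (unsigned long long)FileOff);
  return 0;
}
```
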
- switch(Section.getType()) {
- case ELF::SHT_DYNAMIC:
- sh_link = SectionStringTableIndex[&it->getSection()];
- sh_info = 0;
- break;
+MCObjectWriter *llvm::createELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ switch (MOTW->getEMachine()) {
+ case ELF::EM_386:
+ case ELF::EM_X86_64:
+ return new X86ELFObjectWriter(MOTW, OS, IsLittleEndian);
+ case ELF::EM_ARM:
+ return new ARMELFObjectWriter(MOTW, OS, IsLittleEndian);
+ case ELF::EM_MBLAZE:
+ return new MBlazeELFObjectWriter(MOTW, OS, IsLittleEndian);
+ default: llvm_unreachable("Unsupported architecture");
+ }
+}
+
+// Subclasses of ELFObjectWriter.
+
+//===- ARMELFObjectWriter -------------------------------------------===//
+
+ARMELFObjectWriter::ARMELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian)
+ : ELFObjectWriter(MOTW, _OS, IsLittleEndian)
+{}
+
+ARMELFObjectWriter::~ARMELFObjectWriter()
+{}
- case ELF::SHT_REL:
- case ELF::SHT_RELA: {
- const MCSection *SymtabSection;
- const MCSection *InfoSection;
-
- SymtabSection = Asm.getContext().getELFSection(".symtab", ELF::SHT_SYMTAB, 0,
- SectionKind::getReadOnly(),
- false);
- sh_link = SectionIndexMap[SymtabSection];
-
- // Remove ".rel" and ".rela" prefixes.
- unsigned SecNameLen = (Section.getType() == ELF::SHT_REL) ? 4 : 5;
- StringRef SectionName = Section.getSectionName().substr(SecNameLen);
-
- InfoSection = Asm.getContext().getELFSection(SectionName,
- ELF::SHT_PROGBITS, 0,
- SectionKind::getReadOnly(),
- false);
- sh_info = SectionIndexMap[InfoSection];
+// FIXME: get the real EABI Version from the Triple.
+void ARMELFObjectWriter::WriteEFlags() {
+ Write32(ELF::EF_ARM_EABIMASK & DefaultEABIVersion);
+}
+
+// On ARM, _MergedGlobals and most other symbols get emitted directly,
+// i.e. not as an offset to a section symbol.
+// This code is a first-cut approximation of what ARM/gcc does.
+
+const MCSymbol *ARMELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ bool IsBSS) const {
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol();
+ bool EmitThisSym = false;
+
+ if (IsBSS) {
+ EmitThisSym = StringSwitch<bool>(Symbol.getName())
+ .Case("_MergedGlobals", true)
+ .Default(false);
+ } else {
+ EmitThisSym = StringSwitch<bool>(Symbol.getName())
+ .Case("_MergedGlobals", true)
+ .StartsWith(".L.str", true)
+ .Default(false);
+ }
+ if (EmitThisSym)
+ return &Symbol;
+ if (!Symbol.isTemporary())
+ return &Symbol;
+ return NULL;
+}
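
ExplicitRelSym drives its name matching with llvm::StringSwitch. A self-contained illustration of the same idiom (the header is llvm/ADT/StringSwitch.h; the matched names below are just the ones from the code above):

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// True for names that should be relocated against the symbol itself,
// mirroring the .Case/.StartsWith/.Default chain above.
static bool emitThisSym(llvm::StringRef Name) {
  return llvm::StringSwitch<bool>(Name)
      .Case("_MergedGlobals", true) // exact match
      .StartsWith(".L.str", true)   // prefix match
      .Default(false);
}
```
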
+
+unsigned ARMELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) {
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+
+ unsigned Type = 0;
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default: assert(0 && "Unimplemented");
+ case FK_Data_4:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_BASE_PREL;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSGD:
+ assert(0 && "unimplemented");
+ break;
+ case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ Type = ELF::R_ARM_TLS_IE32;
+ break;
+ }
+ break;
+ case ARM::fixup_arm_uncondbranch:
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_ARM_PLT:
+ Type = ELF::R_ARM_PLT32;
+ break;
+ default:
+ Type = ELF::R_ARM_CALL;
+ break;
+ }
+ break;
+ case ARM::fixup_arm_condbranch:
+ Type = ELF::R_ARM_JUMP24;
+ break;
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ Type = ELF::R_ARM_MOVT_PREL;
+ break;
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movw_lo16_pcrel:
+ Type = ELF::R_ARM_MOVW_PREL_NC;
+ break;
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ Type = ELF::R_ARM_THM_MOVT_PREL;
+ break;
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ Type = ELF::R_ARM_THM_MOVW_PREL_NC;
break;
}
-
- case ELF::SHT_SYMTAB:
- case ELF::SHT_DYNSYM:
- sh_link = StringTableIndex;
- sh_info = LastLocalSymbolIndex;
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_ARM_GOT:
+ Type = ELF::R_ARM_GOT_BREL;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSGD:
+ Type = ELF::R_ARM_TLS_GD32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TPOFF:
+ Type = ELF::R_ARM_TLS_LE32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ Type = ELF::R_ARM_TLS_IE32;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_ABS32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_GOTOFF:
+ Type = ELF::R_ARM_GOTOFF32;
+ break;
+ }
break;
-
- case ELF::SHT_PROGBITS:
- case ELF::SHT_STRTAB:
- case ELF::SHT_NOBITS:
- case ELF::SHT_NULL:
- // Nothing to do.
+ case ARM::fixup_arm_ldst_pcrel_12:
+ case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_cb:
+ case ARM::fixup_arm_thumb_cp:
+ case ARM::fixup_arm_thumb_br:
+ assert(0 && "Unimplemented");
break;
-
- case ELF::SHT_HASH:
- case ELF::SHT_GROUP:
- case ELF::SHT_SYMTAB_SHNDX:
- default:
- assert(0 && "FIXME: sh_type value not supported!");
+ case ARM::fixup_arm_uncondbranch:
+ Type = ELF::R_ARM_CALL;
+ break;
+ case ARM::fixup_arm_condbranch:
+ Type = ELF::R_ARM_JUMP24;
+ break;
+ case ARM::fixup_arm_movt_hi16:
+ Type = ELF::R_ARM_MOVT_ABS;
+ break;
+ case ARM::fixup_arm_movw_lo16:
+ Type = ELF::R_ARM_MOVW_ABS_NC;
+ break;
+ case ARM::fixup_t2_movt_hi16:
+ Type = ELF::R_ARM_THM_MOVT_ABS;
+ break;
+ case ARM::fixup_t2_movw_lo16:
+ Type = ELF::R_ARM_THM_MOVW_ABS_NC;
break;
}
-
- WriteSecHdrEntry(SectionStringTableIndex[&it->getSection()],
- Section.getType(), Section.getFlags(),
- Layout.getSectionAddress(&SD),
- SectionOffsetMap.lookup(&SD.getSection()),
- Layout.getSectionSize(&SD), sh_link,
- sh_info, SD.getAlignment(),
- Section.getEntrySize());
}
-}
-ELFObjectWriter::ELFObjectWriter(raw_ostream &OS,
- bool Is64Bit,
- bool IsLittleEndian,
- bool HasRelocationAddend)
- : MCObjectWriter(OS, IsLittleEndian)
-{
- Impl = new ELFObjectWriterImpl(this, Is64Bit, HasRelocationAddend);
+ if (RelocNeedsGOT(Modifier))
+ NeedsGOT = true;
+
+ return Type;
}
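
Each GetRelocType boils down to a pure mapping from (fixup kind, modifier, PC-relativity) to an ELF relocation enum. A reduced sketch of the ARM branch cases above, using the real relocation numbers but plain enums in place of the MC types:

```cpp
// Real values from the ELF for ARM specification.
enum : unsigned { R_ARM_PLT32 = 27, R_ARM_CALL = 28, R_ARM_JUMP24 = 29 };
enum class Mod { None, PLT };

// fixup_arm_uncondbranch goes through the PLT when the modifier asks for
// it and otherwise becomes R_ARM_CALL; fixup_arm_condbranch is always a
// JUMP24, exactly as in the switch above.
static unsigned armBranchReloc(bool Conditional, Mod M) {
  if (Conditional)
    return R_ARM_JUMP24;
  return M == Mod::PLT ? R_ARM_PLT32 : R_ARM_CALL;
}
```
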
-ELFObjectWriter::~ELFObjectWriter() {
- delete (ELFObjectWriterImpl*) Impl;
+//===- MBlazeELFObjectWriter -------------------------------------------===//
+
+MBlazeELFObjectWriter::MBlazeELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian)
+ : ELFObjectWriter(MOTW, _OS, IsLittleEndian) {
}
-void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
- ((ELFObjectWriterImpl*) Impl)->ExecutePostLayoutBinding(Asm);
+MBlazeELFObjectWriter::~MBlazeELFObjectWriter() {
}
-void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue) {
- ((ELFObjectWriterImpl*) Impl)->RecordRelocation(Asm, Layout, Fragment, Fixup,
- Target, FixedValue);
+unsigned MBlazeELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) {
+ // Determine the type of the relocation.
+ unsigned Type;
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case FK_PCRel_4:
+ Type = ELF::R_MICROBLAZE_64_PCREL;
+ break;
+ case FK_PCRel_2:
+ Type = ELF::R_MICROBLAZE_32_PCREL;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ Type = ((IsRelocWithSymbol || Addend != 0)
+ ? ELF::R_MICROBLAZE_32
+ : ELF::R_MICROBLAZE_64);
+ break;
+ case FK_Data_2:
+ Type = ELF::R_MICROBLAZE_32;
+ break;
+ }
+ }
+ return Type;
}
-void ELFObjectWriter::WriteObject(const MCAssembler &Asm,
- const MCAsmLayout &Layout) {
- ((ELFObjectWriterImpl*) Impl)->WriteObject(Asm, Layout);
+//===- X86ELFObjectWriter -------------------------------------------===//
+
+X86ELFObjectWriter::X86ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS,
+ bool IsLittleEndian)
+ : ELFObjectWriter(MOTW, _OS, IsLittleEndian)
+{}
+
+X86ELFObjectWriter::~X86ELFObjectWriter()
+{}
+
+unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) {
+ // Determine the type of the relocation.
+
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ unsigned Type;
+ if (is64Bit()) {
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_PCRel_8:
+ assert(Modifier == MCSymbolRefExpr::VK_None);
+ Type = ELF::R_X86_64_PC64;
+ break;
+ case X86::reloc_signed_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ case FK_Data_4: // FIXME?
+ case X86::reloc_riprel_4byte:
+ case FK_PCRel_4:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_X86_64_PC32;
+ break;
+ case MCSymbolRefExpr::VK_PLT:
+ Type = ELF::R_X86_64_PLT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_X86_64_GOTPCREL;
+ break;
+ case MCSymbolRefExpr::VK_GOTTPOFF:
+ Type = ELF::R_X86_64_GOTTPOFF;
+ break;
+ case MCSymbolRefExpr::VK_TLSGD:
+ Type = ELF::R_X86_64_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_TLSLD:
+ Type = ELF::R_X86_64_TLSLD;
+ break;
+ }
+ break;
+ case FK_PCRel_2:
+ assert(Modifier == MCSymbolRefExpr::VK_None);
+ Type = ELF::R_X86_64_PC16;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_8: Type = ELF::R_X86_64_64; break;
+ case X86::reloc_signed_4byte:
+ assert(isInt<32>(Target.getConstant()));
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_X86_64_32S;
+ break;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_X86_64_GOT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_X86_64_GOTPCREL;
+ break;
+ case MCSymbolRefExpr::VK_TPOFF:
+ Type = ELF::R_X86_64_TPOFF32;
+ break;
+ case MCSymbolRefExpr::VK_DTPOFF:
+ Type = ELF::R_X86_64_DTPOFF32;
+ break;
+ }
+ break;
+ case FK_Data_4:
+ Type = ELF::R_X86_64_32;
+ break;
+ case FK_Data_2: Type = ELF::R_X86_64_16; break;
+ case FK_PCRel_1:
+ case FK_Data_1: Type = ELF::R_X86_64_8; break;
+ }
+ }
+ } else {
+ if (IsPCRel) {
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_PC32;
+ break;
+ case MCSymbolRefExpr::VK_PLT:
+ Type = ELF::R_386_PLT32;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+
+ case X86::reloc_global_offset_table:
+ Type = ELF::R_386_GOTPC;
+ break;
+
+ // FIXME: Should we avoid selecting reloc_signed_4byte in 32-bit mode
+ // instead?
+ case X86::reloc_signed_4byte:
+ case FK_PCRel_4:
+ case FK_Data_4:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_32;
+ break;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_386_GOT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTOFF:
+ Type = ELF::R_386_GOTOFF;
+ break;
+ case MCSymbolRefExpr::VK_TLSGD:
+ Type = ELF::R_386_TLS_GD;
+ break;
+ case MCSymbolRefExpr::VK_TPOFF:
+ Type = ELF::R_386_TLS_LE_32;
+ break;
+ case MCSymbolRefExpr::VK_INDNTPOFF:
+ Type = ELF::R_386_TLS_IE;
+ break;
+ case MCSymbolRefExpr::VK_NTPOFF:
+ Type = ELF::R_386_TLS_LE;
+ break;
+ case MCSymbolRefExpr::VK_GOTNTPOFF:
+ Type = ELF::R_386_TLS_GOTIE;
+ break;
+ case MCSymbolRefExpr::VK_TLSLDM:
+ Type = ELF::R_386_TLS_LDM;
+ break;
+ case MCSymbolRefExpr::VK_DTPOFF:
+ Type = ELF::R_386_TLS_LDO_32;
+ break;
+ }
+ break;
+ case FK_Data_2: Type = ELF::R_386_16; break;
+ case FK_PCRel_1:
+ case FK_Data_1: Type = ELF::R_386_8; break;
+ }
+ }
+ }
+
+ if (RelocNeedsGOT(Modifier))
+ NeedsGOT = true;
+
+ return Type;
}
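
The x86 mapping follows the same shape. For the common 4-byte PC-relative case the modifier alone picks the relocation; a condensed sketch with the real x86-64 psABI relocation numbers:

```cpp
#include <cassert>

// Real values from the x86-64 psABI.
enum : unsigned { R_X86_64_PC32 = 2, R_X86_64_PLT32 = 4,
                  R_X86_64_GOTPCREL = 9 };
enum class Mod { None, PLT, GOTPCREL };

// Mirrors the FK_PCRel_4 / FK_Data_4 modifier switch above.
static unsigned relocForPCRel4(Mod M) {
  switch (M) {
  case Mod::None:     return R_X86_64_PC32;
  case Mod::PLT:      return R_X86_64_PLT32;
  case Mod::GOTPCREL: return R_X86_64_GOTPCREL;
  }
  return 0; // unreachable
}

int main() { assert(relocForPCRel4(Mod::PLT) == R_X86_64_PLT32); }
```
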
diff --git a/contrib/llvm/lib/MC/MCAsmInfo.cpp b/contrib/llvm/lib/MC/MCAsmInfo.cpp
index 670b2e9..cc1afbd 100644
--- a/contrib/llvm/lib/MC/MCAsmInfo.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfo.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cctype>
#include <cstring>
using namespace llvm;
@@ -23,11 +23,13 @@ MCAsmInfo::MCAsmInfo() {
HasMachoZeroFillDirective = false;
HasMachoTBSSDirective = false;
HasStaticCtorDtorReferenceInStaticMode = false;
+ LinkerRequiresNonEmptyDwarfLines = false;
MaxInstLength = 4;
PCSymbol = "$";
SeparatorChar = ';';
CommentColumn = 40;
CommentString = "#";
+ LabelSuffix = ":";
GlobalPrefix = "";
PrivateGlobalPrefix = ".";
LinkerPrivateGlobalPrefix = "";
@@ -52,18 +54,19 @@ MCAsmInfo::MCAsmInfo() {
GPRel32Directive = 0;
GlobalDirective = "\t.globl\t";
HasSetDirective = true;
+ HasAggressiveSymbolFolding = true;
HasLCOMMDirective = false;
COMMDirectiveAlignmentIsInBytes = true;
HasDotTypeDotSizeDirective = true;
HasSingleParameterDotFile = true;
HasNoDeadStrip = false;
+ HasSymbolResolver = false;
WeakRefDirective = 0;
WeakDefDirective = 0;
LinkOnceDirective = 0;
HiddenVisibilityAttr = MCSA_Hidden;
ProtectedVisibilityAttr = MCSA_Protected;
HasLEB128 = false;
- HasDotLocAndDotFile = false;
SupportsDebugInformation = false;
ExceptionsType = ExceptionHandling::None;
DwarfRequiresFrameSection = true;
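
The constructor above only establishes defaults; targets override them in their own MCAsmInfo subclass constructors. A sketch of what a hypothetical target might do with the members touched by this change (the class name is made up):

```cpp
#include "llvm/MC/MCAsmInfo.h"

namespace {
// Hypothetical target: override only the defaults that differ.
class FooMCAsmInfo : public llvm::MCAsmInfo {
public:
  FooMCAsmInfo() {
    LabelSuffix = ":";               // the newly configurable label suffix
    CommentString = ";";             // this assembler uses ';' comments
    HasLEB128 = true;                // .uleb128/.sleb128 are understood
    HasAggressiveSymbolFolding = false;
  }
};
} // end anonymous namespace
```
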
diff --git a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
index e0e261a..13776f0 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -37,13 +37,20 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
HasMachoZeroFillDirective = true; // Uses .zerofill
HasMachoTBSSDirective = true; // Uses .tbss
HasStaticCtorDtorReferenceInStaticMode = true;
-
+
+ // FIXME: Darwin 10 and newer don't need this.
+ LinkerRequiresNonEmptyDwarfLines = true;
+
+ // FIXME: Change this once MC is the system assembler.
+ HasAggressiveSymbolFolding = false;
+
HiddenVisibilityAttr = MCSA_PrivateExtern;
// Doesn't support protected visibility.
ProtectedVisibilityAttr = MCSA_Global;
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
+ HasSymbolResolver = true;
DwarfUsesAbsoluteLabelForStmtList = false;
DwarfUsesLabelOffsetForRanges = false;
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index 1cc8fb0..8d06982 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -12,6 +12,7 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCSectionMachO.h"
@@ -23,6 +24,10 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include <cctype>
using namespace llvm;
namespace {
@@ -32,29 +37,33 @@ class MCAsmStreamer : public MCStreamer {
const MCAsmInfo &MAI;
OwningPtr<MCInstPrinter> InstPrinter;
OwningPtr<MCCodeEmitter> Emitter;
-
+ OwningPtr<TargetAsmBackend> AsmBackend;
+
SmallString<128> CommentToEmit;
raw_svector_ostream CommentStream;
- unsigned IsLittleEndian : 1;
unsigned IsVerboseAsm : 1;
unsigned ShowInst : 1;
+ unsigned UseLoc : 1;
+
+ bool needsSet(const MCExpr *Value);
public:
MCAsmStreamer(MCContext &Context, formatted_raw_ostream &os,
- bool isLittleEndian, bool isVerboseAsm, MCInstPrinter *printer,
- MCCodeEmitter *emitter, bool showInst)
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *printer, MCCodeEmitter *emitter,
+ TargetAsmBackend *asmbackend,
+ bool showInst)
: MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
- InstPrinter(printer), Emitter(emitter), CommentStream(CommentToEmit),
- IsLittleEndian(isLittleEndian), IsVerboseAsm(isVerboseAsm),
- ShowInst(showInst) {
+ InstPrinter(printer), Emitter(emitter), AsmBackend(asmbackend),
+ CommentStream(CommentToEmit), IsVerboseAsm(isVerboseAsm),
+ ShowInst(showInst), UseLoc(useLoc) {
if (InstPrinter && IsVerboseAsm)
InstPrinter->setCommentStream(CommentStream);
}
~MCAsmStreamer() {}
- bool isLittleEndian() const { return IsLittleEndian; }
-
inline void EmitEOL() {
// If we don't have any comments, just emit a \n.
if (!IsVerboseAsm) {
@@ -68,7 +77,7 @@ public:
/// isVerboseAsm - Return true if this streamer supports verbose assembly at
/// all.
virtual bool isVerboseAsm() const { return IsVerboseAsm; }
-
+
/// hasRawTextSupport - We support EmitRawText.
virtual bool hasRawTextSupport() const { return true; }
@@ -98,13 +107,26 @@ public:
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section);
+ virtual void ChangeSection(const MCSection *Section);
+
+ virtual void InitSections() {
+ // FIXME, this is MachO specific, but the testsuite
+ // expects this.
+ SwitchSection(getContext().getMachOSection("__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ 0, SectionKind::getText()));
+ }
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label);
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
@@ -122,19 +144,26 @@ public:
/// @param Symbol - The common symbol to emit.
/// @param Size - The size of the common symbol.
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size);
-
+
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0);
virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0);
-
+
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
- virtual void EmitIntValue(uint64_t Value, unsigned Size, unsigned AddrSpace);
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace);
+ virtual void EmitIntValue(uint64_t Value, unsigned Size,
+ unsigned AddrSpace = 0);
+
+ virtual void EmitULEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+
+ virtual void EmitSLEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+
virtual void EmitGPRel32Value(const MCExpr *Value);
-
+
virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
unsigned AddrSpace);
@@ -150,17 +179,28 @@ public:
unsigned char Value = 0);
virtual void EmitFileDirective(StringRef Filename);
- virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+ virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator);
+
+ virtual bool EmitCFIStartProc();
+ virtual bool EmitCFIEndProc();
+ virtual bool EmitCFIDefCfaOffset(int64_t Offset);
+ virtual bool EmitCFIDefCfaRegister(int64_t Register);
+ virtual bool EmitCFIOffset(int64_t Register, int64_t Offset);
+ virtual bool EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding);
+ virtual bool EmitCFILsda(const MCSymbol *Sym, unsigned Encoding);
virtual void EmitInstruction(const MCInst &Inst);
-
- /// EmitRawText - If this file is backed by a assembly streamer, this dumps
+
+ /// EmitRawText - If this file is backed by an assembly streamer, this dumps
/// the specified string in the output .s file. This capability is
/// indicated by the hasRawTextSupport() predicate.
virtual void EmitRawText(StringRef String);
-
+
virtual void Finish();
-
+
/// @}
};
@@ -172,14 +212,14 @@ public:
/// verbose assembly output is enabled.
void MCAsmStreamer::AddComment(const Twine &T) {
if (!IsVerboseAsm) return;
-
+
// Make sure that CommentStream is flushed.
CommentStream.flush();
-
+
T.toVector(CommentToEmit);
// Each comment goes on its own line.
CommentToEmit.push_back('\n');
-
+
// Tell the comment stream that the vector changed underneath it.
CommentStream.resync();
}
@@ -189,10 +229,10 @@ void MCAsmStreamer::EmitCommentsAndEOL() {
OS << '\n';
return;
}
-
+
CommentStream.flush();
StringRef Comments = CommentToEmit.str();
-
+
assert(Comments.back() == '\n' &&
"Comment array not newline terminated");
do {
@@ -200,10 +240,10 @@ void MCAsmStreamer::EmitCommentsAndEOL() {
OS.PadToColumn(MAI.getCommentColumn());
size_t Position = Comments.find('\n');
OS << MAI.getCommentString() << ' ' << Comments.substr(0, Position) << '\n';
-
+
Comments = Comments.substr(Position+1);
} while (!Comments.empty());
-
+
CommentToEmit.clear();
// Tell the comment stream that the vector changed underneath it.
CommentStream.resync();
@@ -214,33 +254,41 @@ static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8));
}
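
truncateToSize keeps only the low Bytes*8 bits; the odd-looking ((uint64_t)(int64_t)-1 >> ...) expression builds an all-ones mask without shifting by 64 when Bytes is 8. A couple of worked cases:

```cpp
#include <cassert>
#include <cstdint>

static int64_t truncateToSize(int64_t Value, unsigned Bytes) {
  return Value & ((uint64_t)(int64_t)-1 >> (64 - Bytes * 8));
}

int main() {
  assert(truncateToSize(0x1234567890LL, 4) == 0x34567890); // keep low 32 bits
  assert(truncateToSize(-1, 2) == 0xFFFF);                 // keep low 16 bits
  assert(truncateToSize(0x1122334455667788LL, 8) ==
         0x1122334455667788LL);                            // Bytes==8: no-op
  return 0;
}
```
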
-void MCAsmStreamer::SwitchSection(const MCSection *Section) {
+void MCAsmStreamer::ChangeSection(const MCSection *Section) {
assert(Section && "Cannot switch to a null section!");
- if (Section != CurSection) {
- PrevSection = CurSection;
- CurSection = Section;
- Section->PrintSwitchToSection(MAI, OS);
- }
+ Section->PrintSwitchToSection(MAI, OS);
}
void MCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(CurSection && "Cannot emit before setting section!");
+ assert(getCurrentSection() && "Cannot emit before setting section!");
- OS << *Symbol << ":";
+ OS << *Symbol << MAI.getLabelSuffix();
EmitEOL();
- Symbol->setSection(*CurSection);
+ Symbol->setSection(*getCurrentSection());
}
void MCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
default: assert(0 && "Invalid flag!");
+ case MCAF_SyntaxUnified: OS << "\t.syntax unified"; break;
case MCAF_SubsectionsViaSymbols: OS << ".subsections_via_symbols"; break;
+ case MCAF_Code16: OS << "\t.code\t16"; break;
+ case MCAF_Code32: OS << "\t.code\t32"; break;
}
EmitEOL();
}
+void MCAsmStreamer::EmitThumbFunc(MCSymbol *Func) {
+ // This needs to emit to a temporary string to get properly quoted
+ // MCSymbols when they have spaces in them.
+ OS << "\t.thumb_func";
+ if (Func)
+ OS << '\t' << *Func;
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
OS << *Symbol << " = " << *Value;
EmitEOL();
@@ -249,6 +297,18 @@ void MCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
Symbol->setVariableValue(Value);
}
+void MCAsmStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
+ OS << ".weakref " << *Alias << ", " << *Symbol;
+ EmitEOL();
+}
+
+void MCAsmStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ EmitDwarfSetLineAddr(LineDelta, Label,
+ getContext().getTargetAsmInfo().getPointerSize());
+}
+
void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) {
switch (Attribute) {
@@ -259,6 +319,7 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_ELF_TypeTLS: /// .type _foo, STT_TLS # aka @tls_object
case MCSA_ELF_TypeCommon: /// .type _foo, STT_COMMON # aka @common
case MCSA_ELF_TypeNoType: /// .type _foo, STT_NOTYPE # aka @notype
+ case MCSA_ELF_TypeGnuUniqueObject: /// .type _foo, @gnu_unique_object
assert(MAI.hasDotTypeDotSizeDirective() && "Symbol Attr not supported");
OS << "\t.type\t" << *Symbol << ','
<< ((MAI.getCommentString()[0] != '@') ? '@' : '%');
@@ -270,6 +331,7 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_ELF_TypeTLS: OS << "tls_object"; break;
case MCSA_ELF_TypeCommon: OS << "common"; break;
case MCSA_ELF_TypeNoType: OS << "no_type"; break;
+ case MCSA_ELF_TypeGnuUniqueObject: OS << "gnu_unique_object"; break;
}
EmitEOL();
return;
@@ -282,6 +344,7 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_LazyReference: OS << "\t.lazy_reference\t"; break;
case MCSA_Local: OS << "\t.local\t"; break;
case MCSA_NoDeadStrip: OS << "\t.no_dead_strip\t"; break;
+ case MCSA_SymbolResolver: OS << "\t.symbol_resolver\t"; break;
case MCSA_PrivateExtern: OS << "\t.private_extern\t"; break;
case MCSA_Protected: OS << "\t.protected\t"; break;
case MCSA_Reference: OS << "\t.reference\t"; break;
@@ -352,11 +415,11 @@ void MCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
unsigned Size, unsigned ByteAlignment) {
// Note: a .zerofill directive does not switch sections.
OS << ".zerofill ";
-
+
// This is a mach-o specific directive.
const MCSectionMachO *MOSection = ((const MCSectionMachO*)Section);
OS << MOSection->getSegmentName() << "," << MOSection->getSectionName();
-
+
if (Symbol != NULL) {
OS << ',' << *Symbol << ',' << Size;
if (ByteAlignment != 0)
@@ -374,11 +437,11 @@ void MCAsmStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
// Instead of using the Section we'll just use the shortcut.
// This is a mach-o specific directive and section.
OS << ".tbss " << *Symbol << ", " << Size;
-
+
// Output align if we have it. We default to 1 so don't bother printing
// that.
if (ByteAlignment > 1) OS << ", " << Log2_32(ByteAlignment);
-
+
EmitEOL();
}
@@ -386,19 +449,19 @@ static inline char toOctal(int X) { return (X&7)+'0'; }
static void PrintQuotedString(StringRef Data, raw_ostream &OS) {
OS << '"';
-
+
for (unsigned i = 0, e = Data.size(); i != e; ++i) {
unsigned char C = Data[i];
if (C == '"' || C == '\\') {
OS << '\\' << (char)C;
continue;
}
-
+
if (isprint((unsigned char)C)) {
OS << (char)C;
continue;
}
-
+
switch (C) {
case '\b': OS << "\\b"; break;
case '\f': OS << "\\f"; break;
@@ -413,15 +476,15 @@ static void PrintQuotedString(StringRef Data, raw_ostream &OS) {
break;
}
}
-
+
OS << '"';
}
void MCAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- assert(CurSection && "Cannot emit contents before setting section!");
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
if (Data.empty()) return;
-
+
if (Data.size() == 1) {
OS << MAI.getData8bitsDirective(AddrSpace);
OS << (unsigned)(unsigned char)Data[0];
@@ -443,11 +506,15 @@ void MCAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
EmitEOL();
}
-/// EmitIntValue - Special case of EmitValue that avoids the client having
-/// to pass in a MCExpr for constant integers.
void MCAsmStreamer::EmitIntValue(uint64_t Value, unsigned Size,
unsigned AddrSpace) {
- assert(CurSection && "Cannot emit contents before setting section!");
+ EmitValue(MCConstantExpr::Create(Value, getContext()), Size, AddrSpace);
+}
+
+void MCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) {
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
+ assert(!isPCRel && "Cannot emit pc relative relocations!");
const char *Directive = 0;
switch (Size) {
default: break;
@@ -458,35 +525,43 @@ void MCAsmStreamer::EmitIntValue(uint64_t Value, unsigned Size,
Directive = MAI.getData64bitsDirective(AddrSpace);
// If the target doesn't support 64-bit data, emit as two 32-bit halves.
if (Directive) break;
- if (isLittleEndian()) {
- EmitIntValue((uint32_t)(Value >> 0 ), 4, AddrSpace);
- EmitIntValue((uint32_t)(Value >> 32), 4, AddrSpace);
+ int64_t IntValue;
+ if (!Value->EvaluateAsAbsolute(IntValue))
+ report_fatal_error("Don't know how to emit this value.");
+ if (getContext().getTargetAsmInfo().isLittleEndian()) {
+ EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
+ EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
} else {
- EmitIntValue((uint32_t)(Value >> 32), 4, AddrSpace);
- EmitIntValue((uint32_t)(Value >> 0 ), 4, AddrSpace);
+ EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
+ EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
}
return;
}
-
+
assert(Directive && "Invalid size for machine code value!");
- OS << Directive << truncateToSize(Value, Size);
+ OS << Directive << *Value;
EmitEOL();
}
-void MCAsmStreamer::EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
- assert(CurSection && "Cannot emit contents before setting section!");
- const char *Directive = 0;
- switch (Size) {
- default: break;
- case 1: Directive = MAI.getData8bitsDirective(AddrSpace); break;
- case 2: Directive = MAI.getData16bitsDirective(AddrSpace); break;
- case 4: Directive = MAI.getData32bitsDirective(AddrSpace); break;
- case 8: Directive = MAI.getData64bitsDirective(AddrSpace); break;
+void MCAsmStreamer::EmitULEB128Value(const MCExpr *Value, unsigned AddrSpace) {
+ int64_t IntValue;
+ if (Value->EvaluateAsAbsolute(IntValue)) {
+ EmitULEB128IntValue(IntValue, AddrSpace);
+ return;
}
-
- assert(Directive && "Invalid size for machine code value!");
- OS << Directive << *Value;
+ assert(MAI.hasLEB128() && "Cannot print a .uleb");
+ OS << ".uleb128 " << *Value;
+ EmitEOL();
+}
+
+void MCAsmStreamer::EmitSLEB128Value(const MCExpr *Value, unsigned AddrSpace) {
+ int64_t IntValue;
+ if (Value->EvaluateAsAbsolute(IntValue)) {
+ EmitSLEB128IntValue(IntValue, AddrSpace);
+ return;
+ }
+ assert(MAI.hasLEB128() && "Cannot print a .sleb");
+ OS << ".sleb128 " << *Value;
EmitEOL();
}
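
When the expression folds to a constant, the streamer emits the LEB128 bytes itself (EmitULEB128IntValue); otherwise it leans on the assembler's .uleb128/.sleb128 support. For reference, a minimal ULEB128 encoder with the classic worked example, 624485 encoding to 0xE5 0x8E 0x26:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Unsigned LEB128: 7 bits per byte, least significant group first,
// high bit set on every byte except the last.
static std::vector<uint8_t> encodeULEB128(uint64_t Value) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value)
      Byte |= 0x80; // more bytes follow
    Out.push_back(Byte);
  } while (Value);
  return Out;
}

int main() {
  std::vector<uint8_t> V = encodeULEB128(624485);
  assert(V.size() == 3 && V[0] == 0xE5 && V[1] == 0x8E && V[2] == 0x26);
  return 0;
}
```
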
@@ -502,7 +577,7 @@ void MCAsmStreamer::EmitGPRel32Value(const MCExpr *Value) {
void MCAsmStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
unsigned AddrSpace) {
if (NumBytes == 0) return;
-
+
if (AddrSpace == 0)
if (const char *ZeroDirective = MAI.getZeroDirective()) {
OS << ZeroDirective << NumBytes;
@@ -530,7 +605,7 @@ void MCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
case 4: OS << ".p2alignl "; break;
case 8: llvm_unreachable("Unsupported alignment size!");
}
-
+
if (MAI.getAlignmentIsInBytes())
OS << ByteAlignment;
else
@@ -540,13 +615,13 @@ void MCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
OS << ", 0x";
OS.write_hex(truncateToSize(Value, ValueSize));
- if (MaxBytesToEmit)
+ if (MaxBytesToEmit)
OS << ", " << MaxBytesToEmit;
}
EmitEOL();
return;
}
-
+
// Non-power of two alignment. This is not widely supported by assemblers.
// FIXME: Parameterize this based on MAI.
switch (ValueSize) {
@@ -559,7 +634,7 @@ void MCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
OS << ' ' << ByteAlignment;
OS << ", " << truncateToSize(Value, ValueSize);
- if (MaxBytesToEmit)
+ if (MaxBytesToEmit)
OS << ", " << MaxBytesToEmit;
EmitEOL();
}
@@ -586,10 +661,118 @@ void MCAsmStreamer::EmitFileDirective(StringRef Filename) {
EmitEOL();
}
-void MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo, StringRef Filename){
- OS << "\t.file\t" << FileNo << ' ';
- PrintQuotedString(Filename, OS);
+bool MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo, StringRef Filename){
+ if (UseLoc) {
+ OS << "\t.file\t" << FileNo << ' ';
+ PrintQuotedString(Filename, OS);
+ EmitEOL();
+ }
+ return this->MCStreamer::EmitDwarfFileDirective(FileNo, Filename);
+}
+
+void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa,
+ unsigned Discriminator) {
+ this->MCStreamer::EmitDwarfLocDirective(FileNo, Line, Column, Flags,
+ Isa, Discriminator);
+ if (!UseLoc)
+ return;
+
+ OS << "\t.loc\t" << FileNo << " " << Line << " " << Column;
+ if (Flags & DWARF2_FLAG_BASIC_BLOCK)
+ OS << " basic_block";
+ if (Flags & DWARF2_FLAG_PROLOGUE_END)
+ OS << " prologue_end";
+ if (Flags & DWARF2_FLAG_EPILOGUE_BEGIN)
+ OS << " epilogue_begin";
+
+ unsigned OldFlags = getContext().getCurrentDwarfLoc().getFlags();
+ if ((Flags & DWARF2_FLAG_IS_STMT) != (OldFlags & DWARF2_FLAG_IS_STMT)) {
+ OS << " is_stmt ";
+
+ if (Flags & DWARF2_FLAG_IS_STMT)
+ OS << "1";
+ else
+ OS << "0";
+ }
+
+ if (Isa)
+ OS << "isa " << Isa;
+ if (Discriminator)
+ OS << "discriminator " << Discriminator;
+ EmitEOL();
+}
+
+bool MCAsmStreamer::EmitCFIStartProc() {
+ if (this->MCStreamer::EmitCFIStartProc())
+ return true;
+
+ OS << "\t.cfi_startproc";
EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFIEndProc() {
+ if (this->MCStreamer::EmitCFIEndProc())
+ return true;
+
+ OS << "\t.cfi_endproc";
+ EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFIDefCfaOffset(int64_t Offset) {
+ if (this->MCStreamer::EmitCFIDefCfaOffset(Offset))
+ return true;
+
+ OS << "\t.cfi_def_cfa_offset " << Offset;
+ EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFIDefCfaRegister(int64_t Register) {
+ if (this->MCStreamer::EmitCFIDefCfaRegister(Register))
+ return true;
+
+ OS << "\t.cfi_def_cfa_register " << Register;
+ EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFIOffset(int64_t Register, int64_t Offset) {
+ if (this->MCStreamer::EmitCFIOffset(Register, Offset))
+ return true;
+
+ OS << "\t.cfi_offset " << Register << ", " << Offset;
+ EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFIPersonality(const MCSymbol *Sym,
+ unsigned Encoding) {
+ if (this->MCStreamer::EmitCFIPersonality(Sym, Encoding))
+ return true;
+
+ OS << "\t.cfi_personality " << Encoding << ", " << *Sym;
+ EmitEOL();
+
+ return false;
+}
+
+bool MCAsmStreamer::EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
+ if (this->MCStreamer::EmitCFILsda(Sym, Encoding))
+ return true;
+
+ OS << "\t.cfi_lsda " << Encoding << ", " << *Sym;
+ EmitEOL();
+
+ return false;
}
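
Every EmitCFI* override above follows one pattern: call the MCStreamer base implementation first, which records the directive and returns true on failure, and only pretty-print the directive text when it succeeds. A schematic of that pattern (Base and the directive name are stand-ins, not the real MC API):

```cpp
#include <iostream>

// Stand-in for MCStreamer: records the directive, returns true on failure.
struct Base {
  virtual ~Base() {}
  virtual bool EmitDirective(long Arg) { /* record Arg */ return false; }
};

// Stand-in for MCAsmStreamer: record first, then print the text form.
struct TextStreamer : Base {
  bool EmitDirective(long Arg) override {
    if (Base::EmitDirective(Arg)) // base failed: propagate, print nothing
      return true;
    std::cout << "\t.cfi_something " << Arg << '\n';
    return false;
  }
};
```
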
void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
@@ -610,7 +793,7 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
MCFixup &F = Fixups[i];
- const MCFixupKindInfo &Info = Emitter->getFixupKindInfo(F.getKind());
+ const MCFixupKindInfo &Info = AsmBackend->getFixupKindInfo(F.getKind());
for (unsigned j = 0; j != Info.TargetSize; ++j) {
unsigned Index = F.getOffset() * 8 + Info.TargetOffset + j;
assert(Index < Code.size() * 8 && "Invalid offset in fixup!");
@@ -618,6 +801,8 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
}
}
+ // FIXME: Note that the fixup comments for Thumb2 are completely bogus since
+ // the high-order halfword of a 32-bit Thumb2 instruction is emitted first.
OS << "encoding: [";
for (unsigned i = 0, e = Code.size(); i != e; ++i) {
if (i)
@@ -637,15 +822,26 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
if (MapEntry == 0) {
OS << format("0x%02x", uint8_t(Code[i]));
} else {
- assert(Code[i] == 0 && "Encoder wrote into fixed up bit!");
- OS << char('A' + MapEntry - 1);
+ if (Code[i]) {
+ // FIXME: Some of the 8 bits require fix up.
+ OS << format("0x%02x", uint8_t(Code[i])) << '\''
+ << char('A' + MapEntry - 1) << '\'';
+ } else
+ OS << char('A' + MapEntry - 1);
}
} else {
// Otherwise, write out in binary.
OS << "0b";
for (unsigned j = 8; j--;) {
unsigned Bit = (Code[i] >> j) & 1;
- if (uint8_t MapEntry = FixupMap[i * 8 + j]) {
+
+ unsigned FixupBit;
+ if (getContext().getTargetAsmInfo().isLittleEndian())
+ FixupBit = i * 8 + j;
+ else
+ FixupBit = i * 8 + (7-j);
+
+ if (uint8_t MapEntry = FixupMap[FixupBit]) {
assert(Bit == 0 && "Encoder wrote into fixed up bit!");
OS << char('A' + MapEntry - 1);
} else
@@ -657,14 +853,17 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst) {
for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
MCFixup &F = Fixups[i];
- const MCFixupKindInfo &Info = Emitter->getFixupKindInfo(F.getKind());
+ const MCFixupKindInfo &Info = AsmBackend->getFixupKindInfo(F.getKind());
OS << " fixup " << char('A' + i) << " - " << "offset: " << F.getOffset()
<< ", value: " << *F.getValue() << ", kind: " << Info.Name << "\n";
}
}
void MCAsmStreamer::EmitInstruction(const MCInst &Inst) {
- assert(CurSection && "Cannot emit contents before setting section!");
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
+
+ if (!UseLoc)
+ MCLineEntry::Make(this, getCurrentSection());
// Show the encoding in a comment if we have a code emitter.
if (Emitter)
@@ -684,7 +883,7 @@ void MCAsmStreamer::EmitInstruction(const MCInst &Inst) {
EmitEOL();
}
-/// EmitRawText - If this file is backed by a assembly streamer, this dumps
+/// EmitRawText - If this file is backed by an assembly streamer, this dumps
/// the specified string in the output .s file. This capability is
/// indicated by the hasRawTextSupport() predicate.
void MCAsmStreamer::EmitRawText(StringRef String) {
@@ -695,13 +894,16 @@ void MCAsmStreamer::EmitRawText(StringRef String) {
}
void MCAsmStreamer::Finish() {
+ // Dump out the dwarf file & directory tables and line tables.
+ if (getContext().hasDwarfFiles() && !UseLoc)
+ MCDwarfFileTable::Emit(this);
}
MCStreamer *llvm::createAsmStreamer(MCContext &Context,
formatted_raw_ostream &OS,
- bool isLittleEndian,
- bool isVerboseAsm, MCInstPrinter *IP,
- MCCodeEmitter *CE, bool ShowInst) {
- return new MCAsmStreamer(Context, OS, isLittleEndian, isVerboseAsm,
- IP, CE, ShowInst);
+ bool isVerboseAsm, bool useLoc,
+ MCInstPrinter *IP, MCCodeEmitter *CE,
+ TargetAsmBackend *TAB, bool ShowInst) {
+ return new MCAsmStreamer(Context, OS, isVerboseAsm, useLoc,
+ IP, CE, TAB, ShowInst);
}
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index f0e1d7f..9992646 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -11,10 +11,13 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
@@ -36,7 +39,6 @@ STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
-STATISTIC(SectionLayouts, "Number of section layouts");
}
}
@@ -48,131 +50,78 @@ STATISTIC(SectionLayouts, "Number of section layouts");
/* *** */
MCAsmLayout::MCAsmLayout(MCAssembler &Asm)
- : Assembler(Asm), LastValidFragment(0)
+ : Assembler(Asm), LastValidFragment()
{
// Compute the section layout order. Virtual sections must go last.
for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
- if (!Asm.getBackend().isVirtualSection(it->getSection()))
+ if (!it->getSection().isVirtualSection())
SectionOrder.push_back(&*it);
for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
- if (Asm.getBackend().isVirtualSection(it->getSection()))
+ if (it->getSection().isVirtualSection())
SectionOrder.push_back(&*it);
}
-bool MCAsmLayout::isSectionUpToDate(const MCSectionData *SD) const {
- // The first section is always up-to-date.
- unsigned Index = SD->getLayoutOrder();
- if (!Index)
- return true;
-
- // Otherwise, sections are always implicitly computed when the preceeding
- // fragment is layed out.
- const MCSectionData *Prev = getSectionOrder()[Index - 1];
- return isFragmentUpToDate(&(Prev->getFragmentList().back()));
-}
-
bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const {
- return (LastValidFragment &&
- F->getLayoutOrder() <= LastValidFragment->getLayoutOrder());
+ const MCSectionData &SD = *F->getParent();
+ const MCFragment *LastValid = LastValidFragment.lookup(&SD);
+ if (!LastValid)
+ return false;
+ assert(LastValid->getParent() == F->getParent());
+ return F->getLayoutOrder() <= LastValid->getLayoutOrder();
}
-void MCAsmLayout::UpdateForSlide(MCFragment *F, int SlideAmount) {
+void MCAsmLayout::Invalidate(MCFragment *F) {
// If this fragment wasn't already up-to-date, we don't need to do anything.
if (!isFragmentUpToDate(F))
return;
- // Otherwise, reset the last valid fragment to the predecessor of the
- // invalidated fragment.
- LastValidFragment = F->getPrevNode();
- if (!LastValidFragment) {
- unsigned Index = F->getParent()->getLayoutOrder();
- if (Index != 0) {
- MCSectionData *Prev = getSectionOrder()[Index - 1];
- LastValidFragment = &(Prev->getFragmentList().back());
- }
- }
+ // Otherwise, reset the last valid fragment to this fragment.
+ const MCSectionData &SD = *F->getParent();
+ LastValidFragment[&SD] = F;
}
void MCAsmLayout::EnsureValid(const MCFragment *F) const {
+ MCSectionData &SD = *F->getParent();
+
+ MCFragment *Cur = LastValidFragment[&SD];
+ if (!Cur)
+ Cur = &*SD.begin();
+ else
+ Cur = Cur->getNextNode();
+
// Advance the layout position until the fragment is up-to-date.
while (!isFragmentUpToDate(F)) {
- // Advance to the next fragment.
- MCFragment *Cur = LastValidFragment;
- if (Cur)
- Cur = Cur->getNextNode();
- if (!Cur) {
- unsigned NextIndex = 0;
- if (LastValidFragment)
- NextIndex = LastValidFragment->getParent()->getLayoutOrder() + 1;
- Cur = SectionOrder[NextIndex]->begin();
- }
-
const_cast<MCAsmLayout*>(this)->LayoutFragment(Cur);
+ Cur = Cur->getNextNode();
}
}
-void MCAsmLayout::FragmentReplaced(MCFragment *Src, MCFragment *Dst) {
- if (LastValidFragment == Src)
- LastValidFragment = Dst;
-
- Dst->Offset = Src->Offset;
- Dst->EffectiveSize = Src->EffectiveSize;
-}
-
-uint64_t MCAsmLayout::getFragmentAddress(const MCFragment *F) const {
- assert(F->getParent() && "Missing section()!");
- return getSectionAddress(F->getParent()) + getFragmentOffset(F);
-}
-
-uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const {
- EnsureValid(F);
- assert(F->EffectiveSize != ~UINT64_C(0) && "Address not set!");
- return F->EffectiveSize;
-}
-
uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
EnsureValid(F);
assert(F->Offset != ~UINT64_C(0) && "Address not set!");
return F->Offset;
}
-uint64_t MCAsmLayout::getSymbolAddress(const MCSymbolData *SD) const {
- assert(SD->getFragment() && "Invalid getAddress() on undefined symbol!");
- return getFragmentAddress(SD->getFragment()) + SD->getOffset();
-}
-
-uint64_t MCAsmLayout::getSectionAddress(const MCSectionData *SD) const {
- EnsureValid(SD->begin());
- assert(SD->Address != ~UINT64_C(0) && "Address not set!");
- return SD->Address;
+uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
+ assert(SD->getFragment() && "Invalid getOffset() on undefined symbol!");
+ return getFragmentOffset(SD->getFragment()) + SD->getOffset();
}
uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
// The size is the last fragment's end offset.
const MCFragment &F = SD->getFragmentList().back();
- return getFragmentOffset(&F) + getFragmentEffectiveSize(&F);
+ return getFragmentOffset(&F) + getAssembler().ComputeFragmentSize(*this, F);
}
uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
// Virtual sections have no file size.
- if (getAssembler().getBackend().isVirtualSection(SD->getSection()))
+ if (SD->getSection().isVirtualSection())
return 0;
// Otherwise, the file size is the same as the address space size.
return getSectionAddressSize(SD);
}
-uint64_t MCAsmLayout::getSectionSize(const MCSectionData *SD) const {
- // The logical size is the address space size minus any tail padding.
- uint64_t Size = getSectionAddressSize(SD);
- const MCAlignFragment *AF =
- dyn_cast<MCAlignFragment>(&(SD->getFragmentList().back()));
- if (AF && AF->hasOnlyAlignAddress())
- Size -= getFragmentEffectiveSize(AF);
-
- return Size;
-}
-
/* *** */
MCFragment::MCFragment() : Kind(FragmentType(~0)) {
@@ -182,8 +131,7 @@ MCFragment::~MCFragment() {
}
MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
- : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0)),
- EffectiveSize(~UINT64_C(0))
+ : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0))
{
if (Parent)
Parent->getFragmentList().push_back(this);
@@ -195,8 +143,8 @@ MCSectionData::MCSectionData() : Section(0) {}
MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
: Section(&_Section),
+ Ordinal(~UINT32_C(0)),
Alignment(1),
- Address(~UINT64_C(0)),
HasInstructions(false)
{
if (A)
@@ -220,99 +168,17 @@ MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
/* *** */
-MCAssembler::MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
- MCCodeEmitter &_Emitter, raw_ostream &_OS)
- : Context(_Context), Backend(_Backend), Emitter(_Emitter),
- OS(_OS), RelaxAll(false), SubsectionsViaSymbols(false)
+MCAssembler::MCAssembler(MCContext &Context_, TargetAsmBackend &Backend_,
+ MCCodeEmitter &Emitter_, MCObjectWriter &Writer_,
+ raw_ostream &OS_)
+ : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
+ OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false)
{
}
MCAssembler::~MCAssembler() {
}
-static bool isScatteredFixupFullyResolvedSimple(const MCAssembler &Asm,
- const MCFixup &Fixup,
- const MCValue Target,
- const MCSection *BaseSection) {
- // The effective fixup address is
- // addr(atom(A)) + offset(A)
- // - addr(atom(B)) - offset(B)
- // - addr(<base symbol>) + <fixup offset from base symbol>
- // and the offsets are not relocatable, so the fixup is fully resolved when
- // addr(atom(A)) - addr(atom(B)) - addr(<base symbol>)) == 0.
- //
- // The simple (Darwin, except on x86_64) way of dealing with this was to
- // assume that any reference to a temporary symbol *must* be a temporary
- // symbol in the same atom, unless the sections differ. Therefore, any PCrel
- // relocation to a temporary symbol (in the same section) is fully
- // resolved. This also works in conjunction with absolutized .set, which
- // requires the compiler to use .set to absolutize the differences between
- // symbols which the compiler knows to be assembly time constants, so we don't
- // need to worry about considering symbol differences fully resolved.
-
- // Non-relative fixups are only resolved if constant.
- if (!BaseSection)
- return Target.isAbsolute();
-
- // Otherwise, relative fixups are only resolved if not a difference and the
- // target is a temporary in the same section.
- if (Target.isAbsolute() || Target.getSymB())
- return false;
-
- const MCSymbol *A = &Target.getSymA()->getSymbol();
- if (!A->isTemporary() || !A->isInSection() ||
- &A->getSection() != BaseSection)
- return false;
-
- return true;
-}
-
-static bool isScatteredFixupFullyResolved(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFixup &Fixup,
- const MCValue Target,
- const MCSymbolData *BaseSymbol) {
- // The effective fixup address is
- // addr(atom(A)) + offset(A)
- // - addr(atom(B)) - offset(B)
- // - addr(BaseSymbol) + <fixup offset from base symbol>
- // and the offsets are not relocatable, so the fixup is fully resolved when
- // addr(atom(A)) - addr(atom(B)) - addr(BaseSymbol) == 0.
- //
- // Note that "false" is almost always conservatively correct (it means we emit
- // a relocation which is unnecessary), except when it would force us to emit a
- // relocation which the target cannot encode.
-
- const MCSymbolData *A_Base = 0, *B_Base = 0;
- if (const MCSymbolRefExpr *A = Target.getSymA()) {
- // Modified symbol references cannot be resolved.
- if (A->getKind() != MCSymbolRefExpr::VK_None)
- return false;
-
- A_Base = Asm.getAtom(Layout, &Asm.getSymbolData(A->getSymbol()));
- if (!A_Base)
- return false;
- }
-
- if (const MCSymbolRefExpr *B = Target.getSymB()) {
- // Modified symbol references cannot be resolved.
- if (B->getKind() != MCSymbolRefExpr::VK_None)
- return false;
-
- B_Base = Asm.getAtom(Layout, &Asm.getSymbolData(B->getSymbol()));
- if (!B_Base)
- return false;
- }
-
- // If there is no base, A and B have to be the same atom for this fixup to be
- // fully resolved.
- if (!BaseSymbol)
- return A_Base == B_Base;
-
- // Otherwise, B must be missing and A must be the base.
- return !B_Base && BaseSymbol == A_Base;
-}
-
bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
// Non-temporary labels should always be visible to the linker.
if (!Symbol.isTemporary())
@@ -326,8 +192,7 @@ bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
return getBackend().doesSectionRequireSymbols(Symbol.getSection());
}
-const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
- const MCSymbolData *SD) const {
+const MCSymbolData *MCAssembler::getAtom(const MCSymbolData *SD) const {
// Linker visible symbols define atoms.
if (isSymbolLinkerVisible(SD->getSymbol()))
return SD;
@@ -351,67 +216,78 @@ bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
MCValue &Target, uint64_t &Value) const {
++stats::EvaluateFixup;
- if (!Fixup.getValue()->EvaluateAsRelocatable(Target, &Layout))
+ if (!Fixup.getValue()->EvaluateAsRelocatable(Target, Layout))
report_fatal_error("expected relocatable expression");
- // FIXME: How do non-scattered symbols work in ELF? I presume the linker
- // doesn't support small relocations, but then under what criteria does the
- // assembler allow symbol differences?
+ bool IsPCRel = Backend.getFixupKindInfo(
+ Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;
+
+ bool IsResolved;
+ if (IsPCRel) {
+ if (Target.getSymB()) {
+ IsResolved = false;
+ } else if (!Target.getSymA()) {
+ IsResolved = false;
+ } else {
+ const MCSymbolRefExpr *A = Target.getSymA();
+ const MCSymbol &SA = A->getSymbol();
+ if (A->getKind() != MCSymbolRefExpr::VK_None ||
+ SA.AliasedSymbol().isUndefined()) {
+ IsResolved = false;
+ } else {
+ const MCSymbolData &DataA = getSymbolData(SA);
+ IsResolved =
+ getWriter().IsSymbolRefDifferenceFullyResolvedImpl(*this, DataA,
+ *DF, false, true);
+ }
+ }
+ } else {
+ IsResolved = Target.isAbsolute();
+ }
Value = Target.getConstant();
- bool IsPCRel = Emitter.getFixupKindInfo(
- Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;
- bool IsResolved = true;
+ bool IsThumb = false;
if (const MCSymbolRefExpr *A = Target.getSymA()) {
- if (A->getSymbol().isDefined())
- Value += Layout.getSymbolAddress(&getSymbolData(A->getSymbol()));
- else
- IsResolved = false;
+ const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
+ if (Sym.isDefined())
+ Value += Layout.getSymbolOffset(&getSymbolData(Sym));
+ if (isThumbFunc(&Sym))
+ IsThumb = true;
}
if (const MCSymbolRefExpr *B = Target.getSymB()) {
- if (B->getSymbol().isDefined())
- Value -= Layout.getSymbolAddress(&getSymbolData(B->getSymbol()));
- else
- IsResolved = false;
+ const MCSymbol &Sym = B->getSymbol().AliasedSymbol();
+ if (Sym.isDefined())
+ Value -= Layout.getSymbolOffset(&getSymbolData(Sym));
}
- // If we are using scattered symbols, determine whether this value is actually
- // resolved; scattering may cause atoms to move.
- if (IsResolved && getBackend().hasScatteredSymbols()) {
- if (getBackend().hasReliableSymbolDifference()) {
- // If this is a PCrel relocation, find the base atom (identified by its
- // symbol) that the fixup value is relative to.
- const MCSymbolData *BaseSymbol = 0;
- if (IsPCRel) {
- BaseSymbol = DF->getAtom();
- if (!BaseSymbol)
- IsResolved = false;
- }
- if (IsResolved)
- IsResolved = isScatteredFixupFullyResolved(*this, Layout, Fixup, Target,
- BaseSymbol);
- } else {
- const MCSection *BaseSection = 0;
- if (IsPCRel)
- BaseSection = &DF->getParent()->getSection();
+ bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
+ assert((ShouldAlignPC ? IsPCRel : true) &&
+ "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");
- IsResolved = isScatteredFixupFullyResolvedSimple(*this, Fixup, Target,
- BaseSection);
- }
+ if (IsPCRel) {
+ uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();
+
+ // A number of ARM fixups in Thumb mode require that the effective PC
+ // address be determined as the 32-bit aligned version of the actual offset.
+ if (ShouldAlignPC) Offset &= ~0x3;
+ Value -= Offset;
}
- if (IsPCRel)
- Value -= Layout.getFragmentAddress(DF) + Fixup.getOffset();
+ // ARM fixups based on a Thumb function address need to have the low
+ // bit set. The actual value is always at least 16-bit aligned, so the
+ // low bit is normally clear and available for use as an ISA flag for
+ // interworking.
+ if (IsThumb)
+ Value |= 1;
return IsResolved;
}
-uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
- const MCFragment &F,
- uint64_t SectionAddress,
- uint64_t FragmentOffset) const {
+uint64_t MCAssembler::ComputeFragmentSize(const MCAsmLayout &Layout,
+ const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
return cast<MCDataFragment>(F).getContents().size();
@@ -420,62 +296,48 @@ uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
case MCFragment::FT_Inst:
return cast<MCInstFragment>(F).getInstSize();
+ case MCFragment::FT_LEB:
+ return cast<MCLEBFragment>(F).getContents().size();
+
case MCFragment::FT_Align: {
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-
- assert((!AF.hasOnlyAlignAddress() || !AF.getNextNode()) &&
- "Invalid OnlyAlignAddress bit, not the last fragment!");
-
- uint64_t Size = OffsetToAlignment(SectionAddress + FragmentOffset,
- AF.getAlignment());
-
- // Honor MaxBytesToEmit.
+ unsigned Offset = Layout.getFragmentOffset(&AF);
+ unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
if (Size > AF.getMaxBytesToEmit())
return 0;
-
return Size;
}
case MCFragment::FT_Org: {
- const MCOrgFragment &OF = cast<MCOrgFragment>(F);
-
- // FIXME: We should compute this sooner, we don't want to recurse here, and
- // we would like to be more functional.
+ MCOrgFragment &OF = cast<MCOrgFragment>(F);
int64_t TargetLocation;
- if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout))
+ if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, Layout))
report_fatal_error("expected assembly-time absolute expression");
// FIXME: We need a way to communicate this error.
- int64_t Offset = TargetLocation - FragmentOffset;
- if (Offset < 0)
+ uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
+ int64_t Size = TargetLocation - FragmentOffset;
+ if (Size < 0 || Size >= 0x40000000)
report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
- "' (at offset '" + Twine(FragmentOffset) + "'");
-
- return Offset;
+ "' (at offset '" + Twine(FragmentOffset) + "')");
+ return Size;
}
+
+ case MCFragment::FT_Dwarf:
+ return cast<MCDwarfLineAddrFragment>(F).getContents().size();
+ case MCFragment::FT_DwarfFrame:
+ return cast<MCDwarfCallFrameFragment>(F).getContents().size();
}
assert(0 && "invalid fragment kind");
return 0;
}
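
The FT_Align case now sizes itself from the fragment's own section offset rather than a global address: the size is just the distance to the next alignment boundary, zeroed when it exceeds MaxBytesToEmit. Concretely (hypothetical numbers):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors llvm::OffsetToAlignment for power-of-two alignments.
static uint64_t OffsetToAlignment(uint64_t Offset, uint64_t Align) {
  return (Align - (Offset & (Align - 1))) & (Align - 1);
}

int main() {
  // An align-to-16 fragment whose own offset is 77 is 3 bytes of padding...
  assert(OffsetToAlignment(77, 16) == 3);

  // ...unless that exceeds MaxBytesToEmit, in which case it emits nothing,
  // matching the "if (Size > AF.getMaxBytesToEmit()) return 0" case above.
  uint64_t Size = OffsetToAlignment(77, 16);
  const unsigned MaxBytesToEmit = 2;
  if (Size > MaxBytesToEmit)
    Size = 0;
  assert(Size == 0);
  return 0;
}
```
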
-void MCAsmLayout::LayoutFile() {
- // Initialize the first section and set the valid fragment layout point. All
- // actual layout computations are done lazily.
- LastValidFragment = 0;
- if (!getSectionOrder().empty())
- getSectionOrder().front()->Address = 0;
-}
-
void MCAsmLayout::LayoutFragment(MCFragment *F) {
MCFragment *Prev = F->getPrevNode();
// We should never try to recompute something which is up-to-date.
assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!");
- // We should never try to compute the fragment layout if the section isn't
- // up-to-date.
- assert(isSectionUpToDate(F->getParent()) &&
- "Attempt to compute fragment before it's section!");
  // We should never try to compute the fragment layout if its predecessor
// isn't up-to-date.
assert((!Prev || isFragmentUpToDate(Prev)) &&
@@ -483,55 +345,26 @@ void MCAsmLayout::LayoutFragment(MCFragment *F) {
++stats::FragmentLayouts;
- // Compute the fragment start address.
- uint64_t StartAddress = F->getParent()->Address;
- uint64_t Address = StartAddress;
- if (Prev)
- Address += Prev->Offset + Prev->EffectiveSize;
-
// Compute fragment offset and size.
- F->Offset = Address - StartAddress;
- F->EffectiveSize = getAssembler().ComputeFragmentSize(*this, *F, StartAddress,
- F->Offset);
- LastValidFragment = F;
-
- // If this is the last fragment in a section, update the next section address.
- if (!F->getNextNode()) {
- unsigned NextIndex = F->getParent()->getLayoutOrder() + 1;
- if (NextIndex != getSectionOrder().size())
- LayoutSection(getSectionOrder()[NextIndex]);
- }
-}
-
-void MCAsmLayout::LayoutSection(MCSectionData *SD) {
- unsigned SectionOrderIndex = SD->getLayoutOrder();
-
- ++stats::SectionLayouts;
-
- // Compute the section start address.
- uint64_t StartAddress = 0;
- if (SectionOrderIndex) {
- MCSectionData *Prev = getSectionOrder()[SectionOrderIndex - 1];
- StartAddress = getSectionAddress(Prev) + getSectionAddressSize(Prev);
- }
-
- // Honor the section alignment requirements.
- StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
+ uint64_t Offset = 0;
+ if (Prev)
+ Offset += Prev->Offset + getAssembler().ComputeFragmentSize(*this, *Prev);
- // Set the section address.
- SD->Address = StartAddress;
+ F->Offset = Offset;
+ LastValidFragment[F->getParent()] = F;
}
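// [Editorial note] The invariant LayoutFragment now maintains, in sketch
// form: offsets are per-section and purely cumulative, with no global
// section addresses involved (the loop below is illustrative pseudocode):
//
//   uint64_t Offset = 0;
//   for each Fragment F in Section (in order) {
//     F.Offset = Offset;
//     Offset += ComputeFragmentSize(Layout, F);
//   }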
/// WriteFragmentData - Write the \arg F data to the output file.
static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment &F, MCObjectWriter *OW) {
+ const MCFragment &F) {
+ MCObjectWriter *OW = &Asm.getWriter();
uint64_t Start = OW->getStream().tell();
(void) Start;
++stats::EmittedFragments;
// FIXME: Embed in fragments instead?
- uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F);
+ uint64_t FragmentSize = Asm.ComputeFragmentSize(Layout, F);
switch (F.getKind()) {
case MCFragment::FT_Align: {
MCAlignFragment &AF = cast<MCAlignFragment>(F);
@@ -598,9 +431,17 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
break;
}
- case MCFragment::FT_Inst:
- llvm_unreachable("unexpected inst fragment after lowering");
+ case MCFragment::FT_Inst: {
+ MCInstFragment &IF = cast<MCInstFragment>(F);
+ OW->WriteBytes(StringRef(IF.getCode().begin(), IF.getCode().size()));
+ break;
+ }
+
+ case MCFragment::FT_LEB: {
+ MCLEBFragment &LF = cast<MCLEBFragment>(F);
+ OW->WriteBytes(LF.getContents().str());
break;
+ }
case MCFragment::FT_Org: {
MCOrgFragment &OF = cast<MCOrgFragment>(F);
@@ -610,16 +451,26 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
break;
}
+
+ case MCFragment::FT_Dwarf: {
+ const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
+ OW->WriteBytes(OF.getContents().str());
+ break;
+ }
+ case MCFragment::FT_DwarfFrame: {
+ const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
+ OW->WriteBytes(CF.getContents().str());
+ break;
+ }
}
assert(OW->getStream().tell() - Start == FragmentSize);
}
void MCAssembler::WriteSectionData(const MCSectionData *SD,
- const MCAsmLayout &Layout,
- MCObjectWriter *OW) const {
+ const MCAsmLayout &Layout) const {
// Ignore virtual sections.
- if (getBackend().isVirtualSection(SD->getSection())) {
+ if (SD->getSection().isVirtualSection()) {
assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");
// Check that contents are only things legal inside a virtual section.
@@ -657,51 +508,34 @@ void MCAssembler::WriteSectionData(const MCSectionData *SD,
return;
}
- uint64_t Start = OW->getStream().tell();
+ uint64_t Start = getWriter().getStream().tell();
(void) Start;
for (MCSectionData::const_iterator it = SD->begin(),
ie = SD->end(); it != ie; ++it)
- WriteFragmentData(*this, Layout, *it, OW);
+ WriteFragmentData(*this, Layout, *it);
- assert(OW->getStream().tell() - Start == Layout.getSectionFileSize(SD));
+ assert(getWriter().getStream().tell() - Start ==
+ Layout.getSectionAddressSize(SD));
}
-void MCAssembler::AddSectionToTheEnd(MCSectionData &SD, MCAsmLayout &Layout) {
- // Create dummy fragments and assign section ordinals.
- unsigned SectionIndex = 0;
- for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it)
- SectionIndex++;
-
- SD.setOrdinal(SectionIndex);
-
- // Assign layout order indices to sections and fragments.
- unsigned FragmentIndex = 0;
- unsigned i = 0;
- for (unsigned e = Layout.getSectionOrder().size(); i != e; ++i) {
- MCSectionData *SD = Layout.getSectionOrder()[i];
- for (MCSectionData::iterator it2 = SD->begin(),
- ie2 = SD->end(); it2 != ie2; ++it2)
- FragmentIndex++;
- }
+uint64_t MCAssembler::HandleFixup(const MCAsmLayout &Layout,
+ MCFragment &F,
+ const MCFixup &Fixup) {
+ // Evaluate the fixup.
+ MCValue Target;
+ uint64_t FixedValue;
+ if (!EvaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
+ // The fixup was unresolved, we need a relocation. Inform the object
+ // writer of the relocation, and give it an opportunity to adjust the
+ // fixup value if need be.
+ getWriter().RecordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
+ }
+ return FixedValue;
+ }
- SD.setLayoutOrder(i);
- for (MCSectionData::iterator it2 = SD.begin(),
- ie2 = SD.end(); it2 != ie2; ++it2) {
- it2->setLayoutOrder(FragmentIndex++);
- }
- Layout.getSectionOrder().push_back(&SD);
-
- Layout.LayoutSection(&SD);
-
- // Layout until everything fits.
- while (LayoutOnce(Layout))
- continue;
-
-}
-
-void MCAssembler::Finish(MCObjectWriter *Writer) {
+void MCAssembler::Finish() {
DEBUG_WITH_TYPE("mc-dump", {
llvm::errs() << "assembler backend - pre-layout\n--\n";
dump(); });
@@ -709,47 +543,23 @@ void MCAssembler::Finish(MCObjectWriter *Writer) {
// Create the layout object.
MCAsmLayout Layout(*this);
- // Insert additional align fragments for concrete sections to explicitly pad
- // the previous section to match their alignment requirements. This is for
- // 'gas' compatibility, it shouldn't strictly be necessary.
- //
- // FIXME: This may be Mach-O specific.
- for (unsigned i = 1, e = Layout.getSectionOrder().size(); i < e; ++i) {
- MCSectionData *SD = Layout.getSectionOrder()[i];
-
- // Ignore sections without alignment requirements.
- unsigned Align = SD->getAlignment();
- if (Align <= 1)
- continue;
-
- // Ignore virtual sections, they don't cause file size modifications.
- if (getBackend().isVirtualSection(SD->getSection()))
- continue;
-
- // Otherwise, create a new align fragment at the end of the previous
- // section.
- MCAlignFragment *AF = new MCAlignFragment(Align, 0, 1, Align,
- Layout.getSectionOrder()[i - 1]);
- AF->setOnlyAlignAddress(true);
- }
-
// Create dummy fragments and assign section ordinals.
unsigned SectionIndex = 0;
for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
// Create dummy fragments to eliminate any empty sections, this simplifies
// layout.
if (it->getFragmentList().empty())
- new MCFillFragment(0, 1, 0, it);
+ new MCDataFragment(it);
it->setOrdinal(SectionIndex++);
}
// Assign layout order indices to sections and fragments.
- unsigned FragmentIndex = 0;
for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
MCSectionData *SD = Layout.getSectionOrder()[i];
SD->setLayoutOrder(i);
+ unsigned FragmentIndex = 0;
for (MCSectionData::iterator it2 = SD->begin(),
ie2 = SD->end(); it2 != ie2; ++it2)
it2->setLayoutOrder(FragmentIndex++);
@@ -772,48 +582,39 @@ void MCAssembler::Finish(MCObjectWriter *Writer) {
uint64_t StartOffset = OS.tell();
- llvm::OwningPtr<MCObjectWriter> OwnWriter(0);
- if (Writer == 0) {
- //no custom Writer_ : create the default one life-managed by OwningPtr
- OwnWriter.reset(getBackend().createObjectWriter(OS));
- Writer = OwnWriter.get();
- if (!Writer)
- report_fatal_error("unable to create object writer!");
- }
-
// Allow the object writer a chance to perform post-layout binding (for
// example, to set the index fields in the symbol data).
- Writer->ExecutePostLayoutBinding(*this);
+ getWriter().ExecutePostLayoutBinding(*this, Layout);
// Evaluate and apply the fixups, generating relocation entries as necessary.
for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
for (MCSectionData::iterator it2 = it->begin(),
ie2 = it->end(); it2 != ie2; ++it2) {
MCDataFragment *DF = dyn_cast<MCDataFragment>(it2);
- if (!DF)
- continue;
-
- for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
- ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
- MCFixup &Fixup = *it3;
-
- // Evaluate the fixup.
- MCValue Target;
- uint64_t FixedValue;
- if (!EvaluateFixup(Layout, Fixup, DF, Target, FixedValue)) {
- // The fixup was unresolved, we need a relocation. Inform the object
- // writer of the relocation, and give it an opportunity to adjust the
- // fixup value if need be.
- Writer->RecordRelocation(*this, Layout, DF, Fixup, Target,FixedValue);
+ if (DF) {
+ for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
+ ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
+ MCFixup &Fixup = *it3;
+ uint64_t FixedValue = HandleFixup(Layout, *DF, Fixup);
+ getBackend().ApplyFixup(Fixup, DF->getContents().data(),
+ DF->getContents().size(), FixedValue);
+ }
+ }
+ MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
+ if (IF) {
+ for (MCInstFragment::fixup_iterator it3 = IF->fixup_begin(),
+ ie3 = IF->fixup_end(); it3 != ie3; ++it3) {
+ MCFixup &Fixup = *it3;
+ uint64_t FixedValue = HandleFixup(Layout, *IF, Fixup);
+ getBackend().ApplyFixup(Fixup, IF->getCode().data(),
+ IF->getCode().size(), FixedValue);
}
-
- getBackend().ApplyFixup(Fixup, *DF, FixedValue);
}
}
}
// Write the object file.
- Writer->WriteObject(*this, Layout);
+ getWriter().WriteObject(*this, Layout);
stats::ObjectBytes += OS.tell() - StartOffset;
}
@@ -852,100 +653,144 @@ bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF,
return false;
}
-bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
- ++stats::RelaxationSteps;
+bool MCAssembler::RelaxInstruction(MCAsmLayout &Layout,
+ MCInstFragment &IF) {
+ if (!FragmentNeedsRelaxation(&IF, Layout))
+ return false;
- // Layout the sections in order.
- Layout.LayoutFile();
+ ++stats::RelaxedInstructions;
+ // FIXME-PERF: We could immediately lower out instructions if we can tell
+ // they are fully resolved, to avoid retesting on later passes.
+
+ // Relax the fragment.
+
+ MCInst Relaxed;
+ getBackend().RelaxInstruction(IF.getInst(), Relaxed);
+
+ // Encode the new instruction.
+ //
+ // FIXME-PERF: If it matters, we could let the target do this. It can
+ // probably do so more efficiently in many cases.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
+ VecOS.flush();
+
+ // Update the instruction fragment.
+ IF.setInst(Relaxed);
+ IF.getCode() = Code;
+ IF.getFixups().clear();
+ // FIXME: Eliminate copy.
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
+ IF.getFixups().push_back(Fixups[i]);
+
+ return true;
+}
+
+bool MCAssembler::RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
+ int64_t Value = 0;
+ uint64_t OldSize = LF.getContents().size();
+ LF.getValue().EvaluateAsAbsolute(Value, Layout);
+ SmallString<8> &Data = LF.getContents();
+ Data.clear();
+ raw_svector_ostream OSE(Data);
+ if (LF.isSigned())
+ MCObjectWriter::EncodeSLEB128(Value, OSE);
+ else
+ MCObjectWriter::EncodeULEB128(Value, OSE);
+ OSE.flush();
+ return OldSize != LF.getContents().size();
+}
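// [Editorial sketch] RelaxLEB re-encodes until the byte count stabilizes.
// For reference, ULEB128 emits 7 payload bits per byte, LSB first, with the
// high bit as a continuation flag. A self-contained encoder equivalent in
// effect to MCObjectWriter::EncodeULEB128 (name and container are
// illustrative):
#include <cstdint>
#include <vector>
static void EncodeULEB128Sketch(uint64_t Value, std::vector<uint8_t> &Out) {
  do {
    uint8_t Byte = Value & 0x7f; // low 7 bits of the remaining value
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;              // continuation bit: more bytes follow
    Out.push_back(Byte);
  } while (Value != 0);
}
// e.g. 624485 encodes as 0xE5 0x8E 0x26 (the classic DWARF spec example).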
+
+bool MCAssembler::RelaxDwarfLineAddr(MCAsmLayout &Layout,
+ MCDwarfLineAddrFragment &DF) {
+ int64_t AddrDelta = 0;
+ uint64_t OldSize = DF.getContents().size();
+ bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout);
+ (void)IsAbs;
+ assert(IsAbs);
+ int64_t LineDelta;
+ LineDelta = DF.getLineDelta();
+ SmallString<8> &Data = DF.getContents();
+ Data.clear();
+ raw_svector_ostream OSE(Data);
+ MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OSE);
+ OSE.flush();
+ return OldSize != Data.size();
+}
+
+bool MCAssembler::RelaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+ MCDwarfCallFrameFragment &DF) {
+ int64_t AddrDelta = 0;
+ uint64_t OldSize = DF.getContents().size();
+ bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout);
+ (void)IsAbs;
+ assert(IsAbs);
+ SmallString<8> &Data = DF.getContents();
+ Data.clear();
+ raw_svector_ostream OSE(Data);
+ MCDwarfFrameEmitter::EncodeAdvanceLoc(AddrDelta, OSE);
+ OSE.flush();
+ return OldSize != Data.size();
+}
+
+bool MCAssembler::LayoutSectionOnce(MCAsmLayout &Layout,
+ MCSectionData &SD) {
+ MCFragment *FirstInvalidFragment = NULL;
// Scan for fragments that need relaxation.
+ for (MCSectionData::iterator it2 = SD.begin(),
+ ie2 = SD.end(); it2 != ie2; ++it2) {
+ // Check if this is a fragment that needs relaxation.
+ bool relaxedFrag = false;
+ switch(it2->getKind()) {
+ default:
+ break;
+ case MCFragment::FT_Inst:
+ relaxedFrag = RelaxInstruction(Layout, *cast<MCInstFragment>(it2));
+ break;
+ case MCFragment::FT_Dwarf:
+ relaxedFrag = RelaxDwarfLineAddr(Layout,
+ *cast<MCDwarfLineAddrFragment>(it2));
+ break;
+ case MCFragment::FT_DwarfFrame:
+ relaxedFrag =
+ RelaxDwarfCallFrameFragment(Layout,
+ *cast<MCDwarfCallFrameFragment>(it2));
+ break;
+ case MCFragment::FT_LEB:
+ relaxedFrag = RelaxLEB(Layout, *cast<MCLEBFragment>(it2));
+ break;
+ }
+ // Update the layout, and remember that we relaxed.
+ if (relaxedFrag && !FirstInvalidFragment)
+ FirstInvalidFragment = it2;
+ }
+ if (FirstInvalidFragment) {
+ Layout.Invalidate(FirstInvalidFragment);
+ return true;
+ }
+ return false;
+}
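// [Editorial note] Why invalidating from the first relaxed fragment is
// enough: offsets are cumulative within a section, so only fragments at or
// after the first size change have stale offsets. Schematically:
//
//   [F0][F1][F2*][F3][F4]   // F2 grew: F0 and F1 keep valid offsets,
//                           // F2..F4 must be laid out again.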
+
+bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
+ ++stats::RelaxationSteps;
+
bool WasRelaxed = false;
for (iterator it = begin(), ie = end(); it != ie; ++it) {
MCSectionData &SD = *it;
-
- for (MCSectionData::iterator it2 = SD.begin(),
- ie2 = SD.end(); it2 != ie2; ++it2) {
- // Check if this is an instruction fragment that needs relaxation.
- MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
- if (!IF || !FragmentNeedsRelaxation(IF, Layout))
- continue;
-
- ++stats::RelaxedInstructions;
-
- // FIXME-PERF: We could immediately lower out instructions if we can tell
- // they are fully resolved, to avoid retesting on later passes.
-
- // Relax the fragment.
-
- MCInst Relaxed;
- getBackend().RelaxInstruction(IF->getInst(), Relaxed);
-
- // Encode the new instruction.
- //
- // FIXME-PERF: If it matters, we could let the target do this. It can
- // probably do so more efficiently in many cases.
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
- VecOS.flush();
-
- // Update the instruction fragment.
- int SlideAmount = Code.size() - IF->getInstSize();
- IF->setInst(Relaxed);
- IF->getCode() = Code;
- IF->getFixups().clear();
- // FIXME: Eliminate copy.
- for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
- IF->getFixups().push_back(Fixups[i]);
-
- // Update the layout, and remember that we relaxed.
- Layout.UpdateForSlide(IF, SlideAmount);
+ while(LayoutSectionOnce(Layout, SD))
WasRelaxed = true;
- }
}
return WasRelaxed;
}
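// [Editorial note] LayoutOnce is driven to a fixed point by its caller, in
// the shape of (sketch; the real driver sits elsewhere in this file):
//
//   while (LayoutOnce(Layout))
//     ; // keep going until no fragment changed size
//
// Informally, relaxation pushes each fragment toward a bounded maximal
// encoding, so the iteration settles.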
void MCAssembler::FinishLayout(MCAsmLayout &Layout) {
- // Lower out any instruction fragments, to simplify the fixup application and
- // output.
- //
- // FIXME-PERF: We don't have to do this, but the assumption is that it is
- // cheap (we will mostly end up eliminating fragments and appending on to data
- // fragments), so the extra complexity downstream isn't worth it. Evaluate
- // this assumption.
- for (iterator it = begin(), ie = end(); it != ie; ++it) {
- MCSectionData &SD = *it;
-
- for (MCSectionData::iterator it2 = SD.begin(),
- ie2 = SD.end(); it2 != ie2; ++it2) {
- MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
- if (!IF)
- continue;
-
- // Create a new data fragment for the instruction.
- //
- // FIXME-PERF: Reuse previous data fragment if possible.
- MCDataFragment *DF = new MCDataFragment();
- SD.getFragmentList().insert(it2, DF);
-
- // Update the data fragments layout data.
- DF->setParent(IF->getParent());
- DF->setAtom(IF->getAtom());
- DF->setLayoutOrder(IF->getLayoutOrder());
- Layout.FragmentReplaced(IF, DF);
-
- // Copy in the data and the fixups.
- DF->getContents().append(IF->getCode().begin(), IF->getCode().end());
- for (unsigned i = 0, e = IF->getFixups().size(); i != e; ++i)
- DF->getFixups().push_back(IF->getFixups()[i]);
-
- // Delete the instruction fragment and update the iterator.
- SD.getFragmentList().erase(IF);
- it2 = DF;
- }
+ // The layout is done. Mark every fragment as valid.
+ for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
+ Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin());
}
}
@@ -972,18 +817,19 @@ void MCFragment::dump() {
case MCFragment::FT_Fill: OS << "MCFillFragment"; break;
case MCFragment::FT_Inst: OS << "MCInstFragment"; break;
case MCFragment::FT_Org: OS << "MCOrgFragment"; break;
+ case MCFragment::FT_Dwarf: OS << "MCDwarfFragment"; break;
+ case MCFragment::FT_DwarfFrame: OS << "MCDwarfCallFrameFragment"; break;
+ case MCFragment::FT_LEB: OS << "MCLEBFragment"; break;
}
OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
- << " Offset:" << Offset << " EffectiveSize:" << EffectiveSize << ">";
+ << " Offset:" << Offset << ">";
switch (getKind()) {
case MCFragment::FT_Align: {
const MCAlignFragment *AF = cast<MCAlignFragment>(this);
if (AF->hasEmitNops())
OS << " (emit nops)";
- if (AF->hasOnlyAlignAddress())
- OS << " (only align section)";
OS << "\n ";
OS << " Alignment:" << AF->getAlignment()
<< " Value:" << AF->getValue() << " ValueSize:" << AF->getValueSize()
@@ -1032,6 +878,25 @@ void MCFragment::dump() {
OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue();
break;
}
+ case MCFragment::FT_Dwarf: {
+ const MCDwarfLineAddrFragment *OF = cast<MCDwarfLineAddrFragment>(this);
+ OS << "\n ";
+ OS << " AddrDelta:" << OF->getAddrDelta()
+ << " LineDelta:" << OF->getLineDelta();
+ break;
+ }
+ case MCFragment::FT_DwarfFrame: {
+ const MCDwarfCallFrameFragment *CF = cast<MCDwarfCallFrameFragment>(this);
+ OS << "\n ";
+ OS << " AddrDelta:" << CF->getAddrDelta();
+ break;
+ }
+ case MCFragment::FT_LEB: {
+ const MCLEBFragment *LF = cast<MCLEBFragment>(this);
+ OS << "\n ";
+ OS << " Value:" << LF->getValue() << " Signed:" << LF->isSigned();
+ break;
+ }
}
OS << ">";
}
@@ -1040,8 +905,7 @@ void MCSectionData::dump() {
raw_ostream &OS = llvm::errs();
OS << "<MCSectionData";
- OS << " Alignment:" << getAlignment() << " Address:" << Address
- << " Fragments:[\n ";
+ OS << " Alignment:" << getAlignment() << " Fragments:[\n ";
for (iterator it = begin(), ie = end(); it != ie; ++it) {
if (it != begin()) OS << ",\n ";
it->dump();
diff --git a/contrib/llvm/lib/MC/MCCodeEmitter.cpp b/contrib/llvm/lib/MC/MCCodeEmitter.cpp
index d513237..c122763 100644
--- a/contrib/llvm/lib/MC/MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/MC/MCCodeEmitter.cpp
@@ -16,15 +16,3 @@ MCCodeEmitter::MCCodeEmitter() {
MCCodeEmitter::~MCCodeEmitter() {
}
-
-const MCFixupKindInfo &MCCodeEmitter::getFixupKindInfo(MCFixupKind Kind) const {
- static const MCFixupKindInfo Builtins[] = {
- { "FK_Data_1", 0, 8, 0 },
- { "FK_Data_2", 0, 16, 0 },
- { "FK_Data_4", 0, 32, 0 },
- { "FK_Data_8", 0, 64, 0 }
- };
-
- assert(Kind <= 3 && "Unknown fixup kind");
- return Builtins[Kind];
-}
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index e5586a0..018f00c 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -15,8 +15,10 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCLabel.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
typedef StringMap<const MCSectionMachO*> MachOUniqueMapTy;
@@ -24,8 +26,9 @@ typedef StringMap<const MCSectionELF*> ELFUniqueMapTy;
typedef StringMap<const MCSectionCOFF*> COFFUniqueMapTy;
-MCContext::MCContext(const MCAsmInfo &mai) : MAI(mai), NextUniqueID(0),
- CurrentDwarfLoc(0,0,0,0,0) {
+MCContext::MCContext(const MCAsmInfo &mai, const TargetAsmInfo *tai) :
+ MAI(mai), TAI(tai), NextUniqueID(0),
+ CurrentDwarfLoc(0,0,0,DWARF2_FLAG_IS_STMT,0,0) {
MachOUniquingMap = 0;
ELFUniquingMap = 0;
COFFUniquingMap = 0;
@@ -40,7 +43,7 @@ MCContext::MCContext(const MCAsmInfo &mai) : MAI(mai), NextUniqueID(0),
MCContext::~MCContext() {
// NOTE: The symbols are all allocated out of a bump pointer allocator,
// we don't need to free them here.
-
+
// If we have the MachO uniquing map, free it.
delete (MachOUniqueMapTy*)MachOUniquingMap;
delete (ELFUniqueMapTy*)ELFUniquingMap;
@@ -48,6 +51,8 @@ MCContext::~MCContext() {
// If the stream for the .secure_log_unique directive was created free it.
delete (raw_ostream*)SecureLog;
+
+ delete TAI;
}
//===----------------------------------------------------------------------===//
@@ -56,20 +61,42 @@ MCContext::~MCContext() {
MCSymbol *MCContext::GetOrCreateSymbol(StringRef Name) {
assert(!Name.empty() && "Normal symbols cannot be unnamed!");
-
- // Determine whether this is an assembler temporary or normal label.
- bool isTemporary = Name.startswith(MAI.getPrivateGlobalPrefix());
-
+
// Do the lookup and get the entire StringMapEntry. We want access to the
// key if we are creating the entry.
StringMapEntry<MCSymbol*> &Entry = Symbols.GetOrCreateValue(Name);
- if (Entry.getValue()) return Entry.getValue();
+ MCSymbol *Sym = Entry.getValue();
+
+ if (Sym)
+ return Sym;
+
+ Sym = CreateSymbol(Name);
+ Entry.setValue(Sym);
+ return Sym;
+}
+
+MCSymbol *MCContext::CreateSymbol(StringRef Name) {
+ // Determine whether this is an assembler temporary or normal label.
+ bool isTemporary = Name.startswith(MAI.getPrivateGlobalPrefix());
+
+ StringMapEntry<bool> *NameEntry = &UsedNames.GetOrCreateValue(Name);
+ if (NameEntry->getValue()) {
+ assert(isTemporary && "Cannot rename non-temporary symbols");
+ SmallString<128> NewName;
+ do {
+ Twine T = Name + Twine(NextUniqueID++);
+ T.toVector(NewName);
+ StringRef foo = NewName;
+ NameEntry = &UsedNames.GetOrCreateValue(foo);
+ } while (NameEntry->getValue());
+ }
+ NameEntry->setValue(true);
// Ok, the entry doesn't already exist. Have the MCSymbol object itself refer
- // to the copy of the string that is embedded in the StringMapEntry.
- MCSymbol *Result = new (*this) MCSymbol(Entry.getKey(), isTemporary);
- Entry.setValue(Result);
- return Result;
+ // to the copy of the string that is embedded in the UsedNames entry.
+ MCSymbol *Result = new (*this) MCSymbol(NameEntry->getKey(), isTemporary);
+
+ return Result;
}
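// [Editorial example] The renaming loop above in action (names and counter
// values hypothetical): suppose the temporary name "Ltmp0" is already in
// UsedNames and NextUniqueID is 7. The loop tries "Ltmp07", then "Ltmp08",
// and so on until it finds an unused name, marks it used, and builds the
// MCSymbol on the UsedNames copy of the string. A non-temporary duplicate
// instead trips the assert.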
MCSymbol *MCContext::GetOrCreateSymbol(const Twine &Name) {
@@ -79,8 +106,11 @@ MCSymbol *MCContext::GetOrCreateSymbol(const Twine &Name) {
}
MCSymbol *MCContext::CreateTempSymbol() {
- return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
- "tmp" + Twine(NextUniqueID++));
+ SmallString<128> NameSV;
+ Twine Name = Twine(MAI.getPrivateGlobalPrefix()) + "tmp" +
+ Twine(NextUniqueID++);
+ Name.toVector(NameSV);
+ return CreateSymbol(NameSV);
}
unsigned MCContext::NextInstance(int64_t LocalLabelVal) {
@@ -123,49 +153,70 @@ const MCSectionMachO *MCContext::
getMachOSection(StringRef Segment, StringRef Section,
unsigned TypeAndAttributes,
unsigned Reserved2, SectionKind Kind) {
-
+
// We unique sections by their segment/section pair. The returned section
// may not have the same flags as the requested section, if so this should be
// diagnosed by the client as an error.
-
+
// Create the map if it doesn't already exist.
if (MachOUniquingMap == 0)
MachOUniquingMap = new MachOUniqueMapTy();
MachOUniqueMapTy &Map = *(MachOUniqueMapTy*)MachOUniquingMap;
-
+
// Form the name to look up.
SmallString<64> Name;
Name += Segment;
Name.push_back(',');
Name += Section;
-
+
// Do the lookup, if we have a hit, return it.
const MCSectionMachO *&Entry = Map[Name.str()];
if (Entry) return Entry;
-
+
// Otherwise, return a new section.
return Entry = new (*this) MCSectionMachO(Segment, Section, TypeAndAttributes,
Reserved2, Kind);
}
+const MCSectionELF *MCContext::
+getELFSection(StringRef Section, unsigned Type, unsigned Flags,
+ SectionKind Kind) {
+ return getELFSection(Section, Type, Flags, Kind, 0, "");
+}
-const MCSection *MCContext::
+const MCSectionELF *MCContext::
getELFSection(StringRef Section, unsigned Type, unsigned Flags,
- SectionKind Kind, bool IsExplicit, unsigned EntrySize) {
+ SectionKind Kind, unsigned EntrySize, StringRef Group) {
if (ELFUniquingMap == 0)
ELFUniquingMap = new ELFUniqueMapTy();
ELFUniqueMapTy &Map = *(ELFUniqueMapTy*)ELFUniquingMap;
-
+
// Do the lookup, if we have a hit, return it.
StringMapEntry<const MCSectionELF*> &Entry = Map.GetOrCreateValue(Section);
if (Entry.getValue()) return Entry.getValue();
-
+
+ // Possibly refine the entry size first.
+ if (!EntrySize) {
+ EntrySize = MCSectionELF::DetermineEntrySize(Kind);
+ }
+
+ MCSymbol *GroupSym = NULL;
+ if (!Group.empty())
+ GroupSym = GetOrCreateSymbol(Group);
+
MCSectionELF *Result = new (*this) MCSectionELF(Entry.getKey(), Type, Flags,
- Kind, IsExplicit, EntrySize);
+ Kind, EntrySize, GroupSym);
Entry.setValue(Result);
return Result;
}
+const MCSectionELF *MCContext::CreateELFGroupSection() {
+ MCSectionELF *Result =
+ new (*this) MCSectionELF(".group", ELF::SHT_GROUP, 0,
+ SectionKind::getReadOnly(), 4, NULL);
+ return Result;
+}
+
const MCSection *MCContext::getCOFFSection(StringRef Section,
unsigned Characteristics,
int Selection,
@@ -173,15 +224,15 @@ const MCSection *MCContext::getCOFFSection(StringRef Section,
if (COFFUniquingMap == 0)
COFFUniquingMap = new COFFUniqueMapTy();
COFFUniqueMapTy &Map = *(COFFUniqueMapTy*)COFFUniquingMap;
-
+
// Do the lookup, if we have a hit, return it.
StringMapEntry<const MCSectionCOFF*> &Entry = Map.GetOrCreateValue(Section);
if (Entry.getValue()) return Entry.getValue();
-
+
MCSectionCOFF *Result = new (*this) MCSectionCOFF(Entry.getKey(),
Characteristics,
Selection, Kind);
-
+
Entry.setValue(Result);
return Result;
}
@@ -240,7 +291,7 @@ unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
// stored at MCDwarfFiles[FileNumber].Name .
DirIndex++;
}
-
+
// Now make the MCDwarfFile entry and place it in the slot in the MCDwarfFiles
// vector.
char *Buf = static_cast<char *>(Allocate(Name.size()));
@@ -251,15 +302,11 @@ unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
return FileNumber;
}
-/// ValidateDwarfFileNumber - takes a dwarf file number and returns true if it
+/// isValidDwarfFileNumber - takes a dwarf file number and returns true if it
/// currently is assigned and false otherwise.
-bool MCContext::ValidateDwarfFileNumber(unsigned FileNumber) {
+bool MCContext::isValidDwarfFileNumber(unsigned FileNumber) {
if(FileNumber == 0 || FileNumber >= MCDwarfFiles.size())
return false;
- MCDwarfFile *&ExistingFile = MCDwarfFiles[FileNumber];
- if (ExistingFile)
- return true;
- else
- return false;
+ return MCDwarfFiles[FileNumber] != 0;
}
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
index 697b3d9..2fd14db 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
@@ -354,7 +354,7 @@ int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
SourceMgr sourceMgr;
sourceMgr.AddNewSourceBuffer(buf, SMLoc()); // ownership of buf handed over
- MCContext context(*AsmInfo);
+ MCContext context(*AsmInfo, NULL);
OwningPtr<MCStreamer> streamer(createNullStreamer(context));
OwningPtr<MCAsmParser> genericParser(createMCAsmParser(*Tgt, sourceMgr,
context, *streamer,
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
index e2f850b..71e45f0 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
@@ -21,7 +21,7 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <map>
#include <set>
@@ -89,8 +89,10 @@ struct EDDisassembler {
bool operator<(const CPUKey &key) const {
if(Arch > key.Arch)
return false;
- if(Syntax >= key.Syntax)
- return false;
+ else if (Arch == key.Arch) {
+ if(Syntax > key.Syntax)
+ return false;
+ }
return true;
}
};
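// [Editorial note] The change makes the comparison lexicographic on
// (Arch, Syntax) instead of letting Syntax alone decide when Arch differs.
// Note that the rewritten form still returns true when both fields are
// equal, so it is not yet a strict weak ordering for ordered containers.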
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDInst.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDInst.cpp
index e22408f..63b049f 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDInst.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDInst.cpp
@@ -62,6 +62,8 @@ int EDInst::stringify() {
if (Disassembler.printInst(String, *Inst))
return StringifyResult.setResult(-1);
+
+ String.push_back('\n');
return StringifyResult.setResult(0);
}
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDInst.h b/contrib/llvm/lib/MC/MCDisassembler/EDInst.h
index 39d264f..ceb9505 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDInst.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDInst.h
@@ -16,7 +16,7 @@
#ifndef LLVM_EDINST_H
#define LLVM_EDINST_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/SmallVector.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
index 2aed123..cfeb56f 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
@@ -260,23 +260,20 @@ int EDOperand::isMemory() {
}
#ifdef __BLOCKS__
-struct RegisterReaderWrapper {
- EDOperand::EDRegisterBlock_t regBlock;
-};
+namespace {
+ struct RegisterReaderWrapper {
+ EDOperand::EDRegisterBlock_t regBlock;
+ };
+}
-int readerWrapperCallback(uint64_t *value,
- unsigned regID,
- void *arg) {
- struct RegisterReaderWrapper *wrapper = (struct RegisterReaderWrapper *)arg;
+static int readerWrapperCallback(uint64_t *value, unsigned regID, void *arg) {
+ RegisterReaderWrapper *wrapper = (RegisterReaderWrapper *)arg;
return wrapper->regBlock(value, regID);
}
-int EDOperand::evaluate(uint64_t &result,
- EDRegisterBlock_t regBlock) {
- struct RegisterReaderWrapper wrapper;
+int EDOperand::evaluate(uint64_t &result, EDRegisterBlock_t regBlock) {
+ RegisterReaderWrapper wrapper;
wrapper.regBlock = regBlock;
- return evaluate(result,
- readerWrapperCallback,
- (void*)&wrapper);
+ return evaluate(result, readerWrapperCallback, (void*)&wrapper);
}
#endif
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.h b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.h
index 6e69522..50260ec 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.h
@@ -16,7 +16,7 @@
#ifndef LLVM_EDOPERAND_H
#define LLVM_EDOPERAND_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDToken.h b/contrib/llvm/lib/MC/MCDisassembler/EDToken.h
index 6b2aeac..ba46707 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDToken.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDToken.h
@@ -17,7 +17,7 @@
#define LLVM_EDTOKEN_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index 2da71f9..112d7d8 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -7,11 +7,420 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
+// Given a special op, return the address skip amount (in units of
+// DWARF2_LINE_MIN_INSN_LENGTH.
+#define SPECIAL_ADDR(op) (((op) - DWARF2_LINE_OPCODE_BASE)/DWARF2_LINE_RANGE)
+
+// The maximum address skip amount that can be encoded with a special op.
+#define MAX_SPECIAL_ADDR_DELTA SPECIAL_ADDR(255)
+
+// First special line opcode - leave room for the standard opcodes.
+// Note: If you want to change this, you'll have to update the
+// "standard_opcode_lengths" table that is emitted in DwarfFileTable::Emit().
+#define DWARF2_LINE_OPCODE_BASE 13
+
+// Minimum line offset in a special line info. opcode. This value
+// was chosen to give a reasonable range of values.
+#define DWARF2_LINE_BASE -5
+
+// Range of line offsets in a special line info. opcode.
+# define DWARF2_LINE_RANGE 14
+
+// Define the architecture-dependent minimum instruction length (in bytes).
+// If in doubt, this value should be too small rather than too big.
+# define DWARF2_LINE_MIN_INSN_LENGTH 1
+
+// Note: when DWARF2_LINE_MIN_INSN_LENGTH == 1, which is the current setting,
+// this routine is a nop and will be optimized away.
+static inline uint64_t ScaleAddrDelta(uint64_t AddrDelta)
+{
+ if (DWARF2_LINE_MIN_INSN_LENGTH == 1)
+ return AddrDelta;
+ if (AddrDelta % DWARF2_LINE_MIN_INSN_LENGTH != 0) {
+ // TODO: report this error, but really only once.
+ ;
+ }
+ return AddrDelta / DWARF2_LINE_MIN_INSN_LENGTH;
+}
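// [Editorial note] With the constants above, one special opcode byte encodes
// a combined (line, address) advance:
//
//   opcode = (LineDelta - DWARF2_LINE_BASE)
//          + (DWARF2_LINE_RANGE * AddrDelta) + DWARF2_LINE_OPCODE_BASE
//
// so representable line deltas are -5..8, and MAX_SPECIAL_ADDR_DELTA is
// SPECIAL_ADDR(255) = (255 - 13) / 14 = 17 address units.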
+
+//
+// This is called when an instruction is assembled into the specified section
+// and, if there is information from the last .loc directive that does not yet
+// have a line entry made for it, that line entry is made here.
+//
+void MCLineEntry::Make(MCStreamer *MCOS, const MCSection *Section) {
+ if (!MCOS->getContext().getDwarfLocSeen())
+ return;
+
+ // Create a symbol in the current section for use in the line entry.
+ MCSymbol *LineSym = MCOS->getContext().CreateTempSymbol();
+ // Set the value of the symbol to use for the MCLineEntry.
+ MCOS->EmitLabel(LineSym);
+
+ // Get the current .loc info saved in the context.
+ const MCDwarfLoc &DwarfLoc = MCOS->getContext().getCurrentDwarfLoc();
+
+ // Create a (local) line entry with the symbol and the current .loc info.
+ MCLineEntry LineEntry(LineSym, DwarfLoc);
+
+ // Clear DwarfLocSeen, indicating that the current .loc info has been used.
+ MCOS->getContext().ClearDwarfLocSeen();
+
+ // Get the MCLineSection for this section; if one does not exist for this
+ // section, create it.
+ const DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
+ MCOS->getContext().getMCLineSections();
+ MCLineSection *LineSection = MCLineSections.lookup(Section);
+ if (!LineSection) {
+ // Create a new MCLineSection. This will be deleted after the dwarf line
+ // table is created using it by iterating through the MCLineSections
+ // DenseMap.
+ LineSection = new MCLineSection;
+ // Save a pointer to the new LineSection into the MCLineSections DenseMap.
+ MCOS->getContext().addMCLineSection(Section, LineSection);
+ }
+
+ // Add the line entry to this section's entries.
+ LineSection->addLineEntry(LineEntry);
+}
+
+//
+// This helper routine returns an expression of End - Start - IntVal.
+//
+static inline const MCExpr *MakeStartMinusEndExpr(const MCStreamer &MCOS,
+ const MCSymbol &Start,
+ const MCSymbol &End,
+ int IntVal) {
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(&End, Variant, MCOS.getContext());
+ const MCExpr *RHS =
+ MCSymbolRefExpr::Create(&Start, Variant, MCOS.getContext());
+ const MCExpr *Res1 =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, Res, RHS, MCOS.getContext());
+ const MCExpr *Res2 =
+ MCConstantExpr::Create(IntVal, MCOS.getContext());
+ const MCExpr *Res3 =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, Res1, Res2, MCOS.getContext());
+ return Res3;
+}
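// [Editorial usage note] A typical call from the line-table emission below:
// the DWARF unit length field must exclude its own 4 bytes, so the emitter
// writes (LineEndSym - LineStartSym - 4) as a 4-byte value:
//
//   MCOS->EmitAbsValue(
//       MakeStartMinusEndExpr(*MCOS, *LineStartSym, *LineEndSym, 4), 4);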
+
+//
+// This emits the Dwarf line table for the specified section from the entries
+// in the LineSection.
+//
+static inline void EmitDwarfLineTable(MCStreamer *MCOS,
+ const MCSection *Section,
+ const MCLineSection *LineSection) {
+ unsigned FileNum = 1;
+ unsigned LastLine = 1;
+ unsigned Column = 0;
+ unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;
+ unsigned Isa = 0;
+ MCSymbol *LastLabel = NULL;
+
+ // Loop through each MCLineEntry and encode the dwarf line number table.
+ for (MCLineSection::const_iterator
+ it = LineSection->getMCLineEntries()->begin(),
+ ie = LineSection->getMCLineEntries()->end(); it != ie; ++it) {
+
+ if (FileNum != it->getFileNum()) {
+ FileNum = it->getFileNum();
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_file, 1);
+ MCOS->EmitULEB128IntValue(FileNum);
+ }
+ if (Column != it->getColumn()) {
+ Column = it->getColumn();
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_column, 1);
+ MCOS->EmitULEB128IntValue(Column);
+ }
+ if (Isa != it->getIsa()) {
+ Isa = it->getIsa();
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_isa, 1);
+ MCOS->EmitULEB128IntValue(Isa);
+ }
+ if ((it->getFlags() ^ Flags) & DWARF2_FLAG_IS_STMT) {
+ Flags = it->getFlags();
+ MCOS->EmitIntValue(dwarf::DW_LNS_negate_stmt, 1);
+ }
+ if (it->getFlags() & DWARF2_FLAG_BASIC_BLOCK)
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_basic_block, 1);
+ if (it->getFlags() & DWARF2_FLAG_PROLOGUE_END)
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_prologue_end, 1);
+ if (it->getFlags() & DWARF2_FLAG_EPILOGUE_BEGIN)
+ MCOS->EmitIntValue(dwarf::DW_LNS_set_epilogue_begin, 1);
+
+ int64_t LineDelta = static_cast<int64_t>(it->getLine()) - LastLine;
+ MCSymbol *Label = it->getLabel();
+
+ // At this point we want to emit/create the sequence to encode the delta in
+ // line numbers and the increment of the address between the previous Label
+ // and the current Label.
+ MCOS->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label);
+
+ LastLine = it->getLine();
+ LastLabel = Label;
+ }
+
+ // Emit a DW_LNE_end_sequence for the end of the section.
+ // Using the pointer Section, create a temporary label at the end of the
+ // section and use it, together with LastLabel, to compute the address
+ // delta; use INT64_MAX as the line delta, which is the signal that this
+ // is actually a DW_LNE_end_sequence.
+
+ // Switch to the section to be able to create a symbol at its end.
+ MCOS->SwitchSection(Section);
+
+ MCContext &context = MCOS->getContext();
+ // Create a symbol at the end of the section.
+ MCSymbol *SectionEnd = context.CreateTempSymbol();
+ // Set the value of the symbol, as we are at the end of the section.
+ MCOS->EmitLabel(SectionEnd);
+
+ // Switch back to the dwarf line section.
+ MCOS->SwitchSection(context.getTargetAsmInfo().getDwarfLineSection());
+
+ MCOS->EmitDwarfAdvanceLineAddr(INT64_MAX, LastLabel, SectionEnd);
+}
+
+//
+// This emits the Dwarf file and the line tables.
+//
+void MCDwarfFileTable::Emit(MCStreamer *MCOS) {
+ MCContext &context = MCOS->getContext();
+ // Switch to the section where the table will be emitted into.
+ MCOS->SwitchSection(context.getTargetAsmInfo().getDwarfLineSection());
+
+ // Create a symbol at the beginning of this section.
+ MCSymbol *LineStartSym = context.CreateTempSymbol();
+ // Set the value of the symbol, as we are at the start of the section.
+ MCOS->EmitLabel(LineStartSym);
+
+ // Create a symbol for the end of the section (to be set when we get there).
+ MCSymbol *LineEndSym = context.CreateTempSymbol();
+
+ // The first 4 bytes are the total length of the information for this
+ // compilation unit (not including these 4 bytes for the length).
+ MCOS->EmitAbsValue(MakeStartMinusEndExpr(*MCOS, *LineStartSym, *LineEndSym,4),
+ 4);
+
+ // The next 2 bytes are the Version, which is Dwarf 2.
+ MCOS->EmitIntValue(2, 2);
+
+ // Create a symbol for the end of the prologue (to be set when we get there).
+ MCSymbol *ProEndSym = context.CreateTempSymbol(); // Lprologue_end
+
+ // The length of the prologue is the next 4 bytes: the distance from the
+ // start of the section to the end of the prologue, not including the 4
+ // bytes for the total length, the 2 bytes for the version, and these 4
+ // bytes for the length of the prologue itself.
+ MCOS->EmitAbsValue(MakeStartMinusEndExpr(*MCOS, *LineStartSym, *ProEndSym,
+ (4 + 2 + 4)),
+ 4, 0);
+
+ // Parameters of the state machine are next.
+ MCOS->EmitIntValue(DWARF2_LINE_MIN_INSN_LENGTH, 1);
+ MCOS->EmitIntValue(DWARF2_LINE_DEFAULT_IS_STMT, 1);
+ MCOS->EmitIntValue(DWARF2_LINE_BASE, 1);
+ MCOS->EmitIntValue(DWARF2_LINE_RANGE, 1);
+ MCOS->EmitIntValue(DWARF2_LINE_OPCODE_BASE, 1);
+
+ // Standard opcode lengths
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_copy
+ MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_pc
+ MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_line
+ MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_file
+ MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_column
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_negate_stmt
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_basic_block
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_const_add_pc
+ MCOS->EmitIntValue(1, 1); // length of DW_LNS_fixed_advance_pc
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_prologue_end
+ MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_epilogue_begin
+ MCOS->EmitIntValue(1, 1); // DW_LNS_set_isa
+
+ // Put out the directory and file tables.
+
+ // First the directory table.
+ const std::vector<StringRef> &MCDwarfDirs =
+ context.getMCDwarfDirs();
+ for (unsigned i = 0; i < MCDwarfDirs.size(); i++) {
+ MCOS->EmitBytes(MCDwarfDirs[i], 0); // the DirectoryName
+ MCOS->EmitBytes(StringRef("\0", 1), 0); // the null term. of the string
+ }
+ MCOS->EmitIntValue(0, 1); // Terminate the directory list
+
+ // Second the file table.
+ const std::vector<MCDwarfFile *> &MCDwarfFiles =
+ MCOS->getContext().getMCDwarfFiles();
+ for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
+ MCOS->EmitBytes(MCDwarfFiles[i]->getName(), 0); // FileName
+ MCOS->EmitBytes(StringRef("\0", 1), 0); // the null term. of the string
+ // the Directory num
+ MCOS->EmitULEB128IntValue(MCDwarfFiles[i]->getDirIndex());
+ MCOS->EmitIntValue(0, 1); // last modification timestamp (always 0)
+ MCOS->EmitIntValue(0, 1); // filesize (always 0)
+ }
+ MCOS->EmitIntValue(0, 1); // Terminate the file list
+
+ // This is the end of the prologue, so set the value of the symbol at the
+ // end of the prologue (that was used in a previous expression).
+ MCOS->EmitLabel(ProEndSym);
+
+ // Put out the line tables.
+ const DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
+ MCOS->getContext().getMCLineSections();
+ const std::vector<const MCSection *> &MCLineSectionOrder =
+ MCOS->getContext().getMCLineSectionOrder();
+ for (std::vector<const MCSection*>::const_iterator it =
+ MCLineSectionOrder.begin(), ie = MCLineSectionOrder.end(); it != ie;
+ ++it) {
+ const MCSection *Sec = *it;
+ const MCLineSection *Line = MCLineSections.lookup(Sec);
+ EmitDwarfLineTable(MCOS, Sec, Line);
+
+ // Now delete the MCLineSections that were created in MCLineEntry::Make()
+ // and used to emit the line table.
+ delete Line;
+ }
+
+ if (MCOS->getContext().getAsmInfo().getLinkerRequiresNonEmptyDwarfLines()
+ && MCLineSectionOrder.begin() == MCLineSectionOrder.end()) {
+ // The darwin9 linker has a bug (see PR8715). For 32-bit architectures
+ // it requires:
+ // total_length >= prologue_length + 10
+ // We are 4 bytes short, since we have total_length = 51 and
+ // prologue_length = 45
+
+ // The regular end_sequence should be sufficient.
+ MCDwarfLineAddr::Emit(MCOS, INT64_MAX, 0);
+ }
+
+ // This is the end of the section, so set the value of the symbol at the end
+ // of this section (that was used in a previous expression).
+ MCOS->EmitLabel(LineEndSym);
+}
+
+/// Utility function to write the encoding to an object writer.
+void MCDwarfLineAddr::Write(MCObjectWriter *OW, int64_t LineDelta,
+ uint64_t AddrDelta) {
+ SmallString<256> Tmp;
+ raw_svector_ostream OS(Tmp);
+ MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OS);
+ OW->WriteBytes(OS.str());
+}
+
+/// Utility function to emit the encoding to a streamer.
+void MCDwarfLineAddr::Emit(MCStreamer *MCOS, int64_t LineDelta,
+ uint64_t AddrDelta) {
+ SmallString<256> Tmp;
+ raw_svector_ostream OS(Tmp);
+ MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OS);
+ MCOS->EmitBytes(OS.str(), /*AddrSpace=*/0);
+}
+
+/// Utility function to encode a Dwarf pair of LineDelta and AddrDeltas.
+void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
+ raw_ostream &OS) {
+ uint64_t Temp, Opcode;
+ bool NeedCopy = false;
+
+ // Scale the address delta by the minimum instruction length.
+ AddrDelta = ScaleAddrDelta(AddrDelta);
+
+ // A LineDelta of INT64_MAX is a signal that this is actually a
+ // DW_LNE_end_sequence. We cannot use special opcodes here, since we want the
+ // end_sequence to emit the matrix entry.
+ if (LineDelta == INT64_MAX) {
+ if (AddrDelta == MAX_SPECIAL_ADDR_DELTA)
+ OS << char(dwarf::DW_LNS_const_add_pc);
+ else {
+ OS << char(dwarf::DW_LNS_advance_pc);
+ SmallString<32> Tmp;
+ raw_svector_ostream OSE(Tmp);
+ MCObjectWriter::EncodeULEB128(AddrDelta, OSE);
+ OS << OSE.str();
+ }
+ OS << char(dwarf::DW_LNS_extended_op);
+ OS << char(1);
+ OS << char(dwarf::DW_LNE_end_sequence);
+ return;
+ }
+
+ // Bias the line delta by the base.
+ Temp = LineDelta - DWARF2_LINE_BASE;
+
+ // If the line increment is out of range of a special opcode, we must encode
+ // it with DW_LNS_advance_line.
+ if (Temp >= DWARF2_LINE_RANGE) {
+ OS << char(dwarf::DW_LNS_advance_line);
+ SmallString<32> Tmp;
+ raw_svector_ostream OSE(Tmp);
+ MCObjectWriter::EncodeSLEB128(LineDelta, OSE);
+ OS << OSE.str();
+
+ LineDelta = 0;
+ Temp = 0 - DWARF2_LINE_BASE;
+ NeedCopy = true;
+ }
+
+ // Use DW_LNS_copy instead of a "line +0, addr +0" special opcode.
+ if (LineDelta == 0 && AddrDelta == 0) {
+ OS << char(dwarf::DW_LNS_copy);
+ return;
+ }
+
+ // Bias the opcode by the special opcode base.
+ Temp += DWARF2_LINE_OPCODE_BASE;
+
+ // Avoid overflow when addr_delta is large.
+ if (AddrDelta < 256 + MAX_SPECIAL_ADDR_DELTA) {
+ // Try using a special opcode.
+ Opcode = Temp + AddrDelta * DWARF2_LINE_RANGE;
+ if (Opcode <= 255) {
+ OS << char(Opcode);
+ return;
+ }
+
+ // Try using DW_LNS_const_add_pc followed by special op.
+ Opcode = Temp + (AddrDelta - MAX_SPECIAL_ADDR_DELTA) * DWARF2_LINE_RANGE;
+ if (Opcode <= 255) {
+ OS << char(dwarf::DW_LNS_const_add_pc);
+ OS << char(Opcode);
+ return;
+ }
+ }
+
+ // Otherwise use DW_LNS_advance_pc.
+ OS << char(dwarf::DW_LNS_advance_pc);
+ SmallString<32> Tmp;
+ raw_svector_ostream OSE(Tmp);
+ MCObjectWriter::EncodeULEB128(AddrDelta, OSE);
+ OS << OSE.str();
+
+ if (NeedCopy)
+ OS << char(dwarf::DW_LNS_copy);
+ else
+ OS << char(Temp);
+}
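// [Editorial worked example] Special-opcode path: LineDelta = +2, scaled
// AddrDelta = 3. Temp = 2 - (-5) = 7, plus the opcode base gives 20, and
// Opcode = 20 + 3 * 14 = 62 <= 255, so the whole row advance is the single
// byte 62.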
+
void MCDwarfFile::print(raw_ostream &OS) const {
OS << '"' << getName() << '"';
}
@@ -19,3 +428,387 @@ void MCDwarfFile::print(raw_ostream &OS) const {
void MCDwarfFile::dump() const {
print(dbgs());
}
+
+static int getDataAlignmentFactor(MCStreamer &streamer) {
+ MCContext &context = streamer.getContext();
+ const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
+ int size = asmInfo.getPointerSize();
+ if (asmInfo.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
+ return size;
+ else
+ return -size;
+}
+
+static void EmitCFIInstruction(MCStreamer &Streamer,
+ const MCCFIInstruction &Instr) {
+ int dataAlignmentFactor = getDataAlignmentFactor(Streamer);
+
+ switch (Instr.getOperation()) {
+ case MCCFIInstruction::Move: {
+ const MachineLocation &Dst = Instr.getDestination();
+ const MachineLocation &Src = Instr.getSource();
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ assert(!Src.isReg() && "Machine move not supported yet.");
+
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_offset, 1);
+ } else {
+ Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa, 1);
+ Streamer.EmitULEB128IntValue(Src.getReg());
+ }
+
+ Streamer.EmitULEB128IntValue(-Src.getOffset(), 1);
+ return;
+ }
+
+ if (Src.isReg() && Src.getReg() == MachineLocation::VirtualFP) {
+ assert(Dst.isReg() && "Machine move not supported yet.");
+ Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_register, 1);
+ Streamer.EmitULEB128IntValue(Dst.getReg());
+ return;
+ }
+
+ unsigned Reg = Src.getReg();
+ int Offset = Dst.getOffset() / dataAlignmentFactor;
+
+ if (Offset < 0) {
+ Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended_sf, 1);
+ Streamer.EmitULEB128IntValue(Reg);
+ Streamer.EmitSLEB128IntValue(Offset);
+ } else if (Reg < 64) {
+ Streamer.EmitIntValue(dwarf::DW_CFA_offset + Reg, 1);
+ Streamer.EmitULEB128IntValue(Offset, 1);
+ } else {
+ Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended, 1);
+ Streamer.EmitULEB128IntValue(Reg, 1);
+ Streamer.EmitULEB128IntValue(Offset, 1);
+ }
+ return;
+ }
+ case MCCFIInstruction::Remember:
+ Streamer.EmitIntValue(dwarf::DW_CFA_remember_state, 1);
+ return;
+ case MCCFIInstruction::Restore:
+ Streamer.EmitIntValue(dwarf::DW_CFA_restore_state, 1);
+ return;
+ }
+ llvm_unreachable("Unhandled case in switch");
+}
+
+/// EmitCFIInstructions - Emit frame instructions to describe the layout of the
+/// frame.
+static void EmitCFIInstructions(MCStreamer &streamer,
+ const std::vector<MCCFIInstruction> &Instrs,
+ MCSymbol *BaseLabel) {
+ for (unsigned i = 0, N = Instrs.size(); i < N; ++i) {
+ const MCCFIInstruction &Instr = Instrs[i];
+ MCSymbol *Label = Instr.getLabel();
+ // Throw out move if the label is invalid.
+ if (Label && !Label->isDefined()) continue; // Not emitted, in dead code.
+
+ // Advance row if new location.
+ if (BaseLabel && Label) {
+ MCSymbol *ThisSym = Label;
+ if (ThisSym != BaseLabel) {
+ streamer.EmitDwarfAdvanceFrameAddr(BaseLabel, ThisSym);
+ BaseLabel = ThisSym;
+ }
+ }
+
+ EmitCFIInstruction(streamer, Instr);
+ }
+}
+
+static void EmitSymbol(MCStreamer &streamer, const MCSymbol &symbol,
+ unsigned symbolEncoding) {
+ MCContext &context = streamer.getContext();
+ const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
+ unsigned format = symbolEncoding & 0x0f;
+ unsigned application = symbolEncoding & 0x70;
+ unsigned size;
+ switch (format) {
+ default:
+ assert(0 && "Unknown Encoding");
+ case dwarf::DW_EH_PE_absptr:
+ case dwarf::DW_EH_PE_signed:
+ size = asmInfo.getPointerSize();
+ break;
+ case dwarf::DW_EH_PE_udata2:
+ case dwarf::DW_EH_PE_sdata2:
+ size = 2;
+ break;
+ case dwarf::DW_EH_PE_udata4:
+ case dwarf::DW_EH_PE_sdata4:
+ size = 4;
+ break;
+ case dwarf::DW_EH_PE_udata8:
+ case dwarf::DW_EH_PE_sdata8:
+ size = 8;
+ break;
+ }
+ switch (application) {
+ default:
+ assert(0 && "Unknown Encoding");
+ break;
+ case 0:
+ streamer.EmitSymbolValue(&symbol, size);
+ break;
+ case dwarf::DW_EH_PE_pcrel:
+ streamer.EmitPCRelSymbolValue(&symbol, size);
+ break;
+ }
+}
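// [Editorial note] The format/application split above: the low nibble picks
// the value width and signedness (absptr, udata2/4/8, sdata2/4/8), while the
// 0x70 bits pick how it is applied (0 = absolute, DW_EH_PE_pcrel =
// PC-relative). For example, DW_EH_PE_pcrel | DW_EH_PE_sdata4 = 0x1B means
// "4-byte signed, PC-relative", the FDE pointer encoding emitted in EmitCIE
// below.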
+
+static const MachineLocation TranslateMachineLocation(
+ const TargetAsmInfo &AsmInfo,
+ const MachineLocation &Loc) {
+ unsigned Reg = Loc.getReg() == MachineLocation::VirtualFP ?
+ MachineLocation::VirtualFP :
+ unsigned(AsmInfo.getDwarfRegNum(Loc.getReg(), true));
+ const MachineLocation &NewLoc = Loc.isReg() ?
+ MachineLocation(Reg) : MachineLocation(Reg, Loc.getOffset());
+ return NewLoc;
+}
+
+static const MCSymbol &EmitCIE(MCStreamer &streamer,
+ const MCSymbol *personality,
+ unsigned personalityEncoding,
+ const MCSymbol *lsda,
+ unsigned lsdaEncoding) {
+ MCContext &context = streamer.getContext();
+ const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
+ const MCSection &section = *asmInfo.getEHFrameSection();
+ streamer.SwitchSection(&section);
+ MCSymbol *sectionStart = streamer.getContext().CreateTempSymbol();
+ MCSymbol *sectionEnd = streamer.getContext().CreateTempSymbol();
+
+ // Length
+ const MCExpr *Length = MakeStartMinusEndExpr(streamer, *sectionStart,
+ *sectionEnd, 4);
+ streamer.EmitLabel(sectionStart);
+ streamer.EmitValue(Length, 4);
+
+ // CIE ID
+ streamer.EmitIntValue(0, 4);
+
+ // Version
+ streamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1);
+
+ // Augmentation String
+ SmallString<8> Augmentation;
+ Augmentation += "z";
+ if (personality)
+ Augmentation += "P";
+ if (lsda)
+ Augmentation += "L";
+ Augmentation += "R";
+ streamer.EmitBytes(Augmentation.str(), 0);
+ streamer.EmitIntValue(0, 1);
+
+ // Code Alignment Factor
+ streamer.EmitULEB128IntValue(1);
+
+ // Data Alignment Factor
+ streamer.EmitSLEB128IntValue(getDataAlignmentFactor(streamer));
+
+ // Return Address Register
+ streamer.EmitULEB128IntValue(asmInfo.getDwarfRARegNum(true));
+
+ // Augmentation Data Length (optional)
+ MCSymbol *augmentationStart = streamer.getContext().CreateTempSymbol();
+ MCSymbol *augmentationEnd = streamer.getContext().CreateTempSymbol();
+ const MCExpr *augmentationLength = MakeStartMinusEndExpr(streamer,
+ *augmentationStart,
+ *augmentationEnd, 0);
+ streamer.EmitULEB128Value(augmentationLength);
+
+ // Augmentation Data (optional)
+ streamer.EmitLabel(augmentationStart);
+ if (personality) {
+ // Personality Encoding
+ streamer.EmitIntValue(personalityEncoding, 1);
+ // Personality
+ EmitSymbol(streamer, *personality, personalityEncoding);
+ }
+ if (lsda) {
+ // LSDA Encoding
+ streamer.EmitIntValue(lsdaEncoding, 1);
+ }
+ // Encoding of the FDE pointers
+ streamer.EmitIntValue(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4, 1);
+ streamer.EmitLabel(augmentationEnd);
+
+ // Initial Instructions
+
+ const std::vector<MachineMove> Moves = asmInfo.getInitialFrameState();
+ std::vector<MCCFIInstruction> Instructions;
+
+ for (int i = 0, n = Moves.size(); i != n; ++i) {
+ MCSymbol *Label = Moves[i].getLabel();
+ const MachineLocation &Dst =
+ TranslateMachineLocation(asmInfo, Moves[i].getDestination());
+ const MachineLocation &Src =
+ TranslateMachineLocation(asmInfo, Moves[i].getSource());
+ MCCFIInstruction Inst(Label, Dst, Src);
+ Instructions.push_back(Inst);
+ }
+
+ EmitCFIInstructions(streamer, Instructions, NULL);
+
+ // Padding
+ streamer.EmitValueToAlignment(4);
+
+ streamer.EmitLabel(sectionEnd);
+ return *sectionStart;
+}
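// [Editorial note] The augmentation string assembled above is always "z"
// (augmentation data length present), then "P" if a personality routine
// exists, "L" if an LSDA exists, and finally "R" (FDE pointer encoding).
// A frame with both personality and LSDA therefore emits "zPLR", and the
// augmentation data holds the personality encoding and pointer, the LSDA
// encoding, and the FDE pointer encoding, in that order.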
+
+static MCSymbol *EmitFDE(MCStreamer &streamer,
+ const MCSymbol &cieStart,
+ const MCDwarfFrameInfo &frame) {
+ MCContext &context = streamer.getContext();
+ MCSymbol *fdeStart = context.CreateTempSymbol();
+ MCSymbol *fdeEnd = context.CreateTempSymbol();
+
+ // Length
+ const MCExpr *Length = MakeStartMinusEndExpr(streamer, *fdeStart, *fdeEnd, 0);
+ streamer.EmitValue(Length, 4);
+
+ streamer.EmitLabel(fdeStart);
+ // CIE Pointer
+ const MCExpr *offset = MakeStartMinusEndExpr(streamer, cieStart, *fdeStart,
+ 0);
+ streamer.EmitValue(offset, 4);
+
+ // PC Begin
+ streamer.EmitPCRelSymbolValue(frame.Begin, 4);
+
+ // PC Range
+ const MCExpr *Range = MakeStartMinusEndExpr(streamer, *frame.Begin,
+ *frame.End, 0);
+ streamer.EmitValue(Range, 4);
+
+ // Augmentation Data Length
+ MCSymbol *augmentationStart = streamer.getContext().CreateTempSymbol();
+ MCSymbol *augmentationEnd = streamer.getContext().CreateTempSymbol();
+ const MCExpr *augmentationLength = MakeStartMinusEndExpr(streamer,
+ *augmentationStart,
+ *augmentationEnd, 0);
+ streamer.EmitULEB128Value(augmentationLength);
+
+ // Augmentation Data
+ streamer.EmitLabel(augmentationStart);
+ if (frame.Lsda)
+ EmitSymbol(streamer, *frame.Lsda, frame.LsdaEncoding);
+ streamer.EmitLabel(augmentationEnd);
+ // Call Frame Instructions
+
+ EmitCFIInstructions(streamer, frame.Instructions, frame.Begin);
+
+ // Padding
+ streamer.EmitValueToAlignment(4);
+
+ return fdeEnd;
+}
+
+namespace {
+ struct CIEKey {
+ static const CIEKey getEmptyKey() { return CIEKey(0, 0, -1); }
+ static const CIEKey getTombstoneKey() { return CIEKey(0, -1, 0); }
+
+ CIEKey(const MCSymbol* Personality_, unsigned PersonalityEncoding_,
+ unsigned LsdaEncoding_) : Personality(Personality_),
+ PersonalityEncoding(PersonalityEncoding_),
+ LsdaEncoding(LsdaEncoding_) {
+ }
+ const MCSymbol* Personality;
+ unsigned PersonalityEncoding;
+ unsigned LsdaEncoding;
+ };
+}
+
+namespace llvm {
+ template <>
+ struct DenseMapInfo<CIEKey> {
+ static CIEKey getEmptyKey() {
+ return CIEKey::getEmptyKey();
+ }
+ static CIEKey getTombstoneKey() {
+ return CIEKey::getTombstoneKey();
+ }
+ static unsigned getHashValue(const CIEKey &Key) {
+ FoldingSetNodeID ID;
+ ID.AddPointer(Key.Personality);
+ ID.AddInteger(Key.PersonalityEncoding);
+ ID.AddInteger(Key.LsdaEncoding);
+ return ID.ComputeHash();
+ }
+ static bool isEqual(const CIEKey &LHS,
+ const CIEKey &RHS) {
+ return LHS.Personality == RHS.Personality &&
+ LHS.PersonalityEncoding == RHS.PersonalityEncoding &&
+ LHS.LsdaEncoding == RHS.LsdaEncoding;
+ }
+ };
+}
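A DenseMap key type must supply a DenseMapInfo specialization providing two reserved sentinel values (empty and tombstone), a hash, and equality, which is exactly what the specialization above gives CIEKey. For illustration, a minimal sketch for a hypothetical (File, Line) key type; LocKey is made up and not part of this change:

#include "llvm/ADT/DenseMap.h"

struct LocKey {                 // hypothetical key type, for illustration only
  unsigned File, Line;
};

namespace llvm {
template <> struct DenseMapInfo<LocKey> {
  // Two reserved values the map uses internally; real keys must never
  // collide with them.
  static LocKey getEmptyKey()     { LocKey K = {~0u, 0}; return K; }
  static LocKey getTombstoneKey() { LocKey K = {~0u, 1}; return K; }
  static unsigned getHashValue(const LocKey &K) {
    return (K.File * 37u) ^ K.Line;
  }
  static bool isEqual(const LocKey &A, const LocKey &B) {
    return A.File == B.File && A.Line == B.Line;
  }
};
} // namespace llvm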
+
+void MCDwarfFrameEmitter::Emit(MCStreamer &streamer) {
+ const MCContext &context = streamer.getContext();
+ const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
+ MCSymbol *fdeEnd = NULL;
+ DenseMap<CIEKey, const MCSymbol*> CIEStarts;
+
+ for (unsigned i = 0, n = streamer.getNumFrameInfos(); i < n; ++i) {
+ const MCDwarfFrameInfo &frame = streamer.getFrameInfo(i);
+ CIEKey key(frame.Personality, frame.PersonalityEncoding,
+ frame.LsdaEncoding);
+ const MCSymbol *&cieStart = CIEStarts[key];
+ if (!cieStart)
+ cieStart = &EmitCIE(streamer, frame.Personality,
+ frame.PersonalityEncoding, frame.Lsda,
+ frame.LsdaEncoding);
+ fdeEnd = EmitFDE(streamer, *cieStart, frame);
+ if (i != n - 1)
+ streamer.EmitLabel(fdeEnd);
+ }
+
+ streamer.EmitValueToAlignment(asmInfo.getPointerSize());
+ if (fdeEnd)
+ streamer.EmitLabel(fdeEnd);
+}
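Concretely: if three functions share the same personality symbol, personality encoding, and LSDA encoding, all three frames map to the same CIEKey, the DenseMap lookup returns the cached cieStart after the first emission, and only one CIE is written; each of the three FDEs then references it through its CIE Pointer field.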
+
+void MCDwarfFrameEmitter::EmitAdvanceLoc(MCStreamer &Streamer,
+ uint64_t AddrDelta) {
+ SmallString<256> Tmp;
+ raw_svector_ostream OS(Tmp);
+ MCDwarfFrameEmitter::EncodeAdvanceLoc(AddrDelta, OS);
+ Streamer.EmitBytes(OS.str(), /*AddrSpace=*/0);
+}
+
+void MCDwarfFrameEmitter::EncodeAdvanceLoc(uint64_t AddrDelta,
+ raw_ostream &OS) {
+ // FIXME: Assumes the code alignment factor is 1.
+ if (AddrDelta == 0) {
+ } else if (isUIntN(6, AddrDelta)) {
+ uint8_t Opcode = dwarf::DW_CFA_advance_loc | AddrDelta;
+ OS << Opcode;
+ } else if (isUInt<8>(AddrDelta)) {
+ OS << uint8_t(dwarf::DW_CFA_advance_loc1);
+ OS << uint8_t(AddrDelta);
+ } else if (isUInt<16>(AddrDelta)) {
+    // FIXME: check what the correct behavior is on a big-endian machine.
+ OS << uint8_t(dwarf::DW_CFA_advance_loc2);
+ OS << uint8_t( AddrDelta & 0xff);
+ OS << uint8_t((AddrDelta >> 8) & 0xff);
+ } else {
+    // FIXME: check what the correct behavior is on a big-endian machine.
+ assert(isUInt<32>(AddrDelta));
+ OS << uint8_t(dwarf::DW_CFA_advance_loc4);
+ OS << uint8_t( AddrDelta & 0xff);
+ OS << uint8_t((AddrDelta >> 8) & 0xff);
+ OS << uint8_t((AddrDelta >> 16) & 0xff);
+ OS << uint8_t((AddrDelta >> 24) & 0xff);
+
+ }
+}
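EncodeAdvanceLoc picks the smallest DW_CFA_advance_loc form that can represent the delta: six bits packed into the opcode byte itself, then 1-, 2-, or 4-byte little-endian operands. The same selection as a standalone sketch; the constants are the standard DWARF opcode values, declared locally so the snippet compiles on its own:

#include <cassert>
#include <cstdint>
#include <vector>

enum : uint8_t {
  DW_CFA_advance_loc  = 0x40, // delta packed into the low 6 bits
  DW_CFA_advance_loc1 = 0x02, // 1-byte operand
  DW_CFA_advance_loc2 = 0x03, // 2-byte operand
  DW_CFA_advance_loc4 = 0x04  // 4-byte operand
};

static void encodeAdvanceLoc(uint64_t Delta, std::vector<uint8_t> &Out) {
  if (Delta == 0)
    return;                              // nothing to emit
  if (Delta < (1u << 6)) {               // fits in the opcode itself
    Out.push_back(DW_CFA_advance_loc | uint8_t(Delta));
  } else if (Delta <= 0xff) {
    Out.push_back(DW_CFA_advance_loc1);
    Out.push_back(uint8_t(Delta));
  } else if (Delta <= 0xffff) {
    Out.push_back(DW_CFA_advance_loc2);  // little-endian operand
    Out.push_back(uint8_t(Delta));
    Out.push_back(uint8_t(Delta >> 8));
  } else {
    assert(Delta <= 0xffffffffULL && "delta must fit in 32 bits");
    Out.push_back(DW_CFA_advance_loc4);
    Out.push_back(uint8_t(Delta));
    Out.push_back(uint8_t(Delta >> 8));
    Out.push_back(uint8_t(Delta >> 16));
    Out.push_back(uint8_t(Delta >> 24));
  }
}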
diff --git a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
new file mode 100644
index 0000000..12a02a9
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -0,0 +1,23 @@
+//===-- MCELFObjectTargetWriter.cpp - ELF Target Writer Subclass ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCELFObjectWriter.h"
+
+using namespace llvm;
+
+MCELFObjectTargetWriter::MCELFObjectTargetWriter(bool Is64Bit_,
+ Triple::OSType OSType_,
+ uint16_t EMachine_,
+ bool HasRelocationAddend_)
+ : OSType(OSType_), EMachine(EMachine_),
+ HasRelocationAddend(HasRelocationAddend_), Is64Bit(Is64Bit_) {
+}
+
+MCELFObjectTargetWriter::~MCELFObjectTargetWriter() {
+}
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.cpp b/contrib/llvm/lib/MC/MCELFStreamer.cpp
index 570c391..e49074d 100644
--- a/contrib/llvm/lib/MC/MCELFStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCELFStreamer.cpp
@@ -13,6 +13,7 @@
#include "llvm/MC/MCStreamer.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -23,19 +24,51 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
namespace {
+static void SetBinding(MCSymbolData &SD, unsigned Binding) {
+ assert(Binding == ELF::STB_LOCAL || Binding == ELF::STB_GLOBAL ||
+ Binding == ELF::STB_WEAK);
+ uint32_t OtherFlags = SD.getFlags() & ~(0xf << ELF_STB_Shift);
+ SD.setFlags(OtherFlags | (Binding << ELF_STB_Shift));
+}
+
+static unsigned GetBinding(const MCSymbolData &SD) {
+ uint32_t Binding = (SD.getFlags() & (0xf << ELF_STB_Shift)) >> ELF_STB_Shift;
+ assert(Binding == ELF::STB_LOCAL || Binding == ELF::STB_GLOBAL ||
+ Binding == ELF::STB_WEAK);
+ return Binding;
+}
+
+static void SetType(MCSymbolData &SD, unsigned Type) {
+ assert(Type == ELF::STT_NOTYPE || Type == ELF::STT_OBJECT ||
+ Type == ELF::STT_FUNC || Type == ELF::STT_SECTION ||
+ Type == ELF::STT_FILE || Type == ELF::STT_COMMON ||
+ Type == ELF::STT_TLS);
+
+ uint32_t OtherFlags = SD.getFlags() & ~(0xf << ELF_STT_Shift);
+ SD.setFlags(OtherFlags | (Type << ELF_STT_Shift));
+}
+
+static void SetVisibility(MCSymbolData &SD, unsigned Visibility) {
+ assert(Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_INTERNAL ||
+ Visibility == ELF::STV_HIDDEN || Visibility == ELF::STV_PROTECTED);
+
+ uint32_t OtherFlags = SD.getFlags() & ~(0xf << ELF_STV_Shift);
+ SD.setFlags(OtherFlags | (Visibility << ELF_STV_Shift));
+}
+
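The helpers above share one read-modify-write pattern: each ELF symbol attribute (binding, type, visibility) occupies its own 4-bit field inside the MCSymbolData flags word, located by an ELF_ST*_Shift constant; a setter masks the field out and ORs the new value in. A generic sketch of that pattern follows; the shift values below are placeholders for illustration, not the real ELF_ST*_Shift constants:

#include <cassert>
#include <cstdint>

static const unsigned BindingShift = 8;  // placeholder, not the real shift
static const unsigned TypeShift    = 4;  // placeholder, not the real shift

static uint32_t setNibble(uint32_t Flags, unsigned Shift, uint32_t Value) {
  assert(Value <= 0xf && "each attribute is one nibble wide");
  return (Flags & ~(0xfu << Shift)) | (Value << Shift);
}

static uint32_t getNibble(uint32_t Flags, unsigned Shift) {
  return (Flags >> Shift) & 0xf;
}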
class MCELFStreamer : public MCObjectStreamer {
- void EmitInstToFragment(const MCInst &Inst);
- void EmitInstToData(const MCInst &Inst);
public:
MCELFStreamer(MCContext &Context, TargetAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *Emitter)
@@ -46,9 +79,13 @@ public:
/// @name MCStreamer Interface
/// @{
+ virtual void InitSections();
+ virtual void ChangeSection(const MCSection *Section);
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
assert(0 && "ELF doesn't support this directive");
@@ -76,9 +113,8 @@ public:
SD.setSize(Value);
}
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
- assert(0 && "ELF doesn't support this directive");
- }
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size);
+
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0) {
assert(0 && "ELF doesn't support this directive");
@@ -88,49 +124,84 @@ public:
assert(0 && "ELF doesn't support this directive");
}
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
- virtual void EmitGPRel32Value(const MCExpr *Value) {
- assert(0 && "ELF doesn't support this directive");
- }
virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
unsigned ValueSize = 1,
unsigned MaxBytesToEmit = 0);
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0);
- virtual void EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0);
virtual void EmitFileDirective(StringRef Filename);
- virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
- DEBUG(dbgs() << "FIXME: MCELFStreamer:EmitDwarfFileDirective not implemented\n");
- }
- virtual void EmitInstruction(const MCInst &Inst);
virtual void Finish();
+private:
+ virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitInstToData(const MCInst &Inst);
+
+ void fixSymbolsInTLSFixups(const MCExpr *expr);
+
+ struct LocalCommon {
+ MCSymbolData *SD;
+ uint64_t Size;
+ unsigned ByteAlignment;
+ };
+ std::vector<LocalCommon> LocalCommons;
+
+ SmallPtrSet<MCSymbol *, 16> BindingExplicitlySet;
/// @}
+ void SetSection(StringRef Section, unsigned Type, unsigned Flags,
+ SectionKind Kind) {
+ SwitchSection(getContext().getELFSection(Section, Type, Flags, Kind));
+ }
+
+ void SetSectionData() {
+ SetSection(".data", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+ EmitCodeAlignment(4, 0);
+ }
+ void SetSectionText() {
+ SetSection(".text", ELF::SHT_PROGBITS,
+ ELF::SHF_EXECINSTR |
+ ELF::SHF_ALLOC, SectionKind::getText());
+ EmitCodeAlignment(4, 0);
+ }
+ void SetSectionBss() {
+ SetSection(".bss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC, SectionKind::getBSS());
+ EmitCodeAlignment(4, 0);
+ }
};
} // end anonymous namespace.
+void MCELFStreamer::InitSections() {
+  // This emulates the behavior of GNU as, making it easier to compare
+  // the output since the major sections are in the same order.
+ SetSectionText();
+ SetSectionData();
+ SetSectionBss();
+ SetSectionText();
+}
+
void MCELFStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- // FIXME: This is wasteful, we don't necessarily need to create a data
- // fragment. Instead, we should mark the symbol as pointing into the data
- // fragment if it exists, otherwise we should just queue the label and set its
- // fragment pointer when we emit the next fragment.
- MCDataFragment *F = getOrCreateDataFragment();
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
- SD.setFragment(F);
- SD.setOffset(F->getContents().size());
+ MCObjectStreamer::EmitLabel(Symbol);
- Symbol->setSection(*CurSection);
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(Symbol->getSection());
+ MCSymbolData &SD = getAssembler().getSymbolData(*Symbol);
+ if (Section.getFlags() & ELF::SHF_TLS)
+ SetType(SD, ELF::STT_TLS);
}
void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
+ case MCAF_SyntaxUnified: return; // no-op here.
+ case MCAF_Code16: return; // no-op here.
+ case MCAF_Code32: return; // no-op here.
case MCAF_SubsectionsViaSymbols:
getAssembler().setSubsectionsViaSymbols(true);
return;
@@ -139,6 +210,10 @@ void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
assert(0 && "invalid assembler flag!");
}
+void MCELFStreamer::EmitThumbFunc(MCSymbol *Func) {
+ // FIXME: Anything needed here to flag the function as thumb?
+}
+
void MCELFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
// TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
// MCObjectStreamer.
@@ -147,6 +222,21 @@ void MCELFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
Symbol->setVariableValue(AddValueSymbols(Value));
}
+void MCELFStreamer::ChangeSection(const MCSection *Section) {
+ const MCSymbol *Grp = static_cast<const MCSectionELF *>(Section)->getGroup();
+ if (Grp)
+ getAssembler().getOrCreateSymbolData(*Grp);
+ this->MCObjectStreamer::ChangeSection(Section);
+}
+
+void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ MCSymbolData &AliasSD = getAssembler().getOrCreateSymbolData(*Alias);
+ AliasSD.setFlags(AliasSD.getFlags() | ELF_Other_Weakref);
+ const MCExpr *Value = MCSymbolRefExpr::Create(Symbol, getContext());
+ Alias->setVariableValue(Value);
+}
+
void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) {
// Indirect symbols are handled differently, to match how 'as' handles
@@ -176,6 +266,7 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_LazyReference:
case MCSA_Reference:
case MCSA_NoDeadStrip:
+ case MCSA_SymbolResolver:
case MCSA_PrivateExtern:
case MCSA_WeakDefinition:
case MCSA_WeakDefAutoPrivate:
@@ -185,50 +276,59 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
assert(0 && "Invalid symbol attribute for ELF!");
break;
+ case MCSA_ELF_TypeGnuUniqueObject:
+ // Ignore for now.
+ break;
+
case MCSA_Global:
- SD.setFlags(SD.getFlags() | ELF_STB_Global);
+ SetBinding(SD, ELF::STB_GLOBAL);
SD.setExternal(true);
+ BindingExplicitlySet.insert(Symbol);
break;
case MCSA_WeakReference:
case MCSA_Weak:
- SD.setFlags(SD.getFlags() | ELF_STB_Weak);
+ SetBinding(SD, ELF::STB_WEAK);
+ SD.setExternal(true);
+ BindingExplicitlySet.insert(Symbol);
break;
case MCSA_Local:
- SD.setFlags(SD.getFlags() | ELF_STB_Local);
+ SetBinding(SD, ELF::STB_LOCAL);
+ SD.setExternal(false);
+ BindingExplicitlySet.insert(Symbol);
break;
case MCSA_ELF_TypeFunction:
- SD.setFlags(SD.getFlags() | ELF_STT_Func);
+ SetType(SD, ELF::STT_FUNC);
break;
case MCSA_ELF_TypeObject:
- SD.setFlags(SD.getFlags() | ELF_STT_Object);
+ SetType(SD, ELF::STT_OBJECT);
break;
case MCSA_ELF_TypeTLS:
- SD.setFlags(SD.getFlags() | ELF_STT_Tls);
+ SetType(SD, ELF::STT_TLS);
break;
case MCSA_ELF_TypeCommon:
- SD.setFlags(SD.getFlags() | ELF_STT_Common);
+ SetType(SD, ELF::STT_COMMON);
break;
case MCSA_ELF_TypeNoType:
- SD.setFlags(SD.getFlags() | ELF_STT_Notype);
+ SetType(SD, ELF::STT_NOTYPE);
break;
case MCSA_Protected:
- SD.setFlags(SD.getFlags() | ELF_STV_Protected);
+ SetVisibility(SD, ELF::STV_PROTECTED);
break;
case MCSA_Hidden:
- SD.setFlags(SD.getFlags() | ELF_STV_Hidden);
+ SetVisibility(SD, ELF::STV_HIDDEN);
break;
case MCSA_Internal:
- SD.setFlags(SD.getFlags() | ELF_STV_Internal);
+ SetVisibility(SD, ELF::STV_INTERNAL);
break;
}
}
@@ -237,24 +337,38 @@ void MCELFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- if ((SD.getFlags() & (0xf << ELF_STB_Shift)) == ELF_STB_Local) {
+ if (!BindingExplicitlySet.count(Symbol)) {
+ SetBinding(SD, ELF::STB_GLOBAL);
+ SD.setExternal(true);
+ }
+
+ SetType(SD, ELF::STT_OBJECT);
+
+ if (GetBinding(SD) == ELF_STB_Local) {
const MCSection *Section = getAssembler().getContext().getELFSection(".bss",
- MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE |
- MCSectionELF::SHF_ALLOC,
+ ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
SectionKind::getBSS());
-
- MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
- MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
- SD.setFragment(F);
Symbol->setSection(*Section);
- SD.setSize(MCConstantExpr::Create(Size, getContext()));
+
+ struct LocalCommon L = {&SD, Size, ByteAlignment};
+ LocalCommons.push_back(L);
+ } else {
+ SD.setCommon(Size, ByteAlignment);
}
- SD.setFlags(SD.getFlags() | ELF_STB_Global);
- SD.setExternal(true);
+ SD.setSize(MCConstantExpr::Create(Size, getContext()));
+}
- SD.setCommon(Size, ByteAlignment);
+void MCELFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ // FIXME: Should this be caught and done earlier?
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SetBinding(SD, ELF::STB_LOCAL);
+ SD.setExternal(false);
+ BindingExplicitlySet.insert(Symbol);
+  // FIXME: ByteAlignment is not meaningful here, but EmitCommonSymbol requires it.
+ EmitCommonSymbol(Symbol, Size, 1);
}
void MCELFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
@@ -263,25 +377,6 @@ void MCELFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
}
-void MCELFStreamer::EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- MCDataFragment *DF = getOrCreateDataFragment();
-
- // Avoid fixups when possible.
- int64_t AbsValue;
- if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue)) {
- // FIXME: Endianness assumption.
- for (unsigned i = 0; i != Size; ++i)
- DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
- } else {
- DF->addFixup(MCFixup::Create(DF->getContents().size(), AddValueSymbols(Value),
- MCFixup::getKindForSize(Size)));
- DF->getContents().resize(DF->getContents().size() + Size, 0);
- }
-}
-
void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
int64_t Value, unsigned ValueSize,
unsigned MaxBytesToEmit) {
@@ -312,18 +407,11 @@ void MCELFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
getCurrentSectionData()->setAlignment(ByteAlignment);
}
-void MCELFStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {
- // TODO: This is exactly the same as MCMachOStreamer. Consider merging into
- // MCObjectStreamer.
- new MCOrgFragment(*Offset, Value, getCurrentSectionData());
-}
-
// Add a symbol for the file name of this module. This is the second
// entry in the module's symbol table (the first being the null symbol).
void MCELFStreamer::EmitFileDirective(StringRef Filename) {
MCSymbol *Symbol = getAssembler().getContext().GetOrCreateSymbol(Filename);
- Symbol->setSection(*CurSection);
+ Symbol->setSection(*getCurrentSection());
Symbol->setAbsolute();
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
@@ -331,21 +419,52 @@ void MCELFStreamer::EmitFileDirective(StringRef Filename) {
SD.setFlags(ELF_STT_File | ELF_STB_Local | ELF_STV_Default);
}
-void MCELFStreamer::EmitInstToFragment(const MCInst &Inst) {
- MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
+void MCELFStreamer::fixSymbolsInTLSFixups(const MCExpr *expr) {
+ switch (expr->getKind()) {
+ case MCExpr::Target: llvm_unreachable("Can't handle target exprs yet!");
+ case MCExpr::Constant:
+ break;
- // Add the fixups and data.
- //
- // FIXME: Revisit this design decision when relaxation is done, we may be
- // able to get away with not storing any extra data in the MCInst.
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
- VecOS.flush();
+ case MCExpr::Binary: {
+ const MCBinaryExpr *be = cast<MCBinaryExpr>(expr);
+ fixSymbolsInTLSFixups(be->getLHS());
+ fixSymbolsInTLSFixups(be->getRHS());
+ break;
+ }
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr &symRef = *cast<MCSymbolRefExpr>(expr);
+ switch (symRef.getKind()) {
+ default:
+ return;
+ case MCSymbolRefExpr::VK_NTPOFF:
+ case MCSymbolRefExpr::VK_GOTNTPOFF:
+ case MCSymbolRefExpr::VK_TLSGD:
+ case MCSymbolRefExpr::VK_TLSLDM:
+ case MCSymbolRefExpr::VK_TPOFF:
+ case MCSymbolRefExpr::VK_DTPOFF:
+ case MCSymbolRefExpr::VK_GOTTPOFF:
+ case MCSymbolRefExpr::VK_TLSLD:
+ case MCSymbolRefExpr::VK_ARM_TLSGD:
+ break;
+ }
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(symRef.getSymbol());
+ SetType(SD, ELF::STT_TLS);
+ break;
+ }
+
+ case MCExpr::Unary:
+ fixSymbolsInTLSFixups(cast<MCUnaryExpr>(expr)->getSubExpr());
+ break;
+ }
+}
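fixSymbolsInTLSFixups is a plain recursive descent: constants terminate, symbol references get their MCSymbolData marked STT_TLS when the variant kind is a TLS modifier, and unary/binary nodes recurse into their operands. The shape of such a walk over a toy expression type (an illustration only, not LLVM's MCExpr):

struct Expr {                       // toy stand-in for illustration
  enum { Constant, SymbolRef, Unary, Binary } Kind;
  const Expr *LHS, *RHS, *Sub;      // operands for Binary / Unary nodes
};

static void visitSymbolRefs(const Expr *E, void (*Visit)(const Expr *)) {
  switch (E->Kind) {
  case Expr::Constant:  return;            // leaf: nothing to do
  case Expr::SymbolRef: Visit(E); return;  // leaf: inspect the reference
  case Expr::Unary:     visitSymbolRefs(E->Sub, Visit); return;
  case Expr::Binary:
    visitSymbolRefs(E->LHS, Visit);
    visitSymbolRefs(E->RHS, Visit);
    return;
  }
}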
+
+void MCELFStreamer::EmitInstToFragment(const MCInst &Inst) {
+ this->MCObjectStreamer::EmitInstToFragment(Inst);
+ MCInstFragment &F = *cast<MCInstFragment>(getCurrentFragment());
- IF->getCode() = Code;
- IF->getFixups() = Fixups;
+ for (unsigned i = 0, e = F.getFixups().size(); i != e; ++i)
+ fixSymbolsInTLSFixups(F.getFixups()[i].getValue());
}
void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
@@ -357,6 +476,9 @@ void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
+ fixSymbolsInTLSFixups(Fixups[i].getValue());
+
// Add the fixups and data.
for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
@@ -365,44 +487,40 @@ void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
DF->getContents().append(Code.begin(), Code.end());
}
-void MCELFStreamer::EmitInstruction(const MCInst &Inst) {
- // Scan for values.
- for (unsigned i = 0; i != Inst.getNumOperands(); ++i)
- if (Inst.getOperand(i).isExpr())
- AddValueSymbols(Inst.getOperand(i).getExpr());
+void MCELFStreamer::Finish() {
+ if (getNumFrameInfos())
+ MCDwarfFrameEmitter::Emit(*this);
- getCurrentSectionData()->setHasInstructions(true);
+ for (std::vector<LocalCommon>::const_iterator i = LocalCommons.begin(),
+ e = LocalCommons.end();
+ i != e; ++i) {
+ MCSymbolData *SD = i->SD;
+ uint64_t Size = i->Size;
+ unsigned ByteAlignment = i->ByteAlignment;
+ const MCSymbol &Symbol = SD->getSymbol();
+ const MCSection &Section = Symbol.getSection();
- // If this instruction doesn't need relaxation, just emit it as data.
- if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
- EmitInstToData(Inst);
- return;
- }
+ MCSectionData &SectData = getAssembler().getOrCreateSectionData(Section);
+ new MCAlignFragment(ByteAlignment, 0, 1, ByteAlignment, &SectData);
- // Otherwise, if we are relaxing everything, relax the instruction as much as
- // possible and emit it as data.
- if (getAssembler().getRelaxAll()) {
- MCInst Relaxed;
- getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
- while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
- getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
- EmitInstToData(Relaxed);
- return;
- }
+ MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
+ SD->setFragment(F);
- // Otherwise emit to a separate fragment.
- EmitInstToFragment(Inst);
-}
+ // Update the maximum alignment of the section if necessary.
+ if (ByteAlignment > SectData.getAlignment())
+ SectData.setAlignment(ByteAlignment);
+ }
-void MCELFStreamer::Finish() {
- getAssembler().Finish();
+ this->MCObjectStreamer::Finish();
}
MCStreamer *llvm::createELFStreamer(MCContext &Context, TargetAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *CE,
- bool RelaxAll) {
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll, bool NoExecStack) {
MCELFStreamer *S = new MCELFStreamer(Context, TAB, OS, CE);
if (RelaxAll)
S->getAssembler().setRelaxAll(true);
+ if (NoExecStack)
+ S->getAssembler().setNoExecStack(true);
return S;
}
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index 343f334..54d3743 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -38,21 +38,31 @@ void MCExpr::print(raw_ostream &OS) const {
case MCExpr::SymbolRef: {
const MCSymbolRefExpr &SRE = cast<MCSymbolRefExpr>(*this);
const MCSymbol &Sym = SRE.getSymbol();
+ // Parenthesize names that start with $ so that they don't look like
+ // absolute names.
+ bool UseParens = Sym.getName()[0] == '$';
- if (SRE.getKind() == MCSymbolRefExpr::VK_ARM_HI16 ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_LO16)
+ if (SRE.getKind() == MCSymbolRefExpr::VK_PPC_HA16 ||
+ SRE.getKind() == MCSymbolRefExpr::VK_PPC_LO16) {
OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+ UseParens = true;
+ }
- // Parenthesize names that start with $ so that they don't look like
- // absolute names.
- if (Sym.getName()[0] == '$')
+ if (UseParens)
OS << '(' << Sym << ')';
else
OS << Sym;
- if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
- SRE.getKind() != MCSymbolRefExpr::VK_ARM_HI16 &&
- SRE.getKind() != MCSymbolRefExpr::VK_ARM_LO16)
+ if (SRE.getKind() == MCSymbolRefExpr::VK_ARM_PLT ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_TLSGD ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOT ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTOFF ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_TPOFF ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF)
+ OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+ else if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
+ SRE.getKind() != MCSymbolRefExpr::VK_PPC_HA16 &&
+ SRE.getKind() != MCSymbolRefExpr::VK_PPC_LO16)
OS << '@' << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
return;
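For example, with this change a VK_GOT reference still prints as foo@GOT, an ARM VK_ARM_PLT reference prints as foo(PLT) with no '@' separator, a PPC VK_PPC_HA16 reference prints as ha16(foo), and a symbol whose name begins with '$' prints parenthesized, e.g. ($tmp0), so it is not mistaken for an absolute expression.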
@@ -172,12 +182,23 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_GOTTPOFF: return "GOTTPOFF";
case VK_INDNTPOFF: return "INDNTPOFF";
case VK_NTPOFF: return "NTPOFF";
+ case VK_GOTNTPOFF: return "GOTNTPOFF";
case VK_PLT: return "PLT";
case VK_TLSGD: return "TLSGD";
+ case VK_TLSLD: return "TLSLD";
+ case VK_TLSLDM: return "TLSLDM";
case VK_TPOFF: return "TPOFF";
- case VK_ARM_HI16: return ":upper16:";
- case VK_ARM_LO16: return ":lower16:";
+ case VK_DTPOFF: return "DTPOFF";
case VK_TLVP: return "TLVP";
+ case VK_ARM_PLT: return "(PLT)";
+ case VK_ARM_GOT: return "(GOT)";
+ case VK_ARM_GOTOFF: return "(GOTOFF)";
+ case VK_ARM_TPOFF: return "(tpoff)";
+ case VK_ARM_GOTTPOFF: return "(gottpoff)";
+ case VK_ARM_TLSGD: return "(tlsgd)";
+ case VK_PPC_TOC: return "toc";
+ case VK_PPC_HA16: return "ha16";
+ case VK_PPC_LO16: return "lo16";
}
}
@@ -185,15 +206,33 @@ MCSymbolRefExpr::VariantKind
MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
return StringSwitch<VariantKind>(Name)
.Case("GOT", VK_GOT)
+ .Case("got", VK_GOT)
.Case("GOTOFF", VK_GOTOFF)
+ .Case("gotoff", VK_GOTOFF)
.Case("GOTPCREL", VK_GOTPCREL)
+ .Case("gotpcrel", VK_GOTPCREL)
.Case("GOTTPOFF", VK_GOTTPOFF)
+ .Case("gottpoff", VK_GOTTPOFF)
.Case("INDNTPOFF", VK_INDNTPOFF)
+ .Case("indntpoff", VK_INDNTPOFF)
.Case("NTPOFF", VK_NTPOFF)
+ .Case("ntpoff", VK_NTPOFF)
+ .Case("GOTNTPOFF", VK_GOTNTPOFF)
+ .Case("gotntpoff", VK_GOTNTPOFF)
.Case("PLT", VK_PLT)
+ .Case("plt", VK_PLT)
.Case("TLSGD", VK_TLSGD)
+ .Case("tlsgd", VK_TLSGD)
+ .Case("TLSLD", VK_TLSLD)
+ .Case("tlsld", VK_TLSLD)
+ .Case("TLSLDM", VK_TLSLDM)
+ .Case("tlsldm", VK_TLSLDM)
.Case("TPOFF", VK_TPOFF)
+ .Case("tpoff", VK_TPOFF)
+ .Case("DTPOFF", VK_DTPOFF)
+ .Case("dtpoff", VK_DTPOFF)
.Case("TLVP", VK_TLVP)
+ .Case("tlvp", VK_TLVP)
.Default(VK_Invalid);
}
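getVariantKindForName now accepts lower-case spellings alongside the historical upper-case ones. As a standalone illustration of the StringSwitch idiom used here (the enum and function names below are made up for the sketch):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

enum ModifierKind { MK_GOT, MK_PLT, MK_Invalid }; // illustrative names only

static ModifierKind parseModifier(llvm::StringRef Name) {
  // Each .Case is tried in order; .Default fires when nothing matched.
  return llvm::StringSwitch<ModifierKind>(Name)
      .Case("GOT", MK_GOT)
      .Case("got", MK_GOT)
      .Case("PLT", MK_PLT)
      .Case("plt", MK_PLT)
      .Default(MK_Invalid);
}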
@@ -203,7 +242,28 @@ void MCTargetExpr::Anchor() {}
/* *** */
-bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout *Layout) const {
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res) const {
+ return EvaluateAsAbsolute(Res, 0, 0, 0);
+}
+
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res,
+ const MCAsmLayout &Layout) const {
+ return EvaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, 0);
+}
+
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res,
+ const MCAsmLayout &Layout,
+ const SectionAddrMap &Addrs) const {
+ return EvaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, &Addrs);
+}
+
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const {
+ return EvaluateAsAbsolute(Res, &Asm, 0, 0);
+}
+
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs) const {
MCValue Value;
// Fast path constants.
@@ -212,37 +272,159 @@ bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout *Layout) const {
return true;
}
- if (!EvaluateAsRelocatable(Value, Layout) || !Value.isAbsolute())
- return false;
+  // FIXME: The use of InSet = Addrs is a hack. Setting InSet causes us to
+  // absolutize differences across sections, and that is what the MachO writer
+  // uses Addrs for.
+ bool IsRelocatable =
+ EvaluateAsRelocatableImpl(Value, Asm, Layout, Addrs, /*InSet*/ Addrs);
+ // Record the current value.
Res = Value.getConstant();
- return true;
+
+ return IsRelocatable && Value.isAbsolute();
+}
+
+/// \brief Helper method for \see EvaluateSymbolicAdd().
+static void AttemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs,
+ bool InSet,
+ const MCSymbolRefExpr *&A,
+ const MCSymbolRefExpr *&B,
+ int64_t &Addend) {
+ if (!A || !B)
+ return;
+
+ const MCSymbol &SA = A->getSymbol();
+ const MCSymbol &SB = B->getSymbol();
+
+ if (SA.isUndefined() || SB.isUndefined())
+ return;
+
+ if (!Asm->getWriter().IsSymbolRefDifferenceFullyResolved(*Asm, A, B, InSet))
+ return;
+
+ MCSymbolData &AD = Asm->getSymbolData(SA);
+ MCSymbolData &BD = Asm->getSymbolData(SB);
+
+ if (AD.getFragment() == BD.getFragment()) {
+ Addend += (AD.getOffset() - BD.getOffset());
+
+ // Clear the symbol expr pointers to indicate we have folded these
+ // operands.
+ A = B = 0;
+ return;
+ }
+
+ if (!Layout)
+ return;
+
+ const MCSectionData &SecA = *AD.getFragment()->getParent();
+ const MCSectionData &SecB = *BD.getFragment()->getParent();
+
+ if ((&SecA != &SecB) && !Addrs)
+ return;
+
+ // Eagerly evaluate.
+ Addend += (Layout->getSymbolOffset(&Asm->getSymbolData(A->getSymbol())) -
+ Layout->getSymbolOffset(&Asm->getSymbolData(B->getSymbol())));
+ if (Addrs && (&SecA != &SecB))
+ Addend += (Addrs->lookup(&SecA) - Addrs->lookup(&SecB));
+
+ // Clear the symbol expr pointers to indicate we have folded these
+ // operands.
+ A = B = 0;
}
-static bool EvaluateSymbolicAdd(const MCValue &LHS,const MCSymbolRefExpr *RHS_A,
+/// \brief Evaluate the result of an add between (conceptually) two MCValues.
+///
+/// This routine conceptually attempts to construct an MCValue:
+/// Result = (Result_A - Result_B + Result_Cst)
+/// from two MCValue's LHS and RHS where
+/// Result = LHS + RHS
+/// and
+/// Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst).
+///
+/// This routine attempts to aggressively fold the operands such that the result
+/// is representable in an MCValue, but may not always succeed.
+///
+/// \returns True on success, false if the result is not representable in an
+/// MCValue.
+
+/// NOTE: It is really important to have both the Asm and Layout arguments.
+/// They might look redundant, but this function can be used before layout
+/// is done (see the object streamer for example) and having the Asm argument
+/// lets us avoid relaxations early.
+static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs,
+ bool InSet,
+ const MCValue &LHS,const MCSymbolRefExpr *RHS_A,
const MCSymbolRefExpr *RHS_B, int64_t RHS_Cst,
MCValue &Res) {
- // We can't add or subtract two symbols.
- if ((LHS.getSymA() && RHS_A) ||
- (LHS.getSymB() && RHS_B))
+ // FIXME: This routine (and other evaluation parts) are *incredibly* sloppy
+ // about dealing with modifiers. This will ultimately bite us, one day.
+ const MCSymbolRefExpr *LHS_A = LHS.getSymA();
+ const MCSymbolRefExpr *LHS_B = LHS.getSymB();
+ int64_t LHS_Cst = LHS.getConstant();
+
+ // Fold the result constant immediately.
+ int64_t Result_Cst = LHS_Cst + RHS_Cst;
+
+ assert((!Layout || Asm) &&
+ "Must have an assembler object if layout is given!");
+
+ // If we have a layout, we can fold resolved differences.
+ if (Asm) {
+ // First, fold out any differences which are fully resolved. By
+ // reassociating terms in
+ // Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst).
+ // we have the four possible differences:
+ // (LHS_A - LHS_B),
+ // (LHS_A - RHS_B),
+ // (RHS_A - LHS_B),
+ // (RHS_A - RHS_B).
+  // Since we are attempting to be as aggressive as possible about folding, we
+ // attempt to evaluate each possible alternative.
+ AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, LHS_B,
+ Result_Cst);
+ AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, RHS_B,
+ Result_Cst);
+ AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, LHS_B,
+ Result_Cst);
+ AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, RHS_B,
+ Result_Cst);
+ }
+
+ // We can't represent the addition or subtraction of two symbols.
+ if ((LHS_A && RHS_A) || (LHS_B && RHS_B))
return false;
- const MCSymbolRefExpr *A = LHS.getSymA() ? LHS.getSymA() : RHS_A;
- const MCSymbolRefExpr *B = LHS.getSymB() ? LHS.getSymB() : RHS_B;
- if (B) {
- // If we have a negated symbol, then we must have also have a non-negated
- // symbol in order to encode the expression. We can do this check later to
- // permit expressions which eventually fold to a representable form -- such
- // as (a + (0 - b)) -- if necessary.
- if (!A)
- return false;
- }
- Res = MCValue::get(A, B, LHS.getConstant() + RHS_Cst);
+ // At this point, we have at most one additive symbol and one subtractive
+ // symbol -- find them.
+ const MCSymbolRefExpr *A = LHS_A ? LHS_A : RHS_A;
+ const MCSymbolRefExpr *B = LHS_B ? LHS_B : RHS_B;
+
+  // If we have a negated symbol, then we must also have a non-negated
+ // symbol in order to encode the expression.
+ if (B && !A)
+ return false;
+
+ Res = MCValue::get(A, B, Result_Cst);
return true;
}
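A worked case: let L1 and L2 be defined in the same fragment at offsets 4 and 20, and evaluate L2 - L1. The Sub path calls EvaluateSymbolicAdd with LHS = (L2 - 0 + 0), RHS_A = 0, RHS_B = L1, and RHS_Cst = 0. AttemptToFoldSymbolOffsetDifference matches the (LHS_A, RHS_B) pair, adds 20 - 4 = 16 to Result_Cst, and clears both symbol pointers, so the expression folds to the absolute value 16 and needs no relocation.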
bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
- const MCAsmLayout *Layout) const {
+ const MCAsmLayout &Layout) const {
+ return EvaluateAsRelocatableImpl(Res, &Layout.getAssembler(), &Layout,
+ 0, false);
+}
+
+bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs,
+ bool InSet) const {
++stats::MCExprEvaluate;
switch (getKind()) {
@@ -258,26 +440,15 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
const MCSymbol &Sym = SRE->getSymbol();
// Evaluate recursively if this is a variable.
- if (Sym.isVariable()) {
- if (!Sym.getVariableValue()->EvaluateAsRelocatable(Res, Layout))
- return false;
-
- // Absolutize symbol differences between defined symbols when we have a
- // layout object and the target requests it.
- if (Layout && Res.getSymB() &&
- Layout->getAssembler().getBackend().hasAbsolutizedSet() &&
- Res.getSymA()->getSymbol().isDefined() &&
- Res.getSymB()->getSymbol().isDefined()) {
- MCSymbolData &A =
- Layout->getAssembler().getSymbolData(Res.getSymA()->getSymbol());
- MCSymbolData &B =
- Layout->getAssembler().getSymbolData(Res.getSymB()->getSymbol());
- Res = MCValue::get(+ Layout->getSymbolAddress(&A)
- - Layout->getSymbolAddress(&B)
- + Res.getConstant());
- }
-
- return true;
+ if (Sym.isVariable() && SRE->getKind() == MCSymbolRefExpr::VK_None) {
+ bool Ret = Sym.getVariableValue()->EvaluateAsRelocatableImpl(Res, Asm,
+ Layout,
+ Addrs,
+ true);
+ // If we failed to simplify this to a constant, let the target
+ // handle it.
+ if (Ret && !Res.getSymA() && !Res.getSymB())
+ return true;
}
Res = MCValue::get(SRE, 0, 0);
@@ -288,7 +459,8 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this);
MCValue Value;
- if (!AUE->getSubExpr()->EvaluateAsRelocatable(Value, Layout))
+ if (!AUE->getSubExpr()->EvaluateAsRelocatableImpl(Value, Asm, Layout,
+ Addrs, InSet))
return false;
switch (AUE->getOpcode()) {
@@ -321,8 +493,10 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this);
MCValue LHSValue, RHSValue;
- if (!ABE->getLHS()->EvaluateAsRelocatable(LHSValue, Layout) ||
- !ABE->getRHS()->EvaluateAsRelocatable(RHSValue, Layout))
+ if (!ABE->getLHS()->EvaluateAsRelocatableImpl(LHSValue, Asm, Layout,
+ Addrs, InSet) ||
+ !ABE->getRHS()->EvaluateAsRelocatableImpl(RHSValue, Asm, Layout,
+ Addrs, InSet))
return false;
// We only support a few operations on non-constant expressions, handle
@@ -333,13 +507,13 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
return false;
case MCBinaryExpr::Sub:
// Negate RHS and add.
- return EvaluateSymbolicAdd(LHSValue,
+ return EvaluateSymbolicAdd(Asm, Layout, Addrs, InSet, LHSValue,
RHSValue.getSymB(), RHSValue.getSymA(),
-RHSValue.getConstant(),
Res);
case MCBinaryExpr::Add:
- return EvaluateSymbolicAdd(LHSValue,
+ return EvaluateSymbolicAdd(Asm, Layout, Addrs, InSet, LHSValue,
RHSValue.getSymA(), RHSValue.getSymB(),
RHSValue.getConstant(),
Res);
diff --git a/contrib/llvm/lib/MC/MCLoggingStreamer.cpp b/contrib/llvm/lib/MC/MCLoggingStreamer.cpp
index b96040a..012c7f6 100644
--- a/contrib/llvm/lib/MC/MCLoggingStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCLoggingStreamer.cpp
@@ -48,10 +48,14 @@ public:
return Child->AddBlankLine();
}
- virtual void SwitchSection(const MCSection *Section) {
- CurSection = Section;
- LogCall("SwitchSection");
- return Child->SwitchSection(Section);
+ virtual void ChangeSection(const MCSection *Section) {
+ LogCall("ChangeSection");
+ return Child->ChangeSection(Section);
+ }
+
+ virtual void InitSections() {
+ LogCall("InitSections");
+ return Child->InitSections();
}
virtual void EmitLabel(MCSymbol *Symbol) {
@@ -64,11 +68,28 @@ public:
return Child->EmitAssemblerFlag(Flag);
}
+ virtual void EmitThumbFunc(MCSymbol *Func) {
+ LogCall("EmitThumbFunc");
+ return Child->EmitThumbFunc(Func);
+ }
+
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
LogCall("EmitAssignment");
return Child->EmitAssignment(Symbol, Value);
}
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
+ LogCall("EmitWeakReference");
+ return Child->EmitWeakReference(Alias, Symbol);
+ }
+
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ LogCall("EmitDwarfAdvanceLineAddr");
+ return Child->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label);
+ }
+
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
LogCall("EmitSymbolAttribute");
return Child->EmitSymbolAttribute(Symbol, Attribute);
@@ -132,14 +153,22 @@ public:
return Child->EmitBytes(Data, AddrSpace);
}
- virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace){
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace){
LogCall("EmitValue");
- return Child->EmitValue(Value, Size, AddrSpace);
+ return Child->EmitValueImpl(Value, Size, isPCRel, AddrSpace);
+ }
+
+ virtual void EmitULEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) {
+ LogCall("EmitULEB128Value");
+ return Child->EmitULEB128Value(Value, AddrSpace);
}
- virtual void EmitIntValue(uint64_t Value, unsigned Size, unsigned AddrSpace) {
- LogCall("EmitIntValue");
- return Child->EmitIntValue(Value, Size, AddrSpace);
+ virtual void EmitSLEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) {
+ LogCall("EmitSLEB128Value");
+ return Child->EmitSLEB128Value(Value, AddrSpace);
}
virtual void EmitGPRel32Value(const MCExpr *Value) {
@@ -178,12 +207,23 @@ public:
return Child->EmitFileDirective(Filename);
}
- virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
LogCall("EmitDwarfFileDirective",
"FileNo:" + Twine(FileNo) + " Filename:" + Filename);
return Child->EmitDwarfFileDirective(FileNo, Filename);
}
+ virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator) {
+ LogCall("EmitDwarfLocDirective",
+ "FileNo:" + Twine(FileNo) + " Line:" + Twine(Line) +
+ " Column:" + Twine(Column) + " Flags:" + Twine(Flags) +
+ " Isa:" + Twine(Isa) + " Discriminator:" + Twine(Discriminator));
+ return Child->EmitDwarfLocDirective(FileNo, Line, Column, Flags,
+ Isa, Discriminator);
+ }
+
virtual void EmitInstruction(const MCInst &Inst) {
LogCall("EmitInstruction");
return Child->EmitInstruction(Inst);
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index 671874d..d1f9f5c 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -20,9 +20,11 @@
#include "llvm/MC/MCMachOSymbolFlags.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
@@ -30,13 +32,7 @@ namespace {
class MCMachOStreamer : public MCObjectStreamer {
private:
- void EmitInstToFragment(const MCInst &Inst);
- void EmitInstToData(const MCInst &Inst);
- // FIXME: These will likely moved to a better place.
- void MakeLineEntryForSection(const MCSection *Section);
- const MCExpr * MakeStartMinusEndExpr(MCSymbol *Start, MCSymbol *End,
- int IntVal);
- void EmitDwarfFileTable(void);
+ virtual void EmitInstToData(const MCInst &Inst);
public:
MCMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
@@ -46,8 +42,10 @@ public:
/// @name MCStreamer Interface
/// @{
+ virtual void InitSections();
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
@@ -76,17 +74,11 @@ public:
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
- virtual void EmitGPRel32Value(const MCExpr *Value) {
- assert(0 && "macho doesn't support this directive");
- }
virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
unsigned ValueSize = 1,
unsigned MaxBytesToEmit = 0);
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0);
- virtual void EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0);
virtual void EmitFileDirective(StringRef Filename) {
// FIXME: Just ignore the .file; it isn't important enough to fail the
@@ -94,14 +86,6 @@ public:
//report_fatal_error("unsupported directive: '.file'");
}
- virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
- // FIXME: Just ignore the .file; it isn't important enough to fail the
- // entire assembly.
-
- //report_fatal_error("unsupported directive: '.file'");
- }
-
- virtual void EmitInstruction(const MCInst &Inst);
virtual void Finish();
@@ -110,31 +94,26 @@ public:
} // end anonymous namespace.
-void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
- // TODO: This is almost exactly the same as WinCOFFStreamer. Consider merging
- // into MCObjectStreamer.
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(CurSection && "Cannot emit before setting section!");
+void MCMachOStreamer::InitSections() {
+ SwitchSection(getContext().getMachOSection("__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ 0, SectionKind::getText()));
- Symbol->setSection(*CurSection);
+}
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ // isSymbolLinkerVisible uses the section.
+ Symbol->setSection(*getCurrentSection());
// We have to create a new fragment if this is an atom defining symbol,
// fragments cannot span atoms.
- if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
+ if (getAssembler().isSymbolLinkerVisible(*Symbol))
new MCDataFragment(getCurrentSectionData());
- // FIXME: This is wasteful, we don't necessarily need to create a data
- // fragment. Instead, we should mark the symbol as pointing into the data
- // fragment if it exists, otherwise we should just queue the label and set its
- // fragment pointer when we emit the next fragment.
- MCDataFragment *F = getOrCreateDataFragment();
- assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
- SD.setFragment(F);
- SD.setOffset(F->getContents().size());
+ MCObjectStreamer::EmitLabel(Symbol);
+ MCSymbolData &SD = getAssembler().getSymbolData(*Symbol);
// This causes the reference type flag to be cleared. Darwin 'as' was "trying"
// to clear the weak reference and weak definition bits too, but the
// implementation was buggy. For now we just try to match 'as', for
@@ -146,13 +125,31 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
}
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+  // Let the target do whatever target-specific stuff it needs to do.
+ getAssembler().getBackend().HandleAssemblerFlag(Flag);
+ // Do any generic stuff we need to do.
switch (Flag) {
+ case MCAF_SyntaxUnified: return; // no-op here.
+ case MCAF_Code16: return; // no-op here.
+ case MCAF_Code32: return; // no-op here.
case MCAF_SubsectionsViaSymbols:
getAssembler().setSubsectionsViaSymbols(true);
return;
+ default:
+ llvm_unreachable("invalid assembler flag!");
}
+}
+
+void MCMachOStreamer::EmitThumbFunc(MCSymbol *Symbol) {
+ // FIXME: Flag the function ISA as thumb with DW_AT_APPLE_isa.
- assert(0 && "invalid assembler flag!");
+ // Remember that the function is a thumb function. Fixup and relocation
+  // values will need to be adjusted.
+ getAssembler().setIsThumbFunc(Symbol);
+
+ // Mark the thumb bit on the symbol.
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.setFlags(SD.getFlags() | SF_ThumbFunc);
}
void MCMachOStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
@@ -196,6 +193,7 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_ELF_TypeTLS:
case MCSA_ELF_TypeCommon:
case MCSA_ELF_TypeNoType:
+ case MCSA_ELF_TypeGnuUniqueObject:
case MCSA_IndirectSymbol:
case MCSA_Hidden:
case MCSA_Internal:
@@ -230,6 +228,10 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
SD.setFlags(SD.getFlags() | SF_NoDeadStrip);
break;
+ case MCSA_SymbolResolver:
+ SD.setFlags(SD.getFlags() | SF_SymbolResolver);
+ break;
+
case MCSA_PrivateExtern:
SD.setExternal(true);
SD.setPrivateExtern(true);
@@ -313,26 +315,6 @@ void MCMachOStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
}
-void MCMachOStreamer::EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
- // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
- // MCObjectStreamer.
- MCDataFragment *DF = getOrCreateDataFragment();
-
- // Avoid fixups when possible.
- int64_t AbsValue;
- if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue)) {
- // FIXME: Endianness assumption.
- for (unsigned i = 0; i != Size; ++i)
- DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
- } else {
- DF->addFixup(MCFixup::Create(DF->getContents().size(),
- AddValueSymbols(Value),
- MCFixup::getKindForSize(Size)));
- DF->getContents().resize(DF->getContents().size() + Size, 0);
- }
-}
-
void MCMachOStreamer::EmitValueToAlignment(unsigned ByteAlignment,
int64_t Value, unsigned ValueSize,
unsigned MaxBytesToEmit) {
@@ -363,28 +345,6 @@ void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
getCurrentSectionData()->setAlignment(ByteAlignment);
}
-void MCMachOStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {
- new MCOrgFragment(*Offset, Value, getCurrentSectionData());
-}
-
-void MCMachOStreamer::EmitInstToFragment(const MCInst &Inst) {
- MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
-
- // Add the fixups and data.
- //
- // FIXME: Revisit this design decision when relaxation is done, we may be
- // able to get away with not storing any extra data in the MCInst.
- SmallVector<MCFixup, 4> Fixups;
- SmallString<256> Code;
- raw_svector_ostream VecOS(Code);
- getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
- VecOS.flush();
-
- IF->getCode() = Code;
- IF->getFixups() = Fixups;
-}
-
void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
MCDataFragment *DF = getOrCreateDataFragment();
@@ -402,240 +362,7 @@ void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
DF->getContents().append(Code.begin(), Code.end());
}
-void MCMachOStreamer::EmitInstruction(const MCInst &Inst) {
- // Scan for values.
- for (unsigned i = Inst.getNumOperands(); i--; )
- if (Inst.getOperand(i).isExpr())
- AddValueSymbols(Inst.getOperand(i).getExpr());
-
- getCurrentSectionData()->setHasInstructions(true);
-
- // Now that a machine instruction has been assembled into this section, make
- // a line entry for any .loc directive that has been seen.
- MakeLineEntryForSection(getCurrentSection());
-
- // If this instruction doesn't need relaxation, just emit it as data.
- if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
- EmitInstToData(Inst);
- return;
- }
-
- // Otherwise, if we are relaxing everything, relax the instruction as much as
- // possible and emit it as data.
- if (getAssembler().getRelaxAll()) {
- MCInst Relaxed;
- getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
- while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
- getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
- EmitInstToData(Relaxed);
- return;
- }
-
- // Otherwise emit to a separate fragment.
- EmitInstToFragment(Inst);
-}
-
-//
-// This is called when an instruction is assembled into the specified section
-// and if there is information from the last .loc directive that has yet to have
-// a line entry made for it is made.
-//
-void MCMachOStreamer::MakeLineEntryForSection(const MCSection *Section) {
- if (!getContext().getDwarfLocSeen())
- return;
-
- // Create a symbol at in the current section for use in the line entry.
- MCSymbol *LineSym = getContext().CreateTempSymbol();
- // Set the value of the symbol to use for the MCLineEntry.
- EmitLabel(LineSym);
-
- // Get the current .loc info saved in the context.
- const MCDwarfLoc &DwarfLoc = getContext().getCurrentDwarfLoc();
-
- // Create a (local) line entry with the symbol and the current .loc info.
- MCLineEntry LineEntry(LineSym, DwarfLoc);
-
- // clear DwarfLocSeen saying the current .loc info is now used.
- getContext().clearDwarfLocSeen();
-
- // Get the MCLineSection for this section, if one does not exist for this
- // section create it.
- DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
- getContext().getMCLineSections();
- MCLineSection *LineSection = MCLineSections[Section];
- if (!LineSection) {
- // Create a new MCLineSection. This will be deleted after the dwarf line
- // table is created using it by iterating through the MCLineSections
- // DenseMap.
- LineSection = new MCLineSection;
- // Save a pointer to the new LineSection into the MCLineSections DenseMap.
- MCLineSections[Section] = LineSection;
- }
-
- // Add the line entry to this section's entries.
- LineSection->addLineEntry(LineEntry);
-}
-
-//
-// This helper routine returns an expression of End - Start + IntVal for use
-// by EmitDwarfFileTable() below.
-//
-const MCExpr * MCMachOStreamer::MakeStartMinusEndExpr(MCSymbol *Start,
- MCSymbol *End,
- int IntVal) {
- MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- const MCExpr *Res =
- MCSymbolRefExpr::Create(End, Variant, getContext());
- const MCExpr *RHS =
- MCSymbolRefExpr::Create(Start, Variant, getContext());
- const MCExpr *Res1 =
- MCBinaryExpr::Create(MCBinaryExpr::Sub, Res, RHS,getContext());
- const MCExpr *Res2 =
- MCConstantExpr::Create(IntVal, getContext());
- const MCExpr *Res3 =
- MCBinaryExpr::Create(MCBinaryExpr::Sub, Res1, Res2, getContext());
- return Res3;
-}
-
-//
-// This emits the Dwarf file (and eventually the line) table.
-//
-void MCMachOStreamer::EmitDwarfFileTable(void) {
- // For now make sure we don't put out the Dwarf file table if no .file
- // directives were seen.
- const std::vector<MCDwarfFile *> &MCDwarfFiles =
- getContext().getMCDwarfFiles();
- if (MCDwarfFiles.size() == 0)
- return;
-
- // This is the Mach-O section, for ELF it is the .debug_line section.
- SwitchSection(getContext().getMachOSection("__DWARF", "__debug_line",
- MCSectionMachO::S_ATTR_DEBUG,
- 0, SectionKind::getDataRelLocal()));
-
- // Create a symbol at the beginning of this section.
- MCSymbol *LineStartSym = getContext().CreateTempSymbol();
- // Set the value of the symbol, as we are at the start of the section.
- EmitLabel(LineStartSym);
-
- // Create a symbol for the end of the section (to be set when we get there).
- MCSymbol *LineEndSym = getContext().CreateTempSymbol();
-
- // The first 4 bytes is the total length of the information for this
- // compilation unit (not including these 4 bytes for the length).
- EmitValue(MakeStartMinusEndExpr(LineStartSym, LineEndSym, 4), 4, 0);
-
- // Next 2 bytes is the Version, which is Dwarf 2.
- EmitIntValue(2, 2);
-
- // Create a symbol for the end of the prologue (to be set when we get there).
- MCSymbol *ProEndSym = getContext().CreateTempSymbol(); // Lprologue_end
-
- // Length of the prologue, is the next 4 bytes. Which is the start of the
- // section to the end of the prologue. Not including the 4 bytes for the
- // total length, the 2 bytes for the version, and these 4 bytes for the
- // length of the prologue.
- EmitValue(MakeStartMinusEndExpr(LineStartSym, ProEndSym, (4 + 2 + 4)), 4, 0);
-
- // Parameters of the state machine, are next.
- // Define the architecture-dependent minimum instruction length (in
- // bytes). This value should be rather too small than too big. */
- // DWARF2_LINE_MIN_INSN_LENGTH
- EmitIntValue(1, 1);
- // Flag that indicates the initial value of the is_stmt_start flag.
- // DWARF2_LINE_DEFAULT_IS_STMT
- EmitIntValue(1, 1);
- // Minimum line offset in a special line info. opcode. This value
- // was chosen to give a reasonable range of values. */
- // DWARF2_LINE_BASE
- EmitIntValue(uint64_t(-5), 1);
- // Range of line offsets in a special line info. opcode.
- // DWARF2_LINE_RANGE
- EmitIntValue(14, 1);
- // First special line opcode - leave room for the standard opcodes.
- // DWARF2_LINE_OPCODE_BASE
- EmitIntValue(13, 1);
-
- // Standard opcode lengths
- EmitIntValue(0, 1); // length of DW_LNS_copy
- EmitIntValue(1, 1); // length of DW_LNS_advance_pc
- EmitIntValue(1, 1); // length of DW_LNS_advance_line
- EmitIntValue(1, 1); // length of DW_LNS_set_file
- EmitIntValue(1, 1); // length of DW_LNS_set_column
- EmitIntValue(0, 1); // length of DW_LNS_negate_stmt
- EmitIntValue(0, 1); // length of DW_LNS_set_basic_block
- EmitIntValue(0, 1); // length of DW_LNS_const_add_pc
- EmitIntValue(1, 1); // length of DW_LNS_fixed_advance_pc
- EmitIntValue(0, 1); // length of DW_LNS_set_prologue_end
- EmitIntValue(0, 1); // length of DW_LNS_set_epilogue_begin
- EmitIntValue(1, 1); // DW_LNS_set_isa
-
- // Put out the directory and file tables.
-
- // First the directory table.
- const std::vector<StringRef> &MCDwarfDirs =
- getContext().getMCDwarfDirs();
- for (unsigned i = 0; i < MCDwarfDirs.size(); i++) {
- EmitBytes(MCDwarfDirs[i], 0); // the DirectoryName
- EmitBytes(StringRef("\0", 1), 0); // the null termination of the string
- }
- EmitIntValue(0, 1); // Terminate the directory list
-
- // Second the file table.
- for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
- EmitBytes(MCDwarfFiles[i]->getName(), 0); // FileName
- EmitBytes(StringRef("\0", 1), 0); // the null termination of the string
- // FIXME the Directory number should be a .uleb128 not a .byte
- EmitIntValue(MCDwarfFiles[i]->getDirIndex(), 1);
- EmitIntValue(0, 1); // last modification timestamp (always 0)
- EmitIntValue(0, 1); // filesize (always 0)
- }
- EmitIntValue(0, 1); // Terminate the file list
-
- // This is the end of the prologue, so set the value of the symbol at the
- // end of the prologue (that was used in a previous expression).
- EmitLabel(ProEndSym);
-
- // TODO: This is the point where the line tables would be emitted.
-
- // Delete the MCLineSections that were created in
- // MCMachOStreamer::MakeLineEntryForSection() and used to emit the line
- // tables.
- DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
- getContext().getMCLineSections();
- for (DenseMap<const MCSection *, MCLineSection *>::iterator it =
- MCLineSections.begin(), ie = MCLineSections.end(); it != ie; ++it) {
- delete it->second;
- }
-
- // If there are no line tables emited then we emit:
- // The following DW_LNE_set_address sequence to set the address to zero
- // TODO test for 32-bit or 64-bit output
- // This is the sequence for 32-bit code
- EmitIntValue(0, 1);
- EmitIntValue(5, 1);
- EmitIntValue(2, 1);
- EmitIntValue(0, 1);
- EmitIntValue(0, 1);
- EmitIntValue(0, 1);
- EmitIntValue(0, 1);
-
- // Lastly emit the DW_LNE_end_sequence which consists of 3 bytes '00 01 01'
- // (00 is the code for extended opcodes, followed by a ULEB128 length of the
- // extended opcode (01), and the DW_LNE_end_sequence (01).
- EmitIntValue(0, 1); // DW_LNS_extended_op
- EmitIntValue(1, 1); // ULEB128 length of the extended opcode
- EmitIntValue(1, 1); // DW_LNE_end_sequence
-
- // This is the end of the section, so set the value of the symbol at the end
- // of this section (that was used in a previous expression).
- EmitLabel(LineEndSym);
-}
-
void MCMachOStreamer::Finish() {
- // Dump out the dwarf file and directory tables (soon to include line table)
- EmitDwarfFileTable();
-
// We have to set the fragment atom associations so we can relax properly for
// Mach-O.
diff --git a/contrib/llvm/lib/MC/MCMachObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCMachObjectTargetWriter.cpp
new file mode 100644
index 0000000..146cebf
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCMachObjectTargetWriter.cpp
@@ -0,0 +1,22 @@
+//===-- MCMachObjectTargetWriter.cpp - Mach-O Target Writer Subclass ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCMachObjectWriter.h"
+
+using namespace llvm;
+
+MCMachObjectTargetWriter::MCMachObjectTargetWriter(
+ bool Is64Bit_, uint32_t CPUType_, uint32_t CPUSubtype_,
+ bool UseAggressiveSymbolFolding_)
+ : Is64Bit(Is64Bit_), CPUType(CPUType_), CPUSubtype(CPUSubtype_),
+ UseAggressiveSymbolFolding(UseAggressiveSymbolFolding_) {
+}
+
+MCMachObjectTargetWriter::~MCMachObjectTargetWriter() {
+}
diff --git a/contrib/llvm/lib/MC/MCNullStreamer.cpp b/contrib/llvm/lib/MC/MCNullStreamer.cpp
index f7a2f20..08ddf01 100644
--- a/contrib/llvm/lib/MC/MCNullStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCNullStreamer.cpp
@@ -25,20 +25,26 @@ namespace {
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section) {
- PrevSection = CurSection;
- CurSection = Section;
+ virtual void InitSections() {
+ }
+
+ virtual void ChangeSection(const MCSection *Section) {
}
virtual void EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(CurSection && "Cannot emit before setting section!");
- Symbol->setSection(*CurSection);
+ assert(getCurrentSection() && "Cannot emit before setting section!");
+ Symbol->setSection(*getCurrentSection());
}
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {}
+ virtual void EmitThumbFunc(MCSymbol *Func) {}
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {}
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol){}
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) {}
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute){}
@@ -60,8 +66,12 @@ namespace {
uint64_t Size, unsigned ByteAlignment) {}
virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {}
- virtual void EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {}
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) {}
+ virtual void EmitULEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) {}
+ virtual void EmitSLEB128Value(const MCExpr *Value,
+ unsigned AddrSpace = 0) {}
virtual void EmitGPRel32Value(const MCExpr *Value) {}
virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
unsigned ValueSize = 1,
@@ -74,7 +84,12 @@ namespace {
unsigned char Value = 0) {}
virtual void EmitFileDirective(StringRef Filename) {}
- virtual void EmitDwarfFileDirective(unsigned FileNo,StringRef Filename) {}
+    virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ return false;
+ }
+ virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator) {}
virtual void EmitInstruction(const MCInst &Inst) {}
virtual void Finish() {}
diff --git a/contrib/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
index 2b2385e..0358266 100644
--- a/contrib/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
@@ -7,19 +7,26 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
MCObjectStreamer::MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
- raw_ostream &_OS, MCCodeEmitter *_Emitter)
- : MCStreamer(Context), Assembler(new MCAssembler(Context, TAB,
- *_Emitter, _OS)),
+ raw_ostream &OS, MCCodeEmitter *Emitter_)
+ : MCStreamer(Context),
+ Assembler(new MCAssembler(Context, TAB,
+ *Emitter_, *TAB.createObjectWriter(OS),
+ OS)),
CurSectionData(0)
{
}
@@ -27,6 +34,7 @@ MCObjectStreamer::MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
MCObjectStreamer::~MCObjectStreamer() {
delete &Assembler->getBackend();
delete &Assembler->getEmitter();
+ delete &Assembler->getWriter();
delete Assembler;
}
@@ -48,7 +56,10 @@ MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() const {
const MCExpr *MCObjectStreamer::AddValueSymbols(const MCExpr *Value) {
switch (Value->getKind()) {
- case MCExpr::Target: llvm_unreachable("Can't handle target exprs yet!");
+ case MCExpr::Target:
+ cast<MCTargetExpr>(Value)->AddValueSymbols(Assembler);
+ break;
+
case MCExpr::Constant:
break;
@@ -71,17 +82,173 @@ const MCExpr *MCObjectStreamer::AddValueSymbols(const MCExpr *Value) {
return Value;
}
-void MCObjectStreamer::SwitchSection(const MCSection *Section) {
- assert(Section && "Cannot switch to a null section!");
+void MCObjectStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) {
+ assert(AddrSpace == 0 && "Address space must be 0!");
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ // Avoid fixups when possible.
+ int64_t AbsValue;
+ if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue, getAssembler())) {
+ EmitIntValue(AbsValue, Size, AddrSpace);
+ return;
+ }
+ DF->addFixup(MCFixup::Create(DF->getContents().size(),
+ Value,
+ MCFixup::getKindForSize(Size, isPCRel)));
+ DF->getContents().resize(DF->getContents().size() + Size, 0);
+}
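
The split into EmitValueImpl centralizes the fold-or-fixup decision. A sketch
of the two outcomes (FK_Data_4 is the kind getKindForSize(4, false) resolves
to):

    .long 42     -> expression folds; EmitIntValue writes the four bytes now
    .long foo    -> not absolute; a 4-byte fixup (FK_Data_4) is recorded and
                    the fragment is padded with zeros for the writer to patch
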
+
+void MCObjectStreamer::EmitLabel(MCSymbol *Symbol) {
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
+ assert(getCurrentSection() && "Cannot emit before setting section!");
+
+ Symbol->setSection(*getCurrentSection());
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ // FIXME: This is wasteful, we don't necessarily need to create a data
+ // fragment. Instead, we should mark the symbol as pointing into the data
+ // fragment if it exists, otherwise we should just queue the label and set its
+ // fragment pointer when we emit the next fragment.
+ MCDataFragment *F = getOrCreateDataFragment();
+ assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
+ SD.setFragment(F);
+ SD.setOffset(F->getContents().size());
+}
+
+void MCObjectStreamer::EmitULEB128Value(const MCExpr *Value,
+ unsigned AddrSpace) {
+ int64_t IntValue;
+ if (Value->EvaluateAsAbsolute(IntValue, getAssembler())) {
+ EmitULEB128IntValue(IntValue, AddrSpace);
+ return;
+ }
+ new MCLEBFragment(*Value, false, getCurrentSectionData());
+}
+
+void MCObjectStreamer::EmitSLEB128Value(const MCExpr *Value,
+ unsigned AddrSpace) {
+ int64_t IntValue;
+ if (Value->EvaluateAsAbsolute(IntValue, getAssembler())) {
+ EmitSLEB128IntValue(IntValue, AddrSpace);
+ return;
+ }
+ new MCLEBFragment(*Value, true, getCurrentSectionData());
+}
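
Both LEB emitters mirror that pattern, with one extra wrinkle: LEB128 is a
variable-length encoding, so when the operand does not fold the streamer
cannot even reserve the right number of bytes yet. The MCLEBFragment created
here is sized later, during layout. For illustration:

    .uleb128 42           -> folds; bytes emitted immediately
    .uleb128 end - start  -> unknown until layout; becomes an MCLEBFragment
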
+
+void MCObjectStreamer::EmitWeakReference(MCSymbol *Alias,
+ const MCSymbol *Symbol) {
+ report_fatal_error("This file format doesn't support weak aliases.");
+}
- // If already in this section, then this is a noop.
- if (Section == CurSection) return;
+void MCObjectStreamer::ChangeSection(const MCSection *Section) {
+ assert(Section && "Cannot switch to a null section!");
- PrevSection = CurSection;
- CurSection = Section;
CurSectionData = &getAssembler().getOrCreateSectionData(*Section);
}
+void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
+ // Scan for values.
+ for (unsigned i = Inst.getNumOperands(); i--; )
+ if (Inst.getOperand(i).isExpr())
+ AddValueSymbols(Inst.getOperand(i).getExpr());
+
+ getCurrentSectionData()->setHasInstructions(true);
+
+ // Now that a machine instruction has been assembled into this section, make
+ // a line entry for any .loc directive that has been seen.
+ MCLineEntry::Make(this, getCurrentSection());
+
+ // If this instruction doesn't need relaxation, just emit it as data.
+ if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
+ EmitInstToData(Inst);
+ return;
+ }
+
+ // Otherwise, if we are relaxing everything, relax the instruction as much as
+ // possible and emit it as data.
+ if (getAssembler().getRelaxAll()) {
+ MCInst Relaxed;
+ getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
+ EmitInstToData(Relaxed);
+ return;
+ }
+
+ // Otherwise emit to a separate fragment.
+ EmitInstToFragment(Inst);
+}
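
An instruction thus takes one of three paths: straight into the current data
fragment when the backend guarantees its size is final, pre-relaxed to a fixed
point and then emitted as data under relax-all, or into its own MCInstFragment
so the layout loop can grow it later. On x86, for example, a jump is the
classic relaxable case:

    jmp target   -> may grow from 2 bytes (rel8) to 5 bytes (rel32); gets an
                    MCInstFragment unless relax-all already widened it
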
+
+void MCObjectStreamer::EmitInstToFragment(const MCInst &Inst) {
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
+
+ raw_svector_ostream VecOS(IF->getCode());
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, IF->getFixups());
+}
+
+static const MCExpr *BuildSymbolDiff(MCContext &Context,
+ const MCSymbol *A, const MCSymbol *B) {
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+ const MCExpr *ARef =
+ MCSymbolRefExpr::Create(A, Variant, Context);
+ const MCExpr *BRef =
+ MCSymbolRefExpr::Create(B, Variant, Context);
+ const MCExpr *AddrDelta =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, ARef, BRef, Context);
+ return AddrDelta;
+}
+
+static const MCExpr *ForceExpAbs(MCObjectStreamer *Streamer,
+ MCContext &Context, const MCExpr* Expr) {
+ if (Context.getAsmInfo().hasAggressiveSymbolFolding())
+ return Expr;
+
+ MCSymbol *ABS = Context.CreateTempSymbol();
+ Streamer->EmitAssignment(ABS, Expr);
+ return MCSymbolRefExpr::Create(ABS, Context);
+}
+
+void MCObjectStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ if (!LastLabel) {
+ int PointerSize = getContext().getTargetAsmInfo().getPointerSize();
+ EmitDwarfSetLineAddr(LineDelta, Label, PointerSize);
+ return;
+ }
+ const MCExpr *AddrDelta = BuildSymbolDiff(getContext(), Label, LastLabel);
+ int64_t Res;
+ if (AddrDelta->EvaluateAsAbsolute(Res, getAssembler())) {
+ MCDwarfLineAddr::Emit(this, LineDelta, Res);
+ return;
+ }
+ AddrDelta = ForceExpAbs(this, getContext(), AddrDelta);
+ new MCDwarfLineAddrFragment(LineDelta, *AddrDelta, getCurrentSectionData());
+}
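
When both deltas are known, MCDwarfLineAddr::Emit can usually fold the whole
advance into a single DWARF special opcode. With the parameter values LLVM's
line table emission uses (line_base = -5, line_range = 14, opcode_base = 13;
cited here for illustration), a line delta of 3 over an address delta of 8
becomes:

    special opcode = (LineDelta - line_base) + line_range * AddrDelta + opcode_base
                   = (3 - (-5)) + 14 * 8 + 13
                   = 133

Deltas outside the encodable range fall back to explicit DW_LNS_advance_line /
DW_LNS_advance_pc opcodes, and an address delta that cannot be folded yet is
parked in an MCDwarfLineAddrFragment, as above.
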
+
+void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ const MCExpr *AddrDelta = BuildSymbolDiff(getContext(), Label, LastLabel);
+ int64_t Res;
+ if (AddrDelta->EvaluateAsAbsolute(Res, getAssembler())) {
+ MCDwarfFrameEmitter::EmitAdvanceLoc(*this, Res);
+ return;
+ }
+ AddrDelta = ForceExpAbs(this, getContext(), AddrDelta);
+ new MCDwarfCallFrameFragment(*AddrDelta, getCurrentSectionData());
+}
+
+void MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
+}
+
void MCObjectStreamer::Finish() {
+ // Dump out the dwarf file & directory tables and line tables.
+ if (getContext().hasDwarfFiles())
+ MCDwarfFileTable::Emit(this);
+
getAssembler().Finish();
}
diff --git a/contrib/llvm/lib/MC/MCObjectWriter.cpp b/contrib/llvm/lib/MC/MCObjectWriter.cpp
index d117e82..efe9f68 100644
--- a/contrib/llvm/lib/MC/MCObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MCObjectWriter.cpp
@@ -7,9 +7,74 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSymbol.h"
using namespace llvm;
MCObjectWriter::~MCObjectWriter() {
}
+
+/// Utility function to encode a SLEB128 value.
+void MCObjectWriter::EncodeSLEB128(int64_t Value, raw_ostream &OS) {
+ bool More;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ // NOTE: this assumes that this signed shift is an arithmetic right shift.
+ Value >>= 7;
+ More = !((((Value == 0 ) && ((Byte & 0x40) == 0)) ||
+ ((Value == -1) && ((Byte & 0x40) != 0))));
+ if (More)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (More);
+}
+
+/// Utility function to encode a ULEB128 value.
+void MCObjectWriter::EncodeULEB128(uint64_t Value, raw_ostream &OS) {
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0)
+      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (Value != 0);
+}
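
For reference, a minimal standalone sketch of the ULEB128 scheme this helper
implements, using the worked example from the DWARF spec (the tiny driver is
illustrative, not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Same loop as EncodeULEB128 above, but writing into a vector so the
    // sketch needs no LLVM headers: emit 7 value bits per byte, setting the
    // high bit on every byte except the last.
    static void encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80; // More bytes follow.
        Out.push_back(Byte);
      } while (Value != 0);
    }

    int main() {
      std::vector<uint8_t> Bytes;
      encodeULEB128(624485, Bytes); // The DWARF spec's example value.
      for (unsigned i = 0; i != Bytes.size(); ++i)
        std::printf("%02x ", unsigned(Bytes[i])); // Prints: e5 8e 26
      std::printf("\n");
      return 0;
    }

The signed variant differs only in its stop condition: it keeps emitting until
the remaining bits are all copies of the sign bit and the sign is already
represented, so for example -123456 encodes as c0 bb 78.
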
+
+bool
+MCObjectWriter::IsSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
+ const MCSymbolRefExpr *A,
+ const MCSymbolRefExpr *B,
+ bool InSet) const {
+ // Modified symbol references cannot be resolved.
+ if (A->getKind() != MCSymbolRefExpr::VK_None ||
+ B->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ const MCSymbol &SA = A->getSymbol();
+ const MCSymbol &SB = B->getSymbol();
+ if (SA.AliasedSymbol().isUndefined() || SB.AliasedSymbol().isUndefined())
+ return false;
+
+ const MCSymbolData &DataA = Asm.getSymbolData(SA);
+ const MCSymbolData &DataB = Asm.getSymbolData(SB);
+
+ return IsSymbolRefDifferenceFullyResolvedImpl(Asm, DataA,
+ *DataB.getFragment(),
+ InSet,
+ false);
+}
+
+bool
+MCObjectWriter::IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbolData &DataA,
+ const MCFragment &FB,
+ bool InSet,
+ bool IsPCRel) const {
+ const MCSection &SecA = DataA.getSymbol().AliasedSymbol().getSection();
+ const MCSection &SecB = FB.getParent()->getSection();
+ // On ELF and COFF A - B is absolute if A and B are in the same section.
+ return &SecA == &SecB;
+}
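
A concrete consequence, sketched for illustration: a difference between two
labels in one section folds to a plain constant at assembly time, while a
cross-section difference has to be left to the object writer and its
relocations:

    .long end - start    # start, end in the same section -> folds to a constant
    .long end - other    # 'other' defined in another section -> not resolved here

Mach-O does not get this default: its writer installs a stricter override of
the Impl hook, which is why the comment above scopes the base-class answer to
ELF and COFF.
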
diff --git a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
index 086df08..89374d0 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -15,6 +15,7 @@
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/MC/MCAsmInfo.h"
+#include <cctype>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
@@ -30,12 +31,12 @@ AsmLexer::~AsmLexer() {
void AsmLexer::setBuffer(const MemoryBuffer *buf, const char *ptr) {
CurBuf = buf;
-
+
if (ptr)
CurPtr = ptr;
else
CurPtr = CurBuf->getBufferStart();
-
+
TokStart = 0;
}
@@ -43,7 +44,7 @@ void AsmLexer::setBuffer(const MemoryBuffer *buf, const char *ptr) {
/// location. This is defined to always return AsmToken::Error.
AsmToken AsmLexer::ReturnError(const char *Loc, const std::string &Msg) {
SetError(SMLoc::getFromPointer(Loc), Msg);
-
+
return AsmToken(AsmToken::Error, StringRef(Loc, 0));
}
@@ -57,23 +58,59 @@ int AsmLexer::getNextChar() {
// a random nul in the file. Disambiguate that here.
if (CurPtr-1 != CurBuf->getBufferEnd())
return 0; // Just whitespace.
-
+
// Otherwise, return end of file.
- --CurPtr; // Another call to lex will return EOF again.
+ --CurPtr; // Another call to lex will return EOF again.
return EOF;
}
}
+/// LexFloatLiteral: [0-9]*[.][0-9]*([eE][+-]?[0-9]*)?
+///
+/// The leading integral digit sequence and dot should have already been
+/// consumed, some or all of the fractional digit sequence *can* have been
+/// consumed.
+AsmToken AsmLexer::LexFloatLiteral() {
+ // Skip the fractional digit sequence.
+ while (isdigit(*CurPtr))
+ ++CurPtr;
+
+  // Check for exponent; we intentionally accept a slightly wider set of
+ // literals here and rely on the upstream client to reject invalid ones (e.g.,
+ // "1e+").
+ if (*CurPtr == 'e' || *CurPtr == 'E') {
+ ++CurPtr;
+ if (*CurPtr == '-' || *CurPtr == '+')
+ ++CurPtr;
+ while (isdigit(*CurPtr))
+ ++CurPtr;
+ }
+
+ return AsmToken(AsmToken::Real,
+ StringRef(TokStart, CurPtr - TokStart));
+}
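
Some inputs this accepts, for illustration:

    1.5      -> AsmToken::Real "1.5"
    .5e-3    -> AsmToken::Real ".5e-3"  (reached via the LexIdentifier check below)
    1.0e+    -> AsmToken::Real "1.0e+"  (deliberately accepted; the client rejects it)
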
+
/// LexIdentifier: [a-zA-Z_.][a-zA-Z0-9_$.@]*
+static bool IsIdentifierChar(char c) {
+ return isalnum(c) || c == '_' || c == '$' || c == '.' || c == '@';
+}
AsmToken AsmLexer::LexIdentifier() {
- while (isalnum(*CurPtr) || *CurPtr == '_' || *CurPtr == '$' ||
- *CurPtr == '.' || *CurPtr == '@')
+ // Check for floating point literals.
+ if (CurPtr[-1] == '.' && isdigit(*CurPtr)) {
+ // Disambiguate a .1243foo identifier from a floating literal.
+ while (isdigit(*CurPtr))
+ ++CurPtr;
+ if (*CurPtr == 'e' || *CurPtr == 'E' || !IsIdentifierChar(*CurPtr))
+ return LexFloatLiteral();
+ }
+
+ while (IsIdentifierChar(*CurPtr))
++CurPtr;
-
+
// Handle . as a special case.
if (CurPtr == TokStart+1 && TokStart[0] == '.')
return AsmToken(AsmToken::Dot, StringRef(TokStart, 1));
-
+
return AsmToken(AsmToken::Identifier, StringRef(TokStart, CurPtr - TokStart));
}
@@ -83,7 +120,7 @@ AsmToken AsmLexer::LexSlash() {
switch (*CurPtr) {
case '*': break; // C style comment.
case '/': return ++CurPtr, LexLineComment();
- default: return AsmToken(AsmToken::Slash, StringRef(CurPtr, 1));
+ default: return AsmToken(AsmToken::Slash, StringRef(CurPtr-1, 1));
}
// C Style comment.
@@ -96,7 +133,7 @@ AsmToken AsmLexer::LexSlash() {
case '*':
// End of the comment?
if (CurPtr[0] != '/') break;
-
+
++CurPtr; // End the */.
return LexToken();
}
@@ -111,7 +148,7 @@ AsmToken AsmLexer::LexLineComment() {
int CurChar = getNextChar();
while (CurChar != '\n' && CurChar != '\r' && CurChar != EOF)
CurChar = getNextChar();
-
+
if (CurChar == EOF)
return AsmToken(AsmToken::Eof, StringRef(CurPtr, 0));
return AsmToken(AsmToken::EndOfStatement, StringRef(CurPtr, 0));
@@ -124,7 +161,6 @@ static void SkipIgnoredIntegerSuffix(const char *&CurPtr) {
CurPtr += 3;
}
-
/// LexDigit: First character is [0-9].
/// Local Label: [0-9][:]
/// Forward/Backward Label: [0-9][fb]
@@ -132,32 +168,37 @@ static void SkipIgnoredIntegerSuffix(const char *&CurPtr) {
/// Octal integer: 0[0-7]+
/// Hex integer: 0x[0-9a-fA-F]+
/// Decimal integer: [1-9][0-9]*
-/// TODO: FP literal.
AsmToken AsmLexer::LexDigit() {
// Decimal integer: [1-9][0-9]*
- if (CurPtr[-1] != '0') {
+ if (CurPtr[-1] != '0' || CurPtr[0] == '.') {
while (isdigit(*CurPtr))
++CurPtr;
-
+
+ // Check for floating point literals.
+ if (*CurPtr == '.' || *CurPtr == 'e') {
+ ++CurPtr;
+ return LexFloatLiteral();
+ }
+
StringRef Result(TokStart, CurPtr - TokStart);
long long Value;
if (Result.getAsInteger(10, Value)) {
- // We have to handle minint_as_a_positive_value specially, because
- // - minint_as_a_positive_value = minint and it is valid.
- if (Result == "9223372036854775808")
- Value = -9223372036854775808ULL;
- else
- return ReturnError(TokStart, "Invalid decimal number");
+ // Allow positive values that are too large to fit into a signed 64-bit
+      // integer but do fit in an unsigned one; we just convert them over.
+ unsigned long long UValue;
+ if (Result.getAsInteger(10, UValue))
+ return ReturnError(TokStart, "invalid decimal number");
+ Value = (long long)UValue;
}
-
+
// The darwin/x86 (and x86-64) assembler accepts and ignores ULL and LL
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
-
+
return AsmToken(AsmToken::Integer, Result, Value);
}
-
+
if (*CurPtr == 'b') {
++CurPtr;
// See if we actually have "0b" as part of something like "jmp 0b\n"
@@ -169,30 +210,30 @@ AsmToken AsmLexer::LexDigit() {
const char *NumStart = CurPtr;
while (CurPtr[0] == '0' || CurPtr[0] == '1')
++CurPtr;
-
+
// Requires at least one binary digit.
if (CurPtr == NumStart)
return ReturnError(TokStart, "Invalid binary number");
-
+
StringRef Result(TokStart, CurPtr - TokStart);
-
+
long long Value;
if (Result.substr(2).getAsInteger(2, Value))
return ReturnError(TokStart, "Invalid binary number");
-
+
// The darwin/x86 (and x86-64) assembler accepts and ignores ULL and LL
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
-
+
return AsmToken(AsmToken::Integer, Result, Value);
}
-
+
if (*CurPtr == 'x') {
++CurPtr;
const char *NumStart = CurPtr;
while (isxdigit(CurPtr[0]))
++CurPtr;
-
+
// Requires at least one hex digit.
if (CurPtr == NumStart)
return ReturnError(CurPtr-2, "Invalid hexadecimal number");
@@ -200,31 +241,67 @@ AsmToken AsmLexer::LexDigit() {
unsigned long long Result;
if (StringRef(TokStart, CurPtr - TokStart).getAsInteger(0, Result))
return ReturnError(TokStart, "Invalid hexadecimal number");
-
+
// The darwin/x86 (and x86-64) assembler accepts and ignores ULL and LL
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
-
+
return AsmToken(AsmToken::Integer, StringRef(TokStart, CurPtr - TokStart),
(int64_t)Result);
}
-
+
// Must be an octal number, it starts with 0.
while (*CurPtr >= '0' && *CurPtr <= '7')
++CurPtr;
-
+
StringRef Result(TokStart, CurPtr - TokStart);
long long Value;
if (Result.getAsInteger(8, Value))
return ReturnError(TokStart, "Invalid octal number");
-
+
// The darwin/x86 (and x86-64) assembler accepts and ignores ULL and LL
// suffixes on integer literals.
SkipIgnoredIntegerSuffix(CurPtr);
-
+
return AsmToken(AsmToken::Integer, Result, Value);
}
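
The effect of the new fallback, shown on the boundary values (the old code
special-cased only 9223372036854775808):

    9223372036854775807   -> fits int64_t, parsed directly
    9223372036854775808   -> overflows int64_t but fits uint64_t; stored as the
                             bit-equivalent int64_t, i.e. INT64_MIN
    18446744073709551615  -> uint64_t max; stored as -1
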
+/// LexSingleQuote: Integer: 'b'
+AsmToken AsmLexer::LexSingleQuote() {
+ int CurChar = getNextChar();
+
+ if (CurChar == '\\')
+ CurChar = getNextChar();
+
+ if (CurChar == EOF)
+ return ReturnError(TokStart, "unterminated single quote");
+
+ CurChar = getNextChar();
+
+ if (CurChar != '\'')
+ return ReturnError(TokStart, "single quote way too long");
+
+ // The idea here being that 'c' is basically just an integral
+ // constant.
+  StringRef Res = StringRef(TokStart, CurPtr - TokStart);
+ long long Value;
+
+ if (Res.startswith("\'\\")) {
+ char theChar = Res[2];
+ switch (theChar) {
+ default: Value = theChar; break;
+ case '\'': Value = '\''; break;
+ case 't': Value = '\t'; break;
+ case 'n': Value = '\n'; break;
+ case 'b': Value = '\b'; break;
+ }
+ } else
+ Value = TokStart[1];
+
+ return AsmToken(AsmToken::Integer, Res, Value);
+}
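
A few examples of what this yields, for illustration:

    'A'    -> AsmToken::Integer, value 65
    '\n'   -> AsmToken::Integer, value 10
    '\x'   -> AsmToken::Integer, value 120 ('x'); unrecognized escapes fall
              back to the escaped character itself
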
+
+
/// LexQuote: String: "..."
AsmToken AsmLexer::LexQuote() {
int CurChar = getNextChar();
@@ -234,13 +311,13 @@ AsmToken AsmLexer::LexQuote() {
// Allow \", etc.
CurChar = getNextChar();
}
-
+
if (CurChar == EOF)
return ReturnError(TokStart, "unterminated string constant");
CurChar = getNextChar();
}
-
+
return AsmToken(AsmToken::String, StringRef(TokStart, CurPtr - TokStart));
}
@@ -266,7 +343,7 @@ AsmToken AsmLexer::LexToken() {
TokStart = CurPtr;
// This always consumes at least one character.
int CurChar = getNextChar();
-
+
if (isAtStartOfComment(CurChar))
return LexLineComment();
@@ -275,7 +352,7 @@ AsmToken AsmLexer::LexToken() {
// Handle identifier: [a-zA-Z_.][a-zA-Z0-9_$.@]*
if (isalpha(CurChar) || CurChar == '_' || CurChar == '.')
return LexIdentifier();
-
+
// Unknown character, emit an error.
return ReturnError(TokStart, "invalid character in input");
case EOF: return AsmToken(AsmToken::Eof, StringRef(TokStart, 0));
@@ -301,49 +378,50 @@ AsmToken AsmLexer::LexToken() {
case ',': return AsmToken(AsmToken::Comma, StringRef(TokStart, 1));
case '$': return AsmToken(AsmToken::Dollar, StringRef(TokStart, 1));
case '@': return AsmToken(AsmToken::At, StringRef(TokStart, 1));
- case '=':
+ case '=':
if (*CurPtr == '=')
return ++CurPtr, AsmToken(AsmToken::EqualEqual, StringRef(TokStart, 2));
return AsmToken(AsmToken::Equal, StringRef(TokStart, 1));
- case '|':
+ case '|':
if (*CurPtr == '|')
return ++CurPtr, AsmToken(AsmToken::PipePipe, StringRef(TokStart, 2));
return AsmToken(AsmToken::Pipe, StringRef(TokStart, 1));
case '^': return AsmToken(AsmToken::Caret, StringRef(TokStart, 1));
- case '&':
+ case '&':
if (*CurPtr == '&')
return ++CurPtr, AsmToken(AsmToken::AmpAmp, StringRef(TokStart, 2));
return AsmToken(AsmToken::Amp, StringRef(TokStart, 1));
- case '!':
+ case '!':
if (*CurPtr == '=')
return ++CurPtr, AsmToken(AsmToken::ExclaimEqual, StringRef(TokStart, 2));
return AsmToken(AsmToken::Exclaim, StringRef(TokStart, 1));
case '%': return AsmToken(AsmToken::Percent, StringRef(TokStart, 1));
case '/': return LexSlash();
case '#': return AsmToken(AsmToken::Hash, StringRef(TokStart, 1));
+ case '\'': return LexSingleQuote();
case '"': return LexQuote();
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
return LexDigit();
case '<':
switch (*CurPtr) {
- case '<': return ++CurPtr, AsmToken(AsmToken::LessLess,
+ case '<': return ++CurPtr, AsmToken(AsmToken::LessLess,
StringRef(TokStart, 2));
- case '=': return ++CurPtr, AsmToken(AsmToken::LessEqual,
+ case '=': return ++CurPtr, AsmToken(AsmToken::LessEqual,
StringRef(TokStart, 2));
- case '>': return ++CurPtr, AsmToken(AsmToken::LessGreater,
+ case '>': return ++CurPtr, AsmToken(AsmToken::LessGreater,
StringRef(TokStart, 2));
default: return AsmToken(AsmToken::Less, StringRef(TokStart, 1));
}
case '>':
switch (*CurPtr) {
- case '>': return ++CurPtr, AsmToken(AsmToken::GreaterGreater,
+ case '>': return ++CurPtr, AsmToken(AsmToken::GreaterGreater,
StringRef(TokStart, 2));
- case '=': return ++CurPtr, AsmToken(AsmToken::GreaterEqual,
+ case '=': return ++CurPtr, AsmToken(AsmToken::GreaterEqual,
StringRef(TokStart, 2));
default: return AsmToken(AsmToken::Greater, StringRef(TokStart, 1));
}
-
+
// TODO: Quoted identifiers (objc methods etc)
// local labels: [0-9][:]
// Forward/backward labels: [0-9][fb]
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index f83cd5e..c6d0da6 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
@@ -18,7 +19,6 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/AsmCond.h"
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
@@ -27,11 +27,12 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCDwarf.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetAsmParser.h"
+#include <cctype>
#include <vector>
using namespace llvm;
@@ -102,6 +103,9 @@ private:
/// Boolean tracking whether macro substitution is enabled.
unsigned MacrosEnabled : 1;
+ /// Flag tracking whether any errors have been encountered.
+ unsigned HadError : 1;
+
public:
AsmParser(const Target &T, SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI);
@@ -137,14 +141,18 @@ public:
/// }
private:
+ void CheckForValidSection();
+
bool ParseStatement();
bool HandleMacroEntry(StringRef Name, SMLoc NameLoc, const Macro *M);
void HandleMacroExit();
void PrintMacroInstantiations();
- void PrintMessage(SMLoc Loc, const std::string &Msg, const char *Type) const;
-
+ void PrintMessage(SMLoc Loc, const Twine &Msg, const char *Type) const {
+ SrcMgr.PrintMessage(Loc, Msg, Type);
+ }
+
/// EnterIncludeFile - Enter the specified file. This returns true on failure.
bool EnterIncludeFile(const std::string &Filename);
@@ -160,22 +168,27 @@ private:
/// will be either the EndOfStatement or EOF.
StringRef ParseStringToEndOfStatement();
- bool ParseAssignment(StringRef Name);
+ bool ParseAssignment(StringRef Name, bool allow_redef);
bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
bool ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc);
bool ParseParenExpr(const MCExpr *&Res, SMLoc &EndLoc);
+ bool ParseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc);
/// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
/// and set \arg Res to the identifier contents.
bool ParseIdentifier(StringRef &Res);
-
+
// Directive Parsing.
- bool ParseDirectiveAscii(bool ZeroTerminated); // ".ascii", ".asciiz"
+
+ // ".ascii", ".asciiz", ".string"
+ bool ParseDirectiveAscii(StringRef IDVal, bool ZeroTerminated);
bool ParseDirectiveValue(unsigned Size); // ".byte", ".long", ...
+ bool ParseDirectiveRealValue(const fltSemantics &); // ".single", ...
bool ParseDirectiveFill(); // ".fill"
bool ParseDirectiveSpace(); // ".space"
- bool ParseDirectiveSet(); // ".set"
+ bool ParseDirectiveZero(); // ".zero"
+ bool ParseDirectiveSet(StringRef IDVal, bool allow_redef); // ".set", ".equ", ".equiv"
bool ParseDirectiveOrg(); // ".org"
// ".align{,32}", ".p2align{,w,l}"
bool ParseDirectiveAlign(bool IsPow2, unsigned ValueSize);
@@ -183,7 +196,6 @@ private:
/// ParseDirectiveSymbolAttribute - Parse a directive like ".globl" which
/// accepts a single symbol (which should be a label or an external).
bool ParseDirectiveSymbolAttribute(MCSymbolAttr Attr);
- bool ParseDirectiveELFType(); // ELF specific ".type"
bool ParseDirectiveComm(bool IsLocal); // ".comm" and ".lcomm"
@@ -191,6 +203,8 @@ private:
bool ParseDirectiveInclude(); // ".include"
bool ParseDirectiveIf(SMLoc DirectiveLoc); // ".if"
+ // ".ifdef" or ".ifndef", depending on expect_defined
+ bool ParseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined);
bool ParseDirectiveElseIf(SMLoc DirectiveLoc); // ".elseif"
bool ParseDirectiveElse(SMLoc DirectiveLoc); // ".else"
bool ParseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
@@ -198,6 +212,9 @@ private:
/// ParseEscapedString - Parse the current token as a string which may include
/// escaped characters and return the string contents.
bool ParseEscapedString(std::string &Data);
+
+ const MCExpr *ApplyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind Variant);
};
/// \brief Generic implementations of directive handling, etc. which is shared
@@ -208,7 +225,6 @@ class GenericAsmParser : public MCAsmParserExtension {
getParser().AddDirectiveHandler(this, Directive,
HandleDirective<GenericAsmParser, Handler>);
}
-
public:
GenericAsmParser() {}
@@ -224,6 +240,29 @@ public:
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveFile>(".file");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLine>(".line");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLoc>(".loc");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveStabs>(".stabs");
+
+ // CFI directives.
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIStartProc>(
+ ".cfi_startproc");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIEndProc>(
+ ".cfi_endproc");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIDefCfa>(
+ ".cfi_def_cfa");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIDefCfaOffset>(
+ ".cfi_def_cfa_offset");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIDefCfaRegister>(
+ ".cfi_def_cfa_register");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveCFIOffset>(
+ ".cfi_offset");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIPersonalityOrLsda>(".cfi_personality");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIPersonalityOrLsda>(".cfi_lsda");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIRememberState>(".cfi_remember_state");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIRestoreState>(".cfi_restore_state");
// Macro directives.
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveMacrosOnOff>(
@@ -233,15 +272,32 @@ public:
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveMacro>(".macro");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveEndMacro>(".endm");
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveEndMacro>(".endmacro");
+
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLEB128>(".sleb128");
+ AddDirectiveHandler<&GenericAsmParser::ParseDirectiveLEB128>(".uleb128");
}
+ bool ParseRegisterOrRegisterNumber(int64_t &Register, SMLoc DirectiveLoc);
+
bool ParseDirectiveFile(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveLine(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveStabs(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIStartProc(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIEndProc(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIDefCfa(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIDefCfaOffset(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIDefCfaRegister(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIOffset(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIPersonalityOrLsda(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIRememberState(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIRestoreState(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveMacrosOnOff(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveMacro(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveEndMacro(StringRef, SMLoc DirectiveLoc);
+
+ bool ParseDirectiveLEB128(StringRef, SMLoc);
};
}
@@ -250,6 +306,7 @@ namespace llvm {
extern MCAsmParserExtension *createDarwinAsmParser();
extern MCAsmParserExtension *createELFAsmParser();
+extern MCAsmParserExtension *createCOFFAsmParser();
}
@@ -269,7 +326,10 @@ AsmParser::AsmParser(const Target &T, SourceMgr &_SM, MCContext &_Ctx,
//
// FIXME: This is a hack, we need to (majorly) cleanup how these objects are
// created.
- if (_MAI.hasSubsectionsViaSymbols()) {
+ if (_MAI.hasMicrosoftFastStdCallMangling()) {
+ PlatformParser = createCOFFAsmParser();
+ PlatformParser->Initialize(*this);
+ } else if (_MAI.hasSubsectionsViaSymbols()) {
PlatformParser = createDarwinAsmParser();
PlatformParser->Initialize(*this);
} else {
@@ -299,30 +359,26 @@ void AsmParser::PrintMacroInstantiations() {
}
void AsmParser::Warning(SMLoc L, const Twine &Msg) {
- PrintMessage(L, Msg.str(), "warning");
+ PrintMessage(L, Msg, "warning");
PrintMacroInstantiations();
}
bool AsmParser::Error(SMLoc L, const Twine &Msg) {
- PrintMessage(L, Msg.str(), "error");
+ HadError = true;
+ PrintMessage(L, Msg, "error");
PrintMacroInstantiations();
return true;
}
-void AsmParser::PrintMessage(SMLoc Loc, const std::string &Msg,
- const char *Type) const {
- SrcMgr.PrintMessage(Loc, Msg, Type);
-}
-
bool AsmParser::EnterIncludeFile(const std::string &Filename) {
int NewBuf = SrcMgr.AddIncludeFile(Filename, Lexer.getLoc());
if (NewBuf == -1)
return true;
-
+
CurBuffer = NewBuf;
-
+
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
-
+
return false;
}
@@ -333,7 +389,7 @@ void AsmParser::JumpToLoc(SMLoc Loc) {
const AsmToken &AsmParser::Lex() {
const AsmToken *tok = &Lexer.Lex();
-
+
if (tok->is(AsmToken::Eof)) {
// If this is the end of an included file, pop the parent file off the
// include stack.
@@ -343,35 +399,31 @@ const AsmToken &AsmParser::Lex() {
tok = &Lexer.Lex();
}
}
-
+
if (tok->is(AsmToken::Error))
Error(Lexer.getErrLoc(), Lexer.getErr());
-
+
return *tok;
}
bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// Create the initial section, if requested.
- //
- // FIXME: Target hook & command line option for initial section.
if (!NoInitialTextSection)
- Out.SwitchSection(Ctx.getMachOSection("__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 0, SectionKind::getText()));
+ Out.InitSections();
// Prime the lexer.
Lex();
-
- bool HadError = false;
-
+
+ HadError = false;
AsmCond StartingCondState = TheCondState;
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
if (!ParseStatement()) continue;
-
- // We had an error, remember it and recover by skipping to the next line.
- HadError = true;
+
+ // We had an error, validate that one was emitted and recover by skipping to
+ // the next line.
+ assert(HadError && "Parse statement returned an error, but none emitted!");
EatToEndOfStatement();
}
@@ -383,26 +435,34 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
const std::vector<MCDwarfFile *> &MCDwarfFiles =
getContext().getMCDwarfFiles();
for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
- if (!MCDwarfFiles[i]){
+ if (!MCDwarfFiles[i])
TokError("unassigned file number: " + Twine(i) + " for .file directives");
- HadError = true;
- }
}
-
+
// Finalize the output stream if there are no errors and if the client wants
// us to.
- if (!HadError && !NoFinalize)
+ if (!HadError && !NoFinalize)
Out.Finish();
return HadError;
}
+void AsmParser::CheckForValidSection() {
+ if (!getStreamer().getCurrentSection()) {
+ TokError("expected section directive before assembly directive");
+ Out.SwitchSection(Ctx.getMachOSection(
+ "__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ 0, SectionKind::getText()));
+ }
+}
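
The recovery here is deliberate: after diagnosing a statement that appears
before any section directive, the parser drops into a hard-coded Mach-O
__TEXT,__text section so it can keep going and surface later errors instead of
cascading. For illustration:

    .byte 1    # first statement in the file, no section yet:
               # error "expected section directive before assembly directive",
               # then parsing resumes inside __TEXT,__text
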
+
/// EatToEndOfStatement - Throw away the rest of the line for testing purposes.
void AsmParser::EatToEndOfStatement() {
while (Lexer.isNot(AsmToken::EndOfStatement) &&
Lexer.isNot(AsmToken::Eof))
Lex();
-
+
// Eat EOL.
if (Lexer.is(AsmToken::EndOfStatement))
Lex();
@@ -433,6 +493,20 @@ bool AsmParser::ParseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
return false;
}
+/// ParseBracketExpr - Parse a bracket expression and return it.
+/// NOTE: This assumes the leading '[' has already been consumed.
+///
+/// bracketexpr ::= expr]
+///
+bool AsmParser::ParseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc) {
+ if (ParseExpression(Res)) return true;
+ if (Lexer.isNot(AsmToken::RBrac))
+ return TokError("expected ']' in brackets expression");
+ EndLoc = Lexer.getLoc();
+ Lex();
+ return false;
+}
+
/// ParsePrimaryExpr - Parse a primary expression and return it.
/// primaryexpr ::= (parenexpr
/// primaryexpr ::= symbol
@@ -462,19 +536,21 @@ bool AsmParser::ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
std::pair<StringRef, StringRef> Split = Identifier.split('@');
MCSymbol *Sym = getContext().GetOrCreateSymbol(Split.first);
- // Mark the symbol as used in an expression.
- Sym->setUsedInExpr(true);
-
// Lookup the symbol variant if used.
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- if (Split.first.size() != Identifier.size())
+ if (Split.first.size() != Identifier.size()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
+ if (Variant == MCSymbolRefExpr::VK_Invalid) {
+ Variant = MCSymbolRefExpr::VK_None;
+ return TokError("invalid variant '" + Split.second + "'");
+ }
+ }
// If this is an absolute variable reference, substitute it now to preserve
// semantics in the face of reassignment.
if (Sym->isVariable() && isa<MCConstantExpr>(Sym->getVariableValue())) {
if (Variant)
- return Error(EndLoc, "unexpected modified on variable reference");
+ return Error(EndLoc, "unexpected modifier on variable reference");
Res = Sym->getVariableValue();
return false;
@@ -506,6 +582,13 @@ bool AsmParser::ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
}
return false;
}
+ case AsmToken::Real: {
+ APFloat RealVal(APFloat::IEEEdouble, getTok().getString());
+ uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
+ Res = MCConstantExpr::Create(IntVal, getContext());
+ Lex(); // Eat token.
+ return false;
+ }
case AsmToken::Dot: {
// This is a '.' reference, which references the current PC. Emit a
// temporary label to the streamer and refer to it.
@@ -516,10 +599,12 @@ bool AsmParser::ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
Lex(); // Eat identifier.
return false;
}
-
case AsmToken::LParen:
Lex(); // Eat the '('.
return ParseParenExpr(Res, EndLoc);
+ case AsmToken::LBrac:
+ Lex(); // Eat the '['.
+ return ParseBracketExpr(Res, EndLoc);
case AsmToken::Minus:
Lex(); // Eat the operator.
if (ParsePrimaryExpr(Res, EndLoc))
@@ -546,8 +631,57 @@ bool AsmParser::ParseExpression(const MCExpr *&Res) {
return ParseExpression(Res, EndLoc);
}
+const MCExpr *
+AsmParser::ApplyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind Variant) {
+ // Recurse over the given expression, rebuilding it to apply the given variant
+ // if there is exactly one symbol.
+ switch (E->getKind()) {
+ case MCExpr::Target:
+ case MCExpr::Constant:
+ return 0;
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
+
+ if (SRE->getKind() != MCSymbolRefExpr::VK_None) {
+ TokError("invalid variant on expression '" +
+ getTok().getIdentifier() + "' (already modified)");
+ return E;
+ }
+
+ return MCSymbolRefExpr::Create(&SRE->getSymbol(), Variant, getContext());
+ }
+
+ case MCExpr::Unary: {
+ const MCUnaryExpr *UE = cast<MCUnaryExpr>(E);
+ const MCExpr *Sub = ApplyModifierToExpr(UE->getSubExpr(), Variant);
+ if (!Sub)
+ return 0;
+ return MCUnaryExpr::Create(UE->getOpcode(), Sub, getContext());
+ }
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
+ const MCExpr *LHS = ApplyModifierToExpr(BE->getLHS(), Variant);
+ const MCExpr *RHS = ApplyModifierToExpr(BE->getRHS(), Variant);
+
+ if (!LHS && !RHS)
+ return 0;
+
+ if (!LHS) LHS = BE->getLHS();
+ if (!RHS) RHS = BE->getRHS();
+
+ return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, getContext());
+ }
+ }
+
+ assert(0 && "Invalid expression kind!");
+ return 0;
+}
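
An illustration of the rewrite (the variant name is arbitrary here): given one
symbol in the tree, the trailing modifier is pushed down onto that symbol's
reference, so

    (foo + 4)@GOTPCREL    is rebuilt as    foo@GOTPCREL + 4

Constant-only subtrees report 0 so the caller can diagnose a modifier with no
symbol to attach to, and an already-modified symbol is rejected outright.
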
+
/// ParseExpression - Parse an expression and return it.
-///
+///
/// expr ::= expr +,- expr -> lowest.
/// expr ::= expr |,^,&,! expr -> middle.
/// expr ::= expr *,/,%,<<,>> expr -> highest.
@@ -559,6 +693,31 @@ bool AsmParser::ParseExpression(const MCExpr *&Res, SMLoc &EndLoc) {
if (ParsePrimaryExpr(Res, EndLoc) || ParseBinOpRHS(1, Res, EndLoc))
return true;
+ // As a special case, we support 'a op b @ modifier' by rewriting the
+ // expression to include the modifier. This is inefficient, but in general we
+ // expect users to use 'a@modifier op b'.
+ if (Lexer.getKind() == AsmToken::At) {
+ Lex();
+
+ if (Lexer.isNot(AsmToken::Identifier))
+ return TokError("unexpected symbol modifier following '@'");
+
+ MCSymbolRefExpr::VariantKind Variant =
+ MCSymbolRefExpr::getVariantKindForName(getTok().getIdentifier());
+ if (Variant == MCSymbolRefExpr::VK_Invalid)
+ return TokError("invalid variant '" + getTok().getIdentifier() + "'");
+
+ const MCExpr *ModifiedRes = ApplyModifierToExpr(Res, Variant);
+ if (!ModifiedRes) {
+ return TokError("invalid modifier '" + getTok().getIdentifier() +
+ "' (no symbols present)");
+ }
+
+ Res = ModifiedRes;
+ Lex();
+ }
+
// Try to constant fold it up front, if possible.
int64_t Value;
if (Res->EvaluateAsAbsolute(Value))
@@ -575,7 +734,7 @@ bool AsmParser::ParseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) {
bool AsmParser::ParseAbsoluteExpression(int64_t &Res) {
const MCExpr *Expr;
-
+
SMLoc StartLoc = Lexer.getLoc();
if (ParseExpression(Expr))
return true;
@@ -586,13 +745,13 @@ bool AsmParser::ParseAbsoluteExpression(int64_t &Res) {
return false;
}
-static unsigned getBinOpPrecedence(AsmToken::TokenKind K,
+static unsigned getBinOpPrecedence(AsmToken::TokenKind K,
MCBinaryExpr::Opcode &Kind) {
switch (K) {
default:
return 0; // not a binop.
- // Lowest Precedence: &&, ||
+ // Lowest Precedence: &&, ||, @
case AsmToken::AmpAmp:
Kind = MCBinaryExpr::LAnd;
return 1;
@@ -600,62 +759,65 @@ static unsigned getBinOpPrecedence(AsmToken::TokenKind K,
Kind = MCBinaryExpr::LOr;
return 1;
- // Low Precedence: +, -, ==, !=, <>, <, <=, >, >=
- case AsmToken::Plus:
- Kind = MCBinaryExpr::Add;
+
+ // Low Precedence: |, &, ^
+ //
+ // FIXME: gas seems to support '!' as an infix operator?
+ case AsmToken::Pipe:
+ Kind = MCBinaryExpr::Or;
return 2;
- case AsmToken::Minus:
- Kind = MCBinaryExpr::Sub;
+ case AsmToken::Caret:
+ Kind = MCBinaryExpr::Xor;
+ return 2;
+ case AsmToken::Amp:
+ Kind = MCBinaryExpr::And;
return 2;
+
+ // Low Intermediate Precedence: ==, !=, <>, <, <=, >, >=
case AsmToken::EqualEqual:
Kind = MCBinaryExpr::EQ;
- return 2;
+ return 3;
case AsmToken::ExclaimEqual:
case AsmToken::LessGreater:
Kind = MCBinaryExpr::NE;
- return 2;
+ return 3;
case AsmToken::Less:
Kind = MCBinaryExpr::LT;
- return 2;
+ return 3;
case AsmToken::LessEqual:
Kind = MCBinaryExpr::LTE;
- return 2;
+ return 3;
case AsmToken::Greater:
Kind = MCBinaryExpr::GT;
- return 2;
+ return 3;
case AsmToken::GreaterEqual:
Kind = MCBinaryExpr::GTE;
- return 2;
-
- // Intermediate Precedence: |, &, ^
- //
- // FIXME: gas seems to support '!' as an infix operator?
- case AsmToken::Pipe:
- Kind = MCBinaryExpr::Or;
- return 3;
- case AsmToken::Caret:
- Kind = MCBinaryExpr::Xor;
- return 3;
- case AsmToken::Amp:
- Kind = MCBinaryExpr::And;
return 3;
+ // High Intermediate Precedence: +, -
+ case AsmToken::Plus:
+ Kind = MCBinaryExpr::Add;
+ return 4;
+ case AsmToken::Minus:
+ Kind = MCBinaryExpr::Sub;
+ return 4;
+
// Highest Precedence: *, /, %, <<, >>
case AsmToken::Star:
Kind = MCBinaryExpr::Mul;
- return 4;
+ return 5;
case AsmToken::Slash:
Kind = MCBinaryExpr::Div;
- return 4;
+ return 5;
case AsmToken::Percent:
Kind = MCBinaryExpr::Mod;
- return 4;
+ return 5;
case AsmToken::LessLess:
Kind = MCBinaryExpr::Shl;
- return 4;
+ return 5;
case AsmToken::GreaterGreater:
Kind = MCBinaryExpr::Shr;
- return 4;
+ return 5;
}
}
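
The renumbering shifts the table from the grouping gas documents (bitwise
operators binding tighter than + and -) to C-like precedence: multiplicative
over additive over comparisons over bitwise, with && and || still lowest. Two
expressions whose parse changes, for illustration:

    a + b | c      before: a + (b | c)      after: (a + b) | c
    a == b + 1     before: (a == b) + 1     after: a == (b + 1)
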
@@ -667,18 +829,18 @@ bool AsmParser::ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
while (1) {
MCBinaryExpr::Opcode Kind = MCBinaryExpr::Add;
unsigned TokPrec = getBinOpPrecedence(Lexer.getKind(), Kind);
-
+
// If the next token is lower precedence than we are allowed to eat, return
// successfully with what we ate already.
if (TokPrec < Precedence)
return false;
-
+
Lex();
-
+
// Eat the next primary expression.
const MCExpr *RHS;
if (ParsePrimaryExpr(RHS, EndLoc)) return true;
-
+
// If BinOp binds less tightly with RHS than the operator after RHS, let
// the pending operator take RHS as its LHS.
MCBinaryExpr::Opcode Dummy;
@@ -692,9 +854,9 @@ bool AsmParser::ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
}
}
-
-
-
+
+
+
/// ParseStatement:
/// ::= EndOfStatement
/// ::= Label* Directive ...Operands... EndOfStatement
@@ -706,12 +868,17 @@ bool AsmParser::ParseStatement() {
return false;
}
- // Statements always start with an identifier.
+ // Statements always start with an identifier or are a full line comment.
AsmToken ID = getTok();
SMLoc IDLoc = ID.getLoc();
StringRef IDVal;
int64_t LocalLabelVal = -1;
- // GUESS allow an integer followed by a ':' as a directional local label
+ // A full line comment is a '#' as the first token.
+ if (Lexer.is(AsmToken::Hash)) {
+ EatToEndOfStatement();
+ return false;
+ }
+ // Allow an integer followed by a ':' as a directional local label.
if (Lexer.is(AsmToken::Integer)) {
LocalLabelVal = getTok().getIntVal();
if (LocalLabelVal < 0) {
@@ -739,24 +906,30 @@ bool AsmParser::ParseStatement() {
// example.
if (IDVal == ".if")
return ParseDirectiveIf(IDLoc);
+ if (IDVal == ".ifdef")
+ return ParseDirectiveIfdef(IDLoc, true);
+ if (IDVal == ".ifndef" || IDVal == ".ifnotdef")
+ return ParseDirectiveIfdef(IDLoc, false);
if (IDVal == ".elseif")
return ParseDirectiveElseIf(IDLoc);
if (IDVal == ".else")
return ParseDirectiveElse(IDLoc);
if (IDVal == ".endif")
return ParseDirectiveEndIf(IDLoc);
-
+
// If we are in a ".if 0" block, ignore this statement.
if (TheCondState.Ignore) {
EatToEndOfStatement();
return false;
}
-
+
// FIXME: Recurse on local labels?
// See what kind of statement we have.
switch (Lexer.getKind()) {
case AsmToken::Colon: {
+ CheckForValidSection();
+
// identifier ':' -> Label.
Lex();
@@ -772,10 +945,10 @@ bool AsmParser::ParseStatement() {
Sym = Ctx.CreateDirectionalLocalSymbol(LocalLabelVal);
if (!Sym->isUndefined() || Sym->isVariable())
return Error(IDLoc, "invalid symbol redefinition");
-
+
// Emit the label.
Out.EmitLabel(Sym);
-
+
// Consume any end of statement token, if present, to avoid spurious
// AddBlankLine calls().
if (Lexer.is(AsmToken::EndOfStatement)) {
@@ -791,7 +964,7 @@ bool AsmParser::ParseStatement() {
// identifier '=' ... -> assignment statement
Lex();
- return ParseAssignment(IDVal);
+ return ParseAssignment(IDVal, true);
default: // Normal instruction or directive.
break;
@@ -802,27 +975,43 @@ bool AsmParser::ParseStatement() {
if (const Macro *M = MacroMap.lookup(IDVal))
return HandleMacroEntry(IDVal, IDLoc, M);
- // Otherwise, we have a normal instruction or directive.
+ // Otherwise, we have a normal instruction or directive.
if (IDVal[0] == '.') {
// Assembler features
- if (IDVal == ".set")
- return ParseDirectiveSet();
+ if (IDVal == ".set" || IDVal == ".equ")
+ return ParseDirectiveSet(IDVal, true);
+ if (IDVal == ".equiv")
+ return ParseDirectiveSet(IDVal, false);
// Data directives
if (IDVal == ".ascii")
- return ParseDirectiveAscii(false);
- if (IDVal == ".asciz")
- return ParseDirectiveAscii(true);
+ return ParseDirectiveAscii(IDVal, false);
+ if (IDVal == ".asciz" || IDVal == ".string")
+ return ParseDirectiveAscii(IDVal, true);
if (IDVal == ".byte")
return ParseDirectiveValue(1);
if (IDVal == ".short")
return ParseDirectiveValue(2);
+ if (IDVal == ".value")
+ return ParseDirectiveValue(2);
+ if (IDVal == ".2byte")
+ return ParseDirectiveValue(2);
if (IDVal == ".long")
return ParseDirectiveValue(4);
+ if (IDVal == ".int")
+ return ParseDirectiveValue(4);
+ if (IDVal == ".4byte")
+ return ParseDirectiveValue(4);
if (IDVal == ".quad")
return ParseDirectiveValue(8);
+ if (IDVal == ".8byte")
+ return ParseDirectiveValue(8);
+ if (IDVal == ".single" || IDVal == ".float")
+ return ParseDirectiveRealValue(APFloat::IEEEsingle);
+ if (IDVal == ".double")
+ return ParseDirectiveRealValue(APFloat::IEEEdouble);
if (IDVal == ".align") {
bool IsPow2 = !getContext().getAsmInfo().getAlignmentIsInBytes();
@@ -852,11 +1041,16 @@ bool AsmParser::ParseStatement() {
return ParseDirectiveFill();
if (IDVal == ".space")
return ParseDirectiveSpace();
+ if (IDVal == ".zero")
+ return ParseDirectiveZero();
// Symbol attribute directives
if (IDVal == ".globl" || IDVal == ".global")
return ParseDirectiveSymbolAttribute(MCSA_Global);
+ // ELF only? Should it be here?
+ if (IDVal == ".local")
+ return ParseDirectiveSymbolAttribute(MCSA_Local);
if (IDVal == ".hidden")
return ParseDirectiveSymbolAttribute(MCSA_Hidden);
if (IDVal == ".indirect_symbol")
@@ -867,14 +1061,14 @@ bool AsmParser::ParseStatement() {
return ParseDirectiveSymbolAttribute(MCSA_LazyReference);
if (IDVal == ".no_dead_strip")
return ParseDirectiveSymbolAttribute(MCSA_NoDeadStrip);
+ if (IDVal == ".symbol_resolver")
+ return ParseDirectiveSymbolAttribute(MCSA_SymbolResolver);
if (IDVal == ".private_extern")
return ParseDirectiveSymbolAttribute(MCSA_PrivateExtern);
if (IDVal == ".protected")
return ParseDirectiveSymbolAttribute(MCSA_Protected);
if (IDVal == ".reference")
return ParseDirectiveSymbolAttribute(MCSA_Reference);
- if (IDVal == ".type")
- return ParseDirectiveELFType();
if (IDVal == ".weak")
return ParseDirectiveSymbolAttribute(MCSA_Weak);
if (IDVal == ".weak_definition")
@@ -894,6 +1088,9 @@ bool AsmParser::ParseStatement() {
if (IDVal == ".include")
return ParseDirectiveInclude();
+ if (IDVal == ".code16" || IDVal == ".code32" || IDVal == ".code64")
+ return TokError(Twine(IDVal) + " not supported yet");
+
// Look up the handler in the handler table.
std::pair<MCAsmParserExtension*, DirectiveHandler> Handler =
DirectiveMap.lookup(IDVal);
@@ -909,16 +1106,16 @@ bool AsmParser::ParseStatement() {
return false;
}
+ CheckForValidSection();
+
// Canonicalize the opcode to lower case.
SmallString<128> Opcode;
for (unsigned i = 0, e = IDVal.size(); i != e; ++i)
Opcode.push_back(tolower(IDVal[i]));
-
+
SmallVector<MCParsedAsmOperand*, 8> ParsedOperands;
bool HadError = getTargetParser().ParseInstruction(Opcode.str(), IDLoc,
ParsedOperands);
- if (!HadError && Lexer.isNot(AsmToken::EndOfStatement))
- HadError = TokError("unexpected token in argument list");
// Dump the parsed representation, if requested.
if (getShowParsedOperands()) {
@@ -936,25 +1133,17 @@ bool AsmParser::ParseStatement() {
}
// If parsing succeeded, match the instruction.
- if (!HadError) {
- MCInst Inst;
- if (!getTargetParser().MatchInstruction(IDLoc, ParsedOperands, Inst)) {
- // Emit the instruction on success.
- Out.EmitInstruction(Inst);
- } else
- HadError = true;
- }
-
- // If there was no error, consume the end-of-statement token. Otherwise this
- // will be done by our caller.
if (!HadError)
- Lex();
+ HadError = getTargetParser().MatchAndEmitInstruction(IDLoc, ParsedOperands,
+ Out);
// Free any parsed operands.
for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i)
delete ParsedOperands[i];
- return HadError;
+ // Don't skip the rest of the line, the instruction parser is responsible for
+ // that.
+ return false;
}
MacroInstantiation::MacroInstantiation(const Macro *M, SMLoc IL, SMLoc EL,
@@ -1083,14 +1272,35 @@ void AsmParser::HandleMacroExit() {
ActiveMacros.pop_back();
}
-bool AsmParser::ParseAssignment(StringRef Name) {
+static void MarkUsed(const MCExpr *Value) {
+ switch (Value->getKind()) {
+ case MCExpr::Binary:
+ MarkUsed(static_cast<const MCBinaryExpr*>(Value)->getLHS());
+ MarkUsed(static_cast<const MCBinaryExpr*>(Value)->getRHS());
+ break;
+ case MCExpr::Target:
+ case MCExpr::Constant:
+ break;
+ case MCExpr::SymbolRef: {
+ static_cast<const MCSymbolRefExpr*>(Value)->getSymbol().setUsed(true);
+ break;
+ }
+ case MCExpr::Unary:
+ MarkUsed(static_cast<const MCUnaryExpr*>(Value)->getSubExpr());
+ break;
+ }
+}
+
+bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
// FIXME: Use better location, we should use proper tokens.
SMLoc EqualLoc = Lexer.getLoc();
const MCExpr *Value;
if (ParseExpression(Value))
return true;
-
+
+ MarkUsed(Value);
+
if (Lexer.isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in assignment");
@@ -1105,22 +1315,23 @@ bool AsmParser::ParseAssignment(StringRef Name) {
//
// FIXME: Diagnostics. Note the location of the definition as a label.
// FIXME: Diagnose assignment to protected identifier (e.g., register name).
- if (Sym->isUndefined() && !Sym->isUsedInExpr())
+ if (Sym->isUndefined() && !Sym->isUsed() && !Sym->isVariable())
; // Allow redefinitions of undefined symbols only used in directives.
- else if (!Sym->isUndefined() && !Sym->isAbsolute())
+ else if (!Sym->isUndefined() && (!Sym->isAbsolute() || !allow_redef))
return Error(EqualLoc, "redefinition of '" + Name + "'");
else if (!Sym->isVariable())
return Error(EqualLoc, "invalid assignment to '" + Name + "'");
else if (!isa<MCConstantExpr>(Sym->getVariableValue()))
return Error(EqualLoc, "invalid reassignment of non-absolute variable '" +
Name + "'");
+
+ // Don't count these checks as uses.
+ Sym->setUsed(false);
} else
Sym = getContext().GetOrCreateSymbol(Name);
// FIXME: Handle '.'.
- Sym->setUsedInExpr(true);
-
// Do the assignment.
Out.EmitAssignment(Sym, Value);
@@ -1167,18 +1378,20 @@ bool AsmParser::ParseIdentifier(StringRef &Res) {
}
/// ParseDirectiveSet:
+/// ::= .equ identifier ',' expression
+/// ::= .equiv identifier ',' expression
/// ::= .set identifier ',' expression
-bool AsmParser::ParseDirectiveSet() {
+bool AsmParser::ParseDirectiveSet(StringRef IDVal, bool allow_redef) {
StringRef Name;
if (ParseIdentifier(Name))
- return TokError("expected identifier after '.set' directive");
-
+ return TokError("expected identifier after '" + Twine(IDVal) + "'");
+
if (getLexer().isNot(AsmToken::Comma))
- return TokError("unexpected token in '.set'");
+ return TokError("unexpected token in '" + Twine(IDVal) + "'");
Lex();
- return ParseAssignment(Name);
+ return ParseAssignment(Name, allow_redef);
}
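
.set and .equ permit redefining a symbol whose value is absolute, while .equiv
passes allow_redef = false and so rejects any redefinition, matching the gas
documentation for .equiv. For illustration:

    .set   x, 1
    .set   x, 2      # accepted: absolute variable, redefinition allowed
    .equiv y, 1
    .equiv y, 2      # error: redefinition of 'y'
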
bool AsmParser::ParseEscapedString(std::string &Data) {
@@ -1240,12 +1453,14 @@ bool AsmParser::ParseEscapedString(std::string &Data) {
}
/// ParseDirectiveAscii:
-/// ::= ( .ascii | .asciz ) [ "string" ( , "string" )* ]
-bool AsmParser::ParseDirectiveAscii(bool ZeroTerminated) {
+/// ::= ( .ascii | .asciz | .string ) [ "string" ( , "string" )* ]
+bool AsmParser::ParseDirectiveAscii(StringRef IDVal, bool ZeroTerminated) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ CheckForValidSection();
+
for (;;) {
if (getLexer().isNot(AsmToken::String))
- return TokError("expected string in '.ascii' or '.asciz' directive");
+ return TokError("expected string in '" + Twine(IDVal) + "' directive");
std::string Data;
if (ParseEscapedString(Data))
@@ -1261,7 +1476,7 @@ bool AsmParser::ParseDirectiveAscii(bool ZeroTerminated) {
break;
if (getLexer().isNot(AsmToken::Comma))
- return TokError("unexpected token in '.ascii' or '.asciz' directive");
+ return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
Lex();
}
}
@@ -1274,9 +1489,10 @@ bool AsmParser::ParseDirectiveAscii(bool ZeroTerminated) {
/// ::= (.byte | .short | ... ) [ expression (, expression)* ]
bool AsmParser::ParseDirectiveValue(unsigned Size) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ CheckForValidSection();
+
for (;;) {
const MCExpr *Value;
- SMLoc ATTRIBUTE_UNUSED StartLoc = getLexer().getLoc();
if (ParseExpression(Value))
return true;
@@ -1288,7 +1504,7 @@ bool AsmParser::ParseDirectiveValue(unsigned Size) {
if (getLexer().is(AsmToken::EndOfStatement))
break;
-
+
// FIXME: Improve diagnostic.
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
@@ -1300,9 +1516,61 @@ bool AsmParser::ParseDirectiveValue(unsigned Size) {
return false;
}
+/// ParseDirectiveRealValue
+/// ::= (.single | .double) [ expression (, expression)* ]
+bool AsmParser::ParseDirectiveRealValue(const fltSemantics &Semantics) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ CheckForValidSection();
+
+ for (;;) {
+ // We don't truly support arithmetic on floating point expressions, so we
+ // have to manually parse unary prefixes.
+ bool IsNeg = false;
+ if (getLexer().is(AsmToken::Minus)) {
+ Lex();
+ IsNeg = true;
+ } else if (getLexer().is(AsmToken::Plus))
+ Lex();
+
+ if (getLexer().isNot(AsmToken::Integer) &&
+ getLexer().isNot(AsmToken::Real))
+ return TokError("unexpected token in directive");
+
+ // Convert to an APFloat.
+ APFloat Value(Semantics);
+ if (Value.convertFromString(getTok().getString(),
+ APFloat::rmNearestTiesToEven) ==
+ APFloat::opInvalidOp)
+ return TokError("invalid floating point literal");
+ if (IsNeg)
+ Value.changeSign();
+
+ // Consume the numeric token.
+ Lex();
+
+ // Emit the value as an integer.
+ APInt AsInt = Value.bitcastToAPInt();
+ getStreamer().EmitIntValue(AsInt.getLimitedValue(),
+ AsInt.getBitWidth() / 8, DEFAULT_ADDRSPACE);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+ }
+ }
+
+ Lex();
+ return false;
+}
+
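
The handler special-cases only a leading '+' or '-' and otherwise expects a
bare numeric literal, which it bitcasts to an integer for emission; typical
input looks like:

        .single 3.25, -1.5       # two 32-bit IEEE-754 values
        .double +2.5             # one 64-bit IEEE-754 value
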
/// ParseDirectiveSpace
/// ::= .space expression [ , expression ]
bool AsmParser::ParseDirectiveSpace() {
+ CheckForValidSection();
+
int64_t NumBytes;
if (ParseAbsoluteExpression(NumBytes))
return true;
@@ -1312,7 +1580,7 @@ bool AsmParser::ParseDirectiveSpace() {
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.space' directive");
Lex();
-
+
if (ParseAbsoluteExpression(FillExpr))
return true;
@@ -1331,9 +1599,37 @@ bool AsmParser::ParseDirectiveSpace() {
return false;
}
+/// ParseDirectiveZero
+/// ::= .zero expression
+bool AsmParser::ParseDirectiveZero() {
+ CheckForValidSection();
+
+ int64_t NumBytes;
+ if (ParseAbsoluteExpression(NumBytes))
+ return true;
+
+ int64_t Val = 0;
+ if (getLexer().is(AsmToken::Comma)) {
+ Lex();
+ if (ParseAbsoluteExpression(Val))
+ return true;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.zero' directive");
+
+ Lex();
+
+ getStreamer().EmitFill(NumBytes, Val, DEFAULT_ADDRSPACE);
+
+ return false;
+}
+
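
A sketch of the accepted forms, per the grammar above and the optional second
operand the code parses:

        .zero   16               # 16 zero bytes
        .zero   8, 0x2a          # 8 bytes of an explicit fill value
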
/// ParseDirectiveFill
/// ::= .fill expression , expression , expression
bool AsmParser::ParseDirectiveFill() {
+ CheckForValidSection();
+
int64_t NumValues;
if (ParseAbsoluteExpression(NumValues))
return true;
@@ -1341,7 +1637,7 @@ bool AsmParser::ParseDirectiveFill() {
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.fill' directive");
Lex();
-
+
int64_t FillSize;
if (ParseAbsoluteExpression(FillSize))
return true;
@@ -1349,14 +1645,14 @@ bool AsmParser::ParseDirectiveFill() {
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.fill' directive");
Lex();
-
+
int64_t FillExpr;
if (ParseAbsoluteExpression(FillExpr))
return true;
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.fill' directive");
-
+
Lex();
if (FillSize != 1 && FillSize != 2 && FillSize != 4 && FillSize != 8)
@@ -1371,6 +1667,8 @@ bool AsmParser::ParseDirectiveFill() {
/// ParseDirectiveOrg
/// ::= .org expression [ , expression ]
bool AsmParser::ParseDirectiveOrg() {
+ CheckForValidSection();
+
const MCExpr *Offset;
if (ParseExpression(Offset))
return true;
@@ -1381,7 +1679,7 @@ bool AsmParser::ParseDirectiveOrg() {
if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.org' directive");
Lex();
-
+
if (ParseAbsoluteExpression(FillExpr))
return true;
@@ -1401,6 +1699,8 @@ bool AsmParser::ParseDirectiveOrg() {
/// ParseDirectiveAlign
/// ::= {.align, ...} expression [ , expression [ , expression ]]
bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
+ CheckForValidSection();
+
SMLoc AlignmentLoc = getLexer().getLoc();
int64_t Alignment;
if (ParseAbsoluteExpression(Alignment))
@@ -1432,7 +1732,7 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
MaxBytesLoc = getLexer().getLoc();
if (ParseAbsoluteExpression(MaxBytesToFill))
return true;
-
+
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in directive");
}
@@ -1471,12 +1771,7 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
// Check whether we should use optimal code alignment for this .align
// directive.
- //
- // FIXME: This should be using a target hook.
- bool UseCodeAlign = false;
- if (const MCSectionMachO *S = dyn_cast<MCSectionMachO>(
- getStreamer().getCurrentSection()))
- UseCodeAlign = S->hasAttribute(MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
+ bool UseCodeAlign = getStreamer().getCurrentSection()->UseCodeAlign();
if ((!HasFillExpr || Lexer.getMAI().getTextAlignFillValue() == FillExpr) &&
ValueSize == 1 && UseCodeAlign) {
getStreamer().EmitCodeAlignment(Alignment, MaxBytesToFill);
@@ -1498,7 +1793,7 @@ bool AsmParser::ParseDirectiveSymbolAttribute(MCSymbolAttr Attr) {
if (ParseIdentifier(Name))
return TokError("expected identifier in directive");
-
+
MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
getStreamer().EmitSymbolAttribute(Sym, Attr);
@@ -1513,63 +1808,19 @@ bool AsmParser::ParseDirectiveSymbolAttribute(MCSymbolAttr Attr) {
}
Lex();
- return false;
-}
-
-/// ParseDirectiveELFType
-/// ::= .type identifier , @attribute
-bool AsmParser::ParseDirectiveELFType() {
- StringRef Name;
- if (ParseIdentifier(Name))
- return TokError("expected identifier in directive");
-
- // Handle the identifier as the key symbol.
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
-
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("unexpected token in '.type' directive");
- Lex();
-
- if (getLexer().isNot(AsmToken::At))
- return TokError("expected '@' before type");
- Lex();
-
- StringRef Type;
- SMLoc TypeLoc;
-
- TypeLoc = getLexer().getLoc();
- if (ParseIdentifier(Type))
- return TokError("expected symbol type in directive");
-
- MCSymbolAttr Attr = StringSwitch<MCSymbolAttr>(Type)
- .Case("function", MCSA_ELF_TypeFunction)
- .Case("object", MCSA_ELF_TypeObject)
- .Case("tls_object", MCSA_ELF_TypeTLS)
- .Case("common", MCSA_ELF_TypeCommon)
- .Case("notype", MCSA_ELF_TypeNoType)
- .Default(MCSA_Invalid);
-
- if (Attr == MCSA_Invalid)
- return Error(TypeLoc, "unsupported attribute in '.type' directive");
-
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.type' directive");
-
- Lex();
-
- getStreamer().EmitSymbolAttribute(Sym, Attr);
-
return false;
}
/// ParseDirectiveComm
/// ::= ( .comm | .lcomm ) identifier , size_expression [ , align_expression ]
bool AsmParser::ParseDirectiveComm(bool IsLocal) {
+ CheckForValidSection();
+
SMLoc IDLoc = getLexer().getLoc();
StringRef Name;
if (ParseIdentifier(Name))
return TokError("expected identifier in directive");
-
+
// Handle the identifier as the key symbol.
MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
@@ -1589,7 +1840,7 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
Pow2AlignmentLoc = getLexer().getLoc();
if (ParseAbsoluteExpression(Pow2Alignment))
return true;
-
+
// If this target takes alignments in bytes (not log) validate and convert.
if (Lexer.getMAI().getAlignmentIsInBytes()) {
if (!isPowerOf2_64(Pow2Alignment))
@@ -1597,10 +1848,10 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
Pow2Alignment = Log2_64(Pow2Alignment);
}
}
-
+
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.comm' or '.lcomm' directive");
-
+
Lex();
   // NOTE: a size of zero for a .comm should create an undefined symbol
@@ -1659,17 +1910,17 @@ bool AsmParser::ParseDirectiveAbort() {
bool AsmParser::ParseDirectiveInclude() {
if (getLexer().isNot(AsmToken::String))
return TokError("expected string in '.include' directive");
-
+
std::string Filename = getTok().getString();
SMLoc IncludeLoc = getLexer().getLoc();
Lex();
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.include' directive");
-
+
// Strip the quotes.
Filename = Filename.substr(1, Filename.size()-2);
-
+
// Attempt to switch the lexer to the included file before consuming the end
// of statement to avoid losing it when we switch.
if (EnterIncludeFile(Filename)) {
@@ -1695,7 +1946,7 @@ bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.if' directive");
-
+
Lex();
TheCondState.CondMet = ExprValue;
@@ -1705,6 +1956,31 @@ bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
return false;
}
+bool AsmParser::ParseDirectiveIfdef(SMLoc DirectiveLoc, bool expect_defined) {
+ StringRef Name;
+ TheCondStack.push_back(TheCondState);
+ TheCondState.TheCond = AsmCond::IfCond;
+
+ if (TheCondState.Ignore) {
+ EatToEndOfStatement();
+ } else {
+ if (ParseIdentifier(Name))
+ return TokError("expected identifier after '.ifdef'");
+
+ Lex();
+
+ MCSymbol *Sym = getContext().LookupSymbol(Name);
+
+ if (expect_defined)
+ TheCondState.CondMet = (Sym != NULL && !Sym->isUndefined());
+ else
+ TheCondState.CondMet = (Sym == NULL || Sym->isUndefined());
+ TheCondState.Ignore = !TheCondState.CondMet;
+ }
+
+ return false;
+}
+
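
Typical usage, assuming the handler is registered for '.ifdef' with
expect_defined set and for '.ifndef' with it clear (the dispatch table is not
part of this hunk):

        .ifdef  USE_FAST_PATH
        .byte   1
        .else
        .byte   0
        .endif
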
/// ParseDirectiveElseIf
/// ::= .elseif expression
bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) {
@@ -1728,7 +2004,7 @@ bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.elseif' directive");
-
+
Lex();
TheCondState.CondMet = ExprValue;
TheCondState.Ignore = !TheCondState.CondMet;
@@ -1742,7 +2018,7 @@ bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) {
bool AsmParser::ParseDirectiveElse(SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.else' directive");
-
+
Lex();
if (TheCondState.TheCond != AsmCond::IfCond &&
@@ -1766,7 +2042,7 @@ bool AsmParser::ParseDirectiveElse(SMLoc DirectiveLoc) {
bool AsmParser::ParseDirectiveEndIf(SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.endif' directive");
-
+
Lex();
if ((TheCondState.TheCond == AsmCond::NoCond) ||
@@ -1808,9 +2084,8 @@ bool GenericAsmParser::ParseDirectiveFile(StringRef, SMLoc DirectiveLoc) {
if (FileNumber == -1)
getStreamer().EmitFileDirective(Filename);
else {
- if (getContext().GetDwarfFile(Filename, FileNumber) == 0)
- Error(FileNumberLoc, "file number already allocated");
- getStreamer().EmitDwarfFileDirective(FileNumber, Filename);
+ if (getStreamer().EmitDwarfFileDirective(FileNumber, Filename))
+ Error(FileNumberLoc, "file number already allocated");
}
return false;
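
The two forms now diverge at the streamer: the numberless form just records
the source file name, while the numbered form registers a DWARF file entry
and reports a duplicate number as an error. For example:

        .file   "main.c"         # plain file name
        .file   1 "main.c"       # DWARF file number 1
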
@@ -1851,7 +2126,7 @@ bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
int64_t FileNumber = getTok().getIntVal();
if (FileNumber < 1)
return TokError("file number less than one in '.loc' directive");
- if (!getContext().ValidateDwarfFileNumber(FileNumber))
+ if (!getContext().isValidDwarfFileNumber(FileNumber))
return TokError("unassigned file number in '.loc' directive");
Lex();
@@ -1871,8 +2146,9 @@ bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
Lex();
}
- unsigned Flags = 0;
+ unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;
unsigned Isa = 0;
+ int64_t Discriminator = 0;
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
if (getLexer().is(AsmToken::EndOfStatement))
@@ -1903,7 +2179,7 @@ bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
Flags |= DWARF2_FLAG_IS_STMT;
else
return Error(Loc, "is_stmt value not 0 or 1");
- }
+ }
else {
return Error(Loc, "is_stmt value not the constant value of 0 or 1");
}
@@ -1919,11 +2195,15 @@ bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
if (Value < 0)
return Error(Loc, "isa number less than zero");
Isa = Value;
- }
+ }
else {
return Error(Loc, "isa number not a constant value");
}
}
+ else if (Name == "discriminator") {
+ if (getParser().ParseAbsoluteExpression(Discriminator))
+ return true;
+ }
else {
return Error(Loc, "unknown sub-directive in '.loc' directive");
}
@@ -1933,11 +2213,176 @@ bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
}
}
- getContext().setCurrentDwarfLoc(FileNumber, LineNumber, ColumnPos, Flags,Isa);
+ getStreamer().EmitDwarfLocDirective(FileNumber, LineNumber, ColumnPos, Flags,
+ Isa, Discriminator);
return false;
}
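
With the new 'discriminator' sub-directive, a '.loc' line can look like the
following (file, line, and column values are illustrative):

        .file   1 "main.c"
        .loc    1 42 7 is_stmt 1 discriminator 2
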
+/// ParseDirectiveStabs
+/// ::= .stabs string, number, number, number
+bool GenericAsmParser::ParseDirectiveStabs(StringRef Directive,
+ SMLoc DirectiveLoc) {
+ return TokError("unsupported directive '" + Directive + "'");
+}
+
+/// ParseDirectiveCFIStartProc
+/// ::= .cfi_startproc
+bool GenericAsmParser::ParseDirectiveCFIStartProc(StringRef,
+ SMLoc DirectiveLoc) {
+ return getStreamer().EmitCFIStartProc();
+}
+
+/// ParseDirectiveCFIEndProc
+/// ::= .cfi_endproc
+bool GenericAsmParser::ParseDirectiveCFIEndProc(StringRef, SMLoc DirectiveLoc) {
+ return getStreamer().EmitCFIEndProc();
+}
+
+/// ParseRegisterOrRegisterNumber - parse register name or number.
+bool GenericAsmParser::ParseRegisterOrRegisterNumber(int64_t &Register,
+ SMLoc DirectiveLoc) {
+ unsigned RegNo;
+
+ if (getLexer().is(AsmToken::Percent)) {
+ if (getParser().getTargetParser().ParseRegister(RegNo, DirectiveLoc,
+ DirectiveLoc))
+ return true;
+ Register = getContext().getTargetAsmInfo().getDwarfRegNum(RegNo, true);
+ } else
+ return getParser().ParseAbsoluteExpression(Register);
+
+ return false;
+}
+
+/// ParseDirectiveCFIDefCfa
+/// ::= .cfi_def_cfa register, offset
+bool GenericAsmParser::ParseDirectiveCFIDefCfa(StringRef,
+ SMLoc DirectiveLoc) {
+ int64_t Register = 0;
+ if (ParseRegisterOrRegisterNumber(Register, DirectiveLoc))
+ return true;
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ int64_t Offset = 0;
+ if (getParser().ParseAbsoluteExpression(Offset))
+ return true;
+
+ return getStreamer().EmitCFIDefCfa(Register, Offset);
+}
+
+/// ParseDirectiveCFIDefCfaOffset
+/// ::= .cfi_def_cfa_offset offset
+bool GenericAsmParser::ParseDirectiveCFIDefCfaOffset(StringRef,
+ SMLoc DirectiveLoc) {
+ int64_t Offset = 0;
+ if (getParser().ParseAbsoluteExpression(Offset))
+ return true;
+
+ return getStreamer().EmitCFIDefCfaOffset(Offset);
+}
+
+/// ParseDirectiveCFIDefCfaRegister
+/// ::= .cfi_def_cfa_register register
+bool GenericAsmParser::ParseDirectiveCFIDefCfaRegister(StringRef,
+ SMLoc DirectiveLoc) {
+ int64_t Register = 0;
+ if (ParseRegisterOrRegisterNumber(Register, DirectiveLoc))
+ return true;
+
+ return getStreamer().EmitCFIDefCfaRegister(Register);
+}
+
+/// ParseDirectiveCFIOffset
+/// ::= .cfi_offset register, offset
+bool GenericAsmParser::ParseDirectiveCFIOffset(StringRef, SMLoc DirectiveLoc) {
+ int64_t Register = 0;
+ int64_t Offset = 0;
+
+ if (ParseRegisterOrRegisterNumber(Register, DirectiveLoc))
+ return true;
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ if (getParser().ParseAbsoluteExpression(Offset))
+ return true;
+
+ return getStreamer().EmitCFIOffset(Register, Offset);
+}
+
+static bool isValidEncoding(int64_t Encoding) {
+ if (Encoding & ~0xff)
+ return false;
+
+ if (Encoding == dwarf::DW_EH_PE_omit)
+ return true;
+
+ const unsigned Format = Encoding & 0xf;
+ if (Format != dwarf::DW_EH_PE_absptr && Format != dwarf::DW_EH_PE_udata2 &&
+ Format != dwarf::DW_EH_PE_udata4 && Format != dwarf::DW_EH_PE_udata8 &&
+ Format != dwarf::DW_EH_PE_sdata2 && Format != dwarf::DW_EH_PE_sdata4 &&
+ Format != dwarf::DW_EH_PE_sdata8 && Format != dwarf::DW_EH_PE_signed)
+ return false;
+
+ const unsigned Application = Encoding & 0x70;
+ if (Application != dwarf::DW_EH_PE_absptr &&
+ Application != dwarf::DW_EH_PE_pcrel)
+ return false;
+
+ return true;
+}
+
+/// ParseDirectiveCFIPersonalityOrLsda
+/// ::= .cfi_personality encoding, [symbol_name]
+/// ::= .cfi_lsda encoding, [symbol_name]
+bool GenericAsmParser::ParseDirectiveCFIPersonalityOrLsda(StringRef IDVal,
+ SMLoc DirectiveLoc) {
+ int64_t Encoding = 0;
+ if (getParser().ParseAbsoluteExpression(Encoding))
+ return true;
+ if (Encoding == dwarf::DW_EH_PE_omit)
+ return false;
+
+ if (!isValidEncoding(Encoding))
+ return TokError("unsupported encoding.");
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (IDVal == ".cfi_personality")
+ return getStreamer().EmitCFIPersonality(Sym, Encoding);
+ else {
+ assert(IDVal == ".cfi_lsda");
+ return getStreamer().EmitCFILsda(Sym, Encoding);
+ }
+}
+
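
A sketch of the accepted form on an x86-64 ELF target (symbol names are
illustrative). 0x9b is DW_EH_PE_indirect|DW_EH_PE_pcrel|DW_EH_PE_sdata4 and
0x1b is DW_EH_PE_pcrel|DW_EH_PE_sdata4; both pass isValidEncoding above,
which checks only the low format nibble and the 0x70 application bits:

        .cfi_personality 0x9b, __gxx_personality_v0
        .cfi_lsda 0x1b, .Lexception0
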
+/// ParseDirectiveCFIRememberState
+/// ::= .cfi_remember_state
+bool GenericAsmParser::ParseDirectiveCFIRememberState(StringRef IDVal,
+ SMLoc DirectiveLoc) {
+ return getStreamer().EmitCFIRememberState();
+}
+
+/// ParseDirectiveCFIRestoreState
+/// ::= .cfi_restore_state
+bool GenericAsmParser::ParseDirectiveCFIRestoreState(StringRef IDVal,
+ SMLoc DirectiveLoc) {
+ return getStreamer().EmitCFIRestoreState();
+}
+
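
Taken together, the new CFI handlers cover a typical prologue and epilogue; a
minimal x86-64 sketch (register operands may be written as %names or as raw
DWARF register numbers, per ParseRegisterOrRegisterNumber):

func:
        .cfi_startproc
        pushq   %rbp
        .cfi_def_cfa_offset 16
        .cfi_offset %rbp, -16
        movq    %rsp, %rbp
        .cfi_def_cfa_register %rbp
        popq    %rbp
        .cfi_def_cfa %rsp, 8
        ret
        .cfi_endproc
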
/// ParseDirectiveMacrosOnOff
/// ::= .macros_on
/// ::= .macros_off
@@ -2022,6 +2467,26 @@ bool GenericAsmParser::ParseDirectiveEndMacro(StringRef Directive,
"no current macro definition");
}
+/// ParseDirectiveLEB128
+/// ::= (.sleb128 | .uleb128) expression
+bool GenericAsmParser::ParseDirectiveLEB128(StringRef DirName, SMLoc) {
+ getParser().CheckForValidSection();
+
+ const MCExpr *Value;
+
+ if (getParser().ParseExpression(Value))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ if (DirName[1] == 's')
+ getStreamer().EmitSLEB128Value(Value);
+ else
+ getStreamer().EmitULEB128Value(Value);
+
+ return false;
+}
+
+
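
Unlike the ELF-only implementation this patch removes below (which folded the
operand to an absolute value), this handler parses a full MCExpr, so label
differences are legal (label names are illustrative):

        .uleb128  0x81              # encodes as 0x81 0x01
        .sleb128  -2                # encodes as 0x7e
        .uleb128  .Lend - .Lstart   # relocatable difference
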
/// \brief Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(const Target &T, SourceMgr &SM,
MCContext &C, MCStreamer &Out,
diff --git a/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
new file mode 100644
index 0000000..5ecab03
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
@@ -0,0 +1,144 @@
+//===- COFFAsmParser.cpp - COFF Assembly Parser ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/COFF.h"
+using namespace llvm;
+
+namespace {
+
+class COFFAsmParser : public MCAsmParserExtension {
+ template<bool (COFFAsmParser::*Handler)(StringRef, SMLoc)>
+ void AddDirectiveHandler(StringRef Directive) {
+ getParser().AddDirectiveHandler(this, Directive,
+ HandleDirective<COFFAsmParser, Handler>);
+ }
+
+ bool ParseSectionSwitch(StringRef Section,
+ unsigned Characteristics,
+ SectionKind Kind);
+
+ virtual void Initialize(MCAsmParser &Parser) {
+ // Call the base implementation.
+ MCAsmParserExtension::Initialize(Parser);
+
+ AddDirectiveHandler<&COFFAsmParser::ParseSectionDirectiveText>(".text");
+ AddDirectiveHandler<&COFFAsmParser::ParseSectionDirectiveData>(".data");
+ AddDirectiveHandler<&COFFAsmParser::ParseSectionDirectiveBSS>(".bss");
+ AddDirectiveHandler<&COFFAsmParser::ParseDirectiveDef>(".def");
+ AddDirectiveHandler<&COFFAsmParser::ParseDirectiveScl>(".scl");
+ AddDirectiveHandler<&COFFAsmParser::ParseDirectiveType>(".type");
+ AddDirectiveHandler<&COFFAsmParser::ParseDirectiveEndef>(".endef");
+ }
+
+ bool ParseSectionDirectiveText(StringRef, SMLoc) {
+ return ParseSectionSwitch(".text",
+ COFF::IMAGE_SCN_CNT_CODE
+ | COFF::IMAGE_SCN_MEM_EXECUTE
+ | COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getText());
+ }
+ bool ParseSectionDirectiveData(StringRef, SMLoc) {
+ return ParseSectionSwitch(".data",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+ }
+ bool ParseSectionDirectiveBSS(StringRef, SMLoc) {
+ return ParseSectionSwitch(".bss",
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getBSS());
+ }
+
+ bool ParseDirectiveDef(StringRef, SMLoc);
+ bool ParseDirectiveScl(StringRef, SMLoc);
+ bool ParseDirectiveType(StringRef, SMLoc);
+ bool ParseDirectiveEndef(StringRef, SMLoc);
+
+public:
+ COFFAsmParser() {}
+};
+
+} // end anonymous namespace.
+
+bool COFFAsmParser::ParseSectionSwitch(StringRef Section,
+ unsigned Characteristics,
+ SectionKind Kind) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in section switching directive");
+ Lex();
+
+ getStreamer().SwitchSection(getContext().getCOFFSection(
+ Section, Characteristics, Kind));
+
+ return false;
+}
+
+bool COFFAsmParser::ParseDirectiveDef(StringRef, SMLoc) {
+ StringRef SymbolName;
+
+ if (getParser().ParseIdentifier(SymbolName))
+ return TokError("expected identifier in directive");
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(SymbolName);
+
+ getStreamer().BeginCOFFSymbolDef(Sym);
+
+ Lex();
+ return false;
+}
+
+bool COFFAsmParser::ParseDirectiveScl(StringRef, SMLoc) {
+ int64_t SymbolStorageClass;
+ if (getParser().ParseAbsoluteExpression(SymbolStorageClass))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ Lex();
+ getStreamer().EmitCOFFSymbolStorageClass(SymbolStorageClass);
+ return false;
+}
+
+bool COFFAsmParser::ParseDirectiveType(StringRef, SMLoc) {
+ int64_t Type;
+ if (getParser().ParseAbsoluteExpression(Type))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ Lex();
+ getStreamer().EmitCOFFSymbolType(Type);
+ return false;
+}
+
+bool COFFAsmParser::ParseDirectiveEndef(StringRef, SMLoc) {
+ Lex();
+ getStreamer().EndCOFFSymbolDef();
+ return false;
+}
+
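
A representative COFF symbol definition block for these handlers, assuming
the usual PE/COFF constants (storage class 2 = external, type 32 = function;
the symbol name is illustrative):

        .def    _main
        .scl    2
        .type   32
        .endef
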
+namespace llvm {
+
+MCAsmParserExtension *createCOFFAsmParser() {
+ return new COFFAsmParser;
+}
+
+}
diff --git a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
index 54ddb44..44f2345 100644
--- a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -305,7 +305,7 @@ bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
//
// FIXME: This isn't really what 'as' does; I think it just uses the implicit
// alignment on the section (e.g., if one manually inserts bytes into the
- // section, then just issueing the section switch directive will not realign
+ // section, then just issuing the section switch directive will not realign
// the section. However, this is arguably more reasonable behavior, and there
// is no good reason for someone to intentionally emit incorrectly sized
// values into the implicitly aligned sections.
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index f982fda..bfaf36a 100644
--- a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -8,13 +8,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
namespace {
@@ -47,72 +49,86 @@ public:
AddDirectiveHandler<&ELFAsmParser::ParseSectionDirectiveDataRelRoLocal>(".data.rel.ro.local");
AddDirectiveHandler<&ELFAsmParser::ParseSectionDirectiveEhFrame>(".eh_frame");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSection>(".section");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectivePushSection>(".pushsection");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectivePopSection>(".popsection");
AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSize>(".size");
- AddDirectiveHandler<&ELFAsmParser::ParseDirectiveLEB128>(".sleb128");
- AddDirectiveHandler<&ELFAsmParser::ParseDirectiveLEB128>(".uleb128");
AddDirectiveHandler<&ELFAsmParser::ParseDirectivePrevious>(".previous");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectiveType>(".type");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectiveIdent>(".ident");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectiveSymver>(".symver");
+ AddDirectiveHandler<&ELFAsmParser::ParseDirectiveWeakref>(".weakref");
}
+ // FIXME: Part of this logic is duplicated in the MCELFStreamer. What is
+ // the best way for us to get access to it?
bool ParseSectionDirectiveData(StringRef, SMLoc) {
- return ParseSectionSwitch(".data", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ return ParseSectionSwitch(".data", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getDataRel());
}
bool ParseSectionDirectiveText(StringRef, SMLoc) {
- return ParseSectionSwitch(".text", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_EXECINSTR |
- MCSectionELF::SHF_ALLOC, SectionKind::getText());
+ return ParseSectionSwitch(".text", ELF::SHT_PROGBITS,
+ ELF::SHF_EXECINSTR |
+ ELF::SHF_ALLOC, SectionKind::getText());
}
bool ParseSectionDirectiveBSS(StringRef, SMLoc) {
- return ParseSectionSwitch(".bss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE |
- MCSectionELF::SHF_ALLOC, SectionKind::getBSS());
+ return ParseSectionSwitch(".bss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC, SectionKind::getBSS());
}
bool ParseSectionDirectiveRoData(StringRef, SMLoc) {
- return ParseSectionSwitch(".rodata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC,
+ return ParseSectionSwitch(".rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC,
SectionKind::getReadOnly());
}
bool ParseSectionDirectiveTData(StringRef, SMLoc) {
- return ParseSectionSwitch(".tdata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_TLS | MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".tdata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_TLS | ELF::SHF_WRITE,
SectionKind::getThreadData());
}
bool ParseSectionDirectiveTBSS(StringRef, SMLoc) {
- return ParseSectionSwitch(".tbss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_TLS | MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".tbss", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_TLS | ELF::SHF_WRITE,
SectionKind::getThreadBSS());
}
bool ParseSectionDirectiveDataRel(StringRef, SMLoc) {
- return ParseSectionSwitch(".data.rel", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".data.rel", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_WRITE,
SectionKind::getDataRel());
}
bool ParseSectionDirectiveDataRelRo(StringRef, SMLoc) {
- return ParseSectionSwitch(".data.rel.ro", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".data.rel.ro", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_WRITE,
SectionKind::getReadOnlyWithRel());
}
bool ParseSectionDirectiveDataRelRoLocal(StringRef, SMLoc) {
- return ParseSectionSwitch(".data.rel.ro.local", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".data.rel.ro.local", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_WRITE,
SectionKind::getReadOnlyWithRelLocal());
}
bool ParseSectionDirectiveEhFrame(StringRef, SMLoc) {
- return ParseSectionSwitch(".eh_frame", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_WRITE,
+ return ParseSectionSwitch(".eh_frame", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::SHF_WRITE,
SectionKind::getDataRel());
}
- bool ParseDirectiveLEB128(StringRef, SMLoc);
+ bool ParseDirectivePushSection(StringRef, SMLoc);
+ bool ParseDirectivePopSection(StringRef, SMLoc);
bool ParseDirectiveSection(StringRef, SMLoc);
bool ParseDirectiveSize(StringRef, SMLoc);
bool ParseDirectivePrevious(StringRef, SMLoc);
+ bool ParseDirectiveType(StringRef, SMLoc);
+ bool ParseDirectiveIdent(StringRef, SMLoc);
+ bool ParseDirectiveSymver(StringRef, SMLoc);
+ bool ParseDirectiveWeakref(StringRef, SMLoc);
+
+private:
+ bool ParseSectionName(StringRef &SectionName);
};
}
@@ -150,135 +166,359 @@ bool ELFAsmParser::ParseDirectiveSize(StringRef, SMLoc) {
return false;
}
-// FIXME: This is a work in progress.
-bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
- StringRef SectionName;
- // FIXME: This doesn't parse section names like ".note.GNU-stack" correctly.
- if (getParser().ParseIdentifier(SectionName))
- return TokError("expected identifier in directive");
-
- std::string FlagsStr;
- StringRef TypeName;
- int64_t Size = 0;
- if (getLexer().is(AsmToken::Comma)) {
- Lex();
-
- if (getLexer().isNot(AsmToken::String))
- return TokError("expected string in directive");
+bool ELFAsmParser::ParseSectionName(StringRef &SectionName) {
+ // A section name can contain -, so we cannot just use
+ // ParseIdentifier.
+ SMLoc FirstLoc = getLexer().getLoc();
+ unsigned Size = 0;
- FlagsStr = getTok().getStringContents();
+ if (getLexer().is(AsmToken::String)) {
+ SectionName = getTok().getIdentifier();
Lex();
+ return false;
+ }
- AsmToken::TokenKind TypeStartToken;
- if (getContext().getAsmInfo().getCommentString()[0] == '@')
- TypeStartToken = AsmToken::Percent;
- else
- TypeStartToken = AsmToken::At;
+ for (;;) {
+ StringRef Tmp;
+ unsigned CurSize;
- if (getLexer().is(AsmToken::Comma)) {
+ SMLoc PrevLoc = getLexer().getLoc();
+ if (getLexer().is(AsmToken::Minus)) {
+ CurSize = 1;
+ Lex(); // Consume the "-".
+ } else if (getLexer().is(AsmToken::String)) {
+ CurSize = getTok().getIdentifier().size() + 2;
Lex();
- if (getLexer().is(TypeStartToken)) {
- Lex();
- if (getParser().ParseIdentifier(TypeName))
- return TokError("expected identifier in directive");
-
- if (getLexer().is(AsmToken::Comma)) {
- Lex();
+ } else if (getLexer().is(AsmToken::Identifier)) {
+ CurSize = getTok().getIdentifier().size();
+ Lex();
+ } else {
+ break;
+ }
- if (getParser().ParseAbsoluteExpression(Size))
- return true;
+ Size += CurSize;
+ SectionName = StringRef(FirstLoc.getPointer(), Size);
- if (Size <= 0)
- return TokError("section size must be positive");
- }
- }
- }
+ // Make sure the following token is adjacent.
+ if (PrevLoc.getPointer() + CurSize != getTok().getLoc().getPointer())
+ break;
}
+ if (Size == 0)
+ return true;
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
+ return false;
+}
- unsigned Flags = 0;
- for (unsigned i = 0; i < FlagsStr.size(); i++) {
- switch (FlagsStr[i]) {
+static SectionKind computeSectionKind(unsigned Flags) {
+ if (Flags & ELF::SHF_EXECINSTR)
+ return SectionKind::getText();
+ if (Flags & ELF::SHF_TLS)
+ return SectionKind::getThreadData();
+ return SectionKind::getDataRel();
+}
+
+static int parseSectionFlags(StringRef flagsStr) {
+ int flags = 0;
+
+ for (unsigned i = 0; i < flagsStr.size(); i++) {
+ switch (flagsStr[i]) {
case 'a':
- Flags |= MCSectionELF::SHF_ALLOC;
+ flags |= ELF::SHF_ALLOC;
break;
case 'x':
- Flags |= MCSectionELF::SHF_EXECINSTR;
+ flags |= ELF::SHF_EXECINSTR;
break;
case 'w':
- Flags |= MCSectionELF::SHF_WRITE;
+ flags |= ELF::SHF_WRITE;
break;
case 'M':
- Flags |= MCSectionELF::SHF_MERGE;
+ flags |= ELF::SHF_MERGE;
break;
case 'S':
- Flags |= MCSectionELF::SHF_STRINGS;
+ flags |= ELF::SHF_STRINGS;
break;
case 'T':
- Flags |= MCSectionELF::SHF_TLS;
+ flags |= ELF::SHF_TLS;
break;
case 'c':
- Flags |= MCSectionELF::XCORE_SHF_CP_SECTION;
+ flags |= ELF::XCORE_SHF_CP_SECTION;
break;
case 'd':
- Flags |= MCSectionELF::XCORE_SHF_DP_SECTION;
+ flags |= ELF::XCORE_SHF_DP_SECTION;
+ break;
+ case 'G':
+ flags |= ELF::SHF_GROUP;
break;
default:
+ return -1;
+ }
+ }
+
+ return flags;
+}
+
+bool ELFAsmParser::ParseDirectivePushSection(StringRef s, SMLoc loc) {
+ getStreamer().PushSection();
+
+ if (ParseDirectiveSection(s, loc)) {
+ getStreamer().PopSection();
+ return true;
+ }
+
+ return false;
+}
+
+bool ELFAsmParser::ParseDirectivePopSection(StringRef, SMLoc) {
+ if (!getStreamer().PopSection())
+ return TokError(".popsection without corresponding .pushsection");
+ return false;
+}
+
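
Since ParseDirectivePushSection reuses ParseDirectiveSection, '.pushsection'
takes the same arguments as '.section'; a small sketch (the section name is
illustrative):

        .pushsection .note.example, "a"    # save current section and switch
        .long   1
        .popsection                        # restore the saved section
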
+// FIXME: This is a work in progress.
+bool ELFAsmParser::ParseDirectiveSection(StringRef, SMLoc) {
+ StringRef SectionName;
+
+ if (ParseSectionName(SectionName))
+ return TokError("expected identifier in directive");
+
+ StringRef TypeName;
+ int64_t Size = 0;
+ StringRef GroupName;
+ unsigned Flags = 0;
+
+ // Set the defaults first.
+ if (SectionName == ".fini" || SectionName == ".init" ||
+ SectionName == ".rodata")
+ Flags |= ELF::SHF_ALLOC;
+ if (SectionName == ".fini" || SectionName == ".init")
+ Flags |= ELF::SHF_EXECINSTR;
+
+ if (getLexer().is(AsmToken::Comma)) {
+ Lex();
+
+ if (getLexer().isNot(AsmToken::String))
+ return TokError("expected string in directive");
+
+ StringRef FlagsStr = getTok().getStringContents();
+ Lex();
+
+ int extraFlags = parseSectionFlags(FlagsStr);
+ if (extraFlags < 0)
return TokError("unknown flag");
+ Flags |= extraFlags;
+
+ bool Mergeable = Flags & ELF::SHF_MERGE;
+ bool Group = Flags & ELF::SHF_GROUP;
+
+ if (getLexer().isNot(AsmToken::Comma)) {
+ if (Mergeable)
+ return TokError("Mergeable section must specify the type");
+ if (Group)
+ return TokError("Group section must specify the type");
+ } else {
+ Lex();
+ if (getLexer().isNot(AsmToken::Percent) && getLexer().isNot(AsmToken::At))
+ return TokError("expected '@' or '%' before type");
+
+ Lex();
+ if (getParser().ParseIdentifier(TypeName))
+ return TokError("expected identifier in directive");
+
+ if (Mergeable) {
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("expected the entry size");
+ Lex();
+ if (getParser().ParseAbsoluteExpression(Size))
+ return true;
+ if (Size <= 0)
+ return TokError("entry size must be positive");
+ }
+
+ if (Group) {
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("expected group name");
+ Lex();
+ if (getParser().ParseIdentifier(GroupName))
+ return true;
+ if (getLexer().is(AsmToken::Comma)) {
+ Lex();
+ StringRef Linkage;
+ if (getParser().ParseIdentifier(Linkage))
+ return true;
+ if (Linkage != "comdat")
+ return TokError("Linkage must be 'comdat'");
+ }
+ }
}
}
- unsigned Type = MCSectionELF::SHT_NULL;
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ unsigned Type = ELF::SHT_PROGBITS;
+
if (!TypeName.empty()) {
if (TypeName == "init_array")
- Type = MCSectionELF::SHT_INIT_ARRAY;
+ Type = ELF::SHT_INIT_ARRAY;
else if (TypeName == "fini_array")
- Type = MCSectionELF::SHT_FINI_ARRAY;
+ Type = ELF::SHT_FINI_ARRAY;
else if (TypeName == "preinit_array")
- Type = MCSectionELF::SHT_PREINIT_ARRAY;
+ Type = ELF::SHT_PREINIT_ARRAY;
else if (TypeName == "nobits")
- Type = MCSectionELF::SHT_NOBITS;
+ Type = ELF::SHT_NOBITS;
else if (TypeName == "progbits")
- Type = MCSectionELF::SHT_PROGBITS;
+ Type = ELF::SHT_PROGBITS;
+ else if (TypeName == "note")
+ Type = ELF::SHT_NOTE;
+ else if (TypeName == "unwind")
+ Type = ELF::SHT_X86_64_UNWIND;
else
return TokError("unknown section type");
}
- SectionKind Kind = (Flags & MCSectionELF::SHF_EXECINSTR)
- ? SectionKind::getText()
- : SectionKind::getDataRel();
+ SectionKind Kind = computeSectionKind(Flags);
getStreamer().SwitchSection(getContext().getELFSection(SectionName, Type,
- Flags, Kind, false));
+ Flags, Kind, Size,
+ GroupName));
return false;
}
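
Two inputs exercising the new flag handling (section and group names are
illustrative): a mergeable 'M' section must state its type and entry size,
and a 'G' section must name its group, optionally with 'comdat' linkage:

        .section .rodata.str1.1, "aMS", @progbits, 1
        .section .text.hot, "axG", @progbits, hotgroup, comdat
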
-bool ELFAsmParser::ParseDirectiveLEB128(StringRef DirName, SMLoc) {
- int64_t Value;
- if (getParser().ParseAbsoluteExpression(Value))
- return true;
+bool ELFAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) {
+ const MCSection *PreviousSection = getStreamer().getPreviousSection();
+ if (PreviousSection == NULL)
+ return TokError(".previous without corresponding .section");
+ getStreamer().SwitchSection(PreviousSection);
+
+ return false;
+}
+
+/// ParseDirectiveELFType
+/// ::= .type identifier , @attribute
+bool ELFAsmParser::ParseDirectiveType(StringRef, SMLoc) {
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ // Handle the identifier as the key symbol.
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '.type' directive");
+ Lex();
+
+ if (getLexer().isNot(AsmToken::Percent) && getLexer().isNot(AsmToken::At))
+ return TokError("expected '@' or '%' before type");
+ Lex();
+
+ StringRef Type;
+ SMLoc TypeLoc;
+
+ TypeLoc = getLexer().getLoc();
+ if (getParser().ParseIdentifier(Type))
+ return TokError("expected symbol type in directive");
+
+ MCSymbolAttr Attr = StringSwitch<MCSymbolAttr>(Type)
+ .Case("function", MCSA_ELF_TypeFunction)
+ .Case("object", MCSA_ELF_TypeObject)
+ .Case("tls_object", MCSA_ELF_TypeTLS)
+ .Case("common", MCSA_ELF_TypeCommon)
+ .Case("notype", MCSA_ELF_TypeNoType)
+ .Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
+ .Default(MCSA_Invalid);
+
+ if (Attr == MCSA_Invalid)
+ return Error(TypeLoc, "unsupported attribute in '.type' directive");
if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
+ return TokError("unexpected token in '.type' directive");
- // FIXME: Add proper MC support.
- if (getContext().getAsmInfo().hasLEB128()) {
- if (DirName[1] == 's')
- getStreamer().EmitRawText("\t.sleb128\t" + Twine(Value));
- else
- getStreamer().EmitRawText("\t.uleb128\t" + Twine(Value));
- return false;
- }
- // FIXME: This shouldn't be an error!
- return TokError("LEB128 not supported yet");
+ Lex();
+
+ getStreamer().EmitSymbolAttribute(Sym, Attr);
+
+ return false;
}
-bool ELFAsmParser::ParseDirectivePrevious(StringRef DirName, SMLoc) {
- const MCSection *PreviousSection = getStreamer().getPreviousSection();
- if (PreviousSection != NULL)
- getStreamer().SwitchSection(PreviousSection);
+/// ParseDirectiveIdent
+/// ::= .ident string
+bool ELFAsmParser::ParseDirectiveIdent(StringRef, SMLoc) {
+ if (getLexer().isNot(AsmToken::String))
+ return TokError("unexpected token in '.ident' directive");
+
+ StringRef Data = getTok().getIdentifier();
+
+ Lex();
+
+ const MCSection *Comment =
+ getContext().getELFSection(".comment", ELF::SHT_PROGBITS,
+ ELF::SHF_MERGE |
+ ELF::SHF_STRINGS,
+ SectionKind::getReadOnly(),
+ 1, "");
+
+ static bool First = true;
+
+ getStreamer().PushSection();
+ getStreamer().SwitchSection(Comment);
+ if (First)
+ getStreamer().EmitIntValue(0, 1);
+ First = false;
+ getStreamer().EmitBytes(Data, 0);
+ getStreamer().EmitIntValue(0, 1);
+ getStreamer().PopSection();
+ return false;
+}
+
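
Usage sketch (the string is illustrative); each string is appended
NUL-terminated to a mergeable .comment section, with one leading NUL byte
emitted on first use:

        .ident  "example assembler 0.1"
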
+/// ParseDirectiveSymver
+/// ::= .symver foo, bar2@zed
+bool ELFAsmParser::ParseDirectiveSymver(StringRef, SMLoc) {
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("expected a comma");
+
+ Lex();
+
+ StringRef AliasName;
+ if (getParser().ParseIdentifier(AliasName))
+ return TokError("expected identifier in directive");
+
+ if (AliasName.find('@') == StringRef::npos)
+ return TokError("expected a '@' in the name");
+
+ MCSymbol *Alias = getContext().GetOrCreateSymbol(AliasName);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ const MCExpr *Value = MCSymbolRefExpr::Create(Sym, getContext());
+
+ getStreamer().EmitAssignment(Alias, Value);
+ return false;
+}
+
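
Usage sketch (names are illustrative); the second operand must contain '@'
and becomes an alias bound to the first symbol:

        .symver foo, foo@VERS_1.0
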
+/// ParseDirectiveWeakref
+/// ::= .weakref foo, bar
+bool ELFAsmParser::ParseDirectiveWeakref(StringRef, SMLoc) {
+ // FIXME: Share code with the other alias building directives.
+
+ StringRef AliasName;
+ if (getParser().ParseIdentifier(AliasName))
+ return TokError("expected identifier in directive");
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("expected a comma");
+
+ Lex();
+
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ MCSymbol *Alias = getContext().GetOrCreateSymbol(AliasName);
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ getStreamer().EmitWeakReference(Alias, Sym);
return false;
}
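
Usage sketch (names are illustrative):

        .weakref  bar_alias, bar    # weak reference to 'bar' via 'bar_alias'
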
diff --git a/contrib/llvm/lib/MC/MCPureStreamer.cpp b/contrib/llvm/lib/MC/MCPureStreamer.cpp
new file mode 100644
index 0000000..6098e6b
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCPureStreamer.cpp
@@ -0,0 +1,234 @@
+//===- lib/MC/MCPureStreamer.cpp - MC "Pure" Object Output ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectStreamer.h"
+// FIXME: Remove this.
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+
+class MCPureStreamer : public MCObjectStreamer {
+private:
+ virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitInstToData(const MCInst &Inst);
+
+public:
+ MCPureStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void InitSections();
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ unsigned Size = 0, unsigned ByteAlignment = 0);
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0);
+ virtual void Finish();
+
+
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitThumbFunc(MCSymbol *Func) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitCOFFSymbolType(int Type) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EndCOFFSymbolDef() {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual void EmitFileDirective(StringRef Filename) {
+ report_fatal_error("unsupported directive in pure streamer");
+ }
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ report_fatal_error("unsupported directive in pure streamer");
+ return false;
+ }
+
+ /// @}
+};
+
+} // end anonymous namespace.
+
+void MCPureStreamer::InitSections() {
+  // FIXME: To what!?
+ SwitchSection(getContext().getMachOSection("__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ 0, SectionKind::getText()));
+}
+
+void MCPureStreamer::EmitLabel(MCSymbol *Symbol) {
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
+ assert(getCurrentSection() && "Cannot emit before setting section!");
+
+ Symbol->setSection(*getCurrentSection());
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ // We have to create a new fragment if this is an atom defining symbol,
+ // fragments cannot span atoms.
+ if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
+ new MCDataFragment(getCurrentSectionData());
+
+ // FIXME: This is wasteful, we don't necessarily need to create a data
+ // fragment. Instead, we should mark the symbol as pointing into the data
+ // fragment if it exists, otherwise we should just queue the label and set its
+ // fragment pointer when we emit the next fragment.
+ MCDataFragment *F = getOrCreateDataFragment();
+ assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
+ SD.setFragment(F);
+ SD.setOffset(F->getContents().size());
+}
+
+void MCPureStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ // FIXME: Lift context changes into super class.
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ Symbol->setVariableValue(AddValueSymbols(Value));
+}
+
+void MCPureStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size, unsigned ByteAlignment) {
+ report_fatal_error("not yet implemented in pure streamer");
+}
+
+void MCPureStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
+}
+
+void MCPureStreamer::EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value, unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void MCPureStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
+ getCurrentSectionData());
+ F->setEmitNops(true);
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void MCPureStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
+}
+
+void MCPureStreamer::EmitInstToFragment(const MCInst &Inst) {
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
+
+ // Add the fixups and data.
+ //
+ // FIXME: Revisit this design decision when relaxation is done, we may be
+ // able to get away with not storing any extra data in the MCInst.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ VecOS.flush();
+
+ IF->getCode() = Code;
+ IF->getFixups() = Fixups;
+}
+
+void MCPureStreamer::EmitInstToData(const MCInst &Inst) {
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ VecOS.flush();
+
+ // Add the fixups and data.
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
+ Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
+ DF->addFixup(Fixups[i]);
+ }
+ DF->getContents().append(Code.begin(), Code.end());
+}
+
+void MCPureStreamer::Finish() {
+ // FIXME: Handle DWARF tables?
+
+ this->MCObjectStreamer::Finish();
+}
+
+MCStreamer *llvm::createPureStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE) {
+ return new MCPureStreamer(Context, TAB, OS, CE);
+}
diff --git a/contrib/llvm/lib/MC/MCSectionCOFF.cpp b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
index eb53160..90091f0 100644
--- a/contrib/llvm/lib/MC/MCSectionCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
@@ -74,3 +74,11 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
}
}
}
+
+bool MCSectionCOFF::UseCodeAlign() const {
+ return getKind().isText();
+}
+
+bool MCSectionCOFF::isVirtualSection() const {
+ return getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+}
diff --git a/contrib/llvm/lib/MC/MCSectionELF.cpp b/contrib/llvm/lib/MC/MCSectionELF.cpp
index a7599de..d32aea1 100644
--- a/contrib/llvm/lib/MC/MCSectionELF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionELF.cpp
@@ -11,7 +11,9 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/raw_ostream.h"
+
using namespace llvm;
MCSectionELF::~MCSectionELF() {} // anchor.
@@ -29,14 +31,6 @@ bool MCSectionELF::ShouldOmitSectionDirective(StringRef Name,
return false;
}
-// ShouldPrintSectionType - Only prints the section type if supported
-bool MCSectionELF::ShouldPrintSectionType(unsigned Ty) const {
- if (IsExplicit && !(Ty == SHT_NOBITS || Ty == SHT_PROGBITS))
- return false;
-
- return true;
-}
-
void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const {
@@ -49,87 +43,88 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
// Handle the weird solaris syntax if desired.
if (MAI.usesSunStyleELFSectionSwitchSyntax() &&
- !(Flags & MCSectionELF::SHF_MERGE)) {
- if (Flags & MCSectionELF::SHF_ALLOC)
+ !(Flags & ELF::SHF_MERGE)) {
+ if (Flags & ELF::SHF_ALLOC)
OS << ",#alloc";
- if (Flags & MCSectionELF::SHF_EXECINSTR)
+ if (Flags & ELF::SHF_EXECINSTR)
OS << ",#execinstr";
- if (Flags & MCSectionELF::SHF_WRITE)
+ if (Flags & ELF::SHF_WRITE)
OS << ",#write";
- if (Flags & MCSectionELF::SHF_TLS)
+ if (Flags & ELF::SHF_TLS)
OS << ",#tls";
OS << '\n';
return;
}
OS << ",\"";
- if (Flags & MCSectionELF::SHF_ALLOC)
+ if (Flags & ELF::SHF_ALLOC)
OS << 'a';
- if (Flags & MCSectionELF::SHF_EXECINSTR)
+ if (Flags & ELF::SHF_EXECINSTR)
OS << 'x';
- if (Flags & MCSectionELF::SHF_WRITE)
+ if (Flags & ELF::SHF_GROUP)
+ OS << 'G';
+ if (Flags & ELF::SHF_WRITE)
OS << 'w';
- if (Flags & MCSectionELF::SHF_MERGE)
+ if (Flags & ELF::SHF_MERGE)
OS << 'M';
- if (Flags & MCSectionELF::SHF_STRINGS)
+ if (Flags & ELF::SHF_STRINGS)
OS << 'S';
- if (Flags & MCSectionELF::SHF_TLS)
+ if (Flags & ELF::SHF_TLS)
OS << 'T';
// If there are target-specific flags, print them.
- if (Flags & MCSectionELF::XCORE_SHF_CP_SECTION)
+ if (Flags & ELF::XCORE_SHF_CP_SECTION)
OS << 'c';
- if (Flags & MCSectionELF::XCORE_SHF_DP_SECTION)
+ if (Flags & ELF::XCORE_SHF_DP_SECTION)
OS << 'd';
OS << '"';
- if (ShouldPrintSectionType(Type)) {
- OS << ',';
-
- // If comment string is '@', e.g. as on ARM - use '%' instead
- if (MAI.getCommentString()[0] == '@')
- OS << '%';
- else
- OS << '@';
-
- if (Type == MCSectionELF::SHT_INIT_ARRAY)
- OS << "init_array";
- else if (Type == MCSectionELF::SHT_FINI_ARRAY)
- OS << "fini_array";
- else if (Type == MCSectionELF::SHT_PREINIT_ARRAY)
- OS << "preinit_array";
- else if (Type == MCSectionELF::SHT_NOBITS)
- OS << "nobits";
- else if (Type == MCSectionELF::SHT_PROGBITS)
- OS << "progbits";
-
- if (getKind().isMergeable1ByteCString()) {
- OS << ",1";
- } else if (getKind().isMergeable2ByteCString()) {
- OS << ",2";
- } else if (getKind().isMergeable4ByteCString() ||
- getKind().isMergeableConst4()) {
- OS << ",4";
- } else if (getKind().isMergeableConst8()) {
- OS << ",8";
- } else if (getKind().isMergeableConst16()) {
- OS << ",16";
- }
+ OS << ',';
+
+ // If comment string is '@', e.g. as on ARM - use '%' instead
+ if (MAI.getCommentString()[0] == '@')
+ OS << '%';
+ else
+ OS << '@';
+
+ if (Type == ELF::SHT_INIT_ARRAY)
+ OS << "init_array";
+ else if (Type == ELF::SHT_FINI_ARRAY)
+ OS << "fini_array";
+ else if (Type == ELF::SHT_PREINIT_ARRAY)
+ OS << "preinit_array";
+ else if (Type == ELF::SHT_NOBITS)
+ OS << "nobits";
+ else if (Type == ELF::SHT_NOTE)
+ OS << "note";
+ else if (Type == ELF::SHT_PROGBITS)
+ OS << "progbits";
+
+ if (EntrySize) {
+ assert(Flags & ELF::SHF_MERGE);
+ OS << "," << EntrySize;
}
-
+
+ if (Flags & ELF::SHF_GROUP)
+ OS << "," << Group->getName() << ",comdat";
OS << '\n';
}
-// HasCommonSymbols - True if this section holds common symbols, this is
-// indicated on the ELF object file by a symbol with SHN_COMMON section
-// header index.
-bool MCSectionELF::HasCommonSymbols() const {
-
- if (StringRef(SectionName).startswith(".gnu.linkonce."))
- return true;
-
- return false;
+bool MCSectionELF::UseCodeAlign() const {
+ return getFlags() & ELF::SHF_EXECINSTR;
}
+bool MCSectionELF::isVirtualSection() const {
+ return getType() == ELF::SHT_NOBITS;
+}
+unsigned MCSectionELF::DetermineEntrySize(SectionKind Kind) {
+ if (Kind.isMergeable1ByteCString()) return 1;
+ if (Kind.isMergeable2ByteCString()) return 2;
+ if (Kind.isMergeable4ByteCString()) return 4;
+ if (Kind.isMergeableConst4()) return 4;
+ if (Kind.isMergeableConst8()) return 8;
+ if (Kind.isMergeableConst16()) return 16;
+ return 0;
+}
diff --git a/contrib/llvm/lib/MC/MCSectionMachO.cpp b/contrib/llvm/lib/MC/MCSectionMachO.cpp
index ded3b20..b897c0b 100644
--- a/contrib/llvm/lib/MC/MCSectionMachO.cpp
+++ b/contrib/llvm/lib/MC/MCSectionMachO.cpp
@@ -10,6 +10,7 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/raw_ostream.h"
+#include <cctype>
using namespace llvm;
/// SectionTypeDescriptors - These are strings that describe the various section
@@ -81,18 +82,18 @@ MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section,
SegmentName[i] = Segment[i];
else
SegmentName[i] = 0;
-
+
if (i < Section.size())
SectionName[i] = Section[i];
else
SectionName[i] = 0;
- }
+ }
}
void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const {
OS << "\t.section\t" << getSegmentName() << ',' << getSectionName();
-
+
// Get the section type and attributes.
unsigned TAA = getTypeAndAttributes();
if (TAA == 0) {
@@ -101,7 +102,7 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
}
OS << ',';
-
+
unsigned SectionType = TAA & MCSectionMachO::SECTION_TYPE;
assert(SectionType <= MCSectionMachO::LAST_KNOWN_SECTION_TYPE &&
"Invalid SectionType specified!");
@@ -110,7 +111,7 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << SectionTypeDescriptors[SectionType].AssemblerName;
else
OS << "<<" << SectionTypeDescriptors[SectionType].EnumName << ">>";
-
+
// If we don't have any attributes, we're done.
unsigned SectionAttrs = TAA & MCSectionMachO::SECTION_ATTRIBUTES;
if (SectionAttrs == 0) {
@@ -128,10 +129,10 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
// Check to see if we have this attribute.
if ((SectionAttrDescriptors[i].AttrFlag & SectionAttrs) == 0)
continue;
-
+
// Yep, clear it and print it.
SectionAttrs &= ~SectionAttrDescriptors[i].AttrFlag;
-
+
OS << Separator;
if (SectionAttrDescriptors[i].AssemblerName)
OS << SectionAttrDescriptors[i].AssemblerName;
@@ -139,15 +140,25 @@ void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << "<<" << SectionAttrDescriptors[i].EnumName << ">>";
Separator = '+';
}
-
+
assert(SectionAttrs == 0 && "Unknown section attributes!");
-
+
// If we have a S_SYMBOL_STUBS size specified, print it.
if (Reserved2 != 0)
OS << ',' << Reserved2;
OS << '\n';
}
+bool MCSectionMachO::UseCodeAlign() const {
+ return hasAttribute(MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
+}
+
+bool MCSectionMachO::isVirtualSection() const {
+ return (getType() == MCSectionMachO::S_ZEROFILL ||
+ getType() == MCSectionMachO::S_GB_ZEROFILL ||
+ getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
+}
+
/// StripSpaces - This removes leading and trailing spaces from the StringRef.
static void StripSpaces(StringRef &Str) {
while (!Str.empty() && isspace(Str[0]))
@@ -168,12 +179,12 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
unsigned &StubSize) { // Out.
// Find the first comma.
std::pair<StringRef, StringRef> Comma = Spec.split(',');
-
+
// If there is no comma, we fail.
if (Comma.second.empty())
return "mach-o section specifier requires a segment and section "
"separated by a comma";
-
+
// Capture segment, remove leading and trailing whitespace.
Segment = Comma.first;
StripSpaces(Segment);
@@ -182,14 +193,14 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
if (Segment.empty() || Segment.size() > 16)
return "mach-o section specifier requires a segment whose length is "
"between 1 and 16 characters";
-
+
// Split the section name off from any attributes if present.
Comma = Comma.second.split(',');
// Capture section, remove leading and trailing whitespace.
Section = Comma.first;
StripSpaces(Section);
-
+
// Verify that the section is present and not too long.
if (Section.empty() || Section.size() > 16)
return "mach-o section specifier requires a section whose length is "
@@ -200,25 +211,25 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
StubSize = 0;
if (Comma.second.empty())
return "";
-
+
// Otherwise, we need to parse the section type and attributes.
Comma = Comma.second.split(',');
-
+
// Get the section type.
StringRef SectionType = Comma.first;
StripSpaces(SectionType);
-
+
// Figure out which section type it is.
unsigned TypeID;
for (TypeID = 0; TypeID !=MCSectionMachO::LAST_KNOWN_SECTION_TYPE+1; ++TypeID)
if (SectionTypeDescriptors[TypeID].AssemblerName &&
SectionType == SectionTypeDescriptors[TypeID].AssemblerName)
break;
-
+
// If we didn't find the section type, reject it.
if (TypeID > MCSectionMachO::LAST_KNOWN_SECTION_TYPE)
return "mach-o section specifier uses an unknown section type";
-
+
// Remember the TypeID.
TAA = TypeID;
@@ -235,10 +246,10 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
// present.
Comma = Comma.second.split(',');
StringRef Attrs = Comma.first;
-
+
// The attribute list is a '+' separated list of attributes.
std::pair<StringRef, StringRef> Plus = Attrs.split('+');
-
+
while (1) {
StringRef Attr = Plus.first;
StripSpaces(Attr);
@@ -247,14 +258,14 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
for (unsigned i = 0; ; ++i) {
if (SectionAttrDescriptors[i].AttrFlag == AttrFlagEnd)
return "mach-o section specifier has invalid attribute";
-
+
if (SectionAttrDescriptors[i].AssemblerName &&
Attr == SectionAttrDescriptors[i].AssemblerName) {
TAA |= SectionAttrDescriptors[i].AttrFlag;
break;
}
}
-
+
if (Plus.second.empty()) break;
Plus = Plus.second.split('+');
};
@@ -272,15 +283,14 @@ std::string MCSectionMachO::ParseSectionSpecifier(StringRef Spec, // In.
if ((TAA & MCSectionMachO::SECTION_TYPE) != MCSectionMachO::S_SYMBOL_STUBS)
return "mach-o section specifier cannot have a stub size specified because "
"it does not have type 'symbol_stubs'";
-
+
// Okay, if we do, it must be a number.
StringRef StubSizeStr = Comma.second;
StripSpaces(StubSizeStr);
-
+
// Convert the stub size from a string to an integer.
if (StubSizeStr.getAsInteger(0, StubSize))
return "mach-o section specifier has a malformed stub size";
-
+
return "";
}
-
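ParseSectionSpecifier accepts the assembler's segment,section[,type[,attr+attr[,stub_size]]] syntax validated above. A short sketch of a caller, assuming the five-parameter signature visible in this hunk (Spec in; Segment, Section, TAA, StubSize out; a non-empty return string is the error message) and LLVM headers of this vintage on the include path:

    // Hypothetical driver for MCSectionMachO::ParseSectionSpecifier.
    #include "llvm/MC/MCSectionMachO.h"
    #include "llvm/ADT/StringRef.h"
    #include <cstdio>
    #include <string>

    using namespace llvm;

    int main() {
      StringRef Segment, Section;
      unsigned TAA = 0, StubSize = 0;
      std::string Err = MCSectionMachO::ParseSectionSpecifier(
          "__TEXT,__text,regular,pure_instructions", Segment, Section,
          TAA, StubSize);
      if (!Err.empty()) {
        std::fprintf(stderr, "error: %s\n", Err.c_str());
        return 1;
      }
      // Expected: Segment == "__TEXT", Section == "__text", StubSize == 0,
      // and TAA carrying S_ATTR_PURE_INSTRUCTIONS from the attribute list.
      return 0;
    }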
diff --git a/contrib/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm/lib/MC/MCStreamer.cpp
index 3e9d02e..3dcdba1 100644
--- a/contrib/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCStreamer.cpp
@@ -7,16 +7,21 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include <cstdlib>
using namespace llvm;
-MCStreamer::MCStreamer(MCContext &Ctx) : Context(Ctx), CurSection(0),
- PrevSection(0) {
+MCStreamer::MCStreamer(MCContext &Ctx) : Context(Ctx) {
+ PrevSectionStack.push_back(NULL);
+ CurSectionStack.push_back(NULL);
}
MCStreamer::~MCStreamer() {
@@ -27,17 +32,90 @@ raw_ostream &MCStreamer::GetCommentOS() {
return nulls();
}
+void MCStreamer::EmitDwarfSetLineAddr(int64_t LineDelta,
+ const MCSymbol *Label, int PointerSize) {
+ // emit the sequence to set the address
+ EmitIntValue(dwarf::DW_LNS_extended_op, 1);
+ EmitULEB128IntValue(PointerSize + 1);
+ EmitIntValue(dwarf::DW_LNE_set_address, 1);
+ EmitSymbolValue(Label, PointerSize);
+
+ // emit the sequence for the LineDelta (from 1) and a zero address delta.
+ MCDwarfLineAddr::Emit(this, LineDelta, 0);
+}
/// EmitIntValue - Special case of EmitValue that avoids the client having to
/// pass in a MCExpr for constant integers.
void MCStreamer::EmitIntValue(uint64_t Value, unsigned Size,
unsigned AddrSpace) {
- EmitValue(MCConstantExpr::Create(Value, getContext()), Size, AddrSpace);
+ assert(Size <= 8 && "Invalid size");
+ assert((isUIntN(8 * Size, Value) || isIntN(8 * Size, Value)) &&
+ "Invalid size");
+ char buf[8];
+ // FIXME: Endianness assumption.
+ for (unsigned i = 0; i != Size; ++i)
+ buf[i] = uint8_t(Value >> (i * 8));
+ EmitBytes(StringRef(buf, Size), AddrSpace);
+}
+
+/// EmitULEB128IntValue - Special case of EmitULEB128Value that avoids the
+/// client having to pass in a MCExpr for constant integers.
+void MCStreamer::EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace) {
+ SmallString<32> Tmp;
+ raw_svector_ostream OSE(Tmp);
+ MCObjectWriter::EncodeULEB128(Value, OSE);
+ EmitBytes(OSE.str(), AddrSpace);
+}
+
+/// EmitSLEB128IntValue - Special case of EmitSLEB128Value that avoids the
+/// client having to pass in a MCExpr for constant integers.
+void MCStreamer::EmitSLEB128IntValue(int64_t Value, unsigned AddrSpace) {
+ SmallString<32> Tmp;
+ raw_svector_ostream OSE(Tmp);
+ MCObjectWriter::EncodeSLEB128(Value, OSE);
+ EmitBytes(OSE.str(), AddrSpace);
+}
+
+void MCStreamer::EmitAbsValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ if (getContext().getAsmInfo().hasAggressiveSymbolFolding()) {
+ EmitValue(Value, Size, AddrSpace);
+ return;
+ }
+ MCSymbol *ABS = getContext().CreateTempSymbol();
+ EmitAssignment(ABS, Value);
+ EmitSymbolValue(ABS, Size, AddrSpace);
+}
+
+
+void MCStreamer::EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ EmitValueImpl(Value, Size, false, AddrSpace);
+}
+
+void MCStreamer::EmitPCRelValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ EmitValueImpl(Value, Size, true, AddrSpace);
+}
+
+void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) {
+ EmitValueImpl(MCSymbolRefExpr::Create(Sym, getContext()), Size, isPCRel,
+ AddrSpace);
}
void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
unsigned AddrSpace) {
- EmitValue(MCSymbolRefExpr::Create(Sym, getContext()), Size, AddrSpace);
+ EmitSymbolValue(Sym, Size, false, AddrSpace);
+}
+
+void MCStreamer::EmitPCRelSymbolValue(const MCSymbol *Sym, unsigned Size,
+ unsigned AddrSpace) {
+ EmitSymbolValue(Sym, Size, true, AddrSpace);
+}
+
+void MCStreamer::EmitGPRel32Value(const MCExpr *Value) {
+ report_fatal_error("unsupported directive in streamer");
}
/// EmitFill - Emit NumBytes bytes worth of the value specified by
@@ -49,6 +127,138 @@ void MCStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
EmitValue(E, 1, AddrSpace);
}
+bool MCStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Filename) {
+ return getContext().GetDwarfFile(Filename, FileNo) == 0;
+}
+
+void MCStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa,
+ unsigned Discriminator) {
+ getContext().setCurrentDwarfLoc(FileNo, Line, Column, Flags, Isa,
+ Discriminator);
+}
+
+MCDwarfFrameInfo *MCStreamer::getCurrentFrameInfo() {
+ if (FrameInfos.empty())
+ return NULL;
+ return &FrameInfos.back();
+}
+
+void MCStreamer::EnsureValidFrame() {
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ if (!CurFrame || CurFrame->End)
+ report_fatal_error("No open frame");
+}
+
+bool MCStreamer::EmitCFIStartProc() {
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ if (CurFrame && !CurFrame->End) {
+ report_fatal_error("Starting a frame before finishing the previous one!");
+ return true;
+ }
+ MCDwarfFrameInfo Frame;
+ Frame.Begin = getContext().CreateTempSymbol();
+ EmitLabel(Frame.Begin);
+ FrameInfos.push_back(Frame);
+ return false;
+}
+
+bool MCStreamer::EmitCFIEndProc() {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ CurFrame->End = getContext().CreateTempSymbol();
+ EmitLabel(CurFrame->End);
+ return false;
+}
+
+bool MCStreamer::EmitCFIDefCfa(int64_t Register, int64_t Offset) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MachineLocation Dest(MachineLocation::VirtualFP);
+ MachineLocation Source(Register, -Offset);
+ MCCFIInstruction Instruction(Label, Dest, Source);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
+bool MCStreamer::EmitCFIDefCfaOffset(int64_t Offset) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MachineLocation Dest(MachineLocation::VirtualFP);
+ MachineLocation Source(MachineLocation::VirtualFP, -Offset);
+ MCCFIInstruction Instruction(Label, Dest, Source);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
+bool MCStreamer::EmitCFIDefCfaRegister(int64_t Register) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MachineLocation Dest(Register);
+ MachineLocation Source(MachineLocation::VirtualFP);
+ MCCFIInstruction Instruction(Label, Dest, Source);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
+bool MCStreamer::EmitCFIOffset(int64_t Register, int64_t Offset) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MachineLocation Dest(Register, Offset);
+ MachineLocation Source(Register, Offset);
+ MCCFIInstruction Instruction(Label, Dest, Source);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
+bool MCStreamer::EmitCFIPersonality(const MCSymbol *Sym,
+ unsigned Encoding) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ CurFrame->Personality = Sym;
+ CurFrame->PersonalityEncoding = Encoding;
+ return false;
+}
+
+bool MCStreamer::EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ CurFrame->Lsda = Sym;
+ CurFrame->LsdaEncoding = Encoding;
+ return false;
+}
+
+bool MCStreamer::EmitCFIRememberState() {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::Remember, Label);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
+bool MCStreamer::EmitCFIRestoreState() {
+ // FIXME: Error if there is no matching cfi_remember_state.
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::Restore, Label);
+ CurFrame->Instructions.push_back(Instruction);
+ return false;
+}
+
/// EmitRawText - If this file is backed by an assembly streamer, this dumps
/// the specified string in the output .s file. This capability is
/// indicated by the hasRawTextSupport() predicate.
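EmitULEB128IntValue above delegates the byte-level work to MCObjectWriter::EncodeULEB128; the encoding itself is plain DWARF ULEB128. A self-contained sketch of that encoding, for reference:

    // Standalone ULEB128 encoder, equivalent in spirit to the
    // MCObjectWriter::EncodeULEB128 call used by EmitULEB128IntValue.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static void encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
      do {
        uint8_t Byte = Value & 0x7f;  // low 7 bits of the remaining value
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80;               // continuation bit: more bytes follow
        Out.push_back(Byte);
      } while (Value != 0);
    }

    int main() {
      std::vector<uint8_t> Buf;
      encodeULEB128(624485, Buf);     // classic DWARF example: e5 8e 26
      for (uint8_t B : Buf)
        std::printf("%02x ", (unsigned)B);
      std::printf("\n");
    }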
diff --git a/contrib/llvm/lib/MC/MCSymbol.cpp b/contrib/llvm/lib/MC/MCSymbol.cpp
index 07751f7..1c71f26 100644
--- a/contrib/llvm/lib/MC/MCSymbol.cpp
+++ b/contrib/llvm/lib/MC/MCSymbol.cpp
@@ -39,7 +39,20 @@ static bool NameNeedsQuoting(StringRef Str) {
return false;
}
+const MCSymbol &MCSymbol::AliasedSymbol() const {
+ const MCSymbol *S = this;
+ while (S->isVariable()) {
+ const MCExpr *Value = S->getVariableValue();
+ if (Value->getKind() != MCExpr::SymbolRef)
+ return *S;
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Value);
+ S = &Ref->getSymbol();
+ }
+ return *S;
+}
+
void MCSymbol::setVariableValue(const MCExpr *Value) {
+ assert(!IsUsed && "Cannot set a variable that has already been used.");
assert(Value && "Invalid variable value!");
assert((isUndefined() || (isAbsolute() && isa<MCConstantExpr>(Value))) &&
"Invalid redefinition!");
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index cffabfa..8af07c7 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MachObjectWriter.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAssembler.h"
@@ -18,49 +19,37 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCMachOSymbolFlags.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MachO.h"
#include "llvm/Target/TargetAsmBackend.h"
// FIXME: Gross.
+#include "../Target/ARM/ARMFixupKinds.h"
#include "../Target/X86/X86FixupKinds.h"
#include <vector>
using namespace llvm;
+using namespace llvm::object;
+// FIXME: this has been copied from (or to) X86AsmBackend.cpp
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
- default: llvm_unreachable("invalid fixup kind!");
- case X86::reloc_pcrel_1byte:
+ default:
+ llvm_unreachable("invalid fixup kind!");
+ case FK_PCRel_1:
case FK_Data_1: return 0;
- case X86::reloc_pcrel_2byte:
+ case FK_PCRel_2:
case FK_Data_2: return 1;
- case X86::reloc_pcrel_4byte:
+ case FK_PCRel_4:
+ // FIXME: Remove these!!!
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_signed_4byte:
case FK_Data_4: return 2;
case FK_Data_8: return 3;
}
}
-static bool isFixupKindPCRel(unsigned Kind) {
- switch (Kind) {
- default:
- return false;
- case X86::reloc_pcrel_1byte:
- case X86::reloc_pcrel_2byte:
- case X86::reloc_pcrel_4byte:
- case X86::reloc_riprel_4byte:
- case X86::reloc_riprel_4byte_movq_load:
- return true;
- }
-}
-
-static bool isFixupKindRIPRel(unsigned Kind) {
- return Kind == X86::reloc_riprel_4byte ||
- Kind == X86::reloc_riprel_4byte_movq_load;
-}
-
static bool doesSymbolRequireExternRelocation(MCSymbolData *SD) {
// Undefined symbols are always extern.
if (SD->Symbol->isUndefined())
@@ -77,94 +66,7 @@ static bool doesSymbolRequireExternRelocation(MCSymbolData *SD) {
namespace {
-class MachObjectWriterImpl {
- // See <mach-o/loader.h>.
- enum {
- Header_Magic32 = 0xFEEDFACE,
- Header_Magic64 = 0xFEEDFACF
- };
-
- enum {
- Header32Size = 28,
- Header64Size = 32,
- SegmentLoadCommand32Size = 56,
- SegmentLoadCommand64Size = 72,
- Section32Size = 68,
- Section64Size = 80,
- SymtabLoadCommandSize = 24,
- DysymtabLoadCommandSize = 80,
- Nlist32Size = 12,
- Nlist64Size = 16,
- RelocationInfoSize = 8
- };
-
- enum HeaderFileType {
- HFT_Object = 0x1
- };
-
- enum HeaderFlags {
- HF_SubsectionsViaSymbols = 0x2000
- };
-
- enum LoadCommandType {
- LCT_Segment = 0x1,
- LCT_Symtab = 0x2,
- LCT_Dysymtab = 0xb,
- LCT_Segment64 = 0x19
- };
-
- // See <mach-o/nlist.h>.
- enum SymbolTypeType {
- STT_Undefined = 0x00,
- STT_Absolute = 0x02,
- STT_Section = 0x0e
- };
-
- enum SymbolTypeFlags {
- // If any of these bits are set, then the entry is a stab entry number (see
- // <mach-o/stab.h>. Otherwise the other masks apply.
- STF_StabsEntryMask = 0xe0,
-
- STF_TypeMask = 0x0e,
- STF_External = 0x01,
- STF_PrivateExtern = 0x10
- };
-
- /// IndirectSymbolFlags - Flags for encoding special values in the indirect
- /// symbol entry.
- enum IndirectSymbolFlags {
- ISF_Local = 0x80000000,
- ISF_Absolute = 0x40000000
- };
-
- /// RelocationFlags - Special flags for addresses.
- enum RelocationFlags {
- RF_Scattered = 0x80000000
- };
-
- enum RelocationInfoType {
- RIT_Vanilla = 0,
- RIT_Pair = 1,
- RIT_Difference = 2,
- RIT_PreboundLazyPointer = 3,
- RIT_LocalDifference = 4,
- RIT_TLV = 5
- };
-
- /// X86_64 uses its own relocation types.
- enum RelocationInfoTypeX86_64 {
- RIT_X86_64_Unsigned = 0,
- RIT_X86_64_Signed = 1,
- RIT_X86_64_Branch = 2,
- RIT_X86_64_GOTLoad = 3,
- RIT_X86_64_GOT = 4,
- RIT_X86_64_Subtractor = 5,
- RIT_X86_64_Signed1 = 6,
- RIT_X86_64_Signed2 = 7,
- RIT_X86_64_Signed4 = 8,
- RIT_X86_64_TLV = 9
- };
-
+class MachObjectWriter : public MCObjectWriter {
/// MachSymbolData - Helper struct for containing some precomputed information
/// on symbols.
struct MachSymbolData {
@@ -179,16 +81,14 @@ class MachObjectWriterImpl {
}
};
+ /// The target specific Mach-O writer instance.
+ llvm::OwningPtr<MCMachObjectTargetWriter> TargetObjectWriter;
+
/// @name Relocation Data
/// @{
- struct MachRelocationEntry {
- uint32_t Word0;
- uint32_t Word1;
- };
-
llvm::DenseMap<const MCSectionData*,
- std::vector<MachRelocationEntry> > Relocations;
+ std::vector<macho::RelocationEntry> > Relocations;
llvm::DenseMap<const MCSectionData*, unsigned> IndirectSymBase;
/// @}
@@ -202,32 +102,70 @@ class MachObjectWriterImpl {
/// @}
- MachObjectWriter *Writer;
+private:
+ /// @name Utility Methods
+ /// @{
- raw_ostream &OS;
+ bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
+ const MCFixupKindInfo &FKI = Asm.getBackend().getFixupKindInfo(
+ (MCFixupKind) Kind);
- unsigned Is64Bit : 1;
+ return FKI.Flags & MCFixupKindInfo::FKF_IsPCRel;
+ }
+
+ /// @}
+
+ SectionAddrMap SectionAddress;
+ uint64_t getSectionAddress(const MCSectionData* SD) const {
+ return SectionAddress.lookup(SD);
+ }
+ uint64_t getSymbolAddress(const MCSymbolData* SD,
+ const MCAsmLayout &Layout) const {
+ return getSectionAddress(SD->getFragment()->getParent()) +
+ Layout.getSymbolOffset(SD);
+ }
+ uint64_t getFragmentAddress(const MCFragment *Fragment,
+ const MCAsmLayout &Layout) const {
+ return getSectionAddress(Fragment->getParent()) +
+ Layout.getFragmentOffset(Fragment);
+ }
+
+ uint64_t getPaddingSize(const MCSectionData *SD,
+ const MCAsmLayout &Layout) const {
+ uint64_t EndAddr = getSectionAddress(SD) + Layout.getSectionAddressSize(SD);
+ unsigned Next = SD->getLayoutOrder() + 1;
+ if (Next >= Layout.getSectionOrder().size())
+ return 0;
+
+ const MCSectionData &NextSD = *Layout.getSectionOrder()[Next];
+ if (NextSD.getSection().isVirtualSection())
+ return 0;
+ return OffsetToAlignment(EndAddr, NextSD.getAlignment());
+ }
public:
- MachObjectWriterImpl(MachObjectWriter *_Writer, bool _Is64Bit)
- : Writer(_Writer), OS(Writer->getStream()), Is64Bit(_Is64Bit) {
+ MachObjectWriter(MCMachObjectTargetWriter *MOTW, raw_ostream &_OS,
+ bool _IsLittleEndian)
+ : MCObjectWriter(_OS, _IsLittleEndian), TargetObjectWriter(MOTW) {
}
- void Write8(uint8_t Value) { Writer->Write8(Value); }
- void Write16(uint16_t Value) { Writer->Write16(Value); }
- void Write32(uint32_t Value) { Writer->Write32(Value); }
- void Write64(uint64_t Value) { Writer->Write64(Value); }
- void WriteZeros(unsigned N) { Writer->WriteZeros(N); }
- void WriteBytes(StringRef Str, unsigned ZeroFillSize = 0) {
- Writer->WriteBytes(Str, ZeroFillSize);
+ /// @name Target Writer Proxy Accessors
+ /// @{
+
+ bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+ bool isARM() const {
+ uint32_t CPUType = TargetObjectWriter->getCPUType() & ~mach::CTFM_ArchMask;
+ return CPUType == mach::CTM_ARM;
}
+ /// @}
+
void WriteHeader(unsigned NumLoadCommands, unsigned LoadCommandsSize,
bool SubsectionsViaSymbols) {
uint32_t Flags = 0;
if (SubsectionsViaSymbols)
- Flags |= HF_SubsectionsViaSymbols;
+ Flags |= macho::HF_SubsectionsViaSymbols;
// struct mach_header (28 bytes) or
// struct mach_header_64 (32 bytes)
@@ -235,21 +173,20 @@ public:
uint64_t Start = OS.tell();
(void) Start;
- Write32(Is64Bit ? Header_Magic64 : Header_Magic32);
+ Write32(is64Bit() ? macho::HM_Object64 : macho::HM_Object32);
+
+ Write32(TargetObjectWriter->getCPUType());
+ Write32(TargetObjectWriter->getCPUSubtype());
- // FIXME: Support cputype.
- Write32(Is64Bit ? MachO::CPUTypeX86_64 : MachO::CPUTypeI386);
- // FIXME: Support cpusubtype.
- Write32(MachO::CPUSubType_I386_ALL);
- Write32(HFT_Object);
- Write32(NumLoadCommands); // Object files have a single load command, the
- // segment.
+ Write32(macho::HFT_Object);
+ Write32(NumLoadCommands);
Write32(LoadCommandsSize);
Write32(Flags);
- if (Is64Bit)
+ if (is64Bit())
Write32(0); // reserved
- assert(OS.tell() - Start == Is64Bit ? Header64Size : Header32Size);
+ assert(OS.tell() - Start ==
+ (is64Bit() ? macho::Header64Size : macho::Header32Size));
}
/// WriteSegmentLoadCommand - Write a segment load command.
@@ -266,14 +203,16 @@ public:
uint64_t Start = OS.tell();
(void) Start;
- unsigned SegmentLoadCommandSize = Is64Bit ? SegmentLoadCommand64Size :
- SegmentLoadCommand32Size;
- Write32(Is64Bit ? LCT_Segment64 : LCT_Segment);
+ unsigned SegmentLoadCommandSize =
+ is64Bit() ? macho::SegmentLoadCommand64Size:
+ macho::SegmentLoadCommand32Size;
+ Write32(is64Bit() ? macho::LCT_Segment64 : macho::LCT_Segment);
Write32(SegmentLoadCommandSize +
- NumSections * (Is64Bit ? Section64Size : Section32Size));
+ NumSections * (is64Bit() ? macho::Section64Size :
+ macho::Section32Size));
WriteBytes("", 16);
- if (Is64Bit) {
+ if (is64Bit()) {
Write64(0); // vmaddr
Write64(VMSize); // vmsize
Write64(SectionDataStartOffset); // file offset
@@ -295,10 +234,10 @@ public:
void WriteSection(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCSectionData &SD, uint64_t FileOffset,
uint64_t RelocationsStart, unsigned NumRelocations) {
- uint64_t SectionSize = Layout.getSectionSize(&SD);
+ uint64_t SectionSize = Layout.getSectionAddressSize(&SD);
// The offset is unused for virtual sections.
- if (Asm.getBackend().isVirtualSection(SD.getSection())) {
+ if (SD.getSection().isVirtualSection()) {
assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
FileOffset = 0;
}
@@ -312,11 +251,11 @@ public:
const MCSectionMachO &Section = cast<MCSectionMachO>(SD.getSection());
WriteBytes(Section.getSectionName(), 16);
WriteBytes(Section.getSegmentName(), 16);
- if (Is64Bit) {
- Write64(Layout.getSectionAddress(&SD)); // address
+ if (is64Bit()) {
+ Write64(getSectionAddress(&SD)); // address
Write64(SectionSize); // size
} else {
- Write32(Layout.getSectionAddress(&SD)); // address
+ Write32(getSectionAddress(&SD)); // address
Write32(SectionSize); // size
}
Write32(FileOffset);
@@ -332,10 +271,11 @@ public:
Write32(Flags);
Write32(IndirectSymBase.lookup(&SD)); // reserved1
Write32(Section.getStubSize()); // reserved2
- if (Is64Bit)
+ if (is64Bit())
Write32(0); // reserved3
- assert(OS.tell() - Start == Is64Bit ? Section64Size : Section32Size);
+ assert(OS.tell() - Start == is64Bit() ? macho::Section64Size :
+ macho::Section32Size);
}
void WriteSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
@@ -346,14 +286,14 @@ public:
uint64_t Start = OS.tell();
(void) Start;
- Write32(LCT_Symtab);
- Write32(SymtabLoadCommandSize);
+ Write32(macho::LCT_Symtab);
+ Write32(macho::SymtabLoadCommandSize);
Write32(SymbolOffset);
Write32(NumSymbols);
Write32(StringTableOffset);
Write32(StringTableSize);
- assert(OS.tell() - Start == SymtabLoadCommandSize);
+ assert(OS.tell() - Start == macho::SymtabLoadCommandSize);
}
void WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
@@ -369,8 +309,8 @@ public:
uint64_t Start = OS.tell();
(void) Start;
- Write32(LCT_Dysymtab);
- Write32(DysymtabLoadCommandSize);
+ Write32(macho::LCT_Dysymtab);
+ Write32(macho::DysymtabLoadCommandSize);
Write32(FirstLocalSymbol);
Write32(NumLocalSymbols);
Write32(FirstExternalSymbol);
@@ -390,7 +330,7 @@ public:
Write32(0); // locreloff
Write32(0); // nlocrel
- assert(OS.tell() - Start == DysymtabLoadCommandSize);
+ assert(OS.tell() - Start == macho::DysymtabLoadCommandSize);
}
void WriteNlist(MachSymbolData &MSD, const MCAsmLayout &Layout) {
@@ -404,27 +344,27 @@ public:
//
// FIXME: Are the prebound or indirect fields possible here?
if (Symbol.isUndefined())
- Type = STT_Undefined;
+ Type = macho::STT_Undefined;
else if (Symbol.isAbsolute())
- Type = STT_Absolute;
+ Type = macho::STT_Absolute;
else
- Type = STT_Section;
+ Type = macho::STT_Section;
// FIXME: Set STAB bits.
if (Data.isPrivateExtern())
- Type |= STF_PrivateExtern;
+ Type |= macho::STF_PrivateExtern;
// Set external bit.
if (Data.isExternal() || Symbol.isUndefined())
- Type |= STF_External;
+ Type |= macho::STF_External;
// Compute the symbol address.
if (Symbol.isDefined()) {
if (Symbol.isAbsolute()) {
Address = cast<MCConstantExpr>(Symbol.getVariableValue())->getValue();
} else {
- Address = Layout.getSymbolAddress(&Data);
+ Address = getSymbolAddress(&Data, Layout);
}
} else if (Data.isCommon()) {
// Common symbols are encoded with the size in the address
@@ -452,7 +392,7 @@ public:
// The Mach-O streamer uses the lowest 16-bits of the flags for the 'desc'
// value.
Write16(Flags);
- if (Is64Bit)
+ if (is64Bit())
Write64(Address);
else
Write32(Address);
@@ -472,11 +412,15 @@ public:
// - Input errors, where something cannot be correctly encoded. 'as' allows
// these through in many cases.
+ static bool isFixupKindRIPRel(unsigned Kind) {
+ return Kind == X86::reloc_riprel_4byte ||
+ Kind == X86::reloc_riprel_4byte_movq_load;
+ }
void RecordX86_64Relocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
- unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
unsigned IsRIPRel = isFixupKindRIPRel(Fixup.getKind());
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
@@ -484,7 +428,7 @@ public:
uint32_t FixupOffset =
Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
uint32_t FixupAddress =
- Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+ getFragmentAddress(Fragment, Layout) + Fixup.getOffset();
int64_t Value = 0;
unsigned Index = 0;
unsigned IsExtern = 0;
@@ -503,7 +447,7 @@ public:
if (Target.isAbsolute()) { // constant
// SymbolNum of 0 indicates the absolute section.
- Type = RIT_X86_64_Unsigned;
+ Type = macho::RIT_X86_64_Unsigned;
Index = 0;
// FIXME: I believe this is broken, I don't think the linker can
@@ -513,16 +457,16 @@ public:
// yet).
if (IsPCRel) {
IsExtern = 1;
- Type = RIT_X86_64_Branch;
+ Type = macho::RIT_X86_64_Branch;
}
} else if (Target.getSymB()) { // A - B + constant
const MCSymbol *A = &Target.getSymA()->getSymbol();
MCSymbolData &A_SD = Asm.getSymbolData(*A);
- const MCSymbolData *A_Base = Asm.getAtom(Layout, &A_SD);
+ const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
const MCSymbol *B = &Target.getSymB()->getSymbol();
MCSymbolData &B_SD = Asm.getSymbolData(*B);
- const MCSymbolData *B_Base = Asm.getAtom(Layout, &B_SD);
+ const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
// Neither symbol can be modified.
if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
@@ -534,25 +478,35 @@ public:
if (IsPCRel)
report_fatal_error("unsupported pc-relative relocation of difference");
- // We don't currently support any situation where one or both of the
- // symbols would require a local relocation. This is almost certainly
- // unused and may not be possible to encode correctly.
- if (!A_Base || !B_Base)
- report_fatal_error("unsupported local relocations in difference");
+ // The support for the situation where one or both of the symbols would
+ // require a local relocation is handled just as if the symbols were
+ // external. This is certainly used in the case of debug sections where
+ // the section has only temporary symbols and thus the symbols don't have
+ // base symbols. This is encoded using the section ordinal and
+ // non-extern relocation entries.
// Darwin 'as' doesn't emit correct relocations for this (it ends up with
- // a single SIGNED relocation); reject it for now.
- if (A_Base == B_Base)
+ // a single SIGNED relocation); reject it for now, except in the case
+ // where neither symbol has a base (A_Base and B_Base equal but both NULL).
+ if (A_Base == B_Base && A_Base)
report_fatal_error("unsupported relocation with identical base");
- Value += Layout.getSymbolAddress(&A_SD) - Layout.getSymbolAddress(A_Base);
- Value -= Layout.getSymbolAddress(&B_SD) - Layout.getSymbolAddress(B_Base);
+ Value += getSymbolAddress(&A_SD, Layout) -
+ (A_Base == NULL ? 0 : getSymbolAddress(A_Base, Layout));
+ Value -= getSymbolAddress(&B_SD, Layout) -
+ (B_Base == NULL ? 0 : getSymbolAddress(B_Base, Layout));
- Index = A_Base->getIndex();
- IsExtern = 1;
- Type = RIT_X86_64_Unsigned;
+ if (A_Base) {
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ }
+ else {
+ Index = A_SD.getFragment()->getParent()->getOrdinal() + 1;
+ IsExtern = 0;
+ }
+ Type = macho::RIT_X86_64_Unsigned;
- MachRelocationEntry MRE;
+ macho::RelocationEntry MRE;
MRE.Word0 = FixupOffset;
MRE.Word1 = ((Index << 0) |
(IsPCRel << 24) |
@@ -561,13 +515,19 @@ public:
(Type << 28));
Relocations[Fragment->getParent()].push_back(MRE);
- Index = B_Base->getIndex();
- IsExtern = 1;
- Type = RIT_X86_64_Subtractor;
+ if (B_Base) {
+ Index = B_Base->getIndex();
+ IsExtern = 1;
+ }
+ else {
+ Index = B_SD.getFragment()->getParent()->getOrdinal() + 1;
+ IsExtern = 0;
+ }
+ Type = macho::RIT_X86_64_Subtractor;
} else {
const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
MCSymbolData &SD = Asm.getSymbolData(*Symbol);
- const MCSymbolData *Base = Asm.getAtom(Layout, &SD);
+ const MCSymbolData *Base = Asm.getAtom(&SD);
// Relocations inside debug sections always use local relocations when
// possible. This seems to be done because the debugger doesn't fully
@@ -589,15 +549,26 @@ public:
// Add the local offset, if needed.
if (Base != &SD)
- Value += Layout.getSymbolAddress(&SD) - Layout.getSymbolAddress(Base);
+ Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
} else if (Symbol->isInSection()) {
// The index is the section ordinal (1-based).
Index = SD.getFragment()->getParent()->getOrdinal() + 1;
IsExtern = 0;
- Value += Layout.getSymbolAddress(&SD);
+ Value += getSymbolAddress(&SD, Layout);
if (IsPCRel)
Value -= FixupAddress + (1 << Log2Size);
+ } else if (Symbol->isVariable()) {
+ const MCExpr *Value = Symbol->getVariableValue();
+ int64_t Res;
+ bool isAbs = Value->EvaluateAsAbsolute(Res, Layout, SectionAddress);
+ if (isAbs) {
+ FixedValue = Res;
+ return;
+ } else {
+ report_fatal_error("unsupported relocation of variable '" +
+ Symbol->getName() + "'");
+ }
} else {
report_fatal_error("unsupported relocation of undefined symbol '" +
Symbol->getName() + "'");
@@ -611,15 +582,15 @@ public:
// rewrite the movq to an leaq at link time if the symbol ends up in
// the same linkage unit.
if (unsigned(Fixup.getKind()) == X86::reloc_riprel_4byte_movq_load)
- Type = RIT_X86_64_GOTLoad;
+ Type = macho::RIT_X86_64_GOTLoad;
else
- Type = RIT_X86_64_GOT;
+ Type = macho::RIT_X86_64_GOT;
} else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
- Type = RIT_X86_64_TLV;
+ Type = macho::RIT_X86_64_TLV;
} else if (Modifier != MCSymbolRefExpr::VK_None) {
report_fatal_error("unsupported symbol modifier in relocation");
} else {
- Type = RIT_X86_64_Signed;
+ Type = macho::RIT_X86_64_Signed;
// The Darwin x86_64 relocation format has a problem where it cannot
// encode an address (L<foo> + <constant>) which is outside the atom
@@ -636,9 +607,9 @@ public:
// (the additional bias), but instead appear to just look at the
// final offset.
switch (-(Target.getConstant() + (1LL << Log2Size))) {
- case 1: Type = RIT_X86_64_Signed1; break;
- case 2: Type = RIT_X86_64_Signed2; break;
- case 4: Type = RIT_X86_64_Signed4; break;
+ case 1: Type = macho::RIT_X86_64_Signed1; break;
+ case 2: Type = macho::RIT_X86_64_Signed2; break;
+ case 4: Type = macho::RIT_X86_64_Signed4; break;
}
}
} else {
@@ -646,24 +617,24 @@ public:
report_fatal_error("unsupported symbol modifier in branch "
"relocation");
- Type = RIT_X86_64_Branch;
+ Type = macho::RIT_X86_64_Branch;
}
} else {
if (Modifier == MCSymbolRefExpr::VK_GOT) {
- Type = RIT_X86_64_GOT;
+ Type = macho::RIT_X86_64_GOT;
} else if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
// GOTPCREL is allowed as a modifier on non-PCrel instructions, in
// which case all we do is set the PCrel bit in the relocation entry;
// this is used with exception handling, for example. The source is
// required to include any necessary offset directly.
- Type = RIT_X86_64_GOT;
+ Type = macho::RIT_X86_64_GOT;
IsPCRel = 1;
} else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
report_fatal_error("TLVP symbol modifier should have been rip-rel");
} else if (Modifier != MCSymbolRefExpr::VK_None)
report_fatal_error("unsupported symbol modifier in relocation");
else
- Type = RIT_X86_64_Unsigned;
+ Type = macho::RIT_X86_64_Unsigned;
}
}
@@ -671,7 +642,7 @@ public:
FixedValue = Value;
// struct relocation_info (8 bytes)
- MachRelocationEntry MRE;
+ macho::RelocationEntry MRE;
MRE.Word0 = FixupOffset;
MRE.Word1 = ((Index << 0) |
(IsPCRel << 24) |
@@ -685,11 +656,11 @@ public:
const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
+ unsigned Log2Size,
uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
- unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
- unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
- unsigned Type = RIT_Vanilla;
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
+ unsigned Type = macho::RIT_Vanilla;
// See <reloc.h>.
const MCSymbol *A = &Target.getSymA()->getSymbol();
@@ -699,7 +670,9 @@ public:
report_fatal_error("symbol '" + A->getName() +
"' can not be undefined in a subtraction expression");
- uint32_t Value = Layout.getSymbolAddress(A_SD);
+ uint32_t Value = getSymbolAddress(A_SD, Layout);
+ uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
+ FixedValue += SecAddr;
uint32_t Value2 = 0;
if (const MCSymbolRefExpr *B = Target.getSymB()) {
@@ -714,28 +687,184 @@ public:
// Note that there is no longer any semantic difference between these two
// relocation types from the linkers point of view, this is done solely
// for pedantic compatibility with 'as'.
- Type = A_SD->isExternal() ? RIT_Difference : RIT_LocalDifference;
- Value2 = Layout.getSymbolAddress(B_SD);
+ Type = A_SD->isExternal() ? (unsigned)macho::RIT_Difference :
+ (unsigned)macho::RIT_Generic_LocalDifference;
+ Value2 = getSymbolAddress(B_SD, Layout);
+ FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
+ }
+
+ // Relocations are written out in reverse order, so the PAIR comes first.
+ if (Type == macho::RIT_Difference ||
+ Type == macho::RIT_Generic_LocalDifference) {
+ macho::RelocationEntry MRE;
+ MRE.Word0 = ((0 << 0) |
+ (macho::RIT_Pair << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ macho::RF_Scattered);
+ MRE.Word1 = Value2;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ macho::RelocationEntry MRE;
+ MRE.Word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ macho::RF_Scattered);
+ MRE.Word1 = Value;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ void RecordARMScatteredRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ unsigned Log2Size,
+ uint64_t &FixedValue) {
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
+ unsigned Type = macho::RIT_Vanilla;
+
+ // See <reloc.h>.
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+
+ if (!A_SD->getFragment())
+ report_fatal_error("symbol '" + A->getName() +
+ "' can not be undefined in a subtraction expression");
+
+ uint32_t Value = getSymbolAddress(A_SD, Layout);
+ uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
+ FixedValue += SecAddr;
+ uint32_t Value2 = 0;
+
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+
+ if (!B_SD->getFragment())
+ report_fatal_error("symbol '" + B->getSymbol().getName() +
+ "' can not be undefined in a subtraction expression");
+
+ // Select the appropriate difference relocation type.
+ Type = macho::RIT_Difference;
+ Value2 = getSymbolAddress(B_SD, Layout);
+ FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
}
// Relocations are written out in reverse order, so the PAIR comes first.
- if (Type == RIT_Difference || Type == RIT_LocalDifference) {
- MachRelocationEntry MRE;
+ if (Type == macho::RIT_Difference ||
+ Type == macho::RIT_Generic_LocalDifference) {
+ macho::RelocationEntry MRE;
MRE.Word0 = ((0 << 0) |
- (RIT_Pair << 24) |
+ (macho::RIT_Pair << 24) |
(Log2Size << 28) |
(IsPCRel << 30) |
- RF_Scattered);
+ macho::RF_Scattered);
MRE.Word1 = Value2;
Relocations[Fragment->getParent()].push_back(MRE);
}
- MachRelocationEntry MRE;
+ macho::RelocationEntry MRE;
MRE.Word0 = ((FixupOffset << 0) |
(Type << 24) |
(Log2Size << 28) |
(IsPCRel << 30) |
- RF_Scattered);
+ macho::RF_Scattered);
+ MRE.Word1 = Value;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ void RecordARMMovwMovtRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
+ unsigned Type = macho::RIT_ARM_Half;
+
+ // See <reloc.h>.
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+
+ if (!A_SD->getFragment())
+ report_fatal_error("symbol '" + A->getName() +
+ "' can not be undefined in a subtraction expression");
+
+ uint32_t Value = getSymbolAddress(A_SD, Layout);
+ uint32_t Value2 = 0;
+ uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
+ FixedValue += SecAddr;
+
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+
+ if (!B_SD->getFragment())
+ report_fatal_error("symbol '" + B->getSymbol().getName() +
+ "' can not be undefined in a subtraction expression");
+
+ // Select the appropriate difference relocation type.
+ Type = macho::RIT_ARM_HalfDifference;
+ Value2 = getSymbolAddress(B_SD, Layout);
+ FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
+ }
+
+ // Relocations are written out in reverse order, so the PAIR comes first.
+ // ARM_RELOC_HALF and ARM_RELOC_HALF_SECTDIFF abuse the r_length field:
+ //
+ // For these two r_type relocations they always have a pair following them
+ // and the r_length bits are used differently. The encoding of the
+ // r_length is as follows:
+ // low bit of r_length:
+ // 0 - :lower16: for movw instructions
+ // 1 - :upper16: for movt instructions
+ // high bit of r_length:
+ // 0 - arm instructions
+ // 1 - thumb instructions
+ // the other half of the relocated expression is in the following pair
+ // relocation entry in the low 16 bits of the r_address field.
+ unsigned ThumbBit = 0;
+ unsigned MovtBit = 0;
+ switch (Fixup.getKind()) {
+ default: break;
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ MovtBit = 1;
+ break;
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ MovtBit = 1;
+ // Fallthrough
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ ThumbBit = 1;
+ break;
+ }
+
+ if (Type == macho::RIT_ARM_HalfDifference) {
+ uint32_t OtherHalf = MovtBit
+ ? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16);
+
+ macho::RelocationEntry MRE;
+ MRE.Word0 = ((OtherHalf << 0) |
+ (macho::RIT_Pair << 24) |
+ (MovtBit << 28) |
+ (ThumbBit << 29) |
+ (IsPCRel << 30) |
+ macho::RF_Scattered);
+ MRE.Word1 = Value2;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ macho::RelocationEntry MRE;
+ MRE.Word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (MovtBit << 28) |
+ (ThumbBit << 29) |
+ (IsPCRel << 30) |
+ macho::RF_Scattered);
MRE.Word1 = Value;
Relocations[Fragment->getParent()].push_back(MRE);
}
@@ -746,7 +875,7 @@ public:
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) {
assert(Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP &&
- !Is64Bit &&
+ !is64Bit() &&
"Should only be called with a 32-bit TLVP relocation!");
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
@@ -764,50 +893,218 @@ public:
if (Target.getSymB()) {
// If this is a subtraction then we're pcrel.
uint32_t FixupAddress =
- Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+ getFragmentAddress(Fragment, Layout) + Fixup.getOffset();
MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
IsPCRel = 1;
- FixedValue = (FixupAddress - Layout.getSymbolAddress(SD_B) +
+ FixedValue = (FixupAddress - getSymbolAddress(SD_B, Layout) +
Target.getConstant());
FixedValue += 1ULL << Log2Size;
} else {
FixedValue = 0;
}
-
+
// struct relocation_info (8 bytes)
- MachRelocationEntry MRE;
+ macho::RelocationEntry MRE;
MRE.Word0 = Value;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (1 << 27) | // Extern
+ (macho::RIT_Generic_TLV << 28)); // Type
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
+ unsigned &Log2Size) {
+ RelocType = unsigned(macho::RIT_Vanilla);
+ Log2Size = ~0U;
+
+ switch (Kind) {
+ default:
+ return false;
+
+ case FK_Data_1:
+ Log2Size = llvm::Log2_32(1);
+ return true;
+ case FK_Data_2:
+ Log2Size = llvm::Log2_32(2);
+ return true;
+ case FK_Data_4:
+ Log2Size = llvm::Log2_32(4);
+ return true;
+ case FK_Data_8:
+ Log2Size = llvm::Log2_32(8);
+ return true;
+
+ // Handle 24-bit branch kinds.
+ case ARM::fixup_arm_ldst_pcrel_12:
+ case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
+ RelocType = unsigned(macho::RIT_ARM_Branch24Bit);
+ // Report as 'long', even though that is not quite accurate.
+ Log2Size = llvm::Log2_32(4);
+ return true;
+
+ // Handle Thumb branches.
+ case ARM::fixup_arm_thumb_br:
+ RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
+ Log2Size = llvm::Log2_32(2);
+ return true;
+
+ case ARM::fixup_arm_thumb_bl:
+ RelocType = unsigned(macho::RIT_ARM_ThumbBranch32Bit);
+ Log2Size = llvm::Log2_32(4);
+ return true;
+
+ case ARM::fixup_arm_thumb_blx:
+ RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
+ // Report as 'long', even though that is not quite accurate.
+ Log2Size = llvm::Log2_32(4);
+ return true;
+
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ RelocType = unsigned(macho::RIT_ARM_HalfDifference);
+ // Report as 'long', even though that is not quite accurate.
+ Log2Size = llvm::Log2_32(4);
+ return true;
+
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movw_lo16_pcrel:
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ RelocType = unsigned(macho::RIT_ARM_Half);
+ // Report as 'long', even though that is not quite accurate.
+ Log2Size = llvm::Log2_32(4);
+ return true;
+ }
+ }
+ void RecordARMRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue) {
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
+ unsigned Log2Size;
+ unsigned RelocType = macho::RIT_Vanilla;
+ if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size)) {
+ report_fatal_error("unknown ARM fixup kind!");
+ return;
+ }
+
+ // If this is a difference or a defined symbol plus an offset, then we need
+ // a scattered relocation entry. Differences always require scattered
+ // relocations.
+ if (Target.getSymB()) {
+ if (RelocType == macho::RIT_ARM_Half ||
+ RelocType == macho::RIT_ARM_HalfDifference)
+ return RecordARMMovwMovtRelocation(Asm, Layout, Fragment, Fixup,
+ Target, FixedValue);
+ return RecordARMScatteredRelocation(Asm, Layout, Fragment, Fixup,
+ Target, Log2Size, FixedValue);
+ }
+
+ // Get the symbol data, if any.
+ MCSymbolData *SD = 0;
+ if (Target.getSymA())
+ SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+
+ // FIXME: For other platforms, we need to use scattered relocations for
+ // internal relocations with offsets. If this is an internal relocation
+ // with an offset, it also needs a scattered relocation entry.
+ //
+ // Is this right for ARM?
+ uint32_t Offset = Target.getConstant();
+ if (IsPCRel && RelocType == macho::RIT_Vanilla)
+ Offset += 1 << Log2Size;
+ if (Offset && SD && !doesSymbolRequireExternRelocation(SD))
+ return RecordARMScatteredRelocation(Asm, Layout, Fragment, Fixup, Target,
+ Log2Size, FixedValue);
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+
+ if (Target.isAbsolute()) { // constant
+ // FIXME!
+ report_fatal_error("FIXME: relocations to absolute targets "
+ "not yet implemented");
+ } else if (SD->getSymbol().isVariable()) {
+ int64_t Res;
+ if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, SectionAddress)) {
+ FixedValue = Res;
+ return;
+ }
+
+ report_fatal_error("unsupported relocation of variable '" +
+ SD->getSymbol().getName() + "'");
+ } else {
+ // Check whether we need an external or internal relocation.
+ if (doesSymbolRequireExternRelocation(SD)) {
+ IsExtern = 1;
+ Index = SD->getIndex();
+ // For external relocations, make sure to offset the fixup value to
+ // compensate for the addend of the symbol address, if it was
+ // undefined. This occurs with weak definitions, for example.
+ if (!SD->Symbol->isUndefined())
+ FixedValue -= Layout.getSymbolOffset(SD);
+ } else {
+ // The index is the section ordinal (1-based).
+ Index = SD->getFragment()->getParent()->getOrdinal() + 1;
+ FixedValue += getSectionAddress(SD->getFragment()->getParent());
+ }
+ if (IsPCRel)
+ FixedValue -= getSectionAddress(Fragment->getParent());
+
+ // The type is determined by the fixup kind.
+ Type = RelocType;
+ }
+
+ // struct relocation_info (8 bytes)
+ macho::RelocationEntry MRE;
+ MRE.Word0 = FixupOffset;
MRE.Word1 = ((Index << 0) |
(IsPCRel << 24) |
(Log2Size << 25) |
- (1 << 27) | // Extern
- (RIT_TLV << 28)); // Type
+ (IsExtern << 27) |
+ (Type << 28));
Relocations[Fragment->getParent()].push_back(MRE);
}
-
+
void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) {
- if (Is64Bit) {
+ // FIXME: These need to be factored into the target Mach-O writer.
+ if (isARM()) {
+ RecordARMRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
+ return;
+ }
+ if (is64Bit()) {
RecordX86_64Relocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
return;
}
- unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
+ unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
// If this is a 32-bit TLVP reloc it's handled a bit differently.
- if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
+ if (Target.getSymA() &&
+ Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
RecordTLVPRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
return;
}
-
+
// If this is a difference or a defined symbol plus an offset, then we need
// a scattered relocation entry.
// Differences always require scattered relocations.
if (Target.getSymB())
return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
- Target, FixedValue);
+ Target, Log2Size, FixedValue);
// Get the symbol data, if any.
MCSymbolData *SD = 0;
@@ -821,7 +1118,7 @@ public:
Offset += 1 << Log2Size;
if (Offset && SD && !doesSymbolRequireExternRelocation(SD))
return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
- Target, FixedValue);
+ Target, Log2Size, FixedValue);
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
@@ -834,7 +1131,17 @@ public:
//
// FIXME: Currently, these are never generated (see code below). I cannot
// find a case where they are actually emitted.
- Type = RIT_Vanilla;
+ Type = macho::RIT_Vanilla;
+ } else if (SD->getSymbol().isVariable()) {
+ int64_t Res;
+ if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute(
+ Res, Layout, SectionAddress)) {
+ FixedValue = Res;
+ return;
+ }
+
+ report_fatal_error("unsupported relocation of variable '" +
+ SD->getSymbol().getName() + "'");
} else {
// Check whether we need an external or internal relocation.
if (doesSymbolRequireExternRelocation(SD)) {
@@ -844,17 +1151,20 @@ public:
// compensate for the addend of the symbol address, if it was
// undefined. This occurs with weak definitions, for example.
if (!SD->Symbol->isUndefined())
- FixedValue -= Layout.getSymbolAddress(SD);
+ FixedValue -= Layout.getSymbolOffset(SD);
} else {
// The index is the section ordinal (1-based).
Index = SD->getFragment()->getParent()->getOrdinal() + 1;
+ FixedValue += getSectionAddress(SD->getFragment()->getParent());
}
+ if (IsPCRel)
+ FixedValue -= getSectionAddress(Fragment->getParent());
- Type = RIT_Vanilla;
+ Type = macho::RIT_Vanilla;
}
// struct relocation_info (8 bytes)
- MachRelocationEntry MRE;
+ macho::RelocationEntry MRE;
MRE.Word0 = FixupOffset;
MRE.Word1 = ((Index << 0) |
(IsPCRel << 24) |
@@ -885,7 +1195,7 @@ public:
// Initialize the section indirect symbol base, if necessary.
if (!IndirectSymBase.count(it->SectionData))
IndirectSymBase[it->SectionData] = IndirectIndex;
-
+
Asm.getOrCreateSymbolData(*it->Symbol);
}
@@ -1028,7 +1338,25 @@ public:
StringTable += '\x00';
}
- void ExecutePostLayoutBinding(MCAssembler &Asm) {
+ void computeSectionAddresses(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ uint64_t StartAddress = 0;
+ const SmallVectorImpl<MCSectionData*> &Order = Layout.getSectionOrder();
+ for (int i = 0, n = Order.size(); i != n; ++i) {
+ const MCSectionData *SD = Order[i];
+ StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
+ SectionAddress[SD] = StartAddress;
+ StartAddress += Layout.getSectionAddressSize(SD);
+ // Explicitly pad the section to match the alignment requirements of the
+ // following one. This is for 'gas' compatibility; it shouldn't
+ // strictly be necessary.
+ StartAddress += getPaddingSize(SD, Layout);
+ }
+ }
+
+ void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout) {
+ computeSectionAddresses(Asm, Layout);
+
// Create symbol data for any indirect symbols.
BindIndirectSymbols(Asm);
@@ -1037,41 +1365,101 @@ public:
UndefinedSymbolData);
}
- void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout) {
+ virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbolData &DataA,
+ const MCFragment &FB,
+ bool InSet,
+ bool IsPCRel) const {
+ if (InSet)
+ return true;
+
+ // The effective address is
+ // addr(atom(A)) + offset(A)
+ // - addr(atom(B)) - offset(B)
+ // and the offsets are not relocatable, so the fixup is fully resolved when
+ // addr(atom(A)) - addr(atom(B)) == 0.
+ const MCSymbolData *A_Base = 0, *B_Base = 0;
+
+ const MCSymbol &SA = DataA.getSymbol().AliasedSymbol();
+ const MCSection &SecA = SA.getSection();
+ const MCSection &SecB = FB.getParent()->getSection();
+
+ if (IsPCRel) {
+ // The simple (Darwin, except on x86_64) way of dealing with this was to
+ // assume that any reference to a temporary symbol *must* be a temporary
+ // symbol in the same atom, unless the sections differ. Therefore, any
+ // PCrel relocation to a temporary symbol (in the same section) is fully
+ // resolved. This also works in conjunction with absolutized .set, which
+ // requires the compiler to use .set to absolutize the differences between
+ // symbols which the compiler knows to be assembly time constants, so we
+ // don't need to worry about considering symbol differences fully
+ // resolved.
+
+ if (!Asm.getBackend().hasReliableSymbolDifference()) {
+ if (!SA.isTemporary() || !SA.isInSection() || &SecA != &SecB)
+ return false;
+ return true;
+ }
+ } else {
+ if (!TargetObjectWriter->useAggressiveSymbolFolding())
+ return false;
+ }
+
+ const MCFragment &FA = *Asm.getSymbolData(SA).getFragment();
+
+ A_Base = FA.getAtom();
+ if (!A_Base)
+ return false;
+
+ B_Base = FB.getAtom();
+ if (!B_Base)
+ return false;
+
+ // If the atoms are the same, they are guaranteed to have the same address.
+ if (A_Base == B_Base)
+ return true;
+
+ // Otherwise, we can't prove this is fully resolved.
+ return false;
+ }
+
+ void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout) {
unsigned NumSections = Asm.size();
// The section data starts after the header, the segment load command (and
// section headers) and the symbol table.
unsigned NumLoadCommands = 1;
- uint64_t LoadCommandsSize = Is64Bit ?
- SegmentLoadCommand64Size + NumSections * Section64Size :
- SegmentLoadCommand32Size + NumSections * Section32Size;
+ uint64_t LoadCommandsSize = is64Bit() ?
+ macho::SegmentLoadCommand64Size + NumSections * macho::Section64Size :
+ macho::SegmentLoadCommand32Size + NumSections * macho::Section32Size;
// Add the symbol table load command sizes, if used.
unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
UndefinedSymbolData.size();
if (NumSymbols) {
NumLoadCommands += 2;
- LoadCommandsSize += SymtabLoadCommandSize + DysymtabLoadCommandSize;
+ LoadCommandsSize += (macho::SymtabLoadCommandSize +
+ macho::DysymtabLoadCommandSize);
}
// Compute the total size of the section data, as well as its file size and
// vm size.
- uint64_t SectionDataStart = (Is64Bit ? Header64Size : Header32Size)
- + LoadCommandsSize;
+ uint64_t SectionDataStart = (is64Bit() ? macho::Header64Size :
+ macho::Header32Size) + LoadCommandsSize;
uint64_t SectionDataSize = 0;
uint64_t SectionDataFileSize = 0;
uint64_t VMSize = 0;
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
const MCSectionData &SD = *it;
- uint64_t Address = Layout.getSectionAddress(&SD);
- uint64_t Size = Layout.getSectionSize(&SD);
+ uint64_t Address = getSectionAddress(&SD);
+ uint64_t Size = Layout.getSectionAddressSize(&SD);
uint64_t FileSize = Layout.getSectionFileSize(&SD);
+ FileSize += getPaddingSize(&SD, Layout);
VMSize = std::max(VMSize, Address + Size);
- if (Asm.getBackend().isVirtualSection(SD.getSection()))
+ if (SD.getSection().isVirtualSection())
continue;
SectionDataSize = std::max(SectionDataSize, Address + Size);
@@ -1094,11 +1482,11 @@ public:
uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
- std::vector<MachRelocationEntry> &Relocs = Relocations[it];
+ std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
unsigned NumRelocs = Relocs.size();
- uint64_t SectionStart = SectionDataStart + Layout.getSectionAddress(it);
+ uint64_t SectionStart = SectionDataStart + getSectionAddress(it);
WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
- RelocTableEnd += NumRelocs * RelocationInfoSize;
+ RelocTableEnd += NumRelocs * macho::RelocationInfoSize;
}
// Write the symbol table load command, if used.
@@ -1124,8 +1512,8 @@ public:
// The string table is written after symbol table.
uint64_t StringTableOffset =
- SymbolTableOffset + NumSymTabSymbols * (Is64Bit ? Nlist64Size :
- Nlist32Size);
+ SymbolTableOffset + NumSymTabSymbols * (is64Bit() ? macho::Nlist64Size :
+ macho::Nlist32Size);
WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
StringTableOffset, StringTable.size());
@@ -1137,8 +1525,13 @@ public:
// Write the actual section data.
for (MCAssembler::const_iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it)
- Asm.WriteSectionData(it, Layout, Writer);
+ ie = Asm.end(); it != ie; ++it) {
+ Asm.WriteSectionData(it, Layout);
+
+ uint64_t Pad = getPaddingSize(it, Layout);
+ for (unsigned int i = 0; i < Pad; ++i)
+ Write8(0);
+ }
// Write the extra padding.
WriteZeros(SectionDataPadding);
@@ -1148,7 +1541,7 @@ public:
ie = Asm.end(); it != ie; ++it) {
// Write the section relocation entries, in reverse order to match 'as'
// (approximately, the exact algorithm is more complicated than this).
- std::vector<MachRelocationEntry> &Relocs = Relocations[it];
+ std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
Write32(Relocs[e - i - 1].Word0);
Write32(Relocs[e - i - 1].Word1);
@@ -1169,9 +1562,9 @@ public:
// If this symbol is defined and internal, mark it as such.
if (it->Symbol->isDefined() &&
!Asm.getSymbolData(*it->Symbol).isExternal()) {
- uint32_t Flags = ISF_Local;
+ uint32_t Flags = macho::ISF_Local;
if (it->Symbol->isAbsolute())
- Flags |= ISF_Absolute;
+ Flags |= macho::ISF_Absolute;
Write32(Flags);
continue;
}
@@ -1198,32 +1591,8 @@ public:
}
-MachObjectWriter::MachObjectWriter(raw_ostream &OS,
- bool Is64Bit,
- bool IsLittleEndian)
- : MCObjectWriter(OS, IsLittleEndian)
-{
- Impl = new MachObjectWriterImpl(this, Is64Bit);
-}
-
-MachObjectWriter::~MachObjectWriter() {
- delete (MachObjectWriterImpl*) Impl;
-}
-
-void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
- ((MachObjectWriterImpl*) Impl)->ExecutePostLayoutBinding(Asm);
-}
-
-void MachObjectWriter::RecordRelocation(const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue) {
- ((MachObjectWriterImpl*) Impl)->RecordRelocation(Asm, Layout, Fragment, Fixup,
- Target, FixedValue);
-}
-
-void MachObjectWriter::WriteObject(const MCAssembler &Asm,
- const MCAsmLayout &Layout) {
- ((MachObjectWriterImpl*) Impl)->WriteObject(Asm, Layout);
+MCObjectWriter *llvm::createMachObjectWriter(MCMachObjectTargetWriter *MOTW,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ return new MachObjectWriter(MOTW, OS, IsLittleEndian);
}
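The factory above replaces the old pimpl-style MachObjectWriter construction: a target hands in its MCMachObjectTargetWriter and receives a generic MCObjectWriter. A minimal sketch of a target-side hook, under the assumption of a hypothetical X86MachObjectTargetWriter whose name and constructor are illustrative only:

    // Hypothetical target hook; the target-writer class and its
    // constructor argument are assumptions, not part of this change.
    MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS, bool Is64Bit) {
      MCMachObjectTargetWriter *MOTW = new X86MachObjectTargetWriter(Is64Bit);
      // Mach-O on x86 is always little-endian.
      return createMachObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
    }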
diff --git a/contrib/llvm/lib/MC/TargetAsmBackend.cpp b/contrib/llvm/lib/MC/TargetAsmBackend.cpp
index bbfddbe..1927557 100644
--- a/contrib/llvm/lib/MC/TargetAsmBackend.cpp
+++ b/contrib/llvm/lib/MC/TargetAsmBackend.cpp
@@ -10,13 +10,28 @@
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
-TargetAsmBackend::TargetAsmBackend(const Target &T)
- : TheTarget(T),
- HasAbsolutizedSet(false),
- HasReliableSymbolDifference(false),
- HasScatteredSymbols(false)
+TargetAsmBackend::TargetAsmBackend()
+ : HasReliableSymbolDifference(false)
{
}
TargetAsmBackend::~TargetAsmBackend() {
}
+
+const MCFixupKindInfo &
+TargetAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+ static const MCFixupKindInfo Builtins[] = {
+ { "FK_Data_1", 0, 8, 0 },
+ { "FK_Data_2", 0, 16, 0 },
+ { "FK_Data_4", 0, 32, 0 },
+ { "FK_Data_8", 0, 64, 0 },
+ { "FK_PCRel_1", 0, 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_PCRel_2", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_PCRel_4", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_PCRel_8", 0, 64, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+  assert((size_t)Kind < sizeof(Builtins) / sizeof(Builtins[0]) &&
+ "Unknown fixup kind");
+ return Builtins[Kind];
+}
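Each initializer supplies the four MCFixupKindInfo fields: a name, the bit offset into the fixup, the width in bits, and flags (here only FKF_IsPCRel). A minimal sketch of how a backend hook might consume this table, assuming little-endian data bytes and a zero target offset:

    // Sketch only: patch TargetSize bits of Value into the fragment data
    // at the fixup's offset, byte by byte, little-endian.
    #include "llvm/MC/MCFixup.h"
    #include "llvm/Target/TargetAsmBackend.h"
    static void applyDataFixup(const TargetAsmBackend &TAB,
                               const MCFixup &Fixup, char *Data,
                               uint64_t Value) {
      const MCFixupKindInfo &Info = TAB.getFixupKindInfo(Fixup.getKind());
      unsigned NumBytes = Info.TargetSize / 8;
      for (unsigned i = 0; i != NumBytes; ++i)
        Data[Fixup.getOffset() + i] |= uint8_t(Value >> (i * 8));
    }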
diff --git a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
index eeb2b96..6ca5d37 100644
--- a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -31,7 +31,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/TimeValue.h"
#include "../Target/X86/X86FixupKinds.h"
@@ -55,6 +55,9 @@ struct AuxSymbol {
COFF::Auxiliary Aux;
};
+class COFFSymbol;
+class COFFSection;
+
class COFFSymbol {
public:
COFF::symbol Data;
@@ -62,15 +65,19 @@ public:
typedef llvm::SmallVector<AuxSymbol, 1> AuxiliarySymbols;
name Name;
- size_t Index;
+ int Index;
AuxiliarySymbols Aux;
COFFSymbol *Other;
+ COFFSection *Section;
+ int Relocations;
MCSymbolData const *MCData;
- COFFSymbol(llvm::StringRef name, size_t index);
+ COFFSymbol(llvm::StringRef name);
size_t size() const;
void set_name_offset(uint32_t Offset);
+
+ bool should_keep() const;
};
// This class contains staging data for a COFF relocation entry.
@@ -89,12 +96,12 @@ public:
COFF::section Header;
std::string Name;
- size_t Number;
+ int Number;
MCSectionData const *MCData;
- COFFSymbol *Symb;
+ COFFSymbol *Symbol;
relocations Relocations;
- COFFSection(llvm::StringRef name, size_t Index);
+ COFFSection(llvm::StringRef name);
static size_t size();
};
@@ -118,11 +125,8 @@ public:
typedef std::vector<COFFSymbol*> symbols;
typedef std::vector<COFFSection*> sections;
- typedef StringMap<COFFSymbol *> name_symbol_map;
- typedef StringMap<COFFSection *> name_section_map;
-
- typedef DenseMap<MCSymbolData const *, COFFSymbol *> symbol_map;
- typedef DenseMap<MCSectionData const *, COFFSection *> section_map;
+ typedef DenseMap<MCSymbol const *, COFFSymbol *> symbol_map;
+ typedef DenseMap<MCSection const *, COFFSection *> section_map;
// Root level file contents.
bool Is64Bit;
@@ -138,11 +142,9 @@ public:
WinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
~WinCOFFObjectWriter();
- COFFSymbol *createSymbol(llvm::StringRef Name);
- COFFSection *createSection(llvm::StringRef Name);
-
- void InitCOFFEntity(COFFSymbol &Symbol);
- void InitCOFFEntity(COFFSection &Section);
+ COFFSymbol *createSymbol(StringRef Name);
+ COFFSymbol *GetOrCreateCOFFSymbol(const MCSymbol * Symbol);
+ COFFSection *createSection(StringRef Name);
template <typename object_t, typename list_t>
object_t *createCOFFEntity(llvm::StringRef Name, list_t &List);
@@ -150,9 +152,14 @@ public:
void DefineSection(MCSectionData const &SectionData);
void DefineSymbol(MCSymbolData const &SymbolData, MCAssembler &Assembler);
- bool ExportSection(COFFSection *S);
+ void MakeSymbolReal(COFFSymbol &S, size_t Index);
+ void MakeSectionReal(COFFSection &S, size_t Number);
+
+ bool ExportSection(COFFSection const *S);
bool ExportSymbol(MCSymbolData const &SymbolData, MCAssembler &Asm);
+ bool IsPhysicalSection(COFFSection *S);
+
// Entity writing methods.
void WriteFileHeader(const COFF::header &Header);
@@ -163,7 +170,7 @@ public:
// MCObjectWriter interface implementation.
- void ExecutePostLayoutBinding(MCAssembler &Asm);
+ void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout);
void RecordRelocation(const MCAssembler &Asm,
const MCAsmLayout &Layout,
@@ -172,7 +179,7 @@ public:
MCValue Target,
uint64_t &FixedValue);
- void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+ void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
};
}
@@ -198,9 +205,12 @@ static inline void write_uint8_le(void *Data, uint8_t const &Value) {
//------------------------------------------------------------------------------
// Symbol class implementation
-COFFSymbol::COFFSymbol(llvm::StringRef name, size_t index)
- : Name(name.begin(), name.end()), Index(-1)
- , Other(NULL), MCData(NULL) {
+COFFSymbol::COFFSymbol(llvm::StringRef name)
+ : Name(name.begin(), name.end())
+ , Other(NULL)
+ , Section(NULL)
+ , Relocations(0)
+ , MCData(NULL) {
memset(&Data, 0, sizeof(Data));
}
@@ -216,12 +226,41 @@ void COFFSymbol::set_name_offset(uint32_t Offset) {
write_uint32_le(Data.Name + 4, Offset);
}
+/// logic to decide if the symbol should be reported in the symbol table
+bool COFFSymbol::should_keep() const {
+  // no section means it's external, keep it
+ if (Section == NULL)
+ return true;
+
+ // if it has relocations pointing at it, keep it
+ if (Relocations > 0) {
+ assert(Section->Number != -1 && "Sections with relocations must be real!");
+ return true;
+ }
+
+  // if the section it's in is being dropped, drop it
+ if (Section->Number == -1)
+ return false;
+
+ // if it is the section symbol, keep it
+ if (Section->Symbol == this)
+ return true;
+
+  // if it's temporary, drop it
+ if (MCData && MCData->getSymbol().isTemporary())
+ return false;
+
+ // otherwise, keep it
+ return true;
+}
+
//------------------------------------------------------------------------------
// Section class implementation
-COFFSection::COFFSection(llvm::StringRef name, size_t Index)
- : Name(name), Number(Index + 1)
- , MCData(NULL), Symb(NULL) {
+COFFSection::COFFSection(llvm::StringRef name)
+ : Name(name)
+ , MCData(NULL)
+ , Symbol(NULL) {
memset(&Header, 0, sizeof(Header));
}
@@ -290,43 +329,22 @@ WinCOFFObjectWriter::~WinCOFFObjectWriter() {
delete *I;
}
-COFFSymbol *WinCOFFObjectWriter::createSymbol(llvm::StringRef Name) {
+COFFSymbol *WinCOFFObjectWriter::createSymbol(StringRef Name) {
return createCOFFEntity<COFFSymbol>(Name, Symbols);
}
-COFFSection *WinCOFFObjectWriter::createSection(llvm::StringRef Name) {
- return createCOFFEntity<COFFSection>(Name, Sections);
-}
-
-/// This function initializes a symbol by entering its name into the string
-/// table if it is too long to fit in the symbol table header.
-void WinCOFFObjectWriter::InitCOFFEntity(COFFSymbol &S) {
- if (S.Name.size() > COFF::NameSize) {
- size_t StringTableEntry = Strings.insert(S.Name.c_str());
-
- S.set_name_offset(StringTableEntry);
- } else
- memcpy(S.Data.Name, S.Name.c_str(), S.Name.size());
+COFFSymbol *WinCOFFObjectWriter::GetOrCreateCOFFSymbol(const MCSymbol * Symbol){
+ symbol_map::iterator i = SymbolMap.find(Symbol);
+ if (i != SymbolMap.end())
+ return i->second;
+ COFFSymbol *RetSymbol
+ = createCOFFEntity<COFFSymbol>(Symbol->getName(), Symbols);
+ SymbolMap[Symbol] = RetSymbol;
+ return RetSymbol;
}
-/// This function initializes a section by entering its name into the string
-/// table if it is too long to fit in the section table header.
-void WinCOFFObjectWriter::InitCOFFEntity(COFFSection &S) {
- if (S.Name.size() > COFF::NameSize) {
- size_t StringTableEntry = Strings.insert(S.Name.c_str());
-
- // FIXME: Why is this number 999999? This number is never mentioned in the
- // spec. I'm assuming this is due to the printed value needing to fit into
- // the S.Header.Name field. In which case why not 9999999 (7 9's instead of
- // 6)? The spec does not state if this entry should be null terminated in
- // this case, and thus this seems to be the best way to do it. I think I
- // just solved my own FIXME...
- if (StringTableEntry > 999999)
- report_fatal_error("COFF string table is greater than 999999 bytes.");
-
- sprintf(S.Header.Name, "/%d", (unsigned)StringTableEntry);
- } else
- memcpy(S.Header.Name, S.Name.c_str(), S.Name.size());
+COFFSection *WinCOFFObjectWriter::createSection(llvm::StringRef Name) {
+ return createCOFFEntity<COFFSection>(Name, Sections);
}
/// A template used to lookup or create a symbol/section, and initialize it if
@@ -334,9 +352,7 @@ void WinCOFFObjectWriter::InitCOFFEntity(COFFSection &S) {
template <typename object_t, typename list_t>
object_t *WinCOFFObjectWriter::createCOFFEntity(llvm::StringRef Name,
list_t &List) {
- object_t *Object = new object_t(Name, List.size());
-
- InitCOFFEntity(*Object);
+ object_t *Object = new object_t(Name);
List.push_back(Object);
@@ -346,6 +362,8 @@ object_t *WinCOFFObjectWriter::createCOFFEntity(llvm::StringRef Name,
/// This function takes a section data object from the assembler
/// and creates the associated COFF section staging object.
void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
+ assert(SectionData.getSection().getVariant() == MCSection::SV_COFF
+ && "Got non COFF section in the COFF backend!");
// FIXME: Not sure how to verify this (at least in a debug build).
MCSectionCOFF const &Sec =
static_cast<MCSectionCOFF const &>(SectionData.getSection());
@@ -353,15 +371,14 @@ void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
COFFSection *coff_section = createSection(Sec.getSectionName());
COFFSymbol *coff_symbol = createSymbol(Sec.getSectionName());
- coff_section->Symb = coff_symbol;
+ coff_section->Symbol = coff_symbol;
+ coff_symbol->Section = coff_section;
coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_STATIC;
- coff_symbol->Data.SectionNumber = coff_section->Number;
// In this case the auxiliary symbol is a Section Definition.
coff_symbol->Aux.resize(1);
memset(&coff_symbol->Aux[0], 0, sizeof(coff_symbol->Aux[0]));
coff_symbol->Aux[0].AuxType = ATSectionDefinition;
- coff_symbol->Aux[0].Aux.SectionDefinition.Number = coff_section->Number;
coff_symbol->Aux[0].Aux.SectionDefinition.Selection = Sec.getSelection();
coff_section->Header.Characteristics = Sec.getCharacteristics();
@@ -388,18 +405,53 @@ void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
// Bind internal COFF section to MC section.
coff_section->MCData = &SectionData;
- SectionMap[&SectionData] = coff_section;
+ SectionMap[&SectionData.getSection()] = coff_section;
}
/// This function takes a section data object from the assembler
/// and creates the associated COFF symbol staging object.
void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData,
- MCAssembler &Assembler) {
- COFFSymbol *coff_symbol = createSymbol(SymbolData.getSymbol().getName());
+ MCAssembler &Assembler) {
+ COFFSymbol *coff_symbol = GetOrCreateCOFFSymbol(&SymbolData.getSymbol());
coff_symbol->Data.Type = (SymbolData.getFlags() & 0x0000FFFF) >> 0;
coff_symbol->Data.StorageClass = (SymbolData.getFlags() & 0x00FF0000) >> 16;
+ if (SymbolData.getFlags() & COFF::SF_WeakExternal) {
+ coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
+
+ if (SymbolData.getSymbol().isVariable()) {
+ coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
+ const MCExpr *Value = SymbolData.getSymbol().getVariableValue();
+
+ // FIXME: This assert message isn't very good.
+ assert(Value->getKind() == MCExpr::SymbolRef &&
+ "Value must be a SymbolRef!");
+
+ const MCSymbolRefExpr *SymbolRef =
+ static_cast<const MCSymbolRefExpr *>(Value);
+ coff_symbol->Other = GetOrCreateCOFFSymbol(&SymbolRef->getSymbol());
+ } else {
+ std::string WeakName = std::string(".weak.")
+ + SymbolData.getSymbol().getName().str()
+ + ".default";
+ COFFSymbol *WeakDefault = createSymbol(WeakName);
+ WeakDefault->Data.SectionNumber = COFF::IMAGE_SYM_ABSOLUTE;
+ WeakDefault->Data.StorageClass = COFF::IMAGE_SYM_CLASS_EXTERNAL;
+ WeakDefault->Data.Type = 0;
+ WeakDefault->Data.Value = 0;
+ coff_symbol->Other = WeakDefault;
+ }
+
+ // Setup the Weak External auxiliary symbol.
+ coff_symbol->Aux.resize(1);
+ memset(&coff_symbol->Aux[0], 0, sizeof(coff_symbol->Aux[0]));
+ coff_symbol->Aux[0].AuxType = ATWeakExternal;
+ coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = 0;
+ coff_symbol->Aux[0].Aux.WeakExternal.Characteristics =
+ COFF::IMAGE_WEAK_EXTERN_SEARCH_LIBRARY;
+ }
+
// If no storage class was specified in the streamer, define it here.
if (coff_symbol->Data.StorageClass == 0) {
bool external = SymbolData.isExternal() || (SymbolData.Fragment == NULL);
@@ -408,44 +460,51 @@ void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData,
external ? COFF::IMAGE_SYM_CLASS_EXTERNAL : COFF::IMAGE_SYM_CLASS_STATIC;
}
- if (SymbolData.getFlags() & COFF::SF_WeakReference) {
- coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
-
- const MCExpr *Value = SymbolData.getSymbol().getVariableValue();
+ if (SymbolData.Fragment != NULL)
+ coff_symbol->Section =
+ SectionMap[&SymbolData.Fragment->getParent()->getSection()];
- // FIXME: This assert message isn't very good.
- assert(Value->getKind() == MCExpr::SymbolRef &&
- "Value must be a SymbolRef!");
+ // Bind internal COFF symbol to MC symbol.
+ coff_symbol->MCData = &SymbolData;
+ SymbolMap[&SymbolData.getSymbol()] = coff_symbol;
+}
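For a weak symbol with no user-provided alternate, the writer fabricates an absolute ".weak.<name>.default" symbol and points the weak external's auxiliary record at it. Schematically, a weak "foo" produces two symbol-table entries shaped like this (field values restate the assignments above; the layout is illustrative):

    // foo                  StorageClass = IMAGE_SYM_CLASS_WEAK_EXTERNAL
    //   aux WeakExternal:  TagIndex -> .weak.foo.default
    //                      Characteristics = IMAGE_WEAK_EXTERN_SEARCH_LIBRARY
    // .weak.foo.default    SectionNumber = IMAGE_SYM_ABSOLUTE,
    //                      StorageClass = IMAGE_SYM_CLASS_EXTERNAL, Value = 0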
- const MCSymbolRefExpr *SymbolRef =
- static_cast<const MCSymbolRefExpr *>(Value);
+/// Making a section real involves assigning it a number and putting its
+/// name into the string table if needed.
+void WinCOFFObjectWriter::MakeSectionReal(COFFSection &S, size_t Number) {
+ if (S.Name.size() > COFF::NameSize) {
+ size_t StringTableEntry = Strings.insert(S.Name.c_str());
- const MCSymbolData &OtherSymbolData =
- Assembler.getSymbolData(SymbolRef->getSymbol());
+ // FIXME: Why is this number 999999? This number is never mentioned in the
+ // spec. I'm assuming this is due to the printed value needing to fit into
+ // the S.Header.Name field. In which case why not 9999999 (7 9's instead of
+ // 6)? The spec does not state if this entry should be null terminated in
+ // this case, and thus this seems to be the best way to do it. I think I
+ // just solved my own FIXME...
+ if (StringTableEntry > 999999)
+ report_fatal_error("COFF string table is greater than 999999 bytes.");
- // FIXME: This assert message isn't very good.
- assert(SymbolMap.find(&OtherSymbolData) != SymbolMap.end() &&
- "OtherSymbolData must be in the symbol map!");
+ std::sprintf(S.Header.Name, "/%d", unsigned(StringTableEntry));
+ } else
+ std::memcpy(S.Header.Name, S.Name.c_str(), S.Name.size());
- coff_symbol->Other = SymbolMap[&OtherSymbolData];
+ S.Number = Number;
+ S.Symbol->Data.SectionNumber = S.Number;
+ S.Symbol->Aux[0].Aux.SectionDefinition.Number = S.Number;
+}
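A section name longer than COFF::NameSize (8 bytes) cannot live in the fixed header field, so it goes into the string table and the header stores "/<decimal offset>" instead; "/999999" plus a terminator is exactly 8 bytes, which is where the cap in the FIXME comes from. A stand-alone sketch of the same encoding, assuming the string-table insert() returning an offset seen above:

    // Minimal restatement of the long-name encoding in MakeSectionReal.
    #include <cassert>
    #include <cstdio>
    #include <cstring>
    #include "llvm/ADT/StringRef.h"
    void encodeSectionName(char Field[8], llvm::StringRef Name,
                           size_t StrTabOffset) {
      if (Name.size() <= 8) {
        std::memcpy(Field, Name.data(), Name.size()); // no NUL if exactly 8
      } else {
        assert(StrTabOffset <= 999999 && "offset must fit in \"/dddddd\"");
        std::sprintf(Field, "/%u", unsigned(StrTabOffset));
      }
    }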
- // Setup the Weak External auxiliary symbol.
- coff_symbol->Aux.resize(1);
- memset(&coff_symbol->Aux[0], 0, sizeof(coff_symbol->Aux[0]));
- coff_symbol->Aux[0].AuxType = ATWeakExternal;
- coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = 0;
- coff_symbol->Aux[0].Aux.WeakExternal.Characteristics =
- COFF::IMAGE_WEAK_EXTERN_SEARCH_LIBRARY;
- }
+void WinCOFFObjectWriter::MakeSymbolReal(COFFSymbol &S, size_t Index) {
+ if (S.Name.size() > COFF::NameSize) {
+ size_t StringTableEntry = Strings.insert(S.Name.c_str());
- // Bind internal COFF symbol to MC symbol.
- coff_symbol->MCData = &SymbolData;
- SymbolMap[&SymbolData] = coff_symbol;
+ S.set_name_offset(StringTableEntry);
+ } else
+ std::memcpy(S.Data.Name, S.Name.c_str(), S.Name.size());
+ S.Index = Index;
}
-bool WinCOFFObjectWriter::ExportSection(COFFSection *S) {
- return (S->Header.Characteristics
- & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0;
+bool WinCOFFObjectWriter::ExportSection(COFFSection const *S) {
+ return !S->MCData->getFragmentList().empty();
}
bool WinCOFFObjectWriter::ExportSymbol(MCSymbolData const &SymbolData,
@@ -455,8 +514,14 @@ bool WinCOFFObjectWriter::ExportSymbol(MCSymbolData const &SymbolData,
// return Asm.isSymbolLinkerVisible (&SymbolData);
- // For now, all symbols are exported, the linker will sort it out for us.
- return true;
+  // For now, all non-variable symbols are exported;
+ // the linker will sort the rest out for us.
+ return SymbolData.isExternal() || !SymbolData.getSymbol().isVariable();
+}
+
+bool WinCOFFObjectWriter::IsPhysicalSection(COFFSection *S) {
+ return (S->Header.Characteristics
+ & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0;
}
//------------------------------------------------------------------------------
@@ -546,9 +611,10 @@ void WinCOFFObjectWriter::WriteRelocation(const COFF::relocation &R) {
////////////////////////////////////////////////////////////////////////////////
// MCObjectWriter interface implementations
-void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
+void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
// "Define" each section & symbol. This creates section & symbol
- // entries in the staging area and gives them their final indexes.
+ // entries in the staging area.
for (MCAssembler::const_iterator i = Asm.begin(), e = Asm.end(); i != e; i++)
DefineSection(*i);
@@ -574,19 +640,24 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
MCSectionData const *SectionData = Fragment->getParent();
// Mark this symbol as requiring an entry in the symbol table.
- assert(SectionMap.find(SectionData) != SectionMap.end() &&
+ assert(SectionMap.find(&SectionData->getSection()) != SectionMap.end() &&
"Section must already have been defined in ExecutePostLayoutBinding!");
- assert(SymbolMap.find(&A_SD) != SymbolMap.end() &&
+ assert(SymbolMap.find(&A_SD.getSymbol()) != SymbolMap.end() &&
"Symbol must already have been defined in ExecutePostLayoutBinding!");
- COFFSection *coff_section = SectionMap[SectionData];
- COFFSymbol *coff_symbol = SymbolMap[&A_SD];
+ COFFSection *coff_section = SectionMap[&SectionData->getSection()];
+ COFFSymbol *coff_symbol = SymbolMap[&A_SD.getSymbol()];
if (Target.getSymB()) {
+ if (&Target.getSymA()->getSymbol().getSection()
+ != &Target.getSymB()->getSymbol().getSection()) {
+ llvm_unreachable("Symbol relative relocations are only allowed between "
+ "symbols in the same section");
+ }
const MCSymbol *B = &Target.getSymB()->getSymbol();
MCSymbolData &B_SD = Asm.getSymbolData(*B);
- FixedValue = Layout.getSymbolAddress(&A_SD) - Layout.getSymbolAddress(&B_SD);
+ FixedValue = Layout.getSymbolOffset(&A_SD) - Layout.getSymbolOffset(&B_SD);
// In the case where we have SymbA and SymB, we just need to store the delta
// between the two symbols. Update FixedValue to account for the delta, and
@@ -600,12 +671,21 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
Reloc.Data.SymbolTableIndex = 0;
Reloc.Data.VirtualAddress = Layout.getFragmentOffset(Fragment);
- Reloc.Symb = coff_symbol;
+
+ // Turn relocations for temporary symbols into section relocations.
+ if (coff_symbol->MCData->getSymbol().isTemporary()) {
+ Reloc.Symb = coff_symbol->Section->Symbol;
+ FixedValue += Layout.getFragmentOffset(coff_symbol->MCData->Fragment)
+ + coff_symbol->MCData->getOffset();
+ } else
+ Reloc.Symb = coff_symbol;
+
+ ++Reloc.Symb->Relocations;
Reloc.Data.VirtualAddress += Fixup.getOffset();
- switch (Fixup.getKind()) {
- case X86::reloc_pcrel_4byte:
+ switch ((unsigned)Fixup.getKind()) {
+ case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_REL32
@@ -615,6 +695,7 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
FixedValue += 4;
break;
case FK_Data_4:
+ case X86::reloc_signed_4byte:
Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32
: COFF::IMAGE_REL_I386_DIR32;
break;
@@ -631,9 +712,19 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
coff_section->Relocations.push_back(Reloc);
}
-void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
+void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
const MCAsmLayout &Layout) {
// Assign symbol and section indexes and offsets.
+ Header.NumberOfSections = 0;
+
+ for (sections::iterator i = Sections.begin(),
+ e = Sections.end(); i != e; i++) {
+ if (Layout.getSectionAddressSize((*i)->MCData) > 0) {
+ MakeSectionReal(**i, ++Header.NumberOfSections);
+ } else {
+ (*i)->Number = -1;
+ }
+ }
Header.NumberOfSymbols = 0;
@@ -641,32 +732,35 @@ void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
COFFSymbol *coff_symbol = *i;
MCSymbolData const *SymbolData = coff_symbol->MCData;
- coff_symbol->Index = Header.NumberOfSymbols++;
-
// Update section number & offset for symbols that have them.
if ((SymbolData != NULL) && (SymbolData->Fragment != NULL)) {
- COFFSection *coff_section = SectionMap[SymbolData->Fragment->getParent()];
+ assert(coff_symbol->Section != NULL);
- coff_symbol->Data.SectionNumber = coff_section->Number;
+ coff_symbol->Data.SectionNumber = coff_symbol->Section->Number;
coff_symbol->Data.Value = Layout.getFragmentOffset(SymbolData->Fragment)
+ SymbolData->Offset;
}
- // Update auxiliary symbol info.
- coff_symbol->Data.NumberOfAuxSymbols = coff_symbol->Aux.size();
- Header.NumberOfSymbols += coff_symbol->Data.NumberOfAuxSymbols;
+ if (coff_symbol->should_keep()) {
+ MakeSymbolReal(*coff_symbol, Header.NumberOfSymbols++);
+
+ // Update auxiliary symbol info.
+ coff_symbol->Data.NumberOfAuxSymbols = coff_symbol->Aux.size();
+ Header.NumberOfSymbols += coff_symbol->Data.NumberOfAuxSymbols;
+ } else
+ coff_symbol->Index = -1;
}
// Fixup weak external references.
for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++) {
- COFFSymbol *symb = *i;
-
- if (symb->Other != NULL) {
- assert(symb->Aux.size() == 1 &&
+ COFFSymbol *coff_symbol = *i;
+ if (coff_symbol->Other != NULL) {
+ assert(coff_symbol->Index != -1);
+ assert(coff_symbol->Aux.size() == 1 &&
"Symbol must contain one aux symbol!");
- assert(symb->Aux[0].AuxType == ATWeakExternal &&
+ assert(coff_symbol->Aux[0].AuxType == ATWeakExternal &&
"Symbol's aux symbol must be a Weak External!");
- symb->Aux[0].Aux.WeakExternal.TagIndex = symb->Other->Index;
+ coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = coff_symbol->Other->Index;
}
}
@@ -675,18 +769,19 @@ void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
unsigned offset = 0;
offset += COFF::HeaderSize;
- offset += COFF::SectionSize * Asm.size();
-
- Header.NumberOfSections = Sections.size();
+ offset += COFF::SectionSize * Header.NumberOfSections;
for (MCAssembler::const_iterator i = Asm.begin(),
e = Asm.end();
i != e; i++) {
- COFFSection *Sec = SectionMap[i];
+ COFFSection *Sec = SectionMap[&i->getSection()];
- Sec->Header.SizeOfRawData = Layout.getSectionFileSize(i);
+ if (Sec->Number == -1)
+ continue;
- if (ExportSection(Sec)) {
+ Sec->Header.SizeOfRawData = Layout.getSectionAddressSize(i);
+
+ if (IsPhysicalSection(Sec)) {
Sec->Header.PointerToRawData = offset;
offset += Sec->Header.SizeOfRawData;
@@ -700,13 +795,15 @@ void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
for (relocations::iterator cr = Sec->Relocations.begin(),
er = Sec->Relocations.end();
- cr != er; cr++) {
+ cr != er; ++cr) {
+ assert((*cr).Symb->Index != -1);
(*cr).Data.SymbolTableIndex = (*cr).Symb->Index;
}
}
- assert(Sec->Symb->Aux.size() == 1 && "Section's symbol must have one aux!");
- AuxSymbol &Aux = Sec->Symb->Aux[0];
+ assert(Sec->Symbol->Aux.size() == 1
+ && "Section's symbol must have one aux!");
+ AuxSymbol &Aux = Sec->Symbol->Aux[0];
assert(Aux.AuxType == ATSectionDefinition &&
"Section's symbol's aux symbol must be a Section Definition!");
Aux.Aux.SectionDefinition.Length = Sec->Header.SizeOfRawData;
@@ -728,16 +825,21 @@ void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
MCAssembler::const_iterator j, je;
for (i = Sections.begin(), ie = Sections.end(); i != ie; i++)
- WriteSectionHeader((*i)->Header);
+ if ((*i)->Number != -1)
+ WriteSectionHeader((*i)->Header);
for (i = Sections.begin(), ie = Sections.end(),
j = Asm.begin(), je = Asm.end();
- (i != ie) && (j != je); i++, j++) {
+ (i != ie) && (j != je); ++i, ++j) {
+
+ if ((*i)->Number == -1)
+ continue;
+
if ((*i)->Header.PointerToRawData != 0) {
assert(OS.tell() == (*i)->Header.PointerToRawData &&
"Section::PointerToRawData is insane!");
- Asm.WriteSectionData(j, Layout, this);
+ Asm.WriteSectionData(j, Layout);
}
if ((*i)->Relocations.size() > 0) {
@@ -759,7 +861,8 @@ void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
"Header::PointerToSymbolTable is insane!");
for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++)
- WriteSymbol(*i);
+ if ((*i)->Index != -1)
+ WriteSymbol(*i);
OS.write((char const *)&Strings.Data.front(), Strings.Data.size());
}
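Taken together, the rewritten WriteObject emits the object file in a fixed order; summarizing the code above:

    // 1. COFF file header
    // 2. section headers           (only sections with Number != -1)
    // 3. per section: raw data, then its relocation entries
    // 4. symbol table              (only symbols with Index != -1)
    // 5. string table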
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
index 8a194bf..46968e6 100644
--- a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -48,8 +48,10 @@ public:
// MCStreamer interface
+ virtual void InitSections();
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitThumbFunc(MCSymbol *Func);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
@@ -66,18 +68,55 @@ public:
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace);
- virtual void EmitGPRel32Value(const MCExpr *Value);
virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
unsigned ValueSize, unsigned MaxBytesToEmit);
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit);
- virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
virtual void EmitFileDirective(StringRef Filename);
- virtual void EmitDwarfFileDirective(unsigned FileNo,StringRef Filename);
virtual void EmitInstruction(const MCInst &Instruction);
virtual void Finish();
+
+private:
+ virtual void EmitInstToFragment(const MCInst &Inst) {
+ llvm_unreachable("Not used by WinCOFF.");
+ }
+ virtual void EmitInstToData(const MCInst &Inst) {
+ llvm_unreachable("Not used by WinCOFF.");
+ }
+
+ void SetSection(StringRef Section,
+ unsigned Characteristics,
+ SectionKind Kind) {
+ SwitchSection(getContext().getCOFFSection(Section, Characteristics, Kind));
+ }
+
+ void SetSectionText() {
+ SetSection(".text",
+ COFF::IMAGE_SCN_CNT_CODE
+ | COFF::IMAGE_SCN_MEM_EXECUTE
+ | COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getText());
+ EmitCodeAlignment(4, 0);
+ }
+
+ void SetSectionData() {
+ SetSection(".data",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+ EmitCodeAlignment(4, 0);
+ }
+
+ void SetSectionBSS() {
+ SetSection(".bss",
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getBSS());
+ EmitCodeAlignment(4, 0);
+ }
+
};
} // end anonymous namespace.
@@ -126,47 +165,81 @@ void WinCOFFStreamer::AddCommonSymbol(MCSymbol *Symbol, uint64_t Size,
// MCStreamer interface
+void WinCOFFStreamer::InitSections() {
+ SetSectionText();
+ SetSectionData();
+ SetSectionBSS();
+ SetSectionText();
+}
+
void WinCOFFStreamer::EmitLabel(MCSymbol *Symbol) {
- // TODO: This is copied almost exactly from the MachOStreamer. Consider
- // merging into MCObjectStreamer?
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
- assert(CurSection && "Cannot emit before setting section!");
-
- Symbol->setSection(*CurSection);
-
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
-
- // FIXME: This is wasteful, we don't necessarily need to create a data
- // fragment. Instead, we should mark the symbol as pointing into the data
- // fragment if it exists, otherwise we should just queue the label and set its
- // fragment pointer when we emit the next fragment.
- MCDataFragment *DF = getOrCreateDataFragment();
-
- assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
- SD.setFragment(DF);
- SD.setOffset(DF->getContents().size());
+ MCObjectStreamer::EmitLabel(Symbol);
}
void WinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
llvm_unreachable("not implemented");
}
+void WinCOFFStreamer::EmitThumbFunc(MCSymbol *Func) {
+ llvm_unreachable("not implemented");
+}
+
void WinCOFFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- // TODO: This is exactly the same as MachOStreamer. Consider merging into
- // MCObjectStreamer.
- getAssembler().getOrCreateSymbolData(*Symbol);
- AddValueSymbols(Value);
- Symbol->setVariableValue(Value);
+ assert((Symbol->isInSection()
+ ? Symbol->getSection().getVariant() == MCSection::SV_COFF
+ : true) && "Got non COFF section in the COFF backend!");
+ // FIXME: This is all very ugly and depressing. What needs to happen here
+ // depends on quite a few things that are all part of relaxation, which we
+ // don't really even do.
+
+ if (Value->getKind() != MCExpr::SymbolRef) {
+ // TODO: This is exactly the same as MachOStreamer. Consider merging into
+ // MCObjectStreamer.
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ AddValueSymbols(Value);
+ Symbol->setVariableValue(Value);
+ } else {
+ // FIXME: This is a horrible way to do this :(. This should really be
+ // handled after we are done with the MC* objects and immediately before
+ // writing out the object file when we know exactly what the symbol should
+ // look like in the coff symbol table. I'm not doing that now because the
+ // COFF object writer doesn't have a clearly defined separation between MC
+ // data structures, the object writers data structures, and the raw, POD,
+ // data structures that get written to disk.
+
+ // Copy over the aliased data.
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ const MCSymbolData &RealSD = getAssembler().getOrCreateSymbolData(
+ dyn_cast<const MCSymbolRefExpr>(Value)->getSymbol());
+
+ // FIXME: This is particularly nasty because it breaks as soon as any data
+ // members of MCSymbolData change.
+ SD.CommonAlign = RealSD.CommonAlign;
+ SD.CommonSize = RealSD.CommonSize;
+ SD.Flags = RealSD.Flags;
+ SD.Fragment = RealSD.Fragment;
+ SD.Index = RealSD.Index;
+ SD.IsExternal = RealSD.IsExternal;
+ SD.IsPrivateExtern = RealSD.IsPrivateExtern;
+ SD.Offset = RealSD.Offset;
+ SD.SymbolSize = RealSD.SymbolSize;
+ }
}
void WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) {
+ assert(Symbol && "Symbol must be non-null!");
+ assert((Symbol->isInSection()
+ ? Symbol->getSection().getVariant() == MCSection::SV_COFF
+ : true) && "Got non COFF section in the COFF backend!");
switch (Attribute) {
case MCSA_WeakReference:
- getAssembler().getOrCreateSymbolData(*Symbol).modifyFlags(
- COFF::SF_WeakReference,
- COFF::SF_WeakReference);
+ case MCSA_Weak: {
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.modifyFlags(COFF::SF_WeakExternal, COFF::SF_WeakExternal);
+ SD.setExternal(true);
+ }
break;
case MCSA_Global:
@@ -184,6 +257,9 @@ void WinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
}
void WinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) {
+ assert((Symbol->isInSection()
+ ? Symbol->getSection().getVariant() == MCSection::SV_COFF
+ : true) && "Got non COFF section in the COFF backend!");
assert(CurSymbol == NULL && "EndCOFFSymbolDef must be called between calls "
"to BeginCOFFSymbolDef!");
CurSymbol = Symbol;
@@ -220,10 +296,16 @@ void WinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
void WinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {
+ assert((Symbol->isInSection()
+ ? Symbol->getSection().getVariant() == MCSection::SV_COFF
+ : true) && "Got non COFF section in the COFF backend!");
AddCommonSymbol(Symbol, Size, ByteAlignment, true);
}
void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ assert((Symbol->isInSection()
+ ? Symbol->getSection().getVariant() == MCSection::SV_COFF
+ : true) && "Got non COFF section in the COFF backend!");
AddCommonSymbol(Symbol, Size, 1, false);
}
@@ -243,32 +325,6 @@ void WinCOFFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
}
-void WinCOFFStreamer::EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) {
- assert(AddrSpace == 0 && "Address space must be 0!");
-
- // TODO: This is copied exactly from the MachOStreamer. Consider merging into
- // MCObjectStreamer?
- MCDataFragment *DF = getOrCreateDataFragment();
-
- // Avoid fixups when possible.
- int64_t AbsValue;
- if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue)) {
- // FIXME: Endianness assumption.
- for (unsigned i = 0; i != Size; ++i)
- DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
- } else {
- DF->addFixup(MCFixup::Create(DF->getContents().size(),
- AddValueSymbols(Value),
- MCFixup::getKindForSize(Size)));
- DF->getContents().resize(DF->getContents().size() + Size, 0);
- }
-}
-
-void WinCOFFStreamer::EmitGPRel32Value(const MCExpr *Value) {
- llvm_unreachable("not implemented");
-}
-
void WinCOFFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
int64_t Value,
unsigned ValueSize,
@@ -300,21 +356,11 @@ void WinCOFFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
getCurrentSectionData()->setAlignment(ByteAlignment);
}
-void WinCOFFStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {
- llvm_unreachable("not implemented");
-}
-
void WinCOFFStreamer::EmitFileDirective(StringRef Filename) {
// Ignore for now, linkers don't care, and proper debug
  // info will be a much larger effort.
}
-void WinCOFFStreamer::EmitDwarfFileDirective(unsigned FileNo,
- StringRef Filename) {
- llvm_unreachable("not implemented");
-}
-
void WinCOFFStreamer::EmitInstruction(const MCInst &Instruction) {
for (unsigned i = 0, e = Instruction.getNumOperands(); i != e; ++i)
if (Instruction.getOperand(i).isExpr())
diff --git a/contrib/llvm/lib/Object/CMakeLists.txt b/contrib/llvm/lib/Object/CMakeLists.txt
new file mode 100644
index 0000000..6a6814f
--- /dev/null
+++ b/contrib/llvm/lib/Object/CMakeLists.txt
@@ -0,0 +1,6 @@
+add_llvm_library(LLVMObject
+ MachOObject.cpp
+ ObjectFile.cpp
+ COFFObjectFile.cpp
+ ELFObjectFile.cpp
+ )
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
new file mode 100644
index 0000000..cfee82a
--- /dev/null
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -0,0 +1,375 @@
+//===- COFFObjectFile.cpp - COFF object file implementation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the COFFObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace object;
+
+namespace {
+using support::ulittle8_t;
+using support::ulittle16_t;
+using support::ulittle32_t;
+using support::little16_t;
+}
+
+namespace {
+struct coff_file_header {
+ ulittle16_t Machine;
+ ulittle16_t NumberOfSections;
+ ulittle32_t TimeDateStamp;
+ ulittle32_t PointerToSymbolTable;
+ ulittle32_t NumberOfSymbols;
+ ulittle16_t SizeOfOptionalHeader;
+ ulittle16_t Characteristics;
+};
+}
+
+extern char coff_file_header_layout_static_assert
+ [sizeof(coff_file_header) == 20 ? 1 : -1];
+
+namespace {
+struct coff_symbol {
+ struct StringTableOffset {
+ ulittle32_t Zeroes;
+ ulittle32_t Offset;
+ };
+
+ union {
+ char ShortName[8];
+ StringTableOffset Offset;
+ } Name;
+
+ ulittle32_t Value;
+ little16_t SectionNumber;
+
+ struct {
+ ulittle8_t BaseType;
+ ulittle8_t ComplexType;
+ } Type;
+
+ ulittle8_t StorageClass;
+ ulittle8_t NumberOfAuxSymbols;
+};
+}
+
+extern char coff_coff_symbol_layout_static_assert
+ [sizeof(coff_symbol) == 18 ? 1 : -1];
+
+namespace {
+struct coff_section {
+ char Name[8];
+ ulittle32_t VirtualSize;
+ ulittle32_t VirtualAddress;
+ ulittle32_t SizeOfRawData;
+ ulittle32_t PointerToRawData;
+ ulittle32_t PointerToRelocations;
+ ulittle32_t PointerToLinenumbers;
+ ulittle16_t NumberOfRelocations;
+ ulittle16_t NumberOfLinenumbers;
+ ulittle32_t Characteristics;
+};
+}
+
+extern char coff_coff_section_layout_static_assert
+ [sizeof(coff_section) == 40 ? 1 : -1];
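The extern char arrays are a C++03 static-assert idiom: if a struct's size ever drifts from the on-disk record size, the array dimension becomes -1 and compilation fails. Under C++11, which this tree does not assume, the same checks would read:

    static_assert(sizeof(coff_file_header) == 20, "file header is 20 bytes");
    static_assert(sizeof(coff_symbol)      == 18, "symbol record is 18 bytes");
    static_assert(sizeof(coff_section)     == 40, "section header is 40 bytes");

Note that the 18 checked here is the same constant used to locate the string table in the COFFObjectFile constructor further down.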
+
+namespace {
+class COFFObjectFile : public ObjectFile {
+private:
+ const coff_file_header *Header;
+ const coff_section *SectionTable;
+ const coff_symbol *SymbolTable;
+ const char *StringTable;
+
+ const coff_section *getSection(std::size_t index) const;
+ const char *getString(std::size_t offset) const;
+
+protected:
+ virtual SymbolRef getSymbolNext(DataRefImpl Symb) const;
+ virtual StringRef getSymbolName(DataRefImpl Symb) const;
+ virtual uint64_t getSymbolAddress(DataRefImpl Symb) const;
+ virtual uint64_t getSymbolSize(DataRefImpl Symb) const;
+ virtual char getSymbolNMTypeChar(DataRefImpl Symb) const;
+ virtual bool isSymbolInternal(DataRefImpl Symb) const;
+
+ virtual SectionRef getSectionNext(DataRefImpl Sec) const;
+ virtual StringRef getSectionName(DataRefImpl Sec) const;
+ virtual uint64_t getSectionAddress(DataRefImpl Sec) const;
+ virtual uint64_t getSectionSize(DataRefImpl Sec) const;
+ virtual StringRef getSectionContents(DataRefImpl Sec) const;
+ virtual bool isSectionText(DataRefImpl Sec) const;
+
+public:
+ COFFObjectFile(MemoryBuffer *Object);
+ virtual symbol_iterator begin_symbols() const;
+ virtual symbol_iterator end_symbols() const;
+ virtual section_iterator begin_sections() const;
+ virtual section_iterator end_sections() const;
+
+ virtual uint8_t getBytesInAddress() const;
+ virtual StringRef getFileFormatName() const;
+ virtual unsigned getArch() const;
+};
+} // end namespace
+
+SymbolRef COFFObjectFile::getSymbolNext(DataRefImpl Symb) const {
+ const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+ symb += 1 + symb->NumberOfAuxSymbols;
+ Symb.p = reinterpret_cast<intptr_t>(symb);
+ return SymbolRef(Symb, this);
+}
+
+StringRef COFFObjectFile::getSymbolName(DataRefImpl Symb) const {
+ const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+ // Check for string table entry. First 4 bytes are 0.
+ if (symb->Name.Offset.Zeroes == 0) {
+ uint32_t Offset = symb->Name.Offset.Offset;
+ return StringRef(getString(Offset));
+ }
+
+ if (symb->Name.ShortName[7] == 0)
+ // Null terminated, let ::strlen figure out the length.
+ return StringRef(symb->Name.ShortName);
+ // Not null terminated, use all 8 bytes.
+ return StringRef(symb->Name.ShortName, 8);
+}
+
+uint64_t COFFObjectFile::getSymbolAddress(DataRefImpl Symb) const {
+ const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+ const coff_section *Section = getSection(symb->SectionNumber);
+ char Type = getSymbolNMTypeChar(Symb);
+ if (Type == 'U' || Type == 'w')
+ return UnknownAddressOrSize;
+ if (Section)
+ return Section->VirtualAddress + symb->Value;
+ return symb->Value;
+}
+
+uint64_t COFFObjectFile::getSymbolSize(DataRefImpl Symb) const {
+ // FIXME: Return the correct size. This requires looking at all the symbols
+ // in the same section as this symbol, and looking for either the next
+ // symbol, or the end of the section.
+ const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+ const coff_section *Section = getSection(symb->SectionNumber);
+ char Type = getSymbolNMTypeChar(Symb);
+ if (Type == 'U' || Type == 'w')
+ return UnknownAddressOrSize;
+ if (Section)
+ return Section->SizeOfRawData - symb->Value;
+ return 0;
+}
+
+char COFFObjectFile::getSymbolNMTypeChar(DataRefImpl Symb) const {
+ const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+ char ret = StringSwitch<char>(getSymbolName(Symb))
+ .StartsWith(".debug", 'N')
+ .StartsWith(".sxdata", 'N')
+ .Default('?');
+
+ if (ret != '?')
+ return ret;
+
+ uint32_t Characteristics = 0;
+ uint32_t PointerToRawData = 0;
+ const coff_section *Section = getSection(symb->SectionNumber);
+ if (Section) {
+ Characteristics = Section->Characteristics;
+ PointerToRawData = Section->PointerToRawData;
+ }
+
+ switch (symb->SectionNumber) {
+ case COFF::IMAGE_SYM_UNDEFINED:
+ // Check storage classes.
+ if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
+ return 'w'; // Don't do ::toupper.
+ else
+ ret = 'u';
+ break;
+ case COFF::IMAGE_SYM_ABSOLUTE:
+ ret = 'a';
+ break;
+ case COFF::IMAGE_SYM_DEBUG:
+ ret = 'n';
+ break;
+ default:
+ // Check section type.
+ if (Characteristics & COFF::IMAGE_SCN_CNT_CODE)
+ ret = 't';
+ else if ( Characteristics & COFF::IMAGE_SCN_MEM_READ
+ && ~Characteristics & COFF::IMAGE_SCN_MEM_WRITE) // Read only.
+ ret = 'r';
+ else if (Characteristics & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA)
+ ret = 'd';
+ else if (Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
+ ret = 'b';
+ else if (Characteristics & COFF::IMAGE_SCN_LNK_INFO)
+ ret = 'i';
+
+ // Check for section symbol.
+ else if ( symb->StorageClass == COFF::IMAGE_SYM_CLASS_STATIC
+ && symb->Value == 0)
+ ret = 's';
+ }
+
+ if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL)
+ ret = ::toupper(ret);
+
+ return ret;
+}
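For reference, the letters follow the nm(1) convention: 't', 'd', 'b' and 'r' mark text, initialized-data, uninitialized-data and read-only sections; 'u', 'a', 'n', 'w' and 'i' mark undefined, absolute, debug, weak-external and linker-info symbols; 's' marks a section symbol; and an external symbol is reported in uppercase.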
+
+bool COFFObjectFile::isSymbolInternal(DataRefImpl Symb) const {
+ return false;
+}
+
+SectionRef COFFObjectFile::getSectionNext(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ sec += 1;
+ Sec.p = reinterpret_cast<intptr_t>(sec);
+ return SectionRef(Sec, this);
+}
+
+StringRef COFFObjectFile::getSectionName(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ StringRef name;
+ if (sec->Name[7] == 0)
+ // Null terminated, let ::strlen figure out the length.
+ name = sec->Name;
+ else
+ // Not null terminated, use all 8 bytes.
+ name = StringRef(sec->Name, 8);
+
+ // Check for string table entry. First byte is '/'.
+ if (name[0] == '/') {
+ uint32_t Offset;
+    name.substr(1).getAsInteger(10, Offset); // skip the leading '/'
+ return StringRef(getString(Offset));
+ }
+
+ // It's just a normal name.
+ return name;
+}
+
+uint64_t COFFObjectFile::getSectionAddress(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ return sec->VirtualAddress;
+}
+
+uint64_t COFFObjectFile::getSectionSize(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ return sec->SizeOfRawData;
+}
+
+StringRef COFFObjectFile::getSectionContents(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ return StringRef(reinterpret_cast<const char *>(base + sec->PointerToRawData),
+ sec->SizeOfRawData);
+}
+
+bool COFFObjectFile::isSectionText(DataRefImpl Sec) const {
+ const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+ return sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE;
+}
+
+COFFObjectFile::COFFObjectFile(MemoryBuffer *Object)
+ : ObjectFile(Object) {
+ Header = reinterpret_cast<const coff_file_header *>(base);
+ SectionTable =
+ reinterpret_cast<const coff_section *>( base
+ + sizeof(coff_file_header)
+ + Header->SizeOfOptionalHeader);
+ SymbolTable =
+ reinterpret_cast<const coff_symbol *>(base + Header->PointerToSymbolTable);
+
+ // Find string table.
+ StringTable = reinterpret_cast<const char *>(base)
+ + Header->PointerToSymbolTable
+ + Header->NumberOfSymbols * 18;
+}
+
+ObjectFile::symbol_iterator COFFObjectFile::begin_symbols() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(SymbolTable);
+ return symbol_iterator(SymbolRef(ret, this));
+}
+
+ObjectFile::symbol_iterator COFFObjectFile::end_symbols() const {
+ // The symbol table ends where the string table begins.
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(StringTable);
+ return symbol_iterator(SymbolRef(ret, this));
+}
+
+ObjectFile::section_iterator COFFObjectFile::begin_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(SectionTable);
+ return section_iterator(SectionRef(ret, this));
+}
+
+ObjectFile::section_iterator COFFObjectFile::end_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(SectionTable + Header->NumberOfSections);
+ return section_iterator(SectionRef(ret, this));
+}
+
+uint8_t COFFObjectFile::getBytesInAddress() const {
+ return getArch() == Triple::x86_64 ? 8 : 4;
+}
+
+StringRef COFFObjectFile::getFileFormatName() const {
+ switch(Header->Machine) {
+ case COFF::IMAGE_FILE_MACHINE_I386:
+ return "COFF-i386";
+ case COFF::IMAGE_FILE_MACHINE_AMD64:
+ return "COFF-x86-64";
+ default:
+ return "COFF-<unknown arch>";
+ }
+}
+
+unsigned COFFObjectFile::getArch() const {
+ switch(Header->Machine) {
+ case COFF::IMAGE_FILE_MACHINE_I386:
+ return Triple::x86;
+ case COFF::IMAGE_FILE_MACHINE_AMD64:
+ return Triple::x86_64;
+ default:
+ return Triple::UnknownArch;
+ }
+}
+
+const coff_section *COFFObjectFile::getSection(std::size_t index) const {
+ if (index > 0 && index <= Header->NumberOfSections)
+ return SectionTable + (index - 1);
+ return 0;
+}
+
+const char *COFFObjectFile::getString(std::size_t offset) const {
+ const ulittle32_t *StringTableSize =
+ reinterpret_cast<const ulittle32_t *>(StringTable);
+ if (offset < *StringTableSize)
+ return StringTable + offset;
+ return 0;
+}
+
+namespace llvm {
+
+ ObjectFile *ObjectFile::createCOFFObjectFile(MemoryBuffer *Object) {
+ return new COFFObjectFile(Object);
+ }
+
+} // end namespace llvm
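A minimal sketch of driving the new reader; the SymbolRef accessors are assumed to forward to the virtual methods declared above, and buffer ownership passes to the ObjectFile:

    // Illustrative only: the iterator/accessor spellings are assumptions.
    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;
    using namespace object;

    void dumpCOFFSymbols(StringRef Path) {
      MemoryBuffer *Buf = MemoryBuffer::getFile(Path);
      if (!Buf) return;
      ObjectFile *Obj = ObjectFile::createCOFFObjectFile(Buf);
      for (ObjectFile::symbol_iterator I = Obj->begin_symbols(),
                                       E = Obj->end_symbols(); I != E; ++I)
        outs() << I->getNMTypeChar() << ' ' << I->getName() << '\n';
      delete Obj; // frees Buf as well
    }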
diff --git a/contrib/llvm/lib/Object/ELFObjectFile.cpp b/contrib/llvm/lib/Object/ELFObjectFile.cpp
new file mode 100644
index 0000000..682be77
--- /dev/null
+++ b/contrib/llvm/lib/Object/ELFObjectFile.cpp
@@ -0,0 +1,686 @@
+//===- ELFObjectFile.cpp - ELF object file implementation -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ELFObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <limits>
+#include <utility>
+
+using namespace llvm;
+using namespace object;
+
+// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
+namespace {
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelperCommon {
+ typedef support::detail::packed_endian_specific_integral
+ <uint16_t, target_endianness, support::aligned> Elf_Half;
+ typedef support::detail::packed_endian_specific_integral
+ <uint32_t, target_endianness, support::aligned> Elf_Word;
+ typedef support::detail::packed_endian_specific_integral
+ <int32_t, target_endianness, support::aligned> Elf_Sword;
+ typedef support::detail::packed_endian_specific_integral
+ <uint64_t, target_endianness, support::aligned> Elf_Xword;
+ typedef support::detail::packed_endian_specific_integral
+ <int64_t, target_endianness, support::aligned> Elf_Sxword;
+};
+}
+
+namespace {
+template<support::endianness target_endianness, bool is64Bits>
+struct ELFDataTypeTypedefHelper;
+
+/// ELF 32bit types.
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelper<target_endianness, false>
+ : ELFDataTypeTypedefHelperCommon<target_endianness> {
+ typedef support::detail::packed_endian_specific_integral
+ <uint32_t, target_endianness, support::aligned> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral
+ <uint32_t, target_endianness, support::aligned> Elf_Off;
+};
+
+/// ELF 64bit types.
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelper<target_endianness, true>
+ : ELFDataTypeTypedefHelperCommon<target_endianness>{
+ typedef support::detail::packed_endian_specific_integral
+ <uint64_t, target_endianness, support::aligned> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral
+ <uint64_t, target_endianness, support::aligned> Elf_Off;
+};
+}
+
+// I really don't like doing this, but the alternative is copypasta.
+#define LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Addr Elf_Addr; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Off Elf_Off; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Half Elf_Half; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Word Elf_Word; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sword Elf_Sword; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Xword Elf_Xword; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sxword Elf_Sxword;
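The macro re-imports the dependent typedefs into every template that needs them because C++03 has no alias templates. With C++11 the same thing could be one line per type, sketched here under that assumption:

    // C++11 alternative (not usable in this C++03 tree):
    template<support::endianness E, bool Is64>
    using Elf_Addr_t =
        typename ELFDataTypeTypedefHelper<E, Is64>::Elf_Addr;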
+
+ // Section header.
+namespace {
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Shdr_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Shdr_Base<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Word sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Word sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Word sh_addralign;// Section address alignment
+ Elf_Word sh_entsize; // Size of records contained within the section
+};
+
+template<support::endianness target_endianness>
+struct Elf_Shdr_Base<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Xword sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Xword sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Xword sh_addralign;// Section address alignment
+ Elf_Xword sh_entsize; // Size of records contained within the section
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Shdr_Impl : Elf_Shdr_Base<target_endianness, is64Bits> {
+ using Elf_Shdr_Base<target_endianness, is64Bits>::sh_entsize;
+ using Elf_Shdr_Base<target_endianness, is64Bits>::sh_size;
+
+ /// @brief Get the number of entities this section contains if it has any.
+ unsigned getEntityCount() const {
+ if (sh_entsize == 0)
+ return 0;
+ return sh_size / sh_entsize;
+ }
+};
+}
+
+namespace {
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Sym_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Sym_Base<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word st_name; // Symbol name (index into string table)
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Word st_size; // Size of the symbol
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+};
+
+template<support::endianness target_endianness>
+struct Elf_Sym_Base<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Xword st_size; // Size of the symbol
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Sym_Impl : Elf_Sym_Base<target_endianness, is64Bits> {
+ using Elf_Sym_Base<target_endianness, is64Bits>::st_info;
+
+ // These accessors and mutators correspond to the ELF32_ST_BIND,
+ // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+}
+
+namespace {
+template<support::endianness target_endianness, bool is64Bits>
+class ELFObjectFile : public ObjectFile {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+
+ typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
+ typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
+
+ struct Elf_Ehdr {
+ unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+ Elf_Half e_type; // Type of file (see ET_*)
+ Elf_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf_Word e_version; // Must be equal to 1
+ Elf_Addr e_entry; // Address to jump to in order to start program
+ Elf_Off e_phoff; // Program header table's file offset, in bytes
+ Elf_Off e_shoff; // Section header table's file offset, in bytes
+ Elf_Word e_flags; // Processor-specific flags
+ Elf_Half e_ehsize; // Size of ELF header, in bytes
+ Elf_Half e_phentsize;// Size of an entry in the program header table
+ Elf_Half e_phnum; // Number of entries in the program header table
+ Elf_Half e_shentsize;// Size of an entry in the section header table
+ Elf_Half e_shnum; // Number of entries in the section header table
+ Elf_Half e_shstrndx; // Section header table index of section name
+ // string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+ };
+
+ typedef SmallVector<const Elf_Shdr*, 1> SymbolTableSections_t;
+
+ const Elf_Ehdr *Header;
+ const Elf_Shdr *SectionHeaderTable;
+ const Elf_Shdr *dot_shstrtab_sec; // Section header string table.
+ const Elf_Shdr *dot_strtab_sec; // Symbol name string table.
+ SymbolTableSections_t SymbolTableSections;
+
+ void validateSymbol(DataRefImpl Symb) const;
+ const Elf_Sym *getSymbol(DataRefImpl Symb) const;
+ const Elf_Shdr *getSection(DataRefImpl index) const;
+ const Elf_Shdr *getSection(uint16_t index) const;
+ const char *getString(uint16_t section, uint32_t offset) const;
+ const char *getString(const Elf_Shdr *section, uint32_t offset) const;
+
+protected:
+ virtual SymbolRef getSymbolNext(DataRefImpl Symb) const;
+ virtual StringRef getSymbolName(DataRefImpl Symb) const;
+ virtual uint64_t getSymbolAddress(DataRefImpl Symb) const;
+ virtual uint64_t getSymbolSize(DataRefImpl Symb) const;
+ virtual char getSymbolNMTypeChar(DataRefImpl Symb) const;
+ virtual bool isSymbolInternal(DataRefImpl Symb) const;
+
+ virtual SectionRef getSectionNext(DataRefImpl Sec) const;
+ virtual StringRef getSectionName(DataRefImpl Sec) const;
+ virtual uint64_t getSectionAddress(DataRefImpl Sec) const;
+ virtual uint64_t getSectionSize(DataRefImpl Sec) const;
+ virtual StringRef getSectionContents(DataRefImpl Sec) const;
+ virtual bool isSectionText(DataRefImpl Sec) const;
+
+public:
+ ELFObjectFile(MemoryBuffer *Object);
+ virtual symbol_iterator begin_symbols() const;
+ virtual symbol_iterator end_symbols() const;
+ virtual section_iterator begin_sections() const;
+ virtual section_iterator end_sections() const;
+
+ virtual uint8_t getBytesInAddress() const;
+ virtual StringRef getFileFormatName() const;
+ virtual unsigned getArch() const;
+};
+} // end namespace
+
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>
+ ::validateSymbol(DataRefImpl Symb) const {
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
+ // FIXME: We really need to do proper error handling in the case of an invalid
+ // input file. Because we don't use exceptions, I think we'll just pass
+ // an error object around.
+ if (!( symb
+ && SymbolTableSection
+ && symb >= (const Elf_Sym*)(base
+ + SymbolTableSection->sh_offset)
+ && symb < (const Elf_Sym*)(base
+ + SymbolTableSection->sh_offset
+ + SymbolTableSection->sh_size)))
+ // FIXME: Proper error handling.
+ report_fatal_error("Symb must point to a valid symbol!");
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+SymbolRef ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolNext(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
+
+ ++Symb.d.a;
+ // Check to see if we are at the end of this symbol table.
+ if (Symb.d.a >= SymbolTableSection->getEntityCount()) {
+ // We are at the end. If there are other symbol tables, jump to them.
+ ++Symb.d.b;
+ Symb.d.a = 1; // The 0th symbol in ELF is fake.
+ // If there are no more symbol tables, return the terminator.
+ if (Symb.d.b >= SymbolTableSections.size()) {
+ Symb.d.a = std::numeric_limits<uint32_t>::max();
+ Symb.d.b = std::numeric_limits<uint32_t>::max();
+ }
+ }
+
+ return SymbolRef(Symb, this);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolName(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ if (symb->st_name == 0) {
+ const Elf_Shdr *section = getSection(symb->st_shndx);
+ if (!section)
+ return "";
+ return getString(dot_shstrtab_sec, section->sh_name);
+ }
+
+ // Use the default symbol string table section (.strtab).
+ return getString(dot_strtab_sec, symb->st_name);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolAddress(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *Section;
+ switch (symb->st_shndx) {
+ case ELF::SHN_COMMON:
+ // Undefined symbols have no address yet.
+ case ELF::SHN_UNDEF: return UnknownAddressOrSize;
+ case ELF::SHN_ABS: return symb->st_value;
+ default: Section = getSection(symb->st_shndx);
+ }
+
+ switch (symb->getType()) {
+ case ELF::STT_SECTION: return Section ? Section->sh_addr
+ : UnknownAddressOrSize;
+ case ELF::STT_FUNC:
+ case ELF::STT_OBJECT:
+ case ELF::STT_NOTYPE:
+ return symb->st_value;
+ default: return UnknownAddressOrSize;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolSize(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ if (symb->st_size == 0)
+ return UnknownAddressOrSize;
+ return symb->st_size;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+char ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolNMTypeChar(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *Section = getSection(symb->st_shndx);
+
+ char ret = '?';
+
+ if (Section) {
+ switch (Section->sh_type) {
+ case ELF::SHT_PROGBITS:
+ case ELF::SHT_DYNAMIC:
+ switch (Section->sh_flags) {
+ case (ELF::SHF_ALLOC | ELF::SHF_EXECINSTR):
+ ret = 't'; break;
+ case (ELF::SHF_ALLOC | ELF::SHF_WRITE):
+ ret = 'd'; break;
+ case ELF::SHF_ALLOC:
+ case (ELF::SHF_ALLOC | ELF::SHF_MERGE):
+ case (ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS):
+ ret = 'r'; break;
+ }
+ break;
+ case ELF::SHT_NOBITS: ret = 'b';
+ }
+ }
+
+ switch (symb->st_shndx) {
+ case ELF::SHN_UNDEF:
+ if (ret == '?')
+ ret = 'U';
+ break;
+ case ELF::SHN_ABS: ret = 'a'; break;
+ case ELF::SHN_COMMON: ret = 'c'; break;
+ }
+
+ switch (symb->getBinding()) {
+ case ELF::STB_GLOBAL: ret = ::toupper(ret); break;
+ case ELF::STB_WEAK:
+ if (symb->st_shndx == ELF::SHN_UNDEF)
+ ret = 'w';
+ else
+ if (symb->getType() == ELF::STT_OBJECT)
+ ret = 'V';
+ else
+ ret = 'W';
+ }
+
+ if (ret == '?' && symb->getType() == ELF::STT_SECTION)
+ return StringSwitch<char>(getSymbolName(Symb))
+ .StartsWith(".debug", 'N')
+ .StartsWith(".note", 'n');
+
+ return ret;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+bool ELFObjectFile<target_endianness, is64Bits>
+ ::isSymbolInternal(DataRefImpl Symb) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+
+ if ( symb->getType() == ELF::STT_FILE
+ || symb->getType() == ELF::STT_SECTION)
+ return true;
+ return false;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+SectionRef ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionNext(DataRefImpl Sec) const {
+ const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
+ sec += Header->e_shentsize;
+ Sec.p = reinterpret_cast<intptr_t>(sec);
+ return SectionRef(Sec, this);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionName(DataRefImpl Sec) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ return StringRef(getString(dot_shstrtab_sec, sec->sh_name));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionAddress(DataRefImpl Sec) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ return sec->sh_addr;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionSize(DataRefImpl Sec) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ return sec->sh_size;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionContents(DataRefImpl Sec) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ const char *start = (char*)base + sec->sh_offset;
+ return StringRef(start, sec->sh_size);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+bool ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionText(DataRefImpl Sec) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & ELF::SHF_EXECINSTR)
+ return true;
+ return false;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object)
+ : ObjectFile(Object)
+ , SectionHeaderTable(0)
+ , dot_shstrtab_sec(0)
+ , dot_strtab_sec(0) {
+ Header = reinterpret_cast<const Elf_Ehdr *>(base);
+
+ if (Header->e_shoff == 0)
+ return;
+
+ SectionHeaderTable =
+ reinterpret_cast<const Elf_Shdr *>(base + Header->e_shoff);
+ uint32_t SectionTableSize = Header->e_shnum * Header->e_shentsize;
+ if (!( (const uint8_t *)SectionHeaderTable + SectionTableSize
+ <= base + MapFile->getBufferSize()))
+ // FIXME: Proper error handling.
+ report_fatal_error("Section table goes past end of file!");
+
+ // To find the symbol tables we walk the section table to find SHT_SYMTAB.
+ for (const char *i = reinterpret_cast<const char *>(SectionHeaderTable),
+ *e = i + Header->e_shnum * Header->e_shentsize;
+ i != e; i += Header->e_shentsize) {
+ const Elf_Shdr *sh = reinterpret_cast<const Elf_Shdr*>(i);
+ if (sh->sh_type == ELF::SHT_SYMTAB) {
+ SymbolTableSections.push_back(sh);
+ }
+ }
+
+ // Get string table sections.
+ dot_shstrtab_sec = getSection(Header->e_shstrndx);
+ if (dot_shstrtab_sec) {
+ // Verify that the last byte in the string table is a null terminator.
+ if (((const char*)base + dot_shstrtab_sec->sh_offset)
+ [dot_shstrtab_sec->sh_size - 1] != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("String table must end with a null terminator!");
+ }
+
+ // FIXME: Merge this into the above loop.
+ for (const char *i = reinterpret_cast<const char *>(SectionHeaderTable),
+ *e = i + Header->e_shnum * Header->e_shentsize;
+ i != e; i += Header->e_shentsize) {
+ const Elf_Shdr *sh = reinterpret_cast<const Elf_Shdr*>(i);
+ if (sh->sh_type == ELF::SHT_STRTAB) {
+ StringRef SectionName(getString(dot_shstrtab_sec, sh->sh_name));
+ if (SectionName == ".strtab") {
+ if (dot_strtab_sec != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("Already found section named .strtab!");
+ dot_strtab_sec = sh;
+ const char *dot_strtab = (const char*)base + sh->sh_offset;
+ if (dot_strtab[sh->sh_size - 1] != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("String table must end with a null terminator!");
+ }
+ }
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ObjectFile::symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_symbols() const {
+ DataRefImpl SymbolData;
+ memset(&SymbolData, 0, sizeof(SymbolData));
+ if (SymbolTableSections.size() == 0) {
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ } else {
+ SymbolData.d.a = 1; // The 0th symbol in ELF is fake.
+ SymbolData.d.b = 0;
+ }
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ObjectFile::symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_symbols() const {
+ DataRefImpl SymbolData;
+ memset(&SymbolData, 0, sizeof(SymbolData));
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ObjectFile::section_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(base + Header->e_shoff);
+ return section_iterator(SectionRef(ret, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ObjectFile::section_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(base
+ + Header->e_shoff
+ + (Header->e_shentsize * Header->e_shnum));
+ return section_iterator(SectionRef(ret, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint8_t ELFObjectFile<target_endianness, is64Bits>::getBytesInAddress() const {
+ return is64Bits ? 8 : 4;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef ELFObjectFile<target_endianness, is64Bits>
+ ::getFileFormatName() const {
+ switch(Header->e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return "ELF32-i386";
+ case ELF::EM_X86_64:
+ return "ELF32-x86-64";
+ default:
+ return "ELF32-unknown";
+ }
+ case ELF::ELFCLASS64:
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return "ELF64-i386";
+ case ELF::EM_X86_64:
+ return "ELF64-x86-64";
+ default:
+ return "ELF64-unknown";
+ }
+ default:
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid ELFCLASS!");
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const {
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return Triple::x86;
+ case ELF::EM_X86_64:
+ return Triple::x86_64;
+ default:
+ return Triple::UnknownArch;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
+ELFObjectFile<target_endianness, is64Bits>::getSymbol(DataRefImpl Symb) const {
+ const Elf_Shdr *sec = SymbolTableSections[Symb.d.b];
+ return reinterpret_cast<const Elf_Sym *>(
+ base
+ + sec->sh_offset
+ + (Symb.d.a * sec->sh_entsize));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>::getSection(DataRefImpl Symb) const {
+ const Elf_Shdr *sec = getSection(Symb.d.b);
+ if (sec->sh_type != ELF::SHT_SYMTAB)
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid symbol table section!");
+ return sec;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>::getSection(uint16_t index) const {
+ if (index == 0 || index >= ELF::SHN_LORESERVE)
+ return 0;
+ if (!SectionHeaderTable || index >= Header->e_shnum)
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid section index!");
+
+ return reinterpret_cast<const Elf_Shdr *>(
+ reinterpret_cast<const char *>(SectionHeaderTable)
+ + (index * Header->e_shentsize));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const char *ELFObjectFile<target_endianness, is64Bits>
+ ::getString(uint16_t section,
+ ELF::Elf32_Word offset) const {
+ return getString(getSection(section), offset);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const char *ELFObjectFile<target_endianness, is64Bits>
+ ::getString(const Elf_Shdr *section,
+ ELF::Elf32_Word offset) const {
+ assert(section && section->sh_type == ELF::SHT_STRTAB && "Invalid section!");
+ if (offset >= section->sh_size)
+ // FIXME: Proper error handling.
+ report_fatal_error("Sybol name offset outside of string table!");
+ return (const char *)base + section->sh_offset + offset;
+}
+
+// EI_CLASS, EI_DATA.
+static std::pair<unsigned char, unsigned char>
+getElfArchType(MemoryBuffer *Object) {
+ if (Object->getBufferSize() < ELF::EI_NIDENT)
+ return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE);
+ return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS]
+ , (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
+}
+
+namespace llvm {
+
+ ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
+ std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);
+ if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
+ return new ELFObjectFile<support::little, false>(Object);
+ else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB)
+ return new ELFObjectFile<support::big, false>(Object);
+ else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB)
+ return new ELFObjectFile<support::little, true>(Object);
+ else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB)
+ return new ELFObjectFile<support::big, true>(Object);
+ // FIXME: Proper error handling.
+ report_fatal_error("Not an ELF object file!");
+ }
+
+} // end namespace llvm
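
A hypothetical caller sketch (openELF is illustrative, not part of this change): the factory above reads e_ident[EI_CLASS] and e_ident[EI_DATA] and picks one of the four ELFObjectFile<target_endianness, is64Bits> instantiations.

#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"

// Assumes the headers added by this commit; ownership of Buf transfers to
// the returned object.
llvm::object::ObjectFile *openELF(llvm::MemoryBuffer *Buf) {
  return llvm::object::ObjectFile::createELFObjectFile(Buf);
}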
diff --git a/contrib/llvm/lib/Object/MachOObject.cpp b/contrib/llvm/lib/Object/MachOObject.cpp
new file mode 100644
index 0000000..5e64d63
--- /dev/null
+++ b/contrib/llvm/lib/Object/MachOObject.cpp
@@ -0,0 +1,342 @@
+//===- MachOObject.cpp - Mach-O Object File Wrapper -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/MachOObject.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+/* Translation Utilities */
+
+template<typename T>
+static void SwapValue(T &Value) {
+ Value = sys::SwapByteOrder(Value);
+}
+
+template<typename T>
+static void SwapStruct(T &Value);
+
+template<typename T>
+static void ReadInMemoryStruct(const MachOObject &MOO,
+ StringRef Buffer, uint64_t Base,
+ InMemoryStruct<T> &Res) {
+ typedef T struct_type;
+ uint64_t Size = sizeof(struct_type);
+
+ // Check that the buffer contains the expected data.
+ if (Base + Size > Buffer.size()) {
+ Res = 0;
+ return;
+ }
+
+ // Check whether we can return a direct pointer.
+ struct_type *Ptr = (struct_type *) (Buffer.data() + Base);
+ if (!MOO.isSwappedEndian()) {
+ Res = Ptr;
+ return;
+ }
+
+ // Otherwise, copy the struct and translate the values.
+ Res = *Ptr;
+ SwapStruct(*Res);
+}
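
ReadInMemoryStruct above hands back a direct pointer when file and host agree on byte order, and otherwise copies the struct and swaps each field. A standalone sketch of the swap itself, with swap32 as an illustrative stand-in for sys::SwapByteOrder:

#include <cassert>
#include <cstdint>

// swap32 is an illustrative stand-in for sys::SwapByteOrder.
static uint32_t swap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);
}

int main() {
  // Bytes ed fe 00 00 in the file become 0x0000feed once swapped, mirroring
  // the isSwappedEndian() copy-and-swap path above.
  uint32_t fileValue = 0xedfe0000;
  assert(swap32(fileValue) == 0x0000feedu);
  return 0;
}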
+
+/* *** */
+
+MachOObject::MachOObject(MemoryBuffer *Buffer_, bool IsLittleEndian_,
+ bool Is64Bit_)
+ : Buffer(Buffer_), IsLittleEndian(IsLittleEndian_), Is64Bit(Is64Bit_),
+ IsSwappedEndian(IsLittleEndian != sys::isLittleEndianHost()),
+ HasStringTable(false), LoadCommands(0), NumLoadedCommands(0) {
+ // Load the common header.
+ memcpy(&Header, Buffer->getBuffer().data(), sizeof(Header));
+ if (IsSwappedEndian) {
+ SwapValue(Header.Magic);
+ SwapValue(Header.CPUType);
+ SwapValue(Header.CPUSubtype);
+ SwapValue(Header.FileType);
+ SwapValue(Header.NumLoadCommands);
+ SwapValue(Header.SizeOfLoadCommands);
+ SwapValue(Header.Flags);
+ }
+
+ if (is64Bit()) {
+ memcpy(&Header64Ext, Buffer->getBuffer().data() + sizeof(Header),
+ sizeof(Header64Ext));
+ if (IsSwappedEndian) {
+ SwapValue(Header64Ext.Reserved);
+ }
+ }
+
+ // Create the load command array if sane.
+ if (getHeader().NumLoadCommands < (1 << 20))
+ LoadCommands = new LoadCommandInfo[getHeader().NumLoadCommands];
+}
+
+MachOObject::~MachOObject() {
+ delete [] LoadCommands;
+}
+
+MachOObject *MachOObject::LoadFromBuffer(MemoryBuffer *Buffer,
+ std::string *ErrorStr) {
+ // First, check the magic value and initialize the basic object info.
+ bool IsLittleEndian = false, Is64Bit = false;
+ StringRef Magic = Buffer->getBuffer().slice(0, 4);
+ if (Magic == "\xFE\xED\xFA\xCE") {
+ } else if (Magic == "\xCE\xFA\xED\xFE") {
+ IsLittleEndian = true;
+ } else if (Magic == "\xFE\xED\xFA\xCF") {
+ Is64Bit = true;
+ } else if (Magic == "\xCF\xFA\xED\xFE") {
+ IsLittleEndian = true;
+ Is64Bit = true;
+ } else {
+ if (ErrorStr) *ErrorStr = "not a Mach object file (invalid magic)";
+ return 0;
+ }
+
+ // Ensure that at least the full header is present.
+ unsigned HeaderSize = Is64Bit ? macho::Header64Size : macho::Header32Size;
+ if (Buffer->getBufferSize() < HeaderSize) {
+ if (ErrorStr) *ErrorStr = "not a Mach object file (invalid header)";
+ return 0;
+ }
+
+ OwningPtr<MachOObject> Object(new MachOObject(Buffer, IsLittleEndian,
+ Is64Bit));
+
+ // Check for bogus number of load commands.
+ if (Object->getHeader().NumLoadCommands >= (1 << 20)) {
+ if (ErrorStr) *ErrorStr = "not a Mach object file (unreasonable header)";
+ return 0;
+ }
+
+ if (ErrorStr) *ErrorStr = "";
+ return Object.take();
+}
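
LoadFromBuffer classifies the file purely by its 4-byte magic, setting the endianness and width flags before reading anything else. A standalone sketch of that dispatch (classify and Flavor are illustrative; the byte sequences are the Mach-O magic values used above):

#include <cassert>
#include <cstring>

struct Flavor { bool IsLittleEndian, Is64Bit, Valid; };

// Mirrors the magic comparison in LoadFromBuffer above.
static Flavor classify(const char *Magic) {
  if (!memcmp(Magic, "\xFE\xED\xFA\xCE", 4)) return {false, false, true};
  if (!memcmp(Magic, "\xCE\xFA\xED\xFE", 4)) return {true,  false, true};
  if (!memcmp(Magic, "\xFE\xED\xFA\xCF", 4)) return {false, true,  true};
  if (!memcmp(Magic, "\xCF\xFA\xED\xFE", 4)) return {true,  true,  true};
  return {false, false, false};
}

int main() {
  assert(classify("\xCF\xFA\xED\xFE").Is64Bit);         // 64-bit, little-endian
  assert(!classify("\xFE\xED\xFA\xCE").IsLittleEndian); // 32-bit, big-endian
  assert(!classify("ELF!").Valid);                      // not Mach-O
  return 0;
}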
+
+StringRef MachOObject::getData(size_t Offset, size_t Size) const {
+ return Buffer->getBuffer().substr(Offset,Size);
+}
+
+void MachOObject::RegisterStringTable(macho::SymtabLoadCommand &SLC) {
+ HasStringTable = true;
+ StringTable = Buffer->getBuffer().substr(SLC.StringTableOffset,
+ SLC.StringTableSize);
+}
+
+const MachOObject::LoadCommandInfo &
+MachOObject::getLoadCommandInfo(unsigned Index) const {
+ assert(Index < getHeader().NumLoadCommands && "Invalid index!");
+
+ // Load the command, if necessary.
+ if (Index >= NumLoadedCommands) {
+ uint64_t Offset;
+ if (Index == 0) {
+ Offset = getHeaderSize();
+ } else {
+ const LoadCommandInfo &Prev = getLoadCommandInfo(Index - 1);
+ Offset = Prev.Offset + Prev.Command.Size;
+ }
+
+ LoadCommandInfo &Info = LoadCommands[Index];
+ memcpy(&Info.Command, Buffer->getBuffer().data() + Offset,
+ sizeof(macho::LoadCommand));
+ if (IsSwappedEndian) {
+ SwapValue(Info.Command.Type);
+ SwapValue(Info.Command.Size);
+ }
+ Info.Offset = Offset;
+ NumLoadedCommands = Index + 1;
+ }
+
+ return LoadCommands[Index];
+}
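
getLoadCommandInfo memoizes commands as they are first requested, so a forward walk reads and byte-swaps each header exactly once. A hypothetical iteration sketch under that assumption (walkLoadCommands is illustrative):

// Assumes llvm/Object/MachOObject.h from this commit.
void walkLoadCommands(const llvm::object::MachOObject &MOO) {
  for (unsigned i = 0, e = MOO.getHeader().NumLoadCommands; i != e; ++i) {
    const llvm::object::MachOObject::LoadCommandInfo &LCI =
        MOO.getLoadCommandInfo(i);
    (void)LCI.Command.Type; // dispatch point, e.g. LCT_Segment, LCT_Symtab
  }
}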
+
+template<>
+void SwapStruct(macho::SegmentLoadCommand &Value) {
+ SwapValue(Value.Type);
+ SwapValue(Value.Size);
+ SwapValue(Value.VMAddress);
+ SwapValue(Value.VMSize);
+ SwapValue(Value.FileOffset);
+ SwapValue(Value.FileSize);
+ SwapValue(Value.MaxVMProtection);
+ SwapValue(Value.InitialVMProtection);
+ SwapValue(Value.NumSections);
+ SwapValue(Value.Flags);
+}
+void MachOObject::ReadSegmentLoadCommand(const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::SegmentLoadCommand> &Res) const {
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::Segment64LoadCommand &Value) {
+ SwapValue(Value.Type);
+ SwapValue(Value.Size);
+ SwapValue(Value.VMAddress);
+ SwapValue(Value.VMSize);
+ SwapValue(Value.FileOffset);
+ SwapValue(Value.FileSize);
+ SwapValue(Value.MaxVMProtection);
+ SwapValue(Value.InitialVMProtection);
+ SwapValue(Value.NumSections);
+ SwapValue(Value.Flags);
+}
+void MachOObject::ReadSegment64LoadCommand(const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::Segment64LoadCommand> &Res) const {
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::SymtabLoadCommand &Value) {
+ SwapValue(Value.Type);
+ SwapValue(Value.Size);
+ SwapValue(Value.SymbolTableOffset);
+ SwapValue(Value.NumSymbolTableEntries);
+ SwapValue(Value.StringTableOffset);
+ SwapValue(Value.StringTableSize);
+}
+void MachOObject::ReadSymtabLoadCommand(const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::SymtabLoadCommand> &Res) const {
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::DysymtabLoadCommand &Value) {
+ SwapValue(Value.Type);
+ SwapValue(Value.Size);
+ SwapValue(Value.LocalSymbolsIndex);
+ SwapValue(Value.NumLocalSymbols);
+ SwapValue(Value.ExternalSymbolsIndex);
+ SwapValue(Value.NumExternalSymbols);
+ SwapValue(Value.UndefinedSymbolsIndex);
+ SwapValue(Value.NumUndefinedSymbols);
+ SwapValue(Value.TOCOffset);
+ SwapValue(Value.NumTOCEntries);
+ SwapValue(Value.ModuleTableOffset);
+ SwapValue(Value.NumModuleTableEntries);
+ SwapValue(Value.ReferenceSymbolTableOffset);
+ SwapValue(Value.NumReferencedSymbolTableEntries);
+ SwapValue(Value.IndirectSymbolTableOffset);
+ SwapValue(Value.NumIndirectSymbolTableEntries);
+ SwapValue(Value.ExternalRelocationTableOffset);
+ SwapValue(Value.NumExternalRelocationTableEntries);
+ SwapValue(Value.LocalRelocationTableOffset);
+ SwapValue(Value.NumLocalRelocationTableEntries);
+}
+void MachOObject::ReadDysymtabLoadCommand(const LoadCommandInfo &LCI,
+ InMemoryStruct<macho::DysymtabLoadCommand> &Res) const {
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), LCI.Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::IndirectSymbolTableEntry &Value) {
+ SwapValue(Value.Index);
+}
+void
+MachOObject::ReadIndirectSymbolTableEntry(const macho::DysymtabLoadCommand &DLC,
+ unsigned Index,
+ InMemoryStruct<macho::IndirectSymbolTableEntry> &Res) const {
+ uint64_t Offset = (DLC.IndirectSymbolTableOffset +
+ Index * sizeof(macho::IndirectSymbolTableEntry));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::Section &Value) {
+ SwapValue(Value.Address);
+ SwapValue(Value.Size);
+ SwapValue(Value.Offset);
+ SwapValue(Value.Align);
+ SwapValue(Value.RelocationTableOffset);
+ SwapValue(Value.NumRelocationTableEntries);
+ SwapValue(Value.Flags);
+ SwapValue(Value.Reserved1);
+ SwapValue(Value.Reserved2);
+}
+void MachOObject::ReadSection(const LoadCommandInfo &LCI,
+ unsigned Index,
+ InMemoryStruct<macho::Section> &Res) const {
+ assert(LCI.Command.Type == macho::LCT_Segment &&
+ "Unexpected load command info!");
+ uint64_t Offset = (LCI.Offset + sizeof(macho::SegmentLoadCommand) +
+ Index * sizeof(macho::Section));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::Section64 &Value) {
+ SwapValue(Value.Address);
+ SwapValue(Value.Size);
+ SwapValue(Value.Offset);
+ SwapValue(Value.Align);
+ SwapValue(Value.RelocationTableOffset);
+ SwapValue(Value.NumRelocationTableEntries);
+ SwapValue(Value.Flags);
+ SwapValue(Value.Reserved1);
+ SwapValue(Value.Reserved2);
+ SwapValue(Value.Reserved3);
+}
+void MachOObject::ReadSection64(const LoadCommandInfo &LCI,
+ unsigned Index,
+ InMemoryStruct<macho::Section64> &Res) const {
+ assert(LCI.Command.Type == macho::LCT_Segment64 &&
+ "Unexpected load command info!");
+ uint64_t Offset = (LCI.Offset + sizeof(macho::Segment64LoadCommand) +
+ Index * sizeof(macho::Section64));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::RelocationEntry &Value) {
+ SwapValue(Value.Word0);
+ SwapValue(Value.Word1);
+}
+void MachOObject::ReadRelocationEntry(uint64_t RelocationTableOffset,
+ unsigned Index,
+ InMemoryStruct<macho::RelocationEntry> &Res) const {
+ uint64_t Offset = (RelocationTableOffset +
+ Index * sizeof(macho::RelocationEntry));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::SymbolTableEntry &Value) {
+ SwapValue(Value.StringIndex);
+ SwapValue(Value.Flags);
+ SwapValue(Value.Value);
+}
+void MachOObject::ReadSymbolTableEntry(uint64_t SymbolTableOffset,
+ unsigned Index,
+ InMemoryStruct<macho::SymbolTableEntry> &Res) const {
+ uint64_t Offset = (SymbolTableOffset +
+ Index * sizeof(macho::SymbolTableEntry));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
+
+template<>
+void SwapStruct(macho::Symbol64TableEntry &Value) {
+ SwapValue(Value.StringIndex);
+ SwapValue(Value.Flags);
+ SwapValue(Value.Value);
+}
+void MachOObject::ReadSymbol64TableEntry(uint64_t SymbolTableOffset,
+ unsigned Index,
+ InMemoryStruct<macho::Symbol64TableEntry> &Res) const {
+ uint64_t Offset = (SymbolTableOffset +
+ Index * sizeof(macho::Symbol64TableEntry));
+ ReadInMemoryStruct(*this, Buffer->getBuffer(), Offset, Res);
+}
diff --git a/contrib/llvm/lib/Object/Makefile b/contrib/llvm/lib/Object/Makefile
new file mode 100644
index 0000000..79388dc
--- /dev/null
+++ b/contrib/llvm/lib/Object/Makefile
@@ -0,0 +1,14 @@
+##===- lib/Object/Makefile ---------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../..
+LIBRARYNAME = LLVMObject
+BUILD_ARCHIVE := 1
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Object/ObjectFile.cpp b/contrib/llvm/lib/Object/ObjectFile.cpp
new file mode 100644
index 0000000..161ae3a
--- /dev/null
+++ b/contrib/llvm/lib/Object/ObjectFile.cpp
@@ -0,0 +1,71 @@
+//===- ObjectFile.cpp - File format independent object file -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a file format independent ObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+
+using namespace llvm;
+using namespace object;
+
+ObjectFile::ObjectFile(MemoryBuffer *Object)
+ : MapFile(Object) {
+ assert(MapFile && "Must be a valid MemoryBuffer!");
+ base = reinterpret_cast<const uint8_t *>(MapFile->getBufferStart());
+}
+
+ObjectFile::~ObjectFile() {
+ delete MapFile;
+}
+
+StringRef ObjectFile::getFilename() const {
+ return MapFile->getBufferIdentifier();
+}
+
+ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) {
+ if (!Object || Object->getBufferSize() < 64)
+ return 0;
+ sys::LLVMFileType type = sys::IdentifyFileType(Object->getBufferStart(),
+ static_cast<unsigned>(Object->getBufferSize()));
+ switch (type) {
+ case sys::ELF_Relocatable_FileType:
+ case sys::ELF_Executable_FileType:
+ case sys::ELF_SharedObject_FileType:
+ case sys::ELF_Core_FileType:
+ return createELFObjectFile(Object);
+ case sys::Mach_O_Object_FileType:
+ case sys::Mach_O_Executable_FileType:
+ case sys::Mach_O_FixedVirtualMemorySharedLib_FileType:
+ case sys::Mach_O_Core_FileType:
+ case sys::Mach_O_PreloadExecutable_FileType:
+ case sys::Mach_O_DynamicallyLinkedSharedLib_FileType:
+ case sys::Mach_O_DynamicLinker_FileType:
+ case sys::Mach_O_Bundle_FileType:
+ case sys::Mach_O_DynamicallyLinkedSharedLibStub_FileType:
+ return 0;
+ case sys::COFF_FileType:
+ return createCOFFObjectFile(Object);
+ default:
+ llvm_unreachable("Unknown Object File Type");
+ }
+}
+
+ObjectFile *ObjectFile::createObjectFile(StringRef ObjectPath) {
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFile(ObjectPath, File))
+ return NULL;
+ return createObjectFile(File.take());
+}
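
A hypothetical caller sketch for the path-based factory above (isObjectFile is illustrative); the factory returns a heap-allocated reader, Mach-O recognition currently returns 0, and ownership passes to the caller:

#include "llvm/Object/ObjectFile.h"

bool isObjectFile(llvm::StringRef Path) {
  llvm::object::ObjectFile *O =
      llvm::object::ObjectFile::createObjectFile(Path);
  bool Known = O != 0;
  delete O; // caller owns the result
  return Known;
}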
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index b87ddf9..e765ba0 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -175,7 +175,7 @@ totalExponent(StringRef::iterator p, StringRef::iterator end,
{
int unsignedExponent;
bool negative, overflow;
- int exponent;
+ int exponent = 0;
assert(p != end && "Exponent has no digits");
@@ -194,11 +194,11 @@ totalExponent(StringRef::iterator p, StringRef::iterator end,
assert(value < 10U && "Invalid character in exponent");
unsignedExponent = unsignedExponent * 10 + value;
- if (unsignedExponent > 65535)
+ if (unsignedExponent > 32767)
overflow = true;
}
- if (exponentAdjustment > 65535 || exponentAdjustment < -65536)
+ if (exponentAdjustment > 32767 || exponentAdjustment < -32768)
overflow = true;
if (!overflow) {
@@ -206,12 +206,12 @@ totalExponent(StringRef::iterator p, StringRef::iterator end,
if (negative)
exponent = -exponent;
exponent += exponentAdjustment;
- if (exponent > 65535 || exponent < -65536)
+ if (exponent > 32767 || exponent < -32768)
overflow = true;
}
if (overflow)
- exponent = negative ? -65536: 65535;
+ exponent = negative ? -32768: 32767;
return exponent;
}
@@ -3197,6 +3197,12 @@ APFloat::initFromAPInt(const APInt& api, bool isIEEE)
llvm_unreachable(0);
}
+APFloat
+APFloat::getAllOnesValue(unsigned BitWidth, bool isIEEE)
+{
+ return APFloat(APInt::getAllOnesValue(BitWidth), isIEEE);
+}
+
APFloat APFloat::getLargest(const fltSemantics &Sem, bool Negative) {
APFloat Val(Sem, fcNormal, Negative);
@@ -3258,14 +3264,12 @@ APFloat::APFloat(const APInt& api, bool isIEEE)
APFloat::APFloat(float f)
{
- APInt api = APInt(32, 0);
- initFromAPInt(api.floatToBits(f));
+ initFromAPInt(APInt::floatToBits(f));
}
APFloat::APFloat(double d)
{
- APInt api = APInt(64, 0);
- initFromAPInt(api.doubleToBits(d));
+ initFromAPInt(APInt::doubleToBits(d));
}
namespace {
@@ -3312,7 +3316,7 @@ namespace {
// Truncate the significand down to its active bit count, but
// don't try to drop below 32.
unsigned newPrecision = std::max(32U, significand.getActiveBits());
- significand.trunc(newPrecision);
+ significand = significand.trunc(newPrecision);
}
@@ -3417,7 +3421,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
// Nothing to do.
} else if (exp > 0) {
// Just shift left.
- significand.zext(semantics->precision + exp);
+ significand = significand.zext(semantics->precision + exp);
significand <<= exp;
exp = 0;
} else { /* exp < 0 */
@@ -3436,7 +3440,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
// Multiply significand by 5^e.
// N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
- significand.zext(precision);
+ significand = significand.zext(precision);
APInt five_to_the_i(precision, 5);
while (true) {
if (texp & 1) significand *= five_to_the_i;
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 8a212a2..7703342 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -361,7 +361,7 @@ APInt& APInt::operator*=(const APInt& RHS) {
unsigned rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1;
if (!rhsWords) {
// X * 0 ===> 0
- clear();
+ clearAllBits();
return *this;
}
@@ -373,7 +373,7 @@ APInt& APInt::operator*=(const APInt& RHS) {
mul(dest, pVal, lhsWords, RHS.pVal, rhsWords);
// Copy result back into *this
- clear();
+ clearAllBits();
unsigned wordsToCopy = destWords >= getNumWords() ? getNumWords() : destWords;
memcpy(pVal, dest, wordsToCopy * APINT_WORD_SIZE);
@@ -483,6 +483,7 @@ APInt APInt::operator-(const APInt& RHS) const {
}
bool APInt::operator[](unsigned bitPosition) const {
+ assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
return (maskBit(bitPosition) &
(isSingleWord() ? VAL : pVal[whichWord(bitPosition)])) != 0;
}
@@ -561,12 +562,12 @@ bool APInt::slt(const APInt& RHS) const {
bool rhsNeg = rhs.isNegative();
if (lhsNeg) {
// Sign bit is set so perform two's complement to make it positive
- lhs.flip();
+ lhs.flipAllBits();
lhs++;
}
if (rhsNeg) {
// Sign bit is set so perform two's complement to make it positive
- rhs.flip();
+ rhs.flipAllBits();
rhs++;
}
@@ -583,22 +584,20 @@ bool APInt::slt(const APInt& RHS) const {
return lhs.ult(rhs);
}
-APInt& APInt::set(unsigned bitPosition) {
+void APInt::setBit(unsigned bitPosition) {
if (isSingleWord())
VAL |= maskBit(bitPosition);
else
pVal[whichWord(bitPosition)] |= maskBit(bitPosition);
- return *this;
}
/// Set the given bit to 0 whose position is given as "bitPosition".
/// @brief Set a given bit to 0.
-APInt& APInt::clear(unsigned bitPosition) {
+void APInt::clearBit(unsigned bitPosition) {
if (isSingleWord())
VAL &= ~maskBit(bitPosition);
else
pVal[whichWord(bitPosition)] &= ~maskBit(bitPosition);
- return *this;
}
/// @brief Toggle every bit to its opposite value.
@@ -606,11 +605,10 @@ APInt& APInt::clear(unsigned bitPosition) {
/// Toggle a given bit to its opposite value whose position is given
/// as "bitPosition".
/// @brief Toggles a given bit to its opposite value.
-APInt& APInt::flip(unsigned bitPosition) {
+void APInt::flipBit(unsigned bitPosition) {
assert(bitPosition < BitWidth && "Out of the bit-width range!");
- if ((*this)[bitPosition]) clear(bitPosition);
- else set(bitPosition);
- return *this;
+ if ((*this)[bitPosition]) clearBit(bitPosition);
+ else setBit(bitPosition);
}
unsigned APInt::getBitsNeeded(StringRef str, uint8_t radix) {
@@ -761,10 +759,6 @@ APInt APInt::getLoBits(unsigned numBits) const {
BitWidth - numBits);
}
-bool APInt::isPowerOf2() const {
- return (!!*this) && !(*this & (*this - APInt(BitWidth,1)));
-}
-
unsigned APInt::countLeadingZerosSlowCase() const {
 // Treat the most significant word differently because it might have
// meaningless bits set beyond the precision.
@@ -1001,96 +995,90 @@ double APInt::roundToDouble(bool isSigned) const {
}
// Truncate to new width.
-APInt &APInt::trunc(unsigned width) {
+APInt APInt::trunc(unsigned width) const {
assert(width < BitWidth && "Invalid APInt Truncate request");
assert(width && "Can't truncate to 0 bits");
- unsigned wordsBefore = getNumWords();
- BitWidth = width;
- unsigned wordsAfter = getNumWords();
- if (wordsBefore != wordsAfter) {
- if (wordsAfter == 1) {
- uint64_t *tmp = pVal;
- VAL = pVal[0];
- delete [] tmp;
- } else {
- uint64_t *newVal = getClearedMemory(wordsAfter);
- for (unsigned i = 0; i < wordsAfter; ++i)
- newVal[i] = pVal[i];
- delete [] pVal;
- pVal = newVal;
- }
- }
- return clearUnusedBits();
+
+ if (width <= APINT_BITS_PER_WORD)
+ return APInt(width, getRawData()[0]);
+
+ APInt Result(getMemory(getNumWords(width)), width);
+
+ // Copy full words.
+ unsigned i;
+ for (i = 0; i != width / APINT_BITS_PER_WORD; i++)
+ Result.pVal[i] = pVal[i];
+
+ // Truncate and copy any partial word.
+ unsigned bits = (0 - width) % APINT_BITS_PER_WORD;
+ if (bits != 0)
+ Result.pVal[i] = pVal[i] << bits >> bits;
+
+ return Result;
}
// Sign extend to a new width.
-APInt &APInt::sext(unsigned width) {
+APInt APInt::sext(unsigned width) const {
assert(width > BitWidth && "Invalid APInt SignExtend request");
- // If the sign bit isn't set, this is the same as zext.
- if (!isNegative()) {
- zext(width);
- return *this;
+
+ if (width <= APINT_BITS_PER_WORD) {
+ uint64_t val = VAL << (APINT_BITS_PER_WORD - BitWidth);
+ val = (int64_t)val >> (width - BitWidth);
+ return APInt(width, val >> (APINT_BITS_PER_WORD - width));
}
- // The sign bit is set. First, get some facts
- unsigned wordsBefore = getNumWords();
- unsigned wordBits = BitWidth % APINT_BITS_PER_WORD;
- BitWidth = width;
- unsigned wordsAfter = getNumWords();
-
- // Mask the high order word appropriately
- if (wordsBefore == wordsAfter) {
- unsigned newWordBits = width % APINT_BITS_PER_WORD;
- // The extension is contained to the wordsBefore-1th word.
- uint64_t mask = ~0ULL;
- if (newWordBits)
- mask >>= APINT_BITS_PER_WORD - newWordBits;
- mask <<= wordBits;
- if (wordsBefore == 1)
- VAL |= mask;
- else
- pVal[wordsBefore-1] |= mask;
- return clearUnusedBits();
+ APInt Result(getMemory(getNumWords(width)), width);
+
+ // Copy full words.
+ unsigned i;
+ uint64_t word = 0;
+ for (i = 0; i != BitWidth / APINT_BITS_PER_WORD; i++) {
+ word = getRawData()[i];
+ Result.pVal[i] = word;
}
- uint64_t mask = wordBits == 0 ? 0 : ~0ULL << wordBits;
- uint64_t *newVal = getMemory(wordsAfter);
- if (wordsBefore == 1)
- newVal[0] = VAL | mask;
- else {
- for (unsigned i = 0; i < wordsBefore; ++i)
- newVal[i] = pVal[i];
- newVal[wordsBefore-1] |= mask;
+ // Read and sign-extend any partial word.
+ unsigned bits = (0 - BitWidth) % APINT_BITS_PER_WORD;
+ if (bits != 0)
+ word = (int64_t)getRawData()[i] << bits >> bits;
+ else
+ word = (int64_t)word >> (APINT_BITS_PER_WORD - 1);
+
+ // Write remaining full words.
+ for (; i != width / APINT_BITS_PER_WORD; i++) {
+ Result.pVal[i] = word;
+ word = (int64_t)word >> (APINT_BITS_PER_WORD - 1);
}
- for (unsigned i = wordsBefore; i < wordsAfter; i++)
- newVal[i] = -1ULL;
- if (wordsBefore != 1)
- delete [] pVal;
- pVal = newVal;
- return clearUnusedBits();
+
+ // Write any partial word.
+ bits = (0 - width) % APINT_BITS_PER_WORD;
+ if (bits != 0)
+ Result.pVal[i] = word << bits >> bits;
+
+ return Result;
}
// Zero extend to a new width.
-APInt &APInt::zext(unsigned width) {
+APInt APInt::zext(unsigned width) const {
assert(width > BitWidth && "Invalid APInt ZeroExtend request");
- unsigned wordsBefore = getNumWords();
- BitWidth = width;
- unsigned wordsAfter = getNumWords();
- if (wordsBefore != wordsAfter) {
- uint64_t *newVal = getClearedMemory(wordsAfter);
- if (wordsBefore == 1)
- newVal[0] = VAL;
- else
- for (unsigned i = 0; i < wordsBefore; ++i)
- newVal[i] = pVal[i];
- if (wordsBefore != 1)
- delete [] pVal;
- pVal = newVal;
- }
- return *this;
+
+ if (width <= APINT_BITS_PER_WORD)
+ return APInt(width, VAL);
+
+ APInt Result(getMemory(getNumWords(width)), width);
+
+ // Copy words.
+ unsigned i;
+ for (i = 0; i != getNumWords(); i++)
+ Result.pVal[i] = getRawData()[i];
+
+ // Zero remaining words.
+ memset(&Result.pVal[i], 0, (Result.getNumWords() - i) * APINT_WORD_SIZE);
+
+ return Result;
}
-APInt &APInt::zextOrTrunc(unsigned width) {
+APInt APInt::zextOrTrunc(unsigned width) const {
if (BitWidth < width)
return zext(width);
if (BitWidth > width)
@@ -1098,7 +1086,7 @@ APInt &APInt::zextOrTrunc(unsigned width) {
return *this;
}
-APInt &APInt::sextOrTrunc(unsigned width) {
+APInt APInt::sextOrTrunc(unsigned width) const {
if (BitWidth < width)
return sext(width);
if (BitWidth > width)
@@ -1873,7 +1861,7 @@ void APInt::divide(const APInt LHS, unsigned lhsWords,
if (!Quotient->isSingleWord())
Quotient->pVal = getClearedMemory(Quotient->getNumWords());
} else
- Quotient->clear();
+ Quotient->clearAllBits();
// The quotient is in Q. Reconstitute the quotient into Quotient's low
// order words.
@@ -1904,7 +1892,7 @@ void APInt::divide(const APInt LHS, unsigned lhsWords,
if (!Remainder->isSingleWord())
Remainder->pVal = getClearedMemory(Remainder->getNumWords());
} else
- Remainder->clear();
+ Remainder->clearAllBits();
// The remainder is in R. Reconstitute the remainder into Remainder's low
// order words.
@@ -2046,6 +2034,64 @@ void APInt::udivrem(const APInt &LHS, const APInt &RHS,
divide(LHS, lhsWords, RHS, rhsWords, &Quotient, &Remainder);
}
+APInt APInt::sadd_ov(const APInt &RHS, bool &Overflow) const {
+ APInt Res = *this+RHS;
+ Overflow = isNonNegative() == RHS.isNonNegative() &&
+ Res.isNonNegative() != isNonNegative();
+ return Res;
+}
+
+APInt APInt::uadd_ov(const APInt &RHS, bool &Overflow) const {
+ APInt Res = *this+RHS;
+ Overflow = Res.ult(RHS);
+ return Res;
+}
+
+APInt APInt::ssub_ov(const APInt &RHS, bool &Overflow) const {
+ APInt Res = *this - RHS;
+ Overflow = isNonNegative() != RHS.isNonNegative() &&
+ Res.isNonNegative() != isNonNegative();
+ return Res;
+}
+
+APInt APInt::usub_ov(const APInt &RHS, bool &Overflow) const {
+ APInt Res = *this-RHS;
+ Overflow = Res.ugt(*this);
+ return Res;
+}
+
+APInt APInt::sdiv_ov(const APInt &RHS, bool &Overflow) const {
+ // MININT/-1 --> overflow.
+ Overflow = isMinSignedValue() && RHS.isAllOnesValue();
+ return sdiv(RHS);
+}
+
+APInt APInt::smul_ov(const APInt &RHS, bool &Overflow) const {
+ APInt Res = *this * RHS;
+
+ if (*this != 0 && RHS != 0)
+ Overflow = Res.sdiv(RHS) != *this || Res.sdiv(*this) != RHS;
+ else
+ Overflow = false;
+ return Res;
+}
+
+APInt APInt::sshl_ov(unsigned ShAmt, bool &Overflow) const {
+ Overflow = ShAmt >= getBitWidth();
+ if (Overflow)
+ ShAmt = getBitWidth()-1;
+
+ if (isNonNegative()) // Don't allow sign change.
+ Overflow = ShAmt >= countLeadingZeros();
+ else
+ Overflow = ShAmt >= countLeadingOnes();
+
+ return *this << ShAmt;
+}
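
sadd_ov flags signed overflow when two same-sign operands produce an opposite-sign result. A standalone 8-bit sketch of the same rule (saddOverflows is illustrative):

#include <cassert>
#include <cstdint>

// Same-sign operands that yield an opposite-sign result indicate signed
// overflow, the rule sadd_ov applies above.
static bool saddOverflows(int8_t a, int8_t b) {
  int8_t r = (int8_t)((uint8_t)a + (uint8_t)b); // wraps like APInt addition
  return ((a >= 0) == (b >= 0)) && ((r >= 0) != (a >= 0));
}

int main() {
  assert(saddOverflows(100, 100));   // 200 wraps to -56
  assert(!saddOverflows(100, -100)); // mixed signs cannot overflow
  return 0;
}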
+
void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// Check our assumptions here
assert(!str.empty() && "Invalid string length");
@@ -2101,7 +2147,7 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// If its negative, put it in two's complement form
if (isNeg) {
(*this)--;
- this->flip();
+ this->flipAllBits();
}
}
@@ -2149,7 +2195,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
// They want to print the signed version and it is a negative value
// Flip the bits and add one to turn it into the equivalent positive
// value and put a '-' in the result.
- Tmp.flip();
+ Tmp.flipAllBits();
Tmp++;
Str.push_back('-');
}
diff --git a/contrib/llvm/lib/Support/Allocator.cpp b/contrib/llvm/lib/Support/Allocator.cpp
index 90df262..5e27df6 100644
--- a/contrib/llvm/lib/Support/Allocator.cpp
+++ b/contrib/llvm/lib/Support/Allocator.cpp
@@ -12,10 +12,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Allocator.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Recycler.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Memory.h"
+#include "llvm/Support/Memory.h"
#include <cstring>
namespace llvm {
@@ -44,6 +44,12 @@ char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab. Modifies CurPtr and End.
void BumpPtrAllocator::StartNewSlab() {
+ // If we have already allocated a large number of slabs, it's likely that
+ // we're going to allocate more. Increase the slab size to reduce the number
+ // of mallocs and possibly memory overhead. The factors are chosen
+ // conservatively to avoid overallocation.
+ if (BytesAllocated >= SlabSize * 128)
+ SlabSize *= 2;
+
MemSlab *NewSlab = Allocator.Allocate(SlabSize);
NewSlab->NextPtr = CurSlab;
CurSlab = NewSlab;
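
The policy above doubles SlabSize once total allocation reaches 128 slabs' worth of the current size. A standalone model of that arithmetic (nextSlabSize is illustrative, and the 4096-byte initial slab is an assumption):

#include <cassert>
#include <cstddef>

// Double the slab size once total allocation reaches 128 slabs' worth.
static size_t nextSlabSize(size_t slabSize, size_t bytesAllocated) {
  return bytesAllocated >= slabSize * 128 ? slabSize * 2 : slabSize;
}

int main() {
  // With a 4096-byte initial slab, the size doubles at 512 KiB allocated.
  assert(nextSlabSize(4096, 512 * 1024 - 1) == 4096);
  assert(nextSlabSize(4096, 512 * 1024) == 8192);
  return 0;
}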
diff --git a/contrib/llvm/lib/System/Atomic.cpp b/contrib/llvm/lib/Support/Atomic.cpp
index 7ba8b77..c7b4bff 100644
--- a/contrib/llvm/lib/System/Atomic.cpp
+++ b/contrib/llvm/lib/Support/Atomic.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Atomic.h"
+#include "llvm/Support/Atomic.h"
#include "llvm/Config/config.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index ae66110..7e74499 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -22,9 +22,10 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
#include "llvm/Target/TargetRegistry.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
@@ -179,6 +180,45 @@ static Option *LookupOption(StringRef &Arg, StringRef &Value,
return I->second;
}
+/// LookupNearestOption - Look up the closest match to the option specified
+/// on the command line. If a value is given (after an equal sign), it is
+/// stripped before matching. This assumes that leading dashes have already
+/// been stripped.
+static Option *LookupNearestOption(StringRef Arg,
+ const StringMap<Option*> &OptionsMap,
+ const char *&NearestString) {
+ // Reject all dashes.
+ if (Arg.empty()) return 0;
+
+ // Split on any equal sign.
+ StringRef LHS = Arg.split('=').first;
+
+ // Find the closest match.
+ Option *Best = 0;
+ unsigned BestDistance = 0;
+ for (StringMap<Option*>::const_iterator it = OptionsMap.begin(),
+ ie = OptionsMap.end(); it != ie; ++it) {
+ Option *O = it->second;
+ SmallVector<const char*, 16> OptionNames;
+ O->getExtraOptionNames(OptionNames);
+ if (O->ArgStr[0])
+ OptionNames.push_back(O->ArgStr);
+
+ for (size_t i = 0, e = OptionNames.size(); i != e; ++i) {
+ StringRef Name = OptionNames[i];
+ unsigned Distance = StringRef(Name).edit_distance(
+ LHS, /*AllowReplacements=*/true, /*MaxEditDistance=*/BestDistance);
+ if (!Best || Distance < BestDistance) {
+ Best = O;
+ NearestString = OptionNames[i];
+ BestDistance = Distance;
+ }
+ }
+ }
+
+ return Best;
+}
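
LookupNearestOption ranks every registered option name by edit distance and keeps the running best, which later drives the "Did you mean" hint. A standalone sketch with a minimal Levenshtein implementation as an illustrative stand-in for StringRef::edit_distance:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// editDistance is illustrative; single-row dynamic programming Levenshtein.
static unsigned editDistance(const std::string &A, const std::string &B) {
  std::vector<unsigned> Row(B.size() + 1);
  for (unsigned j = 0; j <= B.size(); ++j)
    Row[j] = j;
  for (unsigned i = 1; i <= A.size(); ++i) {
    unsigned Diag = Row[0]; // distance for (i-1, j-1)
    Row[0] = i;
    for (unsigned j = 1; j <= B.size(); ++j) {
      unsigned Up = Row[j]; // distance for (i-1, j)
      Row[j] = std::min(std::min(Up + 1, Row[j - 1] + 1),
                        Diag + (A[i - 1] != B[j - 1] ? 1u : 0u));
      Diag = Up;
    }
  }
  return Row[B.size()];
}

int main() {
  // A mistyped "halp" is distance 1 from "help", so "help" gets suggested.
  assert(editDistance("halp", "help") == 1);
  assert(editDistance("version", "help") > 1);
  return 0;
}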
+
/// CommaSeparateAndAddOccurence - A wrapper around Handler->addOccurence() that
/// does special handling of cl::CommaSeparated options.
static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos,
@@ -463,10 +503,6 @@ static void ExpandResponseFiles(unsigned argc, char** argv,
const sys::FileStatus *FileStat = respFile.getFileStatus();
if (FileStat && FileStat->getSize() != 0) {
- // Mmap the response file into memory.
- OwningPtr<MemoryBuffer>
- respFilePtr(MemoryBuffer::getFile(respFile.c_str()));
-
// If we could open the file, parse its contents, otherwise
// pass the @file option verbatim.
@@ -475,7 +511,9 @@ static void ExpandResponseFiles(unsigned argc, char** argv,
// itself contain additional @file options; any such options will be
// processed recursively.")
- if (respFilePtr != 0) {
+ // Mmap the response file into memory.
+ OwningPtr<MemoryBuffer> respFilePtr;
+ if (!MemoryBuffer::getFile(respFile.c_str(), respFilePtr)) {
ParseCStringVector(newArgv, respFilePtr->getBufferStart());
continue;
}
@@ -506,7 +544,7 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
}
// Copy the program name into ProgName, making sure not to overflow it.
- std::string ProgName = sys::Path(argv[0]).getLast();
+ std::string ProgName = sys::path::filename(argv[0]);
size_t Len = std::min(ProgName.size(), size_t(79));
memcpy(ProgramName, ProgName.data(), Len);
ProgramName[Len] = '\0';
@@ -572,6 +610,8 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
bool DashDashFound = false; // Have we read '--'?
for (int i = 1; i < argc; ++i) {
Option *Handler = 0;
+ Option *NearestHandler = 0;
+ const char *NearestHandlerString = 0;
StringRef Value;
StringRef ArgName = "";
@@ -645,12 +685,25 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
if (Handler == 0)
Handler = HandlePrefixedOrGroupedOption(ArgName, Value,
ErrorParsing, Opts);
+
+ // Otherwise, look for the closest available option to report to the user
+ // in the upcoming error.
+ if (Handler == 0 && SinkOpts.empty())
+ NearestHandler = LookupNearestOption(ArgName, Opts,
+ NearestHandlerString);
}
if (Handler == 0) {
if (SinkOpts.empty()) {
errs() << ProgramName << ": Unknown command line argument '"
<< argv[i] << "'. Try: '" << argv[0] << " -help'\n";
+
+ if (NearestHandler) {
+ // If we know a near match, report it as well.
+ errs() << ProgramName << ": Did you mean '-"
+ << NearestHandlerString << "'?\n";
+ }
+
ErrorParsing = true;
} else {
for (SmallVectorImpl<Option*>::iterator I = SinkOpts.begin(),
@@ -765,6 +818,15 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
}
}
+ // Now that we know if -debug is specified, we can use it.
+ // Note that if ReadResponseFiles == true, this must be done before the
+ // memory allocated for the expanded command line is free()d below.
+ DEBUG(dbgs() << "Args: ";
+ for (int i = 0; i < argc; ++i)
+ dbgs() << argv[i] << ' ';
+ dbgs() << '\n';
+ );
+
// Free all of the memory allocated to the map. Command line options may only
// be processed once!
Opts.clear();
@@ -779,12 +841,6 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
free(*i);
}
- DEBUG(dbgs() << "Args: ";
- for (int i = 0; i < argc; ++i)
- dbgs() << argv[i] << ' ';
- dbgs() << '\n';
- );
-
// If we had an error processing our arguments, don't let the program execute
if (ErrorParsing) exit(1);
}
diff --git a/contrib/llvm/lib/Support/ConstantRange.cpp b/contrib/llvm/lib/Support/ConstantRange.cpp
index 8ef3785..493f708 100644
--- a/contrib/llvm/lib/Support/ConstantRange.cpp
+++ b/contrib/llvm/lib/Support/ConstantRange.cpp
@@ -51,6 +51,9 @@ ConstantRange::ConstantRange(const APInt &L, const APInt &U) :
ConstantRange ConstantRange::makeICmpRegion(unsigned Pred,
const ConstantRange &CR) {
+ if (CR.isEmptySet())
+ return CR;
+
uint32_t W = CR.getBitWidth();
switch (Pred) {
default: assert(!"Invalid ICmp predicate to makeICmpRegion()");
@@ -60,10 +63,18 @@ ConstantRange ConstantRange::makeICmpRegion(unsigned Pred,
if (CR.isSingleElement())
return ConstantRange(CR.getUpper(), CR.getLower());
return ConstantRange(W);
- case ICmpInst::ICMP_ULT:
- return ConstantRange(APInt::getMinValue(W), CR.getUnsignedMax());
- case ICmpInst::ICMP_SLT:
- return ConstantRange(APInt::getSignedMinValue(W), CR.getSignedMax());
+ case ICmpInst::ICMP_ULT: {
+ APInt UMax(CR.getUnsignedMax());
+ if (UMax.isMinValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(APInt::getMinValue(W), UMax);
+ }
+ case ICmpInst::ICMP_SLT: {
+ APInt SMax(CR.getSignedMax());
+ if (SMax.isMinSignedValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(APInt::getSignedMinValue(W), SMax);
+ }
case ICmpInst::ICMP_ULE: {
APInt UMax(CR.getUnsignedMax());
if (UMax.isMaxValue())
@@ -72,15 +83,22 @@ ConstantRange ConstantRange::makeICmpRegion(unsigned Pred,
}
case ICmpInst::ICMP_SLE: {
APInt SMax(CR.getSignedMax());
- if (SMax.isMaxSignedValue() || (SMax+1).isMaxSignedValue())
+ if (SMax.isMaxSignedValue())
return ConstantRange(W);
return ConstantRange(APInt::getSignedMinValue(W), SMax + 1);
}
- case ICmpInst::ICMP_UGT:
- return ConstantRange(CR.getUnsignedMin() + 1, APInt::getNullValue(W));
- case ICmpInst::ICMP_SGT:
- return ConstantRange(CR.getSignedMin() + 1,
- APInt::getSignedMinValue(W));
+ case ICmpInst::ICMP_UGT: {
+ APInt UMin(CR.getUnsignedMin());
+ if (UMin.isMaxValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(UMin + 1, APInt::getNullValue(W));
+ }
+ case ICmpInst::ICMP_SGT: {
+ APInt SMin(CR.getSignedMin());
+ if (SMin.isMaxSignedValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(SMin + 1, APInt::getSignedMinValue(W));
+ }
case ICmpInst::ICMP_UGE: {
APInt UMin(CR.getUnsignedMin());
if (UMin.isMinValue())
@@ -115,6 +133,14 @@ bool ConstantRange::isWrappedSet() const {
return Lower.ugt(Upper);
}
+/// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
+/// its bitwidth, for example: i8 [120, 140).
+///
+bool ConstantRange::isSignWrappedSet() const {
+ return contains(APInt::getSignedMaxValue(getBitWidth())) &&
+ contains(APInt::getSignedMinValue(getBitWidth()));
+}
+
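Working through the comment's i8 [120, 140) example: 140 wraps to -116, so the set covers 120..127 and -128..-117 and therefore contains both the signed max and the signed min. A standalone check (contains is illustrative):

#include <cassert>
#include <cstdint>

// Half-open interval [lo, hi) over the 8-bit ring, wrapping when lo > hi.
static bool contains(uint8_t lo, uint8_t hi, uint8_t x) {
  return lo <= hi ? (lo <= x && x < hi) : (x >= lo || x < hi);
}

int main() {
  uint8_t lo = 120, hi = 140; // 140 is 0x8C, i.e. -116 as int8_t
  assert(contains(lo, hi, 127));           // INT8_MAX is in the set
  assert(contains(lo, hi, (uint8_t)-128)); // INT8_MIN (0x80) is too
  return 0;
}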
/// getSetSize - Return the number of elements in this set.
///
APInt ConstantRange::getSetSize() const {
@@ -408,15 +434,15 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
/// correspond to the possible range of values as if the source range had been
/// zero extended.
ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
+
unsigned SrcTySize = getBitWidth();
assert(SrcTySize < DstTySize && "Not a value extension");
- if (isFullSet())
- // Change a source full set into [0, 1 << 8*numbytes)
+ if (isFullSet() || isWrappedSet())
+ // Change into [0, 1 << src bit width)
return ConstantRange(APInt(DstTySize,0), APInt(DstTySize,1).shl(SrcTySize));
- APInt L = Lower; L.zext(DstTySize);
- APInt U = Upper; U.zext(DstTySize);
- return ConstantRange(L, U);
+ return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
}
/// signExtend - Return a new range in the specified integer type, which must
@@ -424,16 +450,16 @@ ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
/// correspond to the possible range of values as if the source range had been
/// sign extended.
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
+
unsigned SrcTySize = getBitWidth();
assert(SrcTySize < DstTySize && "Not a value extension");
- if (isFullSet()) {
+ if (isFullSet() || isSignWrappedSet()) {
return ConstantRange(APInt::getHighBitsSet(DstTySize,DstTySize-SrcTySize+1),
APInt::getLowBitsSet(DstTySize, SrcTySize-1) + 1);
}
- APInt L = Lower; L.sext(DstTySize);
- APInt U = Upper; U.sext(DstTySize);
- return ConstantRange(L, U);
+ return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize));
}
/// truncate - Return a new range in the specified integer type, which must be
@@ -447,9 +473,7 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
if (isFullSet() || getSetSize().ugt(Size))
return ConstantRange(DstTySize, /*isFullSet=*/true);
- APInt L = Lower; L.trunc(DstTySize);
- APInt U = Upper; U.trunc(DstTySize);
- return ConstantRange(L, U);
+ return ConstantRange(Lower.trunc(DstTySize), Upper.trunc(DstTySize));
}
/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The
@@ -596,6 +620,32 @@ ConstantRange::udiv(const ConstantRange &RHS) const {
}
ConstantRange
+ConstantRange::binaryAnd(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ // TODO: replace this with something less conservative
+
+ APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax());
+ if (umin.isAllOnesValue())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1);
+}
+
+ConstantRange
+ConstantRange::binaryOr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ // TODO: replace this with something less conservative
+
+ APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
+ if (umax.isMinValue())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(umax, APInt::getNullValue(getBitWidth()));
+}
+
+ConstantRange
ConstantRange::shl(const ConstantRange &Other) const {
if (isEmptySet() || Other.isEmptySet())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
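Both new bitwise ranges are deliberately coarse, as their TODOs note: for unsigned values, x & y can never exceed the smaller operand and x | y can never drop below the larger one, so binaryAnd answers [0, umin+1) and binaryOr answers [umax, 0), the wrapped encoding of [umax, 2^n). An exhaustive 8-bit check of those two facts, as a standalone sketch:

    #include <algorithm>
    #include <cassert>

    int main() {
      for (unsigned x = 0; x < 256; ++x)
        for (unsigned y = 0; y < 256; ++y) {
          assert((x & y) <= std::min(x, y));  // bound used by binaryAnd
          assert((x | y) >= std::max(x, y));  // bound used by binaryOr
        }
    }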
diff --git a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
index 49258ede..bf8ca3f 100644
--- a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
+++ b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -10,8 +10,8 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/config.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/ThreadLocal.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/ThreadLocal.h"
#include <setjmp.h>
#include <cstdio>
using namespace llvm;
@@ -128,6 +128,9 @@ static void CrashRecoverySignalHandler(int Signal) {
// This call of Disable isn't thread safe, but it doesn't actually matter.
CrashRecoveryContext::Disable();
raise(Signal);
+
+ // The signal will be thrown once the signal mask is restored.
+ return;
}
// Unblock the signal we received.
@@ -202,3 +205,26 @@ const std::string &CrashRecoveryContext::getBacktrace() const {
assert(CRC->Failed && "No crash was detected!");
return CRC->Backtrace;
}
+
+//
+
+namespace {
+struct RunSafelyOnThreadInfo {
+ void (*UserFn)(void*);
+ void *UserData;
+ CrashRecoveryContext *CRC;
+ bool Result;
+};
+}
+
+static void RunSafelyOnThread_Dispatch(void *UserData) {
+ RunSafelyOnThreadInfo *Info =
+ reinterpret_cast<RunSafelyOnThreadInfo*>(UserData);
+ Info->Result = Info->CRC->RunSafely(Info->UserFn, Info->UserData);
+}
+bool CrashRecoveryContext::RunSafelyOnThread(void (*Fn)(void*), void *UserData,
+ unsigned RequestedStackSize) {
+ RunSafelyOnThreadInfo Info = { Fn, UserData, this, false };
+ llvm_execute_on_thread(RunSafelyOnThread_Dispatch, &Info, RequestedStackSize);
+ return Info.Result;
+}
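The new entry point is a thin trampoline: it packs the user callback, its argument, and the context into a stack-allocated RunSafelyOnThreadInfo, runs RunSafely on a fresh thread via llvm_execute_on_thread, and reports the result synchronously. A hypothetical caller, useful when the guarded work needs a bigger stack than the current thread offers:

    #include "llvm/Support/CrashRecoveryContext.h"

    static void DeepRecursion(void *Data) {
      // ... work that may fault or overflow a small stack ...
    }

    static bool RunGuarded() {
      llvm::CrashRecoveryContext CRC;
      // Request an 8 MiB stack; returns false if DeepRecursion crashed.
      return CRC.RunSafelyOnThread(DeepRecursion, /*UserData=*/0,
                                   /*RequestedStackSize=*/8 << 20);
    }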
diff --git a/contrib/llvm/lib/Support/Debug.cpp b/contrib/llvm/lib/Support/Debug.cpp
index 7f48f8a..9fdb12e 100644
--- a/contrib/llvm/lib/Support/Debug.cpp
+++ b/contrib/llvm/lib/Support/Debug.cpp
@@ -26,7 +26,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/circular_raw_ostream.h"
-#include "llvm/System/Signals.h"
+#include "llvm/Support/Signals.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/System/Disassembler.cpp b/contrib/llvm/lib/Support/Disassembler.cpp
index 139e3be..6362aff 100644
--- a/contrib/llvm/lib/System/Disassembler.cpp
+++ b/contrib/llvm/lib/Support/Disassembler.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
-#include "llvm/System/Disassembler.h"
+#include "llvm/Support/Disassembler.h"
#include <cassert>
#include <iomanip>
diff --git a/contrib/llvm/lib/Support/Dwarf.cpp b/contrib/llvm/lib/Support/Dwarf.cpp
index 96ce9d3..9799ef5 100644
--- a/contrib/llvm/lib/Support/Dwarf.cpp
+++ b/contrib/llvm/lib/Support/Dwarf.cpp
@@ -78,6 +78,10 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
case DW_TAG_shared_type: return "DW_TAG_shared_type";
case DW_TAG_lo_user: return "DW_TAG_lo_user";
case DW_TAG_hi_user: return "DW_TAG_hi_user";
+ case DW_TAG_auto_variable: return "DW_TAG_auto_variable";
+ case DW_TAG_arg_variable: return "DW_TAG_arg_variable";
+ case DW_TAG_return_variable: return "DW_TAG_return_variable";
+ case DW_TAG_vector_type: return "DW_TAG_vector_type";
}
return 0;
}
diff --git a/contrib/llvm/lib/System/DynamicLibrary.cpp b/contrib/llvm/lib/Support/DynamicLibrary.cpp
index 660db49..455c380 100644
--- a/contrib/llvm/lib/System/DynamicLibrary.cpp
+++ b/contrib/llvm/lib/Support/DynamicLibrary.cpp
@@ -14,7 +14,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/Config/config.h"
#include <cstdio>
#include <cstring>
@@ -46,7 +47,7 @@ void llvm::sys::DynamicLibrary::AddSymbol(const char* symbolName,
#ifdef LLVM_ON_WIN32
-#include "Win32/DynamicLibrary.inc"
+#include "Windows/DynamicLibrary.inc"
#else
@@ -63,6 +64,12 @@ using namespace llvm::sys;
static std::vector<void *> *OpenedHandles = 0;
+static SmartMutex<true>& getMutex() {
+ static SmartMutex<true> HandlesMutex;
+ return HandlesMutex;
+}
+
+
bool DynamicLibrary::LoadLibraryPermanently(const char *Filename,
std::string *ErrMsg) {
void *H = dlopen(Filename, RTLD_LAZY|RTLD_GLOBAL);
@@ -76,6 +83,7 @@ bool DynamicLibrary::LoadLibraryPermanently(const char *Filename,
if (Filename == NULL)
H = RTLD_DEFAULT;
#endif
+ SmartScopedLock<true> Lock(getMutex());
if (OpenedHandles == 0)
OpenedHandles = new std::vector<void *>();
OpenedHandles->push_back(H);
@@ -103,13 +111,14 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
std::map<std::string, void *>::iterator I =
ExplicitSymbols->find(symbolName);
std::map<std::string, void *>::iterator E = ExplicitSymbols->end();
-
+
if (I != E)
return I->second;
}
#if HAVE_DLFCN_H
// Now search the libraries.
+ SmartScopedLock<true> Lock(getMutex());
if (OpenedHandles) {
for (std::vector<void *>::iterator I = OpenedHandles->begin(),
E = OpenedHandles->end(); I != E; ++I) {
@@ -130,7 +139,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
if (!strcmp(symbolName, #SYM)) return &SYM
// On linux we have a weird situation. The stderr/out/in symbols are both
-// macros and global variables because of standards requirements. So, we
+// macros and global variables because of standards requirements. So, we
// boldly use the EXPLICIT_SYMBOL macro without checking for a #define first.
#if defined(__linux__)
{
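The locking change uses the construct-on-first-use idiom: getMutex() hands out a function-local static SmartMutex, so the lock is guaranteed to exist by the time any static constructor reaches LoadLibraryPermanently, which a namespace-scope mutex could not promise. The same idiom in portable C++, as a hypothetical standalone sketch:

    #include <mutex>
    #include <vector>

    // Function-local statics are constructed on first use, sidestepping the
    // static initialization order problem a global mutex would have.
    static std::mutex &getHandlesMutex() {
      static std::mutex M;
      return M;
    }

    static std::vector<void *> &getOpenedHandles() {
      static std::vector<void *> Handles;
      return Handles;
    }

    void recordHandle(void *H) {
      std::lock_guard<std::mutex> Lock(getHandlesMutex());
      getOpenedHandles().push_back(H);
    }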
diff --git a/contrib/llvm/lib/System/Errno.cpp b/contrib/llvm/lib/Support/Errno.cpp
index 68f66f6..18c6581 100644
--- a/contrib/llvm/lib/System/Errno.cpp
+++ b/contrib/llvm/lib/Support/Errno.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Errno.h"
+#include "llvm/Support/Errno.h"
#include "llvm/Config/config.h" // Get autoconf configuration settings
#if HAVE_STRING_H
@@ -50,7 +50,7 @@ std::string StrError(int errnum) {
# else
strerror_r(errnum,buffer,MaxErrStrLen-1);
# endif
-#elif defined(HAVE_STRERROR_S) // Windows.
+#elif HAVE_DECL_STRERROR_S // "Windows Secure API"
if (errnum)
strerror_s(buffer, errnum);
#elif defined(HAVE_STRERROR)
diff --git a/contrib/llvm/lib/Support/ErrorHandling.cpp b/contrib/llvm/lib/Support/ErrorHandling.cpp
index 0b7af3e..3579546 100644
--- a/contrib/llvm/lib/Support/ErrorHandling.cpp
+++ b/contrib/llvm/lib/Support/ErrorHandling.cpp
@@ -16,8 +16,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Threading.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Config/config.h"
#include <cassert>
@@ -58,6 +58,10 @@ void llvm::report_fatal_error(const std::string &Reason) {
report_fatal_error(Twine(Reason));
}
+void llvm::report_fatal_error(StringRef Reason) {
+ report_fatal_error(Twine(Reason));
+}
+
void llvm::report_fatal_error(const Twine &Reason) {
if (ErrorHandler) {
ErrorHandler(ErrorHandlerUserData, Reason.str());
@@ -69,7 +73,8 @@ void llvm::report_fatal_error(const Twine &Reason) {
raw_svector_ostream OS(Buffer);
OS << "LLVM ERROR: " << Reason << "\n";
StringRef MessageStr = OS.str();
- (void)::write(2, MessageStr.data(), MessageStr.size());
+ ssize_t written = ::write(2, MessageStr.data(), MessageStr.size());
+ (void)written; // If something went wrong, we deliberately just give up.
}
// If we reached here, we are failing ungracefully. Run the interrupt handlers
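Two small hardening changes land here: a StringRef overload of report_fatal_error joins the existing std::string and Twine forms, and the write(2) result is now bound to a named variable, presumably so toolchains that mark write as warn_unused_result stay quiet. A hypothetical caller of the new overload:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/ErrorHandling.h"

    // Hypothetical use: a parsed token can now be passed straight through
    // without first materializing a std::string.
    void failOn(llvm::StringRef BadToken) {
      llvm::report_fatal_error(BadToken);  // resolves to the new overload
    }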
diff --git a/contrib/llvm/lib/Support/FileUtilities.cpp b/contrib/llvm/lib/Support/FileUtilities.cpp
index 1bde2fe..5dbabee 100644
--- a/contrib/llvm/lib/Support/FileUtilities.cpp
+++ b/contrib/llvm/lib/Support/FileUtilities.cpp
@@ -15,7 +15,8 @@
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include <cstdlib>
@@ -108,17 +109,17 @@ static bool CompareNumbers(const char *&F1P, const char *&F2P,
SmallString<200> StrTmp(F1P, EndOfNumber(F1NumEnd)+1);
// Strange exponential notation!
StrTmp[static_cast<unsigned>(F1NumEnd-F1P)] = 'e';
-
+
V1 = strtod(&StrTmp[0], const_cast<char**>(&F1NumEnd));
F1NumEnd = F1P + (F1NumEnd-&StrTmp[0]);
}
-
+
if (*F2NumEnd == 'D' || *F2NumEnd == 'd') {
// Copy string into tmp buffer to replace the 'D' with an 'e'.
SmallString<200> StrTmp(F2P, EndOfNumber(F2NumEnd)+1);
// Strange exponential notation!
StrTmp[static_cast<unsigned>(F2NumEnd-F2P)] = 'e';
-
+
V2 = strtod(&StrTmp[0], const_cast<char**>(&F2NumEnd));
F2NumEnd = F2P + (F2NumEnd-&StrTmp[0]);
}
@@ -199,11 +200,20 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA,
// Now it's safe to mmap the files into memory because both files
// have a non-zero size.
- OwningPtr<MemoryBuffer> F1(MemoryBuffer::getFile(FileA.c_str(), Error));
- OwningPtr<MemoryBuffer> F2(MemoryBuffer::getFile(FileB.c_str(), Error));
- if (F1 == 0 || F2 == 0)
+ error_code ec;
+ OwningPtr<MemoryBuffer> F1;
+ if (error_code ec = MemoryBuffer::getFile(FileA.c_str(), F1)) {
+ if (Error)
+ *Error = ec.message();
return 2;
-
+ }
+ OwningPtr<MemoryBuffer> F2;
+ if (error_code ec = MemoryBuffer::getFile(FileB.c_str(), F2)) {
+ if (Error)
+ *Error = ec.message();
+ return 2;
+ }
+
// Okay, now that we opened the files, scan them for the first difference.
const char *File1Start = F1->getBufferStart();
const char *File2Start = F2->getBufferStart();
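DiffFilesWithTolerance is one of the first callers migrated to the new MemoryBuffer interface: the buffer comes back through an OwningPtr out-parameter and failure is a non-zero error_code instead of a null pointer. The shape every migrated call site takes, as a hedged sketch:

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Support/system_error.h"

    // Hypothetical caller of the reworked API.
    bool dumpFileSize(const char *Path) {
      llvm::OwningPtr<llvm::MemoryBuffer> Buf;
      if (llvm::error_code ec = llvm::MemoryBuffer::getFile(Path, Buf)) {
        llvm::errs() << Path << ": " << ec.message() << "\n";
        return false;
      }
      llvm::errs() << Path << ": " << Buf->getBufferSize() << " bytes\n";
      return true;
    }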
diff --git a/contrib/llvm/lib/Support/FoldingSet.cpp b/contrib/llvm/lib/Support/FoldingSet.cpp
index 29b5952..a4f80a9 100644
--- a/contrib/llvm/lib/Support/FoldingSet.cpp
+++ b/contrib/llvm/lib/Support/FoldingSet.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Host.h"
#include <cassert>
#include <cstring>
using namespace llvm;
@@ -110,18 +111,32 @@ void FoldingSetNodeID::AddString(StringRef String) {
Pos = (Units + 1) * 4;
} else {
// Otherwise do it the hard way.
- for (Pos += 4; Pos <= Size; Pos += 4) {
- unsigned V = ((unsigned char)String[Pos - 4] << 24) |
- ((unsigned char)String[Pos - 3] << 16) |
- ((unsigned char)String[Pos - 2] << 8) |
- (unsigned char)String[Pos - 1];
- Bits.push_back(V);
+ // To be compatible with above bulk transfer, we need to take endianness
+ // into account.
+ if (sys::isBigEndianHost()) {
+ for (Pos += 4; Pos <= Size; Pos += 4) {
+ unsigned V = ((unsigned char)String[Pos - 4] << 24) |
+ ((unsigned char)String[Pos - 3] << 16) |
+ ((unsigned char)String[Pos - 2] << 8) |
+ (unsigned char)String[Pos - 1];
+ Bits.push_back(V);
+ }
+ } else {
+ assert(sys::isLittleEndianHost() && "Unexpected host endianness");
+ for (Pos += 4; Pos <= Size; Pos += 4) {
+ unsigned V = ((unsigned char)String[Pos - 1] << 24) |
+ ((unsigned char)String[Pos - 2] << 16) |
+ ((unsigned char)String[Pos - 3] << 8) |
+ (unsigned char)String[Pos - 4];
+ Bits.push_back(V);
+ }
}
}
// With the leftover bits.
unsigned V = 0;
- // Pos will have overshot size by 4 - #bytes left over.
+ // Pos will have overshot size by 4 - #bytes left over.
+ // No need to take endianness into account here - this is always executed.
switch (Pos - Size) {
case 1: V = (V << 8) | (unsigned char)String[Size - 3]; // Fall thru.
case 2: V = (V << 8) | (unsigned char)String[Size - 2]; // Fall thru.
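The fix keeps the two hashing paths in agreement: the aligned fast path memcpy's whole words out of the string, so the byte-at-a-time fallback must assemble words in host byte order rather than always big-endian. A standalone check of that invariant (hypothetical, independent of FoldingSet; assumes 32-bit unsigned):

    #include <cassert>
    #include <cstring>

    int main() {
      const unsigned char S[4] = {'a', 'b', 'c', 'd'};
      unsigned Bulk;
      std::memcpy(&Bulk, S, 4);  // what the word-at-a-time path sees
      unsigned LE = (unsigned)S[3] << 24 | (unsigned)S[2] << 16 |
                    (unsigned)S[1] << 8  | (unsigned)S[0];
      unsigned BE = (unsigned)S[0] << 24 | (unsigned)S[1] << 16 |
                    (unsigned)S[2] << 8  | (unsigned)S[3];
      // The fallback must pick whichever of these matches the host.
      assert(Bulk == LE || Bulk == BE);
    }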
diff --git a/contrib/llvm/lib/Support/FormattedStream.cpp b/contrib/llvm/lib/Support/FormattedStream.cpp
index c72b5a1..231ae48 100644
--- a/contrib/llvm/lib/Support/FormattedStream.cpp
+++ b/contrib/llvm/lib/Support/FormattedStream.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
+#include <algorithm>
using namespace llvm;
diff --git a/contrib/llvm/lib/Support/GraphWriter.cpp b/contrib/llvm/lib/Support/GraphWriter.cpp
index fdd6285..0dba28a 100644
--- a/contrib/llvm/lib/Support/GraphWriter.cpp
+++ b/contrib/llvm/lib/Support/GraphWriter.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/GraphWriter.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "llvm/Config/config.h"
using namespace llvm;
@@ -63,11 +63,37 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
args.push_back(0);
errs() << "Running 'Graphviz' program... ";
- if (sys::Program::ExecuteAndWait(Graphviz, &args[0],0,0,0,0,&ErrMsg))
- errs() << "Error viewing graph " << Filename.str() << ": " << ErrMsg
- << "\n";
- else
- Filename.eraseFromDisk();
+ if (sys::Program::ExecuteAndWait(Graphviz, &args[0],0,0,0,0,&ErrMsg)) {
+ errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+ Filename.eraseFromDisk();
+ errs() << " done. \n";
+
+#elif HAVE_XDOT_PY
+ std::vector<const char*> args;
+ args.push_back(LLVM_PATH_XDOT_PY);
+ args.push_back(Filename.c_str());
+
+ switch (program) {
+ case GraphProgram::DOT: args.push_back("-f"); args.push_back("dot"); break;
+ case GraphProgram::FDP: args.push_back("-f"); args.push_back("fdp"); break;
+ case GraphProgram::NEATO: args.push_back("-f"); args.push_back("neato");break;
+ case GraphProgram::TWOPI: args.push_back("-f"); args.push_back("twopi");break;
+ case GraphProgram::CIRCO: args.push_back("-f"); args.push_back("circo");break;
+ default: errs() << "Unknown graph layout name; using default.\n";
+ }
+
+ args.push_back(0);
+
+ errs() << "Running 'xdot.py' program... ";
+ if (sys::Program::ExecuteAndWait(sys::Path(LLVM_PATH_XDOT_PY),
+ &args[0],0,0,0,0,&ErrMsg)) {
+ errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+ Filename.eraseFromDisk();
+ errs() << " done. \n";
#elif (HAVE_GV && (HAVE_DOT || HAVE_FDP || HAVE_NEATO || \
HAVE_TWOPI || HAVE_CIRCO))
@@ -128,8 +154,7 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
errs() << "Running '" << prog.str() << "' program... ";
if (sys::Program::ExecuteAndWait(prog, &args[0], 0, 0, 0, 0, &ErrMsg)) {
- errs() << "Error viewing graph " << Filename.str() << ": '"
- << ErrMsg << "\n";
+ errs() << "Error: " << ErrMsg << "\n";
return;
}
errs() << " done. \n";
@@ -144,7 +169,7 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
ErrMsg.clear();
if (wait) {
if (sys::Program::ExecuteAndWait(gv, &args[0],0,0,0,0,&ErrMsg))
- errs() << "Error viewing graph: " << ErrMsg << "\n";
+ errs() << "Error: " << ErrMsg << "\n";
Filename.eraseFromDisk();
PSFilename.eraseFromDisk();
}
@@ -163,8 +188,7 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
errs() << "Running 'dotty' program... ";
if (sys::Program::ExecuteAndWait(dotty, &args[0],0,0,0,0,&ErrMsg)) {
- errs() << "Error viewing graph " << Filename.str() << ": "
- << ErrMsg << "\n";
+ errs() << "Error: " << ErrMsg << "\n";
} else {
// Dotty spawns another app and doesn't wait until it returns
#if defined (__MINGW32__) || defined (_WINDOWS)
diff --git a/contrib/llvm/lib/System/Host.cpp b/contrib/llvm/lib/Support/Host.cpp
index e7193db..4dacf96 100644
--- a/contrib/llvm/lib/System/Host.cpp
+++ b/contrib/llvm/lib/Support/Host.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include "llvm/Config/config.h"
#include <string.h>
@@ -20,7 +20,7 @@
#include "Unix/Host.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/Host.inc"
+#include "Windows/Host.inc"
#endif
#ifdef _MSC_VER
#include <intrin.h>
@@ -92,7 +92,8 @@ static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX,
return true;
}
-static void DetectX86FamilyModel(unsigned EAX, unsigned &Family, unsigned &Model) {
+static void DetectX86FamilyModel(unsigned EAX, unsigned &Family,
+ unsigned &Model) {
Family = (EAX >> 8) & 0xf; // Bits 8 - 11
Model = (EAX >> 4) & 0xf; // Bits 4 - 7
if (Family == 6 || Family == 0xf) {
@@ -112,9 +113,9 @@ std::string sys::getHostCPUName() {
unsigned Model = 0;
DetectX86FamilyModel(EAX, Family, Model);
+ bool HasSSE3 = (ECX & 0x1);
GetX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
bool Em64T = (EDX >> 29) & 0x1;
- bool HasSSE3 = (ECX & 0x1);
union {
unsigned u[3];
@@ -128,21 +129,21 @@ std::string sys::getHostCPUName() {
return "i386";
case 4:
switch (Model) {
- case 0: // Intel486TM DX processors
- case 1: // Intel486TM DX processors
+ case 0: // Intel486 DX processors
+ case 1: // Intel486 DX processors
case 2: // Intel486 SX processors
- case 3: // Intel487TM processors, IntelDX2 OverDrive® processors,
- // IntelDX2TM processors
+ case 3: // Intel487 processors, IntelDX2 OverDrive processors,
+ // IntelDX2 processors
case 4: // Intel486 SL processor
- case 5: // IntelSX2TM processors
+ case 5: // IntelSX2 processors
case 7: // Write-Back Enhanced IntelDX2 processors
- case 8: // IntelDX4 OverDrive processors, IntelDX4TM processors
+ case 8: // IntelDX4 OverDrive processors, IntelDX4 processors
default: return "i486";
}
case 5:
switch (Model) {
case 1: // Pentium OverDrive processor for Pentium processor (60, 66),
- // Pentium® processors (60, 66)
+ // Pentium processors (60, 66)
case 2: // Pentium OverDrive processor for Pentium processor (75, 90,
// 100, 120, 133), Pentium processors (75, 90, 100, 120, 133,
// 150, 166, 200)
@@ -150,9 +151,9 @@ std::string sys::getHostCPUName() {
// systems
return "pentium";
- case 4: // Pentium OverDrive processor with MMXTM technology for Pentium
+ case 4: // Pentium OverDrive processor with MMX technology for Pentium
// processor (75, 90, 100, 120, 133), Pentium processor with
- // MMXTM technology (166, 200)
+ // MMX technology (166, 200)
return "pentium-mmx";
default: return "pentium";
@@ -165,7 +166,7 @@ std::string sys::getHostCPUName() {
case 3: // Intel Pentium II OverDrive processor, Pentium II processor,
// model 03
case 5: // Pentium II processor, model 05, Pentium II Xeon processor,
- // model 05, and Intel® Celeron® processor, model 05
+ // model 05, and Intel Celeron processor, model 05
case 6: // Celeron processor, model 06
return "pentium2";
@@ -182,13 +183,13 @@ std::string sys::getHostCPUName() {
// 0Dh. All processors are manufactured using the 90 nm process.
return "pentium-m";
- case 14: // Intel CoreTM Duo processor, Intel CoreTM Solo processor, model
+ case 14: // Intel Core Duo processor, Intel Core Solo processor, model
// 0Eh. All processors are manufactured using the 65 nm process.
return "yonah";
- case 15: // Intel CoreTM2 Duo processor, Intel CoreTM2 Duo mobile
- // processor, Intel CoreTM2 Quad processor, Intel CoreTM2 Quad
- // mobile processor, Intel CoreTM2 Extreme processor, Intel
+ case 15: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
+ // processor, Intel Core 2 Quad processor, Intel Core 2 Quad
+ // mobile processor, Intel Core 2 Extreme processor, Intel
// Pentium Dual-Core processor, Intel Xeon processor, model
// 0Fh. All processors are manufactured using the 65 nm process.
case 22: // Intel Celeron processor model 16h. All processors are
@@ -199,7 +200,7 @@ std::string sys::getHostCPUName() {
// Integrated Processor with Intel QuickAssist Technology
return "i686"; // FIXME: ???
- case 23: // Intel CoreTM2 Extreme processor, Intel Xeon processor, model
+ case 23: // Intel Core 2 Extreme processor, Intel Xeon processor, model
// 17h. All processors are manufactured using the 45 nm process.
//
// 45nm: Penryn , Wolfdale, Yorkfield (XE)
@@ -209,6 +210,9 @@ std::string sys::getHostCPUName() {
// processors are manufactured using the 45 nm process.
case 29: // Intel Xeon processor MP. All processors are manufactured using
// the 45 nm process.
+ case 30: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
+ // As found in a Summer 2010 model iMac.
+ case 37: // Intel Core i7, laptop version.
return "corei7";
case 28: // Intel Atom processor. All processors are manufactured using
@@ -224,7 +228,7 @@ std::string sys::getHostCPUName() {
case 1: // Pentium 4 processor, Intel Xeon processor, Intel Xeon
// processor MP, and Intel Celeron processor. All processors are
// model 01h and manufactured using the 0.18 micron process.
- case 2: // Pentium 4 processor, Mobile Intel Pentium 4 processor – M,
+ case 2: // Pentium 4 processor, Mobile Intel Pentium 4 processor - M,
// Intel Xeon processor, Intel Xeon processor MP, Intel Celeron
// processor, and Mobile Intel Celeron processor. All processors
// are model 02h and manufactured using the 0.13 micron process.
@@ -277,14 +281,12 @@ std::string sys::getHostCPUName() {
default: return "athlon";
}
case 15:
- if (HasSSE3) {
+ if (HasSSE3)
return "k8-sse3";
- } else {
- switch (Model) {
- case 1: return "opteron";
- case 5: return "athlon-fx"; // also opteron
- default: return "athlon64";
- }
+ switch (Model) {
+ case 1: return "opteron";
+ case 5: return "athlon-fx"; // also opteron
+ default: return "athlon64";
}
case 16:
return "amdfam10";
diff --git a/contrib/llvm/lib/System/IncludeFile.cpp b/contrib/llvm/lib/Support/IncludeFile.cpp
index 8258d40..5da8826 100644
--- a/contrib/llvm/lib/System/IncludeFile.cpp
+++ b/contrib/llvm/lib/Support/IncludeFile.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/IncludeFile.h"
+#include "llvm/Support/IncludeFile.h"
using namespace llvm;
// This constructor is used to ensure linking of other modules. See the
-// llvm/System/IncludeFile.h header for details.
+// llvm/Support/IncludeFile.h header for details.
IncludeFile::IncludeFile(const void*) {}
diff --git a/contrib/llvm/lib/Support/IntEqClasses.cpp b/contrib/llvm/lib/Support/IntEqClasses.cpp
new file mode 100644
index 0000000..1134495
--- /dev/null
+++ b/contrib/llvm/lib/Support/IntEqClasses.cpp
@@ -0,0 +1,70 @@
+//===-- llvm/ADT/IntEqClasses.cpp - Equivalence Classes of Integers -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/IntEqClasses.h"
+
+using namespace llvm;
+
+void IntEqClasses::grow(unsigned N) {
+ assert(NumClasses == 0 && "grow() called after compress().");
+ EC.reserve(N);
+ while (EC.size() < N)
+ EC.push_back(EC.size());
+}
+
+void IntEqClasses::join(unsigned a, unsigned b) {
+ assert(NumClasses == 0 && "join() called after compress().");
+ unsigned eca = EC[a];
+ unsigned ecb = EC[b];
+ // Update pointers while searching for the leaders, compressing the paths
+ // incrementally. The larger leader will eventually be updated, joining the
+ // classes.
+ while (eca != ecb)
+ if (eca < ecb)
+ EC[b] = eca, b = ecb, ecb = EC[b];
+ else
+ EC[a] = ecb, a = eca, eca = EC[a];
+}
+
+unsigned IntEqClasses::findLeader(unsigned a) const {
+ assert(NumClasses == 0 && "findLeader() called after compress().");
+ while (a != EC[a])
+ a = EC[a];
+ return a;
+}
+
+void IntEqClasses::compress() {
+ if (NumClasses)
+ return;
+ for (unsigned i = 0, e = EC.size(); i != e; ++i)
+ EC[i] = (EC[i] == i) ? NumClasses++ : EC[EC[i]];
+}
+
+void IntEqClasses::uncompress() {
+ if (!NumClasses)
+ return;
+ SmallVector<unsigned, 8> Leader;
+ for (unsigned i = 0, e = EC.size(); i != e; ++i)
+ if (EC[i] < Leader.size())
+ EC[i] = Leader[EC[i]];
+ else
+ Leader.push_back(EC[i] = i);
+ NumClasses = 0;
+}
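A hypothetical walk-through of the new class, assuming the IntEqClasses.h interface from this import (grow via the constructor, join, findLeader, then compress into dense class numbers):

    #include "llvm/ADT/IntEqClasses.h"
    #include <cassert>

    int main() {
      llvm::IntEqClasses EC(5);   // classes for the integers 0..4
      EC.join(0, 4);
      EC.join(1, 3);
      assert(EC.findLeader(4) == EC.findLeader(0));
      EC.compress();              // renumber into 0 .. getNumClasses()-1
      assert(EC.getNumClasses() == 3);  // {0,4} {1,3} {2}
      assert(EC[0] == EC[4] && EC[1] == EC[3] && EC[0] != EC[2]);
    }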
diff --git a/contrib/llvm/lib/Support/IntervalMap.cpp b/contrib/llvm/lib/Support/IntervalMap.cpp
new file mode 100644
index 0000000..4dfcc40
--- /dev/null
+++ b/contrib/llvm/lib/Support/IntervalMap.cpp
@@ -0,0 +1,161 @@
+//===- lib/Support/IntervalMap.cpp - A sorted interval map ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the few non-templated functions in IntervalMap.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/IntervalMap.h"
+
+namespace llvm {
+namespace IntervalMapImpl {
+
+void Path::replaceRoot(void *Root, unsigned Size, IdxPair Offsets) {
+ assert(!path.empty() && "Can't replace missing root");
+ path.front() = Entry(Root, Size, Offsets.first);
+ path.insert(path.begin() + 1, Entry(subtree(0), Offsets.second));
+}
+
+NodeRef Path::getLeftSibling(unsigned Level) const {
+ // The root has no siblings.
+ if (Level == 0)
+ return NodeRef();
+
+ // Go up the tree until we can go left.
+ unsigned l = Level - 1;
+ while (l && path[l].offset == 0)
+ --l;
+
+ // We can't go left.
+ if (path[l].offset == 0)
+ return NodeRef();
+
+ // NR is the subtree containing our left sibling.
+ NodeRef NR = path[l].subtree(path[l].offset - 1);
+
+ // Keep right all the way down.
+ for (++l; l != Level; ++l)
+ NR = NR.subtree(NR.size() - 1);
+ return NR;
+}
+
+void Path::moveLeft(unsigned Level) {
+ assert(Level != 0 && "Cannot move the root node");
+
+ // Go up the tree until we can go left.
+ unsigned l = 0;
+ if (valid()) {
+ l = Level - 1;
+ while (path[l].offset == 0) {
+ assert(l != 0 && "Cannot move beyond begin()");
+ --l;
+ }
+ } else if (height() < Level)
+ // end() may have created a height=0 path.
+ path.resize(Level + 1, Entry(0, 0, 0));
+
+ // NR is the subtree containing our left sibling.
+ --path[l].offset;
+ NodeRef NR = subtree(l);
+
+ // Get the rightmost node in the subtree.
+ for (++l; l != Level; ++l) {
+ path[l] = Entry(NR, NR.size() - 1);
+ NR = NR.subtree(NR.size() - 1);
+ }
+ path[l] = Entry(NR, NR.size() - 1);
+}
+
+NodeRef Path::getRightSibling(unsigned Level) const {
+ // The root has no siblings.
+ if (Level == 0)
+ return NodeRef();
+
+ // Go up the tree until we can go right.
+ unsigned l = Level - 1;
+ while (l && atLastEntry(l))
+ --l;
+
+ // We can't go right.
+ if (atLastEntry(l))
+ return NodeRef();
+
+ // NR is the subtree containing our right sibling.
+ NodeRef NR = path[l].subtree(path[l].offset + 1);
+
+ // Keep left all the way down.
+ for (++l; l != Level; ++l)
+ NR = NR.subtree(0);
+ return NR;
+}
+
+void Path::moveRight(unsigned Level) {
+ assert(Level != 0 && "Cannot move the root node");
+
+ // Go up the tree until we can go right.
+ unsigned l = Level - 1;
+ while (l && atLastEntry(l))
+ --l;
+
+ // NR is the subtree containing our right sibling. If we hit end(), we have
+ // offset(0) == node(0).size().
+ if (++path[l].offset == path[l].size)
+ return;
+ NodeRef NR = subtree(l);
+
+ for (++l; l != Level; ++l) {
+ path[l] = Entry(NR, 0);
+ NR = NR.subtree(0);
+ }
+ path[l] = Entry(NR, 0);
+}
+
+
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+ const unsigned *CurSize, unsigned NewSize[],
+ unsigned Position, bool Grow) {
+ assert(Elements + Grow <= Nodes * Capacity && "Not enough room for elements");
+ assert(Position <= Elements && "Invalid position");
+ if (!Nodes)
+ return IdxPair();
+
+ // Trivial algorithm: left-leaning even distribution.
+ const unsigned PerNode = (Elements + Grow) / Nodes;
+ const unsigned Extra = (Elements + Grow) % Nodes;
+ IdxPair PosPair = IdxPair(Nodes, 0);
+ unsigned Sum = 0;
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Sum += NewSize[n] = PerNode + (n < Extra);
+ if (PosPair.first == Nodes && Sum > Position)
+ PosPair = IdxPair(n, Position - (Sum - NewSize[n]));
+ }
+ assert(Sum == Elements + Grow && "Bad distribution sum");
+
+ // Subtract the Grow element that was added.
+ if (Grow) {
+ assert(PosPair.first < Nodes && "Bad algebra");
+ assert(NewSize[PosPair.first] && "Too few elements to need Grow");
+ --NewSize[PosPair.first];
+ }
+
+#ifndef NDEBUG
+ Sum = 0;
+ for (unsigned n = 0; n != Nodes; ++n) {
+ assert(NewSize[n] <= Capacity && "Overallocated node");
+ Sum += NewSize[n];
+ }
+ assert(Sum == Elements && "Bad distribution sum");
+#endif
+
+ return PosPair;
+}
+
+} // namespace IntervalMapImpl
+} // namespace llvm
+
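distribute() is the node-balancing heart of the B+-tree: it spreads Elements (plus an optional Grow slot) across Nodes as evenly as Capacity allows, biasing the surplus leftward. The left-leaning rule in isolation, as a standalone sketch:

    #include <cassert>

    int main() {
      const unsigned Nodes = 3, Elements = 7, Grow = 1;
      unsigned NewSize[Nodes];
      const unsigned PerNode = (Elements + Grow) / Nodes;  // 8 / 3 = 2
      const unsigned Extra   = (Elements + Grow) % Nodes;  // 8 % 3 = 2
      for (unsigned n = 0; n != Nodes; ++n)
        NewSize[n] = PerNode + (n < Extra);                // left-leaning
      assert(NewSize[0] == 3 && NewSize[1] == 3 && NewSize[2] == 2);
    }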
diff --git a/contrib/llvm/lib/Support/ManagedStatic.cpp b/contrib/llvm/lib/Support/ManagedStatic.cpp
index 4e655a0..c767c15 100644
--- a/contrib/llvm/lib/Support/ManagedStatic.cpp
+++ b/contrib/llvm/lib/Support/ManagedStatic.cpp
@@ -13,7 +13,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Config/config.h"
-#include "llvm/System/Atomic.h"
+#include "llvm/Support/Atomic.h"
#include <cassert>
using namespace llvm;
diff --git a/contrib/llvm/lib/System/Memory.cpp b/contrib/llvm/lib/Support/Memory.cpp
index 49ccf3d..ac7af0a 100644
--- a/contrib/llvm/lib/System/Memory.cpp
+++ b/contrib/llvm/lib/Support/Memory.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Memory.h"
-#include "llvm/System/Valgrind.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Valgrind.h"
#include "llvm/Config/config.h"
namespace llvm {
@@ -25,7 +25,7 @@ using namespace sys;
#include "Unix/Memory.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/Memory.inc"
+#include "Windows/Memory.inc"
#endif
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
@@ -35,7 +35,7 @@ extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
/// platforms.
void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
size_t Len) {
-
+
// icache invalidation for PPC and ARM.
#if defined(__APPLE__)
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index 542162d..a0c650d 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -15,14 +15,16 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/System/Errno.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Process.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/system_error.h"
#include <cassert>
#include <cstdio>
#include <cstring>
#include <cerrno>
+#include <new>
#include <sys/types.h>
#include <sys/stat.h>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
@@ -34,6 +36,8 @@
#include <fcntl.h>
using namespace llvm;
+namespace { const llvm::error_code success; }
+
//===----------------------------------------------------------------------===//
// MemoryBuffer implementation itself.
//===----------------------------------------------------------------------===//
@@ -142,22 +146,20 @@ MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) {
/// if the Filename is "-". If an error occurs, this returns null and fills
/// in *ErrStr with a reason. If stdin is empty, this API (unlike getSTDIN)
/// returns an empty buffer.
-MemoryBuffer *MemoryBuffer::getFileOrSTDIN(StringRef Filename,
- std::string *ErrStr,
- int64_t FileSize,
- struct stat *FileInfo) {
+error_code MemoryBuffer::getFileOrSTDIN(StringRef Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize) {
if (Filename == "-")
- return getSTDIN(ErrStr);
- return getFile(Filename, ErrStr, FileSize, FileInfo);
+ return getSTDIN(result);
+ return getFile(Filename, result, FileSize);
}
-MemoryBuffer *MemoryBuffer::getFileOrSTDIN(const char *Filename,
- std::string *ErrStr,
- int64_t FileSize,
- struct stat *FileInfo) {
+error_code MemoryBuffer::getFileOrSTDIN(const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize) {
if (strcmp(Filename, "-") == 0)
- return getSTDIN(ErrStr);
- return getFile(Filename, ErrStr, FileSize, FileInfo);
+ return getSTDIN(result);
+ return getFile(Filename, result, FileSize);
}
//===----------------------------------------------------------------------===//
@@ -177,50 +179,47 @@ public:
sys::Path::UnMapFilePages(getBufferStart(), getBufferSize());
}
};
-
-/// FileCloser - RAII object to make sure an FD gets closed properly.
-class FileCloser {
- int FD;
-public:
- explicit FileCloser(int FD) : FD(FD) {}
- ~FileCloser() { ::close(FD); }
-};
}
-MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
- int64_t FileSize, struct stat *FileInfo) {
+error_code MemoryBuffer::getFile(StringRef Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize) {
+ // Ensure the path is null terminated.
SmallString<256> PathBuf(Filename.begin(), Filename.end());
- return MemoryBuffer::getFile(PathBuf.c_str(), ErrStr, FileSize, FileInfo);
+ return MemoryBuffer::getFile(PathBuf.c_str(), result, FileSize);
}
-MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
- int64_t FileSize, struct stat *FileInfo) {
+error_code MemoryBuffer::getFile(const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize) {
int OpenFlags = O_RDONLY;
#ifdef O_BINARY
OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
#endif
int FD = ::open(Filename, OpenFlags);
if (FD == -1) {
- if (ErrStr) *ErrStr = sys::StrError();
- return 0;
+ return error_code(errno, posix_category());
}
- FileCloser FC(FD); // Close FD on return.
-
+ error_code ret = getOpenFile(FD, Filename, result, FileSize);
+ close(FD);
+ return ret;
+}
+
+error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
+ OwningPtr<MemoryBuffer> &result,
+ int64_t FileSize) {
// If we don't know the file size, use fstat to find out. fstat on an open
// file descriptor is cheaper than stat on a random path.
- if (FileSize == -1 || FileInfo) {
- struct stat MyFileInfo;
- struct stat *FileInfoPtr = FileInfo? FileInfo : &MyFileInfo;
-
+ if (FileSize == -1) {
+ struct stat FileInfo;
// TODO: This should use fstat64 when available.
- if (fstat(FD, FileInfoPtr) == -1) {
- if (ErrStr) *ErrStr = sys::StrError();
- return 0;
+ if (fstat(FD, &FileInfo) == -1) {
+ return error_code(errno, posix_category());
}
- FileSize = FileInfoPtr->st_size;
+ FileSize = FileInfo.st_size;
}
-
-
+
+
// If the file is large, try to use mmap to read it in. We don't use mmap
// for small files, because this can severely fragment our address space. Also
// don't try to map files that are exactly a multiple of the system page size,
@@ -230,16 +229,17 @@ MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
if (FileSize >= 4096*4 &&
(FileSize & (sys::Process::GetPageSize()-1)) != 0) {
if (const char *Pages = sys::Path::MapInFilePages(FD, FileSize)) {
- return GetNamedBuffer<MemoryBufferMMapFile>(StringRef(Pages, FileSize),
- Filename);
+ result.reset(GetNamedBuffer<MemoryBufferMMapFile>(
+ StringRef(Pages, FileSize), Filename));
+ return success;
}
}
MemoryBuffer *Buf = MemoryBuffer::getNewUninitMemBuffer(FileSize, Filename);
if (!Buf) {
- // Failed to create a buffer.
- if (ErrStr) *ErrStr = "could not allocate buffer";
- return 0;
+ // Failed to create a buffer. The only way it can fail is if
+ // new(std::nothrow) returns 0.
+ return make_error_code(errc::not_enough_memory);
}
OwningPtr<MemoryBuffer> SB(Buf);
@@ -252,26 +252,27 @@ MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
if (errno == EINTR)
continue;
// Error while reading.
- if (ErrStr) *ErrStr = sys::StrError();
- return 0;
+ return error_code(errno, posix_category());
} else if (NumRead == 0) {
// We hit EOF early, truncate and terminate buffer.
Buf->BufferEnd = BufPtr;
*BufPtr = 0;
- return SB.take();
+ result.swap(SB);
+ return success;
}
BytesLeft -= NumRead;
BufPtr += NumRead;
}
- return SB.take();
+ result.swap(SB);
+ return success;
}
//===----------------------------------------------------------------------===//
// MemoryBuffer::getSTDIN implementation.
//===----------------------------------------------------------------------===//
-MemoryBuffer *MemoryBuffer::getSTDIN(std::string *ErrStr) {
+error_code MemoryBuffer::getSTDIN(OwningPtr<MemoryBuffer> &result) {
// Read in all of the data from stdin, we cannot mmap stdin.
//
// FIXME: That isn't necessarily true, we should try to mmap stdin and
@@ -287,11 +288,11 @@ MemoryBuffer *MemoryBuffer::getSTDIN(std::string *ErrStr) {
ReadBytes = read(0, Buffer.end(), ChunkSize);
if (ReadBytes == -1) {
if (errno == EINTR) continue;
- if (ErrStr) *ErrStr = sys::StrError();
- return 0;
+ return error_code(errno, posix_category());
}
Buffer.set_size(Buffer.size() + ReadBytes);
} while (ReadBytes != 0);
- return getMemBufferCopy(Buffer, "<stdin>");
+ result.reset(getMemBufferCopy(Buffer, "<stdin>"));
+ return success;
}
diff --git a/contrib/llvm/lib/System/Mutex.cpp b/contrib/llvm/lib/Support/Mutex.cpp
index 8ccd6e5..b408973 100644
--- a/contrib/llvm/lib/System/Mutex.cpp
+++ b/contrib/llvm/lib/Support/Mutex.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only TRULY operating system
@@ -78,6 +78,7 @@ MutexImpl::MutexImpl( bool recursive)
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__)
// Make it a process local mutex
errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
+ assert(errorcode == 0);
#endif
// Initialize the mutex
@@ -149,9 +150,8 @@ MutexImpl::tryacquire()
#elif defined(LLVM_ON_UNIX)
#include "Unix/Mutex.inc"
#elif defined( LLVM_ON_WIN32)
-#include "Win32/Mutex.inc"
+#include "Windows/Mutex.inc"
#else
#warning Neither LLVM_ON_UNIX nor LLVM_ON_WIN32 was set in System/Mutex.cpp
#endif
#endif
-
diff --git a/contrib/llvm/lib/System/Path.cpp b/contrib/llvm/lib/Support/Path.cpp
index 4445c66..e5e875b 100644
--- a/contrib/llvm/lib/System/Path.cpp
+++ b/contrib/llvm/lib/Support/Path.cpp
@@ -11,8 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/FileSystem.h"
#include <cassert>
#include <cstring>
#include <ostream>
@@ -104,7 +106,7 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
case 2: return Mach_O_Executable_FileType;
case 3: return Mach_O_FixedVirtualMemorySharedLib_FileType;
case 4: return Mach_O_Core_FileType;
- case 5: return Mach_O_PreloadExectuable_FileType;
+ case 5: return Mach_O_PreloadExecutable_FileType;
case 6: return Mach_O_DynamicallyLinkedSharedLib_FileType;
case 7: return Mach_O_DynamicLinker_FileType;
case 8: return Mach_O_Bundle_FileType;
@@ -127,6 +129,10 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
if (magic[1] == 0x02)
return COFF_FileType;
break;
+ case 0x64: // x86-64 Windows.
+ if (magic[1] == char(0x86))
+ return COFF_FileType;
+ break;
default:
break;
@@ -136,24 +142,33 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
bool
Path::isArchive() const {
- return hasMagicNumber("!<arch>\012");
+ LLVMFileType type;
+ if (fs::identify_magic(str(), type))
+ return false;
+ return type == Archive_FileType;
}
bool
Path::isDynamicLibrary() const {
- std::string Magic;
- if (getMagicNumber(Magic, 64))
- switch (IdentifyFileType(Magic.c_str(),
- static_cast<unsigned>(Magic.length()))) {
- default: return false;
- case Mach_O_FixedVirtualMemorySharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
- case ELF_SharedObject_FileType:
- case COFF_FileType: return true;
- }
+ LLVMFileType type;
+ if (fs::identify_magic(str(), type))
+ return false;
+ switch (type) {
+ default: return false;
+ case Mach_O_FixedVirtualMemorySharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
+ case ELF_SharedObject_FileType:
+ case COFF_FileType: return true;
+ }
+}
- return false;
+bool
+Path::isObjectFile() const {
+ LLVMFileType type;
+ if (fs::identify_magic(str(), type) || type == Unknown_FileType)
+ return false;
+ return true;
}
Path
@@ -174,18 +189,23 @@ Path::FindLibrary(std::string& name) {
}
StringRef Path::GetDLLSuffix() {
- return LTDL_SHLIB_EXT;
+ return &(LTDL_SHLIB_EXT[1]);
+}
+
+void
+Path::appendSuffix(StringRef suffix) {
+ if (!suffix.empty()) {
+ path.append(".");
+ path.append(suffix);
+ }
}
bool
Path::isBitcodeFile() const {
- std::string actualMagic;
- if (!getMagicNumber(actualMagic, 4))
+ LLVMFileType type;
+ if (fs::identify_magic(str(), type))
return false;
- LLVMFileType FT =
- IdentifyFileType(actualMagic.c_str(),
- static_cast<unsigned>(actualMagic.length()));
- return FT == Bitcode_FileType;
+ return type == Bitcode_FileType;
}
bool Path::hasMagicNumber(StringRef Magic) const {
@@ -259,6 +279,5 @@ static StringRef getDirnameCharSep(StringRef path, const char *Sep) {
#include "Unix/Path.inc"
#endif
#if defined(LLVM_ON_WIN32)
-#include "Win32/Path.inc"
+#include "Windows/Path.inc"
#endif
-
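All of the sniffing predicates now funnel through the new fs::identify_magic, which reads the leading bytes itself and reports I/O trouble as a non-zero error_code. A hypothetical caller in the same style as the rewritten isBitcodeFile:

    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/Path.h"

    bool looksLikeBitcode(const llvm::sys::Path &P) {
      llvm::sys::LLVMFileType type;
      if (llvm::sys::fs::identify_magic(P.str(), type))
        return false;                     // unreadable: treat as "no"
      return type == llvm::sys::Bitcode_FileType;
    }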
diff --git a/contrib/llvm/lib/Support/PathV2.cpp b/contrib/llvm/lib/Support/PathV2.cpp
new file mode 100644
index 0000000..896c94c
--- /dev/null
+++ b/contrib/llvm/lib/Support/PathV2.cpp
@@ -0,0 +1,774 @@
+//===-- PathV2.cpp - Implement OS Path Concept ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the operating system PathV2 API.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/PathV2.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
+#include <cstdio>
+#include <cstring>
+
+namespace {
+ using llvm::StringRef;
+ using llvm::sys::path::is_separator;
+
+#ifdef LLVM_ON_WIN32
+ const StringRef separators = "\\/";
+ const char prefered_separator = '\\';
+#else
+ const StringRef separators = "/";
+ const char prefered_separator = '/';
+#endif
+
+ const llvm::error_code success;
+
+ StringRef find_first_component(StringRef path) {
+ // Look for this first component in the following order.
+ // * empty (in this case we return an empty string)
+ // * either C: or {//,\\}net.
+ // * {/,\}
+ // * {.,..}
+ // * {file,directory}name
+
+ if (path.empty())
+ return path;
+
+#ifdef LLVM_ON_WIN32
+ // C:
+ if (path.size() >= 2 && std::isalpha(path[0]) && path[1] == ':')
+ return path.substr(0, 2);
+#endif
+
+ // //net
+ if ((path.size() > 2) &&
+ is_separator(path[0]) &&
+ path[0] == path[1] &&
+ !is_separator(path[2])) {
+ // Find the next directory separator.
+ size_t end = path.find_first_of(separators, 2);
+ return path.substr(0, end);
+ }
+
+ // {/,\}
+ if (is_separator(path[0]))
+ return path.substr(0, 1);
+
+ if (path.startswith(".."))
+ return path.substr(0, 2);
+
+ if (path[0] == '.')
+ return path.substr(0, 1);
+
+ // * {file,directory}name
+ size_t end = path.find_first_of(separators, 2);
+ return path.substr(0, end);
+ }
+
+ size_t filename_pos(StringRef str) {
+ if (str.size() == 2 &&
+ is_separator(str[0]) &&
+ str[0] == str[1])
+ return 0;
+
+ if (str.size() > 0 && is_separator(str[str.size() - 1]))
+ return str.size() - 1;
+
+ size_t pos = str.find_last_of(separators, str.size() - 1);
+
+#ifdef LLVM_ON_WIN32
+ if (pos == StringRef::npos)
+ pos = str.find_last_of(':', str.size() - 2);
+#endif
+
+ if (pos == StringRef::npos ||
+ (pos == 1 && is_separator(str[0])))
+ return 0;
+
+ return pos + 1;
+ }
+
+ size_t root_dir_start(StringRef str) {
+ // case "c:/"
+#ifdef LLVM_ON_WIN32
+ if (str.size() > 2 &&
+ str[1] == ':' &&
+ is_separator(str[2]))
+ return 2;
+#endif
+
+ // case "//"
+ if (str.size() == 2 &&
+ is_separator(str[0]) &&
+ str[0] == str[1])
+ return StringRef::npos;
+
+ // case "//net"
+ if (str.size() > 3 &&
+ is_separator(str[0]) &&
+ str[0] == str[1] &&
+ !is_separator(str[2])) {
+ return str.find_first_of(separators, 2);
+ }
+
+ // case "/"
+ if (str.size() > 0 && is_separator(str[0]))
+ return 0;
+
+ return StringRef::npos;
+ }
+
+ size_t parent_path_end(StringRef path) {
+ size_t end_pos = filename_pos(path);
+
+ bool filename_was_sep = path.size() > 0 && is_separator(path[end_pos]);
+
+ // Skip separators except for root dir.
+ size_t root_dir_pos = root_dir_start(path.substr(0, end_pos));
+
+ while(end_pos > 0 &&
+ (end_pos - 1) != root_dir_pos &&
+ is_separator(path[end_pos - 1]))
+ --end_pos;
+
+ if (end_pos == 1 && root_dir_pos == 0 && filename_was_sep)
+ return StringRef::npos;
+
+ return end_pos;
+ }
+} // end unnamed namespace
+
+namespace llvm {
+namespace sys {
+namespace path {
+
+const_iterator begin(StringRef path) {
+ const_iterator i;
+ i.Path = path;
+ i.Component = find_first_component(path);
+ i.Position = 0;
+ return i;
+}
+
+const_iterator end(StringRef path) {
+ const_iterator i;
+ i.Path = path;
+ i.Position = path.size();
+ return i;
+}
+
+const_iterator &const_iterator::operator++() {
+ assert(Position < Path.size() && "Tried to increment past end!");
+
+ // Increment Position to past the current component
+ Position += Component.size();
+
+ // Check for end.
+ if (Position == Path.size()) {
+ Component = StringRef();
+ return *this;
+ }
+
+ // Both POSIX and Windows treat paths that begin with exactly two separators
+ // specially.
+ bool was_net = Component.size() > 2 &&
+ is_separator(Component[0]) &&
+ Component[1] == Component[0] &&
+ !is_separator(Component[2]);
+
+ // Handle separators.
+ if (is_separator(Path[Position])) {
+ // Root dir.
+ if (was_net
+#ifdef LLVM_ON_WIN32
+ // c:/
+ || Component.endswith(":")
+#endif
+ ) {
+ Component = Path.substr(Position, 1);
+ return *this;
+ }
+
+ // Skip extra separators.
+ while (Position != Path.size() &&
+ is_separator(Path[Position])) {
+ ++Position;
+ }
+
+ // Treat trailing '/' as a '.'.
+ if (Position == Path.size()) {
+ --Position;
+ Component = ".";
+ return *this;
+ }
+ }
+
+ // Find next component.
+ size_t end_pos = Path.find_first_of(separators, Position);
+ Component = Path.slice(Position, end_pos);
+
+ return *this;
+}
+
+const_iterator &const_iterator::operator--() {
+ // If we're at the end and the previous char was a '/', return '.'.
+ if (Position == Path.size() &&
+ Path.size() > 1 &&
+ is_separator(Path[Position - 1])
+#ifdef LLVM_ON_WIN32
+ && Path[Position - 2] != ':'
+#endif
+ ) {
+ --Position;
+ Component = ".";
+ return *this;
+ }
+
+ // Skip separators unless it's the root directory.
+ size_t root_dir_pos = root_dir_start(Path);
+ size_t end_pos = Position;
+
+ while(end_pos > 0 &&
+ (end_pos - 1) != root_dir_pos &&
+ is_separator(Path[end_pos - 1]))
+ --end_pos;
+
+ // Find next separator.
+ size_t start_pos = filename_pos(Path.substr(0, end_pos));
+ Component = Path.slice(start_pos, end_pos);
+ Position = start_pos;
+ return *this;
+}
+
+bool const_iterator::operator==(const const_iterator &RHS) const {
+ return Path.begin() == RHS.Path.begin() &&
+ Position == RHS.Position;
+}
+
+bool const_iterator::operator!=(const const_iterator &RHS) const {
+ return !(*this == RHS);
+}
+
+ptrdiff_t const_iterator::operator-(const const_iterator &RHS) const {
+ return Position - RHS.Position;
+}
+
+const StringRef root_path(StringRef path) {
+ const_iterator b = begin(path),
+ pos = b,
+ e = end(path);
+ if (b != e) {
+ bool has_net = b->size() > 2 && is_separator((*b)[0]) && (*b)[1] == (*b)[0];
+ bool has_drive =
+#ifdef LLVM_ON_WIN32
+ b->endswith(":");
+#else
+ false;
+#endif
+
+ if (has_net || has_drive) {
+ if ((++pos != e) && is_separator((*pos)[0])) {
+ // {C:/,//net/}, so get the first two components.
+ return path.substr(0, b->size() + pos->size());
+ } else {
+ // just {C:,//net}, return the first component.
+ return *b;
+ }
+ }
+
+ // POSIX style root directory.
+ if (is_separator((*b)[0])) {
+ return *b;
+ }
+ }
+
+ return StringRef();
+}
+
+const StringRef root_name(StringRef path) {
+ const_iterator b = begin(path),
+ e = end(path);
+ if (b != e) {
+ bool has_net = b->size() > 2 && is_separator((*b)[0]) && (*b)[1] == (*b)[0];
+ bool has_drive =
+#ifdef LLVM_ON_WIN32
+ b->endswith(":");
+#else
+ false;
+#endif
+
+ if (has_net || has_drive) {
+ // just {C:,//net}, return the first component.
+ return *b;
+ }
+ }
+
+ // No path or no name.
+ return StringRef();
+}
+
+const StringRef root_directory(StringRef path) {
+ const_iterator b = begin(path),
+ pos = b,
+ e = end(path);
+ if (b != e) {
+ bool has_net = b->size() > 2 && is_separator((*b)[0]) && (*b)[1] == (*b)[0];
+ bool has_drive =
+#ifdef LLVM_ON_WIN32
+ b->endswith(":");
+#else
+ false;
+#endif
+
+ if ((has_net || has_drive) &&
+ // {C:,//net}, skip to the next component.
+ (++pos != e) && is_separator((*pos)[0])) {
+ return *pos;
+ }
+
+ // POSIX style root directory.
+ if (!has_net && is_separator((*b)[0])) {
+ return *b;
+ }
+ }
+
+ // No path or no root.
+ return StringRef();
+}
+
+const StringRef relative_path(StringRef path) {
+ StringRef root = root_path(path);
+ return root.substr(root.size());
+}
+
+void append(SmallVectorImpl<char> &path, const Twine &a,
+ const Twine &b,
+ const Twine &c,
+ const Twine &d) {
+ SmallString<32> a_storage;
+ SmallString<32> b_storage;
+ SmallString<32> c_storage;
+ SmallString<32> d_storage;
+
+ SmallVector<StringRef, 4> components;
+ if (!a.isTriviallyEmpty()) components.push_back(a.toStringRef(a_storage));
+ if (!b.isTriviallyEmpty()) components.push_back(b.toStringRef(b_storage));
+ if (!c.isTriviallyEmpty()) components.push_back(c.toStringRef(c_storage));
+ if (!d.isTriviallyEmpty()) components.push_back(d.toStringRef(d_storage));
+
+ for (SmallVectorImpl<StringRef>::const_iterator i = components.begin(),
+ e = components.end();
+ i != e; ++i) {
+ bool path_has_sep = !path.empty() && is_separator(path[path.size() - 1]);
+ bool component_has_sep = !i->empty() && is_separator((*i)[0]);
+ bool is_root_name = has_root_name(*i);
+
+ if (path_has_sep) {
+ // Strip separators from beginning of component.
+ size_t loc = i->find_first_not_of(separators);
+ StringRef c = i->substr(loc);
+
+ // Append it.
+ path.append(c.begin(), c.end());
+ continue;
+ }
+
+ if (!component_has_sep && !(path.empty() || is_root_name)) {
+ // Add a separator.
+ path.push_back(prefered_separator);
+ }
+
+ path.append(i->begin(), i->end());
+ }
+}
+
+void append(SmallVectorImpl<char> &path,
+ const_iterator begin, const_iterator end) {
+ for (; begin != end; ++begin)
+ path::append(path, *begin);
+}
+
+const StringRef parent_path(StringRef path) {
+ size_t end_pos = parent_path_end(path);
+ if (end_pos == StringRef::npos)
+ return StringRef();
+ else
+ return path.substr(0, end_pos);
+}
+
+void remove_filename(SmallVectorImpl<char> &path) {
+ size_t end_pos = parent_path_end(StringRef(path.begin(), path.size()));
+ if (end_pos != StringRef::npos)
+ path.set_size(end_pos);
+}
+
+void replace_extension(SmallVectorImpl<char> &path, const Twine &extension) {
+ StringRef p(path.begin(), path.size());
+ SmallString<32> ext_storage;
+ StringRef ext = extension.toStringRef(ext_storage);
+
+ // Erase existing extension.
+ size_t pos = p.find_last_of('.');
+ if (pos != StringRef::npos && pos >= filename_pos(p))
+ path.set_size(pos);
+
+ // Append '.' if needed.
+ if (ext.size() > 0 && ext[0] != '.')
+ path.push_back('.');
+
+ // Append extension.
+ path.append(ext.begin(), ext.end());
+}
+
+void native(const Twine &path, SmallVectorImpl<char> &result) {
+ // Clear result.
+ result.clear();
+#ifdef LLVM_ON_WIN32
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+ result.reserve(p.size());
+ for (StringRef::const_iterator i = p.begin(),
+ e = p.end();
+ i != e;
+ ++i) {
+ if (*i == '/')
+ result.push_back('\\');
+ else
+ result.push_back(*i);
+ }
+#else
+ path.toVector(result);
+#endif
+}
+
+const StringRef filename(StringRef path) {
+ return *(--end(path));
+}
+
+const StringRef stem(StringRef path) {
+ StringRef fname = filename(path);
+ size_t pos = fname.find_last_of('.');
+ if (pos == StringRef::npos)
+ return fname;
+ else
+ if ((fname.size() == 1 && fname == ".") ||
+ (fname.size() == 2 && fname == ".."))
+ return fname;
+ else
+ return fname.substr(0, pos);
+}
+
+const StringRef extension(StringRef path) {
+ StringRef fname = filename(path);
+ size_t pos = fname.find_last_of('.');
+ if (pos == StringRef::npos)
+ return StringRef();
+ else
+ if ((fname.size() == 1 && fname == ".") ||
+ (fname.size() == 2 && fname == ".."))
+ return StringRef();
+ else
+ return fname.substr(pos);
+}
+
+bool is_separator(char value) {
+ switch(value) {
+#ifdef LLVM_ON_WIN32
+ case '\\': // fall through
+#endif
+ case '/': return true;
+ default: return false;
+ }
+}
+
+bool has_root_name(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !root_name(p).empty();
+}
+
+bool has_root_directory(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !root_directory(p).empty();
+}
+
+bool has_root_path(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !root_path(p).empty();
+}
+
+bool has_relative_path(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !relative_path(p).empty();
+}
+
+bool has_filename(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !filename(p).empty();
+}
+
+bool has_parent_path(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !parent_path(p).empty();
+}
+
+bool has_stem(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !stem(p).empty();
+}
+
+bool has_extension(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ return !extension(p).empty();
+}
+
+bool is_absolute(const Twine &path) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ bool rootDir = has_root_directory(p),
+#ifdef LLVM_ON_WIN32
+ rootName = has_root_name(p);
+#else
+ rootName = true;
+#endif
+
+ return rootDir && rootName;
+}
+
+bool is_relative(const Twine &path) {
+ return !is_absolute(path);
+}
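Taken together, the decomposition functions above give PathV2 its value semantics: every result is a StringRef slice of the input, with no allocation. A hypothetical tour over a POSIX-style path (Windows behavior differs where the #ifdefs note):

    #include "llvm/Support/PathV2.h"
    #include <cassert>

    int main() {
      llvm::StringRef p = "/usr/lib/libfoo.so.1";
      assert(llvm::sys::path::root_path(p) == "/");
      assert(llvm::sys::path::parent_path(p) == "/usr/lib");
      assert(llvm::sys::path::filename(p) == "libfoo.so.1");
      assert(llvm::sys::path::stem(p) == "libfoo.so");  // up to the last dot
      assert(llvm::sys::path::extension(p) == ".1");
      assert(llvm::sys::path::is_absolute(p));
    }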
+
+} // end namespace path
+
+namespace fs {
+
+error_code make_absolute(SmallVectorImpl<char> &path) {
+ StringRef p(path.data(), path.size());
+
+ bool rootName = path::has_root_name(p),
+ rootDirectory = path::has_root_directory(p);
+
+ // Already absolute.
+ if (rootName && rootDirectory)
+ return success;
+
+ // All of the following conditions will need the current directory.
+ SmallString<128> current_dir;
+ if (error_code ec = current_path(current_dir)) return ec;
+
+ // Relative path. Prepend the current directory.
+ if (!rootName && !rootDirectory) {
+ // Append path to the current directory.
+ path::append(current_dir, p);
+ // Set path to the result.
+ path.swap(current_dir);
+ return success;
+ }
+
+ if (!rootName && rootDirectory) {
+ StringRef cdrn = path::root_name(current_dir);
+ SmallString<128> curDirRootName(cdrn.begin(), cdrn.end());
+ path::append(curDirRootName, p);
+ // Set path to the result.
+ path.swap(curDirRootName);
+ return success;
+ }
+
+ if (rootName && !rootDirectory) {
+ StringRef pRootName = path::root_name(p);
+ StringRef bRootDirectory = path::root_directory(current_dir);
+ StringRef bRelativePath = path::relative_path(current_dir);
+ StringRef pRelativePath = path::relative_path(p);
+
+ SmallString<128> res;
+ path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
+ path.swap(res);
+ return success;
+ }
+
+ llvm_unreachable("All rootName and rootDirectory combinations should have "
+ "occurred above!");
+}
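make_absolute covers the four combinations of missing root name and root directory, consulting current_path() only when it must. A hypothetical caller; SmallString works here because it derives from SmallVectorImpl<char>:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/system_error.h"

    // Hypothetical use: resolve a possibly-relative path in place.
    bool absolutize(llvm::SmallString<128> &Path) {
      if (llvm::error_code ec = llvm::sys::fs::make_absolute(Path))
        return false;  // e.g. current_path() failed
      return true;
    }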
+
+error_code create_directories(const Twine &path, bool &existed) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ StringRef parent = path::parent_path(p);
+ bool parent_exists;
+
+ if (error_code ec = fs::exists(parent, parent_exists)) return ec;
+
+  if (!parent_exists)
+    if (error_code ec = create_directories(parent, existed)) return ec;
+
+ return create_directory(p, existed);
+}
+
+bool exists(file_status status) {
+ return status_known(status) && status.type() != file_type::file_not_found;
+}
+
+bool status_known(file_status s) {
+ return s.type() != file_type::status_error;
+}
+
+bool is_directory(file_status status) {
+ return status.type() == file_type::directory_file;
+}
+
+error_code is_directory(const Twine &path, bool &result) {
+ file_status st;
+ if (error_code ec = status(path, st))
+ return ec;
+ result = is_directory(st);
+ return success;
+}
+
+bool is_regular_file(file_status status) {
+ return status.type() == file_type::regular_file;
+}
+
+error_code is_regular_file(const Twine &path, bool &result) {
+ file_status st;
+ if (error_code ec = status(path, st))
+ return ec;
+ result = is_regular_file(st);
+ return success;
+}
+
+bool is_symlink(file_status status) {
+ return status.type() == file_type::symlink_file;
+}
+
+error_code is_symlink(const Twine &path, bool &result) {
+ file_status st;
+ if (error_code ec = status(path, st))
+ return ec;
+ result = is_symlink(st);
+ return success;
+}
+
+bool is_other(file_status status) {
+ return exists(status) &&
+ !is_regular_file(status) &&
+ !is_directory(status) &&
+ !is_symlink(status);
+}
+
+void directory_entry::replace_filename(const Twine &filename, file_status st,
+ file_status symlink_st) {
+ SmallString<128> path(Path.begin(), Path.end());
+ path::remove_filename(path);
+ path::append(path, filename);
+ Path = path.str();
+ Status = st;
+ SymlinkStatus = symlink_st;
+}
+
+error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
+ SmallString<32> MagicStorage;
+ StringRef Magic = magic.toStringRef(MagicStorage);
+ SmallString<32> Buffer;
+
+ if (error_code ec = get_magic(path, Magic.size(), Buffer)) {
+ if (ec == errc::value_too_large) {
+ // Magic.size() > file_size(Path).
+ result = false;
+ return success;
+ }
+ return ec;
+ }
+
+ result = Magic == Buffer;
+ return success;
+}
+
+error_code identify_magic(const Twine &path, LLVMFileType &result) {
+ SmallString<32> Magic;
+ error_code ec = get_magic(path, Magic.capacity(), Magic);
+ if (ec && ec != errc::value_too_large)
+ return ec;
+
+ result = IdentifyFileType(Magic.data(), Magic.size());
+ return success;
+}
+
+namespace {
+error_code remove_all_r(StringRef path, file_type ft, uint32_t &count) {
+ if (ft == file_type::directory_file) {
+ // This code would be a lot better with exceptions ;/.
+ error_code ec;
+ for (directory_iterator i(path, ec), e; i != e; i.increment(ec)) {
+ if (ec) return ec;
+ file_status st;
+ if (error_code ec = i->status(st)) return ec;
+ if (error_code ec = remove_all_r(i->path(), st.type(), count)) return ec;
+ }
+ bool obviously_this_exists;
+ if (error_code ec = remove(path, obviously_this_exists)) return ec;
+ assert(obviously_this_exists);
+ ++count; // Include the directory itself in the items removed.
+ } else {
+ bool obviously_this_exists;
+ if (error_code ec = remove(path, obviously_this_exists)) return ec;
+ assert(obviously_this_exists);
+ ++count;
+ }
+
+ return success;
+}
+} // end unnamed namespace
+
+error_code remove_all(const Twine &path, uint32_t &num_removed) {
+ SmallString<128> path_storage;
+ StringRef p = path.toStringRef(path_storage);
+
+ file_status fs;
+ if (error_code ec = status(path, fs))
+ return ec;
+ num_removed = 0;
+ return remove_all_r(p, fs.type(), num_removed);
+}
+
+error_code directory_entry::status(file_status &result) const {
+ return fs::status(Path, result);
+}
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
+
+// Include the truly platform-specific parts.
+#if defined(LLVM_ON_UNIX)
+#include "Unix/PathV2.inc"
+#endif
+#if defined(LLVM_ON_WIN32)
+#include "Windows/PathV2.inc"
+#endif
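
The make_absolute() function above is a four-way decision table on (root name, root directory). A minimal POSIX-flavored sketch of its common branch — Unix paths have no root name, so a path is absolute iff it begins with '/' — using only the standard library; this is illustrative, not the LLVM API:

// Sketch of the relative-path branch of make_absolute(), POSIX semantics
// only. The two Windows-only branches (root name without root directory
// and the reverse) have no Unix analogue, which is also why is_absolute()
// above hard-codes rootName = true outside LLVM_ON_WIN32.
#include <iostream>
#include <string>
#include <unistd.h>

std::string makeAbsolute(const std::string &p) {
  if (!p.empty() && p[0] == '/')
    return p;                              // Already absolute.
  char cwd[4096];
  if (::getcwd(cwd, sizeof(cwd)) == 0)
    return p;                              // Give up on error.
  std::string result(cwd);
  if (result.empty() || result[result.size() - 1] != '/')
    result += '/';
  return result + p;                       // Prepend the current directory.
}

int main() {
  std::cout << makeAbsolute("a/b.txt") << '\n';    // e.g. /home/me/a/b.txt
  std::cout << makeAbsolute("/etc/fstab") << '\n'; // returned unchanged
}
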
diff --git a/contrib/llvm/lib/Support/PluginLoader.cpp b/contrib/llvm/lib/Support/PluginLoader.cpp
index 36caecf..2924cfa 100644
--- a/contrib/llvm/lib/Support/PluginLoader.cpp
+++ b/contrib/llvm/lib/Support/PluginLoader.cpp
@@ -15,8 +15,8 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/DynamicLibrary.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Mutex.h"
#include <vector>
using namespace llvm;
diff --git a/contrib/llvm/lib/Support/PrettyStackTrace.cpp b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
index 3c8a108..a9f4709 100644
--- a/contrib/llvm/lib/Support/PrettyStackTrace.cpp
+++ b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
@@ -15,8 +15,8 @@
#include "llvm/Config/config.h" // Get autoconf configuration settings
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-#include "llvm/System/ThreadLocal.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/ThreadLocal.h"
#include "llvm/ADT/SmallString.h"
#ifdef HAVE_CRASHREPORTERCLIENT_H
@@ -55,7 +55,7 @@ static void PrintCurStackTrace(raw_ostream &OS) {
}
// Integrate with crash reporter libraries.
-#if defined (__APPLE__) && defined (HAVE_CRASHREPORTERCLIENT_H)
+#if defined (__APPLE__) && HAVE_CRASHREPORTERCLIENT_H
// If any clients of llvm try to link to libCrashReporterClient.a themselves,
// only one crash info struct will be used.
extern "C" {
@@ -64,7 +64,7 @@ struct crashreporter_annotations_t gCRAnnotations
__attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION)))
= { CRASHREPORTER_ANNOTATIONS_VERSION, 0, 0, 0, 0 };
}
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) && HAVE_CRASHREPORTER_INFO
static const char *__crashreporter_info__ = 0;
asm(".desc ___crashreporter_info__, 0x10");
#endif
@@ -86,11 +86,11 @@ static void CrashHandler(void *) {
}
if (!TmpStr.empty()) {
-#ifndef HAVE_CRASHREPORTERCLIENT_H
- __crashreporter_info__ = strdup(std::string(TmpStr.str()).c_str());
-#else
+#ifdef HAVE_CRASHREPORTERCLIENT_H
// Cast to void to avoid warning.
(void)CRSetCrashLogMessage(std::string(TmpStr.str()).c_str());
+#elif HAVE_CRASHREPORTER_INFO
+ __crashreporter_info__ = strdup(std::string(TmpStr.str()).c_str());
#endif
errs() << TmpStr.str();
}
@@ -107,7 +107,7 @@ static bool RegisterCrashPrinter() {
PrettyStackTraceEntry::PrettyStackTraceEntry() {
// The first time this is called, we register the crash printer.
static bool HandlerRegistered = RegisterCrashPrinter();
- HandlerRegistered = HandlerRegistered;
+ (void)HandlerRegistered;
// Link ourselves.
NextEntry = PrettyStackTraceHead.get();
@@ -131,4 +131,3 @@ void PrettyStackTraceProgram::print(raw_ostream &OS) const {
OS << ArgV[i] << ' ';
OS << '\n';
}
-
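
The HandlerRegistered change above is the function-local-static registration idiom: the initializer runs the first time control passes the declaration, and the (void) cast replaces the old self-assignment, which newer compilers warn about. A minimal sketch with illustrative names:

// One-time registration via a function-local static.
#include <iostream>

static bool registerHandler() {
  std::cout << "crash printer registered\n";  // Runs exactly once.
  return true;
}

struct StackTraceEntry {
  StackTraceEntry() {
    static bool Registered = registerHandler();
    (void)Registered;  // Exists only to sequence the registration.
  }
};

int main() {
  StackTraceEntry a, b, c;  // Prints the message once, not three times.
}
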
diff --git a/contrib/llvm/lib/System/Process.cpp b/contrib/llvm/lib/Support/Process.cpp
index e93b2af..88ca7c3 100644
--- a/contrib/llvm/lib/System/Process.cpp
+++ b/contrib/llvm/lib/Support/Process.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Process.h"
+#include "llvm/Support/Process.h"
#include "llvm/Config/config.h"
namespace llvm {
@@ -29,5 +29,5 @@ using namespace sys;
#include "Unix/Process.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/Process.inc"
+#include "Windows/Process.inc"
#endif
diff --git a/contrib/llvm/lib/System/Program.cpp b/contrib/llvm/lib/Support/Program.cpp
index cd58c2c..01860b0 100644
--- a/contrib/llvm/lib/System/Program.cpp
+++ b/contrib/llvm/lib/Support/Program.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Program.h"
+#include "llvm/Support/Program.h"
#include "llvm/Config/config.h"
using namespace llvm;
using namespace sys;
@@ -31,7 +31,7 @@ Program::ExecuteAndWait(const Path& path,
std::string* ErrMsg) {
Program prg;
if (prg.Execute(path, args, envp, redirects, memoryLimit, ErrMsg))
- return prg.Wait(secondsToWait, ErrMsg);
+ return prg.Wait(path, secondsToWait, ErrMsg);
else
return -1;
}
@@ -52,5 +52,5 @@ Program::ExecuteNoWait(const Path& path,
#include "Unix/Program.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/Program.inc"
+#include "Windows/Program.inc"
#endif
diff --git a/contrib/llvm/lib/System/RWMutex.cpp b/contrib/llvm/lib/Support/RWMutex.cpp
index deb0470..fc02f9c 100644
--- a/contrib/llvm/lib/System/RWMutex.cpp
+++ b/contrib/llvm/lib/Support/RWMutex.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
-#include "llvm/System/RWMutex.h"
+#include "llvm/Support/RWMutex.h"
#include <cstring>
//===----------------------------------------------------------------------===//
@@ -150,7 +150,7 @@ RWMutexImpl::writer_release()
#elif defined(LLVM_ON_UNIX)
#include "Unix/RWMutex.inc"
#elif defined( LLVM_ON_WIN32)
-#include "Win32/RWMutex.inc"
+#include "Windows/RWMutex.inc"
#else
#warning Neither LLVM_ON_UNIX nor LLVM_ON_WIN32 was set in System/RWMutex.cpp
#endif
diff --git a/contrib/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp b/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp
index 73b484c..d638301 100644
--- a/contrib/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp
+++ b/contrib/llvm/lib/Support/SearchForAddressOfSpecialSymbol.cpp
@@ -32,7 +32,6 @@ static void *DoSearch(const char* symbolName) {
EXPLICIT_SYMBOL(__ashrdi3);
EXPLICIT_SYMBOL(__cmpdi2);
EXPLICIT_SYMBOL(__divdi3);
- EXPLICIT_SYMBOL(__eprintf);
EXPLICIT_SYMBOL(__fixdfdi);
EXPLICIT_SYMBOL(__fixsfdi);
EXPLICIT_SYMBOL(__fixunsdfdi);
@@ -43,6 +42,16 @@ static void *DoSearch(const char* symbolName) {
EXPLICIT_SYMBOL(__moddi3);
EXPLICIT_SYMBOL(__udivdi3);
EXPLICIT_SYMBOL(__umoddi3);
+
+ // __eprintf is sometimes used for assert() handling on x86.
+ //
+ // FIXME: Currently disabled when using Clang, as we don't always have our
+ // runtime support libraries available.
+#ifndef __clang__
+#ifdef __i386__
+ EXPLICIT_SYMBOL(__eprintf);
+#endif
+#endif
}
#endif
diff --git a/contrib/llvm/lib/System/Signals.cpp b/contrib/llvm/lib/Support/Signals.cpp
index d345b0a..a3af37d 100644
--- a/contrib/llvm/lib/System/Signals.cpp
+++ b/contrib/llvm/lib/Support/Signals.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Signals.h"
+#include "llvm/Support/Signals.h"
#include "llvm/Config/config.h"
namespace llvm {
@@ -30,5 +30,5 @@ using namespace sys;
#include "Unix/Signals.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/Signals.inc"
+#include "Windows/Signals.inc"
#endif
diff --git a/contrib/llvm/lib/Support/SourceMgr.cpp b/contrib/llvm/lib/Support/SourceMgr.cpp
index da5681c..ef09916 100644
--- a/contrib/llvm/lib/Support/SourceMgr.cpp
+++ b/contrib/llvm/lib/Support/SourceMgr.cpp
@@ -13,9 +13,12 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
using namespace llvm;
namespace {
@@ -47,18 +50,18 @@ SourceMgr::~SourceMgr() {
/// ~0, otherwise it returns the buffer ID of the stacked file.
unsigned SourceMgr::AddIncludeFile(const std::string &Filename,
SMLoc IncludeLoc) {
-
- MemoryBuffer *NewBuf = MemoryBuffer::getFile(Filename.c_str());
+ OwningPtr<MemoryBuffer> NewBuf;
+ MemoryBuffer::getFile(Filename.c_str(), NewBuf);
// If the file didn't exist directly, see if it's in an include path.
for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBuf; ++i) {
std::string IncFile = IncludeDirectories[i] + "/" + Filename;
- NewBuf = MemoryBuffer::getFile(IncFile.c_str());
+ MemoryBuffer::getFile(IncFile.c_str(), NewBuf);
}
if (NewBuf == 0) return ~0U;
- return AddNewSourceBuffer(NewBuf, IncludeLoc);
+ return AddNewSourceBuffer(NewBuf.take(), IncludeLoc);
}
@@ -135,7 +138,7 @@ void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
///
/// @param Type - If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
-SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const std::string &Msg,
+SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const Twine &Msg,
const char *Type, bool ShowLine) const {
// First thing to do: find the current buffer containing the specified
@@ -162,27 +165,25 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const std::string &Msg,
}
std::string PrintedMsg;
- if (Type) {
- PrintedMsg = Type;
- PrintedMsg += ": ";
- }
- PrintedMsg += Msg;
+ raw_string_ostream OS(PrintedMsg);
+ if (Type)
+ OS << Type << ": ";
+ OS << Msg;
return SMDiagnostic(*this, Loc,
CurMB->getBufferIdentifier(), FindLineNumber(Loc, CurBuf),
- Loc.getPointer()-LineStart, PrintedMsg,
+ Loc.getPointer()-LineStart, OS.str(),
LineStr, ShowLine);
}
-void SourceMgr::PrintMessage(SMLoc Loc, const std::string &Msg,
+void SourceMgr::PrintMessage(SMLoc Loc, const Twine &Msg,
const char *Type, bool ShowLine) const {
// Report the message with the diagnostic handler if present.
if (DiagHandler) {
- DiagHandler(GetMessage(Loc, Msg, Type, ShowLine),
- DiagContext, DiagLocCookie);
+ DiagHandler(GetMessage(Loc, Msg, Type, ShowLine), DiagContext);
return;
}
-
+
raw_ostream &OS = errs();
int CurBuf = FindBufferContainingLoc(Loc);
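
The GetMessage() hunk above drops manual string concatenation in favor of a stream so a Twine can be printed directly. A rough standard-library analogue of the same shape (std::ostringstream standing in for LLVM's raw_string_ostream):

#include <iostream>
#include <sstream>
#include <string>

std::string formatMessage(const char *Type, const std::string &Msg) {
  std::ostringstream OS;
  if (Type)
    OS << Type << ": ";  // Optional "error: " / "warning: " prefix.
  OS << Msg;
  return OS.str();
}

int main() {
  std::cout << formatMessage("error", "expected ';'") << '\n';
  std::cout << formatMessage(nullptr, "candidate found here") << '\n';
}
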
diff --git a/contrib/llvm/lib/Support/Statistic.cpp b/contrib/llvm/lib/Support/Statistic.cpp
index e32ab74..f0ed626 100644
--- a/contrib/llvm/lib/Support/Statistic.cpp
+++ b/contrib/llvm/lib/Support/Statistic.cpp
@@ -26,7 +26,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <cstring>
diff --git a/contrib/llvm/lib/Support/StringMap.cpp b/contrib/llvm/lib/Support/StringMap.cpp
index 6f28277..90ec299 100644
--- a/contrib/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm/lib/Support/StringMap.cpp
@@ -155,7 +155,7 @@ int StringMapImpl::FindKey(StringRef Key) const {
void StringMapImpl::RemoveKey(StringMapEntryBase *V) {
const char *VStr = (char*)V + ItemSize;
StringMapEntryBase *V2 = RemoveKey(StringRef(VStr, V->getKeyLength()));
- V2 = V2;
+ (void)V2;
assert(V == V2 && "Didn't find key?");
}
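
The (void)V2 fix above handles a variable that is only read inside assert(): once NDEBUG removes the assert, the variable is unused and the compiler warns. A minimal reproduction of the pattern and the fix:

#include <cassert>

int storeChecked(int x) {
  int doubled = x * 2;
  (void)doubled;  // Keeps -DNDEBUG builds free of -Wunused-variable.
  assert(doubled == x + x && "arithmetic sanity check");
  return x;
}

int main() { return storeChecked(21) == 21 ? 0 : 1; }
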
diff --git a/contrib/llvm/lib/Support/StringRef.cpp b/contrib/llvm/lib/Support/StringRef.cpp
index 46f26b2..5398051 100644
--- a/contrib/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm/lib/Support/StringRef.cpp
@@ -9,6 +9,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/OwningPtr.h"
#include <bitset>
using namespace llvm;
@@ -67,8 +68,9 @@ int StringRef::compare_numeric(StringRef RHS) const {
}
// Compute the edit distance between the two given strings.
-unsigned StringRef::edit_distance(llvm::StringRef Other,
- bool AllowReplacements) {
+unsigned StringRef::edit_distance(llvm::StringRef Other,
+ bool AllowReplacements,
+ unsigned MaxEditDistance) {
// The algorithm implemented below is the "classic"
// dynamic-programming algorithm for computing the Levenshtein
// distance, which is described here:
@@ -83,17 +85,21 @@ unsigned StringRef::edit_distance(llvm::StringRef Other,
const unsigned SmallBufferSize = 64;
unsigned SmallBuffer[SmallBufferSize];
- unsigned *Allocated = 0;
+ llvm::OwningArrayPtr<unsigned> Allocated;
unsigned *previous = SmallBuffer;
- if (2*(n + 1) > SmallBufferSize)
- Allocated = previous = new unsigned [2*(n+1)];
+ if (2*(n + 1) > SmallBufferSize) {
+ previous = new unsigned [2*(n+1)];
+ Allocated.reset(previous);
+ }
unsigned *current = previous + (n + 1);
-
- for (unsigned i = 0; i <= n; ++i)
+
+ for (unsigned i = 0; i <= n; ++i)
previous[i] = i;
for (size_type y = 1; y <= m; ++y) {
current[0] = y;
+ unsigned BestThisRow = current[0];
+
for (size_type x = 1; x <= n; ++x) {
if (AllowReplacements) {
current[x] = min(previous[x-1] + ((*this)[y-1] == Other[x-1]? 0u:1u),
@@ -103,16 +109,18 @@ unsigned StringRef::edit_distance(llvm::StringRef Other,
if ((*this)[y-1] == Other[x-1]) current[x] = previous[x-1];
else current[x] = min(current[x-1], previous[x]) + 1;
}
+ BestThisRow = min(BestThisRow, current[x]);
}
-
+
+ if (MaxEditDistance && BestThisRow > MaxEditDistance)
+ return MaxEditDistance + 1;
+
unsigned *tmp = current;
current = previous;
previous = tmp;
}
unsigned Result = previous[n];
- delete [] Allocated;
-
return Result;
}
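
The edit_distance() changes above keep the classic two-row dynamic program but add an early exit: the minimum entry of a row never decreases from one row to the next, so once it exceeds MaxEditDistance no later row can recover. A standalone sketch of the same algorithm with std::string in place of StringRef:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

unsigned editDistance(const std::string &a, const std::string &b,
                      unsigned maxDist = 0) {
  const size_t n = b.size();
  std::vector<unsigned> prev(n + 1), cur(n + 1);
  for (size_t i = 0; i <= n; ++i)
    prev[i] = i;                 // Distance from the empty prefix of a.
  for (size_t y = 1; y <= a.size(); ++y) {
    cur[0] = y;
    unsigned bestThisRow = cur[0];
    for (size_t x = 1; x <= n; ++x) {
      cur[x] = std::min({prev[x - 1] + (a[y - 1] == b[x - 1] ? 0u : 1u),
                         prev[x] + 1, cur[x - 1] + 1});
      bestThisRow = std::min(bestThisRow, cur[x]);
    }
    if (maxDist && bestThisRow > maxDist)
      return maxDist + 1;        // "Too far apart" is all the caller needs.
    std::swap(prev, cur);        // Reuse the two rows.
  }
  return prev[n];
}

int main() {
  std::cout << editDistance("kitten", "sitting") << '\n';     // 3
  std::cout << editDistance("kitten", "sitting", 1) << '\n';  // 2, i.e. cap+1
}
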
@@ -192,6 +200,21 @@ StringRef::size_type StringRef::find_first_not_of(StringRef Chars,
return npos;
}
+/// find_last_of - Find the last character in the string that is in \arg C,
+/// or npos if not found.
+///
+/// Note: O(size() + Chars.size())
+StringRef::size_type StringRef::find_last_of(StringRef Chars,
+ size_t From) const {
+ std::bitset<1 << CHAR_BIT> CharBits;
+ for (size_type i = 0; i != Chars.size(); ++i)
+ CharBits.set((unsigned char)Chars[i]);
+
+ for (size_type i = min(From, Length) - 1, e = -1; i != e; --i)
+ if (CharBits.test((unsigned char)Data[i]))
+ return i;
+ return npos;
+}
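
find_last_of() above builds a 256-entry bitset from the search set once and then scans the string backwards, so the cost is O(size() + Chars.size()) instead of O(size() * Chars.size()); its `e = -1` relies on unsigned wraparound so the loop still visits index 0. The same idea standalone, with an explicit decrement instead of the wraparound trick:

#include <bitset>
#include <climits>
#include <iostream>
#include <string>

size_t findLastOf(const std::string &s, const std::string &chars) {
  std::bitset<1 << CHAR_BIT> inSet;      // One bit per possible byte value.
  for (size_t i = 0; i != chars.size(); ++i)
    inSet.set((unsigned char)chars[i]);
  for (size_t i = s.size(); i-- > 0;)    // Backward walk down to index 0.
    if (inSet.test((unsigned char)s[i]))
      return i;
  return std::string::npos;
}

int main() {
  std::cout << findLastOf("a/b/c.txt", "/.") << '\n';  // 5 (the '.')
}
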
//===----------------------------------------------------------------------===//
// Helpful Algorithms
@@ -232,10 +255,10 @@ static bool GetAsUnsignedInteger(StringRef Str, unsigned Radix,
// Autosense radix if not specified.
if (Radix == 0)
Radix = GetAutoSenseRadix(Str);
-
+
// Empty strings (after the radix autosense) are invalid.
if (Str.empty()) return true;
-
+
// Parse all the bytes of the string given this radix. Watch for overflow.
Result = 0;
while (!Str.empty()) {
@@ -248,23 +271,23 @@ static bool GetAsUnsignedInteger(StringRef Str, unsigned Radix,
CharVal = Str[0]-'A'+10;
else
return true;
-
+
// If the parsed value is larger than the integer radix, the string is
// invalid.
if (CharVal >= Radix)
return true;
-
+
// Add in this character.
unsigned long long PrevResult = Result;
Result = Result*Radix+CharVal;
-
+
// Check for overflow.
if (Result < PrevResult)
return true;
Str = Str.substr(1);
}
-
+
return false;
}
@@ -275,7 +298,7 @@ bool StringRef::getAsInteger(unsigned Radix, unsigned long long &Result) const {
bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
unsigned long long ULLVal;
-
+
// Handle positive strings first.
if (empty() || front() != '-') {
if (GetAsUnsignedInteger(*this, Radix, ULLVal) ||
@@ -285,7 +308,7 @@ bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
Result = ULLVal;
return false;
}
-
+
// Get the positive part of the value.
if (GetAsUnsignedInteger(substr(1), Radix, ULLVal) ||
// Reject values so large they'd overflow as negative signed, but allow
@@ -293,7 +316,7 @@ bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
// on signed overflow.
(long long)-ULLVal > 0)
return true;
-
+
Result = -ULLVal;
return false;
}
@@ -314,7 +337,7 @@ bool StringRef::getAsInteger(unsigned Radix, unsigned &Result) const {
return true;
Result = Val;
return false;
-}
+}
bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
StringRef Str = *this;
@@ -324,7 +347,7 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
Radix = GetAutoSenseRadix(Str);
assert(Radix > 1 && Radix <= 36);
-
+
// Empty strings (after the radix autosense) are invalid.
if (Str.empty()) return true;
@@ -348,7 +371,7 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
if (BitWidth < Result.getBitWidth())
BitWidth = Result.getBitWidth(); // don't shrink the result
else
- Result.zext(BitWidth);
+ Result = Result.zext(BitWidth);
APInt RadixAP, CharAP; // unused unless !IsPowerOf2Radix
if (!IsPowerOf2Radix) {
@@ -369,12 +392,12 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
CharVal = Str[0]-'A'+10;
else
return true;
-
+
// If the parsed value is larger than the integer radix, the string is
// invalid.
if (CharVal >= Radix)
return true;
-
+
// Add in this character.
if (IsPowerOf2Radix) {
Result <<= Log2Radix;
@@ -387,6 +410,6 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
Str = Str.substr(1);
}
-
+
return false;
}
diff --git a/contrib/llvm/lib/Support/SystemUtils.cpp b/contrib/llvm/lib/Support/SystemUtils.cpp
index c8b260c..54b5e97 100644
--- a/contrib/llvm/lib/Support/SystemUtils.cpp
+++ b/contrib/llvm/lib/Support/SystemUtils.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/SystemUtils.h"
-#include "llvm/System/Process.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -23,43 +23,33 @@ bool llvm::CheckBitcodeOutputToConsole(raw_ostream &stream_to_check,
if (stream_to_check.is_displayed()) {
if (print_warning) {
errs() << "WARNING: You're attempting to print out a bitcode file.\n"
- << "This is inadvisable as it may cause display problems. If\n"
- << "you REALLY want to taste LLVM bitcode first-hand, you\n"
- << "can force output with the `-f' option.\n\n";
+ "This is inadvisable as it may cause display problems. If\n"
+ "you REALLY want to taste LLVM bitcode first-hand, you\n"
+ "can force output with the `-f' option.\n\n";
}
return true;
}
return false;
}
-/// FindExecutable - Find a named executable, giving the argv[0] of program
-/// being executed. This allows us to find another LLVM tool if it is built in
-/// the same directory. If the executable cannot be found, return an
-/// empty string.
+/// PrependMainExecutablePath - Prepend the path to the program being executed
+/// to \p ExeName, given the value of argv[0] and the address of main()
+/// itself. This allows us to find another LLVM tool if it is built in the same
+/// directory. An empty string is returned on error; note that this function
+/// just manipulates the path and doesn't check for executability.
/// @brief Find a named executable.
-#undef FindExecutable // needed on windows :(
-sys::Path llvm::FindExecutable(const std::string &ExeName,
- const char *Argv0, void *MainAddr) {
+sys::Path llvm::PrependMainExecutablePath(const std::string &ExeName,
+ const char *Argv0, void *MainAddr) {
// Check the directory that the calling program is in. We can do
// this if ProgramPath contains at least one / character, indicating that it
// is a relative path to the executable itself.
sys::Path Result = sys::Path::GetMainExecutable(Argv0, MainAddr);
Result.eraseComponent();
+
if (!Result.isEmpty()) {
Result.appendComponent(ExeName);
- if (Result.canExecute())
- return Result;
- // If the path is absolute (and it usually is), call FindProgramByName to
- // allow it to try platform-specific logic, such as appending a .exe suffix
- // on Windows. Don't do this if we somehow have a relative path, because
- // we don't want to go searching the PATH and accidentally find an unrelated
- // version of the program.
- if (Result.isAbsolute()) {
- Result = sys::Program::FindProgramByName(Result.str());
- if (!Result.empty())
- return Result;
- }
+ Result.appendSuffix(sys::Path::GetEXESuffix());
}
- return sys::Path();
+ return Result;
}
diff --git a/contrib/llvm/lib/Support/TargetRegistry.cpp b/contrib/llvm/lib/Support/TargetRegistry.cpp
index 5896447..293a5d7 100644
--- a/contrib/llvm/lib/Support/TargetRegistry.cpp
+++ b/contrib/llvm/lib/Support/TargetRegistry.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetRegistry.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include <cassert>
using namespace llvm;
diff --git a/contrib/llvm/lib/System/ThreadLocal.cpp b/contrib/llvm/lib/Support/ThreadLocal.cpp
index f6a55a1..6b43048 100644
--- a/contrib/llvm/lib/System/ThreadLocal.cpp
+++ b/contrib/llvm/lib/Support/ThreadLocal.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
-#include "llvm/System/ThreadLocal.h"
+#include "llvm/Support/ThreadLocal.h"
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only TRULY operating system
@@ -77,9 +77,8 @@ void ThreadLocalImpl::removeInstance() {
#elif defined(LLVM_ON_UNIX)
#include "Unix/ThreadLocal.inc"
#elif defined( LLVM_ON_WIN32)
-#include "Win32/ThreadLocal.inc"
+#include "Windows/ThreadLocal.inc"
#else
#warning Neither LLVM_ON_UNIX nor LLVM_ON_WIN32 was set in System/ThreadLocal.cpp
#endif
#endif
-
diff --git a/contrib/llvm/lib/System/Threading.cpp b/contrib/llvm/lib/Support/Threading.cpp
index 466c468..2957956 100644
--- a/contrib/llvm/lib/System/Threading.cpp
+++ b/contrib/llvm/lib/Support/Threading.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/System/Threading.cpp- Control multithreading mode --*- C++ -*-==//
+//===-- llvm/Support/Threading.cpp- Control multithreading mode --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Threading.h"
-#include "llvm/System/Atomic.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/Config/config.h"
#include <cassert>
@@ -28,7 +28,7 @@ bool llvm::llvm_start_multithreaded() {
assert(!multithreaded_mode && "Already multithreaded!");
multithreaded_mode = true;
global_lock = new sys::Mutex(true);
-
+
// We fence here to ensure that all initialization is complete BEFORE we
// return from llvm_start_multithreaded().
sys::MemoryFence();
@@ -41,11 +41,11 @@ bool llvm::llvm_start_multithreaded() {
void llvm::llvm_stop_multithreaded() {
#ifdef LLVM_MULTITHREADED
assert(multithreaded_mode && "Not currently multithreaded!");
-
+
// We fence here to ensure that all threaded operations are complete BEFORE we
// return from llvm_stop_multithreaded().
sys::MemoryFence();
-
+
multithreaded_mode = false;
delete global_lock;
#endif
@@ -62,3 +62,55 @@ void llvm::llvm_acquire_global_lock() {
void llvm::llvm_release_global_lock() {
if (multithreaded_mode) global_lock->release();
}
+
+#if defined(LLVM_MULTITHREADED) && defined(HAVE_PTHREAD_H)
+#include <pthread.h>
+
+struct ThreadInfo {
+ void (*UserFn)(void *);
+ void *UserData;
+};
+static void *ExecuteOnThread_Dispatch(void *Arg) {
+ ThreadInfo *TI = reinterpret_cast<ThreadInfo*>(Arg);
+ TI->UserFn(TI->UserData);
+ return 0;
+}
+
+void llvm::llvm_execute_on_thread(void (*Fn)(void*), void *UserData,
+ unsigned RequestedStackSize) {
+ ThreadInfo Info = { Fn, UserData };
+ pthread_attr_t Attr;
+ pthread_t Thread;
+
+ // Construct the attributes object.
+ if (::pthread_attr_init(&Attr) != 0)
+ return;
+
+ // Set the requested stack size, if given.
+ if (RequestedStackSize != 0) {
+ if (::pthread_attr_setstacksize(&Attr, RequestedStackSize) != 0)
+ goto error;
+ }
+
+ // Construct and execute the thread.
+ if (::pthread_create(&Thread, &Attr, ExecuteOnThread_Dispatch, &Info) != 0)
+ goto error;
+
+ // Wait for the thread and clean up.
+ ::pthread_join(Thread, 0);
+
+ error:
+ ::pthread_attr_destroy(&Attr);
+}
+
+#else
+
+// No non-pthread implementation yet: just run the function synchronously.
+
+void llvm::llvm_execute_on_thread(void (*Fn)(void*), void *UserData,
+ unsigned RequestedStackSize) {
+ (void) RequestedStackSize;
+ Fn(UserData);
+}
+
+#endif
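
llvm_execute_on_thread() above uses the standard pthreads trampoline pattern: pthread_create() only accepts a void *(*)(void *) entry point, so the typed callback and its argument travel through a struct that the trampoline unpacks. A self-contained version with illustrative names (compile with -lpthread):

#include <iostream>
#include <pthread.h>

struct ThreadInfo {
  void (*UserFn)(void *);
  void *UserData;
};

static void *dispatch(void *arg) {
  ThreadInfo *TI = static_cast<ThreadInfo *>(arg);
  TI->UserFn(TI->UserData);  // Unpack and invoke the typed callback.
  return nullptr;
}

static void greet(void *data) {
  std::cout << "hello from " << static_cast<const char *>(data) << '\n';
}

int main() {
  static char name[] = "worker thread";
  ThreadInfo Info = {greet, name};
  pthread_t Thread;
  if (pthread_create(&Thread, nullptr, dispatch, &Info) != 0)
    return 1;
  // Join before Info leaves scope: the new thread reads through the pointer.
  pthread_join(Thread, nullptr);
}
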
diff --git a/contrib/llvm/lib/System/TimeValue.cpp b/contrib/llvm/lib/Support/TimeValue.cpp
index cf4984c..1a0f7bc 100644
--- a/contrib/llvm/lib/System/TimeValue.cpp
+++ b/contrib/llvm/lib/Support/TimeValue.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/TimeValue.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Config/config.h"
namespace llvm {
@@ -53,6 +53,5 @@ TimeValue::normalize( void ) {
#include "Unix/TimeValue.inc"
#endif
#ifdef LLVM_ON_WIN32
-#include "Win32/TimeValue.inc"
+#include "Windows/TimeValue.inc"
#endif
-
diff --git a/contrib/llvm/lib/Support/Timer.cpp b/contrib/llvm/lib/Support/Timer.cpp
index 44ee177..a9ed5ee 100644
--- a/contrib/llvm/lib/Support/Timer.cpp
+++ b/contrib/llvm/lib/Support/Timer.cpp
@@ -17,8 +17,8 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Format.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/Process.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Process.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Support/ToolOutputFile.cpp b/contrib/llvm/lib/Support/ToolOutputFile.cpp
new file mode 100644
index 0000000..e7ca927
--- /dev/null
+++ b/contrib/llvm/lib/Support/ToolOutputFile.cpp
@@ -0,0 +1,43 @@
+//===--- ToolOutputFile.cpp - Implement the tool_output_file class --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements the tool_output_file class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Signals.h"
+using namespace llvm;
+
+tool_output_file::CleanupInstaller::CleanupInstaller(const char *filename)
+ : Filename(filename), Keep(false) {
+ // Arrange for the file to be deleted if the process is killed.
+ if (Filename != "-")
+ sys::RemoveFileOnSignal(sys::Path(Filename));
+}
+
+tool_output_file::CleanupInstaller::~CleanupInstaller() {
+ // Delete the file if the client hasn't told us not to.
+ if (!Keep && Filename != "-")
+ sys::Path(Filename).eraseFromDisk();
+
+ // Ok, the file is successfully written and closed, or deleted. There's no
+ // further need to clean it up on signals.
+ if (Filename != "-")
+ sys::DontRemoveFileOnSignal(sys::Path(Filename));
+}
+
+tool_output_file::tool_output_file(const char *filename, std::string &ErrorInfo,
+ unsigned Flags)
+ : Installer(filename),
+ OS(filename, ErrorInfo, Flags) {
+ // If open fails, no cleanup is needed.
+ if (!ErrorInfo.empty())
+ Installer.Keep = true;
+}
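
The member order in tool_output_file is deliberate: Installer is declared before OS, so the cleanup hook is installed before the file opens and fires only after the stream has been destroyed (members construct in declaration order and destruct in reverse). A sketch of the same RAII ordering with stand-in types:

#include <cstdio>
#include <fstream>
#include <iostream>
#include <string>

struct CleanupInstaller {
  std::string Filename;
  bool Keep;
  explicit CleanupInstaller(const std::string &F) : Filename(F), Keep(false) {}
  ~CleanupInstaller() {
    if (!Keep)
      std::remove(Filename.c_str());  // Runs after the stream has closed.
  }
};

struct ToolOutput {
  CleanupInstaller Installer;  // Declared first: built first, torn down last.
  std::ofstream OS;
  explicit ToolOutput(const std::string &F) : Installer(F), OS(F.c_str()) {}
};

int main() {
  {
    ToolOutput Out("scratch.txt");
    Out.OS << "partial result\n";
    // Keep was never set, so the file is removed on scope exit.
  }
  std::cout << "scratch.txt was cleaned up\n";
}
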
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index 3a95b65..36edf6e 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -10,6 +10,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include <cassert>
#include <cstring>
@@ -21,7 +22,7 @@ const char *Triple::getArchTypeName(ArchType Kind) {
switch (Kind) {
case InvalidArch: return "<invalid>";
case UnknownArch: return "unknown";
-
+
case alpha: return "alpha";
case arm: return "arm";
case bfin: return "bfin";
@@ -29,7 +30,6 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case mips: return "mips";
case mipsel: return "mipsel";
case msp430: return "msp430";
- case pic16: return "pic16";
case ppc64: return "powerpc64";
case ppc: return "powerpc";
case sparc: return "sparc";
@@ -41,6 +41,7 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case x86_64: return "x86_64";
case xcore: return "xcore";
case mblaze: return "mblaze";
+ case ptx: return "ptx";
}
return "<invalid>";
@@ -70,7 +71,10 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case x86:
case x86_64: return "x86";
+
case xcore: return "xcore";
+
+ case ptx: return "ptx";
}
}
@@ -97,7 +101,6 @@ const char *Triple::getOSTypeName(OSType Kind) {
case Linux: return "linux";
case Lv2: return "lv2";
case MinGW32: return "mingw32";
- case MinGW64: return "mingw64";
case NetBSD: return "netbsd";
case OpenBSD: return "openbsd";
case Psp: return "psp";
@@ -110,6 +113,18 @@ const char *Triple::getOSTypeName(OSType Kind) {
return "<invalid>";
}
+const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) {
+ switch (Kind) {
+ case UnknownEnvironment: return "unknown";
+ case GNU: return "gnu";
+ case GNUEABI: return "gnueabi";
+ case EABI: return "eabi";
+ case MachO: return "macho";
+ }
+
+ return "<invalid>";
+}
+
Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
if (Name == "alpha")
return alpha;
@@ -125,8 +140,6 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
return mipsel;
if (Name == "msp430")
return msp430;
- if (Name == "pic16")
- return pic16;
if (Name == "ppc64")
return ppc64;
if (Name == "ppc")
@@ -149,6 +162,8 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
return x86_64;
if (Name == "xcore")
return xcore;
+ if (Name == "ptx")
+ return ptx;
return UnknownArch;
}
@@ -187,6 +202,9 @@ Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
Str == "armv6" || Str == "armv7")
return Triple::arm;
+ if (Str == "ptx")
+ return Triple::ptx;
+
return Triple::UnknownArch;
}
@@ -210,28 +228,29 @@ const char *Triple::getArchNameForAssembler() {
return "arm";
if (Str == "armv4t" || Str == "thumbv4t")
return "armv4t";
- if (Str == "armv5" || Str == "armv5e" || Str == "thumbv5" || Str == "thumbv5e")
+ if (Str == "armv5" || Str == "armv5e" || Str == "thumbv5"
+ || Str == "thumbv5e")
return "armv5";
if (Str == "armv6" || Str == "thumbv6")
return "armv6";
if (Str == "armv7" || Str == "thumbv7")
return "armv7";
+ if (Str == "ptx")
+ return "ptx";
return NULL;
}
//
Triple::ArchType Triple::ParseArch(StringRef ArchName) {
- if (ArchName.size() == 4 && ArchName[0] == 'i' &&
- ArchName[2] == '8' && ArchName[3] == '6' &&
+ if (ArchName.size() == 4 && ArchName[0] == 'i' &&
+ ArchName[2] == '8' && ArchName[3] == '6' &&
ArchName[1] - '3' < 6) // i[3-9]86
return x86;
else if (ArchName == "amd64" || ArchName == "x86_64")
return x86_64;
else if (ArchName == "bfin")
return bfin;
- else if (ArchName == "pic16")
- return pic16;
else if (ArchName == "powerpc")
return ppc;
else if ((ArchName == "powerpc64") || (ArchName == "ppu"))
@@ -266,6 +285,8 @@ Triple::ArchType Triple::ParseArch(StringRef ArchName) {
return tce;
else if (ArchName == "xcore")
return xcore;
+ else if (ArchName == "ptx")
+ return ptx;
else
return UnknownArch;
}
@@ -296,8 +317,6 @@ Triple::OSType Triple::ParseOS(StringRef OSName) {
return Lv2;
else if (OSName.startswith("mingw32"))
return MinGW32;
- else if (OSName.startswith("mingw64"))
- return MinGW64;
else if (OSName.startswith("netbsd"))
return NetBSD;
else if (OSName.startswith("openbsd"))
@@ -316,12 +335,26 @@ Triple::OSType Triple::ParseOS(StringRef OSName) {
return UnknownOS;
}
+Triple::EnvironmentType Triple::ParseEnvironment(StringRef EnvironmentName) {
+ if (EnvironmentName.startswith("eabi"))
+ return EABI;
+ else if (EnvironmentName.startswith("gnueabi"))
+ return GNUEABI;
+ else if (EnvironmentName.startswith("gnu"))
+ return GNU;
+ else if (EnvironmentName.startswith("macho"))
+ return MachO;
+ else
+ return UnknownEnvironment;
+}
+
void Triple::Parse() const {
assert(!isInitialized() && "Invalid parse call.");
Arch = ParseArch(getArchName());
Vendor = ParseVendor(getVendorName());
OS = ParseOS(getOSName());
+ Environment = ParseEnvironment(getEnvironmentName());
assert(isInitialized() && "Failed to initialize!");
}
@@ -348,24 +381,28 @@ std::string Triple::normalize(StringRef Str) {
OSType OS = UnknownOS;
if (Components.size() > 2)
OS = ParseOS(Components[2]);
+ EnvironmentType Environment = UnknownEnvironment;
+ if (Components.size() > 3)
+ Environment = ParseEnvironment(Components[3]);
// Note which components are already in their final position. These will not
// be moved.
- bool Found[3];
+ bool Found[4];
Found[0] = Arch != UnknownArch;
Found[1] = Vendor != UnknownVendor;
Found[2] = OS != UnknownOS;
+ Found[3] = Environment != UnknownEnvironment;
// If they are not there already, permute the components into their canonical
// positions by seeing if they parse as a valid architecture, and if so moving
// the component to the architecture position etc.
- for (unsigned Pos = 0; Pos != 3; ++Pos) {
+ for (unsigned Pos = 0; Pos != array_lengthof(Found); ++Pos) {
if (Found[Pos])
continue; // Already in the canonical position.
for (unsigned Idx = 0; Idx != Components.size(); ++Idx) {
// Do not reparse any components that already matched.
- if (Idx < 3 && Found[Idx])
+ if (Idx < array_lengthof(Found) && Found[Idx])
continue;
// Does this component parse as valid for the target position?
@@ -386,6 +423,10 @@ std::string Triple::normalize(StringRef Str) {
OS = ParseOS(Comp);
Valid = OS != UnknownOS;
break;
+ case 3:
+ Environment = ParseEnvironment(Comp);
+ Valid = Environment != UnknownEnvironment;
+ break;
}
if (!Valid)
continue; // Nope, try the next component.
@@ -404,7 +445,7 @@ std::string Triple::normalize(StringRef Str) {
// components to the right.
for (unsigned i = Pos; !CurrentComponent.empty(); ++i) {
// Skip over any fixed components.
- while (i < 3 && Found[i]) ++i;
+ while (i < array_lengthof(Found) && Found[i]) ++i;
// Place the component at the new position, getting the component
// that was at this position - it will be moved right.
std::swap(CurrentComponent, Components[i]);
@@ -416,22 +457,23 @@ std::string Triple::normalize(StringRef Str) {
do {
// Insert one empty component at Idx.
StringRef CurrentComponent(""); // The empty component.
- for (unsigned i = Idx; i < Components.size(); ++i) {
- // Skip over any fixed components.
- while (i < 3 && Found[i]) ++i;
+ for (unsigned i = Idx; i < Components.size();) {
// Place the component at the new position, getting the component
// that was at this position - it will be moved right.
std::swap(CurrentComponent, Components[i]);
// If it was placed on top of an empty component then we are done.
if (CurrentComponent.empty())
break;
+ // Advance to the next component, skipping any fixed components.
+ while (++i < array_lengthof(Found) && Found[i])
+ ;
}
// The last component was pushed off the end - append it.
if (!CurrentComponent.empty())
Components.push_back(CurrentComponent);
// Advance Idx to the component's new position.
- while (++Idx < 3 && Found[Idx]) {}
+ while (++Idx < array_lengthof(Found) && Found[Idx]) {}
} while (Idx < Pos); // Add more until the final position is reached.
}
assert(Pos < Components.size() && Components[Pos] == Comp &&
@@ -482,17 +524,17 @@ StringRef Triple::getOSAndEnvironmentName() const {
static unsigned EatNumber(StringRef &Str) {
assert(!Str.empty() && Str[0] >= '0' && Str[0] <= '9' && "Not a number");
unsigned Result = Str[0]-'0';
-
+
// Eat the digit.
Str = Str.substr(1);
-
+
// Handle "darwin11".
if (Result == 1 && !Str.empty() && Str[0] >= '0' && Str[0] <= '9') {
Result = Result*10 + (Str[0] - '0');
// Eat the digit.
Str = Str.substr(1);
}
-
+
return Result;
}
@@ -505,10 +547,10 @@ void Triple::getDarwinNumber(unsigned &Maj, unsigned &Min,
assert(getOS() == Darwin && "Not a darwin target triple!");
StringRef OSName = getOSName();
assert(OSName.startswith("darwin") && "Unknown darwin target triple!");
-
+
// Strip off "darwin".
OSName = OSName.substr(6);
-
+
Maj = Min = Revision = 0;
if (OSName.empty() || OSName[0] < '0' || OSName[0] > '9')
@@ -517,27 +559,27 @@ void Triple::getDarwinNumber(unsigned &Maj, unsigned &Min,
// The major version is the first digit.
Maj = EatNumber(OSName);
if (OSName.empty()) return;
-
+
// Handle minor version: 10.4.9 -> darwin8.9.
if (OSName[0] != '.')
return;
-
+
// Eat the '.'.
OSName = OSName.substr(1);
if (OSName.empty() || OSName[0] < '0' || OSName[0] > '9')
return;
-
+
Min = EatNumber(OSName);
if (OSName.empty()) return;
// Handle revision darwin8.9.1
if (OSName[0] != '.')
return;
-
+
// Eat the '.'.
OSName = OSName.substr(1);
-
+
if (OSName.empty() || OSName[0] < '0' || OSName[0] > '9')
return;
@@ -561,6 +603,10 @@ void Triple::setOS(OSType Kind) {
setOSName(getOSTypeName(Kind));
}
+void Triple::setEnvironment(EnvironmentType Kind) {
+ setEnvironmentName(getEnvironmentTypeName(Kind));
+}
+
void Triple::setArchName(StringRef Str) {
// Work around a miscompilation bug for Twines in gcc 4.0.3.
SmallString<64> Triple;
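
ParseEnvironment() above is ordered prefix matching, and the order is load-bearing: "gnueabi" must be tested before "gnu", because a bare startswith("gnu") also accepts "gnueabi". A standalone sketch of the same ordering:

#include <iostream>
#include <string>

enum Environment { UnknownEnvironment, GNU, GNUEABI, EABI, MachO };

Environment parseEnvironment(const std::string &name) {
  // rfind(prefix, 0) == 0 is the classic std::string startswith idiom.
  if (name.rfind("eabi", 0) == 0)    return EABI;
  if (name.rfind("gnueabi", 0) == 0) return GNUEABI;  // Before "gnu"!
  if (name.rfind("gnu", 0) == 0)     return GNU;
  if (name.rfind("macho", 0) == 0)   return MachO;
  return UnknownEnvironment;
}

int main() {
  std::cout << (parseEnvironment("gnueabi") == GNUEABI) << '\n';  // 1
  std::cout << (parseEnvironment("gnu") == GNU) << '\n';          // 1
}
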
diff --git a/contrib/llvm/lib/Support/Twine.cpp b/contrib/llvm/lib/Support/Twine.cpp
index b3ea013..75cea29 100644
--- a/contrib/llvm/lib/Support/Twine.cpp
+++ b/contrib/llvm/lib/Support/Twine.cpp
@@ -30,22 +30,42 @@ StringRef Twine::toStringRef(SmallVectorImpl<char> &Out) const {
return StringRef(Out.data(), Out.size());
}
-void Twine::printOneChild(raw_ostream &OS, const void *Ptr,
+StringRef Twine::toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const {
+ if (isUnary()) {
+ switch (getLHSKind()) {
+ case CStringKind:
+ // Already null terminated, yay!
+ return StringRef(static_cast<const char*>(LHS));
+ case StdStringKind: {
+ const std::string *str = static_cast<const std::string*>(LHS);
+ return StringRef(str->c_str(), str->size());
+ }
+ default:
+ break;
+ }
+ }
+ toVector(Out);
+ Out.push_back(0);
+ Out.pop_back();
+ return StringRef(Out.data(), Out.size());
+}
+
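
The push_back(0)/pop_back() pair in toNullTerminatedStringRef() writes a NUL one slot past the logical end, then shrinks the size back so the terminator is excluded from size() yet still sits at data()[size()]. A sketch of the trick with std::vector; note that SmallVector guarantees the byte stays put after pop_back(), while for std::vector this only holds in practice, so treat the snippet as illustrative:

#include <cstdio>
#include <vector>

int main() {
  std::vector<char> Out;
  Out.push_back('h');
  Out.push_back('i');
  Out.push_back('\0');  // Ensure a terminator exists just past the data...
  Out.pop_back();       // ...then hide it from size().
  std::printf("%s (size %zu)\n", Out.data(), Out.size());  // hi (size 2)
}
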
+void Twine::printOneChild(raw_ostream &OS, const void *Ptr,
NodeKind Kind) const {
switch (Kind) {
case Twine::NullKind: break;
case Twine::EmptyKind: break;
case Twine::TwineKind:
- static_cast<const Twine*>(Ptr)->print(OS);
+ static_cast<const Twine*>(Ptr)->print(OS);
break;
- case Twine::CStringKind:
- OS << static_cast<const char*>(Ptr);
+ case Twine::CStringKind:
+ OS << static_cast<const char*>(Ptr);
break;
case Twine::StdStringKind:
- OS << *static_cast<const std::string*>(Ptr);
+ OS << *static_cast<const std::string*>(Ptr);
break;
case Twine::StringRefKind:
- OS << *static_cast<const StringRef*>(Ptr);
+ OS << *static_cast<const StringRef*>(Ptr);
break;
case Twine::DecUIKind:
OS << (unsigned)(uintptr_t)Ptr;
@@ -71,7 +91,7 @@ void Twine::printOneChild(raw_ostream &OS, const void *Ptr,
}
}
-void Twine::printOneChildRepr(raw_ostream &OS, const void *Ptr,
+void Twine::printOneChildRepr(raw_ostream &OS, const void *Ptr,
NodeKind Kind) const {
switch (Kind) {
case Twine::NullKind:
diff --git a/contrib/llvm/lib/System/Unix/Host.inc b/contrib/llvm/lib/Support/Unix/Host.inc
index c76d6a4..ed74b67 100644
--- a/contrib/llvm/lib/System/Unix/Host.inc
+++ b/contrib/llvm/lib/Support/Unix/Host.inc
@@ -1,4 +1,4 @@
- //===- llvm/System/Unix/Host.inc -------------------------------*- C++ -*-===//
+ //===- llvm/Support/Unix/Host.inc -------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringRef.h"
#include "Unix.h"
#include <sys/utsname.h>
+#include <cctype>
#include <string>
using namespace llvm;
@@ -39,7 +40,7 @@ std::string sys::getHostTriple() {
StringRef HostTripleString(LLVM_HOSTTRIPLE);
std::pair<StringRef, StringRef> ArchSplit = HostTripleString.split('-');
-
+
// Normalize the arch, since the host triple may not actually match the host.
std::string Arch = ArchSplit.first;
@@ -77,16 +78,16 @@ std::string sys::getHostTriple() {
Triple += ArchSplit.second;
// Force i<N>86 to i386.
- if (Triple[0] == 'i' && isdigit(Triple[1]) &&
+ if (Triple[0] == 'i' && isdigit(Triple[1]) &&
Triple[2] == '8' && Triple[3] == '6')
Triple[1] = '3';
// On darwin, we want to update the version to match that of the
- // host.
+ // host.
std::string::size_type DarwinDashIdx = Triple.find("-darwin");
if (DarwinDashIdx != std::string::npos) {
Triple.resize(DarwinDashIdx + strlen("-darwin"));
-
+
// Only add the major part of the os version.
std::string Version = getOSVersion();
Triple += Version.substr(0, Version.find('.'));
diff --git a/contrib/llvm/lib/System/Unix/Memory.inc b/contrib/llvm/lib/Support/Unix/Memory.inc
index 1b038f9..4312d67 100644
--- a/contrib/llvm/lib/System/Unix/Memory.inc
+++ b/contrib/llvm/lib/Support/Unix/Memory.inc
@@ -1,10 +1,10 @@
//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "Unix.h"
-#include "llvm/System/DataTypes.h"
-#include "llvm/System/Process.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Process.h"
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
@@ -28,7 +28,7 @@
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
-llvm::sys::MemoryBlock
+llvm::sys::MemoryBlock
llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
std::string *ErrMsg) {
if (NumBytes == 0) return MemoryBlock();
@@ -54,7 +54,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
#endif
;
- void* start = NearBlock ? (unsigned char*)NearBlock->base() +
+ void* start = NearBlock ? (unsigned char*)NearBlock->base() +
NearBlock->size() : 0;
#if defined(__APPLE__) && defined(__arm__)
diff --git a/contrib/llvm/lib/System/Unix/Mutex.inc b/contrib/llvm/lib/Support/Unix/Mutex.inc
index 4a5e28d..fe6b170 100644
--- a/contrib/llvm/lib/System/Unix/Mutex.inc
+++ b/contrib/llvm/lib/Support/Unix/Mutex.inc
@@ -1,10 +1,10 @@
-//===- llvm/System/Unix/Mutex.inc - Unix Mutex Implementation ---*- C++ -*-===//
-//
+//===- llvm/Support/Unix/Mutex.inc - Unix Mutex Implementation ---*- C++ -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Unix specific (non-pthread) Mutex class.
@@ -28,13 +28,13 @@ MutexImpl::~MutexImpl()
{
}
-bool
+bool
MutexImpl::release()
{
return true;
}
-bool
+bool
MutexImpl::tryacquire( void )
{
return true;
diff --git a/contrib/llvm/lib/System/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index 47e4d1a..0f6e800 100644
--- a/contrib/llvm/lib/System/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -1,4 +1,4 @@
-//===- llvm/System/Unix/Path.cpp - Unix Path Implementation -----*- C++ -*-===//
+//===- llvm/Support/Unix/Path.cpp - Unix Path Implementation -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -78,6 +78,10 @@ using namespace sys;
const char sys::PathSeparator = ':';
+StringRef Path::GetEXESuffix() {
+ return StringRef();
+}
+
Path::Path(StringRef p)
: path(p) {}
@@ -92,10 +96,12 @@ Path::operator=(StringRef that) {
bool
Path::isValid() const {
- // Check some obvious things
- if (path.empty())
- return false;
- return path.length() < MAXPATHLEN;
+ // Empty paths are considered invalid here.
+ // This code doesn't check MAXPATHLEN because there's no need. Nothing in
+  // LLVM manipulates Paths with fixed-size arrays, and if the OS can't
+  // handle names longer than some limit, it'll report this on demand using
+  // ENAMETOOLONG.
+ return !path.empty();
}
bool
@@ -113,18 +119,6 @@ Path::isAbsolute() const {
return path[0] == '/';
}
-void Path::makeAbsolute() {
- if (isAbsolute())
- return;
-
- Path CWD = Path::GetCurrentDirectory();
- assert(CWD.isAbsolute() && "GetCurrentDirectory returned relative path!");
-
- CWD.appendComponent(path);
-
- path = CWD.str();
-}
-
Path
Path::GetRootDirectory() {
Path result;
@@ -137,25 +131,20 @@ Path::GetTemporaryDirectory(std::string *ErrMsg) {
#if defined(HAVE_MKDTEMP)
// The best way is with mkdtemp but that's not available on many systems,
// Linux and FreeBSD have it. Others probably won't.
- char pathname[MAXPATHLEN];
- strcpy(pathname,"/tmp/llvm_XXXXXX");
+ char pathname[] = "/tmp/llvm_XXXXXX";
if (0 == mkdtemp(pathname)) {
MakeErrMsg(ErrMsg,
- std::string(pathname) + ": can't create temporary directory");
+ std::string(pathname) + ": can't create temporary directory");
return Path();
}
- Path result;
- result.set(pathname);
- assert(result.isValid() && "mkdtemp didn't create a valid pathname!");
- return result;
+ return Path(pathname);
#elif defined(HAVE_MKSTEMP)
// If no mkdtemp is available, mkstemp can be used to create a temporary file
// which is then removed and created as a directory. We prefer this over
// mktemp because of mktemp's inherent security and threading risks. We still
// have a slight race condition from the time the temporary file is created to
// the time it is re-created as a directory.
- char pathname[MAXPATHLEN];
- strcpy(pathname, "/tmp/llvm_XXXXXX");
+ char pathname[] = "/tmp/llvm_XXXXXX";
int fd = 0;
if (-1 == (fd = mkstemp(pathname))) {
MakeErrMsg(ErrMsg,
@@ -169,18 +158,14 @@ Path::GetTemporaryDirectory(std::string *ErrMsg) {
std::string(pathname) + ": can't create temporary directory");
return Path();
}
- Path result;
- result.set(pathname);
- assert(result.isValid() && "mkstemp didn't create a valid pathname!");
- return result;
+ return Path(pathname);
#elif defined(HAVE_MKTEMP)
// If a system doesn't have mkdtemp(3) or mkstemp(3) but it does have
// mktemp(3) then we'll assume that system (e.g. AIX) has a reasonable
// implementation of mktemp(3) and doesn't follow BSD 4.3's lead of replacing
// the XXXXXX with the pid of the process and a letter. That leads to only
// twenty six temporary files that can be generated.
- char pathname[MAXPATHLEN];
- strcpy(pathname, "/tmp/llvm_XXXXXX");
+ char pathname[] = "/tmp/llvm_XXXXXX";
char *TmpName = ::mktemp(pathname);
if (TmpName == 0) {
MakeErrMsg(ErrMsg,
@@ -192,10 +177,7 @@ Path::GetTemporaryDirectory(std::string *ErrMsg) {
std::string(TmpName) + ": can't create temporary directory");
return Path();
}
- Path result;
- result.set(TmpName);
- assert(result.isValid() && "mktemp didn't create a valid pathname!");
- return result;
+ return Path(TmpName);
#else
// This is the worst case implementation. tempnam(3) leaks memory unless its
// on an SVID2 (or later) system. On BSD 4.3 it leaks. tmpnam(3) has thread
@@ -216,10 +198,7 @@ Path::GetTemporaryDirectory(std::string *ErrMsg) {
std::string(pathname) + ": can't create temporary directory");
return Path();
}
- Path result;
- result.set(pathname);
- assert(result.isValid() && "mkstemp didn't create a valid pathname!");
- return result;
+ return Path(pathname);
#endif
}
@@ -263,12 +242,11 @@ Path::GetLLVMDefaultConfigDir() {
Path
Path::GetUserHomeDirectory() {
const char* home = getenv("HOME");
- if (home) {
- Path result;
- if (result.set(home))
- return result;
- }
- return GetRootDirectory();
+ Path result;
+ if (home && result.set(home))
+ return result;
+ result.set("/");
+ return result;
}
Path
@@ -282,7 +260,8 @@ Path::GetCurrentDirectory() {
return Path(pathname);
}
-#if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__minix)
+#if defined(__FreeBSD__) || defined (__NetBSD__) || \
+ defined(__OpenBSD__) || defined(__minix)
static int
test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
const char *dir, const char *bin)
@@ -348,18 +327,19 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
if (_NSGetExecutablePath(exe_path, &size) == 0) {
char link_path[MAXPATHLEN];
if (realpath(exe_path, link_path))
- return Path(std::string(link_path));
+ return Path(link_path);
}
-#elif defined(__FreeBSD__) || defined (__NetBSD__) || defined(__minix)
+#elif defined(__FreeBSD__) || defined (__NetBSD__) || \
+ defined(__OpenBSD__) || defined(__minix)
char exe_path[PATH_MAX];
if (getprogpath(exe_path, argv0) != NULL)
- return Path(std::string(exe_path));
+ return Path(exe_path);
#elif defined(__linux__) || defined(__CYGWIN__)
char exe_path[MAXPATHLEN];
ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path));
if (len >= 0)
- return Path(std::string(exe_path, len));
+ return Path(StringRef(exe_path, len));
#elif defined(HAVE_DLFCN_H)
// Use dladdr to get executable path if available.
Dl_info DLInfo;
@@ -371,7 +351,9 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
// the actual executable.
char link_path[MAXPATHLEN];
if (realpath(DLInfo.dli_fname, link_path))
- return Path(std::string(link_path));
+ return Path(link_path);
+#else
+#error GetMainExecutable is not implemented on this host yet.
#endif
return Path();
}
@@ -437,9 +419,18 @@ Path::isDirectory() const {
struct stat buf;
if (0 != stat(path.c_str(), &buf))
return false;
- return buf.st_mode & S_IFDIR ? true : false;
+ return ((buf.st_mode & S_IFMT) == S_IFDIR) ? true : false;
+}
+
+bool
+Path::isSymLink() const {
+ struct stat buf;
+ if (0 != lstat(path.c_str(), &buf))
+ return false;
+ return S_ISLNK(buf.st_mode);
}
+
bool
Path::canRead() const {
return 0 == access(path.c_str(), R_OK);
@@ -590,12 +581,7 @@ bool
Path::set(StringRef a_path) {
if (a_path.empty())
return false;
- std::string save(path);
path = a_path;
- if (!isValid()) {
- path = save;
- return false;
- }
return true;
}
@@ -603,14 +589,9 @@ bool
Path::appendComponent(StringRef name) {
if (name.empty())
return false;
- std::string save(path);
if (!lastIsSlash(path))
path += '/';
path += name;
- if (!isValid()) {
- path = save;
- return false;
- }
return true;
}
@@ -632,20 +613,7 @@ Path::eraseComponent() {
}
bool
-Path::appendSuffix(StringRef suffix) {
- std::string save(path);
- path.append(".");
- path.append(suffix);
- if (!isValid()) {
- path = save;
- return false;
- }
- return true;
-}
-
-bool
Path::eraseSuffix() {
- std::string save = path;
size_t dotpos = path.rfind('.',path.size());
size_t slashpos = path.rfind('/',path.size());
if (dotpos != std::string::npos) {
@@ -654,8 +622,6 @@ Path::eraseSuffix() {
return true;
}
}
- if (!isValid())
- path = save;
return false;
}
@@ -690,8 +656,7 @@ static bool createDirectoryHelper(char* beg, char* end, bool create_parents) {
bool
Path::createDirectoryOnDisk( bool create_parents, std::string* ErrMsg ) {
// Get a writeable copy of the path name
- char pathname[MAXPATHLEN];
- path.copy(pathname,MAXPATHLEN);
+ std::string pathname(path);
// Null-terminate the last component
size_t lastchar = path.length() - 1 ;
@@ -699,11 +664,10 @@ Path::createDirectoryOnDisk( bool create_parents, std::string* ErrMsg ) {
if (pathname[lastchar] != '/')
++lastchar;
- pathname[lastchar] = 0;
+ pathname[lastchar] = '\0';
- if (createDirectoryHelper(pathname, pathname+lastchar, create_parents))
- return MakeErrMsg(ErrMsg,
- std::string(pathname) + ": can't create directory");
+ if (createDirectoryHelper(&pathname[0], &pathname[lastchar], create_parents))
+ return MakeErrMsg(ErrMsg, pathname + ": can't create directory");
return false;
}
@@ -768,17 +732,15 @@ Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
}
// Otherwise, try to just remove the one directory.
- char pathname[MAXPATHLEN];
- path.copy(pathname, MAXPATHLEN);
+ std::string pathname(path);
size_t lastchar = path.length() - 1;
if (pathname[lastchar] == '/')
- pathname[lastchar] = 0;
+ pathname[lastchar] = '\0';
else
- pathname[lastchar+1] = 0;
+ pathname[lastchar+1] = '\0';
- if (rmdir(pathname) != 0)
- return MakeErrMsg(ErrStr,
- std::string(pathname) + ": can't erase directory");
+ if (rmdir(pathname.c_str()) != 0)
+ return MakeErrMsg(ErrStr, pathname + ": can't erase directory");
return false;
}
@@ -851,7 +813,8 @@ sys::CopyFile(const sys::Path &Dest, const sys::Path &Src, std::string* ErrMsg){
bool
Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
- if (reuse_current && !exists())
+ bool Exists;
+ if (reuse_current && (fs::exists(path, Exists) || !Exists))
return false; // File doesn't exist already, just use it!
// Append an XXXXXX pattern to the end of the file for use with mkstemp,
@@ -862,7 +825,8 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
Buf.resize(path.size()+8);
char *FNBuffer = &Buf[0];
path.copy(FNBuffer,path.size());
- if (isDirectory())
+ bool isdir;
+ if (!fs::is_directory(path, isdir) && isdir)
strcpy(FNBuffer+path.size(), "/XXXXXX");
else
strcpy(FNBuffer+path.size(), "-XXXXXX");
diff --git a/contrib/llvm/lib/Support/Unix/PathV2.inc b/contrib/llvm/lib/Support/Unix/PathV2.inc
new file mode 100644
index 0000000..03ff283
--- /dev/null
+++ b/contrib/llvm/lib/Support/Unix/PathV2.inc
@@ -0,0 +1,507 @@
+//===- llvm/Support/Unix/PathV2.cpp - Unix Path Implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Unix specific implementation of the PathV2 API.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//=== WARNING: Implementation here must contain only generic UNIX code that
+//=== is guaranteed to work on *all* UNIX variants.
+//===----------------------------------------------------------------------===//
+
+#include "Unix.h"
+#if HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+#if HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+#if HAVE_STDIO_H
+#include <stdio.h>
+#endif
+
+using namespace llvm;
+
+namespace {
+ /// This class automatically closes the given file descriptor when it goes out
+ /// of scope. You can take back explicit ownership of the file descriptor by
+ /// calling take(). The destructor does not verify that close was successful.
+ /// Therefore, never allow this class to call close on a file descriptor that
+ /// has been read from or written to.
+ struct AutoFD {
+ int FileDescriptor;
+
+ AutoFD(int fd) : FileDescriptor(fd) {}
+ ~AutoFD() {
+ if (FileDescriptor >= 0)
+ ::close(FileDescriptor);
+ }
+
+ int take() {
+ int ret = FileDescriptor;
+ FileDescriptor = -1;
+ return ret;
+ }
+
+ operator int() const {return FileDescriptor;}
+ };
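+
+  // A minimal usage sketch (illustrative; the path and error handling are
+  // ours, not part of this file):
+  //
+  //   {
+  //     AutoFD fd(::open("/tmp/example", O_RDONLY));
+  //     if (fd < 0)
+  //       return error_code(errno, system_category());
+  //     // ... read from fd ... then, per the warning above, close manually:
+  //     if (::close(fd.take()) < 0)
+  //       return error_code(errno, system_category());
+  //   } // Destructor is a no-op once take() has been called.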
+
+ error_code TempDir(SmallVectorImpl<char> &result) {
+ // FIXME: Don't use TMPDIR if program is SUID or SGID enabled.
+ const char *dir = 0;
+ (dir = std::getenv("TMPDIR" )) ||
+ (dir = std::getenv("TMP" )) ||
+ (dir = std::getenv("TEMP" )) ||
+ (dir = std::getenv("TEMPDIR")) ||
+#ifdef P_tmpdir
+ (dir = P_tmpdir) ||
+#endif
+ (dir = "/tmp");
+
+ result.clear();
+ StringRef d(dir);
+ result.append(d.begin(), d.end());
+ return success;
+ }
+}
+
+namespace llvm {
+namespace sys {
+namespace fs {
+
+error_code current_path(SmallVectorImpl<char> &result) {
+ result.reserve(MAXPATHLEN);
+
+ while (true) {
+ if (::getcwd(result.data(), result.capacity()) == 0) {
+ // See if there was a real error.
+ if (errno != errc::not_enough_memory)
+ return error_code(errno, system_category());
+ // Otherwise there just wasn't enough space.
+ result.reserve(result.capacity() * 2);
+ } else
+ break;
+ }
+
+ result.set_size(strlen(result.data()));
+ return success;
+}
+
+error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toNullTerminatedStringRef(from_storage);
+ StringRef t = to.toNullTerminatedStringRef(to_storage);
+
+ const size_t buf_sz = 32768;
+ char buffer[buf_sz];
+ int from_file = -1, to_file = -1;
+
+ // Open from.
+ if ((from_file = ::open(f.begin(), O_RDONLY)) < 0)
+ return error_code(errno, system_category());
+ AutoFD from_fd(from_file);
+
+ // Stat from.
+ struct stat from_stat;
+ if (::stat(f.begin(), &from_stat) != 0)
+ return error_code(errno, system_category());
+
+ // Setup to flags.
+ int to_flags = O_CREAT | O_WRONLY;
+ if (copt == copy_option::fail_if_exists)
+ to_flags |= O_EXCL;
+
+ // Open to.
+ if ((to_file = ::open(t.begin(), to_flags, from_stat.st_mode)) < 0)
+ return error_code(errno, system_category());
+ AutoFD to_fd(to_file);
+
+ // Copy!
+ ssize_t sz, sz_read = 1, sz_write;
+ while (sz_read > 0 &&
+ (sz_read = ::read(from_fd, buffer, buf_sz)) > 0) {
+ // Allow for partial writes - see Advanced Unix Programming (2nd Ed.),
+ // Marc Rochkind, Addison-Wesley, 2004, page 94
+ sz_write = 0;
+ do {
+ if ((sz = ::write(to_fd, buffer + sz_write, sz_read - sz_write)) < 0) {
+ sz_read = sz; // cause read loop termination.
+ break; // error.
+ }
+ sz_write += sz;
+ } while (sz_write < sz_read);
+ }
+
+ // After all the file operations above the return value of close actually
+ // matters.
+ if (::close(from_fd.take()) < 0) sz_read = -1;
+ if (::close(to_fd.take()) < 0) sz_read = -1;
+
+ // Check for errors.
+ if (sz_read < 0)
+ return error_code(errno, system_category());
+
+ return success;
+}
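+
+// The inner do/while above is the classic write-all loop; as a free-standing
+// helper it would look roughly like this (a sketch, not part of this file):
+//
+//   static bool WriteAll(int fd, const char *buf, size_t len) {
+//     while (len != 0) {
+//       ssize_t w = ::write(fd, buf, len);
+//       if (w < 0)
+//         return false; // Caller inspects errno.
+//       buf += w;
+//       len -= w;
+//     }
+//     return true;
+//   }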
+
+error_code create_directory(const Twine &path, bool &existed) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ if (::mkdir(p.begin(), S_IRWXU | S_IRWXG) == -1) {
+ if (errno != errc::file_exists)
+ return error_code(errno, system_category());
+ existed = true;
+ } else
+ existed = false;
+
+ return success;
+}
+
+error_code create_hard_link(const Twine &to, const Twine &from) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toNullTerminatedStringRef(from_storage);
+ StringRef t = to.toNullTerminatedStringRef(to_storage);
+
+ if (::link(t.begin(), f.begin()) == -1)
+ return error_code(errno, system_category());
+
+ return success;
+}
+
+error_code create_symlink(const Twine &to, const Twine &from) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toNullTerminatedStringRef(from_storage);
+ StringRef t = to.toNullTerminatedStringRef(to_storage);
+
+ if (::symlink(t.begin(), f.begin()) == -1)
+ return error_code(errno, system_category());
+
+ return success;
+}
+
+error_code remove(const Twine &path, bool &existed) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ if (::remove(p.begin()) == -1) {
+ if (errno != errc::no_such_file_or_directory)
+ return error_code(errno, system_category());
+ existed = false;
+ } else
+ existed = true;
+
+ return success;
+}
+
+error_code rename(const Twine &from, const Twine &to) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toNullTerminatedStringRef(from_storage);
+ StringRef t = to.toNullTerminatedStringRef(to_storage);
+
+ if (::rename(f.begin(), t.begin()) == -1) {
+    // If it's a cross-device link, copy then delete; otherwise return the error.
+ if (errno == EXDEV) {
+ if (error_code ec = copy_file(from, to, copy_option::overwrite_if_exists))
+ return ec;
+ bool Existed;
+ if (error_code ec = remove(from, Existed))
+ return ec;
+ } else
+ return error_code(errno, system_category());
+ }
+
+ return success;
+}
+
+error_code resize_file(const Twine &path, uint64_t size) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ if (::truncate(p.begin(), size) == -1)
+ return error_code(errno, system_category());
+
+ return success;
+}
+
+error_code exists(const Twine &path, bool &result) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ struct stat status;
+ if (::stat(p.begin(), &status) == -1) {
+ if (errno != errc::no_such_file_or_directory)
+ return error_code(errno, system_category());
+ result = false;
+ } else
+ result = true;
+
+ return success;
+}
+
+error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+ // Get arguments.
+ SmallString<128> a_storage;
+ SmallString<128> b_storage;
+ StringRef a = A.toNullTerminatedStringRef(a_storage);
+ StringRef b = B.toNullTerminatedStringRef(b_storage);
+
+ struct stat stat_a, stat_b;
+ int error_b = ::stat(b.begin(), &stat_b);
+ int error_a = ::stat(a.begin(), &stat_a);
+
+ // If both are invalid, it's an error. If only one is, the result is false.
+ if (error_a != 0 || error_b != 0) {
+ if (error_a == error_b)
+ return error_code(errno, system_category());
+ result = false;
+ } else {
+ result =
+ stat_a.st_dev == stat_b.st_dev &&
+ stat_a.st_ino == stat_b.st_ino;
+ }
+
+ return success;
+}
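+
+// The (st_dev, st_ino) pair is the POSIX identity test for "same file"; the
+// comparison above reduces to this core check:
+//
+//   static bool SameFile(const struct stat &a, const struct stat &b) {
+//     return a.st_dev == b.st_dev && a.st_ino == b.st_ino;
+//   }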
+
+error_code file_size(const Twine &path, uint64_t &result) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ struct stat status;
+ if (::stat(p.begin(), &status) == -1)
+ return error_code(errno, system_category());
+ if (!S_ISREG(status.st_mode))
+ return make_error_code(errc::operation_not_permitted);
+
+ result = status.st_size;
+ return success;
+}
+
+error_code status(const Twine &path, file_status &result) {
+ SmallString<128> path_storage;
+ StringRef p = path.toNullTerminatedStringRef(path_storage);
+
+ struct stat status;
+ if (::stat(p.begin(), &status) != 0) {
+ error_code ec(errno, system_category());
+ if (ec == errc::no_such_file_or_directory)
+ result = file_status(file_type::file_not_found);
+ else
+ result = file_status(file_type::status_error);
+ return ec;
+ }
+
+ if (S_ISDIR(status.st_mode))
+ result = file_status(file_type::directory_file);
+ else if (S_ISREG(status.st_mode))
+ result = file_status(file_type::regular_file);
+ else if (S_ISBLK(status.st_mode))
+ result = file_status(file_type::block_file);
+ else if (S_ISCHR(status.st_mode))
+ result = file_status(file_type::character_file);
+ else if (S_ISFIFO(status.st_mode))
+ result = file_status(file_type::fifo_file);
+ else if (S_ISSOCK(status.st_mode))
+ result = file_status(file_type::socket_file);
+ else
+ result = file_status(file_type::type_unknown);
+
+ return success;
+}
+
+error_code unique_file(const Twine &model, int &result_fd,
+ SmallVectorImpl<char> &result_path) {
+ SmallString<128> Model;
+ model.toVector(Model);
+ // Null terminate.
+ Model.c_str();
+
+ // Make model absolute by prepending a temp directory if it's not already.
+ bool absolute = path::is_absolute(Twine(Model));
+ if (!absolute) {
+ SmallString<128> TDir;
+ if (error_code ec = TempDir(TDir)) return ec;
+ path::append(TDir, Twine(Model));
+ Model.swap(TDir);
+ }
+
+ // Replace '%' with random chars. From here on, DO NOT modify model. It may be
+ // needed if the randomly chosen path already exists.
+ SmallString<128> RandomPath;
+ RandomPath.reserve(Model.size() + 1);
+ ::srand(::time(NULL));
+
+retry_random_path:
+ // This is opened here instead of above to make it easier to track when to
+ // close it. Collisions should be rare enough for the possible extra syscalls
+ // not to matter.
+ FILE *RandomSource = ::fopen("/dev/urandom", "r");
+ RandomPath.set_size(0);
+ for (SmallVectorImpl<char>::const_iterator i = Model.begin(),
+ e = Model.end(); i != e; ++i) {
+ if (*i == '%') {
+ char val = 0;
+ if (RandomSource)
+ val = fgetc(RandomSource);
+ else
+ val = ::rand();
+ RandomPath.push_back("0123456789abcdef"[val & 15]);
+ } else
+ RandomPath.push_back(*i);
+ }
+
+ if (RandomSource)
+ ::fclose(RandomSource);
+
+ // Try to open + create the file.
+retry_open_create:
+ int RandomFD = ::open(RandomPath.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (RandomFD == -1) {
+ // If the file existed, try again, otherwise, error.
+ if (errno == errc::file_exists)
+ goto retry_random_path;
+ // The path prefix doesn't exist.
+ if (errno == errc::no_such_file_or_directory) {
+ StringRef p(RandomPath.begin(), RandomPath.size());
+ SmallString<64> dir_to_create;
+ for (path::const_iterator i = path::begin(p),
+ e = --path::end(p); i != e; ++i) {
+ path::append(dir_to_create, *i);
+ bool Exists;
+ if (error_code ec = exists(Twine(dir_to_create), Exists)) return ec;
+ if (!Exists) {
+ // Don't try to create network paths.
+ if (i->size() > 2 && (*i)[0] == '/' &&
+ (*i)[1] == '/' &&
+ (*i)[2] != '/')
+ return make_error_code(errc::no_such_file_or_directory);
+ if (::mkdir(dir_to_create.c_str(), 0700) == -1)
+ return error_code(errno, system_category());
+ }
+ }
+    goto retry_open_create;
+ }
+ return error_code(errno, system_category());
+ }
+
+ // Make the path absolute.
+ char real_path_buff[PATH_MAX + 1];
+ if (realpath(RandomPath.c_str(), real_path_buff) == NULL) {
+ int error = errno;
+ ::close(RandomFD);
+ ::unlink(RandomPath.c_str());
+ return error_code(error, system_category());
+ }
+
+ result_path.clear();
+ StringRef d(real_path_buff);
+ result_path.append(d.begin(), d.end());
+
+ result_fd = RandomFD;
+ return success;
+}
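+
+// Typical use of unique_file; a sketch only (the model string and the cleanup
+// are illustrative):
+//
+//   int FD;
+//   SmallString<128> TempPath;
+//   if (error_code ec = unique_file("output-%%%%%%.o", FD, TempPath))
+//     return ec;
+//   // ... write through FD, ::close(FD), and later
+//   // remove(Twine(TempPath), Existed) to clean up.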
+
+error_code directory_iterator_construct(directory_iterator &it, StringRef path){
+ SmallString<128> path_null(path);
+ DIR *directory = ::opendir(path_null.c_str());
+ if (directory == 0)
+ return error_code(errno, system_category());
+
+ it.IterationHandle = reinterpret_cast<intptr_t>(directory);
+ // Add something for replace_filename to replace.
+ path::append(path_null, ".");
+ it.CurrentEntry = directory_entry(path_null.str());
+ return directory_iterator_increment(it);
+}
+
+error_code directory_iterator_destruct(directory_iterator& it) {
+ if (it.IterationHandle)
+ ::closedir(reinterpret_cast<DIR *>(it.IterationHandle));
+ it.IterationHandle = 0;
+ it.CurrentEntry = directory_entry();
+ return success;
+}
+
+error_code directory_iterator_increment(directory_iterator& it) {
+ errno = 0;
+ dirent *cur_dir = ::readdir(reinterpret_cast<DIR *>(it.IterationHandle));
+ if (cur_dir == 0 && errno != 0) {
+ return error_code(errno, system_category());
+ } else if (cur_dir != 0) {
+ StringRef name(cur_dir->d_name, NAMLEN(cur_dir));
+ if ((name.size() == 1 && name[0] == '.') ||
+ (name.size() == 2 && name[0] == '.' && name[1] == '.'))
+ return directory_iterator_increment(it);
+ it.CurrentEntry.replace_filename(name);
+ } else
+ return directory_iterator_destruct(it);
+
+ return success;
+}
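+
+// These three entry points back the public directory_iterator; iteration is
+// expected to look roughly like this (a sketch; error handling abbreviated):
+//
+//   error_code ec;
+//   for (directory_iterator i(Twine("some/dir"), ec), e;
+//        i != e && !ec; i.increment(ec))
+//     visit(i->path()); // "." and ".." are already filtered out above.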
+
+error_code get_magic(const Twine &path, uint32_t len,
+ SmallVectorImpl<char> &result) {
+ SmallString<128> PathStorage;
+ StringRef Path = path.toNullTerminatedStringRef(PathStorage);
+ result.set_size(0);
+
+ // Open path.
+ std::FILE *file = std::fopen(Path.data(), "rb");
+ if (file == 0)
+ return error_code(errno, system_category());
+
+ // Reserve storage.
+ result.reserve(len);
+
+ // Read magic!
+ size_t size = std::fread(result.data(), 1, len, file);
+ if (std::ferror(file) != 0) {
+ std::fclose(file);
+ return error_code(errno, system_category());
+  } else if (size != len) {
+ if (std::feof(file) != 0) {
+ std::fclose(file);
+ result.set_size(size);
+ return make_error_code(errc::value_too_large);
+ }
+ }
+ std::fclose(file);
+ result.set_size(len);
+ return success;
+}
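+
+// get_magic supports file-type sniffing. Recognizing LLVM bitcode, for
+// example, could look like this (a sketch; "BC\xC0\xDE" is the 4-byte
+// bitcode magic):
+//
+//   SmallString<4> Magic;
+//   error_code ec = get_magic(Twine("input.bc"), 4, Magic);
+//   bool IsBitcode = !ec && Magic.str().startswith("BC\xC0\xDE");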
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
diff --git a/contrib/llvm/lib/System/Unix/Process.inc b/contrib/llvm/lib/Support/Unix/Process.inc
index cf6a47a..5cdb11c 100644
--- a/contrib/llvm/lib/System/Unix/Process.inc
+++ b/contrib/llvm/lib/Support/Unix/Process.inc
@@ -1,10 +1,10 @@
//===- Unix/Process.cpp - Unix Process Implementation --------- -*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the generic Unix implementation of the Process class.
@@ -41,8 +41,8 @@
using namespace llvm;
using namespace sys;
-unsigned
-Process::GetPageSize()
+unsigned
+Process::GetPageSize()
{
#if defined(__CYGWIN__)
// On Cygwin, getpagesize() returns 64k but the page size for the purposes of
@@ -104,20 +104,20 @@ Process::GetTotalMemoryUsage()
}
void
-Process::GetTimeUsage(TimeValue& elapsed, TimeValue& user_time,
+Process::GetTimeUsage(TimeValue& elapsed, TimeValue& user_time,
TimeValue& sys_time)
{
elapsed = TimeValue::now();
#if defined(HAVE_GETRUSAGE)
struct rusage usage;
::getrusage(RUSAGE_SELF, &usage);
- user_time = TimeValue(
- static_cast<TimeValue::SecondsType>( usage.ru_utime.tv_sec ),
- static_cast<TimeValue::NanoSecondsType>( usage.ru_utime.tv_usec *
+ user_time = TimeValue(
+ static_cast<TimeValue::SecondsType>( usage.ru_utime.tv_sec ),
+ static_cast<TimeValue::NanoSecondsType>( usage.ru_utime.tv_usec *
TimeValue::NANOSECONDS_PER_MICROSECOND ) );
- sys_time = TimeValue(
- static_cast<TimeValue::SecondsType>( usage.ru_stime.tv_sec ),
- static_cast<TimeValue::NanoSecondsType>( usage.ru_stime.tv_usec *
+ sys_time = TimeValue(
+ static_cast<TimeValue::SecondsType>( usage.ru_stime.tv_sec ),
+ static_cast<TimeValue::NanoSecondsType>( usage.ru_stime.tv_usec *
TimeValue::NANOSECONDS_PER_MICROSECOND ) );
#else
#warning Cannot get usage times on this platform
@@ -159,14 +159,14 @@ void Process::PreventCoreFiles() {
exception_port_t OriginalPorts[EXC_TYPES_COUNT];
exception_behavior_t OriginalBehaviors[EXC_TYPES_COUNT];
thread_state_flavor_t OriginalFlavors[EXC_TYPES_COUNT];
- kern_return_t err =
+ kern_return_t err =
task_get_exception_ports(mach_task_self(), EXC_MASK_ALL, OriginalMasks,
&Count, OriginalPorts, OriginalBehaviors,
OriginalFlavors);
if (err == KERN_SUCCESS) {
// replace each with MACH_PORT_NULL.
for (unsigned i = 0; i != Count; ++i)
- task_set_exception_ports(mach_task_self(), OriginalMasks[i],
+ task_set_exception_ports(mach_task_self(), OriginalMasks[i],
MACH_PORT_NULL, OriginalBehaviors[i],
OriginalFlavors[i]);
}
diff --git a/contrib/llvm/lib/System/Unix/Program.inc b/contrib/llvm/lib/Support/Unix/Program.inc
index 0209f5a..1104bc7 100644
--- a/contrib/llvm/lib/System/Unix/Program.inc
+++ b/contrib/llvm/lib/Support/Unix/Program.inc
@@ -1,4 +1,4 @@
-//===- llvm/System/Unix/Program.cpp -----------------------------*- C++ -*-===//
+//===- llvm/Support/Unix/Program.cpp ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
//===----------------------------------------------------------------------===//
#include <llvm/Config/config.h>
+#include "llvm/Support/FileSystem.h"
#include "Unix.h"
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
@@ -66,8 +67,8 @@ Program::FindProgramByName(const std::string& progName) {
if (progName.find('/') != std::string::npos)
return temp;
- // At this point, the file name does not contain slashes. Search for it
- // through the directories specified in the PATH environment variable.
+ // At this point, the file name is valid and does not contain slashes. Search
+ // for it through the directories specified in the PATH environment variable.
// Get the path. If its empty, we can't do anything to find it.
const char *PathStr = getenv("PATH");
@@ -196,7 +197,7 @@ Program::Execute(const Path &path, const char **args, const char **envp,
*redirects[1] != *redirects[2]) {
// Just redirect stderr
if (RedirectIO_PS(redirects[2], 2, ErrMsg, FileActions)) return false;
- } else {
+ } else {
// If stdout and stderr should go to the same place, redirect stderr
// to the FD already open for stdout.
if (int Err = posix_spawn_file_actions_adddup2(&FileActions, 1, 2))
@@ -212,25 +213,21 @@ Program::Execute(const Path &path, const char **args, const char **envp,
envp = const_cast<const char **>(*_NSGetEnviron());
#endif
- pid_t PID;
+ // Explicitly initialized to prevent what appears to be a valgrind false
+ // positive.
+ pid_t PID = 0;
int Err = posix_spawn(&PID, path.c_str(), &FileActions, /*attrp*/0,
const_cast<char **>(args), const_cast<char **>(envp));
-
+
posix_spawn_file_actions_destroy(&FileActions);
if (Err)
return !MakeErrMsg(ErrMsg, "posix_spawn failed", Err);
-
+
Data_ = reinterpret_cast<void*>(PID);
return true;
}
#endif
-
- if (!path.canExecute()) {
- if (ErrMsg)
- *ErrMsg = path.str() + " is not executable";
- return false;
- }
// Create a child process.
int child = fork();
@@ -295,7 +292,8 @@ Program::Execute(const Path &path, const char **args, const char **envp,
}
int
-Program::Wait(unsigned secondsToWait,
+Program::Wait(const sys::Path &path,
+ unsigned secondsToWait,
std::string* ErrMsg)
{
#ifdef HAVE_SYS_WAIT_H
@@ -348,22 +346,46 @@ Program::Wait(unsigned secondsToWait,
sigaction(SIGALRM, &Old, 0);
}
- // Return the proper exit status. 0=success, >0 is programs' exit status,
- // <0 means a signal was returned, -9999999 means the program dumped core.
+ // Return the proper exit status. Detect error conditions
+ // so we can return -1 for them and set ErrMsg informatively.
int result = 0;
- if (WIFEXITED(status))
+ if (WIFEXITED(status)) {
result = WEXITSTATUS(status);
- else if (WIFSIGNALED(status))
- result = 0 - WTERMSIG(status);
+#ifdef HAVE_POSIX_SPAWN
+ // The posix_spawn child process returns 127 on any kind of error.
+ // Following the POSIX convention for command-line tools (which posix_spawn
+ // itself apparently does not), check to see if the failure was due to some
+ // reason other than the file not existing, and return 126 in this case.
+ bool Exists;
+ if (result == 127 && !llvm::sys::fs::exists(path.str(), Exists) && Exists)
+ result = 126;
+#endif
+ if (result == 127) {
+ if (ErrMsg)
+ *ErrMsg = llvm::sys::StrError(ENOENT);
+ return -1;
+ }
+ if (result == 126) {
+ if (ErrMsg)
+ *ErrMsg = "Program could not be executed";
+ return -1;
+ }
+ } else if (WIFSIGNALED(status)) {
+ if (ErrMsg) {
+ *ErrMsg = strsignal(WTERMSIG(status));
#ifdef WCOREDUMP
- else if (WCOREDUMP(status))
- result |= 0x01000000;
+ if (WCOREDUMP(status))
+ *ErrMsg += " (core dumped)";
#endif
+ }
+ return -1;
+ }
return result;
#else
- return -99;
+ if (ErrMsg)
+ *ErrMsg = "Program::Wait is not implemented on this platform yet!";
+ return -1;
#endif
-
}
bool
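The 127/126 handling above mirrors the shell convention: 127 means the program
could not be found, 126 means it was found but could not be executed. Condensed
into a sketch (the helper name is ours, not LLVM API):

    // Reinterpret posix_spawn's catch-all exit status 127 the way Wait() does.
    static int RemapSpawnStatus(int Result, bool FileExists) {
      if (Result == 127 && FileExists)
        return 126;   // Found, but exec failed for some other reason.
      return Result;  // 127 = not found; anything else is the real status.
    }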
diff --git a/contrib/llvm/lib/Support/Unix/README.txt b/contrib/llvm/lib/Support/Unix/README.txt
new file mode 100644
index 0000000..3d547c2
--- /dev/null
+++ b/contrib/llvm/lib/Support/Unix/README.txt
@@ -0,0 +1,16 @@
+llvm/lib/Support/Unix README
+============================
+
+This directory provides implementations of the lib/System classes that
+are common to two or more variants of UNIX. For example, the directory
+structure underneath this directory could look like this:
+
+Unix - only code that is truly generic to all UNIX platforms
+ Posix - code that is specific to Posix variants of UNIX
+ SUS - code that is specific to the Single Unix Specification
+ SysV - code that is specific to System V variants of UNIX
+
+As a rule, only those directories actually needing to be created should be
+created. Also, further subdirectories could be created to reflect versions of
+the various standards. For example, under SUS there could be v1, v2, and v3
+subdirectories to reflect the three major versions of SUS.
diff --git a/contrib/llvm/lib/System/Unix/RWMutex.inc b/contrib/llvm/lib/Support/Unix/RWMutex.inc
index e83d41e..40e87ff 100644
--- a/contrib/llvm/lib/System/Unix/RWMutex.inc
+++ b/contrib/llvm/lib/Support/Unix/RWMutex.inc
@@ -1,10 +1,10 @@
-//= llvm/System/Unix/RWMutex.inc - Unix Reader/Writer Mutual Exclusion Lock =//
-//
+//= llvm/Support/Unix/RWMutex.inc - Unix Reader/Writer Mutual Exclusion Lock =//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Unix specific (non-pthread) RWMutex class.
diff --git a/contrib/llvm/lib/System/Unix/Signals.inc b/contrib/llvm/lib/Support/Unix/Signals.inc
index 7b7c43e..0a61759 100644
--- a/contrib/llvm/lib/System/Unix/Signals.inc
+++ b/contrib/llvm/lib/Support/Unix/Signals.inc
@@ -1,10 +1,10 @@
//===- Signals.cpp - Generic Unix Signals Implementation -----*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines some helpful functions for dealing with the possibility of
@@ -14,7 +14,7 @@
#include "Unix.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <vector>
#include <algorithm>
#if HAVE_EXECINFO_H
@@ -28,7 +28,7 @@
#endif
#if HAVE_DLFCN_H && __GNUG__
#include <dlfcn.h>
-#include <cxxabi.h>
+#include <cxxabi.h>
#endif
using namespace llvm;
@@ -82,11 +82,11 @@ static void RegisterHandler(int Signal) {
"Out of space for signal handlers!");
struct sigaction NewHandler;
-
+
NewHandler.sa_handler = SignalHandler;
NewHandler.sa_flags = SA_NODEFER|SA_RESETHAND;
- sigemptyset(&NewHandler.sa_mask);
-
+ sigemptyset(&NewHandler.sa_mask);
+
// Install the new handler, save the old one in RegisteredSignalInfo.
sigaction(Signal, &NewHandler,
&RegisteredSignalInfo[NumRegisteredSignals].SA);
@@ -144,7 +144,7 @@ static RETSIGTYPE SignalHandler(int Sig) {
IF(); // run the interrupt function.
return;
}
-
+
SignalsMutex.release();
raise(Sig); // Execute the default handler.
return;
@@ -205,7 +205,7 @@ void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) {
// trace so that the user has an indication of why and where we died.
//
// On glibc systems we have the 'backtrace' function, which works nicely, but
-// doesn't demangle symbols.
+// doesn't demangle symbols.
static void PrintStackTrace(void *) {
#ifdef HAVE_BACKTRACE
static void* StackTrace[256];
@@ -274,6 +274,10 @@ void llvm::sys::PrintStackTraceOnErrorSignal() {
#ifdef __APPLE__
+int raise(int sig) {
+ return pthread_kill(pthread_self(), sig);
+}
+
void __assert_rtn(const char *func,
const char *file,
int line,
@@ -291,7 +295,7 @@ void __assert_rtn(const char *func,
#include <pthread.h>
void abort() {
- pthread_kill(pthread_self(), SIGABRT);
+ raise(SIGABRT);
usleep(1000);
__builtin_trap();
}
diff --git a/contrib/llvm/lib/System/Unix/ThreadLocal.inc b/contrib/llvm/lib/Support/Unix/ThreadLocal.inc
index 6769520..2b4c901 100644
--- a/contrib/llvm/lib/System/Unix/ThreadLocal.inc
+++ b/contrib/llvm/lib/Support/Unix/ThreadLocal.inc
@@ -1,10 +1,10 @@
-//=== llvm/System/Unix/ThreadLocal.inc - Unix Thread Local Data -*- C++ -*-===//
-//
+//=== llvm/Support/Unix/ThreadLocal.inc - Unix Thread Local Data -*- C++ -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Unix specific (non-pthread) ThreadLocal class.
diff --git a/contrib/llvm/lib/System/Unix/TimeValue.inc b/contrib/llvm/lib/Support/Unix/TimeValue.inc
index d8cc8f5..5cf5a9d 100644
--- a/contrib/llvm/lib/System/Unix/TimeValue.inc
+++ b/contrib/llvm/lib/Support/Unix/TimeValue.inc
@@ -1,10 +1,10 @@
//===- Unix/TimeValue.cpp - Unix TimeValue Implementation -------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Unix specific portion of the TimeValue class.
@@ -26,7 +26,7 @@ std::string TimeValue::str() const {
time_t ourTime = time_t(this->toEpochTime());
#ifdef __hpux
-// note that the following line needs -D_REENTRANT on HP-UX to be picked up
+// note that the following line needs -D_REENTRANT on HP-UX to be picked up
asctime_r(localtime(&ourTime), buffer);
#else
::asctime_r(::localtime(&ourTime), buffer);
@@ -43,13 +43,13 @@ TimeValue TimeValue::now() {
// This is *really* unlikely to occur because the only gettimeofday
// errors concern the timezone parameter which we're passing in as 0.
// In the unlikely case it does happen, just return MinTime, no error
- // message needed.
+ // message needed.
return MinTime;
}
return TimeValue(
- static_cast<TimeValue::SecondsType>( the_time.tv_sec + PosixZeroTime.seconds_ ),
- static_cast<TimeValue::NanoSecondsType>( the_time.tv_usec *
+ static_cast<TimeValue::SecondsType>( the_time.tv_sec + PosixZeroTime.seconds_ ),
+ static_cast<TimeValue::NanoSecondsType>( the_time.tv_usec *
NANOSECONDS_PER_MICROSECOND ) );
}
diff --git a/contrib/llvm/lib/System/Unix/Unix.h b/contrib/llvm/lib/Support/Unix/Unix.h
index c15866f..b7be311 100644
--- a/contrib/llvm/lib/System/Unix/Unix.h
+++ b/contrib/llvm/lib/Support/Unix/Unix.h
@@ -1,4 +1,4 @@
-//===- llvm/System/Unix/Unix.h - Common Unix Include File -------*- C++ -*-===//
+//===- llvm/Support/Unix/Unix.h - Common Unix Include File ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,7 +20,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h" // Get autoconf configuration settings
-#include "llvm/System/Errno.h"
+#include "llvm/Support/Errno.h"
#include <cstdlib>
#include <cstdio>
#include <cstring>
diff --git a/contrib/llvm/lib/Support/Unix/system_error.inc b/contrib/llvm/lib/Support/Unix/system_error.inc
new file mode 100644
index 0000000..681e919
--- /dev/null
+++ b/contrib/llvm/lib/Support/Unix/system_error.inc
@@ -0,0 +1,34 @@
+//===- llvm/Support/Unix/system_error.inc - Unix error_code -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the Unix specific implementation of the error_code
+// and error_condition classes.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//=== WARNING: Implementation here must contain only generic UNIX code that
+//=== is guaranteed to work on *all* UNIX variants.
+//===----------------------------------------------------------------------===//
+
+using namespace llvm;
+
+std::string
+_system_error_category::message(int ev) const {
+ return _do_message::message(ev);
+}
+
+error_condition
+_system_error_category::default_error_condition(int ev) const {
+#ifdef ELAST
+ if (ev > ELAST)
+ return error_condition(ev, system_category());
+#endif // ELAST
+ return error_condition(ev, generic_category());
+}
diff --git a/contrib/llvm/lib/System/Valgrind.cpp b/contrib/llvm/lib/Support/Valgrind.cpp
index c76cfe4..7034485 100644
--- a/contrib/llvm/lib/System/Valgrind.cpp
+++ b/contrib/llvm/lib/Support/Valgrind.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/System/Valgrind.h"
+#include "llvm/Support/Valgrind.h"
#include "llvm/Config/config.h"
#if HAVE_VALGRIND_VALGRIND_H
diff --git a/contrib/llvm/lib/System/Win32/DynamicLibrary.inc b/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc
index c9a89e5..2c14366 100644
--- a/contrib/llvm/lib/System/Win32/DynamicLibrary.inc
+++ b/contrib/llvm/lib/Support/Windows/DynamicLibrary.inc
@@ -1,17 +1,17 @@
//===- Win32/DynamicLibrary.cpp - Win32 DL Implementation -------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of DynamicLibrary.
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#ifdef __MINGW32__
#include <imagehlp.h>
@@ -35,7 +35,7 @@ namespace llvm {
using namespace sys;
//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only Win32 specific code
+//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code.
//===----------------------------------------------------------------------===//
@@ -50,12 +50,22 @@ static std::vector<HMODULE> OpenedHandles;
extern "C" {
// Use old callback if:
// - Not using Visual Studio
-// - Visual Studio 2005 or earlier but only if we are not using the Windows SDK
+// - Visual Studio 2005 or earlier but only if we are not using the Windows SDK
// or Windows SDK version is older than 6.0
// Use new callback if:
// - Newer Visual Studio (comes with newer SDK).
// - Visual Studio 2005 with Windows SDK 6.0+
-#if !defined(_MSC_VER) || _MSC_VER < 1500 && (!defined(VER_PRODUCTBUILD) || VER_PRODUCTBUILD < 6000)
+#if defined(_MSC_VER)
+ #if _MSC_VER < 1500 && (!defined(VER_PRODUCTBUILD) || VER_PRODUCTBUILD < 6000)
+ #define OLD_ELM_CALLBACK_DECL 1
+ #endif
+#elif defined(__MINGW64__)
+ // Use new callback.
+#elif defined(__MINGW32__)
+ #define OLD_ELM_CALLBACK_DECL 1
+#endif
+
+#ifdef OLD_ELM_CALLBACK_DECL
static BOOL CALLBACK ELM_Callback(PSTR ModuleName,
ModuleBaseType ModuleBase,
ULONG ModuleSize,
@@ -89,7 +99,7 @@ extern "C" {
}
bool DynamicLibrary::LoadLibraryPermanently(const char *filename,
- std::string *ErrMsg) {
+ std::string *ErrMsg) {
if (filename) {
HMODULE a_handle = LoadLibrary(filename);
@@ -110,40 +120,19 @@ bool DynamicLibrary::LoadLibraryPermanently(const char *filename,
// Stack probing routines are in the support library (e.g. libgcc), but we don't
// have dynamic linking on windows. Provide a hook.
-#if defined(__MINGW32__) || defined (_MSC_VER)
- #define EXPLICIT_SYMBOL(SYM) \
- if (!strcmp(symbolName, #SYM)) return (void*)&SYM
- #define EXPLICIT_SYMBOL2(SYMFROM, SYMTO) \
- if (!strcmp(symbolName, #SYMFROM)) return (void*)&SYMTO
- #define EXPLICIT_SYMBOL_DEF(SYM) \
- extern "C" { extern void *SYM; }
-
- #if defined(__MINGW32__)
- EXPLICIT_SYMBOL_DEF(_alloca)
- EXPLICIT_SYMBOL_DEF(__main)
- EXPLICIT_SYMBOL_DEF(__ashldi3)
- EXPLICIT_SYMBOL_DEF(__ashrdi3)
- EXPLICIT_SYMBOL_DEF(__cmpdi2)
- EXPLICIT_SYMBOL_DEF(__divdi3)
- EXPLICIT_SYMBOL_DEF(__fixdfdi)
- EXPLICIT_SYMBOL_DEF(__fixsfdi)
- EXPLICIT_SYMBOL_DEF(__fixunsdfdi)
- EXPLICIT_SYMBOL_DEF(__fixunssfdi)
- EXPLICIT_SYMBOL_DEF(__floatdidf)
- EXPLICIT_SYMBOL_DEF(__floatdisf)
- EXPLICIT_SYMBOL_DEF(__lshrdi3)
- EXPLICIT_SYMBOL_DEF(__moddi3)
- EXPLICIT_SYMBOL_DEF(__udivdi3)
- EXPLICIT_SYMBOL_DEF(__umoddi3)
- #elif defined(_MSC_VER)
- EXPLICIT_SYMBOL_DEF(_alloca_probe)
- #endif
-#endif
+#define EXPLICIT_SYMBOL(SYM) \
+ extern "C" { extern void *SYM; }
+#define EXPLICIT_SYMBOL2(SYMFROM, SYMTO) EXPLICIT_SYMBOL(SYMTO)
+
+#include "explicit_symbols.inc"
+
+#undef EXPLICIT_SYMBOL
+#undef EXPLICIT_SYMBOL2
void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
// First check symbols added via AddSymbol().
if (ExplicitSymbols) {
- std::map<std::string, void *>::iterator I =
+ std::map<std::string, void *>::iterator I =
ExplicitSymbols->find(symbolName);
std::map<std::string, void *>::iterator E = ExplicitSymbols->end();
if (I != E)
@@ -159,42 +148,19 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
}
}
-#if defined(__MINGW32__)
- {
- EXPLICIT_SYMBOL(_alloca);
- EXPLICIT_SYMBOL(__main);
- EXPLICIT_SYMBOL(__ashldi3);
- EXPLICIT_SYMBOL(__ashrdi3);
- EXPLICIT_SYMBOL(__cmpdi2);
- EXPLICIT_SYMBOL(__divdi3);
- EXPLICIT_SYMBOL(__fixdfdi);
- EXPLICIT_SYMBOL(__fixsfdi);
- EXPLICIT_SYMBOL(__fixunsdfdi);
- EXPLICIT_SYMBOL(__fixunssfdi);
- EXPLICIT_SYMBOL(__floatdidf);
- EXPLICIT_SYMBOL(__floatdisf);
- EXPLICIT_SYMBOL(__lshrdi3);
- EXPLICIT_SYMBOL(__moddi3);
- EXPLICIT_SYMBOL(__udivdi3);
- EXPLICIT_SYMBOL(__umoddi3);
-
- EXPLICIT_SYMBOL2(alloca, _alloca);
-#undef EXPLICIT_SYMBOL
-#undef EXPLICIT_SYMBOL2
-#undef EXPLICIT_SYMBOL_DEF
- }
-#elif defined(_MSC_VER)
+ #define EXPLICIT_SYMBOL(SYM) \
+ if (!strcmp(symbolName, #SYM)) return (void*)&SYM;
+ #define EXPLICIT_SYMBOL2(SYMFROM, SYMTO) \
+ if (!strcmp(symbolName, #SYMFROM)) return (void*)&SYMTO;
+
{
- EXPLICIT_SYMBOL2(alloca, _alloca_probe);
- EXPLICIT_SYMBOL2(_alloca, _alloca_probe);
-#undef EXPLICIT_SYMBOL
-#undef EXPLICIT_SYMBOL2
-#undef EXPLICIT_SYMBOL_DEF
+ #include "explicit_symbols.inc"
}
-#endif
+
+ #undef EXPLICIT_SYMBOL
+ #undef EXPLICIT_SYMBOL2
return 0;
}
}
-
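The EXPLICIT_SYMBOL definitions above and the ones inside
SearchForAddressOfSymbol are two expansions of the same X-macro list:
explicit_symbols.inc names each symbol once, and every inclusion site redefines
the macros to generate different code from that list. A self-contained
illustration of the idiom (the list file and symbols are ours, not the real
explicit_symbols.inc):

    #include <cstring>

    // symbols.inc (hypothetical) would contain lines such as:
    //   EXPLICIT_SYMBOL(_alloca)
    //   EXPLICIT_SYMBOL(__main)

    // Expansion 1: declare every symbol.
    #define EXPLICIT_SYMBOL(SYM) extern "C" { extern void *SYM; }
    #include "symbols.inc"
    #undef EXPLICIT_SYMBOL

    // Expansion 2: resolve a name to an address.
    void *Lookup(const char *Name) {
    #define EXPLICIT_SYMBOL(SYM) \
      if (!std::strcmp(Name, #SYM)) return (void*)&SYM;
    #include "symbols.inc"
    #undef EXPLICIT_SYMBOL
      return 0;
    }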
diff --git a/contrib/llvm/lib/System/Win32/Host.inc b/contrib/llvm/lib/Support/Windows/Host.inc
index 18f00f8..733830e 100644
--- a/contrib/llvm/lib/System/Win32/Host.inc
+++ b/contrib/llvm/lib/Support/Windows/Host.inc
@@ -1,4 +1,4 @@
-//===- llvm/System/Win32/Host.inc -------------------------------*- C++ -*-===//
+//===- llvm/Support/Win32/Host.inc ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <cstdio>
#include <string>
diff --git a/contrib/llvm/lib/System/Win32/Memory.inc b/contrib/llvm/lib/Support/Windows/Memory.inc
index 19fccbd..9f69e73 100644
--- a/contrib/llvm/lib/System/Win32/Memory.inc
+++ b/contrib/llvm/lib/Support/Windows/Memory.inc
@@ -1,10 +1,10 @@
//===- Win32/Memory.cpp - Win32 Memory Implementation -----------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of various Memory
@@ -12,15 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
-#include "llvm/System/DataTypes.h"
-#include "llvm/System/Process.h"
+#include "Windows.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Process.h"
namespace llvm {
using namespace sys;
//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only Win32 specific code
+//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/System/Win32/Mutex.inc b/contrib/llvm/lib/Support/Windows/Mutex.inc
index 75f01fe..583dc63 100644
--- a/contrib/llvm/lib/System/Win32/Mutex.inc
+++ b/contrib/llvm/lib/Support/Windows/Mutex.inc
@@ -1,10 +1,10 @@
-//===- llvm/System/Win32/Mutex.inc - Win32 Mutex Implementation -*- C++ -*-===//
-//
+//===- llvm/Support/Win32/Mutex.inc - Win32 Mutex Implementation -*- C++ -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Win32 specific (non-pthread) Mutex class.
@@ -16,8 +16,8 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Win32.h"
-#include "llvm/System/Mutex.h"
+#include "Windows.h"
+#include "llvm/Support/Mutex.h"
namespace llvm {
using namespace sys;
@@ -35,21 +35,21 @@ MutexImpl::~MutexImpl()
data_ = 0;
}
-bool
+bool
MutexImpl::acquire()
{
EnterCriticalSection((LPCRITICAL_SECTION)data_);
return true;
}
-bool
+bool
MutexImpl::release()
{
LeaveCriticalSection((LPCRITICAL_SECTION)data_);
return true;
}
-bool
+bool
MutexImpl::tryacquire()
{
return TryEnterCriticalSection((LPCRITICAL_SECTION)data_);
diff --git a/contrib/llvm/lib/System/Win32/Path.inc b/contrib/llvm/lib/Support/Windows/Path.inc
index 4a6dbd3..625f67a 100644
--- a/contrib/llvm/lib/System/Win32/Path.inc
+++ b/contrib/llvm/lib/Support/Windows/Path.inc
@@ -1,13 +1,10 @@
-//===- llvm/System/Win32/Path.cpp - Win32 Path Implementation ---*- C++ -*-===//
+//===- llvm/Support/Win32/Path.cpp - Win32 Path Implementation --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
-// Modified by Henrik Bach to comply with at least MinGW.
-// Ported to Win32 by Jeff Cohen.
-//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of the Path class.
@@ -19,7 +16,7 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <malloc.h>
#include <cstdio>
@@ -45,8 +42,13 @@ static void FlipBackSlashes(std::string& s) {
namespace llvm {
namespace sys {
+
const char PathSeparator = ';';
+StringRef Path::GetEXESuffix() {
+ return "exe";
+}
+
Path::Path(llvm::StringRef p)
: path(p) {
FlipBackSlashes(path);
@@ -64,6 +66,18 @@ Path::operator=(StringRef that) {
return *this;
}
+// push_back 0 on create, and pop_back on delete.
+struct ScopedNullTerminator {
+ std::string &str;
+ ScopedNullTerminator(std::string &s) : str(s) { str.push_back(0); }
+ ~ScopedNullTerminator() {
+ // str.pop_back(); But wait, C++03 doesn't have this...
+ assert(!str.empty() && str[str.size() - 1] == 0
+ && "Null char not present!");
+ str.resize(str.size() - 1);
+ }
+};
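+
+// The intended pattern (illustrative): keep the terminator alive for exactly
+// the scope that treats the string as a C string.
+//
+//   {
+//     ScopedNullTerminator snt(path);
+//     // path.data() is 0-terminated here and safe to hand to C APIs.
+//   } // Terminator popped; path regains its logical size.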
+
bool
Path::isValid() const {
if (path.empty())
@@ -72,6 +86,8 @@ Path::isValid() const {
// If there is a colon, it must be the second character, preceded by a letter
// and followed by something.
size_t len = path.size();
+ // This code assumes that path is null terminated, so make sure it is.
+ ScopedNullTerminator snt(path);
size_t pos = path.rfind(':',len);
size_t rootslash = 0;
if (pos != std::string::npos) {
@@ -156,8 +172,9 @@ Path::isAbsolute(const char *NameStart, unsigned NameLen) {
case 2:
return NameStart[0] == '/';
default:
- return (NameStart[0] == '/' || (NameStart[1] == ':' && NameStart[2] == '/')) ||
- (NameStart[0] == '\\' || (NameStart[1] == ':' && NameStart[2] == '\\'));
+ return
+ (NameStart[0] == '/' || (NameStart[1] == ':' && NameStart[2] == '/')) ||
+ (NameStart[0] == '\\' || (NameStart[1] == ':' && NameStart[2] == '\\'));
}
}
@@ -216,15 +233,39 @@ Path::GetTemporaryDirectory(std::string* ErrMsg) {
// FIXME: the following set of functions don't map to Windows very well.
Path
Path::GetRootDirectory() {
- Path result;
- result.set("C:/");
- return result;
+  // This is the only notion that Windows has of a root directory. Nothing
+ // is here except for drives.
+ return Path("file:///");
}
void
Path::GetSystemLibraryPaths(std::vector<sys::Path>& Paths) {
- Paths.push_back(sys::Path("C:/WINDOWS/SYSTEM32"));
- Paths.push_back(sys::Path("C:/WINDOWS"));
+ char buff[MAX_PATH];
+ // Generic form of C:\Windows\System32
+ HRESULT res = SHGetFolderPathA(NULL,
+ CSIDL_FLAG_CREATE | CSIDL_SYSTEM,
+ NULL,
+ SHGFP_TYPE_CURRENT,
+ buff);
+ if (res != S_OK) {
+ assert(0 && "Failed to get system directory");
+ return;
+ }
+ Paths.push_back(sys::Path(buff));
+
+ // Reset buff.
+ buff[0] = 0;
+ // Generic form of C:\Windows
+ res = SHGetFolderPathA(NULL,
+ CSIDL_FLAG_CREATE | CSIDL_WINDOWS,
+ NULL,
+ SHGFP_TYPE_CURRENT,
+ buff);
+ if (res != S_OK) {
+ assert(0 && "Failed to get windows directory");
+ return;
+ }
+ Paths.push_back(sys::Path(buff));
}
void
@@ -246,20 +287,23 @@ Path::GetBitcodeLibraryPaths(std::vector<sys::Path>& Paths) {
Path
Path::GetLLVMDefaultConfigDir() {
- // TODO: this isn't going to fly on Windows
- return Path("/etc/llvm");
+ Path ret = GetUserHomeDirectory();
+ if (!ret.appendComponent(".llvm"))
+ assert(0 && "Failed to append .llvm");
+ return ret;
}
Path
Path::GetUserHomeDirectory() {
- // TODO: Typical Windows setup doesn't define HOME.
- const char* home = getenv("HOME");
- if (home) {
- Path result;
- if (result.set(home))
- return result;
- }
- return GetRootDirectory();
+ char buff[MAX_PATH];
+ HRESULT res = SHGetFolderPathA(NULL,
+ CSIDL_FLAG_CREATE | CSIDL_APPDATA,
+ NULL,
+ SHGFP_TYPE_CURRENT,
+ buff);
+ if (res != S_OK)
+ assert(0 && "Failed to get user home directory");
+ return Path(buff);
}
Path
@@ -331,6 +375,19 @@ Path::isDirectory() const {
}
bool
+Path::isSymLink() const {
+ DWORD attributes = GetFileAttributes(path.c_str());
+
+ if (attributes == INVALID_FILE_ATTRIBUTES)
+ // There's no sane way to report this :(.
+ assert(0 && "GetFileAttributes returned INVALID_FILE_ATTRIBUTES");
+
+  // This isn't exactly what defines an NTFS symlink, but it is only true for
+ // paths that act like a symlink.
+ return attributes & FILE_ATTRIBUTE_REPARSE_POINT;
+}
+
+bool
Path::canRead() const {
// FIXME: take security attributes into account.
DWORD attr = GetFileAttributes(path.c_str());
@@ -353,9 +410,10 @@ Path::canExecute() const {
bool
Path::isRegularFile() const {
- if (isDirectory())
+ bool res;
+ if (fs::is_regular_file(path, res))
return false;
- return true;
+ return res;
}
StringRef
@@ -532,18 +590,6 @@ Path::eraseComponent() {
}
bool
-Path::appendSuffix(StringRef suffix) {
- std::string save(path);
- path.append(".");
- path.append(suffix);
- if (!isValid()) {
- path = save;
- return false;
- }
- return true;
-}
-
-bool
Path::eraseSuffix() {
size_t dotpos = path.rfind('.',path.size());
size_t slashpos = path.rfind('/',path.size());
@@ -622,7 +668,8 @@ Path::createDirectoryOnDisk(bool create_parents, std::string* ErrMsg) {
pathname[len-1] = 0;
if (!CreateDirectory(pathname, NULL) &&
GetLastError() != ERROR_ALREADY_EXISTS) {
- return MakeErrMsg(ErrMsg, std::string(pathname) + ": Can't create directory: ");
+ return MakeErrMsg(ErrMsg, std::string(pathname) +
+ ": Can't create directory: ");
}
}
return false;
@@ -648,7 +695,8 @@ Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
if (fi.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
// If it doesn't exist, we're done.
- if (!exists())
+ bool Exists;
+ if (fs::exists(path, Exists) || !Exists)
return false;
char *pathname = reinterpret_cast<char *>(_alloca(path.length()+3));
@@ -822,7 +870,8 @@ CopyFile(const sys::Path &Dest, const sys::Path &Src, std::string* ErrMsg) {
bool
Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
- if (reuse_current && !exists())
+ bool Exists;
+ if (reuse_current && (fs::exists(path, Exists) || !Exists))
return false; // File doesn't exist already, just use it!
// Reserve space for -XXXXXX at the end.
@@ -839,7 +888,7 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
if (++FCounter > 999999)
FCounter = 0;
path = FNBuffer;
- } while (exists());
+ } while (!fs::exists(path, Exists) && Exists);
return false;
}
diff --git a/contrib/llvm/lib/Support/Windows/PathV2.inc b/contrib/llvm/lib/Support/Windows/PathV2.inc
new file mode 100644
index 0000000..8effb0c
--- /dev/null
+++ b/contrib/llvm/lib/Support/Windows/PathV2.inc
@@ -0,0 +1,750 @@
+//===- llvm/Support/Windows/PathV2.inc - Windows Path Impl ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Windows specific implementation of the PathV2 API.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//=== WARNING: Implementation here must contain only generic Windows code that
+//=== is guaranteed to work on *all* Windows variants.
+//===----------------------------------------------------------------------===//
+
+#include "Windows.h"
+#include <wincrypt.h>
+#include <fcntl.h>
+#include <io.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+// MinGW doesn't define this.
+#ifndef _ERRNO_T_DEFINED
+#define _ERRNO_T_DEFINED
+typedef int errno_t;
+#endif
+
+using namespace llvm;
+
+namespace {
+ typedef BOOLEAN (WINAPI *PtrCreateSymbolicLinkW)(
+ /*__in*/ LPCWSTR lpSymlinkFileName,
+ /*__in*/ LPCWSTR lpTargetFileName,
+ /*__in*/ DWORD dwFlags);
+
+ PtrCreateSymbolicLinkW create_symbolic_link_api = PtrCreateSymbolicLinkW(
+ ::GetProcAddress(::GetModuleHandleA("kernel32.dll"),
+ "CreateSymbolicLinkW"));
+
+ error_code UTF8ToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16) {
+ int len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
+ utf8.begin(), utf8.size(),
+ utf16.begin(), 0);
+
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ utf16.reserve(len + 1);
+ utf16.set_size(len);
+
+ len = ::MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
+ utf8.begin(), utf8.size(),
+ utf16.begin(), utf16.size());
+
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ // Make utf16 null terminated.
+ utf16.push_back(0);
+ utf16.pop_back();
+
+ return success;
+ }
+
+ error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
+ SmallVectorImpl<char> &utf8) {
+ // Get length.
+ int len = ::WideCharToMultiByte(CP_UTF8, 0,
+ utf16, utf16_len,
+ utf8.begin(), 0,
+ NULL, NULL);
+
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ utf8.reserve(len);
+ utf8.set_size(len);
+
+ // Now do the actual conversion.
+ len = ::WideCharToMultiByte(CP_UTF8, 0,
+ utf16, utf16_len,
+ utf8.data(), utf8.size(),
+ NULL, NULL);
+
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ // Make utf8 null terminated.
+ utf8.push_back(0);
+ utf8.pop_back();
+
+ return success;
+ }
+
+ error_code TempDir(SmallVectorImpl<wchar_t> &result) {
+ retry_temp_dir:
+ DWORD len = ::GetTempPathW(result.capacity(), result.begin());
+
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ if (len > result.capacity()) {
+ result.reserve(len);
+ goto retry_temp_dir;
+ }
+
+ result.set_size(len);
+ return success;
+ }
+
+ // Forwarder for ScopedHandle.
+ BOOL WINAPI CryptReleaseContext(HCRYPTPROV Provider) {
+ return ::CryptReleaseContext(Provider, 0);
+ }
+
+ typedef ScopedHandle<HCRYPTPROV, uintptr_t(-1),
+ BOOL (WINAPI*)(HCRYPTPROV), CryptReleaseContext>
+ ScopedCryptContext;
+ bool is_separator(const wchar_t value) {
+ switch (value) {
+ case L'\\':
+ case L'/':
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+namespace llvm {
+namespace sys {
+namespace fs {
+
+error_code current_path(SmallVectorImpl<char> &result) {
+ SmallVector<wchar_t, 128> cur_path;
+ cur_path.reserve(128);
+retry_cur_dir:
+ DWORD len = ::GetCurrentDirectoryW(cur_path.capacity(), cur_path.data());
+
+ // A zero return value indicates a failure other than insufficient space.
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ // If there's insufficient space, the len returned is larger than the len
+ // given.
+ if (len > cur_path.capacity()) {
+ cur_path.reserve(len);
+ goto retry_cur_dir;
+ }
+
+ cur_path.set_size(len);
+ // cur_path now holds the current directory in utf-16. Convert to utf-8.
+
+ // Find out how much space we need. Sadly, this function doesn't return the
+ // size needed unless you tell it the result size is 0, which means you
+ // _always_ have to call it twice.
+ len = ::WideCharToMultiByte(CP_UTF8, 0,
+ cur_path.data(), cur_path.size(),
+ result.data(), 0,
+ NULL, NULL);
+
+ if (len == 0)
+ return make_error_code(windows_error(::GetLastError()));
+
+ result.reserve(len);
+ result.set_size(len);
+ // Now do the actual conversion.
+ len = ::WideCharToMultiByte(CP_UTF8, 0,
+ cur_path.data(), cur_path.size(),
+ result.data(), result.size(),
+ NULL, NULL);
+ if (len == 0)
+ return windows_error(::GetLastError());
+
+ return success;
+}
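+
+// The size-query-then-convert sequence above is the general pattern for the
+// Win32 conversion APIs; condensed (a sketch relying only on documented
+// WideCharToMultiByte behavior):
+//
+//   int n = ::WideCharToMultiByte(CP_UTF8, 0, w, wlen, NULL, 0, NULL, NULL);
+//   buf.reserve(n);
+//   buf.set_size(n);
+//   ::WideCharToMultiByte(CP_UTF8, 0, w, wlen, buf.data(), n, NULL, NULL);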
+
+error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toStringRef(from_storage);
+ StringRef t = to.toStringRef(to_storage);
+
+ // Convert to utf-16.
+ SmallVector<wchar_t, 128> wide_from;
+ SmallVector<wchar_t, 128> wide_to;
+ if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
+ if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+
+ // Copy the file.
+ BOOL res = ::CopyFileW(wide_from.begin(), wide_to.begin(),
+ copt != copy_option::overwrite_if_exists);
+
+ if (res == 0)
+ return windows_error(::GetLastError());
+
+ return success;
+}
+
+error_code create_directory(const Twine &path, bool &existed) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ if (!::CreateDirectoryW(path_utf16.begin(), NULL)) {
+ error_code ec = windows_error(::GetLastError());
+ if (ec == windows_error::already_exists)
+ existed = true;
+ else
+ return ec;
+ } else
+ existed = false;
+
+ return success;
+}
+
+error_code create_hard_link(const Twine &to, const Twine &from) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toStringRef(from_storage);
+ StringRef t = to.toStringRef(to_storage);
+
+ // Convert to utf-16.
+ SmallVector<wchar_t, 128> wide_from;
+ SmallVector<wchar_t, 128> wide_to;
+ if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
+ if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+
+ if (!::CreateHardLinkW(wide_from.begin(), wide_to.begin(), NULL))
+ return windows_error(::GetLastError());
+
+ return success;
+}
+
+error_code create_symlink(const Twine &to, const Twine &from) {
+ // Only do it if the function is available at runtime.
+ if (!create_symbolic_link_api)
+ return make_error_code(errc::function_not_supported);
+
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toStringRef(from_storage);
+ StringRef t = to.toStringRef(to_storage);
+
+ // Convert to utf-16.
+ SmallVector<wchar_t, 128> wide_from;
+ SmallVector<wchar_t, 128> wide_to;
+ if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
+ if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+
+ if (!create_symbolic_link_api(wide_from.begin(), wide_to.begin(), 0))
+ return windows_error(::GetLastError());
+
+ return success;
+}
+
+error_code remove(const Twine &path, bool &existed) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ file_status st;
+ if (error_code ec = status(path, st))
+ return ec;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ if (st.type() == file_type::directory_file) {
+ if (!::RemoveDirectoryW(c_str(path_utf16))) {
+ error_code ec = windows_error(::GetLastError());
+ if (ec != windows_error::file_not_found)
+ return ec;
+ existed = false;
+ } else
+ existed = true;
+ } else {
+ if (!::DeleteFileW(c_str(path_utf16))) {
+ error_code ec = windows_error(::GetLastError());
+ if (ec != windows_error::file_not_found)
+ return ec;
+ existed = false;
+ } else
+ existed = true;
+ }
+
+ return success;
+}
+
+error_code rename(const Twine &from, const Twine &to) {
+ // Get arguments.
+ SmallString<128> from_storage;
+ SmallString<128> to_storage;
+ StringRef f = from.toStringRef(from_storage);
+ StringRef t = to.toStringRef(to_storage);
+
+ // Convert to utf-16.
+ SmallVector<wchar_t, 128> wide_from;
+ SmallVector<wchar_t, 128> wide_to;
+ if (error_code ec = UTF8ToUTF16(f, wide_from)) return ec;
+ if (error_code ec = UTF8ToUTF16(t, wide_to)) return ec;
+
+ if (!::MoveFileExW(wide_from.begin(), wide_to.begin(),
+ MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING))
+ return windows_error(::GetLastError());
+
+ return success;
+}
+
+error_code resize_file(const Twine &path, uint64_t size) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ int fd = ::_wopen(path_utf16.begin(), O_BINARY, S_IREAD | S_IWRITE);
+ if (fd == -1)
+ return error_code(errno, generic_category());
+#ifdef HAVE__CHSIZE_S
+ errno_t error = ::_chsize_s(fd, size);
+#else
+ errno_t error = ::_chsize(fd, size);
+#endif
+ ::close(fd);
+ return error_code(error, generic_category());
+}
+
+error_code exists(const Twine &path, bool &result) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ DWORD attributes = ::GetFileAttributesW(path_utf16.begin());
+
+ if (attributes == INVALID_FILE_ATTRIBUTES) {
+ // See if the file didn't actually exist.
+ error_code ec = make_error_code(windows_error(::GetLastError()));
+ if (ec != windows_error::file_not_found &&
+ ec != windows_error::path_not_found)
+ return ec;
+ result = false;
+ } else
+ result = true;
+ return success;
+}
+
+error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+ // Get arguments.
+ SmallString<128> a_storage;
+ SmallString<128> b_storage;
+ StringRef a = A.toStringRef(a_storage);
+ StringRef b = B.toStringRef(b_storage);
+
+ // Convert to utf-16.
+ SmallVector<wchar_t, 128> wide_a;
+ SmallVector<wchar_t, 128> wide_b;
+ if (error_code ec = UTF8ToUTF16(a, wide_a)) return ec;
+ if (error_code ec = UTF8ToUTF16(b, wide_b)) return ec;
+
+ AutoHandle HandleB(
+ ::CreateFileW(wide_b.begin(),
+ 0,
+ FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
+ 0,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS,
+ 0));
+
+ AutoHandle HandleA(
+ ::CreateFileW(wide_a.begin(),
+ 0,
+ FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
+ 0,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS,
+ 0));
+
+ // If both handles are invalid, it's an error.
+ if (HandleA == INVALID_HANDLE_VALUE &&
+ HandleB == INVALID_HANDLE_VALUE)
+ return windows_error(::GetLastError());
+
+ // If only one is invalid, it's false.
+  if (HandleA == INVALID_HANDLE_VALUE ||
+      HandleB == INVALID_HANDLE_VALUE) {
+ result = false;
+ return success;
+ }
+
+ // Get file information.
+ BY_HANDLE_FILE_INFORMATION InfoA, InfoB;
+ if (!::GetFileInformationByHandle(HandleA, &InfoA))
+ return windows_error(::GetLastError());
+ if (!::GetFileInformationByHandle(HandleB, &InfoB))
+ return windows_error(::GetLastError());
+
+ // See if it's all the same.
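+ // The volume serial number plus the 64-bit file index uniquely identify a
+ // file on a volume; size and last-write time are extra sanity checks.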
+ result =
+ InfoA.dwVolumeSerialNumber == InfoB.dwVolumeSerialNumber &&
+ InfoA.nFileIndexHigh == InfoB.nFileIndexHigh &&
+ InfoA.nFileIndexLow == InfoB.nFileIndexLow &&
+ InfoA.nFileSizeHigh == InfoB.nFileSizeHigh &&
+ InfoA.nFileSizeLow == InfoB.nFileSizeLow &&
+ InfoA.ftLastWriteTime.dwLowDateTime ==
+ InfoB.ftLastWriteTime.dwLowDateTime &&
+ InfoA.ftLastWriteTime.dwHighDateTime ==
+ InfoB.ftLastWriteTime.dwHighDateTime;
+
+ return success;
+}
+
+error_code file_size(const Twine &path, uint64_t &result) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ WIN32_FILE_ATTRIBUTE_DATA FileData;
+ if (!::GetFileAttributesExW(path_utf16.begin(),
+ ::GetFileExInfoStandard,
+ &FileData))
+ return windows_error(::GetLastError());
+
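+ // Assemble the 64-bit size from its two 32-bit halves; nFileSizeLow is a
+ // DWORD, so the shift amount is sizeof(DWORD) * 8 == 32.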
+ result =
+ (uint64_t(FileData.nFileSizeHigh) << (sizeof(FileData.nFileSizeLow) * 8))
+ + FileData.nFileSizeLow;
+
+ return success;
+}
+
+error_code status(const Twine &path, file_status &result) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ DWORD attr = ::GetFileAttributesW(path_utf16.begin());
+ if (attr == INVALID_FILE_ATTRIBUTES)
+ goto handle_status_error;
+
+ // Handle reparse points.
+ if (attr & FILE_ATTRIBUTE_REPARSE_POINT) {
+ AutoHandle h(
+ ::CreateFileW(path_utf16.begin(),
+ 0, // Attributes only.
+ FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS,
+ 0));
+ if (h == INVALID_HANDLE_VALUE)
+ goto handle_status_error;
+ }
+
+ if (attr & FILE_ATTRIBUTE_DIRECTORY)
+ result = file_status(file_type::directory_file);
+ else
+ result = file_status(file_type::regular_file);
+
+ return success;
+
+handle_status_error:
+ error_code ec = windows_error(::GetLastError());
+ if (ec == windows_error::file_not_found ||
+ ec == windows_error::path_not_found)
+ result = file_status(file_type::file_not_found);
+ else if (ec == windows_error::sharing_violation)
+ result = file_status(file_type::type_unknown);
+ else {
+ result = file_status(file_type::status_error);
+ return ec;
+ }
+
+ return success;
+}
+
+error_code unique_file(const Twine &model, int &result_fd,
+ SmallVectorImpl<char> &result_path) {
+ // Use result_path as temp storage.
+ result_path.set_size(0);
+ StringRef m = model.toStringRef(result_path);
+
+ SmallVector<wchar_t, 128> model_utf16;
+ if (error_code ec = UTF8ToUTF16(m, model_utf16)) return ec;
+
+ // Make model absolute by prepending a temp directory if it's not already.
+ bool absolute = path::is_absolute(m);
+
+ if (!absolute) {
+ SmallVector<wchar_t, 64> temp_dir;
+ if (error_code ec = TempDir(temp_dir)) return ec;
+ // Strip a drive prefix such as "c:" so the model can be appended to the
+ // temporary directory.
+ if (model_utf16.size() > 2 && model_utf16[1] == L':') {
+ model_utf16.erase(model_utf16.begin(), model_utf16.begin() + 2);
+ }
+ model_utf16.insert(model_utf16.begin(), temp_dir.begin(), temp_dir.end());
+ }
+
+ // Replace '%' with random chars. From here on, DO NOT modify model. It may be
+ // needed if the randomly chosen path already exists.
+ SmallVector<wchar_t, 128> random_path_utf16;
+
+ // Get a Crypto Provider for CryptGenRandom.
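+ // CRYPT_VERIFYCONTEXT requests an ephemeral context with no named key
+ // container, which is all CryptGenRandom needs.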
+ HCRYPTPROV HCPC;
+ if (!::CryptAcquireContextW(&HCPC,
+ NULL,
+ NULL,
+ PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT))
+ return windows_error(::GetLastError());
+ ScopedCryptContext CryptoProvider(HCPC);
+
+retry_random_path:
+ random_path_utf16.set_size(0);
+ for (SmallVectorImpl<wchar_t>::const_iterator i = model_utf16.begin(),
+ e = model_utf16.end();
+ i != e; ++i) {
+ if (*i == L'%') {
+ BYTE val = 0;
+ if (!::CryptGenRandom(CryptoProvider, 1, &val))
+ return windows_error(::GetLastError());
+ random_path_utf16.push_back("0123456789abcdef"[val & 15]);
+ }
+ else
+ random_path_utf16.push_back(*i);
+ }
+ // Make random_path_utf16 null terminated.
+ random_path_utf16.push_back(0);
+ random_path_utf16.pop_back();
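+ // The terminator stays in the buffer after pop_back(); only the logical
+ // size shrinks, so begin() can be handed to APIs expecting a C string.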
+
+ // Try to create + open the path.
+retry_create_file:
+ HANDLE TempFileHandle = ::CreateFileW(random_path_utf16.begin(),
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ // Return ERROR_FILE_EXISTS if the file
+ // already exists.
+ CREATE_NEW,
+ FILE_ATTRIBUTE_TEMPORARY,
+ NULL);
+ if (TempFileHandle == INVALID_HANDLE_VALUE) {
+ // If the file existed, try again, otherwise, error.
+ error_code ec = windows_error(::GetLastError());
+ if (ec == windows_error::file_exists)
+ goto retry_random_path;
+ // Check for non-existing parent directories.
+ if (ec == windows_error::path_not_found) {
+ // Create the directories using result_path as temp storage.
+ if (error_code ec = UTF16ToUTF8(random_path_utf16.begin(),
+ random_path_utf16.size(), result_path))
+ return ec;
+ StringRef p(result_path.begin(), result_path.size());
+ SmallString<64> dir_to_create;
+ for (path::const_iterator i = path::begin(p),
+ e = --path::end(p); i != e; ++i) {
+ path::append(dir_to_create, *i);
+ bool Exists;
+ if (error_code ec = exists(Twine(dir_to_create), Exists)) return ec;
+ if (!Exists) {
+ // If c: doesn't exist, bail.
+ if (i->endswith(":"))
+ return ec;
+
+ SmallVector<wchar_t, 64> dir_to_create_utf16;
+ if (error_code ec = UTF8ToUTF16(dir_to_create, dir_to_create_utf16))
+ return ec;
+
+ // Create the directory.
+ if (!::CreateDirectoryW(dir_to_create_utf16.begin(), NULL))
+ return windows_error(::GetLastError());
+ }
+ }
+ goto retry_create_file;
+ }
+ return ec;
+ }
+
+ // Set result_path to the utf-8 representation of the path.
+ if (error_code ec = UTF16ToUTF8(random_path_utf16.begin(),
+ random_path_utf16.size(), result_path)) {
+ ::CloseHandle(TempFileHandle);
+ ::DeleteFileW(random_path_utf16.begin());
+ return ec;
+ }
+
+ // Convert the Windows API file handle into a C-runtime handle.
+ int fd = ::_open_osfhandle(intptr_t(TempFileHandle), 0);
+ if (fd == -1) {
+ ::CloseHandle(TempFileHandle);
+ ::DeleteFileW(random_path_utf16.begin());
+ // MSDN doesn't say anything about _open_osfhandle setting errno or
+ // GetLastError(), so just return invalid_handle.
+ return windows_error::invalid_handle;
+ }
+
+ result_fd = fd;
+ return success;
+}
+
+error_code get_magic(const Twine &path, uint32_t len,
+ SmallVectorImpl<char> &result) {
+ SmallString<128> path_storage;
+ SmallVector<wchar_t, 128> path_utf16;
+ result.set_size(0);
+
+ // Convert path to UTF-16.
+ if (error_code ec = UTF8ToUTF16(path.toStringRef(path_storage),
+ path_utf16))
+ return ec;
+
+ // Open file.
+ HANDLE file = ::CreateFileW(c_str(path_utf16),
+ GENERIC_READ,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_READONLY,
+ NULL);
+ if (file == INVALID_HANDLE_VALUE)
+ return windows_error(::GetLastError());
+
+ // Allocate buffer.
+ result.reserve(len);
+
+ // Get magic!
+ DWORD bytes_read = 0;
+ BOOL read_success = ::ReadFile(file, result.data(), len, &bytes_read, NULL);
+ error_code ec = windows_error(::GetLastError());
+ ::CloseHandle(file);
+ if (!read_success || (bytes_read != len)) {
+ // Set result size to the number of bytes read if it's valid; bytes_read
+ // is unsigned, so only the upper bound needs checking.
+ if (bytes_read <= len)
+ result.set_size(bytes_read);
+ // ERROR_HANDLE_EOF is mapped to errc::value_too_large.
+ return ec;
+ }
+
+ result.set_size(len);
+ return success;
+}
+
+error_code directory_iterator_construct(directory_iterator &it, StringRef path){
+ SmallVector<wchar_t, 128> path_utf16;
+
+ if (error_code ec = UTF8ToUTF16(path,
+ path_utf16))
+ return ec;
+
+ // Convert path to the format that Windows is happy with.
+ if (path_utf16.size() > 0 &&
+ !is_separator(path_utf16[path_utf16.size() - 1]) &&
+ path_utf16[path_utf16.size() - 1] != L':') {
+ path_utf16.push_back(L'\\');
+ path_utf16.push_back(L'*');
+ } else {
+ path_utf16.push_back(L'*');
+ }
+
+ // Get the first directory entry.
+ WIN32_FIND_DATAW FirstFind;
+ ScopedFindHandle FindHandle(::FindFirstFileW(c_str(path_utf16), &FirstFind));
+ if (!FindHandle)
+ return windows_error(::GetLastError());
+
+ size_t FilenameLen = ::wcslen(FirstFind.cFileName);
+ while ((FilenameLen == 1 && FirstFind.cFileName[0] == L'.') ||
+ (FilenameLen == 2 && FirstFind.cFileName[0] == L'.' &&
+ FirstFind.cFileName[1] == L'.'))
+ if (!::FindNextFileW(FindHandle, &FirstFind)) {
+ error_code ec = windows_error(::GetLastError());
+ // Check for end.
+ if (ec == windows_error::no_more_files)
+ return directory_iterator_destruct(it);
+ return ec;
+ } else
+ FilenameLen = ::wcslen(FirstFind.cFileName);
+
+ // Construct the current directory entry.
+ SmallString<128> directory_entry_name_utf8;
+ if (error_code ec = UTF16ToUTF8(FirstFind.cFileName,
+ ::wcslen(FirstFind.cFileName),
+ directory_entry_name_utf8))
+ return ec;
+
+ it.IterationHandle = intptr_t(FindHandle.take());
+ SmallString<128> directory_entry_path(path);
+ path::append(directory_entry_path, directory_entry_name_utf8.str());
+ it.CurrentEntry = directory_entry(directory_entry_path.str());
+
+ return success;
+}
+
+error_code directory_iterator_destruct(directory_iterator& it) {
+ if (it.IterationHandle != 0)
+ // Closes the handle if it's valid.
+ ScopedFindHandle close(HANDLE(it.IterationHandle));
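+ // (Its destructor runs at the end of this statement, closing the handle.)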
+ it.IterationHandle = 0;
+ it.CurrentEntry = directory_entry();
+ return success;
+}
+
+error_code directory_iterator_increment(directory_iterator& it) {
+ WIN32_FIND_DATAW FindData;
+ if (!::FindNextFileW(HANDLE(it.IterationHandle), &FindData)) {
+ error_code ec = windows_error(::GetLastError());
+ // Check for end.
+ if (ec == windows_error::no_more_files)
+ return directory_iterator_destruct(it);
+ return ec;
+ }
+
+ size_t FilenameLen = ::wcslen(FindData.cFileName);
+ if ((FilenameLen == 1 && FindData.cFileName[0] == L'.') ||
+ (FilenameLen == 2 && FindData.cFileName[0] == L'.' &&
+ FindData.cFileName[1] == L'.'))
+ return directory_iterator_increment(it);
+
+ SmallString<128> directory_entry_path_utf8;
+ if (error_code ec = UTF16ToUTF8(FindData.cFileName,
+ ::wcslen(FindData.cFileName),
+ directory_entry_path_utf8))
+ return ec;
+
+ it.CurrentEntry.replace_filename(Twine(directory_entry_path_utf8));
+ return success;
+}
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
diff --git a/contrib/llvm/lib/System/Win32/Process.inc b/contrib/llvm/lib/Support/Windows/Process.inc
index feb0806..06a7f00 100644
--- a/contrib/llvm/lib/System/Win32/Process.inc
+++ b/contrib/llvm/lib/Support/Windows/Process.inc
@@ -1,17 +1,17 @@
//===- Win32/Process.cpp - Win32 Process Implementation ------- -*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of the Process class.
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <psapi.h>
#include <malloc.h>
#include <io.h>
@@ -25,7 +25,7 @@
#endif
//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only Win32 specific code
+//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
@@ -51,13 +51,13 @@ inline unsigned GetPageSizeOnce() {
return static_cast<unsigned>(info.dwPageSize);
}
-unsigned
+unsigned
Process::GetPageSize() {
static const unsigned PageSize = GetPageSizeOnce();
return PageSize;
}
-size_t
+size_t
Process::GetMallocUsage()
{
_HEAPINFO hinfo;
@@ -86,7 +86,7 @@ Process::GetTimeUsage(
elapsed = TimeValue::now();
uint64_t ProcCreate, ProcExit, KernelTime, UserTime;
- GetProcessTimes(GetCurrentProcess(), (FILETIME*)&ProcCreate,
+ GetProcessTimes(GetCurrentProcess(), (FILETIME*)&ProcCreate,
(FILETIME*)&ProcExit, (FILETIME*)&KernelTime,
(FILETIME*)&UserTime);
@@ -132,7 +132,8 @@ bool Process::StandardErrIsDisplayed() {
}
bool Process::FileDescriptorIsDisplayed(int fd) {
- return GetFileType((HANDLE)_get_osfhandle(fd)) == FILE_TYPE_CHAR;
+ DWORD Mode; // Unused
+ return (GetConsoleMode((HANDLE)_get_osfhandle(fd), &Mode) != 0);
}
unsigned Process::StandardOutColumns() {
diff --git a/contrib/llvm/lib/System/Win32/Program.inc b/contrib/llvm/lib/Support/Windows/Program.inc
index 16bb28e..350363c 100644
--- a/contrib/llvm/lib/System/Win32/Program.inc
+++ b/contrib/llvm/lib/Support/Windows/Program.inc
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <cstdio>
#include <malloc.h>
#include <io.h>
@@ -58,10 +58,12 @@ Program::FindProgramByName(const std::string& progName) {
Path temp;
if (!temp.set(progName)) // invalid name
return Path();
- if (temp.canExecute()) // already executable as is
+ // Return paths with slashes verbatim.
+ if (progName.find('\\') != std::string::npos ||
+ progName.find('/') != std::string::npos)
return temp;
- // At this point, the file name is valid and its not executable.
+ // At this point, the file name is valid and does not contain slashes.
// Let Windows search for it.
char buffer[MAX_PATH];
char *dummy = NULL;
@@ -123,19 +125,10 @@ static HANDLE RedirectIO(const Path *path, int fd, std::string* ErrMsg) {
return h;
}
-#ifdef __MINGW32__
- // Due to unknown reason, mingw32's w32api doesn't have this declaration.
- extern "C"
- BOOL WINAPI SetInformationJobObject(HANDLE hJob,
- JOBOBJECTINFOCLASS JobObjectInfoClass,
- LPVOID lpJobObjectInfo,
- DWORD cbJobObjectInfoLength);
-#endif
-
/// ArgNeedsQuotes - Check whether argument needs to be quoted when calling
/// CreateProcess.
static bool ArgNeedsQuotes(const char *Str) {
- return Str[0] == '\0' || strchr(Str, ' ') != 0;
+ return Str[0] == '\0' || strpbrk(Str, "\t \"&\'()*<>\\`^|") != 0;
}
@@ -337,7 +330,8 @@ Program::Execute(const Path& path,
}
int
-Program::Wait(unsigned secondsToWait,
+Program::Wait(const Path &path,
+ unsigned secondsToWait,
std::string* ErrMsg) {
if (Data_ == 0) {
MakeErrMsg(ErrMsg, "Process not started!");
diff --git a/contrib/llvm/lib/System/Win32/RWMutex.inc b/contrib/llvm/lib/Support/Windows/RWMutex.inc
index e269226..471f8fa 100644
--- a/contrib/llvm/lib/System/Win32/RWMutex.inc
+++ b/contrib/llvm/lib/Support/Windows/RWMutex.inc
@@ -1,10 +1,10 @@
-//= llvm/System/Win32/Mutex.inc - Win32 Reader/Writer Mutual Exclusion Lock =//
-//
+//= llvm/Support/Windows/RWMutex.inc - Win32 Reader/Writer Mutual Exclusion =//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Win32 specific (non-pthread) RWMutex class.
@@ -16,10 +16,10 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
// FIXME: Windows does not have reader-writer locks pre-Vista. If you want
-// real reader-writer locks, you a pthreads implementation for Windows.
+// real reader-writer locks, you need a threads implementation for Windows.
namespace llvm {
using namespace sys;
diff --git a/contrib/llvm/lib/System/Win32/Signals.inc b/contrib/llvm/lib/Support/Windows/Signals.inc
index 2498a26e..14f3f21 100644
--- a/contrib/llvm/lib/System/Win32/Signals.inc
+++ b/contrib/llvm/lib/Support/Windows/Signals.inc
@@ -1,17 +1,17 @@
//===- Win32/Signals.cpp - Win32 Signals Implementation ---------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of the Signals class.
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <stdio.h>
#include <vector>
#include <algorithm>
@@ -43,9 +43,7 @@ static std::vector<llvm::sys::Path> *FilesToRemove = NULL;
static std::vector<std::pair<void(*)(void*), void*> > *CallBacksToRun = 0;
static bool RegisteredUnhandledExceptionFilter = false;
static bool CleanupExecuted = false;
-#ifdef _MSC_VER
static bool ExitOnUnhandledExceptions = false;
-#endif
static PTOP_LEVEL_EXCEPTION_FILTER OldFilter = NULL;
// Windows creates a new thread to execute the console handler when an event
@@ -56,7 +54,7 @@ static CRITICAL_SECTION CriticalSection;
namespace llvm {
//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only Win32 specific code
+//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
@@ -110,12 +108,15 @@ static void RegisterHandler() {
SetConsoleCtrlHandler(LLVMConsoleCtrlHandler, TRUE);
// Environment variable to disable any kind of crash dialog.
-#ifdef _MSC_VER
if (getenv("LLVM_DISABLE_CRT_DEBUG")) {
+#ifdef _MSC_VER
_CrtSetReportHook(CRTReportHook);
+#endif
+ SetErrorMode(SEM_FAILCRITICALERRORS |
+ SEM_NOGPFAULTERRORBOX |
+ SEM_NOOPENFILEERRORBOX);
ExitOnUnhandledExceptions = true;
}
-#endif
// IMPORTANT NOTE: Caller must call LeaveCriticalSection(&CriticalSection) or
// else multi-threading problems will ensue.
@@ -145,6 +146,8 @@ void sys::DontRemoveFileOnSignal(const sys::Path &Filename) {
if (FilesToRemove == NULL)
return;
+ RegisterHandler();
+
FilesToRemove->push_back(Filename);
std::vector<sys::Path>::reverse_iterator I =
std::find(FilesToRemove->rbegin(), FilesToRemove->rend(), Filename);
@@ -208,97 +211,91 @@ void llvm::sys::RunInterruptHandlers() {
}
static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
- try {
- Cleanup();
-
+ Cleanup();
+
#ifdef _WIN64
// TODO: provide a x64 friendly version of the following
#else
-
- // Initialize the STACKFRAME structure.
- STACKFRAME StackFrame;
- memset(&StackFrame, 0, sizeof(StackFrame));
-
- StackFrame.AddrPC.Offset = ep->ContextRecord->Eip;
- StackFrame.AddrPC.Mode = AddrModeFlat;
- StackFrame.AddrStack.Offset = ep->ContextRecord->Esp;
- StackFrame.AddrStack.Mode = AddrModeFlat;
- StackFrame.AddrFrame.Offset = ep->ContextRecord->Ebp;
- StackFrame.AddrFrame.Mode = AddrModeFlat;
-
- HANDLE hProcess = GetCurrentProcess();
- HANDLE hThread = GetCurrentThread();
-
- // Initialize the symbol handler.
- SymSetOptions(SYMOPT_DEFERRED_LOADS|SYMOPT_LOAD_LINES);
- SymInitialize(hProcess, NULL, TRUE);
-
- while (true) {
- if (!StackWalk(IMAGE_FILE_MACHINE_I386, hProcess, hThread, &StackFrame,
- ep->ContextRecord, NULL, SymFunctionTableAccess,
- SymGetModuleBase, NULL)) {
- break;
- }
-
- if (StackFrame.AddrFrame.Offset == 0)
- break;
-
- // Print the PC in hexadecimal.
- DWORD PC = StackFrame.AddrPC.Offset;
- fprintf(stderr, "%08lX", PC);
-
- // Print the parameters. Assume there are four.
- fprintf(stderr, " (0x%08lX 0x%08lX 0x%08lX 0x%08lX)", StackFrame.Params[0],
- StackFrame.Params[1], StackFrame.Params[2], StackFrame.Params[3]);
-
- // Verify the PC belongs to a module in this process.
- if (!SymGetModuleBase(hProcess, PC)) {
- fputs(" <unknown module>\n", stderr);
- continue;
- }
-
- // Print the symbol name.
- char buffer[512];
- IMAGEHLP_SYMBOL *symbol = reinterpret_cast<IMAGEHLP_SYMBOL *>(buffer);
- memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL));
- symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL);
- symbol->MaxNameLength = 512 - sizeof(IMAGEHLP_SYMBOL);
-
- DWORD dwDisp;
- if (!SymGetSymFromAddr(hProcess, PC, &dwDisp, symbol)) {
- fputc('\n', stderr);
- continue;
- }
-
- buffer[511] = 0;
- if (dwDisp > 0)
- fprintf(stderr, ", %s()+%04lu bytes(s)", symbol->Name, dwDisp);
- else
- fprintf(stderr, ", %s", symbol->Name);
-
- // Print the source file and line number information.
- IMAGEHLP_LINE line;
- memset(&line, 0, sizeof(line));
- line.SizeOfStruct = sizeof(line);
- if (SymGetLineFromAddr(hProcess, PC, &dwDisp, &line)) {
- fprintf(stderr, ", %s, line %lu", line.FileName, line.LineNumber);
- if (dwDisp > 0)
- fprintf(stderr, "+%04lu byte(s)", dwDisp);
- }
+ // Initialize the STACKFRAME structure.
+ STACKFRAME StackFrame;
+ memset(&StackFrame, 0, sizeof(StackFrame));
+
+ StackFrame.AddrPC.Offset = ep->ContextRecord->Eip;
+ StackFrame.AddrPC.Mode = AddrModeFlat;
+ StackFrame.AddrStack.Offset = ep->ContextRecord->Esp;
+ StackFrame.AddrStack.Mode = AddrModeFlat;
+ StackFrame.AddrFrame.Offset = ep->ContextRecord->Ebp;
+ StackFrame.AddrFrame.Mode = AddrModeFlat;
+
+ HANDLE hProcess = GetCurrentProcess();
+ HANDLE hThread = GetCurrentThread();
+
+ // Initialize the symbol handler.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS|SYMOPT_LOAD_LINES);
+ SymInitialize(hProcess, NULL, TRUE);
+
+ while (true) {
+ if (!StackWalk(IMAGE_FILE_MACHINE_I386, hProcess, hThread, &StackFrame,
+ ep->ContextRecord, NULL, SymFunctionTableAccess,
+ SymGetModuleBase, NULL)) {
+ break;
+ }
+
+ if (StackFrame.AddrFrame.Offset == 0)
+ break;
+
+ // Print the PC in hexadecimal.
+ DWORD PC = StackFrame.AddrPC.Offset;
+ fprintf(stderr, "%08lX", PC);
+
+ // Print the parameters. Assume there are four.
+ fprintf(stderr, " (0x%08lX 0x%08lX 0x%08lX 0x%08lX)",
+ StackFrame.Params[0],
+ StackFrame.Params[1], StackFrame.Params[2], StackFrame.Params[3]);
+
+ // Verify the PC belongs to a module in this process.
+ if (!SymGetModuleBase(hProcess, PC)) {
+ fputs(" <unknown module>\n", stderr);
+ continue;
+ }
+
+ // Print the symbol name.
+ char buffer[512];
+ IMAGEHLP_SYMBOL *symbol = reinterpret_cast<IMAGEHLP_SYMBOL *>(buffer);
+ memset(symbol, 0, sizeof(IMAGEHLP_SYMBOL));
+ symbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL);
+ symbol->MaxNameLength = 512 - sizeof(IMAGEHLP_SYMBOL);
+
+ DWORD dwDisp;
+ if (!SymGetSymFromAddr(hProcess, PC, &dwDisp, symbol)) {
fputc('\n', stderr);
+ continue;
}
-#endif
+ buffer[511] = 0;
+ if (dwDisp > 0)
+ fprintf(stderr, ", %s()+%04lu bytes(s)", symbol->Name, dwDisp);
+ else
+ fprintf(stderr, ", %s", symbol->Name);
+
+ // Print the source file and line number information.
+ IMAGEHLP_LINE line;
+ memset(&line, 0, sizeof(line));
+ line.SizeOfStruct = sizeof(line);
+ if (SymGetLineFromAddr(hProcess, PC, &dwDisp, &line)) {
+ fprintf(stderr, ", %s, line %lu", line.FileName, line.LineNumber);
+ if (dwDisp > 0)
+ fprintf(stderr, "+%04lu byte(s)", dwDisp);
+ }
- } catch (...) {
- assert(0 && "Crashed in LLVMUnhandledExceptionFilter");
+ fputc('\n', stderr);
}
-#ifdef _MSC_VER
+#endif
+
if (ExitOnUnhandledExceptions)
_exit(-3);
-#endif
// Allow dialog box to pop up allowing choice to start debugger.
if (OldFilter)
@@ -329,4 +326,3 @@ static BOOL WINAPI LLVMConsoleCtrlHandler(DWORD dwCtrlType) {
LeaveCriticalSection(&CriticalSection);
return FALSE;
}
-
diff --git a/contrib/llvm/lib/System/Win32/ThreadLocal.inc b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
index b8b933c..512462d 100644
--- a/contrib/llvm/lib/System/Win32/ThreadLocal.inc
+++ b/contrib/llvm/lib/Support/Windows/ThreadLocal.inc
@@ -1,10 +1,10 @@
-//= llvm/System/Win32/ThreadLocal.inc - Win32 Thread Local Data -*- C++ -*-===//
-//
+//= llvm/Support/Win32/ThreadLocal.inc - Win32 Thread Local Data -*- C++ -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file implements the Win32 specific (non-pthread) ThreadLocal class.
@@ -16,8 +16,8 @@
//=== is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//
-#include "Win32.h"
-#include "llvm/System/ThreadLocal.h"
+#include "Windows.h"
+#include "llvm/Support/ThreadLocal.h"
namespace llvm {
using namespace sys;
@@ -44,6 +44,7 @@ void ThreadLocalImpl::setInstance(const void* d){
DWORD* tls = static_cast<DWORD*>(data);
int errorcode = TlsSetValue(*tls, const_cast<void*>(d));
assert(errorcode != 0);
+ (void)errorcode;
}
void ThreadLocalImpl::removeInstance() {
diff --git a/contrib/llvm/lib/System/Win32/TimeValue.inc b/contrib/llvm/lib/Support/Windows/TimeValue.inc
index e37f111..1227552 100644
--- a/contrib/llvm/lib/System/Win32/TimeValue.inc
+++ b/contrib/llvm/lib/Support/Windows/TimeValue.inc
@@ -1,17 +1,17 @@
//===- Win32/TimeValue.cpp - Win32 TimeValue Implementation -----*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 implementation of the TimeValue class.
//
//===----------------------------------------------------------------------===//
-#include "Win32.h"
+#include "Windows.h"
#include <time.h>
namespace llvm {
diff --git a/contrib/llvm/lib/Support/Windows/Windows.h b/contrib/llvm/lib/Support/Windows/Windows.h
new file mode 100644
index 0000000..4a1553b
--- /dev/null
+++ b/contrib/llvm/lib/Support/Windows/Windows.h
@@ -0,0 +1,129 @@
+//===- Windows/Windows.h - Common Windows Include File ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things specific to Win32 implementations.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//=== WARNING: Implementation here must contain only generic Win32 code that
+//=== is guaranteed to work on *all* Win32 variants.
+//===----------------------------------------------------------------------===//
+
+// mingw-w64 tends to define it as 0x0502 in its headers.
+#undef _WIN32_WINNT
+
+// Require at least Windows 2000 API.
+#define _WIN32_WINNT 0x0500
+#define _WIN32_IE 0x0500 // MinGW at it again.
+#define WIN32_LEAN_AND_MEAN
+
+#include "llvm/Config/config.h" // Get build system configuration settings
+#include <windows.h>
+#include <shlobj.h>
+#include <cassert>
+#include <string>
+
+inline bool MakeErrMsg(std::string* ErrMsg, const std::string& prefix) {
+ if (!ErrMsg)
+ return true;
+ char *buffer = NULL;
+ FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER|FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, GetLastError(), 0, (LPSTR)&buffer, 1, NULL);
+ *ErrMsg = prefix + (buffer ? buffer : "<unknown error>");
+ LocalFree(buffer);
+ return true;
+}
+
+class AutoHandle {
+ HANDLE handle;
+
+public:
+ AutoHandle(HANDLE h) : handle(h) {}
+
+ ~AutoHandle() {
+ if (handle)
+ CloseHandle(handle);
+ }
+
+ operator HANDLE() {
+ return handle;
+ }
+
+ AutoHandle &operator=(HANDLE h) {
+ handle = h;
+ return *this;
+ }
+};
+
+template <class HandleType, uintptr_t InvalidHandle,
+ class DeleterType, DeleterType D>
+class ScopedHandle {
+ HandleType Handle;
+
+public:
+ ScopedHandle() : Handle(InvalidHandle) {}
+ ScopedHandle(HandleType handle) : Handle(handle) {}
+
+ ~ScopedHandle() {
+ if (Handle != HandleType(InvalidHandle))
+ D(Handle);
+ }
+
+ HandleType take() {
+ HandleType temp = Handle;
+ Handle = HandleType(InvalidHandle);
+ return temp;
+ }
+
+ operator HandleType() const { return Handle; }
+
+ ScopedHandle &operator=(HandleType handle) {
+ Handle = handle;
+ return *this;
+ }
+
+ typedef void (*unspecified_bool_type)();
+ static void unspecified_bool_true() {}
+
+ // True if Handle is valid.
+ operator unspecified_bool_type() const {
+ return Handle == HandleType(InvalidHandle) ? 0 : unspecified_bool_true;
+ }
+
+ bool operator!() const {
+ return Handle == HandleType(InvalidHandle);
+ }
+};
+
+typedef ScopedHandle<HANDLE, uintptr_t(-1),
+ BOOL (WINAPI*)(HANDLE), ::FindClose>
+ ScopedFindHandle;
+
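+// Example use (a minimal sketch, not part of the original header): the find
+// handle is closed on scope exit unless take() releases ownership first.
+// WIN32_FIND_DATAW Data;
+// ScopedFindHandle FH(::FindFirstFileW(L"C:\\*", &Data));
+// if (!FH) { /* inspect ::GetLastError() */ }
+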
+namespace llvm {
+template <class T>
+class SmallVectorImpl;
+
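+// Return a pointer to str's data as a null-terminated C string without
+// changing its logical size: the terminator is pushed and then popped, so
+// it stays in the buffer one position past the end.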
+template <class T>
+typename SmallVectorImpl<T>::const_pointer
+c_str(SmallVectorImpl<T> &str) {
+ str.push_back(0);
+ str.pop_back();
+ return str.data();
+}
+} // end namespace llvm.
diff --git a/contrib/llvm/lib/Support/Windows/explicit_symbols.inc b/contrib/llvm/lib/Support/Windows/explicit_symbols.inc
new file mode 100644
index 0000000..84862d6
--- /dev/null
+++ b/contrib/llvm/lib/Support/Windows/explicit_symbols.inc
@@ -0,0 +1,66 @@
+/* in libgcc.a */
+
+#ifdef HAVE__ALLOCA
+ EXPLICIT_SYMBOL(_alloca)
+ EXPLICIT_SYMBOL2(alloca, _alloca);
+#endif
+#ifdef HAVE___ALLOCA
+ EXPLICIT_SYMBOL(__alloca)
+#endif
+#ifdef HAVE___CHKSTK
+ EXPLICIT_SYMBOL(__chkstk)
+#endif
+#ifdef HAVE____CHKSTK
+ EXPLICIT_SYMBOL(___chkstk)
+#endif
+#ifdef HAVE___MAIN
+ EXPLICIT_SYMBOL(__main) // FIXME: Don't call it.
+#endif
+
+#ifdef HAVE___ASHLDI3
+ EXPLICIT_SYMBOL(__ashldi3)
+#endif
+#ifdef HAVE___ASHRDI3
+ EXPLICIT_SYMBOL(__ashrdi3)
+#endif
+#ifdef HAVE___CMPDI2 // FIXME: unused
+ EXPLICIT_SYMBOL(__cmpdi2)
+#endif
+#ifdef HAVE___DIVDI3
+ EXPLICIT_SYMBOL(__divdi3)
+#endif
+#ifdef HAVE___FIXDFDI
+ EXPLICIT_SYMBOL(__fixdfdi)
+#endif
+#ifdef HAVE___FIXSFDI
+ EXPLICIT_SYMBOL(__fixsfdi)
+#endif
+#ifdef HAVE___FIXUNSDFDI
+ EXPLICIT_SYMBOL(__fixunsdfdi)
+#endif
+#ifdef HAVE___FIXUNSSFDI
+ EXPLICIT_SYMBOL(__fixunssfdi)
+#endif
+#ifdef HAVE___FLOATDIDF
+ EXPLICIT_SYMBOL(__floatdidf)
+#endif
+#ifdef HAVE___FLOATDISF
+ EXPLICIT_SYMBOL(__floatdisf)
+#endif
+#ifdef HAVE___LSHRDI3
+ EXPLICIT_SYMBOL(__lshrdi3)
+#endif
+#ifdef HAVE___MODDI3
+ EXPLICIT_SYMBOL(__moddi3)
+#endif
+#ifdef HAVE___UDIVDI3
+ EXPLICIT_SYMBOL(__udivdi3)
+#endif
+#ifdef HAVE___UMODDI3
+ EXPLICIT_SYMBOL(__umoddi3)
+#endif
+
+/* msvcrt */
+#if defined(_MSC_VER)
+ EXPLICIT_SYMBOL2(alloca, _alloca_probe);
+#endif
diff --git a/contrib/llvm/lib/Support/Windows/system_error.inc b/contrib/llvm/lib/Support/Windows/system_error.inc
new file mode 100644
index 0000000..37ec81d
--- /dev/null
+++ b/contrib/llvm/lib/Support/Windows/system_error.inc
@@ -0,0 +1,145 @@
+//===- llvm/Support/Windows/system_error.inc - Windows error_code -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the Windows specific implementation of the error_code
+// and error_condition classes.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//=== WARNING: Implementation here must contain only generic Windows code that
+//=== is guaranteed to work on *all* Windows variants.
+//===----------------------------------------------------------------------===//
+
+#include <windows.h>
+#include <winerror.h>
+
+using namespace llvm;
+
+std::string
+_system_error_category::message(int ev) const {
+ LPVOID lpMsgBuf = 0;
+ DWORD retval = ::FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ ev,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
+ (LPSTR) &lpMsgBuf,
+ 0,
+ NULL);
+ if (retval == 0) {
+ ::LocalFree(lpMsgBuf);
+ return std::string("Unknown error");
+ }
+
+ std::string str( static_cast<LPCSTR>(lpMsgBuf) );
+ ::LocalFree(lpMsgBuf);
+
+ while (str.size()
+ && (str[str.size()-1] == '\n' || str[str.size()-1] == '\r'))
+ str.erase( str.size()-1 );
+ if (str.size() && str[str.size()-1] == '.')
+ str.erase( str.size()-1 );
+ return str;
+}
+
+// I'd rather not double the line count of the following.
+#define MAP_ERR_TO_COND(x, y) case x: return make_error_condition(errc::y)
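+// For example, MAP_ERR_TO_COND(ERROR_ACCESS_DENIED, permission_denied)
+// expands to:
+// case ERROR_ACCESS_DENIED: return make_error_condition(errc::permission_denied);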
+
+error_condition
+_system_error_category::default_error_condition(int ev) const {
+ switch (ev) {
+ MAP_ERR_TO_COND(0, success);
+ // Windows system -> posix_errno decode table ---------------------------//
+ // see WinError.h comments for descriptions of errors
+ MAP_ERR_TO_COND(ERROR_ACCESS_DENIED, permission_denied);
+ MAP_ERR_TO_COND(ERROR_ALREADY_EXISTS, file_exists);
+ MAP_ERR_TO_COND(ERROR_BAD_UNIT, no_such_device);
+ MAP_ERR_TO_COND(ERROR_BUFFER_OVERFLOW, filename_too_long);
+ MAP_ERR_TO_COND(ERROR_BUSY, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_BUSY_DRIVE, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_CANNOT_MAKE, permission_denied);
+ MAP_ERR_TO_COND(ERROR_CANTOPEN, io_error);
+ MAP_ERR_TO_COND(ERROR_CANTREAD, io_error);
+ MAP_ERR_TO_COND(ERROR_CANTWRITE, io_error);
+ MAP_ERR_TO_COND(ERROR_CURRENT_DIRECTORY, permission_denied);
+ MAP_ERR_TO_COND(ERROR_DEV_NOT_EXIST, no_such_device);
+ MAP_ERR_TO_COND(ERROR_DEVICE_IN_USE, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_DIR_NOT_EMPTY, directory_not_empty);
+ MAP_ERR_TO_COND(ERROR_DIRECTORY, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_DISK_FULL, no_space_on_device);
+ MAP_ERR_TO_COND(ERROR_FILE_EXISTS, file_exists);
+ MAP_ERR_TO_COND(ERROR_FILE_NOT_FOUND, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_HANDLE_DISK_FULL, no_space_on_device);
+ MAP_ERR_TO_COND(ERROR_HANDLE_EOF, value_too_large);
+ MAP_ERR_TO_COND(ERROR_INVALID_ACCESS, permission_denied);
+ MAP_ERR_TO_COND(ERROR_INVALID_DRIVE, no_such_device);
+ MAP_ERR_TO_COND(ERROR_INVALID_FUNCTION, function_not_supported);
+ MAP_ERR_TO_COND(ERROR_INVALID_HANDLE, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_INVALID_NAME, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_LOCK_VIOLATION, no_lock_available);
+ MAP_ERR_TO_COND(ERROR_LOCKED, no_lock_available);
+ MAP_ERR_TO_COND(ERROR_NEGATIVE_SEEK, invalid_argument);
+ MAP_ERR_TO_COND(ERROR_NOACCESS, permission_denied);
+ MAP_ERR_TO_COND(ERROR_NOT_ENOUGH_MEMORY, not_enough_memory);
+ MAP_ERR_TO_COND(ERROR_NOT_READY, resource_unavailable_try_again);
+ MAP_ERR_TO_COND(ERROR_NOT_SAME_DEVICE, cross_device_link);
+ MAP_ERR_TO_COND(ERROR_OPEN_FAILED, io_error);
+ MAP_ERR_TO_COND(ERROR_OPEN_FILES, device_or_resource_busy);
+ MAP_ERR_TO_COND(ERROR_OPERATION_ABORTED, operation_canceled);
+ MAP_ERR_TO_COND(ERROR_OUTOFMEMORY, not_enough_memory);
+ MAP_ERR_TO_COND(ERROR_PATH_NOT_FOUND, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_BAD_NETPATH, no_such_file_or_directory);
+ MAP_ERR_TO_COND(ERROR_READ_FAULT, io_error);
+ MAP_ERR_TO_COND(ERROR_RETRY, resource_unavailable_try_again);
+ MAP_ERR_TO_COND(ERROR_SEEK, io_error);
+ MAP_ERR_TO_COND(ERROR_SHARING_VIOLATION, permission_denied);
+ MAP_ERR_TO_COND(ERROR_TOO_MANY_OPEN_FILES, too_many_files_open);
+ MAP_ERR_TO_COND(ERROR_WRITE_FAULT, io_error);
+ MAP_ERR_TO_COND(ERROR_WRITE_PROTECT, permission_denied);
+ MAP_ERR_TO_COND(ERROR_SEM_TIMEOUT, timed_out);
+ MAP_ERR_TO_COND(WSAEACCES, permission_denied);
+ MAP_ERR_TO_COND(WSAEADDRINUSE, address_in_use);
+ MAP_ERR_TO_COND(WSAEADDRNOTAVAIL, address_not_available);
+ MAP_ERR_TO_COND(WSAEAFNOSUPPORT, address_family_not_supported);
+ MAP_ERR_TO_COND(WSAEALREADY, connection_already_in_progress);
+ MAP_ERR_TO_COND(WSAEBADF, bad_file_descriptor);
+ MAP_ERR_TO_COND(WSAECONNABORTED, connection_aborted);
+ MAP_ERR_TO_COND(WSAECONNREFUSED, connection_refused);
+ MAP_ERR_TO_COND(WSAECONNRESET, connection_reset);
+ MAP_ERR_TO_COND(WSAEDESTADDRREQ, destination_address_required);
+ MAP_ERR_TO_COND(WSAEFAULT, bad_address);
+ MAP_ERR_TO_COND(WSAEHOSTUNREACH, host_unreachable);
+ MAP_ERR_TO_COND(WSAEINPROGRESS, operation_in_progress);
+ MAP_ERR_TO_COND(WSAEINTR, interrupted);
+ MAP_ERR_TO_COND(WSAEINVAL, invalid_argument);
+ MAP_ERR_TO_COND(WSAEISCONN, already_connected);
+ MAP_ERR_TO_COND(WSAEMFILE, too_many_files_open);
+ MAP_ERR_TO_COND(WSAEMSGSIZE, message_size);
+ MAP_ERR_TO_COND(WSAENAMETOOLONG, filename_too_long);
+ MAP_ERR_TO_COND(WSAENETDOWN, network_down);
+ MAP_ERR_TO_COND(WSAENETRESET, network_reset);
+ MAP_ERR_TO_COND(WSAENETUNREACH, network_unreachable);
+ MAP_ERR_TO_COND(WSAENOBUFS, no_buffer_space);
+ MAP_ERR_TO_COND(WSAENOPROTOOPT, no_protocol_option);
+ MAP_ERR_TO_COND(WSAENOTCONN, not_connected);
+ MAP_ERR_TO_COND(WSAENOTSOCK, not_a_socket);
+ MAP_ERR_TO_COND(WSAEOPNOTSUPP, operation_not_supported);
+ MAP_ERR_TO_COND(WSAEPROTONOSUPPORT, protocol_not_supported);
+ MAP_ERR_TO_COND(WSAEPROTOTYPE, wrong_protocol_type);
+ MAP_ERR_TO_COND(WSAETIMEDOUT, timed_out);
+ MAP_ERR_TO_COND(WSAEWOULDBLOCK, operation_would_block);
+ default: return error_condition(ev, system_category());
+ }
+}
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index dba46df..80ea740 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -13,13 +13,13 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Format.h"
-#include "llvm/System/Program.h"
-#include "llvm/System/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Process.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/Signals.h"
#include "llvm/ADT/STLExtras.h"
#include <cctype>
#include <cerrno>
@@ -32,6 +32,13 @@
#if defined(HAVE_FCNTL_H)
# include <fcntl.h>
#endif
+#if defined(HAVE_SYS_UIO_H) && defined(HAVE_WRITEV)
+# include <sys/uio.h>
+#endif
+
+#if defined(__CYGWIN__)
+#include <io.h>
+#endif
#if defined(_MSC_VER)
#include <io.h>
@@ -164,7 +171,8 @@ raw_ostream &raw_ostream::write_hex(unsigned long long N) {
return write(CurPtr, EndPtr-CurPtr);
}
-raw_ostream &raw_ostream::write_escaped(StringRef Str) {
+raw_ostream &raw_ostream::write_escaped(StringRef Str,
+ bool UseHexEscapes) {
for (unsigned i = 0, e = Str.size(); i != e; ++i) {
unsigned char c = Str[i];
@@ -187,11 +195,18 @@ raw_ostream &raw_ostream::write_escaped(StringRef Str) {
break;
}
- // Always expand to a 3-character octal escape.
- *this << '\\';
- *this << char('0' + ((c >> 6) & 7));
- *this << char('0' + ((c >> 3) & 7));
- *this << char('0' + ((c >> 0) & 7));
+ // Write out the escaped representation.
+ if (UseHexEscapes) {
+ *this << '\\' << 'x';
+ *this << hexdigit((c >> 4) & 0xF);
+ *this << hexdigit((c >> 0) & 0xF);
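+ // For example, an ESC byte (0x1B) is written as "\x1b".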
+ } else {
+ // Always use a full 3-character octal escape.
+ *this << '\\';
+ *this << char('0' + ((c >> 6) & 7));
+ *this << char('0' + ((c >> 3) & 7));
+ *this << char('0' + ((c >> 0) & 7));
+ }
}
}
@@ -363,7 +378,9 @@ void format_object_base::home() {
/// stream should be immediately destroyed; the string will be empty
/// if no error occurred.
raw_fd_ostream::raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
- unsigned Flags) : Error(false), pos(0) {
+ unsigned Flags)
+ : Error(false), UseAtomicWrites(false), pos(0)
+{
assert(Filename != 0 && "Filename is null");
// Verify that we don't have both "append" and "excl".
assert((!(Flags & F_Excl) || !(Flags & F_Append)) &&
@@ -410,6 +427,26 @@ raw_fd_ostream::raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
ShouldClose = true;
}
+/// raw_fd_ostream ctor - FD is the file descriptor that this writes to. If
+/// ShouldClose is true, this closes the file when the stream is destroyed.
+raw_fd_ostream::raw_fd_ostream(int fd, bool shouldClose, bool unbuffered)
+ : raw_ostream(unbuffered), FD(fd),
+ ShouldClose(shouldClose), Error(false), UseAtomicWrites(false) {
+#ifdef O_BINARY
+ // Setting STDOUT and STDERR to binary mode is necessary in Win32
+ // to avoid undesirable linefeed conversion.
+ if (fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ setmode(fd, O_BINARY);
+#endif
+
+ // Get the starting position.
+ off_t loc = ::lseek(FD, 0, SEEK_CUR);
+ if (loc == (off_t)-1)
+ pos = 0;
+ else
+ pos = static_cast<uint64_t>(loc);
+}
+
raw_fd_ostream::~raw_fd_ostream() {
if (FD >= 0) {
flush();
@@ -435,7 +472,20 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
pos += Size;
do {
- ssize_t ret = ::write(FD, Ptr, Size);
+ ssize_t ret;
+
+ // Check whether we should attempt to use atomic writes.
+ if (BUILTIN_EXPECT(!UseAtomicWrites, true)) {
+ ret = ::write(FD, Ptr, Size);
+ } else {
+ // Use ::writev() where available.
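+ // (Presumably chosen because a single writev() call is not interleaved
+ // with other writes to the same descriptor on POSIX systems.)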
+#if defined(HAVE_WRITEV)
+ struct iovec IOV = { (void*) Ptr, Size };
+ ret = ::writev(FD, &IOV, 1);
+#else
+ ret = ::write(FD, Ptr, Size);
+#endif
+ }
if (ret < 0) {
// If it's a recoverable error, swallow it and retry the write.
@@ -665,34 +715,3 @@ void raw_null_ostream::write_impl(const char *Ptr, size_t Size) {
uint64_t raw_null_ostream::current_pos() const {
return 0;
}
-
-//===----------------------------------------------------------------------===//
-// tool_output_file
-//===----------------------------------------------------------------------===//
-
-tool_output_file::CleanupInstaller::CleanupInstaller(const char *filename)
- : Filename(filename), Keep(false) {
- // Arrange for the file to be deleted if the process is killed.
- if (Filename != "-")
- sys::RemoveFileOnSignal(sys::Path(Filename));
-}
-
-tool_output_file::CleanupInstaller::~CleanupInstaller() {
- // Delete the file if the client hasn't told us not to.
- if (!Keep && Filename != "-")
- sys::Path(Filename).eraseFromDisk();
-
- // Ok, the file is successfully written and closed, or deleted. There's no
- // further need to clean it up on signals.
- if (Filename != "-")
- sys::DontRemoveFileOnSignal(sys::Path(Filename));
-}
-
-tool_output_file::tool_output_file(const char *filename, std::string &ErrorInfo,
- unsigned Flags)
- : Installer(filename),
- OS(filename, ErrorInfo, Flags) {
- // If open fails, no cleanup is needed.
- if (!ErrorInfo.empty())
- Installer.Keep = true;
-}
diff --git a/contrib/llvm/lib/Support/regexec.c b/contrib/llvm/lib/Support/regexec.c
index 41fb2ea..0078616 100644
--- a/contrib/llvm/lib/Support/regexec.c
+++ b/contrib/llvm/lib/Support/regexec.c
@@ -54,8 +54,9 @@
#include "regex2.h"
/* macros for manipulating states, small version */
-#define states long
-#define states1 states /* for later use in llvm_regexec() decision */
+/* FIXME: 'states' is assumed to be 'long' in the small version. */
+#define states1 long /* for later use in llvm_regexec() decision */
+#define states states1
#define CLEAR(v) ((v) = 0)
#define SET0(v, n) ((v) &= ~((unsigned long)1 << (n)))
#define SET1(v, n) ((v) |= (unsigned long)1 << (n))
diff --git a/contrib/llvm/lib/Support/system_error.cpp b/contrib/llvm/lib/Support/system_error.cpp
new file mode 100644
index 0000000..56898de
--- /dev/null
+++ b/contrib/llvm/lib/Support/system_error.cpp
@@ -0,0 +1,130 @@
+//===---------------------- system_error.cpp ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This was lifted from libc++ and modified for C++03.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/system_error.h"
+#include "llvm/Support/Errno.h"
+#include <string>
+#include <cstring>
+
+namespace llvm {
+
+// class error_category
+
+error_category::error_category() {
+}
+
+error_category::~error_category() {
+}
+
+error_condition
+error_category::default_error_condition(int ev) const {
+ return error_condition(ev, *this);
+}
+
+bool
+error_category::equivalent(int code, const error_condition& condition) const {
+ return default_error_condition(code) == condition;
+}
+
+bool
+error_category::equivalent(const error_code& code, int condition) const {
+ return *this == code.category() && code.value() == condition;
+}
+
+std::string
+_do_message::message(int ev) const {
+ return std::string(sys::StrError(ev));
+}
+
+class _generic_error_category : public _do_message {
+public:
+ virtual const char* name() const;
+ virtual std::string message(int ev) const;
+};
+
+const char*
+_generic_error_category::name() const {
+ return "generic";
+}
+
+std::string
+_generic_error_category::message(int ev) const {
+#ifdef ELAST
+ if (ev > ELAST)
+ return std::string("unspecified generic_category error");
+#endif // ELAST
+ return _do_message::message(ev);
+}
+
+const error_category&
+generic_category() {
+ static _generic_error_category s;
+ return s;
+}
+
+class _system_error_category : public _do_message {
+public:
+ virtual const char* name() const;
+ virtual std::string message(int ev) const;
+ virtual error_condition default_error_condition(int ev) const;
+};
+
+const char*
+_system_error_category::name() const {
+ return "system";
+}
+
+// std::string _system_error_category::message(int ev) const {
+// Is in Platform/system_error.inc
+
+// error_condition _system_error_category::default_error_condition(int ev) const
+// Is in Platform/system_error.inc
+
+const error_category&
+system_category() {
+ static _system_error_category s;
+ return s;
+}
+
+const error_category&
+posix_category() {
+#ifdef LLVM_ON_WIN32
+ return generic_category();
+#else
+ return system_category();
+#endif
+}
+
+// error_condition
+
+std::string
+error_condition::message() const {
+ return _cat_->message(_val_);
+}
+
+// error_code
+
+std::string
+error_code::message() const {
+ return _cat_->message(_val_);
+}
+
+} // end namespace llvm
+
+// Include the truly platform-specific parts of this class.
+#if defined(LLVM_ON_UNIX)
+#include "Unix/system_error.inc"
+#endif
+#if defined(LLVM_ON_WIN32)
+#include "Windows/system_error.inc"
+#endif
diff --git a/contrib/llvm/lib/System/Alarm.cpp b/contrib/llvm/lib/System/Alarm.cpp
deleted file mode 100644
index 0014ca7..0000000
--- a/contrib/llvm/lib/System/Alarm.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- Alarm.cpp - Alarm Generation Support ---------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Alarm functionality
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/System/Alarm.h"
-#include "llvm/Config/config.h"
-
-namespace llvm {
-using namespace sys;
-
-//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only TRULY operating system
-//=== independent code.
-//===----------------------------------------------------------------------===//
-
-}
-
-// Include the platform-specific parts of this class.
-#ifdef LLVM_ON_UNIX
-#include "Unix/Alarm.inc"
-#endif
-#ifdef LLVM_ON_WIN32
-#include "Win32/Alarm.inc"
-#endif
diff --git a/contrib/llvm/lib/System/Unix/Alarm.inc b/contrib/llvm/lib/System/Unix/Alarm.inc
deleted file mode 100644
index fb42b6c..0000000
--- a/contrib/llvm/lib/System/Unix/Alarm.inc
+++ /dev/null
@@ -1,72 +0,0 @@
-//===-- Alarm.inc - Implement Unix Alarm Support ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the UNIX Alarm support.
-//
-//===----------------------------------------------------------------------===//
-
-#include <signal.h>
-#include <unistd.h>
-#include <cassert>
-using namespace llvm;
-
-/// AlarmCancelled - This flag is set by the SIGINT signal handler if the
-/// user presses CTRL-C.
-static volatile bool AlarmCancelled = false;
-
-/// AlarmTriggered - This flag is set by the SIGALRM signal handler if the
-/// alarm was triggered.
-static volatile bool AlarmTriggered = false;
-
-/// NestedSOI - Sanity check. Alarms cannot be nested or run in parallel.
-/// This ensures that they never do.
-static bool NestedSOI = false;
-
-static RETSIGTYPE SigIntHandler(int Sig) {
- AlarmCancelled = true;
- signal(SIGINT, SigIntHandler);
-}
-
-static RETSIGTYPE SigAlarmHandler(int Sig) {
- AlarmTriggered = true;
-}
-
-static void (*OldSigIntHandler) (int);
-
-void sys::SetupAlarm(unsigned seconds) {
- assert(!NestedSOI && "sys::SetupAlarm calls cannot be nested!");
- NestedSOI = true;
- AlarmCancelled = false;
- AlarmTriggered = false;
- ::signal(SIGALRM, SigAlarmHandler);
- OldSigIntHandler = ::signal(SIGINT, SigIntHandler);
- ::alarm(seconds);
-}
-
-void sys::TerminateAlarm() {
- assert(NestedSOI && "sys::TerminateAlarm called without sys::SetupAlarm!");
- ::alarm(0);
- ::signal(SIGALRM, SIG_DFL);
- ::signal(SIGINT, OldSigIntHandler);
- AlarmCancelled = false;
- AlarmTriggered = false;
- NestedSOI = false;
-}
-
-int sys::AlarmStatus() {
- if (AlarmCancelled)
- return -1;
- if (AlarmTriggered)
- return 1;
- return 0;
-}
-
-void sys::Sleep(unsigned n) {
- ::sleep(n);
-}
diff --git a/contrib/llvm/lib/System/Win32/Alarm.inc b/contrib/llvm/lib/System/Win32/Alarm.inc
deleted file mode 100644
index e0d00a0..0000000
--- a/contrib/llvm/lib/System/Win32/Alarm.inc
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- Alarm.inc - Implement Win32 Alarm Support ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Win32 Alarm support.
-//
-//===----------------------------------------------------------------------===//
-
-#include <cassert>
-using namespace llvm;
-
-/// NestedSOI - Sanity check. Alarms cannot be nested or run in parallel.
-/// This ensures that they never do.
-static bool NestedSOI = false;
-
-void sys::SetupAlarm(unsigned seconds) {
- assert(!NestedSOI && "sys::SetupAlarm calls cannot be nested!");
- NestedSOI = true;
- // FIXME: Implement for Win32
-}
-
-void sys::TerminateAlarm() {
- assert(NestedSOI && "sys::TerminateAlarm called without sys::SetupAlarm!");
- // FIXME: Implement for Win32
- NestedSOI = false;
-}
-
-int sys::AlarmStatus() {
- // FIXME: Implement for Win32
- return 0;
-}
-
-// Don't pull in all of the Windows headers.
-extern "C" void __stdcall Sleep(unsigned long);
-
-void sys::Sleep(unsigned n) {
- ::Sleep(n*1000);
-}
diff --git a/contrib/llvm/lib/System/Win32/Win32.h b/contrib/llvm/lib/System/Win32/Win32.h
deleted file mode 100644
index 8f505b1..0000000
--- a/contrib/llvm/lib/System/Win32/Win32.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===- Win32/Win32.h - Common Win32 Include File ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines things specific to Win32 implementations.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only generic Win32 code that
-//=== is guaranteed to work on *all* Win32 variants.
-//===----------------------------------------------------------------------===//
-
-// Require at least Windows 2000 API.
-#define _WIN32_WINNT 0x0500
-
-#include "llvm/Config/config.h" // Get autoconf configuration settings
-#include "windows.h"
-#include <cassert>
-#include <string>
-
-inline bool MakeErrMsg(std::string* ErrMsg, const std::string& prefix) {
- if (!ErrMsg)
- return true;
- char *buffer = NULL;
- FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER|FORMAT_MESSAGE_FROM_SYSTEM,
- NULL, GetLastError(), 0, (LPSTR)&buffer, 1, NULL);
- *ErrMsg = prefix + buffer;
- LocalFree(buffer);
- return true;
-}
-
-class AutoHandle {
- HANDLE handle;
-
-public:
- AutoHandle(HANDLE h) : handle(h) {}
-
- ~AutoHandle() {
- if (handle)
- CloseHandle(handle);
- }
-
- operator HANDLE() {
- return handle;
- }
-
- AutoHandle &operator=(HANDLE h) {
- handle = h;
- return *this;
- }
-};
diff --git a/contrib/llvm/lib/Target/ARM/ARM.h b/contrib/llvm/lib/Target/ARM/ARM.h
index 271ca44..4679f74 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm/lib/Target/ARM/ARM.h
@@ -15,6 +15,7 @@
#ifndef TARGET_ARM_H
#define TARGET_ARM_H
+#include "ARMBaseInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
@@ -25,97 +26,17 @@ class ARMBaseTargetMachine;
class FunctionPass;
class JITCodeEmitter;
class formatted_raw_ostream;
+class MCCodeEmitter;
+class TargetAsmBackend;
+class MachineInstr;
+class ARMAsmPrinter;
+class MCInst;
-// Enums corresponding to ARM condition codes
-namespace ARMCC {
- // The CondCodes constants map directly to the 4-bit encoding of the
- // condition field for predicated instructions.
- enum CondCodes { // Meaning (integer) Meaning (floating-point)
- EQ, // Equal Equal
- NE, // Not equal Not equal, or unordered
- HS, // Carry set >, ==, or unordered
- LO, // Carry clear Less than
- MI, // Minus, negative Less than
- PL, // Plus, positive or zero >, ==, or unordered
- VS, // Overflow Unordered
- VC, // No overflow Not unordered
- HI, // Unsigned higher Greater than, or unordered
- LS, // Unsigned lower or same Less than or equal
- GE, // Greater than or equal Greater than or equal
- LT, // Less than Less than, or unordered
- GT, // Greater than Greater than
- LE, // Less than or equal <, ==, or unordered
- AL // Always (unconditional) Always (unconditional)
- };
+MCCodeEmitter *createARMMCCodeEmitter(const Target &,
+ TargetMachine &TM,
+ MCContext &Ctx);
- inline static CondCodes getOppositeCondition(CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case EQ: return NE;
- case NE: return EQ;
- case HS: return LO;
- case LO: return HS;
- case MI: return PL;
- case PL: return MI;
- case VS: return VC;
- case VC: return VS;
- case HI: return LS;
- case LS: return HI;
- case GE: return LT;
- case LT: return GE;
- case GT: return LE;
- case LE: return GT;
- }
- }
-} // namespace ARMCC
-
-inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case ARMCC::EQ: return "eq";
- case ARMCC::NE: return "ne";
- case ARMCC::HS: return "hs";
- case ARMCC::LO: return "lo";
- case ARMCC::MI: return "mi";
- case ARMCC::PL: return "pl";
- case ARMCC::VS: return "vs";
- case ARMCC::VC: return "vc";
- case ARMCC::HI: return "hi";
- case ARMCC::LS: return "ls";
- case ARMCC::GE: return "ge";
- case ARMCC::LT: return "lt";
- case ARMCC::GT: return "gt";
- case ARMCC::LE: return "le";
- case ARMCC::AL: return "al";
- }
-}
-
-namespace ARM_MB {
- // The Memory Barrier Option constants map directly to the 4-bit encoding of
- // the option field for memory barrier operations.
- enum MemBOpt {
- ST = 14,
- ISH = 11,
- ISHST = 10,
- NSH = 7,
- NSHST = 6,
- OSH = 3,
- OSHST = 2
- };
-
- inline static const char *MemBOptToString(unsigned val) {
- switch (val) {
- default: llvm_unreachable("Unknown memory opetion");
- case ST: return "st";
- case ISH: return "ish";
- case ISHST: return "ishst";
- case NSH: return "nsh";
- case NSHST: return "nshst";
- case OSH: return "osh";
- case OSHST: return "oshst";
- }
- }
-} // namespace ARM_MB
+TargetAsmBackend *createARMAsmBackend(const Target &, const std::string &);
FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
CodeGenOpt::Level OptLevel);
@@ -127,23 +48,16 @@ FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);
FunctionPass *createARMExpandPseudoPass();
FunctionPass *createARMGlobalMergePass(const TargetLowering* tli);
FunctionPass *createARMConstantIslandPass();
-FunctionPass *createNEONPreAllocPass();
FunctionPass *createNEONMoveFixPass();
+FunctionPass *createMLxExpansionPass();
FunctionPass *createThumb2ITBlockPass();
FunctionPass *createThumb2SizeReductionPass();
extern Target TheARMTarget, TheThumbTarget;
-} // end namespace llvm;
-
-// Defines symbolic names for ARM registers. This defines a mapping from
-// register name to register number.
-//
-#include "ARMGenRegisterNames.inc"
-
-// Defines symbolic names for the ARM instructions.
-//
-#include "ARMGenInstrNames.inc"
+void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ ARMAsmPrinter &AP);
+} // end namespace llvm;
#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index d6a8f19..bf4315f 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -16,6 +16,7 @@
include "llvm/Target/Target.td"
+
//===----------------------------------------------------------------------===//
// ARM Subtarget features.
//
@@ -32,6 +33,8 @@ def FeatureNoARM : SubtargetFeature<"noarm", "NoARM", "true",
"Does not support ARM mode execution">;
def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true",
"Enable half-precision floating point">;
+def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true",
+ "Restrict VFP3 to 16 double registers">;
def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true",
"Enable divide instructions">;
def FeatureT2XtPk : SubtargetFeature<"t2xtpk", "HasT2ExtractPack", "true",
@@ -43,14 +46,11 @@ def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true",
def FeatureVFPOnlySP : SubtargetFeature<"fp-only-sp", "FPOnlySP", "true",
"Floating point unit supports single precision only">;
-// Some processors have multiply-accumulate instructions that don't
-// play nicely with other VFP instructions, and it's generally better
+// Some processors have FP multiply-accumulate instructions that don't
+// play nicely with other VFP / NEON instructions, and it's generally better
// to just not use them.
-// FIXME: Currently, this is only flagged for Cortex-A8. It may be true for
-// others as well. We should do more benchmarking and confirm one way or
-// the other.
-def FeatureHasSlowVMLx : SubtargetFeature<"vmlx", "SlowVMLx", "true",
- "Disable VFP MAC instructions">;
+def FeatureHasSlowFPVMLx : SubtargetFeature<"slowfpvmlx", "SlowFPVMLx", "true",
+ "Disable VFP / NEON MAC instructions">;
// Some processors benefit from using NEON instructions for scalar
// single-precision FP operations.
def FeatureNEONForFP : SubtargetFeature<"neonfp", "UseNEONForSinglePrecisionFP",
@@ -61,6 +61,9 @@ def FeatureNEONForFP : SubtargetFeature<"neonfp", "UseNEONForSinglePrecisionFP",
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
"Prefer 32-bit Thumb instrs">;
+// Multiprocessing extension.
+def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true",
+ "Supports Multiprocessing extension">;
// ARM architectures.
def ArchV4T : SubtargetFeature<"v4t", "ARMArchVersion", "V4T",
@@ -91,6 +94,18 @@ def ArchV7M : SubtargetFeature<"v7m", "ARMArchVersion", "V7M",
include "ARMSchedule.td"
+// ARM processor families.
+def ProcOthers : SubtargetFeature<"others", "ARMProcFamily", "Others",
+ "One of the other ARM processor families">;
+def ProcA8 : SubtargetFeature<"a8", "ARMProcFamily", "CortexA8",
+ "Cortex-A8 ARM processors",
+ [FeatureSlowFPBrcc, FeatureNEONForFP,
+ FeatureHasSlowFPVMLx, FeatureT2XtPk]>;
+def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9",
+ "Cortex-A9 ARM processors",
+ [FeatureHasSlowFPVMLx, FeatureT2XtPk,
+ FeatureFP16]>;
+
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, GenericItineraries, Features>;
@@ -135,25 +150,27 @@ def : ProcNoItin<"iwmmxt", [ArchV5TE]>;
// V6 Processors.
def : Processor<"arm1136j-s", ARMV6Itineraries, [ArchV6]>;
def : Processor<"arm1136jf-s", ARMV6Itineraries, [ArchV6, FeatureVFP2,
- FeatureHasSlowVMLx]>;
+ FeatureHasSlowFPVMLx]>;
def : Processor<"arm1176jz-s", ARMV6Itineraries, [ArchV6]>;
-def : Processor<"arm1176jzf-s", ARMV6Itineraries, [ArchV6, FeatureVFP2]>;
+def : Processor<"arm1176jzf-s", ARMV6Itineraries, [ArchV6, FeatureVFP2,
+ FeatureHasSlowFPVMLx]>;
def : Processor<"mpcorenovfp", ARMV6Itineraries, [ArchV6]>;
-def : Processor<"mpcore", ARMV6Itineraries, [ArchV6, FeatureVFP2]>;
+def : Processor<"mpcore", ARMV6Itineraries, [ArchV6, FeatureVFP2,
+ FeatureHasSlowFPVMLx]>;
// V6M Processors.
def : Processor<"cortex-m0", ARMV6Itineraries, [ArchV6M]>;
// V6T2 Processors.
def : Processor<"arm1156t2-s", ARMV6Itineraries, [ArchV6T2]>;
-def : Processor<"arm1156t2f-s", ARMV6Itineraries, [ArchV6T2, FeatureVFP2]>;
+def : Processor<"arm1156t2f-s", ARMV6Itineraries, [ArchV6T2, FeatureVFP2,
+ FeatureHasSlowFPVMLx]>;
// V7 Processors.
def : Processor<"cortex-a8", CortexA8Itineraries,
- [ArchV7A, FeatureHasSlowVMLx,
- FeatureSlowFPBrcc, FeatureNEONForFP, FeatureT2XtPk]>;
+ [ArchV7A, ProcA8]>;
def : Processor<"cortex-a9", CortexA9Itineraries,
- [ArchV7A, FeatureT2XtPk]>;
+ [ArchV7A, ProcA9]>;
// V7M Processors.
def : ProcNoItin<"cortex-m3", [ArchV7M]>;
@@ -175,6 +192,17 @@ include "ARMInstrInfo.td"
def ARMInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// Assembly printer
+//===----------------------------------------------------------------------===//
+// ARM uses the MC printer for asm output, so make sure the TableGen
+// AsmWriter bits get associated with the correct class.
+def ARMAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
+
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
@@ -182,4 +210,6 @@ def ARMInstrInfo : InstrInfo;
def ARM : Target {
// Pull in Instruction Info:
let InstructionSet = ARMInstrInfo;
+
+ let AssemblyWriters = [ARMAsmWriter];
}
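
The ProcA8/ProcA9 definitions above let one processor-family feature pull in
a list of implied features. A standalone C++ model of that implication; the
bit assignments are invented for illustration, and only the implication sets
mirror the .td entries:

#include <cstdio>

// Illustrative stand-ins for the SubtargetFeature bits defined in ARM.td.
enum : unsigned {
  FeatureSlowFPBrcc    = 1u << 0,
  FeatureNEONForFP     = 1u << 1,
  FeatureHasSlowFPVMLx = 1u << 2,
  FeatureT2XtPk        = 1u << 3,
  FeatureFP16          = 1u << 4,
};

int main() {
  // ProcA8 and ProcA9 fold in their implied features, per ARM.td above.
  unsigned ProcA8 = FeatureSlowFPBrcc | FeatureNEONForFP |
                    FeatureHasSlowFPVMLx | FeatureT2XtPk;
  unsigned ProcA9 = FeatureHasSlowFPVMLx | FeatureT2XtPk | FeatureFP16;

  // Both families suppress the slow FP MAC instructions;
  // only A9 also advertises half-precision (fp16) conversions.
  std::printf("A8 slow vmlx: %d\n", (int)((ProcA8 & FeatureHasSlowFPVMLx) != 0));
  std::printf("A9 fp16:      %d\n", (int)((ProcA9 & FeatureFP16) != 0));
  return 0;
}
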
diff --git a/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h b/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
index db48100..19fbf05 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
@@ -50,6 +50,16 @@ namespace ARM_AM {
}
}
+ static inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
+ switch (Op) {
+ default: assert(0 && "Unknown shift opc!");
+ case ARM_AM::asr: return 2;
+ case ARM_AM::lsl: return 0;
+ case ARM_AM::lsr: return 1;
+ case ARM_AM::ror: return 3;
+ }
+ }
+
static inline ShiftOpc getShiftOpcForNode(SDValue N) {
switch (N.getOpcode()) {
default: return ARM_AM::no_shift;
@@ -566,6 +576,8 @@ namespace ARM_AM {
return Val;
}
+ AMSubMode getLoadStoreMultipleSubMode(int Opcode);
+
} // end namespace ARM_AM
} // end namespace llvm
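
The new getShiftOpcEncoding above pins down the 2-bit shift-type field used
by register-shifted operands (bits 6-5 of an A32 data-processing encoding).
A standalone check of the mapping; the local enum is a stand-in for ARM_AM's:

#include <cassert>

enum ShiftOpc { lsl, lsr, asr, ror };

// Same table as ARM_AM::getShiftOpcEncoding in the hunk above.
static unsigned getShiftOpcEncoding(ShiftOpc Op) {
  switch (Op) {
  case lsl: return 0;
  case lsr: return 1;
  case asr: return 2;
  case ror: return 3;
  }
  return ~0u; // not reached
}

int main() {
  // e.g. "add r0, r1, r2, asr #3" carries 0b10 in the shift-type field.
  assert(getShiftOpcEncoding(asr) == 2);
  assert(getShiftOpcEncoding(ror) == 3);
  return 0;
}
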
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmBackend.cpp
new file mode 100644
index 0000000..ec23449
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmBackend.cpp
@@ -0,0 +1,512 @@
+//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARM.h"
+#include "ARMAddressingModes.h"
+#include "ARMFixupKinds.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Object/MachOFormat.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace llvm;
+
+namespace {
+class ARMMachObjectWriter : public MCMachObjectTargetWriter {
+public:
+ ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType,
+ uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/true) {}
+};
+
+class ARMELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ ARMELFObjectWriter(Triple::OSType OSType)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSType, ELF::EM_ARM,
+ /*HasRelocationAddend*/ false) {}
+};
+
+class ARMAsmBackend : public TargetAsmBackend {
+ bool isThumbMode; // Currently emitting Thumb code.
+public:
+ ARMAsmBackend(const Target &T) : TargetAsmBackend(), isThumbMode(false) {}
+
+ unsigned getNumFixupKinds() const { return ARM::NumTargetFixupKinds; }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[ARM::NumTargetFixupKinds] = {
+// This table *must* be in the order that the fixup_* kinds are defined in
+// ARMFixupKinds.h.
+//
+// Name Offset (bits) Size (bits) Flags
+{ "fixup_arm_ldst_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_pcrel_10", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_thumb_adr_pcrel_10",0, 8, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_adr_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+{ "fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_blx", 7, 21, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_cp", 1, 8, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_thumb_bcc", 1, 8, MCFixupKindInfo::FKF_IsPCRel },
+// movw / movt: a 16-bit immediate scattered into two chunks, bits 0-11 and 16-19.
+{ "fixup_arm_movt_hi16", 0, 20, 0 },
+{ "fixup_arm_movw_lo16", 0, 20, 0 },
+{ "fixup_t2_movt_hi16", 0, 20, 0 },
+{ "fixup_t2_movw_lo16", 0, 20, 0 },
+{ "fixup_arm_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_t2_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel },
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return TargetAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ bool MayNeedRelaxation(const MCInst &Inst) const;
+
+ void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+
+ bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
+
+ void HandleAssemblerFlag(MCAssemblerFlag Flag) {
+ switch (Flag) {
+ default: break;
+ case MCAF_Code16:
+ setIsThumb(true);
+ break;
+ case MCAF_Code32:
+ setIsThumb(false);
+ break;
+ }
+ }
+
+ unsigned getPointerSize() const { return 4; }
+ bool isThumb() const { return isThumbMode; }
+ void setIsThumb(bool it) { isThumbMode = it; }
+};
+} // end anonymous namespace
+
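
getFixupKindInfo above splits the fixup namespace: generic kinds (below
FirstTargetFixupKind) defer to the base class, while target kinds index the
static table by Kind - FirstTargetFixupKind. A standalone model of that
lookup; the numeric value of FirstTargetFixupKind is illustrative:

#include <cassert>
#include <cstdio>

enum { FirstTargetFixupKind = 128 }; // illustrative; see MCFixup.h

struct FixupInfo { const char *Name; unsigned Offset, Size, Flags; };

// First two rows of the ARM table above (IsPCRel = 1, AlignedDownTo32 = 2).
static const FixupInfo TargetInfos[] = {
  { "fixup_arm_ldst_pcrel_12", 1, 24, 1 },
  { "fixup_t2_ldst_pcrel_12",  0, 32, 3 },
};

static const FixupInfo &getFixupKindInfo(unsigned Kind) {
  static const FixupInfo Generic = { "generic", 0, 0, 0 };
  if (Kind < FirstTargetFixupKind)
    return Generic; // the real code delegates to TargetAsmBackend here
  assert(Kind - FirstTargetFixupKind < 2 && "Invalid kind!");
  return TargetInfos[Kind - FirstTargetFixupKind];
}

int main() {
  // Target kind 129 maps to the second row of the table.
  std::printf("%s\n", getFixupKindInfo(FirstTargetFixupKind + 1).Name);
  return 0;
}
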
+bool ARMAsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+ // FIXME: Thumb targets, different move constant targets...
+ return false;
+}
+
+void ARMAsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ assert(0 && "ARMAsmBackend::RelaxInstruction() unimplemented");
+ return;
+}
+
+bool ARMAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+ if (isThumb()) {
+ // FIXME: 0xbf00 is the ARMv7 value. For v6 and before, we'll need to
+ // use 0x46c0 (which is a 'mov r8, r8' insn).
+ uint64_t NumNops = Count / 2;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write16(0xbf00);
+ if (Count & 1)
+ OW->Write8(0);
+ return true;
+ }
+ // ARM mode
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->Write32(0xe1a00000);
+ switch (Count % 4) {
+ default: break; // No leftover bytes to write
+ case 1: OW->Write8(0); break;
+ case 2: OW->Write16(0); break;
+ case 3: OW->Write16(0); OW->Write8(0xa0); break;
+ }
+
+ return true;
+}
+
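
WriteNopData pads a fragment with whole NOPs plus trailing zero bytes. A
standalone rerun of the ARM-mode arithmetic for Count = 10, with byte
emission modeled as little-endian appends in place of the object writer:

#include <cstdint>
#include <cstdio>
#include <vector>

// Append a 32-bit word little-endian, standing in for MCObjectWriter::Write32.
static void write32le(std::vector<uint8_t> &Buf, uint32_t V) {
  for (int i = 0; i < 4; ++i)
    Buf.push_back(uint8_t(V >> (8 * i)));
}

int main() {
  std::vector<uint8_t> Buf;
  uint64_t Count = 10;              // bytes of padding requested

  // ARM mode, as in WriteNopData: whole 'mov r0, r0' NOPs first...
  uint64_t NumNops = Count / 4;     // 2 full NOPs
  for (uint64_t i = 0; i != NumNops; ++i)
    write32le(Buf, 0xe1a00000);
  // ...then Count % 4 == 2 leftover bytes, emitted as a 16-bit zero.
  Buf.push_back(0);
  Buf.push_back(0);

  for (uint8_t B : Buf) std::printf("%02x ", B);
  std::printf("\n");                // 00 00 a0 e1 00 00 a0 e1 00 00
  return 0;
}
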
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ return Value;
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ Value >>= 16;
+ // Fallthrough
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movw_lo16_pcrel: {
+ unsigned Hi4 = (Value & 0xF000) >> 12;
+ unsigned Lo12 = Value & 0x0FFF;
+ // inst{19-16} = Hi4;
+ // inst{11-0} = Lo12;
+ Value = (Hi4 << 16) | (Lo12);
+ return Value;
+ }
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ Value >>= 16;
+ // Fallthrough
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel: {
+ unsigned Hi4 = (Value & 0xF000) >> 12;
+ unsigned i = (Value & 0x800) >> 11;
+ unsigned Mid3 = (Value & 0x700) >> 8;
+ unsigned Lo8 = Value & 0x0FF;
+ // inst{19-16} = Hi4;
+ // inst{26} = i;
+ // inst{14-12} = Mid3;
+ // inst{7-0} = Lo8;
+ Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
+
+ uint64_t swapped = (Value & 0xFFFF0000) >> 16;
+ swapped |= (Value & 0x0000FFFF) << 16;
+ return swapped;
+ }
+ case ARM::fixup_arm_ldst_pcrel_12:
+ // ARM PC-relative values are offset by 8.
+ Value -= 4;
+ // FALLTHROUGH
+ case ARM::fixup_t2_ldst_pcrel_12: {
+ // Offset by 4, adjusted by two due to the half-word ordering of thumb.
+ Value -= 4;
+ bool isAdd = true;
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ isAdd = false;
+ }
+ assert ((Value < 4096) && "Out of range pc-relative fixup value!");
+ Value |= isAdd << 23;
+
+ // Same addressing mode as fixup_arm_pcrel_10,
+ // but with 16-bit halfwords swapped.
+ if (Kind == ARM::fixup_t2_ldst_pcrel_12) {
+ uint64_t swapped = (Value & 0xFFFF0000) >> 16;
+ swapped |= (Value & 0x0000FFFF) << 16;
+ return swapped;
+ }
+
+ return Value;
+ }
+ case ARM::fixup_thumb_adr_pcrel_10:
+ return ((Value - 4) >> 2) & 0xff;
+ case ARM::fixup_arm_adr_pcrel_12: {
+ // ARM PC-relative values are offset by 8.
+ Value -= 8;
+ unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ opc = 2; // 0b0010
+ }
+ assert(ARM_AM::getSOImmVal(Value) != -1 &&
+ "Out of range pc-relative fixup value!");
+ // Encode the immediate and shift the opcode into place.
+ return ARM_AM::getSOImmVal(Value) | (opc << 21);
+ }
+
+ case ARM::fixup_t2_adr_pcrel_12: {
+ Value -= 4;
+ unsigned opc = 0;
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ opc = 5;
+ }
+
+ uint32_t out = (opc << 21);
+ out |= (Value & 0x800) << 14;
+ out |= (Value & 0x700) << 4;
+ out |= (Value & 0x0FF);
+
+ uint64_t swapped = (out & 0xFFFF0000) >> 16;
+ swapped |= (out & 0x0000FFFF) << 16;
+ return swapped;
+ }
+
+ case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
+ // These values don't encode the low two bits since they're always zero.
+ // Offset by 8 just as above.
+ return 0xffffff & ((Value - 8) >> 2);
+ case ARM::fixup_t2_uncondbranch: {
+ Value = Value - 4;
+ Value >>= 1; // Low bit is not encoded.
+
+ uint32_t out = 0;
+ bool I = Value & 0x800000;
+ bool J1 = Value & 0x400000;
+ bool J2 = Value & 0x200000;
+ J1 ^= I;
+ J2 ^= I;
+
+ out |= I << 26; // S bit
+ out |= !J1 << 13; // J1 bit
+ out |= !J2 << 11; // J2 bit
+ out |= (Value & 0x1FF800) << 5; // imm6 field
+ out |= (Value & 0x0007FF); // imm11 field
+
+ uint64_t swapped = (out & 0xFFFF0000) >> 16;
+ swapped |= (out & 0x0000FFFF) << 16;
+ return swapped;
+ }
+ case ARM::fixup_t2_condbranch: {
+ Value = Value - 4;
+ Value >>= 1; // Low bit is not encoded.
+
+ uint64_t out = 0;
+ out |= (Value & 0x80000) << 7; // S bit
+ out |= (Value & 0x40000) >> 7; // J2 bit
+ out |= (Value & 0x20000) >> 4; // J1 bit
+ out |= (Value & 0x1F800) << 5; // imm6 field
+ out |= (Value & 0x007FF); // imm11 field
+
+ uint32_t swapped = (out & 0xFFFF0000) >> 16;
+ swapped |= (out & 0x0000FFFF) << 16;
+ return swapped;
+ }
+ case ARM::fixup_arm_thumb_bl: {
+ // The value doesn't encode the low bit (always zero) and is offset by
+ // four. The value is encoded into disjoint bit positions in the destination
+ // opcode. x = unchanged, I = immediate value bit, S = sign extension bit
+ //
+ // BL: xxxxxSIIIIIIIIII xxxxxIIIIIIIIIII
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ unsigned isNeg = (int64_t(Value) < 0) ? 1 : 0;
+ uint32_t Binary = 0;
+ Value = 0x3fffff & ((Value - 4) >> 1);
+ Binary = (Value & 0x7ff) << 16; // Low imm11 value.
+ Binary |= (Value & 0x1ffc00) >> 11; // High imm10 value.
+ Binary |= isNeg << 10; // Sign bit.
+ return Binary;
+ }
+ case ARM::fixup_arm_thumb_blx: {
+ // The value doesn't encode the low two bits (always zero) and is offset by
+ // four (see fixup_arm_thumb_cp). The value is encoded into disjoint bit
+ // positions in the destination opcode. x = unchanged, I = immediate value
+ // bit, S = sign extension bit, 0 = zero.
+ //
+ // BLX: xxxxxSIIIIIIIIII xxxxxIIIIIIIIII0
+ //
+ // Note that the halfwords are stored high first, low second; so we need
+ // to transpose the fixup value here to map properly.
+ unsigned isNeg = (int64_t(Value) < 0) ? 1 : 0;
+ uint32_t Binary = 0;
+ Value = 0xfffff & ((Value - 2) >> 2);
+ Binary = (Value & 0x3ff) << 17; // Low imm10L value.
+ Binary |= (Value & 0xffc00) >> 10; // High imm10H value.
+ Binary |= isNeg << 10; // Sign bit.
+ return Binary;
+ }
+ case ARM::fixup_arm_thumb_cp:
+ // Offset by 4, and don't encode the low two bits. Two bytes of that
+ // 'off by 4' are implicitly handled by the half-word ordering of the
+ // Thumb encoding, so we only need to adjust by 2 here.
+ return ((Value - 2) >> 2) & 0xff;
+ case ARM::fixup_arm_thumb_cb: {
+ // Offset by 4 and don't encode the lower bit, which is always 0.
+ uint32_t Binary = (Value - 4) >> 1;
+ return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
+ }
+ case ARM::fixup_arm_thumb_br:
+ // Offset by 4 and don't encode the lower bit, which is always 0.
+ return ((Value - 4) >> 1) & 0x7ff;
+ case ARM::fixup_arm_thumb_bcc:
+ // Offset by 4 and don't encode the lower bit, which is always 0.
+ return ((Value - 4) >> 1) & 0xff;
+ case ARM::fixup_arm_pcrel_10:
+ Value = Value - 4; // ARM fixups offset by an additional word and don't
+ // need to adjust for the half-word ordering.
+ // Fall through.
+ case ARM::fixup_t2_pcrel_10: {
+ // Offset by 4, adjusted by two due to the half-word ordering of thumb.
+ Value = Value - 4;
+ bool isAdd = true;
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ isAdd = false;
+ }
+ // These values don't encode the low two bits since they're always zero.
+ Value >>= 2;
+ assert ((Value < 256) && "Out of range pc-relative fixup value!");
+ Value |= isAdd << 23;
+
+ // Same addressing mode as fixup_arm_pcrel_10,
+ // but with 16-bit halfwords swapped.
+ if (Kind == ARM::fixup_t2_pcrel_10) {
+ uint32_t swapped = (Value & 0xFFFF0000) >> 16;
+ swapped |= (Value & 0x0000FFFF) << 16;
+ return swapped;
+ }
+
+ return Value;
+ }
+ }
+}
+
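
The movw/movt cases above scatter a 16-bit immediate into the instruction
word and, for the Thumb2 variants, additionally swap the two 16-bit
halfwords, since Thumb2 instructions are stored as two little-endian
halfwords. Re-running the ARM movt math standalone (0x12345678 is an
arbitrary sample value):

#include <cassert>
#include <cstdint>

// Same arithmetic as the fixup_arm_movt_hi16 case in adjustFixupValue.
static uint32_t adjustARMMovtHi16(uint64_t Value) {
  Value >>= 16;                           // movt takes the high half
  uint32_t Hi4  = (Value & 0xF000) >> 12; // -> inst{19-16}
  uint32_t Lo12 =  Value & 0x0FFF;        // -> inst{11-0}
  return (Hi4 << 16) | Lo12;
}

// The halfword transposition used by the fixup_t2_* cases above.
static uint32_t swapHalfwords(uint32_t V) {
  return ((V & 0xFFFF0000) >> 16) | ((V & 0x0000FFFF) << 16);
}

int main() {
  // movt of 0x12345678 encodes imm16 = 0x1234: Hi4 = 1, Lo12 = 0x234.
  assert(adjustARMMovtHi16(0x12345678) == 0x00010234);
  // The Thumb2 forms apply this swap to their own (different) scattering;
  // it is shown here on the ARM result just to illustrate the transposition.
  assert(swapHalfwords(0x00010234) == 0x02340001);
  return 0;
}
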
+namespace {
+
+// FIXME: This should be in a separate file.
+// ELF is an ELF of course...
+class ELFARMAsmBackend : public ARMAsmBackend {
+public:
+ Triple::OSType OSType;
+ ELFARMAsmBackend(const Target &T, Triple::OSType _OSType)
+ : ARMAsmBackend(T), OSType(_OSType) { }
+
+ void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value) const;
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return createELFObjectWriter(new ARMELFObjectWriter(OSType), OS,
+ /*IsLittleEndian*/ true);
+ }
+};
+
+// FIXME: Raise this to share code between Darwin and ELF.
+void ELFARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value) const {
+ unsigned NumBytes = 4; // FIXME: 2 for Thumb
+ Value = adjustFixupValue(Fixup.getKind(), Value);
+ if (!Value) return; // Doesn't change encoding.
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset % NumBytes == 0 && "Offset mod NumBytes is nonzero!");
+
+ // For each byte of the fragment that the fixup touches, mask in the bits from
+ // the fixup value. The Value has been "split up" into the appropriate
+ // bitfields above.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
+
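
ApplyFixup ORs the adjusted value into the fragment a byte at a time,
little-endian, so instruction bits already encoded in the buffer survive.
A standalone run of that loop on a sample instruction word:

#include <cstdint>
#include <cstdio>

int main() {
  // Four bytes of an already-encoded instruction (little-endian in memory);
  // the fixup field within it is currently zero.
  uint8_t Data[] = { 0x00, 0x00, 0x9f, 0xe5 };  // "ldr r0, [pc, #0]"
  unsigned Offset = 0, NumBytes = 4;
  uint64_t Value = 0x10234;                     // a sample adjusted fixup

  // The loop from ELFARMAsmBackend::ApplyFixup: mask each value byte in.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);

  for (unsigned i = 0; i != 4; ++i) std::printf("%02x ", Data[i]);
  std::printf("\n");  // 34 02 9f e5 -- the preset opcode bits survive the OR
  return 0;
}
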
+// FIXME: This should be in a separate file.
+class DarwinARMAsmBackend : public ARMAsmBackend {
+public:
+ DarwinARMAsmBackend(const Target &T) : ARMAsmBackend(T) { }
+
+ void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value) const;
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ // FIXME: Subtarget info should be derived. Force v7 for now.
+ return createMachObjectWriter(new ARMMachObjectWriter(
+ /*Is64Bit=*/false,
+ object::mach::CTM_ARM,
+ object::mach::CSARM_V7),
+ OS,
+ /*IsLittleEndian=*/true);
+ }
+
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ return false;
+ }
+};
+
+/// getFixupKindNumBytes - The number of bytes the fixup may change.
+static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+
+ case FK_Data_1:
+ case ARM::fixup_arm_thumb_bcc:
+ case ARM::fixup_arm_thumb_cp:
+ case ARM::fixup_thumb_adr_pcrel_10:
+ return 1;
+
+ case FK_Data_2:
+ case ARM::fixup_arm_thumb_br:
+ case ARM::fixup_arm_thumb_cb:
+ return 2;
+
+ case ARM::fixup_arm_ldst_pcrel_12:
+ case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_condbranch:
+ case ARM::fixup_arm_uncondbranch:
+ return 3;
+
+ case FK_Data_4:
+ case ARM::fixup_t2_ldst_pcrel_12:
+ case ARM::fixup_t2_condbranch:
+ case ARM::fixup_t2_uncondbranch:
+ case ARM::fixup_t2_pcrel_10:
+ case ARM::fixup_t2_adr_pcrel_12:
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_blx:
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ case ARM::fixup_arm_movw_lo16_pcrel:
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ return 4;
+ }
+}
+
+void DarwinARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value) const {
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
+ Value = adjustFixupValue(Fixup.getKind(), Value);
+ if (!Value) return; // Doesn't change encoding.
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
+
+} // end anonymous namespace
+
+TargetAsmBackend *llvm::createARMAsmBackend(const Target &T,
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinARMAsmBackend(T);
+ case Triple::MinGW32:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ assert(0 && "Windows not supported on ARM");
+ default:
+ return new ELFARMAsmBackend(T, Triple(TT).getOS());
+ }
+}
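
Backend selection keys entirely off the OS component of the target triple.
A usage sketch, assuming the in-tree llvm::Triple API at this revision (the
triples themselves are just examples):

#include "llvm/ADT/Triple.h"
#include <cassert>

int main() {
  using namespace llvm;
  // Mach-O writer for Darwin triples...
  assert(Triple("armv7-apple-darwin10").getOS() == Triple::Darwin);
  // ...and the ELF writer for everything else that isn't Windows.
  assert(Triple("arm-none-linux-gnueabi").getOS() == Triple::Linux);
  return 0;
}
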
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 6cfd596..db12b8e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -14,28 +14,31 @@
#define DEBUG_TYPE "asm-printer"
#include "ARM.h"
-#include "ARMBuildAttrs.h"
+#include "ARMAsmPrinter.h"
#include "ARMAddressingModes.h"
+#include "ARMBuildAttrs.h"
+#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
-#include "AsmPrinter/ARMInstPrinter.h"
#include "ARMMachineFunctionInfo.h"
-#include "ARMMCInstLower.h"
+#include "ARMMCExpr.h"
#include "ARMTargetMachine.h"
+#include "ARMTargetObjectFile.h"
+#include "InstPrinter/ARMInstPrinter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
@@ -53,270 +56,127 @@
#include <cctype>
using namespace llvm;
-static cl::opt<bool>
-EnableMCInst("enable-arm-mcinst-printer", cl::Hidden,
- cl::desc("enable experimental asmprinter gunk in the arm backend"));
-
-namespace llvm {
- namespace ARM {
- enum DW_ISA {
- DW_ISA_ARM_thumb = 1,
- DW_ISA_ARM_arm = 2
- };
- }
-}
-
namespace {
- class ARMAsmPrinter : public AsmPrinter {
-
- /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
- /// make the right decision when printing asm code for different targets.
- const ARMSubtarget *Subtarget;
- /// AFI - Keep a pointer to ARMFunctionInfo for the current
- /// MachineFunction.
- ARMFunctionInfo *AFI;
+ // Per-section and per-symbol attributes are not supported.
+ // To implement them we would need the ability to delay this emission
+ // until the assembly file is fully parsed/generated, as only then do we
+ // know the symbol and section numbers.
+ class AttributeEmitter {
+ public:
+ virtual void MaybeSwitchVendor(StringRef Vendor) = 0;
+ virtual void EmitAttribute(unsigned Attribute, unsigned Value) = 0;
+ virtual void EmitTextAttribute(unsigned Attribute, StringRef String) = 0;
+ virtual void Finish() = 0;
+ virtual ~AttributeEmitter() {}
+ };
- /// MCP - Keep a pointer to constantpool entries of the current
- /// MachineFunction.
- const MachineConstantPool *MCP;
+ class AsmAttributeEmitter : public AttributeEmitter {
+ MCStreamer &Streamer;
public:
- explicit ARMAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), AFI(NULL), MCP(NULL) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- }
+ AsmAttributeEmitter(MCStreamer &Streamer_) : Streamer(Streamer_) {}
+ void MaybeSwitchVendor(StringRef Vendor) { }
- virtual const char *getPassName() const {
- return "ARM Assembly Printer";
+ void EmitAttribute(unsigned Attribute, unsigned Value) {
+ Streamer.EmitRawText("\t.eabi_attribute " +
+ Twine(Attribute) + ", " + Twine(Value));
}
- void printInstructionThroughMCStreamer(const MachineInstr *MI);
-
-
- void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char *Modifier = 0);
- void printSOImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O);
- void printSOImm2PartOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printSORegOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode2Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode2OffsetOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode3Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode3OffsetOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode4Operand(const MachineInstr *MI, int OpNum,raw_ostream &O,
- const char *Modifier = 0);
- void printAddrMode5Operand(const MachineInstr *MI, int OpNum,raw_ostream &O,
- const char *Modifier = 0);
- void printAddrMode6Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrMode6OffsetOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printAddrModePCOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O,
- const char *Modifier = 0);
- void printBitfieldInvMaskImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printMemBOption(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printShiftImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
-
- void printThumbS4ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printThumbITMask(const MachineInstr *MI, int OpNum, raw_ostream &O);
- void printThumbAddrModeRROperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printThumbAddrModeRI5Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O,
- unsigned Scale);
- void printThumbAddrModeS1Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printThumbAddrModeS2Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printThumbAddrModeS4Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printThumbAddrModeSPOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
-
- void printT2SOOperand(const MachineInstr *MI, int OpNum, raw_ostream &O);
- void printT2AddrModeImm12Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printT2AddrModeImm8Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printT2AddrModeImm8s4Operand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printT2AddrModeImm8OffsetOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printT2AddrModeImm8s4OffsetOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {}
- void printT2AddrModeSoRegOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
-
- void printCPSOptionOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {}
- void printMSRMaskOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {}
- void printNegZeroOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {}
- void printPredicateOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printMandatoryPredicateOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printSBitModifierOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printPCLabel(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printRegisterList(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printCPInstOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O,
- const char *Modifier);
- void printJTBlockOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printJT2BlockOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printTBAddrMode(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printNoHashImmediate(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printVFPf32ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printVFPf64ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
- void printNEONModImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O);
-
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O);
-
- void printInstruction(const MachineInstr *MI, raw_ostream &O); // autogen
- static const char *getRegisterName(unsigned RegNo);
-
- virtual void EmitInstruction(const MachineInstr *MI);
- bool runOnMachineFunction(MachineFunction &F);
-
- virtual void EmitConstantPool() {} // we emit constant pools customly!
- virtual void EmitFunctionEntryLabel();
- void EmitStartOfAsmFile(Module &M);
- void EmitEndOfAsmFile(Module &M);
-
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const {
- MachineLocation Location;
- assert (MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- // Frame address. Currently handles register +- offset only.
- if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm())
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
- else {
- DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
+ void EmitTextAttribute(unsigned Attribute, StringRef String) {
+ switch (Attribute) {
+ case ARMBuildAttrs::CPU_name:
+ Streamer.EmitRawText(StringRef("\t.cpu ") + LowercaseString(String));
+ break;
+ default: assert(0 && "Unsupported Text attribute in ASM Mode"); break;
}
- return Location;
+ }
+ void Finish() { }
+ };
+
+ class ObjectAttributeEmitter : public AttributeEmitter {
+ MCObjectStreamer &Streamer;
+ StringRef CurrentVendor;
+ SmallString<64> Contents;
+
+ public:
+ ObjectAttributeEmitter(MCObjectStreamer &Streamer_) :
+ Streamer(Streamer_), CurrentVendor("") { }
+
+ void MaybeSwitchVendor(StringRef Vendor) {
+ assert(!Vendor.empty() && "Vendor cannot be empty.");
+
+ if (CurrentVendor.empty())
+ CurrentVendor = Vendor;
+ else if (CurrentVendor == Vendor)
+ return;
+ else
+ Finish();
+
+ CurrentVendor = Vendor;
+
+ assert(Contents.size() == 0);
}
- virtual unsigned getISAEncoding() {
- // ARM/Darwin adds ISA to the DWARF info for each function.
- if (!Subtarget->isTargetDarwin())
- return 0;
- return Subtarget->isThumb() ?
- llvm::ARM::DW_ISA_ARM_thumb : llvm::ARM::DW_ISA_ARM_arm;
+ void EmitAttribute(unsigned Attribute, unsigned Value) {
+ // FIXME: should be ULEB
+ Contents += Attribute;
+ Contents += Value;
}
- MCSymbol *GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
- const MachineBasicBlock *MBB) const;
- MCSymbol *GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const;
-
- /// EmitMachineConstantPoolValue - Print a machine constantpool value to
- /// the .s file.
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- EmitMachineConstantPoolValue(MCPV, OS);
- OutStreamer.EmitRawText(OS.str());
+ void EmitTextAttribute(unsigned Attribute, StringRef String) {
+ Contents += Attribute;
+ Contents += UppercaseString(String);
+ Contents += 0;
}
- void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV,
- raw_ostream &O) {
- switch (TM.getTargetData()->getTypeAllocSize(MCPV->getType())) {
- case 1: O << MAI->getData8bitsDirective(0); break;
- case 2: O << MAI->getData16bitsDirective(0); break;
- case 4: O << MAI->getData32bitsDirective(0); break;
- default: assert(0 && "Unknown CPV size");
- }
+ void Finish() {
+ const size_t ContentsSize = Contents.size();
- ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
-
- if (ACPV->isLSDA()) {
- O << MAI->getPrivateGlobalPrefix() << "_LSDA_" << getFunctionNumber();
- } else if (ACPV->isBlockAddress()) {
- O << *GetBlockAddressSymbol(ACPV->getBlockAddress());
- } else if (ACPV->isGlobalValue()) {
- const GlobalValue *GV = ACPV->getGV();
- bool isIndirect = Subtarget->isTargetDarwin() &&
- Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
- if (!isIndirect)
- O << *Mang->getSymbol(GV);
- else {
- // FIXME: Remove this when Darwin transition to @GOT like syntax.
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- O << *Sym;
-
- MachineModuleInfoMachO &MMIMachO =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
- MachineModuleInfoImpl::StubValueTy &StubSym =
- GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(Sym) :
- MMIMachO.getGVStubEntry(Sym);
- if (StubSym.getPointer() == 0)
- StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
- }
- } else {
- assert(ACPV->isExtSymbol() && "unrecognized constant pool value");
- O << *GetExternalSymbolSymbol(ACPV->getSymbol());
- }
+ // Vendor size + Vendor name + '\0'
+ const size_t VendorHeaderSize = 4 + CurrentVendor.size() + 1;
- if (ACPV->hasModifier()) O << "(" << ACPV->getModifier() << ")";
- if (ACPV->getPCAdjustment() != 0) {
- O << "-(" << MAI->getPrivateGlobalPrefix() << "PC"
- << getFunctionNumber() << "_" << ACPV->getLabelId()
- << "+" << (unsigned)ACPV->getPCAdjustment();
- if (ACPV->mustAddCurrentAddress())
- O << "-.";
- O << ')';
- }
+ // Tag + Tag Size
+ const size_t TagHeaderSize = 1 + 4;
+
+ Streamer.EmitIntValue(VendorHeaderSize + TagHeaderSize + ContentsSize, 4);
+ Streamer.EmitBytes(CurrentVendor, 0);
+ Streamer.EmitIntValue(0, 1); // '\0'
+
+ Streamer.EmitIntValue(ARMBuildAttrs::File, 1);
+ Streamer.EmitIntValue(TagHeaderSize + ContentsSize, 4);
+
+ Streamer.EmitBytes(Contents, 0);
+
+ Contents.clear();
}
};
+
} // end of anonymous namespace
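
ObjectAttributeEmitter::Finish lays the attributes blob out as: a 4-byte
subsection length (itself counted in VendorHeaderSize), the vendor string
plus its NUL, a File tag byte, a 4-byte tag size, then the attribute bytes.
Recomputing the header sizes standalone for the usual "aeabi" vendor; the
two attribute bytes are made-up sample data, and per the FIXME above, real
AEABI values are ULEB128-encoded rather than single bytes:

#include <cstdio>
#include <cstdint>
#include <string>
#include <vector>

int main() {
  std::string Vendor = "aeabi";
  std::vector<uint8_t> Contents;
  Contents.push_back(6);   // sample attribute tag
  Contents.push_back(10);  // sample attribute value

  // Same arithmetic as Finish():
  size_t VendorHeaderSize = 4 + Vendor.size() + 1; // length word + name + '\0'
  size_t TagHeaderSize    = 1 + 4;                 // File tag byte + size word
  size_t SectionLen = VendorHeaderSize + TagHeaderSize + Contents.size();

  std::printf("vendor header %zu, tag header %zu, total %zu\n",
              VendorHeaderSize, TagHeaderSize, SectionLen);
  // -> vendor header 10, tag header 5, total 17
  return 0;
}
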
-#include "ARMGenAsmWriter.inc"
+MachineLocation ARMAsmPrinter::
+getDebugValueLocation(const MachineInstr *MI) const {
+ MachineLocation Location;
+ assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
+ // Frame address. Currently handles register +- offset only.
+ if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm())
+ Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
+ else {
+ DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
+ }
+ return Location;
+}
void ARMAsmPrinter::EmitFunctionEntryLabel() {
if (AFI->isThumbFunction()) {
- OutStreamer.EmitRawText(StringRef("\t.code\t16"));
- if (!Subtarget->isTargetDarwin())
- OutStreamer.EmitRawText(StringRef("\t.thumb_func"));
- else {
- // This needs to emit to a temporary string to get properly quoted
- // MCSymbols when they have spaces in them.
- SmallString<128> Tmp;
- raw_svector_ostream OS(Tmp);
- OS << "\t.thumb_func\t" << *CurrentFnSym;
- OutStreamer.EmitRawText(OS.str());
- }
+ OutStreamer.EmitAssemblerFlag(MCAF_Code16);
+ OutStreamer.EmitThumbFunc(Subtarget->isTargetDarwin()? CurrentFnSym : 0);
}
OutStreamer.EmitLabel(CurrentFnSym);
}
-/// runOnMachineFunction - This uses the printInstruction()
+/// runOnMachineFunction - This uses the EmitInstruction()
/// method to print assembly for each instruction.
///
bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
@@ -337,32 +197,18 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
case MachineOperand::MO_Register: {
unsigned Reg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
- if (Modifier && strcmp(Modifier, "dregpair") == 0) {
- unsigned DRegLo = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_0);
- unsigned DRegHi = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_1);
- O << '{'
- << getRegisterName(DRegLo) << ", " << getRegisterName(DRegHi)
- << '}';
- } else if (Modifier && strcmp(Modifier, "lane") == 0) {
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
- unsigned DReg =
- TM.getRegisterInfo()->getMatchingSuperReg(Reg,
- RegNum & 1 ? ARM::ssub_1 : ARM::ssub_0, &ARM::DPR_VFP2RegClass);
- O << getRegisterName(DReg) << '[' << (RegNum & 1) << ']';
- } else {
- assert(!MO.getSubReg() && "Subregs should be eliminated!");
- O << getRegisterName(Reg);
- }
+ assert(!MO.getSubReg() && "Subregs should be eliminated!");
+ O << ARMInstPrinter::getRegisterName(Reg);
break;
}
case MachineOperand::MO_Immediate: {
int64_t Imm = MO.getImm();
O << '#';
if ((Modifier && strcmp(Modifier, "lo16") == 0) ||
- (TF & ARMII::MO_LO16))
+ (TF == ARMII::MO_LO16))
O << ":lower16:";
else if ((Modifier && strcmp(Modifier, "hi16") == 0) ||
- (TF & ARMII::MO_HI16))
+ (TF == ARMII::MO_HI16))
O << ":upper16:";
O << Imm;
break;
@@ -371,9 +217,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
O << *MO.getMBB()->getSymbol();
return;
case MachineOperand::MO_GlobalAddress: {
- bool isCallOp = Modifier && !strcmp(Modifier, "call");
const GlobalValue *GV = MO.getGlobal();
-
if ((Modifier && strcmp(Modifier, "lo16") == 0) ||
(TF & ARMII::MO_LO16))
O << ":lower16:";
@@ -383,18 +227,13 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
O << *Mang->getSymbol(GV);
printOffset(MO.getOffset(), O);
-
- if (isCallOp && Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ if (TF == ARMII::MO_PLT)
O << "(PLT)";
break;
}
case MachineOperand::MO_ExternalSymbol: {
- bool isCallOp = Modifier && !strcmp(Modifier, "call");
O << *GetExternalSymbolSymbol(MO.getSymbolName());
-
- if (isCallOp && Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ if (TF == ARMII::MO_PLT)
O << "(PLT)";
break;
}
@@ -407,538 +246,8 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
}
}
-static void printSOImm(raw_ostream &O, int64_t V, bool VerboseAsm,
- const MCAsmInfo *MAI) {
- // Break it up into two parts that make up a shifter immediate.
- V = ARM_AM::getSOImmVal(V);
- assert(V != -1 && "Not a valid so_imm value!");
-
- unsigned Imm = ARM_AM::getSOImmValImm(V);
- unsigned Rot = ARM_AM::getSOImmValRot(V);
-
- // Print low-level immediate formation info, per
- // A5.1.3: "Data-processing operands - Immediate".
- if (Rot) {
- O << "#" << Imm << ", " << Rot;
- // Pretty printed version.
- if (VerboseAsm) {
- O << "\t" << MAI->getCommentString() << ' ';
- O << (int)ARM_AM::rotr32(Imm, Rot);
- }
- } else {
- O << "#" << Imm;
- }
-}
-
-/// printSOImmOperand - SOImm is 4-bit rotate amount in bits 8-11 with 8-bit
-/// immediate in bits 0-7.
-void ARMAsmPrinter::printSOImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isImm() && "Not a valid so_imm value!");
- printSOImm(O, MO.getImm(), isVerbose(), MAI);
-}
-
-/// printSOImm2PartOperand - SOImm is broken into two pieces using a 'mov'
-/// followed by an 'orr' to materialize.
-void ARMAsmPrinter::printSOImm2PartOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isImm() && "Not a valid so_imm value!");
- unsigned V1 = ARM_AM::getSOImmTwoPartFirst(MO.getImm());
- unsigned V2 = ARM_AM::getSOImmTwoPartSecond(MO.getImm());
- printSOImm(O, V1, isVerbose(), MAI);
- O << "\n\torr";
- printPredicateOperand(MI, 2, O);
- O << "\t";
- printOperand(MI, 0, O);
- O << ", ";
- printOperand(MI, 0, O);
- O << ", ";
- printSOImm(O, V2, isVerbose(), MAI);
-}
-
-// so_reg is a 4-operand unit corresponding to register forms of the A5.1
-// "Addressing Mode 1 - Data-processing operands" forms. This includes:
-// REG 0 0 - e.g. R5
-// REG REG 0,SH_OPC - e.g. R5, ROR R3
-// REG 0 IMM,SH_OPC - e.g. R5, LSL #3
-void ARMAsmPrinter::printSORegOperand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- O << getRegisterName(MO1.getReg());
-
- // Print the shift opc.
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO3.getImm());
- O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
- if (MO2.getReg()) {
- O << ' ' << getRegisterName(MO2.getReg());
- assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
- } else if (ShOpc != ARM_AM::rrx) {
- O << " #" << ARM_AM::getSORegOffset(MO3.getImm());
- }
-}
-
-void ARMAsmPrinter::printAddrMode2Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op, O);
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
- O << ", #"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << ARM_AM::getAM2Offset(MO3.getImm());
- O << "]";
- return;
- }
-
- O << ", "
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
- << getRegisterName(MO2.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
- << " #" << ShImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode2OffsetOperand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (!MO1.getReg()) {
- unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
- O << "#"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
- << ImmOffs;
- return;
- }
-
- O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
- << getRegisterName(MO1.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm()))
- << " #" << ShImm;
-}
-
-void ARMAsmPrinter::printAddrMode3Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
- O << "[" << getRegisterName(MO1.getReg());
-
- if (MO2.getReg()) {
- O << ", "
- << (char)ARM_AM::getAM3Op(MO3.getImm())
- << getRegisterName(MO2.getReg())
- << "]";
- return;
- }
-
- if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
- O << ", #"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
- << ImmOffs;
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode3OffsetOperand(const MachineInstr *MI, int Op,
- raw_ostream &O){
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (MO1.getReg()) {
- O << (char)ARM_AM::getAM3Op(MO2.getImm())
- << getRegisterName(MO1.getReg());
- return;
- }
-
- unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
- O << "#"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
- << ImmOffs;
-}
-
-void ARMAsmPrinter::printAddrMode4Operand(const MachineInstr *MI, int Op,
- raw_ostream &O,
- const char *Modifier) {
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- O << ARM_AM::getAMSubModeStr(Mode);
- } else if (Modifier && strcmp(Modifier, "wide") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Mode == ARM_AM::ia)
- O << ".w";
- } else {
- printOperand(MI, Op, O);
- }
-}
-
-void ARMAsmPrinter::printAddrMode5Operand(const MachineInstr *MI, int Op,
- raw_ostream &O,
- const char *Modifier) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op, O);
- return;
- }
-
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
- O << ", #"
- << ARM_AM::getAddrOpcStr(ARM_AM::getAM5Op(MO2.getImm()))
- << ImmOffs*4;
- }
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode6Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- O << "[" << getRegisterName(MO1.getReg());
- if (MO2.getImm()) {
- // FIXME: Both darwin as and GNU as violate ARM docs here.
- O << ", :" << (MO2.getImm() << 3);
- }
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode6OffsetOperand(const MachineInstr *MI, int Op,
- raw_ostream &O){
- const MachineOperand &MO = MI->getOperand(Op);
- if (MO.getReg() == 0)
- O << "!";
- else
- O << ", " << getRegisterName(MO.getReg());
-}
-
-void ARMAsmPrinter::printAddrModePCOperand(const MachineInstr *MI, int Op,
- raw_ostream &O,
- const char *Modifier) {
- if (Modifier && strcmp(Modifier, "label") == 0) {
- printPCLabel(MI, Op+1, O);
- return;
- }
-
- const MachineOperand &MO1 = MI->getOperand(Op);
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
- O << "[pc, " << getRegisterName(MO1.getReg()) << "]";
-}
-
-void
-ARMAsmPrinter::printBitfieldInvMaskImmOperand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(Op);
- uint32_t v = ~MO.getImm();
- int32_t lsb = CountTrailingZeros_32(v);
- int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
- assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
- O << "#" << lsb << ", #" << width;
-}
-
-void
-ARMAsmPrinter::printMemBOption(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- unsigned val = MI->getOperand(OpNum).getImm();
- O << ARM_MB::MemBOptToString(val);
-}
-
-void ARMAsmPrinter::printShiftImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- unsigned ShiftOp = MI->getOperand(OpNum).getImm();
- ARM_AM::ShiftOpc Opc = ARM_AM::getSORegShOp(ShiftOp);
- switch (Opc) {
- case ARM_AM::no_shift:
- return;
- case ARM_AM::lsl:
- O << ", lsl #";
- break;
- case ARM_AM::asr:
- O << ", asr #";
- break;
- default:
- assert(0 && "unexpected shift opcode for shift immediate operand");
- }
- O << ARM_AM::getSORegOffset(ShiftOp);
-}
-
-//===--------------------------------------------------------------------===//
-
-void ARMAsmPrinter::printThumbS4ImmOperand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- O << "#" << MI->getOperand(Op).getImm() * 4;
-}
-
-void
-ARMAsmPrinter::printThumbITMask(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- // (3 - the number of trailing zeros) is the number of then / else.
- unsigned Mask = MI->getOperand(Op).getImm();
- unsigned CondBit0 = Mask >> 4 & 1;
- unsigned NumTZ = CountTrailingZeros_32(Mask);
- assert(NumTZ <= 3 && "Invalid IT mask!");
- for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
- bool T = ((Mask >> Pos) & 1) == CondBit0;
- if (T)
- O << 't';
- else
- O << 'e';
- }
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeRROperand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg());
- O << ", " << getRegisterName(MO2.getReg()) << "]";
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeRI5Operand(const MachineInstr *MI, int Op,
- raw_ostream &O,
- unsigned Scale) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op, O);
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
- if (MO3.getReg())
- O << ", " << getRegisterName(MO3.getReg());
- else if (unsigned ImmOffs = MO2.getImm())
- O << ", #" << ImmOffs * Scale;
- O << "]";
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeS1Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 1);
-}
-void
-ARMAsmPrinter::printThumbAddrModeS2Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 2);
-}
-void
-ARMAsmPrinter::printThumbAddrModeS4Operand(const MachineInstr *MI, int Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 4);
-}
-
-void ARMAsmPrinter::printThumbAddrModeSPOperand(const MachineInstr *MI,int Op,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg());
- if (unsigned ImmOffs = MO2.getImm())
- O << ", #" << ImmOffs*4;
- O << "]";
-}
-
-//===--------------------------------------------------------------------===//
-
-// Constant shifts t2_so_reg is a 2-operand unit corresponding to the Thumb2
-// register with shift forms.
-// REG 0 0 - e.g. R5
-// REG IMM, SH_OPC - e.g. R5, LSL #3
-void ARMAsmPrinter::printT2SOOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- unsigned Reg = MO1.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
- O << getRegisterName(Reg);
-
- // Print the shift opc.
- assert(MO2.isImm() && "Not a valid t2_so_reg value!");
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO2.getImm());
- O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
- if (ShOpc != ARM_AM::rrx)
- O << " #" << ARM_AM::getSORegOffset(MO2.getImm());
-}
-
-void ARMAsmPrinter::printT2AddrModeImm12Operand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- unsigned OffImm = MO2.getImm();
- if (OffImm) // Don't print +0.
- O << ", #" << OffImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8Operand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- int32_t OffImm = (int32_t)MO2.getImm();
- // Don't print +0.
- if (OffImm < 0)
- O << ", #-" << -OffImm;
- else if (OffImm > 0)
- O << ", #" << OffImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8s4Operand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- int32_t OffImm = (int32_t)MO2.getImm() / 4;
- // Don't print +0.
- if (OffImm < 0)
- O << ", #-" << -OffImm * 4;
- else if (OffImm > 0)
- O << ", #" << OffImm * 4;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8OffsetOperand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- int32_t OffImm = (int32_t)MO1.getImm();
- // Don't print +0.
- if (OffImm < 0)
- O << "#-" << -OffImm;
- else if (OffImm > 0)
- O << "#" << OffImm;
-}
-
-void ARMAsmPrinter::printT2AddrModeSoRegOperand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
- const MachineOperand &MO3 = MI->getOperand(OpNum+2);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- assert(MO2.getReg() && "Invalid so_reg load / store address!");
- O << ", " << getRegisterName(MO2.getReg());
-
- unsigned ShAmt = MO3.getImm();
- if (ShAmt) {
- assert(ShAmt <= 3 && "Not a valid Thumb2 addressing mode!");
- O << ", lsl #" << ShAmt;
- }
- O << "]";
-}
-
-
//===--------------------------------------------------------------------===//
-void ARMAsmPrinter::printPredicateOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- if (CC != ARMCC::AL)
- O << ARMCondCodeToString(CC);
-}
-
-void ARMAsmPrinter::printMandatoryPredicateOperand(const MachineInstr *MI,
- int OpNum,
- raw_ostream &O) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- O << ARMCondCodeToString(CC);
-}
-
-void ARMAsmPrinter::printSBitModifierOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O){
- unsigned Reg = MI->getOperand(OpNum).getReg();
- if (Reg) {
- assert(Reg == ARM::CPSR && "Expect ARM CPSR register!");
- O << 's';
- }
-}
-
-void ARMAsmPrinter::printPCLabel(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- int Id = (int)MI->getOperand(OpNum).getImm();
- O << MAI->getPrivateGlobalPrefix()
- << "PC" << getFunctionNumber() << "_" << Id;
-}
-
-void ARMAsmPrinter::printRegisterList(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "{";
- for (unsigned i = OpNum, e = MI->getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).isImplicit())
- continue;
- if ((int)i != OpNum) O << ", ";
- printOperand(MI, i, O);
- }
- O << "}";
-}
-
-void ARMAsmPrinter::printCPInstOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O, const char *Modifier) {
- assert(Modifier && "This operand only works with a modifier!");
- // There are two aspects to a CONSTANTPOOL_ENTRY operand, the label and the
- // data itself.
- if (!strcmp(Modifier, "label")) {
- unsigned ID = MI->getOperand(OpNum).getImm();
- OutStreamer.EmitLabel(GetCPISymbol(ID));
- } else {
- assert(!strcmp(Modifier, "cpentry") && "Unknown modifier for CPE");
- unsigned CPI = MI->getOperand(OpNum).getIndex();
-
- const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
-
- if (MCPE.isMachineConstantPoolEntry()) {
- EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
- } else {
- EmitGlobalConstant(MCPE.Val.ConstVal);
- }
- }
-}
-
MCSymbol *ARMAsmPrinter::
GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
const MachineBasicBlock *MBB) const {
@@ -957,126 +266,12 @@ GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
return OutContext.GetOrCreateSymbol(Name.str());
}
-void ARMAsmPrinter::printJTBlockOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- assert(!Subtarget->isThumb2() && "Thumb2 should use double-jump jumptables!");
-
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
-
- unsigned JTI = MO1.getIndex();
- MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
- // Can't use EmitLabel until instprinter happens, label comes out in the wrong
- // order.
- O << "\n" << *JTISymbol << ":\n";
-
- const char *JTEntryDirective = MAI->getData32bitsDirective();
-
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- bool UseSet= MAI->hasSetDirective() && TM.getRelocationModel() == Reloc::PIC_;
- SmallPtrSet<MachineBasicBlock*, 8> JTSets;
- for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
- MachineBasicBlock *MBB = JTBBs[i];
- bool isNew = JTSets.insert(MBB);
-
- if (UseSet && isNew) {
- O << "\t.set\t"
- << *GetARMSetPICJumpTableLabel2(JTI, MO2.getImm(), MBB) << ','
- << *MBB->getSymbol() << '-' << *JTISymbol << '\n';
- }
-
- O << JTEntryDirective << ' ';
- if (UseSet)
- O << *GetARMSetPICJumpTableLabel2(JTI, MO2.getImm(), MBB);
- else if (TM.getRelocationModel() == Reloc::PIC_)
- O << *MBB->getSymbol() << '-' << *JTISymbol;
- else
- O << *MBB->getSymbol();
-
- if (i != e-1)
- O << '\n';
- }
-}
-
-void ARMAsmPrinter::printJT2BlockOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
- unsigned JTI = MO1.getIndex();
-
- MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
-
- // Can't use EmitLabel until instprinter happens, label comes out in the wrong
- // order.
- O << "\n" << *JTISymbol << ":\n";
-
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- bool ByteOffset = false, HalfWordOffset = false;
- if (MI->getOpcode() == ARM::t2TBB)
- ByteOffset = true;
- else if (MI->getOpcode() == ARM::t2TBH)
- HalfWordOffset = true;
-
- for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
- MachineBasicBlock *MBB = JTBBs[i];
- if (ByteOffset)
- O << MAI->getData8bitsDirective();
- else if (HalfWordOffset)
- O << MAI->getData16bitsDirective();
-
- if (ByteOffset || HalfWordOffset)
- O << '(' << *MBB->getSymbol() << "-" << *JTISymbol << ")/2";
- else
- O << "\tb.w " << *MBB->getSymbol();
-
- if (i != e-1)
- O << '\n';
- }
-}
-
-void ARMAsmPrinter::printTBAddrMode(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "[pc, " << getRegisterName(MI->getOperand(OpNum).getReg());
- if (MI->getOpcode() == ARM::t2TBH)
- O << ", lsl #1";
- O << ']';
-}
-void ARMAsmPrinter::printNoHashImmediate(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << MI->getOperand(OpNum).getImm();
-}
-
-void ARMAsmPrinter::printVFPf32ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const ConstantFP *FP = MI->getOperand(OpNum).getFPImm();
- O << '#' << FP->getValueAPF().convertToFloat();
- if (isVerbose()) {
- O << "\t\t" << MAI->getCommentString() << ' ';
- WriteAsOperand(O, FP, /*PrintType=*/false);
- }
-}
-
-void ARMAsmPrinter::printVFPf64ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const ConstantFP *FP = MI->getOperand(OpNum).getFPImm();
- O << '#' << FP->getValueAPF().convertToDouble();
- if (isVerbose()) {
- O << "\t\t" << MAI->getCommentString() << ' ';
- WriteAsOperand(O, FP, /*PrintType=*/false);
- }
-}
-
-void ARMAsmPrinter::printNEONModImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- unsigned EncodedImm = MI->getOperand(OpNum).getImm();
- unsigned EltBits;
- uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits);
- O << "#0x" << utohexstr(Val);
+MCSymbol *ARMAsmPrinter::GetARMSJLJEHLabel(void) const {
+ SmallString<60> Name;
+ raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "SJLJEH"
+ << getFunctionNumber();
+ return OutContext.GetOrCreateSymbol(Name.str());
}
bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
@@ -1090,14 +285,16 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
default: return true; // Unknown modifier.
case 'a': // Print as a memory address.
if (MI->getOperand(OpNum).isReg()) {
- O << "[" << getRegisterName(MI->getOperand(OpNum).getReg()) << "]";
+ O << "["
+ << ARMInstPrinter::getRegisterName(MI->getOperand(OpNum).getReg())
+ << "]";
return false;
}
// Fallthrough
case 'c': // Don't print "#" before an immediate operand.
if (!MI->getOperand(OpNum).isImm())
return true;
- printNoHashImmediate(MI, OpNum, O);
+ O << MI->getOperand(OpNum).getImm();
return false;
case 'P': // Print a VFP double precision register.
case 'q': // Print a NEON quad precision register.
@@ -1106,7 +303,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
case 'Q':
case 'R':
case 'H':
- report_fatal_error("llvm does not support 'Q', 'R', and 'H' modifiers!");
+ // These modifiers are not yet supported.
return true;
}
}
@@ -1124,48 +321,10 @@ bool ARMAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
const MachineOperand &MO = MI->getOperand(OpNum);
assert(MO.isReg() && "unexpected inline asm memory operand");
- O << "[" << getRegisterName(MO.getReg()) << "]";
+ O << "[" << ARMInstPrinter::getRegisterName(MO.getReg()) << "]";
return false;
}
-void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- if (EnableMCInst) {
- printInstructionThroughMCStreamer(MI);
- return;
- }
-
- if (MI->getOpcode() == ARM::CONSTPOOL_ENTRY)
- EmitAlignment(2);
-
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- if (MI->getOpcode() == ARM::DBG_VALUE) {
- unsigned NOps = MI->getNumOperands();
- assert(NOps==4);
- OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- // cast away const; DIetc do not take const operands for some reason.
- DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
- OS << V.getName();
- OS << " <- ";
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
- OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS);
- OS << ']';
- OS << "+";
- printOperand(MI, NOps-2, OS);
- OutStreamer.EmitRawText(OS.str());
- return;
- }
-
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
-
- // Make sure the instruction that follows TBB is 2-byte aligned.
- // FIXME: Constant island pass should insert an "ALIGN" instruction instead.
- if (MI->getOpcode() == ARM::t2TBB)
- EmitAlignment(1);
-}
-
void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
if (Subtarget->isTargetDarwin()) {
Reloc::Model RelocM = TM.getRelocationModel();
@@ -1205,49 +364,12 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
}
// Use unified assembler syntax.
- OutStreamer.EmitRawText(StringRef("\t.syntax unified"));
+ OutStreamer.EmitAssemblerFlag(MCAF_SyntaxUnified);
// Emit ARM Build Attributes
if (Subtarget->isTargetELF()) {
- // CPU Type
- std::string CPUString = Subtarget->getCPUString();
- if (CPUString != "generic")
- OutStreamer.EmitRawText("\t.cpu " + Twine(CPUString));
-
- // FIXME: Emit FPU type
- if (Subtarget->hasVFP2())
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::VFP_arch) + ", 2");
-
- // Signal various FP modes.
- if (!UnsafeFPMath) {
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_FP_denormal) + ", 1");
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_FP_exceptions) + ", 1");
- }
- if (NoInfsFPMath && NoNaNsFPMath)
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_FP_number_model)+ ", 1");
- else
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_FP_number_model)+ ", 3");
-
- // 8-bytes alignment stuff.
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_align8_needed) + ", 1");
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_align8_preserved) + ", 1");
-
- // Hard float. Use both S and D registers and conform to AAPCS-VFP.
- if (Subtarget->isAAPCS_ABI() && FloatABIType == FloatABI::Hard) {
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_HardFP_use) + ", 3");
- OutStreamer.EmitRawText("\t.eabi_attribute " +
- Twine(ARMBuildAttrs::ABI_VFP_args) + ", 1");
- }
- // FIXME: Should we signal R9 usage?
+ emitAttributes();
}
}
@@ -1280,10 +402,10 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
else
// Internal to current translation unit.
//
- // When we place the LSDA into the TEXT section, the type info pointers
- // need to be indirect and pc-rel. We accomplish this by using NLPs.
- // However, sometimes the types are local to the file. So we need to
- // fill in the value for the NLP in those cases.
+ // When we place the LSDA into the TEXT section, the type info
+ // pointers need to be indirect and pc-rel. We accomplish this by
+ // using NLPs; however, sometimes the types are local to the file.
+ // We need to fill in the value for the NLP in those cases.
OutStreamer.EmitValue(MCSymbolRefExpr::Create(MCSym.getPointer(),
OutContext),
4/*size*/, 0/*addrspace*/);
@@ -1321,38 +443,631 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
}
//===----------------------------------------------------------------------===//
+// Helper routines for EmitStartOfAsmFile() and EmitEndOfAsmFile()
+// FIXME:
+// The following seem like one-off assembler flags, but they actually need
+// to appear in the .ARM.attributes section in ELF.
+// Instead of subclassing the MCELFStreamer, we do the work here.
+
+void ARMAsmPrinter::emitAttributes() {
+
+ emitARMAttributeSection();
+
+ AttributeEmitter *AttrEmitter;
+ if (OutStreamer.hasRawTextSupport())
+ AttrEmitter = new AsmAttributeEmitter(OutStreamer);
+ else {
+ MCObjectStreamer &O = static_cast<MCObjectStreamer&>(OutStreamer);
+ AttrEmitter = new ObjectAttributeEmitter(O);
+ }
+
+ AttrEmitter->MaybeSwitchVendor("aeabi");
+
+ std::string CPUString = Subtarget->getCPUString();
+
+ if (CPUString == "cortex-a8" ||
+ Subtarget->isCortexA8()) {
+ AttrEmitter->EmitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a8");
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile,
+ ARMBuildAttrs::ApplicationProfile);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
+ ARMBuildAttrs::Allowed);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumb32);
+    // FIXME: Figure out when this attribute should be emitted.
+ //AttrEmitter->EmitAttribute(ARMBuildAttrs::WMMX_arch,
+ // ARMBuildAttrs::AllowWMMXv1);
+
+    // Add further else-if cases for other CPUs here.
+ } else if (CPUString == "generic") {
+ // FIXME: Why these defaults?
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use,
+ ARMBuildAttrs::Allowed);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::Allowed);
+ }
-void ARMAsmPrinter::printInstructionThroughMCStreamer(const MachineInstr *MI) {
- ARMMCInstLower MCInstLowering(OutContext, *Mang, *this);
- switch (MI->getOpcode()) {
- case ARM::t2MOVi32imm:
- assert(0 && "Should be lowered by thumb2it pass");
+ // FIXME: Emit FPU type
+ if (Subtarget->hasVFP2())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv2);
+
+ // Signal various FP modes.
+ if (!UnsafeFPMath) {
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_denormal,
+ ARMBuildAttrs::Allowed);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
+ ARMBuildAttrs::Allowed);
+ }
+
+ if (NoInfsFPMath && NoNaNsFPMath)
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model,
+ ARMBuildAttrs::Allowed);
+ else
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model,
+ ARMBuildAttrs::AllowIEE754);
+
+ // FIXME: add more flags to ARMBuildAttrs.h
+  // 8-byte alignment attributes.
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_needed, 1);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1);
+
+ // Hard float. Use both S and D registers and conform to AAPCS-VFP.
+ if (Subtarget->isAAPCS_ABI() && FloatABIType == FloatABI::Hard) {
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_HardFP_use, 3);
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_VFP_args, 1);
+ }
+ // FIXME: Should we signal R9 usage?
+
+ if (Subtarget->hasDivide())
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::DIV_use, 1);
+
+ AttrEmitter->Finish();
+ delete AttrEmitter;
+}
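+
+// For reference, on an assembly streamer the "generic" path above comes out
+// roughly as the following directives (a sketch; the tag numbers are the
+// standard AEABI build-attribute tags, e.g. Tag_CPU_arch = 6):
+//
+//   .eabi_attribute 6, 2    @ Tag_CPU_arch = v4T
+//   .eabi_attribute 8, 1    @ Tag_ARM_ISA_use = Allowed
+//   .eabi_attribute 9, 1    @ Tag_THUMB_ISA_use = Allowed
+//   .eabi_attribute 20, 1   @ Tag_ABI_FP_denormal
+//   .eabi_attribute 21, 1   @ Tag_ABI_FP_exceptions
+//   .eabi_attribute 23, 3   @ Tag_ABI_FP_number_model = IEEE 754
+//   .eabi_attribute 24, 1   @ Tag_ABI_align8_needed
+//   .eabi_attribute 25, 1   @ Tag_ABI_align8_preserved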
+
+void ARMAsmPrinter::emitARMAttributeSection() {
+ // <format-version>
+ // [ <section-length> "vendor-name"
+ // [ <file-tag> <size> <attribute>*
+ // | <section-tag> <size> <section-number>* 0 <attribute>*
+ // | <symbol-tag> <size> <symbol-number>* 0 <attribute>*
+ // ]+
+ // ]*
+
+ if (OutStreamer.hasRawTextSupport())
+ return;
+
+ const ARMElfTargetObjectFile &TLOFELF =
+ static_cast<const ARMElfTargetObjectFile &>
+ (getObjFileLowering());
+
+ OutStreamer.SwitchSection(TLOFELF.getAttributesSection());
+
+ // Format version
+ OutStreamer.EmitIntValue(0x41, 1);
+}
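+
+// A sketch of the bytes this grammar describes for a minimal "aeabi"
+// subsection (values illustrative; tags and values are ULEB128 encoded,
+// lengths are u32):
+//
+//   0x41                               format-version 'A'
+//   <u32 subsection-length> "aeabi\0"  vendor subsection header
+//   0x01 <u32 size>                    file-scope tag block (Tag_File)
+//   0x06 0x02                          Tag_CPU_arch = v4T
+//   ...                                further tag/value pairs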
+
+//===----------------------------------------------------------------------===//
+
+static MCSymbol *getPICLabel(const char *Prefix, unsigned FunctionNumber,
+ unsigned LabelId, MCContext &Ctx) {
+
+ MCSymbol *Label = Ctx.GetOrCreateSymbol(Twine(Prefix)
+ + "PC" + Twine(FunctionNumber) + "_" + Twine(LabelId));
+ return Label;
+}
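+
+// For example, getPICLabel("L", 3, 7, Ctx) names the symbol "LPC3_7",
+// matching the pic labels emitted for the PICADD/PICLDR pseudos below.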
+
+static MCSymbolRefExpr::VariantKind
+getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
+ switch (Modifier) {
+ default: llvm_unreachable("Unknown modifier!");
+ case ARMCP::no_modifier: return MCSymbolRefExpr::VK_None;
+ case ARMCP::TLSGD: return MCSymbolRefExpr::VK_ARM_TLSGD;
+ case ARMCP::TPOFF: return MCSymbolRefExpr::VK_ARM_TPOFF;
+ case ARMCP::GOTTPOFF: return MCSymbolRefExpr::VK_ARM_GOTTPOFF;
+ case ARMCP::GOT: return MCSymbolRefExpr::VK_ARM_GOT;
+ case ARMCP::GOTOFF: return MCSymbolRefExpr::VK_ARM_GOTOFF;
+ }
+ return MCSymbolRefExpr::VK_None;
+}
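+
+// In ARM assembly these variant kinds print as a parenthesized suffix on the
+// symbol, e.g. a TLSGD-modified reference to "foo" comes out as "foo(tlsgd)".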
+
+MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
+ bool isIndirect = Subtarget->isTargetDarwin() &&
+ Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
+ if (!isIndirect)
+ return Mang->getSymbol(GV);
+
+  // FIXME: Remove this when Darwin transitions to @GOT-like syntax.
+ MCSymbol *MCSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MachineModuleInfoMachO &MMIMachO =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>();
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(MCSym) :
+ MMIMachO.getGVStubEntry(MCSym);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ return MCSym;
+}
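+
+// For a Darwin indirect global "foo" this returns a symbol along the lines
+// of "_foo$non_lazy_ptr" and records a stub entry so the pointer itself is
+// emitted when the module is finalized in EmitEndOfAsmFile().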
+
+void ARMAsmPrinter::
+EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
+ int Size = TM.getTargetData()->getTypeAllocSize(MCPV->getType());
+
+ ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
+
+ MCSymbol *MCSym;
+ if (ACPV->isLSDA()) {
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+ OS << MAI->getPrivateGlobalPrefix() << "_LSDA_" << getFunctionNumber();
+ MCSym = OutContext.GetOrCreateSymbol(OS.str());
+ } else if (ACPV->isBlockAddress()) {
+ MCSym = GetBlockAddressSymbol(ACPV->getBlockAddress());
+ } else if (ACPV->isGlobalValue()) {
+ const GlobalValue *GV = ACPV->getGV();
+ MCSym = GetARMGVSymbol(GV);
+ } else {
+ assert(ACPV->isExtSymbol() && "unrecognized constant pool value");
+ MCSym = GetExternalSymbolSymbol(ACPV->getSymbol());
+ }
+
+ // Create an MCSymbol for the reference.
+ const MCExpr *Expr =
+ MCSymbolRefExpr::Create(MCSym, getModifierVariantKind(ACPV->getModifier()),
+ OutContext);
+
+ if (ACPV->getPCAdjustment()) {
+ MCSymbol *PCLabel = getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(),
+ ACPV->getLabelId(),
+ OutContext);
+ const MCExpr *PCRelExpr = MCSymbolRefExpr::Create(PCLabel, OutContext);
+ PCRelExpr =
+ MCBinaryExpr::CreateAdd(PCRelExpr,
+ MCConstantExpr::Create(ACPV->getPCAdjustment(),
+ OutContext),
+ OutContext);
+ if (ACPV->mustAddCurrentAddress()) {
+ // We want "(<expr> - .)", but MC doesn't have a concept of the '.'
+      // label, so just emit a local label here and reference that instead.
+ MCSymbol *DotSym = OutContext.CreateTempSymbol();
+ OutStreamer.EmitLabel(DotSym);
+ const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
+ PCRelExpr = MCBinaryExpr::CreateSub(PCRelExpr, DotExpr, OutContext);
+ }
+ Expr = MCBinaryExpr::CreateSub(Expr, PCRelExpr, OutContext);
+ }
+ OutStreamer.EmitValue(Expr, Size);
+}
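+
+// Worked example (ARM mode, where reading the pc yields the instruction
+// address plus 8): a global "bar" referenced through pic label LPC0_4 with
+// a PC adjustment of 8 produces the constant-pool entry
+//
+//   .long bar-(LPC0_4+8)
+//
+// and with mustAddCurrentAddress() the entry's own address is added back,
+// i.e. bar-(LPC0_4+8-.), via the temporary DotSym label.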
+
+void ARMAsmPrinter::EmitJumpTable(const MachineInstr *MI) {
+ unsigned Opcode = MI->getOpcode();
+ int OpNum = 1;
+ if (Opcode == ARM::BR_JTadd)
+ OpNum = 2;
+ else if (Opcode == ARM::BR_JTm)
+ OpNum = 3;
+
+ const MachineOperand &MO1 = MI->getOperand(OpNum);
+ const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
+ unsigned JTI = MO1.getIndex();
+
+ // Emit a label for the jump table.
+ MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
+ OutStreamer.EmitLabel(JTISymbol);
+
+ // Emit each entry of the table.
+ const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
+
+ for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = JTBBs[i];
+ // Construct an MCExpr for the entry. We want a value of the form:
+ // (BasicBlockAddr - TableBeginAddr)
+ //
+ // For example, a table with entries jumping to basic blocks BB0 and BB1
+ // would look like:
+ // LJTI_0_0:
+ // .word (LBB0 - LJTI_0_0)
+ // .word (LBB1 - LJTI_0_0)
+ const MCExpr *Expr = MCSymbolRefExpr::Create(MBB->getSymbol(), OutContext);
+
+ if (TM.getRelocationModel() == Reloc::PIC_)
+ Expr = MCBinaryExpr::CreateSub(Expr, MCSymbolRefExpr::Create(JTISymbol,
+ OutContext),
+ OutContext);
+ OutStreamer.EmitValue(Expr, 4);
+ }
+}
+
+void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
+ unsigned Opcode = MI->getOpcode();
+ int OpNum = (Opcode == ARM::t2BR_JT) ? 2 : 1;
+ const MachineOperand &MO1 = MI->getOperand(OpNum);
+ const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
+ unsigned JTI = MO1.getIndex();
+
+ // Emit a label for the jump table.
+ MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
+ OutStreamer.EmitLabel(JTISymbol);
+
+ // Emit each entry of the table.
+ const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
+ unsigned OffsetWidth = 4;
+ if (MI->getOpcode() == ARM::t2TBB_JT)
+ OffsetWidth = 1;
+ else if (MI->getOpcode() == ARM::t2TBH_JT)
+ OffsetWidth = 2;
+
+ for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = JTBBs[i];
+ const MCExpr *MBBSymbolExpr = MCSymbolRefExpr::Create(MBB->getSymbol(),
+ OutContext);
+ // If this isn't a TBB or TBH, the entries are direct branch instructions.
+ if (OffsetWidth == 4) {
+ MCInst BrInst;
+ BrInst.setOpcode(ARM::t2B);
+ BrInst.addOperand(MCOperand::CreateExpr(MBBSymbolExpr));
+ OutStreamer.EmitInstruction(BrInst);
+ continue;
+ }
+ // Otherwise it's an offset from the dispatch instruction. Construct an
+ // MCExpr for the entry. We want a value of the form:
+ // (BasicBlockAddr - TableBeginAddr) / 2
+ //
+ // For example, a TBB table with entries jumping to basic blocks BB0 and BB1
+ // would look like:
+ // LJTI_0_0:
+ // .byte (LBB0 - LJTI_0_0) / 2
+ // .byte (LBB1 - LJTI_0_0) / 2
+ const MCExpr *Expr =
+ MCBinaryExpr::CreateSub(MBBSymbolExpr,
+ MCSymbolRefExpr::Create(JTISymbol, OutContext),
+ OutContext);
+ Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(2, OutContext),
+ OutContext);
+ OutStreamer.EmitValue(Expr, OffsetWidth);
+ }
+}
+
+void ARMAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
+ raw_ostream &OS) {
+ unsigned NOps = MI->getNumOperands();
+ assert(NOps==4);
+ OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
+  // Cast away const; DIVariable and the other debug-info wrappers do not
+  // take const operands.
+ DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
+ OS << V.getName();
+ OS << " <- ";
+ // Frame address. Currently handles register +- offset only.
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
+ OS << '['; printOperand(MI, 0, OS); OS << '+'; printOperand(MI, 1, OS);
+ OS << ']';
+ OS << "+";
+ printOperand(MI, NOps-2, OS);
+}
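+
+// For a DBG_VALUE describing a variable "x" held at [r7+8]+0, the comment
+// printed into the stream looks like (comment marker per the target's MAI):
+//
+//   @DEBUG_VALUE: x <- [r7+8]+0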
+
+static void populateADROperands(MCInst &Inst, unsigned Dest,
+ const MCSymbol *Label,
+ unsigned pred, unsigned ccreg,
+ MCContext &Ctx) {
+ const MCExpr *SymbolExpr = MCSymbolRefExpr::Create(Label, Ctx);
+ Inst.addOperand(MCOperand::CreateReg(Dest));
+ Inst.addOperand(MCOperand::CreateExpr(SymbolExpr));
+ // Add predicate operands.
+ Inst.addOperand(MCOperand::CreateImm(pred));
+ Inst.addOperand(MCOperand::CreateReg(ccreg));
+}
+
+void ARMAsmPrinter::EmitPatchedInstruction(const MachineInstr *MI,
+ unsigned Opcode) {
+ MCInst TmpInst;
+
+ // Emit the instruction as usual, just patch the opcode.
+ LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
+ TmpInst.setOpcode(Opcode);
+ OutStreamer.EmitInstruction(TmpInst);
+}
+
+void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
default: break;
- case ARM::PICADD: { // FIXME: Remove asm string from td file.
+ case ARM::t2ADDrSPi:
+ case ARM::t2ADDrSPi12:
+ case ARM::t2SUBrSPi:
+ case ARM::t2SUBrSPi12:
+ assert ((MI->getOperand(1).getReg() == ARM::SP) &&
+ "Unexpected source register!");
+ break;
+
+ case ARM::t2MOVi32imm: assert(0 && "Should be lowered by thumb2it pass");
+ case ARM::DBG_VALUE: {
+ if (isVerbose() && OutStreamer.hasRawTextSupport()) {
+ SmallString<128> TmpStr;
+ raw_svector_ostream OS(TmpStr);
+ PrintDebugValueComment(MI, OS);
+ OutStreamer.EmitRawText(StringRef(OS.str()));
+ }
+ return;
+ }
+ case ARM::tBfar: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tBL);
+ TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(
+ MI->getOperand(0).getMBB()->getSymbol(), OutContext)));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ case ARM::LEApcrel:
+ case ARM::tLEApcrel:
+ case ARM::t2LEApcrel: {
+ // FIXME: Need to also handle globals and externals
+ MCInst TmpInst;
+ TmpInst.setOpcode(MI->getOpcode() == ARM::t2LEApcrel ? ARM::t2ADR
+ : (MI->getOpcode() == ARM::tLEApcrel ? ARM::tADR
+ : ARM::ADR));
+ populateADROperands(TmpInst, MI->getOperand(0).getReg(),
+ GetCPISymbol(MI->getOperand(1).getIndex()),
+ MI->getOperand(2).getImm(), MI->getOperand(3).getReg(),
+ OutContext);
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ case ARM::LEApcrelJT:
+ case ARM::tLEApcrelJT:
+ case ARM::t2LEApcrelJT: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(MI->getOpcode() == ARM::t2LEApcrelJT ? ARM::t2ADR
+ : (MI->getOpcode() == ARM::tLEApcrelJT ? ARM::tADR
+ : ARM::ADR));
+ populateADROperands(TmpInst, MI->getOperand(0).getReg(),
+ GetARMJTIPICJumpTableLabel2(MI->getOperand(1).getIndex(),
+ MI->getOperand(2).getImm()),
+ MI->getOperand(3).getImm(), MI->getOperand(4).getReg(),
+ OutContext);
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ case ARM::MOVPCRX: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ case ARM::BXr9_CALL:
+ case ARM::BX_CALL: {
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::BX);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ return;
+ }
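+  // The expected expansion of BX_CALL in assembly (a sketch; the r9 variant
+  // differs only in call conventions, not in the emitted text):
+  //
+  //   mov lr, pc
+  //   bx  r<n>
+  //
+  // Reading the pc in ARM mode yields the mov's address plus 8, i.e. the
+  // instruction right after the bx, so lr receives the return address.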
+ case ARM::BMOVPCRXr9_CALL:
+ case ARM::BMOVPCRX_CALL: {
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ return;
+ }
+ case ARM::MOVi16_ga_pcrel:
+ case ARM::t2MOVi16_ga_pcrel: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opc == ARM::MOVi16_ga_pcrel? ARM::MOVi16 : ARM::t2MOVi16);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+
+ unsigned TF = MI->getOperand(1).getTargetFlags();
+ bool isPIC = TF == ARMII::MO_LO16_NONLAZY_PIC;
+ const GlobalValue *GV = MI->getOperand(1).getGlobal();
+ MCSymbol *GVSym = GetARMGVSymbol(GV);
+ const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
+ if (isPIC) {
+ MCSymbol *LabelSym = getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(),
+ MI->getOperand(2).getImm(), OutContext);
+ const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
+ unsigned PCAdj = (Opc == ARM::MOVi16_ga_pcrel) ? 8 : 4;
+ const MCExpr *PCRelExpr =
+ ARMMCExpr::CreateLower16(MCBinaryExpr::CreateSub(GVSymExpr,
+ MCBinaryExpr::CreateAdd(LabelSymExpr,
+ MCConstantExpr::Create(PCAdj, OutContext),
+ OutContext), OutContext), OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(PCRelExpr));
+ } else {
+ const MCExpr *RefExpr= ARMMCExpr::CreateLower16(GVSymExpr, OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(RefExpr));
+ }
+
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
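+  // In the PIC case the lowered movw ultimately prints along the lines of
+  // (a sketch, assuming Darwin's "L" private prefix):
+  //
+  //   movw r0, :lower16:(L_foo$non_lazy_ptr-(LPC0_2+8))
+  //
+  // where +8 (+4 for Thumb2) compensates for the pc read-ahead at the
+  // instruction that later adds in the pic label's address.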
+ case ARM::MOVTi16_ga_pcrel:
+ case ARM::t2MOVTi16_ga_pcrel: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opc == ARM::MOVTi16_ga_pcrel
+ ? ARM::MOVTi16 : ARM::t2MOVTi16);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+
+ unsigned TF = MI->getOperand(2).getTargetFlags();
+ bool isPIC = TF == ARMII::MO_HI16_NONLAZY_PIC;
+ const GlobalValue *GV = MI->getOperand(2).getGlobal();
+ MCSymbol *GVSym = GetARMGVSymbol(GV);
+ const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
+ if (isPIC) {
+ MCSymbol *LabelSym = getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(),
+ MI->getOperand(3).getImm(), OutContext);
+ const MCExpr *LabelSymExpr= MCSymbolRefExpr::Create(LabelSym, OutContext);
+ unsigned PCAdj = (Opc == ARM::MOVTi16_ga_pcrel) ? 8 : 4;
+ const MCExpr *PCRelExpr =
+ ARMMCExpr::CreateUpper16(MCBinaryExpr::CreateSub(GVSymExpr,
+ MCBinaryExpr::CreateAdd(LabelSymExpr,
+ MCConstantExpr::Create(PCAdj, OutContext),
+ OutContext), OutContext), OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(PCRelExpr));
+ } else {
+ const MCExpr *RefExpr= ARMMCExpr::CreateUpper16(GVSymExpr, OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(RefExpr));
+ }
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ case ARM::tPICADD: {
// This is a pseudo op for a label + instruction sequence, which looks like:
// LPC0:
- // add r0, pc, r0
+ // add r0, pc
// This adds the address of LPC0 to r0.
// Emit the label.
- // FIXME: MOVE TO SHARED PLACE.
- unsigned Id = (unsigned)MI->getOperand(2).getImm();
- const char *Prefix = MAI->getPrivateGlobalPrefix();
- MCSymbol *Label =OutContext.GetOrCreateSymbol(Twine(Prefix)
- + "PC" + Twine(getFunctionNumber()) + "_" + Twine(Id));
- OutStreamer.EmitLabel(Label);
+ OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(), MI->getOperand(2).getImm(),
+ OutContext));
+ // Form and emit the add.
+ MCInst AddInst;
+ AddInst.setOpcode(ARM::tADDhirr);
+ AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ AddInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // Add predicate operands.
+ AddInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ AddInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(AddInst);
+ return;
+ }
+ case ARM::PICADD: {
+ // This is a pseudo op for a label + instruction sequence, which looks like:
+ // LPC0:
+ // add r0, pc, r0
+ // This adds the address of LPC0 to r0.
+
+ // Emit the label.
+ OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(), MI->getOperand(2).getImm(),
+ OutContext));
- // Form and emit tha dd.
+ // Form and emit the add.
MCInst AddInst;
AddInst.setOpcode(ARM::ADDrr);
AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
AddInst.addOperand(MCOperand::CreateReg(ARM::PC));
AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+ // Add predicate operands.
+ AddInst.addOperand(MCOperand::CreateImm(MI->getOperand(3).getImm()));
+ AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(4).getReg()));
+ // Add 's' bit operand (always reg0 for this)
+ AddInst.addOperand(MCOperand::CreateReg(0));
OutStreamer.EmitInstruction(AddInst);
return;
}
- case ARM::CONSTPOOL_ENTRY: { // FIXME: Remove asm string from td file.
+ case ARM::PICSTR:
+ case ARM::PICSTRB:
+ case ARM::PICSTRH:
+ case ARM::PICLDR:
+ case ARM::PICLDRB:
+ case ARM::PICLDRH:
+ case ARM::PICLDRSB:
+ case ARM::PICLDRSH: {
+ // This is a pseudo op for a label + instruction sequence, which looks like:
+ // LPC0:
+ // OP r0, [pc, r0]
+    // The LPC0 label is referenced by a constant pool entry in order to get
+    // a PC-relative address at the load/store instruction.
+
+ // Emit the label.
+ OutStreamer.EmitLabel(getPICLabel(MAI->getPrivateGlobalPrefix(),
+ getFunctionNumber(), MI->getOperand(2).getImm(),
+ OutContext));
+
+    // Form and emit the load or store.
+ unsigned Opcode;
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode!");
+ case ARM::PICSTR: Opcode = ARM::STRrs; break;
+ case ARM::PICSTRB: Opcode = ARM::STRBrs; break;
+ case ARM::PICSTRH: Opcode = ARM::STRH; break;
+ case ARM::PICLDR: Opcode = ARM::LDRrs; break;
+ case ARM::PICLDRB: Opcode = ARM::LDRBrs; break;
+ case ARM::PICLDRH: Opcode = ARM::LDRH; break;
+ case ARM::PICLDRSB: Opcode = ARM::LDRSB; break;
+ case ARM::PICLDRSH: Opcode = ARM::LDRSH; break;
+ }
+ MCInst LdStInst;
+ LdStInst.setOpcode(Opcode);
+ LdStInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ LdStInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ LdStInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+ LdStInst.addOperand(MCOperand::CreateImm(0));
+ // Add predicate operands.
+ LdStInst.addOperand(MCOperand::CreateImm(MI->getOperand(3).getImm()));
+ LdStInst.addOperand(MCOperand::CreateReg(MI->getOperand(4).getReg()));
+ OutStreamer.EmitInstruction(LdStInst);
+
+ return;
+ }
+ case ARM::CONSTPOOL_ENTRY: {
/// CONSTPOOL_ENTRY - This instruction represents a floating constant pool
/// in the function. The first operand is the ID# for this instruction, the
/// second is the index into the MachineConstantPool that this is, the third
@@ -1371,100 +1086,450 @@ void ARMAsmPrinter::printInstructionThroughMCStreamer(const MachineInstr *MI) {
return;
}
- case ARM::MOVi2pieces: { // FIXME: Remove asmstring from td file.
- // This is a hack that lowers as a two instruction sequence.
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned ImmVal = (unsigned)MI->getOperand(1).getImm();
+ case ARM::t2BR_JT: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tMOVgpr2gpr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ // Output the data for the jump table itself
+ EmitJump2Table(MI);
+ return;
+ }
+ case ARM::t2TBB_JT: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ MCInst TmpInst;
+
+ TmpInst.setOpcode(ARM::t2TBB);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ // Output the data for the jump table itself
+ EmitJump2Table(MI);
+ // Make sure the next instruction is 2-byte aligned.
+ EmitAlignment(1);
+ return;
+ }
+ case ARM::t2TBH_JT: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ MCInst TmpInst;
+
+ TmpInst.setOpcode(ARM::t2TBH);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ // Output the data for the jump table itself
+ EmitJump2Table(MI);
+ return;
+ }
+ case ARM::tBR_JTr:
+ case ARM::BR_JTr: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ // mov pc, target
+ MCInst TmpInst;
+ unsigned Opc = MI->getOpcode() == ARM::BR_JTr ?
+ ARM::MOVr : ARM::tMOVgpr2gpr;
+ TmpInst.setOpcode(Opc);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ if (Opc == ARM::MOVr)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+
+ // Make sure the Thumb jump table is 4-byte aligned.
+ if (Opc == ARM::tMOVgpr2gpr)
+ EmitAlignment(2);
- unsigned SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
- unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
+ // Output the data for the jump table itself
+ EmitJumpTable(MI);
+ return;
+ }
+ case ARM::BR_JTm: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ // ldr pc, target
+ MCInst TmpInst;
+ if (MI->getOperand(1).getReg() == 0) {
+ // literal offset
+ TmpInst.setOpcode(ARM::LDRi12);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
+ } else {
+ TmpInst.setOpcode(ARM::LDRrs);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ }
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ // Output the data for the jump table itself
+ EmitJumpTable(MI);
+ return;
+ }
+ case ARM::BR_JTadd: {
+ // Lower and emit the instruction itself, then the jump table following it.
+ // add pc, target, idx
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::ADDrr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+
+ // Output the data for the jump table itself
+ EmitJumpTable(MI);
+ return;
+ }
+ case ARM::TRAP: {
+ // Non-Darwin binutils don't yet support the "trap" mnemonic.
+ // FIXME: Remove this special case when they do.
+ if (!Subtarget->isTargetDarwin()) {
+ //.long 0xe7ffdefe @ trap
+ uint32_t Val = 0xe7ffdefeUL;
+ OutStreamer.AddComment("trap");
+ OutStreamer.EmitIntValue(Val, 4);
+ return;
+ }
+ break;
+ }
+ case ARM::tTRAP: {
+ // Non-Darwin binutils don't yet support the "trap" mnemonic.
+ // FIXME: Remove this special case when they do.
+ if (!Subtarget->isTargetDarwin()) {
+ //.short 57086 @ trap
+ uint16_t Val = 0xdefe;
+ OutStreamer.AddComment("trap");
+ OutStreamer.EmitIntValue(Val, 2);
+ return;
+ }
+ break;
+ }
+ case ARM::t2Int_eh_sjlj_setjmp:
+ case ARM::t2Int_eh_sjlj_setjmp_nofp:
+ case ARM::tInt_eh_sjlj_setjmp: {
+ // Two incoming args: GPR:$src, GPR:$val
+ // mov $val, pc
+ // adds $val, #7
+ // str $val, [$src, #4]
+ // movs r0, #0
+ // b 1f
+ // movs r0, #1
+ // 1:
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ unsigned ValReg = MI->getOperand(1).getReg();
+ MCSymbol *Label = GetARMSJLJEHLabel();
{
MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVi);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg));
- TmpInst.addOperand(MCOperand::CreateImm(SOImmValV1));
-
+ TmpInst.setOpcode(ARM::tMOVgpr2tgpr);
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // 's' bit operand
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR));
+ OutStreamer.AddComment("eh_setjmp begin");
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tADDi3);
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ // 's' bit operand
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR));
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ TmpInst.addOperand(MCOperand::CreateImm(7));
// Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
- TmpInst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
OutStreamer.EmitInstruction(TmpInst);
}
-
{
MCInst TmpInst;
- TmpInst.setOpcode(ARM::ORRri);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // inreg
- TmpInst.addOperand(MCOperand::CreateImm(SOImmValV2)); // so_imm
+ TmpInst.setOpcode(ARM::tSTRi);
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ // The offset immediate is #4. The operand value is scaled by 4 for the
+ // tSTR instruction.
+ TmpInst.addOperand(MCOperand::CreateImm(1));
// Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tMOVi8);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ const MCExpr *SymbolExpr = MCSymbolRefExpr::Create(Label, OutContext);
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tB);
+ TmpInst.addOperand(MCOperand::CreateExpr(SymbolExpr));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tMOVi8);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR));
+ TmpInst.addOperand(MCOperand::CreateImm(1));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.AddComment("eh_setjmp end");
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ OutStreamer.EmitLabel(Label);
+ return;
+ }
- TmpInst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ case ARM::Int_eh_sjlj_setjmp_nofp:
+ case ARM::Int_eh_sjlj_setjmp: {
+ // Two incoming args: GPR:$src, GPR:$val
+ // add $val, pc, #8
+ // str $val, [$src, #+4]
+ // mov r0, #0
+ // add pc, pc, #0
+ // mov r0, #1
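+    // A note on the sequence above: reading the pc in ARM mode returns the
+    // current instruction's address plus 8, so "add pc, pc, #0" skips exactly
+    // one instruction. The direct path therefore returns 0, while a longjmp
+    // resumes at the stored address, which points at "mov r0, #1".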
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ unsigned ValReg = MI->getOperand(1).getReg();
+
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::ADDri);
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateImm(8));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // 's' bit operand (always reg0 for this).
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.AddComment("eh_setjmp begin");
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::STRi12);
+ TmpInst.addOperand(MCOperand::CreateReg(ValReg));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateImm(4));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVi);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // 's' bit operand (always reg0 for this).
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::ADDri);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // 's' bit operand (always reg0 for this).
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVi);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ TmpInst.addOperand(MCOperand::CreateImm(1));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // 's' bit operand (always reg0 for this).
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.AddComment("eh_setjmp end");
OutStreamer.EmitInstruction(TmpInst);
}
return;
}
- case ARM::MOVi32imm: { // FIXME: Remove asmstring from td file.
- // This is a hack that lowers as a two instruction sequence.
- unsigned DstReg = MI->getOperand(0).getReg();
- const MachineOperand &MO = MI->getOperand(1);
- MCOperand V1, V2;
- if (MO.isImm()) {
- unsigned ImmVal = (unsigned)MI->getOperand(1).getImm();
- V1 = MCOperand::CreateImm(ImmVal & 65535);
- V2 = MCOperand::CreateImm(ImmVal >> 16);
- } else if (MO.isGlobal()) {
- MCSymbol *Symbol = MCInstLowering.GetGlobalAddressSymbol(MO);
- const MCSymbolRefExpr *SymRef1 =
- MCSymbolRefExpr::Create(Symbol,
- MCSymbolRefExpr::VK_ARM_LO16, OutContext);
- const MCSymbolRefExpr *SymRef2 =
- MCSymbolRefExpr::Create(Symbol,
- MCSymbolRefExpr::VK_ARM_HI16, OutContext);
- V1 = MCOperand::CreateExpr(SymRef1);
- V2 = MCOperand::CreateExpr(SymRef2);
- } else {
- MI->dump();
- llvm_unreachable("cannot handle this operand");
+ case ARM::Int_eh_sjlj_longjmp: {
+ // ldr sp, [$src, #8]
+ // ldr $scratch, [$src, #4]
+ // ldr r7, [$src]
+ // bx $scratch
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ unsigned ScratchReg = MI->getOperand(1).getReg();
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::LDRi12);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::SP));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateImm(8));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
}
-
{
MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVi16);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(V1); // lower16(imm)
-
+ TmpInst.setOpcode(ARM::LDRi12);
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateImm(4));
// Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
OutStreamer.EmitInstruction(TmpInst);
}
-
{
MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVTi16);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // srcreg
- TmpInst.addOperand(V2); // upper16(imm)
-
+ TmpInst.setOpcode(ARM::LDRi12);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R7));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
// Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::BX);
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
OutStreamer.EmitInstruction(TmpInst);
}
-
return;
}
+ case ARM::tInt_eh_sjlj_longjmp: {
+ // ldr $scratch, [$src, #8]
+ // mov sp, $scratch
+ // ldr $scratch, [$src, #4]
+ // ldr r7, [$src]
+ // bx $scratch
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ unsigned ScratchReg = MI->getOperand(1).getReg();
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tLDRi);
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ // The offset immediate is #8. The operand value is scaled by 4 for the
+ // tLDR instruction.
+ TmpInst.addOperand(MCOperand::CreateImm(2));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tMOVtgpr2gpr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::SP));
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tLDRi);
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateImm(1));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tLDRr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::R7));
+ TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tBX_RET_vararg);
+ TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
+ // Predicate.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ return;
+ }
+ // These are the pseudos created to comply with stricter operand restrictions
+ // on ARMv5. Lower them now to "normal" instructions, since all the
+ // restrictions are already satisfied.
+ case ARM::MULv5:
+ EmitPatchedInstruction(MI, ARM::MUL);
+ return;
+ case ARM::MLAv5:
+ EmitPatchedInstruction(MI, ARM::MLA);
+ return;
+ case ARM::SMULLv5:
+ EmitPatchedInstruction(MI, ARM::SMULL);
+ return;
+ case ARM::UMULLv5:
+ EmitPatchedInstruction(MI, ARM::UMULL);
+ return;
+ case ARM::SMLALv5:
+ EmitPatchedInstruction(MI, ARM::SMLAL);
+ return;
+ case ARM::UMLALv5:
+ EmitPatchedInstruction(MI, ARM::UMLAL);
+ return;
+ case ARM::UMAALv5:
+ EmitPatchedInstruction(MI, ARM::UMAAL);
+ return;
}
MCInst TmpInst;
- MCInstLowering.Lower(MI, TmpInst);
+ LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
OutStreamer.EmitInstruction(TmpInst);
}
@@ -1476,7 +1541,7 @@ static MCInstPrinter *createARMMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI) {
if (SyntaxVariant == 0)
- return new ARMInstPrinter(MAI, false);
+ return new ARMInstPrinter(MAI);
return 0;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
new file mode 100644
index 0000000..5852684
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -0,0 +1,112 @@
+//===-- ARMAsmPrinter.h - Print machine code to an ARM .s file ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ARM Assembly printer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMASMPRINTER_H
+#define ARMASMPRINTER_H
+
+#include "ARM.h"
+#include "ARMTargetMachine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+namespace ARM {
+ enum DW_ISA {
+ DW_ISA_ARM_thumb = 1,
+ DW_ISA_ARM_arm = 2
+ };
+}
+
+class LLVM_LIBRARY_VISIBILITY ARMAsmPrinter : public AsmPrinter {
+
+ /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
+ /// make the right decision when printing asm code for different targets.
+ const ARMSubtarget *Subtarget;
+
+ /// AFI - Keep a pointer to ARMFunctionInfo for the current
+ /// MachineFunction.
+ ARMFunctionInfo *AFI;
+
+  /// MCP - Keep a pointer to the constant pool entries of the current
+  /// MachineFunction.
+ const MachineConstantPool *MCP;
+
+public:
+ explicit ARMAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer), AFI(NULL), MCP(NULL) {
+ Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ }
+
+ virtual const char *getPassName() const {
+ return "ARM Assembly Printer";
+ }
+
+ void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
+ const char *Modifier = 0);
+
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O);
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
+ unsigned AsmVariant,
+ const char *ExtraCode, raw_ostream &O);
+
+ void EmitJumpTable(const MachineInstr *MI);
+ void EmitJump2Table(const MachineInstr *MI);
+ virtual void EmitInstruction(const MachineInstr *MI);
+ bool runOnMachineFunction(MachineFunction &F);
+
+  virtual void EmitConstantPool() {}  // we emit constant pools ourselves.
+ virtual void EmitFunctionEntryLabel();
+ void EmitStartOfAsmFile(Module &M);
+ void EmitEndOfAsmFile(Module &M);
+
+private:
+ // Helpers for EmitStartOfAsmFile() and EmitEndOfAsmFile()
+ void emitAttributes();
+
+ // Helper for ELF .o only
+ void emitARMAttributeSection();
+
+ // Generic helper used to emit e.g. ARMv5 mul pseudos
+ void EmitPatchedInstruction(const MachineInstr *MI, unsigned TargetOpc);
+
+public:
+ void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
+
+ MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+
+ virtual unsigned getISAEncoding() {
+ // ARM/Darwin adds ISA to the DWARF info for each function.
+ if (!Subtarget->isTargetDarwin())
+ return 0;
+ return Subtarget->isThumb() ?
+ llvm::ARM::DW_ISA_ARM_thumb : llvm::ARM::DW_ISA_ARM_arm;
+ }
+
+ MCSymbol *GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
+ const MachineBasicBlock *MBB) const;
+ MCSymbol *GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const;
+
+ MCSymbol *GetARMSJLJEHLabel(void) const;
+
+ MCSymbol *GetARMGVSymbol(const GlobalValue *GV);
+
+ /// EmitMachineConstantPoolValue - Print a machine constantpool value to
+ /// the .s file.
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInfo.h
new file mode 100644
index 0000000..a56cc1a
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInfo.h
@@ -0,0 +1,249 @@
+//===-- ARMBaseInfo.h - Top level definitions for ARM ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions for
+// the ARM target useful for the compiler back-end and the MC libraries.
+// As such, it deliberately does not include references to LLVM core
+// code gen types, passes, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMBASEINFO_H
+#define ARMBASEINFO_H
+
+#include "llvm/Support/ErrorHandling.h"
+
+// Note that the following auto-generated files only define enum types, and
+// so are safe to include here.
+
+// Defines symbolic names for ARM registers. This defines a mapping from
+// register name to register number.
+//
+#include "ARMGenRegisterNames.inc"
+
+// Defines symbolic names for the ARM instructions.
+//
+#include "ARMGenInstrNames.inc"
+
+namespace llvm {
+
+// Enums corresponding to ARM condition codes
+namespace ARMCC {
+ // The CondCodes constants map directly to the 4-bit encoding of the
+ // condition field for predicated instructions.
+  enum CondCodes {  // Meaning (integer)          Meaning (floating-point)
+    EQ,             // Equal                      Equal
+    NE,             // Not equal                  Not equal, or unordered
+    HS,             // Carry set                  >, ==, or unordered
+    LO,             // Carry clear                Less than
+    MI,             // Minus, negative            Less than
+    PL,             // Plus, positive or zero     >, ==, or unordered
+    VS,             // Overflow                   Unordered
+    VC,             // No overflow                Not unordered
+    HI,             // Unsigned higher            Greater than, or unordered
+    LS,             // Unsigned lower or same     Less than or equal
+    GE,             // Greater than or equal      Greater than or equal
+    LT,             // Less than                  Less than, or unordered
+    GT,             // Greater than               Greater than
+    LE,             // Less than or equal         <, ==, or unordered
+    AL              // Always (unconditional)     Always (unconditional)
+  };
+
+ inline static CondCodes getOppositeCondition(CondCodes CC) {
+ switch (CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case EQ: return NE;
+ case NE: return EQ;
+ case HS: return LO;
+ case LO: return HS;
+ case MI: return PL;
+ case PL: return MI;
+ case VS: return VC;
+ case VC: return VS;
+ case HI: return LS;
+ case LS: return HI;
+ case GE: return LT;
+ case LT: return GE;
+ case GT: return LE;
+ case LE: return GT;
+ }
+ }
+} // namespace ARMCC
+
+inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
+ switch (CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case ARMCC::EQ: return "eq";
+ case ARMCC::NE: return "ne";
+ case ARMCC::HS: return "hs";
+ case ARMCC::LO: return "lo";
+ case ARMCC::MI: return "mi";
+ case ARMCC::PL: return "pl";
+ case ARMCC::VS: return "vs";
+ case ARMCC::VC: return "vc";
+ case ARMCC::HI: return "hi";
+ case ARMCC::LS: return "ls";
+ case ARMCC::GE: return "ge";
+ case ARMCC::LT: return "lt";
+ case ARMCC::GT: return "gt";
+ case ARMCC::LE: return "le";
+ case ARMCC::AL: return "al";
+ }
+}
+
+namespace ARM_PROC {
+ enum IMod {
+ IE = 2,
+ ID = 3
+ };
+
+ enum IFlags {
+ F = 1,
+ I = 2,
+ A = 4
+ };
+
+ inline static const char *IFlagsToString(unsigned val) {
+ switch (val) {
+ default: llvm_unreachable("Unknown iflags operand");
+ case F: return "f";
+ case I: return "i";
+ case A: return "a";
+ }
+ }
+
+ inline static const char *IModToString(unsigned val) {
+ switch (val) {
+ default: llvm_unreachable("Unknown imod operand");
+ case IE: return "ie";
+ case ID: return "id";
+ }
+ }
+}
+
+namespace ARM_MB {
+ // The Memory Barrier Option constants map directly to the 4-bit encoding of
+ // the option field for memory barrier operations.
+ enum MemBOpt {
+ SY = 15,
+ ST = 14,
+ ISH = 11,
+ ISHST = 10,
+ NSH = 7,
+ NSHST = 6,
+ OSH = 3,
+ OSHST = 2
+ };
+
+ inline static const char *MemBOptToString(unsigned val) {
+ switch (val) {
+ default: llvm_unreachable("Unknown memory operation");
+ case SY: return "sy";
+ case ST: return "st";
+ case ISH: return "ish";
+ case ISHST: return "ishst";
+ case NSH: return "nsh";
+ case NSHST: return "nshst";
+ case OSH: return "osh";
+ case OSHST: return "oshst";
+ }
+ }
+} // namespace ARM_MB
+
+/// getARMRegisterNumbering - Given the enum value for some register, e.g.
+/// ARM::LR, return the number that it corresponds to (e.g. 14).
+inline static unsigned getARMRegisterNumbering(unsigned Reg) {
+ using namespace ARM;
+ switch (Reg) {
+ default:
+ llvm_unreachable("Unknown ARM register!");
+ case R0: case S0: case D0: case Q0: return 0;
+ case R1: case S1: case D1: case Q1: return 1;
+ case R2: case S2: case D2: case Q2: return 2;
+ case R3: case S3: case D3: case Q3: return 3;
+ case R4: case S4: case D4: case Q4: return 4;
+ case R5: case S5: case D5: case Q5: return 5;
+ case R6: case S6: case D6: case Q6: return 6;
+ case R7: case S7: case D7: case Q7: return 7;
+ case R8: case S8: case D8: case Q8: return 8;
+ case R9: case S9: case D9: case Q9: return 9;
+ case R10: case S10: case D10: case Q10: return 10;
+ case R11: case S11: case D11: case Q11: return 11;
+ case R12: case S12: case D12: case Q12: return 12;
+ case SP: case S13: case D13: case Q13: return 13;
+ case LR: case S14: case D14: case Q14: return 14;
+ case PC: case S15: case D15: case Q15: return 15;
+
+ case S16: case D16: return 16;
+ case S17: case D17: return 17;
+ case S18: case D18: return 18;
+ case S19: case D19: return 19;
+ case S20: case D20: return 20;
+ case S21: case D21: return 21;
+ case S22: case D22: return 22;
+ case S23: case D23: return 23;
+ case S24: case D24: return 24;
+ case S25: case D25: return 25;
+ case S26: case D26: return 26;
+ case S27: case D27: return 27;
+ case S28: case D28: return 28;
+ case S29: case D29: return 29;
+ case S30: case D30: return 30;
+ case S31: case D31: return 31;
+ }
+}
+
+namespace ARMII {
+ /// Target Operand Flag enum.
+ enum TOF {
+ //===------------------------------------------------------------------===//
+ // ARM Specific MachineOperand flags.
+
+ MO_NO_FLAG,
+
+ /// MO_LO16 - On a symbol operand, this represents a relocation containing
+ /// lower 16 bit of the address. Used only via movw instruction.
+ MO_LO16,
+
+ /// MO_HI16 - On a symbol operand, this represents a relocation containing
+ /// higher 16 bit of the address. Used only via movt instruction.
+ MO_HI16,
+
+ /// MO_LO16_NONLAZY - On a symbol operand "FOO", this represents a
+ /// relocation containing lower 16 bit of the non-lazy-ptr indirect symbol,
+ /// i.e. "FOO$non_lazy_ptr".
+ /// Used only via movw instruction.
+ MO_LO16_NONLAZY,
+
+ /// MO_HI16_NONLAZY - On a symbol operand "FOO", this represents a
+ /// relocation containing higher 16 bit of the non-lazy-ptr indirect symbol,
+ /// i.e. "FOO$non_lazy_ptr". Used only via movt instruction.
+ MO_HI16_NONLAZY,
+
+ /// MO_LO16_NONLAZY_PIC - On a symbol operand "FOO", this represents a
+ /// relocation containing lower 16 bit of the PC relative address of the
+ /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL".
+ /// Used only via movw instruction.
+ MO_LO16_NONLAZY_PIC,
+
+ /// MO_HI16_NONLAZY_PIC - On a symbol operand "FOO", this represents a
+ /// relocation containing higher 16 bit of the PC relative address of the
+ /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL".
+ /// Used only via movt instruction.
+ MO_HI16_NONLAZY_PIC,
+
+ /// MO_PLT - On a symbol operand, this represents an ELF PLT reference on a
+ /// call operand.
+ MO_PLT
+ };
+} // end namespace ARMII
+
+} // end namespace llvm;
+
+#endif
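
The MO_LO16 / MO_HI16 family of flags above describes how a 32-bit address is
materialized as a movw/movt pair: movw writes the low half of the register and
movt the high half. A self-contained sketch of the split (the address value is
illustrative):

    #include <cassert>
    #include <cstdint>

    // movw consumes the MO_LO16 half, movt the MO_HI16 half.
    static uint16_t lo16(uint32_t Addr) { return static_cast<uint16_t>(Addr); }
    static uint16_t hi16(uint32_t Addr) { return static_cast<uint16_t>(Addr >> 16); }

    int main() {
      uint32_t Addr = 0x12345678u;
      assert(lo16(Addr) == 0x5678); // movw r0, #0x5678
      assert(hi16(Addr) == 0x1234); // movt r0, #0x1234
      assert(((uint32_t)hi16(Addr) << 16 | lo16(Addr)) == Addr);
      return 0;
    }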
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index e4f10f9..2268e59 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -15,13 +15,13 @@
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
+#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMGenInstrInfo.inc"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -34,15 +34,75 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/STLExtras.h"
using namespace llvm;
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
+/// ARM_MLxEntry - Record information about MLA / MLS instructions.
+struct ARM_MLxEntry {
+ unsigned MLxOpc; // MLA / MLS opcode
+ unsigned MulOpc; // Expanded multiplication opcode
+ unsigned AddSubOpc; // Expanded add / sub opcode
+ bool NegAcc; // True if the acc is negated before the add / sub.
+ bool HasLane; // True if instruction has an extra "lane" operand.
+};
+
+static const ARM_MLxEntry ARM_MLxTable[] = {
+ // MLxOpc, MulOpc, AddSubOpc, NegAcc, HasLane
+ // fp scalar ops
+ { ARM::VMLAS, ARM::VMULS, ARM::VADDS, false, false },
+ { ARM::VMLSS, ARM::VMULS, ARM::VSUBS, false, false },
+ { ARM::VMLAD, ARM::VMULD, ARM::VADDD, false, false },
+ { ARM::VMLSD, ARM::VMULD, ARM::VSUBD, false, false },
+ { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS, true, false },
+ { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS, true, false },
+ { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD, true, false },
+ { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD, true, false },
+
+ // fp SIMD ops
+ { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd, false, false },
+ { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd, false, false },
+ { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq, false, false },
+ { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq, false, false },
+ { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd, false, true },
+ { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd, false, true },
+ { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq, false, true },
+ { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq, false, true },
+};
+
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
: TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
Subtarget(STI) {
+ for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
+ if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
+ assert(false && "Duplicated entries?");
+ MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
+ MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
+ }
+}
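
The constructor above converts the static ARM_MLxTable into a DenseMap keyed
by opcode, so that later queries (isFpMLxInstruction, at the end of this file)
cost one hash lookup instead of a table scan. A simplified standalone sketch
of the same pattern, with std::unordered_map standing in for DenseMap and
made-up opcode values:

    #include <cassert>
    #include <unordered_map>

    enum Opcode { VMLAS = 100, VMULS = 101, VADDS = 102 }; // illustrative numbers

    struct MLxEntry { Opcode MLx, Mul, AddSub; bool NegAcc, HasLane; };
    static const MLxEntry Table[] = {
      { VMLAS, VMULS, VADDS, false, false },
    };

    int main() {
      // Build the opcode -> table-index map once, up front.
      std::unordered_map<int, unsigned> Map;
      for (unsigned i = 0; i != sizeof(Table) / sizeof(Table[0]); ++i)
        Map.emplace(Table[i].MLx, i);
      // Later queries are a single lookup.
      assert(Table[Map.at(VMLAS)].Mul == VMULS);
      return 0;
    }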
+
+// Use a ScoreboardHazardRecognizer for prepass ARM scheduling.
+// TargetInstrInfoImpl currently defaults to no prepass hazard recognizer.
+ScheduleHazardRecognizer *ARMBaseInstrInfo::
+CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const {
+ if (usePreRAHazardRecognizer()) {
+ const InstrItineraryData *II = TM->getInstrItineraryData();
+ return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
+ }
+ return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
+}
+
+ScheduleHazardRecognizer *ARMBaseInstrInfo::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
+ if (Subtarget.isThumb2() || Subtarget.hasVFP2())
+ return (ScheduleHazardRecognizer *)
+ new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
+ return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}
MachineInstr *
@@ -140,7 +200,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
- .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
+ .addReg(WBReg).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
@@ -151,7 +211,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
- .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
+ .addReg(BaseReg).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
@@ -166,8 +226,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (LV) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
@@ -197,43 +256,6 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return NewMIs[0];
}
-bool
-ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- bool isKill = true;
-
- // Add the callee-saved register as live-in unless it's LR and
- // @llvm.returnaddress is called. If LR is returned for @llvm.returnaddress
- // then it's already added to the function and entry block live-in sets.
- if (Reg == ARM::LR) {
- MachineFunction &MF = *MBB.getParent();
- if (MF.getFrameInfo()->isReturnAddressTaken() &&
- MF.getRegInfo().isLiveIn(Reg))
- isKill = false;
- }
-
- if (isKill)
- MBB.addLiveIn(Reg);
-
- // Insert the spill to the stack frame. The register is killed at the spill
- //
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- storeRegToStackSlot(MBB, MI, Reg, isKill,
- CSI[i].getFrameIdx(), RC, TRI);
- }
- return true;
-}
-
// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
@@ -275,13 +297,31 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Get the instruction before it if it is a terminator.
MachineInstr *SecondLastInst = I;
+ unsigned SecondLastOpc = SecondLastInst->getOpcode();
+
+ // If AllowModify is true and the block ends with two or more unconditional
+ // branches, delete all but the first unconditional branch.
+ if (AllowModify && isUncondBranchOpcode(LastOpc)) {
+ while (isUncondBranchOpcode(SecondLastOpc)) {
+ LastInst->eraseFromParent();
+ LastInst = SecondLastInst;
+ LastOpc = LastInst->getOpcode();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ // Return now; the only remaining terminator is an unconditional branch.
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ } else {
+ SecondLastInst = I;
+ SecondLastOpc = SecondLastInst->getOpcode();
+ }
+ }
+ }
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
return true;
// If the block ends with a B and a Bcc, handle it.
- unsigned SecondLastOpc = SecondLastInst->getOpcode();
if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(SecondLastInst->getOperand(1));
@@ -468,7 +508,7 @@ bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
}
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
-DISABLE_INLINE
+LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
@@ -513,6 +553,14 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case ARMII::Size2Bytes: return 2; // Thumb1 instruction.
case ARMII::SizeSpecial: {
switch (Opc) {
+ case ARM::MOVi16_ga_pcrel:
+ case ARM::MOVTi16_ga_pcrel:
+ case ARM::t2MOVi16_ga_pcrel:
+ case ARM::t2MOVTi16_ga_pcrel:
+ return 4;
+ case ARM::MOVi32imm:
+ case ARM::t2MOVi32imm:
+ return 8;
case ARM::CONSTPOOL_ENTRY:
// If this machine instr is a constant pool entry, its size is recorded as
// operand #2.
@@ -533,13 +581,13 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case ARM::BR_JTadd:
case ARM::tBR_JTr:
case ARM::t2BR_JT:
- case ARM::t2TBB:
- case ARM::t2TBH: {
+ case ARM::t2TBB_JT:
+ case ARM::t2TBH_JT: {
// These are jumptable branches, i.e. a branch followed by an inlined
// jumptable. The size is 4 + 4 * number of entries. For TBB, each
// entry is one byte; for TBH, two bytes each.
- unsigned EntrySize = (Opc == ARM::t2TBB)
- ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
+ unsigned EntrySize = (Opc == ARM::t2TBB_JT)
+ ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
unsigned NumOps = TID.getNumOperands();
MachineOperand JTOP =
MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
@@ -557,7 +605,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
// alignment issue.
unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
unsigned NumEntries = getNumJTEntries(JT, JTI);
- if (Opc == ARM::t2TBB && (NumEntries & 1))
+ if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
// Make sure the instruction that follows TBB is 2-byte aligned.
// FIXME: Constant island pass should insert an "ALIGN" instruction
// instead.
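
Restated, the table footprint computed in this hunk is entry size times entry
count, with one pad byte for an odd-length TBB table so the following
instruction stays 2-byte aligned. A rough standalone model (simplified: the
real code also adds the size of the branch itself and reads the entry count
from the jump-table info):

    #include <cassert>

    // EntrySize: 1 for t2TBB_JT, 2 for t2TBH_JT, 4 for the BR_JT forms.
    static unsigned jumpTableBytes(unsigned EntrySize, unsigned NumEntries) {
      if (EntrySize == 1 && (NumEntries & 1))
        ++NumEntries; // pad an odd TBB table by one byte
      return EntrySize * NumEntries;
    }

    int main() {
      assert(jumpTableBytes(1, 5) == 6);  // TBB, padded
      assert(jumpTableBytes(2, 5) == 10); // TBH
      assert(jumpTableBytes(4, 5) == 20); // BR_JT
      return 0;
    }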
@@ -573,84 +621,6 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
return 0; // Not reached
}
-unsigned
-ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::LDR:
- case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::t2LDRi12:
- case ARM::tRestore:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::VLDRD:
- case ARM::VLDRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
-
- return 0;
-}
-
-unsigned
-ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::STR:
- case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::t2STRi12:
- case ARM::tSpill:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::VSTRD:
- case ARM::VSTRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
-
- return 0;
-}
-
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -715,8 +685,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Align = MFI.getObjectAlignment(FI);
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
+ MF.getMachineMemOperand(MachinePointerInfo(
+ PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
Align);
@@ -728,9 +699,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
switch (RC->getID()) {
case ARM::GPRRegClassID:
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
.addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
break;
case ARM::SPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
@@ -747,17 +718,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
case ARM::QPRRegClassID:
case ARM::QPR_VFP2RegClassID:
case ARM::QPR_8RegClassID:
- // FIXME: Neon instructions should support predicates
- if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
+ if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
.addFrameIndex(FI).addImm(16)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO));
} else {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQ))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addMemOperand(MMO));
}
break;
@@ -766,18 +735,14 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
// FIXME: It's possible to only store part of the QQ register if the
// spilled def has a sub-register index.
- MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST1d64Q))
- .addFrameIndex(FI).addImm(16);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
- AddDefaultPred(MIB.addMemOperand(MMO));
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
+ .addFrameIndex(FI).addImm(16)
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addMemOperand(MMO));
} else {
MachineInstrBuilder MIB =
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
- .addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
+ .addFrameIndex(FI))
.addMemOperand(MMO);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
@@ -787,9 +752,8 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
break;
case ARM::QQQQPRRegClassID: {
MachineInstrBuilder MIB =
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
- .addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
+ .addFrameIndex(FI))
.addMemOperand(MMO);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
@@ -806,6 +770,53 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
+unsigned
+ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default: break;
+ case ARM::STRrs:
+ case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isReg() &&
+ MI->getOperand(3).isImm() &&
+ MI->getOperand(2).getReg() == 0 &&
+ MI->getOperand(3).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::STRi12:
+ case ARM::t2STRi12:
+ case ARM::tSpill:
+ case ARM::VSTRD:
+ case ARM::VSTRS:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::VST1q64Pseudo:
+ if (MI->getOperand(0).isFI() &&
+ MI->getOperand(2).getSubReg() == 0) {
+ FrameIndex = MI->getOperand(0).getIndex();
+ return MI->getOperand(2).getReg();
+ }
+ break;
+ case ARM::VSTMQIA:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ }
+
+ return 0;
+}
+
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
@@ -817,8 +828,9 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
Align);
@@ -830,8 +842,8 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
switch (RC->getID()) {
case ARM::GPRRegClassID:
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
break;
case ARM::SPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
@@ -846,31 +858,26 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
case ARM::QPRRegClassID:
case ARM::QPR_VFP2RegClassID:
case ARM::QPR_8RegClassID:
- if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
+ if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
.addFrameIndex(FI).addImm(16)
.addMemOperand(MMO));
} else {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg)
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
.addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addMemOperand(MMO));
}
break;
case ARM::QQPRRegClassID:
case ARM::QQPR_VFP2RegClassID:
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
- MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD1d64Q));
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
- AddDefaultPred(MIB.addFrameIndex(FI).addImm(16).addMemOperand(MMO));
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
+ .addFrameIndex(FI).addImm(16)
+ .addMemOperand(MMO));
} else {
MachineInstrBuilder MIB =
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
- .addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
+ .addFrameIndex(FI))
.addMemOperand(MMO);
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
@@ -880,9 +887,8 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
break;
case ARM::QQQQPRRegClassID: {
MachineInstrBuilder MIB =
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
- .addFrameIndex(FI)
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
+ .addFrameIndex(FI))
.addMemOperand(MMO);
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
@@ -899,6 +905,53 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
+unsigned
+ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default: break;
+ case ARM::LDRrs:
+ case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isReg() &&
+ MI->getOperand(3).isImm() &&
+ MI->getOperand(2).getReg() == 0 &&
+ MI->getOperand(3).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::LDRi12:
+ case ARM::t2LDRi12:
+ case ARM::tRestore:
+ case ARM::VLDRD:
+ case ARM::VLDRS:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::VLD1q64Pseudo:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::VLDMQIA:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ }
+
+ return 0;
+}
+
MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx, uint64_t Offset,
@@ -921,7 +974,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
ARMConstantPoolValue *ACPV =
static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
- unsigned PCLabelId = AFI->createConstPoolEntryUId();
+ unsigned PCLabelId = AFI->createPICLabelUId();
ARMConstantPoolValue *NewCPV = 0;
// FIXME: The below assumes PIC relocation model and that the function
// is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
@@ -991,12 +1044,18 @@ ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
}
bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const {
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const {
int Opcode = MI0->getOpcode();
if (Opcode == ARM::t2LDRpci ||
Opcode == ARM::t2LDRpci_pic ||
Opcode == ARM::tLDRpci ||
- Opcode == ARM::tLDRpci_pic) {
+ Opcode == ARM::tLDRpci_pic ||
+ Opcode == ARM::MOV_ga_dyn ||
+ Opcode == ARM::MOV_ga_pcrel ||
+ Opcode == ARM::MOV_ga_pcrel_ldr ||
+ Opcode == ARM::t2MOV_ga_dyn ||
+ Opcode == ARM::t2MOV_ga_pcrel) {
if (MI1->getOpcode() != Opcode)
return false;
if (MI0->getNumOperands() != MI1->getNumOperands())
@@ -1007,6 +1066,14 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
if (MO0.getOffset() != MO1.getOffset())
return false;
+ if (Opcode == ARM::MOV_ga_dyn ||
+ Opcode == ARM::MOV_ga_pcrel ||
+ Opcode == ARM::MOV_ga_pcrel_ldr ||
+ Opcode == ARM::t2MOV_ga_dyn ||
+ Opcode == ARM::t2MOV_ga_pcrel)
+ // Ignore the PC labels.
+ return MO0.getGlobal() == MO1.getGlobal();
+
const MachineFunction *MF = MI0->getParent()->getParent();
const MachineConstantPool *MCP = MF->getConstantPool();
int CPI0 = MO0.getIndex();
@@ -1018,6 +1085,37 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
ARMConstantPoolValue *ACPV1 =
static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
return ACPV0->hasSameValue(ACPV1);
+ } else if (Opcode == ARM::PICLDR) {
+ if (MI1->getOpcode() != Opcode)
+ return false;
+ if (MI0->getNumOperands() != MI1->getNumOperands())
+ return false;
+
+ unsigned Addr0 = MI0->getOperand(1).getReg();
+ unsigned Addr1 = MI1->getOperand(1).getReg();
+ if (Addr0 != Addr1) {
+ if (!MRI ||
+ !TargetRegisterInfo::isVirtualRegister(Addr0) ||
+ !TargetRegisterInfo::isVirtualRegister(Addr1))
+ return false;
+
+ // This assumes SSA form.
+ MachineInstr *Def0 = MRI->getVRegDef(Addr0);
+ MachineInstr *Def1 = MRI->getVRegDef(Addr1);
+ // Check if the loaded values, e.g. a constant-pool entry or a global
+ // address, are the same.
+ if (!produceSameValue(Def0, Def1, MRI))
+ return false;
+ }
+
+ for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
+ // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
+ const MachineOperand &MO0 = MI0->getOperand(i);
+ const MachineOperand &MO1 = MI1->getOperand(i);
+ if (!MO0.isIdenticalTo(MO1))
+ return false;
+ }
+ return true;
}
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
@@ -1040,8 +1138,8 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
switch (Load1->getMachineOpcode()) {
default:
return false;
- case ARM::LDR:
- case ARM::LDRB:
+ case ARM::LDRi12:
+ case ARM::LDRBi12:
case ARM::LDRD:
case ARM::LDRH:
case ARM::LDRSB:
@@ -1059,8 +1157,8 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
switch (Load2->getMachineOpcode()) {
default:
return false;
- case ARM::LDR:
- case ARM::LDRB:
+ case ARM::LDRi12:
+ case ARM::LDRBi12:
case ARM::LDRD:
case ARM::LDRH:
case ARM::LDRSB:
@@ -1164,22 +1262,37 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
}
-bool ARMBaseInstrInfo::
-isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
- if (!NumInstrs)
+bool ARMBaseInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ float Probability,
+ float Confidence) const {
+ if (!NumCycles)
return false;
- if (Subtarget.getCPUString() == "generic")
- // Generic (and overly aggressive) if-conversion limits for testing.
- return NumInstrs <= 10;
- else if (Subtarget.hasV7Ops())
- return NumInstrs <= 3;
- return NumInstrs <= 2;
+
+ // Attempt to estimate the relative costs of predication versus branching.
+ float UnpredCost = Probability * NumCycles;
+ UnpredCost += 1.0; // The branch itself
+ UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();
+
+ return (float)(NumCycles + ExtraPredCycles) < UnpredCost;
}
-
+
bool ARMBaseInstrInfo::
-isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
- MachineBasicBlock &FMBB, unsigned NumF) const {
- return NumT && NumF && NumT <= 2 && NumF <= 2;
+isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned TCycles, unsigned TExtra,
+ MachineBasicBlock &FMBB,
+ unsigned FCycles, unsigned FExtra,
+ float Probability, float Confidence) const {
+ if (!TCycles || !FCycles)
+ return false;
+
+ // Attempt to estimate the relative costs of predication versus branching.
+ float UnpredCost = Probability * TCycles + (1.0 - Probability) * FCycles;
+ UnpredCost += 1.0; // The branch itself
+ UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();
+
+ return (float)(TCycles + FCycles + TExtra + FExtra) < UnpredCost;
}
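
Both overloads above implement the same idea: predicate only when the cycles
spent executing both sides under a predicate stay below the expected cost of
the branchy version, i.e. the sides weighted by branch probability, plus one
cycle for the branch, plus a misprediction penalty discounted by profile
confidence. A standalone numeric sketch (the penalty value is illustrative,
not what getMispredictionPenalty returns):

    #include <cassert>

    static bool profitableToIfCvt(unsigned TCycles, unsigned TExtra,
                                  unsigned FCycles, unsigned FExtra,
                                  float Probability, float Confidence,
                                  float MispredictPenalty) {
      if (!TCycles || !FCycles)
        return false;
      float UnpredCost = Probability * TCycles + (1.0f - Probability) * FCycles;
      UnpredCost += 1.0f;                                   // the branch itself
      UnpredCost += (1.0f - Confidence) * MispredictPenalty;
      return (float)(TCycles + FCycles + TExtra + FExtra) < UnpredCost;
    }

    int main() {
      // Short, balanced sides and an unpredictable branch: predicate.
      assert(profitableToIfCvt(1, 0, 1, 0, 0.5f, 0.0f, 10.0f));
      // Long sides and a well-predicted branch: keep the branch.
      assert(!profitableToIfCvt(8, 1, 8, 1, 0.5f, 1.0f, 10.0f));
      return 0;
    }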
/// getInstrPredicate - If instruction is predicated, returns its predicate
@@ -1292,6 +1405,12 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned NumBits = 0;
unsigned Scale = 1;
switch (AddrMode) {
+ case ARMII::AddrMode_i12: {
+ ImmIdx = FrameRegIdx + 1;
+ InstrOffs = MI.getOperand(ImmIdx).getImm();
+ NumBits = 12;
+ break;
+ }
case ARMII::AddrMode2: {
ImmIdx = FrameRegIdx+2;
InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
@@ -1342,8 +1461,15 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
if ((unsigned)Offset <= Mask * Scale) {
// Replace the FrameIndex with sp
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- if (isSub)
- ImmedOffset |= 1 << NumBits;
+ // FIXME: When addrmode2 goes away, this will simplify (like the
+ // T2 version), as the LDR.i12 versions don't need the encoding
+ // tricks for the offset value.
+ if (isSub) {
+ if (AddrMode == ARMII::AddrMode_i12)
+ ImmedOffset = -ImmedOffset;
+ else
+ ImmedOffset |= 1 << NumBits;
+ }
ImmOp.ChangeToImmediate(ImmedOffset);
Offset = 0;
return true;
@@ -1351,8 +1477,12 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
ImmedOffset = ImmedOffset & Mask;
- if (isSub)
- ImmedOffset |= 1 << NumBits;
+ if (isSub) {
+ if (AddrMode == ARMII::AddrMode_i12)
+ ImmedOffset = -ImmedOffset;
+ else
+ ImmedOffset |= 1 << NumBits;
+ }
ImmOp.ChangeToImmediate(ImmedOffset);
Offset &= ~(Mask*Scale);
}
@@ -1363,25 +1493,88 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
}
bool ARMBaseInstrInfo::
-AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpValue) const {
+AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
+ int &CmpValue) const {
switch (MI->getOpcode()) {
default: break;
case ARM::CMPri:
- case ARM::CMPzri:
case ARM::t2CMPri:
- case ARM::t2CMPzri:
SrcReg = MI->getOperand(0).getReg();
+ CmpMask = ~0;
CmpValue = MI->getOperand(1).getImm();
return true;
+ case ARM::TSTri:
+ case ARM::t2TSTri:
+ SrcReg = MI->getOperand(0).getReg();
+ CmpMask = MI->getOperand(1).getImm();
+ CmpValue = 0;
+ return true;
}
return false;
}
-/// ConvertToSetZeroFlag - Convert the instruction to set the "zero" flag so
-/// that we can remove a "comparison with zero".
+/// isSuitableForMask - Identify a suitable 'and' instruction that
+/// operates on the given source register and applies the same mask
+/// as a 'tst' instruction. Provide a limited look-through for copies.
+/// When successful, MI will hold the found instruction.
+static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
+ int CmpMask, bool CommonUse) {
+ switch (MI->getOpcode()) {
+ case ARM::ANDri:
+ case ARM::t2ANDri:
+ if (CmpMask != MI->getOperand(2).getImm())
+ return false;
+ if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
+ return true;
+ break;
+ case ARM::COPY: {
+ // Walk down one instruction which is potentially an 'and'.
+ const MachineInstr &Copy = *MI;
+ MachineBasicBlock::iterator AND(
+ llvm::next(MachineBasicBlock::iterator(MI)));
+ if (AND == MI->getParent()->end()) return false;
+ MI = AND;
+ return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
+ CmpMask, true);
+ }
+ }
+
+ return false;
+}
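
The identity isSuitableForMask relies on: "tst r, #m" sets the flags from
r & m, which is exactly the value "and rd, r, #m" writes, so a flag-setting
"ands" can stand in for the and-plus-tst pair. In miniature (the values are
arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t R = 0x1204, M = 0x00FF;
      uint32_t AndResult = R & M;       // what "and rd, r, #m" writes
      bool TstZ = (R & M) == 0;         // what "tst r, #m" puts in Z
      assert(TstZ == (AndResult == 0)); // "ands" yields both at once
      return 0;
    }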
+
+/// OptimizeCompareInstr - Convert the instruction supplying the argument to the
+/// comparison into one that sets the zero bit in the flags register.
bool ARMBaseInstrInfo::
-ConvertToSetZeroFlag(MachineInstr *MI, MachineInstr *CmpInstr) const {
+OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
+ int CmpValue, const MachineRegisterInfo *MRI) const {
+ if (CmpValue != 0)
+ return false;
+
+ MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
+ if (llvm::next(DI) != MRI->def_end())
+ // Only support one definition.
+ return false;
+
+ MachineInstr *MI = &*DI;
+
+ // Masked compares sometimes use the same register as the corresponding 'and'.
+ if (CmpMask != ~0) {
+ if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
+ MI = 0;
+ for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
+ UE = MRI->use_end(); UI != UE; ++UI) {
+ if (UI->getParent() != CmpInstr->getParent()) continue;
+ MachineInstr *PotentialAND = &*UI;
+ if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
+ continue;
+ MI = PotentialAND;
+ break;
+ }
+ if (!MI) return false;
+ }
+ }
+
// Conservatively refuse to convert an instruction which isn't in the same BB
// as the comparison.
if (MI->getParent() != CmpInstr->getParent())
@@ -1391,16 +1584,20 @@ ConvertToSetZeroFlag(MachineInstr *MI, MachineInstr *CmpInstr) const {
// want to change.
MachineBasicBlock::const_iterator I = CmpInstr, E = MI,
B = MI->getParent()->begin();
+
+ // Early exit if CmpInstr is at the beginning of the BB.
+ if (I == B) return false;
+
--I;
for (; I != E; --I) {
const MachineInstr &Instr = *I;
for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
const MachineOperand &MO = Instr.getOperand(IO);
- if (!MO.isReg() || !MO.isDef()) continue;
+ if (!MO.isReg()) continue;
- // This instruction modifies CPSR before the one we want to change. We
- // can't do this transformation.
+ // This instruction modifies or uses CPSR after the one we want to
+ // change. We can't do this transformation.
if (MO.getReg() == ARM::CPSR)
return false;
}
@@ -1414,15 +1611,713 @@ ConvertToSetZeroFlag(MachineInstr *MI, MachineInstr *CmpInstr) const {
switch (MI->getOpcode()) {
default: break;
case ARM::ADDri:
+ case ARM::ANDri:
+ case ARM::t2ANDri:
case ARM::SUBri:
case ARM::t2ADDri:
case ARM::t2SUBri:
- MI->RemoveOperand(5);
- MachineInstrBuilder(MI)
- .addReg(ARM::CPSR, RegState::Define | RegState::Implicit);
+ // Toggle the optional operand to CPSR.
+ MI->getOperand(5).setReg(ARM::CPSR);
+ MI->getOperand(5).setIsDef(true);
CmpInstr->eraseFromParent();
return true;
}
return false;
}
+
+bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
+ MachineInstr *DefMI, unsigned Reg,
+ MachineRegisterInfo *MRI) const {
+ // Fold large immediates into add, sub, or, xor.
+ unsigned DefOpc = DefMI->getOpcode();
+ if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
+ return false;
+ if (!DefMI->getOperand(1).isImm())
+ // Could be t2MOVi32imm <ga:xx>
+ return false;
+
+ if (!MRI->hasOneNonDBGUse(Reg))
+ return false;
+
+ unsigned UseOpc = UseMI->getOpcode();
+ unsigned NewUseOpc = 0;
+ uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
+ uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
+ bool Commute = false;
+ switch (UseOpc) {
+ default: return false;
+ case ARM::SUBrr:
+ case ARM::ADDrr:
+ case ARM::ORRrr:
+ case ARM::EORrr:
+ case ARM::t2SUBrr:
+ case ARM::t2ADDrr:
+ case ARM::t2ORRrr:
+ case ARM::t2EORrr: {
+ Commute = UseMI->getOperand(2).getReg() != Reg;
+ switch (UseOpc) {
+ default: break;
+ case ARM::SUBrr: {
+ if (Commute)
+ return false;
+ ImmVal = -ImmVal;
+ NewUseOpc = ARM::SUBri;
+ // Fallthrough
+ }
+ case ARM::ADDrr:
+ case ARM::ORRrr:
+ case ARM::EORrr: {
+ if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
+ return false;
+ SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
+ SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
+ switch (UseOpc) {
+ default: break;
+ case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
+ case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
+ case ARM::EORrr: NewUseOpc = ARM::EORri; break;
+ }
+ break;
+ }
+ case ARM::t2SUBrr: {
+ if (Commute)
+ return false;
+ ImmVal = -ImmVal;
+ NewUseOpc = ARM::t2SUBri;
+ // Fallthrough
+ }
+ case ARM::t2ADDrr:
+ case ARM::t2ORRrr:
+ case ARM::t2EORrr: {
+ if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
+ return false;
+ SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
+ SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
+ switch (UseOpc) {
+ default: break;
+ case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
+ case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
+ case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ unsigned OpIdx = Commute ? 2 : 1;
+ unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
+ bool isKill = UseMI->getOperand(OpIdx).isKill();
+ unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
+ AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
+ *UseMI, UseMI->getDebugLoc(),
+ get(NewUseOpc), NewReg)
+ .addReg(Reg1, getKillRegState(isKill))
+ .addImm(SOImmValV1)));
+ UseMI->setDesc(get(NewUseOpc));
+ UseMI->getOperand(1).setReg(NewReg);
+ UseMI->getOperand(1).setIsKill();
+ UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
+ DefMI->eraseFromParent();
+ return true;
+}
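
The fold above hinges on the "two-part" immediate tests: an ARM
data-processing immediate is an 8-bit value rotated right by an even amount,
and some 32-bit constants that are not encodable on their own split into two
such pieces, so an op + op pair can replace a movi32imm expansion. A crude
standalone approximation of the test (LLVM's real helpers in
ARMAddressingModes.h choose the parts more carefully):

    #include <cassert>
    #include <cstdint>

    // Encodable iff some even left-rotation brings V down to 8 bits.
    static bool isSOImm(uint32_t V) {
      for (unsigned Rot = 0; Rot < 32; Rot += 2) {
        uint32_t R = (V << Rot) | (V >> ((32 - Rot) & 31)); // rotl(V, Rot)
        if (R <= 0xFFu)
          return true;
      }
      return false;
    }

    static bool isSOImmTwoPart(uint32_t V) {
      if (isSOImm(V))
        return false; // one instruction already suffices
      for (unsigned Rot = 0; Rot < 32; Rot += 2) {
        uint32_t L = (V << Rot) | (V >> ((32 - Rot) & 31));
        uint32_t Chunk = L & 0xFFu; // one encodable 8-bit window of V
        uint32_t Part = (Chunk >> Rot) | (Chunk << ((32 - Rot) & 31));
        uint32_t Rest = V & ~Part;
        if (Rest != 0 && isSOImm(Rest))
          return true;
      }
      return false;
    }

    int main() {
      assert(isSOImm(0xFF0u));             // 0xFF rotated
      assert(!isSOImm(0x00F000F0u));       // bits spread too far apart
      assert(isSOImmTwoPart(0x00F000F0u)); // ...but splittable in two
      return 0;
    }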
+
+unsigned
+ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ const TargetInstrDesc &Desc = MI->getDesc();
+ unsigned Class = Desc.getSchedClass();
+ unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
+ if (UOps)
+ return UOps;
+
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unexpected multi-uops instruction!");
+ break;
+ case ARM::VLDMQIA:
+ case ARM::VLDMQDB:
+ case ARM::VSTMQIA:
+ case ARM::VSTMQDB:
+ return 2;
+
+ // The number of uOps for a load / store multiple is determined by the
+ // number of registers.
+ //
+ // On Cortex-A8, each pair of register loads / stores can be scheduled on the
+ // same cycle. The scheduling for the first load / store must be done
+ // separately by assuming the address is not 64-bit aligned.
+ //
+ // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
+ // is not 64-bit aligned, then the AGU would take an extra cycle. For VFP / NEON
+ // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
+ case ARM::VLDMDIA:
+ case ARM::VLDMDDB:
+ case ARM::VLDMDIA_UPD:
+ case ARM::VLDMDDB_UPD:
+ case ARM::VLDMSIA:
+ case ARM::VLDMSDB:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMSDB_UPD:
+ case ARM::VSTMDIA:
+ case ARM::VSTMDDB:
+ case ARM::VSTMDIA_UPD:
+ case ARM::VSTMDDB_UPD:
+ case ARM::VSTMSIA:
+ case ARM::VSTMSDB:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VSTMSDB_UPD: {
+ unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
+ return (NumRegs / 2) + (NumRegs % 2) + 1;
+ }
+
+ case ARM::LDMIA_RET:
+ case ARM::LDMIA:
+ case ARM::LDMDA:
+ case ARM::LDMDB:
+ case ARM::LDMIB:
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::STMIA:
+ case ARM::STMDA:
+ case ARM::STMDB:
+ case ARM::STMIB:
+ case ARM::STMIA_UPD:
+ case ARM::STMDA_UPD:
+ case ARM::STMDB_UPD:
+ case ARM::STMIB_UPD:
+ case ARM::tLDMIA:
+ case ARM::tLDMIA_UPD:
+ case ARM::tSTMIA:
+ case ARM::tSTMIA_UPD:
+ case ARM::tPOP_RET:
+ case ARM::tPOP:
+ case ARM::tPUSH:
+ case ARM::t2LDMIA_RET:
+ case ARM::t2LDMIA:
+ case ARM::t2LDMDB:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA:
+ case ARM::t2STMDB:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD: {
+ unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
+ if (Subtarget.isCortexA8()) {
+ if (NumRegs < 4)
+ return 2;
+ // 4 registers would be issued: 2, 2.
+ // 5 registers would be issued: 2, 2, 1.
+ UOps = (NumRegs / 2);
+ if (NumRegs % 2)
+ ++UOps;
+ return UOps;
+ } else if (Subtarget.isCortexA9()) {
+ UOps = (NumRegs / 2);
+ // If there is an odd number of registers or if it's not 64-bit aligned,
+ // then it takes an extra AGU (Address Generation Unit) cycle.
+ if ((NumRegs % 2) ||
+ !MI->hasOneMemOperand() ||
+ (*MI->memoperands_begin())->getAlignment() < 8)
+ ++UOps;
+ return UOps;
+ } else {
+ // Assume the worst.
+ return NumRegs;
+ }
+ }
+ }
+}
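
The arithmetic in the load/store-multiple cases above, pulled out: Cortex-A8
issues register pairs but never fewer than 2 uOps, Cortex-A9 issues ceil(n/2)
plus one extra AGU cycle when the register count is odd or the address is not
64-bit aligned, and an unknown core assumes the worst. A small sketch:

    #include <cassert>

    static unsigned ldmUOps(unsigned NumRegs, bool IsA8, bool IsA9,
                            bool Aligned64) {
      if (IsA8)
        return NumRegs < 4 ? 2 : NumRegs / 2 + NumRegs % 2;
      if (IsA9)
        return NumRegs / 2 + ((NumRegs % 2 || !Aligned64) ? 1 : 0);
      return NumRegs; // unknown core: one uOp per register
    }

    int main() {
      assert(ldmUOps(3, true, false, true) == 2);  // A8: small LDM is still 2
      assert(ldmUOps(5, true, false, true) == 3);  // A8: issued 2, 2, 1
      assert(ldmUOps(4, false, true, true) == 2);  // A9: even and aligned
      assert(ldmUOps(4, false, true, false) == 3); // A9: misalignment costs 1
      return 0;
    }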
+
+int
+ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefClass,
+ unsigned DefIdx, unsigned DefAlign) const {
+ int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
+ if (RegNo <= 0)
+ // Def is the address writeback.
+ return ItinData->getOperandCycle(DefClass, DefIdx);
+
+ int DefCycle;
+ if (Subtarget.isCortexA8()) {
+ // (regno / 2) + (regno % 2) + 1
+ DefCycle = RegNo / 2 + 1;
+ if (RegNo % 2)
+ ++DefCycle;
+ } else if (Subtarget.isCortexA9()) {
+ DefCycle = RegNo;
+ bool isSLoad = false;
+
+ switch (DefTID.getOpcode()) {
+ default: break;
+ case ARM::VLDMSIA:
+ case ARM::VLDMSDB:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMSDB_UPD:
+ isSLoad = true;
+ break;
+ }
+
+ // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
+ // then it takes an extra cycle.
+ if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
+ ++DefCycle;
+ } else {
+ // Assume the worst.
+ DefCycle = RegNo + 2;
+ }
+
+ return DefCycle;
+}
+
+int
+ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefClass,
+ unsigned DefIdx, unsigned DefAlign) const {
+ int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
+ if (RegNo <= 0)
+ // Def is the address writeback.
+ return ItinData->getOperandCycle(DefClass, DefIdx);
+
+ int DefCycle;
+ if (Subtarget.isCortexA8()) {
+ // 4 registers would be issued: 1, 2, 1.
+ // 5 registers would be issued: 1, 2, 2.
+ DefCycle = RegNo / 2;
+ if (DefCycle < 1)
+ DefCycle = 1;
+ // Result latency is issue cycle + 2: E2.
+ DefCycle += 2;
+ } else if (Subtarget.isCortexA9()) {
+ DefCycle = (RegNo / 2);
+ // If there is an odd number of registers or if it's not 64-bit aligned,
+ // then it takes an extra AGU (Address Generation Unit) cycle.
+ if ((RegNo % 2) || DefAlign < 8)
+ ++DefCycle;
+ // Result latency is AGU cycles + 2.
+ DefCycle += 2;
+ } else {
+ // Assume the worst.
+ DefCycle = RegNo + 2;
+ }
+
+ return DefCycle;
+}
+
+int
+ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &UseTID,
+ unsigned UseClass,
+ unsigned UseIdx, unsigned UseAlign) const {
+ int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
+ if (RegNo <= 0)
+ return ItinData->getOperandCycle(UseClass, UseIdx);
+
+ int UseCycle;
+ if (Subtarget.isCortexA8()) {
+ // (regno / 2) + (regno % 2) + 1
+ UseCycle = RegNo / 2 + 1;
+ if (RegNo % 2)
+ ++UseCycle;
+ } else if (Subtarget.isCortexA9()) {
+ UseCycle = RegNo;
+ bool isSStore = false;
+
+ switch (UseTID.getOpcode()) {
+ default: break;
+ case ARM::VSTMSIA:
+ case ARM::VSTMSDB:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VSTMSDB_UPD:
+ isSStore = true;
+ break;
+ }
+
+ // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
+ // then it takes an extra cycle.
+ if ((isSStore && (RegNo % 2)) || UseAlign < 8)
+ ++UseCycle;
+ } else {
+ // Assume the worst.
+ UseCycle = RegNo + 2;
+ }
+
+ return UseCycle;
+}
+
+int
+ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &UseTID,
+ unsigned UseClass,
+ unsigned UseIdx, unsigned UseAlign) const {
+ int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
+ if (RegNo <= 0)
+ return ItinData->getOperandCycle(UseClass, UseIdx);
+
+ int UseCycle;
+ if (Subtarget.isCortexA8()) {
+ UseCycle = RegNo / 2;
+ if (UseCycle < 2)
+ UseCycle = 2;
+ // Read in E3.
+ UseCycle += 2;
+ } else if (Subtarget.isCortexA9()) {
+ UseCycle = (RegNo / 2);
+ // If there is an odd number of registers or if it's not 64-bit aligned,
+ // then it takes an extra AGU (Address Generation Unit) cycle.
+ if ((RegNo % 2) || UseAlign < 8)
+ ++UseCycle;
+ } else {
+ // Assume the worst.
+ UseCycle = 1;
+ }
+ return UseCycle;
+}
+
+int
+ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefIdx, unsigned DefAlign,
+ const TargetInstrDesc &UseTID,
+ unsigned UseIdx, unsigned UseAlign) const {
+ unsigned DefClass = DefTID.getSchedClass();
+ unsigned UseClass = UseTID.getSchedClass();
+
+ if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands())
+ return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
+
+ // This may be a def / use of a variable_ops instruction; the operand
+ // latency might be determinable dynamically. Let the target try to
+ // figure it out.
+ int DefCycle = -1;
+ bool LdmBypass = false;
+ switch (DefTID.getOpcode()) {
+ default:
+ DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
+ break;
+
+ case ARM::VLDMDIA:
+ case ARM::VLDMDDB:
+ case ARM::VLDMDIA_UPD:
+ case ARM::VLDMDDB_UPD:
+ case ARM::VLDMSIA:
+ case ARM::VLDMSDB:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMSDB_UPD:
+ DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
+ break;
+
+ case ARM::LDMIA_RET:
+ case ARM::LDMIA:
+ case ARM::LDMDA:
+ case ARM::LDMDB:
+ case ARM::LDMIB:
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::tLDMIA:
+ case ARM::tLDMIA_UPD:
+ case ARM::tPUSH:
+ case ARM::t2LDMIA_RET:
+ case ARM::t2LDMIA:
+ case ARM::t2LDMDB:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ LdmBypass = 1;
+ DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
+ break;
+ }
+
+ if (DefCycle == -1)
+ // We can't seem to determine the result latency of the def; assume it's 2.
+ DefCycle = 2;
+
+ int UseCycle = -1;
+ switch (UseTID.getOpcode()) {
+ default:
+ UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
+ break;
+
+ case ARM::VSTMDIA:
+ case ARM::VSTMDDB:
+ case ARM::VSTMDIA_UPD:
+ case ARM::VSTMDDB_UPD:
+ case ARM::VSTMSIA:
+ case ARM::VSTMSDB:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VSTMSDB_UPD:
+ UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
+ break;
+
+ case ARM::STMIA:
+ case ARM::STMDA:
+ case ARM::STMDB:
+ case ARM::STMIB:
+ case ARM::STMIA_UPD:
+ case ARM::STMDA_UPD:
+ case ARM::STMDB_UPD:
+ case ARM::STMIB_UPD:
+ case ARM::tSTMIA:
+ case ARM::tSTMIA_UPD:
+ case ARM::tPOP_RET:
+ case ARM::tPOP:
+ case ARM::t2STMIA:
+ case ARM::t2STMDB:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD:
+ UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
+ break;
+ }
+
+ if (UseCycle == -1)
+ // Assume it's read in the first stage.
+ UseCycle = 1;
+
+ UseCycle = DefCycle - UseCycle + 1;
+ if (UseCycle > 0) {
+ if (LdmBypass) {
+ // It's a variable_ops instruction, so we can't use DefIdx here. Just use
+ // first def operand.
+ if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1,
+ UseClass, UseIdx))
+ --UseCycle;
+ } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
+ UseClass, UseIdx)) {
+ --UseCycle;
+ }
+ }
+
+ return UseCycle;
+}
+
+int
+ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
+ DefMI->isRegSequence() || DefMI->isImplicitDef())
+ return 1;
+
+ const TargetInstrDesc &DefTID = DefMI->getDesc();
+ if (!ItinData || ItinData->isEmpty())
+ return DefTID.mayLoad() ? 3 : 1;
+
+ const TargetInstrDesc &UseTID = UseMI->getDesc();
+ const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+ if (DefMO.getReg() == ARM::CPSR) {
+ if (DefMI->getOpcode() == ARM::FMSTAT) {
+ // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
+ return Subtarget.isCortexA9() ? 1 : 20;
+ }
+
+ // CPSR set and branch can be paired in the same cycle.
+ if (UseTID.isBranch())
+ return 0;
+ }
+
+ unsigned DefAlign = DefMI->hasOneMemOperand()
+ ? (*DefMI->memoperands_begin())->getAlignment() : 0;
+ unsigned UseAlign = UseMI->hasOneMemOperand()
+ ? (*UseMI->memoperands_begin())->getAlignment() : 0;
+ int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
+ UseTID, UseIdx, UseAlign);
+
+ if (Latency > 1 &&
+ (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
+ // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
+ // variants are one cycle cheaper.
+ switch (DefTID.getOpcode()) {
+ default: break;
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
+ unsigned ShOpVal = DefMI->getOperand(3).getImm();
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (ShImm == 0 ||
+ (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
+ --Latency;
+ break;
+ }
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
+ case ARM::t2LDRSHs: {
+ // Thumb2 mode: lsl only.
+ unsigned ShAmt = DefMI->getOperand(3).getImm();
+ if (ShAmt == 0 || ShAmt == 2)
+ --Latency;
+ break;
+ }
+ }
+ }
+
+ return Latency;
+}
+
+int
+ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const {
+ if (!DefNode->isMachineOpcode())
+ return 1;
+
+ const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode());
+
+ if (isZeroCost(DefTID.Opcode))
+ return 0;
+
+ if (!ItinData || ItinData->isEmpty())
+ return DefTID.mayLoad() ? 3 : 1;
+
+ if (!UseNode->isMachineOpcode()) {
+ int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx);
+ if (Subtarget.isCortexA9())
+ return Latency <= 2 ? 1 : Latency - 1;
+ else
+ return Latency <= 3 ? 1 : Latency - 2;
+ }
+
+ const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode());
+ const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
+ unsigned DefAlign = !DefMN->memoperands_empty()
+ ? (*DefMN->memoperands_begin())->getAlignment() : 0;
+ const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
+ unsigned UseAlign = !UseMN->memoperands_empty()
+ ? (*UseMN->memoperands_begin())->getAlignment() : 0;
+ int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
+ UseTID, UseIdx, UseAlign);
+
+ if (Latency > 1 &&
+ (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
+ // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
+ // variants are one cycle cheaper.
+ switch (DefTID.getOpcode()) {
+ default: break;
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
+ unsigned ShOpVal =
+ cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
+ if (ShImm == 0 ||
+ (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
+ --Latency;
+ break;
+ }
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
+ case ARM::t2LDRSHs: {
+ // Thumb2 mode: lsl only.
+ unsigned ShAmt =
+ cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ if (ShAmt == 0 || ShAmt == 2)
+ --Latency;
+ break;
+ }
+ }
+ }
+
+ return Latency;
+}
+
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
+ if (MI->isCopyLike() || MI->isInsertSubreg() ||
+ MI->isRegSequence() || MI->isImplicitDef())
+ return 1;
+
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ const TargetInstrDesc &TID = MI->getDesc();
+ unsigned Class = TID.getSchedClass();
+ unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
+ if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR))
+ // When predicated, CPSR is an additional source operand for CPSR updating
+ // instructions; this apparently increases their latencies.
+ *PredCost = 1;
+ if (UOps)
+ return ItinData->getStageLatency(Class);
+ return getNumMicroOps(ItinData, MI);
+}
+
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const {
+ if (!Node->isMachineOpcode())
+ return 1;
+
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ unsigned Opcode = Node->getMachineOpcode();
+ switch (Opcode) {
+ default:
+ return ItinData->getStageLatency(get(Opcode).getSchedClass());
+ case ARM::VLDMQIA:
+ case ARM::VLDMQDB:
+ case ARM::VSTMQIA:
+ case ARM::VSTMQDB:
+ return 2;
+ }
+}
+
+bool ARMBaseInstrInfo::
+hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
+ unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
+ if (Subtarget.isCortexA8() &&
+ (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
+ // CortexA8 VFP instructions are not pipelined.
+ return true;
+
+ // Hoist VFP / NEON instructions with 4 or higher latency.
+ int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+ if (Latency <= 3)
+ return false;
+ return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
+ UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
+}
+
+bool ARMBaseInstrInfo::
+hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return false;
+
+ unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
+ if (DDomain == ARMII::DomainGeneral) {
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
+ return (DefCycle != -1 && DefCycle <= 2);
+ }
+ return false;
+}
+
+bool
+ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
+ unsigned &AddSubOpc,
+ bool &NegAcc, bool &HasLane) const {
+ DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
+ if (I == MLxEntryMap.end())
+ return false;
+
+ const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
+ MulOpc = Entry.MulOpc;
+ AddSubOpc = Entry.AddSubOpc;
+ NegAcc = Entry.NegAcc;
+ HasLane = Entry.HasLane;
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index b4f4a33..1fb8872 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -17,6 +17,8 @@
#include "ARM.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
namespace llvm {
class ARMSubtarget;
@@ -33,7 +35,7 @@ namespace ARMII {
//===------------------------------------------------------------------===//
// This five-bit field describes the addressing mode used.
- AddrModeMask = 0xf,
+ AddrModeMask = 0x1f,
AddrModeNone = 0,
AddrMode1 = 1,
AddrMode2 = 2,
@@ -50,9 +52,10 @@ namespace ARMII {
AddrModeT2_so = 13,
AddrModeT2_pc = 14, // +/- i12 for pc relative data
AddrModeT2_i8s4 = 15, // i8 * 4
+ AddrMode_i12 = 16,
// Size* - Flags to keep track of the size of an instruction.
- SizeShift = 4,
+ SizeShift = 5,
SizeMask = 7 << SizeShift,
SizeSpecial = 1, // 0 byte pseudo or special case.
Size8Bytes = 2,
@@ -61,7 +64,7 @@ namespace ARMII {
// IndexMode - Unindexed, pre-indexed, or post-indexed are valid for load
// and store ops only. Generic "updating" flag is used for ld/st multiple.
- IndexModeShift = 7,
+ IndexModeShift = 8,
IndexModeMask = 3 << IndexModeShift,
IndexModePre = 1,
IndexModePost = 2,
@@ -70,7 +73,7 @@ namespace ARMII {
//===------------------------------------------------------------------===//
// Instruction encoding formats.
//
- FormShift = 9,
+ FormShift = 10,
FormMask = 0x3f << FormShift,
// Pseudo instructions
@@ -143,15 +146,15 @@ namespace ARMII {
// UnaryDP - Indicates this is a unary data processing instruction, i.e.
// it doesn't have a Rn operand.
- UnaryDP = 1 << 15,
+ UnaryDP = 1 << 16,
// Xform16Bit - Indicates this Thumb2 instruction may be transformed into
// a 16-bit Thumb instruction if certain conditions are met.
- Xform16Bit = 1 << 16,
+ Xform16Bit = 1 << 17,
//===------------------------------------------------------------------===//
// Code domain.
- DomainShift = 17,
+ DomainShift = 18,
DomainMask = 3 << DomainShift,
DomainGeneral = 0 << DomainShift,
DomainVFP = 1 << DomainShift,
@@ -160,6 +163,11 @@ namespace ARMII {
//===------------------------------------------------------------------===//
// Field shifts - such shifts are used to set field while generating
// machine instructions.
+ //
+ // FIXME: This list will need adjusting/fixing as the MC code emitter
+ // takes shape and the ARMCodeEmitter.cpp bits go away.
+ ShiftTypeShift = 4,
+
M_BitShift = 5,
ShiftImmShift = 5,
ShiftShift = 7,
@@ -181,29 +189,15 @@ namespace ARMII {
I_BitShift = 25,
CondShift = 28
};
-
- /// Target Operand Flag enum.
- enum TOF {
- //===------------------------------------------------------------------===//
- // ARM Specific MachineOperand flags.
-
- MO_NO_FLAG,
-
- /// MO_LO16 - On a symbol operand, this represents a relocation containing
- /// lower 16 bit of the address. Used only via movw instruction.
- MO_LO16,
-
- /// MO_HI16 - On a symbol operand, this represents a relocation containing
- /// higher 16 bit of the address. Used only via movt instruction.
- MO_HI16
- };
}
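// Self-contained illustration of why this hunk renumbers every field:
// widening AddrModeMask from 4 bits (0xf) to 5 bits (0x1f), to make room for
// AddrMode_i12 == 16, pushes SizeShift, IndexModeShift, FormShift, UnaryDP,
// Xform16Bit and DomainShift up by one bit each. The constants below are
// copied from the enum above; the assertions show a round-trip decode.
#include <cassert>
#include <cstdint>

enum : uint64_t {
  kAddrModeMask   = 0x1f,                    // bits 0-4 (was 0xf, bits 0-3)
  kSizeShift      = 5,                       // was 4
  kSizeMask       = 7ull << kSizeShift,
  kIndexModeShift = 8,                       // was 7
  kIndexModeMask  = 3ull << kIndexModeShift,
  kAddrMode_i12   = 16                       // the new mode needing a 5th bit
};

int main() {
  uint64_t TSFlags = kAddrMode_i12 | (2ull << kSizeShift) |
                     (1ull << kIndexModeShift);
  assert((TSFlags & kAddrModeMask) == kAddrMode_i12);
  assert(((TSFlags & kSizeMask) >> kSizeShift) == 2);           // Size8Bytes
  assert(((TSFlags & kIndexModeMask) >> kIndexModeShift) == 1); // IndexModePre
  return 0;
}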
class ARMBaseInstrInfo : public TargetInstrInfoImpl {
const ARMSubtarget &Subtarget;
+
protected:
// Can be only subclassed.
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
+
public:
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
@@ -216,10 +210,13 @@ public:
virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
const ARMSubtarget &getSubtarget() const { return Subtarget; }
- bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
+ ScheduleHazardRecognizer *
+ CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const;
+
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const;
// Branch analysis.
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
@@ -301,7 +298,8 @@ public:
MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const;
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
@@ -328,26 +326,117 @@ public:
const MachineFunction &MF) const;
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs) const;
+ unsigned NumCyles, unsigned ExtraPredCycles,
+ float Prob, float Confidence) const;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,unsigned NumT,
- MachineBasicBlock &FMBB,unsigned NumF) const;
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumT, unsigned ExtraT,
+ MachineBasicBlock &FMBB,
+ unsigned NumF, unsigned ExtraF,
+ float Probability, float Confidence) const;
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs) const {
- return NumInstrs && NumInstrs == 1;
+ unsigned NumCyles,
+ float Probability,
+ float Confidence) const {
+ return NumCyles == 1;
}
/// AnalyzeCompare - For a comparison instruction, return the source register
/// in SrcReg and the value it compares against in CmpValue. Return true if
/// the comparison instruction can be analyzed.
virtual bool AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- int &CmpValue) const;
+ int &CmpMask, int &CmpValue) const;
- /// ConvertToSetZeroFlag - Convert the instruction to set the zero flag so
+ /// OptimizeCompareInstr - Convert the instruction to set the zero flag so
/// that we can remove a "comparison with zero".
- virtual bool ConvertToSetZeroFlag(MachineInstr *Instr,
- MachineInstr *CmpInstr) const;
+ virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const;
+
+ /// FoldImmediate - 'Reg' is known to be defined by a move immediate
+ /// instruction; try to fold the immediate into the use instruction.
+ virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const;
+
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
+
+ virtual
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+ virtual
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+private:
+ int getVLDMDefCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefClass,
+ unsigned DefIdx, unsigned DefAlign) const;
+ int getLDMDefCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefClass,
+ unsigned DefIdx, unsigned DefAlign) const;
+ int getVSTMUseCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &UseTID,
+ unsigned UseClass,
+ unsigned UseIdx, unsigned UseAlign) const;
+ int getSTMUseCycle(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &UseTID,
+ unsigned UseClass,
+ unsigned UseIdx, unsigned UseAlign) const;
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const TargetInstrDesc &DefTID,
+ unsigned DefIdx, unsigned DefAlign,
+ const TargetInstrDesc &UseTID,
+ unsigned UseIdx, unsigned UseAlign) const;
+
+ int getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI, unsigned *PredCost = 0) const;
+
+ int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
+
+ bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+ bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
+
+private:
+ /// Modeling special VFP / NEON fp MLA / MLS hazards.
+
+ /// MLxEntryMap - Map fp MLA / MLS to the corresponding entry in the internal
+ /// MLx table.
+ DenseMap<unsigned, unsigned> MLxEntryMap;
+
+ /// MLxHazardOpcodes - Set of add / sub and multiply opcodes that would cause
+ /// stalls when scheduled together with fp MLA / MLS opcodes.
+ SmallSet<unsigned, 16> MLxHazardOpcodes;
+
+public:
+ /// isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS
+ /// instruction.
+ bool isFpMLxInstruction(unsigned Opcode) const {
+ return MLxEntryMap.count(Opcode);
+ }
+
+ /// isFpMLxInstruction - This version also returns the multiply opcode and the
+ /// addition / subtraction opcode to expand to. Sets 'HasLane' to true for
+ /// MLx instructions that carry an extra lane operand.
+ bool isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
+ unsigned &AddSubOpc, bool &NegAcc,
+ bool &HasLane) const;
+
+ /// canCauseFpMLxStall - Return true if an instruction of the specified opcode
+ /// will cause stalls when scheduled after (within a 4-cycle window) an fp
+ /// MLA / MLS instruction.
+ bool canCauseFpMLxStall(unsigned Opcode) const {
+ return MLxHazardOpcodes.count(Opcode);
+ }
};
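// Illustrative pairing of the two public MLx queries above (the helper name
// is ours; TII is assumed in scope): a scheduler can use it to keep a
// stalling opcode out of the 4-cycle window behind an fp MLA / MLS.
static bool wouldStallAfterMLx(const llvm::ARMBaseInstrInfo &TII,
                               unsigned PrevOpc, unsigned NextOpc) {
  return TII.isFpMLxInstruction(PrevOpc) && TII.canCauseFpMLxStall(NextOpc);
}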
static inline
@@ -389,7 +478,7 @@ bool isJumpTableBranchOpcode(int Opc) {
static inline
bool isIndirectBranchOpcode(int Opc) {
- return Opc == ARM::BRIND || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
+ return Opc == ARM::BX || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
}
/// getInstrPredicate - If instruction is predicated, returns its predicate
@@ -413,6 +502,12 @@ void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
unsigned DestReg, unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII);
+void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned DestReg, unsigned BaseReg,
+ int NumBytes, const TargetInstrInfo &TII,
+ const ARMBaseRegisterInfo& MRI,
+ DebugLoc dl);
/// rewriteARMFrameIndex / rewriteT2FrameIndex -
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index eceafad..67a4b7d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -15,6 +15,7 @@
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
+#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
@@ -32,120 +33,25 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"
-namespace llvm {
+using namespace llvm;
+
static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
cl::desc("Enable pre-regalloc stack frame index allocation"));
-}
-
-using namespace llvm;
-
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
-unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
- bool *isSPVFP) {
- if (isSPVFP)
- *isSPVFP = false;
-
- using namespace ARM;
- switch (RegEnum) {
- default:
- llvm_unreachable("Unknown ARM register!");
- case R0: case D0: case Q0: return 0;
- case R1: case D1: case Q1: return 1;
- case R2: case D2: case Q2: return 2;
- case R3: case D3: case Q3: return 3;
- case R4: case D4: case Q4: return 4;
- case R5: case D5: case Q5: return 5;
- case R6: case D6: case Q6: return 6;
- case R7: case D7: case Q7: return 7;
- case R8: case D8: case Q8: return 8;
- case R9: case D9: case Q9: return 9;
- case R10: case D10: case Q10: return 10;
- case R11: case D11: case Q11: return 11;
- case R12: case D12: case Q12: return 12;
- case SP: case D13: case Q13: return 13;
- case LR: case D14: case Q14: return 14;
- case PC: case D15: case Q15: return 15;
-
- case D16: return 16;
- case D17: return 17;
- case D18: return 18;
- case D19: return 19;
- case D20: return 20;
- case D21: return 21;
- case D22: return 22;
- case D23: return 23;
- case D24: return 24;
- case D25: return 25;
- case D26: return 26;
- case D27: return 27;
- case D28: return 28;
- case D29: return 29;
- case D30: return 30;
- case D31: return 31;
-
- case S0: case S1: case S2: case S3:
- case S4: case S5: case S6: case S7:
- case S8: case S9: case S10: case S11:
- case S12: case S13: case S14: case S15:
- case S16: case S17: case S18: case S19:
- case S20: case S21: case S22: case S23:
- case S24: case S25: case S26: case S27:
- case S28: case S29: case S30: case S31: {
- if (isSPVFP)
- *isSPVFP = true;
- switch (RegEnum) {
- default: return 0; // Avoid compile time warning.
- case S0: return 0;
- case S1: return 1;
- case S2: return 2;
- case S3: return 3;
- case S4: return 4;
- case S5: return 5;
- case S6: return 6;
- case S7: return 7;
- case S8: return 8;
- case S9: return 9;
- case S10: return 10;
- case S11: return 11;
- case S12: return 12;
- case S13: return 13;
- case S14: return 14;
- case S15: return 15;
- case S16: return 16;
- case S17: return 17;
- case S18: return 18;
- case S19: return 19;
- case S20: return 20;
- case S21: return 21;
- case S22: return 22;
- case S23: return 23;
- case S24: return 24;
- case S25: return 25;
- case S26: return 26;
- case S27: return 27;
- case S28: return 28;
- case S29: return 29;
- case S30: return 30;
- case S31: return 31;
- }
- }
- }
-}
-
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
const ARMSubtarget &sti)
: ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
@@ -180,12 +86,14 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
// FIXME: avoid re-calculating this every time.
BitVector Reserved(getNumRegs());
Reserved.set(ARM::SP);
Reserved.set(ARM::PC);
Reserved.set(ARM::FPSCR);
- if (hasFP(MF))
+ if (TFI->hasFP(MF))
Reserved.set(FramePtr);
if (hasBasePointer(MF))
Reserved.set(BasePtr);
@@ -197,6 +105,8 @@ getReservedRegs(const MachineFunction &MF) const {
bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
unsigned Reg) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
switch (Reg) {
default: break;
case ARM::SP:
@@ -208,7 +118,7 @@ bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
break;
case ARM::R7:
case ARM::R11:
- if (FramePtr == Reg && hasFP(MF))
+ if (FramePtr == Reg && TFI->hasFP(MF))
return true;
break;
case ARM::R9:
@@ -444,6 +354,7 @@ std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
unsigned HintType, unsigned HintReg,
const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
// Alternative register allocation orders when favoring even / odd registers
// of register pairs.
@@ -525,7 +436,7 @@ ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
return std::make_pair(RC->allocation_order_begin(MF),
RC->allocation_order_end(MF));
- if (!hasFP(MF)) {
+ if (!TFI->hasFP(MF)) {
if (!STI.isR9Reserved())
return std::make_pair(GPREven1,
GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
@@ -554,7 +465,7 @@ ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
return std::make_pair(RC->allocation_order_begin(MF),
RC->allocation_order_end(MF));
- if (!hasFP(MF)) {
+ if (!TFI->hasFP(MF)) {
if (!STI.isR9Reserved())
return std::make_pair(GPROdd1,
GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
@@ -606,7 +517,7 @@ ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
Hint.first == (unsigned)ARMRI::RegPairEven) &&
- Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
+ TargetRegisterInfo::isVirtualRegister(Hint.second)) {
// If 'Reg' is one of the even / odd register pair and it's now changed
// (e.g. coalesced) into a different register. The other register of the
// pair allocation hint must be updated to reflect the relationship
@@ -619,23 +530,6 @@ ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
}
}
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register. This is true if the function has variable sized allocas
-/// or if frame pointer elimination is disabled.
-///
-bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
- // Mac OS X requires FP not to be clobbered for backtracing purpose.
- if (STI.isTargetDarwin())
- return true;
-
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- // Always eliminate non-leaf frame pointers.
- return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
- needsStackRealignment(MF) ||
- MFI->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken());
-}
-
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -681,7 +575,7 @@ bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
- unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
F->hasFnAttr(Attribute::StackAlignment));
@@ -697,417 +591,19 @@ cannotEliminateFrame(const MachineFunction &MF) const {
|| needsStackRealignment(MF);
}
-/// estimateStackSize - Estimate and return the size of the frame.
-static unsigned estimateStackSize(MachineFunction &MF) {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- int Offset = 0;
- for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
- int FixedOff = -FFI->getObjectOffset(i);
- if (FixedOff > Offset) Offset = FixedOff;
- }
- for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
- if (FFI->isDeadObjectIndex(i))
- continue;
- Offset += FFI->getObjectSize(i);
- unsigned Align = FFI->getObjectAlignment(i);
- // Adjust to alignment boundary
- Offset = (Offset+Align-1)/Align*Align;
- }
- return (unsigned)Offset;
-}
-
-/// estimateRSStackSizeLimit - Look at each instruction that references stack
-/// frames and return the stack size limit beyond which some of these
-/// instructions will require a scratch register during their expansion later.
-unsigned
-ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned Limit = (1 << 12) - 1;
- for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
- for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- if (!I->getOperand(i).isFI()) continue;
-
- // When using ADDri to get the address of a stack object, 255 is the
- // largest offset guaranteed to fit in the immediate offset.
- if (I->getOpcode() == ARM::ADDri) {
- Limit = std::min(Limit, (1U << 8) - 1);
- break;
- }
-
- // Otherwise check the addressing mode.
- switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
- case ARMII::AddrMode3:
- case ARMII::AddrModeT2_i8:
- Limit = std::min(Limit, (1U << 8) - 1);
- break;
- case ARMII::AddrMode5:
- case ARMII::AddrModeT2_i8s4:
- Limit = std::min(Limit, ((1U << 8) - 1) * 4);
- break;
- case ARMII::AddrModeT2_i12:
- // i12 supports only positive offset so these will be converted to
- // i8 opcodes. See llvm::rewriteT2FrameIndex.
- if (hasFP(MF) && AFI->hasStackFrame())
- Limit = std::min(Limit, (1U << 8) - 1);
- break;
- case ARMII::AddrMode6:
- // Addressing mode 6 (load/store) instructions can't encode an
- // immediate offset for stack references.
- return 0;
- default:
- break;
- }
- break; // At most one FI per instruction
- }
- }
- }
-
- return Limit;
-}
-
-static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
- const ARMBaseInstrInfo &TII) {
- unsigned FnSize = 0;
- for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- const MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
- I != E; ++I)
- FnSize += TII.GetInstSizeInBytes(I);
- }
- return FnSize;
-}
-
-void
-ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- // This tells PEI to spill the FP as if it is any other callee-save register
- // to take advantage the eliminateFrameIndex machinery. This also ensures it
- // is spilled in the order specified by getCalleeSavedRegs() to make it easier
- // to combine multiple loads / stores.
- bool CanEliminateFrame = true;
- bool CS1Spilled = false;
- bool LRSpilled = false;
- unsigned NumGPRSpills = 0;
- SmallVector<unsigned, 4> UnspilledCS1GPRs;
- SmallVector<unsigned, 4> UnspilledCS2GPRs;
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Spill R4 if Thumb2 function requires stack realignment - it will be used as
- // scratch register.
- // FIXME: It will be better just to find spare register here.
- if (needsStackRealignment(MF) &&
- AFI->isThumb2Function())
- MF.getRegInfo().setPhysRegUsed(ARM::R4);
-
- // Spill LR if Thumb1 function uses variable length argument lists.
- if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
-
- // Spill the BasePtr if it's used.
- if (hasBasePointer(MF))
- MF.getRegInfo().setPhysRegUsed(BasePtr);
-
- // Don't spill FP if the frame can be eliminated. This is determined
- // by scanning the callee-save registers to see if any is used.
- const unsigned *CSRegs = getCalleeSavedRegs();
- for (unsigned i = 0; CSRegs[i]; ++i) {
- unsigned Reg = CSRegs[i];
- bool Spilled = false;
- if (MF.getRegInfo().isPhysRegUsed(Reg)) {
- AFI->setCSRegisterIsSpilled(Reg);
- Spilled = true;
- CanEliminateFrame = false;
- } else {
- // Check alias registers too.
- for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
- if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
- Spilled = true;
- CanEliminateFrame = false;
- }
- }
- }
-
- if (!ARM::GPRRegisterClass->contains(Reg))
- continue;
-
- if (Spilled) {
- NumGPRSpills++;
-
- if (!STI.isTargetDarwin()) {
- if (Reg == ARM::LR)
- LRSpilled = true;
- CS1Spilled = true;
- continue;
- }
-
- // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
- switch (Reg) {
- case ARM::LR:
- LRSpilled = true;
- // Fallthrough
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- CS1Spilled = true;
- break;
- default:
- break;
- }
- } else {
- if (!STI.isTargetDarwin()) {
- UnspilledCS1GPRs.push_back(Reg);
- continue;
- }
-
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- UnspilledCS1GPRs.push_back(Reg);
- break;
- default:
- UnspilledCS2GPRs.push_back(Reg);
- break;
- }
- }
- }
-
- bool ForceLRSpill = false;
- if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
- unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
- // Force LR to be spilled if the Thumb function size is > 2048. This enables
- // use of BL to implement far jump. If it turns out that it's not needed
- // then the branch fix up path will undo it.
- if (FnSize >= (1 << 11)) {
- CanEliminateFrame = false;
- ForceLRSpill = true;
- }
- }
-
- // If any of the stack slot references may be out of range of an immediate
- // offset, make sure a register (or a spill slot) is available for the
- // register scavenger. Note that if we're indexing off the frame pointer, the
- // effective stack size is 4 bytes larger since the FP points to the stack
- // slot of the previous FP. Also, if we have variable sized objects in the
- // function, stack slot references will often be negative, and some of
- // our instructions are positive-offset only, so conservatively consider
- // that case to want a spill slot (or register) as well. Similarly, if
- // the function adjusts the stack pointer during execution and the
- // adjustments aren't already part of our stack size estimate, our offset
- // calculations may be off, so be conservative.
- // FIXME: We could add logic to be more precise about negative offsets
- // and which instructions will need a scratch register for them. Is it
- // worth the effort and added fragility?
- bool BigStack =
- (RS &&
- (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
- estimateRSStackSizeLimit(MF)))
- || MFI->hasVarSizedObjects()
- || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
-
- bool ExtraCSSpill = false;
- if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
- AFI->setHasStackFrame(true);
-
- // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
- // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
- if (!LRSpilled && CS1Spilled) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
- AFI->setCSRegisterIsSpilled(ARM::LR);
- NumGPRSpills++;
- UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
- UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
- ForceLRSpill = false;
- ExtraCSSpill = true;
- }
-
- if (hasFP(MF)) {
- MF.getRegInfo().setPhysRegUsed(FramePtr);
- NumGPRSpills++;
- }
-
- // If stack and double are 8-byte aligned and we are spilling an odd number
- // of GPRs. Spill one extra callee save GPR so we won't have to pad between
- // the integer and double callee save areas.
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- if (TargetAlign == 8 && (NumGPRSpills & 1)) {
- if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
- for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
- unsigned Reg = UnspilledCS1GPRs[i];
- // Don't spill high register if the function is thumb1
- if (!AFI->isThumb1OnlyFunction() ||
- isARMLowRegister(Reg) || Reg == ARM::LR) {
- MF.getRegInfo().setPhysRegUsed(Reg);
- AFI->setCSRegisterIsSpilled(Reg);
- if (!isReservedReg(MF, Reg))
- ExtraCSSpill = true;
- break;
- }
- }
- } else if (!UnspilledCS2GPRs.empty() &&
- !AFI->isThumb1OnlyFunction()) {
- unsigned Reg = UnspilledCS2GPRs.front();
- MF.getRegInfo().setPhysRegUsed(Reg);
- AFI->setCSRegisterIsSpilled(Reg);
- if (!isReservedReg(MF, Reg))
- ExtraCSSpill = true;
- }
- }
-
- // Estimate if we might need to scavenge a register at some point in order
- // to materialize a stack offset. If so, either spill one additional
- // callee-saved register or reserve a special spill slot to facilitate
- // register scavenging. Thumb1 needs a spill slot for stack pointer
- // adjustments also, even when the frame itself is small.
- if (BigStack && !ExtraCSSpill) {
- // If any non-reserved CS register isn't spilled, just spill one or two
- // extra. That should take care of it!
- unsigned NumExtras = TargetAlign / 4;
- SmallVector<unsigned, 2> Extras;
- while (NumExtras && !UnspilledCS1GPRs.empty()) {
- unsigned Reg = UnspilledCS1GPRs.back();
- UnspilledCS1GPRs.pop_back();
- if (!isReservedReg(MF, Reg) &&
- (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
- Reg == ARM::LR)) {
- Extras.push_back(Reg);
- NumExtras--;
- }
- }
- // For non-Thumb1 functions, also check for hi-reg CS registers
- if (!AFI->isThumb1OnlyFunction()) {
- while (NumExtras && !UnspilledCS2GPRs.empty()) {
- unsigned Reg = UnspilledCS2GPRs.back();
- UnspilledCS2GPRs.pop_back();
- if (!isReservedReg(MF, Reg)) {
- Extras.push_back(Reg);
- NumExtras--;
- }
- }
- }
- if (Extras.size() && NumExtras == 0) {
- for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
- MF.getRegInfo().setPhysRegUsed(Extras[i]);
- AFI->setCSRegisterIsSpilled(Extras[i]);
- }
- } else if (!AFI->isThumb1OnlyFunction()) {
- // note: Thumb1 functions spill to R12, not the stack. Reserve a slot
- // closest to SP or frame pointer.
- const TargetRegisterClass *RC = ARM::GPRRegisterClass;
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
- }
- }
-
- if (ForceLRSpill) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
- AFI->setCSRegisterIsSpilled(ARM::LR);
- AFI->setLRIsSpilledForFarJump(true);
- }
-}
-
unsigned ARMBaseRegisterInfo::getRARegister() const {
return ARM::LR;
}
unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- if (hasFP(MF))
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (TFI->hasFP(MF))
return FramePtr;
return ARM::SP;
}
-// Provide a base+offset reference to an FI slot for debug info. It's the
-// same as what we use for resolving the code-gen references for now.
-// FIXME: This can go wrong when references are SP-relative and simple call
-// frames aren't used.
-int
-ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const {
- return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
-}
-
-int
-ARMBaseRegisterInfo::ResolveFrameIndexReference(const MachineFunction &MF,
- int FI,
- unsigned &FrameReg,
- int SPAdj) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
- int FPOffset = Offset - AFI->getFramePtrSpillOffset();
- bool isFixed = MFI->isFixedObjectIndex(FI);
-
- FrameReg = ARM::SP;
- Offset += SPAdj;
- if (AFI->isGPRCalleeSavedArea1Frame(FI))
- return Offset - AFI->getGPRCalleeSavedArea1Offset();
- else if (AFI->isGPRCalleeSavedArea2Frame(FI))
- return Offset - AFI->getGPRCalleeSavedArea2Offset();
- else if (AFI->isDPRCalleeSavedAreaFrame(FI))
- return Offset - AFI->getDPRCalleeSavedAreaOffset();
-
- // When dynamically realigning the stack, use the frame pointer for
- // parameters, and the stack/base pointer for locals.
- if (needsStackRealignment(MF)) {
- assert (hasFP(MF) && "dynamic stack realignment without a FP!");
- if (isFixed) {
- FrameReg = getFrameRegister(MF);
- Offset = FPOffset;
- } else if (MFI->hasVarSizedObjects()) {
- assert(hasBasePointer(MF) &&
- "VLAs and dynamic stack alignment, but missing base pointer!");
- FrameReg = BasePtr;
- }
- return Offset;
- }
-
- // If there is a frame pointer, use it when we can.
- if (hasFP(MF) && AFI->hasStackFrame()) {
- // Use frame pointer to reference fixed objects. Use it for locals if
- // there are VLAs (and thus the SP isn't reliable as a base).
- if (isFixed || (MFI->hasVarSizedObjects() && !hasBasePointer(MF))) {
- FrameReg = getFrameRegister(MF);
- return FPOffset;
- } else if (MFI->hasVarSizedObjects()) {
- assert(hasBasePointer(MF) && "missing base pointer!");
- // Use the base register since we have it.
- FrameReg = BasePtr;
- } else if (AFI->isThumb2Function()) {
- // In Thumb2 mode, the negative offset is very limited. Try to avoid
- // out of range references.
- if (FPOffset >= -255 && FPOffset < 0) {
- FrameReg = getFrameRegister(MF);
- return FPOffset;
- }
- } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
- // Otherwise, use SP or FP, whichever is closer to the stack slot.
- FrameReg = getFrameRegister(MF);
- return FPOffset;
- }
- }
- // Use the base pointer if we have one.
- if (hasBasePointer(MF))
- FrameReg = BasePtr;
- return Offset;
-}
-
-int
-ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
- int FI) const {
- unsigned FrameReg;
- return getFrameIndexReference(MF, FI, FrameReg);
-}
-
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
return 0;
@@ -1320,7 +816,7 @@ emitLoadConstPool(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
.addReg(DestReg, getDefRegState(true), SubIdx)
.addConstantPoolIndex(Idx)
- .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
+ .addImm(0).addImm(Pred).addReg(PredReg);
}
bool ARMBaseRegisterInfo::
@@ -1338,34 +834,6 @@ requiresVirtualBaseRegisters(const MachineFunction &MF) const {
return EnableLocalStackAlloc;
}
-// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
-// not required, we reserve argument space for call sites in the function
-// immediately on entry to the current function. This eliminates the need for
-// add/sub sp brackets around call sites. Returns true if the call frame is
-// included as part of the stack frame.
-bool ARMBaseRegisterInfo::
-hasReservedCallFrame(const MachineFunction &MF) const {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- unsigned CFSize = FFI->getMaxCallFrameSize();
- // It's not always a good idea to include the call frame as part of the
- // stack frame. ARM (especially Thumb) has small immediate offset to
- // address the stack frame. So a large call frame can cause poor codegen
- // and may even makes it impossible to scavenge a register.
- if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
- return false;
-
- return !MF.getFrameInfo()->hasVarSizedObjects();
-}
-
-// canSimplifyCallFramePseudos - If there is a reserved call frame, the
-// call frame pseudos can be simplified. Unlike most targets, having a FP
-// is not sufficient here since we still may reference some objects via SP
-// even when FP is available in Thumb2 mode.
-bool ARMBaseRegisterInfo::
-canSimplifyCallFramePseudos(const MachineFunction &MF) const {
- return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
-}
-
static void
emitSPUpdate(bool isARM,
MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
@@ -1384,7 +852,8 @@ emitSPUpdate(bool isARM,
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ if (!TFI->hasReservedCallFrame(MF)) {
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
@@ -1395,7 +864,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned Align = TFI->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -1433,8 +902,7 @@ getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
switch (AddrMode) {
case ARMII::AddrModeT2_i8:
case ARMII::AddrModeT2_i12:
- // i8 supports only negative, and i12 supports only positive, so
- // based on Offset sign, consider the appropriate instruction
+ case ARMII::AddrMode_i12:
InstrOffs = MI->getOperand(Idx+1).getImm();
Scale = 1;
break;
@@ -1496,8 +964,8 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// return false for everything else.
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case ARM::LDR: case ARM::LDRH: case ARM::LDRB:
- case ARM::STR: case ARM::STRH: case ARM::STRB:
+ case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
+ case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
case ARM::t2LDRi12: case ARM::t2LDRi8:
case ARM::t2STRi12: case ARM::t2STRi8:
case ARM::VLDRS: case ARM::VLDRD:
@@ -1516,6 +984,7 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// Note that the incoming offset is based on the SP value at function entry,
// so it'll be negative.
MachineFunction &MF = *MI->getParent()->getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -1542,8 +1011,8 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// The FP is only available if there is no dynamic realignment. We
// don't know for sure yet whether we'll need that, so we guess based
// on whether there are any local variables that would trigger it.
- unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- if (hasFP(MF) &&
+ unsigned StackAlign = TFI->getStackAlignment();
+ if (TFI->hasFP(MF) &&
!((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
if (isFrameOffsetLegal(MI, FPOffset))
return false;
@@ -1560,19 +1029,25 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
return true;
}
-/// materializeFrameBaseRegister - Insert defining instruction(s) for
-/// BaseReg to be a pointer to FrameIdx before insertion point I.
+/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
+/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
-materializeFrameBaseRegister(MachineBasicBlock::iterator I, unsigned BaseReg,
- int FrameIdx, int64_t Offset) const {
- ARMFunctionInfo *AFI =
- I->getParent()->getParent()->getInfo<ARMFunctionInfo>();
+materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ unsigned BaseReg, int FrameIdx,
+ int64_t Offset) const {
+ ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
(AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
+ MachineBasicBlock::iterator Ins = MBB->begin();
+ DebugLoc DL; // Defaults to "unknown"
+ if (Ins != MBB->end())
+ DL = Ins->getDebugLoc();
+
MachineInstrBuilder MIB =
- BuildMI(*I->getParent(), I, I->getDebugLoc(), TII.get(ADDriOpc), BaseReg)
+ BuildMI(*MBB, Ins, DL, TII.get(ADDriOpc), BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
+
if (!AFI->isThumb1OnlyFunction())
AddDefaultCC(AddDefaultPred(MIB));
}
@@ -1640,6 +1115,7 @@ bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
NumBits = 8;
Scale = 4;
break;
+ case ARMII::AddrMode_i12:
case ARMII::AddrMode2:
NumBits = 12;
break;
@@ -1679,6 +1155,8 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const ARMFrameLowering *TFI =
+ static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
assert(!AFI->isThumb1OnlyFunction() &&
"This eliminateFrameIndex does not support Thumb1!");
@@ -1691,7 +1169,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(i).getIndex();
unsigned FrameReg;
- int Offset = ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
+ int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
// Special handling of dbg_value instructions.
if (MI.isDebugValue()) {
@@ -1737,339 +1215,13 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
Offset, Pred, PredReg, TII);
}
+ // Update the original instruction to use the scratch register.
MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
+ if (MI.getOpcode() == ARM::t2ADDrSPi)
+ MI.setDesc(TII.get(ARM::t2ADDri));
+ else if (MI.getOpcode() == ARM::t2SUBrSPi)
+ MI.setDesc(TII.get(ARM::t2SUBri));
}
}
-/// Move iterator past the next bunch of callee save load / store ops for
-/// the particular spill area (1: integer area 1, 2: integer area 2,
-/// 3: fp area, 0: don't care).
-static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- int Opc1, int Opc2, unsigned Area,
- const ARMSubtarget &STI) {
- while (MBBI != MBB.end() &&
- ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
- MBBI->getOperand(1).isFI()) {
- if (Area != 0) {
- bool Done = false;
- unsigned Category = 0;
- switch (MBBI->getOperand(0).getReg()) {
- case ARM::R4: case ARM::R5: case ARM::R6: case ARM::R7:
- case ARM::LR:
- Category = 1;
- break;
- case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
- Category = STI.isTargetDarwin() ? 2 : 1;
- break;
- case ARM::D8: case ARM::D9: case ARM::D10: case ARM::D11:
- case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
- Category = 3;
- break;
- default:
- Done = true;
- break;
- }
- if (Done || Category != Area)
- break;
- }
-
- ++MBBI;
- }
-}
-
-void ARMBaseRegisterInfo::
-emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This emitPrologue does not support Thumb1!");
- bool isARM = !AFI->isThumbFunction();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- unsigned NumBytes = MFI->getStackSize();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Determine the sizes of each callee-save spill areas and record which frame
- // belongs to which callee-save spill areas.
- unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
- int FramePtrSpillFI = 0;
-
- // Allocate the vararg register save area. This is not counted in NumBytes.
- if (VARegSaveSize)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
- return;
- }
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- int FI = CSI[i].getFrameIdx();
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
- GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- }
- break;
- default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
- DPRCSSize += 8;
- }
- }
-
- // Build the new SUBri to adjust SP for integer callee-save spill area 1.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
- movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);
-
- // Set FP to point to the stack slot that contains the previous FP.
- // For Darwin, FP is R7, which has now been stored in spill area 1.
- // Otherwise, if this is not Darwin, all the callee-saved registers go
- // into spill area 1, including the FP in R11. In either case, it is
- // now safe to emit this assignment.
- bool HasFP = hasFP(MF);
- if (HasFP) {
- unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0);
- AddDefaultCC(AddDefaultPred(MIB));
- }
-
- // Build the new SUBri to adjust SP for integer callee-save spill area 2.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);
-
- // Build the new SUBri to adjust SP for FP callee-save spill area.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);
-
- // Determine starting offsets of spill areas.
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
- unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
- unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
- if (HasFP)
- AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
- NumBytes);
- AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
- AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
- AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
-
- movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
- NumBytes = DPRCSOffset;
- if (NumBytes) {
- // Adjust SP after all the callee-save spills.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
- if (HasFP)
- AFI->setShouldRestoreSPFromFP(true);
- }
-
- if (STI.isTargetELF() && hasFP(MF)) {
- MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
- AFI->getFramePtrSpillOffset());
- AFI->setShouldRestoreSPFromFP(true);
- }
-
- AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
- AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
- AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
-
- // If we need dynamic stack realignment, do it here. Be paranoid and make
- // sure if we also have VLAs, we have a base pointer for frame access.
- if (needsStackRealignment(MF)) {
- unsigned MaxAlign = MFI->getMaxAlignment();
- assert (!AFI->isThumb1OnlyFunction());
- if (!AFI->isThumbFunction()) {
- // Emit bic sp, sp, MaxAlign
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::BICri), ARM::SP)
- .addReg(ARM::SP, RegState::Kill)
- .addImm(MaxAlign-1)));
- } else {
- // We cannot use sp as source/dest register here, thus we're emitting the
- // following sequence:
- // mov r4, sp
- // bic r4, r4, MaxAlign
- // mov sp, r4
- // FIXME: It will be better just to find spare register here.
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
- .addReg(ARM::SP, RegState::Kill);
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::t2BICri), ARM::R4)
- .addReg(ARM::R4, RegState::Kill)
- .addImm(MaxAlign-1)));
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
- .addReg(ARM::R4, RegState::Kill);
- }
-
- AFI->setShouldRestoreSPFromFP(true);
- }
-
- // If we need a base pointer, set it up here. It's whatever the value
- // of the stack pointer is at this point. Any variable size objects
- // will be allocated after this, so we can still use the base pointer
- // to reference locals.
- if (hasBasePointer(MF)) {
- if (isARM)
- BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), BasePtr)
- .addReg(ARM::SP)
- .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
- else
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr)
- .addReg(ARM::SP);
- }
-
- // If the frame has variable sized objects then the epilogue must restore
- // the sp from fp.
- if (!AFI->shouldRestoreSPFromFP() && MFI->hasVarSizedObjects())
- AFI->setShouldRestoreSPFromFP(true);
-}
-
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
-static bool isCSRestore(MachineInstr *MI,
- const ARMBaseInstrInfo &TII,
- const unsigned *CSRegs) {
- return ((MI->getOpcode() == (int)ARM::VLDRD ||
- MI->getOpcode() == (int)ARM::LDR ||
- MI->getOpcode() == (int)ARM::t2LDRi12) &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
-}
-
-void ARMBaseRegisterInfo::
-emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert(MBBI->getDesc().isReturn() &&
- "Can only insert epilog into returning blocks");
- unsigned RetOpcode = MBBI->getOpcode();
- DebugLoc dl = MBBI->getDebugLoc();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This emitEpilogue does not support Thumb1!");
- bool isARM = !AFI->isThumbFunction();
-
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- int NumBytes = (int)MFI->getStackSize();
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
- } else {
- // Unwind MBBI to point to first LDR / VLDRD.
- const unsigned *CSRegs = getCalleeSavedRegs();
- if (MBBI != MBB.begin()) {
- do
- --MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
- if (!isCSRestore(MBBI, TII, CSRegs))
- ++MBBI;
- }
-
- // Move SP to start of FP callee save spill area.
- NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
- AFI->getGPRCalleeSavedArea2Size() +
- AFI->getDPRCalleeSavedAreaSize());
-
- // Reset SP based on frame pointer only if the stack frame extends beyond
- // frame pointer stack slot or target is ELF and the function has FP.
- if (AFI->shouldRestoreSPFromFP()) {
- NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
- if (NumBytes) {
- if (isARM)
- emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
- ARMCC::AL, 0, TII);
- else
- emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
- ARMCC::AL, 0, TII);
- } else {
- // Thumb2 or ARM.
- if (isARM)
- BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
- .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
- else
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
- .addReg(FramePtr);
- }
- } else if (NumBytes)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
-
- // Move SP to start of integer callee save spill area 2.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());
-
- // Move SP to start of integer callee save spill area 1.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());
-
- // Move SP to SP upon entry to the function.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
- }
-
- if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
- RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
- // Tail call return: adjust the stack pointer and jump to callee.
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
-
- // Jump to label or value in register.
- if (RetOpcode == ARM::TCRETURNdi) {
- BuildMI(MBB, MBBI, dl,
- TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
- JumpTarget.getTargetFlags());
- } else if (RetOpcode == ARM::TCRETURNdiND) {
- BuildMI(MBB, MBBI, dl,
- TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
- JumpTarget.getTargetFlags());
- } else if (RetOpcode == ARM::TCRETURNri) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
- addReg(JumpTarget.getReg(), RegState::Kill);
- } else if (RetOpcode == ARM::TCRETURNriND) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
- addReg(JumpTarget.getReg(), RegState::Kill);
- }
-
- MachineInstr *NewMI = prior(MBBI);
- for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
- NewMI->addOperand(MBBI->getOperand(i));
-
- // Delete the pseudo instruction TCRETURN.
- MBB.erase(MBBI);
- }
-
- if (VARegSaveSize)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
-}
-
#include "ARMGenRegisterInfo.inc"
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index fa2eb6c..ba6bd2b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -44,6 +44,45 @@ static inline bool isARMLowRegister(unsigned Reg) {
}
}
+/// isARMArea1Register - Returns true if the register is a low register (r0-r7)
+/// or a stack/pc register that we should push/pop.
+static inline bool isARMArea1Register(unsigned Reg, bool isDarwin) {
+ using namespace ARM;
+ switch (Reg) {
+ case R0: case R1: case R2: case R3:
+ case R4: case R5: case R6: case R7:
+ case LR: case SP: case PC:
+ return true;
+ case R8: case R9: case R10: case R11:
+ // For Darwin we want r7 and lr to be next to each other.
+ return !isDarwin;
+ default:
+ return false;
+ }
+}
+
+static inline bool isARMArea2Register(unsigned Reg, bool isDarwin) {
+ using namespace ARM;
+ switch (Reg) {
+ case R8: case R9: case R10: case R11:
+ // Darwin has this second area.
+ return isDarwin;
+ default:
+ return false;
+ }
+}
+
+static inline bool isARMArea3Register(unsigned Reg, bool isDarwin) {
+ using namespace ARM;
+ switch (Reg) {
+ case D15: case D14: case D13: case D12:
+ case D11: case D10: case D9: case D8:
+ return true;
+ default:
+ return false;
+ }
+}
+
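// Illustrative helper (ours, not part of the patch) showing how the three
// predicates above partition the callee-saved set into the push/pop spill
// areas used by the ARM prologue: area 1 (r0-r7, lr, sp, pc, plus r8-r11 on
// non-Darwin), area 2 (r8-r11 on Darwin, keeping r7 and lr adjacent) and
// area 3 (d8-d15).
static inline unsigned pushAreaOf(unsigned Reg, bool isDarwin) {
  if (isARMArea1Register(Reg, isDarwin)) return 1;
  if (isARMArea2Register(Reg, isDarwin)) return 2;
  if (isARMArea3Register(Reg, isDarwin)) return 3;
  return 0; // not a callee-saved register handled by these predicates
}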
class ARMBaseRegisterInfo : public ARMGenRegisterInfo {
protected:
const ARMBaseInstrInfo &TII;
@@ -65,12 +104,6 @@ protected:
unsigned getOpcode(int Op) const;
public:
- /// getRegisterNumbering - Given the enum value for some register, e.g.
- /// ARM::LR, return the number that it corresponds to (e.g. 14). It
- /// also returns true in isSPVFP if the register is a single precision
- /// VFP register.
- static unsigned getRegisterNumbering(unsigned RegEnum, bool *isSPVFP = 0);
-
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
@@ -106,14 +139,13 @@ public:
void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
bool needsStackRealignment(const MachineFunction &MF) const;
int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const;
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const;
- void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+ void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
int64_t Offset) const;
void resolveFrameIndex(MachineBasicBlock::iterator I,
@@ -122,17 +154,10 @@ public:
bool cannotEliminateFrame(const MachineFunction &MF) const;
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
- int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const;
- int ResolveFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg, int SPAdj) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ unsigned getBaseRegister() const { return BasePtr; }
// Exception handling queries.
unsigned getEHExceptionRegister() const;
@@ -162,9 +187,6 @@ public:
virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const;
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const;
- virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const;
-
virtual void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -172,12 +194,7 @@ public:
virtual void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
private:
- unsigned estimateRSStackSizeLimit(MachineFunction &MF) const;
-
unsigned getRegisterPairEven(unsigned Reg, const MachineFunction &MF) const;
unsigned getRegisterPairOdd(unsigned Reg, const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h b/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
index 3b38375..69eddf0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file contains enumerations and support routines for ARM build attributes
-// as defined in ARM ABI addenda document (ABI release 2.07).
+// as defined in ARM ABI addenda document (ABI release 2.08).
//
//===----------------------------------------------------------------------===//
@@ -16,7 +16,14 @@
#define __TARGET_ARMBUILDATTRS_H__
namespace ARMBuildAttrs {
- enum {
+ enum SpecialAttr {
+ // This is for the .cpu asm attr. It translates into one or more
+ // AttrType (below) entries in the .ARM.attributes section of the ELF file.
+ SEL_CPU
+ };
+
+ enum AttrType {
+ // Rest correspond to ELF/.ARM.attributes
File = 1,
Section = 2,
Symbol = 3,
@@ -52,12 +59,72 @@ namespace ARMBuildAttrs {
CPU_unaligned_access = 34,
VFP_HP_extension = 36,
ABI_FP_16bit_format = 38,
+ MPextension_use = 42, // was 70, 2.08 ABI
+ DIV_use = 44,
nodefaults = 64,
also_compatible_with = 65,
T2EE_use = 66,
conformance = 67,
Virtualization_use = 68,
- MPextension_use = 70
+ MPextension_use_old = 70
+ };
+
+ // Magic numbers for .ARM.attributes
+ enum AttrMagic {
+ Format_Version = 0x41
+ };
+
+ // Legal Values for CPU_arch, (=6), uleb128
+ enum CPUArch {
+ Pre_v4 = 0,
+ v4 = 1, // e.g. SA110
+ v4T = 2, // e.g. ARM7TDMI
+ v5T = 3, // e.g. ARM9TDMI
+ v5TE = 4, // e.g. ARM946E_S
+ v5TEJ = 5, // e.g. ARM926EJ_S
+ v6 = 6, // e.g. ARM1136J_S
+ v6KZ = 7, // e.g. ARM1176JZ_S
+ v6T2 = 8, // e.g. ARM1156T2F_S
+ v6K = 9, // e.g. ARM1136J_S
+ v7 = 10, // e.g. Cortex A8, Cortex M3
+ v6_M = 11, // e.g. Cortex M1
+ v6S_M = 12, // v6_M with the System extensions
+ v7E_M = 13 // v7_M with DSP extensions
+ };
+
+ enum CPUArchProfile { // (=7), uleb128
+ Not_Applicable = 0, // pre v7, or cross-profile code
+ ApplicationProfile = (0x41), // 'A' (e.g. for Cortex A8)
+ RealTimeProfile = (0x52), // 'R' (e.g. for Cortex R4)
+ MicroControllerProfile = (0x4D), // 'M' (e.g. for Cortex M3)
+ SystemProfile = (0x53) // 'S' Application or real-time profile
+ };
+
+ // The following have a lot of common use cases
+ enum {
+ // ARMISAUse (=8), uleb128 and THUMBISAUse (=9), uleb128
+ Not_Allowed = 0,
+ Allowed = 1,
+
+ // FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
+ AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA)
+ AllowFPv3A = 3, // v3 FP ISA permitted (implies use of the v2 FP ISA)
+ AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31
+ AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA)
+ AllowFPv4B = 6, // v4 FP ISA permitted, but only D0-D15, S0-S31
+
+ // Tag_WMMX_arch, (=11), uleb128
+ AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
+
+ // Tag_WMMX_arch, (=11), uleb128
+ AllowWMMXv1 = 2, // The user permitted this entity to use WMMX v2
+
+ // Tag_ABI_FP_denormal, (=20), uleb128
+ PreserveFPSign = 2, // sign when flushed-to-zero is preserved
+
+ // Tag_ABI_FP_number_model, (=23), uleb128
+ AllowRTABI = 2, // numbers, infinities, and one quiet NaN (see [RTABI])
+ AllowIEE754 = 3 // permitted this code to use all the IEEE 754-defined FP encodings
};
}
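For readers unfamiliar with the on-disk layout these enums feed: a .ARM.attributes section begins with the Format_Version byte (0x41, i.e. 'A'), and the tag/value pairs that follow are uleb128-encoded. A minimal illustrative sketch, not LLVM's emitter; emitULEB128 is a made-up helper for the example:

    // Illustrative sketch: encode one (tag, value) build-attribute pair.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    static void emitULEB128(std::vector<uint8_t> &Out, uint64_t V) {
      do {
        uint8_t Byte = V & 0x7f;
        V >>= 7;
        if (V)
          Byte |= 0x80; // more bytes follow
        Out.push_back(Byte);
      } while (V);
    }

    int main() {
      std::vector<uint8_t> Section;
      Section.push_back(0x41);      // Format_Version ('A')
      emitULEB128(Section, 6);      // Tag: CPU_arch
      emitULEB128(Section, 10);     // Value: v7
      assert(Section.size() == 3);  // small tags/values fit in one byte each
      return 0;
    }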
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
new file mode 100644
index 0000000..ff7db1f
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -0,0 +1,160 @@
+//===-- ARMCallingConv.h - ARM Custom Calling Convention Routines ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the custom routines for the ARM Calling Convention that
+// aren't done by tablegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMCALLINGCONV_H
+#define ARMCALLINGCONV_H
+
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMRegisterInfo.h"
+#include "ARMSubtarget.h"
+#include "ARM.h"
+
+namespace llvm {
+
+// APCS f64 is in register pairs, possibly split to stack
+static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ CCState &State, bool CanFail) {
+ static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+
+ // Try to get the first register.
+ if (unsigned Reg = State.AllocateReg(RegList, 4))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else {
+ // For the 2nd half of a v2f64, do not fail.
+ if (CanFail)
+ return false;
+
+ // Put the whole thing on the stack.
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(8, 4),
+ LocVT, LocInfo));
+ return true;
+ }
+
+ // Try to get the second register.
+ if (unsigned Reg = State.AllocateReg(RegList, 4))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(4, 4),
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
+ return false;
+ if (LocVT == MVT::v2f64 &&
+ !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
+ return false;
+ return true; // we handled it
+}
+
+// AAPCS f64 is in aligned register pairs
+static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ CCState &State, bool CanFail) {
+ static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
+ static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
+ static const unsigned ShadowRegList[] = { ARM::R0, ARM::R1 };
+
+ unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
+ if (Reg == 0) {
+ // For the 2nd half of a v2f64, do not just fail.
+ if (CanFail)
+ return false;
+
+ // Put the whole thing on the stack.
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(8, 8),
+ LocVT, LocInfo));
+ return true;
+ }
+
+ unsigned i;
+ for (i = 0; i < 2; ++i)
+ if (HiRegList[i] == Reg)
+ break;
+
+ unsigned T = State.AllocateReg(LoRegList[i]);
+ (void)T;
+ assert(T == LoRegList[i] && "Could not allocate register");
+
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
+ return false;
+ if (LocVT == MVT::v2f64 &&
+ !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
+ return false;
+ return true; // we handled it
+}
+
+static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo, CCState &State) {
+ static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
+ static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
+
+ unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
+ if (Reg == 0)
+ return false; // we didn't handle it
+
+ unsigned i;
+ for (i = 0; i < 2; ++i)
+ if (HiRegList[i] == Reg)
+ break;
+
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
+ return false;
+ if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
+ return false;
+ return true; // we handled it
+}
+
+static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+} // End llvm namespace
+
+#endif
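To make the difference between the two f64 conventions concrete, a short worked example with a hypothetical signature (not taken from the source); the assignments follow the allocation order in f64AssignAPCS and f64AssignAAPCS:

    // void f(int a, double b), f64 passed in core registers:
    //   APCS:  a -> r0;  b -> r1 + r2   (the pair need not be aligned)
    //   AAPCS: a -> r0;  b -> r2 + r3   (r1 is shadowed via ShadowRegList
    //                                    so the pair stays even/odd)
    // When no pair is left, both routines fall back to an 8-byte stack
    // slot: 4-byte aligned for APCS, 8-byte aligned for AAPCS. CanFail
    // lets the second half of a v2f64 report failure instead of spilling.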
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index 293e32a..426ba13 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -53,6 +53,34 @@ def RetCC_ARM_APCS : CallingConv<[
]>;
//===----------------------------------------------------------------------===//
+// ARM APCS Calling Convention for FastCC (when VFP2 or later is available)
+//===----------------------------------------------------------------------===//
+def FastCC_ARM_APCS : CallingConv<[
+ // Handle all vector types as either f64 or v2f64.
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+
+ CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
+ CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
+ CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
+ S9, S10, S11, S12, S13, S14, S15]>>,
+ CCDelegateTo<CC_ARM_APCS>
+]>;
+
+def RetFastCC_ARM_APCS : CallingConv<[
+ // Handle all vector types as either f64 or v2f64.
+ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
+ CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+
+ CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
+ CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
+ CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
+ S9, S10, S11, S12, S13, S14, S15]>>,
+ CCDelegateTo<RetCC_ARM_APCS>
+]>;
+
+
+//===----------------------------------------------------------------------===//
// ARM AAPCS (EABI) Calling Convention, common parts
//===----------------------------------------------------------------------===//
@@ -105,6 +133,7 @@ def RetCC_ARM_AAPCS : CallingConv<[
//===----------------------------------------------------------------------===//
// ARM AAPCS-VFP (EABI) Calling Convention
+// Also used for FastCC (when VFP2 or later is available)
//===----------------------------------------------------------------------===//
def CC_ARM_AAPCS_VFP : CallingConv<[
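As a worked illustration of FastCC_ARM_APCS above (hypothetical signature, and assuming CCState's usual alias bookkeeping between the overlapping S, D, and Q registers):

    // fastcc void g(float a, double b, <2 x double> c), VFP2+ available:
    //   a -> S0
    //   b -> D1 (D0 is blocked because it aliases the now-used S0)
    //   c -> Q1 (Q0 aliases D0/D1; D2+D3 under Q1 are still free)
    // Anything that does not fit the VFP register file is delegated to
    // CC_ARM_APCS, i.e. the plain core-register convention.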
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index b1a702f..9bbf6a0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -74,7 +74,7 @@ namespace {
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
/// machine instructions.
- unsigned getBinaryCodeForInstr(const MachineInstr &MI);
+ unsigned getBinaryCodeForInstr(const MachineInstr &MI) const;
bool runOnMachineFunction(MachineFunction &MF);
@@ -101,7 +101,6 @@ namespace {
unsigned OpIdx);
unsigned getMachineSoImmOpValue(unsigned SoImm);
-
unsigned getAddrModeSBit(const MachineInstr &MI,
const TargetInstrDesc &TID) const;
@@ -140,8 +139,6 @@ namespace {
void emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI);
- void emitMiscInstruction(const MachineInstr &MI);
-
void emitNEONLaneInstruction(const MachineInstr &MI);
void emitNEONDupInstruction(const MachineInstr &MI);
void emitNEON1RegModImmInstruction(const MachineInstr &MI);
@@ -150,20 +147,176 @@ namespace {
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MachineInstr &MI,const MachineOperand &MO);
- unsigned getMachineOpValue(const MachineInstr &MI, unsigned OpIdx) {
+ unsigned getMachineOpValue(const MachineInstr &MI,
+ const MachineOperand &MO) const;
+ unsigned getMachineOpValue(const MachineInstr &MI, unsigned OpIdx) const {
return getMachineOpValue(MI, MI.getOperand(OpIdx));
}
+ // FIXME: The legacy JIT ARMCodeEmitter doesn't rely on the
+ // TableGen'erated getBinaryCodeForInstr() function to encode any
+ // operand values, instead querying getMachineOpValue() directly for
+ // each operand it needs to encode. Thus, any of the new encoder
+ // helper functions can simply return 0, as the values they return
+ // are already handled elsewhere. They are placeholders to allow this
+ // encoder to continue to function until the MC encoder is sufficiently
+ // far along that this one can be eliminated entirely.
+ unsigned NEONThumb2DataIPostEncoder(const MachineInstr &MI, unsigned Val)
+ const { return 0; }
+ unsigned NEONThumb2LoadStorePostEncoder(const MachineInstr &MI, unsigned Val)
+ const { return 0; }
+ unsigned NEONThumb2DupPostEncoder(const MachineInstr &MI, unsigned Val)
+ const { return 0; }
+ unsigned VFPThumb2PostEncoder(const MachineInstr &MI, unsigned Val)
+ const { return 0; }
+ unsigned getAdrLabelOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbAdrLabelOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbBLTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbBLXTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbBRTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbBCCTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbCBTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getUnconditionalBranchTargetOpValue(const MachineInstr &MI,
+ unsigned Op) const { return 0; }
+ unsigned getARMBranchTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getCCOutOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getSOImmOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2SOImmOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getSORegOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getThumbAddrModeRegRegOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeImm12OpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeImm8OpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeImm8s4OpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeImm8OffsetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeImm12OffsetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AddrModeSORegOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2SORegOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getRotImmOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getImmMinusOneOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getT2AdrLabelOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getAddrMode6AddressOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getAddrMode6DupAddressOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getAddrMode6OffsetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ unsigned getBitfieldInvertedMaskOpValue(const MachineInstr &MI,
+ unsigned Op) const { return 0; }
+ unsigned getMsbOpValue(const MachineInstr &MI,
+ unsigned Op) const { return 0; }
+ uint32_t getLdStmModeOpValue(const MachineInstr &MI, unsigned OpIdx)
+ const { return 0; }
+ uint32_t getLdStSORegOpValue(const MachineInstr &MI, unsigned OpIdx)
+ const { return 0; }
+
+ unsigned getAddrModeImm12OpValue(const MachineInstr &MI, unsigned Op)
+ const {
+ // {17-13} = reg
+ // {12} = (U)nsigned (add == '1', sub == '0')
+ // {11-0} = imm12
+ const MachineOperand &MO = MI.getOperand(Op);
+ const MachineOperand &MO1 = MI.getOperand(Op + 1);
+ if (!MO.isReg()) {
+ emitConstPoolAddress(MO.getIndex(), ARM::reloc_arm_cp_entry);
+ return 0;
+ }
+ unsigned Reg = getARMRegisterNumbering(MO.getReg());
+ int32_t Imm12 = MO1.getImm();
+ uint32_t Binary;
+ Binary = Imm12 & 0xfff;
+ if (Imm12 >= 0)
+ Binary |= (1 << 12);
+ Binary |= (Reg << 13);
+ return Binary;
+ }
+
+ unsigned getHiLo16ImmOpValue(const MachineInstr &MI, unsigned Op) const {
+ return 0;
+ }
+
+ uint32_t getAddrMode2OpValue(const MachineInstr &MI, unsigned OpIdx)
+ const { return 0; }
+ uint32_t getAddrMode2OffsetOpValue(const MachineInstr &MI, unsigned OpIdx)
+ const { return 0; }
+ uint32_t getAddrMode3OffsetOpValue(const MachineInstr &MI, unsigned OpIdx)
+ const { return 0; }
+ uint32_t getAddrMode3OpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ uint32_t getAddrModeThumbSPOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ uint32_t getAddrModeSOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ uint32_t getAddrModeISOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ uint32_t getAddrModePCOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+ uint32_t getAddrMode5OpValue(const MachineInstr &MI, unsigned Op) const {
+ // {17-13} = reg
+ // {12} = (U)nsigned (add == '1', sub == '0')
+ // {11-0} = imm12
+ const MachineOperand &MO = MI.getOperand(Op);
+ const MachineOperand &MO1 = MI.getOperand(Op + 1);
+ if (!MO.isReg()) {
+ emitConstPoolAddress(MO.getIndex(), ARM::reloc_arm_cp_entry);
+ return 0;
+ }
+ unsigned Reg = getARMRegisterNumbering(MO.getReg());
+ int32_t Imm12 = MO1.getImm();
+
+ // Special value for #-0
+ if (Imm12 == INT32_MIN)
+ Imm12 = 0;
+
+ // Immediate is always encoded as positive. The 'U' bit controls add vs
+ // sub.
+ bool isAdd = true;
+ if (Imm12 < 0) {
+ Imm12 = -Imm12;
+ isAdd = false;
+ }
+
+ uint32_t Binary = Imm12 & 0xfff;
+ if (isAdd)
+ Binary |= (1 << 12);
+ Binary |= (Reg << 13);
+ return Binary;
+ }
+ unsigned getNEONVcvtImm32OpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+
+ unsigned getRegisterListOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
+
/// getMovi32Value - Return binary encoding of operand for movw/movt. If the
/// machine operand requires relocation, record the relocation and return
/// zero.
unsigned getMovi32Value(const MachineInstr &MI,const MachineOperand &MO,
unsigned Reloc);
- unsigned getMovi32Value(const MachineInstr &MI, unsigned OpIdx,
- unsigned Reloc) {
- return getMovi32Value(MI, MI.getOperand(OpIdx), Reloc);
- }
/// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
///
@@ -173,12 +326,12 @@ namespace {
/// fixed up by the relocation stage.
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
bool MayNeedFarStub, bool Indirect,
- intptr_t ACPV = 0);
- void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
- void emitConstPoolAddress(unsigned CPI, unsigned Reloc);
- void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc);
+ intptr_t ACPV = 0) const;
+ void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
+ void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
+ void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
void emitMachineBasicBlock(MachineBasicBlock *BB, unsigned Reloc,
- intptr_t JTBase = 0);
+ intptr_t JTBase = 0) const;
};
}
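A standalone recomputation of the {17-13}=reg, {12}=U, {11-0}=imm12 packing used by getAddrModeImm12OpValue and getAddrMode5OpValue above, following the addrmode5 variant that folds a negative offset into a cleared U bit; the register and offset are made up for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned Reg = 3;         // r3, register number 3
      int32_t Imm12 = -8;       // byte offset
      bool isAdd = Imm12 >= 0;  // the 'U' bit: add vs. subtract
      if (!isAdd)
        Imm12 = -Imm12;
      uint32_t Binary = Imm12 & 0xfff; // {11-0}  = imm12
      if (isAdd)
        Binary |= 1u << 12;            // {12}    = U
      Binary |= Reg << 13;             // {17-13} = reg
      assert(Binary == 0x6008);        // (3 << 13) | 8, U clear for subtract
      return 0;
    }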
@@ -266,9 +419,9 @@ unsigned ARMCodeEmitter::getMovi32Value(const MachineInstr &MI,
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) {
+ const MachineOperand &MO) const {
if (MO.isReg())
- return ARMRegisterInfo::getRegisterNumbering(MO.getReg());
+ return getARMRegisterNumbering(MO.getReg());
else if (MO.isImm())
return static_cast<unsigned>(MO.getImm());
else if (MO.isGlobal())
@@ -285,12 +438,8 @@ unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
emitJumpTableAddress(MO.getIndex(), ARM::reloc_arm_relative);
else if (MO.isMBB())
emitMachineBasicBlock(MO.getMBB(), ARM::reloc_arm_branch);
- else {
-#ifndef NDEBUG
- errs() << MO;
-#endif
- llvm_unreachable(0);
- }
+ else
+ llvm_unreachable("Unable to encode MachineOperand!");
return 0;
}
@@ -298,7 +447,7 @@ unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
///
void ARMCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
bool MayNeedFarStub, bool Indirect,
- intptr_t ACPV) {
+ intptr_t ACPV) const {
MachineRelocation MR = Indirect
? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
const_cast<GlobalValue *>(GV),
@@ -312,7 +461,8 @@ void ARMCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
/// emitExternalSymbolAddress - Arrange for the address of an external symbol to
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
-void ARMCodeEmitter::emitExternalSymbolAddress(const char *ES, unsigned Reloc) {
+void ARMCodeEmitter::
+emitExternalSymbolAddress(const char *ES, unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
Reloc, ES));
}
@@ -320,7 +470,7 @@ void ARMCodeEmitter::emitExternalSymbolAddress(const char *ES, unsigned Reloc) {
/// emitConstPoolAddress - Arrange for the address of an constant pool
/// to be emitted to the current location in the function, and allow it to be PC
/// relative.
-void ARMCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) {
+void ARMCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) const {
// Tell JIT emitter we'll resolve the address.
MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
Reloc, CPI, 0, true));
@@ -329,14 +479,16 @@ void ARMCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) {
/// emitJumpTableAddress - Arrange for the address of a jump table to
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
-void ARMCodeEmitter::emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) {
+void ARMCodeEmitter::
+emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
Reloc, JTIndex, 0, true));
}
/// emitMachineBasicBlock - Emit the specified address basic block.
void ARMCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
- unsigned Reloc, intptr_t JTBase) {
+ unsigned Reloc,
+ intptr_t JTBase) const {
MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
Reloc, BB, JTBase));
}
@@ -364,6 +516,14 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
llvm_unreachable("Unhandled instruction encoding format!");
break;
}
+ case ARMII::MiscFrm:
+ if (MI.getOpcode() == ARM::LEApcrelJT) {
+ // Materialize jumptable address.
+ emitLEApcrelJTInstruction(MI);
+ break;
+ }
+ llvm_unreachable("Unhandled instruction encoding!");
+ break;
case ARMII::Pseudo:
emitPseudoInstruction(MI);
break;
@@ -418,9 +578,7 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
case ARMII::VFPLdStMulFrm:
emitVFPLoadStoreMultipleInstruction(MI);
break;
- case ARMII::VFPMiscFrm:
- emitMiscInstruction(MI);
- break;
+
// NEON instructions.
case ARMII::NGetLnFrm:
case ARMII::NSetLnFrm:
@@ -488,7 +646,7 @@ void ARMCodeEmitter::emitConstPoolInstruction(const MachineInstr &MI) {
emitGlobalAddress(GV, ARM::reloc_arm_absolute, isa<Function>(GV), false);
emitWordLE(0);
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- uint32_t Val = *(uint32_t*)CI->getValue().getRawData();
+ uint32_t Val = uint32_t(*CI->getValue().getRawData());
emitWordLE(Val);
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
if (CFP->getType()->isFloatTy())
@@ -588,7 +746,7 @@ void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
// Emit the 'add' instruction.
- unsigned Binary = 0x4 << 21; // add: Insts{24-31} = 0b0100
+ unsigned Binary = 0x4 << 21; // add: Insts{24-21} = 0b0100
// Set the conditional execution predicate
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
@@ -600,7 +758,7 @@ void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
// Encode Rn which is PC.
- Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
+ Binary |= getARMRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
// Encode the displacement.
Binary |= 1 << ARMII::I_BitShift;
@@ -628,7 +786,7 @@ void ARMCodeEmitter::emitPseudoMoveInstruction(const MachineInstr &MI) {
// Encode the shift operation.
switch (Opcode) {
default: break;
- case ARM::MOVrx:
+ case ARM::RRX:
// rrx
Binary |= 0x6 << 4;
break;
@@ -659,10 +817,10 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
switch (Opcode) {
default:
llvm_unreachable("ARMCodeEmitter::emitPseudoInstruction");
- case ARM::BX:
- case ARM::BMOVPCRX:
- case ARM::BXr9:
- case ARM::BMOVPCRXr9: {
+ case ARM::BX_CALL:
+ case ARM::BMOVPCRX_CALL:
+ case ARM::BXr9_CALL:
+ case ARM::BMOVPCRXr9_CALL: {
// First emit mov lr, pc
unsigned Binary = 0x01a0e00f;
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
@@ -720,18 +878,18 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
}
case ARM::MOVi32imm:
- emitMOVi32immInstruction(MI);
- break;
-
- case ARM::MOVi2pieces:
// Two instructions to materialize a constant.
- emitMOVi2piecesInstruction(MI);
+ if (Subtarget->hasV6T2Ops())
+ emitMOVi32immInstruction(MI);
+ else
+ emitMOVi2piecesInstruction(MI);
break;
+
case ARM::LEApcrelJT:
// Materialize jumptable address.
emitLEApcrelJTInstruction(MI);
break;
- case ARM::MOVrx:
+ case ARM::RRX:
case ARM::MOVsrl_flag:
case ARM::MOVsra_flag:
emitPseudoMoveInstruction(MI);
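The merged ARM::MOVi32imm case above chooses between movw/movt (v6T2 and later) and the two-piece mov/orr fallback. A standalone check, for illustration, that a constant splits both ways; 0x12000034 is made up for the example:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t C = 0x12000034;
      // movw/movt path (v6T2+): low and high 16-bit halves.
      assert((C & 0xffff) == 0x0034 && (C >> 16) == 0x1200);
      // mov/orr path (pre-v6T2): each piece must be an 8-bit value
      // rotated right by an even amount -- 0x12 << 24 and 0x34 qualify.
      assert(C == ((0x12u << 24) | 0x34u));
      return 0;
    }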
@@ -789,8 +947,7 @@ unsigned ARMCodeEmitter::getMachineSoRegOpValue(const MachineInstr &MI,
if (Rs) {
// Encode Rs bit[11:8].
assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
- return Binary |
- (ARMRegisterInfo::getRegisterNumbering(Rs) << ARMII::RegRsShift);
+ return Binary | (getARMRegisterNumbering(Rs) << ARMII::RegRsShift);
}
// Encode shift_imm bit[11:7].
@@ -841,8 +998,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
else if (ImplicitRd)
// Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRd)
- << ARMII::RegRdShift);
+ Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
if (TID.Opcode == ARM::MOVi16) {
// Get immediate from MI.
@@ -892,8 +1048,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
if (!isUnary) {
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
+ Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
else {
Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRnShift;
++OpIdx;
@@ -910,7 +1065,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
if (MO.isReg()) {
// Encode register Rm.
- emitWordLE(Binary | ARMRegisterInfo::getRegisterNumbering(MO.getReg()));
+ emitWordLE(Binary | getARMRegisterNumbering(MO.getReg()));
return;
}
@@ -930,6 +1085,13 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
+ // If this is an LDRi12, STRi12 or LDRcp, nothing more needs be done.
+ if (MI.getOpcode() == ARM::LDRi12 || MI.getOpcode() == ARM::LDRcp ||
+ MI.getOpcode() == ARM::STRi12) {
+ emitWordLE(Binary);
+ return;
+ }
+
// Set the conditional execution predicate
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
@@ -946,16 +1108,14 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
// Set first operand
if (ImplicitRd)
// Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRd)
- << ARMII::RegRdShift);
+ Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
// Set second operand
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
+ Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
@@ -978,11 +1138,11 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
return;
}
- // Set bit I(25), because this is not in immediate enconding.
+ // Set bit I(25), because this is not in immediate encoding.
Binary |= 1 << ARMII::I_BitShift;
assert(TargetRegisterInfo::isPhysicalRegister(MO2.getReg()));
// Set bit[3:0] to the corresponding Rm register
- Binary |= ARMRegisterInfo::getRegisterNumbering(MO2.getReg());
+ Binary |= getARMRegisterNumbering(MO2.getReg());
// If this instr is in scaled register offset/index instruction, set
// shift_immed(bit[11:7]) and shift(bit[6:5]) fields.
@@ -1026,8 +1186,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
// Set second operand
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
+ Binary |= (getARMRegisterNumbering(ImplicitRn) << ARMII::RegRnShift);
else
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
@@ -1046,7 +1205,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
// If this instr is in register offset/index encoding, set bit[3:0]
// to the corresponding Rm register.
if (MO2.getReg()) {
- Binary |= ARMRegisterInfo::getRegisterNumbering(MO2.getReg());
+ Binary |= getARMRegisterNumbering(MO2.getReg());
emitWordLE(Binary);
return;
}
@@ -1100,8 +1259,8 @@ void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
// Set addressing mode by modifying bits U(23) and P(24)
- const MachineOperand &MO = MI.getOperand(OpIdx++);
- Binary |= getAddrModeUPBits(ARM_AM::getAM4SubMode(MO.getImm()));
+ ARM_AM::AMSubMode Mode = ARM_AM::getLoadStoreMultipleSubMode(MI.getOpcode());
+ Binary |= getAddrModeUPBits(ARM_AM::getAM4SubMode(Mode));
// Set bit W(21)
if (IsUpdating)
@@ -1112,7 +1271,7 @@ void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isImplicit())
break;
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(MO.getReg());
+ unsigned RegNum = getARMRegisterNumbering(MO.getReg());
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
RegNum < 16);
Binary |= 0x1 << RegNum;
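The loop above builds the LDM/STM register-list field by setting one bit per physical register number in the low 16 bits of the word. A minimal recomputation, using a made-up register set {r4, r5, lr}:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned Regs[] = {4, 5, 14}; // r4, r5, lr (r14)
      uint32_t Binary = 0;
      for (unsigned i = 0; i != 3; ++i)
        Binary |= 0x1u << Regs[i];
      assert(Binary == 0x4030); // bit 4 | bit 5 | bit 14
      return 0;
    }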
@@ -1349,7 +1508,7 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
if (TID.Opcode == ARM::BX_RET || TID.Opcode == ARM::MOVPCLR)
// The return register is LR.
- Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::LR);
+ Binary |= getARMRegisterNumbering(ARM::LR);
else
// otherwise, set the return register
Binary |= getMachineOpValue(MI, 0);
@@ -1360,8 +1519,8 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegD = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = false;
- RegD = ARMRegisterInfo::getRegisterNumbering(RegD, &isSPVFP);
+ bool isSPVFP = ARM::SPRRegisterClass->contains(RegD);
+ RegD = getARMRegisterNumbering(RegD);
if (!isSPVFP)
Binary |= RegD << ARMII::RegRdShift;
else {
@@ -1374,8 +1533,8 @@ static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) {
static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegN = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = false;
- RegN = ARMRegisterInfo::getRegisterNumbering(RegN, &isSPVFP);
+ bool isSPVFP = ARM::SPRRegisterClass->contains(RegN);
+ RegN = getARMRegisterNumbering(RegN);
if (!isSPVFP)
Binary |= RegN << ARMII::RegRnShift;
else {
@@ -1388,8 +1547,8 @@ static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) {
static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegM = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- bool isSPVFP = false;
- RegM = ARMRegisterInfo::getRegisterNumbering(RegM, &isSPVFP);
+ bool isSPVFP = ARM::SPRRegisterClass->contains(RegM);
+ RegM = getARMRegisterNumbering(RegM);
if (!isSPVFP)
Binary |= RegM;
else {
@@ -1548,8 +1707,8 @@ ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
// Set addressing mode by modifying bits U(23) and P(24)
- const MachineOperand &MO = MI.getOperand(OpIdx++);
- Binary |= getAddrModeUPBits(ARM_AM::getAM4SubMode(MO.getImm()));
+ ARM_AM::AMSubMode Mode = ARM_AM::getLoadStoreMultipleSubMode(MI.getOpcode());
+ Binary |= getAddrModeUPBits(ARM_AM::getAM4SubMode(Mode));
// Set bit W(21)
if (IsUpdating)
@@ -1576,63 +1735,10 @@ ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) {
emitWordLE(Binary);
}
-void ARMCodeEmitter::emitMiscInstruction(const MachineInstr &MI) {
- unsigned Opcode = MI.getDesc().Opcode;
- // Part of binary is determined by TableGn.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- switch(Opcode) {
- default:
- llvm_unreachable("ARMCodeEmitter::emitMiscInstruction");
-
- case ARM::FMSTAT:
- // No further encoding needed.
- break;
-
- case ARM::VMRS:
- case ARM::VMSR: {
- const MachineOperand &MO0 = MI.getOperand(0);
- // Encode Rt.
- Binary |= ARMRegisterInfo::getRegisterNumbering(MO0.getReg())
- << ARMII::RegRdShift;
- break;
- }
-
- case ARM::FCONSTD:
- case ARM::FCONSTS: {
- // Encode Dd / Sd.
- Binary |= encodeVFPRd(MI, 0);
-
- // Encode imm., Table A7-18 VFP modified immediate constants
- const MachineOperand &MO1 = MI.getOperand(1);
- unsigned Imm = static_cast<unsigned>(MO1.getFPImm()->getValueAPF()
- .bitcastToAPInt().getHiBits(32).getLimitedValue());
- unsigned ModifiedImm;
-
- if(Opcode == ARM::FCONSTS)
- ModifiedImm = (Imm & 0x80000000) >> 24 | // a
- (Imm & 0x03F80000) >> 19; // bcdefgh
- else // Opcode == ARM::FCONSTD
- ModifiedImm = (Imm & 0x80000000) >> 24 | // a
- (Imm & 0x007F0000) >> 16; // bcdefgh
-
- // Insts{19-16} = abcd, Insts{3-0} = efgh
- Binary |= ((ModifiedImm & 0xF0) >> 4) << 16;
- Binary |= (ModifiedImm & 0xF);
- break;
- }
- }
-
- emitWordLE(Binary);
-}
-
static unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegD = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegD = ARMRegisterInfo::getRegisterNumbering(RegD);
+ RegD = getARMRegisterNumbering(RegD);
Binary |= (RegD & 0xf) << ARMII::RegRdShift;
Binary |= ((RegD >> 4) & 1) << ARMII::D_BitShift;
return Binary;
@@ -1641,7 +1747,7 @@ static unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) {
static unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegN = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegN = ARMRegisterInfo::getRegisterNumbering(RegN);
+ RegN = getARMRegisterNumbering(RegN);
Binary |= (RegN & 0xf) << ARMII::RegRnShift;
Binary |= ((RegN >> 4) & 1) << ARMII::N_BitShift;
return Binary;
@@ -1650,7 +1756,7 @@ static unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) {
static unsigned encodeNEONRm(const MachineInstr &MI, unsigned OpIdx) {
unsigned RegM = MI.getOperand(OpIdx).getReg();
unsigned Binary = 0;
- RegM = ARMRegisterInfo::getRegisterNumbering(RegM);
+ RegM = getARMRegisterNumbering(RegM);
Binary |= (RegM & 0xf);
Binary |= ((RegM >> 4) & 1) << ARMII::M_BitShift;
return Binary;
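encodeNEONRd and its Rn/Rm siblings split a 5-bit NEON register number into a 4-bit field plus a single extension bit (the D, N, or M bit). A standalone recomputation for D17, with the shift amounts hard-coded to the values the ARMII constants are expected to carry:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned RegRdShift = 12, D_BitShift = 22; // assumed ARMII values
      unsigned RegD = 17;                              // D17 = 0b10001
      uint32_t Binary = (RegD & 0xf) << RegRdShift;    // low 4 bits -> Rd field
      Binary |= ((RegD >> 4) & 1) << D_BitShift;       // bit 4 -> D bit
      assert(Binary == ((1u << 12) | (1u << 22)));
      return 0;
    }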
@@ -1684,7 +1790,7 @@ void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) {
Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
unsigned RegT = MI.getOperand(RegTOpIdx).getReg();
- RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+ RegT = getARMRegisterNumbering(RegT);
Binary |= (RegT << ARMII::RegRdShift);
Binary |= encodeNEONRn(MI, RegNOpIdx);
@@ -1713,7 +1819,7 @@ void ARMCodeEmitter::emitNEONDupInstruction(const MachineInstr &MI) {
Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
unsigned RegT = MI.getOperand(1).getReg();
- RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+ RegT = getARMRegisterNumbering(RegT);
Binary |= (RegT << ARMII::RegRdShift);
Binary |= encodeNEONRn(MI, 0);
emitWordLE(Binary);
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 60e923b..13d1b33 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -1,4 +1,4 @@
-//===-- ARMConstantIslandPass.cpp - ARM constant islands --------*- C++ -*-===//
+//===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -316,7 +316,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
}
/// The next UID to take is the first unused one.
- AFI->initConstPoolEntryUId(CPEMIs.size());
+ AFI->initPICLabelUId(CPEMIs.size());
// Do the initial scan of the function, building up information about the
// sizes of each block, the location of all the water, and finding all of the
@@ -327,7 +327,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
/// Remove dead constant pool entries.
- RemoveUnusedCPEntries();
+ MadeChange |= RemoveUnusedCPEntries();
// Iteratively place constant pool entries and fix up branches until there
// is no change.
@@ -368,6 +368,14 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
MadeChange |= UndoLRSpillRestore();
+ // Save the mapping between original and cloned constpool entries.
+ for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
+ for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
+ const CPEntry & CPE = CPEntries[i][j];
+ AFI->recordCPEClone(i, CPE.CPI);
+ }
+ }
+
DEBUG(errs() << '\n'; dumpBBs());
BBSizes.clear();
@@ -482,7 +490,7 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
HasInlineAsm = true;
}
- // Now go back through the instructions and build up our data structures
+ // Now go back through the instructions and build up our data structures.
unsigned Offset = 0;
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
@@ -603,7 +611,7 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
Scale = 4;
break;
- case ARM::LDR:
+ case ARM::LDRi12:
case ARM::LDRcp:
case ARM::t2LDRpci:
Bits = 12; // +-offset_12
@@ -611,7 +619,6 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
break;
case ARM::tLDRpci:
- case ARM::tLDRcp:
Bits = 8;
Scale = 4; // +(offset_8*4)
break;
@@ -692,7 +699,7 @@ static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
- // Renumber the MBB's to keep them consequtive.
+ // Renumber the MBB's to keep them consecutive.
NewBB->getParent()->RenumberBlocks(NewBB);
// Insert a size into BBSizes to align it properly with the (newly
@@ -1242,7 +1249,7 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
// No existing clone of this CPE is within range.
// We will be generating a new clone. Get a UID for it.
- unsigned ID = AFI->createConstPoolEntryUId();
+ unsigned ID = AFI->createPICLabelUId();
// Look for water where we can place this CPE.
MachineBasicBlock *NewIsland = MF.CreateMachineBasicBlock();
@@ -1644,7 +1651,7 @@ bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
unsigned DestOffset = BBOffsets[DestBB->getNumber()];
if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
MachineBasicBlock::iterator CmpMI = Br.MI; --CmpMI;
- if (CmpMI->getOpcode() == ARM::tCMPzi8) {
+ if (CmpMI->getOpcode() == ARM::tCMPi8) {
unsigned Reg = CmpMI->getOperand(0).getReg();
Pred = llvm::getInstrPredicate(CmpMI, PredReg);
if (Pred == ARMCC::AL &&
@@ -1766,7 +1773,7 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
if (!OptOk)
continue;
- unsigned Opc = ByteOk ? ARM::t2TBB : ARM::t2TBH;
+ unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
.addReg(IdxReg, getKillRegState(IdxRegKill))
.addJumpTableIndex(JTI, JTOP.getTargetFlags())
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
index f13ccc6..165a1d8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -24,7 +24,7 @@ using namespace llvm;
ARMConstantPoolValue::ARMConstantPoolValue(const Constant *cval, unsigned id,
ARMCP::ARMCPKind K,
unsigned char PCAdj,
- const char *Modif,
+ ARMCP::ARMCPModifier Modif,
bool AddCA)
: MachineConstantPoolValue((const Type*)cval->getType()),
CVal(cval), S(NULL), LabelId(id), Kind(K), PCAdjust(PCAdj),
@@ -33,17 +33,17 @@ ARMConstantPoolValue::ARMConstantPoolValue(const Constant *cval, unsigned id,
ARMConstantPoolValue::ARMConstantPoolValue(LLVMContext &C,
const char *s, unsigned id,
unsigned char PCAdj,
- const char *Modif,
+ ARMCP::ARMCPModifier Modif,
bool AddCA)
: MachineConstantPoolValue((const Type*)Type::getInt32Ty(C)),
CVal(NULL), S(strdup(s)), LabelId(id), Kind(ARMCP::CPExtSymbol),
PCAdjust(PCAdj), Modifier(Modif), AddCurrentAddress(AddCA) {}
ARMConstantPoolValue::ARMConstantPoolValue(const GlobalValue *gv,
- const char *Modif)
+ ARMCP::ARMCPModifier Modif)
: MachineConstantPoolValue((const Type*)Type::getInt32Ty(gv->getContext())),
CVal(gv), S(NULL), LabelId(0), Kind(ARMCP::CPValue), PCAdjust(0),
- Modifier(Modif) {}
+ Modifier(Modif), AddCurrentAddress(false) {}
const GlobalValue *ARMConstantPoolValue::getGV() const {
return dyn_cast_or_null<GlobalValue>(CVal);
@@ -53,6 +53,14 @@ const BlockAddress *ARMConstantPoolValue::getBlockAddress() const {
return dyn_cast_or_null<BlockAddress>(CVal);
}
+static bool CPV_streq(const char *S1, const char *S2) {
+ if (S1 == S2)
+ return true;
+ if (S1 && S2 && strcmp(S1, S2) == 0)
+ return true;
+ return false;
+}
+
int ARMConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) {
unsigned AlignMask = Alignment - 1;
@@ -65,8 +73,8 @@ int ARMConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
if (CPV->CVal == CVal &&
CPV->LabelId == LabelId &&
CPV->PCAdjust == PCAdjust &&
- (CPV->S == S || strcmp(CPV->S, S) == 0) &&
- (CPV->Modifier == Modifier || strcmp(CPV->Modifier, Modifier) == 0))
+ CPV_streq(CPV->S, S) &&
+ CPV->Modifier == Modifier)
return i;
}
}
@@ -91,8 +99,8 @@ ARMConstantPoolValue::hasSameValue(ARMConstantPoolValue *ACPV) {
if (ACPV->Kind == Kind &&
ACPV->CVal == CVal &&
ACPV->PCAdjust == PCAdjust &&
- (ACPV->S == S || strcmp(ACPV->S, S) == 0) &&
- (ACPV->Modifier == Modifier || strcmp(ACPV->Modifier, Modifier) == 0)) {
+ CPV_streq(ACPV->S, S) &&
+ ACPV->Modifier == Modifier) {
if (ACPV->LabelId == LabelId)
return true;
// Two PC relative constpool entries containing the same GV address or
@@ -113,7 +121,7 @@ void ARMConstantPoolValue::print(raw_ostream &O) const {
O << CVal->getName();
else
O << S;
- if (Modifier) O << "(" << Modifier << ")";
+ if (Modifier) O << "(" << getModifierText() << ")";
if (PCAdjust != 0) {
O << "-(LPC" << LabelId << "+" << (unsigned)PCAdjust;
if (AddCurrentAddress) O << "-.";
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 3119b54..d008811 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -15,6 +15,7 @@
#define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstddef>
namespace llvm {
@@ -31,6 +32,15 @@ namespace ARMCP {
CPBlockAddress,
CPLSDA
};
+
+ enum ARMCPModifier {
+ no_modifier,
+ TLSGD,
+ GOT,
+ GOTOFF,
+ GOTTPOFF,
+ TPOFF
+ };
}
/// ARMConstantPoolValue - ARM specific constantpool value. This is used to
@@ -43,26 +53,41 @@ class ARMConstantPoolValue : public MachineConstantPoolValue {
ARMCP::ARMCPKind Kind; // Kind of constant.
unsigned char PCAdjust; // Extra adjustment if constantpool is pc-relative.
// 8 for ARM, 4 for Thumb.
- const char *Modifier; // GV modifier i.e. (&GV(modifier)-(LPIC+8))
+ ARMCP::ARMCPModifier Modifier; // GV modifier i.e. (&GV(modifier)-(LPIC+8))
bool AddCurrentAddress;
public:
ARMConstantPoolValue(const Constant *cval, unsigned id,
ARMCP::ARMCPKind Kind = ARMCP::CPValue,
- unsigned char PCAdj = 0, const char *Modifier = NULL,
+ unsigned char PCAdj = 0,
+ ARMCP::ARMCPModifier Modifier = ARMCP::no_modifier,
bool AddCurrentAddress = false);
ARMConstantPoolValue(LLVMContext &C, const char *s, unsigned id,
- unsigned char PCAdj = 0, const char *Modifier = NULL,
+ unsigned char PCAdj = 0,
+ ARMCP::ARMCPModifier Modifier = ARMCP::no_modifier,
bool AddCurrentAddress = false);
- ARMConstantPoolValue(const GlobalValue *GV, const char *Modifier);
+ ARMConstantPoolValue(const GlobalValue *GV, ARMCP::ARMCPModifier Modifier);
ARMConstantPoolValue();
~ARMConstantPoolValue();
const GlobalValue *getGV() const;
const char *getSymbol() const { return S; }
const BlockAddress *getBlockAddress() const;
- const char *getModifier() const { return Modifier; }
- bool hasModifier() const { return Modifier != NULL; }
+ ARMCP::ARMCPModifier getModifier() const { return Modifier; }
+ const char *getModifierText() const {
+ switch (Modifier) {
+ default: llvm_unreachable("Unknown modifier!");
+ // FIXME: Are these case sensitive? It'd be nice to lower-case all the
+ // strings if that's legal.
+ case ARMCP::no_modifier: return "none";
+ case ARMCP::TLSGD: return "tlsgd";
+ case ARMCP::GOT: return "GOT";
+ case ARMCP::GOTOFF: return "GOTOFF";
+ case ARMCP::GOTTPOFF: return "gottpoff";
+ case ARMCP::TPOFF: return "tpoff";
+ }
+ }
+ bool hasModifier() const { return Modifier != ARMCP::no_modifier; }
bool mustAddCurrentAddress() const { return AddCurrentAddress; }
unsigned getLabelId() const { return LabelId; }
unsigned char getPCAdjustment() const { return PCAdjust; }
@@ -71,11 +96,7 @@ public:
bool isBlockAddress() { return Kind == ARMCP::CPBlockAddress; }
bool isLSDA() { return Kind == ARMCP::CPLSDA; }
- virtual unsigned getRelocationInfo() const {
- // FIXME: This is conservatively claiming that these entries require a
- // relocation, we may be able to do better than this.
- return 2;
- }
+ virtual unsigned getRelocationInfo() const { return 2; }
virtual int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment);
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
new file mode 100644
index 0000000..51e68b4
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
@@ -0,0 +1,83 @@
+//===-- ARMELFWriterInfo.cpp - ELF Writer Info for the ARM backend --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF writer information for the ARM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARMELFWriterInfo.h"
+#include "ARMRelocations.h"
+#include "llvm/Function.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/ELF.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Implementation of the ARMELFWriterInfo class
+//===----------------------------------------------------------------------===//
+
+ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM)
+ : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
+ TM.getTargetData()->isLittleEndian()) {
+}
+
+ARMELFWriterInfo::~ARMELFWriterInfo() {}
+
+unsigned ARMELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
+ switch (MachineRelTy) {
+ case ARM::reloc_arm_absolute:
+ case ARM::reloc_arm_relative:
+ case ARM::reloc_arm_cp_entry:
+ case ARM::reloc_arm_vfp_cp_entry:
+ case ARM::reloc_arm_machine_cp_entry:
+ case ARM::reloc_arm_jt_base:
+ case ARM::reloc_arm_pic_jt:
+ assert(0 && "unsupported ARM relocation type"); break;
+
+ case ARM::reloc_arm_branch: return ELF::R_ARM_CALL; break;
+ case ARM::reloc_arm_movt: return ELF::R_ARM_MOVT_ABS; break;
+ case ARM::reloc_arm_movw: return ELF::R_ARM_MOVW_ABS_NC; break;
+ default:
+ llvm_unreachable("unknown ARM relocation type"); break;
+ }
+ return 0;
+}
+
+long int ARMELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
+ long int Modifier) const {
+ assert(0 && "ARMELFWriterInfo::getDefaultAddendForRelTy() not implemented");
+ return 0;
+}
+
+unsigned ARMELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
+ assert(0 && "ARMELFWriterInfo::getRelocationTySize() not implemented");
+ return 0;
+}
+
+bool ARMELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
+ assert(0 && "ARMELFWriterInfo::isPCRelativeRel() not implemented");
+ return true;
+}
+
+unsigned ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
+ assert(0 &&
+ "ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not implemented");
+ return 0;
+}
+
+long int ARMELFWriterInfo::computeRelocation(unsigned SymOffset,
+ unsigned RelOffset,
+ unsigned RelTy) const {
+ assert(0 &&
+ "ARMELFWriterInfo::computeRelocation() not implemented");
+ return 0;
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
new file mode 100644
index 0000000..1c4e532
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
@@ -0,0 +1,58 @@
+//===-- ARMELFWriterInfo.h - ELF Writer Info for ARM ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF writer information for the ARM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARM_ELF_WRITER_INFO_H
+#define ARM_ELF_WRITER_INFO_H
+
+#include "llvm/Target/TargetELFWriterInfo.h"
+
+namespace llvm {
+
+ class ARMELFWriterInfo : public TargetELFWriterInfo {
+ public:
+ ARMELFWriterInfo(TargetMachine &TM);
+ virtual ~ARMELFWriterInfo();
+
+ /// getRelocationType - Returns the target specific ELF Relocation type.
+ /// 'MachineRelTy' contains the object code independent relocation type
+ virtual unsigned getRelocationType(unsigned MachineRelTy) const;
+
+ /// hasRelocationAddend - True if the target uses an addend in the
+ /// ELF relocation entry.
+ virtual bool hasRelocationAddend() const { return false; }
+
+ /// getDefaultAddendForRelTy - Gets the default addend value for a
+ /// relocation entry based on the target ELF relocation type.
+ virtual long int getDefaultAddendForRelTy(unsigned RelTy,
+ long int Modifier = 0) const;
+
+ /// getRelocationTySize - Returns the size of the relocatable field in bits
+ virtual unsigned getRelocationTySize(unsigned RelTy) const;
+
+ /// isPCRelativeRel - True if the relocation type is pc relative
+ virtual bool isPCRelativeRel(unsigned RelTy) const;
+
+ /// getAbsoluteLabelMachineRelTy - Returns the machine relocation type
+ /// used to reference an absolute label.
+ virtual unsigned getAbsoluteLabelMachineRelTy() const;
+
+ /// computeRelocation - Some relocatable fields can be relocated
+ /// directly, avoiding relocation symbol emission; this computes the
+ /// final relocation value for such a field.
+ virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
+ unsigned RelTy) const;
+ };
+
+} // end llvm namespace
+
+#endif // ARM_ELF_WRITER_INFO_H
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index fc2e3c3..bd753d2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -7,36 +7,38 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains a pass that expand pseudo instructions into target
+// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling, if-conversion, and other late
// optimizations. This pass should be run after register allocation but before
-// post- regalloc scheduling pass.
+// the post-regalloc scheduling pass.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-pseudo"
#include "ARM.h"
+#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
+#include "ARMMachineFunctionInfo.h"
+#include "ARMRegisterInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/raw_ostream.h" // FIXME: for debug only. remove!
using namespace llvm;
namespace {
class ARMExpandPseudo : public MachineFunctionPass {
- // Constants for register spacing in NEON load/store instructions.
- enum NEONRegSpacing {
- SingleSpc,
- EvenDblSpc,
- OddDblSpc
- };
-
public:
static char ID;
ARMExpandPseudo() : MachineFunctionPass(ID) {}
- const TargetInstrInfo *TII;
+ const ARMBaseInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const ARMSubtarget *STI;
+ ARMFunctionInfo *AFI;
virtual bool runOnMachineFunction(MachineFunction &Fn);
@@ -47,11 +49,16 @@ namespace {
private:
void TransferImpOps(MachineInstr &OldMI,
MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
+ bool ExpandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
bool ExpandMBB(MachineBasicBlock &MBB);
- void ExpandVLD(MachineBasicBlock::iterator &MBBI, unsigned Opc,
- bool hasWriteBack, NEONRegSpacing RegSpc, unsigned NumRegs);
- void ExpandVST(MachineBasicBlock::iterator &MBBI, unsigned Opc,
- bool hasWriteBack, NEONRegSpacing RegSpc, unsigned NumRegs);
+ void ExpandVLD(MachineBasicBlock::iterator &MBBI);
+ void ExpandVST(MachineBasicBlock::iterator &MBBI);
+ void ExpandLaneOp(MachineBasicBlock::iterator &MBBI);
+ void ExpandVTBL(MachineBasicBlock::iterator &MBBI,
+ unsigned Opc, bool IsExt, unsigned NumRegs);
+ void ExpandMOV32BitImm(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI);
};
char ARMExpandPseudo::ID = 0;
}
@@ -67,44 +74,349 @@ void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
const MachineOperand &MO = OldMI.getOperand(i);
assert(MO.isReg() && MO.getReg());
if (MO.isUse())
- UseMI.addReg(MO.getReg(), getKillRegState(MO.isKill()));
+ UseMI.addOperand(MO);
else
- DefMI.addReg(MO.getReg(),
- getDefRegState(true) | getDeadRegState(MO.isDead()));
+ DefMI.addOperand(MO);
+ }
+}
+
+namespace {
+ // Constants for register spacing in NEON load/store instructions.
+ // For quad-register load-lane and store-lane pseudo instructors, the
+ // spacing is initially assumed to be EvenDblSpc, and that is changed to
+ // OddDblSpc depending on the lane number operand.
+ enum NEONRegSpacing {
+ SingleSpc,
+ EvenDblSpc,
+ OddDblSpc
+ };
+
+ // Entries for NEON load/store information table. The table is sorted by
+ // PseudoOpc for fast binary-search lookups.
+ struct NEONLdStTableEntry {
+ unsigned PseudoOpc;
+ unsigned RealOpc;
+ bool IsLoad;
+ bool HasWriteBack;
+ NEONRegSpacing RegSpacing;
+ unsigned char NumRegs; // D registers loaded or stored
+ unsigned char RegElts; // elements per D register; used for lane ops
+
+ // Comparison methods for binary search of the table.
+ bool operator<(const NEONLdStTableEntry &TE) const {
+ return PseudoOpc < TE.PseudoOpc;
+ }
+ friend bool operator<(const NEONLdStTableEntry &TE, unsigned PseudoOpc) {
+ return TE.PseudoOpc < PseudoOpc;
+ }
+ friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned PseudoOpc,
+ const NEONLdStTableEntry &TE) {
+ return PseudoOpc < TE.PseudoOpc;
+ }
+ };
+}
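The heterogeneous operator< overloads above exist so the sorted table that follows can be searched with std::lower_bound keyed directly on an opcode. A sketch of how such a lookup might look; it assumes the NEONLdStTableEntry struct and the NEONLdStTable array from this file, and LLVM's actual helper may differ:

    #include <algorithm>

    static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
      const NEONLdStTableEntry *Begin = NEONLdStTable;
      const NEONLdStTableEntry *End =
          NEONLdStTable + sizeof(NEONLdStTable) / sizeof(NEONLdStTable[0]);
      // Binary search: the table is sorted by PseudoOpc.
      const NEONLdStTableEntry *I = std::lower_bound(Begin, End, Opcode);
      if (I != End && I->PseudoOpc == Opcode)
        return I;
      return 0; // not found
    }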
+
+static const NEONLdStTableEntry NEONLdStTable[] = {
+{ ARM::VLD1DUPq16Pseudo, ARM::VLD1DUPq16, true, false, SingleSpc, 2, 4},
+{ ARM::VLD1DUPq16Pseudo_UPD, ARM::VLD1DUPq16_UPD, true, true, SingleSpc, 2, 4},
+{ ARM::VLD1DUPq32Pseudo, ARM::VLD1DUPq32, true, false, SingleSpc, 2, 2},
+{ ARM::VLD1DUPq32Pseudo_UPD, ARM::VLD1DUPq32_UPD, true, true, SingleSpc, 2, 2},
+{ ARM::VLD1DUPq8Pseudo, ARM::VLD1DUPq8, true, false, SingleSpc, 2, 8},
+{ ARM::VLD1DUPq8Pseudo_UPD, ARM::VLD1DUPq8_UPD, true, true, SingleSpc, 2, 8},
+
+{ ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, EvenDblSpc, 1, 4 },
+{ ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, EvenDblSpc, 1, 4 },
+{ ARM::VLD1LNq32Pseudo, ARM::VLD1LNd32, true, false, EvenDblSpc, 1, 2 },
+{ ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, EvenDblSpc, 1, 2 },
+{ ARM::VLD1LNq8Pseudo, ARM::VLD1LNd8, true, false, EvenDblSpc, 1, 8 },
+{ ARM::VLD1LNq8Pseudo_UPD, ARM::VLD1LNd8_UPD, true, true, EvenDblSpc, 1, 8 },
+
+{ ARM::VLD1d64QPseudo, ARM::VLD1d64Q, true, false, SingleSpc, 4, 1 },
+{ ARM::VLD1d64QPseudo_UPD, ARM::VLD1d64Q_UPD, true, true, SingleSpc, 4, 1 },
+{ ARM::VLD1d64TPseudo, ARM::VLD1d64T, true, false, SingleSpc, 3, 1 },
+{ ARM::VLD1d64TPseudo_UPD, ARM::VLD1d64T_UPD, true, true, SingleSpc, 3, 1 },
+
+{ ARM::VLD1q16Pseudo, ARM::VLD1q16, true, false, SingleSpc, 2, 4 },
+{ ARM::VLD1q16Pseudo_UPD, ARM::VLD1q16_UPD, true, true, SingleSpc, 2, 4 },
+{ ARM::VLD1q32Pseudo, ARM::VLD1q32, true, false, SingleSpc, 2, 2 },
+{ ARM::VLD1q32Pseudo_UPD, ARM::VLD1q32_UPD, true, true, SingleSpc, 2, 2 },
+{ ARM::VLD1q64Pseudo, ARM::VLD1q64, true, false, SingleSpc, 2, 1 },
+{ ARM::VLD1q64Pseudo_UPD, ARM::VLD1q64_UPD, true, true, SingleSpc, 2, 1 },
+{ ARM::VLD1q8Pseudo, ARM::VLD1q8, true, false, SingleSpc, 2, 8 },
+{ ARM::VLD1q8Pseudo_UPD, ARM::VLD1q8_UPD, true, true, SingleSpc, 2, 8 },
+
+{ ARM::VLD2DUPd16Pseudo, ARM::VLD2DUPd16, true, false, SingleSpc, 2, 4},
+{ ARM::VLD2DUPd16Pseudo_UPD, ARM::VLD2DUPd16_UPD, true, true, SingleSpc, 2, 4},
+{ ARM::VLD2DUPd32Pseudo, ARM::VLD2DUPd32, true, false, SingleSpc, 2, 2},
+{ ARM::VLD2DUPd32Pseudo_UPD, ARM::VLD2DUPd32_UPD, true, true, SingleSpc, 2, 2},
+{ ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd8, true, false, SingleSpc, 2, 8},
+{ ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd8_UPD, true, true, SingleSpc, 2, 8},
+
+{ ARM::VLD2LNd16Pseudo, ARM::VLD2LNd16, true, false, SingleSpc, 2, 4 },
+{ ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, SingleSpc, 2, 4 },
+{ ARM::VLD2LNd32Pseudo, ARM::VLD2LNd32, true, false, SingleSpc, 2, 2 },
+{ ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, SingleSpc, 2, 2 },
+{ ARM::VLD2LNd8Pseudo, ARM::VLD2LNd8, true, false, SingleSpc, 2, 8 },
+{ ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd8_UPD, true, true, SingleSpc, 2, 8 },
+{ ARM::VLD2LNq16Pseudo, ARM::VLD2LNq16, true, false, EvenDblSpc, 2, 4 },
+{ ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, EvenDblSpc, 2, 4 },
+{ ARM::VLD2LNq32Pseudo, ARM::VLD2LNq32, true, false, EvenDblSpc, 2, 2 },
+{ ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, EvenDblSpc, 2, 2 },
+
+{ ARM::VLD2d16Pseudo, ARM::VLD2d16, true, false, SingleSpc, 2, 4 },
+{ ARM::VLD2d16Pseudo_UPD, ARM::VLD2d16_UPD, true, true, SingleSpc, 2, 4 },
+{ ARM::VLD2d32Pseudo, ARM::VLD2d32, true, false, SingleSpc, 2, 2 },
+{ ARM::VLD2d32Pseudo_UPD, ARM::VLD2d32_UPD, true, true, SingleSpc, 2, 2 },
+{ ARM::VLD2d8Pseudo, ARM::VLD2d8, true, false, SingleSpc, 2, 8 },
+{ ARM::VLD2d8Pseudo_UPD, ARM::VLD2d8_UPD, true, true, SingleSpc, 2, 8 },
+
+{ ARM::VLD2q16Pseudo, ARM::VLD2q16, true, false, SingleSpc, 4, 4 },
+{ ARM::VLD2q16Pseudo_UPD, ARM::VLD2q16_UPD, true, true, SingleSpc, 4, 4 },
+{ ARM::VLD2q32Pseudo, ARM::VLD2q32, true, false, SingleSpc, 4, 2 },
+{ ARM::VLD2q32Pseudo_UPD, ARM::VLD2q32_UPD, true, true, SingleSpc, 4, 2 },
+{ ARM::VLD2q8Pseudo, ARM::VLD2q8, true, false, SingleSpc, 4, 8 },
+{ ARM::VLD2q8Pseudo_UPD, ARM::VLD2q8_UPD, true, true, SingleSpc, 4, 8 },
+
+{ ARM::VLD3DUPd16Pseudo, ARM::VLD3DUPd16, true, false, SingleSpc, 3, 4},
+{ ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, SingleSpc, 3, 4},
+{ ARM::VLD3DUPd32Pseudo, ARM::VLD3DUPd32, true, false, SingleSpc, 3, 2},
+{ ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, SingleSpc, 3, 2},
+{ ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd8, true, false, SingleSpc, 3, 8},
+{ ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd8_UPD, true, true, SingleSpc, 3, 8},
+
+{ ARM::VLD3LNd16Pseudo, ARM::VLD3LNd16, true, false, SingleSpc, 3, 4 },
+{ ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, SingleSpc, 3, 4 },
+{ ARM::VLD3LNd32Pseudo, ARM::VLD3LNd32, true, false, SingleSpc, 3, 2 },
+{ ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, SingleSpc, 3, 2 },
+{ ARM::VLD3LNd8Pseudo, ARM::VLD3LNd8, true, false, SingleSpc, 3, 8 },
+{ ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd8_UPD, true, true, SingleSpc, 3, 8 },
+{ ARM::VLD3LNq16Pseudo, ARM::VLD3LNq16, true, false, EvenDblSpc, 3, 4 },
+{ ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, EvenDblSpc, 3, 4 },
+{ ARM::VLD3LNq32Pseudo, ARM::VLD3LNq32, true, false, EvenDblSpc, 3, 2 },
+{ ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, EvenDblSpc, 3, 2 },
+
+{ ARM::VLD3d16Pseudo, ARM::VLD3d16, true, false, SingleSpc, 3, 4 },
+{ ARM::VLD3d16Pseudo_UPD, ARM::VLD3d16_UPD, true, true, SingleSpc, 3, 4 },
+{ ARM::VLD3d32Pseudo, ARM::VLD3d32, true, false, SingleSpc, 3, 2 },
+{ ARM::VLD3d32Pseudo_UPD, ARM::VLD3d32_UPD, true, true, SingleSpc, 3, 2 },
+{ ARM::VLD3d8Pseudo, ARM::VLD3d8, true, false, SingleSpc, 3, 8 },
+{ ARM::VLD3d8Pseudo_UPD, ARM::VLD3d8_UPD, true, true, SingleSpc, 3, 8 },
+
+{ ARM::VLD3q16Pseudo_UPD, ARM::VLD3q16_UPD, true, true, EvenDblSpc, 3, 4 },
+{ ARM::VLD3q16oddPseudo, ARM::VLD3q16, true, false, OddDblSpc, 3, 4 },
+{ ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, OddDblSpc, 3, 4 },
+{ ARM::VLD3q32Pseudo_UPD, ARM::VLD3q32_UPD, true, true, EvenDblSpc, 3, 2 },
+{ ARM::VLD3q32oddPseudo, ARM::VLD3q32, true, false, OddDblSpc, 3, 2 },
+{ ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, OddDblSpc, 3, 2 },
+{ ARM::VLD3q8Pseudo_UPD, ARM::VLD3q8_UPD, true, true, EvenDblSpc, 3, 8 },
+{ ARM::VLD3q8oddPseudo, ARM::VLD3q8, true, false, OddDblSpc, 3, 8 },
+{ ARM::VLD3q8oddPseudo_UPD, ARM::VLD3q8_UPD, true, true, OddDblSpc, 3, 8 },
+
+{ ARM::VLD4DUPd16Pseudo, ARM::VLD4DUPd16, true, false, SingleSpc, 4, 4},
+{ ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, SingleSpc, 4, 4},
+{ ARM::VLD4DUPd32Pseudo, ARM::VLD4DUPd32, true, false, SingleSpc, 4, 2},
+{ ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, SingleSpc, 4, 2},
+{ ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd8, true, false, SingleSpc, 4, 8},
+{ ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd8_UPD, true, true, SingleSpc, 4, 8},
+
+{ ARM::VLD4LNd16Pseudo, ARM::VLD4LNd16, true, false, SingleSpc, 4, 4 },
+{ ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, SingleSpc, 4, 4 },
+{ ARM::VLD4LNd32Pseudo, ARM::VLD4LNd32, true, false, SingleSpc, 4, 2 },
+{ ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, SingleSpc, 4, 2 },
+{ ARM::VLD4LNd8Pseudo, ARM::VLD4LNd8, true, false, SingleSpc, 4, 8 },
+{ ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd8_UPD, true, true, SingleSpc, 4, 8 },
+{ ARM::VLD4LNq16Pseudo, ARM::VLD4LNq16, true, false, EvenDblSpc, 4, 4 },
+{ ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, EvenDblSpc, 4, 4 },
+{ ARM::VLD4LNq32Pseudo, ARM::VLD4LNq32, true, false, EvenDblSpc, 4, 2 },
+{ ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, EvenDblSpc, 4, 2 },
+
+{ ARM::VLD4d16Pseudo, ARM::VLD4d16, true, false, SingleSpc, 4, 4 },
+{ ARM::VLD4d16Pseudo_UPD, ARM::VLD4d16_UPD, true, true, SingleSpc, 4, 4 },
+{ ARM::VLD4d32Pseudo, ARM::VLD4d32, true, false, SingleSpc, 4, 2 },
+{ ARM::VLD4d32Pseudo_UPD, ARM::VLD4d32_UPD, true, true, SingleSpc, 4, 2 },
+{ ARM::VLD4d8Pseudo, ARM::VLD4d8, true, false, SingleSpc, 4, 8 },
+{ ARM::VLD4d8Pseudo_UPD, ARM::VLD4d8_UPD, true, true, SingleSpc, 4, 8 },
+
+{ ARM::VLD4q16Pseudo_UPD, ARM::VLD4q16_UPD, true, true, EvenDblSpc, 4, 4 },
+{ ARM::VLD4q16oddPseudo, ARM::VLD4q16, true, false, OddDblSpc, 4, 4 },
+{ ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, OddDblSpc, 4, 4 },
+{ ARM::VLD4q32Pseudo_UPD, ARM::VLD4q32_UPD, true, true, EvenDblSpc, 4, 2 },
+{ ARM::VLD4q32oddPseudo, ARM::VLD4q32, true, false, OddDblSpc, 4, 2 },
+{ ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, OddDblSpc, 4, 2 },
+{ ARM::VLD4q8Pseudo_UPD, ARM::VLD4q8_UPD, true, true, EvenDblSpc, 4, 8 },
+{ ARM::VLD4q8oddPseudo, ARM::VLD4q8, true, false, OddDblSpc, 4, 8 },
+{ ARM::VLD4q8oddPseudo_UPD, ARM::VLD4q8_UPD, true, true, OddDblSpc, 4, 8 },
+
+{ ARM::VST1LNq16Pseudo, ARM::VST1LNd16, false, false, EvenDblSpc, 1, 4 },
+{ ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD,false, true, EvenDblSpc, 1, 4 },
+{ ARM::VST1LNq32Pseudo, ARM::VST1LNd32, false, false, EvenDblSpc, 1, 2 },
+{ ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD,false, true, EvenDblSpc, 1, 2 },
+{ ARM::VST1LNq8Pseudo, ARM::VST1LNd8, false, false, EvenDblSpc, 1, 8 },
+{ ARM::VST1LNq8Pseudo_UPD, ARM::VST1LNd8_UPD, false, true, EvenDblSpc, 1, 8 },
+
+{ ARM::VST1d64QPseudo, ARM::VST1d64Q, false, false, SingleSpc, 4, 1 },
+{ ARM::VST1d64QPseudo_UPD, ARM::VST1d64Q_UPD, false, true, SingleSpc, 4, 1 },
+{ ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, SingleSpc, 3, 1 },
+{ ARM::VST1d64TPseudo_UPD, ARM::VST1d64T_UPD, false, true, SingleSpc, 3, 1 },
+
+{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, SingleSpc, 2, 4 },
+{ ARM::VST1q16Pseudo_UPD, ARM::VST1q16_UPD, false, true, SingleSpc, 2, 4 },
+{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, SingleSpc, 2, 2 },
+{ ARM::VST1q32Pseudo_UPD, ARM::VST1q32_UPD, false, true, SingleSpc, 2, 2 },
+{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, SingleSpc, 2, 1 },
+{ ARM::VST1q64Pseudo_UPD, ARM::VST1q64_UPD, false, true, SingleSpc, 2, 1 },
+{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, SingleSpc, 2, 8 },
+{ ARM::VST1q8Pseudo_UPD, ARM::VST1q8_UPD, false, true, SingleSpc, 2, 8 },
+
+{ ARM::VST2LNd16Pseudo, ARM::VST2LNd16, false, false, SingleSpc, 2, 4 },
+{ ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, SingleSpc, 2, 4 },
+{ ARM::VST2LNd32Pseudo, ARM::VST2LNd32, false, false, SingleSpc, 2, 2 },
+{ ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, SingleSpc, 2, 2 },
+{ ARM::VST2LNd8Pseudo, ARM::VST2LNd8, false, false, SingleSpc, 2, 8 },
+{ ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd8_UPD, false, true, SingleSpc, 2, 8 },
+{ ARM::VST2LNq16Pseudo, ARM::VST2LNq16, false, false, EvenDblSpc, 2, 4},
+{ ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, EvenDblSpc, 2, 4},
+{ ARM::VST2LNq32Pseudo, ARM::VST2LNq32, false, false, EvenDblSpc, 2, 2},
+{ ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, EvenDblSpc, 2, 2},
+
+{ ARM::VST2d16Pseudo, ARM::VST2d16, false, false, SingleSpc, 2, 4 },
+{ ARM::VST2d16Pseudo_UPD, ARM::VST2d16_UPD, false, true, SingleSpc, 2, 4 },
+{ ARM::VST2d32Pseudo, ARM::VST2d32, false, false, SingleSpc, 2, 2 },
+{ ARM::VST2d32Pseudo_UPD, ARM::VST2d32_UPD, false, true, SingleSpc, 2, 2 },
+{ ARM::VST2d8Pseudo, ARM::VST2d8, false, false, SingleSpc, 2, 8 },
+{ ARM::VST2d8Pseudo_UPD, ARM::VST2d8_UPD, false, true, SingleSpc, 2, 8 },
+
+{ ARM::VST2q16Pseudo, ARM::VST2q16, false, false, SingleSpc, 4, 4 },
+{ ARM::VST2q16Pseudo_UPD, ARM::VST2q16_UPD, false, true, SingleSpc, 4, 4 },
+{ ARM::VST2q32Pseudo, ARM::VST2q32, false, false, SingleSpc, 4, 2 },
+{ ARM::VST2q32Pseudo_UPD, ARM::VST2q32_UPD, false, true, SingleSpc, 4, 2 },
+{ ARM::VST2q8Pseudo, ARM::VST2q8, false, false, SingleSpc, 4, 8 },
+{ ARM::VST2q8Pseudo_UPD, ARM::VST2q8_UPD, false, true, SingleSpc, 4, 8 },
+
+{ ARM::VST3LNd16Pseudo, ARM::VST3LNd16, false, false, SingleSpc, 3, 4 },
+{ ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, SingleSpc, 3, 4 },
+{ ARM::VST3LNd32Pseudo, ARM::VST3LNd32, false, false, SingleSpc, 3, 2 },
+{ ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, SingleSpc, 3, 2 },
+{ ARM::VST3LNd8Pseudo, ARM::VST3LNd8, false, false, SingleSpc, 3, 8 },
+{ ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd8_UPD, false, true, SingleSpc, 3, 8 },
+{ ARM::VST3LNq16Pseudo, ARM::VST3LNq16, false, false, EvenDblSpc, 3, 4},
+{ ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, EvenDblSpc, 3, 4},
+{ ARM::VST3LNq32Pseudo, ARM::VST3LNq32, false, false, EvenDblSpc, 3, 2},
+{ ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, EvenDblSpc, 3, 2},
+
+{ ARM::VST3d16Pseudo, ARM::VST3d16, false, false, SingleSpc, 3, 4 },
+{ ARM::VST3d16Pseudo_UPD, ARM::VST3d16_UPD, false, true, SingleSpc, 3, 4 },
+{ ARM::VST3d32Pseudo, ARM::VST3d32, false, false, SingleSpc, 3, 2 },
+{ ARM::VST3d32Pseudo_UPD, ARM::VST3d32_UPD, false, true, SingleSpc, 3, 2 },
+{ ARM::VST3d8Pseudo, ARM::VST3d8, false, false, SingleSpc, 3, 8 },
+{ ARM::VST3d8Pseudo_UPD, ARM::VST3d8_UPD, false, true, SingleSpc, 3, 8 },
+
+{ ARM::VST3q16Pseudo_UPD, ARM::VST3q16_UPD, false, true, EvenDblSpc, 3, 4 },
+{ ARM::VST3q16oddPseudo, ARM::VST3q16, false, false, OddDblSpc, 3, 4 },
+{ ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, OddDblSpc, 3, 4 },
+{ ARM::VST3q32Pseudo_UPD, ARM::VST3q32_UPD, false, true, EvenDblSpc, 3, 2 },
+{ ARM::VST3q32oddPseudo, ARM::VST3q32, false, false, OddDblSpc, 3, 2 },
+{ ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, OddDblSpc, 3, 2 },
+{ ARM::VST3q8Pseudo_UPD, ARM::VST3q8_UPD, false, true, EvenDblSpc, 3, 8 },
+{ ARM::VST3q8oddPseudo, ARM::VST3q8, false, false, OddDblSpc, 3, 8 },
+{ ARM::VST3q8oddPseudo_UPD, ARM::VST3q8_UPD, false, true, OddDblSpc, 3, 8 },
+
+{ ARM::VST4LNd16Pseudo, ARM::VST4LNd16, false, false, SingleSpc, 4, 4 },
+{ ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, SingleSpc, 4, 4 },
+{ ARM::VST4LNd32Pseudo, ARM::VST4LNd32, false, false, SingleSpc, 4, 2 },
+{ ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, SingleSpc, 4, 2 },
+{ ARM::VST4LNd8Pseudo, ARM::VST4LNd8, false, false, SingleSpc, 4, 8 },
+{ ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd8_UPD, false, true, SingleSpc, 4, 8 },
+{ ARM::VST4LNq16Pseudo, ARM::VST4LNq16, false, false, EvenDblSpc, 4, 4},
+{ ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, EvenDblSpc, 4, 4},
+{ ARM::VST4LNq32Pseudo, ARM::VST4LNq32, false, false, EvenDblSpc, 4, 2},
+{ ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, EvenDblSpc, 4, 2},
+
+{ ARM::VST4d16Pseudo, ARM::VST4d16, false, false, SingleSpc, 4, 4 },
+{ ARM::VST4d16Pseudo_UPD, ARM::VST4d16_UPD, false, true, SingleSpc, 4, 4 },
+{ ARM::VST4d32Pseudo, ARM::VST4d32, false, false, SingleSpc, 4, 2 },
+{ ARM::VST4d32Pseudo_UPD, ARM::VST4d32_UPD, false, true, SingleSpc, 4, 2 },
+{ ARM::VST4d8Pseudo, ARM::VST4d8, false, false, SingleSpc, 4, 8 },
+{ ARM::VST4d8Pseudo_UPD, ARM::VST4d8_UPD, false, true, SingleSpc, 4, 8 },
+
+{ ARM::VST4q16Pseudo_UPD, ARM::VST4q16_UPD, false, true, EvenDblSpc, 4, 4 },
+{ ARM::VST4q16oddPseudo, ARM::VST4q16, false, false, OddDblSpc, 4, 4 },
+{ ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, OddDblSpc, 4, 4 },
+{ ARM::VST4q32Pseudo_UPD, ARM::VST4q32_UPD, false, true, EvenDblSpc, 4, 2 },
+{ ARM::VST4q32oddPseudo, ARM::VST4q32, false, false, OddDblSpc, 4, 2 },
+{ ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, OddDblSpc, 4, 2 },
+{ ARM::VST4q8Pseudo_UPD, ARM::VST4q8_UPD, false, true, EvenDblSpc, 4, 8 },
+{ ARM::VST4q8oddPseudo, ARM::VST4q8, false, false, OddDblSpc, 4, 8 },
+{ ARM::VST4q8oddPseudo_UPD, ARM::VST4q8_UPD, false, true, OddDblSpc, 4, 8 }
+};
+
+/// LookupNEONLdSt - Search the NEONLdStTable for information about a NEON
+/// load or store pseudo instruction.
+static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
+ unsigned NumEntries = array_lengthof(NEONLdStTable);
+
+#ifndef NDEBUG
+ // Make sure the table is sorted.
+ static bool TableChecked = false;
+ if (!TableChecked) {
+ for (unsigned i = 0; i != NumEntries-1; ++i)
+ assert(NEONLdStTable[i] < NEONLdStTable[i+1] &&
+ "NEONLdStTable is not sorted!");
+ TableChecked = true;
+ }
+#endif
+
+ const NEONLdStTableEntry *I =
+ std::lower_bound(NEONLdStTable, NEONLdStTable + NumEntries, Opcode);
+ if (I != NEONLdStTable + NumEntries && I->PseudoOpc == Opcode)
+ return I;
+ return NULL;
+}
+
+/// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register,
+/// corresponding to the specified register spacing. Not all of the results
+/// are necessarily valid, e.g., a Q register only has 2 D subregisters.
+static void GetDSubRegs(unsigned Reg, NEONRegSpacing RegSpc,
+ const TargetRegisterInfo *TRI, unsigned &D0,
+ unsigned &D1, unsigned &D2, unsigned &D3) {
+ if (RegSpc == SingleSpc) {
+ D0 = TRI->getSubReg(Reg, ARM::dsub_0);
+ D1 = TRI->getSubReg(Reg, ARM::dsub_1);
+ D2 = TRI->getSubReg(Reg, ARM::dsub_2);
+ D3 = TRI->getSubReg(Reg, ARM::dsub_3);
+ } else if (RegSpc == EvenDblSpc) {
+ D0 = TRI->getSubReg(Reg, ARM::dsub_0);
+ D1 = TRI->getSubReg(Reg, ARM::dsub_2);
+ D2 = TRI->getSubReg(Reg, ARM::dsub_4);
+ D3 = TRI->getSubReg(Reg, ARM::dsub_6);
+ } else {
+ assert(RegSpc == OddDblSpc && "unknown register spacing");
+ D0 = TRI->getSubReg(Reg, ARM::dsub_1);
+ D1 = TRI->getSubReg(Reg, ARM::dsub_3);
+ D2 = TRI->getSubReg(Reg, ARM::dsub_5);
+ D3 = TRI->getSubReg(Reg, ARM::dsub_7);
}
}
/// ExpandVLD - Translate VLD pseudo instructions with Q, QQ or QQQQ register
/// operands to real VLD instructions with D register operands.
-void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI,
- unsigned Opc, bool hasWriteBack,
- NEONRegSpacing RegSpc, unsigned NumRegs) {
+void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
MachineBasicBlock &MBB = *MI.getParent();
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
+ const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
+ assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed");
+ NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ unsigned NumRegs = TableEntry->NumRegs;
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(TableEntry->RealOpc));
unsigned OpIdx = 0;
bool DstIsDead = MI.getOperand(OpIdx).isDead();
unsigned DstReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
- if (RegSpc == SingleSpc) {
- D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
- D1 = TRI->getSubReg(DstReg, ARM::dsub_1);
- D2 = TRI->getSubReg(DstReg, ARM::dsub_2);
- D3 = TRI->getSubReg(DstReg, ARM::dsub_3);
- } else if (RegSpc == EvenDblSpc) {
- D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
- D1 = TRI->getSubReg(DstReg, ARM::dsub_2);
- D2 = TRI->getSubReg(DstReg, ARM::dsub_4);
- D3 = TRI->getSubReg(DstReg, ARM::dsub_6);
- } else {
- assert(RegSpc == OddDblSpc && "unknown register spacing for VLD");
- D0 = TRI->getSubReg(DstReg, ARM::dsub_1);
- D1 = TRI->getSubReg(DstReg, ARM::dsub_3);
- D2 = TRI->getSubReg(DstReg, ARM::dsub_5);
- D3 = TRI->getSubReg(DstReg, ARM::dsub_7);
- }
+ GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
if (NumRegs > 2)
@@ -112,107 +424,373 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI,
if (NumRegs > 3)
MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
- if (hasWriteBack) {
- bool WBIsDead = MI.getOperand(OpIdx).isDead();
- unsigned WBReg = MI.getOperand(OpIdx++).getReg();
- MIB.addReg(WBReg, RegState::Define | getDeadRegState(WBIsDead));
- }
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
// Copy the addrmode6 operands.
- bool AddrIsKill = MI.getOperand(OpIdx).isKill();
- MIB.addReg(MI.getOperand(OpIdx++).getReg(), getKillRegState(AddrIsKill));
- MIB.addImm(MI.getOperand(OpIdx++).getImm());
- if (hasWriteBack) {
- // Copy the am6offset operand.
- bool OffsetIsKill = MI.getOperand(OpIdx).isKill();
- MIB.addReg(MI.getOperand(OpIdx++).getReg(), getKillRegState(OffsetIsKill));
- }
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ // Copy the am6offset operand.
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
- MIB = AddDefaultPred(MIB);
- TransferImpOps(MI, MIB, MIB);
- // For an instruction writing the odd subregs, add an implicit use of the
- // super-register because the even subregs were loaded separately.
- if (RegSpc == OddDblSpc)
- MIB.addReg(DstReg, RegState::Implicit);
+ // For an instruction writing double-spaced subregs, the pseudo instruction
+ // has an extra operand that is a use of the super-register. Record the
+ // operand index and skip over it.
+ unsigned SrcOpIdx = 0;
+ if (RegSpc == EvenDblSpc || RegSpc == OddDblSpc)
+ SrcOpIdx = OpIdx++;
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the super-register source operand used for double-spaced subregs over
+ // to the new instruction as an implicit operand.
+ if (SrcOpIdx != 0) {
+ MachineOperand MO = MI.getOperand(SrcOpIdx);
+ MO.setImplicit(true);
+ MIB.addOperand(MO);
+ }
// Add an implicit def for the super-register.
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
+ TransferImpOps(MI, MIB, MIB);
MI.eraseFromParent();
}
/// ExpandVST - Translate VST pseudo instructions with Q, QQ or QQQQ register
/// operands to real VST instructions with D register operands.
-void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI,
- unsigned Opc, bool hasWriteBack,
- NEONRegSpacing RegSpc, unsigned NumRegs) {
+void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
MachineBasicBlock &MBB = *MI.getParent();
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
+ const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
+ assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed");
+ NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ unsigned NumRegs = TableEntry->NumRegs;
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(TableEntry->RealOpc));
unsigned OpIdx = 0;
- if (hasWriteBack) {
- bool DstIsDead = MI.getOperand(OpIdx).isDead();
- unsigned DstReg = MI.getOperand(OpIdx++).getReg();
- MIB.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead));
- }
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
// Copy the addrmode6 operands.
- bool AddrIsKill = MI.getOperand(OpIdx).isKill();
- MIB.addReg(MI.getOperand(OpIdx++).getReg(), getKillRegState(AddrIsKill));
- MIB.addImm(MI.getOperand(OpIdx++).getImm());
- if (hasWriteBack) {
- // Copy the am6offset operand.
- bool OffsetIsKill = MI.getOperand(OpIdx).isKill();
- MIB.addReg(MI.getOperand(OpIdx++).getReg(), getKillRegState(OffsetIsKill));
- }
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ // Copy the am6offset operand.
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
bool SrcIsKill = MI.getOperand(OpIdx).isKill();
- unsigned SrcReg = MI.getOperand(OpIdx).getReg();
+ unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
- if (RegSpc == SingleSpc) {
- D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
- D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
- D2 = TRI->getSubReg(SrcReg, ARM::dsub_2);
- D3 = TRI->getSubReg(SrcReg, ARM::dsub_3);
- } else if (RegSpc == EvenDblSpc) {
- D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
- D1 = TRI->getSubReg(SrcReg, ARM::dsub_2);
- D2 = TRI->getSubReg(SrcReg, ARM::dsub_4);
- D3 = TRI->getSubReg(SrcReg, ARM::dsub_6);
- } else {
- assert(RegSpc == OddDblSpc && "unknown register spacing for VST");
- D0 = TRI->getSubReg(SrcReg, ARM::dsub_1);
- D1 = TRI->getSubReg(SrcReg, ARM::dsub_3);
- D2 = TRI->getSubReg(SrcReg, ARM::dsub_5);
- D3 = TRI->getSubReg(SrcReg, ARM::dsub_7);
- }
-
+ GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
MIB.addReg(D0).addReg(D1);
if (NumRegs > 2)
MIB.addReg(D2);
if (NumRegs > 3)
MIB.addReg(D3);
- MIB = AddDefaultPred(MIB);
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ if (SrcIsKill)
+ // Add an implicit kill for the super-reg.
+ (*MIB).addRegisterKilled(SrcReg, TRI, true);
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+}
+
+/// ExpandLaneOp - Translate VLD*LN and VST*LN instructions with Q, QQ or QQQQ
+/// register operands to real instructions with D register operands.
+void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
+ MachineInstr &MI = *MBBI;
+ MachineBasicBlock &MBB = *MI.getParent();
+
+ const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
+ assert(TableEntry && "NEONLdStTable lookup failed");
+ NEONRegSpacing RegSpc = TableEntry->RegSpacing;
+ unsigned NumRegs = TableEntry->NumRegs;
+ unsigned RegElts = TableEntry->RegElts;
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(TableEntry->RealOpc));
+ unsigned OpIdx = 0;
+ // The lane operand is always the 3rd from last operand, before the 2
+ // predicate operands.
+ unsigned Lane = MI.getOperand(MI.getDesc().getNumOperands() - 3).getImm();
+
+ // Adjust the lane and spacing as needed for Q registers.
+ assert(RegSpc != OddDblSpc && "unexpected register spacing for VLD/VST-lane");
+ if (RegSpc == EvenDblSpc && Lane >= RegElts) {
+ RegSpc = OddDblSpc;
+ Lane -= RegElts;
+ }
+ assert(Lane < RegElts && "out of range lane for VLD/VST-lane");
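+  // For example, VLD2LNq16Pseudo (4 elements per D register) with lane 5
+  // becomes an OddDblSpc access to lane 1.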
+
+ unsigned D0 = 0, D1 = 0, D2 = 0, D3 = 0;
+ unsigned DstReg = 0;
+ bool DstIsDead = false;
+ if (TableEntry->IsLoad) {
+ DstIsDead = MI.getOperand(OpIdx).isDead();
+ DstReg = MI.getOperand(OpIdx++).getReg();
+ GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
+ MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
+ if (NumRegs > 1)
+ MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
+ if (NumRegs > 2)
+ MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
+ if (NumRegs > 3)
+ MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
+ }
+
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the addrmode6 operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ // Copy the am6offset operand.
+ if (TableEntry->HasWriteBack)
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Grab the super-register source.
+ MachineOperand MO = MI.getOperand(OpIdx++);
+ if (!TableEntry->IsLoad)
+ GetDSubRegs(MO.getReg(), RegSpc, TRI, D0, D1, D2, D3);
+
+ // Add the subregs as sources of the new instruction.
+ unsigned SrcFlags = (getUndefRegState(MO.isUndef()) |
+ getKillRegState(MO.isKill()));
+ MIB.addReg(D0, SrcFlags);
+ if (NumRegs > 1)
+ MIB.addReg(D1, SrcFlags);
+ if (NumRegs > 2)
+ MIB.addReg(D2, SrcFlags);
+ if (NumRegs > 3)
+ MIB.addReg(D3, SrcFlags);
+
+ // Add the lane number operand.
+ MIB.addImm(Lane);
+ OpIdx += 1;
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the super-register source to be an implicit source.
+ MO.setImplicit(true);
+ MIB.addOperand(MO);
+ if (TableEntry->IsLoad)
+ // Add an implicit def for the super-register.
+ MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+}
+
+/// ExpandVTBL - Translate VTBL and VTBX pseudo instructions with Q or QQ
+/// register operands to real instructions with D register operands.
+void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
+ unsigned Opc, bool IsExt, unsigned NumRegs) {
+ MachineInstr &MI = *MBBI;
+ MachineBasicBlock &MBB = *MI.getParent();
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
+ unsigned OpIdx = 0;
+
+ // Transfer the destination register operand.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ if (IsExt)
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ bool SrcIsKill = MI.getOperand(OpIdx).isKill();
+ unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
+ unsigned D0, D1, D2, D3;
+ GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3);
+ MIB.addReg(D0).addReg(D1);
+ if (NumRegs > 2)
+ MIB.addReg(D2);
+ if (NumRegs > 3)
+ MIB.addReg(D3);
+
+ // Copy the other source register operand.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
if (SrcIsKill)
// Add an implicit kill for the super-reg.
(*MIB).addRegisterKilled(SrcReg, TRI, true);
+ TransferImpOps(MI, MIB, MIB);
MI.eraseFromParent();
}
-bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
- bool Modified = false;
+void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ unsigned PredReg = 0;
+ ARMCC::CondCodes Pred = llvm::getInstrPredicate(&MI, PredReg);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
+ const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1);
+ MachineInstrBuilder LO16, HI16;
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- while (MBBI != E) {
- MachineInstr &MI = *MBBI;
- MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
+ if (!STI->hasV6T2Ops() &&
+ (Opcode == ARM::MOVi32imm || Opcode == ARM::MOVCCi32imm)) {
+    // Expand into a MOVi + ORRri pair.
+ LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi), DstReg);
+ HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::ORRri))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg);
+
+ assert (MO.isImm() && "MOVi32imm w/ non-immediate source operand!");
+ unsigned ImmVal = (unsigned)MO.getImm();
+ unsigned SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
+ unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
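+    // For example, 0xFF0000FF is not a valid shifter-operand immediate,
+    // but it splits into two rotated 8-bit chunks, giving a pair such as
+    // "mov DstReg, #0xFF000000" + "orr DstReg, DstReg, #0xFF".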
+ LO16 = LO16.addImm(SOImmValV1);
+ HI16 = HI16.addImm(SOImmValV2);
+ (*LO16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ (*HI16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.addImm(Pred).addReg(PredReg).addReg(0);
+ HI16.addImm(Pred).addReg(PredReg).addReg(0);
+ TransferImpOps(MI, LO16, HI16);
+ MI.eraseFromParent();
+ return;
+ }
+
+ unsigned LO16Opc = 0;
+ unsigned HI16Opc = 0;
+ if (Opcode == ARM::t2MOVi32imm || Opcode == ARM::t2MOVCCi32imm) {
+ LO16Opc = ARM::t2MOVi16;
+ HI16Opc = ARM::t2MOVTi16;
+ } else {
+ LO16Opc = ARM::MOVi16;
+ HI16Opc = ARM::MOVTi16;
+ }
- bool ModifiedOp = true;
- unsigned Opcode = MI.getOpcode();
- switch (Opcode) {
+ LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LO16Opc), DstReg);
+ HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg);
+
+ if (MO.isImm()) {
+ unsigned Imm = MO.getImm();
+ unsigned Lo16 = Imm & 0xffff;
+ unsigned Hi16 = (Imm >> 16) & 0xffff;
+ LO16 = LO16.addImm(Lo16);
+ HI16 = HI16.addImm(Hi16);
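+    // For example, an immediate of 0x12345678 expands to
+    // "movw DstReg, #0x5678" followed by "movt DstReg, #0x1234".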
+ } else {
+ const GlobalValue *GV = MO.getGlobal();
+ unsigned TF = MO.getTargetFlags();
+ LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
+ HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
+ }
+
+ (*LO16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ (*HI16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.addImm(Pred).addReg(PredReg);
+ HI16.addImm(Pred).addReg(PredReg);
+
+ TransferImpOps(MI, LO16, HI16);
+ MI.eraseFromParent();
+}
+
+bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ switch (Opcode) {
default:
- ModifiedOp = false;
- break;
+ return false;
+ case ARM::Int_eh_sjlj_dispatchsetup: {
+ MachineFunction &MF = *MI.getParent()->getParent();
+ const ARMBaseInstrInfo *AII =
+ static_cast<const ARMBaseInstrInfo*>(TII);
+ const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
+ // For functions using a base pointer, we rematerialize it (via the frame
+ // pointer) here since eh.sjlj.setjmp and eh.sjlj.longjmp don't do it
+ // for us. Otherwise, expand to nothing.
+ if (RI.hasBasePointer(MF)) {
+ int32_t NumBytes = AFI->getFramePtrSpillOffset();
+ unsigned FramePtr = RI.getFrameRegister(MF);
+ assert(MF.getTarget().getFrameLowering()->hasFP(MF) &&
+ "base pointer without frame pointer?");
+
+ if (AFI->isThumb2Function()) {
+ llvm::emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
+ FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
+ } else if (AFI->isThumbFunction()) {
+ llvm::emitThumbRegPlusImmediate(MBB, MBBI, ARM::R6,
+ FramePtr, -NumBytes,
+ *TII, RI, MI.getDebugLoc());
+ } else {
+ llvm::emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
+ FramePtr, -NumBytes, ARMCC::AL, 0,
+ *TII);
+ }
+ // If there's dynamic realignment, adjust for it.
+ if (RI.needsStackRealignment(MF)) {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned MaxAlign = MFI->getMaxAlignment();
+ assert (!AFI->isThumb1OnlyFunction());
+        // Emit bic r6, r6, #(MaxAlign-1) to align down to MaxAlign.
+ unsigned bicOpc = AFI->isThumbFunction() ?
+ ARM::t2BICri : ARM::BICri;
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(bicOpc), ARM::R6)
+ .addReg(ARM::R6, RegState::Kill)
+ .addImm(MaxAlign-1)));
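+        // e.g., with MaxAlign = 8 this emits "bic r6, r6, #7", clearing
+        // the low three bits of the rematerialized base pointer.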
+ }
+
+ }
+ MI.eraseFromParent();
+ return true;
+ }
- case ARM::tLDRpci_pic:
+ case ARM::MOVsrl_flag:
+ case ARM::MOVsra_flag: {
+    // These are just fancy MOVs instructions.
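+    // i.e., "movs Rd, Rm, lsr #1" or "movs Rd, Rm, asr #1"; the bit
+    // shifted out is left in the carry flag.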
+ AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVs),
+ MI.getOperand(0).getReg())
+ .addOperand(MI.getOperand(1))
+ .addReg(0)
+ .addImm(ARM_AM::getSORegOpc((Opcode == ARM::MOVsrl_flag ? ARM_AM::lsr
+ : ARM_AM::asr), 1)))
+ .addReg(ARM::CPSR, RegState::Define);
+ MI.eraseFromParent();
+ return true;
+ }
+ case ARM::RRX: {
+    // This encodes as "MOVs Rd, Rm, rrx".
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVs),
+ MI.getOperand(0).getReg())
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(1))
+ .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0)))
+ .addReg(0);
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
+ case ARM::TPsoft: {
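+    // Expands to a call to the AEABI helper __aeabi_read_tp, which
+    // returns the thread pointer in r0.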
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(ARM::BL))
+ .addExternalSymbol("__aeabi_read_tp", 0);
+
+ (*MIB).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
+ case ARM::tLDRpci_pic:
case ARM::t2LDRpci_pic: {
unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
? ARM::tLDRpci : ARM::t2LDRpci;
@@ -225,54 +803,73 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
(*MIB1).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::tPICADD))
- .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
.addReg(DstReg)
.addOperand(MI.getOperand(2));
TransferImpOps(MI, MIB1, MIB2);
MI.eraseFromParent();
- break;
+ return true;
}
- case ARM::MOVi32imm:
- case ARM::t2MOVi32imm: {
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(&MI, PredReg);
+ case ARM::MOV_ga_dyn:
+ case ARM::MOV_ga_pcrel:
+ case ARM::MOV_ga_pcrel_ldr:
+ case ARM::t2MOV_ga_dyn:
+ case ARM::t2MOV_ga_pcrel: {
+    // Expand into movw + movt. Also "add pc" / ldr [pc] in PIC mode.
+ unsigned LabelId = AFI->createPICLabelUId();
unsigned DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
- const MachineOperand &MO = MI.getOperand(1);
- MachineInstrBuilder LO16, HI16;
-
- LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
- TII->get(Opcode == ARM::MOVi32imm ?
- ARM::MOVi16 : ARM::t2MOVi16),
- DstReg);
- HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
- TII->get(Opcode == ARM::MOVi32imm ?
- ARM::MOVTi16 : ARM::t2MOVTi16))
- .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(DstReg);
-
- if (MO.isImm()) {
- unsigned Imm = MO.getImm();
- unsigned Lo16 = Imm & 0xffff;
- unsigned Hi16 = (Imm >> 16) & 0xffff;
- LO16 = LO16.addImm(Lo16);
- HI16 = HI16.addImm(Hi16);
- } else {
- const GlobalValue *GV = MO.getGlobal();
- unsigned TF = MO.getTargetFlags();
- LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
- HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
+ const MachineOperand &MO1 = MI.getOperand(1);
+ const GlobalValue *GV = MO1.getGlobal();
+ unsigned TF = MO1.getTargetFlags();
+ bool isARM = Opcode != ARM::t2MOV_ga_pcrel;
+ bool isPIC = (Opcode != ARM::MOV_ga_dyn && Opcode != ARM::t2MOV_ga_dyn);
+ unsigned LO16Opc = isARM ? ARM::MOVi16_ga_pcrel : ARM::t2MOVi16_ga_pcrel;
+ unsigned HI16Opc = isARM ? ARM::MOVTi16_ga_pcrel : ARM::t2MOVTi16_ga_pcrel;
+ unsigned LO16TF = isPIC
+ ? ARMII::MO_LO16_NONLAZY_PIC : ARMII::MO_LO16_NONLAZY;
+ unsigned HI16TF = isPIC
+ ? ARMII::MO_HI16_NONLAZY_PIC : ARMII::MO_HI16_NONLAZY;
+ unsigned PICAddOpc = isARM
+ ? (Opcode == ARM::MOV_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
+ : ARM::tPICADD;
+ MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(LO16Opc), DstReg)
+ .addGlobalAddress(GV, MO1.getOffset(), TF | LO16TF)
+ .addImm(LabelId);
+ MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(HI16Opc), DstReg)
+ .addReg(DstReg)
+ .addGlobalAddress(GV, MO1.getOffset(), TF | HI16TF)
+ .addImm(LabelId);
+ if (!isPIC) {
+ TransferImpOps(MI, MIB1, MIB2);
+ MI.eraseFromParent();
+ return true;
}
- (*LO16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- (*HI16).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- LO16.addImm(Pred).addReg(PredReg);
- HI16.addImm(Pred).addReg(PredReg);
- TransferImpOps(MI, LO16, HI16);
+
+ MachineInstrBuilder MIB3 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(PICAddOpc))
+ .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(DstReg).addImm(LabelId);
+ if (isARM) {
+ AddDefaultPred(MIB3);
+ if (Opcode == ARM::MOV_ga_pcrel_ldr)
+ (*MIB2).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ }
+ TransferImpOps(MI, MIB1, MIB3);
MI.eraseFromParent();
- break;
+ return true;
}
+ case ARM::MOVi32imm:
+ case ARM::MOVCCi32imm:
+ case ARM::t2MOVi32imm:
+ case ARM::t2MOVCCi32imm:
+ ExpandMOV32BitImm(MBB, MBBI);
+ return true;
+
case ARM::VMOVQQ: {
unsigned DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
@@ -285,222 +882,339 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
MachineInstrBuilder Even =
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::VMOVQ))
- .addReg(EvenDst,
- getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(EvenSrc, getKillRegState(SrcIsKill)));
+ .addReg(EvenDst,
+ RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(EvenSrc, getKillRegState(SrcIsKill)));
MachineInstrBuilder Odd =
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::VMOVQ))
- .addReg(OddDst,
- getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(OddSrc, getKillRegState(SrcIsKill)));
+ .addReg(OddDst,
+ RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(OddSrc, getKillRegState(SrcIsKill)));
TransferImpOps(MI, Even, Odd);
MI.eraseFromParent();
+ return true;
+ }
+
+ case ARM::VLDMQIA:
+ case ARM::VLDMQDB: {
+ unsigned NewOpc = (Opcode == ARM::VLDMQIA) ? ARM::VLDMDIA : ARM::VLDMDDB;
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
+ unsigned OpIdx = 0;
+
+ // Grab the Q register destination.
+ bool DstIsDead = MI.getOperand(OpIdx).isDead();
+ unsigned DstReg = MI.getOperand(OpIdx++).getReg();
+
+ // Copy the source register.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Add the destination operands (D subregs).
+ unsigned D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
+ unsigned D1 = TRI->getSubReg(DstReg, ARM::dsub_1);
+ MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
+ .addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
+
+ // Add an implicit def for the super-register.
+ MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ case ARM::VSTMQIA:
+ case ARM::VSTMQDB: {
+ unsigned NewOpc = (Opcode == ARM::VSTMQIA) ? ARM::VSTMDIA : ARM::VSTMDDB;
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
+ unsigned OpIdx = 0;
+
+ // Grab the Q register source.
+ bool SrcIsKill = MI.getOperand(OpIdx).isKill();
+ unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
+
+ // Copy the destination register.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Copy the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ // Add the source operands (D subregs).
+ unsigned D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
+ unsigned D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
+ MIB.addReg(D0).addReg(D1);
+
+ if (SrcIsKill)
+ // Add an implicit kill for the Q register.
+ (*MIB).addRegisterKilled(SrcReg, TRI, true);
+
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
+ }
+ case ARM::VDUPfqf:
+ case ARM::VDUPfdf:{
+ unsigned NewOpc = Opcode == ARM::VDUPfqf ? ARM::VDUPLNfq : ARM::VDUPLNfd;
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
+ unsigned OpIdx = 0;
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned Lane = getARMRegisterNumbering(SrcReg) & 1;
+ unsigned DReg = TRI->getMatchingSuperReg(SrcReg,
+ Lane & 1 ? ARM::ssub_1 : ARM::ssub_0, &ARM::DPR_VFP2RegClass);
+ // The lane is [0,1] for the containing DReg superregister.
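+    // e.g., S1 is ssub_1 of D0, so a VDUP from S1 reads lane 1 of D0.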
+ // Copy the dst/src register operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addReg(DReg);
+ ++OpIdx;
+ // Add the lane select operand.
+ MIB.addImm(Lane);
+ // Add the predicate operands.
+ MIB.addOperand(MI.getOperand(OpIdx++));
+ MIB.addOperand(MI.getOperand(OpIdx++));
+
+ TransferImpOps(MI, MIB, MIB);
+ MI.eraseFromParent();
+ return true;
}
case ARM::VLD1q8Pseudo:
- ExpandVLD(MBBI, ARM::VLD1q8, false, SingleSpc, 2); break;
case ARM::VLD1q16Pseudo:
- ExpandVLD(MBBI, ARM::VLD1q16, false, SingleSpc, 2); break;
case ARM::VLD1q32Pseudo:
- ExpandVLD(MBBI, ARM::VLD1q32, false, SingleSpc, 2); break;
case ARM::VLD1q64Pseudo:
- ExpandVLD(MBBI, ARM::VLD1q64, false, SingleSpc, 2); break;
case ARM::VLD1q8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1q8, true, SingleSpc, 2); break;
case ARM::VLD1q16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1q16, true, SingleSpc, 2); break;
case ARM::VLD1q32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1q32, true, SingleSpc, 2); break;
case ARM::VLD1q64Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1q64, true, SingleSpc, 2); break;
-
case ARM::VLD2d8Pseudo:
- ExpandVLD(MBBI, ARM::VLD2d8, false, SingleSpc, 2); break;
case ARM::VLD2d16Pseudo:
- ExpandVLD(MBBI, ARM::VLD2d16, false, SingleSpc, 2); break;
case ARM::VLD2d32Pseudo:
- ExpandVLD(MBBI, ARM::VLD2d32, false, SingleSpc, 2); break;
case ARM::VLD2q8Pseudo:
- ExpandVLD(MBBI, ARM::VLD2q8, false, SingleSpc, 4); break;
case ARM::VLD2q16Pseudo:
- ExpandVLD(MBBI, ARM::VLD2q16, false, SingleSpc, 4); break;
case ARM::VLD2q32Pseudo:
- ExpandVLD(MBBI, ARM::VLD2q32, false, SingleSpc, 4); break;
case ARM::VLD2d8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2d8, true, SingleSpc, 2); break;
case ARM::VLD2d16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2d16, true, SingleSpc, 2); break;
case ARM::VLD2d32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2d32, true, SingleSpc, 2); break;
case ARM::VLD2q8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2q8, true, SingleSpc, 4); break;
case ARM::VLD2q16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2q16, true, SingleSpc, 4); break;
case ARM::VLD2q32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD2q32, true, SingleSpc, 4); break;
-
case ARM::VLD3d8Pseudo:
- ExpandVLD(MBBI, ARM::VLD3d8, false, SingleSpc, 3); break;
case ARM::VLD3d16Pseudo:
- ExpandVLD(MBBI, ARM::VLD3d16, false, SingleSpc, 3); break;
case ARM::VLD3d32Pseudo:
- ExpandVLD(MBBI, ARM::VLD3d32, false, SingleSpc, 3); break;
case ARM::VLD1d64TPseudo:
- ExpandVLD(MBBI, ARM::VLD1d64T, false, SingleSpc, 3); break;
case ARM::VLD3d8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3d8_UPD, true, SingleSpc, 3); break;
case ARM::VLD3d16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3d16_UPD, true, SingleSpc, 3); break;
case ARM::VLD3d32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3d32_UPD, true, SingleSpc, 3); break;
case ARM::VLD1d64TPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1d64T_UPD, true, SingleSpc, 3); break;
case ARM::VLD3q8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q8_UPD, true, EvenDblSpc, 3); break;
case ARM::VLD3q16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q16_UPD, true, EvenDblSpc, 3); break;
case ARM::VLD3q32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q32_UPD, true, EvenDblSpc, 3); break;
+ case ARM::VLD3q8oddPseudo:
+ case ARM::VLD3q16oddPseudo:
+ case ARM::VLD3q32oddPseudo:
case ARM::VLD3q8oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q8_UPD, true, OddDblSpc, 3); break;
case ARM::VLD3q16oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q16_UPD, true, OddDblSpc, 3); break;
case ARM::VLD3q32oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD3q32_UPD, true, OddDblSpc, 3); break;
-
case ARM::VLD4d8Pseudo:
- ExpandVLD(MBBI, ARM::VLD4d8, false, SingleSpc, 4); break;
case ARM::VLD4d16Pseudo:
- ExpandVLD(MBBI, ARM::VLD4d16, false, SingleSpc, 4); break;
case ARM::VLD4d32Pseudo:
- ExpandVLD(MBBI, ARM::VLD4d32, false, SingleSpc, 4); break;
case ARM::VLD1d64QPseudo:
- ExpandVLD(MBBI, ARM::VLD1d64Q, false, SingleSpc, 4); break;
case ARM::VLD4d8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4d8_UPD, true, SingleSpc, 4); break;
case ARM::VLD4d16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4d16_UPD, true, SingleSpc, 4); break;
case ARM::VLD4d32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4d32_UPD, true, SingleSpc, 4); break;
case ARM::VLD1d64QPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD1d64Q_UPD, true, SingleSpc, 4); break;
case ARM::VLD4q8Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q8_UPD, true, EvenDblSpc, 4); break;
case ARM::VLD4q16Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q16_UPD, true, EvenDblSpc, 4); break;
case ARM::VLD4q32Pseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q32_UPD, true, EvenDblSpc, 4); break;
+ case ARM::VLD4q8oddPseudo:
+ case ARM::VLD4q16oddPseudo:
+ case ARM::VLD4q32oddPseudo:
case ARM::VLD4q8oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q8_UPD, true, OddDblSpc, 4); break;
case ARM::VLD4q16oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q16_UPD, true, OddDblSpc, 4); break;
case ARM::VLD4q32oddPseudo_UPD:
- ExpandVLD(MBBI, ARM::VLD4q32_UPD, true, OddDblSpc, 4); break;
+ case ARM::VLD1DUPq8Pseudo:
+ case ARM::VLD1DUPq16Pseudo:
+ case ARM::VLD1DUPq32Pseudo:
+ case ARM::VLD1DUPq8Pseudo_UPD:
+ case ARM::VLD1DUPq16Pseudo_UPD:
+ case ARM::VLD1DUPq32Pseudo_UPD:
+ case ARM::VLD2DUPd8Pseudo:
+ case ARM::VLD2DUPd16Pseudo:
+ case ARM::VLD2DUPd32Pseudo:
+ case ARM::VLD2DUPd8Pseudo_UPD:
+ case ARM::VLD2DUPd16Pseudo_UPD:
+ case ARM::VLD2DUPd32Pseudo_UPD:
+ case ARM::VLD3DUPd8Pseudo:
+ case ARM::VLD3DUPd16Pseudo:
+ case ARM::VLD3DUPd32Pseudo:
+ case ARM::VLD3DUPd8Pseudo_UPD:
+ case ARM::VLD3DUPd16Pseudo_UPD:
+ case ARM::VLD3DUPd32Pseudo_UPD:
+ case ARM::VLD4DUPd8Pseudo:
+ case ARM::VLD4DUPd16Pseudo:
+ case ARM::VLD4DUPd32Pseudo:
+ case ARM::VLD4DUPd8Pseudo_UPD:
+ case ARM::VLD4DUPd16Pseudo_UPD:
+ case ARM::VLD4DUPd32Pseudo_UPD:
+ ExpandVLD(MBBI);
+ return true;
case ARM::VST1q8Pseudo:
- ExpandVST(MBBI, ARM::VST1q8, false, SingleSpc, 2); break;
case ARM::VST1q16Pseudo:
- ExpandVST(MBBI, ARM::VST1q16, false, SingleSpc, 2); break;
case ARM::VST1q32Pseudo:
- ExpandVST(MBBI, ARM::VST1q32, false, SingleSpc, 2); break;
case ARM::VST1q64Pseudo:
- ExpandVST(MBBI, ARM::VST1q64, false, SingleSpc, 2); break;
case ARM::VST1q8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST1q8_UPD, true, SingleSpc, 2); break;
case ARM::VST1q16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST1q16_UPD, true, SingleSpc, 2); break;
case ARM::VST1q32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST1q32_UPD, true, SingleSpc, 2); break;
case ARM::VST1q64Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST1q64_UPD, true, SingleSpc, 2); break;
-
case ARM::VST2d8Pseudo:
- ExpandVST(MBBI, ARM::VST2d8, false, SingleSpc, 2); break;
case ARM::VST2d16Pseudo:
- ExpandVST(MBBI, ARM::VST2d16, false, SingleSpc, 2); break;
case ARM::VST2d32Pseudo:
- ExpandVST(MBBI, ARM::VST2d32, false, SingleSpc, 2); break;
case ARM::VST2q8Pseudo:
- ExpandVST(MBBI, ARM::VST2q8, false, SingleSpc, 4); break;
case ARM::VST2q16Pseudo:
- ExpandVST(MBBI, ARM::VST2q16, false, SingleSpc, 4); break;
case ARM::VST2q32Pseudo:
- ExpandVST(MBBI, ARM::VST2q32, false, SingleSpc, 4); break;
case ARM::VST2d8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2d8_UPD, true, SingleSpc, 2); break;
case ARM::VST2d16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2d16_UPD, true, SingleSpc, 2); break;
case ARM::VST2d32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2d32_UPD, true, SingleSpc, 2); break;
case ARM::VST2q8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2q8_UPD, true, SingleSpc, 4); break;
case ARM::VST2q16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2q16_UPD, true, SingleSpc, 4); break;
case ARM::VST2q32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST2q32_UPD, true, SingleSpc, 4); break;
-
case ARM::VST3d8Pseudo:
- ExpandVST(MBBI, ARM::VST3d8, false, SingleSpc, 3); break;
case ARM::VST3d16Pseudo:
- ExpandVST(MBBI, ARM::VST3d16, false, SingleSpc, 3); break;
case ARM::VST3d32Pseudo:
- ExpandVST(MBBI, ARM::VST3d32, false, SingleSpc, 3); break;
case ARM::VST1d64TPseudo:
- ExpandVST(MBBI, ARM::VST1d64T, false, SingleSpc, 3); break;
case ARM::VST3d8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3d8_UPD, true, SingleSpc, 3); break;
case ARM::VST3d16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3d16_UPD, true, SingleSpc, 3); break;
case ARM::VST3d32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3d32_UPD, true, SingleSpc, 3); break;
case ARM::VST1d64TPseudo_UPD:
- ExpandVST(MBBI, ARM::VST1d64T_UPD, true, SingleSpc, 3); break;
case ARM::VST3q8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q8_UPD, true, EvenDblSpc, 3); break;
case ARM::VST3q16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q16_UPD, true, EvenDblSpc, 3); break;
case ARM::VST3q32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q32_UPD, true, EvenDblSpc, 3); break;
+ case ARM::VST3q8oddPseudo:
+ case ARM::VST3q16oddPseudo:
+ case ARM::VST3q32oddPseudo:
case ARM::VST3q8oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q8_UPD, true, OddDblSpc, 3); break;
case ARM::VST3q16oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q16_UPD, true, OddDblSpc, 3); break;
case ARM::VST3q32oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST3q32_UPD, true, OddDblSpc, 3); break;
-
case ARM::VST4d8Pseudo:
- ExpandVST(MBBI, ARM::VST4d8, false, SingleSpc, 4); break;
case ARM::VST4d16Pseudo:
- ExpandVST(MBBI, ARM::VST4d16, false, SingleSpc, 4); break;
case ARM::VST4d32Pseudo:
- ExpandVST(MBBI, ARM::VST4d32, false, SingleSpc, 4); break;
case ARM::VST1d64QPseudo:
- ExpandVST(MBBI, ARM::VST1d64Q, false, SingleSpc, 4); break;
case ARM::VST4d8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4d8_UPD, true, SingleSpc, 4); break;
case ARM::VST4d16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4d16_UPD, true, SingleSpc, 4); break;
case ARM::VST4d32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4d32_UPD, true, SingleSpc, 4); break;
case ARM::VST1d64QPseudo_UPD:
- ExpandVST(MBBI, ARM::VST1d64Q_UPD, true, SingleSpc, 4); break;
case ARM::VST4q8Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q8_UPD, true, EvenDblSpc, 4); break;
case ARM::VST4q16Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q16_UPD, true, EvenDblSpc, 4); break;
case ARM::VST4q32Pseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q32_UPD, true, EvenDblSpc, 4); break;
+ case ARM::VST4q8oddPseudo:
+ case ARM::VST4q16oddPseudo:
+ case ARM::VST4q32oddPseudo:
case ARM::VST4q8oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q8_UPD, true, OddDblSpc, 4); break;
case ARM::VST4q16oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q16_UPD, true, OddDblSpc, 4); break;
case ARM::VST4q32oddPseudo_UPD:
- ExpandVST(MBBI, ARM::VST4q32_UPD, true, OddDblSpc, 4); break;
- }
+ ExpandVST(MBBI);
+ return true;
+
+ case ARM::VLD1LNq8Pseudo:
+ case ARM::VLD1LNq16Pseudo:
+ case ARM::VLD1LNq32Pseudo:
+ case ARM::VLD1LNq8Pseudo_UPD:
+ case ARM::VLD1LNq16Pseudo_UPD:
+ case ARM::VLD1LNq32Pseudo_UPD:
+ case ARM::VLD2LNd8Pseudo:
+ case ARM::VLD2LNd16Pseudo:
+ case ARM::VLD2LNd32Pseudo:
+ case ARM::VLD2LNq16Pseudo:
+ case ARM::VLD2LNq32Pseudo:
+ case ARM::VLD2LNd8Pseudo_UPD:
+ case ARM::VLD2LNd16Pseudo_UPD:
+ case ARM::VLD2LNd32Pseudo_UPD:
+ case ARM::VLD2LNq16Pseudo_UPD:
+ case ARM::VLD2LNq32Pseudo_UPD:
+ case ARM::VLD3LNd8Pseudo:
+ case ARM::VLD3LNd16Pseudo:
+ case ARM::VLD3LNd32Pseudo:
+ case ARM::VLD3LNq16Pseudo:
+ case ARM::VLD3LNq32Pseudo:
+ case ARM::VLD3LNd8Pseudo_UPD:
+ case ARM::VLD3LNd16Pseudo_UPD:
+ case ARM::VLD3LNd32Pseudo_UPD:
+ case ARM::VLD3LNq16Pseudo_UPD:
+ case ARM::VLD3LNq32Pseudo_UPD:
+ case ARM::VLD4LNd8Pseudo:
+ case ARM::VLD4LNd16Pseudo:
+ case ARM::VLD4LNd32Pseudo:
+ case ARM::VLD4LNq16Pseudo:
+ case ARM::VLD4LNq32Pseudo:
+ case ARM::VLD4LNd8Pseudo_UPD:
+ case ARM::VLD4LNd16Pseudo_UPD:
+ case ARM::VLD4LNd32Pseudo_UPD:
+ case ARM::VLD4LNq16Pseudo_UPD:
+ case ARM::VLD4LNq32Pseudo_UPD:
+ case ARM::VST1LNq8Pseudo:
+ case ARM::VST1LNq16Pseudo:
+ case ARM::VST1LNq32Pseudo:
+ case ARM::VST1LNq8Pseudo_UPD:
+ case ARM::VST1LNq16Pseudo_UPD:
+ case ARM::VST1LNq32Pseudo_UPD:
+ case ARM::VST2LNd8Pseudo:
+ case ARM::VST2LNd16Pseudo:
+ case ARM::VST2LNd32Pseudo:
+ case ARM::VST2LNq16Pseudo:
+ case ARM::VST2LNq32Pseudo:
+ case ARM::VST2LNd8Pseudo_UPD:
+ case ARM::VST2LNd16Pseudo_UPD:
+ case ARM::VST2LNd32Pseudo_UPD:
+ case ARM::VST2LNq16Pseudo_UPD:
+ case ARM::VST2LNq32Pseudo_UPD:
+ case ARM::VST3LNd8Pseudo:
+ case ARM::VST3LNd16Pseudo:
+ case ARM::VST3LNd32Pseudo:
+ case ARM::VST3LNq16Pseudo:
+ case ARM::VST3LNq32Pseudo:
+ case ARM::VST3LNd8Pseudo_UPD:
+ case ARM::VST3LNd16Pseudo_UPD:
+ case ARM::VST3LNd32Pseudo_UPD:
+ case ARM::VST3LNq16Pseudo_UPD:
+ case ARM::VST3LNq32Pseudo_UPD:
+ case ARM::VST4LNd8Pseudo:
+ case ARM::VST4LNd16Pseudo:
+ case ARM::VST4LNd32Pseudo:
+ case ARM::VST4LNq16Pseudo:
+ case ARM::VST4LNq32Pseudo:
+ case ARM::VST4LNd8Pseudo_UPD:
+ case ARM::VST4LNd16Pseudo_UPD:
+ case ARM::VST4LNd32Pseudo_UPD:
+ case ARM::VST4LNq16Pseudo_UPD:
+ case ARM::VST4LNq32Pseudo_UPD:
+ ExpandLaneOp(MBBI);
+ return true;
+
+ case ARM::VTBL2Pseudo: ExpandVTBL(MBBI, ARM::VTBL2, false, 2); return true;
+ case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false, 3); return true;
+ case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false, 4); return true;
+ case ARM::VTBX2Pseudo: ExpandVTBL(MBBI, ARM::VTBX2, true, 2); return true;
+ case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true, 3); return true;
+ case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true, 4); return true;
+ }
+
+ return false;
+}
- if (ModifiedOp)
- Modified = true;
+bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
+ bool Modified = false;
+
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ while (MBBI != E) {
+ MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
+ Modified |= ExpandMI(MBB, MBBI);
MBBI = NMBBI;
}
@@ -508,8 +1222,11 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
}
bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ const TargetMachine &TM = MF.getTarget();
+ TII = static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
+ TRI = TM.getRegisterInfo();
+ STI = &TM.getSubtarget<ARMSubtarget>();
+ AFI = MF.getInfo<ARMFunctionInfo>();
bool Modified = false;
for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index 4892eae..9f29530 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -15,14 +15,17 @@
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
+#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
+#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -30,7 +33,9 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -43,12 +48,37 @@
using namespace llvm;
static cl::opt<bool>
-EnableARMFastISel("arm-fast-isel",
- cl::desc("Turn on experimental ARM fast-isel support"),
- cl::init(false), cl::Hidden);
+DisableARMFastISel("disable-arm-fast-isel",
+ cl::desc("Turn off experimental ARM fast-isel support"),
+ cl::init(false), cl::Hidden);
+
+extern cl::opt<bool> EnableARMLongCalls;
namespace {
+ // All possible address modes, plus some.
+ typedef struct Address {
+ enum {
+ RegBase,
+ FrameIndexBase
+ } BaseType;
+
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ int Offset;
+ unsigned Scale;
+ unsigned PlusReg;
+
+ // Innocuous defaults for our address.
+ Address()
+ : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
+ Base.Reg = 0;
+ }
+ } Address;
+
class ARMFastISel : public FastISel {
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
@@ -57,13 +87,14 @@ class ARMFastISel : public FastISel {
const TargetMachine &TM;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const ARMFunctionInfo *AFI;
+ ARMFunctionInfo *AFI;
- // Convenience variable to avoid checking all the time.
+ // Convenience variables to avoid some queries.
bool isThumb;
+ LLVMContext *Context;
public:
- explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
+ explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
: FastISel(funcInfo),
TM(funcInfo.MF->getTarget()),
TII(*TM.getInstrInfo()),
@@ -71,6 +102,7 @@ class ARMFastISel : public FastISel {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
isThumb = AFI->isThumbFunction();
+ Context = &funcInfo.Fn->getContext();
}
// Code from FastISel.cpp.
@@ -102,36 +134,73 @@ class ARMFastISel : public FastISel {
virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, bool Op0IsKill,
uint32_t Idx);
-
+
// Backend specific FastISel code.
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
+ virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
#include "ARMGenFastISel.inc"
-
+
// Instruction selection routines.
- virtual bool ARMSelectLoad(const Instruction *I);
- virtual bool ARMSelectStore(const Instruction *I);
- virtual bool ARMSelectBranch(const Instruction *I);
+ private:
+ bool SelectLoad(const Instruction *I);
+ bool SelectStore(const Instruction *I);
+ bool SelectBranch(const Instruction *I);
+ bool SelectCmp(const Instruction *I);
+ bool SelectFPExt(const Instruction *I);
+ bool SelectFPTrunc(const Instruction *I);
+ bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
+ bool SelectSIToFP(const Instruction *I);
+ bool SelectFPToSI(const Instruction *I);
+ bool SelectSDiv(const Instruction *I);
+ bool SelectSRem(const Instruction *I);
+ bool SelectCall(const Instruction *I);
+ bool SelectSelect(const Instruction *I);
+ bool SelectRet(const Instruction *I);
// Utility routines.
private:
- bool isTypeLegal(const Type *Ty, EVT &VT);
- bool isLoadTypeLegal(const Type *Ty, EVT &VT);
- bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Reg, int Offset);
- bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Reg, int Offset);
- bool ARMLoadAlloca(const Instruction *I);
- bool ARMStoreAlloca(const Instruction *I, unsigned SrcReg);
- bool ARMComputeRegOffset(const Value *Obj, unsigned &Reg, int &Offset);
- bool ARMMaterializeConstant(const ConstantInt *Val, unsigned &Reg);
-
+ bool isTypeLegal(const Type *Ty, MVT &VT);
+ bool isLoadTypeLegal(const Type *Ty, MVT &VT);
+ bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
+ bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
+ bool ARMComputeAddress(const Value *Obj, Address &Addr);
+ void ARMSimplifyAddress(Address &Addr, EVT VT);
+ unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
+ unsigned ARMMaterializeInt(const Constant *C, EVT VT);
+ unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
+ unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
+ unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
+
+ // Call handling routines.
+ private:
+ bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
+ unsigned &ResultReg);
+ CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
+ bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes);
+ bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes);
+ bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
+
+ // OptionalDef handling routines.
+ private:
bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
+ void AddLoadStoreOperands(EVT VT, Address &Addr,
+ const MachineInstrBuilder &MIB);
};
} // end anonymous namespace
-// #include "ARMGenCallingConv.inc"
+#include "ARMGenCallingConv.inc"
// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
@@ -153,6 +222,9 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
+// TODO: If we want to support thumb1 then we'll need to deal with optional
+// CPSR defs that need to be added before the remaining operands. See s_cc_out
+// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
MachineInstr *MI = &*MIB;
@@ -160,7 +232,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
// Do we use a predicate?
if (TII.isPredicable(MI))
AddDefaultPred(MIB);
-
+
// Do we optionally set a predicate? Preds is size > 0 iff the predicate
// defines CPSR. All other OptionalDefines in ARM are the CCR register.
bool CPSR = false;
@@ -297,7 +369,7 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
-
+
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addImm(Imm));
@@ -323,16 +395,84 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
return ResultReg;
}
-unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
- EVT VT = TLI.getValueType(C->getType(), true);
+// TODO: Don't worry about 64-bit now, but when this is fixed remove the
+// checks from the various callers.
+unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
+ if (VT == MVT::f64) return 0;
- // Only handle simple types.
- if (!VT.isSimple()) return 0;
-
- // TODO: This should be safe for fp because they're just bits from the
- // Constant.
- // TODO: Theoretically we could materialize fp constants with instructions
- // from VFP3.
+ unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVSR), MoveReg)
+ .addReg(SrcReg));
+ return MoveReg;
+}
+
+unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
+ if (VT == MVT::i64) return 0;
+
+ unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVRS), MoveReg)
+ .addReg(SrcReg));
+ return MoveReg;
+}
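+
+// Editorial sketch (not part of the original patch): these helpers wrap the
+// single-word GPR<->SPR moves used by the int<->fp conversions below:
+//
+//   unsigned FP  = ARMMoveToFPReg(MVT::f32, IntReg);  // vmov sN, rM (VMOVSR)
+//   unsigned Int = ARMMoveToIntReg(MVT::i32, FPReg);  // vmov rM, sN (VMOVRS)
+//
+// Both return 0 for 64-bit types, which callers treat as "bail out".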
+
+// Materialize a floating-point constant into a register: either directly,
+// with a VFP3 fp-immediate move, or via a load from the constant pool.
+unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
+ const APFloat Val = CFP->getValueAPF();
+ bool is64bit = VT == MVT::f64;
+
+ // This checks to see if we can use VFP3 instructions to materialize
+ // a constant, otherwise we have to go through the constant pool.
+ if (TLI.isFPImmLegal(Val, VT)) {
+ unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ DestReg)
+ .addFPImm(CFP));
+ return DestReg;
+ }
+
+ // Require VFP2 for loading fp constants.
+ if (!Subtarget->hasVFP2()) return 0;
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ if (Align == 0) {
+ // TODO: Figure out if this is correct.
+ Align = TD.getTypeAllocSize(CFP->getType());
+ }
+ unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
+
+ // The extra reg is for addrmode5.
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ DestReg)
+ .addConstantPoolIndex(Idx)
+ .addReg(0));
+ return DestReg;
+}
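+
+// Editorial sketch (not part of the original patch): isFPImmLegal succeeds on
+// VFP3 targets when the value fits the 8-bit fp-immediate encoding, e.g.
+//
+//   1.0f  -> single FCONSTS (vmov.f32 sN, #1.0), no constant pool
+//   0.1f  -> not encodable, falls through to the VLDRS constant-pool load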
+
+unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
+ // For now 32-bit only.
+ if (VT != MVT::i32) return 0;
+
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+
+ // If we can do this in a single instruction without a constant pool entry
+ // do so now.
+ const ConstantInt *CI = cast<ConstantInt>(C);
+ if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
+ unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), DestReg)
+ .addImm(CI->getSExtValue()));
+ return DestReg;
+ }
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType());
@@ -342,58 +482,144 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
}
unsigned Idx = MCP.getConstantPoolIndex(C, Align);
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
- // Different addressing modes between ARM/Thumb2 for constant pool loads.
if (isThumb)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::t2LDRpci))
- .addReg(DestReg).addConstantPoolIndex(Idx));
+ TII.get(ARM::t2LDRpci), DestReg)
+ .addConstantPoolIndex(Idx));
else
+ // The extra immediate is for addrmode2.
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::LDRcp))
- .addReg(DestReg).addConstantPoolIndex(Idx)
- .addReg(0).addImm(0));
-
+ TII.get(ARM::LDRcp), DestReg)
+ .addConstantPoolIndex(Idx)
+ .addImm(0));
+
return DestReg;
}
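+
+// Editorial note (observation, not part of the original patch): the
+// isUInt<16>(CI->getSExtValue()) guard above means only 0..65535 take the
+// single-instruction path, e.g.
+//
+//   i32 42  -> movw r0, #42         (MOVi16 / t2MOVi16)
+//   i32 -1  -> ldr r0, [pc, #off]   (constant pool; -1 sign-extends out of range)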
-bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
- VT = TLI.getValueType(Ty, true);
-
+unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
+ // For now 32-bit only.
+ if (VT != MVT::i32) return 0;
+
+ Reloc::Model RelocM = TM.getRelocationModel();
+
+ // TODO: No external globals for now.
+ if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;
+
+ // TODO: Need more magic for ARM PIC.
+ if (!isThumb && (RelocM == Reloc::PIC_)) return 0;
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = TD.getPrefTypeAlignment(GV->getType());
+ if (Align == 0) {
+ // TODO: Figure out if this is correct.
+ Align = TD.getTypeAllocSize(GV->getType());
+ }
+
+ // Grab index.
+ unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
+ unsigned Id = AFI->createPICLabelUId();
+ ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
+ ARMCP::CPValue, PCAdj);
+ unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
+
+ // Load value.
+ MachineInstrBuilder MIB;
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ if (isThumb) {
+ unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx);
+ if (RelocM == Reloc::PIC_)
+ MIB.addImm(Id);
+ } else {
+ // The extra immediate is for addrmode2.
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
+ DestReg)
+ .addConstantPoolIndex(Idx)
+ .addImm(0);
+ }
+ AddOptionalDefs(MIB);
+ return DestReg;
+}
+
+unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
+ EVT VT = TLI.getValueType(C->getType(), true);
+
// Only handle simple types.
- if (VT == MVT::Other || !VT.isSimple()) return false;
-
+ if (!VT.isSimple()) return 0;
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return ARMMaterializeFP(CFP, VT);
+ else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return ARMMaterializeGV(GV, VT);
+ else if (isa<ConstantInt>(C))
+ return ARMMaterializeInt(C, VT);
+
+ return 0;
+}
+
+unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+ // Don't handle dynamic allocas.
+ if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
+
+ MVT VT;
+ if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
+
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ // This will get lowered later into the correct offsets and registers
+ // via rewriteXFrameIndex.
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ TargetRegisterClass* RC = TLI.getRegClassFor(VT);
+ unsigned ResultReg = createResultReg(RC);
+ unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second)
+ .addImm(0));
+ return ResultReg;
+ }
+
+ return 0;
+}
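+
+// Editorial sketch (not part of the original patch): a static alloca is thus
+// materialized as its frame address and fixed up later by frame lowering:
+//
+//   add r0, <fi#N>, #0   // ADDri/t2ADDri, rewritten via rewriteXFrameIndex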
+
+bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
+ EVT evt = TLI.getValueType(Ty, true);
+
+ // Only handle simple types.
+ if (evt == MVT::Other || !evt.isSimple()) return false;
+ VT = evt.getSimpleVT();
+
// Handle all legal types, i.e. a register that will directly hold this
// value.
return TLI.isTypeLegal(VT);
}
-bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
+bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
if (isTypeLegal(Ty, VT)) return true;
-
+
// If this is a type that can be sign or zero-extended to a basic operation
// go ahead and accept it now.
if (VT == MVT::i8 || VT == MVT::i16)
return true;
-
+
return false;
}
-// Computes the Reg+Offset to get to an object.
-bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
- int &Offset) {
+// Computes the address to get to an object.
+bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
// Some boilerplate from the X86 FastISel.
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
- // Don't walk into other basic blocks; it's possible we haven't
- // visited them yet, so the instructions may not yet be assigned
- // virtual registers.
- if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
- return false;
-
- Opcode = I->getOpcode();
- U = I;
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
Opcode = C->getOpcode();
U = C;
@@ -404,141 +630,282 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Reg,
// Fast instruction selection doesn't support the special
// address spaces.
return false;
-
+
switch (Opcode) {
- default:
- //errs() << "Failing Opcode is: " << *Op1 << "\n";
+ default:
break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return ARMComputeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
+ return ARMComputeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
+ return ARMComputeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ int TmpOffset = Addr.Offset;
+
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ gep_type_iterator GTI = gep_type_begin(U);
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
+ i != e; ++i, ++GTI) {
+ const Value *Op = *i;
+ if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(Op);
+ do {
+ Op = Worklist.pop_back_val();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ } else if (isa<AddOperator>(Op) &&
+ isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+ // An add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Add the other operand back to the work list.
+ Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
+ } else
+ goto unsupported_gep;
+ } while (!Worklist.empty());
+ }
+ }
+
+ // Try to grab the base operand now.
+ Addr.Offset = TmpOffset;
+ if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
+
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+
+ unsupported_gep:
+ break;
+ }
case Instruction::Alloca: {
- assert(false && "Alloca should have been handled earlier!");
- return false;
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.BaseType = Address::FrameIndexBase;
+ Addr.Base.FI = SI->second;
+ return true;
+ }
+ break;
}
}
-
+
+ // Materialize the global variable's address into a reg which can
+ // then be used later to load the variable.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
- //errs() << "Failing GV is: " << GV << "\n";
- (void)GV;
- return false;
+ unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
+ if (Tmp == 0) return false;
+
+ Addr.Base.Reg = Tmp;
+ return true;
}
-
+
// Try to get this in a register if nothing else has worked.
- Reg = getRegForValue(Obj);
- if (Reg == 0) return false;
+ if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
+ return Addr.Base.Reg != 0;
+}
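+
+// Editorial sketch (not part of the original patch): for an access such as
+//
+//   %p = getelementptr {i32, i32, [4 x i32]}* %obj, i32 0, i32 2, i32 1
+//
+// the GetElementPtr case folds the chain into Addr.Offset == 12 (8 for the
+// third struct field plus 1 * 4 for the array index), with the register
+// holding %obj left as Addr.Base.Reg.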
+
+void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
- // Since the offset may be too large for the load instruction
+ assert(VT.isSimple() && "Non-simple types are invalid here!");
+
+ bool needsLowering = false;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default:
+ assert(false && "Unhandled load/store type!");
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ // Integer loads/stores handle 12-bit offsets.
+ needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ // Floating point operands handle 8-bit offsets.
+ needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
+ break;
+ }
+
+ // If this is a frame index and the offset needs to be simplified, then put
+ // the frame address into a register, set the base type back to register
+ // and continue. This should almost never happen.
+ if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
+ TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
+ ARM::GPRRegisterClass;
+ unsigned ResultReg = createResultReg(RC);
+ unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addFrameIndex(Addr.Base.FI)
+ .addImm(0));
+ Addr.Base.Reg = ResultReg;
+ Addr.BaseType = Address::RegBase;
+ }
+
+ // Since the offset is too large for the load/store instruction
// get the reg+offset into a register.
- // TODO: Verify the additions work, otherwise we'll need to add the
- // offset instead of 0 to the instructions and do all sorts of operand
- // munging.
- // TODO: Optimize this somewhat.
- if (Offset != 0) {
+ if (needsLowering) {
ARMCC::CondCodes Pred = ARMCC::AL;
unsigned PredReg = 0;
+ TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
+ ARM::GPRRegisterClass;
+ unsigned BaseReg = createResultReg(RC);
+
if (!isThumb)
emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- Reg, Reg, Offset, Pred, PredReg,
+ BaseReg, Addr.Base.Reg, Addr.Offset,
+ Pred, PredReg,
static_cast<const ARMBaseInstrInfo&>(TII));
else {
assert(AFI->isThumb2Function());
emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- Reg, Reg, Offset, Pred, PredReg,
+ BaseReg, Addr.Base.Reg, Addr.Offset, Pred, PredReg,
static_cast<const ARMBaseInstrInfo&>(TII));
}
+ Addr.Offset = 0;
+ Addr.Base.Reg = BaseReg;
}
-
- return true;
}
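+
+// Editorial sketch (not part of the original patch): integer offsets must fit
+// an unsigned 12-bit immediate, so an access at offset 4096 is rewritten as
+//
+//   add r2, r1, #4096   // emitARMRegPlusImmediate / emitT2RegPlusImmediate
+//   ldr r0, [r2]        // Addr.Offset reset to 0, Addr.Base.Reg = r2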
-bool ARMFastISel::ARMLoadAlloca(const Instruction *I) {
- Value *Op0 = I->getOperand(0);
+void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
+ const MachineInstrBuilder &MIB) {
+ // addrmode5 output depends on the selection dag dividing the offset by 4
+ // and multiplying it back later. Do the same division here.
+ if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
+ VT.getSimpleVT().SimpleTy == MVT::f64)
+ Addr.Offset /= 4;
+
+ // Frame base works a bit differently. Handle it separately.
+ if (Addr.BaseType == Address::FrameIndexBase) {
+ int FI = Addr.Base.FI;
+ int Offset = Addr.Offset;
+ MachineMemOperand *MMO =
+ FuncInfo.MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(FI, Offset),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FI),
+ MFI.getObjectAlignment(FI));
+ // Now add the rest of the operands.
+ MIB.addFrameIndex(FI);
- // Verify it's an alloca.
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op0)) {
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
-
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
- unsigned ResultReg = createResultReg(RC);
- TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
- ResultReg, SI->second, RC,
- TM.getRegisterInfo());
- UpdateValueMap(I, ResultReg);
- return true;
- }
+ // ARM halfword load/stores need an additional operand.
+ if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
+
+ MIB.addImm(Addr.Offset);
+ MIB.addMemOperand(MMO);
+ } else {
+ // Now add the rest of the operands.
+ MIB.addReg(Addr.Base.Reg);
+
+ // ARM halfword load/stores need an additional operand.
+ if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
+
+ MIB.addImm(Addr.Offset);
}
- return false;
+ AddOptionalDefs(MIB);
}
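+
+// Editorial notes (not part of the original patch): the divide-by-4 above
+// matches the addrmode5 encoding, where VLDR/VSTR carry a word offset, e.g.
+//
+//   vldr s0, [r1, #8]   // Addr.Offset of 8 is encoded as imm 2
+//
+// Note also that the frame-index MachineMemOperand above is created with
+// MOLoad on the store path too, which looks like an oversight here.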
-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
- unsigned Reg, int Offset) {
-
+bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
+
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
-
+ TargetRegisterClass *RC;
switch (VT.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Trying to emit for an unhandled type!");
- return false;
+ // This is mostly going to be Neon/vector support.
+ default: return false;
case MVT::i16:
- Opc = isThumb ? ARM::tLDRH : ARM::LDRH;
- VT = MVT::i32;
+ Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
+ RC = ARM::GPRRegisterClass;
break;
case MVT::i8:
- Opc = isThumb ? ARM::tLDRB : ARM::LDRB;
- VT = MVT::i32;
+ Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
+ RC = ARM::GPRRegisterClass;
break;
case MVT::i32:
- Opc = isThumb ? ARM::tLDR : ARM::LDR;
+ Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
+ RC = ARM::GPRRegisterClass;
+ break;
+ case MVT::f32:
+ Opc = ARM::VLDRS;
+ RC = TLI.getRegClassFor(VT);
+ break;
+ case MVT::f64:
+ Opc = ARM::VLDRD;
+ RC = TLI.getRegClassFor(VT);
break;
}
-
- ResultReg = createResultReg(TLI.getRegClassFor(VT));
-
- // TODO: Fix the Addressing modes so that these can share some code.
- // Since this is a Thumb1 load this will work in Thumb1 or 2 mode.
- if (isThumb)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg)
- .addReg(Reg).addImm(Offset).addReg(0));
- else
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg)
- .addReg(Reg).addReg(0).addImm(Offset));
-
+ // Simplify this down to something we can handle.
+ ARMSimplifyAddress(Addr, VT);
+
+ // Create the base instruction, then add the operands.
+ ResultReg = createResultReg(RC);
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg);
+ AddLoadStoreOperands(VT, Addr, MIB);
return true;
}
-bool ARMFastISel::ARMStoreAlloca(const Instruction *I, unsigned SrcReg) {
- Value *Op1 = I->getOperand(1);
+bool ARMFastISel::SelectLoad(const Instruction *I) {
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(I->getType(), VT))
+ return false;
- // Verify it's an alloca.
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
+ // See if we can handle this address.
+ Address Addr;
+ if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
- assert(SrcReg != 0 && "Nothing to store!");
- TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
- SrcReg, true /*isKill*/, SI->second, RC,
- TM.getRegisterInfo());
- return true;
- }
- }
- return false;
+ unsigned ResultReg;
+ if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
+ UpdateValueMap(I, ResultReg);
+ return true;
}
-bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
- unsigned DstReg, int Offset) {
+bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
unsigned StrOpc;
switch (VT.getSimpleVT().SimpleTy) {
+ // This is mostly going to be Neon/vector support.
default: return false;
- case MVT::i1:
- case MVT::i8: StrOpc = isThumb ? ARM::tSTRB : ARM::STRB; break;
- case MVT::i16: StrOpc = isThumb ? ARM::tSTRH : ARM::STRH; break;
- case MVT::i32: StrOpc = isThumb ? ARM::tSTR : ARM::STR; break;
+ case MVT::i1: {
+ unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
+ ARM::GPRRegisterClass);
+ unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), Res)
+ .addReg(SrcReg).addImm(1));
+ SrcReg = Res;
+ } // Fallthrough here.
+ case MVT::i8:
+ StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
+ break;
+ case MVT::i16:
+ StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
+ break;
+ case MVT::i32:
+ StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
+ break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
StrOpc = ARM::VSTRS;
@@ -548,91 +915,162 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
StrOpc = ARM::VSTRD;
break;
}
-
- if (isThumb)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(StrOpc), SrcReg)
- .addReg(DstReg).addImm(Offset).addReg(0));
- else
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(StrOpc), SrcReg)
- .addReg(DstReg).addReg(0).addImm(Offset));
-
+ // Simplify this down to something we can handle.
+ ARMSimplifyAddress(Addr, VT);
+
+ // Create the base instruction, then add the operands.
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(StrOpc))
+ .addReg(SrcReg, getKillRegState(true));
+ AddLoadStoreOperands(VT, Addr, MIB);
return true;
}
-bool ARMFastISel::ARMSelectStore(const Instruction *I) {
+bool ARMFastISel::SelectStore(const Instruction *I) {
Value *Op0 = I->getOperand(0);
unsigned SrcReg = 0;
- // Yay type legalization
- EVT VT;
+ // Verify we have a legal type before going any further.
+ MVT VT;
if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
return false;
// Get the value to be stored into a register.
SrcReg = getRegForValue(Op0);
- if (SrcReg == 0)
- return false;
-
- // If we're an alloca we know we have a frame index and can emit the store
- // quickly.
- if (ARMStoreAlloca(I, SrcReg))
- return true;
-
- // Our register and offset with innocuous defaults.
- unsigned Reg = 0;
- int Offset = 0;
-
- // See if we can handle this as Reg + Offset
- if (!ARMComputeRegOffset(I->getOperand(1), Reg, Offset))
- return false;
-
- if (!ARMEmitStore(VT, SrcReg, Reg, Offset /* 0 */)) return false;
-
- return false;
-
-}
+ if (SrcReg == 0) return false;
-bool ARMFastISel::ARMSelectLoad(const Instruction *I) {
- // If we're an alloca we know we have a frame index and can emit the load
- // directly in short order.
- if (ARMLoadAlloca(I))
- return true;
-
- // Verify we have a legal type before going any further.
- EVT VT;
- if (!isLoadTypeLegal(I->getType(), VT))
- return false;
-
- // Our register and offset with innocuous defaults.
- unsigned Reg = 0;
- int Offset = 0;
-
- // See if we can handle this as Reg + Offset
- if (!ARMComputeRegOffset(I->getOperand(0), Reg, Offset))
+ // See if we can handle this address.
+ Address Addr;
+ if (!ARMComputeAddress(I->getOperand(1), Addr))
return false;
-
- unsigned ResultReg;
- if (!ARMEmitLoad(VT, ResultReg, Reg, Offset /* 0 */)) return false;
-
- UpdateValueMap(I, ResultReg);
+
+ if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
return true;
}
-bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
+static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
+ switch (Pred) {
+ // Needs two compares...
+ case CmpInst::FCMP_ONE:
+ case CmpInst::FCMP_UEQ:
+ default:
+ // AL is our "false" for now. The other two need more compares.
+ return ARMCC::AL;
+ case CmpInst::ICMP_EQ:
+ case CmpInst::FCMP_OEQ:
+ return ARMCC::EQ;
+ case CmpInst::ICMP_SGT:
+ case CmpInst::FCMP_OGT:
+ return ARMCC::GT;
+ case CmpInst::ICMP_SGE:
+ case CmpInst::FCMP_OGE:
+ return ARMCC::GE;
+ case CmpInst::ICMP_UGT:
+ case CmpInst::FCMP_UGT:
+ return ARMCC::HI;
+ case CmpInst::FCMP_OLT:
+ return ARMCC::MI;
+ case CmpInst::ICMP_ULE:
+ case CmpInst::FCMP_OLE:
+ return ARMCC::LS;
+ case CmpInst::FCMP_ORD:
+ return ARMCC::VC;
+ case CmpInst::FCMP_UNO:
+ return ARMCC::VS;
+ case CmpInst::FCMP_UGE:
+ return ARMCC::PL;
+ case CmpInst::ICMP_SLT:
+ case CmpInst::FCMP_ULT:
+ return ARMCC::LT;
+ case CmpInst::ICMP_SLE:
+ case CmpInst::FCMP_ULE:
+ return ARMCC::LE;
+ case CmpInst::FCMP_UNE:
+ case CmpInst::ICMP_NE:
+ return ARMCC::NE;
+ case CmpInst::ICMP_UGE:
+ return ARMCC::HS;
+ case CmpInst::ICMP_ULT:
+ return ARMCC::LO;
+ }
+}
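+
+// Editorial sketch (not part of the original patch): sample rows from the
+// mapping above, using the usual ARM flag meanings after a compare:
+//
+//   icmp ult -> ARMCC::LO   (unsigned lower: C flag clear)
+//   fcmp uno -> ARMCC::VS   (V flag set: unordered, via VCMPE + FMSTAT)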
+
+bool ARMFastISel::SelectBranch(const Instruction *I) {
const BranchInst *BI = cast<BranchInst>(I);
MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
-
+
// Simple branch support.
- unsigned CondReg = getRegForValue(BI->getCondition());
- if (CondReg == 0) return false;
-
- unsigned CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
- unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+
+ // If we can, avoid recomputing the compare - redoing it could lead to wonky
+ // behavior.
+ // TODO: Factor this out.
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
+ MVT VT;
+ const Type *Ty = CI->getOperand(0)->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
+ if (isFloat && !Subtarget->hasVFP2())
+ return false;
+
+ unsigned CmpOpc;
+ switch (VT.SimpleTy) {
+ default: return false;
+ // TODO: Verify compares.
+ case MVT::f32:
+ CmpOpc = ARM::VCMPES;
+ break;
+ case MVT::f64:
+ CmpOpc = ARM::VCMPED;
+ break;
+ case MVT::i32:
+ CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
+ break;
+ }
+
+ // Get the compare predicate.
+ ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+
+ // We may not handle every CC for now.
+ if (ARMPred == ARMCC::AL) return false;
+
+ unsigned Arg1 = getRegForValue(CI->getOperand(0));
+ if (Arg1 == 0) return false;
+
+ unsigned Arg2 = getRegForValue(CI->getOperand(1));
+ if (Arg2 == 0) return false;
+
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CmpOpc))
+ .addReg(Arg1).addReg(Arg2));
+
+ // For floating point we need to move the result to a comparison register
+ // that we can then use for branches.
+ if (isFloat)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::FMSTAT)));
+
+ unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
+ FastEmitBranch(FBB, DL);
+ FuncInfo.MBB->addSuccessor(TBB);
+ return true;
+ }
+ }
+
+ unsigned CmpReg = getRegForValue(BI->getCondition());
+ if (CmpReg == 0) return false;
+
+ // Re-set the flags just in case.
+ unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addReg(CondReg));
+ .addReg(CmpReg).addImm(0));
+
+ unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
.addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
FastEmitBranch(FBB, DL);
@@ -640,18 +1078,809 @@ bool ARMFastISel::ARMSelectBranch(const Instruction *I) {
return true;
}
+bool ARMFastISel::SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
+
+ MVT VT;
+ const Type *Ty = CI->getOperand(0)->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
+ if (isFloat && !Subtarget->hasVFP2())
+ return false;
+
+ unsigned CmpOpc;
+ unsigned CondReg;
+ switch (VT.SimpleTy) {
+ default: return false;
+ // TODO: Verify compares.
+ case MVT::f32:
+ CmpOpc = ARM::VCMPES;
+ CondReg = ARM::FPSCR;
+ break;
+ case MVT::f64:
+ CmpOpc = ARM::VCMPED;
+ CondReg = ARM::FPSCR;
+ break;
+ case MVT::i32:
+ CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
+ CondReg = ARM::CPSR;
+ break;
+ }
+
+ // Get the compare predicate.
+ ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+
+ // We may not handle every CC for now.
+ if (ARMPred == ARMCC::AL) return false;
+
+ unsigned Arg1 = getRegForValue(CI->getOperand(0));
+ if (Arg1 == 0) return false;
+
+ unsigned Arg2 = getRegForValue(CI->getOperand(1));
+ if (Arg2 == 0) return false;
+
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ .addReg(Arg1).addReg(Arg2));
+
+ // For floating point we need to move the result to a comparison register
+ // that we can then use for branches.
+ if (isFloat)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::FMSTAT)));
+
+ // Now set a register based on the comparison. Explicitly set the predicates
+ // here.
+ unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
+ TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
+ : ARM::GPRRegisterClass;
+ unsigned DestReg = createResultReg(RC);
+ Constant *Zero
+ = ConstantInt::get(Type::getInt32Ty(*Context), 0);
+ unsigned ZeroReg = TargetMaterializeConstant(Zero);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
+ .addReg(ZeroReg).addImm(1)
+ .addImm(ARMPred).addReg(CondReg);
+
+ UpdateValueMap(I, DestReg);
+ return true;
+}
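+
+// Editorial sketch (not part of the original patch): SelectCmp lowers an i1
+// compare result roughly as
+//
+//   cmp   r0, r1    // or vcmpe.f32/.f64 followed by fmstat
+//   mov   r2, #0    // materialized zero
+//   movCC r2, #1    // MOVCCi predicated on ARMPred
+//
+// leaving 0 or 1 in DestReg, which SelectBranch can re-test against #0.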
+
+bool ARMFastISel::SelectFPExt(const Instruction *I) {
+ // Make sure we have VFP and that we're extending float to double.
+ if (!Subtarget->hasVFP2()) return false;
+
+ Value *V = I->getOperand(0);
+ if (!I->getType()->isDoubleTy() ||
+ !V->getType()->isFloatTy()) return false;
+
+ unsigned Op = getRegForValue(V);
+ if (Op == 0) return false;
+
+ unsigned Result = createResultReg(ARM::DPRRegisterClass);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VCVTDS), Result)
+ .addReg(Op));
+ UpdateValueMap(I, Result);
+ return true;
+}
+
+bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
+ // Make sure we have VFP and that we're truncating double to float.
+ if (!Subtarget->hasVFP2()) return false;
+
+ Value *V = I->getOperand(0);
+ if (!(I->getType()->isFloatTy() &&
+ V->getType()->isDoubleTy())) return false;
+
+ unsigned Op = getRegForValue(V);
+ if (Op == 0) return false;
+
+ unsigned Result = createResultReg(ARM::SPRRegisterClass);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VCVTSD), Result)
+ .addReg(Op));
+ UpdateValueMap(I, Result);
+ return true;
+}
+
+bool ARMFastISel::SelectSIToFP(const Instruction *I) {
+ // Make sure we have VFP.
+ if (!Subtarget->hasVFP2()) return false;
+
+ MVT DstVT;
+ const Type *Ty = I->getType();
+ if (!isTypeLegal(Ty, DstVT))
+ return false;
+
+ unsigned Op = getRegForValue(I->getOperand(0));
+ if (Op == 0) return false;
+
+ // The conversion routine works fp-reg to fp-reg, and the operand above
+ // was an integer, so move it to the fp registers if possible.
+ unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
+ if (FP == 0) return false;
+
+ unsigned Opc;
+ if (Ty->isFloatTy()) Opc = ARM::VSITOS;
+ else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
+ else return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ ResultReg)
+ .addReg(FP));
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool ARMFastISel::SelectFPToSI(const Instruction *I) {
+ // Make sure we have VFP.
+ if (!Subtarget->hasVFP2()) return false;
+
+ MVT DstVT;
+ const Type *RetTy = I->getType();
+ if (!isTypeLegal(RetTy, DstVT))
+ return false;
+
+ unsigned Op = getRegForValue(I->getOperand(0));
+ if (Op == 0) return false;
+
+ unsigned Opc;
+ const Type *OpTy = I->getOperand(0)->getType();
+ if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
+ else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
+ else return false;
+
+ // f64->s32 or f32->s32 both need an intermediate f32 reg.
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ ResultReg)
+ .addReg(Op));
+
+ // This result needs to be in an integer register, but the conversion only
+ // takes place in fp-regs.
+ unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
+ if (IntReg == 0) return false;
+
+ UpdateValueMap(I, IntReg);
+ return true;
+}
+
+bool ARMFastISel::SelectSelect(const Instruction *I) {
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
+ return false;
+
+ // Things need to be register sized for register moves.
+ if (VT != MVT::i32) return false;
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+
+ unsigned CondReg = getRegForValue(I->getOperand(0));
+ if (CondReg == 0) return false;
+ unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ if (Op1Reg == 0) return false;
+ unsigned Op2Reg = getRegForValue(I->getOperand(2));
+ if (Op2Reg == 0) return false;
+
+ unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ .addReg(CondReg).addImm(1));
+ unsigned ResultReg = createResultReg(RC);
+ unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op1Reg).addReg(Op2Reg)
+ .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+bool ARMFastISel::SelectSDiv(const Instruction *I) {
+ MVT VT;
+ const Type *Ty = I->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ // If we have integer div support we should have selected this automagically.
+ // In case we have a real miss go ahead and return false and we'll pick
+ // it up later.
+ if (Subtarget->hasDivide()) return false;
+
+ // Otherwise emit a libcall.
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ if (VT == MVT::i8)
+ LC = RTLIB::SDIV_I8;
+ else if (VT == MVT::i16)
+ LC = RTLIB::SDIV_I16;
+ else if (VT == MVT::i32)
+ LC = RTLIB::SDIV_I32;
+ else if (VT == MVT::i64)
+ LC = RTLIB::SDIV_I64;
+ else if (VT == MVT::i128)
+ LC = RTLIB::SDIV_I128;
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
+
+ return ARMEmitLibcall(I, LC);
+}
+
+bool ARMFastISel::SelectSRem(const Instruction *I) {
+ MVT VT;
+ const Type *Ty = I->getType();
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ if (VT == MVT::i8)
+ LC = RTLIB::SREM_I8;
+ else if (VT == MVT::i16)
+ LC = RTLIB::SREM_I16;
+ else if (VT == MVT::i32)
+ LC = RTLIB::SREM_I32;
+ else if (VT == MVT::i64)
+ LC = RTLIB::SREM_I64;
+ else if (VT == MVT::i128)
+ LC = RTLIB::SREM_I128;
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
+
+ return ARMEmitLibcall(I, LC);
+}
+
+bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
+ EVT VT = TLI.getValueType(I->getType(), true);
+
+ // We can get here in the case when we want to use NEON for our fp
+ // operations, but can't figure out how to. Just use the vfp instructions
+ // if we have them.
+ // FIXME: It'd be nice to use NEON instructions.
+ const Type *Ty = I->getType();
+ bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
+ if (isFloat && !Subtarget->hasVFP2())
+ return false;
+
+ unsigned Op1 = getRegForValue(I->getOperand(0));
+ if (Op1 == 0) return false;
+
+ unsigned Op2 = getRegForValue(I->getOperand(1));
+ if (Op2 == 0) return false;
+
+ unsigned Opc;
+ bool is64bit = VT == MVT::f64 || VT == MVT::i64;
+ switch (ISDOpcode) {
+ default: return false;
+ case ISD::FADD:
+ Opc = is64bit ? ARM::VADDD : ARM::VADDS;
+ break;
+ case ISD::FSUB:
+ Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
+ break;
+ case ISD::FMUL:
+ Opc = is64bit ? ARM::VMULD : ARM::VMULS;
+ break;
+ }
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addReg(Op1).addReg(Op2));
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
+
+// Call Handling Code
+
+bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
+ EVT SrcVT, unsigned &ResultReg) {
+ unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ Src, /*TODO: Kill=*/false);
+
+ if (RR != 0) {
+ ResultReg = RR;
+ return true;
+ } else
+ return false;
+}
+
+// This is largely taken directly from CCAssignFnForNode - we don't support
+// varargs in FastISel so that part has been removed.
+// TODO: We may not support all of this.
+CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported calling convention");
+ case CallingConv::Fast:
+ // Ignore fastcc. Silence compiler warnings.
+ (void)RetFastCC_ARM_APCS;
+ (void)FastCC_ARM_APCS;
+ // Fallthrough
+ case CallingConv::C:
+ // Use target triple & subtarget features to do actual dispatch.
+ if (Subtarget->isAAPCS_ABI()) {
+ if (Subtarget->hasVFP2() &&
+ FloatABIType == FloatABI::Hard)
+ return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
+ else
+ return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
+ } else
+ return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
+ case CallingConv::ARM_AAPCS_VFP:
+ return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
+ case CallingConv::ARM_AAPCS:
+ return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
+ case CallingConv::ARM_APCS:
+ return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
+ }
+}
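+
+// Editorial sketch (not part of the original patch): how the dispatch above
+// resolves for a plain C call:
+//
+//   AAPCS ABI, VFP2, hard-float  -> CC_ARM_AAPCS_VFP / RetCC_ARM_AAPCS_VFP
+//   AAPCS ABI otherwise          -> CC_ARM_AAPCS     / RetCC_ARM_AAPCS
+//   old APCS ABI                 -> CC_ARM_APCS      / RetCC_ARM_APCS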
+
+bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
+ SmallVectorImpl<unsigned> &ArgRegs,
+ SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
+ SmallVectorImpl<unsigned> &RegArgs,
+ CallingConv::ID CC,
+ unsigned &NumBytes) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, false, TM, ArgLocs, *Context);
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ NumBytes = CCInfo.getNextStackOffset();
+
+ // Issue CALLSEQ_START
+ unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(AdjStackDown))
+ .addImm(NumBytes));
+
+ // Process the args.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ unsigned Arg = ArgRegs[VA.getValNo()];
+ MVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // We don't handle NEON/vector parameters yet.
+ if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
+ return false;
+
+ // Handle arg promotion, etc.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt: {
+ bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
+ Arg, ArgVT, Arg);
+ assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::ZExt: {
+ bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
+ Arg, ArgVT, Arg);
+ assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::AExt: {
+ bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
+ Arg, ArgVT, Arg);
+ if (!Emitted)
+ Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
+ Arg, ArgVT, Arg);
+ if (!Emitted)
+ Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
+ Arg, ArgVT, Arg);
+
+ assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ case CCValAssign::BCvt: {
+ unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
+ /*TODO: Kill=*/false);
+ assert(BC != 0 && "Failed to emit a bitcast!");
+ Arg = BC;
+ ArgVT = VA.getLocVT();
+ break;
+ }
+ default: llvm_unreachable("Unknown arg promotion!");
+ }
+
+ // Now copy/store arg to correct locations.
+ if (VA.isRegLoc() && !VA.needsCustom()) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ VA.getLocReg())
+ .addReg(Arg);
+ RegArgs.push_back(VA.getLocReg());
+ } else if (VA.needsCustom()) {
+ // TODO: We need custom lowering for vector (v2f64) args.
+ if (VA.getLocVT() != MVT::f64) return false;
+
+ CCValAssign &NextVA = ArgLocs[++i];
+
+ // TODO: Only handle register args for now.
+ if(!(VA.isRegLoc() && NextVA.isRegLoc())) return false;
+
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVRRD), VA.getLocReg())
+ .addReg(NextVA.getLocReg(), RegState::Define)
+ .addReg(Arg));
+ RegArgs.push_back(VA.getLocReg());
+ RegArgs.push_back(NextVA.getLocReg());
+ } else {
+ assert(VA.isMemLoc());
+ // Need to store on the stack.
+ Address Addr;
+ Addr.BaseType = Address::RegBase;
+ Addr.Base.Reg = ARM::SP;
+ Addr.Offset = VA.getLocMemOffset();
+
+ if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
+ }
+ }
+ return true;
+}
+
+bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
+ const Instruction *I, CallingConv::ID CC,
+ unsigned &NumBytes) {
+ // Issue CALLSEQ_END
+ unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(0));
+
+ // Now the return value.
+ if (RetVT != MVT::isVoid) {
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CC, false, TM, RVLocs, *Context);
+ CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
+
+ // Copy all of the result registers out of their specified physreg.
+ if (RVLocs.size() == 2 && RetVT == MVT::f64) {
+ // For this move we copy into two registers and then move into the
+ // double fp reg we want.
+ EVT DestVT = RVLocs[0].getValVT();
+ TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
+ unsigned ResultReg = createResultReg(DstRC);
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVDRR), ResultReg)
+ .addReg(RVLocs[0].getLocReg())
+ .addReg(RVLocs[1].getLocReg()));
+
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[1].getLocReg());
+
+ // Finally update the result.
+ UpdateValueMap(I, ResultReg);
+ } else {
+ assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
+ EVT CopyVT = RVLocs[0].getValVT();
+ TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
+
+ unsigned ResultReg = createResultReg(DstRC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
+ // Finally update the result.
+ UpdateValueMap(I, ResultReg);
+ }
+ }
+
+ return true;
+}
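+
+// Editorial sketch (not part of the original patch): with a soft-float return
+// convention an f64 comes back split across two GPRs and is reassembled
+// above as
+//
+//   vmov d0, r0, r1   // VMOVDRR; r0 and r1 both recorded in UsedRegs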
+
+bool ARMFastISel::SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ if (F.isVarArg())
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));
+
+ const Value *RV = Ret->getOperand(0);
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+ // TODO: For now, don't try to handle cases where getLocInfo()
+ // says Full but the types don't match.
+ if (TLI.getValueType(RV->getType()) != VA.getValVT())
+ return false;
+
+ // Make the copy.
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DstReg = VA.getLocReg();
+ const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!SrcRC->contains(DstReg))
+ return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ DstReg).addReg(SrcReg);
+
+ // Mark the register as live out of the function.
+ MRI.addLiveOut(VA.getLocReg());
+ }
+
+ unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(RetOpc)));
+ return true;
+}
+
+// A quick function that emits a call to the named libcall, using the
+// operands of the Instruction I as arguments. We can assume that we
+// can emit a call for any libcall we can produce. This is an abridged version
+// of the full call infrastructure since we won't need to worry about things
+// like computed function pointers or strange arguments at call sites.
+// TODO: Try to unify this and the normal call bits for ARM, then try to unify
+// with X86.
+bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
+ CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
+
+ // Handle *simple* calls for now.
+ const Type *RetTy = I->getType();
+ MVT RetVT;
+ if (RetTy->isVoidTy())
+ RetVT = MVT::isVoid;
+ else if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ // For now we're using BLX etc on the assumption that we have v5t ops.
+ if (!Subtarget->hasV5TOps()) return false;
+
+ // TODO: For now if we have long calls specified we don't handle the call.
+ if (EnableARMLongCalls) return false;
+
+ // Set up the argument vectors.
+ SmallVector<Value*, 8> Args;
+ SmallVector<unsigned, 8> ArgRegs;
+ SmallVector<MVT, 8> ArgVTs;
+ SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
+ Args.reserve(I->getNumOperands());
+ ArgRegs.reserve(I->getNumOperands());
+ ArgVTs.reserve(I->getNumOperands());
+ ArgFlags.reserve(I->getNumOperands());
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ Value *Op = I->getOperand(i);
+ unsigned Arg = getRegForValue(Op);
+ if (Arg == 0) return false;
+
+ const Type *ArgTy = Op->getType();
+ MVT ArgVT;
+ if (!isTypeLegal(ArgTy, ArgVT)) return false;
+
+ ISD::ArgFlagsTy Flags;
+ unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Args.push_back(Op);
+ ArgRegs.push_back(Arg);
+ ArgVTs.push_back(ArgVT);
+ ArgFlags.push_back(Flags);
+ }
+
+ // Handle the arguments now that we've gotten them.
+ SmallVector<unsigned, 4> RegArgs;
+ unsigned NumBytes;
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ return false;
+
+ // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
+ // TODO: Turn this into the table of arm call ops.
+ MachineInstrBuilder MIB;
+ unsigned CallOpc;
+ if(isThumb) {
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc)))
+ .addExternalSymbol(TLI.getLibcallName(Call));
+ } else {
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addExternalSymbol(TLI.getLibcallName(Call)));
+ }
+
+ // Add implicit physical register uses to the call.
+ for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
+ MIB.addReg(RegArgs[i]);
+
+ // Finish off the call including any return values.
+ SmallVector<unsigned, 4> UsedRegs;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
+
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
+ return true;
+}
+
+bool ARMFastISel::SelectCall(const Instruction *I) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
+
+ // Can't handle inline asm or worry about intrinsics yet.
+ if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
+
+ // Only handle global variable Callees that are direct calls.
+ const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
+ if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
+ return false;
+
+ // Check the calling convention.
+ ImmutableCallSite CS(CI);
+ CallingConv::ID CC = CS.getCallingConv();
+
+ // TODO: Avoid some calling conventions?
+
+ // Let SDISel handle vararg functions.
+ const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ if (FTy->isVarArg())
+ return false;
+
+ // Handle *simple* calls for now.
+ const Type *RetTy = I->getType();
+ MVT RetVT;
+ if (RetTy->isVoidTy())
+ RetVT = MVT::isVoid;
+ else if (!isTypeLegal(RetTy, RetVT))
+ return false;
+
+ // For now we're using BLX etc on the assumption that we have v5t ops.
+ // TODO: Maybe?
+ if (!Subtarget->hasV5TOps()) return false;
+
+ // TODO: For now if we have long calls specified we don't handle the call.
+ if (EnableARMLongCalls) return false;
+
+ // Set up the argument vectors.
+ SmallVector<Value*, 8> Args;
+ SmallVector<unsigned, 8> ArgRegs;
+ SmallVector<MVT, 8> ArgVTs;
+ SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
+ Args.reserve(CS.arg_size());
+ ArgRegs.reserve(CS.arg_size());
+ ArgVTs.reserve(CS.arg_size());
+ ArgFlags.reserve(CS.arg_size());
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ i != e; ++i) {
+ unsigned Arg = getRegForValue(*i);
+
+ if (Arg == 0)
+ return false;
+ ISD::ArgFlagsTy Flags;
+ unsigned AttrInd = i - CS.arg_begin() + 1;
+ if (CS.paramHasAttr(AttrInd, Attribute::SExt))
+ Flags.setSExt();
+ if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
+ Flags.setZExt();
+
+ // FIXME: Only handle *easy* calls for now.
+ if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
+ CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
+ CS.paramHasAttr(AttrInd, Attribute::Nest) ||
+ CS.paramHasAttr(AttrInd, Attribute::ByVal))
+ return false;
+
+ const Type *ArgTy = (*i)->getType();
+ MVT ArgVT;
+ if (!isTypeLegal(ArgTy, ArgVT))
+ return false;
+ unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Args.push_back(*i);
+ ArgRegs.push_back(Arg);
+ ArgVTs.push_back(ArgVT);
+ ArgFlags.push_back(Flags);
+ }
+
+ // Handle the arguments now that we've gotten them.
+ SmallVector<unsigned, 4> RegArgs;
+ unsigned NumBytes;
+ if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
+ return false;
+
+ // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
+ // TODO: Turn this into the table of arm call ops.
+ MachineInstrBuilder MIB;
+ unsigned CallOpc;
+ // Explicitly adding the predicate here.
+ if(isThumb) {
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc)))
+ .addGlobalAddress(GV, 0, 0);
+ } else {
+ CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, 0));
+ }
+
+ // Add implicit physical register uses to the call.
+ for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
+ MIB.addReg(RegArgs[i]);
+
+ // Finish off the call including any return values.
+ SmallVector<unsigned, 4> UsedRegs;
+ if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
+
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
+ return true;
+}
+
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
- // No Thumb-1 for now.
- if (isThumb && !AFI->isThumb2Function()) return false;
-
+
switch (I->getOpcode()) {
case Instruction::Load:
- return ARMSelectLoad(I);
+ return SelectLoad(I);
case Instruction::Store:
- return ARMSelectStore(I);
+ return SelectStore(I);
case Instruction::Br:
- return ARMSelectBranch(I);
+ return SelectBranch(I);
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return SelectCmp(I);
+ case Instruction::FPExt:
+ return SelectFPExt(I);
+ case Instruction::FPTrunc:
+ return SelectFPTrunc(I);
+ case Instruction::SIToFP:
+ return SelectSIToFP(I);
+ case Instruction::FPToSI:
+ return SelectFPToSI(I);
+ case Instruction::FAdd:
+ return SelectBinaryOp(I, ISD::FADD);
+ case Instruction::FSub:
+ return SelectBinaryOp(I, ISD::FSUB);
+ case Instruction::FMul:
+ return SelectBinaryOp(I, ISD::FMUL);
+ case Instruction::SDiv:
+ return SelectSDiv(I);
+ case Instruction::SRem:
+ return SelectSRem(I);
+ case Instruction::Call:
+ return SelectCall(I);
+ case Instruction::Select:
+ return SelectSelect(I);
+ case Instruction::Ret:
+ return SelectRet(I);
default: break;
}
return false;
@@ -659,7 +1888,14 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
namespace llvm {
llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
- if (EnableARMFastISel) return new ARMFastISel(funcInfo);
+  // Completely untested on non-Darwin targets.
+ const TargetMachine &TM = funcInfo.MF->getTarget();
+
+  // Darwin only for now; Thumb1 is not supported.
+ const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
+ if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
+ !DisableARMFastISel)
+ return new ARMFastISel(funcInfo);
return 0;
}
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFixupKinds.h b/contrib/llvm/lib/Target/ARM/ARMFixupKinds.h
new file mode 100644
index 0000000..3d175e3
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMFixupKinds.h
@@ -0,0 +1,97 @@
+//===-- ARM/ARMFixupKinds.h - ARM Specific Fixup Entries --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ARM_ARMFIXUPKINDS_H
+#define LLVM_ARM_ARMFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace ARM {
+enum Fixups {
+ // fixup_arm_ldst_pcrel_12 - 12-bit PC relative relocation for symbol
+ // addresses
+ fixup_arm_ldst_pcrel_12 = FirstTargetFixupKind,
+
+ // fixup_t2_ldst_pcrel_12 - Equivalent to fixup_arm_ldst_pcrel_12, with
+ // the 16-bit halfwords reordered.
+ fixup_t2_ldst_pcrel_12,
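+
+  // (Background, informal: a 32-bit Thumb2 instruction is stored as two
+  // little-endian 16-bit halfwords, so its byte layout differs from the
+  // equivalent ARM encoding and fixups must account for the swap.)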
+
+ // fixup_arm_pcrel_10 - 10-bit PC relative relocation for symbol addresses
+ // used in VFP instructions where the lower 2 bits are not encoded
+ // (so it's encoded as an 8-bit immediate).
+ fixup_arm_pcrel_10,
+ // fixup_t2_pcrel_10 - Equivalent to fixup_arm_pcrel_10, accounting for
+ // the short-swapped encoding of Thumb2 instructions.
+ fixup_t2_pcrel_10,
+ // fixup_thumb_adr_pcrel_10 - 10-bit PC relative relocation for symbol
+ // addresses where the lower 2 bits are not encoded (so it's encoded as an
+ // 8-bit immediate).
+ fixup_thumb_adr_pcrel_10,
+ // fixup_arm_adr_pcrel_12 - 12-bit PC relative relocation for the ADR
+ // instruction.
+ fixup_arm_adr_pcrel_12,
+ // fixup_t2_adr_pcrel_12 - 12-bit PC relative relocation for the ADR
+ // instruction.
+ fixup_t2_adr_pcrel_12,
+ // fixup_arm_condbranch - 24-bit PC relative relocation for conditional branch
+ // instructions.
+ fixup_arm_condbranch,
+ // fixup_arm_uncondbranch - 24-bit PC relative relocation for
+ // branch instructions. (unconditional)
+ fixup_arm_uncondbranch,
+ // fixup_t2_condbranch - 20-bit PC relative relocation for Thumb2 direct
+  // conditional branch instructions.
+ fixup_t2_condbranch,
+ // fixup_t2_uncondbranch - 20-bit PC relative relocation for Thumb2 direct
+  // unconditional branch instructions.
+ fixup_t2_uncondbranch,
+
+ // fixup_arm_thumb_br - 12-bit fixup for Thumb B instructions.
+ fixup_arm_thumb_br,
+
+  // fixup_arm_thumb_bl - Fixup for Thumb BL instructions.
+ fixup_arm_thumb_bl,
+
+ // fixup_arm_thumb_blx - Fixup for Thumb BLX instructions.
+ fixup_arm_thumb_blx,
+
+  // fixup_arm_thumb_cb - Fixup for Thumb CBZ/CBNZ compare-and-branch
+  // instructions.
+ fixup_arm_thumb_cb,
+
+ // fixup_arm_thumb_cp - Fixup for Thumb load/store from constant pool instrs.
+ fixup_arm_thumb_cp,
+
+ // fixup_arm_thumb_bcc - Fixup for Thumb conditional branching instructions.
+ fixup_arm_thumb_bcc,
+
+  // The next four fixups are for the movt/movw pair;
+  // the 16-bit immediate field is split into imm{15-12} and imm{11-0}.
+ fixup_arm_movt_hi16, // :upper16:
+ fixup_arm_movw_lo16, // :lower16:
+ fixup_t2_movt_hi16, // :upper16:
+ fixup_t2_movw_lo16, // :lower16:
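+
+  // Illustrative example (assuming the standard MOVW/MOVT imm4:imm12
+  // encoding; not taken from this file): for the constant 0x12345678,
+  // :lower16: selects 0x5678 (imm4=0x5, imm12=0x678) and :upper16:
+  // selects 0x1234.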
+
+  // It is possible to create an "immediate" that happens to be pcrel:
+  // movw r0, :lower16:Foo-(Bar+8) and movt r0, :upper16:Foo-(Bar+8)
+  // result in different reloc tags than the fixups above. These are
+  // needed to support ELF::R_ARM_MOVT_PREL and ELF::R_ARM_MOVW_PREL_NC.
+ fixup_arm_movt_hi16_pcrel, // :upper16:
+ fixup_arm_movw_lo16_pcrel, // :lower16:
+ fixup_t2_movt_hi16_pcrel, // :upper16:
+ fixup_t2_movw_lo16_pcrel, // :lower16:
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameInfo.h b/contrib/llvm/lib/Target/ARM/ARMFrameInfo.h
deleted file mode 100644
index d5dae24..0000000
--- a/contrib/llvm/lib/Target/ARM/ARMFrameInfo.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- ARMTargetFrameInfo.h - Define TargetFrameInfo for ARM ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_FRAMEINFO_H
-#define ARM_FRAMEINFO_H
-
-#include "ARM.h"
-#include "ARMSubtarget.h"
-#include "llvm/Target/TargetFrameInfo.h"
-
-namespace llvm {
-
-class ARMFrameInfo : public TargetFrameInfo {
-public:
- explicit ARMFrameInfo(const ARMSubtarget &ST)
- : TargetFrameInfo(StackGrowsDown, ST.getStackAlignment(), 0, 4) {
- }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
new file mode 100644
index 0000000..f42c6db
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -0,0 +1,1021 @@
+//=======- ARMFrameLowering.cpp - ARM Frame Information --------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the ARM implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARMFrameLowering.h"
+#include "ARMAddressingModes.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
+#include "ARMMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+/// hasFP - Return true if the specified function should have a dedicated frame
+/// pointer register. This is true if the function has variable sized allocas
+/// or if frame pointer elimination is disabled.
+bool ARMFrameLowering::hasFP(const MachineFunction &MF) const {
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+
+  // Mac OS X requires FP not to be clobbered for backtracing purposes.
+ if (STI.isTargetDarwin())
+ return true;
+
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Always eliminate non-leaf frame pointers.
+ return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
+ RegInfo->needsStackRealignment(MF) ||
+ MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken());
+}
+
+/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+/// not required, we reserve argument space for call sites in the function
+/// immediately on entry to the current function. This eliminates the need for
+/// add/sub sp brackets around call sites. Returns true if the call frame is
+/// included as part of the stack frame.
+bool ARMFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ const MachineFrameInfo *FFI = MF.getFrameInfo();
+ unsigned CFSize = FFI->getMaxCallFrameSize();
+  // It's not always a good idea to include the call frame as part of the
+  // stack frame. ARM (especially Thumb) has a small immediate offset range
+  // for addressing the stack frame, so a large call frame can cause poor
+  // codegen and may even make it impossible to scavenge a register.
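+  // For example: imm12 can reach 4095 bytes, so the cap below (2047, roughly
+  // half of imm12) leaves addressing headroom for locals beyond the reserved
+  // call frame.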
+ if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
+ return false;
+
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
+/// call frame pseudos can be simplified. Unlike most targets, having a FP
+/// is not sufficient here since we still may reference some objects via SP
+/// even when FP is available in Thumb2 mode.
+bool
+ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+ return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ if (Reg == CSRegs[i])
+ return true;
+ return false;
+}
+
+static bool isCSRestore(MachineInstr *MI,
+ const ARMBaseInstrInfo &TII,
+ const unsigned *CSRegs) {
+ // Integer spill area is handled with "pop".
+ if (MI->getOpcode() == ARM::LDMIA_RET ||
+ MI->getOpcode() == ARM::t2LDMIA_RET ||
+ MI->getOpcode() == ARM::LDMIA_UPD ||
+ MI->getOpcode() == ARM::t2LDMIA_UPD ||
+ MI->getOpcode() == ARM::VLDMDIA_UPD) {
+ // The first two operands are predicates. The last two are
+ // imp-def and imp-use of SP. Check everything in between.
+ for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
+ if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ return false;
+ return true;
+ }
+ if ((MI->getOpcode() == ARM::LDR_POST ||
+ MI->getOpcode() == ARM::t2LDR_POST) &&
+ isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
+ MI->getOperand(1).getReg() == ARM::SP)
+ return true;
+
+ return false;
+}
+
+static void
+emitSPUpdate(bool isARM,
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ DebugLoc dl, const ARMBaseInstrInfo &TII,
+ int NumBytes,
+ ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
+ if (isARM)
+ emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
+ Pred, PredReg, TII);
+ else
+ emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
+ Pred, PredReg, TII);
+}
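+
+// Usage sketch (illustrative, not from this change): a call such as
+// emitSPUpdate(isARM, MBB, MBBI, dl, TII, -16) materializes roughly
+// "sub sp, sp, #16" in ARM mode, or the Thumb2 equivalent, via
+// emit{ARM,T2}RegPlusImmediate.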
+
+void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const ARMBaseRegisterInfo *RegInfo =
+ static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
+ assert(!AFI->isThumb1OnlyFunction() &&
+ "This emitPrologue does not support Thumb1!");
+ bool isARM = !AFI->isThumbFunction();
+ unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
+ unsigned NumBytes = MFI->getStackSize();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+
+  // Determine the sizes of the callee-save spill areas and record which frame
+  // index belongs to which area.
+ unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
+ int FramePtrSpillFI = 0;
+
+ // Allocate the vararg register save area. This is not counted in NumBytes.
+ if (VARegSaveSize)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);
+
+ if (!AFI->hasStackFrame()) {
+ if (NumBytes != 0)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
+ return;
+ }
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ int FI = CSI[i].getFrameIdx();
+ switch (Reg) {
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ case ARM::LR:
+ if (Reg == FramePtr)
+ FramePtrSpillFI = FI;
+ AFI->addGPRCalleeSavedArea1Frame(FI);
+ GPRCS1Size += 4;
+ break;
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ if (Reg == FramePtr)
+ FramePtrSpillFI = FI;
+ if (STI.isTargetDarwin()) {
+ AFI->addGPRCalleeSavedArea2Frame(FI);
+ GPRCS2Size += 4;
+ } else {
+ AFI->addGPRCalleeSavedArea1Frame(FI);
+ GPRCS1Size += 4;
+ }
+ break;
+ default:
+ AFI->addDPRCalleeSavedAreaFrame(FI);
+ DPRCSSize += 8;
+ }
+ }
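+
+  // Illustrative Darwin layout after the pushes (high addresses first):
+  //   [area 1: r4-r7, lr] [area 2: r8-r11] [DPR area: d8-d15] [locals...]
+  // On non-Darwin targets, all GPRs land in area 1.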
+
+ // Move past area 1.
+ if (GPRCS1Size > 0) MBBI++;
+
+ // Set FP to point to the stack slot that contains the previous FP.
+ // For Darwin, FP is R7, which has now been stored in spill area 1.
+ // Otherwise, if this is not Darwin, all the callee-saved registers go
+ // into spill area 1, including the FP in R11. In either case, it is
+ // now safe to emit this assignment.
+ bool HasFP = hasFP(MF);
+ if (HasFP) {
+ unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
+ .addFrameIndex(FramePtrSpillFI).addImm(0);
+ AddDefaultCC(AddDefaultPred(MIB));
+ }
+
+ // Move past area 2.
+ if (GPRCS2Size > 0) MBBI++;
+
+ // Determine starting offsets of spill areas.
+ unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
+ unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
+ unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
+ if (HasFP)
+ AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
+ NumBytes);
+ AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
+ AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
+ AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
+
+ // Move past area 3.
+ if (DPRCSSize > 0) MBBI++;
+
+ NumBytes = DPRCSOffset;
+ if (NumBytes) {
+ // Adjust SP after all the callee-save spills.
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
+ if (HasFP && isARM)
+ // Restore from fp only in ARM mode: e.g. sub sp, r7, #24
+ // Note it's not safe to do this in Thumb2 mode because it would have
+ // taken two instructions:
+ // mov sp, r7
+ // sub sp, #24
+ // If an interrupt is taken between the two instructions, then sp is in
+ // an inconsistent state (pointing to the middle of callee-saved area).
+ // The interrupt handler can end up clobbering the registers.
+ AFI->setShouldRestoreSPFromFP(true);
+ }
+
+ if (STI.isTargetELF() && hasFP(MF))
+ MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
+ AFI->getFramePtrSpillOffset());
+
+ AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
+ AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
+ AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
+
+ // If we need dynamic stack realignment, do it here. Be paranoid and make
+ // sure if we also have VLAs, we have a base pointer for frame access.
+ if (RegInfo->needsStackRealignment(MF)) {
+ unsigned MaxAlign = MFI->getMaxAlignment();
+ assert (!AFI->isThumb1OnlyFunction());
+ if (!AFI->isThumbFunction()) {
+ // Emit bic sp, sp, MaxAlign
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
+ TII.get(ARM::BICri), ARM::SP)
+ .addReg(ARM::SP, RegState::Kill)
+ .addImm(MaxAlign-1)));
+ } else {
+ // We cannot use sp as source/dest register here, thus we're emitting the
+ // following sequence:
+ // mov r4, sp
+ // bic r4, r4, MaxAlign
+ // mov sp, r4
+      // FIXME: It would be better just to find a spare register here.
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
+ .addReg(ARM::SP, RegState::Kill);
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
+ TII.get(ARM::t2BICri), ARM::R4)
+ .addReg(ARM::R4, RegState::Kill)
+ .addImm(MaxAlign-1)));
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
+ .addReg(ARM::R4, RegState::Kill);
+ }
+
+ AFI->setShouldRestoreSPFromFP(true);
+ }
+
+ // If we need a base pointer, set it up here. It's whatever the value
+ // of the stack pointer is at this point. Any variable size objects
+ // will be allocated after this, so we can still use the base pointer
+ // to reference locals.
+ if (RegInfo->hasBasePointer(MF)) {
+ if (isARM)
+ BuildMI(MBB, MBBI, dl,
+ TII.get(ARM::MOVr), RegInfo->getBaseRegister())
+ .addReg(ARM::SP)
+ .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
+ else
+ BuildMI(MBB, MBBI, dl,
+ TII.get(ARM::tMOVgpr2gpr), RegInfo->getBaseRegister())
+ .addReg(ARM::SP);
+ }
+
+ // If the frame has variable sized objects then the epilogue must restore
+ // the sp from fp. We can assume there's an FP here since hasFP already
+ // checks for hasVarSizedObjects.
+ if (MFI->hasVarSizedObjects())
+ AFI->setShouldRestoreSPFromFP(true);
+}
+
+void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->getDesc().isReturn() &&
+ "Can only insert epilog into returning blocks");
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc dl = MBBI->getDebugLoc();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
+ assert(!AFI->isThumb1OnlyFunction() &&
+ "This emitEpilogue does not support Thumb1!");
+ bool isARM = !AFI->isThumbFunction();
+
+ unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
+ int NumBytes = (int)MFI->getStackSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+
+ if (!AFI->hasStackFrame()) {
+ if (NumBytes != 0)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+ } else {
+ // Unwind MBBI to point to first LDR / VLDRD.
+ const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ if (MBBI != MBB.begin()) {
+ do
+ --MBBI;
+ while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
+ if (!isCSRestore(MBBI, TII, CSRegs))
+ ++MBBI;
+ }
+
+ // Move SP to start of FP callee save spill area.
+ NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
+ AFI->getGPRCalleeSavedArea2Size() +
+ AFI->getDPRCalleeSavedAreaSize());
+
+ // Reset SP based on frame pointer only if the stack frame extends beyond
+ // frame pointer stack slot or target is ELF and the function has FP.
+ if (AFI->shouldRestoreSPFromFP()) {
+ NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
+ if (NumBytes) {
+ if (isARM)
+ emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
+ ARMCC::AL, 0, TII);
+ else {
+ // It's not possible to restore SP from FP in a single instruction.
+ // For Darwin, this looks like:
+ // mov sp, r7
+ // sub sp, #24
+ // This is bad, if an interrupt is taken after the mov, sp is in an
+ // inconsistent state.
+ // Use the first callee-saved register as a scratch register.
+ assert(MF.getRegInfo().isPhysRegUsed(ARM::R4) &&
+ "No scratch register to restore SP from FP!");
+ emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes,
+ ARMCC::AL, 0, TII);
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
+ .addReg(ARM::R4);
+ }
+ } else {
+ // Thumb2 or ARM.
+ if (isARM)
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
+ .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
+ else
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
+ .addReg(FramePtr);
+ }
+ } else if (NumBytes)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+
+ // Increment past our save areas.
+ if (AFI->getDPRCalleeSavedAreaSize()) MBBI++;
+ if (AFI->getGPRCalleeSavedArea2Size()) MBBI++;
+ if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
+ }
+
+ if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
+ RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+
+ // Jump to label or value in register.
+ if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND) {
+ unsigned TCOpcode = (RetOpcode == ARM::TCRETURNdi)
+ ? (STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)
+ : (STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
+ if (JumpTarget.isGlobal())
+ MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ else {
+ assert(JumpTarget.isSymbol());
+ MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+ JumpTarget.getTargetFlags());
+ }
+ } else if (RetOpcode == ARM::TCRETURNri) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ } else if (RetOpcode == ARM::TCRETURNriND) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = prior(MBBI);
+ for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
+ NewMI->addOperand(MBBI->getOperand(i));
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ }
+
+ if (VARegSaveSize)
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
+}
+
+/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
+/// debug info. It's the same as what we use for resolving the code-gen
+/// references for now. FIXME: This can go wrong when references are
+/// SP-relative and simple call frames aren't used.
+int
+ARMFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
+}
+
+int
+ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
+ int FI,
+ unsigned &FrameReg,
+ int SPAdj) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const ARMBaseRegisterInfo *RegInfo =
+ static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
+ int FPOffset = Offset - AFI->getFramePtrSpillOffset();
+ bool isFixed = MFI->isFixedObjectIndex(FI);
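+
+  // Informal example: Offset is SP-relative (object offset plus stack size);
+  // FPOffset rebases it against the FP spill slot. With a 32-byte frame, an
+  // object at frame offset -28 and an FP spill offset of 28:
+  // Offset = -28 + 32 = 4, FPOffset = 4 - 28 = -24.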
+
+ FrameReg = ARM::SP;
+ Offset += SPAdj;
+ if (AFI->isGPRCalleeSavedArea1Frame(FI))
+ return Offset - AFI->getGPRCalleeSavedArea1Offset();
+ else if (AFI->isGPRCalleeSavedArea2Frame(FI))
+ return Offset - AFI->getGPRCalleeSavedArea2Offset();
+ else if (AFI->isDPRCalleeSavedAreaFrame(FI))
+ return Offset - AFI->getDPRCalleeSavedAreaOffset();
+
+ // When dynamically realigning the stack, use the frame pointer for
+ // parameters, and the stack/base pointer for locals.
+ if (RegInfo->needsStackRealignment(MF)) {
+ assert (hasFP(MF) && "dynamic stack realignment without a FP!");
+ if (isFixed) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ Offset = FPOffset;
+ } else if (MFI->hasVarSizedObjects()) {
+ assert(RegInfo->hasBasePointer(MF) &&
+ "VLAs and dynamic stack alignment, but missing base pointer!");
+ FrameReg = RegInfo->getBaseRegister();
+ }
+ return Offset;
+ }
+
+ // If there is a frame pointer, use it when we can.
+ if (hasFP(MF) && AFI->hasStackFrame()) {
+ // Use frame pointer to reference fixed objects. Use it for locals if
+ // there are VLAs (and thus the SP isn't reliable as a base).
+ if (isFixed || (MFI->hasVarSizedObjects() &&
+ !RegInfo->hasBasePointer(MF))) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FPOffset;
+ } else if (MFI->hasVarSizedObjects()) {
+ assert(RegInfo->hasBasePointer(MF) && "missing base pointer!");
+ // Try to use the frame pointer if we can, else use the base pointer
+ // since it's available. This is handy for the emergency spill slot, in
+ // particular.
+ if (AFI->isThumb2Function()) {
+ if (FPOffset >= -255 && FPOffset < 0) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FPOffset;
+ }
+ } else
+ FrameReg = RegInfo->getBaseRegister();
+ } else if (AFI->isThumb2Function()) {
+ // In Thumb2 mode, the negative offset is very limited. Try to avoid
+ // out of range references.
+ if (FPOffset >= -255 && FPOffset < 0) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FPOffset;
+ }
+ } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
+ // Otherwise, use SP or FP, whichever is closer to the stack slot.
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FPOffset;
+ }
+ }
+ // Use the base pointer if we have one.
+ if (RegInfo->hasBasePointer(MF))
+ FrameReg = RegInfo->getBaseRegister();
+ return Offset;
+}
+
+int ARMFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ unsigned FrameReg;
+ return getFrameIndexReference(MF, FI, FrameReg);
+}
+
+void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ unsigned StmOpc, unsigned StrOpc,
+ bool NoGap,
+ bool(*Func)(unsigned, bool)) const {
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ SmallVector<std::pair<unsigned,bool>, 4> Regs;
+ unsigned i = CSI.size();
+ while (i != 0) {
+ unsigned LastReg = 0;
+ for (; i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+
+ // Add the callee-saved register as live-in unless it's LR and
+ // @llvm.returnaddress is called. If LR is returned for
+ // @llvm.returnaddress then it's already added to the function and
+ // entry block live-in sets.
+ bool isKill = true;
+ if (Reg == ARM::LR) {
+ if (MF.getFrameInfo()->isReturnAddressTaken() &&
+ MF.getRegInfo().isLiveIn(Reg))
+ isKill = false;
+ }
+
+ if (isKill)
+ MBB.addLiveIn(Reg);
+
+ // If NoGap is true, push consecutive registers and then leave the rest
+ // for other instructions. e.g.
+ // vpush {d8, d10, d11} -> vpush {d8}, vpush {d10, d11}
+ if (NoGap && LastReg && LastReg != Reg-1)
+ break;
+ LastReg = Reg;
+ Regs.push_back(std::make_pair(Reg, isKill));
+ }
+
+ if (Regs.empty())
+ continue;
+ if (Regs.size() > 1 || StrOpc== 0) {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP)
+ .addReg(ARM::SP));
+ for (unsigned i = 0, e = Regs.size(); i < e; ++i)
+ MIB.addReg(Regs[i].first, getKillRegState(Regs[i].second));
+ } else if (Regs.size() == 1) {
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc),
+ ARM::SP)
+ .addReg(Regs[0].first, getKillRegState(Regs[0].second))
+ .addReg(ARM::SP);
+ // ARM mode needs an extra reg0 here due to addrmode2. Will go away once
+ // that refactoring is complete (eventually).
+ if (StrOpc == ARM::STR_PRE) {
+ MIB.addReg(0);
+ MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::sub, 4, ARM_AM::no_shift));
+ } else
+ MIB.addImm(-4);
+ AddDefaultPred(MIB);
+ }
+ Regs.clear();
+ }
+}
+
+void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ unsigned LdmOpc, unsigned LdrOpc,
+ bool isVarArg, bool NoGap,
+ bool(*Func)(unsigned, bool)) const {
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned RetOpcode = MI->getOpcode();
+ bool isTailCall = (RetOpcode == ARM::TCRETURNdi ||
+ RetOpcode == ARM::TCRETURNdiND ||
+ RetOpcode == ARM::TCRETURNri ||
+ RetOpcode == ARM::TCRETURNriND);
+
+ SmallVector<unsigned, 4> Regs;
+ unsigned i = CSI.size();
+ while (i != 0) {
+ unsigned LastReg = 0;
+ bool DeleteRet = false;
+ for (; i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+
+ if (Reg == ARM::LR && !isTailCall && !isVarArg && STI.hasV5TOps()) {
+ Reg = ARM::PC;
+ LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_RET : ARM::LDMIA_RET;
+ // Fold the return instruction into the LDM.
+ DeleteRet = true;
+ }
+
+ // If NoGap is true, pop consecutive registers and then leave the rest
+ // for other instructions. e.g.
+ // vpop {d8, d10, d11} -> vpop {d8}, vpop {d10, d11}
+ if (NoGap && LastReg && LastReg != Reg-1)
+ break;
+
+ LastReg = Reg;
+ Regs.push_back(Reg);
+ }
+
+ if (Regs.empty())
+ continue;
+ if (Regs.size() > 1 || LdrOpc == 0) {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP)
+ .addReg(ARM::SP));
+ for (unsigned i = 0, e = Regs.size(); i < e; ++i)
+ MIB.addReg(Regs[i], getDefRegState(true));
+ if (DeleteRet)
+ MI->eraseFromParent();
+ MI = MIB;
+ } else if (Regs.size() == 1) {
+ // If we adjusted the reg to PC from LR above, switch it back here. We
+ // only do that for LDM.
+ if (Regs[0] == ARM::PC)
+ Regs[0] = ARM::LR;
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MI, DL, TII.get(LdrOpc), Regs[0])
+ .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP);
+ // ARM mode needs an extra reg0 here due to addrmode2. Will go away once
+ // that refactoring is complete (eventually).
+ if (LdrOpc == ARM::LDR_POST) {
+ MIB.addReg(0);
+ MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::add, 4, ARM_AM::no_shift));
+ } else
+ MIB.addImm(4);
+ AddDefaultPred(MIB);
+ }
+ Regs.clear();
+ }
+}
+
+bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+
+ unsigned PushOpc = AFI->isThumbFunction() ? ARM::t2STMDB_UPD : ARM::STMDB_UPD;
+ unsigned PushOneOpc = AFI->isThumbFunction() ? ARM::t2STR_PRE : ARM::STR_PRE;
+ unsigned FltOpc = ARM::VSTMDDB_UPD;
+ emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea1Register);
+ emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea2Register);
+ emitPushInst(MBB, MI, CSI, FltOpc, 0, true, &isARMArea3Register);
+
+ return true;
+}
+
+bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
+
+ unsigned PopOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_UPD : ARM::LDMIA_UPD;
+ unsigned LdrOpc = AFI->isThumbFunction() ? ARM::t2LDR_POST : ARM::LDR_POST;
+ unsigned FltOpc = ARM::VLDMDIA_UPD;
+ emitPopInst(MBB, MI, CSI, FltOpc, 0, isVarArg, true, &isARMArea3Register);
+ emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
+ &isARMArea2Register);
+ emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
+ &isARMArea1Register);
+
+ return true;
+}
+
+// FIXME: Make generic?
+static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
+ const ARMBaseInstrInfo &TII) {
+ unsigned FnSize = 0;
+ for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
+ MBBI != E; ++MBBI) {
+ const MachineBasicBlock &MBB = *MBBI;
+ for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
+ I != E; ++I)
+ FnSize += TII.GetInstSizeInBytes(I);
+ }
+ return FnSize;
+}
+
+/// estimateStackSize - Estimate and return the size of the frame.
+/// FIXME: Make generic?
+static unsigned estimateStackSize(MachineFunction &MF) {
+ const MachineFrameInfo *FFI = MF.getFrameInfo();
+ int Offset = 0;
+ for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
+ int FixedOff = -FFI->getObjectOffset(i);
+ if (FixedOff > Offset) Offset = FixedOff;
+ }
+ for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
+ if (FFI->isDeadObjectIndex(i))
+ continue;
+ Offset += FFI->getObjectSize(i);
+ unsigned Align = FFI->getObjectAlignment(i);
+ // Adjust to alignment boundary
+ Offset = (Offset+Align-1)/Align*Align;
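+    // e.g. Offset = 10, Align = 8: (10 + 7) / 8 * 8 = 16.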
+ }
+ return (unsigned)Offset;
+}
+
+/// estimateRSStackSizeLimit - Look at each instruction that references stack
+/// frames and return the stack size limit beyond which some of these
+/// instructions will require a scratch register during their expansion later.
+// FIXME: Move to TII?
+static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
+ const TargetFrameLowering *TFI) {
+ const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ unsigned Limit = (1 << 12) - 1;
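+  // Start from the most permissive limit (imm12: 4095 bytes) and tighten it
+  // below as more restrictive addressing modes are seen.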
+ for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
+ for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+ if (!I->getOperand(i).isFI()) continue;
+
+ // When using ADDri to get the address of a stack object, 255 is the
+ // largest offset guaranteed to fit in the immediate offset.
+ if (I->getOpcode() == ARM::ADDri) {
+ Limit = std::min(Limit, (1U << 8) - 1);
+ break;
+ }
+
+ // Otherwise check the addressing mode.
+ switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
+ case ARMII::AddrMode3:
+ case ARMII::AddrModeT2_i8:
+ Limit = std::min(Limit, (1U << 8) - 1);
+ break;
+ case ARMII::AddrMode5:
+ case ARMII::AddrModeT2_i8s4:
+ Limit = std::min(Limit, ((1U << 8) - 1) * 4);
+ break;
+ case ARMII::AddrModeT2_i12:
+        // i12 supports only positive offsets, so these will be converted to
+ // i8 opcodes. See llvm::rewriteT2FrameIndex.
+ if (TFI->hasFP(MF) && AFI->hasStackFrame())
+ Limit = std::min(Limit, (1U << 8) - 1);
+ break;
+ case ARMII::AddrMode4:
+ case ARMII::AddrMode6:
+ // Addressing modes 4 & 6 (load/store) instructions can't encode an
+ // immediate offset for stack references.
+ return 0;
+ default:
+ break;
+ }
+ break; // At most one FI per instruction
+ }
+ }
+ }
+
+ return Limit;
+}
+
+void
+ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+  // This tells PEI to spill the FP as if it were any other callee-saved register
+  // to take advantage of the eliminateFrameIndex machinery. This also ensures it
+ // is spilled in the order specified by getCalleeSavedRegs() to make it easier
+ // to combine multiple loads / stores.
+ bool CanEliminateFrame = true;
+ bool CS1Spilled = false;
+ bool LRSpilled = false;
+ unsigned NumGPRSpills = 0;
+ SmallVector<unsigned, 4> UnspilledCS1GPRs;
+ SmallVector<unsigned, 4> UnspilledCS2GPRs;
+ const ARMBaseRegisterInfo *RegInfo =
+ static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo*>(MF.getTarget().getInstrInfo());
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+
+  // Spill R4 if a Thumb2 function requires stack realignment - it will be used
+  // as a scratch register. Also spill R4 if a Thumb2 function has variable-
+  // sized objects, since it's not always possible to restore sp from fp in a
+  // single instruction.
+  // FIXME: It would be better just to find a spare register here.
+ if (AFI->isThumb2Function() &&
+ (MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(MF)))
+ MF.getRegInfo().setPhysRegUsed(ARM::R4);
+
+ if (AFI->isThumb1OnlyFunction()) {
+ // Spill LR if Thumb1 function uses variable length argument lists.
+ if (AFI->getVarArgsRegSaveSize() > 0)
+ MF.getRegInfo().setPhysRegUsed(ARM::LR);
+
+    // Spill R4 if the Thumb1 epilogue has to restore SP from FP.
+    // FIXME: It would be better just to find a spare register here.
+ if (MFI->hasVarSizedObjects())
+ MF.getRegInfo().setPhysRegUsed(ARM::R4);
+ }
+
+ // Spill the BasePtr if it's used.
+ if (RegInfo->hasBasePointer(MF))
+ MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
+
+ // Don't spill FP if the frame can be eliminated. This is determined
+ // by scanning the callee-save registers to see if any is used.
+ const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ for (unsigned i = 0; CSRegs[i]; ++i) {
+ unsigned Reg = CSRegs[i];
+ bool Spilled = false;
+ if (MF.getRegInfo().isPhysRegUsed(Reg)) {
+ Spilled = true;
+ CanEliminateFrame = false;
+ } else {
+ // Check alias registers too.
+ for (const unsigned *Aliases =
+ RegInfo->getAliasSet(Reg); *Aliases; ++Aliases) {
+ if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
+ Spilled = true;
+ CanEliminateFrame = false;
+ }
+ }
+ }
+
+ if (!ARM::GPRRegisterClass->contains(Reg))
+ continue;
+
+ if (Spilled) {
+ NumGPRSpills++;
+
+ if (!STI.isTargetDarwin()) {
+ if (Reg == ARM::LR)
+ LRSpilled = true;
+ CS1Spilled = true;
+ continue;
+ }
+
+ // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
+ switch (Reg) {
+ case ARM::LR:
+ LRSpilled = true;
+ // Fallthrough
+ case ARM::R4: case ARM::R5:
+ case ARM::R6: case ARM::R7:
+ CS1Spilled = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (!STI.isTargetDarwin()) {
+ UnspilledCS1GPRs.push_back(Reg);
+ continue;
+ }
+
+ switch (Reg) {
+ case ARM::R4: case ARM::R5:
+ case ARM::R6: case ARM::R7:
+ case ARM::LR:
+ UnspilledCS1GPRs.push_back(Reg);
+ break;
+ default:
+ UnspilledCS2GPRs.push_back(Reg);
+ break;
+ }
+ }
+ }
+
+ bool ForceLRSpill = false;
+ if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
+ unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
+    // Force LR to be spilled if the Thumb function size is >= 2048. This
+    // enables use of BL to implement a far jump. If it turns out that it's not
+    // needed then the branch fix-up pass will undo it.
+ if (FnSize >= (1 << 11)) {
+ CanEliminateFrame = false;
+ ForceLRSpill = true;
+ }
+ }
+
+ // If any of the stack slot references may be out of range of an immediate
+ // offset, make sure a register (or a spill slot) is available for the
+ // register scavenger. Note that if we're indexing off the frame pointer, the
+ // effective stack size is 4 bytes larger since the FP points to the stack
+ // slot of the previous FP. Also, if we have variable sized objects in the
+ // function, stack slot references will often be negative, and some of
+ // our instructions are positive-offset only, so conservatively consider
+ // that case to want a spill slot (or register) as well. Similarly, if
+ // the function adjusts the stack pointer during execution and the
+ // adjustments aren't already part of our stack size estimate, our offset
+ // calculations may be off, so be conservative.
+ // FIXME: We could add logic to be more precise about negative offsets
+ // and which instructions will need a scratch register for them. Is it
+ // worth the effort and added fragility?
+ bool BigStack =
+ (RS &&
+ (estimateStackSize(MF) + ((hasFP(MF) && AFI->hasStackFrame()) ? 4:0) >=
+ estimateRSStackSizeLimit(MF, this)))
+ || MFI->hasVarSizedObjects()
+ || (MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
+
+ bool ExtraCSSpill = false;
+ if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
+ AFI->setHasStackFrame(true);
+
+    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled,
+    // spill LR as well so we can fold BX_RET into the register restore (LDM).
+ if (!LRSpilled && CS1Spilled) {
+ MF.getRegInfo().setPhysRegUsed(ARM::LR);
+ NumGPRSpills++;
+ UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
+ UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
+ ForceLRSpill = false;
+ ExtraCSSpill = true;
+ }
+
+ if (hasFP(MF)) {
+ MF.getRegInfo().setPhysRegUsed(FramePtr);
+ NumGPRSpills++;
+ }
+
+ // If stack and double are 8-byte aligned and we are spilling an odd number
+ // of GPRs, spill one extra callee save GPR so we won't have to pad between
+ // the integer and double callee save areas.
+ unsigned TargetAlign = getStackAlignment();
+ if (TargetAlign == 8 && (NumGPRSpills & 1)) {
+ if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
+ for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
+ unsigned Reg = UnspilledCS1GPRs[i];
+ // Don't spill high register if the function is thumb1
+ if (!AFI->isThumb1OnlyFunction() ||
+ isARMLowRegister(Reg) || Reg == ARM::LR) {
+ MF.getRegInfo().setPhysRegUsed(Reg);
+ if (!RegInfo->isReservedReg(MF, Reg))
+ ExtraCSSpill = true;
+ break;
+ }
+ }
+ } else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) {
+ unsigned Reg = UnspilledCS2GPRs.front();
+ MF.getRegInfo().setPhysRegUsed(Reg);
+ if (!RegInfo->isReservedReg(MF, Reg))
+ ExtraCSSpill = true;
+ }
+ }
+
+ // Estimate if we might need to scavenge a register at some point in order
+ // to materialize a stack offset. If so, either spill one additional
+ // callee-saved register or reserve a special spill slot to facilitate
+ // register scavenging. Thumb1 needs a spill slot for stack pointer
+ // adjustments also, even when the frame itself is small.
+ if (BigStack && !ExtraCSSpill) {
+ // If any non-reserved CS register isn't spilled, just spill one or two
+ // extra. That should take care of it!
+ unsigned NumExtras = TargetAlign / 4;
+ SmallVector<unsigned, 2> Extras;
+ while (NumExtras && !UnspilledCS1GPRs.empty()) {
+ unsigned Reg = UnspilledCS1GPRs.back();
+ UnspilledCS1GPRs.pop_back();
+ if (!RegInfo->isReservedReg(MF, Reg) &&
+ (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
+ Reg == ARM::LR)) {
+ Extras.push_back(Reg);
+ NumExtras--;
+ }
+ }
+ // For non-Thumb1 functions, also check for hi-reg CS registers
+ if (!AFI->isThumb1OnlyFunction()) {
+ while (NumExtras && !UnspilledCS2GPRs.empty()) {
+ unsigned Reg = UnspilledCS2GPRs.back();
+ UnspilledCS2GPRs.pop_back();
+ if (!RegInfo->isReservedReg(MF, Reg)) {
+ Extras.push_back(Reg);
+ NumExtras--;
+ }
+ }
+ }
+ if (Extras.size() && NumExtras == 0) {
+ for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
+ MF.getRegInfo().setPhysRegUsed(Extras[i]);
+ }
+ } else if (!AFI->isThumb1OnlyFunction()) {
+ // note: Thumb1 functions spill to R12, not the stack. Reserve a slot
+ // closest to SP or frame pointer.
+ const TargetRegisterClass *RC = ARM::GPRRegisterClass;
+ RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ }
+ }
+ }
+
+ if (ForceLRSpill) {
+ MF.getRegInfo().setPhysRegUsed(ARM::LR);
+ AFI->setLRIsSpilledForFarJump(true);
+ }
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
new file mode 100644
index 0000000..1288b70
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
@@ -0,0 +1,74 @@
+//==-- ARMTargetFrameLowering.h - Define frame lowering for ARM --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARM_FRAMEINFO_H
+#define ARM_FRAMEINFO_H
+
+#include "ARM.h"
+#include "ARMSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class ARMSubtarget;
+
+class ARMFrameLowering : public TargetFrameLowering {
+protected:
+ const ARMSubtarget &STI;
+
+public:
+ explicit ARMFrameLowering(const ARMSubtarget &sti)
+ : TargetFrameLowering(StackGrowsDown, sti.getStackAlignment(), 0, 4),
+ STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+ bool canSimplifyCallFramePseudos(const MachineFunction &MF) const;
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const;
+ int ResolveFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg, int SPAdj) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+
+ private:
+ void emitPushInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI, unsigned StmOpc,
+ unsigned StrOpc, bool NoGap,
+ bool(*Func)(unsigned, bool)) const;
+ void emitPopInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI, unsigned LdmOpc,
+ unsigned LdrOpc, bool isVarArg, bool NoGap,
+ bool(*Func)(unsigned, bool)) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp b/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp
index 85b0c6c..3f02383 100644
--- a/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp
@@ -12,7 +12,8 @@
// global). Such a transformation can significantly reduce the register pressure
// when many globals are involved.
//
-// For example, consider the code which touches several global variables at once:
+// For example, consider the code which touches several global variables at
+// once:
//
// static int foo[N], bar[N], baz[N];
//
@@ -48,7 +49,7 @@
// str r0, [r5], #4
//
// note that we saved two registers here almost "for free".
-// ===----------------------------------------------------------------------===//
+// ===---------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-global-merge"
#include "ARM.h"
@@ -64,16 +65,17 @@
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
namespace {
- class LLVM_LIBRARY_VISIBILITY ARMGlobalMerge : public FunctionPass {
+ class ARMGlobalMerge : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// target type sizes.
const TargetLowering *TLI;
bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
- Module &M, bool) const;
+ Module &M, bool isConst) const;
public:
static char ID; // Pass identification, replacement for typeid.
@@ -81,7 +83,7 @@ namespace {
: FunctionPass(ID), TLI(tli) {}
virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function& F);
+ virtual bool runOnFunction(Function &F);
const char *getPassName() const {
return "Merge internal globals";
@@ -95,13 +97,11 @@ namespace {
struct GlobalCmp {
const TargetData *TD;
- GlobalCmp(const TargetData *td):
- TD(td) { }
+ GlobalCmp(const TargetData *td) : TD(td) { }
- bool operator() (const GlobalVariable* GV1,
- const GlobalVariable* GV2) {
- const Type* Ty1 = cast<PointerType>(GV1->getType())->getElementType();
- const Type* Ty2 = cast<PointerType>(GV2->getType())->getElementType();
+ bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
+ const Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
+ const Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
}
@@ -130,27 +130,27 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
uint64_t MergedSize = 0;
std::vector<const Type*> Tys;
std::vector<Constant*> Inits;
- for (j = i; MergedSize < MaxOffset && j != e; ++j) {
- const Type* Ty = Globals[j]->getType()->getElementType();
+ for (j = i; j != e; ++j) {
+ const Type *Ty = Globals[j]->getType()->getElementType();
+ MergedSize += TD->getTypeAllocSize(Ty);
+ if (MergedSize > MaxOffset) {
+ break;
+ }
Tys.push_back(Ty);
Inits.push_back(Globals[j]->getInitializer());
- MergedSize += TD->getTypeAllocSize(Ty);
}
- StructType* MergedTy = StructType::get(M.getContext(), Tys);
- Constant* MergedInit = ConstantStruct::get(MergedTy, Inits);
- GlobalVariable* MergedGV = new GlobalVariable(M, MergedTy, isConst,
+ StructType *MergedTy = StructType::get(M.getContext(), Tys);
+ Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);
+ GlobalVariable *MergedGV = new GlobalVariable(M, MergedTy, isConst,
GlobalValue::InternalLinkage,
- MergedInit, "merged");
+ MergedInit, "_MergedGlobals");
for (size_t k = i; k < j; ++k) {
- SmallVector<Constant*, 2> Idx;
- Idx.push_back(ConstantInt::get(Int32Ty, 0));
- Idx.push_back(ConstantInt::get(Int32Ty, k-i));
-
- Constant* GEP =
- ConstantExpr::getInBoundsGetElementPtr(MergedGV,
- &Idx[0], Idx.size());
-
+ Constant *Idx[2] = {
+ ConstantInt::get(Int32Ty, 0),
+ ConstantInt::get(Int32Ty, k-i)
+ };
+ Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx, 2);
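+      // Illustrative: for the foo/bar/baz example in the header comment, uses
+      // of bar become getelementptr inbounds (_MergedGlobals, 0, 1), i.e.
+      // field k-i of the merged struct.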
Globals[k]->replaceAllUsesWith(GEP);
Globals[k]->eraseFromParent();
}
@@ -161,8 +161,8 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
}
-bool ARMGlobalMerge::doInitialization(Module& M) {
- SmallVector<GlobalVariable*, 16> Globals, ConstGlobals;
+bool ARMGlobalMerge::doInitialization(Module &M) {
+ SmallVector<GlobalVariable*, 16> Globals, ConstGlobals, BSSGlobals;
const TargetData *TD = TLI->getTargetData();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
@@ -183,8 +183,11 @@ bool ARMGlobalMerge::doInitialization(Module& M) {
I->getName().startswith(".llvm."))
continue;
- if (TD->getTypeAllocSize(I->getType()) < MaxOffset) {
- if (I->isConstant())
+ if (TD->getTypeAllocSize(I->getType()->getElementType()) < MaxOffset) {
+ const TargetLoweringObjectFile &TLOF = TLI->getObjFileLowering();
+ if (TLOF.getKindForGlobal(I, TLI->getTargetMachine()).isBSSLocal())
+ BSSGlobals.push_back(I);
+ else if (I->isConstant())
ConstGlobals.push_back(I);
else
Globals.push_back(I);
@@ -193,17 +196,19 @@ bool ARMGlobalMerge::doInitialization(Module& M) {
if (Globals.size() > 1)
Changed |= doMerge(Globals, M, false);
+ if (BSSGlobals.size() > 1)
+ Changed |= doMerge(BSSGlobals, M, false);
+
// FIXME: This currently breaks the EH processing due to way how the
// typeinfo detection works. We might want to detect the TIs and ignore
// them in the future.
-
// if (ConstGlobals.size() > 1)
// Changed |= doMerge(ConstGlobals, M, true);
return Changed;
}
-bool ARMGlobalMerge::runOnFunction(Function& F) {
+bool ARMGlobalMerge::runOnFunction(Function &F) {
return false;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
new file mode 100644
index 0000000..676b01e
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -0,0 +1,121 @@
+//===-- ARMHazardRecognizer.cpp - ARM postra hazard recognizer ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARMHazardRecognizer.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
+#include "ARMSubtarget.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+using namespace llvm;
+
+static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,
+ const TargetRegisterInfo &TRI) {
+ // FIXME: Detect integer instructions properly.
+ const TargetInstrDesc &TID = MI->getDesc();
+ unsigned Domain = TID.TSFlags & ARMII::DomainMask;
+ if (Domain == ARMII::DomainVFP) {
+ unsigned Opcode = MI->getOpcode();
+ if (Opcode == ARM::VSTRS || Opcode == ARM::VSTRD ||
+ Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
+ return false;
+ } else if (Domain == ARMII::DomainNEON) {
+ if (MI->getDesc().mayStore() || MI->getDesc().mayLoad())
+ return false;
+ } else
+ return false;
+ return MI->readsRegister(DefMI->getOperand(0).getReg(), &TRI);
+}
+
+ScheduleHazardRecognizer::HazardType
+ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+ assert(Stalls == 0 && "ARM hazards don't support scoreboard lookahead");
+
+ MachineInstr *MI = SU->getInstr();
+
+ if (!MI->isDebugValue()) {
+ if (ITBlockSize && MI != ITBlockMIs[ITBlockSize-1])
+ return Hazard;
+
+    // Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
+    // a VMLA / VMLS will cause a 4-cycle stall.
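+    // E.g. (informal): "vmla.f32 s0, s1, s2" followed by a dependent
+    // "vmul.f32 s3, s0, s4" hits this hazard; the recognizer tries to fill
+    // the 4 stall cycles with unrelated instructions.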
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (LastMI && (TID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
+ MachineInstr *DefMI = LastMI;
+ const TargetInstrDesc &LastTID = LastMI->getDesc();
+ // Skip over one non-VFP / NEON instruction.
+ if (!LastTID.isBarrier() &&
+ (LastTID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
+ MachineBasicBlock::iterator I = LastMI;
+ if (I != LastMI->getParent()->begin()) {
+ I = llvm::prior(I);
+ DefMI = &*I;
+ }
+ }
+
+ if (TII.isFpMLxInstruction(DefMI->getOpcode()) &&
+ (TII.canCauseFpMLxStall(MI->getOpcode()) ||
+ hasRAWHazard(DefMI, MI, TRI))) {
+ // Try to schedule another instruction for the next 4 cycles.
+ if (FpMLxStalls == 0)
+ FpMLxStalls = 4;
+ return Hazard;
+ }
+ }
+ }
+
+ return ScoreboardHazardRecognizer::getHazardType(SU, Stalls);
+}
+
+void ARMHazardRecognizer::Reset() {
+ LastMI = 0;
+ FpMLxStalls = 0;
+ ITBlockSize = 0;
+ ScoreboardHazardRecognizer::Reset();
+}
+
+void ARMHazardRecognizer::EmitInstruction(SUnit *SU) {
+ MachineInstr *MI = SU->getInstr();
+ unsigned Opcode = MI->getOpcode();
+ if (ITBlockSize) {
+ --ITBlockSize;
+ } else if (Opcode == ARM::t2IT) {
+ unsigned Mask = MI->getOperand(1).getImm();
+ unsigned NumTZ = CountTrailingZeros_32(Mask);
+ assert(NumTZ <= 3 && "Invalid IT mask!");
+ ITBlockSize = 4 - NumTZ;
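+    // Worked example: an IT mask of 0b0100 has two trailing zeros, so the
+    // block covers 4 - 2 = 2 conditional instructions; 0b1000 yields a
+    // single-instruction block.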
+ MachineBasicBlock::iterator I = MI;
+ for (unsigned i = 0; i < ITBlockSize; ++i) {
+ // Advance to the next instruction, skipping any dbg_value instructions.
+ do {
+ ++I;
+ } while (I->isDebugValue());
+ ITBlockMIs[ITBlockSize-1-i] = &*I;
+ }
+ }
+
+ if (!MI->isDebugValue()) {
+ LastMI = MI;
+ FpMLxStalls = 0;
+ }
+
+ ScoreboardHazardRecognizer::EmitInstruction(SU);
+}
+
+void ARMHazardRecognizer::AdvanceCycle() {
+ if (FpMLxStalls && --FpMLxStalls == 0)
+ // Stalled for 4 cycles but still can't schedule any other instructions.
+ LastMI = 0;
+ ScoreboardHazardRecognizer::AdvanceCycle();
+}
+
+void ARMHazardRecognizer::RecedeCycle() {
+ llvm_unreachable("reverse ARM hazard checking unsupported");
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
new file mode 100644
index 0000000..2bc218d
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
@@ -0,0 +1,54 @@
+//===-- ARMHazardRecognizer.h - ARM Hazard Recognizers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines hazard recognizers for scheduling ARM functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMHAZARDRECOGNIZER_H
+#define ARMHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
+
+namespace llvm {
+
+class ARMBaseInstrInfo;
+class ARMBaseRegisterInfo;
+class ARMSubtarget;
+class MachineInstr;
+
+class ARMHazardRecognizer : public ScoreboardHazardRecognizer {
+ const ARMBaseInstrInfo &TII;
+ const ARMBaseRegisterInfo &TRI;
+ const ARMSubtarget &STI;
+
+ MachineInstr *LastMI;
+ unsigned FpMLxStalls;
+ unsigned ITBlockSize; // No. of MIs in current IT block yet to be scheduled.
+ MachineInstr *ITBlockMIs[4];
+
+public:
+ ARMHazardRecognizer(const InstrItineraryData *ItinData,
+ const ARMBaseInstrInfo &tii,
+ const ARMBaseRegisterInfo &tri,
+ const ARMSubtarget &sti,
+ const ScheduleDAG *DAG) :
+ ScoreboardHazardRecognizer(ItinData, DAG, "post-RA-sched"), TII(tii),
+ TRI(tri), STI(sti), LastMI(0), ITBlockSize(0) {}
+
+ virtual HazardType getHazardType(SUnit *SU, int Stalls);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+ virtual void RecedeCycle();
+};
+
+} // end namespace llvm
+
+#endif // ARMHAZARDRECOGNIZER_H
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 51a30c1..a506cff 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
#include "ARMAddressingModes.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
@@ -41,13 +42,25 @@ DisableShifterOp("disable-shifter-op", cl::Hidden,
cl::desc("Disable isel of shifter-op"),
cl::init(false));
+static cl::opt<bool>
+CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
+ cl::desc("Check fp vmla / vmls hazard at isel time"),
+ cl::init(false));
+
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {
+
+enum AddrMode2Type {
+ AM2_BASE, // Simple AM2 (+-imm12)
+ AM2_SHOP // Shifter-op AM2
+};
+
class ARMDAGToDAGISel : public SelectionDAGISel {
ARMBaseTargetMachine &TM;
+ const ARMBaseInstrInfo *TII;
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -57,7 +70,8 @@ public:
explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), TM(tm),
- Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
+ TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
+ Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
}
virtual const char *getPassName() const {
@@ -72,60 +86,101 @@ public:
SDNode *Select(SDNode *N);
- bool SelectShifterOperandReg(SDNode *Op, SDValue N, SDValue &A,
+
+ bool hasNoVMLxHazardUse(SDNode *N) const;
+ bool isShifterOpProfitable(const SDValue &Shift,
+ ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
+ bool SelectShifterOperandReg(SDValue N, SDValue &A,
SDValue &B, SDValue &C);
- bool SelectAddrMode2(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Opc);
+ bool SelectShiftShifterOperandReg(SDValue N, SDValue &A,
+ SDValue &B, SDValue &C);
+ bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
+ bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
+
+ AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
+ SDValue &Offset, SDValue &Opc);
+ bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &Opc) {
+ return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
+ }
+
+ bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &Opc) {
+ return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
+ }
+
+ bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &Opc) {
+ SelectAddrMode2Worker(N, Base, Offset, Opc);
+// return SelectAddrMode2ShOp(N, Base, Offset, Opc);
+ // This always matches one way or another.
+ return true;
+ }
+
bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode3(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectAddrMode3(SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Opc);
bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode4(SDNode *Op, SDValue N, SDValue &Addr,
- SDValue &Mode);
- bool SelectAddrMode5(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectAddrMode5(SDValue N, SDValue &Base,
SDValue &Offset);
- bool SelectAddrMode6(SDNode *Op, SDValue N, SDValue &Addr, SDValue &Align);
-
- bool SelectAddrModePC(SDNode *Op, SDValue N, SDValue &Offset,
- SDValue &Label);
-
- bool SelectThumbAddrModeRR(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset);
- bool SelectThumbAddrModeRI5(SDNode *Op, SDValue N, unsigned Scale,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset);
- bool SelectThumbAddrModeS1(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeS2(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeS4(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeSP(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
-
- bool SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
+ bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
+
+ bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
+
+ // Thumb Addressing Modes:
+ bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
+ unsigned Scale);
+ bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
+ SDValue &OffImm);
+ bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
+
+ // Thumb 2 Addressing Modes:
+ bool SelectT2ShifterOperandReg(SDValue N,
SDValue &BaseReg, SDValue &Opc);
- bool SelectT2AddrModeImm12(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
- bool SelectT2AddrModeImm8(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
+ bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
SDValue &OffImm);
bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
SDValue &OffImm);
- bool SelectT2AddrModeImm8s4(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
- bool SelectT2AddrModeSoReg(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
SDValue &OffReg, SDValue &ShImm);
+ inline bool is_so_imm(unsigned Imm) const {
+ return ARM_AM::getSOImmVal(Imm) != -1;
+ }
+
+ inline bool is_so_imm_not(unsigned Imm) const {
+ return ARM_AM::getSOImmVal(~Imm) != -1;
+ }
+
+ inline bool is_t2_so_imm(unsigned Imm) const {
+ return ARM_AM::getT2SOImmVal(Imm) != -1;
+ }
+
+ inline bool is_t2_so_imm_not(unsigned Imm) const {
+ return ARM_AM::getT2SOImmVal(~Imm) != -1;
+ }
+
inline bool Pred_so_imm(SDNode *inN) const {
ConstantSDNode *N = cast<ConstantSDNode>(inN);
- return ARM_AM::getSOImmVal(N->getZExtValue()) != -1;
+ return is_so_imm(N->getZExtValue());
}
inline bool Pred_t2_so_imm(SDNode *inN) const {
ConstantSDNode *N = cast<ConstantSDNode>(inN);
- return ARM_AM::getT2SOImmVal(N->getZExtValue()) != -1;
+ return is_t2_so_imm(N->getZExtValue());
}
// Include the pieces autogenerated from the target description.
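
The is_so_imm / is_t2_so_imm helpers above test whether a constant is encodable as an ARM or Thumb-2 modified immediate; the _not variants apply the same test to the complement for MVN/BIC-style selection. A standalone sketch of the classic ARM-mode test follows (getSOImmVal itself is not reproduced, and the Thumb-2 rules, which also accept splat patterns, are not modeled):

    #include <cassert>
    #include <cstdint>

    // An ARM-mode "shifter operand" immediate is an 8-bit value rotated
    // right by an even amount.
    static uint32_t rotl32(uint32_t v, unsigned n) {
      n &= 31;
      return n ? (v << n) | (v >> (32 - n)) : v;
    }

    static bool isARMSOImm(uint32_t v) {
      for (unsigned rot = 0; rot < 32; rot += 2)
        if ((rotl32(v, rot) & ~0xFFu) == 0)  // undoes a rotate-right by 'rot'
          return true;
      return false;
    }

    int main() {
      assert(isARMSOImm(0xFF));        // plain 8-bit value
      assert(isARMSOImm(0xFF000000));  // 0xFF rotated right by 8
      assert(!isARMSOImm(0x101));      // 9 significant bits: not encodable
    }
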
@@ -141,22 +196,30 @@ private:
/// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
/// loads of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
- SDNode *SelectVLD(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
+ SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
+ unsigned *DOpcodes,
unsigned *QOpcodes0, unsigned *QOpcodes1);
/// SelectVST - Select NEON store intrinsics. NumVecs should
/// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
/// stores of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
- SDNode *SelectVST(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
+ SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
+ unsigned *DOpcodes,
unsigned *QOpcodes0, unsigned *QOpcodes1);
/// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
/// be 2, 3 or 4. The opcode arrays specify the instructions used for
- /// load/store of D registers and even subregs and odd subregs of Q registers.
- SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1);
+ /// load/store of D registers and Q registers.
+ SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
+ bool isUpdating, unsigned NumVecs,
+ unsigned *DOpcodes, unsigned *QOpcodes);
+
+ /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
+ /// should be 2, 3 or 4. The opcode array specifies the instructions used
+ /// for loading D registers. (Q registers are not supported.)
+ SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
+ unsigned *Opcodes);
/// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
/// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
@@ -174,10 +237,10 @@ private:
SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR,
SDValue InFlag);
- SDNode *SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
+ SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR,
SDValue InFlag);
- SDNode *SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
+ SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR,
SDValue InFlag);
@@ -199,9 +262,8 @@ private:
SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
- // Form sequences of 8 consecutive D registers.
- SDNode *OctoDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3,
- SDValue V4, SDValue V5, SDValue V6, SDValue V7);
+ // Get the alignment operand for a NEON VLD or VST instruction.
+ SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}
@@ -229,9 +291,85 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
isInt32Immediate(N->getOperand(1).getNode(), Imm);
}
+/// \brief Check whether a particular node is a constant value representable as
+/// (N * Scale) where N is in the range [\arg RangeMin, \arg RangeMax).
+///
+/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
+static bool isScaledConstantInRange(SDValue Node, unsigned Scale,
+ int RangeMin, int RangeMax,
+ int &ScaledConstant) {
+ assert(Scale && "Invalid scale!");
+
+ // Check that this is a constant.
+ const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
+ if (!C)
+ return false;
-bool ARMDAGToDAGISel::SelectShifterOperandReg(SDNode *Op,
- SDValue N,
+ ScaledConstant = (int) C->getZExtValue();
+ if ((ScaledConstant % Scale) != 0)
+ return false;
+
+ ScaledConstant /= Scale;
+ return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
+}
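
A standalone restatement of isScaledConstantInRange() with the SelectionDAG plumbing stripped out, plus a usage example with the scale-4 addrmode5 range used later in this file:

    #include <cassert>

    // Succeeds iff Value == N * Scale for some integer N in
    // [RangeMin, RangeMax), returning N through ScaledConstant.
    static bool isScaledConstantInRange(int Value, unsigned Scale,
                                        int RangeMin, int RangeMax,
                                        int &ScaledConstant) {
      assert(Scale && "Invalid scale!");
      if (Value % (int)Scale != 0)
        return false;
      ScaledConstant = Value / (int)Scale;
      return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
    }

    int main() {
      int N;
      assert(isScaledConstantInRange(1020, 4, -255, 256, N) && N == 255);
      assert(!isScaledConstantInRange(1022, 4, -255, 256, N)); // not a multiple
      assert(!isScaledConstantInRange(4096, 1, -4095, 4096, N)); // out of range
    }
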
+
+/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
+/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
+/// least on current ARM implementations) which should be avoided.
+bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
+ if (OptLevel == CodeGenOpt::None)
+ return true;
+
+ if (!CheckVMLxHazard)
+ return true;
+
+ if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
+ return true;
+
+ if (!N->hasOneUse())
+ return false;
+
+ SDNode *Use = *N->use_begin();
+ if (Use->getOpcode() == ISD::CopyToReg)
+ return true;
+ if (Use->isMachineOpcode()) {
+ const TargetInstrDesc &TID = TII->get(Use->getMachineOpcode());
+ if (TID.mayStore())
+ return true;
+ unsigned Opcode = TID.getOpcode();
+ if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
+ return true;
+ // vmlx feeding into another vmlx. We actually want to unfold
+ // the use later in the MLxExpansion pass. e.g.
+ // vmla
+ // vmla (stall 8 cycles)
+ //
+ // vmul (5 cycles)
+ // vadd (5 cycles)
+ // vmla
+ // This adds up to about 18 - 19 cycles.
+ //
+  // vmla
+  // vmul (stall 4 cycles)
+  // vadd
+  // This adds up to about 14 cycles.
+ return TII->isFpMLxInstruction(Opcode);
+ }
+
+ return false;
+}
+
+bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
+ ARM_AM::ShiftOpc ShOpcVal,
+ unsigned ShAmt) {
+ if (!Subtarget->isCortexA9())
+ return true;
+ if (Shift.hasOneUse())
+ return true;
+ // R << 2 is free.
+ return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
+}
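
A simplified restatement of the heuristic above, with the subtarget query and use count folded into plain parameters:

    #include <cassert>

    enum ShiftOpc { NoShift, LSL, LSR, ASR, ROR };

    // On Cortex-A9, folding a shift into an address or shifter operand is
    // only considered profitable when the shift has no other users (it
    // must be computed anyway otherwise) or when it is the free "lsl #2".
    static bool isShifterOpProfitable(bool isCortexA9, bool shiftHasOneUse,
                                      ShiftOpc opc, unsigned shAmt) {
      if (!isCortexA9) return true;
      if (shiftHasOneUse) return true;
      return opc == LSL && shAmt == 2;   // R << 2 is free
    }

    int main() {
      assert(isShifterOpProfitable(false, false, ASR, 7)); // non-A9: always
      assert(isShifterOpProfitable(true, false, LSL, 2));  // free shift on A9
      assert(!isShifterOpProfitable(true, false, LSL, 3)); // shared, not free
    }
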
+
+bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
SDValue &BaseReg,
SDValue &ShReg,
SDValue &Opc) {
@@ -251,16 +389,92 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDNode *Op,
ShImmVal = RHS->getZExtValue() & 31;
} else {
ShReg = N.getOperand(1);
+ if (!isShifterOpProfitable(N, ShOpcVal, ShImmVal))
+ return false;
}
Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
MVT::i32);
return true;
}
-bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset,
+bool ARMDAGToDAGISel::SelectShiftShifterOperandReg(SDValue N,
+ SDValue &BaseReg,
+ SDValue &ShReg,
+ SDValue &Opc) {
+ ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
+
+ // Don't match base register only case. That is matched to a separate
+ // lower complexity pattern with explicit register operand.
+ if (ShOpcVal == ARM_AM::no_shift) return false;
+
+ BaseReg = N.getOperand(0);
+ unsigned ShImmVal = 0;
+ // Do not check isShifterOpProfitable. This must return true.
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ ShReg = CurDAG->getRegister(0, MVT::i32);
+ ShImmVal = RHS->getZExtValue() & 31;
+ } else {
+ ShReg = N.getOperand(1);
+ }
+ Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
+ MVT::i32);
+ return true;
+}
+
+bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
+ SDValue &Base,
+ SDValue &OffImm) {
+ // Match simple R + imm12 operands.
+
+ // Base only.
+ if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
+ !CurDAG->isBaseWithConstantOffset(N)) {
+ if (N.getOpcode() == ISD::FrameIndex) {
+ // Match frame index.
+ int FI = cast<FrameIndexSDNode>(N)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (N.getOpcode() == ARMISD::Wrapper &&
+ !(Subtarget->useMovt() &&
+ N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
+ Base = N.getOperand(0);
+ } else
+ Base = N;
+ OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ int RHSC = (int)RHS->getZExtValue();
+ if (N.getOpcode() == ISD::SUB)
+ RHSC = -RHSC;
+
+ if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ }
+ OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ return true;
+ }
+ }
+
+ // Base only.
+ Base = N;
+ OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
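
A toy restatement (invented helper, plain ints) of the folding decision above: as the code is written, only sign-adjusted offsets in [0, 4096) fold into the imm12 form, and everything else falls back to base-only with a zero immediate:

    #include <cassert>

    static bool foldAsImm12(int rhsC, bool isSub, int &offImm) {
      int off = isSub ? -rhsC : rhsC;
      if (off >= 0 && off < 0x1000) {   // 12 bits (unsigned)
        offImm = off;
        return true;
      }
      offImm = 0;                       // base-only fallback
      return false;
    }

    int main() {
      int off;
      assert(foldAsImm12(4095, false, off) && off == 4095);
      assert(!foldAsImm12(4096, false, off) && off == 0); // too large
      assert(foldAsImm12(-8, true, off) && off == 8);     // SUB of -8 is +8
    }
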
+
+bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Opc) {
- if (N.getOpcode() == ISD::MUL) {
+ if (N.getOpcode() == ISD::MUL &&
+ (!Subtarget->isCortexA9() || N.hasOneUse())) {
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
// X * [3,5,9] -> X + X * [2,4,8] etc.
int RHSC = (int)RHS->getZExtValue();
@@ -283,7 +497,114 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
}
}
- if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
+ if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
+ // ISD::OR that is equivalent to an ISD::ADD.
+ !CurDAG->isBaseWithConstantOffset(N))
+ return false;
+
+ // Leave simple R +/- imm12 operands for LDRi12
+ if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
+ -0x1000+1, 0x1000, RHSC)) // 12 bits.
+ return false;
+ }
+
+ if (Subtarget->isCortexA9() && !N.hasOneUse())
+ // Compute R +/- (R << N) and reuse it.
+ return false;
+
+ // Otherwise this is R +/- [possibly shifted] R.
+ ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
+ ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
+ unsigned ShAmt = 0;
+
+ Base = N.getOperand(0);
+ Offset = N.getOperand(1);
+
+ if (ShOpcVal != ARM_AM::no_shift) {
+ // Check to see if the RHS of the shift is a constant, if not, we can't fold
+ // it.
+ if (ConstantSDNode *Sh =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
+ ShAmt = Sh->getZExtValue();
+ if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
+ Offset = N.getOperand(1).getOperand(0);
+ else {
+ ShAmt = 0;
+ ShOpcVal = ARM_AM::no_shift;
+ }
+ } else {
+ ShOpcVal = ARM_AM::no_shift;
+ }
+ }
+
+ // Try matching (R shl C) + (R).
+ if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
+ !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
+ ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
+ if (ShOpcVal != ARM_AM::no_shift) {
+ // Check to see if the RHS of the shift is a constant, if not, we can't
+ // fold it.
+ if (ConstantSDNode *Sh =
+ dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
+ ShAmt = Sh->getZExtValue();
+ if (!Subtarget->isCortexA9() ||
+ (N.hasOneUse() &&
+ isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
+ Offset = N.getOperand(0).getOperand(0);
+ Base = N.getOperand(1);
+ } else {
+ ShAmt = 0;
+ ShOpcVal = ARM_AM::no_shift;
+ }
+ } else {
+ ShOpcVal = ARM_AM::no_shift;
+ }
+ }
+ }
+
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
+ MVT::i32);
+ return true;
+}
+
+AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
+ SDValue &Base,
+ SDValue &Offset,
+ SDValue &Opc) {
+ if (N.getOpcode() == ISD::MUL &&
+ (!Subtarget->isCortexA9() || N.hasOneUse())) {
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ // X * [3,5,9] -> X + X * [2,4,8] etc.
+ int RHSC = (int)RHS->getZExtValue();
+ if (RHSC & 1) {
+ RHSC = RHSC & ~1;
+ ARM_AM::AddrOpc AddSub = ARM_AM::add;
+ if (RHSC < 0) {
+ AddSub = ARM_AM::sub;
+ RHSC = - RHSC;
+ }
+ if (isPowerOf2_32(RHSC)) {
+ unsigned ShAmt = Log2_32(RHSC);
+ Base = Offset = N.getOperand(0);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
+ ARM_AM::lsl),
+ MVT::i32);
+ return AM2_SHOP;
+ }
+ }
+ }
+ }
+
+ if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
+ // ISD::OR that is equivalent to an ADD.
+ !CurDAG->isBaseWithConstantOffset(N)) {
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
@@ -297,36 +618,45 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
ARM_AM::no_shift),
MVT::i32);
- return true;
+ return AM2_BASE;
}
// Match simple R +/- imm12 operands.
- if (N.getOpcode() == ISD::ADD)
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC >= 0 && RHSC < 0x1000) ||
- (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
+ if (N.getOpcode() != ISD::SUB) {
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
+ -0x1000+1, 0x1000, RHSC)) { // 12 bits.
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ }
+ Offset = CurDAG->getRegister(0, MVT::i32);
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
- ARM_AM::no_shift),
- MVT::i32);
- return true;
+ ARM_AM::AddrOpc AddSub = ARM_AM::add;
+ if (RHSC < 0) {
+ AddSub = ARM_AM::sub;
+ RHSC = - RHSC;
}
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
+ ARM_AM::no_shift),
+ MVT::i32);
+ return AM2_BASE;
}
+ }
+
+ if (Subtarget->isCortexA9() && !N.hasOneUse()) {
+ // Compute R +/- (R << N) and reuse it.
+ Base = N;
+ Offset = CurDAG->getRegister(0, MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
+ ARM_AM::no_shift),
+ MVT::i32);
+ return AM2_BASE;
+ }
// Otherwise this is R +/- [possibly shifted] R.
- ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
+ ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
unsigned ShAmt = 0;
@@ -339,14 +669,20 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
if (ConstantSDNode *Sh =
dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(1).getOperand(0);
+ if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
+ Offset = N.getOperand(1).getOperand(0);
+ else {
+ ShAmt = 0;
+ ShOpcVal = ARM_AM::no_shift;
+ }
} else {
ShOpcVal = ARM_AM::no_shift;
}
}
// Try matching (R shl C) + (R).
- if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
+ if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
+ !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant, if not, we can't
@@ -354,8 +690,15 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
if (ConstantSDNode *Sh =
dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(0).getOperand(0);
- Base = N.getOperand(1);
+ if (!Subtarget->isCortexA9() ||
+ (N.hasOneUse() &&
+ isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
+ Offset = N.getOperand(0).getOperand(0);
+ Base = N.getOperand(1);
+ } else {
+ ShAmt = 0;
+ ShOpcVal = ARM_AM::no_shift;
+ }
} else {
ShOpcVal = ARM_AM::no_shift;
}
@@ -364,7 +707,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
MVT::i32);
- return true;
+ return AM2_SHOP;
}
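
The MUL case at the top of SelectAddrMode2Worker() turns a multiply by an odd constant into the base plus or minus a shifted copy of the base. A standalone sketch of that decomposition (plain integers; __builtin_ctz assumes GCC/Clang):

    #include <cassert>

    // A multiply by an odd constant C folds into a shifter-op address iff
    // C - 1 (positive C) or 1 - C (negative C) is a power of two:
    // X*9 = X + (X<<3), X*-7 = X - (X<<3).
    static bool decomposeOddMul(int c, unsigned &shAmt, bool &isSub) {
      if ((c & 1) == 0) return false;     // only odd constants qualify
      unsigned m = (unsigned)(c - 1);     // X*C == X + (C-1)*X
      isSub = c < 0;
      if (isSub) m = -m;                  // X*C == X - (1-C)*X
      if (m == 0 || (m & (m - 1)) != 0) return false; // need a power of two
      shAmt = (unsigned)__builtin_ctz(m);
      return true;
    }

    int main() {
      unsigned sh; bool sub;
      assert(decomposeOddMul(9, sh, sub) && sh == 3 && !sub);
      assert(decomposeOddMul(-7, sh, sub) && sh == 3 && sub);
      assert(!decomposeOddMul(6, sh, sub));   // even constant
      assert(!decomposeOddMul(11, sh, sub));  // 10 is not a power of two
    }
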
bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
@@ -375,15 +718,13 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
: cast<StoreSDNode>(Op)->getAddressingMode();
ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
? ARM_AM::add : ARM_AM::sub;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
- int Val = (int)C->getZExtValue();
- if (Val >= 0 && Val < 0x1000) { // 12 bits.
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
- ARM_AM::no_shift),
- MVT::i32);
- return true;
- }
+ int Val;
+ if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
+ Offset = CurDAG->getRegister(0, MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
+ ARM_AM::no_shift),
+ MVT::i32);
+ return true;
}
Offset = N;
@@ -394,7 +735,12 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
// it.
if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(0);
+ if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
+ Offset = N.getOperand(0);
+ else {
+ ShAmt = 0;
+ ShOpcVal = ARM_AM::no_shift;
+ }
} else {
ShOpcVal = ARM_AM::no_shift;
}
@@ -406,7 +752,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
}
-bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
SDValue &Base, SDValue &Offset,
SDValue &Opc) {
if (N.getOpcode() == ISD::SUB) {
@@ -417,7 +763,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N,
return true;
}
- if (N.getOpcode() != ISD::ADD) {
+ if (!CurDAG->isBaseWithConstantOffset(N)) {
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
@@ -429,25 +775,23 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N,
}
// If the RHS is +/- imm8, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC >= 0 && RHSC < 256) ||
- (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
+ -256 + 1, 256, RHSC)) { // 8 bits.
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ }
+ Offset = CurDAG->getRegister(0, MVT::i32);
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
- return true;
+ ARM_AM::AddrOpc AddSub = ARM_AM::add;
+ if (RHSC < 0) {
+ AddSub = ARM_AM::sub;
+ RHSC = -RHSC;
}
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
+ return true;
}
Base = N.getOperand(0);
@@ -464,13 +808,11 @@ bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
: cast<StoreSDNode>(Op)->getAddressingMode();
ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
? ARM_AM::add : ARM_AM::sub;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
- int Val = (int)C->getZExtValue();
- if (Val >= 0 && Val < 256) {
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
- return true;
- }
+ int Val;
+  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
+ Offset = CurDAG->getRegister(0, MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
+ return true;
}
Offset = N;
@@ -478,16 +820,9 @@ bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
return true;
}
-bool ARMDAGToDAGISel::SelectAddrMode4(SDNode *Op, SDValue N,
- SDValue &Addr, SDValue &Mode) {
- Addr = N;
- Mode = CurDAG->getTargetConstant(ARM_AM::getAM4ModeImm(ARM_AM::ia), MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
SDValue &Base, SDValue &Offset) {
- if (N.getOpcode() != ISD::ADD) {
+ if (!CurDAG->isBaseWithConstantOffset(N)) {
Base = N;
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
@@ -503,28 +838,23 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N,
}
// If the RHS is +/- imm8, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
- RHSC >>= 2;
- if ((RHSC >= 0 && RHSC < 256) ||
- (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
+ -256 + 1, 256, RHSC)) {
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ }
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
- MVT::i32);
- return true;
- }
+ ARM_AM::AddrOpc AddSub = ARM_AM::add;
+ if (RHSC < 0) {
+ AddSub = ARM_AM::sub;
+ RHSC = -RHSC;
}
+ Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
+ MVT::i32);
+ return true;
}
Base = N;
@@ -533,30 +863,50 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N,
return true;
}
-bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Op, SDValue N,
- SDValue &Addr, SDValue &Align) {
+bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
+ SDValue &Align) {
Addr = N;
- // Default to no alignment.
- Align = CurDAG->getTargetConstant(0, MVT::i32);
+
+ unsigned Alignment = 0;
+ if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
+ // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
+ // The maximum alignment is equal to the memory size being referenced.
+ unsigned LSNAlign = LSN->getAlignment();
+ unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
+ if (LSNAlign > MemSize && MemSize > 1)
+ Alignment = MemSize;
+ } else {
+ // All other uses of addrmode6 are for intrinsics. For now just record
+ // the raw alignment value; it will be refined later based on the legal
+ // alignment operands for the intrinsic.
+ Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
+ }
+
+ Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
return true;
}
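
A sketch of the lane/dup alignment clamp in SelectAddrMode6() above, exactly as the code is written:

    #include <cassert>

    // For VLD1-lane/dup and VST1-lane the encodable alignment can never
    // exceed the number of bytes actually touched, so a larger IR
    // alignment is clamped down; anything else yields no alignment operand.
    static unsigned addrMode6LaneAlign(unsigned irAlignBytes,
                                       unsigned memSizeBytes) {
      if (irAlignBytes > memSizeBytes && memSizeBytes > 1)
        return memSizeBytes;
      return 0;
    }

    int main() {
      assert(addrMode6LaneAlign(16, 4) == 4); // aligned i32 lane: align 4
      assert(addrMode6LaneAlign(4, 4) == 0);  // equal alignment: default
      assert(addrMode6LaneAlign(8, 1) == 0);  // byte lanes take no alignment
    }
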
-bool ARMDAGToDAGISel::SelectAddrModePC(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
SDValue &Offset, SDValue &Label) {
if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
- Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- MVT::i32);
+ Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
+ MVT::i32);
return true;
}
+
return false;
}
-bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
+
+//===----------------------------------------------------------------------===//
+// Thumb Addressing Modes
+//===----------------------------------------------------------------------===//
+
+bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
SDValue &Base, SDValue &Offset){
- // FIXME dl should come from the parent load or store, not the address
- if (N.getOpcode() != ISD::ADD) {
+ if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
if (!NC || !NC->isNullValue())
return false;
@@ -571,82 +921,137 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
}
bool
-ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDNode *Op, SDValue N,
- unsigned Scale, SDValue &Base,
- SDValue &OffImm, SDValue &Offset) {
+ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
+ SDValue &Offset, unsigned Scale) {
if (Scale == 4) {
SDValue TmpBase, TmpOffImm;
- if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
+ if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
return false; // We want to select tLDRspi / tSTRspi instead.
+
if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
return false; // We want to select tLDRpci instead.
}
- if (N.getOpcode() != ISD::ADD) {
+ if (!CurDAG->isBaseWithConstantOffset(N))
+ return false;
+
+ // Thumb does not have [sp, r] address mode.
+ RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
+ RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
+ if ((LHSR && LHSR->getReg() == ARM::SP) ||
+ (RHSR && RHSR->getReg() == ARM::SP))
+ return false;
+
+ // FIXME: Why do we explicitly check for a match here and then return false?
+ // Presumably to allow something else to match, but shouldn't this be
+ // documented?
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
+ return false;
+
+ Base = N.getOperand(0);
+ Offset = N.getOperand(1);
+ return true;
+}
+
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
+ SDValue &Base,
+ SDValue &Offset) {
+ return SelectThumbAddrModeRI(N, Base, Offset, 1);
+}
+
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
+ SDValue &Base,
+ SDValue &Offset) {
+ return SelectThumbAddrModeRI(N, Base, Offset, 2);
+}
+
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
+ SDValue &Base,
+ SDValue &Offset) {
+ return SelectThumbAddrModeRI(N, Base, Offset, 4);
+}
+
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
+ SDValue &Base, SDValue &OffImm) {
+ if (Scale == 4) {
+ SDValue TmpBase, TmpOffImm;
+ if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
+ return false; // We want to select tLDRspi / tSTRspi instead.
+
+ if (N.getOpcode() == ARMISD::Wrapper &&
+ N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
+ return false; // We want to select tLDRpci instead.
+ }
+
+ if (!CurDAG->isBaseWithConstantOffset(N)) {
if (N.getOpcode() == ARMISD::Wrapper &&
!(Subtarget->useMovt() &&
N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
Base = N.getOperand(0);
- } else
+ } else {
Base = N;
+ }
- Offset = CurDAG->getRegister(0, MVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
- // Thumb does not have [sp, r] address mode.
RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
if ((LHSR && LHSR->getReg() == ARM::SP) ||
(RHSR && RHSR->getReg() == ARM::SP)) {
+ ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
+ ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
+ unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
+
+ // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
+ if (LHSC != 0 || RHSC != 0) return false;
+
Base = N;
- Offset = CurDAG->getRegister(0, MVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
// If the RHS is + imm5 * scale, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
- RHSC /= Scale;
- if (RHSC >= 0 && RHSC < 32) {
- Base = N.getOperand(0);
- Offset = CurDAG->getRegister(0, MVT::i32);
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
+ Base = N.getOperand(0);
+ OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ return true;
}
Base = N.getOperand(0);
- Offset = N.getOperand(1);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
-bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
+ SDValue &OffImm) {
+ return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}
-bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
+ SDValue &OffImm) {
+ return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}
-bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
+bool
+ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
+ SDValue &OffImm) {
+ return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}
-bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
+bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
+ SDValue &Base, SDValue &OffImm) {
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
@@ -654,35 +1059,35 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDNode *Op, SDValue N,
return true;
}
- if (N.getOpcode() != ISD::ADD)
+ if (!CurDAG->isBaseWithConstantOffset(N))
return false;
RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
(LHSR && LHSR->getReg() == ARM::SP)) {
// If the RHS is + imm8 * scale, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
- RHSC >>= 2;
- if (RHSC >= 0 && RHSC < 256) {
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
+ int RHSC;
+ if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
+ OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ return true;
}
}
return false;
}
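
A standalone restatement of the tLDRspi/tSTRspi offset rule above: an SP-relative offset folds only when it is word-scaled and the scaled value fits in 8 bits:

    #include <cassert>

    static bool foldsAsThumbSPOffset(int byteOff, int &imm8) {
      if (byteOff < 0 || (byteOff & 3) != 0)
        return false;                 // must be a non-negative multiple of 4
      imm8 = byteOff / 4;
      return imm8 < 256;              // imm8 in [0, 256)
    }

    int main() {
      int imm;
      assert(foldsAsThumbSPOffset(1020, imm) && imm == 255); // max encodable
      assert(!foldsAsThumbSPOffset(1024, imm));              // scales to 256
      assert(!foldsAsThumbSPOffset(6, imm));                 // not word-aligned
    }
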
-bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
- SDValue &BaseReg,
+
+//===----------------------------------------------------------------------===//
+// Thumb 2 Addressing Modes
+//===----------------------------------------------------------------------===//
+
+
+bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
SDValue &Opc) {
if (DisableShifterOp)
return false;
@@ -704,19 +1109,22 @@ bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
return false;
}
-bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
SDValue &Base, SDValue &OffImm) {
// Match simple R + imm12 operands.
// Base only.
- if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
+ if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
+ !CurDAG->isBaseWithConstantOffset(N)) {
if (N.getOpcode() == ISD::FrameIndex) {
- // Match frame index...
+ // Match frame index.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
- } else if (N.getOpcode() == ARMISD::Wrapper &&
+ }
+
+ if (N.getOpcode() == ARMISD::Wrapper &&
!(Subtarget->useMovt() &&
N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
Base = N.getOperand(0);
@@ -729,7 +1137,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N,
}
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- if (SelectT2AddrModeImm8(Op, N, Base, OffImm))
+ if (SelectT2AddrModeImm8(N, Base, OffImm))
// Let t2LDRi8 handle (R - imm8).
return false;
@@ -754,24 +1162,26 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N,
return true;
}
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
SDValue &Base, SDValue &OffImm) {
// Match simple R - imm8 operands.
- if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::SUB) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getSExtValue();
- if (N.getOpcode() == ISD::SUB)
- RHSC = -RHSC;
-
- if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
+ if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
+ !CurDAG->isBaseWithConstantOffset(N))
+ return false;
+
+ if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ int RHSC = (int)RHS->getSExtValue();
+ if (N.getOpcode() == ISD::SUB)
+ RHSC = -RHSC;
+
+ if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
+ Base = N.getOperand(0);
+ if (Base.getOpcode() == ISD::FrameIndex) {
+ int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
+ OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ return true;
}
}
@@ -784,52 +1194,22 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
? cast<LoadSDNode>(Op)->getAddressingMode()
: cast<StoreSDNode>(Op)->getAddressingMode();
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
- OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
- ? CurDAG->getTargetConstant(RHSC, MVT::i32)
- : CurDAG->getTargetConstant(-RHSC, MVT::i32);
- return true;
- }
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
- if (N.getOpcode() == ISD::ADD) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- // 8 bits.
- if (((RHSC & 0x3) == 0) &&
- ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) {
- Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
- } else if (N.getOpcode() == ISD::SUB) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- // 8 bits.
- if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) {
- Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
- return true;
- }
- }
+ int RHSC;
+ if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
+ OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
+ ? CurDAG->getTargetConstant(RHSC, MVT::i32)
+ : CurDAG->getTargetConstant(-RHSC, MVT::i32);
+ return true;
}
return false;
}
-bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
SDValue &Base,
SDValue &OffReg, SDValue &ShImm) {
// (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
- if (N.getOpcode() != ISD::ADD)
+ if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
return false;
// Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
@@ -841,6 +1221,12 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N,
return false;
}
+ if (Subtarget->isCortexA9() && !N.hasOneUse()) {
+ // Compute R + (R << [1,2,3]) and reuse it.
+ Base = N;
+ return false;
+ }
+
// Look for (R + R) or (R + (R << [1,2,3])).
unsigned ShAmt = 0;
Base = N.getOperand(0);
@@ -859,11 +1245,12 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N,
// it.
if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
ShAmt = Sh->getZExtValue();
- if (ShAmt >= 4) {
+ if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
+ OffReg = OffReg.getOperand(0);
+ else {
ShAmt = 0;
ShOpcVal = ARM_AM::no_shift;
- } else
- OffReg = OffReg.getOperand(0);
+ }
} else {
ShOpcVal = ARM_AM::no_shift;
}
@@ -1045,52 +1432,43 @@ SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8);
}
-/// OctoDRegs - Form 8 consecutive D registers.
-///
-SDNode *ARMDAGToDAGISel::OctoDRegs(EVT VT, SDValue V0, SDValue V1,
- SDValue V2, SDValue V3,
- SDValue V4, SDValue V5,
- SDValue V6, SDValue V7) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
- SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
- SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
- SDValue SubReg4 = CurDAG->getTargetConstant(ARM::dsub_4, MVT::i32);
- SDValue SubReg5 = CurDAG->getTargetConstant(ARM::dsub_5, MVT::i32);
- SDValue SubReg6 = CurDAG->getTargetConstant(ARM::dsub_6, MVT::i32);
- SDValue SubReg7 = CurDAG->getTargetConstant(ARM::dsub_7, MVT::i32);
- const SDValue Ops[] ={ V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3,
- V4, SubReg4, V5, SubReg5, V6, SubReg6, V7, SubReg7 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 16);
+/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
+/// of a NEON VLD or VST instruction. The supported values depend on the
+/// number of registers being loaded.
+SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
+ bool is64BitVector) {
+ unsigned NumRegs = NumVecs;
+ if (!is64BitVector && NumVecs < 3)
+ NumRegs *= 2;
+
+ unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ if (Alignment >= 32 && NumRegs == 4)
+ Alignment = 32;
+ else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
+ Alignment = 16;
+ else if (Alignment >= 8)
+ Alignment = 8;
+ else
+ Alignment = 0;
+
+ return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
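
A standalone restatement of GetVLDSTAlign(): the alignment operand is quantized to what the instruction can encode, with the cap depending on how many D registers the access touches in total:

    #include <cassert>

    static unsigned vldstAlign(unsigned alignBytes, unsigned numVecs,
                               bool is64BitVector) {
      unsigned numRegs = numVecs;
      if (!is64BitVector && numVecs < 3)
        numRegs *= 2;                 // each Q register is a D-register pair
      if (alignBytes >= 32 && numRegs == 4) return 32;
      if (alignBytes >= 16 && (numRegs == 2 || numRegs == 4)) return 16;
      if (alignBytes >= 8) return 8;
      return 0;
    }

    int main() {
      assert(vldstAlign(32, 2, /*is64Bit=*/false) == 32); // VLD2 of Qs: 4 Ds
      assert(vldstAlign(32, 2, /*is64Bit=*/true)  == 16); // VLD2 of Ds: 2 Ds
      assert(vldstAlign(16, 1, /*is64Bit=*/true)  == 8);  // one D caps at 8
      assert(vldstAlign(4,  4, /*is64Bit=*/true)  == 0);  // below 8: none
    }
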
-/// GetNEONSubregVT - Given a type for a 128-bit NEON vector, return the type
-/// for a 64-bit subregister of the vector.
-static EVT GetNEONSubregVT(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("unhandled NEON type");
- case MVT::v16i8: return MVT::v8i8;
- case MVT::v8i16: return MVT::v4i16;
- case MVT::v4f32: return MVT::v2f32;
- case MVT::v4i32: return MVT::v2i32;
- case MVT::v2i64: return MVT::v1i64;
- }
-}
-
-SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
+SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
unsigned *DOpcodes, unsigned *QOpcodes0,
unsigned *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
SDValue MemAddr, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
return NULL;
SDValue Chain = N->getOperand(0);
EVT VT = N->getValueType(0);
bool is64BitVector = VT.is64BitVector();
+ Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -1120,88 +1498,97 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
ResTyElts *= 2;
ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
}
+ std::vector<EVT> ResTys;
+ ResTys.push_back(ResTy);
+ if (isUpdating)
+ ResTys.push_back(MVT::i32);
+ ResTys.push_back(MVT::Other);
SDValue Pred = getAL(CurDAG);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
- SDValue SuperReg;
- if (is64BitVector) {
- unsigned Opc = DOpcodes[OpcodeIndex];
- const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
- SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other, Ops, 5);
- if (NumVecs == 1)
- return VLd;
-
- SuperReg = SDValue(VLd, 0);
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue D = CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec,
- dl, VT, SuperReg);
- ReplaceUses(SDValue(N, Vec), D);
- }
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
- return NULL;
- }
-
- if (NumVecs <= 2) {
- // Quad registers are directly supported for VLD1 and VLD2,
- // loading pairs of D regs.
- unsigned Opc = QOpcodes0[OpcodeIndex];
- const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
- SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other, Ops, 5);
- if (NumVecs == 1)
- return VLd;
+ SDNode *VLd;
+ SmallVector<SDValue, 7> Ops;
- SuperReg = SDValue(VLd, 0);
- Chain = SDValue(VLd, 1);
+ // Double registers and VLD1/VLD2 quad registers are directly supported.
+ if (is64BitVector || NumVecs <= 2) {
+ unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
+ QOpcodes0[OpcodeIndex]);
+ Ops.push_back(MemAddr);
+ Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ }
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0);
+ Ops.push_back(Chain);
+ VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
} else {
// Otherwise, quad registers are loaded with two separate instructions,
// where one loads the even registers and the other loads the odd registers.
EVT AddrTy = MemAddr.getValueType();
- // Load the even subregs.
- unsigned Opc = QOpcodes0[OpcodeIndex];
+ // Load the even subregs. This is always an updating load, so that it
+ // provides the address to the second load for the odd subregs.
SDValue ImplDef =
SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
- SDNode *VLdA =
- CurDAG->getMachineNode(Opc, dl, ResTy, AddrTy, MVT::Other, OpsA, 7);
+ SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
+ ResTy, AddrTy, MVT::Other, OpsA, 7);
Chain = SDValue(VLdA, 2);
// Load the odd subregs.
- Opc = QOpcodes1[OpcodeIndex];
- const SDValue OpsB[] = { SDValue(VLdA, 1), Align, Reg0, SDValue(VLdA, 0),
- Pred, Reg0, Chain };
- SDNode *VLdB =
- CurDAG->getMachineNode(Opc, dl, ResTy, AddrTy, MVT::Other, OpsB, 7);
- SuperReg = SDValue(VLdB, 0);
- Chain = SDValue(VLdB, 2);
- }
-
- // Extract out the Q registers.
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
- dl, VT, SuperReg);
- ReplaceUses(SDValue(N, Vec), Q);
- }
- ReplaceUses(SDValue(N, NumVecs), Chain);
+ Ops.push_back(SDValue(VLdA, 1));
+ Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ assert(isa<ConstantSDNode>(Inc.getNode()) &&
+ "only constant post-increment update allowed for VLD3/4");
+ (void)Inc;
+ Ops.push_back(Reg0);
+ }
+ Ops.push_back(SDValue(VLdA, 0));
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0);
+ Ops.push_back(Chain);
+ VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
+ Ops.data(), Ops.size());
+ }
+
+ if (NumVecs == 1)
+ return VLd;
+
+ // Extract out the subregisters.
+ SDValue SuperReg = SDValue(VLd, 0);
+ assert(ARM::dsub_7 == ARM::dsub_0+7 &&
+ ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
return NULL;
}
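
For reference, a toy mirror (invented helper) of the ReplaceUses() mapping at the end of SelectVLD() above: the original node's results are the loaded vectors, then, in the updating form, the written-back address, then the chain:

    #include <cassert>

    // results 0 .. NumVecs-1 : loaded vectors (extract_subreg copies of
    //                          the machine node's super-register)
    // result NumVecs         : written-back address (updating form) or
    //                          the chain (plain form)
    // result NumVecs + 1     : the chain (updating form only)
    static unsigned chainResultIndex(unsigned numVecs, bool isUpdating) {
      return numVecs + (isUpdating ? 1 : 0);
    }

    int main() {
      assert(chainResultIndex(4, false) == 4);
      assert(chainResultIndex(4, true) == 5); // write-back sits at index 4
    }
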
-SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
+SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
unsigned *DOpcodes, unsigned *QOpcodes0,
unsigned *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
SDValue MemAddr, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
+ if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
return NULL;
SDValue Chain = N->getOperand(0);
- EVT VT = N->getOperand(3).getValueType();
+ EVT VT = N->getOperand(Vec0Idx).getValueType();
bool is64BitVector = VT.is64BitVector();
+ Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -1222,119 +1609,128 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
break;
}
+ std::vector<EVT> ResTys;
+ if (isUpdating)
+ ResTys.push_back(MVT::i32);
+ ResTys.push_back(MVT::Other);
+
SDValue Pred = getAL(CurDAG);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
-
SmallVector<SDValue, 7> Ops;
- Ops.push_back(MemAddr);
- Ops.push_back(Align);
- if (is64BitVector) {
+ // Double registers and VST1/VST2 quad registers are directly supported.
+ if (is64BitVector || NumVecs <= 2) {
+ SDValue SrcReg;
if (NumVecs == 1) {
- Ops.push_back(N->getOperand(3));
- } else {
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
-
+ SrcReg = N->getOperand(Vec0Idx);
+ } else if (is64BitVector) {
// Form a REG_SEQUENCE to force register allocation.
+ SDValue V0 = N->getOperand(Vec0Idx + 0);
+ SDValue V1 = N->getOperand(Vec0Idx + 1);
if (NumVecs == 2)
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
+ SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
else {
- SDValue V2 = N->getOperand(2+3);
- // If it's a vld3, form a quad D-register and leave the last part as
+ SDValue V2 = N->getOperand(Vec0Idx + 2);
+ // If it's a vst3, form a quad D-register and leave the last part as
// an undef.
SDValue V3 = (NumVecs == 3)
? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
+ : N->getOperand(Vec0Idx + 3);
+ SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
}
- Ops.push_back(RegSeq);
- }
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = DOpcodes[OpcodeIndex];
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 6);
- }
-
- if (NumVecs <= 2) {
- // Quad registers are directly supported for VST1 and VST2.
- unsigned Opc = QOpcodes0[OpcodeIndex];
- if (NumVecs == 1) {
- Ops.push_back(N->getOperand(3));
} else {
// Form a QQ register.
- SDValue Q0 = N->getOperand(3);
- SDValue Q1 = N->getOperand(4);
- Ops.push_back(SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0));
+ SDValue Q0 = N->getOperand(Vec0Idx);
+ SDValue Q1 = N->getOperand(Vec0Idx + 1);
+ SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
+ }
+
+ unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
+ QOpcodes0[OpcodeIndex]);
+ Ops.push_back(MemAddr);
+ Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
}
+ Ops.push_back(SrcReg);
Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
+ Ops.push_back(Reg0);
Ops.push_back(Chain);
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 6);
+ return CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
}
// Otherwise, quad registers are stored with two separate instructions,
// where one stores the even registers and the other stores the odd registers.
// Form the QQQQ REG_SEQUENCE.
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- SDValue V2 = N->getOperand(2+3);
+ SDValue V0 = N->getOperand(Vec0Idx + 0);
+ SDValue V1 = N->getOperand(Vec0Idx + 1);
+ SDValue V2 = N->getOperand(Vec0Idx + 2);
SDValue V3 = (NumVecs == 3)
? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
- : N->getOperand(3+3);
+ : N->getOperand(Vec0Idx + 3);
SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
- // Store the even D registers.
- Ops.push_back(Reg0); // post-access address offset
- Ops.push_back(RegSeq);
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), 7);
+ // Store the even D registers. This is always an updating store, so that it
+ // provides the address to the second store for the odd subregs.
+ const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
+ SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
+ MemAddr.getValueType(),
+ MVT::Other, OpsA, 7);
Chain = SDValue(VStA, 1);
// Store the odd D registers.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- Ops[6] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), 7);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
+ Ops.push_back(SDValue(VStA, 0));
+ Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ assert(isa<ConstantSDNode>(Inc.getNode()) &&
+ "only constant post-increment update allowed for VST3/4");
+ (void)Inc;
+ Ops.push_back(Reg0);
+ }
+ Ops.push_back(RegSeq);
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0);
+ Ops.push_back(Chain);
+ return CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
+ Ops.data(), Ops.size());
}
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
- unsigned NumVecs, unsigned *DOpcodes,
- unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
+ bool isUpdating, unsigned NumVecs,
+ unsigned *DOpcodes,
+ unsigned *QOpcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
SDValue MemAddr, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
+ unsigned AddrOpIdx = isUpdating ? 1 : 2;
+ unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
+ if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
return NULL;
SDValue Chain = N->getOperand(0);
unsigned Lane =
- cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
- EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
+ cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
+ EVT VT = N->getOperand(Vec0Idx).getValueType();
bool is64BitVector = VT.is64BitVector();
- // Quad registers are handled by load/store of subregs. Find the subreg info.
- unsigned NumElts = 0;
- bool Even = false;
- EVT RegVT = VT;
- if (!is64BitVector) {
- RegVT = GetNEONSubregVT(VT);
- NumElts = RegVT.getVectorNumElements();
- Even = Lane < NumElts;
- }
+ unsigned Alignment = 0;
+ if (NumVecs != 3) {
+ Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ if (Alignment > NumBytes)
+ Alignment = NumBytes;
+ if (Alignment < 8 && Alignment < NumBytes)
+ Alignment = 0;
+ // Alignment must be a power of two; make sure of that.
+ Alignment = (Alignment & -Alignment);
+ if (Alignment == 1)
+ Alignment = 0;
+ }
+ Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -1350,124 +1746,144 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
case MVT::v4i32: OpcodeIndex = 1; break;
}
+ std::vector<EVT> ResTys;
+ if (IsLoad) {
+ unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
+ if (!is64BitVector)
+ ResTyElts *= 2;
+ ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
+ MVT::i64, ResTyElts));
+ }
+ if (isUpdating)
+ ResTys.push_back(MVT::i32);
+ ResTys.push_back(MVT::Other);
+
SDValue Pred = getAL(CurDAG);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
- SmallVector<SDValue, 10> Ops;
+ SmallVector<SDValue, 8> Ops;
Ops.push_back(MemAddr);
Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(AddrOpIdx + 1);
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ }
- unsigned Opc = 0;
- if (is64BitVector) {
- Opc = DOpcodes[OpcodeIndex];
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
-
- // Now extract the D registers back out.
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT, RegSeq));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT, RegSeq));
- if (NumVecs > 2)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,RegSeq));
- if (NumVecs > 3)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,RegSeq));
+ SDValue SuperReg;
+ SDValue V0 = N->getOperand(Vec0Idx + 0);
+ SDValue V1 = N->getOperand(Vec0Idx + 1);
+ if (NumVecs == 2) {
+ if (is64BitVector)
+ SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
+ else
+ SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
} else {
- // Check if this is loading the even or odd subreg of a Q register.
- if (Lane < NumElts) {
- Opc = QOpcodes0[OpcodeIndex];
- } else {
- Lane -= NumElts;
- Opc = QOpcodes1[OpcodeIndex];
- }
-
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
- }
-
- // Extract the subregs of the input vector.
- unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
- RegSeq));
+ SDValue V2 = N->getOperand(Vec0Idx + 2);
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
+ : N->getOperand(Vec0Idx + 3);
+ if (is64BitVector)
+ SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
+ else
+ SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
}
+ Ops.push_back(SuperReg);
Ops.push_back(getI32Imm(Lane));
Ops.push_back(Pred);
Ops.push_back(Reg0);
Ops.push_back(Chain);
+ unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
+ QOpcodes[OpcodeIndex]);
+ SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
+ Ops.data(), Ops.size());
if (!IsLoad)
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+6);
+ return VLdLn;
- std::vector<EVT> ResTys(NumVecs, RegVT);
- ResTys.push_back(MVT::Other);
- SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(),NumVecs+6);
+ // Extract the subregisters.
+ SuperReg = SDValue(VLdLn, 0);
+ assert(ARM::dsub_7 == ARM::dsub_0+7 &&
+ ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
+ return NULL;
+}
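Both SelectVLDSTLane above and SelectVLDDup below clamp the user-requested
alignment the same way: reduce it to a power of two no larger than the total
access size. "Alignment & -Alignment" keeps only the lowest set bit of a
two's-complement value, and a result of 1 (plain byte alignment) is dropped to
0 because it encodes nothing. A self-contained sketch of the computation (the
function name is illustrative, not from the tree):

static unsigned clampNEONAlignment(unsigned Alignment, unsigned NumBytes) {
  if (Alignment > NumBytes)
    Alignment = NumBytes;              // never claim more than the access size
  if (Alignment < 8 && Alignment < NumBytes)
    Alignment = 0;                     // too small to affect the encoding
  Alignment = (Alignment & -Alignment); // lowest set bit: the largest power
                                        // of two dividing the value
  if (Alignment == 1)
    Alignment = 0;                     // byte alignment carries no information
  return Alignment;
}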
- // Form a REG_SEQUENCE to force register allocation.
- SDValue RegSeq;
- if (is64BitVector) {
- SDValue V0 = SDValue(VLdLn, 0);
- SDValue V1 = SDValue(VLdLn, 1);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = SDValue(VLdLn, 2);
- // If it's a vld3, form a quad D-register but discard the last part.
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : SDValue(VLdLn, 3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
- } else {
- // For 128-bit vectors, take the 64-bit results of the load and insert
- // them as subregs into the result.
- SDValue V[8];
- for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
- if (Even) {
- V[i] = SDValue(VLdLn, Vec);
- V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- } else {
- V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- V[i+1] = SDValue(VLdLn, Vec);
- }
- }
- if (NumVecs == 3)
- V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
+SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
+ unsigned NumVecs, unsigned *Opcodes) {
+ assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
+ DebugLoc dl = N->getDebugLoc();
- if (NumVecs == 2)
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
- else
- RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
- V[4], V[5], V[6], V[7]), 0);
+ SDValue MemAddr, Align;
+ if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
+ return NULL;
+
+ SDValue Chain = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ unsigned Alignment = 0;
+ if (NumVecs != 3) {
+ Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ if (Alignment > NumBytes)
+ Alignment = NumBytes;
+ if (Alignment < 8 && Alignment < NumBytes)
+ Alignment = 0;
+ // Alignment must be a power of two; make sure of that.
+ Alignment = (Alignment & -Alignment);
+ if (Alignment == 1)
+ Alignment = 0;
+ }
+ Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
+
+ unsigned OpcodeIndex;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("unhandled vld-dup type");
+ case MVT::v8i8: OpcodeIndex = 0; break;
+ case MVT::v4i16: OpcodeIndex = 1; break;
+ case MVT::v2f32:
+ case MVT::v2i32: OpcodeIndex = 2; break;
+ }
+
+ SDValue Pred = getAL(CurDAG);
+ SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+ SDValue SuperReg;
+ unsigned Opc = Opcodes[OpcodeIndex];
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(MemAddr);
+ Ops.push_back(Align);
+ if (isUpdating) {
+ SDValue Inc = N->getOperand(2);
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
}
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0);
+ Ops.push_back(Chain);
+ unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
+ std::vector<EVT> ResTys;
+ ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
+ if (isUpdating)
+ ResTys.push_back(MVT::i32);
+ ResTys.push_back(MVT::Other);
+ SDNode *VLdDup =
+ CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
+ SuperReg = SDValue(VLdDup, 0);
+
+ // Extract the subregisters.
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
+ unsigned SubIdx = ARM::dsub_0;
for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
ReplaceUses(SDValue(N, Vec),
- CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
+ CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
+ if (isUpdating)
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
return NULL;
}
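For context, the VLDnDUP pseudos matched by SelectVLDDup load one element per
destination register and broadcast it to every lane; the "(NumVecs == 3) ? 4 :
NumVecs" sizing models three D registers as a QQ (4 x i64) super-register
whose last part stays undef. A scalar model of the two-register, 8-bit case
(names are illustrative, not from the tree):

#include <cstddef>
#include <cstdint>

// Scalar semantics of "vld2.8 {d0[], d1[]}, [r0]": read two adjacent bytes
// and splat each across all eight lanes of its own D register.
static void vld2dup_d8(const uint8_t *Mem, uint8_t D0[8], uint8_t D1[8]) {
  for (std::size_t Lane = 0; Lane < 8; ++Lane) {
    D0[Lane] = Mem[0];
    D1[Lane] = Mem[1];
  }
}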
@@ -1486,7 +1902,7 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
else {
SDValue V2 = N->getOperand(FirstTblReg + 2);
- // If it's a vtbl3, form a quad D-register and leave the last part as
+ // If it's a vtbl3, form a quad D-register and leave the last part as
// an undef.
SDValue V3 = (NumVecs == 3)
? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
@@ -1494,17 +1910,10 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
}
- // Now extract the D registers back out.
SmallVector<SDValue, 6> Ops;
if (IsExt)
Ops.push_back(N->getOperand(1));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT, RegSeq));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT, RegSeq));
- if (NumVecs > 2)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT, RegSeq));
- if (NumVecs > 3)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT, RegSeq));
-
+ Ops.push_back(RegSeq);
Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
Ops.push_back(getAL(CurDAG)); // predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
@@ -1574,7 +1983,7 @@ SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
SDValue CPTmp0;
SDValue CPTmp1;
- if (SelectT2ShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1)) {
+ if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
unsigned Opc = 0;
@@ -1602,7 +2011,7 @@ SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
SDValue CPTmp0;
SDValue CPTmp1;
SDValue CPTmp2;
- if (SelectShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
+ if (SelectShifterOperandReg(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
@@ -1611,36 +2020,66 @@ SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
}
SDNode *ARMDAGToDAGISel::
-SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
+SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
+ ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
if (!T)
return 0;
- if (Pred_t2_so_imm(TrueVal.getNode())) {
- SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
+ unsigned Opc = 0;
+ unsigned TrueImm = T->getZExtValue();
+ if (is_t2_so_imm(TrueImm)) {
+ Opc = ARM::t2MOVCCi;
+ } else if (TrueImm <= 0xffff) {
+ Opc = ARM::t2MOVCCi16;
+ } else if (is_t2_so_imm_not(TrueImm)) {
+ TrueImm = ~TrueImm;
+ Opc = ARM::t2MVNCCi;
+ } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
+ // Large immediate.
+ Opc = ARM::t2MOVCCi32imm;
+ }
+
+ if (Opc) {
+ SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N,
- ARM::t2MOVCCi, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
}
+
return 0;
}
SDNode *ARMDAGToDAGISel::
-SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
+SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
+ ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
if (!T)
return 0;
- if (Pred_so_imm(TrueVal.getNode())) {
- SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
+ unsigned Opc = 0;
+ unsigned TrueImm = T->getZExtValue();
+ bool isSoImm = is_so_imm(TrueImm);
+ if (isSoImm) {
+ Opc = ARM::MOVCCi;
+ } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
+ Opc = ARM::MOVCCi16;
+ } else if (is_so_imm_not(TrueImm)) {
+ TrueImm = ~TrueImm;
+ Opc = ARM::MVNCCi;
+ } else if (TrueVal.getNode()->hasOneUse() &&
+ (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
+ // Large immediate.
+ Opc = ARM::MOVCCi32imm;
+ }
+
+ if (Opc) {
+ SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N,
- ARM::MOVCCi, MVT::i32, Ops, 5);
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
}
+
return 0;
}
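Both immediate-CMOV selectors above try progressively costlier encodings: a
rotated 8-bit so_imm, a 16-bit movw-style immediate, an MVN of the complement,
and finally the two-instruction i32imm pseudo. The MVN case is why
is_so_imm_not exists: a constant such as 0xFFFFFF00 fails the so_imm test
(its popcount alone rules it out) while its complement 0xFF passes, so MVNCC
of the inverted immediate produces the value in one instruction. A
self-contained sketch of the encodability test (illustrative, not LLVM's
implementation):

#include <cstdint>

// An ARM so_imm is an 8-bit payload rotated right by an even amount. Undo
// each possible rotation (rotate left) and check for an 8-bit remainder.
static bool isSOImm(uint32_t V) {
  for (unsigned Rot = 2; Rot < 32; Rot += 2)
    if (((V << Rot) | (V >> (32 - Rot))) <= 0xFF)
      return true;
  return V <= 0xFF; // the rotation-by-zero case
}
// isSOImm(0xFFFFFF00u) is false, but isSOImm(~0xFFFFFF00u) is true, so the
// selector emits MVNCCi with the inverted immediate.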
@@ -1688,18 +2127,18 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
// (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
// Pattern complexity = 10 cost = 1 size = 0
if (Subtarget->isThumb()) {
- SDNode *Res = SelectT2CMOVSoImmOp(N, FalseVal, TrueVal,
+ SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
CCVal, CCR, InFlag);
if (!Res)
- Res = SelectT2CMOVSoImmOp(N, TrueVal, FalseVal,
+ Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
if (Res)
return Res;
} else {
- SDNode *Res = SelectARMCMOVSoImmOp(N, FalseVal, TrueVal,
+ SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
CCVal, CCR, InFlag);
if (!Res)
- Res = SelectARMCMOVSoImmOp(N, TrueVal, FalseVal,
+ Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
if (Res)
return Res;
@@ -1742,13 +2181,7 @@ SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
EVT VT = N->getValueType(0);
if (!VT.is128BitVector() || N->getNumOperands() != 2)
llvm_unreachable("unexpected CONCAT_VECTORS");
- DebugLoc dl = N->getDebugLoc();
- SDValue V0 = N->getOperand(0);
- SDValue V1 = N->getOperand(1);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
- const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
+ return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
}
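The CONCAT_VECTORS case now reuses the PairDRegs helper; for reference, the
REG_SEQUENCE it builds is exactly what the deleted lines open-coded, with
operands alternating value and subregister index:

// What PairDRegs(VT, V0, V1) produces: V0 in dsub_0 and V1 in dsub_1 of a
// 128-bit register (taken from the removed open-coded version above).
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);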
SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
@@ -1788,19 +2221,18 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Pred = getAL(CurDAG);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
- ResNode = CurDAG->getMachineNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
+ ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
Ops, 4);
} else {
SDValue Ops[] = {
CPIdx,
- CurDAG->getRegister(0, MVT::i32),
CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getEntryNode()
};
ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
- Ops, 6);
+ Ops, 5);
}
ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
return NULL;
@@ -1930,7 +2362,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
+ return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
+ ARM::UMULL : ARM::UMULLv5,
+ dl, MVT::i32, MVT::i32, Ops, 5);
}
}
case ISD::SMUL_LOHI: {
@@ -1944,7 +2378,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
+ return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
+ ARM::SMULL : ARM::SMULLv5,
+ dl, MVT::i32, MVT::i32, Ops, 5);
}
}
case ISD::LOAD: {
@@ -1987,7 +2423,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
MVT::i32);
SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
- MVT::Flag, Ops, 5);
+ MVT::Glue, Ops, 5);
Chain = SDValue(ResNode, 0);
if (N->getNumValues() == 2) {
InFlag = SDValue(ResNode, 1);
@@ -2088,12 +2524,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
EVT VecVT = N->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
unsigned NumElts = VecVT.getVectorNumElements();
- if (EltVT.getSimpleVT() == MVT::f64) {
+ if (EltVT == MVT::f64) {
assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
}
- assert(EltVT.getSimpleVT() == MVT::f32 &&
- "unexpected type for BUILD_VECTOR");
+ assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
if (NumElts == 2)
return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
@@ -2101,6 +2536,170 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
N->getOperand(2), N->getOperand(3));
}
+ case ARMISD::VLD2DUP: {
+ unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo,
+ ARM::VLD2DUPd32Pseudo };
+ return SelectVLDDup(N, false, 2, Opcodes);
+ }
+
+ case ARMISD::VLD3DUP: {
+ unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo,
+ ARM::VLD3DUPd32Pseudo };
+ return SelectVLDDup(N, false, 3, Opcodes);
+ }
+
+ case ARMISD::VLD4DUP: {
+ unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo,
+ ARM::VLD4DUPd32Pseudo };
+ return SelectVLDDup(N, false, 4, Opcodes);
+ }
+
+ case ARMISD::VLD2DUP_UPD: {
+ unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd16Pseudo_UPD,
+ ARM::VLD2DUPd32Pseudo_UPD };
+ return SelectVLDDup(N, true, 2, Opcodes);
+ }
+
+ case ARMISD::VLD3DUP_UPD: {
+ unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd16Pseudo_UPD,
+ ARM::VLD3DUPd32Pseudo_UPD };
+ return SelectVLDDup(N, true, 3, Opcodes);
+ }
+
+ case ARMISD::VLD4DUP_UPD: {
+ unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd16Pseudo_UPD,
+ ARM::VLD4DUPd32Pseudo_UPD };
+ return SelectVLDDup(N, true, 4, Opcodes);
+ }
+
+ case ARMISD::VLD1_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD1d8_UPD, ARM::VLD1d16_UPD,
+ ARM::VLD1d32_UPD, ARM::VLD1d64_UPD };
+ unsigned QOpcodes[] = { ARM::VLD1q8Pseudo_UPD, ARM::VLD1q16Pseudo_UPD,
+ ARM::VLD1q32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
+ return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
+ }
+
+ case ARMISD::VLD2_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD2d8Pseudo_UPD, ARM::VLD2d16Pseudo_UPD,
+ ARM::VLD2d32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VLD2q8Pseudo_UPD, ARM::VLD2q16Pseudo_UPD,
+ ARM::VLD2q32Pseudo_UPD };
+ return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
+ }
+
+ case ARMISD::VLD3_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD,
+ ARM::VLD3d32Pseudo_UPD, ARM::VLD1d64TPseudo_UPD };
+ unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
+ ARM::VLD3q16Pseudo_UPD,
+ ARM::VLD3q32Pseudo_UPD };
+ unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
+ ARM::VLD3q16oddPseudo_UPD,
+ ARM::VLD3q32oddPseudo_UPD };
+ return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ }
+
+ case ARMISD::VLD4_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD,
+ ARM::VLD4d32Pseudo_UPD, ARM::VLD1d64QPseudo_UPD };
+ unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
+ ARM::VLD4q16Pseudo_UPD,
+ ARM::VLD4q32Pseudo_UPD };
+ unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
+ ARM::VLD4q16oddPseudo_UPD,
+ ARM::VLD4q32oddPseudo_UPD };
+ return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ }
+
+ case ARMISD::VLD2LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd16Pseudo_UPD,
+ ARM::VLD2LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
+ ARM::VLD2LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
+ }
+
+ case ARMISD::VLD3LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd16Pseudo_UPD,
+ ARM::VLD3LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
+ ARM::VLD3LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
+ }
+
+ case ARMISD::VLD4LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd16Pseudo_UPD,
+ ARM::VLD4LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
+ ARM::VLD4LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
+ }
+
+ case ARMISD::VST1_UPD: {
+ unsigned DOpcodes[] = { ARM::VST1d8_UPD, ARM::VST1d16_UPD,
+ ARM::VST1d32_UPD, ARM::VST1d64_UPD };
+ unsigned QOpcodes[] = { ARM::VST1q8Pseudo_UPD, ARM::VST1q16Pseudo_UPD,
+ ARM::VST1q32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
+ return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
+ }
+
+ case ARMISD::VST2_UPD: {
+ unsigned DOpcodes[] = { ARM::VST2d8Pseudo_UPD, ARM::VST2d16Pseudo_UPD,
+ ARM::VST2d32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VST2q8Pseudo_UPD, ARM::VST2q16Pseudo_UPD,
+ ARM::VST2q32Pseudo_UPD };
+ return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
+ }
+
+ case ARMISD::VST3_UPD: {
+ unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD,
+ ARM::VST3d32Pseudo_UPD, ARM::VST1d64TPseudo_UPD };
+ unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
+ ARM::VST3q16Pseudo_UPD,
+ ARM::VST3q32Pseudo_UPD };
+ unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
+ ARM::VST3q16oddPseudo_UPD,
+ ARM::VST3q32oddPseudo_UPD };
+ return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ }
+
+ case ARMISD::VST4_UPD: {
+ unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD,
+ ARM::VST4d32Pseudo_UPD, ARM::VST1d64QPseudo_UPD };
+ unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
+ ARM::VST4q16Pseudo_UPD,
+ ARM::VST4q32Pseudo_UPD };
+ unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
+ ARM::VST4q16oddPseudo_UPD,
+ ARM::VST4q32oddPseudo_UPD };
+ return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ }
+
+ case ARMISD::VST2LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd16Pseudo_UPD,
+ ARM::VST2LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
+ ARM::VST2LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
+ }
+
+ case ARMISD::VST3LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd16Pseudo_UPD,
+ ARM::VST3LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
+ ARM::VST3LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
+ }
+
+ case ARMISD::VST4LN_UPD: {
+ unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd16Pseudo_UPD,
+ ARM::VST4LNd32Pseudo_UPD };
+ unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
+ ARM::VST4LNq32Pseudo_UPD };
+ return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
+ }
+
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
@@ -2113,7 +2712,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD1d32, ARM::VLD1d64 };
unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo,
ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo };
- return SelectVLD(N, 1, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vld2: {
@@ -2121,7 +2720,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo };
unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
ARM::VLD2q32Pseudo };
- return SelectVLD(N, 2, DOpcodes, QOpcodes, 0);
+ return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vld3: {
@@ -2130,10 +2729,10 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
ARM::VLD3q16Pseudo_UPD,
ARM::VLD3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
- ARM::VLD3q16oddPseudo_UPD,
- ARM::VLD3q32oddPseudo_UPD };
- return SelectVLD(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo,
+ ARM::VLD3q16oddPseudo,
+ ARM::VLD3q32oddPseudo };
+ return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vld4: {
@@ -2142,31 +2741,31 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
ARM::VLD4q16Pseudo_UPD,
ARM::VLD4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
- ARM::VLD4q16oddPseudo_UPD,
- ARM::VLD4q32oddPseudo_UPD };
- return SelectVLD(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo,
+ ARM::VLD4q16oddPseudo,
+ ARM::VLD4q32oddPseudo };
+ return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vld2lane: {
- unsigned DOpcodes[] = { ARM::VLD2LNd8, ARM::VLD2LNd16, ARM::VLD2LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD2LNq16, ARM::VLD2LNq32 };
- unsigned QOpcodes1[] = { ARM::VLD2LNq16odd, ARM::VLD2LNq32odd };
- return SelectVLDSTLane(N, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo,
+ ARM::VLD2LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo };
+ return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vld3lane: {
- unsigned DOpcodes[] = { ARM::VLD3LNd8, ARM::VLD3LNd16, ARM::VLD3LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD3LNq16, ARM::VLD3LNq32 };
- unsigned QOpcodes1[] = { ARM::VLD3LNq16odd, ARM::VLD3LNq32odd };
- return SelectVLDSTLane(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo,
+ ARM::VLD3LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo };
+ return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vld4lane: {
- unsigned DOpcodes[] = { ARM::VLD4LNd8, ARM::VLD4LNd16, ARM::VLD4LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD4LNq16, ARM::VLD4LNq32 };
- unsigned QOpcodes1[] = { ARM::VLD4LNq16odd, ARM::VLD4LNq32odd };
- return SelectVLDSTLane(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo,
+ ARM::VLD4LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo };
+ return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst1: {
@@ -2174,7 +2773,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST1d32, ARM::VST1d64 };
unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo,
ARM::VST1q32Pseudo, ARM::VST1q64Pseudo };
- return SelectVST(N, 1, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vst2: {
@@ -2182,7 +2781,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST2d32Pseudo, ARM::VST1q64Pseudo };
unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
ARM::VST2q32Pseudo };
- return SelectVST(N, 2, DOpcodes, QOpcodes, 0);
+ return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vst3: {
@@ -2191,10 +2790,10 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
ARM::VST3q16Pseudo_UPD,
ARM::VST3q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
- ARM::VST3q16oddPseudo_UPD,
- ARM::VST3q32oddPseudo_UPD };
- return SelectVST(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo,
+ ARM::VST3q16oddPseudo,
+ ARM::VST3q32oddPseudo };
+ return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vst4: {
@@ -2203,31 +2802,31 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
ARM::VST4q16Pseudo_UPD,
ARM::VST4q32Pseudo_UPD };
- unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
- ARM::VST4q16oddPseudo_UPD,
- ARM::VST4q32oddPseudo_UPD };
- return SelectVST(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo,
+ ARM::VST4q16oddPseudo,
+ ARM::VST4q32oddPseudo };
+ return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
}
case Intrinsic::arm_neon_vst2lane: {
- unsigned DOpcodes[] = { ARM::VST2LNd8, ARM::VST2LNd16, ARM::VST2LNd32 };
- unsigned QOpcodes0[] = { ARM::VST2LNq16, ARM::VST2LNq32 };
- unsigned QOpcodes1[] = { ARM::VST2LNq16odd, ARM::VST2LNq32odd };
- return SelectVLDSTLane(N, false, 2, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo,
+ ARM::VST2LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo };
+ return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst3lane: {
- unsigned DOpcodes[] = { ARM::VST3LNd8, ARM::VST3LNd16, ARM::VST3LNd32 };
- unsigned QOpcodes0[] = { ARM::VST3LNq16, ARM::VST3LNq32 };
- unsigned QOpcodes1[] = { ARM::VST3LNq16odd, ARM::VST3LNq32odd };
- return SelectVLDSTLane(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo,
+ ARM::VST3LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo };
+ return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
}
case Intrinsic::arm_neon_vst4lane: {
- unsigned DOpcodes[] = { ARM::VST4LNd8, ARM::VST4LNd16, ARM::VST4LNd32 };
- unsigned QOpcodes0[] = { ARM::VST4LNq16, ARM::VST4LNq32 };
- unsigned QOpcodes1[] = { ARM::VST4LNq16odd, ARM::VST4LNq32odd };
- return SelectVLDSTLane(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo,
+ ARM::VST4LNd32Pseudo };
+ unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo };
+ return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
}
}
break;
@@ -2240,18 +2839,18 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
case Intrinsic::arm_neon_vtbl2:
- return SelectVTBL(N, false, 2, ARM::VTBL2);
+ return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo);
case Intrinsic::arm_neon_vtbl3:
- return SelectVTBL(N, false, 3, ARM::VTBL3);
+ return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
case Intrinsic::arm_neon_vtbl4:
- return SelectVTBL(N, false, 4, ARM::VTBL4);
+ return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
case Intrinsic::arm_neon_vtbx2:
- return SelectVTBL(N, true, 2, ARM::VTBX2);
+ return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo);
case Intrinsic::arm_neon_vtbx3:
- return SelectVTBL(N, true, 3, ARM::VTBX3);
+ return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
case Intrinsic::arm_neon_vtbx4:
- return SelectVTBL(N, true, 4, ARM::VTBX4);
+ return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
}
break;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index ce4a2c9..1835ec0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15,6 +15,7 @@
#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
+#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
@@ -28,9 +29,11 @@
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
+#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -41,6 +44,7 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -50,6 +54,7 @@
using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
// This option should go away when tail calls fully work.
static cl::opt<bool>
@@ -57,14 +62,7 @@ EnableARMTailCalls("arm-tail-calls", cl::Hidden,
cl::desc("Generate tail calls (TEMPORARY OPTION)."),
cl::init(false));
-// This option should go away when Machine LICM is smart enough to hoist a
-// reg-to-reg VDUP.
-static cl::opt<bool>
-EnableARMVDUPsplat("arm-vdup-splat", cl::Hidden,
- cl::desc("Generate VDUP for integer constant splats (TEMPORARY OPTION)."),
- cl::init(false));
-
-static cl::opt<bool>
+cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
cl::desc("Generate calls via indirect call instructions"),
cl::init(false));
@@ -74,28 +72,6 @@ ARMInterworking("arm-interworking", cl::Hidden,
cl::desc("Enable / disable ARM interworking (for debugging only)"),
cl::init(true));
-static cl::opt<bool>
-EnableARMCodePlacement("arm-code-placement", cl::Hidden,
- cl::desc("Enable code placement pass for ARM"),
- cl::init(false));
-
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-
void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
EVT PromotedBitwiseVT) {
if (VT != PromotedLdStVT) {
@@ -111,8 +87,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
EVT ElemTy = VT.getVectorElementType();
if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
- if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
if (ElemTy != MVT::i32) {
setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
@@ -122,7 +97,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
if (VT.isInteger()) {
@@ -131,6 +106,10 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
+ for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
+ setTruncStoreAction(VT.getSimpleVT(),
+ (MVT::SimpleValueType)InnerVT, Expand);
}
setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);
@@ -177,6 +156,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
: TargetLowering(TM, createTLOF(TM)) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
RegInfo = TM.getRegisterInfo();
+ Itins = TM.getInstrItineraryData();
if (Subtarget->isTargetDarwin()) {
// Uses VFP for Thumb libfuncs if available.
@@ -260,13 +240,157 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::SRL_I128, 0);
setLibcallName(RTLIB::SRA_I128, 0);
- // Libcalls should use the AAPCS base standard ABI, even if hard float
- // is in effect, as per the ARM RTABI specification, section 4.1.2.
if (Subtarget->isAAPCS_ABI()) {
- for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
- setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
- CallingConv::ARM_AAPCS);
- }
+ // Double-precision floating-point arithmetic helper functions
+ // RTABI chapter 4.1.2, Table 2
+ setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
+ setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
+ setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
+ setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
+ setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);
+
+ // Double-precision floating-point comparison helper functions
+ // RTABI chapter 4.1.2, Table 3
+ setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
+ setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
+ setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
+ setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
+ setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
+ setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
+ setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
+ setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
+ setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
+ setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
+ setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
+ setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
+ setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
+ setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
+ setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
+ setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
+ setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);
+
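A note on the comparison entries above: the AEABI helpers return an integer
that is nonzero when the tested relation holds, so two IR predicates can share
one helper and differ only in the condition registered via setCmpLibcallCC.
OEQ tests the __aeabi_dcmpeq result with SETNE (nonzero means equal), while
UNE tests the same call with SETEQ. At the C level the expansion amounts to
the following (the wrapper names are illustrative):

extern "C" int __aeabi_dcmpeq(double, double); // RTABI 4.1.2, Table 3

static bool lowered_oeq(double A, double B) {
  return __aeabi_dcmpeq(A, B) != 0; // ISD::SETNE against zero
}
static bool lowered_une(double A, double B) {
  return __aeabi_dcmpeq(A, B) == 0; // same helper, ISD::SETEQ
}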
+ // Single-precision floating-point arithmetic helper functions
+ // RTABI chapter 4.1.2, Table 4
+ setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
+ setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
+ setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
+ setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
+ setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);
+
+ // Single-precision floating-point comparison helper functions
+ // RTABI chapter 4.1.2, Table 5
+ setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
+ setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
+ setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
+ setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
+ setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
+ setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
+ setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
+ setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
+ setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
+ setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
+ setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
+ setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
+ setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
+ setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
+ setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
+ setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
+ setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);
+
+ // Floating-point to integer conversions.
+ // RTABI chapter 4.1.2, Table 6
+ setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
+ setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
+ setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
+ setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
+ setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);
+
+ // Conversions between floating types.
+ // RTABI chapter 4.1.2, Table 7
+ setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
+ setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
+ setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
+
+ // Integer to floating-point conversions.
+ // RTABI chapter 4.1.2, Table 8
+ setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
+ setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
+ setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
+ setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
+ setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
+ setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
+ setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
+ setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
+ setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
+
+ // Long long helper functions
+ // RTABI chapter 4.2, Table 9
+ setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
+ setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
+ setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
+ setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
+ setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
+ setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
+ setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);
+
+ // Integer division functions
+ // RTABI chapter 4.3.1
+ setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
+ setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
+ setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
+ setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
+ setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
+ setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
+ setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
}
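One more note on the table above: AEABI defines integer division only at 32
bits, which is why SDIV_I8, SDIV_I16 and SDIV_I32 all map to the single
__aeabi_idiv entry point; narrower operands are promoted before the call. A
hedged sketch of what an i16 signed division lowers to (the wrapper name is
illustrative):

extern "C" int __aeabi_idiv(int, int); // RTABI chapter 4.3.1

static short div_i16(short A, short B) {
  // The legalizer sign-extends the narrow operands; the helper exists only
  // in a 32-bit flavor.
  return (short)__aeabi_idiv((int)A, (int)B);
}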
if (Subtarget->isThumb1Only())
@@ -330,9 +454,16 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::MUL, MVT::v8i16, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
+ // Custom handling for some vector types to avoid expensive expansions
+ setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
+ setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
+ setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
+ setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
+ setTargetDAGCombine(ISD::INTRINSIC_VOID);
+ setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::SRL);
@@ -341,6 +472,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTargetDAGCombine(ISD::ZERO_EXTEND);
setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::BUILD_VECTOR);
+ setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+ setTargetDAGCombine(ISD::STORE);
}
computeRegisterProperties();
@@ -397,7 +532,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
// These are expanded into libcalls.
- if (!Subtarget->hasDivide()) {
+ if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
// v7M has a hardware divider
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
@@ -423,14 +558,15 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- // FIXME: Shouldn't need this, since no register is used, but the legalizer
- // doesn't yet know how to not do that for SjLj.
- setExceptionSelectorRegister(ARM::R0);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setExceptionPointerRegister(ARM::R0);
+ setExceptionSelectorRegister(ARM::R1);
+
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
// the default expansion.
if (Subtarget->hasDataBarrier() ||
- (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) {
+ (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
// membarrier needs custom lowering; the rest are legal and handled
// normally.
setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
@@ -474,6 +610,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
+
// Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
if (!Subtarget->hasV6Ops()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
@@ -484,7 +622,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
// Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
// iff target supports vfp2.
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
+ setOperationAction(ISD::BITCAST, MVT::i64, Custom);
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
}
@@ -493,6 +631,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (Subtarget->isTargetDarwin()) {
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+ setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
}
setOperationAction(ISD::SETCC, MVT::i32, Expand);
@@ -547,8 +686,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::MUL);
- if (Subtarget->hasV6T2Ops())
+ if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
setTargetDAGCombine(ISD::OR);
+ if (Subtarget->hasNEON())
+ setTargetDAGCombine(ISD::AND);
setStackPointerRegisterToSaveRestore(ARM::SP);
@@ -557,16 +698,26 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
else
setSchedulingPreference(Sched::Hybrid);
- maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type
+ //// temporary - rewrite interface to use type
+ maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
// On ARM, arguments smaller than 4 bytes are extended, so all arguments
// are at least 4 bytes aligned.
setMinStackArgumentAlignment(4);
- if (EnableARMCodePlacement)
- benefitFromCodePlacementOpt = true;
+ benefitFromCodePlacementOpt = true;
}
+// FIXME: It might make sense to define the representative register class as the
+// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
+// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
+// SPR's representative would be DPR_VFP2. This should work well if register
+// pressure tracking were modified such that a register use would increment the
+// pressure of the register class's representative and all of its super
+// classes' representatives transitively. We have not implemented this because
+// of the difficulty prior to coalescing of modeling operand register classes
+// due to the common occurrence of cross-class copies and subregister insertions
+// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const{
const TargetRegisterClass *RRC = 0;
@@ -580,6 +731,12 @@ ARMTargetLowering::findRepresentativeClass(EVT VT) const{
case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
RRC = ARM::DPRRegisterClass;
+ // When NEON is used for SP, only half of the register file is available
+ // because operations that define both SP and DP results will be constrained
+ // to the VFP2 class (D0-D15). We currently model this constraint prior to
+ // coalescing by double-counting the SP regs. See the FIXME above.
+ if (Subtarget->useNEONForSinglePrecisionFP())
+ Cost = 2;
break;
case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
case MVT::v4f32: case MVT::v2f64:
@@ -602,6 +759,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
case ARMISD::Wrapper: return "ARMISD::Wrapper";
+ case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
+ case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
case ARMISD::CALL: return "ARMISD::CALL";
case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
@@ -612,7 +771,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
- case ARMISD::AND: return "ARMISD::AND";
case ARMISD::CMP: return "ARMISD::CMP";
case ARMISD::CMPZ: return "ARMISD::CMPZ";
case ARMISD::CMPFP: return "ARMISD::CMPFP";
@@ -633,25 +791,33 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
case ARMISD::RRX: return "ARMISD::RRX";
- case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
- case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
+ case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
+ case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
+ case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP";
case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
-
+
case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
- case ARMISD::SYNCBARRIER: return "ARMISD::SYNCBARRIER";
+ case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
+
+ case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
case ARMISD::VCEQ: return "ARMISD::VCEQ";
+ case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
case ARMISD::VCGE: return "ARMISD::VCGE";
+ case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
+ case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
case ARMISD::VCGEU: return "ARMISD::VCGEU";
case ARMISD::VCGT: return "ARMISD::VCGT";
+ case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
+ case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
case ARMISD::VCGTU: return "ARMISD::VCGTU";
case ARMISD::VTST: return "ARMISD::VTST";
@@ -693,6 +859,28 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::FMAX: return "ARMISD::FMAX";
case ARMISD::FMIN: return "ARMISD::FMIN";
case ARMISD::BFI: return "ARMISD::BFI";
+ case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
+ case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
+ case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
+ case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
+ case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
+ case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
+ case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
+ case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
+ case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
+ case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
+ case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
+ case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
+ case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
+ case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
+ case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
+ case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
+ case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
+ case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
+ case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
+ case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
+ case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
+ case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
}
}
@@ -735,6 +923,8 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
for (unsigned i = 0; i != NumVals; ++i) {
EVT VT = N->getValueType(i);
+ if (VT == MVT::Glue || VT == MVT::Other)
+ continue;
if (VT.isFloatingPoint() || VT.isVector())
return Sched::Latency;
}
@@ -746,25 +936,29 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
// is not available.
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
- if (TID.mayLoad())
- return Sched::Latency;
- const InstrItineraryData &Itins = getTargetMachine().getInstrItineraryData();
- if (!Itins.isEmpty() && Itins.getStageLatency(TID.getSchedClass()) > 2)
+ if (TID.getNumDefs() == 0)
+ return Sched::RegPressure;
+ if (!Itins->isEmpty() &&
+ Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
return Sched::Latency;
+
return Sched::RegPressure;
}
+// FIXME: Move to RegInfo
unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
switch (RC->getID()) {
default:
return 0;
case ARM::tGPRRegClassID:
- return RegInfo->hasFP(MF) ? 4 : 5;
+ return TFI->hasFP(MF) ? 4 : 5;
case ARM::GPRRegClassID: {
- unsigned FP = RegInfo->hasFP(MF) ? 1 : 0;
+ unsigned FP = TFI->hasFP(MF) ? 1 : 0;
return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
}
case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
@@ -829,136 +1023,6 @@ static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
#include "ARMGenCallingConv.inc"
-// APCS f64 is in register pairs, possibly split to stack
-static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- CCState &State, bool CanFail) {
- static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
-
- // Try to get the first register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- else {
- // For the 2nd half of a v2f64, do not fail.
- if (CanFail)
- return false;
-
- // Put the whole thing on the stack.
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(8, 4),
- LocVT, LocInfo));
- return true;
- }
-
- // Try to get the second register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- else
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(4, 4),
- LocVT, LocInfo));
- return true;
-}
-
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
- return false;
- if (LocVT == MVT::v2f64 &&
- !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
- return false;
- return true; // we handled it
-}
-
-// AAPCS f64 is in aligned register pairs
-static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- CCState &State, bool CanFail) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
- static const unsigned ShadowRegList[] = { ARM::R0, ARM::R1 };
-
- unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
- if (Reg == 0) {
- // For the 2nd half of a v2f64, do not just fail.
- if (CanFail)
- return false;
-
- // Put the whole thing on the stack.
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(8, 8),
- LocVT, LocInfo));
- return true;
- }
-
- unsigned i;
- for (i = 0; i < 2; ++i)
- if (HiRegList[i] == Reg)
- break;
-
- unsigned T = State.AllocateReg(LoRegList[i]);
- (void)T;
- assert(T == LoRegList[i] && "Could not allocate register");
-
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
- LocVT, LocInfo));
- return true;
-}
-
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
- return false;
- if (LocVT == MVT::v2f64 &&
- !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
- return false;
- return true; // we handled it
-}
-
-static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo, CCState &State) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
-
- unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
- if (Reg == 0)
- return false; // we didn't handle it
-
- unsigned i;
- for (i = 0; i < 2; ++i)
- if (HiRegList[i] == Reg)
- break;
-
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
- LocVT, LocInfo));
- return true;
-}
-
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
- return false;
- if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
- return false;
- return true; // we handled it
-}
-
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
- State);
-}
-
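The deleted helpers above hand-implemented the f64 register-pairing rules; since the conventions themselves are unchanged, they have presumably been relocated rather than dropped. For reference, a minimal sketch of the placement the AAPCS variant encoded (standard soft-float AAPCS behavior; function names are illustrative only):

    // An f64 occupies an aligned even/odd core-register pair; an odd register
    // that cannot start a pair is shadowed (skipped), as f64AssignAAPCS did.
    void callee(double a, double b);            // a -> r0:r1, b -> r2:r3
    void callee2(int x, double a);              // x -> r0, r1 skipped, a -> r2:r3
    void callee3(double a, double b, double c); // c goes to the stack, 8-aligned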
/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
@@ -967,23 +1031,29 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
switch (CC) {
default:
llvm_unreachable("Unsupported calling convention");
- case CallingConv::C:
case CallingConv::Fast:
+ if (Subtarget->hasVFP2() && !isVarArg) {
+ if (!Subtarget->isAAPCS_ABI())
+ return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
+ // For AAPCS ABI targets, just use VFP variant of the calling convention.
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ }
+ // Fallthrough
+ case CallingConv::C: {
// Use target triple & subtarget features to do actual dispatch.
- if (Subtarget->isAAPCS_ABI()) {
- if (Subtarget->hasVFP2() &&
- FloatABIType == FloatABI::Hard && !isVarArg)
- return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
- else
- return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
- } else
- return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
+ if (!Subtarget->isAAPCS_ABI())
+ return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
+ else if (Subtarget->hasVFP2() &&
+ FloatABIType == FloatABI::Hard && !isVarArg)
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
+ }
case CallingConv::ARM_AAPCS_VFP:
- return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
case CallingConv::ARM_AAPCS:
- return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
+ return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
- return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
+ return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
}
}
@@ -1050,7 +1120,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
+ Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
break;
}
@@ -1073,7 +1143,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile=*/false, /*AlwaysInline=*/false,
- NULL, 0, NULL, 0);
+ MachinePointerInfo(0), MachinePointerInfo(0));
}
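A recurring change throughout this file replaces the raw (PseudoSourceValue*, offset) pair in load/store builders with a single MachinePointerInfo descriptor. A minimal before/after sketch, using only signatures that appear verbatim in the hunks here (shown in the context of a lowering function, not standalone):

    // Before: pseudo-source value and offset passed separately.
    SDValue Old = DAG.getLoad(PtrVT, dl, Chain, Addr,
                              PseudoSourceValue::getConstantPool(), 0,
                              false, false, 0);
    // After: one descriptor carries both; offsets use getWithOffset(), and
    // frame slots use MachinePointerInfo::getFixedStack(FI).
    SDValue New = DAG.getLoad(PtrVT, dl, Chain, Addr,
                              MachinePointerInfo::getConstantPool(),
                              false, false, 0);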
/// LowerMemOpCallTo - Store the argument to the stack.
@@ -1086,11 +1156,11 @@ ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
- if (Flags.isByVal()) {
+ if (Flags.isByVal())
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
- }
+
return DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), LocMemOffset,
+ MachinePointerInfo::getStack(LocMemOffset),
false, false, 0);
}
@@ -1198,7 +1268,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break;
}
@@ -1289,7 +1359,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
// Create a constant pool entry for the callee address
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
ARMPCLabelIndex,
ARMCP::CPValue, 0);
@@ -1298,13 +1368,13 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
} else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
// Create a constant pool entry for the callee address
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
Sym, ARMPCLabelIndex, 0);
// Get the address of the callee into a register
@@ -1312,7 +1382,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
}
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
@@ -1326,7 +1396,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
// tBX takes a register source operand.
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
ARMPCLabelIndex,
ARMCP::CPValue, 4);
@@ -1334,13 +1404,19 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
- } else
- Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
+ } else {
+ // On ELF targets, direct calls in PIC code should go through the PLT.
+ unsigned OpFlags = 0;
+ if (Subtarget->isTargetELF() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ OpFlags = ARMII::MO_PLT;
+ Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
+ }
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
isDirect = true;
bool isStub = Subtarget->isTargetDarwin() &&
@@ -1349,20 +1425,26 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// tBX takes a register source operand.
const char *Sym = S->getSymbol();
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
Sym, ARMPCLabelIndex, 4);
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
- } else
- Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
+ } else {
+ unsigned OpFlags = 0;
+ // On ELF targets, direct calls in PIC code should go through the PLT.
+ if (Subtarget->isTargetELF() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ OpFlags = ARMII::MO_PLT;
+ Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
+ }
}
// FIXME: handle tail calls differently.
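Both new else-blocks above apply the same test when deciding to tag the callee with ARMII::MO_PLT. A condensed sketch in this file's context (the helper name is hypothetical; the two subtarget calls are the ones used above):

    // Direct calls in position-independent ELF code must be routed through
    // the PLT so the dynamic linker can bind or interpose the callee.
    static bool shouldUsePLT(const ARMSubtarget *Subtarget, Reloc::Model RM) {
      return Subtarget->isTargetELF() && RM == Reloc::PIC_;
    }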
@@ -1391,7 +1473,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (InFlag.getNode())
Ops.push_back(InFlag);
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (isTailCall)
return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
@@ -1421,7 +1503,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
+ if (!TargetRegisterInfo::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
@@ -1490,32 +1572,15 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// LR. This means if we need to reload LR, it takes an extra instruction,
// which outweighs the value of the tail call; but here we don't know yet
// whether LR is going to be used. Probably the right approach is to
- // generate the tail call here and turn it back into CALL/RET in
+ // generate the tail call here and turn it back into CALL/RET in
// emitEpilogue if LR is used.
- if (Subtarget->isThumb1Only())
- return false;
-
- // For the moment, we can only do this to functions defined in this
- // compilation, or to indirect calls. A Thumb B to an ARM function,
- // or vice versa, is not easily fixed up in the linker unlike BL.
- // (We could do this by loading the address of the callee into a register;
- // that is an extra instruction over the direct call and burns a register
- // as well, so is not likely to be a win.)
-
- // It might be safe to remove this restriction on non-Darwin.
// Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
// but we need to make sure there are enough registers; the only valid
// registers are the 4 used for parameters. We don't currently handle this
// case.
- if (isa<ExternalSymbolSDNode>(Callee))
- return false;
-
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- if (GV->isDeclaration() || GV->isWeakForLinker())
- return false;
- }
+ if (Subtarget->isThumb1Only())
+ return false;
// If the calling conventions do not match, then we'd better make sure the
// results are returned in the same way as what the caller expects.
@@ -1583,7 +1648,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
if (!VA.isRegLoc())
return false;
if (!ArgLocs[++i].isRegLoc())
- return false;
+ return false;
if (RegVT == MVT::v2f64) {
if (!ArgLocs[++i].isRegLoc())
return false;
@@ -1643,7 +1708,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break;
}
@@ -1693,6 +1758,61 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
return result;
}
+bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
+ if (N->getNumValues() != 1)
+ return false;
+ if (!N->hasNUsesOfValue(1, 0))
+ return false;
+
+ unsigned NumCopies = 0;
+ SDNode* Copies[2];
+ SDNode *Use = *N->use_begin();
+ if (Use->getOpcode() == ISD::CopyToReg) {
+ Copies[NumCopies++] = Use;
+ } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
+ // f64 returned in a pair of GPRs.
+ for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
+ UI != UE; ++UI) {
+ if (UI->getOpcode() != ISD::CopyToReg)
+ return false;
+ Copies[UI.getUse().getResNo()] = *UI;
+ ++NumCopies;
+ }
+ } else if (Use->getOpcode() == ISD::BITCAST) {
+ // f32 returned in a single GPR.
+ if (!Use->hasNUsesOfValue(1, 0))
+ return false;
+ Use = *Use->use_begin();
+ if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
+ return false;
+ Copies[NumCopies++] = Use;
+ } else {
+ return false;
+ }
+
+ if (NumCopies != 1 && NumCopies != 2)
+ return false;
+
+ bool HasRet = false;
+ for (unsigned i = 0; i < NumCopies; ++i) {
+ SDNode *Copy = Copies[i];
+ for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
+ UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::CopyToReg) {
+ SDNode *Use = *UI;
+ if (Use == Copies[0] || Use == Copies[1])
+ continue;
+ return false;
+ }
+ if (UI->getOpcode() != ARMISD::RET_FLAG)
+ return false;
+ HasRet = true;
+ }
+ }
+
+ return HasRet;
+}
+
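The new isUsedByReturnOnly hook reports whether a node's only consumers are the copies feeding ARMISD::RET_FLAG, so that later generic code can treat the value as returned directly, e.g. to turn a call immediately followed by a return into a tail call. A hedged source-level example of the shape it accepts (assuming the sin libcall is the inspected node):

    extern "C" double sin(double);
    double f(double x) {
      return sin(x); // result flows only into the return copies,
    }                // so the call can become a branch: "b sin"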
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
@@ -1732,7 +1852,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
} else {
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
- ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
ARMCP::CPBlockAddress,
PCAdj);
@@ -1740,7 +1860,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
}
CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
if (RelocM == Reloc::Static)
return Result;
@@ -1757,14 +1877,14 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV =
new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
- ARMCP::CPValue, PCAdj, "tlsgd", true);
+ ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue Chain = Argument.getValue(1);
@@ -1802,16 +1922,16 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
if (GV->isDeclaration()) {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
// Initial exec model.
unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV =
new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
- ARMCP::CPValue, PCAdj, "gottpoff", true);
+ ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true);
Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
Chain = Offset.getValue(1);
@@ -1819,15 +1939,15 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
} else {
// local exec model
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
+ ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF);
Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
}
@@ -1859,51 +1979,72 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
if (RelocM == Reloc::PIC_) {
bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
+ new ARMConstantPoolValue(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue Chain = Result.getValue(1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
if (!UseGOTOFF)
Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- PseudoSourceValue::getGOT(), 0,
- false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, 0);
return Result;
+ }
+
+ // If we have T2 ops, we can materialize the address directly via movt/movw
+ // pair. This is always cheaper.
+ if (Subtarget->useMovt()) {
+ ++NumMovwMovt;
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into two nodes.
+ return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT));
} else {
- // If we have T2 ops, we can materialize the address directly via movt/movw
- // pair. This is always cheaper.
- if (Subtarget->useMovt()) {
- return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, dl, PtrVT));
- } else {
- SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- }
+ SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
+ CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
+ MachinePointerInfo::getConstantPool(),
+ false, false, 0);
}
}
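With useMovt(), the address is built in two halves instead of being loaded from the constant pool. A standalone sketch of the split the movw/movt pair performs:

    #include <cstdint>
    uint32_t materialize(uint32_t Addr) {
      uint32_t rd = Addr & 0xffffu; // movw rd, #:lower16:sym (zero-extends rd)
      rd |= (Addr >> 16) << 16;     // movt rd, #:upper16:sym (keeps low half)
      return rd;                    // full address, no constant-pool load
    }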
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = 0;
EVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
+ MachineFunction &MF = DAG.getMachineFunction();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+
+ if (Subtarget->useMovt()) {
+ ++NumMovwMovt;
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into two nodes.
+ if (RelocM == Reloc::Static)
+ return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT));
+
+ unsigned Wrapper = (RelocM == Reloc::PIC_)
+ ? ARMISD::WrapperPIC : ARMISD::WrapperDYN;
+ SDValue Result = DAG.getNode(Wrapper, dl, PtrVT,
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT));
+ if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
+ Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
+ MachinePointerInfo::getGOT(), false, false, 0);
+ return Result;
+ }
+
+ unsigned ARMPCLabelIndex = 0;
SDValue CPAddr;
- if (RelocM == Reloc::Static)
+ if (RelocM == Reloc::Static) {
CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
- else {
- ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ } else {
+ ARMPCLabelIndex = AFI->createPICLabelUId();
unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
ARMConstantPoolValue *CPV =
new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
@@ -1912,7 +2053,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue Chain = Result.getValue(1);
@@ -1922,8 +2063,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
}
if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
- Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- PseudoSourceValue::getGOT(), 0,
+ Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
false, false, 0);
return Result;
@@ -1935,7 +2075,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
"GLOBAL OFFSET TABLE not implemented for non-ELF targets");
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
EVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
@@ -1945,13 +2085,21 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
SDValue
+ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG)
+ const {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
+ Op.getOperand(0), Op.getOperand(1));
+}
+
+SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
SDValue Val = DAG.getConstant(0, MVT::i32);
@@ -1980,7 +2128,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
case Intrinsic::eh_sjlj_lsda: {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
+ unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
EVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
@@ -1994,7 +2142,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 0);
if (RelocM == Reloc::PIC_) {
@@ -2009,21 +2157,55 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) {
DebugLoc dl = Op.getDebugLoc();
- SDValue Op5 = Op.getOperand(5);
- unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
- // Some subtargets which have dmb and dsb instructions can handle barriers
- // directly. Some ARMv6 cpus can support them with the help of mcr
- // instruction. Thumb1 and pre-v6 ARM mode use a libcall instead and should
- // never get here.
- unsigned Opc = isDeviceBarrier ? ARMISD::SYNCBARRIER : ARMISD::MEMBARRIER;
- if (Subtarget->hasDataBarrier())
- return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0));
- else {
- assert(Subtarget->hasV6Ops() && !Subtarget->isThumb1Only() &&
+ if (!Subtarget->hasDataBarrier()) {
+ // Some ARMv6 CPUs can support data barriers with an mcr instruction.
+ // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
+ // here.
+ assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
"Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
- return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0),
+ return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
DAG.getConstant(0, MVT::i32));
}
+
+ SDValue Op5 = Op.getOperand(5);
+ bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
+ unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
+
+ ARM_MB::MemBOpt DMBOpt;
+ if (isDeviceBarrier)
+ DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
+ else
+ DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
+ return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(DMBOpt, MVT::i32));
+}
+
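A condensed sketch of the option selection above, in this file's context. The DMB options differ in scope: SY and ST order accesses system-wide (used for device barriers), while ISH and ISHST are limited to the inner shareable domain; the *ST forms order stores only:

    ARM_MB::MemBOpt pickDMBOption(bool isDeviceBarrier, bool isOnlyStoreBarrier) {
      if (isDeviceBarrier)
        return isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
      return isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
    }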
+static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *Subtarget) {
+ // ARM prior to v5TE and Thumb1 do not have preload instructions.
+ if (!(Subtarget->isThumb2() ||
+ (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
+ // Just preserve the chain.
+ return Op.getOperand(0);
+
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
+ if (!isRead &&
+ (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
+ // ARMv7 with MP extension has PLDW.
+ return Op.getOperand(0);
+
+ if (Subtarget->isThumb())
+ // Invert the bits.
+ isRead = ~isRead & 1;
+ unsigned isData = Subtarget->isThumb() ? 0 : 1;
+
+ // Currently there is no intrinsic that matches pli.
+ return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
+ Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
+ DAG.getConstant(isData, MVT::i32));
}
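What feeds this lowering, for reference: ISD::PREFETCH's read/write operand (operand 2 above) comes from the prefetch builtin/intrinsic. A usage sketch with the Clang/GCC builtin (illustrative; the locality hint is not consumed by this lowering):

    void warm(int *p) {
      __builtin_prefetch(p, /*rw=*/0, /*locality=*/3); // read: PLD on v5TE+/Thumb2
      __builtin_prefetch(p, /*rw=*/1, /*locality=*/3); // write: PLDW, emitted only
    }                                                  // with ARMv7 + MP extension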
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
@@ -2036,8 +2218,8 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
- false, false, 0);
+ return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
+ MachinePointerInfo(SV), false, false, 0);
}
SDValue
@@ -2054,7 +2236,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
RC = ARM::GPRRegisterClass;
// Transform the arguments stored in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC, dl);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
SDValue ArgValue2;
@@ -2065,10 +2247,10 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
// Create load node to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
} else {
- Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
+ Reg = MF.addLiveIn(NextVA.getLocReg(), RC, dl);
ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
}
@@ -2119,7 +2301,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
} else {
ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
@@ -2149,7 +2331,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC, dl);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
}
@@ -2160,7 +2342,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
break;
case CCValAssign::SExt:
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
@@ -2188,7 +2370,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
@@ -2202,7 +2384,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned NumGPRs = CCInfo.getFirstUnallocated
(GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
unsigned VARegSize = (4 - NumGPRs) * 4;
unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
unsigned ArgOffset = CCInfo.getNextStackOffset();
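The VARegSaveSize line above is the usual power-of-two round-up. A standalone worked sketch:

    unsigned alignUp(unsigned Size, unsigned Align) { // Align: power of two
      return (Size + Align - 1) & ~(Align - 1);
    }
    // e.g. one GPR consumed by fixed arguments: VARegSize = (4 - 1) * 4 = 12,
    // and with 8-byte stack alignment alignUp(12, 8) = 16 bytes are saved.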
@@ -2214,7 +2396,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
AFI->setVarArgsFrameIndex(
MFI->CreateFixedObject(VARegSaveSize,
ArgOffset + VARegSaveSize - VARegSize,
- true));
+ false));
SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
getPointerTy());
@@ -2226,12 +2408,12 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
else
RC = ARM::GPRRegisterClass;
- unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
+ unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
- PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()),
- 0, false, false, 0);
+ MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
+ false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
DAG.getConstant(4, getPointerTy()));
@@ -2320,7 +2502,7 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
break;
}
ARMcc = DAG.getConstant(CondCode, MVT::i32);
- return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
+ return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
@@ -2329,10 +2511,10 @@ ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
DebugLoc dl) const {
SDValue Cmp;
if (!isFloatingPointZero(RHS))
- Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
+ Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
else
- Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
- return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
+ Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
+ return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -2444,8 +2626,7 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
- Ld->getChain(), Ld->getBasePtr(),
- Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
Ld->getAlignment());
@@ -2464,7 +2645,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
SDValue Ptr = Ld->getBasePtr();
RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
Ld->getChain(), Ptr,
- Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
Ld->getAlignment());
@@ -2474,7 +2655,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
PtrType, Ptr, DAG.getConstant(4, PtrType));
RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
Ld->getChain(), NewPtr,
- Ld->getSrcValue(), Ld->getSrcValueOffset() + 4,
+ Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
NewAlign);
return;
@@ -2524,7 +2705,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
expandf64Toi32(RHS, DAG, RHS1, RHS2);
ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
ARMcc = DAG.getConstant(CondCode, MVT::i32);
- SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
}
@@ -2564,7 +2745,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
if (CondCode2 != ARMCC::AL) {
@@ -2599,14 +2780,14 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
}
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
- PseudoSourceValue::getJumpTable(), 0,
+ MachinePointerInfo::getJumpTable(),
false, false, 0);
Chain = Addr.getValue(1);
Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
} else {
Addr = DAG.getLoad(PTy, dl, Chain, Addr,
- PseudoSourceValue::getJumpTable(), 0, false, false, 0);
+ MachinePointerInfo::getJumpTable(), false, false, 0);
Chain = Addr.getValue(1);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
}
@@ -2627,7 +2808,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
break;
}
Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}
static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
@@ -2646,7 +2827,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
break;
}
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(Opc, dl, VT, Op);
}
@@ -2657,12 +2838,46 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
EVT SrcVT = Tmp1.getValueType();
- SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
- SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32);
- SDValue FP0 = DAG.getConstantFP(0.0, SrcVT);
- SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl);
+ bool F2IisFast = Subtarget->isCortexA9() ||
+ Tmp0.getOpcode() == ISD::BITCAST || Tmp0.getOpcode() == ARMISD::VMOVDRR;
+
+ // Bitcast operand 1 to i32.
+ if (SrcVT == MVT::f64)
+ Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
+ &Tmp1, 1).getValue(1);
+ Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
+
+ // If the float-to-int conversion isn't going to be super expensive, then
+ // simply OR in the sign bit.
+ if (F2IisFast) {
+ SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
+ SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
+ Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
+ if (VT == MVT::f32) {
+ Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
+ DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
+ DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
+ }
+
+ // f64: OR the high part with the sign bit, then combine the two parts.
+ Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
+ &Tmp0, 1);
+ SDValue Lo = Tmp0.getValue(0);
+ SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
+ Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
+ return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
+ }
+
+ // Clear the sign bit of operand 0.
+ Tmp0 = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
+
+ // If the sign bit of operand 1 is set, negate operand 0.
+ SDValue ARMcc;
+ SDValue Cmp = getARMCmp(Tmp1, DAG.getConstant(0, MVT::i32),
+ ISD::SETLT, ARMcc, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp);
+ return DAG.getNode(ARMISD::CNEG, dl, VT, Tmp0, Tmp0, ARMcc, CCR, Cmp);
}
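The fast path above is the classic integer formulation of copysign: clear the sign bit of the magnitude operand, then OR in the sign bit of the sign operand. A standalone sketch of the f32 case:

    #include <cstdint>
    uint32_t copySignBits(uint32_t Mag, uint32_t Sgn) {
      return (Mag & 0x7fffffffu)  // drop the old sign (Mask2 above)
           | (Sgn & 0x80000000u); // insert the new sign (Mask1 above)
    }
    // For f64 the OR touches only the high word, and the two halves are then
    // recombined with VMOVDRR, as in the code above.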
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
@@ -2678,11 +2893,11 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
SDValue Offset = DAG.getConstant(4, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
- NULL, 0, false, false, 0);
+ MachinePointerInfo(), false, false, 0);
}
// Return LR, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
+ unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32), dl);
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
@@ -2697,17 +2912,18 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
? ARM::R7 : ARM::R11;
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
- FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
false, false, 0);
return FrameAddr;
}
-/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to
+/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
-static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
+static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0);
@@ -2717,7 +2933,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
EVT SrcVT = Op.getValueType();
EVT DstVT = N->getValueType(0);
assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
- "ExpandBIT_CONVERT called for non-i64 type");
+ "ExpandBITCAST called for non-i64 type");
// Turn i64->f64 into VMOVDRR.
if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
@@ -2725,7 +2941,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT,
+ return DAG.getNode(ISD::BITCAST, dl, DstVT,
DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
}
@@ -2752,7 +2968,7 @@ static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
@@ -2825,7 +3041,7 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl);
}
-SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
+SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SelectionDAG &DAG) const {
// The rounding mode is in bits 23:22 of the FPSCR.
// The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
@@ -2835,11 +3051,11 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
DAG.getConstant(Intrinsic::arm_get_fpscr,
MVT::i32));
- SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
+ SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
DAG.getConstant(1U << 22, MVT::i32));
SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
DAG.getConstant(22, MVT::i32));
- return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
+ return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
DAG.getConstant(3, MVT::i32));
}
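The ADD/SRL/AND sequence implements the stated 0->1, 1->2, 2->3, 3->0 mapping as a wrap-around increment of FPSCR bits 23:22. A standalone worked sketch:

    unsigned fltRounds(unsigned FPSCR) {
      // Adding 1 << 22 increments the two-bit rounding-mode field modulo 4
      // (a carry out of bit 23 is masked away); the shift and mask extract
      // it: RN(0) -> 1, RP(1) -> 2, RM(2) -> 3, RZ(3) -> 0.
      return ((FPSCR + (1u << 22)) >> 22) & 3;
    }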
@@ -2860,33 +3076,40 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
+ if (!VT.isVector())
+ return SDValue();
+
// Lower vector shifts on NEON to use VSHL.
- if (VT.isVector()) {
- assert(ST->hasNEON() && "unexpected vector shift");
-
- // Left shifts translate directly to the vshiftu intrinsic.
- if (N->getOpcode() == ISD::SHL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
- N->getOperand(0), N->getOperand(1));
-
- assert((N->getOpcode() == ISD::SRA ||
- N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
-
- // NEON uses the same intrinsics for both left and right shifts. For
- // right shifts, the shift amounts are negative, so negate the vector of
- // shift amounts.
- EVT ShiftVT = N->getOperand(1).getValueType();
- SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
- getZeroVector(ShiftVT, DAG, dl),
- N->getOperand(1));
- Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
- Intrinsic::arm_neon_vshifts :
- Intrinsic::arm_neon_vshiftu);
+ assert(ST->hasNEON() && "unexpected vector shift");
+
+ // Left shifts translate directly to the vshiftu intrinsic.
+ if (N->getOpcode() == ISD::SHL)
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(vshiftInt, MVT::i32),
- N->getOperand(0), NegatedCount);
- }
+ DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
+ N->getOperand(0), N->getOperand(1));
+
+ assert((N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
+
+ // NEON uses the same intrinsics for both left and right shifts. For
+ // right shifts, the shift amounts are negative, so negate the vector of
+ // shift amounts.
+ EVT ShiftVT = N->getOperand(1).getValueType();
+ SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
+ getZeroVector(ShiftVT, DAG, dl),
+ N->getOperand(1));
+ Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
+ Intrinsic::arm_neon_vshifts :
+ Intrinsic::arm_neon_vshiftu);
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(vshiftInt, MVT::i32),
+ N->getOperand(0), NegatedCount);
+}
+
+static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
+ const ARMSubtarget *ST) {
+ EVT VT = N->getValueType(0);
+ DebugLoc dl = N->getDebugLoc();
// We can get here for a node like i32 = ISD::SHL i32, i64
if (VT != MVT::i64)
@@ -2912,7 +3135,7 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
// First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
// captures the result into a carry flag.
unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
- Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
+ Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
// The low part is an ARMISD::RRX operand, which shifts the carry in.
Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
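For a one-bit i64 right shift, the carry flag threads the bit that crosses the 32-bit boundary. A standalone bit-level sketch of what the SRL_FLAG/RRX pair computes:

    #include <cstdint>
    uint64_t lshr64by1(uint32_t Lo, uint32_t Hi) {
      uint32_t Carry = Hi & 1;                    // bit SRL_FLAG shifts out
      uint32_t HiOut = Hi >> 1;                   // SRL_FLAG result
      uint32_t LoOut = (Lo >> 1) | (Carry << 31); // RRX rotates the carry in
      return ((uint64_t)HiOut << 32) | LoOut;
    }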
@@ -2998,13 +3221,13 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
AndOp = Op1;
// Ignore bitconvert.
- if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
+ if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
AndOp = AndOp.getOperand(0);
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
Opc = ARMISD::VTST;
- Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
- Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
+ Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
+ Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
Invert = !Invert;
}
}
@@ -3013,7 +3236,38 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (Swap)
std::swap(Op0, Op1);
- SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
+ // If one of the operands is a constant vector zero, attempt to fold the
+ // comparison to a specialized compare-against-zero form.
+ SDValue SingleOp;
+ if (ISD::isBuildVectorAllZeros(Op1.getNode()))
+ SingleOp = Op0;
+ else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
+ if (Opc == ARMISD::VCGE)
+ Opc = ARMISD::VCLEZ;
+ else if (Opc == ARMISD::VCGT)
+ Opc = ARMISD::VCLTZ;
+ SingleOp = Op1;
+ }
+
+ SDValue Result;
+ if (SingleOp.getNode()) {
+ switch (Opc) {
+ case ARMISD::VCEQ:
+ Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
+ case ARMISD::VCGE:
+ Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
+ case ARMISD::VCLEZ:
+ Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
+ case ARMISD::VCGT:
+ Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
+ case ARMISD::VCLTZ:
+ Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
+ default:
+ Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
+ }
+ } else {
+ Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
+ }
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
@@ -3026,7 +3280,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
/// operand (e.g., VMOV). If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
unsigned SplatBitSize, SelectionDAG &DAG,
- EVT &VT, bool is128Bits, bool isVMOV) {
+ EVT &VT, bool is128Bits, NEONModImmType type) {
unsigned OpCmode, Imm;
// SplatBitSize is set to the smallest size that splats the vector, so a
@@ -3039,7 +3293,7 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
switch (SplatBitSize) {
case 8:
- if (!isVMOV)
+ if (type != VMOVModImm)
return SDValue();
// Any 1-byte value is OK. Op=0, Cmode=1110.
assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
@@ -3096,6 +3350,9 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
break;
}
+ // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
+ if (type == OtherModImm) return SDValue();
+
if ((SplatBits & ~0xffff) == 0 &&
((SplatBits | SplatUndef) & 0xff) == 0xff) {
// Value = 0x0000nnff: Op=x, Cmode=1100.
@@ -3122,7 +3379,7 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
return SDValue();
case 64: {
- if (!isVMOV)
+ if (type != VMOVModImm)
return SDValue();
// NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
uint64_t BitMask = 0xff;
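A standalone sketch of the byte-mask rule named in the comment above, the only 64-bit form the modified-immediate encoding accepts:

    #include <cstdint>
    bool isByteMaskSplat(uint64_t V) {
      for (int i = 0; i < 8; ++i, V >>= 8) {
        uint64_t Byte = V & 0xff;
        if (Byte != 0 && Byte != 0xff)
          return false;           // every byte must be 0x00 or 0xff
      }
      return true;                // e.g. 0x00ff00ff00ff00ff is encodable
    }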
@@ -3376,8 +3633,8 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
-static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
+SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *ST) const {
BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
@@ -3391,10 +3648,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
EVT VmovVT;
SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VmovVT, VT.is128BitVector(), true);
+ DAG, VmovVT, VT.is128BitVector(),
+ VMOVModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
// Try an immediate VMVN.
@@ -3402,10 +3660,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
((1LL << SplatBitSize) - 1));
Val = isNEONModifiedImm(NegatedImm,
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VmovVT, VT.is128BitVector(), false);
+ DAG, VmovVT, VT.is128BitVector(),
+ VMVNModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
}
}
@@ -3439,26 +3698,25 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EnableARMVDUPsplat) {
- // Use VDUP for non-constant splats. For f32 constant splats, reduce to
- // i32 and try again.
- if (usesOnlyOneValue && EltSize <= 32) {
- if (!isConstant)
- return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
- if (VT.getVectorElementType().isFloatingPoint()) {
- SmallVector<SDValue, 8> Ops;
- for (unsigned i = 0; i < NumElts; ++i)
- Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
- Op.getOperand(i)));
- SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &Ops[0],
- NumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
- LowerBUILD_VECTOR(Val, DAG, ST));
- }
- SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
+ // Use VDUP for non-constant splats. For f32 constant splats, reduce to
+ // i32 and try again.
+ if (usesOnlyOneValue && EltSize <= 32) {
+ if (!isConstant)
+ return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
+ if (VT.getVectorElementType().isFloatingPoint()) {
+ SmallVector<SDValue, 8> Ops;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
+ Op.getOperand(i)));
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
+ Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode())
- return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
+ SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
+ if (Val.getNode())
+ return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
}
// If all elements are constants and the case above didn't get hit, fall back
@@ -3467,10 +3725,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (isConstant)
return SDValue();
- if (!EnableARMVDUPsplat) {
- // Use VDUP for non-constant splats.
- if (usesOnlyOneValue && EltSize <= 32)
- return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
+ // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
+ if (NumElts >= 4) {
+ SDValue shuffle = ReconstructShuffle(Op, DAG);
+ if (shuffle != SDValue())
+ return shuffle;
}
// Vectors with 32- or 64-bit elements can be built by directly assigning
@@ -3483,14 +3742,144 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
- Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i)));
+ Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
return SDValue();
}
+// Gather data to see if the operation can be modelled as a
+// shuffle in combination with VEXTs.
+SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
+ SelectionDAG &DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ EVT VT = Op.getValueType();
+ unsigned NumElts = VT.getVectorNumElements();
+
+ SmallVector<SDValue, 2> SourceVecs;
+ SmallVector<unsigned, 2> MinElts;
+ SmallVector<unsigned, 2> MaxElts;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
+ // A shuffle can only come from building a vector from various
+ // elements of other vectors.
+ return SDValue();
+ }
+
+ // Record this extraction against the appropriate vector if possible...
+ SDValue SourceVec = V.getOperand(0);
+ unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
+ bool FoundSource = false;
+ for (unsigned j = 0; j < SourceVecs.size(); ++j) {
+ if (SourceVecs[j] == SourceVec) {
+ if (MinElts[j] > EltNo)
+ MinElts[j] = EltNo;
+ if (MaxElts[j] < EltNo)
+ MaxElts[j] = EltNo;
+ FoundSource = true;
+ break;
+ }
+ }
+
+ // Or record a new source if not...
+ if (!FoundSource) {
+ SourceVecs.push_back(SourceVec);
+ MinElts.push_back(EltNo);
+ MaxElts.push_back(EltNo);
+ }
+ }
+
+ // Currently only do something sane when at most two source vectors are
+ // involved.
+ if (SourceVecs.size() > 2)
+ return SDValue();
+
+ SDValue ShuffleSrcs[2] = { DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
+ int VEXTOffsets[2] = {0, 0};
+
+ // This loop extracts the usage patterns of the source vectors
+ // and prepares appropriate SDValues for a shuffle if possible.
+ for (unsigned i = 0; i < SourceVecs.size(); ++i) {
+ if (SourceVecs[i].getValueType() == VT) {
+ // No VEXT necessary
+ ShuffleSrcs[i] = SourceVecs[i];
+ VEXTOffsets[i] = 0;
+ continue;
+ } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
+ // It probably isn't worth padding out a smaller vector just to
+ // break it down again in a shuffle.
+ return SDValue();
+ }
+
+ // Since only 64-bit and 128-bit vectors are legal on ARM and
+ // we've eliminated the other cases...
+ assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
+ "unexpected vector sizes in ReconstructShuffle");
+
+ if (MaxElts[i] - MinElts[i] >= NumElts) {
+ // Span too large for a VEXT to cope
+ return SDValue();
+ }
+
+ if (MinElts[i] >= NumElts) {
+ // The extraction can just take the second half
+ VEXTOffsets[i] = NumElts;
+ ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+ SourceVecs[i],
+ DAG.getIntPtrConstant(NumElts));
+ } else if (MaxElts[i] < NumElts) {
+ // The extraction can just take the first half
+ VEXTOffsets[i] = 0;
+ ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+ SourceVecs[i],
+ DAG.getIntPtrConstant(0));
+ } else {
+ // An actual VEXT is needed
+ VEXTOffsets[i] = MinElts[i];
+ SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+ SourceVecs[i],
+ DAG.getIntPtrConstant(0));
+ SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+ SourceVecs[i],
+ DAG.getIntPtrConstant(NumElts));
+ ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
+ DAG.getConstant(VEXTOffsets[i], MVT::i32));
+ }
+ }
+
+ SmallVector<int, 8> Mask;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Entry = Op.getOperand(i);
+ if (Entry.getOpcode() == ISD::UNDEF) {
+ Mask.push_back(-1);
+ continue;
+ }
+
+ SDValue ExtractVec = Entry.getOperand(0);
+ int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
+ .getOperand(1))->getSExtValue();
+ if (ExtractVec == SourceVecs[0]) {
+ Mask.push_back(ExtractElt - VEXTOffsets[0]);
+ } else {
+ Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
+ }
+ }
+
+ // Final check before we try to produce nonsense...
+ if (isShuffleMaskLegal(Mask, VT))
+ return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
+ &Mask[0]);
+
+ return SDValue();
+}
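+// Illustrative walk-through (editor's sketch, not part of the patch):
+// a v4i16 BUILD_VECTOR whose operands extract lanes 2..5 of a single
+// v8i16 source S records MinElts[0] = 2 and MaxElts[0] = 5. The span
+// (3) fits within NumElts (4) but straddles both halves of S, so the
+// loop above emits
+//   VEXT(extract_subvector(S, 0), extract_subvector(S, 4), #2)
+// and the final mask becomes <0,1,2,3> once VEXTOffsets[0] == 2 is
+// subtracted from each extracted lane number.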
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -3706,8 +4095,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// registers are defined to use, and since i64 is not legal.
EVT EltVT = EVT::getFloatingPointVT(EltSize);
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1);
- V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2);
+ V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) {
if (ShuffleMask[i] < 0)
@@ -3719,21 +4108,26 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
MVT::i32)));
}
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
return SDValue();
}
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
- SDValue Vec = Op.getOperand(0);
+ // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
SDValue Lane = Op.getOperand(1);
- assert(VT == MVT::i32 &&
- Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
- "unexpected type for custom-lowering vector extract");
- return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
+ if (!isa<ConstantSDNode>(Lane))
+ return SDValue();
+
+ SDValue Vec = Op.getOperand(0);
+ if (Op.getValueType() == MVT::i32 &&
+ Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
+ }
+
+ return Op;
}
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
@@ -3747,25 +4141,123 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue Op1 = Op.getOperand(1);
if (Op0.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
+ DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
DAG.getIntPtrConstant(0));
if (Op1.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
+ DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
DAG.getIntPtrConstant(1));
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
+}
+
+/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
+/// element has been zero/sign-extended, depending on the isSigned parameter,
+/// from an integer type half its size.
+static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
+ bool isSigned) {
+ // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
+ EVT VT = N->getValueType(0);
+ if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
+ SDNode *BVN = N->getOperand(0).getNode();
+ if (BVN->getValueType(0) != MVT::v4i32 ||
+ BVN->getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+ unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
+ unsigned HiElt = 1 - LoElt;
+ ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
+ ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
+ ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
+ ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
+ if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
+ return false;
+ if (isSigned) {
+ if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
+ Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
+ return true;
+ } else {
+ if (Hi0->isNullValue() && Hi1->isNullValue())
+ return true;
+ }
+ return false;
+ }
+
+ if (N->getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ SDNode *Elt = N->getOperand(i).getNode();
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned HalfSize = EltSize / 2;
+ if (isSigned) {
+ int64_t SExtVal = C->getSExtValue();
+ if ((SExtVal >> HalfSize) != (SExtVal >> EltSize))
+ return false;
+ } else {
+ if ((C->getZExtValue() >> HalfSize) != 0)
+ return false;
+ }
+ continue;
+ }
+ return false;
+ }
+
+ return true;
+}
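+// Editor's illustration: a v4i16 constant vector <1, -2, 3, -4> passes
+// the signed check (each element survives the >> HalfSize vs. >> EltSize
+// comparison), while the unsigned check rejects -2 because
+// 0xFFFE >> 8 != 0. The predicates below use this to treat such
+// constants as widened narrow operands for VMULL.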
+
+/// isSignExtended - Check if a node is a vector value that is sign-extended
+/// or a constant BUILD_VECTOR with sign-extended elements.
+static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
+ if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
+ return true;
+ if (isExtendedBUILD_VECTOR(N, DAG, true))
+ return true;
+ return false;
+}
+
+/// isZeroExtended - Check if a node is a vector value that is zero-extended
+/// or a constant BUILD_VECTOR with zero-extended elements.
+static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
+ if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
+ return true;
+ if (isExtendedBUILD_VECTOR(N, DAG, false))
+ return true;
+ return false;
}
-/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or
-/// an extending load, return the unextended value.
+/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending
+/// load, or BUILD_VECTOR with extended elements, return the unextended value.
static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
return N->getOperand(0);
- LoadSDNode *LD = cast<LoadSDNode>(N);
- return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
- LD->getBasePtr(), LD->getSrcValue(),
- LD->getSrcValueOffset(), LD->isVolatile(),
- LD->isNonTemporal(), LD->getAlignment());
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+ return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
+ LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
+ LD->isNonTemporal(), LD->getAlignment());
+ // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
+ // have been legalized as a BITCAST from v4i32.
+ if (N->getOpcode() == ISD::BITCAST) {
+ SDNode *BVN = N->getOperand(0).getNode();
+ assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
+ BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
+ unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
+ return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32,
+ BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
+ }
+ // Construct a new BUILD_VECTOR with elements truncated to half the size.
+ assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
+ EVT VT = N->getValueType(0);
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT TruncVT = MVT::getIntegerVT(EltSize);
+ SmallVector<SDValue, 8> Ops;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
+ const APInt &CInt = C->getAPIntValue();
+ Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT));
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
+ MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
}
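+// Editor's illustration: SkipExtension on a v4i16 constant
+// BUILD_VECTOR <1, 2, 3, 4> truncates each APInt to 8 bits and returns
+// a v4i8 BUILD_VECTOR <1, 2, 3, 4>, recovering the narrow operand that
+// a VMULL instruction consumes directly.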
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
@@ -3776,19 +4268,16 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
SDNode *N0 = Op.getOperand(0).getNode();
SDNode *N1 = Op.getOperand(1).getNode();
unsigned NewOpc = 0;
- if ((N0->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N0)) &&
- (N1->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N1))) {
+ if (isSignExtended(N0, DAG) && isSignExtended(N1, DAG))
NewOpc = ARMISD::VMULLs;
- } else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) &&
- (N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) {
+ else if (isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG))
NewOpc = ARMISD::VMULLu;
- } else if (VT.getSimpleVT().SimpleTy == MVT::v2i64) {
+ else if (VT == MVT::v2i64)
// Fall through to expand this. It is not legal.
return SDValue();
- } else {
+ else
// Other vector multiplications are legal.
return Op;
- }
// Legalize to a VMULL instruction.
DebugLoc DL = Op.getDebugLoc();
@@ -3801,6 +4290,181 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
}
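+// Editor's illustration: "mul <8 x i16> (sext <8 x i8> %a),
+// (sext <8 x i8> %b)" satisfies isSignExtended on both operands, so it
+// becomes ARMISD::VMULLs on the two v8i8 values (a single
+// "vmull.s8 q0, d0, d1") instead of two widening moves plus a v8i16
+// multiply.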
+static SDValue
+LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) {
+ // Convert to float
+ // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
+ // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
+ X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
+ Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
+ X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
+ Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
+ // Get reciprocal estimate.
+ // float4 recip = vrecpeq_f32(yf);
+ Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y);
+ // Because char has a smaller range than uchar, we can actually get away
+ // without any Newton steps. This requires that we use a weird bias
+ // of 0xb000, however (again, this has been exhaustively tested).
+ // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
+ X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
+ X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
+ Y = DAG.getConstant(0xb000, MVT::i32);
+ Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
+ X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
+ X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
+ // Convert back to short.
+ X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
+ X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
+ return X;
+}
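+// A standalone sketch of the same recipe using <arm_neon.h> intrinsics
+// (editor's illustration under the usual intrinsic names; not part of
+// the patch):
+//   int16x4_t sdiv_v4i8(int16x4_t a, int16x4_t b) {
+//     float32x4_t xf = vcvtq_f32_s32(vmovl_s16(a));
+//     float32x4_t yf = vcvtq_f32_s32(vmovl_s16(b));
+//     float32x4_t r  = vrecpeq_f32(yf);          // ~8-bit estimate
+//     int32x4_t   q  = vreinterpretq_s32_f32(vmulq_f32(xf, r));
+//     q = vaddq_s32(q, vdupq_n_s32(0xb000));     // bias, then truncate
+//     return vmovn_s32(vcvtq_s32_f32(vreinterpretq_f32_s32(q)));
+//   }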
+
+static SDValue
+LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) {
+ SDValue N2;
+ // Convert to float.
+ // float4 yf = vcvt_f32_s32(vmovl_s16(y));
+ // float4 xf = vcvt_f32_s32(vmovl_s16(x));
+ N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
+ N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
+ N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
+ N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
+
+ // Use reciprocal estimate and one refinement step.
+ // float4 recip = vrecpeq_f32(yf);
+ // recip *= vrecpsq_f32(yf, recip);
+ N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
+ N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ N1, N2);
+ N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
+ // Because short has a smaller range than ushort, we can actually get away
+ // with only a single Newton step. This requires that we use a weird bias
+ // of 89, however (again, this has been exhaustively tested).
+ // float4 result = as_float4(as_int4(xf*recip) + 89);
+ N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
+ N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
+ N1 = DAG.getConstant(89, MVT::i32);
+ N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
+ N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
+ N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
+ // Convert back to integer and return.
+ // return vmovn_s32(vcvt_s32_f32(result));
+ N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
+ N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
+ return N0;
+}
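+// Editor's note: VRECPS computes (2.0 - a*b), so the vrecpe/vrecps pair
+// above is one Newton-Raphson step, r' = r * (2 - yf*r), roughly
+// doubling the precision of the initial estimate before the multiply.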
+
+static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+ assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
+ "unexpected type for custom-lowering ISD::SDIV");
+
+ DebugLoc dl = Op.getDebugLoc();
+ SDValue N0 = Op.getOperand(0);
+ SDValue N1 = Op.getOperand(1);
+ SDValue N2, N3;
+
+ if (VT == MVT::v8i8) {
+ N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
+ N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
+
+ N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
+ DAG.getIntPtrConstant(4));
+ N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
+ DAG.getIntPtrConstant(4));
+ N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
+ DAG.getIntPtrConstant(0));
+ N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
+ DAG.getIntPtrConstant(0));
+
+ N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
+ N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
+
+ N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
+ N0 = LowerCONCAT_VECTORS(N0, DAG);
+
+ N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
+ return N0;
+ }
+ return LowerSDIV_v4i16(N0, N1, dl, DAG);
+}
+
+static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+ assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
+ "unexpected type for custom-lowering ISD::UDIV");
+
+ DebugLoc dl = Op.getDebugLoc();
+ SDValue N0 = Op.getOperand(0);
+ SDValue N1 = Op.getOperand(1);
+ SDValue N2, N3;
+
+ if (VT == MVT::v8i8) {
+ N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
+ N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
+
+ N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
+ DAG.getIntPtrConstant(4));
+ N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
+ DAG.getIntPtrConstant(4));
+ N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
+ DAG.getIntPtrConstant(0));
+ N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
+ DAG.getIntPtrConstant(0));
+
+ N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
+ N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
+
+ N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
+ N0 = LowerCONCAT_VECTORS(N0, DAG);
+
+ N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
+ DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
+ N0);
+ return N0;
+ }
+
+ // v4i16 udiv ... Convert to float.
+ // float4 yf = vcvt_f32_s32(vmovl_u16(y));
+ // float4 xf = vcvt_f32_s32(vmovl_u16(x));
+ N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
+ N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
+ N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
+ N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
+
+ // Use reciprocal estimate and two refinement steps.
+ // float4 recip = vrecpeq_f32(yf);
+ // recip *= vrecpsq_f32(yf, recip);
+ // recip *= vrecpsq_f32(yf, recip);
+ N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
+ N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ N1, N2);
+ N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
+ N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ N1, N2);
+ N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
+ // Simply multiplying by the reciprocal estimate can leave us a few ulps
+ // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
+ // and that it will never cause us to return an answer too large).
+ // float4 result = as_float4(as_int4(xf*recip) + 2);
+ N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
+ N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
+ N1 = DAG.getConstant(2, MVT::i32);
+ N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
+ N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
+ N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
+ // Convert back to integer and return.
+ // return vmovn_u32(vcvt_s32_f32(result));
+ N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
+ N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
+ return N0;
+}
+
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Don't know how to custom lower this!");
@@ -3816,6 +4480,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget);
+ case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
case ISD::FP_TO_SINT:
@@ -3826,9 +4491,10 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
+ case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
Subtarget);
- case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG);
+ case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
case ISD::SHL:
case ISD::SRL:
case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
@@ -3843,6 +4509,8 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
+ case ISD::SDIV: return LowerSDIV(Op, DAG);
+ case ISD::UDIV: return LowerUDIV(Op, DAG);
}
return SDValue();
}
@@ -3857,12 +4525,12 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
default:
llvm_unreachable("Don't know how to custom expand this!");
break;
- case ISD::BIT_CONVERT:
- Res = ExpandBIT_CONVERT(N, DAG);
+ case ISD::BITCAST:
+ Res = ExpandBITCAST(N, DAG);
break;
case ISD::SRL:
case ISD::SRA:
- Res = LowerShift(N, DAG, Subtarget);
+ Res = Expand64BitShift(N, DAG, Subtarget);
break;
}
if (Res.getNode())
@@ -3892,7 +4560,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
case 1:
ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
- strOpc = isThumb2 ? ARM::t2LDREXB : ARM::STREXB;
+ strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
break;
case 2:
ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
@@ -4183,6 +4851,9 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case ARM::BCCi64:
case ARM::BCCZi64: {
+ // If there is an unconditional branch to the other successor, remove it.
+ BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
+
// Compare both parts that make up the double comparison separately for
// equality.
bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
@@ -4341,10 +5012,6 @@ static SDValue PerformMULCombine(SDNode *N,
if (Subtarget->isThumb1Only())
return SDValue();
- if (DAG.getMachineFunction().
- getFunction()->hasFnAttr(Attribute::OptimizeForSize))
- return SDValue();
-
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
@@ -4389,10 +5056,67 @@ static SDValue PerformMULCombine(SDNode *N,
return SDValue();
}
+static SDValue PerformANDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // Attempt to use immediate-form VBIC
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+ SelectionDAG &DAG = DCI.DAG;
+
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (BVN &&
+ BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
+ if (SplatBitSize <= 64) {
+ EVT VbicVT;
+ SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
+ SplatUndef.getZExtValue(), SplatBitSize,
+ DAG, VbicVT, VT.is128BitVector(),
+ OtherModImm);
+ if (Val.getNode()) {
+ SDValue Input =
+ DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
+ SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
+ }
+ }
+ }
+
+ return SDValue();
+}
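+// Editor's illustration: "and <4 x i32> %x, splat(0xFFFFFF00)" gives
+// SplatBits = 0xFFFFFF00, and ~SplatBits = 0x000000FF is a valid NEON
+// modified immediate, so the combine emits "vbic.i32 q0, #0xff",
+// clearing the low byte of every lane without a constant-pool load.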
+
/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
+ // Attempt to use immediate-form VORR
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+ SelectionDAG &DAG = DCI.DAG;
+
+ APInt SplatBits, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ if (BVN && Subtarget->hasNEON() &&
+ BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
+ if (SplatBitSize <= 64) {
+ EVT VorrVT;
+ SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
+ SplatUndef.getZExtValue(), SplatBitSize,
+ DAG, VorrVT, VT.is128BitVector(),
+ OtherModImm);
+ if (Val.getNode()) {
+ SDValue Input =
+ DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
+ SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
+ }
+ }
+ }
+
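+ // Editor's note: this mirrors the VBIC case above; e.g. or'ing a
+ // splat of 0x00FF0000 into each lane becomes "vorr.i32 q0, #0xff0000"
+ // rather than a load of the mask vector.
+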
// Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
// reasonable.
@@ -4400,7 +5124,6 @@ static SDValue PerformORCombine(SDNode *N,
if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
return SDValue();
- SelectionDAG &DAG = DCI.DAG;
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
DebugLoc DL = N->getDebugLoc();
// 1) or (and A, mask), val => ARMbfi A, val, mask
@@ -4415,40 +5138,46 @@ static SDValue PerformORCombine(SDNode *N,
if (N0.getOpcode() != ISD::AND)
return SDValue();
- EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
+ SDValue N00 = N0.getOperand(0);
// The value and the mask need to be constants so we can verify this is
// actually a bitfield set. If the mask is 0xffff, we can do better
// via a movt instruction, so don't use BFI in that case.
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (!C)
+ SDValue MaskOp = N0.getOperand(1);
+ ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
+ if (!MaskC)
return SDValue();
- unsigned Mask = C->getZExtValue();
+ unsigned Mask = MaskC->getZExtValue();
if (Mask == 0xffff)
return SDValue();
SDValue Res;
// Case (1): or (and A, mask), val => ARMbfi A, val, mask
- if ((C = dyn_cast<ConstantSDNode>(N1))) {
- unsigned Val = C->getZExtValue();
- if (!ARM::isBitFieldInvertedMask(Mask) || (Val & ~Mask) != Val)
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ if (N1C) {
+ unsigned Val = N1C->getZExtValue();
+ if ((Val & ~Mask) != Val)
return SDValue();
- Val >>= CountTrailingZeros_32(~Mask);
- Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0),
- DAG.getConstant(Val, MVT::i32),
- DAG.getConstant(Mask, MVT::i32));
+ if (ARM::isBitFieldInvertedMask(Mask)) {
+ Val >>= CountTrailingZeros_32(~Mask);
- // Do not add new nodes to DAG combiner worklist.
- DCI.CombineTo(N, Res, false);
+ Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
+ DAG.getConstant(Val, MVT::i32),
+ DAG.getConstant(Mask, MVT::i32));
+
+ // Do not add new nodes to DAG combiner worklist.
+ DCI.CombineTo(N, Res, false);
+ return SDValue();
+ }
} else if (N1.getOpcode() == ISD::AND) {
// case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
- C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
- if (!C)
+ ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+ if (!N11C)
return SDValue();
- unsigned Mask2 = C->getZExtValue();
+ unsigned Mask2 = N11C->getZExtValue();
if (ARM::isBitFieldInvertedMask(Mask) &&
ARM::isBitFieldInvertedMask(~Mask2) &&
@@ -4462,10 +5191,11 @@ static SDValue PerformORCombine(SDNode *N,
unsigned lsb = CountTrailingZeros_32(Mask2);
Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
DAG.getConstant(lsb, MVT::i32));
- Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), Res,
+ Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
DAG.getConstant(Mask, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
+ return SDValue();
} else if (ARM::isBitFieldInvertedMask(~Mask) &&
ARM::isBitFieldInvertedMask(Mask2) &&
(CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) {
@@ -4476,40 +5206,472 @@ static SDValue PerformORCombine(SDNode *N,
return SDValue();
// 2b
unsigned lsb = CountTrailingZeros_32(Mask);
- Res = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
+ Res = DAG.getNode(ISD::SRL, DL, VT, N00,
DAG.getConstant(lsb, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
DAG.getConstant(Mask2, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
+ return SDValue();
}
}
+ if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
+ N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
+ ARM::isBitFieldInvertedMask(~Mask)) {
+ // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
+ // where lsb(mask) == #shamt and masked bits of B are known zero.
+ SDValue ShAmt = N00.getOperand(1);
+ unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
+ unsigned LSB = CountTrailingZeros_32(Mask);
+ if (ShAmtC != LSB)
+ return SDValue();
+
+ Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
+ DAG.getConstant(~Mask, MVT::i32));
+
+ // Do not add new nodes to DAG combiner worklist.
+ DCI.CombineTo(N, Res, false);
+ }
+
+ return SDValue();
+}
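+// Editor's illustration of case (3): with A in r1 and B in r2,
+//   or (and (shl A, #8), 0x00FF00), B
+// has Mask = 0x00FF00 and lsb(Mask) = 8 == #shamt; when bits 8..15 of B
+// are known zero, the node folds to "bfi r2, r1, #8, #8", inserting
+// A[7:0] into B[15:8].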
+
+/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff
+/// C1 & C2 == C1.
+static SDValue PerformBFICombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() == ISD::AND) {
+ ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+ if (!N11C)
+ return SDValue();
+ unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ unsigned Mask2 = N11C->getZExtValue();
+ if ((Mask & Mask2) == Mask2)
+ return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
+ N->getOperand(0), N1.getOperand(0),
+ N->getOperand(2));
+ }
return SDValue();
}
/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // fmrrd(fmdrr x, y) -> x,y
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // vmovrrd(vmovdrr x, y) -> x,y
SDValue InDouble = N->getOperand(0);
if (InDouble.getOpcode() == ARMISD::VMOVDRR)
return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
return SDValue();
}
+/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
+/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
+static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
+ // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ if (Op0.getOpcode() == ISD::BITCAST)
+ Op0 = Op0.getOperand(0);
+ if (Op1.getOpcode() == ISD::BITCAST)
+ Op1 = Op1.getOperand(0);
+ if (Op0.getOpcode() == ARMISD::VMOVRRD &&
+ Op0.getNode() == Op1.getNode() &&
+ Op0.getResNo() == 0 && Op1.getResNo() == 1)
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
+ N->getValueType(0), Op0.getOperand(0));
+ return SDValue();
+}
+
+/// PerformSTORECombine - Target-specific dag combine xforms for
+/// ISD::STORE.
+static SDValue PerformSTORECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // Bitcast an i64 store extracted from a vector to f64.
+ // Otherwise, the i64 value will be legalized to a pair of i32 values.
+ StoreSDNode *St = cast<StoreSDNode>(N);
+ SDValue StVal = St->getValue();
+ if (!ISD::isNormalStore(St) || St->isVolatile() ||
+ StVal.getValueType() != MVT::i64 ||
+ StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ DebugLoc dl = StVal.getDebugLoc();
+ SDValue IntVec = StVal.getOperand(0);
+ EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
+ IntVec.getValueType().getVectorNumElements());
+ SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
+ SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
+ Vec, StVal.getOperand(1));
+ dl = N->getDebugLoc();
+ SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(Vec.getNode());
+ DCI.AddToWorklist(ExtElt.getNode());
+ DCI.AddToWorklist(V.getNode());
+ return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
+ St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), St->getAlignment(),
+ St->getTBAAInfo());
+}
+
+/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
+/// are normal, non-volatile loads. If so, it is profitable to bitcast an
+/// i64 vector to have f64 elements, since the value can then be loaded
+/// directly into a VFP register.
+static bool hasNormalLoadOperand(SDNode *N) {
+ unsigned NumElts = N->getValueType(0).getVectorNumElements();
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDNode *Elt = N->getOperand(i).getNode();
+ if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
+ return true;
+ }
+ return false;
+}
+
+/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
+/// ISD::BUILD_VECTOR.
+static SDValue PerformBUILD_VECTORCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI){
+ // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
+ // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
+ // into a pair of GPRs, which is fine when the value is used as a scalar,
+ // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
+ SelectionDAG &DAG = DCI.DAG;
+ if (N->getNumOperands() == 2) {
+ SDValue RV = PerformVMOVDRRCombine(N, DAG);
+ if (RV.getNode())
+ return RV;
+ }
+
+ // Load i64 elements as f64 values so that type legalization does not split
+ // them up into i32 values.
+ EVT VT = N->getValueType(0);
+ if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
+ return SDValue();
+ DebugLoc dl = N->getDebugLoc();
+ SmallVector<SDValue, 8> Ops;
+ unsigned NumElts = VT.getVectorNumElements();
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
+ Ops.push_back(V);
+ // Make the DAGCombiner fold the bitcast.
+ DCI.AddToWorklist(V.getNode());
+ }
+ EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
+ return DAG.getNode(ISD::BITCAST, dl, VT, BV);
+}
+
+/// PerformInsertEltCombine - Target-specific dag combine xforms for
+/// ISD::INSERT_VECTOR_ELT.
+static SDValue PerformInsertEltCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // Bitcast an i64 load inserted into a vector to f64.
+ // Otherwise, the i64 value will be legalized to a pair of i32 values.
+ EVT VT = N->getValueType(0);
+ SDNode *Elt = N->getOperand(1).getNode();
+ if (VT.getVectorElementType() != MVT::i64 ||
+ !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ DebugLoc dl = N->getDebugLoc();
+ EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
+ VT.getVectorNumElements());
+ SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
+ SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(Vec.getNode());
+ DCI.AddToWorklist(V.getNode());
+ SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
+ Vec, V, N->getOperand(2));
+ return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
+}
+
+/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
+/// ISD::VECTOR_SHUFFLE.
+static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
+ // The LLVM shufflevector instruction does not require the shuffle mask
+ // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
+ // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
+ // operands do not match the mask length, they are extended by concatenating
+ // them with undef vectors. That is probably the right thing for other
+ // targets, but for NEON it is better to concatenate two double-register
+ // size vector operands into a single quad-register size vector. Do that
+ // transformation here:
+ // shuffle(concat(v1, undef), concat(v2, undef)) ->
+ // shuffle(concat(v1, v2), undef)
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
+ Op1.getOpcode() != ISD::CONCAT_VECTORS ||
+ Op0.getNumOperands() != 2 ||
+ Op1.getNumOperands() != 2)
+ return SDValue();
+ SDValue Concat0Op1 = Op0.getOperand(1);
+ SDValue Concat1Op1 = Op1.getOperand(1);
+ if (Concat0Op1.getOpcode() != ISD::UNDEF ||
+ Concat1Op1.getOpcode() != ISD::UNDEF)
+ return SDValue();
+ // Skip the transformation if any of the types are illegal.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT VT = N->getValueType(0);
+ if (!TLI.isTypeLegal(VT) ||
+ !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
+ !TLI.isTypeLegal(Concat1Op1.getValueType()))
+ return SDValue();
+
+ SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
+ Op0.getOperand(0), Op1.getOperand(0));
+ // Translate the shuffle mask.
+ SmallVector<int, 16> NewMask;
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned HalfElts = NumElts/2;
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
+ for (unsigned n = 0; n < NumElts; ++n) {
+ int MaskElt = SVN->getMaskElt(n);
+ int NewElt = -1;
+ if (MaskElt < (int)HalfElts)
+ NewElt = MaskElt;
+ else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
+ NewElt = HalfElts + MaskElt - NumElts;
+ NewMask.push_back(NewElt);
+ }
+ return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
+ DAG.getUNDEF(VT), NewMask.data());
+}
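+// Editor's illustration: with v8i16 operands concat(A, undef) and
+// concat(B, undef), where A and B are v4i16, NumElts = 8 and
+// HalfElts = 4. Mask entry 2 stays 2, entry 9 (lane 1 of operand 1)
+// becomes 4 + 9 - 8 = 5, and entries that point into an undef half map
+// to -1, yielding shuffle(concat(A, B), undef) on one Q register.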
+
+/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and
+/// NEON load/store intrinsics to merge base address updates.
+static SDValue CombineBaseUpdate(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
+ unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
+ SDValue Addr = N->getOperand(AddrOpIdx);
+
+ // Search for a use of the address operand that is an increment.
+ for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
+ UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::ADD ||
+ UI.getUse().getResNo() != Addr.getResNo())
+ continue;
+
+ // Check that the add is independent of the load/store. Otherwise, folding
+ // it would create a cycle.
+ if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
+ continue;
+
+ // Find the new opcode for the updating load/store.
+ bool isLoad = true;
+ bool isLaneOp = false;
+ unsigned NewOpc = 0;
+ unsigned NumVecs = 0;
+ if (isIntrinsic) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ switch (IntNo) {
+ default: assert(0 && "unexpected intrinsic for Neon base update");
+ case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
+ NumVecs = 1; break;
+ case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
+ NumVecs = 2; break;
+ case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD;
+ NumVecs = 3; break;
+ case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD;
+ NumVecs = 4; break;
+ case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
+ NumVecs = 2; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
+ NumVecs = 3; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
+ NumVecs = 4; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
+ NumVecs = 1; isLoad = false; break;
+ case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
+ NumVecs = 2; isLoad = false; break;
+ case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
+ NumVecs = 3; isLoad = false; break;
+ case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
+ NumVecs = 4; isLoad = false; break;
+ case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
+ NumVecs = 2; isLoad = false; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
+ NumVecs = 3; isLoad = false; isLaneOp = true; break;
+ case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
+ NumVecs = 4; isLoad = false; isLaneOp = true; break;
+ }
+ } else {
+ isLaneOp = true;
+ switch (N->getOpcode()) {
+ default: assert(0 && "unexpected opcode for Neon base update");
+ case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
+ case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
+ case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
+ }
+ }
+
+ // Find the size of memory referenced by the load/store.
+ EVT VecTy;
+ if (isLoad)
+ VecTy = N->getValueType(0);
+ else
+ VecTy = N->getOperand(AddrOpIdx+1).getValueType();
+ unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
+ if (isLaneOp)
+ NumBytes /= VecTy.getVectorNumElements();
+
+ // If the increment is a constant, it must match the memory ref size.
+ SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
+ if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
+ uint64_t IncVal = CInc->getZExtValue();
+ if (IncVal != NumBytes)
+ continue;
+ } else if (NumBytes >= 3 * 16) {
+ // VLD3/4 and VST3/4 for 128-bit vectors are implemented as two
+ // separate instructions, which makes it harder to use a non-constant
+ // update.
+ continue;
+ }
+
+ // Create the new updating load/store node.
+ EVT Tys[6];
+ unsigned NumResultVecs = (isLoad ? NumVecs : 0);
+ unsigned n;
+ for (n = 0; n < NumResultVecs; ++n)
+ Tys[n] = VecTy;
+ Tys[n++] = MVT::i32;
+ Tys[n] = MVT::Other;
+ SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(N->getOperand(0)); // incoming chain
+ Ops.push_back(N->getOperand(AddrOpIdx));
+ Ops.push_back(Inc);
+ for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
+ Ops.push_back(N->getOperand(i));
+ }
+ MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys,
+ Ops.data(), Ops.size(),
+ MemInt->getMemoryVT(),
+ MemInt->getMemOperand());
+
+ // Update the uses.
+ std::vector<SDValue> NewResults;
+ for (unsigned i = 0; i < NumResultVecs; ++i) {
+ NewResults.push_back(SDValue(UpdN.getNode(), i));
+ }
+ NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
+ DCI.CombineTo(N, NewResults);
+ DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
+
+ break;
+ }
+ return SDValue();
+}
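+// Editor's illustration: a "vld1.64 {d0}, [r0]" whose address is also
+// consumed by "add r1, r0, #8" has a constant increment equal to
+// NumBytes (1 * 64 / 8), so the pair folds into the post-incrementing
+// "vld1.64 {d0}, [r0]!", and the add's users are rewritten to use the
+// write-back result.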
+
+/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
+/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
+/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
+/// return true.
+static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+ // vldN-dup instructions only support 64-bit vectors for N > 1.
+ if (!VT.is64BitVector())
+ return false;
+
+ // Check if the VDUPLANE operand is a vldN-dup intrinsic.
+ SDNode *VLD = N->getOperand(0).getNode();
+ if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ return false;
+ unsigned NumVecs = 0;
+ unsigned NewOpc = 0;
+ unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
+ if (IntNo == Intrinsic::arm_neon_vld2lane) {
+ NumVecs = 2;
+ NewOpc = ARMISD::VLD2DUP;
+ } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
+ NumVecs = 3;
+ NewOpc = ARMISD::VLD3DUP;
+ } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
+ NumVecs = 4;
+ NewOpc = ARMISD::VLD4DUP;
+ } else {
+ return false;
+ }
+
+ // First check that all the vldN-lane uses are VDUPLANEs and that the lane
+ // numbers match the load.
+ unsigned VLDLaneNo =
+ cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
+ for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
+ UI != UE; ++UI) {
+ // Ignore uses of the chain result.
+ if (UI.getUse().getResNo() == NumVecs)
+ continue;
+ SDNode *User = *UI;
+ if (User->getOpcode() != ARMISD::VDUPLANE ||
+ VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
+ return false;
+ }
+
+ // Create the vldN-dup node.
+ EVT Tys[5];
+ unsigned n;
+ for (n = 0; n < NumVecs; ++n)
+ Tys[n] = VT;
+ Tys[n] = MVT::Other;
+ SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
+ SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
+ MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
+ SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys,
+ Ops, 2, VLDMemInt->getMemoryVT(),
+ VLDMemInt->getMemOperand());
+
+ // Update the uses.
+ for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
+ UI != UE; ++UI) {
+ unsigned ResNo = UI.getUse().getResNo();
+ // Ignore uses of the chain result.
+ if (ResNo == NumVecs)
+ continue;
+ SDNode *User = *UI;
+ DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
+ }
+
+ // Now the vldN-lane intrinsic is dead except for its chain result.
+ // Update uses of the chain.
+ std::vector<SDValue> VLDDupResults;
+ for (unsigned n = 0; n < NumVecs; ++n)
+ VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
+ VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
+ DCI.CombineTo(VLD, VLDDupResults);
+
+ return true;
+}
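+// Editor's illustration: when every non-chain result of a
+// @llvm.arm.neon.vld2lane load of lane 1 feeds only VDUPLANE(lane 1)
+// nodes, the lane load and the duplicates collapse into a single
+// all-lanes load, "vld2.16 {d0[], d1[]}, [r0]" (ARMISD::VLD2DUP).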
+
/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
- // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
- // redundant.
SDValue Op = N->getOperand(0);
- EVT VT = N->getValueType(0);
- // Ignore bit_converts.
- while (Op.getOpcode() == ISD::BIT_CONVERT)
+ // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
+ // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
+ if (CombineVLDDUP(N, DCI))
+ return SDValue(N, 0);
+
+ // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
+ // redundant. Ignore bit_converts for now; element sizes are checked below.
+ while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
return SDValue();
@@ -4521,11 +5683,11 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
unsigned EltBits;
if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
EltSize = 8;
+ EVT VT = N->getValueType(0);
if (EltSize > VT.getVectorElementType().getSizeInBits())
return SDValue();
- SDValue Res = DCI.DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
- return DCI.CombineTo(N, Res, false);
+ return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
}
/// getVShiftImm - Check if this is a valid build_vector for the immediate
@@ -4533,7 +5695,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
// Ignore bit_converts.
- while (Op.getOpcode() == ISD::BIT_CONVERT)
+ while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
APInt SplatBits, SplatUndef;
@@ -4747,7 +5909,8 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
// Nothing to be done for scalar shifts.
- if (! VT.isVector())
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!VT.isVector() || !TLI.isTypeLegal(VT))
return SDValue();
assert(ST->hasNEON() && "unexpected vector shift");
@@ -4793,7 +5956,8 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
if (VT == MVT::i32 &&
(EltVT == MVT::i8 || EltVT == MVT::i16) &&
- TLI.isTypeLegal(Vec.getValueType())) {
+ TLI.isTypeLegal(Vec.getValueType()) &&
+ isa<ConstantSDNode>(Lane)) {
unsigned Opc = 0;
switch (N->getOpcode()) {
@@ -4906,7 +6070,14 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SUB: return PerformSUBCombine(N, DCI);
case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
+ case ISD::AND: return PerformANDCombine(N, DCI);
+ case ARMISD::BFI: return PerformBFICombine(N, DCI);
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
+ case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
+ case ISD::STORE: return PerformSTORECombine(N, DCI);
+ case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI);
+ case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
+ case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
case ISD::SHL:
@@ -4916,20 +6087,42 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
+ case ARMISD::VLD2DUP:
+ case ARMISD::VLD3DUP:
+ case ARMISD::VLD4DUP:
+ return CombineBaseUpdate(N, DCI);
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN:
+ switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane:
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane:
+ return CombineBaseUpdate(N, DCI);
+ default: break;
+ }
+ break;
}
return SDValue();
}
-bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
- if (!Subtarget->hasV6Ops())
- // Pre-v6 does not support unaligned mem access.
- return false;
+bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
+ EVT VT) const {
+ return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
+}
- // v6+ may or may not support unaligned mem access depending on the system
- // configuration.
- // FIXME: This is pretty conservative. Should we provide cmdline option to
- // control the behaviour?
- if (!Subtarget->isTargetDarwin())
+bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
+ if (!Subtarget->allowsUnalignedMem())
return false;
switch (VT.getSimpleVT().SimpleTy) {
@@ -5143,7 +6336,7 @@ bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
if (!Subtarget->isThumb())
return ARM_AM::getSOImmVal(Imm) != -1;
if (Subtarget->isThumb2())
- return ARM_AM::getT2SOImmVal(Imm) != -1;
+ return ARM_AM::getT2SOImmVal(Imm) != -1;
return Imm >= 0 && Imm <= 255;
}
@@ -5348,6 +6541,37 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
// ARM Inline Assembly Support
//===----------------------------------------------------------------------===//
+bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
+ // Looking for "rev" which is V6+.
+ if (!Subtarget->hasV6Ops())
+ return false;
+
+ InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+ std::string AsmStr = IA->getAsmString();
+ SmallVector<StringRef, 4> AsmPieces;
+ SplitString(AsmStr, AsmPieces, ";\n");
+
+ switch (AsmPieces.size()) {
+ default: return false;
+ case 1:
+ AsmStr = AsmPieces[0];
+ AsmPieces.clear();
+ SplitString(AsmStr, AsmPieces, " \t,");
+
+ // rev $0, $1
+ if (AsmPieces.size() == 3 &&
+ AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
+ IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (Ty && Ty->getBitWidth() == 32)
+ return IntrinsicLowering::LowerToByteSwap(CI);
+ }
+ break;
+ }
+
+ return false;
+}
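+// Editor's illustration: IR of the form
+//   %r = call i32 asm "rev $0, $1", "=l,l"(i32 %x)
+// is rewritten to a call of @llvm.bswap.i32, so the byte swap can be
+// folded and scheduled like any other intrinsic.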
+
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
@@ -5362,6 +6586,40 @@ ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+ARMTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ const Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'l':
+ if (type->isIntegerTy()) {
+ if (Subtarget->isThumb())
+ weight = CW_SpecificReg;
+ else
+ weight = CW_Register;
+ }
+ break;
+ case 'w':
+ if (type->isFloatingPointTy())
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
+
std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const {
@@ -5664,3 +6922,63 @@ bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return ARM::getVFPf64Imm(Imm) != -1;
return false;
}
+
+/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
+/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
+/// specified in the intrinsic calls.
+bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ // Conservatively set memVT to the entire set of vectors loaded.
+ uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+ Info.vol = false; // volatile loads with NEON intrinsics not supported
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ // Conservatively set memVT to the entire set of vectors stored.
+ unsigned NumElts = 0;
+ for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
+ const Type *ArgTy = I.getArgOperand(ArgI)->getType();
+ if (!ArgTy->isVectorTy())
+ break;
+ NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
+ }
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+ Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+ Info.vol = false; // volatile stores with NEON intrinsics not supported
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
+ default:
+ break;
+ }
+
+ return false;
+}
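+// Editor's illustration: for @llvm.arm.neon.vld3.v4i32 the returned
+// struct of three <4 x i32> vectors occupies 48 bytes, so NumElts = 6
+// and memVT becomes v6i64, deliberately covering the whole accessed
+// region so that alias analysis stays conservative.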
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index ba9ea7f..dc400c4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -34,6 +34,10 @@ namespace llvm {
Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
// TargetExternalSymbol, and TargetGlobalAddress.
+ WrapperDYN, // WrapperDYN - A wrapper node for TargetGlobalAddress in
+ // DYN mode.
+ WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
+ // PIC mode.
WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
CALL, // Function call.
@@ -47,8 +51,6 @@ namespace llvm {
PIC_ADD, // Add with a PC operand and a PIC label.
- AND, // ARM "and" instruction that sets the 's' flag in CPSR.
-
CMP, // ARM compare instructions.
CMPZ, // ARM compare that sets only Z flag.
CMPFP, // ARM VFP compare instruction, sets FPSCR.
@@ -73,8 +75,9 @@ namespace llvm {
VMOVRRD, // double to two gprs.
VMOVDRR, // Two gprs to double.
- EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
- EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
+ EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
+ EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
+ EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup.
TC_RETURN, // Tail call return pseudo.
@@ -82,13 +85,20 @@ namespace llvm {
DYN_ALLOC, // Dynamic allocation on the stack.
- MEMBARRIER, // Memory barrier
- SYNCBARRIER, // Memory sync barrier
+ MEMBARRIER, // Memory barrier (DMB)
+ MEMBARRIER_MCR, // Memory barrier (MCR)
+
+ PRELOAD, // Preload
VCEQ, // Vector compare equal.
+ VCEQZ, // Vector compare equal to zero.
VCGE, // Vector compare greater than or equal.
+ VCGEZ, // Vector compare greater than or equal to zero.
+ VCLEZ, // Vector compare less than or equal to zero.
VCGEU, // Vector compare unsigned greater than or equal.
VCGT, // Vector compare greater than.
+ VCGTZ, // Vector compare greater than zero.
+ VCLTZ, // Vector compare less than zero.
VCGTU, // Vector compare unsigned greater than.
VTST, // Vector test bits.
@@ -161,7 +171,38 @@ namespace llvm {
FMIN,
// Bit-field insert
- BFI
+ BFI,
+
+ // Vector OR with immediate
+ VORRIMM,
+ // Vector AND with NOT of immediate
+ VBICIMM,
+
+ // Vector load N-element structure to all lanes:
+ VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ VLD3DUP,
+ VLD4DUP,
+
+ // NEON loads with post-increment base updates:
+ VLD1_UPD,
+ VLD2_UPD,
+ VLD3_UPD,
+ VLD4_UPD,
+ VLD2LN_UPD,
+ VLD3LN_UPD,
+ VLD4LN_UPD,
+ VLD2DUP_UPD,
+ VLD3DUP_UPD,
+ VLD4DUP_UPD,
+
+ // NEON stores with post-increment base updates:
+ VST1_UPD,
+ VST2_UPD,
+ VST3_UPD,
+ VST4_UPD,
+ VST2LN_UPD,
+ VST3LN_UPD,
+ VST4LN_UPD
};
}
@@ -193,14 +234,16 @@ namespace llvm {
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
virtual const char *getTargetNodeName(unsigned Opcode) const;
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
+ virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
+ bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;
+
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses of the specified type.
/// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
@@ -241,7 +284,15 @@ namespace llvm {
unsigned Depth) const;
+ virtual bool ExpandInlineAsm(CallInst *CI) const;
+
ConstraintType getConstraintType(const std::string &Constraint) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -290,6 +341,9 @@ namespace llvm {
/// materialize the FP immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ unsigned Intrinsic) const;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
findRepresentativeClass(EVT VT) const;
@@ -301,6 +355,8 @@ namespace llvm {
const TargetRegisterInfo *RegInfo;
+ const InstrItineraryData *Itins;
+
/// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
///
unsigned ARMPCLabelIndex;
@@ -329,6 +385,7 @@ namespace llvm {
ISD::ArgFlagsTy Flags) const;
SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -350,6 +407,10 @@ namespace llvm {
SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *ST) const;
+
+ SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
@@ -393,6 +454,8 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
+ virtual bool isUsedByReturnOnly(SDNode *N) const;
+
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
SDValue getVFPCmp(SDValue LHS, SDValue RHS,
@@ -410,6 +473,13 @@ namespace llvm {
};
+ enum NEONModImmType {
+ VMOVModImm,
+ VMVNModImm,
+ OtherModImm
+ };
+
+
namespace ARM {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
}
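The NEONModImmType enum above tags which flavor of NEON "modified immediate" a caller wants when matching a splat constant: encode the value itself (VMOV), encode its bitwise complement (VMVN), or the narrower immediates taken by instructions like VORR/VBIC. A hedged illustration (helper name and shape assumed, not taken from the tree):

static uint64_t adjustForModImm(NEONModImmType Type, uint64_t Splat) {
  // VMVNModImm consumers invert their immediate, so the matcher tries
  // ~Splat and lets the instruction reconstruct the original value;
  // OtherModImm (VORR/VBIC) accepts a narrower set of cmode patterns.
  return Type == VMVNModImm ? ~Splat : Splat;
}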
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index 113cfff..765cba4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -1,4 +1,4 @@
-//===- ARMInstrFormats.td - ARM Instruction Formats --*- tablegen -*---------=//
+//===- ARMInstrFormats.td - ARM Instruction Formats ----------*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -71,7 +71,7 @@ def NVTBLFrm : Format<41>;
// Misc flags.
-// the instruction has a Rn register operand.
+// The instruction has an Rn register operand.
// UnaryDP - Indicates this is a unary data processing instruction, i.e.
// it doesn't have an Rn operand.
class UnaryDP { bit isUnaryDataProc = 1; }
@@ -84,9 +84,10 @@ class Xform16Bit { bit canXformTo16Bit = 1; }
// ARM Instruction flags. These need to match ARMBaseInstrInfo.h.
//
+// FIXME: Once the JIT is MC-ized, these can go away.
// Addressing mode.
-class AddrMode<bits<4> val> {
- bits<4> Value = val;
+class AddrMode<bits<5> val> {
+ bits<5> Value = val;
}
def AddrModeNone : AddrMode<0>;
def AddrMode1 : AddrMode<1>;
@@ -104,6 +105,7 @@ def AddrModeT2_i8 : AddrMode<12>;
def AddrModeT2_so : AddrMode<13>;
def AddrModeT2_pc : AddrMode<14>;
def AddrModeT2_i8s4 : AddrMode<15>;
+def AddrMode_i12 : AddrMode<16>;
// Instruction size.
class SizeFlagVal<bits<3> val> {
@@ -134,7 +136,6 @@ def NeonDomain : Domain<2>; // Instructions in Neon domain only
def VFPNeonDomain : Domain<3>; // Instructions in both VFP & Neon domains
//===----------------------------------------------------------------------===//
-
// ARM special operands.
//
@@ -143,6 +144,39 @@ def CondCodeOperand : AsmOperandClass {
let SuperClasses = [];
}
+def CCOutOperand : AsmOperandClass {
+ let Name = "CCOut";
+ let SuperClasses = [];
+}
+
+def MemBarrierOptOperand : AsmOperandClass {
+ let Name = "MemBarrierOpt";
+ let SuperClasses = [];
+ let ParserMethod = "tryParseMemBarrierOptOperand";
+}
+
+def ProcIFlagsOperand : AsmOperandClass {
+ let Name = "ProcIFlags";
+ let SuperClasses = [];
+ let ParserMethod = "tryParseProcIFlagsOperand";
+}
+
+def MSRMaskOperand : AsmOperandClass {
+ let Name = "MSRMask";
+ let SuperClasses = [];
+ let ParserMethod = "tryParseMSRMaskOperand";
+}
+
+// ARM imod and iflag operands, used only by the CPS instruction.
+def imod_op : Operand<i32> {
+ let PrintMethod = "printCPSIMod";
+}
+
+def iflags_op : Operand<i32> {
+ let PrintMethod = "printCPSIFlag";
+ let ParserMatchClass = ProcIFlagsOperand;
+}
+
// ARM Predicate operand. Default to 14 = always (AL). Second part is CC
// register whose default is 0 (no register).
def pred : PredicateOperand<OtherVT, (ops i32imm, CCR),
@@ -153,16 +187,23 @@ def pred : PredicateOperand<OtherVT, (ops i32imm, CCR),
// Conditional code result for instructions whose 's' bit is set, e.g. subs.
def cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 zero_reg))> {
+ let EncoderMethod = "getCCOutOpValue";
let PrintMethod = "printSBitModifierOperand";
+ let ParserMatchClass = CCOutOperand;
}
// Same as cc_out except it defaults to setting CPSR.
def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> {
+ let EncoderMethod = "getCCOutOpValue";
let PrintMethod = "printSBitModifierOperand";
+ let ParserMatchClass = CCOutOperand;
}
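The getCCOutOpValue encoder hook named in both defs has a one-line job: the cc_out operand is either CPSR (the instruction sets flags) or the zero register. A sketch, with the emitter class and signature assumed from the MC code emitters of this vintage:

unsigned ARMMCCodeEmitter::
getCCOutOpValue(const MCInst &MI, unsigned OpIdx,
                SmallVectorImpl<MCFixup> &Fixups) const {
  // The S bit (Inst{20}) is 1 exactly when the operand is CPSR.
  return MI.getOperand(OpIdx).getReg() == ARM::CPSR ? 1 : 0;
}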
// ARM special operands for disassembly only.
//
+def setend_op : Operand<i32> {
+ let PrintMethod = "printSetendOperand";
+}
def cps_opt : Operand<i32> {
let PrintMethod = "printCPSOptionOperand";
@@ -170,6 +211,7 @@ def cps_opt : Operand<i32> {
def msr_mask : Operand<i32> {
let PrintMethod = "printMSRMaskOperand";
+ let ParserMatchClass = MSRMaskOperand;
}
// A8.6.117, A8.6.118. Different instructions are generated for #0 and #-0.
@@ -179,7 +221,6 @@ def neg_zero : Operand<i32> {
}
//===----------------------------------------------------------------------===//
-
// ARM Instruction templates.
//
@@ -198,14 +239,17 @@ class InstTemplate<AddrMode am, SizeFlagVal sz, IndexMode im,
bit isUnaryDataProc = 0;
bit canXformTo16Bit = 0;
+ // If this is a pseudo instruction, mark it isCodeGenOnly.
+ let isCodeGenOnly = !eq(!cast<string>(f), "Pseudo");
+
// The layout of TSFlags should be kept in sync with ARMBaseInstrInfo.h.
- let TSFlags{3-0} = AM.Value;
- let TSFlags{6-4} = SZ.Value;
- let TSFlags{8-7} = IndexModeBits;
- let TSFlags{14-9} = Form;
- let TSFlags{15} = isUnaryDataProc;
- let TSFlags{16} = canXformTo16Bit;
- let TSFlags{18-17} = D.Value;
+ let TSFlags{4-0} = AM.Value;
+ let TSFlags{7-5} = SZ.Value;
+ let TSFlags{9-8} = IndexModeBits;
+ let TSFlags{15-10} = Form;
+ let TSFlags{16} = isUnaryDataProc;
+ let TSFlags{17} = canXformTo16Bit;
+ let TSFlags{19-18} = D.Value;
let Constraints = cstr;
let Itinerary = itin;
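Widening AddrMode to five bits (to fit AddrMode_i12 = 16) shifts every later field up by one, which is why the whole TSFlags block is renumbered; the accessors on the C++ side must move in lockstep. A sketch of the matching masks (names illustrative; the real constants live in ARMBaseInstrInfo.h):

namespace ARMII {
  enum {
    AddrModeMask   = 0x1f,               // TSFlags{4-0}
    SizeShift      = 5,                  // TSFlags{7-5}
    IndexModeShift = 8,                  // TSFlags{9-8}
    FormShift      = 10                  // TSFlags{15-10}
  };
}
static inline unsigned getAddrMode(uint64_t TSFlags) {
  return TSFlags & ARMII::AddrModeMask;  // mask was 0xf before this change
}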
@@ -225,25 +269,51 @@ class InstThumb<AddrMode am, SizeFlagVal sz, IndexMode im,
Format f, Domain d, string cstr, InstrItinClass itin>
: InstTemplate<am, sz, im, f, d, cstr, itin>;
-class PseudoInst<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
+class PseudoInst<dag oops, dag iops, InstrItinClass itin, list<dag> pattern>
+ // FIXME: This really should derive from InstTemplate instead, as pseudos
+ // don't need encoding information. TableGen doesn't like that
+ // currently. Need to figure out why and fix it.
: InstARM<AddrModeNone, SizeSpecial, IndexModeNone, Pseudo, GenericDomain,
"", itin> {
let OutOperandList = oops;
let InOperandList = iops;
- let AsmString = asm;
let Pattern = pattern;
}
+// PseudoInst that's ARM-mode only.
+class ARMPseudoInst<dag oops, dag iops, SizeFlagVal sz, InstrItinClass itin,
+ list<dag> pattern>
+ : PseudoInst<oops, iops, itin, pattern> {
+ let SZ = sz;
+ list<Predicate> Predicates = [IsARM];
+}
+
+// PseudoInst that's Thumb-mode only.
+class tPseudoInst<dag oops, dag iops, SizeFlagVal sz, InstrItinClass itin,
+ list<dag> pattern>
+ : PseudoInst<oops, iops, itin, pattern> {
+ let SZ = sz;
+ list<Predicate> Predicates = [IsThumb];
+}
+
+// PseudoInst that's Thumb2-mode only.
+class t2PseudoInst<dag oops, dag iops, SizeFlagVal sz, InstrItinClass itin,
+ list<dag> pattern>
+ : PseudoInst<oops, iops, itin, pattern> {
+ let SZ = sz;
+ list<Predicate> Predicates = [IsThumb2];
+}
// Almost all ARM instructions are predicable.
class I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
IndexMode im, Format f, InstrItinClass itin,
string opc, string asm, string cstr,
list<dag> pattern>
: InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
+ bits<4> p;
+ let Inst{31-28} = p;
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
+ let AsmString = !strconcat(opc, "${p}", asm);
let Pattern = pattern;
list<Predicate> Predicates = [IsARM];
}
@@ -270,9 +340,14 @@ class sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
string opc, string asm, string cstr,
list<dag> pattern>
: InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
+ bits<4> p; // Predicate operand
+ bits<1> s; // condition-code set flag ('1' if the insn should set the flags)
+ let Inst{31-28} = p;
+ let Inst{20} = s;
+
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p, cc_out:$s));
- let AsmString = !strconcat(opc, !strconcat("${p}${s}", asm));
+ let AsmString = !strconcat(opc, "${s}${p}", asm);
let Pattern = pattern;
list<Predicate> Predicates = [IsARM];
}
@@ -319,10 +394,6 @@ class ABXI<bits<4> opcod, dag oops, dag iops, InstrItinClass itin,
asm, "", pattern> {
let Inst{27-24} = opcod;
}
-class ABXIx2<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrModeNone, Size8Bytes, IndexModeNone, Pseudo, itin,
- asm, "", pattern>;
// BR_JT instructions
class JTI<dag oops, dag iops, InstrItinClass itin,
@@ -335,19 +406,42 @@ class AIldrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, LdStExFrm, itin,
opc, asm, "", pattern> {
+ bits<4> Rt;
+ bits<4> Rn;
let Inst{27-23} = 0b00011;
let Inst{22-21} = opcod;
let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
let Inst{11-0} = 0b111110011111;
}
class AIstrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, LdStExFrm, itin,
opc, asm, "", pattern> {
+ bits<4> Rd;
+ bits<4> Rt;
+ bits<4> Rn;
let Inst{27-23} = 0b00011;
let Inst{22-21} = opcod;
let Inst{20} = 0;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
let Inst{11-4} = 0b11111001;
+ let Inst{3-0} = Rt;
+}
+class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
+ : AI<oops, iops, MiscFrm, NoItinerary, opc, "\t$Rt, $Rt2, [$Rn]", pattern> {
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<4> Rn;
+ let Inst{27-23} = 0b00010;
+ let Inst{22} = b;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
+ let Inst{11-4} = 0b00001001;
+ let Inst{3-0} = Rt2;
}
// addrmode1 instructions
@@ -372,387 +466,125 @@ class AXI1<bits<4> opcod, dag oops, dag iops, Format f, InstrItinClass itin,
let Inst{24-21} = opcod;
let Inst{27-26} = 0b00;
}
-class AI1x2<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode1, Size8Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-
-
-// addrmode2 loads and stores
-class AI2<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{27-26} = 0b01;
-}
// loads
-class AI2ldw<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AXI2ldw<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2ldb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AXI2ldb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-
-// stores
-class AI2stw<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AXI2stw<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2stb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AXI2stb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-// Pre-indexed loads
-class AI2ldwpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2ldbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-
-// Pre-indexed stores
-class AI2stwpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2stbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
+// LDR/LDRB/STR/STRB/...
+class AI2ldst<bits<3> op, bit isLd, bit isByte, dag oops, dag iops, AddrMode am,
+ Format f, InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : I<oops, iops, am, Size4Bytes, IndexModeNone, f, itin, opc, asm,
+ "", pattern> {
+ let Inst{27-25} = op;
+ let Inst{24} = 1; // 24 == P
+ // 23 == U
+ let Inst{22} = isByte;
+ let Inst{21} = 0; // 21 == W
+ let Inst{20} = isLd;
+}
+// Indexed loads/stores
+class AI2ldstidx<bit isLd, bit isByte, bit isPre, dag oops, dag iops,
+ IndexMode im, Format f, InstrItinClass itin, string opc,
+ string asm, string cstr, list<dag> pattern>
+ : I<oops, iops, AddrMode2, Size4Bytes, im, f, itin,
opc, asm, cstr, pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = 0b01;
-}
-
-// Post-indexed loads
-class AI2ldwpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2ldbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = 0b01;
-}
-
-// Post-indexed stores
-class AI2stwpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = 0b01;
-}
-class AI2stbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 0; // P bit
+ bits<4> Rt;
let Inst{27-26} = 0b01;
+ let Inst{24} = isPre; // P bit
+ let Inst{22} = isByte; // B bit
+ let Inst{21} = isPre; // W bit
+ let Inst{20} = isLd; // L bit
+ let Inst{15-12} = Rt;
+}
+class AI2stridx<bit isByte, bit isPre, dag oops, dag iops,
+ IndexMode im, Format f, InstrItinClass itin, string opc,
+ string asm, string cstr, list<dag> pattern>
+ : AI2ldstidx<0, isByte, isPre, oops, iops, im, f, itin, opc, asm, cstr,
+ pattern> {
+ // AM2 store w/ two operands: (GPR, am2offset)
+ // {13} 1 == Rm, 0 == imm12
+ // {12} isAdd
+ // {11-0} imm12/Rm
+ bits<14> offset;
+ bits<4> Rn;
+ let Inst{25} = offset{13};
+ let Inst{23} = offset{12};
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = offset{11-0};
}
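The 14-bit offset field is a flattened am2offset operand; a hypothetical packer restating the bit comments above:

// offset{13}   : 1 == register offset (Rm), 0 == imm12  (feeds Inst{25})
// offset{12}   : isAdd                                   (feeds Inst{23}, U)
// offset{11-0} : imm12, or shift+Rm for the register form
static uint32_t packAM2Offset(bool IsReg, bool IsAdd, uint32_t ImmOrShiftRm) {
  return (uint32_t(IsReg) << 13) | (uint32_t(IsAdd) << 12)
       | (ImmOrShiftRm & 0xfff);
}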
// addrmode3 instructions
-class AI3<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-class AXI3<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern>;
-
-// loads
-class AI3ldh<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3ldh<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldsh<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
+class AI3ld<bits<4> op, bit op20, dag oops, dag iops, Format f,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
+ bits<14> addr;
+ bits<4> Rt;
let Inst{27-25} = 0b000;
-}
-class AXI3ldsh<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldsb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
+ let Inst{24} = 1; // P bit
+ let Inst{23} = addr{8}; // U bit
+ let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
+ let Inst{21} = 0; // W bit
+ let Inst{20} = op20; // L bit
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{15-12} = Rt; // Rt
+ let Inst{11-8} = addr{7-4}; // imm7_4/zero
+ let Inst{7-4} = op;
+ let Inst{3-0} = addr{3-0}; // imm3_0/Rm
+}
+
+class AI3ldstidx<bits<4> op, bit op20, bit isLd, bit isPre, dag oops, dag iops,
+ IndexMode im, Format f, InstrItinClass itin, string opc,
+ string asm, string cstr, list<dag> pattern>
+ : I<oops, iops, AddrMode3, Size4Bytes, im, f, itin,
+ opc, asm, cstr, pattern> {
+ bits<4> Rt;
let Inst{27-25} = 0b000;
-}
-class AXI3ldsb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldd<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
+ let Inst{24} = isPre; // P bit
+ let Inst{21} = isPre; // W bit
+ let Inst{20} = op20; // L bit
+ let Inst{15-12} = Rt; // Rt
+ let Inst{7-4} = op;
+}
+class AI3stridx<bits<4> op, bit isByte, bit isPre, dag oops, dag iops,
+ IndexMode im, Format f, InstrItinClass itin, string opc,
+ string asm, string cstr, list<dag> pattern>
+ : AI2ldstidx<0, isByte, isPre, oops, iops, im, f, itin, opc, asm, cstr,
+ pattern> {
+ // AM3 store w/ two operands: (GPR, am3offset)
+ bits<14> offset;
+ bits<4> Rt;
+ bits<4> Rn;
let Inst{27-25} = 0b000;
+ let Inst{23} = offset{8};
+ let Inst{22} = offset{9};
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt; // Rt
+ let Inst{11-8} = offset{7-4}; // imm7_4/zero
+ let Inst{7-4} = op;
+ let Inst{3-0} = offset{3-0}; // imm3_0/Rm
}
// stores
-class AI3sth<dag oops, dag iops, Format f, InstrItinClass itin,
+class AI3str<bits<4> op, dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3sth<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3std<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
+ bits<14> addr;
+ bits<4> Rt;
let Inst{27-25} = 0b000;
+ let Inst{24} = 1; // P bit
+ let Inst{23} = addr{8}; // U bit
+ let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
+ let Inst{21} = 0; // W bit
+ let Inst{20} = 0; // L bit
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{15-12} = Rt; // Rt
+ let Inst{11-8} = addr{7-4}; // imm7_4/zero
+ let Inst{7-4} = op;
+ let Inst{3-0} = addr{3-0}; // imm3_0/Rm
}
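AM3 flattens its address the same way, but with an 8-bit immediate split around the opcode field; a hypothetical packer matching the lets above:

static uint32_t packAM3Addr(bool IsImm, unsigned Rn, bool IsAdd,
                            uint32_t Imm8OrRm) {
  return (uint32_t(IsImm) << 13)  // addr{13}: 1 == imm8, 0 == Rm (Inst{22})
       | ((Rn & 0xf) << 9)        // addr{12-9}: base register    (Inst{19-16})
       | (uint32_t(IsAdd) << 8)   // addr{8}: U bit               (Inst{23})
       | (Imm8OrRm & 0xff);       // addr{7-0}: imm7_4:imm3_0, or Rm in {3-0}
}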
-// Pre-indexed loads
-class AI3ldhpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldshpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldsbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3lddpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-
-
// Pre-indexed stores
class AI3sthpr<dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string asm, string cstr, list<dag> pattern>
@@ -781,60 +613,6 @@ class AI3stdpr<dag oops, dag iops, Format f, InstrItinClass itin,
let Inst{27-25} = 0b000;
}
-// Post-indexed loads
-class AI3ldhpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldshpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldsbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3lddpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-
// Post-indexed stores
class AI3sthpo<dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string asm, string cstr, list<dag> pattern>
@@ -864,21 +642,17 @@ class AI3stdpo<dag oops, dag iops, Format f, InstrItinClass itin,
}
// addrmode4 instructions
-class AXI4ld<dag oops, dag iops, IndexMode im, Format f, InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : XI<oops, iops, AddrMode4, Size4Bytes, im, f, itin,
- asm, cstr, pattern> {
- let Inst{20} = 1; // L bit
- let Inst{22} = 0; // S bit
+class AXI4<dag oops, dag iops, IndexMode im, Format f, InstrItinClass itin,
+ string asm, string cstr, list<dag> pattern>
+ : XI<oops, iops, AddrMode4, Size4Bytes, im, f, itin, asm, cstr, pattern> {
+ bits<4> p;
+ bits<16> regs;
+ bits<4> Rn;
+ let Inst{31-28} = p;
let Inst{27-25} = 0b100;
-}
-class AXI4st<dag oops, dag iops, IndexMode im, Format f, InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : XI<oops, iops, AddrMode4, Size4Bytes, im, f, itin,
- asm, cstr, pattern> {
- let Inst{20} = 0; // L bit
let Inst{22} = 0; // S bit
- let Inst{27-25} = 0b100;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
}
// Unsigned multiply, multiply-accumulate instructions.
@@ -899,24 +673,65 @@ class AsMul1I<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
}
// Most significant word multiply
-class AMul2I<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
+class AMul2I<bits<7> opcod, bits<4> opc7_4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
opc, asm, "", pattern> {
- let Inst{7-4} = 0b1001;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{7-4} = opc7_4;
let Inst{20} = 1;
let Inst{27-21} = opcod;
+ let Inst{19-16} = Rd;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
+// MSW multiply w/ Ra operand
+class AMul2Ia<bits<7> opcod, bits<4> opc7_4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : AMul2I<opcod, opc7_4, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Ra;
+ let Inst{15-12} = Ra;
}
// SMUL<x><y> / SMULW<y> / SMLA<x><y> / SMLAW<x><y>
-class AMulxyI<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
+class AMulxyIbase<bits<7> opcod, bits<2> bit6_5, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
opc, asm, "", pattern> {
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{4} = 0;
let Inst{7} = 1;
let Inst{20} = 0;
let Inst{27-21} = opcod;
+ let Inst{6-5} = bit6_5;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
+class AMulxyI<bits<7> opcod, bits<2> bit6_5, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : AMulxyIbase<opcod, bit6_5, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ let Inst{19-16} = Rd;
+}
+
+// AMulxyI with Ra operand
+class AMulxyIa<bits<7> opcod, bits<2> bit6_5, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : AMulxyI<opcod, bit6_5, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Ra;
+ let Inst{15-12} = Ra;
+}
+// SMLAL*
+class AMulxyI64<bits<7> opcod, bits<2> bit6_5, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : AMulxyIbase<opcod, bit6_5, oops, iops, itin, opc, asm, pattern> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ let Inst{19-16} = RdHi;
+ let Inst{15-12} = RdLo;
}
// Extend instructions.
@@ -924,16 +739,47 @@ class AExtI<bits<8> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, ExtFrm, itin,
opc, asm, "", pattern> {
+ // All AExtI instructions have Rd and Rm register operands.
+ bits<4> Rd;
+ bits<4> Rm;
+ let Inst{15-12} = Rd;
+ let Inst{3-0} = Rm;
let Inst{7-4} = 0b0111;
+ let Inst{9-8} = 0b00;
let Inst{27-20} = opcod;
}
// Misc Arithmetic instructions.
-class AMiscA1I<bits<8> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
+class AMiscA1I<bits<8> opcod, bits<4> opc7_4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
: I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, ArithMiscFrm, itin,
opc, asm, "", pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
let Inst{27-20} = opcod;
+ let Inst{19-16} = 0b1111;
+ let Inst{15-12} = Rd;
+ let Inst{11-8} = 0b1111;
+ let Inst{7-4} = opc7_4;
+ let Inst{3-0} = Rm;
+}
+
+// PKH instructions
+class APKHI<bits<8> opcod, bit tb, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, ArithMiscFrm, itin,
+ opc, asm, "", pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ bits<8> sh;
+ let Inst{27-20} = opcod;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = sh{7-3};
+ let Inst{6} = tb;
+ let Inst{5-4} = 0b01;
+ let Inst{3-0} = Rm;
}
//===----------------------------------------------------------------------===//
@@ -950,12 +796,9 @@ class ARMV6Pat<dag pattern, dag result> : Pat<pattern, result> {
}
//===----------------------------------------------------------------------===//
-//
// Thumb Instruction Format Definitions.
//
-// TI - Thumb instruction.
-
class ThumbI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
InstrItinClass itin, string asm, string cstr, list<dag> pattern>
: InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
@@ -966,6 +809,7 @@ class ThumbI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
list<Predicate> Predicates = [IsThumb];
}
+// TI - Thumb instruction.
class TI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern>
: ThumbI<oops, iops, AddrModeNone, Size2Bytes, itin, asm, "", pattern>;
@@ -986,6 +830,13 @@ class TIx2<bits<5> opcod1, bits<2> opcod2, bit opcod3,
let Inst{12} = opcod3;
}
+// Move to/from coprocessor instructions
+class T1Cop<dag oops, dag iops, string asm, list<dag> pattern>
+ : ThumbI<oops, iops, AddrModeNone, Size4Bytes, NoItinerary, asm, "", pattern>,
+ Encoding, Requires<[IsThumb, HasV6]> {
+ let Inst{31-28} = 0b1110;
+}
+
// BR_JT instructions
class TJTI<dag oops, dag iops, InstrItinClass itin, string asm,
list<dag> pattern>
@@ -999,7 +850,7 @@ class Thumb1I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
let InOperandList = iops;
let AsmString = asm;
let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
class T1I<dag oops, dag iops, InstrItinClass itin,
@@ -1008,9 +859,6 @@ class T1I<dag oops, dag iops, InstrItinClass itin,
class T1Ix2<dag oops, dag iops, InstrItinClass itin,
string asm, list<dag> pattern>
: Thumb1I<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>;
-class T1JTI<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb1I<oops, iops, AddrModeNone, SizeSpecial, itin, asm, "", pattern>;
// Two-address instructions
class T1It<dag oops, dag iops, InstrItinClass itin,
@@ -1025,9 +873,9 @@ class Thumb1sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
: InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
let OutOperandList = !con(oops, (outs s_cc_out:$s));
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${s}${p}", asm));
+ let AsmString = !strconcat(opc, "${s}${p}", asm);
let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
class T1sI<dag oops, dag iops, InstrItinClass itin,
@@ -1038,7 +886,7 @@ class T1sI<dag oops, dag iops, InstrItinClass itin,
class T1sIt<dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: Thumb1sI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm,
- "$lhs = $dst", pattern>;
+ "$Rn = $Rdn", pattern>;
// Thumb1 instruction that can be predicated.
class Thumb1pI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
@@ -1047,9 +895,9 @@ class Thumb1pI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
: InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
+ let AsmString = !strconcat(opc, "${p}", asm);
let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
class T1pI<dag oops, dag iops, InstrItinClass itin,
@@ -1060,17 +908,8 @@ class T1pI<dag oops, dag iops, InstrItinClass itin,
class T1pIt<dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: Thumb1pI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm,
- "$lhs = $dst", pattern>;
+ "$Rn = $Rdn", pattern>;
-class T1pI1<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_1, Size2Bytes, itin, opc, asm, "", pattern>;
-class T1pI2<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_2, Size2Bytes, itin, opc, asm, "", pattern>;
-class T1pI4<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_4, Size2Bytes, itin, opc, asm, "", pattern>;
class T1pIs<dag oops, dag iops,
InstrItinClass itin, string opc, string asm, list<dag> pattern>
: Thumb1pI<oops, iops, AddrModeT1_s, Size2Bytes, itin, opc, asm, "", pattern>;
@@ -1099,7 +938,7 @@ class T1DataProcessing<bits<4> opcode> : Encoding16 {
// A6.2.3 Special data instructions and branch and exchange encoding.
class T1Special<bits<4> opcode> : Encoding16 {
let Inst{15-10} = 0b010001;
- let Inst{9-6} = opcode;
+ let Inst{9-6} = opcode;
}
// A6.2.4 Load/store single data item encoding.
@@ -1107,12 +946,37 @@ class T1LoadStore<bits<4> opA, bits<3> opB> : Encoding16 {
let Inst{15-12} = opA;
let Inst{11-9} = opB;
}
-class T1LdSt<bits<3> opB> : T1LoadStore<0b0101, opB>;
-class T1LdSt4Imm<bits<3> opB> : T1LoadStore<0b0110, opB>; // Immediate, 4 bytes
-class T1LdSt1Imm<bits<3> opB> : T1LoadStore<0b0111, opB>; // Immediate, 1 byte
-class T1LdSt2Imm<bits<3> opB> : T1LoadStore<0b1000, opB>; // Immediate, 2 bytes
class T1LdStSP<bits<3> opB> : T1LoadStore<0b1001, opB>; // SP relative
+// Helper classes to encode Thumb1 loads and stores. For immediates, the
+// following bits are used for "opA" (see A6.2.4):
+//
+// 0b0110 => Immediate, 4 bytes
+// 0b1000 => Immediate, 2 bytes
+// 0b0111 => Immediate, 1 byte
+class T1pILdStEncode<bits<3> opcode, dag oops, dag iops, AddrMode am,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : Thumb1pI<oops, iops, am, Size2Bytes, itin, opc, asm, "", pattern>,
+ T1LoadStore<0b0101, opcode> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{8-6} = addr{5-3}; // Rm
+ let Inst{5-3} = addr{2-0}; // Rn
+ let Inst{2-0} = Rt;
+}
+class T1pILdStEncodeImm<bits<4> opA, bit opB, dag oops, dag iops, AddrMode am,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : Thumb1pI<oops, iops, am, Size2Bytes, itin, opc, asm, "", pattern>,
+ T1LoadStore<opA, {opB,?,?}> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-6} = addr{7-3}; // imm5
+ let Inst{5-3} = addr{2-0}; // Rn
+ let Inst{2-0} = Rt;
+}
+
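Worked example for the immediate forms (illustrative): the imm5 field holds the byte offset already divided by the access size, so a word load of #28 encodes imm5 = 7.

static unsigned scaleT1ImmOffset(unsigned ByteOffset, unsigned AccessSize) {
  // Caller guarantees alignment and range (offset/size fits in 5 bits).
  return ByteOffset / AccessSize;  // lands in addr{7-3} -> Inst{10-6}
}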
// A6.2.5 Miscellaneous 16-bit instructions encoding.
class T1Misc<bits<7> opcode> : Encoding16 {
let Inst{15-12} = 0b1011;
@@ -1126,7 +990,7 @@ class Thumb2I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
: InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
+ let AsmString = !strconcat(opc, "${p}", asm);
let Pattern = pattern;
list<Predicate> Predicates = [IsThumb2];
}
@@ -1134,16 +998,19 @@ class Thumb2I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
// Same as Thumb2I except it can optionally modify CPSR. Note it's modeled as an
// input operand since by default it's a zero register. It will become an
// implicit def once it's "flipped".
-//
+//
// FIXME: This uses unified syntax so {s} comes before {p}. We should make it
// more consistent.
class Thumb2sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
InstrItinClass itin,
string opc, string asm, string cstr, list<dag> pattern>
: InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
+ bits<1> s; // condition-code set flag ('1' if the insn should set the flags)
+ let Inst{20} = s;
+
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p, cc_out:$s));
- let AsmString = !strconcat(opc, !strconcat("${s}${p}", asm));
+ let AsmString = !strconcat(opc, "${s}${p}", asm);
let Pattern = pattern;
list<Predicate> Predicates = [IsThumb2];
}
@@ -1168,7 +1035,7 @@ class ThumbXI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
let InOperandList = iops;
let AsmString = asm;
let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
class T2I<dag oops, dag iops, InstrItinClass itin,
@@ -1186,17 +1053,23 @@ class T2Iso<dag oops, dag iops, InstrItinClass itin,
class T2Ipc<dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: Thumb2I<oops, iops, AddrModeT2_pc, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Ii8s4<bit P, bit W, bit load, dag oops, dag iops, InstrItinClass itin,
+class T2Ii8s4<bit P, bit W, bit isLoad, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: Thumb2I<oops, iops, AddrModeT2_i8s4, Size4Bytes, itin, opc, asm, "",
pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<13> addr;
+ let Inst{31-25} = 0b1110100;
let Inst{24} = P;
- let Inst{23} = ?; // The U bit.
+ let Inst{23} = addr{8};
let Inst{22} = 1;
let Inst{21} = W;
- let Inst{20} = load;
+ let Inst{20} = isLoad;
+ let Inst{19-16} = addr{12-9};
+ let Inst{15-12} = Rt{3-0};
+ let Inst{11-8} = Rt2{3-0};
+ let Inst{7-0} = addr{7-0};
}
class T2sI<dag oops, dag iops, InstrItinClass itin,
@@ -1210,9 +1083,11 @@ class T2JTI<dag oops, dag iops, InstrItinClass itin,
string asm, list<dag> pattern>
: Thumb2XI<oops, iops, AddrModeNone, SizeSpecial, itin, asm, "", pattern>;
-class T2Ix2<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeNone, Size8Bytes, itin, opc, asm, "", pattern>;
+// Move to/from coprocessor instructions
+class T2Cop<dag oops, dag iops, string asm, list<dag> pattern>
+ : T2XI<oops, iops, NoItinerary, asm, pattern>, Requires<[IsThumb2, HasV6]> {
+ let Inst{31-28} = 0b1111;
+}
// Two-address instructions
class T2XIt<dag oops, dag iops, InstrItinClass itin,
@@ -1227,7 +1102,7 @@ class T2Iidxldst<bit signed, bits<2> opcod, bit load, bit pre,
: InstARM<am, Size4Bytes, im, ThumbFrm, GenericDomain, cstr, itin> {
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
+ let AsmString = !strconcat(opc, "${p}", asm);
let Pattern = pattern;
list<Predicate> Predicates = [IsThumb2];
let Inst{31-27} = 0b11111;
@@ -1240,29 +1115,25 @@ class T2Iidxldst<bit signed, bits<2> opcod, bit load, bit pre,
// (P, W) = (1, 1) Pre-indexed or (0, 1) Post-indexed
let Inst{10} = pre; // The P bit.
let Inst{8} = 1; // The W bit.
-}
-// Helper class for disassembly only
-// A6.3.16 & A6.3.17
-// T2Imac - Thumb2 multiply [accumulate, and absolute difference] instructions.
-class T2I_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
- let Inst{31-27} = 0b11111;
- let Inst{26-24} = 0b011;
- let Inst{23} = long;
- let Inst{22-20} = op22_20;
- let Inst{7-4} = op7_4;
+ bits<9> addr;
+ let Inst{7-0} = addr{7-0};
+ let Inst{9} = addr{8}; // Sign bit
+
+ bits<4> Rt;
+ bits<4> Rn;
+ let Inst{15-12} = Rt{3-0};
+ let Inst{19-16} = Rn{3-0};
}
// Tv5Pat - Same as Pat<>, but requires V5T Thumb mode.
class Tv5Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb1Only, HasV5T];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only, HasV5T];
}
// T1Pat - Same as Pat<>, but requires that the compiler be in Thumb1 mode.
class T1Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb1Only];
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
// T2Pat - Same as Pat<>, but requires that the compiler be in Thumb2 mode.
@@ -1281,10 +1152,13 @@ class VFPI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
IndexMode im, Format f, InstrItinClass itin,
string opc, string asm, string cstr, list<dag> pattern>
: InstARM<am, sz, im, f, VFPDomain, cstr, itin> {
+ bits<4> p;
+ let Inst{31-28} = p;
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
+ let AsmString = !strconcat(opc, "${p}", asm);
let Pattern = pattern;
+ let PostEncoderMethod = "VFPThumb2PostEncoder";
list<Predicate> Predicates = [HasVFP2];
}
@@ -1293,17 +1167,22 @@ class VFPXI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
IndexMode im, Format f, InstrItinClass itin,
string asm, string cstr, list<dag> pattern>
: InstARM<am, sz, im, f, VFPDomain, cstr, itin> {
+ bits<4> p;
+ let Inst{31-28} = p;
let OutOperandList = oops;
let InOperandList = iops;
let AsmString = asm;
let Pattern = pattern;
+ let PostEncoderMethod = "VFPThumb2PostEncoder";
list<Predicate> Predicates = [HasVFP2];
}
class VFPAI<dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: VFPI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
+ opc, asm, "", pattern> {
+ let PostEncoderMethod = "VFPThumb2PostEncoder";
+}
// ARM VFP addrmode5 loads and stores
class ADI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
@@ -1311,12 +1190,24 @@ class ADI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
string opc, string asm, list<dag> pattern>
: VFPI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
VFPLdStFrm, itin, opc, asm, "", pattern> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<13> addr;
+
+ // Encode instruction operands.
+ let Inst{23} = addr{8}; // U (add = (U == '1'))
+ let Inst{22} = Dd{4};
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{15-12} = Dd{3-0};
+ let Inst{7-0} = addr{7-0}; // imm8
+
// TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-24} = opcod1;
let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1011;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // Double precision
- // 64-bit loads & stores operate on both NEON and VFP pipelines.
+ // Loads & stores operate on both NEON and VFP pipelines.
let D = VFPNeonDomain;
}
@@ -1325,10 +1216,36 @@ class ASI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
string opc, string asm, list<dag> pattern>
: VFPI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
VFPLdStFrm, itin, opc, asm, "", pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<13> addr;
+
+ // Encode instruction operands.
+ let Inst{23} = addr{8}; // U (add = (U == '1'))
+ let Inst{22} = Sd{0};
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{15-12} = Sd{4-1};
+ let Inst{7-0} = addr{7-0}; // imm8
+
// TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-24} = opcod1;
let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1010;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
+
+ // Loads & stores operate on both NEON and VFP pipelines.
+ let D = VFPNeonDomain;
+}
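Note the two split conventions: D-registers (ADI5 above) put the high bit of the register number in Inst{22}, while S-registers put the low bit there. Restating the lets above:

static void splitDReg(unsigned Reg, unsigned &Field, unsigned &Hi) {
  Hi    = (Reg >> 4) & 1;    // Dd{4}   -> Inst{22}
  Field = Reg & 0xf;         // Dd{3-0} -> Inst{15-12}
}
static void splitSReg(unsigned Reg, unsigned &Field, unsigned &Lo) {
  Lo    = Reg & 1;           // Sd{0}   -> Inst{22}
  Field = (Reg >> 1) & 0xf;  // Sd{4-1} -> Inst{15-12}
}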
+
+// VFP Load / store multiple pseudo instructions.
+class PseudoVFPLdStM<dag oops, dag iops, InstrItinClass itin, string cstr,
+ list<dag> pattern>
+ : InstARM<AddrMode4, Size4Bytes, IndexModeNone, Pseudo, VFPNeonDomain,
+ cstr, itin> {
+ let OutOperandList = oops;
+ let InOperandList = !con(iops, (ins pred:$p));
+ let Pattern = pattern;
+ list<Predicate> Predicates = [HasVFP2];
}
// Load / store multiple
@@ -1336,21 +1253,40 @@ class AXDI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
string asm, string cstr, list<dag> pattern>
: VFPXI<oops, iops, AddrMode4, Size4Bytes, im,
VFPLdStMulFrm, itin, asm, cstr, pattern> {
+ // Instruction operands.
+ bits<4> Rn;
+ bits<13> regs;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Rn;
+ let Inst{22} = regs{12};
+ let Inst{15-12} = regs{11-8};
+ let Inst{7-0} = regs{7-0};
+
// TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-25} = 0b110;
- let Inst{11-8} = 0b1011;
-
- // 64-bit loads & stores operate on both NEON and VFP pipelines.
- let D = VFPNeonDomain;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // Double precision
}
class AXSI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
string asm, string cstr, list<dag> pattern>
: VFPXI<oops, iops, AddrMode4, Size4Bytes, im,
VFPLdStMulFrm, itin, asm, cstr, pattern> {
+ // Instruction operands.
+ bits<4> Rn;
+ bits<13> regs;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Rn;
+ let Inst{22} = regs{8};
+ let Inst{15-12} = regs{12-9};
+ let Inst{7-0} = regs{7-0};
+
// TODO: Mark the instructions with the appropriate subtarget info.
let Inst{27-25} = 0b110;
- let Inst{11-8} = 0b1010;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
}
// Double precision, unary
@@ -1358,10 +1294,21 @@ class ADuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
string asm, list<dag> pattern>
: VFPAI<oops, iops, VFPUnaryFrm, itin, opc, asm, pattern> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+
let Inst{27-23} = opcod1;
let Inst{21-20} = opcod2;
let Inst{19-16} = opcod3;
- let Inst{11-8} = 0b1011;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // Double precision
let Inst{7-6} = opcod4;
let Inst{4} = opcod5;
}
@@ -1371,24 +1318,25 @@ class ADbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
dag iops, InstrItinClass itin, string opc, string asm,
list<dag> pattern>
: VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1011;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Dn;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{19-16} = Dn{3-0};
+ let Inst{7} = Dn{4};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
-// Double precision, binary, VML[AS] (for additional predicate)
-class ADbI_vmlX<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
- dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
- : VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
let Inst{27-23} = opcod1;
let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1011;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 1; // Double precision
let Inst{6} = op6;
let Inst{4} = op4;
- list<Predicate> Predicates = [HasVFP2, UseVMLx];
}
// Single precision, unary
@@ -1396,16 +1344,27 @@ class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
string asm, list<dag> pattern>
: VFPAI<oops, iops, VFPUnaryFrm, itin, opc, asm, pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
let Inst{27-23} = opcod1;
let Inst{21-20} = opcod2;
let Inst{19-16} = opcod3;
- let Inst{11-8} = 0b1010;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
let Inst{7-6} = opcod4;
let Inst{4} = opcod5;
}
-// Single precision unary, if no NEON
-// Same as ASuI except not available if NEON is enabled
+// Single precision unary, if no NEON. Same as ASuI except not available if
+// NEON is enabled.
class ASuIn<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
string asm, list<dag> pattern>
@@ -1418,20 +1377,47 @@ class ASuIn<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
class ASbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops,
InstrItinClass itin, string opc, string asm, list<dag> pattern>
: VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sn;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
let Inst{27-23} = opcod1;
let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1010;
+ let Inst{11-9} = 0b101;
+ let Inst{8} = 0; // Single precision
let Inst{6} = op6;
let Inst{4} = op4;
}
-// Single precision binary, if no NEON
-// Same as ASbI except not available if NEON is enabled
+// Single precision binary, if no NEON. Same as ASbI except not available if
+// NEON is enabled.
class ASbIn<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
dag iops, InstrItinClass itin, string opc, string asm,
list<dag> pattern>
: ASbI<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
list<Predicate> Predicates = [HasVFP2,DontUseNEONForFP];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sn;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
}
// VFP conversion instructions
@@ -1502,9 +1488,7 @@ class NeonI<dag oops, dag iops, AddrMode am, IndexMode im, Format f,
: InstARM<am, Size4Bytes, im, f, NeonDomain, cstr, itin> {
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(
- !strconcat(!strconcat(opc, "${p}"), !strconcat(".", dt)),
- !strconcat("\t", asm));
+ let AsmString = !strconcat(opc, "${p}", ".", dt, "\t", asm);
let Pattern = pattern;
list<Predicate> Predicates = [HasNEON];
}
@@ -1516,7 +1500,7 @@ class NeonXI<dag oops, dag iops, AddrMode am, IndexMode im, Format f,
: InstARM<am, Size4Bytes, im, f, NeonDomain, cstr, itin> {
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(!strconcat(opc, "${p}"), !strconcat("\t", asm));
+ let AsmString = !strconcat(opc, "${p}", "\t", asm);
let Pattern = pattern;
list<Predicate> Predicates = [HasNEON];
}
@@ -1531,6 +1515,25 @@ class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
let Inst{21-20} = op21_20;
let Inst{11-8} = op11_8;
let Inst{7-4} = op7_4;
+
+ let PostEncoderMethod = "NEONThumb2LoadStorePostEncoder";
+
+ bits<5> Vd;
+ bits<6> Rn;
+ bits<4> Rm;
+
+ let Inst{22} = Vd{4};
+ let Inst{15-12} = Vd{3-0};
+ let Inst{19-16} = Rn{3-0};
+ let Inst{3-0} = Rm{3-0};
+}
+
+class NLdStLn<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string dt, string asm, string cstr, list<dag> pattern>
+ : NLdSt<op23, op21_20, op11_8, op7_4, oops, iops, itin, opc,
+ dt, asm, cstr, pattern> {
+ bits<3> lane;
}
class PseudoNLdSt<dag oops, dag iops, InstrItinClass itin, string cstr>
@@ -1541,11 +1544,22 @@ class PseudoNLdSt<dag oops, dag iops, InstrItinClass itin, string cstr>
list<Predicate> Predicates = [HasNEON];
}
+class PseudoNeonI<dag oops, dag iops, InstrItinClass itin, string cstr,
+ list<dag> pattern>
+ : InstARM<AddrModeNone, Size4Bytes, IndexModeNone, Pseudo, NeonDomain, cstr,
+ itin> {
+ let OutOperandList = oops;
+ let InOperandList = !con(iops, (ins pred:$p));
+ let Pattern = pattern;
+ list<Predicate> Predicates = [HasNEON];
+}
+
class NDataI<dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string dt, string asm, string cstr, list<dag> pattern>
: NeonI<oops, iops, AddrModeNone, IndexModeNone, f, itin, opc, dt, asm, cstr,
pattern> {
let Inst{31-25} = 0b1111001;
+ let PostEncoderMethod = "NEONThumb2DataIPostEncoder";
}
class NDataXI<dag oops, dag iops, Format f, InstrItinClass itin,
@@ -1553,6 +1567,7 @@ class NDataXI<dag oops, dag iops, Format f, InstrItinClass itin,
: NeonXI<oops, iops, AddrModeNone, IndexModeNone, f, itin, opc, asm,
cstr, pattern> {
let Inst{31-25} = 0b1111001;
+ let PostEncoderMethod = "NEONThumb2DataIPostEncoder";
}
// NEON "one register and a modified immediate" format.
@@ -1569,6 +1584,16 @@ class N1ModImm<bit op23, bits<3> op21_19, bits<4> op11_8, bit op7, bit op6,
let Inst{6} = op6;
let Inst{5} = op5;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<13> SIMM;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{24} = SIMM{7};
+ let Inst{18-16} = SIMM{6-4};
+ let Inst{3-0} = SIMM{3-0};
}
// NEON 2 vector register format.
@@ -1584,6 +1609,15 @@ class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
let Inst{11-7} = op11_7;
let Inst{6} = op6;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<5> Vm;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{3-0} = Vm{3-0};
+ let Inst{5} = Vm{4};
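+ // For example, Vd = D17 (0b10001) encodes as Inst{22} = 1 (the D bit,
+ // Vd{4}) and Inst{15-12} = 0b0001. Q registers encode as their even
+ // D half, e.g. Q8 as D16.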
}
// Same as N2V except it doesn't have a datatype suffix.
@@ -1599,6 +1633,15 @@ class N2VX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
let Inst{11-7} = op11_7;
let Inst{6} = op6;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<5> Vm;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{3-0} = Vm{3-0};
+ let Inst{5} = Vm{4};
}
// NEON 2 vector register with immediate.
@@ -1612,6 +1655,17 @@ class N2VImm<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
let Inst{7} = op7;
let Inst{6} = op6;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<5> Vm;
+ bits<6> SIMM;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{3-0} = Vm{3-0};
+ let Inst{5} = Vm{4};
+ let Inst{21-16} = SIMM{5-0};
}
// NEON 3 vector register format.
@@ -1625,6 +1679,18 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
let Inst{11-8} = op11_8;
let Inst{6} = op6;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<5> Vn;
+ bits<5> Vm;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{19-16} = Vn{3-0};
+ let Inst{7} = Vn{4};
+ let Inst{3-0} = Vm{3-0};
+ let Inst{5} = Vm{4};
}
// Same as N3V except it doesn't have a data type suffix.
@@ -1639,13 +1705,25 @@ class N3VX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6,
let Inst{11-8} = op11_8;
let Inst{6} = op6;
let Inst{4} = op4;
+
+ // Instruction operands.
+ bits<5> Vd;
+ bits<5> Vn;
+ bits<5> Vm;
+
+ let Inst{15-12} = Vd{3-0};
+ let Inst{22} = Vd{4};
+ let Inst{19-16} = Vn{3-0};
+ let Inst{7} = Vn{4};
+ let Inst{3-0} = Vm{3-0};
+ let Inst{5} = Vm{4};
}
// NEON VMOVs between scalar and core registers.
class NVLaneOp<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
dag oops, dag iops, Format f, InstrItinClass itin,
string opc, string dt, string asm, list<dag> pattern>
- : InstARM<AddrModeNone, Size4Bytes, IndexModeNone, f, GenericDomain,
+ : InstARM<AddrModeNone, Size4Bytes, IndexModeNone, f, NeonDomain,
"", itin> {
let Inst{27-20} = opcod1;
let Inst{11-8} = opcod2;
@@ -1654,11 +1732,21 @@ class NVLaneOp<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
let OutOperandList = oops;
let InOperandList = !con(iops, (ins pred:$p));
- let AsmString = !strconcat(
- !strconcat(!strconcat(opc, "${p}"), !strconcat(".", dt)),
- !strconcat("\t", asm));
+ let AsmString = !strconcat(opc, "${p}", ".", dt, "\t", asm);
let Pattern = pattern;
list<Predicate> Predicates = [HasNEON];
+
+ let PostEncoderMethod = "NEONThumb2DupPostEncoder";
+
+ bits<5> V;
+ bits<4> R;
+ bits<4> p;
+ bits<4> lane;
+
+ let Inst{31-28} = p{3-0};
+ let Inst{7} = V{4};
+ let Inst{19-16} = V{3-0};
+ let Inst{15-12} = R{3-0};
}
class NVGetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
dag oops, dag iops, InstrItinClass itin,
@@ -1687,6 +1775,15 @@ class NVDupLane<bits<4> op19_16, bit op6, dag oops, dag iops,
let Inst{11-7} = 0b11000;
let Inst{6} = op6;
let Inst{4} = 0;
+
+ bits<5> Vd;
+ bits<5> Vm;
+ bits<4> lane;
+
+ let Inst{22} = Vd{4};
+ let Inst{15-12} = Vd{3-0};
+ let Inst{5} = Vm{4};
+ let Inst{3-0} = Vm{3-0};
}
// NEONFPPat - Same as Pat<>, but requires that the compiler be using NEON
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index ba228ff..6f48d96 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -33,13 +33,13 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
default: break;
case ARM::LDR_PRE:
case ARM::LDR_POST:
- return ARM::LDR;
+ return ARM::LDRi12;
case ARM::LDRH_PRE:
case ARM::LDRH_POST:
return ARM::LDRH;
case ARM::LDRB_PRE:
case ARM::LDRB_POST:
- return ARM::LDRB;
+ return ARM::LDRBi12;
case ARM::LDRSH_PRE:
case ARM::LDRSH_POST:
return ARM::LDRSH;
@@ -48,39 +48,14 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
return ARM::LDRSB;
case ARM::STR_PRE:
case ARM::STR_POST:
- return ARM::STR;
+ return ARM::STRi12;
case ARM::STRH_PRE:
case ARM::STRH_POST:
return ARM::STRH;
case ARM::STRB_PRE:
case ARM::STRB_POST:
- return ARM::STRB;
+ return ARM::STRBi12;
}
return 0;
}
-
-void ARMInstrInfo::
-reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig,
- const TargetRegisterInfo &TRI) const {
- DebugLoc dl = Orig->getDebugLoc();
- unsigned Opcode = Orig->getOpcode();
- switch (Opcode) {
- default:
- break;
- case ARM::MOVi2pieces: {
- RI.emitLoadConstPool(MBB, I, dl,
- DestReg, SubIdx,
- Orig->getOperand(1).getImm(),
- (ARMCC::CondCodes)Orig->getOperand(2).getImm(),
- Orig->getOperand(3).getReg());
- MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
- return;
- }
- }
-
- return ARMBaseInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, TRI);
-}
-
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
index 4563ffe..f2c7bdc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -32,11 +32,6 @@ public:
// if there is not such an opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
- void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo &TRI) const;
-
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index e66f9b9..c827ce3d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -58,10 +58,9 @@ def SDT_ARMEH_SJLJ_Setjmp : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisPtrTy<1>,
SDTCisInt<2>]>;
def SDT_ARMEH_SJLJ_Longjmp: SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
-def SDT_ARMMEMBARRIER : SDTypeProfile<0, 0, []>;
-def SDT_ARMSYNCBARRIER : SDTypeProfile<0, 0, []>;
-def SDT_ARMMEMBARRIERMCR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_ARMSYNCBARRIERMCR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_ARMEH_SJLJ_DispatchSetup: SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
+def SDT_ARMMEMBARRIER : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
@@ -70,33 +69,35 @@ def SDT_ARMBFI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
+def ARMWrapperDYN : SDNode<"ARMISD::WrapperDYN", SDTIntUnaryOp>;
+def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def ARMcall : SDNode<"ARMISD::CALL", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_pred : SDNode<"ARMISD::CALL_PRED", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_nolink : SDNode<"ARMISD::CALL_NOLINK", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMretflag : SDNode<"ARMISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
- [SDNPInFlag]>;
+ [SDNPInGlue]>;
def ARMcneg : SDNode<"ARMISD::CNEG", SDT_ARMCMov,
- [SDNPInFlag]>;
+ [SDNPInGlue]>;
def ARMbrcond : SDNode<"ARMISD::BRCOND", SDT_ARMBrcond,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
def ARMbrjt : SDNode<"ARMISD::BR_JT", SDT_ARMBrJT,
[SDNPHasChain]>;
@@ -106,40 +107,38 @@ def ARMbr2jt : SDNode<"ARMISD::BR2_JT", SDT_ARMBr2JT,
def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
[SDNPHasChain]>;
-def ARMand : SDNode<"ARMISD::AND", SDT_ARMAnd,
- [SDNPOutFlag]>;
-
def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
- [SDNPOutFlag]>;
+ [SDNPOutGlue]>;
def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp,
- [SDNPOutFlag, SDNPCommutative]>;
+ [SDNPOutGlue, SDNPCommutative]>;
def ARMpic_add : SDNode<"ARMISD::PIC_ADD", SDT_ARMPICAdd>;
-def ARMsrl_flag : SDNode<"ARMISD::SRL_FLAG", SDTIntUnaryOp, [SDNPOutFlag]>;
-def ARMsra_flag : SDNode<"ARMISD::SRA_FLAG", SDTIntUnaryOp, [SDNPOutFlag]>;
-def ARMrrx : SDNode<"ARMISD::RRX" , SDTIntUnaryOp, [SDNPInFlag ]>;
+def ARMsrl_flag : SDNode<"ARMISD::SRL_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
+def ARMsra_flag : SDNode<"ARMISD::SRA_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
+def ARMrrx : SDNode<"ARMISD::RRX" , SDTIntUnaryOp, [SDNPInGlue ]>;
def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>;
def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP",
SDT_ARMEH_SJLJ_Setjmp, [SDNPHasChain]>;
def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
- SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>;
+ SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>;
+def ARMeh_sjlj_dispatchsetup: SDNode<"ARMISD::EH_SJLJ_DISPATCHSETUP",
+ SDT_ARMEH_SJLJ_DispatchSetup, [SDNPHasChain]>;
+
def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER,
[SDNPHasChain]>;
-def ARMSyncBarrier : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIER,
- [SDNPHasChain]>;
-def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIERMCR,
- [SDNPHasChain]>;
-def ARMSyncBarrierMCR : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIERMCR,
+def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER,
[SDNPHasChain]>;
+def ARMPreload : SDNode<"ARMISD::PRELOAD", SDTPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
-def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
- [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
@@ -147,34 +146,40 @@ def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
-def HasV4T : Predicate<"Subtarget->hasV4TOps()">;
+def HasV4T : Predicate<"Subtarget->hasV4TOps()">, AssemblerPredicate;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
-def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">;
-def HasV6 : Predicate<"Subtarget->hasV6Ops()">;
-def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">;
+def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">, AssemblerPredicate;
+def HasV6 : Predicate<"Subtarget->hasV6Ops()">, AssemblerPredicate;
+def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
+def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">, AssemblerPredicate;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
-def HasV7 : Predicate<"Subtarget->hasV7Ops()">;
+def HasV7 : Predicate<"Subtarget->hasV7Ops()">, AssemblerPredicate;
def NoVFP : Predicate<"!Subtarget->hasVFP2()">;
-def HasVFP2 : Predicate<"Subtarget->hasVFP2()">;
-def HasVFP3 : Predicate<"Subtarget->hasVFP3()">;
-def HasNEON : Predicate<"Subtarget->hasNEON()">;
-def HasDivide : Predicate<"Subtarget->hasDivide()">;
-def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">;
-def HasDB : Predicate<"Subtarget->hasDataBarrier()">;
+def HasVFP2 : Predicate<"Subtarget->hasVFP2()">, AssemblerPredicate;
+def HasVFP3 : Predicate<"Subtarget->hasVFP3()">, AssemblerPredicate;
+def HasNEON : Predicate<"Subtarget->hasNEON()">, AssemblerPredicate;
+def HasFP16 : Predicate<"Subtarget->hasFP16()">, AssemblerPredicate;
+def HasDivide : Predicate<"Subtarget->hasDivide()">, AssemblerPredicate;
+def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
+ AssemblerPredicate;
+def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
+ AssemblerPredicate;
+def HasMP : Predicate<"Subtarget->hasMPExtension()">,
+ AssemblerPredicate;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
-def IsThumb : Predicate<"Subtarget->isThumb()">;
+def IsThumb : Predicate<"Subtarget->isThumb()">, AssemblerPredicate;
def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
-def IsThumb2 : Predicate<"Subtarget->isThumb2()">;
-def IsARM : Predicate<"!Subtarget->isThumb()">;
+def IsThumb2 : Predicate<"Subtarget->isThumb2()">, AssemblerPredicate;
+def IsARM : Predicate<"!Subtarget->isThumb()">, AssemblerPredicate;
def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
def IsNotDarwin : Predicate<"!Subtarget->isTargetDarwin()">;
// FIXME: Eventually this will be just "hasV6T2Ops".
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
-def UseVMLx : Predicate<"Subtarget->useVMLx()">;
+def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
@@ -199,12 +204,6 @@ def so_imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
-// rot_imm predicate - True if the 32-bit immediate is equal to 8, 16, or 24.
-def rot_imm : PatLeaf<(i32 imm), [{
- int32_t v = (int32_t)N->getZExtValue();
- return v == 8 || v == 16 || v == 24;
-}]>;
-
/// imm1_15 predicate - True if the 32-bit immediate is in the range [1,15].
def imm1_15 : PatLeaf<(i32 imm), [{
return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 16;
@@ -217,12 +216,12 @@ def imm16_31 : PatLeaf<(i32 imm), [{
def so_imm_neg :
PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(-(int)N->getZExtValue()) != -1;
+ return ARM_AM::getSOImmVal(-(uint32_t)N->getZExtValue()) != -1;
}], so_imm_neg_XFORM>;
def so_imm_not :
PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(~(int)N->getZExtValue()) != -1;
+ return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
}], so_imm_not_XFORM>;
// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
@@ -230,15 +229,6 @@ def sext_16_node : PatLeaf<(i32 GPR:$a), [{
return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
}]>;
-/// bf_inv_mask_imm predicate - An AND mask to clear an arbitrary width bitfield
-/// e.g., 0xf000ffff
-def bf_inv_mask_imm : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM::isBitFieldInvertedMask(N->getZExtValue());
-}] > {
- let PrintMethod = "printBitfieldInvMaskImmOperand";
-}
-
/// Split a 32-bit immediate into two 16 bit parts.
def hi16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
@@ -273,28 +263,103 @@ def sube_live_carry :
PatFrag<(ops node:$LHS, node:$RHS), (sube node:$LHS, node:$RHS),
[{return N->hasAnyUseOfValue(1);}]>;
+// An 'and' node with a single use.
+def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
+ return N->hasOneUse();
+}]>;
+
+// An 'xor' node with a single use.
+def xor_su : PatFrag<(ops node:$lhs, node:$rhs), (xor node:$lhs, node:$rhs), [{
+ return N->hasOneUse();
+}]>;
+
+// An 'fmul' node with a single use.
+def fmul_su : PatFrag<(ops node:$lhs, node:$rhs), (fmul node:$lhs, node:$rhs),[{
+ return N->hasOneUse();
+}]>;
+
+// An 'fadd' node that checks for a single, non-hazardous use.
+def fadd_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fadd node:$lhs, node:$rhs),[{
+ return hasNoVMLxHazardUse(N);
+}]>;
+
+// An 'fsub' node that checks for a single, non-hazardous use.
+def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
+ return hasNoVMLxHazardUse(N);
+}]>;
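+
+// The fadd_mlx/fsub_mlx fragments only match when hasNoVMLxHazardUse() is
+// satisfied, so the vmla/vmls patterns can skip folds that would feed the
+// result straight into another VMLx and stall on cores with that hazard
+// (see UseFPVMLx above).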
+
//===----------------------------------------------------------------------===//
// Operand Definitions.
//
// Branch target.
-def brtarget : Operand<OtherVT>;
+// FIXME: rename brtarget to t2_brtarget
+def brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+}
+
+// FIXME: get rid of this one?
+def uncondbrtarget : Operand<OtherVT> {
+ let EncoderMethod = "getUnconditionalBranchTargetOpValue";
+}
+
+// Branch target for ARM. Handles conditional/unconditional branches.
+def br_target : Operand<OtherVT> {
+ let EncoderMethod = "getARMBranchTargetOpValue";
+}
+
+// Call target.
+// FIXME: rename bltarget to t2_bl_target?
+def bltarget : Operand<i32> {
+ // Encoded the same as branch targets.
+ let EncoderMethod = "getBranchTargetOpValue";
+}
+
+// Call target for ARM. Handles conditional/unconditional calls.
+// FIXME: rename bl_target to t2_bltarget?
+def bl_target : Operand<i32> {
+ // Encoded the same as branch targets.
+ let EncoderMethod = "getARMBranchTargetOpValue";
+}
+
// A list of registers separated by comma. Used by load/store multiple.
+def RegListAsmOperand : AsmOperandClass {
+ let Name = "RegList";
+ let SuperClasses = [];
+}
+
+def DPRRegListAsmOperand : AsmOperandClass {
+ let Name = "DPRRegList";
+ let SuperClasses = [];
+}
+
+def SPRRegListAsmOperand : AsmOperandClass {
+ let Name = "SPRRegList";
+ let SuperClasses = [];
+}
+
def reglist : Operand<i32> {
+ let EncoderMethod = "getRegisterListOpValue";
+ let ParserMatchClass = RegListAsmOperand;
let PrintMethod = "printRegisterList";
}
-// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
-def cpinst_operand : Operand<i32> {
- let PrintMethod = "printCPInstOperand";
+def dpr_reglist : Operand<i32> {
+ let EncoderMethod = "getRegisterListOpValue";
+ let ParserMatchClass = DPRRegListAsmOperand;
+ let PrintMethod = "printRegisterList";
}
-def jtblock_operand : Operand<i32> {
- let PrintMethod = "printJTBlockOperand";
+def spr_reglist : Operand<i32> {
+ let EncoderMethod = "getRegisterListOpValue";
+ let ParserMatchClass = SPRRegListAsmOperand;
+ let PrintMethod = "printRegisterList";
}
-def jt2block_operand : Operand<i32> {
- let PrintMethod = "printJT2BlockOperand";
+
+// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
+def cpinst_operand : Operand<i32> {
+ let PrintMethod = "printCPInstOperand";
}
// Local PC labels.
@@ -302,6 +367,22 @@ def pclabel : Operand<i32> {
let PrintMethod = "printPCLabel";
}
+// ADR instruction labels.
+def adrlabel : Operand<i32> {
+ let EncoderMethod = "getAdrLabelOpValue";
+}
+
+def neon_vcvt_imm32 : Operand<i32> {
+ let EncoderMethod = "getNEONVcvtImm32OpValue";
+}
+
+// rot_imm: An integer that encodes a rotate amount. Must be 8, 16, or 24.
+def rot_imm : Operand<i32>, PatLeaf<(i32 imm), [{
+ int32_t v = (int32_t)N->getZExtValue();
+ return v == 8 || v == 16 || v == 24; }]> {
+ let EncoderMethod = "getRotImmOpValue";
+}
+
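+// The rotate amount encodes in two bits as (amount / 8): ror 8 -> 0b01,
+// ror 16 -> 0b10, ror 24 -> 0b11. It lands in Inst{11-10} of the extend
+// instructions below.
+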
// shift_imm: An integer that encodes a shift amount and the type of shift
// (currently either asr or lsl) using the same encoding used for the
// immediates in so_reg operands.
@@ -313,73 +394,120 @@ def shift_imm : Operand<i32> {
def so_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectShifterOperandReg",
[shl,srl,sra,rotr]> {
+ let EncoderMethod = "getSORegOpValue";
+ let PrintMethod = "printSORegOperand";
+ let MIOperandInfo = (ops GPR, GPR, i32imm);
+}
+def shift_so_reg : Operand<i32>, // reg reg imm
+ ComplexPattern<i32, 3, "SelectShiftShifterOperandReg",
+ [shl,srl,sra,rotr]> {
+ let EncoderMethod = "getSORegOpValue";
let PrintMethod = "printSORegOperand";
let MIOperandInfo = (ops GPR, GPR, i32imm);
}
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
-// 8-bit immediate rotated by an arbitrary number of bits. so_imm values are
-// represented in the imm field in the same 12-bit form that they are encoded
-// into so_imm instructions: the 8-bit immediate is the least significant bits
-// [bits 0-7], the 4-bit shift amount is the next 4 bits [bits 8-11].
+// 8-bit immediate rotated by an arbitrary number of bits.
def so_imm : Operand<i32>, PatLeaf<(imm), [{ return Pred_so_imm(N); }]> {
+ let EncoderMethod = "getSOImmOpValue";
let PrintMethod = "printSOImmOperand";
}
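+// For example, 0xFF000000 is a valid so_imm: imm8 = 0xFF rotated right by
+// 8, encoded as the 12-bit value 0x4ff (rotation = 2 * rot field). 0x101
+// is not: its set bits span more than 8 bits under any even rotation.
+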
// Break so_imm's up into two pieces. This handles immediates with up to 16
// bits set in them. This uses so_imm2part to match and so_imm2part_[12] to
// get the first/second pieces.
-def so_imm2part : Operand<i32>,
- PatLeaf<(imm), [{
+def so_imm2part : PatLeaf<(imm), [{
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
- }]> {
- let PrintMethod = "printSOImm2PartOperand";
-}
+}]>;
-def so_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartFirst((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
+/// arm_i32imm - True if the target has V6T2 (movw/movt can build any i32),
+/// or if the immediate can be split into two so_imm parts (so_imm2part).
+def arm_i32imm : PatLeaf<(imm), [{
+ if (Subtarget->hasV6T2Ops())
+ return true;
+ return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
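+// For example, 0x00ff00ff is not a single so_imm but splits into the two
+// parts 0xff and 0xff0000, so pre-V6T2 it is still materializable with a
+// mov/orr pair; with V6T2, movw/movt can build any 32-bit value.
+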
-def so_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartSecond((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
+/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
+def imm0_31 : Operand<i32>, PatLeaf<(imm), [{
+ return (int32_t)N->getZExtValue() < 32;
}]>;
-def so_neg_imm2part : Operand<i32>, PatLeaf<(imm), [{
- return ARM_AM::isSOImmTwoPartVal(-(int)N->getZExtValue());
- }]> {
- let PrintMethod = "printSOImm2PartOperand";
+/// imm0_31_m1 - Matches and prints like imm0_31, but encodes as 'value - 1'.
+def imm0_31_m1 : Operand<i32>, PatLeaf<(imm), [{
+ return (int32_t)N->getZExtValue() < 32;
+}]> {
+ let EncoderMethod = "getImmMinusOneOpValue";
}
-def so_neg_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartFirst(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
+// i32imm_hilo16 - For movt/movw - sets the MC Encoder method.
+// The imm is split into imm{15-12}, imm{11-0}
+//
+def i32imm_hilo16 : Operand<i32> {
+ let EncoderMethod = "getHiLo16ImmOpValue";
+}
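+
+// For example, movw r0, #0x1234 carries imm = 0x1234 and is emitted with
+// imm{15-12} = 0x1 and imm{11-0} = 0x234 in the instruction's imm4 and
+// imm12 fields.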
-def so_neg_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartSecond(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
+/// bf_inv_mask_imm predicate - An AND mask to clear an arbitrary width bitfield
+/// e.g., 0xf000ffff
+def bf_inv_mask_imm : Operand<i32>,
+ PatLeaf<(imm), [{
+ return ARM::isBitFieldInvertedMask(N->getZExtValue());
+}] > {
+ let EncoderMethod = "getBitfieldInvertedMaskOpValue";
+ let PrintMethod = "printBitfieldInvMaskImmOperand";
+}
-/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
-def imm0_31 : Operand<i32>, PatLeaf<(imm), [{
- return (int32_t)N->getZExtValue() < 32;
+/// lsb_pos_imm - position of the lsb, used by BFI4p and t2BFI4p
+def lsb_pos_imm : Operand<i32>, PatLeaf<(imm), [{
+ return isInt<5>(N->getSExtValue());
}]>;
+/// width_imm - number of bits to be copied, used by BFI4p and t2BFI4p
+def width_imm : Operand<i32>, PatLeaf<(imm), [{
+ return N->getSExtValue() > 0 && N->getSExtValue() <= 32;
+}] > {
+ let EncoderMethod = "getMsbOpValue";
+}
+
// Define ARM specific addressing modes.
-// addrmode2 := reg +/- reg shop imm
+
+// addrmode_imm12 := reg +/- imm12
+//
+def addrmode_imm12 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrModeImm12", []> {
+ // 12-bit immediate operand. Note that instructions using this encode
+ // #0 and #-0 differently. We flag #-0 with the magic value INT32_MIN; all
+ // other immediate values are encoded as-is.
+
+ let EncoderMethod = "getAddrModeImm12OpValue";
+ let PrintMethod = "printAddrModeImm12Operand";
+ let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
+}
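+
+// For example, [r3, #-4] gives base = r3 with U = 0 and imm12 = 4 in the
+// final encoding; the AI_ldr1/AI_str1 classes below pack Rn into
+// addr{16-13}, the U (add) bit into addr{12}, and imm12 into addr{11-0}.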
+// ldst_so_reg := reg +/- reg shop imm
+//
+def ldst_so_reg : Operand<i32>,
+ ComplexPattern<i32, 3, "SelectLdStSOReg", []> {
+ let EncoderMethod = "getLdStSORegOpValue";
+ // FIXME: Simplify the printer
+ let PrintMethod = "printAddrMode2Operand";
+ let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
+}
+
// addrmode2 := reg +/- imm12
+// := reg +/- reg shop imm
//
def addrmode2 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode2", []> {
+ let EncoderMethod = "getAddrMode2OpValue";
let PrintMethod = "printAddrMode2Operand";
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def am2offset : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode2Offset", []> {
+ ComplexPattern<i32, 2, "SelectAddrMode2Offset",
+ [], [SDNPWantRoot]> {
+ let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
@@ -389,22 +517,29 @@ def am2offset : Operand<i32>,
//
def addrmode3 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode3", []> {
+ let EncoderMethod = "getAddrMode3OpValue";
let PrintMethod = "printAddrMode3Operand";
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def am3offset : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode3Offset", []> {
+ ComplexPattern<i32, 2, "SelectAddrMode3Offset",
+ [], [SDNPWantRoot]> {
+ let EncoderMethod = "getAddrMode3OffsetOpValue";
let PrintMethod = "printAddrMode3OffsetOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
-// addrmode4 := reg, <mode|W>
+// ldstm_mode := {ia, ib, da, db}
//
-def addrmode4 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode4", []> {
- let PrintMethod = "printAddrMode4Operand";
- let MIOperandInfo = (ops GPR:$addr, i32imm);
+def ldstm_mode : OptionalDefOperand<OtherVT, (ops i32), (ops (i32 1))> {
+ let EncoderMethod = "getLdStmModeOpValue";
+ let PrintMethod = "printLdStmModeOperand";
+}
+
+def MemMode5AsmOperand : AsmOperandClass {
+ let Name = "MemMode5";
+ let SuperClasses = [];
}
// addrmode5 := reg +/- imm8*4
@@ -413,19 +548,32 @@ def addrmode5 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode5", []> {
let PrintMethod = "printAddrMode5Operand";
let MIOperandInfo = (ops GPR:$base, i32imm);
+ let ParserMatchClass = MemMode5AsmOperand;
+ let EncoderMethod = "getAddrMode5OpValue";
}
-// addrmode6 := reg with optional writeback
+// addrmode6 := reg with optional alignment
//
def addrmode6 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode6", []> {
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
+ let EncoderMethod = "getAddrMode6AddressOpValue";
}
def am6offset : Operand<i32> {
let PrintMethod = "printAddrMode6OffsetOperand";
let MIOperandInfo = (ops GPR);
+ let EncoderMethod = "getAddrMode6OffsetOpValue";
+}
+
+// Special version of addrmode6 to handle alignment encoding for VLD-dup
+// instructions, specifically VLD4-dup.
+def addrmode6dup : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
+ let PrintMethod = "printAddrMode6Operand";
+ let MIOperandInfo = (ops GPR:$addr, i32imm);
+ let EncoderMethod = "getAddrMode6DupAddressOpValue";
}
// addrmodepc := pc + reg
@@ -440,6 +588,28 @@ def nohash_imm : Operand<i32> {
let PrintMethod = "printNoHashImmediate";
}
+def CoprocNumAsmOperand : AsmOperandClass {
+ let Name = "CoprocNum";
+ let SuperClasses = [];
+ let ParserMethod = "tryParseCoprocNumOperand";
+}
+
+def CoprocRegAsmOperand : AsmOperandClass {
+ let Name = "CoprocReg";
+ let SuperClasses = [];
+ let ParserMethod = "tryParseCoprocRegOperand";
+}
+
+def p_imm : Operand<i32> {
+ let PrintMethod = "printPImmediate";
+ let ParserMatchClass = CoprocNumAsmOperand;
+}
+
+def c_imm : Operand<i32> {
+ let PrintMethod = "printCImmediate";
+ let ParserMatchClass = CoprocRegAsmOperand;
+}
+
//===----------------------------------------------------------------------===//
include "ARMInstrFormats.td"
@@ -450,55 +620,93 @@ include "ARMInstrFormats.td"
/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
/// binop that produces a value.
-multiclass AsI1_bin_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
+multiclass AsI1_bin_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
- def ri : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]> {
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ iii, opc, "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
let Inst{25} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm;
}
}
- def rr : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm,
- IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]> {
- let Inst{11-4} = 0b00000000;
+ def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
+ iir, opc, "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{25} = 0;
let isCommutable = Commutable;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rm;
}
- def rs : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]> {
+ def rs : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift), DPSoRegFrm,
+ iis, opc, "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_reg:$shift))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
let Inst{25} = 0;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = shift;
}
}
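+
+// The iii/iir/iis parameters give the immediate, register, and
+// shifted-register forms their own itineraries (e.g. IIC_iALUi, IIC_iALUr,
+// IIC_iALUsr for the ALU ops) instead of one hard-coded itinerary.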
/// AI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the
/// instruction modifies the CPSR register.
-let Defs = [CPSR] in {
-multiclass AI1_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]> {
- let Inst{20} = 1;
+let isCodeGenOnly = 1, Defs = [CPSR] in {
+multiclass AI1_bin_s_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0> {
+ def ri : AI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ iii, opc, "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
let Inst{25} = 1;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm;
}
- def rr : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm,
- IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]> {
+ def rr : AI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
+ iir, opc, "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
let isCommutable = Commutable;
- let Inst{11-4} = 0b00000000;
- let Inst{20} = 1;
let Inst{25} = 0;
- }
- def rs : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]> {
let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rm;
+ }
+ def rs : AI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift), DPSoRegFrm,
+ iis, opc, "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_reg:$shift))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = shift;
}
}
}
@@ -507,146 +715,233 @@ multiclass AI1_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
/// patterns. Similar to AsI1_bin_irs except the instruction does not produce
/// an explicit result; it only implicitly sets CPSR.
let isCompare = 1, Defs = [CPSR] in {
-multiclass AI1_cmp_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AI1<opcod, (outs), (ins GPR:$a, so_imm:$b), DPFrm, IIC_iCMPi,
- opc, "\t$a, $b",
- [(opnode GPR:$a, so_imm:$b)]> {
- let Inst{20} = 1;
+multiclass AI1_cmp_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0> {
+ def ri : AI1<opcod, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii,
+ opc, "\t$Rn, $imm",
+ [(opnode GPR:$Rn, so_imm:$imm)]> {
+ bits<4> Rn;
+ bits<12> imm;
let Inst{25} = 1;
- }
- def rr : AI1<opcod, (outs), (ins GPR:$a, GPR:$b), DPFrm, IIC_iCMPr,
- opc, "\t$a, $b",
- [(opnode GPR:$a, GPR:$b)]> {
- let Inst{11-4} = 0b00000000;
let Inst{20} = 1;
- let Inst{25} = 0;
- let isCommutable = Commutable;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-0} = imm;
}
- def rs : AI1<opcod, (outs), (ins GPR:$a, so_reg:$b), DPSoRegFrm, IIC_iCMPsr,
- opc, "\t$a, $b",
- [(opnode GPR:$a, so_reg:$b)]> {
+ def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir,
+ opc, "\t$Rn, $Rm",
+ [(opnode GPR:$Rn, GPR:$Rm)]> {
+ bits<4> Rn;
+ bits<4> Rm;
+ let isCommutable = Commutable;
+ let Inst{25} = 0;
let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rm;
+ }
+ def rs : AI1<opcod, (outs), (ins GPR:$Rn, so_reg:$shift), DPSoRegFrm, iis,
+ opc, "\t$Rn, $shift",
+ [(opnode GPR:$Rn, so_reg:$shift)]> {
+ bits<4> Rn;
+ bits<12> shift;
let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b0000;
+ let Inst{11-0} = shift;
}
}
}
-/// AI_unary_rrot - A unary operation with two forms: one whose operand is a
+/// AI_ext_rrot - A unary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
/// FIXME: Remove the 'r' variant. Its rot_imm is zero.
-multiclass AI_unary_rrot<bits<8> opcod, string opc, PatFrag opnode> {
- def r : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src),
- IIC_iUNAr, opc, "\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]>,
+multiclass AI_ext_rrot<bits<8> opcod, string opc, PatFrag opnode> {
+ def r : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iEXTr, opc, "\t$Rd, $Rm",
+ [(set GPR:$Rd, (opnode GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
+ bits<4> Rd;
+ bits<4> Rm;
let Inst{19-16} = 0b1111;
+ let Inst{15-12} = Rd;
+ let Inst{11-10} = 0b00;
+ let Inst{3-0} = Rm;
}
- def r_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src, i32imm:$rot),
- IIC_iUNAsi, opc, "\t$dst, $src, ror $rot",
- [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]>,
+ def r_rot : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm, rot_imm:$rot),
+ IIC_iEXTr, opc, "\t$Rd, $Rm, ror $rot",
+ [(set GPR:$Rd, (opnode (rotr GPR:$Rm, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<2> rot;
let Inst{19-16} = 0b1111;
+ let Inst{15-12} = Rd;
+ let Inst{11-10} = rot;
+ let Inst{3-0} = Rm;
}
}
-multiclass AI_unary_rrot_np<bits<8> opcod, string opc> {
- def r : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src),
- IIC_iUNAr, opc, "\t$dst, $src",
+multiclass AI_ext_rrot_np<bits<8> opcod, string opc> {
+ def r : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iEXTr, opc, "\t$Rd, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
let Inst{19-16} = 0b1111;
+ let Inst{11-10} = 0b00;
}
- def r_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src, i32imm:$rot),
- IIC_iUNAsi, opc, "\t$dst, $src, ror $rot",
+ def r_rot : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm, rot_imm:$rot),
+ IIC_iEXTr, opc, "\t$Rd, $Rm, ror $rot",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
+ bits<2> rot;
let Inst{19-16} = 0b1111;
+ let Inst{11-10} = rot;
}
}
-/// AI_bin_rrot - A binary operation with two forms: one whose operand is a
+/// AI_exta_rrot - A binary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
-multiclass AI_bin_rrot<bits<8> opcod, string opc, PatFrag opnode> {
- def rr : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS),
- IIC_iALUr, opc, "\t$dst, $LHS, $RHS",
- [(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]>,
+multiclass AI_exta_rrot<bits<8> opcod, string opc, PatFrag opnode> {
+ def rr : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
let Inst{11-10} = 0b00;
+ let Inst{9-4} = 0b000111;
+ let Inst{3-0} = Rm;
+ }
+ def rr_rot : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm,
+ rot_imm:$rot),
+ IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm, ror $rot",
+ [(set GPR:$Rd, (opnode GPR:$Rn,
+ (rotr GPR:$Rm, rot_imm:$rot)))]>,
+ Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<4> Rn;
+ bits<2> rot;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{11-10} = rot;
+ let Inst{9-4} = 0b000111;
+ let Inst{3-0} = Rm;
}
- def rr_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS,
- i32imm:$rot),
- IIC_iALUsi, opc, "\t$dst, $LHS, $RHS, ror $rot",
- [(set GPR:$dst, (opnode GPR:$LHS,
- (rotr GPR:$RHS, rot_imm:$rot)))]>,
- Requires<[IsARM, HasV6]>;
}
// For disassembly only.
-multiclass AI_bin_rrot_np<bits<8> opcod, string opc> {
- def rr : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS),
- IIC_iALUr, opc, "\t$dst, $LHS, $RHS",
+multiclass AI_exta_rrot_np<bits<8> opcod, string opc> {
+ def rr : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
let Inst{11-10} = 0b00;
}
- def rr_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS,
- i32imm:$rot),
- IIC_iALUsi, opc, "\t$dst, $LHS, $RHS, ror $rot",
+ def rr_rot : AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm,
+ rot_imm:$rot),
+ IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm, ror $rot",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]>;
+ Requires<[IsARM, HasV6]> {
+ bits<4> Rn;
+ bits<2> rot;
+ let Inst{19-16} = Rn;
+ let Inst{11-10} = rot;
+ }
}
/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
let Uses = [CPSR] in {
multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
- def ri : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>,
+ def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>,
Requires<[IsARM]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
let Inst{25} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
}
- def rr : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- DPFrm, IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>,
+ def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM]> {
- let isCommutable = Commutable;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
+ let isCommutable = Commutable;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
- def rs : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>,
+ def rs : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_reg:$shift))]>,
Requires<[IsARM]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
let Inst{25} = 0;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
}
// Carry setting variants
-let Defs = [CPSR] in {
+let isCodeGenOnly = 1, Defs = [CPSR] in {
multiclass AI1_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
- def Sri : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>,
+ def Sri : AXI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ DPFrm, IIC_iALUi, !strconcat(opc, "\t$Rd, $Rn, $imm"),
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>,
Requires<[IsARM]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
let Inst{20} = 1;
let Inst{25} = 1;
}
- def Srr : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- DPFrm, IIC_iALUr, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>,
+ def Srr : AXI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ DPFrm, IIC_iALUr, !strconcat(opc, "\t$Rd, $Rn, $Rm"),
+ [(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{11-4} = 0b00000000;
+ let isCommutable = Commutable;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
let Inst{20} = 1;
let Inst{25} = 0;
}
- def Srs : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>,
+ def Srs : AXI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, !strconcat(opc, "\t$Rd, $Rn, $shift"),
+ [(set GPR:$Rd, (opnode GPR:$Rn, so_reg:$shift))]>,
Requires<[IsARM]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
let Inst{20} = 1;
let Inst{25} = 0;
}
@@ -654,6 +949,62 @@ multiclass AI1_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
}
}
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
+multiclass AI_ldr1<bit isByte, string opc, InstrItinClass iii,
+ InstrItinClass iir, PatFrag opnode> {
+ // Note: We use the complex addrmode_imm12 rather than just an input
+ // GPR and a constrained immediate so that we can use this to match
+ // frame index references and avoid matching constant pool references.
+ def i12: AI2ldst<0b010, 1, isByte, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
+ AddrMode_i12, LdFrm, iii, opc, "\t$Rt, $addr",
+ [(set GPR:$Rt, (opnode addrmode_imm12:$addr))]> {
+ bits<4> Rt;
+ bits<17> addr;
+ let Inst{23} = addr{12}; // U (add = ('U' == 1))
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{15-12} = Rt;
+ let Inst{11-0} = addr{11-0}; // imm12
+ }
+ def rs : AI2ldst<0b011, 1, isByte, (outs GPR:$Rt), (ins ldst_so_reg:$shift),
+ AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
+ [(set GPR:$Rt, (opnode ldst_so_reg:$shift))]> {
+ bits<4> Rt;
+ bits<17> shift;
+ let Inst{23} = shift{12}; // U (add = ('U' == 1))
+ let Inst{19-16} = shift{16-13}; // Rn
+ let Inst{15-12} = Rt;
+ let Inst{11-0} = shift{11-0};
+ }
+}
+}
+
+multiclass AI_str1<bit isByte, string opc, InstrItinClass iii,
+ InstrItinClass iir, PatFrag opnode> {
+ // Note: We use the complex addrmode_imm12 rather than just an input
+ // GPR and a constrained immediate so that we can use this to match
+ // frame index references and avoid matching constant pool references.
+ def i12 : AI2ldst<0b010, 0, isByte, (outs),
+ (ins GPR:$Rt, addrmode_imm12:$addr),
+ AddrMode_i12, StFrm, iii, opc, "\t$Rt, $addr",
+ [(opnode GPR:$Rt, addrmode_imm12:$addr)]> {
+ bits<4> Rt;
+ bits<17> addr;
+ let Inst{23} = addr{12}; // U (add = ('U' == 1))
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{15-12} = Rt;
+ let Inst{11-0} = addr{11-0}; // imm12
+ }
+ def rs : AI2ldst<0b011, 0, isByte, (outs), (ins GPR:$Rt, ldst_so_reg:$shift),
+ AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
+ [(opnode GPR:$Rt, ldst_so_reg:$shift)]> {
+ bits<4> Rt;
+ bits<17> shift;
+ let Inst{23} = shift{12}; // U (add = ('U' == 1))
+ let Inst{19-16} = shift{16-13}; // Rn
+ let Inst{15-12} = Rt;
+ let Inst{11-0} = shift{11-0};
+ }
+}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
@@ -669,8 +1020,7 @@ multiclass AI1_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
let neverHasSideEffects = 1, isNotDuplicable = 1 in
def CONSTPOOL_ENTRY :
PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
- i32imm:$size), NoItinerary,
- "${instid:label} ${cpidx:cpentry}", []>;
+ i32imm:$size), NoItinerary, []>;
// FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
// from removing one half of the matched pairs. That breaks PEI, which assumes
@@ -678,12 +1028,10 @@ PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
def ADJCALLSTACKUP :
PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2, pred:$p), NoItinerary,
- "${:comment} ADJCALLSTACKUP $amt1",
[(ARMcallseq_end timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKDOWN :
PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
- "${:comment} ADJCALLSTACKDOWN $amt",
[(ARMcallseq_start timm:$amt)]>;
}
@@ -691,6 +1039,7 @@ def NOP : AI<(outs), (ins), MiscFrm, NoItinerary, "nop", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
+ let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000000;
}
@@ -698,6 +1047,7 @@ def YIELD : AI<(outs), (ins), MiscFrm, NoItinerary, "yield", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
+ let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000001;
}
@@ -705,6 +1055,7 @@ def WFE : AI<(outs), (ins), MiscFrm, NoItinerary, "wfe", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
+ let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000010;
}
@@ -712,6 +1063,7 @@ def WFI : AI<(outs), (ins), MiscFrm, NoItinerary, "wfi", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
+ let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000011;
}
@@ -719,14 +1071,22 @@ def SEL : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm, NoItinerary, "sel",
"\t$dst, $a, $b",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
let Inst{27-20} = 0b01101000;
let Inst{7-4} = 0b1011;
+ let Inst{11-8} = 0b1111;
}
def SEV : AI<(outs), (ins), MiscFrm, NoItinerary, "sev", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
+ let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000100;
}
@@ -735,154 +1095,174 @@ def SEV : AI<(outs), (ins), MiscFrm, NoItinerary, "sev", "",
def BKPT : AI<(outs), (ins i32imm:$val), MiscFrm, NoItinerary, "bkpt", "\t$val",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM]> {
+ bits<16> val;
+ let Inst{3-0} = val{3-0};
+ let Inst{19-8} = val{15-4};
let Inst{27-20} = 0b00010010;
let Inst{7-4} = 0b0111;
}
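+
+// For example, bkpt #0x00ab encodes val{15-4} = 0x00a into Inst{19-8} and
+// val{3-0} = 0xb into Inst{3-0}.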
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode from Inst{4-0}
-// opt{5} = changemode from Inst{17}
-// opt{8-6} = AIF from Inst{8-6}
-// opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
-def CPS : AXI<(outs), (ins cps_opt:$opt), MiscFrm, NoItinerary, "cps$opt",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
+// Change Processor State is a system instruction -- for disassembly and
+// parsing only.
+// FIXME: Since the asm parser currently has no clean way to handle optional
+// operands, create 3 versions of the same instruction. Once there's a clean
+// framework to represent optional operands, change this behavior.
+class CPS<dag iops, string asm_ops>
+ : AXI<(outs), iops, MiscFrm, NoItinerary, !strconcat("cps", asm_ops),
+ [/* For disassembly only; pattern left blank */]>, Requires<[IsARM]> {
+ bits<2> imod;
+ bits<3> iflags;
+ bits<5> mode;
+ bit M;
+
let Inst{31-28} = 0b1111;
let Inst{27-20} = 0b00010000;
- let Inst{16} = 0;
- let Inst{5} = 0;
+ let Inst{19-18} = imod;
+ let Inst{17} = M; // Enabled if mode is set;
+ let Inst{16} = 0;
+ let Inst{8-6} = iflags;
+ let Inst{5} = 0;
+ let Inst{4-0} = mode;
}
+let M = 1 in
+ def CPS3p : CPS<(ins imod_op:$imod, iflags_op:$iflags, i32imm:$mode),
+ "$imod\t$iflags, $mode">;
+let mode = 0, M = 0 in
+ def CPS2p : CPS<(ins imod_op:$imod, iflags_op:$iflags), "$imod\t$iflags">;
+
+let imod = 0, iflags = 0, M = 1 in
+ def CPS1p : CPS<(ins i32imm:$mode), "\t$mode">;
+
// Preload notifies the memory system of possible future data/instruction accesses.
// These are for disassembly only.
-//
-// A8.6.117, A8.6.118. Different instructions are generated for #0 and #-0.
-// The neg_zero operand translates -0 to -1, -1 to -2, ..., etc.
-multiclass APreLoad<bit data, bit read, string opc> {
+multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
- def i : AXI<(outs), (ins GPR:$base, neg_zero:$imm), MiscFrm, NoItinerary,
- !strconcat(opc, "\t[$base, $imm]"), []> {
+ def i12 : AXI<(outs), (ins addrmode_imm12:$addr), MiscFrm, IIC_Preload,
+ !strconcat(opc, "\t$addr"),
+ [(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]> {
+ bits<4> Rt;
+ bits<17> addr;
let Inst{31-26} = 0b111101;
let Inst{25} = 0; // 0 for immediate form
let Inst{24} = data;
+ let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{15-12} = 0b1111;
+ let Inst{11-0} = addr{11-0}; // imm12
}
- def r : AXI<(outs), (ins addrmode2:$addr), MiscFrm, NoItinerary,
- !strconcat(opc, "\t$addr"), []> {
+ def rs : AXI<(outs), (ins ldst_so_reg:$shift), MiscFrm, IIC_Preload,
+ !strconcat(opc, "\t$shift"),
+ [(ARMPreload ldst_so_reg:$shift, (i32 read), (i32 data))]> {
+ bits<17> shift;
let Inst{31-26} = 0b111101;
let Inst{25} = 1; // 1 for register form
let Inst{24} = data;
+ let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
- let Inst{4} = 0;
+ let Inst{19-16} = shift{16-13}; // Rn
+ let Inst{15-12} = 0b1111;
+ let Inst{11-0} = shift{11-0};
}
}
-defm PLD : APreLoad<1, 1, "pld">;
-defm PLDW : APreLoad<1, 0, "pldw">;
-defm PLI : APreLoad<0, 1, "pli">;
-
-def SETENDBE : AXI<(outs),(ins), MiscFrm, NoItinerary, "setend\tbe",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{31-28} = 0b1111;
- let Inst{27-20} = 0b00010000;
- let Inst{16} = 1;
- let Inst{9} = 1;
- let Inst{7-4} = 0b0000;
-}
+defm PLD : APreLoad<1, 1, "pld">, Requires<[IsARM]>;
+defm PLDW : APreLoad<0, 1, "pldw">, Requires<[IsARM,HasV7,HasMP]>;
+defm PLI : APreLoad<1, 0, "pli">, Requires<[IsARM,HasV7]>;
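+
+// With APreLoad<read, data>: pld (1, 1) is a data preload for a read,
+// pldw (0, 1) is a data preload for a write (and requires the MP
+// extension), and pli (1, 0) preloads the instruction stream.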
-def SETENDLE : AXI<(outs),(ins), MiscFrm, NoItinerary, "setend\tle",
- [/* For disassembly only; pattern left blank */]>,
+def SETEND : AXI<(outs),(ins setend_op:$end), MiscFrm, NoItinerary,
+ "setend\t$end",
+ [/* For disassembly only; pattern left blank */]>,
Requires<[IsARM]> {
- let Inst{31-28} = 0b1111;
- let Inst{27-20} = 0b00010000;
- let Inst{16} = 1;
- let Inst{9} = 0;
- let Inst{7-4} = 0b0000;
+ bits<1> end;
+ let Inst{31-10} = 0b1111000100000001000000;
+ let Inst{9} = end;
+ let Inst{8-0} = 0;
}
def DBG : AI<(outs), (ins i32imm:$opt), MiscFrm, NoItinerary, "dbg", "\t$opt",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV7]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-4} = 0b1111;
+ bits<4> opt;
+ let Inst{27-4} = 0b001100100000111100001111;
+ let Inst{3-0} = opt;
}
// A5.4 Permanently UNDEFINED instructions.
-// FIXME: Temporary emitted as raw bytes until this pseudo-op will be added to
-// binutils
let isBarrier = 1, isTerminator = 1 in
-def TRAP : AXI<(outs), (ins), MiscFrm, NoItinerary,
- ".long 0xe7ffdefe ${:comment} trap", [(trap)]>,
+def TRAP : AXI<(outs), (ins), MiscFrm, NoItinerary,
+ "trap", [(trap)]>,
Requires<[IsARM]> {
- let Inst{27-25} = 0b011;
- let Inst{24-20} = 0b11111;
- let Inst{7-5} = 0b111;
- let Inst{4} = 0b1;
+ let Inst = 0xe7ffdefe;
}
// Address computation and loads and stores in PIC mode.
let isNotDuplicable = 1 in {
-def PICADD : AXI1<0b0100, (outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
- Pseudo, IIC_iALUr, "\n$cp:\n\tadd$p\t$dst, pc, $a",
- [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
+def PICADD : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
+ Size4Bytes, IIC_iALUr,
+ [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
let AddedComplexity = 10 in {
-def PICLDR : AXI2ldw<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldr$p\t$dst, $addr",
- [(set GPR:$dst, (load addrmodepc:$addr))]>;
+def PICLDR : ARMPseudoInst<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iLoad_r,
+ [(set GPR:$dst, (load addrmodepc:$addr))]>;
-def PICLDRH : AXI3ldh<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrh${p}\t$dst, $addr",
- [(set GPR:$dst, (zextloadi16 addrmodepc:$addr))]>;
+def PICLDRH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iLoad_bh_r,
+ [(set GPR:$Rt, (zextloadi16 addrmodepc:$addr))]>;
-def PICLDRB : AXI2ldb<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrb${p}\t$dst, $addr",
- [(set GPR:$dst, (zextloadi8 addrmodepc:$addr))]>;
+def PICLDRB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iLoad_bh_r,
+ [(set GPR:$Rt, (zextloadi8 addrmodepc:$addr))]>;
-def PICLDRSH : AXI3ldsh<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrsh${p}\t$dst, $addr",
- [(set GPR:$dst, (sextloadi16 addrmodepc:$addr))]>;
+def PICLDRSH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iLoad_bh_r,
+ [(set GPR:$Rt, (sextloadi16 addrmodepc:$addr))]>;
-def PICLDRSB : AXI3ldsb<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrsb${p}\t$dst, $addr",
- [(set GPR:$dst, (sextloadi8 addrmodepc:$addr))]>;
+def PICLDRSB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iLoad_bh_r,
+ [(set GPR:$Rt, (sextloadi8 addrmodepc:$addr))]>;
}
let AddedComplexity = 10 in {
-def PICSTR : AXI2stw<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstr$p\t$src, $addr",
- [(store GPR:$src, addrmodepc:$addr)]>;
+def PICSTR : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iStore_r, [(store GPR:$src, addrmodepc:$addr)]>;
-def PICSTRH : AXI3sth<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstrh${p}\t$src, $addr",
- [(truncstorei16 GPR:$src, addrmodepc:$addr)]>;
+def PICSTRH : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iStore_bh_r, [(truncstorei16 GPR:$src,
+ addrmodepc:$addr)]>;
-def PICSTRB : AXI2stb<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstrb${p}\t$src, $addr",
- [(truncstorei8 GPR:$src, addrmodepc:$addr)]>;
+def PICSTRB : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
+ Size4Bytes, IIC_iStore_bh_r, [(truncstorei8 GPR:$src, addrmodepc:$addr)]>;
}
} // isNotDuplicable = 1
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
-let neverHasSideEffects = 1 in {
-let isReMaterializable = 1 in
-def LEApcrel : AXI1<0x0, (outs GPR:$dst), (ins i32imm:$label, pred:$p),
- Pseudo, IIC_iALUi,
- "adr$p\t$dst, #$label", []>;
-
-} // neverHasSideEffects
-def LEApcrelJT : AXI1<0x0, (outs GPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p),
- Pseudo, IIC_iALUi,
- "adr$p\t$dst, #${label}_${id}", []> {
- let Inst{25} = 1;
+let neverHasSideEffects = 1, isReMaterializable = 1 in
+// The 'adr' mnemonic encodes differently if the label is before or after
+// the instruction. The {24-21} opcode bits are set by the fixup, as we don't
+// know until then which form of the instruction will be used.
+def ADR : AI1<0, (outs GPR:$Rd), (ins adrlabel:$label),
+ MiscFrm, IIC_iALUi, "adr", "\t$Rd, #$label", []> {
+ bits<4> Rd;
+ bits<12> label;
+ let Inst{27-25} = 0b001;
+ let Inst{20} = 0;
+ let Inst{19-16} = 0b1111;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = label;
}
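// For reference, the two concrete encodings the fixup chooses between are the
// data-processing ADD/SUB immediate forms with Rn = pc (hence the hard-wired
// Inst{19-16} = 0b1111 above):
//   add rD, pc, #offset   @ label after the instruction:  Inst{24-21} = 0b0100
//   sub rD, pc, #offset   @ label before the instruction: Inst{24-21} = 0b0010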
+def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p),
+ Size4Bytes, IIC_iALUi, []>;
+
+def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd),
+ (ins i32imm:$label, nohash_imm:$id, pred:$p),
+ Size4Bytes, IIC_iALUi, []>;
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
@@ -893,159 +1273,139 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def BX_RET : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"bx", "\tlr", [(ARMretflag)]>,
Requires<[IsARM, HasV4T]> {
- let Inst{3-0} = 0b1110;
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
+ let Inst{27-0} = 0b0001001011111111111100011110;
}
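// The single 28-bit literal above is just the concatenation of the removed
// per-field assignments, read from bit 27 down to bit 0:
//   0b00010010 111111111111 0001 1110
//     Inst{27-20} Inst{19-8} {7-4} {3-0}
// i.e. the fixed encoding of "bx lr" minus the condition field; MOVPCLR
// below folds its fields the same way.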
// ARMV4 only
- def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
+ def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"mov", "\tpc, lr", [(ARMretflag)]>,
Requires<[IsARM, NoV4T]> {
- let Inst{11-0} = 0b000000001110;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
+ let Inst{27-0} = 0b0001101000001111000000001110;
}
}
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// ARMV4T and above
- def BRIND : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
+ def BX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
[(brind GPR:$dst)]>,
Requires<[IsARM, HasV4T]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- let Inst{31-28} = 0b1110;
+ bits<4> dst;
+ let Inst{31-4} = 0b1110000100101111111111110001;
+ let Inst{3-0} = dst;
}
// ARMV4 only
- def MOVPCRX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "mov\tpc, $dst",
- [(brind GPR:$dst)]>,
- Requires<[IsARM, NoV4T]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- let Inst{31-28} = 0b1110;
- }
+ // FIXME: We would really like to define this as a vanilla ARMPat like:
+ // ARMPat<(brind GPR:$dst), (MOVr PC, GPR:$dst)>
+  // With that, however, we can't set isBranch, isTerminator, etc.
+ def MOVPCRX : ARMPseudoInst<(outs), (ins GPR:$dst),
+ Size4Bytes, IIC_Br, [(brind GPR:$dst)]>,
+ Requires<[IsARM, NoV4T]>;
}
-// FIXME: remove when we have a way to marking a MI with these properties.
-// FIXME: Should pc be an implicit operand like PICADD, etc?
-let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
- hasExtraDefRegAllocReq = 1 in
- def LDM_RET : AXI4ld<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops),
- IndexModeUpd, LdStMulFrm, IIC_Br,
- "ldm${addr:submode}${p}\t$addr!, $dsts",
- "$addr.addr = $wb", []>;
-
-// On non-Darwin platforms R9 is callee-saved.
+// All calls clobber the non-callee saved registers. SP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
let isCall = 1,
+ // On non-Darwin platforms R9 is callee-saved.
Defs = [R0, R1, R2, R3, R12, LR,
D0, D1, D2, D3, D4, D5, D6, D7,
D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- def BL : ABXI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl\t${func:call}",
+ D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR],
+ Uses = [SP] in {
+ def BL : ABXI<0b1011, (outs), (ins bl_target:$func, variable_ops),
+ IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsARM, IsNotDarwin]> {
let Inst{31-28} = 0b1110;
+ bits<24> func;
+ let Inst{23-0} = func;
}
- def BL_pred : ABI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl", "\t${func:call}",
+ def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func, variable_ops),
+ IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM, IsNotDarwin]>;
+ Requires<[IsARM, IsNotDarwin]> {
+ bits<24> func;
+ let Inst{23-0} = func;
+ }
// ARMv5T and above
def BLX : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
Requires<[IsARM, HasV5T, IsNotDarwin]> {
- let Inst{7-4} = 0b0011;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
+ bits<4> func;
+ let Inst{31-4} = 0b1110000100101111111111110011;
+ let Inst{3-0} = func;
}
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BX : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsNotDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
+ def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ Size8Bytes, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
+ Requires<[IsARM, HasV4T, IsNotDarwin]>;
// ARMv4
- def BMOVPCRX : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tmov\tpc, $func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsNotDarwin]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- }
+ def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ Size8Bytes, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
+ Requires<[IsARM, NoV4T, IsNotDarwin]>;
}
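// The Size8Bytes pseudos here stand for the two-instruction call sequences
// spelled out by the asm strings they replace:
//   BX_CALL:        mov lr, pc
//                   bx  rN
//   BMOVPCRX_CALL:  mov lr, pc
//                   mov pc, rN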
-// On Darwin R9 is call-clobbered.
let isCall = 1,
+ // On Darwin R9 is call-clobbered.
+ // R7 is marked as a use to prevent frame-pointer assignments from being
+ // moved above / below calls.
Defs = [R0, R1, R2, R3, R9, R12, LR,
D0, D1, D2, D3, D4, D5, D6, D7,
D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- def BLr9 : ABXI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl\t${func:call}",
+ D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR],
+ Uses = [R7, SP] in {
+ def BLr9 : ABXI<0b1011, (outs), (ins bltarget:$func, variable_ops),
+ IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>, Requires<[IsARM, IsDarwin]> {
let Inst{31-28} = 0b1110;
+ bits<24> func;
+ let Inst{23-0} = func;
}
- def BLr9_pred : ABI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl", "\t${func:call}",
+ def BLr9_pred : ABI<0b1011, (outs), (ins bltarget:$func, variable_ops),
+ IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM, IsDarwin]>;
+ Requires<[IsARM, IsDarwin]> {
+ bits<24> func;
+ let Inst{23-0} = func;
+ }
// ARMv5T and above
def BLXr9 : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>, Requires<[IsARM, HasV5T, IsDarwin]> {
- let Inst{7-4} = 0b0011;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
+ bits<4> func;
+ let Inst{31-4} = 0b1110000100101111111111110011;
+ let Inst{3-0} = func;
}
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BXr9 : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
+ def BXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ Size8Bytes, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
+ Requires<[IsARM, HasV4T, IsDarwin]>;
// ARMv4
- def BMOVPCRXr9 : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tmov\tpc, $func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsDarwin]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- }
+ def BMOVPCRXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
+ Size8Bytes, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
+ Requires<[IsARM, NoV4T, IsDarwin]>;
}
// Tail calls.
+// FIXME: These should probably be xformed into the non-TC versions of the
+// instructions as part of MC lowering.
+// FIXME: These seem to be used for both Thumb and ARM instruction selection.
+// Thumb should have its own version since the instruction is actually
+// different, even though the mnemonic is the same.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// Darwin versions.
let Defs = [R0, R1, R2, R3, R9, R12,
@@ -1053,29 +1413,26 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
D27, D28, D29, D30, D31, PC],
Uses = [SP] in {
- def TCRETURNdi : AInoP<(outs), (ins i32imm:$dst, variable_ops),
- Pseudo, IIC_Br,
- "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+ def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
+ IIC_Br, []>, Requires<[IsDarwin]>;
- def TCRETURNri : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
- Pseudo, IIC_Br,
- "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+ def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
+ IIC_Br, []>, Requires<[IsDarwin]>;
def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
IIC_Br, "b\t$dst @ TAILCALL",
- []>, Requires<[IsDarwin]>;
+ []>, Requires<[IsARM, IsDarwin]>;
def TAILJMPdt: ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
IIC_Br, "b.w\t$dst @ TAILCALL",
- []>, Requires<[IsDarwin]>;
+ []>, Requires<[IsThumb, IsDarwin]>;
def TAILJMPr : AXI<(outs), (ins tcGPR:$dst, variable_ops),
BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
[]>, Requires<[IsDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- let Inst{31-28} = 0b1110;
+ bits<4> dst;
+ let Inst{31-4} = 0b1110000100101111111111110001;
+ let Inst{3-0} = dst;
}
}
@@ -1085,13 +1442,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
D27, D28, D29, D30, D31, PC],
Uses = [SP] in {
- def TCRETURNdiND : AInoP<(outs), (ins i32imm:$dst, variable_ops),
- Pseudo, IIC_Br,
- "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+ def TCRETURNdiND : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
+ IIC_Br, []>, Requires<[IsNotDarwin]>;
- def TCRETURNriND : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
- Pseudo, IIC_Br,
- "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+ def TCRETURNriND : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
+ IIC_Br, []>, Requires<[IsNotDarwin]>;
def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
IIC_Br, "b\t$dst @ TAILCALL",
@@ -1104,10 +1459,9 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
def TAILJMPrND : AXI<(outs), (ins tcGPR:$dst, variable_ops),
BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
[]>, Requires<[IsNotDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- let Inst{31-28} = 0b1110;
+ bits<4> dst;
+ let Inst{31-4} = 0b1110000100101111111111110001;
+ let Inst{3-0} = dst;
}
}
}
@@ -1117,48 +1471,40 @@ let isBranch = 1, isTerminator = 1 in {
let isBarrier = 1 in {
let isPredicable = 1 in
def B : ABXI<0b1010, (outs), (ins brtarget:$target), IIC_Br,
- "b\t$target", [(br bb:$target)]>;
+ "b\t$target", [(br bb:$target)]> {
+ bits<24> target;
+ let Inst{31-28} = 0b1110;
+ let Inst{23-0} = target;
+ }
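// Worked encoding note: the 24-bit target field holds a word offset, so the
// branch destination is computed (per the ARM ARM) as
//   destination = (branch address + 8) + SignExtend(imm24) * 4
// giving a range of roughly +/-32MB.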
- let isNotDuplicable = 1, isIndirectBranch = 1 in {
- def BR_JTr : JTI<(outs), (ins GPR:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target$jt",
- [(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{20} = 0; // S Bit
- let Inst{24-21} = 0b1101;
- let Inst{27-25} = 0b000;
- }
- def BR_JTm : JTI<(outs),
- (ins addrmode2:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "ldr\tpc, $target$jt",
- [(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
- imm:$id)]> {
- let Inst{15-12} = 0b1111;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b011;
- }
- def BR_JTadd : JTI<(outs),
- (ins GPR:$target, GPR:$idx, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "add\tpc, $target, $idx$jt",
- [(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
- imm:$id)]> {
- let Inst{15-12} = 0b1111;
- let Inst{20} = 0; // S bit
- let Inst{24-21} = 0b0100;
- let Inst{27-25} = 0b000;
- }
- } // isNotDuplicable = 1, isIndirectBranch = 1
+ let isNotDuplicable = 1, isIndirectBranch = 1 in {
+ def BR_JTr : ARMPseudoInst<(outs),
+ (ins GPR:$target, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br,
+ [(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>;
+  // FIXME: This shouldn't use the generic "addrmode2", but rather be
+  // split into i12- and rs-suffixed versions.
+ def BR_JTm : ARMPseudoInst<(outs),
+ (ins addrmode2:$target, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br,
+ [(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
+ imm:$id)]>;
+ def BR_JTadd : ARMPseudoInst<(outs),
+ (ins GPR:$target, GPR:$idx, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br,
+ [(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
+ imm:$id)]>;
+ } // isNotDuplicable = 1, isIndirectBranch = 1
} // isBarrier = 1
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
- def Bcc : ABI<0b1010, (outs), (ins brtarget:$target),
+ def Bcc : ABI<0b1010, (outs), (ins br_target:$target),
IIC_Br, "b", "\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]>;
+ [/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]> {
+ bits<24> target;
+ let Inst{23-0} = target;
+ }
}
// Branch and Exchange Jazelle -- for disassembly only
@@ -1172,271 +1518,303 @@ def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
// Secure Monitor Call is a system instruction -- for disassembly only
def SMC : ABI<0b0001, (outs), (ins i32imm:$opt), NoItinerary, "smc", "\t$opt",
[/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0111;
+ bits<4> opt;
+ let Inst{23-4} = 0b01100000000000000111;
+ let Inst{3-0} = opt;
}
// Supervisor Call (Software Interrupt) -- for disassembly only
-let isCall = 1 in {
+let isCall = 1, Uses = [SP] in {
def SVC : ABI<0b1111, (outs), (ins i32imm:$svc), IIC_Br, "svc", "\t$svc",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ bits<24> svc;
+ let Inst{23-0} = svc;
+}
}
// Store Return State is a system instruction -- for disassembly only
-def SRSW : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, i32imm:$mode),
- NoItinerary, "srs${addr:submode}\tsp!, $mode",
+let isCodeGenOnly = 1 in { // FIXME: This should not use submode!
+def SRSW : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, i32imm:$mode),
+ NoItinerary, "srs${amode}\tsp!, $mode",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b110; // W = 1
}
-def SRS : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, i32imm:$mode),
- NoItinerary, "srs${addr:submode}\tsp, $mode",
+def SRS : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, i32imm:$mode),
+ NoItinerary, "srs${amode}\tsp, $mode",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b100; // W = 0
}
// Return From Exception is a system instruction -- for disassembly only
-def RFEW : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, GPR:$base),
- NoItinerary, "rfe${addr:submode}\t$base!",
+def RFEW : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, GPR:$base),
+ NoItinerary, "rfe${amode}\t$base!",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b011; // W = 1
}
-def RFE : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, GPR:$base),
- NoItinerary, "rfe${addr:submode}\t$base",
+def RFE : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, GPR:$base),
+ NoItinerary, "rfe${amode}\t$base",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b001; // W = 0
}
+} // isCodeGenOnly = 1
//===----------------------------------------------------------------------===//
// Load / store Instructions.
//
// Load
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def LDR : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- [(set GPR:$dst, (load addrmode2:$addr))]>;
+
+
+defm LDR : AI_ldr1<0, "ldr", IIC_iLoad_r, IIC_iLoad_si,
+ UnOpFrag<(load node:$Src)>>;
+defm LDRB : AI_ldr1<1, "ldrb", IIC_iLoad_bh_r, IIC_iLoad_bh_si,
+ UnOpFrag<(zextloadi8 node:$Src)>>;
+defm STR : AI_str1<0, "str", IIC_iStore_r, IIC_iStore_si,
+ BinOpFrag<(store node:$LHS, node:$RHS)>>;
+defm STRB : AI_str1<1, "strb", IIC_iStore_bh_r, IIC_iStore_bh_si,
+ BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
// Special LDR for loads from non-pc-relative constpools.
let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1,
isReMaterializable = 1 in
-def LDRcp : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, IIC_iLoadr,
- "ldr", "\t$dst, $addr", []>;
+def LDRcp : AI2ldst<0b010, 1, 0, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
+ AddrMode_i12, LdFrm, IIC_iLoad_r, "ldr", "\t$Rt, $addr",
+ []> {
+ bits<4> Rt;
+ bits<17> addr;
+ let Inst{23} = addr{12}; // U (add = ('U' == 1))
+ let Inst{19-16} = 0b1111;
+ let Inst{15-12} = Rt;
+ let Inst{11-0} = addr{11-0}; // imm12
+}
// Loads with zero extension
-def LDRH : AI3ldh<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrh", "\t$dst, $addr",
- [(set GPR:$dst, (zextloadi16 addrmode3:$addr))]>;
-
-def LDRB : AI2ldb<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm,
- IIC_iLoadr, "ldrb", "\t$dst, $addr",
- [(set GPR:$dst, (zextloadi8 addrmode2:$addr))]>;
+def LDRH : AI3ld<0b1011, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
+ IIC_iLoad_bh_r, "ldrh", "\t$Rt, $addr",
+ [(set GPR:$Rt, (zextloadi16 addrmode3:$addr))]>;
// Loads with sign extension
-def LDRSH : AI3ldsh<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrsh", "\t$dst, $addr",
- [(set GPR:$dst, (sextloadi16 addrmode3:$addr))]>;
-
-def LDRSB : AI3ldsb<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrsb", "\t$dst, $addr",
- [(set GPR:$dst, (sextloadi8 addrmode3:$addr))]>;
-
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+def LDRSH : AI3ld<0b1111, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
+ IIC_iLoad_bh_r, "ldrsh", "\t$Rt, $addr",
+ [(set GPR:$Rt, (sextloadi16 addrmode3:$addr))]>;
+
+def LDRSB : AI3ld<0b1101, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
+ IIC_iLoad_bh_r, "ldrsb", "\t$Rt, $addr",
+ [(set GPR:$Rt, (sextloadi8 addrmode3:$addr))]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
+    isCodeGenOnly = 1 in { // $dst2 doesn't appear in the asm string.
+// FIXME: $dst2 isn't in the asm string, as it's implied by $Rd (dst2 = Rd+1).
+// How do we represent that such that tblgen is happy and we don't have to
+// mark this as codegen-only?
// Load doubleword
-def LDRD : AI3ldd<(outs GPR:$dst1, GPR:$dst2), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrd", "\t$dst1, $addr",
+def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2),
+ (ins addrmode3:$addr), LdMiscFrm,
+ IIC_iLoad_d_r, "ldrd", "\t$Rd, $addr",
[]>, Requires<[IsARM, HasV5TE]>;
+}
// Indexed loads
-def LDR_PRE : AI2ldwpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode2:$addr), LdFrm, IIC_iLoadru,
- "ldr", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDR_POST : AI2ldwpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldr", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRH_PRE : AI3ldhpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrh", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRH_POST : AI3ldhpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrh", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRB_PRE : AI2ldbpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode2:$addr), LdFrm, IIC_iLoadru,
- "ldrb", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRB_POST : AI2ldbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldrb", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRSH_PRE : AI3ldshpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrsh", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRSH_POST: AI3ldshpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsh", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRSB_PRE : AI3ldsbpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrsb", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRSB_POST: AI3ldsbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsb", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
+multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
+ def _PRE : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins addrmode2:$addr), IndexModePre, LdFrm, itin,
+ opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
+ // {17-14} Rn
+ // {13} 1 == Rm, 0 == imm12
+ // {12} isAdd
+ // {11-0} imm12/Rm
+ bits<18> addr;
+ let Inst{25} = addr{13};
+ let Inst{23} = addr{12};
+ let Inst{19-16} = addr{17-14};
+ let Inst{11-0} = addr{11-0};
+ }
+ def _POST : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins GPR:$Rn, am2offset:$offset),
+ IndexModePost, LdFrm, itin,
+ opc, "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb", []> {
+ // {13} 1 == Rm, 0 == imm12
+ // {12} isAdd
+ // {11-0} imm12/Rm
+ bits<14> offset;
+ bits<4> Rn;
+ let Inst{25} = offset{13};
+ let Inst{23} = offset{12};
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = offset{11-0};
+ }
+}
-// For disassembly only
-def LDRD_PRE : AI3lddpr<(outs GPR:$dst1, GPR:$dst2, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadr,
- "ldrd", "\t$dst1, $dst2, $addr!", "$addr.base = $base_wb", []>,
- Requires<[IsARM, HasV5TE]>;
+let mayLoad = 1, neverHasSideEffects = 1 in {
+defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_ru>;
+defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_ru>;
+}
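// The defm lines above expand the multiclass by concatenating each suffix
// onto the defm name, reproducing the instructions deleted above, e.g.:
//   defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_ru>;
// yields defs named LDR_PRE and LDR_POST, matching the old hand-written
// LDR_PRE/LDR_POST definitions.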
-// For disassembly only
-def LDRD_POST : AI3lddpo<(outs GPR:$dst1, GPR:$dst2, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadr,
- "ldrd", "\t$dst1, $dst2, [$base], $offset", "$base = $base_wb", []>,
- Requires<[IsARM, HasV5TE]>;
+multiclass AI3_ldridx<bits<4> op, bit op20, string opc, InstrItinClass itin> {
+ def _PRE : AI3ldstidx<op, op20, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins addrmode3:$addr), IndexModePre,
+ LdMiscFrm, itin,
+ opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
+ bits<14> addr;
+ let Inst{23} = addr{8}; // U bit
+ let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{11-8} = addr{7-4}; // imm7_4/zero
+ let Inst{3-0} = addr{3-0}; // imm3_0/Rm
+ }
+ def _POST : AI3ldstidx<op, op20, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
+ (ins GPR:$Rn, am3offset:$offset), IndexModePost,
+ LdMiscFrm, itin,
+ opc, "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb", []> {
+ bits<10> offset;
+ bits<4> Rn;
+ let Inst{23} = offset{8}; // U bit
+ let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
+ let Inst{19-16} = Rn;
+ let Inst{11-8} = offset{7-4}; // imm7_4/zero
+ let Inst{3-0} = offset{3-0}; // imm3_0/Rm
+ }
+}
-} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+let mayLoad = 1, neverHasSideEffects = 1 in {
+defm LDRH : AI3_ldridx<0b1011, 1, "ldrh", IIC_iLoad_bh_ru>;
+defm LDRSH : AI3_ldridx<0b1111, 1, "ldrsh", IIC_iLoad_bh_ru>;
+defm LDRSB : AI3_ldridx<0b1101, 1, "ldrsb", IIC_iLoad_bh_ru>;
+let hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
+defm LDRD : AI3_ldridx<0b1101, 0, "ldrd", IIC_iLoad_d_ru>;
+} // mayLoad = 1, neverHasSideEffects = 1
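// For reference, the 14-bit addrmode3 operand packed above decodes as:
//   addr{13}    1 == imm8 form, 0 == register form
//   addr{12-9}  Rn (base register)
//   addr{8}     U bit (add vs. subtract)
//   addr{7-4}   imm8{7-4}, or zero in the register form
//   addr{3-0}   imm8{3-0}, or Rm
// which is exactly how the _PRE/_POST bit assignments slice it.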
// LDRT, LDRBT, LDRSBT, LDRHT, LDRSHT are for disassembly only.
-
-def LDRT : AI2ldwpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am2offset:$offset), LdFrm, IIC_iLoadru,
+let mayLoad = 1, neverHasSideEffects = 1 in {
+def LDRT : AI2ldstidx<1, 0, 0, (outs GPR:$dst, GPR:$base_wb),
+ (ins GPR:$base, am2offset:$offset), IndexModeNone,
+ LdFrm, IIC_iLoad_ru,
"ldrt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
-
-def LDRBT : AI2ldbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am2offset:$offset), LdFrm, IIC_iLoadru,
+def LDRBT : AI2ldstidx<1, 1, 0, (outs GPR:$dst, GPR:$base_wb),
+ (ins GPR:$base, am2offset:$offset), IndexModeNone,
+ LdFrm, IIC_iLoad_bh_ru,
"ldrbt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
-
-def LDRSBT : AI3ldsbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
+def LDRSBT : AI3ldstidx<0b1101, 1, 1, 0, (outs GPR:$dst, GPR:$base_wb),
+ (ins GPR:$base, am3offset:$offset), IndexModePost,
+ LdMiscFrm, IIC_iLoad_bh_ru,
"ldrsbt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
-
-def LDRHT : AI3ldhpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrht", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
+def LDRHT : AI3ldstidx<0b1011, 1, 1, 0, (outs GPR:$dst, GPR:$base_wb),
+ (ins GPR:$base, am3offset:$offset), IndexModePost,
+ LdMiscFrm, IIC_iLoad_bh_ru,
+ "ldrht", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
-
-def LDRSHT : AI3ldshpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
+def LDRSHT : AI3ldstidx<0b1111, 1, 1, 0, (outs GPR:$dst, GPR:$base_wb),
+ (ins GPR:$base, am3offset:$offset), IndexModePost,
+ LdMiscFrm, IIC_iLoad_bh_ru,
"ldrsht", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
+}
// Store
-def STR : AI2stw<(outs), (ins GPR:$src, addrmode2:$addr), StFrm, IIC_iStorer,
- "str", "\t$src, $addr",
- [(store GPR:$src, addrmode2:$addr)]>;
// Stores with truncate
-def STRH : AI3sth<(outs), (ins GPR:$src, addrmode3:$addr), StMiscFrm,
- IIC_iStorer, "strh", "\t$src, $addr",
- [(truncstorei16 GPR:$src, addrmode3:$addr)]>;
-
-def STRB : AI2stb<(outs), (ins GPR:$src, addrmode2:$addr), StFrm, IIC_iStorer,
- "strb", "\t$src, $addr",
- [(truncstorei8 GPR:$src, addrmode2:$addr)]>;
+def STRH : AI3str<0b1011, (outs), (ins GPR:$Rt, addrmode3:$addr), StMiscFrm,
+ IIC_iStore_bh_r, "strh", "\t$Rt, $addr",
+ [(truncstorei16 GPR:$Rt, addrmode3:$addr)]>;
// Store doubleword
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
-def STRD : AI3std<(outs), (ins GPR:$src1, GPR:$src2, addrmode3:$addr),
- StMiscFrm, IIC_iStorer,
+let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1,
+ isCodeGenOnly = 1 in // $src2 doesn't exist in asm string
+def STRD : AI3str<0b1111, (outs), (ins GPR:$src1, GPR:$src2, addrmode3:$addr),
+ StMiscFrm, IIC_iStore_d_r,
"strd", "\t$src1, $addr", []>, Requires<[IsARM, HasV5TE]>;
// Indexed stores
-def STR_PRE : AI2stwpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, am2offset:$offset),
- StFrm, IIC_iStoreru,
- "str", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_store GPR:$src, GPR:$base, am2offset:$offset))]>;
-
-def STR_POST : AI2stwpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "str", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb,
- (post_store GPR:$src, GPR:$base, am2offset:$offset))]>;
-
-def STRH_PRE : AI3sthpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strh", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_truncsti16 GPR:$src, GPR:$base,am3offset:$offset))]>;
-
-def STRH_POST: AI3sthpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strh", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb, (post_truncsti16 GPR:$src,
- GPR:$base, am3offset:$offset))]>;
-
-def STRB_PRE : AI2stbpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strb", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb, (pre_truncsti8 GPR:$src,
- GPR:$base, am2offset:$offset))]>;
-
-def STRB_POST: AI2stbpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strb", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb, (post_truncsti8 GPR:$src,
- GPR:$base, am2offset:$offset))]>;
+def STR_PRE : AI2stridx<0, 1, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am2offset:$offset),
+ IndexModePre, StFrm, IIC_iStore_ru,
+ "str", "\t$Rt, [$Rn, $offset]!", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb,
+ (pre_store GPR:$Rt, GPR:$Rn, am2offset:$offset))]>;
+
+def STR_POST : AI2stridx<0, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am2offset:$offset),
+ IndexModePost, StFrm, IIC_iStore_ru,
+ "str", "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb,
+ (post_store GPR:$Rt, GPR:$Rn, am2offset:$offset))]>;
+
+def STRB_PRE : AI2stridx<1, 1, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am2offset:$offset),
+ IndexModePre, StFrm, IIC_iStore_bh_ru,
+ "strb", "\t$Rt, [$Rn, $offset]!", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb, (pre_truncsti8 GPR:$Rt,
+ GPR:$Rn, am2offset:$offset))]>;
+def STRB_POST: AI2stridx<1, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am2offset:$offset),
+ IndexModePost, StFrm, IIC_iStore_bh_ru,
+ "strb", "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb, (post_truncsti8 GPR:$Rt,
+ GPR:$Rn, am2offset:$offset))]>;
+
+def STRH_PRE : AI3stridx<0b1011, 0, 1, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am3offset:$offset),
+ IndexModePre, StMiscFrm, IIC_iStore_ru,
+ "strh", "\t$Rt, [$Rn, $offset]!", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb,
+ (pre_truncsti16 GPR:$Rt, GPR:$Rn, am3offset:$offset))]>;
+
+def STRH_POST: AI3stridx<0b1011, 0, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am3offset:$offset),
+ IndexModePost, StMiscFrm, IIC_iStore_bh_ru,
+ "strh", "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb",
+ [(set GPR:$Rn_wb, (post_truncsti16 GPR:$Rt,
+ GPR:$Rn, am3offset:$offset))]>;
// For disassembly only
def STRD_PRE : AI3stdpr<(outs GPR:$base_wb),
(ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
+ StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$src1, $src2, [$base, $offset]!",
"$base = $base_wb", []>;
// For disassembly only
def STRD_POST: AI3stdpo<(outs GPR:$base_wb),
(ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
+ StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$src1, $src2, [$base], $offset",
"$base = $base_wb", []>;
// STRT, STRBT, and STRHT are for disassembly only.
-def STRT : AI2stwpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strt", "\t$src, [$base], $offset", "$base = $base_wb",
+def STRT : AI2stridx<0, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn,am2offset:$offset),
+ IndexModeNone, StFrm, IIC_iStore_ru,
+ "strt", "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{21} = 1; // overwrite
}
-def STRBT : AI2stbpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strbt", "\t$src, [$base], $offset", "$base = $base_wb",
+def STRBT : AI2stridx<1, 0, (outs GPR:$Rn_wb),
+ (ins GPR:$Rt, GPR:$Rn, am2offset:$offset),
+ IndexModeNone, StFrm, IIC_iStore_bh_ru,
+ "strbt", "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{21} = 1; // overwrite
}
def STRHT: AI3sthpo<(outs GPR:$base_wb),
(ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
+ StMiscFrm, IIC_iStore_bh_ru,
"strht", "\t$src, [$base], $offset", "$base = $base_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{21} = 1; // overwrite
@@ -1446,103 +1824,212 @@ def STRHT: AI3sthpo<(outs GPR:$base_wb),
// Load / store multiple Instructions.
//
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-def LDM : AXI4ld<(outs), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops),
- IndexModeNone, LdStMulFrm, IIC_iLoadm,
- "ldm${addr:submode}${p}\t$addr, $dsts", "", []>;
-
-def LDM_UPD : AXI4ld<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops),
- IndexModeUpd, LdStMulFrm, IIC_iLoadm,
- "ldm${addr:submode}${p}\t$addr!, $dsts",
- "$addr.addr = $wb", []>;
-} // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
-
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
-def STM : AXI4st<(outs), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops),
- IndexModeNone, LdStMulFrm, IIC_iStorem,
- "stm${addr:submode}${p}\t$addr, $srcs", "", []>;
-
-def STM_UPD : AXI4st<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops),
- IndexModeUpd, LdStMulFrm, IIC_iStorem,
- "stm${addr:submode}${p}\t$addr!, $srcs",
- "$addr.addr = $wb", []>;
-} // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
+multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
+ InstrItinClass itin, InstrItinClass itin_upd> {
+ def IA :
+ AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeNone, f, itin,
+ !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def IA_UPD :
+ AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeUpd, f, itin_upd,
+ !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+ def DA :
+ AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeNone, f, itin,
+ !strconcat(asm, "da${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b00; // Decrement After
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def DA_UPD :
+ AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeUpd, f, itin_upd,
+ !strconcat(asm, "da${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b00; // Decrement After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+ def DB :
+ AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeNone, f, itin,
+ !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def DB_UPD :
+ AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeUpd, f, itin_upd,
+ !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+ def IB :
+ AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeNone, f, itin,
+ !strconcat(asm, "ib${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b11; // Increment Before
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def IB_UPD :
+ AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ IndexModeUpd, f, itin_upd,
+ !strconcat(asm, "ib${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b11; // Increment Before
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+}
+
+let neverHasSideEffects = 1 in {
+
+let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
+defm LDM : arm_ldst_mult<"ldm", 1, LdStMulFrm, IIC_iLoad_m, IIC_iLoad_mu>;
+
+let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
+defm STM : arm_ldst_mult<"stm", 0, LdStMulFrm, IIC_iStore_m, IIC_iStore_mu>;
+
+} // neverHasSideEffects
+
+// Load / Store Multiple Mnemonic Aliases
+def : MnemonicAlias<"ldm", "ldmia">;
+def : MnemonicAlias<"stm", "stmia">;
+
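// The multiclass gives each mnemonic all four addressing submodes; the
// Inst{24-23} values used above are:
//   0b01  IA (increment after)     0b11  IB (increment before)
//   0b00  DA (decrement after)     0b10  DB (decrement before)
// With the aliases, the bare mnemonic defaults to IA, e.g.:
//   ldm   sp!, {r4, pc}   @ accepted as ldmia sp!, {r4, pc}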
+// FIXME: remove when we have a way of marking an MI with these properties.
+// FIXME: Should pc be an implicit operand like PICADD, etc?
+let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
+ hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
+// FIXME: Should be a pseudo-instruction.
+def LDMIA_RET : AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
+ reglist:$regs, variable_ops),
+ IndexModeUpd, LdStMulFrm, IIC_iLoad_mBr,
+ "ldmia${p}\t$Rn!, $regs",
+ "$Rn = $wb", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = 1; // Load
+}
//===----------------------------------------------------------------------===//
// Move Instructions.
//
let neverHasSideEffects = 1 in
-def MOVr : AsI1<0b1101, (outs GPR:$dst), (ins GPR:$src), DPFrm, IIC_iMOVr,
- "mov", "\t$dst, $src", []>, UnaryDP {
+def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
+ "mov", "\t$Rd, $Rm", []>, UnaryDP {
+ bits<4> Rd;
+ bits<4> Rm;
+
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
}
// A version for the smaller set of tail call registers.
let neverHasSideEffects = 1 in
-def MOVr_TC : AsI1<0b1101, (outs tcGPR:$dst), (ins tcGPR:$src), DPFrm,
- IIC_iMOVr, "mov", "\t$dst, $src", []>, UnaryDP {
+def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
+ IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP {
+ bits<4> Rd;
+ bits<4> Rm;
+
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
}
-def MOVs : AsI1<0b1101, (outs GPR:$dst), (ins so_reg:$src),
+def MOVs : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg:$src),
DPSoRegFrm, IIC_iMOVsr,
- "mov", "\t$dst, $src", [(set GPR:$dst, so_reg:$src)]>, UnaryDP {
+ "mov", "\t$Rd, $src", [(set GPR:$Rd, shift_so_reg:$src)]>,
+ UnaryDP {
+ bits<4> Rd;
+ bits<12> src;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = src;
let Inst{25} = 0;
}
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOVi : AsI1<0b1101, (outs GPR:$dst), (ins so_imm:$src), DPFrm, IIC_iMOVi,
- "mov", "\t$dst, $src", [(set GPR:$dst, so_imm:$src)]>, UnaryDP {
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
+def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm, IIC_iMOVi,
+ "mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP {
+ bits<4> Rd;
+ bits<12> imm;
let Inst{25} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = 0b0000;
+ let Inst{11-0} = imm;
}
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOVi16 : AI1<0b1000, (outs GPR:$dst), (ins i32imm:$src),
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
+def MOVi16 : AI1<0b1000, (outs GPR:$Rd), (ins i32imm_hilo16:$imm),
DPFrm, IIC_iMOVi,
- "movw", "\t$dst, $src",
- [(set GPR:$dst, imm0_65535:$src)]>,
+ "movw", "\t$Rd, $imm",
+ [(set GPR:$Rd, imm0_65535:$imm)]>,
Requires<[IsARM, HasV6T2]>, UnaryDP {
+ bits<4> Rd;
+ bits<16> imm;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm{11-0};
+ let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
}
-let Constraints = "$src = $dst" in
-def MOVTi16 : AI1<0b1010, (outs GPR:$dst), (ins GPR:$src, i32imm:$imm),
+def MOVi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
+ (ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+
+let Constraints = "$src = $Rd" in {
+def MOVTi16 : AI1<0b1010, (outs GPR:$Rd), (ins GPR:$src, i32imm_hilo16:$imm),
DPFrm, IIC_iMOVi,
- "movt", "\t$dst, $imm",
- [(set GPR:$dst,
+ "movt", "\t$Rd, $imm",
+ [(set GPR:$Rd,
(or (and GPR:$src, 0xffff),
lo16AllZero:$imm))]>, UnaryDP,
Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<16> imm;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm{11-0};
+ let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
}
+def MOVTi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
+ (ins GPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+
+} // Constraints
+
def : ARMPat<(or GPR:$src, 0xffff0000), (MOVTi16 GPR:$src, 0xffff)>,
Requires<[IsARM, HasV6T2]>;
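// Typical use of the movw/movt pair to materialize a 32-bit constant
// (matching the MOVTi16 pattern above):
//   movw r0, #0x5678   @ r0 = 0x00005678
//   movt r0, #0x1234   @ r0 = 0x12345678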
let Uses = [CPSR] in
-def MOVrx : AsI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo, IIC_iMOVsi,
- "mov", "\t$dst, $src, rrx",
- [(set GPR:$dst, (ARMrrx GPR:$src))]>, UnaryDP;
+def RRX: PseudoInst<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVsi,
+ [(set GPR:$Rd, (ARMrrx GPR:$Rm))]>, UnaryDP,
+ Requires<[IsARM]>;
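// RRX likewise stands for the removed "mov rD, rM, rrx" form: a 33-bit
// rotate right through the carry flag by one bit (Uses = [CPSR] supplies the
// incoming carry).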
// These aren't really mov instructions, but we have to define them this way
// due to flag operands.
let Defs = [CPSR] in {
-def MOVsrl_flag : AI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo,
- IIC_iMOVsi, "movs", "\t$dst, $src, lsr #1",
- [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP;
-def MOVsra_flag : AI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo,
- IIC_iMOVsi, "movs", "\t$dst, $src, asr #1",
- [(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP;
+def MOVsrl_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
+ [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP,
+ Requires<[IsARM]>;
+def MOVsra_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
+ [(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP,
+ Requires<[IsARM]>;
}
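// These pseudos stand for the flag-setting shifts the removed asm strings
// spelled out:
//   MOVsrl_flag => movs rD, rM, lsr #1
//   MOVsra_flag => movs rD, rM, asr #1
// with the shifted-out bit landing in the carry flag (hence Defs = [CPSR]).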
//===----------------------------------------------------------------------===//
@@ -1551,31 +2038,31 @@ def MOVsra_flag : AI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo,
// Sign extenders
-defm SXTB : AI_unary_rrot<0b01101010,
- "sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>;
-defm SXTH : AI_unary_rrot<0b01101011,
- "sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>;
+defm SXTB : AI_ext_rrot<0b01101010,
+ "sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>;
+defm SXTH : AI_ext_rrot<0b01101011,
+ "sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>;
-defm SXTAB : AI_bin_rrot<0b01101010,
+defm SXTAB : AI_exta_rrot<0b01101010,
"sxtab", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
-defm SXTAH : AI_bin_rrot<0b01101011,
+defm SXTAH : AI_exta_rrot<0b01101011,
"sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
// For disassembly only
-defm SXTB16 : AI_unary_rrot_np<0b01101000, "sxtb16">;
+defm SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
// For disassembly only
-defm SXTAB16 : AI_bin_rrot_np<0b01101000, "sxtab16">;
+defm SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
// Zero extenders
let AddedComplexity = 16 in {
-defm UXTB : AI_unary_rrot<0b01101110,
- "uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>;
-defm UXTH : AI_unary_rrot<0b01101111,
- "uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm UXTB16 : AI_unary_rrot<0b01101100,
- "uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
+defm UXTB : AI_ext_rrot<0b01101110,
+ "uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>;
+defm UXTH : AI_ext_rrot<0b01101111,
+ "uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
+defm UXTB16 : AI_ext_rrot<0b01101100,
+ "uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
@@ -1586,33 +2073,49 @@ defm UXTB16 : AI_unary_rrot<0b01101100,
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16r_rot GPR:$Src, 8)>;
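// i.e. the rotated form of uxtb16: "uxtb16 rD, rM, ror #8" computes
//   rD = (rM ror 8) & 0x00ff00ff
// and under this mask the ror and the plain srl by 8 select the same two
// bytes, which is what makes this particular pattern sound.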
-defm UXTAB : AI_bin_rrot<0b01101110, "uxtab",
+defm UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
-defm UXTAH : AI_bin_rrot<0b01101111, "uxtah",
+defm UXTAH : AI_exta_rrot<0b01101111, "uxtah",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
}
// This isn't safe in general; the add is two 16-bit units, not a 32-bit add.
// For disassembly only
-defm UXTAB16 : AI_bin_rrot_np<0b01101100, "uxtab16">;
+defm UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
-def SBFX : I<(outs GPR:$dst),
- (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
- "sbfx", "\t$dst, $src, $lsb, $width", "", []>,
+def SBFX : I<(outs GPR:$Rd),
+ (ins GPR:$Rn, imm0_31:$lsb, imm0_31_m1:$width),
+ AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
+ "sbfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<5> lsb;
+ bits<5> width;
let Inst{27-21} = 0b0111101;
let Inst{6-4} = 0b101;
+ let Inst{20-16} = width;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = lsb;
+ let Inst{3-0} = Rn;
}
-def UBFX : I<(outs GPR:$dst),
- (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
- "ubfx", "\t$dst, $src, $lsb, $width", "", []>,
+def UBFX : I<(outs GPR:$Rd),
+ (ins GPR:$Rn, imm0_31:$lsb, imm0_31_m1:$width),
+ AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
+ "ubfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<5> lsb;
+ bits<5> width;
let Inst{27-21} = 0b0111111;
let Inst{6-4} = 0b101;
+ let Inst{20-16} = width;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = lsb;
+ let Inst{3-0} = Rn;
}
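// Semantics sketch: with lsb and width as above,
//   ubfx rD, rN, #lsb, #width   =>  rD = (rN >> lsb) & ((1 << width) - 1)
//   sbfx rD, rN, #lsb, #width   =>  the same field, then sign-extended
// The imm0_31_m1 operand reflects that the hardware field in Inst{20-16}
// holds width-1.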
//===----------------------------------------------------------------------===//
@@ -1620,100 +2123,166 @@ def UBFX : I<(outs GPR:$dst),
//
defm ADD : AsI1_bin_irs<0b0100, "add",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
defm SUB : AsI1_bin_irs<0b0010, "sub",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(sub node:$LHS, node:$RHS)>>;
// ADD and SUB with 's' bit set.
defm ADDS : AI1_bin_s_irs<0b0100, "adds",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>;
defm SUBS : AI1_bin_s_irs<0b0010, "subs",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(subc node:$LHS, node:$RHS)>>;
defm ADC : AI1_adde_sube_irs<0b0101, "adc",
BinOpFrag<(adde_dead_carry node:$LHS, node:$RHS)>, 1>;
defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
BinOpFrag<(sube_dead_carry node:$LHS, node:$RHS)>>;
+
+// ADC and SUBC with 's' bit set.
defm ADCS : AI1_adde_sube_s_irs<0b0101, "adcs",
BinOpFrag<(adde_live_carry node:$LHS, node:$RHS)>, 1>;
defm SBCS : AI1_adde_sube_s_irs<0b0110, "sbcs",
BinOpFrag<(sube_live_carry node:$LHS, node:$RHS) >>;
-def RSBri : AsI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, "rsb", "\t$dst, $a, $b",
- [(set GPR:$dst, (sub so_imm:$b, GPR:$a))]> {
- let Inst{25} = 1;
+def RSBri : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ IIC_iALUi, "rsb", "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (sub so_imm:$imm, GPR:$Rn))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
}
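// rsb reverses the usual operand order: rD = imm - rN. The common idiom is
// negation:
//   rsb r0, r1, #0   @ r0 = -r1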
// The reg/reg form is only defined for the disassembler; for codegen it is
// equivalent to SUBrr.
-def RSBrr : AsI1<0b0011, (outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm,
- IIC_iALUr, "rsb", "\t$dst, $a, $b",
+def RSBrr : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
+ IIC_iALUr, "rsb", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]> {
- let Inst{25} = 0;
- let Inst{11-4} = 0b00000000;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{11-4} = 0b00000000;
+ let Inst{25} = 0;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
-def RSBrs : AsI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, "rsb", "\t$dst, $a, $b",
- [(set GPR:$dst, (sub so_reg:$b, GPR:$a))]> {
- let Inst{25} = 0;
+def RSBrs : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, "rsb", "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (sub so_reg:$shift, GPR:$Rn))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
// RSB with 's' bit set.
-let Defs = [CPSR] in {
-def RSBSri : AI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, "rsbs", "\t$dst, $a, $b",
- [(set GPR:$dst, (subc so_imm:$b, GPR:$a))]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
-}
-def RSBSrs : AI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, "rsbs", "\t$dst, $a, $b",
- [(set GPR:$dst, (subc so_reg:$b, GPR:$a))]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
+let isCodeGenOnly = 1, Defs = [CPSR] in {
+def RSBSri : AI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
+ IIC_iALUi, "rsbs", "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (subc so_imm:$imm, GPR:$Rn))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{20} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
+}
+def RSBSrs : AI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, "rsbs", "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (subc so_reg:$shift, GPR:$Rn))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
}
let Uses = [CPSR] in {
-def RSCri : AsI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, "rsc", "\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_imm:$b, GPR:$a))]>,
+def RSCri : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ DPFrm, IIC_iALUi, "rsc", "\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (sube_dead_carry so_imm:$imm, GPR:$Rn))]>,
Requires<[IsARM]> {
- let Inst{25} = 1;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
}
// The reg/reg form is only defined for the disassembler; for codegen it is
// equivalent to SUBrr.
-def RSCrr : AsI1<0b0111, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- DPFrm, IIC_iALUr, "rsc", "\t$dst, $a, $b",
+def RSCrr : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ DPFrm, IIC_iALUr, "rsc", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]> {
- let Inst{25} = 0;
- let Inst{11-4} = 0b00000000;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{11-4} = 0b00000000;
+ let Inst{25} = 0;
+ let Inst{3-0} = Rm;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
-def RSCrs : AsI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, "rsc", "\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_reg:$b, GPR:$a))]>,
+def RSCrs : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, "rsc", "\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (sube_dead_carry so_reg:$shift, GPR:$Rn))]>,
Requires<[IsARM]> {
- let Inst{25} = 0;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
}
// FIXME: Allow these to be predicated.
-let Defs = [CPSR], Uses = [CPSR] in {
-def RSCSri : AXI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, "rscs\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_imm:$b, GPR:$a))]>,
+let isCodeGenOnly = 1, Defs = [CPSR], Uses = [CPSR] in {
+def RSCSri : AXI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
+ DPFrm, IIC_iALUi, "rscs\t$Rd, $Rn, $imm",
+ [(set GPR:$Rd, (sube_dead_carry so_imm:$imm, GPR:$Rn))]>,
Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{20} = 1;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{11-0} = imm;
}
-def RSCSrs : AXI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, "rscs\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_reg:$b, GPR:$a))]>,
+def RSCSrs : AXI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_reg:$shift),
+ DPSoRegFrm, IIC_iALUsr, "rscs\t$Rd, $Rn, $shift",
+ [(set GPR:$Rd, (sube_dead_carry so_reg:$shift, GPR:$Rn))]>,
Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> shift;
+ let Inst{25} = 0;
+ let Inst{20} = 1;
+ let Inst{11-0} = shift;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = Rn;
}
}
@@ -1740,111 +2309,166 @@ def : ARMPat<(adde GPR:$src, so_imm_not:$imm),
// ARM Arithmetic Instruction -- for disassembly only
// GPR:$dst = GPR:$a op GPR:$b
-class AAI<bits<8> op27_20, bits<4> op7_4, string opc,
- list<dag> pattern = [/* For disassembly only; pattern left blank */]>
- : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm, IIC_iALUr,
- opc, "\t$dst, $a, $b", pattern> {
+class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
+ list<dag> pattern = [/* For disassembly only; pattern left blank */],
+ dag iops = (ins GPR:$Rn, GPR:$Rm), string asm = "\t$Rd, $Rn, $Rm">
+ : AI<(outs GPR:$Rd), iops, DPFrm, IIC_iALUr, opc, asm, pattern> {
+ bits<4> Rn;
+ bits<4> Rd;
+ bits<4> Rm;
let Inst{27-20} = op27_20;
- let Inst{7-4} = op7_4;
+ let Inst{11-4} = op11_4;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rd;
+ let Inst{3-0} = Rm;
}
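// Note on the widened opcode field: the old 4-bit op7_4 values gain the
// fixed bits {11-8} (0b1111 for the parallel add/sub ops, 0b0000 for
// qadd/qsub/qdadd/qdsub), e.g. qadd16's 0b0001 becomes 0b11110001, so the
// full Inst{11-4} byte is now spelled out in one place.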
// Saturating add/subtract -- for disassembly only
-def QADD : AAI<0b00010000, 0b0101, "qadd",
- [(set GPR:$dst, (int_arm_qadd GPR:$a, GPR:$b))]>;
-def QADD16 : AAI<0b01100010, 0b0001, "qadd16">;
-def QADD8 : AAI<0b01100010, 0b1001, "qadd8">;
-def QASX : AAI<0b01100010, 0b0011, "qasx">;
-def QDADD : AAI<0b00010100, 0b0101, "qdadd">;
-def QDSUB : AAI<0b00010110, 0b0101, "qdsub">;
-def QSAX : AAI<0b01100010, 0b0101, "qsax">;
-def QSUB : AAI<0b00010010, 0b0101, "qsub",
- [(set GPR:$dst, (int_arm_qsub GPR:$a, GPR:$b))]>;
-def QSUB16 : AAI<0b01100010, 0b0111, "qsub16">;
-def QSUB8 : AAI<0b01100010, 0b1111, "qsub8">;
-def UQADD16 : AAI<0b01100110, 0b0001, "uqadd16">;
-def UQADD8 : AAI<0b01100110, 0b1001, "uqadd8">;
-def UQASX : AAI<0b01100110, 0b0011, "uqasx">;
-def UQSAX : AAI<0b01100110, 0b0101, "uqsax">;
-def UQSUB16 : AAI<0b01100110, 0b0111, "uqsub16">;
-def UQSUB8 : AAI<0b01100110, 0b1111, "uqsub8">;
+def QADD : AAI<0b00010000, 0b00000101, "qadd",
+ [(set GPR:$Rd, (int_arm_qadd GPR:$Rm, GPR:$Rn))],
+ (ins GPR:$Rm, GPR:$Rn), "\t$Rd, $Rm, $Rn">;
+def QSUB : AAI<0b00010010, 0b00000101, "qsub",
+ [(set GPR:$Rd, (int_arm_qsub GPR:$Rm, GPR:$Rn))],
+ (ins GPR:$Rm, GPR:$Rn), "\t$Rd, $Rm, $Rn">;
+def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [], (ins GPR:$Rm, GPR:$Rn),
+ "\t$Rd, $Rm, $Rn">;
+def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [], (ins GPR:$Rm, GPR:$Rn),
+ "\t$Rd, $Rm, $Rn">;
+
+def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
+def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
+def QASX : AAI<0b01100010, 0b11110011, "qasx">;
+def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
+def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
+def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
+def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
+def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
+def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
+def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
+def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
+def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
// Signed/Unsigned add/subtract -- for disassembly only
-def SASX : AAI<0b01100001, 0b0011, "sasx">;
-def SADD16 : AAI<0b01100001, 0b0001, "sadd16">;
-def SADD8 : AAI<0b01100001, 0b1001, "sadd8">;
-def SSAX : AAI<0b01100001, 0b0101, "ssax">;
-def SSUB16 : AAI<0b01100001, 0b0111, "ssub16">;
-def SSUB8 : AAI<0b01100001, 0b1111, "ssub8">;
-def UASX : AAI<0b01100101, 0b0011, "uasx">;
-def UADD16 : AAI<0b01100101, 0b0001, "uadd16">;
-def UADD8 : AAI<0b01100101, 0b1001, "uadd8">;
-def USAX : AAI<0b01100101, 0b0101, "usax">;
-def USUB16 : AAI<0b01100101, 0b0111, "usub16">;
-def USUB8 : AAI<0b01100101, 0b1111, "usub8">;
+def SASX : AAI<0b01100001, 0b11110011, "sasx">;
+def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
+def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
+def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
+def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
+def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
+def UASX : AAI<0b01100101, 0b11110011, "uasx">;
+def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
+def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
+def USAX : AAI<0b01100101, 0b11110101, "usax">;
+def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
+def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
// Signed/Unsigned halving add/subtract -- for disassembly only
-def SHASX : AAI<0b01100011, 0b0011, "shasx">;
-def SHADD16 : AAI<0b01100011, 0b0001, "shadd16">;
-def SHADD8 : AAI<0b01100011, 0b1001, "shadd8">;
-def SHSAX : AAI<0b01100011, 0b0101, "shsax">;
-def SHSUB16 : AAI<0b01100011, 0b0111, "shsub16">;
-def SHSUB8 : AAI<0b01100011, 0b1111, "shsub8">;
-def UHASX : AAI<0b01100111, 0b0011, "uhasx">;
-def UHADD16 : AAI<0b01100111, 0b0001, "uhadd16">;
-def UHADD8 : AAI<0b01100111, 0b1001, "uhadd8">;
-def UHSAX : AAI<0b01100111, 0b0101, "uhsax">;
-def UHSUB16 : AAI<0b01100111, 0b0111, "uhsub16">;
-def UHSUB8 : AAI<0b01100111, 0b1111, "uhsub8">;
+def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
+def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
+def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
+def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
+def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
+def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
+def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
+def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
+def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
+def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
+def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
+def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
// Unsigned Sum of Absolute Differences [and Accumulate] -- for disassembly only
-def USAD8 : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b),
+def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
- "\t$dst, $a, $b", []>,
+ "\t$Rd, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{27-20} = 0b01111000;
let Inst{15-12} = 0b1111;
let Inst{7-4} = 0b0001;
+ let Inst{19-16} = Rd;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
}
-def USADA8 : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
+def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
- "\t$dst, $a, $b, $acc", []>,
+ "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ bits<4> Ra;
let Inst{27-20} = 0b01111000;
let Inst{7-4} = 0b0001;
+ let Inst{19-16} = Rd;
+ let Inst{15-12} = Ra;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
}
// Signed/Unsigned saturate -- for disassembly only
-def SSAT : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, shift_imm:$sh),
- SatFrm, NoItinerary, "ssat", "\t$dst, $bit_pos, $a$sh",
+def SSAT : AI<(outs GPR:$Rd), (ins i32imm:$sat_imm, GPR:$Rn, shift_imm:$sh),
+ SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh",
[/* For disassembly only; pattern left blank */]> {
+ bits<4> Rd;
+ bits<5> sat_imm;
+ bits<4> Rn;
+ bits<8> sh;
let Inst{27-21} = 0b0110101;
let Inst{5-4} = 0b01;
+ let Inst{20-16} = sat_imm;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = sh{7-3};
+ let Inst{6} = sh{0};
+ let Inst{3-0} = Rn;
}
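// For illustration: per the ARM ARM, "ssat r0, #8, r1" clamps r1 to the
// signed 8-bit range [-128, 127]; the sat_imm operand carries the
// saturate-to width and lands in Inst{20-16} as encoded above.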
-def SSAT16 : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), SatFrm,
- NoItinerary, "ssat16", "\t$dst, $bit_pos, $a",
+def SSAT16 : AI<(outs GPR:$Rd), (ins i32imm:$sat_imm, GPR:$Rn), SatFrm,
+ NoItinerary, "ssat16", "\t$Rd, $sat_imm, $Rn",
[/* For disassembly only; pattern left blank */]> {
+ bits<4> Rd;
+ bits<4> sat_imm;
+ bits<4> Rn;
let Inst{27-20} = 0b01101010;
- let Inst{7-4} = 0b0011;
+ let Inst{11-4} = 0b11110011;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = sat_imm;
+ let Inst{3-0} = Rn;
}
-def USAT : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, shift_imm:$sh),
- SatFrm, NoItinerary, "usat", "\t$dst, $bit_pos, $a$sh",
+def USAT : AI<(outs GPR:$Rd), (ins i32imm:$sat_imm, GPR:$Rn, shift_imm:$sh),
+ SatFrm, NoItinerary, "usat", "\t$Rd, $sat_imm, $Rn$sh",
[/* For disassembly only; pattern left blank */]> {
+ bits<4> Rd;
+ bits<5> sat_imm;
+ bits<4> Rn;
+ bits<8> sh;
let Inst{27-21} = 0b0110111;
let Inst{5-4} = 0b01;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = sh{7-3};
+ let Inst{6} = sh{0};
+ let Inst{20-16} = sat_imm;
+ let Inst{3-0} = Rn;
}
-def USAT16 : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), SatFrm,
- NoItinerary, "usat16", "\t$dst, $bit_pos, $a",
+def USAT16 : AI<(outs GPR:$Rd), (ins i32imm:$sat_imm, GPR:$Rn), SatFrm,
+ NoItinerary, "usat16", "\t$Rd, $sat_imm, $Rn",
[/* For disassembly only; pattern left blank */]> {
+ bits<4> Rd;
+ bits<4> sat_imm;
+ bits<4> Rn;
let Inst{27-20} = 0b01101110;
- let Inst{7-4} = 0b0011;
+ let Inst{11-4} = 0b11110011;
+ let Inst{15-12} = Rd;
+ let Inst{19-16} = sat_imm;
+ let Inst{3-0} = Rn;
}
def : ARMV6Pat<(int_arm_ssat GPR:$a, imm:$pos), (SSAT imm:$pos, GPR:$a, 0)>;
@@ -1855,52 +2479,100 @@ def : ARMV6Pat<(int_arm_usat GPR:$a, imm:$pos), (USAT imm:$pos, GPR:$a, 0)>;
//
defm AND : AsI1_bin_irs<0b0000, "and",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
-defm ANDS : AI1_bin_s_irs<0b0000, "and",
- BinOpFrag<(ARMand node:$LHS, node:$RHS)>, 1>;
defm ORR : AsI1_bin_irs<0b1100, "orr",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
defm EOR : AsI1_bin_irs<0b0001, "eor",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
defm BIC : AsI1_bin_irs<0b1110, "bic",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
-def BFC : I<(outs GPR:$dst), (ins GPR:$src, bf_inv_mask_imm:$imm),
+def BFC : I<(outs GPR:$Rd), (ins GPR:$src, bf_inv_mask_imm:$imm),
AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
- "bfc", "\t$dst, $imm", "$src = $dst",
- [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]>,
+ "bfc", "\t$Rd, $imm", "$src = $Rd",
+ [(set GPR:$Rd, (and GPR:$src, bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<10> imm;
let Inst{27-21} = 0b0111110;
let Inst{6-0} = 0b0011111;
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = imm{4-0}; // lsb
+ let Inst{20-16} = imm{9-5}; // width
}
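// For illustration: "bfc r0, #8, #4" clears bits 11:8 of r0, i.e. the
// (and GPR:$src, 0xFFFFF0FF) case matched by the pattern above, with
// bf_inv_mask_imm packing the inverted mask as the (lsb, width) pair split
// across Inst{11-7} and Inst{20-16}.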
// A8.6.18 BFI - Bitfield insert (Encoding A1)
-def BFI : I<(outs GPR:$dst), (ins GPR:$src, GPR:$val, bf_inv_mask_imm:$imm),
+def BFI : I<(outs GPR:$Rd), (ins GPR:$src, GPR:$Rn, bf_inv_mask_imm:$imm),
AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
- "bfi", "\t$dst, $val, $imm", "$src = $dst",
- [(set GPR:$dst, (ARMbfi GPR:$src, GPR:$val,
+ "bfi", "\t$Rd, $Rn, $imm", "$src = $Rd",
+ [(set GPR:$Rd, (ARMbfi GPR:$src, GPR:$Rn,
bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<10> imm;
+ let Inst{27-21} = 0b0111110;
+ let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = imm{4-0}; // lsb
+ let Inst{20-16} = imm{9-5}; // width
+ let Inst{3-0} = Rn;
+}
+
+// GNU as only supports this form of bfi (with 4 arguments)
+let isAsmParserOnly = 1 in
+def BFI4p : I<(outs GPR:$Rd), (ins GPR:$src, GPR:$Rn,
+ lsb_pos_imm:$lsb, width_imm:$width),
+ AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
+ "bfi", "\t$Rd, $Rn, $lsb, $width", "$src = $Rd",
+ []>, Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<5> lsb;
+ bits<5> width;
let Inst{27-21} = 0b0111110;
let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
+ let Inst{15-12} = Rd;
+ let Inst{11-7} = lsb;
+ let Inst{20-16} = width; // Custom encoder => lsb+width-1
+ let Inst{3-0} = Rn;
}
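+// For illustration of the custom encoder noted above: for
+// "bfi r0, r1, #8, #4", lsb = 8 goes into Inst{11-7} and the encoder emits
+// lsb + width - 1 = 11 (the msb) into Inst{20-16}.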
-def MVNr : AsI1<0b1111, (outs GPR:$dst), (ins GPR:$src), DPFrm, IIC_iMOVr,
- "mvn", "\t$dst, $src",
- [(set GPR:$dst, (not GPR:$src))]>, UnaryDP {
+def MVNr : AsI1<0b1111, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMVNr,
+ "mvn", "\t$Rd, $Rm",
+ [(set GPR:$Rd, (not GPR:$Rm))]>, UnaryDP {
+ bits<4> Rd;
+ bits<4> Rm;
let Inst{25} = 0;
+ let Inst{19-16} = 0b0000;
let Inst{11-4} = 0b00000000;
-}
-def MVNs : AsI1<0b1111, (outs GPR:$dst), (ins so_reg:$src), DPSoRegFrm,
- IIC_iMOVsr, "mvn", "\t$dst, $src",
- [(set GPR:$dst, (not so_reg:$src))]>, UnaryDP {
+ let Inst{15-12} = Rd;
+ let Inst{3-0} = Rm;
+}
+def MVNs : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg:$shift), DPSoRegFrm,
+ IIC_iMVNsr, "mvn", "\t$Rd, $shift",
+ [(set GPR:$Rd, (not so_reg:$shift))]>, UnaryDP {
+ bits<4> Rd;
+ bits<12> shift;
let Inst{25} = 0;
-}
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MVNi : AsI1<0b1111, (outs GPR:$dst), (ins so_imm:$imm), DPFrm,
- IIC_iMOVi, "mvn", "\t$dst, $imm",
- [(set GPR:$dst, so_imm_not:$imm)]>,UnaryDP {
- let Inst{25} = 1;
+ let Inst{19-16} = 0b0000;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = shift;
+}
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
+def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
+ IIC_iMVNi, "mvn", "\t$Rd, $imm",
+ [(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP {
+ bits<4> Rd;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{19-16} = 0b0000;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm;
}
def : ARMPat<(and GPR:$src, so_imm_not:$imm),
@@ -1909,247 +2581,299 @@ def : ARMPat<(and GPR:$src, so_imm_not:$imm),
//===----------------------------------------------------------------------===//
// Multiply Instructions.
//
+class AsMul1I32<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{19-16} = Rd;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
+class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{19-16} = RdHi;
+ let Inst{15-12} = RdLo;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
-let isCommutable = 1 in
-def MUL : AsMul1I<0b0000000, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "mul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mul GPR:$a, GPR:$b))]>;
-
-def MLA : AsMul1I<0b0000001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "mla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mul GPR:$a, GPR:$b), GPR:$c))]>;
-
-def MLS : AMul1I<0b0000011, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "mls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mul GPR:$a, GPR:$b)))]>,
- Requires<[IsARM, HasV6T2]>;
+let isCommutable = 1 in {
+let Constraints = "@earlyclobber $Rd" in
+def MULv5: ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm,
+ pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMUL32,
+ [(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, NoV6]>;
+
+def MUL : AsMul1I32<0b0000000, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasV6]>;
+}
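+// Note: pre-v6 ARM requires Rd to differ from Rm for mul/mla; modelling the
+// pre-v6 forms as pseudos with @earlyclobber on $Rd (above and below) keeps
+// the register allocator from assigning Rd to a source register.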
+
+let Constraints = "@earlyclobber $Rd" in
+def MLAv5: ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMAC32,
+ [(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
+ Requires<[IsARM, NoV6]> {
+ bits<4> Ra;
+ let Inst{15-12} = Ra;
+}
+def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
+ Requires<[IsARM, HasV6]> {
+ bits<4> Ra;
+ let Inst{15-12} = Ra;
+}
+
+def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (sub GPR:$Ra, (mul GPR:$Rn, GPR:$Rm)))]>,
+ Requires<[IsARM, HasV6T2]> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<4> Rn;
+ bits<4> Ra;
+ let Inst{19-16} = Rd;
+ let Inst{15-12} = Ra;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
// Extra precision multiplies with low / high results
+
let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
-def SMULL : AsMul1I<0b0000110, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "smull", "\t$ldst, $hdst, $a, $b", []>;
+let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
+def SMULLv5 : ARMPseudoInst<(outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMUL64, []>,
+ Requires<[IsARM, NoV6]>;
-def UMULL : AsMul1I<0b0000100, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "umull", "\t$ldst, $hdst, $a, $b", []>;
+def UMULLv5 : ARMPseudoInst<(outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMUL64, []>,
+ Requires<[IsARM, NoV6]>;
}
-// Multiply + accumulate
-def SMLAL : AsMul1I<0b0000111, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "smlal", "\t$ldst, $hdst, $a, $b", []>;
+def SMULL : AsMul1I64<0b0000110, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
+ "smull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ Requires<[IsARM, HasV6]>;
-def UMLAL : AsMul1I<0b0000101, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umlal", "\t$ldst, $hdst, $a, $b", []>;
+def UMULL : AsMul1I64<0b0000100, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
+ "umull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ Requires<[IsARM, HasV6]>;
+}
-def UMAAL : AMul1I <0b0000010, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umaal", "\t$ldst, $hdst, $a, $b", []>,
+// Multiply + accumulate
+let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
+def SMLALv5 : ARMPseudoInst<(outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMAC64, []>,
+ Requires<[IsARM, NoV6]>;
+def UMLALv5 : ARMPseudoInst<(outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMAC64, []>,
+ Requires<[IsARM, NoV6]>;
+def UMAALv5 : ARMPseudoInst<(outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ Size4Bytes, IIC_iMAC64, []>,
+ Requires<[IsARM, NoV6]>;
+
+}
+
+def SMLAL : AsMul1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+ "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ Requires<[IsARM, HasV6]>;
+def UMLAL : AsMul1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+ "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
+
+def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+ "umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ Requires<[IsARM, HasV6]> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{19-16} = RdLo;
+ let Inst{15-12} = RdHi;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
} // neverHasSideEffects
// Most significant word multiply
-def SMMUL : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "smmul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mulhs GPR:$a, GPR:$b))]>,
+def SMMUL : AMul2I <0b0111010, 0b0001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL32, "smmul", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (mulhs GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0001;
let Inst{15-12} = 0b1111;
}
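// For illustration: "smmul r0, r1, r2" writes the top 32 bits of the signed
// 64-bit product r1 * r2, which is exactly what the mulhs node in the
// pattern above selects.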
-def SMMULR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "smmulr", "\t$dst, $a, $b",
+def SMMULR : AMul2I <0b0111010, 0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL32, "smmulr", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011; // R = 1
let Inst{15-12} = 0b1111;
}
-def SMMLA : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mulhs GPR:$a, GPR:$b), GPR:$c))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0001;
-}
+def SMMLA : AMul2Ia <0b0111010, 0b0001, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add (mulhs GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
+ Requires<[IsARM, HasV6]>;
-def SMMLAR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmlar", "\t$dst, $a, $b, $c",
+def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "smmlar", "\t$Rd, $Rn, $Rm, $Ra",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011; // R = 1
-}
+ Requires<[IsARM, HasV6]>;
-def SMMLS : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1101;
-}
+def SMMLS : AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (sub GPR:$Ra, (mulhs GPR:$Rn, GPR:$Rm)))]>,
+ Requires<[IsARM, HasV6]>;
-def SMMLSR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmlsr", "\t$dst, $a, $b, $c",
+def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1111; // R = 1
-}
+ Requires<[IsARM, HasV6]>;
multiclass AI_smul<string opc, PatFrag opnode> {
- def BB : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "bb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def BT : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "bt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
-
- def TB : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "tb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def TT : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "tt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
-
- def WB : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL16, !strconcat(opc, "wb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def WT : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL16, !strconcat(opc, "wt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
+ def BB : AMulxyI<0b0001011, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
+ (sext_inreg GPR:$Rm, i16)))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def BT : AMulxyI<0b0001011, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
+ (sra GPR:$Rm, (i32 16))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def TB : AMulxyI<0b0001011, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
+ (sext_inreg GPR:$Rm, i16)))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def TT : AMulxyI<0b0001011, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
+ (sra GPR:$Rm, (i32 16))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def WB : AMulxyI<0b0001001, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (sra (opnode GPR:$Rn,
+ (sext_inreg GPR:$Rm, i16)), (i32 16)))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def WT : AMulxyI<0b0001001, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMUL16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (sra (opnode GPR:$Rn,
+ (sra GPR:$Rm, (i32 16))), (i32 16)))]>,
+ Requires<[IsARM, HasV5TE]>;
}
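// For illustration: from this multiclass, "smulbt r0, r1, r2" multiplies the
// sign-extended bottom halfword of r1 by the top halfword of r2, per the BT
// pattern above; the other suffixes pick the halfword combinations likewise.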
multiclass AI_smla<string opc, PatFrag opnode> {
- def BB : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "bb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc,
- (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def BT : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "bt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
-
- def TB : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "tb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def TT : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "tt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
-
- def WB : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "wb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def WT : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "wt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
+ def BB : AMulxyIa<0b0001000, 0b00, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra,
+ (opnode (sext_inreg GPR:$Rn, i16),
+ (sext_inreg GPR:$Rm, i16))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def BT : AMulxyIa<0b0001000, 0b10, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra, (opnode (sext_inreg GPR:$Rn, i16),
+ (sra GPR:$Rm, (i32 16)))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def TB : AMulxyIa<0b0001000, 0b01, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra, (opnode (sra GPR:$Rn, (i32 16)),
+ (sext_inreg GPR:$Rm, i16))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def TT : AMulxyIa<0b0001000, 0b11, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra, (opnode (sra GPR:$Rn, (i32 16)),
+ (sra GPR:$Rm, (i32 16)))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def WB : AMulxyIa<0b0001001, 0b00, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra, (sra (opnode GPR:$Rn,
+ (sext_inreg GPR:$Rm, i16)), (i32 16))))]>,
+ Requires<[IsARM, HasV5TE]>;
+
+ def WT : AMulxyIa<0b0001001, 0b10, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (add GPR:$Ra, (sra (opnode GPR:$Rn,
+ (sra GPR:$Rm, (i32 16))), (i32 16))))]>,
+ Requires<[IsARM, HasV5TE]>;
}
defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
defm SMLA : AI_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
// Halfword multiply accumulate long: SMLAL<x><y> -- for disassembly only
-def SMLALBB : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlalbb", "\t$ldst, $hdst, $a, $b",
+def SMLALBB : AMulxyI64<0b0001010, 0b00, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMAC64, "smlalbb", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
-}
+ Requires<[IsARM, HasV5TE]>;
-def SMLALBT : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlalbt", "\t$ldst, $hdst, $a, $b",
+def SMLALBT : AMulxyI64<0b0001010, 0b10, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMAC64, "smlalbt", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
-}
+ Requires<[IsARM, HasV5TE]>;
-def SMLALTB : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlaltb", "\t$ldst, $hdst, $a, $b",
+def SMLALTB : AMulxyI64<0b0001010, 0b01, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMAC64, "smlaltb", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
-}
+ Requires<[IsARM, HasV5TE]>;
-def SMLALTT : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlaltt", "\t$ldst, $hdst, $a, $b",
+def SMLALTT : AMulxyI64<0b0001010, 0b11, (outs GPR:$RdLo, GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm),
+ IIC_iMAC64, "smlaltt", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
-}
+ Requires<[IsARM, HasV5TE]>;
// Helper class for AI_smld -- for disassembly only
-class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm>
+class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm>
: AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
+ bits<4> Rn;
+ bits<4> Rm;
let Inst{4} = 1;
let Inst{5} = swap;
let Inst{6} = sub;
@@ -2157,21 +2881,46 @@ class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
let Inst{21-20} = 0b00;
let Inst{22} = long;
let Inst{27-23} = 0b01110;
+ let Inst{11-8} = Rm;
+ let Inst{3-0} = Rn;
+}
+class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm>
+ : AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
+ bits<4> Rd;
+ let Inst{15-12} = 0b1111;
+ let Inst{19-16} = Rd;
+}
+class AMulDualIa<bit long, bit sub, bit swap, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm>
+ : AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
+ bits<4> Ra;
+ let Inst{15-12} = Ra;
+}
+class AMulDualI64<bit long, bit sub, bit swap, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm>
+ : AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ let Inst{19-16} = RdHi;
+ let Inst{15-12} = RdLo;
}
multiclass AI_smld<bit sub, string opc> {
- def D : AMulDualI<0, sub, 0, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- NoItinerary, !strconcat(opc, "d"), "\t$dst, $a, $b, $acc">;
+ def D : AMulDualIa<0, sub, 0, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm, $Ra">;
- def DX : AMulDualI<0, sub, 1, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- NoItinerary, !strconcat(opc, "dx"), "\t$dst, $a, $b, $acc">;
+ def DX: AMulDualIa<0, sub, 1, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
+ NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm, $Ra">;
- def LD : AMulDualI<1, sub, 0, (outs GPR:$ldst,GPR:$hdst), (ins GPR:$a,GPR:$b),
- NoItinerary, !strconcat(opc, "ld"), "\t$ldst, $hdst, $a, $b">;
+ def LD: AMulDualI64<1, sub, 0, (outs GPR:$RdLo,GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), NoItinerary,
+ !strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">;
- def LDX : AMulDualI<1, sub, 1, (outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- NoItinerary, !strconcat(opc, "ldx"),"\t$ldst, $hdst, $a, $b">;
+ def LDX : AMulDualI64<1, sub, 1, (outs GPR:$RdLo,GPR:$RdHi),
+ (ins GPR:$Rn, GPR:$Rm), NoItinerary,
+ !strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">;
}
@@ -2180,16 +2929,10 @@ defm SMLS : AI_smld<1, "smls">;
multiclass AI_sdml<bit sub, string opc> {
- def D : AMulDualI<0, sub, 0, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- NoItinerary, !strconcat(opc, "d"), "\t$dst, $a, $b"> {
- let Inst{15-12} = 0b1111;
- }
-
- def DX : AMulDualI<0, sub, 1, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- NoItinerary, !strconcat(opc, "dx"), "\t$dst, $a, $b"> {
- let Inst{15-12} = 0b1111;
- }
-
+ def D : AMulDualI<0, sub, 0, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm">;
+ def DX : AMulDualI<0, sub, 1, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm">;
}
defm SMUA : AI_sdml<0, "smua">;
@@ -2199,55 +2942,35 @@ defm SMUS : AI_sdml<1, "smus">;
// Misc. Arithmetic Instructions.
//
-def CLZ : AMiscA1I<0b000010110, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "clz", "\t$dst, $src",
- [(set GPR:$dst, (ctlz GPR:$src))]>, Requires<[IsARM, HasV5T]> {
- let Inst{7-4} = 0b0001;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def RBIT : AMiscA1I<0b01101111, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rbit", "\t$dst, $src",
- [(set GPR:$dst, (ARMrbit GPR:$src))]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{7-4} = 0b0011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REV : AMiscA1I<0b01101011, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev", "\t$dst, $src",
- [(set GPR:$dst, (bswap GPR:$src))]>, Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REV16 : AMiscA1I<0b01101011, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev16", "\t$dst, $src",
- [(set GPR:$dst,
- (or (and (srl GPR:$src, (i32 8)), 0xFF),
- (or (and (shl GPR:$src, (i32 8)), 0xFF00),
- (or (and (srl GPR:$src, (i32 8)), 0xFF0000),
- (and (shl GPR:$src, (i32 8)), 0xFF000000)))))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REVSH : AMiscA1I<0b01101111, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "revsh", "\t$dst, $src",
- [(set GPR:$dst,
+def CLZ : AMiscA1I<0b000010110, 0b0001, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iUNAr, "clz", "\t$Rd, $Rm",
+ [(set GPR:$Rd, (ctlz GPR:$Rm))]>, Requires<[IsARM, HasV5T]>;
+
+def RBIT : AMiscA1I<0b01101111, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iUNAr, "rbit", "\t$Rd, $Rm",
+ [(set GPR:$Rd, (ARMrbit GPR:$Rm))]>,
+ Requires<[IsARM, HasV6T2]>;
+
+def REV : AMiscA1I<0b01101011, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iUNAr, "rev", "\t$Rd, $Rm",
+ [(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>;
+
+def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iUNAr, "rev16", "\t$Rd, $Rm",
+ [(set GPR:$Rd,
+ (or (and (srl GPR:$Rm, (i32 8)), 0xFF),
+ (or (and (shl GPR:$Rm, (i32 8)), 0xFF00),
+ (or (and (srl GPR:$Rm, (i32 8)), 0xFF0000),
+ (and (shl GPR:$Rm, (i32 8)), 0xFF000000)))))]>,
+ Requires<[IsARM, HasV6]>;
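+// For illustration: rev16 byte-swaps each halfword independently, so
+// 0xAABBCCDD becomes 0xBBAADDCC; the or/and/shift tree in the pattern above
+// spells out exactly that byte permutation.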
+
+def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
+ IIC_iUNAr, "revsh", "\t$Rd, $Rm",
+ [(set GPR:$Rd,
(sext_inreg
- (or (srl (and GPR:$src, 0xFF00), (i32 8)),
- (shl GPR:$src, (i32 8))), i16))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
+ (or (srl (and GPR:$Rm, 0xFF00), (i32 8)),
+ (shl GPR:$Rm, (i32 8))), i16))]>,
+ Requires<[IsARM, HasV6]>;
def lsl_shift_imm : SDNodeXForm<imm, [{
unsigned Sh = ARM_AM::getSORegOpc(ARM_AM::lsl, N->getZExtValue());
@@ -2258,21 +2981,19 @@ def lsl_amt : PatLeaf<(i32 imm), [{
return (N->getZExtValue() < 32);
}], lsl_shift_imm>;
-def PKHBT : AMiscA1I<0b01101000, (outs GPR:$dst),
- (ins GPR:$src1, GPR:$src2, shift_imm:$sh),
- IIC_iALUsi, "pkhbt", "\t$dst, $src1, $src2$sh",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF),
- (and (shl GPR:$src2, lsl_amt:$sh),
- 0xFFFF0000)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{6-4} = 0b001;
-}
+def PKHBT : APKHI<0b01101000, 0, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, shift_imm:$sh),
+ IIC_iALUsi, "pkhbt", "\t$Rd, $Rn, $Rm$sh",
+ [(set GPR:$Rd, (or (and GPR:$Rn, 0xFFFF),
+ (and (shl GPR:$Rm, lsl_amt:$sh),
+ 0xFFFF0000)))]>,
+ Requires<[IsARM, HasV6]>;
// Alternate cases for PKHBT where identities eliminate some nodes.
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF), (and GPR:$src2, 0xFFFF0000)),
- (PKHBT GPR:$src1, GPR:$src2, 0)>;
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF), (shl GPR:$src2, imm16_31:$sh)),
- (PKHBT GPR:$src1, GPR:$src2, (lsl_shift_imm imm16_31:$sh))>;
+def : ARMV6Pat<(or (and GPR:$Rn, 0xFFFF), (and GPR:$Rm, 0xFFFF0000)),
+ (PKHBT GPR:$Rn, GPR:$Rm, 0)>;
+def : ARMV6Pat<(or (and GPR:$Rn, 0xFFFF), (shl GPR:$Rm, imm16_31:$sh)),
+ (PKHBT GPR:$Rn, GPR:$Rm, (lsl_shift_imm imm16_31:$sh))>;
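+// For illustration: "pkhbt r0, r1, r2, lsl #16" computes
+// r0 = (r1 & 0xFFFF) | ((r2 << 16) & 0xFFFF0000) -- bottom halfword from Rn,
+// top halfword from the shifted Rm -- matching the pattern above.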
def asr_shift_imm : SDNodeXForm<imm, [{
unsigned Sh = ARM_AM::getSORegOpc(ARM_AM::asr, N->getZExtValue());
@@ -2285,15 +3006,13 @@ def asr_amt : PatLeaf<(i32 imm), [{
// Note: Shifts of 1-15 bits will be transformed to srl instead of sra and
// will match the pattern below.
-def PKHTB : AMiscA1I<0b01101000, (outs GPR:$dst),
- (ins GPR:$src1, GPR:$src2, shift_imm:$sh),
- IIC_iALUsi, "pkhtb", "\t$dst, $src1, $src2$sh",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF0000),
- (and (sra GPR:$src2, asr_amt:$sh),
- 0xFFFF)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{6-4} = 0b101;
-}
+def PKHTB : APKHI<0b01101000, 1, (outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, shift_imm:$sh),
+ IIC_iBITsi, "pkhtb", "\t$Rd, $Rn, $Rm$sh",
+ [(set GPR:$Rd, (or (and GPR:$Rn, 0xFFFF0000),
+ (and (sra GPR:$Rm, asr_amt:$sh),
+ 0xFFFF)))]>,
+ Requires<[IsARM, HasV6]>;
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here, it is PKHBT instead.
@@ -2308,10 +3027,19 @@ def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF0000),
//
defm CMP : AI1_cmp_irs<0b1010, "cmp",
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
-// FIXME: There seems to be a (potential) hardware bug with the CMN instruction
-// and comparison with 0. These two pieces of code should give identical
+// ARMcmpZ can re-use the above instruction definitions.
+def : ARMPat<(ARMcmpZ GPR:$src, so_imm:$imm),
+ (CMPri GPR:$src, so_imm:$imm)>;
+def : ARMPat<(ARMcmpZ GPR:$src, GPR:$rhs),
+ (CMPrr GPR:$src, GPR:$rhs)>;
+def : ARMPat<(ARMcmpZ GPR:$src, so_reg:$rhs),
+ (CMPrs GPR:$src, so_reg:$rhs)>;
+
+// FIXME: We have to be careful when using the CMN instruction and comparison
+// with 0. One would expect these two pieces of code to give identical
// results:
//
// rsbs r1, r1, 0
@@ -2321,7 +3049,7 @@ defm CMP : AI1_cmp_irs<0b1010, "cmp",
// mov r0, #1
//
// and:
-//
+//
// cmn r0, r1
// mov r0, #0
// it ls
@@ -2336,20 +3064,16 @@ defm CMP : AI1_cmp_irs<0b1010, "cmp",
// never a "carry" when this AddWithCarry is performed (because the "carry bit"
// parameter to AddWithCarry is defined as 0).
//
-// The AddWithCarry in the CMP case seems to be relying upon the identity:
-//
-// ~x + 1 = -x
-//
-// However when x is 0 and unsigned, this doesn't hold:
+// When x is 0 and unsigned:
//
// x = 0
// ~x = 0xFFFF FFFF
// ~x + 1 = 0x1 0000 0000
// (-x = 0) != (0x1 0000 0000 = ~x + 1)
//
-// Therefore, we should disable *all* versions of CMN, especially when comparing
-// against zero, until we can limit when the CMN instruction is used (when we
-// know that the RHS is not 0) or when we have a hardware fix for this.
+// Therefore, we should disable CMN when comparing against zero, until we can
+// limit when the CMN instruction is used (when we know that the RHS is not 0 or
+// when it's a comparison which doesn't look at the 'carry' flag).
//
// (See the ARM docs for the "AddWithCarry" pseudo-code.)
//
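// Concretely, a sketch of the AddWithCarry pseudo-code referenced above:
//
//   AddWithCarry(x, y, carry_in):
//     usum   = UInt(x) + UInt(y) + carry_in
//     result = usum<31:0>
//     carry  = (UInt(result) != usum)
//
// "cmp r0, r1" performs AddWithCarry(r0, NOT(r1), 1); with r0 = r1 = 0 that
// is 0 + 0xFFFF FFFF + 1 = 0x1 0000 0000, so carry = 1. The corresponding
// "cmn r0, r1" performs AddWithCarry(0, 0, 0), so carry = 0, and the two
// sequences above disagree exactly in the carry flag.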
@@ -2360,13 +3084,14 @@ defm CMP : AI1_cmp_irs<0b1010, "cmp",
// Note that TST/TEQ don't set all the same flags that CMP does!
defm TST : AI1_cmp_irs<0b1000, "tst",
- BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>, 1>;
+ IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
+ BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>, 1>;
defm TEQ : AI1_cmp_irs<0b1001, "teq",
- BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>, 1>;
+ IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
+ BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>, 1>;
-defm CMPz : AI1_cmp_irs<0b1010, "cmp",
- BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>;
defm CMNz : AI1_cmp_irs<0b1011, "cmn",
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
//def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
@@ -2381,13 +3106,10 @@ let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
def BCCi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, brtarget:$dst),
IIC_Br,
- "${:comment} B\t$dst GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, imm:$cc",
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>;
def BCCZi64 : PseudoInst<(outs),
- (ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst),
- IIC_Br,
- "${:comment} B\t$dst GPR:$lhs1, GPR:$lhs2, 0, 0, imm:$cc",
+ (ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst), IIC_Br,
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>;
} // usesCustomInserter
@@ -2395,29 +3117,87 @@ def BCCZi64 : PseudoInst<(outs),
// Conditional moves
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
+// FIXME: These should all be pseudo-instructions that get expanded to
+// the normal MOV instructions. That would remove the need to
+// special-case them in tblgen.
let neverHasSideEffects = 1 in {
-def MOVCCr : AI1<0b1101, (outs GPR:$dst), (ins GPR:$false, GPR:$true), DPFrm,
- IIC_iCMOVr, "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
- let Inst{11-4} = 0b00000000;
+def MOVCCr : AI1<0b1101, (outs GPR:$Rd), (ins GPR:$false, GPR:$Rm), DPFrm,
+ IIC_iCMOVr, "mov", "\t$Rd, $Rm",
+ [/*(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd">, UnaryDP {
+ bits<4> Rd;
+ bits<4> Rm;
let Inst{25} = 0;
+ let Inst{20} = 0;
+ let Inst{15-12} = Rd;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rm;
}
-def MOVCCs : AI1<0b1101, (outs GPR:$dst),
- (ins GPR:$false, so_reg:$true), DPSoRegFrm, IIC_iCMOVsr,
- "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, so_reg:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
+def MOVCCs : AI1<0b1101, (outs GPR:$Rd),
+ (ins GPR:$false, so_reg:$shift), DPSoRegFrm, IIC_iCMOVsr,
+ "mov", "\t$Rd, $shift",
+ [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_reg:$shift, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd">, UnaryDP {
+ bits<4> Rd;
+ bits<12> shift;
let Inst{25} = 0;
+ let Inst{20} = 0;
+ let Inst{19-16} = 0;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = shift;
}
-def MOVCCi : AI1<0b1101, (outs GPR:$dst),
- (ins GPR:$false, so_imm:$true), DPFrm, IIC_iCMOVi,
- "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, so_imm:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
+let isMoveImm = 1 in
+def MOVCCi16 : AI1<0b1000, (outs GPR:$Rd), (ins GPR:$false, i32imm_hilo16:$imm),
+ DPFrm, IIC_iMOVi,
+ "movw", "\t$Rd, $imm",
+ []>,
+ RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>,
+ UnaryDP {
+ bits<4> Rd;
+ bits<16> imm;
+ let Inst{25} = 1;
+ let Inst{20} = 0;
+ let Inst{19-16} = imm{15-12};
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm{11-0};
+}
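+// For illustration of the movw split above: with imm = 0x1234, imm{15-12} =
+// 0x1 lands in Inst{19-16} and imm{11-0} = 0x234 lands in Inst{11-0}.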
+
+let isMoveImm = 1 in
+def MOVCCi : AI1<0b1101, (outs GPR:$Rd),
+ (ins GPR:$false, so_imm:$imm), DPFrm, IIC_iCMOVi,
+ "mov", "\t$Rd, $imm",
+ [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd">, UnaryDP {
+ bits<4> Rd;
+ bits<12> imm;
let Inst{25} = 1;
+ let Inst{20} = 0;
+ let Inst{19-16} = 0b0000;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm;
+}
+
+// Two instruction predicate mov immediate.
+let isMoveImm = 1 in
+def MOVCCi32imm : PseudoInst<(outs GPR:$Rd),
+ (ins GPR:$false, i32imm:$src, pred:$p),
+ IIC_iCMOVix2, []>, RegConstraint<"$false = $Rd">;
+
+let isMoveImm = 1 in
+def MVNCCi : AI1<0b1111, (outs GPR:$Rd),
+ (ins GPR:$false, so_imm:$imm), DPFrm, IIC_iCMOVi,
+ "mvn", "\t$Rd, $imm",
+ [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd">, UnaryDP {
+ bits<4> Rd;
+ bits<12> imm;
+ let Inst{25} = 1;
+ let Inst{20} = 0;
+ let Inst{19-16} = 0b0000;
+ let Inst{15-12} = Rd;
+ let Inst{11-0} = imm;
}
} // neverHasSideEffects
@@ -2425,64 +3205,41 @@ def MOVCCi : AI1<0b1101, (outs GPR:$dst),
// Atomic operations intrinsics
//
+def memb_opt : Operand<i32> {
+ let PrintMethod = "printMemBOption";
+ let ParserMatchClass = MemBarrierOptOperand;
+}
+
// memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
-def DMBsy : AInoP<(outs), (ins), MiscFrm, NoItinerary, "dmb", "",
- [(ARMMemBarrier)]>, Requires<[IsARM, HasDB]> {
+def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
+ "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>,
+ Requires<[IsARM, HasDB]> {
+ bits<4> opt;
let Inst{31-4} = 0xf57ff05;
- // FIXME: add support for options other than a full system DMB
- // See DMB disassembly-only variants below.
- let Inst{3-0} = 0b1111;
-}
-
-def DSBsy : AInoP<(outs), (ins), MiscFrm, NoItinerary, "dsb", "",
- [(ARMSyncBarrier)]>, Requires<[IsARM, HasDB]> {
- let Inst{31-4} = 0xf57ff04;
- // FIXME: add support for options other than a full system DSB
- // See DSB disassembly-only variants below.
- let Inst{3-0} = 0b1111;
+ let Inst{3-0} = opt;
}
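// For illustration: the option operand maps straight into Inst{3-0}, e.g.
// "dmb sy" (full system) encodes opt = 0b1111 and "dmb ish" (inner shareable)
// encodes opt = 0b1011, per the ARM ARM barrier-option table.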
def DMB_MCR : AInoP<(outs), (ins GPR:$zero), MiscFrm, NoItinerary,
"mcr", "\tp15, 0, $zero, c7, c10, 5",
[(ARMMemBarrierMCR GPR:$zero)]>,
Requires<[IsARM, HasV6]> {
- // FIXME: add support for options other than a full system DMB
// FIXME: add encoding
}
-
-def DSB_MCR : AInoP<(outs), (ins GPR:$zero), MiscFrm, NoItinerary,
- "mcr", "\tp15, 0, $zero, c7, c10, 4",
- [(ARMSyncBarrierMCR GPR:$zero)]>,
- Requires<[IsARM, HasV6]> {
- // FIXME: add support for options other than a full system DSB
- // FIXME: add encoding
-}
-}
-
-// Memory Barrier Operations Variants -- for disassembly only
-
-def memb_opt : Operand<i32> {
- let PrintMethod = "printMemBOption";
}
-class AMBI<bits<4> op7_4, string opc>
- : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary, opc, "\t$opt",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasDB]> {
- let Inst{31-8} = 0xf57ff0;
- let Inst{7-4} = op7_4;
+def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
+ "dsb", "\t$opt",
+ [/* For disassembly only; pattern left blank */]>,
+ Requires<[IsARM, HasDB]> {
+ bits<4> opt;
+ let Inst{31-4} = 0xf57ff04;
+ let Inst{3-0} = opt;
}
-// These DMB variants are for disassembly only.
-def DMBvar : AMBI<0b0101, "dmb">;
-
-// These DSB variants are for disassembly only.
-def DSBvar : AMBI<0b0100, "dsb">;
-
// ISB has only full system option -- for disassembly only
-def ISBsy : AInoP<(outs), (ins), MiscFrm, NoItinerary, "isb", "", []>,
- Requires<[IsARM, HasDB]> {
+def ISB : AInoP<(outs), (ins), MiscFrm, NoItinerary, "isb", "", []>,
+ Requires<[IsARM, HasDB]> {
let Inst{31-4} = 0xf57ff06;
let Inst{3-0} = 0b1111;
}
@@ -2491,138 +3248,114 @@ let usesCustomInserter = 1 in {
let Uses = [CPSR] in {
def ATOMIC_LOAD_ADD_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_add_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_sub_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_and_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_or_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_xor_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I8 PSEUDO!",
[(set GPR:$dst, (atomic_load_nand_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_ADD_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_add_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_sub_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_and_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_or_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_xor_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I16 PSEUDO!",
[(set GPR:$dst, (atomic_load_nand_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_ADD_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I32 PSEUDO!",
[(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_SWAP_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I8 PSEUDO!",
[(set GPR:$dst, (atomic_swap_8 GPR:$ptr, GPR:$new))]>;
def ATOMIC_SWAP_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I16 PSEUDO!",
[(set GPR:$dst, (atomic_swap_16 GPR:$ptr, GPR:$new))]>;
def ATOMIC_SWAP_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I32 PSEUDO!",
[(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I8 PSEUDO!",
[(set GPR:$dst, (atomic_cmp_swap_8 GPR:$ptr, GPR:$old, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I16 PSEUDO!",
[(set GPR:$dst, (atomic_cmp_swap_16 GPR:$ptr, GPR:$old, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I32 PSEUDO!",
[(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$old, GPR:$new))]>;
}
}
let mayLoad = 1 in {
-def LDREXB : AIldrex<0b10, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrexb", "\t$dest, [$ptr]",
+def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins GPR:$Rn), NoItinerary,
+ "ldrexb", "\t$Rt, [$Rn]",
[]>;
-def LDREXH : AIldrex<0b11, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrexh", "\t$dest, [$ptr]",
+def LDREXH : AIldrex<0b11, (outs GPR:$Rt), (ins GPR:$Rn), NoItinerary,
+ "ldrexh", "\t$Rt, [$Rn]",
[]>;
-def LDREX : AIldrex<0b00, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrex", "\t$dest, [$ptr]",
+def LDREX : AIldrex<0b00, (outs GPR:$Rt), (ins GPR:$Rn), NoItinerary,
+ "ldrex", "\t$Rt, [$Rn]",
[]>;
-def LDREXD : AIldrex<0b01, (outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr),
+def LDREXD : AIldrex<0b01, (outs GPR:$Rt, GPR:$Rt2), (ins GPR:$Rn),
NoItinerary,
- "ldrexd", "\t$dest, $dest2, [$ptr]",
+ "ldrexd", "\t$Rt, $Rt2, [$Rn]",
[]>;
}
-let mayStore = 1, Constraints = "@earlyclobber $success" in {
-def STREXB : AIstrex<0b10, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
+let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
+def STREXB : AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$src, GPR:$Rn),
NoItinerary,
- "strexb", "\t$success, $src, [$ptr]",
+ "strexb", "\t$Rd, $src, [$Rn]",
[]>;
-def STREXH : AIstrex<0b11, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
+def STREXH : AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, GPR:$Rn),
NoItinerary,
- "strexh", "\t$success, $src, [$ptr]",
+ "strexh", "\t$Rd, $Rt, [$Rn]",
[]>;
-def STREX : AIstrex<0b00, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
+def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, GPR:$Rn),
NoItinerary,
- "strex", "\t$success, $src, [$ptr]",
+ "strex", "\t$Rd, $Rt, [$Rn]",
[]>;
-def STREXD : AIstrex<0b01, (outs GPR:$success),
- (ins GPR:$src, GPR:$src2, GPR:$ptr),
+def STREXD : AIstrex<0b01, (outs GPR:$Rd),
+ (ins GPR:$Rt, GPR:$Rt2, GPR:$Rn),
NoItinerary,
- "strexd", "\t$success, $src, $src2, [$ptr]",
+ "strexd", "\t$Rd, $Rt, $Rt2, [$Rn]",
[]>;
}
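// For illustration, the canonical exclusive-access retry loop these lower to
// (a sketch; register choices are arbitrary):
//
//   loop: ldrex   r2, [r0]        @ load-exclusive the current value
//         cmp     r2, r1          @ compare against the expected value
//         bne     done
//         strex   r3, r4, [r0]    @ try to store the new value
//         cmp     r3, #0          @ r3 == 0 iff the store-exclusive succeeded
//         bne     loop
//   done: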
@@ -2630,29 +3363,15 @@ def STREXD : AIstrex<0b01, (outs GPR:$success),
def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV7]> {
- let Inst{31-20} = 0xf57;
- let Inst{7-4} = 0b0001;
+ let Inst{31-0} = 0b11110101011111111111000000011111;
}
// SWP/SWPB are deprecated in V6/V7 and for disassembly only.
let mayLoad = 1 in {
-def SWP : AI<(outs GPR:$dst), (ins GPR:$src, GPR:$ptr), LdStExFrm, NoItinerary,
- "swp", "\t$dst, $src, [$ptr]",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-23} = 0b00010;
- let Inst{22} = 0; // B = 0
- let Inst{21-20} = 0b00;
- let Inst{7-4} = 0b1001;
-}
-
-def SWPB : AI<(outs GPR:$dst), (ins GPR:$src, GPR:$ptr), LdStExFrm, NoItinerary,
- "swpb", "\t$dst, $src, [$ptr]",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-23} = 0b00010;
- let Inst{22} = 1; // B = 1
- let Inst{21-20} = 0b00;
- let Inst{7-4} = 0b1001;
-}
+def SWP : AIswp<0, (outs GPR:$Rt), (ins GPR:$Rt2, GPR:$Rn), "swp",
+ [/* For disassembly only; pattern left blank */]>;
+def SWPB : AIswp<1, (outs GPR:$Rt), (ins GPR:$Rt2, GPR:$Rn), "swpb",
+ [/* For disassembly only; pattern left blank */]>;
}
//===----------------------------------------------------------------------===//
@@ -2660,10 +3379,11 @@ def SWPB : AI<(outs GPR:$dst), (ins GPR:$src, GPR:$ptr), LdStExFrm, NoItinerary,
//
// __aeabi_read_tp preserves the registers r1-r3.
+// This is a pseudo-instruction so that we can get the encoding right,
+// complete with fixup for the aeabi_read_tp function.
let isCall = 1,
- Defs = [R0, R12, LR, CPSR] in {
- def TPsoft : ABXI<0b1011, (outs), (ins), IIC_Br,
- "bl\t__aeabi_read_tp",
+ Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
+ def TPsoft : PseudoInst<(outs), (ins), IIC_Br,
[(set R0, ARMthread_pointer)]>;
}
@@ -2680,19 +3400,16 @@ let isCall = 1,
// doing so, we also cause the prologue/epilogue code to actively preserve
// all of the callee-saved registers, which is exactly what we want.
// A constant value is passed in $val, and we use the location as a scratch.
+//
+// These are pseudo-instructions and are lowered to individual MC-insts, so
+// no encoding information is necessary.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
D31 ], hasSideEffects = 1, isBarrier = 1 in {
- def Int_eh_sjlj_setjmp : XI<(outs), (ins GPR:$src, GPR:$val),
- AddrModeNone, SizeSpecial, IndexModeNone,
- Pseudo, NoItinerary,
- "add\t$val, pc, #8\t${:comment} eh_setjmp begin\n\t"
- "str\t$val, [$src, #+4]\n\t"
- "mov\tr0, #0\n\t"
- "add\tpc, pc, #0\n\t"
- "mov\tr0, #1 ${:comment} eh_setjmp end", "",
+ def Int_eh_sjlj_setjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
+ NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, HasVFP2]>;
}
@@ -2700,14 +3417,8 @@ let Defs =
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ],
hasSideEffects = 1, isBarrier = 1 in {
- def Int_eh_sjlj_setjmp_nofp : XI<(outs), (ins GPR:$src, GPR:$val),
- AddrModeNone, SizeSpecial, IndexModeNone,
- Pseudo, NoItinerary,
- "add\t$val, pc, #8\n ${:comment} eh_setjmp begin\n\t"
- "str\t$val, [$src, #+4]\n\t"
- "mov\tr0, #0\n\t"
- "add\tpc, pc, #0\n\t"
- "mov\tr0, #1 ${:comment} eh_setjmp end", "",
+ def Int_eh_sjlj_setjmp_nofp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
+ NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, NoVFP]>;
}
@@ -2715,53 +3426,58 @@ let Defs =
// FIXME: Non-Darwin version(s)
let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
Defs = [ R7, LR, SP ] in {
-def Int_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
- AddrModeNone, SizeSpecial, IndexModeNone,
- Pseudo, NoItinerary,
- "ldr\tsp, [$src, #8]\n\t"
- "ldr\t$scratch, [$src, #4]\n\t"
- "ldr\tr7, [$src]\n\t"
- "bx\t$scratch", "",
+def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
+ NoItinerary,
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
Requires<[IsARM, IsDarwin]>;
}
+// eh.sjlj.dispatchsetup pseudo-instruction.
+// This pseudo is used for ARM, Thumb1 and Thumb2. Any differences are
+// handled when the pseudo is expanded (which happens before any passes
+// that need the instruction size).
+let isBarrier = 1, hasSideEffects = 1 in
+def Int_eh_sjlj_dispatchsetup :
+ PseudoInst<(outs), (ins GPR:$src), NoItinerary,
+ [(ARMeh_sjlj_dispatchsetup GPR:$src)]>,
+ Requires<[IsDarwin]>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
// Large immediate handling.
-// Two piece so_imms.
-let isReMaterializable = 1 in
-def MOVi2pieces : AI1x2<(outs GPR:$dst), (ins so_imm2part:$src),
- Pseudo, IIC_iMOVi,
- "mov", "\t$dst, $src",
- [(set GPR:$dst, so_imm2part:$src)]>,
- Requires<[IsARM, NoV6T2]>;
-
-def : ARMPat<(or GPR:$LHS, so_imm2part:$RHS),
- (ORRri (ORRri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(xor GPR:$LHS, so_imm2part:$RHS),
- (EORri (EORri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(add GPR:$LHS, so_imm2part:$RHS),
- (ADDri (ADDri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(add GPR:$LHS, so_neg_imm2part:$RHS),
- (SUBri (SUBri GPR:$LHS, (so_neg_imm2part_1 imm:$RHS)),
- (so_neg_imm2part_2 imm:$RHS))>;
-
-// 32-bit immediate using movw + movt.
+// 32-bit immediate using two-piece so_imms or movw + movt.
// This is a single pseudo-instruction; the benefit is that it can be remat'd
// as a single unit instead of having to handle reg inputs.
// FIXME: Remove this when we can do generalized remat.
-let isReMaterializable = 1 in
-def MOVi32imm : AI1x2<(outs GPR:$dst), (ins i32imm:$src), Pseudo, IIC_iMOVi,
- "movw", "\t$dst, ${src:lo16}\n\tmovt${p}\t$dst, ${src:hi16}",
- [(set GPR:$dst, (i32 imm:$src))]>,
- Requires<[IsARM, HasV6T2]>;
+let isReMaterializable = 1, isMoveImm = 1 in
+def MOVi32imm : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
+ [(set GPR:$dst, (arm_i32imm:$src))]>,
+ Requires<[IsARM]>;
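+// For illustration: on v6t2+ targets MOVi32imm rematerializes as a
+// movw/movt pair, e.g. i32 0x12345678 becomes
+//   movw rD, #0x5678
+//   movt rD, #0x1234
+// while pre-v6t2 cores get the two-piece so_imm expansion mentioned above.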
+
+// Pseudo instruction that combines movw + movt + add pc (if PIC).
+// It also makes it possible to rematerialize the instructions.
+// FIXME: Remove this when we can do generalized remat and when machine licm
+// can properly hoist the instructions.
+let isReMaterializable = 1 in {
+def MOV_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
+ IIC_iMOVix2addpc,
+ [(set GPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
+ Requires<[IsARM, UseMovt]>;
+
+def MOV_ga_dyn : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
+ IIC_iMOVix2,
+ [(set GPR:$dst, (ARMWrapperDYN tglobaladdr:$addr))]>,
+ Requires<[IsARM, UseMovt]>;
+
+let AddedComplexity = 10 in
+def MOV_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
+ IIC_iMOVix2ld,
+ [(set GPR:$dst, (load (ARMWrapperPIC tglobaladdr:$addr)))]>,
+ Requires<[IsARM, UseMovt]>;
+} // isReMaterializable
// ConstantPool, GlobalAddress, and JumpTable
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (LEApcrel tglobaladdr :$dst)>,
@@ -2800,11 +3516,15 @@ def : ARMPat<(ARMcall texternalsym:$func), (BLr9 texternalsym:$func)>,
Requires<[IsARM, IsDarwin]>;
// zextload i1 -> zextload i8
-def : ARMPat<(zextloadi1 addrmode2:$addr), (LDRB addrmode2:$addr)>;
+def : ARMPat<(zextloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
+def : ARMPat<(zextloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
// extload -> zextload
-def : ARMPat<(extloadi1 addrmode2:$addr), (LDRB addrmode2:$addr)>;
-def : ARMPat<(extloadi8 addrmode2:$addr), (LDRB addrmode2:$addr)>;
+def : ARMPat<(extloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
+def : ARMPat<(extloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
+def : ARMPat<(extloadi8 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
+def : ARMPat<(extloadi8 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
+
def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
def : ARMPat<(extloadi8 addrmodepc:$addr), (PICLDRB addrmodepc:$addr)>;
@@ -2889,19 +3609,45 @@ include "ARMInstrNEON.td"
// Coprocessor Instructions. For disassembly only.
//
-def CDP : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- nohash_imm:$CRd, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "cdp", "\tp$cop, $opc1, cr$CRd, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{4} = 0;
-}
-
-def CDP2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- nohash_imm:$CRd, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "cdp2\tp$cop, $opc1, cr$CRd, cr$CRn, cr$CRm, $opc2",
+def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ NoItinerary, "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
+ [/* For disassembly only; pattern left blank */]> {
+ bits<4> opc1;
+ bits<4> CRn;
+ bits<4> CRd;
+ bits<4> cop;
+ bits<3> opc2;
+ bits<4> CRm;
+
+ let Inst{3-0} = CRm;
+ let Inst{4} = 0;
+ let Inst{7-5} = opc2;
+ let Inst{11-8} = cop;
+ let Inst{15-12} = CRd;
+ let Inst{19-16} = CRn;
+ let Inst{23-20} = opc1;
+}
+
+def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
- let Inst{4} = 0;
+ bits<4> opc1;
+ bits<4> CRn;
+ bits<4> CRd;
+ bits<4> cop;
+ bits<3> opc2;
+ bits<4> CRm;
+
+ let Inst{3-0} = CRm;
+ let Inst{4} = 0;
+ let Inst{7-5} = opc2;
+ let Inst{11-8} = cop;
+ let Inst{15-12} = CRd;
+ let Inst{19-16} = CRn;
+ let Inst{23-20} = opc1;
}
class ACI<dag oops, dag iops, string opc, string asm>
@@ -3000,110 +3746,164 @@ defm LDC2 : LdStCop<0b1111, 1, "ldc2">;
defm STC : LdStCop<{?,?,?,?}, 0, "stc">;
defm STC2 : LdStCop<0b1111, 0, "stc2">;
-def MCR : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mcr", "\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{20} = 0;
- let Inst{4} = 1;
-}
-
-def MCR2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mcr2\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{20} = 0;
- let Inst{4} = 1;
-}
+//===----------------------------------------------------------------------===//
+// Move between coprocessor and ARM core register -- for disassembly only
+//
-def MRC : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mrc", "\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{20} = 1;
+class MovRCopro<string opc, bit direction>
+ : ABI<0b1110, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ NoItinerary, opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2",
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{20} = direction;
let Inst{4} = 1;
-}
-def MRC2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mrc2\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
+ bits<4> Rt;
+ bits<4> cop;
+ bits<3> opc1;
+ bits<3> opc2;
+ bits<4> CRm;
+ bits<4> CRn;
+
+ let Inst{15-12} = Rt;
+ let Inst{11-8} = cop;
+ let Inst{23-21} = opc1;
+ let Inst{7-5} = opc2;
+ let Inst{3-0} = CRm;
+ let Inst{19-16} = CRn;
+}
+
+def MCR : MovRCopro<"mcr", 0 /* from ARM core register to coprocessor */>;
+def MRC : MovRCopro<"mrc", 1 /* from coprocessor to ARM core register */>;
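
A sketch of how the MovRCopro fields assemble into an instruction word,
following the let Inst{...} assignments above (bits 31-28 assumed to be the AL
condition and bits 27-24 the 0b1110 opcode from the ABI base class):

    #include <cstdint>
    #include <cstdio>

    // direction = 0 encodes MCR, direction = 1 encodes MRC.
    uint32_t movRCopro(unsigned dir, unsigned cop, unsigned opc1, unsigned Rt,
                       unsigned CRn, unsigned CRm, unsigned opc2) {
      uint32_t insn = 0xEu << 28;    // cond = AL
      insn |= 0xEu << 24;            // coprocessor instruction space
      insn |= (opc1 & 7u) << 21;     // Inst{23-21} = opc1
      insn |= (dir & 1u) << 20;      // Inst{20} = direction
      insn |= (CRn & 0xFu) << 16;    // Inst{19-16} = CRn
      insn |= (Rt & 0xFu) << 12;     // Inst{15-12} = Rt
      insn |= (cop & 0xFu) << 8;     // Inst{11-8} = cop
      insn |= (opc2 & 7u) << 5;      // Inst{7-5} = opc2
      insn |= 1u << 4;               // Inst{4} = 1
      insn |= CRm & 0xFu;            // Inst{3-0} = CRm
      return insn;
    }

    int main() {
      // mrc p15, 0, r0, c1, c0, 0 (a classic system-control register read)
      std::printf("0x%08X\n", movRCopro(1, 15, 0, 0, 1, 0, 0));
      return 0;
    }
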
+
+class MovRCopro2<string opc, bit direction>
+ : ABXI<0b1110, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ NoItinerary, !strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"),
+ [/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
- let Inst{20} = 1;
+ let Inst{20} = direction;
let Inst{4} = 1;
-}
-def MCRR : ABI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mcrr", "\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0100;
-}
-
-def MCRR2 : ABXI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mcrr2\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
+ bits<4> Rt;
+ bits<4> cop;
+ bits<3> opc1;
+ bits<3> opc2;
+ bits<4> CRm;
+ bits<4> CRn;
+
+ let Inst{15-12} = Rt;
+ let Inst{11-8} = cop;
+ let Inst{23-21} = opc1;
+ let Inst{7-5} = opc2;
+ let Inst{3-0} = CRm;
+ let Inst{19-16} = CRn;
+}
+
+def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */>;
+def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */>;
+
+class MovRRCopro<string opc, bit direction>
+ : ABI<0b1100, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
+ NoItinerary, opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm",
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{23-21} = 0b010;
+ let Inst{20} = direction;
+
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<4> cop;
+ bits<4> opc1;
+ bits<4> CRm;
+
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+ let Inst{11-8} = cop;
+ let Inst{7-4} = opc1;
+ let Inst{3-0} = CRm;
+}
+
+def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */>;
+def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
+
+class MovRRCopro2<string opc, bit direction>
+ : ABXI<0b1100, (outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
+ NoItinerary, !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"),
+ [/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
- let Inst{23-20} = 0b0100;
-}
+ let Inst{23-21} = 0b010;
+ let Inst{20} = direction;
-def MRRC : ABI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mrrc", "\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0101;
-}
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<4> cop;
+ bits<4> opc1;
+ bits<4> CRm;
-def MRRC2 : ABXI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mrrc2\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{23-20} = 0b0101;
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+ let Inst{11-8} = cop;
+ let Inst{7-4} = opc1;
+ let Inst{3-0} = CRm;
}
+def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */>;
+def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
+
//===----------------------------------------------------------------------===//
// Move between special register and ARM core register -- for disassembly only
//
-def MRS : ABI<0b0001,(outs GPR:$dst),(ins), NoItinerary, "mrs", "\t$dst, cpsr",
+// Move to ARM core register from Special Register
+def MRS : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, cpsr",
[/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0000;
+ bits<4> Rd;
+ let Inst{23-16} = 0b00001111;
+ let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
}
-def MRSsys : ABI<0b0001,(outs GPR:$dst),(ins), NoItinerary,"mrs","\t$dst, spsr",
+def MRSsys : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary,"mrs","\t$Rd, spsr",
[/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0100;
+ bits<4> Rd;
+ let Inst{23-16} = 0b01001111;
+ let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
}
-def MSR : ABI<0b0001, (outs), (ins GPR:$src, msr_mask:$mask), NoItinerary,
- "msr", "\tcpsr$mask, $src",
+// Move from ARM core register to Special Register
+//
+// No need to have both system and application versions; the encodings are
+// the same, and the assembly parser has no way to distinguish between them.
+// The mask operand contains the special register (R bit) in bit 4; bits 3-0
+// contain the mask with the fields to be accessed in the special register.
+def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
+ "msr", "\t$mask, $Rn",
[/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0010;
- let Inst{7-4} = 0b0000;
-}
+ bits<5> mask;
+ bits<4> Rn;
-def MSRi : ABI<0b0011, (outs), (ins so_imm:$a, msr_mask:$mask), NoItinerary,
- "msr", "\tcpsr$mask, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0010;
- let Inst{7-4} = 0b0000;
+ let Inst{23} = 0;
+ let Inst{22} = mask{4}; // R bit
+ let Inst{21-20} = 0b10;
+ let Inst{19-16} = mask{3-0};
+ let Inst{15-12} = 0b1111;
+ let Inst{11-4} = 0b00000000;
+ let Inst{3-0} = Rn;
}
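
Following the bit assignments above, a small encoder sketch for the register
form (bits 27-24 taken from the ABI<0b0001, ...> opcode, condition assumed AL):

    #include <cstdint>
    #include <cstdio>

    uint32_t msr(unsigned mask5, unsigned Rn) {
      uint32_t insn = 0xEu << 28;          // cond = AL
      insn |= 0x1u << 24;                  // ABI opcode 0b0001
      insn |= ((mask5 >> 4) & 1u) << 22;   // R bit: 0 = cpsr, 1 = spsr
      insn |= 0x2u << 20;                  // Inst{21-20} = 0b10
      insn |= (mask5 & 0xFu) << 16;        // field mask: f,s,x,c in bits 19-16
      insn |= 0xFu << 12;                  // Inst{15-12} = 0b1111
      insn |= Rn & 0xFu;                   // Inst{3-0} = Rn
      return insn;
    }

    int main() {
      // msr cpsr_fc, r0: R = 0, fields f+c -> mask{3-0} = 0b1001
      std::printf("0x%08X\n", msr(0x09, 0));  // prints 0xE129F000
      return 0;
    }
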
-def MSRsys : ABI<0b0001, (outs), (ins GPR:$src, msr_mask:$mask), NoItinerary,
- "msr", "\tspsr$mask, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0000;
-}
+def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
+ "msr", "\t$mask, $a",
+ [/* For disassembly only; pattern left blank */]> {
+ bits<5> mask;
+ bits<12> a;
-def MSRsysi : ABI<0b0011, (outs), (ins so_imm:$a, msr_mask:$mask), NoItinerary,
- "msr", "\tspsr$mask, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0000;
+ let Inst{23} = 0;
+ let Inst{22} = mask{4}; // R bit
+ let Inst{21-20} = 0b10;
+ let Inst{19-16} = mask{3-0};
+ let Inst{15-12} = 0b1111;
+ let Inst{11-0} = a;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 4d2f116..1e2e550 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -16,11 +16,17 @@
//===----------------------------------------------------------------------===//
def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;
+def SDTARMVCMPZ : SDTypeProfile<1, 1, []>;
def NEONvceq : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
+def NEONvceqz : SDNode<"ARMISD::VCEQZ", SDTARMVCMPZ>;
def NEONvcge : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
+def NEONvcgez : SDNode<"ARMISD::VCGEZ", SDTARMVCMPZ>;
+def NEONvclez : SDNode<"ARMISD::VCLEZ", SDTARMVCMPZ>;
def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
+def NEONvcgtz : SDNode<"ARMISD::VCGTZ", SDTARMVCMPZ>;
+def NEONvcltz : SDNode<"ARMISD::VCLTZ", SDTARMVCMPZ>;
def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
def NEONvtst : SDNode<"ARMISD::VTST", SDTARMVCMP>;
@@ -69,6 +75,11 @@ def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
+def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>]>;
+def NEONvorrImm : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
+def NEONvbicImm : SDNode<"ARMISD::VBICIMM", SDTARMVORRIMM>;
+
def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
// VDUPLANE can produce a quad-register result from a double-register source,
@@ -129,830 +140,1506 @@ def nModImm : Operand<i32> {
// NEON load / store instructions
//===----------------------------------------------------------------------===//
-// Use vldmia to load a Q register as a D register pair.
-// This is equivalent to VLDMD except that it has a Q register operand
-// instead of a pair of D registers.
-def VLDMQ
- : AXDI4<(outs QPR:$dst), (ins addrmode4:$addr, pred:$p),
- IndexModeNone, IIC_fpLoadm,
- "vldm${addr:submode}${p}\t$addr, ${dst:dregpair}", "",
- [(set QPR:$dst, (v2f64 (load addrmode4:$addr)))]>;
-
-let mayLoad = 1, neverHasSideEffects = 1 in {
-// Use vld1 to load a Q register as a D register pair.
-// This alternative to VLDMQ allows an alignment to be specified.
-// This is equivalent to VLD1q64 except that it has a Q register operand.
-def VLD1q
- : NLdSt<0,0b10,0b1010,0b1100, (outs QPR:$dst), (ins addrmode6:$addr),
- IIC_VLD1, "vld1", "64", "${dst:dregpair}, $addr", "", []>;
-} // mayLoad = 1, neverHasSideEffects = 1
-
-// Use vstmia to store a Q register as a D register pair.
-// This is equivalent to VSTMD except that it has a Q register operand
-// instead of a pair of D registers.
-def VSTMQ
- : AXDI4<(outs), (ins QPR:$src, addrmode4:$addr, pred:$p),
- IndexModeNone, IIC_fpStorem,
- "vstm${addr:submode}${p}\t$addr, ${src:dregpair}", "",
- [(store (v2f64 QPR:$src), addrmode4:$addr)]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in {
-// Use vst1 to store a Q register as a D register pair.
-// This alternative to VSTMQ allows an alignment to be specified.
-// This is equivalent to VST1q64 except that it has a Q register operand.
-def VST1q
- : NLdSt<0,0b00,0b1010,0b1100, (outs), (ins addrmode6:$addr, QPR:$src),
- IIC_VST, "vst1", "64", "${src:dregpair}, $addr", "", []>;
-} // mayStore = 1, neverHasSideEffects = 1
-
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+// Use VLDM to load a Q register as a D register pair.
+// This is a pseudo instruction that is expanded to VLDMD after reg alloc.
+def VLDMQIA
+ : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
+ IIC_fpLoad_m, "",
+ [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;
+def VLDMQDB
+ : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
+ IIC_fpLoad_m, "",
+ [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;
+
+// Use VSTM to store a Q register as a D register pair.
+// This is a pseudo instruction that is expanded to VSTMD after reg alloc.
+def VSTMQIA
+ : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
+ IIC_fpStore_m, "",
+ [(store (v2f64 QPR:$src), GPR:$Rn)]>;
+def VSTMQDB
+ : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
+ IIC_fpStore_m, "",
+ [(store (v2f64 QPR:$src), GPR:$Rn)]>;
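
The expansion is possible because each Q register is an alias for a fixed pair
of consecutive D registers. A trivial sketch of that mapping (not LLVM's
register-class machinery):

    #include <cstdio>

    int main() {
      for (unsigned q = 0; q < 16; ++q)
        std::printf("q%-2u -> {d%u, d%u}\n", q, 2 * q, 2 * q + 1);
      return 0;
    }
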
// Classes for VLD* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
-class VLDQPseudo
- : PseudoNLdSt<(outs QPR:$dst), (ins addrmode6:$addr), IIC_VST, "">;
-class VLDQWBPseudo
+class VLDQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QPR:$dst), (ins addrmode6:$addr), itin, "">;
+class VLDQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset), itin,
"$addr.addr = $wb">;
-class VLDQQPseudo
- : PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), IIC_VST, "">;
-class VLDQQWBPseudo
+class VLDQQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), itin, "">;
+class VLDQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset), itin,
"$addr.addr = $wb">;
-class VLDQQQQWBPseudo
+class VLDQQQQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQQQPR:$dst), (ins addrmode6:$addr, QQQQPR:$src), itin,"">;
+class VLDQQQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
"$addr.addr = $wb, $src = $dst">;
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+
// VLD1 : Vector Load (multiple single elements)
class VLD1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$dst),
- (ins addrmode6:$addr), IIC_VLD1,
- "vld1", Dt, "\\{$dst\\}, $addr", "", []>;
+ : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$Vd),
+ (ins addrmode6:$Rn), IIC_VLD1,
+ "vld1", Dt, "\\{$Vd\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
class VLD1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b1010,op7_4, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr), IIC_VLD1,
- "vld1", Dt, "\\{$dst1, $dst2\\}, $addr", "", []>;
+ : NLdSt<0,0b10,0b1010,op7_4, (outs DPR:$Vd, DPR:$dst2),
+ (ins addrmode6:$Rn), IIC_VLD1x2,
+ "vld1", Dt, "\\{$Vd, $dst2\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VLD1d8 : VLD1D<0b0000, "8">;
-def VLD1d16 : VLD1D<0b0100, "16">;
-def VLD1d32 : VLD1D<0b1000, "32">;
-def VLD1d64 : VLD1D<0b1100, "64">;
+def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
+def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
+def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
+def VLD1d64 : VLD1D<{1,1,0,?}, "64">;
-def VLD1q8 : VLD1Q<0b0000, "8">;
-def VLD1q16 : VLD1Q<0b0100, "16">;
-def VLD1q32 : VLD1Q<0b1000, "32">;
-def VLD1q64 : VLD1Q<0b1100, "64">;
+def VLD1q8 : VLD1Q<{0,0,?,?}, "8">;
+def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
+def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
+def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
-def VLD1q8Pseudo : VLDQPseudo;
-def VLD1q16Pseudo : VLDQPseudo;
-def VLD1q32Pseudo : VLDQPseudo;
-def VLD1q64Pseudo : VLDQPseudo;
+def VLD1q8Pseudo : VLDQPseudo<IIC_VLD1x2>;
+def VLD1q16Pseudo : VLDQPseudo<IIC_VLD1x2>;
+def VLD1q32Pseudo : VLDQPseudo<IIC_VLD1x2>;
+def VLD1q64Pseudo : VLDQPseudo<IIC_VLD1x2>;
// ...with address register writeback:
class VLD1DWB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$dst, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD1,
- "vld1", Dt, "\\{$dst\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1u,
+ "vld1", Dt, "\\{$Vd\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
class VLD1QWB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b1010,op7_4, (outs QPR:$dst, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD1,
- "vld1", Dt, "${dst:dregpair}, $addr$offset",
- "$addr.addr = $wb", []>;
-
-def VLD1d8_UPD : VLD1DWB<0b0000, "8">;
-def VLD1d16_UPD : VLD1DWB<0b0100, "16">;
-def VLD1d32_UPD : VLD1DWB<0b1000, "32">;
-def VLD1d64_UPD : VLD1DWB<0b1100, "64">;
-
-def VLD1q8_UPD : VLD1QWB<0b0000, "8">;
-def VLD1q16_UPD : VLD1QWB<0b0100, "16">;
-def VLD1q32_UPD : VLD1QWB<0b1000, "32">;
-def VLD1q64_UPD : VLD1QWB<0b1100, "64">;
-
-def VLD1q8Pseudo_UPD : VLDQWBPseudo;
-def VLD1q16Pseudo_UPD : VLDQWBPseudo;
-def VLD1q32Pseudo_UPD : VLDQWBPseudo;
-def VLD1q64Pseudo_UPD : VLDQWBPseudo;
+ : NLdSt<0,0b10,0b1010,op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x2u,
+ "vld1", Dt, "\\{$Vd, $dst2\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
+
+def VLD1d8_UPD : VLD1DWB<{0,0,0,?}, "8">;
+def VLD1d16_UPD : VLD1DWB<{0,1,0,?}, "16">;
+def VLD1d32_UPD : VLD1DWB<{1,0,0,?}, "32">;
+def VLD1d64_UPD : VLD1DWB<{1,1,0,?}, "64">;
+
+def VLD1q8_UPD : VLD1QWB<{0,0,?,?}, "8">;
+def VLD1q16_UPD : VLD1QWB<{0,1,?,?}, "16">;
+def VLD1q32_UPD : VLD1QWB<{1,0,?,?}, "32">;
+def VLD1q64_UPD : VLD1QWB<{1,1,?,?}, "64">;
+
+def VLD1q8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
+def VLD1q16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
+def VLD1q32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
+def VLD1q64Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
// ...with 3 registers (some of these are only for the disassembler):
class VLD1D3<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr), IIC_VLD1, "vld1", Dt,
- "\\{$dst1, $dst2, $dst3\\}, $addr", "", []>;
+ : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
+ (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
+ "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
class VLD1D3WB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD1, "vld1", Dt,
- "\\{$dst1, $dst2, $dst3\\}, $addr$offset", "$addr.addr = $wb", []>;
+ : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x3u, "vld1", Dt,
+ "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
-def VLD1d8T : VLD1D3<0b0000, "8">;
-def VLD1d16T : VLD1D3<0b0100, "16">;
-def VLD1d32T : VLD1D3<0b1000, "32">;
-def VLD1d64T : VLD1D3<0b1100, "64">;
+def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
+def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
+def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
+def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
-def VLD1d8T_UPD : VLD1D3WB<0b0000, "8">;
-def VLD1d16T_UPD : VLD1D3WB<0b0100, "16">;
-def VLD1d32T_UPD : VLD1D3WB<0b1000, "32">;
-def VLD1d64T_UPD : VLD1D3WB<0b1100, "64">;
+def VLD1d8T_UPD : VLD1D3WB<{0,0,0,?}, "8">;
+def VLD1d16T_UPD : VLD1D3WB<{0,1,0,?}, "16">;
+def VLD1d32T_UPD : VLD1D3WB<{1,0,0,?}, "32">;
+def VLD1d64T_UPD : VLD1D3WB<{1,1,0,?}, "64">;
-def VLD1d64TPseudo : VLDQQPseudo;
-def VLD1d64TPseudo_UPD : VLDQQWBPseudo;
+def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
+def VLD1d64TPseudo_UPD : VLDQQWBPseudo<IIC_VLD1x3u>;
// ...with 4 registers (some of these are only for the disassembler):
class VLD1D4<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0010,op7_4,(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD1, "vld1", Dt,
- "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr", "", []>;
+ : NLdSt<0,0b10,0b0010,op7_4,(outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
+ "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
class VLD1D4WB<bits<4> op7_4, string Dt>
: NLdSt<0,0b10,0b0010,op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD1, "vld1", Dt,
- "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr$offset", "$addr.addr = $wb",
- []>;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x4u, "vld1", Dt,
+ "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm", "$Rn.addr = $wb",
+ []> {
+ let Inst{5-4} = Rn{5-4};
+}
-def VLD1d8Q : VLD1D4<0b0000, "8">;
-def VLD1d16Q : VLD1D4<0b0100, "16">;
-def VLD1d32Q : VLD1D4<0b1000, "32">;
-def VLD1d64Q : VLD1D4<0b1100, "64">;
+def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
+def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
+def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
+def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
-def VLD1d8Q_UPD : VLD1D4WB<0b0000, "8">;
-def VLD1d16Q_UPD : VLD1D4WB<0b0100, "16">;
-def VLD1d32Q_UPD : VLD1D4WB<0b1000, "32">;
-def VLD1d64Q_UPD : VLD1D4WB<0b1100, "64">;
+def VLD1d8Q_UPD : VLD1D4WB<{0,0,?,?}, "8">;
+def VLD1d16Q_UPD : VLD1D4WB<{0,1,?,?}, "16">;
+def VLD1d32Q_UPD : VLD1D4WB<{1,0,?,?}, "32">;
+def VLD1d64Q_UPD : VLD1D4WB<{1,1,?,?}, "64">;
-def VLD1d64QPseudo : VLDQQPseudo;
-def VLD1d64QPseudo_UPD : VLDQQWBPseudo;
+def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
+def VLD1d64QPseudo_UPD : VLDQQWBPseudo<IIC_VLD1x4u>;
// VLD2 : Vector Load (multiple 2-element structures)
class VLD2D<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr), IIC_VLD2,
- "vld2", Dt, "\\{$dst1, $dst2\\}, $addr", "", []>;
+ : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
+ (ins addrmode6:$Rn), IIC_VLD2,
+ "vld2", Dt, "\\{$Vd, $dst2\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
class VLD2Q<bits<4> op7_4, string Dt>
: NLdSt<0, 0b10, 0b0011, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD2,
- "vld2", Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr", "", []>;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ (ins addrmode6:$Rn), IIC_VLD2x2,
+ "vld2", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VLD2d8 : VLD2D<0b1000, 0b0000, "8">;
-def VLD2d16 : VLD2D<0b1000, 0b0100, "16">;
-def VLD2d32 : VLD2D<0b1000, 0b1000, "32">;
+def VLD2d8 : VLD2D<0b1000, {0,0,?,?}, "8">;
+def VLD2d16 : VLD2D<0b1000, {0,1,?,?}, "16">;
+def VLD2d32 : VLD2D<0b1000, {1,0,?,?}, "32">;
-def VLD2q8 : VLD2Q<0b0000, "8">;
-def VLD2q16 : VLD2Q<0b0100, "16">;
-def VLD2q32 : VLD2Q<0b1000, "32">;
+def VLD2q8 : VLD2Q<{0,0,?,?}, "8">;
+def VLD2q16 : VLD2Q<{0,1,?,?}, "16">;
+def VLD2q32 : VLD2Q<{1,0,?,?}, "32">;
-def VLD2d8Pseudo : VLDQPseudo;
-def VLD2d16Pseudo : VLDQPseudo;
-def VLD2d32Pseudo : VLDQPseudo;
+def VLD2d8Pseudo : VLDQPseudo<IIC_VLD2>;
+def VLD2d16Pseudo : VLDQPseudo<IIC_VLD2>;
+def VLD2d32Pseudo : VLDQPseudo<IIC_VLD2>;
-def VLD2q8Pseudo : VLDQQPseudo;
-def VLD2q16Pseudo : VLDQQPseudo;
-def VLD2q32Pseudo : VLDQQPseudo;
+def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
+def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
+def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;
// ...with address register writeback:
class VLD2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD2,
- "vld2", Dt, "\\{$dst1, $dst2\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2u,
+ "vld2", Dt, "\\{$Vd, $dst2\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
class VLD2QWB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b10, 0b0011, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD2,
- "vld2", Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2x2u,
+ "vld2", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
-def VLD2d8_UPD : VLD2DWB<0b1000, 0b0000, "8">;
-def VLD2d16_UPD : VLD2DWB<0b1000, 0b0100, "16">;
-def VLD2d32_UPD : VLD2DWB<0b1000, 0b1000, "32">;
+def VLD2d8_UPD : VLD2DWB<0b1000, {0,0,?,?}, "8">;
+def VLD2d16_UPD : VLD2DWB<0b1000, {0,1,?,?}, "16">;
+def VLD2d32_UPD : VLD2DWB<0b1000, {1,0,?,?}, "32">;
-def VLD2q8_UPD : VLD2QWB<0b0000, "8">;
-def VLD2q16_UPD : VLD2QWB<0b0100, "16">;
-def VLD2q32_UPD : VLD2QWB<0b1000, "32">;
+def VLD2q8_UPD : VLD2QWB<{0,0,?,?}, "8">;
+def VLD2q16_UPD : VLD2QWB<{0,1,?,?}, "16">;
+def VLD2q32_UPD : VLD2QWB<{1,0,?,?}, "32">;
-def VLD2d8Pseudo_UPD : VLDQWBPseudo;
-def VLD2d16Pseudo_UPD : VLDQWBPseudo;
-def VLD2d32Pseudo_UPD : VLDQWBPseudo;
+def VLD2d8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
+def VLD2d16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
+def VLD2d32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
-def VLD2q8Pseudo_UPD : VLDQQWBPseudo;
-def VLD2q16Pseudo_UPD : VLDQQWBPseudo;
-def VLD2q32Pseudo_UPD : VLDQQWBPseudo;
+def VLD2q8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
+def VLD2q16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
+def VLD2q32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
// ...with double-spaced registers (for disassembly only):
-def VLD2b8 : VLD2D<0b1001, 0b0000, "8">;
-def VLD2b16 : VLD2D<0b1001, 0b0100, "16">;
-def VLD2b32 : VLD2D<0b1001, 0b1000, "32">;
-def VLD2b8_UPD : VLD2DWB<0b1001, 0b0000, "8">;
-def VLD2b16_UPD : VLD2DWB<0b1001, 0b0100, "16">;
-def VLD2b32_UPD : VLD2DWB<0b1001, 0b1000, "32">;
+def VLD2b8 : VLD2D<0b1001, {0,0,?,?}, "8">;
+def VLD2b16 : VLD2D<0b1001, {0,1,?,?}, "16">;
+def VLD2b32 : VLD2D<0b1001, {1,0,?,?}, "32">;
+def VLD2b8_UPD : VLD2DWB<0b1001, {0,0,?,?}, "8">;
+def VLD2b16_UPD : VLD2DWB<0b1001, {0,1,?,?}, "16">;
+def VLD2b32_UPD : VLD2DWB<0b1001, {1,0,?,?}, "32">;
// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr), IIC_VLD3,
- "vld3", Dt, "\\{$dst1, $dst2, $dst3\\}, $addr", "", []>;
+ : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
+ (ins addrmode6:$Rn), IIC_VLD3,
+ "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
-def VLD3d8 : VLD3D<0b0100, 0b0000, "8">;
-def VLD3d16 : VLD3D<0b0100, 0b0100, "16">;
-def VLD3d32 : VLD3D<0b0100, 0b1000, "32">;
+def VLD3d8 : VLD3D<0b0100, {0,0,0,?}, "8">;
+def VLD3d16 : VLD3D<0b0100, {0,1,0,?}, "16">;
+def VLD3d32 : VLD3D<0b0100, {1,0,0,?}, "32">;
-def VLD3d8Pseudo : VLDQQPseudo;
-def VLD3d16Pseudo : VLDQQPseudo;
-def VLD3d32Pseudo : VLDQQPseudo;
+def VLD3d8Pseudo : VLDQQPseudo<IIC_VLD3>;
+def VLD3d16Pseudo : VLDQQPseudo<IIC_VLD3>;
+def VLD3d32Pseudo : VLDQQPseudo<IIC_VLD3>;
// ...with address register writeback:
class VLD3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD3,
- "vld3", Dt, "\\{$dst1, $dst2, $dst3\\}, $addr$offset",
- "$addr.addr = $wb", []>;
-
-def VLD3d8_UPD : VLD3DWB<0b0100, 0b0000, "8">;
-def VLD3d16_UPD : VLD3DWB<0b0100, 0b0100, "16">;
-def VLD3d32_UPD : VLD3DWB<0b0100, 0b1000, "32">;
-
-def VLD3d8Pseudo_UPD : VLDQQWBPseudo;
-def VLD3d16Pseudo_UPD : VLDQQWBPseudo;
-def VLD3d32Pseudo_UPD : VLDQQWBPseudo;
-
-// ...with double-spaced registers (non-updating versions for disassembly only):
-def VLD3q8 : VLD3D<0b0101, 0b0000, "8">;
-def VLD3q16 : VLD3D<0b0101, 0b0100, "16">;
-def VLD3q32 : VLD3D<0b0101, 0b1000, "32">;
-def VLD3q8_UPD : VLD3DWB<0b0101, 0b0000, "8">;
-def VLD3q16_UPD : VLD3DWB<0b0101, 0b0100, "16">;
-def VLD3q32_UPD : VLD3DWB<0b0101, 0b1000, "32">;
-
-def VLD3q8Pseudo_UPD : VLDQQQQWBPseudo;
-def VLD3q16Pseudo_UPD : VLDQQQQWBPseudo;
-def VLD3q32Pseudo_UPD : VLDQQQQWBPseudo;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD3u,
+ "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VLD3d8_UPD : VLD3DWB<0b0100, {0,0,0,?}, "8">;
+def VLD3d16_UPD : VLD3DWB<0b0100, {0,1,0,?}, "16">;
+def VLD3d32_UPD : VLD3DWB<0b0100, {1,0,0,?}, "32">;
+
+def VLD3d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
+def VLD3d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
+def VLD3d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
+
+// ...with double-spaced registers:
+def VLD3q8 : VLD3D<0b0101, {0,0,0,?}, "8">;
+def VLD3q16 : VLD3D<0b0101, {0,1,0,?}, "16">;
+def VLD3q32 : VLD3D<0b0101, {1,0,0,?}, "32">;
+def VLD3q8_UPD : VLD3DWB<0b0101, {0,0,0,?}, "8">;
+def VLD3q16_UPD : VLD3DWB<0b0101, {0,1,0,?}, "16">;
+def VLD3q32_UPD : VLD3DWB<0b0101, {1,0,0,?}, "32">;
+
+def VLD3q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
+def VLD3q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
+def VLD3q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
// ...alternate versions to be allocated odd register numbers:
-def VLD3q8oddPseudo_UPD : VLDQQQQWBPseudo;
-def VLD3q16oddPseudo_UPD : VLDQQQQWBPseudo;
-def VLD3q32oddPseudo_UPD : VLDQQQQWBPseudo;
+def VLD3q8oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
+def VLD3q16oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
+def VLD3q32oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
+
+def VLD3q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
+def VLD3q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
+def VLD3q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
// VLD4 : Vector Load (multiple 4-element structures)
class VLD4D<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD4,
- "vld4", Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr", "", []>;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ (ins addrmode6:$Rn), IIC_VLD4,
+ "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VLD4d8 : VLD4D<0b0000, 0b0000, "8">;
-def VLD4d16 : VLD4D<0b0000, 0b0100, "16">;
-def VLD4d32 : VLD4D<0b0000, 0b1000, "32">;
+def VLD4d8 : VLD4D<0b0000, {0,0,?,?}, "8">;
+def VLD4d16 : VLD4D<0b0000, {0,1,?,?}, "16">;
+def VLD4d32 : VLD4D<0b0000, {1,0,?,?}, "32">;
-def VLD4d8Pseudo : VLDQQPseudo;
-def VLD4d16Pseudo : VLDQQPseudo;
-def VLD4d32Pseudo : VLDQQPseudo;
+def VLD4d8Pseudo : VLDQQPseudo<IIC_VLD4>;
+def VLD4d16Pseudo : VLDQQPseudo<IIC_VLD4>;
+def VLD4d32Pseudo : VLDQQPseudo<IIC_VLD4>;
// ...with address register writeback:
class VLD4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset), IIC_VLD4,
- "vld4", Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr$offset",
- "$addr.addr = $wb", []>;
-
-def VLD4d8_UPD : VLD4DWB<0b0000, 0b0000, "8">;
-def VLD4d16_UPD : VLD4DWB<0b0000, 0b0100, "16">;
-def VLD4d32_UPD : VLD4DWB<0b0000, 0b1000, "32">;
-
-def VLD4d8Pseudo_UPD : VLDQQWBPseudo;
-def VLD4d16Pseudo_UPD : VLDQQWBPseudo;
-def VLD4d32Pseudo_UPD : VLDQQWBPseudo;
-
-// ...with double-spaced registers (non-updating versions for disassembly only):
-def VLD4q8 : VLD4D<0b0001, 0b0000, "8">;
-def VLD4q16 : VLD4D<0b0001, 0b0100, "16">;
-def VLD4q32 : VLD4D<0b0001, 0b1000, "32">;
-def VLD4q8_UPD : VLD4DWB<0b0001, 0b0000, "8">;
-def VLD4q16_UPD : VLD4DWB<0b0001, 0b0100, "16">;
-def VLD4q32_UPD : VLD4DWB<0b0001, 0b1000, "32">;
-
-def VLD4q8Pseudo_UPD : VLDQQQQWBPseudo;
-def VLD4q16Pseudo_UPD : VLDQQQQWBPseudo;
-def VLD4q32Pseudo_UPD : VLDQQQQWBPseudo;
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD4u,
+ "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
+
+def VLD4d8_UPD : VLD4DWB<0b0000, {0,0,?,?}, "8">;
+def VLD4d16_UPD : VLD4DWB<0b0000, {0,1,?,?}, "16">;
+def VLD4d32_UPD : VLD4DWB<0b0000, {1,0,?,?}, "32">;
+
+def VLD4d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
+def VLD4d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
+def VLD4d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
+
+// ...with double-spaced registers:
+def VLD4q8 : VLD4D<0b0001, {0,0,?,?}, "8">;
+def VLD4q16 : VLD4D<0b0001, {0,1,?,?}, "16">;
+def VLD4q32 : VLD4D<0b0001, {1,0,?,?}, "32">;
+def VLD4q8_UPD : VLD4DWB<0b0001, {0,0,?,?}, "8">;
+def VLD4q16_UPD : VLD4DWB<0b0001, {0,1,?,?}, "16">;
+def VLD4q32_UPD : VLD4DWB<0b0001, {1,0,?,?}, "32">;
+
+def VLD4q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
+def VLD4q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
+def VLD4q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
// ...alternate versions to be allocated odd register numbers:
-def VLD4q8oddPseudo_UPD : VLDQQQQWBPseudo;
-def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo;
-def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo;
+def VLD4q8oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
+def VLD4q16oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
+def VLD4q32oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
+
+def VLD4q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
+def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
+def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
+
+} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
+
+// Classes for VLD*LN pseudo-instructions with multi-register operands.
+// These are expanded to real instructions after register allocation.
+class VLDQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QPR:$dst),
+ (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
+ itin, "$src = $dst">;
+class VLDQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
+class VLDQQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQPR:$dst),
+ (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
+ itin, "$src = $dst">;
+class VLDQQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
+class VLDQQQQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQQQPR:$dst),
+ (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
+ itin, "$src = $dst">;
+class VLDQQQQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
// VLD1LN : Vector Load (single element to one lane)
-// FIXME: Not yet implemented.
+class VLD1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
+ PatFrag LoadOp>
+ : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
+ (ins addrmode6:$Rn, DPR:$src, nohash_imm:$lane),
+ IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
+ "$src = $Vd",
+ [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
+ (i32 (LoadOp addrmode6:$Rn)),
+ imm:$lane))]> {
+ let Rm = 0b1111;
+}
+class VLD1QLNPseudo<ValueType Ty, PatFrag LoadOp> : VLDQLNPseudo<IIC_VLD1ln> {
+ let Pattern = [(set QPR:$dst, (vector_insert (Ty QPR:$src),
+ (i32 (LoadOp addrmode6:$addr)),
+ imm:$lane))];
+}
+
+def VLD1LNd8 : VLD1LN<0b0000, {?,?,?,0}, "8", v8i8, extloadi8> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD1LNd16 : VLD1LN<0b0100, {?,?,0,?}, "16", v4i16, extloadi16> {
+ let Inst{7-6} = lane{1-0};
+ let Inst{4} = Rn{4};
+}
+def VLD1LNd32 : VLD1LN<0b1000, {?,0,?,?}, "32", v2i32, load> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{4};
+ let Inst{4} = Rn{4};
+}
+
+def VLD1LNq8Pseudo : VLD1QLNPseudo<v16i8, extloadi8>;
+def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
+def VLD1LNq32Pseudo : VLD1QLNPseudo<v4i32, load>;
+
+def : Pat<(vector_insert (v2f32 DPR:$src),
+ (f32 (load addrmode6:$addr)), imm:$lane),
+ (VLD1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
+def : Pat<(vector_insert (v4f32 QPR:$src),
+ (f32 (load addrmode6:$addr)), imm:$lane),
+ (VLD1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
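
The tied "$src = $Vd" constraint exists because a lane load is a
read-modify-write of the whole register: only the addressed lane changes. A
sketch of the vector_insert semantics with plain arrays:

    #include <array>
    #include <cstdio>

    int main() {
      std::array<int, 2> d = {11, 22};   // v2i32 already live in the register
      int loaded = 99;                   // the word fetched via addrmode6
      unsigned lane = 1;
      d[lane] = loaded;                  // vld1.32 {d0[1]}, [r0]
      std::printf("{%d, %d}\n", d[0], d[1]);  // {11, 99}: lane 0 preserved
      return 0;
    }
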
+
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+
+// ...with address register writeback:
+class VLD1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
+ : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$src, nohash_imm:$lane), IIC_VLD1lnu, "vld1", Dt,
+ "\\{$Vd[$lane]\\}, $Rn$Rm",
+ "$src = $Vd, $Rn.addr = $wb", []>;
+
+def VLD1LNd8_UPD : VLD1LNWB<0b0000, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD1LNd16_UPD : VLD1LNWB<0b0100, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+ let Inst{4} = Rn{4};
+}
+def VLD1LNd32_UPD : VLD1LNWB<0b1000, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{4};
+ let Inst{4} = Rn{4};
+}
+
+def VLD1LNq8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
+def VLD1LNq16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
+def VLD1LNq32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
// VLD2LN : Vector Load (single 2-element structure to one lane)
class VLD2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
- IIC_VLD2, "vld2", Dt, "\\{$dst1[$lane], $dst2[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2", []>;
+ : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
+ (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, nohash_imm:$lane),
+ IIC_VLD2ln, "vld2", Dt, "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn",
+ "$src1 = $Vd, $src2 = $dst2", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
-def VLD2LNd8 : VLD2LN<0b0001, {?,?,?,?}, "8">;
-def VLD2LNd16 : VLD2LN<0b0101, {?,?,0,?}, "16">;
-def VLD2LNd32 : VLD2LN<0b1001, {?,0,?,?}, "32">;
+def VLD2LNd8 : VLD2LN<0b0001, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD2LNd16 : VLD2LN<0b0101, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD2LNd32 : VLD2LN<0b1001, {?,0,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VLD2LNd8Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
+def VLD2LNd16Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
+def VLD2LNd32Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
// ...with double-spaced registers:
-def VLD2LNq16 : VLD2LN<0b0101, {?,?,1,?}, "16">;
-def VLD2LNq32 : VLD2LN<0b1001, {?,1,?,?}, "32">;
+def VLD2LNq16 : VLD2LN<0b0101, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD2LNq32 : VLD2LN<0b1001, {?,1,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VLD2LNq16odd : VLD2LN<0b0101, {?,?,1,?}, "16">;
-def VLD2LNq32odd : VLD2LN<0b1001, {?,1,?,?}, "32">;
+def VLD2LNq16Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
+def VLD2LNq32Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
// ...with address register writeback:
class VLD2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VLD2, "vld2", Dt,
- "\\{$dst1[$lane], $dst2[$lane]\\}, $addr$offset",
- "$src1 = $dst1, $src2 = $dst2, $addr.addr = $wb", []>;
+ : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VLD2lnu, "vld2", Dt,
+ "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn$Rm",
+ "$src1 = $Vd, $src2 = $dst2, $Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
-def VLD2LNd8_UPD : VLD2LNWB<0b0001, {?,?,?,?}, "8">;
-def VLD2LNd16_UPD : VLD2LNWB<0b0101, {?,?,0,?}, "16">;
-def VLD2LNd32_UPD : VLD2LNWB<0b1001, {?,0,?,?}, "32">;
+def VLD2LNd8_UPD : VLD2LNWB<0b0001, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD2LNd16_UPD : VLD2LNWB<0b0101, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD2LNd32_UPD : VLD2LNWB<0b1001, {?,0,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VLD2LNq16_UPD : VLD2LNWB<0b0101, {?,?,1,?}, "16">;
-def VLD2LNq32_UPD : VLD2LNWB<0b1001, {?,1,?,?}, "32">;
+def VLD2LNd8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
+def VLD2LNd16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
+def VLD2LNd32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
+
+def VLD2LNq16_UPD : VLD2LNWB<0b0101, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD2LNq32_UPD : VLD2LNWB<0b1001, {?,1,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VLD2LNq16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
+def VLD2LNq32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
// VLD3LN : Vector Load (single 3-element structure to one lane)
class VLD3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
- nohash_imm:$lane), IIC_VLD3, "vld3", Dt,
- "\\{$dst1[$lane], $dst2[$lane], $dst3[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3", []>;
+ : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
+ (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3,
+ nohash_imm:$lane), IIC_VLD3ln, "vld3", Dt,
+ "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn",
+ "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3", []> {
+ let Rm = 0b1111;
+}
-def VLD3LNd8 : VLD3LN<0b0010, {?,?,?,0}, "8">;
-def VLD3LNd16 : VLD3LN<0b0110, {?,?,0,0}, "16">;
-def VLD3LNd32 : VLD3LN<0b1010, {?,0,0,0}, "32">;
+def VLD3LNd8 : VLD3LN<0b0010, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD3LNd16 : VLD3LN<0b0110, {?,?,0,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD3LNd32 : VLD3LN<0b1010, {?,0,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VLD3LNd8Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
+def VLD3LNd16Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
+def VLD3LNd32Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
// ...with double-spaced registers:
-def VLD3LNq16 : VLD3LN<0b0110, {?,?,1,0}, "16">;
-def VLD3LNq32 : VLD3LN<0b1010, {?,1,0,0}, "32">;
+def VLD3LNq16 : VLD3LN<0b0110, {?,?,1,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD3LNq32 : VLD3LN<0b1010, {?,1,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VLD3LNq16odd : VLD3LN<0b0110, {?,?,1,0}, "16">;
-def VLD3LNq32odd : VLD3LN<0b1010, {?,1,0,0}, "32">;
+def VLD3LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
+def VLD3LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
// ...with address register writeback:
class VLD3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
+ : NLdStLn<1, 0b10, op11_8, op7_4,
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
DPR:$src1, DPR:$src2, DPR:$src3, nohash_imm:$lane),
- IIC_VLD3, "vld3", Dt,
- "\\{$dst1[$lane], $dst2[$lane], $dst3[$lane]\\}, $addr$offset",
- "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $addr.addr = $wb",
+ IIC_VLD3lnu, "vld3", Dt,
+ "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn$Rm",
+ "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $Rn.addr = $wb",
[]>;
-def VLD3LNd8_UPD : VLD3LNWB<0b0010, {?,?,?,0}, "8">;
-def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16">;
-def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32">;
+def VLD3LNd8_UPD : VLD3LNWB<0b0010, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16">;
-def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32">;
+def VLD3LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
+def VLD3LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
+def VLD3LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
+
+def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VLD3LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
+def VLD3LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
// VLD4LN : Vector Load (single 4-element structure to one lane)
class VLD4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
- nohash_imm:$lane), IIC_VLD4, "vld4", Dt,
- "\\{$dst1[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []>;
+ : NLdStLn<1, 0b10, op11_8, op7_4,
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
+ nohash_imm:$lane), IIC_VLD4ln, "vld4", Dt,
+ "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn",
+ "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
-def VLD4LNd8 : VLD4LN<0b0011, {?,?,?,?}, "8">;
-def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16">;
-def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32">;
+def VLD4LNd8 : VLD4LN<0b0011, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
+
+def VLD4LNd8Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
+def VLD4LNd16Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
+def VLD4LNd32Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
// ...with double-spaced registers:
-def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16">;
-def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32">;
+def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VLD4LNq16odd : VLD4LN<0b0111, {?,?,1,?}, "16">;
-def VLD4LNq32odd : VLD4LN<0b1011, {?,1,?,?}, "32">;
+def VLD4LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
+def VLD4LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
// ...with address register writeback:
class VLD4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, op11_8, op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
+ : NLdStLn<1, 0b10, op11_8, op7_4,
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
- IIC_VLD4, "vld4", Dt,
-"\\{$dst1[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $addr$offset",
-"$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $addr.addr = $wb",
- []>;
+ IIC_VLD4lnu, "vld4", Dt,
+"\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn$Rm",
+"$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $Rn.addr = $wb",
+ []> {
+ let Inst{4} = Rn{4};
+}
-def VLD4LNd8_UPD : VLD4LNWB<0b0011, {?,?,?,?}, "8">;
-def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16">;
-def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32">;
+def VLD4LNd8_UPD : VLD4LNWB<0b0011, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
-def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16">;
-def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32">;
+def VLD4LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
+def VLD4LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
+def VLD4LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
+
+def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
+
+def VLD4LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
+def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
+
+} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// VLD1DUP : Vector Load (single element to all lanes)
+class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd), (ins addrmode6dup:$Rn),
+ IIC_VLD1dup, "vld1", Dt, "\\{$Vd[]\\}, $Rn", "",
+ [(set DPR:$Vd, (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+class VLD1QDUPPseudo<ValueType Ty, PatFrag LoadOp> : VLDQPseudo<IIC_VLD1dup> {
+ let Pattern = [(set QPR:$dst,
+ (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$addr)))))];
+}
+
+def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
+def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
+def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
+
+def VLD1DUPq8Pseudo : VLD1QDUPPseudo<v16i8, extloadi8>;
+def VLD1DUPq16Pseudo : VLD1QDUPPseudo<v8i16, extloadi16>;
+def VLD1DUPq32Pseudo : VLD1QDUPPseudo<v4i32, load>;
+
+def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
+ (VLD1DUPd32 addrmode6:$addr)>;
+def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
+ (VLD1DUPq32Pseudo addrmode6:$addr)>;
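
VLD1DUP pairs a scalar load with NEONvdup, so one memory element ends up
replicated across every lane. A matching sketch:

    #include <array>
    #include <cstdio>

    int main() {
      int loaded = 7;                    // element fetched via addrmode6dup
      std::array<int, 4> q;
      q.fill(loaded);                    // vld1.32 {d0[], d1[]}, [r0]
      std::printf("{%d, %d, %d, %d}\n", q[0], q[1], q[2], q[3]);
      return 0;
    }
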
+
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
+
+class VLD1QDUP<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2),
+ (ins addrmode6dup:$Rn), IIC_VLD1dup,
+ "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+
+def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8">;
+def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16">;
+def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32">;
+
+// ...with address register writeback:
+class VLD1DUPWB<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
+ "vld1", Dt, "\\{$Vd[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+class VLD1QDUPWB<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
+ (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
+ "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VLD1DUPd8_UPD : VLD1DUPWB<{0,0,0,0}, "8">;
+def VLD1DUPd16_UPD : VLD1DUPWB<{0,1,0,?}, "16">;
+def VLD1DUPd32_UPD : VLD1DUPWB<{1,0,0,?}, "32">;
+
+def VLD1DUPq8_UPD : VLD1QDUPWB<{0,0,1,0}, "8">;
+def VLD1DUPq16_UPD : VLD1QDUPWB<{0,1,1,?}, "16">;
+def VLD1DUPq32_UPD : VLD1QDUPWB<{1,0,1,?}, "32">;
+
+def VLD1DUPq8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
+def VLD1DUPq16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
+def VLD1DUPq32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
+
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
+class VLD2DUP<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2),
+ (ins addrmode6dup:$Rn), IIC_VLD2dup,
+ "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+
+def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8">;
+def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16">;
+def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32">;
+
+def VLD2DUPd8Pseudo : VLDQPseudo<IIC_VLD2dup>;
+def VLD2DUPd16Pseudo : VLDQPseudo<IIC_VLD2dup>;
+def VLD2DUPd32Pseudo : VLDQPseudo<IIC_VLD2dup>;
+
+// ...with double-spaced registers (not used for codegen):
+def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8">;
+def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16">;
+def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32">;
+
+// ...with address register writeback:
+class VLD2DUPWB<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
+ (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD2dupu,
+ "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VLD2DUPd8_UPD : VLD2DUPWB<{0,0,0,0}, "8">;
+def VLD2DUPd16_UPD : VLD2DUPWB<{0,1,0,?}, "16">;
+def VLD2DUPd32_UPD : VLD2DUPWB<{1,0,0,?}, "32">;
+
+def VLD2DUPd8x2_UPD : VLD2DUPWB<{0,0,1,0}, "8">;
+def VLD2DUPd16x2_UPD : VLD2DUPWB<{0,1,1,?}, "16">;
+def VLD2DUPd32x2_UPD : VLD2DUPWB<{1,0,1,?}, "32">;
+
+def VLD2DUPd8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
+def VLD2DUPd16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
+def VLD2DUPd32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
+
// VLD3DUP : Vector Load (single 3-element structure to all lanes)
+class VLD3DUP<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
+ (ins addrmode6dup:$Rn), IIC_VLD3dup,
+ "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+
+def VLD3DUPd8 : VLD3DUP<{0,0,0,?}, "8">;
+def VLD3DUPd16 : VLD3DUP<{0,1,0,?}, "16">;
+def VLD3DUPd32 : VLD3DUP<{1,0,0,?}, "32">;
+
+def VLD3DUPd8Pseudo : VLDQQPseudo<IIC_VLD3dup>;
+def VLD3DUPd16Pseudo : VLDQQPseudo<IIC_VLD3dup>;
+def VLD3DUPd32Pseudo : VLDQQPseudo<IIC_VLD3dup>;
+
+// ...with double-spaced registers (not used for codegen):
+def VLD3DUPd8x2 : VLD3DUP<{0,0,1,?}, "8">;
+def VLD3DUPd16x2 : VLD3DUP<{0,1,1,?}, "16">;
+def VLD3DUPd32x2 : VLD3DUP<{1,0,1,?}, "32">;
+
+// ...with address register writeback:
+class VLD3DUPWB<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
+ (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
+ "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
+def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
+def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
+
+def VLD3DUPd8x2_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
+def VLD3DUPd16x2_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
+def VLD3DUPd32x2_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
+
+def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
+def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
+def VLD3DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
+
// VLD4DUP : Vector Load (single 4-element structure to all lanes)
-// FIXME: Not yet implemented.
+class VLD4DUP<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1111, op7_4,
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ (ins addrmode6dup:$Rn), IIC_VLD4dup,
+ "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+
+def VLD4DUPd8 : VLD4DUP<{0,0,0,?}, "8">;
+def VLD4DUPd16 : VLD4DUP<{0,1,0,?}, "16">;
+def VLD4DUPd32 : VLD4DUP<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
+
+def VLD4DUPd8Pseudo : VLDQQPseudo<IIC_VLD4dup>;
+def VLD4DUPd16Pseudo : VLDQQPseudo<IIC_VLD4dup>;
+def VLD4DUPd32Pseudo : VLDQQPseudo<IIC_VLD4dup>;
+
+// ...with double-spaced registers (not used for codegen):
+def VLD4DUPd8x2 : VLD4DUP<{0,0,1,?}, "8">;
+def VLD4DUPd16x2 : VLD4DUP<{0,1,1,?}, "16">;
+def VLD4DUPd32x2 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
+
+// ...with address register writeback:
+class VLD4DUPWB<bits<4> op7_4, string Dt>
+ : NLdSt<1, 0b10, 0b1111, op7_4,
+ (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+ (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD4dupu,
+ "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VLD4DUPd8_UPD : VLD4DUPWB<{0,0,0,0}, "8">;
+def VLD4DUPd16_UPD : VLD4DUPWB<{0,1,0,?}, "16">;
+def VLD4DUPd32_UPD : VLD4DUPWB<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
+
+def VLD4DUPd8x2_UPD : VLD4DUPWB<{0,0,1,0}, "8">;
+def VLD4DUPd16x2_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
+def VLD4DUPd32x2_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
+
+def VLD4DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
+def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
+def VLD4DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
+
} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
// Classes for VST* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
-class VSTQPseudo
- : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src), IIC_VST, "">;
-class VSTQWBPseudo
+class VSTQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src), itin, "">;
+class VSTQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, QPR:$src), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset, QPR:$src), itin,
"$addr.addr = $wb">;
-class VSTQQPseudo
- : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), IIC_VST, "">;
-class VSTQQWBPseudo
+class VSTQQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), itin, "">;
+class VSTQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, QQPR:$src), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset, QQPR:$src), itin,
"$addr.addr = $wb">;
-class VSTQQQQWBPseudo
+class VSTQQQQPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src), itin, "">;
+class VSTQQQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), IIC_VST,
+ (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
"$addr.addr = $wb">;
// VST1 : Vector Store (multiple single elements)
class VST1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$addr, DPR:$src), IIC_VST,
- "vst1", Dt, "\\{$src\\}, $addr", "", []>;
+ : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, DPR:$Vd),
+ IIC_VST1, "vst1", Dt, "\\{$Vd\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
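+// ($Rn here is a 6-bit addrmode6 operand: Rn{3-0} is the base register
+// and Rn{5-4} encode the alignment, so the "let Inst{4} = Rn{4};" and
+// "let Inst{5-4} = Rn{5-4};" lines copy the alignment bits into the
+// instruction encoding.)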
class VST1Q<bits<4> op7_4, string Dt>
: NLdSt<0,0b00,0b1010,op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2), IIC_VST,
- "vst1", Dt, "\\{$src1, $src2\\}, $addr", "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2), IIC_VST1x2,
+ "vst1", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VST1d8 : VST1D<0b0000, "8">;
-def VST1d16 : VST1D<0b0100, "16">;
-def VST1d32 : VST1D<0b1000, "32">;
-def VST1d64 : VST1D<0b1100, "64">;
+def VST1d8 : VST1D<{0,0,0,?}, "8">;
+def VST1d16 : VST1D<{0,1,0,?}, "16">;
+def VST1d32 : VST1D<{1,0,0,?}, "32">;
+def VST1d64 : VST1D<{1,1,0,?}, "64">;
-def VST1q8 : VST1Q<0b0000, "8">;
-def VST1q16 : VST1Q<0b0100, "16">;
-def VST1q32 : VST1Q<0b1000, "32">;
-def VST1q64 : VST1Q<0b1100, "64">;
+def VST1q8 : VST1Q<{0,0,?,?}, "8">;
+def VST1q16 : VST1Q<{0,1,?,?}, "16">;
+def VST1q32 : VST1Q<{1,0,?,?}, "32">;
+def VST1q64 : VST1Q<{1,1,?,?}, "64">;
-def VST1q8Pseudo : VSTQPseudo;
-def VST1q16Pseudo : VSTQPseudo;
-def VST1q32Pseudo : VSTQPseudo;
-def VST1q64Pseudo : VSTQPseudo;
+def VST1q8Pseudo : VSTQPseudo<IIC_VST1x2>;
+def VST1q16Pseudo : VSTQPseudo<IIC_VST1x2>;
+def VST1q32Pseudo : VSTQPseudo<IIC_VST1x2>;
+def VST1q64Pseudo : VSTQPseudo<IIC_VST1x2>;
// ...with address register writeback:
class VST1DWB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0111, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, DPR:$src), IIC_VST,
- "vst1", Dt, "\\{$src\\}, $addr$offset", "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd), IIC_VST1u,
+ "vst1", Dt, "\\{$Vd\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
class VST1QWB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b1010, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, QPR:$src), IIC_VST,
- "vst1", Dt, "${src:dregpair}, $addr$offset", "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
+ IIC_VST1x2u, "vst1", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
-def VST1d8_UPD : VST1DWB<0b0000, "8">;
-def VST1d16_UPD : VST1DWB<0b0100, "16">;
-def VST1d32_UPD : VST1DWB<0b1000, "32">;
-def VST1d64_UPD : VST1DWB<0b1100, "64">;
+def VST1d8_UPD : VST1DWB<{0,0,0,?}, "8">;
+def VST1d16_UPD : VST1DWB<{0,1,0,?}, "16">;
+def VST1d32_UPD : VST1DWB<{1,0,0,?}, "32">;
+def VST1d64_UPD : VST1DWB<{1,1,0,?}, "64">;
-def VST1q8_UPD : VST1QWB<0b0000, "8">;
-def VST1q16_UPD : VST1QWB<0b0100, "16">;
-def VST1q32_UPD : VST1QWB<0b1000, "32">;
-def VST1q64_UPD : VST1QWB<0b1100, "64">;
+def VST1q8_UPD : VST1QWB<{0,0,?,?}, "8">;
+def VST1q16_UPD : VST1QWB<{0,1,?,?}, "16">;
+def VST1q32_UPD : VST1QWB<{1,0,?,?}, "32">;
+def VST1q64_UPD : VST1QWB<{1,1,?,?}, "64">;
-def VST1q8Pseudo_UPD : VSTQWBPseudo;
-def VST1q16Pseudo_UPD : VSTQWBPseudo;
-def VST1q32Pseudo_UPD : VSTQWBPseudo;
-def VST1q64Pseudo_UPD : VSTQWBPseudo;
+def VST1q8Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
+def VST1q16Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
+def VST1q32Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
+def VST1q64Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
// ...with 3 registers (some of these are only for the disassembler):
class VST1D3<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0110, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3),
- IIC_VST, "vst1", Dt, "\\{$src1, $src2, $src3\\}, $addr", "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3),
+ IIC_VST1x3, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
class VST1D3WB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0110, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3),
- IIC_VST, "vst1", Dt, "\\{$src1, $src2, $src3\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3),
+ IIC_VST1x3u, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
-def VST1d8T : VST1D3<0b0000, "8">;
-def VST1d16T : VST1D3<0b0100, "16">;
-def VST1d32T : VST1D3<0b1000, "32">;
-def VST1d64T : VST1D3<0b1100, "64">;
+def VST1d8T : VST1D3<{0,0,0,?}, "8">;
+def VST1d16T : VST1D3<{0,1,0,?}, "16">;
+def VST1d32T : VST1D3<{1,0,0,?}, "32">;
+def VST1d64T : VST1D3<{1,1,0,?}, "64">;
-def VST1d8T_UPD : VST1D3WB<0b0000, "8">;
-def VST1d16T_UPD : VST1D3WB<0b0100, "16">;
-def VST1d32T_UPD : VST1D3WB<0b1000, "32">;
-def VST1d64T_UPD : VST1D3WB<0b1100, "64">;
+def VST1d8T_UPD : VST1D3WB<{0,0,0,?}, "8">;
+def VST1d16T_UPD : VST1D3WB<{0,1,0,?}, "16">;
+def VST1d32T_UPD : VST1D3WB<{1,0,0,?}, "32">;
+def VST1d64T_UPD : VST1D3WB<{1,1,0,?}, "64">;
-def VST1d64TPseudo : VSTQQPseudo;
-def VST1d64TPseudo_UPD : VSTQQWBPseudo;
+def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
+def VST1d64TPseudo_UPD : VSTQQWBPseudo<IIC_VST1x3u>;
// ...with 4 registers (some of these are only for the disassembler):
class VST1D4<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0010, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, "vst1", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr", "",
- []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
+ IIC_VST1x4, "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn", "",
+ []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
class VST1D4WB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0010, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, "vst1", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST1x4u,
+ "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
-def VST1d8Q : VST1D4<0b0000, "8">;
-def VST1d16Q : VST1D4<0b0100, "16">;
-def VST1d32Q : VST1D4<0b1000, "32">;
-def VST1d64Q : VST1D4<0b1100, "64">;
+def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
+def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
+def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
+def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
-def VST1d8Q_UPD : VST1D4WB<0b0000, "8">;
-def VST1d16Q_UPD : VST1D4WB<0b0100, "16">;
-def VST1d32Q_UPD : VST1D4WB<0b1000, "32">;
-def VST1d64Q_UPD : VST1D4WB<0b1100, "64">;
+def VST1d8Q_UPD : VST1D4WB<{0,0,?,?}, "8">;
+def VST1d16Q_UPD : VST1D4WB<{0,1,?,?}, "16">;
+def VST1d32Q_UPD : VST1D4WB<{1,0,?,?}, "32">;
+def VST1d64Q_UPD : VST1D4WB<{1,1,?,?}, "64">;
-def VST1d64QPseudo : VSTQQPseudo;
-def VST1d64QPseudo_UPD : VSTQQWBPseudo;
+def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
+def VST1d64QPseudo_UPD : VSTQQWBPseudo<IIC_VST1x4u>;
// VST2 : Vector Store (multiple 2-element structures)
class VST2D<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2),
- IIC_VST, "vst2", Dt, "\\{$src1, $src2\\}, $addr", "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2),
+ IIC_VST2, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
class VST2Q<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0011, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, "vst2", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
+ IIC_VST2x2, "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
+ "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VST2d8 : VST2D<0b1000, 0b0000, "8">;
-def VST2d16 : VST2D<0b1000, 0b0100, "16">;
-def VST2d32 : VST2D<0b1000, 0b1000, "32">;
+def VST2d8 : VST2D<0b1000, {0,0,?,?}, "8">;
+def VST2d16 : VST2D<0b1000, {0,1,?,?}, "16">;
+def VST2d32 : VST2D<0b1000, {1,0,?,?}, "32">;
-def VST2q8 : VST2Q<0b0000, "8">;
-def VST2q16 : VST2Q<0b0100, "16">;
-def VST2q32 : VST2Q<0b1000, "32">;
+def VST2q8 : VST2Q<{0,0,?,?}, "8">;
+def VST2q16 : VST2Q<{0,1,?,?}, "16">;
+def VST2q32 : VST2Q<{1,0,?,?}, "32">;
-def VST2d8Pseudo : VSTQPseudo;
-def VST2d16Pseudo : VSTQPseudo;
-def VST2d32Pseudo : VSTQPseudo;
+def VST2d8Pseudo : VSTQPseudo<IIC_VST2>;
+def VST2d16Pseudo : VSTQPseudo<IIC_VST2>;
+def VST2d32Pseudo : VSTQPseudo<IIC_VST2>;
-def VST2q8Pseudo : VSTQQPseudo;
-def VST2q16Pseudo : VSTQQPseudo;
-def VST2q32Pseudo : VSTQQPseudo;
+def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
+def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
+def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;
// ...with address register writeback:
class VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset, DPR:$src1, DPR:$src2),
- IIC_VST, "vst2", Dt, "\\{$src1, $src2\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
+ IIC_VST2u, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
class VST2QWB<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, "vst2", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST2x2u,
+ "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
-def VST2d8_UPD : VST2DWB<0b1000, 0b0000, "8">;
-def VST2d16_UPD : VST2DWB<0b1000, 0b0100, "16">;
-def VST2d32_UPD : VST2DWB<0b1000, 0b1000, "32">;
+def VST2d8_UPD : VST2DWB<0b1000, {0,0,?,?}, "8">;
+def VST2d16_UPD : VST2DWB<0b1000, {0,1,?,?}, "16">;
+def VST2d32_UPD : VST2DWB<0b1000, {1,0,?,?}, "32">;
-def VST2q8_UPD : VST2QWB<0b0000, "8">;
-def VST2q16_UPD : VST2QWB<0b0100, "16">;
-def VST2q32_UPD : VST2QWB<0b1000, "32">;
+def VST2q8_UPD : VST2QWB<{0,0,?,?}, "8">;
+def VST2q16_UPD : VST2QWB<{0,1,?,?}, "16">;
+def VST2q32_UPD : VST2QWB<{1,0,?,?}, "32">;
-def VST2d8Pseudo_UPD : VSTQWBPseudo;
-def VST2d16Pseudo_UPD : VSTQWBPseudo;
-def VST2d32Pseudo_UPD : VSTQWBPseudo;
+def VST2d8Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
+def VST2d16Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
+def VST2d32Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
-def VST2q8Pseudo_UPD : VSTQQWBPseudo;
-def VST2q16Pseudo_UPD : VSTQQWBPseudo;
-def VST2q32Pseudo_UPD : VSTQQWBPseudo;
+def VST2q8Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
+def VST2q16Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
+def VST2q32Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
// ...with double-spaced registers (for disassembly only):
-def VST2b8 : VST2D<0b1001, 0b0000, "8">;
-def VST2b16 : VST2D<0b1001, 0b0100, "16">;
-def VST2b32 : VST2D<0b1001, 0b1000, "32">;
-def VST2b8_UPD : VST2DWB<0b1001, 0b0000, "8">;
-def VST2b16_UPD : VST2DWB<0b1001, 0b0100, "16">;
-def VST2b32_UPD : VST2DWB<0b1001, 0b1000, "32">;
+def VST2b8 : VST2D<0b1001, {0,0,?,?}, "8">;
+def VST2b16 : VST2D<0b1001, {0,1,?,?}, "16">;
+def VST2b32 : VST2D<0b1001, {1,0,?,?}, "32">;
+def VST2b8_UPD : VST2DWB<0b1001, {0,0,?,?}, "8">;
+def VST2b16_UPD : VST2DWB<0b1001, {0,1,?,?}, "16">;
+def VST2b32_UPD : VST2DWB<0b1001, {1,0,?,?}, "32">;
// VST3 : Vector Store (multiple 3-element structures)
class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
- "vst3", Dt, "\\{$src1, $src2, $src3\\}, $addr", "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3,
+ "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
-def VST3d8 : VST3D<0b0100, 0b0000, "8">;
-def VST3d16 : VST3D<0b0100, 0b0100, "16">;
-def VST3d32 : VST3D<0b0100, 0b1000, "32">;
+def VST3d8 : VST3D<0b0100, {0,0,0,?}, "8">;
+def VST3d16 : VST3D<0b0100, {0,1,0,?}, "16">;
+def VST3d32 : VST3D<0b0100, {1,0,0,?}, "32">;
-def VST3d8Pseudo : VSTQQPseudo;
-def VST3d16Pseudo : VSTQQPseudo;
-def VST3d32Pseudo : VSTQQPseudo;
+def VST3d8Pseudo : VSTQQPseudo<IIC_VST3>;
+def VST3d16Pseudo : VSTQQPseudo<IIC_VST3>;
+def VST3d32Pseudo : VSTQQPseudo<IIC_VST3>;
// ...with address register writeback:
class VST3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
- "vst3", Dt, "\\{$src1, $src2, $src3\\}, $addr$offset",
- "$addr.addr = $wb", []>;
-
-def VST3d8_UPD : VST3DWB<0b0100, 0b0000, "8">;
-def VST3d16_UPD : VST3DWB<0b0100, 0b0100, "16">;
-def VST3d32_UPD : VST3DWB<0b0100, 0b1000, "32">;
-
-def VST3d8Pseudo_UPD : VSTQQWBPseudo;
-def VST3d16Pseudo_UPD : VSTQQWBPseudo;
-def VST3d32Pseudo_UPD : VSTQQWBPseudo;
-
-// ...with double-spaced registers (non-updating versions for disassembly only):
-def VST3q8 : VST3D<0b0101, 0b0000, "8">;
-def VST3q16 : VST3D<0b0101, 0b0100, "16">;
-def VST3q32 : VST3D<0b0101, 0b1000, "32">;
-def VST3q8_UPD : VST3DWB<0b0101, 0b0000, "8">;
-def VST3q16_UPD : VST3DWB<0b0101, 0b0100, "16">;
-def VST3q32_UPD : VST3DWB<0b0101, 0b1000, "32">;
-
-def VST3q8Pseudo_UPD : VSTQQQQWBPseudo;
-def VST3q16Pseudo_UPD : VSTQQQQWBPseudo;
-def VST3q32Pseudo_UPD : VSTQQQQWBPseudo;
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3u,
+ "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
+
+def VST3d8_UPD : VST3DWB<0b0100, {0,0,0,?}, "8">;
+def VST3d16_UPD : VST3DWB<0b0100, {0,1,0,?}, "16">;
+def VST3d32_UPD : VST3DWB<0b0100, {1,0,0,?}, "32">;
+
+def VST3d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
+def VST3d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
+def VST3d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
+
+// ...with double-spaced registers:
+def VST3q8 : VST3D<0b0101, {0,0,0,?}, "8">;
+def VST3q16 : VST3D<0b0101, {0,1,0,?}, "16">;
+def VST3q32 : VST3D<0b0101, {1,0,0,?}, "32">;
+def VST3q8_UPD : VST3DWB<0b0101, {0,0,0,?}, "8">;
+def VST3q16_UPD : VST3DWB<0b0101, {0,1,0,?}, "16">;
+def VST3q32_UPD : VST3DWB<0b0101, {1,0,0,?}, "32">;
+
+def VST3q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
+def VST3q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
+def VST3q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
// ...alternate versions to be allocated odd register numbers:
-def VST3q8oddPseudo_UPD : VSTQQQQWBPseudo;
-def VST3q16oddPseudo_UPD : VSTQQQQWBPseudo;
-def VST3q32oddPseudo_UPD : VSTQQQQWBPseudo;
+def VST3q8oddPseudo : VSTQQQQPseudo<IIC_VST3>;
+def VST3q16oddPseudo : VSTQQQQPseudo<IIC_VST3>;
+def VST3q32oddPseudo : VSTQQQQPseudo<IIC_VST3>;
+
+def VST3q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
+def VST3q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
+def VST3q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
// VST4 : Vector Store (multiple 4-element structures)
class VST4D<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, "vst4", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "", []>;
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
+ IIC_VST4, "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
+ "", []> {
+ let Rm = 0b1111;
+ let Inst{5-4} = Rn{5-4};
+}
-def VST4d8 : VST4D<0b0000, 0b0000, "8">;
-def VST4d16 : VST4D<0b0000, 0b0100, "16">;
-def VST4d32 : VST4D<0b0000, 0b1000, "32">;
+def VST4d8 : VST4D<0b0000, {0,0,?,?}, "8">;
+def VST4d16 : VST4D<0b0000, {0,1,?,?}, "16">;
+def VST4d32 : VST4D<0b0000, {1,0,?,?}, "32">;
-def VST4d8Pseudo : VSTQQPseudo;
-def VST4d16Pseudo : VSTQQPseudo;
-def VST4d32Pseudo : VSTQQPseudo;
+def VST4d8Pseudo : VSTQQPseudo<IIC_VST4>;
+def VST4d16Pseudo : VSTQQPseudo<IIC_VST4>;
+def VST4d32Pseudo : VSTQQPseudo<IIC_VST4>;
// ...with address register writeback:
class VST4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST,
- "vst4", Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr$offset",
- "$addr.addr = $wb", []>;
-
-def VST4d8_UPD : VST4DWB<0b0000, 0b0000, "8">;
-def VST4d16_UPD : VST4DWB<0b0000, 0b0100, "16">;
-def VST4d32_UPD : VST4DWB<0b0000, 0b1000, "32">;
-
-def VST4d8Pseudo_UPD : VSTQQWBPseudo;
-def VST4d16Pseudo_UPD : VSTQQWBPseudo;
-def VST4d32Pseudo_UPD : VSTQQWBPseudo;
-
-// ...with double-spaced registers (non-updating versions for disassembly only):
-def VST4q8 : VST4D<0b0001, 0b0000, "8">;
-def VST4q16 : VST4D<0b0001, 0b0100, "16">;
-def VST4q32 : VST4D<0b0001, 0b1000, "32">;
-def VST4q8_UPD : VST4DWB<0b0001, 0b0000, "8">;
-def VST4q16_UPD : VST4DWB<0b0001, 0b0100, "16">;
-def VST4q32_UPD : VST4DWB<0b0001, 0b1000, "32">;
-
-def VST4q8Pseudo_UPD : VSTQQQQWBPseudo;
-def VST4q16Pseudo_UPD : VSTQQQQWBPseudo;
-def VST4q32Pseudo_UPD : VSTQQQQWBPseudo;
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST4u,
+ "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+}
+
+def VST4d8_UPD : VST4DWB<0b0000, {0,0,?,?}, "8">;
+def VST4d16_UPD : VST4DWB<0b0000, {0,1,?,?}, "16">;
+def VST4d32_UPD : VST4DWB<0b0000, {1,0,?,?}, "32">;
+
+def VST4d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
+def VST4d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
+def VST4d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
+
+// ...with double-spaced registers:
+def VST4q8 : VST4D<0b0001, {0,0,?,?}, "8">;
+def VST4q16 : VST4D<0b0001, {0,1,?,?}, "16">;
+def VST4q32 : VST4D<0b0001, {1,0,?,?}, "32">;
+def VST4q8_UPD : VST4DWB<0b0001, {0,0,?,?}, "8">;
+def VST4q16_UPD : VST4DWB<0b0001, {0,1,?,?}, "16">;
+def VST4q32_UPD : VST4DWB<0b0001, {1,0,?,?}, "32">;
+
+def VST4q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
+def VST4q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
+def VST4q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
// ...alternate versions to be allocated odd register numbers:
-def VST4q8oddPseudo_UPD : VSTQQQQWBPseudo;
-def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo;
-def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo;
+def VST4q8oddPseudo : VSTQQQQPseudo<IIC_VST4>;
+def VST4q16oddPseudo : VSTQQQQPseudo<IIC_VST4>;
+def VST4q32oddPseudo : VSTQQQQPseudo<IIC_VST4>;
+
+def VST4q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
+def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
+def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
+
+} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
+
+// Classes for VST*LN pseudo-instructions with multi-register operands.
+// These are expanded to real instructions after register allocation.
+class VSTQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
+ itin, "">;
+class VSTQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb">;
+class VSTQQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
+ itin, "">;
+class VSTQQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb">;
+class VSTQQQQLNPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
+ itin, "">;
+class VSTQQQQLNWBPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
+ nohash_imm:$lane), itin, "$addr.addr = $wb">;
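+// (nohash_imm is an immediate operand printed without the usual '#'
+// prefix, matching the "{d0[1]}" lane syntax in the assembly strings.)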
// VST1LN : Vector Store (single element from one lane)
-// FIXME: Not yet implemented.
+class VST1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
+ PatFrag StoreOp, SDNode ExtractOp>
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
+ (ins addrmode6:$Rn, DPR:$Vd, nohash_imm:$lane),
+ IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
+ [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
+ let Rm = 0b1111;
+}
+class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
+ : VSTQLNPseudo<IIC_VST1ln> {
+ let Pattern = [(StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
+ addrmode6:$addr)];
+}
+
+def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
+ NEONvgetlaneu> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
+ NEONvgetlaneu> {
+ let Inst{7-6} = lane{1-0};
+ let Inst{4} = Rn{5};
+}
+def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt> {
+ let Inst{7} = lane{0};
+ let Inst{5-4} = Rn{5-4};
+}
+
+def VST1LNq8Pseudo : VST1QLNPseudo<v16i8, truncstorei8, NEONvgetlaneu>;
+def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
+def VST1LNq32Pseudo : VST1QLNPseudo<v4i32, store, extractelt>;
+
+def : Pat<(store (extractelt (v2f32 DPR:$src), imm:$lane), addrmode6:$addr),
+ (VST1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
+def : Pat<(store (extractelt (v4f32 QPR:$src), imm:$lane), addrmode6:$addr),
+ (VST1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
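+// (The class-level patterns above cover only the integer element
+// types, so the f32 lane stores are matched by these explicit Pats,
+// reusing the 32-bit instructions.)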
+
+let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
+
+// ...with address register writeback:
+class VST1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, nohash_imm:$lane), IIC_VST1lnu, "vst1", Dt,
+ "\\{$Vd[$lane]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []>;
+
+def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+ let Inst{4} = Rn{5};
+}
+def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5-4} = Rn{5-4};
+}
+
+def VST1LNq8Pseudo_UPD : VSTQLNWBPseudo<IIC_VST1lnu>;
+def VST1LNq16Pseudo_UPD : VSTQLNWBPseudo<IIC_VST1lnu>;
+def VST1LNq32Pseudo_UPD : VSTQLNWBPseudo<IIC_VST1lnu>;
// VST2LN : Vector Store (single 2-element structure from one lane)
class VST2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
- IIC_VST, "vst2", Dt, "\\{$src1[$lane], $src2[$lane]\\}, $addr",
- "", []>;
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, nohash_imm:$lane),
+ IIC_VST2ln, "vst2", Dt, "\\{$Vd[$lane], $src2[$lane]\\}, $Rn",
+ "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
-def VST2LNd8 : VST2LN<0b0001, {?,?,?,?}, "8">;
-def VST2LNd16 : VST2LN<0b0101, {?,?,0,?}, "16">;
-def VST2LNd32 : VST2LN<0b1001, {?,0,?,?}, "32">;
+def VST2LNd8 : VST2LN<0b0001, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST2LNd16 : VST2LN<0b0101, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST2LNd32 : VST2LN<0b1001, {?,0,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VST2LNd8Pseudo : VSTQLNPseudo<IIC_VST2ln>;
+def VST2LNd16Pseudo : VSTQLNPseudo<IIC_VST2ln>;
+def VST2LNd32Pseudo : VSTQLNPseudo<IIC_VST2ln>;
// ...with double-spaced registers:
-def VST2LNq16 : VST2LN<0b0101, {?,?,1,?}, "16">;
-def VST2LNq32 : VST2LN<0b1001, {?,1,?,?}, "32">;
+def VST2LNq16 : VST2LN<0b0101, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+ let Inst{4} = Rn{4};
+}
+def VST2LNq32 : VST2LN<0b1001, {?,1,0,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{4} = Rn{4};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VST2LNq16odd : VST2LN<0b0101, {?,?,1,?}, "16">;
-def VST2LNq32odd : VST2LN<0b1001, {?,1,?,?}, "32">;
+def VST2LNq16Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
+def VST2LNq32Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
// ...with address register writeback:
class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST, "vst2", Dt,
+ DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
"\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ "$addr.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
-def VST2LNd8_UPD : VST2LNWB<0b0001, {?,?,?,?}, "8">;
-def VST2LNd16_UPD : VST2LNWB<0b0101, {?,?,0,?}, "16">;
-def VST2LNd32_UPD : VST2LNWB<0b1001, {?,0,?,?}, "32">;
+def VST2LNd8_UPD : VST2LNWB<0b0001, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST2LNd16_UPD : VST2LNWB<0b0101, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST2LNd32_UPD : VST2LNWB<0b1001, {?,0,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
+
+def VST2LNd8Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
+def VST2LNd16Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
+def VST2LNd32Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
+
+def VST2LNq16_UPD : VST2LNWB<0b0101, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST2LNq32_UPD : VST2LNWB<0b1001, {?,1,0,?}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VST2LNq16_UPD : VST2LNWB<0b0101, {?,?,1,?}, "16">;
-def VST2LNq32_UPD : VST2LNWB<0b1001, {?,1,?,?}, "32">;
+def VST2LNq16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
+def VST2LNq32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
// VST3LN : Vector Store (single 3-element structure from one lane)
class VST3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
- nohash_imm:$lane), IIC_VST, "vst3", Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane]\\}, $addr", "", []>;
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3,
+ nohash_imm:$lane), IIC_VST3ln, "vst3", Dt,
+ "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn", "", []> {
+ let Rm = 0b1111;
+}
+
+def VST3LNd8 : VST3LN<0b0010, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST3LNd16 : VST3LN<0b0110, {?,?,0,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST3LNd32 : VST3LN<0b1010, {?,0,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VST3LNd8 : VST3LN<0b0010, {?,?,?,0}, "8">;
-def VST3LNd16 : VST3LN<0b0110, {?,?,0,0}, "16">;
-def VST3LNd32 : VST3LN<0b1010, {?,0,0,0}, "32">;
+def VST3LNd8Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
+def VST3LNd16Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
+def VST3LNd32Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
// ...with double-spaced registers:
-def VST3LNq16 : VST3LN<0b0110, {?,?,1,0}, "16">;
-def VST3LNq32 : VST3LN<0b1010, {?,1,0,0}, "32">;
+def VST3LNq16 : VST3LN<0b0110, {?,?,1,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST3LNq32 : VST3LN<0b1010, {?,1,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VST3LNq16odd : VST3LN<0b0110, {?,?,1,0}, "16">;
-def VST3LNq32odd : VST3LN<0b1010, {?,1,0,0}, "32">;
+def VST3LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
+def VST3LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
// ...with address register writeback:
class VST3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3, nohash_imm:$lane),
- IIC_VST, "vst3", Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane]\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3, nohash_imm:$lane),
+ IIC_VST3lnu, "vst3", Dt,
+ "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []>;
+
+def VST3LNd8_UPD : VST3LNWB<0b0010, {?,?,?,0}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST3LNd16_UPD : VST3LNWB<0b0110, {?,?,0,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST3LNd32_UPD : VST3LNWB<0b1010, {?,0,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VST3LNd8_UPD : VST3LNWB<0b0010, {?,?,?,0}, "8">;
-def VST3LNd16_UPD : VST3LNWB<0b0110, {?,?,0,0}, "16">;
-def VST3LNd32_UPD : VST3LNWB<0b1010, {?,0,0,0}, "32">;
+def VST3LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
+def VST3LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
+def VST3LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
+
+def VST3LNq16_UPD : VST3LNWB<0b0110, {?,?,1,0}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST3LNq32_UPD : VST3LNWB<0b1010, {?,1,0,0}, "32"> {
+ let Inst{7} = lane{0};
+}
-def VST3LNq16_UPD : VST3LNWB<0b0110, {?,?,1,0}, "16">;
-def VST3LNq32_UPD : VST3LNWB<0b1010, {?,1,0,0}, "32">;
+def VST3LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
+def VST3LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
// VST4LN : Vector Store (single 4-element structure from one lane)
class VST4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
- nohash_imm:$lane), IIC_VST, "vst4", Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $addr",
- "", []>;
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
+ (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4,
+ nohash_imm:$lane), IIC_VST4ln, "vst4", Dt,
+ "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn",
+ "", []> {
+ let Rm = 0b1111;
+ let Inst{4} = Rn{4};
+}
+
+def VST4LNd8 : VST4LN<0b0011, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST4LNd16 : VST4LN<0b0111, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST4LNd32 : VST4LN<0b1011, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
-def VST4LNd8 : VST4LN<0b0011, {?,?,?,?}, "8">;
-def VST4LNd16 : VST4LN<0b0111, {?,?,0,?}, "16">;
-def VST4LNd32 : VST4LN<0b1011, {?,0,?,?}, "32">;
+def VST4LNd8Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
+def VST4LNd16Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
+def VST4LNd32Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
// ...with double-spaced registers:
-def VST4LNq16 : VST4LN<0b0111, {?,?,1,?}, "16">;
-def VST4LNq32 : VST4LN<0b1011, {?,1,?,?}, "32">;
+def VST4LNq16 : VST4LN<0b0111, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST4LNq32 : VST4LN<0b1011, {?,1,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
-// ...alternate versions to be allocated odd register numbers:
-def VST4LNq16odd : VST4LN<0b0111, {?,?,1,?}, "16">;
-def VST4LNq32odd : VST4LN<0b1011, {?,1,?,?}, "32">;
+def VST4LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
+def VST4LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
// ...with address register writeback:
class VST4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
- IIC_VST, "vst4", Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $addr$offset",
- "$addr.addr = $wb", []>;
+ : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
+ IIC_VST4lnu, "vst4", Dt,
+ "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+}
-def VST4LNd8_UPD : VST4LNWB<0b0011, {?,?,?,?}, "8">;
-def VST4LNd16_UPD : VST4LNWB<0b0111, {?,?,0,?}, "16">;
-def VST4LNd32_UPD : VST4LNWB<0b1011, {?,0,?,?}, "32">;
+def VST4LNd8_UPD : VST4LNWB<0b0011, {?,?,?,?}, "8"> {
+ let Inst{7-5} = lane{2-0};
+}
+def VST4LNd16_UPD : VST4LNWB<0b0111, {?,?,0,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST4LNd32_UPD : VST4LNWB<0b1011, {?,0,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
+
+def VST4LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
+def VST4LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
+def VST4LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
+
+def VST4LNq16_UPD : VST4LNWB<0b0111, {?,?,1,?}, "16"> {
+ let Inst{7-6} = lane{1-0};
+}
+def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32"> {
+ let Inst{7} = lane{0};
+ let Inst{5} = Rn{5};
+}
-def VST4LNq16_UPD : VST4LNWB<0b0111, {?,?,1,?}, "16">;
-def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32">;
+def VST4LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
+def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
@@ -1000,98 +1687,92 @@ def SubReg_i32_lane : SDNodeXForm<imm, [{
// Instruction Classes
//===----------------------------------------------------------------------===//
-// Basic 2-register operations: single-, double- and quad-register.
-class N2VS<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
- string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src),
- IIC_VUNAD, OpcodeStr, Dt, "$dst, $src", "", []>;
+// Basic 2-register operations: double- and quad-register.
class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), IIC_VUNAD, OpcodeStr, Dt,"$dst, $src", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
+ (ins DPR:$Vm), IIC_VUNAD, OpcodeStr, Dt,"$Vd, $Vm", "",
+ [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm))))]>;
class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), IIC_VUNAQ, OpcodeStr, Dt,"$dst, $src", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
+ (ins QPR:$Vm), IIC_VUNAQ, OpcodeStr, Dt,"$Vd, $Vm", "",
+ [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm))))]>;
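+// Illustrative use: a double-register unary op such as the
+// float-to-int conversion is instantiated along the lines of
+//   def VCVTf2sd : N2VD<0b11, 0b11, 0b11, 0b11, 0b01110, 0,
+//                       "vcvt", "s32.f32", v2i32, v2f32, fp_to_sint>;
+// (see the actual VCVT definitions later in this file).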
// Basic 2-register intrinsics, both double- and quad-register.
class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
+ (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
+ (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
// Narrow 2-register operations.
class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyD, ValueType TyQ, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
- (ins QPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (TyD (OpNode (TyQ QPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
+ (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (TyD (OpNode (TyQ QPR:$Vm))))]>;
// Narrow 2-register intrinsics.
class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyD, ValueType TyQ, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
- (ins QPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
+ (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vm))))]>;
// Long 2-register operations (currently only used for VMOVL).
class N2VL<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$dst),
- (ins DPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (TyQ (OpNode (TyD DPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
+ (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vm))))]>;
+
+// Long 2-register intrinsics.
+class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
+ bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType TyQ, ValueType TyD, Intrinsic IntOp>
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
+ (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vm))))]>;
// 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
- : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$dst1, DPR:$dst2),
- (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
- OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
+ : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$Vd, DPR:$Vm),
+ (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
+ OpcodeStr, Dt, "$Vd, $Vm",
+ "$src1 = $Vd, $src2 = $Vm", []>;
class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
InstrItinClass itin, string OpcodeStr, string Dt>
- : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$dst1, QPR:$dst2),
- (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
-
-// Basic 3-register operations: single-, double- and quad-register.
-class N3VS<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- SDNode OpNode, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src1, DPR_VFP2:$src2), N3RegFrm,
- IIC_VBIND, OpcodeStr, Dt, "$dst, $src1, $src2", "", []> {
- let isCommutable = Commutable;
-}
+ : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$Vd, QPR:$Vm),
+ (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$Vd, $Vm",
+ "$src1 = $Vd, $src2 = $Vm", []>;
+// Basic 3-register operations: double- and quad-register.
class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
let isCommutable = Commutable;
}
// Same as N3VD but no data type.
@@ -1100,31 +1781,31 @@ class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
ValueType ResTy, ValueType OpTy,
SDNode OpNode, bit Commutable>
: N3VX<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]>{
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>{
let isCommutable = Commutable;
}
-class N3VDSL<bits<2> op21_20, bits<4> op11_8,
+class N3VDSL<bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, SDNode ShOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_VFP2:$src2),imm:$lane)))))]> {
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (Ty DPR:$Vd),
+ (Ty (ShOp (Ty DPR:$Vn),
+ (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
let isCommutable = 0;
}
-class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
+class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$dst, $src1, $src2[$lane]","",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_8:$src2), imm:$lane)))))]> {
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm[$lane]","",
+ [(set (Ty DPR:$Vd),
+ (Ty (ShOp (Ty DPR:$Vn),
+ (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
let isCommutable = 0;
}
@@ -1132,40 +1813,40 @@ class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
+ (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
let isCommutable = Commutable;
}
class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr,
ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
: N3VX<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), N3RegFrm, itin,
- OpcodeStr, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]>{
+ (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>{
let isCommutable = Commutable;
}
-class N3VQSL<bits<2> op21_20, bits<4> op11_8,
+class N3VQSL<bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode ShOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (ShOp (ResTy QPR:$Vn),
+ (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))]> {
let isCommutable = 0;
}
class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode ShOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$dst, $src1, $src2[$lane]","",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_8:$src2),
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm[$lane]","",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (ShOp (ResTy QPR:$Vn),
+ (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))]> {
let isCommutable = 0;
}
@@ -1175,30 +1856,39 @@ class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), f, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
let isCommutable = Commutable;
}
-class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
+class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (IntOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_VFP2:$src2),
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (Ty DPR:$Vd),
+ (Ty (IntOp (Ty DPR:$Vn),
+ (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
imm:$lane)))))]> {
let isCommutable = 0;
}
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (IntOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_8:$src2), imm:$lane)))))]> {
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (Ty DPR:$Vd),
+ (Ty (IntOp (Ty DPR:$Vn),
+ (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
+ let isCommutable = 0;
+}
+class N3VDIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
+ Format f, InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ : N3V<op24, op23, op21_20, op11_8, 0, op4,
+ (outs DPR:$Vd), (ins DPR:$Vm, DPR:$Vn), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (OpTy DPR:$Vn))))]> {
let isCommutable = 0;
}
@@ -1206,20 +1896,20 @@ class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), f, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
+ (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
let isCommutable = Commutable;
}
-class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
+class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (IntOp (ResTy QPR:$Vn),
+ (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))]> {
let isCommutable = 0;
}
@@ -1227,93 +1917,95 @@ class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_8:$src2),
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (IntOp (ResTy QPR:$Vn),
+ (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))]> {
let isCommutable = 0;
}
+class N3VQIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
+ Format f, InstrItinClass itin, string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
+ : N3V<op24, op23, op21_20, op11_8, 1, op4,
+ (outs QPR:$Vd), (ins QPR:$Vm, QPR:$Vn), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (OpTy QPR:$Vn))))]> {
+ let isCommutable = 0;
+}
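+// (The "Sh" variants are for the register-shift instructions such as
+// VSHL, whose assembly order is "$Vd, $Vm, $Vn": the value being
+// shifted is in Vm and the per-lane shift amounts are in Vn.)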
-// Multiply-Add/Sub operations: single-, double- and quad-register.
-class N3VSMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode OpNode>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2, DPR_VFP2:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst", []>;
-
+// Multiply-Add/Sub operations: double- and quad-register.
class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode OpNode>
+ ValueType Ty, SDPatternOperator MulOp, SDPatternOperator OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst, (Ty (OpNode DPR:$src1,
- (Ty (MulOp DPR:$src2, DPR:$src3)))))]>;
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
+ (Ty (MulOp DPR:$Vn, DPR:$Vm)))))]>;
+
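+// (MulOp/OpNode are widened from SDNode to SDPatternOperator, the
+// common base of SDNode and PatFrag, so these classes can be
+// instantiated with PatFrags as well as plain SDNodes.)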
class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode ShOp>
+ ValueType Ty, SDPatternOperator MulOp, SDPatternOperator ShOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
+ (outs DPR:$Vd),
+ (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (Ty DPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$src1),
- (Ty (MulOp DPR:$src2,
- (Ty (NEONvduplane (Ty DPR_VFP2:$src3),
+ (Ty (MulOp DPR:$Vn,
+ (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
imm:$lane)))))))]>;
class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType Ty, SDNode MulOp, SDNode ShOp>
: N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane),
+ (outs DPR:$Vd),
+ (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (Ty DPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$src1),
- (Ty (MulOp DPR:$src2,
- (Ty (NEONvduplane (Ty DPR_8:$src3),
+ (Ty (MulOp DPR:$Vn,
+ (Ty (NEONvduplane (Ty DPR_8:$Vm),
imm:$lane)))))))]>;
class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
- SDNode MulOp, SDNode OpNode>
+ SDPatternOperator MulOp, SDPatternOperator OpNode>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (Ty (OpNode QPR:$src1,
- (Ty (MulOp QPR:$src2, QPR:$src3)))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
+ (Ty (MulOp QPR:$Vn, QPR:$Vm)))))]>;
class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- SDNode MulOp, SDNode ShOp>
+ SDPatternOperator MulOp, SDPatternOperator ShOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
+ (outs QPR:$Vd),
+ (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (MulOp QPR:$src2,
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src3),
+ (ResTy (MulOp QPR:$Vn,
+ (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))))]>;
class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy,
SDNode MulOp, SDNode ShOp>
: N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, DPR_8:$src3, nohash_imm:$lane),
+ (outs QPR:$Vd),
+ (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (MulOp QPR:$src2,
- (ResTy (NEONvduplane (OpTy DPR_8:$src3),
+ (ResTy (MulOp QPR:$Vn,
+ (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))))]>;
// Neon Intrinsic-Op instructions (VABA): double- and quad-register.
@@ -1321,18 +2013,18 @@ class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, Intrinsic IntOp, SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst, (Ty (OpNode DPR:$src1,
- (Ty (IntOp (Ty DPR:$src2), (Ty DPR:$src3))))))]>;
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
+ (Ty (IntOp (Ty DPR:$Vn), (Ty DPR:$Vm))))))]>;
class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, Intrinsic IntOp, SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (Ty (OpNode QPR:$src1,
- (Ty (IntOp (Ty QPR:$src2), (Ty QPR:$src3))))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
+ (Ty (IntOp (Ty QPR:$Vn), (Ty QPR:$Vm))))))]>;
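
// Sketch of the Intrinsic-Op composition (bits and itinerary illustrative):
// VABA is the intended user, accumulating an absolute-difference intrinsic.
def VABAv8i8_sketch : N3VDIntOp<0, 0, 0b00, 0b0111, 1, IIC_VABAD,
                                "vaba", "s8", v8i8,
                                int_arm_neon_vabds, add>;
// i.e. vaba.s8 Dd, Dn, Dm:  Vd += vabds(Vn, Vm), element-wise.
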
// Neon 3-argument intrinsics, both double- and quad-register.
// The destination register is also used as the first source operand register.
@@ -1340,52 +2032,52 @@ class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy DPR:$src2), (OpTy DPR:$src3))))]>;
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$src1),
+ (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1),
- (OpTy QPR:$src2), (OpTy QPR:$src3))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src1),
+ (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
// Long Multiply-Add/Sub operations.
class N3VLMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
- (TyQ (MulOp (TyD DPR:$src2),
- (TyD DPR:$src3)))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
+ (TyQ (MulOp (TyD DPR:$Vn),
+ (TyD DPR:$Vm)))))]>;
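
// Shape of these long operations, for reference: the accumulator Qd holds
// elements at twice the source width, e.g. for a vmlal.s16-style pattern
//   Qd (4 x i32) += widening_mul(Dn (4 x i16), Dm (4 x i16))
// with the widening folded into the MulOp node.
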
class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
- : N3V<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
+ : N3V<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
+ (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set QPR:$dst,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set QPR:$Vd,
(OpNode (TyQ QPR:$src1),
- (TyQ (MulOp (TyD DPR:$src2),
- (TyD (NEONvduplane (TyD DPR_VFP2:$src3),
+ (TyQ (MulOp (TyD DPR:$Vn),
+ (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),
imm:$lane))))))]>;
class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
- : N3V<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane),
+ : N3V<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
+ (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set QPR:$dst,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set QPR:$Vd,
(OpNode (TyQ QPR:$src1),
- (TyQ (MulOp (TyD DPR:$src2),
- (TyD (NEONvduplane (TyD DPR_8:$src3),
+ (TyQ (MulOp (TyD DPR:$Vn),
+ (TyD (NEONvduplane (TyD DPR_8:$Vm),
imm:$lane))))))]>;
// Long Intrinsic-Op vector operations with explicit extend (VABAL).
@@ -1394,11 +2086,11 @@ class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
SDNode OpNode>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
- (TyQ (ExtOp (TyD (IntOp (TyD DPR:$src2),
- (TyD DPR:$src3)))))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
+ (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
+ (TyD DPR:$Vm)))))))]>;
// Neon Long 3-argument intrinsic. The destination register is
// a quad-register and is also used as the first source operand register.
@@ -1406,35 +2098,35 @@ class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, Intrinsic IntOp>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst,
- (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2), (TyD DPR:$src3))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd,
+ (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$Vn), (TyD DPR:$Vm))))]>;
class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane),
+ (outs QPR:$Vd),
+ (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$src1),
- (OpTy DPR:$src2),
- (OpTy (NEONvduplane (OpTy DPR_VFP2:$src3),
+ (OpTy DPR:$Vn),
+ (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))]>;
class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane),
+ (outs QPR:$Vd),
+ (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ [(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$src1),
- (OpTy DPR:$src2),
- (OpTy (NEONvduplane (OpTy DPR_8:$src3),
+ (OpTy DPR:$Vn),
+ (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))]>;
// Narrowing 3-register intrinsics.
@@ -1442,9 +2134,9 @@ class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
Intrinsic IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins QPR:$src1, QPR:$src2), N3RegFrm, IIC_VBINi4D,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src1), (TyQ QPR:$src2))))]> {
+ (outs DPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINi4D,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vn), (TyQ QPR:$Vm))))]> {
let isCommutable = Commutable;
}
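
// Rough instantiation of the narrowing intrinsic class (bits illustrative;
// the real VADDHN definitions live elsewhere in this file):
def VADDHNv8i8_sketch : N3VNInt<0, 1, 0b00, 0b0100, 0,
                                "vaddhn", "i16", v8i8, v8i16,
                                int_arm_neon_vaddhn, 1>;
// i.e. vaddhn.i16 Dd, Qn, Qm: add, keep each sum's high half,
// narrowing 8 x i16 down to 8 x i8.
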
@@ -1453,29 +2145,29 @@ class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (TyQ (OpNode (TyD DPR:$src1), (TyD DPR:$src2))))]> {
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
let isCommutable = Commutable;
}
class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set QPR:$dst,
- (TyQ (OpNode (TyD DPR:$src1),
- (TyD (NEONvduplane (TyD DPR_VFP2:$src2),imm:$lane)))))]>;
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set QPR:$Vd,
+ (TyQ (OpNode (TyD DPR:$Vn),
+ (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),imm:$lane)))))]>;
class N3VLSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set QPR:$dst,
- (TyQ (OpNode (TyD DPR:$src1),
- (TyD (NEONvduplane (TyD DPR_8:$src2), imm:$lane)))))]>;
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set QPR:$Vd,
+ (TyQ (OpNode (TyD DPR:$Vn),
+ (TyD (NEONvduplane (TyD DPR_8:$Vm), imm:$lane)))))]>;
// Long 3-register operations with explicitly extended operands.
class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -1483,10 +2175,10 @@ class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (OpNode (TyQ (ExtOp (TyD DPR:$src1))),
- (TyQ (ExtOp (TyD DPR:$src2)))))]> {
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (OpNode (TyQ (ExtOp (TyD DPR:$Vn))),
+ (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
let isCommutable = Commutable;
}
@@ -1496,10 +2188,10 @@ class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (TyQ (ExtOp (TyD (IntOp (TyD DPR:$src1),
- (TyD DPR:$src2))))))]> {
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
+ (TyD DPR:$Vm))))))]> {
let isCommutable = Commutable;
}
@@ -1508,30 +2200,30 @@ class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src1), (TyD DPR:$src2))))]> {
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
let isCommutable = Commutable;
}
class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy (NEONvduplane (OpTy DPR_VFP2:$src2),
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (IntOp (OpTy DPR:$Vn),
+ (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
imm:$lane)))))]>;
class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy (NEONvduplane (OpTy DPR_8:$src2),
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ [(set (ResTy QPR:$Vd),
+ (ResTy (IntOp (OpTy DPR:$Vn),
+ (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
imm:$lane)))))]>;
// Wide 3-register operations.
@@ -1539,10 +2231,10 @@ class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
SDNode OpNode, SDNode ExtOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), N3RegFrm, IIC_VSUBiD,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
- (TyQ (ExtOp (TyD DPR:$src2)))))]> {
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VSUBiD,
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (OpNode (TyQ QPR:$Vn),
+ (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
let isCommutable = Commutable;
}
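
// Rough instantiation of the wide form, which extends only the second
// source operand (bits illustrative):
def VADDWsv8i16_sketch : N3VW<0, 1, 0b00, 0b0001, 0, "vaddw", "s8",
                              v8i16, v8i8, add, sext, 0>;
// i.e. vaddw.s8 Qd, Qn, Dm:  Qd = Qn + sext(Dm).
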
@@ -1551,16 +2243,16 @@ class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
+ (ins DPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
+ : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
+ (ins QPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
// Pairwise long 2-register accumulate intrinsics,
// both double- and quad-register.
@@ -1570,17 +2262,17 @@ class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2", "$src1 = $dst",
- [(set DPR:$dst, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$src2))))]>;
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm), IIC_VPALiD,
+ OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
+ [(set DPR:$Vd, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$Vm))))]>;
class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
bits<2> op17_16, bits<5> op11_7, bit op4,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), IIC_VPALiQ,
- OpcodeStr, Dt, "$dst, $src2", "$src1 = $dst",
- [(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm), IIC_VPALiQ,
+ OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$Vm))))]>;
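
// Rough instantiation of the pairwise-accumulate form (bits illustrative;
// VPADAL is the intended user):
def VPADALsv4i16_sketch : N2VDPLInt2<0b11, 0b11, 0b00, 0b00, 0b01100, 0,
                                     "vpadal", "s8", v4i16, v8i8,
                                     int_arm_neon_vpadals>;
// i.e. vpadal.s8 Dd, Dm: each i16 lane of Dd accumulates the sum of one
// adjacent pair of i8 lanes from Dm.
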
// Shift by immediate,
// both double- and quad-register.
@@ -1588,25 +2280,25 @@ class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, SDNode OpNode>
: N2VImm<op24, op23, op11_8, op7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), f, itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
+ (outs DPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set DPR:$Vd, (Ty (OpNode (Ty DPR:$Vm), (i32 imm:$SIMM))))]>;
class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Format f, InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, SDNode OpNode>
: N2VImm<op24, op23, op11_8, op7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), f, itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;
+ (outs QPR:$Vd), (ins QPR:$Vm, i32imm:$SIMM), f, itin,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set QPR:$Vd, (Ty (OpNode (Ty QPR:$Vm), (i32 imm:$SIMM))))]>;
// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode OpNode>
: N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM), N2RegVShLFrm,
- IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
+ (outs QPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), N2RegVShLFrm,
+ IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
(i32 imm:$SIMM))))]>;
// Narrow shift by immediate.
@@ -1614,42 +2306,42 @@ class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode OpNode>
: N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs DPR:$dst), (ins QPR:$src, i32imm:$SIMM), N2RegVShRFrm, itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy QPR:$src),
+ (outs DPR:$Vd), (ins QPR:$Vm, i32imm:$SIMM), N2RegVShRFrm, itin,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set DPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm),
(i32 imm:$SIMM))))]>;
// Shift right by immediate and accumulate,
// both double- and quad-register.
class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set DPR:$dst, (Ty (add DPR:$src1,
- (Ty (ShOp DPR:$src2, (i32 imm:$SIMM))))))]>;
+ : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
+ (ins DPR:$src1, DPR:$Vm, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+ [(set DPR:$Vd, (Ty (add DPR:$src1,
+ (Ty (ShOp DPR:$Vm, (i32 imm:$SIMM))))))]>;
class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set QPR:$dst, (Ty (add QPR:$src1,
- (Ty (ShOp QPR:$src2, (i32 imm:$SIMM))))))]>;
+ : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
+ (ins QPR:$src1, QPR:$Vm, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+ [(set QPR:$Vd, (Ty (add QPR:$src1,
+ (Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
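
// Rough instantiation of shift-right-and-accumulate (bits illustrative,
// assuming the NEONvshrs shift node defined elsewhere in this file):
def VSRAsv2i32_sketch : N2VDShAdd<0, 1, 0b0001, 0, 1,
                                  "vsra", "s32", v2i32, NEONvshrs>;
// i.e. vsra.s32 Dd, Dm, #imm:  Dd += (Dm >> imm), element-wise.
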
// Shift by immediate and insert,
// both double- and quad-register.
class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, i32imm:$SIMM), f, IIC_VSHLiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
+ : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
+ (ins DPR:$src1, DPR:$Vm, i32imm:$SIMM), f, IIC_VSHLiD,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+ [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, i32imm:$SIMM), f, IIC_VSHLiQ,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
+ : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
+ (ins QPR:$src1, QPR:$Vm, i32imm:$SIMM), f, IIC_VSHLiQ,
+ OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+ [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
// Convert, with fractional bits immediate,
// both double- and quad-register.
@@ -1657,16 +2349,16 @@ class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
Intrinsic IntOp>
: N2VImm<op24, op23, op11_8, op7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), NVCVTFrm,
- IIC_VUNAD, OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src), (i32 imm:$SIMM))))]>;
+ (outs DPR:$Vd), (ins DPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
+ IIC_VUNAD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (i32 imm:$SIMM))))]>;
class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
Intrinsic IntOp>
: N2VImm<op24, op23, op11_8, op7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), NVCVTFrm,
- IIC_VUNAQ, OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;
+ (outs QPR:$Vd), (ins QPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
+ IIC_VUNAQ, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
+ [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (i32 imm:$SIMM))))]>;
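
// Rough instantiation of the fixed-point convert (bits illustrative); the
// operand-class change above, i32imm -> neon_vcvt_imm32, presumably exists
// to range-check the fraction-bit immediate:
def VCVTf2xsd_sketch : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
                               v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
// i.e. vcvt.s32.f32 Dd, Dm, #fbits: float to signed fixed-point.
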
//===----------------------------------------------------------------------===//
// Multiclasses
@@ -1678,45 +2370,127 @@ class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
// S = single int (32 bit) elements
// D = double int (64 bit) elements
-// Neon 2-register vector operations -- for disassembly only.
+// Neon 2-register vector operations and intrinsics.
-// First with only element sizes of 8, 16 and 32 bits:
+// Neon 2-register comparisons,
+// source operand element sizes of 8, 16 and 32 bits:
multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
bits<5> op11_7, bit op4, string opc, string Dt,
- string asm> {
+ string asm, SDNode OpNode> {
// 64-bit vector types.
def v8i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "8"), asm, "", []>;
+ (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "8"), asm, "",
+ [(set DPR:$Vd, (v8i8 (OpNode (v8i8 DPR:$Vm))))]>;
def v4i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "16"), asm, "", []>;
+ (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "16"), asm, "",
+ [(set DPR:$Vd, (v4i16 (OpNode (v4i16 DPR:$Vm))))]>;
def v2i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "32"), asm, "", []>;
+ (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "32"), asm, "",
+ [(set DPR:$Vd, (v2i32 (OpNode (v2i32 DPR:$Vm))))]>;
def v2f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, "f32", asm, "", []> {
+ (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
+ opc, "f32", asm, "",
+ [(set DPR:$Vd, (v2i32 (OpNode (v2f32 DPR:$Vm))))]> {
let Inst{10} = 1; // overwrite F = 1
}
// 128-bit vector types.
def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "8"), asm, "", []>;
+ (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "8"), asm, "",
+ [(set QPR:$Vd, (v16i8 (OpNode (v16i8 QPR:$Vm))))]>;
def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "16"), asm, "", []>;
+ (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "16"), asm, "",
+ [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
def v4i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "32"), asm, "", []>;
+ (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
+ opc, !strconcat(Dt, "32"), asm, "",
+ [(set QPR:$Vd, (v4i32 (OpNode (v4i32 QPR:$Vm))))]>;
def v4f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, "f32", asm, "", []> {
+ (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
+ opc, "f32", asm, "",
+ [(set QPR:$Vd, (v4i32 (OpNode (v4f32 QPR:$Vm))))]> {
let Inst{10} = 1; // overwrite F = 1
}
}
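
// To make the multiclass mechanics concrete: a defm over N2V_QHS_cmp
// stamps out one def per element type, prefixing the defm name onto each
// inner def name, so the VCEQz use further down in this diff,
//   defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
//                            "$Vd, $Vm, #0", NEONvceqz>;
// yields VCEQzv8i8, VCEQzv4i16, VCEQzv2i32, VCEQzv2f32 and the
// corresponding quad-register defs VCEQzv16i8 through VCEQzv4f32.
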
+
+// Neon 2-register vector intrinsics,
+// element sizes of 8, 16 and 32 bits:
+multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
+ bits<5> op11_7, bit op4,
+ InstrItinClass itinD, InstrItinClass itinQ,
+ string OpcodeStr, string Dt, Intrinsic IntOp> {
+ // 64-bit vector types.
+ def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
+ itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
+ def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
+ itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
+ def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
+ itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
+
+ // 128-bit vector types.
+ def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
+ itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
+ def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
+ itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
+ def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
+ itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
+}
+
+
+// Neon Narrowing 2-register vector operations,
+// source operand element sizes of 16, 32 and 64 bits:
+multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
+ bits<5> op11_7, bit op6, bit op4,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ SDNode OpNode> {
+ def v8i8 : N2VN<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "16"),
+ v8i8, v8i16, OpNode>;
+ def v4i16 : N2VN<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "32"),
+ v4i16, v4i32, OpNode>;
+ def v2i32 : N2VN<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "64"),
+ v2i32, v2i64, OpNode>;
+}
+
+// Neon Narrowing 2-register vector intrinsics,
+// source operand element sizes of 16, 32 and 64 bits:
+multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
+ bits<5> op11_7, bit op6, bit op4,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ Intrinsic IntOp> {
+ def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "16"),
+ v8i8, v8i16, IntOp>;
+ def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "32"),
+ v4i16, v4i32, IntOp>;
+ def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
+ itin, OpcodeStr, !strconcat(Dt, "64"),
+ v2i32, v2i64, IntOp>;
+}
+
+
+// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
+// source operand element sizes of 8, 16 and 32 bits:
+multiclass N2VL_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
+ string OpcodeStr, string Dt, SDNode OpNode> {
+ def v8i16 : N2VL<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
+ OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode>;
+ def v4i32 : N2VL<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
+ OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
+ def v2i64 : N2VL<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
+ OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
+}
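
// Rough instantiation of the lengthening multiclass (bits illustrative;
// the real VMOVL definitions live elsewhere in this file):
defm VMOVLs_sketch : N2VL_QHS<0b01, 0b10100, 0, 1, "vmovl", "s", sext>;
// e.g. vmovl.s8 Qd, Dm lengthens 8 x i8 to 8 x i16, with 16 -> 32 and
// 32 -> 64 covered by the other defs the multiclass generates.
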
+
+
// Neon 3-register vector operations.
// First with only element sizes of 8, 16 and 32 bits:
@@ -1726,7 +2500,7 @@ multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
string OpcodeStr, string Dt,
SDNode OpNode, bit Commutable = 0> {
// 64-bit vector types.
- def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
+ def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
OpcodeStr, !strconcat(Dt, "8"),
v8i8, v8i8, OpNode, Commutable>;
def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, itinD16,
@@ -1775,54 +2549,6 @@ multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
}
-// Neon Narrowing 2-register vector operations,
-// source operand element sizes of 16, 32 and 64 bits:
-multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- SDNode OpNode> {
- def v8i8 : N2VN<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "16"),
- v8i8, v8i16, OpNode>;
- def v4i16 : N2VN<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "32"),
- v4i16, v4i32, OpNode>;
- def v2i32 : N2VN<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "64"),
- v2i32, v2i64, OpNode>;
-}
-
-// Neon Narrowing 2-register vector intrinsics,
-// source operand element sizes of 16, 32 and 64 bits:
-multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp> {
- def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "16"),
- v8i8, v8i16, IntOp>;
- def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "32"),
- v4i16, v4i32, IntOp>;
- def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "64"),
- v2i32, v2i64, IntOp>;
-}
-
-
-// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
-// source operand element sizes of 8, 16 and 32 bits:
-multiclass N2VL_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
- string OpcodeStr, string Dt, SDNode OpNode> {
- def v8i16 : N2VL<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode>;
- def v4i32 : N2VL<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
- def v2i64 : N2VL<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
-}
-
-
// Neon 3-register vector intrinsics.
// First with only element sizes of 16 and 32 bits:
@@ -1847,8 +2573,29 @@ multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
OpcodeStr, !strconcat(Dt, "32"),
v4i32, v4i32, IntOp, Commutable>;
}
+multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp> {
+ // 64-bit vector types.
+ def v4i16 : N3VDIntSh<op24, op23, 0b01, op11_8, op4, f, itinD16,
+ OpcodeStr, !strconcat(Dt, "16"),
+ v4i16, v4i16, IntOp>;
+ def v2i32 : N3VDIntSh<op24, op23, 0b10, op11_8, op4, f, itinD32,
+ OpcodeStr, !strconcat(Dt, "32"),
+ v2i32, v2i32, IntOp>;
+
+ // 128-bit vector types.
+ def v8i16 : N3VQIntSh<op24, op23, 0b01, op11_8, op4, f, itinQ16,
+ OpcodeStr, !strconcat(Dt, "16"),
+ v8i16, v8i16, IntOp>;
+ def v4i32 : N3VQIntSh<op24, op23, 0b10, op11_8, op4, f, itinQ32,
+ OpcodeStr, !strconcat(Dt, "32"),
+ v4i32, v4i32, IntOp>;
+}
-multiclass N3VIntSL_HS<bits<4> op11_8,
+multiclass N3VIntSL_HS<bits<4> op11_8,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt, Intrinsic IntOp> {
@@ -1877,6 +2624,21 @@ multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
OpcodeStr, !strconcat(Dt, "8"),
v16i8, v16i8, IntOp, Commutable>;
}
+multiclass N3VInt_QHSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp>
+ : N3VInt_HSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
+ OpcodeStr, Dt, IntOp> {
+ def v8i8 : N3VDIntSh<op24, op23, 0b00, op11_8, op4, f, itinD16,
+ OpcodeStr, !strconcat(Dt, "8"),
+ v8i8, v8i8, IntOp>;
+ def v16i8 : N3VQIntSh<op24, op23, 0b00, op11_8, op4, f, itinQ16,
+ OpcodeStr, !strconcat(Dt, "8"),
+ v16i8, v16i8, IntOp>;
+}
+
// ....then also with element size of 64 bits:
multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
@@ -1893,6 +2655,20 @@ multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
OpcodeStr, !strconcat(Dt, "64"),
v2i64, v2i64, IntOp, Commutable>;
}
+multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp>
+ : N3VInt_QHSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
+ OpcodeStr, Dt, IntOp> {
+ def v1i64 : N3VDIntSh<op24, op23, 0b11, op11_8, op4, f, itinD32,
+ OpcodeStr, !strconcat(Dt, "64"),
+ v1i64, v1i64, IntOp>;
+ def v2i64 : N3VQIntSh<op24, op23, 0b11, op11_8, op4, f, itinQ32,
+ OpcodeStr, !strconcat(Dt, "64"),
+ v2i64, v2i64, IntOp>;
+}
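
// The new *Sh multiclasses wrap the N3VDIntSh/N3VQIntSh classes from an
// earlier hunk; the point, presumably, is the register-shift operand
// order, where the shift counts sit in Vn but the ISA syntax names the
// shifted value first. VSHLs further down becomes, for instance,
//   defm VSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
//                    IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
//                    "vshl", "s", int_arm_neon_vshifts>;
// printing as  vshl.s8 Dd, Dm, Dn  with Dn holding per-element shifts.
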
// Neon Narrowing 3-register vector intrinsics,
// source operand element sizes of 16, 32 and 64 bits:
@@ -1920,7 +2696,7 @@ multiclass N3VL_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
def v8i16 : N3VL<op24, op23, 0b00, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "8"),
v8i16, v8i8, OpNode, Commutable>;
- def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
+ def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "16"),
v4i32, v4i16, OpNode, Commutable>;
def v2i64 : N3VL<op24, op23, 0b10, op11_8, op4, itin32,
@@ -1944,7 +2720,7 @@ multiclass N3VLExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
def v8i16 : N3VLExt<op24, op23, 0b00, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "8"),
v8i16, v8i8, OpNode, ExtOp, Commutable>;
- def v4i32 : N3VLExt<op24, op23, 0b01, op11_8, op4, itin16,
+ def v4i32 : N3VLExt<op24, op23, 0b01, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "16"),
v4i32, v4i16, OpNode, ExtOp, Commutable>;
def v2i64 : N3VLExt<op24, op23, 0b10, op11_8, op4, itin32,
@@ -1959,7 +2735,7 @@ multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin16, InstrItinClass itin32,
string OpcodeStr, string Dt,
Intrinsic IntOp, bit Commutable = 0> {
- def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
+ def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
OpcodeStr, !strconcat(Dt, "16"),
v4i32, v4i16, IntOp, Commutable>;
def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, itin32,
@@ -1970,7 +2746,7 @@ multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
Intrinsic IntOp> {
- def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
+ def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
@@ -1995,7 +2771,7 @@ multiclass N3VLIntExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
def v8i16 : N3VLIntExt<op24, op23, 0b00, op11_8, op4, itin,
OpcodeStr, !strconcat(Dt, "8"),
v8i16, v8i8, IntOp, ExtOp, Commutable>;
- def v4i32 : N3VLIntExt<op24, op23, 0b01, op11_8, op4, itin,
+ def v4i32 : N3VLIntExt<op24, op23, 0b01, op11_8, op4, itin,
OpcodeStr, !strconcat(Dt, "16"),
v4i32, v4i16, IntOp, ExtOp, Commutable>;
def v2i64 : N3VLIntExt<op24, op23, 0b10, op11_8, op4, itin,
@@ -2044,7 +2820,7 @@ multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
OpcodeStr, !strconcat(Dt, "32"), v4i32, mul, OpNode>;
}
-multiclass N3VMulOpSL_HS<bits<4> op11_8,
+multiclass N3VMulOpSL_HS<bits<4> op11_8,
InstrItinClass itinD16, InstrItinClass itinD32,
InstrItinClass itinQ16, InstrItinClass itinQ32,
string OpcodeStr, string Dt, SDNode ShOp> {
@@ -2174,30 +2950,6 @@ multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
}
-// Neon 2-register vector intrinsics,
-// element sizes of 8, 16 and 32 bits:
-multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op4,
- InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- // 64-bit vector types.
- def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
- def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
- def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
-
- // 128-bit vector types.
- def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
- def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
- def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
-}
-
-
// Neon Pairwise long 2-register intrinsics,
// element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
@@ -2461,9 +3213,9 @@ def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16D, "vmul",
"p8", v8i8, v8i8, int_arm_neon_vmulp, 1>;
def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16Q, "vmul",
"p8", v16i8, v16i8, int_arm_neon_vmulp, 1>;
-def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VBIND, "vmul", "f32",
+def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VFMULD, "vmul", "f32",
v2f32, v2f32, fmul, 1>;
-def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VBINQ, "vmul", "f32",
+def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VFMULQ, "vmul", "f32",
v4f32, v4f32, fmul, 1>;
defm VMULsl : N3VSL_HS<0b1000, "vmul", "i", mul>;
def VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
@@ -2491,7 +3243,7 @@ def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
// VQDMULH : Vector Saturating Doubling Multiply Returning High Half
defm VQDMULH : N3VInt_HS<0, 0, 0b1011, 0, N3RegFrm, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q,
+ IIC_VMULi16Q, IIC_VMULi32Q,
"vqdmulh", "s", int_arm_neon_vqdmulh, 1>;
defm VQDMULHsl: N3VIntSL_HS<0b1100, IIC_VMULi16D, IIC_VMULi32D,
IIC_VMULi16Q, IIC_VMULi32Q,
@@ -2555,15 +3307,19 @@ defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, IIC_VMULi16D,
defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
- v2f32, fmul, fadd>;
+ v2f32, fmul_su, fadd_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
- v4f32, fmul, fadd>;
+ v4f32, fmul_su, fadd_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
defm VMLAsl : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
- v2f32, fmul, fadd>;
+ v2f32, fmul_su, fadd_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
def VMLAslfq : N3VQMulOpSL<0b10, 0b0001, IIC_VMACQ, "vmla", "f32",
- v4f32, v2f32, fmul, fadd>;
+ v4f32, v2f32, fmul_su, fadd_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
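
// Hedged reading of the VMLA changes above: fmul_su / fadd_mlx appear to
// be PatFrag-restricted forms of fmul / fadd (e.g. the multiply must have
// a single use), so the pattern only forms a fused multiply-accumulate
// when that is safe, and Requires<[HasNEON, UseFPVMLx]> further gates it
// to subtargets where VMLA/VMLS beats a separate VMUL + VADD.
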
def : Pat<(v8i16 (add (v8i16 QPR:$src1),
(mul (v8i16 QPR:$src2),
@@ -2581,14 +3337,15 @@ def : Pat<(v4i32 (add (v4i32 QPR:$src1),
(DSubReg_i32_reg imm:$lane))),
(SubReg_i32_lane imm:$lane)))>;
-def : Pat<(v4f32 (fadd (v4f32 QPR:$src1),
- (fmul (v4f32 QPR:$src2),
+def : Pat<(v4f32 (fadd_mlx (v4f32 QPR:$src1),
+ (fmul_su (v4f32 QPR:$src2),
(v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
(v4f32 (VMLAslfq (v4f32 QPR:$src1),
(v4f32 QPR:$src2),
(v2f32 (EXTRACT_SUBREG QPR:$src3,
(DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
+ (SubReg_i32_lane imm:$lane)))>,
+ Requires<[HasNEON, UseFPVMLx]>;
// VMLAL : Vector Multiply Accumulate Long (Q += D * D)
defm VMLALs : N3VLMulOp_QHS<0,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
@@ -2608,15 +3365,19 @@ defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;
defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
- v2f32, fmul, fsub>;
+ v2f32, fmul_su, fsub_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
- v4f32, fmul, fsub>;
+ v4f32, fmul_su, fsub_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
defm VMLSsl : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
- v2f32, fmul, fsub>;
+ v2f32, fmul_su, fsub_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
def VMLSslfq : N3VQMulOpSL<0b10, 0b0101, IIC_VMACQ, "vmls", "f32",
- v4f32, v2f32, fmul, fsub>;
+ v4f32, v2f32, fmul_su, fsub_mlx>,
+ Requires<[HasNEON, UseFPVMLx]>;
def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
(mul (v8i16 QPR:$src2),
@@ -2634,13 +3395,14 @@ def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
(DSubReg_i32_reg imm:$lane))),
(SubReg_i32_lane imm:$lane)))>;
-def : Pat<(v4f32 (fsub (v4f32 QPR:$src1),
- (fmul (v4f32 QPR:$src2),
+def : Pat<(v4f32 (fsub_mlx (v4f32 QPR:$src1),
+ (fmul_su (v4f32 QPR:$src2),
(v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
(v4f32 (VMLSslfq (v4f32 QPR:$src1), (v4f32 QPR:$src2),
(v2f32 (EXTRACT_SUBREG QPR:$src3,
(DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
+ (SubReg_i32_lane imm:$lane)))>,
+ Requires<[HasNEON, UseFPVMLx]>;
// VMLSL : Vector Multiply Subtract Long (Q -= D * D)
defm VMLSLs : N3VLMulOp_QHS<0,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
@@ -2703,25 +3465,24 @@ def VCEQfd : N3VD<0,0,0b00,0b1110,0, IIC_VBIND, "vceq", "f32", v2i32, v2f32,
NEONvceq, 1>;
def VCEQfq : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
NEONvceq, 1>;
-// For disassembly only.
+
defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
- "$dst, $src, #0">;
+ "$Vd, $Vm, #0", NEONvceqz>;
// VCGE : Vector Compare Greater Than or Equal
defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
IIC_VSUBi4Q, "vcge", "s", NEONvcge, 0>;
-defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
+defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
IIC_VSUBi4Q, "vcge", "u", NEONvcgeu, 0>;
def VCGEfd : N3VD<1,0,0b00,0b1110,0, IIC_VBIND, "vcge", "f32", v2i32, v2f32,
NEONvcge, 0>;
def VCGEfq : N3VQ<1,0,0b00,0b1110,0, IIC_VBINQ, "vcge", "f32", v4i32, v4f32,
NEONvcge, 0>;
-// For disassembly only.
+
defm VCGEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00001, 0, "vcge", "s",
- "$dst, $src, #0">;
-// For disassembly only.
+ "$Vd, $Vm, #0", NEONvcgez>;
defm VCLEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00011, 0, "vcle", "s",
- "$dst, $src, #0">;
+ "$Vd, $Vm, #0", NEONvclez>;
// VCGT : Vector Compare Greater Than
defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
@@ -2732,12 +3493,11 @@ def VCGTfd : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32,
NEONvcgt, 0>;
def VCGTfq : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32,
NEONvcgt, 0>;
-// For disassembly only.
+
defm VCGTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s",
- "$dst, $src, #0">;
-// For disassembly only.
+ "$Vd, $Vm, #0", NEONvcgtz>;
defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
- "$dst, $src, #0">;
+ "$Vd, $Vm, #0", NEONvcltz>;
// VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacge",
@@ -2750,7 +3510,7 @@ def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacgt",
def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt",
"f32", v4i32, v4f32, int_arm_neon_vacgtq, 0>;
// VTST : Vector Test Bits
-defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
+defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
// Vector Bitwise Operations.
@@ -2779,104 +3539,190 @@ def VORRd : N3VDX<0, 0, 0b10, 0b0001, 1, IIC_VBINiD, "vorr",
def VORRq : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
v4i32, v4i32, or, 1>;
+def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
+ (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ IIC_VMOVImm,
+ "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
+ [(set DPR:$Vd,
+ (v4i16 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
+ (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ IIC_VMOVImm,
+ "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
+ [(set DPR:$Vd,
+ (v2i32 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
+ let Inst{10-9} = SIMM{10-9};
+}
+
+def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
+ (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ IIC_VMOVImm,
+ "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
+ [(set QPR:$Vd,
+ (v8i16 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
+ (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ IIC_VMOVImm,
+ "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
+ [(set QPR:$Vd,
+ (v4i32 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
+ let Inst{10-9} = SIMM{10-9};
+}
+
+
// VBIC : Vector Bitwise Bit Clear (AND NOT)
-def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2), N3RegFrm, IIC_VBINiD,
- "vbic", "$dst, $src1, $src2", "",
- [(set DPR:$dst, (v2i32 (and DPR:$src1,
- (vnotd DPR:$src2))))]>;
-def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2), N3RegFrm, IIC_VBINiQ,
- "vbic", "$dst, $src1, $src2", "",
- [(set QPR:$dst, (v4i32 (and QPR:$src1,
- (vnotq QPR:$src2))))]>;
+def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
+ "vbic", "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (v2i32 (and DPR:$Vn,
+ (vnotd DPR:$Vm))))]>;
+def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
+ (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
+ "vbic", "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (v4i32 (and QPR:$Vn,
+ (vnotq QPR:$Vm))))]>;
+
+def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
+ (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ IIC_VMOVImm,
+ "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
+ [(set DPR:$Vd,
+ (v4i16 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
+ (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ IIC_VMOVImm,
+ "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
+ [(set DPR:$Vd,
+ (v2i32 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
+ let Inst{10-9} = SIMM{10-9};
+}
+
+def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
+ (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ IIC_VMOVImm,
+ "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
+ [(set QPR:$Vd,
+ (v8i16 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VBICiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 1, 1,
+ (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ IIC_VMOVImm,
+ "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
+ [(set QPR:$Vd,
+ (v4i32 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
+ let Inst{10-9} = SIMM{10-9};
+}
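
// Hedged reading: NEONvorrImm / NEONvbicImm carry an ORR/BIC with a NEON
// modified-immediate, updating the tied source register in place, e.g.
//   vorr.i16 d0, #0x100   -- OR the pattern into every halfword lane
//   vbic.i32 q1, #0xff    -- clear those bits in every word lane
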
// VORN : Vector Bitwise OR NOT
-def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2), N3RegFrm, IIC_VBINiD,
- "vorn", "$dst, $src1, $src2", "",
- [(set DPR:$dst, (v2i32 (or DPR:$src1,
- (vnotd DPR:$src2))))]>;
-def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2), N3RegFrm, IIC_VBINiQ,
- "vorn", "$dst, $src1, $src2", "",
- [(set QPR:$dst, (v4i32 (or QPR:$src1,
- (vnotq QPR:$src2))))]>;
+def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
+ "vorn", "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (v2i32 (or DPR:$Vn,
+ (vnotd DPR:$Vm))))]>;
+def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$Vd),
+ (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
+ "vorn", "$Vd, $Vn, $Vm", "",
+ [(set QPR:$Vd, (v4i32 (or QPR:$Vn,
+ (vnotq QPR:$Vm))))]>;
// VMVN : Vector Bitwise NOT (Immediate)
let isReMaterializable = 1 in {
-def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$dst),
+
+def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmvn", "i16", "$dst, $SIMM", "",
- [(set DPR:$dst, (v4i16 (NEONvmvnImm timm:$SIMM)))]>;
-def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$dst),
+ "vmvn", "i16", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmvn", "i16", "$dst, $SIMM", "",
- [(set QPR:$dst, (v8i16 (NEONvmvnImm timm:$SIMM)))]>;
+ "vmvn", "i16", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
-def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$dst),
+def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmvn", "i32", "$dst, $SIMM", "",
- [(set DPR:$dst, (v2i32 (NEONvmvnImm timm:$SIMM)))]>;
-def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$dst),
+ "vmvn", "i32", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
+ let Inst{11-8} = SIMM{11-8};
+}
+
+def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmvn", "i32", "$dst, $SIMM", "",
- [(set QPR:$dst, (v4i32 (NEONvmvnImm timm:$SIMM)))]>;
+ "vmvn", "i32", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
+ let Inst{11-8} = SIMM{11-8};
+}
}
// VMVN : Vector Bitwise NOT
def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), IIC_VSUBiD,
- "vmvn", "$dst, $src", "",
- [(set DPR:$dst, (v2i32 (vnotd DPR:$src)))]>;
+ (outs DPR:$Vd), (ins DPR:$Vm), IIC_VSUBiD,
+ "vmvn", "$Vd, $Vm", "",
+ [(set DPR:$Vd, (v2i32 (vnotd DPR:$Vm)))]>;
def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), IIC_VSUBiD,
- "vmvn", "$dst, $src", "",
- [(set QPR:$dst, (v4i32 (vnotq QPR:$src)))]>;
+ (outs QPR:$Vd), (ins QPR:$Vm), IIC_VSUBiD,
+ "vmvn", "$Vd, $Vm", "",
+ [(set QPR:$Vd, (v4i32 (vnotq QPR:$Vm)))]>;
def : Pat<(v2i32 (vnotd DPR:$src)), (VMVNd DPR:$src)>;
def : Pat<(v4i32 (vnotq QPR:$src)), (VMVNq QPR:$src)>;
// VBSL : Vector Bitwise Select
-def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR:$src3),
+def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
+ (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
N3RegFrm, IIC_VCNTiD,
- "vbsl", "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst,
- (v2i32 (or (and DPR:$src2, DPR:$src1),
- (and DPR:$src3, (vnotd DPR:$src1)))))]>;
-def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, QPR:$src3),
+ "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set DPR:$Vd,
+ (v2i32 (or (and DPR:$Vn, DPR:$src1),
+ (and DPR:$Vm, (vnotd DPR:$src1)))))]>;
+def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
+ (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
N3RegFrm, IIC_VCNTiQ,
- "vbsl", "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst,
- (v4i32 (or (and QPR:$src2, QPR:$src1),
- (and QPR:$src3, (vnotq QPR:$src1)))))]>;
+ "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
+ [(set QPR:$Vd,
+ (v4i32 (or (and QPR:$Vn, QPR:$src1),
+ (and QPR:$Vm, (vnotq QPR:$src1)))))]>;
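
// The VBSL patterns above encode the usual bitwise-select identity, with
// the mask in the tied first operand:
//   Vd = (src1 & Vn) | (~src1 & Vm)  -- per-bit select between Vn and Vm.
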
// VBIF : Vector Bitwise Insert if False
// like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
N3RegFrm, IIC_VBINiD,
- "vbif", "$dst, $src2, $src3", "$src1 = $dst",
+ "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
[/* For disassembly only; pattern left blank */]>;
def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
N3RegFrm, IIC_VBINiQ,
- "vbif", "$dst, $src2, $src3", "$src1 = $dst",
+ "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
[/* For disassembly only; pattern left blank */]>;
// VBIT : Vector Bitwise Insert if True
// like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
+// FIXME: This instruction's encoding MAY NOT BE correct.
def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
+ (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
N3RegFrm, IIC_VBINiD,
- "vbit", "$dst, $src2, $src3", "$src1 = $dst",
+ "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
[/* For disassembly only; pattern left blank */]>;
def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
+ (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
N3RegFrm, IIC_VBINiQ,
- "vbit", "$dst, $src2, $src3", "$src1 = $dst",
+ "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
[/* For disassembly only; pattern left blank */]>;
// VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
@@ -2957,8 +3803,8 @@ def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
"vpadd", "i32",
v2i32, v2i32, int_arm_neon_vpadd, 0>;
-def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, N3RegFrm,
- IIC_VBIND, "vpadd", "f32",
+def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, N3RegFrm,
+ IIC_VPBIND, "vpadd", "f32",
v2f32, v2f32, int_arm_neon_vpadd, 0>;
// VPADDL : Vector Pairwise Add Long
@@ -2986,7 +3832,7 @@ def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
"u16", v4i16, v4i16, int_arm_neon_vpmaxu, 0>;
def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
"u32", v2i32, v2i32, int_arm_neon_vpmaxu, 0>;
-def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
+def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmax",
"f32", v2f32, v2f32, int_arm_neon_vpmaxs, 0>;
// VPMIN : Vector Pairwise Minimum
@@ -3002,16 +3848,16 @@ def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
"u16", v4i16, v4i16, int_arm_neon_vpminu, 0>;
def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
"u32", v2i32, v2i32, int_arm_neon_vpminu, 0>;
-def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VSUBi4D, "vpmin",
+def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmin",
"f32", v2f32, v2f32, int_arm_neon_vpmins, 0>;
// Vector Reciprocal and Reciprocal Square Root Estimate and Step.
// VRECPE : Vector Reciprocal Estimate
-def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
+def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
IIC_VUNAD, "vrecpe", "u32",
v2i32, v2i32, int_arm_neon_vrecpe>;
-def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
+def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
IIC_VUNAQ, "vrecpe", "u32",
v4i32, v4i32, int_arm_neon_vrecpe>;
def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
@@ -3039,7 +3885,7 @@ def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
IIC_VUNAD, "vrsqrte", "f32",
v2f32, v2f32, int_arm_neon_vrsqrte>;
-def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
+def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
IIC_VUNAQ, "vrsqrte", "f32",
v4f32, v4f32, int_arm_neon_vrsqrte>;
@@ -3054,12 +3900,12 @@ def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
// Vector Shifts.
// VSHL : Vector Shift
-defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, N3RegVShFrm,
+defm VSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
- "vshl", "s", int_arm_neon_vshifts, 0>;
-defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, N3RegVShFrm,
+ "vshl", "s", int_arm_neon_vshifts>;
+defm VSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
- "vshl", "u", int_arm_neon_vshiftu, 0>;
+ "vshl", "u", int_arm_neon_vshiftu>;
// VSHL : Vector Shift Left (Immediate)
defm VSHLi : N2VSh_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl,
N2RegVShLFrm>;
@@ -3093,12 +3939,12 @@ defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
NEONvshrn>;
// VRSHL : Vector Rounding Shift
-defm VRSHLs : N3VInt_QHSD<0, 0, 0b0101, 0, N3RegVShFrm,
+defm VRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 0, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vrshl", "s", int_arm_neon_vrshifts, 0>;
-defm VRSHLu : N3VInt_QHSD<1, 0, 0b0101, 0, N3RegVShFrm,
+ "vrshl", "s", int_arm_neon_vrshifts>;
+defm VRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vrshl", "u", int_arm_neon_vrshiftu, 0>;
+ "vrshl", "u", int_arm_neon_vrshiftu>;
// VRSHR : Vector Rounding Shift Right
defm VRSHRs : N2VSh_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s", NEONvrshrs,
N2RegVShRFrm>;
@@ -3110,12 +3956,12 @@ defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
NEONvrshrn>;
// VQSHL : Vector Saturating Shift
-defm VQSHLs : N3VInt_QHSD<0, 0, 0b0100, 1, N3RegVShFrm,
+defm VQSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 1, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vqshl", "s", int_arm_neon_vqshifts, 0>;
-defm VQSHLu : N3VInt_QHSD<1, 0, 0b0100, 1, N3RegVShFrm,
+ "vqshl", "s", int_arm_neon_vqshifts>;
+defm VQSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 1, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vqshl", "u", int_arm_neon_vqshiftu, 0>;
+ "vqshl", "u", int_arm_neon_vqshiftu>;
// VQSHL : Vector Saturating Shift Left (Immediate)
defm VQSHLsi : N2VSh_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls,
N2RegVShLFrm>;
@@ -3136,12 +3982,12 @@ defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
NEONvqshrnsu>;
// VQRSHL : Vector Saturating Rounding Shift
-defm VQRSHLs : N3VInt_QHSD<0, 0, 0b0101, 1, N3RegVShFrm,
+defm VQRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 1, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vqrshl", "s", int_arm_neon_vqrshifts, 0>;
-defm VQRSHLu : N3VInt_QHSD<1, 0, 0b0101, 1, N3RegVShFrm,
+ "vqrshl", "s", int_arm_neon_vqrshifts>;
+defm VQRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 1, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
- "vqrshl", "u", int_arm_neon_vqrshiftu, 0>;
+ "vqrshl", "u", int_arm_neon_vqrshiftu>;
// VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
@@ -3168,7 +4014,7 @@ defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri", NEONvsri, N2RegVShRFrm>;
// Vector Absolute and Saturating Absolute.
// VABS : Vector Absolute Value
-defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
+defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
int_arm_neon_vabs>;
def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
@@ -3179,7 +4025,7 @@ def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
v4f32, v4f32, int_arm_neon_vabs>;
// VQABS : Vector Saturating Absolute Value
-defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
+defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
int_arm_neon_vqabs>;
@@ -3191,13 +4037,13 @@ def vnegq : PatFrag<(ops node:$in),
(sub (bitconvert (v4i32 NEONimmAllZerosV)), node:$in)>;
class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
- IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (vnegd DPR:$src)))]>;
+ : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$Vd), (ins DPR:$Vm),
+ IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (Ty (vnegd DPR:$Vm)))]>;
class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
- IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (vnegq QPR:$src)))]>;
+ : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$Vd), (ins QPR:$Vm),
+ IIC_VSHLiQ, OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (Ty (vnegq QPR:$Vm)))]>;
// VNEG : Vector Negate (integer)
def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
@@ -3209,13 +4055,13 @@ def VNEGs32q : VNEGQ<0b10, "vneg", "s32", v4i32>;
// VNEG : Vector Negate (floating-point)
def VNEGfd : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), IIC_VUNAD,
- "vneg", "f32", "$dst, $src", "",
- [(set DPR:$dst, (v2f32 (fneg DPR:$src)))]>;
+ (outs DPR:$Vd), (ins DPR:$Vm), IIC_VUNAD,
+ "vneg", "f32", "$Vd, $Vm", "",
+ [(set DPR:$Vd, (v2f32 (fneg DPR:$Vm)))]>;
def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), IIC_VUNAQ,
- "vneg", "f32", "$dst, $src", "",
- [(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;
+ (outs QPR:$Vd), (ins QPR:$Vm), IIC_VUNAQ,
+ "vneg", "f32", "$Vd, $Vm", "",
+ [(set QPR:$Vd, (v4f32 (fneg QPR:$Vm)))]>;
def : Pat<(v8i8 (vnegd DPR:$src)), (VNEGs8d DPR:$src)>;
def : Pat<(v4i16 (vnegd DPR:$src)), (VNEGs16d DPR:$src)>;
@@ -3225,22 +4071,22 @@ def : Pat<(v8i16 (vnegq QPR:$src)), (VNEGs16q QPR:$src)>;
def : Pat<(v4i32 (vnegq QPR:$src)), (VNEGs32q QPR:$src)>;
// VQNEG : Vector Saturating Negate
-defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
+defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqneg", "s",
int_arm_neon_vqneg>;
// Vector Bit Counting Operations.
// VCLS : Vector Count Leading Sign Bits
-defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
+defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
IIC_VCNTiD, IIC_VCNTiQ, "vcls", "s",
int_arm_neon_vcls>;
// VCLZ : Vector Count Leading Zeros
-defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
+defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
int_arm_neon_vclz>;
// VCNT : Vector Count One Bits
-def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
+def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
IIC_VCNTiD, "vcnt", "8",
v8i8, v8i8, int_arm_neon_vcnt>;
def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
@@ -3249,98 +4095,126 @@ def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
// Vector Swap -- for disassembly only.
def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- "vswp", "$dst, $src", "", []>;
+ (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
+ "vswp", "$Vd, $Vm", "", []>;
def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- "vswp", "$dst, $src", "", []>;
+ (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
+ "vswp", "$Vd, $Vm", "", []>;
// Vector Move Operations.
// VMOV : Vector Move (Register)
let neverHasSideEffects = 1 in {
-def VMOVDneon: N3VX<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
- N3RegFrm, IIC_VMOVD, "vmov", "$dst, $src", "", []>;
-def VMOVQ : N3VX<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
- N3RegFrm, IIC_VMOVD, "vmov", "$dst, $src", "", []>;
+def VMOVDneon: N3VX<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$Vd), (ins DPR:$Vm),
+ N3RegFrm, IIC_VMOV, "vmov", "$Vd, $Vm", "", []> {
+ let Vn{4-0} = Vm{4-0};
+}
+def VMOVQ : N3VX<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$Vd), (ins QPR:$Vm),
+ N3RegFrm, IIC_VMOV, "vmov", "$Vd, $Vm", "", []> {
+ let Vn{4-0} = Vm{4-0};
+}
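The Vn{4-0} = Vm{4-0} ties added above reflect that NEON's register-to-register VMOV has no encoding of its own; it reuses VORR's, with both source fields naming the same register:

    vmov Dd, Dm  ==  vorr Dd, Dm, Dm    (hence Vn must mirror Vm in the encoding)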
// Pseudo vector move instructions for QQ and QQQQ registers. This should
// be expanded after register allocation is completed.
def VMOVQQ : PseudoInst<(outs QQPR:$dst), (ins QQPR:$src),
- NoItinerary, "${:comment} vmov\t$dst, $src", []>;
+ NoItinerary, []>;
def VMOVQQQQ : PseudoInst<(outs QQQQPR:$dst), (ins QQQQPR:$src),
- NoItinerary, "${:comment} vmov\t$dst, $src", []>;
+ NoItinerary, []>;
} // neverHasSideEffects
// VMOV : Vector Move (Immediate)
let isReMaterializable = 1 in {
-def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
+def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i8", "$dst, $SIMM", "",
- [(set DPR:$dst, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
+ "vmov", "i8", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
+def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i8", "$dst, $SIMM", "",
- [(set QPR:$dst, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
+ "vmov", "i8", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$dst),
+def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i16", "$dst, $SIMM", "",
- [(set DPR:$dst, (v4i16 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$dst),
+ "vmov", "i16", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
+
+def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i16", "$dst, $SIMM", "",
- [(set QPR:$dst, (v8i16 (NEONvmovImm timm:$SIMM)))]>;
+ "vmov", "i16", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
+ let Inst{9} = SIMM{9};
+}
-def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$dst),
+def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i32", "$dst, $SIMM", "",
- [(set DPR:$dst, (v2i32 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$dst),
+ "vmov", "i32", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
+ let Inst{11-8} = SIMM{11-8};
+}
+
+def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i32", "$dst, $SIMM", "",
- [(set QPR:$dst, (v4i32 (NEONvmovImm timm:$SIMM)))]>;
+ "vmov", "i32", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
+ let Inst{11-8} = SIMM{11-8};
+}
-def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
+def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i64", "$dst, $SIMM", "",
- [(set DPR:$dst, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
+ "vmov", "i64", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
+def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
(ins nModImm:$SIMM), IIC_VMOVImm,
- "vmov", "i64", "$dst, $SIMM", "",
- [(set QPR:$dst, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
+ "vmov", "i64", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
} // isReMaterializable
// VMOV : Vector Get Lane (move scalar to ARM core register)
def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s8", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlanes (v8i8 DPR:$src),
- imm:$lane))]>;
+ (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
+ IIC_VMOVSI, "vmov", "s8", "$R, $V[$lane]",
+ [(set GPR:$R, (NEONvgetlanes (v8i8 DPR:$V),
+ imm:$lane))]> {
+ let Inst{21} = lane{2};
+ let Inst{6-5} = lane{1-0};
+}
def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s16", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlanes (v4i16 DPR:$src),
- imm:$lane))]>;
+ (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
+ IIC_VMOVSI, "vmov", "s16", "$R, $V[$lane]",
+ [(set GPR:$R, (NEONvgetlanes (v4i16 DPR:$V),
+ imm:$lane))]> {
+ let Inst{21} = lane{1};
+ let Inst{6} = lane{0};
+}
def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u8", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlaneu (v8i8 DPR:$src),
- imm:$lane))]>;
+ (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
+ IIC_VMOVSI, "vmov", "u8", "$R, $V[$lane]",
+ [(set GPR:$R, (NEONvgetlaneu (v8i8 DPR:$V),
+ imm:$lane))]> {
+ let Inst{21} = lane{2};
+ let Inst{6-5} = lane{1-0};
+}
def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u16", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlaneu (v4i16 DPR:$src),
- imm:$lane))]>;
+ (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
+ IIC_VMOVSI, "vmov", "u16", "$R, $V[$lane]",
+ [(set GPR:$R, (NEONvgetlaneu (v4i16 DPR:$V),
+ imm:$lane))]> {
+ let Inst{21} = lane{1};
+ let Inst{6} = lane{0};
+}
def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "32", "$dst, $src[$lane]",
- [(set GPR:$dst, (extractelt (v2i32 DPR:$src),
- imm:$lane))]>;
+ (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
+ IIC_VMOVSI, "vmov", "32", "$R, $V[$lane]",
+ [(set GPR:$R, (extractelt (v2i32 DPR:$V),
+ imm:$lane))]> {
+ let Inst{21} = lane{0};
+}
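In the VGETLN lane encodings added above, the lane index is simply split across two opcode fields. A sketch of the 8-bit-element case (the function name is illustrative, not from the sources):

    #include <cassert>
    // Pack an s8/u8 lane index per the `let` clauses above:
    // Inst{21} = lane{2}, Inst{6-5} = lane{1-0}.
    unsigned packByteLane(unsigned Lane) {
      assert(Lane < 8 && "a D register has 8 byte lanes");
      return (((Lane >> 2) & 1) << 21) | ((Lane & 3) << 5);
    }

The VSETLN definitions below use the identical split.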
// def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
(VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
@@ -3376,37 +4250,45 @@ def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
// VMOV : Vector Set Lane (move ARM core register to scalar)
-let Constraints = "$src1 = $dst" in {
-def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "8", "$dst[$lane], $src2",
- [(set DPR:$dst, (vector_insert (v8i8 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
-def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "16", "$dst[$lane], $src2",
- [(set DPR:$dst, (vector_insert (v4i16 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
-def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "32", "$dst[$lane], $src2",
- [(set DPR:$dst, (insertelt (v2i32 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
+let Constraints = "$src1 = $V" in {
+def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$V),
+ (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
+ IIC_VMOVISL, "vmov", "8", "$V[$lane], $R",
+ [(set DPR:$V, (vector_insert (v8i8 DPR:$src1),
+ GPR:$R, imm:$lane))]> {
+ let Inst{21} = lane{2};
+ let Inst{6-5} = lane{1-0};
+}
+def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$V),
+ (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
+ IIC_VMOVISL, "vmov", "16", "$V[$lane], $R",
+ [(set DPR:$V, (vector_insert (v4i16 DPR:$src1),
+ GPR:$R, imm:$lane))]> {
+ let Inst{21} = lane{1};
+ let Inst{6} = lane{0};
+}
+def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
+ (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
+ IIC_VMOVISL, "vmov", "32", "$V[$lane], $R",
+ [(set DPR:$V, (insertelt (v2i32 DPR:$src1),
+ GPR:$R, imm:$lane))]> {
+ let Inst{21} = lane{0};
+}
}
def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
- (v16i8 (INSERT_SUBREG QPR:$src1,
+ (v16i8 (INSERT_SUBREG QPR:$src1,
(v8i8 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
(DSubReg_i8_reg imm:$lane))),
GPR:$src2, (SubReg_i8_lane imm:$lane))),
(DSubReg_i8_reg imm:$lane)))>;
def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
- (v8i16 (INSERT_SUBREG QPR:$src1,
+ (v8i16 (INSERT_SUBREG QPR:$src1,
(v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
(DSubReg_i16_reg imm:$lane))),
GPR:$src2, (SubReg_i16_lane imm:$lane))),
(DSubReg_i16_reg imm:$lane)))>;
def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
- (v4i32 (INSERT_SUBREG QPR:$src1,
+ (v4i32 (INSERT_SUBREG QPR:$src1,
(v2i32 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
(DSubReg_i32_reg imm:$lane))),
GPR:$src2, (SubReg_i32_lane imm:$lane))),
@@ -3454,13 +4336,13 @@ def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
// VDUP : Vector Duplicate (from ARM core register to all elements)
class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
- : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", Dt, "$dst, $src",
- [(set DPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
+ : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$V), (ins GPR:$R),
+ IIC_VMOVIS, "vdup", Dt, "$V, $R",
+ [(set DPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
- : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", Dt, "$dst, $src",
- [(set QPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
+ : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$V), (ins GPR:$R),
+ IIC_VMOVIS, "vdup", Dt, "$V, $R",
+ [(set QPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
@@ -3469,40 +4351,56 @@ def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
-def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", "32", "$dst, $src",
- [(set DPR:$dst, (v2f32 (NEONvdup
- (f32 (bitconvert GPR:$src)))))]>;
-def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", "32", "$dst, $src",
- [(set QPR:$dst, (v4f32 (NEONvdup
- (f32 (bitconvert GPR:$src)))))]>;
+def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$V), (ins GPR:$R),
+ IIC_VMOVIS, "vdup", "32", "$V, $R",
+ [(set DPR:$V, (v2f32 (NEONvdup
+ (f32 (bitconvert GPR:$R)))))]>;
+def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$V), (ins GPR:$R),
+ IIC_VMOVIS, "vdup", "32", "$V, $R",
+ [(set QPR:$V, (v4f32 (NEONvdup
+ (f32 (bitconvert GPR:$R)))))]>;
// VDUP : Vector Duplicate Lane (from scalar to all elements)
class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
ValueType Ty>
- : NVDupLane<op19_16, 0, (outs DPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVD, OpcodeStr, Dt, "$dst, $src[$lane]",
- [(set DPR:$dst, (Ty (NEONvduplane (Ty DPR:$src), imm:$lane)))]>;
+ : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, nohash_imm:$lane),
+ IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm[$lane]",
+ [(set DPR:$Vd, (Ty (NEONvduplane (Ty DPR:$Vm), imm:$lane)))]>;
class VDUPLNQ<bits<4> op19_16, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy>
- : NVDupLane<op19_16, 1, (outs QPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVD, OpcodeStr, Dt, "$dst, $src[$lane]",
- [(set QPR:$dst, (ResTy (NEONvduplane (OpTy DPR:$src),
+ : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, nohash_imm:$lane),
+ IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm[$lane]",
+ [(set QPR:$Vd, (ResTy (NEONvduplane (OpTy DPR:$Vm),
imm:$lane)))]>;
// Inst{19-16} is partially specified depending on the element size.
-def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8>;
-def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16>;
-def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32>;
-def VDUPLNfd : VDUPLND<{?,1,0,0}, "vdup", "32", v2f32>;
-def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8>;
-def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16>;
-def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32>;
-def VDUPLNfq : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4f32, v2f32>;
+def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8> {
+ let Inst{19-17} = lane{2-0};
+}
+def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16> {
+ let Inst{19-18} = lane{1-0};
+}
+def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32> {
+ let Inst{19} = lane{0};
+}
+def VDUPLNfd : VDUPLND<{?,1,0,0}, "vdup", "32", v2f32> {
+ let Inst{19} = lane{0};
+}
+def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8> {
+ let Inst{19-17} = lane{2-0};
+}
+def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16> {
+ let Inst{19-18} = lane{1-0};
+}
+def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32> {
+ let Inst{19} = lane{0};
+}
+def VDUPLNfq : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4f32, v2f32> {
+ let Inst{19} = lane{0};
+}
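The partially-specified Inst{19-16} fields above pack both the element size and the lane index into one nibble: the position of the lowest set bit gives the size, and the bits above it carry the lane. Derived from the `let` clauses above:

    8-bit:  Inst{19-16} = lane{2-0}:1     e.g. lane 5 -> 1011
    16-bit: Inst{19-16} = lane{1-0}:10    e.g. lane 3 -> 1110
    32-bit: Inst{19-16} = lane{0}:100     e.g. lane 1 -> 1100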
def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
(v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
@@ -3521,18 +4419,13 @@ def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
(DSubReg_i32_reg imm:$lane))),
(SubReg_i32_lane imm:$lane)))>;
-def VDUPfdf : N2V<0b11, 0b11, {?,1}, {0,0}, 0b11000, 0, 0,
- (outs DPR:$dst), (ins SPR:$src),
- IIC_VMOVD, "vdup", "32", "$dst, ${src:lane}", "",
+def VDUPfdf : PseudoNeonI<(outs DPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
[(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
-
-def VDUPfqf : N2V<0b11, 0b11, {?,1}, {0,0}, 0b11000, 1, 0,
- (outs QPR:$dst), (ins SPR:$src),
- IIC_VMOVD, "vdup", "32", "$dst, ${src:lane}", "",
+def VDUPfqf : PseudoNeonI<(outs QPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
[(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
// VMOVN : Vector Narrowing Move
-defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVD,
+defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
"vmovn", "i", trunc>;
// VQMOVN : Vector Saturating Narrowing Move
defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, IIC_VQUNAiD,
@@ -3585,20 +4478,30 @@ def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
+// VCVT : Vector Convert Between Half-Precision and Single-Precision.
+def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
+ IIC_VUNAQ, "vcvt", "f16.f32",
+ v4i16, v4f32, int_arm_neon_vcvtfp2hf>,
+ Requires<[HasNEON, HasFP16]>;
+def VCVTh2f : N2VLInt<0b11, 0b11, 0b01, 0b10, 0b01110, 0, 0,
+ IIC_VUNAQ, "vcvt", "f32.f16",
+ v4f32, v4i16, int_arm_neon_vcvthf2fp>,
+ Requires<[HasNEON, HasFP16]>;
+
// Vector Reverse.
// VREV64 : Vector Reverse elements within 64-bit doublewords
class VREV64D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev64 (Ty DPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$Vd),
+ (ins DPR:$Vm), IIC_VMOVD,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (Ty (NEONvrev64 (Ty DPR:$Vm))))]>;
class VREV64Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev64 (Ty QPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$Vd),
+ (ins QPR:$Vm), IIC_VMOVQ,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (Ty (NEONvrev64 (Ty QPR:$Vm))))]>;
def VREV64d8 : VREV64D<0b00, "vrev64", "8", v8i8>;
def VREV64d16 : VREV64D<0b01, "vrev64", "16", v4i16>;
@@ -3613,15 +4516,15 @@ def VREV64qf : VREV64Q<0b10, "vrev64", "32", v4f32>;
// VREV32 : Vector Reverse elements within 32-bit words
class VREV32D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev32 (Ty DPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$Vd),
+ (ins DPR:$Vm), IIC_VMOVD,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (Ty (NEONvrev32 (Ty DPR:$Vm))))]>;
class VREV32Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev32 (Ty QPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$Vd),
+ (ins QPR:$Vm), IIC_VMOVQ,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (Ty (NEONvrev32 (Ty QPR:$Vm))))]>;
def VREV32d8 : VREV32D<0b00, "vrev32", "8", v8i8>;
def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
@@ -3632,46 +4535,91 @@ def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;
// VREV16 : Vector Reverse elements within 16-bit halfwords
class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev16 (Ty DPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$Vd),
+ (ins DPR:$Vm), IIC_VMOVD,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set DPR:$Vd, (Ty (NEONvrev16 (Ty DPR:$Vm))))]>;
class VREV16Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev16 (Ty QPR:$src))))]>;
+ : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$Vd),
+ (ins QPR:$Vm), IIC_VMOVQ,
+ OpcodeStr, Dt, "$Vd, $Vm", "",
+ [(set QPR:$Vd, (Ty (NEONvrev16 (Ty QPR:$Vm))))]>;
def VREV16d8 : VREV16D<0b00, "vrev16", "8", v8i8>;
def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
// Other Vector Shuffles.
+// Aligned extractions: really just dropping registers
+
+class AlignedVEXTq<ValueType DestTy, ValueType SrcTy, SDNodeXForm LaneCVT>
+ : Pat<(DestTy (vector_extract_subvec (SrcTy QPR:$src), (i32 imm:$start))),
+ (EXTRACT_SUBREG (SrcTy QPR:$src), (LaneCVT imm:$start))>;
+
+def : AlignedVEXTq<v8i8, v16i8, DSubReg_i8_reg>;
+
+def : AlignedVEXTq<v4i16, v8i16, DSubReg_i16_reg>;
+
+def : AlignedVEXTq<v2i32, v4i32, DSubReg_i32_reg>;
+
+def : AlignedVEXTq<v1i64, v2i64, DSubReg_f64_reg>;
+
+def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
+
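These AlignedVEXTq patterns encode the observation in the comment above: taking an aligned half of a Q register is free, because each half is already a D subregister, so the LaneCVT xform just selects the matching subregister index (low or high D half). For example, elements 8-15 of a v16i8 held in q0 are exactly d1, and no instruction is emitted.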
+
// VEXT : Vector Extract
class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
- : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$dst),
- (ins DPR:$lhs, DPR:$rhs, i32imm:$index), NVExtFrm,
- IIC_VEXTD, OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
- [(set DPR:$dst, (Ty (NEONvext (Ty DPR:$lhs),
- (Ty DPR:$rhs), imm:$index)))]>;
+ : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$Vm, i32imm:$index), NVExtFrm,
+ IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
+ [(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
+ (Ty DPR:$Vm), imm:$index)))]> {
+ bits<4> index;
+ let Inst{11-8} = index{3-0};
+}
class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
- : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$dst),
- (ins QPR:$lhs, QPR:$rhs, i32imm:$index), NVExtFrm,
- IIC_VEXTQ, OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
- [(set QPR:$dst, (Ty (NEONvext (Ty QPR:$lhs),
- (Ty QPR:$rhs), imm:$index)))]>;
-
-def VEXTd8 : VEXTd<"vext", "8", v8i8>;
-def VEXTd16 : VEXTd<"vext", "16", v4i16>;
-def VEXTd32 : VEXTd<"vext", "32", v2i32>;
-def VEXTdf : VEXTd<"vext", "32", v2f32>;
-
-def VEXTq8 : VEXTq<"vext", "8", v16i8>;
-def VEXTq16 : VEXTq<"vext", "16", v8i16>;
-def VEXTq32 : VEXTq<"vext", "32", v4i32>;
-def VEXTqf : VEXTq<"vext", "32", v4f32>;
+ : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$Vd),
+ (ins QPR:$Vn, QPR:$Vm, i32imm:$index), NVExtFrm,
+ IIC_VEXTQ, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
+ [(set QPR:$Vd, (Ty (NEONvext (Ty QPR:$Vn),
+ (Ty QPR:$Vm), imm:$index)))]> {
+ bits<4> index;
+ let Inst{11-8} = index{3-0};
+}
+
+def VEXTd8 : VEXTd<"vext", "8", v8i8> {
+ let Inst{11-8} = index{3-0};
+}
+def VEXTd16 : VEXTd<"vext", "16", v4i16> {
+ let Inst{11-9} = index{2-0};
+ let Inst{8} = 0b0;
+}
+def VEXTd32 : VEXTd<"vext", "32", v2i32> {
+ let Inst{11-10} = index{1-0};
+ let Inst{9-8} = 0b00;
+}
+def VEXTdf : VEXTd<"vext", "32", v2f32> {
+ let Inst{11} = index{0};
+ let Inst{10-8} = 0b000;
+}
+
+def VEXTq8 : VEXTq<"vext", "8", v16i8> {
+ let Inst{11-8} = index{3-0};
+}
+def VEXTq16 : VEXTq<"vext", "16", v8i16> {
+ let Inst{11-9} = index{2-0};
+ let Inst{8} = 0b0;
+}
+def VEXTq32 : VEXTq<"vext", "32", v4i32> {
+ let Inst{11-10} = index{1-0};
+ let Inst{9-8} = 0b00;
+}
+def VEXTqf : VEXTq<"vext", "32", v4f32> {
+ let Inst{11} = index{0};
+ let Inst{10-8} = 0b000;
+}
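The forced-zero low bits in the VEXT index fields above exist because the hardware field is a byte offset, while the 16- and 32-bit variants take an element index; the index is scaled by the element size before encoding:

    imm4 = index * sizeof(element)
    e.g. vext.16 ..., #3  ->  byte offset 6  ->  Inst{11-8} = 0b0110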
// VTRN : Vector Transpose
@@ -3707,160 +4655,120 @@ def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
// VTBL : Vector Table Lookup
def VTBL1
- : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$src), NVTBLFrm, IIC_VTB1,
- "vtbl", "8", "$dst, \\{$tbl1\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl1 DPR:$tbl1, DPR:$src)))]>;
+ : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
+ "vtbl", "8", "$Vd, \\{$Vn\\}, $Vm", "",
+ [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 DPR:$Vn, DPR:$Vm)))]>;
let hasExtraSrcRegAllocReq = 1 in {
def VTBL2
- : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$src), NVTBLFrm, IIC_VTB2,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "", []>;
+ : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTB2,
+ "vtbl", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "", []>;
def VTBL3
- : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), NVTBLFrm, IIC_VTB3,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "", []>;
+ : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm), NVTBLFrm, IIC_VTB3,
+ "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm", "", []>;
def VTBL4
- : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src),
+ : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$Vd),
+ (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm),
NVTBLFrm, IIC_VTB4,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src", "", []>;
+ "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm", "", []>;
} // hasExtraSrcRegAllocReq = 1
+def VTBL2Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins QPR:$tbl, DPR:$src), IIC_VTB2, "", []>;
+def VTBL3Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB3, "", []>;
+def VTBL4Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB4, "", []>;
+
// VTBX : Vector Table Extension
def VTBX1
- : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$src), NVTBLFrm, IIC_VTBX1,
- "vtbx", "8", "$dst, \\{$tbl1\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx1
- DPR:$orig, DPR:$tbl1, DPR:$src)))]>;
+ : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$Vd),
+ (ins DPR:$orig, DPR:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
+ "vtbx", "8", "$Vd, \\{$Vn\\}, $Vm", "$orig = $Vd",
+ [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbx1
+ DPR:$orig, DPR:$Vn, DPR:$Vm)))]>;
let hasExtraSrcRegAllocReq = 1 in {
def VTBX2
- : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src), NVTBLFrm, IIC_VTBX2,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "$orig = $dst", []>;
+ : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$Vd),
+ (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
+ "vtbx", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "$orig = $Vd", []>;
def VTBX3
- : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src),
+ : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$Vd),
+ (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm),
NVTBLFrm, IIC_VTBX3,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src",
- "$orig = $dst", []>;
+ "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm",
+ "$orig = $Vd", []>;
def VTBX4
- : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$dst), (ins DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), NVTBLFrm, IIC_VTBX4,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src",
- "$orig = $dst", []>;
+ : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd), (ins DPR:$orig, DPR:$Vn,
+ DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
+ "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm",
+ "$orig = $Vd", []>;
} // hasExtraSrcRegAllocReq = 1
+def VTBX2Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QPR:$tbl, DPR:$src),
+ IIC_VTBX2, "$orig = $dst", []>;
+def VTBX3Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
+ IIC_VTBX3, "$orig = $dst", []>;
+def VTBX4Pseudo
+ : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
+ IIC_VTBX4, "$orig = $dst", []>;
+
//===----------------------------------------------------------------------===//
// NEON instructions for single-precision FP math
//===----------------------------------------------------------------------===//
-class N2VSPat<SDNode OpNode, ValueType ResTy, ValueType OpTy, NeonI Inst>
- : NEONFPPat<(ResTy (OpNode SPR:$a)),
- (EXTRACT_SUBREG (OpTy (Inst (INSERT_SUBREG (OpTy (IMPLICIT_DEF)),
- SPR:$a, ssub_0))),
- ssub_0)>;
+class N2VSPat<SDNode OpNode, NeonI Inst>
+ : NEONFPPat<(f32 (OpNode SPR:$a)),
+ (EXTRACT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (Inst
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$a, ssub_0)), DPR_VFP2)), ssub_0)>;
class N3VSPat<SDNode OpNode, NeonI Inst>
: NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
- (EXTRACT_SUBREG (v2f32
- (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$a, ssub_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$b, ssub_0))),
- ssub_0)>;
+ (EXTRACT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (Inst
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$a, ssub_0),
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
: NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
- (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$acc, ssub_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$a, ssub_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$b, ssub_0)),
- ssub_0)>;
-
-// These need separate instructions because they must use DPR_VFP2 register
-// class which have SPR sub-registers.
-
-// Vector Add Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VADDfd_sfp : N3VS<0,0,0b00,0b1101,0, "vadd", "f32", v2f32, v2f32, fadd, 1>;
-def : N3VSPat<fadd, VADDfd_sfp>;
-
-// Vector Sub Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VSUBfd_sfp : N3VS<0,0,0b10,0b1101,0, "vsub", "f32", v2f32, v2f32, fsub, 0>;
-def : N3VSPat<fsub, VSUBfd_sfp>;
-
-// Vector Multiply Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMULfd_sfp : N3VS<1,0,0b00,0b1101,1, "vmul", "f32", v2f32, v2f32, fmul, 1>;
-def : N3VSPat<fmul, VMULfd_sfp>;
-
-// Vector Multiply-Accumulate/Subtract used for single-precision FP
-// vml[as].f32 can cause 4-8 cycle stalls in following ASIMD instructions, so
-// we want to avoid them for now. e.g., alternating vmla/vadd instructions.
-
-//let neverHasSideEffects = 1 in
-//def VMLAfd_sfp : N3VSMulOp<0,0,0b00,0b1101,1, IIC_VMACD, "vmla", "f32",
-// v2f32, fmul, fadd>;
-//def : N3VSMulOpPat<fmul, fadd, VMLAfd_sfp>;
-
-//let neverHasSideEffects = 1 in
-//def VMLSfd_sfp : N3VSMulOp<0,0,0b10,0b1101,1, IIC_VMACD, "vmls", "f32",
-// v2f32, fmul, fsub>;
-//def : N3VSMulOpPat<fmul, fsub, VMLSfd_sfp>;
-
-// Vector Absolute used for single-precision FP
-let neverHasSideEffects = 1 in
-def VABSfd_sfp : N2V<0b11, 0b11, 0b10, 0b01, 0b01110, 0, 0,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), IIC_VUNAD,
- "vabs", "f32", "$dst, $src", "", []>;
-def : N2VSPat<fabs, f32, v2f32, VABSfd_sfp>;
-
-// Vector Negate used for single-precision FP
-let neverHasSideEffects = 1 in
-def VNEGfd_sfp : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), IIC_VUNAD,
- "vneg", "f32", "$dst, $src", "", []>;
-def : N2VSPat<fneg, f32, v2f32, VNEGfd_sfp>;
-
-// Vector Maximum used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMAXfd_sfp : N3V<0, 0, 0b00, 0b1111, 0, 0, (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2), N3RegFrm, IIC_VBIND,
- "vmax", "f32", "$dst, $src1, $src2", "", []>;
-def : N3VSPat<NEONfmax, VMAXfd_sfp>;
-
-// Vector Minimum used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMINfd_sfp : N3V<0, 0, 0b00, 0b1111, 0, 0, (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2), N3RegFrm, IIC_VBIND,
- "vmin", "f32", "$dst, $src1, $src2", "", []>;
-def : N3VSPat<NEONfmin, VMINfd_sfp>;
-
-// Vector Convert between single-precision FP and integer
-let neverHasSideEffects = 1 in
-def VCVTf2sd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v2i32, v2f32, fp_to_sint>;
-def : N2VSPat<arm_ftosi, f32, v2f32, VCVTf2sd_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTf2ud_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v2i32, v2f32, fp_to_uint>;
-def : N2VSPat<arm_ftoui, f32, v2f32, VCVTf2ud_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTs2fd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v2f32, v2i32, sint_to_fp>;
-def : N2VSPat<arm_sitof, f32, v2i32, VCVTs2fd_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTu2fd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v2f32, v2i32, uint_to_fp>;
-def : N2VSPat<arm_uitof, f32, v2i32, VCVTu2fd_sfp>;
+ (EXTRACT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (Inst
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$acc, ssub_0),
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$a, ssub_0),
+ (INSERT_SUBREG
+ (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
+ SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
+
+def : N3VSPat<fadd, VADDfd>;
+def : N3VSPat<fsub, VSUBfd>;
+def : N3VSPat<fmul, VMULfd>;
+def : N3VSMulOpPat<fmul, fadd, VMLAfd>,
+ Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
+def : N3VSMulOpPat<fmul, fsub, VMLSfd>,
+ Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
+def : N2VSPat<fabs, VABSfd>;
+def : N2VSPat<fneg, VNEGfd>;
+def : N3VSPat<NEONfmax, VMAXfd>;
+def : N3VSPat<NEONfmin, VMINfd>;
+def : N2VSPat<arm_ftosi, VCVTf2sd>;
+def : N2VSPat<arm_ftoui, VCVTf2ud>;
+def : N2VSPat<arm_sitof, VCVTs2fd>;
+def : N2VSPat<arm_uitof, VCVTu2fd>;
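The N2VSPat/N3VSPat rewrites above implement scalar f32 math on NEON: the scalar is inserted into lane 0 (ssub_0) of an undef v2f32, the D-register instruction runs on both lanes, and lane 0 is extracted back out. COPY_TO_REGCLASS pins the value to DPR_VFP2 because only D0-D15 have addressable S subregisters. Roughly, for fadd (a hedged sketch of the selected sequence, not literal selector output):

    %da = INSERT_SUBREG(IMPLICIT_DEF:v2f32, %a:f32, ssub_0)
    %db = INSERT_SUBREG(IMPLICIT_DEF:v2f32, %b:f32, ssub_0)
    %dr = VADDfd %da, %db        ; lane 1 computes garbage, harmlessly
    %r  = EXTRACT_SUBREG(%dr, ssub_0)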
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index a13ff12..826ef46 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1,4 +1,4 @@
-//===- ARMInstrThumb.td - Thumb support for ARM ---------------------------===//
+//===- ARMInstrThumb.td - Thumb support for ARM ------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,7 +16,7 @@
//
def ARMtcall : SDNode<"ARMISD::tCALL", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def imm_neg_XFORM : SDNodeXForm<imm, [{
@@ -26,7 +26,6 @@ def imm_comp_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
}]>;
-
/// imm0_7 predicate - True if the 32-bit immediate is in the range [0,7].
def imm0_7 : PatLeaf<(i32 imm), [{
return (uint32_t)N->getZExtValue() < 8;
@@ -50,9 +49,9 @@ def imm8_255_neg : PatLeaf<(i32 imm), [{
return Val >= 8 && Val < 256;
}], imm_neg_XFORM>;
-// Break imm's up into two pieces: an immediate + a left shift.
-// This uses thumb_immshifted to match and thumb_immshifted_val and
-// thumb_immshifted_shamt to get the val/shift pieces.
+// Break imm's up into two pieces: an immediate + a left shift. This uses
+// thumb_immshifted to match and thumb_immshifted_val and thumb_immshifted_shamt
+// to get the val/shift pieces.
def thumb_immshifted : PatLeaf<(imm), [{
return ARM_AM::isThumbImmShiftedVal((unsigned)N->getZExtValue());
}]>;
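A standalone sketch of the factorization the comment above describes. The in-tree predicate is ARM_AM::isThumbImmShiftedVal; this mirrors the idea rather than its exact code:

    #include <cstdint>
    // True if V == imm8 << shamt for some 8-bit imm8, i.e. the value can be
    // materialized as a move of imm8 followed by a left shift.
    static bool fitsImm8ShiftedLeft(uint32_t V) {
      if (V == 0)
        return false;            // 0 is a plain imm8; no shift needed
      unsigned Shift = 0;
      while ((V & 1) == 0) {     // strip trailing zeros => the shift amount
        V >>= 1;
        ++Shift;
      }
      return V < 256;            // what remains must fit in imm8
    }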
@@ -67,6 +66,11 @@ def thumb_immshifted_shamt : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
+// ADR instruction labels.
+def t_adrlabel : Operand<i32> {
+ let EncoderMethod = "getThumbAdrLabelOpValue";
+}
+
// Scaled 4 immediate.
def t_imm_s4 : Operand<i32> {
let PrintMethod = "printThumbS4ImmOperand";
@@ -74,47 +78,114 @@ def t_imm_s4 : Operand<i32> {
// Define Thumb specific addressing modes.
+def t_brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getThumbBRTargetOpValue";
+}
+
+def t_bcctarget : Operand<i32> {
+ let EncoderMethod = "getThumbBCCTargetOpValue";
+}
+
+def t_cbtarget : Operand<i32> {
+ let EncoderMethod = "getThumbCBTargetOpValue";
+}
+
+def t_bltarget : Operand<i32> {
+ let EncoderMethod = "getThumbBLTargetOpValue";
+}
+
+def t_blxtarget : Operand<i32> {
+ let EncoderMethod = "getThumbBLXTargetOpValue";
+}
+
+def MemModeRegThumbAsmOperand : AsmOperandClass {
+ let Name = "MemModeRegThumb";
+ let SuperClasses = [];
+}
+
+def MemModeImmThumbAsmOperand : AsmOperandClass {
+ let Name = "MemModeImmThumb";
+ let SuperClasses = [];
+}
+
// t_addrmode_rr := reg + reg
//
def t_addrmode_rr : Operand<i32>,
ComplexPattern<i32, 2, "SelectThumbAddrModeRR", []> {
+ let EncoderMethod = "getThumbAddrModeRegRegOpValue";
let PrintMethod = "printThumbAddrModeRROperand";
let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
}
-// t_addrmode_s4 := reg + reg
-// reg + imm5 * 4
+// t_addrmode_rrs := reg + reg
//
-def t_addrmode_s4 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS4", []> {
- let PrintMethod = "printThumbAddrModeS4Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
+def t_addrmode_rrs1 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeRI5S1", []> {
+ let EncoderMethod = "getThumbAddrModeRegRegOpValue";
+ let PrintMethod = "printThumbAddrModeRROperand";
+ let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
+ let ParserMatchClass = MemModeRegThumbAsmOperand;
+}
+def t_addrmode_rrs2 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeRI5S2", []> {
+ let EncoderMethod = "getThumbAddrModeRegRegOpValue";
+ let PrintMethod = "printThumbAddrModeRROperand";
+ let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
+ let ParserMatchClass = MemModeRegThumbAsmOperand;
+}
+def t_addrmode_rrs4 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeRI5S4", []> {
+ let EncoderMethod = "getThumbAddrModeRegRegOpValue";
+ let PrintMethod = "printThumbAddrModeRROperand";
+ let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
+ let ParserMatchClass = MemModeRegThumbAsmOperand;
+}
+
+// t_addrmode_is4 := reg + imm5 * 4
+//
+def t_addrmode_is4 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeImm5S4", []> {
+ let EncoderMethod = "getAddrModeISOpValue";
+ let PrintMethod = "printThumbAddrModeImm5S4Operand";
+ let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemModeImmThumbAsmOperand;
}
-// t_addrmode_s2 := reg + reg
-// reg + imm5 * 2
+// t_addrmode_is2 := reg + imm5 * 2
//
-def t_addrmode_s2 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS2", []> {
- let PrintMethod = "printThumbAddrModeS2Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
+def t_addrmode_is2 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeImm5S2", []> {
+ let EncoderMethod = "getAddrModeISOpValue";
+ let PrintMethod = "printThumbAddrModeImm5S2Operand";
+ let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemModeImmThumbAsmOperand;
}
-// t_addrmode_s1 := reg + reg
-// reg + imm5
+// t_addrmode_is1 := reg + imm5
//
-def t_addrmode_s1 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS1", []> {
- let PrintMethod = "printThumbAddrModeS1Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
+def t_addrmode_is1 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectThumbAddrModeImm5S1", []> {
+ let EncoderMethod = "getAddrModeISOpValue";
+ let PrintMethod = "printThumbAddrModeImm5S1Operand";
+ let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemModeImmThumbAsmOperand;
}
// t_addrmode_sp := sp + imm8 * 4
//
def t_addrmode_sp : Operand<i32>,
ComplexPattern<i32, 2, "SelectThumbAddrModeSP", []> {
+ let EncoderMethod = "getAddrModeThumbSPOpValue";
let PrintMethod = "printThumbAddrModeSPOperand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemModeImmThumbAsmOperand;
+}
+
+// t_addrmode_pc := <label> => pc + imm8 * 4
+//
+def t_addrmode_pc : Operand<i32> {
+ let EncoderMethod = "getAddrModePCOpValue";
+ let ParserMatchClass = MemModeImmThumbAsmOperand;
}
//===----------------------------------------------------------------------===//
@@ -126,132 +197,162 @@ def t_addrmode_sp : Operand<i32>,
// these will always be in pairs, and asserts if it finds otherwise. Better way?
let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
def tADJCALLSTACKUP :
-PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2), NoItinerary,
- "${:comment} tADJCALLSTACKUP $amt1",
- [(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb1Only]>;
+ PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2), NoItinerary,
+ [(ARMcallseq_end imm:$amt1, imm:$amt2)]>,
+ Requires<[IsThumb, IsThumb1Only]>;
def tADJCALLSTACKDOWN :
-PseudoInst<(outs), (ins i32imm:$amt), NoItinerary,
- "${:comment} tADJCALLSTACKDOWN $amt",
- [(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb1Only]>;
+ PseudoInst<(outs), (ins i32imm:$amt), NoItinerary,
+ [(ARMcallseq_start imm:$amt)]>,
+ Requires<[IsThumb, IsThumb1Only]>;
+}
+
+// T1Disassembly - A simple class to make encoding some disassembly patterns
+// easier and less verbose.
+class T1Disassembly<bits<2> op1, bits<8> op2>
+ : T1Encoding<0b101111> {
+ let Inst{9-8} = op1;
+ let Inst{7-0} = op2;
}
def tNOP : T1pI<(outs), (ins), NoItinerary, "nop", "",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00000000;
-}
+ T1Disassembly<0b11, 0x00>; // A8.6.110
def tYIELD : T1pI<(outs), (ins), NoItinerary, "yield", "",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00010000;
-}
+ T1Disassembly<0b11, 0x10>; // A8.6.410
def tWFE : T1pI<(outs), (ins), NoItinerary, "wfe", "",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00100000;
-}
+ T1Disassembly<0b11, 0x20>; // A8.6.408
def tWFI : T1pI<(outs), (ins), NoItinerary, "wfi", "",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00110000;
-}
+ T1Disassembly<0b11, 0x30>; // A8.6.409
def tSEV : T1pI<(outs), (ins), NoItinerary, "sev", "",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b01000000;
-}
+ T1Disassembly<0b11, 0x40>; // A8.6.157
+
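With T1Encoding<0b101111> supplying Inst{15-10}, the T1Disassembly<op1, op2> pairs above expand to the familiar Thumb hint encodings; a quick check of the values, derived from the fields above:

    Inst = 101111 : op1 : op2
    nop = 0xBF00, yield = 0xBF10, wfe = 0xBF20, wfi = 0xBF30, sev = 0xBF40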
+// The i32imm operand $val can be used by a debugger to store more information
+// about the breakpoint.
+def tBKPT : T1I<(outs), (ins i32imm:$val), NoItinerary, "bkpt\t$val",
+ [/* For disassembly only; pattern left blank */]>,
+ T1Disassembly<0b10, {?,?,?,?,?,?,?,?}> {
+ // A8.6.22
+ bits<8> val;
+ let Inst{7-0} = val;
+}
def tSETENDBE : T1I<(outs), (ins), NoItinerary, "setend\tbe",
[/* For disassembly only; pattern left blank */]>,
T1Encoding<0b101101> {
+ // A8.6.156
let Inst{9-5} = 0b10010;
- let Inst{3} = 1;
+ let Inst{4} = 1;
+ let Inst{3} = 1; // Big-Endian
+ let Inst{2-0} = 0b000;
}
def tSETENDLE : T1I<(outs), (ins), NoItinerary, "setend\tle",
[/* For disassembly only; pattern left blank */]>,
T1Encoding<0b101101> {
+ // A8.6.156
let Inst{9-5} = 0b10010;
- let Inst{3} = 0;
+ let Inst{4} = 1;
+ let Inst{3} = 0; // Little-Endian
+ let Inst{2-0} = 0b000;
}
-// The i32imm operand $val can be used by a debugger to store more information
-// about the breakpoint.
-def tBKPT : T1I<(outs), (ins i32imm:$val), NoItinerary, "bkpt\t$val",
+// Change Processor State is a system instruction -- for disassembly only.
+def tCPS : T1I<(outs), (ins imod_op:$imod, iflags_op:$iflags),
+ NoItinerary, "cps$imod $iflags",
[/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b10;
+ T1Misc<0b0110011> {
+ // A8.6.38 & B6.1.1
+ bit imod;
+ bits<3> iflags;
+
+ let Inst{4} = imod;
+ let Inst{3} = 0;
+ let Inst{2-0} = iflags;
}
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode ==> don't care
-// opt{5} = changemode ==> 0 (false for 16-bit Thumb instr)
-// opt{8-6} = AIF from Inst{2-0}
-// opt{10-9} = 1:imod from Inst{4} with 0b10 as enable and 0b11 as disable
-//
-// The opt{4-0} and opt{5} sub-fields are to accommodate 32-bit Thumb and ARM
-// CPS which has more options.
-def tCPS : T1I<(outs), (ins cps_opt:$opt), NoItinerary, "cps$opt",
- [/* For disassembly only; pattern left blank */]>,
- T1Misc<0b0110011>;
-
// For both thumb1 and thumb2.
-let isNotDuplicable = 1 in
-def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr,
- "\n$cp:\n\tadd\t$dst, pc",
- [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>,
+let isNotDuplicable = 1, isCodeGenOnly = 1 in
+def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr, "",
+ [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>,
T1Special<{0,0,?,?}> {
- let Inst{6-3} = 0b1111; // A8.6.6 Rm = pc
+ // A8.6.6
+ bits<3> dst;
+ let Inst{6-3} = 0b1111; // Rm = pc
+ let Inst{2-0} = dst;
}
-// PC relative add.
+// PC relative add (ADR).
def tADDrPCi : T1I<(outs tGPR:$dst), (ins t_imm_s4:$rhs), IIC_iALUi,
- "add\t$dst, pc, $rhs", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
+ "add\t$dst, pc, $rhs", []>,
+ T1Encoding<{1,0,1,0,0,?}> {
+ // A6.2 & A8.6.10
+ bits<3> dst;
+ bits<8> rhs;
+ let Inst{10-8} = dst;
+ let Inst{7-0} = rhs;
+}
-// ADD rd, sp, #imm8
+// ADD <Rd>, sp, #<imm8>
// This is rematerializable, which is particularly useful for taking the
// address of locals.
-let isReMaterializable = 1 in {
+let isReMaterializable = 1 in
def tADDrSPi : T1I<(outs tGPR:$dst), (ins GPR:$sp, t_imm_s4:$rhs), IIC_iALUi,
- "add\t$dst, $sp, $rhs", []>,
- T1Encoding<{1,0,1,0,1,?}>; // A6.2 & A8.6.8
+ "add\t$dst, $sp, $rhs", []>,
+ T1Encoding<{1,0,1,0,1,?}> {
+ // A6.2 & A8.6.8
+ bits<3> dst;
+ bits<8> rhs;
+ let Inst{10-8} = dst;
+ let Inst{7-0} = rhs;
}
-// ADD sp, sp, #imm7
+// ADD sp, sp, #<imm7>
def tADDspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi,
"add\t$dst, $rhs", []>,
- T1Misc<{0,0,0,0,0,?,?}>; // A6.2.5 & A8.6.8
+ T1Misc<{0,0,0,0,0,?,?}> {
+ // A6.2.5 & A8.6.8
+ bits<7> rhs;
+ let Inst{6-0} = rhs;
+}
-// SUB sp, sp, #imm7
+// SUB sp, sp, #<imm7>
+// FIXME: The encoding and the ASM string don't match up.
def tSUBspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi,
"sub\t$dst, $rhs", []>,
- T1Misc<{0,0,0,0,1,?,?}>; // A6.2.5 & A8.6.215
+ T1Misc<{0,0,0,0,1,?,?}> {
+ // A6.2.5 & A8.6.214
+ bits<7> rhs;
+ let Inst{6-0} = rhs;
+}
-// ADD rm, sp
+// ADD <Rm>, sp
def tADDrSP : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
"add\t$dst, $rhs", []>,
T1Special<{0,0,?,?}> {
- let Inst{6-3} = 0b1101; // A8.6.9 Encoding T1
+ // A8.6.9 Encoding T1
+ bits<4> dst;
+ let Inst{7} = dst{3};
+ let Inst{6-3} = 0b1101;
+ let Inst{2-0} = dst{2-0};
}
-// ADD sp, rm
+// ADD sp, <Rm>
def tADDspr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
"add\t$dst, $rhs", []>,
T1Special<{0,0,?,?}> {
// A8.6.9 Encoding T2
+ bits<4> dst;
let Inst{7} = 1;
+ let Inst{6-3} = dst;
let Inst{2-0} = 0b101;
}
@@ -260,21 +361,37 @@ def tADDspr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
//
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
- def tBX_RET : TI<(outs), (ins), IIC_Br, "bx\tlr", [(ARMretflag)]>,
- T1Special<{1,1,0,?}> { // A6.2.3 & A8.6.25
+ def tBX_RET : TI<(outs), (ins), IIC_Br, "bx\tlr",
+ [(ARMretflag)]>,
+ T1Special<{1,1,0,?}> {
+ // A6.2.3 & A8.6.25
let Inst{6-3} = 0b1110; // Rm = lr
+ let Inst{2-0} = 0b000;
}
+
// Alternative return instruction used by vararg functions.
- def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), IIC_Br, "bx\t$target",[]>,
- T1Special<{1,1,0,?}>; // A6.2.3 & A8.6.25
+ def tBX_RET_vararg : TI<(outs), (ins tGPR:$Rm),
+ IIC_Br, "bx\t$Rm",
+ []>,
+ T1Special<{1,1,0,?}> {
+ // A6.2.3 & A8.6.25
+ bits<4> Rm;
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = 0b000;
+ }
}
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
- def tBRIND : TI<(outs), (ins GPR:$dst), IIC_Br, "mov\tpc, $dst",
- [(brind GPR:$dst)]>,
- T1Special<{1,0,1,?}> {
- // <Rd> = Inst{7:2-0} = pc
+ def tBRIND : TI<(outs), (ins GPR:$Rm),
+ IIC_Br,
+ "mov\tpc, $Rm",
+ [(brind GPR:$Rm)]>,
+ T1Special<{1,0,?,?}> {
+ // A8.6.97
+ bits<4> Rm;
+ let Inst{7} = 1; // <Rd> = Inst{7:2-0} = pc
+ let Inst{6-3} = Rm;
let Inst{2-0} = 0b111;
}
}
@@ -282,28 +399,52 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// FIXME: remove when we have a way to mark an MI with these properties.
let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
hasExtraDefRegAllocReq = 1 in
-def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$dsts, variable_ops), IIC_Br,
- "pop${p}\t$dsts", []>,
- T1Misc<{1,1,0,?,?,?,?}>;
+def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops),
+ IIC_iPop_Br,
+ "pop${p}\t$regs", []>,
+ T1Misc<{1,1,0,?,?,?,?}> {
+ // A8.6.121
+ bits<16> regs;
+ let Inst{8} = regs{15}; // registers = P:'0000000':register_list
+ let Inst{7-0} = regs{7-0};
+}
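The regs encoding above matches the 16-bit POP format: the eight low registers go in Inst{7-0}, and PC rides in the P bit, Inst{8}. For example:

    pop {r0, pc}  ->  regs{0} = regs{15} = 1  ->  Inst{8} = 1, Inst{7-0} = 0x01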
+// All calls clobber the non-callee saved registers. SP is marked as a use to
+// prevent stack-pointer assignments that appear immediately before calls from
+// potentially appearing dead.
let isCall = 1,
+ // On non-Darwin platforms R9 is callee-saved.
Defs = [R0, R1, R2, R3, R12, LR,
D0, D1, D2, D3, D4, D5, D6, D7,
D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
+ D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR],
+ Uses = [SP] in {
// Also used for Thumb2
def tBL : TIx2<0b11110, 0b11, 1,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "bl\t${func:call}",
+ (outs), (ins t_bltarget:$func, variable_ops), IIC_Br,
+ "bl\t$func",
[(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb, IsNotDarwin]>;
+ Requires<[IsThumb, IsNotDarwin]> {
+ bits<21> func;
+ let Inst{25-16} = func{20-11};
+ let Inst{13} = 1;
+ let Inst{11} = 1;
+ let Inst{10-0} = func{10-0};
+ }
// ARMv5T and above, also used for Thumb2
def tBLXi : TIx2<0b11110, 0b11, 0,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "blx\t${func:call}",
+ (outs), (ins t_blxtarget:$func, variable_ops), IIC_Br,
+ "blx\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
+ Requires<[IsThumb, HasV5T, IsNotDarwin]> {
+ bits<21> func;
+ let Inst{25-16} = func{20-11};
+ let Inst{13} = 1;
+ let Inst{11} = 1;
+ let Inst{10-1} = func{10-1};
+ let Inst{0} = 0; // func{0} is assumed zero
+ }
// Also used for Thumb2
def tBLXr : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br,
@@ -313,642 +454,1002 @@ let isCall = 1,
T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24;
// ARMv4T
+ // FIXME: Should be a pseudo.
+ let isCodeGenOnly = 1 in
def tBX : TIx2<{?,?,?,?,?}, {?,?}, ?,
(outs), (ins tGPR:$func, variable_ops), IIC_Br,
"mov\tlr, pc\n\tbx\t$func",
[(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb1Only, IsNotDarwin]>;
+ Requires<[IsThumb, IsThumb1Only, IsNotDarwin]>;
}
-// On Darwin R9 is call-clobbered.
let isCall = 1,
+ // On Darwin R9 is call-clobbered.
+ // R7 is marked as a use to prevent frame-pointer assignments from being
+ // moved above / below calls.
Defs = [R0, R1, R2, R3, R9, R12, LR,
D0, D1, D2, D3, D4, D5, D6, D7,
D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
+ D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR],
+ Uses = [R7, SP] in {
// Also used for Thumb2
def tBLr9 : TIx2<0b11110, 0b11, 1,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "bl\t${func:call}",
+ (outs), (ins pred:$p, t_bltarget:$func, variable_ops),
+ IIC_Br, "bl${p}\t$func",
[(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb, IsDarwin]>;
+ Requires<[IsThumb, IsDarwin]> {
+ bits<21> func;
+ let Inst{25-16} = func{20-11};
+ let Inst{13} = 1;
+ let Inst{11} = 1;
+ let Inst{10-0} = func{10-0};
+ }
// ARMv5T and above, also used for Thumb2
def tBLXi_r9 : TIx2<0b11110, 0b11, 0,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "blx\t${func:call}",
+ (outs), (ins pred:$p, t_blxtarget:$func, variable_ops),
+ IIC_Br, "blx${p}\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
+ Requires<[IsThumb, HasV5T, IsDarwin]> {
+ bits<21> func;
+ let Inst{25-16} = func{20-11};
+ let Inst{13} = 1;
+ let Inst{11} = 1;
+ let Inst{10-1} = func{10-1};
+ let Inst{0} = 0; // func{0} is assumed zero
+ }
// Also used for Thumb2
- def tBLXr_r9 : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br,
- "blx\t$func",
+ def tBLXr_r9 : TI<(outs), (ins pred:$p, GPR:$func, variable_ops), IIC_Br,
+ "blx${p}\t$func",
[(ARMtcall GPR:$func)]>,
Requires<[IsThumb, HasV5T, IsDarwin]>,
- T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24
+ T1Special<{1,1,1,?}> {
+ // A6.2.3 & A8.6.24
+ bits<4> func;
+ let Inst{6-3} = func;
+ let Inst{2-0} = 0b000;
+ }
// ARMv4T
+  // FIXME: Should be a pseudo.
+  let isCodeGenOnly = 1 in
def tBXr9 : TIx2<{?,?,?,?,?}, {?,?}, ?,
(outs), (ins tGPR:$func, variable_ops), IIC_Br,
"mov\tlr, pc\n\tbx\t$func",
[(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb1Only, IsDarwin]>;
+ Requires<[IsThumb, IsThumb1Only, IsDarwin]>;
}
-let isBranch = 1, isTerminator = 1 in {
- let isBarrier = 1 in {
- let isPredicable = 1 in
- def tB : T1I<(outs), (ins brtarget:$target), IIC_Br,
- "b\t$target", [(br bb:$target)]>,
- T1Encoding<{1,1,1,0,0,?}>;
+let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
+ let isPredicable = 1 in
+ def tB : T1I<(outs), (ins t_brtarget:$target), IIC_Br,
+ "b\t$target", [(br bb:$target)]>,
+ T1Encoding<{1,1,1,0,0,?}> {
+ bits<11> target;
+ let Inst{10-0} = target;
+ }
// Far jump
+ // Just a pseudo for a tBL instruction. Needed to let regalloc know about
+ // the clobber of LR.
let Defs = [LR] in
- def tBfar : TIx2<0b11110, 0b11, 1, (outs), (ins brtarget:$target), IIC_Br,
- "bl\t$target\t${:comment} far jump",[]>;
-
- def tBR_JTr : T1JTI<(outs),
- (ins tGPR:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target\n\t.align\t2$jt",
- [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>,
- Encoding16 {
- let Inst{15-7} = 0b010001101;
- let Inst{2-0} = 0b111;
- }
+ def tBfar : tPseudoInst<(outs), (ins t_bltarget:$target),
+ Size4Bytes, IIC_Br, []>;
+
+ def tBR_JTr : tPseudoInst<(outs),
+ (ins tGPR:$target, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br,
+ [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]> {
+ list<Predicate> Predicates = [IsThumb, IsThumb1Only];
}
}
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
let isBranch = 1, isTerminator = 1 in
- def tBcc : T1I<(outs), (ins brtarget:$target, pred:$cc), IIC_Br,
- "b$cc\t$target",
+ def tBcc : T1I<(outs), (ins t_bcctarget:$target, pred:$p), IIC_Br,
+ "b${p}\t$target",
[/*(ARMbrcond bb:$target, imm:$cc)*/]>,
- T1Encoding<{1,1,0,1,?,?}>;
+ T1Encoding<{1,1,0,1,?,?}> {
+ bits<4> p;
+ bits<8> target;
+ let Inst{11-8} = p;
+ let Inst{7-0} = target;
+}
// Compare and branch on zero / non-zero
let isBranch = 1, isTerminator = 1 in {
- def tCBZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br,
- "cbz\t$cmp, $target", []>,
- T1Misc<{0,0,?,1,?,?,?}>;
+ def tCBZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
+ "cbz\t$Rn, $target", []>,
+ T1Misc<{0,0,?,1,?,?,?}> {
+ // A8.6.27
+ bits<6> target;
+ bits<3> Rn;
+ let Inst{9} = target{5};
+ let Inst{7-3} = target{4-0};
+ let Inst{2-0} = Rn;
+ }
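The six target bits (the branch offset in bytes, divided by two) straddle the fixed Inst{8} bit, which is easy to misread; a sketch, assuming T1Misc yields the 0b1011_00x1 CBZ prefix:

    #include <stdint.h>

    /* Sketch: scatter the 6-bit cb target around the fixed Inst{8} bit. */
    static uint16_t encodeCBZ(unsigned Rn, unsigned target) {
        uint16_t insn = 0xB100;             /* 1011 0001 0000 0000, assumed */
        insn |= ((target >> 5) & 1u) << 9;  /* Inst{9}   = target{5} */
        insn |= (target & 0x1Fu) << 3;      /* Inst{7-3} = target{4-0} */
        insn |= Rn & 0x7u;                  /* Inst{2-0} = Rn */
        return insn;       /* cbz r2 with encoded offset 8: target = 4 -> 0xB122 */
    }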
-  def tCBNZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br,
-                  "cbnz\t$cmp, $target", []>,
+  def tCBNZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
+                  "cbnz\t$Rn, $target", []>,
- T1Misc<{1,0,?,1,?,?,?}>;
+ T1Misc<{1,0,?,1,?,?,?}> {
+ // A8.6.27
+ bits<6> target;
+ bits<3> Rn;
+ let Inst{9} = target{5};
+ let Inst{7-3} = target{4-0};
+ let Inst{2-0} = Rn;
+ }
}
// A8.6.218 Supervisor Call (Software Interrupt) -- for disassembly only
// A8.6.16 B: Encoding T1
// If Inst{11-8} == 0b1111 then SEE SVC
-let isCall = 1 in {
-def tSVC : T1pI<(outs), (ins i32imm:$svc), IIC_Br, "svc", "\t$svc", []>,
- Encoding16 {
+let isCall = 1, Uses = [SP] in
+def tSVC : T1pI<(outs), (ins i32imm:$imm), IIC_Br,
+ "svc", "\t$imm", []>, Encoding16 {
+ bits<8> imm;
let Inst{15-12} = 0b1101;
- let Inst{11-8} = 0b1111;
-}
+ let Inst{11-8} = 0b1111;
+ let Inst{7-0} = imm;
}
-// A8.6.16 B: Encoding T1
-// If Inst{11-8} == 0b1110 then UNDEFINED
-// FIXME: Temporary emitted as raw bytes until this pseudo-op will be added to
-// binutils
+// The assembler uses 0xDEFE for a trap instruction.
let isBarrier = 1, isTerminator = 1 in
def tTRAP : TI<(outs), (ins), IIC_Br,
- ".short 0xdefe ${:comment} trap", [(trap)]>, Encoding16 {
- let Inst{15-12} = 0b1101;
- let Inst{11-8} = 0b1110;
+ "trap", [(trap)]>, Encoding16 {
+ let Inst = 0xdefe;
}
//===----------------------------------------------------------------------===//
// Load Store Instructions.
//
+// Loads: reg/reg and reg/imm5. Each defm below expands into a reg/reg "r"
+// variant and a reg/imm5 "i" variant (e.g. tLDRr and tLDRi).
let canFoldAsLoad = 1, isReMaterializable = 1 in
-def tLDR : T1pI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr), IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- [(set tGPR:$dst, (load t_addrmode_s4:$addr))]>,
- T1LdSt<0b100>;
-def tLDRi: T1pI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr), IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- []>,
- T1LdSt4Imm<{1,?,?}>;
-
-def tLDRB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr), IIC_iLoadr,
- "ldrb", "\t$dst, $addr",
- [(set tGPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>,
- T1LdSt<0b110>;
-def tLDRBi: T1pI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr), IIC_iLoadr,
- "ldrb", "\t$dst, $addr",
- []>,
- T1LdSt1Imm<{1,?,?}>;
-
-def tLDRH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr), IIC_iLoadr,
- "ldrh", "\t$dst, $addr",
- [(set tGPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>,
- T1LdSt<0b101>;
-def tLDRHi: T1pI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr), IIC_iLoadr,
- "ldrh", "\t$dst, $addr",
- []>,
- T1LdSt2Imm<{1,?,?}>;
+multiclass thumb_ld_rr_ri_enc<bits<3> reg_opc, bits<4> imm_opc,
+ Operand AddrMode_r, Operand AddrMode_i,
+ AddrMode am, InstrItinClass itin_r,
+ InstrItinClass itin_i, string asm,
+ PatFrag opnode> {
+ def r : // reg/reg
+ T1pILdStEncode<reg_opc,
+ (outs tGPR:$Rt), (ins AddrMode_r:$addr),
+ am, itin_r, asm, "\t$Rt, $addr",
+ [(set tGPR:$Rt, (opnode AddrMode_r:$addr))]>;
+ def i : // reg/imm5
+ T1pILdStEncodeImm<imm_opc, 1 /* Load */,
+ (outs tGPR:$Rt), (ins AddrMode_i:$addr),
+ am, itin_i, asm, "\t$Rt, $addr",
+ [(set tGPR:$Rt, (opnode AddrMode_i:$addr))]>;
+}
+// Stores: reg/reg and reg/imm5
+multiclass thumb_st_rr_ri_enc<bits<3> reg_opc, bits<4> imm_opc,
+ Operand AddrMode_r, Operand AddrMode_i,
+ AddrMode am, InstrItinClass itin_r,
+ InstrItinClass itin_i, string asm,
+ PatFrag opnode> {
+ def r : // reg/reg
+ T1pILdStEncode<reg_opc,
+ (outs), (ins tGPR:$Rt, AddrMode_r:$addr),
+ am, itin_r, asm, "\t$Rt, $addr",
+ [(opnode tGPR:$Rt, AddrMode_r:$addr)]>;
+ def i : // reg/imm5
+ T1pILdStEncodeImm<imm_opc, 0 /* Store */,
+ (outs), (ins tGPR:$Rt, AddrMode_i:$addr),
+ am, itin_i, asm, "\t$Rt, $addr",
+ [(opnode tGPR:$Rt, AddrMode_i:$addr)]>;
+}
+
+// A8.6.57 & A8.6.60
+defm tLDR : thumb_ld_rr_ri_enc<0b100, 0b0110, t_addrmode_rrs4,
+ t_addrmode_is4, AddrModeT1_4,
+ IIC_iLoad_r, IIC_iLoad_i, "ldr",
+ UnOpFrag<(load node:$Src)>>;
+
+// A8.6.64 & A8.6.61
+defm tLDRB : thumb_ld_rr_ri_enc<0b110, 0b0111, t_addrmode_rrs1,
+ t_addrmode_is1, AddrModeT1_1,
+ IIC_iLoad_bh_r, IIC_iLoad_bh_i, "ldrb",
+ UnOpFrag<(zextloadi8 node:$Src)>>;
+
+// A8.6.76 & A8.6.73
+defm tLDRH : thumb_ld_rr_ri_enc<0b101, 0b1000, t_addrmode_rrs2,
+ t_addrmode_is2, AddrModeT1_2,
+ IIC_iLoad_bh_r, IIC_iLoad_bh_i, "ldrh",
+ UnOpFrag<(zextloadi16 node:$Src)>>;
let AddedComplexity = 10 in
-def tLDRSB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr,
+def tLDRSB : // A8.6.80
+ T1pILdStEncode<0b011, (outs tGPR:$dst), (ins t_addrmode_rr:$addr),
+ AddrModeT1_1, IIC_iLoad_bh_r,
"ldrsb", "\t$dst, $addr",
- [(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>,
- T1LdSt<0b011>;
+ [(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>;
let AddedComplexity = 10 in
-def tLDRSH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr,
+def tLDRSH : // A8.6.84
+ T1pILdStEncode<0b111, (outs tGPR:$dst), (ins t_addrmode_rr:$addr),
+ AddrModeT1_2, IIC_iLoad_bh_r,
"ldrsh", "\t$dst, $addr",
- [(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>,
- T1LdSt<0b111>;
+ [(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>;
let canFoldAsLoad = 1 in
-def tLDRspi : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr",
- [(set tGPR:$dst, (load t_addrmode_sp:$addr))]>,
- T1LdStSP<{1,?,?}>;
+def tLDRspi : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i,
+ "ldr", "\t$Rt, $addr",
+ [(set tGPR:$Rt, (load t_addrmode_sp:$addr))]>,
+ T1LdStSP<{1,?,?}> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-8} = Rt;
+ let Inst{7-0} = addr;
+}
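For the SP-relative form the whole offset fits in one byte (the addressing mode has already divided it by four); a sketch, assuming T1LdStSP supplies the 0b10011 LDR(SP) prefix in Inst{15-11}:

    #include <stdint.h>

    /* Sketch: "ldr Rt, [sp, #imm * 4]" from the Rt/addr assignments above. */
    static uint16_t encodeLDRsp(unsigned Rt, unsigned imm8) {
        uint16_t insn = 0x9800;      /* 10011 000 00000000, assumed prefix */
        insn |= (Rt & 0x7u) << 8;    /* Inst{10-8} = Rt */
        insn |= imm8 & 0xFFu;        /* Inst{7-0}  = addr, offset / 4 */
        return insn;                 /* ldr r0, [sp, #4] -> 0x9801 */
    }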
// Special instruction for restore. It cannot clobber the condition register
// when it's expanded by eliminateCallFramePseudoInstr().
let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1 in
-def tRestore : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr", []>,
- T1LdStSP<{1,?,?}>;
+// FIXME: Pseudo for tLDRspi
+def tRestore : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i,
+               "ldr", "\t$Rt, $addr", []>,
+ T1LdStSP<{1,?,?}> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-8} = Rt;
+ let Inst{7-0} = addr;
+}
// Load tconstpool
// FIXME: Use ldr.n to work around a Darwin assembler bug.
let canFoldAsLoad = 1, isReMaterializable = 1 in
-def tLDRpci : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- "ldr", ".n\t$dst, $addr",
- [(set tGPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>,
- T1Encoding<{0,1,0,0,1,?}>; // A6.2 & A8.6.59
-
-// Special LDR for loads from non-pc-relative constpools.
-let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1,
- isReMaterializable = 1 in
-def tLDRcp : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr", []>,
- T1LdStSP<{1,?,?}>;
-
-def tSTR : T1pI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr), IIC_iStorer,
- "str", "\t$src, $addr",
- [(store tGPR:$src, t_addrmode_s4:$addr)]>,
- T1LdSt<0b000>;
-def tSTRi: T1pI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr), IIC_iStorer,
- "str", "\t$src, $addr",
- []>,
- T1LdSt4Imm<{0,?,?}>;
-
-def tSTRB : T1pI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr), IIC_iStorer,
- "strb", "\t$src, $addr",
- [(truncstorei8 tGPR:$src, t_addrmode_s1:$addr)]>,
- T1LdSt<0b010>;
-def tSTRBi: T1pI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr), IIC_iStorer,
- "strb", "\t$src, $addr",
- []>,
- T1LdSt1Imm<{0,?,?}>;
-
-def tSTRH : T1pI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr), IIC_iStorer,
- "strh", "\t$src, $addr",
- [(truncstorei16 tGPR:$src, t_addrmode_s2:$addr)]>,
- T1LdSt<0b001>;
-def tSTRHi: T1pI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr), IIC_iStorer,
- "strh", "\t$src, $addr",
- []>,
- T1LdSt2Imm<{0,?,?}>;
-
-def tSTRspi : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei,
- "str", "\t$src, $addr",
- [(store tGPR:$src, t_addrmode_sp:$addr)]>,
- T1LdStSP<{0,?,?}>;
-
-let mayStore = 1, neverHasSideEffects = 1 in {
-// Special instruction for spill. It cannot clobber condition register
-// when it's expanded by eliminateCallFramePseudoInstr().
-def tSpill : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei,
+def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
+ "ldr", ".n\t$Rt, $addr",
+ [(set tGPR:$Rt, (load (ARMWrapper tconstpool:$addr)))]>,
+ T1Encoding<{0,1,0,0,1,?}> {
+ // A6.2 & A8.6.59
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-8} = Rt;
+ let Inst{7-0} = addr;
+}
+
+// A8.6.194 & A8.6.192
+defm tSTR : thumb_st_rr_ri_enc<0b000, 0b0110, t_addrmode_rrs4,
+ t_addrmode_is4, AddrModeT1_4,
+ IIC_iStore_r, IIC_iStore_i, "str",
+ BinOpFrag<(store node:$LHS, node:$RHS)>>;
+
+// A8.6.197 & A8.6.195
+defm tSTRB : thumb_st_rr_ri_enc<0b010, 0b0111, t_addrmode_rrs1,
+ t_addrmode_is1, AddrModeT1_1,
+ IIC_iStore_bh_r, IIC_iStore_bh_i, "strb",
+ BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
+
+// A8.6.207 & A8.6.205
+defm tSTRH : thumb_st_rr_ri_enc<0b001, 0b1000, t_addrmode_rrs2,
+ t_addrmode_is2, AddrModeT1_2,
+ IIC_iStore_bh_r, IIC_iStore_bh_i, "strh",
+ BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
+
+
+def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i,
+ "str", "\t$Rt, $addr",
+ [(store tGPR:$Rt, t_addrmode_sp:$addr)]>,
+ T1LdStSP<{0,?,?}> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-8} = Rt;
+ let Inst{7-0} = addr;
+}
+
+let mayStore = 1, neverHasSideEffects = 1 in
+// Special instruction for spill. It cannot clobber the condition register when
+// it's expanded by eliminateCallFramePseudoInstr().
+// FIXME: Pseudo for tSTRspi
+def tSpill : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i,
-               "str", "\t$src, $addr", []>,
+               "str", "\t$Rt, $addr", []>,
- T1LdStSP<{0,?,?}>;
+ T1LdStSP<{0,?,?}> {
+ bits<3> Rt;
+ bits<8> addr;
+ let Inst{10-8} = Rt;
+ let Inst{7-0} = addr;
}
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
-// These requires base address to be written back or one of the loaded regs.
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-def tLDM : T1I<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$dsts, variable_ops),
- IIC_iLoadm,
- "ldm${addr:submode}${p}\t$addr, $dsts", []>,
- T1Encoding<{1,1,0,0,1,?}>; // A6.2 & A8.6.53
-
-def tLDM_UPD : T1It<(outs tGPR:$wb),
- (ins addrmode4:$addr, pred:$p, reglist:$dsts, variable_ops),
- IIC_iLoadm,
- "ldm${addr:submode}${p}\t$addr!, $dsts",
- "$addr.addr = $wb", []>,
- T1Encoding<{1,1,0,0,1,?}>; // A6.2 & A8.6.53
-} // mayLoad, neverHasSideEffects = 1, hasExtraDefRegAllocReq
-
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
-def tSTM_UPD : T1It<(outs tGPR:$wb),
- (ins addrmode4:$addr, pred:$p, reglist:$srcs, variable_ops),
- IIC_iStorem,
- "stm${addr:submode}${p}\t$addr!, $srcs",
- "$addr.addr = $wb", []>,
- T1Encoding<{1,1,0,0,0,?}>; // A6.2 & A8.6.189
+multiclass thumb_ldst_mult<string asm, InstrItinClass itin,
+ InstrItinClass itin_upd, bits<6> T1Enc,
+ bit L_bit> {
+ def IA :
+ T1I<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin, !strconcat(asm, "ia${p}\t$Rn, $regs"), []>,
+ T1Encoding<T1Enc> {
+ bits<3> Rn;
+ bits<8> regs;
+ let Inst{10-8} = Rn;
+ let Inst{7-0} = regs;
+ }
+ def IA_UPD :
+ T1It<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin_upd, !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []>,
+ T1Encoding<T1Enc> {
+ bits<3> Rn;
+ bits<8> regs;
+ let Inst{10-8} = Rn;
+ let Inst{7-0} = regs;
+ }
+}
+
+// These require the base address to be written back, or to be one of the
+// loaded regs.
+let neverHasSideEffects = 1 in {
+
+let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
+defm tLDM : thumb_ldst_mult<"ldm", IIC_iLoad_m, IIC_iLoad_mu,
+ {1,1,0,0,1,?}, 1>;
+
+let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
+defm tSTM : thumb_ldst_mult<"stm", IIC_iStore_m, IIC_iStore_mu,
+ {1,1,0,0,0,?}, 0>;
+
+} // neverHasSideEffects
let mayLoad = 1, Uses = [SP], Defs = [SP], hasExtraDefRegAllocReq = 1 in
-def tPOP : T1I<(outs), (ins pred:$p, reglist:$dsts, variable_ops), IIC_Br,
- "pop${p}\t$dsts", []>,
- T1Misc<{1,1,0,?,?,?,?}>;
+def tPOP : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops),
+ IIC_iPop,
+ "pop${p}\t$regs", []>,
+ T1Misc<{1,1,0,?,?,?,?}> {
+ bits<16> regs;
+ let Inst{8} = regs{15};
+ let Inst{7-0} = regs{7-0};
+}
let mayStore = 1, Uses = [SP], Defs = [SP], hasExtraSrcRegAllocReq = 1 in
-def tPUSH : T1I<(outs), (ins pred:$p, reglist:$srcs, variable_ops), IIC_Br,
- "push${p}\t$srcs", []>,
- T1Misc<{0,1,0,?,?,?,?}>;
+def tPUSH : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops),
+ IIC_iStore_m,
+ "push${p}\t$regs", []>,
+ T1Misc<{0,1,0,?,?,?,?}> {
+ bits<16> regs;
+ let Inst{8} = regs{14};
+ let Inst{7-0} = regs{7-0};
+}
//===----------------------------------------------------------------------===//
// Arithmetic Instructions.
//
+// Helper classes for encoding T1pI patterns:
+class T1pIDPEncode<bits<4> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1pI<oops, iops, itin, opc, asm, pattern>,
+ T1DataProcessing<opA> {
+ bits<3> Rm;
+ bits<3> Rn;
+ let Inst{5-3} = Rm;
+ let Inst{2-0} = Rn;
+}
+class T1pIMiscEncode<bits<7> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1pI<oops, iops, itin, opc, asm, pattern>,
+ T1Misc<opA> {
+ bits<3> Rm;
+ bits<3> Rd;
+ let Inst{5-3} = Rm;
+ let Inst{2-0} = Rd;
+}
+
+// Helper classes for encoding T1sI patterns:
+class T1sIDPEncode<bits<4> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1sI<oops, iops, itin, opc, asm, pattern>,
+ T1DataProcessing<opA> {
+ bits<3> Rd;
+ bits<3> Rn;
+ let Inst{5-3} = Rn;
+ let Inst{2-0} = Rd;
+}
+class T1sIGenEncode<bits<5> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1sI<oops, iops, itin, opc, asm, pattern>,
+ T1General<opA> {
+ bits<3> Rm;
+ bits<3> Rn;
+ bits<3> Rd;
+ let Inst{8-6} = Rm;
+ let Inst{5-3} = Rn;
+ let Inst{2-0} = Rd;
+}
+class T1sIGenEncodeImm<bits<5> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1sI<oops, iops, itin, opc, asm, pattern>,
+ T1General<opA> {
+ bits<3> Rd;
+ bits<3> Rm;
+ let Inst{5-3} = Rm;
+ let Inst{2-0} = Rd;
+}
+
+// Helper classes for encoding T1sIt patterns:
+class T1sItDPEncode<bits<4> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1sIt<oops, iops, itin, opc, asm, pattern>,
+ T1DataProcessing<opA> {
+ bits<3> Rdn;
+ bits<3> Rm;
+ let Inst{5-3} = Rm;
+ let Inst{2-0} = Rdn;
+}
+class T1sItGenEncodeImm<bits<5> opA, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T1sIt<oops, iops, itin, opc, asm, pattern>,
+ T1General<opA> {
+ bits<3> Rdn;
+ bits<8> imm8;
+ let Inst{10-8} = Rdn;
+ let Inst{7-0} = imm8;
+}
+
// Add with carry register
let isCommutable = 1, Uses = [CPSR] in
-def tADC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "adc", "\t$dst, $rhs",
- [(set tGPR:$dst, (adde tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0101>;
+def tADC : // A8.6.2
+ T1sItDPEncode<0b0101, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm), IIC_iALUr,
+ "adc", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (adde tGPR:$Rn, tGPR:$Rm))]>;
// Add immediate
-def tADDi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "add", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm0_7:$rhs))]>,
- T1General<0b01110>;
+def tADDi3 : // A8.6.4 T1
+ T1sIGenEncodeImm<0b01110, (outs tGPR:$Rd), (ins tGPR:$Rm, i32imm:$imm3), IIC_iALUi,
+ "add", "\t$Rd, $Rm, $imm3",
+ [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7:$imm3))]> {
+ bits<3> imm3;
+ let Inst{8-6} = imm3;
+}
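A quick worked check of the T1sIGenEncodeImm layout (a sketch; T1General is assumed to place its five opcode bits in Inst{13-9} under a 0b00 prefix):

    #include <stdint.h>

    /* Sketch: "adds Rd, Rm, #imm3" (ADD immediate, encoding T1). */
    static uint16_t encodeADDi3(unsigned Rd, unsigned Rm, unsigned imm3) {
        uint16_t insn = 0x1C00;      /* 00 01110 000 000 000, assumed prefix */
        insn |= (imm3 & 0x7u) << 6;  /* Inst{8-6} = imm3 */
        insn |= (Rm & 0x7u) << 3;    /* Inst{5-3} = Rm   */
        insn |= Rd & 0x7u;           /* Inst{2-0} = Rd   */
        return insn;                 /* adds r0, r1, #2 -> 0x1C88 */
    }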
-def tADDi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "add", "\t$dst, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm8_255:$rhs))]>,
- T1General<{1,1,0,?,?}>;
+def tADDi8 : // A8.6.4 T2
+ T1sItGenEncodeImm<{1,1,0,?,?}, (outs tGPR:$Rdn), (ins tGPR:$Rn, i32imm:$imm8),
+ IIC_iALUi,
+ "add", "\t$Rdn, $imm8",
+ [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255:$imm8))]>;
// Add register
let isCommutable = 1 in
-def tADDrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "add", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, tGPR:$rhs))]>,
- T1General<0b01100>;
+def tADDrr : // A8.6.6 T1
+ T1sIGenEncode<0b01100, (outs tGPR:$Rd), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iALUr,
+ "add", "\t$Rd, $Rn, $Rm",
+ [(set tGPR:$Rd, (add tGPR:$Rn, tGPR:$Rm))]>;
let neverHasSideEffects = 1 in
-def tADDhirr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- "add", "\t$dst, $rhs", []>,
- T1Special<{0,0,?,?}>;
+def tADDhirr : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPR:$Rm), IIC_iALUr,
+ "add", "\t$Rdn, $Rm", []>,
+ T1Special<{0,0,?,?}> {
+ // A8.6.6 T2
+ bits<4> Rdn;
+ bits<4> Rm;
+ let Inst{7} = Rdn{3};
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = Rdn{2-0};
+}
-// And register
+// AND register
let isCommutable = 1 in
-def tAND : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "and", "\t$dst, $rhs",
- [(set tGPR:$dst, (and tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0000>;
+def tAND : // A8.6.12
+ T1sItDPEncode<0b0000, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iBITr,
+ "and", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (and tGPR:$Rn, tGPR:$Rm))]>;
// ASR immediate
-def tASRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "asr", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (sra tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,1,0,?,?}>;
+def tASRri : // A8.6.14
+ T1sIGenEncodeImm<{0,1,0,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, i32imm:$imm5),
+ IIC_iMOVsi,
+ "asr", "\t$Rd, $Rm, $imm5",
+ [(set tGPR:$Rd, (sra tGPR:$Rm, (i32 imm:$imm5)))]> {
+ bits<5> imm5;
+ let Inst{10-6} = imm5;
+}
// ASR register
-def tASRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "asr", "\t$dst, $rhs",
- [(set tGPR:$dst, (sra tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0100>;
+def tASRrr : // A8.6.15
+ T1sItDPEncode<0b0100, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iMOVsr,
+ "asr", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (sra tGPR:$Rn, tGPR:$Rm))]>;
// BIC register
-def tBIC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "bic", "\t$dst, $rhs",
- [(set tGPR:$dst, (and tGPR:$lhs, (not tGPR:$rhs)))]>,
- T1DataProcessing<0b1110>;
+def tBIC : // A8.6.20
+ T1sItDPEncode<0b1110, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iBITr,
+ "bic", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (and tGPR:$Rn, (not tGPR:$Rm)))]>;
// CMN register
-let Defs = [CPSR] in {
+let isCompare = 1, Defs = [CPSR] in {
//FIXME: Disable CMN, as CCodes are backwards from compare expectations
// Compare-to-zero still works out, just not the relationals
-//def tCMN : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
-// "cmn", "\t$lhs, $rhs",
-// [(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>,
-// T1DataProcessing<0b1011>;
-def tCMNz : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmn", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, (ineg tGPR:$rhs))]>,
- T1DataProcessing<0b1011>;
-}
+//def tCMN : // A8.6.33
+// T1pIDPEncode<0b1011, (outs), (ins tGPR:$lhs, tGPR:$rhs),
+// IIC_iCMPr,
+// "cmn", "\t$lhs, $rhs",
+// [(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>;
+
+def tCMNz : // A8.6.33
+ T1pIDPEncode<0b1011, (outs), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iCMPr,
+ "cmn", "\t$Rn, $Rm",
+ [(ARMcmpZ tGPR:$Rn, (ineg tGPR:$Rm))]>;
+
+} // isCompare = 1, Defs = [CPSR]
// CMP immediate
-let Defs = [CPSR] in {
-def tCMPi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmp tGPR:$lhs, imm0_255:$rhs)]>,
- T1General<{1,0,1,?,?}>;
-def tCMPzi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, imm0_255:$rhs)]>,
- T1General<{1,0,1,?,?}>;
+let isCompare = 1, Defs = [CPSR] in {
+def tCMPi8 : T1pI<(outs), (ins tGPR:$Rn, i32imm:$imm8), IIC_iCMPi,
+ "cmp", "\t$Rn, $imm8",
+ [(ARMcmp tGPR:$Rn, imm0_255:$imm8)]>,
+ T1General<{1,0,1,?,?}> {
+ // A8.6.35
+ bits<3> Rn;
+ bits<8> imm8;
+ let Inst{10-8} = Rn;
+ let Inst{7-0} = imm8;
}
// CMP register
-let Defs = [CPSR] in {
-def tCMPr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmp tGPR:$lhs, tGPR:$rhs)]>,
- T1DataProcessing<0b1010>;
-def tCMPzr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, tGPR:$rhs)]>,
- T1DataProcessing<0b1010>;
-
-def tCMPhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs", []>,
- T1Special<{0,1,?,?}>;
-def tCMPzhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs", []>,
- T1Special<{0,1,?,?}>;
+def tCMPr : // A8.6.36 T1
+ T1pIDPEncode<0b1010, (outs), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iCMPr,
+ "cmp", "\t$Rn, $Rm",
+ [(ARMcmp tGPR:$Rn, tGPR:$Rm)]>;
+
+def tCMPhir : T1pI<(outs), (ins GPR:$Rn, GPR:$Rm), IIC_iCMPr,
+ "cmp", "\t$Rn, $Rm", []>,
+ T1Special<{0,1,?,?}> {
+ // A8.6.36 T2
+ bits<4> Rm;
+ bits<4> Rn;
+ let Inst{7} = Rn{3};
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = Rn{2-0};
}
+} // isCompare = 1, Defs = [CPSR]
// XOR register
let isCommutable = 1 in
-def tEOR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "eor", "\t$dst, $rhs",
- [(set tGPR:$dst, (xor tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0001>;
+def tEOR : // A8.6.45
+ T1sItDPEncode<0b0001, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iBITr,
+ "eor", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (xor tGPR:$Rn, tGPR:$Rm))]>;
// LSL immediate
-def tLSLri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "lsl", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (shl tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,0,0,?,?}>;
+def tLSLri : // A8.6.88
+ T1sIGenEncodeImm<{0,0,0,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, i32imm:$imm5),
+ IIC_iMOVsi,
+ "lsl", "\t$Rd, $Rm, $imm5",
+ [(set tGPR:$Rd, (shl tGPR:$Rm, (i32 imm:$imm5)))]> {
+ bits<5> imm5;
+ let Inst{10-6} = imm5;
+}
// LSL register
-def tLSLrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "lsl", "\t$dst, $rhs",
- [(set tGPR:$dst, (shl tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0010>;
+def tLSLrr : // A8.6.89
+ T1sItDPEncode<0b0010, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iMOVsr,
+ "lsl", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (shl tGPR:$Rn, tGPR:$Rm))]>;
// LSR immediate
-def tLSRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "lsr", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (srl tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,0,1,?,?}>;
+def tLSRri : // A8.6.90
+ T1sIGenEncodeImm<{0,0,1,?,?}, (outs tGPR:$Rd), (ins tGPR:$Rm, i32imm:$imm5),
+ IIC_iMOVsi,
+ "lsr", "\t$Rd, $Rm, $imm5",
+ [(set tGPR:$Rd, (srl tGPR:$Rm, (i32 imm:$imm5)))]> {
+ bits<5> imm5;
+ let Inst{10-6} = imm5;
+}
// LSR register
-def tLSRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "lsr", "\t$dst, $rhs",
- [(set tGPR:$dst, (srl tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0011>;
-
-// move register
-def tMOVi8 : T1sI<(outs tGPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "mov", "\t$dst, $src",
- [(set tGPR:$dst, imm0_255:$src)]>,
- T1General<{1,0,0,?,?}>;
+def tLSRrr : // A8.6.91
+ T1sItDPEncode<0b0011, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iMOVsr,
+ "lsr", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (srl tGPR:$Rn, tGPR:$Rm))]>;
+
+// Move register
+let isMoveImm = 1 in
+def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins i32imm:$imm8), IIC_iMOVi,
+ "mov", "\t$Rd, $imm8",
+ [(set tGPR:$Rd, imm0_255:$imm8)]>,
+ T1General<{1,0,0,?,?}> {
+ // A8.6.96
+ bits<3> Rd;
+ bits<8> imm8;
+ let Inst{10-8} = Rd;
+ let Inst{7-0} = imm8;
+}
// TODO: A7-73: MOV(2) - mov setting flag.
-
let neverHasSideEffects = 1 in {
// FIXME: Make this predicable.
-def tMOVr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<0b1000>;
+def tMOVr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
+ "mov\t$Rd, $Rm", []>,
+ T1Special<0b1000> {
+ // A8.6.97
+ bits<4> Rd;
+ bits<4> Rm;
+ // Bits {7-6} are encoded by the T1Special value.
+ let Inst{5-3} = Rm{2-0};
+ let Inst{2-0} = Rd{2-0};
+}
let Defs = [CPSR] in
-def tMOVSr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "movs\t$dst, $src", []>, Encoding16 {
+def tMOVSr : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
+ "movs\t$Rd, $Rm", []>, Encoding16 {
+ // A8.6.97
+ bits<3> Rd;
+ bits<3> Rm;
let Inst{15-6} = 0b0000000000;
+ let Inst{5-3} = Rm;
+ let Inst{2-0} = Rd;
}
// FIXME: Make these predicable.
-def tMOVgpr2tgpr : T1I<(outs tGPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,0,?}>;
-def tMOVtgpr2gpr : T1I<(outs GPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,?,0}>;
-def tMOVgpr2gpr : T1I<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,?,?}>;
+def tMOVgpr2tgpr : T1I<(outs tGPR:$Rd), (ins GPR:$Rm), IIC_iMOVr,
+ "mov\t$Rd, $Rm", []>,
+ T1Special<{1,0,0,?}> {
+ // A8.6.97
+ bits<4> Rd;
+ bits<4> Rm;
+ // Bit {7} is encoded by the T1Special value.
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = Rd{2-0};
+}
+def tMOVtgpr2gpr : T1I<(outs GPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
+ "mov\t$Rd, $Rm", []>,
+ T1Special<{1,0,?,0}> {
+ // A8.6.97
+ bits<4> Rd;
+ bits<4> Rm;
+ // Bit {6} is encoded by the T1Special value.
+ let Inst{7} = Rd{3};
+ let Inst{5-3} = Rm{2-0};
+ let Inst{2-0} = Rd{2-0};
+}
+def tMOVgpr2gpr : T1I<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVr,
+ "mov\t$Rd, $Rm", []>,
+ T1Special<{1,0,?,?}> {
+ // A8.6.97
+ bits<4> Rd;
+ bits<4> Rm;
+ let Inst{7} = Rd{3};
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = Rd{2-0};
+}
} // neverHasSideEffects
-// multiply register
+// Multiply register
let isCommutable = 1 in
-def tMUL : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMUL32,
- "mul", "\t$dst, $rhs, $dst", /* A8.6.105 MUL Encoding T1 */
- [(set tGPR:$dst, (mul tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b1101>;
-
-// move inverse register
-def tMVN : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mvn", "\t$dst, $src",
- [(set tGPR:$dst, (not tGPR:$src))]>,
- T1DataProcessing<0b1111>;
-
-// bitwise or register
+def tMUL : // A8.6.105 T1
+ T1sItDPEncode<0b1101, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iMUL32,
+ "mul", "\t$Rdn, $Rm, $Rdn",
+ [(set tGPR:$Rdn, (mul tGPR:$Rn, tGPR:$Rm))]>;
+
+// Move inverse register
+def tMVN : // A8.6.107
+ T1sIDPEncode<0b1111, (outs tGPR:$Rd), (ins tGPR:$Rn), IIC_iMVNr,
+ "mvn", "\t$Rd, $Rn",
+ [(set tGPR:$Rd, (not tGPR:$Rn))]>;
+
+// Bitwise or register
let isCommutable = 1 in
-def tORR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "orr", "\t$dst, $rhs",
- [(set tGPR:$dst, (or tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b1100>;
-
-// swaps
-def tREV : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "rev", "\t$dst, $src",
- [(set tGPR:$dst, (bswap tGPR:$src))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,0,0,?}>;
-
-def tREV16 : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "rev16", "\t$dst, $src",
- [(set tGPR:$dst,
- (or (and (srl tGPR:$src, (i32 8)), 0xFF),
- (or (and (shl tGPR:$src, (i32 8)), 0xFF00),
- (or (and (srl tGPR:$src, (i32 8)), 0xFF0000),
- (and (shl tGPR:$src, (i32 8)), 0xFF000000)))))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,0,1,?}>;
-
-def tREVSH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "revsh", "\t$dst, $src",
- [(set tGPR:$dst,
- (sext_inreg
- (or (srl (and tGPR:$src, 0xFF00), (i32 8)),
- (shl tGPR:$src, (i32 8))), i16))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,1,1,?}>;
-
-// rotate right register
-def tROR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "ror", "\t$dst, $rhs",
- [(set tGPR:$dst, (rotr tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0111>;
-
-// negate register
-def tRSB : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iALUi,
- "rsb", "\t$dst, $src, #0",
- [(set tGPR:$dst, (ineg tGPR:$src))]>,
- T1DataProcessing<0b1001>;
+def tORR : // A8.6.114
+ T1sItDPEncode<0b1100, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iBITr,
+ "orr", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (or tGPR:$Rn, tGPR:$Rm))]>;
+
+// Swaps
+def tREV : // A8.6.134
+ T1pIMiscEncode<{1,0,1,0,0,0,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "rev", "\t$Rd, $Rm",
+ [(set tGPR:$Rd, (bswap tGPR:$Rm))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+def tREV16 : // A8.6.135
+ T1pIMiscEncode<{1,0,1,0,0,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "rev16", "\t$Rd, $Rm",
+ [(set tGPR:$Rd,
+ (or (and (srl tGPR:$Rm, (i32 8)), 0xFF),
+ (or (and (shl tGPR:$Rm, (i32 8)), 0xFF00),
+ (or (and (srl tGPR:$Rm, (i32 8)), 0xFF0000),
+ (and (shl tGPR:$Rm, (i32 8)), 0xFF000000)))))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+def tREVSH : // A8.6.136
+ T1pIMiscEncode<{1,0,1,0,1,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "revsh", "\t$Rd, $Rm",
+ [(set tGPR:$Rd,
+ (sext_inreg
+ (or (srl (and tGPR:$Rm, 0xFF00), (i32 8)),
+ (shl tGPR:$Rm, (i32 8))), i16))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+// Rotate right register
+def tROR : // A8.6.139
+ T1sItDPEncode<0b0111, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iMOVsr,
+ "ror", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (rotr tGPR:$Rn, tGPR:$Rm))]>;
+
+// Negate register
+def tRSB : // A8.6.141
+ T1sIDPEncode<0b1001, (outs tGPR:$Rd), (ins tGPR:$Rn),
+ IIC_iALUi,
+ "rsb", "\t$Rd, $Rn, #0",
+ [(set tGPR:$Rd, (ineg tGPR:$Rn))]>;
// Subtract with carry register
let Uses = [CPSR] in
-def tSBC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "sbc", "\t$dst, $rhs",
- [(set tGPR:$dst, (sube tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0110>;
+def tSBC : // A8.6.151
+ T1sItDPEncode<0b0110, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iALUr,
+ "sbc", "\t$Rdn, $Rm",
+ [(set tGPR:$Rdn, (sube tGPR:$Rn, tGPR:$Rm))]>;
// Subtract immediate
-def tSUBi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "sub", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm0_7_neg:$rhs))]>,
- T1General<0b01111>;
-
-def tSUBi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "sub", "\t$dst, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm8_255_neg:$rhs))]>,
- T1General<{1,1,1,?,?}>;
-
-// subtract register
-def tSUBrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "sub", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (sub tGPR:$lhs, tGPR:$rhs))]>,
- T1General<0b01101>;
+def tSUBi3 : // A8.6.210 T1
+ T1sIGenEncodeImm<0b01111, (outs tGPR:$Rd), (ins tGPR:$Rm, i32imm:$imm3),
+ IIC_iALUi,
+ "sub", "\t$Rd, $Rm, $imm3",
+ [(set tGPR:$Rd, (add tGPR:$Rm, imm0_7_neg:$imm3))]> {
+ bits<3> imm3;
+ let Inst{8-6} = imm3;
+}
-// TODO: A7-96: STMIA - store multiple.
+def tSUBi8 : // A8.6.210 T2
+ T1sItGenEncodeImm<{1,1,1,?,?}, (outs tGPR:$Rdn), (ins tGPR:$Rn, i32imm:$imm8),
+ IIC_iALUi,
+ "sub", "\t$Rdn, $imm8",
+ [(set tGPR:$Rdn, (add tGPR:$Rn, imm8_255_neg:$imm8))]>;
+
+// Subtract register
+def tSUBrr : // A8.6.212
+ T1sIGenEncode<0b01101, (outs tGPR:$Rd), (ins tGPR:$Rn, tGPR:$Rm),
+ IIC_iALUr,
+ "sub", "\t$Rd, $Rn, $Rm",
+ [(set tGPR:$Rd, (sub tGPR:$Rn, tGPR:$Rm))]>;
-// sign-extend byte
-def tSXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "sxtb", "\t$dst, $src",
- [(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,0,1,?}>;
-
-// sign-extend short
-def tSXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "sxth", "\t$dst, $src",
- [(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,0,0,?}>;
-
-// test
-let isCommutable = 1, Defs = [CPSR] in
-def tTST : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "tst", "\t$lhs, $rhs",
- [(ARMcmpZ (and tGPR:$lhs, tGPR:$rhs), 0)]>,
- T1DataProcessing<0b1000>;
-
-// zero-extend byte
-def tUXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "uxtb", "\t$dst, $src",
- [(set tGPR:$dst, (and tGPR:$src, 0xFF))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,1,1,?}>;
-
-// zero-extend short
-def tUXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "uxth", "\t$dst, $src",
- [(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,1,0,?}>;
+// TODO: A7-96: STMIA - store multiple.
+// Sign-extend byte
+def tSXTB : // A8.6.222
+ T1pIMiscEncode<{0,0,1,0,0,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "sxtb", "\t$Rd, $Rm",
+ [(set tGPR:$Rd, (sext_inreg tGPR:$Rm, i8))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+// Sign-extend short
+def tSXTH : // A8.6.224
+ T1pIMiscEncode<{0,0,1,0,0,0,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "sxth", "\t$Rd, $Rm",
+ [(set tGPR:$Rd, (sext_inreg tGPR:$Rm, i16))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+// Test
+let isCompare = 1, isCommutable = 1, Defs = [CPSR] in
+def tTST : // A8.6.230
+ T1pIDPEncode<0b1000, (outs), (ins tGPR:$Rn, tGPR:$Rm), IIC_iTSTr,
+ "tst", "\t$Rn, $Rm",
+ [(ARMcmpZ (and_su tGPR:$Rn, tGPR:$Rm), 0)]>;
+
+// Zero-extend byte
+def tUXTB : // A8.6.262
+ T1pIMiscEncode<{0,0,1,0,1,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "uxtb", "\t$Rd, $Rm",
+ [(set tGPR:$Rd, (and tGPR:$Rm, 0xFF))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+// Zero-extend short
+def tUXTH : // A8.6.264
+ T1pIMiscEncode<{0,0,1,0,1,0,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
+ IIC_iUNAr,
+ "uxth", "\t$Rd, $Rm",
+ [(set tGPR:$Rd, (and tGPR:$Rm, 0xFFFF))]>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
// Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC operation.
// Expanded after instruction selection into a branch sequence.
let usesCustomInserter = 1 in // Expanded after instruction selection.
def tMOVCCr_pseudo :
PseudoInst<(outs tGPR:$dst), (ins tGPR:$false, tGPR:$true, pred:$cc),
- NoItinerary, "${:comment} tMOVCCr $cc",
+ NoItinerary,
[/*(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, imm:$cc))*/]>;
// 16-bit movcc in IT blocks for Thumb2.
let neverHasSideEffects = 1 in {
-def tMOVCCr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iCMOVr,
- "mov", "\t$dst, $rhs", []>,
- T1Special<{1,0,?,?}>;
+def tMOVCCr : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPR:$Rm), IIC_iCMOVr,
+ "mov", "\t$Rdn, $Rm", []>,
+ T1Special<{1,0,?,?}> {
+ bits<4> Rdn;
+ bits<4> Rm;
+ let Inst{7} = Rdn{3};
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = Rdn{2-0};
+}
+
+let isMoveImm = 1 in
+def tMOVCCi : T1pIt<(outs tGPR:$Rdn), (ins tGPR:$Rn, i32imm:$Rm), IIC_iCMOVi,
+ "mov", "\t$Rdn, $Rm", []>,
+ T1General<{1,0,0,?,?}> {
+ bits<3> Rdn;
+ bits<8> Rm;
+ let Inst{10-8} = Rdn;
+ let Inst{7-0} = Rm;
+}
-def tMOVCCi : T1pIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMOVi,
- "mov", "\t$dst, $rhs", []>,
- T1General<{1,0,0,?,?}>;
} // neverHasSideEffects
// tLEApcrel - Load a pc-relative address into a register without offending the
// assembler.
-let neverHasSideEffects = 1 in {
-let isReMaterializable = 1 in
-def tLEApcrel : T1I<(outs tGPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
- "adr$p\t$dst, #$label", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-} // neverHasSideEffects
-def tLEApcrelJT : T1I<(outs tGPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p),
- IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
+def tADR : T1I<(outs tGPR:$Rd), (ins t_adrlabel:$addr, pred:$p),
+ IIC_iALUi, "adr{$p}\t$Rd, #$addr", []>,
+ T1Encoding<{1,0,1,0,0,?}> {
+ bits<3> Rd;
+ bits<8> addr;
+ let Inst{10-8} = Rd;
+ let Inst{7-0} = addr;
+}
+
+let neverHasSideEffects = 1, isReMaterializable = 1 in
+def tLEApcrel : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, pred:$p),
+ Size2Bytes, IIC_iALUi, []>;
+
+def tLEApcrelJT : tPseudoInst<(outs tGPR:$Rd),
+ (ins i32imm:$label, nohash_imm:$id, pred:$p),
+ Size2Bytes, IIC_iALUi, []>;
+
+//===----------------------------------------------------------------------===//
+// Move between coprocessor and ARM core register -- for disassembly only
+//
+
+class tMovRCopro<string opc, bit direction>
+ : T1Cop<(outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ !strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"),
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{27-24} = 0b1110;
+ let Inst{20} = direction;
+ let Inst{4} = 1;
+
+ bits<4> Rt;
+ bits<4> cop;
+ bits<3> opc1;
+ bits<3> opc2;
+ bits<4> CRm;
+ bits<4> CRn;
+
+ let Inst{15-12} = Rt;
+ let Inst{11-8} = cop;
+ let Inst{23-21} = opc1;
+ let Inst{7-5} = opc2;
+ let Inst{3-0} = CRm;
+ let Inst{19-16} = CRn;
+}
+
+def tMCR : tMovRCopro<"mcr", 0 /* from ARM core register to coprocessor */>;
+def tMRC : tMovRCopro<"mrc", 1 /* from coprocessor to ARM core register */>;
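The field placement mirrors the ARM-mode MCR/MRC layout; as a worked illustration (a sketch, assuming the T1Cop base class supplies 0b1110 in Inst{31-28}, which this hunk does not show):

    #include <stdint.h>

    /* Sketch: pack the tMovRCopro fields per the let statements above. */
    static uint32_t encodeMRC(unsigned cop, unsigned opc1, unsigned Rt,
                              unsigned CRn, unsigned CRm, unsigned opc2) {
        uint32_t insn = 0xE0000000u;   /* assumed T1Cop prefix */
        insn |= 0xEu << 24;            /* Inst{27-24} = 0b1110 */
        insn |= 1u << 20;              /* direction = 1, i.e. MRC */
        insn |= 1u << 4;               /* Inst{4} = 1 */
        insn |= (opc1 & 0x7u) << 21;   /* Inst{23-21} = opc1 */
        insn |= (CRn & 0xFu) << 16;    /* Inst{19-16} = CRn */
        insn |= (Rt & 0xFu) << 12;     /* Inst{15-12} = Rt */
        insn |= (cop & 0xFu) << 8;     /* Inst{11-8}  = cop */
        insn |= (opc2 & 0x7u) << 5;    /* Inst{7-5}   = opc2 */
        insn |= CRm & 0xFu;            /* Inst{3-0}   = CRm */
        return insn;   /* mrc p15, 0, r0, c0, c0, 0 -> 0xEE100F10 */
    }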
+
+class tMovRRCopro<string opc, bit direction>
+ : T1Cop<(outs), (ins p_imm:$cop, i32imm:$opc1, GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
+ !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"),
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{27-24} = 0b1100;
+ let Inst{23-21} = 0b010;
+ let Inst{20} = direction;
+
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<4> cop;
+ bits<4> opc1;
+ bits<4> CRm;
+
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+ let Inst{11-8} = cop;
+ let Inst{7-4} = opc1;
+ let Inst{3-0} = CRm;
+}
+
+def tMCRR : tMovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */>;
+def tMRRC : tMovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
+
+//===----------------------------------------------------------------------===//
+// Other Coprocessor Instructions. For disassembly only.
+//
+def tCDP : T1Cop<(outs), (ins p_imm:$cop, i32imm:$opc1,
+ c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ "cdp\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{27-24} = 0b1110;
+
+ bits<4> opc1;
+ bits<4> CRn;
+ bits<4> CRd;
+ bits<4> cop;
+ bits<3> opc2;
+ bits<4> CRm;
+
+ let Inst{3-0} = CRm;
+ let Inst{4} = 0;
+ let Inst{7-5} = opc2;
+ let Inst{11-8} = cop;
+ let Inst{15-12} = CRd;
+ let Inst{19-16} = CRn;
+ let Inst{23-20} = opc1;
+}
//===----------------------------------------------------------------------===//
// TLS Instructions
//
// __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1,
- Defs = [R0, LR] in {
- def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br,
- "bl\t__aeabi_read_tp",
- [(set R0, ARMthread_pointer)]>;
+let isCall = 1, Defs = [R0, LR], Uses = [SP] in
+def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br,
+ "bl\t__aeabi_read_tp",
+ [(set R0, ARMthread_pointer)]> {
+ // Encoding is 0xf7fffffe.
+ let Inst = 0xf7fffffe;
}
+//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
-// eh_sjlj_setjmp() is an instruction sequence to store the return
-// address and save #0 in R0 for the non-longjmp case.
-// Since by its nature we may be coming from some other function to get
-// here, and we're using the stack frame for the containing function to
-// save/restore registers, we can't keep anything live in regs across
-// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
-// when we get here from a longjmp(). We force everthing out of registers
-// except for our own input by listing the relevant registers in Defs. By
-// doing so, we also cause the prologue/epilogue code to actively preserve
-// all of the callee-saved resgisters, which is exactly what we want.
-// $val is a scratch register for our use.
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ], hasSideEffects = 1,
- isBarrier = 1 in {
- def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
- AddrModeNone, SizeSpecial, NoItinerary,
- "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
- "adds\t$val, #7\n\t"
- "str\t$val, [$src, #4]\n\t"
- "movs\tr0, #0\n\t"
- "b\t1f\n\t"
- "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
- "1:", "",
- [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
-}
+//
+
+// eh_sjlj_setjmp() is an instruction sequence to store the return address and
+// save #0 in R0 for the non-longjmp case. Since by its nature we may be coming
+// from some other function to get here, and we're using the stack frame for the
+// containing function to save/restore registers, we can't keep anything live in
+// regs across the eh_sjlj_setjmp(), else it will almost certainly have been
+// tromped upon when we get here from a longjmp(). We force everything out of
+// registers except for our own input by listing the relevant registers in
+// Defs. By doing so, we also cause the prologue/epilogue code to actively
+// preserve all of the callee-saved registers, which is exactly what we want.
+// $val is a scratch register for our use.
+let Defs = [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ],
+ hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in
+def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
+ AddrModeNone, SizeSpecial, NoItinerary, "","",
+ [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
// FIXME: Non-Darwin version(s)
-let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
- Defs = [ R7, LR, SP ] in {
+let isBarrier = 1, hasSideEffects = 1, isTerminator = 1, isCodeGenOnly = 1,
+ Defs = [ R7, LR, SP ] in
def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
- AddrModeNone, SizeSpecial, IndexModeNone,
- Pseudo, NoItinerary,
- "ldr\t$scratch, [$src, #8]\n\t"
- "mov\tsp, $scratch\n\t"
- "ldr\t$scratch, [$src, #4]\n\t"
- "ldr\tr7, [$src]\n\t"
- "bx\t$scratch", "",
- [(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsThumb, IsDarwin]>;
-}
+ AddrModeNone, SizeSpecial, IndexModeNone,
+ Pseudo, NoItinerary, "", "",
+ [(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
+ Requires<[IsThumb, IsDarwin]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
+// Comparisons
+def : T1Pat<(ARMcmpZ tGPR:$Rn, imm0_255:$imm8),
+ (tCMPi8 tGPR:$Rn, imm0_255:$imm8)>;
+def : T1Pat<(ARMcmpZ tGPR:$Rn, tGPR:$Rm),
+ (tCMPr tGPR:$Rn, tGPR:$Rm)>;
+
// Add with carry
def : T1Pat<(addc tGPR:$lhs, imm0_7:$rhs),
(tADDi3 tGPR:$lhs, imm0_7:$rhs)>;
@@ -991,27 +1492,42 @@ def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr_r9 GPR:$dst)>,
Requires<[IsThumb, HasV5T, IsDarwin]>;
// zextload i1 -> zextload i8
-def : T1Pat<(zextloadi1 t_addrmode_s1:$addr),
- (tLDRB t_addrmode_s1:$addr)>;
+def : T1Pat<(zextloadi1 t_addrmode_rrs1:$addr),
+ (tLDRBr t_addrmode_rrs1:$addr)>;
+def : T1Pat<(zextloadi1 t_addrmode_is1:$addr),
+ (tLDRBi t_addrmode_is1:$addr)>;
// extload -> zextload
-def : T1Pat<(extloadi1 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
-def : T1Pat<(extloadi8 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
-def : T1Pat<(extloadi16 t_addrmode_s2:$addr), (tLDRH t_addrmode_s2:$addr)>;
+def : T1Pat<(extloadi1 t_addrmode_rrs1:$addr), (tLDRBr t_addrmode_rrs1:$addr)>;
+def : T1Pat<(extloadi1 t_addrmode_is1:$addr), (tLDRBi t_addrmode_is1:$addr)>;
+def : T1Pat<(extloadi8 t_addrmode_rrs1:$addr), (tLDRBr t_addrmode_rrs1:$addr)>;
+def : T1Pat<(extloadi8 t_addrmode_is1:$addr), (tLDRBi t_addrmode_is1:$addr)>;
+def : T1Pat<(extloadi16 t_addrmode_rrs2:$addr), (tLDRHr t_addrmode_rrs2:$addr)>;
+def : T1Pat<(extloadi16 t_addrmode_is2:$addr), (tLDRHi t_addrmode_is2:$addr)>;
// If it's impossible to use [r,r] address mode for sextload, select to
// ldr{b|h} + sxt{b|h} instead.
-def : T1Pat<(sextloadi8 t_addrmode_s1:$addr),
- (tSXTB (tLDRB t_addrmode_s1:$addr))>,
- Requires<[IsThumb1Only, HasV6]>;
-def : T1Pat<(sextloadi16 t_addrmode_s2:$addr),
- (tSXTH (tLDRH t_addrmode_s2:$addr))>,
- Requires<[IsThumb1Only, HasV6]>;
-
-def : T1Pat<(sextloadi8 t_addrmode_s1:$addr),
- (tASRri (tLSLri (tLDRB t_addrmode_s1:$addr), 24), 24)>;
-def : T1Pat<(sextloadi16 t_addrmode_s1:$addr),
- (tASRri (tLSLri (tLDRH t_addrmode_s1:$addr), 16), 16)>;
+def : T1Pat<(sextloadi8 t_addrmode_is1:$addr),
+ (tSXTB (tLDRBi t_addrmode_is1:$addr))>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+def : T1Pat<(sextloadi8 t_addrmode_rrs1:$addr),
+ (tSXTB (tLDRBr t_addrmode_rrs1:$addr))>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
+ (tSXTH (tLDRHi t_addrmode_is2:$addr))>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+def : T1Pat<(sextloadi16 t_addrmode_rrs2:$addr),
+ (tSXTH (tLDRHr t_addrmode_rrs2:$addr))>,
+ Requires<[IsThumb, IsThumb1Only, HasV6]>;
+
+def : T1Pat<(sextloadi8 t_addrmode_rrs1:$addr),
+ (tASRri (tLSLri (tLDRBr t_addrmode_rrs1:$addr), 24), 24)>;
+def : T1Pat<(sextloadi8 t_addrmode_is1:$addr),
+ (tASRri (tLSLri (tLDRBi t_addrmode_is1:$addr), 24), 24)>;
+def : T1Pat<(sextloadi16 t_addrmode_rrs2:$addr),
+ (tASRri (tLSLri (tLDRHr t_addrmode_rrs2:$addr), 16), 16)>;
+def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
+ (tASRri (tLSLri (tLDRHi t_addrmode_is2:$addr), 16), 16)>;
// Large immediate handling.
@@ -1028,8 +1544,7 @@ def : T1Pat<(i32 imm0_255_comp:$src),
// scheduling.
let isReMaterializable = 1 in
def tLDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary,
- "${:comment} ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
+ NoItinerary,
[(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
imm:$cp))]>,
- Requires<[IsThumb1Only]>;
+ Requires<[IsThumb, IsThumb1Only]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 6ba0a44..0e01be5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -21,16 +21,12 @@ def it_mask : Operand<i32> {
let PrintMethod = "printThumbITMask";
}
-// Table branch address
-def tb_addrmode : Operand<i32> {
- let PrintMethod = "printTBAddrMode";
-}
-
// Shifted operands. No register controlled shifts for Thumb2.
// Note: We do not support rrx shifted operands yet.
def t2_so_reg : Operand<i32>, // reg imm
ComplexPattern<i32, 2, "SelectT2ShifterOperandReg",
[shl,srl,sra,rotr]> {
+ let EncoderMethod = "getT2SORegOpValue";
let PrintMethod = "printT2SOOperand";
let MIOperandInfo = (ops rGPR, i32imm);
}
@@ -47,11 +43,10 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
// t2_so_imm - Match a 32-bit immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
-// immediate splatted into multiple bytes of the word. t2_so_imm values are
-// represented in the imm field in the same 12-bit form that they are encoded
-// into t2_so_imm instructions: the 8-bit immediate is the least significant
-// bits [bits 0-7], the 4-bit shift/splat amount is the next 4 bits [bits 8-11].
-def t2_so_imm : Operand<i32>, PatLeaf<(imm), [{ return Pred_t2_so_imm(N); }]>;
+// immediate splatted into multiple bytes of the word.
+def t2_so_imm : Operand<i32>, PatLeaf<(imm), [{ return Pred_t2_so_imm(N); }]> {
+ let EncoderMethod = "getT2SOImmOpValue";
+}
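The "splatted into multiple bytes" forms are the part that usually needs spelling out. A sketch of just that subset of the check (illustration only; the real predicate lives in ARM_AM::getT2SOImmVal and also accepts the rotated 8-bit form):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: the four byte-splat forms of a Thumb-2 modified immediate. */
    static bool isT2SOImmSplat(uint32_t v) {
        uint32_t b = v & 0xFFu;
        return v == b                       /* 00000000 00000000 00000000 abcdefgh */
            || v == (b | (b << 16))         /* 00000000 abcdefgh 00000000 abcdefgh */
            || v == ((b << 8) | (b << 24))  /* abcdefgh 00000000 abcdefgh 00000000 */
            || v == b * 0x01010101u;        /* abcdefgh abcdefgh abcdefgh abcdefgh */
    }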
// t2_so_imm_not - Match an immediate that is a complement
// of a t2_so_imm.
@@ -63,7 +58,7 @@ def t2_so_imm_not : Operand<i32>,
// t2_so_imm_neg - Match an immediate that is a negation of a t2_so_imm.
def t2_so_imm_neg : Operand<i32>,
PatLeaf<(imm), [{
- return ARM_AM::getT2SOImmVal(-((int)N->getZExtValue())) != -1;
+ return ARM_AM::getT2SOImmVal(-((uint32_t)N->getZExtValue())) != -1;
}], t2_so_imm_neg_XFORM>;
// Break t2_so_imm's up into two pieces. This handles immediates with up to 16
@@ -128,27 +123,41 @@ def imm0_255_not : PatLeaf<(i32 imm), [{
// t2addrmode_imm12 := reg + imm12
def t2addrmode_imm12 : Operand<i32>,
ComplexPattern<i32, 2, "SelectT2AddrModeImm12", []> {
- let PrintMethod = "printT2AddrModeImm12Operand";
+ let PrintMethod = "printAddrModeImm12Operand";
+ let EncoderMethod = "getAddrModeImm12OpValue";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemMode5AsmOperand;
}
+// ADR instruction labels.
+def t2adrlabel : Operand<i32> {
+ let EncoderMethod = "getT2AdrLabelOpValue";
+}
+
+
// t2addrmode_imm8 := reg +/- imm8
def t2addrmode_imm8 : Operand<i32>,
ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> {
let PrintMethod = "printT2AddrModeImm8Operand";
+ let EncoderMethod = "getT2AddrModeImm8OpValue";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemMode5AsmOperand;
}
def t2am_imm8_offset : Operand<i32>,
- ComplexPattern<i32, 1, "SelectT2AddrModeImm8Offset", []>{
+ ComplexPattern<i32, 1, "SelectT2AddrModeImm8Offset",
+ [], [SDNPWantRoot]> {
let PrintMethod = "printT2AddrModeImm8OffsetOperand";
+ let EncoderMethod = "getT2AddrModeImm8OffsetOpValue";
+ let ParserMatchClass = MemMode5AsmOperand;
}
// t2addrmode_imm8s4 := reg +/- (imm8 << 2)
-def t2addrmode_imm8s4 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectT2AddrModeImm8s4", []> {
+def t2addrmode_imm8s4 : Operand<i32> {
let PrintMethod = "printT2AddrModeImm8s4Operand";
+ let EncoderMethod = "getT2AddrModeImm8s4OpValue";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
+ let ParserMatchClass = MemMode5AsmOperand;
}
def t2am_imm8s4_offset : Operand<i32> {
@@ -159,7 +168,9 @@ def t2am_imm8s4_offset : Operand<i32> {
def t2addrmode_so_reg : Operand<i32>,
ComplexPattern<i32, 3, "SelectT2AddrModeSoReg", []> {
let PrintMethod = "printT2AddrModeSoRegOperand";
+ let EncoderMethod = "getT2AddrModeSORegOpValue";
let MIOperandInfo = (ops GPR:$base, rGPR:$offsreg, i32imm:$offsimm);
+ let ParserMatchClass = MemMode5AsmOperand;
}
@@ -167,45 +178,294 @@ def t2addrmode_so_reg : Operand<i32>,
// Multiclass helpers...
//
+
+class T2OneRegImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<12> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
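All of these helper classes scatter a 12-bit modified-immediate field the same way (i into Inst{26}, imm3 into Inst{14-12}, imm8 into Inst{7-0}); the pattern in C, as a sketch:

    #include <stdint.h>

    /* Sketch: scatter a 12-bit i:imm3:imm8 field per the let statements above. */
    static uint32_t scatterT2Imm12(uint32_t insn, uint32_t imm12) {
        insn |= ((imm12 >> 11) & 0x1u) << 26;  /* Inst{26}    = imm{11}   */
        insn |= ((imm12 >> 8) & 0x7u) << 12;   /* Inst{14-12} = imm{10-8} */
        insn |= imm12 & 0xFFu;                 /* Inst{7-0}   = imm{7-0}  */
        return insn;
    }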
+
+
+class T2sOneRegImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
+
+class T2OneRegCmpImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rn;
+ bits<12> imm;
+
+ let Inst{19-16} = Rn;
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
+
+
+class T2OneRegShiftedReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<12> ShiftedRm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = ShiftedRm{3-0};
+ let Inst{5-4} = ShiftedRm{6-5};
+ let Inst{14-12} = ShiftedRm{11-9};
+ let Inst{7-6} = ShiftedRm{8-7};
+}
+
+class T2sOneRegShiftedReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<12> ShiftedRm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = ShiftedRm{3-0};
+ let Inst{5-4} = ShiftedRm{6-5};
+ let Inst{14-12} = ShiftedRm{11-9};
+ let Inst{7-6} = ShiftedRm{8-7};
+}
+
+class T2OneRegCmpShiftedReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rn;
+ bits<12> ShiftedRm;
+
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = ShiftedRm{3-0};
+ let Inst{5-4} = ShiftedRm{6-5};
+ let Inst{14-12} = ShiftedRm{11-9};
+ let Inst{7-6} = ShiftedRm{8-7};
+}
+
+class T2TwoReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = Rm;
+}
+
+class T2sTwoReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = Rm;
+}
+
+class T2TwoRegCmp<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = Rm;
+}
+
+
+class T2TwoRegImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
+
+class T2sTwoRegImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
+
+class T2TwoRegShiftImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<5> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = Rm;
+ let Inst{14-12} = imm{4-2};
+ let Inst{7-6} = imm{1-0};
+}
+
+class T2sTwoRegShiftImm<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rm;
+ bits<5> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = Rm;
+ let Inst{14-12} = imm{4-2};
+ let Inst{7-6} = imm{1-0};
+}
+
+class T2ThreeReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = Rm;
+}
+
+class T2sThreeReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = Rm;
+}
+
+class T2TwoRegShiftedReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> ShiftedRm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = ShiftedRm{3-0};
+ let Inst{5-4} = ShiftedRm{6-5};
+ let Inst{14-12} = ShiftedRm{11-9};
+ let Inst{7-6} = ShiftedRm{8-7};
+}
+
+class T2sTwoRegShiftedReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2sI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> ShiftedRm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = ShiftedRm{3-0};
+ let Inst{5-4} = ShiftedRm{6-5};
+ let Inst{14-12} = ShiftedRm{11-9};
+ let Inst{7-6} = ShiftedRm{8-7};
+}
+
+class T2FourReg<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+ bits<4> Ra;
+
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Ra;
+ let Inst{11-8} = Rd;
+ let Inst{3-0} = Rm;
+}
+
+class T2MulLong<bits<3> opc22_20, bits<4> opc7_4,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> RdLo;
+ bits<4> RdHi;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{31-23} = 0b111110111;
+ let Inst{22-20} = opc22_20;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = RdLo;
+ let Inst{11-8} = RdHi;
+ let Inst{7-4} = opc7_4;
+ let Inst{3-0} = Rm;
+}
+
+
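+// Illustration (a sketch, not quoted from this patch): these classes factor
+// the common operand encodings out of individual instruction definitions, so
+// a def built on, e.g., T2TwoReg supplies only its opcode bits while the
+// Rd/Rm fields are inherited:
+//   def tEXAMPLE : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVr,
+//                           "mov", ".w\t$Rd, $Rm", []> {
+//     let Inst{31-20} = ...; // opcode fields only; Rd/Rm come from T2TwoReg
+//   }
+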
/// T2I_un_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
/// unary operation that produces a value. These are predicable and can be
/// changed to modify CPSR.
-multiclass T2I_un_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Cheap = 0, bit ReMat = 0> {
+multiclass T2I_un_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Cheap = 0, bit ReMat = 0> {
// shifted imm
- def i : T2sI<(outs rGPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi,
- opc, "\t$dst, $src",
- [(set rGPR:$dst, (opnode t2_so_imm:$src))]> {
+ def i : T2sOneRegImm<(outs rGPR:$Rd), (ins t2_so_imm:$imm), iii,
+ opc, "\t$Rd, $imm",
+ [(set rGPR:$Rd, (opnode t2_so_imm:$imm))]> {
let isAsCheapAsAMove = Cheap;
let isReMaterializable = ReMat;
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
}
// register
- def r : T2sI<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVr,
- opc, ".w\t$dst, $src",
- [(set rGPR:$dst, (opnode rGPR:$src))]> {
+ def r : T2sTwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), iir,
+ opc, ".w\t$Rd, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
let Inst{14-12} = 0b000; // imm3
let Inst{7-6} = 0b00; // imm2
let Inst{5-4} = 0b00; // type
}
// shifted register
- def s : T2sI<(outs rGPR:$dst), (ins t2_so_reg:$src), IIC_iMOVsi,
- opc, ".w\t$dst, $src",
- [(set rGPR:$dst, (opnode t2_so_reg:$src))]> {
+ def s : T2sOneRegShiftedReg<(outs rGPR:$Rd), (ins t2_so_reg:$ShiftedRm), iis,
+ opc, ".w\t$Rd, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
}
}
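+
+// Usage sketch (illustrative; the opcode and itinerary names are plausible,
+// not quoted from this hunk): MVN now passes its itineraries explicitly:
+//   defm t2MVN : T2I_un_irs<0b0011, "mvn", IIC_iMVNi, IIC_iMVNr, IIC_iMVNsi,
+//                           UnOpFrag<(not node:$Src)>, 1, 1>;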
@@ -213,94 +473,97 @@ multiclass T2I_un_irs<bits<4> opcod, string opc, PatFrag opnode,
/// T2I_bin_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
/// binary operation that produces a value. These are predicable and can be
/// changed to modify CPSR.
-multiclass T2I_bin_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0, string wide = ""> {
+multiclass T2I_bin_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0, string wide = ""> {
// shifted imm
- def ri : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_imm:$rhs))]> {
+ def ri : T2sTwoRegImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), iii,
+ opc, "\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_imm:$imm))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{15} = 0;
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, rGPR:$rhs), IIC_iALUr,
- opc, !strconcat(wide, "\t$dst, $lhs, $rhs"),
- [(set rGPR:$dst, (opnode rGPR:$lhs, rGPR:$rhs))]> {
+ def rr : T2sThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), iir,
+ opc, !strconcat(wide, "\t$Rd, $Rn, $Rm"),
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{14-12} = 0b000; // imm3
let Inst{7-6} = 0b00; // imm2
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, !strconcat(wide, "\t$dst, $lhs, $rhs"),
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_reg:$rhs))]> {
+ def rs : T2sTwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm), iis,
+ opc, !strconcat(wide, "\t$Rd, $Rn, $ShiftedRm"),
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
}
}
/// T2I_bin_w_irs - Same as T2I_bin_irs except these operations need
// the ".w" prefix to indicate that they are wide.
-multiclass T2I_bin_w_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> :
- T2I_bin_irs<opcod, opc, opnode, Commutable, ".w">;
+multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0> :
+ T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, Commutable, ".w">;
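+
+// Usage sketch (illustrative): a wide binary op such as AND might be
+// instantiated as:
+//   defm t2AND : T2I_bin_w_irs<0b0000, "and", IIC_iBITi, IIC_iBITr,
+//                              IIC_iBITsi,
+//                              BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+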
/// T2I_rbin_irs - Same as T2I_bin_irs except the order of the operands is
/// reversed. The 'rr' form is only defined for the disassembler; for codegen
/// it is equivalent to the T2I_bin_irs counterpart.
multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
// shifted imm
- def ri : T2sI<(outs rGPR:$dst), (ins rGPR:$rhs, t2_so_imm:$lhs), IIC_iALUi,
- opc, ".w\t$dst, $rhs, $lhs",
- [(set rGPR:$dst, (opnode t2_so_imm:$lhs, rGPR:$rhs))]> {
+ def ri : T2sTwoRegImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
+ opc, ".w\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode t2_so_imm:$imm, rGPR:$Rn))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{15} = 0;
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins rGPR:$rhs, rGPR:$lhs), IIC_iALUr,
- opc, "\t$dst, $rhs, $lhs",
+ def rr : T2sThreeReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUr,
+ opc, "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
let Inst{14-12} = 0b000; // imm3
let Inst{7-6} = 0b00; // imm2
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2sI<(outs rGPR:$dst), (ins rGPR:$rhs, t2_so_reg:$lhs), IIC_iALUsi,
- opc, "\t$dst, $rhs, $lhs",
- [(set rGPR:$dst, (opnode t2_so_reg:$lhs, rGPR:$rhs))]> {
+ def rs : T2sTwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsir, opc, "\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm, rGPR:$Rn))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
}
}
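+
+// Usage sketch (illustrative): RSB is the typical reversed-operand client:
+//   defm t2RSB : T2I_rbin_irs<0b1110, "rsb",
+//                             BinOpFrag<(sub node:$LHS, node:$RHS)>>;
+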
/// T2I_bin_s_irs - Similar to T2I_bin_irs except it sets the 's' bit so the
/// instruction modifies the CPSR register.
-let Defs = [CPSR] in {
-multiclass T2I_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
+let isCodeGenOnly = 1, Defs = [CPSR] in {
+multiclass T2I_bin_s_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode, bit Commutable = 0> {
// shifted imm
- def ri : T2I<(outs rGPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> {
+ def ri : T2TwoRegImm<
+ (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm), iii,
+ !strconcat(opc, "s"), ".w\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, t2_so_imm:$imm))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -308,9 +571,10 @@ multiclass T2I_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{15} = 0;
}
// register
- def rr : T2I<(outs rGPR:$dst), (ins GPR:$lhs, rGPR:$rhs), IIC_iALUr,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, rGPR:$rhs))]> {
+ def rr : T2ThreeReg<
+ (outs rGPR:$Rd), (ins GPR:$Rn, rGPR:$Rm), iir,
+ !strconcat(opc, "s"), ".w\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, rGPR:$Rm))]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
@@ -321,9 +585,10 @@ multiclass T2I_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2I<(outs rGPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> {
+ def rs : T2TwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm), iis,
+ !strconcat(opc, "s"), ".w\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, t2_so_reg:$ShiftedRm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -340,51 +605,58 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
- def ri : T2sI<(outs rGPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> {
+ def ri : T2sTwoRegImm<
+ (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
+ opc, ".w\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, t2_so_imm:$imm))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24} = 1;
let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
let Inst{15} = 0;
}
}
// 12-bit imm
- def ri12 : T2I<(outs rGPR:$dst), (ins GPR:$lhs, imm0_4095:$rhs), IIC_iALUi,
- !strconcat(opc, "w"), "\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, imm0_4095:$rhs))]> {
+ def ri12 : T2I<
+ (outs rGPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm), IIC_iALUi,
+ !strconcat(opc, "w"), "\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, imm0_4095:$imm))]> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<12> imm;
let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24} = 0;
+ let Inst{26} = imm{11};
+ let Inst{25-24} = 0b10;
let Inst{23-21} = op23_21;
let Inst{20} = 0; // The S bit.
+ let Inst{19-16} = Rn;
let Inst{15} = 0;
+ let Inst{14-12} = imm{10-8};
+ let Inst{11-8} = Rd;
+ let Inst{7-0} = imm{7-0};
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins GPR:$lhs, rGPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, rGPR:$rhs))]> {
+ def rr : T2sThreeReg<(outs rGPR:$Rd), (ins GPR:$Rn, rGPR:$Rm), IIC_iALUr,
+ opc, ".w\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, rGPR:$Rm))]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24} = 1;
let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
let Inst{14-12} = 0b000; // imm3
let Inst{7-6} = 0b00; // imm2
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2sI<(outs rGPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> {
+ def rs : T2sTwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsi, opc, ".w\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode GPR:$Rn, t2_so_reg:$ShiftedRm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24} = 1;
let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
}
}
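+
+// Usage sketch (illustrative): ADD and SUB are the two clients of this
+// multiclass:
+//   defm t2ADD : T2I_bin_ii12rs<0b000, "add",
+//                               BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;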
@@ -395,50 +667,49 @@ let Uses = [CPSR] in {
multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
// shifted imm
- def ri : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_imm:$rhs))]>,
+ def ri : T2sTwoRegImm<(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm),
+ IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_imm:$imm))]>,
Requires<[IsThumb2]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
let Inst{15} = 0;
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, rGPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, rGPR:$rhs))]>,
+ def rr : T2sThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUr,
+ opc, ".w\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
let Inst{14-12} = 0b000; // imm3
let Inst{7-6} = 0b00; // imm2
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_reg:$rhs))]>,
+ def rs : T2sTwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsi, opc, ".w\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm))]>,
Requires<[IsThumb2]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
}
}
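+
+// Usage sketch (illustrative): the carry-using ops instantiate this, e.g.:
+//   defm t2ADC : T2I_adde_sube_irs<0b1010, "adc",
+//                  BinOpFrag<(adde_dead_carry node:$LHS, node:$RHS)>, 1>;
+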
// Carry setting variants
-let Defs = [CPSR] in {
+let isCodeGenOnly = 1, Defs = [CPSR] in {
multiclass T2I_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
// shifted imm
- def ri : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_imm:$rhs))]>,
+ def ri : T2sTwoRegImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
+ opc, "\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_imm:$imm))]>,
Requires<[IsThumb2]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
@@ -447,9 +718,9 @@ multiclass T2I_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{15} = 0;
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, rGPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, rGPR:$rhs))]>,
+ def rr : T2sThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUr,
+ opc, ".w\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2]> {
let isCommutable = Commutable;
let Inst{31-27} = 0b11101;
@@ -461,9 +732,10 @@ multiclass T2I_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, t2_so_reg:$rhs))]>,
+ def rs : T2sTwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsi, opc, ".w\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, t2_so_reg:$ShiftedRm))]>,
Requires<[IsThumb2]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
@@ -476,12 +748,13 @@ multiclass T2I_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
/// T2I_rbin_s_is - Same as T2I_rbin_irs except it sets the 's' bit; the
/// register version is not needed since this is only for codegen.
-let Defs = [CPSR] in {
+let isCodeGenOnly = 1, Defs = [CPSR] in {
multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> {
// shifted imm
- def ri : T2I<(outs rGPR:$dst), (ins rGPR:$rhs, t2_so_imm:$lhs), IIC_iALUi,
- !strconcat(opc, "s"), ".w\t$dst, $rhs, $lhs",
- [(set rGPR:$dst, (opnode t2_so_imm:$lhs, rGPR:$rhs))]> {
+ def ri : T2TwoRegImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
+ !strconcat(opc, "s"), ".w\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (opnode t2_so_imm:$imm, rGPR:$Rn))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -489,9 +762,10 @@ multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> {
let Inst{15} = 0;
}
// shifted register
- def rs : T2I<(outs rGPR:$dst), (ins rGPR:$rhs, t2_so_reg:$lhs), IIC_iALUsi,
- !strconcat(opc, "s"), "\t$dst, $rhs, $lhs",
- [(set rGPR:$dst, (opnode t2_so_reg:$lhs, rGPR:$rhs))]> {
+ def rs : T2TwoRegShiftedReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsi, !strconcat(opc, "s"), "\t$Rd, $Rn, $ShiftedRm",
+ [(set rGPR:$Rd, (opnode t2_so_reg:$ShiftedRm, rGPR:$Rn))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -504,18 +778,20 @@ multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> {
// rotate operation that produces a value.
multiclass T2I_sh_ir<bits<2> opcod, string opc, PatFrag opnode> {
// 5-bit imm
- def ri : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, imm1_31:$rhs))]> {
+ def ri : T2sTwoRegShiftImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rm, i32imm:$imm), IIC_iMOVsi,
+ opc, ".w\t$Rd, $Rm, $imm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rm, imm1_31:$imm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-21} = 0b010010;
let Inst{19-16} = 0b1111; // Rn
let Inst{5-4} = opcod;
}
// register
- def rr : T2sI<(outs rGPR:$dst), (ins rGPR:$lhs, rGPR:$rhs), IIC_iMOVsr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set rGPR:$dst, (opnode rGPR:$lhs, rGPR:$rhs))]> {
+ def rr : T2sThreeReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMOVsr,
+ opc, ".w\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-21} = opcod;
@@ -528,11 +804,14 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, PatFrag opnode> {
/// patterns. Similar to T2I_bin_irs except the instruction does not produce
/// an explicit result; it only implicitly sets CPSR.
let isCompare = 1, Defs = [CPSR] in {
-multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> {
+multiclass T2I_cmp_irs<bits<4> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
+ PatFrag opnode> {
// shifted imm
- def ri : T2I<(outs), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iCMPi,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, t2_so_imm:$rhs)]> {
+ def ri : T2OneRegCmpImm<
+ (outs), (ins GPR:$Rn, t2_so_imm:$imm), iii,
+ opc, ".w\t$Rn, $imm",
+ [(opnode GPR:$Rn, t2_so_imm:$imm)]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = opcod;
@@ -541,7 +820,8 @@ multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> {
let Inst{11-8} = 0b1111; // Rd
}
// register
- def rr : T2I<(outs), (ins GPR:$lhs, rGPR:$rhs), IIC_iCMPr,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, rGPR:$rhs)]> {
+ def rr : T2TwoRegCmp<
+ (outs), (ins GPR:$Rn, rGPR:$Rm), iir,
+ opc, ".w\t$Rn, $Rm",
+ [(opnode GPR:$Rn, rGPR:$Rm)]> {
let Inst{31-27} = 0b11101;
@@ -554,9 +834,10 @@ multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> {
let Inst{5-4} = 0b00; // type
}
// shifted register
- def rs : T2I<(outs), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iCMPsi,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, t2_so_reg:$rhs)]> {
+ def rs : T2OneRegCmpShiftedReg<
+ (outs), (ins GPR:$Rn, t2_so_reg:$ShiftedRm), iis,
+ opc, ".w\t$Rn, $ShiftedRm",
+ [(opnode GPR:$Rn, t2_so_reg:$ShiftedRm)]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = opcod;
@@ -567,20 +848,29 @@ multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> {
}
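+
+// Usage sketch (illustrative): the compare ops instantiate this, e.g.:
+//   defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
+//                            IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
+//                            BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
+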
/// T2I_ld - Defines a set of (op r, {imm12|imm8|so_reg}) load patterns.
-multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> {
- def i12 : T2Ii12<(outs GPR:$dst), (ins t2addrmode_imm12:$addr), IIC_iLoadi,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_imm12:$addr))]> {
+multiclass T2I_ld<bit signed, bits<2> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iis, PatFrag opnode> {
+ def i12 : T2Ii12<(outs GPR:$Rt), (ins t2addrmode_imm12:$addr), iii,
+ opc, ".w\t$Rt, $addr",
+ [(set GPR:$Rt, (opnode t2addrmode_imm12:$addr))]> {
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = signed;
let Inst{23} = 1;
let Inst{22-21} = opcod;
let Inst{20} = 1; // load
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<17> addr;
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{23} = addr{12}; // U
+ let Inst{11-0} = addr{11-0}; // imm
}
- def i8 : T2Ii8 <(outs GPR:$dst), (ins t2addrmode_imm8:$addr), IIC_iLoadi,
- opc, "\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_imm8:$addr))]> {
+ def i8 : T2Ii8 <(outs GPR:$Rt), (ins t2addrmode_imm8:$addr), iii,
+ opc, "\t$Rt, $addr",
+ [(set GPR:$Rt, (opnode t2addrmode_imm8:$addr))]> {
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = signed;
@@ -591,10 +881,18 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> {
// Offset: index==TRUE, wback==FALSE
let Inst{10} = 1; // The P bit.
let Inst{8} = 0; // The W bit.
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<13> addr;
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{9} = addr{8}; // U
+ let Inst{7-0} = addr{7-0}; // imm
}
- def s : T2Iso <(outs GPR:$dst), (ins t2addrmode_so_reg:$addr), IIC_iLoadr,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_so_reg:$addr))]> {
+ def s : T2Iso <(outs GPR:$Rt), (ins t2addrmode_so_reg:$addr), iis,
+ opc, ".w\t$Rt, $addr",
+ [(set GPR:$Rt, (opnode t2addrmode_so_reg:$addr))]> {
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = signed;
@@ -602,10 +900,20 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> {
let Inst{22-21} = opcod;
let Inst{20} = 1; // load
let Inst{11-6} = 0b000000;
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<10> addr;
+ let Inst{19-16} = addr{9-6}; // Rn
+ let Inst{3-0} = addr{5-2}; // Rm
+ let Inst{5-4} = addr{1-0}; // imm
}
- def pci : T2Ipc <(outs GPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode (ARMWrapper tconstpool:$addr)))]> {
+
+ // FIXME: Is the pci variant actually needed?
+ def pci : T2Ipc <(outs GPR:$Rt), (ins i32imm:$addr), iii,
+ opc, ".w\t$Rt, $addr",
+ [(set GPR:$Rt, (opnode (ARMWrapper tconstpool:$addr)))]> {
let isReMaterializable = 1;
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
@@ -614,22 +922,35 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> {
let Inst{22-21} = opcod;
let Inst{20} = 1; // load
let Inst{19-16} = 0b1111; // Rn
+ bits<4> Rt;
+ bits<12> addr;
+ let Inst{15-12} = Rt{3-0};
+ let Inst{11-0} = addr{11-0};
}
}
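+
+// Note (informal): the complex address operands above pack their fields into
+// one immediate for the encoder. For example, the 17-bit t2addrmode_imm12
+// value is laid out as { Rn[3:0], U, imm12[11:0] }, which the 'let's above
+// scatter into Inst{19-16}, Inst{23} and Inst{11-0} respectively.
+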
/// T2I_st - Defines a set of (op r, {imm12|imm8|so_reg}) store patterns.
-multiclass T2I_st<bits<2> opcod, string opc, PatFrag opnode> {
- def i12 : T2Ii12<(outs), (ins GPR:$src, t2addrmode_imm12:$addr), IIC_iStorei,
- opc, ".w\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_imm12:$addr)]> {
+multiclass T2I_st<bits<2> opcod, string opc,
+ InstrItinClass iii, InstrItinClass iis, PatFrag opnode> {
+ def i12 : T2Ii12<(outs), (ins GPR:$Rt, t2addrmode_imm12:$addr), iii,
+ opc, ".w\t$Rt, $addr",
+ [(opnode GPR:$Rt, t2addrmode_imm12:$addr)]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0001;
let Inst{22-21} = opcod;
let Inst{20} = 0; // !load
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<17> addr;
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{23} = addr{12}; // U
+ let Inst{11-0} = addr{11-0}; // imm
}
- def i8 : T2Ii8 <(outs), (ins GPR:$src, t2addrmode_imm8:$addr), IIC_iStorei,
- opc, "\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_imm8:$addr)]> {
+ def i8 : T2Ii8 <(outs), (ins GPR:$Rt, t2addrmode_imm8:$addr), iii,
+ opc, "\t$Rt, $addr",
+ [(opnode GPR:$Rt, t2addrmode_imm8:$addr)]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0000;
let Inst{22-21} = opcod;
@@ -638,24 +959,40 @@ multiclass T2I_st<bits<2> opcod, string opc, PatFrag opnode> {
// Offset: index==TRUE, wback==FALSE
let Inst{10} = 1; // The P bit.
let Inst{8} = 0; // The W bit.
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<13> addr;
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{9} = addr{8}; // U
+ let Inst{7-0} = addr{7-0}; // imm
}
- def s : T2Iso <(outs), (ins GPR:$src, t2addrmode_so_reg:$addr), IIC_iStorer,
- opc, ".w\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_so_reg:$addr)]> {
+ def s : T2Iso <(outs), (ins GPR:$Rt, t2addrmode_so_reg:$addr), iis,
+ opc, ".w\t$Rt, $addr",
+ [(opnode GPR:$Rt, t2addrmode_so_reg:$addr)]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0000;
let Inst{22-21} = opcod;
let Inst{20} = 0; // !load
let Inst{11-6} = 0b000000;
+
+ bits<4> Rt;
+ let Inst{15-12} = Rt;
+
+ bits<10> addr;
+ let Inst{19-16} = addr{9-6}; // Rn
+ let Inst{3-0} = addr{5-2}; // Rm
+ let Inst{5-4} = addr{1-0}; // imm
}
}
-/// T2I_unary_rrot - A unary operation with two forms: one whose operand is a
+/// T2I_ext_rrot - A unary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
-multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
- def r : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- opc, ".w\t$dst, $src",
- [(set rGPR:$dst, (opnode rGPR:$src))]> {
+multiclass T2I_ext_rrot<bits<3> opcod, string opc, PatFrag opnode> {
+ def r : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iEXTr,
+ opc, ".w\t$Rd, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rm))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -664,25 +1001,27 @@ multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
let Inst{7} = 1;
let Inst{5-4} = 0b00; // rotate
}
- def r_rot : T2I<(outs rGPR:$dst), (ins rGPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, ".w\t$dst, $src, ror $rot",
- [(set rGPR:$dst, (opnode (rotr rGPR:$src, rot_imm:$rot)))]> {
+ def r_rot : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm, rot_imm:$rot), IIC_iEXTr,
+ opc, ".w\t$Rd, $Rm, ror $rot",
+ [(set rGPR:$Rd, (opnode (rotr rGPR:$Rm, rot_imm:$rot)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
let Inst{19-16} = 0b1111; // Rn
let Inst{15-12} = 0b1111;
let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
+
+ bits<2> rot;
+ let Inst{5-4} = rot{1-0}; // rotate
}
}
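+
+// Usage sketch (illustrative): the extend ops instantiate this, e.g.:
+//   defm t2SXTB : T2I_ext_rrot<0b100, "sxtb",
+//                              UnOpFrag<(sext_inreg node:$Src, i8)>>;
+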
// UXTB16 - Requires T2ExtractPack, does not need the .w qualifier.
-multiclass T2I_unary_rrot_uxtb16<bits<3> opcod, string opc, PatFrag opnode> {
- def r : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- opc, "\t$dst, $src",
- [(set rGPR:$dst, (opnode rGPR:$src))]>,
- Requires<[HasT2ExtractPack]> {
+multiclass T2I_ext_rrot_uxtb16<bits<3> opcod, string opc, PatFrag opnode> {
+ def r : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iEXTr,
+ opc, "\t$Rd, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rm))]>,
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -691,25 +1030,27 @@ multiclass T2I_unary_rrot_uxtb16<bits<3> opcod, string opc, PatFrag opnode> {
let Inst{7} = 1;
let Inst{5-4} = 0b00; // rotate
}
- def r_rot : T2I<(outs rGPR:$dst), (ins rGPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, "\t$dst, $src, ror $rot",
- [(set rGPR:$dst, (opnode (rotr rGPR:$src, rot_imm:$rot)))]>,
- Requires<[HasT2ExtractPack]> {
+ def r_rot : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm, rot_imm:$rot),
+ IIC_iEXTr, opc, "\t$Rd, $Rm, ror $rot",
+ [(set rGPR:$Rd, (opnode (rotr rGPR:$Rm, rot_imm:$rot)))]>,
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
let Inst{19-16} = 0b1111; // Rn
let Inst{15-12} = 0b1111;
let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
+
+ bits<2> rot;
+ let Inst{5-4} = rot{1-0}; // rotate
}
}
// SXTB16 - Requires T2ExtractPack, does not need the .w qualifier; no pattern
// supported yet.
-multiclass T2I_unary_rrot_sxtb16<bits<3> opcod, string opc> {
- def r : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- opc, "\t$dst, $src", []> {
+multiclass T2I_ext_rrot_sxtb16<bits<3> opcod, string opc> {
+ def r : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iEXTr,
+ opc, "\t$Rd, $Rm", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -718,25 +1059,27 @@ multiclass T2I_unary_rrot_sxtb16<bits<3> opcod, string opc> {
let Inst{7} = 1;
let Inst{5-4} = 0b00; // rotate
}
- def r_rot : T2I<(outs rGPR:$dst), (ins rGPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, "\t$dst, $src, ror $rot", []> {
+ def r_rot : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm, i32imm:$rot), IIC_iEXTr,
+ opc, "\t$Rd, $Rm, ror $rot", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
let Inst{19-16} = 0b1111; // Rn
let Inst{15-12} = 0b1111;
let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
+
+ bits<2> rot;
+ let Inst{5-4} = rot{1-0}; // rotate
}
}
-/// T2I_bin_rrot - A binary operation with two forms: one whose operand is a
+/// T2I_exta_rrot - A binary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
-multiclass T2I_bin_rrot<bits<3> opcod, string opc, PatFrag opnode> {
- def rr : T2I<(outs rGPR:$dst), (ins rGPR:$LHS, rGPR:$RHS), IIC_iALUr,
- opc, "\t$dst, $LHS, $RHS",
- [(set rGPR:$dst, (opnode rGPR:$LHS, rGPR:$RHS))]>,
- Requires<[HasT2ExtractPack]> {
+multiclass T2I_exta_rrot<bits<3> opcod, string opc, PatFrag opnode> {
+ def rr : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iEXTAr,
+ opc, "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn, rGPR:$Rm))]>,
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -744,25 +1087,28 @@ multiclass T2I_bin_rrot<bits<3> opcod, string opc, PatFrag opnode> {
let Inst{7} = 1;
let Inst{5-4} = 0b00; // rotate
}
- def rr_rot : T2I<(outs rGPR:$dst), (ins rGPR:$LHS, rGPR:$RHS, i32imm:$rot),
- IIC_iALUsr, opc, "\t$dst, $LHS, $RHS, ror $rot",
- [(set rGPR:$dst, (opnode rGPR:$LHS,
- (rotr rGPR:$RHS, rot_imm:$rot)))]>,
- Requires<[HasT2ExtractPack]> {
+ def rr_rot : T2ThreeReg<(outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rot_imm:$rot),
+ IIC_iEXTAsr, opc, "\t$Rd, $Rn, $Rm, ror $rot",
+ [(set rGPR:$Rd, (opnode rGPR:$Rn,
+ (rotr rGPR:$Rm, rot_imm:$rot)))]>,
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
let Inst{15-12} = 0b1111;
let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
+
+ bits<2> rot;
+ let Inst{5-4} = rot{1-0}; // rotate
}
}
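+
+// Usage sketch (illustrative): extend-and-add ops such as SXTAB use this:
+//   defm t2SXTAB : T2I_exta_rrot<0b100, "sxtab",
+//                    BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
+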
// DO variant - disassembly only, no pattern
-multiclass T2I_bin_rrot_DO<bits<3> opcod, string opc> {
- def rr : T2I<(outs rGPR:$dst), (ins rGPR:$LHS, rGPR:$RHS), IIC_iALUr,
- opc, "\t$dst, $LHS, $RHS", []> {
+multiclass T2I_exta_rrot_DO<bits<3> opcod, string opc> {
+ def rr : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iEXTAr,
+ opc, "\t$Rd, $Rn, $Rm", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -770,14 +1116,16 @@ multiclass T2I_bin_rrot_DO<bits<3> opcod, string opc> {
let Inst{7} = 1;
let Inst{5-4} = 0b00; // rotate
}
- def rr_rot : T2I<(outs rGPR:$dst), (ins rGPR:$LHS, rGPR:$RHS, i32imm:$rot),
- IIC_iALUsr, opc, "\t$dst, $LHS, $RHS, ror $rot", []> {
+ def rr_rot : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, i32imm:$rot),
+ IIC_iEXTAsr, opc, "\t$Rd, $Rn, $Rm, ror $rot", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
let Inst{15-12} = 0b1111;
let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
+
+ bits<2> rot;
+ let Inst{5-4} = rot{1-0}; // rotate
}
}
@@ -789,24 +1137,23 @@ multiclass T2I_bin_rrot_DO<bits<3> opcod, string opc> {
// Miscellaneous Instructions.
//
+class T2PCOneRegImm<dag oops, dag iops, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : T2XI<oops, iops, itin, asm, pattern> {
+ bits<4> Rd;
+ bits<12> label;
+
+ let Inst{11-8} = Rd;
+ let Inst{26} = label{11};
+ let Inst{14-12} = label{10-8};
+ let Inst{7-0} = label{7-0};
+}
+
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
-let neverHasSideEffects = 1 in {
-let isReMaterializable = 1 in
-def t2LEApcrel : T2XI<(outs rGPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
- "adr${p}.w\t$dst, #$label", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25-24} = 0b10;
- // Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE)
- let Inst{22} = 0;
- let Inst{20} = 0;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-} // neverHasSideEffects
-def t2LEApcrelJT : T2XI<(outs rGPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p), IIC_iALUi,
- "adr${p}.w\t$dst, #${label}_${id}", []> {
+def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd),
+ (ins t2adrlabel:$addr, pred:$p),
+ IIC_iALUi, "adr{$p}.w\t$Rd, #$addr", []> {
let Inst{31-27} = 0b11110;
let Inst{25-24} = 0b10;
// Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE)
@@ -814,76 +1161,88 @@ def t2LEApcrelJT : T2XI<(outs rGPR:$dst),
let Inst{20} = 0;
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
-}
+ bits<4> Rd;
+ bits<13> addr;
+ let Inst{11-8} = Rd;
+ let Inst{23} = addr{12};
+ let Inst{21} = addr{12};
+ let Inst{26} = addr{11};
+ let Inst{14-12} = addr{10-8};
+ let Inst{7-0} = addr{7-0};
+}
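+
+// Note (informal): t2adrlabel is 13 bits; addr{12} selects the SUB vs. ADD
+// form (it feeds both Inst{23} and Inst{21}), while addr{11-0} is the usual
+// split immediate: i (Inst{26}), imm3 (Inst{14-12}) and imm8 (Inst{7-0}).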
+
+let neverHasSideEffects = 1, isReMaterializable = 1 in
+def t2LEApcrel : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, pred:$p),
+ Size4Bytes, IIC_iALUi, []>;
+def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd),
+ (ins i32imm:$label, nohash_imm:$id, pred:$p),
+ Size4Bytes, IIC_iALUi,
+ []>;
+
+
+// FIXME: None of these add/sub SP special instructions should be necessary
+// at all for thumb2 since they use the same encodings as the generic
+// add/sub instructions. In thumb1 we need them since they have dedicated
+// encodings. At the least, they should be pseudo instructions.
// ADD r, sp, {so_imm|i12}
-def t2ADDrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
- IIC_iALUi, "add", ".w\t$dst, $sp, $imm", []> {
+let isCodeGenOnly = 1 in {
+def t2ADDrSPi : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm),
+ IIC_iALUi, "add", ".w\t$Rd, $Rn, $imm", []> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b1000;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
let Inst{15} = 0;
}
-def t2ADDrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm),
- IIC_iALUi, "addw", "\t$dst, $sp, $imm", []> {
+def t2ADDrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm),
+ IIC_iALUi, "addw", "\t$Rd, $Rn, $imm", []> {
let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0000;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
+ let Inst{25-20} = 0b100000;
let Inst{15} = 0;
}
// ADD r, sp, so_reg
-def t2ADDrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs),
- IIC_iALUsi, "add", ".w\t$dst, $sp, $rhs", []> {
+def t2ADDrSPs : T2sTwoRegShiftedReg<
+ (outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm),
+ IIC_iALUsi, "add", ".w\t$Rd, $Rn, $ShiftedRm", []> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b1000;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
let Inst{15} = 0;
}
// SUB r, sp, {so_imm|i12}
-def t2SUBrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
- IIC_iALUi, "sub", ".w\t$dst, $sp, $imm", []> {
+def t2SUBrSPi : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm),
+ IIC_iALUi, "sub", ".w\t$Rd, $Rn, $imm", []> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b1101;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
let Inst{15} = 0;
}
-def t2SUBrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm),
- IIC_iALUi, "subw", "\t$dst, $sp, $imm", []> {
+def t2SUBrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm),
+ IIC_iALUi, "subw", "\t$Rd, $Rn, $imm", []> {
let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0101;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
+ let Inst{25-20} = 0b101010;
let Inst{15} = 0;
}
// SUB r, sp, so_reg
-def t2SUBrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs),
+def t2SUBrSPs : T2sTwoRegShiftedReg<
+ (outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm),
- IIC_iALUsi,
- "sub", "\t$dst, $sp, $rhs", []> {
+ IIC_iALUsi, "sub", "\t$Rd, $Rn, $ShiftedRm", []> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b1101;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
let Inst{15} = 0;
}
+} // end isCodeGenOnly = 1
// Signed and unsigned division on v7-M
-def t2SDIV : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iALUi,
- "sdiv", "\t$dst, $a, $b",
- [(set rGPR:$dst, (sdiv rGPR:$a, rGPR:$b))]>,
- Requires<[HasDivide]> {
+def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+ "sdiv", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
+ Requires<[HasDivide, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011100;
let Inst{20} = 0b1;
@@ -891,10 +1250,10 @@ def t2SDIV : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iALUi,
let Inst{7-4} = 0b1111;
}
-def t2UDIV : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iALUi,
- "udiv", "\t$dst, $a, $b",
- [(set rGPR:$dst, (udiv rGPR:$a, rGPR:$b))]>,
- Requires<[HasDivide]> {
+def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+ "udiv", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
+ Requires<[HasDivide, IsThumb2]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011101;
let Inst{20} = 0b1;
@@ -908,26 +1267,26 @@ def t2UDIV : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iALUi,
// Load
let canFoldAsLoad = 1, isReMaterializable = 1 in
-defm t2LDR : T2I_ld<0, 0b10, "ldr", UnOpFrag<(load node:$Src)>>;
+defm t2LDR : T2I_ld<0, 0b10, "ldr", IIC_iLoad_i, IIC_iLoad_si,
+ UnOpFrag<(load node:$Src)>>;
// Loads with zero extension
-defm t2LDRH : T2I_ld<0, 0b01, "ldrh", UnOpFrag<(zextloadi16 node:$Src)>>;
-defm t2LDRB : T2I_ld<0, 0b00, "ldrb", UnOpFrag<(zextloadi8 node:$Src)>>;
+defm t2LDRH : T2I_ld<0, 0b01, "ldrh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
+ UnOpFrag<(zextloadi16 node:$Src)>>;
+defm t2LDRB : T2I_ld<0, 0b00, "ldrb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
+ UnOpFrag<(zextloadi8 node:$Src)>>;
// Loads with sign extension
-defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", UnOpFrag<(sextloadi16 node:$Src)>>;
-defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", UnOpFrag<(sextloadi8 node:$Src)>>;
+defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
+ UnOpFrag<(sextloadi16 node:$Src)>>;
+defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
+ UnOpFrag<(sextloadi8 node:$Src)>>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
-def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs rGPR:$dst1, rGPR:$dst2),
+def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs rGPR:$Rt, rGPR:$Rt2),
(ins t2addrmode_imm8s4:$addr),
- IIC_iLoadi, "ldrd", "\t$dst1, $addr", []>;
-def t2LDRDpci : T2Ii8s4<1, 0, 1, (outs rGPR:$dst1, rGPR:$dst2),
- (ins i32imm:$addr), IIC_iLoadi,
- "ldrd", "\t$dst1, $addr", []> {
- let Inst{19-16} = 0b1111; // Rn
-}
+ IIC_iLoad_d_i, "ldrd", "\t$Rt, $Rt2, $addr", []>;
} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// zextload i1 -> zextload i8
@@ -976,70 +1335,71 @@ def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)),
// not via pattern.
// Indexed loads
+
let mayLoad = 1, neverHasSideEffects = 1 in {
-def t2LDR_PRE : T2Iidxldst<0, 0b10, 1, 1, (outs GPR:$dst, GPR:$base_wb),
+def t2LDR_PRE : T2Iidxldst<0, 0b10, 1, 1, (outs GPR:$Rt, GPR:$Rn),
(ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldr", "\t$dst, $addr!", "$addr.base = $base_wb",
+ AddrModeT2_i8, IndexModePre, IIC_iLoad_iu,
+ "ldr", "\t$Rt, $addr!", "$addr.base = $Rn",
[]>;
-def t2LDR_POST : T2Iidxldst<0, 0b10, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldr", "\t$dst, [$base], $offset", "$base = $base_wb",
+def t2LDR_POST : T2Iidxldst<0, 0b10, 1, 0, (outs GPR:$Rt, GPR:$Rn),
+ (ins GPR:$base, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iLoad_iu,
+ "ldr", "\t$Rt, [$Rn], $addr", "$base = $Rn",
[]>;
-def t2LDRB_PRE : T2Iidxldst<0, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb),
+def t2LDRB_PRE : T2Iidxldst<0, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn),
(ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrb", "\t$dst, $addr!", "$addr.base = $base_wb",
+ AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
+ "ldrb", "\t$Rt, $addr!", "$addr.base = $Rn",
[]>;
-def t2LDRB_POST : T2Iidxldst<0, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrb", "\t$dst, [$base], $offset", "$base = $base_wb",
+def t2LDRB_POST : T2Iidxldst<0, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn),
+ (ins GPR:$base, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
+ "ldrb", "\t$Rt, [$Rn], $addr", "$base = $Rn",
[]>;
-def t2LDRH_PRE : T2Iidxldst<0, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb),
+def t2LDRH_PRE : T2Iidxldst<0, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn),
(ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrh", "\t$dst, $addr!", "$addr.base = $base_wb",
+ AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
+ "ldrh", "\t$Rt, $addr!", "$addr.base = $Rn",
[]>;
-def t2LDRH_POST : T2Iidxldst<0, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrh", "\t$dst, [$base], $offset", "$base = $base_wb",
+def t2LDRH_POST : T2Iidxldst<0, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn),
+ (ins GPR:$base, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
+ "ldrh", "\t$Rt, [$Rn], $addr", "$base = $Rn",
[]>;
-def t2LDRSB_PRE : T2Iidxldst<1, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb),
+def t2LDRSB_PRE : T2Iidxldst<1, 0b00, 1, 1, (outs GPR:$Rt, GPR:$Rn),
(ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrsb", "\t$dst, $addr!", "$addr.base = $base_wb",
+ AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
+ "ldrsb", "\t$Rt, $addr!", "$addr.base = $Rn",
[]>;
-def t2LDRSB_POST : T2Iidxldst<1, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrsb", "\t$dst, [$base], $offset", "$base = $base_wb",
+def t2LDRSB_POST : T2Iidxldst<1, 0b00, 1, 0, (outs GPR:$Rt, GPR:$Rn),
+ (ins GPR:$base, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
+ "ldrsb", "\t$Rt, [$Rn], $addr", "$base = $Rn",
[]>;
-def t2LDRSH_PRE : T2Iidxldst<1, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb),
+def t2LDRSH_PRE : T2Iidxldst<1, 0b01, 1, 1, (outs GPR:$Rt, GPR:$Rn),
(ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrsh", "\t$dst, $addr!", "$addr.base = $base_wb",
+ AddrModeT2_i8, IndexModePre, IIC_iLoad_bh_iu,
+ "ldrsh", "\t$Rt, $addr!", "$addr.base = $Rn",
[]>;
-def t2LDRSH_POST : T2Iidxldst<1, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrsh", "\t$dst, [$base], $offset", "$base = $base_wb",
+def t2LDRSH_POST : T2Iidxldst<1, 0b01, 1, 0, (outs GPR:$Rt, GPR:$Rn),
+ (ins GPR:$base, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iLoad_bh_iu,
+ "ldrsh", "\t$Rt, [$Rn], $addr", "$base = $Rn",
[]>;
-} // mayLoad = 1, neverHasSideEffects = 1
+} // mayLoad = 1, neverHasSideEffects = 1
// LDRT, LDRBT, LDRHT, LDRSBT, LDRSHT all have offset mode (PUW=0b110) and are
// for disassembly only.
// Ref: A8.6.57 LDR (immediate, Thumb) Encoding T4
-class T2IldT<bit signed, bits<2> type, string opc>
- : T2Ii8<(outs GPR:$dst), (ins t2addrmode_imm8:$addr), IIC_iLoadi, opc,
- "\t$dst, $addr", []> {
+class T2IldT<bit signed, bits<2> type, string opc, InstrItinClass ii>
+ : T2Ii8<(outs GPR:$Rt), (ins t2addrmode_imm8:$addr), ii, opc,
+ "\t$Rt, $addr", []> {
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = signed;
@@ -1048,74 +1408,83 @@ class T2IldT<bit signed, bits<2> type, string opc>
let Inst{20} = 1; // load
let Inst{11} = 1;
let Inst{10-8} = 0b110; // PUW.
+
+ bits<4> Rt;
+ bits<13> addr;
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = addr{12-9};
+ let Inst{7-0} = addr{7-0};
}
-def t2LDRT : T2IldT<0, 0b10, "ldrt">;
-def t2LDRBT : T2IldT<0, 0b00, "ldrbt">;
-def t2LDRHT : T2IldT<0, 0b01, "ldrht">;
-def t2LDRSBT : T2IldT<1, 0b00, "ldrsbt">;
-def t2LDRSHT : T2IldT<1, 0b01, "ldrsht">;
+def t2LDRT : T2IldT<0, 0b10, "ldrt", IIC_iLoad_i>;
+def t2LDRBT : T2IldT<0, 0b00, "ldrbt", IIC_iLoad_bh_i>;
+def t2LDRHT : T2IldT<0, 0b01, "ldrht", IIC_iLoad_bh_i>;
+def t2LDRSBT : T2IldT<1, 0b00, "ldrsbt", IIC_iLoad_bh_i>;
+def t2LDRSHT : T2IldT<1, 0b01, "ldrsht", IIC_iLoad_bh_i>;
// Store
-defm t2STR :T2I_st<0b10,"str", BinOpFrag<(store node:$LHS, node:$RHS)>>;
-defm t2STRB:T2I_st<0b00,"strb",BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
-defm t2STRH:T2I_st<0b01,"strh",BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
+defm t2STR :T2I_st<0b10,"str", IIC_iStore_i, IIC_iStore_si,
+ BinOpFrag<(store node:$LHS, node:$RHS)>>;
+defm t2STRB:T2I_st<0b00,"strb", IIC_iStore_bh_i, IIC_iStore_bh_si,
+ BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
+defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
+ BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
// Store doubleword
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
- (ins GPR:$src1, GPR:$src2, t2addrmode_imm8s4:$addr),
- IIC_iStorer, "strd", "\t$src1, $addr", []>;
+ (ins GPR:$Rt, GPR:$Rt2, t2addrmode_imm8s4:$addr),
+ IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", []>;
// Indexed stores
def t2STR_PRE : T2Iidxldst<0, 0b10, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "str", "\t$src, [$base, $offset]!", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
+ "str", "\t$Rt, [$Rn, $addr]!", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (pre_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (pre_store GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
def t2STR_POST : T2Iidxldst<0, 0b10, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "str", "\t$src, [$base], $offset", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iStore_iu,
+ "str", "\t$Rt, [$Rn], $addr", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (post_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (post_store GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
def t2STRH_PRE : T2Iidxldst<0, 0b01, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "strh", "\t$src, [$base, $offset]!", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
+ "strh", "\t$Rt, [$Rn, $addr]!", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (pre_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (pre_truncsti16 GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
def t2STRH_POST : T2Iidxldst<0, 0b01, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "strh", "\t$src, [$base], $offset", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iStore_bh_iu,
+ "strh", "\t$Rt, [$Rn], $addr", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (post_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (post_truncsti16 GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
def t2STRB_PRE : T2Iidxldst<0, 0b00, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "strb", "\t$src, [$base, $offset]!", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePre, IIC_iStore_bh_iu,
+ "strb", "\t$Rt, [$Rn, $addr]!", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (pre_truncsti8 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (pre_truncsti8 GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
def t2STRB_POST : T2Iidxldst<0, 0b00, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "strb", "\t$src, [$base], $offset", "$base = $base_wb",
+ (ins GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr),
+ AddrModeT2_i8, IndexModePost, IIC_iStore_bh_iu,
+ "strb", "\t$Rt, [$Rn], $addr", "$Rn = $base_wb",
[(set GPR:$base_wb,
- (post_truncsti8 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
+ (post_truncsti8 GPR:$Rt, GPR:$Rn, t2am_imm8_offset:$addr))]>;
// STRT, STRBT, STRHT all have offset mode (PUW=0b110) and are for disassembly
// only.
// Ref: A8.6.193 STR (immediate, Thumb) Encoding T4
-class T2IstT<bits<2> type, string opc>
- : T2Ii8<(outs GPR:$src), (ins t2addrmode_imm8:$addr), IIC_iStorei, opc,
- "\t$src, $addr", []> {
+class T2IstT<bits<2> type, string opc, InstrItinClass ii>
+ : T2Ii8<(outs GPR:$Rt), (ins t2addrmode_imm8:$addr), ii, opc,
+ "\t$Rt, $addr", []> {
let Inst{31-27} = 0b11111;
let Inst{26-25} = 0b00;
let Inst{24} = 0; // not signed
@@ -1124,51 +1493,62 @@ class T2IstT<bits<2> type, string opc>
let Inst{20} = 0; // store
let Inst{11} = 1;
let Inst{10-8} = 0b110; // PUW
+
+ bits<4> Rt;
+ bits<13> addr;
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = addr{12-9};
+ let Inst{7-0} = addr{7-0};
}
-def t2STRT : T2IstT<0b10, "strt">;
-def t2STRBT : T2IstT<0b00, "strbt">;
-def t2STRHT : T2IstT<0b01, "strht">;
+def t2STRT : T2IstT<0b10, "strt", IIC_iStore_i>;
+def t2STRBT : T2IstT<0b00, "strbt", IIC_iStore_bh_i>;
+def t2STRHT : T2IstT<0b01, "strht", IIC_iStore_bh_i>;
// ldrd / strd pre / post variants
// For disassembly only.
-def t2LDRD_PRE : T2Ii8s4<1, 1, 1, (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$base, t2am_imm8s4_offset:$imm), NoItinerary,
- "ldrd", "\t$dst1, $dst2, [$base, $imm]!", []>;
+def t2LDRD_PRE : T2Ii8s4<1, 1, 1, (outs GPR:$Rt, GPR:$Rt2),
+ (ins GPR:$base, t2am_imm8s4_offset:$imm), IIC_iLoad_d_ru,
+ "ldrd", "\t$Rt, $Rt2, [$base, $imm]!", []>;
-def t2LDRD_POST : T2Ii8s4<0, 1, 1, (outs GPR:$dst1, GPR:$dst2),
- (ins GPR:$base, t2am_imm8s4_offset:$imm), NoItinerary,
- "ldrd", "\t$dst1, $dst2, [$base], $imm", []>;
+def t2LDRD_POST : T2Ii8s4<0, 1, 1, (outs GPR:$Rt, GPR:$Rt2),
+ (ins GPR:$base, t2am_imm8s4_offset:$imm), IIC_iLoad_d_ru,
+ "ldrd", "\t$Rt, $Rt2, [$base], $imm", []>;
def t2STRD_PRE : T2Ii8s4<1, 1, 0, (outs),
- (ins GPR:$src1, GPR:$src2, GPR:$base, t2am_imm8s4_offset:$imm),
- NoItinerary, "strd", "\t$src1, $src2, [$base, $imm]!", []>;
+ (ins GPR:$Rt, GPR:$Rt2, GPR:$base, t2am_imm8s4_offset:$imm),
+ IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, [$base, $imm]!", []>;
def t2STRD_POST : T2Ii8s4<0, 1, 0, (outs),
- (ins GPR:$src1, GPR:$src2, GPR:$base, t2am_imm8s4_offset:$imm),
- NoItinerary, "strd", "\t$src1, $src2, [$base], $imm", []>;
+ (ins GPR:$Rt, GPR:$Rt2, GPR:$base, t2am_imm8s4_offset:$imm),
+ IIC_iStore_d_ru, "strd", "\t$Rt, $Rt2, [$base], $imm", []>;
// T2Ipl (Preload Data/Instruction) hints the memory system about possible
// future data/instruction accesses.
-//
-// A8.6.117, A8.6.118. Different instructions are generated for #0 and #-0.
-// The neg_zero operand translates -0 to -1, -1 to -2, ..., etc.
-multiclass T2Ipl<bit instr, bit write, string opc> {
+// instr_write is inverted for Thumb mode: (prefetch 3) -> (preload 0),
+// (prefetch 1) -> (preload 2), (prefetch 2) -> (preload 1).
+multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
- def i12 : T2I<(outs), (ins GPR:$base, i32imm:$imm), IIC_iLoadi, opc,
- "\t[$base, $imm]", []> {
+ def i12 : T2Ii12<(outs), (ins t2addrmode_imm12:$addr), IIC_Preload, opc,
+ "\t$addr",
+ [(ARMPreload t2addrmode_imm12:$addr, (i32 write), (i32 instr))]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
- let Inst{23} = 1; // U = 1
let Inst{22} = 0;
let Inst{21} = write;
let Inst{20} = 1;
let Inst{15-12} = 0b1111;
+
+ bits<17> addr;
+ let Inst{19-16} = addr{16-13}; // Rn
+ let Inst{23} = addr{12}; // U
+ let Inst{11-0} = addr{11-0}; // imm12
}
- def i8 : T2I<(outs), (ins GPR:$base, neg_zero:$imm), IIC_iLoadi, opc,
- "\t[$base, $imm]", []> {
+ def i8 : T2Ii8<(outs), (ins t2addrmode_imm8:$addr), IIC_Preload, opc,
+ "\t$addr",
+ [(ARMPreload t2addrmode_imm8:$addr, (i32 write), (i32 instr))]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
let Inst{23} = 0; // U = 0
@@ -1177,22 +1557,15 @@ multiclass T2Ipl<bit instr, bit write, string opc> {
let Inst{20} = 1;
let Inst{15-12} = 0b1111;
let Inst{11-8} = 0b1100;
- }
- def pci : T2I<(outs), (ins GPR:$base, neg_zero:$imm), IIC_iLoadi, opc,
- "\t[pc, $imm]", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = ?; // add = (U == 1)
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{19-16} = 0b1111; // Rn = 0b1111
- let Inst{15-12} = 0b1111;
+ bits<13> addr;
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{7-0} = addr{7-0}; // imm8
}
- def r : T2I<(outs), (ins GPR:$base, GPR:$a), IIC_iLoadi, opc,
- "\t[$base, $a]", []> {
+ def s : T2Iso<(outs), (ins t2addrmode_so_reg:$addr), IIC_Preload, opc,
+ "\t$addr",
+ [(ARMPreload t2addrmode_so_reg:$addr, (i32 write), (i32 instr))]> {
let Inst{31-25} = 0b1111100;
let Inst{24} = instr;
let Inst{23} = 0; // add = TRUE for T1
@@ -1201,133 +1574,174 @@ multiclass T2Ipl<bit instr, bit write, string opc> {
let Inst{20} = 1;
let Inst{15-12} = 0b1111;
let Inst{11-6} = 0b000000;
- let Inst{5-4} = 0b00; // no shift is applied
- }
- def s : T2I<(outs), (ins GPR:$base, GPR:$a, i32imm:$shamt), IIC_iLoadi, opc,
- "\t[$base, $a, lsl $shamt]", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = 0; // add = TRUE for T1
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{15-12} = 0b1111;
- let Inst{11-6} = 0000000;
+ bits<10> addr;
+ let Inst{19-16} = addr{9-6}; // Rn
+ let Inst{3-0} = addr{5-2}; // Rm
+ let Inst{5-4} = addr{1-0}; // imm2
}
}
-defm t2PLD : T2Ipl<0, 0, "pld">;
-defm t2PLDW : T2Ipl<0, 1, "pldw">;
-defm t2PLI : T2Ipl<1, 0, "pli">;
+defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>;
+defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>;
+defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-def t2LDM : T2XI<(outs), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops), IIC_iLoadm,
- "ldm${addr:submode}${p}${addr:wide}\t$addr, $dsts", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = 0; // The W bit.
- let Inst{20} = 1; // Load
-}
+multiclass thumb2_ldst_mult<string asm, InstrItinClass itin,
+ InstrItinClass itin_upd, bit L_bit> {
+ def IA :
+ T2XI<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin, !strconcat(asm, "ia${p}.w\t$Rn, $regs"), []> {
+ bits<4> Rn;
+ bits<16> regs;
-def t2LDM_UPD : T2XIt<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops), IIC_iLoadm,
- "ldm${addr:submode}${p}${addr:wide}\t$addr!, $dsts",
- "$addr.addr = $wb", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = 1; // The W bit.
- let Inst{20} = 1; // Load
-}
-} // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b00;
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{22} = 0;
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
+ }
+ def IA_UPD :
+ T2XIt<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin_upd, !strconcat(asm, "ia${p}.w\t$Rn!, $regs"), "$Rn = $wb", []> {
+ bits<4> Rn;
+ bits<16> regs;
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
-def t2STM : T2XI<(outs), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops), IIC_iStorem,
- "stm${addr:submode}${p}${addr:wide}\t$addr, $srcs", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = 0; // The W bit.
- let Inst{20} = 0; // Store
-}
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b00;
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{22} = 0;
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
+ }
+ def DB :
+ T2XI<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin, !strconcat(asm, "db${p}.w\t$Rn, $regs"), []> {
+ bits<4> Rn;
+ bits<16> regs;
-def t2STM_UPD : T2XIt<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops),
- IIC_iStorem,
- "stm${addr:submode}${p}${addr:wide}\t$addr!, $srcs",
- "$addr.addr = $wb", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = 1; // The W bit.
- let Inst{20} = 0; // Store
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b00;
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{22} = 0;
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
+ }
+ def DB_UPD :
+ T2XIt<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
+ itin_upd, !strconcat(asm, "db${p}.w\t$Rn, $regs"), "$Rn = $wb", []> {
+ bits<4> Rn;
+ bits<16> regs;
+
+ let Inst{31-27} = 0b11101;
+ let Inst{26-25} = 0b00;
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{22} = 0;
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
+ }
}
-} // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
+
+let neverHasSideEffects = 1 in {
+
+let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
+defm t2LDM : thumb2_ldst_mult<"ldm", IIC_iLoad_m, IIC_iLoad_mu, 1>;
+
+let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
+defm t2STM : thumb2_ldst_mult<"stm", IIC_iStore_m, IIC_iStore_mu, 0>;
+
+} // neverHasSideEffects
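+
+// Per TableGen defm naming, each defm above expands to four defs (IA, DB,
+// and their writeback variants), e.g. t2LDMIA, t2LDMIA_UPD, t2LDMDB and
+// t2LDMDB_UPD.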
+
//===----------------------------------------------------------------------===//
// Move Instructions.
//
let neverHasSideEffects = 1 in
-def t2MOVr : T2sI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov", ".w\t$dst, $src", []> {
+def t2MOVr : T2sTwoReg<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVr,
+ "mov", ".w\t$Rd, $Rm", []> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
let Inst{14-12} = 0b000;
let Inst{7-4} = 0b0000;
}
// AddedComplexity to ensure isel tries t2MOVi before t2MOVi16.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, AddedComplexity = 1 in
-def t2MOVi : T2sI<(outs rGPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi,
- "mov", ".w\t$dst, $src",
- [(set rGPR:$dst, t2_so_imm:$src)]> {
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+ AddedComplexity = 1 in
+def t2MOVi : T2sOneRegImm<(outs rGPR:$Rd), (ins t2_so_imm:$imm), IIC_iMOVi,
+ "mov", ".w\t$Rd, $imm",
+ [(set rGPR:$Rd, t2_so_imm:$imm)]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
}
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def t2MOVi16 : T2I<(outs rGPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "movw", "\t$dst, $src",
- [(set rGPR:$dst, imm0_65535:$src)]> {
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
+def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins i32imm_hilo16:$imm), IIC_iMOVi,
+ "movw", "\t$Rd, $imm",
+ [(set rGPR:$Rd, imm0_65535:$imm)]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0010;
let Inst{20} = 0; // The S bit.
let Inst{15} = 0;
+
+ bits<4> Rd;
+ bits<16> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = imm{15-12};
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
}
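+// Encoding illustration: movw r0, #0xabcd splits the immediate as
+// imm4:i:imm3:imm8 = 0b1010 : 1 : 0b011 : 0xcd, landing in Inst{19-16},
+// Inst{26}, Inst{14-12} and Inst{7-0} respectively.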
-let Constraints = "$src = $dst" in
-def t2MOVTi16 : T2I<(outs rGPR:$dst), (ins rGPR:$src, i32imm:$imm), IIC_iMOVi,
- "movt", "\t$dst, $imm",
- [(set rGPR:$dst,
+def t2MOVi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
+ (ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+
+let Constraints = "$src = $Rd" in {
+def t2MOVTi16 : T2I<(outs rGPR:$Rd),
+ (ins rGPR:$src, i32imm_hilo16:$imm), IIC_iMOVi,
+ "movt", "\t$Rd, $imm",
+ [(set rGPR:$Rd,
(or (and rGPR:$src, 0xffff), lo16AllZero:$imm))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0110;
let Inst{20} = 0; // The S bit.
let Inst{15} = 0;
+
+ bits<4> Rd;
+ bits<16> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = imm{15-12};
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
}
+def t2MOVTi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
+ (ins rGPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
+} // Constraints
+
def : T2Pat<(or rGPR:$src, 0xffff0000), (t2MOVTi16 rGPR:$src, 0xffff)>;
//===----------------------------------------------------------------------===//
@@ -1336,28 +1750,28 @@ def : T2Pat<(or rGPR:$src, 0xffff0000), (t2MOVTi16 rGPR:$src, 0xffff)>;
// Sign extenders
-defm t2SXTB : T2I_unary_rrot<0b100, "sxtb",
+defm t2SXTB : T2I_ext_rrot<0b100, "sxtb",
UnOpFrag<(sext_inreg node:$Src, i8)>>;
-defm t2SXTH : T2I_unary_rrot<0b000, "sxth",
+defm t2SXTH : T2I_ext_rrot<0b000, "sxth",
UnOpFrag<(sext_inreg node:$Src, i16)>>;
-defm t2SXTB16 : T2I_unary_rrot_sxtb16<0b010, "sxtb16">;
+defm t2SXTB16 : T2I_ext_rrot_sxtb16<0b010, "sxtb16">;
-defm t2SXTAB : T2I_bin_rrot<0b100, "sxtab",
+defm t2SXTAB : T2I_exta_rrot<0b100, "sxtab",
BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
-defm t2SXTAH : T2I_bin_rrot<0b000, "sxtah",
+defm t2SXTAH : T2I_exta_rrot<0b000, "sxtah",
BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
-defm t2SXTAB16 : T2I_bin_rrot_DO<0b010, "sxtab16">;
+defm t2SXTAB16 : T2I_exta_rrot_DO<0b010, "sxtab16">;
// TODO: SXT(A){B|H}16 are currently implemented for disassembly only.
// Zero extenders
let AddedComplexity = 16 in {
-defm t2UXTB : T2I_unary_rrot<0b101, "uxtb",
+defm t2UXTB : T2I_ext_rrot<0b101, "uxtb",
UnOpFrag<(and node:$Src, 0x000000FF)>>;
-defm t2UXTH : T2I_unary_rrot<0b001, "uxth",
+defm t2UXTH : T2I_ext_rrot<0b001, "uxth",
UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm t2UXTB16 : T2I_unary_rrot_uxtb16<0b011, "uxtb16",
+defm t2UXTB16 : T2I_ext_rrot_uxtb16<0b011, "uxtb16",
UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
@@ -1365,15 +1779,17 @@ defm t2UXTB16 : T2I_unary_rrot_uxtb16<0b011, "uxtb16",
// instead so we can include a check for masking back in the upper
// eight bits of the source into the lower eight bits of the result.
//def : T2Pat<(and (shl rGPR:$Src, (i32 8)), 0xFF00FF),
-// (t2UXTB16r_rot rGPR:$Src, 24)>, Requires<[HasT2ExtractPack]>;
+// (t2UXTB16r_rot rGPR:$Src, 24)>,
+// Requires<[HasT2ExtractPack, IsThumb2]>;
def : T2Pat<(and (srl rGPR:$Src, (i32 8)), 0xFF00FF),
- (t2UXTB16r_rot rGPR:$Src, 8)>, Requires<[HasT2ExtractPack]>;
+ (t2UXTB16r_rot rGPR:$Src, 8)>,
+ Requires<[HasT2ExtractPack, IsThumb2]>;
-defm t2UXTAB : T2I_bin_rrot<0b101, "uxtab",
+defm t2UXTAB : T2I_exta_rrot<0b101, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
-defm t2UXTAH : T2I_bin_rrot<0b001, "uxtah",
+defm t2UXTAH : T2I_exta_rrot<0b001, "uxtah",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
-defm t2UXTAB16 : T2I_bin_rrot_DO<0b011, "uxtab16">;
+defm t2UXTAB16 : T2I_exta_rrot_DO<0b011, "uxtab16">;
}
//===----------------------------------------------------------------------===//
@@ -1387,8 +1803,10 @@ defm t2SUB : T2I_bin_ii12rs<0b101, "sub",
// ADD and SUB with 's' bit set. No 12-bit immediate (T4) variants.
defm t2ADDS : T2I_bin_s_irs <0b1000, "add",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsi,
BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>;
defm t2SUBS : T2I_bin_s_irs <0b1101, "sub",
+ IIC_iALUi, IIC_iALUr, IIC_iALUsi,
BinOpFrag<(subc node:$LHS, node:$RHS)>>;
defm t2ADC : T2I_adde_sube_irs<0b1010, "adc",
@@ -1436,8 +1854,8 @@ def : T2Pat<(adde rGPR:$src, t2_so_imm_not:$imm),
// Select Bytes -- for disassembly only
-def t2SEL : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), NoItinerary, "sel",
- "\t$dst, $a, $b", []> {
+def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+ NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []> {
let Inst{31-27} = 0b11111;
let Inst{26-24} = 0b010;
let Inst{23} = 0b1;
@@ -1450,28 +1868,41 @@ def t2SEL : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), NoItinerary, "sel",
// A6.3.13, A6.3.14, A6.3.15 Parallel addition and subtraction (signed/unsigned)
// And Miscellaneous operations -- for disassembly only
class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
- list<dag> pat = [/* For disassembly only; pattern left blank */]>
- : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), NoItinerary, opc,
- "\t$dst, $a, $b", pat> {
+ list<dag> pat = [/* For disassembly only; pattern left blank */],
+ dag iops = (ins rGPR:$Rn, rGPR:$Rm),
+ string asm = "\t$Rd, $Rn, $Rm">
+ : T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0101;
let Inst{22-20} = op22_20;
let Inst{15-12} = 0b1111;
let Inst{7-4} = op7_4;
+
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{3-0} = Rm;
}
// Saturating add/subtract -- for disassembly only
def t2QADD : T2I_pam<0b000, 0b1000, "qadd",
- [(set rGPR:$dst, (int_arm_qadd rGPR:$a, rGPR:$b))]>;
+ [(set rGPR:$Rd, (int_arm_qadd rGPR:$Rn, rGPR:$Rm))],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
def t2QADD16 : T2I_pam<0b001, 0b0001, "qadd16">;
def t2QADD8 : T2I_pam<0b000, 0b0001, "qadd8">;
def t2QASX : T2I_pam<0b010, 0b0001, "qasx">;
-def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd">;
-def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub">;
+def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd", [],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
+def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub", [],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
def t2QSAX : T2I_pam<0b110, 0b0001, "qsax">;
def t2QSUB : T2I_pam<0b000, 0b1010, "qsub",
- [(set rGPR:$dst, (int_arm_qsub rGPR:$a, rGPR:$b))]>;
+ [(set rGPR:$Rd, (int_arm_qsub rGPR:$Rn, rGPR:$Rm))],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
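+// Note the swapped operand lists above: the ARM syntax for qadd, qdadd,
+// qdsub and qsub is "Rd, Rm, Rn", so the first printed source register
+// binds to the Rm field (Inst{3-0}) rather than Rn.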
def t2QSUB16 : T2I_pam<0b101, 0b0001, "qsub16">;
def t2QSUB8 : T2I_pam<0b100, 0b0001, "qsub8">;
def t2UQADD16 : T2I_pam<0b001, 0b0101, "uqadd16">;
@@ -1511,21 +1942,61 @@ def t2UHSAX : T2I_pam<0b110, 0b0110, "uhsax">;
def t2UHSUB16 : T2I_pam<0b101, 0b0110, "uhsub16">;
def t2UHSUB8 : T2I_pam<0b100, 0b0110, "uhsub8">;
+// Helper classes for disassembly only
+// A6.3.16 & A6.3.17
+// Thumb2 multiply [accumulate, and absolute difference] instructions.
+class T2ThreeReg_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops,
+ dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : T2ThreeReg<oops, iops, itin, opc, asm, pattern> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-24} = 0b011;
+ let Inst{23} = long;
+ let Inst{22-20} = op22_20;
+ let Inst{7-4} = op7_4;
+}
+
+class T2FourReg_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops,
+ dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : T2FourReg<oops, iops, itin, opc, asm, pattern> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-24} = 0b011;
+ let Inst{23} = long;
+ let Inst{22-20} = op22_20;
+ let Inst{7-4} = op7_4;
+}
+
// Unsigned Sum of Absolute Differences [and Accumulate] -- for disassembly only
-def t2USAD8 : T2I_mac<0, 0b111, 0b0000, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b),
- NoItinerary, "usad8", "\t$dst, $a, $b", []> {
+def t2USAD8 : T2ThreeReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm),
+ NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []> {
let Inst{15-12} = 0b1111;
}
-def t2USADA8 : T2I_mac<0, 0b111, 0b0000, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b, rGPR:$acc), NoItinerary, "usada8",
- "\t$dst, $a, $b, $acc", []>;
+def t2USADA8 : T2FourReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), NoItinerary,
+ "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>;
// Signed/Unsigned saturate -- for disassembly only
-def t2SSAT: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a, shift_imm:$sh),
- NoItinerary, "ssat", "\t$dst, $bit_pos, $a$sh",
+class T2SatI<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<5> sat_imm;
+ bits<7> sh;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{4-0} = sat_imm{4-0};
+ let Inst{21} = sh{6};
+ let Inst{14-12} = sh{4-2};
+ let Inst{7-6} = sh{1-0};
+}
+
+def t2SSAT: T2SatI<
+ (outs rGPR:$Rd), (ins i32imm:$sat_imm, rGPR:$Rn, shift_imm:$sh),
+ NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-27} = 0b11110;
let Inst{25-22} = 0b1100;
@@ -1533,8 +2004,9 @@ def t2SSAT: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a, shift_imm:$sh),
let Inst{15} = 0;
}
-def t2SSAT16: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a), NoItinerary,
- "ssat16", "\t$dst, $bit_pos, $a",
+def t2SSAT16: T2SatI<
+ (outs rGPR:$Rd), (ins i32imm:$sat_imm, rGPR:$Rn), NoItinerary,
+ "ssat16", "\t$Rd, $sat_imm, $Rn",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-27} = 0b11110;
let Inst{25-22} = 0b1100;
@@ -1545,8 +2017,9 @@ def t2SSAT16: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a), NoItinerary,
let Inst{7-6} = 0b00; // imm2 = '00'
}
-def t2USAT: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a, shift_imm:$sh),
- NoItinerary, "usat", "\t$dst, $bit_pos, $a$sh",
+def t2USAT: T2SatI<
+ (outs rGPR:$Rd), (ins i32imm:$sat_imm, rGPR:$Rn, shift_imm:$sh),
+ NoItinerary, "usat", "\t$Rd, $sat_imm, $Rn$sh",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-27} = 0b11110;
let Inst{25-22} = 0b1110;
@@ -1554,8 +2027,9 @@ def t2USAT: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a, shift_imm:$sh),
let Inst{15} = 0;
}
-def t2USAT16: T2I<(outs rGPR:$dst), (ins i32imm:$bit_pos, rGPR:$a), NoItinerary,
- "usat16", "\t$dst, $bit_pos, $a",
+def t2USAT16: T2SatI<
+  (outs rGPR:$Rd), (ins i32imm:$sat_imm, rGPR:$Rn), NoItinerary,
+  "usat16", "\t$Rd, $sat_imm, $Rn",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-27} = 0b11110;
let Inst{25-22} = 0b1110;
@@ -1579,23 +2053,23 @@ defm t2ASR : T2I_sh_ir<0b10, "asr", BinOpFrag<(sra node:$LHS, node:$RHS)>>;
defm t2ROR : T2I_sh_ir<0b11, "ror", BinOpFrag<(rotr node:$LHS, node:$RHS)>>;
let Uses = [CPSR] in {
-def t2MOVrx : T2sI<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVsi,
- "rrx", "\t$dst, $src",
- [(set rGPR:$dst, (ARMrrx rGPR:$src))]> {
+def t2RRX : T2sTwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
+ "rrx", "\t$Rd, $Rm",
+ [(set rGPR:$Rd, (ARMrrx rGPR:$Rm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
let Inst{19-16} = 0b1111; // Rn
let Inst{14-12} = 0b000;
let Inst{7-4} = 0b0011;
}
}
-let Defs = [CPSR] in {
-def t2MOVsrl_flag : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVsi,
- "lsrs", ".w\t$dst, $src, #1",
- [(set rGPR:$dst, (ARMsrl_flag rGPR:$src))]> {
+let isCodeGenOnly = 1, Defs = [CPSR] in {
+def t2MOVsrl_flag : T2TwoRegShiftImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
+ "lsrs", ".w\t$Rd, $Rm, #1",
+ [(set rGPR:$Rd, (ARMsrl_flag rGPR:$Rm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -1606,9 +2080,10 @@ def t2MOVsrl_flag : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVsi,
let Inst{14-12} = 0b000;
let Inst{7-6} = 0b01;
}
-def t2MOVsra_flag : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVsi,
- "asrs", ".w\t$dst, $src, #1",
- [(set rGPR:$dst, (ARMsra_flag rGPR:$src))]> {
+def t2MOVsra_flag : T2TwoRegShiftImm<
+ (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iMOVsi,
+ "asrs", ".w\t$Rd, $Rm, #1",
+ [(set rGPR:$Rd, (ARMsra_flag rGPR:$Rm))]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -1626,39 +2101,67 @@ def t2MOVsra_flag : T2I<(outs rGPR:$dst), (ins rGPR:$src), IIC_iMOVsi,
//
defm t2AND : T2I_bin_w_irs<0b0000, "and",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi,
BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
defm t2ORR : T2I_bin_w_irs<0b0010, "orr",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi,
BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
defm t2EOR : T2I_bin_w_irs<0b0100, "eor",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi,
BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
defm t2BIC : T2I_bin_w_irs<0b0001, "bic",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi,
BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
-defm t2ANDS : T2I_bin_s_irs<0b0000, "and",
- BinOpFrag<(ARMand node:$LHS, node:$RHS)>, 1>;
+class T2BitFI<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ bits<5> msb;
+ bits<5> lsb;
+
+ let Inst{11-8} = Rd;
+ let Inst{4-0} = msb{4-0};
+ let Inst{14-12} = lsb{4-2};
+ let Inst{7-6} = lsb{1-0};
+}
+
+class T2TwoRegBitFI<dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2BitFI<oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rn;
-let Constraints = "$src = $dst" in
-def t2BFC : T2I<(outs rGPR:$dst), (ins rGPR:$src, bf_inv_mask_imm:$imm),
- IIC_iUNAsi, "bfc", "\t$dst, $imm",
- [(set rGPR:$dst, (and rGPR:$src, bf_inv_mask_imm:$imm))]> {
+ let Inst{19-16} = Rn;
+}
+
+let Constraints = "$src = $Rd" in
+def t2BFC : T2BitFI<(outs rGPR:$Rd), (ins rGPR:$src, bf_inv_mask_imm:$imm),
+ IIC_iUNAsi, "bfc", "\t$Rd, $imm",
+ [(set rGPR:$Rd, (and rGPR:$src, bf_inv_mask_imm:$imm))]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-20} = 0b10110;
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
+
+ bits<10> imm;
+ let msb{4-0} = imm{9-5};
+ let lsb{4-0} = imm{4-0};
}
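+// Worked example: bfc r0, #8, #4 clears bits 11:8, so lsb = 8 and
+// msb = 8 + 4 - 1 = 11; bf_inv_mask_imm packs this as imm = msb:lsb =
+// 0b01011_01000.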
-def t2SBFX: T2I<(outs rGPR:$dst), (ins rGPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "sbfx", "\t$dst, $src, $lsb, $width", []> {
+def t2SBFX: T2TwoRegBitFI<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, imm0_31:$lsb, imm0_31_m1:$msb),
+ IIC_iUNAsi, "sbfx", "\t$Rd, $Rn, $lsb, $msb", []> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-20} = 0b10100;
let Inst{15} = 0;
}
-def t2UBFX: T2I<(outs rGPR:$dst), (ins rGPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "ubfx", "\t$dst, $src, $lsb, $width", []> {
+def t2UBFX: T2TwoRegBitFI<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, imm0_31:$lsb, imm0_31_m1:$msb),
+ IIC_iUNAsi, "ubfx", "\t$Rd, $Rn, $lsb, $msb", []> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-20} = 0b11100;
@@ -1666,24 +2169,50 @@ def t2UBFX: T2I<(outs rGPR:$dst), (ins rGPR:$src, imm0_31:$lsb, imm0_31:$width),
}
// A8.6.18 BFI - Bitfield insert (Encoding T1)
-let Constraints = "$src = $dst" in
-def t2BFI : T2I<(outs rGPR:$dst),
- (ins rGPR:$src, rGPR:$val, bf_inv_mask_imm:$imm),
- IIC_iALUi, "bfi", "\t$dst, $val, $imm",
- [(set rGPR:$dst, (ARMbfi rGPR:$src, rGPR:$val,
- bf_inv_mask_imm:$imm))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-20} = 0b10110;
- let Inst{15} = 0;
+let Constraints = "$src = $Rd" in {
+ def t2BFI : T2TwoRegBitFI<(outs rGPR:$Rd),
+ (ins rGPR:$src, rGPR:$Rn, bf_inv_mask_imm:$imm),
+ IIC_iBITi, "bfi", "\t$Rd, $Rn, $imm",
+ [(set rGPR:$Rd, (ARMbfi rGPR:$src, rGPR:$Rn,
+ bf_inv_mask_imm:$imm))]> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-20} = 0b10110;
+ let Inst{15} = 0;
+
+ bits<10> imm;
+ let msb{4-0} = imm{9-5};
+ let lsb{4-0} = imm{4-0};
+ }
+
+ // GNU as only supports this form of bfi (w/ 4 arguments)
+ let isAsmParserOnly = 1 in
+ def t2BFI4p : T2TwoRegBitFI<(outs rGPR:$Rd),
+ (ins rGPR:$src, rGPR:$Rn, lsb_pos_imm:$lsbit,
+ width_imm:$width),
+ IIC_iBITi, "bfi", "\t$Rd, $Rn, $lsbit, $width",
+ []> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-20} = 0b10110;
+ let Inst{15} = 0;
+
+ bits<5> lsbit;
+ bits<5> width;
+ let msb{4-0} = width; // Custom encoder => lsb+width-1
+ let lsb{4-0} = lsbit;
+ }
}
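+// e.g. bfi r0, r1, #4, #8 inserts an 8-bit field at bit 4: the custom
+// encoder emits lsbit = 4 and msb = lsbit + width - 1 = 11.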
-defm t2ORN : T2I_bin_irs<0b0011, "orn", BinOpFrag<(or node:$LHS,
- (not node:$RHS))>, 0, "">;
+defm t2ORN : T2I_bin_irs<0b0011, "orn",
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi,
+ BinOpFrag<(or node:$LHS, (not node:$RHS))>, 0, "">;
// Prefer t2MVN over t2EORri ra, rb, -1 because mvn has a 16-bit version.
let AddedComplexity = 1 in
-defm t2MVN : T2I_un_irs <0b0011, "mvn", UnOpFrag<(not node:$Src)>, 1, 1>;
+defm t2MVN : T2I_un_irs <0b0011, "mvn",
+ IIC_iMVNi, IIC_iMVNr, IIC_iMVNsi,
+ UnOpFrag<(not node:$Src)>, 1, 1>;
let AddedComplexity = 1 in
@@ -1702,9 +2231,9 @@ def : T2Pat<(t2_so_imm_not:$src),
// Multiply Instructions.
//
let isCommutable = 1 in
-def t2MUL: T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- "mul", "\t$dst, $a, $b",
- [(set rGPR:$dst, (mul rGPR:$a, rGPR:$b))]> {
+def t2MUL: T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
+ "mul", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (mul rGPR:$Rn, rGPR:$Rm))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b000;
@@ -1712,83 +2241,63 @@ def t2MUL: T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
let Inst{7-4} = 0b0000; // Multiply
}
-def t2MLA: T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "mla", "\t$dst, $a, $b, $c",
- [(set rGPR:$dst, (add (mul rGPR:$a, rGPR:$b), rGPR:$c))]> {
+def t2MLA: T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "mla", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add (mul rGPR:$Rn, rGPR:$Rm), rGPR:$Ra))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b000;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0000; // Multiply
}
-def t2MLS: T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "mls", "\t$dst, $a, $b, $c",
- [(set rGPR:$dst, (sub rGPR:$c, (mul rGPR:$a, rGPR:$b)))]> {
+def t2MLS: T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "mls", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (sub rGPR:$Ra, (mul rGPR:$Rn, rGPR:$Rm)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b000;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0001; // Multiply and Subtract
}
// Extra precision multiplies with low / high results
let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
-def t2SMULL : T2I<(outs rGPR:$ldst, rGPR:$hdst),
- (ins rGPR:$a, rGPR:$b), IIC_iMUL64,
- "smull", "\t$ldst, $hdst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b000;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMULL : T2I<(outs rGPR:$ldst, rGPR:$hdst),
- (ins rGPR:$a, rGPR:$b), IIC_iMUL64,
- "umull", "\t$ldst, $hdst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b010;
- let Inst{7-4} = 0b0000;
-}
+def t2SMULL : T2MulLong<0b000, 0b0000,
+ (outs rGPR:$Rd, rGPR:$Ra),
+ (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL64,
+ "smull", "\t$Rd, $Ra, $Rn, $Rm", []>;
+
+def t2UMULL : T2MulLong<0b010, 0b0000,
+ (outs rGPR:$RdLo, rGPR:$RdHi),
+ (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL64,
+ "umull", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
} // isCommutable
// Multiply + accumulate
-def t2SMLAL : T2I<(outs rGPR:$ldst, rGPR:$hdst),
- (ins rGPR:$a, rGPR:$b), IIC_iMAC64,
- "smlal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b100;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMLAL : T2I<(outs rGPR:$ldst, rGPR:$hdst),
- (ins rGPR:$a, rGPR:$b), IIC_iMAC64,
- "umlal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b110;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMAAL : T2I<(outs rGPR:$ldst, rGPR:$hdst),
- (ins rGPR:$a, rGPR:$b), IIC_iMAC64,
- "umaal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b110;
- let Inst{7-4} = 0b0110;
-}
+def t2SMLAL : T2MulLong<0b100, 0b0000,
+ (outs rGPR:$RdLo, rGPR:$RdHi),
+ (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
+ "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
+
+def t2UMLAL : T2MulLong<0b110, 0b0000,
+ (outs rGPR:$RdLo, rGPR:$RdHi),
+ (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
+ "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
+
+def t2UMAAL : T2MulLong<0b110, 0b0110,
+ (outs rGPR:$RdLo, rGPR:$RdHi),
+ (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
+ "umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
} // neverHasSideEffects
// Rounding variants of the below included for disassembly only
// Most significant word multiply
-def t2SMMUL : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- "smmul", "\t$dst, $a, $b",
- [(set rGPR:$dst, (mulhs rGPR:$a, rGPR:$b))]> {
+def t2SMMUL : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
+ "smmul", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (mulhs rGPR:$Rn, rGPR:$Rm))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b101;
@@ -1796,8 +2305,8 @@ def t2SMMUL : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
}
-def t2SMMULR : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- "smmulr", "\t$dst, $a, $b", []> {
+def t2SMMULR : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
+ "smmulr", "\t$Rd, $Rn, $Rm", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b101;
@@ -1805,49 +2314,49 @@ def t2SMMULR : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
}
-def t2SMMLA : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "smmla", "\t$dst, $a, $b, $c",
- [(set rGPR:$dst, (add (mulhs rGPR:$a, rGPR:$b), rGPR:$c))]> {
+def t2SMMLA : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "smmla", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b101;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
}
-def t2SMMLAR: T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "smmlar", "\t$dst, $a, $b, $c", []> {
+def t2SMMLAR: T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "smmlar", "\t$Rd, $Rn, $Rm, $Ra", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b101;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
}
-def t2SMMLS: T2I <(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "smmls", "\t$dst, $a, $b, $c",
- [(set rGPR:$dst, (sub rGPR:$c, (mulhs rGPR:$a, rGPR:$b)))]> {
+def t2SMMLS: T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "smmls", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (sub rGPR:$Ra, (mulhs rGPR:$Rn, rGPR:$Rm)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b110;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
}
-def t2SMMLSR:T2I <(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$c), IIC_iMAC32,
- "smmlsr", "\t$dst, $a, $b, $c", []> {
+def t2SMMLSR:T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
+ "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b110;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
}
multiclass T2I_smul<string opc, PatFrag opnode> {
- def BB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- !strconcat(opc, "bb"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (opnode (sext_inreg rGPR:$a, i16),
- (sext_inreg rGPR:$b, i16)))]> {
+ def BB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode (sext_inreg rGPR:$Rn, i16),
+ (sext_inreg rGPR:$Rm, i16)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -1856,10 +2365,10 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
let Inst{5-4} = 0b00;
}
- def BT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- !strconcat(opc, "bt"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (opnode (sext_inreg rGPR:$a, i16),
- (sra rGPR:$b, (i32 16))))]> {
+ def BT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode (sext_inreg rGPR:$Rn, i16),
+ (sra rGPR:$Rm, (i32 16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -1868,10 +2377,10 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
let Inst{5-4} = 0b01;
}
- def TB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- !strconcat(opc, "tb"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (opnode (sra rGPR:$a, (i32 16)),
- (sext_inreg rGPR:$b, i16)))]> {
+ def TB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode (sra rGPR:$Rn, (i32 16)),
+ (sext_inreg rGPR:$Rm, i16)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -1880,10 +2389,10 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
let Inst{5-4} = 0b10;
}
- def TT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL32,
- !strconcat(opc, "tt"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (opnode (sra rGPR:$a, (i32 16)),
- (sra rGPR:$b, (i32 16))))]> {
+ def TT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (opnode (sra rGPR:$Rn, (i32 16)),
+ (sra rGPR:$Rm, (i32 16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
@@ -1892,10 +2401,10 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
let Inst{5-4} = 0b11;
}
- def WB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL16,
- !strconcat(opc, "wb"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (sra (opnode rGPR:$a,
- (sext_inreg rGPR:$b, i16)), (i32 16)))]> {
+ def WB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (sra (opnode rGPR:$Rn,
+ (sext_inreg rGPR:$Rm, i16)), (i32 16)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
@@ -1904,10 +2413,10 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
let Inst{5-4} = 0b00;
}
- def WT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b), IIC_iMUL16,
- !strconcat(opc, "wt"), "\t$dst, $a, $b",
- [(set rGPR:$dst, (sra (opnode rGPR:$a,
- (sra rGPR:$b, (i32 16))), (i32 16)))]> {
+ def WT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
+ !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (sra (opnode rGPR:$Rn,
+ (sra rGPR:$Rm, (i32 16))), (i32 16)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
@@ -1919,75 +2428,75 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
multiclass T2I_smla<string opc, PatFrag opnode> {
- def BB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "bb"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc,
- (opnode (sext_inreg rGPR:$a, i16),
- (sext_inreg rGPR:$b, i16))))]> {
+ def BB : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra,
+ (opnode (sext_inreg rGPR:$Rn, i16),
+ (sext_inreg rGPR:$Rm, i16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b00;
}
- def BT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "bt"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc, (opnode (sext_inreg rGPR:$a, i16),
- (sra rGPR:$b, (i32 16)))))]> {
+ def BT : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sext_inreg rGPR:$Rn, i16),
+ (sra rGPR:$Rm, (i32 16)))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b01;
}
- def TB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "tb"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc, (opnode (sra rGPR:$a, (i32 16)),
- (sext_inreg rGPR:$b, i16))))]> {
+ def TB : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
+ (sext_inreg rGPR:$Rm, i16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b10;
}
- def TT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "tt"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc, (opnode (sra rGPR:$a, (i32 16)),
- (sra rGPR:$b, (i32 16)))))]> {
+ def TT : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
+ (sra rGPR:$Rm, (i32 16)))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b11;
}
- def WB : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "wb"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc, (sra (opnode rGPR:$a,
- (sext_inreg rGPR:$b, i16)), (i32 16))))]> {
+ def WB : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
+ (sext_inreg rGPR:$Rm, i16)), (i32 16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b00;
}
- def WT : T2I<(outs rGPR:$dst), (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC16,
- !strconcat(opc, "wt"), "\t$dst, $a, $b, $acc",
- [(set rGPR:$dst, (add rGPR:$acc, (sra (opnode rGPR:$a,
- (sra rGPR:$b, (i32 16))), (i32 16))))]> {
+ def WT : T2FourReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
+ !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
+ (sra rGPR:$Rm, (i32 16))), (i32 16))))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0110;
let Inst{22-20} = 0b011;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
let Inst{7-6} = 0b00;
let Inst{5-4} = 0b01;
}
@@ -1997,62 +2506,68 @@ defm t2SMUL : T2I_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
defm t2SMLA : T2I_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
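+// Per TableGen defm naming, these expand per suffix into t2SMULBB..t2SMULWT
+// and t2SMLABB..t2SMLAWT.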
// Halfword multiple accumulate long: SMLAL<x><y> -- for disassembly only
-def t2SMLALBB : T2I_mac<1, 0b100, 0b1000, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlalbb", "\t$ldst, $hdst, $a, $b",
+def t2SMLALBB : T2FourReg_mac<1, 0b100, 0b1000, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlalbb", "\t$Ra, $Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>;
-def t2SMLALBT : T2I_mac<1, 0b100, 0b1001, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlalbt", "\t$ldst, $hdst, $a, $b",
+def t2SMLALBT : T2FourReg_mac<1, 0b100, 0b1001, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlalbt", "\t$Ra, $Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>;
-def t2SMLALTB : T2I_mac<1, 0b100, 0b1010, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlaltb", "\t$ldst, $hdst, $a, $b",
+def t2SMLALTB : T2FourReg_mac<1, 0b100, 0b1010, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlaltb", "\t$Ra, $Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>;
-def t2SMLALTT : T2I_mac<1, 0b100, 0b1011, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlaltt", "\t$ldst, $hdst, $a, $b",
+def t2SMLALTT : T2FourReg_mac<1, 0b100, 0b1011, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlaltt", "\t$Ra, $Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>;
// Dual halfword multiple: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
// These are for disassembly only.
-def t2SMUAD: T2I_mac<0, 0b010, 0b0000, (outs rGPR:$dst), (ins rGPR:$a, rGPR:$b),
- IIC_iMAC32, "smuad", "\t$dst, $a, $b", []> {
+def t2SMUAD: T2ThreeReg_mac<
+ 0, 0b010, 0b0000, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
+ IIC_iMAC32, "smuad", "\t$Rd, $Rn, $Rm", []> {
let Inst{15-12} = 0b1111;
}
-def t2SMUADX:T2I_mac<0, 0b010, 0b0001, (outs rGPR:$dst), (ins rGPR:$a, rGPR:$b),
- IIC_iMAC32, "smuadx", "\t$dst, $a, $b", []> {
+def t2SMUADX:T2ThreeReg_mac<
+ 0, 0b010, 0b0001, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
+ IIC_iMAC32, "smuadx", "\t$Rd, $Rn, $Rm", []> {
let Inst{15-12} = 0b1111;
}
-def t2SMUSD: T2I_mac<0, 0b100, 0b0000, (outs rGPR:$dst), (ins rGPR:$a, rGPR:$b),
- IIC_iMAC32, "smusd", "\t$dst, $a, $b", []> {
+def t2SMUSD: T2ThreeReg_mac<
+ 0, 0b100, 0b0000, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
+ IIC_iMAC32, "smusd", "\t$Rd, $Rn, $Rm", []> {
let Inst{15-12} = 0b1111;
}
-def t2SMUSDX:T2I_mac<0, 0b100, 0b0001, (outs rGPR:$dst), (ins rGPR:$a, rGPR:$b),
- IIC_iMAC32, "smusdx", "\t$dst, $a, $b", []> {
+def t2SMUSDX:T2ThreeReg_mac<
+ 0, 0b100, 0b0001, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
+ IIC_iMAC32, "smusdx", "\t$Rd, $Rn, $Rm", []> {
let Inst{15-12} = 0b1111;
}
-def t2SMLAD : T2I_mac<0, 0b010, 0b0000, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC32, "smlad",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLADX : T2I_mac<0, 0b010, 0b0001, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC32, "smladx",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLSD : T2I_mac<0, 0b100, 0b0000, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC32, "smlsd",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLSDX : T2I_mac<0, 0b100, 0b0001, (outs rGPR:$dst),
- (ins rGPR:$a, rGPR:$b, rGPR:$acc), IIC_iMAC32, "smlsdx",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLALD : T2I_mac<1, 0b100, 0b1100, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlald",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLALDX : T2I_mac<1, 0b100, 0b1101, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlaldx",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLSLD : T2I_mac<1, 0b101, 0b1100, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlsld",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLSLDX : T2I_mac<1, 0b101, 0b1101, (outs rGPR:$ldst,rGPR:$hdst),
- (ins rGPR:$a,rGPR:$b), IIC_iMAC64, "smlsldx",
- "\t$ldst, $hdst, $a, $b", []>;
+def t2SMLAD   : T2FourReg_mac<
+ 0, 0b010, 0b0000, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlad",
+ "\t$Rd, $Rn, $Rm, $Ra", []>;
+def t2SMLADX : T2FourReg_mac<
+ 0, 0b010, 0b0001, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smladx",
+ "\t$Rd, $Rn, $Rm, $Ra", []>;
+def t2SMLSD : T2FourReg_mac<0, 0b100, 0b0000, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlsd",
+ "\t$Rd, $Rn, $Rm, $Ra", []>;
+def t2SMLSDX : T2FourReg_mac<0, 0b100, 0b0001, (outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlsdx",
+ "\t$Rd, $Rn, $Rm, $Ra", []>;
+def t2SMLALD : T2FourReg_mac<1, 0b100, 0b1100, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rm, rGPR:$Rn), IIC_iMAC64, "smlald",
+ "\t$Ra, $Rd, $Rm, $Rn", []>;
+def t2SMLALDX : T2FourReg_mac<1, 0b100, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlaldx",
+ "\t$Ra, $Rd, $Rm, $Rn", []>;
+def t2SMLSLD : T2FourReg_mac<1, 0b101, 0b1100, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlsld",
+ "\t$Ra, $Rd, $Rm, $Rn", []>;
+def t2SMLSLDX : T2FourReg_mac<1, 0b101, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
+ (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlsldx",
+ "\t$Ra, $Rd, $Rm, $Rn", []>;
//===----------------------------------------------------------------------===//
// Misc. Arithmetic Instructions.
@@ -2060,99 +2575,117 @@ def t2SMLSLDX : T2I_mac<1, 0b101, 0b1101, (outs rGPR:$ldst,rGPR:$hdst),
class T2I_misc<bits<2> op1, bits<2> op2, dag oops, dag iops,
InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
+ : T2ThreeReg<oops, iops, itin, opc, asm, pattern> {
let Inst{31-27} = 0b11111;
let Inst{26-22} = 0b01010;
let Inst{21-20} = op1;
let Inst{15-12} = 0b1111;
let Inst{7-6} = 0b10;
let Inst{5-4} = op2;
+ let Rn{3-0} = Rm;
}
-def t2CLZ : T2I_misc<0b11, 0b00, (outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- "clz", "\t$dst, $src", [(set rGPR:$dst, (ctlz rGPR:$src))]>;
+def t2CLZ : T2I_misc<0b11, 0b00, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
+ "clz", "\t$Rd, $Rm", [(set rGPR:$Rd, (ctlz rGPR:$Rm))]>;
-def t2RBIT : T2I_misc<0b01, 0b10, (outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- "rbit", "\t$dst, $src",
- [(set rGPR:$dst, (ARMrbit rGPR:$src))]>;
+def t2RBIT : T2I_misc<0b01, 0b10, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
+ "rbit", "\t$Rd, $Rm",
+ [(set rGPR:$Rd, (ARMrbit rGPR:$Rm))]>;
-def t2REV : T2I_misc<0b01, 0b00, (outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- "rev", ".w\t$dst, $src", [(set rGPR:$dst, (bswap rGPR:$src))]>;
+def t2REV : T2I_misc<0b01, 0b00, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
+ "rev", ".w\t$Rd, $Rm", [(set rGPR:$Rd, (bswap rGPR:$Rm))]>;
-def t2REV16 : T2I_misc<0b01, 0b01, (outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- "rev16", ".w\t$dst, $src",
- [(set rGPR:$dst,
- (or (and (srl rGPR:$src, (i32 8)), 0xFF),
- (or (and (shl rGPR:$src, (i32 8)), 0xFF00),
- (or (and (srl rGPR:$src, (i32 8)), 0xFF0000),
- (and (shl rGPR:$src, (i32 8)), 0xFF000000)))))]>;
+def t2REV16 : T2I_misc<0b01, 0b01, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
+ "rev16", ".w\t$Rd, $Rm",
+ [(set rGPR:$Rd,
+ (or (and (srl rGPR:$Rm, (i32 8)), 0xFF),
+ (or (and (shl rGPR:$Rm, (i32 8)), 0xFF00),
+ (or (and (srl rGPR:$Rm, (i32 8)), 0xFF0000),
+ (and (shl rGPR:$Rm, (i32 8)), 0xFF000000)))))]>;
-def t2REVSH : T2I_misc<0b01, 0b11, (outs rGPR:$dst), (ins rGPR:$src), IIC_iUNAr,
- "revsh", ".w\t$dst, $src",
- [(set rGPR:$dst,
+def t2REVSH : T2I_misc<0b01, 0b11, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
+ "revsh", ".w\t$Rd, $Rm",
+ [(set rGPR:$Rd,
(sext_inreg
- (or (srl (and rGPR:$src, 0xFF00), (i32 8)),
- (shl rGPR:$src, (i32 8))), i16))]>;
-
-def t2PKHBT : T2I<(outs rGPR:$dst), (ins rGPR:$src1, rGPR:$src2, shift_imm:$sh),
- IIC_iALUsi, "pkhbt", "\t$dst, $src1, $src2$sh",
- [(set rGPR:$dst, (or (and rGPR:$src1, 0xFFFF),
- (and (shl rGPR:$src2, lsl_amt:$sh),
+ (or (srl (and rGPR:$Rm, 0xFF00), (i32 8)),
+ (shl rGPR:$Rm, (i32 8))), i16))]>;
+
+def t2PKHBT : T2ThreeReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, shift_imm:$sh),
+ IIC_iBITsi, "pkhbt", "\t$Rd, $Rn, $Rm$sh",
+ [(set rGPR:$Rd, (or (and rGPR:$Rn, 0xFFFF),
+ (and (shl rGPR:$Rm, lsl_amt:$sh),
0xFFFF0000)))]>,
- Requires<[HasT2ExtractPack]> {
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-20} = 0b01100;
let Inst{5} = 0; // BT form
let Inst{4} = 0;
+
+ bits<8> sh;
+ let Inst{14-12} = sh{7-5};
+ let Inst{7-6} = sh{4-3};
}
// Alternate cases for PKHBT where identities eliminate some nodes.
def : T2Pat<(or (and rGPR:$src1, 0xFFFF), (and rGPR:$src2, 0xFFFF0000)),
(t2PKHBT rGPR:$src1, rGPR:$src2, 0)>,
- Requires<[HasT2ExtractPack]>;
+ Requires<[HasT2ExtractPack, IsThumb2]>;
def : T2Pat<(or (and rGPR:$src1, 0xFFFF), (shl rGPR:$src2, imm16_31:$sh)),
(t2PKHBT rGPR:$src1, rGPR:$src2, (lsl_shift_imm imm16_31:$sh))>,
- Requires<[HasT2ExtractPack]>;
+ Requires<[HasT2ExtractPack, IsThumb2]>;
// Note: Shifts of 1-15 bits will be transformed to srl instead of sra and
// will match the pattern below.
-def t2PKHTB : T2I<(outs rGPR:$dst), (ins rGPR:$src1, rGPR:$src2, shift_imm:$sh),
- IIC_iALUsi, "pkhtb", "\t$dst, $src1, $src2$sh",
- [(set rGPR:$dst, (or (and rGPR:$src1, 0xFFFF0000),
- (and (sra rGPR:$src2, asr_amt:$sh),
+def t2PKHTB : T2ThreeReg<
+ (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, shift_imm:$sh),
+ IIC_iBITsi, "pkhtb", "\t$Rd, $Rn, $Rm$sh",
+ [(set rGPR:$Rd, (or (and rGPR:$Rn, 0xFFFF0000),
+ (and (sra rGPR:$Rm, asr_amt:$sh),
0xFFFF)))]>,
- Requires<[HasT2ExtractPack]> {
+ Requires<[HasT2ExtractPack, IsThumb2]> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-20} = 0b01100;
let Inst{5} = 1; // TB form
let Inst{4} = 0;
+
+ bits<8> sh;
+ let Inst{14-12} = sh{7-5};
+ let Inst{7-6} = sh{4-3};
}
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here, it is PKHBT instead.
def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000), (srl rGPR:$src2, imm16_31:$sh)),
(t2PKHTB rGPR:$src1, rGPR:$src2, (asr_shift_imm imm16_31:$sh))>,
- Requires<[HasT2ExtractPack]>;
+ Requires<[HasT2ExtractPack, IsThumb2]>;
def : T2Pat<(or (and rGPR:$src1, 0xFFFF0000),
(and (srl rGPR:$src2, imm1_15:$sh), 0xFFFF)),
(t2PKHTB rGPR:$src1, rGPR:$src2, (asr_shift_imm imm1_15:$sh))>,
- Requires<[HasT2ExtractPack]>;
+ Requires<[HasT2ExtractPack, IsThumb2]>;
//===----------------------------------------------------------------------===//
// Comparison Instructions...
//
defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
-defm t2CMPz : T2I_cmp_irs<0b1101, "cmp",
- BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>;
+
+def : T2Pat<(ARMcmpZ GPR:$lhs, t2_so_imm:$imm),
+ (t2CMPri GPR:$lhs, t2_so_imm:$imm)>;
+def : T2Pat<(ARMcmpZ GPR:$lhs, rGPR:$rhs),
+ (t2CMPrr GPR:$lhs, rGPR:$rhs)>;
+def : T2Pat<(ARMcmpZ GPR:$lhs, t2_so_reg:$rhs),
+ (t2CMPrs GPR:$lhs, t2_so_reg:$rhs)>;
//FIXME: Disable CMN, as CCodes are backwards from compare expectations
// Compare-to-zero still works out, just not the relationals
//defm t2CMN : T2I_cmp_irs<0b1000, "cmn",
// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
defm t2CMNz : T2I_cmp_irs<0b1000, "cmn",
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
//def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
@@ -2162,18 +2695,21 @@ def : T2Pat<(ARMcmpZ GPR:$src, t2_so_imm_neg:$imm),
(t2CMNzri GPR:$src, t2_so_imm_neg:$imm)>;
defm t2TST : T2I_cmp_irs<0b0000, "tst",
- BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>>;
+ IIC_iTSTi, IIC_iTSTr, IIC_iTSTsi,
+ BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>>;
defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
- BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>>;
+ IIC_iTSTi, IIC_iTSTr, IIC_iTSTsi,
+ BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>>;
// Conditional moves
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
-def t2MOVCCr : T2I<(outs rGPR:$dst), (ins rGPR:$false, rGPR:$true), IIC_iCMOVr,
- "mov", ".w\t$dst, $true",
- [/*(set rGPR:$dst, (ARMcmov rGPR:$false, rGPR:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst"> {
+def t2MOVCCr : T2TwoReg<
+ (outs rGPR:$Rd), (ins rGPR:$false, rGPR:$Rm), IIC_iCMOVr,
+ "mov", ".w\t$Rd, $Rm",
+ [/*(set rGPR:$Rd, (ARMcmov rGPR:$false, rGPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd"> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -2183,10 +2719,11 @@ def t2MOVCCr : T2I<(outs rGPR:$dst), (ins rGPR:$false, rGPR:$true), IIC_iCMOVr,
let Inst{7-4} = 0b0000;
}
-def t2MOVCCi : T2I<(outs rGPR:$dst), (ins rGPR:$false, t2_so_imm:$true),
- IIC_iCMOVi, "mov", ".w\t$dst, $true",
-[/*(set rGPR:$dst,(ARMcmov rGPR:$false,t2_so_imm:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst"> {
+let isMoveImm = 1 in
+def t2MOVCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm),
+ IIC_iCMOVi, "mov", ".w\t$Rd, $imm",
+[/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd"> {
let Inst{31-27} = 0b11110;
let Inst{25} = 0;
let Inst{24-21} = 0b0010;
@@ -2195,9 +2732,49 @@ def t2MOVCCi : T2I<(outs rGPR:$dst), (ins rGPR:$false, t2_so_imm:$true),
let Inst{15} = 0;
}
+let isMoveImm = 1 in
+def t2MOVCCi16 : T2I<(outs rGPR:$Rd), (ins rGPR:$false, i32imm_hilo16:$imm),
+ IIC_iCMOVi,
+ "movw", "\t$Rd, $imm", []>,
+ RegConstraint<"$false = $Rd"> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-21} = 0b0010;
+ let Inst{20} = 0; // The S bit.
+ let Inst{15} = 0;
+
+ bits<4> Rd;
+ bits<16> imm;
+
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = imm{15-12};
+ let Inst{26} = imm{11};
+ let Inst{14-12} = imm{10-8};
+ let Inst{7-0} = imm{7-0};
+}
+
+let isMoveImm = 1 in
+def t2MOVCCi32imm : PseudoInst<(outs rGPR:$dst),
+ (ins rGPR:$false, i32imm:$src, pred:$p),
+ IIC_iCMOVix2, []>, RegConstraint<"$false = $dst">;
+
+let isMoveImm = 1 in
+def t2MVNCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm),
+ IIC_iCMOVi, "mvn", ".w\t$Rd, $imm",
+[/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm_not:$imm,
+ imm:$cc, CCR:$ccr))*/]>,
+ RegConstraint<"$false = $Rd"> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 0;
+ let Inst{24-21} = 0b0011;
+ let Inst{20} = 0; // The S bit.
+ let Inst{19-16} = 0b1111; // Rn
+ let Inst{15} = 0;
+}
+
class T2I_movcc_sh<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
+ : T2TwoRegShiftImm<oops, iops, itin, opc, asm, pattern> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b01;
let Inst{24-21} = 0b0010;
@@ -2205,22 +2782,22 @@ class T2I_movcc_sh<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
let Inst{19-16} = 0b1111; // Rn
let Inst{5-4} = opcod; // Shift type.
}
-def t2MOVCClsl : T2I_movcc_sh<0b00, (outs rGPR:$dst),
- (ins rGPR:$false, rGPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "lsl", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCClsr : T2I_movcc_sh<0b01, (outs rGPR:$dst),
- (ins rGPR:$false, rGPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "lsr", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCCasr : T2I_movcc_sh<0b10, (outs rGPR:$dst),
- (ins rGPR:$false, rGPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "asr", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$dst),
- (ins rGPR:$false, rGPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "ror", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
+def t2MOVCClsl : T2I_movcc_sh<0b00, (outs rGPR:$Rd),
+ (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
+ IIC_iCMOVsi, "lsl", ".w\t$Rd, $Rm, $imm", []>,
+ RegConstraint<"$false = $Rd">;
+def t2MOVCClsr : T2I_movcc_sh<0b01, (outs rGPR:$Rd),
+ (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
+ IIC_iCMOVsi, "lsr", ".w\t$Rd, $Rm, $imm", []>,
+ RegConstraint<"$false = $Rd">;
+def t2MOVCCasr : T2I_movcc_sh<0b10, (outs rGPR:$Rd),
+ (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
+ IIC_iCMOVsi, "asr", ".w\t$Rd, $Rm, $imm", []>,
+ RegConstraint<"$false = $Rd">;
+def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd),
+ (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
+ IIC_iCMOVsi, "ror", ".w\t$Rd, $Rm, $imm", []>,
+ RegConstraint<"$false = $Rd">;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
@@ -2229,78 +2806,29 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$dst),
// memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
-def t2DMBsy : AInoP<(outs), (ins), ThumbFrm, NoItinerary, "dmb", "",
- [(ARMMemBarrier)]>, Requires<[IsThumb, HasDB]> {
- let Inst{31-4} = 0xF3BF8F5;
- // FIXME: add support for options other than a full system DMB
- let Inst{3-0} = 0b1111;
-}
-
-def t2DSBsy : AInoP<(outs), (ins), ThumbFrm, NoItinerary, "dsb", "",
- [(ARMSyncBarrier)]>, Requires<[IsThumb, HasDB]> {
- let Inst{31-4} = 0xF3BF8F4;
- // FIXME: add support for options other than a full system DSB
- let Inst{3-0} = 0b1111;
-}
+def t2DMB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
+ "dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>,
+ Requires<[IsThumb, HasDB]> {
+ bits<4> opt;
+ let Inst{31-4} = 0xf3bf8f5;
+ let Inst{3-0} = opt;
}
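+// e.g. dmb sy encodes opt = 0b1111, giving the word 0xf3bf8f5f, while
+// dmb ish encodes opt = 0b1011 (0xf3bf8f5b).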
-
-// Helper class for multiclass T2MemB -- for disassembly only
-class T2I_memb<string opc, string asm>
- : T2I<(outs), (ins), NoItinerary, opc, asm,
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsThumb2, HasV7]> {
- let Inst{31-20} = 0xf3b;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
}
-multiclass T2MemB<bits<4> op7_4, string opc> {
-
- def st : T2I_memb<opc, "\tst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1110;
- }
-
- def ish : T2I_memb<opc, "\tish"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1011;
- }
-
- def ishst : T2I_memb<opc, "\tishst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1010;
- }
-
- def nsh : T2I_memb<opc, "\tnsh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0111;
- }
-
- def nshst : T2I_memb<opc, "\tnshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0110;
- }
-
- def osh : T2I_memb<opc, "\tosh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0011;
- }
-
- def oshst : T2I_memb<opc, "\toshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0010;
- }
+def t2DSB : AInoP<(outs), (ins memb_opt:$opt), ThumbFrm, NoItinerary,
+ "dsb", "\t$opt",
+ [/* For disassembly only; pattern left blank */]>,
+ Requires<[IsThumb, HasDB]> {
+ bits<4> opt;
+ let Inst{31-4} = 0xf3bf8f4;
+ let Inst{3-0} = opt;
}
-// These DMB variants are for disassembly only.
-defm t2DMB : T2MemB<0b0101, "dmb">;
-
-// These DSB variants are for disassembly only.
-defm t2DSB : T2MemB<0b0100, "dsb">;
-
// ISB has only full system option -- for disassembly only
-def t2ISBsy : T2I_memb<"isb", ""> {
- let Inst{7-4} = 0b0110;
+def t2ISB : AInoP<(outs), (ins), ThumbFrm, NoItinerary, "isb", "",
+ [/* For disassembly only; pattern left blank */]>,
+ Requires<[IsThumb2, HasV7]> {
+ let Inst{31-4} = 0xf3bf8f6;
let Inst{3-0} = 0b1111;
}
@@ -2314,6 +2842,11 @@ class T2I_ldrex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz,
let Inst{7-6} = 0b01;
let Inst{5-4} = opcod;
let Inst{3-0} = 0b1111;
+
+ bits<4> Rn;
+ bits<4> Rt;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
}
class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz,
InstrItinClass itin, string opc, string asm, string cstr,
@@ -2324,60 +2857,88 @@ class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz,
let Inst{11-8} = rt2;
let Inst{7-6} = 0b01;
let Inst{5-4} = opcod;
+
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rt;
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
}
let mayLoad = 1 in {
-def t2LDREXB : T2I_ldrex<0b00, (outs rGPR:$dest), (ins rGPR:$ptr), AddrModeNone,
- Size4Bytes, NoItinerary, "ldrexb", "\t$dest, [$ptr]",
+def t2LDREXB : T2I_ldrex<0b00, (outs rGPR:$Rt), (ins rGPR:$Rn), AddrModeNone,
+ Size4Bytes, NoItinerary, "ldrexb", "\t$Rt, [$Rn]",
"", []>;
-def t2LDREXH : T2I_ldrex<0b01, (outs rGPR:$dest), (ins rGPR:$ptr), AddrModeNone,
- Size4Bytes, NoItinerary, "ldrexh", "\t$dest, [$ptr]",
+def t2LDREXH : T2I_ldrex<0b01, (outs rGPR:$Rt), (ins rGPR:$Rn), AddrModeNone,
+ Size4Bytes, NoItinerary, "ldrexh", "\t$Rt, [$Rn]",
"", []>;
-def t2LDREX : Thumb2I<(outs rGPR:$dest), (ins rGPR:$ptr), AddrModeNone,
+def t2LDREX : Thumb2I<(outs rGPR:$Rt), (ins rGPR:$Rn), AddrModeNone,
Size4Bytes, NoItinerary,
- "ldrex", "\t$dest, [$ptr]", "",
+ "ldrex", "\t$Rt, [$Rn]", "",
[]> {
let Inst{31-27} = 0b11101;
let Inst{26-20} = 0b0000101;
let Inst{11-8} = 0b1111;
let Inst{7-0} = 0b00000000; // imm8 = 0
+
+ bits<4> Rn;
+ bits<4> Rt;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
}
-def t2LDREXD : T2I_ldrex<0b11, (outs rGPR:$dest, rGPR:$dest2), (ins rGPR:$ptr),
+def t2LDREXD : T2I_ldrex<0b11, (outs rGPR:$Rt, rGPR:$Rt2), (ins rGPR:$Rn),
AddrModeNone, Size4Bytes, NoItinerary,
- "ldrexd", "\t$dest, $dest2, [$ptr]", "",
- [], {?, ?, ?, ?}>;
+ "ldrexd", "\t$Rt, $Rt2, [$Rn]", "",
+ [], {?, ?, ?, ?}> {
+ bits<4> Rt2;
+ let Inst{11-8} = Rt2;
+}
}
-let mayStore = 1, Constraints = "@earlyclobber $success" in {
-def t2STREXB : T2I_strex<0b00, (outs rGPR:$success), (ins rGPR:$src, rGPR:$ptr),
+let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
+def t2STREXB : T2I_strex<0b00, (outs rGPR:$Rd), (ins rGPR:$Rt, rGPR:$Rn),
AddrModeNone, Size4Bytes, NoItinerary,
- "strexb", "\t$success, $src, [$ptr]", "", []>;
-def t2STREXH : T2I_strex<0b01, (outs rGPR:$success), (ins rGPR:$src, rGPR:$ptr),
+ "strexb", "\t$Rd, $Rt, [$Rn]", "", []>;
+def t2STREXH : T2I_strex<0b01, (outs rGPR:$Rd), (ins rGPR:$Rt, rGPR:$Rn),
AddrModeNone, Size4Bytes, NoItinerary,
- "strexh", "\t$success, $src, [$ptr]", "", []>;
-def t2STREX : Thumb2I<(outs rGPR:$success), (ins rGPR:$src, rGPR:$ptr),
+ "strexh", "\t$Rd, $Rt, [$Rn]", "", []>;
+def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt, rGPR:$Rn),
AddrModeNone, Size4Bytes, NoItinerary,
- "strex", "\t$success, $src, [$ptr]", "",
+ "strex", "\t$Rd, $Rt, [$Rn]", "",
[]> {
let Inst{31-27} = 0b11101;
let Inst{26-20} = 0b0000100;
let Inst{7-0} = 0b00000000; // imm8 = 0
+
+ bits<4> Rd;
+ bits<4> Rn;
+ bits<4> Rt;
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = Rt;
}
-def t2STREXD : T2I_strex<0b11, (outs rGPR:$success),
- (ins rGPR:$src, rGPR:$src2, rGPR:$ptr),
+def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd),
+ (ins rGPR:$Rt, rGPR:$Rt2, rGPR:$Rn),
AddrModeNone, Size4Bytes, NoItinerary,
- "strexd", "\t$success, $src, $src2, [$ptr]", "", [],
- {?, ?, ?, ?}>;
+ "strexd", "\t$Rd, $Rt, $Rt2, [$Rn]", "", [],
+ {?, ?, ?, ?}> {
+ bits<4> Rt2;
+ let Inst{11-8} = Rt2;
+}
}
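+
+// Illustrative only (not from this source): these exclusives are meant to be
+// paired in a retry loop, with the strex status register reporting success:
+//   1: ldrex  r1, [r0]        @ load-exclusive the current value
+//      add    r1, r1, #1      @ compute the updated value
+//      strex  r2, r1, [r0]    @ store-exclusive; r2 = 0 on success
+//      cmp    r2, #0
+//      bne    1b              @ retry if the exclusive monitor was lost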
// Clear-Exclusive is for disassembly only.
-def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-20} = 0xf3b;
+def t2CLREX : T2XI<(outs), (ins), NoItinerary, "clrex",
+ [/* For disassembly only; pattern left blank */]>,
+ Requires<[IsThumb2, HasV7]> {
+ let Inst{31-16} = 0xf3bf;
let Inst{15-14} = 0b10;
+ let Inst{13} = 0;
let Inst{12} = 0;
+ let Inst{11-8} = 0b1111;
let Inst{7-4} = 0b0010;
+ let Inst{3-0} = 0b1111;
}
//===----------------------------------------------------------------------===//
@@ -2386,7 +2947,7 @@ def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "",
// __aeabi_read_tp preserves the registers r1-r3.
let isCall = 1,
- Defs = [R0, R12, LR, CPSR] in {
+ Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
def t2TPsoft : T2XI<(outs), (ins), IIC_Br,
"bl\t__aeabi_read_tp",
[(set R0, ARMthread_pointer)]> {
@@ -2413,32 +2974,18 @@ let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
- D31 ], hasSideEffects = 1, isBarrier = 1 in {
+ D31 ], hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
def t2Int_eh_sjlj_setjmp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
- AddrModeNone, SizeSpecial, NoItinerary,
- "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
- "adds\t$val, #7\n\t"
- "str\t$val, [$src, #4]\n\t"
- "movs\tr0, #0\n\t"
- "b\t1f\n\t"
- "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
- "1:", "",
+ AddrModeNone, SizeSpecial, NoItinerary, "", "",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,
Requires<[IsThumb2, HasVFP2]>;
}
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ],
- hasSideEffects = 1, isBarrier = 1 in {
+ hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
def t2Int_eh_sjlj_setjmp_nofp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
- AddrModeNone, SizeSpecial, NoItinerary,
- "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
- "adds\t$val, #7\n\t"
- "str\t$val, [$src, #4]\n\t"
- "movs\tr0, #0\n\t"
- "b\t1f\n\t"
- "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
- "1:", "",
+ AddrModeNone, SizeSpecial, NoItinerary, "", "",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,
Requires<[IsThumb2, NoVFP]>;
}
@@ -2453,82 +3000,77 @@ let Defs =
// operand list.
// FIXME: Should pc be an implicit operand like PICADD, etc?
let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
- hasExtraDefRegAllocReq = 1 in
- def t2LDM_RET : T2XIt<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops), IIC_Br,
- "ldm${addr:submode}${p}${addr:wide}\t$addr!, $dsts",
- "$addr.addr = $wb", []> {
+ hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
+def t2LDMIA_RET: T2XIt<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
+ reglist:$regs, variable_ops),
+ IIC_iLoad_mBr,
+ "ldmia${p}.w\t$Rn!, $regs",
+ "$Rn = $wb", []> {
+ bits<4> Rn;
+ bits<16> regs;
+
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = 1; // The W bit.
- let Inst{20} = 1; // Load
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{22} = 0;
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = 1;
+ let Inst{19-16} = Rn;
+ let Inst{15-0} = regs;
}
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
-def t2B : T2XI<(outs), (ins brtarget:$target), IIC_Br,
+def t2B : T2XI<(outs), (ins uncondbrtarget:$target), IIC_Br,
"b.w\t$target",
[(br bb:$target)]> {
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 1;
+
+ bits<20> target;
+ let Inst{26} = target{19};
+ let Inst{11} = target{18};
+ let Inst{13} = target{17};
+ let Inst{21-16} = target{16-11};
+ let Inst{10-0} = target{10-0};
}
let isNotDuplicable = 1, isIndirectBranch = 1 in {
-def t2BR_JT :
- T2JTI<(outs),
- (ins GPR:$target, GPR:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target$jt",
- [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0100100;
- let Inst{19-16} = 0b1111;
- let Inst{14-12} = 0b000;
- let Inst{11-8} = 0b1111; // Rd = pc
- let Inst{7-4} = 0b0000;
-}
+def t2BR_JT : t2PseudoInst<(outs),
+ (ins GPR:$target, GPR:$index, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br,
+ [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]>;
// FIXME: Add a non-pc based case that can be predicated.
-def t2TBB :
- T2JTI<(outs),
- (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "tbb\t$index$jt", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction)
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0000; // B form
-}
-
-def t2TBH :
- T2JTI<(outs),
- (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "tbh\t$index$jt", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction)
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0001; // H form
-}
-
-// Generic versions of the above two instructions, for disassembly only
-
-def t2TBBgen : T2I<(outs), (ins GPR:$a, GPR:$b), IIC_Br,
- "tbb", "\t[$a, $b]", []>{
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0000; // B form
-}
-
-def t2TBHgen : T2I<(outs), (ins GPR:$a, GPR:$b), IIC_Br,
- "tbh", "\t[$a, $b, lsl #1]", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0001; // H form
+def t2TBB_JT : t2PseudoInst<(outs),
+ (ins GPR:$index, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br, []>;
+
+def t2TBH_JT : t2PseudoInst<(outs),
+ (ins GPR:$index, i32imm:$jt, i32imm:$id),
+ SizeSpecial, IIC_Br, []>;
+
+def t2TBB : T2I<(outs), (ins GPR:$Rn, GPR:$Rm), IIC_Br,
+ "tbb", "\t[$Rn, $Rm]", []> {
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{31-20} = 0b111010001101;
+ let Inst{19-16} = Rn;
+ let Inst{15-5} = 0b11110000000;
+ let Inst{4} = 0; // B form
+ let Inst{3-0} = Rm;
+}
+
+def t2TBH : T2I<(outs), (ins GPR:$Rn, GPR:$Rm), IIC_Br,
+ "tbh", "\t[$Rn, $Rm, lsl #1]", []> {
+ bits<4> Rn;
+ bits<4> Rm;
+ let Inst{31-20} = 0b111010001101;
+ let Inst{19-16} = Rn;
+ let Inst{15-5} = 0b11110000000;
+ let Inst{4} = 1; // H form
+ let Inst{3-0} = Rm;
}
} // isNotDuplicable, isIndirectBranch
@@ -2543,6 +3085,16 @@ def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 0;
+
+ bits<4> p;
+ let Inst{25-22} = p;
+
+ bits<21> target;
+ let Inst{26} = target{20};
+ let Inst{11} = target{19};
+ let Inst{13} = target{18};
+ let Inst{21-16} = target{17-12};
+ let Inst{10-0} = target{11-1};
}
@@ -2554,6 +3106,11 @@ def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
// 16-bit instruction.
let Inst{31-16} = 0x0000;
let Inst{15-8} = 0b10111111;
+
+ bits<4> cc;
+ bits<4> mask;
+ let Inst{7-4} = cc;
+ let Inst{3-0} = mask;
}
// Branch and Exchange Jazelle -- for disassembly only
@@ -2565,22 +3122,44 @@ def t2BXJ : T2I<(outs), (ins rGPR:$func), NoItinerary, "bxj", "\t$func",
let Inst{25-20} = 0b111100;
let Inst{15-14} = 0b10;
let Inst{12} = 0;
+
+ bits<4> func;
+ let Inst{19-16} = func;
}
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode from Inst{4-0}
-// opt{5} = changemode from Inst{17}
-// opt{8-6} = AIF from Inst{8-6}
-// opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
-def t2CPS : T2XI<(outs),(ins cps_opt:$opt), NoItinerary, "cps$opt",
- [/* For disassembly only; pattern left blank */]> {
+// Change Processor State is a system instruction -- for disassembly and
+// parsing only.
+// FIXME: Since the asm parser currently has no clean way to handle optional
+// operands, create 3 versions of the same instruction. Once there's a clean
+// framework for representing optional operands, change this behavior.
+class t2CPS<dag iops, string asm_op> : T2XI<(outs), iops, NoItinerary,
+ !strconcat("cps", asm_op),
+ [/* For disassembly only; pattern left blank */]> {
+ bits<2> imod;
+ bits<3> iflags;
+ bits<5> mode;
+ bit M;
+
let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
+ let Inst{26} = 0;
let Inst{25-20} = 0b111010;
+ let Inst{19-16} = 0b1111;
let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
+ let Inst{12} = 0;
+ let Inst{10-9} = imod;
+ let Inst{8} = M;
+ let Inst{7-5} = iflags;
+ let Inst{4-0} = mode;
+}
+
+let M = 1 in
+ def t2CPS3p : t2CPS<(ins imod_op:$imod, iflags_op:$iflags, i32imm:$mode),
+ "$imod.w\t$iflags, $mode">;
+let mode = 0, M = 0 in
+ def t2CPS2p : t2CPS<(ins imod_op:$imod, iflags_op:$iflags),
+ "$imod.w\t$iflags">;
+let imod = 0, iflags = 0, M = 1 in
+ def t2CPS1p : t2CPS<(ins i32imm:$mode), "\t$mode">;
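+
+// For illustration only (assembly forms, with hypothetical operand values):
+//   cpsie.w if, #16    @ t2CPS3p: imod + iflags + mode
+//   cpsie.w if         @ t2CPS2p: imod + iflags
+//   cps     #16        @ t2CPS1p: mode only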
// A6.3.4 Branches and miscellaneous control
// Table A6-14 Change Processor State, and hint instructions
@@ -2589,6 +3168,7 @@ class T2I_hint<bits<8> op7_0, string opc, string asm>
: T2I<(outs), (ins), NoItinerary, opc, asm,
[/* For disassembly only; pattern left blank */]> {
let Inst{31-20} = 0xf3a;
+ let Inst{19-16} = 0b1111;
let Inst{15-14} = 0b10;
let Inst{12} = 0;
let Inst{10-8} = 0b000;
@@ -2608,6 +3188,9 @@ def t2DBG : T2I<(outs),(ins i32imm:$opt), NoItinerary, "dbg", "\t$opt",
let Inst{12} = 0;
let Inst{10-8} = 0b000;
let Inst{7-4} = 0b1111;
+
+ bits<4> opt;
+ let Inst{3-0} = opt;
}
// Secure Monitor Call is a system instruction -- for disassembly only
@@ -2617,83 +3200,86 @@ def t2SMC : T2I<(outs), (ins i32imm:$opt), NoItinerary, "smc", "\t$opt",
let Inst{31-27} = 0b11110;
let Inst{26-20} = 0b1111111;
let Inst{15-12} = 0b1000;
-}
-// Store Return State is a system instruction -- for disassembly only
-def t2SRSDBW : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp!, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000010; // W = 1
+ bits<4> opt;
+ let Inst{19-16} = opt;
}
-def t2SRSDB : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000000; // W = 0
-}
+class T2SRS<bits<12> op31_20,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ let Inst{31-20} = op31_20{11-0};
-def t2SRSIAW : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsia","\tsp!, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011010; // W = 1
+ bits<5> mode;
+ let Inst{4-0} = mode{4-0};
}
-def t2SRSIA : T2I<(outs), (ins i32imm:$mode),NoItinerary,"srsia","\tsp, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011000; // W = 0
-}
+// Store Return State is a system instruction -- for disassembly only
+def t2SRSDBW : T2SRS<0b111010000010,
+ (outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp!, $mode",
+ [/* For disassembly only; pattern left blank */]>;
+def t2SRSDB : T2SRS<0b111010000000,
+ (outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp, $mode",
+ [/* For disassembly only; pattern left blank */]>;
+def t2SRSIAW : T2SRS<0b111010011010,
+ (outs),(ins i32imm:$mode),NoItinerary,"srsia","\tsp!, $mode",
+ [/* For disassembly only; pattern left blank */]>;
+def t2SRSIA : T2SRS<0b111010011000,
+ (outs), (ins i32imm:$mode),NoItinerary,"srsia","\tsp, $mode",
+ [/* For disassembly only; pattern left blank */]>;
// Return From Exception is a system instruction -- for disassembly only
-def t2RFEDBW : T2I<(outs), (ins rGPR:$base), NoItinerary, "rfedb", "\t$base!",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000011; // W = 1
-}
-def t2RFEDB : T2I<(outs), (ins rGPR:$base), NoItinerary, "rfeab", "\t$base",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000001; // W = 0
-}
+class T2RFE<bits<12> op31_20, dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ let Inst{31-20} = op31_20{11-0};
-def t2RFEIAW : T2I<(outs), (ins rGPR:$base), NoItinerary, "rfeia", "\t$base!",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011011; // W = 1
+ bits<4> Rn;
+ let Inst{19-16} = Rn;
}
-def t2RFEIA : T2I<(outs), (ins rGPR:$base), NoItinerary, "rfeia", "\t$base",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011001; // W = 0
-}
+def t2RFEDBW : T2RFE<0b111010000011,
+ (outs), (ins rGPR:$Rn), NoItinerary, "rfedb", "\t$Rn!",
+ [/* For disassembly only; pattern left blank */]>;
+def t2RFEDB : T2RFE<0b111010000001,
+                     (outs), (ins rGPR:$Rn), NoItinerary, "rfedb", "\t$Rn",
+ [/* For disassembly only; pattern left blank */]>;
+def t2RFEIAW : T2RFE<0b111010011011,
+ (outs), (ins rGPR:$Rn), NoItinerary, "rfeia", "\t$Rn!",
+ [/* For disassembly only; pattern left blank */]>;
+def t2RFEIA : T2RFE<0b111010011001,
+ (outs), (ins rGPR:$Rn), NoItinerary, "rfeia", "\t$Rn",
+ [/* For disassembly only; pattern left blank */]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
-// Two piece so_imms.
-def : T2Pat<(or rGPR:$LHS, t2_so_imm2part:$RHS),
- (t2ORRri (t2ORRri rGPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(xor rGPR:$LHS, t2_so_imm2part:$RHS),
- (t2EORri (t2EORri rGPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(add rGPR:$LHS, t2_so_imm2part:$RHS),
- (t2ADDri (t2ADDri rGPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(add rGPR:$LHS, t2_so_neg_imm2part:$RHS),
- (t2SUBri (t2SUBri rGPR:$LHS, (t2_so_neg_imm2part_1 imm:$RHS)),
- (t2_so_neg_imm2part_2 imm:$RHS))>;
-
// 32-bit immediate using movw + movt.
-// This is a single pseudo instruction to make it re-materializable. Remove
-// when we can do generalized remat.
-let isReMaterializable = 1 in
-def t2MOVi32imm : T2Ix2<(outs rGPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "movw", "\t$dst, ${src:lo16}\n\tmovt${p}\t$dst, ${src:hi16}",
- [(set rGPR:$dst, (i32 imm:$src))]>;
+// This is a single pseudo instruction to make it re-materializable.
+// FIXME: Remove this when we can do generalized remat.
+let isReMaterializable = 1, isMoveImm = 1 in
+def t2MOVi32imm : PseudoInst<(outs rGPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
+ [(set rGPR:$dst, (i32 imm:$src))]>,
+ Requires<[IsThumb, HasV6T2]>;
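+
+// Expansion sketch (performed later during code emission):
+//   movw rD, :lower16:imm
+//   movt rD, :upper16:imm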
+
+// Pseudo instruction that combines movw + movt + add pc (if pic).
+// It also makes it possible to rematerialize the instructions.
+// FIXME: Remove this when we can do generalized remat and when machine licm
+// can properly hoist the instructions.
+let isReMaterializable = 1 in {
+def t2MOV_ga_pcrel : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr),
+ IIC_iMOVix2addpc,
+ [(set rGPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
+ Requires<[IsThumb2, UseMovt]>;
+
+def t2MOV_ga_dyn : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr),
+ IIC_iMOVix2,
+ [(set rGPR:$dst, (ARMWrapperDYN tglobaladdr:$addr))]>,
+ Requires<[IsThumb2, UseMovt]>;
+}
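+
+// Expansion sketch for the pic form (illustrative; the exact pc-relative
+// offset is target-specific):
+//   movw rD, :lower16:(addr - (LPC + 4))
+//   movt rD, :upper16:(addr - (LPC + 4))
+// LPC: add rD, pc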
// ConstantPool, GlobalAddress, and JumpTable
def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2LEApcrel tglobaladdr :$dst)>,
@@ -2709,10 +3295,9 @@ def : T2Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
// be expanded into two instructions late to allow if-conversion and
// scheduling.
let canFoldAsLoad = 1, isReMaterializable = 1 in
-def t2LDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary,
- "${:comment} ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
- [(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
+def t2LDRpci_pic : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr, pclabel:$cp),
+ IIC_iLoadiALU,
+ [(set rGPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
imm:$cp))]>,
Requires<[IsThumb2]>;
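+
+// For illustration, the late expansion is roughly:
+//   ldr.w rD, <constant-pool entry>
+// cp: add  rD, pc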
@@ -2720,48 +3305,128 @@ def t2LDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
// Move between special register and ARM core register -- for disassembly only
//
-// Rd = Instr{11-8}
-def t2MRS : T2I<(outs rGPR:$dst), (ins), NoItinerary, "mrs", "\t$dst, cpsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11111;
- let Inst{20} = 0; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
+class T2SpecialReg<bits<12> op31_20, bits<2> op15_14, bits<1> op12,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, itin, opc, asm, pattern> {
+ let Inst{31-20} = op31_20{11-0};
+ let Inst{15-14} = op15_14{1-0};
+ let Inst{12} = op12{0};
}
-// Rd = Instr{11-8}
-def t2MRSsys : T2I<(outs rGPR:$dst), (ins), NoItinerary, "mrs", "\t$dst, spsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11111;
- let Inst{20} = 1; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
+class T2MRS<bits<12> op31_20, bits<2> op15_14, bits<1> op12,
+ dag oops, dag iops, InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : T2SpecialReg<op31_20, op15_14, op12, oops, iops, itin, opc, asm, pattern> {
+ bits<4> Rd;
+ let Inst{11-8} = Rd;
+ let Inst{19-16} = 0b1111;
}
-// Rn = Inst{19-16}
-def t2MSR : T2I<(outs), (ins rGPR:$src, msr_mask:$mask), NoItinerary, "msr",
- "\tcpsr$mask, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11100;
- let Inst{20} = 0; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
+def t2MRS : T2MRS<0b111100111110, 0b10, 0,
+ (outs rGPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, cpsr",
+ [/* For disassembly only; pattern left blank */]>;
+def t2MRSsys : T2MRS<0b111100111111, 0b10, 0,
+ (outs rGPR:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr",
+ [/* For disassembly only; pattern left blank */]>;
+
+// Move from ARM core register to Special Register
+//
+// No need to have both system and application versions; the encodings are the
+// same and the assembly parser has no way to distinguish between them. The
+// mask operand carries the special register selector (R bit) in bit 4, and
+// bits 3-0 contain the mask of the fields to be accessed in the special
+// register.
+def t2MSR : T2SpecialReg<0b111100111000 /* op31-20 */, 0b10 /* op15-14 */,
+ 0 /* op12 */, (outs), (ins msr_mask:$mask, rGPR:$Rn),
+ NoItinerary, "msr", "\t$mask, $Rn",
+ [/* For disassembly only; pattern left blank */]> {
+ bits<5> mask;
+ bits<4> Rn;
+ let Inst{19-16} = Rn;
+ let Inst{20} = mask{4}; // R Bit
+ let Inst{13} = 0b0;
+ let Inst{11-8} = mask{3-0};
}
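+
+// Encoding example (sketch): "msr cpsr_fc, r0" would use mask = 0b01001 --
+// the R bit (mask{4}) = 0 selects CPSR, and mask{3-0} = 0b1001 selects the
+// f and c fields.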
-// Rn = Inst{19-16}
-def t2MSRsys : T2I<(outs), (ins rGPR:$src, msr_mask:$mask), NoItinerary, "msr",
- "\tspsr$mask, $src",
+//===----------------------------------------------------------------------===//
+// Move between coprocessor and ARM core register -- for disassembly only
+//
+
+class t2MovRCopro<string opc, bit direction>
+ : T2Cop<(outs), (ins p_imm:$cop, i32imm:$opc1,
+ GPR:$Rt, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ !strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"),
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{27-24} = 0b1110;
+ let Inst{20} = direction;
+ let Inst{4} = 1;
+
+ bits<4> Rt;
+ bits<4> cop;
+ bits<3> opc1;
+ bits<3> opc2;
+ bits<4> CRm;
+ bits<4> CRn;
+
+ let Inst{15-12} = Rt;
+ let Inst{11-8} = cop;
+ let Inst{23-21} = opc1;
+ let Inst{7-5} = opc2;
+ let Inst{3-0} = CRm;
+ let Inst{19-16} = CRn;
+}
+
+def t2MCR2 : t2MovRCopro<"mcr2", 0 /* from ARM core register to coprocessor */>;
+def t2MRC2 : t2MovRCopro<"mrc2", 1 /* from coprocessor to ARM core register */>;
+
+class t2MovRRCopro<string opc, bit direction>
+ : T2Cop<(outs), (ins p_imm:$cop, i32imm:$opc1, GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
+ !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"),
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{27-24} = 0b1100;
+ let Inst{23-21} = 0b010;
+ let Inst{20} = direction;
+
+ bits<4> Rt;
+ bits<4> Rt2;
+ bits<4> cop;
+ bits<4> opc1;
+ bits<4> CRm;
+
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+ let Inst{11-8} = cop;
+ let Inst{7-4} = opc1;
+ let Inst{3-0} = CRm;
+}
+
+def t2MCRR2 : t2MovRRCopro<"mcrr2",
+ 0 /* from ARM core register to coprocessor */>;
+def t2MRRC2 : t2MovRRCopro<"mrrc2",
+ 1 /* from coprocessor to ARM core register */>;
+
+//===----------------------------------------------------------------------===//
+// Other Coprocessor Instructions. For disassembly only.
+//
+
+def t2CDP2 : T2Cop<(outs), (ins p_imm:$cop, i32imm:$opc1,
+ c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, i32imm:$opc2),
+ "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11100;
- let Inst{20} = 1; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
+ let Inst{27-24} = 0b1110;
+
+ bits<4> opc1;
+ bits<4> CRn;
+ bits<4> CRd;
+ bits<4> cop;
+ bits<3> opc2;
+ bits<4> CRm;
+
+ let Inst{3-0} = CRm;
+ let Inst{4} = 0;
+ let Inst{7-5} = opc2;
+ let Inst{11-8} = cop;
+ let Inst{15-12} = CRd;
+ let Inst{19-16} = CRn;
+ let Inst{23-20} = opc1;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index c29e096..920c5c9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -1,4 +1,4 @@
-//===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
+//===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,30 +11,26 @@
//
//===----------------------------------------------------------------------===//
-def SDT_FTOI :
-SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
-def SDT_ITOF :
-SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
-def SDT_CMPFP0 :
-SDTypeProfile<0, 1, [SDTCisFP<0>]>;
-def SDT_VMOVDRR :
-SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
- SDTCisSameAs<1, 2>]>;
-
-def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
-def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
-def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
-def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
-def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
-def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
-def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
-def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
+def SDT_FTOI : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
+def SDT_ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
+def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
+def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
+ SDTCisSameAs<1, 2>]>;
+
+def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
+def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
+def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
+def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
+def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
+def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
+def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
+def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
+
//===----------------------------------------------------------------------===//
// Operand Definitions.
//
-
def vfp_f32imm : Operand<f32>,
PatLeaf<(f32 fpimm), [{
return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
@@ -55,86 +51,136 @@ def vfp_f64imm : Operand<f64>,
//
let canFoldAsLoad = 1, isReMaterializable = 1 in {
-def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
- IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
- [(set DPR:$dst, (f64 (load addrmode5:$addr)))]>;
-def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
- IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
- [(set SPR:$dst, (load addrmode5:$addr))]>;
-} // canFoldAsLoad
+def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
+ IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
+ [(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
-def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
- IIC_fpStore64, "vstr", ".64\t$src, $addr",
- [(store (f64 DPR:$src), addrmode5:$addr)]>;
+def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
+ IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
+ [(set SPR:$Sd, (load addrmode5:$addr))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
- IIC_fpStore32, "vstr", ".32\t$src, $addr",
- [(store SPR:$src, addrmode5:$addr)]>;
+} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
-//===----------------------------------------------------------------------===//
-// Load / store multiple Instructions.
-//
+def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
+ IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
+ [(store (f64 DPR:$Dd), addrmode5:$addr)]>;
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-def VLDMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
- variable_ops), IndexModeNone, IIC_fpLoadm,
- "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
- let Inst{20} = 1;
+def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
+ IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
+ [(store SPR:$Sd, addrmode5:$addr)]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
-def VLDMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
- variable_ops), IndexModeNone, IIC_fpLoadm,
- "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
- let Inst{20} = 1;
-}
+//===----------------------------------------------------------------------===//
+// Load / store multiple Instructions.
+//
-def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops),
- IndexModeUpd, IIC_fpLoadm,
- "vldm${addr:submode}${p}\t$addr!, $dsts",
- "$addr.addr = $wb", []> {
- let Inst{20} = 1;
+multiclass vfp_ldst_mult<string asm, bit L_bit,
+ InstrItinClass itin, InstrItinClass itin_upd> {
+ // Double Precision
+ def DIA :
+ AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeNone, itin,
+ !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def DIA_UPD :
+ AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeUpd, itin_upd,
+ !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+ def DDB :
+ AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeNone, itin,
+ !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+ }
+ def DDB_UPD :
+ AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
+ IndexModeUpd, itin_upd,
+ !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+ }
+
+ // Single Precision
+ def SIA :
+ AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
+ IndexModeNone, itin,
+ !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+ let D = VFPNeonDomain;
+ }
+ def SIA_UPD :
+ AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
+ IndexModeUpd, itin_upd,
+ !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b01; // Increment After
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+ let D = VFPNeonDomain;
+ }
+ def SDB :
+ AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
+ IndexModeNone, itin,
+ !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 0; // No writeback
+ let Inst{20} = L_bit;
+
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+ let D = VFPNeonDomain;
+ }
+ def SDB_UPD :
+ AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
+ IndexModeUpd, itin_upd,
+ !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{21} = 1; // Writeback
+ let Inst{20} = L_bit;
+
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+ let D = VFPNeonDomain;
+ }
}
-def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$dsts, variable_ops),
- IndexModeUpd, IIC_fpLoadm,
- "vldm${addr:submode}${p}\t$addr!, $dsts",
- "$addr.addr = $wb", []> {
- let Inst{20} = 1;
-}
-} // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
+let neverHasSideEffects = 1 in {
-let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
-def VSTMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
- variable_ops), IndexModeNone, IIC_fpStorem,
- "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
- let Inst{20} = 0;
-}
+let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
+defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
-def VSTMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
- variable_ops), IndexModeNone, IIC_fpStorem,
- "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
- let Inst{20} = 0;
-}
+let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
+defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
-def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops),
- IndexModeUpd, IIC_fpStorem,
- "vstm${addr:submode}${p}\t$addr!, $srcs",
- "$addr.addr = $wb", []> {
- let Inst{20} = 0;
-}
+} // neverHasSideEffects
-def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
- reglist:$srcs, variable_ops),
- IndexModeUpd, IIC_fpStorem,
- "vstm${addr:submode}${p}\t$addr!, $srcs",
- "$addr.addr = $wb", []> {
- let Inst{20} = 0;
-}
-} // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
+def : MnemonicAlias<"vldm", "vldmia">;
+def : MnemonicAlias<"vstm", "vstmia">;
// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
@@ -142,56 +188,71 @@ def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
// FP Binary Operations.
//
-def VADDD : ADbI<0b11100, 0b11, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpALU64, "vadd", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd DPR:$a, (f64 DPR:$b)))]>;
-
-def VADDS : ASbIn<0b11100, 0b11, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpALU32, "vadd", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd SPR:$a, SPR:$b))]>;
-
-// These are encoded as unary instructions.
-let Defs = [FPSCR] in {
-def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$a, DPR:$b),
- IIC_fpCMP64, "vcmpe", ".f64\t$a, $b",
- [(arm_cmpfp DPR:$a, (f64 DPR:$b))]>;
-
-def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
- IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$a, SPR:$b),
- IIC_fpCMP32, "vcmpe", ".f32\t$a, $b",
- [(arm_cmpfp SPR:$a, SPR:$b)]>;
-
-def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
- IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
- [/* For disassembly only; pattern left blank */]>;
+def VADDD : ADbI<0b11100, 0b11, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
+
+def VADDS : ASbIn<0b11100, 0b11, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
-def VDIVD : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fdiv DPR:$a, (f64 DPR:$b)))]>;
-
-def VDIVS : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
-
-def VMULD : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fmul DPR:$a, (f64 DPR:$b)))]>;
-
-def VMULS : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
+def VSUBD : ADbI<0b11100, 0b11, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
+
+def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fneg (fmul DPR:$a, (f64 DPR:$b))))]>;
+def VDIVD : ADbI<0b11101, 0b00, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
+
+def VDIVS : ASbI<0b11101, 0b00, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
+
+def VMULD : ADbI<0b11100, 0b10, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
+
+def VMULS : ASbIn<0b11100, 0b10, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
+def VNMULD : ADbI<0b11100, 0b10, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;
+
+def VNMULS : ASbI<0b11100, 0b10, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
@@ -199,53 +260,128 @@ def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
(VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
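+// That is, fold (fneg a) * b into -(a * b); under sign-dependent rounding
+// modes (round toward +/- infinity) the two can differ, hence the
+// NoHonorSignDependentRounding guard.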
+// These are encoded as unary instructions.
+let Defs = [FPSCR] in {
+def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
+ (outs), (ins DPR:$Dd, DPR:$Dm),
+ IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
+ [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
+
+def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
+ (outs), (ins SPR:$Sd, SPR:$Sm),
+ IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
+ [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VSUBD : ADbI<0b11100, 0b11, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpALU64, "vsub", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub DPR:$a, (f64 DPR:$b)))]>;
+// FIXME: Verify encoding after integrated assembler is working.
+def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
+ (outs), (ins DPR:$Dd, DPR:$Dm),
+ IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
+ [/* For disassembly only; pattern left blank */]>;
-def VSUBS : ASbIn<0b11100, 0b11, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpALU32, "vsub", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
+def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
+ (outs), (ins SPR:$Sd, SPR:$Sm),
+ IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
+} // Defs = [FPSCR]
//===----------------------------------------------------------------------===//
// FP Unary Operations.
//
-def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
- [(set DPR:$dst, (fabs (f64 DPR:$a)))]>;
-
-def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
- [(set SPR:$dst, (fabs SPR:$a))]>;
+def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
+ [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
+
+def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (fabs SPR:$Sm))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
let Defs = [FPSCR] in {
-def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
- IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
- [(arm_cmpfp0 (f64 DPR:$a))]>;
+def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
+ (outs), (ins DPR:$Dd),
+ IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
+ [(arm_cmpfp0 (f64 DPR:$Dd))]> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
+}
-def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
- IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
- [/* For disassembly only; pattern left blank */]>;
+def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
+ (outs), (ins SPR:$Sd),
+ IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
+ [(arm_cmpfp0 SPR:$Sd)]> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
-def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
- IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
- [(arm_cmpfp0 SPR:$a)]>;
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
- IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
- [/* For disassembly only; pattern left blank */]>;
+// FIXME: Verify encoding after integrated assembler is working.
+def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
+ (outs), (ins DPR:$Dd),
+ IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
}
-def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
- [(set DPR:$dst, (fextend SPR:$a))]>;
+def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
+ (outs), (ins SPR:$Sd),
+ IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
+ [/* For disassembly only; pattern left blank */]> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
+
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
+} // Defs = [FPSCR]
+
+def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
+ (outs DPR:$Dd), (ins SPR:$Sm),
+ IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
+ [(set DPR:$Dd, (fextend SPR:$Sm))]> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+}
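+
+// Note on the operand slices above: a single-precision register number Sx is
+// five bits encoded as Sx{4-1} plus the low bit Sx{0} in a separate field,
+// while a double-precision Dx is encoded as Dx{3-0} plus the high bit Dx{4};
+// hence SPR and DPR operands are sliced differently.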
// Special case encoding: bits 11-8 is 0b1011.
-def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
- IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
- [(set SPR:$dst, (fround DPR:$a))]> {
+def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
+ IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
+ [(set SPR:$Sd, (fround DPR:$Dm))]> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
let Inst{27-23} = 0b11101;
let Inst{21-16} = 0b110111;
let Inst{11-8} = 0b1011;
@@ -255,6 +391,7 @@ def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
// Between half-precision and single-precision. For disassembly only.
+// FIXME: Verify encoding after integrated assembler is working.
def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
@@ -277,47 +414,94 @@ def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
-let neverHasSideEffects = 1 in {
-def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
-
-def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
-} // neverHasSideEffects
+def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
+ [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
+
+def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (fneg SPR:$Sm))]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
- [(set DPR:$dst, (fneg (f64 DPR:$a)))]>;
+def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
+ [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;
-def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
- [(set SPR:$dst, (fneg SPR:$a))]>;
+def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
-def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
- [(set DPR:$dst, (fsqrt (f64 DPR:$a)))]>;
+let neverHasSideEffects = 1 in {
+def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
+ (outs DPR:$Dd), (ins DPR:$Dm),
+ IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;
-def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
- IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
- [(set SPR:$dst, (fsqrt SPR:$a))]>;
+def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
+} // neverHasSideEffects
//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//
-def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
- IIC_fpMOVSI, "vmov", "\t$dst, $src",
- [(set GPR:$dst, (bitconvert SPR:$src))]>;
+def VMOVRS : AVConv2I<0b11100001, 0b1010,
+ (outs GPR:$Rt), (ins SPR:$Sn),
+ IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
+ [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
+ // Instruction operands.
+ bits<4> Rt;
+ bits<5> Sn;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Rt;
+
+ let Inst{6-5} = 0b00;
+ let Inst{3-0} = 0b0000;
+}
-def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
- IIC_fpMOVIS, "vmov", "\t$dst, $src",
- [(set SPR:$dst, (bitconvert GPR:$src))]>;
+def VMOVSR : AVConv4I<0b11100000, 0b1010,
+ (outs SPR:$Sn), (ins GPR:$Rt),
+ IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
+ [(set SPR:$Sn, (bitconvert GPR:$Rt))]> {
+ // Instruction operands.
+ bits<5> Sn;
+ bits<4> Rt;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Rt;
+
+ let Inst{6-5} = 0b00;
+ let Inst{3-0} = 0b0000;
+}
let neverHasSideEffects = 1 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
- (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
- IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src",
+ (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
+ IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
[/* FIXME: Can't write pattern for multiple result instr*/]> {
+ // Instruction operands.
+ bits<5> Dm;
+ bits<4> Rt;
+ bits<4> Rt2;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+
let Inst{7-6} = 0b00;
}
@@ -333,10 +517,21 @@ def VMOVRRS : AVConv3I<0b11000101, 0b1010,
// FMDLR: GPR -> SPR
def VMOVDRR : AVConv5I<0b11000100, 0b1011,
- (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
- IIC_fpMOVID, "vmov", "\t$dst, $src1, $src2",
- [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
- let Inst{7-6} = 0b00;
+ (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
+ IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
+ [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
+ // Instruction operands.
+ bits<5> Dm;
+ bits<4> Rt;
+ bits<4> Rt2;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Rt;
+ let Inst{19-16} = Rt2;
+
+ let Inst{7-6} = 0b00;
}
let neverHasSideEffects = 1 in
@@ -350,102 +545,183 @@ def VMOVSRR : AVConv5I<0b11000100, 0b1010,
// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
-// FMRX : SPR system reg -> GPR
-
+// FMRX: SPR system reg -> GPR
// FMSRR: GPR -> SPR
+// FMXR: GPR -> VFP system reg
+
+
+// Int -> FP:
+
+class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+}
-// FMXR: GPR -> VFP Sstem reg
-
-
-// Int to FP:
+class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+}
-def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
- (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
- [(set DPR:$dst, (f64 (arm_sitof SPR:$a)))]> {
+def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
+ (outs DPR:$Dd), (ins SPR:$Sm),
+ IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
+ [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
let Inst{7} = 1; // s32
}
-def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
- (outs SPR:$dst),(ins SPR:$a),
- IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
- [(set SPR:$dst, (arm_sitof SPR:$a))]> {
+def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
+ (outs SPR:$Sd),(ins SPR:$Sm),
+ IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
+ [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
let Inst{7} = 1; // s32
+
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
-def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
- (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
- [(set DPR:$dst, (f64 (arm_uitof SPR:$a)))]> {
+def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
+ (outs DPR:$Dd), (ins SPR:$Sm),
+ IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
+ [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
let Inst{7} = 0; // u32
}
-def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
- [(set SPR:$dst, (arm_uitof SPR:$a))]> {
+def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
+ [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
let Inst{7} = 0; // u32
+
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
-// FP to Int:
-// Always set Z bit in the instruction, i.e. "round towards zero" variants.
+// FP -> Int:
+
+class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Dm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+}
+
+class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+}
-def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
- [(set SPR:$dst, (arm_ftosi (f64 DPR:$a)))]> {
+// Always set Z bit in the instruction, i.e. "round towards zero" variants.
+def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
+ [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
let Inst{7} = 1; // Z bit
}
-def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
- [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
+def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
let Inst{7} = 1; // Z bit
+
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
-def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
- [(set SPR:$dst, (arm_ftoui (f64 DPR:$a)))]> {
+def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
+ [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
let Inst{7} = 1; // Z bit
}
-def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
- [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
+def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
let Inst{7} = 1; // Z bit
+
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
}
// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
-// For disassembly only.
let Uses = [FPSCR] in {
-def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
- [(set SPR:$dst, (int_arm_vcvtr (f64 DPR:$a)))]> {
+// FIXME: Verify encoding after integrated assembler is working.
+def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
+                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]> {
let Inst{7} = 0; // Z bit
}
-def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
- [(set SPR:$dst, (int_arm_vcvtr SPR:$a))]> {
+def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
let Inst{7} = 0; // Z bit
}
-def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
- [(set SPR:$dst, (int_arm_vcvtru (f64 DPR:$a)))]> {
+def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
+ (outs SPR:$Sd), (ins DPR:$Dm),
+ IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
+                                [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]> {
let Inst{7} = 0; // Z bit
}
-def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
- [(set SPR:$dst, (int_arm_vcvtru SPR:$a))]> {
+def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
+ [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
let Inst{7} = 0; // Z bit
}
}
@@ -457,30 +733,47 @@ def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
// S32 (U=0, sx=1) -> SL
// U32 (U=1, sx=1) -> UL
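+// For illustration (hypothetical operands): "vcvt.s16.f32 s0, s0, #8" is the
+// S16 variant with $fbits = 8 fraction bits.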
-let Constraints = "$a = $dst" in {
+// FIXME: Marking these as codegen only seems wrong. They are real
+// instructions(?)
+let Constraints = "$a = $dst", isCodeGenOnly = 1 in {
// FP to Fixed-Point:
-let isCodeGenOnly = 1 in {
def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
(outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
@@ -501,30 +794,44 @@ def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
(outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
[/* For disassembly only; pattern left blank */]>;
-}
// Fixed-Point to FP:
-let isCodeGenOnly = 1 in {
def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
(outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+ [/* For disassembly only; pattern left blank */]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
(outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
@@ -545,70 +852,120 @@ def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
(outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
[/* For disassembly only; pattern left blank */]>;
-}
-} // End of 'let Constraints = "$src = $dst" in'
+} // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'
//===----------------------------------------------------------------------===//
// FP FMA Operations.
//
-def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b),
- (f64 DPR:$dstin)))]>,
- RegConstraint<"$dstin = $dst">;
+def VMLAD : ADbI<0b11100, 0b00, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP2,UseFPVMLx]>;
def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b),
- (f64 DPR:$dstin)))]>,
- RegConstraint<"$dstin = $dst">;
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
-def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)),
- (f64 DPR:$dstin)))]>,
- RegConstraint<"$dstin = $dst">;
+def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
+ (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP2,UseFPVMLx]>;
+def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
+ (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
+
+def VMLSD : ADbI<0b11100, 0b00, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP2,UseFPVMLx]>;
def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
- (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
-def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
- (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
-
-def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)),
- (f64 DPR:$dstin)))]>,
- RegConstraint<"$dstin = $dst">;
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
+
+def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
+ (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP2,UseFPVMLx]>;
+def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
+ (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+
+def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP2,UseFPVMLx]>;
def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
+
+def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
+ (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP2,UseFPVMLx]>;
+def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
+ (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+
+def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP2,UseFPVMLx]>;
+
+def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
+
+def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
+ (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP2,UseFPVMLx]>;
+def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
+ (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+
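+For intuition, the fadd_mlx / fmul_su patterns above match an accumulate whose
+multiply feeds nothing else (that single-use restriction is presumably what the
+_su fragment encodes), so the pair can collapse into one multiply-accumulate
+when UseFPVMLx is in effect:
+
+// Could be selected as a single vmla.f64; acc occupies $Ddin = $Dd.
+double mac(double acc, double a, double b) { return acc + a * b; }
+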
//===----------------------------------------------------------------------===//
// FP Conditional moves.
@@ -616,92 +973,157 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
let neverHasSideEffects = 1 in {
def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
- (outs DPR:$dst), (ins DPR:$false, DPR:$true),
- IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
- [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm",
+ [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
+ RegConstraint<"$Dn = $Dd">;
def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
- (outs SPR:$dst), (ins SPR:$false, SPR:$true),
- IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
- [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm",
+ [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
+ RegConstraint<"$Sn = $Sd">;
def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
- (outs DPR:$dst), (ins DPR:$false, DPR:$true),
- IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
- [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
+ (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
+ IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
+ [/*(set DPR:$Dd, (ARMcneg DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
+ RegConstraint<"$Dn = $Dd">;
def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
- (outs SPR:$dst), (ins SPR:$false, SPR:$true),
- IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
- [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
+ [/*(set SPR:$Sd, (ARMcneg SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
+ RegConstraint<"$Sn = $Sd"> {
+ // Some single precision VFP instructions may be executed on both NEON and VFP
+ // pipelines.
+ let D = VFPNeonDomain;
+}
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
-// Misc.
+// Move from VFP System Register to ARM core register.
//
-// APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
-// to APSR.
-let Defs = [CPSR], Uses = [FPSCR] in
-def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
- "\tapsr_nzcv, fpscr",
- [(arm_fmstat)]> {
+class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
+ list<dag> pattern>:
+ VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
+
+ // Instruction operand.
+ bits<4> Rt;
+
let Inst{27-20} = 0b11101111;
- let Inst{19-16} = 0b0001;
- let Inst{15-12} = 0b1111;
+ let Inst{19-16} = opc19_16;
+ let Inst{15-12} = Rt;
let Inst{11-8} = 0b1010;
let Inst{7} = 0;
+ let Inst{6-5} = 0b00;
let Inst{4} = 1;
+ let Inst{3-0} = 0b0000;
}
-// FPSCR <-> GPR (for disassembly only)
+// APSR is the application level alias of CPSR. This instruction copies the
+// FPSCR N, Z, C, V flags to APSR.
+let Defs = [CPSR], Uses = [FPSCR], Rt = 0b1111 /* apsr_nzcv */ in
+def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
+ "vmrs", "\tapsr_nzcv, fpscr", [(arm_fmstat)]>;
+
+// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
-def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT,
- "vmrs", "\t$dst, fpscr",
- [(set GPR:$dst, (int_arm_get_fpscr))]> {
- let Inst{27-20} = 0b11101111;
- let Inst{19-16} = 0b0001;
- let Inst{11-8} = 0b1010;
- let Inst{7} = 0;
- let Inst{4} = 1;
+def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, fpscr",
+ [(set GPR:$Rt, (int_arm_get_fpscr))]>;
+
+// System level FPEXC, FPSID -> GPR
+let Uses = [FPSCR] in {
+ def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, fpexc", []>;
+ def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, fpsid", []>;
}
-let Defs = [FPSCR] in
-def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
- "vmsr", "\tfpscr, $src",
- [(int_arm_set_fpscr GPR:$src)]> {
+//===----------------------------------------------------------------------===//
+// Move from ARM core register to VFP System Register.
+//
+
+class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
+ list<dag> pattern>:
+ VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
+
+ // Instruction operand.
+ bits<4> src;
+
+ // Encode instruction operand.
+ let Inst{15-12} = src;
+
let Inst{27-20} = 0b11101110;
- let Inst{19-16} = 0b0001;
+ let Inst{19-16} = opc19_16;
let Inst{11-8} = 0b1010;
let Inst{7} = 0;
let Inst{4} = 1;
}
+let Defs = [FPSCR] in {
+ // Application level GPR -> FPSCR
+ def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
+ "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
+ // System level GPR -> FPEXC
+ def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
+ "vmsr", "\tfpexc, $src", []>;
+ // System level GPR -> FPSID
+ def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
+ "vmsr", "\tfpsid, $src", []>;
+}
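+
+As a sanity check of the MovFromVFP layout above, the fields can be packed by
+hand. This sketch assumes the condition code sits in Inst{31-28} and defaults
+to AL (0b1110), which the VFPAI base class presumably supplies; encodeVMRS is
+a hypothetical helper, not part of the patch:
+
+#include <cassert>
+#include <cstdint>
+
+uint32_t encodeVMRS(uint32_t Rt) {   // vmrs Rt, fpscr
+  return (0xEu  << 28)   // cond = AL (assumed)
+       | (0xEFu << 20)   // Inst{27-20} = 0b11101111
+       | (0x1u  << 16)   // opc19_16 = fpscr
+       | (Rt    << 12)   // Inst{15-12} = Rt
+       | (0xAu  << 8)    // Inst{11-8} = 0b1010
+       | (0x1u  << 4);   // Inst{4} = 1; Inst{7}, Inst{6-5}, Inst{3-0} = 0
+}
+
+int main() { assert(encodeVMRS(0) == 0xEEF10A10); }  // vmrs r0, fpscr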
+
+//===----------------------------------------------------------------------===//
+// Misc.
+//
+
// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
-def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
+def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
VFPMiscFrm, IIC_fpUNA64,
- "vmov", ".f64\t$dst, $imm",
- [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
+ "vmov", ".f64\t$Dd, $imm",
+ [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
+ // Instruction operands.
+ bits<5> Dd;
+ bits<32> imm;
+
+ // Encode instruction operands.
+ let Inst{15-12} = Dd{3-0};
+ let Inst{22} = Dd{4};
+ let Inst{19} = imm{31};
+ let Inst{18-16} = imm{22-20};
+ let Inst{3-0} = imm{19-16};
+
+ // Encode remaining instruction bits.
let Inst{27-23} = 0b11101;
let Inst{21-20} = 0b11;
let Inst{11-9} = 0b101;
- let Inst{8} = 1;
+ let Inst{8} = 1; // Double precision.
let Inst{7-4} = 0b0000;
}
-def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
- VFPMiscFrm, IIC_fpUNA32,
- "vmov", ".f32\t$dst, $imm",
- [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
+def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
+ VFPMiscFrm, IIC_fpUNA32,
+ "vmov", ".f32\t$Sd, $imm",
+ [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<32> imm;
+
+ // Encode instruction operands.
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+ let Inst{19} = imm{31}; // The immediate is handled as a double.
+ let Inst{18-16} = imm{22-20};
+ let Inst{3-0} = imm{19-16};
+
+ // Encode remaining instruction bits.
let Inst{27-23} = 0b11101;
let Inst{21-20} = 0b11;
let Inst{11-9} = 0b101;
- let Inst{8} = 0;
+ let Inst{8} = 0; // Single precision.
let Inst{7-4} = 0b0000;
}
}
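The imm{...} slices above scatter pieces of an 8-bit immediate through the instruction word; the value that imm8 denotes follows the ARM ARM's VFPExpandImm. A minimal sketch of the single-precision expansion (the double-precision layout the .td code uses differs only in field widths):

#include <cassert>
#include <cstdint>

uint32_t vfpExpandImm32(uint8_t imm8) {
  uint32_t sign = (imm8 >> 7) & 1;
  uint32_t b    = (imm8 >> 6) & 1;          // replicated into the exponent
  uint32_t exp  = ((b ^ 1u) << 7) | (b ? (0x1Fu << 2) : 0) | ((imm8 >> 4) & 3);
  uint32_t frac = (uint32_t)(imm8 & 0xFu) << 19;
  return (sign << 31) | (exp << 23) | frac;
}

int main() { assert(vfpExpandImm32(0x70) == 0x3F800000); }  // imm8 0x70 -> 1.0f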
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
index 5f6d7ee..45b7e48 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Memory.h"
+#include "llvm/Support/Memory.h"
#include <cstdlib>
using namespace llvm;
@@ -43,7 +43,7 @@ static TargetJITInfo::JITCompilerFn JITCompilerFunction;
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)
// CompilationCallback stub - We can't use a C function with inline assembly in
-// it, because we the prolog/epilog inserted by GCC won't work for us (we need
+// it, because the prolog/epilog inserted by GCC won't work for us. (We need
// to preserve more context and manipulate the stack directly). Instead,
// write our own wrapper, which does things our way, so we have complete
// control over register saving and restoring.
@@ -97,9 +97,10 @@ extern "C" {
"str r0, [sp,#16]\n"
// Return to the (newly modified) stub to invoke the real function.
// The above twiddling of the saved return addresses allows us to
- // deallocate everything, including the LR the stub saved, all in one
- // pop instruction.
- "ldmia sp!, {r0, r1, r2, r3, lr, pc}\n"
+ // deallocate everything, including the LR the stub saved, with two
+ // updating load instructions.
+ "ldmia sp!, {r0, r1, r2, r3, lr}\n"
+ "ldr pc, [sp], #4\n"
);
#else // Not an ARM host
void ARMCompilationCallback() {
@@ -290,7 +291,7 @@ void ARMJITInfo::relocate(void *Function, MachineRelocation *MR,
*((intptr_t*)RelocPos) |= ResultPtr;
// Set register Rn to PC.
*((intptr_t*)RelocPos) |=
- ARMRegisterInfo::getRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
+ getARMRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
break;
}
case ARM::reloc_arm_pic_jt:
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
index f5d9eff..2f97928 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
@@ -105,7 +105,7 @@ namespace llvm {
/// model is PIC.
void Initialize(const MachineFunction &MF, bool isPIC) {
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- ConstPoolId2AddrMap.resize(AFI->getNumConstPoolEntries());
+ ConstPoolId2AddrMap.resize(AFI->getNumPICLabels());
JumpTableId2AddrMap.resize(AFI->getNumJumpTables());
IsPIC = isPIC;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 2b7645a..d9dc5cd 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -128,45 +128,153 @@ namespace {
char ARMLoadStoreOpt::ID = 0;
}
-static int getLoadStoreMultipleOpcode(int Opcode) {
+static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
switch (Opcode) {
- case ARM::LDR:
+ default: llvm_unreachable("Unhandled opcode!");
+ case ARM::LDRi12:
++NumLDMGened;
- return ARM::LDM;
- case ARM::STR:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::LDMIA;
+ case ARM_AM::da: return ARM::LDMDA;
+ case ARM_AM::db: return ARM::LDMDB;
+ case ARM_AM::ib: return ARM::LDMIB;
+ }
+ break;
+ case ARM::STRi12:
++NumSTMGened;
- return ARM::STM;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::STMIA;
+ case ARM_AM::da: return ARM::STMDA;
+ case ARM_AM::db: return ARM::STMDB;
+ case ARM_AM::ib: return ARM::STMIB;
+ }
+ break;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
++NumLDMGened;
- return ARM::t2LDM;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::t2LDMIA;
+ case ARM_AM::db: return ARM::t2LDMDB;
+ }
+ break;
case ARM::t2STRi8:
case ARM::t2STRi12:
++NumSTMGened;
- return ARM::t2STM;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::t2STMIA;
+ case ARM_AM::db: return ARM::t2STMDB;
+ }
+ break;
case ARM::VLDRS:
++NumVLDMGened;
- return ARM::VLDMS;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VLDMSIA;
+ case ARM_AM::db: return ARM::VLDMSDB;
+ }
+ break;
case ARM::VSTRS:
++NumVSTMGened;
- return ARM::VSTMS;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VSTMSIA;
+ case ARM_AM::db: return ARM::VSTMSDB;
+ }
+ break;
case ARM::VLDRD:
++NumVLDMGened;
- return ARM::VLDMD;
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VLDMDIA;
+ case ARM_AM::db: return ARM::VLDMDDB;
+ }
+ break;
case ARM::VSTRD:
++NumVSTMGened;
- return ARM::VSTMD;
- default: llvm_unreachable("Unhandled opcode!");
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VSTMDIA;
+ case ARM_AM::db: return ARM::VSTMDDB;
+ }
+ break;
}
+
return 0;
}
+namespace llvm {
+ namespace ARM_AM {
+
+AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
+ switch (Opcode) {
+ default: llvm_unreachable("Unhandled opcode!");
+ case ARM::LDMIA_RET:
+ case ARM::LDMIA:
+ case ARM::LDMIA_UPD:
+ case ARM::STMIA:
+ case ARM::STMIA_UPD:
+ case ARM::t2LDMIA_RET:
+ case ARM::t2LDMIA:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2STMIA:
+ case ARM::t2STMIA_UPD:
+ case ARM::VLDMSIA:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VSTMSIA:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VLDMDIA:
+ case ARM::VLDMDIA_UPD:
+ case ARM::VSTMDIA:
+ case ARM::VSTMDIA_UPD:
+ return ARM_AM::ia;
+
+ case ARM::LDMDA:
+ case ARM::LDMDA_UPD:
+ case ARM::STMDA:
+ case ARM::STMDA_UPD:
+ return ARM_AM::da;
+
+ case ARM::LDMDB:
+ case ARM::LDMDB_UPD:
+ case ARM::STMDB:
+ case ARM::STMDB_UPD:
+ case ARM::t2LDMDB:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMDB:
+ case ARM::t2STMDB_UPD:
+ case ARM::VLDMSDB:
+ case ARM::VLDMSDB_UPD:
+ case ARM::VSTMSDB:
+ case ARM::VSTMSDB_UPD:
+ case ARM::VLDMDDB:
+ case ARM::VLDMDDB_UPD:
+ case ARM::VSTMDDB:
+ case ARM::VSTMDDB_UPD:
+ return ARM_AM::db;
+
+ case ARM::LDMIB:
+ case ARM::LDMIB_UPD:
+ case ARM::STMIB:
+ case ARM::STMIB_UPD:
+ return ARM_AM::ib;
+ }
+
+ return ARM_AM::bad_am_submode;
+}
+
+ } // end namespace ARM_AM
+} // end namespace llvm
+
static bool isT2i32Load(unsigned Opc) {
return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
}
static bool isi32Load(unsigned Opc) {
- return Opc == ARM::LDR || isT2i32Load(Opc);
+ return Opc == ARM::LDRi12 || isT2i32Load(Opc);
}
static bool isT2i32Store(unsigned Opc) {
@@ -174,7 +282,7 @@ static bool isT2i32Store(unsigned Opc) {
}
static bool isi32Store(unsigned Opc) {
- return Opc == ARM::STR || isT2i32Store(Opc);
+ return Opc == ARM::STRi12 || isT2i32Store(Opc);
}
/// MergeOps - Create and insert a LDM or STM with Base as base register and
@@ -245,10 +353,10 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
bool isDef = (isi32Load(Opcode) || Opcode == ARM::VLDRS ||
Opcode == ARM::VLDRD);
- Opcode = getLoadStoreMultipleOpcode(Opcode);
+ Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(Opcode))
.addReg(Base, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg);
+ .addImm(Pred).addReg(PredReg);
for (unsigned i = 0; i != NumRegs; ++i)
MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
| getKillRegState(Regs[i].second));
@@ -271,22 +379,14 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
// First calculate which of the registers should be killed by the merged
// instruction.
const unsigned insertPos = memOps[insertAfter].Position;
-
- SmallSet<unsigned, 4> UnavailRegs;
SmallSet<unsigned, 4> KilledRegs;
DenseMap<unsigned, unsigned> Killer;
- for (unsigned i = 0; i < memOpsBegin; ++i) {
- if (memOps[i].Position < insertPos && memOps[i].isKill) {
- unsigned Reg = memOps[i].Reg;
- if (memOps[i].Merged)
- UnavailRegs.insert(Reg);
- else {
- KilledRegs.insert(Reg);
- Killer[Reg] = i;
- }
+ for (unsigned i = 0, e = memOps.size(); i != e; ++i) {
+ if (i == memOpsBegin) {
+ i = memOpsEnd;
+ if (i == e)
+ break;
}
- }
- for (unsigned i = memOpsEnd, e = memOps.size(); i != e; ++i) {
if (memOps[i].Position < insertPos && memOps[i].isKill) {
unsigned Reg = memOps[i].Reg;
KilledRegs.insert(Reg);
@@ -297,12 +397,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
SmallVector<std::pair<unsigned, bool>, 8> Regs;
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
unsigned Reg = memOps[i].Reg;
- if (UnavailRegs.count(Reg))
- // Register is killed before and it's not easy / possible to update the
- // kill marker on already merged instructions. Abort.
- return;
-
- // If we are inserting the merged operation after an unmerged operation that
+ // If we are inserting the merged operation after an operation that
// uses the same register, make sure to transfer any kill flag.
bool isKill = memOps[i].isKill || KilledRegs.count(Reg);
Regs.push_back(std::make_pair(Reg, isKill));
@@ -318,17 +413,24 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
// Merge succeeded, update records.
Merges.push_back(prior(Loc));
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
- // Remove kill flags from any unmerged memops that come before insertPos.
+ // Remove kill flags from any memops that come before insertPos.
if (Regs[i-memOpsBegin].second) {
unsigned Reg = Regs[i-memOpsBegin].first;
if (KilledRegs.count(Reg)) {
unsigned j = Killer[Reg];
- memOps[j].MBBI->getOperand(0).setIsKill(false);
+ int Idx = memOps[j].MBBI->findRegisterUseOperandIdx(Reg, true);
+ assert(Idx >= 0 && "Cannot find killing operand");
+ memOps[j].MBBI->getOperand(Idx).setIsKill(false);
memOps[j].isKill = false;
}
+ memOps[i].isKill = true;
}
MBB.erase(memOps[i].MBBI);
+ // Update this memop to refer to the merged instruction.
+ // We may need to move kill flags again.
memOps[i].Merged = true;
+ memOps[i].MBBI = Merges.back();
+ memOps[i].Position = insertPos;
}
}
@@ -349,7 +451,7 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
const MachineOperand &PMO = Loc->getOperand(0);
unsigned PReg = PMO.getReg();
unsigned PRegNum = PMO.isUndef() ? UINT_MAX
- : ARMRegisterInfo::getRegisterNumbering(PReg);
+ : getARMRegisterNumbering(PReg);
unsigned Count = 1;
for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
@@ -357,7 +459,7 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
const MachineOperand &MO = MemOps[i].MBBI->getOperand(0);
unsigned Reg = MO.getReg();
unsigned RegNum = MO.isUndef() ? UINT_MAX
- : ARMRegisterInfo::getRegisterNumbering(Reg);
+ : getARMRegisterNumbering(Reg);
// Register numbers must be in ascending order. For VFP, the registers
// must also be consecutive and there is a limit of 16 double-word
// registers per instruction.
@@ -440,8 +542,8 @@ static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
switch (MI->getOpcode()) {
default: return 0;
- case ARM::LDR:
- case ARM::STR:
+ case ARM::LDRi12:
+ case ARM::STRi12:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -452,31 +554,109 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
case ARM::VLDRD:
case ARM::VSTRD:
return 8;
- case ARM::LDM:
- case ARM::STM:
- case ARM::t2LDM:
- case ARM::t2STM:
- case ARM::VLDMS:
- case ARM::VSTMS:
- return (MI->getNumOperands() - 4) * 4;
- case ARM::VLDMD:
- case ARM::VSTMD:
- return (MI->getNumOperands() - 4) * 8;
+ case ARM::LDMIA:
+ case ARM::LDMDA:
+ case ARM::LDMDB:
+ case ARM::LDMIB:
+ case ARM::STMIA:
+ case ARM::STMDA:
+ case ARM::STMDB:
+ case ARM::STMIB:
+ case ARM::t2LDMIA:
+ case ARM::t2LDMDB:
+ case ARM::t2STMIA:
+ case ARM::t2STMDB:
+ case ARM::VLDMSIA:
+ case ARM::VLDMSDB:
+ case ARM::VSTMSIA:
+ case ARM::VSTMSDB:
+ return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4;
+ case ARM::VLDMDIA:
+ case ARM::VLDMDDB:
+ case ARM::VSTMDIA:
+ case ARM::VSTMDDB:
+ return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8;
}
}
-static unsigned getUpdatingLSMultipleOpcode(unsigned Opc) {
+static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
+ ARM_AM::AMSubMode Mode) {
switch (Opc) {
- case ARM::LDM: return ARM::LDM_UPD;
- case ARM::STM: return ARM::STM_UPD;
- case ARM::t2LDM: return ARM::t2LDM_UPD;
- case ARM::t2STM: return ARM::t2STM_UPD;
- case ARM::VLDMS: return ARM::VLDMS_UPD;
- case ARM::VLDMD: return ARM::VLDMD_UPD;
- case ARM::VSTMS: return ARM::VSTMS_UPD;
- case ARM::VSTMD: return ARM::VSTMD_UPD;
default: llvm_unreachable("Unhandled opcode!");
+ case ARM::LDMIA:
+ case ARM::LDMDA:
+ case ARM::LDMDB:
+ case ARM::LDMIB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::LDMIA_UPD;
+ case ARM_AM::ib: return ARM::LDMIB_UPD;
+ case ARM_AM::da: return ARM::LDMDA_UPD;
+ case ARM_AM::db: return ARM::LDMDB_UPD;
+ }
+ break;
+ case ARM::STMIA:
+ case ARM::STMDA:
+ case ARM::STMDB:
+ case ARM::STMIB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::STMIA_UPD;
+ case ARM_AM::ib: return ARM::STMIB_UPD;
+ case ARM_AM::da: return ARM::STMDA_UPD;
+ case ARM_AM::db: return ARM::STMDB_UPD;
+ }
+ break;
+ case ARM::t2LDMIA:
+ case ARM::t2LDMDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::t2LDMIA_UPD;
+ case ARM_AM::db: return ARM::t2LDMDB_UPD;
+ }
+ break;
+ case ARM::t2STMIA:
+ case ARM::t2STMDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::t2STMIA_UPD;
+ case ARM_AM::db: return ARM::t2STMDB_UPD;
+ }
+ break;
+ case ARM::VLDMSIA:
+ case ARM::VLDMSDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VLDMSIA_UPD;
+ case ARM_AM::db: return ARM::VLDMSDB_UPD;
+ }
+ break;
+ case ARM::VLDMDIA:
+ case ARM::VLDMDDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VLDMDIA_UPD;
+ case ARM_AM::db: return ARM::VLDMDDB_UPD;
+ }
+ break;
+ case ARM::VSTMSIA:
+ case ARM::VSTMSDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VSTMSIA_UPD;
+ case ARM_AM::db: return ARM::VSTMSDB_UPD;
+ }
+ break;
+ case ARM::VSTMDIA:
+ case ARM::VSTMDDB:
+ switch (Mode) {
+ default: llvm_unreachable("Unhandled submode!");
+ case ARM_AM::ia: return ARM::VSTMDIA_UPD;
+ case ARM_AM::db: return ARM::VSTMDDB_UPD;
+ }
+ break;
}
+
return 0;
}
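In assembly terms the transformation folds a separate base update into the transfer, e.g. (illustrative only):

// ldmia r0,  {r1, r2}     ARM::LDMIA      base left unchanged
// add   r0,  r0, #8       separate base increment
// ...merges into...
// ldmia r0!, {r1, r2}     ARM::LDMIA_UPD  base written back (+8)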
@@ -505,16 +685,14 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
int Opcode = MI->getOpcode();
DebugLoc dl = MI->getDebugLoc();
- bool DoMerge = false;
- ARM_AM::AMSubMode Mode = ARM_AM::ia;
-
// Can't use an updating ld/st if the base register is also a dest
// register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
- for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
if (MI->getOperand(i).getReg() == Base)
return false;
- }
- Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
+
+ bool DoMerge = false;
+ ARM_AM::AMSubMode Mode = ARM_AM::getLoadStoreMultipleSubMode(Opcode);
// Try merging with the previous instruction.
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
@@ -560,15 +738,16 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
if (!DoMerge)
return false;
- unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode);
+ unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
.addReg(Base, getDefRegState(true)) // WB base register
.addReg(Base, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(Mode))
.addImm(Pred).addReg(PredReg);
+
// Transfer the rest of operands.
- for (unsigned OpNum = 4, e = MI->getNumOperands(); OpNum != e; ++OpNum)
+ for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum)
MIB.addOperand(MI->getOperand(OpNum));
+
// Transfer memoperands.
(*MIB).setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
@@ -576,14 +755,21 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
return true;
}
-static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
+static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
+ ARM_AM::AddrOpc Mode) {
switch (Opc) {
- case ARM::LDR: return ARM::LDR_PRE;
- case ARM::STR: return ARM::STR_PRE;
- case ARM::VLDRS: return ARM::VLDMS_UPD;
- case ARM::VLDRD: return ARM::VLDMD_UPD;
- case ARM::VSTRS: return ARM::VSTMS_UPD;
- case ARM::VSTRD: return ARM::VSTMD_UPD;
+ case ARM::LDRi12:
+ return ARM::LDR_PRE;
+ case ARM::STRi12:
+ return ARM::STR_PRE;
+ case ARM::VLDRS:
+ return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
+ case ARM::VLDRD:
+ return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
+ case ARM::VSTRS:
+ return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
+ case ARM::VSTRD:
+ return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
return ARM::t2LDR_PRE;
@@ -595,14 +781,21 @@ static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
return 0;
}
-static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
+static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
+ ARM_AM::AddrOpc Mode) {
switch (Opc) {
- case ARM::LDR: return ARM::LDR_POST;
- case ARM::STR: return ARM::STR_POST;
- case ARM::VLDRS: return ARM::VLDMS_UPD;
- case ARM::VLDRD: return ARM::VLDMD_UPD;
- case ARM::VSTRS: return ARM::VSTMS_UPD;
- case ARM::VSTRD: return ARM::VSTMD_UPD;
+ case ARM::LDRi12:
+ return ARM::LDR_POST;
+ case ARM::STRi12:
+ return ARM::STR_POST;
+ case ARM::VLDRS:
+ return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
+ case ARM::VLDRD:
+ return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
+ case ARM::VSTRS:
+ return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
+ case ARM::VSTRD:
+ return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
return ARM::t2LDR_POST;
@@ -629,14 +822,12 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
DebugLoc dl = MI->getDebugLoc();
bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
- bool isAM2 = (Opcode == ARM::LDR || Opcode == ARM::STR);
- if (isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0)
- return false;
- if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
- return false;
- if (isT2i32Load(Opcode) || isT2i32Store(Opcode))
+ bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
+ if (isi32Load(Opcode) || isi32Store(Opcode))
if (MI->getOperand(2).getImm() != 0)
return false;
+ if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
+ return false;
bool isLd = isi32Load(Opcode) || Opcode == ARM::VLDRS || Opcode == ARM::VLDRD;
// Can't do the merge if the destination register is the same as the would-be
@@ -666,7 +857,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
DoMerge = true;
}
if (DoMerge) {
- NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
+ NewOpc = getPreIndexedLoadStoreOpcode(Opcode, AddSub);
MBB.erase(PrevMBBI);
}
}
@@ -685,7 +876,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
DoMerge = true;
}
if (DoMerge) {
- NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
+ NewOpc = getPostIndexedLoadStoreOpcode(Opcode, AddSub);
if (NextMBBI == I) {
Advance = true;
++I;
@@ -698,12 +889,9 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
return false;
unsigned Offset = 0;
- if (isAM5)
- Offset = ARM_AM::getAM4ModeImm(AddSub == ARM_AM::sub ?
- ARM_AM::db : ARM_AM::ia);
- else if (isAM2)
+ if (isAM2)
Offset = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
- else
+ else if (!isAM5)
Offset = AddSub == ARM_AM::sub ? -Bytes : Bytes;
if (isAM5) {
@@ -715,7 +903,6 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
.addReg(Base, getDefRegState(true)) // WB base register
.addReg(Base, getKillRegState(isLd ? BaseKill : false))
- .addImm(Offset)
.addImm(Pred).addReg(PredReg)
.addReg(MO.getReg(), (isLd ? getDefRegState(true) :
getKillRegState(MO.isKill())));
@@ -782,15 +969,14 @@ static bool isMemoryOp(const MachineInstr *MI) {
int Opcode = MI->getOpcode();
switch (Opcode) {
default: break;
- case ARM::LDR:
- case ARM::STR:
- return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
case ARM::VLDRS:
case ARM::VSTRS:
return MI->getOperand(1).isReg();
case ARM::VLDRD:
case ARM::VSTRD:
return MI->getOperand(1).isReg();
+ case ARM::LDRi12:
+ case ARM::STRi12:
case ARM::t2LDRi8:
case ARM::t2LDRi12:
case ARM::t2STRi8:
@@ -818,24 +1004,19 @@ void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
static int getMemoryOpOffset(const MachineInstr *MI) {
int Opcode = MI->getOpcode();
- bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
unsigned NumOperands = MI->getDesc().getNumOperands();
unsigned OffField = MI->getOperand(NumOperands-3).getImm();
if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
- Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8)
+ Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
+ Opcode == ARM::LDRi12 || Opcode == ARM::STRi12)
return OffField;
- int Offset = isAM2
- ? ARM_AM::getAM2Offset(OffField)
- : (isAM3 ? ARM_AM::getAM3Offset(OffField)
- : ARM_AM::getAM5Offset(OffField) * 4);
- if (isAM2) {
- if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
- Offset = -Offset;
- } else if (isAM3) {
+ int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
+ : ARM_AM::getAM5Offset(OffField) * 4;
+ if (isAM3) {
if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
Offset = -Offset;
} else {
@@ -847,35 +1028,24 @@ static int getMemoryOpOffset(const MachineInstr *MI) {
static void InsertLDR_STR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- int OffImm, bool isDef,
+ int Offset, bool isDef,
DebugLoc dl, unsigned NewOpc,
unsigned Reg, bool RegDeadKill, bool RegUndef,
unsigned BaseReg, bool BaseKill, bool BaseUndef,
- unsigned OffReg, bool OffKill, bool OffUndef,
+ bool OffKill, bool OffUndef,
ARMCC::CondCodes Pred, unsigned PredReg,
const TargetInstrInfo *TII, bool isT2) {
- int Offset = OffImm;
- if (!isT2) {
- if (OffImm < 0)
- Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
- else
- Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);
- }
if (isDef) {
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
TII->get(NewOpc))
.addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
.addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
- if (!isT2)
- MIB.addReg(OffReg, getKillRegState(OffKill)|getUndefRegState(OffUndef));
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
} else {
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
TII->get(NewOpc))
.addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
.addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
- if (!isT2)
- MIB.addReg(OffReg, getKillRegState(OffKill)|getUndefRegState(OffUndef));
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
}
}
@@ -906,23 +1076,21 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
unsigned BaseReg = BaseOp.getReg();
bool BaseKill = BaseOp.isKill();
bool BaseUndef = BaseOp.isUndef();
- unsigned OffReg = isT2 ? 0 : MI->getOperand(3).getReg();
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
int OffImm = getMemoryOpOffset(MI);
unsigned PredReg = 0;
ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
- if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
+ if (OddRegNum > EvenRegNum && OffImm == 0) {
// Ascending register numbers and no offset. It's safe to change it to a
// ldm or stm.
unsigned NewOpc = (isLd)
- ? (isT2 ? ARM::t2LDM : ARM::LDM)
- : (isT2 ? ARM::t2STM : ARM::STM);
+ ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
+ : (isT2 ? ARM::t2STMIA : ARM::STMIA);
if (isLd) {
BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
.addReg(BaseReg, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addImm(Pred).addReg(PredReg)
.addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
.addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
@@ -930,7 +1098,6 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
} else {
BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
.addReg(BaseReg, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addImm(Pred).addReg(PredReg)
.addReg(EvenReg,
getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
@@ -941,28 +1108,24 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
NewBBI = llvm::prior(MBBI);
} else {
// Split into two instructions.
- assert((!isT2 || !OffReg) &&
- "Thumb2 ldrd / strd does not encode offset register!");
unsigned NewOpc = (isLd)
- ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDR)
- : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STR);
+ ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
+ : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
DebugLoc dl = MBBI->getDebugLoc();
// If this is a load and base register is killed, it may have been
// re-defed by the load, make sure the first load does not clobber it.
if (isLd &&
(BaseKill || OffKill) &&
- (TRI->regsOverlap(EvenReg, BaseReg) ||
- (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
- assert(!TRI->regsOverlap(OddReg, BaseReg) &&
- (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
+ (TRI->regsOverlap(EvenReg, BaseReg))) {
+ assert(!TRI->regsOverlap(OddReg, BaseReg));
InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
OddReg, OddDeadKill, false,
- BaseReg, false, BaseUndef, OffReg, false, OffUndef,
+ BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
- BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
+ BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
Pred, PredReg, TII, isT2);
} else {
if (OddReg == EvenReg && EvenDeadKill) {
@@ -974,12 +1137,12 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
}
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, EvenUndef,
- BaseReg, false, BaseUndef, OffReg, false, OffUndef,
+ BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
OddReg, OddDeadKill, OddUndef,
- BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
+ BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
Pred, PredReg, TII, isT2);
}
if (isLd)
@@ -1158,17 +1321,6 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
return NumMerges > 0;
}
-namespace {
- struct OffsetCompare {
- bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
- int LOffset = getMemoryOpOffset(LHS);
- int ROffset = getMemoryOpOffset(RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- }
- };
-}
-
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return ops
/// ("bx lr" and "mov pc, lr") into the preceding stack restore so it
/// directly restores the value of LR into pc.
@@ -1182,20 +1334,25 @@ namespace {
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
if (MBB.empty()) return false;
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
if (MBBI != MBB.begin() &&
(MBBI->getOpcode() == ARM::BX_RET ||
MBBI->getOpcode() == ARM::tBX_RET ||
MBBI->getOpcode() == ARM::MOVPCLR)) {
MachineInstr *PrevMI = prior(MBBI);
- if (PrevMI->getOpcode() == ARM::LDM_UPD ||
- PrevMI->getOpcode() == ARM::t2LDM_UPD) {
+ unsigned Opcode = PrevMI->getOpcode();
+ if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
+ Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
+ Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
if (MO.getReg() != ARM::LR)
return false;
- unsigned NewOpc = isThumb2 ? ARM::t2LDM_RET : ARM::LDM_RET;
+ unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
+ assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
+ Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
PrevMI->setDesc(TII->get(NewOpc));
MO.setReg(ARM::PC);
+ PrevMI->copyImplicitOps(&*MBBI);
MBB.erase(MBBI);
return true;
}
@@ -1216,7 +1373,8 @@ bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
++MFI) {
MachineBasicBlock &MBB = *MFI;
Modified |= LoadStoreMultipleOpti(MBB);
- Modified |= MergeReturnIntoLDM(MBB);
+ if (TM.getSubtarget<ARMSubtarget>().hasV5TOps())
+ Modified |= MergeReturnIntoLDM(MBB);
}
delete RS;
@@ -1250,7 +1408,7 @@ namespace {
bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
unsigned &NewOpc, unsigned &EvenReg,
unsigned &OddReg, unsigned &BaseReg,
- unsigned &OffReg, int &Offset,
+ int &Offset,
unsigned &PredReg, ARMCC::CondCodes &Pred,
bool &isT2);
bool RescheduleOps(MachineBasicBlock *MBB,
@@ -1292,7 +1450,7 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
if (I->isDebugValue() || MemOps.count(&*I))
continue;
const TargetInstrDesc &TID = I->getDesc();
- if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
+ if (TID.isCall() || TID.isTerminator() || I->hasUnmodeledSideEffects())
return false;
if (isLd && TID.mayStore())
return false;
@@ -1330,8 +1488,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
DebugLoc &dl,
unsigned &NewOpc, unsigned &EvenReg,
unsigned &OddReg, unsigned &BaseReg,
- unsigned &OffReg, int &Offset,
- unsigned &PredReg,
+ int &Offset, unsigned &PredReg,
ARMCC::CondCodes &Pred,
bool &isT2) {
// Make sure we're allowed to generate LDRD/STRD.
@@ -1341,9 +1498,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
// FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
unsigned Scale = 1;
unsigned Opcode = Op0->getOpcode();
- if (Opcode == ARM::LDR)
+ if (Opcode == ARM::LDRi12)
NewOpc = ARM::LDRD;
- else if (Opcode == ARM::STR)
+ else if (Opcode == ARM::STRi12)
NewOpc = ARM::STRD;
else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
NewOpc = ARM::t2LDRDi8;
@@ -1356,12 +1513,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
} else
return false;
- // Make sure the offset registers match.
- if (!isT2 &&
- (Op0->getOperand(2).getReg() != Op1->getOperand(2).getReg()))
- return false;
-
- // Must sure the base address satisfies i64 ld / st alignment requirement.
+ // Make sure the base address satisfies i64 ld / st alignment requirement.
if (!Op0->hasOneMemOperand() ||
!(*Op0->memoperands_begin())->getValue() ||
(*Op0->memoperands_begin())->isVolatile())
@@ -1370,7 +1522,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
unsigned Align = (*Op0->memoperands_begin())->getAlignment();
const Function *Func = MF->getFunction();
unsigned ReqAlign = STI->hasV6Ops()
- ? TD->getPrefTypeAlignment(Type::getInt64Ty(Func->getContext()))
+ ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
: 8; // Pre-v6 need 8-byte align
if (Align < ReqAlign)
return false;
@@ -1404,13 +1556,22 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
if (EvenReg == OddReg)
return false;
BaseReg = Op0->getOperand(1).getReg();
- if (!isT2)
- OffReg = Op0->getOperand(2).getReg();
Pred = llvm::getInstrPredicate(Op0, PredReg);
dl = Op0->getDebugLoc();
return true;
}
+namespace {
+ struct OffsetCompare {
+ bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
+ int LOffset = getMemoryOpOffset(LHS);
+ int ROffset = getMemoryOpOffset(RHS);
+ assert(LHS == RHS || LOffset != ROffset);
+ return LOffset > ROffset;
+ }
+ };
+}
+
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
SmallVector<MachineInstr*, 4> &Ops,
unsigned Base, bool isLd,
@@ -1493,14 +1654,14 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
MachineInstr *Op0 = Ops.back();
MachineInstr *Op1 = Ops[Ops.size()-2];
unsigned EvenReg = 0, OddReg = 0;
- unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
+ unsigned BaseReg = 0, PredReg = 0;
ARMCC::CondCodes Pred = ARMCC::AL;
bool isT2 = false;
unsigned NewOpc = 0;
int Offset = 0;
DebugLoc dl;
if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
- EvenReg, OddReg, BaseReg, OffReg,
+ EvenReg, OddReg, BaseReg,
Offset, PredReg, Pred, isT2)) {
Ops.pop_back();
Ops.pop_back();
@@ -1512,8 +1673,11 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
.addReg(EvenReg, RegState::Define)
.addReg(OddReg, RegState::Define)
.addReg(BaseReg);
+ // FIXME: We're converting from LDRi12 to an insn that still
+ // uses addrmode2, so we need an explicit offset reg. It should
+ // always be reg0 since we're transforming LDRi12s.
if (!isT2)
- MIB.addReg(OffReg);
+ MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
++NumLDRDFormed;
} else {
@@ -1522,8 +1686,11 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
.addReg(EvenReg)
.addReg(OddReg)
.addReg(BaseReg);
+ // FIXME: We're converting from LDRi12 to an insn that still
+ // uses addrmode2, so we need an explicit offset reg. It should
+ // always be reg0 since we're transforming STRi12s.
if (!isT2)
- MIB.addReg(OffReg);
+ MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
++NumSTRDFormed;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMMCCodeEmitter.cpp
new file mode 100644
index 0000000..6d7b485
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMMCCodeEmitter.cpp
@@ -0,0 +1,1230 @@
+//===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ARMMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mccodeemitter"
+#include "ARM.h"
+#include "ARMAddressingModes.h"
+#include "ARMFixupKinds.h"
+#include "ARMInstrInfo.h"
+#include "ARMMCExpr.h"
+#include "ARMSubtarget.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created.");
+
+namespace {
+class ARMMCCodeEmitter : public MCCodeEmitter {
+ ARMMCCodeEmitter(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT
+ const TargetMachine &TM;
+ const TargetInstrInfo &TII;
+ const ARMSubtarget *Subtarget;
+ MCContext &Ctx;
+
+public:
+ ARMMCCodeEmitter(TargetMachine &tm, MCContext &ctx)
+ : TM(tm), TII(*TM.getInstrInfo()),
+ Subtarget(&TM.getSubtarget<ARMSubtarget>()), Ctx(ctx) {
+ }
+
+ ~ARMMCCodeEmitter() {}
+
+ unsigned getMachineSoImmOpValue(unsigned SoImm) const;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ unsigned getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getHiLo16ImmOpValue - Return the encoding for the hi / low 16-bit of
+ /// the specified operand. This is used for operands with :lower16: and
+ /// :upper16: prefixes.
+ uint32_t getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx,
+ unsigned &Reg, unsigned &Imm,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate
+ /// BL branch target.
+ uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
+ /// BLX branch target.
+ uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
+ uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target.
+ uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target.
+ uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getBranchTargetOpValue - Return encoding info for 24-bit immediate
+ /// branch target.
+ uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit
+ /// immediate Thumb2 direct branch target.
+ uint32_t getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
+ /// branch target.
+ uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAdrLabelOpValue - Return encoding info for 12-bit immediate
+ /// ADR label target.
+ uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12'
+ /// operand.
+ uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand.
+ uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2'
+ /// operand.
+ uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm'
+ /// operand as needed by load/store instructions.
+ uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getLdStmModeOpValue - Return encoding for load/store multiple mode.
+ uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
+ switch (Mode) {
+ default: assert(0 && "Unknown addressing sub-mode!");
+ case ARM_AM::da: return 0;
+ case ARM_AM::ia: return 1;
+ case ARM_AM::db: return 2;
+ case ARM_AM::ib: return 3;
+ }
+ }
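+
+  // Note: these values presumably line up with the {P,U} addressing bits
+  // (Inst{24-23}) of the A32 LDM/STM encodings: da = 0b00, ia = 0b01,
+  // db = 0b10, ib = 0b11.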
+ /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
+ ///
+ unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const {
+ switch (ShOpc) {
+ default: llvm_unreachable("Unknown shift opc!");
+ case ARM_AM::no_shift:
+ case ARM_AM::lsl: return 0;
+ case ARM_AM::lsr: return 1;
+ case ARM_AM::asr: return 2;
+ case ARM_AM::ror:
+ case ARM_AM::rrx: return 3;
+ }
+ return 0;
+ }
+
+ /// getAddrMode2OpValue - Return encoding for addrmode2 operands.
+ uint32_t getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands.
+ uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands.
+ uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrMode3OpValue - Return encoding for addrmode3 operands.
+ uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands.
+ uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
+ uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
+ uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getAddrMode5OpValue - Return encoding info for 'reg +/- imm8<<2' operand.
+ uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getCCOutOpValue - Return encoding of the 's' bit.
+ unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or
+ // '1' respectively.
+ return MI.getOperand(Op).getReg() == ARM::CPSR;
+ }
+
+ /// getSOImmOpValue - Return an encoded 12-bit shifted-immediate value.
+ unsigned getSOImmOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned SoImm = MI.getOperand(Op).getImm();
+ int SoImmVal = ARM_AM::getSOImmVal(SoImm);
+ assert(SoImmVal != -1 && "Not a valid so_imm value!");
+
+ // Encode rotate_imm.
+ unsigned Binary = (ARM_AM::getSOImmValRot((unsigned)SoImmVal) >> 1)
+ << ARMII::SoRotImmShift;
+
+ // Encode immed_8.
+ Binary |= ARM_AM::getSOImmValImm((unsigned)SoImmVal);
+ return Binary;
+ }
+
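For reference, the so_imm contract the assertion above relies on can be reproduced in isolation: a 32-bit value is encodable iff it is an 8-bit constant rotated right by an even amount, and the 12-bit field packs half the rotate amount in bits 11:8 above the 8-bit immediate. A minimal standalone sketch (this mirrors the contract of ARM_AM::getSOImmVal, not its implementation; encodeSOImm is a hypothetical helper):

    #include <cassert>
    #include <cstdint>

    // Return the 12-bit so_imm field for V, or -1 if V is not encodable.
    static int encodeSOImm(uint32_t V) {
      for (unsigned Rot = 0; Rot < 32; Rot += 2) {
        // Rotate V left by Rot; if the result fits in 8 bits, then
        // V == Imm8 rotated right by Rot.
        uint32_t Imm8 = (V << Rot) | (Rot ? V >> (32 - Rot) : 0);
        if (Imm8 < 256)
          return int((Rot / 2) << 8 | Imm8);
      }
      return -1;
    }

    int main() {
      assert(encodeSOImm(0xFF) == 0x0FF);      // no rotation needed
      assert(encodeSOImm(0x3FC00) == 0xBFF);   // 0xFF ror 22, rot field 11
      assert(encodeSOImm(0x101) == -1);        // not representable
    }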
+ /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value.
+ unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned SoImm = MI.getOperand(Op).getImm();
+ unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm);
+ assert(Encoded != ~0U && "Not a Thumb2 so_imm value?");
+ return Encoded;
+ }
+
+ unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getSORegOpValue - Return an encoded so_reg shifted register value.
+ unsigned getSORegOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getRotImmOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ switch (MI.getOperand(Op).getImm()) {
+ default: assert(0 && "Not a valid rot_imm value!");
+ case 0: return 0;
+ case 8: return 1;
+ case 16: return 2;
+ case 24: return 3;
+ }
+ }
+
+ unsigned getImmMinusOneOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return MI.getOperand(Op).getImm() - 1;
+ }
+
+ unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return 64 - MI.getOperand(Op).getImm();
+ }
+
+ unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getMsbOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned NEONThumb2DataIPostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const;
+ unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const;
+ unsigned NEONThumb2DupPostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const;
+
+ unsigned VFPThumb2PostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const;
+
+ void EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
+ }
+
+ void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const {
+ // Output the constant in little endian byte order.
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, OS);
+ Val >>= 8;
+ }
+ }
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+};
+
+} // end anonymous namespace
+
+MCCodeEmitter *llvm::createARMMCCodeEmitter(const Target &, TargetMachine &TM,
+ MCContext &Ctx) {
+ return new ARMMCCodeEmitter(TM, Ctx);
+}
+
+/// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing
+/// instructions, and rewrite them to their Thumb2 form if we are currently in
+/// Thumb2 mode.
+unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const {
+ if (Subtarget->isThumb2()) {
+ // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved
+ // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are
+ // set to 1111.
+ unsigned Bit24 = EncodedValue & 0x01000000;
+ unsigned Bit28 = Bit24 << 4;
+ EncodedValue &= 0xEFFFFFFF;
+ EncodedValue |= Bit28;
+ EncodedValue |= 0x0F000000;
+ }
+
+ return EncodedValue;
+}
+
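To see the swizzle concretely: with an illustrative value whose bit 24 is set (not a real instruction encoding), bit 24 is copied to bit 28 and bits 27-24 are then forced to 1111. A standalone check of the same arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t E = 0x03000000;          // illustrative: bits 25 and 24 set
      uint32_t Bit24 = E & 0x01000000;  // extract bit 24
      uint32_t Bit28 = Bit24 << 4;      // it lands in bit 28
      E = (E & 0xEFFFFFFF) | Bit28;     // clear old bit 28, insert the new one
      E |= 0x0F000000;                  // force bits 27-24 to 1111
      assert(E == 0x1F000000);
    }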
+/// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store
+/// instructions, and rewrite them to their Thumb2 form if we are currently in
+/// Thumb2 mode.
+unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const {
+ if (Subtarget->isThumb2()) {
+ EncodedValue &= 0xF0FFFFFF;
+ EncodedValue |= 0x09000000;
+ }
+
+ return EncodedValue;
+}
+
+/// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup
+/// instructions, and rewrite them to their Thumb2 form if we are currently in
+/// Thumb2 mode.
+unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI,
+ unsigned EncodedValue) const {
+ if (Subtarget->isThumb2()) {
+ EncodedValue &= 0x00FFFFFF;
+ EncodedValue |= 0xEE000000;
+ }
+
+ return EncodedValue;
+}
+
+/// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite
+/// them to their Thumb2 form if we are currently in Thumb2 mode.
+unsigned ARMMCCodeEmitter::
+VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue) const {
+ if (Subtarget->isThumb2()) {
+ EncodedValue &= 0x0FFFFFFF;
+ EncodedValue |= 0xE0000000;
+ }
+ return EncodedValue;
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned ARMMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ unsigned RegNo = getARMRegisterNumbering(Reg);
+
+ // Q registers are encoded as 2x their register number.
+ switch (Reg) {
+ default:
+ return RegNo;
+ case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3:
+ case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7:
+ case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11:
+ case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15:
+ return 2 * RegNo;
+ }
+ } else if (MO.isImm()) {
+ return static_cast<unsigned>(MO.getImm());
+ } else if (MO.isFPImm()) {
+ return static_cast<unsigned>(APFloat(MO.getFPImm())
+ .bitcastToAPInt().getHiBits(32).getLimitedValue());
+ }
+
+ llvm_unreachable("Unable to encode MCOperand!");
+ return 0;
+}
+
+/// EncodeAddrModeOpValues - Decompose a 'reg +/- imm' operand into its
+/// register number and absolute immediate. Returns true for add, false
+/// for subtract (the 'U' bit).
+bool ARMMCCodeEmitter::
+EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg,
+ unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+
+ Reg = getARMRegisterNumbering(MO.getReg());
+
+ int32_t SImm = MO1.getImm();
+ bool isAdd = true;
+
+ // Special value for #-0, which must encode as a subtract of zero
+ // (the 'U' bit clear).
+ if (SImm == INT32_MIN) {
+ SImm = 0;
+ isAdd = false;
+ }
+
+ // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
+ if (SImm < 0) {
+ SImm = -SImm;
+ isAdd = false;
+ }
+
+ Imm = SImm;
+ return isAdd;
+}
+
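A worked check of the decomposition (a standalone mirror of the logic above, not the LLVM helper itself): +8 keeps the 'U' bit set, -8 clears it, and the INT32_MIN sentinel for #-0 yields an immediate of zero with 'U' clear.

    #include <cassert>
    #include <climits>
    #include <cstdint>

    static bool splitOffset(int32_t SImm, unsigned &Imm) {
      bool isAdd = true;
      if (SImm == INT32_MIN) { SImm = 0; isAdd = false; }  // #-0 sentinel
      if (SImm < 0) { SImm = -SImm; isAdd = false; }
      Imm = unsigned(SImm);
      return isAdd;
    }

    int main() {
      unsigned Imm;
      assert(splitOffset(8, Imm) && Imm == 8);           // [rN, #8]  -> U=1
      assert(!splitOffset(-8, Imm) && Imm == 8);         // [rN, #-8] -> U=0
      assert(!splitOffset(INT32_MIN, Imm) && Imm == 0);  // [rN, #-0] -> U=0
    }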
+/// getBranchTargetOpValue - Helper function to get the branch target operand,
+/// which is either an immediate or requires a fixup.
+static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ unsigned FixupKind,
+ SmallVectorImpl<MCFixup> &Fixups) {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ // If the destination is an immediate, we have nothing to do.
+ if (MO.isImm()) return MO.getImm();
+ assert(MO.isExpr() && "Unexpected branch target type!");
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind = MCFixupKind(FixupKind);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getThumbBLTargetOpValue - Return encoding info for immediate branch target.
+uint32_t ARMMCCodeEmitter::
+getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl, Fixups);
+}
+
+/// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate
+/// BLX branch target.
+uint32_t ARMMCCodeEmitter::
+getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx, Fixups);
+}
+
+/// getThumbBRTargetOpValue - Return encoding info for Thumb branch target.
+uint32_t ARMMCCodeEmitter::
+getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br, Fixups);
+}
+
+/// getThumbBCCTargetOpValue - Return encoding info for Thumb conditional
+/// branch target.
+uint32_t ARMMCCodeEmitter::
+getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc, Fixups);
+}
+
+/// getThumbCBTargetOpValue - Return encoding info for Thumb CBZ/CBNZ
+/// branch target.
+uint32_t ARMMCCodeEmitter::
+getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups);
+}
+
+/// Return true if this branch carries a predicate other than AL (always).
+static bool HasConditionalBranch(const MCInst &MI) {
+ int NumOp = MI.getNumOperands();
+ if (NumOp >= 2) {
+ for (int i = 0; i < NumOp-1; ++i) {
+ const MCOperand &MCOp1 = MI.getOperand(i);
+ const MCOperand &MCOp2 = MI.getOperand(i + 1);
+ if (MCOp1.isImm() && MCOp2.isReg() &&
+ (MCOp2.getReg() == 0 || MCOp2.getReg() == ARM::CPSR)) {
+ if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL)
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch
+/// target.
+uint32_t ARMMCCodeEmitter::
+getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // FIXME: This really, really shouldn't use TargetMachine. We don't want
+ // coupling between MC and TM anywhere we can help it.
+ if (Subtarget->isThumb2())
+ return
+ ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups);
+ return getARMBranchTargetOpValue(MI, OpIdx, Fixups);
+}
+
+/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate ARM
+/// branch target.
+uint32_t ARMMCCodeEmitter::
+getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (HasConditionalBranch(MI))
+ return ::getBranchTargetOpValue(MI, OpIdx,
+ ARM::fixup_arm_condbranch, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx,
+ ARM::fixup_arm_uncondbranch, Fixups);
+}
+
+/// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit
+/// immediate Thumb2 direct branch target.
+uint32_t ARMMCCodeEmitter::
+getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned Val =
+ ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups);
+ bool I = (Val & 0x800000);
+ bool J1 = (Val & 0x400000);
+ bool J2 = (Val & 0x200000);
+ if (I ^ J1)
+ Val &= ~0x400000;
+ else
+ Val |= 0x400000;
+
+ if (I ^ J2)
+ Val &= ~0x200000;
+ else
+ Val |= 0x200000;
+
+ return Val;
+}
+
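The I/J1/J2 adjustment above implements the Thumb2 branch encoding rule J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S), with the sign bit S in bit 23 and I1/I2 arriving in bits 22/21 of the raw offset. A standalone mirror with two sample values (the offsets are illustrative):

    #include <cassert>
    #include <cstdint>

    static uint32_t fixJBits(uint32_t Val) {
      bool S  = Val & 0x800000;
      bool I1 = Val & 0x400000;
      bool I2 = Val & 0x200000;
      Val &= ~0x600000u;             // drop the raw I1/I2 bits
      if (S == I1) Val |= 0x400000;  // J1 = NOT(I1 XOR S)
      if (S == I2) Val |= 0x200000;  // J2 = NOT(I2 XOR S)
      return Val;
    }

    int main() {
      assert(fixJBits(0x000004) == 0x600004);  // S=0, I1=I2=0 -> J1=J2=1
      assert(fixJBits(0xE00004) == 0xE00004);  // S=1, I1=I2=1 -> J1=J2=1
    }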
+/// getAdrLabelOpValue - Return encoding info for 12-bit immediate ADR label
+/// target.
+uint32_t ARMMCCodeEmitter::
+getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpIdx).isExpr() && "Unexpected adr target type!");
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12,
+ Fixups);
+}
+
+/// getT2AdrLabelOpValue - Return encoding info for 12-bit immediate Thumb2
+/// ADR label target.
+uint32_t ARMMCCodeEmitter::
+getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpIdx).isExpr() && "Unexpected adr target type!");
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12,
+ Fixups);
+}
+
+/// getThumbAdrLabelOpValue - Return encoding info for 8-bit immediate Thumb
+/// ADR label target.
+uint32_t ARMMCCodeEmitter::
+getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpIdx).isExpr() && "Unexpected adr target type!");
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10,
+ Fixups);
+}
+
+/// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg'
+/// operand.
+uint32_t ARMMCCodeEmitter::
+getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &) const {
+ // [Rn, Rm]
+ // {5-3} = Rm
+ // {2-0} = Rn
+ const MCOperand &MO1 = MI.getOperand(OpIdx);
+ const MCOperand &MO2 = MI.getOperand(OpIdx + 1);
+ unsigned Rn = getARMRegisterNumbering(MO1.getReg());
+ unsigned Rm = getARMRegisterNumbering(MO2.getReg());
+ return (Rm << 3) | Rn;
+}
+
+/// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand.
+uint32_t ARMMCCodeEmitter::
+getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {17-13} = reg
+ // {12} = (U)nsigned (add == '1', sub == '0')
+ // {11-0} = imm12
+ unsigned Reg, Imm12;
+ bool isAdd = true;
+ // If the first operand isn't a register, we have a label reference.
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (!MO.isReg()) {
+ Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Imm12 = 0;
+ isAdd = false; // 'U' bit is set as part of the fixup.
+
+ assert(MO.isExpr() && "Unexpected machine operand type!");
+ const MCExpr *Expr = MO.getExpr();
+
+ MCFixupKind Kind;
+ if (Subtarget->isThumb2())
+ Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12);
+ else
+ Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+
+ ++MCNumCPRelocations;
+ } else
+ isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups);
+
+ uint32_t Binary = Imm12 & 0xfff;
+ // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
+ if (isAdd)
+ Binary |= (1 << 12);
+ Binary |= (Reg << 13);
+ return Binary;
+}
+
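For a concrete packing, take an illustrative '[r3, #-20]': Reg = 3, Imm12 = 20, 'U' clear. Per the layout above, the register lands in bits 17:13, the 'U' bit in bit 12, and the immediate below:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Reg = 3, Imm12 = 20;  // [r3, #-20]
      bool isAdd = false;
      uint32_t Binary = (Imm12 & 0xfff) | (uint32_t(isAdd) << 12) | (Reg << 13);
      assert(Binary == 0x6014);      // 3<<13 | 0<<12 | 20
    }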
+/// getT2AddrModeImm8s4OpValue - Return encoding info for
+/// 'reg +/- imm8<<2' operand.
+uint32_t ARMMCCodeEmitter::
+getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {12-9} = reg
+ // {8} = (U)nsigned (add == '1', sub == '0')
+ // {7-0} = imm8
+ unsigned Reg, Imm8;
+ bool isAdd = true;
+ // If the first operand isn't a register, we have a label reference.
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (!MO.isReg()) {
+ Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Imm8 = 0;
+ isAdd = false; // 'U' bit is set as part of the fixup.
+
+ assert(MO.isExpr() && "Unexpected machine operand type!");
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+
+ ++MCNumCPRelocations;
+ } else
+ isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups);
+
+ uint32_t Binary = (Imm8 >> 2) & 0xff;
+ // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
+ if (isAdd)
+ Binary |= (1 << 8);
+ Binary |= (Reg << 9);
+ return Binary;
+}
+
+// FIXME: This routine assumes that a binary expression will always result
+// in a PCRel expression. In reality, that is only true if one or more of
+// the subexpressions is itself PCRel (i.e. "." in asm or some other
+// PC-relative construct), but it is good enough for now.
+static bool EvaluateAsPCRel(const MCExpr *Expr) {
+ switch (Expr->getKind()) {
+ default: assert(0 && "Unexpected expression type");
+ case MCExpr::SymbolRef: return false;
+ case MCExpr::Binary: return true;
+ }
+}
+
+uint32_t
+ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {20-16} = imm{15-12}
+ // {11-0} = imm{11-0}
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (MO.isImm())
+ // Hi / lo 16 bits already extracted during earlier passes.
+ return static_cast<unsigned>(MO.getImm());
+
+ // Handle :upper16: and :lower16: assembly prefixes.
+ const MCExpr *E = MO.getExpr();
+ if (E->getKind() == MCExpr::Target) {
+ const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E);
+ E = ARM16Expr->getSubExpr();
+
+ MCFixupKind Kind;
+ switch (ARM16Expr->getKind()) {
+ default: assert(0 && "Unsupported ARMFixup");
+ case ARMMCExpr::VK_ARM_HI16:
+ if (!Subtarget->isTargetDarwin() && EvaluateAsPCRel(E))
+ Kind = MCFixupKind(Subtarget->isThumb2()
+ ? ARM::fixup_t2_movt_hi16_pcrel
+ : ARM::fixup_arm_movt_hi16_pcrel);
+ else
+ Kind = MCFixupKind(Subtarget->isThumb2()
+ ? ARM::fixup_t2_movt_hi16
+ : ARM::fixup_arm_movt_hi16);
+ break;
+ case ARMMCExpr::VK_ARM_LO16:
+ if (!Subtarget->isTargetDarwin() && EvaluateAsPCRel(E))
+ Kind = MCFixupKind(Subtarget->isThumb2()
+ ? ARM::fixup_t2_movw_lo16_pcrel
+ : ARM::fixup_arm_movw_lo16_pcrel);
+ else
+ Kind = MCFixupKind(Subtarget->isThumb2()
+ ? ARM::fixup_t2_movw_lo16
+ : ARM::fixup_arm_movw_lo16);
+ break;
+ }
+ Fixups.push_back(MCFixup::Create(0, E, Kind));
+ return 0;
+ }
+
+ llvm_unreachable("Unsupported MCExpr type in MCOperand!");
+ return 0;
+}
+
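The end result of the :lower16:/:upper16: machinery is a movw/movt pair that splits a 32-bit value in half; the code above only decides which fixup (and thus relocation) carries each half. A sketch of the arithmetic, with a made-up address:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Addr = 0x12345678;                 // hypothetical symbol address
      uint16_t Lo = uint16_t(Addr & 0xffff);      // :lower16: -> movw
      uint16_t Hi = uint16_t(Addr >> 16);         // :upper16: -> movt
      assert(Lo == 0x5678 && Hi == 0x1234);
      assert((uint32_t(Hi) << 16 | Lo) == Addr);  // the pair reassembles it
    }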
+uint32_t ARMMCCodeEmitter::
+getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx+1);
+ const MCOperand &MO2 = MI.getOperand(OpIdx+2);
+ unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Rm = getARMRegisterNumbering(MO1.getReg());
+ unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm());
+ bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add;
+ ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm());
+ unsigned SBits = getShiftOp(ShOp);
+
+ // {16-13} = Rn
+ // {12} = isAdd
+ // {11-0} = shifter
+ // {3-0} = Rm
+ // {4} = 0
+ // {6-5} = type
+ // {11-7} = imm
+ uint32_t Binary = Rm;
+ Binary |= Rn << 13;
+ Binary |= SBits << 5;
+ Binary |= ShImm << 7;
+ if (isAdd)
+ Binary |= 1 << 12;
+ return Binary;
+}
+
+uint32_t ARMMCCodeEmitter::
+getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {17-14} Rn
+ // {13} 1 == imm12, 0 == Rm
+ // {12} isAdd
+ // {11-0} imm12/Rm
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups);
+ Binary |= Rn << 14;
+ return Binary;
+}
+
+uint32_t ARMMCCodeEmitter::
+getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {13} 1 == imm12, 0 == Rm
+ // {12} isAdd
+ // {11-0} imm12/Rm
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx+1);
+ unsigned Imm = MO1.getImm();
+ bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add;
+ bool isReg = MO.getReg() != 0;
+ uint32_t Binary = ARM_AM::getAM2Offset(Imm);
+ // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12
+ if (isReg) {
+ ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm);
+ Binary <<= 7; // Shift amount is bits [11:7]
+ Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5]
+ Binary |= getARMRegisterNumbering(MO.getReg()); // Rm is bits [3:0]
+ }
+ return Binary | (isAdd << 12) | (isReg << 13);
+}
+
+uint32_t ARMMCCodeEmitter::
+getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {9} 1 == imm8, 0 == Rm
+ // {8} isAdd
+ // {7-4} imm7_4/zero
+ // {3-0} imm3_0/Rm
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx+1);
+ unsigned Imm = MO1.getImm();
+ bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
+ bool isImm = MO.getReg() == 0;
+ uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
+ // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
+ if (!isImm)
+ Imm8 = getARMRegisterNumbering(MO.getReg());
+ return Imm8 | (isAdd << 8) | (isImm << 9);
+}
+
+uint32_t ARMMCCodeEmitter::
+getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {13} 1 == imm8, 0 == Rm
+ // {12-9} Rn
+ // {8} isAdd
+ // {7-4} imm7_4/zero
+ // {3-0} imm3_0/Rm
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx+1);
+ const MCOperand &MO2 = MI.getOperand(OpIdx+2);
+ unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Imm = MO2.getImm();
+ bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
+ bool isImm = MO1.getReg() == 0;
+ uint32_t Imm8 = ARM_AM::getAM3Offset(Imm);
+ // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8
+ if (!isImm)
+ Imm8 = getARMRegisterNumbering(MO1.getReg());
+ return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13);
+}
+
+/// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands.
+uint32_t ARMMCCodeEmitter::
+getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // [SP, #imm]
+ // {7-0} = imm8
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ assert(MI.getOperand(OpIdx).getReg() == ARM::SP &&
+ "Unexpected base register!");
+
+ // The immediate is already shifted for the implicit zeroes, so no change
+ // here.
+ return MO1.getImm() & 0xff;
+}
+
+/// getAddrModeISOpValue - Encode the t_addrmode_is# operands.
+uint32_t ARMMCCodeEmitter::
+getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // [Rn, #imm]
+ // {7-3} = imm5
+ // {2-0} = Rn
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ unsigned Rn = getARMRegisterNumbering(MO.getReg());
+ unsigned Imm5 = MO1.getImm();
+ return ((Imm5 & 0x1f) << 3) | Rn;
+}
+
+/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands.
+uint32_t ARMMCCodeEmitter::
+getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups);
+}
+
+/// getAddrMode5OpValue - Return encoding info for 'reg +/- imm8<<2' operand.
+uint32_t ARMMCCodeEmitter::
+getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // {12-9} = reg
+ // {8} = (U)nsigned (add == '1', sub == '0')
+ // {7-0} = imm8
+ unsigned Reg, Imm8;
+ bool isAdd;
+ // If the first operand isn't a register, we have a label reference.
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (!MO.isReg()) {
+ Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+ Imm8 = 0;
+ isAdd = false; // 'U' bit is handled as part of the fixup.
+
+ assert(MO.isExpr() && "Unexpected machine operand type!");
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind;
+ if (Subtarget->isThumb2())
+ Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
+ else
+ Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+
+ ++MCNumCPRelocations;
+ } else {
+ EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups);
+ isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
+ }
+
+ uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
+ // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
+ if (isAdd)
+ Binary |= (1 << 8);
+ Binary |= (Reg << 9);
+ return Binary;
+}
+
+unsigned ARMMCCodeEmitter::
+getSORegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be
+ // shifted. The second is either Rs, the amount to shift by, or reg0 in which
+ // case the imm contains the amount to shift by.
+ //
+ // {3-0} = Rm.
+ // {4} = 1 if reg shift, 0 if imm shift
+ // {6-5} = type
+ // If reg shift:
+ // {11-8} = Rs
+ // {7} = 0
+ // else (imm shift)
+ // {11-7} = imm
+
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ const MCOperand &MO2 = MI.getOperand(OpIdx + 2);
+ ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());
+
+ // Encode Rm.
+ unsigned Binary = getARMRegisterNumbering(MO.getReg());
+
+ // Encode the shift opcode.
+ unsigned SBits = 0;
+ unsigned Rs = MO1.getReg();
+ if (Rs) {
+ // Set shift operand (bit[7:4]).
+ // LSL - 0001
+ // LSR - 0011
+ // ASR - 0101
+ // ROR - 0111
+ // RRX - 0110 and bit[11:8] clear.
+ switch (SOpc) {
+ default: llvm_unreachable("Unknown shift opc!");
+ case ARM_AM::lsl: SBits = 0x1; break;
+ case ARM_AM::lsr: SBits = 0x3; break;
+ case ARM_AM::asr: SBits = 0x5; break;
+ case ARM_AM::ror: SBits = 0x7; break;
+ case ARM_AM::rrx: SBits = 0x6; break;
+ }
+ } else {
+ // Set shift operand (bit[6:4]).
+ // LSL - 000
+ // LSR - 010
+ // ASR - 100
+ // ROR - 110
+ switch (SOpc) {
+ default: llvm_unreachable("Unknown shift opc!");
+ case ARM_AM::lsl: SBits = 0x0; break;
+ case ARM_AM::lsr: SBits = 0x2; break;
+ case ARM_AM::asr: SBits = 0x4; break;
+ case ARM_AM::ror: SBits = 0x6; break;
+ }
+ }
+
+ Binary |= SBits << 4;
+ if (SOpc == ARM_AM::rrx)
+ return Binary;
+
+ // Encode the shift operation Rs or shift_imm (except rrx).
+ if (Rs) {
+ // Encode Rs bit[11:8].
+ assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
+ return Binary | (getARMRegisterNumbering(Rs) << ARMII::RegRsShift);
+ }
+
+ // Encode shift_imm bit[11:7].
+ return Binary | ARM_AM::getSORegOffset(MO2.getImm()) << 7;
+}
+
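Two worked encodings for the layout above (the register choices are illustrative): the immediate form 'r2, lsl #3' uses the type in bits 6:5 with bit 4 clear and the amount in bits 11:7, while the register form 'r2, lsl r4' sets bits 7:4 to 0001 and puts Rs in bits 11:8:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t ImmForm = 2u | (0x0u << 4) | (3u << 7);  // r2, lsl #3
      assert(ImmForm == 0x182);
      uint32_t RegForm = 2u | (0x1u << 4) | (4u << 8);  // r2, lsl r4
      assert(RegForm == 0x412);
    }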
+unsigned ARMMCCodeEmitter::
+getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO1 = MI.getOperand(OpNum);
+ const MCOperand &MO2 = MI.getOperand(OpNum+1);
+ const MCOperand &MO3 = MI.getOperand(OpNum+2);
+
+ // Encoded as [Rn, Rm, imm].
+ // FIXME: Needs fixup support.
+ unsigned Value = getARMRegisterNumbering(MO1.getReg());
+ Value <<= 4;
+ Value |= getARMRegisterNumbering(MO2.getReg());
+ Value <<= 2;
+ Value |= MO3.getImm();
+
+ return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO1 = MI.getOperand(OpNum);
+ const MCOperand &MO2 = MI.getOperand(OpNum+1);
+
+ // FIXME: Needs fixup support.
+ unsigned Value = getARMRegisterNumbering(MO1.getReg());
+
+ // Even though the immediate is 8 bits long, we need 9 bits in order
+ // to represent the add/subtract bit, which is the inverse of the sign.
+ Value <<= 9;
+ int32_t tmp = (int32_t)MO2.getImm();
+ if (tmp < 0)
+ tmp = abs(tmp);
+ else
+ Value |= 256; // Set the ADD bit
+ Value |= tmp & 255;
+ return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO1 = MI.getOperand(OpNum);
+
+ // FIXME: Needs fixup support.
+ unsigned Value = 0;
+ int32_t tmp = (int32_t)MO1.getImm();
+ if (tmp < 0)
+ tmp = abs(tmp);
+ else
+ Value |= 256; // Set the ADD bit
+ Value |= tmp & 255;
+ return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO1 = MI.getOperand(OpNum);
+
+ // FIXME: Needs fixup support.
+ unsigned Value = 0;
+ int32_t tmp = (int32_t)MO1.getImm();
+ if (tmp < 0)
+ tmp = abs(tmp);
+ else
+ Value |= 4096; // Set the ADD bit
+ Value |= tmp & 4095;
+ return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
+ // shifted. The second is the amount to shift by.
+ //
+ // {3-0} = Rm.
+ // {4} = 0
+ // {6-5} = type
+ // {11-7} = imm
+
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+ ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
+
+ // Encode Rm.
+ unsigned Binary = getARMRegisterNumbering(MO.getReg());
+
+ // Encode the shift opcode.
+ unsigned SBits = 0;
+ // Set shift operand (bit[6:4]).
+ // LSL - 000
+ // LSR - 010
+ // ASR - 100
+ // ROR - 110
+ switch (SOpc) {
+ default: llvm_unreachable("Unknown shift opc!");
+ case ARM_AM::lsl: SBits = 0x0; break;
+ case ARM_AM::lsr: SBits = 0x2; break;
+ case ARM_AM::asr: SBits = 0x4; break;
+ case ARM_AM::ror: SBits = 0x6; break;
+ }
+
+ Binary |= SBits << 4;
+ if (SOpc == ARM_AM::rrx)
+ return Binary;
+
+ // Encode shift_imm bit[11:7].
+ return Binary | ARM_AM::getSORegOffset(MO1.getImm()) << 7;
+}
+
+unsigned ARMMCCodeEmitter::
+getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // 10 bits. The lower 5 bits are the lsb of the mask, the high 5 bits are
+ // the msb of the mask.
+ const MCOperand &MO = MI.getOperand(Op);
+ uint32_t v = ~MO.getImm();
+ uint32_t lsb = CountTrailingZeros_32(v);
+ uint32_t msb = (32 - CountLeadingZeros_32 (v)) - 1;
+ assert(v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!");
+ return lsb | (msb << 5);
+}
+
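As a worked example, 'bfc r0, #8, #8' keeps everything outside bits 15:8, so the operand is the inverted mask 0xFFFF00FF; lsb = 8 and msb = 15 give the field 0x1E8. A standalone check, with GCC/Clang builtins standing in for CountTrailingZeros_32/CountLeadingZeros_32:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t v = ~0xFFFF00FFu;                        // field cleared: 0xFF00
      uint32_t lsb = uint32_t(__builtin_ctz(v));        // 8
      uint32_t msb = 31u - uint32_t(__builtin_clz(v));  // 15
      assert((lsb | (msb << 5)) == 0x1E8);              // 8 | 15<<5
    }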
+unsigned ARMMCCodeEmitter::
+getMsbOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // MSB - 5 bits.
+ uint32_t lsb = MI.getOperand(Op-1).getImm();
+ uint32_t width = MI.getOperand(Op).getImm();
+ uint32_t msb = lsb+width-1;
+ assert(width != 0 && msb < 32 && "Illegal bit width!");
+ return msb;
+}
+
+unsigned ARMMCCodeEmitter::
+getRegisterListOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // VLDM/VSTM:
+ // {12-8} = Vd
+ // {7-0} = Number of registers
+ //
+ // LDM/STM:
+ // {15-0} = Bitfield of GPRs.
+ unsigned Reg = MI.getOperand(Op).getReg();
+ bool SPRRegs = ARM::SPRRegClass.contains(Reg);
+ bool DPRRegs = ARM::DPRRegClass.contains(Reg);
+
+ unsigned Binary = 0;
+
+ if (SPRRegs || DPRRegs) {
+ // VLDM/VSTM
+ unsigned RegNo = getARMRegisterNumbering(Reg);
+ unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff;
+ Binary |= (RegNo & 0x1f) << 8;
+ if (SPRRegs)
+ Binary |= NumRegs;
+ else
+ Binary |= NumRegs * 2;
+ } else {
+ for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) {
+ unsigned RegNo = getARMRegisterNumbering(MI.getOperand(I).getReg());
+ Binary |= 1 << RegNo;
+ }
+ }
+
+ return Binary;
+}
+
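Concrete values for both branches (register choices are illustrative): 'ldm r0, {r0, r2, r3}' becomes the GPR bitfield 0b1101, while a VLDM of d8-d11 puts Vd = 8 in bits 12:8 and twice the register count in bits 7:0:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t GPRList = (1u << 0) | (1u << 2) | (1u << 3);  // {r0, r2, r3}
      assert(GPRList == 0xD);
      uint32_t DPRList = (8u << 8) | (4u * 2);               // d8-d11
      assert(DPRList == 0x808);
    }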
+/// getAddrMode6AddressOpValue - Encode an addrmode6 register number along
+/// with the alignment operand.
+unsigned ARMMCCodeEmitter::
+getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &Reg = MI.getOperand(Op);
+ const MCOperand &Imm = MI.getOperand(Op + 1);
+
+ unsigned RegNo = getARMRegisterNumbering(Reg.getReg());
+ unsigned Align = 0;
+
+ switch (Imm.getImm()) {
+ default: break;
+ case 2:
+ case 4:
+ case 8: Align = 0x01; break;
+ case 16: Align = 0x02; break;
+ case 32: Align = 0x03; break;
+ }
+
+ return RegNo | (Align << 4);
+}
+
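The alignment operand holds the requested alignment in bytes: 2, 4, or 8 bytes encode as 0x01, 16 as 0x02, and 32 as 0x03, packed into bits 5:4 above the register number. A standalone mirror of the switch, with an illustrative '[r5:64]' operand:

    #include <cassert>
    #include <cstdint>

    static unsigned encodeAlign(unsigned Bytes) {  // mirrors the switch above
      switch (Bytes) {
      case 2: case 4: case 8: return 0x01;
      case 16:                return 0x02;
      case 32:                return 0x03;
      default:                return 0x00;
      }
    }

    int main() {
      assert((5u | (encodeAlign(8) << 4)) == 0x15);  // [r5:64] -> 0x15
    }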
+/// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and
+/// alignment operand for use in VLD-dup instructions. This is the same as
+/// getAddrMode6AddressOpValue except for the alignment encoding, which is
+/// different for VLD4-dup.
+unsigned ARMMCCodeEmitter::
+getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &Reg = MI.getOperand(Op);
+ const MCOperand &Imm = MI.getOperand(Op + 1);
+
+ unsigned RegNo = getARMRegisterNumbering(Reg.getReg());
+ unsigned Align = 0;
+
+ switch (Imm.getImm()) {
+ default: break;
+ case 2:
+ case 4:
+ case 8: Align = 0x01; break;
+ case 16: Align = 0x03; break;
+ }
+
+ return RegNo | (Align << 4);
+}
+
+unsigned ARMMCCodeEmitter::
+getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(Op);
+ if (MO.getReg() == 0) return 0x0D;
+ return MO.getReg();
+}
+
+void ARMMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Pseudo instructions don't get encoded.
+ const TargetInstrDesc &Desc = TII.get(MI.getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+ if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo)
+ return;
+ int Size;
+ // Basic size info comes from the TSFlags field.
+ switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
+ default: llvm_unreachable("Unexpected instruction size!");
+ case ARMII::Size2Bytes: Size = 2; break;
+ case ARMII::Size4Bytes: Size = 4; break;
+ }
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+ // Thumb 32-bit wide instructions need to emit the high order halfword
+ // first.
+ if (Subtarget->isThumb() && Size == 4) {
+ EmitConstant(Binary >> 16, 2, OS);
+ EmitConstant(Binary & 0xffff, 2, OS);
+ } else
+ EmitConstant(Binary, Size, OS);
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
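The halfword swap is easiest to see with bytes in hand: for an illustrative 32-bit Thumb2 encoding 0xAABBCCDD, the stream carries the high halfword first, each half little-endian, i.e. BB AA DD CC; a 4-byte ARM encoding would simply be DD CC BB AA. A standalone sketch reusing the EmitConstant loop shape:

    #include <cstdint>
    #include <cstdio>

    static void emitLE(uint32_t Val, unsigned Size) {
      for (unsigned i = 0; i != Size; ++i) {  // same loop as EmitConstant
        std::printf("%02X ", unsigned(Val & 255));
        Val >>= 8;
      }
    }

    int main() {
      uint32_t Binary = 0xAABBCCDD;  // illustrative Thumb2 word
      emitLE(Binary >> 16, 2);       // high halfword first: BB AA
      emitLE(Binary & 0xffff, 2);    // then the low half:   DD CC
      std::printf("\n");
    }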
+#include "ARMGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCExpr.cpp b/contrib/llvm/lib/Target/ARM/ARMMCExpr.cpp
new file mode 100644
index 0000000..2727ba8
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMMCExpr.cpp
@@ -0,0 +1,73 @@
+//===-- ARMMCExpr.cpp - ARM specific MC expression classes ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "armmcexpr"
+#include "ARMMCExpr.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCAssembler.h"
+using namespace llvm;
+
+const ARMMCExpr*
+ARMMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx) {
+ return new (Ctx) ARMMCExpr(Kind, Expr);
+}
+
+void ARMMCExpr::PrintImpl(raw_ostream &OS) const {
+ switch (Kind) {
+ default: assert(0 && "Invalid kind!");
+ case VK_ARM_HI16: OS << ":upper16:"; break;
+ case VK_ARM_LO16: OS << ":lower16:"; break;
+ }
+
+ const MCExpr *Expr = getSubExpr();
+ if (Expr->getKind() != MCExpr::SymbolRef)
+ OS << '(';
+ Expr->print(OS);
+ if (Expr->getKind() != MCExpr::SymbolRef)
+ OS << ')';
+}
+
+bool
+ARMMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ return false;
+}
+
+// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
+// that method should be made public?
+static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) {
+ switch (Value->getKind()) {
+ case MCExpr::Target:
+ assert(0 && "Can't handle nested target expr!");
+ break;
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
+ AddValueSymbols_(BE->getLHS(), Asm);
+ AddValueSymbols_(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef:
+ Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
+ break;
+
+ case MCExpr::Unary:
+ AddValueSymbols_(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
+ break;
+ }
+}
+
+void ARMMCExpr::AddValueSymbols(MCAssembler *Asm) const {
+ AddValueSymbols_(getSubExpr(), Asm);
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCExpr.h b/contrib/llvm/lib/Target/ARM/ARMMCExpr.h
new file mode 100644
index 0000000..d42f766
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMMCExpr.h
@@ -0,0 +1,73 @@
+//===-- ARMMCExpr.h - ARM specific MC expression classes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMMCEXPR_H
+#define ARMMCEXPR_H
+
+#include "llvm/MC/MCExpr.h"
+
+namespace llvm {
+
+class ARMMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_ARM_None,
+ VK_ARM_HI16, // The R_ARM_MOVT_ABS relocation (:upper16: in the .s file)
+ VK_ARM_LO16 // The R_ARM_MOVW_ABS_NC relocation (:lower16: in the .s file)
+ };
+
+private:
+ const VariantKind Kind;
+ const MCExpr *Expr;
+
+ explicit ARMMCExpr(VariantKind _Kind, const MCExpr *_Expr)
+ : Kind(_Kind), Expr(_Expr) {}
+
+public:
+ /// @name Construction
+ /// @{
+
+ static const ARMMCExpr *Create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx);
+
+ static const ARMMCExpr *CreateUpper16(const MCExpr *Expr, MCContext &Ctx) {
+ return Create(VK_ARM_HI16, Expr, Ctx);
+ }
+
+ static const ARMMCExpr *CreateLower16(const MCExpr *Expr, MCContext &Ctx) {
+ return Create(VK_ARM_LO16, Expr, Ctx);
+ }
+
+ /// @}
+ /// @name Accessors
+ /// @{
+
+ /// getKind - Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// getSubExpr - Get the child of this expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// @}
+
+ void PrintImpl(raw_ostream &OS) const;
+ bool EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const;
+ void AddValueSymbols(MCAssembler *) const;
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+
+ static bool classof(const ARMMCExpr *) { return true; }
+
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
index ab2b06b..59d6050 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
@@ -12,122 +12,69 @@
//
//===----------------------------------------------------------------------===//
-#include "ARMMCInstLower.h"
-//#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/CodeGen/AsmPrinter.h"
+#include "ARM.h"
+#include "ARMAsmPrinter.h"
+#include "ARMMCExpr.h"
+#include "llvm/Constants.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-//#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallString.h"
using namespace llvm;
-#if 0
-const ARMSubtarget &ARMMCInstLower::getSubtarget() const {
- return AsmPrinter.getSubtarget();
-}
-
-MachineModuleInfoMachO &ARMMCInstLower::getMachOMMI() const {
- assert(getSubtarget().isTargetDarwin() &&"Can only get MachO info on darwin");
- return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>();
-}
-#endif
-
-MCSymbol *ARMMCInstLower::
-GetGlobalAddressSymbol(const MachineOperand &MO) const {
- // FIXME: HANDLE PLT references how??
- switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.Mang->getSymbol(MO.getGlobal());
-}
-
-MCSymbol *ARMMCInstLower::
-GetExternalSymbolSymbol(const MachineOperand &MO) const {
- // FIXME: HANDLE PLT references how??
+static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
+ ARMAsmPrinter &Printer) {
+ MCContext &Ctx = Printer.OutContext;
+ const MCExpr *Expr;
switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
- case 0: break;
+ default: {
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, Ctx);
+ switch (MO.getTargetFlags()) {
+ default:
+ assert(0 && "Unknown target flag on symbol operand");
+ case 0:
+ break;
+ case ARMII::MO_LO16:
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, Ctx);
+ Expr = ARMMCExpr::CreateLower16(Expr, Ctx);
+ break;
+ case ARMII::MO_HI16:
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, Ctx);
+ Expr = ARMMCExpr::CreateUpper16(Expr, Ctx);
+ break;
+ }
+ break;
}
-
- return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
-}
-
-
-MCSymbol *ARMMCInstLower::
-GetJumpTableSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
- << Printer.getFunctionNumber() << '_' << MO.getIndex();
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
+ case ARMII::MO_PLT:
+ Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_ARM_PLT, Ctx);
+ break;
}
-#endif
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-MCSymbol *ARMMCInstLower::
-GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
- << Printer.getFunctionNumber() << '_' << MO.getIndex();
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- }
-#endif
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCOperand ARMMCInstLower::
-LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const {
- // FIXME: We would like an efficient form for this, so we don't have to do a
- // lot of extra uniquing.
- const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- }
-#endif
-
if (!MO.isJTI() && MO.getOffset())
Expr = MCBinaryExpr::CreateAdd(Expr,
MCConstantExpr::Create(MO.getOffset(), Ctx),
Ctx);
return MCOperand::CreateExpr(Expr);
-}
+}
-void ARMMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+void llvm::LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ ARMAsmPrinter &AP) {
OutMI.setOpcode(MI->getOpcode());
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
-
+
MCOperand MCOp;
switch (MO.getType()) {
default:
MI->dump();
assert(0 && "unknown operand type");
case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) continue;
+ // Ignore all non-CPSR implicit register operands.
+ if (MO.isImplicit() && MO.getReg() != ARM::CPSR) continue;
assert(!MO.getSubReg() && "Subregs should be eliminated!");
MCOp = MCOperand::CreateReg(MO.getReg());
break;
@@ -136,27 +83,33 @@ void ARMMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
case MachineOperand::MO_MachineBasicBlock:
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(), Ctx));
+ MO.getMBB()->getSymbol(), AP.OutContext));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
+ MCOp = GetSymbolRef(MO, AP.Mang->getSymbol(MO.getGlobal()), AP);
break;
case MachineOperand::MO_ExternalSymbol:
- MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
+ MCOp = GetSymbolRef(MO,
+ AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
break;
case MachineOperand::MO_JumpTableIndex:
- MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
+ MCOp = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP);
break;
case MachineOperand::MO_ConstantPoolIndex:
- MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
+ MCOp = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP);
break;
case MachineOperand::MO_BlockAddress:
- MCOp = LowerSymbolOperand(MO, Printer.GetBlockAddressSymbol(
- MO.getBlockAddress()));
+ MCOp = GetSymbolRef(MO,AP.GetBlockAddressSymbol(MO.getBlockAddress()),AP);
break;
+ case MachineOperand::MO_FPImmediate: {
+ APFloat Val = MO.getFPImm()->getValueAPF();
+ bool ignored;
+ Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
+ MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
+ break;
+ }
}
-
+
OutMI.addOperand(MCOp);
}
-
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 514c26b..138f0c2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -22,8 +22,8 @@
namespace llvm {
-/// ARMFunctionInfo - This class is derived from MachineFunction private
-/// ARM target-specific information for each MachineFunction.
+/// ARMFunctionInfo - This class is derived from MachineFunctionInfo and
+/// contains private ARM-specific information for each MachineFunction.
class ARMFunctionInfo : public MachineFunctionInfo {
/// isThumb - True if this function is compiled under Thumb mode.
@@ -79,15 +79,11 @@ class ARMFunctionInfo : public MachineFunctionInfo {
BitVector GPRCS2Frames;
BitVector DPRCSFrames;
- /// SpilledCSRegs - A BitVector mask of all spilled callee-saved registers.
- ///
- BitVector SpilledCSRegs;
-
/// JumpTableUId - Unique id for jumptables.
///
unsigned JumpTableUId;
- unsigned ConstPoolEntryUId;
+ unsigned PICLabelUId;
/// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex;
@@ -95,6 +91,10 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// HasITBlocks - True if IT blocks have been inserted.
bool HasITBlocks;
+ /// CPEClones - Track constant pool entries clones created by Constant Island
+ /// pass.
+ DenseMap<unsigned, unsigned> CPEClones;
+
public:
ARMFunctionInfo() :
isThumb(false),
@@ -104,8 +104,8 @@ public:
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
- JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
- HasITBlocks(false) {}
+ JumpTableUId(0), PICLabelUId(0),
+ VarArgsFrameIndex(0), HasITBlocks(false) {}
explicit ARMFunctionInfo(MachineFunction &MF) :
isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
@@ -115,9 +115,8 @@ public:
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
- SpilledCSRegs(MF.getTarget().getRegisterInfo()->getNumRegs()),
- JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
- HasITBlocks(false) {}
+ JumpTableUId(0), PICLabelUId(0),
+ VarArgsFrameIndex(0), HasITBlocks(false) {}
bool isThumbFunction() const { return isThumb; }
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
@@ -207,18 +206,6 @@ public:
}
}
- void setCSRegisterIsSpilled(unsigned Reg) {
- SpilledCSRegs.set(Reg);
- }
-
- bool isCSRegisterSpilled(unsigned Reg) const {
- return SpilledCSRegs[Reg];
- }
-
- const BitVector &getSpilledCSRegisters() const {
- return SpilledCSRegs;
- }
-
unsigned createJumpTableUId() {
return JumpTableUId++;
}
@@ -227,16 +214,16 @@ public:
return JumpTableUId;
}
- void initConstPoolEntryUId(unsigned UId) {
- ConstPoolEntryUId = UId;
+ void initPICLabelUId(unsigned UId) {
+ PICLabelUId = UId;
}
- unsigned getNumConstPoolEntries() const {
- return ConstPoolEntryUId;
+ unsigned getNumPICLabels() const {
+ return PICLabelUId;
}
- unsigned createConstPoolEntryUId() {
- return ConstPoolEntryUId++;
+ unsigned createPICLabelUId() {
+ return PICLabelUId++;
}
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
@@ -244,6 +231,19 @@ public:
bool hasITBlocks() const { return HasITBlocks; }
void setHasITBlocks(bool h) { HasITBlocks = h; }
+
+ void recordCPEClone(unsigned CPIdx, unsigned CPCloneIdx) {
+ if (!CPEClones.insert(std::make_pair(CPCloneIdx, CPIdx)).second)
+ assert(0 && "Duplicate entries!");
+ }
+
+ unsigned getOriginalCPIdx(unsigned CloneIdx) const {
+ DenseMap<unsigned, unsigned>::const_iterator I = CPEClones.find(CloneIdx);
+ if (I != CPEClones.end())
+ return I->second;
+ else
+ return -1U;
+ }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h b/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
index 5ff7c38..edecc4b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
+++ b/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
@@ -21,6566 +21,6566 @@
// This table is 6561*4 = 26244 bytes in size.
static const unsigned PerfectShuffleTable[6561+1] = {
- 135053414U, // <0,0,0,0>: Cost 1 vdup0 LHS
- 1543503974U, // <0,0,0,1>: Cost 2 vext2 <0,0,0,0>, LHS
- 2618572962U, // <0,0,0,2>: Cost 3 vext2 <0,2,0,0>, <0,2,0,0>
- 2568054923U, // <0,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
- 1476398390U, // <0,0,0,4>: Cost 2 vext1 <0,0,0,0>, RHS
- 2550140624U, // <0,0,0,5>: Cost 3 vext1 <0,0,0,0>, <5,1,7,3>
- 2550141434U, // <0,0,0,6>: Cost 3 vext1 <0,0,0,0>, <6,2,7,3>
- 2591945711U, // <0,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <0,0,0,u>: Cost 1 vdup0 LHS
- 2886516736U, // <0,0,1,0>: Cost 3 vzipl LHS, <0,0,0,0>
- 1812775014U, // <0,0,1,1>: Cost 2 vzipl LHS, LHS
- 1618133094U, // <0,0,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
- 2625209292U, // <0,0,1,3>: Cost 3 vext2 <1,3,0,0>, <1,3,0,0>
- 2886558034U, // <0,0,1,4>: Cost 3 vzipl LHS, <0,4,1,5>
- 2617246864U, // <0,0,1,5>: Cost 3 vext2 <0,0,0,0>, <1,5,3,7>
- 3659723031U, // <0,0,1,6>: Cost 4 vext1 <6,0,0,1>, <6,0,0,1>
- 2591953904U, // <0,0,1,7>: Cost 3 vext1 <7,0,0,1>, <7,0,0,1>
- 1812775581U, // <0,0,1,u>: Cost 2 vzipl LHS, LHS
- 3020734464U, // <0,0,2,0>: Cost 3 vtrnl LHS, <0,0,0,0>
- 3020734474U, // <0,0,2,1>: Cost 3 vtrnl LHS, <0,0,1,1>
- 1946992742U, // <0,0,2,2>: Cost 2 vtrnl LHS, LHS
- 2631181989U, // <0,0,2,3>: Cost 3 vext2 <2,3,0,0>, <2,3,0,0>
- 3020734668U, // <0,0,2,4>: Cost 3 vtrnl LHS, <0,2,4,6>
- 3826550569U, // <0,0,2,5>: Cost 4 vuzpl <0,2,0,2>, <2,4,5,6>
- 2617247674U, // <0,0,2,6>: Cost 3 vext2 <0,0,0,0>, <2,6,3,7>
- 2591962097U, // <0,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
- 1946992796U, // <0,0,2,u>: Cost 2 vtrnl LHS, LHS
- 2635163787U, // <0,0,3,0>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
- 2686419196U, // <0,0,3,1>: Cost 3 vext3 <0,3,1,0>, <0,3,1,0>
- 2686492933U, // <0,0,3,2>: Cost 3 vext3 <0,3,2,0>, <0,3,2,0>
- 2617248156U, // <0,0,3,3>: Cost 3 vext2 <0,0,0,0>, <3,3,3,3>
- 2617248258U, // <0,0,3,4>: Cost 3 vext2 <0,0,0,0>, <3,4,5,6>
- 3826551298U, // <0,0,3,5>: Cost 4 vuzpl <0,2,0,2>, <3,4,5,6>
- 3690990200U, // <0,0,3,6>: Cost 4 vext2 <0,0,0,0>, <3,6,0,7>
- 3713551042U, // <0,0,3,7>: Cost 4 vext2 <3,7,0,0>, <3,7,0,0>
- 2635163787U, // <0,0,3,u>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
- 2617248658U, // <0,0,4,0>: Cost 3 vext2 <0,0,0,0>, <4,0,5,1>
- 2888450150U, // <0,0,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
- 3021570150U, // <0,0,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
- 3641829519U, // <0,0,4,3>: Cost 4 vext1 <3,0,0,4>, <3,0,0,4>
- 3021570252U, // <0,0,4,4>: Cost 3 vtrnl <0,2,4,6>, <0,2,4,6>
- 1543507254U, // <0,0,4,5>: Cost 2 vext2 <0,0,0,0>, RHS
- 2752810294U, // <0,0,4,6>: Cost 3 vuzpl <0,2,0,2>, RHS
- 3786998152U, // <0,0,4,7>: Cost 4 vext3 <4,7,5,0>, <0,4,7,5>
- 1543507497U, // <0,0,4,u>: Cost 2 vext2 <0,0,0,0>, RHS
- 2684354972U, // <0,0,5,0>: Cost 3 vext3 <0,0,0,0>, <0,5,0,7>
- 2617249488U, // <0,0,5,1>: Cost 3 vext2 <0,0,0,0>, <5,1,7,3>
- 3765617070U, // <0,0,5,2>: Cost 4 vext3 <1,2,3,0>, <0,5,2,7>
- 3635865780U, // <0,0,5,3>: Cost 4 vext1 <2,0,0,5>, <3,0,4,5>
- 2617249734U, // <0,0,5,4>: Cost 3 vext2 <0,0,0,0>, <5,4,7,6>
- 2617249796U, // <0,0,5,5>: Cost 3 vext2 <0,0,0,0>, <5,5,5,5>
- 2718712274U, // <0,0,5,6>: Cost 3 vext3 <5,6,7,0>, <0,5,6,7>
- 2617249960U, // <0,0,5,7>: Cost 3 vext2 <0,0,0,0>, <5,7,5,7>
- 2720039396U, // <0,0,5,u>: Cost 3 vext3 <5,u,7,0>, <0,5,u,7>
- 2684355053U, // <0,0,6,0>: Cost 3 vext3 <0,0,0,0>, <0,6,0,7>
- 3963609190U, // <0,0,6,1>: Cost 4 vzipl <0,6,2,7>, LHS
- 2617250298U, // <0,0,6,2>: Cost 3 vext2 <0,0,0,0>, <6,2,7,3>
- 3796435464U, // <0,0,6,3>: Cost 4 vext3 <6,3,7,0>, <0,6,3,7>
- 3659762998U, // <0,0,6,4>: Cost 4 vext1 <6,0,0,6>, RHS
- 3659763810U, // <0,0,6,5>: Cost 4 vext1 <6,0,0,6>, <5,6,7,0>
- 2617250616U, // <0,0,6,6>: Cost 3 vext2 <0,0,0,0>, <6,6,6,6>
- 2657727309U, // <0,0,6,7>: Cost 3 vext2 <6,7,0,0>, <6,7,0,0>
- 2658390942U, // <0,0,6,u>: Cost 3 vext2 <6,u,0,0>, <6,u,0,0>
- 2659054575U, // <0,0,7,0>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
- 3635880854U, // <0,0,7,1>: Cost 4 vext1 <2,0,0,7>, <1,2,3,0>
- 3635881401U, // <0,0,7,2>: Cost 4 vext1 <2,0,0,7>, <2,0,0,7>
- 3734787298U, // <0,0,7,3>: Cost 4 vext2 <7,3,0,0>, <7,3,0,0>
- 2617251174U, // <0,0,7,4>: Cost 3 vext2 <0,0,0,0>, <7,4,5,6>
- 3659772002U, // <0,0,7,5>: Cost 4 vext1 <6,0,0,7>, <5,6,7,0>
- 3659772189U, // <0,0,7,6>: Cost 4 vext1 <6,0,0,7>, <6,0,0,7>
- 2617251436U, // <0,0,7,7>: Cost 3 vext2 <0,0,0,0>, <7,7,7,7>
- 2659054575U, // <0,0,7,u>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <0,0,u,0>: Cost 1 vdup0 LHS
- 1817419878U, // <0,0,u,1>: Cost 2 vzipl LHS, LHS
- 1947435110U, // <0,0,u,2>: Cost 2 vtrnl LHS, LHS
- 2568120467U, // <0,0,u,3>: Cost 3 vext1 <3,0,0,u>, <3,0,0,u>
- 1476463926U, // <0,0,u,4>: Cost 2 vext1 <0,0,0,u>, RHS
- 1543510170U, // <0,0,u,5>: Cost 2 vext2 <0,0,0,0>, RHS
- 2752813210U, // <0,0,u,6>: Cost 3 vuzpl <0,2,0,2>, RHS
- 2592011255U, // <0,0,u,7>: Cost 3 vext1 <7,0,0,u>, <7,0,0,u>
- 135053414U, // <0,0,u,u>: Cost 1 vdup0 LHS
- 2618581002U, // <0,1,0,0>: Cost 3 vext2 <0,2,0,1>, <0,0,1,1>
- 1557446758U, // <0,1,0,1>: Cost 2 vext2 <2,3,0,1>, LHS
- 2618581155U, // <0,1,0,2>: Cost 3 vext2 <0,2,0,1>, <0,2,0,1>
- 2690548468U, // <0,1,0,3>: Cost 3 vext3 <1,0,3,0>, <1,0,3,0>
- 2626543954U, // <0,1,0,4>: Cost 3 vext2 <1,5,0,1>, <0,4,1,5>
- 4094985216U, // <0,1,0,5>: Cost 4 vtrnl <0,2,0,2>, <1,3,5,7>
- 2592019278U, // <0,1,0,6>: Cost 3 vext1 <7,0,1,0>, <6,7,0,1>
- 2592019448U, // <0,1,0,7>: Cost 3 vext1 <7,0,1,0>, <7,0,1,0>
- 1557447325U, // <0,1,0,u>: Cost 2 vext2 <2,3,0,1>, LHS
- 1476476938U, // <0,1,1,0>: Cost 2 vext1 <0,0,1,1>, <0,0,1,1>
- 2886517556U, // <0,1,1,1>: Cost 3 vzipl LHS, <1,1,1,1>
- 2886517654U, // <0,1,1,2>: Cost 3 vzipl LHS, <1,2,3,0>
- 2886517720U, // <0,1,1,3>: Cost 3 vzipl LHS, <1,3,1,3>
- 1476480310U, // <0,1,1,4>: Cost 2 vext1 <0,0,1,1>, RHS
- 2886558864U, // <0,1,1,5>: Cost 3 vzipl LHS, <1,5,3,7>
- 2550223354U, // <0,1,1,6>: Cost 3 vext1 <0,0,1,1>, <6,2,7,3>
- 2550223856U, // <0,1,1,7>: Cost 3 vext1 <0,0,1,1>, <7,0,0,1>
- 1476482862U, // <0,1,1,u>: Cost 2 vext1 <0,0,1,1>, LHS
- 1494401126U, // <0,1,2,0>: Cost 2 vext1 <3,0,1,2>, LHS
- 3020735284U, // <0,1,2,1>: Cost 3 vtrnl LHS, <1,1,1,1>
- 2562172349U, // <0,1,2,2>: Cost 3 vext1 <2,0,1,2>, <2,0,1,2>
- 835584U, // <0,1,2,3>: Cost 0 copy LHS
- 1494404406U, // <0,1,2,4>: Cost 2 vext1 <3,0,1,2>, RHS
- 3020735488U, // <0,1,2,5>: Cost 3 vtrnl LHS, <1,3,5,7>
- 2631190458U, // <0,1,2,6>: Cost 3 vext2 <2,3,0,1>, <2,6,3,7>
- 1518294010U, // <0,1,2,7>: Cost 2 vext1 <7,0,1,2>, <7,0,1,2>
- 835584U, // <0,1,2,u>: Cost 0 copy LHS
- 2692318156U, // <0,1,3,0>: Cost 3 vext3 <1,3,0,0>, <1,3,0,0>
- 2691875800U, // <0,1,3,1>: Cost 3 vext3 <1,2,3,0>, <1,3,1,3>
- 2691875806U, // <0,1,3,2>: Cost 3 vext3 <1,2,3,0>, <1,3,2,0>
- 2692539367U, // <0,1,3,3>: Cost 3 vext3 <1,3,3,0>, <1,3,3,0>
- 2562182454U, // <0,1,3,4>: Cost 3 vext1 <2,0,1,3>, RHS
- 2691875840U, // <0,1,3,5>: Cost 3 vext3 <1,2,3,0>, <1,3,5,7>
- 2692760578U, // <0,1,3,6>: Cost 3 vext3 <1,3,6,0>, <1,3,6,0>
- 2639817411U, // <0,1,3,7>: Cost 3 vext2 <3,7,0,1>, <3,7,0,1>
- 2691875863U, // <0,1,3,u>: Cost 3 vext3 <1,2,3,0>, <1,3,u,3>
- 2568159334U, // <0,1,4,0>: Cost 3 vext1 <3,0,1,4>, LHS
- 4095312692U, // <0,1,4,1>: Cost 4 vtrnl <0,2,4,6>, <1,1,1,1>
- 2568160934U, // <0,1,4,2>: Cost 3 vext1 <3,0,1,4>, <2,3,0,1>
- 2568161432U, // <0,1,4,3>: Cost 3 vext1 <3,0,1,4>, <3,0,1,4>
- 2568162614U, // <0,1,4,4>: Cost 3 vext1 <3,0,1,4>, RHS
- 1557450038U, // <0,1,4,5>: Cost 2 vext2 <2,3,0,1>, RHS
- 2754235702U, // <0,1,4,6>: Cost 3 vuzpl <0,4,1,5>, RHS
- 2592052220U, // <0,1,4,7>: Cost 3 vext1 <7,0,1,4>, <7,0,1,4>
- 1557450281U, // <0,1,4,u>: Cost 2 vext2 <2,3,0,1>, RHS
- 3765617775U, // <0,1,5,0>: Cost 4 vext3 <1,2,3,0>, <1,5,0,1>
- 2647781007U, // <0,1,5,1>: Cost 3 vext2 <5,1,0,1>, <5,1,0,1>
- 3704934138U, // <0,1,5,2>: Cost 4 vext2 <2,3,0,1>, <5,2,3,0>
- 2691875984U, // <0,1,5,3>: Cost 3 vext3 <1,2,3,0>, <1,5,3,7>
- 2657734598U, // <0,1,5,4>: Cost 3 vext2 <6,7,0,1>, <5,4,7,6>
- 2650435539U, // <0,1,5,5>: Cost 3 vext2 <5,5,0,1>, <5,5,0,1>
- 2651099172U, // <0,1,5,6>: Cost 3 vext2 <5,6,0,1>, <5,6,0,1>
- 2651762805U, // <0,1,5,7>: Cost 3 vext2 <5,7,0,1>, <5,7,0,1>
- 2691876029U, // <0,1,5,u>: Cost 3 vext3 <1,2,3,0>, <1,5,u,7>
- 2592063590U, // <0,1,6,0>: Cost 3 vext1 <7,0,1,6>, LHS
- 3765617871U, // <0,1,6,1>: Cost 4 vext3 <1,2,3,0>, <1,6,1,7>
- 2654417337U, // <0,1,6,2>: Cost 3 vext2 <6,2,0,1>, <6,2,0,1>
- 3765617889U, // <0,1,6,3>: Cost 4 vext3 <1,2,3,0>, <1,6,3,7>
- 2592066870U, // <0,1,6,4>: Cost 3 vext1 <7,0,1,6>, RHS
- 3765617907U, // <0,1,6,5>: Cost 4 vext3 <1,2,3,0>, <1,6,5,7>
- 2657071869U, // <0,1,6,6>: Cost 3 vext2 <6,6,0,1>, <6,6,0,1>
- 1583993678U, // <0,1,6,7>: Cost 2 vext2 <6,7,0,1>, <6,7,0,1>
- 1584657311U, // <0,1,6,u>: Cost 2 vext2 <6,u,0,1>, <6,u,0,1>
- 2657735672U, // <0,1,7,0>: Cost 3 vext2 <6,7,0,1>, <7,0,1,0>
- 2657735808U, // <0,1,7,1>: Cost 3 vext2 <6,7,0,1>, <7,1,7,1>
- 2631193772U, // <0,1,7,2>: Cost 3 vext2 <2,3,0,1>, <7,2,3,0>
- 2661053667U, // <0,1,7,3>: Cost 3 vext2 <7,3,0,1>, <7,3,0,1>
- 2657736038U, // <0,1,7,4>: Cost 3 vext2 <6,7,0,1>, <7,4,5,6>
- 3721524621U, // <0,1,7,5>: Cost 4 vext2 <5,1,0,1>, <7,5,1,0>
- 2657736158U, // <0,1,7,6>: Cost 3 vext2 <6,7,0,1>, <7,6,1,0>
- 2657736300U, // <0,1,7,7>: Cost 3 vext2 <6,7,0,1>, <7,7,7,7>
- 2657736322U, // <0,1,7,u>: Cost 3 vext2 <6,7,0,1>, <7,u,1,2>
- 1494450278U, // <0,1,u,0>: Cost 2 vext1 <3,0,1,u>, LHS
- 1557452590U, // <0,1,u,1>: Cost 2 vext2 <2,3,0,1>, LHS
- 2754238254U, // <0,1,u,2>: Cost 3 vuzpl <0,4,1,5>, LHS
- 835584U, // <0,1,u,3>: Cost 0 copy LHS
- 1494453558U, // <0,1,u,4>: Cost 2 vext1 <3,0,1,u>, RHS
- 1557452954U, // <0,1,u,5>: Cost 2 vext2 <2,3,0,1>, RHS
- 2754238618U, // <0,1,u,6>: Cost 3 vuzpl <0,4,1,5>, RHS
- 1518343168U, // <0,1,u,7>: Cost 2 vext1 <7,0,1,u>, <7,0,1,u>
- 835584U, // <0,1,u,u>: Cost 0 copy LHS
- 2752299008U, // <0,2,0,0>: Cost 3 vuzpl LHS, <0,0,0,0>
- 1544847462U, // <0,2,0,1>: Cost 2 vext2 <0,2,0,2>, LHS
- 1678557286U, // <0,2,0,2>: Cost 2 vuzpl LHS, LHS
- 2696521165U, // <0,2,0,3>: Cost 3 vext3 <2,0,3,0>, <2,0,3,0>
- 2752340172U, // <0,2,0,4>: Cost 3 vuzpl LHS, <0,2,4,6>
- 2691876326U, // <0,2,0,5>: Cost 3 vext3 <1,2,3,0>, <2,0,5,7>
- 2618589695U, // <0,2,0,6>: Cost 3 vext2 <0,2,0,2>, <0,6,2,7>
- 2592093185U, // <0,2,0,7>: Cost 3 vext1 <7,0,2,0>, <7,0,2,0>
- 1678557340U, // <0,2,0,u>: Cost 2 vuzpl LHS, LHS
- 2618589942U, // <0,2,1,0>: Cost 3 vext2 <0,2,0,2>, <1,0,3,2>
- 2752299828U, // <0,2,1,1>: Cost 3 vuzpl LHS, <1,1,1,1>
- 2886518376U, // <0,2,1,2>: Cost 3 vzipl LHS, <2,2,2,2>
- 2752299766U, // <0,2,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
- 2550295862U, // <0,2,1,4>: Cost 3 vext1 <0,0,2,1>, RHS
- 2752340992U, // <0,2,1,5>: Cost 3 vuzpl LHS, <1,3,5,7>
- 2886559674U, // <0,2,1,6>: Cost 3 vzipl LHS, <2,6,3,7>
- 3934208106U, // <0,2,1,7>: Cost 4 vuzpr <7,0,1,2>, <0,1,2,7>
- 2752340771U, // <0,2,1,u>: Cost 3 vuzpl LHS, <1,0,u,2>
- 1476558868U, // <0,2,2,0>: Cost 2 vext1 <0,0,2,2>, <0,0,2,2>
- 2226628029U, // <0,2,2,1>: Cost 3 vrev <2,0,1,2>
- 2752300648U, // <0,2,2,2>: Cost 3 vuzpl LHS, <2,2,2,2>
- 3020736114U, // <0,2,2,3>: Cost 3 vtrnl LHS, <2,2,3,3>
- 1476562230U, // <0,2,2,4>: Cost 2 vext1 <0,0,2,2>, RHS
- 2550304464U, // <0,2,2,5>: Cost 3 vext1 <0,0,2,2>, <5,1,7,3>
- 2618591162U, // <0,2,2,6>: Cost 3 vext2 <0,2,0,2>, <2,6,3,7>
- 2550305777U, // <0,2,2,7>: Cost 3 vext1 <0,0,2,2>, <7,0,0,2>
- 1476564782U, // <0,2,2,u>: Cost 2 vext1 <0,0,2,2>, LHS
- 2618591382U, // <0,2,3,0>: Cost 3 vext2 <0,2,0,2>, <3,0,1,2>
- 2752301206U, // <0,2,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
- 3826043121U, // <0,2,3,2>: Cost 4 vuzpl LHS, <3,1,2,3>
- 2752301468U, // <0,2,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
- 2618591746U, // <0,2,3,4>: Cost 3 vext2 <0,2,0,2>, <3,4,5,6>
- 2752301570U, // <0,2,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
- 3830688102U, // <0,2,3,6>: Cost 4 vuzpl LHS, <3,2,6,3>
- 2698807012U, // <0,2,3,7>: Cost 3 vext3 <2,3,7,0>, <2,3,7,0>
- 2752301269U, // <0,2,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
- 2562261094U, // <0,2,4,0>: Cost 3 vext1 <2,0,2,4>, LHS
- 4095313828U, // <0,2,4,1>: Cost 4 vtrnl <0,2,4,6>, <2,6,1,3>
- 2226718152U, // <0,2,4,2>: Cost 3 vrev <2,0,2,4>
- 2568235169U, // <0,2,4,3>: Cost 3 vext1 <3,0,2,4>, <3,0,2,4>
- 2562264374U, // <0,2,4,4>: Cost 3 vext1 <2,0,2,4>, RHS
- 1544850742U, // <0,2,4,5>: Cost 2 vext2 <0,2,0,2>, RHS
- 1678560566U, // <0,2,4,6>: Cost 2 vuzpl LHS, RHS
- 2592125957U, // <0,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
- 1678560584U, // <0,2,4,u>: Cost 2 vuzpl LHS, RHS
- 2691876686U, // <0,2,5,0>: Cost 3 vext3 <1,2,3,0>, <2,5,0,7>
- 2618592976U, // <0,2,5,1>: Cost 3 vext2 <0,2,0,2>, <5,1,7,3>
- 3765618528U, // <0,2,5,2>: Cost 4 vext3 <1,2,3,0>, <2,5,2,7>
- 3765618536U, // <0,2,5,3>: Cost 4 vext3 <1,2,3,0>, <2,5,3,6>
- 2618593222U, // <0,2,5,4>: Cost 3 vext2 <0,2,0,2>, <5,4,7,6>
- 2752303108U, // <0,2,5,5>: Cost 3 vuzpl LHS, <5,5,5,5>
- 2618593378U, // <0,2,5,6>: Cost 3 vext2 <0,2,0,2>, <5,6,7,0>
- 2824785206U, // <0,2,5,7>: Cost 3 vuzpr <1,0,3,2>, RHS
- 2824785207U, // <0,2,5,u>: Cost 3 vuzpr <1,0,3,2>, RHS
- 2752303950U, // <0,2,6,0>: Cost 3 vuzpl LHS, <6,7,0,1>
- 3830690081U, // <0,2,6,1>: Cost 4 vuzpl LHS, <6,0,1,2>
- 2618593786U, // <0,2,6,2>: Cost 3 vext2 <0,2,0,2>, <6,2,7,3>
- 2691876794U, // <0,2,6,3>: Cost 3 vext3 <1,2,3,0>, <2,6,3,7>
- 2752303990U, // <0,2,6,4>: Cost 3 vuzpl LHS, <6,7,4,5>
- 3830690445U, // <0,2,6,5>: Cost 4 vuzpl LHS, <6,4,5,6>
- 2752303928U, // <0,2,6,6>: Cost 3 vuzpl LHS, <6,6,6,6>
- 2657743695U, // <0,2,6,7>: Cost 3 vext2 <6,7,0,2>, <6,7,0,2>
- 2691876839U, // <0,2,6,u>: Cost 3 vext3 <1,2,3,0>, <2,6,u,7>
- 2659070961U, // <0,2,7,0>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
- 2659734594U, // <0,2,7,1>: Cost 3 vext2 <7,1,0,2>, <7,1,0,2>
- 3734140051U, // <0,2,7,2>: Cost 4 vext2 <7,2,0,2>, <7,2,0,2>
- 2701166596U, // <0,2,7,3>: Cost 3 vext3 <2,7,3,0>, <2,7,3,0>
- 2662389094U, // <0,2,7,4>: Cost 3 vext2 <7,5,0,2>, <7,4,5,6>
- 2662389126U, // <0,2,7,5>: Cost 3 vext2 <7,5,0,2>, <7,5,0,2>
- 3736794583U, // <0,2,7,6>: Cost 4 vext2 <7,6,0,2>, <7,6,0,2>
- 2752304748U, // <0,2,7,7>: Cost 3 vuzpl LHS, <7,7,7,7>
- 2659070961U, // <0,2,7,u>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
- 1476608026U, // <0,2,u,0>: Cost 2 vext1 <0,0,2,u>, <0,0,2,u>
- 1544853294U, // <0,2,u,1>: Cost 2 vext2 <0,2,0,2>, LHS
- 1678563118U, // <0,2,u,2>: Cost 2 vuzpl LHS, LHS
- 3021178482U, // <0,2,u,3>: Cost 3 vtrnl LHS, <2,2,3,3>
- 1476611382U, // <0,2,u,4>: Cost 2 vext1 <0,0,2,u>, RHS
- 1544853658U, // <0,2,u,5>: Cost 2 vext2 <0,2,0,2>, RHS
- 1678563482U, // <0,2,u,6>: Cost 2 vuzpl LHS, RHS
- 2824785449U, // <0,2,u,7>: Cost 3 vuzpr <1,0,3,2>, RHS
- 1678563172U, // <0,2,u,u>: Cost 2 vuzpl LHS, LHS
- 2556329984U, // <0,3,0,0>: Cost 3 vext1 <1,0,3,0>, <0,0,0,0>
- 2686421142U, // <0,3,0,1>: Cost 3 vext3 <0,3,1,0>, <3,0,1,2>
- 2562303437U, // <0,3,0,2>: Cost 3 vext1 <2,0,3,0>, <2,0,3,0>
- 4094986652U, // <0,3,0,3>: Cost 4 vtrnl <0,2,0,2>, <3,3,3,3>
- 2556333366U, // <0,3,0,4>: Cost 3 vext1 <1,0,3,0>, RHS
- 4094986754U, // <0,3,0,5>: Cost 4 vtrnl <0,2,0,2>, <3,4,5,6>
- 3798796488U, // <0,3,0,6>: Cost 4 vext3 <6,7,3,0>, <3,0,6,7>
- 3776530634U, // <0,3,0,7>: Cost 4 vext3 <3,0,7,0>, <3,0,7,0>
- 2556335918U, // <0,3,0,u>: Cost 3 vext1 <1,0,3,0>, LHS
- 2886518934U, // <0,3,1,0>: Cost 3 vzipl LHS, <3,0,1,2>
- 2556338933U, // <0,3,1,1>: Cost 3 vext1 <1,0,3,1>, <1,0,3,1>
- 2691877105U, // <0,3,1,2>: Cost 3 vext3 <1,2,3,0>, <3,1,2,3>
- 2886519196U, // <0,3,1,3>: Cost 3 vzipl LHS, <3,3,3,3>
- 2886519298U, // <0,3,1,4>: Cost 3 vzipl LHS, <3,4,5,6>
- 4095740418U, // <0,3,1,5>: Cost 4 vtrnl <0,3,1,4>, <3,4,5,6>
- 3659944242U, // <0,3,1,6>: Cost 4 vext1 <6,0,3,1>, <6,0,3,1>
- 3769600286U, // <0,3,1,7>: Cost 4 vext3 <1,u,3,0>, <3,1,7,3>
- 2886519582U, // <0,3,1,u>: Cost 3 vzipl LHS, <3,u,1,2>
- 1482604646U, // <0,3,2,0>: Cost 2 vext1 <1,0,3,2>, LHS
- 1482605302U, // <0,3,2,1>: Cost 2 vext1 <1,0,3,2>, <1,0,3,2>
- 2556348008U, // <0,3,2,2>: Cost 3 vext1 <1,0,3,2>, <2,2,2,2>
- 3020736924U, // <0,3,2,3>: Cost 3 vtrnl LHS, <3,3,3,3>
- 1482607926U, // <0,3,2,4>: Cost 2 vext1 <1,0,3,2>, RHS
- 3020737026U, // <0,3,2,5>: Cost 3 vtrnl LHS, <3,4,5,6>
- 2598154746U, // <0,3,2,6>: Cost 3 vext1 <u,0,3,2>, <6,2,7,3>
- 2598155258U, // <0,3,2,7>: Cost 3 vext1 <u,0,3,2>, <7,0,1,2>
- 1482610478U, // <0,3,2,u>: Cost 2 vext1 <1,0,3,2>, LHS
- 3692341398U, // <0,3,3,0>: Cost 4 vext2 <0,2,0,3>, <3,0,1,2>
- 2635851999U, // <0,3,3,1>: Cost 3 vext2 <3,1,0,3>, <3,1,0,3>
- 3636069840U, // <0,3,3,2>: Cost 4 vext1 <2,0,3,3>, <2,0,3,3>
- 2691877276U, // <0,3,3,3>: Cost 3 vext3 <1,2,3,0>, <3,3,3,3>
- 3961522690U, // <0,3,3,4>: Cost 4 vzipl <0,3,1,4>, <3,4,5,6>
- 3826797058U, // <0,3,3,5>: Cost 4 vuzpl <0,2,3,5>, <3,4,5,6>
- 3703622282U, // <0,3,3,6>: Cost 4 vext2 <2,1,0,3>, <3,6,2,7>
- 3769600452U, // <0,3,3,7>: Cost 4 vext3 <1,u,3,0>, <3,3,7,7>
- 2640497430U, // <0,3,3,u>: Cost 3 vext2 <3,u,0,3>, <3,u,0,3>
- 3962194070U, // <0,3,4,0>: Cost 4 vzipl <0,4,1,5>, <3,0,1,2>
- 2232617112U, // <0,3,4,1>: Cost 3 vrev <3,0,1,4>
- 2232690849U, // <0,3,4,2>: Cost 3 vrev <3,0,2,4>
- 4095314332U, // <0,3,4,3>: Cost 4 vtrnl <0,2,4,6>, <3,3,3,3>
- 3962194434U, // <0,3,4,4>: Cost 4 vzipl <0,4,1,5>, <3,4,5,6>
- 2691877378U, // <0,3,4,5>: Cost 3 vext3 <1,2,3,0>, <3,4,5,6>
- 3826765110U, // <0,3,4,6>: Cost 4 vuzpl <0,2,3,1>, RHS
- 3665941518U, // <0,3,4,7>: Cost 4 vext1 <7,0,3,4>, <7,0,3,4>
- 2691877405U, // <0,3,4,u>: Cost 3 vext3 <1,2,3,0>, <3,4,u,6>
- 3630112870U, // <0,3,5,0>: Cost 4 vext1 <1,0,3,5>, LHS
- 3630113526U, // <0,3,5,1>: Cost 4 vext1 <1,0,3,5>, <1,0,3,2>
- 4035199734U, // <0,3,5,2>: Cost 4 vzipr <1,4,0,5>, <1,0,3,2>
- 3769600578U, // <0,3,5,3>: Cost 4 vext3 <1,u,3,0>, <3,5,3,7>
- 2232846516U, // <0,3,5,4>: Cost 3 vrev <3,0,4,5>
- 3779037780U, // <0,3,5,5>: Cost 4 vext3 <3,4,5,0>, <3,5,5,7>
- 2718714461U, // <0,3,5,6>: Cost 3 vext3 <5,6,7,0>, <3,5,6,7>
- 2706106975U, // <0,3,5,7>: Cost 3 vext3 <3,5,7,0>, <3,5,7,0>
- 2233141464U, // <0,3,5,u>: Cost 3 vrev <3,0,u,5>
- 2691877496U, // <0,3,6,0>: Cost 3 vext3 <1,2,3,0>, <3,6,0,7>
- 3727511914U, // <0,3,6,1>: Cost 4 vext2 <6,1,0,3>, <6,1,0,3>
- 3765619338U, // <0,3,6,2>: Cost 4 vext3 <1,2,3,0>, <3,6,2,7>
- 3765619347U, // <0,3,6,3>: Cost 4 vext3 <1,2,3,0>, <3,6,3,7>
- 3765987996U, // <0,3,6,4>: Cost 4 vext3 <1,2,u,0>, <3,6,4,7>
- 3306670270U, // <0,3,6,5>: Cost 4 vrev <3,0,5,6>
- 3792456365U, // <0,3,6,6>: Cost 4 vext3 <5,6,7,0>, <3,6,6,6>
- 2706770608U, // <0,3,6,7>: Cost 3 vext3 <3,6,7,0>, <3,6,7,0>
- 2706844345U, // <0,3,6,u>: Cost 3 vext3 <3,6,u,0>, <3,6,u,0>
- 3769600707U, // <0,3,7,0>: Cost 4 vext3 <1,u,3,0>, <3,7,0,1>
- 2659742787U, // <0,3,7,1>: Cost 3 vext2 <7,1,0,3>, <7,1,0,3>
- 3636102612U, // <0,3,7,2>: Cost 4 vext1 <2,0,3,7>, <2,0,3,7>
- 3769600740U, // <0,3,7,3>: Cost 4 vext3 <1,u,3,0>, <3,7,3,7>
- 3769600747U, // <0,3,7,4>: Cost 4 vext3 <1,u,3,0>, <3,7,4,5>
- 3769600758U, // <0,3,7,5>: Cost 4 vext3 <1,u,3,0>, <3,7,5,7>
- 3659993400U, // <0,3,7,6>: Cost 4 vext1 <6,0,3,7>, <6,0,3,7>
- 3781176065U, // <0,3,7,7>: Cost 4 vext3 <3,7,7,0>, <3,7,7,0>
- 2664388218U, // <0,3,7,u>: Cost 3 vext2 <7,u,0,3>, <7,u,0,3>
- 1482653798U, // <0,3,u,0>: Cost 2 vext1 <1,0,3,u>, LHS
- 1482654460U, // <0,3,u,1>: Cost 2 vext1 <1,0,3,u>, <1,0,3,u>
- 2556397160U, // <0,3,u,2>: Cost 3 vext1 <1,0,3,u>, <2,2,2,2>
- 3021179292U, // <0,3,u,3>: Cost 3 vtrnl LHS, <3,3,3,3>
- 1482657078U, // <0,3,u,4>: Cost 2 vext1 <1,0,3,u>, RHS
- 3021179394U, // <0,3,u,5>: Cost 3 vtrnl LHS, <3,4,5,6>
- 2598203898U, // <0,3,u,6>: Cost 3 vext1 <u,0,3,u>, <6,2,7,3>
- 2708097874U, // <0,3,u,7>: Cost 3 vext3 <3,u,7,0>, <3,u,7,0>
- 1482659630U, // <0,3,u,u>: Cost 2 vext1 <1,0,3,u>, LHS
- 2617278468U, // <0,4,0,0>: Cost 3 vext2 <0,0,0,4>, <0,0,0,4>
- 2618605670U, // <0,4,0,1>: Cost 3 vext2 <0,2,0,4>, LHS
- 2618605734U, // <0,4,0,2>: Cost 3 vext2 <0,2,0,4>, <0,2,0,4>
- 3642091695U, // <0,4,0,3>: Cost 4 vext1 <3,0,4,0>, <3,0,4,0>
- 2753134796U, // <0,4,0,4>: Cost 3 vuzpl <0,2,4,6>, <0,2,4,6>
- 2718714770U, // <0,4,0,5>: Cost 3 vext3 <5,6,7,0>, <4,0,5,1>
- 3021245750U, // <0,4,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
- 3665982483U, // <0,4,0,7>: Cost 4 vext1 <7,0,4,0>, <7,0,4,0>
- 3021245768U, // <0,4,0,u>: Cost 3 vtrnl <0,2,0,2>, RHS
- 2568355942U, // <0,4,1,0>: Cost 3 vext1 <3,0,4,1>, LHS
- 3692348212U, // <0,4,1,1>: Cost 4 vext2 <0,2,0,4>, <1,1,1,1>
- 3692348310U, // <0,4,1,2>: Cost 4 vext2 <0,2,0,4>, <1,2,3,0>
- 2568358064U, // <0,4,1,3>: Cost 3 vext1 <3,0,4,1>, <3,0,4,1>
- 2568359222U, // <0,4,1,4>: Cost 3 vext1 <3,0,4,1>, RHS
- 1812778294U, // <0,4,1,5>: Cost 2 vzipl LHS, RHS
- 3022671158U, // <0,4,1,6>: Cost 3 vtrnl <0,4,1,5>, RHS
- 2592248852U, // <0,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
- 1812778537U, // <0,4,1,u>: Cost 2 vzipl LHS, RHS
- 2568364134U, // <0,4,2,0>: Cost 3 vext1 <3,0,4,2>, LHS
- 2238573423U, // <0,4,2,1>: Cost 3 vrev <4,0,1,2>
- 3692349032U, // <0,4,2,2>: Cost 4 vext2 <0,2,0,4>, <2,2,2,2>
- 2631214761U, // <0,4,2,3>: Cost 3 vext2 <2,3,0,4>, <2,3,0,4>
- 2568367414U, // <0,4,2,4>: Cost 3 vext1 <3,0,4,2>, RHS
- 2887028022U, // <0,4,2,5>: Cost 3 vzipl <0,2,0,2>, RHS
- 1946996022U, // <0,4,2,6>: Cost 2 vtrnl LHS, RHS
- 2592257045U, // <0,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
- 1946996040U, // <0,4,2,u>: Cost 2 vtrnl LHS, RHS
- 3692349590U, // <0,4,3,0>: Cost 4 vext2 <0,2,0,4>, <3,0,1,2>
- 3826878614U, // <0,4,3,1>: Cost 4 vuzpl <0,2,4,6>, <3,0,1,2>
- 3826878625U, // <0,4,3,2>: Cost 4 vuzpl <0,2,4,6>, <3,0,2,4>
- 3692349852U, // <0,4,3,3>: Cost 4 vext2 <0,2,0,4>, <3,3,3,3>
- 3692349954U, // <0,4,3,4>: Cost 4 vext2 <0,2,0,4>, <3,4,5,6>
- 3826878978U, // <0,4,3,5>: Cost 4 vuzpl <0,2,4,6>, <3,4,5,6>
- 4095200566U, // <0,4,3,6>: Cost 4 vtrnl <0,2,3,1>, RHS
- 3713583814U, // <0,4,3,7>: Cost 4 vext2 <3,7,0,4>, <3,7,0,4>
- 3692350238U, // <0,4,3,u>: Cost 4 vext2 <0,2,0,4>, <3,u,1,2>
- 2550464552U, // <0,4,4,0>: Cost 3 vext1 <0,0,4,4>, <0,0,4,4>
- 3962194914U, // <0,4,4,1>: Cost 4 vzipl <0,4,1,5>, <4,1,5,0>
- 3693677631U, // <0,4,4,2>: Cost 4 vext2 <0,4,0,4>, <4,2,6,3>
- 3642124467U, // <0,4,4,3>: Cost 4 vext1 <3,0,4,4>, <3,0,4,4>
- 2718715088U, // <0,4,4,4>: Cost 3 vext3 <5,6,7,0>, <4,4,4,4>
- 2618608950U, // <0,4,4,5>: Cost 3 vext2 <0,2,0,4>, RHS
- 2753137974U, // <0,4,4,6>: Cost 3 vuzpl <0,2,4,6>, RHS
- 3666015255U, // <0,4,4,7>: Cost 4 vext1 <7,0,4,4>, <7,0,4,4>
- 2618609193U, // <0,4,4,u>: Cost 3 vext2 <0,2,0,4>, RHS
- 2568388710U, // <0,4,5,0>: Cost 3 vext1 <3,0,4,5>, LHS
- 2568389526U, // <0,4,5,1>: Cost 3 vext1 <3,0,4,5>, <1,2,3,0>
- 3636159963U, // <0,4,5,2>: Cost 4 vext1 <2,0,4,5>, <2,0,4,5>
- 2568390836U, // <0,4,5,3>: Cost 3 vext1 <3,0,4,5>, <3,0,4,5>
- 2568391990U, // <0,4,5,4>: Cost 3 vext1 <3,0,4,5>, RHS
- 2718715180U, // <0,4,5,5>: Cost 3 vext3 <5,6,7,0>, <4,5,5,6>
- 1618136374U, // <0,4,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
- 2592281624U, // <0,4,5,7>: Cost 3 vext1 <7,0,4,5>, <7,0,4,5>
- 1618136392U, // <0,4,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
- 2550480938U, // <0,4,6,0>: Cost 3 vext1 <0,0,4,6>, <0,0,4,6>
- 3826880801U, // <0,4,6,1>: Cost 4 vuzpl <0,2,4,6>, <6,0,1,2>
- 2562426332U, // <0,4,6,2>: Cost 3 vext1 <2,0,4,6>, <2,0,4,6>
- 3786190181U, // <0,4,6,3>: Cost 4 vext3 <4,6,3,0>, <4,6,3,0>
- 2718715252U, // <0,4,6,4>: Cost 3 vext3 <5,6,7,0>, <4,6,4,6>
- 3826881165U, // <0,4,6,5>: Cost 4 vuzpl <0,2,4,6>, <6,4,5,6>
- 2712669568U, // <0,4,6,6>: Cost 3 vext3 <4,6,6,0>, <4,6,6,0>
- 2657760081U, // <0,4,6,7>: Cost 3 vext2 <6,7,0,4>, <6,7,0,4>
- 2718715284U, // <0,4,6,u>: Cost 3 vext3 <5,6,7,0>, <4,6,u,2>
- 3654090854U, // <0,4,7,0>: Cost 4 vext1 <5,0,4,7>, LHS
- 3934229326U, // <0,4,7,1>: Cost 4 vuzpr <7,0,1,4>, <6,7,0,1>
- 3734156437U, // <0,4,7,2>: Cost 4 vext2 <7,2,0,4>, <7,2,0,4>
- 3734820070U, // <0,4,7,3>: Cost 4 vext2 <7,3,0,4>, <7,3,0,4>
- 3654094134U, // <0,4,7,4>: Cost 4 vext1 <5,0,4,7>, RHS
- 2713259464U, // <0,4,7,5>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
- 2713333201U, // <0,4,7,6>: Cost 3 vext3 <4,7,6,0>, <4,7,6,0>
- 3654095866U, // <0,4,7,7>: Cost 4 vext1 <5,0,4,7>, <7,0,1,2>
- 2713259464U, // <0,4,7,u>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
- 2568413286U, // <0,4,u,0>: Cost 3 vext1 <3,0,4,u>, LHS
- 2618611502U, // <0,4,u,1>: Cost 3 vext2 <0,2,0,4>, LHS
- 2753140526U, // <0,4,u,2>: Cost 3 vuzpl <0,2,4,6>, LHS
- 2568415415U, // <0,4,u,3>: Cost 3 vext1 <3,0,4,u>, <3,0,4,u>
- 2568416566U, // <0,4,u,4>: Cost 3 vext1 <3,0,4,u>, RHS
- 1817423158U, // <0,4,u,5>: Cost 2 vzipl LHS, RHS
- 1947438390U, // <0,4,u,6>: Cost 2 vtrnl LHS, RHS
- 2592306203U, // <0,4,u,7>: Cost 3 vext1 <7,0,4,u>, <7,0,4,u>
- 1947438408U, // <0,4,u,u>: Cost 2 vtrnl LHS, RHS
- 3630219264U, // <0,5,0,0>: Cost 4 vext1 <1,0,5,0>, <0,0,0,0>
- 2625912934U, // <0,5,0,1>: Cost 3 vext2 <1,4,0,5>, LHS
- 3692355748U, // <0,5,0,2>: Cost 4 vext2 <0,2,0,5>, <0,2,0,2>
- 3693019384U, // <0,5,0,3>: Cost 4 vext2 <0,3,0,5>, <0,3,0,5>
- 3630222646U, // <0,5,0,4>: Cost 4 vext1 <1,0,5,0>, RHS
- 3699655062U, // <0,5,0,5>: Cost 4 vext2 <1,4,0,5>, <0,5,0,1>
- 2718715508U, // <0,5,0,6>: Cost 3 vext3 <5,6,7,0>, <5,0,6,1>
- 3087011126U, // <0,5,0,7>: Cost 3 vtrnr <0,0,0,0>, RHS
- 2625913501U, // <0,5,0,u>: Cost 3 vext2 <1,4,0,5>, LHS
- 1500659814U, // <0,5,1,0>: Cost 2 vext1 <4,0,5,1>, LHS
- 2886520528U, // <0,5,1,1>: Cost 3 vzipl LHS, <5,1,7,3>
- 2574403176U, // <0,5,1,2>: Cost 3 vext1 <4,0,5,1>, <2,2,2,2>
- 2574403734U, // <0,5,1,3>: Cost 3 vext1 <4,0,5,1>, <3,0,1,2>
- 1500662674U, // <0,5,1,4>: Cost 2 vext1 <4,0,5,1>, <4,0,5,1>
- 2886520836U, // <0,5,1,5>: Cost 3 vzipl LHS, <5,5,5,5>
- 2886520930U, // <0,5,1,6>: Cost 3 vzipl LHS, <5,6,7,0>
- 2718715600U, // <0,5,1,7>: Cost 3 vext3 <5,6,7,0>, <5,1,7,3>
- 1500665646U, // <0,5,1,u>: Cost 2 vext1 <4,0,5,1>, LHS
- 2556493926U, // <0,5,2,0>: Cost 3 vext1 <1,0,5,2>, LHS
- 2244546120U, // <0,5,2,1>: Cost 3 vrev <5,0,1,2>
- 3692357256U, // <0,5,2,2>: Cost 4 vext2 <0,2,0,5>, <2,2,5,7>
- 2568439994U, // <0,5,2,3>: Cost 3 vext1 <3,0,5,2>, <3,0,5,2>
- 2556497206U, // <0,5,2,4>: Cost 3 vext1 <1,0,5,2>, RHS
- 3020738564U, // <0,5,2,5>: Cost 3 vtrnl LHS, <5,5,5,5>
- 4027877161U, // <0,5,2,6>: Cost 4 vzipr <0,2,0,2>, <2,4,5,6>
- 3093220662U, // <0,5,2,7>: Cost 3 vtrnr <1,0,3,2>, RHS
- 3093220663U, // <0,5,2,u>: Cost 3 vtrnr <1,0,3,2>, RHS
- 3699656854U, // <0,5,3,0>: Cost 4 vext2 <1,4,0,5>, <3,0,1,2>
- 3699656927U, // <0,5,3,1>: Cost 4 vext2 <1,4,0,5>, <3,1,0,3>
- 3699657006U, // <0,5,3,2>: Cost 4 vext2 <1,4,0,5>, <3,2,0,1>
- 3699657116U, // <0,5,3,3>: Cost 4 vext2 <1,4,0,5>, <3,3,3,3>
- 2637859284U, // <0,5,3,4>: Cost 3 vext2 <3,4,0,5>, <3,4,0,5>
- 3790319453U, // <0,5,3,5>: Cost 4 vext3 <5,3,5,0>, <5,3,5,0>
- 3699657354U, // <0,5,3,6>: Cost 4 vext2 <1,4,0,5>, <3,6,2,7>
- 2716725103U, // <0,5,3,7>: Cost 3 vext3 <5,3,7,0>, <5,3,7,0>
- 2716798840U, // <0,5,3,u>: Cost 3 vext3 <5,3,u,0>, <5,3,u,0>
- 2661747602U, // <0,5,4,0>: Cost 3 vext2 <7,4,0,5>, <4,0,5,1>
- 3630252810U, // <0,5,4,1>: Cost 4 vext1 <1,0,5,4>, <1,0,5,4>
- 3636225507U, // <0,5,4,2>: Cost 4 vext1 <2,0,5,4>, <2,0,5,4>
- 3716910172U, // <0,5,4,3>: Cost 4 vext2 <4,3,0,5>, <4,3,0,5>
- 3962195892U, // <0,5,4,4>: Cost 4 vzipl <0,4,1,5>, <5,4,5,6>
- 2625916214U, // <0,5,4,5>: Cost 3 vext2 <1,4,0,5>, RHS
- 3718901071U, // <0,5,4,6>: Cost 4 vext2 <4,6,0,5>, <4,6,0,5>
- 2718715846U, // <0,5,4,7>: Cost 3 vext3 <5,6,7,0>, <5,4,7,6>
- 2625916457U, // <0,5,4,u>: Cost 3 vext2 <1,4,0,5>, RHS
- 3791278034U, // <0,5,5,0>: Cost 4 vext3 <5,5,0,0>, <5,5,0,0>
- 3791351771U, // <0,5,5,1>: Cost 4 vext3 <5,5,1,0>, <5,5,1,0>
- 3318386260U, // <0,5,5,2>: Cost 4 vrev <5,0,2,5>
- 3791499245U, // <0,5,5,3>: Cost 4 vext3 <5,5,3,0>, <5,5,3,0>
- 3318533734U, // <0,5,5,4>: Cost 4 vrev <5,0,4,5>
- 2718715908U, // <0,5,5,5>: Cost 3 vext3 <5,6,7,0>, <5,5,5,5>
- 2657767522U, // <0,5,5,6>: Cost 3 vext2 <6,7,0,5>, <5,6,7,0>
- 2718715928U, // <0,5,5,7>: Cost 3 vext3 <5,6,7,0>, <5,5,7,7>
- 2718715937U, // <0,5,5,u>: Cost 3 vext3 <5,6,7,0>, <5,5,u,7>
- 2592358502U, // <0,5,6,0>: Cost 3 vext1 <7,0,5,6>, LHS
- 3792015404U, // <0,5,6,1>: Cost 4 vext3 <5,6,1,0>, <5,6,1,0>
- 3731509754U, // <0,5,6,2>: Cost 4 vext2 <6,7,0,5>, <6,2,7,3>
- 3785748546U, // <0,5,6,3>: Cost 4 vext3 <4,5,6,0>, <5,6,3,4>
- 2592361782U, // <0,5,6,4>: Cost 3 vext1 <7,0,5,6>, RHS
- 2592362594U, // <0,5,6,5>: Cost 3 vext1 <7,0,5,6>, <5,6,7,0>
- 3785748576U, // <0,5,6,6>: Cost 4 vext3 <4,5,6,0>, <5,6,6,7>
- 1644974178U, // <0,5,6,7>: Cost 2 vext3 <5,6,7,0>, <5,6,7,0>
- 1645047915U, // <0,5,6,u>: Cost 2 vext3 <5,6,u,0>, <5,6,u,0>
- 2562506854U, // <0,5,7,0>: Cost 3 vext1 <2,0,5,7>, LHS
- 2562507670U, // <0,5,7,1>: Cost 3 vext1 <2,0,5,7>, <1,2,3,0>
- 2562508262U, // <0,5,7,2>: Cost 3 vext1 <2,0,5,7>, <2,0,5,7>
- 3636250774U, // <0,5,7,3>: Cost 4 vext1 <2,0,5,7>, <3,0,1,2>
- 2562510134U, // <0,5,7,4>: Cost 3 vext1 <2,0,5,7>, RHS
- 2718716072U, // <0,5,7,5>: Cost 3 vext3 <5,6,7,0>, <5,7,5,7>
- 2718716074U, // <0,5,7,6>: Cost 3 vext3 <5,6,7,0>, <5,7,6,0>
- 2719379635U, // <0,5,7,7>: Cost 3 vext3 <5,7,7,0>, <5,7,7,0>
- 2562512686U, // <0,5,7,u>: Cost 3 vext1 <2,0,5,7>, LHS
- 1500717158U, // <0,5,u,0>: Cost 2 vext1 <4,0,5,u>, LHS
- 2625918766U, // <0,5,u,1>: Cost 3 vext2 <1,4,0,5>, LHS
- 2719674583U, // <0,5,u,2>: Cost 3 vext3 <5,u,2,0>, <5,u,2,0>
- 2568489152U, // <0,5,u,3>: Cost 3 vext1 <3,0,5,u>, <3,0,5,u>
- 1500720025U, // <0,5,u,4>: Cost 2 vext1 <4,0,5,u>, <4,0,5,u>
- 2625919130U, // <0,5,u,5>: Cost 3 vext2 <1,4,0,5>, RHS
- 2586407243U, // <0,5,u,6>: Cost 3 vext1 <6,0,5,u>, <6,0,5,u>
- 1646301444U, // <0,5,u,7>: Cost 2 vext3 <5,u,7,0>, <5,u,7,0>
- 1646375181U, // <0,5,u,u>: Cost 2 vext3 <5,u,u,0>, <5,u,u,0>
- 2586411110U, // <0,6,0,0>: Cost 3 vext1 <6,0,6,0>, LHS
- 2619949158U, // <0,6,0,1>: Cost 3 vext2 <0,4,0,6>, LHS
- 2619949220U, // <0,6,0,2>: Cost 3 vext2 <0,4,0,6>, <0,2,0,2>
- 3785748789U, // <0,6,0,3>: Cost 4 vext3 <4,5,6,0>, <6,0,3,4>
- 2619949386U, // <0,6,0,4>: Cost 3 vext2 <0,4,0,6>, <0,4,0,6>
- 2586415202U, // <0,6,0,5>: Cost 3 vext1 <6,0,6,0>, <5,6,7,0>
- 2586415436U, // <0,6,0,6>: Cost 3 vext1 <6,0,6,0>, <6,0,6,0>
- 2952793398U, // <0,6,0,7>: Cost 3 vzipr <0,0,0,0>, RHS
- 2619949725U, // <0,6,0,u>: Cost 3 vext2 <0,4,0,6>, LHS
- 2562531430U, // <0,6,1,0>: Cost 3 vext1 <2,0,6,1>, LHS
- 3693691700U, // <0,6,1,1>: Cost 4 vext2 <0,4,0,6>, <1,1,1,1>
- 2886521338U, // <0,6,1,2>: Cost 3 vzipl LHS, <6,2,7,3>
- 3693691864U, // <0,6,1,3>: Cost 4 vext2 <0,4,0,6>, <1,3,1,3>
- 2562534710U, // <0,6,1,4>: Cost 3 vext1 <2,0,6,1>, RHS
- 2580450932U, // <0,6,1,5>: Cost 3 vext1 <5,0,6,1>, <5,0,6,1>
- 2886521656U, // <0,6,1,6>: Cost 3 vzipl LHS, <6,6,6,6>
- 2966736182U, // <0,6,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
- 2966736183U, // <0,6,1,u>: Cost 3 vzipr <2,3,0,1>, RHS
- 1500741734U, // <0,6,2,0>: Cost 2 vext1 <4,0,6,2>, LHS
- 2250518817U, // <0,6,2,1>: Cost 3 vrev <6,0,1,2>
- 2574485096U, // <0,6,2,2>: Cost 3 vext1 <4,0,6,2>, <2,2,2,2>
- 2631894694U, // <0,6,2,3>: Cost 3 vext2 <2,4,0,6>, <2,3,0,1>
- 1500744604U, // <0,6,2,4>: Cost 2 vext1 <4,0,6,2>, <4,0,6,2>
- 2574487248U, // <0,6,2,5>: Cost 3 vext1 <4,0,6,2>, <5,1,7,3>
- 3020739384U, // <0,6,2,6>: Cost 3 vtrnl LHS, <6,6,6,6>
- 2954136886U, // <0,6,2,7>: Cost 3 vzipr <0,2,0,2>, RHS
- 1500747566U, // <0,6,2,u>: Cost 2 vext1 <4,0,6,2>, LHS
- 3693693078U, // <0,6,3,0>: Cost 4 vext2 <0,4,0,6>, <3,0,1,2>
- 3705637136U, // <0,6,3,1>: Cost 4 vext2 <2,4,0,6>, <3,1,5,7>
- 3705637192U, // <0,6,3,2>: Cost 4 vext2 <2,4,0,6>, <3,2,3,0>
- 3693693340U, // <0,6,3,3>: Cost 4 vext2 <0,4,0,6>, <3,3,3,3>
- 2637867477U, // <0,6,3,4>: Cost 3 vext2 <3,4,0,6>, <3,4,0,6>
- 3705637424U, // <0,6,3,5>: Cost 4 vext2 <2,4,0,6>, <3,5,1,7>
- 3666154056U, // <0,6,3,6>: Cost 4 vext1 <7,0,6,3>, <6,3,7,0>
- 2722697800U, // <0,6,3,7>: Cost 3 vext3 <6,3,7,0>, <6,3,7,0>
- 2722771537U, // <0,6,3,u>: Cost 3 vext3 <6,3,u,0>, <6,3,u,0>
- 2562556006U, // <0,6,4,0>: Cost 3 vext1 <2,0,6,4>, LHS
- 4095316257U, // <0,6,4,1>: Cost 4 vtrnl <0,2,4,6>, <6,0,1,2>
- 2562557420U, // <0,6,4,2>: Cost 3 vext1 <2,0,6,4>, <2,0,6,4>
- 3636299926U, // <0,6,4,3>: Cost 4 vext1 <2,0,6,4>, <3,0,1,2>
- 2562559286U, // <0,6,4,4>: Cost 3 vext1 <2,0,6,4>, RHS
- 2619952438U, // <0,6,4,5>: Cost 3 vext2 <0,4,0,6>, RHS
- 2723287696U, // <0,6,4,6>: Cost 3 vext3 <6,4,6,0>, <6,4,6,0>
- 4027895094U, // <0,6,4,7>: Cost 4 vzipr <0,2,0,4>, RHS
- 2619952681U, // <0,6,4,u>: Cost 3 vext2 <0,4,0,6>, RHS
- 2718716594U, // <0,6,5,0>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
- 3648250774U, // <0,6,5,1>: Cost 4 vext1 <4,0,6,5>, <1,2,3,0>
- 3792458436U, // <0,6,5,2>: Cost 4 vext3 <5,6,7,0>, <6,5,2,7>
- 3705638767U, // <0,6,5,3>: Cost 5 vext2 <2,4,0,6>, <5,3,7,0>
- 3648252831U, // <0,6,5,4>: Cost 4 vext1 <4,0,6,5>, <4,0,6,5>
- 3797619416U, // <0,6,5,5>: Cost 4 vext3 <6,5,5,0>, <6,5,5,0>
- 3792458472U, // <0,6,5,6>: Cost 4 vext3 <5,6,7,0>, <6,5,6,7>
- 4035202358U, // <0,6,5,7>: Cost 4 vzipr <1,4,0,5>, RHS
- 2718716594U, // <0,6,5,u>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
- 3786412796U, // <0,6,6,0>: Cost 4 vext3 <4,6,6,0>, <6,6,0,0>
- 3792458504U, // <0,6,6,1>: Cost 4 vext3 <5,6,7,0>, <6,6,1,3>
- 3728200126U, // <0,6,6,2>: Cost 4 vext2 <6,2,0,6>, <6,2,0,6>
- 3798135575U, // <0,6,6,3>: Cost 4 vext3 <6,6,3,0>, <6,6,3,0>
- 3786412836U, // <0,6,6,4>: Cost 4 vext3 <4,6,6,0>, <6,6,4,4>
- 3792458543U, // <0,6,6,5>: Cost 4 vext3 <5,6,7,0>, <6,6,5,6>
- 2718716728U, // <0,6,6,6>: Cost 3 vext3 <5,6,7,0>, <6,6,6,6>
- 2718716738U, // <0,6,6,7>: Cost 3 vext3 <5,6,7,0>, <6,6,7,7>
- 2718716747U, // <0,6,6,u>: Cost 3 vext3 <5,6,7,0>, <6,6,u,7>
- 2718716750U, // <0,6,7,0>: Cost 3 vext3 <5,6,7,0>, <6,7,0,1>
- 2724909910U, // <0,6,7,1>: Cost 3 vext3 <6,7,1,0>, <6,7,1,0>
- 3636323823U, // <0,6,7,2>: Cost 4 vext1 <2,0,6,7>, <2,0,6,7>
- 2725057384U, // <0,6,7,3>: Cost 3 vext3 <6,7,3,0>, <6,7,3,0>
- 2718716790U, // <0,6,7,4>: Cost 3 vext3 <5,6,7,0>, <6,7,4,5>
- 2718716800U, // <0,6,7,5>: Cost 3 vext3 <5,6,7,0>, <6,7,5,6>
- 3792458629U, // <0,6,7,6>: Cost 4 vext3 <5,6,7,0>, <6,7,6,2>
- 2725352332U, // <0,6,7,7>: Cost 3 vext3 <6,7,7,0>, <6,7,7,0>
- 2718716822U, // <0,6,7,u>: Cost 3 vext3 <5,6,7,0>, <6,7,u,1>
- 1500790886U, // <0,6,u,0>: Cost 2 vext1 <4,0,6,u>, LHS
- 2619954990U, // <0,6,u,1>: Cost 3 vext2 <0,4,0,6>, LHS
- 2562590192U, // <0,6,u,2>: Cost 3 vext1 <2,0,6,u>, <2,0,6,u>
- 2725721017U, // <0,6,u,3>: Cost 3 vext3 <6,u,3,0>, <6,u,3,0>
- 1500793762U, // <0,6,u,4>: Cost 2 vext1 <4,0,6,u>, <4,0,6,u>
- 2619955354U, // <0,6,u,5>: Cost 3 vext2 <0,4,0,6>, RHS
- 2725942228U, // <0,6,u,6>: Cost 3 vext3 <6,u,6,0>, <6,u,6,0>
- 2954186038U, // <0,6,u,7>: Cost 3 vzipr <0,2,0,u>, RHS
- 1500796718U, // <0,6,u,u>: Cost 2 vext1 <4,0,6,u>, LHS
- 2256401391U, // <0,7,0,0>: Cost 3 vrev <7,0,0,0>
- 2632564838U, // <0,7,0,1>: Cost 3 vext2 <2,5,0,7>, LHS
- 2256548865U, // <0,7,0,2>: Cost 3 vrev <7,0,2,0>
- 3700998396U, // <0,7,0,3>: Cost 4 vext2 <1,6,0,7>, <0,3,1,0>
- 2718716952U, // <0,7,0,4>: Cost 3 vext3 <5,6,7,0>, <7,0,4,5>
- 2718716962U, // <0,7,0,5>: Cost 3 vext3 <5,6,7,0>, <7,0,5,6>
- 2621284845U, // <0,7,0,6>: Cost 3 vext2 <0,6,0,7>, <0,6,0,7>
- 3904685542U, // <0,7,0,7>: Cost 4 vuzpr <2,0,5,7>, <2,0,5,7>
- 2632565405U, // <0,7,0,u>: Cost 3 vext2 <2,5,0,7>, LHS
- 2256409584U, // <0,7,1,0>: Cost 3 vrev <7,0,0,1>
- 3706307380U, // <0,7,1,1>: Cost 4 vext2 <2,5,0,7>, <1,1,1,1>
- 2632565654U, // <0,7,1,2>: Cost 3 vext2 <2,5,0,7>, <1,2,3,0>
- 3769603168U, // <0,7,1,3>: Cost 4 vext3 <1,u,3,0>, <7,1,3,5>
- 2256704532U, // <0,7,1,4>: Cost 3 vrev <7,0,4,1>
- 3769603184U, // <0,7,1,5>: Cost 4 vext3 <1,u,3,0>, <7,1,5,3>
- 3700999366U, // <0,7,1,6>: Cost 4 vext2 <1,6,0,7>, <1,6,0,7>
- 2886522476U, // <0,7,1,7>: Cost 3 vzipl LHS, <7,7,7,7>
- 2256999480U, // <0,7,1,u>: Cost 3 vrev <7,0,u,1>
- 2586501222U, // <0,7,2,0>: Cost 3 vext1 <6,0,7,2>, LHS
- 1182749690U, // <0,7,2,1>: Cost 2 vrev <7,0,1,2>
- 3636356595U, // <0,7,2,2>: Cost 4 vext1 <2,0,7,2>, <2,0,7,2>
- 2727711916U, // <0,7,2,3>: Cost 3 vext3 <7,2,3,0>, <7,2,3,0>
- 2586504502U, // <0,7,2,4>: Cost 3 vext1 <6,0,7,2>, RHS
- 2632566606U, // <0,7,2,5>: Cost 3 vext2 <2,5,0,7>, <2,5,0,7>
- 2586505559U, // <0,7,2,6>: Cost 3 vext1 <6,0,7,2>, <6,0,7,2>
- 3020740204U, // <0,7,2,7>: Cost 3 vtrnl LHS, <7,7,7,7>
- 1183265849U, // <0,7,2,u>: Cost 2 vrev <7,0,u,2>
- 3701000342U, // <0,7,3,0>: Cost 4 vext2 <1,6,0,7>, <3,0,1,2>
- 3706308849U, // <0,7,3,1>: Cost 4 vext2 <2,5,0,7>, <3,1,2,3>
- 3330315268U, // <0,7,3,2>: Cost 4 vrev <7,0,2,3>
- 3706309020U, // <0,7,3,3>: Cost 4 vext2 <2,5,0,7>, <3,3,3,3>
- 3706309122U, // <0,7,3,4>: Cost 4 vext2 <2,5,0,7>, <3,4,5,6>
- 3712281127U, // <0,7,3,5>: Cost 4 vext2 <3,5,0,7>, <3,5,0,7>
- 2639202936U, // <0,7,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
- 3802412321U, // <0,7,3,7>: Cost 4 vext3 <7,3,7,0>, <7,3,7,0>
- 2640530202U, // <0,7,3,u>: Cost 3 vext2 <3,u,0,7>, <3,u,0,7>
- 3654287462U, // <0,7,4,0>: Cost 4 vext1 <5,0,7,4>, LHS
- 2256507900U, // <0,7,4,1>: Cost 3 vrev <7,0,1,4>
- 2256581637U, // <0,7,4,2>: Cost 3 vrev <7,0,2,4>
- 3660262008U, // <0,7,4,3>: Cost 4 vext1 <6,0,7,4>, <3,6,0,7>
- 3786413405U, // <0,7,4,4>: Cost 4 vext3 <4,6,6,0>, <7,4,4,6>
- 2632568118U, // <0,7,4,5>: Cost 3 vext2 <2,5,0,7>, RHS
- 3718917457U, // <0,7,4,6>: Cost 4 vext2 <4,6,0,7>, <4,6,0,7>
- 3787003255U, // <0,7,4,7>: Cost 4 vext3 <4,7,5,0>, <7,4,7,5>
- 2632568361U, // <0,7,4,u>: Cost 3 vext2 <2,5,0,7>, RHS
- 3706310268U, // <0,7,5,0>: Cost 4 vext2 <2,5,0,7>, <5,0,7,0>
- 3792459156U, // <0,7,5,1>: Cost 4 vext3 <5,6,7,0>, <7,5,1,7>
- 3330331654U, // <0,7,5,2>: Cost 4 vrev <7,0,2,5>
- 3722899255U, // <0,7,5,3>: Cost 4 vext2 <5,3,0,7>, <5,3,0,7>
- 2256737304U, // <0,7,5,4>: Cost 3 vrev <7,0,4,5>
- 3724226521U, // <0,7,5,5>: Cost 4 vext2 <5,5,0,7>, <5,5,0,7>
- 2718717377U, // <0,7,5,6>: Cost 3 vext3 <5,6,7,0>, <7,5,6,7>
- 2729997763U, // <0,7,5,7>: Cost 3 vext3 <7,5,7,0>, <7,5,7,0>
- 2720044499U, // <0,7,5,u>: Cost 3 vext3 <5,u,7,0>, <7,5,u,7>
- 3712946517U, // <0,7,6,0>: Cost 4 vext2 <3,6,0,7>, <6,0,7,0>
- 2256524286U, // <0,7,6,1>: Cost 3 vrev <7,0,1,6>
- 3792459246U, // <0,7,6,2>: Cost 4 vext3 <5,6,7,0>, <7,6,2,7>
- 3796440567U, // <0,7,6,3>: Cost 4 vext3 <6,3,7,0>, <7,6,3,7>
- 3654307126U, // <0,7,6,4>: Cost 4 vext1 <5,0,7,6>, RHS
- 2656457394U, // <0,7,6,5>: Cost 3 vext2 <6,5,0,7>, <6,5,0,7>
- 3792459281U, // <0,7,6,6>: Cost 4 vext3 <5,6,7,0>, <7,6,6,6>
- 2730661396U, // <0,7,6,7>: Cost 3 vext3 <7,6,7,0>, <7,6,7,0>
- 2658448293U, // <0,7,6,u>: Cost 3 vext2 <6,u,0,7>, <6,u,0,7>
- 3787003431U, // <0,7,7,0>: Cost 4 vext3 <4,7,5,0>, <7,7,0,1>
- 3654312854U, // <0,7,7,1>: Cost 4 vext1 <5,0,7,7>, <1,2,3,0>
- 3654313446U, // <0,7,7,2>: Cost 4 vext1 <5,0,7,7>, <2,0,5,7>
- 3804771905U, // <0,7,7,3>: Cost 4 vext3 <7,7,3,0>, <7,7,3,0>
- 3654315318U, // <0,7,7,4>: Cost 4 vext1 <5,0,7,7>, RHS
- 3654315651U, // <0,7,7,5>: Cost 4 vext1 <5,0,7,7>, <5,0,7,7>
- 3660288348U, // <0,7,7,6>: Cost 4 vext1 <6,0,7,7>, <6,0,7,7>
- 2718717548U, // <0,7,7,7>: Cost 3 vext3 <5,6,7,0>, <7,7,7,7>
- 2664420990U, // <0,7,7,u>: Cost 3 vext2 <7,u,0,7>, <7,u,0,7>
- 2256466935U, // <0,7,u,0>: Cost 3 vrev <7,0,0,u>
- 1182798848U, // <0,7,u,1>: Cost 2 vrev <7,0,1,u>
- 2256614409U, // <0,7,u,2>: Cost 3 vrev <7,0,2,u>
- 2731693714U, // <0,7,u,3>: Cost 3 vext3 <7,u,3,0>, <7,u,3,0>
- 2256761883U, // <0,7,u,4>: Cost 3 vrev <7,0,4,u>
- 2632571034U, // <0,7,u,5>: Cost 3 vext2 <2,5,0,7>, RHS
- 2669066421U, // <0,7,u,6>: Cost 3 vext2 <u,6,0,7>, <u,6,0,7>
- 2731988662U, // <0,7,u,7>: Cost 3 vext3 <7,u,7,0>, <7,u,7,0>
- 1183315007U, // <0,7,u,u>: Cost 2 vrev <7,0,u,u>
- 135053414U, // <0,u,0,0>: Cost 1 vdup0 LHS
- 1544896614U, // <0,u,0,1>: Cost 2 vext2 <0,2,0,u>, LHS
- 1678999654U, // <0,u,0,2>: Cost 2 vuzpl LHS, LHS
- 2691880677U, // <0,u,0,3>: Cost 3 vext3 <1,2,3,0>, <u,0,3,2>
- 1476988214U, // <0,u,0,4>: Cost 2 vext1 <0,0,u,0>, RHS
- 2718791419U, // <0,u,0,5>: Cost 3 vext3 <5,6,u,0>, <u,0,5,6>
- 3021248666U, // <0,u,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
- 2592535607U, // <0,u,0,7>: Cost 3 vext1 <7,0,u,0>, <7,0,u,0>
- 135053414U, // <0,u,0,u>: Cost 1 vdup0 LHS
- 1476993097U, // <0,u,1,0>: Cost 2 vext1 <0,0,u,1>, <0,0,u,1>
- 1812780846U, // <0,u,1,1>: Cost 2 vzipl LHS, LHS
- 1618138926U, // <0,u,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
- 2752742134U, // <0,u,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
- 1476996406U, // <0,u,1,4>: Cost 2 vext1 <0,0,u,1>, RHS
- 1812781210U, // <0,u,1,5>: Cost 2 vzipl LHS, RHS
- 2887006416U, // <0,u,1,6>: Cost 3 vzipl LHS, <u,6,3,7>
- 2966736200U, // <0,u,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
- 1812781413U, // <0,u,1,u>: Cost 2 vzipl LHS, LHS
- 1482973286U, // <0,u,2,0>: Cost 2 vext1 <1,0,u,2>, LHS
- 1482973987U, // <0,u,2,1>: Cost 2 vext1 <1,0,u,2>, <1,0,u,2>
- 1946998574U, // <0,u,2,2>: Cost 2 vtrnl LHS, LHS
- 835584U, // <0,u,2,3>: Cost 0 copy LHS
- 1482976566U, // <0,u,2,4>: Cost 2 vext1 <1,0,u,2>, RHS
- 3020781631U, // <0,u,2,5>: Cost 3 vtrnl LHS, <u,4,5,6>
- 1946998938U, // <0,u,2,6>: Cost 2 vtrnl LHS, RHS
- 1518810169U, // <0,u,2,7>: Cost 2 vext1 <7,0,u,2>, <7,0,u,2>
- 835584U, // <0,u,2,u>: Cost 0 copy LHS
- 2618640534U, // <0,u,3,0>: Cost 3 vext2 <0,2,0,u>, <3,0,1,2>
- 2752743574U, // <0,u,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
- 2636556597U, // <0,u,3,2>: Cost 3 vext2 <3,2,0,u>, <3,2,0,u>
- 2752743836U, // <0,u,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
- 2618640898U, // <0,u,3,4>: Cost 3 vext2 <0,2,0,u>, <3,4,5,6>
- 2752743938U, // <0,u,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
- 2639202936U, // <0,u,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
- 2639874762U, // <0,u,3,7>: Cost 3 vext2 <3,7,0,u>, <3,7,0,u>
- 2752743637U, // <0,u,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
- 2562703462U, // <0,u,4,0>: Cost 3 vext1 <2,0,u,4>, LHS
- 2888455982U, // <0,u,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
- 3021575982U, // <0,u,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
- 2568677591U, // <0,u,4,3>: Cost 3 vext1 <3,0,u,4>, <3,0,u,4>
- 2562706742U, // <0,u,4,4>: Cost 3 vext1 <2,0,u,4>, RHS
- 1544899894U, // <0,u,4,5>: Cost 2 vext2 <0,2,0,u>, RHS
- 1679002934U, // <0,u,4,6>: Cost 2 vuzpl LHS, RHS
- 2718718033U, // <0,u,4,7>: Cost 3 vext3 <5,6,7,0>, <u,4,7,6>
- 1679002952U, // <0,u,4,u>: Cost 2 vuzpl LHS, RHS
- 2568683622U, // <0,u,5,0>: Cost 3 vext1 <3,0,u,5>, LHS
- 2568684438U, // <0,u,5,1>: Cost 3 vext1 <3,0,u,5>, <1,2,3,0>
- 3765622902U, // <0,u,5,2>: Cost 4 vext3 <1,2,3,0>, <u,5,2,7>
- 2691881087U, // <0,u,5,3>: Cost 3 vext3 <1,2,3,0>, <u,5,3,7>
- 2568686902U, // <0,u,5,4>: Cost 3 vext1 <3,0,u,5>, RHS
- 2650492890U, // <0,u,5,5>: Cost 3 vext2 <5,5,0,u>, <5,5,0,u>
- 1618139290U, // <0,u,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
- 2824834358U, // <0,u,5,7>: Cost 3 vuzpr <1,0,3,u>, RHS
- 1618139308U, // <0,u,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
- 2592579686U, // <0,u,6,0>: Cost 3 vext1 <7,0,u,6>, LHS
- 2262496983U, // <0,u,6,1>: Cost 3 vrev <u,0,1,6>
- 2654474688U, // <0,u,6,2>: Cost 3 vext2 <6,2,0,u>, <6,2,0,u>
- 2691881168U, // <0,u,6,3>: Cost 3 vext3 <1,2,3,0>, <u,6,3,7>
- 2592582966U, // <0,u,6,4>: Cost 3 vext1 <7,0,u,6>, RHS
- 2656465587U, // <0,u,6,5>: Cost 3 vext2 <6,5,0,u>, <6,5,0,u>
- 2657129220U, // <0,u,6,6>: Cost 3 vext2 <6,6,0,u>, <6,6,0,u>
- 1584051029U, // <0,u,6,7>: Cost 2 vext2 <6,7,0,u>, <6,7,0,u>
- 1584714662U, // <0,u,6,u>: Cost 2 vext2 <6,u,0,u>, <6,u,0,u>
- 2562728038U, // <0,u,7,0>: Cost 3 vext1 <2,0,u,7>, LHS
- 2562728854U, // <0,u,7,1>: Cost 3 vext1 <2,0,u,7>, <1,2,3,0>
- 2562729473U, // <0,u,7,2>: Cost 3 vext1 <2,0,u,7>, <2,0,u,7>
- 2661111018U, // <0,u,7,3>: Cost 3 vext2 <7,3,0,u>, <7,3,0,u>
- 2562731318U, // <0,u,7,4>: Cost 3 vext1 <2,0,u,7>, RHS
- 2718718258U, // <0,u,7,5>: Cost 3 vext3 <5,6,7,0>, <u,7,5,6>
- 2586620261U, // <0,u,7,6>: Cost 3 vext1 <6,0,u,7>, <6,0,u,7>
- 2657793644U, // <0,u,7,7>: Cost 3 vext2 <6,7,0,u>, <7,7,7,7>
- 2562733870U, // <0,u,7,u>: Cost 3 vext1 <2,0,u,7>, LHS
- 135053414U, // <0,u,u,0>: Cost 1 vdup0 LHS
- 1544902446U, // <0,u,u,1>: Cost 2 vext2 <0,2,0,u>, LHS
- 1679005486U, // <0,u,u,2>: Cost 2 vuzpl LHS, LHS
- 835584U, // <0,u,u,3>: Cost 0 copy LHS
- 1483025718U, // <0,u,u,4>: Cost 2 vext1 <1,0,u,u>, RHS
- 1544902810U, // <0,u,u,5>: Cost 2 vext2 <0,2,0,u>, RHS
- 1679005850U, // <0,u,u,6>: Cost 2 vuzpl LHS, RHS
- 1518859327U, // <0,u,u,7>: Cost 2 vext1 <7,0,u,u>, <7,0,u,u>
- 835584U, // <0,u,u,u>: Cost 0 copy LHS
- 2689744896U, // <1,0,0,0>: Cost 3 vext3 <0,u,1,1>, <0,0,0,0>
- 1610694666U, // <1,0,0,1>: Cost 2 vext3 <0,0,1,1>, <0,0,1,1>
- 2689744916U, // <1,0,0,2>: Cost 3 vext3 <0,u,1,1>, <0,0,2,2>
- 2619310332U, // <1,0,0,3>: Cost 3 vext2 <0,3,1,0>, <0,3,1,0>
- 2684657701U, // <1,0,0,4>: Cost 3 vext3 <0,0,4,1>, <0,0,4,1>
- 2620637598U, // <1,0,0,5>: Cost 3 vext2 <0,5,1,0>, <0,5,1,0>
- 3708977654U, // <1,0,0,6>: Cost 4 vext2 <3,0,1,0>, <0,6,1,7>
- 3666351168U, // <1,0,0,7>: Cost 4 vext1 <7,1,0,0>, <7,1,0,0>
- 1611210825U, // <1,0,0,u>: Cost 2 vext3 <0,0,u,1>, <0,0,u,1>
- 2556780646U, // <1,0,1,0>: Cost 3 vext1 <1,1,0,1>, LHS
- 2556781355U, // <1,0,1,1>: Cost 3 vext1 <1,1,0,1>, <1,1,0,1>
- 1616003174U, // <1,0,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
- 3693052888U, // <1,0,1,3>: Cost 4 vext2 <0,3,1,0>, <1,3,1,3>
- 2556783926U, // <1,0,1,4>: Cost 3 vext1 <1,1,0,1>, RHS
- 2580672143U, // <1,0,1,5>: Cost 3 vext1 <5,1,0,1>, <5,1,0,1>
- 2724839566U, // <1,0,1,6>: Cost 3 vext3 <6,7,0,1>, <0,1,6,7>
- 3654415354U, // <1,0,1,7>: Cost 4 vext1 <5,1,0,1>, <7,0,1,2>
- 1616003228U, // <1,0,1,u>: Cost 2 vext3 <0,u,1,1>, LHS
- 2685690019U, // <1,0,2,0>: Cost 3 vext3 <0,2,0,1>, <0,2,0,1>
- 2685763756U, // <1,0,2,1>: Cost 3 vext3 <0,2,1,1>, <0,2,1,1>
- 2698297524U, // <1,0,2,2>: Cost 3 vext3 <2,3,0,1>, <0,2,2,0>
- 2685911230U, // <1,0,2,3>: Cost 3 vext3 <0,2,3,1>, <0,2,3,1>
- 2689745100U, // <1,0,2,4>: Cost 3 vext3 <0,u,1,1>, <0,2,4,6>
- 3764814038U, // <1,0,2,5>: Cost 4 vext3 <1,1,1,1>, <0,2,5,7>
- 2724839640U, // <1,0,2,6>: Cost 3 vext3 <6,7,0,1>, <0,2,6,0>
- 2592625658U, // <1,0,2,7>: Cost 3 vext1 <7,1,0,2>, <7,0,1,2>
- 2686279915U, // <1,0,2,u>: Cost 3 vext3 <0,2,u,1>, <0,2,u,1>
- 3087843328U, // <1,0,3,0>: Cost 3 vtrnr LHS, <0,0,0,0>
- 3087843338U, // <1,0,3,1>: Cost 3 vtrnr LHS, <0,0,1,1>
- 67944550U, // <1,0,3,2>: Cost 1 vrev LHS
- 2568743135U, // <1,0,3,3>: Cost 3 vext1 <3,1,0,3>, <3,1,0,3>
- 2562772278U, // <1,0,3,4>: Cost 3 vext1 <2,1,0,3>, RHS
- 4099850454U, // <1,0,3,5>: Cost 4 vtrnl <1,0,3,2>, <0,2,5,7>
- 3704998538U, // <1,0,3,6>: Cost 4 vext2 <2,3,1,0>, <3,6,2,7>
- 2592633923U, // <1,0,3,7>: Cost 3 vext1 <7,1,0,3>, <7,1,0,3>
- 68386972U, // <1,0,3,u>: Cost 1 vrev LHS
- 2620640146U, // <1,0,4,0>: Cost 3 vext2 <0,5,1,0>, <4,0,5,1>
- 2689745234U, // <1,0,4,1>: Cost 3 vext3 <0,u,1,1>, <0,4,1,5>
- 2689745244U, // <1,0,4,2>: Cost 3 vext3 <0,u,1,1>, <0,4,2,6>
- 3760980320U, // <1,0,4,3>: Cost 4 vext3 <0,4,3,1>, <0,4,3,1>
- 3761054057U, // <1,0,4,4>: Cost 4 vext3 <0,4,4,1>, <0,4,4,1>
- 2619313462U, // <1,0,4,5>: Cost 3 vext2 <0,3,1,0>, RHS
- 3761201531U, // <1,0,4,6>: Cost 4 vext3 <0,4,6,1>, <0,4,6,1>
- 3666383940U, // <1,0,4,7>: Cost 4 vext1 <7,1,0,4>, <7,1,0,4>
- 2619313705U, // <1,0,4,u>: Cost 3 vext2 <0,3,1,0>, RHS
- 4029300736U, // <1,0,5,0>: Cost 4 vzipr <0,4,1,5>, <0,0,0,0>
- 2895249510U, // <1,0,5,1>: Cost 3 vzipl <1,5,3,7>, LHS
- 3028287590U, // <1,0,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
- 3642501345U, // <1,0,5,3>: Cost 4 vext1 <3,1,0,5>, <3,1,0,5>
- 2215592058U, // <1,0,5,4>: Cost 3 vrev <0,1,4,5>
- 3724242907U, // <1,0,5,5>: Cost 4 vext2 <5,5,1,0>, <5,5,1,0>
- 3724906540U, // <1,0,5,6>: Cost 4 vext2 <5,6,1,0>, <5,6,1,0>
- 3911118134U, // <1,0,5,7>: Cost 4 vuzpr <3,1,3,0>, RHS
- 3028287644U, // <1,0,5,u>: Cost 3 vtrnl <1,3,5,7>, LHS
- 3762086375U, // <1,0,6,0>: Cost 4 vext3 <0,6,0,1>, <0,6,0,1>
- 2698297846U, // <1,0,6,1>: Cost 3 vext3 <2,3,0,1>, <0,6,1,7>
- 3760022015U, // <1,0,6,2>: Cost 4 vext3 <0,2,u,1>, <0,6,2,7>
- 3642509538U, // <1,0,6,3>: Cost 4 vext1 <3,1,0,6>, <3,1,0,6>
- 3762381323U, // <1,0,6,4>: Cost 4 vext3 <0,6,4,1>, <0,6,4,1>
- 3730215604U, // <1,0,6,5>: Cost 4 vext2 <6,5,1,0>, <6,5,1,0>
- 3730879237U, // <1,0,6,6>: Cost 4 vext2 <6,6,1,0>, <6,6,1,0>
- 2657801046U, // <1,0,6,7>: Cost 3 vext2 <6,7,1,0>, <6,7,1,0>
- 2658464679U, // <1,0,6,u>: Cost 3 vext2 <6,u,1,0>, <6,u,1,0>
- 2659128312U, // <1,0,7,0>: Cost 3 vext2 <7,0,1,0>, <7,0,1,0>
- 4047898278U, // <1,0,7,1>: Cost 4 vzipr <3,5,1,7>, <2,3,0,1>
- 2215460970U, // <1,0,7,2>: Cost 3 vrev <0,1,2,7>
- 3734861035U, // <1,0,7,3>: Cost 4 vext2 <7,3,1,0>, <7,3,1,0>
- 3731543398U, // <1,0,7,4>: Cost 4 vext2 <6,7,1,0>, <7,4,5,6>
- 3736188301U, // <1,0,7,5>: Cost 4 vext2 <7,5,1,0>, <7,5,1,0>
- 2663110110U, // <1,0,7,6>: Cost 3 vext2 <7,6,1,0>, <7,6,1,0>
- 3731543660U, // <1,0,7,7>: Cost 4 vext2 <6,7,1,0>, <7,7,7,7>
- 2664437376U, // <1,0,7,u>: Cost 3 vext2 <7,u,1,0>, <7,u,1,0>
- 3087884288U, // <1,0,u,0>: Cost 3 vtrnr LHS, <0,0,0,0>
- 1616003730U, // <1,0,u,1>: Cost 2 vext3 <0,u,1,1>, <0,u,1,1>
- 67985515U, // <1,0,u,2>: Cost 1 vrev LHS
- 2689893028U, // <1,0,u,3>: Cost 3 vext3 <0,u,3,1>, <0,u,3,1>
- 2689745586U, // <1,0,u,4>: Cost 3 vext3 <0,u,1,1>, <0,u,4,6>
- 2619316378U, // <1,0,u,5>: Cost 3 vext2 <0,3,1,0>, RHS
- 2669082807U, // <1,0,u,6>: Cost 3 vext2 <u,6,1,0>, <u,6,1,0>
- 2592674888U, // <1,0,u,7>: Cost 3 vext1 <7,1,0,u>, <7,1,0,u>
- 68427937U, // <1,0,u,u>: Cost 1 vrev LHS
- 1543585802U, // <1,1,0,0>: Cost 2 vext2 <0,0,1,1>, <0,0,1,1>
- 1548894310U, // <1,1,0,1>: Cost 2 vext2 <0,u,1,1>, LHS
- 2618654892U, // <1,1,0,2>: Cost 3 vext2 <0,2,1,1>, <0,2,1,1>
- 2689745654U, // <1,1,0,3>: Cost 3 vext3 <0,u,1,1>, <1,0,3,2>
- 2622636370U, // <1,1,0,4>: Cost 3 vext2 <0,u,1,1>, <0,4,1,5>
- 2620645791U, // <1,1,0,5>: Cost 3 vext2 <0,5,1,1>, <0,5,1,1>
- 3696378367U, // <1,1,0,6>: Cost 4 vext2 <0,u,1,1>, <0,6,2,7>
- 3666424905U, // <1,1,0,7>: Cost 4 vext1 <7,1,1,0>, <7,1,1,0>
- 1548894866U, // <1,1,0,u>: Cost 2 vext2 <0,u,1,1>, <0,u,1,1>
- 1483112550U, // <1,1,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,1,1,1>: Cost 1 vdup1 LHS
- 2622636950U, // <1,1,1,2>: Cost 3 vext2 <0,u,1,1>, <1,2,3,0>
- 2622637016U, // <1,1,1,3>: Cost 3 vext2 <0,u,1,1>, <1,3,1,3>
- 1483115830U, // <1,1,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 2622637200U, // <1,1,1,5>: Cost 3 vext2 <0,u,1,1>, <1,5,3,7>
- 2622637263U, // <1,1,1,6>: Cost 3 vext2 <0,u,1,1>, <1,6,1,7>
- 2592691274U, // <1,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
- 202162278U, // <1,1,1,u>: Cost 1 vdup1 LHS
- 2550890588U, // <1,1,2,0>: Cost 3 vext1 <0,1,1,2>, <0,1,1,2>
- 2617329183U, // <1,1,2,1>: Cost 3 vext2 <0,0,1,1>, <2,1,3,1>
- 2622637672U, // <1,1,2,2>: Cost 3 vext2 <0,u,1,1>, <2,2,2,2>
- 2622637734U, // <1,1,2,3>: Cost 3 vext2 <0,u,1,1>, <2,3,0,1>
- 2550893878U, // <1,1,2,4>: Cost 3 vext1 <0,1,1,2>, RHS
- 3696379744U, // <1,1,2,5>: Cost 4 vext2 <0,u,1,1>, <2,5,2,7>
- 2622638010U, // <1,1,2,6>: Cost 3 vext2 <0,u,1,1>, <2,6,3,7>
- 3804554170U, // <1,1,2,7>: Cost 4 vext3 <7,7,0,1>, <1,2,7,0>
- 2622638139U, // <1,1,2,u>: Cost 3 vext2 <0,u,1,1>, <2,u,0,1>
- 2622638230U, // <1,1,3,0>: Cost 3 vext2 <0,u,1,1>, <3,0,1,2>
- 3087844148U, // <1,1,3,1>: Cost 3 vtrnr LHS, <1,1,1,1>
- 4161585244U, // <1,1,3,2>: Cost 4 vtrnr LHS, <0,1,1,2>
- 2014101606U, // <1,1,3,3>: Cost 2 vtrnr LHS, LHS
- 2622638594U, // <1,1,3,4>: Cost 3 vext2 <0,u,1,1>, <3,4,5,6>
- 2689745920U, // <1,1,3,5>: Cost 3 vext3 <0,u,1,1>, <1,3,5,7>
- 3763487753U, // <1,1,3,6>: Cost 4 vext3 <0,u,1,1>, <1,3,6,7>
- 2592707660U, // <1,1,3,7>: Cost 3 vext1 <7,1,1,3>, <7,1,1,3>
- 2014101611U, // <1,1,3,u>: Cost 2 vtrnr LHS, LHS
- 2556878950U, // <1,1,4,0>: Cost 3 vext1 <1,1,1,4>, LHS
- 2221335351U, // <1,1,4,1>: Cost 3 vrev <1,1,1,4>
- 3696380988U, // <1,1,4,2>: Cost 4 vext2 <0,u,1,1>, <4,2,6,0>
- 3763487805U, // <1,1,4,3>: Cost 4 vext3 <0,u,1,1>, <1,4,3,5>
- 2556882230U, // <1,1,4,4>: Cost 3 vext1 <1,1,1,4>, RHS
- 1548897590U, // <1,1,4,5>: Cost 2 vext2 <0,u,1,1>, RHS
- 2758184246U, // <1,1,4,6>: Cost 3 vuzpl <1,1,1,1>, RHS
- 3666457677U, // <1,1,4,7>: Cost 4 vext1 <7,1,1,4>, <7,1,1,4>
- 1548897833U, // <1,1,4,u>: Cost 2 vext2 <0,u,1,1>, RHS
- 2693653615U, // <1,1,5,0>: Cost 3 vext3 <1,5,0,1>, <1,5,0,1>
- 2617331408U, // <1,1,5,1>: Cost 3 vext2 <0,0,1,1>, <5,1,7,3>
- 4029302934U, // <1,1,5,2>: Cost 4 vzipr <0,4,1,5>, <3,0,1,2>
- 2689746064U, // <1,1,5,3>: Cost 3 vext3 <0,u,1,1>, <1,5,3,7>
- 2221564755U, // <1,1,5,4>: Cost 3 vrev <1,1,4,5>
- 2955559250U, // <1,1,5,5>: Cost 3 vzipr <0,4,1,5>, <0,4,1,5>
- 2617331810U, // <1,1,5,6>: Cost 3 vext2 <0,0,1,1>, <5,6,7,0>
- 2825293110U, // <1,1,5,7>: Cost 3 vuzpr <1,1,1,1>, RHS
- 2689746109U, // <1,1,5,u>: Cost 3 vext3 <0,u,1,1>, <1,5,u,7>
- 3696382241U, // <1,1,6,0>: Cost 4 vext2 <0,u,1,1>, <6,0,1,2>
- 2689746127U, // <1,1,6,1>: Cost 3 vext3 <0,u,1,1>, <1,6,1,7>
- 2617332218U, // <1,1,6,2>: Cost 3 vext2 <0,0,1,1>, <6,2,7,3>
- 3763487969U, // <1,1,6,3>: Cost 4 vext3 <0,u,1,1>, <1,6,3,7>
- 3696382605U, // <1,1,6,4>: Cost 4 vext2 <0,u,1,1>, <6,4,5,6>
- 4029309266U, // <1,1,6,5>: Cost 4 vzipr <0,4,1,6>, <0,4,1,5>
- 2617332536U, // <1,1,6,6>: Cost 3 vext2 <0,0,1,1>, <6,6,6,6>
- 2724840702U, // <1,1,6,7>: Cost 3 vext3 <6,7,0,1>, <1,6,7,0>
- 2725504263U, // <1,1,6,u>: Cost 3 vext3 <6,u,0,1>, <1,6,u,0>
- 2617332720U, // <1,1,7,0>: Cost 3 vext2 <0,0,1,1>, <7,0,0,1>
- 2659800138U, // <1,1,7,1>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
- 3691074717U, // <1,1,7,2>: Cost 4 vext2 <0,0,1,1>, <7,2,1,3>
- 4167811174U, // <1,1,7,3>: Cost 4 vtrnr <1,1,5,7>, LHS
- 2617333094U, // <1,1,7,4>: Cost 3 vext2 <0,0,1,1>, <7,4,5,6>
- 3295396702U, // <1,1,7,5>: Cost 4 vrev <1,1,5,7>
- 3803891014U, // <1,1,7,6>: Cost 4 vext3 <7,6,0,1>, <1,7,6,0>
- 2617333356U, // <1,1,7,7>: Cost 3 vext2 <0,0,1,1>, <7,7,7,7>
- 2659800138U, // <1,1,7,u>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
- 1483112550U, // <1,1,u,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,1,u,1>: Cost 1 vdup1 LHS
- 2622642056U, // <1,1,u,2>: Cost 3 vext2 <0,u,1,1>, <u,2,3,3>
- 2014142566U, // <1,1,u,3>: Cost 2 vtrnr LHS, LHS
- 1483115830U, // <1,1,u,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 1548900506U, // <1,1,u,5>: Cost 2 vext2 <0,u,1,1>, RHS
- 2622642384U, // <1,1,u,6>: Cost 3 vext2 <0,u,1,1>, <u,6,3,7>
- 2825293353U, // <1,1,u,7>: Cost 3 vuzpr <1,1,1,1>, RHS
- 202162278U, // <1,1,u,u>: Cost 1 vdup1 LHS
- 2635251712U, // <1,2,0,0>: Cost 3 vext2 <3,0,1,2>, <0,0,0,0>
- 1561509990U, // <1,2,0,1>: Cost 2 vext2 <3,0,1,2>, LHS
- 2618663085U, // <1,2,0,2>: Cost 3 vext2 <0,2,1,2>, <0,2,1,2>
- 2696529358U, // <1,2,0,3>: Cost 3 vext3 <2,0,3,1>, <2,0,3,1>
- 2635252050U, // <1,2,0,4>: Cost 3 vext2 <3,0,1,2>, <0,4,1,5>
- 3769533926U, // <1,2,0,5>: Cost 4 vext3 <1,u,2,1>, <2,0,5,7>
- 2621317617U, // <1,2,0,6>: Cost 3 vext2 <0,6,1,2>, <0,6,1,2>
- 2659140170U, // <1,2,0,7>: Cost 3 vext2 <7,0,1,2>, <0,7,2,1>
- 1561510557U, // <1,2,0,u>: Cost 2 vext2 <3,0,1,2>, LHS
- 2623308516U, // <1,2,1,0>: Cost 3 vext2 <1,0,1,2>, <1,0,1,2>
- 2635252532U, // <1,2,1,1>: Cost 3 vext2 <3,0,1,2>, <1,1,1,1>
- 2631271318U, // <1,2,1,2>: Cost 3 vext2 <2,3,1,2>, <1,2,3,0>
- 2958180454U, // <1,2,1,3>: Cost 3 vzipr <0,u,1,1>, LHS
- 2550959414U, // <1,2,1,4>: Cost 3 vext1 <0,1,2,1>, RHS
- 2635252880U, // <1,2,1,5>: Cost 3 vext2 <3,0,1,2>, <1,5,3,7>
- 2635252952U, // <1,2,1,6>: Cost 3 vext2 <3,0,1,2>, <1,6,2,7>
- 3732882731U, // <1,2,1,7>: Cost 4 vext2 <7,0,1,2>, <1,7,3,0>
- 2958180459U, // <1,2,1,u>: Cost 3 vzipr <0,u,1,1>, LHS
- 2629281213U, // <1,2,2,0>: Cost 3 vext2 <2,0,1,2>, <2,0,1,2>
- 2635253280U, // <1,2,2,1>: Cost 3 vext2 <3,0,1,2>, <2,1,3,2>
- 2618664552U, // <1,2,2,2>: Cost 3 vext2 <0,2,1,2>, <2,2,2,2>
- 2689746546U, // <1,2,2,3>: Cost 3 vext3 <0,u,1,1>, <2,2,3,3>
- 3764815485U, // <1,2,2,4>: Cost 4 vext3 <1,1,1,1>, <2,2,4,5>
- 3760023176U, // <1,2,2,5>: Cost 4 vext3 <0,2,u,1>, <2,2,5,7>
- 2635253690U, // <1,2,2,6>: Cost 3 vext2 <3,0,1,2>, <2,6,3,7>
- 2659141610U, // <1,2,2,7>: Cost 3 vext2 <7,0,1,2>, <2,7,0,1>
- 2689746591U, // <1,2,2,u>: Cost 3 vext3 <0,u,1,1>, <2,2,u,3>
- 403488870U, // <1,2,3,0>: Cost 1 vext1 LHS, LHS
- 1477231350U, // <1,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1477232232U, // <1,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1477233052U, // <1,2,3,3>: Cost 2 vext1 LHS, <3,3,3,3>
- 403492150U, // <1,2,3,4>: Cost 1 vext1 LHS, RHS
- 1525010128U, // <1,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
- 1525010938U, // <1,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1525011450U, // <1,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 403494702U, // <1,2,3,u>: Cost 1 vext1 LHS, LHS
- 2641226607U, // <1,2,4,0>: Cost 3 vext2 <4,0,1,2>, <4,0,1,2>
- 3624723446U, // <1,2,4,1>: Cost 4 vext1 <0,1,2,4>, <1,3,4,6>
- 3301123609U, // <1,2,4,2>: Cost 4 vrev <2,1,2,4>
- 2598759198U, // <1,2,4,3>: Cost 3 vext1 <u,1,2,4>, <3,u,1,2>
- 2659142864U, // <1,2,4,4>: Cost 3 vext2 <7,0,1,2>, <4,4,4,4>
- 1561513270U, // <1,2,4,5>: Cost 2 vext2 <3,0,1,2>, RHS
- 2659143028U, // <1,2,4,6>: Cost 3 vext2 <7,0,1,2>, <4,6,4,6>
- 2659143112U, // <1,2,4,7>: Cost 3 vext2 <7,0,1,2>, <4,7,5,0>
- 1561513513U, // <1,2,4,u>: Cost 2 vext2 <3,0,1,2>, RHS
- 2550988902U, // <1,2,5,0>: Cost 3 vext1 <0,1,2,5>, LHS
- 2550989824U, // <1,2,5,1>: Cost 3 vext1 <0,1,2,5>, <1,3,5,7>
- 3624732264U, // <1,2,5,2>: Cost 4 vext1 <0,1,2,5>, <2,2,2,2>
- 2955559014U, // <1,2,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
- 2550992182U, // <1,2,5,4>: Cost 3 vext1 <0,1,2,5>, RHS
- 2659143684U, // <1,2,5,5>: Cost 3 vext2 <7,0,1,2>, <5,5,5,5>
- 2659143778U, // <1,2,5,6>: Cost 3 vext2 <7,0,1,2>, <5,6,7,0>
- 2659143848U, // <1,2,5,7>: Cost 3 vext2 <7,0,1,2>, <5,7,5,7>
- 2550994734U, // <1,2,5,u>: Cost 3 vext1 <0,1,2,5>, LHS
- 2700289945U, // <1,2,6,0>: Cost 3 vext3 <2,6,0,1>, <2,6,0,1>
- 2635256232U, // <1,2,6,1>: Cost 3 vext2 <3,0,1,2>, <6,1,7,2>
- 2659144186U, // <1,2,6,2>: Cost 3 vext2 <7,0,1,2>, <6,2,7,3>
- 2689746874U, // <1,2,6,3>: Cost 3 vext3 <0,u,1,1>, <2,6,3,7>
- 3763488705U, // <1,2,6,4>: Cost 4 vext3 <0,u,1,1>, <2,6,4,5>
- 3763488716U, // <1,2,6,5>: Cost 4 vext3 <0,u,1,1>, <2,6,5,7>
- 2659144504U, // <1,2,6,6>: Cost 3 vext2 <7,0,1,2>, <6,6,6,6>
- 2657817432U, // <1,2,6,7>: Cost 3 vext2 <6,7,1,2>, <6,7,1,2>
- 2689746919U, // <1,2,6,u>: Cost 3 vext3 <0,u,1,1>, <2,6,u,7>
- 1585402874U, // <1,2,7,0>: Cost 2 vext2 <7,0,1,2>, <7,0,1,2>
- 2659144770U, // <1,2,7,1>: Cost 3 vext2 <7,0,1,2>, <7,1,0,2>
- 3708998858U, // <1,2,7,2>: Cost 4 vext2 <3,0,1,2>, <7,2,6,3>
- 2635257059U, // <1,2,7,3>: Cost 3 vext2 <3,0,1,2>, <7,3,0,1>
- 2659145062U, // <1,2,7,4>: Cost 3 vext2 <7,0,1,2>, <7,4,5,6>
- 3732886916U, // <1,2,7,5>: Cost 4 vext2 <7,0,1,2>, <7,5,0,0>
- 3732886998U, // <1,2,7,6>: Cost 4 vext2 <7,0,1,2>, <7,6,0,1>
- 2659145255U, // <1,2,7,7>: Cost 3 vext2 <7,0,1,2>, <7,7,0,1>
- 1590711938U, // <1,2,7,u>: Cost 2 vext2 <7,u,1,2>, <7,u,1,2>
- 403529835U, // <1,2,u,0>: Cost 1 vext1 LHS, LHS
- 1477272310U, // <1,2,u,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1477273192U, // <1,2,u,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1477273750U, // <1,2,u,3>: Cost 2 vext1 LHS, <3,0,1,2>
- 403533110U, // <1,2,u,4>: Cost 1 vext1 LHS, RHS
- 1561516186U, // <1,2,u,5>: Cost 2 vext2 <3,0,1,2>, RHS
- 1525051898U, // <1,2,u,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1525052410U, // <1,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 403535662U, // <1,2,u,u>: Cost 1 vext1 LHS, LHS
- 2819407872U, // <1,3,0,0>: Cost 3 vuzpr LHS, <0,0,0,0>
- 1551564902U, // <1,3,0,1>: Cost 2 vext2 <1,3,1,3>, LHS
- 2819408630U, // <1,3,0,2>: Cost 3 vuzpr LHS, <1,0,3,2>
- 2619334911U, // <1,3,0,3>: Cost 3 vext2 <0,3,1,3>, <0,3,1,3>
- 2625306962U, // <1,3,0,4>: Cost 3 vext2 <1,3,1,3>, <0,4,1,5>
- 3832725879U, // <1,3,0,5>: Cost 4 vuzpl <1,2,3,0>, <0,4,5,6>
- 3699048959U, // <1,3,0,6>: Cost 4 vext2 <1,3,1,3>, <0,6,2,7>
- 3776538827U, // <1,3,0,7>: Cost 4 vext3 <3,0,7,1>, <3,0,7,1>
- 1551565469U, // <1,3,0,u>: Cost 2 vext2 <1,3,1,3>, LHS
- 2618671862U, // <1,3,1,0>: Cost 3 vext2 <0,2,1,3>, <1,0,3,2>
- 2819408692U, // <1,3,1,1>: Cost 3 vuzpr LHS, <1,1,1,1>
- 2624643975U, // <1,3,1,2>: Cost 3 vext2 <1,2,1,3>, <1,2,1,3>
- 1745666150U, // <1,3,1,3>: Cost 2 vuzpr LHS, LHS
- 2557005110U, // <1,3,1,4>: Cost 3 vext1 <1,1,3,1>, RHS
- 2625307792U, // <1,3,1,5>: Cost 3 vext2 <1,3,1,3>, <1,5,3,7>
- 3698386127U, // <1,3,1,6>: Cost 4 vext2 <1,2,1,3>, <1,6,1,7>
- 2592838748U, // <1,3,1,7>: Cost 3 vext1 <7,1,3,1>, <7,1,3,1>
- 1745666155U, // <1,3,1,u>: Cost 2 vuzpr LHS, LHS
- 2819408790U, // <1,3,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
- 2625308193U, // <1,3,2,1>: Cost 3 vext2 <1,3,1,3>, <2,1,3,3>
- 2819408036U, // <1,3,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
- 2819851890U, // <1,3,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
- 2819408794U, // <1,3,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
- 3893149890U, // <1,3,2,5>: Cost 4 vuzpr LHS, <0,2,3,5>
- 2819408076U, // <1,3,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
- 3772041583U, // <1,3,2,7>: Cost 4 vext3 <2,3,0,1>, <3,2,7,3>
- 2819408042U, // <1,3,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
- 1483276390U, // <1,3,3,0>: Cost 2 vext1 <1,1,3,3>, LHS
- 1483277128U, // <1,3,3,1>: Cost 2 vext1 <1,1,3,3>, <1,1,3,3>
- 2557019752U, // <1,3,3,2>: Cost 3 vext1 <1,1,3,3>, <2,2,2,2>
- 2819408856U, // <1,3,3,3>: Cost 3 vuzpr LHS, <1,3,1,3>
- 1483279670U, // <1,3,3,4>: Cost 2 vext1 <1,1,3,3>, RHS
- 2819409614U, // <1,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
- 2598826490U, // <1,3,3,6>: Cost 3 vext1 <u,1,3,3>, <6,2,7,3>
- 3087844352U, // <1,3,3,7>: Cost 3 vtrnr LHS, <1,3,5,7>
- 1483282222U, // <1,3,3,u>: Cost 2 vext1 <1,1,3,3>, LHS
- 2568970342U, // <1,3,4,0>: Cost 3 vext1 <3,1,3,4>, LHS
- 2568971224U, // <1,3,4,1>: Cost 3 vext1 <3,1,3,4>, <1,3,1,3>
- 3832761290U, // <1,3,4,2>: Cost 4 vuzpl <1,2,3,4>, <4,1,2,3>
- 2233428219U, // <1,3,4,3>: Cost 3 vrev <3,1,3,4>
- 2568973622U, // <1,3,4,4>: Cost 3 vext1 <3,1,3,4>, RHS
- 1551568182U, // <1,3,4,5>: Cost 2 vext2 <1,3,1,3>, RHS
- 2819410434U, // <1,3,4,6>: Cost 3 vuzpr LHS, <3,4,5,6>
- 3666605151U, // <1,3,4,7>: Cost 4 vext1 <7,1,3,4>, <7,1,3,4>
- 1551568425U, // <1,3,4,u>: Cost 2 vext2 <1,3,1,3>, RHS
- 2563006566U, // <1,3,5,0>: Cost 3 vext1 <2,1,3,5>, LHS
- 2568979456U, // <1,3,5,1>: Cost 3 vext1 <3,1,3,5>, <1,3,5,7>
- 2563008035U, // <1,3,5,2>: Cost 3 vext1 <2,1,3,5>, <2,1,3,5>
- 2233436412U, // <1,3,5,3>: Cost 3 vrev <3,1,3,5>
- 2563009846U, // <1,3,5,4>: Cost 3 vext1 <2,1,3,5>, RHS
- 2867187716U, // <1,3,5,5>: Cost 3 vuzpr LHS, <5,5,5,5>
- 2655834214U, // <1,3,5,6>: Cost 3 vext2 <6,4,1,3>, <5,6,7,4>
- 1745669430U, // <1,3,5,7>: Cost 2 vuzpr LHS, RHS
- 1745669431U, // <1,3,5,u>: Cost 2 vuzpr LHS, RHS
- 2867187810U, // <1,3,6,0>: Cost 3 vuzpr LHS, <5,6,7,0>
- 3699052931U, // <1,3,6,1>: Cost 4 vext2 <1,3,1,3>, <6,1,3,1>
- 2654507460U, // <1,3,6,2>: Cost 3 vext2 <6,2,1,3>, <6,2,1,3>
- 3766291091U, // <1,3,6,3>: Cost 4 vext3 <1,3,3,1>, <3,6,3,7>
- 2655834726U, // <1,3,6,4>: Cost 3 vext2 <6,4,1,3>, <6,4,1,3>
- 3923384562U, // <1,3,6,5>: Cost 4 vuzpr <5,1,7,3>, <u,6,7,5>
- 2657161992U, // <1,3,6,6>: Cost 3 vext2 <6,6,1,3>, <6,6,1,3>
- 2819852218U, // <1,3,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
- 2819852219U, // <1,3,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
- 2706926275U, // <1,3,7,0>: Cost 3 vext3 <3,7,0,1>, <3,7,0,1>
- 2659816524U, // <1,3,7,1>: Cost 3 vext2 <7,1,1,3>, <7,1,1,3>
- 3636766245U, // <1,3,7,2>: Cost 4 vext1 <2,1,3,7>, <2,1,3,7>
- 2867187903U, // <1,3,7,3>: Cost 3 vuzpr LHS, <5,7,u,3>
- 2625312102U, // <1,3,7,4>: Cost 3 vext2 <1,3,1,3>, <7,4,5,6>
- 2867188598U, // <1,3,7,5>: Cost 3 vuzpr LHS, <6,7,4,5>
- 3728250344U, // <1,3,7,6>: Cost 4 vext2 <6,2,1,3>, <7,6,2,1>
- 2867187880U, // <1,3,7,7>: Cost 3 vuzpr LHS, <5,7,5,7>
- 2707516171U, // <1,3,7,u>: Cost 3 vext3 <3,7,u,1>, <3,7,u,1>
- 1483317350U, // <1,3,u,0>: Cost 2 vext1 <1,1,3,u>, LHS
- 1483318093U, // <1,3,u,1>: Cost 2 vext1 <1,1,3,u>, <1,1,3,u>
- 2819410718U, // <1,3,u,2>: Cost 3 vuzpr LHS, <3,u,1,2>
- 1745666717U, // <1,3,u,3>: Cost 2 vuzpr LHS, LHS
- 1483320630U, // <1,3,u,4>: Cost 2 vext1 <1,1,3,u>, RHS
- 1551571098U, // <1,3,u,5>: Cost 2 vext2 <1,3,1,3>, RHS
- 2819410758U, // <1,3,u,6>: Cost 3 vuzpr LHS, <3,u,5,6>
- 1745669673U, // <1,3,u,7>: Cost 2 vuzpr LHS, RHS
- 1745666722U, // <1,3,u,u>: Cost 2 vuzpr LHS, LHS
- 2617352205U, // <1,4,0,0>: Cost 3 vext2 <0,0,1,4>, <0,0,1,4>
- 2619342950U, // <1,4,0,1>: Cost 3 vext2 <0,3,1,4>, LHS
- 3692421295U, // <1,4,0,2>: Cost 4 vext2 <0,2,1,4>, <0,2,1,4>
- 2619343104U, // <1,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
- 2617352530U, // <1,4,0,4>: Cost 3 vext2 <0,0,1,4>, <0,4,1,5>
- 1634880402U, // <1,4,0,5>: Cost 2 vext3 <4,0,5,1>, <4,0,5,1>
- 2713930652U, // <1,4,0,6>: Cost 3 vext3 <4,u,5,1>, <4,0,6,2>
- 3732898396U, // <1,4,0,7>: Cost 4 vext2 <7,0,1,4>, <0,7,4,1>
- 1635101613U, // <1,4,0,u>: Cost 2 vext3 <4,0,u,1>, <4,0,u,1>
- 3693085430U, // <1,4,1,0>: Cost 4 vext2 <0,3,1,4>, <1,0,3,2>
- 2623988535U, // <1,4,1,1>: Cost 3 vext2 <1,1,1,4>, <1,1,1,4>
- 3693085590U, // <1,4,1,2>: Cost 4 vext2 <0,3,1,4>, <1,2,3,0>
- 3692422134U, // <1,4,1,3>: Cost 4 vext2 <0,2,1,4>, <1,3,4,6>
- 3693085726U, // <1,4,1,4>: Cost 4 vext2 <0,3,1,4>, <1,4,0,1>
- 2892401974U, // <1,4,1,5>: Cost 3 vzipl <1,1,1,1>, RHS
- 3026619702U, // <1,4,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
- 3800206324U, // <1,4,1,7>: Cost 4 vext3 <7,0,4,1>, <4,1,7,0>
- 2892402217U, // <1,4,1,u>: Cost 3 vzipl <1,1,1,1>, RHS
- 3966978927U, // <1,4,2,0>: Cost 4 vzipl <1,2,3,4>, <4,0,1,2>
- 3966979018U, // <1,4,2,1>: Cost 4 vzipl <1,2,3,4>, <4,1,2,3>
- 3693086312U, // <1,4,2,2>: Cost 4 vext2 <0,3,1,4>, <2,2,2,2>
- 2635269798U, // <1,4,2,3>: Cost 3 vext2 <3,0,1,4>, <2,3,0,1>
- 3966979280U, // <1,4,2,4>: Cost 4 vzipl <1,2,3,4>, <4,4,4,4>
- 2893204790U, // <1,4,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
- 3693086650U, // <1,4,2,6>: Cost 4 vext2 <0,3,1,4>, <2,6,3,7>
- 3666662502U, // <1,4,2,7>: Cost 4 vext1 <7,1,4,2>, <7,1,4,2>
- 2893205033U, // <1,4,2,u>: Cost 3 vzipl <1,2,3,0>, RHS
- 2563063910U, // <1,4,3,0>: Cost 3 vext1 <2,1,4,3>, LHS
- 2563064730U, // <1,4,3,1>: Cost 3 vext1 <2,1,4,3>, <1,2,3,4>
- 2563065386U, // <1,4,3,2>: Cost 3 vext1 <2,1,4,3>, <2,1,4,3>
- 3693087132U, // <1,4,3,3>: Cost 4 vext2 <0,3,1,4>, <3,3,3,3>
- 2619345410U, // <1,4,3,4>: Cost 3 vext2 <0,3,1,4>, <3,4,5,6>
- 3087843666U, // <1,4,3,5>: Cost 3 vtrnr LHS, <0,4,1,5>
- 3087843676U, // <1,4,3,6>: Cost 3 vtrnr LHS, <0,4,2,6>
- 3666670695U, // <1,4,3,7>: Cost 4 vext1 <7,1,4,3>, <7,1,4,3>
- 3087843669U, // <1,4,3,u>: Cost 3 vtrnr LHS, <0,4,1,u>
- 2620672914U, // <1,4,4,0>: Cost 3 vext2 <0,5,1,4>, <4,0,5,1>
- 3630842706U, // <1,4,4,1>: Cost 4 vext1 <1,1,4,4>, <1,1,4,4>
- 3313069003U, // <1,4,4,2>: Cost 4 vrev <4,1,2,4>
- 3642788100U, // <1,4,4,3>: Cost 4 vext1 <3,1,4,4>, <3,1,4,4>
- 2713930960U, // <1,4,4,4>: Cost 3 vext3 <4,u,5,1>, <4,4,4,4>
- 2619346230U, // <1,4,4,5>: Cost 3 vext2 <0,3,1,4>, RHS
- 2713930980U, // <1,4,4,6>: Cost 3 vext3 <4,u,5,1>, <4,4,6,6>
- 3736882642U, // <1,4,4,7>: Cost 4 vext2 <7,6,1,4>, <4,7,6,1>
- 2619346473U, // <1,4,4,u>: Cost 3 vext2 <0,3,1,4>, RHS
- 2557108326U, // <1,4,5,0>: Cost 3 vext1 <1,1,4,5>, LHS
- 2557109075U, // <1,4,5,1>: Cost 3 vext1 <1,1,4,5>, <1,1,4,5>
- 2598913774U, // <1,4,5,2>: Cost 3 vext1 <u,1,4,5>, <2,3,u,1>
- 3630852246U, // <1,4,5,3>: Cost 4 vext1 <1,1,4,5>, <3,0,1,2>
- 2557111606U, // <1,4,5,4>: Cost 3 vext1 <1,1,4,5>, RHS
- 2895252790U, // <1,4,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
- 1616006454U, // <1,4,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 3899059510U, // <1,4,5,7>: Cost 4 vuzpr <1,1,1,4>, RHS
- 1616006472U, // <1,4,5,u>: Cost 2 vext3 <0,u,1,1>, RHS
- 2557116518U, // <1,4,6,0>: Cost 3 vext1 <1,1,4,6>, LHS
- 2557117236U, // <1,4,6,1>: Cost 3 vext1 <1,1,4,6>, <1,1,1,1>
- 3630859880U, // <1,4,6,2>: Cost 4 vext1 <1,1,4,6>, <2,2,2,2>
- 2569062550U, // <1,4,6,3>: Cost 3 vext1 <3,1,4,6>, <3,0,1,2>
- 2557119798U, // <1,4,6,4>: Cost 3 vext1 <1,1,4,6>, RHS
- 3763490174U, // <1,4,6,5>: Cost 4 vext3 <0,u,1,1>, <4,6,5,7>
- 3763490183U, // <1,4,6,6>: Cost 4 vext3 <0,u,1,1>, <4,6,6,7>
- 2712751498U, // <1,4,6,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
- 2557122350U, // <1,4,6,u>: Cost 3 vext1 <1,1,4,6>, LHS
- 2659161084U, // <1,4,7,0>: Cost 3 vext2 <7,0,1,4>, <7,0,1,4>
- 3732903040U, // <1,4,7,1>: Cost 4 vext2 <7,0,1,4>, <7,1,7,1>
- 3734230174U, // <1,4,7,2>: Cost 4 vext2 <7,2,1,4>, <7,2,1,4>
- 3734893807U, // <1,4,7,3>: Cost 4 vext2 <7,3,1,4>, <7,3,1,4>
- 3660729654U, // <1,4,7,4>: Cost 4 vext1 <6,1,4,7>, RHS
- 3786493384U, // <1,4,7,5>: Cost 4 vext3 <4,6,7,1>, <4,7,5,0>
- 2713341394U, // <1,4,7,6>: Cost 3 vext3 <4,7,6,1>, <4,7,6,1>
- 3660731386U, // <1,4,7,7>: Cost 4 vext1 <6,1,4,7>, <7,0,1,2>
- 2664470148U, // <1,4,7,u>: Cost 3 vext2 <7,u,1,4>, <7,u,1,4>
- 2557132902U, // <1,4,u,0>: Cost 3 vext1 <1,1,4,u>, LHS
- 2619348782U, // <1,4,u,1>: Cost 3 vext2 <0,3,1,4>, LHS
- 2563106351U, // <1,4,u,2>: Cost 3 vext1 <2,1,4,u>, <2,1,4,u>
- 2713783816U, // <1,4,u,3>: Cost 3 vext3 <4,u,3,1>, <4,u,3,1>
- 2622666815U, // <1,4,u,4>: Cost 3 vext2 <0,u,1,4>, <u,4,5,6>
- 1640189466U, // <1,4,u,5>: Cost 2 vext3 <4,u,5,1>, <4,u,5,1>
- 1616006697U, // <1,4,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 2712751498U, // <1,4,u,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
- 1616006715U, // <1,4,u,u>: Cost 2 vext3 <0,u,1,1>, RHS
- 2620014592U, // <1,5,0,0>: Cost 3 vext2 <0,4,1,5>, <0,0,0,0>
- 1546272870U, // <1,5,0,1>: Cost 2 vext2 <0,4,1,5>, LHS
- 2618687664U, // <1,5,0,2>: Cost 3 vext2 <0,2,1,5>, <0,2,1,5>
- 3693093120U, // <1,5,0,3>: Cost 4 vext2 <0,3,1,5>, <0,3,1,4>
- 1546273106U, // <1,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
- 2620678563U, // <1,5,0,5>: Cost 3 vext2 <0,5,1,5>, <0,5,1,5>
- 2714668660U, // <1,5,0,6>: Cost 3 vext3 <5,0,6,1>, <5,0,6,1>
- 3772042877U, // <1,5,0,7>: Cost 4 vext3 <2,3,0,1>, <5,0,7,1>
- 1546273437U, // <1,5,0,u>: Cost 2 vext2 <0,4,1,5>, LHS
- 2620015350U, // <1,5,1,0>: Cost 3 vext2 <0,4,1,5>, <1,0,3,2>
- 2620015412U, // <1,5,1,1>: Cost 3 vext2 <0,4,1,5>, <1,1,1,1>
- 2620015510U, // <1,5,1,2>: Cost 3 vext2 <0,4,1,5>, <1,2,3,0>
- 2618688512U, // <1,5,1,3>: Cost 3 vext2 <0,2,1,5>, <1,3,5,7>
- 2620015677U, // <1,5,1,4>: Cost 3 vext2 <0,4,1,5>, <1,4,3,5>
- 2620015727U, // <1,5,1,5>: Cost 3 vext2 <0,4,1,5>, <1,5,0,1>
- 2620015859U, // <1,5,1,6>: Cost 3 vext2 <0,4,1,5>, <1,6,5,7>
- 3093728566U, // <1,5,1,7>: Cost 3 vtrnr <1,1,1,1>, RHS
- 2620015981U, // <1,5,1,u>: Cost 3 vext2 <0,4,1,5>, <1,u,1,3>
- 3692430816U, // <1,5,2,0>: Cost 4 vext2 <0,2,1,5>, <2,0,5,1>
- 2620016163U, // <1,5,2,1>: Cost 3 vext2 <0,4,1,5>, <2,1,3,5>
- 2620016232U, // <1,5,2,2>: Cost 3 vext2 <0,4,1,5>, <2,2,2,2>
- 2620016294U, // <1,5,2,3>: Cost 3 vext2 <0,4,1,5>, <2,3,0,1>
- 3693758221U, // <1,5,2,4>: Cost 4 vext2 <0,4,1,5>, <2,4,2,5>
- 3692431209U, // <1,5,2,5>: Cost 4 vext2 <0,2,1,5>, <2,5,3,7>
- 2620016570U, // <1,5,2,6>: Cost 3 vext2 <0,4,1,5>, <2,6,3,7>
- 4173598006U, // <1,5,2,7>: Cost 4 vtrnr <2,1,3,2>, RHS
- 2620016699U, // <1,5,2,u>: Cost 3 vext2 <0,4,1,5>, <2,u,0,1>
- 2620016790U, // <1,5,3,0>: Cost 3 vext2 <0,4,1,5>, <3,0,1,2>
- 2569110672U, // <1,5,3,1>: Cost 3 vext1 <3,1,5,3>, <1,5,3,7>
- 3693758785U, // <1,5,3,2>: Cost 4 vext2 <0,4,1,5>, <3,2,2,2>
- 2620017052U, // <1,5,3,3>: Cost 3 vext2 <0,4,1,5>, <3,3,3,3>
- 2620017154U, // <1,5,3,4>: Cost 3 vext2 <0,4,1,5>, <3,4,5,6>
- 3135623172U, // <1,5,3,5>: Cost 3 vtrnr LHS, <5,5,5,5>
- 4161587048U, // <1,5,3,6>: Cost 4 vtrnr LHS, <2,5,3,6>
- 2014104886U, // <1,5,3,7>: Cost 2 vtrnr LHS, RHS
- 2014104887U, // <1,5,3,u>: Cost 2 vtrnr LHS, RHS
- 2620017554U, // <1,5,4,0>: Cost 3 vext2 <0,4,1,5>, <4,0,5,1>
- 2620017634U, // <1,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
- 3693759551U, // <1,5,4,2>: Cost 4 vext2 <0,4,1,5>, <4,2,6,3>
- 3642861837U, // <1,5,4,3>: Cost 4 vext1 <3,1,5,4>, <3,1,5,4>
- 2575092710U, // <1,5,4,4>: Cost 3 vext1 <4,1,5,4>, <4,1,5,4>
- 1546276150U, // <1,5,4,5>: Cost 2 vext2 <0,4,1,5>, RHS
- 2759855414U, // <1,5,4,6>: Cost 3 vuzpl <1,3,5,7>, RHS
- 2713931718U, // <1,5,4,7>: Cost 3 vext3 <4,u,5,1>, <5,4,7,6>
- 1546276393U, // <1,5,4,u>: Cost 2 vext2 <0,4,1,5>, RHS
- 2557182054U, // <1,5,5,0>: Cost 3 vext1 <1,1,5,5>, LHS
- 2557182812U, // <1,5,5,1>: Cost 3 vext1 <1,1,5,5>, <1,1,5,5>
- 3630925347U, // <1,5,5,2>: Cost 4 vext1 <1,1,5,5>, <2,1,3,5>
- 4029301675U, // <1,5,5,3>: Cost 4 vzipr <0,4,1,5>, <1,2,5,3>
- 2557185334U, // <1,5,5,4>: Cost 3 vext1 <1,1,5,5>, RHS
- 2713931780U, // <1,5,5,5>: Cost 3 vext3 <4,u,5,1>, <5,5,5,5>
- 2667794530U, // <1,5,5,6>: Cost 3 vext2 <u,4,1,5>, <5,6,7,0>
- 2713931800U, // <1,5,5,7>: Cost 3 vext3 <4,u,5,1>, <5,5,7,7>
- 2557187886U, // <1,5,5,u>: Cost 3 vext1 <1,1,5,5>, LHS
- 2718208036U, // <1,5,6,0>: Cost 3 vext3 <5,6,0,1>, <5,6,0,1>
- 2620019115U, // <1,5,6,1>: Cost 3 vext2 <0,4,1,5>, <6,1,7,5>
- 2667794938U, // <1,5,6,2>: Cost 3 vext2 <u,4,1,5>, <6,2,7,3>
- 3787673666U, // <1,5,6,3>: Cost 4 vext3 <4,u,5,1>, <5,6,3,4>
- 3693761165U, // <1,5,6,4>: Cost 4 vext2 <0,4,1,5>, <6,4,5,6>
- 3319279297U, // <1,5,6,5>: Cost 4 vrev <5,1,5,6>
- 2667795256U, // <1,5,6,6>: Cost 3 vext2 <u,4,1,5>, <6,6,6,6>
- 2713931874U, // <1,5,6,7>: Cost 3 vext3 <4,u,5,1>, <5,6,7,0>
- 2713931883U, // <1,5,6,u>: Cost 3 vext3 <4,u,5,1>, <5,6,u,0>
- 2557198438U, // <1,5,7,0>: Cost 3 vext1 <1,1,5,7>, LHS
- 2557199156U, // <1,5,7,1>: Cost 3 vext1 <1,1,5,7>, <1,1,1,1>
- 2569143974U, // <1,5,7,2>: Cost 3 vext1 <3,1,5,7>, <2,3,0,1>
- 2569144592U, // <1,5,7,3>: Cost 3 vext1 <3,1,5,7>, <3,1,5,7>
- 2557201718U, // <1,5,7,4>: Cost 3 vext1 <1,1,5,7>, RHS
- 2713931944U, // <1,5,7,5>: Cost 3 vext3 <4,u,5,1>, <5,7,5,7>
- 3787673770U, // <1,5,7,6>: Cost 4 vext3 <4,u,5,1>, <5,7,6,0>
- 2719387828U, // <1,5,7,7>: Cost 3 vext3 <5,7,7,1>, <5,7,7,1>
- 2557204270U, // <1,5,7,u>: Cost 3 vext1 <1,1,5,7>, LHS
- 2620020435U, // <1,5,u,0>: Cost 3 vext2 <0,4,1,5>, <u,0,1,2>
- 1546278702U, // <1,5,u,1>: Cost 2 vext2 <0,4,1,5>, LHS
- 2620020616U, // <1,5,u,2>: Cost 3 vext2 <0,4,1,5>, <u,2,3,3>
- 2620020668U, // <1,5,u,3>: Cost 3 vext2 <0,4,1,5>, <u,3,0,1>
- 1594054682U, // <1,5,u,4>: Cost 2 vext2 <u,4,1,5>, <u,4,1,5>
- 1546279066U, // <1,5,u,5>: Cost 2 vext2 <0,4,1,5>, RHS
- 2620020944U, // <1,5,u,6>: Cost 3 vext2 <0,4,1,5>, <u,6,3,7>
- 2014145846U, // <1,5,u,7>: Cost 2 vtrnr LHS, RHS
- 2014145847U, // <1,5,u,u>: Cost 2 vtrnr LHS, RHS
- 3692437504U, // <1,6,0,0>: Cost 4 vext2 <0,2,1,6>, <0,0,0,0>
- 2618695782U, // <1,6,0,1>: Cost 3 vext2 <0,2,1,6>, LHS
- 2618695857U, // <1,6,0,2>: Cost 3 vext2 <0,2,1,6>, <0,2,1,6>
- 3794161970U, // <1,6,0,3>: Cost 4 vext3 <6,0,3,1>, <6,0,3,1>
- 2620023122U, // <1,6,0,4>: Cost 3 vext2 <0,4,1,6>, <0,4,1,5>
- 2620686756U, // <1,6,0,5>: Cost 3 vext2 <0,5,1,6>, <0,5,1,6>
- 2621350389U, // <1,6,0,6>: Cost 3 vext2 <0,6,1,6>, <0,6,1,6>
- 4028599606U, // <1,6,0,7>: Cost 4 vzipr <0,3,1,0>, RHS
- 2618696349U, // <1,6,0,u>: Cost 3 vext2 <0,2,1,6>, LHS
- 3692438262U, // <1,6,1,0>: Cost 4 vext2 <0,2,1,6>, <1,0,3,2>
- 2625995572U, // <1,6,1,1>: Cost 3 vext2 <1,4,1,6>, <1,1,1,1>
- 3692438422U, // <1,6,1,2>: Cost 4 vext2 <0,2,1,6>, <1,2,3,0>
- 3692438488U, // <1,6,1,3>: Cost 4 vext2 <0,2,1,6>, <1,3,1,3>
- 2625995820U, // <1,6,1,4>: Cost 3 vext2 <1,4,1,6>, <1,4,1,6>
- 3692438672U, // <1,6,1,5>: Cost 4 vext2 <0,2,1,6>, <1,5,3,7>
- 3692438720U, // <1,6,1,6>: Cost 4 vext2 <0,2,1,6>, <1,6,0,1>
- 2958183734U, // <1,6,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
- 2958183735U, // <1,6,1,u>: Cost 3 vzipr <0,u,1,1>, RHS
- 2721526201U, // <1,6,2,0>: Cost 3 vext3 <6,2,0,1>, <6,2,0,1>
- 3692439097U, // <1,6,2,1>: Cost 4 vext2 <0,2,1,6>, <2,1,6,0>
- 3692439144U, // <1,6,2,2>: Cost 4 vext2 <0,2,1,6>, <2,2,2,2>
- 3692439206U, // <1,6,2,3>: Cost 4 vext2 <0,2,1,6>, <2,3,0,1>
- 3636948278U, // <1,6,2,4>: Cost 4 vext1 <2,1,6,2>, RHS
- 3787674092U, // <1,6,2,5>: Cost 4 vext3 <4,u,5,1>, <6,2,5,7>
- 2618697658U, // <1,6,2,6>: Cost 3 vext2 <0,2,1,6>, <2,6,3,7>
- 2970799414U, // <1,6,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
- 2970799415U, // <1,6,2,u>: Cost 3 vzipr <3,0,1,2>, RHS
- 2563211366U, // <1,6,3,0>: Cost 3 vext1 <2,1,6,3>, LHS
- 3699738854U, // <1,6,3,1>: Cost 4 vext2 <1,4,1,6>, <3,1,1,1>
- 2563212860U, // <1,6,3,2>: Cost 3 vext1 <2,1,6,3>, <2,1,6,3>
- 3692439964U, // <1,6,3,3>: Cost 4 vext2 <0,2,1,6>, <3,3,3,3>
- 2563214646U, // <1,6,3,4>: Cost 3 vext1 <2,1,6,3>, RHS
- 4191820018U, // <1,6,3,5>: Cost 4 vtrnr <5,1,7,3>, <u,6,7,5>
- 2587103648U, // <1,6,3,6>: Cost 3 vext1 <6,1,6,3>, <6,1,6,3>
- 3087845306U, // <1,6,3,7>: Cost 3 vtrnr LHS, <2,6,3,7>
- 3087845307U, // <1,6,3,u>: Cost 3 vtrnr LHS, <2,6,3,u>
- 3693767570U, // <1,6,4,0>: Cost 4 vext2 <0,4,1,6>, <4,0,5,1>
- 3693767650U, // <1,6,4,1>: Cost 4 vext2 <0,4,1,6>, <4,1,5,0>
- 3636962877U, // <1,6,4,2>: Cost 4 vext1 <2,1,6,4>, <2,1,6,4>
- 3325088134U, // <1,6,4,3>: Cost 4 vrev <6,1,3,4>
- 3693767898U, // <1,6,4,4>: Cost 4 vext2 <0,4,1,6>, <4,4,5,5>
- 2618699062U, // <1,6,4,5>: Cost 3 vext2 <0,2,1,6>, RHS
- 3833670966U, // <1,6,4,6>: Cost 4 vuzpl <1,3,6,7>, RHS
- 4028632374U, // <1,6,4,7>: Cost 4 vzipr <0,3,1,4>, RHS
- 2618699305U, // <1,6,4,u>: Cost 3 vext2 <0,2,1,6>, RHS
- 3693768264U, // <1,6,5,0>: Cost 4 vext2 <0,4,1,6>, <5,0,1,2>
- 3630998373U, // <1,6,5,1>: Cost 4 vext1 <1,1,6,5>, <1,1,6,5>
- 3636971070U, // <1,6,5,2>: Cost 4 vext1 <2,1,6,5>, <2,1,6,5>
- 3642943767U, // <1,6,5,3>: Cost 4 vext1 <3,1,6,5>, <3,1,6,5>
- 3693768628U, // <1,6,5,4>: Cost 4 vext2 <0,4,1,6>, <5,4,5,6>
- 3732918276U, // <1,6,5,5>: Cost 4 vext2 <7,0,1,6>, <5,5,5,5>
- 2620690530U, // <1,6,5,6>: Cost 3 vext2 <0,5,1,6>, <5,6,7,0>
- 2955562294U, // <1,6,5,7>: Cost 3 vzipr <0,4,1,5>, RHS
- 2955562295U, // <1,6,5,u>: Cost 3 vzipr <0,4,1,5>, RHS
- 2724180733U, // <1,6,6,0>: Cost 3 vext3 <6,6,0,1>, <6,6,0,1>
- 3631006566U, // <1,6,6,1>: Cost 4 vext1 <1,1,6,6>, <1,1,6,6>
- 3631007674U, // <1,6,6,2>: Cost 4 vext1 <1,1,6,6>, <2,6,3,7>
- 3692442184U, // <1,6,6,3>: Cost 4 vext2 <0,2,1,6>, <6,3,7,0>
- 3631009078U, // <1,6,6,4>: Cost 4 vext1 <1,1,6,6>, RHS
- 3787674416U, // <1,6,6,5>: Cost 4 vext3 <4,u,5,1>, <6,6,5,7>
- 2713932600U, // <1,6,6,6>: Cost 3 vext3 <4,u,5,1>, <6,6,6,6>
- 2713932610U, // <1,6,6,7>: Cost 3 vext3 <4,u,5,1>, <6,6,7,7>
- 2713932619U, // <1,6,6,u>: Cost 3 vext3 <4,u,5,1>, <6,6,u,7>
- 1651102542U, // <1,6,7,0>: Cost 2 vext3 <6,7,0,1>, <6,7,0,1>
- 2724918103U, // <1,6,7,1>: Cost 3 vext3 <6,7,1,1>, <6,7,1,1>
- 2698302306U, // <1,6,7,2>: Cost 3 vext3 <2,3,0,1>, <6,7,2,3>
- 3642960153U, // <1,6,7,3>: Cost 4 vext1 <3,1,6,7>, <3,1,6,7>
- 2713932662U, // <1,6,7,4>: Cost 3 vext3 <4,u,5,1>, <6,7,4,5>
- 2725213051U, // <1,6,7,5>: Cost 3 vext3 <6,7,5,1>, <6,7,5,1>
- 2724844426U, // <1,6,7,6>: Cost 3 vext3 <6,7,0,1>, <6,7,6,7>
- 4035956022U, // <1,6,7,7>: Cost 4 vzipr <1,5,1,7>, RHS
- 1651692438U, // <1,6,7,u>: Cost 2 vext3 <6,7,u,1>, <6,7,u,1>
- 1651766175U, // <1,6,u,0>: Cost 2 vext3 <6,u,0,1>, <6,u,0,1>
- 2618701614U, // <1,6,u,1>: Cost 3 vext2 <0,2,1,6>, LHS
- 3135663508U, // <1,6,u,2>: Cost 3 vtrnr LHS, <4,6,u,2>
- 3692443580U, // <1,6,u,3>: Cost 4 vext2 <0,2,1,6>, <u,3,0,1>
- 2713932743U, // <1,6,u,4>: Cost 3 vext3 <4,u,5,1>, <6,u,4,5>
- 2618701978U, // <1,6,u,5>: Cost 3 vext2 <0,2,1,6>, RHS
- 2622683344U, // <1,6,u,6>: Cost 3 vext2 <0,u,1,6>, <u,6,3,7>
- 3087886266U, // <1,6,u,7>: Cost 3 vtrnr LHS, <2,6,3,7>
- 1652356071U, // <1,6,u,u>: Cost 2 vext3 <6,u,u,1>, <6,u,u,1>
- 2726171632U, // <1,7,0,0>: Cost 3 vext3 <7,0,0,1>, <7,0,0,1>
- 2626666598U, // <1,7,0,1>: Cost 3 vext2 <1,5,1,7>, LHS
- 3695100067U, // <1,7,0,2>: Cost 4 vext2 <0,6,1,7>, <0,2,0,1>
- 3707044102U, // <1,7,0,3>: Cost 4 vext2 <2,6,1,7>, <0,3,2,1>
- 2726466580U, // <1,7,0,4>: Cost 3 vext3 <7,0,4,1>, <7,0,4,1>
- 3654921933U, // <1,7,0,5>: Cost 4 vext1 <5,1,7,0>, <5,1,7,0>
- 2621358582U, // <1,7,0,6>: Cost 3 vext2 <0,6,1,7>, <0,6,1,7>
- 2622022215U, // <1,7,0,7>: Cost 3 vext2 <0,7,1,7>, <0,7,1,7>
- 2626667165U, // <1,7,0,u>: Cost 3 vext2 <1,5,1,7>, LHS
- 2593128550U, // <1,7,1,0>: Cost 3 vext1 <7,1,7,1>, LHS
- 2626667316U, // <1,7,1,1>: Cost 3 vext2 <1,5,1,7>, <1,1,1,1>
- 3700409238U, // <1,7,1,2>: Cost 4 vext2 <1,5,1,7>, <1,2,3,0>
- 2257294428U, // <1,7,1,3>: Cost 3 vrev <7,1,3,1>
- 2593131830U, // <1,7,1,4>: Cost 3 vext1 <7,1,7,1>, RHS
- 2626667646U, // <1,7,1,5>: Cost 3 vext2 <1,5,1,7>, <1,5,1,7>
- 2627331279U, // <1,7,1,6>: Cost 3 vext2 <1,6,1,7>, <1,6,1,7>
- 2593133696U, // <1,7,1,7>: Cost 3 vext1 <7,1,7,1>, <7,1,7,1>
- 2628658545U, // <1,7,1,u>: Cost 3 vext2 <1,u,1,7>, <1,u,1,7>
- 2587164774U, // <1,7,2,0>: Cost 3 vext1 <6,1,7,2>, LHS
- 3701073445U, // <1,7,2,1>: Cost 4 vext2 <1,6,1,7>, <2,1,3,7>
- 3700409960U, // <1,7,2,2>: Cost 4 vext2 <1,5,1,7>, <2,2,2,2>
- 2638612134U, // <1,7,2,3>: Cost 3 vext2 <3,5,1,7>, <2,3,0,1>
- 2587168054U, // <1,7,2,4>: Cost 3 vext1 <6,1,7,2>, RHS
- 3706382167U, // <1,7,2,5>: Cost 4 vext2 <2,5,1,7>, <2,5,1,7>
- 2587169192U, // <1,7,2,6>: Cost 3 vext1 <6,1,7,2>, <6,1,7,2>
- 3660911610U, // <1,7,2,7>: Cost 4 vext1 <6,1,7,2>, <7,0,1,2>
- 2587170606U, // <1,7,2,u>: Cost 3 vext1 <6,1,7,2>, LHS
- 1507459174U, // <1,7,3,0>: Cost 2 vext1 <5,1,7,3>, LHS
- 2569257984U, // <1,7,3,1>: Cost 3 vext1 <3,1,7,3>, <1,3,5,7>
- 2581202536U, // <1,7,3,2>: Cost 3 vext1 <5,1,7,3>, <2,2,2,2>
- 2569259294U, // <1,7,3,3>: Cost 3 vext1 <3,1,7,3>, <3,1,7,3>
- 1507462454U, // <1,7,3,4>: Cost 2 vext1 <5,1,7,3>, RHS
- 1507462864U, // <1,7,3,5>: Cost 2 vext1 <5,1,7,3>, <5,1,7,3>
- 2581205498U, // <1,7,3,6>: Cost 3 vext1 <5,1,7,3>, <6,2,7,3>
- 2581206010U, // <1,7,3,7>: Cost 3 vext1 <5,1,7,3>, <7,0,1,2>
- 1507465006U, // <1,7,3,u>: Cost 2 vext1 <5,1,7,3>, LHS
- 2728826164U, // <1,7,4,0>: Cost 3 vext3 <7,4,0,1>, <7,4,0,1>
- 3654951732U, // <1,7,4,1>: Cost 4 vext1 <5,1,7,4>, <1,1,1,1>
- 3330987094U, // <1,7,4,2>: Cost 4 vrev <7,1,2,4>
- 3331060831U, // <1,7,4,3>: Cost 4 vrev <7,1,3,4>
- 3787674971U, // <1,7,4,4>: Cost 4 vext3 <4,u,5,1>, <7,4,4,4>
- 2626669878U, // <1,7,4,5>: Cost 3 vext2 <1,5,1,7>, RHS
- 3785979241U, // <1,7,4,6>: Cost 4 vext3 <4,6,0,1>, <7,4,6,0>
- 3787085176U, // <1,7,4,7>: Cost 4 vext3 <4,7,6,1>, <7,4,7,6>
- 2626670121U, // <1,7,4,u>: Cost 3 vext2 <1,5,1,7>, RHS
- 2569273446U, // <1,7,5,0>: Cost 3 vext1 <3,1,7,5>, LHS
- 2569274368U, // <1,7,5,1>: Cost 3 vext1 <3,1,7,5>, <1,3,5,7>
- 3643016808U, // <1,7,5,2>: Cost 4 vext1 <3,1,7,5>, <2,2,2,2>
- 2569275680U, // <1,7,5,3>: Cost 3 vext1 <3,1,7,5>, <3,1,7,5>
- 2569276726U, // <1,7,5,4>: Cost 3 vext1 <3,1,7,5>, RHS
- 4102034790U, // <1,7,5,5>: Cost 4 vtrnl <1,3,5,7>, <7,4,5,6>
- 2651222067U, // <1,7,5,6>: Cost 3 vext2 <5,6,1,7>, <5,6,1,7>
- 3899378998U, // <1,7,5,7>: Cost 4 vuzpr <1,1,5,7>, RHS
- 2569279278U, // <1,7,5,u>: Cost 3 vext1 <3,1,7,5>, LHS
- 2730153430U, // <1,7,6,0>: Cost 3 vext3 <7,6,0,1>, <7,6,0,1>
- 2724845022U, // <1,7,6,1>: Cost 3 vext3 <6,7,0,1>, <7,6,1,0>
- 3643025338U, // <1,7,6,2>: Cost 4 vext1 <3,1,7,6>, <2,6,3,7>
- 3643025697U, // <1,7,6,3>: Cost 4 vext1 <3,1,7,6>, <3,1,7,6>
- 3643026742U, // <1,7,6,4>: Cost 4 vext1 <3,1,7,6>, RHS
- 3654971091U, // <1,7,6,5>: Cost 4 vext1 <5,1,7,6>, <5,1,7,6>
- 3787675153U, // <1,7,6,6>: Cost 4 vext3 <4,u,5,1>, <7,6,6,6>
- 2724845076U, // <1,7,6,7>: Cost 3 vext3 <6,7,0,1>, <7,6,7,0>
- 2725508637U, // <1,7,6,u>: Cost 3 vext3 <6,u,0,1>, <7,6,u,0>
- 2730817063U, // <1,7,7,0>: Cost 3 vext3 <7,7,0,1>, <7,7,0,1>
- 3631088436U, // <1,7,7,1>: Cost 4 vext1 <1,1,7,7>, <1,1,1,1>
- 3660949158U, // <1,7,7,2>: Cost 4 vext1 <6,1,7,7>, <2,3,0,1>
- 3801904705U, // <1,7,7,3>: Cost 4 vext3 <7,3,0,1>, <7,7,3,0>
- 3631090998U, // <1,7,7,4>: Cost 4 vext1 <1,1,7,7>, RHS
- 2662503828U, // <1,7,7,5>: Cost 3 vext2 <7,5,1,7>, <7,5,1,7>
- 3660951981U, // <1,7,7,6>: Cost 4 vext1 <6,1,7,7>, <6,1,7,7>
- 2713933420U, // <1,7,7,7>: Cost 3 vext3 <4,u,5,1>, <7,7,7,7>
- 2731406959U, // <1,7,7,u>: Cost 3 vext3 <7,7,u,1>, <7,7,u,1>
- 1507500134U, // <1,7,u,0>: Cost 2 vext1 <5,1,7,u>, LHS
- 2626672430U, // <1,7,u,1>: Cost 3 vext2 <1,5,1,7>, LHS
- 2581243496U, // <1,7,u,2>: Cost 3 vext1 <5,1,7,u>, <2,2,2,2>
- 2569300259U, // <1,7,u,3>: Cost 3 vext1 <3,1,7,u>, <3,1,7,u>
- 1507503414U, // <1,7,u,4>: Cost 2 vext1 <5,1,7,u>, RHS
- 1507503829U, // <1,7,u,5>: Cost 2 vext1 <5,1,7,u>, <5,1,7,u>
- 2581246458U, // <1,7,u,6>: Cost 3 vext1 <5,1,7,u>, <6,2,7,3>
- 2581246970U, // <1,7,u,7>: Cost 3 vext1 <5,1,7,u>, <7,0,1,2>
- 1507505966U, // <1,7,u,u>: Cost 2 vext1 <5,1,7,u>, LHS
- 1543643153U, // <1,u,0,0>: Cost 2 vext2 <0,0,1,u>, <0,0,1,u>
- 1546297446U, // <1,u,0,1>: Cost 2 vext2 <0,4,1,u>, LHS
- 2819448852U, // <1,u,0,2>: Cost 3 vuzpr LHS, <0,0,2,2>
- 2619375876U, // <1,u,0,3>: Cost 3 vext2 <0,3,1,u>, <0,3,1,u>
- 1546297685U, // <1,u,0,4>: Cost 2 vext2 <0,4,1,u>, <0,4,1,u>
- 1658771190U, // <1,u,0,5>: Cost 2 vext3 <u,0,5,1>, <u,0,5,1>
- 2736789248U, // <1,u,0,6>: Cost 3 vext3 <u,7,0,1>, <u,0,6,2>
- 2659189376U, // <1,u,0,7>: Cost 3 vext2 <7,0,1,u>, <0,7,u,1>
- 1546298013U, // <1,u,0,u>: Cost 2 vext2 <0,4,1,u>, LHS
- 1483112550U, // <1,u,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,u,1,1>: Cost 1 vdup1 LHS
- 1616009006U, // <1,u,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
- 1745707110U, // <1,u,1,3>: Cost 2 vuzpr LHS, LHS
- 1483115830U, // <1,u,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 2620040336U, // <1,u,1,5>: Cost 3 vext2 <0,4,1,u>, <1,5,3,7>
- 3026622618U, // <1,u,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
- 2958183752U, // <1,u,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
- 202162278U, // <1,u,1,u>: Cost 1 vdup1 LHS
- 2819449750U, // <1,u,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
- 2893207342U, // <1,u,2,1>: Cost 3 vzipl <1,2,3,0>, LHS
- 2819448996U, // <1,u,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
- 2819450482U, // <1,u,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
- 2819449754U, // <1,u,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
- 2893207706U, // <1,u,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
- 2819449036U, // <1,u,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
- 2970799432U, // <1,u,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
- 2819449002U, // <1,u,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
- 403931292U, // <1,u,3,0>: Cost 1 vext1 LHS, LHS
- 1477673718U, // <1,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 115726126U, // <1,u,3,2>: Cost 1 vrev LHS
- 2014102173U, // <1,u,3,3>: Cost 2 vtrnr LHS, LHS
- 403934518U, // <1,u,3,4>: Cost 1 vext1 LHS, RHS
- 1507536601U, // <1,u,3,5>: Cost 2 vext1 <5,1,u,3>, <5,1,u,3>
- 1525453306U, // <1,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 2014105129U, // <1,u,3,7>: Cost 2 vtrnr LHS, RHS
- 403937070U, // <1,u,3,u>: Cost 1 vext1 LHS, LHS
- 2620042157U, // <1,u,4,0>: Cost 3 vext2 <0,4,1,u>, <4,0,u,1>
- 2620042237U, // <1,u,4,1>: Cost 3 vext2 <0,4,1,u>, <4,1,u,0>
- 2263217967U, // <1,u,4,2>: Cost 3 vrev <u,1,2,4>
- 2569341224U, // <1,u,4,3>: Cost 3 vext1 <3,1,u,4>, <3,1,u,4>
- 2569342262U, // <1,u,4,4>: Cost 3 vext1 <3,1,u,4>, RHS
- 1546300726U, // <1,u,4,5>: Cost 2 vext2 <0,4,1,u>, RHS
- 2819449180U, // <1,u,4,6>: Cost 3 vuzpr LHS, <0,4,2,6>
- 2724845649U, // <1,u,4,7>: Cost 3 vext3 <6,7,0,1>, <u,4,7,6>
- 1546300969U, // <1,u,4,u>: Cost 2 vext2 <0,4,1,u>, RHS
- 2551431270U, // <1,u,5,0>: Cost 3 vext1 <0,1,u,5>, LHS
- 2551432192U, // <1,u,5,1>: Cost 3 vext1 <0,1,u,5>, <1,3,5,7>
- 3028293422U, // <1,u,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
- 2955559068U, // <1,u,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
- 2551434550U, // <1,u,5,4>: Cost 3 vext1 <0,1,u,5>, RHS
- 2895255706U, // <1,u,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
- 1616009370U, // <1,u,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 1745710390U, // <1,u,5,7>: Cost 2 vuzpr LHS, RHS
- 1745710391U, // <1,u,5,u>: Cost 2 vuzpr LHS, RHS
- 2653221159U, // <1,u,6,0>: Cost 3 vext2 <6,0,1,u>, <6,0,1,u>
- 2725509303U, // <1,u,6,1>: Cost 3 vext3 <6,u,0,1>, <u,6,1,0>
- 2659193338U, // <1,u,6,2>: Cost 3 vext2 <7,0,1,u>, <6,2,7,3>
- 2689751248U, // <1,u,6,3>: Cost 3 vext3 <0,u,1,1>, <u,6,3,7>
- 2867228774U, // <1,u,6,4>: Cost 3 vuzpr LHS, <5,6,7,4>
- 3764820194U, // <1,u,6,5>: Cost 4 vext3 <1,1,1,1>, <u,6,5,7>
- 2657202957U, // <1,u,6,6>: Cost 3 vext2 <6,6,1,u>, <6,6,1,u>
- 2819450810U, // <1,u,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
- 2819450811U, // <1,u,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
- 1585452032U, // <1,u,7,0>: Cost 2 vext2 <7,0,1,u>, <7,0,1,u>
- 2557420340U, // <1,u,7,1>: Cost 3 vext1 <1,1,u,7>, <1,1,1,1>
- 2569365158U, // <1,u,7,2>: Cost 3 vext1 <3,1,u,7>, <2,3,0,1>
- 2569365803U, // <1,u,7,3>: Cost 3 vext1 <3,1,u,7>, <3,1,u,7>
- 2557422902U, // <1,u,7,4>: Cost 3 vext1 <1,1,u,7>, RHS
- 2662512021U, // <1,u,7,5>: Cost 3 vext2 <7,5,1,u>, <7,5,1,u>
- 2724845884U, // <1,u,7,6>: Cost 3 vext3 <6,7,0,1>, <u,7,6,7>
- 2659194476U, // <1,u,7,7>: Cost 3 vext2 <7,0,1,u>, <7,7,7,7>
- 1590761096U, // <1,u,7,u>: Cost 2 vext2 <7,u,1,u>, <7,u,1,u>
- 403972257U, // <1,u,u,0>: Cost 1 vext1 LHS, LHS
- 202162278U, // <1,u,u,1>: Cost 1 vdup1 LHS
- 115767091U, // <1,u,u,2>: Cost 1 vrev LHS
- 1745707677U, // <1,u,u,3>: Cost 2 vuzpr LHS, LHS
- 403975478U, // <1,u,u,4>: Cost 1 vext1 LHS, RHS
- 1546303642U, // <1,u,u,5>: Cost 2 vext2 <0,4,1,u>, RHS
- 1616009613U, // <1,u,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 1745710633U, // <1,u,u,7>: Cost 2 vuzpr LHS, RHS
- 403978030U, // <1,u,u,u>: Cost 1 vext1 LHS, LHS
- 2551463936U, // <2,0,0,0>: Cost 3 vext1 <0,2,0,0>, <0,0,0,0>
- 2685698058U, // <2,0,0,1>: Cost 3 vext3 <0,2,0,2>, <0,0,1,1>
- 1610776596U, // <2,0,0,2>: Cost 2 vext3 <0,0,2,2>, <0,0,2,2>
- 2619384069U, // <2,0,0,3>: Cost 3 vext2 <0,3,2,0>, <0,3,2,0>
- 2551467318U, // <2,0,0,4>: Cost 3 vext1 <0,2,0,0>, RHS
- 3899836596U, // <2,0,0,5>: Cost 4 vuzpr <1,2,3,0>, <3,0,4,5>
- 2621374968U, // <2,0,0,6>: Cost 3 vext2 <0,6,2,0>, <0,6,2,0>
- 4168271334U, // <2,0,0,7>: Cost 4 vtrnr <1,2,3,0>, <2,0,5,7>
- 1611219018U, // <2,0,0,u>: Cost 2 vext3 <0,0,u,2>, <0,0,u,2>
- 2551472138U, // <2,0,1,0>: Cost 3 vext1 <0,2,0,1>, <0,0,1,1>
- 2690564186U, // <2,0,1,1>: Cost 3 vext3 <1,0,3,2>, <0,1,1,0>
- 1611956326U, // <2,0,1,2>: Cost 2 vext3 <0,2,0,2>, LHS
- 2826092646U, // <2,0,1,3>: Cost 3 vuzpr <1,2,3,0>, LHS
- 2551475510U, // <2,0,1,4>: Cost 3 vext1 <0,2,0,1>, RHS
- 3692463248U, // <2,0,1,5>: Cost 4 vext2 <0,2,2,0>, <1,5,3,7>
- 2587308473U, // <2,0,1,6>: Cost 3 vext1 <6,2,0,1>, <6,2,0,1>
- 3661050874U, // <2,0,1,7>: Cost 4 vext1 <6,2,0,1>, <7,0,1,2>
- 1611956380U, // <2,0,1,u>: Cost 2 vext3 <0,2,0,2>, LHS
- 1477738598U, // <2,0,2,0>: Cost 2 vext1 <0,2,0,2>, LHS
- 2551481078U, // <2,0,2,1>: Cost 3 vext1 <0,2,0,2>, <1,0,3,2>
- 2551481796U, // <2,0,2,2>: Cost 3 vext1 <0,2,0,2>, <2,0,2,0>
- 2551482518U, // <2,0,2,3>: Cost 3 vext1 <0,2,0,2>, <3,0,1,2>
- 1477741878U, // <2,0,2,4>: Cost 2 vext1 <0,2,0,2>, RHS
- 2551484112U, // <2,0,2,5>: Cost 3 vext1 <0,2,0,2>, <5,1,7,3>
- 2551484759U, // <2,0,2,6>: Cost 3 vext1 <0,2,0,2>, <6,0,7,2>
- 2551485434U, // <2,0,2,7>: Cost 3 vext1 <0,2,0,2>, <7,0,1,2>
- 1477744430U, // <2,0,2,u>: Cost 2 vext1 <0,2,0,2>, LHS
- 2953625600U, // <2,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
- 2953627302U, // <2,0,3,1>: Cost 3 vzipr LHS, <2,3,0,1>
- 2953625764U, // <2,0,3,2>: Cost 3 vzipr LHS, <0,2,0,2>
- 4027369695U, // <2,0,3,3>: Cost 4 vzipr LHS, <3,1,0,3>
- 3625233718U, // <2,0,3,4>: Cost 4 vext1 <0,2,0,3>, RHS
- 3899836110U, // <2,0,3,5>: Cost 4 vuzpr <1,2,3,0>, <2,3,4,5>
- 4032012618U, // <2,0,3,6>: Cost 4 vzipr LHS, <0,4,0,6>
- 3899835392U, // <2,0,3,7>: Cost 4 vuzpr <1,2,3,0>, <1,3,5,7>
- 2953625770U, // <2,0,3,u>: Cost 3 vzipr LHS, <0,2,0,u>
- 2551496806U, // <2,0,4,0>: Cost 3 vext1 <0,2,0,4>, LHS
- 2685698386U, // <2,0,4,1>: Cost 3 vext3 <0,2,0,2>, <0,4,1,5>
- 2685698396U, // <2,0,4,2>: Cost 3 vext3 <0,2,0,2>, <0,4,2,6>
- 3625240726U, // <2,0,4,3>: Cost 4 vext1 <0,2,0,4>, <3,0,1,2>
- 2551500086U, // <2,0,4,4>: Cost 3 vext1 <0,2,0,4>, RHS
- 2618723638U, // <2,0,4,5>: Cost 3 vext2 <0,2,2,0>, RHS
- 2765409590U, // <2,0,4,6>: Cost 3 vuzpl <2,3,0,1>, RHS
- 3799990664U, // <2,0,4,7>: Cost 4 vext3 <7,0,1,2>, <0,4,7,5>
- 2685698450U, // <2,0,4,u>: Cost 3 vext3 <0,2,0,2>, <0,4,u,6>
- 3625246822U, // <2,0,5,0>: Cost 4 vext1 <0,2,0,5>, LHS
- 3289776304U, // <2,0,5,1>: Cost 4 vrev <0,2,1,5>
- 2690564526U, // <2,0,5,2>: Cost 3 vext3 <1,0,3,2>, <0,5,2,7>
- 3289923778U, // <2,0,5,3>: Cost 4 vrev <0,2,3,5>
- 2216255691U, // <2,0,5,4>: Cost 3 vrev <0,2,4,5>
- 3726307332U, // <2,0,5,5>: Cost 4 vext2 <5,u,2,0>, <5,5,5,5>
- 3726307426U, // <2,0,5,6>: Cost 4 vext2 <5,u,2,0>, <5,6,7,0>
- 2826095926U, // <2,0,5,7>: Cost 3 vuzpr <1,2,3,0>, RHS
- 2216550639U, // <2,0,5,u>: Cost 3 vrev <0,2,u,5>
- 4162420736U, // <2,0,6,0>: Cost 4 vtrnr <0,2,4,6>, <0,0,0,0>
- 2901885030U, // <2,0,6,1>: Cost 3 vzipl <2,6,3,7>, LHS
- 2685698559U, // <2,0,6,2>: Cost 3 vext3 <0,2,0,2>, <0,6,2,7>
- 3643173171U, // <2,0,6,3>: Cost 4 vext1 <3,2,0,6>, <3,2,0,6>
- 2216263884U, // <2,0,6,4>: Cost 3 vrev <0,2,4,6>
- 3730289341U, // <2,0,6,5>: Cost 4 vext2 <6,5,2,0>, <6,5,2,0>
- 3726308152U, // <2,0,6,6>: Cost 4 vext2 <5,u,2,0>, <6,6,6,6>
- 3899836346U, // <2,0,6,7>: Cost 4 vuzpr <1,2,3,0>, <2,6,3,7>
- 2216558832U, // <2,0,6,u>: Cost 3 vrev <0,2,u,6>
- 2659202049U, // <2,0,7,0>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
- 3726308437U, // <2,0,7,1>: Cost 4 vext2 <5,u,2,0>, <7,1,2,3>
- 2726249034U, // <2,0,7,2>: Cost 3 vext3 <7,0,1,2>, <0,7,2,1>
- 3734934772U, // <2,0,7,3>: Cost 4 vext2 <7,3,2,0>, <7,3,2,0>
- 3726308710U, // <2,0,7,4>: Cost 4 vext2 <5,u,2,0>, <7,4,5,6>
- 3726308814U, // <2,0,7,5>: Cost 4 vext2 <5,u,2,0>, <7,5,u,2>
- 3736925671U, // <2,0,7,6>: Cost 4 vext2 <7,6,2,0>, <7,6,2,0>
- 3726308972U, // <2,0,7,7>: Cost 4 vext2 <5,u,2,0>, <7,7,7,7>
- 2659202049U, // <2,0,7,u>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
- 1477787750U, // <2,0,u,0>: Cost 2 vext1 <0,2,0,u>, LHS
- 2953668262U, // <2,0,u,1>: Cost 3 vzipr LHS, <2,3,0,1>
- 1611956893U, // <2,0,u,2>: Cost 2 vext3 <0,2,0,2>, LHS
- 2551531670U, // <2,0,u,3>: Cost 3 vext1 <0,2,0,u>, <3,0,1,2>
- 1477791030U, // <2,0,u,4>: Cost 2 vext1 <0,2,0,u>, RHS
- 2618726554U, // <2,0,u,5>: Cost 3 vext2 <0,2,2,0>, RHS
- 2765412506U, // <2,0,u,6>: Cost 3 vuzpl <2,3,0,1>, RHS
- 2826096169U, // <2,0,u,7>: Cost 3 vuzpr <1,2,3,0>, RHS
- 1611956947U, // <2,0,u,u>: Cost 2 vext3 <0,2,0,2>, LHS
- 2569453670U, // <2,1,0,0>: Cost 3 vext1 <3,2,1,0>, LHS
- 2619392102U, // <2,1,0,1>: Cost 3 vext2 <0,3,2,1>, LHS
- 3759440619U, // <2,1,0,2>: Cost 4 vext3 <0,2,0,2>, <1,0,2,0>
- 1616823030U, // <2,1,0,3>: Cost 2 vext3 <1,0,3,2>, <1,0,3,2>
- 2569456950U, // <2,1,0,4>: Cost 3 vext1 <3,2,1,0>, RHS
- 2690712328U, // <2,1,0,5>: Cost 3 vext3 <1,0,5,2>, <1,0,5,2>
- 3661115841U, // <2,1,0,6>: Cost 4 vext1 <6,2,1,0>, <6,2,1,0>
- 2622046794U, // <2,1,0,7>: Cost 3 vext2 <0,7,2,1>, <0,7,2,1>
- 1617191715U, // <2,1,0,u>: Cost 2 vext3 <1,0,u,2>, <1,0,u,2>
- 2551545958U, // <2,1,1,0>: Cost 3 vext1 <0,2,1,1>, LHS
- 2685698868U, // <2,1,1,1>: Cost 3 vext3 <0,2,0,2>, <1,1,1,1>
- 2628682646U, // <2,1,1,2>: Cost 3 vext2 <1,u,2,1>, <1,2,3,0>
- 2685698888U, // <2,1,1,3>: Cost 3 vext3 <0,2,0,2>, <1,1,3,3>
- 2551549238U, // <2,1,1,4>: Cost 3 vext1 <0,2,1,1>, RHS
- 3693134992U, // <2,1,1,5>: Cost 4 vext2 <0,3,2,1>, <1,5,3,7>
- 3661124034U, // <2,1,1,6>: Cost 4 vext1 <6,2,1,1>, <6,2,1,1>
- 3625292794U, // <2,1,1,7>: Cost 4 vext1 <0,2,1,1>, <7,0,1,2>
- 2685698933U, // <2,1,1,u>: Cost 3 vext3 <0,2,0,2>, <1,1,u,3>
- 2551554150U, // <2,1,2,0>: Cost 3 vext1 <0,2,1,2>, LHS
- 3893649571U, // <2,1,2,1>: Cost 4 vuzpr <0,2,0,1>, <0,2,0,1>
- 2551555688U, // <2,1,2,2>: Cost 3 vext1 <0,2,1,2>, <2,2,2,2>
- 2685698966U, // <2,1,2,3>: Cost 3 vext3 <0,2,0,2>, <1,2,3,0>
- 2551557430U, // <2,1,2,4>: Cost 3 vext1 <0,2,1,2>, RHS
- 3763422123U, // <2,1,2,5>: Cost 4 vext3 <0,u,0,2>, <1,2,5,3>
- 3693135802U, // <2,1,2,6>: Cost 4 vext2 <0,3,2,1>, <2,6,3,7>
- 2726249402U, // <2,1,2,7>: Cost 3 vext3 <7,0,1,2>, <1,2,7,0>
- 2685699011U, // <2,1,2,u>: Cost 3 vext3 <0,2,0,2>, <1,2,u,0>
- 2551562342U, // <2,1,3,0>: Cost 3 vext1 <0,2,1,3>, LHS
- 2953625610U, // <2,1,3,1>: Cost 3 vzipr LHS, <0,0,1,1>
- 2953627798U, // <2,1,3,2>: Cost 3 vzipr LHS, <3,0,1,2>
- 2953626584U, // <2,1,3,3>: Cost 3 vzipr LHS, <1,3,1,3>
- 2551565622U, // <2,1,3,4>: Cost 3 vext1 <0,2,1,3>, RHS
- 2953625938U, // <2,1,3,5>: Cost 3 vzipr LHS, <0,4,1,5>
- 2587398596U, // <2,1,3,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
- 4032013519U, // <2,1,3,7>: Cost 4 vzipr LHS, <1,6,1,7>
- 2953625617U, // <2,1,3,u>: Cost 3 vzipr LHS, <0,0,1,u>
- 2690565154U, // <2,1,4,0>: Cost 3 vext3 <1,0,3,2>, <1,4,0,5>
- 3625313270U, // <2,1,4,1>: Cost 4 vext1 <0,2,1,4>, <1,3,4,6>
- 3771532340U, // <2,1,4,2>: Cost 4 vext3 <2,2,2,2>, <1,4,2,5>
- 1148404634U, // <2,1,4,3>: Cost 2 vrev <1,2,3,4>
- 3625315638U, // <2,1,4,4>: Cost 4 vext1 <0,2,1,4>, RHS
- 2619395382U, // <2,1,4,5>: Cost 3 vext2 <0,3,2,1>, RHS
- 3837242678U, // <2,1,4,6>: Cost 4 vuzpl <2,0,1,2>, RHS
- 3799991394U, // <2,1,4,7>: Cost 4 vext3 <7,0,1,2>, <1,4,7,6>
- 1148773319U, // <2,1,4,u>: Cost 2 vrev <1,2,u,4>
- 2551578726U, // <2,1,5,0>: Cost 3 vext1 <0,2,1,5>, LHS
- 2551579648U, // <2,1,5,1>: Cost 3 vext1 <0,2,1,5>, <1,3,5,7>
- 3625321952U, // <2,1,5,2>: Cost 4 vext1 <0,2,1,5>, <2,0,5,1>
- 2685699216U, // <2,1,5,3>: Cost 3 vext3 <0,2,0,2>, <1,5,3,7>
- 2551582006U, // <2,1,5,4>: Cost 3 vext1 <0,2,1,5>, RHS
- 3740913668U, // <2,1,5,5>: Cost 4 vext2 <u,3,2,1>, <5,5,5,5>
- 3661156806U, // <2,1,5,6>: Cost 4 vext1 <6,2,1,5>, <6,2,1,5>
- 3893652790U, // <2,1,5,7>: Cost 4 vuzpr <0,2,0,1>, RHS
- 2685699261U, // <2,1,5,u>: Cost 3 vext3 <0,2,0,2>, <1,5,u,7>
- 2551586918U, // <2,1,6,0>: Cost 3 vext1 <0,2,1,6>, LHS
- 3625329398U, // <2,1,6,1>: Cost 4 vext1 <0,2,1,6>, <1,0,3,2>
- 2551588794U, // <2,1,6,2>: Cost 3 vext1 <0,2,1,6>, <2,6,3,7>
- 3088679014U, // <2,1,6,3>: Cost 3 vtrnr <0,2,4,6>, LHS
- 2551590198U, // <2,1,6,4>: Cost 3 vext1 <0,2,1,6>, RHS
- 4029382994U, // <2,1,6,5>: Cost 4 vzipr <0,4,2,6>, <0,4,1,5>
- 3625333560U, // <2,1,6,6>: Cost 4 vext1 <0,2,1,6>, <6,6,6,6>
- 3731624800U, // <2,1,6,7>: Cost 4 vext2 <6,7,2,1>, <6,7,2,1>
- 2551592750U, // <2,1,6,u>: Cost 3 vext1 <0,2,1,6>, LHS
- 2622051322U, // <2,1,7,0>: Cost 3 vext2 <0,7,2,1>, <7,0,1,2>
- 3733615699U, // <2,1,7,1>: Cost 4 vext2 <7,1,2,1>, <7,1,2,1>
- 3795125538U, // <2,1,7,2>: Cost 4 vext3 <6,1,7,2>, <1,7,2,0>
- 2222171037U, // <2,1,7,3>: Cost 3 vrev <1,2,3,7>
- 3740915046U, // <2,1,7,4>: Cost 4 vext2 <u,3,2,1>, <7,4,5,6>
- 3296060335U, // <2,1,7,5>: Cost 4 vrev <1,2,5,7>
- 3736933864U, // <2,1,7,6>: Cost 4 vext2 <7,6,2,1>, <7,6,2,1>
- 3805300055U, // <2,1,7,7>: Cost 4 vext3 <7,u,1,2>, <1,7,7,u>
- 2669827714U, // <2,1,7,u>: Cost 3 vext2 <u,7,2,1>, <7,u,1,2>
- 2551603302U, // <2,1,u,0>: Cost 3 vext1 <0,2,1,u>, LHS
- 2953666570U, // <2,1,u,1>: Cost 3 vzipr LHS, <0,0,1,1>
- 2953668758U, // <2,1,u,2>: Cost 3 vzipr LHS, <3,0,1,2>
- 1148437406U, // <2,1,u,3>: Cost 2 vrev <1,2,3,u>
- 2551606582U, // <2,1,u,4>: Cost 3 vext1 <0,2,1,u>, RHS
- 2953666898U, // <2,1,u,5>: Cost 3 vzipr LHS, <0,4,1,5>
- 2587398596U, // <2,1,u,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
- 2669828370U, // <2,1,u,7>: Cost 3 vext2 <u,7,2,1>, <u,7,2,1>
- 1148806091U, // <2,1,u,u>: Cost 2 vrev <1,2,u,u>
- 1543667732U, // <2,2,0,0>: Cost 2 vext2 <0,0,2,2>, <0,0,2,2>
- 1548976230U, // <2,2,0,1>: Cost 2 vext2 <0,u,2,2>, LHS
- 2685699524U, // <2,2,0,2>: Cost 3 vext3 <0,2,0,2>, <2,0,2,0>
- 2685699535U, // <2,2,0,3>: Cost 3 vext3 <0,2,0,2>, <2,0,3,2>
- 2551614774U, // <2,2,0,4>: Cost 3 vext1 <0,2,2,0>, RHS
- 3704422830U, // <2,2,0,5>: Cost 4 vext2 <2,2,2,2>, <0,5,2,7>
- 3893657642U, // <2,2,0,6>: Cost 4 vuzpr <0,2,0,2>, <0,0,4,6>
- 3770574323U, // <2,2,0,7>: Cost 4 vext3 <2,0,7,2>, <2,0,7,2>
- 1548976796U, // <2,2,0,u>: Cost 2 vext2 <0,u,2,2>, <0,u,2,2>
- 2622718710U, // <2,2,1,0>: Cost 3 vext2 <0,u,2,2>, <1,0,3,2>
- 2622718772U, // <2,2,1,1>: Cost 3 vext2 <0,u,2,2>, <1,1,1,1>
- 2622718870U, // <2,2,1,2>: Cost 3 vext2 <0,u,2,2>, <1,2,3,0>
- 2819915878U, // <2,2,1,3>: Cost 3 vuzpr <0,2,0,2>, LHS
- 3625364790U, // <2,2,1,4>: Cost 4 vext1 <0,2,2,1>, RHS
- 2622719120U, // <2,2,1,5>: Cost 3 vext2 <0,u,2,2>, <1,5,3,7>
- 3760031292U, // <2,2,1,6>: Cost 4 vext3 <0,2,u,2>, <2,1,6,3>
- 3667170468U, // <2,2,1,7>: Cost 4 vext1 <7,2,2,1>, <7,2,2,1>
- 2819915883U, // <2,2,1,u>: Cost 3 vuzpr <0,2,0,2>, LHS
- 1489829990U, // <2,2,2,0>: Cost 2 vext1 <2,2,2,2>, LHS
- 2563572470U, // <2,2,2,1>: Cost 3 vext1 <2,2,2,2>, <1,0,3,2>
- 269271142U, // <2,2,2,2>: Cost 1 vdup2 LHS
- 2685699698U, // <2,2,2,3>: Cost 3 vext3 <0,2,0,2>, <2,2,3,3>
- 1489833270U, // <2,2,2,4>: Cost 2 vext1 <2,2,2,2>, RHS
- 2685699720U, // <2,2,2,5>: Cost 3 vext3 <0,2,0,2>, <2,2,5,7>
- 2622719930U, // <2,2,2,6>: Cost 3 vext2 <0,u,2,2>, <2,6,3,7>
- 2593436837U, // <2,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
- 269271142U, // <2,2,2,u>: Cost 1 vdup2 LHS
- 2685699750U, // <2,2,3,0>: Cost 3 vext3 <0,2,0,2>, <2,3,0,1>
- 2690565806U, // <2,2,3,1>: Cost 3 vext3 <1,0,3,2>, <2,3,1,0>
- 2953627240U, // <2,2,3,2>: Cost 3 vzipr LHS, <2,2,2,2>
- 1879883878U, // <2,2,3,3>: Cost 2 vzipr LHS, LHS
- 2685699790U, // <2,2,3,4>: Cost 3 vext3 <0,2,0,2>, <2,3,4,5>
- 3893659342U, // <2,2,3,5>: Cost 4 vuzpr <0,2,0,2>, <2,3,4,5>
- 2958270812U, // <2,2,3,6>: Cost 3 vzipr LHS, <0,4,2,6>
- 2593445030U, // <2,2,3,7>: Cost 3 vext1 <7,2,2,3>, <7,2,2,3>
- 1879883883U, // <2,2,3,u>: Cost 2 vzipr LHS, LHS
- 2551644262U, // <2,2,4,0>: Cost 3 vext1 <0,2,2,4>, LHS
- 3625386742U, // <2,2,4,1>: Cost 4 vext1 <0,2,2,4>, <1,0,3,2>
- 2551645902U, // <2,2,4,2>: Cost 3 vext1 <0,2,2,4>, <2,3,4,5>
- 3759441686U, // <2,2,4,3>: Cost 4 vext3 <0,2,0,2>, <2,4,3,5>
- 2551647542U, // <2,2,4,4>: Cost 3 vext1 <0,2,2,4>, RHS
- 1548979510U, // <2,2,4,5>: Cost 2 vext2 <0,u,2,2>, RHS
- 2764901686U, // <2,2,4,6>: Cost 3 vuzpl <2,2,2,2>, RHS
- 3667195047U, // <2,2,4,7>: Cost 4 vext1 <7,2,2,4>, <7,2,2,4>
- 1548979753U, // <2,2,4,u>: Cost 2 vext2 <0,u,2,2>, RHS
- 3696463432U, // <2,2,5,0>: Cost 4 vext2 <0,u,2,2>, <5,0,1,2>
- 2617413328U, // <2,2,5,1>: Cost 3 vext2 <0,0,2,2>, <5,1,7,3>
- 2685699936U, // <2,2,5,2>: Cost 3 vext3 <0,2,0,2>, <2,5,2,7>
- 4027383910U, // <2,2,5,3>: Cost 4 vzipr <0,1,2,5>, LHS
- 2228201085U, // <2,2,5,4>: Cost 3 vrev <2,2,4,5>
- 2617413636U, // <2,2,5,5>: Cost 3 vext2 <0,0,2,2>, <5,5,5,5>
- 2617413730U, // <2,2,5,6>: Cost 3 vext2 <0,0,2,2>, <5,6,7,0>
- 2819919158U, // <2,2,5,7>: Cost 3 vuzpr <0,2,0,2>, RHS
- 2819919159U, // <2,2,5,u>: Cost 3 vuzpr <0,2,0,2>, RHS
- 3625402554U, // <2,2,6,0>: Cost 4 vext1 <0,2,2,6>, <0,2,2,6>
- 3760031652U, // <2,2,6,1>: Cost 4 vext3 <0,2,u,2>, <2,6,1,3>
- 2617414138U, // <2,2,6,2>: Cost 3 vext2 <0,0,2,2>, <6,2,7,3>
- 2685700026U, // <2,2,6,3>: Cost 3 vext3 <0,2,0,2>, <2,6,3,7>
- 3625405750U, // <2,2,6,4>: Cost 4 vext1 <0,2,2,6>, RHS
- 3760031692U, // <2,2,6,5>: Cost 4 vext3 <0,2,u,2>, <2,6,5,7>
- 3088679116U, // <2,2,6,6>: Cost 3 vtrnr <0,2,4,6>, <0,2,4,6>
- 2657891169U, // <2,2,6,7>: Cost 3 vext2 <6,7,2,2>, <6,7,2,2>
- 2685700071U, // <2,2,6,u>: Cost 3 vext3 <0,2,0,2>, <2,6,u,7>
- 2726250474U, // <2,2,7,0>: Cost 3 vext3 <7,0,1,2>, <2,7,0,1>
- 3704427616U, // <2,2,7,1>: Cost 4 vext2 <2,2,2,2>, <7,1,3,5>
- 2660545701U, // <2,2,7,2>: Cost 3 vext2 <7,2,2,2>, <7,2,2,2>
- 4030718054U, // <2,2,7,3>: Cost 4 vzipr <0,6,2,7>, LHS
- 2617415014U, // <2,2,7,4>: Cost 3 vext2 <0,0,2,2>, <7,4,5,6>
- 3302033032U, // <2,2,7,5>: Cost 4 vrev <2,2,5,7>
- 3661246929U, // <2,2,7,6>: Cost 4 vext1 <6,2,2,7>, <6,2,2,7>
- 2617415276U, // <2,2,7,7>: Cost 3 vext2 <0,0,2,2>, <7,7,7,7>
- 2731558962U, // <2,2,7,u>: Cost 3 vext3 <7,u,1,2>, <2,7,u,1>
- 1489829990U, // <2,2,u,0>: Cost 2 vext1 <2,2,2,2>, LHS
- 1548982062U, // <2,2,u,1>: Cost 2 vext2 <0,u,2,2>, LHS
- 269271142U, // <2,2,u,2>: Cost 1 vdup2 LHS
- 1879924838U, // <2,2,u,3>: Cost 2 vzipr LHS, LHS
- 1489833270U, // <2,2,u,4>: Cost 2 vext1 <2,2,2,2>, RHS
- 1548982426U, // <2,2,u,5>: Cost 2 vext2 <0,u,2,2>, RHS
- 2953666908U, // <2,2,u,6>: Cost 3 vzipr LHS, <0,4,2,6>
- 2819919401U, // <2,2,u,7>: Cost 3 vuzpr <0,2,0,2>, RHS
- 269271142U, // <2,2,u,u>: Cost 1 vdup2 LHS
- 1544339456U, // <2,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 470597734U, // <2,3,0,1>: Cost 1 vext2 LHS, LHS
- 1548984484U, // <2,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 2619408648U, // <2,3,0,3>: Cost 3 vext2 <0,3,2,3>, <0,3,2,3>
- 1548984658U, // <2,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2665857454U, // <2,3,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
- 2622726655U, // <2,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
- 2593494188U, // <2,3,0,7>: Cost 3 vext1 <7,2,3,0>, <7,2,3,0>
- 470598301U, // <2,3,0,u>: Cost 1 vext2 LHS, LHS
- 1544340214U, // <2,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544340276U, // <2,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544340374U, // <2,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1548985304U, // <2,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2551696694U, // <2,3,1,4>: Cost 3 vext1 <0,2,3,1>, RHS
- 1548985488U, // <2,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2622727375U, // <2,3,1,6>: Cost 3 vext2 LHS, <1,6,1,7>
- 2665858347U, // <2,3,1,7>: Cost 3 vext2 LHS, <1,7,3,0>
- 1548985709U, // <2,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
- 2622727613U, // <2,3,2,0>: Cost 3 vext2 LHS, <2,0,1,2>
- 2622727711U, // <2,3,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
- 1544341096U, // <2,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
- 1544341158U, // <2,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 2622727958U, // <2,3,2,4>: Cost 3 vext2 LHS, <2,4,3,5>
- 2622728032U, // <2,3,2,5>: Cost 3 vext2 LHS, <2,5,2,7>
- 1548986298U, // <2,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 2665859050U, // <2,3,2,7>: Cost 3 vext2 LHS, <2,7,0,1>
- 1548986427U, // <2,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
- 1548986518U, // <2,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 2622728415U, // <2,3,3,1>: Cost 3 vext2 LHS, <3,1,0,3>
- 1489913458U, // <2,3,3,2>: Cost 2 vext1 <2,2,3,3>, <2,2,3,3>
- 1544341916U, // <2,3,3,3>: Cost 2 vext2 LHS, <3,3,3,3>
- 1548986882U, // <2,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2665859632U, // <2,3,3,5>: Cost 3 vext2 LHS, <3,5,1,7>
- 2234304870U, // <2,3,3,6>: Cost 3 vrev <3,2,6,3>
- 2958271632U, // <2,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
- 1548987166U, // <2,3,3,u>: Cost 2 vext2 LHS, <3,u,1,2>
- 1483948134U, // <2,3,4,0>: Cost 2 vext1 <1,2,3,4>, LHS
- 1483948954U, // <2,3,4,1>: Cost 2 vext1 <1,2,3,4>, <1,2,3,4>
- 2622729276U, // <2,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
- 2557692054U, // <2,3,4,3>: Cost 3 vext1 <1,2,3,4>, <3,0,1,2>
- 1483951414U, // <2,3,4,4>: Cost 2 vext1 <1,2,3,4>, RHS
- 470601014U, // <2,3,4,5>: Cost 1 vext2 LHS, RHS
- 1592118644U, // <2,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 2593526960U, // <2,3,4,7>: Cost 3 vext1 <7,2,3,4>, <7,2,3,4>
- 470601257U, // <2,3,4,u>: Cost 1 vext2 LHS, RHS
- 2551726182U, // <2,3,5,0>: Cost 3 vext1 <0,2,3,5>, LHS
- 1592118992U, // <2,3,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
- 2665860862U, // <2,3,5,2>: Cost 3 vext2 LHS, <5,2,3,4>
- 2551728642U, // <2,3,5,3>: Cost 3 vext1 <0,2,3,5>, <3,4,5,6>
- 1592119238U, // <2,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592119300U, // <2,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1592119394U, // <2,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
- 1592119464U, // <2,3,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
- 1592119545U, // <2,3,5,u>: Cost 2 vext2 LHS, <5,u,5,7>
- 2622730529U, // <2,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
- 2557707164U, // <2,3,6,1>: Cost 3 vext1 <1,2,3,6>, <1,2,3,6>
- 1592119802U, // <2,3,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
- 2665861682U, // <2,3,6,3>: Cost 3 vext2 LHS, <6,3,4,5>
- 2622730893U, // <2,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
- 2665861810U, // <2,3,6,5>: Cost 3 vext2 LHS, <6,5,0,7>
- 1592120120U, // <2,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592120142U, // <2,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1592120223U, // <2,3,6,u>: Cost 2 vext2 LHS, <6,u,0,1>
- 1592120314U, // <2,3,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
- 2659890261U, // <2,3,7,1>: Cost 3 vext2 <7,1,2,3>, <7,1,2,3>
- 2660553894U, // <2,3,7,2>: Cost 3 vext2 <7,2,2,3>, <7,2,2,3>
- 2665862371U, // <2,3,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
- 1592120678U, // <2,3,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
- 2665862534U, // <2,3,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
- 2665862614U, // <2,3,7,6>: Cost 3 vext2 LHS, <7,6,0,1>
- 1592120940U, // <2,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1592120962U, // <2,3,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
- 1548990163U, // <2,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
- 470603566U, // <2,3,u,1>: Cost 1 vext2 LHS, LHS
- 1548990341U, // <2,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
- 1548990396U, // <2,3,u,3>: Cost 2 vext2 LHS, <u,3,0,1>
- 1548990527U, // <2,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
- 470603930U, // <2,3,u,5>: Cost 1 vext2 LHS, RHS
- 1548990672U, // <2,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
- 1592121600U, // <2,3,u,7>: Cost 2 vext2 LHS, <u,7,0,1>
- 470604133U, // <2,3,u,u>: Cost 1 vext2 LHS, LHS
- 2617425942U, // <2,4,0,0>: Cost 3 vext2 <0,0,2,4>, <0,0,2,4>
- 2618753126U, // <2,4,0,1>: Cost 3 vext2 <0,2,2,4>, LHS
- 2618753208U, // <2,4,0,2>: Cost 3 vext2 <0,2,2,4>, <0,2,2,4>
- 2619416841U, // <2,4,0,3>: Cost 3 vext2 <0,3,2,4>, <0,3,2,4>
- 2587593628U, // <2,4,0,4>: Cost 3 vext1 <6,2,4,0>, <4,0,6,2>
- 2712832914U, // <2,4,0,5>: Cost 3 vext3 <4,6,u,2>, <4,0,5,1>
- 1634962332U, // <2,4,0,6>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
- 3799993252U, // <2,4,0,7>: Cost 4 vext3 <7,0,1,2>, <4,0,7,1>
- 1634962332U, // <2,4,0,u>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
- 2619417334U, // <2,4,1,0>: Cost 3 vext2 <0,3,2,4>, <1,0,3,2>
- 3692495668U, // <2,4,1,1>: Cost 4 vext2 <0,2,2,4>, <1,1,1,1>
- 2625389466U, // <2,4,1,2>: Cost 3 vext2 <1,3,2,4>, <1,2,3,4>
- 2826125414U, // <2,4,1,3>: Cost 3 vuzpr <1,2,3,4>, LHS
- 3699794995U, // <2,4,1,4>: Cost 4 vext2 <1,4,2,4>, <1,4,2,4>
- 3692496016U, // <2,4,1,5>: Cost 4 vext2 <0,2,2,4>, <1,5,3,7>
- 3763424238U, // <2,4,1,6>: Cost 4 vext3 <0,u,0,2>, <4,1,6,3>
- 3667317942U, // <2,4,1,7>: Cost 4 vext1 <7,2,4,1>, <7,2,4,1>
- 2826125419U, // <2,4,1,u>: Cost 3 vuzpr <1,2,3,4>, LHS
- 2629371336U, // <2,4,2,0>: Cost 3 vext2 <2,0,2,4>, <2,0,2,4>
- 3699131946U, // <2,4,2,1>: Cost 4 vext2 <1,3,2,4>, <2,1,4,3>
- 2630698602U, // <2,4,2,2>: Cost 3 vext2 <2,2,2,4>, <2,2,2,4>
- 2618754766U, // <2,4,2,3>: Cost 3 vext2 <0,2,2,4>, <2,3,4,5>
- 2826126234U, // <2,4,2,4>: Cost 3 vuzpr <1,2,3,4>, <1,2,3,4>
- 2899119414U, // <2,4,2,5>: Cost 3 vzipl <2,2,2,2>, RHS
- 3033337142U, // <2,4,2,6>: Cost 3 vtrnl <2,2,2,2>, RHS
- 3800214597U, // <2,4,2,7>: Cost 4 vext3 <7,0,4,2>, <4,2,7,0>
- 2899119657U, // <2,4,2,u>: Cost 3 vzipl <2,2,2,2>, RHS
- 2635344033U, // <2,4,3,0>: Cost 3 vext2 <3,0,2,4>, <3,0,2,4>
- 4032012325U, // <2,4,3,1>: Cost 4 vzipr LHS, <0,0,4,1>
- 3692497228U, // <2,4,3,2>: Cost 4 vext2 <0,2,2,4>, <3,2,3,4>
- 3692497308U, // <2,4,3,3>: Cost 4 vext2 <0,2,2,4>, <3,3,3,3>
- 3001404624U, // <2,4,3,4>: Cost 3 vzipr LHS, <4,4,4,4>
- 2953627342U, // <2,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 2953625804U, // <2,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
- 3899868160U, // <2,4,3,7>: Cost 4 vuzpr <1,2,3,4>, <1,3,5,7>
- 2953625806U, // <2,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
- 2710916266U, // <2,4,4,0>: Cost 3 vext3 <4,4,0,2>, <4,4,0,2>
- 3899869648U, // <2,4,4,1>: Cost 4 vuzpr <1,2,3,4>, <3,4,0,1>
- 3899869658U, // <2,4,4,2>: Cost 4 vuzpr <1,2,3,4>, <3,4,1,2>
- 3899868930U, // <2,4,4,3>: Cost 4 vuzpr <1,2,3,4>, <2,4,1,3>
- 2712833232U, // <2,4,4,4>: Cost 3 vext3 <4,6,u,2>, <4,4,4,4>
- 2618756406U, // <2,4,4,5>: Cost 3 vext2 <0,2,2,4>, RHS
- 2765737270U, // <2,4,4,6>: Cost 3 vuzpl <2,3,4,5>, RHS
- 4168304426U, // <2,4,4,7>: Cost 4 vtrnr <1,2,3,4>, <2,4,5,7>
- 2618756649U, // <2,4,4,u>: Cost 3 vext2 <0,2,2,4>, RHS
- 2551800011U, // <2,4,5,0>: Cost 3 vext1 <0,2,4,5>, <0,2,4,5>
- 2569716470U, // <2,4,5,1>: Cost 3 vext1 <3,2,4,5>, <1,0,3,2>
- 2563745405U, // <2,4,5,2>: Cost 3 vext1 <2,2,4,5>, <2,2,4,5>
- 2569718102U, // <2,4,5,3>: Cost 3 vext1 <3,2,4,5>, <3,2,4,5>
- 2551803190U, // <2,4,5,4>: Cost 3 vext1 <0,2,4,5>, RHS
- 3625545732U, // <2,4,5,5>: Cost 4 vext1 <0,2,4,5>, <5,5,5,5>
- 1611959606U, // <2,4,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 2826128694U, // <2,4,5,7>: Cost 3 vuzpr <1,2,3,4>, RHS
- 1611959624U, // <2,4,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 1478066278U, // <2,4,6,0>: Cost 2 vext1 <0,2,4,6>, LHS
- 2551808758U, // <2,4,6,1>: Cost 3 vext1 <0,2,4,6>, <1,0,3,2>
- 2551809516U, // <2,4,6,2>: Cost 3 vext1 <0,2,4,6>, <2,0,6,4>
- 2551810198U, // <2,4,6,3>: Cost 3 vext1 <0,2,4,6>, <3,0,1,2>
- 1478069558U, // <2,4,6,4>: Cost 2 vext1 <0,2,4,6>, RHS
- 2901888310U, // <2,4,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
- 2551812920U, // <2,4,6,6>: Cost 3 vext1 <0,2,4,6>, <6,6,6,6>
- 2726251914U, // <2,4,6,7>: Cost 3 vext3 <7,0,1,2>, <4,6,7,1>
- 1478072110U, // <2,4,6,u>: Cost 2 vext1 <0,2,4,6>, LHS
- 2659234821U, // <2,4,7,0>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
- 3786722726U, // <2,4,7,1>: Cost 4 vext3 <4,7,1,2>, <4,7,1,2>
- 3734303911U, // <2,4,7,2>: Cost 4 vext2 <7,2,2,4>, <7,2,2,4>
- 3734967544U, // <2,4,7,3>: Cost 4 vext2 <7,3,2,4>, <7,3,2,4>
- 3727005030U, // <2,4,7,4>: Cost 4 vext2 <6,0,2,4>, <7,4,5,6>
- 2726251976U, // <2,4,7,5>: Cost 3 vext3 <7,0,1,2>, <4,7,5,0>
- 2726251986U, // <2,4,7,6>: Cost 3 vext3 <7,0,1,2>, <4,7,6,1>
- 3727005292U, // <2,4,7,7>: Cost 4 vext2 <6,0,2,4>, <7,7,7,7>
- 2659234821U, // <2,4,7,u>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
- 1478082662U, // <2,4,u,0>: Cost 2 vext1 <0,2,4,u>, LHS
- 2618758958U, // <2,4,u,1>: Cost 3 vext2 <0,2,2,4>, LHS
- 2551826024U, // <2,4,u,2>: Cost 3 vext1 <0,2,4,u>, <2,2,2,2>
- 2551826582U, // <2,4,u,3>: Cost 3 vext1 <0,2,4,u>, <3,0,1,2>
- 1478085942U, // <2,4,u,4>: Cost 2 vext1 <0,2,4,u>, RHS
- 2953668302U, // <2,4,u,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 1611959849U, // <2,4,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 2826128937U, // <2,4,u,7>: Cost 3 vuzpr <1,2,3,4>, RHS
- 1611959867U, // <2,4,u,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 3691839488U, // <2,5,0,0>: Cost 4 vext2 <0,1,2,5>, <0,0,0,0>
- 2618097766U, // <2,5,0,1>: Cost 3 vext2 <0,1,2,5>, LHS
- 2620088484U, // <2,5,0,2>: Cost 3 vext2 <0,4,2,5>, <0,2,0,2>
- 2619425034U, // <2,5,0,3>: Cost 3 vext2 <0,3,2,5>, <0,3,2,5>
- 2620088667U, // <2,5,0,4>: Cost 3 vext2 <0,4,2,5>, <0,4,2,5>
- 2620752300U, // <2,5,0,5>: Cost 3 vext2 <0,5,2,5>, <0,5,2,5>
- 3693830655U, // <2,5,0,6>: Cost 4 vext2 <0,4,2,5>, <0,6,2,7>
- 3094531382U, // <2,5,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
- 2618098333U, // <2,5,0,u>: Cost 3 vext2 <0,1,2,5>, LHS
- 3691840246U, // <2,5,1,0>: Cost 4 vext2 <0,1,2,5>, <1,0,3,2>
- 3691840308U, // <2,5,1,1>: Cost 4 vext2 <0,1,2,5>, <1,1,1,1>
- 2626061206U, // <2,5,1,2>: Cost 3 vext2 <1,4,2,5>, <1,2,3,0>
- 2618098688U, // <2,5,1,3>: Cost 3 vext2 <0,1,2,5>, <1,3,5,7>
- 2626061364U, // <2,5,1,4>: Cost 3 vext2 <1,4,2,5>, <1,4,2,5>
- 3691840656U, // <2,5,1,5>: Cost 4 vext2 <0,1,2,5>, <1,5,3,7>
- 3789082310U, // <2,5,1,6>: Cost 4 vext3 <5,1,6,2>, <5,1,6,2>
- 2712833744U, // <2,5,1,7>: Cost 3 vext3 <4,6,u,2>, <5,1,7,3>
- 2628715896U, // <2,5,1,u>: Cost 3 vext2 <1,u,2,5>, <1,u,2,5>
- 3693831613U, // <2,5,2,0>: Cost 4 vext2 <0,4,2,5>, <2,0,1,2>
- 4026698642U, // <2,5,2,1>: Cost 4 vzipr <0,0,2,2>, <4,0,5,1>
- 2632033896U, // <2,5,2,2>: Cost 3 vext2 <2,4,2,5>, <2,2,2,2>
- 3691841190U, // <2,5,2,3>: Cost 4 vext2 <0,1,2,5>, <2,3,0,1>
- 2632034061U, // <2,5,2,4>: Cost 3 vext2 <2,4,2,5>, <2,4,2,5>
- 3691841352U, // <2,5,2,5>: Cost 4 vext2 <0,1,2,5>, <2,5,0,1>
- 3691841466U, // <2,5,2,6>: Cost 4 vext2 <0,1,2,5>, <2,6,3,7>
- 3088354614U, // <2,5,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
- 3088354615U, // <2,5,2,u>: Cost 3 vtrnr <0,2,0,2>, RHS
- 2557829222U, // <2,5,3,0>: Cost 3 vext1 <1,2,5,3>, LHS
- 2557830059U, // <2,5,3,1>: Cost 3 vext1 <1,2,5,3>, <1,2,5,3>
- 2575746766U, // <2,5,3,2>: Cost 3 vext1 <4,2,5,3>, <2,3,4,5>
- 3691841948U, // <2,5,3,3>: Cost 4 vext2 <0,1,2,5>, <3,3,3,3>
- 2619427330U, // <2,5,3,4>: Cost 3 vext2 <0,3,2,5>, <3,4,5,6>
- 2581720847U, // <2,5,3,5>: Cost 3 vext1 <5,2,5,3>, <5,2,5,3>
- 2953628162U, // <2,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2953626624U, // <2,5,3,7>: Cost 3 vzipr LHS, <1,3,5,7>
- 2953626625U, // <2,5,3,u>: Cost 3 vzipr LHS, <1,3,5,u>
- 2569781350U, // <2,5,4,0>: Cost 3 vext1 <3,2,5,4>, LHS
- 3631580076U, // <2,5,4,1>: Cost 4 vext1 <1,2,5,4>, <1,2,5,4>
- 2569782990U, // <2,5,4,2>: Cost 3 vext1 <3,2,5,4>, <2,3,4,5>
- 2569783646U, // <2,5,4,3>: Cost 3 vext1 <3,2,5,4>, <3,2,5,4>
- 2569784630U, // <2,5,4,4>: Cost 3 vext1 <3,2,5,4>, RHS
- 2618101046U, // <2,5,4,5>: Cost 3 vext2 <0,1,2,5>, RHS
- 3893905922U, // <2,5,4,6>: Cost 4 vuzpr <0,2,3,5>, <3,4,5,6>
- 3094564150U, // <2,5,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
- 2618101289U, // <2,5,4,u>: Cost 3 vext2 <0,1,2,5>, RHS
- 2551873638U, // <2,5,5,0>: Cost 3 vext1 <0,2,5,5>, LHS
- 3637560320U, // <2,5,5,1>: Cost 4 vext1 <2,2,5,5>, <1,3,5,7>
- 3637560966U, // <2,5,5,2>: Cost 4 vext1 <2,2,5,5>, <2,2,5,5>
- 3723030343U, // <2,5,5,3>: Cost 4 vext2 <5,3,2,5>, <5,3,2,5>
- 2551876918U, // <2,5,5,4>: Cost 3 vext1 <0,2,5,5>, RHS
- 2712834052U, // <2,5,5,5>: Cost 3 vext3 <4,6,u,2>, <5,5,5,5>
- 4028713474U, // <2,5,5,6>: Cost 4 vzipr <0,3,2,5>, <3,4,5,6>
- 2712834072U, // <2,5,5,7>: Cost 3 vext3 <4,6,u,2>, <5,5,7,7>
- 2712834081U, // <2,5,5,u>: Cost 3 vext3 <4,6,u,2>, <5,5,u,7>
- 2575769702U, // <2,5,6,0>: Cost 3 vext1 <4,2,5,6>, LHS
- 3631596462U, // <2,5,6,1>: Cost 4 vext1 <1,2,5,6>, <1,2,5,6>
- 2655924730U, // <2,5,6,2>: Cost 3 vext2 <6,4,2,5>, <6,2,7,3>
- 3643541856U, // <2,5,6,3>: Cost 4 vext1 <3,2,5,6>, <3,2,5,6>
- 2655924849U, // <2,5,6,4>: Cost 3 vext2 <6,4,2,5>, <6,4,2,5>
- 3787755607U, // <2,5,6,5>: Cost 4 vext3 <4,u,6,2>, <5,6,5,7>
- 4029385218U, // <2,5,6,6>: Cost 4 vzipr <0,4,2,6>, <3,4,5,6>
- 3088682294U, // <2,5,6,7>: Cost 3 vtrnr <0,2,4,6>, RHS
- 3088682295U, // <2,5,6,u>: Cost 3 vtrnr <0,2,4,6>, RHS
- 2563833958U, // <2,5,7,0>: Cost 3 vext1 <2,2,5,7>, LHS
- 2551890678U, // <2,5,7,1>: Cost 3 vext1 <0,2,5,7>, <1,0,3,2>
- 2563835528U, // <2,5,7,2>: Cost 3 vext1 <2,2,5,7>, <2,2,5,7>
- 3637577878U, // <2,5,7,3>: Cost 4 vext1 <2,2,5,7>, <3,0,1,2>
- 2563837238U, // <2,5,7,4>: Cost 3 vext1 <2,2,5,7>, RHS
- 2712834216U, // <2,5,7,5>: Cost 3 vext3 <4,6,u,2>, <5,7,5,7>
- 2712834220U, // <2,5,7,6>: Cost 3 vext3 <4,6,u,2>, <5,7,6,2>
- 4174449974U, // <2,5,7,7>: Cost 4 vtrnr <2,2,5,7>, RHS
- 2563839790U, // <2,5,7,u>: Cost 3 vext1 <2,2,5,7>, LHS
- 2563842150U, // <2,5,u,0>: Cost 3 vext1 <2,2,5,u>, LHS
- 2618103598U, // <2,5,u,1>: Cost 3 vext2 <0,1,2,5>, LHS
- 2563843721U, // <2,5,u,2>: Cost 3 vext1 <2,2,5,u>, <2,2,5,u>
- 2569816418U, // <2,5,u,3>: Cost 3 vext1 <3,2,5,u>, <3,2,5,u>
- 2622748735U, // <2,5,u,4>: Cost 3 vext2 <0,u,2,5>, <u,4,5,6>
- 2618103962U, // <2,5,u,5>: Cost 3 vext2 <0,1,2,5>, RHS
- 2953669122U, // <2,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2953667584U, // <2,5,u,7>: Cost 3 vzipr LHS, <1,3,5,7>
- 2618104165U, // <2,5,u,u>: Cost 3 vext2 <0,1,2,5>, LHS
- 2620096512U, // <2,6,0,0>: Cost 3 vext2 <0,4,2,6>, <0,0,0,0>
- 1546354790U, // <2,6,0,1>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620096676U, // <2,6,0,2>: Cost 3 vext2 <0,4,2,6>, <0,2,0,2>
- 3693838588U, // <2,6,0,3>: Cost 4 vext2 <0,4,2,6>, <0,3,1,0>
- 1546355036U, // <2,6,0,4>: Cost 2 vext2 <0,4,2,6>, <0,4,2,6>
- 3694502317U, // <2,6,0,5>: Cost 4 vext2 <0,5,2,6>, <0,5,2,6>
- 2551911246U, // <2,6,0,6>: Cost 3 vext1 <0,2,6,0>, <6,7,0,1>
- 2720723287U, // <2,6,0,7>: Cost 3 vext3 <6,0,7,2>, <6,0,7,2>
- 1546355357U, // <2,6,0,u>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620097270U, // <2,6,1,0>: Cost 3 vext2 <0,4,2,6>, <1,0,3,2>
- 2620097332U, // <2,6,1,1>: Cost 3 vext2 <0,4,2,6>, <1,1,1,1>
- 2620097430U, // <2,6,1,2>: Cost 3 vext2 <0,4,2,6>, <1,2,3,0>
- 2820243558U, // <2,6,1,3>: Cost 3 vuzpr <0,2,4,6>, LHS
- 2620097598U, // <2,6,1,4>: Cost 3 vext2 <0,4,2,6>, <1,4,3,6>
- 2620097680U, // <2,6,1,5>: Cost 3 vext2 <0,4,2,6>, <1,5,3,7>
- 3693839585U, // <2,6,1,6>: Cost 4 vext2 <0,4,2,6>, <1,6,3,7>
- 2721386920U, // <2,6,1,7>: Cost 3 vext3 <6,1,7,2>, <6,1,7,2>
- 2820243563U, // <2,6,1,u>: Cost 3 vuzpr <0,2,4,6>, LHS
- 2714014137U, // <2,6,2,0>: Cost 3 vext3 <4,u,6,2>, <6,2,0,1>
- 2712834500U, // <2,6,2,1>: Cost 3 vext3 <4,6,u,2>, <6,2,1,3>
- 2620098152U, // <2,6,2,2>: Cost 3 vext2 <0,4,2,6>, <2,2,2,2>
- 2620098214U, // <2,6,2,3>: Cost 3 vext2 <0,4,2,6>, <2,3,0,1>
- 2632042254U, // <2,6,2,4>: Cost 3 vext2 <2,4,2,6>, <2,4,2,6>
- 2712834540U, // <2,6,2,5>: Cost 3 vext3 <4,6,u,2>, <6,2,5,7>
- 2820243660U, // <2,6,2,6>: Cost 3 vuzpr <0,2,4,6>, <0,2,4,6>
- 2958265654U, // <2,6,2,7>: Cost 3 vzipr <0,u,2,2>, RHS
- 2620098619U, // <2,6,2,u>: Cost 3 vext2 <0,4,2,6>, <2,u,0,1>
- 2620098710U, // <2,6,3,0>: Cost 3 vext2 <0,4,2,6>, <3,0,1,2>
- 3893986982U, // <2,6,3,1>: Cost 4 vuzpr <0,2,4,6>, <2,3,0,1>
- 2569848762U, // <2,6,3,2>: Cost 3 vext1 <3,2,6,3>, <2,6,3,7>
- 2620098972U, // <2,6,3,3>: Cost 3 vext2 <0,4,2,6>, <3,3,3,3>
- 2620099074U, // <2,6,3,4>: Cost 3 vext2 <0,4,2,6>, <3,4,5,6>
- 3893987022U, // <2,6,3,5>: Cost 4 vuzpr <0,2,4,6>, <2,3,4,5>
- 3001404644U, // <2,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
- 1879887158U, // <2,6,3,7>: Cost 2 vzipr LHS, RHS
- 1879887159U, // <2,6,3,u>: Cost 2 vzipr LHS, RHS
- 2620099484U, // <2,6,4,0>: Cost 3 vext2 <0,4,2,6>, <4,0,6,2>
- 2620099566U, // <2,6,4,1>: Cost 3 vext2 <0,4,2,6>, <4,1,6,3>
- 2620099644U, // <2,6,4,2>: Cost 3 vext2 <0,4,2,6>, <4,2,6,0>
- 3643599207U, // <2,6,4,3>: Cost 4 vext1 <3,2,6,4>, <3,2,6,4>
- 2575830080U, // <2,6,4,4>: Cost 3 vext1 <4,2,6,4>, <4,2,6,4>
- 1546358070U, // <2,6,4,5>: Cost 2 vext2 <0,4,2,6>, RHS
- 2667875700U, // <2,6,4,6>: Cost 3 vext2 <u,4,2,6>, <4,6,4,6>
- 4028042550U, // <2,6,4,7>: Cost 4 vzipr <0,2,2,4>, RHS
- 1546358313U, // <2,6,4,u>: Cost 2 vext2 <0,4,2,6>, RHS
- 3693841992U, // <2,6,5,0>: Cost 4 vext2 <0,4,2,6>, <5,0,1,2>
- 2667876048U, // <2,6,5,1>: Cost 3 vext2 <u,4,2,6>, <5,1,7,3>
- 2712834756U, // <2,6,5,2>: Cost 3 vext3 <4,6,u,2>, <6,5,2,7>
- 3643607400U, // <2,6,5,3>: Cost 4 vext1 <3,2,6,5>, <3,2,6,5>
- 2252091873U, // <2,6,5,4>: Cost 3 vrev <6,2,4,5>
- 2667876356U, // <2,6,5,5>: Cost 3 vext2 <u,4,2,6>, <5,5,5,5>
- 2667876450U, // <2,6,5,6>: Cost 3 vext2 <u,4,2,6>, <5,6,7,0>
- 2820246838U, // <2,6,5,7>: Cost 3 vuzpr <0,2,4,6>, RHS
- 2820246839U, // <2,6,5,u>: Cost 3 vuzpr <0,2,4,6>, RHS
- 2563899494U, // <2,6,6,0>: Cost 3 vext1 <2,2,6,6>, LHS
- 3893988683U, // <2,6,6,1>: Cost 4 vuzpr <0,2,4,6>, <4,6,0,1>
- 2563901072U, // <2,6,6,2>: Cost 3 vext1 <2,2,6,6>, <2,2,6,6>
- 3893987236U, // <2,6,6,3>: Cost 4 vuzpr <0,2,4,6>, <2,6,1,3>
- 2563902774U, // <2,6,6,4>: Cost 3 vext1 <2,2,6,6>, RHS
- 3893988723U, // <2,6,6,5>: Cost 4 vuzpr <0,2,4,6>, <4,6,4,5>
- 2712834872U, // <2,6,6,6>: Cost 3 vext3 <4,6,u,2>, <6,6,6,6>
- 2955644214U, // <2,6,6,7>: Cost 3 vzipr <0,4,2,6>, RHS
- 2955644215U, // <2,6,6,u>: Cost 3 vzipr <0,4,2,6>, RHS
- 2712834894U, // <2,6,7,0>: Cost 3 vext3 <4,6,u,2>, <6,7,0,1>
- 2724926296U, // <2,6,7,1>: Cost 3 vext3 <6,7,1,2>, <6,7,1,2>
- 2725000033U, // <2,6,7,2>: Cost 3 vext3 <6,7,2,2>, <6,7,2,2>
- 2702365544U, // <2,6,7,3>: Cost 3 vext3 <3,0,1,2>, <6,7,3,0>
- 2712834934U, // <2,6,7,4>: Cost 3 vext3 <4,6,u,2>, <6,7,4,5>
- 3776107393U, // <2,6,7,5>: Cost 4 vext3 <3,0,1,2>, <6,7,5,7>
- 2725294981U, // <2,6,7,6>: Cost 3 vext3 <6,7,6,2>, <6,7,6,2>
- 2726253452U, // <2,6,7,7>: Cost 3 vext3 <7,0,1,2>, <6,7,7,0>
- 2712834966U, // <2,6,7,u>: Cost 3 vext3 <4,6,u,2>, <6,7,u,1>
- 2620102355U, // <2,6,u,0>: Cost 3 vext2 <0,4,2,6>, <u,0,1,2>
- 1546360622U, // <2,6,u,1>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620102536U, // <2,6,u,2>: Cost 3 vext2 <0,4,2,6>, <u,2,3,3>
- 2820244125U, // <2,6,u,3>: Cost 3 vuzpr <0,2,4,6>, LHS
- 1594136612U, // <2,6,u,4>: Cost 2 vext2 <u,4,2,6>, <u,4,2,6>
- 1546360986U, // <2,6,u,5>: Cost 2 vext2 <0,4,2,6>, RHS
- 2620102864U, // <2,6,u,6>: Cost 3 vext2 <0,4,2,6>, <u,6,3,7>
- 1879928118U, // <2,6,u,7>: Cost 2 vzipr LHS, RHS
- 1879928119U, // <2,6,u,u>: Cost 2 vzipr LHS, RHS
- 2726179825U, // <2,7,0,0>: Cost 3 vext3 <7,0,0,2>, <7,0,0,2>
- 1652511738U, // <2,7,0,1>: Cost 2 vext3 <7,0,1,2>, <7,0,1,2>
- 2621431972U, // <2,7,0,2>: Cost 3 vext2 <0,6,2,7>, <0,2,0,2>
- 2257949868U, // <2,7,0,3>: Cost 3 vrev <7,2,3,0>
- 2726474773U, // <2,7,0,4>: Cost 3 vext3 <7,0,4,2>, <7,0,4,2>
- 2620768686U, // <2,7,0,5>: Cost 3 vext2 <0,5,2,7>, <0,5,2,7>
- 2621432319U, // <2,7,0,6>: Cost 3 vext2 <0,6,2,7>, <0,6,2,7>
- 2599760953U, // <2,7,0,7>: Cost 3 vext1 <u,2,7,0>, <7,0,u,2>
- 1653027897U, // <2,7,0,u>: Cost 2 vext3 <7,0,u,2>, <7,0,u,2>
- 2639348470U, // <2,7,1,0>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
- 3695174452U, // <2,7,1,1>: Cost 4 vext2 <0,6,2,7>, <1,1,1,1>
- 3695174550U, // <2,7,1,2>: Cost 4 vext2 <0,6,2,7>, <1,2,3,0>
- 3694511104U, // <2,7,1,3>: Cost 4 vext2 <0,5,2,7>, <1,3,5,7>
- 3713090594U, // <2,7,1,4>: Cost 4 vext2 <3,6,2,7>, <1,4,0,5>
- 3693184144U, // <2,7,1,5>: Cost 4 vext2 <0,3,2,7>, <1,5,3,7>
- 2627405016U, // <2,7,1,6>: Cost 3 vext2 <1,6,2,7>, <1,6,2,7>
- 3799995519U, // <2,7,1,7>: Cost 4 vext3 <7,0,1,2>, <7,1,7,0>
- 2639348470U, // <2,7,1,u>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
- 3695175101U, // <2,7,2,0>: Cost 4 vext2 <0,6,2,7>, <2,0,1,2>
- 3643655168U, // <2,7,2,1>: Cost 4 vext1 <3,2,7,2>, <1,3,5,7>
- 2257892517U, // <2,7,2,2>: Cost 3 vrev <7,2,2,2>
- 3695175334U, // <2,7,2,3>: Cost 4 vext2 <0,6,2,7>, <2,3,0,1>
- 3695175465U, // <2,7,2,4>: Cost 4 vext2 <0,6,2,7>, <2,4,5,6>
- 2632714080U, // <2,7,2,5>: Cost 3 vext2 <2,5,2,7>, <2,5,2,7>
- 2633377713U, // <2,7,2,6>: Cost 3 vext2 <2,6,2,7>, <2,6,2,7>
- 3695175658U, // <2,7,2,7>: Cost 4 vext2 <0,6,2,7>, <2,7,0,1>
- 2634704979U, // <2,7,2,u>: Cost 3 vext2 <2,u,2,7>, <2,u,2,7>
- 1514094694U, // <2,7,3,0>: Cost 2 vext1 <6,2,7,3>, LHS
- 2569921680U, // <2,7,3,1>: Cost 3 vext1 <3,2,7,3>, <1,5,3,7>
- 2587838056U, // <2,7,3,2>: Cost 3 vext1 <6,2,7,3>, <2,2,2,2>
- 2569922927U, // <2,7,3,3>: Cost 3 vext1 <3,2,7,3>, <3,2,7,3>
- 1514097974U, // <2,7,3,4>: Cost 2 vext1 <6,2,7,3>, RHS
- 2581868321U, // <2,7,3,5>: Cost 3 vext1 <5,2,7,3>, <5,2,7,3>
- 1514099194U, // <2,7,3,6>: Cost 2 vext1 <6,2,7,3>, <6,2,7,3>
- 2587841530U, // <2,7,3,7>: Cost 3 vext1 <6,2,7,3>, <7,0,1,2>
- 1514100526U, // <2,7,3,u>: Cost 2 vext1 <6,2,7,3>, LHS
- 2708706617U, // <2,7,4,0>: Cost 3 vext3 <4,0,6,2>, <7,4,0,6>
- 3649643418U, // <2,7,4,1>: Cost 4 vext1 <4,2,7,4>, <1,2,3,4>
- 3649644330U, // <2,7,4,2>: Cost 4 vext1 <4,2,7,4>, <2,4,5,7>
- 2257982640U, // <2,7,4,3>: Cost 3 vrev <7,2,3,4>
- 3649645641U, // <2,7,4,4>: Cost 4 vext1 <4,2,7,4>, <4,2,7,4>
- 2621435190U, // <2,7,4,5>: Cost 3 vext2 <0,6,2,7>, RHS
- 2712835441U, // <2,7,4,6>: Cost 3 vext3 <4,6,u,2>, <7,4,6,u>
- 3799995762U, // <2,7,4,7>: Cost 4 vext3 <7,0,1,2>, <7,4,7,0>
- 2621435433U, // <2,7,4,u>: Cost 3 vext2 <0,6,2,7>, RHS
- 2729497990U, // <2,7,5,0>: Cost 3 vext3 <7,5,0,2>, <7,5,0,2>
- 3643679744U, // <2,7,5,1>: Cost 4 vext1 <3,2,7,5>, <1,3,5,7>
- 3637708424U, // <2,7,5,2>: Cost 4 vext1 <2,2,7,5>, <2,2,5,7>
- 3643681137U, // <2,7,5,3>: Cost 4 vext1 <3,2,7,5>, <3,2,7,5>
- 2599800118U, // <2,7,5,4>: Cost 3 vext1 <u,2,7,5>, RHS
- 3786577334U, // <2,7,5,5>: Cost 4 vext3 <4,6,u,2>, <7,5,5,5>
- 3786577345U, // <2,7,5,6>: Cost 4 vext3 <4,6,u,2>, <7,5,6,7>
- 2599802214U, // <2,7,5,7>: Cost 3 vext1 <u,2,7,5>, <7,4,5,6>
- 2599802670U, // <2,7,5,u>: Cost 3 vext1 <u,2,7,5>, LHS
- 2581889126U, // <2,7,6,0>: Cost 3 vext1 <5,2,7,6>, LHS
- 3643687936U, // <2,7,6,1>: Cost 4 vext1 <3,2,7,6>, <1,3,5,7>
- 2663240186U, // <2,7,6,2>: Cost 3 vext2 <7,6,2,7>, <6,2,7,3>
- 3643689330U, // <2,7,6,3>: Cost 4 vext1 <3,2,7,6>, <3,2,7,6>
- 2581892406U, // <2,7,6,4>: Cost 3 vext1 <5,2,7,6>, RHS
- 2581892900U, // <2,7,6,5>: Cost 3 vext1 <5,2,7,6>, <5,2,7,6>
- 2587865597U, // <2,7,6,6>: Cost 3 vext1 <6,2,7,6>, <6,2,7,6>
- 3786577428U, // <2,7,6,7>: Cost 4 vext3 <4,6,u,2>, <7,6,7,0>
- 2581894958U, // <2,7,6,u>: Cost 3 vext1 <5,2,7,6>, LHS
- 2726254119U, // <2,7,7,0>: Cost 3 vext3 <7,0,1,2>, <7,7,0,1>
- 3804640817U, // <2,7,7,1>: Cost 4 vext3 <7,7,1,2>, <7,7,1,2>
- 3637724826U, // <2,7,7,2>: Cost 4 vext1 <2,2,7,7>, <2,2,7,7>
- 3734992123U, // <2,7,7,3>: Cost 4 vext2 <7,3,2,7>, <7,3,2,7>
- 2552040758U, // <2,7,7,4>: Cost 3 vext1 <0,2,7,7>, RHS
- 3799995992U, // <2,7,7,5>: Cost 4 vext3 <7,0,1,2>, <7,7,5,5>
- 2663241198U, // <2,7,7,6>: Cost 3 vext2 <7,6,2,7>, <7,6,2,7>
- 2712835692U, // <2,7,7,7>: Cost 3 vext3 <4,6,u,2>, <7,7,7,7>
- 2731562607U, // <2,7,7,u>: Cost 3 vext3 <7,u,1,2>, <7,7,u,1>
- 1514135654U, // <2,7,u,0>: Cost 2 vext1 <6,2,7,u>, LHS
- 1657820802U, // <2,7,u,1>: Cost 2 vext3 <7,u,1,2>, <7,u,1,2>
- 2587879016U, // <2,7,u,2>: Cost 3 vext1 <6,2,7,u>, <2,2,2,2>
- 2569963892U, // <2,7,u,3>: Cost 3 vext1 <3,2,7,u>, <3,2,7,u>
- 1514138934U, // <2,7,u,4>: Cost 2 vext1 <6,2,7,u>, RHS
- 2621438106U, // <2,7,u,5>: Cost 3 vext2 <0,6,2,7>, RHS
- 1514140159U, // <2,7,u,6>: Cost 2 vext1 <6,2,7,u>, <6,2,7,u>
- 2587882490U, // <2,7,u,7>: Cost 3 vext1 <6,2,7,u>, <7,0,1,2>
- 1514141486U, // <2,7,u,u>: Cost 2 vext1 <6,2,7,u>, LHS
- 1544380416U, // <2,u,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 470638699U, // <2,u,0,1>: Cost 1 vext2 LHS, LHS
- 1544380580U, // <2,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 1658631909U, // <2,u,0,3>: Cost 2 vext3 <u,0,3,2>, <u,0,3,2>
- 1544380754U, // <2,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2665898414U, // <2,u,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
- 1658853120U, // <2,u,0,6>: Cost 2 vext3 <u,0,6,2>, <u,0,6,2>
- 3094531625U, // <2,u,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
- 470639261U, // <2,u,0,u>: Cost 1 vext2 LHS, LHS
- 1544381174U, // <2,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544381236U, // <2,u,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544381334U, // <2,u,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1544381400U, // <2,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2618123325U, // <2,u,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
- 1544381584U, // <2,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2618123489U, // <2,u,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
- 2726254427U, // <2,u,1,7>: Cost 3 vext3 <7,0,1,2>, <u,1,7,3>
- 1544381823U, // <2,u,1,u>: Cost 2 vext2 LHS, <1,u,3,3>
- 1478328422U, // <2,u,2,0>: Cost 2 vext1 <0,2,u,2>, LHS
- 2618123807U, // <2,u,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
- 269271142U, // <2,u,2,2>: Cost 1 vdup2 LHS
- 1544382118U, // <2,u,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 1478331702U, // <2,u,2,4>: Cost 2 vext1 <0,2,u,2>, RHS
- 2618124136U, // <2,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544382394U, // <2,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 3088354857U, // <2,u,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
- 269271142U, // <2,u,2,u>: Cost 1 vdup2 LHS
- 1544382614U, // <2,u,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 2953627374U, // <2,u,3,1>: Cost 3 vzipr LHS, <2,3,u,1>
- 1490282143U, // <2,u,3,2>: Cost 2 vext1 <2,2,u,3>, <2,2,u,3>
- 1879883932U, // <2,u,3,3>: Cost 2 vzipr LHS, LHS
- 1544382978U, // <2,u,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2953627378U, // <2,u,3,5>: Cost 3 vzipr LHS, <2,3,u,5>
- 1514172931U, // <2,u,3,6>: Cost 2 vext1 <6,2,u,3>, <6,2,u,3>
- 1879887176U, // <2,u,3,7>: Cost 2 vzipr LHS, RHS
- 1879883937U, // <2,u,3,u>: Cost 2 vzipr LHS, LHS
- 1484316774U, // <2,u,4,0>: Cost 2 vext1 <1,2,u,4>, LHS
- 1484317639U, // <2,u,4,1>: Cost 2 vext1 <1,2,u,4>, <1,2,u,4>
- 2552088270U, // <2,u,4,2>: Cost 3 vext1 <0,2,u,4>, <2,3,4,5>
- 1190213513U, // <2,u,4,3>: Cost 2 vrev <u,2,3,4>
- 1484320054U, // <2,u,4,4>: Cost 2 vext1 <1,2,u,4>, RHS
- 470641974U, // <2,u,4,5>: Cost 1 vext2 LHS, RHS
- 1592159604U, // <2,u,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 3094564393U, // <2,u,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
- 470642217U, // <2,u,4,u>: Cost 1 vext2 LHS, RHS
- 2552094959U, // <2,u,5,0>: Cost 3 vext1 <0,2,u,5>, <0,2,u,5>
- 1592159952U, // <2,u,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
- 2564040353U, // <2,u,5,2>: Cost 3 vext1 <2,2,u,5>, <2,2,u,5>
- 2690275455U, // <2,u,5,3>: Cost 3 vext3 <0,u,u,2>, <u,5,3,7>
- 1592160198U, // <2,u,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592160260U, // <2,u,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1611962522U, // <2,u,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 1592160424U, // <2,u,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
- 1611962540U, // <2,u,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 1478361190U, // <2,u,6,0>: Cost 2 vext1 <0,2,u,6>, LHS
- 2552103670U, // <2,u,6,1>: Cost 3 vext1 <0,2,u,6>, <1,0,3,2>
- 1592160762U, // <2,u,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
- 2685704400U, // <2,u,6,3>: Cost 3 vext3 <0,2,0,2>, <u,6,3,7>
- 1478364470U, // <2,u,6,4>: Cost 2 vext1 <0,2,u,6>, RHS
- 2901891226U, // <2,u,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
- 1592161080U, // <2,u,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592161102U, // <2,u,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1478367022U, // <2,u,6,u>: Cost 2 vext1 <0,2,u,6>, LHS
- 1592161274U, // <2,u,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
- 2659931226U, // <2,u,7,1>: Cost 3 vext2 <7,1,2,u>, <7,1,2,u>
- 2564056739U, // <2,u,7,2>: Cost 3 vext1 <2,2,u,7>, <2,2,u,7>
- 2665903331U, // <2,u,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
- 1592161638U, // <2,u,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
- 2665903494U, // <2,u,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
- 2587947527U, // <2,u,7,6>: Cost 3 vext1 <6,2,u,7>, <6,2,u,7>
- 1592161900U, // <2,u,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1592161922U, // <2,u,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
- 1478377574U, // <2,u,u,0>: Cost 2 vext1 <0,2,u,u>, LHS
- 470644526U, // <2,u,u,1>: Cost 1 vext2 LHS, LHS
- 269271142U, // <2,u,u,2>: Cost 1 vdup2 LHS
- 1879924892U, // <2,u,u,3>: Cost 2 vzipr LHS, LHS
- 1478380854U, // <2,u,u,4>: Cost 2 vext1 <0,2,u,u>, RHS
- 470644890U, // <2,u,u,5>: Cost 1 vext2 LHS, RHS
- 1611962765U, // <2,u,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 1879928136U, // <2,u,u,7>: Cost 2 vzipr LHS, RHS
- 470645093U, // <2,u,u,u>: Cost 1 vext2 LHS, LHS
- 1611448320U, // <3,0,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
- 1611890698U, // <3,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
- 1611890708U, // <3,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
- 3763576860U, // <3,0,0,3>: Cost 4 vext3 LHS, <0,0,3,1>
- 2689835045U, // <3,0,0,4>: Cost 3 vext3 LHS, <0,0,4,1>
- 3698508206U, // <3,0,0,5>: Cost 4 vext2 <1,2,3,0>, <0,5,2,7>
- 3763576887U, // <3,0,0,6>: Cost 4 vext3 LHS, <0,0,6,1>
- 3667678434U, // <3,0,0,7>: Cost 4 vext1 <7,3,0,0>, <7,3,0,0>
- 1616093258U, // <3,0,0,u>: Cost 2 vext3 LHS, <0,0,u,2>
- 1490337894U, // <3,0,1,0>: Cost 2 vext1 <2,3,0,1>, LHS
- 2685632602U, // <3,0,1,1>: Cost 3 vext3 LHS, <0,1,1,0>
- 537706598U, // <3,0,1,2>: Cost 1 vext3 LHS, LHS
- 2624766936U, // <3,0,1,3>: Cost 3 vext2 <1,2,3,0>, <1,3,1,3>
- 1490341174U, // <3,0,1,4>: Cost 2 vext1 <2,3,0,1>, RHS
- 2624767120U, // <3,0,1,5>: Cost 3 vext2 <1,2,3,0>, <1,5,3,7>
- 2732966030U, // <3,0,1,6>: Cost 3 vext3 LHS, <0,1,6,7>
- 2593944803U, // <3,0,1,7>: Cost 3 vext1 <7,3,0,1>, <7,3,0,1>
- 537706652U, // <3,0,1,u>: Cost 1 vext3 LHS, LHS
- 1611890852U, // <3,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2685632684U, // <3,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
- 2685632692U, // <3,0,2,2>: Cost 3 vext3 LHS, <0,2,2,0>
- 2685632702U, // <3,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
- 1611890892U, // <3,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2732966102U, // <3,0,2,5>: Cost 3 vext3 LHS, <0,2,5,7>
- 2624767930U, // <3,0,2,6>: Cost 3 vext2 <1,2,3,0>, <2,6,3,7>
- 2685632744U, // <3,0,2,7>: Cost 3 vext3 LHS, <0,2,7,7>
- 1611890924U, // <3,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
- 2624768150U, // <3,0,3,0>: Cost 3 vext2 <1,2,3,0>, <3,0,1,2>
- 2685632764U, // <3,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
- 2685632774U, // <3,0,3,2>: Cost 3 vext3 LHS, <0,3,2,1>
- 2624768412U, // <3,0,3,3>: Cost 3 vext2 <1,2,3,0>, <3,3,3,3>
- 2624768514U, // <3,0,3,4>: Cost 3 vext2 <1,2,3,0>, <3,4,5,6>
- 3702491714U, // <3,0,3,5>: Cost 4 vext2 <1,u,3,0>, <3,5,3,7>
- 2624768632U, // <3,0,3,6>: Cost 3 vext2 <1,2,3,0>, <3,6,0,7>
- 3702491843U, // <3,0,3,7>: Cost 4 vext2 <1,u,3,0>, <3,7,0,1>
- 2686959934U, // <3,0,3,u>: Cost 3 vext3 <0,3,u,3>, <0,3,u,3>
- 2689835336U, // <3,0,4,0>: Cost 3 vext3 LHS, <0,4,0,4>
- 1611891026U, // <3,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
- 1611891036U, // <3,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
- 3763577184U, // <3,0,4,3>: Cost 4 vext3 LHS, <0,4,3,1>
- 2689835374U, // <3,0,4,4>: Cost 3 vext3 LHS, <0,4,4,6>
- 1551027510U, // <3,0,4,5>: Cost 2 vext2 <1,2,3,0>, RHS
- 2666573172U, // <3,0,4,6>: Cost 3 vext2 <u,2,3,0>, <4,6,4,6>
- 3667711206U, // <3,0,4,7>: Cost 4 vext1 <7,3,0,4>, <7,3,0,4>
- 1616093586U, // <3,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
- 2685190556U, // <3,0,5,0>: Cost 3 vext3 LHS, <0,5,0,7>
- 2666573520U, // <3,0,5,1>: Cost 3 vext2 <u,2,3,0>, <5,1,7,3>
- 3040886886U, // <3,0,5,2>: Cost 3 vtrnl <3,4,5,6>, LHS
- 3625912834U, // <3,0,5,3>: Cost 4 vext1 <0,3,0,5>, <3,4,5,6>
- 2666573766U, // <3,0,5,4>: Cost 3 vext2 <u,2,3,0>, <5,4,7,6>
- 2666573828U, // <3,0,5,5>: Cost 3 vext2 <u,2,3,0>, <5,5,5,5>
- 2732966354U, // <3,0,5,6>: Cost 3 vext3 LHS, <0,5,6,7>
- 2666573992U, // <3,0,5,7>: Cost 3 vext2 <u,2,3,0>, <5,7,5,7>
- 3040886940U, // <3,0,5,u>: Cost 3 vtrnl <3,4,5,6>, LHS
- 2685190637U, // <3,0,6,0>: Cost 3 vext3 LHS, <0,6,0,7>
- 2732966390U, // <3,0,6,1>: Cost 3 vext3 LHS, <0,6,1,7>
- 2689835519U, // <3,0,6,2>: Cost 3 vext3 LHS, <0,6,2,7>
- 3667724438U, // <3,0,6,3>: Cost 4 vext1 <7,3,0,6>, <3,0,1,2>
- 3763577355U, // <3,0,6,4>: Cost 4 vext3 LHS, <0,6,4,1>
- 3806708243U, // <3,0,6,5>: Cost 4 vext3 LHS, <0,6,5,0>
- 2666574648U, // <3,0,6,6>: Cost 3 vext2 <u,2,3,0>, <6,6,6,6>
- 2657948520U, // <3,0,6,7>: Cost 3 vext2 <6,7,3,0>, <6,7,3,0>
- 2689835573U, // <3,0,6,u>: Cost 3 vext3 LHS, <0,6,u,7>
- 2666574842U, // <3,0,7,0>: Cost 3 vext2 <u,2,3,0>, <7,0,1,2>
- 2685633095U, // <3,0,7,1>: Cost 3 vext3 LHS, <0,7,1,7>
- 2660603052U, // <3,0,7,2>: Cost 3 vext2 <7,2,3,0>, <7,2,3,0>
- 3643844997U, // <3,0,7,3>: Cost 4 vext1 <3,3,0,7>, <3,3,0,7>
- 2666575206U, // <3,0,7,4>: Cost 3 vext2 <u,2,3,0>, <7,4,5,6>
- 3655790391U, // <3,0,7,5>: Cost 4 vext1 <5,3,0,7>, <5,3,0,7>
- 3731690968U, // <3,0,7,6>: Cost 4 vext2 <6,7,3,0>, <7,6,0,3>
- 2666575468U, // <3,0,7,7>: Cost 3 vext2 <u,2,3,0>, <7,7,7,7>
- 2664584850U, // <3,0,7,u>: Cost 3 vext2 <7,u,3,0>, <7,u,3,0>
- 1616093834U, // <3,0,u,0>: Cost 2 vext3 LHS, <0,u,0,2>
- 1611891346U, // <3,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
- 537707165U, // <3,0,u,2>: Cost 1 vext3 LHS, LHS
- 2689835684U, // <3,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
- 1616093874U, // <3,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
- 1551030426U, // <3,0,u,5>: Cost 2 vext2 <1,2,3,0>, RHS
- 2624772304U, // <3,0,u,6>: Cost 3 vext2 <1,2,3,0>, <u,6,3,7>
- 2594002154U, // <3,0,u,7>: Cost 3 vext1 <7,3,0,u>, <7,3,0,u>
- 537707219U, // <3,0,u,u>: Cost 1 vext3 LHS, LHS
- 2552201318U, // <3,1,0,0>: Cost 3 vext1 <0,3,1,0>, LHS
- 2618802278U, // <3,1,0,1>: Cost 3 vext2 <0,2,3,1>, LHS
- 2618802366U, // <3,1,0,2>: Cost 3 vext2 <0,2,3,1>, <0,2,3,1>
- 1611449078U, // <3,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
- 2552204598U, // <3,1,0,4>: Cost 3 vext1 <0,3,1,0>, RHS
- 2732966663U, // <3,1,0,5>: Cost 3 vext3 LHS, <1,0,5,1>
- 3906258396U, // <3,1,0,6>: Cost 4 vuzpr <2,3,0,1>, <2,0,4,6>
- 3667752171U, // <3,1,0,7>: Cost 4 vext1 <7,3,1,0>, <7,3,1,0>
- 1611891491U, // <3,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
- 2689835819U, // <3,1,1,0>: Cost 3 vext3 LHS, <1,1,0,1>
- 1611449140U, // <3,1,1,1>: Cost 2 vext3 LHS, <1,1,1,1>
- 2624775063U, // <3,1,1,2>: Cost 3 vext2 <1,2,3,1>, <1,2,3,1>
- 1611891528U, // <3,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
- 2689835859U, // <3,1,1,4>: Cost 3 vext3 LHS, <1,1,4,5>
- 2689835868U, // <3,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
- 3763577701U, // <3,1,1,6>: Cost 4 vext3 LHS, <1,1,6,5>
- 3765273452U, // <3,1,1,7>: Cost 4 vext3 <1,1,7,3>, <1,1,7,3>
- 1611891573U, // <3,1,1,u>: Cost 2 vext3 LHS, <1,1,u,3>
- 2629420494U, // <3,1,2,0>: Cost 3 vext2 <2,0,3,1>, <2,0,3,1>
- 2689835911U, // <3,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
- 2564163248U, // <3,1,2,2>: Cost 3 vext1 <2,3,1,2>, <2,3,1,2>
- 1611449238U, // <3,1,2,3>: Cost 2 vext3 LHS, <1,2,3,0>
- 2564164918U, // <3,1,2,4>: Cost 3 vext1 <2,3,1,2>, RHS
- 2689835947U, // <3,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
- 3692545978U, // <3,1,2,6>: Cost 4 vext2 <0,2,3,1>, <2,6,3,7>
- 2732966842U, // <3,1,2,7>: Cost 3 vext3 LHS, <1,2,7,0>
- 1611891651U, // <3,1,2,u>: Cost 2 vext3 LHS, <1,2,u,0>
- 1484456038U, // <3,1,3,0>: Cost 2 vext1 <1,3,1,3>, LHS
- 1611891672U, // <3,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
- 2685633502U, // <3,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
- 2685633512U, // <3,1,3,3>: Cost 3 vext3 LHS, <1,3,3,1>
- 1484459318U, // <3,1,3,4>: Cost 2 vext1 <1,3,1,3>, RHS
- 1611891712U, // <3,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
- 2689836041U, // <3,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
- 2733409294U, // <3,1,3,7>: Cost 3 vext3 LHS, <1,3,7,3>
- 1611891735U, // <3,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
- 2552234086U, // <3,1,4,0>: Cost 3 vext1 <0,3,1,4>, LHS
- 2732966955U, // <3,1,4,1>: Cost 3 vext3 LHS, <1,4,1,5>
- 2732966964U, // <3,1,4,2>: Cost 3 vext3 LHS, <1,4,2,5>
- 2685633597U, // <3,1,4,3>: Cost 3 vext3 LHS, <1,4,3,5>
- 2552237366U, // <3,1,4,4>: Cost 3 vext1 <0,3,1,4>, RHS
- 2618805558U, // <3,1,4,5>: Cost 3 vext2 <0,2,3,1>, RHS
- 2769472822U, // <3,1,4,6>: Cost 3 vuzpl <3,0,1,2>, RHS
- 3667784943U, // <3,1,4,7>: Cost 4 vext1 <7,3,1,4>, <7,3,1,4>
- 2685633642U, // <3,1,4,u>: Cost 3 vext3 LHS, <1,4,u,5>
- 2689836143U, // <3,1,5,0>: Cost 3 vext3 LHS, <1,5,0,1>
- 2564187280U, // <3,1,5,1>: Cost 3 vext1 <2,3,1,5>, <1,5,3,7>
- 2564187827U, // <3,1,5,2>: Cost 3 vext1 <2,3,1,5>, <2,3,1,5>
- 1611891856U, // <3,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
- 2689836183U, // <3,1,5,4>: Cost 3 vext3 LHS, <1,5,4,5>
- 3759375522U, // <3,1,5,5>: Cost 4 vext3 LHS, <1,5,5,7>
- 3720417378U, // <3,1,5,6>: Cost 4 vext2 <4,u,3,1>, <5,6,7,0>
- 2832518454U, // <3,1,5,7>: Cost 3 vuzpr <2,3,0,1>, RHS
- 1611891901U, // <3,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
- 3763578048U, // <3,1,6,0>: Cost 4 vext3 LHS, <1,6,0,1>
- 2689836239U, // <3,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
- 2732967128U, // <3,1,6,2>: Cost 3 vext3 LHS, <1,6,2,7>
- 2685633761U, // <3,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
- 3763578088U, // <3,1,6,4>: Cost 4 vext3 LHS, <1,6,4,5>
- 2689836275U, // <3,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
- 3763578108U, // <3,1,6,6>: Cost 4 vext3 LHS, <1,6,6,7>
- 2732967166U, // <3,1,6,7>: Cost 3 vext3 LHS, <1,6,7,0>
- 2685633806U, // <3,1,6,u>: Cost 3 vext3 LHS, <1,6,u,7>
- 3631972454U, // <3,1,7,0>: Cost 4 vext1 <1,3,1,7>, LHS
- 2659947612U, // <3,1,7,1>: Cost 3 vext2 <7,1,3,1>, <7,1,3,1>
- 4036102294U, // <3,1,7,2>: Cost 4 vzipr <1,5,3,7>, <3,0,1,2>
- 3095396454U, // <3,1,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
- 3631975734U, // <3,1,7,4>: Cost 4 vext1 <1,3,1,7>, RHS
- 2222982144U, // <3,1,7,5>: Cost 3 vrev <1,3,5,7>
- 3296797705U, // <3,1,7,6>: Cost 4 vrev <1,3,6,7>
- 3720418924U, // <3,1,7,7>: Cost 4 vext2 <4,u,3,1>, <7,7,7,7>
- 3095396459U, // <3,1,7,u>: Cost 3 vtrnr <1,3,5,7>, LHS
- 1484496998U, // <3,1,u,0>: Cost 2 vext1 <1,3,1,u>, LHS
- 1611892077U, // <3,1,u,1>: Cost 2 vext3 LHS, <1,u,1,3>
- 2685633907U, // <3,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
- 1611892092U, // <3,1,u,3>: Cost 2 vext3 LHS, <1,u,3,0>
- 1484500278U, // <3,1,u,4>: Cost 2 vext1 <1,3,1,u>, RHS
- 1611892117U, // <3,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
- 2685633950U, // <3,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
- 2832518697U, // <3,1,u,7>: Cost 3 vuzpr <2,3,0,1>, RHS
- 1611892140U, // <3,1,u,u>: Cost 2 vext3 LHS, <1,u,u,3>
- 2623455232U, // <3,2,0,0>: Cost 3 vext2 <1,0,3,2>, <0,0,0,0>
- 1549713510U, // <3,2,0,1>: Cost 2 vext2 <1,0,3,2>, LHS
- 2689836484U, // <3,2,0,2>: Cost 3 vext3 LHS, <2,0,2,0>
- 2685633997U, // <3,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
- 2623455570U, // <3,2,0,4>: Cost 3 vext2 <1,0,3,2>, <0,4,1,5>
- 2732967398U, // <3,2,0,5>: Cost 3 vext3 LHS, <2,0,5,7>
- 2689836524U, // <3,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
- 2229044964U, // <3,2,0,7>: Cost 3 vrev <2,3,7,0>
- 1549714077U, // <3,2,0,u>: Cost 2 vext2 <1,0,3,2>, LHS
- 1549714166U, // <3,2,1,0>: Cost 2 vext2 <1,0,3,2>, <1,0,3,2>
- 2623456052U, // <3,2,1,1>: Cost 3 vext2 <1,0,3,2>, <1,1,1,1>
- 2623456150U, // <3,2,1,2>: Cost 3 vext2 <1,0,3,2>, <1,2,3,0>
- 2685634079U, // <3,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
- 2552286518U, // <3,2,1,4>: Cost 3 vext1 <0,3,2,1>, RHS
- 2623456400U, // <3,2,1,5>: Cost 3 vext2 <1,0,3,2>, <1,5,3,7>
- 2689836604U, // <3,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
- 3667834101U, // <3,2,1,7>: Cost 4 vext1 <7,3,2,1>, <7,3,2,1>
- 1155385070U, // <3,2,1,u>: Cost 2 vrev <2,3,u,1>
- 2689836629U, // <3,2,2,0>: Cost 3 vext3 LHS, <2,2,0,1>
- 2689836640U, // <3,2,2,1>: Cost 3 vext3 LHS, <2,2,1,3>
- 1611449960U, // <3,2,2,2>: Cost 2 vext3 LHS, <2,2,2,2>
- 1611892338U, // <3,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
- 2689836669U, // <3,2,2,4>: Cost 3 vext3 LHS, <2,2,4,5>
- 2689836680U, // <3,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
- 2689836688U, // <3,2,2,6>: Cost 3 vext3 LHS, <2,2,6,6>
- 3763578518U, // <3,2,2,7>: Cost 4 vext3 LHS, <2,2,7,3>
- 1611892383U, // <3,2,2,u>: Cost 2 vext3 LHS, <2,2,u,3>
- 1611450022U, // <3,2,3,0>: Cost 2 vext3 LHS, <2,3,0,1>
- 2685191854U, // <3,2,3,1>: Cost 3 vext3 LHS, <2,3,1,0>
- 2685191865U, // <3,2,3,2>: Cost 3 vext3 LHS, <2,3,2,2>
- 2685191875U, // <3,2,3,3>: Cost 3 vext3 LHS, <2,3,3,3>
- 1611450062U, // <3,2,3,4>: Cost 2 vext3 LHS, <2,3,4,5>
- 2732967635U, // <3,2,3,5>: Cost 3 vext3 LHS, <2,3,5,1>
- 2732967645U, // <3,2,3,6>: Cost 3 vext3 LHS, <2,3,6,2>
- 2732967652U, // <3,2,3,7>: Cost 3 vext3 LHS, <2,3,7,0>
- 1611450094U, // <3,2,3,u>: Cost 2 vext3 LHS, <2,3,u,1>
- 2558279782U, // <3,2,4,0>: Cost 3 vext1 <1,3,2,4>, LHS
- 2558280602U, // <3,2,4,1>: Cost 3 vext1 <1,3,2,4>, <1,2,3,4>
- 2732967692U, // <3,2,4,2>: Cost 3 vext3 LHS, <2,4,2,4>
- 2685634326U, // <3,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
- 2558283062U, // <3,2,4,4>: Cost 3 vext1 <1,3,2,4>, RHS
- 1549716790U, // <3,2,4,5>: Cost 2 vext2 <1,0,3,2>, RHS
- 2689836844U, // <3,2,4,6>: Cost 3 vext3 LHS, <2,4,6,0>
- 2229077736U, // <3,2,4,7>: Cost 3 vrev <2,3,7,4>
- 1549717033U, // <3,2,4,u>: Cost 2 vext2 <1,0,3,2>, RHS
- 2552316006U, // <3,2,5,0>: Cost 3 vext1 <0,3,2,5>, LHS
- 2228643507U, // <3,2,5,1>: Cost 3 vrev <2,3,1,5>
- 2689836896U, // <3,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
- 2685634408U, // <3,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
- 1155122894U, // <3,2,5,4>: Cost 2 vrev <2,3,4,5>
- 2665263108U, // <3,2,5,5>: Cost 3 vext2 <u,0,3,2>, <5,5,5,5>
- 2689836932U, // <3,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
- 2665263272U, // <3,2,5,7>: Cost 3 vext2 <u,0,3,2>, <5,7,5,7>
- 1155417842U, // <3,2,5,u>: Cost 2 vrev <2,3,u,5>
- 2689836953U, // <3,2,6,0>: Cost 3 vext3 LHS, <2,6,0,1>
- 2689836964U, // <3,2,6,1>: Cost 3 vext3 LHS, <2,6,1,3>
- 2689836976U, // <3,2,6,2>: Cost 3 vext3 LHS, <2,6,2,6>
- 1611892666U, // <3,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
- 2689836993U, // <3,2,6,4>: Cost 3 vext3 LHS, <2,6,4,5>
- 2689837004U, // <3,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
- 2689837013U, // <3,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
- 2665263950U, // <3,2,6,7>: Cost 3 vext2 <u,0,3,2>, <6,7,0,1>
- 1611892711U, // <3,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
- 2665264122U, // <3,2,7,0>: Cost 3 vext2 <u,0,3,2>, <7,0,1,2>
- 2623460419U, // <3,2,7,1>: Cost 3 vext2 <1,0,3,2>, <7,1,0,3>
- 4169138340U, // <3,2,7,2>: Cost 4 vtrnr <1,3,5,7>, <0,2,0,2>
- 2962358374U, // <3,2,7,3>: Cost 3 vzipr <1,5,3,7>, LHS
- 2665264486U, // <3,2,7,4>: Cost 3 vext2 <u,0,3,2>, <7,4,5,6>
- 2228954841U, // <3,2,7,5>: Cost 3 vrev <2,3,5,7>
- 2229028578U, // <3,2,7,6>: Cost 3 vrev <2,3,6,7>
- 2665264748U, // <3,2,7,7>: Cost 3 vext2 <u,0,3,2>, <7,7,7,7>
- 2962358379U, // <3,2,7,u>: Cost 3 vzipr <1,5,3,7>, LHS
- 1611892795U, // <3,2,u,0>: Cost 2 vext3 LHS, <2,u,0,1>
- 1549719342U, // <3,2,u,1>: Cost 2 vext2 <1,0,3,2>, LHS
- 1611449960U, // <3,2,u,2>: Cost 2 vext3 LHS, <2,2,2,2>
- 1611892824U, // <3,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
- 1611892835U, // <3,2,u,4>: Cost 2 vext3 LHS, <2,u,4,5>
- 1549719706U, // <3,2,u,5>: Cost 2 vext2 <1,0,3,2>, RHS
- 2689837168U, // <3,2,u,6>: Cost 3 vext3 LHS, <2,u,6,0>
- 2665265408U, // <3,2,u,7>: Cost 3 vext2 <u,0,3,2>, <u,7,0,1>
- 1611892867U, // <3,2,u,u>: Cost 2 vext3 LHS, <2,u,u,1>
- 2685192331U, // <3,3,0,0>: Cost 3 vext3 LHS, <3,0,0,0>
- 1611450518U, // <3,3,0,1>: Cost 2 vext3 LHS, <3,0,1,2>
- 2685634717U, // <3,3,0,2>: Cost 3 vext3 LHS, <3,0,2,0>
- 2564294806U, // <3,3,0,3>: Cost 3 vext1 <2,3,3,0>, <3,0,1,2>
- 2685634736U, // <3,3,0,4>: Cost 3 vext3 LHS, <3,0,4,1>
- 2732968122U, // <3,3,0,5>: Cost 3 vext3 LHS, <3,0,5,2>
- 3763579075U, // <3,3,0,6>: Cost 4 vext3 LHS, <3,0,6,2>
- 4034053264U, // <3,3,0,7>: Cost 4 vzipr <1,2,3,0>, <1,5,3,7>
- 1611450581U, // <3,3,0,u>: Cost 2 vext3 LHS, <3,0,u,2>
- 2685192415U, // <3,3,1,0>: Cost 3 vext3 LHS, <3,1,0,3>
- 1550385992U, // <3,3,1,1>: Cost 2 vext2 <1,1,3,3>, <1,1,3,3>
- 2685192433U, // <3,3,1,2>: Cost 3 vext3 LHS, <3,1,2,3>
- 2685634808U, // <3,3,1,3>: Cost 3 vext3 LHS, <3,1,3,1>
- 2558332214U, // <3,3,1,4>: Cost 3 vext1 <1,3,3,1>, RHS
- 2685634828U, // <3,3,1,5>: Cost 3 vext3 LHS, <3,1,5,3>
- 3759376661U, // <3,3,1,6>: Cost 4 vext3 LHS, <3,1,6,3>
- 2703477022U, // <3,3,1,7>: Cost 3 vext3 <3,1,7,3>, <3,1,7,3>
- 1555031423U, // <3,3,1,u>: Cost 2 vext2 <1,u,3,3>, <1,u,3,3>
- 2564309094U, // <3,3,2,0>: Cost 3 vext1 <2,3,3,2>, LHS
- 2630100513U, // <3,3,2,1>: Cost 3 vext2 <2,1,3,3>, <2,1,3,3>
- 1557022322U, // <3,3,2,2>: Cost 2 vext2 <2,2,3,3>, <2,2,3,3>
- 2685192520U, // <3,3,2,3>: Cost 3 vext3 LHS, <3,2,3,0>
- 2564312374U, // <3,3,2,4>: Cost 3 vext1 <2,3,3,2>, RHS
- 2732968286U, // <3,3,2,5>: Cost 3 vext3 LHS, <3,2,5,4>
- 2685634918U, // <3,3,2,6>: Cost 3 vext3 LHS, <3,2,6,3>
- 2704140655U, // <3,3,2,7>: Cost 3 vext3 <3,2,7,3>, <3,2,7,3>
- 1561004120U, // <3,3,2,u>: Cost 2 vext2 <2,u,3,3>, <2,u,3,3>
- 1496547430U, // <3,3,3,0>: Cost 2 vext1 <3,3,3,3>, LHS
- 2624129256U, // <3,3,3,1>: Cost 3 vext2 <1,1,3,3>, <3,1,1,3>
- 2630764866U, // <3,3,3,2>: Cost 3 vext2 <2,2,3,3>, <3,2,2,3>
- 336380006U, // <3,3,3,3>: Cost 1 vdup3 LHS
- 1496550710U, // <3,3,3,4>: Cost 2 vext1 <3,3,3,3>, RHS
- 2732968368U, // <3,3,3,5>: Cost 3 vext3 LHS, <3,3,5,5>
- 2624129683U, // <3,3,3,6>: Cost 3 vext2 <1,1,3,3>, <3,6,3,7>
- 2594182400U, // <3,3,3,7>: Cost 3 vext1 <7,3,3,3>, <7,3,3,3>
- 336380006U, // <3,3,3,u>: Cost 1 vdup3 LHS
- 2558353510U, // <3,3,4,0>: Cost 3 vext1 <1,3,3,4>, LHS
- 2558354411U, // <3,3,4,1>: Cost 3 vext1 <1,3,3,4>, <1,3,3,4>
- 2564327108U, // <3,3,4,2>: Cost 3 vext1 <2,3,3,4>, <2,3,3,4>
- 2564327938U, // <3,3,4,3>: Cost 3 vext1 <2,3,3,4>, <3,4,5,6>
- 2960343962U, // <3,3,4,4>: Cost 3 vzipr <1,2,3,4>, <1,2,3,4>
- 1611893250U, // <3,3,4,5>: Cost 2 vext3 LHS, <3,4,5,6>
- 2771619126U, // <3,3,4,6>: Cost 3 vuzpl <3,3,3,3>, RHS
- 4034086032U, // <3,3,4,7>: Cost 4 vzipr <1,2,3,4>, <1,5,3,7>
- 1611893277U, // <3,3,4,u>: Cost 2 vext3 LHS, <3,4,u,6>
- 2558361702U, // <3,3,5,0>: Cost 3 vext1 <1,3,3,5>, LHS
- 2558362604U, // <3,3,5,1>: Cost 3 vext1 <1,3,3,5>, <1,3,3,5>
- 2558363342U, // <3,3,5,2>: Cost 3 vext1 <1,3,3,5>, <2,3,4,5>
- 2732968512U, // <3,3,5,3>: Cost 3 vext3 LHS, <3,5,3,5>
- 2558364982U, // <3,3,5,4>: Cost 3 vext1 <1,3,3,5>, RHS
- 3101279950U, // <3,3,5,5>: Cost 3 vtrnr <2,3,4,5>, <2,3,4,5>
- 2665934946U, // <3,3,5,6>: Cost 3 vext2 <u,1,3,3>, <5,6,7,0>
- 2826636598U, // <3,3,5,7>: Cost 3 vuzpr <1,3,1,3>, RHS
- 2826636599U, // <3,3,5,u>: Cost 3 vuzpr <1,3,1,3>, RHS
- 2732968568U, // <3,3,6,0>: Cost 3 vext3 LHS, <3,6,0,7>
- 3763579521U, // <3,3,6,1>: Cost 4 vext3 LHS, <3,6,1,7>
- 2732968586U, // <3,3,6,2>: Cost 3 vext3 LHS, <3,6,2,7>
- 2732968595U, // <3,3,6,3>: Cost 3 vext3 LHS, <3,6,3,7>
- 2732968604U, // <3,3,6,4>: Cost 3 vext3 LHS, <3,6,4,7>
- 3763579557U, // <3,3,6,5>: Cost 4 vext3 LHS, <3,6,5,7>
- 2732968621U, // <3,3,6,6>: Cost 3 vext3 LHS, <3,6,6,6>
- 2657973099U, // <3,3,6,7>: Cost 3 vext2 <6,7,3,3>, <6,7,3,3>
- 2658636732U, // <3,3,6,u>: Cost 3 vext2 <6,u,3,3>, <6,u,3,3>
- 2558378086U, // <3,3,7,0>: Cost 3 vext1 <1,3,3,7>, LHS
- 2558378990U, // <3,3,7,1>: Cost 3 vext1 <1,3,3,7>, <1,3,3,7>
- 2564351687U, // <3,3,7,2>: Cost 3 vext1 <2,3,3,7>, <2,3,3,7>
- 2661291264U, // <3,3,7,3>: Cost 3 vext2 <7,3,3,3>, <7,3,3,3>
- 2558381366U, // <3,3,7,4>: Cost 3 vext1 <1,3,3,7>, RHS
- 2732968694U, // <3,3,7,5>: Cost 3 vext3 LHS, <3,7,5,7>
- 3781126907U, // <3,3,7,6>: Cost 4 vext3 <3,7,6,3>, <3,7,6,3>
- 3095397376U, // <3,3,7,7>: Cost 3 vtrnr <1,3,5,7>, <1,3,5,7>
- 2558383918U, // <3,3,7,u>: Cost 3 vext1 <1,3,3,7>, LHS
- 1496547430U, // <3,3,u,0>: Cost 2 vext1 <3,3,3,3>, LHS
- 1611893534U, // <3,3,u,1>: Cost 2 vext3 LHS, <3,u,1,2>
- 1592858504U, // <3,3,u,2>: Cost 2 vext2 <u,2,3,3>, <u,2,3,3>
- 336380006U, // <3,3,u,3>: Cost 1 vdup3 LHS
- 1496550710U, // <3,3,u,4>: Cost 2 vext1 <3,3,3,3>, RHS
- 1611893574U, // <3,3,u,5>: Cost 2 vext3 LHS, <3,u,5,6>
- 2690280268U, // <3,3,u,6>: Cost 3 vext3 LHS, <3,u,6,3>
- 2826636841U, // <3,3,u,7>: Cost 3 vuzpr <1,3,1,3>, RHS
- 336380006U, // <3,3,u,u>: Cost 1 vdup3 LHS
- 2624798720U, // <3,4,0,0>: Cost 3 vext2 <1,2,3,4>, <0,0,0,0>
- 1551056998U, // <3,4,0,1>: Cost 2 vext2 <1,2,3,4>, LHS
- 2624798884U, // <3,4,0,2>: Cost 3 vext2 <1,2,3,4>, <0,2,0,2>
- 3693232384U, // <3,4,0,3>: Cost 4 vext2 <0,3,3,4>, <0,3,1,4>
- 2624799058U, // <3,4,0,4>: Cost 3 vext2 <1,2,3,4>, <0,4,1,5>
- 1659227026U, // <3,4,0,5>: Cost 2 vext3 LHS, <4,0,5,1>
- 1659227036U, // <3,4,0,6>: Cost 2 vext3 LHS, <4,0,6,2>
- 3667973382U, // <3,4,0,7>: Cost 4 vext1 <7,3,4,0>, <7,3,4,0>
- 1551057565U, // <3,4,0,u>: Cost 2 vext2 <1,2,3,4>, LHS
- 2624799478U, // <3,4,1,0>: Cost 3 vext2 <1,2,3,4>, <1,0,3,2>
- 2624799540U, // <3,4,1,1>: Cost 3 vext2 <1,2,3,4>, <1,1,1,1>
- 1551057818U, // <3,4,1,2>: Cost 2 vext2 <1,2,3,4>, <1,2,3,4>
- 2624799704U, // <3,4,1,3>: Cost 3 vext2 <1,2,3,4>, <1,3,1,3>
- 2564377910U, // <3,4,1,4>: Cost 3 vext1 <2,3,4,1>, RHS
- 2689838050U, // <3,4,1,5>: Cost 3 vext3 LHS, <4,1,5,0>
- 2689838062U, // <3,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
- 2628117807U, // <3,4,1,7>: Cost 3 vext2 <1,7,3,4>, <1,7,3,4>
- 1555039616U, // <3,4,1,u>: Cost 2 vext2 <1,u,3,4>, <1,u,3,4>
- 3626180710U, // <3,4,2,0>: Cost 4 vext1 <0,3,4,2>, LHS
- 2624800298U, // <3,4,2,1>: Cost 3 vext2 <1,2,3,4>, <2,1,4,3>
- 2624800360U, // <3,4,2,2>: Cost 3 vext2 <1,2,3,4>, <2,2,2,2>
- 2624800422U, // <3,4,2,3>: Cost 3 vext2 <1,2,3,4>, <2,3,0,1>
- 2624800514U, // <3,4,2,4>: Cost 3 vext2 <1,2,3,4>, <2,4,1,3>
- 2709965878U, // <3,4,2,5>: Cost 3 vext3 <4,2,5,3>, <4,2,5,3>
- 2689838140U, // <3,4,2,6>: Cost 3 vext3 LHS, <4,2,6,0>
- 2634090504U, // <3,4,2,7>: Cost 3 vext2 <2,7,3,4>, <2,7,3,4>
- 2689838158U, // <3,4,2,u>: Cost 3 vext3 LHS, <4,2,u,0>
- 2624800918U, // <3,4,3,0>: Cost 3 vext2 <1,2,3,4>, <3,0,1,2>
- 2636081403U, // <3,4,3,1>: Cost 3 vext2 <3,1,3,4>, <3,1,3,4>
- 2636745036U, // <3,4,3,2>: Cost 3 vext2 <3,2,3,4>, <3,2,3,4>
- 2624801180U, // <3,4,3,3>: Cost 3 vext2 <1,2,3,4>, <3,3,3,3>
- 2624801232U, // <3,4,3,4>: Cost 3 vext2 <1,2,3,4>, <3,4,0,1>
- 2905836854U, // <3,4,3,5>: Cost 3 vzipl <3,3,3,3>, RHS
- 3040054582U, // <3,4,3,6>: Cost 3 vtrnl <3,3,3,3>, RHS
- 3702524611U, // <3,4,3,7>: Cost 4 vext2 <1,u,3,4>, <3,7,0,1>
- 2624801566U, // <3,4,3,u>: Cost 3 vext2 <1,2,3,4>, <3,u,1,2>
- 2564399206U, // <3,4,4,0>: Cost 3 vext1 <2,3,4,4>, LHS
- 2564400026U, // <3,4,4,1>: Cost 3 vext1 <2,3,4,4>, <1,2,3,4>
- 2564400845U, // <3,4,4,2>: Cost 3 vext1 <2,3,4,4>, <2,3,4,4>
- 2570373542U, // <3,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
- 1659227344U, // <3,4,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
- 1551060278U, // <3,4,4,5>: Cost 2 vext2 <1,2,3,4>, RHS
- 1659227364U, // <3,4,4,6>: Cost 2 vext3 LHS, <4,4,6,6>
- 3668006154U, // <3,4,4,7>: Cost 4 vext1 <7,3,4,4>, <7,3,4,4>
- 1551060521U, // <3,4,4,u>: Cost 2 vext2 <1,2,3,4>, RHS
- 1490665574U, // <3,4,5,0>: Cost 2 vext1 <2,3,4,5>, LHS
- 2689838341U, // <3,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
- 1490667214U, // <3,4,5,2>: Cost 2 vext1 <2,3,4,5>, <2,3,4,5>
- 2564409494U, // <3,4,5,3>: Cost 3 vext1 <2,3,4,5>, <3,0,1,2>
- 1490668854U, // <3,4,5,4>: Cost 2 vext1 <2,3,4,5>, RHS
- 2689838381U, // <3,4,5,5>: Cost 3 vext3 LHS, <4,5,5,7>
- 537709878U, // <3,4,5,6>: Cost 1 vext3 LHS, RHS
- 2594272523U, // <3,4,5,7>: Cost 3 vext1 <7,3,4,5>, <7,3,4,5>
- 537709896U, // <3,4,5,u>: Cost 1 vext3 LHS, RHS
- 2689838411U, // <3,4,6,0>: Cost 3 vext3 LHS, <4,6,0,1>
- 2558444534U, // <3,4,6,1>: Cost 3 vext1 <1,3,4,6>, <1,3,4,6>
- 2666607098U, // <3,4,6,2>: Cost 3 vext2 <u,2,3,4>, <6,2,7,3>
- 2558446082U, // <3,4,6,3>: Cost 3 vext1 <1,3,4,6>, <3,4,5,6>
- 1659227508U, // <3,4,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
- 2689838462U, // <3,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
- 2689838471U, // <3,4,6,6>: Cost 3 vext3 LHS, <4,6,6,7>
- 2657981292U, // <3,4,6,7>: Cost 3 vext2 <6,7,3,4>, <6,7,3,4>
- 1659227540U, // <3,4,6,u>: Cost 2 vext3 LHS, <4,6,u,2>
- 2666607610U, // <3,4,7,0>: Cost 3 vext2 <u,2,3,4>, <7,0,1,2>
- 3702527072U, // <3,4,7,1>: Cost 4 vext2 <1,u,3,4>, <7,1,3,5>
- 2660635824U, // <3,4,7,2>: Cost 3 vext2 <7,2,3,4>, <7,2,3,4>
- 3644139945U, // <3,4,7,3>: Cost 4 vext1 <3,3,4,7>, <3,3,4,7>
- 2666607974U, // <3,4,7,4>: Cost 3 vext2 <u,2,3,4>, <7,4,5,6>
- 2732969416U, // <3,4,7,5>: Cost 3 vext3 LHS, <4,7,5,0>
- 2732969425U, // <3,4,7,6>: Cost 3 vext3 LHS, <4,7,6,0>
- 2666608236U, // <3,4,7,7>: Cost 3 vext2 <u,2,3,4>, <7,7,7,7>
- 2664617622U, // <3,4,7,u>: Cost 3 vext2 <7,u,3,4>, <7,u,3,4>
- 1490690150U, // <3,4,u,0>: Cost 2 vext1 <2,3,4,u>, LHS
- 1551062830U, // <3,4,u,1>: Cost 2 vext2 <1,2,3,4>, LHS
- 1490691793U, // <3,4,u,2>: Cost 2 vext1 <2,3,4,u>, <2,3,4,u>
- 2624804796U, // <3,4,u,3>: Cost 3 vext2 <1,2,3,4>, <u,3,0,1>
- 1490693430U, // <3,4,u,4>: Cost 2 vext1 <2,3,4,u>, RHS
- 1551063194U, // <3,4,u,5>: Cost 2 vext2 <1,2,3,4>, RHS
- 537710121U, // <3,4,u,6>: Cost 1 vext3 LHS, RHS
- 2594297102U, // <3,4,u,7>: Cost 3 vext1 <7,3,4,u>, <7,3,4,u>
- 537710139U, // <3,4,u,u>: Cost 1 vext3 LHS, RHS
- 3692576768U, // <3,5,0,0>: Cost 4 vext2 <0,2,3,5>, <0,0,0,0>
- 2618835046U, // <3,5,0,1>: Cost 3 vext2 <0,2,3,5>, LHS
- 2618835138U, // <3,5,0,2>: Cost 3 vext2 <0,2,3,5>, <0,2,3,5>
- 3692577024U, // <3,5,0,3>: Cost 4 vext2 <0,2,3,5>, <0,3,1,4>
- 2689838690U, // <3,5,0,4>: Cost 3 vext3 LHS, <5,0,4,1>
- 2732969579U, // <3,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
- 2732969588U, // <3,5,0,6>: Cost 3 vext3 LHS, <5,0,6,1>
- 2246963055U, // <3,5,0,7>: Cost 3 vrev <5,3,7,0>
- 2618835613U, // <3,5,0,u>: Cost 3 vext2 <0,2,3,5>, LHS
- 2594308198U, // <3,5,1,0>: Cost 3 vext1 <7,3,5,1>, LHS
- 3692577588U, // <3,5,1,1>: Cost 4 vext2 <0,2,3,5>, <1,1,1,1>
- 2624807835U, // <3,5,1,2>: Cost 3 vext2 <1,2,3,5>, <1,2,3,5>
- 2625471468U, // <3,5,1,3>: Cost 3 vext2 <1,3,3,5>, <1,3,3,5>
- 2626135101U, // <3,5,1,4>: Cost 3 vext2 <1,4,3,5>, <1,4,3,5>
- 2594311888U, // <3,5,1,5>: Cost 3 vext1 <7,3,5,1>, <5,1,7,3>
- 3699877107U, // <3,5,1,6>: Cost 4 vext2 <1,4,3,5>, <1,6,5,7>
- 1641680592U, // <3,5,1,7>: Cost 2 vext3 <5,1,7,3>, <5,1,7,3>
- 1641754329U, // <3,5,1,u>: Cost 2 vext3 <5,1,u,3>, <5,1,u,3>
- 3692578274U, // <3,5,2,0>: Cost 4 vext2 <0,2,3,5>, <2,0,5,3>
- 2630116899U, // <3,5,2,1>: Cost 3 vext2 <2,1,3,5>, <2,1,3,5>
- 3692578408U, // <3,5,2,2>: Cost 4 vext2 <0,2,3,5>, <2,2,2,2>
- 2625472206U, // <3,5,2,3>: Cost 3 vext2 <1,3,3,5>, <2,3,4,5>
- 2632107798U, // <3,5,2,4>: Cost 3 vext2 <2,4,3,5>, <2,4,3,5>
- 2715938575U, // <3,5,2,5>: Cost 3 vext3 <5,2,5,3>, <5,2,5,3>
- 3692578746U, // <3,5,2,6>: Cost 4 vext2 <0,2,3,5>, <2,6,3,7>
- 2716086049U, // <3,5,2,7>: Cost 3 vext3 <5,2,7,3>, <5,2,7,3>
- 2634762330U, // <3,5,2,u>: Cost 3 vext2 <2,u,3,5>, <2,u,3,5>
- 3692578966U, // <3,5,3,0>: Cost 4 vext2 <0,2,3,5>, <3,0,1,2>
- 2636089596U, // <3,5,3,1>: Cost 3 vext2 <3,1,3,5>, <3,1,3,5>
- 3699214668U, // <3,5,3,2>: Cost 4 vext2 <1,3,3,5>, <3,2,3,4>
- 2638080412U, // <3,5,3,3>: Cost 3 vext2 <3,4,3,5>, <3,3,3,3>
- 2618837506U, // <3,5,3,4>: Cost 3 vext2 <0,2,3,5>, <3,4,5,6>
- 2832844494U, // <3,5,3,5>: Cost 3 vuzpr <2,3,4,5>, <2,3,4,5>
- 4033415682U, // <3,5,3,6>: Cost 4 vzipr <1,1,3,3>, <3,4,5,6>
- 3095072054U, // <3,5,3,7>: Cost 3 vtrnr <1,3,1,3>, RHS
- 3095072055U, // <3,5,3,u>: Cost 3 vtrnr <1,3,1,3>, RHS
- 2600304742U, // <3,5,4,0>: Cost 3 vext1 <u,3,5,4>, LHS
- 3763580815U, // <3,5,4,1>: Cost 4 vext3 LHS, <5,4,1,5>
- 2564474582U, // <3,5,4,2>: Cost 3 vext1 <2,3,5,4>, <2,3,5,4>
- 3699879044U, // <3,5,4,3>: Cost 4 vext2 <1,4,3,5>, <4,3,5,0>
- 2600308022U, // <3,5,4,4>: Cost 3 vext1 <u,3,5,4>, RHS
- 2618838326U, // <3,5,4,5>: Cost 3 vext2 <0,2,3,5>, RHS
- 2772454710U, // <3,5,4,6>: Cost 3 vuzpl <3,4,5,6>, RHS
- 1659228102U, // <3,5,4,7>: Cost 2 vext3 LHS, <5,4,7,6>
- 1659228111U, // <3,5,4,u>: Cost 2 vext3 LHS, <5,4,u,6>
- 2570453094U, // <3,5,5,0>: Cost 3 vext1 <3,3,5,5>, LHS
- 2624810704U, // <3,5,5,1>: Cost 3 vext2 <1,2,3,5>, <5,1,7,3>
- 2570454734U, // <3,5,5,2>: Cost 3 vext1 <3,3,5,5>, <2,3,4,5>
- 2570455472U, // <3,5,5,3>: Cost 3 vext1 <3,3,5,5>, <3,3,5,5>
- 2570456374U, // <3,5,5,4>: Cost 3 vext1 <3,3,5,5>, RHS
- 1659228164U, // <3,5,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
- 2732969998U, // <3,5,5,6>: Cost 3 vext3 LHS, <5,5,6,6>
- 1659228184U, // <3,5,5,7>: Cost 2 vext3 LHS, <5,5,7,7>
- 1659228193U, // <3,5,5,u>: Cost 2 vext3 LHS, <5,5,u,7>
- 2732970020U, // <3,5,6,0>: Cost 3 vext3 LHS, <5,6,0,1>
- 2732970035U, // <3,5,6,1>: Cost 3 vext3 LHS, <5,6,1,7>
- 2564490968U, // <3,5,6,2>: Cost 3 vext1 <2,3,5,6>, <2,3,5,6>
- 2732970050U, // <3,5,6,3>: Cost 3 vext3 LHS, <5,6,3,4>
- 2732970060U, // <3,5,6,4>: Cost 3 vext3 LHS, <5,6,4,5>
- 2732970071U, // <3,5,6,5>: Cost 3 vext3 LHS, <5,6,5,7>
- 2732970080U, // <3,5,6,6>: Cost 3 vext3 LHS, <5,6,6,7>
- 1659228258U, // <3,5,6,7>: Cost 2 vext3 LHS, <5,6,7,0>
- 1659228267U, // <3,5,6,u>: Cost 2 vext3 LHS, <5,6,u,0>
- 1484783718U, // <3,5,7,0>: Cost 2 vext1 <1,3,5,7>, LHS
- 1484784640U, // <3,5,7,1>: Cost 2 vext1 <1,3,5,7>, <1,3,5,7>
- 2558527080U, // <3,5,7,2>: Cost 3 vext1 <1,3,5,7>, <2,2,2,2>
- 2558527638U, // <3,5,7,3>: Cost 3 vext1 <1,3,5,7>, <3,0,1,2>
- 1484786998U, // <3,5,7,4>: Cost 2 vext1 <1,3,5,7>, RHS
- 1659228328U, // <3,5,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
- 2732970154U, // <3,5,7,6>: Cost 3 vext3 LHS, <5,7,6,0>
- 2558531180U, // <3,5,7,7>: Cost 3 vext1 <1,3,5,7>, <7,7,7,7>
- 1484789550U, // <3,5,7,u>: Cost 2 vext1 <1,3,5,7>, LHS
- 1484791910U, // <3,5,u,0>: Cost 2 vext1 <1,3,5,u>, LHS
- 1484792833U, // <3,5,u,1>: Cost 2 vext1 <1,3,5,u>, <1,3,5,u>
- 2558535272U, // <3,5,u,2>: Cost 3 vext1 <1,3,5,u>, <2,2,2,2>
- 2558535830U, // <3,5,u,3>: Cost 3 vext1 <1,3,5,u>, <3,0,1,2>
- 1484795190U, // <3,5,u,4>: Cost 2 vext1 <1,3,5,u>, RHS
- 1659228409U, // <3,5,u,5>: Cost 2 vext3 LHS, <5,u,5,7>
- 2772457626U, // <3,5,u,6>: Cost 3 vuzpl <3,4,5,6>, RHS
- 1646326023U, // <3,5,u,7>: Cost 2 vext3 <5,u,7,3>, <5,u,7,3>
- 1484797742U, // <3,5,u,u>: Cost 2 vext1 <1,3,5,u>, LHS
- 2558541926U, // <3,6,0,0>: Cost 3 vext1 <1,3,6,0>, LHS
- 2689839393U, // <3,6,0,1>: Cost 3 vext3 LHS, <6,0,1,2>
- 2689839404U, // <3,6,0,2>: Cost 3 vext3 LHS, <6,0,2,4>
- 3706519808U, // <3,6,0,3>: Cost 4 vext2 <2,5,3,6>, <0,3,1,4>
- 2689839420U, // <3,6,0,4>: Cost 3 vext3 LHS, <6,0,4,2>
- 2732970314U, // <3,6,0,5>: Cost 3 vext3 LHS, <6,0,5,7>
- 2732970316U, // <3,6,0,6>: Cost 3 vext3 LHS, <6,0,6,0>
- 2960313654U, // <3,6,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
- 2689839456U, // <3,6,0,u>: Cost 3 vext3 LHS, <6,0,u,2>
- 3763581290U, // <3,6,1,0>: Cost 4 vext3 LHS, <6,1,0,3>
- 3763581297U, // <3,6,1,1>: Cost 4 vext3 LHS, <6,1,1,1>
- 2624816028U, // <3,6,1,2>: Cost 3 vext2 <1,2,3,6>, <1,2,3,6>
- 3763581315U, // <3,6,1,3>: Cost 4 vext3 LHS, <6,1,3,1>
- 2626143294U, // <3,6,1,4>: Cost 3 vext2 <1,4,3,6>, <1,4,3,6>
- 3763581335U, // <3,6,1,5>: Cost 4 vext3 LHS, <6,1,5,3>
- 2721321376U, // <3,6,1,6>: Cost 3 vext3 <6,1,6,3>, <6,1,6,3>
- 2721395113U, // <3,6,1,7>: Cost 3 vext3 <6,1,7,3>, <6,1,7,3>
- 2628797826U, // <3,6,1,u>: Cost 3 vext2 <1,u,3,6>, <1,u,3,6>
- 2594390118U, // <3,6,2,0>: Cost 3 vext1 <7,3,6,2>, LHS
- 2721616324U, // <3,6,2,1>: Cost 3 vext3 <6,2,1,3>, <6,2,1,3>
- 2630788725U, // <3,6,2,2>: Cost 3 vext2 <2,2,3,6>, <2,2,3,6>
- 3763581395U, // <3,6,2,3>: Cost 4 vext3 LHS, <6,2,3,0>
- 2632115991U, // <3,6,2,4>: Cost 3 vext2 <2,4,3,6>, <2,4,3,6>
- 2632779624U, // <3,6,2,5>: Cost 3 vext2 <2,5,3,6>, <2,5,3,6>
- 2594394618U, // <3,6,2,6>: Cost 3 vext1 <7,3,6,2>, <6,2,7,3>
- 1648316922U, // <3,6,2,7>: Cost 2 vext3 <6,2,7,3>, <6,2,7,3>
- 1648390659U, // <3,6,2,u>: Cost 2 vext3 <6,2,u,3>, <6,2,u,3>
- 3693914262U, // <3,6,3,0>: Cost 4 vext2 <0,4,3,6>, <3,0,1,2>
- 3638281176U, // <3,6,3,1>: Cost 4 vext1 <2,3,6,3>, <1,3,1,3>
- 3696568678U, // <3,6,3,2>: Cost 4 vext2 <0,u,3,6>, <3,2,6,3>
- 2638088604U, // <3,6,3,3>: Cost 3 vext2 <3,4,3,6>, <3,3,3,3>
- 2632780290U, // <3,6,3,4>: Cost 3 vext2 <2,5,3,6>, <3,4,5,6>
- 3712494145U, // <3,6,3,5>: Cost 4 vext2 <3,5,3,6>, <3,5,3,6>
- 3698559612U, // <3,6,3,6>: Cost 4 vext2 <1,2,3,6>, <3,6,1,2>
- 2959674678U, // <3,6,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
- 2959674679U, // <3,6,3,u>: Cost 3 vzipr <1,1,3,3>, RHS
- 3763581536U, // <3,6,4,0>: Cost 4 vext3 LHS, <6,4,0,6>
- 2722943590U, // <3,6,4,1>: Cost 3 vext3 <6,4,1,3>, <6,4,1,3>
- 2732970609U, // <3,6,4,2>: Cost 3 vext3 LHS, <6,4,2,5>
- 3698560147U, // <3,6,4,3>: Cost 4 vext2 <1,2,3,6>, <4,3,6,6>
- 2732970628U, // <3,6,4,4>: Cost 3 vext3 LHS, <6,4,4,6>
- 2689839757U, // <3,6,4,5>: Cost 3 vext3 LHS, <6,4,5,6>
- 2732970640U, // <3,6,4,6>: Cost 3 vext3 LHS, <6,4,6,0>
- 2960346422U, // <3,6,4,7>: Cost 3 vzipr <1,2,3,4>, RHS
- 2689839784U, // <3,6,4,u>: Cost 3 vext3 LHS, <6,4,u,6>
- 2576498790U, // <3,6,5,0>: Cost 3 vext1 <4,3,6,5>, LHS
- 3650241270U, // <3,6,5,1>: Cost 4 vext1 <4,3,6,5>, <1,0,3,2>
- 2732970692U, // <3,6,5,2>: Cost 3 vext3 LHS, <6,5,2,7>
- 2576501250U, // <3,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
- 2576501906U, // <3,6,5,4>: Cost 3 vext1 <4,3,6,5>, <4,3,6,5>
- 3650244622U, // <3,6,5,5>: Cost 4 vext1 <4,3,6,5>, <5,5,6,6>
- 4114633528U, // <3,6,5,6>: Cost 4 vtrnl <3,4,5,6>, <6,6,6,6>
- 2732970735U, // <3,6,5,7>: Cost 3 vext3 LHS, <6,5,7,5>
- 2576504622U, // <3,6,5,u>: Cost 3 vext1 <4,3,6,5>, LHS
- 2732970749U, // <3,6,6,0>: Cost 3 vext3 LHS, <6,6,0,1>
- 2724270856U, // <3,6,6,1>: Cost 3 vext3 <6,6,1,3>, <6,6,1,3>
- 2624819706U, // <3,6,6,2>: Cost 3 vext2 <1,2,3,6>, <6,2,7,3>
- 3656223234U, // <3,6,6,3>: Cost 4 vext1 <5,3,6,6>, <3,4,5,6>
- 2732970788U, // <3,6,6,4>: Cost 3 vext3 LHS, <6,6,4,4>
- 2732970800U, // <3,6,6,5>: Cost 3 vext3 LHS, <6,6,5,7>
- 1659228984U, // <3,6,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1659228994U, // <3,6,6,7>: Cost 2 vext3 LHS, <6,6,7,7>
- 1659229003U, // <3,6,6,u>: Cost 2 vext3 LHS, <6,6,u,7>
- 1659229006U, // <3,6,7,0>: Cost 2 vext3 LHS, <6,7,0,1>
- 2558600201U, // <3,6,7,1>: Cost 3 vext1 <1,3,6,7>, <1,3,6,7>
- 2558601146U, // <3,6,7,2>: Cost 3 vext1 <1,3,6,7>, <2,6,3,7>
- 2725081963U, // <3,6,7,3>: Cost 3 vext3 <6,7,3,3>, <6,7,3,3>
- 1659229046U, // <3,6,7,4>: Cost 2 vext3 LHS, <6,7,4,5>
- 2715423611U, // <3,6,7,5>: Cost 3 vext3 <5,1,7,3>, <6,7,5,1>
- 2722059141U, // <3,6,7,6>: Cost 3 vext3 <6,2,7,3>, <6,7,6,2>
- 2962361654U, // <3,6,7,7>: Cost 3 vzipr <1,5,3,7>, RHS
- 1659229078U, // <3,6,7,u>: Cost 2 vext3 LHS, <6,7,u,1>
- 1659229087U, // <3,6,u,0>: Cost 2 vext3 LHS, <6,u,0,1>
- 2689840041U, // <3,6,u,1>: Cost 3 vext3 LHS, <6,u,1,2>
- 2558609339U, // <3,6,u,2>: Cost 3 vext1 <1,3,6,u>, <2,6,3,u>
- 2576525853U, // <3,6,u,3>: Cost 3 vext1 <4,3,6,u>, <3,4,u,6>
- 1659229127U, // <3,6,u,4>: Cost 2 vext3 LHS, <6,u,4,5>
- 2689840081U, // <3,6,u,5>: Cost 3 vext3 LHS, <6,u,5,6>
- 1659228984U, // <3,6,u,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1652298720U, // <3,6,u,7>: Cost 2 vext3 <6,u,7,3>, <6,u,7,3>
- 1659229159U, // <3,6,u,u>: Cost 2 vext3 LHS, <6,u,u,1>
- 2626813952U, // <3,7,0,0>: Cost 3 vext2 <1,5,3,7>, <0,0,0,0>
- 1553072230U, // <3,7,0,1>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626814116U, // <3,7,0,2>: Cost 3 vext2 <1,5,3,7>, <0,2,0,2>
- 3700556028U, // <3,7,0,3>: Cost 4 vext2 <1,5,3,7>, <0,3,1,0>
- 2626814290U, // <3,7,0,4>: Cost 3 vext2 <1,5,3,7>, <0,4,1,5>
- 2582507375U, // <3,7,0,5>: Cost 3 vext1 <5,3,7,0>, <5,3,7,0>
- 2588480072U, // <3,7,0,6>: Cost 3 vext1 <6,3,7,0>, <6,3,7,0>
- 2732971055U, // <3,7,0,7>: Cost 3 vext3 LHS, <7,0,7,1>
- 1553072797U, // <3,7,0,u>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626814710U, // <3,7,1,0>: Cost 3 vext2 <1,5,3,7>, <1,0,3,2>
- 2626814772U, // <3,7,1,1>: Cost 3 vext2 <1,5,3,7>, <1,1,1,1>
- 2626814870U, // <3,7,1,2>: Cost 3 vext2 <1,5,3,7>, <1,2,3,0>
- 2625487854U, // <3,7,1,3>: Cost 3 vext2 <1,3,3,7>, <1,3,3,7>
- 2582514998U, // <3,7,1,4>: Cost 3 vext1 <5,3,7,1>, RHS
- 1553073296U, // <3,7,1,5>: Cost 2 vext2 <1,5,3,7>, <1,5,3,7>
- 2627478753U, // <3,7,1,6>: Cost 3 vext2 <1,6,3,7>, <1,6,3,7>
- 2727367810U, // <3,7,1,7>: Cost 3 vext3 <7,1,7,3>, <7,1,7,3>
- 1555064195U, // <3,7,1,u>: Cost 2 vext2 <1,u,3,7>, <1,u,3,7>
- 2588491878U, // <3,7,2,0>: Cost 3 vext1 <6,3,7,2>, LHS
- 3700557318U, // <3,7,2,1>: Cost 4 vext2 <1,5,3,7>, <2,1,0,3>
- 2626815592U, // <3,7,2,2>: Cost 3 vext2 <1,5,3,7>, <2,2,2,2>
- 2626815654U, // <3,7,2,3>: Cost 3 vext2 <1,5,3,7>, <2,3,0,1>
- 2588495158U, // <3,7,2,4>: Cost 3 vext1 <6,3,7,2>, RHS
- 2632787817U, // <3,7,2,5>: Cost 3 vext2 <2,5,3,7>, <2,5,3,7>
- 1559709626U, // <3,7,2,6>: Cost 2 vext2 <2,6,3,7>, <2,6,3,7>
- 2728031443U, // <3,7,2,7>: Cost 3 vext3 <7,2,7,3>, <7,2,7,3>
- 1561036892U, // <3,7,2,u>: Cost 2 vext2 <2,u,3,7>, <2,u,3,7>
- 2626816150U, // <3,7,3,0>: Cost 3 vext2 <1,5,3,7>, <3,0,1,2>
- 2626816268U, // <3,7,3,1>: Cost 3 vext2 <1,5,3,7>, <3,1,5,3>
- 2633451878U, // <3,7,3,2>: Cost 3 vext2 <2,6,3,7>, <3,2,6,3>
- 2626816412U, // <3,7,3,3>: Cost 3 vext2 <1,5,3,7>, <3,3,3,3>
- 2626816514U, // <3,7,3,4>: Cost 3 vext2 <1,5,3,7>, <3,4,5,6>
- 2638760514U, // <3,7,3,5>: Cost 3 vext2 <3,5,3,7>, <3,5,3,7>
- 2639424147U, // <3,7,3,6>: Cost 3 vext2 <3,6,3,7>, <3,6,3,7>
- 2826961920U, // <3,7,3,7>: Cost 3 vuzpr <1,3,5,7>, <1,3,5,7>
- 2626816798U, // <3,7,3,u>: Cost 3 vext2 <1,5,3,7>, <3,u,1,2>
- 2582536294U, // <3,7,4,0>: Cost 3 vext1 <5,3,7,4>, LHS
- 2582537360U, // <3,7,4,1>: Cost 3 vext1 <5,3,7,4>, <1,5,3,7>
- 2588510138U, // <3,7,4,2>: Cost 3 vext1 <6,3,7,4>, <2,6,3,7>
- 3700558996U, // <3,7,4,3>: Cost 4 vext2 <1,5,3,7>, <4,3,6,7>
- 2582539574U, // <3,7,4,4>: Cost 3 vext1 <5,3,7,4>, RHS
- 1553075510U, // <3,7,4,5>: Cost 2 vext2 <1,5,3,7>, RHS
- 2588512844U, // <3,7,4,6>: Cost 3 vext1 <6,3,7,4>, <6,3,7,4>
- 2564625766U, // <3,7,4,7>: Cost 3 vext1 <2,3,7,4>, <7,4,5,6>
- 1553075753U, // <3,7,4,u>: Cost 2 vext2 <1,5,3,7>, RHS
- 2732971398U, // <3,7,5,0>: Cost 3 vext3 LHS, <7,5,0,2>
- 2626817744U, // <3,7,5,1>: Cost 3 vext2 <1,5,3,7>, <5,1,7,3>
- 3700559649U, // <3,7,5,2>: Cost 4 vext2 <1,5,3,7>, <5,2,7,3>
- 2626817903U, // <3,7,5,3>: Cost 3 vext2 <1,5,3,7>, <5,3,7,0>
- 2258728203U, // <3,7,5,4>: Cost 3 vrev <7,3,4,5>
- 2732971446U, // <3,7,5,5>: Cost 3 vext3 LHS, <7,5,5,5>
- 2732971457U, // <3,7,5,6>: Cost 3 vext3 LHS, <7,5,6,7>
- 2826964278U, // <3,7,5,7>: Cost 3 vuzpr <1,3,5,7>, RHS
- 2826964279U, // <3,7,5,u>: Cost 3 vuzpr <1,3,5,7>, RHS
- 2732971478U, // <3,7,6,0>: Cost 3 vext3 LHS, <7,6,0,1>
- 2732971486U, // <3,7,6,1>: Cost 3 vext3 LHS, <7,6,1,0>
- 2633454074U, // <3,7,6,2>: Cost 3 vext2 <2,6,3,7>, <6,2,7,3>
- 2633454152U, // <3,7,6,3>: Cost 3 vext2 <2,6,3,7>, <6,3,7,0>
- 2732971518U, // <3,7,6,4>: Cost 3 vext3 LHS, <7,6,4,5>
- 2732971526U, // <3,7,6,5>: Cost 3 vext3 LHS, <7,6,5,4>
- 2732971537U, // <3,7,6,6>: Cost 3 vext3 LHS, <7,6,6,6>
- 2732971540U, // <3,7,6,7>: Cost 3 vext3 LHS, <7,6,7,0>
- 2726041124U, // <3,7,6,u>: Cost 3 vext3 <6,u,7,3>, <7,6,u,7>
- 2570616934U, // <3,7,7,0>: Cost 3 vext1 <3,3,7,7>, LHS
- 2570617856U, // <3,7,7,1>: Cost 3 vext1 <3,3,7,7>, <1,3,5,7>
- 2564646635U, // <3,7,7,2>: Cost 3 vext1 <2,3,7,7>, <2,3,7,7>
- 2570619332U, // <3,7,7,3>: Cost 3 vext1 <3,3,7,7>, <3,3,7,7>
- 2570620214U, // <3,7,7,4>: Cost 3 vext1 <3,3,7,7>, RHS
- 2582564726U, // <3,7,7,5>: Cost 3 vext1 <5,3,7,7>, <5,3,7,7>
- 2588537423U, // <3,7,7,6>: Cost 3 vext1 <6,3,7,7>, <6,3,7,7>
- 1659229804U, // <3,7,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1659229804U, // <3,7,7,u>: Cost 2 vext3 LHS, <7,7,7,7>
- 2626819795U, // <3,7,u,0>: Cost 3 vext2 <1,5,3,7>, <u,0,1,2>
- 1553078062U, // <3,7,u,1>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626819973U, // <3,7,u,2>: Cost 3 vext2 <1,5,3,7>, <u,2,3,0>
- 2826961565U, // <3,7,u,3>: Cost 3 vuzpr <1,3,5,7>, LHS
- 2626820159U, // <3,7,u,4>: Cost 3 vext2 <1,5,3,7>, <u,4,5,6>
- 1553078426U, // <3,7,u,5>: Cost 2 vext2 <1,5,3,7>, RHS
- 1595545808U, // <3,7,u,6>: Cost 2 vext2 <u,6,3,7>, <u,6,3,7>
- 1659229804U, // <3,7,u,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1553078629U, // <3,7,u,u>: Cost 2 vext2 <1,5,3,7>, LHS
- 1611448320U, // <3,u,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
- 1611896531U, // <3,u,0,1>: Cost 2 vext3 LHS, <u,0,1,2>
- 1659672284U, // <3,u,0,2>: Cost 2 vext3 LHS, <u,0,2,2>
- 1616099045U, // <3,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
- 2685638381U, // <3,u,0,4>: Cost 3 vext3 LHS, <u,0,4,1>
- 1663874806U, // <3,u,0,5>: Cost 2 vext3 LHS, <u,0,5,1>
- 1663874816U, // <3,u,0,6>: Cost 2 vext3 LHS, <u,0,6,2>
- 2960313672U, // <3,u,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
- 1611896594U, // <3,u,0,u>: Cost 2 vext3 LHS, <u,0,u,2>
- 1549763324U, // <3,u,1,0>: Cost 2 vext2 <1,0,3,u>, <1,0,3,u>
- 1550426957U, // <3,u,1,1>: Cost 2 vext2 <1,1,3,u>, <1,1,3,u>
- 537712430U, // <3,u,1,2>: Cost 1 vext3 LHS, LHS
- 1616541495U, // <3,u,1,3>: Cost 2 vext3 LHS, <u,1,3,3>
- 1490930998U, // <3,u,1,4>: Cost 2 vext1 <2,3,u,1>, RHS
- 1553081489U, // <3,u,1,5>: Cost 2 vext2 <1,5,3,u>, <1,5,3,u>
- 2627486946U, // <3,u,1,6>: Cost 3 vext2 <1,6,3,u>, <1,6,3,u>
- 1659230043U, // <3,u,1,7>: Cost 2 vext3 LHS, <u,1,7,3>
- 537712484U, // <3,u,1,u>: Cost 1 vext3 LHS, LHS
- 1611890852U, // <3,u,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2624833102U, // <3,u,2,1>: Cost 3 vext2 <1,2,3,u>, <2,1,u,3>
- 1557063287U, // <3,u,2,2>: Cost 2 vext2 <2,2,3,u>, <2,2,3,u>
- 1616099205U, // <3,u,2,3>: Cost 2 vext3 LHS, <u,2,3,0>
- 1611890892U, // <3,u,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2689841054U, // <3,u,2,5>: Cost 3 vext3 LHS, <u,2,5,7>
- 1559717819U, // <3,u,2,6>: Cost 2 vext2 <2,6,3,u>, <2,6,3,u>
- 1659230124U, // <3,u,2,7>: Cost 2 vext3 LHS, <u,2,7,3>
- 1616541618U, // <3,u,2,u>: Cost 2 vext3 LHS, <u,2,u,0>
- 1611896764U, // <3,u,3,0>: Cost 2 vext3 LHS, <u,3,0,1>
- 1484973079U, // <3,u,3,1>: Cost 2 vext1 <1,3,u,3>, <1,3,u,3>
- 2685638607U, // <3,u,3,2>: Cost 3 vext3 LHS, <u,3,2,2>
- 336380006U, // <3,u,3,3>: Cost 1 vdup3 LHS
- 1611896804U, // <3,u,3,4>: Cost 2 vext3 LHS, <u,3,4,5>
- 1616541679U, // <3,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
- 2690283512U, // <3,u,3,6>: Cost 3 vext3 LHS, <u,3,6,7>
- 2959674696U, // <3,u,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
- 336380006U, // <3,u,3,u>: Cost 1 vdup3 LHS
- 2558722150U, // <3,u,4,0>: Cost 3 vext1 <1,3,u,4>, LHS
- 1659672602U, // <3,u,4,1>: Cost 2 vext3 LHS, <u,4,1,5>
- 1659672612U, // <3,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
- 2689841196U, // <3,u,4,3>: Cost 3 vext3 LHS, <u,4,3,5>
- 1659227344U, // <3,u,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
- 1611896895U, // <3,u,4,5>: Cost 2 vext3 LHS, <u,4,5,6>
- 1663875144U, // <3,u,4,6>: Cost 2 vext3 LHS, <u,4,6,6>
- 1659230289U, // <3,u,4,7>: Cost 2 vext3 LHS, <u,4,7,6>
- 1611896922U, // <3,u,4,u>: Cost 2 vext3 LHS, <u,4,u,6>
- 1490960486U, // <3,u,5,0>: Cost 2 vext1 <2,3,u,5>, LHS
- 2689841261U, // <3,u,5,1>: Cost 3 vext3 LHS, <u,5,1,7>
- 1490962162U, // <3,u,5,2>: Cost 2 vext1 <2,3,u,5>, <2,3,u,5>
- 1616541823U, // <3,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
- 1490963766U, // <3,u,5,4>: Cost 2 vext1 <2,3,u,5>, RHS
- 1659228164U, // <3,u,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
- 537712794U, // <3,u,5,6>: Cost 1 vext3 LHS, RHS
- 1659230371U, // <3,u,5,7>: Cost 2 vext3 LHS, <u,5,7,7>
- 537712812U, // <3,u,5,u>: Cost 1 vext3 LHS, RHS
- 2689841327U, // <3,u,6,0>: Cost 3 vext3 LHS, <u,6,0,1>
- 2558739482U, // <3,u,6,1>: Cost 3 vext1 <1,3,u,6>, <1,3,u,6>
- 2689841351U, // <3,u,6,2>: Cost 3 vext3 LHS, <u,6,2,7>
- 1616099536U, // <3,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
- 1659227508U, // <3,u,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
- 2690283746U, // <3,u,6,5>: Cost 3 vext3 LHS, <u,6,5,7>
- 1659228984U, // <3,u,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1659230445U, // <3,u,6,7>: Cost 2 vext3 LHS, <u,6,7,0>
- 1616099581U, // <3,u,6,u>: Cost 2 vext3 LHS, <u,6,u,7>
- 1485004902U, // <3,u,7,0>: Cost 2 vext1 <1,3,u,7>, LHS
- 1485005851U, // <3,u,7,1>: Cost 2 vext1 <1,3,u,7>, <1,3,u,7>
- 2558748264U, // <3,u,7,2>: Cost 3 vext1 <1,3,u,7>, <2,2,2,2>
- 3095397021U, // <3,u,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
- 1485008182U, // <3,u,7,4>: Cost 2 vext1 <1,3,u,7>, RHS
- 1659228328U, // <3,u,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
- 2722060599U, // <3,u,7,6>: Cost 3 vext3 <6,2,7,3>, <u,7,6,2>
- 1659229804U, // <3,u,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1485010734U, // <3,u,7,u>: Cost 2 vext1 <1,3,u,7>, LHS
- 1616099665U, // <3,u,u,0>: Cost 2 vext3 LHS, <u,u,0,1>
- 1611897179U, // <3,u,u,1>: Cost 2 vext3 LHS, <u,u,1,2>
- 537712997U, // <3,u,u,2>: Cost 1 vext3 LHS, LHS
- 336380006U, // <3,u,u,3>: Cost 1 vdup3 LHS
- 1616099705U, // <3,u,u,4>: Cost 2 vext3 LHS, <u,u,4,5>
- 1611897219U, // <3,u,u,5>: Cost 2 vext3 LHS, <u,u,5,6>
- 537713037U, // <3,u,u,6>: Cost 1 vext3 LHS, RHS
- 1659230607U, // <3,u,u,7>: Cost 2 vext3 LHS, <u,u,7,0>
- 537713051U, // <3,u,u,u>: Cost 1 vext3 LHS, LHS
- 2691907584U, // <4,0,0,0>: Cost 3 vext3 <1,2,3,4>, <0,0,0,0>
- 2691907594U, // <4,0,0,1>: Cost 3 vext3 <1,2,3,4>, <0,0,1,1>
- 2691907604U, // <4,0,0,2>: Cost 3 vext3 <1,2,3,4>, <0,0,2,2>
- 3709862144U, // <4,0,0,3>: Cost 4 vext2 <3,1,4,0>, <0,3,1,4>
- 2684682280U, // <4,0,0,4>: Cost 3 vext3 <0,0,4,4>, <0,0,4,4>
- 3694600633U, // <4,0,0,5>: Cost 4 vext2 <0,5,4,0>, <0,5,4,0>
- 3291431290U, // <4,0,0,6>: Cost 4 vrev <0,4,6,0>
- 3668342067U, // <4,0,0,7>: Cost 4 vext1 <7,4,0,0>, <7,4,0,0>
- 2691907657U, // <4,0,0,u>: Cost 3 vext3 <1,2,3,4>, <0,0,u,1>
- 2570715238U, // <4,0,1,0>: Cost 3 vext1 <3,4,0,1>, LHS
- 2570716058U, // <4,0,1,1>: Cost 3 vext1 <3,4,0,1>, <1,2,3,4>
- 1618165862U, // <4,0,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2570717648U, // <4,0,1,3>: Cost 3 vext1 <3,4,0,1>, <3,4,0,1>
- 2570718518U, // <4,0,1,4>: Cost 3 vext1 <3,4,0,1>, RHS
- 2594607206U, // <4,0,1,5>: Cost 3 vext1 <7,4,0,1>, <5,6,7,4>
- 3662377563U, // <4,0,1,6>: Cost 4 vext1 <6,4,0,1>, <6,4,0,1>
- 2594608436U, // <4,0,1,7>: Cost 3 vext1 <7,4,0,1>, <7,4,0,1>
- 1618165916U, // <4,0,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2685714598U, // <4,0,2,0>: Cost 3 vext3 <0,2,0,4>, <0,2,0,4>
- 3759530159U, // <4,0,2,1>: Cost 4 vext3 <0,2,1,4>, <0,2,1,4>
- 2685862072U, // <4,0,2,2>: Cost 3 vext3 <0,2,2,4>, <0,2,2,4>
- 2631476937U, // <4,0,2,3>: Cost 3 vext2 <2,3,4,0>, <2,3,4,0>
- 2685714636U, // <4,0,2,4>: Cost 3 vext3 <0,2,0,4>, <0,2,4,6>
- 3765649622U, // <4,0,2,5>: Cost 4 vext3 <1,2,3,4>, <0,2,5,7>
- 2686157020U, // <4,0,2,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
- 3668358453U, // <4,0,2,7>: Cost 4 vext1 <7,4,0,2>, <7,4,0,2>
- 2686304494U, // <4,0,2,u>: Cost 3 vext3 <0,2,u,4>, <0,2,u,4>
- 3632529510U, // <4,0,3,0>: Cost 4 vext1 <1,4,0,3>, LHS
- 2686451968U, // <4,0,3,1>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
- 2686525705U, // <4,0,3,2>: Cost 3 vext3 <0,3,2,4>, <0,3,2,4>
- 3760341266U, // <4,0,3,3>: Cost 4 vext3 <0,3,3,4>, <0,3,3,4>
- 3632532790U, // <4,0,3,4>: Cost 4 vext1 <1,4,0,3>, RHS
- 3913254606U, // <4,0,3,5>: Cost 4 vuzpr <3,4,5,0>, <2,3,4,5>
- 3705219740U, // <4,0,3,6>: Cost 4 vext2 <2,3,4,0>, <3,6,4,7>
- 3713845990U, // <4,0,3,7>: Cost 4 vext2 <3,7,4,0>, <3,7,4,0>
- 2686451968U, // <4,0,3,u>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
- 2552823910U, // <4,0,4,0>: Cost 3 vext1 <0,4,0,4>, LHS
- 2691907922U, // <4,0,4,1>: Cost 3 vext3 <1,2,3,4>, <0,4,1,5>
- 2691907932U, // <4,0,4,2>: Cost 3 vext3 <1,2,3,4>, <0,4,2,6>
- 3626567830U, // <4,0,4,3>: Cost 4 vext1 <0,4,0,4>, <3,0,1,2>
- 2552827190U, // <4,0,4,4>: Cost 3 vext1 <0,4,0,4>, RHS
- 2631478582U, // <4,0,4,5>: Cost 3 vext2 <2,3,4,0>, RHS
- 3626570017U, // <4,0,4,6>: Cost 4 vext1 <0,4,0,4>, <6,0,1,2>
- 3668374839U, // <4,0,4,7>: Cost 4 vext1 <7,4,0,4>, <7,4,0,4>
- 2552829742U, // <4,0,4,u>: Cost 3 vext1 <0,4,0,4>, LHS
- 2558804070U, // <4,0,5,0>: Cost 3 vext1 <1,4,0,5>, LHS
- 1839644774U, // <4,0,5,1>: Cost 2 vzipl RHS, LHS
- 2913386660U, // <4,0,5,2>: Cost 3 vzipl RHS, <0,2,0,2>
- 2570750420U, // <4,0,5,3>: Cost 3 vext1 <3,4,0,5>, <3,4,0,5>
- 2558807350U, // <4,0,5,4>: Cost 3 vext1 <1,4,0,5>, RHS
- 3987128750U, // <4,0,5,5>: Cost 4 vzipl RHS, <0,5,2,7>
- 3987128822U, // <4,0,5,6>: Cost 4 vzipl RHS, <0,6,1,7>
- 2594641208U, // <4,0,5,7>: Cost 3 vext1 <7,4,0,5>, <7,4,0,5>
- 1839645341U, // <4,0,5,u>: Cost 2 vzipl RHS, LHS
- 2552840294U, // <4,0,6,0>: Cost 3 vext1 <0,4,0,6>, LHS
- 3047604234U, // <4,0,6,1>: Cost 3 vtrnl RHS, <0,0,1,1>
- 1973862502U, // <4,0,6,2>: Cost 2 vtrnl RHS, LHS
- 2570758613U, // <4,0,6,3>: Cost 3 vext1 <3,4,0,6>, <3,4,0,6>
- 2552843574U, // <4,0,6,4>: Cost 3 vext1 <0,4,0,6>, RHS
- 2217664887U, // <4,0,6,5>: Cost 3 vrev <0,4,5,6>
- 3662418528U, // <4,0,6,6>: Cost 4 vext1 <6,4,0,6>, <6,4,0,6>
- 2658022257U, // <4,0,6,7>: Cost 3 vext2 <6,7,4,0>, <6,7,4,0>
- 1973862556U, // <4,0,6,u>: Cost 2 vtrnl RHS, LHS
- 3731764218U, // <4,0,7,0>: Cost 4 vext2 <6,7,4,0>, <7,0,1,2>
- 3988324454U, // <4,0,7,1>: Cost 4 vzipl <4,7,5,0>, LHS
- 4122034278U, // <4,0,7,2>: Cost 4 vtrnl <4,6,7,1>, LHS
- 3735082246U, // <4,0,7,3>: Cost 4 vext2 <7,3,4,0>, <7,3,4,0>
- 3731764536U, // <4,0,7,4>: Cost 4 vext2 <6,7,4,0>, <7,4,0,5>
- 3937145718U, // <4,0,7,5>: Cost 4 vuzpr <7,4,5,0>, <6,7,4,5>
- 3737073145U, // <4,0,7,6>: Cost 4 vext2 <7,6,4,0>, <7,6,4,0>
- 3731764844U, // <4,0,7,7>: Cost 4 vext2 <6,7,4,0>, <7,7,7,7>
- 4122034332U, // <4,0,7,u>: Cost 4 vtrnl <4,6,7,1>, LHS
- 2552856678U, // <4,0,u,0>: Cost 3 vext1 <0,4,0,u>, LHS
- 1841635430U, // <4,0,u,1>: Cost 2 vzipl RHS, LHS
- 1618166429U, // <4,0,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2570774999U, // <4,0,u,3>: Cost 3 vext1 <3,4,0,u>, <3,4,0,u>
- 2552859958U, // <4,0,u,4>: Cost 3 vext1 <0,4,0,u>, RHS
- 2631481498U, // <4,0,u,5>: Cost 3 vext2 <2,3,4,0>, RHS
- 2686157020U, // <4,0,u,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
- 2594665787U, // <4,0,u,7>: Cost 3 vext1 <7,4,0,u>, <7,4,0,u>
- 1618166483U, // <4,0,u,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2617548837U, // <4,1,0,0>: Cost 3 vext2 <0,0,4,1>, <0,0,4,1>
- 2622857318U, // <4,1,0,1>: Cost 3 vext2 <0,u,4,1>, LHS
- 3693281484U, // <4,1,0,2>: Cost 4 vext2 <0,3,4,1>, <0,2,4,6>
- 2691908342U, // <4,1,0,3>: Cost 3 vext3 <1,2,3,4>, <1,0,3,2>
- 2622857554U, // <4,1,0,4>: Cost 3 vext2 <0,u,4,1>, <0,4,1,5>
- 3764470538U, // <4,1,0,5>: Cost 4 vext3 <1,0,5,4>, <1,0,5,4>
- 3695272459U, // <4,1,0,6>: Cost 4 vext2 <0,6,4,1>, <0,6,4,1>
- 3733094980U, // <4,1,0,7>: Cost 4 vext2 <7,0,4,1>, <0,7,1,4>
- 2622857885U, // <4,1,0,u>: Cost 3 vext2 <0,u,4,1>, LHS
- 3696599798U, // <4,1,1,0>: Cost 4 vext2 <0,u,4,1>, <1,0,3,2>
- 2691097399U, // <4,1,1,1>: Cost 3 vext3 <1,1,1,4>, <1,1,1,4>
- 2631484314U, // <4,1,1,2>: Cost 3 vext2 <2,3,4,1>, <1,2,3,4>
- 2691908424U, // <4,1,1,3>: Cost 3 vext3 <1,2,3,4>, <1,1,3,3>
- 3696600125U, // <4,1,1,4>: Cost 4 vext2 <0,u,4,1>, <1,4,3,5>
- 3696600175U, // <4,1,1,5>: Cost 4 vext2 <0,u,4,1>, <1,5,0,1>
- 3696600307U, // <4,1,1,6>: Cost 4 vext2 <0,u,4,1>, <1,6,5,7>
- 3668423997U, // <4,1,1,7>: Cost 4 vext1 <7,4,1,1>, <7,4,1,1>
- 2691908469U, // <4,1,1,u>: Cost 3 vext3 <1,2,3,4>, <1,1,u,3>
- 2570797158U, // <4,1,2,0>: Cost 3 vext1 <3,4,1,2>, LHS
- 2570797978U, // <4,1,2,1>: Cost 3 vext1 <3,4,1,2>, <1,2,3,4>
- 3696600680U, // <4,1,2,2>: Cost 4 vext2 <0,u,4,1>, <2,2,2,2>
- 1618166682U, // <4,1,2,3>: Cost 2 vext3 <1,2,3,4>, <1,2,3,4>
- 2570800438U, // <4,1,2,4>: Cost 3 vext1 <3,4,1,2>, RHS
- 3765650347U, // <4,1,2,5>: Cost 4 vext3 <1,2,3,4>, <1,2,5,3>
- 3696601018U, // <4,1,2,6>: Cost 4 vext2 <0,u,4,1>, <2,6,3,7>
- 3668432190U, // <4,1,2,7>: Cost 4 vext1 <7,4,1,2>, <7,4,1,2>
- 1618535367U, // <4,1,2,u>: Cost 2 vext3 <1,2,u,4>, <1,2,u,4>
- 2564833382U, // <4,1,3,0>: Cost 3 vext1 <2,4,1,3>, LHS
- 2691908568U, // <4,1,3,1>: Cost 3 vext3 <1,2,3,4>, <1,3,1,3>
- 2691908578U, // <4,1,3,2>: Cost 3 vext3 <1,2,3,4>, <1,3,2,4>
- 2692572139U, // <4,1,3,3>: Cost 3 vext3 <1,3,3,4>, <1,3,3,4>
- 2564836662U, // <4,1,3,4>: Cost 3 vext1 <2,4,1,3>, RHS
- 2691908608U, // <4,1,3,5>: Cost 3 vext3 <1,2,3,4>, <1,3,5,7>
- 2588725862U, // <4,1,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 3662468090U, // <4,1,3,7>: Cost 4 vext1 <6,4,1,3>, <7,0,1,2>
- 2691908631U, // <4,1,3,u>: Cost 3 vext3 <1,2,3,4>, <1,3,u,3>
- 3760194590U, // <4,1,4,0>: Cost 4 vext3 <0,3,1,4>, <1,4,0,1>
- 3693947874U, // <4,1,4,1>: Cost 4 vext2 <0,4,4,1>, <4,1,5,0>
- 3765650484U, // <4,1,4,2>: Cost 4 vext3 <1,2,3,4>, <1,4,2,5>
- 3113877606U, // <4,1,4,3>: Cost 3 vtrnr <4,4,4,4>, LHS
- 3760194630U, // <4,1,4,4>: Cost 4 vext3 <0,3,1,4>, <1,4,4,5>
- 2622860598U, // <4,1,4,5>: Cost 3 vext2 <0,u,4,1>, RHS
- 3297436759U, // <4,1,4,6>: Cost 4 vrev <1,4,6,4>
- 3800007772U, // <4,1,4,7>: Cost 4 vext3 <7,0,1,4>, <1,4,7,0>
- 2622860841U, // <4,1,4,u>: Cost 3 vext2 <0,u,4,1>, RHS
- 1479164006U, // <4,1,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
- 2552906486U, // <4,1,5,1>: Cost 3 vext1 <0,4,1,5>, <1,0,3,2>
- 2552907299U, // <4,1,5,2>: Cost 3 vext1 <0,4,1,5>, <2,1,3,5>
- 2552907926U, // <4,1,5,3>: Cost 3 vext1 <0,4,1,5>, <3,0,1,2>
- 1479167286U, // <4,1,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
- 2913387664U, // <4,1,5,5>: Cost 3 vzipl RHS, <1,5,3,7>
- 2600686074U, // <4,1,5,6>: Cost 3 vext1 <u,4,1,5>, <6,2,7,3>
- 2600686586U, // <4,1,5,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
- 1479169838U, // <4,1,5,u>: Cost 2 vext1 <0,4,1,5>, LHS
- 2552914022U, // <4,1,6,0>: Cost 3 vext1 <0,4,1,6>, LHS
- 2558886708U, // <4,1,6,1>: Cost 3 vext1 <1,4,1,6>, <1,1,1,1>
- 4028205206U, // <4,1,6,2>: Cost 4 vzipr <0,2,4,6>, <3,0,1,2>
- 3089858662U, // <4,1,6,3>: Cost 3 vtrnr <0,4,2,6>, LHS
- 2552917302U, // <4,1,6,4>: Cost 3 vext1 <0,4,1,6>, RHS
- 2223637584U, // <4,1,6,5>: Cost 3 vrev <1,4,5,6>
- 4121347081U, // <4,1,6,6>: Cost 4 vtrnl RHS, <1,3,6,7>
- 3721155406U, // <4,1,6,7>: Cost 4 vext2 <5,0,4,1>, <6,7,0,1>
- 2552919854U, // <4,1,6,u>: Cost 3 vext1 <0,4,1,6>, LHS
- 2659357716U, // <4,1,7,0>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
- 3733763173U, // <4,1,7,1>: Cost 4 vext2 <7,1,4,1>, <7,1,4,1>
- 3734426806U, // <4,1,7,2>: Cost 4 vext2 <7,2,4,1>, <7,2,4,1>
- 2695226671U, // <4,1,7,3>: Cost 3 vext3 <1,7,3,4>, <1,7,3,4>
- 3721155942U, // <4,1,7,4>: Cost 4 vext2 <5,0,4,1>, <7,4,5,6>
- 3721155976U, // <4,1,7,5>: Cost 4 vext2 <5,0,4,1>, <7,5,0,4>
- 3662500458U, // <4,1,7,6>: Cost 4 vext1 <6,4,1,7>, <6,4,1,7>
- 3721156204U, // <4,1,7,7>: Cost 4 vext2 <5,0,4,1>, <7,7,7,7>
- 2659357716U, // <4,1,7,u>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
- 1479188582U, // <4,1,u,0>: Cost 2 vext1 <0,4,1,u>, LHS
- 2552931062U, // <4,1,u,1>: Cost 3 vext1 <0,4,1,u>, <1,0,3,2>
- 2552931944U, // <4,1,u,2>: Cost 3 vext1 <0,4,1,u>, <2,2,2,2>
- 1622148480U, // <4,1,u,3>: Cost 2 vext3 <1,u,3,4>, <1,u,3,4>
- 1479191862U, // <4,1,u,4>: Cost 2 vext1 <0,4,1,u>, RHS
- 2622863514U, // <4,1,u,5>: Cost 3 vext2 <0,u,4,1>, RHS
- 2588725862U, // <4,1,u,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 2600686586U, // <4,1,u,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
- 1479194414U, // <4,1,u,u>: Cost 2 vext1 <0,4,1,u>, LHS
- 2617557030U, // <4,2,0,0>: Cost 3 vext2 <0,0,4,2>, <0,0,4,2>
- 2622865510U, // <4,2,0,1>: Cost 3 vext2 <0,u,4,2>, LHS
- 2622865612U, // <4,2,0,2>: Cost 3 vext2 <0,u,4,2>, <0,2,4,6>
- 3693289753U, // <4,2,0,3>: Cost 4 vext2 <0,3,4,2>, <0,3,4,2>
- 2635473244U, // <4,2,0,4>: Cost 3 vext2 <3,0,4,2>, <0,4,2,6>
- 3765650918U, // <4,2,0,5>: Cost 4 vext3 <1,2,3,4>, <2,0,5,7>
- 2696775148U, // <4,2,0,6>: Cost 3 vext3 <2,0,6,4>, <2,0,6,4>
- 3695944285U, // <4,2,0,7>: Cost 4 vext2 <0,7,4,2>, <0,7,4,2>
- 2622866077U, // <4,2,0,u>: Cost 3 vext2 <0,u,4,2>, LHS
- 3696607990U, // <4,2,1,0>: Cost 4 vext2 <0,u,4,2>, <1,0,3,2>
- 3696608052U, // <4,2,1,1>: Cost 4 vext2 <0,u,4,2>, <1,1,1,1>
- 3696608150U, // <4,2,1,2>: Cost 4 vext2 <0,u,4,2>, <1,2,3,0>
- 3895574630U, // <4,2,1,3>: Cost 4 vuzpr <0,4,u,2>, LHS
- 2691909162U, // <4,2,1,4>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
- 3696608400U, // <4,2,1,5>: Cost 4 vext2 <0,u,4,2>, <1,5,3,7>
- 3760784956U, // <4,2,1,6>: Cost 4 vext3 <0,4,0,4>, <2,1,6,3>
- 3773908549U, // <4,2,1,7>: Cost 5 vext3 <2,5,7,4>, <2,1,7,3>
- 2691909162U, // <4,2,1,u>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
- 3696608748U, // <4,2,2,0>: Cost 4 vext2 <0,u,4,2>, <2,0,6,4>
- 3696608828U, // <4,2,2,1>: Cost 4 vext2 <0,u,4,2>, <2,1,6,3>
- 2691909224U, // <4,2,2,2>: Cost 3 vext3 <1,2,3,4>, <2,2,2,2>
- 2691909234U, // <4,2,2,3>: Cost 3 vext3 <1,2,3,4>, <2,2,3,3>
- 3759605368U, // <4,2,2,4>: Cost 4 vext3 <0,2,2,4>, <2,2,4,0>
- 3696609156U, // <4,2,2,5>: Cost 4 vext2 <0,u,4,2>, <2,5,6,7>
- 3760785040U, // <4,2,2,6>: Cost 4 vext3 <0,4,0,4>, <2,2,6,6>
- 3668505927U, // <4,2,2,7>: Cost 4 vext1 <7,4,2,2>, <7,4,2,2>
- 2691909279U, // <4,2,2,u>: Cost 3 vext3 <1,2,3,4>, <2,2,u,3>
- 2691909286U, // <4,2,3,0>: Cost 3 vext3 <1,2,3,4>, <2,3,0,1>
- 3764840111U, // <4,2,3,1>: Cost 4 vext3 <1,1,1,4>, <2,3,1,1>
- 3765651129U, // <4,2,3,2>: Cost 4 vext3 <1,2,3,4>, <2,3,2,2>
- 2698544836U, // <4,2,3,3>: Cost 3 vext3 <2,3,3,4>, <2,3,3,4>
- 2685863630U, // <4,2,3,4>: Cost 3 vext3 <0,2,2,4>, <2,3,4,5>
- 2698692310U, // <4,2,3,5>: Cost 3 vext3 <2,3,5,4>, <2,3,5,4>
- 3772507871U, // <4,2,3,6>: Cost 4 vext3 <2,3,6,4>, <2,3,6,4>
- 2698839784U, // <4,2,3,7>: Cost 3 vext3 <2,3,7,4>, <2,3,7,4>
- 2691909358U, // <4,2,3,u>: Cost 3 vext3 <1,2,3,4>, <2,3,u,1>
- 2564915302U, // <4,2,4,0>: Cost 3 vext1 <2,4,2,4>, LHS
- 2564916122U, // <4,2,4,1>: Cost 3 vext1 <2,4,2,4>, <1,2,3,4>
- 2564917004U, // <4,2,4,2>: Cost 3 vext1 <2,4,2,4>, <2,4,2,4>
- 2699208469U, // <4,2,4,3>: Cost 3 vext3 <2,4,3,4>, <2,4,3,4>
- 2564918582U, // <4,2,4,4>: Cost 3 vext1 <2,4,2,4>, RHS
- 2622868790U, // <4,2,4,5>: Cost 3 vext2 <0,u,4,2>, RHS
- 2229667632U, // <4,2,4,6>: Cost 3 vrev <2,4,6,4>
- 3800082229U, // <4,2,4,7>: Cost 4 vext3 <7,0,2,4>, <2,4,7,0>
- 2622869033U, // <4,2,4,u>: Cost 3 vext2 <0,u,4,2>, RHS
- 2552979558U, // <4,2,5,0>: Cost 3 vext1 <0,4,2,5>, LHS
- 2558952342U, // <4,2,5,1>: Cost 3 vext1 <1,4,2,5>, <1,2,3,0>
- 2564925032U, // <4,2,5,2>: Cost 3 vext1 <2,4,2,5>, <2,2,2,2>
- 2967060582U, // <4,2,5,3>: Cost 3 vzipr <2,3,4,5>, LHS
- 2552982838U, // <4,2,5,4>: Cost 3 vext1 <0,4,2,5>, RHS
- 3987130190U, // <4,2,5,5>: Cost 4 vzipl RHS, <2,5,0,7>
- 2913388474U, // <4,2,5,6>: Cost 3 vzipl RHS, <2,6,3,7>
- 3895577910U, // <4,2,5,7>: Cost 4 vuzpr <0,4,u,2>, RHS
- 2552985390U, // <4,2,5,u>: Cost 3 vext1 <0,4,2,5>, LHS
- 1479245926U, // <4,2,6,0>: Cost 2 vext1 <0,4,2,6>, LHS
- 2552988406U, // <4,2,6,1>: Cost 3 vext1 <0,4,2,6>, <1,0,3,2>
- 2552989288U, // <4,2,6,2>: Cost 3 vext1 <0,4,2,6>, <2,2,2,2>
- 2954461286U, // <4,2,6,3>: Cost 3 vzipr <0,2,4,6>, LHS
- 1479249206U, // <4,2,6,4>: Cost 2 vext1 <0,4,2,6>, RHS
- 2229610281U, // <4,2,6,5>: Cost 3 vrev <2,4,5,6>
- 2600767994U, // <4,2,6,6>: Cost 3 vext1 <u,4,2,6>, <6,2,7,3>
- 2600768506U, // <4,2,6,7>: Cost 3 vext1 <u,4,2,6>, <7,0,1,2>
- 1479251758U, // <4,2,6,u>: Cost 2 vext1 <0,4,2,6>, LHS
- 2659365909U, // <4,2,7,0>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
- 3733771366U, // <4,2,7,1>: Cost 4 vext2 <7,1,4,2>, <7,1,4,2>
- 3734434999U, // <4,2,7,2>: Cost 4 vext2 <7,2,4,2>, <7,2,4,2>
- 2701199368U, // <4,2,7,3>: Cost 3 vext3 <2,7,3,4>, <2,7,3,4>
- 4175774618U, // <4,2,7,4>: Cost 4 vtrnr <2,4,5,7>, <1,2,3,4>
- 3303360298U, // <4,2,7,5>: Cost 4 vrev <2,4,5,7>
- 3727136217U, // <4,2,7,6>: Cost 4 vext2 <6,0,4,2>, <7,6,0,4>
- 3727136364U, // <4,2,7,7>: Cost 4 vext2 <6,0,4,2>, <7,7,7,7>
- 2659365909U, // <4,2,7,u>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
- 1479262310U, // <4,2,u,0>: Cost 2 vext1 <0,4,2,u>, LHS
- 2553004790U, // <4,2,u,1>: Cost 3 vext1 <0,4,2,u>, <1,0,3,2>
- 2553005672U, // <4,2,u,2>: Cost 3 vext1 <0,4,2,u>, <2,2,2,2>
- 2954477670U, // <4,2,u,3>: Cost 3 vzipr <0,2,4,u>, LHS
- 1479265590U, // <4,2,u,4>: Cost 2 vext1 <0,4,2,u>, RHS
- 2622871706U, // <4,2,u,5>: Cost 3 vext2 <0,u,4,2>, RHS
- 2229700404U, // <4,2,u,6>: Cost 3 vrev <2,4,6,u>
- 2600784890U, // <4,2,u,7>: Cost 3 vext1 <u,4,2,u>, <7,0,1,2>
- 1479268142U, // <4,2,u,u>: Cost 2 vext1 <0,4,2,u>, LHS
- 3765651595U, // <4,3,0,0>: Cost 4 vext3 <1,2,3,4>, <3,0,0,0>
- 2691909782U, // <4,3,0,1>: Cost 3 vext3 <1,2,3,4>, <3,0,1,2>
- 2702452897U, // <4,3,0,2>: Cost 3 vext3 <3,0,2,4>, <3,0,2,4>
- 3693297946U, // <4,3,0,3>: Cost 4 vext2 <0,3,4,3>, <0,3,4,3>
- 3760711856U, // <4,3,0,4>: Cost 4 vext3 <0,3,u,4>, <3,0,4,1>
- 2235533820U, // <4,3,0,5>: Cost 3 vrev <3,4,5,0>
- 3309349381U, // <4,3,0,6>: Cost 4 vrev <3,4,6,0>
- 3668563278U, // <4,3,0,7>: Cost 4 vext1 <7,4,3,0>, <7,4,3,0>
- 2691909845U, // <4,3,0,u>: Cost 3 vext3 <1,2,3,4>, <3,0,u,2>
- 2235173328U, // <4,3,1,0>: Cost 3 vrev <3,4,0,1>
- 3764840678U, // <4,3,1,1>: Cost 4 vext3 <1,1,1,4>, <3,1,1,1>
- 2630173594U, // <4,3,1,2>: Cost 3 vext2 <2,1,4,3>, <1,2,3,4>
- 2703190267U, // <4,3,1,3>: Cost 3 vext3 <3,1,3,4>, <3,1,3,4>
- 3760195840U, // <4,3,1,4>: Cost 4 vext3 <0,3,1,4>, <3,1,4,0>
- 3765651724U, // <4,3,1,5>: Cost 4 vext3 <1,2,3,4>, <3,1,5,3>
- 3309357574U, // <4,3,1,6>: Cost 4 vrev <3,4,6,1>
- 3769633054U, // <4,3,1,7>: Cost 4 vext3 <1,u,3,4>, <3,1,7,3>
- 2703558952U, // <4,3,1,u>: Cost 3 vext3 <3,1,u,4>, <3,1,u,4>
- 3626770534U, // <4,3,2,0>: Cost 4 vext1 <0,4,3,2>, LHS
- 2630174250U, // <4,3,2,1>: Cost 3 vext2 <2,1,4,3>, <2,1,4,3>
- 3765651777U, // <4,3,2,2>: Cost 4 vext3 <1,2,3,4>, <3,2,2,2>
- 2703853900U, // <4,3,2,3>: Cost 3 vext3 <3,2,3,4>, <3,2,3,4>
- 3626773814U, // <4,3,2,4>: Cost 4 vext1 <0,4,3,2>, RHS
- 2704001374U, // <4,3,2,5>: Cost 3 vext3 <3,2,5,4>, <3,2,5,4>
- 3765651814U, // <4,3,2,6>: Cost 4 vext3 <1,2,3,4>, <3,2,6,3>
- 3769633135U, // <4,3,2,7>: Cost 4 vext3 <1,u,3,4>, <3,2,7,3>
- 2634819681U, // <4,3,2,u>: Cost 3 vext2 <2,u,4,3>, <2,u,4,3>
- 3765651839U, // <4,3,3,0>: Cost 4 vext3 <1,2,3,4>, <3,3,0,1>
- 3765651848U, // <4,3,3,1>: Cost 4 vext3 <1,2,3,4>, <3,3,1,1>
- 3710552404U, // <4,3,3,2>: Cost 4 vext2 <3,2,4,3>, <3,2,4,3>
- 2691910044U, // <4,3,3,3>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
- 2704591270U, // <4,3,3,4>: Cost 3 vext3 <3,3,4,4>, <3,3,4,4>
- 3769633202U, // <4,3,3,5>: Cost 4 vext3 <1,u,3,4>, <3,3,5,7>
- 3703917212U, // <4,3,3,6>: Cost 4 vext2 <2,1,4,3>, <3,6,4,7>
- 3769633220U, // <4,3,3,7>: Cost 4 vext3 <1,u,3,4>, <3,3,7,7>
- 2691910044U, // <4,3,3,u>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
- 2691910096U, // <4,3,4,0>: Cost 3 vext3 <1,2,3,4>, <3,4,0,1>
- 2691910106U, // <4,3,4,1>: Cost 3 vext3 <1,2,3,4>, <3,4,1,2>
- 2564990741U, // <4,3,4,2>: Cost 3 vext1 <2,4,3,4>, <2,4,3,4>
- 3765651946U, // <4,3,4,3>: Cost 4 vext3 <1,2,3,4>, <3,4,3,0>
- 2691910136U, // <4,3,4,4>: Cost 3 vext3 <1,2,3,4>, <3,4,4,5>
- 2686454274U, // <4,3,4,5>: Cost 3 vext3 <0,3,1,4>, <3,4,5,6>
- 2235640329U, // <4,3,4,6>: Cost 3 vrev <3,4,6,4>
- 3801483792U, // <4,3,4,7>: Cost 4 vext3 <7,2,3,4>, <3,4,7,2>
- 2691910168U, // <4,3,4,u>: Cost 3 vext3 <1,2,3,4>, <3,4,u,1>
- 2559025254U, // <4,3,5,0>: Cost 3 vext1 <1,4,3,5>, LHS
- 2559026237U, // <4,3,5,1>: Cost 3 vext1 <1,4,3,5>, <1,4,3,5>
- 2564998862U, // <4,3,5,2>: Cost 3 vext1 <2,4,3,5>, <2,3,4,5>
- 2570971548U, // <4,3,5,3>: Cost 3 vext1 <3,4,3,5>, <3,3,3,3>
- 2559028534U, // <4,3,5,4>: Cost 3 vext1 <1,4,3,5>, RHS
- 4163519477U, // <4,3,5,5>: Cost 4 vtrnr <0,4,1,5>, <1,3,4,5>
- 3309390346U, // <4,3,5,6>: Cost 4 vrev <3,4,6,5>
- 2706139747U, // <4,3,5,7>: Cost 3 vext3 <3,5,7,4>, <3,5,7,4>
- 2559031086U, // <4,3,5,u>: Cost 3 vext1 <1,4,3,5>, LHS
- 2559033446U, // <4,3,6,0>: Cost 3 vext1 <1,4,3,6>, LHS
- 2559034430U, // <4,3,6,1>: Cost 3 vext1 <1,4,3,6>, <1,4,3,6>
- 2565007127U, // <4,3,6,2>: Cost 3 vext1 <2,4,3,6>, <2,4,3,6>
- 2570979740U, // <4,3,6,3>: Cost 3 vext1 <3,4,3,6>, <3,3,3,3>
- 2559036726U, // <4,3,6,4>: Cost 3 vext1 <1,4,3,6>, RHS
- 1161841154U, // <4,3,6,5>: Cost 2 vrev <3,4,5,6>
- 4028203932U, // <4,3,6,6>: Cost 4 vzipr <0,2,4,6>, <1,2,3,6>
- 2706803380U, // <4,3,6,7>: Cost 3 vext3 <3,6,7,4>, <3,6,7,4>
- 1162062365U, // <4,3,6,u>: Cost 2 vrev <3,4,u,6>
- 3769633475U, // <4,3,7,0>: Cost 4 vext3 <1,u,3,4>, <3,7,0,1>
- 3769633488U, // <4,3,7,1>: Cost 4 vext3 <1,u,3,4>, <3,7,1,5>
- 3638757144U, // <4,3,7,2>: Cost 4 vext1 <2,4,3,7>, <2,4,3,7>
- 3769633508U, // <4,3,7,3>: Cost 4 vext3 <1,u,3,4>, <3,7,3,7>
- 3769633515U, // <4,3,7,4>: Cost 4 vext3 <1,u,3,4>, <3,7,4,5>
- 3769633526U, // <4,3,7,5>: Cost 4 vext3 <1,u,3,4>, <3,7,5,7>
- 3662647932U, // <4,3,7,6>: Cost 4 vext1 <6,4,3,7>, <6,4,3,7>
- 3781208837U, // <4,3,7,7>: Cost 4 vext3 <3,7,7,4>, <3,7,7,4>
- 3769633547U, // <4,3,7,u>: Cost 4 vext3 <1,u,3,4>, <3,7,u,1>
- 2559049830U, // <4,3,u,0>: Cost 3 vext1 <1,4,3,u>, LHS
- 2691910430U, // <4,3,u,1>: Cost 3 vext3 <1,2,3,4>, <3,u,1,2>
- 2565023513U, // <4,3,u,2>: Cost 3 vext1 <2,4,3,u>, <2,4,3,u>
- 2707835698U, // <4,3,u,3>: Cost 3 vext3 <3,u,3,4>, <3,u,3,4>
- 2559053110U, // <4,3,u,4>: Cost 3 vext1 <1,4,3,u>, RHS
- 1161857540U, // <4,3,u,5>: Cost 2 vrev <3,4,5,u>
- 2235673101U, // <4,3,u,6>: Cost 3 vrev <3,4,6,u>
- 2708130646U, // <4,3,u,7>: Cost 3 vext3 <3,u,7,4>, <3,u,7,4>
- 1162078751U, // <4,3,u,u>: Cost 2 vrev <3,4,u,u>
- 2617573416U, // <4,4,0,0>: Cost 3 vext2 <0,0,4,4>, <0,0,4,4>
- 1570373734U, // <4,4,0,1>: Cost 2 vext2 <4,4,4,4>, LHS
- 2779676774U, // <4,4,0,2>: Cost 3 vuzpl <4,6,4,6>, LHS
- 3760196480U, // <4,4,0,3>: Cost 4 vext3 <0,3,1,4>, <4,0,3,1>
- 2576977100U, // <4,4,0,4>: Cost 3 vext1 <4,4,4,0>, <4,4,4,0>
- 2718747538U, // <4,4,0,5>: Cost 3 vext3 <5,6,7,4>, <4,0,5,1>
- 2718747548U, // <4,4,0,6>: Cost 3 vext3 <5,6,7,4>, <4,0,6,2>
- 3668637015U, // <4,4,0,7>: Cost 4 vext1 <7,4,4,0>, <7,4,4,0>
- 1570374301U, // <4,4,0,u>: Cost 2 vext2 <4,4,4,4>, LHS
- 2644116214U, // <4,4,1,0>: Cost 3 vext2 <4,4,4,4>, <1,0,3,2>
- 2644116276U, // <4,4,1,1>: Cost 3 vext2 <4,4,4,4>, <1,1,1,1>
- 2691910602U, // <4,4,1,2>: Cost 3 vext3 <1,2,3,4>, <4,1,2,3>
- 2644116440U, // <4,4,1,3>: Cost 3 vext2 <4,4,4,4>, <1,3,1,3>
- 2711227356U, // <4,4,1,4>: Cost 3 vext3 <4,4,4,4>, <4,1,4,3>
- 2709310438U, // <4,4,1,5>: Cost 3 vext3 <4,1,5,4>, <4,1,5,4>
- 3765652462U, // <4,4,1,6>: Cost 4 vext3 <1,2,3,4>, <4,1,6,3>
- 3768970231U, // <4,4,1,7>: Cost 4 vext3 <1,7,3,4>, <4,1,7,3>
- 2695891968U, // <4,4,1,u>: Cost 3 vext3 <1,u,3,4>, <4,1,u,3>
- 3703260634U, // <4,4,2,0>: Cost 4 vext2 <2,0,4,4>, <2,0,4,4>
- 3765652499U, // <4,4,2,1>: Cost 4 vext3 <1,2,3,4>, <4,2,1,4>
- 2644117096U, // <4,4,2,2>: Cost 3 vext2 <4,4,4,4>, <2,2,2,2>
- 2631509709U, // <4,4,2,3>: Cost 3 vext2 <2,3,4,4>, <2,3,4,4>
- 2644117269U, // <4,4,2,4>: Cost 3 vext2 <4,4,4,4>, <2,4,3,4>
- 3705251698U, // <4,4,2,5>: Cost 4 vext2 <2,3,4,4>, <2,5,4,7>
- 2710047808U, // <4,4,2,6>: Cost 3 vext3 <4,2,6,4>, <4,2,6,4>
- 3783863369U, // <4,4,2,7>: Cost 4 vext3 <4,2,7,4>, <4,2,7,4>
- 2634827874U, // <4,4,2,u>: Cost 3 vext2 <2,u,4,4>, <2,u,4,4>
- 2644117654U, // <4,4,3,0>: Cost 3 vext2 <4,4,4,4>, <3,0,1,2>
- 3638797210U, // <4,4,3,1>: Cost 4 vext1 <2,4,4,3>, <1,2,3,4>
- 3638798082U, // <4,4,3,2>: Cost 4 vext1 <2,4,4,3>, <2,4,1,3>
- 2637482406U, // <4,4,3,3>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
- 2638146039U, // <4,4,3,4>: Cost 3 vext2 <3,4,4,4>, <3,4,4,4>
- 3913287374U, // <4,4,3,5>: Cost 4 vuzpr <3,4,5,4>, <2,3,4,5>
- 3765652625U, // <4,4,3,6>: Cost 4 vext3 <1,2,3,4>, <4,3,6,4>
- 3713878762U, // <4,4,3,7>: Cost 4 vext2 <3,7,4,4>, <3,7,4,4>
- 2637482406U, // <4,4,3,u>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
- 1503264870U, // <4,4,4,0>: Cost 2 vext1 <4,4,4,4>, LHS
- 2577007514U, // <4,4,4,1>: Cost 3 vext1 <4,4,4,4>, <1,2,3,4>
- 2577008232U, // <4,4,4,2>: Cost 3 vext1 <4,4,4,4>, <2,2,2,2>
- 2571037175U, // <4,4,4,3>: Cost 3 vext1 <3,4,4,4>, <3,4,4,4>
- 161926454U, // <4,4,4,4>: Cost 1 vdup0 RHS
- 1570377014U, // <4,4,4,5>: Cost 2 vext2 <4,4,4,4>, RHS
- 2779680054U, // <4,4,4,6>: Cost 3 vuzpl <4,6,4,6>, RHS
- 2594927963U, // <4,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
- 161926454U, // <4,4,4,u>: Cost 1 vdup0 RHS
- 2571042918U, // <4,4,5,0>: Cost 3 vext1 <3,4,4,5>, LHS
- 2571043738U, // <4,4,5,1>: Cost 3 vext1 <3,4,4,5>, <1,2,3,4>
- 3638814495U, // <4,4,5,2>: Cost 4 vext1 <2,4,4,5>, <2,4,4,5>
- 2571045368U, // <4,4,5,3>: Cost 3 vext1 <3,4,4,5>, <3,4,4,5>
- 2571046198U, // <4,4,5,4>: Cost 3 vext1 <3,4,4,5>, RHS
- 1839648054U, // <4,4,5,5>: Cost 2 vzipl RHS, RHS
- 1618169142U, // <4,4,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 2594936156U, // <4,4,5,7>: Cost 3 vext1 <7,4,4,5>, <7,4,4,5>
- 1618169160U, // <4,4,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
- 2553135206U, // <4,4,6,0>: Cost 3 vext1 <0,4,4,6>, LHS
- 3626877686U, // <4,4,6,1>: Cost 4 vext1 <0,4,4,6>, <1,0,3,2>
- 2565080782U, // <4,4,6,2>: Cost 3 vext1 <2,4,4,6>, <2,3,4,5>
- 2571053561U, // <4,4,6,3>: Cost 3 vext1 <3,4,4,6>, <3,4,4,6>
- 2553138486U, // <4,4,6,4>: Cost 3 vext1 <0,4,4,6>, RHS
- 2241555675U, // <4,4,6,5>: Cost 3 vrev <4,4,5,6>
- 1973865782U, // <4,4,6,6>: Cost 2 vtrnl RHS, RHS
- 2658055029U, // <4,4,6,7>: Cost 3 vext2 <6,7,4,4>, <6,7,4,4>
- 1973865800U, // <4,4,6,u>: Cost 2 vtrnl RHS, RHS
- 2644120570U, // <4,4,7,0>: Cost 3 vext2 <4,4,4,4>, <7,0,1,2>
- 3638829978U, // <4,4,7,1>: Cost 4 vext1 <2,4,4,7>, <1,2,3,4>
- 3638830881U, // <4,4,7,2>: Cost 4 vext1 <2,4,4,7>, <2,4,4,7>
- 3735115018U, // <4,4,7,3>: Cost 4 vext2 <7,3,4,4>, <7,3,4,4>
- 2662036827U, // <4,4,7,4>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
- 2713292236U, // <4,4,7,5>: Cost 3 vext3 <4,7,5,4>, <4,7,5,4>
- 2713365973U, // <4,4,7,6>: Cost 3 vext3 <4,7,6,4>, <4,7,6,4>
- 2644121196U, // <4,4,7,7>: Cost 3 vext2 <4,4,4,4>, <7,7,7,7>
- 2662036827U, // <4,4,7,u>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
- 1503297638U, // <4,4,u,0>: Cost 2 vext1 <4,4,4,u>, LHS
- 1570379566U, // <4,4,u,1>: Cost 2 vext2 <4,4,4,4>, LHS
- 2779682606U, // <4,4,u,2>: Cost 3 vuzpl <4,6,4,6>, LHS
- 2571069947U, // <4,4,u,3>: Cost 3 vext1 <3,4,4,u>, <3,4,4,u>
- 161926454U, // <4,4,u,4>: Cost 1 vdup0 RHS
- 1841638710U, // <4,4,u,5>: Cost 2 vzipl RHS, RHS
- 1618169385U, // <4,4,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 2594960735U, // <4,4,u,7>: Cost 3 vext1 <7,4,4,u>, <7,4,4,u>
- 161926454U, // <4,4,u,u>: Cost 1 vdup0 RHS
- 2631516160U, // <4,5,0,0>: Cost 3 vext2 <2,3,4,5>, <0,0,0,0>
- 1557774438U, // <4,5,0,1>: Cost 2 vext2 <2,3,4,5>, LHS
- 2618908875U, // <4,5,0,2>: Cost 3 vext2 <0,2,4,5>, <0,2,4,5>
- 2571078140U, // <4,5,0,3>: Cost 3 vext1 <3,4,5,0>, <3,4,5,0>
- 2626871634U, // <4,5,0,4>: Cost 3 vext2 <1,5,4,5>, <0,4,1,5>
- 3705258414U, // <4,5,0,5>: Cost 4 vext2 <2,3,4,5>, <0,5,2,7>
- 2594968438U, // <4,5,0,6>: Cost 3 vext1 <7,4,5,0>, <6,7,4,5>
- 2594968928U, // <4,5,0,7>: Cost 3 vext1 <7,4,5,0>, <7,4,5,0>
- 1557775005U, // <4,5,0,u>: Cost 2 vext2 <2,3,4,5>, LHS
- 2631516918U, // <4,5,1,0>: Cost 3 vext2 <2,3,4,5>, <1,0,3,2>
- 2624217939U, // <4,5,1,1>: Cost 3 vext2 <1,1,4,5>, <1,1,4,5>
- 2631517078U, // <4,5,1,2>: Cost 3 vext2 <2,3,4,5>, <1,2,3,0>
- 2821341286U, // <4,5,1,3>: Cost 3 vuzpr <0,4,1,5>, LHS
- 3895086054U, // <4,5,1,4>: Cost 4 vuzpr <0,4,1,5>, <4,1,5,4>
- 2626872471U, // <4,5,1,5>: Cost 3 vext2 <1,5,4,5>, <1,5,4,5>
- 3895083131U, // <4,5,1,6>: Cost 4 vuzpr <0,4,1,5>, <0,1,4,6>
- 2718748368U, // <4,5,1,7>: Cost 3 vext3 <5,6,7,4>, <5,1,7,3>
- 2821341291U, // <4,5,1,u>: Cost 3 vuzpr <0,4,1,5>, LHS
- 2571092070U, // <4,5,2,0>: Cost 3 vext1 <3,4,5,2>, LHS
- 3699287585U, // <4,5,2,1>: Cost 4 vext2 <1,3,4,5>, <2,1,3,3>
- 2630854269U, // <4,5,2,2>: Cost 3 vext2 <2,2,4,5>, <2,2,4,5>
- 1557776078U, // <4,5,2,3>: Cost 2 vext2 <2,3,4,5>, <2,3,4,5>
- 2631517974U, // <4,5,2,4>: Cost 3 vext2 <2,3,4,5>, <2,4,3,5>
- 3692652384U, // <4,5,2,5>: Cost 4 vext2 <0,2,4,5>, <2,5,2,7>
- 2631518138U, // <4,5,2,6>: Cost 3 vext2 <2,3,4,5>, <2,6,3,7>
- 4164013366U, // <4,5,2,7>: Cost 4 vtrnr <0,4,u,2>, RHS
- 1561094243U, // <4,5,2,u>: Cost 2 vext2 <2,u,4,5>, <2,u,4,5>
- 2631518358U, // <4,5,3,0>: Cost 3 vext2 <2,3,4,5>, <3,0,1,2>
- 3895084710U, // <4,5,3,1>: Cost 4 vuzpr <0,4,1,5>, <2,3,0,1>
- 2631518540U, // <4,5,3,2>: Cost 3 vext2 <2,3,4,5>, <3,2,3,4>
- 2631518620U, // <4,5,3,3>: Cost 3 vext2 <2,3,4,5>, <3,3,3,3>
- 2631518716U, // <4,5,3,4>: Cost 3 vext2 <2,3,4,5>, <3,4,5,0>
- 2631518784U, // <4,5,3,5>: Cost 3 vext2 <2,3,4,5>, <3,5,3,5>
- 2658060980U, // <4,5,3,6>: Cost 3 vext2 <6,7,4,5>, <3,6,7,4>
- 2640145131U, // <4,5,3,7>: Cost 3 vext2 <3,7,4,5>, <3,7,4,5>
- 2631519006U, // <4,5,3,u>: Cost 3 vext2 <2,3,4,5>, <3,u,1,2>
- 2571108454U, // <4,5,4,0>: Cost 3 vext1 <3,4,5,4>, LHS
- 3632907342U, // <4,5,4,1>: Cost 4 vext1 <1,4,5,4>, <1,4,5,4>
- 2571110094U, // <4,5,4,2>: Cost 3 vext1 <3,4,5,4>, <2,3,4,5>
- 2571110912U, // <4,5,4,3>: Cost 3 vext1 <3,4,5,4>, <3,4,5,4>
- 2571111734U, // <4,5,4,4>: Cost 3 vext1 <3,4,5,4>, RHS
- 1557777718U, // <4,5,4,5>: Cost 2 vext2 <2,3,4,5>, RHS
- 2645454195U, // <4,5,4,6>: Cost 3 vext2 <4,6,4,5>, <4,6,4,5>
- 2718748614U, // <4,5,4,7>: Cost 3 vext3 <5,6,7,4>, <5,4,7,6>
- 1557777961U, // <4,5,4,u>: Cost 2 vext2 <2,3,4,5>, RHS
- 1503346790U, // <4,5,5,0>: Cost 2 vext1 <4,4,5,5>, LHS
- 2913398480U, // <4,5,5,1>: Cost 3 vzipl RHS, <5,1,7,3>
- 2631519998U, // <4,5,5,2>: Cost 3 vext2 <2,3,4,5>, <5,2,3,4>
- 2577090710U, // <4,5,5,3>: Cost 3 vext1 <4,4,5,5>, <3,0,1,2>
- 1503349978U, // <4,5,5,4>: Cost 2 vext1 <4,4,5,5>, <4,4,5,5>
- 2631520260U, // <4,5,5,5>: Cost 3 vext2 <2,3,4,5>, <5,5,5,5>
- 2913390690U, // <4,5,5,6>: Cost 3 vzipl RHS, <5,6,7,0>
- 2821344566U, // <4,5,5,7>: Cost 3 vuzpr <0,4,1,5>, RHS
- 1503352622U, // <4,5,5,u>: Cost 2 vext1 <4,4,5,5>, LHS
- 1497383014U, // <4,5,6,0>: Cost 2 vext1 <3,4,5,6>, LHS
- 2559181904U, // <4,5,6,1>: Cost 3 vext1 <1,4,5,6>, <1,4,5,6>
- 2565154601U, // <4,5,6,2>: Cost 3 vext1 <2,4,5,6>, <2,4,5,6>
- 1497385474U, // <4,5,6,3>: Cost 2 vext1 <3,4,5,6>, <3,4,5,6>
- 1497386294U, // <4,5,6,4>: Cost 2 vext1 <3,4,5,6>, RHS
- 3047608324U, // <4,5,6,5>: Cost 3 vtrnl RHS, <5,5,5,5>
- 2571129656U, // <4,5,6,6>: Cost 3 vext1 <3,4,5,6>, <6,6,6,6>
- 27705344U, // <4,5,6,7>: Cost 0 copy RHS
- 27705344U, // <4,5,6,u>: Cost 0 copy RHS
- 2565161062U, // <4,5,7,0>: Cost 3 vext1 <2,4,5,7>, LHS
- 2565161882U, // <4,5,7,1>: Cost 3 vext1 <2,4,5,7>, <1,2,3,4>
- 2565162794U, // <4,5,7,2>: Cost 3 vext1 <2,4,5,7>, <2,4,5,7>
- 2661381387U, // <4,5,7,3>: Cost 3 vext2 <7,3,4,5>, <7,3,4,5>
- 2565164342U, // <4,5,7,4>: Cost 3 vext1 <2,4,5,7>, RHS
- 2718748840U, // <4,5,7,5>: Cost 3 vext3 <5,6,7,4>, <5,7,5,7>
- 2718748846U, // <4,5,7,6>: Cost 3 vext3 <5,6,7,4>, <5,7,6,4>
- 2719412407U, // <4,5,7,7>: Cost 3 vext3 <5,7,7,4>, <5,7,7,4>
- 2565166894U, // <4,5,7,u>: Cost 3 vext1 <2,4,5,7>, LHS
- 1497399398U, // <4,5,u,0>: Cost 2 vext1 <3,4,5,u>, LHS
- 1557780270U, // <4,5,u,1>: Cost 2 vext2 <2,3,4,5>, LHS
- 2631522181U, // <4,5,u,2>: Cost 3 vext2 <2,3,4,5>, <u,2,3,0>
- 1497401860U, // <4,5,u,3>: Cost 2 vext1 <3,4,5,u>, <3,4,5,u>
- 1497402678U, // <4,5,u,4>: Cost 2 vext1 <3,4,5,u>, RHS
- 1557780634U, // <4,5,u,5>: Cost 2 vext2 <2,3,4,5>, RHS
- 2631522512U, // <4,5,u,6>: Cost 3 vext2 <2,3,4,5>, <u,6,3,7>
- 27705344U, // <4,5,u,7>: Cost 0 copy RHS
- 27705344U, // <4,5,u,u>: Cost 0 copy RHS
- 2618916864U, // <4,6,0,0>: Cost 3 vext2 <0,2,4,6>, <0,0,0,0>
- 1545175142U, // <4,6,0,1>: Cost 2 vext2 <0,2,4,6>, LHS
- 1545175244U, // <4,6,0,2>: Cost 2 vext2 <0,2,4,6>, <0,2,4,6>
- 3692658940U, // <4,6,0,3>: Cost 4 vext2 <0,2,4,6>, <0,3,1,0>
- 2618917202U, // <4,6,0,4>: Cost 3 vext2 <0,2,4,6>, <0,4,1,5>
- 3852910806U, // <4,6,0,5>: Cost 4 vuzpl RHS, <0,2,5,7>
- 2253525648U, // <4,6,0,6>: Cost 3 vrev <6,4,6,0>
- 4040764726U, // <4,6,0,7>: Cost 4 vzipr <2,3,4,0>, RHS
- 1545175709U, // <4,6,0,u>: Cost 2 vext2 <0,2,4,6>, LHS
- 2618917622U, // <4,6,1,0>: Cost 3 vext2 <0,2,4,6>, <1,0,3,2>
- 2618917684U, // <4,6,1,1>: Cost 3 vext2 <0,2,4,6>, <1,1,1,1>
- 2618917782U, // <4,6,1,2>: Cost 3 vext2 <0,2,4,6>, <1,2,3,0>
- 2618917848U, // <4,6,1,3>: Cost 3 vext2 <0,2,4,6>, <1,3,1,3>
- 3692659773U, // <4,6,1,4>: Cost 4 vext2 <0,2,4,6>, <1,4,3,5>
- 2618918032U, // <4,6,1,5>: Cost 3 vext2 <0,2,4,6>, <1,5,3,7>
- 3692659937U, // <4,6,1,6>: Cost 4 vext2 <0,2,4,6>, <1,6,3,7>
- 4032146742U, // <4,6,1,7>: Cost 4 vzipr <0,u,4,1>, RHS
- 2618918253U, // <4,6,1,u>: Cost 3 vext2 <0,2,4,6>, <1,u,1,3>
- 2618918380U, // <4,6,2,0>: Cost 3 vext2 <0,2,4,6>, <2,0,6,4>
- 2618918460U, // <4,6,2,1>: Cost 3 vext2 <0,2,4,6>, <2,1,6,3>
- 2618918504U, // <4,6,2,2>: Cost 3 vext2 <0,2,4,6>, <2,2,2,2>
- 2618918566U, // <4,6,2,3>: Cost 3 vext2 <0,2,4,6>, <2,3,0,1>
- 2618918679U, // <4,6,2,4>: Cost 3 vext2 <0,2,4,6>, <2,4,3,6>
- 2618918788U, // <4,6,2,5>: Cost 3 vext2 <0,2,4,6>, <2,5,6,7>
- 2618918842U, // <4,6,2,6>: Cost 3 vext2 <0,2,4,6>, <2,6,3,7>
- 2718749178U, // <4,6,2,7>: Cost 3 vext3 <5,6,7,4>, <6,2,7,3>
- 2618918971U, // <4,6,2,u>: Cost 3 vext2 <0,2,4,6>, <2,u,0,1>
- 2618919062U, // <4,6,3,0>: Cost 3 vext2 <0,2,4,6>, <3,0,1,2>
- 2636171526U, // <4,6,3,1>: Cost 3 vext2 <3,1,4,6>, <3,1,4,6>
- 3692661057U, // <4,6,3,2>: Cost 4 vext2 <0,2,4,6>, <3,2,2,2>
- 2618919324U, // <4,6,3,3>: Cost 3 vext2 <0,2,4,6>, <3,3,3,3>
- 2618919426U, // <4,6,3,4>: Cost 3 vext2 <0,2,4,6>, <3,4,5,6>
- 2638826058U, // <4,6,3,5>: Cost 3 vext2 <3,5,4,6>, <3,5,4,6>
- 3913303030U, // <4,6,3,6>: Cost 4 vuzpr <3,4,5,6>, <1,3,4,6>
- 2722730572U, // <4,6,3,7>: Cost 3 vext3 <6,3,7,4>, <6,3,7,4>
- 2618919710U, // <4,6,3,u>: Cost 3 vext2 <0,2,4,6>, <3,u,1,2>
- 2565210214U, // <4,6,4,0>: Cost 3 vext1 <2,4,6,4>, LHS
- 2718749286U, // <4,6,4,1>: Cost 3 vext3 <5,6,7,4>, <6,4,1,3>
- 2565211952U, // <4,6,4,2>: Cost 3 vext1 <2,4,6,4>, <2,4,6,4>
- 2571184649U, // <4,6,4,3>: Cost 3 vext1 <3,4,6,4>, <3,4,6,4>
- 2565213494U, // <4,6,4,4>: Cost 3 vext1 <2,4,6,4>, RHS
- 1545178422U, // <4,6,4,5>: Cost 2 vext2 <0,2,4,6>, RHS
- 1705430326U, // <4,6,4,6>: Cost 2 vuzpl RHS, RHS
- 2595075437U, // <4,6,4,7>: Cost 3 vext1 <7,4,6,4>, <7,4,6,4>
- 1545178665U, // <4,6,4,u>: Cost 2 vext2 <0,2,4,6>, RHS
- 2565218406U, // <4,6,5,0>: Cost 3 vext1 <2,4,6,5>, LHS
- 2645462736U, // <4,6,5,1>: Cost 3 vext2 <4,6,4,6>, <5,1,7,3>
- 2913399290U, // <4,6,5,2>: Cost 3 vzipl RHS, <6,2,7,3>
- 3913305394U, // <4,6,5,3>: Cost 4 vuzpr <3,4,5,6>, <4,5,6,3>
- 2645462982U, // <4,6,5,4>: Cost 3 vext2 <4,6,4,6>, <5,4,7,6>
- 2779172868U, // <4,6,5,5>: Cost 3 vuzpl RHS, <5,5,5,5>
- 2913391416U, // <4,6,5,6>: Cost 3 vzipl RHS, <6,6,6,6>
- 2821426486U, // <4,6,5,7>: Cost 3 vuzpr <0,4,2,6>, RHS
- 2821426487U, // <4,6,5,u>: Cost 3 vuzpr <0,4,2,6>, RHS
- 1503428710U, // <4,6,6,0>: Cost 2 vext1 <4,4,6,6>, LHS
- 2577171190U, // <4,6,6,1>: Cost 3 vext1 <4,4,6,6>, <1,0,3,2>
- 2645463546U, // <4,6,6,2>: Cost 3 vext2 <4,6,4,6>, <6,2,7,3>
- 2577172630U, // <4,6,6,3>: Cost 3 vext1 <4,4,6,6>, <3,0,1,2>
- 1503431908U, // <4,6,6,4>: Cost 2 vext1 <4,4,6,6>, <4,4,6,6>
- 2253501069U, // <4,6,6,5>: Cost 3 vrev <6,4,5,6>
- 2618921784U, // <4,6,6,6>: Cost 3 vext2 <0,2,4,6>, <6,6,6,6>
- 2954464566U, // <4,6,6,7>: Cost 3 vzipr <0,2,4,6>, RHS
- 1503434542U, // <4,6,6,u>: Cost 2 vext1 <4,4,6,6>, LHS
- 2645464058U, // <4,6,7,0>: Cost 3 vext2 <4,6,4,6>, <7,0,1,2>
- 2779173882U, // <4,6,7,1>: Cost 3 vuzpl RHS, <7,0,1,2>
- 3638978355U, // <4,6,7,2>: Cost 4 vext1 <2,4,6,7>, <2,4,6,7>
- 2725090156U, // <4,6,7,3>: Cost 3 vext3 <6,7,3,4>, <6,7,3,4>
- 2645464422U, // <4,6,7,4>: Cost 3 vext2 <4,6,4,6>, <7,4,5,6>
- 2779174246U, // <4,6,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
- 3852915914U, // <4,6,7,6>: Cost 4 vuzpl RHS, <7,2,6,3>
- 2779174508U, // <4,6,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
- 2779173945U, // <4,6,7,u>: Cost 3 vuzpl RHS, <7,0,u,2>
- 1503445094U, // <4,6,u,0>: Cost 2 vext1 <4,4,6,u>, LHS
- 1545180974U, // <4,6,u,1>: Cost 2 vext2 <0,2,4,6>, LHS
- 1705432878U, // <4,6,u,2>: Cost 2 vuzpl RHS, LHS
- 2618922940U, // <4,6,u,3>: Cost 3 vext2 <0,2,4,6>, <u,3,0,1>
- 1503448294U, // <4,6,u,4>: Cost 2 vext1 <4,4,6,u>, <4,4,6,u>
- 1545181338U, // <4,6,u,5>: Cost 2 vext2 <0,2,4,6>, RHS
- 1705433242U, // <4,6,u,6>: Cost 2 vuzpl RHS, RHS
- 2954480950U, // <4,6,u,7>: Cost 3 vzipr <0,2,4,u>, RHS
- 1545181541U, // <4,6,u,u>: Cost 2 vext2 <0,2,4,6>, LHS
- 3706601472U, // <4,7,0,0>: Cost 4 vext2 <2,5,4,7>, <0,0,0,0>
- 2632859750U, // <4,7,0,1>: Cost 3 vext2 <2,5,4,7>, LHS
- 2726343685U, // <4,7,0,2>: Cost 3 vext3 <7,0,2,4>, <7,0,2,4>
- 3701293312U, // <4,7,0,3>: Cost 4 vext2 <1,6,4,7>, <0,3,1,4>
- 3706601810U, // <4,7,0,4>: Cost 4 vext2 <2,5,4,7>, <0,4,1,5>
- 2259424608U, // <4,7,0,5>: Cost 3 vrev <7,4,5,0>
- 3695321617U, // <4,7,0,6>: Cost 4 vext2 <0,6,4,7>, <0,6,4,7>
- 3800454194U, // <4,7,0,7>: Cost 4 vext3 <7,0,7,4>, <7,0,7,4>
- 2632860317U, // <4,7,0,u>: Cost 3 vext2 <2,5,4,7>, LHS
- 2259064116U, // <4,7,1,0>: Cost 3 vrev <7,4,0,1>
- 3700630324U, // <4,7,1,1>: Cost 4 vext2 <1,5,4,7>, <1,1,1,1>
- 2632860570U, // <4,7,1,2>: Cost 3 vext2 <2,5,4,7>, <1,2,3,4>
- 3769635936U, // <4,7,1,3>: Cost 4 vext3 <1,u,3,4>, <7,1,3,5>
- 3656920374U, // <4,7,1,4>: Cost 4 vext1 <5,4,7,1>, RHS
- 3700630681U, // <4,7,1,5>: Cost 4 vext2 <1,5,4,7>, <1,5,4,7>
- 3701294314U, // <4,7,1,6>: Cost 4 vext2 <1,6,4,7>, <1,6,4,7>
- 3793818754U, // <4,7,1,7>: Cost 4 vext3 <5,u,7,4>, <7,1,7,3>
- 2259654012U, // <4,7,1,u>: Cost 3 vrev <7,4,u,1>
- 3656925286U, // <4,7,2,0>: Cost 4 vext1 <5,4,7,2>, LHS
- 3706603050U, // <4,7,2,1>: Cost 4 vext2 <2,5,4,7>, <2,1,4,3>
- 3706603112U, // <4,7,2,2>: Cost 4 vext2 <2,5,4,7>, <2,2,2,2>
- 2727744688U, // <4,7,2,3>: Cost 3 vext3 <7,2,3,4>, <7,2,3,4>
- 3705939745U, // <4,7,2,4>: Cost 4 vext2 <2,4,4,7>, <2,4,4,7>
- 2632861554U, // <4,7,2,5>: Cost 3 vext2 <2,5,4,7>, <2,5,4,7>
- 3706603450U, // <4,7,2,6>: Cost 4 vext2 <2,5,4,7>, <2,6,3,7>
- 3792491731U, // <4,7,2,7>: Cost 4 vext3 <5,6,7,4>, <7,2,7,3>
- 2634852453U, // <4,7,2,u>: Cost 3 vext2 <2,u,4,7>, <2,u,4,7>
- 3706603670U, // <4,7,3,0>: Cost 4 vext2 <2,5,4,7>, <3,0,1,2>
- 3662906266U, // <4,7,3,1>: Cost 4 vext1 <6,4,7,3>, <1,2,3,4>
- 3725183326U, // <4,7,3,2>: Cost 4 vext2 <5,6,4,7>, <3,2,5,4>
- 3706603932U, // <4,7,3,3>: Cost 4 vext2 <2,5,4,7>, <3,3,3,3>
- 3701295618U, // <4,7,3,4>: Cost 4 vext2 <1,6,4,7>, <3,4,5,6>
- 2638834251U, // <4,7,3,5>: Cost 3 vext2 <3,5,4,7>, <3,5,4,7>
- 2639497884U, // <4,7,3,6>: Cost 3 vext2 <3,6,4,7>, <3,6,4,7>
- 3802445093U, // <4,7,3,7>: Cost 4 vext3 <7,3,7,4>, <7,3,7,4>
- 2640825150U, // <4,7,3,u>: Cost 3 vext2 <3,u,4,7>, <3,u,4,7>
- 2718750004U, // <4,7,4,0>: Cost 3 vext3 <5,6,7,4>, <7,4,0,1>
- 3706604490U, // <4,7,4,1>: Cost 4 vext2 <2,5,4,7>, <4,1,2,3>
- 3656943474U, // <4,7,4,2>: Cost 4 vext1 <5,4,7,4>, <2,5,4,7>
- 3779884371U, // <4,7,4,3>: Cost 4 vext3 <3,5,7,4>, <7,4,3,5>
- 2259383643U, // <4,7,4,4>: Cost 3 vrev <7,4,4,4>
- 2632863030U, // <4,7,4,5>: Cost 3 vext2 <2,5,4,7>, RHS
- 2259531117U, // <4,7,4,6>: Cost 3 vrev <7,4,6,4>
- 3907340074U, // <4,7,4,7>: Cost 4 vuzpr <2,4,5,7>, <2,4,5,7>
- 2632863273U, // <4,7,4,u>: Cost 3 vext2 <2,5,4,7>, RHS
- 2913391610U, // <4,7,5,0>: Cost 3 vzipl RHS, <7,0,1,2>
- 3645006848U, // <4,7,5,1>: Cost 4 vext1 <3,4,7,5>, <1,3,5,7>
- 2589181646U, // <4,7,5,2>: Cost 3 vext1 <6,4,7,5>, <2,3,4,5>
- 3645008403U, // <4,7,5,3>: Cost 4 vext1 <3,4,7,5>, <3,4,7,5>
- 2913391974U, // <4,7,5,4>: Cost 3 vzipl RHS, <7,4,5,6>
- 2583211973U, // <4,7,5,5>: Cost 3 vext1 <5,4,7,5>, <5,4,7,5>
- 2589184670U, // <4,7,5,6>: Cost 3 vext1 <6,4,7,5>, <6,4,7,5>
- 2913392236U, // <4,7,5,7>: Cost 3 vzipl RHS, <7,7,7,7>
- 2913392258U, // <4,7,5,u>: Cost 3 vzipl RHS, <7,u,1,2>
- 1509474406U, // <4,7,6,0>: Cost 2 vext1 <5,4,7,6>, LHS
- 3047609338U, // <4,7,6,1>: Cost 3 vtrnl RHS, <7,0,1,2>
- 2583217768U, // <4,7,6,2>: Cost 3 vext1 <5,4,7,6>, <2,2,2,2>
- 2583218326U, // <4,7,6,3>: Cost 3 vext1 <5,4,7,6>, <3,0,1,2>
- 1509477686U, // <4,7,6,4>: Cost 2 vext1 <5,4,7,6>, RHS
- 1509478342U, // <4,7,6,5>: Cost 2 vext1 <5,4,7,6>, <5,4,7,6>
- 2583220730U, // <4,7,6,6>: Cost 3 vext1 <5,4,7,6>, <6,2,7,3>
- 3047609964U, // <4,7,6,7>: Cost 3 vtrnl RHS, <7,7,7,7>
- 1509480238U, // <4,7,6,u>: Cost 2 vext1 <5,4,7,6>, LHS
- 3650994278U, // <4,7,7,0>: Cost 4 vext1 <4,4,7,7>, LHS
- 3650995098U, // <4,7,7,1>: Cost 4 vext1 <4,4,7,7>, <1,2,3,4>
- 3650996010U, // <4,7,7,2>: Cost 4 vext1 <4,4,7,7>, <2,4,5,7>
- 3804804677U, // <4,7,7,3>: Cost 4 vext3 <7,7,3,4>, <7,7,3,4>
- 3650997486U, // <4,7,7,4>: Cost 4 vext1 <4,4,7,7>, <4,4,7,7>
- 2662725039U, // <4,7,7,5>: Cost 3 vext2 <7,5,4,7>, <7,5,4,7>
- 3662942880U, // <4,7,7,6>: Cost 4 vext1 <6,4,7,7>, <6,4,7,7>
- 2718750316U, // <4,7,7,7>: Cost 3 vext3 <5,6,7,4>, <7,7,7,7>
- 2664715938U, // <4,7,7,u>: Cost 3 vext2 <7,u,4,7>, <7,u,4,7>
- 1509490790U, // <4,7,u,0>: Cost 2 vext1 <5,4,7,u>, LHS
- 2632865582U, // <4,7,u,1>: Cost 3 vext2 <2,5,4,7>, LHS
- 2583234152U, // <4,7,u,2>: Cost 3 vext1 <5,4,7,u>, <2,2,2,2>
- 2583234710U, // <4,7,u,3>: Cost 3 vext1 <5,4,7,u>, <3,0,1,2>
- 1509494070U, // <4,7,u,4>: Cost 2 vext1 <5,4,7,u>, RHS
- 1509494728U, // <4,7,u,5>: Cost 2 vext1 <5,4,7,u>, <5,4,7,u>
- 2583237114U, // <4,7,u,6>: Cost 3 vext1 <5,4,7,u>, <6,2,7,3>
- 3047757420U, // <4,7,u,7>: Cost 3 vtrnl RHS, <7,7,7,7>
- 1509496622U, // <4,7,u,u>: Cost 2 vext1 <5,4,7,u>, LHS
- 2618933248U, // <4,u,0,0>: Cost 3 vext2 <0,2,4,u>, <0,0,0,0>
- 1545191526U, // <4,u,0,1>: Cost 2 vext2 <0,2,4,u>, LHS
- 1545191630U, // <4,u,0,2>: Cost 2 vext2 <0,2,4,u>, <0,2,4,u>
- 2691913445U, // <4,u,0,3>: Cost 3 vext3 <1,2,3,4>, <u,0,3,2>
- 2618933586U, // <4,u,0,4>: Cost 3 vext2 <0,2,4,u>, <0,4,1,5>
- 2265397305U, // <4,u,0,5>: Cost 3 vrev <u,4,5,0>
- 2595189625U, // <4,u,0,6>: Cost 3 vext1 <7,4,u,0>, <6,7,4,u>
- 2595190139U, // <4,u,0,7>: Cost 3 vext1 <7,4,u,0>, <7,4,u,0>
- 1545192093U, // <4,u,0,u>: Cost 2 vext2 <0,2,4,u>, LHS
- 2618934006U, // <4,u,1,0>: Cost 3 vext2 <0,2,4,u>, <1,0,3,2>
- 2618934068U, // <4,u,1,1>: Cost 3 vext2 <0,2,4,u>, <1,1,1,1>
- 1618171694U, // <4,u,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2618934232U, // <4,u,1,3>: Cost 3 vext2 <0,2,4,u>, <1,3,1,3>
- 2695894848U, // <4,u,1,4>: Cost 3 vext3 <1,u,3,4>, <u,1,4,3>
- 2618934416U, // <4,u,1,5>: Cost 3 vext2 <0,2,4,u>, <1,5,3,7>
- 3692676321U, // <4,u,1,6>: Cost 4 vext2 <0,2,4,u>, <1,6,3,7>
- 2718750555U, // <4,u,1,7>: Cost 3 vext3 <5,6,7,4>, <u,1,7,3>
- 1618171748U, // <4,u,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2553397350U, // <4,u,2,0>: Cost 3 vext1 <0,4,u,2>, LHS
- 2630215215U, // <4,u,2,1>: Cost 3 vext2 <2,1,4,u>, <2,1,4,u>
- 2618934888U, // <4,u,2,2>: Cost 3 vext2 <0,2,4,u>, <2,2,2,2>
- 1557800657U, // <4,u,2,3>: Cost 2 vext2 <2,3,4,u>, <2,3,4,u>
- 2618935065U, // <4,u,2,4>: Cost 3 vext2 <0,2,4,u>, <2,4,3,u>
- 2733864859U, // <4,u,2,5>: Cost 3 vext3 <u,2,5,4>, <u,2,5,4>
- 2618935226U, // <4,u,2,6>: Cost 3 vext2 <0,2,4,u>, <2,6,3,7>
- 2718750636U, // <4,u,2,7>: Cost 3 vext3 <5,6,7,4>, <u,2,7,3>
- 1561118822U, // <4,u,2,u>: Cost 2 vext2 <2,u,4,u>, <2,u,4,u>
- 2618935446U, // <4,u,3,0>: Cost 3 vext2 <0,2,4,u>, <3,0,1,2>
- 2779318422U, // <4,u,3,1>: Cost 3 vuzpl RHS, <3,0,1,2>
- 2636851545U, // <4,u,3,2>: Cost 3 vext2 <3,2,4,u>, <3,2,4,u>
- 2618935708U, // <4,u,3,3>: Cost 3 vext2 <0,2,4,u>, <3,3,3,3>
- 2618935810U, // <4,u,3,4>: Cost 3 vext2 <0,2,4,u>, <3,4,5,6>
- 2691913711U, // <4,u,3,5>: Cost 3 vext3 <1,2,3,4>, <u,3,5,7>
- 2588725862U, // <4,u,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 2640169710U, // <4,u,3,7>: Cost 3 vext2 <3,7,4,u>, <3,7,4,u>
- 2618936094U, // <4,u,3,u>: Cost 3 vext2 <0,2,4,u>, <3,u,1,2>
- 1503559782U, // <4,u,4,0>: Cost 2 vext1 <4,4,u,4>, LHS
- 2692282391U, // <4,u,4,1>: Cost 3 vext3 <1,2,u,4>, <u,4,1,2>
- 2565359426U, // <4,u,4,2>: Cost 3 vext1 <2,4,u,4>, <2,4,u,4>
- 2571332123U, // <4,u,4,3>: Cost 3 vext1 <3,4,u,4>, <3,4,u,4>
- 161926454U, // <4,u,4,4>: Cost 1 vdup0 RHS
- 1545194806U, // <4,u,4,5>: Cost 2 vext2 <0,2,4,u>, RHS
- 1705577782U, // <4,u,4,6>: Cost 2 vuzpl RHS, RHS
- 2718750801U, // <4,u,4,7>: Cost 3 vext3 <5,6,7,4>, <u,4,7,6>
- 161926454U, // <4,u,4,u>: Cost 1 vdup0 RHS
- 1479164006U, // <4,u,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
- 1839650606U, // <4,u,5,1>: Cost 2 vzipl RHS, LHS
- 2565367502U, // <4,u,5,2>: Cost 3 vext1 <2,4,u,5>, <2,3,4,5>
- 3089777309U, // <4,u,5,3>: Cost 3 vtrnr <0,4,1,5>, LHS
- 1479167286U, // <4,u,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
- 1839650970U, // <4,u,5,5>: Cost 2 vzipl RHS, RHS
- 1618172058U, // <4,u,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 3089780265U, // <4,u,5,7>: Cost 3 vtrnr <0,4,1,5>, RHS
- 1618172076U, // <4,u,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
- 1479688294U, // <4,u,6,0>: Cost 2 vext1 <0,4,u,6>, LHS
- 2553430774U, // <4,u,6,1>: Cost 3 vext1 <0,4,u,6>, <1,0,3,2>
- 1973868334U, // <4,u,6,2>: Cost 2 vtrnl RHS, LHS
- 1497606685U, // <4,u,6,3>: Cost 2 vext1 <3,4,u,6>, <3,4,u,6>
- 1479691574U, // <4,u,6,4>: Cost 2 vext1 <0,4,u,6>, RHS
- 1509552079U, // <4,u,6,5>: Cost 2 vext1 <5,4,u,6>, <5,4,u,6>
- 1973868698U, // <4,u,6,6>: Cost 2 vtrnl RHS, RHS
- 27705344U, // <4,u,6,7>: Cost 0 copy RHS
- 27705344U, // <4,u,6,u>: Cost 0 copy RHS
- 2565382246U, // <4,u,7,0>: Cost 3 vext1 <2,4,u,7>, LHS
- 2565383066U, // <4,u,7,1>: Cost 3 vext1 <2,4,u,7>, <1,2,3,4>
- 2565384005U, // <4,u,7,2>: Cost 3 vext1 <2,4,u,7>, <2,4,u,7>
- 2661405966U, // <4,u,7,3>: Cost 3 vext2 <7,3,4,u>, <7,3,4,u>
- 2565385526U, // <4,u,7,4>: Cost 3 vext1 <2,4,u,7>, RHS
- 2779321702U, // <4,u,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
- 2589274793U, // <4,u,7,6>: Cost 3 vext1 <6,4,u,7>, <6,4,u,7>
- 2779321964U, // <4,u,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
- 2565388078U, // <4,u,7,u>: Cost 3 vext1 <2,4,u,7>, LHS
- 1479704678U, // <4,u,u,0>: Cost 2 vext1 <0,4,u,u>, LHS
- 1545197358U, // <4,u,u,1>: Cost 2 vext2 <0,2,4,u>, LHS
- 1618172261U, // <4,u,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 1497623071U, // <4,u,u,3>: Cost 2 vext1 <3,4,u,u>, <3,4,u,u>
- 161926454U, // <4,u,u,4>: Cost 1 vdup0 RHS
- 1545197722U, // <4,u,u,5>: Cost 2 vext2 <0,2,4,u>, RHS
- 1618172301U, // <4,u,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 27705344U, // <4,u,u,7>: Cost 0 copy RHS
- 27705344U, // <4,u,u,u>: Cost 0 copy RHS
- 2687123456U, // <5,0,0,0>: Cost 3 vext3 <0,4,1,5>, <0,0,0,0>
- 2687123466U, // <5,0,0,1>: Cost 3 vext3 <0,4,1,5>, <0,0,1,1>
- 2687123476U, // <5,0,0,2>: Cost 3 vext3 <0,4,1,5>, <0,0,2,2>
- 3710599434U, // <5,0,0,3>: Cost 4 vext2 <3,2,5,0>, <0,3,2,5>
- 2642166098U, // <5,0,0,4>: Cost 3 vext2 <4,1,5,0>, <0,4,1,5>
- 3657060306U, // <5,0,0,5>: Cost 4 vext1 <5,5,0,0>, <5,5,0,0>
- 3292094923U, // <5,0,0,6>: Cost 4 vrev <0,5,6,0>
- 3669005700U, // <5,0,0,7>: Cost 4 vext1 <7,5,0,0>, <7,5,0,0>
- 2687123530U, // <5,0,0,u>: Cost 3 vext3 <0,4,1,5>, <0,0,u,2>
- 2559434854U, // <5,0,1,0>: Cost 3 vext1 <1,5,0,1>, LHS
- 2559435887U, // <5,0,1,1>: Cost 3 vext1 <1,5,0,1>, <1,5,0,1>
- 1613381734U, // <5,0,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 3698656256U, // <5,0,1,3>: Cost 4 vext2 <1,2,5,0>, <1,3,5,7>
- 2559438134U, // <5,0,1,4>: Cost 3 vext1 <1,5,0,1>, RHS
- 2583326675U, // <5,0,1,5>: Cost 3 vext1 <5,5,0,1>, <5,5,0,1>
- 3715908851U, // <5,0,1,6>: Cost 4 vext2 <4,1,5,0>, <1,6,5,7>
- 3657069562U, // <5,0,1,7>: Cost 4 vext1 <5,5,0,1>, <7,0,1,2>
- 1613381788U, // <5,0,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2686017700U, // <5,0,2,0>: Cost 3 vext3 <0,2,4,5>, <0,2,0,2>
- 2685796528U, // <5,0,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
- 2698625208U, // <5,0,2,2>: Cost 3 vext3 <2,3,4,5>, <0,2,2,4>
- 2685944002U, // <5,0,2,3>: Cost 3 vext3 <0,2,3,5>, <0,2,3,5>
- 2686017739U, // <5,0,2,4>: Cost 3 vext3 <0,2,4,5>, <0,2,4,5>
- 2686091476U, // <5,0,2,5>: Cost 3 vext3 <0,2,5,5>, <0,2,5,5>
- 2725167324U, // <5,0,2,6>: Cost 3 vext3 <6,7,4,5>, <0,2,6,4>
- 2595280230U, // <5,0,2,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
- 2686312687U, // <5,0,2,u>: Cost 3 vext3 <0,2,u,5>, <0,2,u,5>
- 3760128248U, // <5,0,3,0>: Cost 4 vext3 <0,3,0,5>, <0,3,0,5>
- 3759685888U, // <5,0,3,1>: Cost 4 vext3 <0,2,3,5>, <0,3,1,4>
- 2686533898U, // <5,0,3,2>: Cost 3 vext3 <0,3,2,5>, <0,3,2,5>
- 3760349459U, // <5,0,3,3>: Cost 4 vext3 <0,3,3,5>, <0,3,3,5>
- 2638187004U, // <5,0,3,4>: Cost 3 vext2 <3,4,5,0>, <3,4,5,0>
- 3776348452U, // <5,0,3,5>: Cost 4 vext3 <3,0,4,5>, <0,3,5,4>
- 3713256094U, // <5,0,3,6>: Cost 4 vext2 <3,6,5,0>, <3,6,5,0>
- 3914064896U, // <5,0,3,7>: Cost 4 vuzpr <3,5,7,0>, <1,3,5,7>
- 2686976320U, // <5,0,3,u>: Cost 3 vext3 <0,3,u,5>, <0,3,u,5>
- 2559459430U, // <5,0,4,0>: Cost 3 vext1 <1,5,0,4>, LHS
- 1613381970U, // <5,0,4,1>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
- 2687123804U, // <5,0,4,2>: Cost 3 vext3 <0,4,1,5>, <0,4,2,6>
- 3761013092U, // <5,0,4,3>: Cost 4 vext3 <0,4,3,5>, <0,4,3,5>
- 2559462710U, // <5,0,4,4>: Cost 3 vext1 <1,5,0,4>, RHS
- 2638187830U, // <5,0,4,5>: Cost 3 vext2 <3,4,5,0>, RHS
- 3761234303U, // <5,0,4,6>: Cost 4 vext3 <0,4,6,5>, <0,4,6,5>
- 2646150600U, // <5,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
- 1613381970U, // <5,0,4,u>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
- 3766763926U, // <5,0,5,0>: Cost 4 vext3 <1,4,0,5>, <0,5,0,1>
- 2919268454U, // <5,0,5,1>: Cost 3 vzipl <5,5,5,5>, LHS
- 3053486182U, // <5,0,5,2>: Cost 3 vtrnl <5,5,5,5>, LHS
- 3723210589U, // <5,0,5,3>: Cost 4 vext2 <5,3,5,0>, <5,3,5,0>
- 3766763966U, // <5,0,5,4>: Cost 4 vext3 <1,4,0,5>, <0,5,4,5>
- 2650796031U, // <5,0,5,5>: Cost 3 vext2 <5,5,5,0>, <5,5,5,0>
- 3719893090U, // <5,0,5,6>: Cost 4 vext2 <4,7,5,0>, <5,6,7,0>
- 3914067254U, // <5,0,5,7>: Cost 4 vuzpr <3,5,7,0>, RHS
- 2919269021U, // <5,0,5,u>: Cost 3 vzipl <5,5,5,5>, LHS
- 4047519744U, // <5,0,6,0>: Cost 4 vzipr <3,4,5,6>, <0,0,0,0>
- 2920038502U, // <5,0,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
- 3759759871U, // <5,0,6,2>: Cost 4 vext3 <0,2,4,5>, <0,6,2,7>
- 3645164070U, // <5,0,6,3>: Cost 4 vext1 <3,5,0,6>, <3,5,0,6>
- 3762414095U, // <5,0,6,4>: Cost 4 vext3 <0,6,4,5>, <0,6,4,5>
- 3993780690U, // <5,0,6,5>: Cost 4 vzipl <5,6,7,0>, <0,5,6,7>
- 3719893816U, // <5,0,6,6>: Cost 4 vext2 <4,7,5,0>, <6,6,6,6>
- 2662077302U, // <5,0,6,7>: Cost 3 vext2 <7,4,5,0>, <6,7,4,5>
- 2920039069U, // <5,0,6,u>: Cost 3 vzipl <5,6,7,0>, LHS
- 2565455974U, // <5,0,7,0>: Cost 3 vext1 <2,5,0,7>, LHS
- 2565456790U, // <5,0,7,1>: Cost 3 vext1 <2,5,0,7>, <1,2,3,0>
- 2565457742U, // <5,0,7,2>: Cost 3 vext1 <2,5,0,7>, <2,5,0,7>
- 3639199894U, // <5,0,7,3>: Cost 4 vext1 <2,5,0,7>, <3,0,1,2>
- 2565459254U, // <5,0,7,4>: Cost 3 vext1 <2,5,0,7>, RHS
- 2589347938U, // <5,0,7,5>: Cost 3 vext1 <6,5,0,7>, <5,6,7,0>
- 2589348530U, // <5,0,7,6>: Cost 3 vext1 <6,5,0,7>, <6,5,0,7>
- 4188456422U, // <5,0,7,7>: Cost 4 vtrnr RHS, <2,0,5,7>
- 2565461806U, // <5,0,7,u>: Cost 3 vext1 <2,5,0,7>, LHS
- 2687124106U, // <5,0,u,0>: Cost 3 vext3 <0,4,1,5>, <0,u,0,2>
- 1616036502U, // <5,0,u,1>: Cost 2 vext3 <0,u,1,5>, <0,u,1,5>
- 1613382301U, // <5,0,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 2689925800U, // <5,0,u,3>: Cost 3 vext3 <0,u,3,5>, <0,u,3,5>
- 2687124146U, // <5,0,u,4>: Cost 3 vext3 <0,4,1,5>, <0,u,4,6>
- 2638190746U, // <5,0,u,5>: Cost 3 vext2 <3,4,5,0>, RHS
- 2589356723U, // <5,0,u,6>: Cost 3 vext1 <6,5,0,u>, <6,5,0,u>
- 2595280230U, // <5,0,u,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
- 1613382355U, // <5,0,u,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2646818816U, // <5,1,0,0>: Cost 3 vext2 <4,u,5,1>, <0,0,0,0>
- 1573077094U, // <5,1,0,1>: Cost 2 vext2 <4,u,5,1>, LHS
- 2646818980U, // <5,1,0,2>: Cost 3 vext2 <4,u,5,1>, <0,2,0,2>
- 2687124214U, // <5,1,0,3>: Cost 3 vext3 <0,4,1,5>, <1,0,3,2>
- 2641510738U, // <5,1,0,4>: Cost 3 vext2 <4,0,5,1>, <0,4,1,5>
- 2641510814U, // <5,1,0,5>: Cost 3 vext2 <4,0,5,1>, <0,5,1,0>
- 3720561142U, // <5,1,0,6>: Cost 4 vext2 <4,u,5,1>, <0,6,1,7>
- 3298141357U, // <5,1,0,7>: Cost 4 vrev <1,5,7,0>
- 1573077661U, // <5,1,0,u>: Cost 2 vext2 <4,u,5,1>, LHS
- 2223891567U, // <5,1,1,0>: Cost 3 vrev <1,5,0,1>
- 2687124276U, // <5,1,1,1>: Cost 3 vext3 <0,4,1,5>, <1,1,1,1>
- 2646819734U, // <5,1,1,2>: Cost 3 vext2 <4,u,5,1>, <1,2,3,0>
- 2687124296U, // <5,1,1,3>: Cost 3 vext3 <0,4,1,5>, <1,1,3,3>
- 2691326803U, // <5,1,1,4>: Cost 3 vext3 <1,1,4,5>, <1,1,4,5>
- 2691400540U, // <5,1,1,5>: Cost 3 vext3 <1,1,5,5>, <1,1,5,5>
- 3765216101U, // <5,1,1,6>: Cost 4 vext3 <1,1,6,5>, <1,1,6,5>
- 3765289838U, // <5,1,1,7>: Cost 4 vext3 <1,1,7,5>, <1,1,7,5>
- 2687124341U, // <5,1,1,u>: Cost 3 vext3 <0,4,1,5>, <1,1,u,3>
- 3297641584U, // <5,1,2,0>: Cost 4 vrev <1,5,0,2>
- 3763520391U, // <5,1,2,1>: Cost 4 vext3 <0,u,1,5>, <1,2,1,3>
- 2646820456U, // <5,1,2,2>: Cost 3 vext2 <4,u,5,1>, <2,2,2,2>
- 2687124374U, // <5,1,2,3>: Cost 3 vext3 <0,4,1,5>, <1,2,3,0>
- 2691990436U, // <5,1,2,4>: Cost 3 vext3 <1,2,4,5>, <1,2,4,5>
- 2687124395U, // <5,1,2,5>: Cost 3 vext3 <0,4,1,5>, <1,2,5,3>
- 2646820794U, // <5,1,2,6>: Cost 3 vext2 <4,u,5,1>, <2,6,3,7>
- 3808199610U, // <5,1,2,7>: Cost 4 vext3 <u,3,4,5>, <1,2,7,0>
- 2687124419U, // <5,1,2,u>: Cost 3 vext3 <0,4,1,5>, <1,2,u,0>
- 2577440870U, // <5,1,3,0>: Cost 3 vext1 <4,5,1,3>, LHS
- 2687124440U, // <5,1,3,1>: Cost 3 vext3 <0,4,1,5>, <1,3,1,3>
- 3759686627U, // <5,1,3,2>: Cost 4 vext3 <0,2,3,5>, <1,3,2,5>
- 2692580332U, // <5,1,3,3>: Cost 3 vext3 <1,3,3,5>, <1,3,3,5>
- 2687124469U, // <5,1,3,4>: Cost 3 vext3 <0,4,1,5>, <1,3,4,5>
- 2685207552U, // <5,1,3,5>: Cost 3 vext3 <0,1,2,5>, <1,3,5,7>
- 3760866313U, // <5,1,3,6>: Cost 4 vext3 <0,4,1,5>, <1,3,6,7>
- 2692875280U, // <5,1,3,7>: Cost 3 vext3 <1,3,7,5>, <1,3,7,5>
- 2687124503U, // <5,1,3,u>: Cost 3 vext3 <0,4,1,5>, <1,3,u,3>
- 1567771538U, // <5,1,4,0>: Cost 2 vext2 <4,0,5,1>, <4,0,5,1>
- 2693096491U, // <5,1,4,1>: Cost 3 vext3 <1,4,1,5>, <1,4,1,5>
- 2693170228U, // <5,1,4,2>: Cost 3 vext3 <1,4,2,5>, <1,4,2,5>
- 2687124541U, // <5,1,4,3>: Cost 3 vext3 <0,4,1,5>, <1,4,3,5>
- 2646822096U, // <5,1,4,4>: Cost 3 vext2 <4,u,5,1>, <4,4,4,4>
- 1573080374U, // <5,1,4,5>: Cost 2 vext2 <4,u,5,1>, RHS
- 2646822260U, // <5,1,4,6>: Cost 3 vext2 <4,u,5,1>, <4,6,4,6>
- 3298174129U, // <5,1,4,7>: Cost 4 vrev <1,5,7,4>
- 1573080602U, // <5,1,4,u>: Cost 2 vext2 <4,u,5,1>, <4,u,5,1>
- 2687124591U, // <5,1,5,0>: Cost 3 vext3 <0,4,1,5>, <1,5,0,1>
- 2646822543U, // <5,1,5,1>: Cost 3 vext2 <4,u,5,1>, <5,1,0,1>
- 3760866433U, // <5,1,5,2>: Cost 4 vext3 <0,4,1,5>, <1,5,2,1>
- 2687124624U, // <5,1,5,3>: Cost 3 vext3 <0,4,1,5>, <1,5,3,7>
- 2687124631U, // <5,1,5,4>: Cost 3 vext3 <0,4,1,5>, <1,5,4,5>
- 2646822916U, // <5,1,5,5>: Cost 3 vext2 <4,u,5,1>, <5,5,5,5>
- 2646823010U, // <5,1,5,6>: Cost 3 vext2 <4,u,5,1>, <5,6,7,0>
- 2646823080U, // <5,1,5,7>: Cost 3 vext2 <4,u,5,1>, <5,7,5,7>
- 2687124663U, // <5,1,5,u>: Cost 3 vext3 <0,4,1,5>, <1,5,u,1>
- 2553577574U, // <5,1,6,0>: Cost 3 vext1 <0,5,1,6>, LHS
- 3763520719U, // <5,1,6,1>: Cost 4 vext3 <0,u,1,5>, <1,6,1,7>
- 2646823418U, // <5,1,6,2>: Cost 3 vext2 <4,u,5,1>, <6,2,7,3>
- 3760866529U, // <5,1,6,3>: Cost 4 vext3 <0,4,1,5>, <1,6,3,7>
- 2553580854U, // <5,1,6,4>: Cost 3 vext1 <0,5,1,6>, RHS
- 2687124723U, // <5,1,6,5>: Cost 3 vext3 <0,4,1,5>, <1,6,5,7>
- 2646823736U, // <5,1,6,6>: Cost 3 vext2 <4,u,5,1>, <6,6,6,6>
- 2646823758U, // <5,1,6,7>: Cost 3 vext2 <4,u,5,1>, <6,7,0,1>
- 2646823839U, // <5,1,6,u>: Cost 3 vext2 <4,u,5,1>, <6,u,0,1>
- 2559557734U, // <5,1,7,0>: Cost 3 vext1 <1,5,1,7>, LHS
- 2559558452U, // <5,1,7,1>: Cost 3 vext1 <1,5,1,7>, <1,1,1,1>
- 2571503270U, // <5,1,7,2>: Cost 3 vext1 <3,5,1,7>, <2,3,0,1>
- 2040971366U, // <5,1,7,3>: Cost 2 vtrnr RHS, LHS
- 2559561014U, // <5,1,7,4>: Cost 3 vext1 <1,5,1,7>, RHS
- 2595393232U, // <5,1,7,5>: Cost 3 vext1 <7,5,1,7>, <5,1,7,3>
- 4188455035U, // <5,1,7,6>: Cost 4 vtrnr RHS, <0,1,4,6>
- 2646824556U, // <5,1,7,7>: Cost 3 vext2 <4,u,5,1>, <7,7,7,7>
- 2040971371U, // <5,1,7,u>: Cost 2 vtrnr RHS, LHS
- 1591662326U, // <5,1,u,0>: Cost 2 vext2 <u,0,5,1>, <u,0,5,1>
- 1573082926U, // <5,1,u,1>: Cost 2 vext2 <4,u,5,1>, LHS
- 2695824760U, // <5,1,u,2>: Cost 3 vext3 <1,u,2,5>, <1,u,2,5>
- 2040979558U, // <5,1,u,3>: Cost 2 vtrnr RHS, LHS
- 2687124874U, // <5,1,u,4>: Cost 3 vext3 <0,4,1,5>, <1,u,4,5>
- 1573083290U, // <5,1,u,5>: Cost 2 vext2 <4,u,5,1>, RHS
- 2646825168U, // <5,1,u,6>: Cost 3 vext2 <4,u,5,1>, <u,6,3,7>
- 2646825216U, // <5,1,u,7>: Cost 3 vext2 <4,u,5,1>, <u,7,0,1>
- 2040979563U, // <5,1,u,u>: Cost 2 vtrnr RHS, LHS
- 3702652928U, // <5,2,0,0>: Cost 4 vext2 <1,u,5,2>, <0,0,0,0>
- 2628911206U, // <5,2,0,1>: Cost 3 vext2 <1,u,5,2>, LHS
- 2641518756U, // <5,2,0,2>: Cost 3 vext2 <4,0,5,2>, <0,2,0,2>
- 3759760847U, // <5,2,0,3>: Cost 4 vext3 <0,2,4,5>, <2,0,3,2>
- 3760866775U, // <5,2,0,4>: Cost 4 vext3 <0,4,1,5>, <2,0,4,1>
- 3759539680U, // <5,2,0,5>: Cost 4 vext3 <0,2,1,5>, <2,0,5,1>
- 3760866796U, // <5,2,0,6>: Cost 4 vext3 <0,4,1,5>, <2,0,6,4>
- 3304114054U, // <5,2,0,7>: Cost 4 vrev <2,5,7,0>
- 2628911773U, // <5,2,0,u>: Cost 3 vext2 <1,u,5,2>, LHS
- 2623603464U, // <5,2,1,0>: Cost 3 vext2 <1,0,5,2>, <1,0,5,2>
- 3698008921U, // <5,2,1,1>: Cost 4 vext2 <1,1,5,2>, <1,1,5,2>
- 3633325603U, // <5,2,1,2>: Cost 4 vext1 <1,5,2,1>, <2,1,3,5>
- 2687125027U, // <5,2,1,3>: Cost 3 vext3 <0,4,1,5>, <2,1,3,5>
- 3633327414U, // <5,2,1,4>: Cost 4 vext1 <1,5,2,1>, RHS
- 3759539760U, // <5,2,1,5>: Cost 4 vext3 <0,2,1,5>, <2,1,5,0>
- 3760866876U, // <5,2,1,6>: Cost 4 vext3 <0,4,1,5>, <2,1,6,3>
- 3304122247U, // <5,2,1,7>: Cost 4 vrev <2,5,7,1>
- 2687125072U, // <5,2,1,u>: Cost 3 vext3 <0,4,1,5>, <2,1,u,5>
- 3633332326U, // <5,2,2,0>: Cost 4 vext1 <1,5,2,2>, LHS
- 3759760992U, // <5,2,2,1>: Cost 4 vext3 <0,2,4,5>, <2,2,1,3>
- 2687125096U, // <5,2,2,2>: Cost 3 vext3 <0,4,1,5>, <2,2,2,2>
- 2687125106U, // <5,2,2,3>: Cost 3 vext3 <0,4,1,5>, <2,2,3,3>
- 2697963133U, // <5,2,2,4>: Cost 3 vext3 <2,2,4,5>, <2,2,4,5>
- 3759466120U, // <5,2,2,5>: Cost 4 vext3 <0,2,0,5>, <2,2,5,7>
- 3760866960U, // <5,2,2,6>: Cost 4 vext3 <0,4,1,5>, <2,2,6,6>
- 3771926168U, // <5,2,2,7>: Cost 4 vext3 <2,2,7,5>, <2,2,7,5>
- 2687125151U, // <5,2,2,u>: Cost 3 vext3 <0,4,1,5>, <2,2,u,3>
- 2687125158U, // <5,2,3,0>: Cost 3 vext3 <0,4,1,5>, <2,3,0,1>
- 2698405555U, // <5,2,3,1>: Cost 3 vext3 <2,3,1,5>, <2,3,1,5>
- 2577516238U, // <5,2,3,2>: Cost 3 vext1 <4,5,2,3>, <2,3,4,5>
- 3759687365U, // <5,2,3,3>: Cost 4 vext3 <0,2,3,5>, <2,3,3,5>
- 1624884942U, // <5,2,3,4>: Cost 2 vext3 <2,3,4,5>, <2,3,4,5>
- 2698700503U, // <5,2,3,5>: Cost 3 vext3 <2,3,5,5>, <2,3,5,5>
- 3772368608U, // <5,2,3,6>: Cost 4 vext3 <2,3,4,5>, <2,3,6,5>
- 3702655716U, // <5,2,3,7>: Cost 4 vext2 <1,u,5,2>, <3,7,3,7>
- 1625179890U, // <5,2,3,u>: Cost 2 vext3 <2,3,u,5>, <2,3,u,5>
- 2641521555U, // <5,2,4,0>: Cost 3 vext2 <4,0,5,2>, <4,0,5,2>
- 3772368642U, // <5,2,4,1>: Cost 4 vext3 <2,3,4,5>, <2,4,1,3>
- 2699142925U, // <5,2,4,2>: Cost 3 vext3 <2,4,2,5>, <2,4,2,5>
- 2698626838U, // <5,2,4,3>: Cost 3 vext3 <2,3,4,5>, <2,4,3,5>
- 2698626848U, // <5,2,4,4>: Cost 3 vext3 <2,3,4,5>, <2,4,4,6>
- 2628914486U, // <5,2,4,5>: Cost 3 vext2 <1,u,5,2>, RHS
- 2645503353U, // <5,2,4,6>: Cost 3 vext2 <4,6,5,2>, <4,6,5,2>
- 3304146826U, // <5,2,4,7>: Cost 4 vrev <2,5,7,4>
- 2628914729U, // <5,2,4,u>: Cost 3 vext2 <1,u,5,2>, RHS
- 2553643110U, // <5,2,5,0>: Cost 3 vext1 <0,5,2,5>, LHS
- 3758950227U, // <5,2,5,1>: Cost 4 vext3 <0,1,2,5>, <2,5,1,3>
- 3759761248U, // <5,2,5,2>: Cost 4 vext3 <0,2,4,5>, <2,5,2,7>
- 2982396006U, // <5,2,5,3>: Cost 3 vzipr <4,u,5,5>, LHS
- 2553646390U, // <5,2,5,4>: Cost 3 vext1 <0,5,2,5>, RHS
- 2553647108U, // <5,2,5,5>: Cost 3 vext1 <0,5,2,5>, <5,5,5,5>
- 3760867204U, // <5,2,5,6>: Cost 4 vext3 <0,4,1,5>, <2,5,6,7>
- 3702657141U, // <5,2,5,7>: Cost 4 vext2 <1,u,5,2>, <5,7,0,1>
- 2982396011U, // <5,2,5,u>: Cost 3 vzipr <4,u,5,5>, LHS
- 3627393126U, // <5,2,6,0>: Cost 4 vext1 <0,5,2,6>, LHS
- 3760867236U, // <5,2,6,1>: Cost 4 vext3 <0,4,1,5>, <2,6,1,3>
- 2645504506U, // <5,2,6,2>: Cost 3 vext2 <4,6,5,2>, <6,2,7,3>
- 2687125434U, // <5,2,6,3>: Cost 3 vext3 <0,4,1,5>, <2,6,3,7>
- 2700617665U, // <5,2,6,4>: Cost 3 vext3 <2,6,4,5>, <2,6,4,5>
- 3760867276U, // <5,2,6,5>: Cost 4 vext3 <0,4,1,5>, <2,6,5,7>
- 3763521493U, // <5,2,6,6>: Cost 4 vext3 <0,u,1,5>, <2,6,6,7>
- 3719246670U, // <5,2,6,7>: Cost 4 vext2 <4,6,5,2>, <6,7,0,1>
- 2687125479U, // <5,2,6,u>: Cost 3 vext3 <0,4,1,5>, <2,6,u,7>
- 2565603430U, // <5,2,7,0>: Cost 3 vext1 <2,5,2,7>, LHS
- 2553660150U, // <5,2,7,1>: Cost 3 vext1 <0,5,2,7>, <1,0,3,2>
- 2565605216U, // <5,2,7,2>: Cost 3 vext1 <2,5,2,7>, <2,5,2,7>
- 2961178726U, // <5,2,7,3>: Cost 3 vzipr <1,3,5,7>, LHS
- 2565606710U, // <5,2,7,4>: Cost 3 vext1 <2,5,2,7>, RHS
- 4034920552U, // <5,2,7,5>: Cost 4 vzipr <1,3,5,7>, <0,1,2,5>
- 3114713292U, // <5,2,7,6>: Cost 3 vtrnr RHS, <0,2,4,6>
- 3702658668U, // <5,2,7,7>: Cost 4 vext2 <1,u,5,2>, <7,7,7,7>
- 2961178731U, // <5,2,7,u>: Cost 3 vzipr <1,3,5,7>, LHS
- 2687125563U, // <5,2,u,0>: Cost 3 vext3 <0,4,1,5>, <2,u,0,1>
- 2628917038U, // <5,2,u,1>: Cost 3 vext2 <1,u,5,2>, LHS
- 2565613409U, // <5,2,u,2>: Cost 3 vext1 <2,5,2,u>, <2,5,2,u>
- 2687125592U, // <5,2,u,3>: Cost 3 vext3 <0,4,1,5>, <2,u,3,3>
- 1628203107U, // <5,2,u,4>: Cost 2 vext3 <2,u,4,5>, <2,u,4,5>
- 2628917402U, // <5,2,u,5>: Cost 3 vext2 <1,u,5,2>, RHS
- 2702092405U, // <5,2,u,6>: Cost 3 vext3 <2,u,6,5>, <2,u,6,5>
- 3304179598U, // <5,2,u,7>: Cost 4 vrev <2,5,7,u>
- 1628498055U, // <5,2,u,u>: Cost 2 vext3 <2,u,u,5>, <2,u,u,5>
- 3760867467U, // <5,3,0,0>: Cost 4 vext3 <0,4,1,5>, <3,0,0,0>
- 2687125654U, // <5,3,0,1>: Cost 3 vext3 <0,4,1,5>, <3,0,1,2>
- 3759761565U, // <5,3,0,2>: Cost 4 vext3 <0,2,4,5>, <3,0,2,0>
- 3633391766U, // <5,3,0,3>: Cost 4 vext1 <1,5,3,0>, <3,0,1,2>
- 2687125680U, // <5,3,0,4>: Cost 3 vext3 <0,4,1,5>, <3,0,4,1>
- 3760277690U, // <5,3,0,5>: Cost 4 vext3 <0,3,2,5>, <3,0,5,2>
- 3310013014U, // <5,3,0,6>: Cost 4 vrev <3,5,6,0>
- 2236344927U, // <5,3,0,7>: Cost 3 vrev <3,5,7,0>
- 2687125717U, // <5,3,0,u>: Cost 3 vext3 <0,4,1,5>, <3,0,u,2>
- 3760867551U, // <5,3,1,0>: Cost 4 vext3 <0,4,1,5>, <3,1,0,3>
- 3760867558U, // <5,3,1,1>: Cost 4 vext3 <0,4,1,5>, <3,1,1,1>
- 2624938923U, // <5,3,1,2>: Cost 3 vext2 <1,2,5,3>, <1,2,5,3>
- 2703198460U, // <5,3,1,3>: Cost 3 vext3 <3,1,3,5>, <3,1,3,5>
- 3760867587U, // <5,3,1,4>: Cost 4 vext3 <0,4,1,5>, <3,1,4,3>
- 2636219536U, // <5,3,1,5>: Cost 3 vext2 <3,1,5,3>, <1,5,3,7>
- 3698681075U, // <5,3,1,6>: Cost 4 vext2 <1,2,5,3>, <1,6,5,7>
- 2703493408U, // <5,3,1,7>: Cost 3 vext3 <3,1,7,5>, <3,1,7,5>
- 2628920721U, // <5,3,1,u>: Cost 3 vext2 <1,u,5,3>, <1,u,5,3>
- 3766765870U, // <5,3,2,0>: Cost 4 vext3 <1,4,0,5>, <3,2,0,1>
- 3698681379U, // <5,3,2,1>: Cost 4 vext2 <1,2,5,3>, <2,1,3,5>
- 3760867649U, // <5,3,2,2>: Cost 4 vext3 <0,4,1,5>, <3,2,2,2>
- 2698627404U, // <5,3,2,3>: Cost 3 vext3 <2,3,4,5>, <3,2,3,4>
- 2703935830U, // <5,3,2,4>: Cost 3 vext3 <3,2,4,5>, <3,2,4,5>
- 2698627422U, // <5,3,2,5>: Cost 3 vext3 <2,3,4,5>, <3,2,5,4>
- 3760867686U, // <5,3,2,6>: Cost 4 vext3 <0,4,1,5>, <3,2,6,3>
- 3769788783U, // <5,3,2,7>: Cost 4 vext3 <1,u,5,5>, <3,2,7,3>
- 2701945209U, // <5,3,2,u>: Cost 3 vext3 <2,u,4,5>, <3,2,u,4>
- 3760867711U, // <5,3,3,0>: Cost 4 vext3 <0,4,1,5>, <3,3,0,1>
- 2636220684U, // <5,3,3,1>: Cost 3 vext2 <3,1,5,3>, <3,1,5,3>
- 3772369298U, // <5,3,3,2>: Cost 4 vext3 <2,3,4,5>, <3,3,2,2>
- 2687125916U, // <5,3,3,3>: Cost 3 vext3 <0,4,1,5>, <3,3,3,3>
- 2704599463U, // <5,3,3,4>: Cost 3 vext3 <3,3,4,5>, <3,3,4,5>
- 2704673200U, // <5,3,3,5>: Cost 3 vext3 <3,3,5,5>, <3,3,5,5>
- 3709962935U, // <5,3,3,6>: Cost 4 vext2 <3,1,5,3>, <3,6,7,7>
- 3772369346U, // <5,3,3,7>: Cost 4 vext3 <2,3,4,5>, <3,3,7,5>
- 2704894411U, // <5,3,3,u>: Cost 3 vext3 <3,3,u,5>, <3,3,u,5>
- 2704968148U, // <5,3,4,0>: Cost 3 vext3 <3,4,0,5>, <3,4,0,5>
- 3698682850U, // <5,3,4,1>: Cost 4 vext2 <1,2,5,3>, <4,1,5,0>
- 2642857014U, // <5,3,4,2>: Cost 3 vext2 <4,2,5,3>, <4,2,5,3>
- 2705189359U, // <5,3,4,3>: Cost 3 vext3 <3,4,3,5>, <3,4,3,5>
- 2705263096U, // <5,3,4,4>: Cost 3 vext3 <3,4,4,5>, <3,4,4,5>
- 2685946370U, // <5,3,4,5>: Cost 3 vext3 <0,2,3,5>, <3,4,5,6>
- 3779152394U, // <5,3,4,6>: Cost 4 vext3 <3,4,6,5>, <3,4,6,5>
- 2236377699U, // <5,3,4,7>: Cost 3 vrev <3,5,7,4>
- 2687126045U, // <5,3,4,u>: Cost 3 vext3 <0,4,1,5>, <3,4,u,6>
- 2571632742U, // <5,3,5,0>: Cost 3 vext1 <3,5,3,5>, LHS
- 2559689870U, // <5,3,5,1>: Cost 3 vext1 <1,5,3,5>, <1,5,3,5>
- 2571634382U, // <5,3,5,2>: Cost 3 vext1 <3,5,3,5>, <2,3,4,5>
- 2571635264U, // <5,3,5,3>: Cost 3 vext1 <3,5,3,5>, <3,5,3,5>
- 2571636022U, // <5,3,5,4>: Cost 3 vext1 <3,5,3,5>, RHS
- 2559692804U, // <5,3,5,5>: Cost 3 vext1 <1,5,3,5>, <5,5,5,5>
- 3720581218U, // <5,3,5,6>: Cost 4 vext2 <4,u,5,3>, <5,6,7,0>
- 2236385892U, // <5,3,5,7>: Cost 3 vrev <3,5,7,5>
- 2571638574U, // <5,3,5,u>: Cost 3 vext1 <3,5,3,5>, LHS
- 2565668966U, // <5,3,6,0>: Cost 3 vext1 <2,5,3,6>, LHS
- 3633439887U, // <5,3,6,1>: Cost 4 vext1 <1,5,3,6>, <1,5,3,6>
- 2565670760U, // <5,3,6,2>: Cost 3 vext1 <2,5,3,6>, <2,5,3,6>
- 2565671426U, // <5,3,6,3>: Cost 3 vext1 <2,5,3,6>, <3,4,5,6>
- 2565672246U, // <5,3,6,4>: Cost 3 vext1 <2,5,3,6>, RHS
- 3639414630U, // <5,3,6,5>: Cost 4 vext1 <2,5,3,6>, <5,3,6,0>
- 4047521640U, // <5,3,6,6>: Cost 4 vzipr <3,4,5,6>, <2,5,3,6>
- 2725169844U, // <5,3,6,7>: Cost 3 vext3 <6,7,4,5>, <3,6,7,4>
- 2565674798U, // <5,3,6,u>: Cost 3 vext1 <2,5,3,6>, LHS
- 1485963366U, // <5,3,7,0>: Cost 2 vext1 <1,5,3,7>, LHS
- 1485964432U, // <5,3,7,1>: Cost 2 vext1 <1,5,3,7>, <1,5,3,7>
- 2559706728U, // <5,3,7,2>: Cost 3 vext1 <1,5,3,7>, <2,2,2,2>
- 2559707286U, // <5,3,7,3>: Cost 3 vext1 <1,5,3,7>, <3,0,1,2>
- 1485966646U, // <5,3,7,4>: Cost 2 vext1 <1,5,3,7>, RHS
- 2559708880U, // <5,3,7,5>: Cost 3 vext1 <1,5,3,7>, <5,1,7,3>
- 2601513466U, // <5,3,7,6>: Cost 3 vext1 <u,5,3,7>, <6,2,7,3>
- 3114714112U, // <5,3,7,7>: Cost 3 vtrnr RHS, <1,3,5,7>
- 1485969198U, // <5,3,7,u>: Cost 2 vext1 <1,5,3,7>, LHS
- 1485971558U, // <5,3,u,0>: Cost 2 vext1 <1,5,3,u>, LHS
- 1485972625U, // <5,3,u,1>: Cost 2 vext1 <1,5,3,u>, <1,5,3,u>
- 2559714920U, // <5,3,u,2>: Cost 3 vext1 <1,5,3,u>, <2,2,2,2>
- 2559715478U, // <5,3,u,3>: Cost 3 vext1 <1,5,3,u>, <3,0,1,2>
- 1485974838U, // <5,3,u,4>: Cost 2 vext1 <1,5,3,u>, RHS
- 2687126342U, // <5,3,u,5>: Cost 3 vext3 <0,4,1,5>, <3,u,5,6>
- 2601521658U, // <5,3,u,6>: Cost 3 vext1 <u,5,3,u>, <6,2,7,3>
- 2236410471U, // <5,3,u,7>: Cost 3 vrev <3,5,7,u>
- 1485977390U, // <5,3,u,u>: Cost 2 vext1 <1,5,3,u>, LHS
- 3627491430U, // <5,4,0,0>: Cost 4 vext1 <0,5,4,0>, LHS
- 2636890214U, // <5,4,0,1>: Cost 3 vext2 <3,2,5,4>, LHS
- 3703333028U, // <5,4,0,2>: Cost 4 vext2 <2,0,5,4>, <0,2,0,2>
- 3782249348U, // <5,4,0,3>: Cost 4 vext3 <4,0,3,5>, <4,0,3,5>
- 2642198866U, // <5,4,0,4>: Cost 3 vext2 <4,1,5,4>, <0,4,1,5>
- 2687126418U, // <5,4,0,5>: Cost 3 vext3 <0,4,1,5>, <4,0,5,1>
- 2242243887U, // <5,4,0,6>: Cost 3 vrev <4,5,6,0>
- 3316059448U, // <5,4,0,7>: Cost 4 vrev <4,5,7,0>
- 2636890781U, // <5,4,0,u>: Cost 3 vext2 <3,2,5,4>, LHS
- 2241809658U, // <5,4,1,0>: Cost 3 vrev <4,5,0,1>
- 3698025307U, // <5,4,1,1>: Cost 4 vext2 <1,1,5,4>, <1,1,5,4>
- 3698688940U, // <5,4,1,2>: Cost 4 vext2 <1,2,5,4>, <1,2,5,4>
- 3698689024U, // <5,4,1,3>: Cost 4 vext2 <1,2,5,4>, <1,3,5,7>
- 3700016206U, // <5,4,1,4>: Cost 4 vext2 <1,4,5,4>, <1,4,5,4>
- 2687126498U, // <5,4,1,5>: Cost 3 vext3 <0,4,1,5>, <4,1,5,0>
- 3760868336U, // <5,4,1,6>: Cost 4 vext3 <0,4,1,5>, <4,1,6,5>
- 3316067641U, // <5,4,1,7>: Cost 4 vrev <4,5,7,1>
- 2242399554U, // <5,4,1,u>: Cost 3 vrev <4,5,u,1>
- 3703334371U, // <5,4,2,0>: Cost 4 vext2 <2,0,5,4>, <2,0,5,4>
- 3703998004U, // <5,4,2,1>: Cost 4 vext2 <2,1,5,4>, <2,1,5,4>
- 3704661637U, // <5,4,2,2>: Cost 4 vext2 <2,2,5,4>, <2,2,5,4>
- 2636891854U, // <5,4,2,3>: Cost 3 vext2 <3,2,5,4>, <2,3,4,5>
- 3705988903U, // <5,4,2,4>: Cost 4 vext2 <2,4,5,4>, <2,4,5,4>
- 2698628150U, // <5,4,2,5>: Cost 3 vext3 <2,3,4,5>, <4,2,5,3>
- 3760868415U, // <5,4,2,6>: Cost 4 vext3 <0,4,1,5>, <4,2,6,3>
- 3783871562U, // <5,4,2,7>: Cost 4 vext3 <4,2,7,5>, <4,2,7,5>
- 2666752099U, // <5,4,2,u>: Cost 3 vext2 <u,2,5,4>, <2,u,4,5>
- 3639459942U, // <5,4,3,0>: Cost 4 vext1 <2,5,4,3>, LHS
- 3709970701U, // <5,4,3,1>: Cost 4 vext2 <3,1,5,4>, <3,1,5,4>
- 2636892510U, // <5,4,3,2>: Cost 3 vext2 <3,2,5,4>, <3,2,5,4>
- 3710634396U, // <5,4,3,3>: Cost 4 vext2 <3,2,5,4>, <3,3,3,3>
- 2638219776U, // <5,4,3,4>: Cost 3 vext2 <3,4,5,4>, <3,4,5,4>
- 3766987908U, // <5,4,3,5>: Cost 4 vext3 <1,4,3,5>, <4,3,5,0>
- 2710719634U, // <5,4,3,6>: Cost 3 vext3 <4,3,6,5>, <4,3,6,5>
- 3914097664U, // <5,4,3,7>: Cost 4 vuzpr <3,5,7,4>, <1,3,5,7>
- 2640874308U, // <5,4,3,u>: Cost 3 vext2 <3,u,5,4>, <3,u,5,4>
- 2583642214U, // <5,4,4,0>: Cost 3 vext1 <5,5,4,4>, LHS
- 2642201574U, // <5,4,4,1>: Cost 3 vext2 <4,1,5,4>, <4,1,5,4>
- 3710635062U, // <5,4,4,2>: Cost 4 vext2 <3,2,5,4>, <4,2,5,3>
- 3717270664U, // <5,4,4,3>: Cost 4 vext2 <4,3,5,4>, <4,3,5,4>
- 2713963728U, // <5,4,4,4>: Cost 3 vext3 <4,u,5,5>, <4,4,4,4>
- 1637567706U, // <5,4,4,5>: Cost 2 vext3 <4,4,5,5>, <4,4,5,5>
- 2242276659U, // <5,4,4,6>: Cost 3 vrev <4,5,6,4>
- 2646183372U, // <5,4,4,7>: Cost 3 vext2 <4,7,5,4>, <4,7,5,4>
- 1637788917U, // <5,4,4,u>: Cost 2 vext3 <4,4,u,5>, <4,4,u,5>
- 2559762534U, // <5,4,5,0>: Cost 3 vext1 <1,5,4,5>, LHS
- 2559763607U, // <5,4,5,1>: Cost 3 vext1 <1,5,4,5>, <1,5,4,5>
- 2698628366U, // <5,4,5,2>: Cost 3 vext3 <2,3,4,5>, <4,5,2,3>
- 3633506454U, // <5,4,5,3>: Cost 4 vext1 <1,5,4,5>, <3,0,1,2>
- 2559765814U, // <5,4,5,4>: Cost 3 vext1 <1,5,4,5>, RHS
- 2583654395U, // <5,4,5,5>: Cost 3 vext1 <5,5,4,5>, <5,5,4,5>
- 1613385014U, // <5,4,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
- 3901639990U, // <5,4,5,7>: Cost 4 vuzpr <1,5,0,4>, RHS
- 1613385032U, // <5,4,5,u>: Cost 2 vext3 <0,4,1,5>, RHS
- 2559770726U, // <5,4,6,0>: Cost 3 vext1 <1,5,4,6>, LHS
- 2559771648U, // <5,4,6,1>: Cost 3 vext1 <1,5,4,6>, <1,3,5,7>
- 3633514088U, // <5,4,6,2>: Cost 4 vext1 <1,5,4,6>, <2,2,2,2>
- 2571717122U, // <5,4,6,3>: Cost 3 vext1 <3,5,4,6>, <3,4,5,6>
- 2559774006U, // <5,4,6,4>: Cost 3 vext1 <1,5,4,6>, RHS
- 2712636796U, // <5,4,6,5>: Cost 3 vext3 <4,6,5,5>, <4,6,5,5>
- 3760868743U, // <5,4,6,6>: Cost 4 vext3 <0,4,1,5>, <4,6,6,7>
- 2712784270U, // <5,4,6,7>: Cost 3 vext3 <4,6,7,5>, <4,6,7,5>
- 2559776558U, // <5,4,6,u>: Cost 3 vext1 <1,5,4,6>, LHS
- 2565750886U, // <5,4,7,0>: Cost 3 vext1 <2,5,4,7>, LHS
- 2565751706U, // <5,4,7,1>: Cost 3 vext1 <2,5,4,7>, <1,2,3,4>
- 2565752690U, // <5,4,7,2>: Cost 3 vext1 <2,5,4,7>, <2,5,4,7>
- 2571725387U, // <5,4,7,3>: Cost 3 vext1 <3,5,4,7>, <3,5,4,7>
- 2565754166U, // <5,4,7,4>: Cost 3 vext1 <2,5,4,7>, RHS
- 3114713426U, // <5,4,7,5>: Cost 3 vtrnr RHS, <0,4,1,5>
- 94817590U, // <5,4,7,6>: Cost 1 vrev RHS
- 2595616175U, // <5,4,7,7>: Cost 3 vext1 <7,5,4,7>, <7,5,4,7>
- 94965064U, // <5,4,7,u>: Cost 1 vrev RHS
- 2559787110U, // <5,4,u,0>: Cost 3 vext1 <1,5,4,u>, LHS
- 2559788186U, // <5,4,u,1>: Cost 3 vext1 <1,5,4,u>, <1,5,4,u>
- 2242014483U, // <5,4,u,2>: Cost 3 vrev <4,5,2,u>
- 2667419628U, // <5,4,u,3>: Cost 3 vext2 <u,3,5,4>, <u,3,5,4>
- 2559790390U, // <5,4,u,4>: Cost 3 vext1 <1,5,4,u>, RHS
- 1640222238U, // <5,4,u,5>: Cost 2 vext3 <4,u,5,5>, <4,u,5,5>
- 94825783U, // <5,4,u,6>: Cost 1 vrev RHS
- 2714111536U, // <5,4,u,7>: Cost 3 vext3 <4,u,7,5>, <4,u,7,5>
- 94973257U, // <5,4,u,u>: Cost 1 vrev RHS
- 2646851584U, // <5,5,0,0>: Cost 3 vext2 <4,u,5,5>, <0,0,0,0>
- 1573109862U, // <5,5,0,1>: Cost 2 vext2 <4,u,5,5>, LHS
- 2646851748U, // <5,5,0,2>: Cost 3 vext2 <4,u,5,5>, <0,2,0,2>
- 3760279130U, // <5,5,0,3>: Cost 4 vext3 <0,3,2,5>, <5,0,3,2>
- 2687127138U, // <5,5,0,4>: Cost 3 vext3 <0,4,1,5>, <5,0,4,1>
- 2248142847U, // <5,5,0,5>: Cost 3 vrev <5,5,5,0>
- 3720593910U, // <5,5,0,6>: Cost 4 vext2 <4,u,5,5>, <0,6,1,7>
- 4182502710U, // <5,5,0,7>: Cost 4 vtrnr <3,5,7,0>, RHS
- 1573110429U, // <5,5,0,u>: Cost 2 vext2 <4,u,5,5>, LHS
- 2646852342U, // <5,5,1,0>: Cost 3 vext2 <4,u,5,5>, <1,0,3,2>
- 2624291676U, // <5,5,1,1>: Cost 3 vext2 <1,1,5,5>, <1,1,5,5>
- 2646852502U, // <5,5,1,2>: Cost 3 vext2 <4,u,5,5>, <1,2,3,0>
- 2646852568U, // <5,5,1,3>: Cost 3 vext2 <4,u,5,5>, <1,3,1,3>
- 2715217591U, // <5,5,1,4>: Cost 3 vext3 <5,1,4,5>, <5,1,4,5>
- 2628936848U, // <5,5,1,5>: Cost 3 vext2 <1,u,5,5>, <1,5,3,7>
- 3698033907U, // <5,5,1,6>: Cost 4 vext2 <1,1,5,5>, <1,6,5,7>
- 2713964240U, // <5,5,1,7>: Cost 3 vext3 <4,u,5,5>, <5,1,7,3>
- 2628937107U, // <5,5,1,u>: Cost 3 vext2 <1,u,5,5>, <1,u,5,5>
- 3645497446U, // <5,5,2,0>: Cost 4 vext1 <3,5,5,2>, LHS
- 3760869099U, // <5,5,2,1>: Cost 4 vext3 <0,4,1,5>, <5,2,1,3>
- 2646853224U, // <5,5,2,2>: Cost 3 vext2 <4,u,5,5>, <2,2,2,2>
- 2698628862U, // <5,5,2,3>: Cost 3 vext3 <2,3,4,5>, <5,2,3,4>
- 3772370694U, // <5,5,2,4>: Cost 4 vext3 <2,3,4,5>, <5,2,4,3>
- 2713964303U, // <5,5,2,5>: Cost 3 vext3 <4,u,5,5>, <5,2,5,3>
- 2646853562U, // <5,5,2,6>: Cost 3 vext2 <4,u,5,5>, <2,6,3,7>
- 4038198272U, // <5,5,2,7>: Cost 4 vzipr <1,u,5,2>, <1,3,5,7>
- 2701946667U, // <5,5,2,u>: Cost 3 vext3 <2,u,4,5>, <5,2,u,4>
- 2646853782U, // <5,5,3,0>: Cost 3 vext2 <4,u,5,5>, <3,0,1,2>
- 3698034922U, // <5,5,3,1>: Cost 4 vext2 <1,1,5,5>, <3,1,1,5>
- 3702679919U, // <5,5,3,2>: Cost 4 vext2 <1,u,5,5>, <3,2,7,3>
- 2637564336U, // <5,5,3,3>: Cost 3 vext2 <3,3,5,5>, <3,3,5,5>
- 2646854146U, // <5,5,3,4>: Cost 3 vext2 <4,u,5,5>, <3,4,5,6>
- 2638891602U, // <5,5,3,5>: Cost 3 vext2 <3,5,5,5>, <3,5,5,5>
- 3702680247U, // <5,5,3,6>: Cost 4 vext2 <1,u,5,5>, <3,6,7,7>
- 3702680259U, // <5,5,3,7>: Cost 4 vext2 <1,u,5,5>, <3,7,0,1>
- 2646854430U, // <5,5,3,u>: Cost 3 vext2 <4,u,5,5>, <3,u,1,2>
- 2646854546U, // <5,5,4,0>: Cost 3 vext2 <4,u,5,5>, <4,0,5,1>
- 2642209767U, // <5,5,4,1>: Cost 3 vext2 <4,1,5,5>, <4,1,5,5>
- 3711306806U, // <5,5,4,2>: Cost 4 vext2 <3,3,5,5>, <4,2,5,3>
- 3645516369U, // <5,5,4,3>: Cost 4 vext1 <3,5,5,4>, <3,5,5,4>
- 1570458842U, // <5,5,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
- 1573113142U, // <5,5,4,5>: Cost 2 vext2 <4,u,5,5>, RHS
- 2645527932U, // <5,5,4,6>: Cost 3 vext2 <4,6,5,5>, <4,6,5,5>
- 2713964486U, // <5,5,4,7>: Cost 3 vext3 <4,u,5,5>, <5,4,7,6>
- 1573113374U, // <5,5,4,u>: Cost 2 vext2 <4,u,5,5>, <4,u,5,5>
- 1509982310U, // <5,5,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 2646855376U, // <5,5,5,1>: Cost 3 vext2 <4,u,5,5>, <5,1,7,3>
- 2583725672U, // <5,5,5,2>: Cost 3 vext1 <5,5,5,5>, <2,2,2,2>
- 2583726230U, // <5,5,5,3>: Cost 3 vext1 <5,5,5,5>, <3,0,1,2>
- 1509985590U, // <5,5,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,5,5,5>: Cost 1 vdup1 RHS
- 2646855778U, // <5,5,5,6>: Cost 3 vext2 <4,u,5,5>, <5,6,7,0>
- 2646855848U, // <5,5,5,7>: Cost 3 vext2 <4,u,5,5>, <5,7,5,7>
- 229035318U, // <5,5,5,u>: Cost 1 vdup1 RHS
- 2577760358U, // <5,5,6,0>: Cost 3 vext1 <4,5,5,6>, LHS
- 3633587361U, // <5,5,6,1>: Cost 4 vext1 <1,5,5,6>, <1,5,5,6>
- 2646856186U, // <5,5,6,2>: Cost 3 vext2 <4,u,5,5>, <6,2,7,3>
- 3633588738U, // <5,5,6,3>: Cost 4 vext1 <1,5,5,6>, <3,4,5,6>
- 2718535756U, // <5,5,6,4>: Cost 3 vext3 <5,6,4,5>, <5,6,4,5>
- 2644202223U, // <5,5,6,5>: Cost 3 vext2 <4,4,5,5>, <6,5,7,5>
- 2973780482U, // <5,5,6,6>: Cost 3 vzipr <3,4,5,6>, <3,4,5,6>
- 2646856526U, // <5,5,6,7>: Cost 3 vext2 <4,u,5,5>, <6,7,0,1>
- 2646856607U, // <5,5,6,u>: Cost 3 vext2 <4,u,5,5>, <6,u,0,1>
- 2571796582U, // <5,5,7,0>: Cost 3 vext1 <3,5,5,7>, LHS
- 3633595392U, // <5,5,7,1>: Cost 4 vext1 <1,5,5,7>, <1,3,5,7>
- 2571798222U, // <5,5,7,2>: Cost 3 vext1 <3,5,5,7>, <2,3,4,5>
- 2571799124U, // <5,5,7,3>: Cost 3 vext1 <3,5,5,7>, <3,5,5,7>
- 2571799862U, // <5,5,7,4>: Cost 3 vext1 <3,5,5,7>, RHS
- 3114717188U, // <5,5,7,5>: Cost 3 vtrnr RHS, <5,5,5,5>
- 4034923010U, // <5,5,7,6>: Cost 4 vzipr <1,3,5,7>, <3,4,5,6>
- 2040974646U, // <5,5,7,7>: Cost 2 vtrnr RHS, RHS
- 2040974647U, // <5,5,7,u>: Cost 2 vtrnr RHS, RHS
- 1509982310U, // <5,5,u,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 1573115694U, // <5,5,u,1>: Cost 2 vext2 <4,u,5,5>, LHS
- 2571806414U, // <5,5,u,2>: Cost 3 vext1 <3,5,5,u>, <2,3,4,5>
- 2571807317U, // <5,5,u,3>: Cost 3 vext1 <3,5,5,u>, <3,5,5,u>
- 1509985590U, // <5,5,u,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,5,u,5>: Cost 1 vdup1 RHS
- 2646857936U, // <5,5,u,6>: Cost 3 vext2 <4,u,5,5>, <u,6,3,7>
- 2040982838U, // <5,5,u,7>: Cost 2 vtrnr RHS, RHS
- 229035318U, // <5,5,u,u>: Cost 1 vdup1 RHS
- 2638233600U, // <5,6,0,0>: Cost 3 vext2 <3,4,5,6>, <0,0,0,0>
- 1564491878U, // <5,6,0,1>: Cost 2 vext2 <3,4,5,6>, LHS
- 2632261796U, // <5,6,0,2>: Cost 3 vext2 <2,4,5,6>, <0,2,0,2>
- 2638233856U, // <5,6,0,3>: Cost 3 vext2 <3,4,5,6>, <0,3,1,4>
- 2638233938U, // <5,6,0,4>: Cost 3 vext2 <3,4,5,6>, <0,4,1,5>
- 3706003885U, // <5,6,0,5>: Cost 4 vext2 <2,4,5,6>, <0,5,2,6>
- 3706003967U, // <5,6,0,6>: Cost 4 vext2 <2,4,5,6>, <0,6,2,7>
- 4047473974U, // <5,6,0,7>: Cost 4 vzipr <3,4,5,0>, RHS
- 1564492445U, // <5,6,0,u>: Cost 2 vext2 <3,4,5,6>, LHS
- 2638234358U, // <5,6,1,0>: Cost 3 vext2 <3,4,5,6>, <1,0,3,2>
- 2638234420U, // <5,6,1,1>: Cost 3 vext2 <3,4,5,6>, <1,1,1,1>
- 2638234518U, // <5,6,1,2>: Cost 3 vext2 <3,4,5,6>, <1,2,3,0>
- 2638234584U, // <5,6,1,3>: Cost 3 vext2 <3,4,5,6>, <1,3,1,3>
- 2626290768U, // <5,6,1,4>: Cost 3 vext2 <1,4,5,6>, <1,4,5,6>
- 2638234768U, // <5,6,1,5>: Cost 3 vext2 <3,4,5,6>, <1,5,3,7>
- 3700032719U, // <5,6,1,6>: Cost 4 vext2 <1,4,5,6>, <1,6,1,7>
- 2982366518U, // <5,6,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
- 2628945300U, // <5,6,1,u>: Cost 3 vext2 <1,u,5,6>, <1,u,5,6>
- 3706004925U, // <5,6,2,0>: Cost 4 vext2 <2,4,5,6>, <2,0,1,2>
- 3711976966U, // <5,6,2,1>: Cost 4 vext2 <3,4,5,6>, <2,1,0,3>
- 2638235240U, // <5,6,2,2>: Cost 3 vext2 <3,4,5,6>, <2,2,2,2>
- 2638235302U, // <5,6,2,3>: Cost 3 vext2 <3,4,5,6>, <2,3,0,1>
- 2632263465U, // <5,6,2,4>: Cost 3 vext2 <2,4,5,6>, <2,4,5,6>
- 2638235496U, // <5,6,2,5>: Cost 3 vext2 <3,4,5,6>, <2,5,3,6>
- 2638235578U, // <5,6,2,6>: Cost 3 vext2 <3,4,5,6>, <2,6,3,7>
- 2713965050U, // <5,6,2,7>: Cost 3 vext3 <4,u,5,5>, <6,2,7,3>
- 2634917997U, // <5,6,2,u>: Cost 3 vext2 <2,u,5,6>, <2,u,5,6>
- 2638235798U, // <5,6,3,0>: Cost 3 vext2 <3,4,5,6>, <3,0,1,2>
- 3711977695U, // <5,6,3,1>: Cost 4 vext2 <3,4,5,6>, <3,1,0,3>
- 3710650720U, // <5,6,3,2>: Cost 4 vext2 <3,2,5,6>, <3,2,5,6>
- 2638236060U, // <5,6,3,3>: Cost 3 vext2 <3,4,5,6>, <3,3,3,3>
- 1564494338U, // <5,6,3,4>: Cost 2 vext2 <3,4,5,6>, <3,4,5,6>
- 2638236234U, // <5,6,3,5>: Cost 3 vext2 <3,4,5,6>, <3,5,4,6>
- 3711978104U, // <5,6,3,6>: Cost 4 vext2 <3,4,5,6>, <3,6,0,7>
- 4034227510U, // <5,6,3,7>: Cost 4 vzipr <1,2,5,3>, RHS
- 1567148870U, // <5,6,3,u>: Cost 2 vext2 <3,u,5,6>, <3,u,5,6>
- 2577817702U, // <5,6,4,0>: Cost 3 vext1 <4,5,6,4>, LHS
- 3700034544U, // <5,6,4,1>: Cost 4 vext2 <1,4,5,6>, <4,1,6,5>
- 2723033713U, // <5,6,4,2>: Cost 3 vext3 <6,4,2,5>, <6,4,2,5>
- 2638236818U, // <5,6,4,3>: Cost 3 vext2 <3,4,5,6>, <4,3,6,5>
- 2644208859U, // <5,6,4,4>: Cost 3 vext2 <4,4,5,6>, <4,4,5,6>
- 1564495158U, // <5,6,4,5>: Cost 2 vext2 <3,4,5,6>, RHS
- 2645536125U, // <5,6,4,6>: Cost 3 vext2 <4,6,5,6>, <4,6,5,6>
- 2723402398U, // <5,6,4,7>: Cost 3 vext3 <6,4,7,5>, <6,4,7,5>
- 1564495401U, // <5,6,4,u>: Cost 2 vext2 <3,4,5,6>, RHS
- 2577825894U, // <5,6,5,0>: Cost 3 vext1 <4,5,6,5>, LHS
- 2662125264U, // <5,6,5,1>: Cost 3 vext2 <7,4,5,6>, <5,1,7,3>
- 3775836867U, // <5,6,5,2>: Cost 4 vext3 <2,u,6,5>, <6,5,2,6>
- 3711979343U, // <5,6,5,3>: Cost 4 vext2 <3,4,5,6>, <5,3,3,4>
- 2650181556U, // <5,6,5,4>: Cost 3 vext2 <5,4,5,6>, <5,4,5,6>
- 2662125572U, // <5,6,5,5>: Cost 3 vext2 <7,4,5,6>, <5,5,5,5>
- 2638237732U, // <5,6,5,6>: Cost 3 vext2 <3,4,5,6>, <5,6,0,1>
- 2982399286U, // <5,6,5,7>: Cost 3 vzipr <4,u,5,5>, RHS
- 2982399287U, // <5,6,5,u>: Cost 3 vzipr <4,u,5,5>, RHS
- 2583806054U, // <5,6,6,0>: Cost 3 vext1 <5,5,6,6>, LHS
- 3711979910U, // <5,6,6,1>: Cost 4 vext2 <3,4,5,6>, <6,1,3,4>
- 2662126074U, // <5,6,6,2>: Cost 3 vext2 <7,4,5,6>, <6,2,7,3>
- 2583808514U, // <5,6,6,3>: Cost 3 vext1 <5,5,6,6>, <3,4,5,6>
- 2583809334U, // <5,6,6,4>: Cost 3 vext1 <5,5,6,6>, RHS
- 2583810062U, // <5,6,6,5>: Cost 3 vext1 <5,5,6,6>, <5,5,6,6>
- 2638238520U, // <5,6,6,6>: Cost 3 vext2 <3,4,5,6>, <6,6,6,6>
- 2973781302U, // <5,6,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
- 2973781303U, // <5,6,6,u>: Cost 3 vzipr <3,4,5,6>, RHS
- 430358630U, // <5,6,7,0>: Cost 1 vext1 RHS, LHS
- 1504101110U, // <5,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
- 1504101992U, // <5,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1504102550U, // <5,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 430361910U, // <5,6,7,4>: Cost 1 vext1 RHS, RHS
- 1504104390U, // <5,6,7,5>: Cost 2 vext1 RHS, <5,4,7,6>
- 1504105272U, // <5,6,7,6>: Cost 2 vext1 RHS, <6,6,6,6>
- 1504106092U, // <5,6,7,7>: Cost 2 vext1 RHS, <7,7,7,7>
- 430364462U, // <5,6,7,u>: Cost 1 vext1 RHS, LHS
- 430366822U, // <5,6,u,0>: Cost 1 vext1 RHS, LHS
- 1564497710U, // <5,6,u,1>: Cost 2 vext2 <3,4,5,6>, LHS
- 1504110184U, // <5,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1504110742U, // <5,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 430370103U, // <5,6,u,4>: Cost 1 vext1 RHS, RHS
- 1564498074U, // <5,6,u,5>: Cost 2 vext2 <3,4,5,6>, RHS
- 1504113146U, // <5,6,u,6>: Cost 2 vext1 RHS, <6,2,7,3>
- 1504113658U, // <5,6,u,7>: Cost 2 vext1 RHS, <7,0,1,2>
- 430372654U, // <5,6,u,u>: Cost 1 vext1 RHS, LHS
- 2625634304U, // <5,7,0,0>: Cost 3 vext2 <1,3,5,7>, <0,0,0,0>
- 1551892582U, // <5,7,0,1>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625634468U, // <5,7,0,2>: Cost 3 vext2 <1,3,5,7>, <0,2,0,2>
- 2571889247U, // <5,7,0,3>: Cost 3 vext1 <3,5,7,0>, <3,5,7,0>
- 2625634642U, // <5,7,0,4>: Cost 3 vext2 <1,3,5,7>, <0,4,1,5>
- 2595778728U, // <5,7,0,5>: Cost 3 vext1 <7,5,7,0>, <5,7,5,7>
- 3699376639U, // <5,7,0,6>: Cost 4 vext2 <1,3,5,7>, <0,6,2,7>
- 2260235715U, // <5,7,0,7>: Cost 3 vrev <7,5,7,0>
- 1551893149U, // <5,7,0,u>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625635062U, // <5,7,1,0>: Cost 3 vext2 <1,3,5,7>, <1,0,3,2>
- 2624308020U, // <5,7,1,1>: Cost 3 vext2 <1,1,5,7>, <1,1,1,1>
- 2625635222U, // <5,7,1,2>: Cost 3 vext2 <1,3,5,7>, <1,2,3,0>
- 1551893504U, // <5,7,1,3>: Cost 2 vext2 <1,3,5,7>, <1,3,5,7>
- 2571898166U, // <5,7,1,4>: Cost 3 vext1 <3,5,7,1>, RHS
- 2625635472U, // <5,7,1,5>: Cost 3 vext2 <1,3,5,7>, <1,5,3,7>
- 2627626227U, // <5,7,1,6>: Cost 3 vext2 <1,6,5,7>, <1,6,5,7>
- 3702031684U, // <5,7,1,7>: Cost 4 vext2 <1,7,5,7>, <1,7,5,7>
- 1555211669U, // <5,7,1,u>: Cost 2 vext2 <1,u,5,7>, <1,u,5,7>
- 2629617126U, // <5,7,2,0>: Cost 3 vext2 <2,0,5,7>, <2,0,5,7>
- 3699377670U, // <5,7,2,1>: Cost 4 vext2 <1,3,5,7>, <2,1,0,3>
- 2625635944U, // <5,7,2,2>: Cost 3 vext2 <1,3,5,7>, <2,2,2,2>
- 2625636006U, // <5,7,2,3>: Cost 3 vext2 <1,3,5,7>, <2,3,0,1>
- 2632271658U, // <5,7,2,4>: Cost 3 vext2 <2,4,5,7>, <2,4,5,7>
- 2625636201U, // <5,7,2,5>: Cost 3 vext2 <1,3,5,7>, <2,5,3,7>
- 2625636282U, // <5,7,2,6>: Cost 3 vext2 <1,3,5,7>, <2,6,3,7>
- 3708004381U, // <5,7,2,7>: Cost 4 vext2 <2,7,5,7>, <2,7,5,7>
- 2625636411U, // <5,7,2,u>: Cost 3 vext2 <1,3,5,7>, <2,u,0,1>
- 2625636502U, // <5,7,3,0>: Cost 3 vext2 <1,3,5,7>, <3,0,1,2>
- 2625636604U, // <5,7,3,1>: Cost 3 vext2 <1,3,5,7>, <3,1,3,5>
- 3699378478U, // <5,7,3,2>: Cost 4 vext2 <1,3,5,7>, <3,2,0,1>
- 2625636764U, // <5,7,3,3>: Cost 3 vext2 <1,3,5,7>, <3,3,3,3>
- 2625636866U, // <5,7,3,4>: Cost 3 vext2 <1,3,5,7>, <3,4,5,6>
- 2625636959U, // <5,7,3,5>: Cost 3 vext2 <1,3,5,7>, <3,5,7,0>
- 3699378808U, // <5,7,3,6>: Cost 4 vext2 <1,3,5,7>, <3,6,0,7>
- 2640235254U, // <5,7,3,7>: Cost 3 vext2 <3,7,5,7>, <3,7,5,7>
- 2625637150U, // <5,7,3,u>: Cost 3 vext2 <1,3,5,7>, <3,u,1,2>
- 2571919462U, // <5,7,4,0>: Cost 3 vext1 <3,5,7,4>, LHS
- 2571920384U, // <5,7,4,1>: Cost 3 vext1 <3,5,7,4>, <1,3,5,7>
- 3699379260U, // <5,7,4,2>: Cost 4 vext2 <1,3,5,7>, <4,2,6,0>
- 2571922019U, // <5,7,4,3>: Cost 3 vext1 <3,5,7,4>, <3,5,7,4>
- 2571922742U, // <5,7,4,4>: Cost 3 vext1 <3,5,7,4>, RHS
- 1551895862U, // <5,7,4,5>: Cost 2 vext2 <1,3,5,7>, RHS
- 2846277980U, // <5,7,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
- 2646207951U, // <5,7,4,7>: Cost 3 vext2 <4,7,5,7>, <4,7,5,7>
- 1551896105U, // <5,7,4,u>: Cost 2 vext2 <1,3,5,7>, RHS
- 2583871590U, // <5,7,5,0>: Cost 3 vext1 <5,5,7,5>, LHS
- 2652180176U, // <5,7,5,1>: Cost 3 vext2 <5,7,5,7>, <5,1,7,3>
- 2625638177U, // <5,7,5,2>: Cost 3 vext2 <1,3,5,7>, <5,2,7,3>
- 2625638262U, // <5,7,5,3>: Cost 3 vext2 <1,3,5,7>, <5,3,7,7>
- 2583874870U, // <5,7,5,4>: Cost 3 vext1 <5,5,7,5>, RHS
- 2846281732U, // <5,7,5,5>: Cost 3 vuzpr RHS, <5,5,5,5>
- 2651517015U, // <5,7,5,6>: Cost 3 vext2 <5,6,5,7>, <5,6,5,7>
- 1772539190U, // <5,7,5,7>: Cost 2 vuzpr RHS, RHS
- 1772539191U, // <5,7,5,u>: Cost 2 vuzpr RHS, RHS
- 2846281826U, // <5,7,6,0>: Cost 3 vuzpr RHS, <5,6,7,0>
- 3699380615U, // <5,7,6,1>: Cost 4 vext2 <1,3,5,7>, <6,1,3,5>
- 2846281108U, // <5,7,6,2>: Cost 3 vuzpr RHS, <4,6,u,2>
- 2589854210U, // <5,7,6,3>: Cost 3 vext1 <6,5,7,6>, <3,4,5,6>
- 2846281830U, // <5,7,6,4>: Cost 3 vuzpr RHS, <5,6,7,4>
- 2725467658U, // <5,7,6,5>: Cost 3 vext3 <6,7,u,5>, <7,6,5,u>
- 2846281076U, // <5,7,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
- 2846279610U, // <5,7,6,7>: Cost 3 vuzpr RHS, <2,6,3,7>
- 2846279611U, // <5,7,6,u>: Cost 3 vuzpr RHS, <2,6,3,u>
- 1510146150U, // <5,7,7,0>: Cost 2 vext1 <5,5,7,7>, LHS
- 2846282574U, // <5,7,7,1>: Cost 3 vuzpr RHS, <6,7,0,1>
- 2583889512U, // <5,7,7,2>: Cost 3 vext1 <5,5,7,7>, <2,2,2,2>
- 2846281919U, // <5,7,7,3>: Cost 3 vuzpr RHS, <5,7,u,3>
- 1510149430U, // <5,7,7,4>: Cost 2 vext1 <5,5,7,7>, RHS
- 1510150168U, // <5,7,7,5>: Cost 2 vext1 <5,5,7,7>, <5,5,7,7>
- 2583892474U, // <5,7,7,6>: Cost 3 vext1 <5,5,7,7>, <6,2,7,3>
- 2625640044U, // <5,7,7,7>: Cost 3 vext2 <1,3,5,7>, <7,7,7,7>
- 1510151982U, // <5,7,7,u>: Cost 2 vext1 <5,5,7,7>, LHS
- 1510154342U, // <5,7,u,0>: Cost 2 vext1 <5,5,7,u>, LHS
- 1551898414U, // <5,7,u,1>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625640325U, // <5,7,u,2>: Cost 3 vext2 <1,3,5,7>, <u,2,3,0>
- 1772536477U, // <5,7,u,3>: Cost 2 vuzpr RHS, LHS
- 1510157622U, // <5,7,u,4>: Cost 2 vext1 <5,5,7,u>, RHS
- 1551898778U, // <5,7,u,5>: Cost 2 vext2 <1,3,5,7>, RHS
- 2625640656U, // <5,7,u,6>: Cost 3 vext2 <1,3,5,7>, <u,6,3,7>
- 1772539433U, // <5,7,u,7>: Cost 2 vuzpr RHS, RHS
- 1551898981U, // <5,7,u,u>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625642496U, // <5,u,0,0>: Cost 3 vext2 <1,3,5,u>, <0,0,0,0>
- 1551900774U, // <5,u,0,1>: Cost 2 vext2 <1,3,5,u>, LHS
- 2625642660U, // <5,u,0,2>: Cost 3 vext2 <1,3,5,u>, <0,2,0,2>
- 2698630885U, // <5,u,0,3>: Cost 3 vext3 <2,3,4,5>, <u,0,3,2>
- 2687129325U, // <5,u,0,4>: Cost 3 vext3 <0,4,1,5>, <u,0,4,1>
- 2689783542U, // <5,u,0,5>: Cost 3 vext3 <0,u,1,5>, <u,0,5,1>
- 2266134675U, // <5,u,0,6>: Cost 3 vrev <u,5,6,0>
- 2595853772U, // <5,u,0,7>: Cost 3 vext1 <7,5,u,0>, <7,5,u,0>
- 1551901341U, // <5,u,0,u>: Cost 2 vext2 <1,3,5,u>, LHS
- 2625643254U, // <5,u,1,0>: Cost 3 vext2 <1,3,5,u>, <1,0,3,2>
- 2625643316U, // <5,u,1,1>: Cost 3 vext2 <1,3,5,u>, <1,1,1,1>
- 1613387566U, // <5,u,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 1551901697U, // <5,u,1,3>: Cost 2 vext2 <1,3,5,u>, <1,3,5,u>
- 2626307154U, // <5,u,1,4>: Cost 3 vext2 <1,4,5,u>, <1,4,5,u>
- 2689783622U, // <5,u,1,5>: Cost 3 vext3 <0,u,1,5>, <u,1,5,0>
- 2627634420U, // <5,u,1,6>: Cost 3 vext2 <1,6,5,u>, <1,6,5,u>
- 2982366536U, // <5,u,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
- 1613387620U, // <5,u,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2846286742U, // <5,u,2,0>: Cost 3 vuzpr RHS, <1,2,3,0>
- 2685796528U, // <5,u,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
- 2625644136U, // <5,u,2,2>: Cost 3 vext2 <1,3,5,u>, <2,2,2,2>
- 2687129480U, // <5,u,2,3>: Cost 3 vext3 <0,4,1,5>, <u,2,3,3>
- 2632279851U, // <5,u,2,4>: Cost 3 vext2 <2,4,5,u>, <2,4,5,u>
- 2625644394U, // <5,u,2,5>: Cost 3 vext2 <1,3,5,u>, <2,5,3,u>
- 2625644474U, // <5,u,2,6>: Cost 3 vext2 <1,3,5,u>, <2,6,3,7>
- 2713966508U, // <5,u,2,7>: Cost 3 vext3 <4,u,5,5>, <u,2,7,3>
- 2625644603U, // <5,u,2,u>: Cost 3 vext2 <1,3,5,u>, <2,u,0,1>
- 2687129532U, // <5,u,3,0>: Cost 3 vext3 <0,4,1,5>, <u,3,0,1>
- 2636261649U, // <5,u,3,1>: Cost 3 vext2 <3,1,5,u>, <3,1,5,u>
- 2636925282U, // <5,u,3,2>: Cost 3 vext2 <3,2,5,u>, <3,2,5,u>
- 2625644956U, // <5,u,3,3>: Cost 3 vext2 <1,3,5,u>, <3,3,3,3>
- 1564510724U, // <5,u,3,4>: Cost 2 vext2 <3,4,5,u>, <3,4,5,u>
- 2625645160U, // <5,u,3,5>: Cost 3 vext2 <1,3,5,u>, <3,5,u,0>
- 2734610422U, // <5,u,3,6>: Cost 3 vext3 <u,3,6,5>, <u,3,6,5>
- 2640243447U, // <5,u,3,7>: Cost 3 vext2 <3,7,5,u>, <3,7,5,u>
- 1567165256U, // <5,u,3,u>: Cost 2 vext2 <3,u,5,u>, <3,u,5,u>
- 1567828889U, // <5,u,4,0>: Cost 2 vext2 <4,0,5,u>, <4,0,5,u>
- 1661163546U, // <5,u,4,1>: Cost 2 vext3 <u,4,1,5>, <u,4,1,5>
- 2734463012U, // <5,u,4,2>: Cost 3 vext3 <u,3,4,5>, <u,4,2,6>
- 2698631212U, // <5,u,4,3>: Cost 3 vext3 <2,3,4,5>, <u,4,3,5>
- 1570458842U, // <5,u,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
- 1551904054U, // <5,u,4,5>: Cost 2 vext2 <1,3,5,u>, RHS
- 2846286172U, // <5,u,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
- 2646216144U, // <5,u,4,7>: Cost 3 vext2 <4,7,5,u>, <4,7,5,u>
- 1551904297U, // <5,u,4,u>: Cost 2 vext2 <1,3,5,u>, RHS
- 1509982310U, // <5,u,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 2560058555U, // <5,u,5,1>: Cost 3 vext1 <1,5,u,5>, <1,5,u,5>
- 2698926194U, // <5,u,5,2>: Cost 3 vext3 <2,3,u,5>, <u,5,2,3>
- 2698631295U, // <5,u,5,3>: Cost 3 vext3 <2,3,4,5>, <u,5,3,7>
- 1509985590U, // <5,u,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,u,5,5>: Cost 1 vdup1 RHS
- 1613387930U, // <5,u,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
- 1772547382U, // <5,u,5,7>: Cost 2 vuzpr RHS, RHS
- 229035318U, // <5,u,5,u>: Cost 1 vdup1 RHS
- 2566037606U, // <5,u,6,0>: Cost 3 vext1 <2,5,u,6>, LHS
- 2920044334U, // <5,u,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
- 2566039445U, // <5,u,6,2>: Cost 3 vext1 <2,5,u,6>, <2,5,u,6>
- 2687129808U, // <5,u,6,3>: Cost 3 vext3 <0,4,1,5>, <u,6,3,7>
- 2566040886U, // <5,u,6,4>: Cost 3 vext1 <2,5,u,6>, RHS
- 2920044698U, // <5,u,6,5>: Cost 3 vzipl <5,6,7,0>, RHS
- 2846289268U, // <5,u,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
- 2973781320U, // <5,u,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
- 2687129853U, // <5,u,6,u>: Cost 3 vext3 <0,4,1,5>, <u,6,u,7>
- 430506086U, // <5,u,7,0>: Cost 1 vext1 RHS, LHS
- 1486333117U, // <5,u,7,1>: Cost 2 vext1 <1,5,u,7>, <1,5,u,7>
- 1504249448U, // <5,u,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 2040971933U, // <5,u,7,3>: Cost 2 vtrnr RHS, LHS
- 430509384U, // <5,u,7,4>: Cost 1 vext1 RHS, RHS
- 1504251600U, // <5,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 118708378U, // <5,u,7,6>: Cost 1 vrev RHS
- 2040974889U, // <5,u,7,7>: Cost 2 vtrnr RHS, RHS
- 430511918U, // <5,u,7,u>: Cost 1 vext1 RHS, LHS
- 430514278U, // <5,u,u,0>: Cost 1 vext1 RHS, LHS
- 1551906606U, // <5,u,u,1>: Cost 2 vext2 <1,3,5,u>, LHS
- 1613388133U, // <5,u,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 1772544669U, // <5,u,u,3>: Cost 2 vuzpr RHS, LHS
- 430517577U, // <5,u,u,4>: Cost 1 vext1 RHS, RHS
- 229035318U, // <5,u,u,5>: Cost 1 vdup1 RHS
- 118716571U, // <5,u,u,6>: Cost 1 vrev RHS
- 1772547625U, // <5,u,u,7>: Cost 2 vuzpr RHS, RHS
- 430520110U, // <5,u,u,u>: Cost 1 vext1 RHS, LHS
- 2686025728U, // <6,0,0,0>: Cost 3 vext3 <0,2,4,6>, <0,0,0,0>
- 2686025738U, // <6,0,0,1>: Cost 3 vext3 <0,2,4,6>, <0,0,1,1>
- 2686025748U, // <6,0,0,2>: Cost 3 vext3 <0,2,4,6>, <0,0,2,2>
- 3779084320U, // <6,0,0,3>: Cost 4 vext3 <3,4,5,6>, <0,0,3,5>
- 2642903388U, // <6,0,0,4>: Cost 3 vext2 <4,2,6,0>, <0,4,2,6>
- 3657723939U, // <6,0,0,5>: Cost 4 vext1 <5,6,0,0>, <5,6,0,0>
- 3926676514U, // <6,0,0,6>: Cost 4 vuzpr <5,6,7,0>, <7,0,5,6>
- 3926675786U, // <6,0,0,7>: Cost 4 vuzpr <5,6,7,0>, <6,0,5,7>
- 2686025802U, // <6,0,0,u>: Cost 3 vext3 <0,2,4,6>, <0,0,u,2>
- 2566070374U, // <6,0,1,0>: Cost 3 vext1 <2,6,0,1>, LHS
- 3759767642U, // <6,0,1,1>: Cost 4 vext3 <0,2,4,6>, <0,1,1,0>
- 1612284006U, // <6,0,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 2583988738U, // <6,0,1,3>: Cost 3 vext1 <5,6,0,1>, <3,4,5,6>
- 2566073654U, // <6,0,1,4>: Cost 3 vext1 <2,6,0,1>, RHS
- 2583990308U, // <6,0,1,5>: Cost 3 vext1 <5,6,0,1>, <5,6,0,1>
- 2589963005U, // <6,0,1,6>: Cost 3 vext1 <6,6,0,1>, <6,6,0,1>
- 2595935702U, // <6,0,1,7>: Cost 3 vext1 <7,6,0,1>, <7,6,0,1>
- 1612284060U, // <6,0,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 2686025892U, // <6,0,2,0>: Cost 3 vext3 <0,2,4,6>, <0,2,0,2>
- 2685804721U, // <6,0,2,1>: Cost 3 vext3 <0,2,1,6>, <0,2,1,6>
- 3759620282U, // <6,0,2,2>: Cost 4 vext3 <0,2,2,6>, <0,2,2,6>
- 2705342658U, // <6,0,2,3>: Cost 3 vext3 <3,4,5,6>, <0,2,3,5>
- 1612284108U, // <6,0,2,4>: Cost 2 vext3 <0,2,4,6>, <0,2,4,6>
- 3706029956U, // <6,0,2,5>: Cost 4 vext2 <2,4,6,0>, <2,5,6,7>
- 2686173406U, // <6,0,2,6>: Cost 3 vext3 <0,2,6,6>, <0,2,6,6>
- 3651769338U, // <6,0,2,7>: Cost 4 vext1 <4,6,0,2>, <7,0,1,2>
- 1612579056U, // <6,0,2,u>: Cost 2 vext3 <0,2,u,6>, <0,2,u,6>
- 3706030230U, // <6,0,3,0>: Cost 4 vext2 <2,4,6,0>, <3,0,1,2>
- 2705342720U, // <6,0,3,1>: Cost 3 vext3 <3,4,5,6>, <0,3,1,4>
- 2705342730U, // <6,0,3,2>: Cost 3 vext3 <3,4,5,6>, <0,3,2,5>
- 3706030492U, // <6,0,3,3>: Cost 4 vext2 <2,4,6,0>, <3,3,3,3>
- 2644896258U, // <6,0,3,4>: Cost 3 vext2 <4,5,6,0>, <3,4,5,6>
- 3718638154U, // <6,0,3,5>: Cost 4 vext2 <4,5,6,0>, <3,5,4,6>
- 3729918619U, // <6,0,3,6>: Cost 4 vext2 <6,4,6,0>, <3,6,4,6>
- 3926672384U, // <6,0,3,7>: Cost 4 vuzpr <5,6,7,0>, <1,3,5,7>
- 2705342784U, // <6,0,3,u>: Cost 3 vext3 <3,4,5,6>, <0,3,u,5>
- 2687058250U, // <6,0,4,0>: Cost 3 vext3 <0,4,0,6>, <0,4,0,6>
- 2686026066U, // <6,0,4,1>: Cost 3 vext3 <0,2,4,6>, <0,4,1,5>
- 1613463900U, // <6,0,4,2>: Cost 2 vext3 <0,4,2,6>, <0,4,2,6>
- 3761021285U, // <6,0,4,3>: Cost 4 vext3 <0,4,3,6>, <0,4,3,6>
- 2687353198U, // <6,0,4,4>: Cost 3 vext3 <0,4,4,6>, <0,4,4,6>
- 2632289590U, // <6,0,4,5>: Cost 3 vext2 <2,4,6,0>, RHS
- 2645560704U, // <6,0,4,6>: Cost 3 vext2 <4,6,6,0>, <4,6,6,0>
- 2646224337U, // <6,0,4,7>: Cost 3 vext2 <4,7,6,0>, <4,7,6,0>
- 1613906322U, // <6,0,4,u>: Cost 2 vext3 <0,4,u,6>, <0,4,u,6>
- 3651788902U, // <6,0,5,0>: Cost 4 vext1 <4,6,0,5>, LHS
- 2687795620U, // <6,0,5,1>: Cost 3 vext3 <0,5,1,6>, <0,5,1,6>
- 3761611181U, // <6,0,5,2>: Cost 4 vext3 <0,5,2,6>, <0,5,2,6>
- 3723284326U, // <6,0,5,3>: Cost 4 vext2 <5,3,6,0>, <5,3,6,0>
- 2646224838U, // <6,0,5,4>: Cost 3 vext2 <4,7,6,0>, <5,4,7,6>
- 3718639630U, // <6,0,5,5>: Cost 4 vext2 <4,5,6,0>, <5,5,6,6>
- 2652196962U, // <6,0,5,6>: Cost 3 vext2 <5,7,6,0>, <5,6,7,0>
- 2852932918U, // <6,0,5,7>: Cost 3 vuzpr <5,6,7,0>, RHS
- 2852932919U, // <6,0,5,u>: Cost 3 vuzpr <5,6,7,0>, RHS
- 2852933730U, // <6,0,6,0>: Cost 3 vuzpr <5,6,7,0>, <5,6,7,0>
- 2925985894U, // <6,0,6,1>: Cost 3 vzipl <6,6,6,6>, LHS
- 3060203622U, // <6,0,6,2>: Cost 3 vtrnl <6,6,6,6>, LHS
- 3718640178U, // <6,0,6,3>: Cost 4 vext2 <4,5,6,0>, <6,3,4,5>
- 2656178832U, // <6,0,6,4>: Cost 3 vext2 <6,4,6,0>, <6,4,6,0>
- 3725939378U, // <6,0,6,5>: Cost 4 vext2 <5,7,6,0>, <6,5,0,7>
- 2657506098U, // <6,0,6,6>: Cost 3 vext2 <6,6,6,0>, <6,6,6,0>
- 2619020110U, // <6,0,6,7>: Cost 3 vext2 <0,2,6,0>, <6,7,0,1>
- 2925986461U, // <6,0,6,u>: Cost 3 vzipl <6,6,6,6>, LHS
- 2572091494U, // <6,0,7,0>: Cost 3 vext1 <3,6,0,7>, LHS
- 2572092310U, // <6,0,7,1>: Cost 3 vext1 <3,6,0,7>, <1,2,3,0>
- 2980495524U, // <6,0,7,2>: Cost 3 vzipr RHS, <0,2,0,2>
- 2572094072U, // <6,0,7,3>: Cost 3 vext1 <3,6,0,7>, <3,6,0,7>
- 2572094774U, // <6,0,7,4>: Cost 3 vext1 <3,6,0,7>, RHS
- 4054238242U, // <6,0,7,5>: Cost 4 vzipr RHS, <1,4,0,5>
- 3645837653U, // <6,0,7,6>: Cost 4 vext1 <3,6,0,7>, <6,0,7,0>
- 4054239054U, // <6,0,7,7>: Cost 4 vzipr RHS, <2,5,0,7>
- 2572097326U, // <6,0,7,u>: Cost 3 vext1 <3,6,0,7>, LHS
- 2686026378U, // <6,0,u,0>: Cost 3 vext3 <0,2,4,6>, <0,u,0,2>
- 2686026386U, // <6,0,u,1>: Cost 3 vext3 <0,2,4,6>, <0,u,1,1>
- 1612284573U, // <6,0,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 2705343144U, // <6,0,u,3>: Cost 3 vext3 <3,4,5,6>, <0,u,3,5>
- 1616265906U, // <6,0,u,4>: Cost 2 vext3 <0,u,4,6>, <0,u,4,6>
- 2632292506U, // <6,0,u,5>: Cost 3 vext2 <2,4,6,0>, RHS
- 2590020356U, // <6,0,u,6>: Cost 3 vext1 <6,6,0,u>, <6,6,0,u>
- 2852933161U, // <6,0,u,7>: Cost 3 vuzpr <5,6,7,0>, RHS
- 1612284627U, // <6,0,u,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 2595995750U, // <6,1,0,0>: Cost 3 vext1 <7,6,1,0>, LHS
- 2646229094U, // <6,1,0,1>: Cost 3 vext2 <4,7,6,1>, LHS
- 3694092492U, // <6,1,0,2>: Cost 4 vext2 <0,4,6,1>, <0,2,4,6>
- 2686026486U, // <6,1,0,3>: Cost 3 vext3 <0,2,4,6>, <1,0,3,2>
- 2595999030U, // <6,1,0,4>: Cost 3 vext1 <7,6,1,0>, RHS
- 3767730952U, // <6,1,0,5>: Cost 4 vext3 <1,5,4,6>, <1,0,5,2>
- 2596000590U, // <6,1,0,6>: Cost 3 vext1 <7,6,1,0>, <6,7,0,1>
- 2596001246U, // <6,1,0,7>: Cost 3 vext1 <7,6,1,0>, <7,6,1,0>
- 2686026531U, // <6,1,0,u>: Cost 3 vext3 <0,2,4,6>, <1,0,u,2>
- 3763602219U, // <6,1,1,0>: Cost 4 vext3 <0,u,2,6>, <1,1,0,1>
- 2686026548U, // <6,1,1,1>: Cost 3 vext3 <0,2,4,6>, <1,1,1,1>
- 3764929346U, // <6,1,1,2>: Cost 4 vext3 <1,1,2,6>, <1,1,2,6>
- 2686026568U, // <6,1,1,3>: Cost 3 vext3 <0,2,4,6>, <1,1,3,3>
- 2691334996U, // <6,1,1,4>: Cost 3 vext3 <1,1,4,6>, <1,1,4,6>
- 3760874332U, // <6,1,1,5>: Cost 4 vext3 <0,4,1,6>, <1,1,5,5>
- 3765224294U, // <6,1,1,6>: Cost 4 vext3 <1,1,6,6>, <1,1,6,6>
- 3669751263U, // <6,1,1,7>: Cost 4 vext1 <7,6,1,1>, <7,6,1,1>
- 2686026613U, // <6,1,1,u>: Cost 3 vext3 <0,2,4,6>, <1,1,u,3>
- 2554208358U, // <6,1,2,0>: Cost 3 vext1 <0,6,1,2>, LHS
- 3763602311U, // <6,1,2,1>: Cost 4 vext3 <0,u,2,6>, <1,2,1,3>
- 3639895971U, // <6,1,2,2>: Cost 4 vext1 <2,6,1,2>, <2,6,1,2>
- 2686026646U, // <6,1,2,3>: Cost 3 vext3 <0,2,4,6>, <1,2,3,0>
- 2554211638U, // <6,1,2,4>: Cost 3 vext1 <0,6,1,2>, RHS
- 3760874411U, // <6,1,2,5>: Cost 4 vext3 <0,4,1,6>, <1,2,5,3>
- 2554212858U, // <6,1,2,6>: Cost 3 vext1 <0,6,1,2>, <6,2,7,3>
- 3802973114U, // <6,1,2,7>: Cost 4 vext3 <7,4,5,6>, <1,2,7,0>
- 2686026691U, // <6,1,2,u>: Cost 3 vext3 <0,2,4,6>, <1,2,u,0>
- 2566160486U, // <6,1,3,0>: Cost 3 vext1 <2,6,1,3>, LHS
- 2686026712U, // <6,1,3,1>: Cost 3 vext3 <0,2,4,6>, <1,3,1,3>
- 2686026724U, // <6,1,3,2>: Cost 3 vext3 <0,2,4,6>, <1,3,2,6>
- 3759768552U, // <6,1,3,3>: Cost 4 vext3 <0,2,4,6>, <1,3,3,1>
- 2692662262U, // <6,1,3,4>: Cost 3 vext3 <1,3,4,6>, <1,3,4,6>
- 2686026752U, // <6,1,3,5>: Cost 3 vext3 <0,2,4,6>, <1,3,5,7>
- 2590053128U, // <6,1,3,6>: Cost 3 vext1 <6,6,1,3>, <6,6,1,3>
- 3663795194U, // <6,1,3,7>: Cost 4 vext1 <6,6,1,3>, <7,0,1,2>
- 2686026775U, // <6,1,3,u>: Cost 3 vext3 <0,2,4,6>, <1,3,u,3>
- 2641587099U, // <6,1,4,0>: Cost 3 vext2 <4,0,6,1>, <4,0,6,1>
- 2693104684U, // <6,1,4,1>: Cost 3 vext3 <1,4,1,6>, <1,4,1,6>
- 3639912357U, // <6,1,4,2>: Cost 4 vext1 <2,6,1,4>, <2,6,1,4>
- 2687206462U, // <6,1,4,3>: Cost 3 vext3 <0,4,2,6>, <1,4,3,6>
- 3633941814U, // <6,1,4,4>: Cost 4 vext1 <1,6,1,4>, RHS
- 2693399632U, // <6,1,4,5>: Cost 3 vext3 <1,4,5,6>, <1,4,5,6>
- 3765077075U, // <6,1,4,6>: Cost 4 vext3 <1,1,4,6>, <1,4,6,0>
- 2646232530U, // <6,1,4,7>: Cost 3 vext2 <4,7,6,1>, <4,7,6,1>
- 2687206507U, // <6,1,4,u>: Cost 3 vext3 <0,4,2,6>, <1,4,u,6>
- 2647559796U, // <6,1,5,0>: Cost 3 vext2 <5,0,6,1>, <5,0,6,1>
- 3765077118U, // <6,1,5,1>: Cost 4 vext3 <1,1,4,6>, <1,5,1,7>
- 3767583878U, // <6,1,5,2>: Cost 4 vext3 <1,5,2,6>, <1,5,2,6>
- 2686026896U, // <6,1,5,3>: Cost 3 vext3 <0,2,4,6>, <1,5,3,7>
- 2693989528U, // <6,1,5,4>: Cost 3 vext3 <1,5,4,6>, <1,5,4,6>
- 3767805089U, // <6,1,5,5>: Cost 4 vext3 <1,5,5,6>, <1,5,5,6>
- 2652868706U, // <6,1,5,6>: Cost 3 vext2 <5,u,6,1>, <5,6,7,0>
- 3908250934U, // <6,1,5,7>: Cost 4 vuzpr <2,6,0,1>, RHS
- 2686026941U, // <6,1,5,u>: Cost 3 vext3 <0,2,4,6>, <1,5,u,7>
- 2554241126U, // <6,1,6,0>: Cost 3 vext1 <0,6,1,6>, LHS
- 3763602639U, // <6,1,6,1>: Cost 4 vext3 <0,u,2,6>, <1,6,1,7>
- 3759547607U, // <6,1,6,2>: Cost 4 vext3 <0,2,1,6>, <1,6,2,6>
- 3115221094U, // <6,1,6,3>: Cost 3 vtrnr <4,6,4,6>, LHS
- 2554244406U, // <6,1,6,4>: Cost 3 vext1 <0,6,1,6>, RHS
- 3760874739U, // <6,1,6,5>: Cost 4 vext3 <0,4,1,6>, <1,6,5,7>
- 2554245944U, // <6,1,6,6>: Cost 3 vext1 <0,6,1,6>, <6,6,6,6>
- 3719975758U, // <6,1,6,7>: Cost 4 vext2 <4,7,6,1>, <6,7,0,1>
- 3115221099U, // <6,1,6,u>: Cost 3 vtrnr <4,6,4,6>, LHS
- 2560221286U, // <6,1,7,0>: Cost 3 vext1 <1,6,1,7>, LHS
- 2560222415U, // <6,1,7,1>: Cost 3 vext1 <1,6,1,7>, <1,6,1,7>
- 2980497558U, // <6,1,7,2>: Cost 3 vzipr RHS, <3,0,1,2>
- 3103211622U, // <6,1,7,3>: Cost 3 vtrnr <2,6,3,7>, LHS
- 2560224566U, // <6,1,7,4>: Cost 3 vext1 <1,6,1,7>, RHS
- 2980495698U, // <6,1,7,5>: Cost 3 vzipr RHS, <0,4,1,5>
- 3633967526U, // <6,1,7,6>: Cost 4 vext1 <1,6,1,7>, <6,1,7,0>
- 4054237686U, // <6,1,7,7>: Cost 4 vzipr RHS, <0,6,1,7>
- 2560227118U, // <6,1,7,u>: Cost 3 vext1 <1,6,1,7>, LHS
- 2560229478U, // <6,1,u,0>: Cost 3 vext1 <1,6,1,u>, LHS
- 2686027117U, // <6,1,u,1>: Cost 3 vext3 <0,2,4,6>, <1,u,1,3>
- 2686027129U, // <6,1,u,2>: Cost 3 vext3 <0,2,4,6>, <1,u,2,6>
- 2686027132U, // <6,1,u,3>: Cost 3 vext3 <0,2,4,6>, <1,u,3,0>
- 2687206795U, // <6,1,u,4>: Cost 3 vext3 <0,4,2,6>, <1,u,4,6>
- 2686027157U, // <6,1,u,5>: Cost 3 vext3 <0,2,4,6>, <1,u,5,7>
- 2590094093U, // <6,1,u,6>: Cost 3 vext1 <6,6,1,u>, <6,6,1,u>
- 2596066790U, // <6,1,u,7>: Cost 3 vext1 <7,6,1,u>, <7,6,1,u>
- 2686027177U, // <6,1,u,u>: Cost 3 vext3 <0,2,4,6>, <1,u,u,0>
- 2646900736U, // <6,2,0,0>: Cost 3 vext2 <4,u,6,2>, <0,0,0,0>
- 1573159014U, // <6,2,0,1>: Cost 2 vext2 <4,u,6,2>, LHS
- 2646900900U, // <6,2,0,2>: Cost 3 vext2 <4,u,6,2>, <0,2,0,2>
- 3759769037U, // <6,2,0,3>: Cost 4 vext3 <0,2,4,6>, <2,0,3,0>
- 2641592668U, // <6,2,0,4>: Cost 3 vext2 <4,0,6,2>, <0,4,2,6>
- 3779085794U, // <6,2,0,5>: Cost 4 vext3 <3,4,5,6>, <2,0,5,3>
- 2686027244U, // <6,2,0,6>: Cost 3 vext3 <0,2,4,6>, <2,0,6,4>
- 3669816807U, // <6,2,0,7>: Cost 4 vext1 <7,6,2,0>, <7,6,2,0>
- 1573159581U, // <6,2,0,u>: Cost 2 vext2 <4,u,6,2>, LHS
- 2230527897U, // <6,2,1,0>: Cost 3 vrev <2,6,0,1>
- 2646901556U, // <6,2,1,1>: Cost 3 vext2 <4,u,6,2>, <1,1,1,1>
- 2646901654U, // <6,2,1,2>: Cost 3 vext2 <4,u,6,2>, <1,2,3,0>
- 2847047782U, // <6,2,1,3>: Cost 3 vuzpr <4,6,u,2>, LHS
- 3771049517U, // <6,2,1,4>: Cost 4 vext3 <2,1,4,6>, <2,1,4,6>
- 2646901904U, // <6,2,1,5>: Cost 3 vext2 <4,u,6,2>, <1,5,3,7>
- 2686027324U, // <6,2,1,6>: Cost 3 vext3 <0,2,4,6>, <2,1,6,3>
- 3669825000U, // <6,2,1,7>: Cost 4 vext1 <7,6,2,1>, <7,6,2,1>
- 2231117793U, // <6,2,1,u>: Cost 3 vrev <2,6,u,1>
- 3763603029U, // <6,2,2,0>: Cost 4 vext3 <0,u,2,6>, <2,2,0,1>
- 3759769184U, // <6,2,2,1>: Cost 4 vext3 <0,2,4,6>, <2,2,1,3>
- 2686027368U, // <6,2,2,2>: Cost 3 vext3 <0,2,4,6>, <2,2,2,2>
- 2686027378U, // <6,2,2,3>: Cost 3 vext3 <0,2,4,6>, <2,2,3,3>
- 2697971326U, // <6,2,2,4>: Cost 3 vext3 <2,2,4,6>, <2,2,4,6>
- 3759769224U, // <6,2,2,5>: Cost 4 vext3 <0,2,4,6>, <2,2,5,7>
- 2698118800U, // <6,2,2,6>: Cost 3 vext3 <2,2,6,6>, <2,2,6,6>
- 3920794092U, // <6,2,2,7>: Cost 4 vuzpr <4,6,u,2>, <6,2,5,7>
- 2686027423U, // <6,2,2,u>: Cost 3 vext3 <0,2,4,6>, <2,2,u,3>
- 2686027430U, // <6,2,3,0>: Cost 3 vext3 <0,2,4,6>, <2,3,0,1>
- 3759769262U, // <6,2,3,1>: Cost 4 vext3 <0,2,4,6>, <2,3,1,0>
- 2698487485U, // <6,2,3,2>: Cost 3 vext3 <2,3,2,6>, <2,3,2,6>
- 2705344196U, // <6,2,3,3>: Cost 3 vext3 <3,4,5,6>, <2,3,3,4>
- 2686027470U, // <6,2,3,4>: Cost 3 vext3 <0,2,4,6>, <2,3,4,5>
- 2698708696U, // <6,2,3,5>: Cost 3 vext3 <2,3,5,6>, <2,3,5,6>
- 2724660961U, // <6,2,3,6>: Cost 3 vext3 <6,6,6,6>, <2,3,6,6>
- 2729232104U, // <6,2,3,7>: Cost 3 vext3 <7,4,5,6>, <2,3,7,4>
- 2686027502U, // <6,2,3,u>: Cost 3 vext3 <0,2,4,6>, <2,3,u,1>
- 1567853468U, // <6,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
- 3759769351U, // <6,2,4,1>: Cost 4 vext3 <0,2,4,6>, <2,4,1,u>
- 2699151118U, // <6,2,4,2>: Cost 3 vext3 <2,4,2,6>, <2,4,2,6>
- 2686027543U, // <6,2,4,3>: Cost 3 vext3 <0,2,4,6>, <2,4,3,6>
- 2699298592U, // <6,2,4,4>: Cost 3 vext3 <2,4,4,6>, <2,4,4,6>
- 1573162294U, // <6,2,4,5>: Cost 2 vext2 <4,u,6,2>, RHS
- 2686027564U, // <6,2,4,6>: Cost 3 vext3 <0,2,4,6>, <2,4,6,0>
- 3719982547U, // <6,2,4,7>: Cost 4 vext2 <4,7,6,2>, <4,7,6,2>
- 1573162532U, // <6,2,4,u>: Cost 2 vext2 <4,u,6,2>, <4,u,6,2>
- 3779086154U, // <6,2,5,0>: Cost 4 vext3 <3,4,5,6>, <2,5,0,3>
- 2646904528U, // <6,2,5,1>: Cost 3 vext2 <4,u,6,2>, <5,1,7,3>
- 3759769440U, // <6,2,5,2>: Cost 4 vext3 <0,2,4,6>, <2,5,2,7>
- 2699888488U, // <6,2,5,3>: Cost 3 vext3 <2,5,3,6>, <2,5,3,6>
- 2230855617U, // <6,2,5,4>: Cost 3 vrev <2,6,4,5>
- 2646904836U, // <6,2,5,5>: Cost 3 vext2 <4,u,6,2>, <5,5,5,5>
- 2646904930U, // <6,2,5,6>: Cost 3 vext2 <4,u,6,2>, <5,6,7,0>
- 2847051062U, // <6,2,5,7>: Cost 3 vuzpr <4,6,u,2>, RHS
- 2700257173U, // <6,2,5,u>: Cost 3 vext3 <2,5,u,6>, <2,5,u,6>
- 2687207321U, // <6,2,6,0>: Cost 3 vext3 <0,4,2,6>, <2,6,0,1>
- 2686027684U, // <6,2,6,1>: Cost 3 vext3 <0,2,4,6>, <2,6,1,3>
- 2566260656U, // <6,2,6,2>: Cost 3 vext1 <2,6,2,6>, <2,6,2,6>
- 2685806522U, // <6,2,6,3>: Cost 3 vext3 <0,2,1,6>, <2,6,3,7>
- 2687207361U, // <6,2,6,4>: Cost 3 vext3 <0,4,2,6>, <2,6,4,5>
- 2686027724U, // <6,2,6,5>: Cost 3 vext3 <0,2,4,6>, <2,6,5,7>
- 2646905656U, // <6,2,6,6>: Cost 3 vext2 <4,u,6,2>, <6,6,6,6>
- 2646905678U, // <6,2,6,7>: Cost 3 vext2 <4,u,6,2>, <6,7,0,1>
- 2686027751U, // <6,2,6,u>: Cost 3 vext3 <0,2,4,6>, <2,6,u,7>
- 2554323046U, // <6,2,7,0>: Cost 3 vext1 <0,6,2,7>, LHS
- 2572239606U, // <6,2,7,1>: Cost 3 vext1 <3,6,2,7>, <1,0,3,2>
- 2566268849U, // <6,2,7,2>: Cost 3 vext1 <2,6,2,7>, <2,6,2,7>
- 1906753638U, // <6,2,7,3>: Cost 2 vzipr RHS, LHS
- 2554326326U, // <6,2,7,4>: Cost 3 vext1 <0,6,2,7>, RHS
- 3304687564U, // <6,2,7,5>: Cost 4 vrev <2,6,5,7>
- 2980495708U, // <6,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
- 2646906476U, // <6,2,7,7>: Cost 3 vext2 <4,u,6,2>, <7,7,7,7>
- 1906753643U, // <6,2,7,u>: Cost 2 vzipr RHS, LHS
- 1591744256U, // <6,2,u,0>: Cost 2 vext2 <u,0,6,2>, <u,0,6,2>
- 1573164846U, // <6,2,u,1>: Cost 2 vext2 <4,u,6,2>, LHS
- 2701805650U, // <6,2,u,2>: Cost 3 vext3 <2,u,2,6>, <2,u,2,6>
- 1906761830U, // <6,2,u,3>: Cost 2 vzipr RHS, LHS
- 2686027875U, // <6,2,u,4>: Cost 3 vext3 <0,2,4,6>, <2,u,4,5>
- 1573165210U, // <6,2,u,5>: Cost 2 vext2 <4,u,6,2>, RHS
- 2686322800U, // <6,2,u,6>: Cost 3 vext3 <0,2,u,6>, <2,u,6,0>
- 2847051305U, // <6,2,u,7>: Cost 3 vuzpr <4,6,u,2>, RHS
- 1906761835U, // <6,2,u,u>: Cost 2 vzipr RHS, LHS
- 3759769739U, // <6,3,0,0>: Cost 4 vext3 <0,2,4,6>, <3,0,0,0>
- 2686027926U, // <6,3,0,1>: Cost 3 vext3 <0,2,4,6>, <3,0,1,2>
- 2686027937U, // <6,3,0,2>: Cost 3 vext3 <0,2,4,6>, <3,0,2,4>
- 3640027286U, // <6,3,0,3>: Cost 4 vext1 <2,6,3,0>, <3,0,1,2>
- 2687207601U, // <6,3,0,4>: Cost 3 vext3 <0,4,2,6>, <3,0,4,2>
- 2705344698U, // <6,3,0,5>: Cost 3 vext3 <3,4,5,6>, <3,0,5,2>
- 3663917847U, // <6,3,0,6>: Cost 4 vext1 <6,6,3,0>, <6,6,3,0>
- 2237008560U, // <6,3,0,7>: Cost 3 vrev <3,6,7,0>
- 2686027989U, // <6,3,0,u>: Cost 3 vext3 <0,2,4,6>, <3,0,u,2>
- 3759769823U, // <6,3,1,0>: Cost 4 vext3 <0,2,4,6>, <3,1,0,3>
- 3759769830U, // <6,3,1,1>: Cost 4 vext3 <0,2,4,6>, <3,1,1,1>
- 3759769841U, // <6,3,1,2>: Cost 4 vext3 <0,2,4,6>, <3,1,2,3>
- 3759769848U, // <6,3,1,3>: Cost 4 vext3 <0,2,4,6>, <3,1,3,1>
- 2703280390U, // <6,3,1,4>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
- 3759769868U, // <6,3,1,5>: Cost 4 vext3 <0,2,4,6>, <3,1,5,3>
- 3704063194U, // <6,3,1,6>: Cost 4 vext2 <2,1,6,3>, <1,6,3,0>
- 3767732510U, // <6,3,1,7>: Cost 4 vext3 <1,5,4,6>, <3,1,7,3>
- 2703280390U, // <6,3,1,u>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
- 3704063468U, // <6,3,2,0>: Cost 4 vext2 <2,1,6,3>, <2,0,6,4>
- 2630321724U, // <6,3,2,1>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
- 3759769921U, // <6,3,2,2>: Cost 4 vext3 <0,2,4,6>, <3,2,2,2>
- 3759769928U, // <6,3,2,3>: Cost 4 vext3 <0,2,4,6>, <3,2,3,0>
- 3704063767U, // <6,3,2,4>: Cost 4 vext2 <2,1,6,3>, <2,4,3,6>
- 3704063876U, // <6,3,2,5>: Cost 4 vext2 <2,1,6,3>, <2,5,6,7>
- 2636957626U, // <6,3,2,6>: Cost 3 vext2 <3,2,6,3>, <2,6,3,7>
- 3777907058U, // <6,3,2,7>: Cost 4 vext3 <3,2,7,6>, <3,2,7,6>
- 2630321724U, // <6,3,2,u>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
- 3759769983U, // <6,3,3,0>: Cost 4 vext3 <0,2,4,6>, <3,3,0,1>
- 3710036245U, // <6,3,3,1>: Cost 4 vext2 <3,1,6,3>, <3,1,6,3>
- 2636958054U, // <6,3,3,2>: Cost 3 vext2 <3,2,6,3>, <3,2,6,3>
- 2686028188U, // <6,3,3,3>: Cost 3 vext3 <0,2,4,6>, <3,3,3,3>
- 2704607656U, // <6,3,3,4>: Cost 3 vext3 <3,3,4,6>, <3,3,4,6>
- 3773041072U, // <6,3,3,5>: Cost 4 vext3 <2,4,4,6>, <3,3,5,5>
- 3711363731U, // <6,3,3,6>: Cost 4 vext2 <3,3,6,3>, <3,6,3,7>
- 3767732676U, // <6,3,3,7>: Cost 4 vext3 <1,5,4,6>, <3,3,7,7>
- 2707999179U, // <6,3,3,u>: Cost 3 vext3 <3,u,5,6>, <3,3,u,5>
- 2584232038U, // <6,3,4,0>: Cost 3 vext1 <5,6,3,4>, LHS
- 2642267118U, // <6,3,4,1>: Cost 3 vext2 <4,1,6,3>, <4,1,6,3>
- 2642930751U, // <6,3,4,2>: Cost 3 vext2 <4,2,6,3>, <4,2,6,3>
- 2705197552U, // <6,3,4,3>: Cost 3 vext3 <3,4,3,6>, <3,4,3,6>
- 2584235318U, // <6,3,4,4>: Cost 3 vext1 <5,6,3,4>, RHS
- 1631603202U, // <6,3,4,5>: Cost 2 vext3 <3,4,5,6>, <3,4,5,6>
- 2654211444U, // <6,3,4,6>: Cost 3 vext2 <6,1,6,3>, <4,6,4,6>
- 2237041332U, // <6,3,4,7>: Cost 3 vrev <3,6,7,4>
- 1631824413U, // <6,3,4,u>: Cost 2 vext3 <3,4,u,6>, <3,4,u,6>
- 3640066150U, // <6,3,5,0>: Cost 4 vext1 <2,6,3,5>, LHS
- 3772746288U, // <6,3,5,1>: Cost 4 vext3 <2,4,0,6>, <3,5,1,7>
- 3640067790U, // <6,3,5,2>: Cost 4 vext1 <2,6,3,5>, <2,3,4,5>
- 3773041216U, // <6,3,5,3>: Cost 4 vext3 <2,4,4,6>, <3,5,3,5>
- 2705934922U, // <6,3,5,4>: Cost 3 vext3 <3,5,4,6>, <3,5,4,6>
- 3773041236U, // <6,3,5,5>: Cost 4 vext3 <2,4,4,6>, <3,5,5,7>
- 3779086940U, // <6,3,5,6>: Cost 4 vext3 <3,4,5,6>, <3,5,6,6>
- 3767732831U, // <6,3,5,7>: Cost 4 vext3 <1,5,4,6>, <3,5,7,0>
- 2706229870U, // <6,3,5,u>: Cost 3 vext3 <3,5,u,6>, <3,5,u,6>
- 2602164326U, // <6,3,6,0>: Cost 3 vext1 <u,6,3,6>, LHS
- 2654212512U, // <6,3,6,1>: Cost 3 vext2 <6,1,6,3>, <6,1,6,3>
- 2566334393U, // <6,3,6,2>: Cost 3 vext1 <2,6,3,6>, <2,6,3,6>
- 3704066588U, // <6,3,6,3>: Cost 4 vext2 <2,1,6,3>, <6,3,2,1>
- 2602167524U, // <6,3,6,4>: Cost 3 vext1 <u,6,3,6>, <4,4,6,6>
- 3710702321U, // <6,3,6,5>: Cost 4 vext2 <3,2,6,3>, <6,5,7,7>
- 2724661933U, // <6,3,6,6>: Cost 3 vext3 <6,6,6,6>, <3,6,6,6>
- 3710702465U, // <6,3,6,7>: Cost 4 vext2 <3,2,6,3>, <6,7,5,7>
- 2602170158U, // <6,3,6,u>: Cost 3 vext1 <u,6,3,6>, LHS
- 1492598886U, // <6,3,7,0>: Cost 2 vext1 <2,6,3,7>, LHS
- 2560369889U, // <6,3,7,1>: Cost 3 vext1 <1,6,3,7>, <1,6,3,7>
- 1492600762U, // <6,3,7,2>: Cost 2 vext1 <2,6,3,7>, <2,6,3,7>
- 2566342806U, // <6,3,7,3>: Cost 3 vext1 <2,6,3,7>, <3,0,1,2>
- 1492602166U, // <6,3,7,4>: Cost 2 vext1 <2,6,3,7>, RHS
- 2602176208U, // <6,3,7,5>: Cost 3 vext1 <u,6,3,7>, <5,1,7,3>
- 2566345210U, // <6,3,7,6>: Cost 3 vext1 <2,6,3,7>, <6,2,7,3>
- 2980496528U, // <6,3,7,7>: Cost 3 vzipr RHS, <1,5,3,7>
- 1492604718U, // <6,3,7,u>: Cost 2 vext1 <2,6,3,7>, LHS
- 1492607078U, // <6,3,u,0>: Cost 2 vext1 <2,6,3,u>, LHS
- 2686028574U, // <6,3,u,1>: Cost 3 vext3 <0,2,4,6>, <3,u,1,2>
- 1492608955U, // <6,3,u,2>: Cost 2 vext1 <2,6,3,u>, <2,6,3,u>
- 2566350998U, // <6,3,u,3>: Cost 3 vext1 <2,6,3,u>, <3,0,1,2>
- 1492610358U, // <6,3,u,4>: Cost 2 vext1 <2,6,3,u>, RHS
- 1634257734U, // <6,3,u,5>: Cost 2 vext3 <3,u,5,6>, <3,u,5,6>
- 2566353489U, // <6,3,u,6>: Cost 3 vext1 <2,6,3,u>, <6,3,u,0>
- 2980504720U, // <6,3,u,7>: Cost 3 vzipr RHS, <1,5,3,7>
- 1492612910U, // <6,3,u,u>: Cost 2 vext1 <2,6,3,u>, LHS
- 3703406592U, // <6,4,0,0>: Cost 4 vext2 <2,0,6,4>, <0,0,0,0>
- 2629664870U, // <6,4,0,1>: Cost 3 vext2 <2,0,6,4>, LHS
- 2629664972U, // <6,4,0,2>: Cost 3 vext2 <2,0,6,4>, <0,2,4,6>
- 3779087232U, // <6,4,0,3>: Cost 4 vext3 <3,4,5,6>, <4,0,3,1>
- 2642936156U, // <6,4,0,4>: Cost 3 vext2 <4,2,6,4>, <0,4,2,6>
- 2712570770U, // <6,4,0,5>: Cost 3 vext3 <4,6,4,6>, <4,0,5,1>
- 2687208348U, // <6,4,0,6>: Cost 3 vext3 <0,4,2,6>, <4,0,6,2>
- 3316723081U, // <6,4,0,7>: Cost 4 vrev <4,6,7,0>
- 2629665437U, // <6,4,0,u>: Cost 3 vext2 <2,0,6,4>, LHS
- 2242473291U, // <6,4,1,0>: Cost 3 vrev <4,6,0,1>
- 3700089652U, // <6,4,1,1>: Cost 4 vext2 <1,4,6,4>, <1,1,1,1>
- 3703407510U, // <6,4,1,2>: Cost 4 vext2 <2,0,6,4>, <1,2,3,0>
- 2852962406U, // <6,4,1,3>: Cost 3 vuzpr <5,6,7,4>, LHS
- 3628166454U, // <6,4,1,4>: Cost 4 vext1 <0,6,4,1>, RHS
- 3760876514U, // <6,4,1,5>: Cost 4 vext3 <0,4,1,6>, <4,1,5,0>
- 2687208430U, // <6,4,1,6>: Cost 3 vext3 <0,4,2,6>, <4,1,6,3>
- 3316731274U, // <6,4,1,7>: Cost 4 vrev <4,6,7,1>
- 2243063187U, // <6,4,1,u>: Cost 3 vrev <4,6,u,1>
- 2629666284U, // <6,4,2,0>: Cost 3 vext2 <2,0,6,4>, <2,0,6,4>
- 3703408188U, // <6,4,2,1>: Cost 4 vext2 <2,0,6,4>, <2,1,6,3>
- 3703408232U, // <6,4,2,2>: Cost 4 vext2 <2,0,6,4>, <2,2,2,2>
- 3703408294U, // <6,4,2,3>: Cost 4 vext2 <2,0,6,4>, <2,3,0,1>
- 2632320816U, // <6,4,2,4>: Cost 3 vext2 <2,4,6,4>, <2,4,6,4>
- 2923384118U, // <6,4,2,5>: Cost 3 vzipl <6,2,7,3>, RHS
- 2687208508U, // <6,4,2,6>: Cost 3 vext3 <0,4,2,6>, <4,2,6,0>
- 3760950341U, // <6,4,2,7>: Cost 4 vext3 <0,4,2,6>, <4,2,7,0>
- 2634975348U, // <6,4,2,u>: Cost 3 vext2 <2,u,6,4>, <2,u,6,4>
- 3703408790U, // <6,4,3,0>: Cost 4 vext2 <2,0,6,4>, <3,0,1,2>
- 3316305238U, // <6,4,3,1>: Cost 4 vrev <4,6,1,3>
- 3703408947U, // <6,4,3,2>: Cost 4 vext2 <2,0,6,4>, <3,2,0,6>
- 3703409052U, // <6,4,3,3>: Cost 4 vext2 <2,0,6,4>, <3,3,3,3>
- 2644929026U, // <6,4,3,4>: Cost 3 vext2 <4,5,6,4>, <3,4,5,6>
- 3718670922U, // <6,4,3,5>: Cost 4 vext2 <4,5,6,4>, <3,5,4,6>
- 2705345682U, // <6,4,3,6>: Cost 3 vext3 <3,4,5,6>, <4,3,6,5>
- 3926705152U, // <6,4,3,7>: Cost 4 vuzpr <5,6,7,4>, <1,3,5,7>
- 2668817222U, // <6,4,3,u>: Cost 3 vext2 <u,5,6,4>, <3,u,5,6>
- 2590277734U, // <6,4,4,0>: Cost 3 vext1 <6,6,4,4>, LHS
- 3716017135U, // <6,4,4,1>: Cost 4 vext2 <4,1,6,4>, <4,1,6,4>
- 2642938944U, // <6,4,4,2>: Cost 3 vext2 <4,2,6,4>, <4,2,6,4>
- 3717344401U, // <6,4,4,3>: Cost 4 vext2 <4,3,6,4>, <4,3,6,4>
- 2712571088U, // <6,4,4,4>: Cost 3 vext3 <4,6,4,6>, <4,4,4,4>
- 2629668150U, // <6,4,4,5>: Cost 3 vext2 <2,0,6,4>, RHS
- 1637649636U, // <6,4,4,6>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
- 2646257109U, // <6,4,4,7>: Cost 3 vext2 <4,7,6,4>, <4,7,6,4>
- 1637649636U, // <6,4,4,u>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
- 2566398054U, // <6,4,5,0>: Cost 3 vext1 <2,6,4,5>, LHS
- 3760876805U, // <6,4,5,1>: Cost 4 vext3 <0,4,1,6>, <4,5,1,3>
- 2566399937U, // <6,4,5,2>: Cost 3 vext1 <2,6,4,5>, <2,6,4,5>
- 2584316418U, // <6,4,5,3>: Cost 3 vext1 <5,6,4,5>, <3,4,5,6>
- 2566401334U, // <6,4,5,4>: Cost 3 vext1 <2,6,4,5>, RHS
- 2584318028U, // <6,4,5,5>: Cost 3 vext1 <5,6,4,5>, <5,6,4,5>
- 1612287286U, // <6,4,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 2852965686U, // <6,4,5,7>: Cost 3 vuzpr <5,6,7,4>, RHS
- 1612287304U, // <6,4,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 1504608358U, // <6,4,6,0>: Cost 2 vext1 <4,6,4,6>, LHS
- 2578350838U, // <6,4,6,1>: Cost 3 vext1 <4,6,4,6>, <1,0,3,2>
- 2578351720U, // <6,4,6,2>: Cost 3 vext1 <4,6,4,6>, <2,2,2,2>
- 2578352278U, // <6,4,6,3>: Cost 3 vext1 <4,6,4,6>, <3,0,1,2>
- 1504611638U, // <6,4,6,4>: Cost 2 vext1 <4,6,4,6>, RHS
- 2578353872U, // <6,4,6,5>: Cost 3 vext1 <4,6,4,6>, <5,1,7,3>
- 2578354682U, // <6,4,6,6>: Cost 3 vext1 <4,6,4,6>, <6,2,7,3>
- 2578355194U, // <6,4,6,7>: Cost 3 vext1 <4,6,4,6>, <7,0,1,2>
- 1504614190U, // <6,4,6,u>: Cost 2 vext1 <4,6,4,6>, LHS
- 2572386406U, // <6,4,7,0>: Cost 3 vext1 <3,6,4,7>, LHS
- 2572387226U, // <6,4,7,1>: Cost 3 vext1 <3,6,4,7>, <1,2,3,4>
- 3640157902U, // <6,4,7,2>: Cost 4 vext1 <2,6,4,7>, <2,3,4,5>
- 2572389020U, // <6,4,7,3>: Cost 3 vext1 <3,6,4,7>, <3,6,4,7>
- 2572389686U, // <6,4,7,4>: Cost 3 vext1 <3,6,4,7>, RHS
- 2980497102U, // <6,4,7,5>: Cost 3 vzipr RHS, <2,3,4,5>
- 2980495564U, // <6,4,7,6>: Cost 3 vzipr RHS, <0,2,4,6>
- 4054239090U, // <6,4,7,7>: Cost 4 vzipr RHS, <2,5,4,7>
- 2572392238U, // <6,4,7,u>: Cost 3 vext1 <3,6,4,7>, LHS
- 1504608358U, // <6,4,u,0>: Cost 2 vext1 <4,6,4,6>, LHS
- 2629670702U, // <6,4,u,1>: Cost 3 vext2 <2,0,6,4>, LHS
- 2566424516U, // <6,4,u,2>: Cost 3 vext1 <2,6,4,u>, <2,6,4,u>
- 2584340994U, // <6,4,u,3>: Cost 3 vext1 <5,6,4,u>, <3,4,5,6>
- 1640156694U, // <6,4,u,4>: Cost 2 vext3 <4,u,4,6>, <4,u,4,6>
- 2629671066U, // <6,4,u,5>: Cost 3 vext2 <2,0,6,4>, RHS
- 1612287529U, // <6,4,u,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 2852965929U, // <6,4,u,7>: Cost 3 vuzpr <5,6,7,4>, RHS
- 1612287547U, // <6,4,u,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 3708723200U, // <6,5,0,0>: Cost 4 vext2 <2,u,6,5>, <0,0,0,0>
- 2634981478U, // <6,5,0,1>: Cost 3 vext2 <2,u,6,5>, LHS
- 3694125260U, // <6,5,0,2>: Cost 4 vext2 <0,4,6,5>, <0,2,4,6>
- 3779087962U, // <6,5,0,3>: Cost 4 vext3 <3,4,5,6>, <5,0,3,2>
- 3760877154U, // <6,5,0,4>: Cost 4 vext3 <0,4,1,6>, <5,0,4,1>
- 4195110916U, // <6,5,0,5>: Cost 4 vtrnr <5,6,7,0>, <5,5,5,5>
- 3696779775U, // <6,5,0,6>: Cost 4 vext2 <0,u,6,5>, <0,6,2,7>
- 1175212130U, // <6,5,0,7>: Cost 2 vrev <5,6,7,0>
- 1175285867U, // <6,5,0,u>: Cost 2 vrev <5,6,u,0>
- 2248445988U, // <6,5,1,0>: Cost 3 vrev <5,6,0,1>
- 3698107237U, // <6,5,1,1>: Cost 4 vext2 <1,1,6,5>, <1,1,6,5>
- 3708724118U, // <6,5,1,2>: Cost 4 vext2 <2,u,6,5>, <1,2,3,0>
- 3908575334U, // <6,5,1,3>: Cost 4 vuzpr <2,6,4,5>, LHS
- 3716023376U, // <6,5,1,4>: Cost 4 vext2 <4,1,6,5>, <1,4,5,6>
- 3708724368U, // <6,5,1,5>: Cost 4 vext2 <2,u,6,5>, <1,5,3,7>
- 3767733960U, // <6,5,1,6>: Cost 4 vext3 <1,5,4,6>, <5,1,6,4>
- 2712571600U, // <6,5,1,7>: Cost 3 vext3 <4,6,4,6>, <5,1,7,3>
- 2712571609U, // <6,5,1,u>: Cost 3 vext3 <4,6,4,6>, <5,1,u,3>
- 2578391142U, // <6,5,2,0>: Cost 3 vext1 <4,6,5,2>, LHS
- 3704079934U, // <6,5,2,1>: Cost 4 vext2 <2,1,6,5>, <2,1,6,5>
- 3708724840U, // <6,5,2,2>: Cost 4 vext2 <2,u,6,5>, <2,2,2,2>
- 3705407182U, // <6,5,2,3>: Cost 4 vext2 <2,3,6,5>, <2,3,4,5>
- 2578394422U, // <6,5,2,4>: Cost 3 vext1 <4,6,5,2>, RHS
- 3717351272U, // <6,5,2,5>: Cost 4 vext2 <4,3,6,5>, <2,5,3,6>
- 2634983354U, // <6,5,2,6>: Cost 3 vext2 <2,u,6,5>, <2,6,3,7>
- 3115486518U, // <6,5,2,7>: Cost 3 vtrnr <4,6,u,2>, RHS
- 2634983541U, // <6,5,2,u>: Cost 3 vext2 <2,u,6,5>, <2,u,6,5>
- 3708725398U, // <6,5,3,0>: Cost 4 vext2 <2,u,6,5>, <3,0,1,2>
- 3710052631U, // <6,5,3,1>: Cost 4 vext2 <3,1,6,5>, <3,1,6,5>
- 3708725606U, // <6,5,3,2>: Cost 4 vext2 <2,u,6,5>, <3,2,6,3>
- 3708725660U, // <6,5,3,3>: Cost 4 vext2 <2,u,6,5>, <3,3,3,3>
- 2643610114U, // <6,5,3,4>: Cost 3 vext2 <4,3,6,5>, <3,4,5,6>
- 3717352010U, // <6,5,3,5>: Cost 4 vext2 <4,3,6,5>, <3,5,4,6>
- 3773632358U, // <6,5,3,6>: Cost 4 vext3 <2,5,3,6>, <5,3,6,0>
- 2248978533U, // <6,5,3,7>: Cost 3 vrev <5,6,7,3>
- 2249052270U, // <6,5,3,u>: Cost 3 vrev <5,6,u,3>
- 2596323430U, // <6,5,4,0>: Cost 3 vext1 <7,6,5,4>, LHS
- 3716025328U, // <6,5,4,1>: Cost 4 vext2 <4,1,6,5>, <4,1,6,5>
- 3716688961U, // <6,5,4,2>: Cost 4 vext2 <4,2,6,5>, <4,2,6,5>
- 2643610770U, // <6,5,4,3>: Cost 3 vext2 <4,3,6,5>, <4,3,6,5>
- 2596326710U, // <6,5,4,4>: Cost 3 vext1 <7,6,5,4>, RHS
- 2634984758U, // <6,5,4,5>: Cost 3 vext2 <2,u,6,5>, RHS
- 3767734199U, // <6,5,4,6>: Cost 4 vext3 <1,5,4,6>, <5,4,6,0>
- 1643696070U, // <6,5,4,7>: Cost 2 vext3 <5,4,7,6>, <5,4,7,6>
- 1643769807U, // <6,5,4,u>: Cost 2 vext3 <5,4,u,6>, <5,4,u,6>
- 2578415718U, // <6,5,5,0>: Cost 3 vext1 <4,6,5,5>, LHS
- 3652158198U, // <6,5,5,1>: Cost 4 vext1 <4,6,5,5>, <1,0,3,2>
- 3652159080U, // <6,5,5,2>: Cost 4 vext1 <4,6,5,5>, <2,2,2,2>
- 3652159638U, // <6,5,5,3>: Cost 4 vext1 <4,6,5,5>, <3,0,1,2>
- 2578418998U, // <6,5,5,4>: Cost 3 vext1 <4,6,5,5>, RHS
- 2712571908U, // <6,5,5,5>: Cost 3 vext3 <4,6,4,6>, <5,5,5,5>
- 2718027790U, // <6,5,5,6>: Cost 3 vext3 <5,5,6,6>, <5,5,6,6>
- 2712571928U, // <6,5,5,7>: Cost 3 vext3 <4,6,4,6>, <5,5,7,7>
- 2712571937U, // <6,5,5,u>: Cost 3 vext3 <4,6,4,6>, <5,5,u,7>
- 2705346596U, // <6,5,6,0>: Cost 3 vext3 <3,4,5,6>, <5,6,0,1>
- 3767144496U, // <6,5,6,1>: Cost 4 vext3 <1,4,5,6>, <5,6,1,4>
- 3773116473U, // <6,5,6,2>: Cost 4 vext3 <2,4,5,6>, <5,6,2,4>
- 2705346626U, // <6,5,6,3>: Cost 3 vext3 <3,4,5,6>, <5,6,3,4>
- 2705346636U, // <6,5,6,4>: Cost 3 vext3 <3,4,5,6>, <5,6,4,5>
- 3908577217U, // <6,5,6,5>: Cost 4 vuzpr <2,6,4,5>, <2,6,4,5>
- 2578428728U, // <6,5,6,6>: Cost 3 vext1 <4,6,5,6>, <6,6,6,6>
- 2712572002U, // <6,5,6,7>: Cost 3 vext3 <4,6,4,6>, <5,6,7,0>
- 2705346668U, // <6,5,6,u>: Cost 3 vext3 <3,4,5,6>, <5,6,u,1>
- 2560516198U, // <6,5,7,0>: Cost 3 vext1 <1,6,5,7>, LHS
- 2560517363U, // <6,5,7,1>: Cost 3 vext1 <1,6,5,7>, <1,6,5,7>
- 2566490060U, // <6,5,7,2>: Cost 3 vext1 <2,6,5,7>, <2,6,5,7>
- 3634260118U, // <6,5,7,3>: Cost 4 vext1 <1,6,5,7>, <3,0,1,2>
- 2560519478U, // <6,5,7,4>: Cost 3 vext1 <1,6,5,7>, RHS
- 2980498650U, // <6,5,7,5>: Cost 3 vzipr RHS, <4,4,5,5>
- 2980497922U, // <6,5,7,6>: Cost 3 vzipr RHS, <3,4,5,6>
- 3103214902U, // <6,5,7,7>: Cost 3 vtrnr <2,6,3,7>, RHS
- 2560522030U, // <6,5,7,u>: Cost 3 vext1 <1,6,5,7>, LHS
- 2560524390U, // <6,5,u,0>: Cost 3 vext1 <1,6,5,u>, LHS
- 2560525556U, // <6,5,u,1>: Cost 3 vext1 <1,6,5,u>, <1,6,5,u>
- 2566498253U, // <6,5,u,2>: Cost 3 vext1 <2,6,5,u>, <2,6,5,u>
- 2646931439U, // <6,5,u,3>: Cost 3 vext2 <4,u,6,5>, <u,3,5,7>
- 2560527670U, // <6,5,u,4>: Cost 3 vext1 <1,6,5,u>, RHS
- 2634987674U, // <6,5,u,5>: Cost 3 vext2 <2,u,6,5>, RHS
- 2980506114U, // <6,5,u,6>: Cost 3 vzipr RHS, <3,4,5,6>
- 1175277674U, // <6,5,u,7>: Cost 2 vrev <5,6,7,u>
- 1175351411U, // <6,5,u,u>: Cost 2 vrev <5,6,u,u>
- 2578448486U, // <6,6,0,0>: Cost 3 vext1 <4,6,6,0>, LHS
- 1573191782U, // <6,6,0,1>: Cost 2 vext2 <4,u,6,6>, LHS
- 2686030124U, // <6,6,0,2>: Cost 3 vext3 <0,2,4,6>, <6,0,2,4>
- 3779088690U, // <6,6,0,3>: Cost 4 vext3 <3,4,5,6>, <6,0,3,1>
- 2687209788U, // <6,6,0,4>: Cost 3 vext3 <0,4,2,6>, <6,0,4,2>
- 3652194000U, // <6,6,0,5>: Cost 4 vext1 <4,6,6,0>, <5,1,7,3>
- 2254852914U, // <6,6,0,6>: Cost 3 vrev <6,6,6,0>
- 4041575734U, // <6,6,0,7>: Cost 4 vzipr <2,4,6,0>, RHS
- 1573192349U, // <6,6,0,u>: Cost 2 vext2 <4,u,6,6>, LHS
- 2646934262U, // <6,6,1,0>: Cost 3 vext2 <4,u,6,6>, <1,0,3,2>
- 2646934324U, // <6,6,1,1>: Cost 3 vext2 <4,u,6,6>, <1,1,1,1>
- 2646934422U, // <6,6,1,2>: Cost 3 vext2 <4,u,6,6>, <1,2,3,0>
- 2846785638U, // <6,6,1,3>: Cost 3 vuzpr <4,6,4,6>, LHS
- 3760951694U, // <6,6,1,4>: Cost 4 vext3 <0,4,2,6>, <6,1,4,3>
- 2646934672U, // <6,6,1,5>: Cost 3 vext2 <4,u,6,6>, <1,5,3,7>
- 2712572320U, // <6,6,1,6>: Cost 3 vext3 <4,6,4,6>, <6,1,6,3>
- 3775549865U, // <6,6,1,7>: Cost 4 vext3 <2,u,2,6>, <6,1,7,3>
- 2846785643U, // <6,6,1,u>: Cost 3 vuzpr <4,6,4,6>, LHS
- 3759772094U, // <6,6,2,0>: Cost 4 vext3 <0,2,4,6>, <6,2,0,6>
- 3704751676U, // <6,6,2,1>: Cost 4 vext2 <2,2,6,6>, <2,1,6,3>
- 2631009936U, // <6,6,2,2>: Cost 3 vext2 <2,2,6,6>, <2,2,6,6>
- 2646935206U, // <6,6,2,3>: Cost 3 vext2 <4,u,6,6>, <2,3,0,1>
- 3759772127U, // <6,6,2,4>: Cost 4 vext3 <0,2,4,6>, <6,2,4,3>
- 3704752004U, // <6,6,2,5>: Cost 4 vext2 <2,2,6,6>, <2,5,6,7>
- 2646935482U, // <6,6,2,6>: Cost 3 vext2 <4,u,6,6>, <2,6,3,7>
- 2712572410U, // <6,6,2,7>: Cost 3 vext3 <4,6,4,6>, <6,2,7,3>
- 2712572419U, // <6,6,2,u>: Cost 3 vext3 <4,6,4,6>, <6,2,u,3>
- 2646935702U, // <6,6,3,0>: Cost 3 vext2 <4,u,6,6>, <3,0,1,2>
- 3777024534U, // <6,6,3,1>: Cost 4 vext3 <3,1,4,6>, <6,3,1,4>
- 3704752453U, // <6,6,3,2>: Cost 4 vext2 <2,2,6,6>, <3,2,2,6>
- 2646935964U, // <6,6,3,3>: Cost 3 vext2 <4,u,6,6>, <3,3,3,3>
- 2705347122U, // <6,6,3,4>: Cost 3 vext3 <3,4,5,6>, <6,3,4,5>
- 3779678778U, // <6,6,3,5>: Cost 4 vext3 <3,5,4,6>, <6,3,5,4>
- 2657553069U, // <6,6,3,6>: Cost 3 vext2 <6,6,6,6>, <3,6,6,6>
- 4039609654U, // <6,6,3,7>: Cost 4 vzipr <2,1,6,3>, RHS
- 2708001366U, // <6,6,3,u>: Cost 3 vext3 <3,u,5,6>, <6,3,u,5>
- 2578481254U, // <6,6,4,0>: Cost 3 vext1 <4,6,6,4>, LHS
- 3652223734U, // <6,6,4,1>: Cost 4 vext1 <4,6,6,4>, <1,0,3,2>
- 3760951922U, // <6,6,4,2>: Cost 4 vext3 <0,4,2,6>, <6,4,2,6>
- 3779089019U, // <6,6,4,3>: Cost 4 vext3 <3,4,5,6>, <6,4,3,6>
- 1570540772U, // <6,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
- 1573195062U, // <6,6,4,5>: Cost 2 vext2 <4,u,6,6>, RHS
- 2712572560U, // <6,6,4,6>: Cost 3 vext3 <4,6,4,6>, <6,4,6,0>
- 2723410591U, // <6,6,4,7>: Cost 3 vext3 <6,4,7,6>, <6,4,7,6>
- 1573195304U, // <6,6,4,u>: Cost 2 vext2 <4,u,6,6>, <4,u,6,6>
- 3640287334U, // <6,6,5,0>: Cost 4 vext1 <2,6,6,5>, LHS
- 2646937296U, // <6,6,5,1>: Cost 3 vext2 <4,u,6,6>, <5,1,7,3>
- 3640289235U, // <6,6,5,2>: Cost 4 vext1 <2,6,6,5>, <2,6,6,5>
- 3720679279U, // <6,6,5,3>: Cost 4 vext2 <4,u,6,6>, <5,3,7,0>
- 2646937542U, // <6,6,5,4>: Cost 3 vext2 <4,u,6,6>, <5,4,7,6>
- 2646937604U, // <6,6,5,5>: Cost 3 vext2 <4,u,6,6>, <5,5,5,5>
- 2646937698U, // <6,6,5,6>: Cost 3 vext2 <4,u,6,6>, <5,6,7,0>
- 2846788918U, // <6,6,5,7>: Cost 3 vuzpr <4,6,4,6>, RHS
- 2846788919U, // <6,6,5,u>: Cost 3 vuzpr <4,6,4,6>, RHS
- 1516699750U, // <6,6,6,0>: Cost 2 vext1 <6,6,6,6>, LHS
- 2590442230U, // <6,6,6,1>: Cost 3 vext1 <6,6,6,6>, <1,0,3,2>
- 2646938106U, // <6,6,6,2>: Cost 3 vext2 <4,u,6,6>, <6,2,7,3>
- 2590443670U, // <6,6,6,3>: Cost 3 vext1 <6,6,6,6>, <3,0,1,2>
- 1516703030U, // <6,6,6,4>: Cost 2 vext1 <6,6,6,6>, RHS
- 2590445264U, // <6,6,6,5>: Cost 3 vext1 <6,6,6,6>, <5,1,7,3>
- 296144182U, // <6,6,6,6>: Cost 1 vdup2 RHS
- 2712572738U, // <6,6,6,7>: Cost 3 vext3 <4,6,4,6>, <6,6,7,7>
- 296144182U, // <6,6,6,u>: Cost 1 vdup2 RHS
- 2566561894U, // <6,6,7,0>: Cost 3 vext1 <2,6,6,7>, LHS
- 3634332924U, // <6,6,7,1>: Cost 4 vext1 <1,6,6,7>, <1,6,6,7>
- 2566563797U, // <6,6,7,2>: Cost 3 vext1 <2,6,6,7>, <2,6,6,7>
- 2584480258U, // <6,6,7,3>: Cost 3 vext1 <5,6,6,7>, <3,4,5,6>
- 2566565174U, // <6,6,7,4>: Cost 3 vext1 <2,6,6,7>, RHS
- 2717438846U, // <6,6,7,5>: Cost 3 vext3 <5,4,7,6>, <6,7,5,4>
- 2980500280U, // <6,6,7,6>: Cost 3 vzipr RHS, <6,6,6,6>
- 1906756918U, // <6,6,7,7>: Cost 2 vzipr RHS, RHS
- 1906756919U, // <6,6,7,u>: Cost 2 vzipr RHS, RHS
- 1516699750U, // <6,6,u,0>: Cost 2 vext1 <6,6,6,6>, LHS
- 1573197614U, // <6,6,u,1>: Cost 2 vext2 <4,u,6,6>, LHS
- 2566571990U, // <6,6,u,2>: Cost 3 vext1 <2,6,6,u>, <2,6,6,u>
- 2846786205U, // <6,6,u,3>: Cost 3 vuzpr <4,6,4,6>, LHS
- 1516703030U, // <6,6,u,4>: Cost 2 vext1 <6,6,6,6>, RHS
- 1573197978U, // <6,6,u,5>: Cost 2 vext2 <4,u,6,6>, RHS
- 296144182U, // <6,6,u,6>: Cost 1 vdup2 RHS
- 1906765110U, // <6,6,u,7>: Cost 2 vzipr RHS, RHS
- 296144182U, // <6,6,u,u>: Cost 1 vdup2 RHS
- 1571209216U, // <6,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497467494U, // <6,7,0,1>: Cost 1 vext2 RHS, LHS
- 1571209380U, // <6,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2644951292U, // <6,7,0,3>: Cost 3 vext2 RHS, <0,3,1,0>
- 1571209554U, // <6,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1510756450U, // <6,7,0,5>: Cost 2 vext1 <5,6,7,0>, <5,6,7,0>
- 2644951542U, // <6,7,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
- 2584499194U, // <6,7,0,7>: Cost 3 vext1 <5,6,7,0>, <7,0,1,2>
- 497468061U, // <6,7,0,u>: Cost 1 vext2 RHS, LHS
- 1571209974U, // <6,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571210036U, // <6,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1571210134U, // <6,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
- 1571210200U, // <6,7,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
- 2644952098U, // <6,7,1,4>: Cost 3 vext2 RHS, <1,4,0,5>
- 1571210384U, // <6,7,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
- 2644952271U, // <6,7,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
- 2578535418U, // <6,7,1,7>: Cost 3 vext1 <4,6,7,1>, <7,0,1,2>
- 1571210605U, // <6,7,1,u>: Cost 2 vext2 RHS, <1,u,1,3>
- 2644952509U, // <6,7,2,0>: Cost 3 vext2 RHS, <2,0,1,2>
- 2644952582U, // <6,7,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
- 1571210856U, // <6,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571210918U, // <6,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 2644952828U, // <6,7,2,4>: Cost 3 vext2 RHS, <2,4,0,6>
- 2633009028U, // <6,7,2,5>: Cost 3 vext2 <2,5,6,7>, <2,5,6,7>
- 1571211194U, // <6,7,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
- 2668840938U, // <6,7,2,7>: Cost 3 vext2 RHS, <2,7,0,1>
- 1571211323U, // <6,7,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
- 1571211414U, // <6,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2644953311U, // <6,7,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
- 2644953390U, // <6,7,3,2>: Cost 3 vext2 RHS, <3,2,0,1>
- 1571211676U, // <6,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571211778U, // <6,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 2644953648U, // <6,7,3,5>: Cost 3 vext2 RHS, <3,5,1,7>
- 2644953720U, // <6,7,3,6>: Cost 3 vext2 RHS, <3,6,0,7>
- 2644953795U, // <6,7,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
- 1571212062U, // <6,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1573202834U, // <6,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2644954058U, // <6,7,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
- 2644954166U, // <6,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
- 2644954258U, // <6,7,4,3>: Cost 3 vext2 RHS, <4,3,6,5>
- 1571212496U, // <6,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497470774U, // <6,7,4,5>: Cost 1 vext2 RHS, RHS
- 1573203316U, // <6,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 2646281688U, // <6,7,4,7>: Cost 3 vext2 <4,7,6,7>, <4,7,6,7>
- 497471017U, // <6,7,4,u>: Cost 1 vext2 RHS, RHS
- 2644954696U, // <6,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
- 1573203664U, // <6,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2644954878U, // <6,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
- 2644954991U, // <6,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
- 1571213254U, // <6,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571213316U, // <6,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1571213410U, // <6,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
- 1573204136U, // <6,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1573204217U, // <6,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
- 2644955425U, // <6,7,6,0>: Cost 3 vext2 RHS, <6,0,1,2>
- 2644955561U, // <6,7,6,1>: Cost 3 vext2 RHS, <6,1,7,3>
- 1573204474U, // <6,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2644955698U, // <6,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
- 2644955789U, // <6,7,6,4>: Cost 3 vext2 RHS, <6,4,5,6>
- 2644955889U, // <6,7,6,5>: Cost 3 vext2 RHS, <6,5,7,7>
- 1571214136U, // <6,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
- 1571214158U, // <6,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 1573204895U, // <6,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
- 1573204986U, // <6,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
- 2572608656U, // <6,7,7,1>: Cost 3 vext1 <3,6,7,7>, <1,5,3,7>
- 2644956362U, // <6,7,7,2>: Cost 3 vext2 RHS, <7,2,6,3>
- 2572610231U, // <6,7,7,3>: Cost 3 vext1 <3,6,7,7>, <3,6,7,7>
- 1573205350U, // <6,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
- 2646947220U, // <6,7,7,5>: Cost 3 vext2 RHS, <7,5,1,7>
- 1516786498U, // <6,7,7,6>: Cost 2 vext1 <6,6,7,7>, <6,6,7,7>
- 1571214956U, // <6,7,7,7>: Cost 2 vext2 RHS, <7,7,7,7>
- 1573205634U, // <6,7,7,u>: Cost 2 vext2 RHS, <7,u,1,2>
- 1571215059U, // <6,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
- 497473326U, // <6,7,u,1>: Cost 1 vext2 RHS, LHS
- 1571215237U, // <6,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
- 1571215292U, // <6,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1571215423U, // <6,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
- 497473690U, // <6,7,u,5>: Cost 1 vext2 RHS, RHS
- 1571215568U, // <6,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
- 1573206272U, // <6,7,u,7>: Cost 2 vext2 RHS, <u,7,0,1>
- 497473893U, // <6,7,u,u>: Cost 1 vext2 RHS, LHS
- 1571217408U, // <6,u,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497475686U, // <6,u,0,1>: Cost 1 vext2 RHS, LHS
- 1571217572U, // <6,u,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2689865445U, // <6,u,0,3>: Cost 3 vext3 <0,u,2,6>, <u,0,3,2>
- 1571217746U, // <6,u,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1510830187U, // <6,u,0,5>: Cost 2 vext1 <5,6,u,0>, <5,6,u,0>
- 2644959734U, // <6,u,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
- 1193130221U, // <6,u,0,7>: Cost 2 vrev <u,6,7,0>
- 497476253U, // <6,u,0,u>: Cost 1 vext2 RHS, LHS
- 1571218166U, // <6,u,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571218228U, // <6,u,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1612289838U, // <6,u,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 1571218392U, // <6,u,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
- 2566663478U, // <6,u,1,4>: Cost 3 vext1 <2,6,u,1>, RHS
- 1571218576U, // <6,u,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
- 2644960463U, // <6,u,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
- 2717439835U, // <6,u,1,7>: Cost 3 vext3 <5,4,7,6>, <u,1,7,3>
- 1612289892U, // <6,u,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 1504870502U, // <6,u,2,0>: Cost 2 vext1 <4,6,u,2>, LHS
- 2644960774U, // <6,u,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
- 1571219048U, // <6,u,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571219110U, // <6,u,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 1504873782U, // <6,u,2,4>: Cost 2 vext1 <4,6,u,2>, RHS
- 2633017221U, // <6,u,2,5>: Cost 3 vext2 <2,5,6,u>, <2,5,6,u>
- 1571219386U, // <6,u,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
- 2712573868U, // <6,u,2,7>: Cost 3 vext3 <4,6,4,6>, <u,2,7,3>
- 1571219515U, // <6,u,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
- 1571219606U, // <6,u,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2644961503U, // <6,u,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
- 2566678499U, // <6,u,3,2>: Cost 3 vext1 <2,6,u,3>, <2,6,u,3>
- 1571219868U, // <6,u,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571219970U, // <6,u,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 2689865711U, // <6,u,3,5>: Cost 3 vext3 <0,u,2,6>, <u,3,5,7>
- 2708002806U, // <6,u,3,6>: Cost 3 vext3 <3,u,5,6>, <u,3,6,5>
- 2644961987U, // <6,u,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
- 1571220254U, // <6,u,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1571220370U, // <6,u,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2644962250U, // <6,u,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
- 1661245476U, // <6,u,4,2>: Cost 2 vext3 <u,4,2,6>, <u,4,2,6>
- 2686031917U, // <6,u,4,3>: Cost 3 vext3 <0,2,4,6>, <u,4,3,6>
- 1571220688U, // <6,u,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497478967U, // <6,u,4,5>: Cost 1 vext2 RHS, RHS
- 1571220852U, // <6,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 1661614161U, // <6,u,4,7>: Cost 2 vext3 <u,4,7,6>, <u,4,7,6>
- 497479209U, // <6,u,4,u>: Cost 1 vext2 RHS, RHS
- 2566692966U, // <6,u,5,0>: Cost 3 vext1 <2,6,u,5>, LHS
- 1571221200U, // <6,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2566694885U, // <6,u,5,2>: Cost 3 vext1 <2,6,u,5>, <2,6,u,5>
- 2689865855U, // <6,u,5,3>: Cost 3 vext3 <0,u,2,6>, <u,5,3,7>
- 1571221446U, // <6,u,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571221508U, // <6,u,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1612290202U, // <6,u,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 1571221672U, // <6,u,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1612290220U, // <6,u,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 1504903270U, // <6,u,6,0>: Cost 2 vext1 <4,6,u,6>, LHS
- 2644963752U, // <6,u,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
- 1571222010U, // <6,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2686032080U, // <6,u,6,3>: Cost 3 vext3 <0,2,4,6>, <u,6,3,7>
- 1504906550U, // <6,u,6,4>: Cost 2 vext1 <4,6,u,6>, RHS
- 2644964079U, // <6,u,6,5>: Cost 3 vext2 RHS, <6,5,7,5>
- 296144182U, // <6,u,6,6>: Cost 1 vdup2 RHS
- 1571222350U, // <6,u,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 296144182U, // <6,u,6,u>: Cost 1 vdup2 RHS
- 1492967526U, // <6,u,7,0>: Cost 2 vext1 <2,6,u,7>, LHS
- 2560738574U, // <6,u,7,1>: Cost 3 vext1 <1,6,u,7>, <1,6,u,7>
- 1492969447U, // <6,u,7,2>: Cost 2 vext1 <2,6,u,7>, <2,6,u,7>
- 1906753692U, // <6,u,7,3>: Cost 2 vzipr RHS, LHS
- 1492970806U, // <6,u,7,4>: Cost 2 vext1 <2,6,u,7>, RHS
- 2980495761U, // <6,u,7,5>: Cost 3 vzipr RHS, <0,4,u,5>
- 1516860235U, // <6,u,7,6>: Cost 2 vext1 <6,6,u,7>, <6,6,u,7>
- 1906756936U, // <6,u,7,7>: Cost 2 vzipr RHS, RHS
- 1492973358U, // <6,u,7,u>: Cost 2 vext1 <2,6,u,7>, LHS
- 1492975718U, // <6,u,u,0>: Cost 2 vext1 <2,6,u,u>, LHS
- 497481518U, // <6,u,u,1>: Cost 1 vext2 RHS, LHS
- 1612290405U, // <6,u,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 1571223484U, // <6,u,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1492978998U, // <6,u,u,4>: Cost 2 vext1 <2,6,u,u>, RHS
- 497481882U, // <6,u,u,5>: Cost 1 vext2 RHS, RHS
- 296144182U, // <6,u,u,6>: Cost 1 vdup2 RHS
- 1906765128U, // <6,u,u,7>: Cost 2 vzipr RHS, RHS
- 497482085U, // <6,u,u,u>: Cost 1 vext2 RHS, LHS
- 1638318080U, // <7,0,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
- 1638318090U, // <7,0,0,1>: Cost 2 vext3 RHS, <0,0,1,1>
- 1638318100U, // <7,0,0,2>: Cost 2 vext3 RHS, <0,0,2,2>
- 3646442178U, // <7,0,0,3>: Cost 4 vext1 <3,7,0,0>, <3,7,0,0>
- 2712059941U, // <7,0,0,4>: Cost 3 vext3 RHS, <0,0,4,1>
- 2651603364U, // <7,0,0,5>: Cost 3 vext2 <5,6,7,0>, <0,5,1,6>
- 2590618445U, // <7,0,0,6>: Cost 3 vext1 <6,7,0,0>, <6,7,0,0>
- 3785801798U, // <7,0,0,7>: Cost 4 vext3 RHS, <0,0,7,7>
- 1638318153U, // <7,0,0,u>: Cost 2 vext3 RHS, <0,0,u,1>
- 1516879974U, // <7,0,1,0>: Cost 2 vext1 <6,7,0,1>, LHS
- 2693922911U, // <7,0,1,1>: Cost 3 vext3 <1,5,3,7>, <0,1,1,5>
- 564576358U, // <7,0,1,2>: Cost 1 vext3 RHS, LHS
- 2638996480U, // <7,0,1,3>: Cost 3 vext2 <3,5,7,0>, <1,3,5,7>
- 1516883254U, // <7,0,1,4>: Cost 2 vext1 <6,7,0,1>, RHS
- 2649613456U, // <7,0,1,5>: Cost 3 vext2 <5,3,7,0>, <1,5,3,7>
- 1516884814U, // <7,0,1,6>: Cost 2 vext1 <6,7,0,1>, <6,7,0,1>
- 2590626808U, // <7,0,1,7>: Cost 3 vext1 <6,7,0,1>, <7,0,1,0>
- 564576412U, // <7,0,1,u>: Cost 1 vext3 RHS, LHS
- 1638318244U, // <7,0,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
- 2692743344U, // <7,0,2,1>: Cost 3 vext3 <1,3,5,7>, <0,2,1,5>
- 2712060084U, // <7,0,2,2>: Cost 3 vext3 RHS, <0,2,2,0>
- 2712060094U, // <7,0,2,3>: Cost 3 vext3 RHS, <0,2,3,1>
- 1638318284U, // <7,0,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
- 2712060118U, // <7,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
- 2651604922U, // <7,0,2,6>: Cost 3 vext2 <5,6,7,0>, <2,6,3,7>
- 2686255336U, // <7,0,2,7>: Cost 3 vext3 <0,2,7,7>, <0,2,7,7>
- 1638318316U, // <7,0,2,u>: Cost 2 vext3 RHS, <0,2,u,2>
- 2651605142U, // <7,0,3,0>: Cost 3 vext2 <5,6,7,0>, <3,0,1,2>
- 2712060156U, // <7,0,3,1>: Cost 3 vext3 RHS, <0,3,1,0>
- 2712060165U, // <7,0,3,2>: Cost 3 vext3 RHS, <0,3,2,0>
- 2651605404U, // <7,0,3,3>: Cost 3 vext2 <5,6,7,0>, <3,3,3,3>
- 2651605506U, // <7,0,3,4>: Cost 3 vext2 <5,6,7,0>, <3,4,5,6>
- 2638998111U, // <7,0,3,5>: Cost 3 vext2 <3,5,7,0>, <3,5,7,0>
- 2639661744U, // <7,0,3,6>: Cost 3 vext2 <3,6,7,0>, <3,6,7,0>
- 3712740068U, // <7,0,3,7>: Cost 4 vext2 <3,5,7,0>, <3,7,3,7>
- 2640989010U, // <7,0,3,u>: Cost 3 vext2 <3,u,7,0>, <3,u,7,0>
- 2712060232U, // <7,0,4,0>: Cost 3 vext3 RHS, <0,4,0,4>
- 1638318418U, // <7,0,4,1>: Cost 2 vext3 RHS, <0,4,1,5>
- 1638318428U, // <7,0,4,2>: Cost 2 vext3 RHS, <0,4,2,6>
- 3646474950U, // <7,0,4,3>: Cost 4 vext1 <3,7,0,4>, <3,7,0,4>
- 2712060270U, // <7,0,4,4>: Cost 3 vext3 RHS, <0,4,4,6>
- 1577864502U, // <7,0,4,5>: Cost 2 vext2 <5,6,7,0>, RHS
- 2651606388U, // <7,0,4,6>: Cost 3 vext2 <5,6,7,0>, <4,6,4,6>
- 3787792776U, // <7,0,4,7>: Cost 4 vext3 RHS, <0,4,7,5>
- 1638318481U, // <7,0,4,u>: Cost 2 vext3 RHS, <0,4,u,5>
- 2590654566U, // <7,0,5,0>: Cost 3 vext1 <6,7,0,5>, LHS
- 2651606736U, // <7,0,5,1>: Cost 3 vext2 <5,6,7,0>, <5,1,7,3>
- 2712060334U, // <7,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
- 2649616239U, // <7,0,5,3>: Cost 3 vext2 <5,3,7,0>, <5,3,7,0>
- 2651606982U, // <7,0,5,4>: Cost 3 vext2 <5,6,7,0>, <5,4,7,6>
- 2651607044U, // <7,0,5,5>: Cost 3 vext2 <5,6,7,0>, <5,5,5,5>
- 1577865314U, // <7,0,5,6>: Cost 2 vext2 <5,6,7,0>, <5,6,7,0>
- 2651607208U, // <7,0,5,7>: Cost 3 vext2 <5,6,7,0>, <5,7,5,7>
- 1579192580U, // <7,0,5,u>: Cost 2 vext2 <5,u,7,0>, <5,u,7,0>
- 2688393709U, // <7,0,6,0>: Cost 3 vext3 <0,6,0,7>, <0,6,0,7>
- 2712060406U, // <7,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
- 2688541183U, // <7,0,6,2>: Cost 3 vext3 <0,6,2,7>, <0,6,2,7>
- 2655588936U, // <7,0,6,3>: Cost 3 vext2 <6,3,7,0>, <6,3,7,0>
- 3762430481U, // <7,0,6,4>: Cost 4 vext3 <0,6,4,7>, <0,6,4,7>
- 2651607730U, // <7,0,6,5>: Cost 3 vext2 <5,6,7,0>, <6,5,0,7>
- 2651607864U, // <7,0,6,6>: Cost 3 vext2 <5,6,7,0>, <6,6,6,6>
- 2651607886U, // <7,0,6,7>: Cost 3 vext2 <5,6,7,0>, <6,7,0,1>
- 2688983605U, // <7,0,6,u>: Cost 3 vext3 <0,6,u,7>, <0,6,u,7>
- 2651608058U, // <7,0,7,0>: Cost 3 vext2 <5,6,7,0>, <7,0,1,2>
- 2932703334U, // <7,0,7,1>: Cost 3 vzipl <7,7,7,7>, LHS
- 3066921062U, // <7,0,7,2>: Cost 3 vtrnl <7,7,7,7>, LHS
- 3712742678U, // <7,0,7,3>: Cost 4 vext2 <3,5,7,0>, <7,3,5,7>
- 2651608422U, // <7,0,7,4>: Cost 3 vext2 <5,6,7,0>, <7,4,5,6>
- 2651608513U, // <7,0,7,5>: Cost 3 vext2 <5,6,7,0>, <7,5,6,7>
- 2663552532U, // <7,0,7,6>: Cost 3 vext2 <7,6,7,0>, <7,6,7,0>
- 2651608684U, // <7,0,7,7>: Cost 3 vext2 <5,6,7,0>, <7,7,7,7>
- 2651608706U, // <7,0,7,u>: Cost 3 vext2 <5,6,7,0>, <7,u,1,2>
- 1638318730U, // <7,0,u,0>: Cost 2 vext3 RHS, <0,u,0,2>
- 1638318738U, // <7,0,u,1>: Cost 2 vext3 RHS, <0,u,1,1>
- 564576925U, // <7,0,u,2>: Cost 1 vext3 RHS, LHS
- 2572765898U, // <7,0,u,3>: Cost 3 vext1 <3,7,0,u>, <3,7,0,u>
- 1638318770U, // <7,0,u,4>: Cost 2 vext3 RHS, <0,u,4,6>
- 1577867418U, // <7,0,u,5>: Cost 2 vext2 <5,6,7,0>, RHS
- 1516942165U, // <7,0,u,6>: Cost 2 vext1 <6,7,0,u>, <6,7,0,u>
- 2651609344U, // <7,0,u,7>: Cost 3 vext2 <5,6,7,0>, <u,7,0,1>
- 564576979U, // <7,0,u,u>: Cost 1 vext3 RHS, LHS
- 2590687334U, // <7,1,0,0>: Cost 3 vext1 <6,7,1,0>, LHS
- 2639003750U, // <7,1,0,1>: Cost 3 vext2 <3,5,7,1>, LHS
- 2793357414U, // <7,1,0,2>: Cost 3 vuzpl <7,0,1,2>, LHS
- 1638318838U, // <7,1,0,3>: Cost 2 vext3 RHS, <1,0,3,2>
- 2590690614U, // <7,1,0,4>: Cost 3 vext1 <6,7,1,0>, RHS
- 2712060679U, // <7,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
- 2590692182U, // <7,1,0,6>: Cost 3 vext1 <6,7,1,0>, <6,7,1,0>
- 3785802521U, // <7,1,0,7>: Cost 4 vext3 RHS, <1,0,7,1>
- 1638318883U, // <7,1,0,u>: Cost 2 vext3 RHS, <1,0,u,2>
- 2712060715U, // <7,1,1,0>: Cost 3 vext3 RHS, <1,1,0,1>
- 1638318900U, // <7,1,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
- 3774300994U, // <7,1,1,2>: Cost 4 vext3 <2,6,3,7>, <1,1,2,6>
- 1638318920U, // <7,1,1,3>: Cost 2 vext3 RHS, <1,1,3,3>
- 2712060755U, // <7,1,1,4>: Cost 3 vext3 RHS, <1,1,4,5>
- 2691416926U, // <7,1,1,5>: Cost 3 vext3 <1,1,5,7>, <1,1,5,7>
- 2590700375U, // <7,1,1,6>: Cost 3 vext1 <6,7,1,1>, <6,7,1,1>
- 3765158766U, // <7,1,1,7>: Cost 4 vext3 <1,1,5,7>, <1,1,7,5>
- 1638318965U, // <7,1,1,u>: Cost 2 vext3 RHS, <1,1,u,3>
- 2712060796U, // <7,1,2,0>: Cost 3 vext3 RHS, <1,2,0,1>
- 2712060807U, // <7,1,2,1>: Cost 3 vext3 RHS, <1,2,1,3>
- 3712747112U, // <7,1,2,2>: Cost 4 vext2 <3,5,7,1>, <2,2,2,2>
- 1638318998U, // <7,1,2,3>: Cost 2 vext3 RHS, <1,2,3,0>
- 2712060836U, // <7,1,2,4>: Cost 3 vext3 RHS, <1,2,4,5>
- 2712060843U, // <7,1,2,5>: Cost 3 vext3 RHS, <1,2,5,3>
- 2590708568U, // <7,1,2,6>: Cost 3 vext1 <6,7,1,2>, <6,7,1,2>
- 2735948730U, // <7,1,2,7>: Cost 3 vext3 RHS, <1,2,7,0>
- 1638319043U, // <7,1,2,u>: Cost 2 vext3 RHS, <1,2,u,0>
- 2712060876U, // <7,1,3,0>: Cost 3 vext3 RHS, <1,3,0,0>
- 1638319064U, // <7,1,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
- 2712060894U, // <7,1,3,2>: Cost 3 vext3 RHS, <1,3,2,0>
- 2692596718U, // <7,1,3,3>: Cost 3 vext3 <1,3,3,7>, <1,3,3,7>
- 2712060917U, // <7,1,3,4>: Cost 3 vext3 RHS, <1,3,4,5>
- 1619002368U, // <7,1,3,5>: Cost 2 vext3 <1,3,5,7>, <1,3,5,7>
- 2692817929U, // <7,1,3,6>: Cost 3 vext3 <1,3,6,7>, <1,3,6,7>
- 2735948814U, // <7,1,3,7>: Cost 3 vext3 RHS, <1,3,7,3>
- 1619223579U, // <7,1,3,u>: Cost 2 vext3 <1,3,u,7>, <1,3,u,7>
- 2712060962U, // <7,1,4,0>: Cost 3 vext3 RHS, <1,4,0,5>
- 2712060971U, // <7,1,4,1>: Cost 3 vext3 RHS, <1,4,1,5>
- 2712060980U, // <7,1,4,2>: Cost 3 vext3 RHS, <1,4,2,5>
- 2712060989U, // <7,1,4,3>: Cost 3 vext3 RHS, <1,4,3,5>
- 3785802822U, // <7,1,4,4>: Cost 4 vext3 RHS, <1,4,4,5>
- 2639007030U, // <7,1,4,5>: Cost 3 vext2 <3,5,7,1>, RHS
- 2645642634U, // <7,1,4,6>: Cost 3 vext2 <4,6,7,1>, <4,6,7,1>
- 3719384520U, // <7,1,4,7>: Cost 4 vext2 <4,6,7,1>, <4,7,5,0>
- 2639007273U, // <7,1,4,u>: Cost 3 vext2 <3,5,7,1>, RHS
- 2572812390U, // <7,1,5,0>: Cost 3 vext1 <3,7,1,5>, LHS
- 2693776510U, // <7,1,5,1>: Cost 3 vext3 <1,5,1,7>, <1,5,1,7>
- 3774301318U, // <7,1,5,2>: Cost 4 vext3 <2,6,3,7>, <1,5,2,6>
- 1620182160U, // <7,1,5,3>: Cost 2 vext3 <1,5,3,7>, <1,5,3,7>
- 2572815670U, // <7,1,5,4>: Cost 3 vext1 <3,7,1,5>, RHS
- 3766486178U, // <7,1,5,5>: Cost 4 vext3 <1,3,5,7>, <1,5,5,7>
- 2651615331U, // <7,1,5,6>: Cost 3 vext2 <5,6,7,1>, <5,6,7,1>
- 2652278964U, // <7,1,5,7>: Cost 3 vext2 <5,7,7,1>, <5,7,7,1>
- 1620550845U, // <7,1,5,u>: Cost 2 vext3 <1,5,u,7>, <1,5,u,7>
- 3768108230U, // <7,1,6,0>: Cost 4 vext3 <1,6,0,7>, <1,6,0,7>
- 2694440143U, // <7,1,6,1>: Cost 3 vext3 <1,6,1,7>, <1,6,1,7>
- 2712061144U, // <7,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
- 2694587617U, // <7,1,6,3>: Cost 3 vext3 <1,6,3,7>, <1,6,3,7>
- 3768403178U, // <7,1,6,4>: Cost 4 vext3 <1,6,4,7>, <1,6,4,7>
- 2694735091U, // <7,1,6,5>: Cost 3 vext3 <1,6,5,7>, <1,6,5,7>
- 3768550652U, // <7,1,6,6>: Cost 4 vext3 <1,6,6,7>, <1,6,6,7>
- 2652279630U, // <7,1,6,7>: Cost 3 vext2 <5,7,7,1>, <6,7,0,1>
- 2694956302U, // <7,1,6,u>: Cost 3 vext3 <1,6,u,7>, <1,6,u,7>
- 2645644282U, // <7,1,7,0>: Cost 3 vext2 <4,6,7,1>, <7,0,1,2>
- 2859062094U, // <7,1,7,1>: Cost 3 vuzpr <6,7,0,1>, <6,7,0,1>
- 3779462437U, // <7,1,7,2>: Cost 4 vext3 <3,5,1,7>, <1,7,2,3>
- 3121938534U, // <7,1,7,3>: Cost 3 vtrnr <5,7,5,7>, LHS
- 2554916150U, // <7,1,7,4>: Cost 3 vext1 <0,7,1,7>, RHS
- 3769140548U, // <7,1,7,5>: Cost 4 vext3 <1,7,5,7>, <1,7,5,7>
- 3726022164U, // <7,1,7,6>: Cost 4 vext2 <5,7,7,1>, <7,6,7,0>
- 2554918508U, // <7,1,7,7>: Cost 3 vext1 <0,7,1,7>, <7,7,7,7>
- 3121938539U, // <7,1,7,u>: Cost 3 vtrnr <5,7,5,7>, LHS
- 2572836966U, // <7,1,u,0>: Cost 3 vext1 <3,7,1,u>, LHS
- 1638319469U, // <7,1,u,1>: Cost 2 vext3 RHS, <1,u,1,3>
- 2712061299U, // <7,1,u,2>: Cost 3 vext3 RHS, <1,u,2,0>
- 1622173059U, // <7,1,u,3>: Cost 2 vext3 <1,u,3,7>, <1,u,3,7>
- 2572840246U, // <7,1,u,4>: Cost 3 vext1 <3,7,1,u>, RHS
- 1622320533U, // <7,1,u,5>: Cost 2 vext3 <1,u,5,7>, <1,u,5,7>
- 2696136094U, // <7,1,u,6>: Cost 3 vext3 <1,u,6,7>, <1,u,6,7>
- 2859060777U, // <7,1,u,7>: Cost 3 vuzpr <6,7,0,1>, RHS
- 1622541744U, // <7,1,u,u>: Cost 2 vext3 <1,u,u,7>, <1,u,u,7>
- 2712061364U, // <7,2,0,0>: Cost 3 vext3 RHS, <2,0,0,2>
- 2712061373U, // <7,2,0,1>: Cost 3 vext3 RHS, <2,0,1,2>
- 2712061380U, // <7,2,0,2>: Cost 3 vext3 RHS, <2,0,2,0>
- 2712061389U, // <7,2,0,3>: Cost 3 vext3 RHS, <2,0,3,0>
- 2712061404U, // <7,2,0,4>: Cost 3 vext3 RHS, <2,0,4,6>
- 2696725990U, // <7,2,0,5>: Cost 3 vext3 <2,0,5,7>, <2,0,5,7>
- 2712061417U, // <7,2,0,6>: Cost 3 vext3 RHS, <2,0,6,1>
- 3785803251U, // <7,2,0,7>: Cost 4 vext3 RHS, <2,0,7,2>
- 2696947201U, // <7,2,0,u>: Cost 3 vext3 <2,0,u,7>, <2,0,u,7>
- 2712061446U, // <7,2,1,0>: Cost 3 vext3 RHS, <2,1,0,3>
- 3785803276U, // <7,2,1,1>: Cost 4 vext3 RHS, <2,1,1,0>
- 3785803285U, // <7,2,1,2>: Cost 4 vext3 RHS, <2,1,2,0>
- 2712061471U, // <7,2,1,3>: Cost 3 vext3 RHS, <2,1,3,1>
- 2712061482U, // <7,2,1,4>: Cost 3 vext3 RHS, <2,1,4,3>
- 3766486576U, // <7,2,1,5>: Cost 4 vext3 <1,3,5,7>, <2,1,5,0>
- 2712061500U, // <7,2,1,6>: Cost 3 vext3 RHS, <2,1,6,3>
- 2602718850U, // <7,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
- 2712061516U, // <7,2,1,u>: Cost 3 vext3 RHS, <2,1,u,1>
- 2712061525U, // <7,2,2,0>: Cost 3 vext3 RHS, <2,2,0,1>
- 2712061536U, // <7,2,2,1>: Cost 3 vext3 RHS, <2,2,1,3>
- 1638319720U, // <7,2,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1638319730U, // <7,2,2,3>: Cost 2 vext3 RHS, <2,2,3,3>
- 2712061565U, // <7,2,2,4>: Cost 3 vext3 RHS, <2,2,4,5>
- 2698053256U, // <7,2,2,5>: Cost 3 vext3 <2,2,5,7>, <2,2,5,7>
- 2712061584U, // <7,2,2,6>: Cost 3 vext3 RHS, <2,2,6,6>
- 3771795096U, // <7,2,2,7>: Cost 4 vext3 <2,2,5,7>, <2,2,7,5>
- 1638319775U, // <7,2,2,u>: Cost 2 vext3 RHS, <2,2,u,3>
- 1638319782U, // <7,2,3,0>: Cost 2 vext3 RHS, <2,3,0,1>
- 2693924531U, // <7,2,3,1>: Cost 3 vext3 <1,5,3,7>, <2,3,1,5>
- 2700560061U, // <7,2,3,2>: Cost 3 vext3 <2,6,3,7>, <2,3,2,6>
- 2693924551U, // <7,2,3,3>: Cost 3 vext3 <1,5,3,7>, <2,3,3,7>
- 1638319822U, // <7,2,3,4>: Cost 2 vext3 RHS, <2,3,4,5>
- 2698716889U, // <7,2,3,5>: Cost 3 vext3 <2,3,5,7>, <2,3,5,7>
- 2712061665U, // <7,2,3,6>: Cost 3 vext3 RHS, <2,3,6,6>
- 2735949540U, // <7,2,3,7>: Cost 3 vext3 RHS, <2,3,7,0>
- 1638319854U, // <7,2,3,u>: Cost 2 vext3 RHS, <2,3,u,1>
- 2712061692U, // <7,2,4,0>: Cost 3 vext3 RHS, <2,4,0,6>
- 2712061698U, // <7,2,4,1>: Cost 3 vext3 RHS, <2,4,1,3>
- 2712061708U, // <7,2,4,2>: Cost 3 vext3 RHS, <2,4,2,4>
- 2712061718U, // <7,2,4,3>: Cost 3 vext3 RHS, <2,4,3,5>
- 2712061728U, // <7,2,4,4>: Cost 3 vext3 RHS, <2,4,4,6>
- 2699380522U, // <7,2,4,5>: Cost 3 vext3 <2,4,5,7>, <2,4,5,7>
- 2712061740U, // <7,2,4,6>: Cost 3 vext3 RHS, <2,4,6,0>
- 3809691445U, // <7,2,4,7>: Cost 4 vext3 RHS, <2,4,7,0>
- 2699601733U, // <7,2,4,u>: Cost 3 vext3 <2,4,u,7>, <2,4,u,7>
- 2699675470U, // <7,2,5,0>: Cost 3 vext3 <2,5,0,7>, <2,5,0,7>
- 3766486867U, // <7,2,5,1>: Cost 4 vext3 <1,3,5,7>, <2,5,1,3>
- 2699822944U, // <7,2,5,2>: Cost 3 vext3 <2,5,2,7>, <2,5,2,7>
- 2692745065U, // <7,2,5,3>: Cost 3 vext3 <1,3,5,7>, <2,5,3,7>
- 2699970418U, // <7,2,5,4>: Cost 3 vext3 <2,5,4,7>, <2,5,4,7>
- 3766486907U, // <7,2,5,5>: Cost 4 vext3 <1,3,5,7>, <2,5,5,7>
- 2700117892U, // <7,2,5,6>: Cost 3 vext3 <2,5,6,7>, <2,5,6,7>
- 3771795334U, // <7,2,5,7>: Cost 4 vext3 <2,2,5,7>, <2,5,7,0>
- 2692745110U, // <7,2,5,u>: Cost 3 vext3 <1,3,5,7>, <2,5,u,7>
- 2572894310U, // <7,2,6,0>: Cost 3 vext1 <3,7,2,6>, LHS
- 2712061860U, // <7,2,6,1>: Cost 3 vext3 RHS, <2,6,1,3>
- 2700486577U, // <7,2,6,2>: Cost 3 vext3 <2,6,2,7>, <2,6,2,7>
- 1626818490U, // <7,2,6,3>: Cost 2 vext3 <2,6,3,7>, <2,6,3,7>
- 2572897590U, // <7,2,6,4>: Cost 3 vext1 <3,7,2,6>, RHS
- 2700707788U, // <7,2,6,5>: Cost 3 vext3 <2,6,5,7>, <2,6,5,7>
- 2700781525U, // <7,2,6,6>: Cost 3 vext3 <2,6,6,7>, <2,6,6,7>
- 3774597086U, // <7,2,6,7>: Cost 4 vext3 <2,6,7,7>, <2,6,7,7>
- 1627187175U, // <7,2,6,u>: Cost 2 vext3 <2,6,u,7>, <2,6,u,7>
- 2735949802U, // <7,2,7,0>: Cost 3 vext3 RHS, <2,7,0,1>
- 3780200434U, // <7,2,7,1>: Cost 4 vext3 <3,6,2,7>, <2,7,1,0>
- 3773564928U, // <7,2,7,2>: Cost 4 vext3 <2,5,2,7>, <2,7,2,5>
- 2986541158U, // <7,2,7,3>: Cost 3 vzipr <5,5,7,7>, LHS
- 2554989878U, // <7,2,7,4>: Cost 3 vext1 <0,7,2,7>, RHS
- 3775113245U, // <7,2,7,5>: Cost 4 vext3 <2,7,5,7>, <2,7,5,7>
- 4060283228U, // <7,2,7,6>: Cost 4 vzipr <5,5,7,7>, <0,4,2,6>
- 2554992236U, // <7,2,7,7>: Cost 3 vext1 <0,7,2,7>, <7,7,7,7>
- 2986541163U, // <7,2,7,u>: Cost 3 vzipr <5,5,7,7>, LHS
- 1638320187U, // <7,2,u,0>: Cost 2 vext3 RHS, <2,u,0,1>
- 2693924936U, // <7,2,u,1>: Cost 3 vext3 <1,5,3,7>, <2,u,1,5>
- 1638319720U, // <7,2,u,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1628145756U, // <7,2,u,3>: Cost 2 vext3 <2,u,3,7>, <2,u,3,7>
- 1638320227U, // <7,2,u,4>: Cost 2 vext3 RHS, <2,u,4,5>
- 2702035054U, // <7,2,u,5>: Cost 3 vext3 <2,u,5,7>, <2,u,5,7>
- 2702108791U, // <7,2,u,6>: Cost 3 vext3 <2,u,6,7>, <2,u,6,7>
- 2735949945U, // <7,2,u,7>: Cost 3 vext3 RHS, <2,u,7,0>
- 1628514441U, // <7,2,u,u>: Cost 2 vext3 <2,u,u,7>, <2,u,u,7>
- 2712062091U, // <7,3,0,0>: Cost 3 vext3 RHS, <3,0,0,0>
- 1638320278U, // <7,3,0,1>: Cost 2 vext3 RHS, <3,0,1,2>
- 2712062109U, // <7,3,0,2>: Cost 3 vext3 RHS, <3,0,2,0>
- 2590836886U, // <7,3,0,3>: Cost 3 vext1 <6,7,3,0>, <3,0,1,2>
- 2712062128U, // <7,3,0,4>: Cost 3 vext3 RHS, <3,0,4,1>
- 2712062138U, // <7,3,0,5>: Cost 3 vext3 RHS, <3,0,5,2>
- 2590839656U, // <7,3,0,6>: Cost 3 vext1 <6,7,3,0>, <6,7,3,0>
- 3311414017U, // <7,3,0,7>: Cost 4 vrev <3,7,7,0>
- 1638320341U, // <7,3,0,u>: Cost 2 vext3 RHS, <3,0,u,2>
- 2237164227U, // <7,3,1,0>: Cost 3 vrev <3,7,0,1>
- 2712062182U, // <7,3,1,1>: Cost 3 vext3 RHS, <3,1,1,1>
- 2712062193U, // <7,3,1,2>: Cost 3 vext3 RHS, <3,1,2,3>
- 2692745468U, // <7,3,1,3>: Cost 3 vext3 <1,3,5,7>, <3,1,3,5>
- 2712062214U, // <7,3,1,4>: Cost 3 vext3 RHS, <3,1,4,6>
- 2693925132U, // <7,3,1,5>: Cost 3 vext3 <1,5,3,7>, <3,1,5,3>
- 3768183059U, // <7,3,1,6>: Cost 4 vext3 <1,6,1,7>, <3,1,6,1>
- 2692745504U, // <7,3,1,7>: Cost 3 vext3 <1,3,5,7>, <3,1,7,5>
- 2696063273U, // <7,3,1,u>: Cost 3 vext3 <1,u,5,7>, <3,1,u,5>
- 2712062254U, // <7,3,2,0>: Cost 3 vext3 RHS, <3,2,0,1>
- 2712062262U, // <7,3,2,1>: Cost 3 vext3 RHS, <3,2,1,0>
- 2712062273U, // <7,3,2,2>: Cost 3 vext3 RHS, <3,2,2,2>
- 2712062280U, // <7,3,2,3>: Cost 3 vext3 RHS, <3,2,3,0>
- 2712062294U, // <7,3,2,4>: Cost 3 vext3 RHS, <3,2,4,5>
- 2712062302U, // <7,3,2,5>: Cost 3 vext3 RHS, <3,2,5,4>
- 2700560742U, // <7,3,2,6>: Cost 3 vext3 <2,6,3,7>, <3,2,6,3>
- 2712062319U, // <7,3,2,7>: Cost 3 vext3 RHS, <3,2,7,3>
- 2712062325U, // <7,3,2,u>: Cost 3 vext3 RHS, <3,2,u,0>
- 2712062335U, // <7,3,3,0>: Cost 3 vext3 RHS, <3,3,0,1>
- 2636368158U, // <7,3,3,1>: Cost 3 vext2 <3,1,7,3>, <3,1,7,3>
- 2637031791U, // <7,3,3,2>: Cost 3 vext2 <3,2,7,3>, <3,2,7,3>
- 1638320540U, // <7,3,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 2712062374U, // <7,3,3,4>: Cost 3 vext3 RHS, <3,3,4,4>
- 2704689586U, // <7,3,3,5>: Cost 3 vext3 <3,3,5,7>, <3,3,5,7>
- 2590864235U, // <7,3,3,6>: Cost 3 vext1 <6,7,3,3>, <6,7,3,3>
- 2704837060U, // <7,3,3,7>: Cost 3 vext3 <3,3,7,7>, <3,3,7,7>
- 1638320540U, // <7,3,3,u>: Cost 2 vext3 RHS, <3,3,3,3>
- 2712062416U, // <7,3,4,0>: Cost 3 vext3 RHS, <3,4,0,1>
- 2712062426U, // <7,3,4,1>: Cost 3 vext3 RHS, <3,4,1,2>
- 2566981640U, // <7,3,4,2>: Cost 3 vext1 <2,7,3,4>, <2,7,3,4>
- 2712062447U, // <7,3,4,3>: Cost 3 vext3 RHS, <3,4,3,5>
- 2712062456U, // <7,3,4,4>: Cost 3 vext3 RHS, <3,4,4,5>
- 1638320642U, // <7,3,4,5>: Cost 2 vext3 RHS, <3,4,5,6>
- 2648313204U, // <7,3,4,6>: Cost 3 vext2 <5,1,7,3>, <4,6,4,6>
- 3311446789U, // <7,3,4,7>: Cost 4 vrev <3,7,7,4>
- 1638320669U, // <7,3,4,u>: Cost 2 vext3 RHS, <3,4,u,6>
- 2602819686U, // <7,3,5,0>: Cost 3 vext1 <u,7,3,5>, LHS
- 1574571728U, // <7,3,5,1>: Cost 2 vext2 <5,1,7,3>, <5,1,7,3>
- 2648977185U, // <7,3,5,2>: Cost 3 vext2 <5,2,7,3>, <5,2,7,3>
- 2705869378U, // <7,3,5,3>: Cost 3 vext3 <3,5,3,7>, <3,5,3,7>
- 2237491947U, // <7,3,5,4>: Cost 3 vrev <3,7,4,5>
- 2706016852U, // <7,3,5,5>: Cost 3 vext3 <3,5,5,7>, <3,5,5,7>
- 2648313954U, // <7,3,5,6>: Cost 3 vext2 <5,1,7,3>, <5,6,7,0>
- 2692745823U, // <7,3,5,7>: Cost 3 vext3 <1,3,5,7>, <3,5,7,0>
- 1579217159U, // <7,3,5,u>: Cost 2 vext2 <5,u,7,3>, <5,u,7,3>
- 2706311800U, // <7,3,6,0>: Cost 3 vext3 <3,6,0,7>, <3,6,0,7>
- 2654286249U, // <7,3,6,1>: Cost 3 vext2 <6,1,7,3>, <6,1,7,3>
- 1581208058U, // <7,3,6,2>: Cost 2 vext2 <6,2,7,3>, <6,2,7,3>
- 2706533011U, // <7,3,6,3>: Cost 3 vext3 <3,6,3,7>, <3,6,3,7>
- 2706606748U, // <7,3,6,4>: Cost 3 vext3 <3,6,4,7>, <3,6,4,7>
- 3780422309U, // <7,3,6,5>: Cost 4 vext3 <3,6,5,7>, <3,6,5,7>
- 2712062637U, // <7,3,6,6>: Cost 3 vext3 RHS, <3,6,6,6>
- 2706827959U, // <7,3,6,7>: Cost 3 vext3 <3,6,7,7>, <3,6,7,7>
- 1585189856U, // <7,3,6,u>: Cost 2 vext2 <6,u,7,3>, <6,u,7,3>
- 2693925571U, // <7,3,7,0>: Cost 3 vext3 <1,5,3,7>, <3,7,0,1>
- 2693925584U, // <7,3,7,1>: Cost 3 vext3 <1,5,3,7>, <3,7,1,5>
- 2700561114U, // <7,3,7,2>: Cost 3 vext3 <2,6,3,7>, <3,7,2,6>
- 2572978916U, // <7,3,7,3>: Cost 3 vext1 <3,7,3,7>, <3,7,3,7>
- 2693925611U, // <7,3,7,4>: Cost 3 vext3 <1,5,3,7>, <3,7,4,5>
- 2707344118U, // <7,3,7,5>: Cost 3 vext3 <3,7,5,7>, <3,7,5,7>
- 2654950894U, // <7,3,7,6>: Cost 3 vext2 <6,2,7,3>, <7,6,2,7>
- 2648315500U, // <7,3,7,7>: Cost 3 vext2 <5,1,7,3>, <7,7,7,7>
- 2693925643U, // <7,3,7,u>: Cost 3 vext3 <1,5,3,7>, <3,7,u,1>
- 2237221578U, // <7,3,u,0>: Cost 3 vrev <3,7,0,u>
- 1638320926U, // <7,3,u,1>: Cost 2 vext3 RHS, <3,u,1,2>
- 1593153452U, // <7,3,u,2>: Cost 2 vext2 <u,2,7,3>, <u,2,7,3>
- 1638320540U, // <7,3,u,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 2237516526U, // <7,3,u,4>: Cost 3 vrev <3,7,4,u>
- 1638320966U, // <7,3,u,5>: Cost 2 vext3 RHS, <3,u,5,6>
- 2712062796U, // <7,3,u,6>: Cost 3 vext3 RHS, <3,u,6,3>
- 2692967250U, // <7,3,u,7>: Cost 3 vext3 <1,3,u,7>, <3,u,7,0>
- 1638320989U, // <7,3,u,u>: Cost 2 vext3 RHS, <3,u,u,2>
- 2651635712U, // <7,4,0,0>: Cost 3 vext2 <5,6,7,4>, <0,0,0,0>
- 1577893990U, // <7,4,0,1>: Cost 2 vext2 <5,6,7,4>, LHS
- 2651635876U, // <7,4,0,2>: Cost 3 vext2 <5,6,7,4>, <0,2,0,2>
- 3785804672U, // <7,4,0,3>: Cost 4 vext3 RHS, <4,0,3,1>
- 2651636050U, // <7,4,0,4>: Cost 3 vext2 <5,6,7,4>, <0,4,1,5>
- 1638468498U, // <7,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
- 1638468508U, // <7,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
- 3787795364U, // <7,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
- 1640459181U, // <7,4,0,u>: Cost 2 vext3 RHS, <4,0,u,1>
- 2651636470U, // <7,4,1,0>: Cost 3 vext2 <5,6,7,4>, <1,0,3,2>
- 2651636532U, // <7,4,1,1>: Cost 3 vext2 <5,6,7,4>, <1,1,1,1>
- 2712062922U, // <7,4,1,2>: Cost 3 vext3 RHS, <4,1,2,3>
- 2639029248U, // <7,4,1,3>: Cost 3 vext2 <3,5,7,4>, <1,3,5,7>
- 2712062940U, // <7,4,1,4>: Cost 3 vext3 RHS, <4,1,4,3>
- 2712062946U, // <7,4,1,5>: Cost 3 vext3 RHS, <4,1,5,0>
- 2712062958U, // <7,4,1,6>: Cost 3 vext3 RHS, <4,1,6,3>
- 3785804791U, // <7,4,1,7>: Cost 4 vext3 RHS, <4,1,7,3>
- 2712062973U, // <7,4,1,u>: Cost 3 vext3 RHS, <4,1,u,0>
- 3785804807U, // <7,4,2,0>: Cost 4 vext3 RHS, <4,2,0,1>
- 3785804818U, // <7,4,2,1>: Cost 4 vext3 RHS, <4,2,1,3>
- 2651637352U, // <7,4,2,2>: Cost 3 vext2 <5,6,7,4>, <2,2,2,2>
- 2651637414U, // <7,4,2,3>: Cost 3 vext2 <5,6,7,4>, <2,3,0,1>
- 3716753194U, // <7,4,2,4>: Cost 4 vext2 <4,2,7,4>, <2,4,5,7>
- 2712063030U, // <7,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
- 2712063036U, // <7,4,2,6>: Cost 3 vext3 RHS, <4,2,6,0>
- 3773123658U, // <7,4,2,7>: Cost 4 vext3 <2,4,5,7>, <4,2,7,5>
- 2712063054U, // <7,4,2,u>: Cost 3 vext3 RHS, <4,2,u,0>
- 2651637910U, // <7,4,3,0>: Cost 3 vext2 <5,6,7,4>, <3,0,1,2>
- 3712772348U, // <7,4,3,1>: Cost 4 vext2 <3,5,7,4>, <3,1,3,5>
- 3785804906U, // <7,4,3,2>: Cost 4 vext3 RHS, <4,3,2,1>
- 2651638172U, // <7,4,3,3>: Cost 3 vext2 <5,6,7,4>, <3,3,3,3>
- 2651638274U, // <7,4,3,4>: Cost 3 vext2 <5,6,7,4>, <3,4,5,6>
- 2639030883U, // <7,4,3,5>: Cost 3 vext2 <3,5,7,4>, <3,5,7,4>
- 2712063122U, // <7,4,3,6>: Cost 3 vext3 RHS, <4,3,6,5>
- 3712772836U, // <7,4,3,7>: Cost 4 vext2 <3,5,7,4>, <3,7,3,7>
- 2641021782U, // <7,4,3,u>: Cost 3 vext2 <3,u,7,4>, <3,u,7,4>
- 2714053802U, // <7,4,4,0>: Cost 3 vext3 RHS, <4,4,0,2>
- 3785804978U, // <7,4,4,1>: Cost 4 vext3 RHS, <4,4,1,1>
- 3716754505U, // <7,4,4,2>: Cost 4 vext2 <4,2,7,4>, <4,2,7,4>
- 3785804998U, // <7,4,4,3>: Cost 4 vext3 RHS, <4,4,3,3>
- 1638321360U, // <7,4,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
- 1638468826U, // <7,4,4,5>: Cost 2 vext3 RHS, <4,4,5,5>
- 1638468836U, // <7,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
- 3785215214U, // <7,4,4,7>: Cost 4 vext3 <4,4,7,7>, <4,4,7,7>
- 1640459509U, // <7,4,4,u>: Cost 2 vext3 RHS, <4,4,u,5>
- 1517207654U, // <7,4,5,0>: Cost 2 vext1 <6,7,4,5>, LHS
- 2573034640U, // <7,4,5,1>: Cost 3 vext1 <3,7,4,5>, <1,5,3,7>
- 2712063246U, // <7,4,5,2>: Cost 3 vext3 RHS, <4,5,2,3>
- 2573036267U, // <7,4,5,3>: Cost 3 vext1 <3,7,4,5>, <3,7,4,5>
- 1517210934U, // <7,4,5,4>: Cost 2 vext1 <6,7,4,5>, RHS
- 2711989549U, // <7,4,5,5>: Cost 3 vext3 <4,5,5,7>, <4,5,5,7>
- 564579638U, // <7,4,5,6>: Cost 1 vext3 RHS, RHS
- 2651639976U, // <7,4,5,7>: Cost 3 vext2 <5,6,7,4>, <5,7,5,7>
- 564579656U, // <7,4,5,u>: Cost 1 vext3 RHS, RHS
- 2712063307U, // <7,4,6,0>: Cost 3 vext3 RHS, <4,6,0,1>
- 3767668056U, // <7,4,6,1>: Cost 4 vext3 <1,5,3,7>, <4,6,1,5>
- 2651640314U, // <7,4,6,2>: Cost 3 vext2 <5,6,7,4>, <6,2,7,3>
- 2655621708U, // <7,4,6,3>: Cost 3 vext2 <6,3,7,4>, <6,3,7,4>
- 1638468980U, // <7,4,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
- 2712063358U, // <7,4,6,5>: Cost 3 vext3 RHS, <4,6,5,7>
- 2712063367U, // <7,4,6,6>: Cost 3 vext3 RHS, <4,6,6,7>
- 2712210826U, // <7,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
- 1638469012U, // <7,4,6,u>: Cost 2 vext3 RHS, <4,6,u,2>
- 2651640826U, // <7,4,7,0>: Cost 3 vext2 <5,6,7,4>, <7,0,1,2>
- 3773713830U, // <7,4,7,1>: Cost 4 vext3 <2,5,4,7>, <4,7,1,2>
- 3773713842U, // <7,4,7,2>: Cost 4 vext3 <2,5,4,7>, <4,7,2,5>
- 3780349372U, // <7,4,7,3>: Cost 4 vext3 <3,6,4,7>, <4,7,3,6>
- 2651641140U, // <7,4,7,4>: Cost 3 vext2 <5,6,7,4>, <7,4,0,1>
- 2712210888U, // <7,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
- 2712210898U, // <7,4,7,6>: Cost 3 vext3 RHS, <4,7,6,1>
- 2651641452U, // <7,4,7,7>: Cost 3 vext2 <5,6,7,4>, <7,7,7,7>
- 2713538026U, // <7,4,7,u>: Cost 3 vext3 <4,7,u,7>, <4,7,u,7>
- 1517232230U, // <7,4,u,0>: Cost 2 vext1 <6,7,4,u>, LHS
- 1577899822U, // <7,4,u,1>: Cost 2 vext2 <5,6,7,4>, LHS
- 2712063489U, // <7,4,u,2>: Cost 3 vext3 RHS, <4,u,2,3>
- 2573060846U, // <7,4,u,3>: Cost 3 vext1 <3,7,4,u>, <3,7,4,u>
- 1640312342U, // <7,4,u,4>: Cost 2 vext3 RHS, <4,u,4,6>
- 1638469146U, // <7,4,u,5>: Cost 2 vext3 RHS, <4,u,5,1>
- 564579881U, // <7,4,u,6>: Cost 1 vext3 RHS, RHS
- 2714054192U, // <7,4,u,7>: Cost 3 vext3 RHS, <4,u,7,5>
- 564579899U, // <7,4,u,u>: Cost 1 vext3 RHS, RHS
- 2579038310U, // <7,5,0,0>: Cost 3 vext1 <4,7,5,0>, LHS
- 2636382310U, // <7,5,0,1>: Cost 3 vext2 <3,1,7,5>, LHS
- 2796339302U, // <7,5,0,2>: Cost 3 vuzpl <7,4,5,6>, LHS
- 3646810719U, // <7,5,0,3>: Cost 4 vext1 <3,7,5,0>, <3,5,7,0>
- 2712063586U, // <7,5,0,4>: Cost 3 vext3 RHS, <5,0,4,1>
- 2735951467U, // <7,5,0,5>: Cost 3 vext3 RHS, <5,0,5,1>
- 2735951476U, // <7,5,0,6>: Cost 3 vext3 RHS, <5,0,6,1>
- 2579043322U, // <7,5,0,7>: Cost 3 vext1 <4,7,5,0>, <7,0,1,2>
- 2636382877U, // <7,5,0,u>: Cost 3 vext2 <3,1,7,5>, LHS
- 2712211087U, // <7,5,1,0>: Cost 3 vext3 RHS, <5,1,0,1>
- 3698180916U, // <7,5,1,1>: Cost 4 vext2 <1,1,7,5>, <1,1,1,1>
- 3710124950U, // <7,5,1,2>: Cost 4 vext2 <3,1,7,5>, <1,2,3,0>
- 2636383232U, // <7,5,1,3>: Cost 3 vext2 <3,1,7,5>, <1,3,5,7>
- 2712211127U, // <7,5,1,4>: Cost 3 vext3 RHS, <5,1,4,5>
- 2590994128U, // <7,5,1,5>: Cost 3 vext1 <6,7,5,1>, <5,1,7,3>
- 2590995323U, // <7,5,1,6>: Cost 3 vext1 <6,7,5,1>, <6,7,5,1>
- 1638469328U, // <7,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
- 1638469337U, // <7,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
- 3785805536U, // <7,5,2,0>: Cost 4 vext3 RHS, <5,2,0,1>
- 3785805544U, // <7,5,2,1>: Cost 4 vext3 RHS, <5,2,1,0>
- 3704817288U, // <7,5,2,2>: Cost 4 vext2 <2,2,7,5>, <2,2,5,7>
- 2712063742U, // <7,5,2,3>: Cost 3 vext3 RHS, <5,2,3,4>
- 3716761386U, // <7,5,2,4>: Cost 4 vext2 <4,2,7,5>, <2,4,5,7>
- 2714054415U, // <7,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
- 3774304024U, // <7,5,2,6>: Cost 4 vext3 <2,6,3,7>, <5,2,6,3>
- 2712063777U, // <7,5,2,7>: Cost 3 vext3 RHS, <5,2,7,3>
- 2712063787U, // <7,5,2,u>: Cost 3 vext3 RHS, <5,2,u,4>
- 3634888806U, // <7,5,3,0>: Cost 4 vext1 <1,7,5,3>, LHS
- 2636384544U, // <7,5,3,1>: Cost 3 vext2 <3,1,7,5>, <3,1,7,5>
- 3710790001U, // <7,5,3,2>: Cost 4 vext2 <3,2,7,5>, <3,2,7,5>
- 3710126492U, // <7,5,3,3>: Cost 4 vext2 <3,1,7,5>, <3,3,3,3>
- 3634892086U, // <7,5,3,4>: Cost 4 vext1 <1,7,5,3>, RHS
- 2639039076U, // <7,5,3,5>: Cost 3 vext2 <3,5,7,5>, <3,5,7,5>
- 3713444533U, // <7,5,3,6>: Cost 4 vext2 <3,6,7,5>, <3,6,7,5>
- 2693926767U, // <7,5,3,7>: Cost 3 vext3 <1,5,3,7>, <5,3,7,0>
- 2712063864U, // <7,5,3,u>: Cost 3 vext3 RHS, <5,3,u,0>
- 2579071078U, // <7,5,4,0>: Cost 3 vext1 <4,7,5,4>, LHS
- 3646841856U, // <7,5,4,1>: Cost 4 vext1 <3,7,5,4>, <1,3,5,7>
- 3716762698U, // <7,5,4,2>: Cost 4 vext2 <4,2,7,5>, <4,2,7,5>
- 3646843491U, // <7,5,4,3>: Cost 4 vext1 <3,7,5,4>, <3,5,7,4>
- 2579074358U, // <7,5,4,4>: Cost 3 vext1 <4,7,5,4>, RHS
- 2636385590U, // <7,5,4,5>: Cost 3 vext2 <3,1,7,5>, RHS
- 2645675406U, // <7,5,4,6>: Cost 3 vext2 <4,6,7,5>, <4,6,7,5>
- 1638322118U, // <7,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
- 1638469583U, // <7,5,4,u>: Cost 2 vext3 RHS, <5,4,u,6>
- 2714054611U, // <7,5,5,0>: Cost 3 vext3 RHS, <5,5,0,1>
- 2652974800U, // <7,5,5,1>: Cost 3 vext2 <5,u,7,5>, <5,1,7,3>
- 3710127905U, // <7,5,5,2>: Cost 4 vext2 <3,1,7,5>, <5,2,7,3>
- 3785805808U, // <7,5,5,3>: Cost 4 vext3 RHS, <5,5,3,3>
- 2712211450U, // <7,5,5,4>: Cost 3 vext3 RHS, <5,5,4,4>
- 1638322180U, // <7,5,5,5>: Cost 2 vext3 RHS, <5,5,5,5>
- 2712064014U, // <7,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
- 1638469656U, // <7,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
- 1638469665U, // <7,5,5,u>: Cost 2 vext3 RHS, <5,5,u,7>
- 2712064036U, // <7,5,6,0>: Cost 3 vext3 RHS, <5,6,0,1>
- 2714054707U, // <7,5,6,1>: Cost 3 vext3 RHS, <5,6,1,7>
- 3785805879U, // <7,5,6,2>: Cost 4 vext3 RHS, <5,6,2,2>
- 2712064066U, // <7,5,6,3>: Cost 3 vext3 RHS, <5,6,3,4>
- 2712064076U, // <7,5,6,4>: Cost 3 vext3 RHS, <5,6,4,5>
- 2714054743U, // <7,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
- 2712064096U, // <7,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
- 1638322274U, // <7,5,6,7>: Cost 2 vext3 RHS, <5,6,7,0>
- 1638469739U, // <7,5,6,u>: Cost 2 vext3 RHS, <5,6,u,0>
- 1511325798U, // <7,5,7,0>: Cost 2 vext1 <5,7,5,7>, LHS
- 2692747392U, // <7,5,7,1>: Cost 3 vext3 <1,3,5,7>, <5,7,1,3>
- 2585069160U, // <7,5,7,2>: Cost 3 vext1 <5,7,5,7>, <2,2,2,2>
- 2573126390U, // <7,5,7,3>: Cost 3 vext1 <3,7,5,7>, <3,7,5,7>
- 1511329078U, // <7,5,7,4>: Cost 2 vext1 <5,7,5,7>, RHS
- 1638469800U, // <7,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
- 2712211626U, // <7,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
- 2712211636U, // <7,5,7,7>: Cost 3 vext3 RHS, <5,7,7,1>
- 1638469823U, // <7,5,7,u>: Cost 2 vext3 RHS, <5,7,u,3>
- 1511333990U, // <7,5,u,0>: Cost 2 vext1 <5,7,5,u>, LHS
- 2636388142U, // <7,5,u,1>: Cost 3 vext2 <3,1,7,5>, LHS
- 2712211671U, // <7,5,u,2>: Cost 3 vext3 RHS, <5,u,2,0>
- 2573134583U, // <7,5,u,3>: Cost 3 vext1 <3,7,5,u>, <3,7,5,u>
- 1511337270U, // <7,5,u,4>: Cost 2 vext1 <5,7,5,u>, RHS
- 1638469881U, // <7,5,u,5>: Cost 2 vext3 RHS, <5,u,5,7>
- 2712064258U, // <7,5,u,6>: Cost 3 vext3 RHS, <5,u,6,7>
- 1638469892U, // <7,5,u,7>: Cost 2 vext3 RHS, <5,u,7,0>
- 1638469904U, // <7,5,u,u>: Cost 2 vext3 RHS, <5,u,u,3>
- 2650324992U, // <7,6,0,0>: Cost 3 vext2 <5,4,7,6>, <0,0,0,0>
- 1576583270U, // <7,6,0,1>: Cost 2 vext2 <5,4,7,6>, LHS
- 2712064300U, // <7,6,0,2>: Cost 3 vext3 RHS, <6,0,2,4>
- 2255295336U, // <7,6,0,3>: Cost 3 vrev <6,7,3,0>
- 2712064316U, // <7,6,0,4>: Cost 3 vext3 RHS, <6,0,4,2>
- 2585088098U, // <7,6,0,5>: Cost 3 vext1 <5,7,6,0>, <5,6,7,0>
- 2735952204U, // <7,6,0,6>: Cost 3 vext3 RHS, <6,0,6,0>
- 2712211799U, // <7,6,0,7>: Cost 3 vext3 RHS, <6,0,7,2>
- 1576583837U, // <7,6,0,u>: Cost 2 vext2 <5,4,7,6>, LHS
- 1181340494U, // <7,6,1,0>: Cost 2 vrev <6,7,0,1>
- 2650325812U, // <7,6,1,1>: Cost 3 vext2 <5,4,7,6>, <1,1,1,1>
- 2650325910U, // <7,6,1,2>: Cost 3 vext2 <5,4,7,6>, <1,2,3,0>
- 2650325976U, // <7,6,1,3>: Cost 3 vext2 <5,4,7,6>, <1,3,1,3>
- 2579123510U, // <7,6,1,4>: Cost 3 vext1 <4,7,6,1>, RHS
- 2650326160U, // <7,6,1,5>: Cost 3 vext2 <5,4,7,6>, <1,5,3,7>
- 2714055072U, // <7,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
- 2712064425U, // <7,6,1,7>: Cost 3 vext3 RHS, <6,1,7,3>
- 1181930390U, // <7,6,1,u>: Cost 2 vrev <6,7,u,1>
- 2712211897U, // <7,6,2,0>: Cost 3 vext3 RHS, <6,2,0,1>
- 2714055108U, // <7,6,2,1>: Cost 3 vext3 RHS, <6,2,1,3>
- 2650326632U, // <7,6,2,2>: Cost 3 vext2 <5,4,7,6>, <2,2,2,2>
- 2650326694U, // <7,6,2,3>: Cost 3 vext2 <5,4,7,6>, <2,3,0,1>
- 2714055137U, // <7,6,2,4>: Cost 3 vext3 RHS, <6,2,4,5>
- 2714055148U, // <7,6,2,5>: Cost 3 vext3 RHS, <6,2,5,7>
- 2650326970U, // <7,6,2,6>: Cost 3 vext2 <5,4,7,6>, <2,6,3,7>
- 1638470138U, // <7,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
- 1638470147U, // <7,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
- 2650327190U, // <7,6,3,0>: Cost 3 vext2 <5,4,7,6>, <3,0,1,2>
- 2255172441U, // <7,6,3,1>: Cost 3 vrev <6,7,1,3>
- 2255246178U, // <7,6,3,2>: Cost 3 vrev <6,7,2,3>
- 2650327452U, // <7,6,3,3>: Cost 3 vext2 <5,4,7,6>, <3,3,3,3>
- 2712064562U, // <7,6,3,4>: Cost 3 vext3 RHS, <6,3,4,5>
- 2650327627U, // <7,6,3,5>: Cost 3 vext2 <5,4,7,6>, <3,5,4,7>
- 3713452726U, // <7,6,3,6>: Cost 4 vext2 <3,6,7,6>, <3,6,7,6>
- 2700563016U, // <7,6,3,7>: Cost 3 vext3 <2,6,3,7>, <6,3,7,0>
- 2712064593U, // <7,6,3,u>: Cost 3 vext3 RHS, <6,3,u,0>
- 2650327954U, // <7,6,4,0>: Cost 3 vext2 <5,4,7,6>, <4,0,5,1>
- 2735952486U, // <7,6,4,1>: Cost 3 vext3 RHS, <6,4,1,3>
- 2735952497U, // <7,6,4,2>: Cost 3 vext3 RHS, <6,4,2,5>
- 2255328108U, // <7,6,4,3>: Cost 3 vrev <6,7,3,4>
- 2712212100U, // <7,6,4,4>: Cost 3 vext3 RHS, <6,4,4,6>
- 1576586550U, // <7,6,4,5>: Cost 2 vext2 <5,4,7,6>, RHS
- 2714055312U, // <7,6,4,6>: Cost 3 vext3 RHS, <6,4,6,0>
- 2712212126U, // <7,6,4,7>: Cost 3 vext3 RHS, <6,4,7,5>
- 1576586793U, // <7,6,4,u>: Cost 2 vext2 <5,4,7,6>, RHS
- 2579152998U, // <7,6,5,0>: Cost 3 vext1 <4,7,6,5>, LHS
- 2650328784U, // <7,6,5,1>: Cost 3 vext2 <5,4,7,6>, <5,1,7,3>
- 2714055364U, // <7,6,5,2>: Cost 3 vext3 RHS, <6,5,2,7>
- 3785806538U, // <7,6,5,3>: Cost 4 vext3 RHS, <6,5,3,4>
- 1576587206U, // <7,6,5,4>: Cost 2 vext2 <5,4,7,6>, <5,4,7,6>
- 2650329092U, // <7,6,5,5>: Cost 3 vext2 <5,4,7,6>, <5,5,5,5>
- 2650329186U, // <7,6,5,6>: Cost 3 vext2 <5,4,7,6>, <5,6,7,0>
- 2712064753U, // <7,6,5,7>: Cost 3 vext3 RHS, <6,5,7,7>
- 1181963162U, // <7,6,5,u>: Cost 2 vrev <6,7,u,5>
- 2714055421U, // <7,6,6,0>: Cost 3 vext3 RHS, <6,6,0,1>
- 2714055432U, // <7,6,6,1>: Cost 3 vext3 RHS, <6,6,1,3>
- 2650329594U, // <7,6,6,2>: Cost 3 vext2 <5,4,7,6>, <6,2,7,3>
- 3785806619U, // <7,6,6,3>: Cost 4 vext3 RHS, <6,6,3,4>
- 2712212260U, // <7,6,6,4>: Cost 3 vext3 RHS, <6,6,4,4>
- 2714055472U, // <7,6,6,5>: Cost 3 vext3 RHS, <6,6,5,7>
- 1638323000U, // <7,6,6,6>: Cost 2 vext3 RHS, <6,6,6,6>
- 1638470466U, // <7,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
- 1638470475U, // <7,6,6,u>: Cost 2 vext3 RHS, <6,6,u,7>
- 1638323022U, // <7,6,7,0>: Cost 2 vext3 RHS, <6,7,0,1>
- 2712064854U, // <7,6,7,1>: Cost 3 vext3 RHS, <6,7,1,0>
- 2712064865U, // <7,6,7,2>: Cost 3 vext3 RHS, <6,7,2,2>
- 2712064872U, // <7,6,7,3>: Cost 3 vext3 RHS, <6,7,3,0>
- 1638323062U, // <7,6,7,4>: Cost 2 vext3 RHS, <6,7,4,5>
- 2712064894U, // <7,6,7,5>: Cost 3 vext3 RHS, <6,7,5,4>
- 2712064905U, // <7,6,7,6>: Cost 3 vext3 RHS, <6,7,6,6>
- 2712064915U, // <7,6,7,7>: Cost 3 vext3 RHS, <6,7,7,7>
- 1638323094U, // <7,6,7,u>: Cost 2 vext3 RHS, <6,7,u,1>
- 1638470559U, // <7,6,u,0>: Cost 2 vext3 RHS, <6,u,0,1>
- 1576589102U, // <7,6,u,1>: Cost 2 vext2 <5,4,7,6>, LHS
- 2712212402U, // <7,6,u,2>: Cost 3 vext3 RHS, <6,u,2,2>
- 2712212409U, // <7,6,u,3>: Cost 3 vext3 RHS, <6,u,3,0>
- 1638470599U, // <7,6,u,4>: Cost 2 vext3 RHS, <6,u,4,5>
- 1576589466U, // <7,6,u,5>: Cost 2 vext2 <5,4,7,6>, RHS
- 1638323000U, // <7,6,u,6>: Cost 2 vext3 RHS, <6,6,6,6>
- 1638470624U, // <7,6,u,7>: Cost 2 vext3 RHS, <6,u,7,3>
- 1638470631U, // <7,6,u,u>: Cost 2 vext3 RHS, <6,u,u,1>
- 2712065007U, // <7,7,0,0>: Cost 3 vext3 RHS, <7,0,0,0>
- 1638323194U, // <7,7,0,1>: Cost 2 vext3 RHS, <7,0,1,2>
- 2712065025U, // <7,7,0,2>: Cost 3 vext3 RHS, <7,0,2,0>
- 3646958337U, // <7,7,0,3>: Cost 4 vext1 <3,7,7,0>, <3,7,7,0>
- 2712065044U, // <7,7,0,4>: Cost 3 vext3 RHS, <7,0,4,1>
- 2585161907U, // <7,7,0,5>: Cost 3 vext1 <5,7,7,0>, <5,7,7,0>
- 2591134604U, // <7,7,0,6>: Cost 3 vext1 <6,7,7,0>, <6,7,7,0>
- 2591134714U, // <7,7,0,7>: Cost 3 vext1 <6,7,7,0>, <7,0,1,2>
- 1638323257U, // <7,7,0,u>: Cost 2 vext3 RHS, <7,0,u,2>
- 2712065091U, // <7,7,1,0>: Cost 3 vext3 RHS, <7,1,0,3>
- 2712065098U, // <7,7,1,1>: Cost 3 vext3 RHS, <7,1,1,1>
- 2712065109U, // <7,7,1,2>: Cost 3 vext3 RHS, <7,1,2,3>
- 2692748384U, // <7,7,1,3>: Cost 3 vext3 <1,3,5,7>, <7,1,3,5>
- 2585169206U, // <7,7,1,4>: Cost 3 vext1 <5,7,7,1>, RHS
- 2693928048U, // <7,7,1,5>: Cost 3 vext3 <1,5,3,7>, <7,1,5,3>
- 2585170766U, // <7,7,1,6>: Cost 3 vext1 <5,7,7,1>, <6,7,0,1>
- 2735953024U, // <7,7,1,7>: Cost 3 vext3 RHS, <7,1,7,1>
- 2695918731U, // <7,7,1,u>: Cost 3 vext3 <1,u,3,7>, <7,1,u,3>
- 3770471574U, // <7,7,2,0>: Cost 4 vext3 <2,0,5,7>, <7,2,0,5>
- 3785807002U, // <7,7,2,1>: Cost 4 vext3 RHS, <7,2,1,0>
- 2712065189U, // <7,7,2,2>: Cost 3 vext3 RHS, <7,2,2,2>
- 2712065196U, // <7,7,2,3>: Cost 3 vext3 RHS, <7,2,3,0>
- 3773125818U, // <7,7,2,4>: Cost 4 vext3 <2,4,5,7>, <7,2,4,5>
- 3766490305U, // <7,7,2,5>: Cost 4 vext3 <1,3,5,7>, <7,2,5,3>
- 2700563658U, // <7,7,2,6>: Cost 3 vext3 <2,6,3,7>, <7,2,6,3>
- 2735953107U, // <7,7,2,7>: Cost 3 vext3 RHS, <7,2,7,3>
- 2701890780U, // <7,7,2,u>: Cost 3 vext3 <2,u,3,7>, <7,2,u,3>
- 2712065251U, // <7,7,3,0>: Cost 3 vext3 RHS, <7,3,0,1>
- 3766490350U, // <7,7,3,1>: Cost 4 vext3 <1,3,5,7>, <7,3,1,3>
- 3774305530U, // <7,7,3,2>: Cost 4 vext3 <2,6,3,7>, <7,3,2,6>
- 2637728196U, // <7,7,3,3>: Cost 3 vext2 <3,3,7,7>, <3,3,7,7>
- 2712065291U, // <7,7,3,4>: Cost 3 vext3 RHS, <7,3,4,5>
- 2585186486U, // <7,7,3,5>: Cost 3 vext1 <5,7,7,3>, <5,7,7,3>
- 2639719095U, // <7,7,3,6>: Cost 3 vext2 <3,6,7,7>, <3,6,7,7>
- 2640382728U, // <7,7,3,7>: Cost 3 vext2 <3,7,7,7>, <3,7,7,7>
- 2641046361U, // <7,7,3,u>: Cost 3 vext2 <3,u,7,7>, <3,u,7,7>
- 2712212792U, // <7,7,4,0>: Cost 3 vext3 RHS, <7,4,0,5>
- 3646989312U, // <7,7,4,1>: Cost 4 vext1 <3,7,7,4>, <1,3,5,7>
- 3785807176U, // <7,7,4,2>: Cost 4 vext3 RHS, <7,4,2,3>
- 3646991109U, // <7,7,4,3>: Cost 4 vext1 <3,7,7,4>, <3,7,7,4>
- 2712065371U, // <7,7,4,4>: Cost 3 vext3 RHS, <7,4,4,4>
- 1638323558U, // <7,7,4,5>: Cost 2 vext3 RHS, <7,4,5,6>
- 2712212845U, // <7,7,4,6>: Cost 3 vext3 RHS, <7,4,6,4>
- 2591167846U, // <7,7,4,7>: Cost 3 vext1 <6,7,7,4>, <7,4,5,6>
- 1638323585U, // <7,7,4,u>: Cost 2 vext3 RHS, <7,4,u,6>
- 2585198694U, // <7,7,5,0>: Cost 3 vext1 <5,7,7,5>, LHS
- 2712212884U, // <7,7,5,1>: Cost 3 vext3 RHS, <7,5,1,7>
- 3711471393U, // <7,7,5,2>: Cost 4 vext2 <3,3,7,7>, <5,2,7,3>
- 2649673590U, // <7,7,5,3>: Cost 3 vext2 <5,3,7,7>, <5,3,7,7>
- 2712065455U, // <7,7,5,4>: Cost 3 vext3 RHS, <7,5,4,7>
- 1577259032U, // <7,7,5,5>: Cost 2 vext2 <5,5,7,7>, <5,5,7,7>
- 2712065473U, // <7,7,5,6>: Cost 3 vext3 RHS, <7,5,6,7>
- 2712212936U, // <7,7,5,7>: Cost 3 vext3 RHS, <7,5,7,5>
- 1579249931U, // <7,7,5,u>: Cost 2 vext2 <5,u,7,7>, <5,u,7,7>
- 2591178854U, // <7,7,6,0>: Cost 3 vext1 <6,7,7,6>, LHS
- 2735953374U, // <7,7,6,1>: Cost 3 vext3 RHS, <7,6,1,0>
- 2712212974U, // <7,7,6,2>: Cost 3 vext3 RHS, <7,6,2,7>
- 2655646287U, // <7,7,6,3>: Cost 3 vext2 <6,3,7,7>, <6,3,7,7>
- 2591182134U, // <7,7,6,4>: Cost 3 vext1 <6,7,7,6>, RHS
- 2656973553U, // <7,7,6,5>: Cost 3 vext2 <6,5,7,7>, <6,5,7,7>
- 1583895362U, // <7,7,6,6>: Cost 2 vext2 <6,6,7,7>, <6,6,7,7>
- 2712065556U, // <7,7,6,7>: Cost 3 vext3 RHS, <7,6,7,0>
- 1585222628U, // <7,7,6,u>: Cost 2 vext2 <6,u,7,7>, <6,u,7,7>
- 1523417190U, // <7,7,7,0>: Cost 2 vext1 <7,7,7,7>, LHS
- 2597159670U, // <7,7,7,1>: Cost 3 vext1 <7,7,7,7>, <1,0,3,2>
- 2597160552U, // <7,7,7,2>: Cost 3 vext1 <7,7,7,7>, <2,2,2,2>
- 2597161110U, // <7,7,7,3>: Cost 3 vext1 <7,7,7,7>, <3,0,1,2>
- 1523420470U, // <7,7,7,4>: Cost 2 vext1 <7,7,7,7>, RHS
- 2651002296U, // <7,7,7,5>: Cost 3 vext2 <5,5,7,7>, <7,5,5,7>
- 2657637906U, // <7,7,7,6>: Cost 3 vext2 <6,6,7,7>, <7,6,6,7>
- 363253046U, // <7,7,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,7,7,u>: Cost 1 vdup3 RHS
- 1523417190U, // <7,7,u,0>: Cost 2 vext1 <7,7,7,7>, LHS
- 1638471298U, // <7,7,u,1>: Cost 2 vext3 RHS, <7,u,1,2>
- 2712213132U, // <7,7,u,2>: Cost 3 vext3 RHS, <7,u,2,3>
- 2712213138U, // <7,7,u,3>: Cost 3 vext3 RHS, <7,u,3,0>
- 1523420470U, // <7,7,u,4>: Cost 2 vext1 <7,7,7,7>, RHS
- 1638471338U, // <7,7,u,5>: Cost 2 vext3 RHS, <7,u,5,6>
- 1595840756U, // <7,7,u,6>: Cost 2 vext2 <u,6,7,7>, <u,6,7,7>
- 363253046U, // <7,7,u,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,7,u,u>: Cost 1 vdup3 RHS
- 1638318080U, // <7,u,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
- 1638323923U, // <7,u,0,1>: Cost 2 vext3 RHS, <u,0,1,2>
- 1662211804U, // <7,u,0,2>: Cost 2 vext3 RHS, <u,0,2,2>
- 1638323941U, // <7,u,0,3>: Cost 2 vext3 RHS, <u,0,3,2>
- 2712065773U, // <7,u,0,4>: Cost 3 vext3 RHS, <u,0,4,1>
- 1662359286U, // <7,u,0,5>: Cost 2 vext3 RHS, <u,0,5,1>
- 1662359296U, // <7,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
- 2987150664U, // <7,u,0,7>: Cost 3 vzipr <5,6,7,0>, RHS
- 1638323986U, // <7,u,0,u>: Cost 2 vext3 RHS, <u,0,u,2>
- 1517469798U, // <7,u,1,0>: Cost 2 vext1 <6,7,u,1>, LHS
- 1638318900U, // <7,u,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
- 564582190U, // <7,u,1,2>: Cost 1 vext3 RHS, LHS
- 1638324023U, // <7,u,1,3>: Cost 2 vext3 RHS, <u,1,3,3>
- 1517473078U, // <7,u,1,4>: Cost 2 vext1 <6,7,u,1>, RHS
- 2693928777U, // <7,u,1,5>: Cost 3 vext3 <1,5,3,7>, <u,1,5,3>
- 1517474710U, // <7,u,1,6>: Cost 2 vext1 <6,7,u,1>, <6,7,u,1>
- 1640462171U, // <7,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
- 564582244U, // <7,u,1,u>: Cost 1 vext3 RHS, LHS
- 1638318244U, // <7,u,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
- 2712065907U, // <7,u,2,1>: Cost 3 vext3 RHS, <u,2,1,0>
- 1638319720U, // <7,u,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1638324101U, // <7,u,2,3>: Cost 2 vext3 RHS, <u,2,3,0>
- 1638318284U, // <7,u,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
- 2712065947U, // <7,u,2,5>: Cost 3 vext3 RHS, <u,2,5,4>
- 2700564387U, // <7,u,2,6>: Cost 3 vext3 <2,6,3,7>, <u,2,6,3>
- 1640314796U, // <7,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
- 1638324146U, // <7,u,2,u>: Cost 2 vext3 RHS, <u,2,u,0>
- 1638324156U, // <7,u,3,0>: Cost 2 vext3 RHS, <u,3,0,1>
- 1638319064U, // <7,u,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
- 2700564435U, // <7,u,3,2>: Cost 3 vext3 <2,6,3,7>, <u,3,2,6>
- 1638320540U, // <7,u,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 1638324196U, // <7,u,3,4>: Cost 2 vext3 RHS, <u,3,4,5>
- 1638324207U, // <7,u,3,5>: Cost 2 vext3 RHS, <u,3,5,7>
- 2700564472U, // <7,u,3,6>: Cost 3 vext3 <2,6,3,7>, <u,3,6,7>
- 2695919610U, // <7,u,3,7>: Cost 3 vext3 <1,u,3,7>, <u,3,7,0>
- 1638324228U, // <7,u,3,u>: Cost 2 vext3 RHS, <u,3,u,1>
- 2712066061U, // <7,u,4,0>: Cost 3 vext3 RHS, <u,4,0,1>
- 1662212122U, // <7,u,4,1>: Cost 2 vext3 RHS, <u,4,1,5>
- 1662212132U, // <7,u,4,2>: Cost 2 vext3 RHS, <u,4,2,6>
- 2712066092U, // <7,u,4,3>: Cost 3 vext3 RHS, <u,4,3,5>
- 1638321360U, // <7,u,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
- 1638324287U, // <7,u,4,5>: Cost 2 vext3 RHS, <u,4,5,6>
- 1662359624U, // <7,u,4,6>: Cost 2 vext3 RHS, <u,4,6,6>
- 1640314961U, // <7,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
- 1638324314U, // <7,u,4,u>: Cost 2 vext3 RHS, <u,4,u,6>
- 1517502566U, // <7,u,5,0>: Cost 2 vext1 <6,7,u,5>, LHS
- 1574612693U, // <7,u,5,1>: Cost 2 vext2 <5,1,7,u>, <5,1,7,u>
- 2712066162U, // <7,u,5,2>: Cost 3 vext3 RHS, <u,5,2,3>
- 1638324351U, // <7,u,5,3>: Cost 2 vext3 RHS, <u,5,3,7>
- 1576603592U, // <7,u,5,4>: Cost 2 vext2 <5,4,7,u>, <5,4,7,u>
- 1577267225U, // <7,u,5,5>: Cost 2 vext2 <5,5,7,u>, <5,5,7,u>
- 564582554U, // <7,u,5,6>: Cost 1 vext3 RHS, RHS
- 1640462499U, // <7,u,5,7>: Cost 2 vext3 RHS, <u,5,7,7>
- 564582572U, // <7,u,5,u>: Cost 1 vext3 RHS, RHS
- 2712066223U, // <7,u,6,0>: Cost 3 vext3 RHS, <u,6,0,1>
- 2712066238U, // <7,u,6,1>: Cost 3 vext3 RHS, <u,6,1,7>
- 1581249023U, // <7,u,6,2>: Cost 2 vext2 <6,2,7,u>, <6,2,7,u>
- 1638324432U, // <7,u,6,3>: Cost 2 vext3 RHS, <u,6,3,7>
- 1638468980U, // <7,u,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
- 2712066274U, // <7,u,6,5>: Cost 3 vext3 RHS, <u,6,5,7>
- 1583903555U, // <7,u,6,6>: Cost 2 vext2 <6,6,7,u>, <6,6,7,u>
- 1640315117U, // <7,u,6,7>: Cost 2 vext3 RHS, <u,6,7,0>
- 1638324477U, // <7,u,6,u>: Cost 2 vext3 RHS, <u,6,u,7>
- 1638471936U, // <7,u,7,0>: Cost 2 vext3 RHS, <u,7,0,1>
- 2692970763U, // <7,u,7,1>: Cost 3 vext3 <1,3,u,7>, <u,7,1,3>
- 2700933399U, // <7,u,7,2>: Cost 3 vext3 <2,6,u,7>, <u,7,2,6>
- 2573347601U, // <7,u,7,3>: Cost 3 vext1 <3,7,u,7>, <3,7,u,7>
- 1638471976U, // <7,u,7,4>: Cost 2 vext3 RHS, <u,7,4,5>
- 1511551171U, // <7,u,7,5>: Cost 2 vext1 <5,7,u,7>, <5,7,u,7>
- 2712213815U, // <7,u,7,6>: Cost 3 vext3 RHS, <u,7,6,2>
- 363253046U, // <7,u,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,u,7,u>: Cost 1 vdup3 RHS
- 1638324561U, // <7,u,u,0>: Cost 2 vext3 RHS, <u,u,0,1>
- 1638324571U, // <7,u,u,1>: Cost 2 vext3 RHS, <u,u,1,2>
- 564582757U, // <7,u,u,2>: Cost 1 vext3 RHS, LHS
- 1638324587U, // <7,u,u,3>: Cost 2 vext3 RHS, <u,u,3,0>
- 1638324601U, // <7,u,u,4>: Cost 2 vext3 RHS, <u,u,4,5>
- 1638324611U, // <7,u,u,5>: Cost 2 vext3 RHS, <u,u,5,6>
- 564582797U, // <7,u,u,6>: Cost 1 vext3 RHS, RHS
- 363253046U, // <7,u,u,7>: Cost 1 vdup3 RHS
- 564582811U, // <7,u,u,u>: Cost 1 vext3 RHS, LHS
- 135053414U, // <u,0,0,0>: Cost 1 vdup0 LHS
- 1611489290U, // <u,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
- 1611489300U, // <u,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
- 2568054923U, // <u,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
- 1481706806U, // <u,0,0,4>: Cost 2 vext1 <0,u,0,0>, RHS
- 2555449040U, // <u,0,0,5>: Cost 3 vext1 <0,u,0,0>, <5,1,7,3>
- 2591282078U, // <u,0,0,6>: Cost 3 vext1 <6,u,0,0>, <6,u,0,0>
- 2591945711U, // <u,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <u,0,0,u>: Cost 1 vdup0 LHS
- 1493655654U, // <u,0,1,0>: Cost 2 vext1 <2,u,0,1>, LHS
- 1860550758U, // <u,0,1,1>: Cost 2 vzipl LHS, LHS
- 537747563U, // <u,0,1,2>: Cost 1 vext3 LHS, LHS
- 2625135576U, // <u,0,1,3>: Cost 3 vext2 <1,2,u,0>, <1,3,1,3>
- 1493658934U, // <u,0,1,4>: Cost 2 vext1 <2,u,0,1>, RHS
- 2625135760U, // <u,0,1,5>: Cost 3 vext2 <1,2,u,0>, <1,5,3,7>
- 1517548447U, // <u,0,1,6>: Cost 2 vext1 <6,u,0,1>, <6,u,0,1>
- 2591290362U, // <u,0,1,7>: Cost 3 vext1 <6,u,0,1>, <7,0,1,2>
- 537747612U, // <u,0,1,u>: Cost 1 vext3 LHS, LHS
- 1611489444U, // <u,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2685231276U, // <u,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
- 1994768486U, // <u,0,2,2>: Cost 2 vtrnl LHS, LHS
- 2685231294U, // <u,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
- 1611489484U, // <u,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2712068310U, // <u,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
- 2625136570U, // <u,0,2,6>: Cost 3 vext2 <1,2,u,0>, <2,6,3,7>
- 2591962097U, // <u,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
- 1611489516U, // <u,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
- 2954067968U, // <u,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
- 2685231356U, // <u,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
- 72589981U, // <u,0,3,2>: Cost 1 vrev LHS
- 2625137052U, // <u,0,3,3>: Cost 3 vext2 <1,2,u,0>, <3,3,3,3>
- 2625137154U, // <u,0,3,4>: Cost 3 vext2 <1,2,u,0>, <3,4,5,6>
- 2639071848U, // <u,0,3,5>: Cost 3 vext2 <3,5,u,0>, <3,5,u,0>
- 2639735481U, // <u,0,3,6>: Cost 3 vext2 <3,6,u,0>, <3,6,u,0>
- 2597279354U, // <u,0,3,7>: Cost 3 vext1 <7,u,0,3>, <7,u,0,3>
- 73032403U, // <u,0,3,u>: Cost 1 vrev LHS
- 2687074636U, // <u,0,4,0>: Cost 3 vext3 <0,4,0,u>, <0,4,0,u>
- 1611489618U, // <u,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
- 1611489628U, // <u,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
- 3629222038U, // <u,0,4,3>: Cost 4 vext1 <0,u,0,4>, <3,0,1,2>
- 2555481398U, // <u,0,4,4>: Cost 3 vext1 <0,u,0,4>, RHS
- 1551396150U, // <u,0,4,5>: Cost 2 vext2 <1,2,u,0>, RHS
- 2651680116U, // <u,0,4,6>: Cost 3 vext2 <5,6,u,0>, <4,6,4,6>
- 2646150600U, // <u,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
- 1611932050U, // <u,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
- 2561458278U, // <u,0,5,0>: Cost 3 vext1 <1,u,0,5>, LHS
- 1863532646U, // <u,0,5,1>: Cost 2 vzipl RHS, LHS
- 2712068526U, // <u,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
- 2649689976U, // <u,0,5,3>: Cost 3 vext2 <5,3,u,0>, <5,3,u,0>
- 2220237489U, // <u,0,5,4>: Cost 3 vrev <0,u,4,5>
- 2651680772U, // <u,0,5,5>: Cost 3 vext2 <5,6,u,0>, <5,5,5,5>
- 1577939051U, // <u,0,5,6>: Cost 2 vext2 <5,6,u,0>, <5,6,u,0>
- 2830077238U, // <u,0,5,7>: Cost 3 vuzpr <1,u,3,0>, RHS
- 1579266317U, // <u,0,5,u>: Cost 2 vext2 <5,u,u,0>, <5,u,u,0>
- 2555494502U, // <u,0,6,0>: Cost 3 vext1 <0,u,0,6>, LHS
- 2712068598U, // <u,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
- 1997750374U, // <u,0,6,2>: Cost 2 vtrnl RHS, LHS
- 2655662673U, // <u,0,6,3>: Cost 3 vext2 <6,3,u,0>, <6,3,u,0>
- 2555497782U, // <u,0,6,4>: Cost 3 vext1 <0,u,0,6>, RHS
- 2651681459U, // <u,0,6,5>: Cost 3 vext2 <5,6,u,0>, <6,5,0,u>
- 2651681592U, // <u,0,6,6>: Cost 3 vext2 <5,6,u,0>, <6,6,6,6>
- 2651681614U, // <u,0,6,7>: Cost 3 vext2 <5,6,u,0>, <6,7,0,1>
- 1997750428U, // <u,0,6,u>: Cost 2 vtrnl RHS, LHS
- 2567446630U, // <u,0,7,0>: Cost 3 vext1 <2,u,0,7>, LHS
- 2567447446U, // <u,0,7,1>: Cost 3 vext1 <2,u,0,7>, <1,2,3,0>
- 2567448641U, // <u,0,7,2>: Cost 3 vext1 <2,u,0,7>, <2,u,0,7>
- 2573421338U, // <u,0,7,3>: Cost 3 vext1 <3,u,0,7>, <3,u,0,7>
- 2567449910U, // <u,0,7,4>: Cost 3 vext1 <2,u,0,7>, RHS
- 2651682242U, // <u,0,7,5>: Cost 3 vext2 <5,6,u,0>, <7,5,6,u>
- 2591339429U, // <u,0,7,6>: Cost 3 vext1 <6,u,0,7>, <6,u,0,7>
- 2651682412U, // <u,0,7,7>: Cost 3 vext2 <5,6,u,0>, <7,7,7,7>
- 2567452462U, // <u,0,7,u>: Cost 3 vext1 <2,u,0,7>, LHS
- 135053414U, // <u,0,u,0>: Cost 1 vdup0 LHS
- 1611489938U, // <u,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
- 537748125U, // <u,0,u,2>: Cost 1 vext3 LHS, LHS
- 2685674148U, // <u,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
- 1611932338U, // <u,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
- 1551399066U, // <u,0,u,5>: Cost 2 vext2 <1,2,u,0>, RHS
- 1517605798U, // <u,0,u,6>: Cost 2 vext1 <6,u,0,u>, <6,u,0,u>
- 2830077481U, // <u,0,u,7>: Cost 3 vuzpr <1,u,3,0>, RHS
- 537748179U, // <u,0,u,u>: Cost 1 vext3 LHS, LHS
- 1544101961U, // <u,1,0,0>: Cost 2 vext2 <0,0,u,1>, <0,0,u,1>
- 1558036582U, // <u,1,0,1>: Cost 2 vext2 <2,3,u,1>, LHS
- 2619171051U, // <u,1,0,2>: Cost 3 vext2 <0,2,u,1>, <0,2,u,1>
- 1611490038U, // <u,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
- 2555522358U, // <u,1,0,4>: Cost 3 vext1 <0,u,1,0>, RHS
- 2712068871U, // <u,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
- 2591355815U, // <u,1,0,6>: Cost 3 vext1 <6,u,1,0>, <6,u,1,0>
- 2597328512U, // <u,1,0,7>: Cost 3 vext1 <7,u,1,0>, <7,u,1,0>
- 1611490083U, // <u,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
- 1481785446U, // <u,1,1,0>: Cost 2 vext1 <0,u,1,1>, LHS
- 202162278U, // <u,1,1,1>: Cost 1 vdup1 LHS
- 2555528808U, // <u,1,1,2>: Cost 3 vext1 <0,u,1,1>, <2,2,2,2>
- 1611490120U, // <u,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
- 1481788726U, // <u,1,1,4>: Cost 2 vext1 <0,u,1,1>, RHS
- 2689876828U, // <u,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
- 2591364008U, // <u,1,1,6>: Cost 3 vext1 <6,u,1,1>, <6,u,1,1>
- 2592691274U, // <u,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
- 202162278U, // <u,1,1,u>: Cost 1 vdup1 LHS
- 1499709542U, // <u,1,2,0>: Cost 2 vext1 <3,u,1,2>, LHS
- 2689876871U, // <u,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
- 2631116445U, // <u,1,2,2>: Cost 3 vext2 <2,2,u,1>, <2,2,u,1>
- 835584U, // <u,1,2,3>: Cost 0 copy LHS
- 1499712822U, // <u,1,2,4>: Cost 2 vext1 <3,u,1,2>, RHS
- 2689876907U, // <u,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
- 2631780282U, // <u,1,2,6>: Cost 3 vext2 <2,3,u,1>, <2,6,3,7>
- 1523603074U, // <u,1,2,7>: Cost 2 vext1 <7,u,1,2>, <7,u,1,2>
- 835584U, // <u,1,2,u>: Cost 0 copy LHS
- 1487773798U, // <u,1,3,0>: Cost 2 vext1 <1,u,1,3>, LHS
- 1611490264U, // <u,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
- 2685232094U, // <u,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
- 2018746470U, // <u,1,3,3>: Cost 2 vtrnr LHS, LHS
- 1487777078U, // <u,1,3,4>: Cost 2 vext1 <1,u,1,3>, RHS
- 1611490304U, // <u,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
- 2685674505U, // <u,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
- 2640407307U, // <u,1,3,7>: Cost 3 vext2 <3,7,u,1>, <3,7,u,1>
- 1611490327U, // <u,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
- 1567992749U, // <u,1,4,0>: Cost 2 vext2 <4,0,u,1>, <4,0,u,1>
- 2693121070U, // <u,1,4,1>: Cost 3 vext3 <1,4,1,u>, <1,4,1,u>
- 2693194807U, // <u,1,4,2>: Cost 3 vext3 <1,4,2,u>, <1,4,2,u>
- 1152386432U, // <u,1,4,3>: Cost 2 vrev <1,u,3,4>
- 2555555126U, // <u,1,4,4>: Cost 3 vext1 <0,u,1,4>, RHS
- 1558039862U, // <u,1,4,5>: Cost 2 vext2 <2,3,u,1>, RHS
- 2645716371U, // <u,1,4,6>: Cost 3 vext2 <4,6,u,1>, <4,6,u,1>
- 2597361284U, // <u,1,4,7>: Cost 3 vext1 <7,u,1,4>, <7,u,1,4>
- 1152755117U, // <u,1,4,u>: Cost 2 vrev <1,u,u,4>
- 1481818214U, // <u,1,5,0>: Cost 2 vext1 <0,u,1,5>, LHS
- 2555560694U, // <u,1,5,1>: Cost 3 vext1 <0,u,1,5>, <1,0,3,2>
- 2555561576U, // <u,1,5,2>: Cost 3 vext1 <0,u,1,5>, <2,2,2,2>
- 1611490448U, // <u,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
- 1481821494U, // <u,1,5,4>: Cost 2 vext1 <0,u,1,5>, RHS
- 2651025435U, // <u,1,5,5>: Cost 3 vext2 <5,5,u,1>, <5,5,u,1>
- 2651689068U, // <u,1,5,6>: Cost 3 vext2 <5,6,u,1>, <5,6,u,1>
- 2823966006U, // <u,1,5,7>: Cost 3 vuzpr <0,u,1,1>, RHS
- 1611932861U, // <u,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
- 2555568230U, // <u,1,6,0>: Cost 3 vext1 <0,u,1,6>, LHS
- 2689877199U, // <u,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
- 2712069336U, // <u,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
- 2685232353U, // <u,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
- 2555571510U, // <u,1,6,4>: Cost 3 vext1 <0,u,1,6>, RHS
- 2689877235U, // <u,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
- 2657661765U, // <u,1,6,6>: Cost 3 vext2 <6,6,u,1>, <6,6,u,1>
- 1584583574U, // <u,1,6,7>: Cost 2 vext2 <6,7,u,1>, <6,7,u,1>
- 1585247207U, // <u,1,6,u>: Cost 2 vext2 <6,u,u,1>, <6,u,u,1>
- 2561548390U, // <u,1,7,0>: Cost 3 vext1 <1,u,1,7>, LHS
- 2561549681U, // <u,1,7,1>: Cost 3 vext1 <1,u,1,7>, <1,u,1,7>
- 2573493926U, // <u,1,7,2>: Cost 3 vext1 <3,u,1,7>, <2,3,0,1>
- 2042962022U, // <u,1,7,3>: Cost 2 vtrnr RHS, LHS
- 2561551670U, // <u,1,7,4>: Cost 3 vext1 <1,u,1,7>, RHS
- 2226300309U, // <u,1,7,5>: Cost 3 vrev <1,u,5,7>
- 2658325990U, // <u,1,7,6>: Cost 3 vext2 <6,7,u,1>, <7,6,1,u>
- 2658326124U, // <u,1,7,7>: Cost 3 vext2 <6,7,u,1>, <7,7,7,7>
- 2042962027U, // <u,1,7,u>: Cost 2 vtrnr RHS, LHS
- 1481842790U, // <u,1,u,0>: Cost 2 vext1 <0,u,1,u>, LHS
- 202162278U, // <u,1,u,1>: Cost 1 vdup1 LHS
- 2685674867U, // <u,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
- 835584U, // <u,1,u,3>: Cost 0 copy LHS
- 1481846070U, // <u,1,u,4>: Cost 2 vext1 <0,u,1,u>, RHS
- 1611933077U, // <u,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
- 2685674910U, // <u,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
- 1523652232U, // <u,1,u,7>: Cost 2 vext1 <7,u,1,u>, <7,u,1,u>
- 835584U, // <u,1,u,u>: Cost 0 copy LHS
- 1544110154U, // <u,2,0,0>: Cost 2 vext2 <0,0,u,2>, <0,0,u,2>
- 1545437286U, // <u,2,0,1>: Cost 2 vext2 <0,2,u,2>, LHS
- 1545437420U, // <u,2,0,2>: Cost 2 vext2 <0,2,u,2>, <0,2,u,2>
- 2685232589U, // <u,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
- 2619179346U, // <u,2,0,4>: Cost 3 vext2 <0,2,u,2>, <0,4,1,5>
- 2712069606U, // <u,2,0,5>: Cost 3 vext3 RHS, <2,0,5,7>
- 2689877484U, // <u,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
- 2659656273U, // <u,2,0,7>: Cost 3 vext2 <7,0,u,2>, <0,7,2,u>
- 1545437853U, // <u,2,0,u>: Cost 2 vext2 <0,2,u,2>, LHS
- 1550082851U, // <u,2,1,0>: Cost 2 vext2 <1,0,u,2>, <1,0,u,2>
- 2619179828U, // <u,2,1,1>: Cost 3 vext2 <0,2,u,2>, <1,1,1,1>
- 2619179926U, // <u,2,1,2>: Cost 3 vext2 <0,2,u,2>, <1,2,3,0>
- 2685232671U, // <u,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
- 2555604278U, // <u,2,1,4>: Cost 3 vext1 <0,u,2,1>, RHS
- 2619180176U, // <u,2,1,5>: Cost 3 vext2 <0,2,u,2>, <1,5,3,7>
- 2689877564U, // <u,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
- 2602718850U, // <u,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
- 1158703235U, // <u,2,1,u>: Cost 2 vrev <2,u,u,1>
- 1481867366U, // <u,2,2,0>: Cost 2 vext1 <0,u,2,2>, LHS
- 2555609846U, // <u,2,2,1>: Cost 3 vext1 <0,u,2,2>, <1,0,3,2>
- 269271142U, // <u,2,2,2>: Cost 1 vdup2 LHS
- 1611490930U, // <u,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
- 1481870646U, // <u,2,2,4>: Cost 2 vext1 <0,u,2,2>, RHS
- 2689877640U, // <u,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
- 2619180986U, // <u,2,2,6>: Cost 3 vext2 <0,2,u,2>, <2,6,3,7>
- 2593436837U, // <u,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
- 269271142U, // <u,2,2,u>: Cost 1 vdup2 LHS
- 408134301U, // <u,2,3,0>: Cost 1 vext1 LHS, LHS
- 1481876214U, // <u,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1481877096U, // <u,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1880326246U, // <u,2,3,3>: Cost 2 vzipr LHS, LHS
- 408137014U, // <u,2,3,4>: Cost 1 vext1 LHS, RHS
- 1529654992U, // <u,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
- 1529655802U, // <u,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1529656314U, // <u,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 408139566U, // <u,2,3,u>: Cost 1 vext1 LHS, LHS
- 1567853468U, // <u,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
- 2561598362U, // <u,2,4,1>: Cost 3 vext1 <1,u,2,4>, <1,2,3,4>
- 2555627214U, // <u,2,4,2>: Cost 3 vext1 <0,u,2,4>, <2,3,4,5>
- 2685232918U, // <u,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
- 2555628854U, // <u,2,4,4>: Cost 3 vext1 <0,u,2,4>, RHS
- 1545440566U, // <u,2,4,5>: Cost 2 vext2 <0,2,u,2>, RHS
- 1571982740U, // <u,2,4,6>: Cost 2 vext2 <4,6,u,2>, <4,6,u,2>
- 2592125957U, // <u,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
- 1545440809U, // <u,2,4,u>: Cost 2 vext2 <0,2,u,2>, RHS
- 2555633766U, // <u,2,5,0>: Cost 3 vext1 <0,u,2,5>, LHS
- 2561606550U, // <u,2,5,1>: Cost 3 vext1 <1,u,2,5>, <1,2,3,0>
- 2689877856U, // <u,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
- 2685233000U, // <u,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
- 1158441059U, // <u,2,5,4>: Cost 2 vrev <2,u,4,5>
- 2645725188U, // <u,2,5,5>: Cost 3 vext2 <4,6,u,2>, <5,5,5,5>
- 2689877892U, // <u,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
- 2823900470U, // <u,2,5,7>: Cost 3 vuzpr <0,u,0,2>, RHS
- 1158736007U, // <u,2,5,u>: Cost 2 vrev <2,u,u,5>
- 1481900134U, // <u,2,6,0>: Cost 2 vext1 <0,u,2,6>, LHS
- 2555642614U, // <u,2,6,1>: Cost 3 vext1 <0,u,2,6>, <1,0,3,2>
- 2555643496U, // <u,2,6,2>: Cost 3 vext1 <0,u,2,6>, <2,2,2,2>
- 1611491258U, // <u,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
- 1481903414U, // <u,2,6,4>: Cost 2 vext1 <0,u,2,6>, RHS
- 2689877964U, // <u,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
- 2689877973U, // <u,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
- 2645726030U, // <u,2,6,7>: Cost 3 vext2 <4,6,u,2>, <6,7,0,1>
- 1611933671U, // <u,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
- 1585919033U, // <u,2,7,0>: Cost 2 vext2 <7,0,u,2>, <7,0,u,2>
- 2573566710U, // <u,2,7,1>: Cost 3 vext1 <3,u,2,7>, <1,0,3,2>
- 2567596115U, // <u,2,7,2>: Cost 3 vext1 <2,u,2,7>, <2,u,2,7>
- 1906901094U, // <u,2,7,3>: Cost 2 vzipr RHS, LHS
- 2555653430U, // <u,2,7,4>: Cost 3 vext1 <0,u,2,7>, RHS
- 2800080230U, // <u,2,7,5>: Cost 3 vuzpl LHS, <7,4,5,6>
- 2980643164U, // <u,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
- 2645726828U, // <u,2,7,7>: Cost 3 vext2 <4,6,u,2>, <7,7,7,7>
- 1906901099U, // <u,2,7,u>: Cost 2 vzipr RHS, LHS
- 408175266U, // <u,2,u,0>: Cost 1 vext1 LHS, LHS
- 1545443118U, // <u,2,u,1>: Cost 2 vext2 <0,2,u,2>, LHS
- 269271142U, // <u,2,u,2>: Cost 1 vdup2 LHS
- 1611491416U, // <u,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
- 408177974U, // <u,2,u,4>: Cost 1 vext1 LHS, RHS
- 1545443482U, // <u,2,u,5>: Cost 2 vext2 <0,2,u,2>, RHS
- 1726339226U, // <u,2,u,6>: Cost 2 vuzpl LHS, RHS
- 1529697274U, // <u,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 408180526U, // <u,2,u,u>: Cost 1 vext1 LHS, LHS
- 1544781824U, // <u,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 471040156U, // <u,3,0,1>: Cost 1 vext2 LHS, LHS
- 1544781988U, // <u,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 2618523900U, // <u,3,0,3>: Cost 3 vext2 LHS, <0,3,1,0>
- 1544782162U, // <u,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2238188352U, // <u,3,0,5>: Cost 3 vrev <3,u,5,0>
- 2623169023U, // <u,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
- 2238335826U, // <u,3,0,7>: Cost 3 vrev <3,u,7,0>
- 471040669U, // <u,3,0,u>: Cost 1 vext2 LHS, LHS
- 1544782582U, // <u,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544782644U, // <u,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544782742U, // <u,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1544782808U, // <u,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2618524733U, // <u,3,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
- 1544782992U, // <u,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2618524897U, // <u,3,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
- 2703517987U, // <u,3,1,7>: Cost 3 vext3 <3,1,7,u>, <3,1,7,u>
- 1544783213U, // <u,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
- 1529716838U, // <u,3,2,0>: Cost 2 vext1 <u,u,3,2>, LHS
- 1164167966U, // <u,3,2,1>: Cost 2 vrev <3,u,1,2>
- 1544783464U, // <u,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
- 1544783526U, // <u,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 1529720118U, // <u,3,2,4>: Cost 2 vext1 <u,u,3,2>, RHS
- 2618525544U, // <u,3,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544783802U, // <u,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 2704181620U, // <u,3,2,7>: Cost 3 vext3 <3,2,7,u>, <3,2,7,u>
- 1544783931U, // <u,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
- 1544784022U, // <u,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 1487922559U, // <u,3,3,1>: Cost 2 vext1 <1,u,3,3>, <1,u,3,3>
- 1493895256U, // <u,3,3,2>: Cost 2 vext1 <2,u,3,3>, <2,u,3,3>
- 336380006U, // <u,3,3,3>: Cost 1 vdup3 LHS
- 1544784386U, // <u,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2824054478U, // <u,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
- 2238286668U, // <u,3,3,6>: Cost 3 vrev <3,u,6,3>
- 2954069136U, // <u,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
- 336380006U, // <u,3,3,u>: Cost 1 vdup3 LHS
- 1487929446U, // <u,3,4,0>: Cost 2 vext1 <1,u,3,4>, LHS
- 1487930752U, // <u,3,4,1>: Cost 2 vext1 <1,u,3,4>, <1,u,3,4>
- 2623171644U, // <u,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
- 2561673366U, // <u,3,4,3>: Cost 3 vext1 <1,u,3,4>, <3,0,1,2>
- 1487932726U, // <u,3,4,4>: Cost 2 vext1 <1,u,3,4>, RHS
- 471043382U, // <u,3,4,5>: Cost 1 vext2 LHS, RHS
- 1592561012U, // <u,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 2238368598U, // <u,3,4,7>: Cost 3 vrev <3,u,7,4>
- 471043625U, // <u,3,4,u>: Cost 1 vext2 LHS, RHS
- 2555707494U, // <u,3,5,0>: Cost 3 vext1 <0,u,3,5>, LHS
- 1574645465U, // <u,3,5,1>: Cost 2 vext2 <5,1,u,3>, <5,1,u,3>
- 2567653106U, // <u,3,5,2>: Cost 3 vext1 <2,u,3,5>, <2,3,u,5>
- 2555709954U, // <u,3,5,3>: Cost 3 vext1 <0,u,3,5>, <3,4,5,6>
- 1592561606U, // <u,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592561668U, // <u,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1592561762U, // <u,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
- 1750314294U, // <u,3,5,7>: Cost 2 vuzpr LHS, RHS
- 1750314295U, // <u,3,5,u>: Cost 2 vuzpr LHS, RHS
- 2623172897U, // <u,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
- 2561688962U, // <u,3,6,1>: Cost 3 vext1 <1,u,3,6>, <1,u,3,6>
- 1581281795U, // <u,3,6,2>: Cost 2 vext2 <6,2,u,3>, <6,2,u,3>
- 2706541204U, // <u,3,6,3>: Cost 3 vext3 <3,6,3,u>, <3,6,3,u>
- 2623173261U, // <u,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
- 1164495686U, // <u,3,6,5>: Cost 2 vrev <3,u,5,6>
- 1592562488U, // <u,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592562510U, // <u,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1164716897U, // <u,3,6,u>: Cost 2 vrev <3,u,u,6>
- 1487954022U, // <u,3,7,0>: Cost 2 vext1 <1,u,3,7>, LHS
- 1487955331U, // <u,3,7,1>: Cost 2 vext1 <1,u,3,7>, <1,u,3,7>
- 1493928028U, // <u,3,7,2>: Cost 2 vext1 <2,u,3,7>, <2,u,3,7>
- 2561697942U, // <u,3,7,3>: Cost 3 vext1 <1,u,3,7>, <3,0,1,2>
- 1487957302U, // <u,3,7,4>: Cost 2 vext1 <1,u,3,7>, RHS
- 2707352311U, // <u,3,7,5>: Cost 3 vext3 <3,7,5,u>, <3,7,5,u>
- 2655024623U, // <u,3,7,6>: Cost 3 vext2 <6,2,u,3>, <7,6,2,u>
- 1592563308U, // <u,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1487959854U, // <u,3,7,u>: Cost 2 vext1 <1,u,3,7>, LHS
- 1544787667U, // <u,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
- 471045934U, // <u,3,u,1>: Cost 1 vext2 LHS, LHS
- 1549432709U, // <u,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
- 336380006U, // <u,3,u,3>: Cost 1 vdup3 LHS
- 1544788031U, // <u,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
- 471046298U, // <u,3,u,5>: Cost 1 vext2 LHS, RHS
- 1549433040U, // <u,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
- 1750314537U, // <u,3,u,7>: Cost 2 vuzpr LHS, RHS
- 471046501U, // <u,3,u,u>: Cost 1 vext2 LHS, LHS
- 2625167360U, // <u,4,0,0>: Cost 3 vext2 <1,2,u,4>, <0,0,0,0>
- 1551425638U, // <u,4,0,1>: Cost 2 vext2 <1,2,u,4>, LHS
- 2619195630U, // <u,4,0,2>: Cost 3 vext2 <0,2,u,4>, <0,2,u,4>
- 2619343104U, // <u,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
- 2625167698U, // <u,4,0,4>: Cost 3 vext2 <1,2,u,4>, <0,4,1,5>
- 1638329234U, // <u,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
- 1638329244U, // <u,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
- 3787803556U, // <u,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
- 1551426205U, // <u,4,0,u>: Cost 2 vext2 <1,2,u,4>, LHS
- 2555748454U, // <u,4,1,0>: Cost 3 vext1 <0,u,4,1>, LHS
- 2625168180U, // <u,4,1,1>: Cost 3 vext2 <1,2,u,4>, <1,1,1,1>
- 1551426503U, // <u,4,1,2>: Cost 2 vext2 <1,2,u,4>, <1,2,u,4>
- 2625168344U, // <u,4,1,3>: Cost 3 vext2 <1,2,u,4>, <1,3,1,3>
- 2555751734U, // <u,4,1,4>: Cost 3 vext1 <0,u,4,1>, RHS
- 1860554038U, // <u,4,1,5>: Cost 2 vzipl LHS, RHS
- 2689879022U, // <u,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
- 2592248852U, // <u,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
- 1555408301U, // <u,4,1,u>: Cost 2 vext2 <1,u,u,4>, <1,u,u,4>
- 2555756646U, // <u,4,2,0>: Cost 3 vext1 <0,u,4,2>, LHS
- 2625168943U, // <u,4,2,1>: Cost 3 vext2 <1,2,u,4>, <2,1,4,u>
- 2625169000U, // <u,4,2,2>: Cost 3 vext2 <1,2,u,4>, <2,2,2,2>
- 2619197134U, // <u,4,2,3>: Cost 3 vext2 <0,2,u,4>, <2,3,4,5>
- 2555759926U, // <u,4,2,4>: Cost 3 vext1 <0,u,4,2>, RHS
- 2712071222U, // <u,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
- 1994771766U, // <u,4,2,6>: Cost 2 vtrnl LHS, RHS
- 2592257045U, // <u,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
- 1994771784U, // <u,4,2,u>: Cost 2 vtrnl LHS, RHS
- 2625169558U, // <u,4,3,0>: Cost 3 vext2 <1,2,u,4>, <3,0,1,2>
- 2567709594U, // <u,4,3,1>: Cost 3 vext1 <2,u,4,3>, <1,2,3,4>
- 2567710817U, // <u,4,3,2>: Cost 3 vext1 <2,u,4,3>, <2,u,4,3>
- 2625169820U, // <u,4,3,3>: Cost 3 vext2 <1,2,u,4>, <3,3,3,3>
- 2625169922U, // <u,4,3,4>: Cost 3 vext2 <1,2,u,4>, <3,4,5,6>
- 2954069710U, // <u,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 2954068172U, // <u,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
- 3903849472U, // <u,4,3,7>: Cost 4 vuzpr <1,u,3,4>, <1,3,5,7>
- 2954068174U, // <u,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
- 1505919078U, // <u,4,4,0>: Cost 2 vext1 <4,u,4,4>, LHS
- 2567717831U, // <u,4,4,1>: Cost 3 vext1 <2,u,4,4>, <1,2,u,4>
- 2567719010U, // <u,4,4,2>: Cost 3 vext1 <2,u,4,4>, <2,u,4,4>
- 2570373542U, // <u,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
- 161926454U, // <u,4,4,4>: Cost 1 vdup0 RHS
- 1551428918U, // <u,4,4,5>: Cost 2 vext2 <1,2,u,4>, RHS
- 1638329572U, // <u,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
- 2594927963U, // <u,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
- 161926454U, // <u,4,4,u>: Cost 1 vdup0 RHS
- 1493983334U, // <u,4,5,0>: Cost 2 vext1 <2,u,4,5>, LHS
- 2689879301U, // <u,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
- 1493985379U, // <u,4,5,2>: Cost 2 vext1 <2,u,4,5>, <2,u,4,5>
- 2567727254U, // <u,4,5,3>: Cost 3 vext1 <2,u,4,5>, <3,0,1,2>
- 1493986614U, // <u,4,5,4>: Cost 2 vext1 <2,u,4,5>, RHS
- 1863535926U, // <u,4,5,5>: Cost 2 vzipl RHS, RHS
- 537750838U, // <u,4,5,6>: Cost 1 vext3 LHS, RHS
- 2830110006U, // <u,4,5,7>: Cost 3 vuzpr <1,u,3,4>, RHS
- 537750856U, // <u,4,5,u>: Cost 1 vext3 LHS, RHS
- 1482047590U, // <u,4,6,0>: Cost 2 vext1 <0,u,4,6>, LHS
- 2555790070U, // <u,4,6,1>: Cost 3 vext1 <0,u,4,6>, <1,0,3,2>
- 2555790952U, // <u,4,6,2>: Cost 3 vext1 <0,u,4,6>, <2,2,2,2>
- 2555791510U, // <u,4,6,3>: Cost 3 vext1 <0,u,4,6>, <3,0,1,2>
- 1482050870U, // <u,4,6,4>: Cost 2 vext1 <0,u,4,6>, RHS
- 2689879422U, // <u,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
- 1997753654U, // <u,4,6,6>: Cost 2 vtrnl RHS, RHS
- 2712071562U, // <u,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
- 1482053422U, // <u,4,6,u>: Cost 2 vext1 <0,u,4,6>, LHS
- 2567741542U, // <u,4,7,0>: Cost 3 vext1 <2,u,4,7>, LHS
- 2567742362U, // <u,4,7,1>: Cost 3 vext1 <2,u,4,7>, <1,2,3,4>
- 2567743589U, // <u,4,7,2>: Cost 3 vext1 <2,u,4,7>, <2,u,4,7>
- 2573716286U, // <u,4,7,3>: Cost 3 vext1 <3,u,4,7>, <3,u,4,7>
- 2567744822U, // <u,4,7,4>: Cost 3 vext1 <2,u,4,7>, RHS
- 2712071624U, // <u,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
- 96808489U, // <u,4,7,6>: Cost 1 vrev RHS
- 2651715180U, // <u,4,7,7>: Cost 3 vext2 <5,6,u,4>, <7,7,7,7>
- 96955963U, // <u,4,7,u>: Cost 1 vrev RHS
- 1482063974U, // <u,4,u,0>: Cost 2 vext1 <0,u,4,u>, LHS
- 1551431470U, // <u,4,u,1>: Cost 2 vext2 <1,2,u,4>, LHS
- 1494009958U, // <u,4,u,2>: Cost 2 vext1 <2,u,4,u>, <2,u,4,u>
- 2555807894U, // <u,4,u,3>: Cost 3 vext1 <0,u,4,u>, <3,0,1,2>
- 161926454U, // <u,4,u,4>: Cost 1 vdup0 RHS
- 1551431834U, // <u,4,u,5>: Cost 2 vext2 <1,2,u,4>, RHS
- 537751081U, // <u,4,u,6>: Cost 1 vext3 LHS, RHS
- 2830110249U, // <u,4,u,7>: Cost 3 vuzpr <1,u,3,4>, RHS
- 537751099U, // <u,4,u,u>: Cost 1 vext3 LHS, RHS
- 2631811072U, // <u,5,0,0>: Cost 3 vext2 <2,3,u,5>, <0,0,0,0>
- 1558069350U, // <u,5,0,1>: Cost 2 vext2 <2,3,u,5>, LHS
- 2619203823U, // <u,5,0,2>: Cost 3 vext2 <0,2,u,5>, <0,2,u,5>
- 2619867456U, // <u,5,0,3>: Cost 3 vext2 <0,3,u,5>, <0,3,u,5>
- 1546273106U, // <u,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
- 2733010539U, // <u,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
- 2597622682U, // <u,5,0,6>: Cost 3 vext1 <7,u,5,0>, <6,7,u,5>
- 1176539396U, // <u,5,0,7>: Cost 2 vrev <5,u,7,0>
- 1558069917U, // <u,5,0,u>: Cost 2 vext2 <2,3,u,5>, LHS
- 1505968230U, // <u,5,1,0>: Cost 2 vext1 <4,u,5,1>, LHS
- 2624512887U, // <u,5,1,1>: Cost 3 vext2 <1,1,u,5>, <1,1,u,5>
- 2631811990U, // <u,5,1,2>: Cost 3 vext2 <2,3,u,5>, <1,2,3,0>
- 2618541056U, // <u,5,1,3>: Cost 3 vext2 <0,1,u,5>, <1,3,5,7>
- 1505971510U, // <u,5,1,4>: Cost 2 vext1 <4,u,5,1>, RHS
- 2627167419U, // <u,5,1,5>: Cost 3 vext2 <1,5,u,5>, <1,5,u,5>
- 2579714554U, // <u,5,1,6>: Cost 3 vext1 <4,u,5,1>, <6,2,7,3>
- 1638330064U, // <u,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
- 1638477529U, // <u,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
- 2561802342U, // <u,5,2,0>: Cost 3 vext1 <1,u,5,2>, LHS
- 2561803264U, // <u,5,2,1>: Cost 3 vext1 <1,u,5,2>, <1,3,5,7>
- 2631149217U, // <u,5,2,2>: Cost 3 vext2 <2,2,u,5>, <2,2,u,5>
- 1558071026U, // <u,5,2,3>: Cost 2 vext2 <2,3,u,5>, <2,3,u,5>
- 2561805622U, // <u,5,2,4>: Cost 3 vext1 <1,u,5,2>, RHS
- 2714062607U, // <u,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
- 2631813050U, // <u,5,2,6>: Cost 3 vext2 <2,3,u,5>, <2,6,3,7>
- 3092335926U, // <u,5,2,7>: Cost 3 vtrnr <0,u,0,2>, RHS
- 1561389191U, // <u,5,2,u>: Cost 2 vext2 <2,u,u,5>, <2,u,u,5>
- 2561810534U, // <u,5,3,0>: Cost 3 vext1 <1,u,5,3>, LHS
- 2561811857U, // <u,5,3,1>: Cost 3 vext1 <1,u,5,3>, <1,u,5,3>
- 2631813474U, // <u,5,3,2>: Cost 3 vext2 <2,3,u,5>, <3,2,5,u>
- 2631813532U, // <u,5,3,3>: Cost 3 vext2 <2,3,u,5>, <3,3,3,3>
- 2619869698U, // <u,5,3,4>: Cost 3 vext2 <0,3,u,5>, <3,4,5,6>
- 3001847002U, // <u,5,3,5>: Cost 3 vzipr LHS, <4,4,5,5>
- 2954070530U, // <u,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2018749750U, // <u,5,3,7>: Cost 2 vtrnr LHS, RHS
- 2018749751U, // <u,5,3,u>: Cost 2 vtrnr LHS, RHS
- 2573762662U, // <u,5,4,0>: Cost 3 vext1 <3,u,5,4>, LHS
- 2620017634U, // <u,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
- 2573764338U, // <u,5,4,2>: Cost 3 vext1 <3,u,5,4>, <2,3,u,5>
- 2573765444U, // <u,5,4,3>: Cost 3 vext1 <3,u,5,4>, <3,u,5,4>
- 1570680053U, // <u,5,4,4>: Cost 2 vext2 <4,4,u,5>, <4,4,u,5>
- 1558072630U, // <u,5,4,5>: Cost 2 vext2 <2,3,u,5>, RHS
- 2645749143U, // <u,5,4,6>: Cost 3 vext2 <4,6,u,5>, <4,6,u,5>
- 1638330310U, // <u,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
- 1558072873U, // <u,5,4,u>: Cost 2 vext2 <2,3,u,5>, RHS
- 1506000998U, // <u,5,5,0>: Cost 2 vext1 <4,u,5,5>, LHS
- 2561827984U, // <u,5,5,1>: Cost 3 vext1 <1,u,5,5>, <1,5,3,7>
- 2579744360U, // <u,5,5,2>: Cost 3 vext1 <4,u,5,5>, <2,2,2,2>
- 2579744918U, // <u,5,5,3>: Cost 3 vext1 <4,u,5,5>, <3,0,1,2>
- 1506004278U, // <u,5,5,4>: Cost 2 vext1 <4,u,5,5>, RHS
- 229035318U, // <u,5,5,5>: Cost 1 vdup1 RHS
- 2712072206U, // <u,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
- 1638330392U, // <u,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
- 229035318U, // <u,5,5,u>: Cost 1 vdup1 RHS
- 1500037222U, // <u,5,6,0>: Cost 2 vext1 <3,u,5,6>, LHS
- 2561836436U, // <u,5,6,1>: Cost 3 vext1 <1,u,5,6>, <1,u,5,6>
- 2567809133U, // <u,5,6,2>: Cost 3 vext1 <2,u,5,6>, <2,u,5,6>
- 1500040006U, // <u,5,6,3>: Cost 2 vext1 <3,u,5,6>, <3,u,5,6>
- 1500040502U, // <u,5,6,4>: Cost 2 vext1 <3,u,5,6>, RHS
- 2714062935U, // <u,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
- 2712072288U, // <u,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
- 27705344U, // <u,5,6,7>: Cost 0 copy RHS
- 27705344U, // <u,5,6,u>: Cost 0 copy RHS
- 1488101478U, // <u,5,7,0>: Cost 2 vext1 <1,u,5,7>, LHS
- 1488102805U, // <u,5,7,1>: Cost 2 vext1 <1,u,5,7>, <1,u,5,7>
- 2561844840U, // <u,5,7,2>: Cost 3 vext1 <1,u,5,7>, <2,2,2,2>
- 2561845398U, // <u,5,7,3>: Cost 3 vext1 <1,u,5,7>, <3,0,1,2>
- 1488104758U, // <u,5,7,4>: Cost 2 vext1 <1,u,5,7>, RHS
- 1638330536U, // <u,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
- 2712072362U, // <u,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
- 2042965302U, // <u,5,7,7>: Cost 2 vtrnr RHS, RHS
- 1488107310U, // <u,5,7,u>: Cost 2 vext1 <1,u,5,7>, LHS
- 1488109670U, // <u,5,u,0>: Cost 2 vext1 <1,u,5,u>, LHS
- 1488110998U, // <u,5,u,1>: Cost 2 vext1 <1,u,5,u>, <1,u,5,u>
- 2561853032U, // <u,5,u,2>: Cost 3 vext1 <1,u,5,u>, <2,2,2,2>
- 1500056392U, // <u,5,u,3>: Cost 2 vext1 <3,u,5,u>, <3,u,5,u>
- 1488112950U, // <u,5,u,4>: Cost 2 vext1 <1,u,5,u>, RHS
- 229035318U, // <u,5,u,5>: Cost 1 vdup1 RHS
- 2954111490U, // <u,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 27705344U, // <u,5,u,7>: Cost 0 copy RHS
- 27705344U, // <u,5,u,u>: Cost 0 copy RHS
- 2619211776U, // <u,6,0,0>: Cost 3 vext2 <0,2,u,6>, <0,0,0,0>
- 1545470054U, // <u,6,0,1>: Cost 2 vext2 <0,2,u,6>, LHS
- 1545470192U, // <u,6,0,2>: Cost 2 vext2 <0,2,u,6>, <0,2,u,6>
- 2255958969U, // <u,6,0,3>: Cost 3 vrev <6,u,3,0>
- 1546797458U, // <u,6,0,4>: Cost 2 vext2 <0,4,u,6>, <0,4,u,6>
- 2720624971U, // <u,6,0,5>: Cost 3 vext3 <6,0,5,u>, <6,0,5,u>
- 2256180180U, // <u,6,0,6>: Cost 3 vrev <6,u,6,0>
- 2960682294U, // <u,6,0,7>: Cost 3 vzipr <1,2,u,0>, RHS
- 1545470621U, // <u,6,0,u>: Cost 2 vext2 <0,2,u,6>, LHS
- 1182004127U, // <u,6,1,0>: Cost 2 vrev <6,u,0,1>
- 2619212596U, // <u,6,1,1>: Cost 3 vext2 <0,2,u,6>, <1,1,1,1>
- 2619212694U, // <u,6,1,2>: Cost 3 vext2 <0,2,u,6>, <1,2,3,0>
- 2619212760U, // <u,6,1,3>: Cost 3 vext2 <0,2,u,6>, <1,3,1,3>
- 2626511979U, // <u,6,1,4>: Cost 3 vext2 <1,4,u,6>, <1,4,u,6>
- 2619212944U, // <u,6,1,5>: Cost 3 vext2 <0,2,u,6>, <1,5,3,7>
- 2714063264U, // <u,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
- 2967326006U, // <u,6,1,7>: Cost 3 vzipr <2,3,u,1>, RHS
- 1182594023U, // <u,6,1,u>: Cost 2 vrev <6,u,u,1>
- 1506050150U, // <u,6,2,0>: Cost 2 vext1 <4,u,6,2>, LHS
- 2579792630U, // <u,6,2,1>: Cost 3 vext1 <4,u,6,2>, <1,0,3,2>
- 2619213416U, // <u,6,2,2>: Cost 3 vext2 <0,2,u,6>, <2,2,2,2>
- 2619213478U, // <u,6,2,3>: Cost 3 vext2 <0,2,u,6>, <2,3,0,1>
- 1506053430U, // <u,6,2,4>: Cost 2 vext1 <4,u,6,2>, RHS
- 2633148309U, // <u,6,2,5>: Cost 3 vext2 <2,5,u,6>, <2,5,u,6>
- 2619213754U, // <u,6,2,6>: Cost 3 vext2 <0,2,u,6>, <2,6,3,7>
- 1638330874U, // <u,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
- 1638478339U, // <u,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
- 2619213974U, // <u,6,3,0>: Cost 3 vext2 <0,2,u,6>, <3,0,1,2>
- 2255836074U, // <u,6,3,1>: Cost 3 vrev <6,u,1,3>
- 2255909811U, // <u,6,3,2>: Cost 3 vrev <6,u,2,3>
- 2619214236U, // <u,6,3,3>: Cost 3 vext2 <0,2,u,6>, <3,3,3,3>
- 1564715549U, // <u,6,3,4>: Cost 2 vext2 <3,4,u,6>, <3,4,u,6>
- 2639121006U, // <u,6,3,5>: Cost 3 vext2 <3,5,u,6>, <3,5,u,6>
- 3001847012U, // <u,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
- 1880329526U, // <u,6,3,7>: Cost 2 vzipr LHS, RHS
- 1880329527U, // <u,6,3,u>: Cost 2 vzipr LHS, RHS
- 2567864422U, // <u,6,4,0>: Cost 3 vext1 <2,u,6,4>, LHS
- 2733011558U, // <u,6,4,1>: Cost 3 vext3 LHS, <6,4,1,3>
- 2567866484U, // <u,6,4,2>: Cost 3 vext1 <2,u,6,4>, <2,u,6,4>
- 2638458005U, // <u,6,4,3>: Cost 3 vext2 <3,4,u,6>, <4,3,6,u>
- 1570540772U, // <u,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
- 1545473334U, // <u,6,4,5>: Cost 2 vext2 <0,2,u,6>, RHS
- 1572015512U, // <u,6,4,6>: Cost 2 vext2 <4,6,u,6>, <4,6,u,6>
- 2960715062U, // <u,6,4,7>: Cost 3 vzipr <1,2,u,4>, RHS
- 1545473577U, // <u,6,4,u>: Cost 2 vext2 <0,2,u,6>, RHS
- 2567872614U, // <u,6,5,0>: Cost 3 vext1 <2,u,6,5>, LHS
- 2645757648U, // <u,6,5,1>: Cost 3 vext2 <4,6,u,6>, <5,1,7,3>
- 2567874490U, // <u,6,5,2>: Cost 3 vext1 <2,u,6,5>, <2,6,3,7>
- 2576501250U, // <u,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
- 1576660943U, // <u,6,5,4>: Cost 2 vext2 <5,4,u,6>, <5,4,u,6>
- 2645757956U, // <u,6,5,5>: Cost 3 vext2 <4,6,u,6>, <5,5,5,5>
- 2645758050U, // <u,6,5,6>: Cost 3 vext2 <4,6,u,6>, <5,6,7,0>
- 2824080694U, // <u,6,5,7>: Cost 3 vuzpr <0,u,2,6>, RHS
- 1182626795U, // <u,6,5,u>: Cost 2 vrev <6,u,u,5>
- 1506082918U, // <u,6,6,0>: Cost 2 vext1 <4,u,6,6>, LHS
- 2579825398U, // <u,6,6,1>: Cost 3 vext1 <4,u,6,6>, <1,0,3,2>
- 2645758458U, // <u,6,6,2>: Cost 3 vext2 <4,6,u,6>, <6,2,7,3>
- 2579826838U, // <u,6,6,3>: Cost 3 vext1 <4,u,6,6>, <3,0,1,2>
- 1506086198U, // <u,6,6,4>: Cost 2 vext1 <4,u,6,6>, RHS
- 2579828432U, // <u,6,6,5>: Cost 3 vext1 <4,u,6,6>, <5,1,7,3>
- 296144182U, // <u,6,6,6>: Cost 1 vdup2 RHS
- 1638331202U, // <u,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
- 296144182U, // <u,6,6,u>: Cost 1 vdup2 RHS
- 432349286U, // <u,6,7,0>: Cost 1 vext1 RHS, LHS
- 1506091766U, // <u,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
- 1506092648U, // <u,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1506093206U, // <u,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 432352809U, // <u,6,7,4>: Cost 1 vext1 RHS, RHS
- 1506094800U, // <u,6,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 1506095610U, // <u,6,7,6>: Cost 2 vext1 RHS, <6,2,7,3>
- 1906904374U, // <u,6,7,7>: Cost 2 vzipr RHS, RHS
- 432355118U, // <u,6,7,u>: Cost 1 vext1 RHS, LHS
- 432357478U, // <u,6,u,0>: Cost 1 vext1 RHS, LHS
- 1545475886U, // <u,6,u,1>: Cost 2 vext2 <0,2,u,6>, LHS
- 1506100840U, // <u,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1506101398U, // <u,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 432361002U, // <u,6,u,4>: Cost 1 vext1 RHS, RHS
- 1545476250U, // <u,6,u,5>: Cost 2 vext2 <0,2,u,6>, RHS
- 296144182U, // <u,6,u,6>: Cost 1 vdup2 RHS
- 1880370486U, // <u,6,u,7>: Cost 2 vzipr LHS, RHS
- 432363310U, // <u,6,u,u>: Cost 1 vext1 RHS, LHS
- 1571356672U, // <u,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497614950U, // <u,7,0,1>: Cost 1 vext2 RHS, LHS
- 1571356836U, // <u,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2573880146U, // <u,7,0,3>: Cost 3 vext1 <3,u,7,0>, <3,u,7,0>
- 1571357010U, // <u,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1512083716U, // <u,7,0,5>: Cost 2 vext1 <5,u,7,0>, <5,u,7,0>
- 2621874741U, // <u,7,0,6>: Cost 3 vext2 <0,6,u,7>, <0,6,u,7>
- 2585826298U, // <u,7,0,7>: Cost 3 vext1 <5,u,7,0>, <7,0,1,2>
- 497615517U, // <u,7,0,u>: Cost 1 vext2 RHS, LHS
- 1571357430U, // <u,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571357492U, // <u,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1571357590U, // <u,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
- 1552114715U, // <u,7,1,3>: Cost 2 vext2 <1,3,u,7>, <1,3,u,7>
- 2573888822U, // <u,7,1,4>: Cost 3 vext1 <3,u,7,1>, RHS
- 1553441981U, // <u,7,1,5>: Cost 2 vext2 <1,5,u,7>, <1,5,u,7>
- 2627847438U, // <u,7,1,6>: Cost 3 vext2 <1,6,u,7>, <1,6,u,7>
- 2727408775U, // <u,7,1,7>: Cost 3 vext3 <7,1,7,u>, <7,1,7,u>
- 1555432880U, // <u,7,1,u>: Cost 2 vext2 <1,u,u,7>, <1,u,u,7>
- 2629838337U, // <u,7,2,0>: Cost 3 vext2 <2,0,u,7>, <2,0,u,7>
- 1188058754U, // <u,7,2,1>: Cost 2 vrev <7,u,1,2>
- 1571358312U, // <u,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571358374U, // <u,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 2632492869U, // <u,7,2,4>: Cost 3 vext2 <2,4,u,7>, <2,4,u,7>
- 2633156502U, // <u,7,2,5>: Cost 3 vext2 <2,5,u,7>, <2,5,u,7>
- 1560078311U, // <u,7,2,6>: Cost 2 vext2 <2,6,u,7>, <2,6,u,7>
- 2728072408U, // <u,7,2,7>: Cost 3 vext3 <7,2,7,u>, <7,2,7,u>
- 1561405577U, // <u,7,2,u>: Cost 2 vext2 <2,u,u,7>, <2,u,u,7>
- 1571358870U, // <u,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2627184913U, // <u,7,3,1>: Cost 3 vext2 <1,5,u,7>, <3,1,5,u>
- 2633820523U, // <u,7,3,2>: Cost 3 vext2 <2,6,u,7>, <3,2,6,u>
- 1571359132U, // <u,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571359234U, // <u,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 1512108295U, // <u,7,3,5>: Cost 2 vext1 <5,u,7,3>, <5,u,7,3>
- 1518080992U, // <u,7,3,6>: Cost 2 vext1 <6,u,7,3>, <6,u,7,3>
- 2640456465U, // <u,7,3,7>: Cost 3 vext2 <3,7,u,7>, <3,7,u,7>
- 1571359518U, // <u,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1571359634U, // <u,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2573911067U, // <u,7,4,1>: Cost 3 vext1 <3,u,7,4>, <1,3,u,7>
- 2645101622U, // <u,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
- 2573912918U, // <u,7,4,3>: Cost 3 vext1 <3,u,7,4>, <3,u,7,4>
- 1571359952U, // <u,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497618248U, // <u,7,4,5>: Cost 1 vext2 RHS, RHS
- 1571360116U, // <u,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 2645102024U, // <u,7,4,7>: Cost 3 vext2 RHS, <4,7,5,0>
- 497618473U, // <u,7,4,u>: Cost 1 vext2 RHS, RHS
- 2645102152U, // <u,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
- 1571360464U, // <u,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2645102334U, // <u,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
- 2645102447U, // <u,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
- 1571360710U, // <u,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571360772U, // <u,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1571360866U, // <u,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
- 1571360936U, // <u,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1571361017U, // <u,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
- 1530044518U, // <u,7,6,0>: Cost 2 vext1 <u,u,7,6>, LHS
- 2645103016U, // <u,7,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
- 1571361274U, // <u,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2645103154U, // <u,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
- 1530047798U, // <u,7,6,4>: Cost 2 vext1 <u,u,7,6>, RHS
- 1188386474U, // <u,7,6,5>: Cost 2 vrev <7,u,5,6>
- 1571361592U, // <u,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
- 1571361614U, // <u,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 1571361695U, // <u,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
- 1571361786U, // <u,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
- 2573935616U, // <u,7,7,1>: Cost 3 vext1 <3,u,7,7>, <1,3,5,7>
- 2645103781U, // <u,7,7,2>: Cost 3 vext2 RHS, <7,2,2,2>
- 2573937497U, // <u,7,7,3>: Cost 3 vext1 <3,u,7,7>, <3,u,7,7>
- 1571362150U, // <u,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
- 1512141067U, // <u,7,7,5>: Cost 2 vext1 <5,u,7,7>, <5,u,7,7>
- 1518113764U, // <u,7,7,6>: Cost 2 vext1 <6,u,7,7>, <6,u,7,7>
- 363253046U, // <u,7,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <u,7,7,u>: Cost 1 vdup3 RHS
- 1571362515U, // <u,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
- 497620782U, // <u,7,u,1>: Cost 1 vext2 RHS, LHS
- 1571362693U, // <u,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
- 1571362748U, // <u,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1571362879U, // <u,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
- 497621146U, // <u,7,u,5>: Cost 1 vext2 RHS, RHS
- 1571363024U, // <u,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
- 363253046U, // <u,7,u,7>: Cost 1 vdup3 RHS
- 497621349U, // <u,7,u,u>: Cost 1 vext2 RHS, LHS
- 135053414U, // <u,u,0,0>: Cost 1 vdup0 LHS
- 471081121U, // <u,u,0,1>: Cost 1 vext2 LHS, LHS
- 1544822948U, // <u,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 1616140005U, // <u,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
- 1544823122U, // <u,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 1512157453U, // <u,u,0,5>: Cost 2 vext1 <5,u,u,0>, <5,u,u,0>
- 1662220032U, // <u,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
- 1194457487U, // <u,u,0,7>: Cost 2 vrev <u,u,7,0>
- 471081629U, // <u,u,0,u>: Cost 1 vext2 LHS, LHS
- 1544823542U, // <u,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 202162278U, // <u,u,1,1>: Cost 1 vdup1 LHS
- 537753390U, // <u,u,1,2>: Cost 1 vext3 LHS, LHS
- 1544823768U, // <u,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 1494248758U, // <u,u,1,4>: Cost 2 vext1 <2,u,u,1>, RHS
- 1544823952U, // <u,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 1518138343U, // <u,u,1,6>: Cost 2 vext1 <6,u,u,1>, <6,u,u,1>
- 1640322907U, // <u,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
- 537753444U, // <u,u,1,u>: Cost 1 vext3 LHS, LHS
- 1482309734U, // <u,u,2,0>: Cost 2 vext1 <0,u,u,2>, LHS
- 1194031451U, // <u,u,2,1>: Cost 2 vrev <u,u,1,2>
- 269271142U, // <u,u,2,2>: Cost 1 vdup2 LHS
- 835584U, // <u,u,2,3>: Cost 0 copy LHS
- 1482313014U, // <u,u,2,4>: Cost 2 vext1 <0,u,u,2>, RHS
- 2618566504U, // <u,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544824762U, // <u,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 1638479788U, // <u,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
- 835584U, // <u,u,2,u>: Cost 0 copy LHS
- 408576723U, // <u,u,3,0>: Cost 1 vext1 LHS, LHS
- 1482318582U, // <u,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 120371557U, // <u,u,3,2>: Cost 1 vrev LHS
- 336380006U, // <u,u,3,3>: Cost 1 vdup3 LHS
- 408579382U, // <u,u,3,4>: Cost 1 vext1 LHS, RHS
- 1616140271U, // <u,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
- 1530098170U, // <u,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1880329544U, // <u,u,3,7>: Cost 2 vzipr LHS, RHS
- 408581934U, // <u,u,3,u>: Cost 1 vext1 LHS, LHS
- 1488298086U, // <u,u,4,0>: Cost 2 vext1 <1,u,u,4>, LHS
- 1488299437U, // <u,u,4,1>: Cost 2 vext1 <1,u,u,4>, <1,u,u,4>
- 1659271204U, // <u,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
- 1194195311U, // <u,u,4,3>: Cost 2 vrev <u,u,3,4>
- 161926454U, // <u,u,4,4>: Cost 1 vdup0 RHS
- 471084342U, // <u,u,4,5>: Cost 1 vext2 LHS, RHS
- 1571368308U, // <u,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 1640323153U, // <u,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
- 471084585U, // <u,u,4,u>: Cost 1 vext2 LHS, RHS
- 1494278246U, // <u,u,5,0>: Cost 2 vext1 <2,u,u,5>, LHS
- 1571368656U, // <u,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 1494280327U, // <u,u,5,2>: Cost 2 vext1 <2,u,u,5>, <2,u,u,5>
- 1616140415U, // <u,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
- 1494281526U, // <u,u,5,4>: Cost 2 vext1 <2,u,u,5>, RHS
- 229035318U, // <u,u,5,5>: Cost 1 vdup1 RHS
- 537753754U, // <u,u,5,6>: Cost 1 vext3 LHS, RHS
- 1750355254U, // <u,u,5,7>: Cost 2 vuzpr LHS, RHS
- 537753772U, // <u,u,5,u>: Cost 1 vext3 LHS, RHS
- 1482342502U, // <u,u,6,0>: Cost 2 vext1 <0,u,u,6>, LHS
- 2556084982U, // <u,u,6,1>: Cost 3 vext1 <0,u,u,6>, <1,0,3,2>
- 1571369466U, // <u,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 1611938000U, // <u,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
- 1482345782U, // <u,u,6,4>: Cost 2 vext1 <0,u,u,6>, RHS
- 1194359171U, // <u,u,6,5>: Cost 2 vrev <u,u,5,6>
- 296144182U, // <u,u,6,6>: Cost 1 vdup2 RHS
- 27705344U, // <u,u,6,7>: Cost 0 copy RHS
- 27705344U, // <u,u,6,u>: Cost 0 copy RHS
- 432496742U, // <u,u,7,0>: Cost 1 vext1 RHS, LHS
- 1488324016U, // <u,u,7,1>: Cost 2 vext1 <1,u,u,7>, <1,u,u,7>
- 1494296713U, // <u,u,7,2>: Cost 2 vext1 <2,u,u,7>, <2,u,u,7>
- 1906901148U, // <u,u,7,3>: Cost 2 vzipr RHS, LHS
- 432500283U, // <u,u,7,4>: Cost 1 vext1 RHS, RHS
- 1506242256U, // <u,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 120699277U, // <u,u,7,6>: Cost 1 vrev RHS
- 363253046U, // <u,u,7,7>: Cost 1 vdup3 RHS
- 432502574U, // <u,u,7,u>: Cost 1 vext1 RHS, LHS
- 408617688U, // <u,u,u,0>: Cost 1 vext1 LHS, LHS
- 471086894U, // <u,u,u,1>: Cost 1 vext2 LHS, LHS
- 537753957U, // <u,u,u,2>: Cost 1 vext3 LHS, LHS
- 835584U, // <u,u,u,3>: Cost 0 copy LHS
- 408620342U, // <u,u,u,4>: Cost 1 vext1 LHS, RHS
- 471087258U, // <u,u,u,5>: Cost 1 vext2 LHS, RHS
- 537753997U, // <u,u,u,6>: Cost 1 vext3 LHS, RHS
- 27705344U, // <u,u,u,7>: Cost 0 copy RHS
- 835584U, // <u,u,u,u>: Cost 0 copy LHS
+ 135053414U, // <0,0,0,0>: Cost 1 vdup0 LHS
+ 1543503974U, // <0,0,0,1>: Cost 2 vext2 <0,0,0,0>, LHS
+ 2618572962U, // <0,0,0,2>: Cost 3 vext2 <0,2,0,0>, <0,2,0,0>
+ 2568054923U, // <0,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
+ 1476398390U, // <0,0,0,4>: Cost 2 vext1 <0,0,0,0>, RHS
+ 2550140624U, // <0,0,0,5>: Cost 3 vext1 <0,0,0,0>, <5,1,7,3>
+ 2550141434U, // <0,0,0,6>: Cost 3 vext1 <0,0,0,0>, <6,2,7,3>
+ 2591945711U, // <0,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <0,0,0,u>: Cost 1 vdup0 LHS
+ 2886516736U, // <0,0,1,0>: Cost 3 vzipl LHS, <0,0,0,0>
+ 1812775014U, // <0,0,1,1>: Cost 2 vzipl LHS, LHS
+ 1618133094U, // <0,0,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
+ 2625209292U, // <0,0,1,3>: Cost 3 vext2 <1,3,0,0>, <1,3,0,0>
+ 2886558034U, // <0,0,1,4>: Cost 3 vzipl LHS, <0,4,1,5>
+ 2617246864U, // <0,0,1,5>: Cost 3 vext2 <0,0,0,0>, <1,5,3,7>
+ 3659723031U, // <0,0,1,6>: Cost 4 vext1 <6,0,0,1>, <6,0,0,1>
+ 2591953904U, // <0,0,1,7>: Cost 3 vext1 <7,0,0,1>, <7,0,0,1>
+ 1812775581U, // <0,0,1,u>: Cost 2 vzipl LHS, LHS
+ 3020734464U, // <0,0,2,0>: Cost 3 vtrnl LHS, <0,0,0,0>
+ 3020734474U, // <0,0,2,1>: Cost 3 vtrnl LHS, <0,0,1,1>
+ 1946992742U, // <0,0,2,2>: Cost 2 vtrnl LHS, LHS
+ 2631181989U, // <0,0,2,3>: Cost 3 vext2 <2,3,0,0>, <2,3,0,0>
+ 3020734668U, // <0,0,2,4>: Cost 3 vtrnl LHS, <0,2,4,6>
+ 3826550569U, // <0,0,2,5>: Cost 4 vuzpl <0,2,0,2>, <2,4,5,6>
+ 2617247674U, // <0,0,2,6>: Cost 3 vext2 <0,0,0,0>, <2,6,3,7>
+ 2591962097U, // <0,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
+ 1946992796U, // <0,0,2,u>: Cost 2 vtrnl LHS, LHS
+ 2635163787U, // <0,0,3,0>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
+ 2686419196U, // <0,0,3,1>: Cost 3 vext3 <0,3,1,0>, <0,3,1,0>
+ 2686492933U, // <0,0,3,2>: Cost 3 vext3 <0,3,2,0>, <0,3,2,0>
+ 2617248156U, // <0,0,3,3>: Cost 3 vext2 <0,0,0,0>, <3,3,3,3>
+ 2617248258U, // <0,0,3,4>: Cost 3 vext2 <0,0,0,0>, <3,4,5,6>
+ 3826551298U, // <0,0,3,5>: Cost 4 vuzpl <0,2,0,2>, <3,4,5,6>
+ 3690990200U, // <0,0,3,6>: Cost 4 vext2 <0,0,0,0>, <3,6,0,7>
+ 3713551042U, // <0,0,3,7>: Cost 4 vext2 <3,7,0,0>, <3,7,0,0>
+ 2635163787U, // <0,0,3,u>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
+ 2617248658U, // <0,0,4,0>: Cost 3 vext2 <0,0,0,0>, <4,0,5,1>
+ 2888450150U, // <0,0,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
+ 3021570150U, // <0,0,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
+ 3641829519U, // <0,0,4,3>: Cost 4 vext1 <3,0,0,4>, <3,0,0,4>
+ 3021570252U, // <0,0,4,4>: Cost 3 vtrnl <0,2,4,6>, <0,2,4,6>
+ 1543507254U, // <0,0,4,5>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2752810294U, // <0,0,4,6>: Cost 3 vuzpl <0,2,0,2>, RHS
+ 3786998152U, // <0,0,4,7>: Cost 4 vext3 <4,7,5,0>, <0,4,7,5>
+ 1543507497U, // <0,0,4,u>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2684354972U, // <0,0,5,0>: Cost 3 vext3 <0,0,0,0>, <0,5,0,7>
+ 2617249488U, // <0,0,5,1>: Cost 3 vext2 <0,0,0,0>, <5,1,7,3>
+ 3765617070U, // <0,0,5,2>: Cost 4 vext3 <1,2,3,0>, <0,5,2,7>
+ 3635865780U, // <0,0,5,3>: Cost 4 vext1 <2,0,0,5>, <3,0,4,5>
+ 2617249734U, // <0,0,5,4>: Cost 3 vext2 <0,0,0,0>, <5,4,7,6>
+ 2617249796U, // <0,0,5,5>: Cost 3 vext2 <0,0,0,0>, <5,5,5,5>
+ 2718712274U, // <0,0,5,6>: Cost 3 vext3 <5,6,7,0>, <0,5,6,7>
+ 2617249960U, // <0,0,5,7>: Cost 3 vext2 <0,0,0,0>, <5,7,5,7>
+ 2720039396U, // <0,0,5,u>: Cost 3 vext3 <5,u,7,0>, <0,5,u,7>
+ 2684355053U, // <0,0,6,0>: Cost 3 vext3 <0,0,0,0>, <0,6,0,7>
+ 3963609190U, // <0,0,6,1>: Cost 4 vzipl <0,6,2,7>, LHS
+ 2617250298U, // <0,0,6,2>: Cost 3 vext2 <0,0,0,0>, <6,2,7,3>
+ 3796435464U, // <0,0,6,3>: Cost 4 vext3 <6,3,7,0>, <0,6,3,7>
+ 3659762998U, // <0,0,6,4>: Cost 4 vext1 <6,0,0,6>, RHS
+ 3659763810U, // <0,0,6,5>: Cost 4 vext1 <6,0,0,6>, <5,6,7,0>
+ 2617250616U, // <0,0,6,6>: Cost 3 vext2 <0,0,0,0>, <6,6,6,6>
+ 2657727309U, // <0,0,6,7>: Cost 3 vext2 <6,7,0,0>, <6,7,0,0>
+ 2658390942U, // <0,0,6,u>: Cost 3 vext2 <6,u,0,0>, <6,u,0,0>
+ 2659054575U, // <0,0,7,0>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
+ 3635880854U, // <0,0,7,1>: Cost 4 vext1 <2,0,0,7>, <1,2,3,0>
+ 3635881401U, // <0,0,7,2>: Cost 4 vext1 <2,0,0,7>, <2,0,0,7>
+ 3734787298U, // <0,0,7,3>: Cost 4 vext2 <7,3,0,0>, <7,3,0,0>
+ 2617251174U, // <0,0,7,4>: Cost 3 vext2 <0,0,0,0>, <7,4,5,6>
+ 3659772002U, // <0,0,7,5>: Cost 4 vext1 <6,0,0,7>, <5,6,7,0>
+ 3659772189U, // <0,0,7,6>: Cost 4 vext1 <6,0,0,7>, <6,0,0,7>
+ 2617251436U, // <0,0,7,7>: Cost 3 vext2 <0,0,0,0>, <7,7,7,7>
+ 2659054575U, // <0,0,7,u>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <0,0,u,0>: Cost 1 vdup0 LHS
+ 1817419878U, // <0,0,u,1>: Cost 2 vzipl LHS, LHS
+ 1947435110U, // <0,0,u,2>: Cost 2 vtrnl LHS, LHS
+ 2568120467U, // <0,0,u,3>: Cost 3 vext1 <3,0,0,u>, <3,0,0,u>
+ 1476463926U, // <0,0,u,4>: Cost 2 vext1 <0,0,0,u>, RHS
+ 1543510170U, // <0,0,u,5>: Cost 2 vext2 <0,0,0,0>, RHS
+ 2752813210U, // <0,0,u,6>: Cost 3 vuzpl <0,2,0,2>, RHS
+ 2592011255U, // <0,0,u,7>: Cost 3 vext1 <7,0,0,u>, <7,0,0,u>
+ 135053414U, // <0,0,u,u>: Cost 1 vdup0 LHS
+ 2618581002U, // <0,1,0,0>: Cost 3 vext2 <0,2,0,1>, <0,0,1,1>
+ 1557446758U, // <0,1,0,1>: Cost 2 vext2 <2,3,0,1>, LHS
+ 2618581155U, // <0,1,0,2>: Cost 3 vext2 <0,2,0,1>, <0,2,0,1>
+ 2690548468U, // <0,1,0,3>: Cost 3 vext3 <1,0,3,0>, <1,0,3,0>
+ 2626543954U, // <0,1,0,4>: Cost 3 vext2 <1,5,0,1>, <0,4,1,5>
+ 4094985216U, // <0,1,0,5>: Cost 4 vtrnl <0,2,0,2>, <1,3,5,7>
+ 2592019278U, // <0,1,0,6>: Cost 3 vext1 <7,0,1,0>, <6,7,0,1>
+ 2592019448U, // <0,1,0,7>: Cost 3 vext1 <7,0,1,0>, <7,0,1,0>
+ 1557447325U, // <0,1,0,u>: Cost 2 vext2 <2,3,0,1>, LHS
+ 1476476938U, // <0,1,1,0>: Cost 2 vext1 <0,0,1,1>, <0,0,1,1>
+ 2886517556U, // <0,1,1,1>: Cost 3 vzipl LHS, <1,1,1,1>
+ 2886517654U, // <0,1,1,2>: Cost 3 vzipl LHS, <1,2,3,0>
+ 2886517720U, // <0,1,1,3>: Cost 3 vzipl LHS, <1,3,1,3>
+ 1476480310U, // <0,1,1,4>: Cost 2 vext1 <0,0,1,1>, RHS
+ 2886558864U, // <0,1,1,5>: Cost 3 vzipl LHS, <1,5,3,7>
+ 2550223354U, // <0,1,1,6>: Cost 3 vext1 <0,0,1,1>, <6,2,7,3>
+ 2550223856U, // <0,1,1,7>: Cost 3 vext1 <0,0,1,1>, <7,0,0,1>
+ 1476482862U, // <0,1,1,u>: Cost 2 vext1 <0,0,1,1>, LHS
+ 1494401126U, // <0,1,2,0>: Cost 2 vext1 <3,0,1,2>, LHS
+ 3020735284U, // <0,1,2,1>: Cost 3 vtrnl LHS, <1,1,1,1>
+ 2562172349U, // <0,1,2,2>: Cost 3 vext1 <2,0,1,2>, <2,0,1,2>
+ 835584U, // <0,1,2,3>: Cost 0 copy LHS
+ 1494404406U, // <0,1,2,4>: Cost 2 vext1 <3,0,1,2>, RHS
+ 3020735488U, // <0,1,2,5>: Cost 3 vtrnl LHS, <1,3,5,7>
+ 2631190458U, // <0,1,2,6>: Cost 3 vext2 <2,3,0,1>, <2,6,3,7>
+ 1518294010U, // <0,1,2,7>: Cost 2 vext1 <7,0,1,2>, <7,0,1,2>
+ 835584U, // <0,1,2,u>: Cost 0 copy LHS
+ 2692318156U, // <0,1,3,0>: Cost 3 vext3 <1,3,0,0>, <1,3,0,0>
+ 2691875800U, // <0,1,3,1>: Cost 3 vext3 <1,2,3,0>, <1,3,1,3>
+ 2691875806U, // <0,1,3,2>: Cost 3 vext3 <1,2,3,0>, <1,3,2,0>
+ 2692539367U, // <0,1,3,3>: Cost 3 vext3 <1,3,3,0>, <1,3,3,0>
+ 2562182454U, // <0,1,3,4>: Cost 3 vext1 <2,0,1,3>, RHS
+ 2691875840U, // <0,1,3,5>: Cost 3 vext3 <1,2,3,0>, <1,3,5,7>
+ 2692760578U, // <0,1,3,6>: Cost 3 vext3 <1,3,6,0>, <1,3,6,0>
+ 2639817411U, // <0,1,3,7>: Cost 3 vext2 <3,7,0,1>, <3,7,0,1>
+ 2691875863U, // <0,1,3,u>: Cost 3 vext3 <1,2,3,0>, <1,3,u,3>
+ 2568159334U, // <0,1,4,0>: Cost 3 vext1 <3,0,1,4>, LHS
+ 4095312692U, // <0,1,4,1>: Cost 4 vtrnl <0,2,4,6>, <1,1,1,1>
+ 2568160934U, // <0,1,4,2>: Cost 3 vext1 <3,0,1,4>, <2,3,0,1>
+ 2568161432U, // <0,1,4,3>: Cost 3 vext1 <3,0,1,4>, <3,0,1,4>
+ 2568162614U, // <0,1,4,4>: Cost 3 vext1 <3,0,1,4>, RHS
+ 1557450038U, // <0,1,4,5>: Cost 2 vext2 <2,3,0,1>, RHS
+ 2754235702U, // <0,1,4,6>: Cost 3 vuzpl <0,4,1,5>, RHS
+ 2592052220U, // <0,1,4,7>: Cost 3 vext1 <7,0,1,4>, <7,0,1,4>
+ 1557450281U, // <0,1,4,u>: Cost 2 vext2 <2,3,0,1>, RHS
+ 3765617775U, // <0,1,5,0>: Cost 4 vext3 <1,2,3,0>, <1,5,0,1>
+ 2647781007U, // <0,1,5,1>: Cost 3 vext2 <5,1,0,1>, <5,1,0,1>
+ 3704934138U, // <0,1,5,2>: Cost 4 vext2 <2,3,0,1>, <5,2,3,0>
+ 2691875984U, // <0,1,5,3>: Cost 3 vext3 <1,2,3,0>, <1,5,3,7>
+ 2657734598U, // <0,1,5,4>: Cost 3 vext2 <6,7,0,1>, <5,4,7,6>
+ 2650435539U, // <0,1,5,5>: Cost 3 vext2 <5,5,0,1>, <5,5,0,1>
+ 2651099172U, // <0,1,5,6>: Cost 3 vext2 <5,6,0,1>, <5,6,0,1>
+ 2651762805U, // <0,1,5,7>: Cost 3 vext2 <5,7,0,1>, <5,7,0,1>
+ 2691876029U, // <0,1,5,u>: Cost 3 vext3 <1,2,3,0>, <1,5,u,7>
+ 2592063590U, // <0,1,6,0>: Cost 3 vext1 <7,0,1,6>, LHS
+ 3765617871U, // <0,1,6,1>: Cost 4 vext3 <1,2,3,0>, <1,6,1,7>
+ 2654417337U, // <0,1,6,2>: Cost 3 vext2 <6,2,0,1>, <6,2,0,1>
+ 3765617889U, // <0,1,6,3>: Cost 4 vext3 <1,2,3,0>, <1,6,3,7>
+ 2592066870U, // <0,1,6,4>: Cost 3 vext1 <7,0,1,6>, RHS
+ 3765617907U, // <0,1,6,5>: Cost 4 vext3 <1,2,3,0>, <1,6,5,7>
+ 2657071869U, // <0,1,6,6>: Cost 3 vext2 <6,6,0,1>, <6,6,0,1>
+ 1583993678U, // <0,1,6,7>: Cost 2 vext2 <6,7,0,1>, <6,7,0,1>
+ 1584657311U, // <0,1,6,u>: Cost 2 vext2 <6,u,0,1>, <6,u,0,1>
+ 2657735672U, // <0,1,7,0>: Cost 3 vext2 <6,7,0,1>, <7,0,1,0>
+ 2657735808U, // <0,1,7,1>: Cost 3 vext2 <6,7,0,1>, <7,1,7,1>
+ 2631193772U, // <0,1,7,2>: Cost 3 vext2 <2,3,0,1>, <7,2,3,0>
+ 2661053667U, // <0,1,7,3>: Cost 3 vext2 <7,3,0,1>, <7,3,0,1>
+ 2657736038U, // <0,1,7,4>: Cost 3 vext2 <6,7,0,1>, <7,4,5,6>
+ 3721524621U, // <0,1,7,5>: Cost 4 vext2 <5,1,0,1>, <7,5,1,0>
+ 2657736158U, // <0,1,7,6>: Cost 3 vext2 <6,7,0,1>, <7,6,1,0>
+ 2657736300U, // <0,1,7,7>: Cost 3 vext2 <6,7,0,1>, <7,7,7,7>
+ 2657736322U, // <0,1,7,u>: Cost 3 vext2 <6,7,0,1>, <7,u,1,2>
+ 1494450278U, // <0,1,u,0>: Cost 2 vext1 <3,0,1,u>, LHS
+ 1557452590U, // <0,1,u,1>: Cost 2 vext2 <2,3,0,1>, LHS
+ 2754238254U, // <0,1,u,2>: Cost 3 vuzpl <0,4,1,5>, LHS
+ 835584U, // <0,1,u,3>: Cost 0 copy LHS
+ 1494453558U, // <0,1,u,4>: Cost 2 vext1 <3,0,1,u>, RHS
+ 1557452954U, // <0,1,u,5>: Cost 2 vext2 <2,3,0,1>, RHS
+ 2754238618U, // <0,1,u,6>: Cost 3 vuzpl <0,4,1,5>, RHS
+ 1518343168U, // <0,1,u,7>: Cost 2 vext1 <7,0,1,u>, <7,0,1,u>
+ 835584U, // <0,1,u,u>: Cost 0 copy LHS
+ 2752299008U, // <0,2,0,0>: Cost 3 vuzpl LHS, <0,0,0,0>
+ 1544847462U, // <0,2,0,1>: Cost 2 vext2 <0,2,0,2>, LHS
+ 1678557286U, // <0,2,0,2>: Cost 2 vuzpl LHS, LHS
+ 2696521165U, // <0,2,0,3>: Cost 3 vext3 <2,0,3,0>, <2,0,3,0>
+ 2752340172U, // <0,2,0,4>: Cost 3 vuzpl LHS, <0,2,4,6>
+ 2691876326U, // <0,2,0,5>: Cost 3 vext3 <1,2,3,0>, <2,0,5,7>
+ 2618589695U, // <0,2,0,6>: Cost 3 vext2 <0,2,0,2>, <0,6,2,7>
+ 2592093185U, // <0,2,0,7>: Cost 3 vext1 <7,0,2,0>, <7,0,2,0>
+ 1678557340U, // <0,2,0,u>: Cost 2 vuzpl LHS, LHS
+ 2618589942U, // <0,2,1,0>: Cost 3 vext2 <0,2,0,2>, <1,0,3,2>
+ 2752299828U, // <0,2,1,1>: Cost 3 vuzpl LHS, <1,1,1,1>
+ 2886518376U, // <0,2,1,2>: Cost 3 vzipl LHS, <2,2,2,2>
+ 2752299766U, // <0,2,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
+ 2550295862U, // <0,2,1,4>: Cost 3 vext1 <0,0,2,1>, RHS
+ 2752340992U, // <0,2,1,5>: Cost 3 vuzpl LHS, <1,3,5,7>
+ 2886559674U, // <0,2,1,6>: Cost 3 vzipl LHS, <2,6,3,7>
+ 3934208106U, // <0,2,1,7>: Cost 4 vuzpr <7,0,1,2>, <0,1,2,7>
+ 2752340771U, // <0,2,1,u>: Cost 3 vuzpl LHS, <1,0,u,2>
+ 1476558868U, // <0,2,2,0>: Cost 2 vext1 <0,0,2,2>, <0,0,2,2>
+ 2226628029U, // <0,2,2,1>: Cost 3 vrev <2,0,1,2>
+ 2752300648U, // <0,2,2,2>: Cost 3 vuzpl LHS, <2,2,2,2>
+ 3020736114U, // <0,2,2,3>: Cost 3 vtrnl LHS, <2,2,3,3>
+ 1476562230U, // <0,2,2,4>: Cost 2 vext1 <0,0,2,2>, RHS
+ 2550304464U, // <0,2,2,5>: Cost 3 vext1 <0,0,2,2>, <5,1,7,3>
+ 2618591162U, // <0,2,2,6>: Cost 3 vext2 <0,2,0,2>, <2,6,3,7>
+ 2550305777U, // <0,2,2,7>: Cost 3 vext1 <0,0,2,2>, <7,0,0,2>
+ 1476564782U, // <0,2,2,u>: Cost 2 vext1 <0,0,2,2>, LHS
+ 2618591382U, // <0,2,3,0>: Cost 3 vext2 <0,2,0,2>, <3,0,1,2>
+ 2752301206U, // <0,2,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
+ 3826043121U, // <0,2,3,2>: Cost 4 vuzpl LHS, <3,1,2,3>
+ 2752301468U, // <0,2,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
+ 2618591746U, // <0,2,3,4>: Cost 3 vext2 <0,2,0,2>, <3,4,5,6>
+ 2752301570U, // <0,2,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
+ 3830688102U, // <0,2,3,6>: Cost 4 vuzpl LHS, <3,2,6,3>
+ 2698807012U, // <0,2,3,7>: Cost 3 vext3 <2,3,7,0>, <2,3,7,0>
+ 2752301269U, // <0,2,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
+ 2562261094U, // <0,2,4,0>: Cost 3 vext1 <2,0,2,4>, LHS
+ 4095313828U, // <0,2,4,1>: Cost 4 vtrnl <0,2,4,6>, <2,6,1,3>
+ 2226718152U, // <0,2,4,2>: Cost 3 vrev <2,0,2,4>
+ 2568235169U, // <0,2,4,3>: Cost 3 vext1 <3,0,2,4>, <3,0,2,4>
+ 2562264374U, // <0,2,4,4>: Cost 3 vext1 <2,0,2,4>, RHS
+ 1544850742U, // <0,2,4,5>: Cost 2 vext2 <0,2,0,2>, RHS
+ 1678560566U, // <0,2,4,6>: Cost 2 vuzpl LHS, RHS
+ 2592125957U, // <0,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
+ 1678560584U, // <0,2,4,u>: Cost 2 vuzpl LHS, RHS
+ 2691876686U, // <0,2,5,0>: Cost 3 vext3 <1,2,3,0>, <2,5,0,7>
+ 2618592976U, // <0,2,5,1>: Cost 3 vext2 <0,2,0,2>, <5,1,7,3>
+ 3765618528U, // <0,2,5,2>: Cost 4 vext3 <1,2,3,0>, <2,5,2,7>
+ 3765618536U, // <0,2,5,3>: Cost 4 vext3 <1,2,3,0>, <2,5,3,6>
+ 2618593222U, // <0,2,5,4>: Cost 3 vext2 <0,2,0,2>, <5,4,7,6>
+ 2752303108U, // <0,2,5,5>: Cost 3 vuzpl LHS, <5,5,5,5>
+ 2618593378U, // <0,2,5,6>: Cost 3 vext2 <0,2,0,2>, <5,6,7,0>
+ 2824785206U, // <0,2,5,7>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 2824785207U, // <0,2,5,u>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 2752303950U, // <0,2,6,0>: Cost 3 vuzpl LHS, <6,7,0,1>
+ 3830690081U, // <0,2,6,1>: Cost 4 vuzpl LHS, <6,0,1,2>
+ 2618593786U, // <0,2,6,2>: Cost 3 vext2 <0,2,0,2>, <6,2,7,3>
+ 2691876794U, // <0,2,6,3>: Cost 3 vext3 <1,2,3,0>, <2,6,3,7>
+ 2752303990U, // <0,2,6,4>: Cost 3 vuzpl LHS, <6,7,4,5>
+ 3830690445U, // <0,2,6,5>: Cost 4 vuzpl LHS, <6,4,5,6>
+ 2752303928U, // <0,2,6,6>: Cost 3 vuzpl LHS, <6,6,6,6>
+ 2657743695U, // <0,2,6,7>: Cost 3 vext2 <6,7,0,2>, <6,7,0,2>
+ 2691876839U, // <0,2,6,u>: Cost 3 vext3 <1,2,3,0>, <2,6,u,7>
+ 2659070961U, // <0,2,7,0>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
+ 2659734594U, // <0,2,7,1>: Cost 3 vext2 <7,1,0,2>, <7,1,0,2>
+ 3734140051U, // <0,2,7,2>: Cost 4 vext2 <7,2,0,2>, <7,2,0,2>
+ 2701166596U, // <0,2,7,3>: Cost 3 vext3 <2,7,3,0>, <2,7,3,0>
+ 2662389094U, // <0,2,7,4>: Cost 3 vext2 <7,5,0,2>, <7,4,5,6>
+ 2662389126U, // <0,2,7,5>: Cost 3 vext2 <7,5,0,2>, <7,5,0,2>
+ 3736794583U, // <0,2,7,6>: Cost 4 vext2 <7,6,0,2>, <7,6,0,2>
+ 2752304748U, // <0,2,7,7>: Cost 3 vuzpl LHS, <7,7,7,7>
+ 2659070961U, // <0,2,7,u>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
+ 1476608026U, // <0,2,u,0>: Cost 2 vext1 <0,0,2,u>, <0,0,2,u>
+ 1544853294U, // <0,2,u,1>: Cost 2 vext2 <0,2,0,2>, LHS
+ 1678563118U, // <0,2,u,2>: Cost 2 vuzpl LHS, LHS
+ 3021178482U, // <0,2,u,3>: Cost 3 vtrnl LHS, <2,2,3,3>
+ 1476611382U, // <0,2,u,4>: Cost 2 vext1 <0,0,2,u>, RHS
+ 1544853658U, // <0,2,u,5>: Cost 2 vext2 <0,2,0,2>, RHS
+ 1678563482U, // <0,2,u,6>: Cost 2 vuzpl LHS, RHS
+ 2824785449U, // <0,2,u,7>: Cost 3 vuzpr <1,0,3,2>, RHS
+ 1678563172U, // <0,2,u,u>: Cost 2 vuzpl LHS, LHS
+ 2556329984U, // <0,3,0,0>: Cost 3 vext1 <1,0,3,0>, <0,0,0,0>
+ 2686421142U, // <0,3,0,1>: Cost 3 vext3 <0,3,1,0>, <3,0,1,2>
+ 2562303437U, // <0,3,0,2>: Cost 3 vext1 <2,0,3,0>, <2,0,3,0>
+ 4094986652U, // <0,3,0,3>: Cost 4 vtrnl <0,2,0,2>, <3,3,3,3>
+ 2556333366U, // <0,3,0,4>: Cost 3 vext1 <1,0,3,0>, RHS
+ 4094986754U, // <0,3,0,5>: Cost 4 vtrnl <0,2,0,2>, <3,4,5,6>
+ 3798796488U, // <0,3,0,6>: Cost 4 vext3 <6,7,3,0>, <3,0,6,7>
+ 3776530634U, // <0,3,0,7>: Cost 4 vext3 <3,0,7,0>, <3,0,7,0>
+ 2556335918U, // <0,3,0,u>: Cost 3 vext1 <1,0,3,0>, LHS
+ 2886518934U, // <0,3,1,0>: Cost 3 vzipl LHS, <3,0,1,2>
+ 2556338933U, // <0,3,1,1>: Cost 3 vext1 <1,0,3,1>, <1,0,3,1>
+ 2691877105U, // <0,3,1,2>: Cost 3 vext3 <1,2,3,0>, <3,1,2,3>
+ 2886519196U, // <0,3,1,3>: Cost 3 vzipl LHS, <3,3,3,3>
+ 2886519298U, // <0,3,1,4>: Cost 3 vzipl LHS, <3,4,5,6>
+ 4095740418U, // <0,3,1,5>: Cost 4 vtrnl <0,3,1,4>, <3,4,5,6>
+ 3659944242U, // <0,3,1,6>: Cost 4 vext1 <6,0,3,1>, <6,0,3,1>
+ 3769600286U, // <0,3,1,7>: Cost 4 vext3 <1,u,3,0>, <3,1,7,3>
+ 2886519582U, // <0,3,1,u>: Cost 3 vzipl LHS, <3,u,1,2>
+ 1482604646U, // <0,3,2,0>: Cost 2 vext1 <1,0,3,2>, LHS
+ 1482605302U, // <0,3,2,1>: Cost 2 vext1 <1,0,3,2>, <1,0,3,2>
+ 2556348008U, // <0,3,2,2>: Cost 3 vext1 <1,0,3,2>, <2,2,2,2>
+ 3020736924U, // <0,3,2,3>: Cost 3 vtrnl LHS, <3,3,3,3>
+ 1482607926U, // <0,3,2,4>: Cost 2 vext1 <1,0,3,2>, RHS
+ 3020737026U, // <0,3,2,5>: Cost 3 vtrnl LHS, <3,4,5,6>
+ 2598154746U, // <0,3,2,6>: Cost 3 vext1 <u,0,3,2>, <6,2,7,3>
+ 2598155258U, // <0,3,2,7>: Cost 3 vext1 <u,0,3,2>, <7,0,1,2>
+ 1482610478U, // <0,3,2,u>: Cost 2 vext1 <1,0,3,2>, LHS
+ 3692341398U, // <0,3,3,0>: Cost 4 vext2 <0,2,0,3>, <3,0,1,2>
+ 2635851999U, // <0,3,3,1>: Cost 3 vext2 <3,1,0,3>, <3,1,0,3>
+ 3636069840U, // <0,3,3,2>: Cost 4 vext1 <2,0,3,3>, <2,0,3,3>
+ 2691877276U, // <0,3,3,3>: Cost 3 vext3 <1,2,3,0>, <3,3,3,3>
+ 3961522690U, // <0,3,3,4>: Cost 4 vzipl <0,3,1,4>, <3,4,5,6>
+ 3826797058U, // <0,3,3,5>: Cost 4 vuzpl <0,2,3,5>, <3,4,5,6>
+ 3703622282U, // <0,3,3,6>: Cost 4 vext2 <2,1,0,3>, <3,6,2,7>
+ 3769600452U, // <0,3,3,7>: Cost 4 vext3 <1,u,3,0>, <3,3,7,7>
+ 2640497430U, // <0,3,3,u>: Cost 3 vext2 <3,u,0,3>, <3,u,0,3>
+ 3962194070U, // <0,3,4,0>: Cost 4 vzipl <0,4,1,5>, <3,0,1,2>
+ 2232617112U, // <0,3,4,1>: Cost 3 vrev <3,0,1,4>
+ 2232690849U, // <0,3,4,2>: Cost 3 vrev <3,0,2,4>
+ 4095314332U, // <0,3,4,3>: Cost 4 vtrnl <0,2,4,6>, <3,3,3,3>
+ 3962194434U, // <0,3,4,4>: Cost 4 vzipl <0,4,1,5>, <3,4,5,6>
+ 2691877378U, // <0,3,4,5>: Cost 3 vext3 <1,2,3,0>, <3,4,5,6>
+ 3826765110U, // <0,3,4,6>: Cost 4 vuzpl <0,2,3,1>, RHS
+ 3665941518U, // <0,3,4,7>: Cost 4 vext1 <7,0,3,4>, <7,0,3,4>
+ 2691877405U, // <0,3,4,u>: Cost 3 vext3 <1,2,3,0>, <3,4,u,6>
+ 3630112870U, // <0,3,5,0>: Cost 4 vext1 <1,0,3,5>, LHS
+ 3630113526U, // <0,3,5,1>: Cost 4 vext1 <1,0,3,5>, <1,0,3,2>
+ 4035199734U, // <0,3,5,2>: Cost 4 vzipr <1,4,0,5>, <1,0,3,2>
+ 3769600578U, // <0,3,5,3>: Cost 4 vext3 <1,u,3,0>, <3,5,3,7>
+ 2232846516U, // <0,3,5,4>: Cost 3 vrev <3,0,4,5>
+ 3779037780U, // <0,3,5,5>: Cost 4 vext3 <3,4,5,0>, <3,5,5,7>
+ 2718714461U, // <0,3,5,6>: Cost 3 vext3 <5,6,7,0>, <3,5,6,7>
+ 2706106975U, // <0,3,5,7>: Cost 3 vext3 <3,5,7,0>, <3,5,7,0>
+ 2233141464U, // <0,3,5,u>: Cost 3 vrev <3,0,u,5>
+ 2691877496U, // <0,3,6,0>: Cost 3 vext3 <1,2,3,0>, <3,6,0,7>
+ 3727511914U, // <0,3,6,1>: Cost 4 vext2 <6,1,0,3>, <6,1,0,3>
+ 3765619338U, // <0,3,6,2>: Cost 4 vext3 <1,2,3,0>, <3,6,2,7>
+ 3765619347U, // <0,3,6,3>: Cost 4 vext3 <1,2,3,0>, <3,6,3,7>
+ 3765987996U, // <0,3,6,4>: Cost 4 vext3 <1,2,u,0>, <3,6,4,7>
+ 3306670270U, // <0,3,6,5>: Cost 4 vrev <3,0,5,6>
+ 3792456365U, // <0,3,6,6>: Cost 4 vext3 <5,6,7,0>, <3,6,6,6>
+ 2706770608U, // <0,3,6,7>: Cost 3 vext3 <3,6,7,0>, <3,6,7,0>
+ 2706844345U, // <0,3,6,u>: Cost 3 vext3 <3,6,u,0>, <3,6,u,0>
+ 3769600707U, // <0,3,7,0>: Cost 4 vext3 <1,u,3,0>, <3,7,0,1>
+ 2659742787U, // <0,3,7,1>: Cost 3 vext2 <7,1,0,3>, <7,1,0,3>
+ 3636102612U, // <0,3,7,2>: Cost 4 vext1 <2,0,3,7>, <2,0,3,7>
+ 3769600740U, // <0,3,7,3>: Cost 4 vext3 <1,u,3,0>, <3,7,3,7>
+ 3769600747U, // <0,3,7,4>: Cost 4 vext3 <1,u,3,0>, <3,7,4,5>
+ 3769600758U, // <0,3,7,5>: Cost 4 vext3 <1,u,3,0>, <3,7,5,7>
+ 3659993400U, // <0,3,7,6>: Cost 4 vext1 <6,0,3,7>, <6,0,3,7>
+ 3781176065U, // <0,3,7,7>: Cost 4 vext3 <3,7,7,0>, <3,7,7,0>
+ 2664388218U, // <0,3,7,u>: Cost 3 vext2 <7,u,0,3>, <7,u,0,3>
+ 1482653798U, // <0,3,u,0>: Cost 2 vext1 <1,0,3,u>, LHS
+ 1482654460U, // <0,3,u,1>: Cost 2 vext1 <1,0,3,u>, <1,0,3,u>
+ 2556397160U, // <0,3,u,2>: Cost 3 vext1 <1,0,3,u>, <2,2,2,2>
+ 3021179292U, // <0,3,u,3>: Cost 3 vtrnl LHS, <3,3,3,3>
+ 1482657078U, // <0,3,u,4>: Cost 2 vext1 <1,0,3,u>, RHS
+ 3021179394U, // <0,3,u,5>: Cost 3 vtrnl LHS, <3,4,5,6>
+ 2598203898U, // <0,3,u,6>: Cost 3 vext1 <u,0,3,u>, <6,2,7,3>
+ 2708097874U, // <0,3,u,7>: Cost 3 vext3 <3,u,7,0>, <3,u,7,0>
+ 1482659630U, // <0,3,u,u>: Cost 2 vext1 <1,0,3,u>, LHS
+ 2617278468U, // <0,4,0,0>: Cost 3 vext2 <0,0,0,4>, <0,0,0,4>
+ 2618605670U, // <0,4,0,1>: Cost 3 vext2 <0,2,0,4>, LHS
+ 2618605734U, // <0,4,0,2>: Cost 3 vext2 <0,2,0,4>, <0,2,0,4>
+ 3642091695U, // <0,4,0,3>: Cost 4 vext1 <3,0,4,0>, <3,0,4,0>
+ 2753134796U, // <0,4,0,4>: Cost 3 vuzpl <0,2,4,6>, <0,2,4,6>
+ 2718714770U, // <0,4,0,5>: Cost 3 vext3 <5,6,7,0>, <4,0,5,1>
+ 3021245750U, // <0,4,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 3665982483U, // <0,4,0,7>: Cost 4 vext1 <7,0,4,0>, <7,0,4,0>
+ 3021245768U, // <0,4,0,u>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 2568355942U, // <0,4,1,0>: Cost 3 vext1 <3,0,4,1>, LHS
+ 3692348212U, // <0,4,1,1>: Cost 4 vext2 <0,2,0,4>, <1,1,1,1>
+ 3692348310U, // <0,4,1,2>: Cost 4 vext2 <0,2,0,4>, <1,2,3,0>
+ 2568358064U, // <0,4,1,3>: Cost 3 vext1 <3,0,4,1>, <3,0,4,1>
+ 2568359222U, // <0,4,1,4>: Cost 3 vext1 <3,0,4,1>, RHS
+ 1812778294U, // <0,4,1,5>: Cost 2 vzipl LHS, RHS
+ 3022671158U, // <0,4,1,6>: Cost 3 vtrnl <0,4,1,5>, RHS
+ 2592248852U, // <0,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
+ 1812778537U, // <0,4,1,u>: Cost 2 vzipl LHS, RHS
+ 2568364134U, // <0,4,2,0>: Cost 3 vext1 <3,0,4,2>, LHS
+ 2238573423U, // <0,4,2,1>: Cost 3 vrev <4,0,1,2>
+ 3692349032U, // <0,4,2,2>: Cost 4 vext2 <0,2,0,4>, <2,2,2,2>
+ 2631214761U, // <0,4,2,3>: Cost 3 vext2 <2,3,0,4>, <2,3,0,4>
+ 2568367414U, // <0,4,2,4>: Cost 3 vext1 <3,0,4,2>, RHS
+ 2887028022U, // <0,4,2,5>: Cost 3 vzipl <0,2,0,2>, RHS
+ 1946996022U, // <0,4,2,6>: Cost 2 vtrnl LHS, RHS
+ 2592257045U, // <0,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
+ 1946996040U, // <0,4,2,u>: Cost 2 vtrnl LHS, RHS
+ 3692349590U, // <0,4,3,0>: Cost 4 vext2 <0,2,0,4>, <3,0,1,2>
+ 3826878614U, // <0,4,3,1>: Cost 4 vuzpl <0,2,4,6>, <3,0,1,2>
+ 3826878625U, // <0,4,3,2>: Cost 4 vuzpl <0,2,4,6>, <3,0,2,4>
+ 3692349852U, // <0,4,3,3>: Cost 4 vext2 <0,2,0,4>, <3,3,3,3>
+ 3692349954U, // <0,4,3,4>: Cost 4 vext2 <0,2,0,4>, <3,4,5,6>
+ 3826878978U, // <0,4,3,5>: Cost 4 vuzpl <0,2,4,6>, <3,4,5,6>
+ 4095200566U, // <0,4,3,6>: Cost 4 vtrnl <0,2,3,1>, RHS
+ 3713583814U, // <0,4,3,7>: Cost 4 vext2 <3,7,0,4>, <3,7,0,4>
+ 3692350238U, // <0,4,3,u>: Cost 4 vext2 <0,2,0,4>, <3,u,1,2>
+ 2550464552U, // <0,4,4,0>: Cost 3 vext1 <0,0,4,4>, <0,0,4,4>
+ 3962194914U, // <0,4,4,1>: Cost 4 vzipl <0,4,1,5>, <4,1,5,0>
+ 3693677631U, // <0,4,4,2>: Cost 4 vext2 <0,4,0,4>, <4,2,6,3>
+ 3642124467U, // <0,4,4,3>: Cost 4 vext1 <3,0,4,4>, <3,0,4,4>
+ 2718715088U, // <0,4,4,4>: Cost 3 vext3 <5,6,7,0>, <4,4,4,4>
+ 2618608950U, // <0,4,4,5>: Cost 3 vext2 <0,2,0,4>, RHS
+ 2753137974U, // <0,4,4,6>: Cost 3 vuzpl <0,2,4,6>, RHS
+ 3666015255U, // <0,4,4,7>: Cost 4 vext1 <7,0,4,4>, <7,0,4,4>
+ 2618609193U, // <0,4,4,u>: Cost 3 vext2 <0,2,0,4>, RHS
+ 2568388710U, // <0,4,5,0>: Cost 3 vext1 <3,0,4,5>, LHS
+ 2568389526U, // <0,4,5,1>: Cost 3 vext1 <3,0,4,5>, <1,2,3,0>
+ 3636159963U, // <0,4,5,2>: Cost 4 vext1 <2,0,4,5>, <2,0,4,5>
+ 2568390836U, // <0,4,5,3>: Cost 3 vext1 <3,0,4,5>, <3,0,4,5>
+ 2568391990U, // <0,4,5,4>: Cost 3 vext1 <3,0,4,5>, RHS
+ 2718715180U, // <0,4,5,5>: Cost 3 vext3 <5,6,7,0>, <4,5,5,6>
+ 1618136374U, // <0,4,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2592281624U, // <0,4,5,7>: Cost 3 vext1 <7,0,4,5>, <7,0,4,5>
+ 1618136392U, // <0,4,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2550480938U, // <0,4,6,0>: Cost 3 vext1 <0,0,4,6>, <0,0,4,6>
+ 3826880801U, // <0,4,6,1>: Cost 4 vuzpl <0,2,4,6>, <6,0,1,2>
+ 2562426332U, // <0,4,6,2>: Cost 3 vext1 <2,0,4,6>, <2,0,4,6>
+ 3786190181U, // <0,4,6,3>: Cost 4 vext3 <4,6,3,0>, <4,6,3,0>
+ 2718715252U, // <0,4,6,4>: Cost 3 vext3 <5,6,7,0>, <4,6,4,6>
+ 3826881165U, // <0,4,6,5>: Cost 4 vuzpl <0,2,4,6>, <6,4,5,6>
+ 2712669568U, // <0,4,6,6>: Cost 3 vext3 <4,6,6,0>, <4,6,6,0>
+ 2657760081U, // <0,4,6,7>: Cost 3 vext2 <6,7,0,4>, <6,7,0,4>
+ 2718715284U, // <0,4,6,u>: Cost 3 vext3 <5,6,7,0>, <4,6,u,2>
+ 3654090854U, // <0,4,7,0>: Cost 4 vext1 <5,0,4,7>, LHS
+ 3934229326U, // <0,4,7,1>: Cost 4 vuzpr <7,0,1,4>, <6,7,0,1>
+ 3734156437U, // <0,4,7,2>: Cost 4 vext2 <7,2,0,4>, <7,2,0,4>
+ 3734820070U, // <0,4,7,3>: Cost 4 vext2 <7,3,0,4>, <7,3,0,4>
+ 3654094134U, // <0,4,7,4>: Cost 4 vext1 <5,0,4,7>, RHS
+ 2713259464U, // <0,4,7,5>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
+ 2713333201U, // <0,4,7,6>: Cost 3 vext3 <4,7,6,0>, <4,7,6,0>
+ 3654095866U, // <0,4,7,7>: Cost 4 vext1 <5,0,4,7>, <7,0,1,2>
+ 2713259464U, // <0,4,7,u>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
+ 2568413286U, // <0,4,u,0>: Cost 3 vext1 <3,0,4,u>, LHS
+ 2618611502U, // <0,4,u,1>: Cost 3 vext2 <0,2,0,4>, LHS
+ 2753140526U, // <0,4,u,2>: Cost 3 vuzpl <0,2,4,6>, LHS
+ 2568415415U, // <0,4,u,3>: Cost 3 vext1 <3,0,4,u>, <3,0,4,u>
+ 2568416566U, // <0,4,u,4>: Cost 3 vext1 <3,0,4,u>, RHS
+ 1817423158U, // <0,4,u,5>: Cost 2 vzipl LHS, RHS
+ 1947438390U, // <0,4,u,6>: Cost 2 vtrnl LHS, RHS
+ 2592306203U, // <0,4,u,7>: Cost 3 vext1 <7,0,4,u>, <7,0,4,u>
+ 1947438408U, // <0,4,u,u>: Cost 2 vtrnl LHS, RHS
+ 3630219264U, // <0,5,0,0>: Cost 4 vext1 <1,0,5,0>, <0,0,0,0>
+ 2625912934U, // <0,5,0,1>: Cost 3 vext2 <1,4,0,5>, LHS
+ 3692355748U, // <0,5,0,2>: Cost 4 vext2 <0,2,0,5>, <0,2,0,2>
+ 3693019384U, // <0,5,0,3>: Cost 4 vext2 <0,3,0,5>, <0,3,0,5>
+ 3630222646U, // <0,5,0,4>: Cost 4 vext1 <1,0,5,0>, RHS
+ 3699655062U, // <0,5,0,5>: Cost 4 vext2 <1,4,0,5>, <0,5,0,1>
+ 2718715508U, // <0,5,0,6>: Cost 3 vext3 <5,6,7,0>, <5,0,6,1>
+ 3087011126U, // <0,5,0,7>: Cost 3 vtrnr <0,0,0,0>, RHS
+ 2625913501U, // <0,5,0,u>: Cost 3 vext2 <1,4,0,5>, LHS
+ 1500659814U, // <0,5,1,0>: Cost 2 vext1 <4,0,5,1>, LHS
+ 2886520528U, // <0,5,1,1>: Cost 3 vzipl LHS, <5,1,7,3>
+ 2574403176U, // <0,5,1,2>: Cost 3 vext1 <4,0,5,1>, <2,2,2,2>
+ 2574403734U, // <0,5,1,3>: Cost 3 vext1 <4,0,5,1>, <3,0,1,2>
+ 1500662674U, // <0,5,1,4>: Cost 2 vext1 <4,0,5,1>, <4,0,5,1>
+ 2886520836U, // <0,5,1,5>: Cost 3 vzipl LHS, <5,5,5,5>
+ 2886520930U, // <0,5,1,6>: Cost 3 vzipl LHS, <5,6,7,0>
+ 2718715600U, // <0,5,1,7>: Cost 3 vext3 <5,6,7,0>, <5,1,7,3>
+ 1500665646U, // <0,5,1,u>: Cost 2 vext1 <4,0,5,1>, LHS
+ 2556493926U, // <0,5,2,0>: Cost 3 vext1 <1,0,5,2>, LHS
+ 2244546120U, // <0,5,2,1>: Cost 3 vrev <5,0,1,2>
+ 3692357256U, // <0,5,2,2>: Cost 4 vext2 <0,2,0,5>, <2,2,5,7>
+ 2568439994U, // <0,5,2,3>: Cost 3 vext1 <3,0,5,2>, <3,0,5,2>
+ 2556497206U, // <0,5,2,4>: Cost 3 vext1 <1,0,5,2>, RHS
+ 3020738564U, // <0,5,2,5>: Cost 3 vtrnl LHS, <5,5,5,5>
+ 4027877161U, // <0,5,2,6>: Cost 4 vzipr <0,2,0,2>, <2,4,5,6>
+ 3093220662U, // <0,5,2,7>: Cost 3 vtrnr <1,0,3,2>, RHS
+ 3093220663U, // <0,5,2,u>: Cost 3 vtrnr <1,0,3,2>, RHS
+ 3699656854U, // <0,5,3,0>: Cost 4 vext2 <1,4,0,5>, <3,0,1,2>
+ 3699656927U, // <0,5,3,1>: Cost 4 vext2 <1,4,0,5>, <3,1,0,3>
+ 3699657006U, // <0,5,3,2>: Cost 4 vext2 <1,4,0,5>, <3,2,0,1>
+ 3699657116U, // <0,5,3,3>: Cost 4 vext2 <1,4,0,5>, <3,3,3,3>
+ 2637859284U, // <0,5,3,4>: Cost 3 vext2 <3,4,0,5>, <3,4,0,5>
+ 3790319453U, // <0,5,3,5>: Cost 4 vext3 <5,3,5,0>, <5,3,5,0>
+ 3699657354U, // <0,5,3,6>: Cost 4 vext2 <1,4,0,5>, <3,6,2,7>
+ 2716725103U, // <0,5,3,7>: Cost 3 vext3 <5,3,7,0>, <5,3,7,0>
+ 2716798840U, // <0,5,3,u>: Cost 3 vext3 <5,3,u,0>, <5,3,u,0>
+ 2661747602U, // <0,5,4,0>: Cost 3 vext2 <7,4,0,5>, <4,0,5,1>
+ 3630252810U, // <0,5,4,1>: Cost 4 vext1 <1,0,5,4>, <1,0,5,4>
+ 3636225507U, // <0,5,4,2>: Cost 4 vext1 <2,0,5,4>, <2,0,5,4>
+ 3716910172U, // <0,5,4,3>: Cost 4 vext2 <4,3,0,5>, <4,3,0,5>
+ 3962195892U, // <0,5,4,4>: Cost 4 vzipl <0,4,1,5>, <5,4,5,6>
+ 2625916214U, // <0,5,4,5>: Cost 3 vext2 <1,4,0,5>, RHS
+ 3718901071U, // <0,5,4,6>: Cost 4 vext2 <4,6,0,5>, <4,6,0,5>
+ 2718715846U, // <0,5,4,7>: Cost 3 vext3 <5,6,7,0>, <5,4,7,6>
+ 2625916457U, // <0,5,4,u>: Cost 3 vext2 <1,4,0,5>, RHS
+ 3791278034U, // <0,5,5,0>: Cost 4 vext3 <5,5,0,0>, <5,5,0,0>
+ 3791351771U, // <0,5,5,1>: Cost 4 vext3 <5,5,1,0>, <5,5,1,0>
+ 3318386260U, // <0,5,5,2>: Cost 4 vrev <5,0,2,5>
+ 3791499245U, // <0,5,5,3>: Cost 4 vext3 <5,5,3,0>, <5,5,3,0>
+ 3318533734U, // <0,5,5,4>: Cost 4 vrev <5,0,4,5>
+ 2718715908U, // <0,5,5,5>: Cost 3 vext3 <5,6,7,0>, <5,5,5,5>
+ 2657767522U, // <0,5,5,6>: Cost 3 vext2 <6,7,0,5>, <5,6,7,0>
+ 2718715928U, // <0,5,5,7>: Cost 3 vext3 <5,6,7,0>, <5,5,7,7>
+ 2718715937U, // <0,5,5,u>: Cost 3 vext3 <5,6,7,0>, <5,5,u,7>
+ 2592358502U, // <0,5,6,0>: Cost 3 vext1 <7,0,5,6>, LHS
+ 3792015404U, // <0,5,6,1>: Cost 4 vext3 <5,6,1,0>, <5,6,1,0>
+ 3731509754U, // <0,5,6,2>: Cost 4 vext2 <6,7,0,5>, <6,2,7,3>
+ 3785748546U, // <0,5,6,3>: Cost 4 vext3 <4,5,6,0>, <5,6,3,4>
+ 2592361782U, // <0,5,6,4>: Cost 3 vext1 <7,0,5,6>, RHS
+ 2592362594U, // <0,5,6,5>: Cost 3 vext1 <7,0,5,6>, <5,6,7,0>
+ 3785748576U, // <0,5,6,6>: Cost 4 vext3 <4,5,6,0>, <5,6,6,7>
+ 1644974178U, // <0,5,6,7>: Cost 2 vext3 <5,6,7,0>, <5,6,7,0>
+ 1645047915U, // <0,5,6,u>: Cost 2 vext3 <5,6,u,0>, <5,6,u,0>
+ 2562506854U, // <0,5,7,0>: Cost 3 vext1 <2,0,5,7>, LHS
+ 2562507670U, // <0,5,7,1>: Cost 3 vext1 <2,0,5,7>, <1,2,3,0>
+ 2562508262U, // <0,5,7,2>: Cost 3 vext1 <2,0,5,7>, <2,0,5,7>
+ 3636250774U, // <0,5,7,3>: Cost 4 vext1 <2,0,5,7>, <3,0,1,2>
+ 2562510134U, // <0,5,7,4>: Cost 3 vext1 <2,0,5,7>, RHS
+ 2718716072U, // <0,5,7,5>: Cost 3 vext3 <5,6,7,0>, <5,7,5,7>
+ 2718716074U, // <0,5,7,6>: Cost 3 vext3 <5,6,7,0>, <5,7,6,0>
+ 2719379635U, // <0,5,7,7>: Cost 3 vext3 <5,7,7,0>, <5,7,7,0>
+ 2562512686U, // <0,5,7,u>: Cost 3 vext1 <2,0,5,7>, LHS
+ 1500717158U, // <0,5,u,0>: Cost 2 vext1 <4,0,5,u>, LHS
+ 2625918766U, // <0,5,u,1>: Cost 3 vext2 <1,4,0,5>, LHS
+ 2719674583U, // <0,5,u,2>: Cost 3 vext3 <5,u,2,0>, <5,u,2,0>
+ 2568489152U, // <0,5,u,3>: Cost 3 vext1 <3,0,5,u>, <3,0,5,u>
+ 1500720025U, // <0,5,u,4>: Cost 2 vext1 <4,0,5,u>, <4,0,5,u>
+ 2625919130U, // <0,5,u,5>: Cost 3 vext2 <1,4,0,5>, RHS
+ 2586407243U, // <0,5,u,6>: Cost 3 vext1 <6,0,5,u>, <6,0,5,u>
+ 1646301444U, // <0,5,u,7>: Cost 2 vext3 <5,u,7,0>, <5,u,7,0>
+ 1646375181U, // <0,5,u,u>: Cost 2 vext3 <5,u,u,0>, <5,u,u,0>
+ 2586411110U, // <0,6,0,0>: Cost 3 vext1 <6,0,6,0>, LHS
+ 2619949158U, // <0,6,0,1>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2619949220U, // <0,6,0,2>: Cost 3 vext2 <0,4,0,6>, <0,2,0,2>
+ 3785748789U, // <0,6,0,3>: Cost 4 vext3 <4,5,6,0>, <6,0,3,4>
+ 2619949386U, // <0,6,0,4>: Cost 3 vext2 <0,4,0,6>, <0,4,0,6>
+ 2586415202U, // <0,6,0,5>: Cost 3 vext1 <6,0,6,0>, <5,6,7,0>
+ 2586415436U, // <0,6,0,6>: Cost 3 vext1 <6,0,6,0>, <6,0,6,0>
+ 2952793398U, // <0,6,0,7>: Cost 3 vzipr <0,0,0,0>, RHS
+ 2619949725U, // <0,6,0,u>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2562531430U, // <0,6,1,0>: Cost 3 vext1 <2,0,6,1>, LHS
+ 3693691700U, // <0,6,1,1>: Cost 4 vext2 <0,4,0,6>, <1,1,1,1>
+ 2886521338U, // <0,6,1,2>: Cost 3 vzipl LHS, <6,2,7,3>
+ 3693691864U, // <0,6,1,3>: Cost 4 vext2 <0,4,0,6>, <1,3,1,3>
+ 2562534710U, // <0,6,1,4>: Cost 3 vext1 <2,0,6,1>, RHS
+ 2580450932U, // <0,6,1,5>: Cost 3 vext1 <5,0,6,1>, <5,0,6,1>
+ 2886521656U, // <0,6,1,6>: Cost 3 vzipl LHS, <6,6,6,6>
+ 2966736182U, // <0,6,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
+ 2966736183U, // <0,6,1,u>: Cost 3 vzipr <2,3,0,1>, RHS
+ 1500741734U, // <0,6,2,0>: Cost 2 vext1 <4,0,6,2>, LHS
+ 2250518817U, // <0,6,2,1>: Cost 3 vrev <6,0,1,2>
+ 2574485096U, // <0,6,2,2>: Cost 3 vext1 <4,0,6,2>, <2,2,2,2>
+ 2631894694U, // <0,6,2,3>: Cost 3 vext2 <2,4,0,6>, <2,3,0,1>
+ 1500744604U, // <0,6,2,4>: Cost 2 vext1 <4,0,6,2>, <4,0,6,2>
+ 2574487248U, // <0,6,2,5>: Cost 3 vext1 <4,0,6,2>, <5,1,7,3>
+ 3020739384U, // <0,6,2,6>: Cost 3 vtrnl LHS, <6,6,6,6>
+ 2954136886U, // <0,6,2,7>: Cost 3 vzipr <0,2,0,2>, RHS
+ 1500747566U, // <0,6,2,u>: Cost 2 vext1 <4,0,6,2>, LHS
+ 3693693078U, // <0,6,3,0>: Cost 4 vext2 <0,4,0,6>, <3,0,1,2>
+ 3705637136U, // <0,6,3,1>: Cost 4 vext2 <2,4,0,6>, <3,1,5,7>
+ 3705637192U, // <0,6,3,2>: Cost 4 vext2 <2,4,0,6>, <3,2,3,0>
+ 3693693340U, // <0,6,3,3>: Cost 4 vext2 <0,4,0,6>, <3,3,3,3>
+ 2637867477U, // <0,6,3,4>: Cost 3 vext2 <3,4,0,6>, <3,4,0,6>
+ 3705637424U, // <0,6,3,5>: Cost 4 vext2 <2,4,0,6>, <3,5,1,7>
+ 3666154056U, // <0,6,3,6>: Cost 4 vext1 <7,0,6,3>, <6,3,7,0>
+ 2722697800U, // <0,6,3,7>: Cost 3 vext3 <6,3,7,0>, <6,3,7,0>
+ 2722771537U, // <0,6,3,u>: Cost 3 vext3 <6,3,u,0>, <6,3,u,0>
+ 2562556006U, // <0,6,4,0>: Cost 3 vext1 <2,0,6,4>, LHS
+ 4095316257U, // <0,6,4,1>: Cost 4 vtrnl <0,2,4,6>, <6,0,1,2>
+ 2562557420U, // <0,6,4,2>: Cost 3 vext1 <2,0,6,4>, <2,0,6,4>
+ 3636299926U, // <0,6,4,3>: Cost 4 vext1 <2,0,6,4>, <3,0,1,2>
+ 2562559286U, // <0,6,4,4>: Cost 3 vext1 <2,0,6,4>, RHS
+ 2619952438U, // <0,6,4,5>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2723287696U, // <0,6,4,6>: Cost 3 vext3 <6,4,6,0>, <6,4,6,0>
+ 4027895094U, // <0,6,4,7>: Cost 4 vzipr <0,2,0,4>, RHS
+ 2619952681U, // <0,6,4,u>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2718716594U, // <0,6,5,0>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
+ 3648250774U, // <0,6,5,1>: Cost 4 vext1 <4,0,6,5>, <1,2,3,0>
+ 3792458436U, // <0,6,5,2>: Cost 4 vext3 <5,6,7,0>, <6,5,2,7>
+ 3705638767U, // <0,6,5,3>: Cost 5 vext2 <2,4,0,6>, <5,3,7,0>
+ 3648252831U, // <0,6,5,4>: Cost 4 vext1 <4,0,6,5>, <4,0,6,5>
+ 3797619416U, // <0,6,5,5>: Cost 4 vext3 <6,5,5,0>, <6,5,5,0>
+ 3792458472U, // <0,6,5,6>: Cost 4 vext3 <5,6,7,0>, <6,5,6,7>
+ 4035202358U, // <0,6,5,7>: Cost 4 vzipr <1,4,0,5>, RHS
+ 2718716594U, // <0,6,5,u>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
+ 3786412796U, // <0,6,6,0>: Cost 4 vext3 <4,6,6,0>, <6,6,0,0>
+ 3792458504U, // <0,6,6,1>: Cost 4 vext3 <5,6,7,0>, <6,6,1,3>
+ 3728200126U, // <0,6,6,2>: Cost 4 vext2 <6,2,0,6>, <6,2,0,6>
+ 3798135575U, // <0,6,6,3>: Cost 4 vext3 <6,6,3,0>, <6,6,3,0>
+ 3786412836U, // <0,6,6,4>: Cost 4 vext3 <4,6,6,0>, <6,6,4,4>
+ 3792458543U, // <0,6,6,5>: Cost 4 vext3 <5,6,7,0>, <6,6,5,6>
+ 2718716728U, // <0,6,6,6>: Cost 3 vext3 <5,6,7,0>, <6,6,6,6>
+ 2718716738U, // <0,6,6,7>: Cost 3 vext3 <5,6,7,0>, <6,6,7,7>
+ 2718716747U, // <0,6,6,u>: Cost 3 vext3 <5,6,7,0>, <6,6,u,7>
+ 2718716750U, // <0,6,7,0>: Cost 3 vext3 <5,6,7,0>, <6,7,0,1>
+ 2724909910U, // <0,6,7,1>: Cost 3 vext3 <6,7,1,0>, <6,7,1,0>
+ 3636323823U, // <0,6,7,2>: Cost 4 vext1 <2,0,6,7>, <2,0,6,7>
+ 2725057384U, // <0,6,7,3>: Cost 3 vext3 <6,7,3,0>, <6,7,3,0>
+ 2718716790U, // <0,6,7,4>: Cost 3 vext3 <5,6,7,0>, <6,7,4,5>
+ 2718716800U, // <0,6,7,5>: Cost 3 vext3 <5,6,7,0>, <6,7,5,6>
+ 3792458629U, // <0,6,7,6>: Cost 4 vext3 <5,6,7,0>, <6,7,6,2>
+ 2725352332U, // <0,6,7,7>: Cost 3 vext3 <6,7,7,0>, <6,7,7,0>
+ 2718716822U, // <0,6,7,u>: Cost 3 vext3 <5,6,7,0>, <6,7,u,1>
+ 1500790886U, // <0,6,u,0>: Cost 2 vext1 <4,0,6,u>, LHS
+ 2619954990U, // <0,6,u,1>: Cost 3 vext2 <0,4,0,6>, LHS
+ 2562590192U, // <0,6,u,2>: Cost 3 vext1 <2,0,6,u>, <2,0,6,u>
+ 2725721017U, // <0,6,u,3>: Cost 3 vext3 <6,u,3,0>, <6,u,3,0>
+ 1500793762U, // <0,6,u,4>: Cost 2 vext1 <4,0,6,u>, <4,0,6,u>
+ 2619955354U, // <0,6,u,5>: Cost 3 vext2 <0,4,0,6>, RHS
+ 2725942228U, // <0,6,u,6>: Cost 3 vext3 <6,u,6,0>, <6,u,6,0>
+ 2954186038U, // <0,6,u,7>: Cost 3 vzipr <0,2,0,u>, RHS
+ 1500796718U, // <0,6,u,u>: Cost 2 vext1 <4,0,6,u>, LHS
+ 2256401391U, // <0,7,0,0>: Cost 3 vrev <7,0,0,0>
+ 2632564838U, // <0,7,0,1>: Cost 3 vext2 <2,5,0,7>, LHS
+ 2256548865U, // <0,7,0,2>: Cost 3 vrev <7,0,2,0>
+ 3700998396U, // <0,7,0,3>: Cost 4 vext2 <1,6,0,7>, <0,3,1,0>
+ 2718716952U, // <0,7,0,4>: Cost 3 vext3 <5,6,7,0>, <7,0,4,5>
+ 2718716962U, // <0,7,0,5>: Cost 3 vext3 <5,6,7,0>, <7,0,5,6>
+ 2621284845U, // <0,7,0,6>: Cost 3 vext2 <0,6,0,7>, <0,6,0,7>
+ 3904685542U, // <0,7,0,7>: Cost 4 vuzpr <2,0,5,7>, <2,0,5,7>
+ 2632565405U, // <0,7,0,u>: Cost 3 vext2 <2,5,0,7>, LHS
+ 2256409584U, // <0,7,1,0>: Cost 3 vrev <7,0,0,1>
+ 3706307380U, // <0,7,1,1>: Cost 4 vext2 <2,5,0,7>, <1,1,1,1>
+ 2632565654U, // <0,7,1,2>: Cost 3 vext2 <2,5,0,7>, <1,2,3,0>
+ 3769603168U, // <0,7,1,3>: Cost 4 vext3 <1,u,3,0>, <7,1,3,5>
+ 2256704532U, // <0,7,1,4>: Cost 3 vrev <7,0,4,1>
+ 3769603184U, // <0,7,1,5>: Cost 4 vext3 <1,u,3,0>, <7,1,5,3>
+ 3700999366U, // <0,7,1,6>: Cost 4 vext2 <1,6,0,7>, <1,6,0,7>
+ 2886522476U, // <0,7,1,7>: Cost 3 vzipl LHS, <7,7,7,7>
+ 2256999480U, // <0,7,1,u>: Cost 3 vrev <7,0,u,1>
+ 2586501222U, // <0,7,2,0>: Cost 3 vext1 <6,0,7,2>, LHS
+ 1182749690U, // <0,7,2,1>: Cost 2 vrev <7,0,1,2>
+ 3636356595U, // <0,7,2,2>: Cost 4 vext1 <2,0,7,2>, <2,0,7,2>
+ 2727711916U, // <0,7,2,3>: Cost 3 vext3 <7,2,3,0>, <7,2,3,0>
+ 2586504502U, // <0,7,2,4>: Cost 3 vext1 <6,0,7,2>, RHS
+ 2632566606U, // <0,7,2,5>: Cost 3 vext2 <2,5,0,7>, <2,5,0,7>
+ 2586505559U, // <0,7,2,6>: Cost 3 vext1 <6,0,7,2>, <6,0,7,2>
+ 3020740204U, // <0,7,2,7>: Cost 3 vtrnl LHS, <7,7,7,7>
+ 1183265849U, // <0,7,2,u>: Cost 2 vrev <7,0,u,2>
+ 3701000342U, // <0,7,3,0>: Cost 4 vext2 <1,6,0,7>, <3,0,1,2>
+ 3706308849U, // <0,7,3,1>: Cost 4 vext2 <2,5,0,7>, <3,1,2,3>
+ 3330315268U, // <0,7,3,2>: Cost 4 vrev <7,0,2,3>
+ 3706309020U, // <0,7,3,3>: Cost 4 vext2 <2,5,0,7>, <3,3,3,3>
+ 3706309122U, // <0,7,3,4>: Cost 4 vext2 <2,5,0,7>, <3,4,5,6>
+ 3712281127U, // <0,7,3,5>: Cost 4 vext2 <3,5,0,7>, <3,5,0,7>
+ 2639202936U, // <0,7,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
+ 3802412321U, // <0,7,3,7>: Cost 4 vext3 <7,3,7,0>, <7,3,7,0>
+ 2640530202U, // <0,7,3,u>: Cost 3 vext2 <3,u,0,7>, <3,u,0,7>
+ 3654287462U, // <0,7,4,0>: Cost 4 vext1 <5,0,7,4>, LHS
+ 2256507900U, // <0,7,4,1>: Cost 3 vrev <7,0,1,4>
+ 2256581637U, // <0,7,4,2>: Cost 3 vrev <7,0,2,4>
+ 3660262008U, // <0,7,4,3>: Cost 4 vext1 <6,0,7,4>, <3,6,0,7>
+ 3786413405U, // <0,7,4,4>: Cost 4 vext3 <4,6,6,0>, <7,4,4,6>
+ 2632568118U, // <0,7,4,5>: Cost 3 vext2 <2,5,0,7>, RHS
+ 3718917457U, // <0,7,4,6>: Cost 4 vext2 <4,6,0,7>, <4,6,0,7>
+ 3787003255U, // <0,7,4,7>: Cost 4 vext3 <4,7,5,0>, <7,4,7,5>
+ 2632568361U, // <0,7,4,u>: Cost 3 vext2 <2,5,0,7>, RHS
+ 3706310268U, // <0,7,5,0>: Cost 4 vext2 <2,5,0,7>, <5,0,7,0>
+ 3792459156U, // <0,7,5,1>: Cost 4 vext3 <5,6,7,0>, <7,5,1,7>
+ 3330331654U, // <0,7,5,2>: Cost 4 vrev <7,0,2,5>
+ 3722899255U, // <0,7,5,3>: Cost 4 vext2 <5,3,0,7>, <5,3,0,7>
+ 2256737304U, // <0,7,5,4>: Cost 3 vrev <7,0,4,5>
+ 3724226521U, // <0,7,5,5>: Cost 4 vext2 <5,5,0,7>, <5,5,0,7>
+ 2718717377U, // <0,7,5,6>: Cost 3 vext3 <5,6,7,0>, <7,5,6,7>
+ 2729997763U, // <0,7,5,7>: Cost 3 vext3 <7,5,7,0>, <7,5,7,0>
+ 2720044499U, // <0,7,5,u>: Cost 3 vext3 <5,u,7,0>, <7,5,u,7>
+ 3712946517U, // <0,7,6,0>: Cost 4 vext2 <3,6,0,7>, <6,0,7,0>
+ 2256524286U, // <0,7,6,1>: Cost 3 vrev <7,0,1,6>
+ 3792459246U, // <0,7,6,2>: Cost 4 vext3 <5,6,7,0>, <7,6,2,7>
+ 3796440567U, // <0,7,6,3>: Cost 4 vext3 <6,3,7,0>, <7,6,3,7>
+ 3654307126U, // <0,7,6,4>: Cost 4 vext1 <5,0,7,6>, RHS
+ 2656457394U, // <0,7,6,5>: Cost 3 vext2 <6,5,0,7>, <6,5,0,7>
+ 3792459281U, // <0,7,6,6>: Cost 4 vext3 <5,6,7,0>, <7,6,6,6>
+ 2730661396U, // <0,7,6,7>: Cost 3 vext3 <7,6,7,0>, <7,6,7,0>
+ 2658448293U, // <0,7,6,u>: Cost 3 vext2 <6,u,0,7>, <6,u,0,7>
+ 3787003431U, // <0,7,7,0>: Cost 4 vext3 <4,7,5,0>, <7,7,0,1>
+ 3654312854U, // <0,7,7,1>: Cost 4 vext1 <5,0,7,7>, <1,2,3,0>
+ 3654313446U, // <0,7,7,2>: Cost 4 vext1 <5,0,7,7>, <2,0,5,7>
+ 3804771905U, // <0,7,7,3>: Cost 4 vext3 <7,7,3,0>, <7,7,3,0>
+ 3654315318U, // <0,7,7,4>: Cost 4 vext1 <5,0,7,7>, RHS
+ 3654315651U, // <0,7,7,5>: Cost 4 vext1 <5,0,7,7>, <5,0,7,7>
+ 3660288348U, // <0,7,7,6>: Cost 4 vext1 <6,0,7,7>, <6,0,7,7>
+ 2718717548U, // <0,7,7,7>: Cost 3 vext3 <5,6,7,0>, <7,7,7,7>
+ 2664420990U, // <0,7,7,u>: Cost 3 vext2 <7,u,0,7>, <7,u,0,7>
+ 2256466935U, // <0,7,u,0>: Cost 3 vrev <7,0,0,u>
+ 1182798848U, // <0,7,u,1>: Cost 2 vrev <7,0,1,u>
+ 2256614409U, // <0,7,u,2>: Cost 3 vrev <7,0,2,u>
+ 2731693714U, // <0,7,u,3>: Cost 3 vext3 <7,u,3,0>, <7,u,3,0>
+ 2256761883U, // <0,7,u,4>: Cost 3 vrev <7,0,4,u>
+ 2632571034U, // <0,7,u,5>: Cost 3 vext2 <2,5,0,7>, RHS
+ 2669066421U, // <0,7,u,6>: Cost 3 vext2 <u,6,0,7>, <u,6,0,7>
+ 2731988662U, // <0,7,u,7>: Cost 3 vext3 <7,u,7,0>, <7,u,7,0>
+ 1183315007U, // <0,7,u,u>: Cost 2 vrev <7,0,u,u>
+ 135053414U, // <0,u,0,0>: Cost 1 vdup0 LHS
+ 1544896614U, // <0,u,0,1>: Cost 2 vext2 <0,2,0,u>, LHS
+ 1678999654U, // <0,u,0,2>: Cost 2 vuzpl LHS, LHS
+ 2691880677U, // <0,u,0,3>: Cost 3 vext3 <1,2,3,0>, <u,0,3,2>
+ 1476988214U, // <0,u,0,4>: Cost 2 vext1 <0,0,u,0>, RHS
+ 2718791419U, // <0,u,0,5>: Cost 3 vext3 <5,6,u,0>, <u,0,5,6>
+ 3021248666U, // <0,u,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
+ 2592535607U, // <0,u,0,7>: Cost 3 vext1 <7,0,u,0>, <7,0,u,0>
+ 135053414U, // <0,u,0,u>: Cost 1 vdup0 LHS
+ 1476993097U, // <0,u,1,0>: Cost 2 vext1 <0,0,u,1>, <0,0,u,1>
+ 1812780846U, // <0,u,1,1>: Cost 2 vzipl LHS, LHS
+ 1618138926U, // <0,u,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
+ 2752742134U, // <0,u,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
+ 1476996406U, // <0,u,1,4>: Cost 2 vext1 <0,0,u,1>, RHS
+ 1812781210U, // <0,u,1,5>: Cost 2 vzipl LHS, RHS
+ 2887006416U, // <0,u,1,6>: Cost 3 vzipl LHS, <u,6,3,7>
+ 2966736200U, // <0,u,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
+ 1812781413U, // <0,u,1,u>: Cost 2 vzipl LHS, LHS
+ 1482973286U, // <0,u,2,0>: Cost 2 vext1 <1,0,u,2>, LHS
+ 1482973987U, // <0,u,2,1>: Cost 2 vext1 <1,0,u,2>, <1,0,u,2>
+ 1946998574U, // <0,u,2,2>: Cost 2 vtrnl LHS, LHS
+ 835584U, // <0,u,2,3>: Cost 0 copy LHS
+ 1482976566U, // <0,u,2,4>: Cost 2 vext1 <1,0,u,2>, RHS
+ 3020781631U, // <0,u,2,5>: Cost 3 vtrnl LHS, <u,4,5,6>
+ 1946998938U, // <0,u,2,6>: Cost 2 vtrnl LHS, RHS
+ 1518810169U, // <0,u,2,7>: Cost 2 vext1 <7,0,u,2>, <7,0,u,2>
+ 835584U, // <0,u,2,u>: Cost 0 copy LHS
+ 2618640534U, // <0,u,3,0>: Cost 3 vext2 <0,2,0,u>, <3,0,1,2>
+ 2752743574U, // <0,u,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
+ 2636556597U, // <0,u,3,2>: Cost 3 vext2 <3,2,0,u>, <3,2,0,u>
+ 2752743836U, // <0,u,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
+ 2618640898U, // <0,u,3,4>: Cost 3 vext2 <0,2,0,u>, <3,4,5,6>
+ 2752743938U, // <0,u,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
+ 2639202936U, // <0,u,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
+ 2639874762U, // <0,u,3,7>: Cost 3 vext2 <3,7,0,u>, <3,7,0,u>
+ 2752743637U, // <0,u,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
+ 2562703462U, // <0,u,4,0>: Cost 3 vext1 <2,0,u,4>, LHS
+ 2888455982U, // <0,u,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
+ 3021575982U, // <0,u,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
+ 2568677591U, // <0,u,4,3>: Cost 3 vext1 <3,0,u,4>, <3,0,u,4>
+ 2562706742U, // <0,u,4,4>: Cost 3 vext1 <2,0,u,4>, RHS
+ 1544899894U, // <0,u,4,5>: Cost 2 vext2 <0,2,0,u>, RHS
+ 1679002934U, // <0,u,4,6>: Cost 2 vuzpl LHS, RHS
+ 2718718033U, // <0,u,4,7>: Cost 3 vext3 <5,6,7,0>, <u,4,7,6>
+ 1679002952U, // <0,u,4,u>: Cost 2 vuzpl LHS, RHS
+ 2568683622U, // <0,u,5,0>: Cost 3 vext1 <3,0,u,5>, LHS
+ 2568684438U, // <0,u,5,1>: Cost 3 vext1 <3,0,u,5>, <1,2,3,0>
+ 3765622902U, // <0,u,5,2>: Cost 4 vext3 <1,2,3,0>, <u,5,2,7>
+ 2691881087U, // <0,u,5,3>: Cost 3 vext3 <1,2,3,0>, <u,5,3,7>
+ 2568686902U, // <0,u,5,4>: Cost 3 vext1 <3,0,u,5>, RHS
+ 2650492890U, // <0,u,5,5>: Cost 3 vext2 <5,5,0,u>, <5,5,0,u>
+ 1618139290U, // <0,u,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2824834358U, // <0,u,5,7>: Cost 3 vuzpr <1,0,3,u>, RHS
+ 1618139308U, // <0,u,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
+ 2592579686U, // <0,u,6,0>: Cost 3 vext1 <7,0,u,6>, LHS
+ 2262496983U, // <0,u,6,1>: Cost 3 vrev <u,0,1,6>
+ 2654474688U, // <0,u,6,2>: Cost 3 vext2 <6,2,0,u>, <6,2,0,u>
+ 2691881168U, // <0,u,6,3>: Cost 3 vext3 <1,2,3,0>, <u,6,3,7>
+ 2592582966U, // <0,u,6,4>: Cost 3 vext1 <7,0,u,6>, RHS
+ 2656465587U, // <0,u,6,5>: Cost 3 vext2 <6,5,0,u>, <6,5,0,u>
+ 2657129220U, // <0,u,6,6>: Cost 3 vext2 <6,6,0,u>, <6,6,0,u>
+ 1584051029U, // <0,u,6,7>: Cost 2 vext2 <6,7,0,u>, <6,7,0,u>
+ 1584714662U, // <0,u,6,u>: Cost 2 vext2 <6,u,0,u>, <6,u,0,u>
+ 2562728038U, // <0,u,7,0>: Cost 3 vext1 <2,0,u,7>, LHS
+ 2562728854U, // <0,u,7,1>: Cost 3 vext1 <2,0,u,7>, <1,2,3,0>
+ 2562729473U, // <0,u,7,2>: Cost 3 vext1 <2,0,u,7>, <2,0,u,7>
+ 2661111018U, // <0,u,7,3>: Cost 3 vext2 <7,3,0,u>, <7,3,0,u>
+ 2562731318U, // <0,u,7,4>: Cost 3 vext1 <2,0,u,7>, RHS
+ 2718718258U, // <0,u,7,5>: Cost 3 vext3 <5,6,7,0>, <u,7,5,6>
+ 2586620261U, // <0,u,7,6>: Cost 3 vext1 <6,0,u,7>, <6,0,u,7>
+ 2657793644U, // <0,u,7,7>: Cost 3 vext2 <6,7,0,u>, <7,7,7,7>
+ 2562733870U, // <0,u,7,u>: Cost 3 vext1 <2,0,u,7>, LHS
+ 135053414U, // <0,u,u,0>: Cost 1 vdup0 LHS
+ 1544902446U, // <0,u,u,1>: Cost 2 vext2 <0,2,0,u>, LHS
+ 1679005486U, // <0,u,u,2>: Cost 2 vuzpl LHS, LHS
+ 835584U, // <0,u,u,3>: Cost 0 copy LHS
+ 1483025718U, // <0,u,u,4>: Cost 2 vext1 <1,0,u,u>, RHS
+ 1544902810U, // <0,u,u,5>: Cost 2 vext2 <0,2,0,u>, RHS
+ 1679005850U, // <0,u,u,6>: Cost 2 vuzpl LHS, RHS
+ 1518859327U, // <0,u,u,7>: Cost 2 vext1 <7,0,u,u>, <7,0,u,u>
+ 835584U, // <0,u,u,u>: Cost 0 copy LHS
+ 2689744896U, // <1,0,0,0>: Cost 3 vext3 <0,u,1,1>, <0,0,0,0>
+ 1610694666U, // <1,0,0,1>: Cost 2 vext3 <0,0,1,1>, <0,0,1,1>
+ 2689744916U, // <1,0,0,2>: Cost 3 vext3 <0,u,1,1>, <0,0,2,2>
+ 2619310332U, // <1,0,0,3>: Cost 3 vext2 <0,3,1,0>, <0,3,1,0>
+ 2684657701U, // <1,0,0,4>: Cost 3 vext3 <0,0,4,1>, <0,0,4,1>
+ 2620637598U, // <1,0,0,5>: Cost 3 vext2 <0,5,1,0>, <0,5,1,0>
+ 3708977654U, // <1,0,0,6>: Cost 4 vext2 <3,0,1,0>, <0,6,1,7>
+ 3666351168U, // <1,0,0,7>: Cost 4 vext1 <7,1,0,0>, <7,1,0,0>
+ 1611210825U, // <1,0,0,u>: Cost 2 vext3 <0,0,u,1>, <0,0,u,1>
+ 2556780646U, // <1,0,1,0>: Cost 3 vext1 <1,1,0,1>, LHS
+ 2556781355U, // <1,0,1,1>: Cost 3 vext1 <1,1,0,1>, <1,1,0,1>
+ 1616003174U, // <1,0,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
+ 3693052888U, // <1,0,1,3>: Cost 4 vext2 <0,3,1,0>, <1,3,1,3>
+ 2556783926U, // <1,0,1,4>: Cost 3 vext1 <1,1,0,1>, RHS
+ 2580672143U, // <1,0,1,5>: Cost 3 vext1 <5,1,0,1>, <5,1,0,1>
+ 2724839566U, // <1,0,1,6>: Cost 3 vext3 <6,7,0,1>, <0,1,6,7>
+ 3654415354U, // <1,0,1,7>: Cost 4 vext1 <5,1,0,1>, <7,0,1,2>
+ 1616003228U, // <1,0,1,u>: Cost 2 vext3 <0,u,1,1>, LHS
+ 2685690019U, // <1,0,2,0>: Cost 3 vext3 <0,2,0,1>, <0,2,0,1>
+ 2685763756U, // <1,0,2,1>: Cost 3 vext3 <0,2,1,1>, <0,2,1,1>
+ 2698297524U, // <1,0,2,2>: Cost 3 vext3 <2,3,0,1>, <0,2,2,0>
+ 2685911230U, // <1,0,2,3>: Cost 3 vext3 <0,2,3,1>, <0,2,3,1>
+ 2689745100U, // <1,0,2,4>: Cost 3 vext3 <0,u,1,1>, <0,2,4,6>
+ 3764814038U, // <1,0,2,5>: Cost 4 vext3 <1,1,1,1>, <0,2,5,7>
+ 2724839640U, // <1,0,2,6>: Cost 3 vext3 <6,7,0,1>, <0,2,6,0>
+ 2592625658U, // <1,0,2,7>: Cost 3 vext1 <7,1,0,2>, <7,0,1,2>
+ 2686279915U, // <1,0,2,u>: Cost 3 vext3 <0,2,u,1>, <0,2,u,1>
+ 3087843328U, // <1,0,3,0>: Cost 3 vtrnr LHS, <0,0,0,0>
+ 3087843338U, // <1,0,3,1>: Cost 3 vtrnr LHS, <0,0,1,1>
+ 67944550U, // <1,0,3,2>: Cost 1 vrev LHS
+ 2568743135U, // <1,0,3,3>: Cost 3 vext1 <3,1,0,3>, <3,1,0,3>
+ 2562772278U, // <1,0,3,4>: Cost 3 vext1 <2,1,0,3>, RHS
+ 4099850454U, // <1,0,3,5>: Cost 4 vtrnl <1,0,3,2>, <0,2,5,7>
+ 3704998538U, // <1,0,3,6>: Cost 4 vext2 <2,3,1,0>, <3,6,2,7>
+ 2592633923U, // <1,0,3,7>: Cost 3 vext1 <7,1,0,3>, <7,1,0,3>
+ 68386972U, // <1,0,3,u>: Cost 1 vrev LHS
+ 2620640146U, // <1,0,4,0>: Cost 3 vext2 <0,5,1,0>, <4,0,5,1>
+ 2689745234U, // <1,0,4,1>: Cost 3 vext3 <0,u,1,1>, <0,4,1,5>
+ 2689745244U, // <1,0,4,2>: Cost 3 vext3 <0,u,1,1>, <0,4,2,6>
+ 3760980320U, // <1,0,4,3>: Cost 4 vext3 <0,4,3,1>, <0,4,3,1>
+ 3761054057U, // <1,0,4,4>: Cost 4 vext3 <0,4,4,1>, <0,4,4,1>
+ 2619313462U, // <1,0,4,5>: Cost 3 vext2 <0,3,1,0>, RHS
+ 3761201531U, // <1,0,4,6>: Cost 4 vext3 <0,4,6,1>, <0,4,6,1>
+ 3666383940U, // <1,0,4,7>: Cost 4 vext1 <7,1,0,4>, <7,1,0,4>
+ 2619313705U, // <1,0,4,u>: Cost 3 vext2 <0,3,1,0>, RHS
+ 4029300736U, // <1,0,5,0>: Cost 4 vzipr <0,4,1,5>, <0,0,0,0>
+ 2895249510U, // <1,0,5,1>: Cost 3 vzipl <1,5,3,7>, LHS
+ 3028287590U, // <1,0,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 3642501345U, // <1,0,5,3>: Cost 4 vext1 <3,1,0,5>, <3,1,0,5>
+ 2215592058U, // <1,0,5,4>: Cost 3 vrev <0,1,4,5>
+ 3724242907U, // <1,0,5,5>: Cost 4 vext2 <5,5,1,0>, <5,5,1,0>
+ 3724906540U, // <1,0,5,6>: Cost 4 vext2 <5,6,1,0>, <5,6,1,0>
+ 3911118134U, // <1,0,5,7>: Cost 4 vuzpr <3,1,3,0>, RHS
+ 3028287644U, // <1,0,5,u>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 3762086375U, // <1,0,6,0>: Cost 4 vext3 <0,6,0,1>, <0,6,0,1>
+ 2698297846U, // <1,0,6,1>: Cost 3 vext3 <2,3,0,1>, <0,6,1,7>
+ 3760022015U, // <1,0,6,2>: Cost 4 vext3 <0,2,u,1>, <0,6,2,7>
+ 3642509538U, // <1,0,6,3>: Cost 4 vext1 <3,1,0,6>, <3,1,0,6>
+ 3762381323U, // <1,0,6,4>: Cost 4 vext3 <0,6,4,1>, <0,6,4,1>
+ 3730215604U, // <1,0,6,5>: Cost 4 vext2 <6,5,1,0>, <6,5,1,0>
+ 3730879237U, // <1,0,6,6>: Cost 4 vext2 <6,6,1,0>, <6,6,1,0>
+ 2657801046U, // <1,0,6,7>: Cost 3 vext2 <6,7,1,0>, <6,7,1,0>
+ 2658464679U, // <1,0,6,u>: Cost 3 vext2 <6,u,1,0>, <6,u,1,0>
+ 2659128312U, // <1,0,7,0>: Cost 3 vext2 <7,0,1,0>, <7,0,1,0>
+ 4047898278U, // <1,0,7,1>: Cost 4 vzipr <3,5,1,7>, <2,3,0,1>
+ 2215460970U, // <1,0,7,2>: Cost 3 vrev <0,1,2,7>
+ 3734861035U, // <1,0,7,3>: Cost 4 vext2 <7,3,1,0>, <7,3,1,0>
+ 3731543398U, // <1,0,7,4>: Cost 4 vext2 <6,7,1,0>, <7,4,5,6>
+ 3736188301U, // <1,0,7,5>: Cost 4 vext2 <7,5,1,0>, <7,5,1,0>
+ 2663110110U, // <1,0,7,6>: Cost 3 vext2 <7,6,1,0>, <7,6,1,0>
+ 3731543660U, // <1,0,7,7>: Cost 4 vext2 <6,7,1,0>, <7,7,7,7>
+ 2664437376U, // <1,0,7,u>: Cost 3 vext2 <7,u,1,0>, <7,u,1,0>
+ 3087884288U, // <1,0,u,0>: Cost 3 vtrnr LHS, <0,0,0,0>
+ 1616003730U, // <1,0,u,1>: Cost 2 vext3 <0,u,1,1>, <0,u,1,1>
+ 67985515U, // <1,0,u,2>: Cost 1 vrev LHS
+ 2689893028U, // <1,0,u,3>: Cost 3 vext3 <0,u,3,1>, <0,u,3,1>
+ 2689745586U, // <1,0,u,4>: Cost 3 vext3 <0,u,1,1>, <0,u,4,6>
+ 2619316378U, // <1,0,u,5>: Cost 3 vext2 <0,3,1,0>, RHS
+ 2669082807U, // <1,0,u,6>: Cost 3 vext2 <u,6,1,0>, <u,6,1,0>
+ 2592674888U, // <1,0,u,7>: Cost 3 vext1 <7,1,0,u>, <7,1,0,u>
+ 68427937U, // <1,0,u,u>: Cost 1 vrev LHS
+ 1543585802U, // <1,1,0,0>: Cost 2 vext2 <0,0,1,1>, <0,0,1,1>
+ 1548894310U, // <1,1,0,1>: Cost 2 vext2 <0,u,1,1>, LHS
+ 2618654892U, // <1,1,0,2>: Cost 3 vext2 <0,2,1,1>, <0,2,1,1>
+ 2689745654U, // <1,1,0,3>: Cost 3 vext3 <0,u,1,1>, <1,0,3,2>
+ 2622636370U, // <1,1,0,4>: Cost 3 vext2 <0,u,1,1>, <0,4,1,5>
+ 2620645791U, // <1,1,0,5>: Cost 3 vext2 <0,5,1,1>, <0,5,1,1>
+ 3696378367U, // <1,1,0,6>: Cost 4 vext2 <0,u,1,1>, <0,6,2,7>
+ 3666424905U, // <1,1,0,7>: Cost 4 vext1 <7,1,1,0>, <7,1,1,0>
+ 1548894866U, // <1,1,0,u>: Cost 2 vext2 <0,u,1,1>, <0,u,1,1>
+ 1483112550U, // <1,1,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,1,1,1>: Cost 1 vdup1 LHS
+ 2622636950U, // <1,1,1,2>: Cost 3 vext2 <0,u,1,1>, <1,2,3,0>
+ 2622637016U, // <1,1,1,3>: Cost 3 vext2 <0,u,1,1>, <1,3,1,3>
+ 1483115830U, // <1,1,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 2622637200U, // <1,1,1,5>: Cost 3 vext2 <0,u,1,1>, <1,5,3,7>
+ 2622637263U, // <1,1,1,6>: Cost 3 vext2 <0,u,1,1>, <1,6,1,7>
+ 2592691274U, // <1,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
+ 202162278U, // <1,1,1,u>: Cost 1 vdup1 LHS
+ 2550890588U, // <1,1,2,0>: Cost 3 vext1 <0,1,1,2>, <0,1,1,2>
+ 2617329183U, // <1,1,2,1>: Cost 3 vext2 <0,0,1,1>, <2,1,3,1>
+ 2622637672U, // <1,1,2,2>: Cost 3 vext2 <0,u,1,1>, <2,2,2,2>
+ 2622637734U, // <1,1,2,3>: Cost 3 vext2 <0,u,1,1>, <2,3,0,1>
+ 2550893878U, // <1,1,2,4>: Cost 3 vext1 <0,1,1,2>, RHS
+ 3696379744U, // <1,1,2,5>: Cost 4 vext2 <0,u,1,1>, <2,5,2,7>
+ 2622638010U, // <1,1,2,6>: Cost 3 vext2 <0,u,1,1>, <2,6,3,7>
+ 3804554170U, // <1,1,2,7>: Cost 4 vext3 <7,7,0,1>, <1,2,7,0>
+ 2622638139U, // <1,1,2,u>: Cost 3 vext2 <0,u,1,1>, <2,u,0,1>
+ 2622638230U, // <1,1,3,0>: Cost 3 vext2 <0,u,1,1>, <3,0,1,2>
+ 3087844148U, // <1,1,3,1>: Cost 3 vtrnr LHS, <1,1,1,1>
+ 4161585244U, // <1,1,3,2>: Cost 4 vtrnr LHS, <0,1,1,2>
+ 2014101606U, // <1,1,3,3>: Cost 2 vtrnr LHS, LHS
+ 2622638594U, // <1,1,3,4>: Cost 3 vext2 <0,u,1,1>, <3,4,5,6>
+ 2689745920U, // <1,1,3,5>: Cost 3 vext3 <0,u,1,1>, <1,3,5,7>
+ 3763487753U, // <1,1,3,6>: Cost 4 vext3 <0,u,1,1>, <1,3,6,7>
+ 2592707660U, // <1,1,3,7>: Cost 3 vext1 <7,1,1,3>, <7,1,1,3>
+ 2014101611U, // <1,1,3,u>: Cost 2 vtrnr LHS, LHS
+ 2556878950U, // <1,1,4,0>: Cost 3 vext1 <1,1,1,4>, LHS
+ 2221335351U, // <1,1,4,1>: Cost 3 vrev <1,1,1,4>
+ 3696380988U, // <1,1,4,2>: Cost 4 vext2 <0,u,1,1>, <4,2,6,0>
+ 3763487805U, // <1,1,4,3>: Cost 4 vext3 <0,u,1,1>, <1,4,3,5>
+ 2556882230U, // <1,1,4,4>: Cost 3 vext1 <1,1,1,4>, RHS
+ 1548897590U, // <1,1,4,5>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2758184246U, // <1,1,4,6>: Cost 3 vuzpl <1,1,1,1>, RHS
+ 3666457677U, // <1,1,4,7>: Cost 4 vext1 <7,1,1,4>, <7,1,1,4>
+ 1548897833U, // <1,1,4,u>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2693653615U, // <1,1,5,0>: Cost 3 vext3 <1,5,0,1>, <1,5,0,1>
+ 2617331408U, // <1,1,5,1>: Cost 3 vext2 <0,0,1,1>, <5,1,7,3>
+ 4029302934U, // <1,1,5,2>: Cost 4 vzipr <0,4,1,5>, <3,0,1,2>
+ 2689746064U, // <1,1,5,3>: Cost 3 vext3 <0,u,1,1>, <1,5,3,7>
+ 2221564755U, // <1,1,5,4>: Cost 3 vrev <1,1,4,5>
+ 2955559250U, // <1,1,5,5>: Cost 3 vzipr <0,4,1,5>, <0,4,1,5>
+ 2617331810U, // <1,1,5,6>: Cost 3 vext2 <0,0,1,1>, <5,6,7,0>
+ 2825293110U, // <1,1,5,7>: Cost 3 vuzpr <1,1,1,1>, RHS
+ 2689746109U, // <1,1,5,u>: Cost 3 vext3 <0,u,1,1>, <1,5,u,7>
+ 3696382241U, // <1,1,6,0>: Cost 4 vext2 <0,u,1,1>, <6,0,1,2>
+ 2689746127U, // <1,1,6,1>: Cost 3 vext3 <0,u,1,1>, <1,6,1,7>
+ 2617332218U, // <1,1,6,2>: Cost 3 vext2 <0,0,1,1>, <6,2,7,3>
+ 3763487969U, // <1,1,6,3>: Cost 4 vext3 <0,u,1,1>, <1,6,3,7>
+ 3696382605U, // <1,1,6,4>: Cost 4 vext2 <0,u,1,1>, <6,4,5,6>
+ 4029309266U, // <1,1,6,5>: Cost 4 vzipr <0,4,1,6>, <0,4,1,5>
+ 2617332536U, // <1,1,6,6>: Cost 3 vext2 <0,0,1,1>, <6,6,6,6>
+ 2724840702U, // <1,1,6,7>: Cost 3 vext3 <6,7,0,1>, <1,6,7,0>
+ 2725504263U, // <1,1,6,u>: Cost 3 vext3 <6,u,0,1>, <1,6,u,0>
+ 2617332720U, // <1,1,7,0>: Cost 3 vext2 <0,0,1,1>, <7,0,0,1>
+ 2659800138U, // <1,1,7,1>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
+ 3691074717U, // <1,1,7,2>: Cost 4 vext2 <0,0,1,1>, <7,2,1,3>
+ 4167811174U, // <1,1,7,3>: Cost 4 vtrnr <1,1,5,7>, LHS
+ 2617333094U, // <1,1,7,4>: Cost 3 vext2 <0,0,1,1>, <7,4,5,6>
+ 3295396702U, // <1,1,7,5>: Cost 4 vrev <1,1,5,7>
+ 3803891014U, // <1,1,7,6>: Cost 4 vext3 <7,6,0,1>, <1,7,6,0>
+ 2617333356U, // <1,1,7,7>: Cost 3 vext2 <0,0,1,1>, <7,7,7,7>
+ 2659800138U, // <1,1,7,u>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
+ 1483112550U, // <1,1,u,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,1,u,1>: Cost 1 vdup1 LHS
+ 2622642056U, // <1,1,u,2>: Cost 3 vext2 <0,u,1,1>, <u,2,3,3>
+ 2014142566U, // <1,1,u,3>: Cost 2 vtrnr LHS, LHS
+ 1483115830U, // <1,1,u,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 1548900506U, // <1,1,u,5>: Cost 2 vext2 <0,u,1,1>, RHS
+ 2622642384U, // <1,1,u,6>: Cost 3 vext2 <0,u,1,1>, <u,6,3,7>
+ 2825293353U, // <1,1,u,7>: Cost 3 vuzpr <1,1,1,1>, RHS
+ 202162278U, // <1,1,u,u>: Cost 1 vdup1 LHS
+ 2635251712U, // <1,2,0,0>: Cost 3 vext2 <3,0,1,2>, <0,0,0,0>
+ 1561509990U, // <1,2,0,1>: Cost 2 vext2 <3,0,1,2>, LHS
+ 2618663085U, // <1,2,0,2>: Cost 3 vext2 <0,2,1,2>, <0,2,1,2>
+ 2696529358U, // <1,2,0,3>: Cost 3 vext3 <2,0,3,1>, <2,0,3,1>
+ 2635252050U, // <1,2,0,4>: Cost 3 vext2 <3,0,1,2>, <0,4,1,5>
+ 3769533926U, // <1,2,0,5>: Cost 4 vext3 <1,u,2,1>, <2,0,5,7>
+ 2621317617U, // <1,2,0,6>: Cost 3 vext2 <0,6,1,2>, <0,6,1,2>
+ 2659140170U, // <1,2,0,7>: Cost 3 vext2 <7,0,1,2>, <0,7,2,1>
+ 1561510557U, // <1,2,0,u>: Cost 2 vext2 <3,0,1,2>, LHS
+ 2623308516U, // <1,2,1,0>: Cost 3 vext2 <1,0,1,2>, <1,0,1,2>
+ 2635252532U, // <1,2,1,1>: Cost 3 vext2 <3,0,1,2>, <1,1,1,1>
+ 2631271318U, // <1,2,1,2>: Cost 3 vext2 <2,3,1,2>, <1,2,3,0>
+ 2958180454U, // <1,2,1,3>: Cost 3 vzipr <0,u,1,1>, LHS
+ 2550959414U, // <1,2,1,4>: Cost 3 vext1 <0,1,2,1>, RHS
+ 2635252880U, // <1,2,1,5>: Cost 3 vext2 <3,0,1,2>, <1,5,3,7>
+ 2635252952U, // <1,2,1,6>: Cost 3 vext2 <3,0,1,2>, <1,6,2,7>
+ 3732882731U, // <1,2,1,7>: Cost 4 vext2 <7,0,1,2>, <1,7,3,0>
+ 2958180459U, // <1,2,1,u>: Cost 3 vzipr <0,u,1,1>, LHS
+ 2629281213U, // <1,2,2,0>: Cost 3 vext2 <2,0,1,2>, <2,0,1,2>
+ 2635253280U, // <1,2,2,1>: Cost 3 vext2 <3,0,1,2>, <2,1,3,2>
+ 2618664552U, // <1,2,2,2>: Cost 3 vext2 <0,2,1,2>, <2,2,2,2>
+ 2689746546U, // <1,2,2,3>: Cost 3 vext3 <0,u,1,1>, <2,2,3,3>
+ 3764815485U, // <1,2,2,4>: Cost 4 vext3 <1,1,1,1>, <2,2,4,5>
+ 3760023176U, // <1,2,2,5>: Cost 4 vext3 <0,2,u,1>, <2,2,5,7>
+ 2635253690U, // <1,2,2,6>: Cost 3 vext2 <3,0,1,2>, <2,6,3,7>
+ 2659141610U, // <1,2,2,7>: Cost 3 vext2 <7,0,1,2>, <2,7,0,1>
+ 2689746591U, // <1,2,2,u>: Cost 3 vext3 <0,u,1,1>, <2,2,u,3>
+ 403488870U, // <1,2,3,0>: Cost 1 vext1 LHS, LHS
+ 1477231350U, // <1,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1477232232U, // <1,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1477233052U, // <1,2,3,3>: Cost 2 vext1 LHS, <3,3,3,3>
+ 403492150U, // <1,2,3,4>: Cost 1 vext1 LHS, RHS
+ 1525010128U, // <1,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
+ 1525010938U, // <1,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1525011450U, // <1,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 403494702U, // <1,2,3,u>: Cost 1 vext1 LHS, LHS
+ 2641226607U, // <1,2,4,0>: Cost 3 vext2 <4,0,1,2>, <4,0,1,2>
+ 3624723446U, // <1,2,4,1>: Cost 4 vext1 <0,1,2,4>, <1,3,4,6>
+ 3301123609U, // <1,2,4,2>: Cost 4 vrev <2,1,2,4>
+ 2598759198U, // <1,2,4,3>: Cost 3 vext1 <u,1,2,4>, <3,u,1,2>
+ 2659142864U, // <1,2,4,4>: Cost 3 vext2 <7,0,1,2>, <4,4,4,4>
+ 1561513270U, // <1,2,4,5>: Cost 2 vext2 <3,0,1,2>, RHS
+ 2659143028U, // <1,2,4,6>: Cost 3 vext2 <7,0,1,2>, <4,6,4,6>
+ 2659143112U, // <1,2,4,7>: Cost 3 vext2 <7,0,1,2>, <4,7,5,0>
+ 1561513513U, // <1,2,4,u>: Cost 2 vext2 <3,0,1,2>, RHS
+ 2550988902U, // <1,2,5,0>: Cost 3 vext1 <0,1,2,5>, LHS
+ 2550989824U, // <1,2,5,1>: Cost 3 vext1 <0,1,2,5>, <1,3,5,7>
+ 3624732264U, // <1,2,5,2>: Cost 4 vext1 <0,1,2,5>, <2,2,2,2>
+ 2955559014U, // <1,2,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
+ 2550992182U, // <1,2,5,4>: Cost 3 vext1 <0,1,2,5>, RHS
+ 2659143684U, // <1,2,5,5>: Cost 3 vext2 <7,0,1,2>, <5,5,5,5>
+ 2659143778U, // <1,2,5,6>: Cost 3 vext2 <7,0,1,2>, <5,6,7,0>
+ 2659143848U, // <1,2,5,7>: Cost 3 vext2 <7,0,1,2>, <5,7,5,7>
+ 2550994734U, // <1,2,5,u>: Cost 3 vext1 <0,1,2,5>, LHS
+ 2700289945U, // <1,2,6,0>: Cost 3 vext3 <2,6,0,1>, <2,6,0,1>
+ 2635256232U, // <1,2,6,1>: Cost 3 vext2 <3,0,1,2>, <6,1,7,2>
+ 2659144186U, // <1,2,6,2>: Cost 3 vext2 <7,0,1,2>, <6,2,7,3>
+ 2689746874U, // <1,2,6,3>: Cost 3 vext3 <0,u,1,1>, <2,6,3,7>
+ 3763488705U, // <1,2,6,4>: Cost 4 vext3 <0,u,1,1>, <2,6,4,5>
+ 3763488716U, // <1,2,6,5>: Cost 4 vext3 <0,u,1,1>, <2,6,5,7>
+ 2659144504U, // <1,2,6,6>: Cost 3 vext2 <7,0,1,2>, <6,6,6,6>
+ 2657817432U, // <1,2,6,7>: Cost 3 vext2 <6,7,1,2>, <6,7,1,2>
+ 2689746919U, // <1,2,6,u>: Cost 3 vext3 <0,u,1,1>, <2,6,u,7>
+ 1585402874U, // <1,2,7,0>: Cost 2 vext2 <7,0,1,2>, <7,0,1,2>
+ 2659144770U, // <1,2,7,1>: Cost 3 vext2 <7,0,1,2>, <7,1,0,2>
+ 3708998858U, // <1,2,7,2>: Cost 4 vext2 <3,0,1,2>, <7,2,6,3>
+ 2635257059U, // <1,2,7,3>: Cost 3 vext2 <3,0,1,2>, <7,3,0,1>
+ 2659145062U, // <1,2,7,4>: Cost 3 vext2 <7,0,1,2>, <7,4,5,6>
+ 3732886916U, // <1,2,7,5>: Cost 4 vext2 <7,0,1,2>, <7,5,0,0>
+ 3732886998U, // <1,2,7,6>: Cost 4 vext2 <7,0,1,2>, <7,6,0,1>
+ 2659145255U, // <1,2,7,7>: Cost 3 vext2 <7,0,1,2>, <7,7,0,1>
+ 1590711938U, // <1,2,7,u>: Cost 2 vext2 <7,u,1,2>, <7,u,1,2>
+ 403529835U, // <1,2,u,0>: Cost 1 vext1 LHS, LHS
+ 1477272310U, // <1,2,u,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1477273192U, // <1,2,u,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1477273750U, // <1,2,u,3>: Cost 2 vext1 LHS, <3,0,1,2>
+ 403533110U, // <1,2,u,4>: Cost 1 vext1 LHS, RHS
+ 1561516186U, // <1,2,u,5>: Cost 2 vext2 <3,0,1,2>, RHS
+ 1525051898U, // <1,2,u,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1525052410U, // <1,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 403535662U, // <1,2,u,u>: Cost 1 vext1 LHS, LHS
+ 2819407872U, // <1,3,0,0>: Cost 3 vuzpr LHS, <0,0,0,0>
+ 1551564902U, // <1,3,0,1>: Cost 2 vext2 <1,3,1,3>, LHS
+ 2819408630U, // <1,3,0,2>: Cost 3 vuzpr LHS, <1,0,3,2>
+ 2619334911U, // <1,3,0,3>: Cost 3 vext2 <0,3,1,3>, <0,3,1,3>
+ 2625306962U, // <1,3,0,4>: Cost 3 vext2 <1,3,1,3>, <0,4,1,5>
+ 3832725879U, // <1,3,0,5>: Cost 4 vuzpl <1,2,3,0>, <0,4,5,6>
+ 3699048959U, // <1,3,0,6>: Cost 4 vext2 <1,3,1,3>, <0,6,2,7>
+ 3776538827U, // <1,3,0,7>: Cost 4 vext3 <3,0,7,1>, <3,0,7,1>
+ 1551565469U, // <1,3,0,u>: Cost 2 vext2 <1,3,1,3>, LHS
+ 2618671862U, // <1,3,1,0>: Cost 3 vext2 <0,2,1,3>, <1,0,3,2>
+ 2819408692U, // <1,3,1,1>: Cost 3 vuzpr LHS, <1,1,1,1>
+ 2624643975U, // <1,3,1,2>: Cost 3 vext2 <1,2,1,3>, <1,2,1,3>
+ 1745666150U, // <1,3,1,3>: Cost 2 vuzpr LHS, LHS
+ 2557005110U, // <1,3,1,4>: Cost 3 vext1 <1,1,3,1>, RHS
+ 2625307792U, // <1,3,1,5>: Cost 3 vext2 <1,3,1,3>, <1,5,3,7>
+ 3698386127U, // <1,3,1,6>: Cost 4 vext2 <1,2,1,3>, <1,6,1,7>
+ 2592838748U, // <1,3,1,7>: Cost 3 vext1 <7,1,3,1>, <7,1,3,1>
+ 1745666155U, // <1,3,1,u>: Cost 2 vuzpr LHS, LHS
+ 2819408790U, // <1,3,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
+ 2625308193U, // <1,3,2,1>: Cost 3 vext2 <1,3,1,3>, <2,1,3,3>
+ 2819408036U, // <1,3,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
+ 2819851890U, // <1,3,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
+ 2819408794U, // <1,3,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
+ 3893149890U, // <1,3,2,5>: Cost 4 vuzpr LHS, <0,2,3,5>
+ 2819408076U, // <1,3,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
+ 3772041583U, // <1,3,2,7>: Cost 4 vext3 <2,3,0,1>, <3,2,7,3>
+ 2819408042U, // <1,3,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
+ 1483276390U, // <1,3,3,0>: Cost 2 vext1 <1,1,3,3>, LHS
+ 1483277128U, // <1,3,3,1>: Cost 2 vext1 <1,1,3,3>, <1,1,3,3>
+ 2557019752U, // <1,3,3,2>: Cost 3 vext1 <1,1,3,3>, <2,2,2,2>
+ 2819408856U, // <1,3,3,3>: Cost 3 vuzpr LHS, <1,3,1,3>
+ 1483279670U, // <1,3,3,4>: Cost 2 vext1 <1,1,3,3>, RHS
+ 2819409614U, // <1,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
+ 2598826490U, // <1,3,3,6>: Cost 3 vext1 <u,1,3,3>, <6,2,7,3>
+ 3087844352U, // <1,3,3,7>: Cost 3 vtrnr LHS, <1,3,5,7>
+ 1483282222U, // <1,3,3,u>: Cost 2 vext1 <1,1,3,3>, LHS
+ 2568970342U, // <1,3,4,0>: Cost 3 vext1 <3,1,3,4>, LHS
+ 2568971224U, // <1,3,4,1>: Cost 3 vext1 <3,1,3,4>, <1,3,1,3>
+ 3832761290U, // <1,3,4,2>: Cost 4 vuzpl <1,2,3,4>, <4,1,2,3>
+ 2233428219U, // <1,3,4,3>: Cost 3 vrev <3,1,3,4>
+ 2568973622U, // <1,3,4,4>: Cost 3 vext1 <3,1,3,4>, RHS
+ 1551568182U, // <1,3,4,5>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2819410434U, // <1,3,4,6>: Cost 3 vuzpr LHS, <3,4,5,6>
+ 3666605151U, // <1,3,4,7>: Cost 4 vext1 <7,1,3,4>, <7,1,3,4>
+ 1551568425U, // <1,3,4,u>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2563006566U, // <1,3,5,0>: Cost 3 vext1 <2,1,3,5>, LHS
+ 2568979456U, // <1,3,5,1>: Cost 3 vext1 <3,1,3,5>, <1,3,5,7>
+ 2563008035U, // <1,3,5,2>: Cost 3 vext1 <2,1,3,5>, <2,1,3,5>
+ 2233436412U, // <1,3,5,3>: Cost 3 vrev <3,1,3,5>
+ 2563009846U, // <1,3,5,4>: Cost 3 vext1 <2,1,3,5>, RHS
+ 2867187716U, // <1,3,5,5>: Cost 3 vuzpr LHS, <5,5,5,5>
+ 2655834214U, // <1,3,5,6>: Cost 3 vext2 <6,4,1,3>, <5,6,7,4>
+ 1745669430U, // <1,3,5,7>: Cost 2 vuzpr LHS, RHS
+ 1745669431U, // <1,3,5,u>: Cost 2 vuzpr LHS, RHS
+ 2867187810U, // <1,3,6,0>: Cost 3 vuzpr LHS, <5,6,7,0>
+ 3699052931U, // <1,3,6,1>: Cost 4 vext2 <1,3,1,3>, <6,1,3,1>
+ 2654507460U, // <1,3,6,2>: Cost 3 vext2 <6,2,1,3>, <6,2,1,3>
+ 3766291091U, // <1,3,6,3>: Cost 4 vext3 <1,3,3,1>, <3,6,3,7>
+ 2655834726U, // <1,3,6,4>: Cost 3 vext2 <6,4,1,3>, <6,4,1,3>
+ 3923384562U, // <1,3,6,5>: Cost 4 vuzpr <5,1,7,3>, <u,6,7,5>
+ 2657161992U, // <1,3,6,6>: Cost 3 vext2 <6,6,1,3>, <6,6,1,3>
+ 2819852218U, // <1,3,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
+ 2819852219U, // <1,3,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
+ 2706926275U, // <1,3,7,0>: Cost 3 vext3 <3,7,0,1>, <3,7,0,1>
+ 2659816524U, // <1,3,7,1>: Cost 3 vext2 <7,1,1,3>, <7,1,1,3>
+ 3636766245U, // <1,3,7,2>: Cost 4 vext1 <2,1,3,7>, <2,1,3,7>
+ 2867187903U, // <1,3,7,3>: Cost 3 vuzpr LHS, <5,7,u,3>
+ 2625312102U, // <1,3,7,4>: Cost 3 vext2 <1,3,1,3>, <7,4,5,6>
+ 2867188598U, // <1,3,7,5>: Cost 3 vuzpr LHS, <6,7,4,5>
+ 3728250344U, // <1,3,7,6>: Cost 4 vext2 <6,2,1,3>, <7,6,2,1>
+ 2867187880U, // <1,3,7,7>: Cost 3 vuzpr LHS, <5,7,5,7>
+ 2707516171U, // <1,3,7,u>: Cost 3 vext3 <3,7,u,1>, <3,7,u,1>
+ 1483317350U, // <1,3,u,0>: Cost 2 vext1 <1,1,3,u>, LHS
+ 1483318093U, // <1,3,u,1>: Cost 2 vext1 <1,1,3,u>, <1,1,3,u>
+ 2819410718U, // <1,3,u,2>: Cost 3 vuzpr LHS, <3,u,1,2>
+ 1745666717U, // <1,3,u,3>: Cost 2 vuzpr LHS, LHS
+ 1483320630U, // <1,3,u,4>: Cost 2 vext1 <1,1,3,u>, RHS
+ 1551571098U, // <1,3,u,5>: Cost 2 vext2 <1,3,1,3>, RHS
+ 2819410758U, // <1,3,u,6>: Cost 3 vuzpr LHS, <3,u,5,6>
+ 1745669673U, // <1,3,u,7>: Cost 2 vuzpr LHS, RHS
+ 1745666722U, // <1,3,u,u>: Cost 2 vuzpr LHS, LHS
+ 2617352205U, // <1,4,0,0>: Cost 3 vext2 <0,0,1,4>, <0,0,1,4>
+ 2619342950U, // <1,4,0,1>: Cost 3 vext2 <0,3,1,4>, LHS
+ 3692421295U, // <1,4,0,2>: Cost 4 vext2 <0,2,1,4>, <0,2,1,4>
+ 2619343104U, // <1,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
+ 2617352530U, // <1,4,0,4>: Cost 3 vext2 <0,0,1,4>, <0,4,1,5>
+ 1634880402U, // <1,4,0,5>: Cost 2 vext3 <4,0,5,1>, <4,0,5,1>
+ 2713930652U, // <1,4,0,6>: Cost 3 vext3 <4,u,5,1>, <4,0,6,2>
+ 3732898396U, // <1,4,0,7>: Cost 4 vext2 <7,0,1,4>, <0,7,4,1>
+ 1635101613U, // <1,4,0,u>: Cost 2 vext3 <4,0,u,1>, <4,0,u,1>
+ 3693085430U, // <1,4,1,0>: Cost 4 vext2 <0,3,1,4>, <1,0,3,2>
+ 2623988535U, // <1,4,1,1>: Cost 3 vext2 <1,1,1,4>, <1,1,1,4>
+ 3693085590U, // <1,4,1,2>: Cost 4 vext2 <0,3,1,4>, <1,2,3,0>
+ 3692422134U, // <1,4,1,3>: Cost 4 vext2 <0,2,1,4>, <1,3,4,6>
+ 3693085726U, // <1,4,1,4>: Cost 4 vext2 <0,3,1,4>, <1,4,0,1>
+ 2892401974U, // <1,4,1,5>: Cost 3 vzipl <1,1,1,1>, RHS
+ 3026619702U, // <1,4,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
+ 3800206324U, // <1,4,1,7>: Cost 4 vext3 <7,0,4,1>, <4,1,7,0>
+ 2892402217U, // <1,4,1,u>: Cost 3 vzipl <1,1,1,1>, RHS
+ 3966978927U, // <1,4,2,0>: Cost 4 vzipl <1,2,3,4>, <4,0,1,2>
+ 3966979018U, // <1,4,2,1>: Cost 4 vzipl <1,2,3,4>, <4,1,2,3>
+ 3693086312U, // <1,4,2,2>: Cost 4 vext2 <0,3,1,4>, <2,2,2,2>
+ 2635269798U, // <1,4,2,3>: Cost 3 vext2 <3,0,1,4>, <2,3,0,1>
+ 3966979280U, // <1,4,2,4>: Cost 4 vzipl <1,2,3,4>, <4,4,4,4>
+ 2893204790U, // <1,4,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
+ 3693086650U, // <1,4,2,6>: Cost 4 vext2 <0,3,1,4>, <2,6,3,7>
+ 3666662502U, // <1,4,2,7>: Cost 4 vext1 <7,1,4,2>, <7,1,4,2>
+ 2893205033U, // <1,4,2,u>: Cost 3 vzipl <1,2,3,0>, RHS
+ 2563063910U, // <1,4,3,0>: Cost 3 vext1 <2,1,4,3>, LHS
+ 2563064730U, // <1,4,3,1>: Cost 3 vext1 <2,1,4,3>, <1,2,3,4>
+ 2563065386U, // <1,4,3,2>: Cost 3 vext1 <2,1,4,3>, <2,1,4,3>
+ 3693087132U, // <1,4,3,3>: Cost 4 vext2 <0,3,1,4>, <3,3,3,3>
+ 2619345410U, // <1,4,3,4>: Cost 3 vext2 <0,3,1,4>, <3,4,5,6>
+ 3087843666U, // <1,4,3,5>: Cost 3 vtrnr LHS, <0,4,1,5>
+ 3087843676U, // <1,4,3,6>: Cost 3 vtrnr LHS, <0,4,2,6>
+ 3666670695U, // <1,4,3,7>: Cost 4 vext1 <7,1,4,3>, <7,1,4,3>
+ 3087843669U, // <1,4,3,u>: Cost 3 vtrnr LHS, <0,4,1,u>
+ 2620672914U, // <1,4,4,0>: Cost 3 vext2 <0,5,1,4>, <4,0,5,1>
+ 3630842706U, // <1,4,4,1>: Cost 4 vext1 <1,1,4,4>, <1,1,4,4>
+ 3313069003U, // <1,4,4,2>: Cost 4 vrev <4,1,2,4>
+ 3642788100U, // <1,4,4,3>: Cost 4 vext1 <3,1,4,4>, <3,1,4,4>
+ 2713930960U, // <1,4,4,4>: Cost 3 vext3 <4,u,5,1>, <4,4,4,4>
+ 2619346230U, // <1,4,4,5>: Cost 3 vext2 <0,3,1,4>, RHS
+ 2713930980U, // <1,4,4,6>: Cost 3 vext3 <4,u,5,1>, <4,4,6,6>
+ 3736882642U, // <1,4,4,7>: Cost 4 vext2 <7,6,1,4>, <4,7,6,1>
+ 2619346473U, // <1,4,4,u>: Cost 3 vext2 <0,3,1,4>, RHS
+ 2557108326U, // <1,4,5,0>: Cost 3 vext1 <1,1,4,5>, LHS
+ 2557109075U, // <1,4,5,1>: Cost 3 vext1 <1,1,4,5>, <1,1,4,5>
+ 2598913774U, // <1,4,5,2>: Cost 3 vext1 <u,1,4,5>, <2,3,u,1>
+ 3630852246U, // <1,4,5,3>: Cost 4 vext1 <1,1,4,5>, <3,0,1,2>
+ 2557111606U, // <1,4,5,4>: Cost 3 vext1 <1,1,4,5>, RHS
+ 2895252790U, // <1,4,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
+ 1616006454U, // <1,4,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 3899059510U, // <1,4,5,7>: Cost 4 vuzpr <1,1,1,4>, RHS
+ 1616006472U, // <1,4,5,u>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2557116518U, // <1,4,6,0>: Cost 3 vext1 <1,1,4,6>, LHS
+ 2557117236U, // <1,4,6,1>: Cost 3 vext1 <1,1,4,6>, <1,1,1,1>
+ 3630859880U, // <1,4,6,2>: Cost 4 vext1 <1,1,4,6>, <2,2,2,2>
+ 2569062550U, // <1,4,6,3>: Cost 3 vext1 <3,1,4,6>, <3,0,1,2>
+ 2557119798U, // <1,4,6,4>: Cost 3 vext1 <1,1,4,6>, RHS
+ 3763490174U, // <1,4,6,5>: Cost 4 vext3 <0,u,1,1>, <4,6,5,7>
+ 3763490183U, // <1,4,6,6>: Cost 4 vext3 <0,u,1,1>, <4,6,6,7>
+ 2712751498U, // <1,4,6,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
+ 2557122350U, // <1,4,6,u>: Cost 3 vext1 <1,1,4,6>, LHS
+ 2659161084U, // <1,4,7,0>: Cost 3 vext2 <7,0,1,4>, <7,0,1,4>
+ 3732903040U, // <1,4,7,1>: Cost 4 vext2 <7,0,1,4>, <7,1,7,1>
+ 3734230174U, // <1,4,7,2>: Cost 4 vext2 <7,2,1,4>, <7,2,1,4>
+ 3734893807U, // <1,4,7,3>: Cost 4 vext2 <7,3,1,4>, <7,3,1,4>
+ 3660729654U, // <1,4,7,4>: Cost 4 vext1 <6,1,4,7>, RHS
+ 3786493384U, // <1,4,7,5>: Cost 4 vext3 <4,6,7,1>, <4,7,5,0>
+ 2713341394U, // <1,4,7,6>: Cost 3 vext3 <4,7,6,1>, <4,7,6,1>
+ 3660731386U, // <1,4,7,7>: Cost 4 vext1 <6,1,4,7>, <7,0,1,2>
+ 2664470148U, // <1,4,7,u>: Cost 3 vext2 <7,u,1,4>, <7,u,1,4>
+ 2557132902U, // <1,4,u,0>: Cost 3 vext1 <1,1,4,u>, LHS
+ 2619348782U, // <1,4,u,1>: Cost 3 vext2 <0,3,1,4>, LHS
+ 2563106351U, // <1,4,u,2>: Cost 3 vext1 <2,1,4,u>, <2,1,4,u>
+ 2713783816U, // <1,4,u,3>: Cost 3 vext3 <4,u,3,1>, <4,u,3,1>
+ 2622666815U, // <1,4,u,4>: Cost 3 vext2 <0,u,1,4>, <u,4,5,6>
+ 1640189466U, // <1,4,u,5>: Cost 2 vext3 <4,u,5,1>, <4,u,5,1>
+ 1616006697U, // <1,4,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2712751498U, // <1,4,u,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
+ 1616006715U, // <1,4,u,u>: Cost 2 vext3 <0,u,1,1>, RHS
+ 2620014592U, // <1,5,0,0>: Cost 3 vext2 <0,4,1,5>, <0,0,0,0>
+ 1546272870U, // <1,5,0,1>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2618687664U, // <1,5,0,2>: Cost 3 vext2 <0,2,1,5>, <0,2,1,5>
+ 3693093120U, // <1,5,0,3>: Cost 4 vext2 <0,3,1,5>, <0,3,1,4>
+ 1546273106U, // <1,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
+ 2620678563U, // <1,5,0,5>: Cost 3 vext2 <0,5,1,5>, <0,5,1,5>
+ 2714668660U, // <1,5,0,6>: Cost 3 vext3 <5,0,6,1>, <5,0,6,1>
+ 3772042877U, // <1,5,0,7>: Cost 4 vext3 <2,3,0,1>, <5,0,7,1>
+ 1546273437U, // <1,5,0,u>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2620015350U, // <1,5,1,0>: Cost 3 vext2 <0,4,1,5>, <1,0,3,2>
+ 2620015412U, // <1,5,1,1>: Cost 3 vext2 <0,4,1,5>, <1,1,1,1>
+ 2620015510U, // <1,5,1,2>: Cost 3 vext2 <0,4,1,5>, <1,2,3,0>
+ 2618688512U, // <1,5,1,3>: Cost 3 vext2 <0,2,1,5>, <1,3,5,7>
+ 2620015677U, // <1,5,1,4>: Cost 3 vext2 <0,4,1,5>, <1,4,3,5>
+ 2620015727U, // <1,5,1,5>: Cost 3 vext2 <0,4,1,5>, <1,5,0,1>
+ 2620015859U, // <1,5,1,6>: Cost 3 vext2 <0,4,1,5>, <1,6,5,7>
+ 3093728566U, // <1,5,1,7>: Cost 3 vtrnr <1,1,1,1>, RHS
+ 2620015981U, // <1,5,1,u>: Cost 3 vext2 <0,4,1,5>, <1,u,1,3>
+ 3692430816U, // <1,5,2,0>: Cost 4 vext2 <0,2,1,5>, <2,0,5,1>
+ 2620016163U, // <1,5,2,1>: Cost 3 vext2 <0,4,1,5>, <2,1,3,5>
+ 2620016232U, // <1,5,2,2>: Cost 3 vext2 <0,4,1,5>, <2,2,2,2>
+ 2620016294U, // <1,5,2,3>: Cost 3 vext2 <0,4,1,5>, <2,3,0,1>
+ 3693758221U, // <1,5,2,4>: Cost 4 vext2 <0,4,1,5>, <2,4,2,5>
+ 3692431209U, // <1,5,2,5>: Cost 4 vext2 <0,2,1,5>, <2,5,3,7>
+ 2620016570U, // <1,5,2,6>: Cost 3 vext2 <0,4,1,5>, <2,6,3,7>
+ 4173598006U, // <1,5,2,7>: Cost 4 vtrnr <2,1,3,2>, RHS
+ 2620016699U, // <1,5,2,u>: Cost 3 vext2 <0,4,1,5>, <2,u,0,1>
+ 2620016790U, // <1,5,3,0>: Cost 3 vext2 <0,4,1,5>, <3,0,1,2>
+ 2569110672U, // <1,5,3,1>: Cost 3 vext1 <3,1,5,3>, <1,5,3,7>
+ 3693758785U, // <1,5,3,2>: Cost 4 vext2 <0,4,1,5>, <3,2,2,2>
+ 2620017052U, // <1,5,3,3>: Cost 3 vext2 <0,4,1,5>, <3,3,3,3>
+ 2620017154U, // <1,5,3,4>: Cost 3 vext2 <0,4,1,5>, <3,4,5,6>
+ 3135623172U, // <1,5,3,5>: Cost 3 vtrnr LHS, <5,5,5,5>
+ 4161587048U, // <1,5,3,6>: Cost 4 vtrnr LHS, <2,5,3,6>
+ 2014104886U, // <1,5,3,7>: Cost 2 vtrnr LHS, RHS
+ 2014104887U, // <1,5,3,u>: Cost 2 vtrnr LHS, RHS
+ 2620017554U, // <1,5,4,0>: Cost 3 vext2 <0,4,1,5>, <4,0,5,1>
+ 2620017634U, // <1,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
+ 3693759551U, // <1,5,4,2>: Cost 4 vext2 <0,4,1,5>, <4,2,6,3>
+ 3642861837U, // <1,5,4,3>: Cost 4 vext1 <3,1,5,4>, <3,1,5,4>
+ 2575092710U, // <1,5,4,4>: Cost 3 vext1 <4,1,5,4>, <4,1,5,4>
+ 1546276150U, // <1,5,4,5>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2759855414U, // <1,5,4,6>: Cost 3 vuzpl <1,3,5,7>, RHS
+ 2713931718U, // <1,5,4,7>: Cost 3 vext3 <4,u,5,1>, <5,4,7,6>
+ 1546276393U, // <1,5,4,u>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2557182054U, // <1,5,5,0>: Cost 3 vext1 <1,1,5,5>, LHS
+ 2557182812U, // <1,5,5,1>: Cost 3 vext1 <1,1,5,5>, <1,1,5,5>
+ 3630925347U, // <1,5,5,2>: Cost 4 vext1 <1,1,5,5>, <2,1,3,5>
+ 4029301675U, // <1,5,5,3>: Cost 4 vzipr <0,4,1,5>, <1,2,5,3>
+ 2557185334U, // <1,5,5,4>: Cost 3 vext1 <1,1,5,5>, RHS
+ 2713931780U, // <1,5,5,5>: Cost 3 vext3 <4,u,5,1>, <5,5,5,5>
+ 2667794530U, // <1,5,5,6>: Cost 3 vext2 <u,4,1,5>, <5,6,7,0>
+ 2713931800U, // <1,5,5,7>: Cost 3 vext3 <4,u,5,1>, <5,5,7,7>
+ 2557187886U, // <1,5,5,u>: Cost 3 vext1 <1,1,5,5>, LHS
+ 2718208036U, // <1,5,6,0>: Cost 3 vext3 <5,6,0,1>, <5,6,0,1>
+ 2620019115U, // <1,5,6,1>: Cost 3 vext2 <0,4,1,5>, <6,1,7,5>
+ 2667794938U, // <1,5,6,2>: Cost 3 vext2 <u,4,1,5>, <6,2,7,3>
+ 3787673666U, // <1,5,6,3>: Cost 4 vext3 <4,u,5,1>, <5,6,3,4>
+ 3693761165U, // <1,5,6,4>: Cost 4 vext2 <0,4,1,5>, <6,4,5,6>
+ 3319279297U, // <1,5,6,5>: Cost 4 vrev <5,1,5,6>
+ 2667795256U, // <1,5,6,6>: Cost 3 vext2 <u,4,1,5>, <6,6,6,6>
+ 2713931874U, // <1,5,6,7>: Cost 3 vext3 <4,u,5,1>, <5,6,7,0>
+ 2713931883U, // <1,5,6,u>: Cost 3 vext3 <4,u,5,1>, <5,6,u,0>
+ 2557198438U, // <1,5,7,0>: Cost 3 vext1 <1,1,5,7>, LHS
+ 2557199156U, // <1,5,7,1>: Cost 3 vext1 <1,1,5,7>, <1,1,1,1>
+ 2569143974U, // <1,5,7,2>: Cost 3 vext1 <3,1,5,7>, <2,3,0,1>
+ 2569144592U, // <1,5,7,3>: Cost 3 vext1 <3,1,5,7>, <3,1,5,7>
+ 2557201718U, // <1,5,7,4>: Cost 3 vext1 <1,1,5,7>, RHS
+ 2713931944U, // <1,5,7,5>: Cost 3 vext3 <4,u,5,1>, <5,7,5,7>
+ 3787673770U, // <1,5,7,6>: Cost 4 vext3 <4,u,5,1>, <5,7,6,0>
+ 2719387828U, // <1,5,7,7>: Cost 3 vext3 <5,7,7,1>, <5,7,7,1>
+ 2557204270U, // <1,5,7,u>: Cost 3 vext1 <1,1,5,7>, LHS
+ 2620020435U, // <1,5,u,0>: Cost 3 vext2 <0,4,1,5>, <u,0,1,2>
+ 1546278702U, // <1,5,u,1>: Cost 2 vext2 <0,4,1,5>, LHS
+ 2620020616U, // <1,5,u,2>: Cost 3 vext2 <0,4,1,5>, <u,2,3,3>
+ 2620020668U, // <1,5,u,3>: Cost 3 vext2 <0,4,1,5>, <u,3,0,1>
+ 1594054682U, // <1,5,u,4>: Cost 2 vext2 <u,4,1,5>, <u,4,1,5>
+ 1546279066U, // <1,5,u,5>: Cost 2 vext2 <0,4,1,5>, RHS
+ 2620020944U, // <1,5,u,6>: Cost 3 vext2 <0,4,1,5>, <u,6,3,7>
+ 2014145846U, // <1,5,u,7>: Cost 2 vtrnr LHS, RHS
+ 2014145847U, // <1,5,u,u>: Cost 2 vtrnr LHS, RHS
+ 3692437504U, // <1,6,0,0>: Cost 4 vext2 <0,2,1,6>, <0,0,0,0>
+ 2618695782U, // <1,6,0,1>: Cost 3 vext2 <0,2,1,6>, LHS
+ 2618695857U, // <1,6,0,2>: Cost 3 vext2 <0,2,1,6>, <0,2,1,6>
+ 3794161970U, // <1,6,0,3>: Cost 4 vext3 <6,0,3,1>, <6,0,3,1>
+ 2620023122U, // <1,6,0,4>: Cost 3 vext2 <0,4,1,6>, <0,4,1,5>
+ 2620686756U, // <1,6,0,5>: Cost 3 vext2 <0,5,1,6>, <0,5,1,6>
+ 2621350389U, // <1,6,0,6>: Cost 3 vext2 <0,6,1,6>, <0,6,1,6>
+ 4028599606U, // <1,6,0,7>: Cost 4 vzipr <0,3,1,0>, RHS
+ 2618696349U, // <1,6,0,u>: Cost 3 vext2 <0,2,1,6>, LHS
+ 3692438262U, // <1,6,1,0>: Cost 4 vext2 <0,2,1,6>, <1,0,3,2>
+ 2625995572U, // <1,6,1,1>: Cost 3 vext2 <1,4,1,6>, <1,1,1,1>
+ 3692438422U, // <1,6,1,2>: Cost 4 vext2 <0,2,1,6>, <1,2,3,0>
+ 3692438488U, // <1,6,1,3>: Cost 4 vext2 <0,2,1,6>, <1,3,1,3>
+ 2625995820U, // <1,6,1,4>: Cost 3 vext2 <1,4,1,6>, <1,4,1,6>
+ 3692438672U, // <1,6,1,5>: Cost 4 vext2 <0,2,1,6>, <1,5,3,7>
+ 3692438720U, // <1,6,1,6>: Cost 4 vext2 <0,2,1,6>, <1,6,0,1>
+ 2958183734U, // <1,6,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
+ 2958183735U, // <1,6,1,u>: Cost 3 vzipr <0,u,1,1>, RHS
+ 2721526201U, // <1,6,2,0>: Cost 3 vext3 <6,2,0,1>, <6,2,0,1>
+ 3692439097U, // <1,6,2,1>: Cost 4 vext2 <0,2,1,6>, <2,1,6,0>
+ 3692439144U, // <1,6,2,2>: Cost 4 vext2 <0,2,1,6>, <2,2,2,2>
+ 3692439206U, // <1,6,2,3>: Cost 4 vext2 <0,2,1,6>, <2,3,0,1>
+ 3636948278U, // <1,6,2,4>: Cost 4 vext1 <2,1,6,2>, RHS
+ 3787674092U, // <1,6,2,5>: Cost 4 vext3 <4,u,5,1>, <6,2,5,7>
+ 2618697658U, // <1,6,2,6>: Cost 3 vext2 <0,2,1,6>, <2,6,3,7>
+ 2970799414U, // <1,6,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2970799415U, // <1,6,2,u>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2563211366U, // <1,6,3,0>: Cost 3 vext1 <2,1,6,3>, LHS
+ 3699738854U, // <1,6,3,1>: Cost 4 vext2 <1,4,1,6>, <3,1,1,1>
+ 2563212860U, // <1,6,3,2>: Cost 3 vext1 <2,1,6,3>, <2,1,6,3>
+ 3692439964U, // <1,6,3,3>: Cost 4 vext2 <0,2,1,6>, <3,3,3,3>
+ 2563214646U, // <1,6,3,4>: Cost 3 vext1 <2,1,6,3>, RHS
+ 4191820018U, // <1,6,3,5>: Cost 4 vtrnr <5,1,7,3>, <u,6,7,5>
+ 2587103648U, // <1,6,3,6>: Cost 3 vext1 <6,1,6,3>, <6,1,6,3>
+ 3087845306U, // <1,6,3,7>: Cost 3 vtrnr LHS, <2,6,3,7>
+ 3087845307U, // <1,6,3,u>: Cost 3 vtrnr LHS, <2,6,3,u>
+ 3693767570U, // <1,6,4,0>: Cost 4 vext2 <0,4,1,6>, <4,0,5,1>
+ 3693767650U, // <1,6,4,1>: Cost 4 vext2 <0,4,1,6>, <4,1,5,0>
+ 3636962877U, // <1,6,4,2>: Cost 4 vext1 <2,1,6,4>, <2,1,6,4>
+ 3325088134U, // <1,6,4,3>: Cost 4 vrev <6,1,3,4>
+ 3693767898U, // <1,6,4,4>: Cost 4 vext2 <0,4,1,6>, <4,4,5,5>
+ 2618699062U, // <1,6,4,5>: Cost 3 vext2 <0,2,1,6>, RHS
+ 3833670966U, // <1,6,4,6>: Cost 4 vuzpl <1,3,6,7>, RHS
+ 4028632374U, // <1,6,4,7>: Cost 4 vzipr <0,3,1,4>, RHS
+ 2618699305U, // <1,6,4,u>: Cost 3 vext2 <0,2,1,6>, RHS
+ 3693768264U, // <1,6,5,0>: Cost 4 vext2 <0,4,1,6>, <5,0,1,2>
+ 3630998373U, // <1,6,5,1>: Cost 4 vext1 <1,1,6,5>, <1,1,6,5>
+ 3636971070U, // <1,6,5,2>: Cost 4 vext1 <2,1,6,5>, <2,1,6,5>
+ 3642943767U, // <1,6,5,3>: Cost 4 vext1 <3,1,6,5>, <3,1,6,5>
+ 3693768628U, // <1,6,5,4>: Cost 4 vext2 <0,4,1,6>, <5,4,5,6>
+ 3732918276U, // <1,6,5,5>: Cost 4 vext2 <7,0,1,6>, <5,5,5,5>
+ 2620690530U, // <1,6,5,6>: Cost 3 vext2 <0,5,1,6>, <5,6,7,0>
+ 2955562294U, // <1,6,5,7>: Cost 3 vzipr <0,4,1,5>, RHS
+ 2955562295U, // <1,6,5,u>: Cost 3 vzipr <0,4,1,5>, RHS
+ 2724180733U, // <1,6,6,0>: Cost 3 vext3 <6,6,0,1>, <6,6,0,1>
+ 3631006566U, // <1,6,6,1>: Cost 4 vext1 <1,1,6,6>, <1,1,6,6>
+ 3631007674U, // <1,6,6,2>: Cost 4 vext1 <1,1,6,6>, <2,6,3,7>
+ 3692442184U, // <1,6,6,3>: Cost 4 vext2 <0,2,1,6>, <6,3,7,0>
+ 3631009078U, // <1,6,6,4>: Cost 4 vext1 <1,1,6,6>, RHS
+ 3787674416U, // <1,6,6,5>: Cost 4 vext3 <4,u,5,1>, <6,6,5,7>
+ 2713932600U, // <1,6,6,6>: Cost 3 vext3 <4,u,5,1>, <6,6,6,6>
+ 2713932610U, // <1,6,6,7>: Cost 3 vext3 <4,u,5,1>, <6,6,7,7>
+ 2713932619U, // <1,6,6,u>: Cost 3 vext3 <4,u,5,1>, <6,6,u,7>
+ 1651102542U, // <1,6,7,0>: Cost 2 vext3 <6,7,0,1>, <6,7,0,1>
+ 2724918103U, // <1,6,7,1>: Cost 3 vext3 <6,7,1,1>, <6,7,1,1>
+ 2698302306U, // <1,6,7,2>: Cost 3 vext3 <2,3,0,1>, <6,7,2,3>
+ 3642960153U, // <1,6,7,3>: Cost 4 vext1 <3,1,6,7>, <3,1,6,7>
+ 2713932662U, // <1,6,7,4>: Cost 3 vext3 <4,u,5,1>, <6,7,4,5>
+ 2725213051U, // <1,6,7,5>: Cost 3 vext3 <6,7,5,1>, <6,7,5,1>
+ 2724844426U, // <1,6,7,6>: Cost 3 vext3 <6,7,0,1>, <6,7,6,7>
+ 4035956022U, // <1,6,7,7>: Cost 4 vzipr <1,5,1,7>, RHS
+ 1651692438U, // <1,6,7,u>: Cost 2 vext3 <6,7,u,1>, <6,7,u,1>
+ 1651766175U, // <1,6,u,0>: Cost 2 vext3 <6,u,0,1>, <6,u,0,1>
+ 2618701614U, // <1,6,u,1>: Cost 3 vext2 <0,2,1,6>, LHS
+ 3135663508U, // <1,6,u,2>: Cost 3 vtrnr LHS, <4,6,u,2>
+ 3692443580U, // <1,6,u,3>: Cost 4 vext2 <0,2,1,6>, <u,3,0,1>
+ 2713932743U, // <1,6,u,4>: Cost 3 vext3 <4,u,5,1>, <6,u,4,5>
+ 2618701978U, // <1,6,u,5>: Cost 3 vext2 <0,2,1,6>, RHS
+ 2622683344U, // <1,6,u,6>: Cost 3 vext2 <0,u,1,6>, <u,6,3,7>
+ 3087886266U, // <1,6,u,7>: Cost 3 vtrnr LHS, <2,6,3,7>
+ 1652356071U, // <1,6,u,u>: Cost 2 vext3 <6,u,u,1>, <6,u,u,1>
+ 2726171632U, // <1,7,0,0>: Cost 3 vext3 <7,0,0,1>, <7,0,0,1>
+ 2626666598U, // <1,7,0,1>: Cost 3 vext2 <1,5,1,7>, LHS
+ 3695100067U, // <1,7,0,2>: Cost 4 vext2 <0,6,1,7>, <0,2,0,1>
+ 3707044102U, // <1,7,0,3>: Cost 4 vext2 <2,6,1,7>, <0,3,2,1>
+ 2726466580U, // <1,7,0,4>: Cost 3 vext3 <7,0,4,1>, <7,0,4,1>
+ 3654921933U, // <1,7,0,5>: Cost 4 vext1 <5,1,7,0>, <5,1,7,0>
+ 2621358582U, // <1,7,0,6>: Cost 3 vext2 <0,6,1,7>, <0,6,1,7>
+ 2622022215U, // <1,7,0,7>: Cost 3 vext2 <0,7,1,7>, <0,7,1,7>
+ 2626667165U, // <1,7,0,u>: Cost 3 vext2 <1,5,1,7>, LHS
+ 2593128550U, // <1,7,1,0>: Cost 3 vext1 <7,1,7,1>, LHS
+ 2626667316U, // <1,7,1,1>: Cost 3 vext2 <1,5,1,7>, <1,1,1,1>
+ 3700409238U, // <1,7,1,2>: Cost 4 vext2 <1,5,1,7>, <1,2,3,0>
+ 2257294428U, // <1,7,1,3>: Cost 3 vrev <7,1,3,1>
+ 2593131830U, // <1,7,1,4>: Cost 3 vext1 <7,1,7,1>, RHS
+ 2626667646U, // <1,7,1,5>: Cost 3 vext2 <1,5,1,7>, <1,5,1,7>
+ 2627331279U, // <1,7,1,6>: Cost 3 vext2 <1,6,1,7>, <1,6,1,7>
+ 2593133696U, // <1,7,1,7>: Cost 3 vext1 <7,1,7,1>, <7,1,7,1>
+ 2628658545U, // <1,7,1,u>: Cost 3 vext2 <1,u,1,7>, <1,u,1,7>
+ 2587164774U, // <1,7,2,0>: Cost 3 vext1 <6,1,7,2>, LHS
+ 3701073445U, // <1,7,2,1>: Cost 4 vext2 <1,6,1,7>, <2,1,3,7>
+ 3700409960U, // <1,7,2,2>: Cost 4 vext2 <1,5,1,7>, <2,2,2,2>
+ 2638612134U, // <1,7,2,3>: Cost 3 vext2 <3,5,1,7>, <2,3,0,1>
+ 2587168054U, // <1,7,2,4>: Cost 3 vext1 <6,1,7,2>, RHS
+ 3706382167U, // <1,7,2,5>: Cost 4 vext2 <2,5,1,7>, <2,5,1,7>
+ 2587169192U, // <1,7,2,6>: Cost 3 vext1 <6,1,7,2>, <6,1,7,2>
+ 3660911610U, // <1,7,2,7>: Cost 4 vext1 <6,1,7,2>, <7,0,1,2>
+ 2587170606U, // <1,7,2,u>: Cost 3 vext1 <6,1,7,2>, LHS
+ 1507459174U, // <1,7,3,0>: Cost 2 vext1 <5,1,7,3>, LHS
+ 2569257984U, // <1,7,3,1>: Cost 3 vext1 <3,1,7,3>, <1,3,5,7>
+ 2581202536U, // <1,7,3,2>: Cost 3 vext1 <5,1,7,3>, <2,2,2,2>
+ 2569259294U, // <1,7,3,3>: Cost 3 vext1 <3,1,7,3>, <3,1,7,3>
+ 1507462454U, // <1,7,3,4>: Cost 2 vext1 <5,1,7,3>, RHS
+ 1507462864U, // <1,7,3,5>: Cost 2 vext1 <5,1,7,3>, <5,1,7,3>
+ 2581205498U, // <1,7,3,6>: Cost 3 vext1 <5,1,7,3>, <6,2,7,3>
+ 2581206010U, // <1,7,3,7>: Cost 3 vext1 <5,1,7,3>, <7,0,1,2>
+ 1507465006U, // <1,7,3,u>: Cost 2 vext1 <5,1,7,3>, LHS
+ 2728826164U, // <1,7,4,0>: Cost 3 vext3 <7,4,0,1>, <7,4,0,1>
+ 3654951732U, // <1,7,4,1>: Cost 4 vext1 <5,1,7,4>, <1,1,1,1>
+ 3330987094U, // <1,7,4,2>: Cost 4 vrev <7,1,2,4>
+ 3331060831U, // <1,7,4,3>: Cost 4 vrev <7,1,3,4>
+ 3787674971U, // <1,7,4,4>: Cost 4 vext3 <4,u,5,1>, <7,4,4,4>
+ 2626669878U, // <1,7,4,5>: Cost 3 vext2 <1,5,1,7>, RHS
+ 3785979241U, // <1,7,4,6>: Cost 4 vext3 <4,6,0,1>, <7,4,6,0>
+ 3787085176U, // <1,7,4,7>: Cost 4 vext3 <4,7,6,1>, <7,4,7,6>
+ 2626670121U, // <1,7,4,u>: Cost 3 vext2 <1,5,1,7>, RHS
+ 2569273446U, // <1,7,5,0>: Cost 3 vext1 <3,1,7,5>, LHS
+ 2569274368U, // <1,7,5,1>: Cost 3 vext1 <3,1,7,5>, <1,3,5,7>
+ 3643016808U, // <1,7,5,2>: Cost 4 vext1 <3,1,7,5>, <2,2,2,2>
+ 2569275680U, // <1,7,5,3>: Cost 3 vext1 <3,1,7,5>, <3,1,7,5>
+ 2569276726U, // <1,7,5,4>: Cost 3 vext1 <3,1,7,5>, RHS
+ 4102034790U, // <1,7,5,5>: Cost 4 vtrnl <1,3,5,7>, <7,4,5,6>
+ 2651222067U, // <1,7,5,6>: Cost 3 vext2 <5,6,1,7>, <5,6,1,7>
+ 3899378998U, // <1,7,5,7>: Cost 4 vuzpr <1,1,5,7>, RHS
+ 2569279278U, // <1,7,5,u>: Cost 3 vext1 <3,1,7,5>, LHS
+ 2730153430U, // <1,7,6,0>: Cost 3 vext3 <7,6,0,1>, <7,6,0,1>
+ 2724845022U, // <1,7,6,1>: Cost 3 vext3 <6,7,0,1>, <7,6,1,0>
+ 3643025338U, // <1,7,6,2>: Cost 4 vext1 <3,1,7,6>, <2,6,3,7>
+ 3643025697U, // <1,7,6,3>: Cost 4 vext1 <3,1,7,6>, <3,1,7,6>
+ 3643026742U, // <1,7,6,4>: Cost 4 vext1 <3,1,7,6>, RHS
+ 3654971091U, // <1,7,6,5>: Cost 4 vext1 <5,1,7,6>, <5,1,7,6>
+ 3787675153U, // <1,7,6,6>: Cost 4 vext3 <4,u,5,1>, <7,6,6,6>
+ 2724845076U, // <1,7,6,7>: Cost 3 vext3 <6,7,0,1>, <7,6,7,0>
+ 2725508637U, // <1,7,6,u>: Cost 3 vext3 <6,u,0,1>, <7,6,u,0>
+ 2730817063U, // <1,7,7,0>: Cost 3 vext3 <7,7,0,1>, <7,7,0,1>
+ 3631088436U, // <1,7,7,1>: Cost 4 vext1 <1,1,7,7>, <1,1,1,1>
+ 3660949158U, // <1,7,7,2>: Cost 4 vext1 <6,1,7,7>, <2,3,0,1>
+ 3801904705U, // <1,7,7,3>: Cost 4 vext3 <7,3,0,1>, <7,7,3,0>
+ 3631090998U, // <1,7,7,4>: Cost 4 vext1 <1,1,7,7>, RHS
+ 2662503828U, // <1,7,7,5>: Cost 3 vext2 <7,5,1,7>, <7,5,1,7>
+ 3660951981U, // <1,7,7,6>: Cost 4 vext1 <6,1,7,7>, <6,1,7,7>
+ 2713933420U, // <1,7,7,7>: Cost 3 vext3 <4,u,5,1>, <7,7,7,7>
+ 2731406959U, // <1,7,7,u>: Cost 3 vext3 <7,7,u,1>, <7,7,u,1>
+ 1507500134U, // <1,7,u,0>: Cost 2 vext1 <5,1,7,u>, LHS
+ 2626672430U, // <1,7,u,1>: Cost 3 vext2 <1,5,1,7>, LHS
+ 2581243496U, // <1,7,u,2>: Cost 3 vext1 <5,1,7,u>, <2,2,2,2>
+ 2569300259U, // <1,7,u,3>: Cost 3 vext1 <3,1,7,u>, <3,1,7,u>
+ 1507503414U, // <1,7,u,4>: Cost 2 vext1 <5,1,7,u>, RHS
+ 1507503829U, // <1,7,u,5>: Cost 2 vext1 <5,1,7,u>, <5,1,7,u>
+ 2581246458U, // <1,7,u,6>: Cost 3 vext1 <5,1,7,u>, <6,2,7,3>
+ 2581246970U, // <1,7,u,7>: Cost 3 vext1 <5,1,7,u>, <7,0,1,2>
+ 1507505966U, // <1,7,u,u>: Cost 2 vext1 <5,1,7,u>, LHS
+ 1543643153U, // <1,u,0,0>: Cost 2 vext2 <0,0,1,u>, <0,0,1,u>
+ 1546297446U, // <1,u,0,1>: Cost 2 vext2 <0,4,1,u>, LHS
+ 2819448852U, // <1,u,0,2>: Cost 3 vuzpr LHS, <0,0,2,2>
+ 2619375876U, // <1,u,0,3>: Cost 3 vext2 <0,3,1,u>, <0,3,1,u>
+ 1546297685U, // <1,u,0,4>: Cost 2 vext2 <0,4,1,u>, <0,4,1,u>
+ 1658771190U, // <1,u,0,5>: Cost 2 vext3 <u,0,5,1>, <u,0,5,1>
+ 2736789248U, // <1,u,0,6>: Cost 3 vext3 <u,7,0,1>, <u,0,6,2>
+ 2659189376U, // <1,u,0,7>: Cost 3 vext2 <7,0,1,u>, <0,7,u,1>
+ 1546298013U, // <1,u,0,u>: Cost 2 vext2 <0,4,1,u>, LHS
+ 1483112550U, // <1,u,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
+ 202162278U, // <1,u,1,1>: Cost 1 vdup1 LHS
+ 1616009006U, // <1,u,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
+ 1745707110U, // <1,u,1,3>: Cost 2 vuzpr LHS, LHS
+ 1483115830U, // <1,u,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
+ 2620040336U, // <1,u,1,5>: Cost 3 vext2 <0,4,1,u>, <1,5,3,7>
+ 3026622618U, // <1,u,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
+ 2958183752U, // <1,u,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
+ 202162278U, // <1,u,1,u>: Cost 1 vdup1 LHS
+ 2819449750U, // <1,u,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
+ 2893207342U, // <1,u,2,1>: Cost 3 vzipl <1,2,3,0>, LHS
+ 2819448996U, // <1,u,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
+ 2819450482U, // <1,u,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
+ 2819449754U, // <1,u,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
+ 2893207706U, // <1,u,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
+ 2819449036U, // <1,u,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
+ 2970799432U, // <1,u,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
+ 2819449002U, // <1,u,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
+ 403931292U, // <1,u,3,0>: Cost 1 vext1 LHS, LHS
+ 1477673718U, // <1,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 115726126U, // <1,u,3,2>: Cost 1 vrev LHS
+ 2014102173U, // <1,u,3,3>: Cost 2 vtrnr LHS, LHS
+ 403934518U, // <1,u,3,4>: Cost 1 vext1 LHS, RHS
+ 1507536601U, // <1,u,3,5>: Cost 2 vext1 <5,1,u,3>, <5,1,u,3>
+ 1525453306U, // <1,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 2014105129U, // <1,u,3,7>: Cost 2 vtrnr LHS, RHS
+ 403937070U, // <1,u,3,u>: Cost 1 vext1 LHS, LHS
+ 2620042157U, // <1,u,4,0>: Cost 3 vext2 <0,4,1,u>, <4,0,u,1>
+ 2620042237U, // <1,u,4,1>: Cost 3 vext2 <0,4,1,u>, <4,1,u,0>
+ 2263217967U, // <1,u,4,2>: Cost 3 vrev <u,1,2,4>
+ 2569341224U, // <1,u,4,3>: Cost 3 vext1 <3,1,u,4>, <3,1,u,4>
+ 2569342262U, // <1,u,4,4>: Cost 3 vext1 <3,1,u,4>, RHS
+ 1546300726U, // <1,u,4,5>: Cost 2 vext2 <0,4,1,u>, RHS
+ 2819449180U, // <1,u,4,6>: Cost 3 vuzpr LHS, <0,4,2,6>
+ 2724845649U, // <1,u,4,7>: Cost 3 vext3 <6,7,0,1>, <u,4,7,6>
+ 1546300969U, // <1,u,4,u>: Cost 2 vext2 <0,4,1,u>, RHS
+ 2551431270U, // <1,u,5,0>: Cost 3 vext1 <0,1,u,5>, LHS
+ 2551432192U, // <1,u,5,1>: Cost 3 vext1 <0,1,u,5>, <1,3,5,7>
+ 3028293422U, // <1,u,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
+ 2955559068U, // <1,u,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
+ 2551434550U, // <1,u,5,4>: Cost 3 vext1 <0,1,u,5>, RHS
+ 2895255706U, // <1,u,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
+ 1616009370U, // <1,u,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 1745710390U, // <1,u,5,7>: Cost 2 vuzpr LHS, RHS
+ 1745710391U, // <1,u,5,u>: Cost 2 vuzpr LHS, RHS
+ 2653221159U, // <1,u,6,0>: Cost 3 vext2 <6,0,1,u>, <6,0,1,u>
+ 2725509303U, // <1,u,6,1>: Cost 3 vext3 <6,u,0,1>, <u,6,1,0>
+ 2659193338U, // <1,u,6,2>: Cost 3 vext2 <7,0,1,u>, <6,2,7,3>
+ 2689751248U, // <1,u,6,3>: Cost 3 vext3 <0,u,1,1>, <u,6,3,7>
+ 2867228774U, // <1,u,6,4>: Cost 3 vuzpr LHS, <5,6,7,4>
+ 3764820194U, // <1,u,6,5>: Cost 4 vext3 <1,1,1,1>, <u,6,5,7>
+ 2657202957U, // <1,u,6,6>: Cost 3 vext2 <6,6,1,u>, <6,6,1,u>
+ 2819450810U, // <1,u,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
+ 2819450811U, // <1,u,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
+ 1585452032U, // <1,u,7,0>: Cost 2 vext2 <7,0,1,u>, <7,0,1,u>
+ 2557420340U, // <1,u,7,1>: Cost 3 vext1 <1,1,u,7>, <1,1,1,1>
+ 2569365158U, // <1,u,7,2>: Cost 3 vext1 <3,1,u,7>, <2,3,0,1>
+ 2569365803U, // <1,u,7,3>: Cost 3 vext1 <3,1,u,7>, <3,1,u,7>
+ 2557422902U, // <1,u,7,4>: Cost 3 vext1 <1,1,u,7>, RHS
+ 2662512021U, // <1,u,7,5>: Cost 3 vext2 <7,5,1,u>, <7,5,1,u>
+ 2724845884U, // <1,u,7,6>: Cost 3 vext3 <6,7,0,1>, <u,7,6,7>
+ 2659194476U, // <1,u,7,7>: Cost 3 vext2 <7,0,1,u>, <7,7,7,7>
+ 1590761096U, // <1,u,7,u>: Cost 2 vext2 <7,u,1,u>, <7,u,1,u>
+ 403972257U, // <1,u,u,0>: Cost 1 vext1 LHS, LHS
+ 202162278U, // <1,u,u,1>: Cost 1 vdup1 LHS
+ 115767091U, // <1,u,u,2>: Cost 1 vrev LHS
+ 1745707677U, // <1,u,u,3>: Cost 2 vuzpr LHS, LHS
+ 403975478U, // <1,u,u,4>: Cost 1 vext1 LHS, RHS
+ 1546303642U, // <1,u,u,5>: Cost 2 vext2 <0,4,1,u>, RHS
+ 1616009613U, // <1,u,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
+ 1745710633U, // <1,u,u,7>: Cost 2 vuzpr LHS, RHS
+ 403978030U, // <1,u,u,u>: Cost 1 vext1 LHS, LHS
+ 2551463936U, // <2,0,0,0>: Cost 3 vext1 <0,2,0,0>, <0,0,0,0>
+ 2685698058U, // <2,0,0,1>: Cost 3 vext3 <0,2,0,2>, <0,0,1,1>
+ 1610776596U, // <2,0,0,2>: Cost 2 vext3 <0,0,2,2>, <0,0,2,2>
+ 2619384069U, // <2,0,0,3>: Cost 3 vext2 <0,3,2,0>, <0,3,2,0>
+ 2551467318U, // <2,0,0,4>: Cost 3 vext1 <0,2,0,0>, RHS
+ 3899836596U, // <2,0,0,5>: Cost 4 vuzpr <1,2,3,0>, <3,0,4,5>
+ 2621374968U, // <2,0,0,6>: Cost 3 vext2 <0,6,2,0>, <0,6,2,0>
+ 4168271334U, // <2,0,0,7>: Cost 4 vtrnr <1,2,3,0>, <2,0,5,7>
+ 1611219018U, // <2,0,0,u>: Cost 2 vext3 <0,0,u,2>, <0,0,u,2>
+ 2551472138U, // <2,0,1,0>: Cost 3 vext1 <0,2,0,1>, <0,0,1,1>
+ 2690564186U, // <2,0,1,1>: Cost 3 vext3 <1,0,3,2>, <0,1,1,0>
+ 1611956326U, // <2,0,1,2>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2826092646U, // <2,0,1,3>: Cost 3 vuzpr <1,2,3,0>, LHS
+ 2551475510U, // <2,0,1,4>: Cost 3 vext1 <0,2,0,1>, RHS
+ 3692463248U, // <2,0,1,5>: Cost 4 vext2 <0,2,2,0>, <1,5,3,7>
+ 2587308473U, // <2,0,1,6>: Cost 3 vext1 <6,2,0,1>, <6,2,0,1>
+ 3661050874U, // <2,0,1,7>: Cost 4 vext1 <6,2,0,1>, <7,0,1,2>
+ 1611956380U, // <2,0,1,u>: Cost 2 vext3 <0,2,0,2>, LHS
+ 1477738598U, // <2,0,2,0>: Cost 2 vext1 <0,2,0,2>, LHS
+ 2551481078U, // <2,0,2,1>: Cost 3 vext1 <0,2,0,2>, <1,0,3,2>
+ 2551481796U, // <2,0,2,2>: Cost 3 vext1 <0,2,0,2>, <2,0,2,0>
+ 2551482518U, // <2,0,2,3>: Cost 3 vext1 <0,2,0,2>, <3,0,1,2>
+ 1477741878U, // <2,0,2,4>: Cost 2 vext1 <0,2,0,2>, RHS
+ 2551484112U, // <2,0,2,5>: Cost 3 vext1 <0,2,0,2>, <5,1,7,3>
+ 2551484759U, // <2,0,2,6>: Cost 3 vext1 <0,2,0,2>, <6,0,7,2>
+ 2551485434U, // <2,0,2,7>: Cost 3 vext1 <0,2,0,2>, <7,0,1,2>
+ 1477744430U, // <2,0,2,u>: Cost 2 vext1 <0,2,0,2>, LHS
+ 2953625600U, // <2,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
+ 2953627302U, // <2,0,3,1>: Cost 3 vzipr LHS, <2,3,0,1>
+ 2953625764U, // <2,0,3,2>: Cost 3 vzipr LHS, <0,2,0,2>
+ 4027369695U, // <2,0,3,3>: Cost 4 vzipr LHS, <3,1,0,3>
+ 3625233718U, // <2,0,3,4>: Cost 4 vext1 <0,2,0,3>, RHS
+ 3899836110U, // <2,0,3,5>: Cost 4 vuzpr <1,2,3,0>, <2,3,4,5>
+ 4032012618U, // <2,0,3,6>: Cost 4 vzipr LHS, <0,4,0,6>
+ 3899835392U, // <2,0,3,7>: Cost 4 vuzpr <1,2,3,0>, <1,3,5,7>
+ 2953625770U, // <2,0,3,u>: Cost 3 vzipr LHS, <0,2,0,u>
+ 2551496806U, // <2,0,4,0>: Cost 3 vext1 <0,2,0,4>, LHS
+ 2685698386U, // <2,0,4,1>: Cost 3 vext3 <0,2,0,2>, <0,4,1,5>
+ 2685698396U, // <2,0,4,2>: Cost 3 vext3 <0,2,0,2>, <0,4,2,6>
+ 3625240726U, // <2,0,4,3>: Cost 4 vext1 <0,2,0,4>, <3,0,1,2>
+ 2551500086U, // <2,0,4,4>: Cost 3 vext1 <0,2,0,4>, RHS
+ 2618723638U, // <2,0,4,5>: Cost 3 vext2 <0,2,2,0>, RHS
+ 2765409590U, // <2,0,4,6>: Cost 3 vuzpl <2,3,0,1>, RHS
+ 3799990664U, // <2,0,4,7>: Cost 4 vext3 <7,0,1,2>, <0,4,7,5>
+ 2685698450U, // <2,0,4,u>: Cost 3 vext3 <0,2,0,2>, <0,4,u,6>
+ 3625246822U, // <2,0,5,0>: Cost 4 vext1 <0,2,0,5>, LHS
+ 3289776304U, // <2,0,5,1>: Cost 4 vrev <0,2,1,5>
+ 2690564526U, // <2,0,5,2>: Cost 3 vext3 <1,0,3,2>, <0,5,2,7>
+ 3289923778U, // <2,0,5,3>: Cost 4 vrev <0,2,3,5>
+ 2216255691U, // <2,0,5,4>: Cost 3 vrev <0,2,4,5>
+ 3726307332U, // <2,0,5,5>: Cost 4 vext2 <5,u,2,0>, <5,5,5,5>
+ 3726307426U, // <2,0,5,6>: Cost 4 vext2 <5,u,2,0>, <5,6,7,0>
+ 2826095926U, // <2,0,5,7>: Cost 3 vuzpr <1,2,3,0>, RHS
+ 2216550639U, // <2,0,5,u>: Cost 3 vrev <0,2,u,5>
+ 4162420736U, // <2,0,6,0>: Cost 4 vtrnr <0,2,4,6>, <0,0,0,0>
+ 2901885030U, // <2,0,6,1>: Cost 3 vzipl <2,6,3,7>, LHS
+ 2685698559U, // <2,0,6,2>: Cost 3 vext3 <0,2,0,2>, <0,6,2,7>
+ 3643173171U, // <2,0,6,3>: Cost 4 vext1 <3,2,0,6>, <3,2,0,6>
+ 2216263884U, // <2,0,6,4>: Cost 3 vrev <0,2,4,6>
+ 3730289341U, // <2,0,6,5>: Cost 4 vext2 <6,5,2,0>, <6,5,2,0>
+ 3726308152U, // <2,0,6,6>: Cost 4 vext2 <5,u,2,0>, <6,6,6,6>
+ 3899836346U, // <2,0,6,7>: Cost 4 vuzpr <1,2,3,0>, <2,6,3,7>
+ 2216558832U, // <2,0,6,u>: Cost 3 vrev <0,2,u,6>
+ 2659202049U, // <2,0,7,0>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
+ 3726308437U, // <2,0,7,1>: Cost 4 vext2 <5,u,2,0>, <7,1,2,3>
+ 2726249034U, // <2,0,7,2>: Cost 3 vext3 <7,0,1,2>, <0,7,2,1>
+ 3734934772U, // <2,0,7,3>: Cost 4 vext2 <7,3,2,0>, <7,3,2,0>
+ 3726308710U, // <2,0,7,4>: Cost 4 vext2 <5,u,2,0>, <7,4,5,6>
+ 3726308814U, // <2,0,7,5>: Cost 4 vext2 <5,u,2,0>, <7,5,u,2>
+ 3736925671U, // <2,0,7,6>: Cost 4 vext2 <7,6,2,0>, <7,6,2,0>
+ 3726308972U, // <2,0,7,7>: Cost 4 vext2 <5,u,2,0>, <7,7,7,7>
+ 2659202049U, // <2,0,7,u>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
+ 1477787750U, // <2,0,u,0>: Cost 2 vext1 <0,2,0,u>, LHS
+ 2953668262U, // <2,0,u,1>: Cost 3 vzipr LHS, <2,3,0,1>
+ 1611956893U, // <2,0,u,2>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2551531670U, // <2,0,u,3>: Cost 3 vext1 <0,2,0,u>, <3,0,1,2>
+ 1477791030U, // <2,0,u,4>: Cost 2 vext1 <0,2,0,u>, RHS
+ 2618726554U, // <2,0,u,5>: Cost 3 vext2 <0,2,2,0>, RHS
+ 2765412506U, // <2,0,u,6>: Cost 3 vuzpl <2,3,0,1>, RHS
+ 2826096169U, // <2,0,u,7>: Cost 3 vuzpr <1,2,3,0>, RHS
+ 1611956947U, // <2,0,u,u>: Cost 2 vext3 <0,2,0,2>, LHS
+ 2569453670U, // <2,1,0,0>: Cost 3 vext1 <3,2,1,0>, LHS
+ 2619392102U, // <2,1,0,1>: Cost 3 vext2 <0,3,2,1>, LHS
+ 3759440619U, // <2,1,0,2>: Cost 4 vext3 <0,2,0,2>, <1,0,2,0>
+ 1616823030U, // <2,1,0,3>: Cost 2 vext3 <1,0,3,2>, <1,0,3,2>
+ 2569456950U, // <2,1,0,4>: Cost 3 vext1 <3,2,1,0>, RHS
+ 2690712328U, // <2,1,0,5>: Cost 3 vext3 <1,0,5,2>, <1,0,5,2>
+ 3661115841U, // <2,1,0,6>: Cost 4 vext1 <6,2,1,0>, <6,2,1,0>
+ 2622046794U, // <2,1,0,7>: Cost 3 vext2 <0,7,2,1>, <0,7,2,1>
+ 1617191715U, // <2,1,0,u>: Cost 2 vext3 <1,0,u,2>, <1,0,u,2>
+ 2551545958U, // <2,1,1,0>: Cost 3 vext1 <0,2,1,1>, LHS
+ 2685698868U, // <2,1,1,1>: Cost 3 vext3 <0,2,0,2>, <1,1,1,1>
+ 2628682646U, // <2,1,1,2>: Cost 3 vext2 <1,u,2,1>, <1,2,3,0>
+ 2685698888U, // <2,1,1,3>: Cost 3 vext3 <0,2,0,2>, <1,1,3,3>
+ 2551549238U, // <2,1,1,4>: Cost 3 vext1 <0,2,1,1>, RHS
+ 3693134992U, // <2,1,1,5>: Cost 4 vext2 <0,3,2,1>, <1,5,3,7>
+ 3661124034U, // <2,1,1,6>: Cost 4 vext1 <6,2,1,1>, <6,2,1,1>
+ 3625292794U, // <2,1,1,7>: Cost 4 vext1 <0,2,1,1>, <7,0,1,2>
+ 2685698933U, // <2,1,1,u>: Cost 3 vext3 <0,2,0,2>, <1,1,u,3>
+ 2551554150U, // <2,1,2,0>: Cost 3 vext1 <0,2,1,2>, LHS
+ 3893649571U, // <2,1,2,1>: Cost 4 vuzpr <0,2,0,1>, <0,2,0,1>
+ 2551555688U, // <2,1,2,2>: Cost 3 vext1 <0,2,1,2>, <2,2,2,2>
+ 2685698966U, // <2,1,2,3>: Cost 3 vext3 <0,2,0,2>, <1,2,3,0>
+ 2551557430U, // <2,1,2,4>: Cost 3 vext1 <0,2,1,2>, RHS
+ 3763422123U, // <2,1,2,5>: Cost 4 vext3 <0,u,0,2>, <1,2,5,3>
+ 3693135802U, // <2,1,2,6>: Cost 4 vext2 <0,3,2,1>, <2,6,3,7>
+ 2726249402U, // <2,1,2,7>: Cost 3 vext3 <7,0,1,2>, <1,2,7,0>
+ 2685699011U, // <2,1,2,u>: Cost 3 vext3 <0,2,0,2>, <1,2,u,0>
+ 2551562342U, // <2,1,3,0>: Cost 3 vext1 <0,2,1,3>, LHS
+ 2953625610U, // <2,1,3,1>: Cost 3 vzipr LHS, <0,0,1,1>
+ 2953627798U, // <2,1,3,2>: Cost 3 vzipr LHS, <3,0,1,2>
+ 2953626584U, // <2,1,3,3>: Cost 3 vzipr LHS, <1,3,1,3>
+ 2551565622U, // <2,1,3,4>: Cost 3 vext1 <0,2,1,3>, RHS
+ 2953625938U, // <2,1,3,5>: Cost 3 vzipr LHS, <0,4,1,5>
+ 2587398596U, // <2,1,3,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
+ 4032013519U, // <2,1,3,7>: Cost 4 vzipr LHS, <1,6,1,7>
+ 2953625617U, // <2,1,3,u>: Cost 3 vzipr LHS, <0,0,1,u>
+ 2690565154U, // <2,1,4,0>: Cost 3 vext3 <1,0,3,2>, <1,4,0,5>
+ 3625313270U, // <2,1,4,1>: Cost 4 vext1 <0,2,1,4>, <1,3,4,6>
+ 3771532340U, // <2,1,4,2>: Cost 4 vext3 <2,2,2,2>, <1,4,2,5>
+ 1148404634U, // <2,1,4,3>: Cost 2 vrev <1,2,3,4>
+ 3625315638U, // <2,1,4,4>: Cost 4 vext1 <0,2,1,4>, RHS
+ 2619395382U, // <2,1,4,5>: Cost 3 vext2 <0,3,2,1>, RHS
+ 3837242678U, // <2,1,4,6>: Cost 4 vuzpl <2,0,1,2>, RHS
+ 3799991394U, // <2,1,4,7>: Cost 4 vext3 <7,0,1,2>, <1,4,7,6>
+ 1148773319U, // <2,1,4,u>: Cost 2 vrev <1,2,u,4>
+ 2551578726U, // <2,1,5,0>: Cost 3 vext1 <0,2,1,5>, LHS
+ 2551579648U, // <2,1,5,1>: Cost 3 vext1 <0,2,1,5>, <1,3,5,7>
+ 3625321952U, // <2,1,5,2>: Cost 4 vext1 <0,2,1,5>, <2,0,5,1>
+ 2685699216U, // <2,1,5,3>: Cost 3 vext3 <0,2,0,2>, <1,5,3,7>
+ 2551582006U, // <2,1,5,4>: Cost 3 vext1 <0,2,1,5>, RHS
+ 3740913668U, // <2,1,5,5>: Cost 4 vext2 <u,3,2,1>, <5,5,5,5>
+ 3661156806U, // <2,1,5,6>: Cost 4 vext1 <6,2,1,5>, <6,2,1,5>
+ 3893652790U, // <2,1,5,7>: Cost 4 vuzpr <0,2,0,1>, RHS
+ 2685699261U, // <2,1,5,u>: Cost 3 vext3 <0,2,0,2>, <1,5,u,7>
+ 2551586918U, // <2,1,6,0>: Cost 3 vext1 <0,2,1,6>, LHS
+ 3625329398U, // <2,1,6,1>: Cost 4 vext1 <0,2,1,6>, <1,0,3,2>
+ 2551588794U, // <2,1,6,2>: Cost 3 vext1 <0,2,1,6>, <2,6,3,7>
+ 3088679014U, // <2,1,6,3>: Cost 3 vtrnr <0,2,4,6>, LHS
+ 2551590198U, // <2,1,6,4>: Cost 3 vext1 <0,2,1,6>, RHS
+ 4029382994U, // <2,1,6,5>: Cost 4 vzipr <0,4,2,6>, <0,4,1,5>
+ 3625333560U, // <2,1,6,6>: Cost 4 vext1 <0,2,1,6>, <6,6,6,6>
+ 3731624800U, // <2,1,6,7>: Cost 4 vext2 <6,7,2,1>, <6,7,2,1>
+ 2551592750U, // <2,1,6,u>: Cost 3 vext1 <0,2,1,6>, LHS
+ 2622051322U, // <2,1,7,0>: Cost 3 vext2 <0,7,2,1>, <7,0,1,2>
+ 3733615699U, // <2,1,7,1>: Cost 4 vext2 <7,1,2,1>, <7,1,2,1>
+ 3795125538U, // <2,1,7,2>: Cost 4 vext3 <6,1,7,2>, <1,7,2,0>
+ 2222171037U, // <2,1,7,3>: Cost 3 vrev <1,2,3,7>
+ 3740915046U, // <2,1,7,4>: Cost 4 vext2 <u,3,2,1>, <7,4,5,6>
+ 3296060335U, // <2,1,7,5>: Cost 4 vrev <1,2,5,7>
+ 3736933864U, // <2,1,7,6>: Cost 4 vext2 <7,6,2,1>, <7,6,2,1>
+ 3805300055U, // <2,1,7,7>: Cost 4 vext3 <7,u,1,2>, <1,7,7,u>
+ 2669827714U, // <2,1,7,u>: Cost 3 vext2 <u,7,2,1>, <7,u,1,2>
+ 2551603302U, // <2,1,u,0>: Cost 3 vext1 <0,2,1,u>, LHS
+ 2953666570U, // <2,1,u,1>: Cost 3 vzipr LHS, <0,0,1,1>
+ 2953668758U, // <2,1,u,2>: Cost 3 vzipr LHS, <3,0,1,2>
+ 1148437406U, // <2,1,u,3>: Cost 2 vrev <1,2,3,u>
+ 2551606582U, // <2,1,u,4>: Cost 3 vext1 <0,2,1,u>, RHS
+ 2953666898U, // <2,1,u,5>: Cost 3 vzipr LHS, <0,4,1,5>
+ 2587398596U, // <2,1,u,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
+ 2669828370U, // <2,1,u,7>: Cost 3 vext2 <u,7,2,1>, <u,7,2,1>
+ 1148806091U, // <2,1,u,u>: Cost 2 vrev <1,2,u,u>
+ 1543667732U, // <2,2,0,0>: Cost 2 vext2 <0,0,2,2>, <0,0,2,2>
+ 1548976230U, // <2,2,0,1>: Cost 2 vext2 <0,u,2,2>, LHS
+ 2685699524U, // <2,2,0,2>: Cost 3 vext3 <0,2,0,2>, <2,0,2,0>
+ 2685699535U, // <2,2,0,3>: Cost 3 vext3 <0,2,0,2>, <2,0,3,2>
+ 2551614774U, // <2,2,0,4>: Cost 3 vext1 <0,2,2,0>, RHS
+ 3704422830U, // <2,2,0,5>: Cost 4 vext2 <2,2,2,2>, <0,5,2,7>
+ 3893657642U, // <2,2,0,6>: Cost 4 vuzpr <0,2,0,2>, <0,0,4,6>
+ 3770574323U, // <2,2,0,7>: Cost 4 vext3 <2,0,7,2>, <2,0,7,2>
+ 1548976796U, // <2,2,0,u>: Cost 2 vext2 <0,u,2,2>, <0,u,2,2>
+ 2622718710U, // <2,2,1,0>: Cost 3 vext2 <0,u,2,2>, <1,0,3,2>
+ 2622718772U, // <2,2,1,1>: Cost 3 vext2 <0,u,2,2>, <1,1,1,1>
+ 2622718870U, // <2,2,1,2>: Cost 3 vext2 <0,u,2,2>, <1,2,3,0>
+ 2819915878U, // <2,2,1,3>: Cost 3 vuzpr <0,2,0,2>, LHS
+ 3625364790U, // <2,2,1,4>: Cost 4 vext1 <0,2,2,1>, RHS
+ 2622719120U, // <2,2,1,5>: Cost 3 vext2 <0,u,2,2>, <1,5,3,7>
+ 3760031292U, // <2,2,1,6>: Cost 4 vext3 <0,2,u,2>, <2,1,6,3>
+ 3667170468U, // <2,2,1,7>: Cost 4 vext1 <7,2,2,1>, <7,2,2,1>
+ 2819915883U, // <2,2,1,u>: Cost 3 vuzpr <0,2,0,2>, LHS
+ 1489829990U, // <2,2,2,0>: Cost 2 vext1 <2,2,2,2>, LHS
+ 2563572470U, // <2,2,2,1>: Cost 3 vext1 <2,2,2,2>, <1,0,3,2>
+ 269271142U, // <2,2,2,2>: Cost 1 vdup2 LHS
+ 2685699698U, // <2,2,2,3>: Cost 3 vext3 <0,2,0,2>, <2,2,3,3>
+ 1489833270U, // <2,2,2,4>: Cost 2 vext1 <2,2,2,2>, RHS
+ 2685699720U, // <2,2,2,5>: Cost 3 vext3 <0,2,0,2>, <2,2,5,7>
+ 2622719930U, // <2,2,2,6>: Cost 3 vext2 <0,u,2,2>, <2,6,3,7>
+ 2593436837U, // <2,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
+ 269271142U, // <2,2,2,u>: Cost 1 vdup2 LHS
+ 2685699750U, // <2,2,3,0>: Cost 3 vext3 <0,2,0,2>, <2,3,0,1>
+ 2690565806U, // <2,2,3,1>: Cost 3 vext3 <1,0,3,2>, <2,3,1,0>
+ 2953627240U, // <2,2,3,2>: Cost 3 vzipr LHS, <2,2,2,2>
+ 1879883878U, // <2,2,3,3>: Cost 2 vzipr LHS, LHS
+ 2685699790U, // <2,2,3,4>: Cost 3 vext3 <0,2,0,2>, <2,3,4,5>
+ 3893659342U, // <2,2,3,5>: Cost 4 vuzpr <0,2,0,2>, <2,3,4,5>
+ 2958270812U, // <2,2,3,6>: Cost 3 vzipr LHS, <0,4,2,6>
+ 2593445030U, // <2,2,3,7>: Cost 3 vext1 <7,2,2,3>, <7,2,2,3>
+ 1879883883U, // <2,2,3,u>: Cost 2 vzipr LHS, LHS
+ 2551644262U, // <2,2,4,0>: Cost 3 vext1 <0,2,2,4>, LHS
+ 3625386742U, // <2,2,4,1>: Cost 4 vext1 <0,2,2,4>, <1,0,3,2>
+ 2551645902U, // <2,2,4,2>: Cost 3 vext1 <0,2,2,4>, <2,3,4,5>
+ 3759441686U, // <2,2,4,3>: Cost 4 vext3 <0,2,0,2>, <2,4,3,5>
+ 2551647542U, // <2,2,4,4>: Cost 3 vext1 <0,2,2,4>, RHS
+ 1548979510U, // <2,2,4,5>: Cost 2 vext2 <0,u,2,2>, RHS
+ 2764901686U, // <2,2,4,6>: Cost 3 vuzpl <2,2,2,2>, RHS
+ 3667195047U, // <2,2,4,7>: Cost 4 vext1 <7,2,2,4>, <7,2,2,4>
+ 1548979753U, // <2,2,4,u>: Cost 2 vext2 <0,u,2,2>, RHS
+ 3696463432U, // <2,2,5,0>: Cost 4 vext2 <0,u,2,2>, <5,0,1,2>
+ 2617413328U, // <2,2,5,1>: Cost 3 vext2 <0,0,2,2>, <5,1,7,3>
+ 2685699936U, // <2,2,5,2>: Cost 3 vext3 <0,2,0,2>, <2,5,2,7>
+ 4027383910U, // <2,2,5,3>: Cost 4 vzipr <0,1,2,5>, LHS
+ 2228201085U, // <2,2,5,4>: Cost 3 vrev <2,2,4,5>
+ 2617413636U, // <2,2,5,5>: Cost 3 vext2 <0,0,2,2>, <5,5,5,5>
+ 2617413730U, // <2,2,5,6>: Cost 3 vext2 <0,0,2,2>, <5,6,7,0>
+ 2819919158U, // <2,2,5,7>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 2819919159U, // <2,2,5,u>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 3625402554U, // <2,2,6,0>: Cost 4 vext1 <0,2,2,6>, <0,2,2,6>
+ 3760031652U, // <2,2,6,1>: Cost 4 vext3 <0,2,u,2>, <2,6,1,3>
+ 2617414138U, // <2,2,6,2>: Cost 3 vext2 <0,0,2,2>, <6,2,7,3>
+ 2685700026U, // <2,2,6,3>: Cost 3 vext3 <0,2,0,2>, <2,6,3,7>
+ 3625405750U, // <2,2,6,4>: Cost 4 vext1 <0,2,2,6>, RHS
+ 3760031692U, // <2,2,6,5>: Cost 4 vext3 <0,2,u,2>, <2,6,5,7>
+ 3088679116U, // <2,2,6,6>: Cost 3 vtrnr <0,2,4,6>, <0,2,4,6>
+ 2657891169U, // <2,2,6,7>: Cost 3 vext2 <6,7,2,2>, <6,7,2,2>
+ 2685700071U, // <2,2,6,u>: Cost 3 vext3 <0,2,0,2>, <2,6,u,7>
+ 2726250474U, // <2,2,7,0>: Cost 3 vext3 <7,0,1,2>, <2,7,0,1>
+ 3704427616U, // <2,2,7,1>: Cost 4 vext2 <2,2,2,2>, <7,1,3,5>
+ 2660545701U, // <2,2,7,2>: Cost 3 vext2 <7,2,2,2>, <7,2,2,2>
+ 4030718054U, // <2,2,7,3>: Cost 4 vzipr <0,6,2,7>, LHS
+ 2617415014U, // <2,2,7,4>: Cost 3 vext2 <0,0,2,2>, <7,4,5,6>
+ 3302033032U, // <2,2,7,5>: Cost 4 vrev <2,2,5,7>
+ 3661246929U, // <2,2,7,6>: Cost 4 vext1 <6,2,2,7>, <6,2,2,7>
+ 2617415276U, // <2,2,7,7>: Cost 3 vext2 <0,0,2,2>, <7,7,7,7>
+ 2731558962U, // <2,2,7,u>: Cost 3 vext3 <7,u,1,2>, <2,7,u,1>
+ 1489829990U, // <2,2,u,0>: Cost 2 vext1 <2,2,2,2>, LHS
+ 1548982062U, // <2,2,u,1>: Cost 2 vext2 <0,u,2,2>, LHS
+ 269271142U, // <2,2,u,2>: Cost 1 vdup2 LHS
+ 1879924838U, // <2,2,u,3>: Cost 2 vzipr LHS, LHS
+ 1489833270U, // <2,2,u,4>: Cost 2 vext1 <2,2,2,2>, RHS
+ 1548982426U, // <2,2,u,5>: Cost 2 vext2 <0,u,2,2>, RHS
+ 2953666908U, // <2,2,u,6>: Cost 3 vzipr LHS, <0,4,2,6>
+ 2819919401U, // <2,2,u,7>: Cost 3 vuzpr <0,2,0,2>, RHS
+ 269271142U, // <2,2,u,u>: Cost 1 vdup2 LHS
+ 1544339456U, // <2,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 470597734U, // <2,3,0,1>: Cost 1 vext2 LHS, LHS
+ 1548984484U, // <2,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 2619408648U, // <2,3,0,3>: Cost 3 vext2 <0,3,2,3>, <0,3,2,3>
+ 1548984658U, // <2,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2665857454U, // <2,3,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
+ 2622726655U, // <2,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
+ 2593494188U, // <2,3,0,7>: Cost 3 vext1 <7,2,3,0>, <7,2,3,0>
+ 470598301U, // <2,3,0,u>: Cost 1 vext2 LHS, LHS
+ 1544340214U, // <2,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544340276U, // <2,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544340374U, // <2,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1548985304U, // <2,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2551696694U, // <2,3,1,4>: Cost 3 vext1 <0,2,3,1>, RHS
+ 1548985488U, // <2,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2622727375U, // <2,3,1,6>: Cost 3 vext2 LHS, <1,6,1,7>
+ 2665858347U, // <2,3,1,7>: Cost 3 vext2 LHS, <1,7,3,0>
+ 1548985709U, // <2,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
+ 2622727613U, // <2,3,2,0>: Cost 3 vext2 LHS, <2,0,1,2>
+ 2622727711U, // <2,3,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
+ 1544341096U, // <2,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
+ 1544341158U, // <2,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 2622727958U, // <2,3,2,4>: Cost 3 vext2 LHS, <2,4,3,5>
+ 2622728032U, // <2,3,2,5>: Cost 3 vext2 LHS, <2,5,2,7>
+ 1548986298U, // <2,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 2665859050U, // <2,3,2,7>: Cost 3 vext2 LHS, <2,7,0,1>
+ 1548986427U, // <2,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
+ 1548986518U, // <2,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 2622728415U, // <2,3,3,1>: Cost 3 vext2 LHS, <3,1,0,3>
+ 1489913458U, // <2,3,3,2>: Cost 2 vext1 <2,2,3,3>, <2,2,3,3>
+ 1544341916U, // <2,3,3,3>: Cost 2 vext2 LHS, <3,3,3,3>
+ 1548986882U, // <2,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2665859632U, // <2,3,3,5>: Cost 3 vext2 LHS, <3,5,1,7>
+ 2234304870U, // <2,3,3,6>: Cost 3 vrev <3,2,6,3>
+ 2958271632U, // <2,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
+ 1548987166U, // <2,3,3,u>: Cost 2 vext2 LHS, <3,u,1,2>
+ 1483948134U, // <2,3,4,0>: Cost 2 vext1 <1,2,3,4>, LHS
+ 1483948954U, // <2,3,4,1>: Cost 2 vext1 <1,2,3,4>, <1,2,3,4>
+ 2622729276U, // <2,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
+ 2557692054U, // <2,3,4,3>: Cost 3 vext1 <1,2,3,4>, <3,0,1,2>
+ 1483951414U, // <2,3,4,4>: Cost 2 vext1 <1,2,3,4>, RHS
+ 470601014U, // <2,3,4,5>: Cost 1 vext2 LHS, RHS
+ 1592118644U, // <2,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 2593526960U, // <2,3,4,7>: Cost 3 vext1 <7,2,3,4>, <7,2,3,4>
+ 470601257U, // <2,3,4,u>: Cost 1 vext2 LHS, RHS
+ 2551726182U, // <2,3,5,0>: Cost 3 vext1 <0,2,3,5>, LHS
+ 1592118992U, // <2,3,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
+ 2665860862U, // <2,3,5,2>: Cost 3 vext2 LHS, <5,2,3,4>
+ 2551728642U, // <2,3,5,3>: Cost 3 vext1 <0,2,3,5>, <3,4,5,6>
+ 1592119238U, // <2,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592119300U, // <2,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1592119394U, // <2,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
+ 1592119464U, // <2,3,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
+ 1592119545U, // <2,3,5,u>: Cost 2 vext2 LHS, <5,u,5,7>
+ 2622730529U, // <2,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
+ 2557707164U, // <2,3,6,1>: Cost 3 vext1 <1,2,3,6>, <1,2,3,6>
+ 1592119802U, // <2,3,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
+ 2665861682U, // <2,3,6,3>: Cost 3 vext2 LHS, <6,3,4,5>
+ 2622730893U, // <2,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
+ 2665861810U, // <2,3,6,5>: Cost 3 vext2 LHS, <6,5,0,7>
+ 1592120120U, // <2,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592120142U, // <2,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1592120223U, // <2,3,6,u>: Cost 2 vext2 LHS, <6,u,0,1>
+ 1592120314U, // <2,3,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
+ 2659890261U, // <2,3,7,1>: Cost 3 vext2 <7,1,2,3>, <7,1,2,3>
+ 2660553894U, // <2,3,7,2>: Cost 3 vext2 <7,2,2,3>, <7,2,2,3>
+ 2665862371U, // <2,3,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
+ 1592120678U, // <2,3,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
+ 2665862534U, // <2,3,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
+ 2665862614U, // <2,3,7,6>: Cost 3 vext2 LHS, <7,6,0,1>
+ 1592120940U, // <2,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1592120962U, // <2,3,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
+ 1548990163U, // <2,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
+ 470603566U, // <2,3,u,1>: Cost 1 vext2 LHS, LHS
+ 1548990341U, // <2,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
+ 1548990396U, // <2,3,u,3>: Cost 2 vext2 LHS, <u,3,0,1>
+ 1548990527U, // <2,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
+ 470603930U, // <2,3,u,5>: Cost 1 vext2 LHS, RHS
+ 1548990672U, // <2,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
+ 1592121600U, // <2,3,u,7>: Cost 2 vext2 LHS, <u,7,0,1>
+ 470604133U, // <2,3,u,u>: Cost 1 vext2 LHS, LHS
+ 2617425942U, // <2,4,0,0>: Cost 3 vext2 <0,0,2,4>, <0,0,2,4>
+ 2618753126U, // <2,4,0,1>: Cost 3 vext2 <0,2,2,4>, LHS
+ 2618753208U, // <2,4,0,2>: Cost 3 vext2 <0,2,2,4>, <0,2,2,4>
+ 2619416841U, // <2,4,0,3>: Cost 3 vext2 <0,3,2,4>, <0,3,2,4>
+ 2587593628U, // <2,4,0,4>: Cost 3 vext1 <6,2,4,0>, <4,0,6,2>
+ 2712832914U, // <2,4,0,5>: Cost 3 vext3 <4,6,u,2>, <4,0,5,1>
+ 1634962332U, // <2,4,0,6>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
+ 3799993252U, // <2,4,0,7>: Cost 4 vext3 <7,0,1,2>, <4,0,7,1>
+ 1634962332U, // <2,4,0,u>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
+ 2619417334U, // <2,4,1,0>: Cost 3 vext2 <0,3,2,4>, <1,0,3,2>
+ 3692495668U, // <2,4,1,1>: Cost 4 vext2 <0,2,2,4>, <1,1,1,1>
+ 2625389466U, // <2,4,1,2>: Cost 3 vext2 <1,3,2,4>, <1,2,3,4>
+ 2826125414U, // <2,4,1,3>: Cost 3 vuzpr <1,2,3,4>, LHS
+ 3699794995U, // <2,4,1,4>: Cost 4 vext2 <1,4,2,4>, <1,4,2,4>
+ 3692496016U, // <2,4,1,5>: Cost 4 vext2 <0,2,2,4>, <1,5,3,7>
+ 3763424238U, // <2,4,1,6>: Cost 4 vext3 <0,u,0,2>, <4,1,6,3>
+ 3667317942U, // <2,4,1,7>: Cost 4 vext1 <7,2,4,1>, <7,2,4,1>
+ 2826125419U, // <2,4,1,u>: Cost 3 vuzpr <1,2,3,4>, LHS
+ 2629371336U, // <2,4,2,0>: Cost 3 vext2 <2,0,2,4>, <2,0,2,4>
+ 3699131946U, // <2,4,2,1>: Cost 4 vext2 <1,3,2,4>, <2,1,4,3>
+ 2630698602U, // <2,4,2,2>: Cost 3 vext2 <2,2,2,4>, <2,2,2,4>
+ 2618754766U, // <2,4,2,3>: Cost 3 vext2 <0,2,2,4>, <2,3,4,5>
+ 2826126234U, // <2,4,2,4>: Cost 3 vuzpr <1,2,3,4>, <1,2,3,4>
+ 2899119414U, // <2,4,2,5>: Cost 3 vzipl <2,2,2,2>, RHS
+ 3033337142U, // <2,4,2,6>: Cost 3 vtrnl <2,2,2,2>, RHS
+ 3800214597U, // <2,4,2,7>: Cost 4 vext3 <7,0,4,2>, <4,2,7,0>
+ 2899119657U, // <2,4,2,u>: Cost 3 vzipl <2,2,2,2>, RHS
+ 2635344033U, // <2,4,3,0>: Cost 3 vext2 <3,0,2,4>, <3,0,2,4>
+ 4032012325U, // <2,4,3,1>: Cost 4 vzipr LHS, <0,0,4,1>
+ 3692497228U, // <2,4,3,2>: Cost 4 vext2 <0,2,2,4>, <3,2,3,4>
+ 3692497308U, // <2,4,3,3>: Cost 4 vext2 <0,2,2,4>, <3,3,3,3>
+ 3001404624U, // <2,4,3,4>: Cost 3 vzipr LHS, <4,4,4,4>
+ 2953627342U, // <2,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 2953625804U, // <2,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
+ 3899868160U, // <2,4,3,7>: Cost 4 vuzpr <1,2,3,4>, <1,3,5,7>
+ 2953625806U, // <2,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
+ 2710916266U, // <2,4,4,0>: Cost 3 vext3 <4,4,0,2>, <4,4,0,2>
+ 3899869648U, // <2,4,4,1>: Cost 4 vuzpr <1,2,3,4>, <3,4,0,1>
+ 3899869658U, // <2,4,4,2>: Cost 4 vuzpr <1,2,3,4>, <3,4,1,2>
+ 3899868930U, // <2,4,4,3>: Cost 4 vuzpr <1,2,3,4>, <2,4,1,3>
+ 2712833232U, // <2,4,4,4>: Cost 3 vext3 <4,6,u,2>, <4,4,4,4>
+ 2618756406U, // <2,4,4,5>: Cost 3 vext2 <0,2,2,4>, RHS
+ 2765737270U, // <2,4,4,6>: Cost 3 vuzpl <2,3,4,5>, RHS
+ 4168304426U, // <2,4,4,7>: Cost 4 vtrnr <1,2,3,4>, <2,4,5,7>
+ 2618756649U, // <2,4,4,u>: Cost 3 vext2 <0,2,2,4>, RHS
+ 2551800011U, // <2,4,5,0>: Cost 3 vext1 <0,2,4,5>, <0,2,4,5>
+ 2569716470U, // <2,4,5,1>: Cost 3 vext1 <3,2,4,5>, <1,0,3,2>
+ 2563745405U, // <2,4,5,2>: Cost 3 vext1 <2,2,4,5>, <2,2,4,5>
+ 2569718102U, // <2,4,5,3>: Cost 3 vext1 <3,2,4,5>, <3,2,4,5>
+ 2551803190U, // <2,4,5,4>: Cost 3 vext1 <0,2,4,5>, RHS
+ 3625545732U, // <2,4,5,5>: Cost 4 vext1 <0,2,4,5>, <5,5,5,5>
+ 1611959606U, // <2,4,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 2826128694U, // <2,4,5,7>: Cost 3 vuzpr <1,2,3,4>, RHS
+ 1611959624U, // <2,4,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1478066278U, // <2,4,6,0>: Cost 2 vext1 <0,2,4,6>, LHS
+ 2551808758U, // <2,4,6,1>: Cost 3 vext1 <0,2,4,6>, <1,0,3,2>
+ 2551809516U, // <2,4,6,2>: Cost 3 vext1 <0,2,4,6>, <2,0,6,4>
+ 2551810198U, // <2,4,6,3>: Cost 3 vext1 <0,2,4,6>, <3,0,1,2>
+ 1478069558U, // <2,4,6,4>: Cost 2 vext1 <0,2,4,6>, RHS
+ 2901888310U, // <2,4,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
+ 2551812920U, // <2,4,6,6>: Cost 3 vext1 <0,2,4,6>, <6,6,6,6>
+ 2726251914U, // <2,4,6,7>: Cost 3 vext3 <7,0,1,2>, <4,6,7,1>
+ 1478072110U, // <2,4,6,u>: Cost 2 vext1 <0,2,4,6>, LHS
+ 2659234821U, // <2,4,7,0>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
+ 3786722726U, // <2,4,7,1>: Cost 4 vext3 <4,7,1,2>, <4,7,1,2>
+ 3734303911U, // <2,4,7,2>: Cost 4 vext2 <7,2,2,4>, <7,2,2,4>
+ 3734967544U, // <2,4,7,3>: Cost 4 vext2 <7,3,2,4>, <7,3,2,4>
+ 3727005030U, // <2,4,7,4>: Cost 4 vext2 <6,0,2,4>, <7,4,5,6>
+ 2726251976U, // <2,4,7,5>: Cost 3 vext3 <7,0,1,2>, <4,7,5,0>
+ 2726251986U, // <2,4,7,6>: Cost 3 vext3 <7,0,1,2>, <4,7,6,1>
+ 3727005292U, // <2,4,7,7>: Cost 4 vext2 <6,0,2,4>, <7,7,7,7>
+ 2659234821U, // <2,4,7,u>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
+ 1478082662U, // <2,4,u,0>: Cost 2 vext1 <0,2,4,u>, LHS
+ 2618758958U, // <2,4,u,1>: Cost 3 vext2 <0,2,2,4>, LHS
+ 2551826024U, // <2,4,u,2>: Cost 3 vext1 <0,2,4,u>, <2,2,2,2>
+ 2551826582U, // <2,4,u,3>: Cost 3 vext1 <0,2,4,u>, <3,0,1,2>
+ 1478085942U, // <2,4,u,4>: Cost 2 vext1 <0,2,4,u>, RHS
+ 2953668302U, // <2,4,u,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 1611959849U, // <2,4,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 2826128937U, // <2,4,u,7>: Cost 3 vuzpr <1,2,3,4>, RHS
+ 1611959867U, // <2,4,u,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 3691839488U, // <2,5,0,0>: Cost 4 vext2 <0,1,2,5>, <0,0,0,0>
+ 2618097766U, // <2,5,0,1>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2620088484U, // <2,5,0,2>: Cost 3 vext2 <0,4,2,5>, <0,2,0,2>
+ 2619425034U, // <2,5,0,3>: Cost 3 vext2 <0,3,2,5>, <0,3,2,5>
+ 2620088667U, // <2,5,0,4>: Cost 3 vext2 <0,4,2,5>, <0,4,2,5>
+ 2620752300U, // <2,5,0,5>: Cost 3 vext2 <0,5,2,5>, <0,5,2,5>
+ 3693830655U, // <2,5,0,6>: Cost 4 vext2 <0,4,2,5>, <0,6,2,7>
+ 3094531382U, // <2,5,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
+ 2618098333U, // <2,5,0,u>: Cost 3 vext2 <0,1,2,5>, LHS
+ 3691840246U, // <2,5,1,0>: Cost 4 vext2 <0,1,2,5>, <1,0,3,2>
+ 3691840308U, // <2,5,1,1>: Cost 4 vext2 <0,1,2,5>, <1,1,1,1>
+ 2626061206U, // <2,5,1,2>: Cost 3 vext2 <1,4,2,5>, <1,2,3,0>
+ 2618098688U, // <2,5,1,3>: Cost 3 vext2 <0,1,2,5>, <1,3,5,7>
+ 2626061364U, // <2,5,1,4>: Cost 3 vext2 <1,4,2,5>, <1,4,2,5>
+ 3691840656U, // <2,5,1,5>: Cost 4 vext2 <0,1,2,5>, <1,5,3,7>
+ 3789082310U, // <2,5,1,6>: Cost 4 vext3 <5,1,6,2>, <5,1,6,2>
+ 2712833744U, // <2,5,1,7>: Cost 3 vext3 <4,6,u,2>, <5,1,7,3>
+ 2628715896U, // <2,5,1,u>: Cost 3 vext2 <1,u,2,5>, <1,u,2,5>
+ 3693831613U, // <2,5,2,0>: Cost 4 vext2 <0,4,2,5>, <2,0,1,2>
+ 4026698642U, // <2,5,2,1>: Cost 4 vzipr <0,0,2,2>, <4,0,5,1>
+ 2632033896U, // <2,5,2,2>: Cost 3 vext2 <2,4,2,5>, <2,2,2,2>
+ 3691841190U, // <2,5,2,3>: Cost 4 vext2 <0,1,2,5>, <2,3,0,1>
+ 2632034061U, // <2,5,2,4>: Cost 3 vext2 <2,4,2,5>, <2,4,2,5>
+ 3691841352U, // <2,5,2,5>: Cost 4 vext2 <0,1,2,5>, <2,5,0,1>
+ 3691841466U, // <2,5,2,6>: Cost 4 vext2 <0,1,2,5>, <2,6,3,7>
+ 3088354614U, // <2,5,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 3088354615U, // <2,5,2,u>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 2557829222U, // <2,5,3,0>: Cost 3 vext1 <1,2,5,3>, LHS
+ 2557830059U, // <2,5,3,1>: Cost 3 vext1 <1,2,5,3>, <1,2,5,3>
+ 2575746766U, // <2,5,3,2>: Cost 3 vext1 <4,2,5,3>, <2,3,4,5>
+ 3691841948U, // <2,5,3,3>: Cost 4 vext2 <0,1,2,5>, <3,3,3,3>
+ 2619427330U, // <2,5,3,4>: Cost 3 vext2 <0,3,2,5>, <3,4,5,6>
+ 2581720847U, // <2,5,3,5>: Cost 3 vext1 <5,2,5,3>, <5,2,5,3>
+ 2953628162U, // <2,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2953626624U, // <2,5,3,7>: Cost 3 vzipr LHS, <1,3,5,7>
+ 2953626625U, // <2,5,3,u>: Cost 3 vzipr LHS, <1,3,5,u>
+ 2569781350U, // <2,5,4,0>: Cost 3 vext1 <3,2,5,4>, LHS
+ 3631580076U, // <2,5,4,1>: Cost 4 vext1 <1,2,5,4>, <1,2,5,4>
+ 2569782990U, // <2,5,4,2>: Cost 3 vext1 <3,2,5,4>, <2,3,4,5>
+ 2569783646U, // <2,5,4,3>: Cost 3 vext1 <3,2,5,4>, <3,2,5,4>
+ 2569784630U, // <2,5,4,4>: Cost 3 vext1 <3,2,5,4>, RHS
+ 2618101046U, // <2,5,4,5>: Cost 3 vext2 <0,1,2,5>, RHS
+ 3893905922U, // <2,5,4,6>: Cost 4 vuzpr <0,2,3,5>, <3,4,5,6>
+ 3094564150U, // <2,5,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
+ 2618101289U, // <2,5,4,u>: Cost 3 vext2 <0,1,2,5>, RHS
+ 2551873638U, // <2,5,5,0>: Cost 3 vext1 <0,2,5,5>, LHS
+ 3637560320U, // <2,5,5,1>: Cost 4 vext1 <2,2,5,5>, <1,3,5,7>
+ 3637560966U, // <2,5,5,2>: Cost 4 vext1 <2,2,5,5>, <2,2,5,5>
+ 3723030343U, // <2,5,5,3>: Cost 4 vext2 <5,3,2,5>, <5,3,2,5>
+ 2551876918U, // <2,5,5,4>: Cost 3 vext1 <0,2,5,5>, RHS
+ 2712834052U, // <2,5,5,5>: Cost 3 vext3 <4,6,u,2>, <5,5,5,5>
+ 4028713474U, // <2,5,5,6>: Cost 4 vzipr <0,3,2,5>, <3,4,5,6>
+ 2712834072U, // <2,5,5,7>: Cost 3 vext3 <4,6,u,2>, <5,5,7,7>
+ 2712834081U, // <2,5,5,u>: Cost 3 vext3 <4,6,u,2>, <5,5,u,7>
+ 2575769702U, // <2,5,6,0>: Cost 3 vext1 <4,2,5,6>, LHS
+ 3631596462U, // <2,5,6,1>: Cost 4 vext1 <1,2,5,6>, <1,2,5,6>
+ 2655924730U, // <2,5,6,2>: Cost 3 vext2 <6,4,2,5>, <6,2,7,3>
+ 3643541856U, // <2,5,6,3>: Cost 4 vext1 <3,2,5,6>, <3,2,5,6>
+ 2655924849U, // <2,5,6,4>: Cost 3 vext2 <6,4,2,5>, <6,4,2,5>
+ 3787755607U, // <2,5,6,5>: Cost 4 vext3 <4,u,6,2>, <5,6,5,7>
+ 4029385218U, // <2,5,6,6>: Cost 4 vzipr <0,4,2,6>, <3,4,5,6>
+ 3088682294U, // <2,5,6,7>: Cost 3 vtrnr <0,2,4,6>, RHS
+ 3088682295U, // <2,5,6,u>: Cost 3 vtrnr <0,2,4,6>, RHS
+ 2563833958U, // <2,5,7,0>: Cost 3 vext1 <2,2,5,7>, LHS
+ 2551890678U, // <2,5,7,1>: Cost 3 vext1 <0,2,5,7>, <1,0,3,2>
+ 2563835528U, // <2,5,7,2>: Cost 3 vext1 <2,2,5,7>, <2,2,5,7>
+ 3637577878U, // <2,5,7,3>: Cost 4 vext1 <2,2,5,7>, <3,0,1,2>
+ 2563837238U, // <2,5,7,4>: Cost 3 vext1 <2,2,5,7>, RHS
+ 2712834216U, // <2,5,7,5>: Cost 3 vext3 <4,6,u,2>, <5,7,5,7>
+ 2712834220U, // <2,5,7,6>: Cost 3 vext3 <4,6,u,2>, <5,7,6,2>
+ 4174449974U, // <2,5,7,7>: Cost 4 vtrnr <2,2,5,7>, RHS
+ 2563839790U, // <2,5,7,u>: Cost 3 vext1 <2,2,5,7>, LHS
+ 2563842150U, // <2,5,u,0>: Cost 3 vext1 <2,2,5,u>, LHS
+ 2618103598U, // <2,5,u,1>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2563843721U, // <2,5,u,2>: Cost 3 vext1 <2,2,5,u>, <2,2,5,u>
+ 2569816418U, // <2,5,u,3>: Cost 3 vext1 <3,2,5,u>, <3,2,5,u>
+ 2622748735U, // <2,5,u,4>: Cost 3 vext2 <0,u,2,5>, <u,4,5,6>
+ 2618103962U, // <2,5,u,5>: Cost 3 vext2 <0,1,2,5>, RHS
+ 2953669122U, // <2,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2953667584U, // <2,5,u,7>: Cost 3 vzipr LHS, <1,3,5,7>
+ 2618104165U, // <2,5,u,u>: Cost 3 vext2 <0,1,2,5>, LHS
+ 2620096512U, // <2,6,0,0>: Cost 3 vext2 <0,4,2,6>, <0,0,0,0>
+ 1546354790U, // <2,6,0,1>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620096676U, // <2,6,0,2>: Cost 3 vext2 <0,4,2,6>, <0,2,0,2>
+ 3693838588U, // <2,6,0,3>: Cost 4 vext2 <0,4,2,6>, <0,3,1,0>
+ 1546355036U, // <2,6,0,4>: Cost 2 vext2 <0,4,2,6>, <0,4,2,6>
+ 3694502317U, // <2,6,0,5>: Cost 4 vext2 <0,5,2,6>, <0,5,2,6>
+ 2551911246U, // <2,6,0,6>: Cost 3 vext1 <0,2,6,0>, <6,7,0,1>
+ 2720723287U, // <2,6,0,7>: Cost 3 vext3 <6,0,7,2>, <6,0,7,2>
+ 1546355357U, // <2,6,0,u>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620097270U, // <2,6,1,0>: Cost 3 vext2 <0,4,2,6>, <1,0,3,2>
+ 2620097332U, // <2,6,1,1>: Cost 3 vext2 <0,4,2,6>, <1,1,1,1>
+ 2620097430U, // <2,6,1,2>: Cost 3 vext2 <0,4,2,6>, <1,2,3,0>
+ 2820243558U, // <2,6,1,3>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 2620097598U, // <2,6,1,4>: Cost 3 vext2 <0,4,2,6>, <1,4,3,6>
+ 2620097680U, // <2,6,1,5>: Cost 3 vext2 <0,4,2,6>, <1,5,3,7>
+ 3693839585U, // <2,6,1,6>: Cost 4 vext2 <0,4,2,6>, <1,6,3,7>
+ 2721386920U, // <2,6,1,7>: Cost 3 vext3 <6,1,7,2>, <6,1,7,2>
+ 2820243563U, // <2,6,1,u>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 2714014137U, // <2,6,2,0>: Cost 3 vext3 <4,u,6,2>, <6,2,0,1>
+ 2712834500U, // <2,6,2,1>: Cost 3 vext3 <4,6,u,2>, <6,2,1,3>
+ 2620098152U, // <2,6,2,2>: Cost 3 vext2 <0,4,2,6>, <2,2,2,2>
+ 2620098214U, // <2,6,2,3>: Cost 3 vext2 <0,4,2,6>, <2,3,0,1>
+ 2632042254U, // <2,6,2,4>: Cost 3 vext2 <2,4,2,6>, <2,4,2,6>
+ 2712834540U, // <2,6,2,5>: Cost 3 vext3 <4,6,u,2>, <6,2,5,7>
+ 2820243660U, // <2,6,2,6>: Cost 3 vuzpr <0,2,4,6>, <0,2,4,6>
+ 2958265654U, // <2,6,2,7>: Cost 3 vzipr <0,u,2,2>, RHS
+ 2620098619U, // <2,6,2,u>: Cost 3 vext2 <0,4,2,6>, <2,u,0,1>
+ 2620098710U, // <2,6,3,0>: Cost 3 vext2 <0,4,2,6>, <3,0,1,2>
+ 3893986982U, // <2,6,3,1>: Cost 4 vuzpr <0,2,4,6>, <2,3,0,1>
+ 2569848762U, // <2,6,3,2>: Cost 3 vext1 <3,2,6,3>, <2,6,3,7>
+ 2620098972U, // <2,6,3,3>: Cost 3 vext2 <0,4,2,6>, <3,3,3,3>
+ 2620099074U, // <2,6,3,4>: Cost 3 vext2 <0,4,2,6>, <3,4,5,6>
+ 3893987022U, // <2,6,3,5>: Cost 4 vuzpr <0,2,4,6>, <2,3,4,5>
+ 3001404644U, // <2,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
+ 1879887158U, // <2,6,3,7>: Cost 2 vzipr LHS, RHS
+ 1879887159U, // <2,6,3,u>: Cost 2 vzipr LHS, RHS
+ 2620099484U, // <2,6,4,0>: Cost 3 vext2 <0,4,2,6>, <4,0,6,2>
+ 2620099566U, // <2,6,4,1>: Cost 3 vext2 <0,4,2,6>, <4,1,6,3>
+ 2620099644U, // <2,6,4,2>: Cost 3 vext2 <0,4,2,6>, <4,2,6,0>
+ 3643599207U, // <2,6,4,3>: Cost 4 vext1 <3,2,6,4>, <3,2,6,4>
+ 2575830080U, // <2,6,4,4>: Cost 3 vext1 <4,2,6,4>, <4,2,6,4>
+ 1546358070U, // <2,6,4,5>: Cost 2 vext2 <0,4,2,6>, RHS
+ 2667875700U, // <2,6,4,6>: Cost 3 vext2 <u,4,2,6>, <4,6,4,6>
+ 4028042550U, // <2,6,4,7>: Cost 4 vzipr <0,2,2,4>, RHS
+ 1546358313U, // <2,6,4,u>: Cost 2 vext2 <0,4,2,6>, RHS
+ 3693841992U, // <2,6,5,0>: Cost 4 vext2 <0,4,2,6>, <5,0,1,2>
+ 2667876048U, // <2,6,5,1>: Cost 3 vext2 <u,4,2,6>, <5,1,7,3>
+ 2712834756U, // <2,6,5,2>: Cost 3 vext3 <4,6,u,2>, <6,5,2,7>
+ 3643607400U, // <2,6,5,3>: Cost 4 vext1 <3,2,6,5>, <3,2,6,5>
+ 2252091873U, // <2,6,5,4>: Cost 3 vrev <6,2,4,5>
+ 2667876356U, // <2,6,5,5>: Cost 3 vext2 <u,4,2,6>, <5,5,5,5>
+ 2667876450U, // <2,6,5,6>: Cost 3 vext2 <u,4,2,6>, <5,6,7,0>
+ 2820246838U, // <2,6,5,7>: Cost 3 vuzpr <0,2,4,6>, RHS
+ 2820246839U, // <2,6,5,u>: Cost 3 vuzpr <0,2,4,6>, RHS
+ 2563899494U, // <2,6,6,0>: Cost 3 vext1 <2,2,6,6>, LHS
+ 3893988683U, // <2,6,6,1>: Cost 4 vuzpr <0,2,4,6>, <4,6,0,1>
+ 2563901072U, // <2,6,6,2>: Cost 3 vext1 <2,2,6,6>, <2,2,6,6>
+ 3893987236U, // <2,6,6,3>: Cost 4 vuzpr <0,2,4,6>, <2,6,1,3>
+ 2563902774U, // <2,6,6,4>: Cost 3 vext1 <2,2,6,6>, RHS
+ 3893988723U, // <2,6,6,5>: Cost 4 vuzpr <0,2,4,6>, <4,6,4,5>
+ 2712834872U, // <2,6,6,6>: Cost 3 vext3 <4,6,u,2>, <6,6,6,6>
+ 2955644214U, // <2,6,6,7>: Cost 3 vzipr <0,4,2,6>, RHS
+ 2955644215U, // <2,6,6,u>: Cost 3 vzipr <0,4,2,6>, RHS
+ 2712834894U, // <2,6,7,0>: Cost 3 vext3 <4,6,u,2>, <6,7,0,1>
+ 2724926296U, // <2,6,7,1>: Cost 3 vext3 <6,7,1,2>, <6,7,1,2>
+ 2725000033U, // <2,6,7,2>: Cost 3 vext3 <6,7,2,2>, <6,7,2,2>
+ 2702365544U, // <2,6,7,3>: Cost 3 vext3 <3,0,1,2>, <6,7,3,0>
+ 2712834934U, // <2,6,7,4>: Cost 3 vext3 <4,6,u,2>, <6,7,4,5>
+ 3776107393U, // <2,6,7,5>: Cost 4 vext3 <3,0,1,2>, <6,7,5,7>
+ 2725294981U, // <2,6,7,6>: Cost 3 vext3 <6,7,6,2>, <6,7,6,2>
+ 2726253452U, // <2,6,7,7>: Cost 3 vext3 <7,0,1,2>, <6,7,7,0>
+ 2712834966U, // <2,6,7,u>: Cost 3 vext3 <4,6,u,2>, <6,7,u,1>
+ 2620102355U, // <2,6,u,0>: Cost 3 vext2 <0,4,2,6>, <u,0,1,2>
+ 1546360622U, // <2,6,u,1>: Cost 2 vext2 <0,4,2,6>, LHS
+ 2620102536U, // <2,6,u,2>: Cost 3 vext2 <0,4,2,6>, <u,2,3,3>
+ 2820244125U, // <2,6,u,3>: Cost 3 vuzpr <0,2,4,6>, LHS
+ 1594136612U, // <2,6,u,4>: Cost 2 vext2 <u,4,2,6>, <u,4,2,6>
+ 1546360986U, // <2,6,u,5>: Cost 2 vext2 <0,4,2,6>, RHS
+ 2620102864U, // <2,6,u,6>: Cost 3 vext2 <0,4,2,6>, <u,6,3,7>
+ 1879928118U, // <2,6,u,7>: Cost 2 vzipr LHS, RHS
+ 1879928119U, // <2,6,u,u>: Cost 2 vzipr LHS, RHS
+ 2726179825U, // <2,7,0,0>: Cost 3 vext3 <7,0,0,2>, <7,0,0,2>
+ 1652511738U, // <2,7,0,1>: Cost 2 vext3 <7,0,1,2>, <7,0,1,2>
+ 2621431972U, // <2,7,0,2>: Cost 3 vext2 <0,6,2,7>, <0,2,0,2>
+ 2257949868U, // <2,7,0,3>: Cost 3 vrev <7,2,3,0>
+ 2726474773U, // <2,7,0,4>: Cost 3 vext3 <7,0,4,2>, <7,0,4,2>
+ 2620768686U, // <2,7,0,5>: Cost 3 vext2 <0,5,2,7>, <0,5,2,7>
+ 2621432319U, // <2,7,0,6>: Cost 3 vext2 <0,6,2,7>, <0,6,2,7>
+ 2599760953U, // <2,7,0,7>: Cost 3 vext1 <u,2,7,0>, <7,0,u,2>
+ 1653027897U, // <2,7,0,u>: Cost 2 vext3 <7,0,u,2>, <7,0,u,2>
+ 2639348470U, // <2,7,1,0>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
+ 3695174452U, // <2,7,1,1>: Cost 4 vext2 <0,6,2,7>, <1,1,1,1>
+ 3695174550U, // <2,7,1,2>: Cost 4 vext2 <0,6,2,7>, <1,2,3,0>
+ 3694511104U, // <2,7,1,3>: Cost 4 vext2 <0,5,2,7>, <1,3,5,7>
+ 3713090594U, // <2,7,1,4>: Cost 4 vext2 <3,6,2,7>, <1,4,0,5>
+ 3693184144U, // <2,7,1,5>: Cost 4 vext2 <0,3,2,7>, <1,5,3,7>
+ 2627405016U, // <2,7,1,6>: Cost 3 vext2 <1,6,2,7>, <1,6,2,7>
+ 3799995519U, // <2,7,1,7>: Cost 4 vext3 <7,0,1,2>, <7,1,7,0>
+ 2639348470U, // <2,7,1,u>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
+ 3695175101U, // <2,7,2,0>: Cost 4 vext2 <0,6,2,7>, <2,0,1,2>
+ 3643655168U, // <2,7,2,1>: Cost 4 vext1 <3,2,7,2>, <1,3,5,7>
+ 2257892517U, // <2,7,2,2>: Cost 3 vrev <7,2,2,2>
+ 3695175334U, // <2,7,2,3>: Cost 4 vext2 <0,6,2,7>, <2,3,0,1>
+ 3695175465U, // <2,7,2,4>: Cost 4 vext2 <0,6,2,7>, <2,4,5,6>
+ 2632714080U, // <2,7,2,5>: Cost 3 vext2 <2,5,2,7>, <2,5,2,7>
+ 2633377713U, // <2,7,2,6>: Cost 3 vext2 <2,6,2,7>, <2,6,2,7>
+ 3695175658U, // <2,7,2,7>: Cost 4 vext2 <0,6,2,7>, <2,7,0,1>
+ 2634704979U, // <2,7,2,u>: Cost 3 vext2 <2,u,2,7>, <2,u,2,7>
+ 1514094694U, // <2,7,3,0>: Cost 2 vext1 <6,2,7,3>, LHS
+ 2569921680U, // <2,7,3,1>: Cost 3 vext1 <3,2,7,3>, <1,5,3,7>
+ 2587838056U, // <2,7,3,2>: Cost 3 vext1 <6,2,7,3>, <2,2,2,2>
+ 2569922927U, // <2,7,3,3>: Cost 3 vext1 <3,2,7,3>, <3,2,7,3>
+ 1514097974U, // <2,7,3,4>: Cost 2 vext1 <6,2,7,3>, RHS
+ 2581868321U, // <2,7,3,5>: Cost 3 vext1 <5,2,7,3>, <5,2,7,3>
+ 1514099194U, // <2,7,3,6>: Cost 2 vext1 <6,2,7,3>, <6,2,7,3>
+ 2587841530U, // <2,7,3,7>: Cost 3 vext1 <6,2,7,3>, <7,0,1,2>
+ 1514100526U, // <2,7,3,u>: Cost 2 vext1 <6,2,7,3>, LHS
+ 2708706617U, // <2,7,4,0>: Cost 3 vext3 <4,0,6,2>, <7,4,0,6>
+ 3649643418U, // <2,7,4,1>: Cost 4 vext1 <4,2,7,4>, <1,2,3,4>
+ 3649644330U, // <2,7,4,2>: Cost 4 vext1 <4,2,7,4>, <2,4,5,7>
+ 2257982640U, // <2,7,4,3>: Cost 3 vrev <7,2,3,4>
+ 3649645641U, // <2,7,4,4>: Cost 4 vext1 <4,2,7,4>, <4,2,7,4>
+ 2621435190U, // <2,7,4,5>: Cost 3 vext2 <0,6,2,7>, RHS
+ 2712835441U, // <2,7,4,6>: Cost 3 vext3 <4,6,u,2>, <7,4,6,u>
+ 3799995762U, // <2,7,4,7>: Cost 4 vext3 <7,0,1,2>, <7,4,7,0>
+ 2621435433U, // <2,7,4,u>: Cost 3 vext2 <0,6,2,7>, RHS
+ 2729497990U, // <2,7,5,0>: Cost 3 vext3 <7,5,0,2>, <7,5,0,2>
+ 3643679744U, // <2,7,5,1>: Cost 4 vext1 <3,2,7,5>, <1,3,5,7>
+ 3637708424U, // <2,7,5,2>: Cost 4 vext1 <2,2,7,5>, <2,2,5,7>
+ 3643681137U, // <2,7,5,3>: Cost 4 vext1 <3,2,7,5>, <3,2,7,5>
+ 2599800118U, // <2,7,5,4>: Cost 3 vext1 <u,2,7,5>, RHS
+ 3786577334U, // <2,7,5,5>: Cost 4 vext3 <4,6,u,2>, <7,5,5,5>
+ 3786577345U, // <2,7,5,6>: Cost 4 vext3 <4,6,u,2>, <7,5,6,7>
+ 2599802214U, // <2,7,5,7>: Cost 3 vext1 <u,2,7,5>, <7,4,5,6>
+ 2599802670U, // <2,7,5,u>: Cost 3 vext1 <u,2,7,5>, LHS
+ 2581889126U, // <2,7,6,0>: Cost 3 vext1 <5,2,7,6>, LHS
+ 3643687936U, // <2,7,6,1>: Cost 4 vext1 <3,2,7,6>, <1,3,5,7>
+ 2663240186U, // <2,7,6,2>: Cost 3 vext2 <7,6,2,7>, <6,2,7,3>
+ 3643689330U, // <2,7,6,3>: Cost 4 vext1 <3,2,7,6>, <3,2,7,6>
+ 2581892406U, // <2,7,6,4>: Cost 3 vext1 <5,2,7,6>, RHS
+ 2581892900U, // <2,7,6,5>: Cost 3 vext1 <5,2,7,6>, <5,2,7,6>
+ 2587865597U, // <2,7,6,6>: Cost 3 vext1 <6,2,7,6>, <6,2,7,6>
+ 3786577428U, // <2,7,6,7>: Cost 4 vext3 <4,6,u,2>, <7,6,7,0>
+ 2581894958U, // <2,7,6,u>: Cost 3 vext1 <5,2,7,6>, LHS
+ 2726254119U, // <2,7,7,0>: Cost 3 vext3 <7,0,1,2>, <7,7,0,1>
+ 3804640817U, // <2,7,7,1>: Cost 4 vext3 <7,7,1,2>, <7,7,1,2>
+ 3637724826U, // <2,7,7,2>: Cost 4 vext1 <2,2,7,7>, <2,2,7,7>
+ 3734992123U, // <2,7,7,3>: Cost 4 vext2 <7,3,2,7>, <7,3,2,7>
+ 2552040758U, // <2,7,7,4>: Cost 3 vext1 <0,2,7,7>, RHS
+ 3799995992U, // <2,7,7,5>: Cost 4 vext3 <7,0,1,2>, <7,7,5,5>
+ 2663241198U, // <2,7,7,6>: Cost 3 vext2 <7,6,2,7>, <7,6,2,7>
+ 2712835692U, // <2,7,7,7>: Cost 3 vext3 <4,6,u,2>, <7,7,7,7>
+ 2731562607U, // <2,7,7,u>: Cost 3 vext3 <7,u,1,2>, <7,7,u,1>
+ 1514135654U, // <2,7,u,0>: Cost 2 vext1 <6,2,7,u>, LHS
+ 1657820802U, // <2,7,u,1>: Cost 2 vext3 <7,u,1,2>, <7,u,1,2>
+ 2587879016U, // <2,7,u,2>: Cost 3 vext1 <6,2,7,u>, <2,2,2,2>
+ 2569963892U, // <2,7,u,3>: Cost 3 vext1 <3,2,7,u>, <3,2,7,u>
+ 1514138934U, // <2,7,u,4>: Cost 2 vext1 <6,2,7,u>, RHS
+ 2621438106U, // <2,7,u,5>: Cost 3 vext2 <0,6,2,7>, RHS
+ 1514140159U, // <2,7,u,6>: Cost 2 vext1 <6,2,7,u>, <6,2,7,u>
+ 2587882490U, // <2,7,u,7>: Cost 3 vext1 <6,2,7,u>, <7,0,1,2>
+ 1514141486U, // <2,7,u,u>: Cost 2 vext1 <6,2,7,u>, LHS
+ 1544380416U, // <2,u,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 470638699U, // <2,u,0,1>: Cost 1 vext2 LHS, LHS
+ 1544380580U, // <2,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 1658631909U, // <2,u,0,3>: Cost 2 vext3 <u,0,3,2>, <u,0,3,2>
+ 1544380754U, // <2,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2665898414U, // <2,u,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
+ 1658853120U, // <2,u,0,6>: Cost 2 vext3 <u,0,6,2>, <u,0,6,2>
+ 3094531625U, // <2,u,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
+ 470639261U, // <2,u,0,u>: Cost 1 vext2 LHS, LHS
+ 1544381174U, // <2,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544381236U, // <2,u,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544381334U, // <2,u,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1544381400U, // <2,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2618123325U, // <2,u,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
+ 1544381584U, // <2,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2618123489U, // <2,u,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
+ 2726254427U, // <2,u,1,7>: Cost 3 vext3 <7,0,1,2>, <u,1,7,3>
+ 1544381823U, // <2,u,1,u>: Cost 2 vext2 LHS, <1,u,3,3>
+ 1478328422U, // <2,u,2,0>: Cost 2 vext1 <0,2,u,2>, LHS
+ 2618123807U, // <2,u,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
+ 269271142U, // <2,u,2,2>: Cost 1 vdup2 LHS
+ 1544382118U, // <2,u,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 1478331702U, // <2,u,2,4>: Cost 2 vext1 <0,2,u,2>, RHS
+ 2618124136U, // <2,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544382394U, // <2,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 3088354857U, // <2,u,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
+ 269271142U, // <2,u,2,u>: Cost 1 vdup2 LHS
+ 1544382614U, // <2,u,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 2953627374U, // <2,u,3,1>: Cost 3 vzipr LHS, <2,3,u,1>
+ 1490282143U, // <2,u,3,2>: Cost 2 vext1 <2,2,u,3>, <2,2,u,3>
+ 1879883932U, // <2,u,3,3>: Cost 2 vzipr LHS, LHS
+ 1544382978U, // <2,u,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2953627378U, // <2,u,3,5>: Cost 3 vzipr LHS, <2,3,u,5>
+ 1514172931U, // <2,u,3,6>: Cost 2 vext1 <6,2,u,3>, <6,2,u,3>
+ 1879887176U, // <2,u,3,7>: Cost 2 vzipr LHS, RHS
+ 1879883937U, // <2,u,3,u>: Cost 2 vzipr LHS, LHS
+ 1484316774U, // <2,u,4,0>: Cost 2 vext1 <1,2,u,4>, LHS
+ 1484317639U, // <2,u,4,1>: Cost 2 vext1 <1,2,u,4>, <1,2,u,4>
+ 2552088270U, // <2,u,4,2>: Cost 3 vext1 <0,2,u,4>, <2,3,4,5>
+ 1190213513U, // <2,u,4,3>: Cost 2 vrev <u,2,3,4>
+ 1484320054U, // <2,u,4,4>: Cost 2 vext1 <1,2,u,4>, RHS
+ 470641974U, // <2,u,4,5>: Cost 1 vext2 LHS, RHS
+ 1592159604U, // <2,u,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 3094564393U, // <2,u,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
+ 470642217U, // <2,u,4,u>: Cost 1 vext2 LHS, RHS
+ 2552094959U, // <2,u,5,0>: Cost 3 vext1 <0,2,u,5>, <0,2,u,5>
+ 1592159952U, // <2,u,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
+ 2564040353U, // <2,u,5,2>: Cost 3 vext1 <2,2,u,5>, <2,2,u,5>
+ 2690275455U, // <2,u,5,3>: Cost 3 vext3 <0,u,u,2>, <u,5,3,7>
+ 1592160198U, // <2,u,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592160260U, // <2,u,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1611962522U, // <2,u,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1592160424U, // <2,u,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
+ 1611962540U, // <2,u,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1478361190U, // <2,u,6,0>: Cost 2 vext1 <0,2,u,6>, LHS
+ 2552103670U, // <2,u,6,1>: Cost 3 vext1 <0,2,u,6>, <1,0,3,2>
+ 1592160762U, // <2,u,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
+ 2685704400U, // <2,u,6,3>: Cost 3 vext3 <0,2,0,2>, <u,6,3,7>
+ 1478364470U, // <2,u,6,4>: Cost 2 vext1 <0,2,u,6>, RHS
+ 2901891226U, // <2,u,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
+ 1592161080U, // <2,u,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592161102U, // <2,u,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1478367022U, // <2,u,6,u>: Cost 2 vext1 <0,2,u,6>, LHS
+ 1592161274U, // <2,u,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
+ 2659931226U, // <2,u,7,1>: Cost 3 vext2 <7,1,2,u>, <7,1,2,u>
+ 2564056739U, // <2,u,7,2>: Cost 3 vext1 <2,2,u,7>, <2,2,u,7>
+ 2665903331U, // <2,u,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
+ 1592161638U, // <2,u,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
+ 2665903494U, // <2,u,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
+ 2587947527U, // <2,u,7,6>: Cost 3 vext1 <6,2,u,7>, <6,2,u,7>
+ 1592161900U, // <2,u,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1592161922U, // <2,u,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
+ 1478377574U, // <2,u,u,0>: Cost 2 vext1 <0,2,u,u>, LHS
+ 470644526U, // <2,u,u,1>: Cost 1 vext2 LHS, LHS
+ 269271142U, // <2,u,u,2>: Cost 1 vdup2 LHS
+ 1879924892U, // <2,u,u,3>: Cost 2 vzipr LHS, LHS
+ 1478380854U, // <2,u,u,4>: Cost 2 vext1 <0,2,u,u>, RHS
+ 470644890U, // <2,u,u,5>: Cost 1 vext2 LHS, RHS
+ 1611962765U, // <2,u,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
+ 1879928136U, // <2,u,u,7>: Cost 2 vzipr LHS, RHS
+ 470645093U, // <2,u,u,u>: Cost 1 vext2 LHS, LHS
+ 1611448320U, // <3,0,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
+ 1611890698U, // <3,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
+ 1611890708U, // <3,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
+ 3763576860U, // <3,0,0,3>: Cost 4 vext3 LHS, <0,0,3,1>
+ 2689835045U, // <3,0,0,4>: Cost 3 vext3 LHS, <0,0,4,1>
+ 3698508206U, // <3,0,0,5>: Cost 4 vext2 <1,2,3,0>, <0,5,2,7>
+ 3763576887U, // <3,0,0,6>: Cost 4 vext3 LHS, <0,0,6,1>
+ 3667678434U, // <3,0,0,7>: Cost 4 vext1 <7,3,0,0>, <7,3,0,0>
+ 1616093258U, // <3,0,0,u>: Cost 2 vext3 LHS, <0,0,u,2>
+ 1490337894U, // <3,0,1,0>: Cost 2 vext1 <2,3,0,1>, LHS
+ 2685632602U, // <3,0,1,1>: Cost 3 vext3 LHS, <0,1,1,0>
+ 537706598U, // <3,0,1,2>: Cost 1 vext3 LHS, LHS
+ 2624766936U, // <3,0,1,3>: Cost 3 vext2 <1,2,3,0>, <1,3,1,3>
+ 1490341174U, // <3,0,1,4>: Cost 2 vext1 <2,3,0,1>, RHS
+ 2624767120U, // <3,0,1,5>: Cost 3 vext2 <1,2,3,0>, <1,5,3,7>
+ 2732966030U, // <3,0,1,6>: Cost 3 vext3 LHS, <0,1,6,7>
+ 2593944803U, // <3,0,1,7>: Cost 3 vext1 <7,3,0,1>, <7,3,0,1>
+ 537706652U, // <3,0,1,u>: Cost 1 vext3 LHS, LHS
+ 1611890852U, // <3,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2685632684U, // <3,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
+ 2685632692U, // <3,0,2,2>: Cost 3 vext3 LHS, <0,2,2,0>
+ 2685632702U, // <3,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
+ 1611890892U, // <3,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2732966102U, // <3,0,2,5>: Cost 3 vext3 LHS, <0,2,5,7>
+ 2624767930U, // <3,0,2,6>: Cost 3 vext2 <1,2,3,0>, <2,6,3,7>
+ 2685632744U, // <3,0,2,7>: Cost 3 vext3 LHS, <0,2,7,7>
+ 1611890924U, // <3,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
+ 2624768150U, // <3,0,3,0>: Cost 3 vext2 <1,2,3,0>, <3,0,1,2>
+ 2685632764U, // <3,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
+ 2685632774U, // <3,0,3,2>: Cost 3 vext3 LHS, <0,3,2,1>
+ 2624768412U, // <3,0,3,3>: Cost 3 vext2 <1,2,3,0>, <3,3,3,3>
+ 2624768514U, // <3,0,3,4>: Cost 3 vext2 <1,2,3,0>, <3,4,5,6>
+ 3702491714U, // <3,0,3,5>: Cost 4 vext2 <1,u,3,0>, <3,5,3,7>
+ 2624768632U, // <3,0,3,6>: Cost 3 vext2 <1,2,3,0>, <3,6,0,7>
+ 3702491843U, // <3,0,3,7>: Cost 4 vext2 <1,u,3,0>, <3,7,0,1>
+ 2686959934U, // <3,0,3,u>: Cost 3 vext3 <0,3,u,3>, <0,3,u,3>
+ 2689835336U, // <3,0,4,0>: Cost 3 vext3 LHS, <0,4,0,4>
+ 1611891026U, // <3,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
+ 1611891036U, // <3,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
+ 3763577184U, // <3,0,4,3>: Cost 4 vext3 LHS, <0,4,3,1>
+ 2689835374U, // <3,0,4,4>: Cost 3 vext3 LHS, <0,4,4,6>
+ 1551027510U, // <3,0,4,5>: Cost 2 vext2 <1,2,3,0>, RHS
+ 2666573172U, // <3,0,4,6>: Cost 3 vext2 <u,2,3,0>, <4,6,4,6>
+ 3667711206U, // <3,0,4,7>: Cost 4 vext1 <7,3,0,4>, <7,3,0,4>
+ 1616093586U, // <3,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
+ 2685190556U, // <3,0,5,0>: Cost 3 vext3 LHS, <0,5,0,7>
+ 2666573520U, // <3,0,5,1>: Cost 3 vext2 <u,2,3,0>, <5,1,7,3>
+ 3040886886U, // <3,0,5,2>: Cost 3 vtrnl <3,4,5,6>, LHS
+ 3625912834U, // <3,0,5,3>: Cost 4 vext1 <0,3,0,5>, <3,4,5,6>
+ 2666573766U, // <3,0,5,4>: Cost 3 vext2 <u,2,3,0>, <5,4,7,6>
+ 2666573828U, // <3,0,5,5>: Cost 3 vext2 <u,2,3,0>, <5,5,5,5>
+ 2732966354U, // <3,0,5,6>: Cost 3 vext3 LHS, <0,5,6,7>
+ 2666573992U, // <3,0,5,7>: Cost 3 vext2 <u,2,3,0>, <5,7,5,7>
+ 3040886940U, // <3,0,5,u>: Cost 3 vtrnl <3,4,5,6>, LHS
+ 2685190637U, // <3,0,6,0>: Cost 3 vext3 LHS, <0,6,0,7>
+ 2732966390U, // <3,0,6,1>: Cost 3 vext3 LHS, <0,6,1,7>
+ 2689835519U, // <3,0,6,2>: Cost 3 vext3 LHS, <0,6,2,7>
+ 3667724438U, // <3,0,6,3>: Cost 4 vext1 <7,3,0,6>, <3,0,1,2>
+ 3763577355U, // <3,0,6,4>: Cost 4 vext3 LHS, <0,6,4,1>
+ 3806708243U, // <3,0,6,5>: Cost 4 vext3 LHS, <0,6,5,0>
+ 2666574648U, // <3,0,6,6>: Cost 3 vext2 <u,2,3,0>, <6,6,6,6>
+ 2657948520U, // <3,0,6,7>: Cost 3 vext2 <6,7,3,0>, <6,7,3,0>
+ 2689835573U, // <3,0,6,u>: Cost 3 vext3 LHS, <0,6,u,7>
+ 2666574842U, // <3,0,7,0>: Cost 3 vext2 <u,2,3,0>, <7,0,1,2>
+ 2685633095U, // <3,0,7,1>: Cost 3 vext3 LHS, <0,7,1,7>
+ 2660603052U, // <3,0,7,2>: Cost 3 vext2 <7,2,3,0>, <7,2,3,0>
+ 3643844997U, // <3,0,7,3>: Cost 4 vext1 <3,3,0,7>, <3,3,0,7>
+ 2666575206U, // <3,0,7,4>: Cost 3 vext2 <u,2,3,0>, <7,4,5,6>
+ 3655790391U, // <3,0,7,5>: Cost 4 vext1 <5,3,0,7>, <5,3,0,7>
+ 3731690968U, // <3,0,7,6>: Cost 4 vext2 <6,7,3,0>, <7,6,0,3>
+ 2666575468U, // <3,0,7,7>: Cost 3 vext2 <u,2,3,0>, <7,7,7,7>
+ 2664584850U, // <3,0,7,u>: Cost 3 vext2 <7,u,3,0>, <7,u,3,0>
+ 1616093834U, // <3,0,u,0>: Cost 2 vext3 LHS, <0,u,0,2>
+ 1611891346U, // <3,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
+ 537707165U, // <3,0,u,2>: Cost 1 vext3 LHS, LHS
+ 2689835684U, // <3,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
+ 1616093874U, // <3,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
+ 1551030426U, // <3,0,u,5>: Cost 2 vext2 <1,2,3,0>, RHS
+ 2624772304U, // <3,0,u,6>: Cost 3 vext2 <1,2,3,0>, <u,6,3,7>
+ 2594002154U, // <3,0,u,7>: Cost 3 vext1 <7,3,0,u>, <7,3,0,u>
+ 537707219U, // <3,0,u,u>: Cost 1 vext3 LHS, LHS
+ 2552201318U, // <3,1,0,0>: Cost 3 vext1 <0,3,1,0>, LHS
+ 2618802278U, // <3,1,0,1>: Cost 3 vext2 <0,2,3,1>, LHS
+ 2618802366U, // <3,1,0,2>: Cost 3 vext2 <0,2,3,1>, <0,2,3,1>
+ 1611449078U, // <3,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
+ 2552204598U, // <3,1,0,4>: Cost 3 vext1 <0,3,1,0>, RHS
+ 2732966663U, // <3,1,0,5>: Cost 3 vext3 LHS, <1,0,5,1>
+ 3906258396U, // <3,1,0,6>: Cost 4 vuzpr <2,3,0,1>, <2,0,4,6>
+ 3667752171U, // <3,1,0,7>: Cost 4 vext1 <7,3,1,0>, <7,3,1,0>
+ 1611891491U, // <3,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
+ 2689835819U, // <3,1,1,0>: Cost 3 vext3 LHS, <1,1,0,1>
+ 1611449140U, // <3,1,1,1>: Cost 2 vext3 LHS, <1,1,1,1>
+ 2624775063U, // <3,1,1,2>: Cost 3 vext2 <1,2,3,1>, <1,2,3,1>
+ 1611891528U, // <3,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
+ 2689835859U, // <3,1,1,4>: Cost 3 vext3 LHS, <1,1,4,5>
+ 2689835868U, // <3,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
+ 3763577701U, // <3,1,1,6>: Cost 4 vext3 LHS, <1,1,6,5>
+ 3765273452U, // <3,1,1,7>: Cost 4 vext3 <1,1,7,3>, <1,1,7,3>
+ 1611891573U, // <3,1,1,u>: Cost 2 vext3 LHS, <1,1,u,3>
+ 2629420494U, // <3,1,2,0>: Cost 3 vext2 <2,0,3,1>, <2,0,3,1>
+ 2689835911U, // <3,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
+ 2564163248U, // <3,1,2,2>: Cost 3 vext1 <2,3,1,2>, <2,3,1,2>
+ 1611449238U, // <3,1,2,3>: Cost 2 vext3 LHS, <1,2,3,0>
+ 2564164918U, // <3,1,2,4>: Cost 3 vext1 <2,3,1,2>, RHS
+ 2689835947U, // <3,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
+ 3692545978U, // <3,1,2,6>: Cost 4 vext2 <0,2,3,1>, <2,6,3,7>
+ 2732966842U, // <3,1,2,7>: Cost 3 vext3 LHS, <1,2,7,0>
+ 1611891651U, // <3,1,2,u>: Cost 2 vext3 LHS, <1,2,u,0>
+ 1484456038U, // <3,1,3,0>: Cost 2 vext1 <1,3,1,3>, LHS
+ 1611891672U, // <3,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
+ 2685633502U, // <3,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
+ 2685633512U, // <3,1,3,3>: Cost 3 vext3 LHS, <1,3,3,1>
+ 1484459318U, // <3,1,3,4>: Cost 2 vext1 <1,3,1,3>, RHS
+ 1611891712U, // <3,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
+ 2689836041U, // <3,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
+ 2733409294U, // <3,1,3,7>: Cost 3 vext3 LHS, <1,3,7,3>
+ 1611891735U, // <3,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
+ 2552234086U, // <3,1,4,0>: Cost 3 vext1 <0,3,1,4>, LHS
+ 2732966955U, // <3,1,4,1>: Cost 3 vext3 LHS, <1,4,1,5>
+ 2732966964U, // <3,1,4,2>: Cost 3 vext3 LHS, <1,4,2,5>
+ 2685633597U, // <3,1,4,3>: Cost 3 vext3 LHS, <1,4,3,5>
+ 2552237366U, // <3,1,4,4>: Cost 3 vext1 <0,3,1,4>, RHS
+ 2618805558U, // <3,1,4,5>: Cost 3 vext2 <0,2,3,1>, RHS
+ 2769472822U, // <3,1,4,6>: Cost 3 vuzpl <3,0,1,2>, RHS
+ 3667784943U, // <3,1,4,7>: Cost 4 vext1 <7,3,1,4>, <7,3,1,4>
+ 2685633642U, // <3,1,4,u>: Cost 3 vext3 LHS, <1,4,u,5>
+ 2689836143U, // <3,1,5,0>: Cost 3 vext3 LHS, <1,5,0,1>
+ 2564187280U, // <3,1,5,1>: Cost 3 vext1 <2,3,1,5>, <1,5,3,7>
+ 2564187827U, // <3,1,5,2>: Cost 3 vext1 <2,3,1,5>, <2,3,1,5>
+ 1611891856U, // <3,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
+ 2689836183U, // <3,1,5,4>: Cost 3 vext3 LHS, <1,5,4,5>
+ 3759375522U, // <3,1,5,5>: Cost 4 vext3 LHS, <1,5,5,7>
+ 3720417378U, // <3,1,5,6>: Cost 4 vext2 <4,u,3,1>, <5,6,7,0>
+ 2832518454U, // <3,1,5,7>: Cost 3 vuzpr <2,3,0,1>, RHS
+ 1611891901U, // <3,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
+ 3763578048U, // <3,1,6,0>: Cost 4 vext3 LHS, <1,6,0,1>
+ 2689836239U, // <3,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
+ 2732967128U, // <3,1,6,2>: Cost 3 vext3 LHS, <1,6,2,7>
+ 2685633761U, // <3,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
+ 3763578088U, // <3,1,6,4>: Cost 4 vext3 LHS, <1,6,4,5>
+ 2689836275U, // <3,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
+ 3763578108U, // <3,1,6,6>: Cost 4 vext3 LHS, <1,6,6,7>
+ 2732967166U, // <3,1,6,7>: Cost 3 vext3 LHS, <1,6,7,0>
+ 2685633806U, // <3,1,6,u>: Cost 3 vext3 LHS, <1,6,u,7>
+ 3631972454U, // <3,1,7,0>: Cost 4 vext1 <1,3,1,7>, LHS
+ 2659947612U, // <3,1,7,1>: Cost 3 vext2 <7,1,3,1>, <7,1,3,1>
+ 4036102294U, // <3,1,7,2>: Cost 4 vzipr <1,5,3,7>, <3,0,1,2>
+ 3095396454U, // <3,1,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 3631975734U, // <3,1,7,4>: Cost 4 vext1 <1,3,1,7>, RHS
+ 2222982144U, // <3,1,7,5>: Cost 3 vrev <1,3,5,7>
+ 3296797705U, // <3,1,7,6>: Cost 4 vrev <1,3,6,7>
+ 3720418924U, // <3,1,7,7>: Cost 4 vext2 <4,u,3,1>, <7,7,7,7>
+ 3095396459U, // <3,1,7,u>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 1484496998U, // <3,1,u,0>: Cost 2 vext1 <1,3,1,u>, LHS
+ 1611892077U, // <3,1,u,1>: Cost 2 vext3 LHS, <1,u,1,3>
+ 2685633907U, // <3,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
+ 1611892092U, // <3,1,u,3>: Cost 2 vext3 LHS, <1,u,3,0>
+ 1484500278U, // <3,1,u,4>: Cost 2 vext1 <1,3,1,u>, RHS
+ 1611892117U, // <3,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
+ 2685633950U, // <3,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
+ 2832518697U, // <3,1,u,7>: Cost 3 vuzpr <2,3,0,1>, RHS
+ 1611892140U, // <3,1,u,u>: Cost 2 vext3 LHS, <1,u,u,3>
+ 2623455232U, // <3,2,0,0>: Cost 3 vext2 <1,0,3,2>, <0,0,0,0>
+ 1549713510U, // <3,2,0,1>: Cost 2 vext2 <1,0,3,2>, LHS
+ 2689836484U, // <3,2,0,2>: Cost 3 vext3 LHS, <2,0,2,0>
+ 2685633997U, // <3,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
+ 2623455570U, // <3,2,0,4>: Cost 3 vext2 <1,0,3,2>, <0,4,1,5>
+ 2732967398U, // <3,2,0,5>: Cost 3 vext3 LHS, <2,0,5,7>
+ 2689836524U, // <3,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
+ 2229044964U, // <3,2,0,7>: Cost 3 vrev <2,3,7,0>
+ 1549714077U, // <3,2,0,u>: Cost 2 vext2 <1,0,3,2>, LHS
+ 1549714166U, // <3,2,1,0>: Cost 2 vext2 <1,0,3,2>, <1,0,3,2>
+ 2623456052U, // <3,2,1,1>: Cost 3 vext2 <1,0,3,2>, <1,1,1,1>
+ 2623456150U, // <3,2,1,2>: Cost 3 vext2 <1,0,3,2>, <1,2,3,0>
+ 2685634079U, // <3,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
+ 2552286518U, // <3,2,1,4>: Cost 3 vext1 <0,3,2,1>, RHS
+ 2623456400U, // <3,2,1,5>: Cost 3 vext2 <1,0,3,2>, <1,5,3,7>
+ 2689836604U, // <3,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
+ 3667834101U, // <3,2,1,7>: Cost 4 vext1 <7,3,2,1>, <7,3,2,1>
+ 1155385070U, // <3,2,1,u>: Cost 2 vrev <2,3,u,1>
+ 2689836629U, // <3,2,2,0>: Cost 3 vext3 LHS, <2,2,0,1>
+ 2689836640U, // <3,2,2,1>: Cost 3 vext3 LHS, <2,2,1,3>
+ 1611449960U, // <3,2,2,2>: Cost 2 vext3 LHS, <2,2,2,2>
+ 1611892338U, // <3,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
+ 2689836669U, // <3,2,2,4>: Cost 3 vext3 LHS, <2,2,4,5>
+ 2689836680U, // <3,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
+ 2689836688U, // <3,2,2,6>: Cost 3 vext3 LHS, <2,2,6,6>
+ 3763578518U, // <3,2,2,7>: Cost 4 vext3 LHS, <2,2,7,3>
+ 1611892383U, // <3,2,2,u>: Cost 2 vext3 LHS, <2,2,u,3>
+ 1611450022U, // <3,2,3,0>: Cost 2 vext3 LHS, <2,3,0,1>
+ 2685191854U, // <3,2,3,1>: Cost 3 vext3 LHS, <2,3,1,0>
+ 2685191865U, // <3,2,3,2>: Cost 3 vext3 LHS, <2,3,2,2>
+ 2685191875U, // <3,2,3,3>: Cost 3 vext3 LHS, <2,3,3,3>
+ 1611450062U, // <3,2,3,4>: Cost 2 vext3 LHS, <2,3,4,5>
+ 2732967635U, // <3,2,3,5>: Cost 3 vext3 LHS, <2,3,5,1>
+ 2732967645U, // <3,2,3,6>: Cost 3 vext3 LHS, <2,3,6,2>
+ 2732967652U, // <3,2,3,7>: Cost 3 vext3 LHS, <2,3,7,0>
+ 1611450094U, // <3,2,3,u>: Cost 2 vext3 LHS, <2,3,u,1>
+ 2558279782U, // <3,2,4,0>: Cost 3 vext1 <1,3,2,4>, LHS
+ 2558280602U, // <3,2,4,1>: Cost 3 vext1 <1,3,2,4>, <1,2,3,4>
+ 2732967692U, // <3,2,4,2>: Cost 3 vext3 LHS, <2,4,2,4>
+ 2685634326U, // <3,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
+ 2558283062U, // <3,2,4,4>: Cost 3 vext1 <1,3,2,4>, RHS
+ 1549716790U, // <3,2,4,5>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2689836844U, // <3,2,4,6>: Cost 3 vext3 LHS, <2,4,6,0>
+ 2229077736U, // <3,2,4,7>: Cost 3 vrev <2,3,7,4>
+ 1549717033U, // <3,2,4,u>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2552316006U, // <3,2,5,0>: Cost 3 vext1 <0,3,2,5>, LHS
+ 2228643507U, // <3,2,5,1>: Cost 3 vrev <2,3,1,5>
+ 2689836896U, // <3,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
+ 2685634408U, // <3,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
+ 1155122894U, // <3,2,5,4>: Cost 2 vrev <2,3,4,5>
+ 2665263108U, // <3,2,5,5>: Cost 3 vext2 <u,0,3,2>, <5,5,5,5>
+ 2689836932U, // <3,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
+ 2665263272U, // <3,2,5,7>: Cost 3 vext2 <u,0,3,2>, <5,7,5,7>
+ 1155417842U, // <3,2,5,u>: Cost 2 vrev <2,3,u,5>
+ 2689836953U, // <3,2,6,0>: Cost 3 vext3 LHS, <2,6,0,1>
+ 2689836964U, // <3,2,6,1>: Cost 3 vext3 LHS, <2,6,1,3>
+ 2689836976U, // <3,2,6,2>: Cost 3 vext3 LHS, <2,6,2,6>
+ 1611892666U, // <3,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
+ 2689836993U, // <3,2,6,4>: Cost 3 vext3 LHS, <2,6,4,5>
+ 2689837004U, // <3,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
+ 2689837013U, // <3,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
+ 2665263950U, // <3,2,6,7>: Cost 3 vext2 <u,0,3,2>, <6,7,0,1>
+ 1611892711U, // <3,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
+ 2665264122U, // <3,2,7,0>: Cost 3 vext2 <u,0,3,2>, <7,0,1,2>
+ 2623460419U, // <3,2,7,1>: Cost 3 vext2 <1,0,3,2>, <7,1,0,3>
+ 4169138340U, // <3,2,7,2>: Cost 4 vtrnr <1,3,5,7>, <0,2,0,2>
+ 2962358374U, // <3,2,7,3>: Cost 3 vzipr <1,5,3,7>, LHS
+ 2665264486U, // <3,2,7,4>: Cost 3 vext2 <u,0,3,2>, <7,4,5,6>
+ 2228954841U, // <3,2,7,5>: Cost 3 vrev <2,3,5,7>
+ 2229028578U, // <3,2,7,6>: Cost 3 vrev <2,3,6,7>
+ 2665264748U, // <3,2,7,7>: Cost 3 vext2 <u,0,3,2>, <7,7,7,7>
+ 2962358379U, // <3,2,7,u>: Cost 3 vzipr <1,5,3,7>, LHS
+ 1611892795U, // <3,2,u,0>: Cost 2 vext3 LHS, <2,u,0,1>
+ 1549719342U, // <3,2,u,1>: Cost 2 vext2 <1,0,3,2>, LHS
+ 1611449960U, // <3,2,u,2>: Cost 2 vext3 LHS, <2,2,2,2>
+ 1611892824U, // <3,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
+ 1611892835U, // <3,2,u,4>: Cost 2 vext3 LHS, <2,u,4,5>
+ 1549719706U, // <3,2,u,5>: Cost 2 vext2 <1,0,3,2>, RHS
+ 2689837168U, // <3,2,u,6>: Cost 3 vext3 LHS, <2,u,6,0>
+ 2665265408U, // <3,2,u,7>: Cost 3 vext2 <u,0,3,2>, <u,7,0,1>
+ 1611892867U, // <3,2,u,u>: Cost 2 vext3 LHS, <2,u,u,1>
+ 2685192331U, // <3,3,0,0>: Cost 3 vext3 LHS, <3,0,0,0>
+ 1611450518U, // <3,3,0,1>: Cost 2 vext3 LHS, <3,0,1,2>
+ 2685634717U, // <3,3,0,2>: Cost 3 vext3 LHS, <3,0,2,0>
+ 2564294806U, // <3,3,0,3>: Cost 3 vext1 <2,3,3,0>, <3,0,1,2>
+ 2685634736U, // <3,3,0,4>: Cost 3 vext3 LHS, <3,0,4,1>
+ 2732968122U, // <3,3,0,5>: Cost 3 vext3 LHS, <3,0,5,2>
+ 3763579075U, // <3,3,0,6>: Cost 4 vext3 LHS, <3,0,6,2>
+ 4034053264U, // <3,3,0,7>: Cost 4 vzipr <1,2,3,0>, <1,5,3,7>
+ 1611450581U, // <3,3,0,u>: Cost 2 vext3 LHS, <3,0,u,2>
+ 2685192415U, // <3,3,1,0>: Cost 3 vext3 LHS, <3,1,0,3>
+ 1550385992U, // <3,3,1,1>: Cost 2 vext2 <1,1,3,3>, <1,1,3,3>
+ 2685192433U, // <3,3,1,2>: Cost 3 vext3 LHS, <3,1,2,3>
+ 2685634808U, // <3,3,1,3>: Cost 3 vext3 LHS, <3,1,3,1>
+ 2558332214U, // <3,3,1,4>: Cost 3 vext1 <1,3,3,1>, RHS
+ 2685634828U, // <3,3,1,5>: Cost 3 vext3 LHS, <3,1,5,3>
+ 3759376661U, // <3,3,1,6>: Cost 4 vext3 LHS, <3,1,6,3>
+ 2703477022U, // <3,3,1,7>: Cost 3 vext3 <3,1,7,3>, <3,1,7,3>
+ 1555031423U, // <3,3,1,u>: Cost 2 vext2 <1,u,3,3>, <1,u,3,3>
+ 2564309094U, // <3,3,2,0>: Cost 3 vext1 <2,3,3,2>, LHS
+ 2630100513U, // <3,3,2,1>: Cost 3 vext2 <2,1,3,3>, <2,1,3,3>
+ 1557022322U, // <3,3,2,2>: Cost 2 vext2 <2,2,3,3>, <2,2,3,3>
+ 2685192520U, // <3,3,2,3>: Cost 3 vext3 LHS, <3,2,3,0>
+ 2564312374U, // <3,3,2,4>: Cost 3 vext1 <2,3,3,2>, RHS
+ 2732968286U, // <3,3,2,5>: Cost 3 vext3 LHS, <3,2,5,4>
+ 2685634918U, // <3,3,2,6>: Cost 3 vext3 LHS, <3,2,6,3>
+ 2704140655U, // <3,3,2,7>: Cost 3 vext3 <3,2,7,3>, <3,2,7,3>
+ 1561004120U, // <3,3,2,u>: Cost 2 vext2 <2,u,3,3>, <2,u,3,3>
+ 1496547430U, // <3,3,3,0>: Cost 2 vext1 <3,3,3,3>, LHS
+ 2624129256U, // <3,3,3,1>: Cost 3 vext2 <1,1,3,3>, <3,1,1,3>
+ 2630764866U, // <3,3,3,2>: Cost 3 vext2 <2,2,3,3>, <3,2,2,3>
+ 336380006U, // <3,3,3,3>: Cost 1 vdup3 LHS
+ 1496550710U, // <3,3,3,4>: Cost 2 vext1 <3,3,3,3>, RHS
+ 2732968368U, // <3,3,3,5>: Cost 3 vext3 LHS, <3,3,5,5>
+ 2624129683U, // <3,3,3,6>: Cost 3 vext2 <1,1,3,3>, <3,6,3,7>
+ 2594182400U, // <3,3,3,7>: Cost 3 vext1 <7,3,3,3>, <7,3,3,3>
+ 336380006U, // <3,3,3,u>: Cost 1 vdup3 LHS
+ 2558353510U, // <3,3,4,0>: Cost 3 vext1 <1,3,3,4>, LHS
+ 2558354411U, // <3,3,4,1>: Cost 3 vext1 <1,3,3,4>, <1,3,3,4>
+ 2564327108U, // <3,3,4,2>: Cost 3 vext1 <2,3,3,4>, <2,3,3,4>
+ 2564327938U, // <3,3,4,3>: Cost 3 vext1 <2,3,3,4>, <3,4,5,6>
+ 2960343962U, // <3,3,4,4>: Cost 3 vzipr <1,2,3,4>, <1,2,3,4>
+ 1611893250U, // <3,3,4,5>: Cost 2 vext3 LHS, <3,4,5,6>
+ 2771619126U, // <3,3,4,6>: Cost 3 vuzpl <3,3,3,3>, RHS
+ 4034086032U, // <3,3,4,7>: Cost 4 vzipr <1,2,3,4>, <1,5,3,7>
+ 1611893277U, // <3,3,4,u>: Cost 2 vext3 LHS, <3,4,u,6>
+ 2558361702U, // <3,3,5,0>: Cost 3 vext1 <1,3,3,5>, LHS
+ 2558362604U, // <3,3,5,1>: Cost 3 vext1 <1,3,3,5>, <1,3,3,5>
+ 2558363342U, // <3,3,5,2>: Cost 3 vext1 <1,3,3,5>, <2,3,4,5>
+ 2732968512U, // <3,3,5,3>: Cost 3 vext3 LHS, <3,5,3,5>
+ 2558364982U, // <3,3,5,4>: Cost 3 vext1 <1,3,3,5>, RHS
+ 3101279950U, // <3,3,5,5>: Cost 3 vtrnr <2,3,4,5>, <2,3,4,5>
+ 2665934946U, // <3,3,5,6>: Cost 3 vext2 <u,1,3,3>, <5,6,7,0>
+ 2826636598U, // <3,3,5,7>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 2826636599U, // <3,3,5,u>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 2732968568U, // <3,3,6,0>: Cost 3 vext3 LHS, <3,6,0,7>
+ 3763579521U, // <3,3,6,1>: Cost 4 vext3 LHS, <3,6,1,7>
+ 2732968586U, // <3,3,6,2>: Cost 3 vext3 LHS, <3,6,2,7>
+ 2732968595U, // <3,3,6,3>: Cost 3 vext3 LHS, <3,6,3,7>
+ 2732968604U, // <3,3,6,4>: Cost 3 vext3 LHS, <3,6,4,7>
+ 3763579557U, // <3,3,6,5>: Cost 4 vext3 LHS, <3,6,5,7>
+ 2732968621U, // <3,3,6,6>: Cost 3 vext3 LHS, <3,6,6,6>
+ 2657973099U, // <3,3,6,7>: Cost 3 vext2 <6,7,3,3>, <6,7,3,3>
+ 2658636732U, // <3,3,6,u>: Cost 3 vext2 <6,u,3,3>, <6,u,3,3>
+ 2558378086U, // <3,3,7,0>: Cost 3 vext1 <1,3,3,7>, LHS
+ 2558378990U, // <3,3,7,1>: Cost 3 vext1 <1,3,3,7>, <1,3,3,7>
+ 2564351687U, // <3,3,7,2>: Cost 3 vext1 <2,3,3,7>, <2,3,3,7>
+ 2661291264U, // <3,3,7,3>: Cost 3 vext2 <7,3,3,3>, <7,3,3,3>
+ 2558381366U, // <3,3,7,4>: Cost 3 vext1 <1,3,3,7>, RHS
+ 2732968694U, // <3,3,7,5>: Cost 3 vext3 LHS, <3,7,5,7>
+ 3781126907U, // <3,3,7,6>: Cost 4 vext3 <3,7,6,3>, <3,7,6,3>
+ 3095397376U, // <3,3,7,7>: Cost 3 vtrnr <1,3,5,7>, <1,3,5,7>
+ 2558383918U, // <3,3,7,u>: Cost 3 vext1 <1,3,3,7>, LHS
+ 1496547430U, // <3,3,u,0>: Cost 2 vext1 <3,3,3,3>, LHS
+ 1611893534U, // <3,3,u,1>: Cost 2 vext3 LHS, <3,u,1,2>
+ 1592858504U, // <3,3,u,2>: Cost 2 vext2 <u,2,3,3>, <u,2,3,3>
+ 336380006U, // <3,3,u,3>: Cost 1 vdup3 LHS
+ 1496550710U, // <3,3,u,4>: Cost 2 vext1 <3,3,3,3>, RHS
+ 1611893574U, // <3,3,u,5>: Cost 2 vext3 LHS, <3,u,5,6>
+ 2690280268U, // <3,3,u,6>: Cost 3 vext3 LHS, <3,u,6,3>
+ 2826636841U, // <3,3,u,7>: Cost 3 vuzpr <1,3,1,3>, RHS
+ 336380006U, // <3,3,u,u>: Cost 1 vdup3 LHS
+ 2624798720U, // <3,4,0,0>: Cost 3 vext2 <1,2,3,4>, <0,0,0,0>
+ 1551056998U, // <3,4,0,1>: Cost 2 vext2 <1,2,3,4>, LHS
+ 2624798884U, // <3,4,0,2>: Cost 3 vext2 <1,2,3,4>, <0,2,0,2>
+ 3693232384U, // <3,4,0,3>: Cost 4 vext2 <0,3,3,4>, <0,3,1,4>
+ 2624799058U, // <3,4,0,4>: Cost 3 vext2 <1,2,3,4>, <0,4,1,5>
+ 1659227026U, // <3,4,0,5>: Cost 2 vext3 LHS, <4,0,5,1>
+ 1659227036U, // <3,4,0,6>: Cost 2 vext3 LHS, <4,0,6,2>
+ 3667973382U, // <3,4,0,7>: Cost 4 vext1 <7,3,4,0>, <7,3,4,0>
+ 1551057565U, // <3,4,0,u>: Cost 2 vext2 <1,2,3,4>, LHS
+ 2624799478U, // <3,4,1,0>: Cost 3 vext2 <1,2,3,4>, <1,0,3,2>
+ 2624799540U, // <3,4,1,1>: Cost 3 vext2 <1,2,3,4>, <1,1,1,1>
+ 1551057818U, // <3,4,1,2>: Cost 2 vext2 <1,2,3,4>, <1,2,3,4>
+ 2624799704U, // <3,4,1,3>: Cost 3 vext2 <1,2,3,4>, <1,3,1,3>
+ 2564377910U, // <3,4,1,4>: Cost 3 vext1 <2,3,4,1>, RHS
+ 2689838050U, // <3,4,1,5>: Cost 3 vext3 LHS, <4,1,5,0>
+ 2689838062U, // <3,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
+ 2628117807U, // <3,4,1,7>: Cost 3 vext2 <1,7,3,4>, <1,7,3,4>
+ 1555039616U, // <3,4,1,u>: Cost 2 vext2 <1,u,3,4>, <1,u,3,4>
+ 3626180710U, // <3,4,2,0>: Cost 4 vext1 <0,3,4,2>, LHS
+ 2624800298U, // <3,4,2,1>: Cost 3 vext2 <1,2,3,4>, <2,1,4,3>
+ 2624800360U, // <3,4,2,2>: Cost 3 vext2 <1,2,3,4>, <2,2,2,2>
+ 2624800422U, // <3,4,2,3>: Cost 3 vext2 <1,2,3,4>, <2,3,0,1>
+ 2624800514U, // <3,4,2,4>: Cost 3 vext2 <1,2,3,4>, <2,4,1,3>
+ 2709965878U, // <3,4,2,5>: Cost 3 vext3 <4,2,5,3>, <4,2,5,3>
+ 2689838140U, // <3,4,2,6>: Cost 3 vext3 LHS, <4,2,6,0>
+ 2634090504U, // <3,4,2,7>: Cost 3 vext2 <2,7,3,4>, <2,7,3,4>
+ 2689838158U, // <3,4,2,u>: Cost 3 vext3 LHS, <4,2,u,0>
+ 2624800918U, // <3,4,3,0>: Cost 3 vext2 <1,2,3,4>, <3,0,1,2>
+ 2636081403U, // <3,4,3,1>: Cost 3 vext2 <3,1,3,4>, <3,1,3,4>
+ 2636745036U, // <3,4,3,2>: Cost 3 vext2 <3,2,3,4>, <3,2,3,4>
+ 2624801180U, // <3,4,3,3>: Cost 3 vext2 <1,2,3,4>, <3,3,3,3>
+ 2624801232U, // <3,4,3,4>: Cost 3 vext2 <1,2,3,4>, <3,4,0,1>
+ 2905836854U, // <3,4,3,5>: Cost 3 vzipl <3,3,3,3>, RHS
+ 3040054582U, // <3,4,3,6>: Cost 3 vtrnl <3,3,3,3>, RHS
+ 3702524611U, // <3,4,3,7>: Cost 4 vext2 <1,u,3,4>, <3,7,0,1>
+ 2624801566U, // <3,4,3,u>: Cost 3 vext2 <1,2,3,4>, <3,u,1,2>
+ 2564399206U, // <3,4,4,0>: Cost 3 vext1 <2,3,4,4>, LHS
+ 2564400026U, // <3,4,4,1>: Cost 3 vext1 <2,3,4,4>, <1,2,3,4>
+ 2564400845U, // <3,4,4,2>: Cost 3 vext1 <2,3,4,4>, <2,3,4,4>
+ 2570373542U, // <3,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
+ 1659227344U, // <3,4,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
+ 1551060278U, // <3,4,4,5>: Cost 2 vext2 <1,2,3,4>, RHS
+ 1659227364U, // <3,4,4,6>: Cost 2 vext3 LHS, <4,4,6,6>
+ 3668006154U, // <3,4,4,7>: Cost 4 vext1 <7,3,4,4>, <7,3,4,4>
+ 1551060521U, // <3,4,4,u>: Cost 2 vext2 <1,2,3,4>, RHS
+ 1490665574U, // <3,4,5,0>: Cost 2 vext1 <2,3,4,5>, LHS
+ 2689838341U, // <3,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
+ 1490667214U, // <3,4,5,2>: Cost 2 vext1 <2,3,4,5>, <2,3,4,5>
+ 2564409494U, // <3,4,5,3>: Cost 3 vext1 <2,3,4,5>, <3,0,1,2>
+ 1490668854U, // <3,4,5,4>: Cost 2 vext1 <2,3,4,5>, RHS
+ 2689838381U, // <3,4,5,5>: Cost 3 vext3 LHS, <4,5,5,7>
+ 537709878U, // <3,4,5,6>: Cost 1 vext3 LHS, RHS
+ 2594272523U, // <3,4,5,7>: Cost 3 vext1 <7,3,4,5>, <7,3,4,5>
+ 537709896U, // <3,4,5,u>: Cost 1 vext3 LHS, RHS
+ 2689838411U, // <3,4,6,0>: Cost 3 vext3 LHS, <4,6,0,1>
+ 2558444534U, // <3,4,6,1>: Cost 3 vext1 <1,3,4,6>, <1,3,4,6>
+ 2666607098U, // <3,4,6,2>: Cost 3 vext2 <u,2,3,4>, <6,2,7,3>
+ 2558446082U, // <3,4,6,3>: Cost 3 vext1 <1,3,4,6>, <3,4,5,6>
+ 1659227508U, // <3,4,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
+ 2689838462U, // <3,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
+ 2689838471U, // <3,4,6,6>: Cost 3 vext3 LHS, <4,6,6,7>
+ 2657981292U, // <3,4,6,7>: Cost 3 vext2 <6,7,3,4>, <6,7,3,4>
+ 1659227540U, // <3,4,6,u>: Cost 2 vext3 LHS, <4,6,u,2>
+ 2666607610U, // <3,4,7,0>: Cost 3 vext2 <u,2,3,4>, <7,0,1,2>
+ 3702527072U, // <3,4,7,1>: Cost 4 vext2 <1,u,3,4>, <7,1,3,5>
+ 2660635824U, // <3,4,7,2>: Cost 3 vext2 <7,2,3,4>, <7,2,3,4>
+ 3644139945U, // <3,4,7,3>: Cost 4 vext1 <3,3,4,7>, <3,3,4,7>
+ 2666607974U, // <3,4,7,4>: Cost 3 vext2 <u,2,3,4>, <7,4,5,6>
+ 2732969416U, // <3,4,7,5>: Cost 3 vext3 LHS, <4,7,5,0>
+ 2732969425U, // <3,4,7,6>: Cost 3 vext3 LHS, <4,7,6,0>
+ 2666608236U, // <3,4,7,7>: Cost 3 vext2 <u,2,3,4>, <7,7,7,7>
+ 2664617622U, // <3,4,7,u>: Cost 3 vext2 <7,u,3,4>, <7,u,3,4>
+ 1490690150U, // <3,4,u,0>: Cost 2 vext1 <2,3,4,u>, LHS
+ 1551062830U, // <3,4,u,1>: Cost 2 vext2 <1,2,3,4>, LHS
+ 1490691793U, // <3,4,u,2>: Cost 2 vext1 <2,3,4,u>, <2,3,4,u>
+ 2624804796U, // <3,4,u,3>: Cost 3 vext2 <1,2,3,4>, <u,3,0,1>
+ 1490693430U, // <3,4,u,4>: Cost 2 vext1 <2,3,4,u>, RHS
+ 1551063194U, // <3,4,u,5>: Cost 2 vext2 <1,2,3,4>, RHS
+ 537710121U, // <3,4,u,6>: Cost 1 vext3 LHS, RHS
+ 2594297102U, // <3,4,u,7>: Cost 3 vext1 <7,3,4,u>, <7,3,4,u>
+ 537710139U, // <3,4,u,u>: Cost 1 vext3 LHS, RHS
+ 3692576768U, // <3,5,0,0>: Cost 4 vext2 <0,2,3,5>, <0,0,0,0>
+ 2618835046U, // <3,5,0,1>: Cost 3 vext2 <0,2,3,5>, LHS
+ 2618835138U, // <3,5,0,2>: Cost 3 vext2 <0,2,3,5>, <0,2,3,5>
+ 3692577024U, // <3,5,0,3>: Cost 4 vext2 <0,2,3,5>, <0,3,1,4>
+ 2689838690U, // <3,5,0,4>: Cost 3 vext3 LHS, <5,0,4,1>
+ 2732969579U, // <3,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
+ 2732969588U, // <3,5,0,6>: Cost 3 vext3 LHS, <5,0,6,1>
+ 2246963055U, // <3,5,0,7>: Cost 3 vrev <5,3,7,0>
+ 2618835613U, // <3,5,0,u>: Cost 3 vext2 <0,2,3,5>, LHS
+ 2594308198U, // <3,5,1,0>: Cost 3 vext1 <7,3,5,1>, LHS
+ 3692577588U, // <3,5,1,1>: Cost 4 vext2 <0,2,3,5>, <1,1,1,1>
+ 2624807835U, // <3,5,1,2>: Cost 3 vext2 <1,2,3,5>, <1,2,3,5>
+ 2625471468U, // <3,5,1,3>: Cost 3 vext2 <1,3,3,5>, <1,3,3,5>
+ 2626135101U, // <3,5,1,4>: Cost 3 vext2 <1,4,3,5>, <1,4,3,5>
+ 2594311888U, // <3,5,1,5>: Cost 3 vext1 <7,3,5,1>, <5,1,7,3>
+ 3699877107U, // <3,5,1,6>: Cost 4 vext2 <1,4,3,5>, <1,6,5,7>
+ 1641680592U, // <3,5,1,7>: Cost 2 vext3 <5,1,7,3>, <5,1,7,3>
+ 1641754329U, // <3,5,1,u>: Cost 2 vext3 <5,1,u,3>, <5,1,u,3>
+ 3692578274U, // <3,5,2,0>: Cost 4 vext2 <0,2,3,5>, <2,0,5,3>
+ 2630116899U, // <3,5,2,1>: Cost 3 vext2 <2,1,3,5>, <2,1,3,5>
+ 3692578408U, // <3,5,2,2>: Cost 4 vext2 <0,2,3,5>, <2,2,2,2>
+ 2625472206U, // <3,5,2,3>: Cost 3 vext2 <1,3,3,5>, <2,3,4,5>
+ 2632107798U, // <3,5,2,4>: Cost 3 vext2 <2,4,3,5>, <2,4,3,5>
+ 2715938575U, // <3,5,2,5>: Cost 3 vext3 <5,2,5,3>, <5,2,5,3>
+ 3692578746U, // <3,5,2,6>: Cost 4 vext2 <0,2,3,5>, <2,6,3,7>
+ 2716086049U, // <3,5,2,7>: Cost 3 vext3 <5,2,7,3>, <5,2,7,3>
+ 2634762330U, // <3,5,2,u>: Cost 3 vext2 <2,u,3,5>, <2,u,3,5>
+ 3692578966U, // <3,5,3,0>: Cost 4 vext2 <0,2,3,5>, <3,0,1,2>
+ 2636089596U, // <3,5,3,1>: Cost 3 vext2 <3,1,3,5>, <3,1,3,5>
+ 3699214668U, // <3,5,3,2>: Cost 4 vext2 <1,3,3,5>, <3,2,3,4>
+ 2638080412U, // <3,5,3,3>: Cost 3 vext2 <3,4,3,5>, <3,3,3,3>
+ 2618837506U, // <3,5,3,4>: Cost 3 vext2 <0,2,3,5>, <3,4,5,6>
+ 2832844494U, // <3,5,3,5>: Cost 3 vuzpr <2,3,4,5>, <2,3,4,5>
+ 4033415682U, // <3,5,3,6>: Cost 4 vzipr <1,1,3,3>, <3,4,5,6>
+ 3095072054U, // <3,5,3,7>: Cost 3 vtrnr <1,3,1,3>, RHS
+ 3095072055U, // <3,5,3,u>: Cost 3 vtrnr <1,3,1,3>, RHS
+ 2600304742U, // <3,5,4,0>: Cost 3 vext1 <u,3,5,4>, LHS
+ 3763580815U, // <3,5,4,1>: Cost 4 vext3 LHS, <5,4,1,5>
+ 2564474582U, // <3,5,4,2>: Cost 3 vext1 <2,3,5,4>, <2,3,5,4>
+ 3699879044U, // <3,5,4,3>: Cost 4 vext2 <1,4,3,5>, <4,3,5,0>
+ 2600308022U, // <3,5,4,4>: Cost 3 vext1 <u,3,5,4>, RHS
+ 2618838326U, // <3,5,4,5>: Cost 3 vext2 <0,2,3,5>, RHS
+ 2772454710U, // <3,5,4,6>: Cost 3 vuzpl <3,4,5,6>, RHS
+ 1659228102U, // <3,5,4,7>: Cost 2 vext3 LHS, <5,4,7,6>
+ 1659228111U, // <3,5,4,u>: Cost 2 vext3 LHS, <5,4,u,6>
+ 2570453094U, // <3,5,5,0>: Cost 3 vext1 <3,3,5,5>, LHS
+ 2624810704U, // <3,5,5,1>: Cost 3 vext2 <1,2,3,5>, <5,1,7,3>
+ 2570454734U, // <3,5,5,2>: Cost 3 vext1 <3,3,5,5>, <2,3,4,5>
+ 2570455472U, // <3,5,5,3>: Cost 3 vext1 <3,3,5,5>, <3,3,5,5>
+ 2570456374U, // <3,5,5,4>: Cost 3 vext1 <3,3,5,5>, RHS
+ 1659228164U, // <3,5,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
+ 2732969998U, // <3,5,5,6>: Cost 3 vext3 LHS, <5,5,6,6>
+ 1659228184U, // <3,5,5,7>: Cost 2 vext3 LHS, <5,5,7,7>
+ 1659228193U, // <3,5,5,u>: Cost 2 vext3 LHS, <5,5,u,7>
+ 2732970020U, // <3,5,6,0>: Cost 3 vext3 LHS, <5,6,0,1>
+ 2732970035U, // <3,5,6,1>: Cost 3 vext3 LHS, <5,6,1,7>
+ 2564490968U, // <3,5,6,2>: Cost 3 vext1 <2,3,5,6>, <2,3,5,6>
+ 2732970050U, // <3,5,6,3>: Cost 3 vext3 LHS, <5,6,3,4>
+ 2732970060U, // <3,5,6,4>: Cost 3 vext3 LHS, <5,6,4,5>
+ 2732970071U, // <3,5,6,5>: Cost 3 vext3 LHS, <5,6,5,7>
+ 2732970080U, // <3,5,6,6>: Cost 3 vext3 LHS, <5,6,6,7>
+ 1659228258U, // <3,5,6,7>: Cost 2 vext3 LHS, <5,6,7,0>
+ 1659228267U, // <3,5,6,u>: Cost 2 vext3 LHS, <5,6,u,0>
+ 1484783718U, // <3,5,7,0>: Cost 2 vext1 <1,3,5,7>, LHS
+ 1484784640U, // <3,5,7,1>: Cost 2 vext1 <1,3,5,7>, <1,3,5,7>
+ 2558527080U, // <3,5,7,2>: Cost 3 vext1 <1,3,5,7>, <2,2,2,2>
+ 2558527638U, // <3,5,7,3>: Cost 3 vext1 <1,3,5,7>, <3,0,1,2>
+ 1484786998U, // <3,5,7,4>: Cost 2 vext1 <1,3,5,7>, RHS
+ 1659228328U, // <3,5,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
+ 2732970154U, // <3,5,7,6>: Cost 3 vext3 LHS, <5,7,6,0>
+ 2558531180U, // <3,5,7,7>: Cost 3 vext1 <1,3,5,7>, <7,7,7,7>
+ 1484789550U, // <3,5,7,u>: Cost 2 vext1 <1,3,5,7>, LHS
+ 1484791910U, // <3,5,u,0>: Cost 2 vext1 <1,3,5,u>, LHS
+ 1484792833U, // <3,5,u,1>: Cost 2 vext1 <1,3,5,u>, <1,3,5,u>
+ 2558535272U, // <3,5,u,2>: Cost 3 vext1 <1,3,5,u>, <2,2,2,2>
+ 2558535830U, // <3,5,u,3>: Cost 3 vext1 <1,3,5,u>, <3,0,1,2>
+ 1484795190U, // <3,5,u,4>: Cost 2 vext1 <1,3,5,u>, RHS
+ 1659228409U, // <3,5,u,5>: Cost 2 vext3 LHS, <5,u,5,7>
+ 2772457626U, // <3,5,u,6>: Cost 3 vuzpl <3,4,5,6>, RHS
+ 1646326023U, // <3,5,u,7>: Cost 2 vext3 <5,u,7,3>, <5,u,7,3>
+ 1484797742U, // <3,5,u,u>: Cost 2 vext1 <1,3,5,u>, LHS
+ 2558541926U, // <3,6,0,0>: Cost 3 vext1 <1,3,6,0>, LHS
+ 2689839393U, // <3,6,0,1>: Cost 3 vext3 LHS, <6,0,1,2>
+ 2689839404U, // <3,6,0,2>: Cost 3 vext3 LHS, <6,0,2,4>
+ 3706519808U, // <3,6,0,3>: Cost 4 vext2 <2,5,3,6>, <0,3,1,4>
+ 2689839420U, // <3,6,0,4>: Cost 3 vext3 LHS, <6,0,4,2>
+ 2732970314U, // <3,6,0,5>: Cost 3 vext3 LHS, <6,0,5,7>
+ 2732970316U, // <3,6,0,6>: Cost 3 vext3 LHS, <6,0,6,0>
+ 2960313654U, // <3,6,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
+ 2689839456U, // <3,6,0,u>: Cost 3 vext3 LHS, <6,0,u,2>
+ 3763581290U, // <3,6,1,0>: Cost 4 vext3 LHS, <6,1,0,3>
+ 3763581297U, // <3,6,1,1>: Cost 4 vext3 LHS, <6,1,1,1>
+ 2624816028U, // <3,6,1,2>: Cost 3 vext2 <1,2,3,6>, <1,2,3,6>
+ 3763581315U, // <3,6,1,3>: Cost 4 vext3 LHS, <6,1,3,1>
+ 2626143294U, // <3,6,1,4>: Cost 3 vext2 <1,4,3,6>, <1,4,3,6>
+ 3763581335U, // <3,6,1,5>: Cost 4 vext3 LHS, <6,1,5,3>
+ 2721321376U, // <3,6,1,6>: Cost 3 vext3 <6,1,6,3>, <6,1,6,3>
+ 2721395113U, // <3,6,1,7>: Cost 3 vext3 <6,1,7,3>, <6,1,7,3>
+ 2628797826U, // <3,6,1,u>: Cost 3 vext2 <1,u,3,6>, <1,u,3,6>
+ 2594390118U, // <3,6,2,0>: Cost 3 vext1 <7,3,6,2>, LHS
+ 2721616324U, // <3,6,2,1>: Cost 3 vext3 <6,2,1,3>, <6,2,1,3>
+ 2630788725U, // <3,6,2,2>: Cost 3 vext2 <2,2,3,6>, <2,2,3,6>
+ 3763581395U, // <3,6,2,3>: Cost 4 vext3 LHS, <6,2,3,0>
+ 2632115991U, // <3,6,2,4>: Cost 3 vext2 <2,4,3,6>, <2,4,3,6>
+ 2632779624U, // <3,6,2,5>: Cost 3 vext2 <2,5,3,6>, <2,5,3,6>
+ 2594394618U, // <3,6,2,6>: Cost 3 vext1 <7,3,6,2>, <6,2,7,3>
+ 1648316922U, // <3,6,2,7>: Cost 2 vext3 <6,2,7,3>, <6,2,7,3>
+ 1648390659U, // <3,6,2,u>: Cost 2 vext3 <6,2,u,3>, <6,2,u,3>
+ 3693914262U, // <3,6,3,0>: Cost 4 vext2 <0,4,3,6>, <3,0,1,2>
+ 3638281176U, // <3,6,3,1>: Cost 4 vext1 <2,3,6,3>, <1,3,1,3>
+ 3696568678U, // <3,6,3,2>: Cost 4 vext2 <0,u,3,6>, <3,2,6,3>
+ 2638088604U, // <3,6,3,3>: Cost 3 vext2 <3,4,3,6>, <3,3,3,3>
+ 2632780290U, // <3,6,3,4>: Cost 3 vext2 <2,5,3,6>, <3,4,5,6>
+ 3712494145U, // <3,6,3,5>: Cost 4 vext2 <3,5,3,6>, <3,5,3,6>
+ 3698559612U, // <3,6,3,6>: Cost 4 vext2 <1,2,3,6>, <3,6,1,2>
+ 2959674678U, // <3,6,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
+ 2959674679U, // <3,6,3,u>: Cost 3 vzipr <1,1,3,3>, RHS
+ 3763581536U, // <3,6,4,0>: Cost 4 vext3 LHS, <6,4,0,6>
+ 2722943590U, // <3,6,4,1>: Cost 3 vext3 <6,4,1,3>, <6,4,1,3>
+ 2732970609U, // <3,6,4,2>: Cost 3 vext3 LHS, <6,4,2,5>
+ 3698560147U, // <3,6,4,3>: Cost 4 vext2 <1,2,3,6>, <4,3,6,6>
+ 2732970628U, // <3,6,4,4>: Cost 3 vext3 LHS, <6,4,4,6>
+ 2689839757U, // <3,6,4,5>: Cost 3 vext3 LHS, <6,4,5,6>
+ 2732970640U, // <3,6,4,6>: Cost 3 vext3 LHS, <6,4,6,0>
+ 2960346422U, // <3,6,4,7>: Cost 3 vzipr <1,2,3,4>, RHS
+ 2689839784U, // <3,6,4,u>: Cost 3 vext3 LHS, <6,4,u,6>
+ 2576498790U, // <3,6,5,0>: Cost 3 vext1 <4,3,6,5>, LHS
+ 3650241270U, // <3,6,5,1>: Cost 4 vext1 <4,3,6,5>, <1,0,3,2>
+ 2732970692U, // <3,6,5,2>: Cost 3 vext3 LHS, <6,5,2,7>
+ 2576501250U, // <3,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
+ 2576501906U, // <3,6,5,4>: Cost 3 vext1 <4,3,6,5>, <4,3,6,5>
+ 3650244622U, // <3,6,5,5>: Cost 4 vext1 <4,3,6,5>, <5,5,6,6>
+ 4114633528U, // <3,6,5,6>: Cost 4 vtrnl <3,4,5,6>, <6,6,6,6>
+ 2732970735U, // <3,6,5,7>: Cost 3 vext3 LHS, <6,5,7,5>
+ 2576504622U, // <3,6,5,u>: Cost 3 vext1 <4,3,6,5>, LHS
+ 2732970749U, // <3,6,6,0>: Cost 3 vext3 LHS, <6,6,0,1>
+ 2724270856U, // <3,6,6,1>: Cost 3 vext3 <6,6,1,3>, <6,6,1,3>
+ 2624819706U, // <3,6,6,2>: Cost 3 vext2 <1,2,3,6>, <6,2,7,3>
+ 3656223234U, // <3,6,6,3>: Cost 4 vext1 <5,3,6,6>, <3,4,5,6>
+ 2732970788U, // <3,6,6,4>: Cost 3 vext3 LHS, <6,6,4,4>
+ 2732970800U, // <3,6,6,5>: Cost 3 vext3 LHS, <6,6,5,7>
+ 1659228984U, // <3,6,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1659228994U, // <3,6,6,7>: Cost 2 vext3 LHS, <6,6,7,7>
+ 1659229003U, // <3,6,6,u>: Cost 2 vext3 LHS, <6,6,u,7>
+ 1659229006U, // <3,6,7,0>: Cost 2 vext3 LHS, <6,7,0,1>
+ 2558600201U, // <3,6,7,1>: Cost 3 vext1 <1,3,6,7>, <1,3,6,7>
+ 2558601146U, // <3,6,7,2>: Cost 3 vext1 <1,3,6,7>, <2,6,3,7>
+ 2725081963U, // <3,6,7,3>: Cost 3 vext3 <6,7,3,3>, <6,7,3,3>
+ 1659229046U, // <3,6,7,4>: Cost 2 vext3 LHS, <6,7,4,5>
+ 2715423611U, // <3,6,7,5>: Cost 3 vext3 <5,1,7,3>, <6,7,5,1>
+ 2722059141U, // <3,6,7,6>: Cost 3 vext3 <6,2,7,3>, <6,7,6,2>
+ 2962361654U, // <3,6,7,7>: Cost 3 vzipr <1,5,3,7>, RHS
+ 1659229078U, // <3,6,7,u>: Cost 2 vext3 LHS, <6,7,u,1>
+ 1659229087U, // <3,6,u,0>: Cost 2 vext3 LHS, <6,u,0,1>
+ 2689840041U, // <3,6,u,1>: Cost 3 vext3 LHS, <6,u,1,2>
+ 2558609339U, // <3,6,u,2>: Cost 3 vext1 <1,3,6,u>, <2,6,3,u>
+ 2576525853U, // <3,6,u,3>: Cost 3 vext1 <4,3,6,u>, <3,4,u,6>
+ 1659229127U, // <3,6,u,4>: Cost 2 vext3 LHS, <6,u,4,5>
+ 2689840081U, // <3,6,u,5>: Cost 3 vext3 LHS, <6,u,5,6>
+ 1659228984U, // <3,6,u,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1652298720U, // <3,6,u,7>: Cost 2 vext3 <6,u,7,3>, <6,u,7,3>
+ 1659229159U, // <3,6,u,u>: Cost 2 vext3 LHS, <6,u,u,1>
+ 2626813952U, // <3,7,0,0>: Cost 3 vext2 <1,5,3,7>, <0,0,0,0>
+ 1553072230U, // <3,7,0,1>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626814116U, // <3,7,0,2>: Cost 3 vext2 <1,5,3,7>, <0,2,0,2>
+ 3700556028U, // <3,7,0,3>: Cost 4 vext2 <1,5,3,7>, <0,3,1,0>
+ 2626814290U, // <3,7,0,4>: Cost 3 vext2 <1,5,3,7>, <0,4,1,5>
+ 2582507375U, // <3,7,0,5>: Cost 3 vext1 <5,3,7,0>, <5,3,7,0>
+ 2588480072U, // <3,7,0,6>: Cost 3 vext1 <6,3,7,0>, <6,3,7,0>
+ 2732971055U, // <3,7,0,7>: Cost 3 vext3 LHS, <7,0,7,1>
+ 1553072797U, // <3,7,0,u>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626814710U, // <3,7,1,0>: Cost 3 vext2 <1,5,3,7>, <1,0,3,2>
+ 2626814772U, // <3,7,1,1>: Cost 3 vext2 <1,5,3,7>, <1,1,1,1>
+ 2626814870U, // <3,7,1,2>: Cost 3 vext2 <1,5,3,7>, <1,2,3,0>
+ 2625487854U, // <3,7,1,3>: Cost 3 vext2 <1,3,3,7>, <1,3,3,7>
+ 2582514998U, // <3,7,1,4>: Cost 3 vext1 <5,3,7,1>, RHS
+ 1553073296U, // <3,7,1,5>: Cost 2 vext2 <1,5,3,7>, <1,5,3,7>
+ 2627478753U, // <3,7,1,6>: Cost 3 vext2 <1,6,3,7>, <1,6,3,7>
+ 2727367810U, // <3,7,1,7>: Cost 3 vext3 <7,1,7,3>, <7,1,7,3>
+ 1555064195U, // <3,7,1,u>: Cost 2 vext2 <1,u,3,7>, <1,u,3,7>
+ 2588491878U, // <3,7,2,0>: Cost 3 vext1 <6,3,7,2>, LHS
+ 3700557318U, // <3,7,2,1>: Cost 4 vext2 <1,5,3,7>, <2,1,0,3>
+ 2626815592U, // <3,7,2,2>: Cost 3 vext2 <1,5,3,7>, <2,2,2,2>
+ 2626815654U, // <3,7,2,3>: Cost 3 vext2 <1,5,3,7>, <2,3,0,1>
+ 2588495158U, // <3,7,2,4>: Cost 3 vext1 <6,3,7,2>, RHS
+ 2632787817U, // <3,7,2,5>: Cost 3 vext2 <2,5,3,7>, <2,5,3,7>
+ 1559709626U, // <3,7,2,6>: Cost 2 vext2 <2,6,3,7>, <2,6,3,7>
+ 2728031443U, // <3,7,2,7>: Cost 3 vext3 <7,2,7,3>, <7,2,7,3>
+ 1561036892U, // <3,7,2,u>: Cost 2 vext2 <2,u,3,7>, <2,u,3,7>
+ 2626816150U, // <3,7,3,0>: Cost 3 vext2 <1,5,3,7>, <3,0,1,2>
+ 2626816268U, // <3,7,3,1>: Cost 3 vext2 <1,5,3,7>, <3,1,5,3>
+ 2633451878U, // <3,7,3,2>: Cost 3 vext2 <2,6,3,7>, <3,2,6,3>
+ 2626816412U, // <3,7,3,3>: Cost 3 vext2 <1,5,3,7>, <3,3,3,3>
+ 2626816514U, // <3,7,3,4>: Cost 3 vext2 <1,5,3,7>, <3,4,5,6>
+ 2638760514U, // <3,7,3,5>: Cost 3 vext2 <3,5,3,7>, <3,5,3,7>
+ 2639424147U, // <3,7,3,6>: Cost 3 vext2 <3,6,3,7>, <3,6,3,7>
+ 2826961920U, // <3,7,3,7>: Cost 3 vuzpr <1,3,5,7>, <1,3,5,7>
+ 2626816798U, // <3,7,3,u>: Cost 3 vext2 <1,5,3,7>, <3,u,1,2>
+ 2582536294U, // <3,7,4,0>: Cost 3 vext1 <5,3,7,4>, LHS
+ 2582537360U, // <3,7,4,1>: Cost 3 vext1 <5,3,7,4>, <1,5,3,7>
+ 2588510138U, // <3,7,4,2>: Cost 3 vext1 <6,3,7,4>, <2,6,3,7>
+ 3700558996U, // <3,7,4,3>: Cost 4 vext2 <1,5,3,7>, <4,3,6,7>
+ 2582539574U, // <3,7,4,4>: Cost 3 vext1 <5,3,7,4>, RHS
+ 1553075510U, // <3,7,4,5>: Cost 2 vext2 <1,5,3,7>, RHS
+ 2588512844U, // <3,7,4,6>: Cost 3 vext1 <6,3,7,4>, <6,3,7,4>
+ 2564625766U, // <3,7,4,7>: Cost 3 vext1 <2,3,7,4>, <7,4,5,6>
+ 1553075753U, // <3,7,4,u>: Cost 2 vext2 <1,5,3,7>, RHS
+ 2732971398U, // <3,7,5,0>: Cost 3 vext3 LHS, <7,5,0,2>
+ 2626817744U, // <3,7,5,1>: Cost 3 vext2 <1,5,3,7>, <5,1,7,3>
+ 3700559649U, // <3,7,5,2>: Cost 4 vext2 <1,5,3,7>, <5,2,7,3>
+ 2626817903U, // <3,7,5,3>: Cost 3 vext2 <1,5,3,7>, <5,3,7,0>
+ 2258728203U, // <3,7,5,4>: Cost 3 vrev <7,3,4,5>
+ 2732971446U, // <3,7,5,5>: Cost 3 vext3 LHS, <7,5,5,5>
+ 2732971457U, // <3,7,5,6>: Cost 3 vext3 LHS, <7,5,6,7>
+ 2826964278U, // <3,7,5,7>: Cost 3 vuzpr <1,3,5,7>, RHS
+ 2826964279U, // <3,7,5,u>: Cost 3 vuzpr <1,3,5,7>, RHS
+ 2732971478U, // <3,7,6,0>: Cost 3 vext3 LHS, <7,6,0,1>
+ 2732971486U, // <3,7,6,1>: Cost 3 vext3 LHS, <7,6,1,0>
+ 2633454074U, // <3,7,6,2>: Cost 3 vext2 <2,6,3,7>, <6,2,7,3>
+ 2633454152U, // <3,7,6,3>: Cost 3 vext2 <2,6,3,7>, <6,3,7,0>
+ 2732971518U, // <3,7,6,4>: Cost 3 vext3 LHS, <7,6,4,5>
+ 2732971526U, // <3,7,6,5>: Cost 3 vext3 LHS, <7,6,5,4>
+ 2732971537U, // <3,7,6,6>: Cost 3 vext3 LHS, <7,6,6,6>
+ 2732971540U, // <3,7,6,7>: Cost 3 vext3 LHS, <7,6,7,0>
+ 2726041124U, // <3,7,6,u>: Cost 3 vext3 <6,u,7,3>, <7,6,u,7>
+ 2570616934U, // <3,7,7,0>: Cost 3 vext1 <3,3,7,7>, LHS
+ 2570617856U, // <3,7,7,1>: Cost 3 vext1 <3,3,7,7>, <1,3,5,7>
+ 2564646635U, // <3,7,7,2>: Cost 3 vext1 <2,3,7,7>, <2,3,7,7>
+ 2570619332U, // <3,7,7,3>: Cost 3 vext1 <3,3,7,7>, <3,3,7,7>
+ 2570620214U, // <3,7,7,4>: Cost 3 vext1 <3,3,7,7>, RHS
+ 2582564726U, // <3,7,7,5>: Cost 3 vext1 <5,3,7,7>, <5,3,7,7>
+ 2588537423U, // <3,7,7,6>: Cost 3 vext1 <6,3,7,7>, <6,3,7,7>
+ 1659229804U, // <3,7,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1659229804U, // <3,7,7,u>: Cost 2 vext3 LHS, <7,7,7,7>
+ 2626819795U, // <3,7,u,0>: Cost 3 vext2 <1,5,3,7>, <u,0,1,2>
+ 1553078062U, // <3,7,u,1>: Cost 2 vext2 <1,5,3,7>, LHS
+ 2626819973U, // <3,7,u,2>: Cost 3 vext2 <1,5,3,7>, <u,2,3,0>
+ 2826961565U, // <3,7,u,3>: Cost 3 vuzpr <1,3,5,7>, LHS
+ 2626820159U, // <3,7,u,4>: Cost 3 vext2 <1,5,3,7>, <u,4,5,6>
+ 1553078426U, // <3,7,u,5>: Cost 2 vext2 <1,5,3,7>, RHS
+ 1595545808U, // <3,7,u,6>: Cost 2 vext2 <u,6,3,7>, <u,6,3,7>
+ 1659229804U, // <3,7,u,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1553078629U, // <3,7,u,u>: Cost 2 vext2 <1,5,3,7>, LHS
+ 1611448320U, // <3,u,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
+ 1611896531U, // <3,u,0,1>: Cost 2 vext3 LHS, <u,0,1,2>
+ 1659672284U, // <3,u,0,2>: Cost 2 vext3 LHS, <u,0,2,2>
+ 1616099045U, // <3,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
+ 2685638381U, // <3,u,0,4>: Cost 3 vext3 LHS, <u,0,4,1>
+ 1663874806U, // <3,u,0,5>: Cost 2 vext3 LHS, <u,0,5,1>
+ 1663874816U, // <3,u,0,6>: Cost 2 vext3 LHS, <u,0,6,2>
+ 2960313672U, // <3,u,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
+ 1611896594U, // <3,u,0,u>: Cost 2 vext3 LHS, <u,0,u,2>
+ 1549763324U, // <3,u,1,0>: Cost 2 vext2 <1,0,3,u>, <1,0,3,u>
+ 1550426957U, // <3,u,1,1>: Cost 2 vext2 <1,1,3,u>, <1,1,3,u>
+ 537712430U, // <3,u,1,2>: Cost 1 vext3 LHS, LHS
+ 1616541495U, // <3,u,1,3>: Cost 2 vext3 LHS, <u,1,3,3>
+ 1490930998U, // <3,u,1,4>: Cost 2 vext1 <2,3,u,1>, RHS
+ 1553081489U, // <3,u,1,5>: Cost 2 vext2 <1,5,3,u>, <1,5,3,u>
+ 2627486946U, // <3,u,1,6>: Cost 3 vext2 <1,6,3,u>, <1,6,3,u>
+ 1659230043U, // <3,u,1,7>: Cost 2 vext3 LHS, <u,1,7,3>
+ 537712484U, // <3,u,1,u>: Cost 1 vext3 LHS, LHS
+ 1611890852U, // <3,u,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2624833102U, // <3,u,2,1>: Cost 3 vext2 <1,2,3,u>, <2,1,u,3>
+ 1557063287U, // <3,u,2,2>: Cost 2 vext2 <2,2,3,u>, <2,2,3,u>
+ 1616099205U, // <3,u,2,3>: Cost 2 vext3 LHS, <u,2,3,0>
+ 1611890892U, // <3,u,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2689841054U, // <3,u,2,5>: Cost 3 vext3 LHS, <u,2,5,7>
+ 1559717819U, // <3,u,2,6>: Cost 2 vext2 <2,6,3,u>, <2,6,3,u>
+ 1659230124U, // <3,u,2,7>: Cost 2 vext3 LHS, <u,2,7,3>
+ 1616541618U, // <3,u,2,u>: Cost 2 vext3 LHS, <u,2,u,0>
+ 1611896764U, // <3,u,3,0>: Cost 2 vext3 LHS, <u,3,0,1>
+ 1484973079U, // <3,u,3,1>: Cost 2 vext1 <1,3,u,3>, <1,3,u,3>
+ 2685638607U, // <3,u,3,2>: Cost 3 vext3 LHS, <u,3,2,2>
+ 336380006U, // <3,u,3,3>: Cost 1 vdup3 LHS
+ 1611896804U, // <3,u,3,4>: Cost 2 vext3 LHS, <u,3,4,5>
+ 1616541679U, // <3,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
+ 2690283512U, // <3,u,3,6>: Cost 3 vext3 LHS, <u,3,6,7>
+ 2959674696U, // <3,u,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
+ 336380006U, // <3,u,3,u>: Cost 1 vdup3 LHS
+ 2558722150U, // <3,u,4,0>: Cost 3 vext1 <1,3,u,4>, LHS
+ 1659672602U, // <3,u,4,1>: Cost 2 vext3 LHS, <u,4,1,5>
+ 1659672612U, // <3,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
+ 2689841196U, // <3,u,4,3>: Cost 3 vext3 LHS, <u,4,3,5>
+ 1659227344U, // <3,u,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
+ 1611896895U, // <3,u,4,5>: Cost 2 vext3 LHS, <u,4,5,6>
+ 1663875144U, // <3,u,4,6>: Cost 2 vext3 LHS, <u,4,6,6>
+ 1659230289U, // <3,u,4,7>: Cost 2 vext3 LHS, <u,4,7,6>
+ 1611896922U, // <3,u,4,u>: Cost 2 vext3 LHS, <u,4,u,6>
+ 1490960486U, // <3,u,5,0>: Cost 2 vext1 <2,3,u,5>, LHS
+ 2689841261U, // <3,u,5,1>: Cost 3 vext3 LHS, <u,5,1,7>
+ 1490962162U, // <3,u,5,2>: Cost 2 vext1 <2,3,u,5>, <2,3,u,5>
+ 1616541823U, // <3,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
+ 1490963766U, // <3,u,5,4>: Cost 2 vext1 <2,3,u,5>, RHS
+ 1659228164U, // <3,u,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
+ 537712794U, // <3,u,5,6>: Cost 1 vext3 LHS, RHS
+ 1659230371U, // <3,u,5,7>: Cost 2 vext3 LHS, <u,5,7,7>
+ 537712812U, // <3,u,5,u>: Cost 1 vext3 LHS, RHS
+ 2689841327U, // <3,u,6,0>: Cost 3 vext3 LHS, <u,6,0,1>
+ 2558739482U, // <3,u,6,1>: Cost 3 vext1 <1,3,u,6>, <1,3,u,6>
+ 2689841351U, // <3,u,6,2>: Cost 3 vext3 LHS, <u,6,2,7>
+ 1616099536U, // <3,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
+ 1659227508U, // <3,u,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
+ 2690283746U, // <3,u,6,5>: Cost 3 vext3 LHS, <u,6,5,7>
+ 1659228984U, // <3,u,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
+ 1659230445U, // <3,u,6,7>: Cost 2 vext3 LHS, <u,6,7,0>
+ 1616099581U, // <3,u,6,u>: Cost 2 vext3 LHS, <u,6,u,7>
+ 1485004902U, // <3,u,7,0>: Cost 2 vext1 <1,3,u,7>, LHS
+ 1485005851U, // <3,u,7,1>: Cost 2 vext1 <1,3,u,7>, <1,3,u,7>
+ 2558748264U, // <3,u,7,2>: Cost 3 vext1 <1,3,u,7>, <2,2,2,2>
+ 3095397021U, // <3,u,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
+ 1485008182U, // <3,u,7,4>: Cost 2 vext1 <1,3,u,7>, RHS
+ 1659228328U, // <3,u,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
+ 2722060599U, // <3,u,7,6>: Cost 3 vext3 <6,2,7,3>, <u,7,6,2>
+ 1659229804U, // <3,u,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
+ 1485010734U, // <3,u,7,u>: Cost 2 vext1 <1,3,u,7>, LHS
+ 1616099665U, // <3,u,u,0>: Cost 2 vext3 LHS, <u,u,0,1>
+ 1611897179U, // <3,u,u,1>: Cost 2 vext3 LHS, <u,u,1,2>
+ 537712997U, // <3,u,u,2>: Cost 1 vext3 LHS, LHS
+ 336380006U, // <3,u,u,3>: Cost 1 vdup3 LHS
+ 1616099705U, // <3,u,u,4>: Cost 2 vext3 LHS, <u,u,4,5>
+ 1611897219U, // <3,u,u,5>: Cost 2 vext3 LHS, <u,u,5,6>
+ 537713037U, // <3,u,u,6>: Cost 1 vext3 LHS, RHS
+ 1659230607U, // <3,u,u,7>: Cost 2 vext3 LHS, <u,u,7,0>
+ 537713051U, // <3,u,u,u>: Cost 1 vext3 LHS, LHS
+ 2691907584U, // <4,0,0,0>: Cost 3 vext3 <1,2,3,4>, <0,0,0,0>
+ 2691907594U, // <4,0,0,1>: Cost 3 vext3 <1,2,3,4>, <0,0,1,1>
+ 2691907604U, // <4,0,0,2>: Cost 3 vext3 <1,2,3,4>, <0,0,2,2>
+ 3709862144U, // <4,0,0,3>: Cost 4 vext2 <3,1,4,0>, <0,3,1,4>
+ 2684682280U, // <4,0,0,4>: Cost 3 vext3 <0,0,4,4>, <0,0,4,4>
+ 3694600633U, // <4,0,0,5>: Cost 4 vext2 <0,5,4,0>, <0,5,4,0>
+ 3291431290U, // <4,0,0,6>: Cost 4 vrev <0,4,6,0>
+ 3668342067U, // <4,0,0,7>: Cost 4 vext1 <7,4,0,0>, <7,4,0,0>
+ 2691907657U, // <4,0,0,u>: Cost 3 vext3 <1,2,3,4>, <0,0,u,1>
+ 2570715238U, // <4,0,1,0>: Cost 3 vext1 <3,4,0,1>, LHS
+ 2570716058U, // <4,0,1,1>: Cost 3 vext1 <3,4,0,1>, <1,2,3,4>
+ 1618165862U, // <4,0,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2570717648U, // <4,0,1,3>: Cost 3 vext1 <3,4,0,1>, <3,4,0,1>
+ 2570718518U, // <4,0,1,4>: Cost 3 vext1 <3,4,0,1>, RHS
+ 2594607206U, // <4,0,1,5>: Cost 3 vext1 <7,4,0,1>, <5,6,7,4>
+ 3662377563U, // <4,0,1,6>: Cost 4 vext1 <6,4,0,1>, <6,4,0,1>
+ 2594608436U, // <4,0,1,7>: Cost 3 vext1 <7,4,0,1>, <7,4,0,1>
+ 1618165916U, // <4,0,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2685714598U, // <4,0,2,0>: Cost 3 vext3 <0,2,0,4>, <0,2,0,4>
+ 3759530159U, // <4,0,2,1>: Cost 4 vext3 <0,2,1,4>, <0,2,1,4>
+ 2685862072U, // <4,0,2,2>: Cost 3 vext3 <0,2,2,4>, <0,2,2,4>
+ 2631476937U, // <4,0,2,3>: Cost 3 vext2 <2,3,4,0>, <2,3,4,0>
+ 2685714636U, // <4,0,2,4>: Cost 3 vext3 <0,2,0,4>, <0,2,4,6>
+ 3765649622U, // <4,0,2,5>: Cost 4 vext3 <1,2,3,4>, <0,2,5,7>
+ 2686157020U, // <4,0,2,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
+ 3668358453U, // <4,0,2,7>: Cost 4 vext1 <7,4,0,2>, <7,4,0,2>
+ 2686304494U, // <4,0,2,u>: Cost 3 vext3 <0,2,u,4>, <0,2,u,4>
+ 3632529510U, // <4,0,3,0>: Cost 4 vext1 <1,4,0,3>, LHS
+ 2686451968U, // <4,0,3,1>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
+ 2686525705U, // <4,0,3,2>: Cost 3 vext3 <0,3,2,4>, <0,3,2,4>
+ 3760341266U, // <4,0,3,3>: Cost 4 vext3 <0,3,3,4>, <0,3,3,4>
+ 3632532790U, // <4,0,3,4>: Cost 4 vext1 <1,4,0,3>, RHS
+ 3913254606U, // <4,0,3,5>: Cost 4 vuzpr <3,4,5,0>, <2,3,4,5>
+ 3705219740U, // <4,0,3,6>: Cost 4 vext2 <2,3,4,0>, <3,6,4,7>
+ 3713845990U, // <4,0,3,7>: Cost 4 vext2 <3,7,4,0>, <3,7,4,0>
+ 2686451968U, // <4,0,3,u>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
+ 2552823910U, // <4,0,4,0>: Cost 3 vext1 <0,4,0,4>, LHS
+ 2691907922U, // <4,0,4,1>: Cost 3 vext3 <1,2,3,4>, <0,4,1,5>
+ 2691907932U, // <4,0,4,2>: Cost 3 vext3 <1,2,3,4>, <0,4,2,6>
+ 3626567830U, // <4,0,4,3>: Cost 4 vext1 <0,4,0,4>, <3,0,1,2>
+ 2552827190U, // <4,0,4,4>: Cost 3 vext1 <0,4,0,4>, RHS
+ 2631478582U, // <4,0,4,5>: Cost 3 vext2 <2,3,4,0>, RHS
+ 3626570017U, // <4,0,4,6>: Cost 4 vext1 <0,4,0,4>, <6,0,1,2>
+ 3668374839U, // <4,0,4,7>: Cost 4 vext1 <7,4,0,4>, <7,4,0,4>
+ 2552829742U, // <4,0,4,u>: Cost 3 vext1 <0,4,0,4>, LHS
+ 2558804070U, // <4,0,5,0>: Cost 3 vext1 <1,4,0,5>, LHS
+ 1839644774U, // <4,0,5,1>: Cost 2 vzipl RHS, LHS
+ 2913386660U, // <4,0,5,2>: Cost 3 vzipl RHS, <0,2,0,2>
+ 2570750420U, // <4,0,5,3>: Cost 3 vext1 <3,4,0,5>, <3,4,0,5>
+ 2558807350U, // <4,0,5,4>: Cost 3 vext1 <1,4,0,5>, RHS
+ 3987128750U, // <4,0,5,5>: Cost 4 vzipl RHS, <0,5,2,7>
+ 3987128822U, // <4,0,5,6>: Cost 4 vzipl RHS, <0,6,1,7>
+ 2594641208U, // <4,0,5,7>: Cost 3 vext1 <7,4,0,5>, <7,4,0,5>
+ 1839645341U, // <4,0,5,u>: Cost 2 vzipl RHS, LHS
+ 2552840294U, // <4,0,6,0>: Cost 3 vext1 <0,4,0,6>, LHS
+ 3047604234U, // <4,0,6,1>: Cost 3 vtrnl RHS, <0,0,1,1>
+ 1973862502U, // <4,0,6,2>: Cost 2 vtrnl RHS, LHS
+ 2570758613U, // <4,0,6,3>: Cost 3 vext1 <3,4,0,6>, <3,4,0,6>
+ 2552843574U, // <4,0,6,4>: Cost 3 vext1 <0,4,0,6>, RHS
+ 2217664887U, // <4,0,6,5>: Cost 3 vrev <0,4,5,6>
+ 3662418528U, // <4,0,6,6>: Cost 4 vext1 <6,4,0,6>, <6,4,0,6>
+ 2658022257U, // <4,0,6,7>: Cost 3 vext2 <6,7,4,0>, <6,7,4,0>
+ 1973862556U, // <4,0,6,u>: Cost 2 vtrnl RHS, LHS
+ 3731764218U, // <4,0,7,0>: Cost 4 vext2 <6,7,4,0>, <7,0,1,2>
+ 3988324454U, // <4,0,7,1>: Cost 4 vzipl <4,7,5,0>, LHS
+ 4122034278U, // <4,0,7,2>: Cost 4 vtrnl <4,6,7,1>, LHS
+ 3735082246U, // <4,0,7,3>: Cost 4 vext2 <7,3,4,0>, <7,3,4,0>
+ 3731764536U, // <4,0,7,4>: Cost 4 vext2 <6,7,4,0>, <7,4,0,5>
+ 3937145718U, // <4,0,7,5>: Cost 4 vuzpr <7,4,5,0>, <6,7,4,5>
+ 3737073145U, // <4,0,7,6>: Cost 4 vext2 <7,6,4,0>, <7,6,4,0>
+ 3731764844U, // <4,0,7,7>: Cost 4 vext2 <6,7,4,0>, <7,7,7,7>
+ 4122034332U, // <4,0,7,u>: Cost 4 vtrnl <4,6,7,1>, LHS
+ 2552856678U, // <4,0,u,0>: Cost 3 vext1 <0,4,0,u>, LHS
+ 1841635430U, // <4,0,u,1>: Cost 2 vzipl RHS, LHS
+ 1618166429U, // <4,0,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2570774999U, // <4,0,u,3>: Cost 3 vext1 <3,4,0,u>, <3,4,0,u>
+ 2552859958U, // <4,0,u,4>: Cost 3 vext1 <0,4,0,u>, RHS
+ 2631481498U, // <4,0,u,5>: Cost 3 vext2 <2,3,4,0>, RHS
+ 2686157020U, // <4,0,u,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
+ 2594665787U, // <4,0,u,7>: Cost 3 vext1 <7,4,0,u>, <7,4,0,u>
+ 1618166483U, // <4,0,u,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2617548837U, // <4,1,0,0>: Cost 3 vext2 <0,0,4,1>, <0,0,4,1>
+ 2622857318U, // <4,1,0,1>: Cost 3 vext2 <0,u,4,1>, LHS
+ 3693281484U, // <4,1,0,2>: Cost 4 vext2 <0,3,4,1>, <0,2,4,6>
+ 2691908342U, // <4,1,0,3>: Cost 3 vext3 <1,2,3,4>, <1,0,3,2>
+ 2622857554U, // <4,1,0,4>: Cost 3 vext2 <0,u,4,1>, <0,4,1,5>
+ 3764470538U, // <4,1,0,5>: Cost 4 vext3 <1,0,5,4>, <1,0,5,4>
+ 3695272459U, // <4,1,0,6>: Cost 4 vext2 <0,6,4,1>, <0,6,4,1>
+ 3733094980U, // <4,1,0,7>: Cost 4 vext2 <7,0,4,1>, <0,7,1,4>
+ 2622857885U, // <4,1,0,u>: Cost 3 vext2 <0,u,4,1>, LHS
+ 3696599798U, // <4,1,1,0>: Cost 4 vext2 <0,u,4,1>, <1,0,3,2>
+ 2691097399U, // <4,1,1,1>: Cost 3 vext3 <1,1,1,4>, <1,1,1,4>
+ 2631484314U, // <4,1,1,2>: Cost 3 vext2 <2,3,4,1>, <1,2,3,4>
+ 2691908424U, // <4,1,1,3>: Cost 3 vext3 <1,2,3,4>, <1,1,3,3>
+ 3696600125U, // <4,1,1,4>: Cost 4 vext2 <0,u,4,1>, <1,4,3,5>
+ 3696600175U, // <4,1,1,5>: Cost 4 vext2 <0,u,4,1>, <1,5,0,1>
+ 3696600307U, // <4,1,1,6>: Cost 4 vext2 <0,u,4,1>, <1,6,5,7>
+ 3668423997U, // <4,1,1,7>: Cost 4 vext1 <7,4,1,1>, <7,4,1,1>
+ 2691908469U, // <4,1,1,u>: Cost 3 vext3 <1,2,3,4>, <1,1,u,3>
+ 2570797158U, // <4,1,2,0>: Cost 3 vext1 <3,4,1,2>, LHS
+ 2570797978U, // <4,1,2,1>: Cost 3 vext1 <3,4,1,2>, <1,2,3,4>
+ 3696600680U, // <4,1,2,2>: Cost 4 vext2 <0,u,4,1>, <2,2,2,2>
+ 1618166682U, // <4,1,2,3>: Cost 2 vext3 <1,2,3,4>, <1,2,3,4>
+ 2570800438U, // <4,1,2,4>: Cost 3 vext1 <3,4,1,2>, RHS
+ 3765650347U, // <4,1,2,5>: Cost 4 vext3 <1,2,3,4>, <1,2,5,3>
+ 3696601018U, // <4,1,2,6>: Cost 4 vext2 <0,u,4,1>, <2,6,3,7>
+ 3668432190U, // <4,1,2,7>: Cost 4 vext1 <7,4,1,2>, <7,4,1,2>
+ 1618535367U, // <4,1,2,u>: Cost 2 vext3 <1,2,u,4>, <1,2,u,4>
+ 2564833382U, // <4,1,3,0>: Cost 3 vext1 <2,4,1,3>, LHS
+ 2691908568U, // <4,1,3,1>: Cost 3 vext3 <1,2,3,4>, <1,3,1,3>
+ 2691908578U, // <4,1,3,2>: Cost 3 vext3 <1,2,3,4>, <1,3,2,4>
+ 2692572139U, // <4,1,3,3>: Cost 3 vext3 <1,3,3,4>, <1,3,3,4>
+ 2564836662U, // <4,1,3,4>: Cost 3 vext1 <2,4,1,3>, RHS
+ 2691908608U, // <4,1,3,5>: Cost 3 vext3 <1,2,3,4>, <1,3,5,7>
+ 2588725862U, // <4,1,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 3662468090U, // <4,1,3,7>: Cost 4 vext1 <6,4,1,3>, <7,0,1,2>
+ 2691908631U, // <4,1,3,u>: Cost 3 vext3 <1,2,3,4>, <1,3,u,3>
+ 3760194590U, // <4,1,4,0>: Cost 4 vext3 <0,3,1,4>, <1,4,0,1>
+ 3693947874U, // <4,1,4,1>: Cost 4 vext2 <0,4,4,1>, <4,1,5,0>
+ 3765650484U, // <4,1,4,2>: Cost 4 vext3 <1,2,3,4>, <1,4,2,5>
+ 3113877606U, // <4,1,4,3>: Cost 3 vtrnr <4,4,4,4>, LHS
+ 3760194630U, // <4,1,4,4>: Cost 4 vext3 <0,3,1,4>, <1,4,4,5>
+ 2622860598U, // <4,1,4,5>: Cost 3 vext2 <0,u,4,1>, RHS
+ 3297436759U, // <4,1,4,6>: Cost 4 vrev <1,4,6,4>
+ 3800007772U, // <4,1,4,7>: Cost 4 vext3 <7,0,1,4>, <1,4,7,0>
+ 2622860841U, // <4,1,4,u>: Cost 3 vext2 <0,u,4,1>, RHS
+ 1479164006U, // <4,1,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
+ 2552906486U, // <4,1,5,1>: Cost 3 vext1 <0,4,1,5>, <1,0,3,2>
+ 2552907299U, // <4,1,5,2>: Cost 3 vext1 <0,4,1,5>, <2,1,3,5>
+ 2552907926U, // <4,1,5,3>: Cost 3 vext1 <0,4,1,5>, <3,0,1,2>
+ 1479167286U, // <4,1,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
+ 2913387664U, // <4,1,5,5>: Cost 3 vzipl RHS, <1,5,3,7>
+ 2600686074U, // <4,1,5,6>: Cost 3 vext1 <u,4,1,5>, <6,2,7,3>
+ 2600686586U, // <4,1,5,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
+ 1479169838U, // <4,1,5,u>: Cost 2 vext1 <0,4,1,5>, LHS
+ 2552914022U, // <4,1,6,0>: Cost 3 vext1 <0,4,1,6>, LHS
+ 2558886708U, // <4,1,6,1>: Cost 3 vext1 <1,4,1,6>, <1,1,1,1>
+ 4028205206U, // <4,1,6,2>: Cost 4 vzipr <0,2,4,6>, <3,0,1,2>
+ 3089858662U, // <4,1,6,3>: Cost 3 vtrnr <0,4,2,6>, LHS
+ 2552917302U, // <4,1,6,4>: Cost 3 vext1 <0,4,1,6>, RHS
+ 2223637584U, // <4,1,6,5>: Cost 3 vrev <1,4,5,6>
+ 4121347081U, // <4,1,6,6>: Cost 4 vtrnl RHS, <1,3,6,7>
+ 3721155406U, // <4,1,6,7>: Cost 4 vext2 <5,0,4,1>, <6,7,0,1>
+ 2552919854U, // <4,1,6,u>: Cost 3 vext1 <0,4,1,6>, LHS
+ 2659357716U, // <4,1,7,0>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
+ 3733763173U, // <4,1,7,1>: Cost 4 vext2 <7,1,4,1>, <7,1,4,1>
+ 3734426806U, // <4,1,7,2>: Cost 4 vext2 <7,2,4,1>, <7,2,4,1>
+ 2695226671U, // <4,1,7,3>: Cost 3 vext3 <1,7,3,4>, <1,7,3,4>
+ 3721155942U, // <4,1,7,4>: Cost 4 vext2 <5,0,4,1>, <7,4,5,6>
+ 3721155976U, // <4,1,7,5>: Cost 4 vext2 <5,0,4,1>, <7,5,0,4>
+ 3662500458U, // <4,1,7,6>: Cost 4 vext1 <6,4,1,7>, <6,4,1,7>
+ 3721156204U, // <4,1,7,7>: Cost 4 vext2 <5,0,4,1>, <7,7,7,7>
+ 2659357716U, // <4,1,7,u>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
+ 1479188582U, // <4,1,u,0>: Cost 2 vext1 <0,4,1,u>, LHS
+ 2552931062U, // <4,1,u,1>: Cost 3 vext1 <0,4,1,u>, <1,0,3,2>
+ 2552931944U, // <4,1,u,2>: Cost 3 vext1 <0,4,1,u>, <2,2,2,2>
+ 1622148480U, // <4,1,u,3>: Cost 2 vext3 <1,u,3,4>, <1,u,3,4>
+ 1479191862U, // <4,1,u,4>: Cost 2 vext1 <0,4,1,u>, RHS
+ 2622863514U, // <4,1,u,5>: Cost 3 vext2 <0,u,4,1>, RHS
+ 2588725862U, // <4,1,u,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 2600686586U, // <4,1,u,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
+ 1479194414U, // <4,1,u,u>: Cost 2 vext1 <0,4,1,u>, LHS
+ 2617557030U, // <4,2,0,0>: Cost 3 vext2 <0,0,4,2>, <0,0,4,2>
+ 2622865510U, // <4,2,0,1>: Cost 3 vext2 <0,u,4,2>, LHS
+ 2622865612U, // <4,2,0,2>: Cost 3 vext2 <0,u,4,2>, <0,2,4,6>
+ 3693289753U, // <4,2,0,3>: Cost 4 vext2 <0,3,4,2>, <0,3,4,2>
+ 2635473244U, // <4,2,0,4>: Cost 3 vext2 <3,0,4,2>, <0,4,2,6>
+ 3765650918U, // <4,2,0,5>: Cost 4 vext3 <1,2,3,4>, <2,0,5,7>
+ 2696775148U, // <4,2,0,6>: Cost 3 vext3 <2,0,6,4>, <2,0,6,4>
+ 3695944285U, // <4,2,0,7>: Cost 4 vext2 <0,7,4,2>, <0,7,4,2>
+ 2622866077U, // <4,2,0,u>: Cost 3 vext2 <0,u,4,2>, LHS
+ 3696607990U, // <4,2,1,0>: Cost 4 vext2 <0,u,4,2>, <1,0,3,2>
+ 3696608052U, // <4,2,1,1>: Cost 4 vext2 <0,u,4,2>, <1,1,1,1>
+ 3696608150U, // <4,2,1,2>: Cost 4 vext2 <0,u,4,2>, <1,2,3,0>
+ 3895574630U, // <4,2,1,3>: Cost 4 vuzpr <0,4,u,2>, LHS
+ 2691909162U, // <4,2,1,4>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
+ 3696608400U, // <4,2,1,5>: Cost 4 vext2 <0,u,4,2>, <1,5,3,7>
+ 3760784956U, // <4,2,1,6>: Cost 4 vext3 <0,4,0,4>, <2,1,6,3>
+ 3773908549U, // <4,2,1,7>: Cost 5 vext3 <2,5,7,4>, <2,1,7,3>
+ 2691909162U, // <4,2,1,u>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
+ 3696608748U, // <4,2,2,0>: Cost 4 vext2 <0,u,4,2>, <2,0,6,4>
+ 3696608828U, // <4,2,2,1>: Cost 4 vext2 <0,u,4,2>, <2,1,6,3>
+ 2691909224U, // <4,2,2,2>: Cost 3 vext3 <1,2,3,4>, <2,2,2,2>
+ 2691909234U, // <4,2,2,3>: Cost 3 vext3 <1,2,3,4>, <2,2,3,3>
+ 3759605368U, // <4,2,2,4>: Cost 4 vext3 <0,2,2,4>, <2,2,4,0>
+ 3696609156U, // <4,2,2,5>: Cost 4 vext2 <0,u,4,2>, <2,5,6,7>
+ 3760785040U, // <4,2,2,6>: Cost 4 vext3 <0,4,0,4>, <2,2,6,6>
+ 3668505927U, // <4,2,2,7>: Cost 4 vext1 <7,4,2,2>, <7,4,2,2>
+ 2691909279U, // <4,2,2,u>: Cost 3 vext3 <1,2,3,4>, <2,2,u,3>
+ 2691909286U, // <4,2,3,0>: Cost 3 vext3 <1,2,3,4>, <2,3,0,1>
+ 3764840111U, // <4,2,3,1>: Cost 4 vext3 <1,1,1,4>, <2,3,1,1>
+ 3765651129U, // <4,2,3,2>: Cost 4 vext3 <1,2,3,4>, <2,3,2,2>
+ 2698544836U, // <4,2,3,3>: Cost 3 vext3 <2,3,3,4>, <2,3,3,4>
+ 2685863630U, // <4,2,3,4>: Cost 3 vext3 <0,2,2,4>, <2,3,4,5>
+ 2698692310U, // <4,2,3,5>: Cost 3 vext3 <2,3,5,4>, <2,3,5,4>
+ 3772507871U, // <4,2,3,6>: Cost 4 vext3 <2,3,6,4>, <2,3,6,4>
+ 2698839784U, // <4,2,3,7>: Cost 3 vext3 <2,3,7,4>, <2,3,7,4>
+ 2691909358U, // <4,2,3,u>: Cost 3 vext3 <1,2,3,4>, <2,3,u,1>
+ 2564915302U, // <4,2,4,0>: Cost 3 vext1 <2,4,2,4>, LHS
+ 2564916122U, // <4,2,4,1>: Cost 3 vext1 <2,4,2,4>, <1,2,3,4>
+ 2564917004U, // <4,2,4,2>: Cost 3 vext1 <2,4,2,4>, <2,4,2,4>
+ 2699208469U, // <4,2,4,3>: Cost 3 vext3 <2,4,3,4>, <2,4,3,4>
+ 2564918582U, // <4,2,4,4>: Cost 3 vext1 <2,4,2,4>, RHS
+ 2622868790U, // <4,2,4,5>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2229667632U, // <4,2,4,6>: Cost 3 vrev <2,4,6,4>
+ 3800082229U, // <4,2,4,7>: Cost 4 vext3 <7,0,2,4>, <2,4,7,0>
+ 2622869033U, // <4,2,4,u>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2552979558U, // <4,2,5,0>: Cost 3 vext1 <0,4,2,5>, LHS
+ 2558952342U, // <4,2,5,1>: Cost 3 vext1 <1,4,2,5>, <1,2,3,0>
+ 2564925032U, // <4,2,5,2>: Cost 3 vext1 <2,4,2,5>, <2,2,2,2>
+ 2967060582U, // <4,2,5,3>: Cost 3 vzipr <2,3,4,5>, LHS
+ 2552982838U, // <4,2,5,4>: Cost 3 vext1 <0,4,2,5>, RHS
+ 3987130190U, // <4,2,5,5>: Cost 4 vzipl RHS, <2,5,0,7>
+ 2913388474U, // <4,2,5,6>: Cost 3 vzipl RHS, <2,6,3,7>
+ 3895577910U, // <4,2,5,7>: Cost 4 vuzpr <0,4,u,2>, RHS
+ 2552985390U, // <4,2,5,u>: Cost 3 vext1 <0,4,2,5>, LHS
+ 1479245926U, // <4,2,6,0>: Cost 2 vext1 <0,4,2,6>, LHS
+ 2552988406U, // <4,2,6,1>: Cost 3 vext1 <0,4,2,6>, <1,0,3,2>
+ 2552989288U, // <4,2,6,2>: Cost 3 vext1 <0,4,2,6>, <2,2,2,2>
+ 2954461286U, // <4,2,6,3>: Cost 3 vzipr <0,2,4,6>, LHS
+ 1479249206U, // <4,2,6,4>: Cost 2 vext1 <0,4,2,6>, RHS
+ 2229610281U, // <4,2,6,5>: Cost 3 vrev <2,4,5,6>
+ 2600767994U, // <4,2,6,6>: Cost 3 vext1 <u,4,2,6>, <6,2,7,3>
+ 2600768506U, // <4,2,6,7>: Cost 3 vext1 <u,4,2,6>, <7,0,1,2>
+ 1479251758U, // <4,2,6,u>: Cost 2 vext1 <0,4,2,6>, LHS
+ 2659365909U, // <4,2,7,0>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
+ 3733771366U, // <4,2,7,1>: Cost 4 vext2 <7,1,4,2>, <7,1,4,2>
+ 3734434999U, // <4,2,7,2>: Cost 4 vext2 <7,2,4,2>, <7,2,4,2>
+ 2701199368U, // <4,2,7,3>: Cost 3 vext3 <2,7,3,4>, <2,7,3,4>
+ 4175774618U, // <4,2,7,4>: Cost 4 vtrnr <2,4,5,7>, <1,2,3,4>
+ 3303360298U, // <4,2,7,5>: Cost 4 vrev <2,4,5,7>
+ 3727136217U, // <4,2,7,6>: Cost 4 vext2 <6,0,4,2>, <7,6,0,4>
+ 3727136364U, // <4,2,7,7>: Cost 4 vext2 <6,0,4,2>, <7,7,7,7>
+ 2659365909U, // <4,2,7,u>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
+ 1479262310U, // <4,2,u,0>: Cost 2 vext1 <0,4,2,u>, LHS
+ 2553004790U, // <4,2,u,1>: Cost 3 vext1 <0,4,2,u>, <1,0,3,2>
+ 2553005672U, // <4,2,u,2>: Cost 3 vext1 <0,4,2,u>, <2,2,2,2>
+ 2954477670U, // <4,2,u,3>: Cost 3 vzipr <0,2,4,u>, LHS
+ 1479265590U, // <4,2,u,4>: Cost 2 vext1 <0,4,2,u>, RHS
+ 2622871706U, // <4,2,u,5>: Cost 3 vext2 <0,u,4,2>, RHS
+ 2229700404U, // <4,2,u,6>: Cost 3 vrev <2,4,6,u>
+ 2600784890U, // <4,2,u,7>: Cost 3 vext1 <u,4,2,u>, <7,0,1,2>
+ 1479268142U, // <4,2,u,u>: Cost 2 vext1 <0,4,2,u>, LHS
+ 3765651595U, // <4,3,0,0>: Cost 4 vext3 <1,2,3,4>, <3,0,0,0>
+ 2691909782U, // <4,3,0,1>: Cost 3 vext3 <1,2,3,4>, <3,0,1,2>
+ 2702452897U, // <4,3,0,2>: Cost 3 vext3 <3,0,2,4>, <3,0,2,4>
+ 3693297946U, // <4,3,0,3>: Cost 4 vext2 <0,3,4,3>, <0,3,4,3>
+ 3760711856U, // <4,3,0,4>: Cost 4 vext3 <0,3,u,4>, <3,0,4,1>
+ 2235533820U, // <4,3,0,5>: Cost 3 vrev <3,4,5,0>
+ 3309349381U, // <4,3,0,6>: Cost 4 vrev <3,4,6,0>
+ 3668563278U, // <4,3,0,7>: Cost 4 vext1 <7,4,3,0>, <7,4,3,0>
+ 2691909845U, // <4,3,0,u>: Cost 3 vext3 <1,2,3,4>, <3,0,u,2>
+ 2235173328U, // <4,3,1,0>: Cost 3 vrev <3,4,0,1>
+ 3764840678U, // <4,3,1,1>: Cost 4 vext3 <1,1,1,4>, <3,1,1,1>
+ 2630173594U, // <4,3,1,2>: Cost 3 vext2 <2,1,4,3>, <1,2,3,4>
+ 2703190267U, // <4,3,1,3>: Cost 3 vext3 <3,1,3,4>, <3,1,3,4>
+ 3760195840U, // <4,3,1,4>: Cost 4 vext3 <0,3,1,4>, <3,1,4,0>
+ 3765651724U, // <4,3,1,5>: Cost 4 vext3 <1,2,3,4>, <3,1,5,3>
+ 3309357574U, // <4,3,1,6>: Cost 4 vrev <3,4,6,1>
+ 3769633054U, // <4,3,1,7>: Cost 4 vext3 <1,u,3,4>, <3,1,7,3>
+ 2703558952U, // <4,3,1,u>: Cost 3 vext3 <3,1,u,4>, <3,1,u,4>
+ 3626770534U, // <4,3,2,0>: Cost 4 vext1 <0,4,3,2>, LHS
+ 2630174250U, // <4,3,2,1>: Cost 3 vext2 <2,1,4,3>, <2,1,4,3>
+ 3765651777U, // <4,3,2,2>: Cost 4 vext3 <1,2,3,4>, <3,2,2,2>
+ 2703853900U, // <4,3,2,3>: Cost 3 vext3 <3,2,3,4>, <3,2,3,4>
+ 3626773814U, // <4,3,2,4>: Cost 4 vext1 <0,4,3,2>, RHS
+ 2704001374U, // <4,3,2,5>: Cost 3 vext3 <3,2,5,4>, <3,2,5,4>
+ 3765651814U, // <4,3,2,6>: Cost 4 vext3 <1,2,3,4>, <3,2,6,3>
+ 3769633135U, // <4,3,2,7>: Cost 4 vext3 <1,u,3,4>, <3,2,7,3>
+ 2634819681U, // <4,3,2,u>: Cost 3 vext2 <2,u,4,3>, <2,u,4,3>
+ 3765651839U, // <4,3,3,0>: Cost 4 vext3 <1,2,3,4>, <3,3,0,1>
+ 3765651848U, // <4,3,3,1>: Cost 4 vext3 <1,2,3,4>, <3,3,1,1>
+ 3710552404U, // <4,3,3,2>: Cost 4 vext2 <3,2,4,3>, <3,2,4,3>
+ 2691910044U, // <4,3,3,3>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
+ 2704591270U, // <4,3,3,4>: Cost 3 vext3 <3,3,4,4>, <3,3,4,4>
+ 3769633202U, // <4,3,3,5>: Cost 4 vext3 <1,u,3,4>, <3,3,5,7>
+ 3703917212U, // <4,3,3,6>: Cost 4 vext2 <2,1,4,3>, <3,6,4,7>
+ 3769633220U, // <4,3,3,7>: Cost 4 vext3 <1,u,3,4>, <3,3,7,7>
+ 2691910044U, // <4,3,3,u>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
+ 2691910096U, // <4,3,4,0>: Cost 3 vext3 <1,2,3,4>, <3,4,0,1>
+ 2691910106U, // <4,3,4,1>: Cost 3 vext3 <1,2,3,4>, <3,4,1,2>
+ 2564990741U, // <4,3,4,2>: Cost 3 vext1 <2,4,3,4>, <2,4,3,4>
+ 3765651946U, // <4,3,4,3>: Cost 4 vext3 <1,2,3,4>, <3,4,3,0>
+ 2691910136U, // <4,3,4,4>: Cost 3 vext3 <1,2,3,4>, <3,4,4,5>
+ 2686454274U, // <4,3,4,5>: Cost 3 vext3 <0,3,1,4>, <3,4,5,6>
+ 2235640329U, // <4,3,4,6>: Cost 3 vrev <3,4,6,4>
+ 3801483792U, // <4,3,4,7>: Cost 4 vext3 <7,2,3,4>, <3,4,7,2>
+ 2691910168U, // <4,3,4,u>: Cost 3 vext3 <1,2,3,4>, <3,4,u,1>
+ 2559025254U, // <4,3,5,0>: Cost 3 vext1 <1,4,3,5>, LHS
+ 2559026237U, // <4,3,5,1>: Cost 3 vext1 <1,4,3,5>, <1,4,3,5>
+ 2564998862U, // <4,3,5,2>: Cost 3 vext1 <2,4,3,5>, <2,3,4,5>
+ 2570971548U, // <4,3,5,3>: Cost 3 vext1 <3,4,3,5>, <3,3,3,3>
+ 2559028534U, // <4,3,5,4>: Cost 3 vext1 <1,4,3,5>, RHS
+ 4163519477U, // <4,3,5,5>: Cost 4 vtrnr <0,4,1,5>, <1,3,4,5>
+ 3309390346U, // <4,3,5,6>: Cost 4 vrev <3,4,6,5>
+ 2706139747U, // <4,3,5,7>: Cost 3 vext3 <3,5,7,4>, <3,5,7,4>
+ 2559031086U, // <4,3,5,u>: Cost 3 vext1 <1,4,3,5>, LHS
+ 2559033446U, // <4,3,6,0>: Cost 3 vext1 <1,4,3,6>, LHS
+ 2559034430U, // <4,3,6,1>: Cost 3 vext1 <1,4,3,6>, <1,4,3,6>
+ 2565007127U, // <4,3,6,2>: Cost 3 vext1 <2,4,3,6>, <2,4,3,6>
+ 2570979740U, // <4,3,6,3>: Cost 3 vext1 <3,4,3,6>, <3,3,3,3>
+ 2559036726U, // <4,3,6,4>: Cost 3 vext1 <1,4,3,6>, RHS
+ 1161841154U, // <4,3,6,5>: Cost 2 vrev <3,4,5,6>
+ 4028203932U, // <4,3,6,6>: Cost 4 vzipr <0,2,4,6>, <1,2,3,6>
+ 2706803380U, // <4,3,6,7>: Cost 3 vext3 <3,6,7,4>, <3,6,7,4>
+ 1162062365U, // <4,3,6,u>: Cost 2 vrev <3,4,u,6>
+ 3769633475U, // <4,3,7,0>: Cost 4 vext3 <1,u,3,4>, <3,7,0,1>
+ 3769633488U, // <4,3,7,1>: Cost 4 vext3 <1,u,3,4>, <3,7,1,5>
+ 3638757144U, // <4,3,7,2>: Cost 4 vext1 <2,4,3,7>, <2,4,3,7>
+ 3769633508U, // <4,3,7,3>: Cost 4 vext3 <1,u,3,4>, <3,7,3,7>
+ 3769633515U, // <4,3,7,4>: Cost 4 vext3 <1,u,3,4>, <3,7,4,5>
+ 3769633526U, // <4,3,7,5>: Cost 4 vext3 <1,u,3,4>, <3,7,5,7>
+ 3662647932U, // <4,3,7,6>: Cost 4 vext1 <6,4,3,7>, <6,4,3,7>
+ 3781208837U, // <4,3,7,7>: Cost 4 vext3 <3,7,7,4>, <3,7,7,4>
+ 3769633547U, // <4,3,7,u>: Cost 4 vext3 <1,u,3,4>, <3,7,u,1>
+ 2559049830U, // <4,3,u,0>: Cost 3 vext1 <1,4,3,u>, LHS
+ 2691910430U, // <4,3,u,1>: Cost 3 vext3 <1,2,3,4>, <3,u,1,2>
+ 2565023513U, // <4,3,u,2>: Cost 3 vext1 <2,4,3,u>, <2,4,3,u>
+ 2707835698U, // <4,3,u,3>: Cost 3 vext3 <3,u,3,4>, <3,u,3,4>
+ 2559053110U, // <4,3,u,4>: Cost 3 vext1 <1,4,3,u>, RHS
+ 1161857540U, // <4,3,u,5>: Cost 2 vrev <3,4,5,u>
+ 2235673101U, // <4,3,u,6>: Cost 3 vrev <3,4,6,u>
+ 2708130646U, // <4,3,u,7>: Cost 3 vext3 <3,u,7,4>, <3,u,7,4>
+ 1162078751U, // <4,3,u,u>: Cost 2 vrev <3,4,u,u>
+ 2617573416U, // <4,4,0,0>: Cost 3 vext2 <0,0,4,4>, <0,0,4,4>
+ 1570373734U, // <4,4,0,1>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2779676774U, // <4,4,0,2>: Cost 3 vuzpl <4,6,4,6>, LHS
+ 3760196480U, // <4,4,0,3>: Cost 4 vext3 <0,3,1,4>, <4,0,3,1>
+ 2576977100U, // <4,4,0,4>: Cost 3 vext1 <4,4,4,0>, <4,4,4,0>
+ 2718747538U, // <4,4,0,5>: Cost 3 vext3 <5,6,7,4>, <4,0,5,1>
+ 2718747548U, // <4,4,0,6>: Cost 3 vext3 <5,6,7,4>, <4,0,6,2>
+ 3668637015U, // <4,4,0,7>: Cost 4 vext1 <7,4,4,0>, <7,4,4,0>
+ 1570374301U, // <4,4,0,u>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2644116214U, // <4,4,1,0>: Cost 3 vext2 <4,4,4,4>, <1,0,3,2>
+ 2644116276U, // <4,4,1,1>: Cost 3 vext2 <4,4,4,4>, <1,1,1,1>
+ 2691910602U, // <4,4,1,2>: Cost 3 vext3 <1,2,3,4>, <4,1,2,3>
+ 2644116440U, // <4,4,1,3>: Cost 3 vext2 <4,4,4,4>, <1,3,1,3>
+ 2711227356U, // <4,4,1,4>: Cost 3 vext3 <4,4,4,4>, <4,1,4,3>
+ 2709310438U, // <4,4,1,5>: Cost 3 vext3 <4,1,5,4>, <4,1,5,4>
+ 3765652462U, // <4,4,1,6>: Cost 4 vext3 <1,2,3,4>, <4,1,6,3>
+ 3768970231U, // <4,4,1,7>: Cost 4 vext3 <1,7,3,4>, <4,1,7,3>
+ 2695891968U, // <4,4,1,u>: Cost 3 vext3 <1,u,3,4>, <4,1,u,3>
+ 3703260634U, // <4,4,2,0>: Cost 4 vext2 <2,0,4,4>, <2,0,4,4>
+ 3765652499U, // <4,4,2,1>: Cost 4 vext3 <1,2,3,4>, <4,2,1,4>
+ 2644117096U, // <4,4,2,2>: Cost 3 vext2 <4,4,4,4>, <2,2,2,2>
+ 2631509709U, // <4,4,2,3>: Cost 3 vext2 <2,3,4,4>, <2,3,4,4>
+ 2644117269U, // <4,4,2,4>: Cost 3 vext2 <4,4,4,4>, <2,4,3,4>
+ 3705251698U, // <4,4,2,5>: Cost 4 vext2 <2,3,4,4>, <2,5,4,7>
+ 2710047808U, // <4,4,2,6>: Cost 3 vext3 <4,2,6,4>, <4,2,6,4>
+ 3783863369U, // <4,4,2,7>: Cost 4 vext3 <4,2,7,4>, <4,2,7,4>
+ 2634827874U, // <4,4,2,u>: Cost 3 vext2 <2,u,4,4>, <2,u,4,4>
+ 2644117654U, // <4,4,3,0>: Cost 3 vext2 <4,4,4,4>, <3,0,1,2>
+ 3638797210U, // <4,4,3,1>: Cost 4 vext1 <2,4,4,3>, <1,2,3,4>
+ 3638798082U, // <4,4,3,2>: Cost 4 vext1 <2,4,4,3>, <2,4,1,3>
+ 2637482406U, // <4,4,3,3>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
+ 2638146039U, // <4,4,3,4>: Cost 3 vext2 <3,4,4,4>, <3,4,4,4>
+ 3913287374U, // <4,4,3,5>: Cost 4 vuzpr <3,4,5,4>, <2,3,4,5>
+ 3765652625U, // <4,4,3,6>: Cost 4 vext3 <1,2,3,4>, <4,3,6,4>
+ 3713878762U, // <4,4,3,7>: Cost 4 vext2 <3,7,4,4>, <3,7,4,4>
+ 2637482406U, // <4,4,3,u>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
+ 1503264870U, // <4,4,4,0>: Cost 2 vext1 <4,4,4,4>, LHS
+ 2577007514U, // <4,4,4,1>: Cost 3 vext1 <4,4,4,4>, <1,2,3,4>
+ 2577008232U, // <4,4,4,2>: Cost 3 vext1 <4,4,4,4>, <2,2,2,2>
+ 2571037175U, // <4,4,4,3>: Cost 3 vext1 <3,4,4,4>, <3,4,4,4>
+ 161926454U, // <4,4,4,4>: Cost 1 vdup0 RHS
+ 1570377014U, // <4,4,4,5>: Cost 2 vext2 <4,4,4,4>, RHS
+ 2779680054U, // <4,4,4,6>: Cost 3 vuzpl <4,6,4,6>, RHS
+ 2594927963U, // <4,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
+ 161926454U, // <4,4,4,u>: Cost 1 vdup0 RHS
+ 2571042918U, // <4,4,5,0>: Cost 3 vext1 <3,4,4,5>, LHS
+ 2571043738U, // <4,4,5,1>: Cost 3 vext1 <3,4,4,5>, <1,2,3,4>
+ 3638814495U, // <4,4,5,2>: Cost 4 vext1 <2,4,4,5>, <2,4,4,5>
+ 2571045368U, // <4,4,5,3>: Cost 3 vext1 <3,4,4,5>, <3,4,4,5>
+ 2571046198U, // <4,4,5,4>: Cost 3 vext1 <3,4,4,5>, RHS
+ 1839648054U, // <4,4,5,5>: Cost 2 vzipl RHS, RHS
+ 1618169142U, // <4,4,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2594936156U, // <4,4,5,7>: Cost 3 vext1 <7,4,4,5>, <7,4,4,5>
+ 1618169160U, // <4,4,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2553135206U, // <4,4,6,0>: Cost 3 vext1 <0,4,4,6>, LHS
+ 3626877686U, // <4,4,6,1>: Cost 4 vext1 <0,4,4,6>, <1,0,3,2>
+ 2565080782U, // <4,4,6,2>: Cost 3 vext1 <2,4,4,6>, <2,3,4,5>
+ 2571053561U, // <4,4,6,3>: Cost 3 vext1 <3,4,4,6>, <3,4,4,6>
+ 2553138486U, // <4,4,6,4>: Cost 3 vext1 <0,4,4,6>, RHS
+ 2241555675U, // <4,4,6,5>: Cost 3 vrev <4,4,5,6>
+ 1973865782U, // <4,4,6,6>: Cost 2 vtrnl RHS, RHS
+ 2658055029U, // <4,4,6,7>: Cost 3 vext2 <6,7,4,4>, <6,7,4,4>
+ 1973865800U, // <4,4,6,u>: Cost 2 vtrnl RHS, RHS
+ 2644120570U, // <4,4,7,0>: Cost 3 vext2 <4,4,4,4>, <7,0,1,2>
+ 3638829978U, // <4,4,7,1>: Cost 4 vext1 <2,4,4,7>, <1,2,3,4>
+ 3638830881U, // <4,4,7,2>: Cost 4 vext1 <2,4,4,7>, <2,4,4,7>
+ 3735115018U, // <4,4,7,3>: Cost 4 vext2 <7,3,4,4>, <7,3,4,4>
+ 2662036827U, // <4,4,7,4>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
+ 2713292236U, // <4,4,7,5>: Cost 3 vext3 <4,7,5,4>, <4,7,5,4>
+ 2713365973U, // <4,4,7,6>: Cost 3 vext3 <4,7,6,4>, <4,7,6,4>
+ 2644121196U, // <4,4,7,7>: Cost 3 vext2 <4,4,4,4>, <7,7,7,7>
+ 2662036827U, // <4,4,7,u>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
+ 1503297638U, // <4,4,u,0>: Cost 2 vext1 <4,4,4,u>, LHS
+ 1570379566U, // <4,4,u,1>: Cost 2 vext2 <4,4,4,4>, LHS
+ 2779682606U, // <4,4,u,2>: Cost 3 vuzpl <4,6,4,6>, LHS
+ 2571069947U, // <4,4,u,3>: Cost 3 vext1 <3,4,4,u>, <3,4,4,u>
+ 161926454U, // <4,4,u,4>: Cost 1 vdup0 RHS
+ 1841638710U, // <4,4,u,5>: Cost 2 vzipl RHS, RHS
+ 1618169385U, // <4,4,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 2594960735U, // <4,4,u,7>: Cost 3 vext1 <7,4,4,u>, <7,4,4,u>
+ 161926454U, // <4,4,u,u>: Cost 1 vdup0 RHS
+ 2631516160U, // <4,5,0,0>: Cost 3 vext2 <2,3,4,5>, <0,0,0,0>
+ 1557774438U, // <4,5,0,1>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2618908875U, // <4,5,0,2>: Cost 3 vext2 <0,2,4,5>, <0,2,4,5>
+ 2571078140U, // <4,5,0,3>: Cost 3 vext1 <3,4,5,0>, <3,4,5,0>
+ 2626871634U, // <4,5,0,4>: Cost 3 vext2 <1,5,4,5>, <0,4,1,5>
+ 3705258414U, // <4,5,0,5>: Cost 4 vext2 <2,3,4,5>, <0,5,2,7>
+ 2594968438U, // <4,5,0,6>: Cost 3 vext1 <7,4,5,0>, <6,7,4,5>
+ 2594968928U, // <4,5,0,7>: Cost 3 vext1 <7,4,5,0>, <7,4,5,0>
+ 1557775005U, // <4,5,0,u>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2631516918U, // <4,5,1,0>: Cost 3 vext2 <2,3,4,5>, <1,0,3,2>
+ 2624217939U, // <4,5,1,1>: Cost 3 vext2 <1,1,4,5>, <1,1,4,5>
+ 2631517078U, // <4,5,1,2>: Cost 3 vext2 <2,3,4,5>, <1,2,3,0>
+ 2821341286U, // <4,5,1,3>: Cost 3 vuzpr <0,4,1,5>, LHS
+ 3895086054U, // <4,5,1,4>: Cost 4 vuzpr <0,4,1,5>, <4,1,5,4>
+ 2626872471U, // <4,5,1,5>: Cost 3 vext2 <1,5,4,5>, <1,5,4,5>
+ 3895083131U, // <4,5,1,6>: Cost 4 vuzpr <0,4,1,5>, <0,1,4,6>
+ 2718748368U, // <4,5,1,7>: Cost 3 vext3 <5,6,7,4>, <5,1,7,3>
+ 2821341291U, // <4,5,1,u>: Cost 3 vuzpr <0,4,1,5>, LHS
+ 2571092070U, // <4,5,2,0>: Cost 3 vext1 <3,4,5,2>, LHS
+ 3699287585U, // <4,5,2,1>: Cost 4 vext2 <1,3,4,5>, <2,1,3,3>
+ 2630854269U, // <4,5,2,2>: Cost 3 vext2 <2,2,4,5>, <2,2,4,5>
+ 1557776078U, // <4,5,2,3>: Cost 2 vext2 <2,3,4,5>, <2,3,4,5>
+ 2631517974U, // <4,5,2,4>: Cost 3 vext2 <2,3,4,5>, <2,4,3,5>
+ 3692652384U, // <4,5,2,5>: Cost 4 vext2 <0,2,4,5>, <2,5,2,7>
+ 2631518138U, // <4,5,2,6>: Cost 3 vext2 <2,3,4,5>, <2,6,3,7>
+ 4164013366U, // <4,5,2,7>: Cost 4 vtrnr <0,4,u,2>, RHS
+ 1561094243U, // <4,5,2,u>: Cost 2 vext2 <2,u,4,5>, <2,u,4,5>
+ 2631518358U, // <4,5,3,0>: Cost 3 vext2 <2,3,4,5>, <3,0,1,2>
+ 3895084710U, // <4,5,3,1>: Cost 4 vuzpr <0,4,1,5>, <2,3,0,1>
+ 2631518540U, // <4,5,3,2>: Cost 3 vext2 <2,3,4,5>, <3,2,3,4>
+ 2631518620U, // <4,5,3,3>: Cost 3 vext2 <2,3,4,5>, <3,3,3,3>
+ 2631518716U, // <4,5,3,4>: Cost 3 vext2 <2,3,4,5>, <3,4,5,0>
+ 2631518784U, // <4,5,3,5>: Cost 3 vext2 <2,3,4,5>, <3,5,3,5>
+ 2658060980U, // <4,5,3,6>: Cost 3 vext2 <6,7,4,5>, <3,6,7,4>
+ 2640145131U, // <4,5,3,7>: Cost 3 vext2 <3,7,4,5>, <3,7,4,5>
+ 2631519006U, // <4,5,3,u>: Cost 3 vext2 <2,3,4,5>, <3,u,1,2>
+ 2571108454U, // <4,5,4,0>: Cost 3 vext1 <3,4,5,4>, LHS
+ 3632907342U, // <4,5,4,1>: Cost 4 vext1 <1,4,5,4>, <1,4,5,4>
+ 2571110094U, // <4,5,4,2>: Cost 3 vext1 <3,4,5,4>, <2,3,4,5>
+ 2571110912U, // <4,5,4,3>: Cost 3 vext1 <3,4,5,4>, <3,4,5,4>
+ 2571111734U, // <4,5,4,4>: Cost 3 vext1 <3,4,5,4>, RHS
+ 1557777718U, // <4,5,4,5>: Cost 2 vext2 <2,3,4,5>, RHS
+ 2645454195U, // <4,5,4,6>: Cost 3 vext2 <4,6,4,5>, <4,6,4,5>
+ 2718748614U, // <4,5,4,7>: Cost 3 vext3 <5,6,7,4>, <5,4,7,6>
+ 1557777961U, // <4,5,4,u>: Cost 2 vext2 <2,3,4,5>, RHS
+ 1503346790U, // <4,5,5,0>: Cost 2 vext1 <4,4,5,5>, LHS
+ 2913398480U, // <4,5,5,1>: Cost 3 vzipl RHS, <5,1,7,3>
+ 2631519998U, // <4,5,5,2>: Cost 3 vext2 <2,3,4,5>, <5,2,3,4>
+ 2577090710U, // <4,5,5,3>: Cost 3 vext1 <4,4,5,5>, <3,0,1,2>
+ 1503349978U, // <4,5,5,4>: Cost 2 vext1 <4,4,5,5>, <4,4,5,5>
+ 2631520260U, // <4,5,5,5>: Cost 3 vext2 <2,3,4,5>, <5,5,5,5>
+ 2913390690U, // <4,5,5,6>: Cost 3 vzipl RHS, <5,6,7,0>
+ 2821344566U, // <4,5,5,7>: Cost 3 vuzpr <0,4,1,5>, RHS
+ 1503352622U, // <4,5,5,u>: Cost 2 vext1 <4,4,5,5>, LHS
+ 1497383014U, // <4,5,6,0>: Cost 2 vext1 <3,4,5,6>, LHS
+ 2559181904U, // <4,5,6,1>: Cost 3 vext1 <1,4,5,6>, <1,4,5,6>
+ 2565154601U, // <4,5,6,2>: Cost 3 vext1 <2,4,5,6>, <2,4,5,6>
+ 1497385474U, // <4,5,6,3>: Cost 2 vext1 <3,4,5,6>, <3,4,5,6>
+ 1497386294U, // <4,5,6,4>: Cost 2 vext1 <3,4,5,6>, RHS
+ 3047608324U, // <4,5,6,5>: Cost 3 vtrnl RHS, <5,5,5,5>
+ 2571129656U, // <4,5,6,6>: Cost 3 vext1 <3,4,5,6>, <6,6,6,6>
+ 27705344U, // <4,5,6,7>: Cost 0 copy RHS
+ 27705344U, // <4,5,6,u>: Cost 0 copy RHS
+ 2565161062U, // <4,5,7,0>: Cost 3 vext1 <2,4,5,7>, LHS
+ 2565161882U, // <4,5,7,1>: Cost 3 vext1 <2,4,5,7>, <1,2,3,4>
+ 2565162794U, // <4,5,7,2>: Cost 3 vext1 <2,4,5,7>, <2,4,5,7>
+ 2661381387U, // <4,5,7,3>: Cost 3 vext2 <7,3,4,5>, <7,3,4,5>
+ 2565164342U, // <4,5,7,4>: Cost 3 vext1 <2,4,5,7>, RHS
+ 2718748840U, // <4,5,7,5>: Cost 3 vext3 <5,6,7,4>, <5,7,5,7>
+ 2718748846U, // <4,5,7,6>: Cost 3 vext3 <5,6,7,4>, <5,7,6,4>
+ 2719412407U, // <4,5,7,7>: Cost 3 vext3 <5,7,7,4>, <5,7,7,4>
+ 2565166894U, // <4,5,7,u>: Cost 3 vext1 <2,4,5,7>, LHS
+ 1497399398U, // <4,5,u,0>: Cost 2 vext1 <3,4,5,u>, LHS
+ 1557780270U, // <4,5,u,1>: Cost 2 vext2 <2,3,4,5>, LHS
+ 2631522181U, // <4,5,u,2>: Cost 3 vext2 <2,3,4,5>, <u,2,3,0>
+ 1497401860U, // <4,5,u,3>: Cost 2 vext1 <3,4,5,u>, <3,4,5,u>
+ 1497402678U, // <4,5,u,4>: Cost 2 vext1 <3,4,5,u>, RHS
+ 1557780634U, // <4,5,u,5>: Cost 2 vext2 <2,3,4,5>, RHS
+ 2631522512U, // <4,5,u,6>: Cost 3 vext2 <2,3,4,5>, <u,6,3,7>
+ 27705344U, // <4,5,u,7>: Cost 0 copy RHS
+ 27705344U, // <4,5,u,u>: Cost 0 copy RHS
+ 2618916864U, // <4,6,0,0>: Cost 3 vext2 <0,2,4,6>, <0,0,0,0>
+ 1545175142U, // <4,6,0,1>: Cost 2 vext2 <0,2,4,6>, LHS
+ 1545175244U, // <4,6,0,2>: Cost 2 vext2 <0,2,4,6>, <0,2,4,6>
+ 3692658940U, // <4,6,0,3>: Cost 4 vext2 <0,2,4,6>, <0,3,1,0>
+ 2618917202U, // <4,6,0,4>: Cost 3 vext2 <0,2,4,6>, <0,4,1,5>
+ 3852910806U, // <4,6,0,5>: Cost 4 vuzpl RHS, <0,2,5,7>
+ 2253525648U, // <4,6,0,6>: Cost 3 vrev <6,4,6,0>
+ 4040764726U, // <4,6,0,7>: Cost 4 vzipr <2,3,4,0>, RHS
+ 1545175709U, // <4,6,0,u>: Cost 2 vext2 <0,2,4,6>, LHS
+ 2618917622U, // <4,6,1,0>: Cost 3 vext2 <0,2,4,6>, <1,0,3,2>
+ 2618917684U, // <4,6,1,1>: Cost 3 vext2 <0,2,4,6>, <1,1,1,1>
+ 2618917782U, // <4,6,1,2>: Cost 3 vext2 <0,2,4,6>, <1,2,3,0>
+ 2618917848U, // <4,6,1,3>: Cost 3 vext2 <0,2,4,6>, <1,3,1,3>
+ 3692659773U, // <4,6,1,4>: Cost 4 vext2 <0,2,4,6>, <1,4,3,5>
+ 2618918032U, // <4,6,1,5>: Cost 3 vext2 <0,2,4,6>, <1,5,3,7>
+ 3692659937U, // <4,6,1,6>: Cost 4 vext2 <0,2,4,6>, <1,6,3,7>
+ 4032146742U, // <4,6,1,7>: Cost 4 vzipr <0,u,4,1>, RHS
+ 2618918253U, // <4,6,1,u>: Cost 3 vext2 <0,2,4,6>, <1,u,1,3>
+ 2618918380U, // <4,6,2,0>: Cost 3 vext2 <0,2,4,6>, <2,0,6,4>
+ 2618918460U, // <4,6,2,1>: Cost 3 vext2 <0,2,4,6>, <2,1,6,3>
+ 2618918504U, // <4,6,2,2>: Cost 3 vext2 <0,2,4,6>, <2,2,2,2>
+ 2618918566U, // <4,6,2,3>: Cost 3 vext2 <0,2,4,6>, <2,3,0,1>
+ 2618918679U, // <4,6,2,4>: Cost 3 vext2 <0,2,4,6>, <2,4,3,6>
+ 2618918788U, // <4,6,2,5>: Cost 3 vext2 <0,2,4,6>, <2,5,6,7>
+ 2618918842U, // <4,6,2,6>: Cost 3 vext2 <0,2,4,6>, <2,6,3,7>
+ 2718749178U, // <4,6,2,7>: Cost 3 vext3 <5,6,7,4>, <6,2,7,3>
+ 2618918971U, // <4,6,2,u>: Cost 3 vext2 <0,2,4,6>, <2,u,0,1>
+ 2618919062U, // <4,6,3,0>: Cost 3 vext2 <0,2,4,6>, <3,0,1,2>
+ 2636171526U, // <4,6,3,1>: Cost 3 vext2 <3,1,4,6>, <3,1,4,6>
+ 3692661057U, // <4,6,3,2>: Cost 4 vext2 <0,2,4,6>, <3,2,2,2>
+ 2618919324U, // <4,6,3,3>: Cost 3 vext2 <0,2,4,6>, <3,3,3,3>
+ 2618919426U, // <4,6,3,4>: Cost 3 vext2 <0,2,4,6>, <3,4,5,6>
+ 2638826058U, // <4,6,3,5>: Cost 3 vext2 <3,5,4,6>, <3,5,4,6>
+ 3913303030U, // <4,6,3,6>: Cost 4 vuzpr <3,4,5,6>, <1,3,4,6>
+ 2722730572U, // <4,6,3,7>: Cost 3 vext3 <6,3,7,4>, <6,3,7,4>
+ 2618919710U, // <4,6,3,u>: Cost 3 vext2 <0,2,4,6>, <3,u,1,2>
+ 2565210214U, // <4,6,4,0>: Cost 3 vext1 <2,4,6,4>, LHS
+ 2718749286U, // <4,6,4,1>: Cost 3 vext3 <5,6,7,4>, <6,4,1,3>
+ 2565211952U, // <4,6,4,2>: Cost 3 vext1 <2,4,6,4>, <2,4,6,4>
+ 2571184649U, // <4,6,4,3>: Cost 3 vext1 <3,4,6,4>, <3,4,6,4>
+ 2565213494U, // <4,6,4,4>: Cost 3 vext1 <2,4,6,4>, RHS
+ 1545178422U, // <4,6,4,5>: Cost 2 vext2 <0,2,4,6>, RHS
+ 1705430326U, // <4,6,4,6>: Cost 2 vuzpl RHS, RHS
+ 2595075437U, // <4,6,4,7>: Cost 3 vext1 <7,4,6,4>, <7,4,6,4>
+ 1545178665U, // <4,6,4,u>: Cost 2 vext2 <0,2,4,6>, RHS
+ 2565218406U, // <4,6,5,0>: Cost 3 vext1 <2,4,6,5>, LHS
+ 2645462736U, // <4,6,5,1>: Cost 3 vext2 <4,6,4,6>, <5,1,7,3>
+ 2913399290U, // <4,6,5,2>: Cost 3 vzipl RHS, <6,2,7,3>
+ 3913305394U, // <4,6,5,3>: Cost 4 vuzpr <3,4,5,6>, <4,5,6,3>
+ 2645462982U, // <4,6,5,4>: Cost 3 vext2 <4,6,4,6>, <5,4,7,6>
+ 2779172868U, // <4,6,5,5>: Cost 3 vuzpl RHS, <5,5,5,5>
+ 2913391416U, // <4,6,5,6>: Cost 3 vzipl RHS, <6,6,6,6>
+ 2821426486U, // <4,6,5,7>: Cost 3 vuzpr <0,4,2,6>, RHS
+ 2821426487U, // <4,6,5,u>: Cost 3 vuzpr <0,4,2,6>, RHS
+ 1503428710U, // <4,6,6,0>: Cost 2 vext1 <4,4,6,6>, LHS
+ 2577171190U, // <4,6,6,1>: Cost 3 vext1 <4,4,6,6>, <1,0,3,2>
+ 2645463546U, // <4,6,6,2>: Cost 3 vext2 <4,6,4,6>, <6,2,7,3>
+ 2577172630U, // <4,6,6,3>: Cost 3 vext1 <4,4,6,6>, <3,0,1,2>
+ 1503431908U, // <4,6,6,4>: Cost 2 vext1 <4,4,6,6>, <4,4,6,6>
+ 2253501069U, // <4,6,6,5>: Cost 3 vrev <6,4,5,6>
+ 2618921784U, // <4,6,6,6>: Cost 3 vext2 <0,2,4,6>, <6,6,6,6>
+ 2954464566U, // <4,6,6,7>: Cost 3 vzipr <0,2,4,6>, RHS
+ 1503434542U, // <4,6,6,u>: Cost 2 vext1 <4,4,6,6>, LHS
+ 2645464058U, // <4,6,7,0>: Cost 3 vext2 <4,6,4,6>, <7,0,1,2>
+ 2779173882U, // <4,6,7,1>: Cost 3 vuzpl RHS, <7,0,1,2>
+ 3638978355U, // <4,6,7,2>: Cost 4 vext1 <2,4,6,7>, <2,4,6,7>
+ 2725090156U, // <4,6,7,3>: Cost 3 vext3 <6,7,3,4>, <6,7,3,4>
+ 2645464422U, // <4,6,7,4>: Cost 3 vext2 <4,6,4,6>, <7,4,5,6>
+ 2779174246U, // <4,6,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
+ 3852915914U, // <4,6,7,6>: Cost 4 vuzpl RHS, <7,2,6,3>
+ 2779174508U, // <4,6,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
+ 2779173945U, // <4,6,7,u>: Cost 3 vuzpl RHS, <7,0,u,2>
+ 1503445094U, // <4,6,u,0>: Cost 2 vext1 <4,4,6,u>, LHS
+ 1545180974U, // <4,6,u,1>: Cost 2 vext2 <0,2,4,6>, LHS
+ 1705432878U, // <4,6,u,2>: Cost 2 vuzpl RHS, LHS
+ 2618922940U, // <4,6,u,3>: Cost 3 vext2 <0,2,4,6>, <u,3,0,1>
+ 1503448294U, // <4,6,u,4>: Cost 2 vext1 <4,4,6,u>, <4,4,6,u>
+ 1545181338U, // <4,6,u,5>: Cost 2 vext2 <0,2,4,6>, RHS
+ 1705433242U, // <4,6,u,6>: Cost 2 vuzpl RHS, RHS
+ 2954480950U, // <4,6,u,7>: Cost 3 vzipr <0,2,4,u>, RHS
+ 1545181541U, // <4,6,u,u>: Cost 2 vext2 <0,2,4,6>, LHS
+ 3706601472U, // <4,7,0,0>: Cost 4 vext2 <2,5,4,7>, <0,0,0,0>
+ 2632859750U, // <4,7,0,1>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2726343685U, // <4,7,0,2>: Cost 3 vext3 <7,0,2,4>, <7,0,2,4>
+ 3701293312U, // <4,7,0,3>: Cost 4 vext2 <1,6,4,7>, <0,3,1,4>
+ 3706601810U, // <4,7,0,4>: Cost 4 vext2 <2,5,4,7>, <0,4,1,5>
+ 2259424608U, // <4,7,0,5>: Cost 3 vrev <7,4,5,0>
+ 3695321617U, // <4,7,0,6>: Cost 4 vext2 <0,6,4,7>, <0,6,4,7>
+ 3800454194U, // <4,7,0,7>: Cost 4 vext3 <7,0,7,4>, <7,0,7,4>
+ 2632860317U, // <4,7,0,u>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2259064116U, // <4,7,1,0>: Cost 3 vrev <7,4,0,1>
+ 3700630324U, // <4,7,1,1>: Cost 4 vext2 <1,5,4,7>, <1,1,1,1>
+ 2632860570U, // <4,7,1,2>: Cost 3 vext2 <2,5,4,7>, <1,2,3,4>
+ 3769635936U, // <4,7,1,3>: Cost 4 vext3 <1,u,3,4>, <7,1,3,5>
+ 3656920374U, // <4,7,1,4>: Cost 4 vext1 <5,4,7,1>, RHS
+ 3700630681U, // <4,7,1,5>: Cost 4 vext2 <1,5,4,7>, <1,5,4,7>
+ 3701294314U, // <4,7,1,6>: Cost 4 vext2 <1,6,4,7>, <1,6,4,7>
+ 3793818754U, // <4,7,1,7>: Cost 4 vext3 <5,u,7,4>, <7,1,7,3>
+ 2259654012U, // <4,7,1,u>: Cost 3 vrev <7,4,u,1>
+ 3656925286U, // <4,7,2,0>: Cost 4 vext1 <5,4,7,2>, LHS
+ 3706603050U, // <4,7,2,1>: Cost 4 vext2 <2,5,4,7>, <2,1,4,3>
+ 3706603112U, // <4,7,2,2>: Cost 4 vext2 <2,5,4,7>, <2,2,2,2>
+ 2727744688U, // <4,7,2,3>: Cost 3 vext3 <7,2,3,4>, <7,2,3,4>
+ 3705939745U, // <4,7,2,4>: Cost 4 vext2 <2,4,4,7>, <2,4,4,7>
+ 2632861554U, // <4,7,2,5>: Cost 3 vext2 <2,5,4,7>, <2,5,4,7>
+ 3706603450U, // <4,7,2,6>: Cost 4 vext2 <2,5,4,7>, <2,6,3,7>
+ 3792491731U, // <4,7,2,7>: Cost 4 vext3 <5,6,7,4>, <7,2,7,3>
+ 2634852453U, // <4,7,2,u>: Cost 3 vext2 <2,u,4,7>, <2,u,4,7>
+ 3706603670U, // <4,7,3,0>: Cost 4 vext2 <2,5,4,7>, <3,0,1,2>
+ 3662906266U, // <4,7,3,1>: Cost 4 vext1 <6,4,7,3>, <1,2,3,4>
+ 3725183326U, // <4,7,3,2>: Cost 4 vext2 <5,6,4,7>, <3,2,5,4>
+ 3706603932U, // <4,7,3,3>: Cost 4 vext2 <2,5,4,7>, <3,3,3,3>
+ 3701295618U, // <4,7,3,4>: Cost 4 vext2 <1,6,4,7>, <3,4,5,6>
+ 2638834251U, // <4,7,3,5>: Cost 3 vext2 <3,5,4,7>, <3,5,4,7>
+ 2639497884U, // <4,7,3,6>: Cost 3 vext2 <3,6,4,7>, <3,6,4,7>
+ 3802445093U, // <4,7,3,7>: Cost 4 vext3 <7,3,7,4>, <7,3,7,4>
+ 2640825150U, // <4,7,3,u>: Cost 3 vext2 <3,u,4,7>, <3,u,4,7>
+ 2718750004U, // <4,7,4,0>: Cost 3 vext3 <5,6,7,4>, <7,4,0,1>
+ 3706604490U, // <4,7,4,1>: Cost 4 vext2 <2,5,4,7>, <4,1,2,3>
+ 3656943474U, // <4,7,4,2>: Cost 4 vext1 <5,4,7,4>, <2,5,4,7>
+ 3779884371U, // <4,7,4,3>: Cost 4 vext3 <3,5,7,4>, <7,4,3,5>
+ 2259383643U, // <4,7,4,4>: Cost 3 vrev <7,4,4,4>
+ 2632863030U, // <4,7,4,5>: Cost 3 vext2 <2,5,4,7>, RHS
+ 2259531117U, // <4,7,4,6>: Cost 3 vrev <7,4,6,4>
+ 3907340074U, // <4,7,4,7>: Cost 4 vuzpr <2,4,5,7>, <2,4,5,7>
+ 2632863273U, // <4,7,4,u>: Cost 3 vext2 <2,5,4,7>, RHS
+ 2913391610U, // <4,7,5,0>: Cost 3 vzipl RHS, <7,0,1,2>
+ 3645006848U, // <4,7,5,1>: Cost 4 vext1 <3,4,7,5>, <1,3,5,7>
+ 2589181646U, // <4,7,5,2>: Cost 3 vext1 <6,4,7,5>, <2,3,4,5>
+ 3645008403U, // <4,7,5,3>: Cost 4 vext1 <3,4,7,5>, <3,4,7,5>
+ 2913391974U, // <4,7,5,4>: Cost 3 vzipl RHS, <7,4,5,6>
+ 2583211973U, // <4,7,5,5>: Cost 3 vext1 <5,4,7,5>, <5,4,7,5>
+ 2589184670U, // <4,7,5,6>: Cost 3 vext1 <6,4,7,5>, <6,4,7,5>
+ 2913392236U, // <4,7,5,7>: Cost 3 vzipl RHS, <7,7,7,7>
+ 2913392258U, // <4,7,5,u>: Cost 3 vzipl RHS, <7,u,1,2>
+ 1509474406U, // <4,7,6,0>: Cost 2 vext1 <5,4,7,6>, LHS
+ 3047609338U, // <4,7,6,1>: Cost 3 vtrnl RHS, <7,0,1,2>
+ 2583217768U, // <4,7,6,2>: Cost 3 vext1 <5,4,7,6>, <2,2,2,2>
+ 2583218326U, // <4,7,6,3>: Cost 3 vext1 <5,4,7,6>, <3,0,1,2>
+ 1509477686U, // <4,7,6,4>: Cost 2 vext1 <5,4,7,6>, RHS
+ 1509478342U, // <4,7,6,5>: Cost 2 vext1 <5,4,7,6>, <5,4,7,6>
+ 2583220730U, // <4,7,6,6>: Cost 3 vext1 <5,4,7,6>, <6,2,7,3>
+ 3047609964U, // <4,7,6,7>: Cost 3 vtrnl RHS, <7,7,7,7>
+ 1509480238U, // <4,7,6,u>: Cost 2 vext1 <5,4,7,6>, LHS
+ 3650994278U, // <4,7,7,0>: Cost 4 vext1 <4,4,7,7>, LHS
+ 3650995098U, // <4,7,7,1>: Cost 4 vext1 <4,4,7,7>, <1,2,3,4>
+ 3650996010U, // <4,7,7,2>: Cost 4 vext1 <4,4,7,7>, <2,4,5,7>
+ 3804804677U, // <4,7,7,3>: Cost 4 vext3 <7,7,3,4>, <7,7,3,4>
+ 3650997486U, // <4,7,7,4>: Cost 4 vext1 <4,4,7,7>, <4,4,7,7>
+ 2662725039U, // <4,7,7,5>: Cost 3 vext2 <7,5,4,7>, <7,5,4,7>
+ 3662942880U, // <4,7,7,6>: Cost 4 vext1 <6,4,7,7>, <6,4,7,7>
+ 2718750316U, // <4,7,7,7>: Cost 3 vext3 <5,6,7,4>, <7,7,7,7>
+ 2664715938U, // <4,7,7,u>: Cost 3 vext2 <7,u,4,7>, <7,u,4,7>
+ 1509490790U, // <4,7,u,0>: Cost 2 vext1 <5,4,7,u>, LHS
+ 2632865582U, // <4,7,u,1>: Cost 3 vext2 <2,5,4,7>, LHS
+ 2583234152U, // <4,7,u,2>: Cost 3 vext1 <5,4,7,u>, <2,2,2,2>
+ 2583234710U, // <4,7,u,3>: Cost 3 vext1 <5,4,7,u>, <3,0,1,2>
+ 1509494070U, // <4,7,u,4>: Cost 2 vext1 <5,4,7,u>, RHS
+ 1509494728U, // <4,7,u,5>: Cost 2 vext1 <5,4,7,u>, <5,4,7,u>
+ 2583237114U, // <4,7,u,6>: Cost 3 vext1 <5,4,7,u>, <6,2,7,3>
+ 3047757420U, // <4,7,u,7>: Cost 3 vtrnl RHS, <7,7,7,7>
+ 1509496622U, // <4,7,u,u>: Cost 2 vext1 <5,4,7,u>, LHS
+ 2618933248U, // <4,u,0,0>: Cost 3 vext2 <0,2,4,u>, <0,0,0,0>
+ 1545191526U, // <4,u,0,1>: Cost 2 vext2 <0,2,4,u>, LHS
+ 1545191630U, // <4,u,0,2>: Cost 2 vext2 <0,2,4,u>, <0,2,4,u>
+ 2691913445U, // <4,u,0,3>: Cost 3 vext3 <1,2,3,4>, <u,0,3,2>
+ 2618933586U, // <4,u,0,4>: Cost 3 vext2 <0,2,4,u>, <0,4,1,5>
+ 2265397305U, // <4,u,0,5>: Cost 3 vrev <u,4,5,0>
+ 2595189625U, // <4,u,0,6>: Cost 3 vext1 <7,4,u,0>, <6,7,4,u>
+ 2595190139U, // <4,u,0,7>: Cost 3 vext1 <7,4,u,0>, <7,4,u,0>
+ 1545192093U, // <4,u,0,u>: Cost 2 vext2 <0,2,4,u>, LHS
+ 2618934006U, // <4,u,1,0>: Cost 3 vext2 <0,2,4,u>, <1,0,3,2>
+ 2618934068U, // <4,u,1,1>: Cost 3 vext2 <0,2,4,u>, <1,1,1,1>
+ 1618171694U, // <4,u,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2618934232U, // <4,u,1,3>: Cost 3 vext2 <0,2,4,u>, <1,3,1,3>
+ 2695894848U, // <4,u,1,4>: Cost 3 vext3 <1,u,3,4>, <u,1,4,3>
+ 2618934416U, // <4,u,1,5>: Cost 3 vext2 <0,2,4,u>, <1,5,3,7>
+ 3692676321U, // <4,u,1,6>: Cost 4 vext2 <0,2,4,u>, <1,6,3,7>
+ 2718750555U, // <4,u,1,7>: Cost 3 vext3 <5,6,7,4>, <u,1,7,3>
+ 1618171748U, // <4,u,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
+ 2553397350U, // <4,u,2,0>: Cost 3 vext1 <0,4,u,2>, LHS
+ 2630215215U, // <4,u,2,1>: Cost 3 vext2 <2,1,4,u>, <2,1,4,u>
+ 2618934888U, // <4,u,2,2>: Cost 3 vext2 <0,2,4,u>, <2,2,2,2>
+ 1557800657U, // <4,u,2,3>: Cost 2 vext2 <2,3,4,u>, <2,3,4,u>
+ 2618935065U, // <4,u,2,4>: Cost 3 vext2 <0,2,4,u>, <2,4,3,u>
+ 2733864859U, // <4,u,2,5>: Cost 3 vext3 <u,2,5,4>, <u,2,5,4>
+ 2618935226U, // <4,u,2,6>: Cost 3 vext2 <0,2,4,u>, <2,6,3,7>
+ 2718750636U, // <4,u,2,7>: Cost 3 vext3 <5,6,7,4>, <u,2,7,3>
+ 1561118822U, // <4,u,2,u>: Cost 2 vext2 <2,u,4,u>, <2,u,4,u>
+ 2618935446U, // <4,u,3,0>: Cost 3 vext2 <0,2,4,u>, <3,0,1,2>
+ 2779318422U, // <4,u,3,1>: Cost 3 vuzpl RHS, <3,0,1,2>
+ 2636851545U, // <4,u,3,2>: Cost 3 vext2 <3,2,4,u>, <3,2,4,u>
+ 2618935708U, // <4,u,3,3>: Cost 3 vext2 <0,2,4,u>, <3,3,3,3>
+ 2618935810U, // <4,u,3,4>: Cost 3 vext2 <0,2,4,u>, <3,4,5,6>
+ 2691913711U, // <4,u,3,5>: Cost 3 vext3 <1,2,3,4>, <u,3,5,7>
+ 2588725862U, // <4,u,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
+ 2640169710U, // <4,u,3,7>: Cost 3 vext2 <3,7,4,u>, <3,7,4,u>
+ 2618936094U, // <4,u,3,u>: Cost 3 vext2 <0,2,4,u>, <3,u,1,2>
+ 1503559782U, // <4,u,4,0>: Cost 2 vext1 <4,4,u,4>, LHS
+ 2692282391U, // <4,u,4,1>: Cost 3 vext3 <1,2,u,4>, <u,4,1,2>
+ 2565359426U, // <4,u,4,2>: Cost 3 vext1 <2,4,u,4>, <2,4,u,4>
+ 2571332123U, // <4,u,4,3>: Cost 3 vext1 <3,4,u,4>, <3,4,u,4>
+ 161926454U, // <4,u,4,4>: Cost 1 vdup0 RHS
+ 1545194806U, // <4,u,4,5>: Cost 2 vext2 <0,2,4,u>, RHS
+ 1705577782U, // <4,u,4,6>: Cost 2 vuzpl RHS, RHS
+ 2718750801U, // <4,u,4,7>: Cost 3 vext3 <5,6,7,4>, <u,4,7,6>
+ 161926454U, // <4,u,4,u>: Cost 1 vdup0 RHS
+ 1479164006U, // <4,u,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
+ 1839650606U, // <4,u,5,1>: Cost 2 vzipl RHS, LHS
+ 2565367502U, // <4,u,5,2>: Cost 3 vext1 <2,4,u,5>, <2,3,4,5>
+ 3089777309U, // <4,u,5,3>: Cost 3 vtrnr <0,4,1,5>, LHS
+ 1479167286U, // <4,u,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
+ 1839650970U, // <4,u,5,5>: Cost 2 vzipl RHS, RHS
+ 1618172058U, // <4,u,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 3089780265U, // <4,u,5,7>: Cost 3 vtrnr <0,4,1,5>, RHS
+ 1618172076U, // <4,u,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
+ 1479688294U, // <4,u,6,0>: Cost 2 vext1 <0,4,u,6>, LHS
+ 2553430774U, // <4,u,6,1>: Cost 3 vext1 <0,4,u,6>, <1,0,3,2>
+ 1973868334U, // <4,u,6,2>: Cost 2 vtrnl RHS, LHS
+ 1497606685U, // <4,u,6,3>: Cost 2 vext1 <3,4,u,6>, <3,4,u,6>
+ 1479691574U, // <4,u,6,4>: Cost 2 vext1 <0,4,u,6>, RHS
+ 1509552079U, // <4,u,6,5>: Cost 2 vext1 <5,4,u,6>, <5,4,u,6>
+ 1973868698U, // <4,u,6,6>: Cost 2 vtrnl RHS, RHS
+ 27705344U, // <4,u,6,7>: Cost 0 copy RHS
+ 27705344U, // <4,u,6,u>: Cost 0 copy RHS
+ 2565382246U, // <4,u,7,0>: Cost 3 vext1 <2,4,u,7>, LHS
+ 2565383066U, // <4,u,7,1>: Cost 3 vext1 <2,4,u,7>, <1,2,3,4>
+ 2565384005U, // <4,u,7,2>: Cost 3 vext1 <2,4,u,7>, <2,4,u,7>
+ 2661405966U, // <4,u,7,3>: Cost 3 vext2 <7,3,4,u>, <7,3,4,u>
+ 2565385526U, // <4,u,7,4>: Cost 3 vext1 <2,4,u,7>, RHS
+ 2779321702U, // <4,u,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
+ 2589274793U, // <4,u,7,6>: Cost 3 vext1 <6,4,u,7>, <6,4,u,7>
+ 2779321964U, // <4,u,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
+ 2565388078U, // <4,u,7,u>: Cost 3 vext1 <2,4,u,7>, LHS
+ 1479704678U, // <4,u,u,0>: Cost 2 vext1 <0,4,u,u>, LHS
+ 1545197358U, // <4,u,u,1>: Cost 2 vext2 <0,2,4,u>, LHS
+ 1618172261U, // <4,u,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
+ 1497623071U, // <4,u,u,3>: Cost 2 vext1 <3,4,u,u>, <3,4,u,u>
+ 161926454U, // <4,u,u,4>: Cost 1 vdup0 RHS
+ 1545197722U, // <4,u,u,5>: Cost 2 vext2 <0,2,4,u>, RHS
+ 1618172301U, // <4,u,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
+ 27705344U, // <4,u,u,7>: Cost 0 copy RHS
+ 27705344U, // <4,u,u,u>: Cost 0 copy RHS
+ 2687123456U, // <5,0,0,0>: Cost 3 vext3 <0,4,1,5>, <0,0,0,0>
+ 2687123466U, // <5,0,0,1>: Cost 3 vext3 <0,4,1,5>, <0,0,1,1>
+ 2687123476U, // <5,0,0,2>: Cost 3 vext3 <0,4,1,5>, <0,0,2,2>
+ 3710599434U, // <5,0,0,3>: Cost 4 vext2 <3,2,5,0>, <0,3,2,5>
+ 2642166098U, // <5,0,0,4>: Cost 3 vext2 <4,1,5,0>, <0,4,1,5>
+ 3657060306U, // <5,0,0,5>: Cost 4 vext1 <5,5,0,0>, <5,5,0,0>
+ 3292094923U, // <5,0,0,6>: Cost 4 vrev <0,5,6,0>
+ 3669005700U, // <5,0,0,7>: Cost 4 vext1 <7,5,0,0>, <7,5,0,0>
+ 2687123530U, // <5,0,0,u>: Cost 3 vext3 <0,4,1,5>, <0,0,u,2>
+ 2559434854U, // <5,0,1,0>: Cost 3 vext1 <1,5,0,1>, LHS
+ 2559435887U, // <5,0,1,1>: Cost 3 vext1 <1,5,0,1>, <1,5,0,1>
+ 1613381734U, // <5,0,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 3698656256U, // <5,0,1,3>: Cost 4 vext2 <1,2,5,0>, <1,3,5,7>
+ 2559438134U, // <5,0,1,4>: Cost 3 vext1 <1,5,0,1>, RHS
+ 2583326675U, // <5,0,1,5>: Cost 3 vext1 <5,5,0,1>, <5,5,0,1>
+ 3715908851U, // <5,0,1,6>: Cost 4 vext2 <4,1,5,0>, <1,6,5,7>
+ 3657069562U, // <5,0,1,7>: Cost 4 vext1 <5,5,0,1>, <7,0,1,2>
+ 1613381788U, // <5,0,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2686017700U, // <5,0,2,0>: Cost 3 vext3 <0,2,4,5>, <0,2,0,2>
+ 2685796528U, // <5,0,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
+ 2698625208U, // <5,0,2,2>: Cost 3 vext3 <2,3,4,5>, <0,2,2,4>
+ 2685944002U, // <5,0,2,3>: Cost 3 vext3 <0,2,3,5>, <0,2,3,5>
+ 2686017739U, // <5,0,2,4>: Cost 3 vext3 <0,2,4,5>, <0,2,4,5>
+ 2686091476U, // <5,0,2,5>: Cost 3 vext3 <0,2,5,5>, <0,2,5,5>
+ 2725167324U, // <5,0,2,6>: Cost 3 vext3 <6,7,4,5>, <0,2,6,4>
+ 2595280230U, // <5,0,2,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
+ 2686312687U, // <5,0,2,u>: Cost 3 vext3 <0,2,u,5>, <0,2,u,5>
+ 3760128248U, // <5,0,3,0>: Cost 4 vext3 <0,3,0,5>, <0,3,0,5>
+ 3759685888U, // <5,0,3,1>: Cost 4 vext3 <0,2,3,5>, <0,3,1,4>
+ 2686533898U, // <5,0,3,2>: Cost 3 vext3 <0,3,2,5>, <0,3,2,5>
+ 3760349459U, // <5,0,3,3>: Cost 4 vext3 <0,3,3,5>, <0,3,3,5>
+ 2638187004U, // <5,0,3,4>: Cost 3 vext2 <3,4,5,0>, <3,4,5,0>
+ 3776348452U, // <5,0,3,5>: Cost 4 vext3 <3,0,4,5>, <0,3,5,4>
+ 3713256094U, // <5,0,3,6>: Cost 4 vext2 <3,6,5,0>, <3,6,5,0>
+ 3914064896U, // <5,0,3,7>: Cost 4 vuzpr <3,5,7,0>, <1,3,5,7>
+ 2686976320U, // <5,0,3,u>: Cost 3 vext3 <0,3,u,5>, <0,3,u,5>
+ 2559459430U, // <5,0,4,0>: Cost 3 vext1 <1,5,0,4>, LHS
+ 1613381970U, // <5,0,4,1>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
+ 2687123804U, // <5,0,4,2>: Cost 3 vext3 <0,4,1,5>, <0,4,2,6>
+ 3761013092U, // <5,0,4,3>: Cost 4 vext3 <0,4,3,5>, <0,4,3,5>
+ 2559462710U, // <5,0,4,4>: Cost 3 vext1 <1,5,0,4>, RHS
+ 2638187830U, // <5,0,4,5>: Cost 3 vext2 <3,4,5,0>, RHS
+ 3761234303U, // <5,0,4,6>: Cost 4 vext3 <0,4,6,5>, <0,4,6,5>
+ 2646150600U, // <5,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
+ 1613381970U, // <5,0,4,u>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
+ 3766763926U, // <5,0,5,0>: Cost 4 vext3 <1,4,0,5>, <0,5,0,1>
+ 2919268454U, // <5,0,5,1>: Cost 3 vzipl <5,5,5,5>, LHS
+ 3053486182U, // <5,0,5,2>: Cost 3 vtrnl <5,5,5,5>, LHS
+ 3723210589U, // <5,0,5,3>: Cost 4 vext2 <5,3,5,0>, <5,3,5,0>
+ 3766763966U, // <5,0,5,4>: Cost 4 vext3 <1,4,0,5>, <0,5,4,5>
+ 2650796031U, // <5,0,5,5>: Cost 3 vext2 <5,5,5,0>, <5,5,5,0>
+ 3719893090U, // <5,0,5,6>: Cost 4 vext2 <4,7,5,0>, <5,6,7,0>
+ 3914067254U, // <5,0,5,7>: Cost 4 vuzpr <3,5,7,0>, RHS
+ 2919269021U, // <5,0,5,u>: Cost 3 vzipl <5,5,5,5>, LHS
+ 4047519744U, // <5,0,6,0>: Cost 4 vzipr <3,4,5,6>, <0,0,0,0>
+ 2920038502U, // <5,0,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
+ 3759759871U, // <5,0,6,2>: Cost 4 vext3 <0,2,4,5>, <0,6,2,7>
+ 3645164070U, // <5,0,6,3>: Cost 4 vext1 <3,5,0,6>, <3,5,0,6>
+ 3762414095U, // <5,0,6,4>: Cost 4 vext3 <0,6,4,5>, <0,6,4,5>
+ 3993780690U, // <5,0,6,5>: Cost 4 vzipl <5,6,7,0>, <0,5,6,7>
+ 3719893816U, // <5,0,6,6>: Cost 4 vext2 <4,7,5,0>, <6,6,6,6>
+ 2662077302U, // <5,0,6,7>: Cost 3 vext2 <7,4,5,0>, <6,7,4,5>
+ 2920039069U, // <5,0,6,u>: Cost 3 vzipl <5,6,7,0>, LHS
+ 2565455974U, // <5,0,7,0>: Cost 3 vext1 <2,5,0,7>, LHS
+ 2565456790U, // <5,0,7,1>: Cost 3 vext1 <2,5,0,7>, <1,2,3,0>
+ 2565457742U, // <5,0,7,2>: Cost 3 vext1 <2,5,0,7>, <2,5,0,7>
+ 3639199894U, // <5,0,7,3>: Cost 4 vext1 <2,5,0,7>, <3,0,1,2>
+ 2565459254U, // <5,0,7,4>: Cost 3 vext1 <2,5,0,7>, RHS
+ 2589347938U, // <5,0,7,5>: Cost 3 vext1 <6,5,0,7>, <5,6,7,0>
+ 2589348530U, // <5,0,7,6>: Cost 3 vext1 <6,5,0,7>, <6,5,0,7>
+ 4188456422U, // <5,0,7,7>: Cost 4 vtrnr RHS, <2,0,5,7>
+ 2565461806U, // <5,0,7,u>: Cost 3 vext1 <2,5,0,7>, LHS
+ 2687124106U, // <5,0,u,0>: Cost 3 vext3 <0,4,1,5>, <0,u,0,2>
+ 1616036502U, // <5,0,u,1>: Cost 2 vext3 <0,u,1,5>, <0,u,1,5>
+ 1613382301U, // <5,0,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2689925800U, // <5,0,u,3>: Cost 3 vext3 <0,u,3,5>, <0,u,3,5>
+ 2687124146U, // <5,0,u,4>: Cost 3 vext3 <0,4,1,5>, <0,u,4,6>
+ 2638190746U, // <5,0,u,5>: Cost 3 vext2 <3,4,5,0>, RHS
+ 2589356723U, // <5,0,u,6>: Cost 3 vext1 <6,5,0,u>, <6,5,0,u>
+ 2595280230U, // <5,0,u,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
+ 1613382355U, // <5,0,u,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2646818816U, // <5,1,0,0>: Cost 3 vext2 <4,u,5,1>, <0,0,0,0>
+ 1573077094U, // <5,1,0,1>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2646818980U, // <5,1,0,2>: Cost 3 vext2 <4,u,5,1>, <0,2,0,2>
+ 2687124214U, // <5,1,0,3>: Cost 3 vext3 <0,4,1,5>, <1,0,3,2>
+ 2641510738U, // <5,1,0,4>: Cost 3 vext2 <4,0,5,1>, <0,4,1,5>
+ 2641510814U, // <5,1,0,5>: Cost 3 vext2 <4,0,5,1>, <0,5,1,0>
+ 3720561142U, // <5,1,0,6>: Cost 4 vext2 <4,u,5,1>, <0,6,1,7>
+ 3298141357U, // <5,1,0,7>: Cost 4 vrev <1,5,7,0>
+ 1573077661U, // <5,1,0,u>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2223891567U, // <5,1,1,0>: Cost 3 vrev <1,5,0,1>
+ 2687124276U, // <5,1,1,1>: Cost 3 vext3 <0,4,1,5>, <1,1,1,1>
+ 2646819734U, // <5,1,1,2>: Cost 3 vext2 <4,u,5,1>, <1,2,3,0>
+ 2687124296U, // <5,1,1,3>: Cost 3 vext3 <0,4,1,5>, <1,1,3,3>
+ 2691326803U, // <5,1,1,4>: Cost 3 vext3 <1,1,4,5>, <1,1,4,5>
+ 2691400540U, // <5,1,1,5>: Cost 3 vext3 <1,1,5,5>, <1,1,5,5>
+ 3765216101U, // <5,1,1,6>: Cost 4 vext3 <1,1,6,5>, <1,1,6,5>
+ 3765289838U, // <5,1,1,7>: Cost 4 vext3 <1,1,7,5>, <1,1,7,5>
+ 2687124341U, // <5,1,1,u>: Cost 3 vext3 <0,4,1,5>, <1,1,u,3>
+ 3297641584U, // <5,1,2,0>: Cost 4 vrev <1,5,0,2>
+ 3763520391U, // <5,1,2,1>: Cost 4 vext3 <0,u,1,5>, <1,2,1,3>
+ 2646820456U, // <5,1,2,2>: Cost 3 vext2 <4,u,5,1>, <2,2,2,2>
+ 2687124374U, // <5,1,2,3>: Cost 3 vext3 <0,4,1,5>, <1,2,3,0>
+ 2691990436U, // <5,1,2,4>: Cost 3 vext3 <1,2,4,5>, <1,2,4,5>
+ 2687124395U, // <5,1,2,5>: Cost 3 vext3 <0,4,1,5>, <1,2,5,3>
+ 2646820794U, // <5,1,2,6>: Cost 3 vext2 <4,u,5,1>, <2,6,3,7>
+ 3808199610U, // <5,1,2,7>: Cost 4 vext3 <u,3,4,5>, <1,2,7,0>
+ 2687124419U, // <5,1,2,u>: Cost 3 vext3 <0,4,1,5>, <1,2,u,0>
+ 2577440870U, // <5,1,3,0>: Cost 3 vext1 <4,5,1,3>, LHS
+ 2687124440U, // <5,1,3,1>: Cost 3 vext3 <0,4,1,5>, <1,3,1,3>
+ 3759686627U, // <5,1,3,2>: Cost 4 vext3 <0,2,3,5>, <1,3,2,5>
+ 2692580332U, // <5,1,3,3>: Cost 3 vext3 <1,3,3,5>, <1,3,3,5>
+ 2687124469U, // <5,1,3,4>: Cost 3 vext3 <0,4,1,5>, <1,3,4,5>
+ 2685207552U, // <5,1,3,5>: Cost 3 vext3 <0,1,2,5>, <1,3,5,7>
+ 3760866313U, // <5,1,3,6>: Cost 4 vext3 <0,4,1,5>, <1,3,6,7>
+ 2692875280U, // <5,1,3,7>: Cost 3 vext3 <1,3,7,5>, <1,3,7,5>
+ 2687124503U, // <5,1,3,u>: Cost 3 vext3 <0,4,1,5>, <1,3,u,3>
+ 1567771538U, // <5,1,4,0>: Cost 2 vext2 <4,0,5,1>, <4,0,5,1>
+ 2693096491U, // <5,1,4,1>: Cost 3 vext3 <1,4,1,5>, <1,4,1,5>
+ 2693170228U, // <5,1,4,2>: Cost 3 vext3 <1,4,2,5>, <1,4,2,5>
+ 2687124541U, // <5,1,4,3>: Cost 3 vext3 <0,4,1,5>, <1,4,3,5>
+ 2646822096U, // <5,1,4,4>: Cost 3 vext2 <4,u,5,1>, <4,4,4,4>
+ 1573080374U, // <5,1,4,5>: Cost 2 vext2 <4,u,5,1>, RHS
+ 2646822260U, // <5,1,4,6>: Cost 3 vext2 <4,u,5,1>, <4,6,4,6>
+ 3298174129U, // <5,1,4,7>: Cost 4 vrev <1,5,7,4>
+ 1573080602U, // <5,1,4,u>: Cost 2 vext2 <4,u,5,1>, <4,u,5,1>
+ 2687124591U, // <5,1,5,0>: Cost 3 vext3 <0,4,1,5>, <1,5,0,1>
+ 2646822543U, // <5,1,5,1>: Cost 3 vext2 <4,u,5,1>, <5,1,0,1>
+ 3760866433U, // <5,1,5,2>: Cost 4 vext3 <0,4,1,5>, <1,5,2,1>
+ 2687124624U, // <5,1,5,3>: Cost 3 vext3 <0,4,1,5>, <1,5,3,7>
+ 2687124631U, // <5,1,5,4>: Cost 3 vext3 <0,4,1,5>, <1,5,4,5>
+ 2646822916U, // <5,1,5,5>: Cost 3 vext2 <4,u,5,1>, <5,5,5,5>
+ 2646823010U, // <5,1,5,6>: Cost 3 vext2 <4,u,5,1>, <5,6,7,0>
+ 2646823080U, // <5,1,5,7>: Cost 3 vext2 <4,u,5,1>, <5,7,5,7>
+ 2687124663U, // <5,1,5,u>: Cost 3 vext3 <0,4,1,5>, <1,5,u,1>
+ 2553577574U, // <5,1,6,0>: Cost 3 vext1 <0,5,1,6>, LHS
+ 3763520719U, // <5,1,6,1>: Cost 4 vext3 <0,u,1,5>, <1,6,1,7>
+ 2646823418U, // <5,1,6,2>: Cost 3 vext2 <4,u,5,1>, <6,2,7,3>
+ 3760866529U, // <5,1,6,3>: Cost 4 vext3 <0,4,1,5>, <1,6,3,7>
+ 2553580854U, // <5,1,6,4>: Cost 3 vext1 <0,5,1,6>, RHS
+ 2687124723U, // <5,1,6,5>: Cost 3 vext3 <0,4,1,5>, <1,6,5,7>
+ 2646823736U, // <5,1,6,6>: Cost 3 vext2 <4,u,5,1>, <6,6,6,6>
+ 2646823758U, // <5,1,6,7>: Cost 3 vext2 <4,u,5,1>, <6,7,0,1>
+ 2646823839U, // <5,1,6,u>: Cost 3 vext2 <4,u,5,1>, <6,u,0,1>
+ 2559557734U, // <5,1,7,0>: Cost 3 vext1 <1,5,1,7>, LHS
+ 2559558452U, // <5,1,7,1>: Cost 3 vext1 <1,5,1,7>, <1,1,1,1>
+ 2571503270U, // <5,1,7,2>: Cost 3 vext1 <3,5,1,7>, <2,3,0,1>
+ 2040971366U, // <5,1,7,3>: Cost 2 vtrnr RHS, LHS
+ 2559561014U, // <5,1,7,4>: Cost 3 vext1 <1,5,1,7>, RHS
+ 2595393232U, // <5,1,7,5>: Cost 3 vext1 <7,5,1,7>, <5,1,7,3>
+ 4188455035U, // <5,1,7,6>: Cost 4 vtrnr RHS, <0,1,4,6>
+ 2646824556U, // <5,1,7,7>: Cost 3 vext2 <4,u,5,1>, <7,7,7,7>
+ 2040971371U, // <5,1,7,u>: Cost 2 vtrnr RHS, LHS
+ 1591662326U, // <5,1,u,0>: Cost 2 vext2 <u,0,5,1>, <u,0,5,1>
+ 1573082926U, // <5,1,u,1>: Cost 2 vext2 <4,u,5,1>, LHS
+ 2695824760U, // <5,1,u,2>: Cost 3 vext3 <1,u,2,5>, <1,u,2,5>
+ 2040979558U, // <5,1,u,3>: Cost 2 vtrnr RHS, LHS
+ 2687124874U, // <5,1,u,4>: Cost 3 vext3 <0,4,1,5>, <1,u,4,5>
+ 1573083290U, // <5,1,u,5>: Cost 2 vext2 <4,u,5,1>, RHS
+ 2646825168U, // <5,1,u,6>: Cost 3 vext2 <4,u,5,1>, <u,6,3,7>
+ 2646825216U, // <5,1,u,7>: Cost 3 vext2 <4,u,5,1>, <u,7,0,1>
+ 2040979563U, // <5,1,u,u>: Cost 2 vtrnr RHS, LHS
+ 3702652928U, // <5,2,0,0>: Cost 4 vext2 <1,u,5,2>, <0,0,0,0>
+ 2628911206U, // <5,2,0,1>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2641518756U, // <5,2,0,2>: Cost 3 vext2 <4,0,5,2>, <0,2,0,2>
+ 3759760847U, // <5,2,0,3>: Cost 4 vext3 <0,2,4,5>, <2,0,3,2>
+ 3760866775U, // <5,2,0,4>: Cost 4 vext3 <0,4,1,5>, <2,0,4,1>
+ 3759539680U, // <5,2,0,5>: Cost 4 vext3 <0,2,1,5>, <2,0,5,1>
+ 3760866796U, // <5,2,0,6>: Cost 4 vext3 <0,4,1,5>, <2,0,6,4>
+ 3304114054U, // <5,2,0,7>: Cost 4 vrev <2,5,7,0>
+ 2628911773U, // <5,2,0,u>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2623603464U, // <5,2,1,0>: Cost 3 vext2 <1,0,5,2>, <1,0,5,2>
+ 3698008921U, // <5,2,1,1>: Cost 4 vext2 <1,1,5,2>, <1,1,5,2>
+ 3633325603U, // <5,2,1,2>: Cost 4 vext1 <1,5,2,1>, <2,1,3,5>
+ 2687125027U, // <5,2,1,3>: Cost 3 vext3 <0,4,1,5>, <2,1,3,5>
+ 3633327414U, // <5,2,1,4>: Cost 4 vext1 <1,5,2,1>, RHS
+ 3759539760U, // <5,2,1,5>: Cost 4 vext3 <0,2,1,5>, <2,1,5,0>
+ 3760866876U, // <5,2,1,6>: Cost 4 vext3 <0,4,1,5>, <2,1,6,3>
+ 3304122247U, // <5,2,1,7>: Cost 4 vrev <2,5,7,1>
+ 2687125072U, // <5,2,1,u>: Cost 3 vext3 <0,4,1,5>, <2,1,u,5>
+ 3633332326U, // <5,2,2,0>: Cost 4 vext1 <1,5,2,2>, LHS
+ 3759760992U, // <5,2,2,1>: Cost 4 vext3 <0,2,4,5>, <2,2,1,3>
+ 2687125096U, // <5,2,2,2>: Cost 3 vext3 <0,4,1,5>, <2,2,2,2>
+ 2687125106U, // <5,2,2,3>: Cost 3 vext3 <0,4,1,5>, <2,2,3,3>
+ 2697963133U, // <5,2,2,4>: Cost 3 vext3 <2,2,4,5>, <2,2,4,5>
+ 3759466120U, // <5,2,2,5>: Cost 4 vext3 <0,2,0,5>, <2,2,5,7>
+ 3760866960U, // <5,2,2,6>: Cost 4 vext3 <0,4,1,5>, <2,2,6,6>
+ 3771926168U, // <5,2,2,7>: Cost 4 vext3 <2,2,7,5>, <2,2,7,5>
+ 2687125151U, // <5,2,2,u>: Cost 3 vext3 <0,4,1,5>, <2,2,u,3>
+ 2687125158U, // <5,2,3,0>: Cost 3 vext3 <0,4,1,5>, <2,3,0,1>
+ 2698405555U, // <5,2,3,1>: Cost 3 vext3 <2,3,1,5>, <2,3,1,5>
+ 2577516238U, // <5,2,3,2>: Cost 3 vext1 <4,5,2,3>, <2,3,4,5>
+ 3759687365U, // <5,2,3,3>: Cost 4 vext3 <0,2,3,5>, <2,3,3,5>
+ 1624884942U, // <5,2,3,4>: Cost 2 vext3 <2,3,4,5>, <2,3,4,5>
+ 2698700503U, // <5,2,3,5>: Cost 3 vext3 <2,3,5,5>, <2,3,5,5>
+ 3772368608U, // <5,2,3,6>: Cost 4 vext3 <2,3,4,5>, <2,3,6,5>
+ 3702655716U, // <5,2,3,7>: Cost 4 vext2 <1,u,5,2>, <3,7,3,7>
+ 1625179890U, // <5,2,3,u>: Cost 2 vext3 <2,3,u,5>, <2,3,u,5>
+ 2641521555U, // <5,2,4,0>: Cost 3 vext2 <4,0,5,2>, <4,0,5,2>
+ 3772368642U, // <5,2,4,1>: Cost 4 vext3 <2,3,4,5>, <2,4,1,3>
+ 2699142925U, // <5,2,4,2>: Cost 3 vext3 <2,4,2,5>, <2,4,2,5>
+ 2698626838U, // <5,2,4,3>: Cost 3 vext3 <2,3,4,5>, <2,4,3,5>
+ 2698626848U, // <5,2,4,4>: Cost 3 vext3 <2,3,4,5>, <2,4,4,6>
+ 2628914486U, // <5,2,4,5>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2645503353U, // <5,2,4,6>: Cost 3 vext2 <4,6,5,2>, <4,6,5,2>
+ 3304146826U, // <5,2,4,7>: Cost 4 vrev <2,5,7,4>
+ 2628914729U, // <5,2,4,u>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2553643110U, // <5,2,5,0>: Cost 3 vext1 <0,5,2,5>, LHS
+ 3758950227U, // <5,2,5,1>: Cost 4 vext3 <0,1,2,5>, <2,5,1,3>
+ 3759761248U, // <5,2,5,2>: Cost 4 vext3 <0,2,4,5>, <2,5,2,7>
+ 2982396006U, // <5,2,5,3>: Cost 3 vzipr <4,u,5,5>, LHS
+ 2553646390U, // <5,2,5,4>: Cost 3 vext1 <0,5,2,5>, RHS
+ 2553647108U, // <5,2,5,5>: Cost 3 vext1 <0,5,2,5>, <5,5,5,5>
+ 3760867204U, // <5,2,5,6>: Cost 4 vext3 <0,4,1,5>, <2,5,6,7>
+ 3702657141U, // <5,2,5,7>: Cost 4 vext2 <1,u,5,2>, <5,7,0,1>
+ 2982396011U, // <5,2,5,u>: Cost 3 vzipr <4,u,5,5>, LHS
+ 3627393126U, // <5,2,6,0>: Cost 4 vext1 <0,5,2,6>, LHS
+ 3760867236U, // <5,2,6,1>: Cost 4 vext3 <0,4,1,5>, <2,6,1,3>
+ 2645504506U, // <5,2,6,2>: Cost 3 vext2 <4,6,5,2>, <6,2,7,3>
+ 2687125434U, // <5,2,6,3>: Cost 3 vext3 <0,4,1,5>, <2,6,3,7>
+ 2700617665U, // <5,2,6,4>: Cost 3 vext3 <2,6,4,5>, <2,6,4,5>
+ 3760867276U, // <5,2,6,5>: Cost 4 vext3 <0,4,1,5>, <2,6,5,7>
+ 3763521493U, // <5,2,6,6>: Cost 4 vext3 <0,u,1,5>, <2,6,6,7>
+ 3719246670U, // <5,2,6,7>: Cost 4 vext2 <4,6,5,2>, <6,7,0,1>
+ 2687125479U, // <5,2,6,u>: Cost 3 vext3 <0,4,1,5>, <2,6,u,7>
+ 2565603430U, // <5,2,7,0>: Cost 3 vext1 <2,5,2,7>, LHS
+ 2553660150U, // <5,2,7,1>: Cost 3 vext1 <0,5,2,7>, <1,0,3,2>
+ 2565605216U, // <5,2,7,2>: Cost 3 vext1 <2,5,2,7>, <2,5,2,7>
+ 2961178726U, // <5,2,7,3>: Cost 3 vzipr <1,3,5,7>, LHS
+ 2565606710U, // <5,2,7,4>: Cost 3 vext1 <2,5,2,7>, RHS
+ 4034920552U, // <5,2,7,5>: Cost 4 vzipr <1,3,5,7>, <0,1,2,5>
+ 3114713292U, // <5,2,7,6>: Cost 3 vtrnr RHS, <0,2,4,6>
+ 3702658668U, // <5,2,7,7>: Cost 4 vext2 <1,u,5,2>, <7,7,7,7>
+ 2961178731U, // <5,2,7,u>: Cost 3 vzipr <1,3,5,7>, LHS
+ 2687125563U, // <5,2,u,0>: Cost 3 vext3 <0,4,1,5>, <2,u,0,1>
+ 2628917038U, // <5,2,u,1>: Cost 3 vext2 <1,u,5,2>, LHS
+ 2565613409U, // <5,2,u,2>: Cost 3 vext1 <2,5,2,u>, <2,5,2,u>
+ 2687125592U, // <5,2,u,3>: Cost 3 vext3 <0,4,1,5>, <2,u,3,3>
+ 1628203107U, // <5,2,u,4>: Cost 2 vext3 <2,u,4,5>, <2,u,4,5>
+ 2628917402U, // <5,2,u,5>: Cost 3 vext2 <1,u,5,2>, RHS
+ 2702092405U, // <5,2,u,6>: Cost 3 vext3 <2,u,6,5>, <2,u,6,5>
+ 3304179598U, // <5,2,u,7>: Cost 4 vrev <2,5,7,u>
+ 1628498055U, // <5,2,u,u>: Cost 2 vext3 <2,u,u,5>, <2,u,u,5>
+ 3760867467U, // <5,3,0,0>: Cost 4 vext3 <0,4,1,5>, <3,0,0,0>
+ 2687125654U, // <5,3,0,1>: Cost 3 vext3 <0,4,1,5>, <3,0,1,2>
+ 3759761565U, // <5,3,0,2>: Cost 4 vext3 <0,2,4,5>, <3,0,2,0>
+ 3633391766U, // <5,3,0,3>: Cost 4 vext1 <1,5,3,0>, <3,0,1,2>
+ 2687125680U, // <5,3,0,4>: Cost 3 vext3 <0,4,1,5>, <3,0,4,1>
+ 3760277690U, // <5,3,0,5>: Cost 4 vext3 <0,3,2,5>, <3,0,5,2>
+ 3310013014U, // <5,3,0,6>: Cost 4 vrev <3,5,6,0>
+ 2236344927U, // <5,3,0,7>: Cost 3 vrev <3,5,7,0>
+ 2687125717U, // <5,3,0,u>: Cost 3 vext3 <0,4,1,5>, <3,0,u,2>
+ 3760867551U, // <5,3,1,0>: Cost 4 vext3 <0,4,1,5>, <3,1,0,3>
+ 3760867558U, // <5,3,1,1>: Cost 4 vext3 <0,4,1,5>, <3,1,1,1>
+ 2624938923U, // <5,3,1,2>: Cost 3 vext2 <1,2,5,3>, <1,2,5,3>
+ 2703198460U, // <5,3,1,3>: Cost 3 vext3 <3,1,3,5>, <3,1,3,5>
+ 3760867587U, // <5,3,1,4>: Cost 4 vext3 <0,4,1,5>, <3,1,4,3>
+ 2636219536U, // <5,3,1,5>: Cost 3 vext2 <3,1,5,3>, <1,5,3,7>
+ 3698681075U, // <5,3,1,6>: Cost 4 vext2 <1,2,5,3>, <1,6,5,7>
+ 2703493408U, // <5,3,1,7>: Cost 3 vext3 <3,1,7,5>, <3,1,7,5>
+ 2628920721U, // <5,3,1,u>: Cost 3 vext2 <1,u,5,3>, <1,u,5,3>
+ 3766765870U, // <5,3,2,0>: Cost 4 vext3 <1,4,0,5>, <3,2,0,1>
+ 3698681379U, // <5,3,2,1>: Cost 4 vext2 <1,2,5,3>, <2,1,3,5>
+ 3760867649U, // <5,3,2,2>: Cost 4 vext3 <0,4,1,5>, <3,2,2,2>
+ 2698627404U, // <5,3,2,3>: Cost 3 vext3 <2,3,4,5>, <3,2,3,4>
+ 2703935830U, // <5,3,2,4>: Cost 3 vext3 <3,2,4,5>, <3,2,4,5>
+ 2698627422U, // <5,3,2,5>: Cost 3 vext3 <2,3,4,5>, <3,2,5,4>
+ 3760867686U, // <5,3,2,6>: Cost 4 vext3 <0,4,1,5>, <3,2,6,3>
+ 3769788783U, // <5,3,2,7>: Cost 4 vext3 <1,u,5,5>, <3,2,7,3>
+ 2701945209U, // <5,3,2,u>: Cost 3 vext3 <2,u,4,5>, <3,2,u,4>
+ 3760867711U, // <5,3,3,0>: Cost 4 vext3 <0,4,1,5>, <3,3,0,1>
+ 2636220684U, // <5,3,3,1>: Cost 3 vext2 <3,1,5,3>, <3,1,5,3>
+ 3772369298U, // <5,3,3,2>: Cost 4 vext3 <2,3,4,5>, <3,3,2,2>
+ 2687125916U, // <5,3,3,3>: Cost 3 vext3 <0,4,1,5>, <3,3,3,3>
+ 2704599463U, // <5,3,3,4>: Cost 3 vext3 <3,3,4,5>, <3,3,4,5>
+ 2704673200U, // <5,3,3,5>: Cost 3 vext3 <3,3,5,5>, <3,3,5,5>
+ 3709962935U, // <5,3,3,6>: Cost 4 vext2 <3,1,5,3>, <3,6,7,7>
+ 3772369346U, // <5,3,3,7>: Cost 4 vext3 <2,3,4,5>, <3,3,7,5>
+ 2704894411U, // <5,3,3,u>: Cost 3 vext3 <3,3,u,5>, <3,3,u,5>
+ 2704968148U, // <5,3,4,0>: Cost 3 vext3 <3,4,0,5>, <3,4,0,5>
+ 3698682850U, // <5,3,4,1>: Cost 4 vext2 <1,2,5,3>, <4,1,5,0>
+ 2642857014U, // <5,3,4,2>: Cost 3 vext2 <4,2,5,3>, <4,2,5,3>
+ 2705189359U, // <5,3,4,3>: Cost 3 vext3 <3,4,3,5>, <3,4,3,5>
+ 2705263096U, // <5,3,4,4>: Cost 3 vext3 <3,4,4,5>, <3,4,4,5>
+ 2685946370U, // <5,3,4,5>: Cost 3 vext3 <0,2,3,5>, <3,4,5,6>
+ 3779152394U, // <5,3,4,6>: Cost 4 vext3 <3,4,6,5>, <3,4,6,5>
+ 2236377699U, // <5,3,4,7>: Cost 3 vrev <3,5,7,4>
+ 2687126045U, // <5,3,4,u>: Cost 3 vext3 <0,4,1,5>, <3,4,u,6>
+ 2571632742U, // <5,3,5,0>: Cost 3 vext1 <3,5,3,5>, LHS
+ 2559689870U, // <5,3,5,1>: Cost 3 vext1 <1,5,3,5>, <1,5,3,5>
+ 2571634382U, // <5,3,5,2>: Cost 3 vext1 <3,5,3,5>, <2,3,4,5>
+ 2571635264U, // <5,3,5,3>: Cost 3 vext1 <3,5,3,5>, <3,5,3,5>
+ 2571636022U, // <5,3,5,4>: Cost 3 vext1 <3,5,3,5>, RHS
+ 2559692804U, // <5,3,5,5>: Cost 3 vext1 <1,5,3,5>, <5,5,5,5>
+ 3720581218U, // <5,3,5,6>: Cost 4 vext2 <4,u,5,3>, <5,6,7,0>
+ 2236385892U, // <5,3,5,7>: Cost 3 vrev <3,5,7,5>
+ 2571638574U, // <5,3,5,u>: Cost 3 vext1 <3,5,3,5>, LHS
+ 2565668966U, // <5,3,6,0>: Cost 3 vext1 <2,5,3,6>, LHS
+ 3633439887U, // <5,3,6,1>: Cost 4 vext1 <1,5,3,6>, <1,5,3,6>
+ 2565670760U, // <5,3,6,2>: Cost 3 vext1 <2,5,3,6>, <2,5,3,6>
+ 2565671426U, // <5,3,6,3>: Cost 3 vext1 <2,5,3,6>, <3,4,5,6>
+ 2565672246U, // <5,3,6,4>: Cost 3 vext1 <2,5,3,6>, RHS
+ 3639414630U, // <5,3,6,5>: Cost 4 vext1 <2,5,3,6>, <5,3,6,0>
+ 4047521640U, // <5,3,6,6>: Cost 4 vzipr <3,4,5,6>, <2,5,3,6>
+ 2725169844U, // <5,3,6,7>: Cost 3 vext3 <6,7,4,5>, <3,6,7,4>
+ 2565674798U, // <5,3,6,u>: Cost 3 vext1 <2,5,3,6>, LHS
+ 1485963366U, // <5,3,7,0>: Cost 2 vext1 <1,5,3,7>, LHS
+ 1485964432U, // <5,3,7,1>: Cost 2 vext1 <1,5,3,7>, <1,5,3,7>
+ 2559706728U, // <5,3,7,2>: Cost 3 vext1 <1,5,3,7>, <2,2,2,2>
+ 2559707286U, // <5,3,7,3>: Cost 3 vext1 <1,5,3,7>, <3,0,1,2>
+ 1485966646U, // <5,3,7,4>: Cost 2 vext1 <1,5,3,7>, RHS
+ 2559708880U, // <5,3,7,5>: Cost 3 vext1 <1,5,3,7>, <5,1,7,3>
+ 2601513466U, // <5,3,7,6>: Cost 3 vext1 <u,5,3,7>, <6,2,7,3>
+ 3114714112U, // <5,3,7,7>: Cost 3 vtrnr RHS, <1,3,5,7>
+ 1485969198U, // <5,3,7,u>: Cost 2 vext1 <1,5,3,7>, LHS
+ 1485971558U, // <5,3,u,0>: Cost 2 vext1 <1,5,3,u>, LHS
+ 1485972625U, // <5,3,u,1>: Cost 2 vext1 <1,5,3,u>, <1,5,3,u>
+ 2559714920U, // <5,3,u,2>: Cost 3 vext1 <1,5,3,u>, <2,2,2,2>
+ 2559715478U, // <5,3,u,3>: Cost 3 vext1 <1,5,3,u>, <3,0,1,2>
+ 1485974838U, // <5,3,u,4>: Cost 2 vext1 <1,5,3,u>, RHS
+ 2687126342U, // <5,3,u,5>: Cost 3 vext3 <0,4,1,5>, <3,u,5,6>
+ 2601521658U, // <5,3,u,6>: Cost 3 vext1 <u,5,3,u>, <6,2,7,3>
+ 2236410471U, // <5,3,u,7>: Cost 3 vrev <3,5,7,u>
+ 1485977390U, // <5,3,u,u>: Cost 2 vext1 <1,5,3,u>, LHS
+ 3627491430U, // <5,4,0,0>: Cost 4 vext1 <0,5,4,0>, LHS
+ 2636890214U, // <5,4,0,1>: Cost 3 vext2 <3,2,5,4>, LHS
+ 3703333028U, // <5,4,0,2>: Cost 4 vext2 <2,0,5,4>, <0,2,0,2>
+ 3782249348U, // <5,4,0,3>: Cost 4 vext3 <4,0,3,5>, <4,0,3,5>
+ 2642198866U, // <5,4,0,4>: Cost 3 vext2 <4,1,5,4>, <0,4,1,5>
+ 2687126418U, // <5,4,0,5>: Cost 3 vext3 <0,4,1,5>, <4,0,5,1>
+ 2242243887U, // <5,4,0,6>: Cost 3 vrev <4,5,6,0>
+ 3316059448U, // <5,4,0,7>: Cost 4 vrev <4,5,7,0>
+ 2636890781U, // <5,4,0,u>: Cost 3 vext2 <3,2,5,4>, LHS
+ 2241809658U, // <5,4,1,0>: Cost 3 vrev <4,5,0,1>
+ 3698025307U, // <5,4,1,1>: Cost 4 vext2 <1,1,5,4>, <1,1,5,4>
+ 3698688940U, // <5,4,1,2>: Cost 4 vext2 <1,2,5,4>, <1,2,5,4>
+ 3698689024U, // <5,4,1,3>: Cost 4 vext2 <1,2,5,4>, <1,3,5,7>
+ 3700016206U, // <5,4,1,4>: Cost 4 vext2 <1,4,5,4>, <1,4,5,4>
+ 2687126498U, // <5,4,1,5>: Cost 3 vext3 <0,4,1,5>, <4,1,5,0>
+ 3760868336U, // <5,4,1,6>: Cost 4 vext3 <0,4,1,5>, <4,1,6,5>
+ 3316067641U, // <5,4,1,7>: Cost 4 vrev <4,5,7,1>
+ 2242399554U, // <5,4,1,u>: Cost 3 vrev <4,5,u,1>
+ 3703334371U, // <5,4,2,0>: Cost 4 vext2 <2,0,5,4>, <2,0,5,4>
+ 3703998004U, // <5,4,2,1>: Cost 4 vext2 <2,1,5,4>, <2,1,5,4>
+ 3704661637U, // <5,4,2,2>: Cost 4 vext2 <2,2,5,4>, <2,2,5,4>
+ 2636891854U, // <5,4,2,3>: Cost 3 vext2 <3,2,5,4>, <2,3,4,5>
+ 3705988903U, // <5,4,2,4>: Cost 4 vext2 <2,4,5,4>, <2,4,5,4>
+ 2698628150U, // <5,4,2,5>: Cost 3 vext3 <2,3,4,5>, <4,2,5,3>
+ 3760868415U, // <5,4,2,6>: Cost 4 vext3 <0,4,1,5>, <4,2,6,3>
+ 3783871562U, // <5,4,2,7>: Cost 4 vext3 <4,2,7,5>, <4,2,7,5>
+ 2666752099U, // <5,4,2,u>: Cost 3 vext2 <u,2,5,4>, <2,u,4,5>
+ 3639459942U, // <5,4,3,0>: Cost 4 vext1 <2,5,4,3>, LHS
+ 3709970701U, // <5,4,3,1>: Cost 4 vext2 <3,1,5,4>, <3,1,5,4>
+ 2636892510U, // <5,4,3,2>: Cost 3 vext2 <3,2,5,4>, <3,2,5,4>
+ 3710634396U, // <5,4,3,3>: Cost 4 vext2 <3,2,5,4>, <3,3,3,3>
+ 2638219776U, // <5,4,3,4>: Cost 3 vext2 <3,4,5,4>, <3,4,5,4>
+ 3766987908U, // <5,4,3,5>: Cost 4 vext3 <1,4,3,5>, <4,3,5,0>
+ 2710719634U, // <5,4,3,6>: Cost 3 vext3 <4,3,6,5>, <4,3,6,5>
+ 3914097664U, // <5,4,3,7>: Cost 4 vuzpr <3,5,7,4>, <1,3,5,7>
+ 2640874308U, // <5,4,3,u>: Cost 3 vext2 <3,u,5,4>, <3,u,5,4>
+ 2583642214U, // <5,4,4,0>: Cost 3 vext1 <5,5,4,4>, LHS
+ 2642201574U, // <5,4,4,1>: Cost 3 vext2 <4,1,5,4>, <4,1,5,4>
+ 3710635062U, // <5,4,4,2>: Cost 4 vext2 <3,2,5,4>, <4,2,5,3>
+ 3717270664U, // <5,4,4,3>: Cost 4 vext2 <4,3,5,4>, <4,3,5,4>
+ 2713963728U, // <5,4,4,4>: Cost 3 vext3 <4,u,5,5>, <4,4,4,4>
+ 1637567706U, // <5,4,4,5>: Cost 2 vext3 <4,4,5,5>, <4,4,5,5>
+ 2242276659U, // <5,4,4,6>: Cost 3 vrev <4,5,6,4>
+ 2646183372U, // <5,4,4,7>: Cost 3 vext2 <4,7,5,4>, <4,7,5,4>
+ 1637788917U, // <5,4,4,u>: Cost 2 vext3 <4,4,u,5>, <4,4,u,5>
+ 2559762534U, // <5,4,5,0>: Cost 3 vext1 <1,5,4,5>, LHS
+ 2559763607U, // <5,4,5,1>: Cost 3 vext1 <1,5,4,5>, <1,5,4,5>
+ 2698628366U, // <5,4,5,2>: Cost 3 vext3 <2,3,4,5>, <4,5,2,3>
+ 3633506454U, // <5,4,5,3>: Cost 4 vext1 <1,5,4,5>, <3,0,1,2>
+ 2559765814U, // <5,4,5,4>: Cost 3 vext1 <1,5,4,5>, RHS
+ 2583654395U, // <5,4,5,5>: Cost 3 vext1 <5,5,4,5>, <5,5,4,5>
+ 1613385014U, // <5,4,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
+ 3901639990U, // <5,4,5,7>: Cost 4 vuzpr <1,5,0,4>, RHS
+ 1613385032U, // <5,4,5,u>: Cost 2 vext3 <0,4,1,5>, RHS
+ 2559770726U, // <5,4,6,0>: Cost 3 vext1 <1,5,4,6>, LHS
+ 2559771648U, // <5,4,6,1>: Cost 3 vext1 <1,5,4,6>, <1,3,5,7>
+ 3633514088U, // <5,4,6,2>: Cost 4 vext1 <1,5,4,6>, <2,2,2,2>
+ 2571717122U, // <5,4,6,3>: Cost 3 vext1 <3,5,4,6>, <3,4,5,6>
+ 2559774006U, // <5,4,6,4>: Cost 3 vext1 <1,5,4,6>, RHS
+ 2712636796U, // <5,4,6,5>: Cost 3 vext3 <4,6,5,5>, <4,6,5,5>
+ 3760868743U, // <5,4,6,6>: Cost 4 vext3 <0,4,1,5>, <4,6,6,7>
+ 2712784270U, // <5,4,6,7>: Cost 3 vext3 <4,6,7,5>, <4,6,7,5>
+ 2559776558U, // <5,4,6,u>: Cost 3 vext1 <1,5,4,6>, LHS
+ 2565750886U, // <5,4,7,0>: Cost 3 vext1 <2,5,4,7>, LHS
+ 2565751706U, // <5,4,7,1>: Cost 3 vext1 <2,5,4,7>, <1,2,3,4>
+ 2565752690U, // <5,4,7,2>: Cost 3 vext1 <2,5,4,7>, <2,5,4,7>
+ 2571725387U, // <5,4,7,3>: Cost 3 vext1 <3,5,4,7>, <3,5,4,7>
+ 2565754166U, // <5,4,7,4>: Cost 3 vext1 <2,5,4,7>, RHS
+ 3114713426U, // <5,4,7,5>: Cost 3 vtrnr RHS, <0,4,1,5>
+ 94817590U, // <5,4,7,6>: Cost 1 vrev RHS
+ 2595616175U, // <5,4,7,7>: Cost 3 vext1 <7,5,4,7>, <7,5,4,7>
+ 94965064U, // <5,4,7,u>: Cost 1 vrev RHS
+ 2559787110U, // <5,4,u,0>: Cost 3 vext1 <1,5,4,u>, LHS
+ 2559788186U, // <5,4,u,1>: Cost 3 vext1 <1,5,4,u>, <1,5,4,u>
+ 2242014483U, // <5,4,u,2>: Cost 3 vrev <4,5,2,u>
+ 2667419628U, // <5,4,u,3>: Cost 3 vext2 <u,3,5,4>, <u,3,5,4>
+ 2559790390U, // <5,4,u,4>: Cost 3 vext1 <1,5,4,u>, RHS
+ 1640222238U, // <5,4,u,5>: Cost 2 vext3 <4,u,5,5>, <4,u,5,5>
+ 94825783U, // <5,4,u,6>: Cost 1 vrev RHS
+ 2714111536U, // <5,4,u,7>: Cost 3 vext3 <4,u,7,5>, <4,u,7,5>
+ 94973257U, // <5,4,u,u>: Cost 1 vrev RHS
+ 2646851584U, // <5,5,0,0>: Cost 3 vext2 <4,u,5,5>, <0,0,0,0>
+ 1573109862U, // <5,5,0,1>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2646851748U, // <5,5,0,2>: Cost 3 vext2 <4,u,5,5>, <0,2,0,2>
+ 3760279130U, // <5,5,0,3>: Cost 4 vext3 <0,3,2,5>, <5,0,3,2>
+ 2687127138U, // <5,5,0,4>: Cost 3 vext3 <0,4,1,5>, <5,0,4,1>
+ 2248142847U, // <5,5,0,5>: Cost 3 vrev <5,5,5,0>
+ 3720593910U, // <5,5,0,6>: Cost 4 vext2 <4,u,5,5>, <0,6,1,7>
+ 4182502710U, // <5,5,0,7>: Cost 4 vtrnr <3,5,7,0>, RHS
+ 1573110429U, // <5,5,0,u>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2646852342U, // <5,5,1,0>: Cost 3 vext2 <4,u,5,5>, <1,0,3,2>
+ 2624291676U, // <5,5,1,1>: Cost 3 vext2 <1,1,5,5>, <1,1,5,5>
+ 2646852502U, // <5,5,1,2>: Cost 3 vext2 <4,u,5,5>, <1,2,3,0>
+ 2646852568U, // <5,5,1,3>: Cost 3 vext2 <4,u,5,5>, <1,3,1,3>
+ 2715217591U, // <5,5,1,4>: Cost 3 vext3 <5,1,4,5>, <5,1,4,5>
+ 2628936848U, // <5,5,1,5>: Cost 3 vext2 <1,u,5,5>, <1,5,3,7>
+ 3698033907U, // <5,5,1,6>: Cost 4 vext2 <1,1,5,5>, <1,6,5,7>
+ 2713964240U, // <5,5,1,7>: Cost 3 vext3 <4,u,5,5>, <5,1,7,3>
+ 2628937107U, // <5,5,1,u>: Cost 3 vext2 <1,u,5,5>, <1,u,5,5>
+ 3645497446U, // <5,5,2,0>: Cost 4 vext1 <3,5,5,2>, LHS
+ 3760869099U, // <5,5,2,1>: Cost 4 vext3 <0,4,1,5>, <5,2,1,3>
+ 2646853224U, // <5,5,2,2>: Cost 3 vext2 <4,u,5,5>, <2,2,2,2>
+ 2698628862U, // <5,5,2,3>: Cost 3 vext3 <2,3,4,5>, <5,2,3,4>
+ 3772370694U, // <5,5,2,4>: Cost 4 vext3 <2,3,4,5>, <5,2,4,3>
+ 2713964303U, // <5,5,2,5>: Cost 3 vext3 <4,u,5,5>, <5,2,5,3>
+ 2646853562U, // <5,5,2,6>: Cost 3 vext2 <4,u,5,5>, <2,6,3,7>
+ 4038198272U, // <5,5,2,7>: Cost 4 vzipr <1,u,5,2>, <1,3,5,7>
+ 2701946667U, // <5,5,2,u>: Cost 3 vext3 <2,u,4,5>, <5,2,u,4>
+ 2646853782U, // <5,5,3,0>: Cost 3 vext2 <4,u,5,5>, <3,0,1,2>
+ 3698034922U, // <5,5,3,1>: Cost 4 vext2 <1,1,5,5>, <3,1,1,5>
+ 3702679919U, // <5,5,3,2>: Cost 4 vext2 <1,u,5,5>, <3,2,7,3>
+ 2637564336U, // <5,5,3,3>: Cost 3 vext2 <3,3,5,5>, <3,3,5,5>
+ 2646854146U, // <5,5,3,4>: Cost 3 vext2 <4,u,5,5>, <3,4,5,6>
+ 2638891602U, // <5,5,3,5>: Cost 3 vext2 <3,5,5,5>, <3,5,5,5>
+ 3702680247U, // <5,5,3,6>: Cost 4 vext2 <1,u,5,5>, <3,6,7,7>
+ 3702680259U, // <5,5,3,7>: Cost 4 vext2 <1,u,5,5>, <3,7,0,1>
+ 2646854430U, // <5,5,3,u>: Cost 3 vext2 <4,u,5,5>, <3,u,1,2>
+ 2646854546U, // <5,5,4,0>: Cost 3 vext2 <4,u,5,5>, <4,0,5,1>
+ 2642209767U, // <5,5,4,1>: Cost 3 vext2 <4,1,5,5>, <4,1,5,5>
+ 3711306806U, // <5,5,4,2>: Cost 4 vext2 <3,3,5,5>, <4,2,5,3>
+ 3645516369U, // <5,5,4,3>: Cost 4 vext1 <3,5,5,4>, <3,5,5,4>
+ 1570458842U, // <5,5,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
+ 1573113142U, // <5,5,4,5>: Cost 2 vext2 <4,u,5,5>, RHS
+ 2645527932U, // <5,5,4,6>: Cost 3 vext2 <4,6,5,5>, <4,6,5,5>
+ 2713964486U, // <5,5,4,7>: Cost 3 vext3 <4,u,5,5>, <5,4,7,6>
+ 1573113374U, // <5,5,4,u>: Cost 2 vext2 <4,u,5,5>, <4,u,5,5>
+ 1509982310U, // <5,5,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 2646855376U, // <5,5,5,1>: Cost 3 vext2 <4,u,5,5>, <5,1,7,3>
+ 2583725672U, // <5,5,5,2>: Cost 3 vext1 <5,5,5,5>, <2,2,2,2>
+ 2583726230U, // <5,5,5,3>: Cost 3 vext1 <5,5,5,5>, <3,0,1,2>
+ 1509985590U, // <5,5,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,5,5,5>: Cost 1 vdup1 RHS
+ 2646855778U, // <5,5,5,6>: Cost 3 vext2 <4,u,5,5>, <5,6,7,0>
+ 2646855848U, // <5,5,5,7>: Cost 3 vext2 <4,u,5,5>, <5,7,5,7>
+ 229035318U, // <5,5,5,u>: Cost 1 vdup1 RHS
+ 2577760358U, // <5,5,6,0>: Cost 3 vext1 <4,5,5,6>, LHS
+ 3633587361U, // <5,5,6,1>: Cost 4 vext1 <1,5,5,6>, <1,5,5,6>
+ 2646856186U, // <5,5,6,2>: Cost 3 vext2 <4,u,5,5>, <6,2,7,3>
+ 3633588738U, // <5,5,6,3>: Cost 4 vext1 <1,5,5,6>, <3,4,5,6>
+ 2718535756U, // <5,5,6,4>: Cost 3 vext3 <5,6,4,5>, <5,6,4,5>
+ 2644202223U, // <5,5,6,5>: Cost 3 vext2 <4,4,5,5>, <6,5,7,5>
+ 2973780482U, // <5,5,6,6>: Cost 3 vzipr <3,4,5,6>, <3,4,5,6>
+ 2646856526U, // <5,5,6,7>: Cost 3 vext2 <4,u,5,5>, <6,7,0,1>
+ 2646856607U, // <5,5,6,u>: Cost 3 vext2 <4,u,5,5>, <6,u,0,1>
+ 2571796582U, // <5,5,7,0>: Cost 3 vext1 <3,5,5,7>, LHS
+ 3633595392U, // <5,5,7,1>: Cost 4 vext1 <1,5,5,7>, <1,3,5,7>
+ 2571798222U, // <5,5,7,2>: Cost 3 vext1 <3,5,5,7>, <2,3,4,5>
+ 2571799124U, // <5,5,7,3>: Cost 3 vext1 <3,5,5,7>, <3,5,5,7>
+ 2571799862U, // <5,5,7,4>: Cost 3 vext1 <3,5,5,7>, RHS
+ 3114717188U, // <5,5,7,5>: Cost 3 vtrnr RHS, <5,5,5,5>
+ 4034923010U, // <5,5,7,6>: Cost 4 vzipr <1,3,5,7>, <3,4,5,6>
+ 2040974646U, // <5,5,7,7>: Cost 2 vtrnr RHS, RHS
+ 2040974647U, // <5,5,7,u>: Cost 2 vtrnr RHS, RHS
+ 1509982310U, // <5,5,u,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 1573115694U, // <5,5,u,1>: Cost 2 vext2 <4,u,5,5>, LHS
+ 2571806414U, // <5,5,u,2>: Cost 3 vext1 <3,5,5,u>, <2,3,4,5>
+ 2571807317U, // <5,5,u,3>: Cost 3 vext1 <3,5,5,u>, <3,5,5,u>
+ 1509985590U, // <5,5,u,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,5,u,5>: Cost 1 vdup1 RHS
+ 2646857936U, // <5,5,u,6>: Cost 3 vext2 <4,u,5,5>, <u,6,3,7>
+ 2040982838U, // <5,5,u,7>: Cost 2 vtrnr RHS, RHS
+ 229035318U, // <5,5,u,u>: Cost 1 vdup1 RHS
+ 2638233600U, // <5,6,0,0>: Cost 3 vext2 <3,4,5,6>, <0,0,0,0>
+ 1564491878U, // <5,6,0,1>: Cost 2 vext2 <3,4,5,6>, LHS
+ 2632261796U, // <5,6,0,2>: Cost 3 vext2 <2,4,5,6>, <0,2,0,2>
+ 2638233856U, // <5,6,0,3>: Cost 3 vext2 <3,4,5,6>, <0,3,1,4>
+ 2638233938U, // <5,6,0,4>: Cost 3 vext2 <3,4,5,6>, <0,4,1,5>
+ 3706003885U, // <5,6,0,5>: Cost 4 vext2 <2,4,5,6>, <0,5,2,6>
+ 3706003967U, // <5,6,0,6>: Cost 4 vext2 <2,4,5,6>, <0,6,2,7>
+ 4047473974U, // <5,6,0,7>: Cost 4 vzipr <3,4,5,0>, RHS
+ 1564492445U, // <5,6,0,u>: Cost 2 vext2 <3,4,5,6>, LHS
+ 2638234358U, // <5,6,1,0>: Cost 3 vext2 <3,4,5,6>, <1,0,3,2>
+ 2638234420U, // <5,6,1,1>: Cost 3 vext2 <3,4,5,6>, <1,1,1,1>
+ 2638234518U, // <5,6,1,2>: Cost 3 vext2 <3,4,5,6>, <1,2,3,0>
+ 2638234584U, // <5,6,1,3>: Cost 3 vext2 <3,4,5,6>, <1,3,1,3>
+ 2626290768U, // <5,6,1,4>: Cost 3 vext2 <1,4,5,6>, <1,4,5,6>
+ 2638234768U, // <5,6,1,5>: Cost 3 vext2 <3,4,5,6>, <1,5,3,7>
+ 3700032719U, // <5,6,1,6>: Cost 4 vext2 <1,4,5,6>, <1,6,1,7>
+ 2982366518U, // <5,6,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
+ 2628945300U, // <5,6,1,u>: Cost 3 vext2 <1,u,5,6>, <1,u,5,6>
+ 3706004925U, // <5,6,2,0>: Cost 4 vext2 <2,4,5,6>, <2,0,1,2>
+ 3711976966U, // <5,6,2,1>: Cost 4 vext2 <3,4,5,6>, <2,1,0,3>
+ 2638235240U, // <5,6,2,2>: Cost 3 vext2 <3,4,5,6>, <2,2,2,2>
+ 2638235302U, // <5,6,2,3>: Cost 3 vext2 <3,4,5,6>, <2,3,0,1>
+ 2632263465U, // <5,6,2,4>: Cost 3 vext2 <2,4,5,6>, <2,4,5,6>
+ 2638235496U, // <5,6,2,5>: Cost 3 vext2 <3,4,5,6>, <2,5,3,6>
+ 2638235578U, // <5,6,2,6>: Cost 3 vext2 <3,4,5,6>, <2,6,3,7>
+ 2713965050U, // <5,6,2,7>: Cost 3 vext3 <4,u,5,5>, <6,2,7,3>
+ 2634917997U, // <5,6,2,u>: Cost 3 vext2 <2,u,5,6>, <2,u,5,6>
+ 2638235798U, // <5,6,3,0>: Cost 3 vext2 <3,4,5,6>, <3,0,1,2>
+ 3711977695U, // <5,6,3,1>: Cost 4 vext2 <3,4,5,6>, <3,1,0,3>
+ 3710650720U, // <5,6,3,2>: Cost 4 vext2 <3,2,5,6>, <3,2,5,6>
+ 2638236060U, // <5,6,3,3>: Cost 3 vext2 <3,4,5,6>, <3,3,3,3>
+ 1564494338U, // <5,6,3,4>: Cost 2 vext2 <3,4,5,6>, <3,4,5,6>
+ 2638236234U, // <5,6,3,5>: Cost 3 vext2 <3,4,5,6>, <3,5,4,6>
+ 3711978104U, // <5,6,3,6>: Cost 4 vext2 <3,4,5,6>, <3,6,0,7>
+ 4034227510U, // <5,6,3,7>: Cost 4 vzipr <1,2,5,3>, RHS
+ 1567148870U, // <5,6,3,u>: Cost 2 vext2 <3,u,5,6>, <3,u,5,6>
+ 2577817702U, // <5,6,4,0>: Cost 3 vext1 <4,5,6,4>, LHS
+ 3700034544U, // <5,6,4,1>: Cost 4 vext2 <1,4,5,6>, <4,1,6,5>
+ 2723033713U, // <5,6,4,2>: Cost 3 vext3 <6,4,2,5>, <6,4,2,5>
+ 2638236818U, // <5,6,4,3>: Cost 3 vext2 <3,4,5,6>, <4,3,6,5>
+ 2644208859U, // <5,6,4,4>: Cost 3 vext2 <4,4,5,6>, <4,4,5,6>
+ 1564495158U, // <5,6,4,5>: Cost 2 vext2 <3,4,5,6>, RHS
+ 2645536125U, // <5,6,4,6>: Cost 3 vext2 <4,6,5,6>, <4,6,5,6>
+ 2723402398U, // <5,6,4,7>: Cost 3 vext3 <6,4,7,5>, <6,4,7,5>
+ 1564495401U, // <5,6,4,u>: Cost 2 vext2 <3,4,5,6>, RHS
+ 2577825894U, // <5,6,5,0>: Cost 3 vext1 <4,5,6,5>, LHS
+ 2662125264U, // <5,6,5,1>: Cost 3 vext2 <7,4,5,6>, <5,1,7,3>
+ 3775836867U, // <5,6,5,2>: Cost 4 vext3 <2,u,6,5>, <6,5,2,6>
+ 3711979343U, // <5,6,5,3>: Cost 4 vext2 <3,4,5,6>, <5,3,3,4>
+ 2650181556U, // <5,6,5,4>: Cost 3 vext2 <5,4,5,6>, <5,4,5,6>
+ 2662125572U, // <5,6,5,5>: Cost 3 vext2 <7,4,5,6>, <5,5,5,5>
+ 2638237732U, // <5,6,5,6>: Cost 3 vext2 <3,4,5,6>, <5,6,0,1>
+ 2982399286U, // <5,6,5,7>: Cost 3 vzipr <4,u,5,5>, RHS
+ 2982399287U, // <5,6,5,u>: Cost 3 vzipr <4,u,5,5>, RHS
+ 2583806054U, // <5,6,6,0>: Cost 3 vext1 <5,5,6,6>, LHS
+ 3711979910U, // <5,6,6,1>: Cost 4 vext2 <3,4,5,6>, <6,1,3,4>
+ 2662126074U, // <5,6,6,2>: Cost 3 vext2 <7,4,5,6>, <6,2,7,3>
+ 2583808514U, // <5,6,6,3>: Cost 3 vext1 <5,5,6,6>, <3,4,5,6>
+ 2583809334U, // <5,6,6,4>: Cost 3 vext1 <5,5,6,6>, RHS
+ 2583810062U, // <5,6,6,5>: Cost 3 vext1 <5,5,6,6>, <5,5,6,6>
+ 2638238520U, // <5,6,6,6>: Cost 3 vext2 <3,4,5,6>, <6,6,6,6>
+ 2973781302U, // <5,6,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
+ 2973781303U, // <5,6,6,u>: Cost 3 vzipr <3,4,5,6>, RHS
+ 430358630U, // <5,6,7,0>: Cost 1 vext1 RHS, LHS
+ 1504101110U, // <5,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
+ 1504101992U, // <5,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1504102550U, // <5,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 430361910U, // <5,6,7,4>: Cost 1 vext1 RHS, RHS
+ 1504104390U, // <5,6,7,5>: Cost 2 vext1 RHS, <5,4,7,6>
+ 1504105272U, // <5,6,7,6>: Cost 2 vext1 RHS, <6,6,6,6>
+ 1504106092U, // <5,6,7,7>: Cost 2 vext1 RHS, <7,7,7,7>
+ 430364462U, // <5,6,7,u>: Cost 1 vext1 RHS, LHS
+ 430366822U, // <5,6,u,0>: Cost 1 vext1 RHS, LHS
+ 1564497710U, // <5,6,u,1>: Cost 2 vext2 <3,4,5,6>, LHS
+ 1504110184U, // <5,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1504110742U, // <5,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 430370103U, // <5,6,u,4>: Cost 1 vext1 RHS, RHS
+ 1564498074U, // <5,6,u,5>: Cost 2 vext2 <3,4,5,6>, RHS
+ 1504113146U, // <5,6,u,6>: Cost 2 vext1 RHS, <6,2,7,3>
+ 1504113658U, // <5,6,u,7>: Cost 2 vext1 RHS, <7,0,1,2>
+ 430372654U, // <5,6,u,u>: Cost 1 vext1 RHS, LHS
+ 2625634304U, // <5,7,0,0>: Cost 3 vext2 <1,3,5,7>, <0,0,0,0>
+ 1551892582U, // <5,7,0,1>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625634468U, // <5,7,0,2>: Cost 3 vext2 <1,3,5,7>, <0,2,0,2>
+ 2571889247U, // <5,7,0,3>: Cost 3 vext1 <3,5,7,0>, <3,5,7,0>
+ 2625634642U, // <5,7,0,4>: Cost 3 vext2 <1,3,5,7>, <0,4,1,5>
+ 2595778728U, // <5,7,0,5>: Cost 3 vext1 <7,5,7,0>, <5,7,5,7>
+ 3699376639U, // <5,7,0,6>: Cost 4 vext2 <1,3,5,7>, <0,6,2,7>
+ 2260235715U, // <5,7,0,7>: Cost 3 vrev <7,5,7,0>
+ 1551893149U, // <5,7,0,u>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625635062U, // <5,7,1,0>: Cost 3 vext2 <1,3,5,7>, <1,0,3,2>
+ 2624308020U, // <5,7,1,1>: Cost 3 vext2 <1,1,5,7>, <1,1,1,1>
+ 2625635222U, // <5,7,1,2>: Cost 3 vext2 <1,3,5,7>, <1,2,3,0>
+ 1551893504U, // <5,7,1,3>: Cost 2 vext2 <1,3,5,7>, <1,3,5,7>
+ 2571898166U, // <5,7,1,4>: Cost 3 vext1 <3,5,7,1>, RHS
+ 2625635472U, // <5,7,1,5>: Cost 3 vext2 <1,3,5,7>, <1,5,3,7>
+ 2627626227U, // <5,7,1,6>: Cost 3 vext2 <1,6,5,7>, <1,6,5,7>
+ 3702031684U, // <5,7,1,7>: Cost 4 vext2 <1,7,5,7>, <1,7,5,7>
+ 1555211669U, // <5,7,1,u>: Cost 2 vext2 <1,u,5,7>, <1,u,5,7>
+ 2629617126U, // <5,7,2,0>: Cost 3 vext2 <2,0,5,7>, <2,0,5,7>
+ 3699377670U, // <5,7,2,1>: Cost 4 vext2 <1,3,5,7>, <2,1,0,3>
+ 2625635944U, // <5,7,2,2>: Cost 3 vext2 <1,3,5,7>, <2,2,2,2>
+ 2625636006U, // <5,7,2,3>: Cost 3 vext2 <1,3,5,7>, <2,3,0,1>
+ 2632271658U, // <5,7,2,4>: Cost 3 vext2 <2,4,5,7>, <2,4,5,7>
+ 2625636201U, // <5,7,2,5>: Cost 3 vext2 <1,3,5,7>, <2,5,3,7>
+ 2625636282U, // <5,7,2,6>: Cost 3 vext2 <1,3,5,7>, <2,6,3,7>
+ 3708004381U, // <5,7,2,7>: Cost 4 vext2 <2,7,5,7>, <2,7,5,7>
+ 2625636411U, // <5,7,2,u>: Cost 3 vext2 <1,3,5,7>, <2,u,0,1>
+ 2625636502U, // <5,7,3,0>: Cost 3 vext2 <1,3,5,7>, <3,0,1,2>
+ 2625636604U, // <5,7,3,1>: Cost 3 vext2 <1,3,5,7>, <3,1,3,5>
+ 3699378478U, // <5,7,3,2>: Cost 4 vext2 <1,3,5,7>, <3,2,0,1>
+ 2625636764U, // <5,7,3,3>: Cost 3 vext2 <1,3,5,7>, <3,3,3,3>
+ 2625636866U, // <5,7,3,4>: Cost 3 vext2 <1,3,5,7>, <3,4,5,6>
+ 2625636959U, // <5,7,3,5>: Cost 3 vext2 <1,3,5,7>, <3,5,7,0>
+ 3699378808U, // <5,7,3,6>: Cost 4 vext2 <1,3,5,7>, <3,6,0,7>
+ 2640235254U, // <5,7,3,7>: Cost 3 vext2 <3,7,5,7>, <3,7,5,7>
+ 2625637150U, // <5,7,3,u>: Cost 3 vext2 <1,3,5,7>, <3,u,1,2>
+ 2571919462U, // <5,7,4,0>: Cost 3 vext1 <3,5,7,4>, LHS
+ 2571920384U, // <5,7,4,1>: Cost 3 vext1 <3,5,7,4>, <1,3,5,7>
+ 3699379260U, // <5,7,4,2>: Cost 4 vext2 <1,3,5,7>, <4,2,6,0>
+ 2571922019U, // <5,7,4,3>: Cost 3 vext1 <3,5,7,4>, <3,5,7,4>
+ 2571922742U, // <5,7,4,4>: Cost 3 vext1 <3,5,7,4>, RHS
+ 1551895862U, // <5,7,4,5>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2846277980U, // <5,7,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
+ 2646207951U, // <5,7,4,7>: Cost 3 vext2 <4,7,5,7>, <4,7,5,7>
+ 1551896105U, // <5,7,4,u>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2583871590U, // <5,7,5,0>: Cost 3 vext1 <5,5,7,5>, LHS
+ 2652180176U, // <5,7,5,1>: Cost 3 vext2 <5,7,5,7>, <5,1,7,3>
+ 2625638177U, // <5,7,5,2>: Cost 3 vext2 <1,3,5,7>, <5,2,7,3>
+ 2625638262U, // <5,7,5,3>: Cost 3 vext2 <1,3,5,7>, <5,3,7,7>
+ 2583874870U, // <5,7,5,4>: Cost 3 vext1 <5,5,7,5>, RHS
+ 2846281732U, // <5,7,5,5>: Cost 3 vuzpr RHS, <5,5,5,5>
+ 2651517015U, // <5,7,5,6>: Cost 3 vext2 <5,6,5,7>, <5,6,5,7>
+ 1772539190U, // <5,7,5,7>: Cost 2 vuzpr RHS, RHS
+ 1772539191U, // <5,7,5,u>: Cost 2 vuzpr RHS, RHS
+ 2846281826U, // <5,7,6,0>: Cost 3 vuzpr RHS, <5,6,7,0>
+ 3699380615U, // <5,7,6,1>: Cost 4 vext2 <1,3,5,7>, <6,1,3,5>
+ 2846281108U, // <5,7,6,2>: Cost 3 vuzpr RHS, <4,6,u,2>
+ 2589854210U, // <5,7,6,3>: Cost 3 vext1 <6,5,7,6>, <3,4,5,6>
+ 2846281830U, // <5,7,6,4>: Cost 3 vuzpr RHS, <5,6,7,4>
+ 2725467658U, // <5,7,6,5>: Cost 3 vext3 <6,7,u,5>, <7,6,5,u>
+ 2846281076U, // <5,7,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
+ 2846279610U, // <5,7,6,7>: Cost 3 vuzpr RHS, <2,6,3,7>
+ 2846279611U, // <5,7,6,u>: Cost 3 vuzpr RHS, <2,6,3,u>
+ 1510146150U, // <5,7,7,0>: Cost 2 vext1 <5,5,7,7>, LHS
+ 2846282574U, // <5,7,7,1>: Cost 3 vuzpr RHS, <6,7,0,1>
+ 2583889512U, // <5,7,7,2>: Cost 3 vext1 <5,5,7,7>, <2,2,2,2>
+ 2846281919U, // <5,7,7,3>: Cost 3 vuzpr RHS, <5,7,u,3>
+ 1510149430U, // <5,7,7,4>: Cost 2 vext1 <5,5,7,7>, RHS
+ 1510150168U, // <5,7,7,5>: Cost 2 vext1 <5,5,7,7>, <5,5,7,7>
+ 2583892474U, // <5,7,7,6>: Cost 3 vext1 <5,5,7,7>, <6,2,7,3>
+ 2625640044U, // <5,7,7,7>: Cost 3 vext2 <1,3,5,7>, <7,7,7,7>
+ 1510151982U, // <5,7,7,u>: Cost 2 vext1 <5,5,7,7>, LHS
+ 1510154342U, // <5,7,u,0>: Cost 2 vext1 <5,5,7,u>, LHS
+ 1551898414U, // <5,7,u,1>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625640325U, // <5,7,u,2>: Cost 3 vext2 <1,3,5,7>, <u,2,3,0>
+ 1772536477U, // <5,7,u,3>: Cost 2 vuzpr RHS, LHS
+ 1510157622U, // <5,7,u,4>: Cost 2 vext1 <5,5,7,u>, RHS
+ 1551898778U, // <5,7,u,5>: Cost 2 vext2 <1,3,5,7>, RHS
+ 2625640656U, // <5,7,u,6>: Cost 3 vext2 <1,3,5,7>, <u,6,3,7>
+ 1772539433U, // <5,7,u,7>: Cost 2 vuzpr RHS, RHS
+ 1551898981U, // <5,7,u,u>: Cost 2 vext2 <1,3,5,7>, LHS
+ 2625642496U, // <5,u,0,0>: Cost 3 vext2 <1,3,5,u>, <0,0,0,0>
+ 1551900774U, // <5,u,0,1>: Cost 2 vext2 <1,3,5,u>, LHS
+ 2625642660U, // <5,u,0,2>: Cost 3 vext2 <1,3,5,u>, <0,2,0,2>
+ 2698630885U, // <5,u,0,3>: Cost 3 vext3 <2,3,4,5>, <u,0,3,2>
+ 2687129325U, // <5,u,0,4>: Cost 3 vext3 <0,4,1,5>, <u,0,4,1>
+ 2689783542U, // <5,u,0,5>: Cost 3 vext3 <0,u,1,5>, <u,0,5,1>
+ 2266134675U, // <5,u,0,6>: Cost 3 vrev <u,5,6,0>
+ 2595853772U, // <5,u,0,7>: Cost 3 vext1 <7,5,u,0>, <7,5,u,0>
+ 1551901341U, // <5,u,0,u>: Cost 2 vext2 <1,3,5,u>, LHS
+ 2625643254U, // <5,u,1,0>: Cost 3 vext2 <1,3,5,u>, <1,0,3,2>
+ 2625643316U, // <5,u,1,1>: Cost 3 vext2 <1,3,5,u>, <1,1,1,1>
+ 1613387566U, // <5,u,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 1551901697U, // <5,u,1,3>: Cost 2 vext2 <1,3,5,u>, <1,3,5,u>
+ 2626307154U, // <5,u,1,4>: Cost 3 vext2 <1,4,5,u>, <1,4,5,u>
+ 2689783622U, // <5,u,1,5>: Cost 3 vext3 <0,u,1,5>, <u,1,5,0>
+ 2627634420U, // <5,u,1,6>: Cost 3 vext2 <1,6,5,u>, <1,6,5,u>
+ 2982366536U, // <5,u,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
+ 1613387620U, // <5,u,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
+ 2846286742U, // <5,u,2,0>: Cost 3 vuzpr RHS, <1,2,3,0>
+ 2685796528U, // <5,u,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
+ 2625644136U, // <5,u,2,2>: Cost 3 vext2 <1,3,5,u>, <2,2,2,2>
+ 2687129480U, // <5,u,2,3>: Cost 3 vext3 <0,4,1,5>, <u,2,3,3>
+ 2632279851U, // <5,u,2,4>: Cost 3 vext2 <2,4,5,u>, <2,4,5,u>
+ 2625644394U, // <5,u,2,5>: Cost 3 vext2 <1,3,5,u>, <2,5,3,u>
+ 2625644474U, // <5,u,2,6>: Cost 3 vext2 <1,3,5,u>, <2,6,3,7>
+ 2713966508U, // <5,u,2,7>: Cost 3 vext3 <4,u,5,5>, <u,2,7,3>
+ 2625644603U, // <5,u,2,u>: Cost 3 vext2 <1,3,5,u>, <2,u,0,1>
+ 2687129532U, // <5,u,3,0>: Cost 3 vext3 <0,4,1,5>, <u,3,0,1>
+ 2636261649U, // <5,u,3,1>: Cost 3 vext2 <3,1,5,u>, <3,1,5,u>
+ 2636925282U, // <5,u,3,2>: Cost 3 vext2 <3,2,5,u>, <3,2,5,u>
+ 2625644956U, // <5,u,3,3>: Cost 3 vext2 <1,3,5,u>, <3,3,3,3>
+ 1564510724U, // <5,u,3,4>: Cost 2 vext2 <3,4,5,u>, <3,4,5,u>
+ 2625645160U, // <5,u,3,5>: Cost 3 vext2 <1,3,5,u>, <3,5,u,0>
+ 2734610422U, // <5,u,3,6>: Cost 3 vext3 <u,3,6,5>, <u,3,6,5>
+ 2640243447U, // <5,u,3,7>: Cost 3 vext2 <3,7,5,u>, <3,7,5,u>
+ 1567165256U, // <5,u,3,u>: Cost 2 vext2 <3,u,5,u>, <3,u,5,u>
+ 1567828889U, // <5,u,4,0>: Cost 2 vext2 <4,0,5,u>, <4,0,5,u>
+ 1661163546U, // <5,u,4,1>: Cost 2 vext3 <u,4,1,5>, <u,4,1,5>
+ 2734463012U, // <5,u,4,2>: Cost 3 vext3 <u,3,4,5>, <u,4,2,6>
+ 2698631212U, // <5,u,4,3>: Cost 3 vext3 <2,3,4,5>, <u,4,3,5>
+ 1570458842U, // <5,u,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
+ 1551904054U, // <5,u,4,5>: Cost 2 vext2 <1,3,5,u>, RHS
+ 2846286172U, // <5,u,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
+ 2646216144U, // <5,u,4,7>: Cost 3 vext2 <4,7,5,u>, <4,7,5,u>
+ 1551904297U, // <5,u,4,u>: Cost 2 vext2 <1,3,5,u>, RHS
+ 1509982310U, // <5,u,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
+ 2560058555U, // <5,u,5,1>: Cost 3 vext1 <1,5,u,5>, <1,5,u,5>
+ 2698926194U, // <5,u,5,2>: Cost 3 vext3 <2,3,u,5>, <u,5,2,3>
+ 2698631295U, // <5,u,5,3>: Cost 3 vext3 <2,3,4,5>, <u,5,3,7>
+ 1509985590U, // <5,u,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
+ 229035318U, // <5,u,5,5>: Cost 1 vdup1 RHS
+ 1613387930U, // <5,u,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
+ 1772547382U, // <5,u,5,7>: Cost 2 vuzpr RHS, RHS
+ 229035318U, // <5,u,5,u>: Cost 1 vdup1 RHS
+ 2566037606U, // <5,u,6,0>: Cost 3 vext1 <2,5,u,6>, LHS
+ 2920044334U, // <5,u,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
+ 2566039445U, // <5,u,6,2>: Cost 3 vext1 <2,5,u,6>, <2,5,u,6>
+ 2687129808U, // <5,u,6,3>: Cost 3 vext3 <0,4,1,5>, <u,6,3,7>
+ 2566040886U, // <5,u,6,4>: Cost 3 vext1 <2,5,u,6>, RHS
+ 2920044698U, // <5,u,6,5>: Cost 3 vzipl <5,6,7,0>, RHS
+ 2846289268U, // <5,u,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
+ 2973781320U, // <5,u,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
+ 2687129853U, // <5,u,6,u>: Cost 3 vext3 <0,4,1,5>, <u,6,u,7>
+ 430506086U, // <5,u,7,0>: Cost 1 vext1 RHS, LHS
+ 1486333117U, // <5,u,7,1>: Cost 2 vext1 <1,5,u,7>, <1,5,u,7>
+ 1504249448U, // <5,u,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 2040971933U, // <5,u,7,3>: Cost 2 vtrnr RHS, LHS
+ 430509384U, // <5,u,7,4>: Cost 1 vext1 RHS, RHS
+ 1504251600U, // <5,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 118708378U, // <5,u,7,6>: Cost 1 vrev RHS
+ 2040974889U, // <5,u,7,7>: Cost 2 vtrnr RHS, RHS
+ 430511918U, // <5,u,7,u>: Cost 1 vext1 RHS, LHS
+ 430514278U, // <5,u,u,0>: Cost 1 vext1 RHS, LHS
+ 1551906606U, // <5,u,u,1>: Cost 2 vext2 <1,3,5,u>, LHS
+ 1613388133U, // <5,u,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
+ 1772544669U, // <5,u,u,3>: Cost 2 vuzpr RHS, LHS
+ 430517577U, // <5,u,u,4>: Cost 1 vext1 RHS, RHS
+ 229035318U, // <5,u,u,5>: Cost 1 vdup1 RHS
+ 118716571U, // <5,u,u,6>: Cost 1 vrev RHS
+ 1772547625U, // <5,u,u,7>: Cost 2 vuzpr RHS, RHS
+ 430520110U, // <5,u,u,u>: Cost 1 vext1 RHS, LHS
+ 2686025728U, // <6,0,0,0>: Cost 3 vext3 <0,2,4,6>, <0,0,0,0>
+ 2686025738U, // <6,0,0,1>: Cost 3 vext3 <0,2,4,6>, <0,0,1,1>
+ 2686025748U, // <6,0,0,2>: Cost 3 vext3 <0,2,4,6>, <0,0,2,2>
+ 3779084320U, // <6,0,0,3>: Cost 4 vext3 <3,4,5,6>, <0,0,3,5>
+ 2642903388U, // <6,0,0,4>: Cost 3 vext2 <4,2,6,0>, <0,4,2,6>
+ 3657723939U, // <6,0,0,5>: Cost 4 vext1 <5,6,0,0>, <5,6,0,0>
+ 3926676514U, // <6,0,0,6>: Cost 4 vuzpr <5,6,7,0>, <7,0,5,6>
+ 3926675786U, // <6,0,0,7>: Cost 4 vuzpr <5,6,7,0>, <6,0,5,7>
+ 2686025802U, // <6,0,0,u>: Cost 3 vext3 <0,2,4,6>, <0,0,u,2>
+ 2566070374U, // <6,0,1,0>: Cost 3 vext1 <2,6,0,1>, LHS
+ 3759767642U, // <6,0,1,1>: Cost 4 vext3 <0,2,4,6>, <0,1,1,0>
+ 1612284006U, // <6,0,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2583988738U, // <6,0,1,3>: Cost 3 vext1 <5,6,0,1>, <3,4,5,6>
+ 2566073654U, // <6,0,1,4>: Cost 3 vext1 <2,6,0,1>, RHS
+ 2583990308U, // <6,0,1,5>: Cost 3 vext1 <5,6,0,1>, <5,6,0,1>
+ 2589963005U, // <6,0,1,6>: Cost 3 vext1 <6,6,0,1>, <6,6,0,1>
+ 2595935702U, // <6,0,1,7>: Cost 3 vext1 <7,6,0,1>, <7,6,0,1>
+ 1612284060U, // <6,0,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2686025892U, // <6,0,2,0>: Cost 3 vext3 <0,2,4,6>, <0,2,0,2>
+ 2685804721U, // <6,0,2,1>: Cost 3 vext3 <0,2,1,6>, <0,2,1,6>
+ 3759620282U, // <6,0,2,2>: Cost 4 vext3 <0,2,2,6>, <0,2,2,6>
+ 2705342658U, // <6,0,2,3>: Cost 3 vext3 <3,4,5,6>, <0,2,3,5>
+ 1612284108U, // <6,0,2,4>: Cost 2 vext3 <0,2,4,6>, <0,2,4,6>
+ 3706029956U, // <6,0,2,5>: Cost 4 vext2 <2,4,6,0>, <2,5,6,7>
+ 2686173406U, // <6,0,2,6>: Cost 3 vext3 <0,2,6,6>, <0,2,6,6>
+ 3651769338U, // <6,0,2,7>: Cost 4 vext1 <4,6,0,2>, <7,0,1,2>
+ 1612579056U, // <6,0,2,u>: Cost 2 vext3 <0,2,u,6>, <0,2,u,6>
+ 3706030230U, // <6,0,3,0>: Cost 4 vext2 <2,4,6,0>, <3,0,1,2>
+ 2705342720U, // <6,0,3,1>: Cost 3 vext3 <3,4,5,6>, <0,3,1,4>
+ 2705342730U, // <6,0,3,2>: Cost 3 vext3 <3,4,5,6>, <0,3,2,5>
+ 3706030492U, // <6,0,3,3>: Cost 4 vext2 <2,4,6,0>, <3,3,3,3>
+ 2644896258U, // <6,0,3,4>: Cost 3 vext2 <4,5,6,0>, <3,4,5,6>
+ 3718638154U, // <6,0,3,5>: Cost 4 vext2 <4,5,6,0>, <3,5,4,6>
+ 3729918619U, // <6,0,3,6>: Cost 4 vext2 <6,4,6,0>, <3,6,4,6>
+ 3926672384U, // <6,0,3,7>: Cost 4 vuzpr <5,6,7,0>, <1,3,5,7>
+ 2705342784U, // <6,0,3,u>: Cost 3 vext3 <3,4,5,6>, <0,3,u,5>
+ 2687058250U, // <6,0,4,0>: Cost 3 vext3 <0,4,0,6>, <0,4,0,6>
+ 2686026066U, // <6,0,4,1>: Cost 3 vext3 <0,2,4,6>, <0,4,1,5>
+ 1613463900U, // <6,0,4,2>: Cost 2 vext3 <0,4,2,6>, <0,4,2,6>
+ 3761021285U, // <6,0,4,3>: Cost 4 vext3 <0,4,3,6>, <0,4,3,6>
+ 2687353198U, // <6,0,4,4>: Cost 3 vext3 <0,4,4,6>, <0,4,4,6>
+ 2632289590U, // <6,0,4,5>: Cost 3 vext2 <2,4,6,0>, RHS
+ 2645560704U, // <6,0,4,6>: Cost 3 vext2 <4,6,6,0>, <4,6,6,0>
+ 2646224337U, // <6,0,4,7>: Cost 3 vext2 <4,7,6,0>, <4,7,6,0>
+ 1613906322U, // <6,0,4,u>: Cost 2 vext3 <0,4,u,6>, <0,4,u,6>
+ 3651788902U, // <6,0,5,0>: Cost 4 vext1 <4,6,0,5>, LHS
+ 2687795620U, // <6,0,5,1>: Cost 3 vext3 <0,5,1,6>, <0,5,1,6>
+ 3761611181U, // <6,0,5,2>: Cost 4 vext3 <0,5,2,6>, <0,5,2,6>
+ 3723284326U, // <6,0,5,3>: Cost 4 vext2 <5,3,6,0>, <5,3,6,0>
+ 2646224838U, // <6,0,5,4>: Cost 3 vext2 <4,7,6,0>, <5,4,7,6>
+ 3718639630U, // <6,0,5,5>: Cost 4 vext2 <4,5,6,0>, <5,5,6,6>
+ 2652196962U, // <6,0,5,6>: Cost 3 vext2 <5,7,6,0>, <5,6,7,0>
+ 2852932918U, // <6,0,5,7>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 2852932919U, // <6,0,5,u>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 2852933730U, // <6,0,6,0>: Cost 3 vuzpr <5,6,7,0>, <5,6,7,0>
+ 2925985894U, // <6,0,6,1>: Cost 3 vzipl <6,6,6,6>, LHS
+ 3060203622U, // <6,0,6,2>: Cost 3 vtrnl <6,6,6,6>, LHS
+ 3718640178U, // <6,0,6,3>: Cost 4 vext2 <4,5,6,0>, <6,3,4,5>
+ 2656178832U, // <6,0,6,4>: Cost 3 vext2 <6,4,6,0>, <6,4,6,0>
+ 3725939378U, // <6,0,6,5>: Cost 4 vext2 <5,7,6,0>, <6,5,0,7>
+ 2657506098U, // <6,0,6,6>: Cost 3 vext2 <6,6,6,0>, <6,6,6,0>
+ 2619020110U, // <6,0,6,7>: Cost 3 vext2 <0,2,6,0>, <6,7,0,1>
+ 2925986461U, // <6,0,6,u>: Cost 3 vzipl <6,6,6,6>, LHS
+ 2572091494U, // <6,0,7,0>: Cost 3 vext1 <3,6,0,7>, LHS
+ 2572092310U, // <6,0,7,1>: Cost 3 vext1 <3,6,0,7>, <1,2,3,0>
+ 2980495524U, // <6,0,7,2>: Cost 3 vzipr RHS, <0,2,0,2>
+ 2572094072U, // <6,0,7,3>: Cost 3 vext1 <3,6,0,7>, <3,6,0,7>
+ 2572094774U, // <6,0,7,4>: Cost 3 vext1 <3,6,0,7>, RHS
+ 4054238242U, // <6,0,7,5>: Cost 4 vzipr RHS, <1,4,0,5>
+ 3645837653U, // <6,0,7,6>: Cost 4 vext1 <3,6,0,7>, <6,0,7,0>
+ 4054239054U, // <6,0,7,7>: Cost 4 vzipr RHS, <2,5,0,7>
+ 2572097326U, // <6,0,7,u>: Cost 3 vext1 <3,6,0,7>, LHS
+ 2686026378U, // <6,0,u,0>: Cost 3 vext3 <0,2,4,6>, <0,u,0,2>
+ 2686026386U, // <6,0,u,1>: Cost 3 vext3 <0,2,4,6>, <0,u,1,1>
+ 1612284573U, // <6,0,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2705343144U, // <6,0,u,3>: Cost 3 vext3 <3,4,5,6>, <0,u,3,5>
+ 1616265906U, // <6,0,u,4>: Cost 2 vext3 <0,u,4,6>, <0,u,4,6>
+ 2632292506U, // <6,0,u,5>: Cost 3 vext2 <2,4,6,0>, RHS
+ 2590020356U, // <6,0,u,6>: Cost 3 vext1 <6,6,0,u>, <6,6,0,u>
+ 2852933161U, // <6,0,u,7>: Cost 3 vuzpr <5,6,7,0>, RHS
+ 1612284627U, // <6,0,u,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 2595995750U, // <6,1,0,0>: Cost 3 vext1 <7,6,1,0>, LHS
+ 2646229094U, // <6,1,0,1>: Cost 3 vext2 <4,7,6,1>, LHS
+ 3694092492U, // <6,1,0,2>: Cost 4 vext2 <0,4,6,1>, <0,2,4,6>
+ 2686026486U, // <6,1,0,3>: Cost 3 vext3 <0,2,4,6>, <1,0,3,2>
+ 2595999030U, // <6,1,0,4>: Cost 3 vext1 <7,6,1,0>, RHS
+ 3767730952U, // <6,1,0,5>: Cost 4 vext3 <1,5,4,6>, <1,0,5,2>
+ 2596000590U, // <6,1,0,6>: Cost 3 vext1 <7,6,1,0>, <6,7,0,1>
+ 2596001246U, // <6,1,0,7>: Cost 3 vext1 <7,6,1,0>, <7,6,1,0>
+ 2686026531U, // <6,1,0,u>: Cost 3 vext3 <0,2,4,6>, <1,0,u,2>
+ 3763602219U, // <6,1,1,0>: Cost 4 vext3 <0,u,2,6>, <1,1,0,1>
+ 2686026548U, // <6,1,1,1>: Cost 3 vext3 <0,2,4,6>, <1,1,1,1>
+ 3764929346U, // <6,1,1,2>: Cost 4 vext3 <1,1,2,6>, <1,1,2,6>
+ 2686026568U, // <6,1,1,3>: Cost 3 vext3 <0,2,4,6>, <1,1,3,3>
+ 2691334996U, // <6,1,1,4>: Cost 3 vext3 <1,1,4,6>, <1,1,4,6>
+ 3760874332U, // <6,1,1,5>: Cost 4 vext3 <0,4,1,6>, <1,1,5,5>
+ 3765224294U, // <6,1,1,6>: Cost 4 vext3 <1,1,6,6>, <1,1,6,6>
+ 3669751263U, // <6,1,1,7>: Cost 4 vext1 <7,6,1,1>, <7,6,1,1>
+ 2686026613U, // <6,1,1,u>: Cost 3 vext3 <0,2,4,6>, <1,1,u,3>
+ 2554208358U, // <6,1,2,0>: Cost 3 vext1 <0,6,1,2>, LHS
+ 3763602311U, // <6,1,2,1>: Cost 4 vext3 <0,u,2,6>, <1,2,1,3>
+ 3639895971U, // <6,1,2,2>: Cost 4 vext1 <2,6,1,2>, <2,6,1,2>
+ 2686026646U, // <6,1,2,3>: Cost 3 vext3 <0,2,4,6>, <1,2,3,0>
+ 2554211638U, // <6,1,2,4>: Cost 3 vext1 <0,6,1,2>, RHS
+ 3760874411U, // <6,1,2,5>: Cost 4 vext3 <0,4,1,6>, <1,2,5,3>
+ 2554212858U, // <6,1,2,6>: Cost 3 vext1 <0,6,1,2>, <6,2,7,3>
+ 3802973114U, // <6,1,2,7>: Cost 4 vext3 <7,4,5,6>, <1,2,7,0>
+ 2686026691U, // <6,1,2,u>: Cost 3 vext3 <0,2,4,6>, <1,2,u,0>
+ 2566160486U, // <6,1,3,0>: Cost 3 vext1 <2,6,1,3>, LHS
+ 2686026712U, // <6,1,3,1>: Cost 3 vext3 <0,2,4,6>, <1,3,1,3>
+ 2686026724U, // <6,1,3,2>: Cost 3 vext3 <0,2,4,6>, <1,3,2,6>
+ 3759768552U, // <6,1,3,3>: Cost 4 vext3 <0,2,4,6>, <1,3,3,1>
+ 2692662262U, // <6,1,3,4>: Cost 3 vext3 <1,3,4,6>, <1,3,4,6>
+ 2686026752U, // <6,1,3,5>: Cost 3 vext3 <0,2,4,6>, <1,3,5,7>
+ 2590053128U, // <6,1,3,6>: Cost 3 vext1 <6,6,1,3>, <6,6,1,3>
+ 3663795194U, // <6,1,3,7>: Cost 4 vext1 <6,6,1,3>, <7,0,1,2>
+ 2686026775U, // <6,1,3,u>: Cost 3 vext3 <0,2,4,6>, <1,3,u,3>
+ 2641587099U, // <6,1,4,0>: Cost 3 vext2 <4,0,6,1>, <4,0,6,1>
+ 2693104684U, // <6,1,4,1>: Cost 3 vext3 <1,4,1,6>, <1,4,1,6>
+ 3639912357U, // <6,1,4,2>: Cost 4 vext1 <2,6,1,4>, <2,6,1,4>
+ 2687206462U, // <6,1,4,3>: Cost 3 vext3 <0,4,2,6>, <1,4,3,6>
+ 3633941814U, // <6,1,4,4>: Cost 4 vext1 <1,6,1,4>, RHS
+ 2693399632U, // <6,1,4,5>: Cost 3 vext3 <1,4,5,6>, <1,4,5,6>
+ 3765077075U, // <6,1,4,6>: Cost 4 vext3 <1,1,4,6>, <1,4,6,0>
+ 2646232530U, // <6,1,4,7>: Cost 3 vext2 <4,7,6,1>, <4,7,6,1>
+ 2687206507U, // <6,1,4,u>: Cost 3 vext3 <0,4,2,6>, <1,4,u,6>
+ 2647559796U, // <6,1,5,0>: Cost 3 vext2 <5,0,6,1>, <5,0,6,1>
+ 3765077118U, // <6,1,5,1>: Cost 4 vext3 <1,1,4,6>, <1,5,1,7>
+ 3767583878U, // <6,1,5,2>: Cost 4 vext3 <1,5,2,6>, <1,5,2,6>
+ 2686026896U, // <6,1,5,3>: Cost 3 vext3 <0,2,4,6>, <1,5,3,7>
+ 2693989528U, // <6,1,5,4>: Cost 3 vext3 <1,5,4,6>, <1,5,4,6>
+ 3767805089U, // <6,1,5,5>: Cost 4 vext3 <1,5,5,6>, <1,5,5,6>
+ 2652868706U, // <6,1,5,6>: Cost 3 vext2 <5,u,6,1>, <5,6,7,0>
+ 3908250934U, // <6,1,5,7>: Cost 4 vuzpr <2,6,0,1>, RHS
+ 2686026941U, // <6,1,5,u>: Cost 3 vext3 <0,2,4,6>, <1,5,u,7>
+ 2554241126U, // <6,1,6,0>: Cost 3 vext1 <0,6,1,6>, LHS
+ 3763602639U, // <6,1,6,1>: Cost 4 vext3 <0,u,2,6>, <1,6,1,7>
+ 3759547607U, // <6,1,6,2>: Cost 4 vext3 <0,2,1,6>, <1,6,2,6>
+ 3115221094U, // <6,1,6,3>: Cost 3 vtrnr <4,6,4,6>, LHS
+ 2554244406U, // <6,1,6,4>: Cost 3 vext1 <0,6,1,6>, RHS
+ 3760874739U, // <6,1,6,5>: Cost 4 vext3 <0,4,1,6>, <1,6,5,7>
+ 2554245944U, // <6,1,6,6>: Cost 3 vext1 <0,6,1,6>, <6,6,6,6>
+ 3719975758U, // <6,1,6,7>: Cost 4 vext2 <4,7,6,1>, <6,7,0,1>
+ 3115221099U, // <6,1,6,u>: Cost 3 vtrnr <4,6,4,6>, LHS
+ 2560221286U, // <6,1,7,0>: Cost 3 vext1 <1,6,1,7>, LHS
+ 2560222415U, // <6,1,7,1>: Cost 3 vext1 <1,6,1,7>, <1,6,1,7>
+ 2980497558U, // <6,1,7,2>: Cost 3 vzipr RHS, <3,0,1,2>
+ 3103211622U, // <6,1,7,3>: Cost 3 vtrnr <2,6,3,7>, LHS
+ 2560224566U, // <6,1,7,4>: Cost 3 vext1 <1,6,1,7>, RHS
+ 2980495698U, // <6,1,7,5>: Cost 3 vzipr RHS, <0,4,1,5>
+ 3633967526U, // <6,1,7,6>: Cost 4 vext1 <1,6,1,7>, <6,1,7,0>
+ 4054237686U, // <6,1,7,7>: Cost 4 vzipr RHS, <0,6,1,7>
+ 2560227118U, // <6,1,7,u>: Cost 3 vext1 <1,6,1,7>, LHS
+ 2560229478U, // <6,1,u,0>: Cost 3 vext1 <1,6,1,u>, LHS
+ 2686027117U, // <6,1,u,1>: Cost 3 vext3 <0,2,4,6>, <1,u,1,3>
+ 2686027129U, // <6,1,u,2>: Cost 3 vext3 <0,2,4,6>, <1,u,2,6>
+ 2686027132U, // <6,1,u,3>: Cost 3 vext3 <0,2,4,6>, <1,u,3,0>
+ 2687206795U, // <6,1,u,4>: Cost 3 vext3 <0,4,2,6>, <1,u,4,6>
+ 2686027157U, // <6,1,u,5>: Cost 3 vext3 <0,2,4,6>, <1,u,5,7>
+ 2590094093U, // <6,1,u,6>: Cost 3 vext1 <6,6,1,u>, <6,6,1,u>
+ 2596066790U, // <6,1,u,7>: Cost 3 vext1 <7,6,1,u>, <7,6,1,u>
+ 2686027177U, // <6,1,u,u>: Cost 3 vext3 <0,2,4,6>, <1,u,u,0>
+ 2646900736U, // <6,2,0,0>: Cost 3 vext2 <4,u,6,2>, <0,0,0,0>
+ 1573159014U, // <6,2,0,1>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2646900900U, // <6,2,0,2>: Cost 3 vext2 <4,u,6,2>, <0,2,0,2>
+ 3759769037U, // <6,2,0,3>: Cost 4 vext3 <0,2,4,6>, <2,0,3,0>
+ 2641592668U, // <6,2,0,4>: Cost 3 vext2 <4,0,6,2>, <0,4,2,6>
+ 3779085794U, // <6,2,0,5>: Cost 4 vext3 <3,4,5,6>, <2,0,5,3>
+ 2686027244U, // <6,2,0,6>: Cost 3 vext3 <0,2,4,6>, <2,0,6,4>
+ 3669816807U, // <6,2,0,7>: Cost 4 vext1 <7,6,2,0>, <7,6,2,0>
+ 1573159581U, // <6,2,0,u>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2230527897U, // <6,2,1,0>: Cost 3 vrev <2,6,0,1>
+ 2646901556U, // <6,2,1,1>: Cost 3 vext2 <4,u,6,2>, <1,1,1,1>
+ 2646901654U, // <6,2,1,2>: Cost 3 vext2 <4,u,6,2>, <1,2,3,0>
+ 2847047782U, // <6,2,1,3>: Cost 3 vuzpr <4,6,u,2>, LHS
+ 3771049517U, // <6,2,1,4>: Cost 4 vext3 <2,1,4,6>, <2,1,4,6>
+ 2646901904U, // <6,2,1,5>: Cost 3 vext2 <4,u,6,2>, <1,5,3,7>
+ 2686027324U, // <6,2,1,6>: Cost 3 vext3 <0,2,4,6>, <2,1,6,3>
+ 3669825000U, // <6,2,1,7>: Cost 4 vext1 <7,6,2,1>, <7,6,2,1>
+ 2231117793U, // <6,2,1,u>: Cost 3 vrev <2,6,u,1>
+ 3763603029U, // <6,2,2,0>: Cost 4 vext3 <0,u,2,6>, <2,2,0,1>
+ 3759769184U, // <6,2,2,1>: Cost 4 vext3 <0,2,4,6>, <2,2,1,3>
+ 2686027368U, // <6,2,2,2>: Cost 3 vext3 <0,2,4,6>, <2,2,2,2>
+ 2686027378U, // <6,2,2,3>: Cost 3 vext3 <0,2,4,6>, <2,2,3,3>
+ 2697971326U, // <6,2,2,4>: Cost 3 vext3 <2,2,4,6>, <2,2,4,6>
+ 3759769224U, // <6,2,2,5>: Cost 4 vext3 <0,2,4,6>, <2,2,5,7>
+ 2698118800U, // <6,2,2,6>: Cost 3 vext3 <2,2,6,6>, <2,2,6,6>
+ 3920794092U, // <6,2,2,7>: Cost 4 vuzpr <4,6,u,2>, <6,2,5,7>
+ 2686027423U, // <6,2,2,u>: Cost 3 vext3 <0,2,4,6>, <2,2,u,3>
+ 2686027430U, // <6,2,3,0>: Cost 3 vext3 <0,2,4,6>, <2,3,0,1>
+ 3759769262U, // <6,2,3,1>: Cost 4 vext3 <0,2,4,6>, <2,3,1,0>
+ 2698487485U, // <6,2,3,2>: Cost 3 vext3 <2,3,2,6>, <2,3,2,6>
+ 2705344196U, // <6,2,3,3>: Cost 3 vext3 <3,4,5,6>, <2,3,3,4>
+ 2686027470U, // <6,2,3,4>: Cost 3 vext3 <0,2,4,6>, <2,3,4,5>
+ 2698708696U, // <6,2,3,5>: Cost 3 vext3 <2,3,5,6>, <2,3,5,6>
+ 2724660961U, // <6,2,3,6>: Cost 3 vext3 <6,6,6,6>, <2,3,6,6>
+ 2729232104U, // <6,2,3,7>: Cost 3 vext3 <7,4,5,6>, <2,3,7,4>
+ 2686027502U, // <6,2,3,u>: Cost 3 vext3 <0,2,4,6>, <2,3,u,1>
+ 1567853468U, // <6,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
+ 3759769351U, // <6,2,4,1>: Cost 4 vext3 <0,2,4,6>, <2,4,1,u>
+ 2699151118U, // <6,2,4,2>: Cost 3 vext3 <2,4,2,6>, <2,4,2,6>
+ 2686027543U, // <6,2,4,3>: Cost 3 vext3 <0,2,4,6>, <2,4,3,6>
+ 2699298592U, // <6,2,4,4>: Cost 3 vext3 <2,4,4,6>, <2,4,4,6>
+ 1573162294U, // <6,2,4,5>: Cost 2 vext2 <4,u,6,2>, RHS
+ 2686027564U, // <6,2,4,6>: Cost 3 vext3 <0,2,4,6>, <2,4,6,0>
+ 3719982547U, // <6,2,4,7>: Cost 4 vext2 <4,7,6,2>, <4,7,6,2>
+ 1573162532U, // <6,2,4,u>: Cost 2 vext2 <4,u,6,2>, <4,u,6,2>
+ 3779086154U, // <6,2,5,0>: Cost 4 vext3 <3,4,5,6>, <2,5,0,3>
+ 2646904528U, // <6,2,5,1>: Cost 3 vext2 <4,u,6,2>, <5,1,7,3>
+ 3759769440U, // <6,2,5,2>: Cost 4 vext3 <0,2,4,6>, <2,5,2,7>
+ 2699888488U, // <6,2,5,3>: Cost 3 vext3 <2,5,3,6>, <2,5,3,6>
+ 2230855617U, // <6,2,5,4>: Cost 3 vrev <2,6,4,5>
+ 2646904836U, // <6,2,5,5>: Cost 3 vext2 <4,u,6,2>, <5,5,5,5>
+ 2646904930U, // <6,2,5,6>: Cost 3 vext2 <4,u,6,2>, <5,6,7,0>
+ 2847051062U, // <6,2,5,7>: Cost 3 vuzpr <4,6,u,2>, RHS
+ 2700257173U, // <6,2,5,u>: Cost 3 vext3 <2,5,u,6>, <2,5,u,6>
+ 2687207321U, // <6,2,6,0>: Cost 3 vext3 <0,4,2,6>, <2,6,0,1>
+ 2686027684U, // <6,2,6,1>: Cost 3 vext3 <0,2,4,6>, <2,6,1,3>
+ 2566260656U, // <6,2,6,2>: Cost 3 vext1 <2,6,2,6>, <2,6,2,6>
+ 2685806522U, // <6,2,6,3>: Cost 3 vext3 <0,2,1,6>, <2,6,3,7>
+ 2687207361U, // <6,2,6,4>: Cost 3 vext3 <0,4,2,6>, <2,6,4,5>
+ 2686027724U, // <6,2,6,5>: Cost 3 vext3 <0,2,4,6>, <2,6,5,7>
+ 2646905656U, // <6,2,6,6>: Cost 3 vext2 <4,u,6,2>, <6,6,6,6>
+ 2646905678U, // <6,2,6,7>: Cost 3 vext2 <4,u,6,2>, <6,7,0,1>
+ 2686027751U, // <6,2,6,u>: Cost 3 vext3 <0,2,4,6>, <2,6,u,7>
+ 2554323046U, // <6,2,7,0>: Cost 3 vext1 <0,6,2,7>, LHS
+ 2572239606U, // <6,2,7,1>: Cost 3 vext1 <3,6,2,7>, <1,0,3,2>
+ 2566268849U, // <6,2,7,2>: Cost 3 vext1 <2,6,2,7>, <2,6,2,7>
+ 1906753638U, // <6,2,7,3>: Cost 2 vzipr RHS, LHS
+ 2554326326U, // <6,2,7,4>: Cost 3 vext1 <0,6,2,7>, RHS
+ 3304687564U, // <6,2,7,5>: Cost 4 vrev <2,6,5,7>
+ 2980495708U, // <6,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
+ 2646906476U, // <6,2,7,7>: Cost 3 vext2 <4,u,6,2>, <7,7,7,7>
+ 1906753643U, // <6,2,7,u>: Cost 2 vzipr RHS, LHS
+ 1591744256U, // <6,2,u,0>: Cost 2 vext2 <u,0,6,2>, <u,0,6,2>
+ 1573164846U, // <6,2,u,1>: Cost 2 vext2 <4,u,6,2>, LHS
+ 2701805650U, // <6,2,u,2>: Cost 3 vext3 <2,u,2,6>, <2,u,2,6>
+ 1906761830U, // <6,2,u,3>: Cost 2 vzipr RHS, LHS
+ 2686027875U, // <6,2,u,4>: Cost 3 vext3 <0,2,4,6>, <2,u,4,5>
+ 1573165210U, // <6,2,u,5>: Cost 2 vext2 <4,u,6,2>, RHS
+ 2686322800U, // <6,2,u,6>: Cost 3 vext3 <0,2,u,6>, <2,u,6,0>
+ 2847051305U, // <6,2,u,7>: Cost 3 vuzpr <4,6,u,2>, RHS
+ 1906761835U, // <6,2,u,u>: Cost 2 vzipr RHS, LHS
+ 3759769739U, // <6,3,0,0>: Cost 4 vext3 <0,2,4,6>, <3,0,0,0>
+ 2686027926U, // <6,3,0,1>: Cost 3 vext3 <0,2,4,6>, <3,0,1,2>
+ 2686027937U, // <6,3,0,2>: Cost 3 vext3 <0,2,4,6>, <3,0,2,4>
+ 3640027286U, // <6,3,0,3>: Cost 4 vext1 <2,6,3,0>, <3,0,1,2>
+ 2687207601U, // <6,3,0,4>: Cost 3 vext3 <0,4,2,6>, <3,0,4,2>
+ 2705344698U, // <6,3,0,5>: Cost 3 vext3 <3,4,5,6>, <3,0,5,2>
+ 3663917847U, // <6,3,0,6>: Cost 4 vext1 <6,6,3,0>, <6,6,3,0>
+ 2237008560U, // <6,3,0,7>: Cost 3 vrev <3,6,7,0>
+ 2686027989U, // <6,3,0,u>: Cost 3 vext3 <0,2,4,6>, <3,0,u,2>
+ 3759769823U, // <6,3,1,0>: Cost 4 vext3 <0,2,4,6>, <3,1,0,3>
+ 3759769830U, // <6,3,1,1>: Cost 4 vext3 <0,2,4,6>, <3,1,1,1>
+ 3759769841U, // <6,3,1,2>: Cost 4 vext3 <0,2,4,6>, <3,1,2,3>
+ 3759769848U, // <6,3,1,3>: Cost 4 vext3 <0,2,4,6>, <3,1,3,1>
+ 2703280390U, // <6,3,1,4>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
+ 3759769868U, // <6,3,1,5>: Cost 4 vext3 <0,2,4,6>, <3,1,5,3>
+ 3704063194U, // <6,3,1,6>: Cost 4 vext2 <2,1,6,3>, <1,6,3,0>
+ 3767732510U, // <6,3,1,7>: Cost 4 vext3 <1,5,4,6>, <3,1,7,3>
+ 2703280390U, // <6,3,1,u>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
+ 3704063468U, // <6,3,2,0>: Cost 4 vext2 <2,1,6,3>, <2,0,6,4>
+ 2630321724U, // <6,3,2,1>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
+ 3759769921U, // <6,3,2,2>: Cost 4 vext3 <0,2,4,6>, <3,2,2,2>
+ 3759769928U, // <6,3,2,3>: Cost 4 vext3 <0,2,4,6>, <3,2,3,0>
+ 3704063767U, // <6,3,2,4>: Cost 4 vext2 <2,1,6,3>, <2,4,3,6>
+ 3704063876U, // <6,3,2,5>: Cost 4 vext2 <2,1,6,3>, <2,5,6,7>
+ 2636957626U, // <6,3,2,6>: Cost 3 vext2 <3,2,6,3>, <2,6,3,7>
+ 3777907058U, // <6,3,2,7>: Cost 4 vext3 <3,2,7,6>, <3,2,7,6>
+ 2630321724U, // <6,3,2,u>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
+ 3759769983U, // <6,3,3,0>: Cost 4 vext3 <0,2,4,6>, <3,3,0,1>
+ 3710036245U, // <6,3,3,1>: Cost 4 vext2 <3,1,6,3>, <3,1,6,3>
+ 2636958054U, // <6,3,3,2>: Cost 3 vext2 <3,2,6,3>, <3,2,6,3>
+ 2686028188U, // <6,3,3,3>: Cost 3 vext3 <0,2,4,6>, <3,3,3,3>
+ 2704607656U, // <6,3,3,4>: Cost 3 vext3 <3,3,4,6>, <3,3,4,6>
+ 3773041072U, // <6,3,3,5>: Cost 4 vext3 <2,4,4,6>, <3,3,5,5>
+ 3711363731U, // <6,3,3,6>: Cost 4 vext2 <3,3,6,3>, <3,6,3,7>
+ 3767732676U, // <6,3,3,7>: Cost 4 vext3 <1,5,4,6>, <3,3,7,7>
+ 2707999179U, // <6,3,3,u>: Cost 3 vext3 <3,u,5,6>, <3,3,u,5>
+ 2584232038U, // <6,3,4,0>: Cost 3 vext1 <5,6,3,4>, LHS
+ 2642267118U, // <6,3,4,1>: Cost 3 vext2 <4,1,6,3>, <4,1,6,3>
+ 2642930751U, // <6,3,4,2>: Cost 3 vext2 <4,2,6,3>, <4,2,6,3>
+ 2705197552U, // <6,3,4,3>: Cost 3 vext3 <3,4,3,6>, <3,4,3,6>
+ 2584235318U, // <6,3,4,4>: Cost 3 vext1 <5,6,3,4>, RHS
+ 1631603202U, // <6,3,4,5>: Cost 2 vext3 <3,4,5,6>, <3,4,5,6>
+ 2654211444U, // <6,3,4,6>: Cost 3 vext2 <6,1,6,3>, <4,6,4,6>
+ 2237041332U, // <6,3,4,7>: Cost 3 vrev <3,6,7,4>
+ 1631824413U, // <6,3,4,u>: Cost 2 vext3 <3,4,u,6>, <3,4,u,6>
+ 3640066150U, // <6,3,5,0>: Cost 4 vext1 <2,6,3,5>, LHS
+ 3772746288U, // <6,3,5,1>: Cost 4 vext3 <2,4,0,6>, <3,5,1,7>
+ 3640067790U, // <6,3,5,2>: Cost 4 vext1 <2,6,3,5>, <2,3,4,5>
+ 3773041216U, // <6,3,5,3>: Cost 4 vext3 <2,4,4,6>, <3,5,3,5>
+ 2705934922U, // <6,3,5,4>: Cost 3 vext3 <3,5,4,6>, <3,5,4,6>
+ 3773041236U, // <6,3,5,5>: Cost 4 vext3 <2,4,4,6>, <3,5,5,7>
+ 3779086940U, // <6,3,5,6>: Cost 4 vext3 <3,4,5,6>, <3,5,6,6>
+ 3767732831U, // <6,3,5,7>: Cost 4 vext3 <1,5,4,6>, <3,5,7,0>
+ 2706229870U, // <6,3,5,u>: Cost 3 vext3 <3,5,u,6>, <3,5,u,6>
+ 2602164326U, // <6,3,6,0>: Cost 3 vext1 <u,6,3,6>, LHS
+ 2654212512U, // <6,3,6,1>: Cost 3 vext2 <6,1,6,3>, <6,1,6,3>
+ 2566334393U, // <6,3,6,2>: Cost 3 vext1 <2,6,3,6>, <2,6,3,6>
+ 3704066588U, // <6,3,6,3>: Cost 4 vext2 <2,1,6,3>, <6,3,2,1>
+ 2602167524U, // <6,3,6,4>: Cost 3 vext1 <u,6,3,6>, <4,4,6,6>
+ 3710702321U, // <6,3,6,5>: Cost 4 vext2 <3,2,6,3>, <6,5,7,7>
+ 2724661933U, // <6,3,6,6>: Cost 3 vext3 <6,6,6,6>, <3,6,6,6>
+ 3710702465U, // <6,3,6,7>: Cost 4 vext2 <3,2,6,3>, <6,7,5,7>
+ 2602170158U, // <6,3,6,u>: Cost 3 vext1 <u,6,3,6>, LHS
+ 1492598886U, // <6,3,7,0>: Cost 2 vext1 <2,6,3,7>, LHS
+ 2560369889U, // <6,3,7,1>: Cost 3 vext1 <1,6,3,7>, <1,6,3,7>
+ 1492600762U, // <6,3,7,2>: Cost 2 vext1 <2,6,3,7>, <2,6,3,7>
+ 2566342806U, // <6,3,7,3>: Cost 3 vext1 <2,6,3,7>, <3,0,1,2>
+ 1492602166U, // <6,3,7,4>: Cost 2 vext1 <2,6,3,7>, RHS
+ 2602176208U, // <6,3,7,5>: Cost 3 vext1 <u,6,3,7>, <5,1,7,3>
+ 2566345210U, // <6,3,7,6>: Cost 3 vext1 <2,6,3,7>, <6,2,7,3>
+ 2980496528U, // <6,3,7,7>: Cost 3 vzipr RHS, <1,5,3,7>
+ 1492604718U, // <6,3,7,u>: Cost 2 vext1 <2,6,3,7>, LHS
+ 1492607078U, // <6,3,u,0>: Cost 2 vext1 <2,6,3,u>, LHS
+ 2686028574U, // <6,3,u,1>: Cost 3 vext3 <0,2,4,6>, <3,u,1,2>
+ 1492608955U, // <6,3,u,2>: Cost 2 vext1 <2,6,3,u>, <2,6,3,u>
+ 2566350998U, // <6,3,u,3>: Cost 3 vext1 <2,6,3,u>, <3,0,1,2>
+ 1492610358U, // <6,3,u,4>: Cost 2 vext1 <2,6,3,u>, RHS
+ 1634257734U, // <6,3,u,5>: Cost 2 vext3 <3,u,5,6>, <3,u,5,6>
+ 2566353489U, // <6,3,u,6>: Cost 3 vext1 <2,6,3,u>, <6,3,u,0>
+ 2980504720U, // <6,3,u,7>: Cost 3 vzipr RHS, <1,5,3,7>
+ 1492612910U, // <6,3,u,u>: Cost 2 vext1 <2,6,3,u>, LHS
+ 3703406592U, // <6,4,0,0>: Cost 4 vext2 <2,0,6,4>, <0,0,0,0>
+ 2629664870U, // <6,4,0,1>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2629664972U, // <6,4,0,2>: Cost 3 vext2 <2,0,6,4>, <0,2,4,6>
+ 3779087232U, // <6,4,0,3>: Cost 4 vext3 <3,4,5,6>, <4,0,3,1>
+ 2642936156U, // <6,4,0,4>: Cost 3 vext2 <4,2,6,4>, <0,4,2,6>
+ 2712570770U, // <6,4,0,5>: Cost 3 vext3 <4,6,4,6>, <4,0,5,1>
+ 2687208348U, // <6,4,0,6>: Cost 3 vext3 <0,4,2,6>, <4,0,6,2>
+ 3316723081U, // <6,4,0,7>: Cost 4 vrev <4,6,7,0>
+ 2629665437U, // <6,4,0,u>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2242473291U, // <6,4,1,0>: Cost 3 vrev <4,6,0,1>
+ 3700089652U, // <6,4,1,1>: Cost 4 vext2 <1,4,6,4>, <1,1,1,1>
+ 3703407510U, // <6,4,1,2>: Cost 4 vext2 <2,0,6,4>, <1,2,3,0>
+ 2852962406U, // <6,4,1,3>: Cost 3 vuzpr <5,6,7,4>, LHS
+ 3628166454U, // <6,4,1,4>: Cost 4 vext1 <0,6,4,1>, RHS
+ 3760876514U, // <6,4,1,5>: Cost 4 vext3 <0,4,1,6>, <4,1,5,0>
+ 2687208430U, // <6,4,1,6>: Cost 3 vext3 <0,4,2,6>, <4,1,6,3>
+ 3316731274U, // <6,4,1,7>: Cost 4 vrev <4,6,7,1>
+ 2243063187U, // <6,4,1,u>: Cost 3 vrev <4,6,u,1>
+ 2629666284U, // <6,4,2,0>: Cost 3 vext2 <2,0,6,4>, <2,0,6,4>
+ 3703408188U, // <6,4,2,1>: Cost 4 vext2 <2,0,6,4>, <2,1,6,3>
+ 3703408232U, // <6,4,2,2>: Cost 4 vext2 <2,0,6,4>, <2,2,2,2>
+ 3703408294U, // <6,4,2,3>: Cost 4 vext2 <2,0,6,4>, <2,3,0,1>
+ 2632320816U, // <6,4,2,4>: Cost 3 vext2 <2,4,6,4>, <2,4,6,4>
+ 2923384118U, // <6,4,2,5>: Cost 3 vzipl <6,2,7,3>, RHS
+ 2687208508U, // <6,4,2,6>: Cost 3 vext3 <0,4,2,6>, <4,2,6,0>
+ 3760950341U, // <6,4,2,7>: Cost 4 vext3 <0,4,2,6>, <4,2,7,0>
+ 2634975348U, // <6,4,2,u>: Cost 3 vext2 <2,u,6,4>, <2,u,6,4>
+ 3703408790U, // <6,4,3,0>: Cost 4 vext2 <2,0,6,4>, <3,0,1,2>
+ 3316305238U, // <6,4,3,1>: Cost 4 vrev <4,6,1,3>
+ 3703408947U, // <6,4,3,2>: Cost 4 vext2 <2,0,6,4>, <3,2,0,6>
+ 3703409052U, // <6,4,3,3>: Cost 4 vext2 <2,0,6,4>, <3,3,3,3>
+ 2644929026U, // <6,4,3,4>: Cost 3 vext2 <4,5,6,4>, <3,4,5,6>
+ 3718670922U, // <6,4,3,5>: Cost 4 vext2 <4,5,6,4>, <3,5,4,6>
+ 2705345682U, // <6,4,3,6>: Cost 3 vext3 <3,4,5,6>, <4,3,6,5>
+ 3926705152U, // <6,4,3,7>: Cost 4 vuzpr <5,6,7,4>, <1,3,5,7>
+ 2668817222U, // <6,4,3,u>: Cost 3 vext2 <u,5,6,4>, <3,u,5,6>
+ 2590277734U, // <6,4,4,0>: Cost 3 vext1 <6,6,4,4>, LHS
+ 3716017135U, // <6,4,4,1>: Cost 4 vext2 <4,1,6,4>, <4,1,6,4>
+ 2642938944U, // <6,4,4,2>: Cost 3 vext2 <4,2,6,4>, <4,2,6,4>
+ 3717344401U, // <6,4,4,3>: Cost 4 vext2 <4,3,6,4>, <4,3,6,4>
+ 2712571088U, // <6,4,4,4>: Cost 3 vext3 <4,6,4,6>, <4,4,4,4>
+ 2629668150U, // <6,4,4,5>: Cost 3 vext2 <2,0,6,4>, RHS
+ 1637649636U, // <6,4,4,6>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
+ 2646257109U, // <6,4,4,7>: Cost 3 vext2 <4,7,6,4>, <4,7,6,4>
+ 1637649636U, // <6,4,4,u>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
+ 2566398054U, // <6,4,5,0>: Cost 3 vext1 <2,6,4,5>, LHS
+ 3760876805U, // <6,4,5,1>: Cost 4 vext3 <0,4,1,6>, <4,5,1,3>
+ 2566399937U, // <6,4,5,2>: Cost 3 vext1 <2,6,4,5>, <2,6,4,5>
+ 2584316418U, // <6,4,5,3>: Cost 3 vext1 <5,6,4,5>, <3,4,5,6>
+ 2566401334U, // <6,4,5,4>: Cost 3 vext1 <2,6,4,5>, RHS
+ 2584318028U, // <6,4,5,5>: Cost 3 vext1 <5,6,4,5>, <5,6,4,5>
+ 1612287286U, // <6,4,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 2852965686U, // <6,4,5,7>: Cost 3 vuzpr <5,6,7,4>, RHS
+ 1612287304U, // <6,4,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1504608358U, // <6,4,6,0>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2578350838U, // <6,4,6,1>: Cost 3 vext1 <4,6,4,6>, <1,0,3,2>
+ 2578351720U, // <6,4,6,2>: Cost 3 vext1 <4,6,4,6>, <2,2,2,2>
+ 2578352278U, // <6,4,6,3>: Cost 3 vext1 <4,6,4,6>, <3,0,1,2>
+ 1504611638U, // <6,4,6,4>: Cost 2 vext1 <4,6,4,6>, RHS
+ 2578353872U, // <6,4,6,5>: Cost 3 vext1 <4,6,4,6>, <5,1,7,3>
+ 2578354682U, // <6,4,6,6>: Cost 3 vext1 <4,6,4,6>, <6,2,7,3>
+ 2578355194U, // <6,4,6,7>: Cost 3 vext1 <4,6,4,6>, <7,0,1,2>
+ 1504614190U, // <6,4,6,u>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2572386406U, // <6,4,7,0>: Cost 3 vext1 <3,6,4,7>, LHS
+ 2572387226U, // <6,4,7,1>: Cost 3 vext1 <3,6,4,7>, <1,2,3,4>
+ 3640157902U, // <6,4,7,2>: Cost 4 vext1 <2,6,4,7>, <2,3,4,5>
+ 2572389020U, // <6,4,7,3>: Cost 3 vext1 <3,6,4,7>, <3,6,4,7>
+ 2572389686U, // <6,4,7,4>: Cost 3 vext1 <3,6,4,7>, RHS
+ 2980497102U, // <6,4,7,5>: Cost 3 vzipr RHS, <2,3,4,5>
+ 2980495564U, // <6,4,7,6>: Cost 3 vzipr RHS, <0,2,4,6>
+ 4054239090U, // <6,4,7,7>: Cost 4 vzipr RHS, <2,5,4,7>
+ 2572392238U, // <6,4,7,u>: Cost 3 vext1 <3,6,4,7>, LHS
+ 1504608358U, // <6,4,u,0>: Cost 2 vext1 <4,6,4,6>, LHS
+ 2629670702U, // <6,4,u,1>: Cost 3 vext2 <2,0,6,4>, LHS
+ 2566424516U, // <6,4,u,2>: Cost 3 vext1 <2,6,4,u>, <2,6,4,u>
+ 2584340994U, // <6,4,u,3>: Cost 3 vext1 <5,6,4,u>, <3,4,5,6>
+ 1640156694U, // <6,4,u,4>: Cost 2 vext3 <4,u,4,6>, <4,u,4,6>
+ 2629671066U, // <6,4,u,5>: Cost 3 vext2 <2,0,6,4>, RHS
+ 1612287529U, // <6,4,u,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 2852965929U, // <6,4,u,7>: Cost 3 vuzpr <5,6,7,4>, RHS
+ 1612287547U, // <6,4,u,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 3708723200U, // <6,5,0,0>: Cost 4 vext2 <2,u,6,5>, <0,0,0,0>
+ 2634981478U, // <6,5,0,1>: Cost 3 vext2 <2,u,6,5>, LHS
+ 3694125260U, // <6,5,0,2>: Cost 4 vext2 <0,4,6,5>, <0,2,4,6>
+ 3779087962U, // <6,5,0,3>: Cost 4 vext3 <3,4,5,6>, <5,0,3,2>
+ 3760877154U, // <6,5,0,4>: Cost 4 vext3 <0,4,1,6>, <5,0,4,1>
+ 4195110916U, // <6,5,0,5>: Cost 4 vtrnr <5,6,7,0>, <5,5,5,5>
+ 3696779775U, // <6,5,0,6>: Cost 4 vext2 <0,u,6,5>, <0,6,2,7>
+ 1175212130U, // <6,5,0,7>: Cost 2 vrev <5,6,7,0>
+ 1175285867U, // <6,5,0,u>: Cost 2 vrev <5,6,u,0>
+ 2248445988U, // <6,5,1,0>: Cost 3 vrev <5,6,0,1>
+ 3698107237U, // <6,5,1,1>: Cost 4 vext2 <1,1,6,5>, <1,1,6,5>
+ 3708724118U, // <6,5,1,2>: Cost 4 vext2 <2,u,6,5>, <1,2,3,0>
+ 3908575334U, // <6,5,1,3>: Cost 4 vuzpr <2,6,4,5>, LHS
+ 3716023376U, // <6,5,1,4>: Cost 4 vext2 <4,1,6,5>, <1,4,5,6>
+ 3708724368U, // <6,5,1,5>: Cost 4 vext2 <2,u,6,5>, <1,5,3,7>
+ 3767733960U, // <6,5,1,6>: Cost 4 vext3 <1,5,4,6>, <5,1,6,4>
+ 2712571600U, // <6,5,1,7>: Cost 3 vext3 <4,6,4,6>, <5,1,7,3>
+ 2712571609U, // <6,5,1,u>: Cost 3 vext3 <4,6,4,6>, <5,1,u,3>
+ 2578391142U, // <6,5,2,0>: Cost 3 vext1 <4,6,5,2>, LHS
+ 3704079934U, // <6,5,2,1>: Cost 4 vext2 <2,1,6,5>, <2,1,6,5>
+ 3708724840U, // <6,5,2,2>: Cost 4 vext2 <2,u,6,5>, <2,2,2,2>
+ 3705407182U, // <6,5,2,3>: Cost 4 vext2 <2,3,6,5>, <2,3,4,5>
+ 2578394422U, // <6,5,2,4>: Cost 3 vext1 <4,6,5,2>, RHS
+ 3717351272U, // <6,5,2,5>: Cost 4 vext2 <4,3,6,5>, <2,5,3,6>
+ 2634983354U, // <6,5,2,6>: Cost 3 vext2 <2,u,6,5>, <2,6,3,7>
+ 3115486518U, // <6,5,2,7>: Cost 3 vtrnr <4,6,u,2>, RHS
+ 2634983541U, // <6,5,2,u>: Cost 3 vext2 <2,u,6,5>, <2,u,6,5>
+ 3708725398U, // <6,5,3,0>: Cost 4 vext2 <2,u,6,5>, <3,0,1,2>
+ 3710052631U, // <6,5,3,1>: Cost 4 vext2 <3,1,6,5>, <3,1,6,5>
+ 3708725606U, // <6,5,3,2>: Cost 4 vext2 <2,u,6,5>, <3,2,6,3>
+ 3708725660U, // <6,5,3,3>: Cost 4 vext2 <2,u,6,5>, <3,3,3,3>
+ 2643610114U, // <6,5,3,4>: Cost 3 vext2 <4,3,6,5>, <3,4,5,6>
+ 3717352010U, // <6,5,3,5>: Cost 4 vext2 <4,3,6,5>, <3,5,4,6>
+ 3773632358U, // <6,5,3,6>: Cost 4 vext3 <2,5,3,6>, <5,3,6,0>
+ 2248978533U, // <6,5,3,7>: Cost 3 vrev <5,6,7,3>
+ 2249052270U, // <6,5,3,u>: Cost 3 vrev <5,6,u,3>
+ 2596323430U, // <6,5,4,0>: Cost 3 vext1 <7,6,5,4>, LHS
+ 3716025328U, // <6,5,4,1>: Cost 4 vext2 <4,1,6,5>, <4,1,6,5>
+ 3716688961U, // <6,5,4,2>: Cost 4 vext2 <4,2,6,5>, <4,2,6,5>
+ 2643610770U, // <6,5,4,3>: Cost 3 vext2 <4,3,6,5>, <4,3,6,5>
+ 2596326710U, // <6,5,4,4>: Cost 3 vext1 <7,6,5,4>, RHS
+ 2634984758U, // <6,5,4,5>: Cost 3 vext2 <2,u,6,5>, RHS
+ 3767734199U, // <6,5,4,6>: Cost 4 vext3 <1,5,4,6>, <5,4,6,0>
+ 1643696070U, // <6,5,4,7>: Cost 2 vext3 <5,4,7,6>, <5,4,7,6>
+ 1643769807U, // <6,5,4,u>: Cost 2 vext3 <5,4,u,6>, <5,4,u,6>
+ 2578415718U, // <6,5,5,0>: Cost 3 vext1 <4,6,5,5>, LHS
+ 3652158198U, // <6,5,5,1>: Cost 4 vext1 <4,6,5,5>, <1,0,3,2>
+ 3652159080U, // <6,5,5,2>: Cost 4 vext1 <4,6,5,5>, <2,2,2,2>
+ 3652159638U, // <6,5,5,3>: Cost 4 vext1 <4,6,5,5>, <3,0,1,2>
+ 2578418998U, // <6,5,5,4>: Cost 3 vext1 <4,6,5,5>, RHS
+ 2712571908U, // <6,5,5,5>: Cost 3 vext3 <4,6,4,6>, <5,5,5,5>
+ 2718027790U, // <6,5,5,6>: Cost 3 vext3 <5,5,6,6>, <5,5,6,6>
+ 2712571928U, // <6,5,5,7>: Cost 3 vext3 <4,6,4,6>, <5,5,7,7>
+ 2712571937U, // <6,5,5,u>: Cost 3 vext3 <4,6,4,6>, <5,5,u,7>
+ 2705346596U, // <6,5,6,0>: Cost 3 vext3 <3,4,5,6>, <5,6,0,1>
+ 3767144496U, // <6,5,6,1>: Cost 4 vext3 <1,4,5,6>, <5,6,1,4>
+ 3773116473U, // <6,5,6,2>: Cost 4 vext3 <2,4,5,6>, <5,6,2,4>
+ 2705346626U, // <6,5,6,3>: Cost 3 vext3 <3,4,5,6>, <5,6,3,4>
+ 2705346636U, // <6,5,6,4>: Cost 3 vext3 <3,4,5,6>, <5,6,4,5>
+ 3908577217U, // <6,5,6,5>: Cost 4 vuzpr <2,6,4,5>, <2,6,4,5>
+ 2578428728U, // <6,5,6,6>: Cost 3 vext1 <4,6,5,6>, <6,6,6,6>
+ 2712572002U, // <6,5,6,7>: Cost 3 vext3 <4,6,4,6>, <5,6,7,0>
+ 2705346668U, // <6,5,6,u>: Cost 3 vext3 <3,4,5,6>, <5,6,u,1>
+ 2560516198U, // <6,5,7,0>: Cost 3 vext1 <1,6,5,7>, LHS
+ 2560517363U, // <6,5,7,1>: Cost 3 vext1 <1,6,5,7>, <1,6,5,7>
+ 2566490060U, // <6,5,7,2>: Cost 3 vext1 <2,6,5,7>, <2,6,5,7>
+ 3634260118U, // <6,5,7,3>: Cost 4 vext1 <1,6,5,7>, <3,0,1,2>
+ 2560519478U, // <6,5,7,4>: Cost 3 vext1 <1,6,5,7>, RHS
+ 2980498650U, // <6,5,7,5>: Cost 3 vzipr RHS, <4,4,5,5>
+ 2980497922U, // <6,5,7,6>: Cost 3 vzipr RHS, <3,4,5,6>
+ 3103214902U, // <6,5,7,7>: Cost 3 vtrnr <2,6,3,7>, RHS
+ 2560522030U, // <6,5,7,u>: Cost 3 vext1 <1,6,5,7>, LHS
+ 2560524390U, // <6,5,u,0>: Cost 3 vext1 <1,6,5,u>, LHS
+ 2560525556U, // <6,5,u,1>: Cost 3 vext1 <1,6,5,u>, <1,6,5,u>
+ 2566498253U, // <6,5,u,2>: Cost 3 vext1 <2,6,5,u>, <2,6,5,u>
+ 2646931439U, // <6,5,u,3>: Cost 3 vext2 <4,u,6,5>, <u,3,5,7>
+ 2560527670U, // <6,5,u,4>: Cost 3 vext1 <1,6,5,u>, RHS
+ 2634987674U, // <6,5,u,5>: Cost 3 vext2 <2,u,6,5>, RHS
+ 2980506114U, // <6,5,u,6>: Cost 3 vzipr RHS, <3,4,5,6>
+ 1175277674U, // <6,5,u,7>: Cost 2 vrev <5,6,7,u>
+ 1175351411U, // <6,5,u,u>: Cost 2 vrev <5,6,u,u>
+ 2578448486U, // <6,6,0,0>: Cost 3 vext1 <4,6,6,0>, LHS
+ 1573191782U, // <6,6,0,1>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2686030124U, // <6,6,0,2>: Cost 3 vext3 <0,2,4,6>, <6,0,2,4>
+ 3779088690U, // <6,6,0,3>: Cost 4 vext3 <3,4,5,6>, <6,0,3,1>
+ 2687209788U, // <6,6,0,4>: Cost 3 vext3 <0,4,2,6>, <6,0,4,2>
+ 3652194000U, // <6,6,0,5>: Cost 4 vext1 <4,6,6,0>, <5,1,7,3>
+ 2254852914U, // <6,6,0,6>: Cost 3 vrev <6,6,6,0>
+ 4041575734U, // <6,6,0,7>: Cost 4 vzipr <2,4,6,0>, RHS
+ 1573192349U, // <6,6,0,u>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2646934262U, // <6,6,1,0>: Cost 3 vext2 <4,u,6,6>, <1,0,3,2>
+ 2646934324U, // <6,6,1,1>: Cost 3 vext2 <4,u,6,6>, <1,1,1,1>
+ 2646934422U, // <6,6,1,2>: Cost 3 vext2 <4,u,6,6>, <1,2,3,0>
+ 2846785638U, // <6,6,1,3>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 3760951694U, // <6,6,1,4>: Cost 4 vext3 <0,4,2,6>, <6,1,4,3>
+ 2646934672U, // <6,6,1,5>: Cost 3 vext2 <4,u,6,6>, <1,5,3,7>
+ 2712572320U, // <6,6,1,6>: Cost 3 vext3 <4,6,4,6>, <6,1,6,3>
+ 3775549865U, // <6,6,1,7>: Cost 4 vext3 <2,u,2,6>, <6,1,7,3>
+ 2846785643U, // <6,6,1,u>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 3759772094U, // <6,6,2,0>: Cost 4 vext3 <0,2,4,6>, <6,2,0,6>
+ 3704751676U, // <6,6,2,1>: Cost 4 vext2 <2,2,6,6>, <2,1,6,3>
+ 2631009936U, // <6,6,2,2>: Cost 3 vext2 <2,2,6,6>, <2,2,6,6>
+ 2646935206U, // <6,6,2,3>: Cost 3 vext2 <4,u,6,6>, <2,3,0,1>
+ 3759772127U, // <6,6,2,4>: Cost 4 vext3 <0,2,4,6>, <6,2,4,3>
+ 3704752004U, // <6,6,2,5>: Cost 4 vext2 <2,2,6,6>, <2,5,6,7>
+ 2646935482U, // <6,6,2,6>: Cost 3 vext2 <4,u,6,6>, <2,6,3,7>
+ 2712572410U, // <6,6,2,7>: Cost 3 vext3 <4,6,4,6>, <6,2,7,3>
+ 2712572419U, // <6,6,2,u>: Cost 3 vext3 <4,6,4,6>, <6,2,u,3>
+ 2646935702U, // <6,6,3,0>: Cost 3 vext2 <4,u,6,6>, <3,0,1,2>
+ 3777024534U, // <6,6,3,1>: Cost 4 vext3 <3,1,4,6>, <6,3,1,4>
+ 3704752453U, // <6,6,3,2>: Cost 4 vext2 <2,2,6,6>, <3,2,2,6>
+ 2646935964U, // <6,6,3,3>: Cost 3 vext2 <4,u,6,6>, <3,3,3,3>
+ 2705347122U, // <6,6,3,4>: Cost 3 vext3 <3,4,5,6>, <6,3,4,5>
+ 3779678778U, // <6,6,3,5>: Cost 4 vext3 <3,5,4,6>, <6,3,5,4>
+ 2657553069U, // <6,6,3,6>: Cost 3 vext2 <6,6,6,6>, <3,6,6,6>
+ 4039609654U, // <6,6,3,7>: Cost 4 vzipr <2,1,6,3>, RHS
+ 2708001366U, // <6,6,3,u>: Cost 3 vext3 <3,u,5,6>, <6,3,u,5>
+ 2578481254U, // <6,6,4,0>: Cost 3 vext1 <4,6,6,4>, LHS
+ 3652223734U, // <6,6,4,1>: Cost 4 vext1 <4,6,6,4>, <1,0,3,2>
+ 3760951922U, // <6,6,4,2>: Cost 4 vext3 <0,4,2,6>, <6,4,2,6>
+ 3779089019U, // <6,6,4,3>: Cost 4 vext3 <3,4,5,6>, <6,4,3,6>
+ 1570540772U, // <6,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
+ 1573195062U, // <6,6,4,5>: Cost 2 vext2 <4,u,6,6>, RHS
+ 2712572560U, // <6,6,4,6>: Cost 3 vext3 <4,6,4,6>, <6,4,6,0>
+ 2723410591U, // <6,6,4,7>: Cost 3 vext3 <6,4,7,6>, <6,4,7,6>
+ 1573195304U, // <6,6,4,u>: Cost 2 vext2 <4,u,6,6>, <4,u,6,6>
+ 3640287334U, // <6,6,5,0>: Cost 4 vext1 <2,6,6,5>, LHS
+ 2646937296U, // <6,6,5,1>: Cost 3 vext2 <4,u,6,6>, <5,1,7,3>
+ 3640289235U, // <6,6,5,2>: Cost 4 vext1 <2,6,6,5>, <2,6,6,5>
+ 3720679279U, // <6,6,5,3>: Cost 4 vext2 <4,u,6,6>, <5,3,7,0>
+ 2646937542U, // <6,6,5,4>: Cost 3 vext2 <4,u,6,6>, <5,4,7,6>
+ 2646937604U, // <6,6,5,5>: Cost 3 vext2 <4,u,6,6>, <5,5,5,5>
+ 2646937698U, // <6,6,5,6>: Cost 3 vext2 <4,u,6,6>, <5,6,7,0>
+ 2846788918U, // <6,6,5,7>: Cost 3 vuzpr <4,6,4,6>, RHS
+ 2846788919U, // <6,6,5,u>: Cost 3 vuzpr <4,6,4,6>, RHS
+ 1516699750U, // <6,6,6,0>: Cost 2 vext1 <6,6,6,6>, LHS
+ 2590442230U, // <6,6,6,1>: Cost 3 vext1 <6,6,6,6>, <1,0,3,2>
+ 2646938106U, // <6,6,6,2>: Cost 3 vext2 <4,u,6,6>, <6,2,7,3>
+ 2590443670U, // <6,6,6,3>: Cost 3 vext1 <6,6,6,6>, <3,0,1,2>
+ 1516703030U, // <6,6,6,4>: Cost 2 vext1 <6,6,6,6>, RHS
+ 2590445264U, // <6,6,6,5>: Cost 3 vext1 <6,6,6,6>, <5,1,7,3>
+ 296144182U, // <6,6,6,6>: Cost 1 vdup2 RHS
+ 2712572738U, // <6,6,6,7>: Cost 3 vext3 <4,6,4,6>, <6,6,7,7>
+ 296144182U, // <6,6,6,u>: Cost 1 vdup2 RHS
+ 2566561894U, // <6,6,7,0>: Cost 3 vext1 <2,6,6,7>, LHS
+ 3634332924U, // <6,6,7,1>: Cost 4 vext1 <1,6,6,7>, <1,6,6,7>
+ 2566563797U, // <6,6,7,2>: Cost 3 vext1 <2,6,6,7>, <2,6,6,7>
+ 2584480258U, // <6,6,7,3>: Cost 3 vext1 <5,6,6,7>, <3,4,5,6>
+ 2566565174U, // <6,6,7,4>: Cost 3 vext1 <2,6,6,7>, RHS
+ 2717438846U, // <6,6,7,5>: Cost 3 vext3 <5,4,7,6>, <6,7,5,4>
+ 2980500280U, // <6,6,7,6>: Cost 3 vzipr RHS, <6,6,6,6>
+ 1906756918U, // <6,6,7,7>: Cost 2 vzipr RHS, RHS
+ 1906756919U, // <6,6,7,u>: Cost 2 vzipr RHS, RHS
+ 1516699750U, // <6,6,u,0>: Cost 2 vext1 <6,6,6,6>, LHS
+ 1573197614U, // <6,6,u,1>: Cost 2 vext2 <4,u,6,6>, LHS
+ 2566571990U, // <6,6,u,2>: Cost 3 vext1 <2,6,6,u>, <2,6,6,u>
+ 2846786205U, // <6,6,u,3>: Cost 3 vuzpr <4,6,4,6>, LHS
+ 1516703030U, // <6,6,u,4>: Cost 2 vext1 <6,6,6,6>, RHS
+ 1573197978U, // <6,6,u,5>: Cost 2 vext2 <4,u,6,6>, RHS
+ 296144182U, // <6,6,u,6>: Cost 1 vdup2 RHS
+ 1906765110U, // <6,6,u,7>: Cost 2 vzipr RHS, RHS
+ 296144182U, // <6,6,u,u>: Cost 1 vdup2 RHS
+ 1571209216U, // <6,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497467494U, // <6,7,0,1>: Cost 1 vext2 RHS, LHS
+ 1571209380U, // <6,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2644951292U, // <6,7,0,3>: Cost 3 vext2 RHS, <0,3,1,0>
+ 1571209554U, // <6,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1510756450U, // <6,7,0,5>: Cost 2 vext1 <5,6,7,0>, <5,6,7,0>
+ 2644951542U, // <6,7,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
+ 2584499194U, // <6,7,0,7>: Cost 3 vext1 <5,6,7,0>, <7,0,1,2>
+ 497468061U, // <6,7,0,u>: Cost 1 vext2 RHS, LHS
+ 1571209974U, // <6,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571210036U, // <6,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1571210134U, // <6,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
+ 1571210200U, // <6,7,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
+ 2644952098U, // <6,7,1,4>: Cost 3 vext2 RHS, <1,4,0,5>
+ 1571210384U, // <6,7,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
+ 2644952271U, // <6,7,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
+ 2578535418U, // <6,7,1,7>: Cost 3 vext1 <4,6,7,1>, <7,0,1,2>
+ 1571210605U, // <6,7,1,u>: Cost 2 vext2 RHS, <1,u,1,3>
+ 2644952509U, // <6,7,2,0>: Cost 3 vext2 RHS, <2,0,1,2>
+ 2644952582U, // <6,7,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
+ 1571210856U, // <6,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571210918U, // <6,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 2644952828U, // <6,7,2,4>: Cost 3 vext2 RHS, <2,4,0,6>
+ 2633009028U, // <6,7,2,5>: Cost 3 vext2 <2,5,6,7>, <2,5,6,7>
+ 1571211194U, // <6,7,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
+ 2668840938U, // <6,7,2,7>: Cost 3 vext2 RHS, <2,7,0,1>
+ 1571211323U, // <6,7,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
+ 1571211414U, // <6,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2644953311U, // <6,7,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
+ 2644953390U, // <6,7,3,2>: Cost 3 vext2 RHS, <3,2,0,1>
+ 1571211676U, // <6,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571211778U, // <6,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 2644953648U, // <6,7,3,5>: Cost 3 vext2 RHS, <3,5,1,7>
+ 2644953720U, // <6,7,3,6>: Cost 3 vext2 RHS, <3,6,0,7>
+ 2644953795U, // <6,7,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
+ 1571212062U, // <6,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1573202834U, // <6,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2644954058U, // <6,7,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
+ 2644954166U, // <6,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
+ 2644954258U, // <6,7,4,3>: Cost 3 vext2 RHS, <4,3,6,5>
+ 1571212496U, // <6,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497470774U, // <6,7,4,5>: Cost 1 vext2 RHS, RHS
+ 1573203316U, // <6,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 2646281688U, // <6,7,4,7>: Cost 3 vext2 <4,7,6,7>, <4,7,6,7>
+ 497471017U, // <6,7,4,u>: Cost 1 vext2 RHS, RHS
+ 2644954696U, // <6,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
+ 1573203664U, // <6,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2644954878U, // <6,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
+ 2644954991U, // <6,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
+ 1571213254U, // <6,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571213316U, // <6,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1571213410U, // <6,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
+ 1573204136U, // <6,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1573204217U, // <6,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
+ 2644955425U, // <6,7,6,0>: Cost 3 vext2 RHS, <6,0,1,2>
+ 2644955561U, // <6,7,6,1>: Cost 3 vext2 RHS, <6,1,7,3>
+ 1573204474U, // <6,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2644955698U, // <6,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
+ 2644955789U, // <6,7,6,4>: Cost 3 vext2 RHS, <6,4,5,6>
+ 2644955889U, // <6,7,6,5>: Cost 3 vext2 RHS, <6,5,7,7>
+ 1571214136U, // <6,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
+ 1571214158U, // <6,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 1573204895U, // <6,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
+ 1573204986U, // <6,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
+ 2572608656U, // <6,7,7,1>: Cost 3 vext1 <3,6,7,7>, <1,5,3,7>
+ 2644956362U, // <6,7,7,2>: Cost 3 vext2 RHS, <7,2,6,3>
+ 2572610231U, // <6,7,7,3>: Cost 3 vext1 <3,6,7,7>, <3,6,7,7>
+ 1573205350U, // <6,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
+ 2646947220U, // <6,7,7,5>: Cost 3 vext2 RHS, <7,5,1,7>
+ 1516786498U, // <6,7,7,6>: Cost 2 vext1 <6,6,7,7>, <6,6,7,7>
+ 1571214956U, // <6,7,7,7>: Cost 2 vext2 RHS, <7,7,7,7>
+ 1573205634U, // <6,7,7,u>: Cost 2 vext2 RHS, <7,u,1,2>
+ 1571215059U, // <6,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
+ 497473326U, // <6,7,u,1>: Cost 1 vext2 RHS, LHS
+ 1571215237U, // <6,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
+ 1571215292U, // <6,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1571215423U, // <6,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
+ 497473690U, // <6,7,u,5>: Cost 1 vext2 RHS, RHS
+ 1571215568U, // <6,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
+ 1573206272U, // <6,7,u,7>: Cost 2 vext2 RHS, <u,7,0,1>
+ 497473893U, // <6,7,u,u>: Cost 1 vext2 RHS, LHS
+ 1571217408U, // <6,u,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497475686U, // <6,u,0,1>: Cost 1 vext2 RHS, LHS
+ 1571217572U, // <6,u,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2689865445U, // <6,u,0,3>: Cost 3 vext3 <0,u,2,6>, <u,0,3,2>
+ 1571217746U, // <6,u,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1510830187U, // <6,u,0,5>: Cost 2 vext1 <5,6,u,0>, <5,6,u,0>
+ 2644959734U, // <6,u,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
+ 1193130221U, // <6,u,0,7>: Cost 2 vrev <u,6,7,0>
+ 497476253U, // <6,u,0,u>: Cost 1 vext2 RHS, LHS
+ 1571218166U, // <6,u,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571218228U, // <6,u,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1612289838U, // <6,u,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1571218392U, // <6,u,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
+ 2566663478U, // <6,u,1,4>: Cost 3 vext1 <2,6,u,1>, RHS
+ 1571218576U, // <6,u,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
+ 2644960463U, // <6,u,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
+ 2717439835U, // <6,u,1,7>: Cost 3 vext3 <5,4,7,6>, <u,1,7,3>
+ 1612289892U, // <6,u,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1504870502U, // <6,u,2,0>: Cost 2 vext1 <4,6,u,2>, LHS
+ 2644960774U, // <6,u,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
+ 1571219048U, // <6,u,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571219110U, // <6,u,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 1504873782U, // <6,u,2,4>: Cost 2 vext1 <4,6,u,2>, RHS
+ 2633017221U, // <6,u,2,5>: Cost 3 vext2 <2,5,6,u>, <2,5,6,u>
+ 1571219386U, // <6,u,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
+ 2712573868U, // <6,u,2,7>: Cost 3 vext3 <4,6,4,6>, <u,2,7,3>
+ 1571219515U, // <6,u,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
+ 1571219606U, // <6,u,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2644961503U, // <6,u,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
+ 2566678499U, // <6,u,3,2>: Cost 3 vext1 <2,6,u,3>, <2,6,u,3>
+ 1571219868U, // <6,u,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571219970U, // <6,u,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 2689865711U, // <6,u,3,5>: Cost 3 vext3 <0,u,2,6>, <u,3,5,7>
+ 2708002806U, // <6,u,3,6>: Cost 3 vext3 <3,u,5,6>, <u,3,6,5>
+ 2644961987U, // <6,u,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
+ 1571220254U, // <6,u,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1571220370U, // <6,u,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2644962250U, // <6,u,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
+ 1661245476U, // <6,u,4,2>: Cost 2 vext3 <u,4,2,6>, <u,4,2,6>
+ 2686031917U, // <6,u,4,3>: Cost 3 vext3 <0,2,4,6>, <u,4,3,6>
+ 1571220688U, // <6,u,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497478967U, // <6,u,4,5>: Cost 1 vext2 RHS, RHS
+ 1571220852U, // <6,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 1661614161U, // <6,u,4,7>: Cost 2 vext3 <u,4,7,6>, <u,4,7,6>
+ 497479209U, // <6,u,4,u>: Cost 1 vext2 RHS, RHS
+ 2566692966U, // <6,u,5,0>: Cost 3 vext1 <2,6,u,5>, LHS
+ 1571221200U, // <6,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2566694885U, // <6,u,5,2>: Cost 3 vext1 <2,6,u,5>, <2,6,u,5>
+ 2689865855U, // <6,u,5,3>: Cost 3 vext3 <0,u,2,6>, <u,5,3,7>
+ 1571221446U, // <6,u,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571221508U, // <6,u,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1612290202U, // <6,u,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1571221672U, // <6,u,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1612290220U, // <6,u,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
+ 1504903270U, // <6,u,6,0>: Cost 2 vext1 <4,6,u,6>, LHS
+ 2644963752U, // <6,u,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
+ 1571222010U, // <6,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2686032080U, // <6,u,6,3>: Cost 3 vext3 <0,2,4,6>, <u,6,3,7>
+ 1504906550U, // <6,u,6,4>: Cost 2 vext1 <4,6,u,6>, RHS
+ 2644964079U, // <6,u,6,5>: Cost 3 vext2 RHS, <6,5,7,5>
+ 296144182U, // <6,u,6,6>: Cost 1 vdup2 RHS
+ 1571222350U, // <6,u,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 296144182U, // <6,u,6,u>: Cost 1 vdup2 RHS
+ 1492967526U, // <6,u,7,0>: Cost 2 vext1 <2,6,u,7>, LHS
+ 2560738574U, // <6,u,7,1>: Cost 3 vext1 <1,6,u,7>, <1,6,u,7>
+ 1492969447U, // <6,u,7,2>: Cost 2 vext1 <2,6,u,7>, <2,6,u,7>
+ 1906753692U, // <6,u,7,3>: Cost 2 vzipr RHS, LHS
+ 1492970806U, // <6,u,7,4>: Cost 2 vext1 <2,6,u,7>, RHS
+ 2980495761U, // <6,u,7,5>: Cost 3 vzipr RHS, <0,4,u,5>
+ 1516860235U, // <6,u,7,6>: Cost 2 vext1 <6,6,u,7>, <6,6,u,7>
+ 1906756936U, // <6,u,7,7>: Cost 2 vzipr RHS, RHS
+ 1492973358U, // <6,u,7,u>: Cost 2 vext1 <2,6,u,7>, LHS
+ 1492975718U, // <6,u,u,0>: Cost 2 vext1 <2,6,u,u>, LHS
+ 497481518U, // <6,u,u,1>: Cost 1 vext2 RHS, LHS
+ 1612290405U, // <6,u,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
+ 1571223484U, // <6,u,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1492978998U, // <6,u,u,4>: Cost 2 vext1 <2,6,u,u>, RHS
+ 497481882U, // <6,u,u,5>: Cost 1 vext2 RHS, RHS
+ 296144182U, // <6,u,u,6>: Cost 1 vdup2 RHS
+ 1906765128U, // <6,u,u,7>: Cost 2 vzipr RHS, RHS
+ 497482085U, // <6,u,u,u>: Cost 1 vext2 RHS, LHS
+ 1638318080U, // <7,0,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
+ 1638318090U, // <7,0,0,1>: Cost 2 vext3 RHS, <0,0,1,1>
+ 1638318100U, // <7,0,0,2>: Cost 2 vext3 RHS, <0,0,2,2>
+ 3646442178U, // <7,0,0,3>: Cost 4 vext1 <3,7,0,0>, <3,7,0,0>
+ 2712059941U, // <7,0,0,4>: Cost 3 vext3 RHS, <0,0,4,1>
+ 2651603364U, // <7,0,0,5>: Cost 3 vext2 <5,6,7,0>, <0,5,1,6>
+ 2590618445U, // <7,0,0,6>: Cost 3 vext1 <6,7,0,0>, <6,7,0,0>
+ 3785801798U, // <7,0,0,7>: Cost 4 vext3 RHS, <0,0,7,7>
+ 1638318153U, // <7,0,0,u>: Cost 2 vext3 RHS, <0,0,u,1>
+ 1516879974U, // <7,0,1,0>: Cost 2 vext1 <6,7,0,1>, LHS
+ 2693922911U, // <7,0,1,1>: Cost 3 vext3 <1,5,3,7>, <0,1,1,5>
+ 564576358U, // <7,0,1,2>: Cost 1 vext3 RHS, LHS
+ 2638996480U, // <7,0,1,3>: Cost 3 vext2 <3,5,7,0>, <1,3,5,7>
+ 1516883254U, // <7,0,1,4>: Cost 2 vext1 <6,7,0,1>, RHS
+ 2649613456U, // <7,0,1,5>: Cost 3 vext2 <5,3,7,0>, <1,5,3,7>
+ 1516884814U, // <7,0,1,6>: Cost 2 vext1 <6,7,0,1>, <6,7,0,1>
+ 2590626808U, // <7,0,1,7>: Cost 3 vext1 <6,7,0,1>, <7,0,1,0>
+ 564576412U, // <7,0,1,u>: Cost 1 vext3 RHS, LHS
+ 1638318244U, // <7,0,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
+ 2692743344U, // <7,0,2,1>: Cost 3 vext3 <1,3,5,7>, <0,2,1,5>
+ 2712060084U, // <7,0,2,2>: Cost 3 vext3 RHS, <0,2,2,0>
+ 2712060094U, // <7,0,2,3>: Cost 3 vext3 RHS, <0,2,3,1>
+ 1638318284U, // <7,0,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
+ 2712060118U, // <7,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
+ 2651604922U, // <7,0,2,6>: Cost 3 vext2 <5,6,7,0>, <2,6,3,7>
+ 2686255336U, // <7,0,2,7>: Cost 3 vext3 <0,2,7,7>, <0,2,7,7>
+ 1638318316U, // <7,0,2,u>: Cost 2 vext3 RHS, <0,2,u,2>
+ 2651605142U, // <7,0,3,0>: Cost 3 vext2 <5,6,7,0>, <3,0,1,2>
+ 2712060156U, // <7,0,3,1>: Cost 3 vext3 RHS, <0,3,1,0>
+ 2712060165U, // <7,0,3,2>: Cost 3 vext3 RHS, <0,3,2,0>
+ 2651605404U, // <7,0,3,3>: Cost 3 vext2 <5,6,7,0>, <3,3,3,3>
+ 2651605506U, // <7,0,3,4>: Cost 3 vext2 <5,6,7,0>, <3,4,5,6>
+ 2638998111U, // <7,0,3,5>: Cost 3 vext2 <3,5,7,0>, <3,5,7,0>
+ 2639661744U, // <7,0,3,6>: Cost 3 vext2 <3,6,7,0>, <3,6,7,0>
+ 3712740068U, // <7,0,3,7>: Cost 4 vext2 <3,5,7,0>, <3,7,3,7>
+ 2640989010U, // <7,0,3,u>: Cost 3 vext2 <3,u,7,0>, <3,u,7,0>
+ 2712060232U, // <7,0,4,0>: Cost 3 vext3 RHS, <0,4,0,4>
+ 1638318418U, // <7,0,4,1>: Cost 2 vext3 RHS, <0,4,1,5>
+ 1638318428U, // <7,0,4,2>: Cost 2 vext3 RHS, <0,4,2,6>
+ 3646474950U, // <7,0,4,3>: Cost 4 vext1 <3,7,0,4>, <3,7,0,4>
+ 2712060270U, // <7,0,4,4>: Cost 3 vext3 RHS, <0,4,4,6>
+ 1577864502U, // <7,0,4,5>: Cost 2 vext2 <5,6,7,0>, RHS
+ 2651606388U, // <7,0,4,6>: Cost 3 vext2 <5,6,7,0>, <4,6,4,6>
+ 3787792776U, // <7,0,4,7>: Cost 4 vext3 RHS, <0,4,7,5>
+ 1638318481U, // <7,0,4,u>: Cost 2 vext3 RHS, <0,4,u,5>
+ 2590654566U, // <7,0,5,0>: Cost 3 vext1 <6,7,0,5>, LHS
+ 2651606736U, // <7,0,5,1>: Cost 3 vext2 <5,6,7,0>, <5,1,7,3>
+ 2712060334U, // <7,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
+ 2649616239U, // <7,0,5,3>: Cost 3 vext2 <5,3,7,0>, <5,3,7,0>
+ 2651606982U, // <7,0,5,4>: Cost 3 vext2 <5,6,7,0>, <5,4,7,6>
+ 2651607044U, // <7,0,5,5>: Cost 3 vext2 <5,6,7,0>, <5,5,5,5>
+ 1577865314U, // <7,0,5,6>: Cost 2 vext2 <5,6,7,0>, <5,6,7,0>
+ 2651607208U, // <7,0,5,7>: Cost 3 vext2 <5,6,7,0>, <5,7,5,7>
+ 1579192580U, // <7,0,5,u>: Cost 2 vext2 <5,u,7,0>, <5,u,7,0>
+ 2688393709U, // <7,0,6,0>: Cost 3 vext3 <0,6,0,7>, <0,6,0,7>
+ 2712060406U, // <7,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
+ 2688541183U, // <7,0,6,2>: Cost 3 vext3 <0,6,2,7>, <0,6,2,7>
+ 2655588936U, // <7,0,6,3>: Cost 3 vext2 <6,3,7,0>, <6,3,7,0>
+ 3762430481U, // <7,0,6,4>: Cost 4 vext3 <0,6,4,7>, <0,6,4,7>
+ 2651607730U, // <7,0,6,5>: Cost 3 vext2 <5,6,7,0>, <6,5,0,7>
+ 2651607864U, // <7,0,6,6>: Cost 3 vext2 <5,6,7,0>, <6,6,6,6>
+ 2651607886U, // <7,0,6,7>: Cost 3 vext2 <5,6,7,0>, <6,7,0,1>
+ 2688983605U, // <7,0,6,u>: Cost 3 vext3 <0,6,u,7>, <0,6,u,7>
+ 2651608058U, // <7,0,7,0>: Cost 3 vext2 <5,6,7,0>, <7,0,1,2>
+ 2932703334U, // <7,0,7,1>: Cost 3 vzipl <7,7,7,7>, LHS
+ 3066921062U, // <7,0,7,2>: Cost 3 vtrnl <7,7,7,7>, LHS
+ 3712742678U, // <7,0,7,3>: Cost 4 vext2 <3,5,7,0>, <7,3,5,7>
+ 2651608422U, // <7,0,7,4>: Cost 3 vext2 <5,6,7,0>, <7,4,5,6>
+ 2651608513U, // <7,0,7,5>: Cost 3 vext2 <5,6,7,0>, <7,5,6,7>
+ 2663552532U, // <7,0,7,6>: Cost 3 vext2 <7,6,7,0>, <7,6,7,0>
+ 2651608684U, // <7,0,7,7>: Cost 3 vext2 <5,6,7,0>, <7,7,7,7>
+ 2651608706U, // <7,0,7,u>: Cost 3 vext2 <5,6,7,0>, <7,u,1,2>
+ 1638318730U, // <7,0,u,0>: Cost 2 vext3 RHS, <0,u,0,2>
+ 1638318738U, // <7,0,u,1>: Cost 2 vext3 RHS, <0,u,1,1>
+ 564576925U, // <7,0,u,2>: Cost 1 vext3 RHS, LHS
+ 2572765898U, // <7,0,u,3>: Cost 3 vext1 <3,7,0,u>, <3,7,0,u>
+ 1638318770U, // <7,0,u,4>: Cost 2 vext3 RHS, <0,u,4,6>
+ 1577867418U, // <7,0,u,5>: Cost 2 vext2 <5,6,7,0>, RHS
+ 1516942165U, // <7,0,u,6>: Cost 2 vext1 <6,7,0,u>, <6,7,0,u>
+ 2651609344U, // <7,0,u,7>: Cost 3 vext2 <5,6,7,0>, <u,7,0,1>
+ 564576979U, // <7,0,u,u>: Cost 1 vext3 RHS, LHS
+ 2590687334U, // <7,1,0,0>: Cost 3 vext1 <6,7,1,0>, LHS
+ 2639003750U, // <7,1,0,1>: Cost 3 vext2 <3,5,7,1>, LHS
+ 2793357414U, // <7,1,0,2>: Cost 3 vuzpl <7,0,1,2>, LHS
+ 1638318838U, // <7,1,0,3>: Cost 2 vext3 RHS, <1,0,3,2>
+ 2590690614U, // <7,1,0,4>: Cost 3 vext1 <6,7,1,0>, RHS
+ 2712060679U, // <7,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
+ 2590692182U, // <7,1,0,6>: Cost 3 vext1 <6,7,1,0>, <6,7,1,0>
+ 3785802521U, // <7,1,0,7>: Cost 4 vext3 RHS, <1,0,7,1>
+ 1638318883U, // <7,1,0,u>: Cost 2 vext3 RHS, <1,0,u,2>
+ 2712060715U, // <7,1,1,0>: Cost 3 vext3 RHS, <1,1,0,1>
+ 1638318900U, // <7,1,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
+ 3774300994U, // <7,1,1,2>: Cost 4 vext3 <2,6,3,7>, <1,1,2,6>
+ 1638318920U, // <7,1,1,3>: Cost 2 vext3 RHS, <1,1,3,3>
+ 2712060755U, // <7,1,1,4>: Cost 3 vext3 RHS, <1,1,4,5>
+ 2691416926U, // <7,1,1,5>: Cost 3 vext3 <1,1,5,7>, <1,1,5,7>
+ 2590700375U, // <7,1,1,6>: Cost 3 vext1 <6,7,1,1>, <6,7,1,1>
+ 3765158766U, // <7,1,1,7>: Cost 4 vext3 <1,1,5,7>, <1,1,7,5>
+ 1638318965U, // <7,1,1,u>: Cost 2 vext3 RHS, <1,1,u,3>
+ 2712060796U, // <7,1,2,0>: Cost 3 vext3 RHS, <1,2,0,1>
+ 2712060807U, // <7,1,2,1>: Cost 3 vext3 RHS, <1,2,1,3>
+ 3712747112U, // <7,1,2,2>: Cost 4 vext2 <3,5,7,1>, <2,2,2,2>
+ 1638318998U, // <7,1,2,3>: Cost 2 vext3 RHS, <1,2,3,0>
+ 2712060836U, // <7,1,2,4>: Cost 3 vext3 RHS, <1,2,4,5>
+ 2712060843U, // <7,1,2,5>: Cost 3 vext3 RHS, <1,2,5,3>
+ 2590708568U, // <7,1,2,6>: Cost 3 vext1 <6,7,1,2>, <6,7,1,2>
+ 2735948730U, // <7,1,2,7>: Cost 3 vext3 RHS, <1,2,7,0>
+ 1638319043U, // <7,1,2,u>: Cost 2 vext3 RHS, <1,2,u,0>
+ 2712060876U, // <7,1,3,0>: Cost 3 vext3 RHS, <1,3,0,0>
+ 1638319064U, // <7,1,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
+ 2712060894U, // <7,1,3,2>: Cost 3 vext3 RHS, <1,3,2,0>
+ 2692596718U, // <7,1,3,3>: Cost 3 vext3 <1,3,3,7>, <1,3,3,7>
+ 2712060917U, // <7,1,3,4>: Cost 3 vext3 RHS, <1,3,4,5>
+ 1619002368U, // <7,1,3,5>: Cost 2 vext3 <1,3,5,7>, <1,3,5,7>
+ 2692817929U, // <7,1,3,6>: Cost 3 vext3 <1,3,6,7>, <1,3,6,7>
+ 2735948814U, // <7,1,3,7>: Cost 3 vext3 RHS, <1,3,7,3>
+ 1619223579U, // <7,1,3,u>: Cost 2 vext3 <1,3,u,7>, <1,3,u,7>
+ 2712060962U, // <7,1,4,0>: Cost 3 vext3 RHS, <1,4,0,5>
+ 2712060971U, // <7,1,4,1>: Cost 3 vext3 RHS, <1,4,1,5>
+ 2712060980U, // <7,1,4,2>: Cost 3 vext3 RHS, <1,4,2,5>
+ 2712060989U, // <7,1,4,3>: Cost 3 vext3 RHS, <1,4,3,5>
+ 3785802822U, // <7,1,4,4>: Cost 4 vext3 RHS, <1,4,4,5>
+ 2639007030U, // <7,1,4,5>: Cost 3 vext2 <3,5,7,1>, RHS
+ 2645642634U, // <7,1,4,6>: Cost 3 vext2 <4,6,7,1>, <4,6,7,1>
+ 3719384520U, // <7,1,4,7>: Cost 4 vext2 <4,6,7,1>, <4,7,5,0>
+ 2639007273U, // <7,1,4,u>: Cost 3 vext2 <3,5,7,1>, RHS
+ 2572812390U, // <7,1,5,0>: Cost 3 vext1 <3,7,1,5>, LHS
+ 2693776510U, // <7,1,5,1>: Cost 3 vext3 <1,5,1,7>, <1,5,1,7>
+ 3774301318U, // <7,1,5,2>: Cost 4 vext3 <2,6,3,7>, <1,5,2,6>
+ 1620182160U, // <7,1,5,3>: Cost 2 vext3 <1,5,3,7>, <1,5,3,7>
+ 2572815670U, // <7,1,5,4>: Cost 3 vext1 <3,7,1,5>, RHS
+ 3766486178U, // <7,1,5,5>: Cost 4 vext3 <1,3,5,7>, <1,5,5,7>
+ 2651615331U, // <7,1,5,6>: Cost 3 vext2 <5,6,7,1>, <5,6,7,1>
+ 2652278964U, // <7,1,5,7>: Cost 3 vext2 <5,7,7,1>, <5,7,7,1>
+ 1620550845U, // <7,1,5,u>: Cost 2 vext3 <1,5,u,7>, <1,5,u,7>
+ 3768108230U, // <7,1,6,0>: Cost 4 vext3 <1,6,0,7>, <1,6,0,7>
+ 2694440143U, // <7,1,6,1>: Cost 3 vext3 <1,6,1,7>, <1,6,1,7>
+ 2712061144U, // <7,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
+ 2694587617U, // <7,1,6,3>: Cost 3 vext3 <1,6,3,7>, <1,6,3,7>
+ 3768403178U, // <7,1,6,4>: Cost 4 vext3 <1,6,4,7>, <1,6,4,7>
+ 2694735091U, // <7,1,6,5>: Cost 3 vext3 <1,6,5,7>, <1,6,5,7>
+ 3768550652U, // <7,1,6,6>: Cost 4 vext3 <1,6,6,7>, <1,6,6,7>
+ 2652279630U, // <7,1,6,7>: Cost 3 vext2 <5,7,7,1>, <6,7,0,1>
+ 2694956302U, // <7,1,6,u>: Cost 3 vext3 <1,6,u,7>, <1,6,u,7>
+ 2645644282U, // <7,1,7,0>: Cost 3 vext2 <4,6,7,1>, <7,0,1,2>
+ 2859062094U, // <7,1,7,1>: Cost 3 vuzpr <6,7,0,1>, <6,7,0,1>
+ 3779462437U, // <7,1,7,2>: Cost 4 vext3 <3,5,1,7>, <1,7,2,3>
+ 3121938534U, // <7,1,7,3>: Cost 3 vtrnr <5,7,5,7>, LHS
+ 2554916150U, // <7,1,7,4>: Cost 3 vext1 <0,7,1,7>, RHS
+ 3769140548U, // <7,1,7,5>: Cost 4 vext3 <1,7,5,7>, <1,7,5,7>
+ 3726022164U, // <7,1,7,6>: Cost 4 vext2 <5,7,7,1>, <7,6,7,0>
+ 2554918508U, // <7,1,7,7>: Cost 3 vext1 <0,7,1,7>, <7,7,7,7>
+ 3121938539U, // <7,1,7,u>: Cost 3 vtrnr <5,7,5,7>, LHS
+ 2572836966U, // <7,1,u,0>: Cost 3 vext1 <3,7,1,u>, LHS
+ 1638319469U, // <7,1,u,1>: Cost 2 vext3 RHS, <1,u,1,3>
+ 2712061299U, // <7,1,u,2>: Cost 3 vext3 RHS, <1,u,2,0>
+ 1622173059U, // <7,1,u,3>: Cost 2 vext3 <1,u,3,7>, <1,u,3,7>
+ 2572840246U, // <7,1,u,4>: Cost 3 vext1 <3,7,1,u>, RHS
+ 1622320533U, // <7,1,u,5>: Cost 2 vext3 <1,u,5,7>, <1,u,5,7>
+ 2696136094U, // <7,1,u,6>: Cost 3 vext3 <1,u,6,7>, <1,u,6,7>
+ 2859060777U, // <7,1,u,7>: Cost 3 vuzpr <6,7,0,1>, RHS
+ 1622541744U, // <7,1,u,u>: Cost 2 vext3 <1,u,u,7>, <1,u,u,7>
+ 2712061364U, // <7,2,0,0>: Cost 3 vext3 RHS, <2,0,0,2>
+ 2712061373U, // <7,2,0,1>: Cost 3 vext3 RHS, <2,0,1,2>
+ 2712061380U, // <7,2,0,2>: Cost 3 vext3 RHS, <2,0,2,0>
+ 2712061389U, // <7,2,0,3>: Cost 3 vext3 RHS, <2,0,3,0>
+ 2712061404U, // <7,2,0,4>: Cost 3 vext3 RHS, <2,0,4,6>
+ 2696725990U, // <7,2,0,5>: Cost 3 vext3 <2,0,5,7>, <2,0,5,7>
+ 2712061417U, // <7,2,0,6>: Cost 3 vext3 RHS, <2,0,6,1>
+ 3785803251U, // <7,2,0,7>: Cost 4 vext3 RHS, <2,0,7,2>
+ 2696947201U, // <7,2,0,u>: Cost 3 vext3 <2,0,u,7>, <2,0,u,7>
+ 2712061446U, // <7,2,1,0>: Cost 3 vext3 RHS, <2,1,0,3>
+ 3785803276U, // <7,2,1,1>: Cost 4 vext3 RHS, <2,1,1,0>
+ 3785803285U, // <7,2,1,2>: Cost 4 vext3 RHS, <2,1,2,0>
+ 2712061471U, // <7,2,1,3>: Cost 3 vext3 RHS, <2,1,3,1>
+ 2712061482U, // <7,2,1,4>: Cost 3 vext3 RHS, <2,1,4,3>
+ 3766486576U, // <7,2,1,5>: Cost 4 vext3 <1,3,5,7>, <2,1,5,0>
+ 2712061500U, // <7,2,1,6>: Cost 3 vext3 RHS, <2,1,6,3>
+ 2602718850U, // <7,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
+ 2712061516U, // <7,2,1,u>: Cost 3 vext3 RHS, <2,1,u,1>
+ 2712061525U, // <7,2,2,0>: Cost 3 vext3 RHS, <2,2,0,1>
+ 2712061536U, // <7,2,2,1>: Cost 3 vext3 RHS, <2,2,1,3>
+ 1638319720U, // <7,2,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1638319730U, // <7,2,2,3>: Cost 2 vext3 RHS, <2,2,3,3>
+ 2712061565U, // <7,2,2,4>: Cost 3 vext3 RHS, <2,2,4,5>
+ 2698053256U, // <7,2,2,5>: Cost 3 vext3 <2,2,5,7>, <2,2,5,7>
+ 2712061584U, // <7,2,2,6>: Cost 3 vext3 RHS, <2,2,6,6>
+ 3771795096U, // <7,2,2,7>: Cost 4 vext3 <2,2,5,7>, <2,2,7,5>
+ 1638319775U, // <7,2,2,u>: Cost 2 vext3 RHS, <2,2,u,3>
+ 1638319782U, // <7,2,3,0>: Cost 2 vext3 RHS, <2,3,0,1>
+ 2693924531U, // <7,2,3,1>: Cost 3 vext3 <1,5,3,7>, <2,3,1,5>
+ 2700560061U, // <7,2,3,2>: Cost 3 vext3 <2,6,3,7>, <2,3,2,6>
+ 2693924551U, // <7,2,3,3>: Cost 3 vext3 <1,5,3,7>, <2,3,3,7>
+ 1638319822U, // <7,2,3,4>: Cost 2 vext3 RHS, <2,3,4,5>
+ 2698716889U, // <7,2,3,5>: Cost 3 vext3 <2,3,5,7>, <2,3,5,7>
+ 2712061665U, // <7,2,3,6>: Cost 3 vext3 RHS, <2,3,6,6>
+ 2735949540U, // <7,2,3,7>: Cost 3 vext3 RHS, <2,3,7,0>
+ 1638319854U, // <7,2,3,u>: Cost 2 vext3 RHS, <2,3,u,1>
+ 2712061692U, // <7,2,4,0>: Cost 3 vext3 RHS, <2,4,0,6>
+ 2712061698U, // <7,2,4,1>: Cost 3 vext3 RHS, <2,4,1,3>
+ 2712061708U, // <7,2,4,2>: Cost 3 vext3 RHS, <2,4,2,4>
+ 2712061718U, // <7,2,4,3>: Cost 3 vext3 RHS, <2,4,3,5>
+ 2712061728U, // <7,2,4,4>: Cost 3 vext3 RHS, <2,4,4,6>
+ 2699380522U, // <7,2,4,5>: Cost 3 vext3 <2,4,5,7>, <2,4,5,7>
+ 2712061740U, // <7,2,4,6>: Cost 3 vext3 RHS, <2,4,6,0>
+ 3809691445U, // <7,2,4,7>: Cost 4 vext3 RHS, <2,4,7,0>
+ 2699601733U, // <7,2,4,u>: Cost 3 vext3 <2,4,u,7>, <2,4,u,7>
+ 2699675470U, // <7,2,5,0>: Cost 3 vext3 <2,5,0,7>, <2,5,0,7>
+ 3766486867U, // <7,2,5,1>: Cost 4 vext3 <1,3,5,7>, <2,5,1,3>
+ 2699822944U, // <7,2,5,2>: Cost 3 vext3 <2,5,2,7>, <2,5,2,7>
+ 2692745065U, // <7,2,5,3>: Cost 3 vext3 <1,3,5,7>, <2,5,3,7>
+ 2699970418U, // <7,2,5,4>: Cost 3 vext3 <2,5,4,7>, <2,5,4,7>
+ 3766486907U, // <7,2,5,5>: Cost 4 vext3 <1,3,5,7>, <2,5,5,7>
+ 2700117892U, // <7,2,5,6>: Cost 3 vext3 <2,5,6,7>, <2,5,6,7>
+ 3771795334U, // <7,2,5,7>: Cost 4 vext3 <2,2,5,7>, <2,5,7,0>
+ 2692745110U, // <7,2,5,u>: Cost 3 vext3 <1,3,5,7>, <2,5,u,7>
+ 2572894310U, // <7,2,6,0>: Cost 3 vext1 <3,7,2,6>, LHS
+ 2712061860U, // <7,2,6,1>: Cost 3 vext3 RHS, <2,6,1,3>
+ 2700486577U, // <7,2,6,2>: Cost 3 vext3 <2,6,2,7>, <2,6,2,7>
+ 1626818490U, // <7,2,6,3>: Cost 2 vext3 <2,6,3,7>, <2,6,3,7>
+ 2572897590U, // <7,2,6,4>: Cost 3 vext1 <3,7,2,6>, RHS
+ 2700707788U, // <7,2,6,5>: Cost 3 vext3 <2,6,5,7>, <2,6,5,7>
+ 2700781525U, // <7,2,6,6>: Cost 3 vext3 <2,6,6,7>, <2,6,6,7>
+ 3774597086U, // <7,2,6,7>: Cost 4 vext3 <2,6,7,7>, <2,6,7,7>
+ 1627187175U, // <7,2,6,u>: Cost 2 vext3 <2,6,u,7>, <2,6,u,7>
+ 2735949802U, // <7,2,7,0>: Cost 3 vext3 RHS, <2,7,0,1>
+ 3780200434U, // <7,2,7,1>: Cost 4 vext3 <3,6,2,7>, <2,7,1,0>
+ 3773564928U, // <7,2,7,2>: Cost 4 vext3 <2,5,2,7>, <2,7,2,5>
+ 2986541158U, // <7,2,7,3>: Cost 3 vzipr <5,5,7,7>, LHS
+ 2554989878U, // <7,2,7,4>: Cost 3 vext1 <0,7,2,7>, RHS
+ 3775113245U, // <7,2,7,5>: Cost 4 vext3 <2,7,5,7>, <2,7,5,7>
+ 4060283228U, // <7,2,7,6>: Cost 4 vzipr <5,5,7,7>, <0,4,2,6>
+ 2554992236U, // <7,2,7,7>: Cost 3 vext1 <0,7,2,7>, <7,7,7,7>
+ 2986541163U, // <7,2,7,u>: Cost 3 vzipr <5,5,7,7>, LHS
+ 1638320187U, // <7,2,u,0>: Cost 2 vext3 RHS, <2,u,0,1>
+ 2693924936U, // <7,2,u,1>: Cost 3 vext3 <1,5,3,7>, <2,u,1,5>
+ 1638319720U, // <7,2,u,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1628145756U, // <7,2,u,3>: Cost 2 vext3 <2,u,3,7>, <2,u,3,7>
+ 1638320227U, // <7,2,u,4>: Cost 2 vext3 RHS, <2,u,4,5>
+ 2702035054U, // <7,2,u,5>: Cost 3 vext3 <2,u,5,7>, <2,u,5,7>
+ 2702108791U, // <7,2,u,6>: Cost 3 vext3 <2,u,6,7>, <2,u,6,7>
+ 2735949945U, // <7,2,u,7>: Cost 3 vext3 RHS, <2,u,7,0>
+ 1628514441U, // <7,2,u,u>: Cost 2 vext3 <2,u,u,7>, <2,u,u,7>
+ 2712062091U, // <7,3,0,0>: Cost 3 vext3 RHS, <3,0,0,0>
+ 1638320278U, // <7,3,0,1>: Cost 2 vext3 RHS, <3,0,1,2>
+ 2712062109U, // <7,3,0,2>: Cost 3 vext3 RHS, <3,0,2,0>
+ 2590836886U, // <7,3,0,3>: Cost 3 vext1 <6,7,3,0>, <3,0,1,2>
+ 2712062128U, // <7,3,0,4>: Cost 3 vext3 RHS, <3,0,4,1>
+ 2712062138U, // <7,3,0,5>: Cost 3 vext3 RHS, <3,0,5,2>
+ 2590839656U, // <7,3,0,6>: Cost 3 vext1 <6,7,3,0>, <6,7,3,0>
+ 3311414017U, // <7,3,0,7>: Cost 4 vrev <3,7,7,0>
+ 1638320341U, // <7,3,0,u>: Cost 2 vext3 RHS, <3,0,u,2>
+ 2237164227U, // <7,3,1,0>: Cost 3 vrev <3,7,0,1>
+ 2712062182U, // <7,3,1,1>: Cost 3 vext3 RHS, <3,1,1,1>
+ 2712062193U, // <7,3,1,2>: Cost 3 vext3 RHS, <3,1,2,3>
+ 2692745468U, // <7,3,1,3>: Cost 3 vext3 <1,3,5,7>, <3,1,3,5>
+ 2712062214U, // <7,3,1,4>: Cost 3 vext3 RHS, <3,1,4,6>
+ 2693925132U, // <7,3,1,5>: Cost 3 vext3 <1,5,3,7>, <3,1,5,3>
+ 3768183059U, // <7,3,1,6>: Cost 4 vext3 <1,6,1,7>, <3,1,6,1>
+ 2692745504U, // <7,3,1,7>: Cost 3 vext3 <1,3,5,7>, <3,1,7,5>
+ 2696063273U, // <7,3,1,u>: Cost 3 vext3 <1,u,5,7>, <3,1,u,5>
+ 2712062254U, // <7,3,2,0>: Cost 3 vext3 RHS, <3,2,0,1>
+ 2712062262U, // <7,3,2,1>: Cost 3 vext3 RHS, <3,2,1,0>
+ 2712062273U, // <7,3,2,2>: Cost 3 vext3 RHS, <3,2,2,2>
+ 2712062280U, // <7,3,2,3>: Cost 3 vext3 RHS, <3,2,3,0>
+ 2712062294U, // <7,3,2,4>: Cost 3 vext3 RHS, <3,2,4,5>
+ 2712062302U, // <7,3,2,5>: Cost 3 vext3 RHS, <3,2,5,4>
+ 2700560742U, // <7,3,2,6>: Cost 3 vext3 <2,6,3,7>, <3,2,6,3>
+ 2712062319U, // <7,3,2,7>: Cost 3 vext3 RHS, <3,2,7,3>
+ 2712062325U, // <7,3,2,u>: Cost 3 vext3 RHS, <3,2,u,0>
+ 2712062335U, // <7,3,3,0>: Cost 3 vext3 RHS, <3,3,0,1>
+ 2636368158U, // <7,3,3,1>: Cost 3 vext2 <3,1,7,3>, <3,1,7,3>
+ 2637031791U, // <7,3,3,2>: Cost 3 vext2 <3,2,7,3>, <3,2,7,3>
+ 1638320540U, // <7,3,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2712062374U, // <7,3,3,4>: Cost 3 vext3 RHS, <3,3,4,4>
+ 2704689586U, // <7,3,3,5>: Cost 3 vext3 <3,3,5,7>, <3,3,5,7>
+ 2590864235U, // <7,3,3,6>: Cost 3 vext1 <6,7,3,3>, <6,7,3,3>
+ 2704837060U, // <7,3,3,7>: Cost 3 vext3 <3,3,7,7>, <3,3,7,7>
+ 1638320540U, // <7,3,3,u>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2712062416U, // <7,3,4,0>: Cost 3 vext3 RHS, <3,4,0,1>
+ 2712062426U, // <7,3,4,1>: Cost 3 vext3 RHS, <3,4,1,2>
+ 2566981640U, // <7,3,4,2>: Cost 3 vext1 <2,7,3,4>, <2,7,3,4>
+ 2712062447U, // <7,3,4,3>: Cost 3 vext3 RHS, <3,4,3,5>
+ 2712062456U, // <7,3,4,4>: Cost 3 vext3 RHS, <3,4,4,5>
+ 1638320642U, // <7,3,4,5>: Cost 2 vext3 RHS, <3,4,5,6>
+ 2648313204U, // <7,3,4,6>: Cost 3 vext2 <5,1,7,3>, <4,6,4,6>
+ 3311446789U, // <7,3,4,7>: Cost 4 vrev <3,7,7,4>
+ 1638320669U, // <7,3,4,u>: Cost 2 vext3 RHS, <3,4,u,6>
+ 2602819686U, // <7,3,5,0>: Cost 3 vext1 <u,7,3,5>, LHS
+ 1574571728U, // <7,3,5,1>: Cost 2 vext2 <5,1,7,3>, <5,1,7,3>
+ 2648977185U, // <7,3,5,2>: Cost 3 vext2 <5,2,7,3>, <5,2,7,3>
+ 2705869378U, // <7,3,5,3>: Cost 3 vext3 <3,5,3,7>, <3,5,3,7>
+ 2237491947U, // <7,3,5,4>: Cost 3 vrev <3,7,4,5>
+ 2706016852U, // <7,3,5,5>: Cost 3 vext3 <3,5,5,7>, <3,5,5,7>
+ 2648313954U, // <7,3,5,6>: Cost 3 vext2 <5,1,7,3>, <5,6,7,0>
+ 2692745823U, // <7,3,5,7>: Cost 3 vext3 <1,3,5,7>, <3,5,7,0>
+ 1579217159U, // <7,3,5,u>: Cost 2 vext2 <5,u,7,3>, <5,u,7,3>
+ 2706311800U, // <7,3,6,0>: Cost 3 vext3 <3,6,0,7>, <3,6,0,7>
+ 2654286249U, // <7,3,6,1>: Cost 3 vext2 <6,1,7,3>, <6,1,7,3>
+ 1581208058U, // <7,3,6,2>: Cost 2 vext2 <6,2,7,3>, <6,2,7,3>
+ 2706533011U, // <7,3,6,3>: Cost 3 vext3 <3,6,3,7>, <3,6,3,7>
+ 2706606748U, // <7,3,6,4>: Cost 3 vext3 <3,6,4,7>, <3,6,4,7>
+ 3780422309U, // <7,3,6,5>: Cost 4 vext3 <3,6,5,7>, <3,6,5,7>
+ 2712062637U, // <7,3,6,6>: Cost 3 vext3 RHS, <3,6,6,6>
+ 2706827959U, // <7,3,6,7>: Cost 3 vext3 <3,6,7,7>, <3,6,7,7>
+ 1585189856U, // <7,3,6,u>: Cost 2 vext2 <6,u,7,3>, <6,u,7,3>
+ 2693925571U, // <7,3,7,0>: Cost 3 vext3 <1,5,3,7>, <3,7,0,1>
+ 2693925584U, // <7,3,7,1>: Cost 3 vext3 <1,5,3,7>, <3,7,1,5>
+ 2700561114U, // <7,3,7,2>: Cost 3 vext3 <2,6,3,7>, <3,7,2,6>
+ 2572978916U, // <7,3,7,3>: Cost 3 vext1 <3,7,3,7>, <3,7,3,7>
+ 2693925611U, // <7,3,7,4>: Cost 3 vext3 <1,5,3,7>, <3,7,4,5>
+ 2707344118U, // <7,3,7,5>: Cost 3 vext3 <3,7,5,7>, <3,7,5,7>
+ 2654950894U, // <7,3,7,6>: Cost 3 vext2 <6,2,7,3>, <7,6,2,7>
+ 2648315500U, // <7,3,7,7>: Cost 3 vext2 <5,1,7,3>, <7,7,7,7>
+ 2693925643U, // <7,3,7,u>: Cost 3 vext3 <1,5,3,7>, <3,7,u,1>
+ 2237221578U, // <7,3,u,0>: Cost 3 vrev <3,7,0,u>
+ 1638320926U, // <7,3,u,1>: Cost 2 vext3 RHS, <3,u,1,2>
+ 1593153452U, // <7,3,u,2>: Cost 2 vext2 <u,2,7,3>, <u,2,7,3>
+ 1638320540U, // <7,3,u,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 2237516526U, // <7,3,u,4>: Cost 3 vrev <3,7,4,u>
+ 1638320966U, // <7,3,u,5>: Cost 2 vext3 RHS, <3,u,5,6>
+ 2712062796U, // <7,3,u,6>: Cost 3 vext3 RHS, <3,u,6,3>
+ 2692967250U, // <7,3,u,7>: Cost 3 vext3 <1,3,u,7>, <3,u,7,0>
+ 1638320989U, // <7,3,u,u>: Cost 2 vext3 RHS, <3,u,u,2>
+ 2651635712U, // <7,4,0,0>: Cost 3 vext2 <5,6,7,4>, <0,0,0,0>
+ 1577893990U, // <7,4,0,1>: Cost 2 vext2 <5,6,7,4>, LHS
+ 2651635876U, // <7,4,0,2>: Cost 3 vext2 <5,6,7,4>, <0,2,0,2>
+ 3785804672U, // <7,4,0,3>: Cost 4 vext3 RHS, <4,0,3,1>
+ 2651636050U, // <7,4,0,4>: Cost 3 vext2 <5,6,7,4>, <0,4,1,5>
+ 1638468498U, // <7,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
+ 1638468508U, // <7,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
+ 3787795364U, // <7,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
+ 1640459181U, // <7,4,0,u>: Cost 2 vext3 RHS, <4,0,u,1>
+ 2651636470U, // <7,4,1,0>: Cost 3 vext2 <5,6,7,4>, <1,0,3,2>
+ 2651636532U, // <7,4,1,1>: Cost 3 vext2 <5,6,7,4>, <1,1,1,1>
+ 2712062922U, // <7,4,1,2>: Cost 3 vext3 RHS, <4,1,2,3>
+ 2639029248U, // <7,4,1,3>: Cost 3 vext2 <3,5,7,4>, <1,3,5,7>
+ 2712062940U, // <7,4,1,4>: Cost 3 vext3 RHS, <4,1,4,3>
+ 2712062946U, // <7,4,1,5>: Cost 3 vext3 RHS, <4,1,5,0>
+ 2712062958U, // <7,4,1,6>: Cost 3 vext3 RHS, <4,1,6,3>
+ 3785804791U, // <7,4,1,7>: Cost 4 vext3 RHS, <4,1,7,3>
+ 2712062973U, // <7,4,1,u>: Cost 3 vext3 RHS, <4,1,u,0>
+ 3785804807U, // <7,4,2,0>: Cost 4 vext3 RHS, <4,2,0,1>
+ 3785804818U, // <7,4,2,1>: Cost 4 vext3 RHS, <4,2,1,3>
+ 2651637352U, // <7,4,2,2>: Cost 3 vext2 <5,6,7,4>, <2,2,2,2>
+ 2651637414U, // <7,4,2,3>: Cost 3 vext2 <5,6,7,4>, <2,3,0,1>
+ 3716753194U, // <7,4,2,4>: Cost 4 vext2 <4,2,7,4>, <2,4,5,7>
+ 2712063030U, // <7,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
+ 2712063036U, // <7,4,2,6>: Cost 3 vext3 RHS, <4,2,6,0>
+ 3773123658U, // <7,4,2,7>: Cost 4 vext3 <2,4,5,7>, <4,2,7,5>
+ 2712063054U, // <7,4,2,u>: Cost 3 vext3 RHS, <4,2,u,0>
+ 2651637910U, // <7,4,3,0>: Cost 3 vext2 <5,6,7,4>, <3,0,1,2>
+ 3712772348U, // <7,4,3,1>: Cost 4 vext2 <3,5,7,4>, <3,1,3,5>
+ 3785804906U, // <7,4,3,2>: Cost 4 vext3 RHS, <4,3,2,1>
+ 2651638172U, // <7,4,3,3>: Cost 3 vext2 <5,6,7,4>, <3,3,3,3>
+ 2651638274U, // <7,4,3,4>: Cost 3 vext2 <5,6,7,4>, <3,4,5,6>
+ 2639030883U, // <7,4,3,5>: Cost 3 vext2 <3,5,7,4>, <3,5,7,4>
+ 2712063122U, // <7,4,3,6>: Cost 3 vext3 RHS, <4,3,6,5>
+ 3712772836U, // <7,4,3,7>: Cost 4 vext2 <3,5,7,4>, <3,7,3,7>
+ 2641021782U, // <7,4,3,u>: Cost 3 vext2 <3,u,7,4>, <3,u,7,4>
+ 2714053802U, // <7,4,4,0>: Cost 3 vext3 RHS, <4,4,0,2>
+ 3785804978U, // <7,4,4,1>: Cost 4 vext3 RHS, <4,4,1,1>
+ 3716754505U, // <7,4,4,2>: Cost 4 vext2 <4,2,7,4>, <4,2,7,4>
+ 3785804998U, // <7,4,4,3>: Cost 4 vext3 RHS, <4,4,3,3>
+ 1638321360U, // <7,4,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
+ 1638468826U, // <7,4,4,5>: Cost 2 vext3 RHS, <4,4,5,5>
+ 1638468836U, // <7,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
+ 3785215214U, // <7,4,4,7>: Cost 4 vext3 <4,4,7,7>, <4,4,7,7>
+ 1640459509U, // <7,4,4,u>: Cost 2 vext3 RHS, <4,4,u,5>
+ 1517207654U, // <7,4,5,0>: Cost 2 vext1 <6,7,4,5>, LHS
+ 2573034640U, // <7,4,5,1>: Cost 3 vext1 <3,7,4,5>, <1,5,3,7>
+ 2712063246U, // <7,4,5,2>: Cost 3 vext3 RHS, <4,5,2,3>
+ 2573036267U, // <7,4,5,3>: Cost 3 vext1 <3,7,4,5>, <3,7,4,5>
+ 1517210934U, // <7,4,5,4>: Cost 2 vext1 <6,7,4,5>, RHS
+ 2711989549U, // <7,4,5,5>: Cost 3 vext3 <4,5,5,7>, <4,5,5,7>
+ 564579638U, // <7,4,5,6>: Cost 1 vext3 RHS, RHS
+ 2651639976U, // <7,4,5,7>: Cost 3 vext2 <5,6,7,4>, <5,7,5,7>
+ 564579656U, // <7,4,5,u>: Cost 1 vext3 RHS, RHS
+ 2712063307U, // <7,4,6,0>: Cost 3 vext3 RHS, <4,6,0,1>
+ 3767668056U, // <7,4,6,1>: Cost 4 vext3 <1,5,3,7>, <4,6,1,5>
+ 2651640314U, // <7,4,6,2>: Cost 3 vext2 <5,6,7,4>, <6,2,7,3>
+ 2655621708U, // <7,4,6,3>: Cost 3 vext2 <6,3,7,4>, <6,3,7,4>
+ 1638468980U, // <7,4,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
+ 2712063358U, // <7,4,6,5>: Cost 3 vext3 RHS, <4,6,5,7>
+ 2712063367U, // <7,4,6,6>: Cost 3 vext3 RHS, <4,6,6,7>
+ 2712210826U, // <7,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
+ 1638469012U, // <7,4,6,u>: Cost 2 vext3 RHS, <4,6,u,2>
+ 2651640826U, // <7,4,7,0>: Cost 3 vext2 <5,6,7,4>, <7,0,1,2>
+ 3773713830U, // <7,4,7,1>: Cost 4 vext3 <2,5,4,7>, <4,7,1,2>
+ 3773713842U, // <7,4,7,2>: Cost 4 vext3 <2,5,4,7>, <4,7,2,5>
+ 3780349372U, // <7,4,7,3>: Cost 4 vext3 <3,6,4,7>, <4,7,3,6>
+ 2651641140U, // <7,4,7,4>: Cost 3 vext2 <5,6,7,4>, <7,4,0,1>
+ 2712210888U, // <7,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
+ 2712210898U, // <7,4,7,6>: Cost 3 vext3 RHS, <4,7,6,1>
+ 2651641452U, // <7,4,7,7>: Cost 3 vext2 <5,6,7,4>, <7,7,7,7>
+ 2713538026U, // <7,4,7,u>: Cost 3 vext3 <4,7,u,7>, <4,7,u,7>
+ 1517232230U, // <7,4,u,0>: Cost 2 vext1 <6,7,4,u>, LHS
+ 1577899822U, // <7,4,u,1>: Cost 2 vext2 <5,6,7,4>, LHS
+ 2712063489U, // <7,4,u,2>: Cost 3 vext3 RHS, <4,u,2,3>
+ 2573060846U, // <7,4,u,3>: Cost 3 vext1 <3,7,4,u>, <3,7,4,u>
+ 1640312342U, // <7,4,u,4>: Cost 2 vext3 RHS, <4,u,4,6>
+ 1638469146U, // <7,4,u,5>: Cost 2 vext3 RHS, <4,u,5,1>
+ 564579881U, // <7,4,u,6>: Cost 1 vext3 RHS, RHS
+ 2714054192U, // <7,4,u,7>: Cost 3 vext3 RHS, <4,u,7,5>
+ 564579899U, // <7,4,u,u>: Cost 1 vext3 RHS, RHS
+ 2579038310U, // <7,5,0,0>: Cost 3 vext1 <4,7,5,0>, LHS
+ 2636382310U, // <7,5,0,1>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2796339302U, // <7,5,0,2>: Cost 3 vuzpl <7,4,5,6>, LHS
+ 3646810719U, // <7,5,0,3>: Cost 4 vext1 <3,7,5,0>, <3,5,7,0>
+ 2712063586U, // <7,5,0,4>: Cost 3 vext3 RHS, <5,0,4,1>
+ 2735951467U, // <7,5,0,5>: Cost 3 vext3 RHS, <5,0,5,1>
+ 2735951476U, // <7,5,0,6>: Cost 3 vext3 RHS, <5,0,6,1>
+ 2579043322U, // <7,5,0,7>: Cost 3 vext1 <4,7,5,0>, <7,0,1,2>
+ 2636382877U, // <7,5,0,u>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2712211087U, // <7,5,1,0>: Cost 3 vext3 RHS, <5,1,0,1>
+ 3698180916U, // <7,5,1,1>: Cost 4 vext2 <1,1,7,5>, <1,1,1,1>
+ 3710124950U, // <7,5,1,2>: Cost 4 vext2 <3,1,7,5>, <1,2,3,0>
+ 2636383232U, // <7,5,1,3>: Cost 3 vext2 <3,1,7,5>, <1,3,5,7>
+ 2712211127U, // <7,5,1,4>: Cost 3 vext3 RHS, <5,1,4,5>
+ 2590994128U, // <7,5,1,5>: Cost 3 vext1 <6,7,5,1>, <5,1,7,3>
+ 2590995323U, // <7,5,1,6>: Cost 3 vext1 <6,7,5,1>, <6,7,5,1>
+ 1638469328U, // <7,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
+ 1638469337U, // <7,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
+ 3785805536U, // <7,5,2,0>: Cost 4 vext3 RHS, <5,2,0,1>
+ 3785805544U, // <7,5,2,1>: Cost 4 vext3 RHS, <5,2,1,0>
+ 3704817288U, // <7,5,2,2>: Cost 4 vext2 <2,2,7,5>, <2,2,5,7>
+ 2712063742U, // <7,5,2,3>: Cost 3 vext3 RHS, <5,2,3,4>
+ 3716761386U, // <7,5,2,4>: Cost 4 vext2 <4,2,7,5>, <2,4,5,7>
+ 2714054415U, // <7,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
+ 3774304024U, // <7,5,2,6>: Cost 4 vext3 <2,6,3,7>, <5,2,6,3>
+ 2712063777U, // <7,5,2,7>: Cost 3 vext3 RHS, <5,2,7,3>
+ 2712063787U, // <7,5,2,u>: Cost 3 vext3 RHS, <5,2,u,4>
+ 3634888806U, // <7,5,3,0>: Cost 4 vext1 <1,7,5,3>, LHS
+ 2636384544U, // <7,5,3,1>: Cost 3 vext2 <3,1,7,5>, <3,1,7,5>
+ 3710790001U, // <7,5,3,2>: Cost 4 vext2 <3,2,7,5>, <3,2,7,5>
+ 3710126492U, // <7,5,3,3>: Cost 4 vext2 <3,1,7,5>, <3,3,3,3>
+ 3634892086U, // <7,5,3,4>: Cost 4 vext1 <1,7,5,3>, RHS
+ 2639039076U, // <7,5,3,5>: Cost 3 vext2 <3,5,7,5>, <3,5,7,5>
+ 3713444533U, // <7,5,3,6>: Cost 4 vext2 <3,6,7,5>, <3,6,7,5>
+ 2693926767U, // <7,5,3,7>: Cost 3 vext3 <1,5,3,7>, <5,3,7,0>
+ 2712063864U, // <7,5,3,u>: Cost 3 vext3 RHS, <5,3,u,0>
+ 2579071078U, // <7,5,4,0>: Cost 3 vext1 <4,7,5,4>, LHS
+ 3646841856U, // <7,5,4,1>: Cost 4 vext1 <3,7,5,4>, <1,3,5,7>
+ 3716762698U, // <7,5,4,2>: Cost 4 vext2 <4,2,7,5>, <4,2,7,5>
+ 3646843491U, // <7,5,4,3>: Cost 4 vext1 <3,7,5,4>, <3,5,7,4>
+ 2579074358U, // <7,5,4,4>: Cost 3 vext1 <4,7,5,4>, RHS
+ 2636385590U, // <7,5,4,5>: Cost 3 vext2 <3,1,7,5>, RHS
+ 2645675406U, // <7,5,4,6>: Cost 3 vext2 <4,6,7,5>, <4,6,7,5>
+ 1638322118U, // <7,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
+ 1638469583U, // <7,5,4,u>: Cost 2 vext3 RHS, <5,4,u,6>
+ 2714054611U, // <7,5,5,0>: Cost 3 vext3 RHS, <5,5,0,1>
+ 2652974800U, // <7,5,5,1>: Cost 3 vext2 <5,u,7,5>, <5,1,7,3>
+ 3710127905U, // <7,5,5,2>: Cost 4 vext2 <3,1,7,5>, <5,2,7,3>
+ 3785805808U, // <7,5,5,3>: Cost 4 vext3 RHS, <5,5,3,3>
+ 2712211450U, // <7,5,5,4>: Cost 3 vext3 RHS, <5,5,4,4>
+ 1638322180U, // <7,5,5,5>: Cost 2 vext3 RHS, <5,5,5,5>
+ 2712064014U, // <7,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
+ 1638469656U, // <7,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
+ 1638469665U, // <7,5,5,u>: Cost 2 vext3 RHS, <5,5,u,7>
+ 2712064036U, // <7,5,6,0>: Cost 3 vext3 RHS, <5,6,0,1>
+ 2714054707U, // <7,5,6,1>: Cost 3 vext3 RHS, <5,6,1,7>
+ 3785805879U, // <7,5,6,2>: Cost 4 vext3 RHS, <5,6,2,2>
+ 2712064066U, // <7,5,6,3>: Cost 3 vext3 RHS, <5,6,3,4>
+ 2712064076U, // <7,5,6,4>: Cost 3 vext3 RHS, <5,6,4,5>
+ 2714054743U, // <7,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
+ 2712064096U, // <7,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
+ 1638322274U, // <7,5,6,7>: Cost 2 vext3 RHS, <5,6,7,0>
+ 1638469739U, // <7,5,6,u>: Cost 2 vext3 RHS, <5,6,u,0>
+ 1511325798U, // <7,5,7,0>: Cost 2 vext1 <5,7,5,7>, LHS
+ 2692747392U, // <7,5,7,1>: Cost 3 vext3 <1,3,5,7>, <5,7,1,3>
+ 2585069160U, // <7,5,7,2>: Cost 3 vext1 <5,7,5,7>, <2,2,2,2>
+ 2573126390U, // <7,5,7,3>: Cost 3 vext1 <3,7,5,7>, <3,7,5,7>
+ 1511329078U, // <7,5,7,4>: Cost 2 vext1 <5,7,5,7>, RHS
+ 1638469800U, // <7,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
+ 2712211626U, // <7,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
+ 2712211636U, // <7,5,7,7>: Cost 3 vext3 RHS, <5,7,7,1>
+ 1638469823U, // <7,5,7,u>: Cost 2 vext3 RHS, <5,7,u,3>
+ 1511333990U, // <7,5,u,0>: Cost 2 vext1 <5,7,5,u>, LHS
+ 2636388142U, // <7,5,u,1>: Cost 3 vext2 <3,1,7,5>, LHS
+ 2712211671U, // <7,5,u,2>: Cost 3 vext3 RHS, <5,u,2,0>
+ 2573134583U, // <7,5,u,3>: Cost 3 vext1 <3,7,5,u>, <3,7,5,u>
+ 1511337270U, // <7,5,u,4>: Cost 2 vext1 <5,7,5,u>, RHS
+ 1638469881U, // <7,5,u,5>: Cost 2 vext3 RHS, <5,u,5,7>
+ 2712064258U, // <7,5,u,6>: Cost 3 vext3 RHS, <5,u,6,7>
+ 1638469892U, // <7,5,u,7>: Cost 2 vext3 RHS, <5,u,7,0>
+ 1638469904U, // <7,5,u,u>: Cost 2 vext3 RHS, <5,u,u,3>
+ 2650324992U, // <7,6,0,0>: Cost 3 vext2 <5,4,7,6>, <0,0,0,0>
+ 1576583270U, // <7,6,0,1>: Cost 2 vext2 <5,4,7,6>, LHS
+ 2712064300U, // <7,6,0,2>: Cost 3 vext3 RHS, <6,0,2,4>
+ 2255295336U, // <7,6,0,3>: Cost 3 vrev <6,7,3,0>
+ 2712064316U, // <7,6,0,4>: Cost 3 vext3 RHS, <6,0,4,2>
+ 2585088098U, // <7,6,0,5>: Cost 3 vext1 <5,7,6,0>, <5,6,7,0>
+ 2735952204U, // <7,6,0,6>: Cost 3 vext3 RHS, <6,0,6,0>
+ 2712211799U, // <7,6,0,7>: Cost 3 vext3 RHS, <6,0,7,2>
+ 1576583837U, // <7,6,0,u>: Cost 2 vext2 <5,4,7,6>, LHS
+ 1181340494U, // <7,6,1,0>: Cost 2 vrev <6,7,0,1>
+ 2650325812U, // <7,6,1,1>: Cost 3 vext2 <5,4,7,6>, <1,1,1,1>
+ 2650325910U, // <7,6,1,2>: Cost 3 vext2 <5,4,7,6>, <1,2,3,0>
+ 2650325976U, // <7,6,1,3>: Cost 3 vext2 <5,4,7,6>, <1,3,1,3>
+ 2579123510U, // <7,6,1,4>: Cost 3 vext1 <4,7,6,1>, RHS
+ 2650326160U, // <7,6,1,5>: Cost 3 vext2 <5,4,7,6>, <1,5,3,7>
+ 2714055072U, // <7,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
+ 2712064425U, // <7,6,1,7>: Cost 3 vext3 RHS, <6,1,7,3>
+ 1181930390U, // <7,6,1,u>: Cost 2 vrev <6,7,u,1>
+ 2712211897U, // <7,6,2,0>: Cost 3 vext3 RHS, <6,2,0,1>
+ 2714055108U, // <7,6,2,1>: Cost 3 vext3 RHS, <6,2,1,3>
+ 2650326632U, // <7,6,2,2>: Cost 3 vext2 <5,4,7,6>, <2,2,2,2>
+ 2650326694U, // <7,6,2,3>: Cost 3 vext2 <5,4,7,6>, <2,3,0,1>
+ 2714055137U, // <7,6,2,4>: Cost 3 vext3 RHS, <6,2,4,5>
+ 2714055148U, // <7,6,2,5>: Cost 3 vext3 RHS, <6,2,5,7>
+ 2650326970U, // <7,6,2,6>: Cost 3 vext2 <5,4,7,6>, <2,6,3,7>
+ 1638470138U, // <7,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
+ 1638470147U, // <7,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
+ 2650327190U, // <7,6,3,0>: Cost 3 vext2 <5,4,7,6>, <3,0,1,2>
+ 2255172441U, // <7,6,3,1>: Cost 3 vrev <6,7,1,3>
+ 2255246178U, // <7,6,3,2>: Cost 3 vrev <6,7,2,3>
+ 2650327452U, // <7,6,3,3>: Cost 3 vext2 <5,4,7,6>, <3,3,3,3>
+ 2712064562U, // <7,6,3,4>: Cost 3 vext3 RHS, <6,3,4,5>
+ 2650327627U, // <7,6,3,5>: Cost 3 vext2 <5,4,7,6>, <3,5,4,7>
+ 3713452726U, // <7,6,3,6>: Cost 4 vext2 <3,6,7,6>, <3,6,7,6>
+ 2700563016U, // <7,6,3,7>: Cost 3 vext3 <2,6,3,7>, <6,3,7,0>
+ 2712064593U, // <7,6,3,u>: Cost 3 vext3 RHS, <6,3,u,0>
+ 2650327954U, // <7,6,4,0>: Cost 3 vext2 <5,4,7,6>, <4,0,5,1>
+ 2735952486U, // <7,6,4,1>: Cost 3 vext3 RHS, <6,4,1,3>
+ 2735952497U, // <7,6,4,2>: Cost 3 vext3 RHS, <6,4,2,5>
+ 2255328108U, // <7,6,4,3>: Cost 3 vrev <6,7,3,4>
+ 2712212100U, // <7,6,4,4>: Cost 3 vext3 RHS, <6,4,4,6>
+ 1576586550U, // <7,6,4,5>: Cost 2 vext2 <5,4,7,6>, RHS
+ 2714055312U, // <7,6,4,6>: Cost 3 vext3 RHS, <6,4,6,0>
+ 2712212126U, // <7,6,4,7>: Cost 3 vext3 RHS, <6,4,7,5>
+ 1576586793U, // <7,6,4,u>: Cost 2 vext2 <5,4,7,6>, RHS
+ 2579152998U, // <7,6,5,0>: Cost 3 vext1 <4,7,6,5>, LHS
+ 2650328784U, // <7,6,5,1>: Cost 3 vext2 <5,4,7,6>, <5,1,7,3>
+ 2714055364U, // <7,6,5,2>: Cost 3 vext3 RHS, <6,5,2,7>
+ 3785806538U, // <7,6,5,3>: Cost 4 vext3 RHS, <6,5,3,4>
+ 1576587206U, // <7,6,5,4>: Cost 2 vext2 <5,4,7,6>, <5,4,7,6>
+ 2650329092U, // <7,6,5,5>: Cost 3 vext2 <5,4,7,6>, <5,5,5,5>
+ 2650329186U, // <7,6,5,6>: Cost 3 vext2 <5,4,7,6>, <5,6,7,0>
+ 2712064753U, // <7,6,5,7>: Cost 3 vext3 RHS, <6,5,7,7>
+ 1181963162U, // <7,6,5,u>: Cost 2 vrev <6,7,u,5>
+ 2714055421U, // <7,6,6,0>: Cost 3 vext3 RHS, <6,6,0,1>
+ 2714055432U, // <7,6,6,1>: Cost 3 vext3 RHS, <6,6,1,3>
+ 2650329594U, // <7,6,6,2>: Cost 3 vext2 <5,4,7,6>, <6,2,7,3>
+ 3785806619U, // <7,6,6,3>: Cost 4 vext3 RHS, <6,6,3,4>
+ 2712212260U, // <7,6,6,4>: Cost 3 vext3 RHS, <6,6,4,4>
+ 2714055472U, // <7,6,6,5>: Cost 3 vext3 RHS, <6,6,5,7>
+ 1638323000U, // <7,6,6,6>: Cost 2 vext3 RHS, <6,6,6,6>
+ 1638470466U, // <7,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
+ 1638470475U, // <7,6,6,u>: Cost 2 vext3 RHS, <6,6,u,7>
+ 1638323022U, // <7,6,7,0>: Cost 2 vext3 RHS, <6,7,0,1>
+ 2712064854U, // <7,6,7,1>: Cost 3 vext3 RHS, <6,7,1,0>
+ 2712064865U, // <7,6,7,2>: Cost 3 vext3 RHS, <6,7,2,2>
+ 2712064872U, // <7,6,7,3>: Cost 3 vext3 RHS, <6,7,3,0>
+ 1638323062U, // <7,6,7,4>: Cost 2 vext3 RHS, <6,7,4,5>
+ 2712064894U, // <7,6,7,5>: Cost 3 vext3 RHS, <6,7,5,4>
+ 2712064905U, // <7,6,7,6>: Cost 3 vext3 RHS, <6,7,6,6>
+ 2712064915U, // <7,6,7,7>: Cost 3 vext3 RHS, <6,7,7,7>
+ 1638323094U, // <7,6,7,u>: Cost 2 vext3 RHS, <6,7,u,1>
+ 1638470559U, // <7,6,u,0>: Cost 2 vext3 RHS, <6,u,0,1>
+ 1576589102U, // <7,6,u,1>: Cost 2 vext2 <5,4,7,6>, LHS
+ 2712212402U, // <7,6,u,2>: Cost 3 vext3 RHS, <6,u,2,2>
+ 2712212409U, // <7,6,u,3>: Cost 3 vext3 RHS, <6,u,3,0>
+ 1638470599U, // <7,6,u,4>: Cost 2 vext3 RHS, <6,u,4,5>
+ 1576589466U, // <7,6,u,5>: Cost 2 vext2 <5,4,7,6>, RHS
+ 1638323000U, // <7,6,u,6>: Cost 2 vext3 RHS, <6,6,6,6>
+ 1638470624U, // <7,6,u,7>: Cost 2 vext3 RHS, <6,u,7,3>
+ 1638470631U, // <7,6,u,u>: Cost 2 vext3 RHS, <6,u,u,1>
+ 2712065007U, // <7,7,0,0>: Cost 3 vext3 RHS, <7,0,0,0>
+ 1638323194U, // <7,7,0,1>: Cost 2 vext3 RHS, <7,0,1,2>
+ 2712065025U, // <7,7,0,2>: Cost 3 vext3 RHS, <7,0,2,0>
+ 3646958337U, // <7,7,0,3>: Cost 4 vext1 <3,7,7,0>, <3,7,7,0>
+ 2712065044U, // <7,7,0,4>: Cost 3 vext3 RHS, <7,0,4,1>
+ 2585161907U, // <7,7,0,5>: Cost 3 vext1 <5,7,7,0>, <5,7,7,0>
+ 2591134604U, // <7,7,0,6>: Cost 3 vext1 <6,7,7,0>, <6,7,7,0>
+ 2591134714U, // <7,7,0,7>: Cost 3 vext1 <6,7,7,0>, <7,0,1,2>
+ 1638323257U, // <7,7,0,u>: Cost 2 vext3 RHS, <7,0,u,2>
+ 2712065091U, // <7,7,1,0>: Cost 3 vext3 RHS, <7,1,0,3>
+ 2712065098U, // <7,7,1,1>: Cost 3 vext3 RHS, <7,1,1,1>
+ 2712065109U, // <7,7,1,2>: Cost 3 vext3 RHS, <7,1,2,3>
+ 2692748384U, // <7,7,1,3>: Cost 3 vext3 <1,3,5,7>, <7,1,3,5>
+ 2585169206U, // <7,7,1,4>: Cost 3 vext1 <5,7,7,1>, RHS
+ 2693928048U, // <7,7,1,5>: Cost 3 vext3 <1,5,3,7>, <7,1,5,3>
+ 2585170766U, // <7,7,1,6>: Cost 3 vext1 <5,7,7,1>, <6,7,0,1>
+ 2735953024U, // <7,7,1,7>: Cost 3 vext3 RHS, <7,1,7,1>
+ 2695918731U, // <7,7,1,u>: Cost 3 vext3 <1,u,3,7>, <7,1,u,3>
+ 3770471574U, // <7,7,2,0>: Cost 4 vext3 <2,0,5,7>, <7,2,0,5>
+ 3785807002U, // <7,7,2,1>: Cost 4 vext3 RHS, <7,2,1,0>
+ 2712065189U, // <7,7,2,2>: Cost 3 vext3 RHS, <7,2,2,2>
+ 2712065196U, // <7,7,2,3>: Cost 3 vext3 RHS, <7,2,3,0>
+ 3773125818U, // <7,7,2,4>: Cost 4 vext3 <2,4,5,7>, <7,2,4,5>
+ 3766490305U, // <7,7,2,5>: Cost 4 vext3 <1,3,5,7>, <7,2,5,3>
+ 2700563658U, // <7,7,2,6>: Cost 3 vext3 <2,6,3,7>, <7,2,6,3>
+ 2735953107U, // <7,7,2,7>: Cost 3 vext3 RHS, <7,2,7,3>
+ 2701890780U, // <7,7,2,u>: Cost 3 vext3 <2,u,3,7>, <7,2,u,3>
+ 2712065251U, // <7,7,3,0>: Cost 3 vext3 RHS, <7,3,0,1>
+ 3766490350U, // <7,7,3,1>: Cost 4 vext3 <1,3,5,7>, <7,3,1,3>
+ 3774305530U, // <7,7,3,2>: Cost 4 vext3 <2,6,3,7>, <7,3,2,6>
+ 2637728196U, // <7,7,3,3>: Cost 3 vext2 <3,3,7,7>, <3,3,7,7>
+ 2712065291U, // <7,7,3,4>: Cost 3 vext3 RHS, <7,3,4,5>
+ 2585186486U, // <7,7,3,5>: Cost 3 vext1 <5,7,7,3>, <5,7,7,3>
+ 2639719095U, // <7,7,3,6>: Cost 3 vext2 <3,6,7,7>, <3,6,7,7>
+ 2640382728U, // <7,7,3,7>: Cost 3 vext2 <3,7,7,7>, <3,7,7,7>
+ 2641046361U, // <7,7,3,u>: Cost 3 vext2 <3,u,7,7>, <3,u,7,7>
+ 2712212792U, // <7,7,4,0>: Cost 3 vext3 RHS, <7,4,0,5>
+ 3646989312U, // <7,7,4,1>: Cost 4 vext1 <3,7,7,4>, <1,3,5,7>
+ 3785807176U, // <7,7,4,2>: Cost 4 vext3 RHS, <7,4,2,3>
+ 3646991109U, // <7,7,4,3>: Cost 4 vext1 <3,7,7,4>, <3,7,7,4>
+ 2712065371U, // <7,7,4,4>: Cost 3 vext3 RHS, <7,4,4,4>
+ 1638323558U, // <7,7,4,5>: Cost 2 vext3 RHS, <7,4,5,6>
+ 2712212845U, // <7,7,4,6>: Cost 3 vext3 RHS, <7,4,6,4>
+ 2591167846U, // <7,7,4,7>: Cost 3 vext1 <6,7,7,4>, <7,4,5,6>
+ 1638323585U, // <7,7,4,u>: Cost 2 vext3 RHS, <7,4,u,6>
+ 2585198694U, // <7,7,5,0>: Cost 3 vext1 <5,7,7,5>, LHS
+ 2712212884U, // <7,7,5,1>: Cost 3 vext3 RHS, <7,5,1,7>
+ 3711471393U, // <7,7,5,2>: Cost 4 vext2 <3,3,7,7>, <5,2,7,3>
+ 2649673590U, // <7,7,5,3>: Cost 3 vext2 <5,3,7,7>, <5,3,7,7>
+ 2712065455U, // <7,7,5,4>: Cost 3 vext3 RHS, <7,5,4,7>
+ 1577259032U, // <7,7,5,5>: Cost 2 vext2 <5,5,7,7>, <5,5,7,7>
+ 2712065473U, // <7,7,5,6>: Cost 3 vext3 RHS, <7,5,6,7>
+ 2712212936U, // <7,7,5,7>: Cost 3 vext3 RHS, <7,5,7,5>
+ 1579249931U, // <7,7,5,u>: Cost 2 vext2 <5,u,7,7>, <5,u,7,7>
+ 2591178854U, // <7,7,6,0>: Cost 3 vext1 <6,7,7,6>, LHS
+ 2735953374U, // <7,7,6,1>: Cost 3 vext3 RHS, <7,6,1,0>
+ 2712212974U, // <7,7,6,2>: Cost 3 vext3 RHS, <7,6,2,7>
+ 2655646287U, // <7,7,6,3>: Cost 3 vext2 <6,3,7,7>, <6,3,7,7>
+ 2591182134U, // <7,7,6,4>: Cost 3 vext1 <6,7,7,6>, RHS
+ 2656973553U, // <7,7,6,5>: Cost 3 vext2 <6,5,7,7>, <6,5,7,7>
+ 1583895362U, // <7,7,6,6>: Cost 2 vext2 <6,6,7,7>, <6,6,7,7>
+ 2712065556U, // <7,7,6,7>: Cost 3 vext3 RHS, <7,6,7,0>
+ 1585222628U, // <7,7,6,u>: Cost 2 vext2 <6,u,7,7>, <6,u,7,7>
+ 1523417190U, // <7,7,7,0>: Cost 2 vext1 <7,7,7,7>, LHS
+ 2597159670U, // <7,7,7,1>: Cost 3 vext1 <7,7,7,7>, <1,0,3,2>
+ 2597160552U, // <7,7,7,2>: Cost 3 vext1 <7,7,7,7>, <2,2,2,2>
+ 2597161110U, // <7,7,7,3>: Cost 3 vext1 <7,7,7,7>, <3,0,1,2>
+ 1523420470U, // <7,7,7,4>: Cost 2 vext1 <7,7,7,7>, RHS
+ 2651002296U, // <7,7,7,5>: Cost 3 vext2 <5,5,7,7>, <7,5,5,7>
+ 2657637906U, // <7,7,7,6>: Cost 3 vext2 <6,6,7,7>, <7,6,6,7>
+ 363253046U, // <7,7,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,7,7,u>: Cost 1 vdup3 RHS
+ 1523417190U, // <7,7,u,0>: Cost 2 vext1 <7,7,7,7>, LHS
+ 1638471298U, // <7,7,u,1>: Cost 2 vext3 RHS, <7,u,1,2>
+ 2712213132U, // <7,7,u,2>: Cost 3 vext3 RHS, <7,u,2,3>
+ 2712213138U, // <7,7,u,3>: Cost 3 vext3 RHS, <7,u,3,0>
+ 1523420470U, // <7,7,u,4>: Cost 2 vext1 <7,7,7,7>, RHS
+ 1638471338U, // <7,7,u,5>: Cost 2 vext3 RHS, <7,u,5,6>
+ 1595840756U, // <7,7,u,6>: Cost 2 vext2 <u,6,7,7>, <u,6,7,7>
+ 363253046U, // <7,7,u,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,7,u,u>: Cost 1 vdup3 RHS
+ 1638318080U, // <7,u,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
+ 1638323923U, // <7,u,0,1>: Cost 2 vext3 RHS, <u,0,1,2>
+ 1662211804U, // <7,u,0,2>: Cost 2 vext3 RHS, <u,0,2,2>
+ 1638323941U, // <7,u,0,3>: Cost 2 vext3 RHS, <u,0,3,2>
+ 2712065773U, // <7,u,0,4>: Cost 3 vext3 RHS, <u,0,4,1>
+ 1662359286U, // <7,u,0,5>: Cost 2 vext3 RHS, <u,0,5,1>
+ 1662359296U, // <7,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
+ 2987150664U, // <7,u,0,7>: Cost 3 vzipr <5,6,7,0>, RHS
+ 1638323986U, // <7,u,0,u>: Cost 2 vext3 RHS, <u,0,u,2>
+ 1517469798U, // <7,u,1,0>: Cost 2 vext1 <6,7,u,1>, LHS
+ 1638318900U, // <7,u,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
+ 564582190U, // <7,u,1,2>: Cost 1 vext3 RHS, LHS
+ 1638324023U, // <7,u,1,3>: Cost 2 vext3 RHS, <u,1,3,3>
+ 1517473078U, // <7,u,1,4>: Cost 2 vext1 <6,7,u,1>, RHS
+ 2693928777U, // <7,u,1,5>: Cost 3 vext3 <1,5,3,7>, <u,1,5,3>
+ 1517474710U, // <7,u,1,6>: Cost 2 vext1 <6,7,u,1>, <6,7,u,1>
+ 1640462171U, // <7,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
+ 564582244U, // <7,u,1,u>: Cost 1 vext3 RHS, LHS
+ 1638318244U, // <7,u,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
+ 2712065907U, // <7,u,2,1>: Cost 3 vext3 RHS, <u,2,1,0>
+ 1638319720U, // <7,u,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
+ 1638324101U, // <7,u,2,3>: Cost 2 vext3 RHS, <u,2,3,0>
+ 1638318284U, // <7,u,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
+ 2712065947U, // <7,u,2,5>: Cost 3 vext3 RHS, <u,2,5,4>
+ 2700564387U, // <7,u,2,6>: Cost 3 vext3 <2,6,3,7>, <u,2,6,3>
+ 1640314796U, // <7,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
+ 1638324146U, // <7,u,2,u>: Cost 2 vext3 RHS, <u,2,u,0>
+ 1638324156U, // <7,u,3,0>: Cost 2 vext3 RHS, <u,3,0,1>
+ 1638319064U, // <7,u,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
+ 2700564435U, // <7,u,3,2>: Cost 3 vext3 <2,6,3,7>, <u,3,2,6>
+ 1638320540U, // <7,u,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
+ 1638324196U, // <7,u,3,4>: Cost 2 vext3 RHS, <u,3,4,5>
+ 1638324207U, // <7,u,3,5>: Cost 2 vext3 RHS, <u,3,5,7>
+ 2700564472U, // <7,u,3,6>: Cost 3 vext3 <2,6,3,7>, <u,3,6,7>
+ 2695919610U, // <7,u,3,7>: Cost 3 vext3 <1,u,3,7>, <u,3,7,0>
+ 1638324228U, // <7,u,3,u>: Cost 2 vext3 RHS, <u,3,u,1>
+ 2712066061U, // <7,u,4,0>: Cost 3 vext3 RHS, <u,4,0,1>
+ 1662212122U, // <7,u,4,1>: Cost 2 vext3 RHS, <u,4,1,5>
+ 1662212132U, // <7,u,4,2>: Cost 2 vext3 RHS, <u,4,2,6>
+ 2712066092U, // <7,u,4,3>: Cost 3 vext3 RHS, <u,4,3,5>
+ 1638321360U, // <7,u,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
+ 1638324287U, // <7,u,4,5>: Cost 2 vext3 RHS, <u,4,5,6>
+ 1662359624U, // <7,u,4,6>: Cost 2 vext3 RHS, <u,4,6,6>
+ 1640314961U, // <7,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
+ 1638324314U, // <7,u,4,u>: Cost 2 vext3 RHS, <u,4,u,6>
+ 1517502566U, // <7,u,5,0>: Cost 2 vext1 <6,7,u,5>, LHS
+ 1574612693U, // <7,u,5,1>: Cost 2 vext2 <5,1,7,u>, <5,1,7,u>
+ 2712066162U, // <7,u,5,2>: Cost 3 vext3 RHS, <u,5,2,3>
+ 1638324351U, // <7,u,5,3>: Cost 2 vext3 RHS, <u,5,3,7>
+ 1576603592U, // <7,u,5,4>: Cost 2 vext2 <5,4,7,u>, <5,4,7,u>
+ 1577267225U, // <7,u,5,5>: Cost 2 vext2 <5,5,7,u>, <5,5,7,u>
+ 564582554U, // <7,u,5,6>: Cost 1 vext3 RHS, RHS
+ 1640462499U, // <7,u,5,7>: Cost 2 vext3 RHS, <u,5,7,7>
+ 564582572U, // <7,u,5,u>: Cost 1 vext3 RHS, RHS
+ 2712066223U, // <7,u,6,0>: Cost 3 vext3 RHS, <u,6,0,1>
+ 2712066238U, // <7,u,6,1>: Cost 3 vext3 RHS, <u,6,1,7>
+ 1581249023U, // <7,u,6,2>: Cost 2 vext2 <6,2,7,u>, <6,2,7,u>
+ 1638324432U, // <7,u,6,3>: Cost 2 vext3 RHS, <u,6,3,7>
+ 1638468980U, // <7,u,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
+ 2712066274U, // <7,u,6,5>: Cost 3 vext3 RHS, <u,6,5,7>
+ 1583903555U, // <7,u,6,6>: Cost 2 vext2 <6,6,7,u>, <6,6,7,u>
+ 1640315117U, // <7,u,6,7>: Cost 2 vext3 RHS, <u,6,7,0>
+ 1638324477U, // <7,u,6,u>: Cost 2 vext3 RHS, <u,6,u,7>
+ 1638471936U, // <7,u,7,0>: Cost 2 vext3 RHS, <u,7,0,1>
+ 2692970763U, // <7,u,7,1>: Cost 3 vext3 <1,3,u,7>, <u,7,1,3>
+ 2700933399U, // <7,u,7,2>: Cost 3 vext3 <2,6,u,7>, <u,7,2,6>
+ 2573347601U, // <7,u,7,3>: Cost 3 vext1 <3,7,u,7>, <3,7,u,7>
+ 1638471976U, // <7,u,7,4>: Cost 2 vext3 RHS, <u,7,4,5>
+ 1511551171U, // <7,u,7,5>: Cost 2 vext1 <5,7,u,7>, <5,7,u,7>
+ 2712213815U, // <7,u,7,6>: Cost 3 vext3 RHS, <u,7,6,2>
+ 363253046U, // <7,u,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <7,u,7,u>: Cost 1 vdup3 RHS
+ 1638324561U, // <7,u,u,0>: Cost 2 vext3 RHS, <u,u,0,1>
+ 1638324571U, // <7,u,u,1>: Cost 2 vext3 RHS, <u,u,1,2>
+ 564582757U, // <7,u,u,2>: Cost 1 vext3 RHS, LHS
+ 1638324587U, // <7,u,u,3>: Cost 2 vext3 RHS, <u,u,3,0>
+ 1638324601U, // <7,u,u,4>: Cost 2 vext3 RHS, <u,u,4,5>
+ 1638324611U, // <7,u,u,5>: Cost 2 vext3 RHS, <u,u,5,6>
+ 564582797U, // <7,u,u,6>: Cost 1 vext3 RHS, RHS
+ 363253046U, // <7,u,u,7>: Cost 1 vdup3 RHS
+ 564582811U, // <7,u,u,u>: Cost 1 vext3 RHS, LHS
+ 135053414U, // <u,0,0,0>: Cost 1 vdup0 LHS
+ 1611489290U, // <u,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
+ 1611489300U, // <u,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
+ 2568054923U, // <u,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
+ 1481706806U, // <u,0,0,4>: Cost 2 vext1 <0,u,0,0>, RHS
+ 2555449040U, // <u,0,0,5>: Cost 3 vext1 <0,u,0,0>, <5,1,7,3>
+ 2591282078U, // <u,0,0,6>: Cost 3 vext1 <6,u,0,0>, <6,u,0,0>
+ 2591945711U, // <u,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
+ 135053414U, // <u,0,0,u>: Cost 1 vdup0 LHS
+ 1493655654U, // <u,0,1,0>: Cost 2 vext1 <2,u,0,1>, LHS
+ 1860550758U, // <u,0,1,1>: Cost 2 vzipl LHS, LHS
+ 537747563U, // <u,0,1,2>: Cost 1 vext3 LHS, LHS
+ 2625135576U, // <u,0,1,3>: Cost 3 vext2 <1,2,u,0>, <1,3,1,3>
+ 1493658934U, // <u,0,1,4>: Cost 2 vext1 <2,u,0,1>, RHS
+ 2625135760U, // <u,0,1,5>: Cost 3 vext2 <1,2,u,0>, <1,5,3,7>
+ 1517548447U, // <u,0,1,6>: Cost 2 vext1 <6,u,0,1>, <6,u,0,1>
+ 2591290362U, // <u,0,1,7>: Cost 3 vext1 <6,u,0,1>, <7,0,1,2>
+ 537747612U, // <u,0,1,u>: Cost 1 vext3 LHS, LHS
+ 1611489444U, // <u,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
+ 2685231276U, // <u,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
+ 1994768486U, // <u,0,2,2>: Cost 2 vtrnl LHS, LHS
+ 2685231294U, // <u,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
+ 1611489484U, // <u,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
+ 2712068310U, // <u,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
+ 2625136570U, // <u,0,2,6>: Cost 3 vext2 <1,2,u,0>, <2,6,3,7>
+ 2591962097U, // <u,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
+ 1611489516U, // <u,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
+ 2954067968U, // <u,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
+ 2685231356U, // <u,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
+ 72589981U, // <u,0,3,2>: Cost 1 vrev LHS
+ 2625137052U, // <u,0,3,3>: Cost 3 vext2 <1,2,u,0>, <3,3,3,3>
+ 2625137154U, // <u,0,3,4>: Cost 3 vext2 <1,2,u,0>, <3,4,5,6>
+ 2639071848U, // <u,0,3,5>: Cost 3 vext2 <3,5,u,0>, <3,5,u,0>
+ 2639735481U, // <u,0,3,6>: Cost 3 vext2 <3,6,u,0>, <3,6,u,0>
+ 2597279354U, // <u,0,3,7>: Cost 3 vext1 <7,u,0,3>, <7,u,0,3>
+ 73032403U, // <u,0,3,u>: Cost 1 vrev LHS
+ 2687074636U, // <u,0,4,0>: Cost 3 vext3 <0,4,0,u>, <0,4,0,u>
+ 1611489618U, // <u,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
+ 1611489628U, // <u,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
+ 3629222038U, // <u,0,4,3>: Cost 4 vext1 <0,u,0,4>, <3,0,1,2>
+ 2555481398U, // <u,0,4,4>: Cost 3 vext1 <0,u,0,4>, RHS
+ 1551396150U, // <u,0,4,5>: Cost 2 vext2 <1,2,u,0>, RHS
+ 2651680116U, // <u,0,4,6>: Cost 3 vext2 <5,6,u,0>, <4,6,4,6>
+ 2646150600U, // <u,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
+ 1611932050U, // <u,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
+ 2561458278U, // <u,0,5,0>: Cost 3 vext1 <1,u,0,5>, LHS
+ 1863532646U, // <u,0,5,1>: Cost 2 vzipl RHS, LHS
+ 2712068526U, // <u,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
+ 2649689976U, // <u,0,5,3>: Cost 3 vext2 <5,3,u,0>, <5,3,u,0>
+ 2220237489U, // <u,0,5,4>: Cost 3 vrev <0,u,4,5>
+ 2651680772U, // <u,0,5,5>: Cost 3 vext2 <5,6,u,0>, <5,5,5,5>
+ 1577939051U, // <u,0,5,6>: Cost 2 vext2 <5,6,u,0>, <5,6,u,0>
+ 2830077238U, // <u,0,5,7>: Cost 3 vuzpr <1,u,3,0>, RHS
+ 1579266317U, // <u,0,5,u>: Cost 2 vext2 <5,u,u,0>, <5,u,u,0>
+ 2555494502U, // <u,0,6,0>: Cost 3 vext1 <0,u,0,6>, LHS
+ 2712068598U, // <u,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
+ 1997750374U, // <u,0,6,2>: Cost 2 vtrnl RHS, LHS
+ 2655662673U, // <u,0,6,3>: Cost 3 vext2 <6,3,u,0>, <6,3,u,0>
+ 2555497782U, // <u,0,6,4>: Cost 3 vext1 <0,u,0,6>, RHS
+ 2651681459U, // <u,0,6,5>: Cost 3 vext2 <5,6,u,0>, <6,5,0,u>
+ 2651681592U, // <u,0,6,6>: Cost 3 vext2 <5,6,u,0>, <6,6,6,6>
+ 2651681614U, // <u,0,6,7>: Cost 3 vext2 <5,6,u,0>, <6,7,0,1>
+ 1997750428U, // <u,0,6,u>: Cost 2 vtrnl RHS, LHS
+ 2567446630U, // <u,0,7,0>: Cost 3 vext1 <2,u,0,7>, LHS
+ 2567447446U, // <u,0,7,1>: Cost 3 vext1 <2,u,0,7>, <1,2,3,0>
+ 2567448641U, // <u,0,7,2>: Cost 3 vext1 <2,u,0,7>, <2,u,0,7>
+ 2573421338U, // <u,0,7,3>: Cost 3 vext1 <3,u,0,7>, <3,u,0,7>
+ 2567449910U, // <u,0,7,4>: Cost 3 vext1 <2,u,0,7>, RHS
+ 2651682242U, // <u,0,7,5>: Cost 3 vext2 <5,6,u,0>, <7,5,6,u>
+ 2591339429U, // <u,0,7,6>: Cost 3 vext1 <6,u,0,7>, <6,u,0,7>
+ 2651682412U, // <u,0,7,7>: Cost 3 vext2 <5,6,u,0>, <7,7,7,7>
+ 2567452462U, // <u,0,7,u>: Cost 3 vext1 <2,u,0,7>, LHS
+ 135053414U, // <u,0,u,0>: Cost 1 vdup0 LHS
+ 1611489938U, // <u,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
+ 537748125U, // <u,0,u,2>: Cost 1 vext3 LHS, LHS
+ 2685674148U, // <u,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
+ 1611932338U, // <u,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
+ 1551399066U, // <u,0,u,5>: Cost 2 vext2 <1,2,u,0>, RHS
+ 1517605798U, // <u,0,u,6>: Cost 2 vext1 <6,u,0,u>, <6,u,0,u>
+ 2830077481U, // <u,0,u,7>: Cost 3 vuzpr <1,u,3,0>, RHS
+ 537748179U, // <u,0,u,u>: Cost 1 vext3 LHS, LHS
+ 1544101961U, // <u,1,0,0>: Cost 2 vext2 <0,0,u,1>, <0,0,u,1>
+ 1558036582U, // <u,1,0,1>: Cost 2 vext2 <2,3,u,1>, LHS
+ 2619171051U, // <u,1,0,2>: Cost 3 vext2 <0,2,u,1>, <0,2,u,1>
+ 1611490038U, // <u,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
+ 2555522358U, // <u,1,0,4>: Cost 3 vext1 <0,u,1,0>, RHS
+ 2712068871U, // <u,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
+ 2591355815U, // <u,1,0,6>: Cost 3 vext1 <6,u,1,0>, <6,u,1,0>
+ 2597328512U, // <u,1,0,7>: Cost 3 vext1 <7,u,1,0>, <7,u,1,0>
+ 1611490083U, // <u,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
+ 1481785446U, // <u,1,1,0>: Cost 2 vext1 <0,u,1,1>, LHS
+ 202162278U, // <u,1,1,1>: Cost 1 vdup1 LHS
+ 2555528808U, // <u,1,1,2>: Cost 3 vext1 <0,u,1,1>, <2,2,2,2>
+ 1611490120U, // <u,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
+ 1481788726U, // <u,1,1,4>: Cost 2 vext1 <0,u,1,1>, RHS
+ 2689876828U, // <u,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
+ 2591364008U, // <u,1,1,6>: Cost 3 vext1 <6,u,1,1>, <6,u,1,1>
+ 2592691274U, // <u,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
+ 202162278U, // <u,1,1,u>: Cost 1 vdup1 LHS
+ 1499709542U, // <u,1,2,0>: Cost 2 vext1 <3,u,1,2>, LHS
+ 2689876871U, // <u,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
+ 2631116445U, // <u,1,2,2>: Cost 3 vext2 <2,2,u,1>, <2,2,u,1>
+ 835584U, // <u,1,2,3>: Cost 0 copy LHS
+ 1499712822U, // <u,1,2,4>: Cost 2 vext1 <3,u,1,2>, RHS
+ 2689876907U, // <u,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
+ 2631780282U, // <u,1,2,6>: Cost 3 vext2 <2,3,u,1>, <2,6,3,7>
+ 1523603074U, // <u,1,2,7>: Cost 2 vext1 <7,u,1,2>, <7,u,1,2>
+ 835584U, // <u,1,2,u>: Cost 0 copy LHS
+ 1487773798U, // <u,1,3,0>: Cost 2 vext1 <1,u,1,3>, LHS
+ 1611490264U, // <u,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
+ 2685232094U, // <u,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
+ 2018746470U, // <u,1,3,3>: Cost 2 vtrnr LHS, LHS
+ 1487777078U, // <u,1,3,4>: Cost 2 vext1 <1,u,1,3>, RHS
+ 1611490304U, // <u,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
+ 2685674505U, // <u,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
+ 2640407307U, // <u,1,3,7>: Cost 3 vext2 <3,7,u,1>, <3,7,u,1>
+ 1611490327U, // <u,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
+ 1567992749U, // <u,1,4,0>: Cost 2 vext2 <4,0,u,1>, <4,0,u,1>
+ 2693121070U, // <u,1,4,1>: Cost 3 vext3 <1,4,1,u>, <1,4,1,u>
+ 2693194807U, // <u,1,4,2>: Cost 3 vext3 <1,4,2,u>, <1,4,2,u>
+ 1152386432U, // <u,1,4,3>: Cost 2 vrev <1,u,3,4>
+ 2555555126U, // <u,1,4,4>: Cost 3 vext1 <0,u,1,4>, RHS
+ 1558039862U, // <u,1,4,5>: Cost 2 vext2 <2,3,u,1>, RHS
+ 2645716371U, // <u,1,4,6>: Cost 3 vext2 <4,6,u,1>, <4,6,u,1>
+ 2597361284U, // <u,1,4,7>: Cost 3 vext1 <7,u,1,4>, <7,u,1,4>
+ 1152755117U, // <u,1,4,u>: Cost 2 vrev <1,u,u,4>
+ 1481818214U, // <u,1,5,0>: Cost 2 vext1 <0,u,1,5>, LHS
+ 2555560694U, // <u,1,5,1>: Cost 3 vext1 <0,u,1,5>, <1,0,3,2>
+ 2555561576U, // <u,1,5,2>: Cost 3 vext1 <0,u,1,5>, <2,2,2,2>
+ 1611490448U, // <u,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
+ 1481821494U, // <u,1,5,4>: Cost 2 vext1 <0,u,1,5>, RHS
+ 2651025435U, // <u,1,5,5>: Cost 3 vext2 <5,5,u,1>, <5,5,u,1>
+ 2651689068U, // <u,1,5,6>: Cost 3 vext2 <5,6,u,1>, <5,6,u,1>
+ 2823966006U, // <u,1,5,7>: Cost 3 vuzpr <0,u,1,1>, RHS
+ 1611932861U, // <u,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
+ 2555568230U, // <u,1,6,0>: Cost 3 vext1 <0,u,1,6>, LHS
+ 2689877199U, // <u,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
+ 2712069336U, // <u,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
+ 2685232353U, // <u,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
+ 2555571510U, // <u,1,6,4>: Cost 3 vext1 <0,u,1,6>, RHS
+ 2689877235U, // <u,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
+ 2657661765U, // <u,1,6,6>: Cost 3 vext2 <6,6,u,1>, <6,6,u,1>
+ 1584583574U, // <u,1,6,7>: Cost 2 vext2 <6,7,u,1>, <6,7,u,1>
+ 1585247207U, // <u,1,6,u>: Cost 2 vext2 <6,u,u,1>, <6,u,u,1>
+ 2561548390U, // <u,1,7,0>: Cost 3 vext1 <1,u,1,7>, LHS
+ 2561549681U, // <u,1,7,1>: Cost 3 vext1 <1,u,1,7>, <1,u,1,7>
+ 2573493926U, // <u,1,7,2>: Cost 3 vext1 <3,u,1,7>, <2,3,0,1>
+ 2042962022U, // <u,1,7,3>: Cost 2 vtrnr RHS, LHS
+ 2561551670U, // <u,1,7,4>: Cost 3 vext1 <1,u,1,7>, RHS
+ 2226300309U, // <u,1,7,5>: Cost 3 vrev <1,u,5,7>
+ 2658325990U, // <u,1,7,6>: Cost 3 vext2 <6,7,u,1>, <7,6,1,u>
+ 2658326124U, // <u,1,7,7>: Cost 3 vext2 <6,7,u,1>, <7,7,7,7>
+ 2042962027U, // <u,1,7,u>: Cost 2 vtrnr RHS, LHS
+ 1481842790U, // <u,1,u,0>: Cost 2 vext1 <0,u,1,u>, LHS
+ 202162278U, // <u,1,u,1>: Cost 1 vdup1 LHS
+ 2685674867U, // <u,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
+ 835584U, // <u,1,u,3>: Cost 0 copy LHS
+ 1481846070U, // <u,1,u,4>: Cost 2 vext1 <0,u,1,u>, RHS
+ 1611933077U, // <u,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
+ 2685674910U, // <u,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
+ 1523652232U, // <u,1,u,7>: Cost 2 vext1 <7,u,1,u>, <7,u,1,u>
+ 835584U, // <u,1,u,u>: Cost 0 copy LHS
+ 1544110154U, // <u,2,0,0>: Cost 2 vext2 <0,0,u,2>, <0,0,u,2>
+ 1545437286U, // <u,2,0,1>: Cost 2 vext2 <0,2,u,2>, LHS
+ 1545437420U, // <u,2,0,2>: Cost 2 vext2 <0,2,u,2>, <0,2,u,2>
+ 2685232589U, // <u,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
+ 2619179346U, // <u,2,0,4>: Cost 3 vext2 <0,2,u,2>, <0,4,1,5>
+ 2712069606U, // <u,2,0,5>: Cost 3 vext3 RHS, <2,0,5,7>
+ 2689877484U, // <u,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
+ 2659656273U, // <u,2,0,7>: Cost 3 vext2 <7,0,u,2>, <0,7,2,u>
+ 1545437853U, // <u,2,0,u>: Cost 2 vext2 <0,2,u,2>, LHS
+ 1550082851U, // <u,2,1,0>: Cost 2 vext2 <1,0,u,2>, <1,0,u,2>
+ 2619179828U, // <u,2,1,1>: Cost 3 vext2 <0,2,u,2>, <1,1,1,1>
+ 2619179926U, // <u,2,1,2>: Cost 3 vext2 <0,2,u,2>, <1,2,3,0>
+ 2685232671U, // <u,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
+ 2555604278U, // <u,2,1,4>: Cost 3 vext1 <0,u,2,1>, RHS
+ 2619180176U, // <u,2,1,5>: Cost 3 vext2 <0,2,u,2>, <1,5,3,7>
+ 2689877564U, // <u,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
+ 2602718850U, // <u,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
+ 1158703235U, // <u,2,1,u>: Cost 2 vrev <2,u,u,1>
+ 1481867366U, // <u,2,2,0>: Cost 2 vext1 <0,u,2,2>, LHS
+ 2555609846U, // <u,2,2,1>: Cost 3 vext1 <0,u,2,2>, <1,0,3,2>
+ 269271142U, // <u,2,2,2>: Cost 1 vdup2 LHS
+ 1611490930U, // <u,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
+ 1481870646U, // <u,2,2,4>: Cost 2 vext1 <0,u,2,2>, RHS
+ 2689877640U, // <u,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
+ 2619180986U, // <u,2,2,6>: Cost 3 vext2 <0,2,u,2>, <2,6,3,7>
+ 2593436837U, // <u,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
+ 269271142U, // <u,2,2,u>: Cost 1 vdup2 LHS
+ 408134301U, // <u,2,3,0>: Cost 1 vext1 LHS, LHS
+ 1481876214U, // <u,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 1481877096U, // <u,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
+ 1880326246U, // <u,2,3,3>: Cost 2 vzipr LHS, LHS
+ 408137014U, // <u,2,3,4>: Cost 1 vext1 LHS, RHS
+ 1529654992U, // <u,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
+ 1529655802U, // <u,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1529656314U, // <u,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 408139566U, // <u,2,3,u>: Cost 1 vext1 LHS, LHS
+ 1567853468U, // <u,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
+ 2561598362U, // <u,2,4,1>: Cost 3 vext1 <1,u,2,4>, <1,2,3,4>
+ 2555627214U, // <u,2,4,2>: Cost 3 vext1 <0,u,2,4>, <2,3,4,5>
+ 2685232918U, // <u,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
+ 2555628854U, // <u,2,4,4>: Cost 3 vext1 <0,u,2,4>, RHS
+ 1545440566U, // <u,2,4,5>: Cost 2 vext2 <0,2,u,2>, RHS
+ 1571982740U, // <u,2,4,6>: Cost 2 vext2 <4,6,u,2>, <4,6,u,2>
+ 2592125957U, // <u,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
+ 1545440809U, // <u,2,4,u>: Cost 2 vext2 <0,2,u,2>, RHS
+ 2555633766U, // <u,2,5,0>: Cost 3 vext1 <0,u,2,5>, LHS
+ 2561606550U, // <u,2,5,1>: Cost 3 vext1 <1,u,2,5>, <1,2,3,0>
+ 2689877856U, // <u,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
+ 2685233000U, // <u,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
+ 1158441059U, // <u,2,5,4>: Cost 2 vrev <2,u,4,5>
+ 2645725188U, // <u,2,5,5>: Cost 3 vext2 <4,6,u,2>, <5,5,5,5>
+ 2689877892U, // <u,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
+ 2823900470U, // <u,2,5,7>: Cost 3 vuzpr <0,u,0,2>, RHS
+ 1158736007U, // <u,2,5,u>: Cost 2 vrev <2,u,u,5>
+ 1481900134U, // <u,2,6,0>: Cost 2 vext1 <0,u,2,6>, LHS
+ 2555642614U, // <u,2,6,1>: Cost 3 vext1 <0,u,2,6>, <1,0,3,2>
+ 2555643496U, // <u,2,6,2>: Cost 3 vext1 <0,u,2,6>, <2,2,2,2>
+ 1611491258U, // <u,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
+ 1481903414U, // <u,2,6,4>: Cost 2 vext1 <0,u,2,6>, RHS
+ 2689877964U, // <u,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
+ 2689877973U, // <u,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
+ 2645726030U, // <u,2,6,7>: Cost 3 vext2 <4,6,u,2>, <6,7,0,1>
+ 1611933671U, // <u,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
+ 1585919033U, // <u,2,7,0>: Cost 2 vext2 <7,0,u,2>, <7,0,u,2>
+ 2573566710U, // <u,2,7,1>: Cost 3 vext1 <3,u,2,7>, <1,0,3,2>
+ 2567596115U, // <u,2,7,2>: Cost 3 vext1 <2,u,2,7>, <2,u,2,7>
+ 1906901094U, // <u,2,7,3>: Cost 2 vzipr RHS, LHS
+ 2555653430U, // <u,2,7,4>: Cost 3 vext1 <0,u,2,7>, RHS
+ 2800080230U, // <u,2,7,5>: Cost 3 vuzpl LHS, <7,4,5,6>
+ 2980643164U, // <u,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
+ 2645726828U, // <u,2,7,7>: Cost 3 vext2 <4,6,u,2>, <7,7,7,7>
+ 1906901099U, // <u,2,7,u>: Cost 2 vzipr RHS, LHS
+ 408175266U, // <u,2,u,0>: Cost 1 vext1 LHS, LHS
+ 1545443118U, // <u,2,u,1>: Cost 2 vext2 <0,2,u,2>, LHS
+ 269271142U, // <u,2,u,2>: Cost 1 vdup2 LHS
+ 1611491416U, // <u,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
+ 408177974U, // <u,2,u,4>: Cost 1 vext1 LHS, RHS
+ 1545443482U, // <u,2,u,5>: Cost 2 vext2 <0,2,u,2>, RHS
+ 1726339226U, // <u,2,u,6>: Cost 2 vuzpl LHS, RHS
+ 1529697274U, // <u,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
+ 408180526U, // <u,2,u,u>: Cost 1 vext1 LHS, LHS
+ 1544781824U, // <u,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
+ 471040156U, // <u,3,0,1>: Cost 1 vext2 LHS, LHS
+ 1544781988U, // <u,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 2618523900U, // <u,3,0,3>: Cost 3 vext2 LHS, <0,3,1,0>
+ 1544782162U, // <u,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 2238188352U, // <u,3,0,5>: Cost 3 vrev <3,u,5,0>
+ 2623169023U, // <u,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
+ 2238335826U, // <u,3,0,7>: Cost 3 vrev <3,u,7,0>
+ 471040669U, // <u,3,0,u>: Cost 1 vext2 LHS, LHS
+ 1544782582U, // <u,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 1544782644U, // <u,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
+ 1544782742U, // <u,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
+ 1544782808U, // <u,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 2618524733U, // <u,3,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
+ 1544782992U, // <u,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 2618524897U, // <u,3,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
+ 2703517987U, // <u,3,1,7>: Cost 3 vext3 <3,1,7,u>, <3,1,7,u>
+ 1544783213U, // <u,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
+ 1529716838U, // <u,3,2,0>: Cost 2 vext1 <u,u,3,2>, LHS
+ 1164167966U, // <u,3,2,1>: Cost 2 vrev <3,u,1,2>
+ 1544783464U, // <u,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
+ 1544783526U, // <u,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
+ 1529720118U, // <u,3,2,4>: Cost 2 vext1 <u,u,3,2>, RHS
+ 2618525544U, // <u,3,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544783802U, // <u,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 2704181620U, // <u,3,2,7>: Cost 3 vext3 <3,2,7,u>, <3,2,7,u>
+ 1544783931U, // <u,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
+ 1544784022U, // <u,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
+ 1487922559U, // <u,3,3,1>: Cost 2 vext1 <1,u,3,3>, <1,u,3,3>
+ 1493895256U, // <u,3,3,2>: Cost 2 vext1 <2,u,3,3>, <2,u,3,3>
+ 336380006U, // <u,3,3,3>: Cost 1 vdup3 LHS
+ 1544784386U, // <u,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
+ 2824054478U, // <u,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
+ 2238286668U, // <u,3,3,6>: Cost 3 vrev <3,u,6,3>
+ 2954069136U, // <u,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
+ 336380006U, // <u,3,3,u>: Cost 1 vdup3 LHS
+ 1487929446U, // <u,3,4,0>: Cost 2 vext1 <1,u,3,4>, LHS
+ 1487930752U, // <u,3,4,1>: Cost 2 vext1 <1,u,3,4>, <1,u,3,4>
+ 2623171644U, // <u,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
+ 2561673366U, // <u,3,4,3>: Cost 3 vext1 <1,u,3,4>, <3,0,1,2>
+ 1487932726U, // <u,3,4,4>: Cost 2 vext1 <1,u,3,4>, RHS
+ 471043382U, // <u,3,4,5>: Cost 1 vext2 LHS, RHS
+ 1592561012U, // <u,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
+ 2238368598U, // <u,3,4,7>: Cost 3 vrev <3,u,7,4>
+ 471043625U, // <u,3,4,u>: Cost 1 vext2 LHS, RHS
+ 2555707494U, // <u,3,5,0>: Cost 3 vext1 <0,u,3,5>, LHS
+ 1574645465U, // <u,3,5,1>: Cost 2 vext2 <5,1,u,3>, <5,1,u,3>
+ 2567653106U, // <u,3,5,2>: Cost 3 vext1 <2,u,3,5>, <2,3,u,5>
+ 2555709954U, // <u,3,5,3>: Cost 3 vext1 <0,u,3,5>, <3,4,5,6>
+ 1592561606U, // <u,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
+ 1592561668U, // <u,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
+ 1592561762U, // <u,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
+ 1750314294U, // <u,3,5,7>: Cost 2 vuzpr LHS, RHS
+ 1750314295U, // <u,3,5,u>: Cost 2 vuzpr LHS, RHS
+ 2623172897U, // <u,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
+ 2561688962U, // <u,3,6,1>: Cost 3 vext1 <1,u,3,6>, <1,u,3,6>
+ 1581281795U, // <u,3,6,2>: Cost 2 vext2 <6,2,u,3>, <6,2,u,3>
+ 2706541204U, // <u,3,6,3>: Cost 3 vext3 <3,6,3,u>, <3,6,3,u>
+ 2623173261U, // <u,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
+ 1164495686U, // <u,3,6,5>: Cost 2 vrev <3,u,5,6>
+ 1592562488U, // <u,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
+ 1592562510U, // <u,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
+ 1164716897U, // <u,3,6,u>: Cost 2 vrev <3,u,u,6>
+ 1487954022U, // <u,3,7,0>: Cost 2 vext1 <1,u,3,7>, LHS
+ 1487955331U, // <u,3,7,1>: Cost 2 vext1 <1,u,3,7>, <1,u,3,7>
+ 1493928028U, // <u,3,7,2>: Cost 2 vext1 <2,u,3,7>, <2,u,3,7>
+ 2561697942U, // <u,3,7,3>: Cost 3 vext1 <1,u,3,7>, <3,0,1,2>
+ 1487957302U, // <u,3,7,4>: Cost 2 vext1 <1,u,3,7>, RHS
+ 2707352311U, // <u,3,7,5>: Cost 3 vext3 <3,7,5,u>, <3,7,5,u>
+ 2655024623U, // <u,3,7,6>: Cost 3 vext2 <6,2,u,3>, <7,6,2,u>
+ 1592563308U, // <u,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
+ 1487959854U, // <u,3,7,u>: Cost 2 vext1 <1,u,3,7>, LHS
+ 1544787667U, // <u,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
+ 471045934U, // <u,3,u,1>: Cost 1 vext2 LHS, LHS
+ 1549432709U, // <u,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
+ 336380006U, // <u,3,u,3>: Cost 1 vdup3 LHS
+ 1544788031U, // <u,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
+ 471046298U, // <u,3,u,5>: Cost 1 vext2 LHS, RHS
+ 1549433040U, // <u,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
+ 1750314537U, // <u,3,u,7>: Cost 2 vuzpr LHS, RHS
+ 471046501U, // <u,3,u,u>: Cost 1 vext2 LHS, LHS
+ 2625167360U, // <u,4,0,0>: Cost 3 vext2 <1,2,u,4>, <0,0,0,0>
+ 1551425638U, // <u,4,0,1>: Cost 2 vext2 <1,2,u,4>, LHS
+ 2619195630U, // <u,4,0,2>: Cost 3 vext2 <0,2,u,4>, <0,2,u,4>
+ 2619343104U, // <u,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
+ 2625167698U, // <u,4,0,4>: Cost 3 vext2 <1,2,u,4>, <0,4,1,5>
+ 1638329234U, // <u,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
+ 1638329244U, // <u,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
+ 3787803556U, // <u,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
+ 1551426205U, // <u,4,0,u>: Cost 2 vext2 <1,2,u,4>, LHS
+ 2555748454U, // <u,4,1,0>: Cost 3 vext1 <0,u,4,1>, LHS
+ 2625168180U, // <u,4,1,1>: Cost 3 vext2 <1,2,u,4>, <1,1,1,1>
+ 1551426503U, // <u,4,1,2>: Cost 2 vext2 <1,2,u,4>, <1,2,u,4>
+ 2625168344U, // <u,4,1,3>: Cost 3 vext2 <1,2,u,4>, <1,3,1,3>
+ 2555751734U, // <u,4,1,4>: Cost 3 vext1 <0,u,4,1>, RHS
+ 1860554038U, // <u,4,1,5>: Cost 2 vzipl LHS, RHS
+ 2689879022U, // <u,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
+ 2592248852U, // <u,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
+ 1555408301U, // <u,4,1,u>: Cost 2 vext2 <1,u,u,4>, <1,u,u,4>
+ 2555756646U, // <u,4,2,0>: Cost 3 vext1 <0,u,4,2>, LHS
+ 2625168943U, // <u,4,2,1>: Cost 3 vext2 <1,2,u,4>, <2,1,4,u>
+ 2625169000U, // <u,4,2,2>: Cost 3 vext2 <1,2,u,4>, <2,2,2,2>
+ 2619197134U, // <u,4,2,3>: Cost 3 vext2 <0,2,u,4>, <2,3,4,5>
+ 2555759926U, // <u,4,2,4>: Cost 3 vext1 <0,u,4,2>, RHS
+ 2712071222U, // <u,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
+ 1994771766U, // <u,4,2,6>: Cost 2 vtrnl LHS, RHS
+ 2592257045U, // <u,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
+ 1994771784U, // <u,4,2,u>: Cost 2 vtrnl LHS, RHS
+ 2625169558U, // <u,4,3,0>: Cost 3 vext2 <1,2,u,4>, <3,0,1,2>
+ 2567709594U, // <u,4,3,1>: Cost 3 vext1 <2,u,4,3>, <1,2,3,4>
+ 2567710817U, // <u,4,3,2>: Cost 3 vext1 <2,u,4,3>, <2,u,4,3>
+ 2625169820U, // <u,4,3,3>: Cost 3 vext2 <1,2,u,4>, <3,3,3,3>
+ 2625169922U, // <u,4,3,4>: Cost 3 vext2 <1,2,u,4>, <3,4,5,6>
+ 2954069710U, // <u,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
+ 2954068172U, // <u,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
+ 3903849472U, // <u,4,3,7>: Cost 4 vuzpr <1,u,3,4>, <1,3,5,7>
+ 2954068174U, // <u,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
+ 1505919078U, // <u,4,4,0>: Cost 2 vext1 <4,u,4,4>, LHS
+ 2567717831U, // <u,4,4,1>: Cost 3 vext1 <2,u,4,4>, <1,2,u,4>
+ 2567719010U, // <u,4,4,2>: Cost 3 vext1 <2,u,4,4>, <2,u,4,4>
+ 2570373542U, // <u,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
+ 161926454U, // <u,4,4,4>: Cost 1 vdup0 RHS
+ 1551428918U, // <u,4,4,5>: Cost 2 vext2 <1,2,u,4>, RHS
+ 1638329572U, // <u,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
+ 2594927963U, // <u,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
+ 161926454U, // <u,4,4,u>: Cost 1 vdup0 RHS
+ 1493983334U, // <u,4,5,0>: Cost 2 vext1 <2,u,4,5>, LHS
+ 2689879301U, // <u,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
+ 1493985379U, // <u,4,5,2>: Cost 2 vext1 <2,u,4,5>, <2,u,4,5>
+ 2567727254U, // <u,4,5,3>: Cost 3 vext1 <2,u,4,5>, <3,0,1,2>
+ 1493986614U, // <u,4,5,4>: Cost 2 vext1 <2,u,4,5>, RHS
+ 1863535926U, // <u,4,5,5>: Cost 2 vzipl RHS, RHS
+ 537750838U, // <u,4,5,6>: Cost 1 vext3 LHS, RHS
+ 2830110006U, // <u,4,5,7>: Cost 3 vuzpr <1,u,3,4>, RHS
+ 537750856U, // <u,4,5,u>: Cost 1 vext3 LHS, RHS
+ 1482047590U, // <u,4,6,0>: Cost 2 vext1 <0,u,4,6>, LHS
+ 2555790070U, // <u,4,6,1>: Cost 3 vext1 <0,u,4,6>, <1,0,3,2>
+ 2555790952U, // <u,4,6,2>: Cost 3 vext1 <0,u,4,6>, <2,2,2,2>
+ 2555791510U, // <u,4,6,3>: Cost 3 vext1 <0,u,4,6>, <3,0,1,2>
+ 1482050870U, // <u,4,6,4>: Cost 2 vext1 <0,u,4,6>, RHS
+ 2689879422U, // <u,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
+ 1997753654U, // <u,4,6,6>: Cost 2 vtrnl RHS, RHS
+ 2712071562U, // <u,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
+ 1482053422U, // <u,4,6,u>: Cost 2 vext1 <0,u,4,6>, LHS
+ 2567741542U, // <u,4,7,0>: Cost 3 vext1 <2,u,4,7>, LHS
+ 2567742362U, // <u,4,7,1>: Cost 3 vext1 <2,u,4,7>, <1,2,3,4>
+ 2567743589U, // <u,4,7,2>: Cost 3 vext1 <2,u,4,7>, <2,u,4,7>
+ 2573716286U, // <u,4,7,3>: Cost 3 vext1 <3,u,4,7>, <3,u,4,7>
+ 2567744822U, // <u,4,7,4>: Cost 3 vext1 <2,u,4,7>, RHS
+ 2712071624U, // <u,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
+ 96808489U, // <u,4,7,6>: Cost 1 vrev RHS
+ 2651715180U, // <u,4,7,7>: Cost 3 vext2 <5,6,u,4>, <7,7,7,7>
+ 96955963U, // <u,4,7,u>: Cost 1 vrev RHS
+ 1482063974U, // <u,4,u,0>: Cost 2 vext1 <0,u,4,u>, LHS
+ 1551431470U, // <u,4,u,1>: Cost 2 vext2 <1,2,u,4>, LHS
+ 1494009958U, // <u,4,u,2>: Cost 2 vext1 <2,u,4,u>, <2,u,4,u>
+ 2555807894U, // <u,4,u,3>: Cost 3 vext1 <0,u,4,u>, <3,0,1,2>
+ 161926454U, // <u,4,u,4>: Cost 1 vdup0 RHS
+ 1551431834U, // <u,4,u,5>: Cost 2 vext2 <1,2,u,4>, RHS
+ 537751081U, // <u,4,u,6>: Cost 1 vext3 LHS, RHS
+ 2830110249U, // <u,4,u,7>: Cost 3 vuzpr <1,u,3,4>, RHS
+ 537751099U, // <u,4,u,u>: Cost 1 vext3 LHS, RHS
+ 2631811072U, // <u,5,0,0>: Cost 3 vext2 <2,3,u,5>, <0,0,0,0>
+ 1558069350U, // <u,5,0,1>: Cost 2 vext2 <2,3,u,5>, LHS
+ 2619203823U, // <u,5,0,2>: Cost 3 vext2 <0,2,u,5>, <0,2,u,5>
+ 2619867456U, // <u,5,0,3>: Cost 3 vext2 <0,3,u,5>, <0,3,u,5>
+ 1546273106U, // <u,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
+ 2733010539U, // <u,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
+ 2597622682U, // <u,5,0,6>: Cost 3 vext1 <7,u,5,0>, <6,7,u,5>
+ 1176539396U, // <u,5,0,7>: Cost 2 vrev <5,u,7,0>
+ 1558069917U, // <u,5,0,u>: Cost 2 vext2 <2,3,u,5>, LHS
+ 1505968230U, // <u,5,1,0>: Cost 2 vext1 <4,u,5,1>, LHS
+ 2624512887U, // <u,5,1,1>: Cost 3 vext2 <1,1,u,5>, <1,1,u,5>
+ 2631811990U, // <u,5,1,2>: Cost 3 vext2 <2,3,u,5>, <1,2,3,0>
+ 2618541056U, // <u,5,1,3>: Cost 3 vext2 <0,1,u,5>, <1,3,5,7>
+ 1505971510U, // <u,5,1,4>: Cost 2 vext1 <4,u,5,1>, RHS
+ 2627167419U, // <u,5,1,5>: Cost 3 vext2 <1,5,u,5>, <1,5,u,5>
+ 2579714554U, // <u,5,1,6>: Cost 3 vext1 <4,u,5,1>, <6,2,7,3>
+ 1638330064U, // <u,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
+ 1638477529U, // <u,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
+ 2561802342U, // <u,5,2,0>: Cost 3 vext1 <1,u,5,2>, LHS
+ 2561803264U, // <u,5,2,1>: Cost 3 vext1 <1,u,5,2>, <1,3,5,7>
+ 2631149217U, // <u,5,2,2>: Cost 3 vext2 <2,2,u,5>, <2,2,u,5>
+ 1558071026U, // <u,5,2,3>: Cost 2 vext2 <2,3,u,5>, <2,3,u,5>
+ 2561805622U, // <u,5,2,4>: Cost 3 vext1 <1,u,5,2>, RHS
+ 2714062607U, // <u,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
+ 2631813050U, // <u,5,2,6>: Cost 3 vext2 <2,3,u,5>, <2,6,3,7>
+ 3092335926U, // <u,5,2,7>: Cost 3 vtrnr <0,u,0,2>, RHS
+ 1561389191U, // <u,5,2,u>: Cost 2 vext2 <2,u,u,5>, <2,u,u,5>
+ 2561810534U, // <u,5,3,0>: Cost 3 vext1 <1,u,5,3>, LHS
+ 2561811857U, // <u,5,3,1>: Cost 3 vext1 <1,u,5,3>, <1,u,5,3>
+ 2631813474U, // <u,5,3,2>: Cost 3 vext2 <2,3,u,5>, <3,2,5,u>
+ 2631813532U, // <u,5,3,3>: Cost 3 vext2 <2,3,u,5>, <3,3,3,3>
+ 2619869698U, // <u,5,3,4>: Cost 3 vext2 <0,3,u,5>, <3,4,5,6>
+ 3001847002U, // <u,5,3,5>: Cost 3 vzipr LHS, <4,4,5,5>
+ 2954070530U, // <u,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 2018749750U, // <u,5,3,7>: Cost 2 vtrnr LHS, RHS
+ 2018749751U, // <u,5,3,u>: Cost 2 vtrnr LHS, RHS
+ 2573762662U, // <u,5,4,0>: Cost 3 vext1 <3,u,5,4>, LHS
+ 2620017634U, // <u,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
+ 2573764338U, // <u,5,4,2>: Cost 3 vext1 <3,u,5,4>, <2,3,u,5>
+ 2573765444U, // <u,5,4,3>: Cost 3 vext1 <3,u,5,4>, <3,u,5,4>
+ 1570680053U, // <u,5,4,4>: Cost 2 vext2 <4,4,u,5>, <4,4,u,5>
+ 1558072630U, // <u,5,4,5>: Cost 2 vext2 <2,3,u,5>, RHS
+ 2645749143U, // <u,5,4,6>: Cost 3 vext2 <4,6,u,5>, <4,6,u,5>
+ 1638330310U, // <u,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
+ 1558072873U, // <u,5,4,u>: Cost 2 vext2 <2,3,u,5>, RHS
+ 1506000998U, // <u,5,5,0>: Cost 2 vext1 <4,u,5,5>, LHS
+ 2561827984U, // <u,5,5,1>: Cost 3 vext1 <1,u,5,5>, <1,5,3,7>
+ 2579744360U, // <u,5,5,2>: Cost 3 vext1 <4,u,5,5>, <2,2,2,2>
+ 2579744918U, // <u,5,5,3>: Cost 3 vext1 <4,u,5,5>, <3,0,1,2>
+ 1506004278U, // <u,5,5,4>: Cost 2 vext1 <4,u,5,5>, RHS
+ 229035318U, // <u,5,5,5>: Cost 1 vdup1 RHS
+ 2712072206U, // <u,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
+ 1638330392U, // <u,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
+ 229035318U, // <u,5,5,u>: Cost 1 vdup1 RHS
+ 1500037222U, // <u,5,6,0>: Cost 2 vext1 <3,u,5,6>, LHS
+ 2561836436U, // <u,5,6,1>: Cost 3 vext1 <1,u,5,6>, <1,u,5,6>
+ 2567809133U, // <u,5,6,2>: Cost 3 vext1 <2,u,5,6>, <2,u,5,6>
+ 1500040006U, // <u,5,6,3>: Cost 2 vext1 <3,u,5,6>, <3,u,5,6>
+ 1500040502U, // <u,5,6,4>: Cost 2 vext1 <3,u,5,6>, RHS
+ 2714062935U, // <u,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
+ 2712072288U, // <u,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
+ 27705344U, // <u,5,6,7>: Cost 0 copy RHS
+ 27705344U, // <u,5,6,u>: Cost 0 copy RHS
+ 1488101478U, // <u,5,7,0>: Cost 2 vext1 <1,u,5,7>, LHS
+ 1488102805U, // <u,5,7,1>: Cost 2 vext1 <1,u,5,7>, <1,u,5,7>
+ 2561844840U, // <u,5,7,2>: Cost 3 vext1 <1,u,5,7>, <2,2,2,2>
+ 2561845398U, // <u,5,7,3>: Cost 3 vext1 <1,u,5,7>, <3,0,1,2>
+ 1488104758U, // <u,5,7,4>: Cost 2 vext1 <1,u,5,7>, RHS
+ 1638330536U, // <u,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
+ 2712072362U, // <u,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
+ 2042965302U, // <u,5,7,7>: Cost 2 vtrnr RHS, RHS
+ 1488107310U, // <u,5,7,u>: Cost 2 vext1 <1,u,5,7>, LHS
+ 1488109670U, // <u,5,u,0>: Cost 2 vext1 <1,u,5,u>, LHS
+ 1488110998U, // <u,5,u,1>: Cost 2 vext1 <1,u,5,u>, <1,u,5,u>
+ 2561853032U, // <u,5,u,2>: Cost 3 vext1 <1,u,5,u>, <2,2,2,2>
+ 1500056392U, // <u,5,u,3>: Cost 2 vext1 <3,u,5,u>, <3,u,5,u>
+ 1488112950U, // <u,5,u,4>: Cost 2 vext1 <1,u,5,u>, RHS
+ 229035318U, // <u,5,u,5>: Cost 1 vdup1 RHS
+ 2954111490U, // <u,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
+ 27705344U, // <u,5,u,7>: Cost 0 copy RHS
+ 27705344U, // <u,5,u,u>: Cost 0 copy RHS
+ 2619211776U, // <u,6,0,0>: Cost 3 vext2 <0,2,u,6>, <0,0,0,0>
+ 1545470054U, // <u,6,0,1>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1545470192U, // <u,6,0,2>: Cost 2 vext2 <0,2,u,6>, <0,2,u,6>
+ 2255958969U, // <u,6,0,3>: Cost 3 vrev <6,u,3,0>
+ 1546797458U, // <u,6,0,4>: Cost 2 vext2 <0,4,u,6>, <0,4,u,6>
+ 2720624971U, // <u,6,0,5>: Cost 3 vext3 <6,0,5,u>, <6,0,5,u>
+ 2256180180U, // <u,6,0,6>: Cost 3 vrev <6,u,6,0>
+ 2960682294U, // <u,6,0,7>: Cost 3 vzipr <1,2,u,0>, RHS
+ 1545470621U, // <u,6,0,u>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1182004127U, // <u,6,1,0>: Cost 2 vrev <6,u,0,1>
+ 2619212596U, // <u,6,1,1>: Cost 3 vext2 <0,2,u,6>, <1,1,1,1>
+ 2619212694U, // <u,6,1,2>: Cost 3 vext2 <0,2,u,6>, <1,2,3,0>
+ 2619212760U, // <u,6,1,3>: Cost 3 vext2 <0,2,u,6>, <1,3,1,3>
+ 2626511979U, // <u,6,1,4>: Cost 3 vext2 <1,4,u,6>, <1,4,u,6>
+ 2619212944U, // <u,6,1,5>: Cost 3 vext2 <0,2,u,6>, <1,5,3,7>
+ 2714063264U, // <u,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
+ 2967326006U, // <u,6,1,7>: Cost 3 vzipr <2,3,u,1>, RHS
+ 1182594023U, // <u,6,1,u>: Cost 2 vrev <6,u,u,1>
+ 1506050150U, // <u,6,2,0>: Cost 2 vext1 <4,u,6,2>, LHS
+ 2579792630U, // <u,6,2,1>: Cost 3 vext1 <4,u,6,2>, <1,0,3,2>
+ 2619213416U, // <u,6,2,2>: Cost 3 vext2 <0,2,u,6>, <2,2,2,2>
+ 2619213478U, // <u,6,2,3>: Cost 3 vext2 <0,2,u,6>, <2,3,0,1>
+ 1506053430U, // <u,6,2,4>: Cost 2 vext1 <4,u,6,2>, RHS
+ 2633148309U, // <u,6,2,5>: Cost 3 vext2 <2,5,u,6>, <2,5,u,6>
+ 2619213754U, // <u,6,2,6>: Cost 3 vext2 <0,2,u,6>, <2,6,3,7>
+ 1638330874U, // <u,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
+ 1638478339U, // <u,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
+ 2619213974U, // <u,6,3,0>: Cost 3 vext2 <0,2,u,6>, <3,0,1,2>
+ 2255836074U, // <u,6,3,1>: Cost 3 vrev <6,u,1,3>
+ 2255909811U, // <u,6,3,2>: Cost 3 vrev <6,u,2,3>
+ 2619214236U, // <u,6,3,3>: Cost 3 vext2 <0,2,u,6>, <3,3,3,3>
+ 1564715549U, // <u,6,3,4>: Cost 2 vext2 <3,4,u,6>, <3,4,u,6>
+ 2639121006U, // <u,6,3,5>: Cost 3 vext2 <3,5,u,6>, <3,5,u,6>
+ 3001847012U, // <u,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
+ 1880329526U, // <u,6,3,7>: Cost 2 vzipr LHS, RHS
+ 1880329527U, // <u,6,3,u>: Cost 2 vzipr LHS, RHS
+ 2567864422U, // <u,6,4,0>: Cost 3 vext1 <2,u,6,4>, LHS
+ 2733011558U, // <u,6,4,1>: Cost 3 vext3 LHS, <6,4,1,3>
+ 2567866484U, // <u,6,4,2>: Cost 3 vext1 <2,u,6,4>, <2,u,6,4>
+ 2638458005U, // <u,6,4,3>: Cost 3 vext2 <3,4,u,6>, <4,3,6,u>
+ 1570540772U, // <u,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
+ 1545473334U, // <u,6,4,5>: Cost 2 vext2 <0,2,u,6>, RHS
+ 1572015512U, // <u,6,4,6>: Cost 2 vext2 <4,6,u,6>, <4,6,u,6>
+ 2960715062U, // <u,6,4,7>: Cost 3 vzipr <1,2,u,4>, RHS
+ 1545473577U, // <u,6,4,u>: Cost 2 vext2 <0,2,u,6>, RHS
+ 2567872614U, // <u,6,5,0>: Cost 3 vext1 <2,u,6,5>, LHS
+ 2645757648U, // <u,6,5,1>: Cost 3 vext2 <4,6,u,6>, <5,1,7,3>
+ 2567874490U, // <u,6,5,2>: Cost 3 vext1 <2,u,6,5>, <2,6,3,7>
+ 2576501250U, // <u,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
+ 1576660943U, // <u,6,5,4>: Cost 2 vext2 <5,4,u,6>, <5,4,u,6>
+ 2645757956U, // <u,6,5,5>: Cost 3 vext2 <4,6,u,6>, <5,5,5,5>
+ 2645758050U, // <u,6,5,6>: Cost 3 vext2 <4,6,u,6>, <5,6,7,0>
+ 2824080694U, // <u,6,5,7>: Cost 3 vuzpr <0,u,2,6>, RHS
+ 1182626795U, // <u,6,5,u>: Cost 2 vrev <6,u,u,5>
+ 1506082918U, // <u,6,6,0>: Cost 2 vext1 <4,u,6,6>, LHS
+ 2579825398U, // <u,6,6,1>: Cost 3 vext1 <4,u,6,6>, <1,0,3,2>
+ 2645758458U, // <u,6,6,2>: Cost 3 vext2 <4,6,u,6>, <6,2,7,3>
+ 2579826838U, // <u,6,6,3>: Cost 3 vext1 <4,u,6,6>, <3,0,1,2>
+ 1506086198U, // <u,6,6,4>: Cost 2 vext1 <4,u,6,6>, RHS
+ 2579828432U, // <u,6,6,5>: Cost 3 vext1 <4,u,6,6>, <5,1,7,3>
+ 296144182U, // <u,6,6,6>: Cost 1 vdup2 RHS
+ 1638331202U, // <u,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
+ 296144182U, // <u,6,6,u>: Cost 1 vdup2 RHS
+ 432349286U, // <u,6,7,0>: Cost 1 vext1 RHS, LHS
+ 1506091766U, // <u,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
+ 1506092648U, // <u,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1506093206U, // <u,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 432352809U, // <u,6,7,4>: Cost 1 vext1 RHS, RHS
+ 1506094800U, // <u,6,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 1506095610U, // <u,6,7,6>: Cost 2 vext1 RHS, <6,2,7,3>
+ 1906904374U, // <u,6,7,7>: Cost 2 vzipr RHS, RHS
+ 432355118U, // <u,6,7,u>: Cost 1 vext1 RHS, LHS
+ 432357478U, // <u,6,u,0>: Cost 1 vext1 RHS, LHS
+ 1545475886U, // <u,6,u,1>: Cost 2 vext2 <0,2,u,6>, LHS
+ 1506100840U, // <u,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
+ 1506101398U, // <u,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
+ 432361002U, // <u,6,u,4>: Cost 1 vext1 RHS, RHS
+ 1545476250U, // <u,6,u,5>: Cost 2 vext2 <0,2,u,6>, RHS
+ 296144182U, // <u,6,u,6>: Cost 1 vdup2 RHS
+ 1880370486U, // <u,6,u,7>: Cost 2 vzipr LHS, RHS
+ 432363310U, // <u,6,u,u>: Cost 1 vext1 RHS, LHS
+ 1571356672U, // <u,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
+ 497614950U, // <u,7,0,1>: Cost 1 vext2 RHS, LHS
+ 1571356836U, // <u,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
+ 2573880146U, // <u,7,0,3>: Cost 3 vext1 <3,u,7,0>, <3,u,7,0>
+ 1571357010U, // <u,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
+ 1512083716U, // <u,7,0,5>: Cost 2 vext1 <5,u,7,0>, <5,u,7,0>
+ 2621874741U, // <u,7,0,6>: Cost 3 vext2 <0,6,u,7>, <0,6,u,7>
+ 2585826298U, // <u,7,0,7>: Cost 3 vext1 <5,u,7,0>, <7,0,1,2>
+ 497615517U, // <u,7,0,u>: Cost 1 vext2 RHS, LHS
+ 1571357430U, // <u,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
+ 1571357492U, // <u,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
+ 1571357590U, // <u,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
+ 1552114715U, // <u,7,1,3>: Cost 2 vext2 <1,3,u,7>, <1,3,u,7>
+ 2573888822U, // <u,7,1,4>: Cost 3 vext1 <3,u,7,1>, RHS
+ 1553441981U, // <u,7,1,5>: Cost 2 vext2 <1,5,u,7>, <1,5,u,7>
+ 2627847438U, // <u,7,1,6>: Cost 3 vext2 <1,6,u,7>, <1,6,u,7>
+ 2727408775U, // <u,7,1,7>: Cost 3 vext3 <7,1,7,u>, <7,1,7,u>
+ 1555432880U, // <u,7,1,u>: Cost 2 vext2 <1,u,u,7>, <1,u,u,7>
+ 2629838337U, // <u,7,2,0>: Cost 3 vext2 <2,0,u,7>, <2,0,u,7>
+ 1188058754U, // <u,7,2,1>: Cost 2 vrev <7,u,1,2>
+ 1571358312U, // <u,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
+ 1571358374U, // <u,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
+ 2632492869U, // <u,7,2,4>: Cost 3 vext2 <2,4,u,7>, <2,4,u,7>
+ 2633156502U, // <u,7,2,5>: Cost 3 vext2 <2,5,u,7>, <2,5,u,7>
+ 1560078311U, // <u,7,2,6>: Cost 2 vext2 <2,6,u,7>, <2,6,u,7>
+ 2728072408U, // <u,7,2,7>: Cost 3 vext3 <7,2,7,u>, <7,2,7,u>
+ 1561405577U, // <u,7,2,u>: Cost 2 vext2 <2,u,u,7>, <2,u,u,7>
+ 1571358870U, // <u,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
+ 2627184913U, // <u,7,3,1>: Cost 3 vext2 <1,5,u,7>, <3,1,5,u>
+ 2633820523U, // <u,7,3,2>: Cost 3 vext2 <2,6,u,7>, <3,2,6,u>
+ 1571359132U, // <u,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
+ 1571359234U, // <u,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
+ 1512108295U, // <u,7,3,5>: Cost 2 vext1 <5,u,7,3>, <5,u,7,3>
+ 1518080992U, // <u,7,3,6>: Cost 2 vext1 <6,u,7,3>, <6,u,7,3>
+ 2640456465U, // <u,7,3,7>: Cost 3 vext2 <3,7,u,7>, <3,7,u,7>
+ 1571359518U, // <u,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
+ 1571359634U, // <u,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
+ 2573911067U, // <u,7,4,1>: Cost 3 vext1 <3,u,7,4>, <1,3,u,7>
+ 2645101622U, // <u,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
+ 2573912918U, // <u,7,4,3>: Cost 3 vext1 <3,u,7,4>, <3,u,7,4>
+ 1571359952U, // <u,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
+ 497618248U, // <u,7,4,5>: Cost 1 vext2 RHS, RHS
+ 1571360116U, // <u,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 2645102024U, // <u,7,4,7>: Cost 3 vext2 RHS, <4,7,5,0>
+ 497618473U, // <u,7,4,u>: Cost 1 vext2 RHS, RHS
+ 2645102152U, // <u,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
+ 1571360464U, // <u,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 2645102334U, // <u,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
+ 2645102447U, // <u,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
+ 1571360710U, // <u,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
+ 1571360772U, // <u,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
+ 1571360866U, // <u,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
+ 1571360936U, // <u,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
+ 1571361017U, // <u,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
+ 1530044518U, // <u,7,6,0>: Cost 2 vext1 <u,u,7,6>, LHS
+ 2645103016U, // <u,7,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
+ 1571361274U, // <u,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 2645103154U, // <u,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
+ 1530047798U, // <u,7,6,4>: Cost 2 vext1 <u,u,7,6>, RHS
+ 1188386474U, // <u,7,6,5>: Cost 2 vrev <7,u,5,6>
+ 1571361592U, // <u,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
+ 1571361614U, // <u,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
+ 1571361695U, // <u,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
+ 1571361786U, // <u,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
+ 2573935616U, // <u,7,7,1>: Cost 3 vext1 <3,u,7,7>, <1,3,5,7>
+ 2645103781U, // <u,7,7,2>: Cost 3 vext2 RHS, <7,2,2,2>
+ 2573937497U, // <u,7,7,3>: Cost 3 vext1 <3,u,7,7>, <3,u,7,7>
+ 1571362150U, // <u,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
+ 1512141067U, // <u,7,7,5>: Cost 2 vext1 <5,u,7,7>, <5,u,7,7>
+ 1518113764U, // <u,7,7,6>: Cost 2 vext1 <6,u,7,7>, <6,u,7,7>
+ 363253046U, // <u,7,7,7>: Cost 1 vdup3 RHS
+ 363253046U, // <u,7,7,u>: Cost 1 vdup3 RHS
+ 1571362515U, // <u,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
+ 497620782U, // <u,7,u,1>: Cost 1 vext2 RHS, LHS
+ 1571362693U, // <u,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
+ 1571362748U, // <u,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
+ 1571362879U, // <u,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
+ 497621146U, // <u,7,u,5>: Cost 1 vext2 RHS, RHS
+ 1571363024U, // <u,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
+ 363253046U, // <u,7,u,7>: Cost 1 vdup3 RHS
+ 497621349U, // <u,7,u,u>: Cost 1 vext2 RHS, LHS
+ 135053414U, // <u,u,0,0>: Cost 1 vdup0 LHS
+ 471081121U, // <u,u,0,1>: Cost 1 vext2 LHS, LHS
+ 1544822948U, // <u,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
+ 1616140005U, // <u,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
+ 1544823122U, // <u,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
+ 1512157453U, // <u,u,0,5>: Cost 2 vext1 <5,u,u,0>, <5,u,u,0>
+ 1662220032U, // <u,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
+ 1194457487U, // <u,u,0,7>: Cost 2 vrev <u,u,7,0>
+ 471081629U, // <u,u,0,u>: Cost 1 vext2 LHS, LHS
+ 1544823542U, // <u,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
+ 202162278U, // <u,u,1,1>: Cost 1 vdup1 LHS
+ 537753390U, // <u,u,1,2>: Cost 1 vext3 LHS, LHS
+ 1544823768U, // <u,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
+ 1494248758U, // <u,u,1,4>: Cost 2 vext1 <2,u,u,1>, RHS
+ 1544823952U, // <u,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
+ 1518138343U, // <u,u,1,6>: Cost 2 vext1 <6,u,u,1>, <6,u,u,1>
+ 1640322907U, // <u,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
+ 537753444U, // <u,u,1,u>: Cost 1 vext3 LHS, LHS
+ 1482309734U, // <u,u,2,0>: Cost 2 vext1 <0,u,u,2>, LHS
+ 1194031451U, // <u,u,2,1>: Cost 2 vrev <u,u,1,2>
+ 269271142U, // <u,u,2,2>: Cost 1 vdup2 LHS
+ 835584U, // <u,u,2,3>: Cost 0 copy LHS
+ 1482313014U, // <u,u,2,4>: Cost 2 vext1 <0,u,u,2>, RHS
+ 2618566504U, // <u,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
+ 1544824762U, // <u,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
+ 1638479788U, // <u,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
+ 835584U, // <u,u,2,u>: Cost 0 copy LHS
+ 408576723U, // <u,u,3,0>: Cost 1 vext1 LHS, LHS
+ 1482318582U, // <u,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
+ 120371557U, // <u,u,3,2>: Cost 1 vrev LHS
+ 336380006U, // <u,u,3,3>: Cost 1 vdup3 LHS
+ 408579382U, // <u,u,3,4>: Cost 1 vext1 LHS, RHS
+ 1616140271U, // <u,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
+ 1530098170U, // <u,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
+ 1880329544U, // <u,u,3,7>: Cost 2 vzipr LHS, RHS
+ 408581934U, // <u,u,3,u>: Cost 1 vext1 LHS, LHS
+ 1488298086U, // <u,u,4,0>: Cost 2 vext1 <1,u,u,4>, LHS
+ 1488299437U, // <u,u,4,1>: Cost 2 vext1 <1,u,u,4>, <1,u,u,4>
+ 1659271204U, // <u,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
+ 1194195311U, // <u,u,4,3>: Cost 2 vrev <u,u,3,4>
+ 161926454U, // <u,u,4,4>: Cost 1 vdup0 RHS
+ 471084342U, // <u,u,4,5>: Cost 1 vext2 LHS, RHS
+ 1571368308U, // <u,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
+ 1640323153U, // <u,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
+ 471084585U, // <u,u,4,u>: Cost 1 vext2 LHS, RHS
+ 1494278246U, // <u,u,5,0>: Cost 2 vext1 <2,u,u,5>, LHS
+ 1571368656U, // <u,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
+ 1494280327U, // <u,u,5,2>: Cost 2 vext1 <2,u,u,5>, <2,u,u,5>
+ 1616140415U, // <u,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
+ 1494281526U, // <u,u,5,4>: Cost 2 vext1 <2,u,u,5>, RHS
+ 229035318U, // <u,u,5,5>: Cost 1 vdup1 RHS
+ 537753754U, // <u,u,5,6>: Cost 1 vext3 LHS, RHS
+ 1750355254U, // <u,u,5,7>: Cost 2 vuzpr LHS, RHS
+ 537753772U, // <u,u,5,u>: Cost 1 vext3 LHS, RHS
+ 1482342502U, // <u,u,6,0>: Cost 2 vext1 <0,u,u,6>, LHS
+ 2556084982U, // <u,u,6,1>: Cost 3 vext1 <0,u,u,6>, <1,0,3,2>
+ 1571369466U, // <u,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
+ 1611938000U, // <u,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
+ 1482345782U, // <u,u,6,4>: Cost 2 vext1 <0,u,u,6>, RHS
+ 1194359171U, // <u,u,6,5>: Cost 2 vrev <u,u,5,6>
+ 296144182U, // <u,u,6,6>: Cost 1 vdup2 RHS
+ 27705344U, // <u,u,6,7>: Cost 0 copy RHS
+ 27705344U, // <u,u,6,u>: Cost 0 copy RHS
+ 432496742U, // <u,u,7,0>: Cost 1 vext1 RHS, LHS
+ 1488324016U, // <u,u,7,1>: Cost 2 vext1 <1,u,u,7>, <1,u,u,7>
+ 1494296713U, // <u,u,7,2>: Cost 2 vext1 <2,u,u,7>, <2,u,u,7>
+ 1906901148U, // <u,u,7,3>: Cost 2 vzipr RHS, LHS
+ 432500283U, // <u,u,7,4>: Cost 1 vext1 RHS, RHS
+ 1506242256U, // <u,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
+ 120699277U, // <u,u,7,6>: Cost 1 vrev RHS
+ 363253046U, // <u,u,7,7>: Cost 1 vdup3 RHS
+ 432502574U, // <u,u,7,u>: Cost 1 vext1 RHS, LHS
+ 408617688U, // <u,u,u,0>: Cost 1 vext1 LHS, LHS
+ 471086894U, // <u,u,u,1>: Cost 1 vext2 LHS, LHS
+ 537753957U, // <u,u,u,2>: Cost 1 vext3 LHS, LHS
+ 835584U, // <u,u,u,3>: Cost 0 copy LHS
+ 408620342U, // <u,u,u,4>: Cost 1 vext1 LHS, RHS
+ 471087258U, // <u,u,u,5>: Cost 1 vext2 LHS, RHS
+ 537753997U, // <u,u,u,6>: Cost 1 vext3 LHS, RHS
+ 27705344U, // <u,u,u,7>: Cost 0 copy RHS
+ 835584U, // <u,u,u,u>: Cost 0 copy LHS
0
};
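
The table above is the tail of the generated perfect-shuffle table: it is indexed by four base-9 lane selectors (values 0-7 plus 'u' for undef, combined as i0*9*9*9 + i1*9*9 + i2*9 + i3), and each 32-bit entry packs the data repeated in its comment into bit fields. Below is a minimal C++ sketch of the decoding, following the field layout consumed by GeneratePerfectShuffle() in ARMISelLowering.cpp (the opcode enum named in the comment lives in that file; the helper itself is illustrative only):

    #include <cstdio>

    // Decode one PerfectShuffleTable entry: a 2-bit cost field, a 4-bit
    // opcode (OP_COPY, OP_VREV, OP_VDUPn, OP_VEXTn, OP_VUZP/VZIP/VTRN
    // halves), and two 13-bit IDs naming the sub-shuffles to build first.
    static void decodePFEntry(unsigned PFEntry) {
      unsigned CostField = PFEntry >> 30;
      unsigned OpNum     = (PFEntry >> 26) & 0x0F;
      unsigned LHSID     = (PFEntry >> 13) & ((1u << 13) - 1);
      unsigned RHSID     =  PFEntry        & ((1u << 13) - 1);
      std::printf("cost-field=%u op=%u lhs-id=%u rhs-id=%u\n",
                  CostField, OpNum, LHSID, RHSID);
    }

    int main() {
      decodePFEntry(835584U);    // <u,u,u,u>: Cost 0 copy LHS  -> op 0 (OP_COPY)
      decodePFEntry(202162278U); // <u,1,1,1>: Cost 1 vdup1 LHS -> op 3 (OP_VDUP1)
      return 0;
    }
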
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
index d5bc3f6..ad51bc1 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 305b232..22d15b5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -201,6 +201,10 @@ def CPSR : ARMReg<0, "cpsr">;
def FPSCR : ARMReg<1, "fpscr">;
def ITSTATE : ARMReg<2, "itstate">;
+// Special Registers - only available in privileged mode.
+def FPSID : ARMReg<0, "fpsid">;
+def FPEXC : ARMReg<8, "fpexc">;
+
// Register classes.
//
// pc == Program Counter
@@ -256,7 +260,7 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
// restricted GPR register class. Many Thumb2 instructions allow the full
// register range for operands, but have undefined behaviours when PC
-// or SP (R13 or R15) are used. The ARM ARM refers to these operands
+// or SP (R13 or R15) are used. The ARM ISA refers to these operands
// via the BadReg() pseudo-code description.
def rGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
R7, R8, R9, R10, R11, R12, LR]> {
@@ -381,27 +385,29 @@ def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
iterator allocation_order_end(const MachineFunction &MF) const;
}];
let MethodBodies = [{
- // VFP2
+ // VFP2 / VFPv3-D16
static const unsigned ARM_DPR_VFP2[] = {
ARM::D0, ARM::D1, ARM::D2, ARM::D3,
ARM::D4, ARM::D5, ARM::D6, ARM::D7,
ARM::D8, ARM::D9, ARM::D10, ARM::D11,
ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
- // VFP3
+ // VFP3: D8-D15 are callee saved and should be allocated last.
+ // Save other low registers for use as DPR_VFP2 and DPR_8 classes.
static const unsigned ARM_DPR_VFP3[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10, ARM::D11,
- ARM::D12, ARM::D13, ARM::D14, ARM::D15,
ARM::D16, ARM::D17, ARM::D18, ARM::D19,
ARM::D20, ARM::D21, ARM::D22, ARM::D23,
ARM::D24, ARM::D25, ARM::D26, ARM::D27,
- ARM::D28, ARM::D29, ARM::D30, ARM::D31 };
+ ARM::D28, ARM::D29, ARM::D30, ARM::D31,
+ ARM::D0, ARM::D1, ARM::D2, ARM::D3,
+ ARM::D4, ARM::D5, ARM::D6, ARM::D7,
+ ARM::D8, ARM::D9, ARM::D10, ARM::D11,
+ ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
+
DPRClass::iterator
DPRClass::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
+ if (Subtarget.hasVFP3() && !Subtarget.hasD16())
return ARM_DPR_VFP3;
return ARM_DPR_VFP2;
}
@@ -410,7 +416,7 @@ def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
DPRClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
+ if (Subtarget.hasVFP3() && !Subtarget.hasD16())
return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));
else
return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
@@ -438,6 +444,29 @@ def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15]> {
let SubRegClasses = [(DPR dsub_0, dsub_1)];
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ // Q4-Q7 are callee saved and should be allocated last.
+ // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
+ static const unsigned ARM_QPR[] = {
+ ARM::Q8, ARM::Q9, ARM::Q10, ARM::Q11,
+ ARM::Q12, ARM::Q13, ARM::Q14, ARM::Q15,
+ ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
+ ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7 };
+
+ QPRClass::iterator
+ QPRClass::allocation_order_begin(const MachineFunction &MF) const {
+ return ARM_QPR;
+ }
+
+ QPRClass::iterator
+ QPRClass::allocation_order_end(const MachineFunction &MF) const {
+ return ARM_QPR + (sizeof(ARM_QPR)/sizeof(unsigned));
+ }
+ }];
}
// Subset of QPR that have 32-bit SPR subregs.
@@ -463,6 +492,27 @@ def QQPR : RegisterClass<"ARM", [v4i64],
[QQ0, QQ1, QQ2, QQ3, QQ4, QQ5, QQ6, QQ7]> {
let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
(QPR qsub_0, qsub_1)];
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ // QQ2-QQ3 are callee saved and should be allocated last.
+ // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
+ static const unsigned ARM_QQPR[] = {
+ ARM::QQ4, ARM::QQ5, ARM::QQ6, ARM::QQ7,
+ ARM::QQ0, ARM::QQ1, ARM::QQ2, ARM::QQ3 };
+
+ QQPRClass::iterator
+ QQPRClass::allocation_order_begin(const MachineFunction &MF) const {
+ return ARM_QQPR;
+ }
+
+ QQPRClass::iterator
+ QQPRClass::allocation_order_end(const MachineFunction &MF) const {
+ return ARM_QQPR + (sizeof(ARM_QQPR)/sizeof(unsigned));
+ }
+ }];
}
// Subset of QQPR that have 32-bit SPR subregs.
@@ -483,6 +533,26 @@ def QQQQPR : RegisterClass<"ARM", [v8i64],
let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
dsub_4, dsub_5, dsub_6, dsub_7),
(QPR qsub_0, qsub_1, qsub_2, qsub_3)];
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ // QQQQ1 is callee saved and should be allocated last.
+ // Save QQQQ0 for use as QPR_VFP2 and QPR_8 classes.
+ static const unsigned ARM_QQQQPR[] = {
+ ARM::QQQQ2, ARM::QQQQ3, ARM::QQQQ0, ARM::QQQQ1 };
+
+ QQQQPRClass::iterator
+ QQQQPRClass::allocation_order_begin(const MachineFunction &MF) const {
+ return ARM_QQQQPR;
+ }
+
+ QQQQPRClass::iterator
+ QQQQPRClass::allocation_order_end(const MachineFunction &MF) const {
+ return ARM_QQQQPR + (sizeof(ARM_QQQQPR)/sizeof(unsigned));
+ }
+ }];
}
// Condition code registers.
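
Every MethodBodies block added to ARMRegisterInfo.td above follows the same idiom: a statically ordered register array with the callee-saved registers moved to the back, exposed through begin/end iterators whose end pointer is computed with the sizeof element-count trick. A standalone sketch of the idiom (hypothetical register numbers; the real iterators are emitted into the generated DPRClass/QPRClass methods):

    #include <cstdio>

    // Hypothetical preference order: caller-saved registers first, so
    // short-lived values never force prologue/epilogue save-restore code.
    static const unsigned PreferredOrder[] = {
      8, 9, 10, 11, 12, 13, 14, 15,   // caller-saved: hand out first
      0, 1, 2, 3, 4, 5, 6, 7          // callee-saved: allocate last
    };

    static const unsigned *order_begin() { return PreferredOrder; }
    static const unsigned *order_end() {
      // Same element-count idiom as sizeof(ARM_QPR)/sizeof(unsigned) above.
      return PreferredOrder + sizeof(PreferredOrder) / sizeof(unsigned);
    }

    int main() {
      for (const unsigned *R = order_begin(); R != order_end(); ++R)
        std::printf("%u ", *R);
      std::printf("\n");
      return 0;
    }

The allocator walks [order_begin, order_end) and takes the first free register, so reordering the array changes allocation policy without touching any allocator code.
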
diff --git a/contrib/llvm/lib/Target/ARM/ARMSchedule.td b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
index b60ccca..958c5c6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSchedule.td
+++ b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
@@ -14,42 +14,86 @@ def IIC_iALUx : InstrItinClass;
def IIC_iALUi : InstrItinClass;
def IIC_iALUr : InstrItinClass;
def IIC_iALUsi : InstrItinClass;
+def IIC_iALUsir : InstrItinClass;
def IIC_iALUsr : InstrItinClass;
+def IIC_iBITi : InstrItinClass;
+def IIC_iBITr : InstrItinClass;
+def IIC_iBITsi : InstrItinClass;
+def IIC_iBITsr : InstrItinClass;
def IIC_iUNAr : InstrItinClass;
def IIC_iUNAsi : InstrItinClass;
-def IIC_iUNAsr : InstrItinClass;
+def IIC_iEXTr : InstrItinClass;
+def IIC_iEXTAr : InstrItinClass;
+def IIC_iEXTAsr : InstrItinClass;
def IIC_iCMPi : InstrItinClass;
def IIC_iCMPr : InstrItinClass;
def IIC_iCMPsi : InstrItinClass;
def IIC_iCMPsr : InstrItinClass;
+def IIC_iTSTi : InstrItinClass;
+def IIC_iTSTr : InstrItinClass;
+def IIC_iTSTsi : InstrItinClass;
+def IIC_iTSTsr : InstrItinClass;
def IIC_iMOVi : InstrItinClass;
def IIC_iMOVr : InstrItinClass;
def IIC_iMOVsi : InstrItinClass;
def IIC_iMOVsr : InstrItinClass;
+def IIC_iMOVix2 : InstrItinClass;
+def IIC_iMOVix2addpc : InstrItinClass;
+def IIC_iMOVix2ld : InstrItinClass;
+def IIC_iMVNi : InstrItinClass;
+def IIC_iMVNr : InstrItinClass;
+def IIC_iMVNsi : InstrItinClass;
+def IIC_iMVNsr : InstrItinClass;
def IIC_iCMOVi : InstrItinClass;
def IIC_iCMOVr : InstrItinClass;
def IIC_iCMOVsi : InstrItinClass;
def IIC_iCMOVsr : InstrItinClass;
+def IIC_iCMOVix2 : InstrItinClass;
def IIC_iMUL16 : InstrItinClass;
def IIC_iMAC16 : InstrItinClass;
def IIC_iMUL32 : InstrItinClass;
def IIC_iMAC32 : InstrItinClass;
def IIC_iMUL64 : InstrItinClass;
def IIC_iMAC64 : InstrItinClass;
-def IIC_iLoadi : InstrItinClass;
-def IIC_iLoadr : InstrItinClass;
-def IIC_iLoadsi : InstrItinClass;
-def IIC_iLoadiu : InstrItinClass;
-def IIC_iLoadru : InstrItinClass;
-def IIC_iLoadsiu : InstrItinClass;
-def IIC_iLoadm : InstrItinClass;
-def IIC_iStorei : InstrItinClass;
-def IIC_iStorer : InstrItinClass;
-def IIC_iStoresi : InstrItinClass;
-def IIC_iStoreiu : InstrItinClass;
-def IIC_iStoreru : InstrItinClass;
-def IIC_iStoresiu : InstrItinClass;
-def IIC_iStorem : InstrItinClass;
+def IIC_iLoad_i : InstrItinClass;
+def IIC_iLoad_r : InstrItinClass;
+def IIC_iLoad_si : InstrItinClass;
+def IIC_iLoad_iu : InstrItinClass;
+def IIC_iLoad_ru : InstrItinClass;
+def IIC_iLoad_siu : InstrItinClass;
+def IIC_iLoad_bh_i : InstrItinClass;
+def IIC_iLoad_bh_r : InstrItinClass;
+def IIC_iLoad_bh_si : InstrItinClass;
+def IIC_iLoad_bh_iu : InstrItinClass;
+def IIC_iLoad_bh_ru : InstrItinClass;
+def IIC_iLoad_bh_siu : InstrItinClass;
+def IIC_iLoad_d_i : InstrItinClass;
+def IIC_iLoad_d_r : InstrItinClass;
+def IIC_iLoad_d_ru : InstrItinClass;
+def IIC_iLoad_m : InstrItinClass<0>; // micro-coded
+def IIC_iLoad_mu : InstrItinClass<0>; // micro-coded
+def IIC_iLoad_mBr : InstrItinClass<0>; // micro-coded
+def IIC_iPop : InstrItinClass<0>; // micro-coded
+def IIC_iPop_Br : InstrItinClass<0>; // micro-coded
+def IIC_iLoadiALU : InstrItinClass;
+def IIC_iStore_i : InstrItinClass;
+def IIC_iStore_r : InstrItinClass;
+def IIC_iStore_si : InstrItinClass;
+def IIC_iStore_iu : InstrItinClass;
+def IIC_iStore_ru : InstrItinClass;
+def IIC_iStore_siu : InstrItinClass;
+def IIC_iStore_bh_i : InstrItinClass;
+def IIC_iStore_bh_r : InstrItinClass;
+def IIC_iStore_bh_si : InstrItinClass;
+def IIC_iStore_bh_iu : InstrItinClass;
+def IIC_iStore_bh_ru : InstrItinClass;
+def IIC_iStore_bh_siu : InstrItinClass;
+def IIC_iStore_d_i : InstrItinClass;
+def IIC_iStore_d_r : InstrItinClass;
+def IIC_iStore_d_ru : InstrItinClass;
+def IIC_iStore_m : InstrItinClass<0>; // micro-coded
+def IIC_iStore_mu : InstrItinClass<0>; // micro-coded
+def IIC_Preload : InstrItinClass;
def IIC_Br : InstrItinClass;
def IIC_fpSTAT : InstrItinClass;
def IIC_fpUNA32 : InstrItinClass;
@@ -80,19 +124,76 @@ def IIC_fpSQRT32 : InstrItinClass;
def IIC_fpSQRT64 : InstrItinClass;
def IIC_fpLoad32 : InstrItinClass;
def IIC_fpLoad64 : InstrItinClass;
-def IIC_fpLoadm : InstrItinClass;
+def IIC_fpLoad_m : InstrItinClass<0>; // micro-coded
+def IIC_fpLoad_mu : InstrItinClass<0>; // micro-coded
def IIC_fpStore32 : InstrItinClass;
def IIC_fpStore64 : InstrItinClass;
-def IIC_fpStorem : InstrItinClass;
+def IIC_fpStore_m : InstrItinClass<0>; // micro-coded
+def IIC_fpStore_mu : InstrItinClass<0>; // micro-coded
def IIC_VLD1 : InstrItinClass;
+def IIC_VLD1x2 : InstrItinClass;
+def IIC_VLD1x3 : InstrItinClass;
+def IIC_VLD1x4 : InstrItinClass;
+def IIC_VLD1u : InstrItinClass;
+def IIC_VLD1x2u : InstrItinClass;
+def IIC_VLD1x3u : InstrItinClass;
+def IIC_VLD1x4u : InstrItinClass;
+def IIC_VLD1ln : InstrItinClass;
+def IIC_VLD1lnu : InstrItinClass;
+def IIC_VLD1dup : InstrItinClass;
+def IIC_VLD1dupu : InstrItinClass;
def IIC_VLD2 : InstrItinClass;
+def IIC_VLD2x2 : InstrItinClass;
+def IIC_VLD2u : InstrItinClass;
+def IIC_VLD2x2u : InstrItinClass;
+def IIC_VLD2ln : InstrItinClass;
+def IIC_VLD2lnu : InstrItinClass;
+def IIC_VLD2dup : InstrItinClass;
+def IIC_VLD2dupu : InstrItinClass;
def IIC_VLD3 : InstrItinClass;
+def IIC_VLD3ln : InstrItinClass;
+def IIC_VLD3u : InstrItinClass;
+def IIC_VLD3lnu : InstrItinClass;
+def IIC_VLD3dup : InstrItinClass;
+def IIC_VLD3dupu : InstrItinClass;
def IIC_VLD4 : InstrItinClass;
-def IIC_VST : InstrItinClass;
+def IIC_VLD4ln : InstrItinClass;
+def IIC_VLD4u : InstrItinClass;
+def IIC_VLD4lnu : InstrItinClass;
+def IIC_VLD4dup : InstrItinClass;
+def IIC_VLD4dupu : InstrItinClass;
+def IIC_VST1 : InstrItinClass;
+def IIC_VST1x2 : InstrItinClass;
+def IIC_VST1x3 : InstrItinClass;
+def IIC_VST1x4 : InstrItinClass;
+def IIC_VST1u : InstrItinClass;
+def IIC_VST1x2u : InstrItinClass;
+def IIC_VST1x3u : InstrItinClass;
+def IIC_VST1x4u : InstrItinClass;
+def IIC_VST1ln : InstrItinClass;
+def IIC_VST1lnu : InstrItinClass;
+def IIC_VST2 : InstrItinClass;
+def IIC_VST2x2 : InstrItinClass;
+def IIC_VST2u : InstrItinClass;
+def IIC_VST2x2u : InstrItinClass;
+def IIC_VST2ln : InstrItinClass;
+def IIC_VST2lnu : InstrItinClass;
+def IIC_VST3 : InstrItinClass;
+def IIC_VST3u : InstrItinClass;
+def IIC_VST3ln : InstrItinClass;
+def IIC_VST3lnu : InstrItinClass;
+def IIC_VST4 : InstrItinClass;
+def IIC_VST4u : InstrItinClass;
+def IIC_VST4ln : InstrItinClass;
+def IIC_VST4lnu : InstrItinClass;
def IIC_VUNAD : InstrItinClass;
def IIC_VUNAQ : InstrItinClass;
def IIC_VBIND : InstrItinClass;
def IIC_VBINQ : InstrItinClass;
+def IIC_VPBIND : InstrItinClass;
+def IIC_VFMULD : InstrItinClass;
+def IIC_VFMULQ : InstrItinClass;
+def IIC_VMOV : InstrItinClass;
def IIC_VMOVImm : InstrItinClass;
def IIC_VMOVD : InstrItinClass;
def IIC_VMOVQ : InstrItinClass;
@@ -101,6 +202,7 @@ def IIC_VMOVID : InstrItinClass;
def IIC_VMOVISL : InstrItinClass;
def IIC_VMOVSI : InstrItinClass;
def IIC_VMOVDI : InstrItinClass;
+def IIC_VMOVN : InstrItinClass;
def IIC_VPERMD : InstrItinClass;
def IIC_VPERMQ : InstrItinClass;
def IIC_VPERMQ3 : InstrItinClass;
@@ -152,7 +254,7 @@ def IIC_VTBX4 : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
-def GenericItineraries : ProcessorItineraries<[], []>;
+def GenericItineraries : ProcessorItineraries<[], [], []>;
include "ARMScheduleV6.td"
include "ARMScheduleA8.td"
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
index 282abca..8d86c01 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
@@ -14,18 +14,17 @@
//
// Scheduling information derived from "Cortex-A8 Technical Reference Manual".
// Functional Units.
-def A8_Issue : FuncUnit; // issue
def A8_Pipe0 : FuncUnit; // pipeline 0
def A8_Pipe1 : FuncUnit; // pipeline 1
-def A8_LdSt0 : FuncUnit; // pipeline 0 load/store
-def A8_LdSt1 : FuncUnit; // pipeline 1 load/store
+def A8_LSPipe : FuncUnit; // Load / store pipeline
def A8_NPipe : FuncUnit; // NEON ALU/MUL pipe
def A8_NLSPipe : FuncUnit; // NEON LS pipe
//
// Dual issue pipeline represented by A8_Pipe0 | A8_Pipe1
//
def CortexA8Itineraries : ProcessorItineraries<
- [A8_Issue, A8_Pipe0, A8_Pipe1, A8_LdSt0, A8_LdSt1, A8_NPipe, A8_NLSPipe], [
+ [A8_Pipe0, A8_Pipe1, A8_LSPipe, A8_NPipe, A8_NLSPipe],
+ [], [
// Two fully-pipelined integer ALU pipelines
//
// No operand cycles
@@ -35,12 +34,23 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_iALUi ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
InstrItinData<IIC_iALUr ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
InstrItinData<IIC_iALUsi,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iALUsir,[InstrStage<1,[A8_Pipe0, A8_Pipe1]>], [2, 1, 2]>,
InstrItinData<IIC_iALUsr,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
//
+ // Bitwise Instructions that produce a result
+ InstrItinData<IIC_iBITi ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iBITr ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
+ InstrItinData<IIC_iBITsi,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iBITsr,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
+ //
// Unary Instructions that produce a result
InstrItinData<IIC_iUNAr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
InstrItinData<IIC_iUNAsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ //
+ // Zero and sign extension instructions
+ InstrItinData<IIC_iEXTr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iEXTAr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iEXTAsr,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>],[2, 2, 1, 1]>,
//
// Compare instructions
InstrItinData<IIC_iCMPi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
@@ -48,124 +58,184 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_iCMPsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
InstrItinData<IIC_iCMPsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
//
+ // Test instructions
+ InstrItinData<IIC_iTSTi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+ InstrItinData<IIC_iTSTr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iTSTsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iTSTsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ //
// Move instructions, unconditional
InstrItinData<IIC_iMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
InstrItinData<IIC_iMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
InstrItinData<IIC_iMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
InstrItinData<IIC_iMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
+ InstrItinData<IIC_iMOVix2,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+ InstrItinData<IIC_iMOVix2addpc,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [3]>,
+ InstrItinData<IIC_iMOVix2ld,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_LSPipe]>], [5]>,
//
// Move instructions, conditional
InstrItinData<IIC_iCMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
InstrItinData<IIC_iCMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
InstrItinData<IIC_iCMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
InstrItinData<IIC_iCMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iCMOVix2,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [3, 1]>,
+ //
+ // MVN instructions
+ InstrItinData<IIC_iMVNi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
+ InstrItinData<IIC_iMVNr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMVNsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMVNsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
// Integer multiply pipeline
// Result written in E5, but that is relative to the last cycle of multicycle,
// so we use 6 for those cases
//
InstrItinData<IIC_iMUL16 , [InstrStage<1, [A8_Pipe0]>], [5, 1, 1]>,
- InstrItinData<IIC_iMAC16 , [InstrStage<1, [A8_Pipe1], 0>,
- InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL32 , [InstrStage<1, [A8_Pipe1], 0>,
- InstrStage<2, [A8_Pipe0]>], [6, 1, 1]>,
- InstrItinData<IIC_iMAC32 , [InstrStage<1, [A8_Pipe1], 0>,
- InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL64 , [InstrStage<2, [A8_Pipe1], 0>,
- InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
- InstrItinData<IIC_iMAC64 , [InstrStage<2, [A8_Pipe1], 0>,
- InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
+ InstrItinData<IIC_iMAC16 , [InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
+ InstrItinData<IIC_iMUL32 , [InstrStage<2, [A8_Pipe0]>], [6, 1, 1]>,
+ InstrItinData<IIC_iMAC32 , [InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
+ InstrItinData<IIC_iMUL64 , [InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
+ InstrItinData<IIC_iMAC64 , [InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
// Integer load pipeline
//
- // loads have an extra cycle of latency, but are fully pipelined
- // use A8_Issue to enforce the 1 load/store per cycle limit
- //
// Immediate offset
- InstrItinData<IIC_iLoadi , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 1]>,
+ InstrItinData<IIC_iLoad_i , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iLoad_bh_i, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iLoad_d_i, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
//
// Register offset
- InstrItinData<IIC_iLoadr , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_r , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_r, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_r , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iLoadsi , [InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [4, 1, 1]>,
+ // FIXME: lsl by 2 takes 1 cycle.
+ InstrItinData<IIC_iLoad_si , [InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [4, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_si,[InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [4, 1, 1]>,
//
// Immediate offset with update
- InstrItinData<IIC_iLoadiu , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 2, 1]>,
+ InstrItinData<IIC_iLoad_iu , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 2, 1]>,
+ InstrItinData<IIC_iLoad_bh_iu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 2, 1]>,
//
// Register offset with update
- InstrItinData<IIC_iLoadru , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_ru , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_ru,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_ru, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 2, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iLoadsiu , [InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [4, 3, 1, 1]>,
- //
- // Load multiple
- InstrItinData<IIC_iLoadm , [InstrStage<2, [A8_Issue], 0>,
- InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>]>,
+ InstrItinData<IIC_iLoad_siu , [InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [4, 3, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_siu,[InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [4, 3, 1, 1]>,
+ //
+ // Load multiple, def is the 5th operand. Pipeline 0 only.
+ // FIXME: A8_LSPipe cycle time is dynamic, this assumes 3 to 4 registers.
+ InstrItinData<IIC_iLoad_m , [InstrStage<2, [A8_Pipe0], 0>,
+ InstrStage<2, [A8_LSPipe]>], [1, 1, 1, 1, 3]>,
+ //
+ // Load multiple + update, defs are the 1st and 5th operands.
+ InstrItinData<IIC_iLoad_mu , [InstrStage<3, [A8_Pipe0], 0>,
+ InstrStage<3, [A8_LSPipe]>], [2, 1, 1, 1, 3]>,
+ //
+ // Load multiple plus branch
+ InstrItinData<IIC_iLoad_mBr, [InstrStage<3, [A8_Pipe0], 0>,
+ InstrStage<3, [A8_LSPipe]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>],
+ [1, 2, 1, 1, 3]>,
+ //
+ // Pop, def is the 3rd operand.
+ InstrItinData<IIC_iPop , [InstrStage<3, [A8_Pipe0], 0>,
+ InstrStage<3, [A8_LSPipe]>], [1, 1, 3]>,
+ //
+ // Pop + branch, def is the 3rd operand.
+ InstrItinData<IIC_iPop_Br, [InstrStage<3, [A8_Pipe0], 0>,
+ InstrStage<3, [A8_LSPipe]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>],
+ [1, 1, 3]>,
- // Integer store pipeline
//
- // use A8_Issue to enforce the 1 load/store per cycle limit
+ // iLoadi + iALUr for t2LDRpci_pic.
+ InstrItinData<IIC_iLoadiALU, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>,
+ InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [4, 1]>,
+
+
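A note on how these tables read: each InstrItinData entry names an itinerary class, the pipeline stages it occupies, and per-operand read/write cycles, and a trailing 0 on a stage means the next stage starts in the same cycle. A minimal sketch with invented names (HypoPipe, HypoLS and IIC_hypoLoad are illustrative, not part of this diff):

def HypoPipe : FuncUnit;       // hypothetical ALU pipe
def HypoLS   : FuncUnit;       // hypothetical load/store pipe
def IIC_hypoLoad : InstrItinClass;

def HypoItineraries : ProcessorItineraries<
  [HypoPipe, HypoLS], [], [
  // Stage 1 holds HypoPipe for 1 cycle; the trailing 0 lets stage 2
  // start the same cycle on HypoLS. Operand cycles [3, 1] say the
  // def (operand 0) is ready at cycle 3 and the address operand is
  // read at cycle 1.
  InstrItinData<IIC_hypoLoad, [InstrStage<1, [HypoPipe], 0>,
                               InstrStage<1, [HypoLS]>], [3, 1]>
]>;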
+ // Integer store pipeline
//
// Immediate offset
- InstrItinData<IIC_iStorei , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 1]>,
+ InstrItinData<IIC_iStore_i , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iStore_bh_i,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iStore_d_i, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1]>,
//
// Register offset
- InstrItinData<IIC_iStorer , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_r , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_r,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_d_r, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [3, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iStoresi , [InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_si , [InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_si,[InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [3, 1, 1]>,
//
// Immediate offset with update
- InstrItinData<IIC_iStoreiu , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [2, 3, 1]>,
+ InstrItinData<IIC_iStore_iu , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 3, 1]>,
+ InstrItinData<IIC_iStore_bh_iu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 3, 1]>,
//
// Register offset with update
- InstrItinData<IIC_iStoreru , [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [2, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_ru , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_ru,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_d_ru, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 3, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iStoresiu, [InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>], [3, 3, 1, 1]>,
- //
- // Store multiple
- InstrItinData<IIC_iStorem , [InstrStage<2, [A8_Issue], 0>,
- InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0]>]>,
+ InstrItinData<IIC_iStore_siu, [InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [3, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_siu,[InstrStage<2, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_LSPipe]>], [3, 3, 1, 1]>,
+ //
+ // Store multiple. Pipeline 0 only.
+  // FIXME: A8_LSPipe cycle time is dynamic; this assumes 3 to 4 registers.
+ InstrItinData<IIC_iStore_m , [InstrStage<2, [A8_Pipe0], 0>,
+ InstrStage<2, [A8_LSPipe]>]>,
+ //
+ // Store multiple + update
+ InstrItinData<IIC_iStore_mu, [InstrStage<2, [A8_Pipe0], 0>,
+ InstrStage<2, [A8_LSPipe]>], [2]>,
+
+ //
+ // Preload
+ InstrItinData<IIC_Preload, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
// Branch
//
@@ -178,440 +248,786 @@ def CortexA8Itineraries : ProcessorItineraries<
// possible.
//
// FP Special Register to Integer Register File Move
- InstrItinData<IIC_fpSTAT , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpSTAT , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe]>], [20]>,
//
// Single-precision FP Unary
- InstrItinData<IIC_fpUNA32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpUNA32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 1]>,
//
// Double-precision FP Unary
- InstrItinData<IIC_fpUNA64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpUNA64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<4, [A8_NPipe], 0>,
InstrStage<4, [A8_NLSPipe]>], [4, 1]>,
//
// Single-precision FP Compare
- InstrItinData<IIC_fpCMP32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCMP32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [1, 1]>,
//
// Double-precision FP Compare
- InstrItinData<IIC_fpCMP64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCMP64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<4, [A8_NPipe], 0>,
InstrStage<4, [A8_NLSPipe]>], [4, 1]>,
//
// Single to Double FP Convert
- InstrItinData<IIC_fpCVTSD , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTSD , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<7, [A8_NPipe], 0>,
InstrStage<7, [A8_NLSPipe]>], [7, 1]>,
//
// Double to Single FP Convert
- InstrItinData<IIC_fpCVTDS , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTDS , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<5, [A8_NPipe], 0>,
InstrStage<5, [A8_NLSPipe]>], [5, 1]>,
//
// Single-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTSI , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTSI , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 1]>,
//
// Double-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTDI , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTDI , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<8, [A8_NPipe], 0>,
InstrStage<8, [A8_NLSPipe]>], [8, 1]>,
//
// Integer to Single-Precision FP Convert
- InstrItinData<IIC_fpCVTIS , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTIS , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 1]>,
//
// Integer to Double-Precision FP Convert
- InstrItinData<IIC_fpCVTID , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpCVTID , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<8, [A8_NPipe], 0>,
InstrStage<8, [A8_NLSPipe]>], [8, 1]>,
//
// Single-precision FP ALU
- InstrItinData<IIC_fpALU32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpALU32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 1, 1]>,
//
// Double-precision FP ALU
- InstrItinData<IIC_fpALU64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpALU64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<9, [A8_NPipe], 0>,
InstrStage<9, [A8_NLSPipe]>], [9, 1, 1]>,
//
// Single-precision FP Multiply
- InstrItinData<IIC_fpMUL32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpMUL32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 1, 1]>,
//
// Double-precision FP Multiply
- InstrItinData<IIC_fpMUL64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpMUL64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<11, [A8_NPipe], 0>,
InstrStage<11, [A8_NLSPipe]>], [11, 1, 1]>,
//
// Single-precision FP MAC
- InstrItinData<IIC_fpMAC32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpMAC32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [7, 2, 1, 1]>,
//
// Double-precision FP MAC
- InstrItinData<IIC_fpMAC64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpMAC64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<19, [A8_NPipe], 0>,
InstrStage<19, [A8_NLSPipe]>], [19, 2, 1, 1]>,
//
// Single-precision FP DIV
- InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<20, [A8_NPipe], 0>,
InstrStage<20, [A8_NLSPipe]>], [20, 1, 1]>,
//
// Double-precision FP DIV
- InstrItinData<IIC_fpDIV64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpDIV64 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<29, [A8_NPipe], 0>,
InstrStage<29, [A8_NLSPipe]>], [29, 1, 1]>,
//
// Single-precision FP SQRT
- InstrItinData<IIC_fpSQRT32, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpSQRT32, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<19, [A8_NPipe], 0>,
InstrStage<19, [A8_NLSPipe]>], [19, 1]>,
//
// Double-precision FP SQRT
- InstrItinData<IIC_fpSQRT64, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_fpSQRT64, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<29, [A8_NPipe], 0>,
InstrStage<29, [A8_NLSPipe]>], [29, 1]>,
+
+ //
+ // Integer to Single-precision Move
+ InstrItinData<IIC_fpMOVIS, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>],
+ [2, 1]>,
+ //
+ // Integer to Double-precision Move
+ InstrItinData<IIC_fpMOVID, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>],
+ [2, 1, 1]>,
+ //
+ // Single-precision to Integer Move
+ InstrItinData<IIC_fpMOVSI, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>],
+ [20, 1]>,
+ //
+ // Double-precision to Integer Move
+ InstrItinData<IIC_fpMOVDI, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>],
+ [20, 20, 1]>,
+
//
// Single-precision FP Load
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>],
+ [2, 1]>,
//
// Double-precision FP Load
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad64, [InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpLoad64, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>],
+ [2, 1]>,
//
// FP Load Multiple
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoadm, [InstrStage<3, [A8_Issue], 0>,
- InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+  // FIXME: A8_LSPipe cycle time is dynamic; this assumes 3 to 4 registers.
+ InstrItinData<IIC_fpLoad_m, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>], [1, 1, 1, 2]>,
+ //
+ // FP Load Multiple + update
+ InstrItinData<IIC_fpLoad_mu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 1, 1, 1, 2]>,
//
// Single-precision FP Store
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>],
+ [1, 1]>,
//
// Double-precision FP Store
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore64,[InstrStage<2, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0], 0>,
- InstrStage<1, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpStore64,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>],
+ [1, 1]>,
//
// FP Store Multiple
- // use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStorem, [InstrStage<3, [A8_Issue], 0>,
- InstrStage<2, [A8_Pipe0], 0>,
- InstrStage<2, [A8_Pipe1]>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_fpStore_m,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>], [1, 1, 1, 1]>,
+ //
+ // FP Store Multiple + update
+ InstrItinData<IIC_fpStore_mu,[InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>,
+ InstrStage<1, [A8_NLSPipe], 0>,
+ InstrStage<1, [A8_LSPipe]>], [2, 1, 1, 1, 1]>,
// NEON
// Issue through integer pipeline, and execute in NEON unit.
//
// VLD1
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD1, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ InstrItinData<IIC_VLD1, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1]>,
+ // VLD1x2
+ InstrItinData<IIC_VLD1x2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 1]>,
+ //
+ // VLD1x3
+ InstrItinData<IIC_VLD1x3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 1]>,
+ //
+ // VLD1x4
+ InstrItinData<IIC_VLD1x4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 1]>,
+ //
+ // VLD1u
+ InstrItinData<IIC_VLD1u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 1]>,
+ //
+ // VLD1x2u
+ InstrItinData<IIC_VLD1x2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 2, 1]>,
+ //
+ // VLD1x3u
+ InstrItinData<IIC_VLD1x3u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 2, 1]>,
+ //
+ // VLD1x4u
+ InstrItinData<IIC_VLD1x4u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 2, 1]>,
+ //
+ // VLD1ln
+ InstrItinData<IIC_VLD1ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [3, 1, 1, 1]>,
+ //
+ // VLD1lnu
+ InstrItinData<IIC_VLD1lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [3, 2, 1, 1, 1, 1]>,
+ //
+ // VLD1dup
+ InstrItinData<IIC_VLD1dup, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1]>,
+ //
+ // VLD1dupu
+ InstrItinData<IIC_VLD1dupu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 1, 1]>,
//
// VLD2
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD2, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_VLD2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 1]>,
+ //
+ // VLD2x2
+ InstrItinData<IIC_VLD2x2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 1]>,
+ //
+ // VLD2ln
+ InstrItinData<IIC_VLD2ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [3, 3, 1, 1, 1, 1]>,
+ //
+ // VLD2u
+ InstrItinData<IIC_VLD2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 2, 1, 1, 1]>,
+ //
+ // VLD2x2u
+ InstrItinData<IIC_VLD2x2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 2, 1]>,
+ //
+ // VLD2lnu
+ InstrItinData<IIC_VLD2lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [3, 3, 2, 1, 1, 1, 1, 1]>,
+ //
+ // VLD2dup
+ InstrItinData<IIC_VLD2dup, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 1]>,
+ //
+ // VLD2dupu
+ InstrItinData<IIC_VLD2dupu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 2, 2, 1, 1]>,
//
// VLD3
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD3, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 1]>,
+ InstrItinData<IIC_VLD3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [3, 3, 4, 1]>,
+ //
+ // VLD3ln
+ InstrItinData<IIC_VLD3ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<5, [A8_NLSPipe], 0>,
+ InstrStage<5, [A8_LSPipe]>],
+ [4, 4, 5, 1, 1, 1, 1, 2]>,
+ //
+ // VLD3u
+ InstrItinData<IIC_VLD3u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [3, 3, 4, 2, 1]>,
+ //
+ // VLD3lnu
+ InstrItinData<IIC_VLD3lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<5, [A8_NLSPipe], 0>,
+ InstrStage<5, [A8_LSPipe]>],
+ [4, 4, 5, 2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VLD3dup
+ InstrItinData<IIC_VLD3dup, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 1]>,
+ //
+ // VLD3dupu
+ InstrItinData<IIC_VLD3dupu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 2, 1, 1]>,
//
// VLD4
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD4, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 2, 1]>,
+ InstrItinData<IIC_VLD4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [3, 3, 4, 4, 1]>,
+ //
+ // VLD4ln
+ InstrItinData<IIC_VLD4ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<5, [A8_NLSPipe], 0>,
+ InstrStage<5, [A8_LSPipe]>],
+ [4, 4, 5, 5, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VLD4u
+ InstrItinData<IIC_VLD4u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [3, 3, 4, 4, 2, 1]>,
+ //
+ // VLD4lnu
+ InstrItinData<IIC_VLD4lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<5, [A8_NLSPipe], 0>,
+ InstrStage<5, [A8_LSPipe]>],
+ [4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VLD4dup
+ InstrItinData<IIC_VLD4dup, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 1]>,
+ //
+ // VLD4dupu
+ InstrItinData<IIC_VLD4dupu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 2, 3, 3, 2, 1, 1]>,
+ //
+ // VST1
+ InstrItinData<IIC_VST1, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1]>,
//
- // VST
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VST, [InstrStage<1, [A8_Issue], 0>,
- InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
- InstrStage<1, [A8_LdSt0], 0>,
- InstrStage<1, [A8_NLSPipe]>]>,
+ // VST1x2
+ InstrItinData<IIC_VST1x2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST1x3
+ InstrItinData<IIC_VST1x3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST1x4
+ InstrItinData<IIC_VST1x4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST1u
+ InstrItinData<IIC_VST1u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1]>,
+ //
+ // VST1x2u
+ InstrItinData<IIC_VST1x2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST1x3u
+ InstrItinData<IIC_VST1x3u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST1x4u
+ InstrItinData<IIC_VST1x4u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST1ln
+ InstrItinData<IIC_VST1ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1]>,
+ //
+ // VST1lnu
+ InstrItinData<IIC_VST1lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1]>,
+ //
+ // VST2
+ InstrItinData<IIC_VST2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST2x2
+ InstrItinData<IIC_VST2x2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST2u
+ InstrItinData<IIC_VST2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST2x2u
+ InstrItinData<IIC_VST2x2u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST2ln
+ InstrItinData<IIC_VST2ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST2lnu
+ InstrItinData<IIC_VST2lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NLSPipe], 0>,
+ InstrStage<2, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST3
+ InstrItinData<IIC_VST3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST3u
+ InstrItinData<IIC_VST3u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST3ln
+ InstrItinData<IIC_VST3ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST3lnu
+ InstrItinData<IIC_VST3lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<3, [A8_NLSPipe], 0>,
+ InstrStage<3, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST4
+ InstrItinData<IIC_VST4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4u
+ InstrItinData<IIC_VST4u, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4ln
+ InstrItinData<IIC_VST4ln, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4lnu
+ InstrItinData<IIC_VST4lnu, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<4, [A8_NLSPipe], 0>,
+ InstrStage<4, [A8_LSPipe]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
//
// Double-register FP Unary
- InstrItinData<IIC_VUNAD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VUNAD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [5, 2]>,
//
// Quad-register FP Unary
// Result written in N5, but that is relative to the last cycle of the
// multi-cycle issue, so we use 6 for those cases
- InstrItinData<IIC_VUNAQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VUNAQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [6, 2]>,
//
// Double-register FP Binary
- InstrItinData<IIC_VBIND, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBIND, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [5, 2, 2]>,
//
+ // VPADD, etc.
+ InstrItinData<IIC_VPBIND, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [5, 2, 2]>,
+ //
+ // Double-register FP VMUL
+ InstrItinData<IIC_VFMULD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [5, 2, 1]>,
+
+ //
// Quad-register FP Binary
// Result written in N5, but that is relative to the last cycle of the
// multi-cycle issue, so we use 6 for those cases
- InstrItinData<IIC_VBINQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBINQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [6, 2, 2]>,
//
+ // Quad-register FP VMUL
+ InstrItinData<IIC_VFMULQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [6, 2, 1]>,
+ //
+ // Move
+ InstrItinData<IIC_VMOV, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [1, 1]>,
+ //
// Move Immediate
- InstrItinData<IIC_VMOVImm, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVImm, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3]>,
//
// Double-register Permute Move
- InstrItinData<IIC_VMOVD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 1]>,
//
// Quad-register Permute Move
// Result written in N2, but that is relative to the last cycle of the
// multi-cycle issue, so we use 3 for those cases
- InstrItinData<IIC_VMOVQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 1]>,
//
// Integer to Single-precision Move
- InstrItinData<IIC_VMOVIS , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVIS , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 1]>,
//
// Integer to Double-precision Move
- InstrItinData<IIC_VMOVID , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVID , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 1, 1]>,
//
// Single-precision to Integer Move
- InstrItinData<IIC_VMOVSI , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVSI , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [20, 1]>,
//
// Double-precision to Integer Move
- InstrItinData<IIC_VMOVDI , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVDI , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [20, 20, 1]>,
//
// Integer to Lane Move
- InstrItinData<IIC_VMOVISL , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMOVISL , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 1, 1]>,
//
+ // Vector narrow move
+ InstrItinData<IIC_VMOVN , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [2, 1]>,
+ //
// Double-register Permute
- InstrItinData<IIC_VPERMD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VPERMD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 2, 1, 1]>,
//
// Quad-register Permute
// Result written in N2, but that is relative to the last cycle of the
// multi-cycle issue, so we use 3 for those cases
- InstrItinData<IIC_VPERMQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VPERMQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 3, 1, 1]>,
//
// Quad-register Permute (3 cycle issue)
// Result written in N2, but that is relative to the last cycle of the
// multi-cycle issue, so we use 4 for those cases
- InstrItinData<IIC_VPERMQ3, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VPERMQ3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>], [4, 4, 1, 1]>,
//
// Double-register FP Multiple-Accumulate
- InstrItinData<IIC_VMACD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [9, 3, 2, 2]>,
//
// Quad-register FP Multiple-Accumulate
// Result written in N9, but that is relative to the last cycle of the
// multi-cycle issue, so we use 10 for those cases
- InstrItinData<IIC_VMACQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [10, 3, 2, 2]>,
//
// Double-register Reciprocal Step
- InstrItinData<IIC_VRECSD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VRECSD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [9, 2, 2]>,
//
// Quad-register Reciprocal Step
- InstrItinData<IIC_VRECSQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VRECSQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [10, 2, 2]>,
//
// Double-register Integer Count
- InstrItinData<IIC_VCNTiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VCNTiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 2, 2]>,
//
// Quad-register Integer Count
// Result written in N3, but that is relative to the last cycle of the
// multi-cycle issue, so we use 4 for those cases
- InstrItinData<IIC_VCNTiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VCNTiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [4, 2, 2]>,
//
// Double-register Integer Unary
- InstrItinData<IIC_VUNAiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VUNAiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2]>,
//
// Quad-register Integer Unary
- InstrItinData<IIC_VUNAiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VUNAiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2]>,
//
// Double-register Integer Q-Unary
- InstrItinData<IIC_VQUNAiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VQUNAiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 1]>,
//
// Quad-register Integer Q-Unary
- InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 1]>,
//
// Double-register Integer Binary
- InstrItinData<IIC_VBINiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBINiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 2, 2]>,
//
// Quad-register Integer Binary
- InstrItinData<IIC_VBINiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBINiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 2, 2]>,
//
// Double-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBINi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2, 1]>,
//
// Quad-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VBINi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2, 1]>,
//
// Double-register Integer Subtract
- InstrItinData<IIC_VSUBiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSUBiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 2, 1]>,
//
// Quad-register Integer Subtract
- InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 2, 1]>,
//
// Double-register Integer Subtract (4 cycle)
- InstrItinData<IIC_VSUBi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSUBi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2, 1]>,
//
// Quad-register Integer Subtract (4 cycle)
- InstrItinData<IIC_VSUBi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSUBi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 2, 1]>,
//
// Double-register Integer Shift
- InstrItinData<IIC_VSHLiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSHLiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [3, 1, 1]>,
//
// Quad-register Integer Shift
- InstrItinData<IIC_VSHLiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSHLiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [4, 1, 1]>,
//
// Double-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSHLi4D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [4, 1, 1]>,
//
// Quad-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [5, 1, 1]>,
//
// Double-register Integer Pair Add Long
- InstrItinData<IIC_VPALiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VPALiD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [6, 3, 1]>,
//
// Quad-register Integer Pair Add Long
- InstrItinData<IIC_VPALiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VPALiQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [7, 3, 1]>,
//
// Double-register Absolute Difference and Accumulate
- InstrItinData<IIC_VABAD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VABAD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [6, 3, 2, 1]>,
//
// Quad-register Absolute Difference and Accumulate
- InstrItinData<IIC_VABAQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VABAQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [6, 3, 2, 1]>,
//
// Double-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMULi16D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [6, 2, 2]>,
//
// Double-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMULi32D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [7, 2, 1]>,
//
// Quad-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMULi16Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [7, 2, 2]>,
//
// Quad-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMULi32Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>,
InstrStage<2, [A8_NLSPipe], 0>,
InstrStage<3, [A8_NPipe]>], [9, 2, 1]>,
//
// Double-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACi16D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [6, 3, 2, 2]>,
//
// Double-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32D, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACi32D, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [7, 3, 2, 1]>,
//
// Quad-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACi16Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [7, 3, 2, 2]>,
//
// Quad-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VMACi32Q, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>,
InstrStage<2, [A8_NLSPipe], 0>,
InstrStage<3, [A8_NPipe]>], [9, 3, 2, 1]>,
//
// Double-register VEXT
- InstrItinData<IIC_VEXTD, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VEXTD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 1, 1]>,
//
// Quad-register VEXT
- InstrItinData<IIC_VEXTQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VEXTQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 1, 1]>,
//
// VTB
- InstrItinData<IIC_VTB1, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTB1, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 2, 1]>,
- InstrItinData<IIC_VTB2, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTB2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 2, 2, 1]>,
- InstrItinData<IIC_VTB3, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTB3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>], [4, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTB4, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTB4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>],[4, 2, 2, 3, 3, 1]>,
//
// VTBX
- InstrItinData<IIC_VTBX1, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTBX1, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 1, 2, 1]>,
- InstrItinData<IIC_VTBX2, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTBX2, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NLSPipe]>], [3, 1, 2, 2, 1]>,
- InstrItinData<IIC_VTBX3, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTBX3, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>],[4, 1, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTBX4, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
+ InstrItinData<IIC_VTBX4, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
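A recurring pattern in the A8 entries above: a stage whose next-stage delay is 0, followed by an equal-length stage on another unit, reserves both units over the same cycles; the double-precision VFP entries use it to hold A8_NPipe and A8_NLSPipe together. A minimal sketch with invented names (HypoNPipe, HypoNLS and IIC_hypoDP are illustrative only):

def HypoNPipe  : FuncUnit;     // hypothetical NEON arithmetic pipe
def HypoNLS    : FuncUnit;     // hypothetical NEON load/store pipe
def IIC_hypoDP : InstrItinClass;

def HypoNEONItins : ProcessorItineraries<
  [HypoNPipe, HypoNLS], [], [
  // Stage 1 books HypoNPipe for 4 cycles but hands off after 0, so
  // stage 2 books HypoNLS over the same 4 cycles; nothing else can
  // issue into either pipe in that window.
  InstrItinData<IIC_hypoDP, [InstrStage<4, [HypoNPipe], 0>,
                             InstrStage<4, [HypoNLS]>], [4, 1]>
]>;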
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index df2f896..82c6735 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -16,130 +16,417 @@
// Reference Manual".
//
// Functional units
-def A9_Pipe0 : FuncUnit; // pipeline 0
-def A9_Pipe1 : FuncUnit; // pipeline 1
-def A9_LSPipe : FuncUnit; // LS pipe
-def A9_NPipe : FuncUnit; // NEON ALU/MUL pipe
+def A9_Issue0 : FuncUnit; // Issue 0
+def A9_Issue1 : FuncUnit; // Issue 1
+def A9_Branch : FuncUnit; // Branch
+def A9_ALU0 : FuncUnit; // ALU / MUL pipeline 0
+def A9_ALU1 : FuncUnit; // ALU pipeline 1
+def A9_AGU : FuncUnit; // Address generation unit for ld / st
+def A9_NPipe : FuncUnit; // NEON pipeline
+def A9_MUX0 : FuncUnit; // AGU + NEON/FPU multiplexer
+def A9_LSUnit : FuncUnit; // L/S Unit
def A9_DRegsVFP: FuncUnit; // FP register set, VFP side
def A9_DRegsN : FuncUnit; // FP register set, NEON side
-// Dual issue pipeline represented by A9_Pipe0 | A9_Pipe1
-//
+// Bypasses
+def A9_LdBypass : Bypass;
+
def CortexA9Itineraries : ProcessorItineraries<
- [A9_NPipe, A9_DRegsN, A9_DRegsVFP, A9_LSPipe, A9_Pipe0, A9_Pipe1], [
+ [A9_Issue0, A9_Issue1, A9_Branch, A9_ALU0, A9_ALU1, A9_AGU, A9_NPipe, A9_MUX0,
+ A9_LSUnit, A9_DRegsVFP, A9_DRegsN],
+ [A9_LdBypass], [
// Two fully-pipelined integer ALU pipelines
- // FIXME: There are no operand latencies for these instructions at all!
+
//
// Move instructions, unconditional
- InstrItinData<IIC_iMOVi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1]>,
- InstrItinData<IIC_iMOVr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsr , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iMOVi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1]>,
+ InstrItinData<IIC_iMOVr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iMOVix2 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [2]>,
+ InstrItinData<IIC_iMOVix2addpc,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [3]>,
+ InstrItinData<IIC_iMOVix2ld,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>], [5]>,
+ //
+ // MVN instructions
+ InstrItinData<IIC_iMVNi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1]>,
+ InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1, 1], [NoBypass, A9_LdBypass]>,
+ InstrItinData<IIC_iMVNsi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>],
+ [2, 1]>,
+ InstrItinData<IIC_iMVNsr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>],
+ [3, 1, 1]>,
//
// No operand cycles
- InstrItinData<IIC_iALUx , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>]>,
+ InstrItinData<IIC_iALUx , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>]>,
//
// Binary Instructions that produce a result
- InstrItinData<IIC_iALUi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iALUr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2, 2]>,
- InstrItinData<IIC_iALUsi, [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
- InstrItinData<IIC_iALUsr,[InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1, 1]>,
+ InstrItinData<IIC_iALUi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1, 1], [NoBypass, A9_LdBypass]>,
+ InstrItinData<IIC_iALUr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1, 1, 1], [NoBypass, A9_LdBypass, A9_LdBypass]>,
+ InstrItinData<IIC_iALUsi, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>],
+ [2, 1, 1], [NoBypass, A9_LdBypass, NoBypass]>,
+ InstrItinData<IIC_iALUsir,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>],
+ [2, 1, 1], [NoBypass, NoBypass, A9_LdBypass]>,
+ InstrItinData<IIC_iALUsr, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>],
+ [3, 1, 1, 1],
+ [NoBypass, A9_LdBypass, NoBypass, NoBypass]>,
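The third list that these A9 entries add pairs one bypass per operand cycle: A9_LdBypass marks a source read that can be forwarded early when the producer is a load, and NoBypass leaves the stated latency in effect. A minimal sketch with invented names (HypoALU, HypoFwd and IIC_hypoAdd are illustrative only):

def HypoALU : FuncUnit;
def HypoFwd : Bypass;          // hypothetical forwarding path
def IIC_hypoAdd : InstrItinClass;

def HypoBypassItins : ProcessorItineraries<
  [HypoALU], [HypoFwd], [
  // The bypass list lines up position-for-position with the operand
  // cycles [1, 1, 1]: no bypass on the def, while both source reads
  // may be satisfied a cycle early through HypoFwd.
  InstrItinData<IIC_hypoAdd, [InstrStage<1, [HypoALU]>],
                [1, 1, 1], [NoBypass, HypoFwd, HypoFwd]>
]>;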
+ //
+ // Bitwise Instructions that produce a result
+ InstrItinData<IIC_iBITi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iBITr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1, 1]>,
+ InstrItinData<IIC_iBITsi, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iBITsr, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>], [3, 1, 1, 1]>,
//
// Unary Instructions that produce a result
- InstrItinData<IIC_iUNAr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iUNAsi , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+
+ // CLZ, RBIT, etc.
+ InstrItinData<IIC_iUNAr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+
+ // BFC, BFI, UBFX, SBFX
+ InstrItinData<IIC_iUNAsi, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [2, 1]>,
+
+ //
+ // Zero and sign extension instructions
+ InstrItinData<IIC_iEXTr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [2, 1]>,
+ InstrItinData<IIC_iEXTAr, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [3, 1, 1]>,
+ InstrItinData<IIC_iEXTAsr,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>], [3, 1, 1, 1]>,
//
// Compare instructions
- InstrItinData<IIC_iCMPi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMPr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iCMPsi , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMPsr , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iCMPi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1], [A9_LdBypass]>,
+ InstrItinData<IIC_iCMPr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [1, 1], [A9_LdBypass, A9_LdBypass]>,
+ InstrItinData<IIC_iCMPsi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>],
+ [1, 1], [A9_LdBypass, NoBypass]>,
+ InstrItinData<IIC_iCMPsr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>],
+ [1, 1, 1], [A9_LdBypass, NoBypass, NoBypass]>,
+ //
+ // Test instructions
+ InstrItinData<IIC_iTSTi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1]>,
+ InstrItinData<IIC_iTSTr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iTSTsi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iTSTsr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0, A9_ALU1]>], [1, 1, 1]>,
//
// Move instructions, conditional
- InstrItinData<IIC_iCMOVi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMOVr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsr , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+ // FIXME: Correctly model the extra input dep on the destination.
+ InstrItinData<IIC_iCMOVi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1]>,
+ InstrItinData<IIC_iCMOVr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iCMOVsi , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [1, 1]>,
+ InstrItinData<IIC_iCMOVsr , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0, A9_ALU1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iCMOVix2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>,
+ InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>], [2]>,
// Integer multiply pipeline
//
- InstrItinData<IIC_iMUL16 , [InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<2, [A9_Pipe0]>], [4, 1, 1]>,
- InstrItinData<IIC_iMAC16 , [InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<2, [A9_Pipe0]>], [4, 1, 1, 2]>,
- InstrItinData<IIC_iMUL32 , [InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<2, [A9_Pipe0]>], [4, 1, 1]>,
- InstrItinData<IIC_iMAC32 , [InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<2, [A9_Pipe0]>], [4, 1, 1, 2]>,
- InstrItinData<IIC_iMUL64 , [InstrStage<2, [A9_Pipe1], 0>,
- InstrStage<3, [A9_Pipe0]>], [4, 5, 1, 1]>,
- InstrItinData<IIC_iMAC64 , [InstrStage<2, [A9_Pipe1], 0>,
- InstrStage<3, [A9_Pipe0]>], [4, 5, 1, 1]>,
+ InstrItinData<IIC_iMUL16 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0]>], [3, 1, 1]>,
+ InstrItinData<IIC_iMAC16 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iMUL32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0]>], [4, 1, 1]>,
+ InstrItinData<IIC_iMAC32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<2, [A9_ALU0]>],
+ [4, 1, 1, 1]>,
+ InstrItinData<IIC_iMUL64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0]>], [4, 5, 1, 1]>,
+ InstrItinData<IIC_iMAC64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<3, [A9_ALU0]>],
+ [4, 5, 1, 1]>,
// Integer load pipeline
// FIXME: The timings are some rough approximations
//
// Immediate offset
- InstrItinData<IIC_iLoadi , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_i, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [4, 1], [A9_LdBypass]>,
+  // FIXME: If the address is 64-bit aligned, the AGU takes 1 cycle.
+ InstrItinData<IIC_iLoad_d_i , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 3, 1], [A9_LdBypass]>,
//
// Register offset
- InstrItinData<IIC_iLoadr , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iLoad_r , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_r, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [4, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_d_r , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 3, 1, 1], [A9_LdBypass]>,
//
// Scaled register offset
- InstrItinData<IIC_iLoadsi , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [4, 1, 1]>,
+ InstrItinData<IIC_iLoad_si , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit], 0>],
+ [4, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_si,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [5, 1, 1], [A9_LdBypass]>,
//
// Immediate offset with update
- InstrItinData<IIC_iLoadiu , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [3, 2, 1]>,
+ InstrItinData<IIC_iLoad_iu , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 2, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_iu,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [4, 3, 1], [A9_LdBypass]>,
//
// Register offset with update
- InstrItinData<IIC_iLoadru , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [3, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_ru , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 2, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_ru,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [4, 3, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_d_ru, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 3, 1, 1], [A9_LdBypass]>,
//
// Scaled register offset with update
- InstrItinData<IIC_iLoadsiu , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [4, 3, 1, 1]>,
+ InstrItinData<IIC_iLoad_siu , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [4, 3, 1, 1], [A9_LdBypass]>,
+ InstrItinData<IIC_iLoad_bh_siu,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [5, 4, 1, 1], [A9_LdBypass]>,
+ //
+ // Load multiple, def is the 5th operand.
+ // FIXME: This assumes 3 to 4 registers.
+ InstrItinData<IIC_iLoad_m , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1, 3],
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ //
+ // Load multiple + update, defs are the 1st and 5th operands.
+ InstrItinData<IIC_iLoad_mu , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1, 3],
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ //
+ // Load multiple plus branch
+ InstrItinData<IIC_iLoad_mBr, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 1>,
+ InstrStage<2, [A9_LSUnit]>,
+ InstrStage<1, [A9_Branch]>],
+ [1, 2, 1, 1, 3],
+ [NoBypass, NoBypass, NoBypass, NoBypass, A9_LdBypass]>,
+ //
+ // Pop, def is the 3rd operand.
+ InstrItinData<IIC_iPop , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 3],
+ [NoBypass, NoBypass, A9_LdBypass]>,
+ //
+ // Pop + branch, def is the 3rd operand.
+ InstrItinData<IIC_iPop_Br, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<2, [A9_LSUnit]>,
+ InstrStage<1, [A9_Branch]>],
+ [1, 1, 3],
+ [NoBypass, NoBypass, A9_LdBypass]>,
+
//
- // Load multiple
- InstrItinData<IIC_iLoadm , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>]>,
+ // iLoadi + iALUr for t2LDRpci_pic.
+ InstrItinData<IIC_iLoadiALU, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>,
+ InstrStage<1, [A9_ALU0, A9_ALU1]>],
+ [2, 1]>,
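The A9 load entries above chain several 0-delay stages so that the issue slot, the A9_MUX0 multiplexer, the AGU and the load/store unit are all claimed in a single dispatch, with only the final stage advancing time. A minimal sketch with invented names (the Hypo* units and IIC_hypoLd are illustrative only):

def HypoIssue : FuncUnit;
def HypoMux   : FuncUnit;
def HypoAGU   : FuncUnit;
def HypoLSU   : FuncUnit;
def IIC_hypoLd : InstrItinClass;

def HypoChainItins : ProcessorItineraries<
  [HypoIssue, HypoMux, HypoAGU, HypoLSU], [], [
  // Each trailing 0 hands off immediately, so all four units are
  // reserved in the same cycle; the load result (operand 0) is ready
  // at cycle 3 and the base register is read at cycle 1.
  InstrItinData<IIC_hypoLd, [InstrStage<1, [HypoIssue], 0>,
                             InstrStage<1, [HypoMux], 0>,
                             InstrStage<1, [HypoAGU], 0>,
                             InstrStage<1, [HypoLSU]>], [3, 1]>
]>;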
// Integer store pipeline
//
// Immediate offset
- InstrItinData<IIC_iStorei , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [3, 1]>,
+ InstrItinData<IIC_iStore_i , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1]>,
+ InstrItinData<IIC_iStore_bh_i,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1]>,
+  // FIXME: If the address is 64-bit aligned, the AGU takes 1 cycle.
+ InstrItinData<IIC_iStore_d_i, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1]>,
//
// Register offset
- InstrItinData<IIC_iStorer , [InstrStage<1, [ A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_r , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_r,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1]>,
+ InstrItinData<IIC_iStore_d_r, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1]>,
//
// Scaled register offset
- InstrItinData<IIC_iStoresi , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [3, 1, 1]>,
+ InstrItinData<IIC_iStore_si , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_si,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1]>,
//
// Immediate offset with update
- InstrItinData<IIC_iStoreiu , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [2, 3, 1]>,
+ InstrItinData<IIC_iStore_iu , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>], [2, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_iu,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>], [3, 1, 1]>,
//
// Register offset with update
- InstrItinData<IIC_iStoreru , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [2, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_ru , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_ru,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_d_ru, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 1, 1, 1]>,
//
// Scaled register offset with update
- InstrItinData<IIC_iStoresiu, [InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_LSPipe]>], [3, 3, 1, 1]>,
+ InstrItinData<IIC_iStore_siu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_siu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_AGU], 1>,
+ InstrStage<1, [A9_LSUnit]>],
+ [3, 1, 1, 1]>,
//
// Store multiple
- InstrItinData<IIC_iStorem , [InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>]>,
+ InstrItinData<IIC_iStore_m , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<2, [A9_LSUnit]>]>,
+ //
+ // Store multiple + update
+ InstrItinData<IIC_iStore_mu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_AGU], 0>,
+ InstrStage<2, [A9_LSUnit]>], [2]>,
+
+ //
+ // Preload
+ InstrItinData<IIC_Preload, [InstrStage<1, [A9_Issue0, A9_Issue1]>], [1, 1]>,
+
// Branch
//
// no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>]>,
+ InstrItinData<IIC_Br , [InstrStage<1, [A9_Issue0], 0>,
+ InstrStage<1, [A9_Issue1], 0>,
+ InstrStage<1, [A9_Branch]>]>,
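Note the idiom here: functional units listed within one stage are alternatives, while consecutive zero-increment stages are all reserved. Listing A9_Issue0 and A9_Issue1 in separate stages therefore makes a branch occupy both issue ports for its cycle, so nothing can dual-issue beside it. A side-by-side sketch (both def names are hypothetical):

    def HypoEitherPort : InstrItinData<IIC_Br,
      [InstrStage<1, [A9_Issue0, A9_Issue1]>]>;  // takes whichever port is free
    def HypoBothPorts  : InstrItinData<IIC_Br,
      [InstrStage<1, [A9_Issue0], 0>,            // reserves port 0...
       InstrStage<1, [A9_Issue1], 0>,            // ...and port 1 in the same cycle
       InstrStage<1, [A9_Branch]>]>;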
// VFP and NEON share the same register file. This means that every VFP
// instruction should wait for full completion of the consecutive NEON
@@ -159,687 +446,1379 @@ def CortexA9Itineraries : ProcessorItineraries<
// Issue through integer pipeline, and execute in NEON unit.
// FP Special Register to Integer Register File Move
- InstrItinData<IIC_fpSTAT , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpSTAT , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1]>,
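The Required/Reserved pair encodes the register-file interlock described above: the VFP instruction must acquire A9_DRegsVFP (Required), and it holds A9_DRegsN (Reserved) until one cycle past write-back, so a NEON instruction arriving in that window stalls; the NEON entries below do the converse. A reduced sketch, assuming the ReservationKind defs from llvm/Target/Target.td (HypoVFPView and HypoNEONView are hypothetical stand-ins for the two D-register views):

    def HypoVFPView  : FuncUnit;  // hypothetical
    def HypoNEONView : FuncUnit;  // hypothetical
    def HypoInterlock : InstrItinData<IIC_fpUNA32,
      [InstrStage<1, [HypoVFPView], 0, Required>,   // must acquire the VFP view
       InstrStage<3, [HypoNEONView], 0, Reserved>,  // NEON view blocked for 3 cycles
       InstrStage<1, [A9_NPipe]>],
      [1, 1]>;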
//
// Single-precision FP Unary
- InstrItinData<IIC_fpUNA32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpUNA32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Double-precision FP Unary
- InstrItinData<IIC_fpUNA64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpUNA64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Single-precision FP Compare
- InstrItinData<IIC_fpCMP32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCMP32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 4 cycles
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Double-precision FP Compare
- InstrItinData<IIC_fpCMP64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCMP64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 4 cycles
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Single to Double FP Convert
- InstrItinData<IIC_fpCVTSD , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTSD , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Double to Single FP Convert
- InstrItinData<IIC_fpCVTDS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTDS , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Single to Half FP Convert
- InstrItinData<IIC_fpCVTSH , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTSH , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Half to Single FP Convert
- InstrItinData<IIC_fpCVTHS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTHS , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1]>,
//
// Single-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTSI , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTSI , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Double-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTDI , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTDI , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Integer to Single-Precision FP Convert
- InstrItinData<IIC_fpCVTIS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTIS , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Integer to Double-Precision FP Convert
- InstrItinData<IIC_fpCVTID , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpCVTID , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Single-precision FP ALU
- InstrItinData<IIC_fpALU32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpALU32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1, 1]>,
//
// Double-precision FP ALU
- InstrItinData<IIC_fpALU64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpALU64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1, 1]>,
//
// Single-precision FP Multiply
- InstrItinData<IIC_fpMUL32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMUL32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<6, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [5, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [5, 1, 1]>,
//
// Double-precision FP Multiply
- InstrItinData<IIC_fpMUL64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMUL64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<7, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 1, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 1, 1]>,
//
// Single-precision FP MAC
- InstrItinData<IIC_fpMAC32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMAC32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<9, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [8, 0, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [8, 1, 1, 1]>,
//
// Double-precision FP MAC
- InstrItinData<IIC_fpMAC64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMAC64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<10, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [9, 0, 1, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [9, 1, 1, 1]>,
//
// Single-precision FP DIV
- InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<16, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<10, [A9_NPipe]>], [15, 1, 1]>,
+ InstrStage<10, [A9_NPipe]>],
+ [15, 1, 1]>,
//
// Double-precision FP DIV
- InstrItinData<IIC_fpDIV64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpDIV64 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<26, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<20, [A9_NPipe]>], [25, 1, 1]>,
+ InstrStage<20, [A9_NPipe]>],
+ [25, 1, 1]>,
//
// Single-precision FP SQRT
- InstrItinData<IIC_fpSQRT32, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpSQRT32, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<18, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<13, [A9_NPipe]>], [17, 1]>,
+ InstrStage<13, [A9_NPipe]>],
+ [17, 1]>,
//
// Double-precision FP SQRT
- InstrItinData<IIC_fpSQRT64, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpSQRT64, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<33, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<28, [A9_NPipe]>], [32, 1]>,
+ InstrStage<28, [A9_NPipe]>],
+ [32, 1]>,
//
// Integer to Single-precision Move
- InstrItinData<IIC_fpMOVIS, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMOVIS, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra 1 latency cycle since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Integer to Double-precision Move
- InstrItinData<IIC_fpMOVID, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMOVID, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra 1 latency cycle since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1, 1]>,
//
// Single-precision to Integer Move
- InstrItinData<IIC_fpMOVSI, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMOVSI, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1]>,
//
// Double-precision to Integer Move
- InstrItinData<IIC_fpMOVDI, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpMOVDI, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [1, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1, 1]>,
//
// Single-precision FP Load
- InstrItinData<IIC_fpLoad32, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpLoad32, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [1, 1]>,
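Here the zero increment ties the two resources together: InstrStage<1, [A9_NPipe], 0> followed by InstrStage<1, [A9_LSUnit]> reserves the NEON pipe and the load/store unit in the same cycle, modeling that a floating-point load occupies both at once; the VLDn/VSTn entries below use the same pattern with longer counts. Reduced to the two stages that matter (the def name is hypothetical):

    def HypoFPLoad : InstrItinData<IIC_fpLoad32,
      [InstrStage<1, [A9_NPipe], 0>,  // NEON pipe busy this cycle...
       InstrStage<1, [A9_LSUnit]>],   // ...and the LS unit in the same cycle
      [1, 1]>;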
//
// Double-precision FP Load
- InstrItinData<IIC_fpLoad64, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ // FIXME: Result latency is 1 if address is 64-bit aligned.
+ InstrItinData<IIC_fpLoad64, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [2, 1]>,
//
// FP Load Multiple
- InstrItinData<IIC_fpLoadm, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpLoad_m, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrStage<2, [A9_DRegsN], 0, Reserved>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1, 1]>,
+ //
+ // FP Load Multiple + update
+ InstrItinData<IIC_fpLoad_mu,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>], [2, 1, 1, 1]>,
//
// Single-precision FP Store
- InstrItinData<IIC_fpStore32,[InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpStore32,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [1, 1]>,
//
// Double-precision FP Store
- InstrItinData<IIC_fpStore64,[InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpStore64,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>],
+ [1, 1]>,
//
// FP Store Multiple
- InstrItinData<IIC_fpStorem, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrItinData<IIC_fpStore_m,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>], [1, 1, 1, 1]>,
+ //
+ // FP Store Multiple + update
+ InstrItinData<IIC_fpStore_mu,[InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrStage<2, [A9_DRegsN], 0, Reserved>,
+ InstrStage<1, [A9_NPipe], 0>,
+ InstrStage<1, [A9_LSUnit]>], [2, 1, 1, 1]>,
// NEON
- // Issue through integer pipeline, and execute in NEON unit.
- // FIXME: Neon pipeline and LdSt unit are multiplexed.
- // Add some syntactic sugar to model this!
// VLD1
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD1, [InstrStage<1, [A9_DRegsN], 0, Required>,
- InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ // FIXME: Conservatively assume insufficient alignment.
+ InstrItinData<IIC_VLD1, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1]>,
+ // VLD1x2
+ InstrItinData<IIC_VLD1x2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 2, 1]>,
+ // VLD1x3
+ InstrItinData<IIC_VLD1x3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 2, 3, 1]>,
+ // VLD1x4
+ InstrItinData<IIC_VLD1x4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 2, 3, 3, 1]>,
+ // VLD1u
+ InstrItinData<IIC_VLD1u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 2, 1]>,
+ // VLD1x2u
+ InstrItinData<IIC_VLD1x2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 2, 2, 1]>,
+ // VLD1x3u
+ InstrItinData<IIC_VLD1x3u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 2, 3, 2, 1]>,
+ // VLD1x4u
+ InstrItinData<IIC_VLD1x4u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 2, 3, 3, 2, 1]>,
+ //
+ // VLD1ln
+ InstrItinData<IIC_VLD1ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [4, 1, 1, 1]>,
+ //
+ // VLD1lnu
+ InstrItinData<IIC_VLD1lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [4, 2, 1, 1, 1, 1]>,
+ //
+ // VLD1dup
+ InstrItinData<IIC_VLD1dup, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 1]>,
+ //
+ // VLD1dupu
+ InstrItinData<IIC_VLD1dupu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 2, 1, 1]>,
//
// VLD2
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD2, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 6 cycles
- InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_VLD2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 3, 1]>,
+ //
+ // VLD2x2
+ InstrItinData<IIC_VLD2x2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 4, 3, 4, 1]>,
+ //
+ // VLD2ln
+ InstrItinData<IIC_VLD2ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [4, 4, 1, 1, 1, 1]>,
+ //
+ // VLD2u
+ InstrItinData<IIC_VLD2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 3, 2, 1, 1, 1]>,
+ //
+ // VLD2x2u
+ InstrItinData<IIC_VLD2x2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 4, 3, 4, 2, 1]>,
+ //
+ // VLD2lnu
+ InstrItinData<IIC_VLD2lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [4, 4, 2, 1, 1, 1, 1, 1]>,
+ //
+ // VLD2dup
+ InstrItinData<IIC_VLD2dup, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 3, 1]>,
+ //
+ // VLD2dupu
+ InstrItinData<IIC_VLD2dupu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [3, 3, 2, 1, 1]>,
//
// VLD3
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD3, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 6 cycles
- InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>], [2, 2, 2, 1]>,
+ InstrItinData<IIC_VLD3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<10,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<4, [A9_NPipe], 0>,
+ InstrStage<4, [A9_LSUnit]>],
+ [4, 4, 5, 1]>,
+ //
+ // VLD3ln
+ InstrItinData<IIC_VLD3ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<11,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<5, [A9_NPipe], 0>,
+ InstrStage<5, [A9_LSUnit]>],
+ [5, 5, 6, 1, 1, 1, 1, 2]>,
+ //
+ // VLD3u
+ InstrItinData<IIC_VLD3u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<10,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<4, [A9_NPipe], 0>,
+ InstrStage<4, [A9_LSUnit]>],
+ [4, 4, 5, 2, 1]>,
+ //
+ // VLD3lnu
+ InstrItinData<IIC_VLD3lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<11,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<5, [A9_NPipe], 0>,
+ InstrStage<5, [A9_LSUnit]>],
+ [5, 5, 6, 2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VLD3dup
+ InstrItinData<IIC_VLD3dup, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 3, 4, 1]>,
+ //
+ // VLD3dupu
+ InstrItinData<IIC_VLD3dupu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 3, 4, 2, 1, 1]>,
//
// VLD4
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD4, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 6 cycles
- InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>], [2, 2, 2, 2, 1]>,
- //
- // VST
- // FIXME: We don't model this instruction properly
- InstrItinData<IIC_VST, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 6 cycles
- InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1], 0>,
- InstrStage<1, [A9_LSPipe]>,
- InstrStage<1, [A9_NPipe]>]>,
+ InstrItinData<IIC_VLD4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<10,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<4, [A9_NPipe], 0>,
+ InstrStage<4, [A9_LSUnit]>],
+ [4, 4, 5, 5, 1]>,
+ //
+ // VLD4ln
+ InstrItinData<IIC_VLD4ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<11,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<5, [A9_NPipe], 0>,
+ InstrStage<5, [A9_LSUnit]>],
+ [5, 5, 6, 6, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VLD4u
+ InstrItinData<IIC_VLD4u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<10,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<4, [A9_NPipe], 0>,
+ InstrStage<4, [A9_LSUnit]>],
+ [4, 4, 5, 5, 2, 1]>,
+ //
+ // VLD4lnu
+ InstrItinData<IIC_VLD4lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<11,[A9_DRegsVFP], 0, Reserved>,
+ InstrStage<5, [A9_NPipe], 0>,
+ InstrStage<5, [A9_LSUnit]>],
+ [5, 5, 6, 6, 2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VLD4dup
+ InstrItinData<IIC_VLD4dup, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 3, 4, 4, 1]>,
+ //
+ // VLD4dupu
+ InstrItinData<IIC_VLD4dupu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [3, 3, 4, 4, 2, 1, 1]>,
+ //
+ // VST1
+ InstrItinData<IIC_VST1, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1]>,
+ //
+ // VST1x2
+ InstrItinData<IIC_VST1x2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST1x3
+ InstrItinData<IIC_VST1x3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST1x4
+ InstrItinData<IIC_VST1x4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST1u
+ InstrItinData<IIC_VST1u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1]>,
+ //
+ // VST1x2u
+ InstrItinData<IIC_VST1x2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST1x3u
+ InstrItinData<IIC_VST1x3u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST1x4u
+ InstrItinData<IIC_VST1x4u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST1ln
+ InstrItinData<IIC_VST1ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1]>,
+ //
+ // VST1lnu
+ InstrItinData<IIC_VST1lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1]>,
+ //
+ // VST2
+ InstrItinData<IIC_VST2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST2x2
+ InstrItinData<IIC_VST2x2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST2u
+ InstrItinData<IIC_VST2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST2x2u
+ InstrItinData<IIC_VST2x2u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST2ln
+ InstrItinData<IIC_VST2ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe], 0>,
+ InstrStage<2, [A9_LSUnit]>],
+ [1, 1, 1, 1]>,
+ //
+ // VST2lnu
+ InstrItinData<IIC_VST2lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1]>,
+ //
+ // VST3
+ InstrItinData<IIC_VST3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST3u
+ InstrItinData<IIC_VST3u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST3ln
+ InstrItinData<IIC_VST3ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2]>,
+ //
+ // VST3lnu
+ InstrItinData<IIC_VST3lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2]>,
+ //
+ // VST4
+ InstrItinData<IIC_VST4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4u
+ InstrItinData<IIC_VST4u, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4ln
+ InstrItinData<IIC_VST4ln, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [1, 1, 1, 1, 2, 2]>,
+ //
+ // VST4lnu
+ InstrItinData<IIC_VST4lnu, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<3, [A9_NPipe], 0>,
+ InstrStage<3, [A9_LSUnit]>],
+ [2, 1, 1, 1, 1, 1, 2, 2]>,
+
//
// Double-register Integer Unary
- InstrItinData<IIC_VUNAiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VUNAiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2]>,
//
// Quad-register Integer Unary
- InstrItinData<IIC_VUNAiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VUNAiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2]>,
//
// Double-register Integer Q-Unary
- InstrItinData<IIC_VQUNAiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VQUNAiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Quad-register Integer CountQ-Unary
- InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1]>,
//
// Double-register Integer Binary
- InstrItinData<IIC_VBINiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VBINiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 2, 2]>,
//
// Quad-register Integer Binary
- InstrItinData<IIC_VBINiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VBINiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 2, 2]>,
//
// Double-register Integer Subtract
- InstrItinData<IIC_VSUBiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSUBiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 2, 1]>,
//
// Quad-register Integer Subtract
- InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 2, 1]>,
//
// Double-register Integer Shift
- InstrItinData<IIC_VSHLiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSHLiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 1, 1]>,
//
// Quad-register Integer Shift
- InstrItinData<IIC_VSHLiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSHLiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 1, 1]>,
//
// Double-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSHLi4D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1, 1]>,
//
// Quad-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 1, 1]>,
//
// Double-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VBINi4D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2, 2]>,
//
// Quad-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VBINi4Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2, 2]>,
//
// Double-register Integer Subtract (4 cycle)
- InstrItinData<IIC_VSUBiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSUBi4D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2, 1]>,
//
// Quad-register Integer Subtract (4 cycle)
- InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VSUBi4Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [4, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [4, 2, 1]>,
//
// Double-register Integer Count
- InstrItinData<IIC_VCNTiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VCNTiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 2, 2]>,
//
// Quad-register Integer Count
// Result written in N3, but that is relative to the last cycle of the
// multi-cycle sequence, so we use 4 for those cases
- InstrItinData<IIC_VCNTiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VCNTiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [4, 2, 2]>,
+ InstrStage<2, [A9_NPipe]>],
+ [4, 2, 2]>,
//
// Double-register Absolute Difference and Accumulate
- InstrItinData<IIC_VABAD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VABAD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [6, 3, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [6, 3, 2, 1]>,
//
// Quad-register Absolute Difference and Accumulate
- InstrItinData<IIC_VABAQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VABAQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 3, 2, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 3, 2, 1]>,
//
// Double-register Integer Pair Add Long
- InstrItinData<IIC_VPALiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VPALiD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [6, 3, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [6, 3, 1]>,
//
// Quad-register Integer Pair Add Long
- InstrItinData<IIC_VPALiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VPALiQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 3, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 3, 1]>,
//
// Double-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMULi16D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [6, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [6, 2, 2]>,
//
// Quad-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMULi16Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [7, 2, 2]>,
+ InstrStage<2, [A9_NPipe]>],
+ [7, 2, 2]>,
//
// Double-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMULi32D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [7, 2, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [7, 2, 1]>,
//
// Quad-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMULi32Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<4, [A9_NPipe]>], [9, 2, 1]>,
+ InstrStage<4, [A9_NPipe]>],
+ [9, 2, 1]>,
//
// Double-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACi16D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [6, 3, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [6, 3, 2, 2]>,
//
// Double-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32D, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACi32D, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [7, 3, 2, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [7, 3, 2, 1]>,
//
// Quad-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACi16Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [7, 3, 2, 2]>,
+ InstrStage<2, [A9_NPipe]>],
+ [7, 3, 2, 2]>,
//
// Quad-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACi32Q, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<4, [A9_NPipe]>], [9, 3, 2, 1]>,
+ InstrStage<4, [A9_NPipe]>],
+ [9, 3, 2, 1]>,
+
+ //
+ // Move
+ InstrItinData<IIC_VMOV, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<1, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Move Immediate
- InstrItinData<IIC_VMOVImm, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMOVImm, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [3]>,
+ InstrStage<1, [A9_NPipe]>],
+ [3]>,
//
// Double-register Permute Move
- InstrItinData<IIC_VMOVD, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
- InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe]>], [2, 1]>,
+ InstrItinData<IIC_VMOVD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
+ InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1]>,
//
// Quad-register Permute Move
- // Result written in N2, but that is relative to the last cycle of multicycle,
- // so we use 3 for those cases
- InstrItinData<IIC_VMOVQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
- InstrStage<4, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 1]>,
+ InstrItinData<IIC_VMOVQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
+ InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1]>,
//
// Integer to Single-precision Move
- InstrItinData<IIC_VMOVIS , [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
+ InstrItinData<IIC_VMOVIS , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1]>,
//
// Integer to Double-precision Move
- InstrItinData<IIC_VMOVID , [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
+ InstrItinData<IIC_VMOVID , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [1, 1, 1]>,
//
// Single-precision to Integer Move
- InstrItinData<IIC_VMOVSI , [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
+ InstrItinData<IIC_VMOVSI , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1]>,
//
// Double-precision to Integer Move
- InstrItinData<IIC_VMOVDI , [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
+ InstrItinData<IIC_VMOVDI , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 2, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 2, 1]>,
//
// Integer to Lane Move
- InstrItinData<IIC_VMOVISL , [InstrStage<1, [A9_DRegsN], 0, Required>,
- // FIXME: all latencies are arbitrary, no information is available
+ InstrItinData<IIC_VMOVISL , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<4, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 1, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 1, 1]>,
//
+ // Vector narrow move
+ InstrItinData<IIC_VMOVN, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
+ InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [3, 1]>,
+ //
// Double-register FP Unary
- InstrItinData<IIC_VUNAD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VUNAD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [5, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [5, 2]>,
//
// Quad-register FP Unary
// Result written in N5, but that is relative to the last cycle of the
// multi-cycle sequence, so we use 6 for those cases
- InstrItinData<IIC_VUNAQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VUNAQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 2]>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 2]>,
//
// Double-register FP Binary
// FIXME: We're using this itin for many instructions and [2, 2] here is too
// optimistic.
- InstrItinData<IIC_VBIND, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 7 cycles
+ InstrItinData<IIC_VBIND, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [5, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [5, 2, 2]>,
+
+ //
+ // VPADD, etc.
+ InstrItinData<IIC_VPBIND, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
+ InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [5, 1, 1]>,
+ //
+ // Double-register FP VMUL
+ InstrItinData<IIC_VFMULD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
+ InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [5, 2, 1]>,
//
// Quad-register FP Binary
// Result written in N5, but that is relative to the last cycle of the
// multi-cycle sequence, so we use 6 for those cases
// FIXME: We're using this itin for many instructions and [2, 2] here is too
// optimistic.
- InstrItinData<IIC_VBINQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 8 cycles
+ InstrItinData<IIC_VBINQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 2, 2]>,
+ //
+ // Quad-register FP VMUL
+ InstrItinData<IIC_VFMULQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 2, 2]>,
+ InstrStage<1, [A9_NPipe]>],
+ [6, 2, 1]>,
//
// Double-register FP Multiply-Accumulate
- InstrItinData<IIC_VMACD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 3, 2, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 3, 2, 1]>,
//
// Quad-register FP Multiply-Accumulate
// Result written in N9, but that is relative to the last cycle of the
// multi-cycle itinerary, so we use 10 for those cases.
- InstrItinData<IIC_VMACQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VMACQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<4, [A9_NPipe]>], [8, 4, 2, 1]>,
+ InstrStage<4, [A9_NPipe]>],
+ [8, 4, 2, 1]>,
//
// Double-register Reciprocal Step
- InstrItinData<IIC_VRECSD, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 7 cycles
- InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [6, 2, 2]>,
+ InstrItinData<IIC_VRECSD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 10 cycles
+ InstrStage<11, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [9, 2, 2]>,
//
// Quad-register Reciprocal Step
- InstrItinData<IIC_VRECSQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 9 cycles
- InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<4, [A9_NPipe]>], [8, 2, 2]>,
+ InstrItinData<IIC_VRECSQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 11 cycles
+ InstrStage<12, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe]>],
+ [10, 2, 2]>,
//
// Double-register Permute
- InstrItinData<IIC_VPERMD, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VPERMD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 2, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 2, 1, 1]>,
//
// Quad-register Permute
// Result written in N2, but that is relative to the last cycle of the
// multi-cycle itinerary, so we use 3 for those cases.
- InstrItinData<IIC_VPERMQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VPERMQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 3, 1, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 3, 1, 1]>,
//
// Quad-register Permute (3 cycle issue)
// Result written in N2, but that is relative to the last cycle of the
// multi-cycle itinerary, so we use 4 for those cases.
- InstrItinData<IIC_VPERMQ3, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VPERMQ3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<3, [A9_LSPipe]>], [4, 4, 1, 1]>,
+ InstrStage<3, [A9_NPipe]>],
+ [4, 4, 1, 1]>,
//
// Double-register VEXT
- InstrItinData<IIC_VEXTD, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 7 cycles
+ InstrItinData<IIC_VEXTD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<1, [A9_NPipe]>], [2, 1, 1]>,
+ InstrStage<1, [A9_NPipe]>],
+ [2, 1, 1]>,
//
// Quad-register VEXT
- InstrItinData<IIC_VEXTQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
- // Extra latency cycles since wbck is 9 cycles
+ InstrItinData<IIC_VEXTQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 1, 1]>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 1, 2]>,
//
// VTB
- InstrItinData<IIC_VTB1, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VTB1, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 2, 1]>,
- InstrItinData<IIC_VTB2, [InstrStage<2, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 2, 1]>,
+ InstrItinData<IIC_VTB2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 2, 2, 1]>,
- InstrItinData<IIC_VTB3, [InstrStage<2, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 2, 2, 1]>,
+ InstrItinData<IIC_VTB3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<2, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<3, [A9_NPipe]>], [4, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTB4, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_NPipe]>],
+ [4, 2, 2, 3, 1]>,
+ InstrItinData<IIC_VTB4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<3, [A9_NPipe]>], [4, 2, 2, 3, 3, 1]>,
+ InstrStage<3, [A9_NPipe]>],
+ [4, 2, 2, 3, 3, 1]>,
//
// VTBX
- InstrItinData<IIC_VTBX1, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrItinData<IIC_VTBX1, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 1, 2, 1]>,
- InstrItinData<IIC_VTBX2, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 1, 2, 1]>,
+ InstrItinData<IIC_VTBX2, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [3, 1, 2, 2, 1]>,
- InstrItinData<IIC_VTBX3, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<2, [A9_NPipe]>],
+ [3, 1, 2, 2, 1]>,
+ InstrItinData<IIC_VTBX3, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<3, [A9_NPipe]>], [4, 1, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTBX4, [InstrStage<1, [A9_DRegsN], 0, Required>,
+ InstrStage<3, [A9_NPipe]>],
+ [4, 1, 2, 2, 3, 1]>,
+ InstrItinData<IIC_VTBX4, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [4, 1, 2, 2, 3, 3, 1]>
+ InstrStage<2, [A9_NPipe]>],
+ [4, 1, 2, 2, 3, 3, 1]>
]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
index 08b560c..c1880a7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
@@ -19,7 +19,7 @@ def V6_Pipe : FuncUnit; // pipeline
// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual"
//
def ARMV6Itineraries : ProcessorItineraries<
- [V6_Pipe], [
+ [V6_Pipe], [], [
//
// No operand cycles
InstrItinData<IIC_iALUx , [InstrStage<1, [V6_Pipe]>]>,
@@ -30,10 +30,20 @@ def ARMV6Itineraries : ProcessorItineraries<
InstrItinData<IIC_iALUsi , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iALUsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
//
+ // Bitwise Instructions that produce a result
+ InstrItinData<IIC_iBITi , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
+ InstrItinData<IIC_iBITr , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
+ InstrItinData<IIC_iBITsi , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iBITsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
+ //
// Unary Instructions that produce a result
InstrItinData<IIC_iUNAr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iUNAsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
+ //
+ // Zero and sign extension instructions
+ InstrItinData<IIC_iEXTr , [InstrStage<1, [V6_Pipe]>], [1, 1]>,
+ InstrItinData<IIC_iEXTAr , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iEXTAsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
//
// Compare instructions
InstrItinData<IIC_iCMPi , [InstrStage<1, [V6_Pipe]>], [2]>,
@@ -41,17 +51,39 @@ def ARMV6Itineraries : ProcessorItineraries<
InstrItinData<IIC_iCMPsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iCMPsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
//
+ // Test instructions
+ InstrItinData<IIC_iTSTi , [InstrStage<1, [V6_Pipe]>], [2]>,
+ InstrItinData<IIC_iTSTr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
+ InstrItinData<IIC_iTSTsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
+ InstrItinData<IIC_iTSTsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
+ //
// Move instructions, unconditional
InstrItinData<IIC_iMOVi , [InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iMOVr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iMOVsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iMOVsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
+ InstrItinData<IIC_iMOVix2 , [InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [2]>,
+ InstrItinData<IIC_iMOVix2addpc,[InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [3]>,
+ InstrItinData<IIC_iMOVix2ld , [InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [5]>,
//
// Move instructions, conditional
InstrItinData<IIC_iCMOVi , [InstrStage<1, [V6_Pipe]>], [3]>,
InstrItinData<IIC_iCMOVr , [InstrStage<1, [V6_Pipe]>], [3, 2]>,
InstrItinData<IIC_iCMOVsi , [InstrStage<1, [V6_Pipe]>], [3, 1]>,
InstrItinData<IIC_iCMOVsr , [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
+ InstrItinData<IIC_iCMOVix2 , [InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [4]>,
+ //
+ // MVN instructions
+ InstrItinData<IIC_iMVNi , [InstrStage<1, [V6_Pipe]>], [2]>,
+ InstrItinData<IIC_iMVNr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
+ InstrItinData<IIC_iMVNsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
+ InstrItinData<IIC_iMVNsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
// Integer multiply pipeline
//
@@ -65,50 +97,90 @@ def ARMV6Itineraries : ProcessorItineraries<
// Integer load pipeline
//
// Immediate offset
- InstrItinData<IIC_iLoadi , [InstrStage<1, [V6_Pipe]>], [4, 1]>,
+ InstrItinData<IIC_iLoad_i , [InstrStage<1, [V6_Pipe]>], [4, 1]>,
+ InstrItinData<IIC_iLoad_bh_i, [InstrStage<1, [V6_Pipe]>], [4, 1]>,
+ InstrItinData<IIC_iLoad_d_i , [InstrStage<1, [V6_Pipe]>], [4, 1]>,
//
// Register offset
- InstrItinData<IIC_iLoadr , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
+ InstrItinData<IIC_iLoad_r , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_r, [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_r , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iLoadsi , [InstrStage<2, [V6_Pipe]>], [5, 2, 1]>,
+ InstrItinData<IIC_iLoad_si , [InstrStage<2, [V6_Pipe]>], [5, 2, 1]>,
+ InstrItinData<IIC_iLoad_bh_si, [InstrStage<2, [V6_Pipe]>], [5, 2, 1]>,
//
// Immediate offset with update
- InstrItinData<IIC_iLoadiu , [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
+ InstrItinData<IIC_iLoad_iu , [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
+ InstrItinData<IIC_iLoad_bh_iu, [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
//
// Register offset with update
- InstrItinData<IIC_iLoadru , [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_ru , [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_bh_ru, [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
+ InstrItinData<IIC_iLoad_d_ru , [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iLoadsiu , [InstrStage<2, [V6_Pipe]>], [5, 2, 2, 1]>,
+ InstrItinData<IIC_iLoad_siu, [InstrStage<2, [V6_Pipe]>], [5, 2, 2, 1]>,
+ InstrItinData<IIC_iLoad_bh_siu,[InstrStage<2, [V6_Pipe]>], [5, 2, 2, 1]>,
+
+ //
+ // Load multiple, def is the 5th operand.
+ InstrItinData<IIC_iLoad_m , [InstrStage<3, [V6_Pipe]>], [1, 1, 1, 1, 4]>,
+ //
+ // Load multiple + update, defs are the 1st and 5th operands.
+ InstrItinData<IIC_iLoad_mu , [InstrStage<3, [V6_Pipe]>], [2, 1, 1, 1, 4]>,
+ //
+ // Load multiple plus branch
+ InstrItinData<IIC_iLoad_mBr, [InstrStage<3, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [1, 2, 1, 1, 4]>,
+
+ //
+ // iLoadi + iALUr for t2LDRpci_pic.
+ InstrItinData<IIC_iLoadiALU, [InstrStage<1, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [3, 1]>,
//
- // Load multiple
- InstrItinData<IIC_iLoadm , [InstrStage<3, [V6_Pipe]>]>,
+ // Pop, def is the 3rd operand.
+ InstrItinData<IIC_iPop , [InstrStage<3, [V6_Pipe]>], [1, 1, 4]>,
+ //
+ // Pop + branch, def is the 3rd operand.
+ InstrItinData<IIC_iPop_Br, [InstrStage<3, [V6_Pipe]>,
+ InstrStage<1, [V6_Pipe]>], [1, 2, 4]>,
// Integer store pipeline
//
// Immediate offset
- InstrItinData<IIC_iStorei , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
+ InstrItinData<IIC_iStore_i , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
+ InstrItinData<IIC_iStore_bh_i, [InstrStage<1, [V6_Pipe]>], [2, 1]>,
+ InstrItinData<IIC_iStore_d_i , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
//
// Register offset
- InstrItinData<IIC_iStorer , [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
-
+ InstrItinData<IIC_iStore_r , [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_r, [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
+ InstrItinData<IIC_iStore_d_r , [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iStoresi , [InstrStage<2, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iStore_si , [InstrStage<2, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iStore_bh_si, [InstrStage<2, [V6_Pipe]>], [2, 2, 1]>,
//
// Immediate offset with update
- InstrItinData<IIC_iStoreiu , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iStore_iu , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
+ InstrItinData<IIC_iStore_bh_iu, [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
//
// Register offset with update
- InstrItinData<IIC_iStoreru , [InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
+ InstrItinData<IIC_iStore_ru, [InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
+ InstrItinData<IIC_iStore_bh_ru,[InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
+ InstrItinData<IIC_iStore_d_ru, [InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iStoresiu, [InstrStage<2, [V6_Pipe]>], [2, 2, 2, 1]>,
+ InstrItinData<IIC_iStore_siu, [InstrStage<2, [V6_Pipe]>], [2, 2, 2, 1]>,
+ InstrItinData<IIC_iStore_bh_siu,[InstrStage<2, [V6_Pipe]>], [2, 2, 2, 1]>,
//
// Store multiple
- InstrItinData<IIC_iStorem , [InstrStage<3, [V6_Pipe]>]>,
+ InstrItinData<IIC_iStore_m , [InstrStage<3, [V6_Pipe]>]>,
+ //
+ // Store multiple + update
+ InstrItinData<IIC_iStore_mu , [InstrStage<3, [V6_Pipe]>], [2]>,
// Branch
//
@@ -183,6 +255,18 @@ def ARMV6Itineraries : ProcessorItineraries<
// Double-precision FP SQRT
InstrItinData<IIC_fpSQRT64 , [InstrStage<29, [V6_Pipe]>], [34, 2, 2]>,
//
+ // Integer to Single-precision Move
+ InstrItinData<IIC_fpMOVIS, [InstrStage<1, [V6_Pipe]>], [10, 1]>,
+ //
+ // Integer to Double-precision Move
+ InstrItinData<IIC_fpMOVID, [InstrStage<1, [V6_Pipe]>], [10, 1, 1]>,
+ //
+ // Single-precision to Integer Move
+ InstrItinData<IIC_fpMOVSI, [InstrStage<1, [V6_Pipe]>], [10, 1]>,
+ //
+ // Double-precision to Integer Move
+ InstrItinData<IIC_fpMOVDI, [InstrStage<1, [V6_Pipe]>], [10, 10, 1]>,
+ //
// Single-precision FP Load
InstrItinData<IIC_fpLoad32 , [InstrStage<1, [V6_Pipe]>], [5, 2, 2]>,
//
@@ -190,7 +274,10 @@ def ARMV6Itineraries : ProcessorItineraries<
InstrItinData<IIC_fpLoad64 , [InstrStage<1, [V6_Pipe]>], [5, 2, 2]>,
//
// FP Load Multiple
- InstrItinData<IIC_fpLoadm , [InstrStage<3, [V6_Pipe]>]>,
+ InstrItinData<IIC_fpLoad_m , [InstrStage<3, [V6_Pipe]>], [2, 1, 1, 5]>,
+ //
+ // FP Load Multiple + update
+ InstrItinData<IIC_fpLoad_mu, [InstrStage<3, [V6_Pipe]>], [3, 2, 1, 1, 5]>,
//
// Single-precision FP Store
InstrItinData<IIC_fpStore32 , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
@@ -200,5 +287,8 @@ def ARMV6Itineraries : ProcessorItineraries<
InstrItinData<IIC_fpStore64 , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
//
// FP Store Multiple
- InstrItinData<IIC_fpStorem , [InstrStage<3, [V6_Pipe]>]>
+ InstrItinData<IIC_fpStore_m, [InstrStage<3, [V6_Pipe]>], [2, 2, 2, 2]>,
+ //
+ // FP Store Multiple + update
+ InstrItinData<IIC_fpStore_mu,[InstrStage<3, [V6_Pipe]>], [3, 2, 2, 2, 2]>
]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index a289407..2b9202b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -29,10 +29,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
- const Value *DstSV,
- uint64_t DstSVOff,
- const Value *SrcSV,
- uint64_t SrcSVOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
// Do repeated 4-byte loads and stores. To be improved.
// This requires 4-byte alignment.
if ((Align & 3) != 0)
@@ -66,7 +64,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, isVolatile, false, 0);
+ SrcPtrInfo.getWithOffset(SrcOff), isVolatile,
+ false, 0);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -77,7 +76,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, isVolatile, false, 0);
+ DstPtrInfo.getWithOffset(DstOff),
+ isVolatile, false, 0);
DstOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
@@ -103,7 +103,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, false, false, 0);
+ SrcPtrInfo.getWithOffset(SrcOff), false, false, 0);
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
@@ -125,7 +125,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, false, false, 0);
+ DstPtrInfo.getWithOffset(DstOff), false, false, 0);
++i;
DstOff += VTSize;
BytesLeft -= VTSize;
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
index d7d00c2..7533690 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -33,10 +33,8 @@ public:
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
- const Value *DstSV,
- uint64_t DstSVOff,
- const Value *SrcSV,
- uint64_t SrcSVOff) const;
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
index cb539f4..0bd740c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -13,6 +13,7 @@
#include "ARMSubtarget.h"
#include "ARMGenSubtarget.inc"
+#include "ARMBaseRegisterInfo.h"
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -24,45 +25,52 @@ ReserveR9("arm-reserve-r9", cl::Hidden,
cl::desc("Reserve R9, making it unavailable as GPR"));
static cl::opt<bool>
-UseMOVT("arm-use-movt",
- cl::init(true), cl::Hidden);
+DarwinUseMOVT("arm-darwin-use-movt", cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+StrictAlign("arm-strict-align", cl::Hidden,
+ cl::desc("Disallow all unaligned memory accesses"));
ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS,
bool isT)
: ARMArchVersion(V4)
+ , ARMProcFamily(Others)
, ARMFPUType(None)
, UseNEONForSinglePrecisionFP(false)
- , SlowVMLx(false)
+ , SlowFPVMLx(false)
, SlowFPBrcc(false)
, IsThumb(isT)
, ThumbMode(Thumb1)
, NoARM(false)
, PostRAScheduler(false)
, IsR9Reserved(ReserveR9)
- , UseMovt(UseMOVT)
+ , UseMovt(false)
, HasFP16(false)
+ , HasD16(false)
, HasHardwareDivide(false)
, HasT2ExtractPack(false)
, HasDataBarrier(false)
, Pref32BitThumb(false)
+ , HasMPExtension(false)
, FPOnlySP(false)
+ , AllowsUnalignedMem(false)
, stackAlignment(4)
, CPUString("generic")
- , TargetType(isELF) // Default to ELF unless otherwise specified.
+ , TargetTriple(TT)
, TargetABI(ARM_ABI_APCS) {
- // default to soft float ABI
+ // Default to soft float ABI
if (FloatABIType == FloatABI::Default)
FloatABIType = FloatABI::Soft;
// Determine default and user specified characteristics
- // Parse features string.
- CPUString = ParseSubtargetFeatures(FS, CPUString);
-
// When no arch is specified either by CPU or by attributes, make the default
// ARMv4T.
- if (CPUString == "generic" && (FS.empty() || FS == "generic"))
+ const char *ARMArchFeature = "";
+ if (CPUString == "generic" && (FS.empty() || FS == "generic")) {
ARMArchVersion = V4T;
+ ARMArchFeature = ",+v4t";
+ }
// Set the boolean corresponding to the current target triple, or the default
// if one cannot be determined, to true.
@@ -80,47 +88,78 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS,
unsigned SubVer = TT[Idx];
if (SubVer >= '7' && SubVer <= '9') {
ARMArchVersion = V7A;
- if (Len >= Idx+2 && TT[Idx+1] == 'm')
+ ARMArchFeature = ",+v7a";
+ if (Len >= Idx+2 && TT[Idx+1] == 'm') {
ARMArchVersion = V7M;
+ ARMArchFeature = ",+v7m";
+ }
} else if (SubVer == '6') {
ARMArchVersion = V6;
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2')
+ ARMArchFeature = ",+v6";
+ if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2') {
ARMArchVersion = V6T2;
+ ARMArchFeature = ",+v6t2";
+ }
} else if (SubVer == '5') {
ARMArchVersion = V5T;
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e')
+ ARMArchFeature = ",+v5t";
+ if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e') {
ARMArchVersion = V5TE;
+ ARMArchFeature = ",+v5te";
+ }
} else if (SubVer == '4') {
- if (Len >= Idx+2 && TT[Idx+1] == 't')
+ if (Len >= Idx+2 && TT[Idx+1] == 't') {
ARMArchVersion = V4T;
- else
+ ARMArchFeature = ",+v4t";
+ } else {
ARMArchVersion = V4;
+ ARMArchFeature = "";
+ }
}
}
+ if (TT.find("eabi") != std::string::npos)
+ TargetABI = ARM_ABI_AAPCS;
+
+ // Parse features string. If the first entry in FS (the CPU) is missing,
+ // insert the architecture feature derived from the target triple. This is
+ // important for setting features that are implied based on the architecture
+ // version.
+ std::string FSWithArch;
+ if (FS.empty())
+ FSWithArch = std::string(ARMArchFeature);
+ else if (FS.find(',') == 0)
+ FSWithArch = std::string(ARMArchFeature) + FS;
+ else
+ FSWithArch = FS;
+ CPUString = ParseSubtargetFeatures(FSWithArch, CPUString);
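+ // For example (informal): an "armv7-*" triple with an empty FS produces
+ // FSWithArch == ",+v7a", so the features implied by v7a still get applied.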
+
+ // After parsing Itineraries, set ItinData.IssueWidth.
+ computeIssueWidth();
+
// Thumb2 implies at least V6T2.
if (ARMArchVersion >= V6T2)
ThumbMode = Thumb2;
else if (ThumbMode >= Thumb2)
ARMArchVersion = V6T2;
- if (Len >= 10) {
- if (TT.find("-darwin") != std::string::npos)
- // arm-darwin
- TargetType = isDarwin;
- }
-
- if (TT.find("eabi") != std::string::npos)
- TargetABI = ARM_ABI_AAPCS;
-
if (isAAPCS_ABI())
stackAlignment = 8;
- if (isTargetDarwin())
+ if (!isTargetDarwin())
+ UseMovt = hasV6T2Ops();
+ else {
IsR9Reserved = ReserveR9 | (ARMArchVersion < V6);
+ UseMovt = DarwinUseMOVT && hasV6T2Ops();
+ }
if (!isThumb() || hasThumb2())
PostRAScheduler = true;
+
+ // v6+ may or may not support unaligned mem access depending on the system
+ // configuration.
+ if (!StrictAlign && hasV6Ops() && isTargetDarwin())
+ AllowsUnalignedMem = true;
}
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
@@ -163,7 +202,7 @@ ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
// through a stub.
if (!isDecl && !GV->isWeakForLinker())
return false;
-
+
// Unless we have a symbol with hidden visibility, we have to go through a
// normal $non_lazy_ptr stub because this symbol might be resolved late.
if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
@@ -174,6 +213,34 @@ ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
return false;
}
+unsigned ARMSubtarget::getMispredictionPenalty() const {
+ // If we have a reasonable estimate of the pipeline depth, then we can
+ // estimate the penalty of a misprediction based on that.
+ if (isCortexA8())
+ return 13;
+ else if (isCortexA9())
+ return 8;
+
+ // Otherwise, just return a sensible default.
+ return 10;
+}
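+ // (Usage sketch, assuming a consumer such as an if-conversion heuristic:
+ // the penalty above can be weighed against the cost of executing both
+ // sides of a branch under predication.)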
+
+void ARMSubtarget::computeIssueWidth() {
+ unsigned allStage1Units = 0;
+ for (const InstrItinerary *itin = InstrItins.Itineraries;
+ itin->FirstStage != ~0U; ++itin) {
+ const InstrStage *IS = InstrItins.Stages + itin->FirstStage;
+ allStage1Units |= IS->getUnits();
+ }
+ InstrItins.IssueWidth = 0;
+ while (allStage1Units) {
+ ++InstrItins.IssueWidth;
+ // Clear the lowest set bit; (x & ~(x - 1)) isolates it, so each pass
+ // retires one distinct stage-1 unit from the mask.
+ allStage1Units ^= allStage1Units & ~(allStage1Units - 1);
+ }
+ assert(InstrItins.IssueWidth <= 2 && "itinerary bug, too many stage 1 units");
+}
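+ // Worked example: for a dual-issue core whose first stages name the two
+ // units A9_Issue0 and A9_Issue1, allStage1Units ends up with two bits set;
+ // the loop strips one bit per iteration, so IssueWidth becomes 2. A
+ // single-pipe model such as [V6_Pipe] yields an IssueWidth of 1.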
+
bool ARMSubtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtarget::AntiDepBreakMode& Mode,
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index 67e5803..76c1c3f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -17,7 +17,7 @@
#include "llvm/Target/TargetInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtarget.h"
-#include "ARMBaseRegisterInfo.h"
+#include "llvm/ADT/Triple.h"
#include <string>
namespace llvm {
@@ -29,6 +29,10 @@ protected:
V4, V4T, V5T, V5TE, V6, V6M, V6T2, V7A, V7M
};
+ enum ARMProcFamilyEnum {
+ Others, CortexA8, CortexA9
+ };
+
enum ARMFPEnum {
None, VFPv2, VFPv3, NEON
};
@@ -42,6 +46,9 @@ protected:
/// V6, V6T2, V7A, V7M.
ARMArchEnum ARMArchVersion;
+ /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
+ ARMProcFamilyEnum ARMProcFamily;
+
/// ARMFPUType - Floating Point Unit type.
ARMFPEnum ARMFPUType;
@@ -50,9 +57,9 @@ protected:
/// determine if NEON should actually be used.
bool UseNEONForSinglePrecisionFP;
- /// SlowVMLx - If the VFP2 instructions are available, indicates whether
- /// the VML[AS] instructions are slow (if so, don't use them).
- bool SlowVMLx;
+ /// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
+ /// whether the FP VML[AS] instructions are slow (if so, don't use them).
+ bool SlowFPVMLx;
/// SlowFPBrcc - True if floating point compare + branch is slow.
bool SlowFPBrcc;
@@ -80,6 +87,10 @@ protected:
/// only so far)
bool HasFP16;
+ /// HasD16 - True if subtarget is limited to 16 double precision
+ /// FP registers for VFPv3.
+ bool HasD16;
+
/// HasHardwareDivide - True if subtarget supports [su]div
bool HasHardwareDivide;
@@ -95,10 +106,19 @@ protected:
/// over 16-bit ones.
bool Pref32BitThumb;
+ /// HasMPExtension - True if the subtarget supports Multiprocessing
+ /// extension (ARMv7 only).
+ bool HasMPExtension;
+
/// FPOnlySP - If true, the floating point unit only supports single
/// precision.
bool FPOnlySP;
+ /// AllowsUnalignedMem - If true, the subtarget allows unaligned memory
+ /// accesses for some types. For details, see
+ /// ARMTargetLowering::allowsUnalignedMemoryAccesses().
+ bool AllowsUnalignedMem;
+
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -106,6 +126,9 @@ protected:
/// CPUString - String name of used CPU.
std::string CPUString;
+ /// TargetTriple - What processor and OS we're targeting.
+ Triple TargetTriple;
+
/// Selected instruction itineraries (one entry per itinerary class.)
InstrItineraryData InstrItins;
@@ -136,6 +159,8 @@ protected:
std::string ParseSubtargetFeatures(const std::string &FS,
const std::string &CPU);
+ void computeIssueWidth();
+
bool hasV4TOps() const { return ARMArchVersion >= V4T; }
bool hasV5TOps() const { return ARMArchVersion >= V5T; }
bool hasV5TEOps() const { return ARMArchVersion >= V5TE; }
@@ -143,6 +168,9 @@ protected:
bool hasV6T2Ops() const { return ARMArchVersion >= V6T2; }
bool hasV7Ops() const { return ARMArchVersion >= V7A; }
+ bool isCortexA8() const { return ARMProcFamily == CortexA8; }
+ bool isCortexA9() const { return ARMProcFamily == CortexA9; }
+
bool hasARMOps() const { return !NoARM; }
bool hasVFP2() const { return ARMFPUType >= VFPv2; }
@@ -153,15 +181,17 @@ protected:
bool hasDivide() const { return HasHardwareDivide; }
bool hasT2ExtractPack() const { return HasT2ExtractPack; }
bool hasDataBarrier() const { return HasDataBarrier; }
- bool useVMLx() const {return hasVFP2() && !SlowVMLx; }
+ bool useFPVMLx() const { return !SlowFPVMLx; }
bool isFPBrccSlow() const { return SlowFPBrcc; }
bool isFPOnlySP() const { return FPOnlySP; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
+ bool hasMPExtension() const { return HasMPExtension; }
bool hasFP16() const { return HasFP16; }
+ bool hasD16() const { return HasD16; }
- bool isTargetDarwin() const { return TargetType == isDarwin; }
- bool isTargetELF() const { return TargetType == isELF; }
+ bool isTargetDarwin() const { return TargetTriple.getOS() == Triple::Darwin; }
+ bool isTargetELF() const { return !isTargetDarwin(); }
bool isAPCS_ABI() const { return TargetABI == ARM_ABI_APCS; }
bool isAAPCS_ABI() const { return TargetABI == ARM_ABI_AAPCS; }
@@ -175,8 +205,12 @@ protected:
bool useMovt() const { return UseMovt && hasV6T2Ops(); }
+ bool allowsUnalignedMem() const { return AllowsUnalignedMem; }
+
const std::string & getCPUString() const { return CPUString; }
+ unsigned getMispredictionPenalty() const;
+
/// enablePostRAScheduler - True at 'More' optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtarget::AntiDepBreakMode& Mode,
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 30ff827..0ee773b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -12,15 +12,18 @@
#include "ARMTargetMachine.h"
#include "ARMMCAsmInfo.h"
-#include "ARMFrameInfo.h"
+#include "ARMFrameLowering.h"
#include "ARM.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
using namespace llvm;
+static cl::opt<bool> ExpandMLx("expand-fp-mlx", cl::init(false), cl::Hidden);
+
static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
Triple TheTriple(TT);
switch (TheTriple.getOS()) {
@@ -31,6 +34,26 @@ static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
}
}
+// This is duplicated code. Refactor this.
+static MCStreamer *createMCStreamer(const Target &T, const std::string &TT,
+ MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll,
+ bool NoExecStack) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll);
+ case Triple::MinGW32:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ llvm_unreachable("ARM does not support Windows COFF format");
+ return NULL;
+ default:
+ return createELFStreamer(Ctx, TAB, OS, Emitter, RelaxAll, NoExecStack);
+ }
+}
+
extern "C" void LLVMInitializeARMTarget() {
// Register the target.
RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
@@ -39,6 +62,19 @@ extern "C" void LLVMInitializeARMTarget() {
// Register the target asm info.
RegisterAsmInfoFn A(TheARMTarget, createMCAsmInfo);
RegisterAsmInfoFn B(TheThumbTarget, createMCAsmInfo);
+
+ // Register the MC Code Emitter
+ TargetRegistry::RegisterCodeEmitter(TheARMTarget, createARMMCCodeEmitter);
+ TargetRegistry::RegisterCodeEmitter(TheThumbTarget, createARMMCCodeEmitter);
+
+ // Register the asm backend.
+ TargetRegistry::RegisterAsmBackend(TheARMTarget, createARMAsmBackend);
+ TargetRegistry::RegisterAsmBackend(TheThumbTarget, createARMAsmBackend);
+
+ // Register the object streamer.
+ TargetRegistry::RegisterObjectStreamer(TheARMTarget, createMCStreamer);
+ TargetRegistry::RegisterObjectStreamer(TheThumbTarget, createMCStreamer);
+
}
/// TargetMachine ctor - Create an ARM architecture model.
@@ -49,9 +85,9 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T,
bool isThumb)
: LLVMTargetMachine(T, TT),
Subtarget(TT, FS, isThumb),
- FrameInfo(Subtarget),
JITInfo(),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData())
+{
DefRelocModel = getRelocationModel();
}
@@ -59,12 +95,14 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT,
const std::string &FS)
: ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget),
DataLayout(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:32-i64:32:32-"
+ std::string("e-p:32:32-f64:32:64-i64:32:64-"
"v128:32:128-v64:32:64-n32") :
std::string("e-p:32:32-f64:64:64-i64:64:64-"
"v128:64:128-v64:64:64-n32")),
+ ELFWriterInfo(*this),
TLInfo(*this),
- TSInfo(*this) {
+ TSInfo(*this),
+ FrameLowering(Subtarget) {
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
@@ -77,14 +115,18 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, const std::string &TT,
? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
: ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
DataLayout(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:32-i64:32:32-"
+ std::string("e-p:32:32-f64:32:64-i64:32:64-"
"i16:16:32-i8:8:32-i1:8:32-"
"v128:32:128-v64:32:64-a:0:32-n32") :
std::string("e-p:32:32-f64:64:64-i64:64:64-"
"i16:16:32-i8:8:32-i1:8:32-"
"v128:64:128-v64:64:64-a:0:32-n32")),
+ ELFWriterInfo(*this),
TLInfo(*this),
- TSInfo(*this) {
+ TSInfo(*this),
+ FrameLowering(Subtarget.hasThumb2()
+ ? new ARMFrameLowering(Subtarget)
+ : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
}
// Pass Pipeline Configuration
@@ -104,12 +146,12 @@ bool ARMBaseTargetMachine::addInstSelector(PassManagerBase &PM,
bool ARMBaseTargetMachine::addPreRegAlloc(PassManagerBase &PM,
CodeGenOpt::Level OptLevel) {
- if (Subtarget.hasNEON())
- PM.add(createNEONPreAllocPass());
-
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
PM.add(createARMLoadStoreOptimizationPass(true));
+ if (ExpandMLx &&
+ OptLevel != CodeGenOpt::None && Subtarget.hasVFP2())
+ PM.add(createMLxExpansionPass());
return true;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
index 17e5425..e0aa149 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
@@ -14,16 +14,19 @@
#ifndef ARMTARGETMACHINE_H
#define ARMTARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
#include "ARMInstrInfo.h"
-#include "ARMFrameInfo.h"
+#include "ARMELFWriterInfo.h"
+#include "ARMFrameLowering.h"
#include "ARMJITInfo.h"
#include "ARMSubtarget.h"
#include "ARMISelLowering.h"
#include "ARMSelectionDAGInfo.h"
#include "Thumb1InstrInfo.h"
+#include "Thumb1FrameLowering.h"
#include "Thumb2InstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/ADT/OwningPtr.h"
namespace llvm {
@@ -31,9 +34,7 @@ namespace llvm {
class ARMBaseTargetMachine : public LLVMTargetMachine {
protected:
ARMSubtarget Subtarget;
-
private:
- ARMFrameInfo FrameInfo;
ARMJITInfo JITInfo;
InstrItineraryData InstrItins;
Reloc::Model DefRelocModel; // Reloc model before it's overridden.
@@ -42,11 +43,10 @@ public:
ARMBaseTargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool isThumb);
- virtual const ARMFrameInfo *getFrameInfo() const { return &FrameInfo; }
virtual ARMJITInfo *getJITInfo() { return &JITInfo; }
virtual const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItins;
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return &InstrItins;
}
// Pass Pipeline Configuration
@@ -64,9 +64,11 @@ public:
class ARMTargetMachine : public ARMBaseTargetMachine {
ARMInstrInfo InstrInfo;
const TargetData DataLayout; // Calculates type size & alignment
+ ARMELFWriterInfo ELFWriterInfo;
ARMTargetLowering TLInfo;
ARMSelectionDAGInfo TSInfo;
-public:
+ ARMFrameLowering FrameLowering;
+public:
ARMTargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
@@ -81,9 +83,15 @@ public:
virtual const ARMSelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
+ virtual const ARMFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const ARMELFWriterInfo *getELFWriterInfo() const {
+ return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
+ }
};
/// ThumbTargetMachine - Thumb target machine.
@@ -94,8 +102,11 @@ class ThumbTargetMachine : public ARMBaseTargetMachine {
// Either Thumb1InstrInfo or Thumb2InstrInfo.
OwningPtr<ARMBaseInstrInfo> InstrInfo;
const TargetData DataLayout; // Calculates type size & alignment
+ ARMELFWriterInfo ELFWriterInfo;
ARMTargetLowering TLInfo;
ARMSelectionDAGInfo TSInfo;
+ // Either Thumb1FrameLowering or ARMFrameLowering.
+ OwningPtr<ARMFrameLowering> FrameLowering;
public:
ThumbTargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
@@ -117,7 +128,14 @@ public:
virtual const ARMBaseInstrInfo *getInstrInfo() const {
return InstrInfo.get();
}
+ /// Returns either Thumb1FrameLowering or ARMFrameLowering.
+ virtual const ARMFrameLowering *getFrameLowering() const {
+ return FrameLowering.get();
+ }
virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const ARMELFWriterInfo *getELFWriterInfo() const {
+ return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
index 091a3b3..7535da5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -12,6 +12,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
using namespace dwarf;
@@ -26,14 +27,20 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
if (TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {
StaticCtorSection =
- getContext().getELFSection(".init_array", MCSectionELF::SHT_INIT_ARRAY,
- MCSectionELF::SHF_WRITE |
- MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".init_array", ELF::SHT_INIT_ARRAY,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
SectionKind::getDataRel());
StaticDtorSection =
- getContext().getELFSection(".fini_array", MCSectionELF::SHT_FINI_ARRAY,
- MCSectionELF::SHF_WRITE |
- MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".fini_array", ELF::SHT_FINI_ARRAY,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
SectionKind::getDataRel());
}
+
+ AttributesSection =
+ getContext().getELFSection(".ARM.attributes",
+ ELF::SHT_ARM_ATTRIBUTES,
+ 0,
+ SectionKind::getMetadata());
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
index 097fc2c..c6a7261 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
@@ -18,10 +18,19 @@ class MCContext;
class TargetMachine;
class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
+protected:
+ const MCSection *AttributesSection;
public:
- ARMElfTargetObjectFile() : TargetLoweringObjectFileELF() {}
+ ARMElfTargetObjectFile() :
+ TargetLoweringObjectFileELF(),
+ AttributesSection(NULL)
+ {}
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+
+ virtual const MCSection *getAttributesSection() const {
+ return AttributesSection;
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
index f859d1b..2428ce1 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
@@ -10,10 +10,6 @@
#include "ARM.h"
#include "ARMTargetMachine.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
@@ -22,119 +18,135 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+
#include <string>
#include <map>
using namespace llvm;
namespace {
-
- class ARMBaseAsmLexer : public TargetAsmLexer {
- const MCAsmInfo &AsmInfo;
-
- const AsmToken &lexDefinite() {
- return getLexer()->Lex();
- }
-
- AsmToken LexTokenUAL();
- protected:
- typedef std::map <std::string, unsigned> rmap_ty;
-
- rmap_ty RegisterMap;
-
- void InitRegisterMap(const TargetRegisterInfo *info) {
- unsigned numRegs = info->getNumRegs();
-
- for (unsigned i = 0; i < numRegs; ++i) {
- const char *regName = info->getName(i);
- if (regName)
- RegisterMap[regName] = i;
- }
- }
-
- unsigned MatchRegisterName(StringRef Name) {
- rmap_ty::iterator iter = RegisterMap.find(Name.str());
- if (iter != RegisterMap.end())
- return iter->second;
- else
- return 0;
- }
-
- AsmToken LexToken() {
- if (!Lexer) {
- SetError(SMLoc(), "No MCAsmLexer installed");
- return AsmToken(AsmToken::Error, "", 0);
- }
-
- switch (AsmInfo.getAssemblerDialect()) {
- default:
- SetError(SMLoc(), "Unhandled dialect");
- return AsmToken(AsmToken::Error, "", 0);
- case 0:
- return LexTokenUAL();
- }
- }
- public:
- ARMBaseAsmLexer(const Target &T, const MCAsmInfo &MAI)
- : TargetAsmLexer(T), AsmInfo(MAI) {
+
+class ARMBaseAsmLexer : public TargetAsmLexer {
+ const MCAsmInfo &AsmInfo;
+
+ const AsmToken &lexDefinite() {
+ return getLexer()->Lex();
+ }
+
+ AsmToken LexTokenUAL();
+protected:
+ typedef std::map <std::string, unsigned> rmap_ty;
+
+ rmap_ty RegisterMap;
+
+ void InitRegisterMap(const TargetRegisterInfo *info) {
+ unsigned numRegs = info->getNumRegs();
+
+ for (unsigned i = 0; i < numRegs; ++i) {
+ const char *regName = info->getName(i);
+ if (regName)
+ RegisterMap[regName] = i;
}
- };
-
- class ARMAsmLexer : public ARMBaseAsmLexer {
- public:
- ARMAsmLexer(const Target &T, const MCAsmInfo &MAI)
- : ARMBaseAsmLexer(T, MAI) {
- std::string tripleString("arm-unknown-unknown");
- std::string featureString;
- OwningPtr<const TargetMachine>
- targetMachine(T.createTargetMachine(tripleString, featureString));
- InitRegisterMap(targetMachine->getRegisterInfo());
+ }
+
+ unsigned MatchRegisterName(StringRef Name) {
+ rmap_ty::iterator iter = RegisterMap.find(Name.str());
+ if (iter != RegisterMap.end())
+ return iter->second;
+ else
+ return 0;
+ }
+
+ AsmToken LexToken() {
+ if (!Lexer) {
+ SetError(SMLoc(), "No MCAsmLexer installed");
+ return AsmToken(AsmToken::Error, "", 0);
}
- };
-
- class ThumbAsmLexer : public ARMBaseAsmLexer {
- public:
- ThumbAsmLexer(const Target &T, const MCAsmInfo &MAI)
- : ARMBaseAsmLexer(T, MAI) {
- std::string tripleString("thumb-unknown-unknown");
- std::string featureString;
- OwningPtr<const TargetMachine>
- targetMachine(T.createTargetMachine(tripleString, featureString));
- InitRegisterMap(targetMachine->getRegisterInfo());
+
+ switch (AsmInfo.getAssemblerDialect()) {
+ default:
+ SetError(SMLoc(), "Unhandled dialect");
+ return AsmToken(AsmToken::Error, "", 0);
+ case 0:
+ return LexTokenUAL();
}
- };
-}
+ }
+public:
+ ARMBaseAsmLexer(const Target &T, const MCAsmInfo &MAI)
+ : TargetAsmLexer(T), AsmInfo(MAI) {
+ }
+};
+
+class ARMAsmLexer : public ARMBaseAsmLexer {
+public:
+ ARMAsmLexer(const Target &T, const MCAsmInfo &MAI)
+ : ARMBaseAsmLexer(T, MAI) {
+ std::string tripleString("arm-unknown-unknown");
+ std::string featureString;
+ OwningPtr<const TargetMachine>
+ targetMachine(T.createTargetMachine(tripleString, featureString));
+ InitRegisterMap(targetMachine->getRegisterInfo());
+ }
+};
+
+class ThumbAsmLexer : public ARMBaseAsmLexer {
+public:
+ ThumbAsmLexer(const Target &T, const MCAsmInfo &MAI)
+ : ARMBaseAsmLexer(T, MAI) {
+ std::string tripleString("thumb-unknown-unknown");
+ std::string featureString;
+ OwningPtr<const TargetMachine>
+ targetMachine(T.createTargetMachine(tripleString, featureString));
+ InitRegisterMap(targetMachine->getRegisterInfo());
+ }
+};
+
+} // end anonymous namespace
AsmToken ARMBaseAsmLexer::LexTokenUAL() {
const AsmToken &lexedToken = lexDefinite();
-
+
switch (lexedToken.getKind()) {
- default:
- return AsmToken(lexedToken);
+ default: break;
case AsmToken::Error:
SetError(Lexer->getErrLoc(), Lexer->getErr());
- return AsmToken(lexedToken);
- case AsmToken::Identifier:
- {
+ break;
+ case AsmToken::Identifier: {
std::string upperCase = lexedToken.getString().str();
std::string lowerCase = LowercaseString(upperCase);
StringRef lowerRef(lowerCase);
-
+
unsigned regID = MatchRegisterName(lowerRef);
-
- if (regID) {
+ // Check for register aliases.
+ // r13 -> sp
+ // r14 -> lr
+ // r15 -> pc
+ // ip -> r12
+ // FIXME: Some assemblers support lots of others. Do we want them all?
+ if (!regID) {
+ regID = StringSwitch<unsigned>(lowerCase)
+ .Case("r13", ARM::SP)
+ .Case("r14", ARM::LR)
+ .Case("r15", ARM::PC)
+ .Case("ip", ARM::R12)
+ .Default(0);
+ }
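+ // For example, "R13" is lowercased to "r13", misses RegisterMap, and the
+ // alias table above maps it to ARM::SP.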
+
+ if (regID)
return AsmToken(AsmToken::Register,
lexedToken.getString(),
static_cast<int64_t>(regID));
- } else {
- return AsmToken(lexedToken);
- }
}
}
+
+ return AsmToken(lexedToken);
}
extern "C" void LLVMInitializeARMAsmLexer() {
RegisterAsmLexer<ARMAsmLexer> X(TheARMTarget);
RegisterAsmLexer<ThumbAsmLexer> Y(TheThumbTarget);
}
-
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 75e2a73..129af20 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -8,28 +8,28 @@
//===----------------------------------------------------------------------===//
#include "ARM.h"
+#include "ARMAddressingModes.h"
+#include "ARMMCExpr.h"
+#include "ARMBaseRegisterInfo.h"
#include "ARMSubtarget.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmParser.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
using namespace llvm;
-namespace {
-struct ARMOperand;
-
-// The shift types for register controlled shifts in arm memory addressing
+/// Shift types used for register controlled shifts in ARM memory addressing.
enum ShiftType {
Lsl,
Lsr,
@@ -38,24 +38,30 @@ enum ShiftType {
Rrx
};
+namespace {
+
+class ARMOperand;
+
class ARMAsmParser : public TargetAsmParser {
MCAsmParser &Parser;
TargetMachine &TM;
-private:
MCAsmParser &getParser() const { return Parser; }
-
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
-
bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
- bool MaybeParseRegister(OwningPtr<ARMOperand> &Op, bool ParseWriteBack);
+ int TryParseRegister();
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+ bool TryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool ParseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
+ bool ParsePrefix(ARMMCExpr::VariantKind &RefKind);
+ const MCExpr *ApplyPrefixToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind Variant);
- bool ParseRegisterList(OwningPtr<ARMOperand> &Op);
-
- bool ParseMemory(OwningPtr<ARMOperand> &Op);
bool ParseMemoryOffsetReg(bool &Negative,
bool &OffsetRegShifted,
@@ -65,70 +71,76 @@ private:
bool &OffsetIsReg,
int &OffsetRegNum,
SMLoc &E);
-
bool ParseShift(enum ShiftType &St, const MCExpr *&ShiftAmount, SMLoc &E);
-
- bool ParseOperand(OwningPtr<ARMOperand> &Op);
-
bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
bool ParseDirectiveThumb(SMLoc L);
-
bool ParseDirectiveThumbFunc(SMLoc L);
-
bool ParseDirectiveCode(SMLoc L);
-
bool ParseDirectiveSyntax(SMLoc L);
- bool MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst) {
- if (!MatchInstructionImpl(Operands, Inst))
- return false;
-
- // FIXME: We should give nicer diagnostics about the exact failure.
- Error(IDLoc, "unrecognized instruction");
-
- return true;
- }
+ bool MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out);
+ void GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
+ bool &CanAcceptPredicationCode);
/// @name Auto-generated Match Functions
/// {
- unsigned ComputeAvailableFeatures(const ARMSubtarget *Subtarget) const;
-
- bool MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>
- &Operands,
- MCInst &Inst);
+#define GET_ASSEMBLER_HEADER
+#include "ARMGenAsmMatcher.inc"
/// }
+ OperandMatchResultTy tryParseCoprocNumOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy tryParseCoprocRegOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy tryParseMemBarrierOptOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy tryParseProcIFlagsOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy tryParseMSRMaskOperand(
+ SmallVectorImpl<MCParsedAsmOperand*>&);
public:
ARMAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &_TM)
- : TargetAsmParser(T), Parser(_Parser), TM(_TM) {}
+ : TargetAsmParser(T), Parser(_Parser), TM(_TM) {
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(
+ &TM.getSubtarget<ARMSubtarget>()));
+ }
virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
virtual bool ParseDirective(AsmToken DirectiveID);
};
-
+} // end anonymous namespace
+
+namespace {
+
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
-struct ARMOperand : public MCParsedAsmOperand {
-private:
- ARMOperand() {}
-public:
+class ARMOperand : public MCParsedAsmOperand {
enum KindTy {
CondCode,
+ CCOut,
+ CoprocNum,
+ CoprocReg,
Immediate,
+ MemBarrierOpt,
Memory,
+ MSRMask,
+ ProcIFlags,
Register,
+ RegisterList,
+ DPRRegisterList,
+ SPRRegisterList,
Token
} Kind;
SMLoc StartLoc, EndLoc;
+ SmallVector<unsigned, 8> Registers;
union {
struct {
@@ -136,40 +148,54 @@ public:
} CC;
struct {
+ ARM_MB::MemBOpt Val;
+ } MBOpt;
+
+ struct {
+ unsigned Val;
+ } Cop;
+
+ struct {
+ ARM_PROC::IFlags Val;
+ } IFlags;
+
+ struct {
+ unsigned Val;
+ } MMask;
+
+ struct {
const char *Data;
unsigned Length;
} Tok;
struct {
unsigned RegNum;
- bool Writeback;
} Reg;
struct {
const MCExpr *Val;
} Imm;
-
- // This is for all forms of ARM address expressions
+
+ /// Combined record for all forms of ARM address expressions.
struct {
unsigned BaseRegNum;
- unsigned OffsetRegNum; // used when OffsetIsReg is true
- const MCExpr *Offset; // used when OffsetIsReg is false
- const MCExpr *ShiftAmount; // used when OffsetRegShifted is true
- enum ShiftType ShiftType; // used when OffsetRegShifted is true
- unsigned
- OffsetRegShifted : 1, // only used when OffsetIsReg is true
- Preindexed : 1,
- Postindexed : 1,
- OffsetIsReg : 1,
- Negative : 1, // only used when OffsetIsReg is true
- Writeback : 1;
+ union {
+ unsigned RegNum; ///< Offset register num, when OffsetIsReg.
+ const MCExpr *Value; ///< Offset value, when !OffsetIsReg.
+ } Offset;
+ const MCExpr *ShiftAmount; // used when OffsetRegShifted is true
+ enum ShiftType ShiftType; // used when OffsetRegShifted is true
+ unsigned OffsetRegShifted : 1; // only used when OffsetIsReg is true
+ unsigned Preindexed : 1;
+ unsigned Postindexed : 1;
+ unsigned OffsetIsReg : 1;
+ unsigned Negative : 1; // only used when OffsetIsReg is true
+ unsigned Writeback : 1;
} Mem;
-
};
-
- //ARMOperand(KindTy K, SMLoc S, SMLoc E)
- // : Kind(K), StartLoc(S), EndLoc(E) {}
-
+
+ ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+public:
ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
Kind = o.Kind;
StartLoc = o.StartLoc;
@@ -181,18 +207,36 @@ public:
case Token:
Tok = o.Tok;
break;
+ case CCOut:
case Register:
Reg = o.Reg;
break;
+ case RegisterList:
+ case DPRRegisterList:
+ case SPRRegisterList:
+ Registers = o.Registers;
+ break;
+ case CoprocNum:
+ case CoprocReg:
+ Cop = o.Cop;
+ break;
case Immediate:
Imm = o.Imm;
break;
+ case MemBarrierOpt:
+ MBOpt = o.MBOpt;
+ break;
case Memory:
Mem = o.Mem;
break;
+ case MSRMask:
+ MMask = o.MMask;
+ break;
+ case ProcIFlags:
+ IFlags = o.IFlags;
}
}
-
+
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
@@ -203,32 +247,129 @@ public:
return CC.Val;
}
+ unsigned getCoproc() const {
+ assert((Kind == CoprocNum || Kind == CoprocReg) && "Invalid access!");
+ return Cop.Val;
+ }
+
StringRef getToken() const {
assert(Kind == Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
unsigned getReg() const {
- assert(Kind == Register && "Invalid access!");
+ assert((Kind == Register || Kind == CCOut) && "Invalid access!");
return Reg.RegNum;
}
+ const SmallVectorImpl<unsigned> &getRegList() const {
+ assert((Kind == RegisterList || Kind == DPRRegisterList ||
+ Kind == SPRRegisterList) && "Invalid access!");
+ return Registers;
+ }
+
const MCExpr *getImm() const {
assert(Kind == Immediate && "Invalid access!");
return Imm.Val;
}
- bool isCondCode() const { return Kind == CondCode; }
+ ARM_MB::MemBOpt getMemBarrierOpt() const {
+ assert(Kind == MemBarrierOpt && "Invalid access!");
+ return MBOpt.Val;
+ }
- bool isImm() const { return Kind == Immediate; }
+ ARM_PROC::IFlags getProcIFlags() const {
+ assert(Kind == ProcIFlags && "Invalid access!");
+ return IFlags.Val;
+ }
+
+ unsigned getMSRMask() const {
+ assert(Kind == MSRMask && "Invalid access!");
+ return MMask.Val;
+ }
+
+ /// @name Memory Operand Accessors
+ /// @{
+
+ unsigned getMemBaseRegNum() const {
+ return Mem.BaseRegNum;
+ }
+ unsigned getMemOffsetRegNum() const {
+ assert(Mem.OffsetIsReg && "Invalid access!");
+ return Mem.Offset.RegNum;
+ }
+ const MCExpr *getMemOffset() const {
+ assert(!Mem.OffsetIsReg && "Invalid access!");
+ return Mem.Offset.Value;
+ }
+ unsigned getMemOffsetRegShifted() const {
+ assert(Mem.OffsetIsReg && "Invalid access!");
+ return Mem.OffsetRegShifted;
+ }
+ const MCExpr *getMemShiftAmount() const {
+ assert(Mem.OffsetIsReg && Mem.OffsetRegShifted && "Invalid access!");
+ return Mem.ShiftAmount;
+ }
+ enum ShiftType getMemShiftType() const {
+ assert(Mem.OffsetIsReg && Mem.OffsetRegShifted && "Invalid access!");
+ return Mem.ShiftType;
+ }
+ bool getMemPreindexed() const { return Mem.Preindexed; }
+ bool getMemPostindexed() const { return Mem.Postindexed; }
+ bool getMemOffsetIsReg() const { return Mem.OffsetIsReg; }
+ bool getMemNegative() const { return Mem.Negative; }
+ bool getMemWriteback() const { return Mem.Writeback; }
+
+ /// @}
+ bool isCoprocNum() const { return Kind == CoprocNum; }
+ bool isCoprocReg() const { return Kind == CoprocReg; }
+ bool isCondCode() const { return Kind == CondCode; }
+ bool isCCOut() const { return Kind == CCOut; }
+ bool isImm() const { return Kind == Immediate; }
bool isReg() const { return Kind == Register; }
+ bool isRegList() const { return Kind == RegisterList; }
+ bool isDPRRegList() const { return Kind == DPRRegisterList; }
+ bool isSPRRegList() const { return Kind == SPRRegisterList; }
+ bool isToken() const { return Kind == Token; }
+ bool isMemBarrierOpt() const { return Kind == MemBarrierOpt; }
+ bool isMemory() const { return Kind == Memory; }
+ bool isMemMode5() const {
+ if (!isMemory() || getMemOffsetIsReg() || getMemWriteback() ||
+ getMemNegative())
+ return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemOffset());
+ if (!CE) return false;
+
+ // The offset must be a multiple of 4 in the range -1020 to 1020.
+ int64_t Value = CE->getValue();
+ return ((Value & 0x3) == 0 && Value <= 1020 && Value >= -1020);
+ }
+ bool isMemModeRegThumb() const {
+ if (!isMemory() || !getMemOffsetIsReg() || getMemWriteback())
+ return false;
+ return true;
+ }
+ bool isMemModeImmThumb() const {
+ if (!isMemory() || getMemOffsetIsReg() || getMemWriteback())
+ return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemOffset());
+ if (!CE) return false;
- bool isToken() const {return Kind == Token; }
+ // The offset must be a multiple of 4 in the range 0-124.
+ uint64_t Value = CE->getValue();
+ return ((Value & 0x3) == 0 && Value <= 124);
+ }
+ bool isMSRMask() const { return Kind == MSRMask; }
+ bool isProcIFlags() const { return Kind == ProcIFlags; }
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ // Add as immediates when possible. Null MCExpr = 0.
+ if (Expr == 0)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
else
Inst.addOperand(MCOperand::CreateExpr(Expr));
@@ -237,8 +378,23 @@ public:
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
- // FIXME: What belongs here?
- Inst.addOperand(MCOperand::CreateReg(0));
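+ // The predicate's second operand is the register the predicate reads: CPSR
+ // when the instruction is genuinely conditional, reg0 for the "always" case.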
+ unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
+ Inst.addOperand(MCOperand::CreateReg(RegNum));
+ }
+
+ void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getCoproc()));
+ }
+
+ void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getCoproc()));
+ }
+
+ void addCCOutOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
}
void addRegOperands(MCInst &Inst, unsigned N) const {
@@ -246,66 +402,181 @@ public:
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
+ void addRegListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const SmallVectorImpl<unsigned> &RegList = getRegList();
+ for (SmallVectorImpl<unsigned>::const_iterator
+ I = RegList.begin(), E = RegList.end(); I != E; ++I)
+ Inst.addOperand(MCOperand::CreateReg(*I));
+ }
+
+ void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
+ addRegListOperands(Inst, N);
+ }
+
+ void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
+ addRegListOperands(Inst, N);
+ }
+
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addExpr(Inst, getImm());
}
+ void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
+ }
+
+ void addMemMode5Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && isMemMode5() && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseRegNum()));
+ assert(!getMemOffsetIsReg() && "Invalid mode 5 operand");
+
+ // FIXME: #-0 is encoded differently than #0. Does the parser preserve
+ // the difference?
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemOffset());
+ assert(CE && "Non-constant mode 5 offset operand!");
+
+ // The MCInst offset operand doesn't include the low two bits (like
+ // the instruction encoding).
+ int64_t Offset = CE->getValue() / 4;
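+ // e.g. an offset of #-16 becomes getAM5Opc(ARM_AM::sub, 4) below.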
+ if (Offset >= 0)
+ Inst.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(ARM_AM::add,
+ Offset)));
+ else
+ Inst.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(ARM_AM::sub,
+ -Offset)));
+ }
+
+ void addMemModeRegThumbOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && isMemModeRegThumb() && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseRegNum()));
+ Inst.addOperand(MCOperand::CreateReg(getMemOffsetRegNum()));
+ }
+
+ void addMemModeImmThumbOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && isMemModeImmThumb() && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getMemBaseRegNum()));
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemOffset());
+ assert(CE && "Non-constant mode offset operand!");
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ }
+
+ void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
+ }
+
+ void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
+ }
+
virtual void dump(raw_ostream &OS) const;
- static void CreateCondCode(OwningPtr<ARMOperand> &Op, ARMCC::CondCodes CC,
- SMLoc S) {
- Op.reset(new ARMOperand);
- Op->Kind = CondCode;
+ static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(CondCode);
Op->CC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
+ return Op;
+ }
+
+ static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(CoprocNum);
+ Op->Cop.Val = CopVal;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
- static void CreateToken(OwningPtr<ARMOperand> &Op, StringRef Str,
- SMLoc S) {
- Op.reset(new ARMOperand);
- Op->Kind = Token;
+ static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(CoprocReg);
+ Op->Cop.Val = CopVal;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(CCOut);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(Token);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
+ return Op;
}
- static void CreateReg(OwningPtr<ARMOperand> &Op, unsigned RegNum,
- bool Writeback, SMLoc S, SMLoc E) {
- Op.reset(new ARMOperand);
- Op->Kind = Register;
+ static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
+ ARMOperand *Op = new ARMOperand(Register);
Op->Reg.RegNum = RegNum;
- Op->Reg.Writeback = Writeback;
-
Op->StartLoc = S;
Op->EndLoc = E;
+ return Op;
}
- static void CreateImm(OwningPtr<ARMOperand> &Op, const MCExpr *Val,
- SMLoc S, SMLoc E) {
- Op.reset(new ARMOperand);
- Op->Kind = Immediate;
+ static ARMOperand *
+ CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
+ SMLoc StartLoc, SMLoc EndLoc) {
+ KindTy Kind = RegisterList;
+
+ if (ARM::DPRRegClass.contains(Regs.front().first))
+ Kind = DPRRegisterList;
+ else if (ARM::SPRRegClass.contains(Regs.front().first))
+ Kind = SPRRegisterList;
+
+ ARMOperand *Op = new ARMOperand(Kind);
+ for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
+ I = Regs.begin(), E = Regs.end(); I != E; ++I)
+ Op->Registers.push_back(I->first);
+ array_pod_sort(Op->Registers.begin(), Op->Registers.end());
+ Op->StartLoc = StartLoc;
+ Op->EndLoc = EndLoc;
+ return Op;
+ }
+
+ static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ ARMOperand *Op = new ARMOperand(Immediate);
Op->Imm.Val = Val;
-
Op->StartLoc = S;
Op->EndLoc = E;
+ return Op;
}
- static void CreateMem(OwningPtr<ARMOperand> &Op,
- unsigned BaseRegNum, bool OffsetIsReg,
- const MCExpr *Offset, unsigned OffsetRegNum,
- bool OffsetRegShifted, enum ShiftType ShiftType,
- const MCExpr *ShiftAmount, bool Preindexed,
- bool Postindexed, bool Negative, bool Writeback,
- SMLoc S, SMLoc E) {
- Op.reset(new ARMOperand);
- Op->Kind = Memory;
+ static ARMOperand *CreateMem(unsigned BaseRegNum, bool OffsetIsReg,
+ const MCExpr *Offset, int OffsetRegNum,
+ bool OffsetRegShifted, enum ShiftType ShiftType,
+ const MCExpr *ShiftAmount, bool Preindexed,
+ bool Postindexed, bool Negative, bool Writeback,
+ SMLoc S, SMLoc E) {
+ assert((OffsetRegNum == -1 || OffsetIsReg) &&
+ "OffsetRegNum must imply OffsetIsReg!");
+ assert((!OffsetRegShifted || OffsetIsReg) &&
+ "OffsetRegShifted must imply OffsetIsReg!");
+ assert((Offset || OffsetIsReg) &&
+ "Offset must exists unless register offset is used!");
+ assert((!ShiftAmount || (OffsetIsReg && OffsetRegShifted)) &&
+ "Cannot have shift amount without shifted register offset!");
+ assert((!Offset || !OffsetIsReg) &&
+ "Cannot have expression offset and register offset!");
+
+ ARMOperand *Op = new ARMOperand(Memory);
Op->Mem.BaseRegNum = BaseRegNum;
Op->Mem.OffsetIsReg = OffsetIsReg;
- Op->Mem.Offset = Offset;
- Op->Mem.OffsetRegNum = OffsetRegNum;
+ if (OffsetIsReg)
+ Op->Mem.Offset.RegNum = OffsetRegNum;
+ else
+ Op->Mem.Offset.Value = Offset;
Op->Mem.OffsetRegShifted = OffsetRegShifted;
Op->Mem.ShiftType = ShiftType;
Op->Mem.ShiftAmount = ShiftAmount;
@@ -313,9 +584,34 @@ public:
Op->Mem.Postindexed = Postindexed;
Op->Mem.Negative = Negative;
Op->Mem.Writeback = Writeback;
-
+
Op->StartLoc = S;
Op->EndLoc = E;
+ return Op;
+ }
+
+ static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(MemBarrierOpt);
+ Op->MBOpt.Val = Opt;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(ProcIFlags);
+ Op->IFlags.Val = IFlags;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
+ ARMOperand *Op = new ARMOperand(MSRMask);
+ Op->MMask.Val = MMask;
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
}
};
@@ -324,17 +620,77 @@ public:
void ARMOperand::dump(raw_ostream &OS) const {
switch (Kind) {
case CondCode:
- OS << ARMCondCodeToString(getCondCode());
+ OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
+ break;
+ case CCOut:
+ OS << "<ccout " << getReg() << ">";
+ break;
+ case CoprocNum:
+ OS << "<coprocessor number: " << getCoproc() << ">";
+ break;
+ case CoprocReg:
+ OS << "<coprocessor register: " << getCoproc() << ">";
+ break;
+ case MSRMask:
+ OS << "<mask: " << getMSRMask() << ">";
break;
case Immediate:
getImm()->print(OS);
break;
+ case MemBarrierOpt:
+ OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
+ break;
case Memory:
- OS << "<memory>";
+ OS << "<memory "
+ << "base:" << getMemBaseRegNum();
+ if (getMemOffsetIsReg()) {
+ OS << " offset:<register " << getMemOffsetRegNum();
+ if (getMemOffsetRegShifted()) {
+ OS << " offset-shift-type:" << getMemShiftType();
+ OS << " offset-shift-amount:" << *getMemShiftAmount();
+ }
+ } else {
+ OS << " offset:" << *getMemOffset();
+ }
+ if (getMemOffsetIsReg())
+ OS << " (offset-is-reg)";
+ if (getMemPreindexed())
+ OS << " (pre-indexed)";
+ if (getMemPostindexed())
+ OS << " (post-indexed)";
+ if (getMemNegative())
+ OS << " (negative)";
+ if (getMemWriteback())
+ OS << " (writeback)";
+ OS << ">";
+ break;
+ case ProcIFlags: {
+ OS << "<ARM_PROC::";
+ unsigned IFlags = getProcIFlags();
+ for (int i=2; i >= 0; --i)
+ if (IFlags & (1 << i))
+ OS << ARM_PROC::IFlagsToString(1 << i);
+ OS << ">";
break;
+ }
case Register:
OS << "<register " << getReg() << ">";
break;
+ case RegisterList:
+ case DPRRegisterList:
+ case SPRRegisterList: {
+ OS << "<register_list ";
+
+ const SmallVectorImpl<unsigned> &RegList = getRegList();
+ for (SmallVectorImpl<unsigned>::const_iterator
+ I = RegList.begin(), E = RegList.end(); I != E; ) {
+ OS << *I;
+ if (++I < E) OS << ", ";
+ }
+
+ OS << ">";
+ break;
+ }
case Token:
OS << "'" << getToken() << "'";
break;
@@ -348,184 +704,456 @@ static unsigned MatchRegisterName(StringRef Name);
/// }
+bool ARMAsmParser::ParseRegister(unsigned &RegNo,
+ SMLoc &StartLoc, SMLoc &EndLoc) {
+ RegNo = TryParseRegister();
+
+ return (RegNo == (unsigned)-1);
+}
+
/// Try to parse a register name. The token must be an Identifier when called,
-/// and if it is a register name a Reg operand is created, the token is eaten
-/// and false is returned. Else true is returned and no token is eaten.
-/// TODO this is likely to change to allow different register types and or to
-/// parse for a specific register type.
-bool ARMAsmParser::MaybeParseRegister
- (OwningPtr<ARMOperand> &Op, bool ParseWriteBack) {
- SMLoc S, E;
+/// and if it is a register name the token is eaten and the register number is
+/// returned. Otherwise return -1.
+///
+int ARMAsmParser::TryParseRegister() {
const AsmToken &Tok = Parser.getTok();
assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// FIXME: Validate register for the current architecture; we have to do
// validation later, so maybe there is no need for this here.
- int RegNum;
+ std::string upperCase = Tok.getString().str();
+ std::string lowerCase = LowercaseString(upperCase);
+ unsigned RegNum = MatchRegisterName(lowerCase);
+ if (!RegNum) {
+ RegNum = StringSwitch<unsigned>(lowerCase)
+ .Case("r13", ARM::SP)
+ .Case("r14", ARM::LR)
+ .Case("r15", ARM::PC)
+ .Case("ip", ARM::R12)
+ .Default(0);
+ }
+ if (!RegNum) return -1;
- RegNum = MatchRegisterName(Tok.getString());
- if (RegNum == -1)
- return true;
-
- S = Tok.getLoc();
-
Parser.Lex(); // Eat identifier token.
-
- E = Parser.getTok().getLoc();
+ return RegNum;
+}
- bool Writeback = false;
- if (ParseWriteBack) {
- const AsmToken &ExclaimTok = Parser.getTok();
- if (ExclaimTok.is(AsmToken::Exclaim)) {
- E = ExclaimTok.getLoc();
- Writeback = true;
- Parser.Lex(); // Eat exclaim token
+/// Try to parse a register name. The token must be an Identifier when called.
+/// If it's a register, an AsmOperand is created. Another AsmOperand is created
+/// if there is a "writeback". 'true' if it's not a register.
+///
+/// TODO this is likely to change to allow different register types and or to
+/// parse for a specific register type.
+bool ARMAsmParser::
+TryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ int RegNo = TryParseRegister();
+ if (RegNo == -1)
+ return true;
+
+ Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
+
+ const AsmToken &ExclaimTok = Parser.getTok();
+ if (ExclaimTok.is(AsmToken::Exclaim)) {
+ Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
+ ExclaimTok.getLoc()));
+ Parser.Lex(); // Eat exclaim token
+ }
+
+ return false;
+}
+
+/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
+/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
+/// "c5", ...
+static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
+ // Use the same layout as the tablegen'erated register name matcher. Ugly,
+ // but efficient.
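+ // For example, ("p15", 'p') returns 15 and ("c3", 'c') returns 3.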
+ switch (Name.size()) {
+ default: break;
+ case 2:
+ if (Name[0] != CoprocOp)
+ return -1;
+ switch (Name[1]) {
+ default: return -1;
+ case '0': return 0;
+ case '1': return 1;
+ case '2': return 2;
+ case '3': return 3;
+ case '4': return 4;
+ case '5': return 5;
+ case '6': return 6;
+ case '7': return 7;
+ case '8': return 8;
+ case '9': return 9;
+ }
+ break;
+ case 3:
+ if (Name[0] != CoprocOp || Name[1] != '1')
+ return -1;
+ switch (Name[2]) {
+ default: return -1;
+ case '0': return 10;
+ case '1': return 11;
+ case '2': return 12;
+ case '3': return 13;
+ case '4': return 14;
+ case '5': return 15;
}
+ break;
}
- ARMOperand::CreateReg(Op, RegNum, Writeback, S, E);
+ return -1;
+}
- return false;
+/// tryParseCoprocNumOperand - Try to parse a coprocessor number operand. The
+/// token must be an Identifier when called, and if it is a coprocessor
+/// number, the token is eaten and the operand is added to the operand list.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+tryParseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+
+ int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
+ if (Num == -1)
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
+ return MatchOperand_Success;
}
-/// Parse a register list, return false if successful else return true or an
-/// error. The first token must be a '{' when called.
-bool ARMAsmParser::ParseRegisterList(OwningPtr<ARMOperand> &Op) {
- SMLoc S, E;
- assert(Parser.getTok().is(AsmToken::LCurly) &&
- "Token is not an Left Curly Brace");
- S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat left curly brace token.
-
- const AsmToken &RegTok = Parser.getTok();
- SMLoc RegLoc = RegTok.getLoc();
- if (RegTok.isNot(AsmToken::Identifier))
- return Error(RegLoc, "register expected");
- int RegNum = MatchRegisterName(RegTok.getString());
- if (RegNum == -1)
- return Error(RegLoc, "register expected");
+/// tryParseCoprocRegOperand - Try to parse a coprocessor register operand. The
+/// token must be an Identifier when called, and if it is a coprocessor
+/// register, the token is eaten and the operand is added to the operand list.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+tryParseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+
+ int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
+ if (Reg == -1)
+ return MatchOperand_NoMatch;
+
Parser.Lex(); // Eat identifier token.
- unsigned RegList = 1 << RegNum;
+ Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
+ return MatchOperand_Success;
+}
- int HighRegNum = RegNum;
- // TODO ranges like "{Rn-Rm}"
- while (Parser.getTok().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat comma token.
+/// Parse a register list. Returns false and adds a register-list operand on
+/// success, otherwise returns true. The first token must be a '{' when called.
+bool ARMAsmParser::
+ParseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ assert(Parser.getTok().is(AsmToken::LCurly) &&
+ "Token is not a Left Curly Brace");
+ SMLoc S = Parser.getTok().getLoc();
+
+ // Read the registers in the list.
+ unsigned PrevRegNum = 0;
+ SmallVector<std::pair<unsigned, SMLoc>, 32> Registers;
+
+ do {
+ bool IsRange = Parser.getTok().is(AsmToken::Minus);
+ Parser.Lex(); // Eat non-identifier token.
const AsmToken &RegTok = Parser.getTok();
SMLoc RegLoc = RegTok.getLoc();
- if (RegTok.isNot(AsmToken::Identifier))
- return Error(RegLoc, "register expected");
- int RegNum = MatchRegisterName(RegTok.getString());
- if (RegNum == -1)
- return Error(RegLoc, "register expected");
+ if (RegTok.isNot(AsmToken::Identifier)) {
+ Error(RegLoc, "register expected");
+ return true;
+ }
- if (RegList & (1 << RegNum))
- Warning(RegLoc, "register duplicated in register list");
- else if (RegNum <= HighRegNum)
- Warning(RegLoc, "register not in ascending order in register list");
- RegList |= 1 << RegNum;
- HighRegNum = RegNum;
+ int RegNum = TryParseRegister();
+ if (RegNum == -1) {
+ Error(RegLoc, "register expected");
+ return true;
+ }
- Parser.Lex(); // Eat identifier token.
- }
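+ // A range such as "r4-r7" is expanded here to r5, r6, r7; r4 itself was
+ // pushed on the previous iteration of the loop.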
+ if (IsRange) {
+ int Reg = PrevRegNum;
+ do {
+ ++Reg;
+ Registers.push_back(std::make_pair(Reg, RegLoc));
+ } while (Reg != RegNum);
+ } else {
+ Registers.push_back(std::make_pair(RegNum, RegLoc));
+ }
+
+ PrevRegNum = RegNum;
+ } while (Parser.getTok().is(AsmToken::Comma) ||
+ Parser.getTok().is(AsmToken::Minus));
+
+ // Process the right curly brace of the list.
const AsmToken &RCurlyTok = Parser.getTok();
- if (RCurlyTok.isNot(AsmToken::RCurly))
- return Error(RCurlyTok.getLoc(), "'}' expected");
- E = RCurlyTok.getLoc();
- Parser.Lex(); // Eat left curly brace token.
+ if (RCurlyTok.isNot(AsmToken::RCurly)) {
+ Error(RCurlyTok.getLoc(), "'}' expected");
+ return true;
+ }
+
+ SMLoc E = RCurlyTok.getLoc();
+ Parser.Lex(); // Eat right curly brace token.
+
+ // Verify the register list.
+ SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
+ RI = Registers.begin(), RE = Registers.end();
+
+ unsigned HighRegNum = getARMRegisterNumbering(RI->first);
+ bool EmittedWarning = false;
+
+ DenseMap<unsigned, bool> RegMap;
+ RegMap[HighRegNum] = true;
+ for (++RI; RI != RE; ++RI) {
+ const std::pair<unsigned, SMLoc> &RegInfo = *RI;
+ unsigned Reg = getARMRegisterNumbering(RegInfo.first);
+
+ if (RegMap[Reg]) {
+ Error(RegInfo.second, "register duplicated in register list");
+ return true;
+ }
+
+ if (!EmittedWarning && Reg < HighRegNum)
+ Warning(RegInfo.second,
+ "register not in ascending order in register list");
+
+ RegMap[Reg] = true;
+ HighRegNum = std::max(Reg, HighRegNum);
+ }
+
+ Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
return false;
}
-/// Parse an arm memory expression, return false if successful else return true
+/// tryParseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
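+/// For example, the "ish" in "dmb ish" becomes an ARM_MB::ISH operand.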
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+tryParseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ StringRef OptStr = Tok.getString();
+
+ unsigned Opt = StringSwitch<unsigned>(OptStr)
+ .Case("sy", ARM_MB::SY)
+ .Case("st", ARM_MB::ST)
+ .Case("ish", ARM_MB::ISH)
+ .Case("ishst", ARM_MB::ISHST)
+ .Case("nsh", ARM_MB::NSH)
+ .Case("nshst", ARM_MB::NSHST)
+ .Case("osh", ARM_MB::OSH)
+ .Case("oshst", ARM_MB::OSHST)
+ .Default(~0U);
+
+ if (Opt == ~0U)
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
+ return MatchOperand_Success;
+}
+
+/// tryParseProcIFlagsOperand - Try to parse iflags from CPS instruction.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+tryParseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ StringRef IFlagsStr = Tok.getString();
+
+ unsigned IFlags = 0;
+ for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
+ unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
+ .Case("a", ARM_PROC::A)
+ .Case("i", ARM_PROC::I)
+ .Case("f", ARM_PROC::F)
+ .Default(~0U);
+
+ // If some specific iflag is already set, it means that some letter is
+ // present more than once, which is not acceptable.
+ if (Flag == ~0U || (IFlags & Flag))
+ return MatchOperand_NoMatch;
+
+ IFlags |= Flag;
+ }
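+ // For example, "aif" accumulates ARM_PROC::A | ARM_PROC::I | ARM_PROC::F.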
+
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
+ return MatchOperand_Success;
+}
+
+/// tryParseMSRMaskOperand - Try to parse mask flags from MSR instruction.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+tryParseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ const AsmToken &Tok = Parser.getTok();
+ assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ StringRef Mask = Tok.getString();
+
+ // Split spec_reg from flags, example: CPSR_sxf => "CPSR" and "sxf"
+ size_t Start = 0, Next = Mask.find('_');
+ StringRef Flags = "";
+ StringRef SpecReg = Mask.slice(Start, Next);
+ if (Next != StringRef::npos)
+ Flags = Mask.slice(Next+1, Mask.size());
+
+ // FlagsVal contains the complete mask:
+ // 3-0: Mask
+ // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
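+ // For example, "spsr_fc" yields (8 | 1) with bit 4 set, i.e. 0x19.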
+ unsigned FlagsVal = 0;
+
+ if (SpecReg == "apsr") {
+ FlagsVal = StringSwitch<unsigned>(Flags)
+ .Case("nzcvq", 0x8) // same as CPSR_c
+ .Case("g", 0x4) // same as CPSR_s
+ .Case("nzcvqg", 0xc) // same as CPSR_fs
+ .Default(~0U);
+
+ if (FlagsVal == ~0U) {
+ if (!Flags.empty())
+ return MatchOperand_NoMatch;
+ else
+ FlagsVal = 0; // No flag
+ }
+ } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
+ for (int i = 0, e = Flags.size(); i != e; ++i) {
+ unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
+ .Case("c", 1)
+ .Case("x", 2)
+ .Case("s", 4)
+ .Case("f", 8)
+ .Default(~0U);
+
+ // If some specific flag is already set, it means that some letter is
+ // present more than once, which is not acceptable.
+ if (Flag == ~0U || (FlagsVal & Flag))
+ return MatchOperand_NoMatch;
+ FlagsVal |= Flag;
+ }
+ } else // No match for special register.
+ return MatchOperand_NoMatch;
+
+ // A special register without flags is equivalent to the "fc" flags.
+ if (!FlagsVal)
+ FlagsVal = 0x9;
+
+ // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
+ if (SpecReg == "spsr")
+ FlagsVal |= 16;
+
+ Parser.Lex(); // Eat identifier token.
+ Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
+ return MatchOperand_Success;
+}
+
+/// Parse an ARM memory expression. Return false if successful; otherwise
/// report an error and return true. The first token must be a '[' when called.
+///
/// TODO Only preindexing and postindexing addressing are started, unindexed
/// with option, etc are still to do.
-bool ARMAsmParser::ParseMemory(OwningPtr<ARMOperand> &Op) {
+bool ARMAsmParser::
+ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S, E;
assert(Parser.getTok().is(AsmToken::LBrac) &&
- "Token is not an Left Bracket");
+ "Token is not a Left Bracket");
S = Parser.getTok().getLoc();
Parser.Lex(); // Eat left bracket token.
const AsmToken &BaseRegTok = Parser.getTok();
- if (BaseRegTok.isNot(AsmToken::Identifier))
- return Error(BaseRegTok.getLoc(), "register expected");
- if (MaybeParseRegister(Op, false))
- return Error(BaseRegTok.getLoc(), "register expected");
- int BaseRegNum = Op->getReg();
+ if (BaseRegTok.isNot(AsmToken::Identifier)) {
+ Error(BaseRegTok.getLoc(), "register expected");
+ return true;
+ }
+ int BaseRegNum = TryParseRegister();
+ if (BaseRegNum == -1) {
+ Error(BaseRegTok.getLoc(), "register expected");
+ return true;
+ }
+
+ // The next token must either be a comma or a closing bracket.
+ const AsmToken &Tok = Parser.getTok();
+ if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
+ return true;
bool Preindexed = false;
bool Postindexed = false;
bool OffsetIsReg = false;
bool Negative = false;
bool Writeback = false;
+ ARMOperand *WBOp = 0;
+ int OffsetRegNum = -1;
+ bool OffsetRegShifted = false;
+ enum ShiftType ShiftType = Lsl;
+ const MCExpr *ShiftAmount = 0;
+ const MCExpr *Offset = 0;
// First look for preindexed address forms, that is after the "[Rn" we now
// have to see if the next token is a comma.
- const AsmToken &Tok = Parser.getTok();
if (Tok.is(AsmToken::Comma)) {
Preindexed = true;
Parser.Lex(); // Eat comma token.
- int OffsetRegNum;
- bool OffsetRegShifted;
- enum ShiftType ShiftType;
- const MCExpr *ShiftAmount;
- const MCExpr *Offset;
- if(ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType, ShiftAmount,
- Offset, OffsetIsReg, OffsetRegNum, E))
+
+ if (ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType, ShiftAmount,
+ Offset, OffsetIsReg, OffsetRegNum, E))
return true;
const AsmToken &RBracTok = Parser.getTok();
- if (RBracTok.isNot(AsmToken::RBrac))
- return Error(RBracTok.getLoc(), "']' expected");
+ if (RBracTok.isNot(AsmToken::RBrac)) {
+ Error(RBracTok.getLoc(), "']' expected");
+ return true;
+ }
E = RBracTok.getLoc();
Parser.Lex(); // Eat right bracket token.
const AsmToken &ExclaimTok = Parser.getTok();
if (ExclaimTok.is(AsmToken::Exclaim)) {
- E = ExclaimTok.getLoc();
+ WBOp = ARMOperand::CreateToken(ExclaimTok.getString(),
+ ExclaimTok.getLoc());
Writeback = true;
Parser.Lex(); // Eat exclaim token
}
- ARMOperand::CreateMem(Op, BaseRegNum, OffsetIsReg, Offset, OffsetRegNum,
- OffsetRegShifted, ShiftType, ShiftAmount,
- Preindexed, Postindexed, Negative, Writeback, S, E);
- return false;
- }
- // The "[Rn" we have so far was not followed by a comma.
- else if (Tok.is(AsmToken::RBrac)) {
- // This is a post indexing addressing forms, that is a ']' follows after
- // the "[Rn".
- Postindexed = true;
- Writeback = true;
+ } else {
+ // The "[Rn" we have so far was not followed by a comma.
+
+ // If there's anything other than the right brace, this is a post indexing
+ // addressing form.
E = Tok.getLoc();
Parser.Lex(); // Eat right bracket token.
- int OffsetRegNum = 0;
- bool OffsetRegShifted = false;
- enum ShiftType ShiftType;
- const MCExpr *ShiftAmount;
- const MCExpr *Offset;
-
const AsmToken &NextTok = Parser.getTok();
+
if (NextTok.isNot(AsmToken::EndOfStatement)) {
- if (NextTok.isNot(AsmToken::Comma))
- return Error(NextTok.getLoc(), "',' expected");
+ Postindexed = true;
+ Writeback = true;
+
+ if (NextTok.isNot(AsmToken::Comma)) {
+ Error(NextTok.getLoc(), "',' expected");
+ return true;
+ }
+
Parser.Lex(); // Eat comma token.
- if(ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType,
- ShiftAmount, Offset, OffsetIsReg, OffsetRegNum,
- E))
+
+ if (ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType,
+ ShiftAmount, Offset, OffsetIsReg, OffsetRegNum,
+ E))
return true;
}
+ }
- ARMOperand::CreateMem(Op, BaseRegNum, OffsetIsReg, Offset, OffsetRegNum,
- OffsetRegShifted, ShiftType, ShiftAmount,
- Preindexed, Postindexed, Negative, Writeback, S, E);
- return false;
+ // If no offset expression was parsed, default the immediate offset to #0.
+ if (!OffsetIsReg) {
+ if (!Offset)
+ Offset = MCConstantExpr::Create(0, getContext());
}
- return true;
+ Operands.push_back(ARMOperand::CreateMem(BaseRegNum, OffsetIsReg, Offset,
+ OffsetRegNum, OffsetRegShifted,
+ ShiftType, ShiftAmount, Preindexed,
+ Postindexed, Negative, Writeback,
+ S, E));
+ if (WBOp)
+ Operands.push_back(WBOp);
+
+ return false;
}
/// Parse the offset of a memory operand after we have seen "[Rn," or "[Rn],"
@@ -543,7 +1171,6 @@ bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
bool &OffsetIsReg,
int &OffsetRegNum,
SMLoc &E) {
- OwningPtr<ARMOperand> Op;
Negative = false;
OffsetRegShifted = false;
OffsetIsReg = false;
@@ -559,13 +1186,15 @@ bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
// See if there is a register following the "[Rn," or "[Rn]," we have so far.
const AsmToken &OffsetRegTok = Parser.getTok();
if (OffsetRegTok.is(AsmToken::Identifier)) {
- OffsetIsReg = !MaybeParseRegister(Op, false);
- if (OffsetIsReg) {
- E = Op->getEndLoc();
- OffsetRegNum = Op->getReg();
+ SMLoc CurLoc = OffsetRegTok.getLoc();
+ OffsetRegNum = TryParseRegister();
+ if (OffsetRegNum != -1) {
+ OffsetIsReg = true;
+ E = CurLoc;
}
}
- // If we parsed a register as the offset then their can be a shift after that
+
+ // If we parsed a register as the offset then there can be a shift after that.
if (OffsetRegNum != -1) {
// Look for a comma then a shift
const AsmToken &Tok = Parser.getTok();
@@ -583,7 +1212,7 @@ bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
const AsmToken &HashTok = Parser.getTok();
if (HashTok.isNot(AsmToken::Hash))
return Error(HashTok.getLoc(), "'#' expected");
-
+
Parser.Lex(); // Eat hash token.
if (getParser().ParseExpression(Offset))
@@ -597,8 +1226,7 @@ bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
/// ( lsl | lsr | asr | ror ) , # shift_amount
/// rrx
/// and returns true if it parses a shift, otherwise it returns false.
-bool ARMAsmParser::ParseShift(ShiftType &St,
- const MCExpr *&ShiftAmount,
+bool ARMAsmParser::ParseShift(ShiftType &St, const MCExpr *&ShiftAmount,
SMLoc &E) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
@@ -636,13 +1264,33 @@ bool ARMAsmParser::ParseShift(ShiftType &St,
/// Parse an ARM instruction operand. For now this parses the operand regardless
/// of the mnemonic.
-bool ARMAsmParser::ParseOperand(OwningPtr<ARMOperand> &Op) {
+bool ARMAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ StringRef Mnemonic) {
SMLoc S, E;
-
+
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+ if (ResTy == MatchOperand_Success)
+ return false;
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_ParseFail)
+ return true;
+
switch (getLexer().getKind()) {
+ default:
+ Error(Parser.getTok().getLoc(), "unexpected token in operand");
+ return true;
case AsmToken::Identifier:
- if (!MaybeParseRegister(Op, true))
+ if (!TryParseRegisterWithWriteBack(Operands))
return false;
+
+ // Fall through for the Identifier case that is not a register or a
+ // special name.
+ case AsmToken::Integer: // things like 1f and 2b as branch targets
+ case AsmToken::Dot: { // . as a branch target
// This was not a register so parse other operands that start with an
// identifier (like labels) as expressions and create them as immediates.
const MCExpr *IdVal;
@@ -650,12 +1298,13 @@ bool ARMAsmParser::ParseOperand(OwningPtr<ARMOperand> &Op) {
if (getParser().ParseExpression(IdVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- ARMOperand::CreateImm(Op, IdVal, S, E);
+ Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
return false;
+ }
case AsmToken::LBrac:
- return ParseMemory(Op);
+ return ParseMemory(Operands);
case AsmToken::LCurly:
- return ParseRegisterList(Op);
+ return ParseRegisterList(Operands);
case AsmToken::Hash:
// #42 -> immediate.
// TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
@@ -665,28 +1314,134 @@ bool ARMAsmParser::ParseOperand(OwningPtr<ARMOperand> &Op) {
if (getParser().ParseExpression(ImmVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- ARMOperand::CreateImm(Op, ImmVal, S, E);
+ Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
return false;
- default:
- return Error(Parser.getTok().getLoc(), "unexpected token in operand");
+ case AsmToken::Colon: {
+ // ":lower16:" and ":upper16:" expression prefixes
+ // FIXME: Check it's an expression prefix,
+ // e.g. (FOO - :lower16:BAR) isn't legal.
+ ARMMCExpr::VariantKind RefKind;
+ if (ParsePrefix(RefKind))
+ return true;
+
+ const MCExpr *SubExprVal;
+ if (getParser().ParseExpression(SubExprVal))
+ return true;
+
+ const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
+ getContext());
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
+ return false;
+ }
}
}
-/// Parse an arm instruction mnemonic followed by its operands.
-bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- OwningPtr<ARMOperand> Op;
+// ParsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
+// :lower16: and :upper16:.
+bool ARMAsmParser::ParsePrefix(ARMMCExpr::VariantKind &RefKind) {
+ RefKind = ARMMCExpr::VK_ARM_None;
- // Create the leading tokens for the mnemonic, split by '.' characters.
- size_t Start = 0, Next = Name.find('.');
- StringRef Head = Name.slice(Start, Next);
+ // :lower16: and :upper16: modifiers
+ assert(getLexer().is(AsmToken::Colon) && "expected a :");
+ Parser.Lex(); // Eat ':'
+
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
+ return true;
+ }
+
+ StringRef IDVal = Parser.getTok().getIdentifier();
+ if (IDVal == "lower16") {
+ RefKind = ARMMCExpr::VK_ARM_LO16;
+ } else if (IDVal == "upper16") {
+ RefKind = ARMMCExpr::VK_ARM_HI16;
+ } else {
+ Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
+ return true;
+ }
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::Colon)) {
+ Error(Parser.getTok().getLoc(), "unexpected token after prefix");
+ return true;
+ }
+ Parser.Lex(); // Eat the last ':'
+ return false;
+}
+
+const MCExpr *
+ARMAsmParser::ApplyPrefixToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind Variant) {
+ // Recurse over the given expression, rebuilding it to apply the given variant
+ // to the leftmost symbol.
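+ // For a binary expression such as (foo + 4), only the left-hand side is
+ // rebuilt with the variant; the right-hand side is kept as-is.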
+ if (Variant == MCSymbolRefExpr::VK_None)
+ return E;
+
+ switch (E->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle target expr yet");
+ case MCExpr::Constant:
+ llvm_unreachable("Can't handle lower16/upper16 of constant yet");
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E);
- // Determine the predicate, if any.
+ if (SRE->getKind() != MCSymbolRefExpr::VK_None)
+ return 0;
+
+ return MCSymbolRefExpr::Create(&SRE->getSymbol(), Variant, getContext());
+ }
+
+ case MCExpr::Unary:
+ llvm_unreachable("Can't handle unary expressions yet");
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(E);
+ const MCExpr *LHS = ApplyPrefixToExpr(BE->getLHS(), Variant);
+ const MCExpr *RHS = BE->getRHS();
+ if (!LHS)
+ return 0;
+
+ return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, getContext());
+ }
+ }
+
+ assert(0 && "Invalid expression kind!");
+ return 0;
+}
+
+/// \brief Given a mnemonic, split out possible predication code and carry
+/// setting letters to form a canonical mnemonic and flags.
+//
+// FIXME: Would be nice to autogen this.
+static StringRef SplitMnemonic(StringRef Mnemonic,
+ unsigned &PredicationCode,
+ bool &CarrySetting,
+ unsigned &ProcessorIMod) {
+ PredicationCode = ARMCC::AL;
+ CarrySetting = false;
+ ProcessorIMod = 0;
+
+ // Ignore some mnemonics we know aren't predicated forms.
//
- // FIXME: We need a way to check whether a prefix supports predication,
- // otherwise we will end up with an ambiguity for instructions that happen to
- // end with a predicate name.
- unsigned CC = StringSwitch<unsigned>(Head.substr(Head.size()-2))
+ // FIXME: Would be nice to autogen this.
+ if (Mnemonic == "teq" || Mnemonic == "vceq" ||
+ Mnemonic == "movs" ||
+ Mnemonic == "svc" ||
+ (Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
+ Mnemonic == "vmls" || Mnemonic == "vnmls") ||
+ Mnemonic == "vacge" || Mnemonic == "vcge" ||
+ Mnemonic == "vclt" ||
+ Mnemonic == "vacgt" || Mnemonic == "vcgt" ||
+ Mnemonic == "vcle" ||
+ (Mnemonic == "smlal" || Mnemonic == "umaal" || Mnemonic == "umlal" ||
+ Mnemonic == "vabal" || Mnemonic == "vmlal" || Mnemonic == "vpadal" ||
+ Mnemonic == "vqdmlal"))
+ return Mnemonic;
+
+ // First, split out any predication code.
+ unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
.Case("eq", ARMCC::EQ)
.Case("ne", ARMCC::NE)
.Case("hs", ARMCC::HS)
@@ -704,44 +1459,268 @@ bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
.Case("al", ARMCC::AL)
.Default(~0U);
if (CC != ~0U) {
- Head = Head.slice(0, Head.size() - 2);
- } else
- CC = ARMCC::AL;
+ Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
+ PredicationCode = CC;
+ }
+
+ // Next, determine if we have a carry setting bit. We explicitly ignore all
+ // the instructions we know end in 's'.
+ if (Mnemonic.endswith("s") &&
+ !(Mnemonic == "asrs" || Mnemonic == "cps" || Mnemonic == "mls" ||
+ Mnemonic == "movs" || Mnemonic == "mrs" || Mnemonic == "smmls" ||
+ Mnemonic == "vabs" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
+ Mnemonic == "vmrs" || Mnemonic == "vnmls" || Mnemonic == "vqabs" ||
+ Mnemonic == "vrecps" || Mnemonic == "vrsqrts")) {
+ Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
+ CarrySetting = true;
+ }
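+ // At this point a mnemonic such as "addseq" has been reduced to "add" with
+ // CarrySetting set and PredicationCode == ARMCC::EQ.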
+
+ // The "cps" instruction can have a interrupt mode operand which is glued into
+ // the mnemonic. Check if this is the case, split it and parse the imod op
+ if (Mnemonic.startswith("cps")) {
+ // Split out any imod code.
+ unsigned IMod =
+ StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
+ .Case("ie", ARM_PROC::IE)
+ .Case("id", ARM_PROC::ID)
+ .Default(~0U);
+ if (IMod != ~0U) {
+ Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
+ ProcessorIMod = IMod;
+ }
+ }
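+ // For example, "cpsie" is split into "cps" with ProcessorIMod == ARM_PROC::IE.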
+
+ return Mnemonic;
+}
+
+/// \brief Given a canonical mnemonic, determine if the instruction ever allows
+/// inclusion of carry set or predication code operands.
+//
+// FIXME: It would be nice to autogen this.
+void ARMAsmParser::
+GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
+ bool &CanAcceptPredicationCode) {
+ bool isThumb = TM.getSubtarget<ARMSubtarget>().isThumb();
+
+ if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
+ Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
+ Mnemonic == "smull" || Mnemonic == "add" || Mnemonic == "adc" ||
+ Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
+ Mnemonic == "umlal" || Mnemonic == "orr" || Mnemonic == "mov" ||
+ Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
+ Mnemonic == "sbc" || Mnemonic == "mla" || Mnemonic == "umull" ||
+ Mnemonic == "eor" || Mnemonic == "smlal" || Mnemonic == "mvn") {
+ CanAcceptCarrySet = true;
+ } else {
+ CanAcceptCarrySet = false;
+ }
+
+ if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
+ Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
+ Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
+ Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
+ Mnemonic == "dsb" || Mnemonic == "movs" || Mnemonic == "isb" ||
+ Mnemonic == "clrex" || Mnemonic.startswith("cps")) {
+ CanAcceptPredicationCode = false;
+ } else {
+ CanAcceptPredicationCode = true;
+ }
- ARMOperand::CreateToken(Op, Head, NameLoc);
- Operands.push_back(Op.take());
+ if (isThumb)
+ if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
+ Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
+ CanAcceptPredicationCode = false;
+}
+
+/// Parse an ARM instruction mnemonic followed by its operands.
+bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Create the leading tokens for the mnemonic, split by '.' characters.
+ size_t Start = 0, Next = Name.find('.');
+ StringRef Head = Name.slice(Start, Next);
+
+ // Split out the predication code and carry setting flag from the mnemonic.
+ unsigned PredicationCode;
+ unsigned ProcessorIMod;
+ bool CarrySetting;
+ Head = SplitMnemonic(Head, PredicationCode, CarrySetting,
+ ProcessorIMod);
+
+ Operands.push_back(ARMOperand::CreateToken(Head, NameLoc));
+
+ // Next, add the CCOut and ConditionCode operands, if needed.
+ //
+ // For mnemonics which can ever incorporate a carry setting bit or predication
+ // code, our matching model involves us always generating CCOut and
+ // ConditionCode operands to match the mnemonic "as written" and then we let
+ // the matcher deal with finding the right instruction or generating an
+ // appropriate error.
+ bool CanAcceptCarrySet, CanAcceptPredicationCode;
+ GetMnemonicAcceptInfo(Head, CanAcceptCarrySet, CanAcceptPredicationCode);
+
+ // Add the carry setting operand, if necessary.
+ //
+ // FIXME: It would be awesome if we could somehow invent a location such that
+ // match errors on this operand would print a nice diagnostic about how the
+ // 's' character in the mnemonic resulted in a CCOut operand.
+ if (CanAcceptCarrySet) {
+ Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
+ NameLoc));
+ } else {
+ // This mnemonic can't ever accept a carry set, but the user wrote one (or
+ // misspelled another mnemonic).
+
+ // FIXME: Issue a nice error.
+ }
+
+ // Add the predication code operand, if necessary.
+ if (CanAcceptPredicationCode) {
+ Operands.push_back(ARMOperand::CreateCondCode(
+ ARMCC::CondCodes(PredicationCode), NameLoc));
+ } else {
+ // This mnemonic can't ever accept a predication code, but the user wrote
+ // one (or misspelled another mnemonic).
+
+ // FIXME: Issue a nice error.
+ }
+
+ // Add the processor imod operand, if necessary.
+ if (ProcessorIMod) {
+ Operands.push_back(ARMOperand::CreateImm(
+ MCConstantExpr::Create(ProcessorIMod, getContext()),
+ NameLoc, NameLoc));
+ } else {
+ // This mnemonic can't ever accept an imod, but the user wrote
+ // one (or misspelled another mnemonic).
- ARMOperand::CreateCondCode(Op, ARMCC::CondCodes(CC), NameLoc);
- Operands.push_back(Op.take());
+ // FIXME: Issue a nice error.
+ }
// Add the remaining tokens in the mnemonic.
while (Next != StringRef::npos) {
Start = Next;
Next = Name.find('.', Start + 1);
- Head = Name.slice(Start, Next);
+ StringRef ExtraToken = Name.slice(Start, Next);
- ARMOperand::CreateToken(Op, Head, NameLoc);
- Operands.push_back(Op.take());
+ Operands.push_back(ARMOperand::CreateToken(ExtraToken, NameLoc));
}
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
- OwningPtr<ARMOperand> Op;
- if (ParseOperand(Op)) return true;
- Operands.push_back(Op.take());
+ if (ParseOperand(Operands, Head)) {
+ Parser.EatToEndOfStatement();
+ return true;
+ }
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
// Parse and remember the operand.
- if (ParseOperand(Op)) return true;
- Operands.push_back(Op.take());
+ if (ParseOperand(Operands, Head)) {
+ Parser.EatToEndOfStatement();
+ return true;
+ }
}
}
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Parser.EatToEndOfStatement();
+ return TokError("unexpected token in argument list");
+ }
+
+ Parser.Lex(); // Consume the EndOfStatement
return false;
}
+bool ARMAsmParser::
+MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) {
+ MCInst Inst;
+ unsigned ErrorInfo;
+ MatchResultTy MatchResult, MatchResult2;
+ MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
+ if (MatchResult != Match_Success) {
+ // If we get a Match_InvalidOperand it might be some arithmetic instruction
+ // that does not update the condition codes. So try adding a CCOut operand
+ // with a value of reg0.
+ if (MatchResult == Match_InvalidOperand) {
+ Operands.insert(Operands.begin() + 1,
+ ARMOperand::CreateCCOut(0,
+ ((ARMOperand*)Operands[0])->getStartLoc()));
+ MatchResult2 = MatchInstructionImpl(Operands, Inst, ErrorInfo);
+ if (MatchResult2 == Match_Success)
+ MatchResult = Match_Success;
+ else {
+ ARMOperand *CCOut = ((ARMOperand*)Operands[1]);
+ Operands.erase(Operands.begin() + 1);
+ delete CCOut;
+ }
+ }
+ // If we get a Match_MnemonicFail it might be some arithmetic instruction
+ // that updates the condition codes if it ends in 's'. So see if the
+ // mnemonic ends in 's' and if so try removing the 's' and adding a CCOut
+ // operand with a value of CPSR.
+ else if(MatchResult == Match_MnemonicFail) {
+ // Get the instruction mnemonic, which is the first token.
+ StringRef Mnemonic = ((ARMOperand*)Operands[0])->getToken();
+ if (Mnemonic.substr(Mnemonic.size()-1) == "s") {
+ // Remove the 's' from the mnemonic for matching.
+ StringRef MnemonicNoS = Mnemonic.slice(0, Mnemonic.size() - 1);
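+ // For example, a failed "movs" is retried here as "mov" plus a CPSR CCOut.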
+ SMLoc NameLoc = ((ARMOperand*)Operands[0])->getStartLoc();
+ ARMOperand *OldMnemonic = ((ARMOperand*)Operands[0]);
+ Operands.erase(Operands.begin());
+ delete OldMnemonic;
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(MnemonicNoS, NameLoc));
+ Operands.insert(Operands.begin() + 1,
+ ARMOperand::CreateCCOut(ARM::CPSR, NameLoc));
+ MatchResult2 = MatchInstructionImpl(Operands, Inst, ErrorInfo);
+ if (MatchResult2 == Match_Success)
+ MatchResult = Match_Success;
+ else {
+ ARMOperand *OldMnemonic = ((ARMOperand*)Operands[0]);
+ Operands.erase(Operands.begin());
+ delete OldMnemonic;
+ Operands.insert(Operands.begin(),
+ ARMOperand::CreateToken(Mnemonic, NameLoc));
+ ARMOperand *CCOut = ((ARMOperand*)Operands[1]);
+ Operands.erase(Operands.begin() + 1);
+ delete CCOut;
+ }
+ }
+ }
+ }
+ switch (MatchResult) {
+ case Match_Success:
+ Out.EmitInstruction(Inst);
+ return false;
+ case Match_MissingFeature:
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+ case Match_MnemonicFail:
+ return Error(IDLoc, "unrecognized instruction mnemonic");
+ case Match_ConversionFail:
+ return Error(IDLoc, "unable to convert operands to instruction");
+ }
+
+ llvm_unreachable("Implement any new match types added!");
+ return true;
+}
+
/// ParseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
@@ -771,7 +1750,7 @@ bool ARMAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
if (getLexer().is(AsmToken::EndOfStatement))
break;
-
+
// FIXME: Improve diagnostic.
if (getLexer().isNot(AsmToken::Comma))
return Error(L, "unexpected token in directive");
@@ -801,16 +1780,16 @@ bool ARMAsmParser::ParseDirectiveThumb(SMLoc L) {
bool ARMAsmParser::ParseDirectiveThumbFunc(SMLoc L) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
- return Error(L, "unexpected token in .syntax directive");
- StringRef ATTRIBUTE_UNUSED SymbolName = Parser.getTok().getIdentifier();
+ return Error(L, "unexpected token in .thumb_func directive");
+ StringRef Name = Tok.getString();
Parser.Lex(); // Consume the identifier token.
-
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(L, "unexpected token in directive");
Parser.Lex();
- // TODO: mark symbol as a thumb symbol
- // getParser().getStreamer().Emit???();
+ // Mark symbol as a thumb symbol.
+ MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
+ getParser().getStreamer().EmitThumbFunc(Func);
return false;
}
@@ -824,7 +1803,7 @@ bool ARMAsmParser::ParseDirectiveSyntax(SMLoc L) {
if (Mode == "unified" || Mode == "UNIFIED")
Parser.Lex();
else if (Mode == "divided" || Mode == "DIVIDED")
- Parser.Lex();
+ return Error(L, "'.syntax divided' arm asssembly not supported");
else
return Error(L, "unrecognized syntax mode in .syntax directive");
@@ -855,8 +1834,21 @@ bool ARMAsmParser::ParseDirectiveCode(SMLoc L) {
return Error(Parser.getTok().getLoc(), "unexpected token in directive");
Parser.Lex();
- // TODO tell the MC streamer the mode
- // getParser().getStreamer().Emit???();
+ // FIXME: We need to be able to switch subtargets at this point so that
+ // MatchInstructionImpl() will work when it gets the AvailableFeatures which
+ // includes Feature_IsThumb or not to match the right instructions. This is
+ // blocked on the FIXME in llvm-mc.cpp when creating the TargetMachine.
+ if (Val == 16) {
+ assert(TM.getSubtarget<ARMSubtarget>().isThumb() &&
+ "switching between arm/thumb not yet supported via .code 16");
+ getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
+ } else {
+ assert(!TM.getSubtarget<ARMSubtarget>().isThumb() &&
+ "switching between thumb/arm not yet supported via .code 32");
+ getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
+ }
+
return false;
}
@@ -869,4 +1861,6 @@ extern "C" void LLVMInitializeARMAsmParser() {
LLVMInitializeARMAsmLexer();
}
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index e220289..78d73d3 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -39,9 +39,9 @@
/// o static uint16_t decodeThumbInstruction(field_t insn) - the decoding
/// function for a Thumb instruction.
///
-#include "../ARMGenDecoderTables.inc"
+#include "ARMGenDecoderTables.inc"
-#include "../ARMGenEDInfo.inc"
+#include "ARMGenEDInfo.inc"
using namespace llvm;
@@ -89,7 +89,8 @@ static unsigned decodeARMInstruction(uint32_t &insn) {
return ARM::BFI;
}
- // Ditto for STRBT, which is a super-instruction for A8.6.199 Encoding A1 & A2.
+ // Ditto for STRBT, which is a super-instruction for A8.6.199 Encodings
+ // A1 & A2.
// As a result, the decoder fails to decode USAT properly.
if (slice(insn, 27, 21) == 0x37 && slice(insn, 5, 4) == 1)
return ARM::USAT;
@@ -252,9 +253,6 @@ static unsigned T2Morph2LoadLiteral(unsigned Opcode) {
default:
return Opcode; // Return unmorphed opcode.
- case ARM::t2LDRDi8:
- return ARM::t2LDRDpci;
-
case ARM::t2LDR_POST: case ARM::t2LDR_PRE:
case ARM::t2LDRi12: case ARM::t2LDRi8:
case ARM::t2LDRs: case ARM::t2LDRT:
@@ -349,36 +347,6 @@ static unsigned decodeThumbSideEffect(bool IsThumb2, unsigned &insn) {
return decodeThumbInstruction(insn);
}
-static inline bool Thumb2PreloadOpcodeNoPCI(unsigned Opcode) {
- switch (Opcode) {
- default:
- return false;
- case ARM::t2PLDi12: case ARM::t2PLDi8:
- case ARM::t2PLDr: case ARM::t2PLDs:
- case ARM::t2PLDWi12: case ARM::t2PLDWi8:
- case ARM::t2PLDWr: case ARM::t2PLDWs:
- case ARM::t2PLIi12: case ARM::t2PLIi8:
- case ARM::t2PLIr: case ARM::t2PLIs:
- return true;
- }
-}
-
-static inline unsigned T2Morph2Preload2PCI(unsigned Opcode) {
- switch (Opcode) {
- default:
- return 0;
- case ARM::t2PLDi12: case ARM::t2PLDi8:
- case ARM::t2PLDr: case ARM::t2PLDs:
- return ARM::t2PLDpci;
- case ARM::t2PLDWi12: case ARM::t2PLDWi8:
- case ARM::t2PLDWr: case ARM::t2PLDWs:
- return ARM::t2PLDWpci;
- case ARM::t2PLIi12: case ARM::t2PLIi8:
- case ARM::t2PLIr: case ARM::t2PLIs:
- return ARM::t2PLIpci;
- }
-}
-
//
// Public interface for the disassembler
//
@@ -485,11 +453,6 @@ bool ThumbDisassembler::getInstruction(MCInst &MI,
// instructions as well.
unsigned Opcode = decodeThumbSideEffect(IsThumb2, insn);
- // A8.6.117/119/120/121.
- // PLD/PLDW/PLI instructions with Rn==15 is transformed to the pci variant.
- if (Thumb2PreloadOpcodeNoPCI(Opcode) && slice(insn, 19, 16) == 15)
- Opcode = T2Morph2Preload2PCI(Opcode);
-
ARMFormat Format = ARMFormats[Opcode];
Size = IsThumb2 ? 4 : 2;
@@ -568,9 +531,9 @@ static MCDisassembler *createThumbDisassembler(const Target &T) {
return new ThumbDisassembler;
}
-extern "C" void LLVMInitializeARMDisassembler() {
+extern "C" void LLVMInitializeARMDisassembler() {
// Register the disassembler.
- TargetRegistry::RegisterMCDisassembler(TheARMTarget,
+ TargetRegistry::RegisterMCDisassembler(TheARMTarget,
createARMDisassembler);
TargetRegistry::RegisterMCDisassembler(TheThumbTarget,
createThumbDisassembler);
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
index 9f493b9..bac68dd 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
@@ -79,22 +79,9 @@ const char *ARMUtils::OpcodeName(unsigned Opcode) {
}
// Return the register enum Based on RegClass and the raw register number.
-// For DRegPair, see comments below.
// FIXME: Auto-gened?
-static unsigned getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister,
- bool DRegPair = false) {
-
- if (DRegPair && RegClassID == ARM::QPRRegClassID) {
- // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
- // in the ARM Architecture Manual as far as I understand it (A8.6.307).
- // Therefore, we morph the RegClassID to be the sub register class and don't
- // subsequently transform the RawRegister encoding when calculating RegNum.
- //
- // See also ARMinstPrinter::printOperand() wrt "dregpair" modifier part
- // where this workaround is meant for.
- RegClassID = ARM::DPRRegClassID;
- }
-
+static unsigned
+getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
// For this purpose, we can treat rGPR as if it were GPR.
if (RegClassID == ARM::rGPRRegClassID) RegClassID = ARM::GPRRegClassID;
@@ -704,8 +691,8 @@ static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
// MSR/MSRsys: Rm mask=Inst{19-16}
// BXJ: Rm
// MSRi/MSRsysi: so_imm
-// SRSW/SRS: addrmode4:$addr mode_imm
-// RFEW/RFE: addrmode4:$addr Rn
+// SRSW/SRS: ldstm_mode:$amode mode_imm
+// RFEW/RFE: ldstm_mode:$amode Rn
static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
@@ -733,35 +720,34 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
NumOpsAdded = 1;
return true;
}
- // MSR and MSRsys take one GPR reg Rm, followed by the mask.
- if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
- assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ // MSR takes a mask, followed by one GPR reg Rm. The mask contains the R Bit in
+ // bit 4, and the special register fields in bits 3-0.
+ if (Opcode == ARM::MSR) {
+ assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
"Reg operand expected");
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
+ slice(insn, 19, 16) /* Special Reg */ ));
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRm(insn))));
- MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
NumOpsAdded = 2;
return true;
}
- // MSRi and MSRsysi take one so_imm operand, followed by the mask.
- if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
+ // MSRi takes a mask, followed by one so_imm operand. The mask contains the
+ // R Bit in bit 4, and the special register fields in bits 3-0.
+ if (Opcode == ARM::MSRi) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
+ slice(insn, 19, 16) /* Special Reg */ ));
// SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
// A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
// See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
unsigned Imm = insn & 0xFF;
MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
- MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
NumOpsAdded = 2;
return true;
}
- // SRSW and SRS requires addrmode4:$addr for ${addr:submode}, followed by the
- // mode immediate (Inst{4-0}).
if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
Opcode == ARM::RFEW || Opcode == ARM::RFE) {
- // ARMInstPrinter::printAddrMode4Operand() prints special mode string
- // if the base register is SP; so don't set ARM::SP.
- MI.addOperand(MCOperand::CreateReg(0));
ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
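A condensed sketch of the mask operand both MSR paths above build: the R bit (Inst{22}) lands in bit 4 and the special-register fields (Inst{19-16}) in bits 3-0. buildMSRMask is an illustrative helper, not an LLVM API:

#include <cstdint>

static uint32_t buildMSRMask(uint32_t Insn) {
  uint32_t RBit    = (Insn >> 22) & 0x1; // Inst{22}
  uint32_t SpecReg = (Insn >> 16) & 0xF; // Inst{19-16}
  return (RBit << 4) | SpecReg;          // R bit in bit 4, fields in 3-0
}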
@@ -807,9 +793,8 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// Misc. Branch Instructions.
-// BR_JTadd, BR_JTr, BR_JTm
// BLXr9, BXr9
-// BRIND, BX_RET
+// BX, BX_RET
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
@@ -820,12 +805,12 @@ static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
OpIdx = 0;
- // BX_RET has only two predicate operands, do an early return.
- if (Opcode == ARM::BX_RET)
+ // BX_RET and MOVPCLR have only two predicate operands; do an early return.
+ if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
return true;
- // BLXr9 and BRIND take one GPR reg.
- if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
+ // BLXr9 and BX take one GPR reg.
+ if (Opcode == ARM::BLXr9 || Opcode == ARM::BX) {
assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
"Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -834,72 +819,6 @@ static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
return true;
}
- // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
- if (Opcode == ARM::BR_JTadd) {
- // InOperandList with GPR:$target and GPR:$idx regs.
-
- assert(NumOps == 4 && "Expect 4 operands");
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRn(insn))));
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRm(insn))));
-
- // Fill in the two remaining imm operands to signify build completion.
- MI.addOperand(MCOperand::CreateImm(0));
- MI.addOperand(MCOperand::CreateImm(0));
-
- OpIdx = 4;
- return true;
- }
-
- // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
- if (Opcode == ARM::BR_JTr) {
- // InOperandList with GPR::$target reg.
-
- assert(NumOps == 3 && "Expect 3 operands");
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRm(insn))));
-
- // Fill in the two remaining imm operands to signify build completion.
- MI.addOperand(MCOperand::CreateImm(0));
- MI.addOperand(MCOperand::CreateImm(0));
-
- OpIdx = 3;
- return true;
- }
-
- // BR_JTm is an LDR with Rt = PC.
- if (Opcode == ARM::BR_JTm) {
- // This is the reg/reg form, with base reg followed by +/- reg shop imm.
- // See also ARMAddressingModes.h (Addressing Mode #2).
-
- assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRn(insn))));
-
- ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
-
- // Disassemble the offset reg (Rm), shift type, and immediate shift length.
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRm(insn))));
- // Inst{6-5} encodes the shift opcode.
- ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
- // Inst{11-7} encodes the imm5 shift amount.
- unsigned ShImm = slice(insn, 11, 7);
-
- // A8.4.1. Possible rrx or shift amount of 32...
- getImmShiftSE(ShOp, ShImm);
- MI.addOperand(MCOperand::CreateImm(
- ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
-
- // Fill in the two remaining imm operands to signify build completion.
- MI.addOperand(MCOperand::CreateImm(0));
- MI.addOperand(MCOperand::CreateImm(0));
-
- OpIdx = 5;
- return true;
- }
-
return false;
}
@@ -1324,30 +1243,28 @@ static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");
-
- unsigned &OpIdx = NumOpsAdded;
-
- OpIdx = 0;
+ NumOpsAdded = 0;
unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
// Writeback to base, if necessary.
- if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
+ if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
+ Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
+ Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
+ Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
MI.addOperand(MCOperand::CreateReg(Base));
- ++OpIdx;
+ ++NumOpsAdded;
}
+ // Add the base register operand.
MI.addOperand(MCOperand::CreateReg(Base));
- ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
-
// Handling the two predicate operands before the reglist.
int64_t CondVal = insn >> ARMII::CondShift;
MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
- OpIdx += 4;
+ NumOpsAdded += 3;
// Fill the variadic part of reglist.
unsigned RegListBits = insn & ((1 << 16) - 1);
@@ -1355,7 +1272,7 @@ static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
if ((RegListBits >> i) & 1) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
i)));
- ++OpIdx;
+ ++NumOpsAdded;
}
}
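The reglist loop above applies one rule: each set bit i of Inst{15-0} contributes register number i to the variadic operand list. As a self-contained sketch (expandRegList is illustrative only):

#include <cstdint>
#include <vector>

static std::vector<unsigned> expandRegList(uint32_t Insn) {
  std::vector<unsigned> Regs;
  uint32_t RegListBits = Insn & ((1u << 16) - 1); // Inst{15-0}
  for (unsigned i = 0; i != 16; ++i)
    if ((RegListBits >> i) & 1)
      Regs.push_back(i); // raw number; the real code maps it via getRegisterEnum
  return Regs;
}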
@@ -1586,8 +1503,7 @@ static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
}
// A7.5.1
-#if 0
-static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
+static APInt VFPExpandImm(unsigned char byte, unsigned N) {
assert(N == 32 || N == 64);
uint64_t Result;
@@ -1602,13 +1518,12 @@ static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
Result = (uint64_t)slice(byte, 7, 7) << 63 |
(uint64_t)slice(byte, 5, 0) << 48;
if (bit6)
- Result |= 0xffL << 54;
+ Result |= 0xffULL << 54;
else
- Result |= 0x1L << 62;
+ Result |= 0x1ULL << 62;
}
- return Result;
+ return APInt(N, Result);
}
-#endif
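To make the A7.5.1 expansion concrete, here is the N == 64 arm as a standalone sketch, reproducing the bit layout the function computes (imm64 = a : NOT(b) : Replicate(b, 8) : cdefgh : Zeros(48)) without the APInt wrapper:

#include <cstdint>

static uint64_t vfpExpandImm64(uint8_t Byte) {
  uint64_t Result = (uint64_t)((Byte >> 7) & 1) << 63 |  // a (sign)
                    (uint64_t)(Byte & 0x3F) << 48;       // cdefgh at bits 53-48
  if ((Byte >> 6) & 1)
    Result |= 0xffULL << 54;  // b set: Replicate(b, 8) at bits 61-54, NOT(b) = 0
  else
    Result |= 0x1ULL << 62;   // b clear: NOT(b) = 1 at bit 62
  return Result;
}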
// VFP Unary Format Instructions:
//
@@ -1902,8 +1817,10 @@ static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
// Writeback to base, if necessary.
- if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
- Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
+ if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
+ Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
+ Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
+ Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
MI.addOperand(MCOperand::CreateReg(Base));
++OpIdx;
}
@@ -1926,8 +1843,10 @@ static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
OpIdx += 4;
- bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
- Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD);
+ bool isSPVFP = (Opcode == ARM::VLDMSIA || Opcode == ARM::VLDMSDB ||
+ Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
+ Opcode == ARM::VSTMSIA || Opcode == ARM::VSTMSDB ||
+ Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
// Extract Dd/Sd.
@@ -1985,10 +1904,14 @@ static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// Extract/decode the f64/f32 immediate.
if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
- // The asm syntax specifies the before-expanded <imm>.
- // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
- // Opcode == ARM::FCONSTD ? 64 : 32)
- MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
+ // The asm syntax specifies the floating point value, not the 8-bit literal.
+ APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
+ Opcode == ARM::FCONSTD ? 64 : 32);
+ APFloat immFP = APFloat(immRaw, true);
+ double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
+ immFP.convertToFloat();
+ MI.addOperand(MCOperand::CreateFPImm(imm));
+
++OpIdx;
}
@@ -2201,22 +2124,6 @@ static unsigned decodeN3VImm(uint32_t insn) {
return (insn >> 8) & 0xF;
}
-static bool UseDRegPair(unsigned Opcode) {
- switch (Opcode) {
- default:
- return false;
- case ARM::VLD1q8_UPD:
- case ARM::VLD1q16_UPD:
- case ARM::VLD1q32_UPD:
- case ARM::VLD1q64_UPD:
- case ARM::VST1q8_UPD:
- case ARM::VST1q16_UPD:
- case ARM::VST1q32_UPD:
- case ARM::VST1q64_UPD:
- return true;
- }
-}
-
// VLD*
// D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
// VLD*LN*
@@ -2243,10 +2150,9 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
// We have homogeneous NEON registers for Load/Store.
unsigned RegClass = 0;
- bool DRegPair = UseDRegPair(Opcode);
// Double-spaced registers have increments of 2.
- unsigned Inc = (DblSpaced || DRegPair) ? 2 : 1;
+ unsigned Inc = DblSpaced ? 2 : 1;
unsigned Rn = decodeRn(insn);
unsigned Rm = decodeRm(insn);
@@ -2292,7 +2198,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
RegClass = OpInfo[OpIdx].RegClass;
while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
MI.addOperand(MCOperand::CreateReg(
- getRegisterEnum(B, RegClass, Rd, DRegPair)));
+ getRegisterEnum(B, RegClass, Rd)));
Rd += Inc;
++OpIdx;
}
@@ -2311,7 +2217,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
MI.addOperand(MCOperand::CreateReg(
- getRegisterEnum(B, RegClass, Rd, DRegPair)));
+ getRegisterEnum(B, RegClass, Rd)));
Rd += Inc;
++OpIdx;
}
@@ -2771,8 +2677,8 @@ static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
N3V_VectorShift, B);
}
-static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
+static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
N3V_VectorExtract, B);
@@ -2959,9 +2865,9 @@ static inline bool MemBarrierInstr(uint32_t insn) {
static inline bool PreLoadOpcode(unsigned Opcode) {
switch(Opcode) {
- case ARM::PLDi: case ARM::PLDr:
- case ARM::PLDWi: case ARM::PLDWr:
- case ARM::PLIi: case ARM::PLIr:
+ case ARM::PLDi12: case ARM::PLDrs:
+ case ARM::PLDWi12: case ARM::PLDWrs:
+ case ARM::PLIi12: case ARM::PLIrs:
return true;
default:
return false;
@@ -2971,18 +2877,21 @@ static inline bool PreLoadOpcode(unsigned Opcode) {
static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
- // Preload Data/Instruction requires either 2 or 4 operands.
- // PLDi, PLDWi, PLIi: Rn [+/-]imm12 add = (U == '1')
- // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
+ // Preload Data/Instruction requires either 2 or 3 operands.
+ // PLDi, PLDWi, PLIi: addrmode_imm12
+ // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: ldst_so_reg
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRn(insn))));
- if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
+ if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
+ || Opcode == ARM::PLIi12) {
unsigned Imm12 = slice(insn, 11, 0);
bool Negative = getUBit(insn) == 0;
- int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
- MI.addOperand(MCOperand::CreateImm(Offset));
+ // -0 is represented specially. All other values are as normal.
+ if (Imm12 == 0 && Negative)
+ Imm12 = INT32_MIN;
+ MI.addOperand(MCOperand::CreateImm(Imm12));
NumOpsAdded = 2;
} else {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -3026,22 +2935,36 @@ static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
case ARM::WFE:
case ARM::WFI:
case ARM::SEV:
- case ARM::SETENDBE:
- case ARM::SETENDLE:
return true;
default:
break;
}
- // CPS has a singleton $opt operand that contains the following information:
- // opt{4-0} = mode from Inst{4-0}
- // opt{5} = changemode from Inst{17}
- // opt{8-6} = AIF from Inst{8-6}
- // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
- if (Opcode == ARM::CPS) {
- unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
- slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
- MI.addOperand(MCOperand::CreateImm(Option));
+ if (Opcode == ARM::SETEND) {
+ NumOpsAdded = 1;
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
+ return true;
+ }
+
+ // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
+ // opcodes which match the same real instruction. This is needed since there's
+ // no current handling of optional arguments. Fix here when a better handling
+ // of optional arguments is implemented.
+ if (Opcode == ARM::CPS3p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
+ NumOpsAdded = 3;
+ return true;
+ }
+ if (Opcode == ARM::CPS2p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
+ NumOpsAdded = 2;
+ return true;
+ }
+ if (Opcode == ARM::CPS1p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
NumOpsAdded = 1;
return true;
}
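The three CPS variants above slice the same fields from the instruction word; a compact sketch of that split (struct and helper names are illustrative):

#include <cstdint>

struct CPSOps { unsigned IMod, IFlags, Mode; };

static CPSOps decodeCPSOps(uint32_t Insn) {
  CPSOps Ops;
  Ops.IMod   = (Insn >> 18) & 0x3;  // Inst{19-18}
  Ops.IFlags = (Insn >> 6)  & 0x7;  // Inst{8-6}: A, I, F
  Ops.Mode   =  Insn        & 0x1F; // Inst{4-0}
  return Ops;
}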
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h b/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
index 112817b..23372e0 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
@@ -564,6 +564,38 @@ static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn,
// t_addrmode_sp := sp + imm8 * 4
//
+// A8.6.63 LDRB (literal)
+// A8.6.79 LDRSB (literal)
+// A8.6.75 LDRH (literal)
+// A8.6.83 LDRSH (literal)
+// A8.6.59 LDR (literal)
+//
+// These instrs calculate an address from the PC value and an immediate offset.
+// Rd Rn=PC (+/-)imm12 (+ if Inst{23} == 0b1)
+static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
+
+ const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+ if (!OpInfo) return false;
+
+ assert(NumOps >= 2 &&
+ OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ OpInfo[1].RegClass < 0 &&
+ "Expect >= 2 operands, first as reg, and second as imm operand");
+
+ // Build the register operand, followed by the (+/-)imm12 immediate.
+
+ MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
+ decodeRd(insn))));
+
+ MI.addOperand(MCOperand::CreateImm(decodeImm12(insn)));
+
+ NumOpsAdded = 2;
+
+ return true;
+}
+
+
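The comment above only names the address computation; per the A8.6.59 semantics it cites, the base is the word-aligned PC and imm12 is added when Inst{23} is set, subtracted otherwise. A sketch under that reading (literalLoadAddress is illustrative):

#include <cstdint>

static uint32_t literalLoadAddress(uint32_t PC, uint32_t Imm12, bool Add) {
  uint32_t Base = PC & ~3u;                 // Align(PC, 4)
  return Add ? Base + Imm12 : Base - Imm12;
}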
// A6.2.4 Load/store single data item
//
// Load/Store Register (reg|imm): tRd tRn imm5 tRm
@@ -796,14 +828,13 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// CPS has a singleton $opt operand that contains the following information:
- // opt{4-0} = don't care
- // opt{5} = 0 (false)
- // opt{8-6} = AIF from Inst{2-0}
- // opt{10-9} = 1:imod from Inst{4} with 0b10 as enable and 0b11 as disable
+ // The first op would be 0b10 as enable and 0b11 as disable in regular ARM,
+ // but in Thumb it's 0 as enable and 1 as disable. So map it to ARM's
+ // default one. The second gets the AIF flags from Inst{2-0}.
if (Opcode == ARM::tCPS) {
- unsigned Option = slice(insn, 2, 0) << 6 | slice(insn, 4, 4) << 9 | 1 << 10;
- MI.addOperand(MCOperand::CreateImm(Option));
- NumOpsAdded = 1;
+ MI.addOperand(MCOperand::CreateImm(2 + slice(insn, 4, 4)));
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 2, 0)));
+ NumOpsAdded = 2;
return true;
}
@@ -833,40 +864,32 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn,
// A8.6.53 LDM / LDMIA
// A8.6.189 STM / STMIA
//
-// tLDM_UPD/tSTM_UPD: tRt tRt AM4ModeImm Pred-Imm Pred-CCR register_list
-// tLDM: tRt AM4ModeImm Pred-Imm Pred-CCR register_list
+// tLDMIA_UPD/tSTMIA_UPD: tRt tRt AM4ModeImm Pred-Imm Pred-CCR register_list
+// tLDMIA: tRt AM4ModeImm Pred-Imm Pred-CCR register_list
static bool DisassembleThumb1LdStMul(bool Ld, MCInst &MI, unsigned Opcode,
- uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
-
- assert((Opcode == ARM::tLDM || Opcode == ARM::tLDM_UPD ||
- Opcode == ARM::tSTM_UPD) && "Unexpected opcode");
-
- unsigned &OpIdx = NumOpsAdded;
+ uint32_t insn, unsigned short NumOps,
+ unsigned &NumOpsAdded, BO B) {
+ assert((Opcode == ARM::tLDMIA || Opcode == ARM::tLDMIA_UPD ||
+ Opcode == ARM::tSTMIA_UPD) && "Unexpected opcode");
unsigned tRt = getT1tRt(insn);
-
- OpIdx = 0;
+ NumOpsAdded = 0;
// WB register, if necessary.
- if (Opcode == ARM::tLDM_UPD || Opcode == ARM::tSTM_UPD) {
+ if (Opcode == ARM::tLDMIA_UPD || Opcode == ARM::tSTMIA_UPD) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
tRt)));
- ++OpIdx;
+ ++NumOpsAdded;
}
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
tRt)));
- ++OpIdx;
-
- // A8.6.53 LDM / LDMIA / LDMFD - Encoding T1
- // A8.6.53 STM / STMIA / STMEA - Encoding T1
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)));
- ++OpIdx;
+ ++NumOpsAdded;
// Handling the two predicate operands before the reglist.
- if (B->DoPredicateOperands(MI, Opcode, insn, NumOps))
- OpIdx += 2;
- else {
+ if (B->DoPredicateOperands(MI, Opcode, insn, NumOps)) {
+ NumOpsAdded += 2;
+ } else {
DEBUG(errs() << "Expected predicate operands not found.\n");
return false;
}
@@ -874,13 +897,12 @@ static bool DisassembleThumb1LdStMul(bool Ld, MCInst &MI, unsigned Opcode,
unsigned RegListBits = slice(insn, 7, 0);
// Fill the variadic part of reglist.
- for (unsigned i = 0; i < 8; ++i) {
+ for (unsigned i = 0; i < 8; ++i)
if ((RegListBits >> i) & 1) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::tGPRRegClassID,
i)));
- ++OpIdx;
+ ++NumOpsAdded;
}
- }
return true;
}
@@ -959,22 +981,23 @@ static bool DisassembleThumb1Br(MCInst &MI, unsigned Opcode, uint32_t insn,
// corresponding to op.
//
// Table A6-1 16-bit Thumb instruction encoding (abridged)
-// op Instruction or instruction class
-// ------ --------------------------------------------------------------------
-// 00xxxx Shift (immediate), add, subtract, move, and compare on page A6-7
-// 010000 Data-processing on page A6-8
-// 010001 Special data instructions and branch and exchange on page A6-9
-// 01001x Load from Literal Pool, see LDR (literal) on page A8-122
-// 0101xx Load/store single data item on page A6-10
+// op Instruction or instruction class
+// ------ --------------------------------------------------------------------
+// 00xxxx Shift (immediate), add, subtract, move, and compare on page A6-7
+// 010000 Data-processing on page A6-8
+// 010001 Special data instructions and branch and exchange on page A6-9
+// 01001x Load from Literal Pool, see LDR (literal) on page A8-122
+// 0101xx Load/store single data item on page A6-10
// 011xxx
// 100xxx
-// 10100x Generate PC-relative address, see ADR on page A8-32
-// 10101x Generate SP-relative address, see ADD (SP plus immediate) on page A8-28
-// 1011xx Miscellaneous 16-bit instructions on page A6-11
-// 11000x Store multiple registers, see STM / STMIA / STMEA on page A8-374
-// 11001x Load multiple registers, see LDM / LDMIA / LDMFD on page A8-110 a
-// 1101xx Conditional branch, and Supervisor Call on page A6-13
-// 11100x Unconditional Branch, see B on page A8-44
+// 10100x Generate PC-relative address, see ADR on page A8-32
+// 10101x Generate SP-relative address, see ADD (SP plus immediate) on
+// page A8-28
+// 1011xx Miscellaneous 16-bit instructions on page A6-11
+// 11000x Store multiple registers, see STM / STMIA / STMEA on page A8-374
+// 11001x Load multiple registers, see LDM / LDMIA / LDMFD on page A8-110 a
+// 1101xx Conditional branch, and Supervisor Call on page A6-13
+// 11100x Unconditional Branch, see B on page A8-44
//
static bool DisassembleThumb1(uint16_t op, MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
@@ -1121,34 +1144,31 @@ static bool DisassembleThumb2LdStMul(MCInst &MI, unsigned Opcode, uint32_t insn,
if (Thumb2RFEOpcode(Opcode))
return DisassembleThumb2RFE(MI, Opcode, insn, NumOps, NumOpsAdded, B);
- assert((Opcode == ARM::t2LDM || Opcode == ARM::t2LDM_UPD ||
- Opcode == ARM::t2STM || Opcode == ARM::t2STM_UPD)
+ assert((Opcode == ARM::t2LDMIA || Opcode == ARM::t2LDMIA_UPD ||
+ Opcode == ARM::t2LDMDB || Opcode == ARM::t2LDMDB_UPD ||
+ Opcode == ARM::t2STMIA || Opcode == ARM::t2STMIA_UPD ||
+ Opcode == ARM::t2STMDB || Opcode == ARM::t2STMDB_UPD)
&& "Unexpected opcode");
assert(NumOps >= 5 && "Thumb2 LdStMul expects NumOps >= 5");
- unsigned &OpIdx = NumOpsAdded;
-
- OpIdx = 0;
+ NumOpsAdded = 0;
unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
// Writeback to base.
- if (Opcode == ARM::t2LDM_UPD || Opcode == ARM::t2STM_UPD) {
+ if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD ||
+ Opcode == ARM::t2STMIA_UPD || Opcode == ARM::t2STMDB_UPD) {
MI.addOperand(MCOperand::CreateReg(Base));
- ++OpIdx;
+ ++NumOpsAdded;
}
MI.addOperand(MCOperand::CreateReg(Base));
- ++OpIdx;
-
- ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
- ++OpIdx;
+ ++NumOpsAdded;
// Handling the two predicate operands before the reglist.
- if (B->DoPredicateOperands(MI, Opcode, insn, NumOps))
- OpIdx += 2;
- else {
+ if (B->DoPredicateOperands(MI, Opcode, insn, NumOps)) {
+ NumOpsAdded += 2;
+ } else {
DEBUG(errs() << "Expected predicate operands not found.\n");
return false;
}
@@ -1156,13 +1176,12 @@ static bool DisassembleThumb2LdStMul(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned RegListBits = insn & ((1 << 16) - 1);
// Fill the variadic part of reglist.
- for (unsigned i = 0; i < 16; ++i) {
+ for (unsigned i = 0; i < 16; ++i)
if ((RegListBits >> i) & 1) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
i)));
- ++OpIdx;
+ ++NumOpsAdded;
}
- }
return true;
}
@@ -1260,13 +1279,7 @@ static bool DisassembleThumb2LdStDual(MCInst &MI, unsigned Opcode,
return true;
}
-// PC-based defined for Codegen, which do not get decoded by design:
-//
-// t2TBB, t2TBH: Rm immDontCare immDontCare
-//
-// Generic version defined for disassembly:
-//
-// t2TBBgen, t2TBHgen: Rn Rm Pred-Imm Pred-CCR
+// t2TBB, t2TBH: Rn Rm Pred-Imm Pred-CCR
static bool DisassembleThumb2TB(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
@@ -1401,7 +1414,8 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
//
// Two register operands: Rs Rn ModImm
// One register operands (Rs=0b1111 no explicit dest reg): Rn ModImm
-// One register operands (Rn=0b1111 no explicit src reg): Rs ModImm - {t2MOVi, t2MVNi}
+// One register operands (Rn=0b1111 no explicit src reg): Rs ModImm -
+// {t2MOVi, t2MVNi}
//
// ModImm = ThumbExpandImm(i:imm3:imm8)
static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode,
@@ -1644,15 +1658,25 @@ static bool DisassembleThumb2BrMiscCtrl(MCInst &MI, unsigned Opcode,
break;
}
- // CPS has a singleton $opt operand that contains the following information:
- // opt{4-0} = mode from Inst{4-0}
- // opt{5} = changemode from Inst{8}
- // opt{8-6} = AIF from Inst{7-5}
- // opt{10-9} = imod from Inst{10-9} with 0b10 as enable and 0b11 as disable
- if (Opcode == ARM::t2CPS) {
- unsigned Option = slice(insn, 4, 0) | slice(insn, 8, 8) << 5 |
- slice(insn, 7, 5) << 6 | slice(insn, 10, 9) << 9;
- MI.addOperand(MCOperand::CreateImm(Option));
+ // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
+ // opcodes which match the same real instruction. This is needed since there's
+ // no current handling of optional arguments. Fix here when a better handling
+ // of optional arguments is implemented.
+ if (Opcode == ARM::t2CPS3p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 10, 9))); // imod
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 5))); // iflags
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
+ NumOpsAdded = 3;
+ return true;
+ }
+ if (Opcode == ARM::t2CPS2p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 10, 9))); // imod
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 5))); // iflags
+ NumOpsAdded = 2;
+ return true;
+ }
+ if (Opcode == ARM::t2CPS1p) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
NumOpsAdded = 1;
return true;
}
@@ -1678,11 +1702,13 @@ static bool DisassembleThumb2BrMiscCtrl(MCInst &MI, unsigned Opcode,
NumOpsAdded = 1;
return true;
}
- // MSR and MSRsys take one GPR reg Rn, followed by the mask.
- if (Opcode == ARM::t2MSR || Opcode == ARM::t2MSRsys || Opcode == ARM::t2BXJ) {
+ // MSR takes a mask, followed by one GPR reg Rn. The mask contains the R Bit in
+ // bit 4, and the special register fields in bits 3-0.
+ if (Opcode == ARM::t2MSR) {
+ MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 20) << 4 /* R Bit */ |
+ slice(insn, 11, 8) /* Special Reg */));
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRn(insn))));
- MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 8)));
NumOpsAdded = 2;
return true;
}
@@ -1728,12 +1754,12 @@ static inline bool Thumb2PreloadOpcode(unsigned Opcode) {
switch (Opcode) {
default:
return false;
- case ARM::t2PLDi12: case ARM::t2PLDi8: case ARM::t2PLDpci:
- case ARM::t2PLDr: case ARM::t2PLDs:
- case ARM::t2PLDWi12: case ARM::t2PLDWi8: case ARM::t2PLDWpci:
- case ARM::t2PLDWr: case ARM::t2PLDWs:
- case ARM::t2PLIi12: case ARM::t2PLIi8: case ARM::t2PLIpci:
- case ARM::t2PLIr: case ARM::t2PLIs:
+ case ARM::t2PLDi12: case ARM::t2PLDi8:
+ case ARM::t2PLDs:
+ case ARM::t2PLDWi12: case ARM::t2PLDWi8:
+ case ARM::t2PLDWs:
+ case ARM::t2PLIi12: case ARM::t2PLIi8:
+ case ARM::t2PLIs:
return true;
}
}
@@ -1769,11 +1795,10 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn,
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
int Offset = 0;
- if (Opcode == ARM::t2PLDpci || Opcode == ARM::t2PLDWpci ||
- Opcode == ARM::t2PLIpci) {
+ if (slice(insn, 19, 16) == 0xFF) {
bool Negative = slice(insn, 23, 23) == 0;
unsigned Imm12 = getImm12(insn);
- Offset = Negative ? -1 - Imm12 : 1 * Imm12;
+ Offset = Negative ? -1 - Imm12 : 1 * Imm12;
} else if (Opcode == ARM::t2PLDi8 || Opcode == ARM::t2PLDWi8 ||
Opcode == ARM::t2PLIi8) {
// A8.6.117 Encoding T2: add = FALSE
@@ -1795,37 +1820,6 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn,
return true;
}
-// A8.6.63 LDRB (literal)
-// A8.6.79 LDRSB (literal)
-// A8.6.75 LDRH (literal)
-// A8.6.83 LDRSH (literal)
-// A8.6.59 LDR (literal)
-//
-// These instrs calculate an address from the PC value and an immediate offset.
-// Rd Rn=PC (+/-)imm12 (+ if Inst{23} == 0b1)
-static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
- uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
-
- const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
- if (!OpInfo) return false;
-
- assert(NumOps >= 2 &&
- OpInfo[0].RegClass == ARM::GPRRegClassID &&
- OpInfo[1].RegClass < 0 &&
- "Expect >= 2 operands, first as reg, and second as imm operand");
-
- // Build the register operand, followed by the (+/-)imm12 immediate.
-
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
- decodeRd(insn))));
-
- MI.addOperand(MCOperand::CreateImm(decodeImm12(insn)));
-
- NumOpsAdded = 2;
-
- return true;
-}
-
// A6.3.10 Store single data item
// A6.3.9 Load byte, memory hints
// A6.3.8 Load halfword, memory hints
@@ -1835,13 +1829,15 @@ static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
//
// t2LDRi12: Rd Rn (+)imm12
// t2LDRi8: Rd Rn (+/-)imm8 (+ if Inst{9} == 0b1)
-// t2LDRs: Rd Rn Rm ConstantShiftSpecifier (see also DisassembleThumb2DPSoReg)
+// t2LDRs: Rd Rn Rm ConstantShiftSpecifier (see also
+// DisassembleThumb2DPSoReg)
// t2LDR_POST: Rd Rn Rn(TIED_TO) (+/-)imm8 (+ if Inst{9} == 0b1)
// t2LDR_PRE: Rd Rn Rn(TIED_TO) (+/-)imm8 (+ if Inst{9} == 0b1)
//
// t2STRi12: Rd Rn (+)imm12
// t2STRi8: Rd Rn (+/-)imm8 (+ if Inst{9} == 0b1)
-// t2STRs: Rd Rn Rm ConstantShiftSpecifier (see also DisassembleThumb2DPSoReg)
+// t2STRs: Rd Rn Rm ConstantShiftSpecifier (see also
+// DisassembleThumb2DPSoReg)
// t2STR_POST: Rn Rd Rn(TIED_TO) (+/-)imm8 (+ if Inst{9} == 0b1)
// t2STR_PRE: Rn Rd Rn(TIED_TO) (+/-)imm8 (+ if Inst{9} == 0b1)
//
@@ -1862,7 +1858,6 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
// See, for example, A6.3.7 Load word: Table A6-18 Load word.
if (Load && Rn == 15)
return DisassembleThumb2Ldpci(MI, Opcode, insn, NumOps, NumOpsAdded, B);
-
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
@@ -1909,7 +1904,7 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
else
Imm = decodeImm8(insn);
}
-
+
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
R0)));
++OpIdx;
@@ -2081,25 +2076,29 @@ static bool DisassembleThumb2LongMul(MCInst &MI, unsigned Opcode, uint32_t insn,
// corresponding to (op1, op2, op).
//
// Table A6-9 32-bit Thumb instruction encoding
-// op1 op2 op Instruction class, see
-// --- ------- -- ------------------------------------------------------------
-// 01 00xx0xx - Load/store multiple on page A6-23
-// 00xx1xx - Load/store dual, load/store exclusive, table branch on page A6-24
-// 01xxxxx - Data-processing (shifted register) on page A6-31
-// 1xxxxxx - Coprocessor instructions on page A6-40
-// 10 x0xxxxx 0 Data-processing (modified immediate) on page A6-15
-// x1xxxxx 0 Data-processing (plain binary immediate) on page A6-19
-// - 1 Branches and miscellaneous control on page A6-20
-// 11 000xxx0 - Store single data item on page A6-30
-// 001xxx0 - Advanced SIMD element or structure load/store instructions on page A7-27
-// 00xx001 - Load byte, memory hints on page A6-28
-// 00xx011 - Load halfword, memory hints on page A6-26
-// 00xx101 - Load word on page A6-25
-// 00xx111 - UNDEFINED
-// 010xxxx - Data-processing (register) on page A6-33
-// 0110xxx - Multiply, multiply accumulate, and absolute difference on page A6-38
-// 0111xxx - Long multiply, long multiply accumulate, and divide on page A6-39
-// 1xxxxxx - Coprocessor instructions on page A6-40
+// op1 op2 op Instruction class, see
+// --- ------- -- -----------------------------------------------------------
+// 01 00xx0xx - Load/store multiple on page A6-23
+// 00xx1xx - Load/store dual, load/store exclusive, table branch on
+// page A6-24
+// 01xxxxx - Data-processing (shifted register) on page A6-31
+// 1xxxxxx - Coprocessor instructions on page A6-40
+// 10 x0xxxxx 0 Data-processing (modified immediate) on page A6-15
+// x1xxxxx 0 Data-processing (plain binary immediate) on page A6-19
+// - 1 Branches and miscellaneous control on page A6-20
+// 11 000xxx0 - Store single data item on page A6-30
+// 001xxx0 - Advanced SIMD element or structure load/store instructions
+// on page A7-27
+// 00xx001 - Load byte, memory hints on page A6-28
+// 00xx011 - Load halfword, memory hints on page A6-26
+// 00xx101 - Load word on page A6-25
+// 00xx111 - UNDEFINED
+// 010xxxx - Data-processing (register) on page A6-33
+// 0110xxx - Multiply, multiply accumulate, and absolute difference on
+// page A6-38
+// 0111xxx - Long multiply, long multiply accumulate, and divide on
+// page A6-39
+// 1xxxxxx - Coprocessor instructions on page A6-40
//
static bool DisassembleThumb2(uint16_t op1, uint16_t op2, uint16_t op,
MCInst &MI, unsigned Opcode, uint32_t insn, unsigned short NumOps,
@@ -2130,7 +2129,7 @@ static bool DisassembleThumb2(uint16_t op1, uint16_t op2, uint16_t op,
return DisassembleThumb2LdStDual(MI, Opcode, insn, NumOps, NumOpsAdded,
B);
}
- if (Opcode == ARM::t2TBBgen || Opcode == ARM::t2TBHgen) {
+ if (Opcode == ARM::t2TBB || Opcode == ARM::t2TBH) {
// Table branch.
return DisassembleThumb2TB(MI, Opcode, insn, NumOps, NumOpsAdded, B);
}
@@ -2175,7 +2174,8 @@ static bool DisassembleThumb2(uint16_t op1, uint16_t op2, uint16_t op,
}
} else {
// Table A6-9 32-bit Thumb instruction encoding: Load byte|halfword|word
- return DisassembleThumb2LdSt(true, MI,Opcode,insn,NumOps,NumOpsAdded, B);
+ return DisassembleThumb2LdSt(true, MI, Opcode, insn, NumOps,
+ NumOpsAdded, B);
}
break;
case 1:
@@ -2229,7 +2229,7 @@ static bool DisassembleThumbFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// A6.3 32-bit Thumb instruction encoding
-
+
uint16_t op1 = slice(HalfWord, 12, 11);
uint16_t op2 = slice(HalfWord, 10, 4);
uint16_t op = slice(insn, 15, 15);
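A quick recap of how those Table A6-9 dispatch fields are carved out of the two halfwords (a sketch; names are illustrative):

#include <cstdint>

struct Thumb2DispatchFields { uint16_t Op1, Op2, Op; };

static Thumb2DispatchFields splitThumb2(uint16_t HalfWord, uint32_t Insn) {
  Thumb2DispatchFields F;
  F.Op1 = (HalfWord >> 11) & 0x3;  // HalfWord{12-11}
  F.Op2 = (HalfWord >> 4)  & 0x7F; // HalfWord{10-4}
  F.Op  = (Insn >> 15)     & 0x1;  // Inst{15} of the trailing halfword
  return F;
}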
diff --git a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 8026e77..1499da0 100644
--- a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "asm-printer"
-#include "ARM.h" // FIXME: FACTOR ENUMS BETTER.
+#include "ARMBaseInfo.h"
#include "ARMInstPrinter.h"
#include "ARMAddressingModes.h"
#include "llvm/MC/MCInst.h"
@@ -22,86 +22,20 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
-#define ARMAsmPrinter ARMInstPrinter // FIXME: REMOVE.
+#define GET_INSTRUCTION_NAME
#include "ARMGenAsmWriter.inc"
-#undef MachineInstr
-#undef ARMAsmPrinter
-static unsigned NextReg(unsigned Reg) {
- switch (Reg) {
- default:
- assert(0 && "Unexpected register enum");
-
- case ARM::D0:
- return ARM::D1;
- case ARM::D1:
- return ARM::D2;
- case ARM::D2:
- return ARM::D3;
- case ARM::D3:
- return ARM::D4;
- case ARM::D4:
- return ARM::D5;
- case ARM::D5:
- return ARM::D6;
- case ARM::D6:
- return ARM::D7;
- case ARM::D7:
- return ARM::D8;
- case ARM::D8:
- return ARM::D9;
- case ARM::D9:
- return ARM::D10;
- case ARM::D10:
- return ARM::D11;
- case ARM::D11:
- return ARM::D12;
- case ARM::D12:
- return ARM::D13;
- case ARM::D13:
- return ARM::D14;
- case ARM::D14:
- return ARM::D15;
- case ARM::D15:
- return ARM::D16;
- case ARM::D16:
- return ARM::D17;
- case ARM::D17:
- return ARM::D18;
- case ARM::D18:
- return ARM::D19;
- case ARM::D19:
- return ARM::D20;
- case ARM::D20:
- return ARM::D21;
- case ARM::D21:
- return ARM::D22;
- case ARM::D22:
- return ARM::D23;
- case ARM::D23:
- return ARM::D24;
- case ARM::D24:
- return ARM::D25;
- case ARM::D25:
- return ARM::D26;
- case ARM::D26:
- return ARM::D27;
- case ARM::D27:
- return ARM::D28;
- case ARM::D28:
- return ARM::D29;
- case ARM::D29:
- return ARM::D30;
- case ARM::D30:
- return ARM::D31;
- }
+StringRef ARMInstPrinter::getOpcodeName(unsigned Opcode) const {
+ return getInstructionName(Opcode);
}
+
void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O) {
+ unsigned Opcode = MI->getOpcode();
+
// Check for MOVs and print canonical forms, instead.
- if (MI->getOpcode() == ARM::MOVs) {
+ if (Opcode == ARM::MOVs) {
+ // FIXME: Thumb variants?
const MCOperand &Dst = MI->getOperand(0);
const MCOperand &MO1 = MI->getOperand(1);
const MCOperand &MO2 = MI->getOperand(2);
@@ -129,118 +63,82 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O) {
}
// A8.6.123 PUSH
- if ((MI->getOpcode() == ARM::STM_UPD || MI->getOpcode() == ARM::t2STM_UPD) &&
+ if ((Opcode == ARM::STMDB_UPD || Opcode == ARM::t2STMDB_UPD) &&
MI->getOperand(0).getReg() == ARM::SP) {
- const MCOperand &MO1 = MI->getOperand(2);
- if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::db) {
- O << '\t' << "push";
- printPredicateOperand(MI, 3, O);
- O << '\t';
- printRegisterList(MI, 5, O);
- return;
- }
+ O << '\t' << "push";
+ printPredicateOperand(MI, 2, O);
+ if (Opcode == ARM::t2STMDB_UPD)
+ O << ".w";
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ return;
}
// A8.6.122 POP
- if ((MI->getOpcode() == ARM::LDM_UPD || MI->getOpcode() == ARM::t2LDM_UPD) &&
+ if ((Opcode == ARM::LDMIA_UPD || Opcode == ARM::t2LDMIA_UPD) &&
MI->getOperand(0).getReg() == ARM::SP) {
- const MCOperand &MO1 = MI->getOperand(2);
- if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::ia) {
- O << '\t' << "pop";
- printPredicateOperand(MI, 3, O);
- O << '\t';
- printRegisterList(MI, 5, O);
- return;
- }
+ O << '\t' << "pop";
+ printPredicateOperand(MI, 2, O);
+ if (Opcode == ARM::t2LDMIA_UPD)
+ O << ".w";
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ return;
}
// A8.6.355 VPUSH
- if ((MI->getOpcode() == ARM::VSTMS_UPD || MI->getOpcode() ==ARM::VSTMD_UPD) &&
+ if ((Opcode == ARM::VSTMSDB_UPD || Opcode == ARM::VSTMDDB_UPD) &&
MI->getOperand(0).getReg() == ARM::SP) {
- const MCOperand &MO1 = MI->getOperand(2);
- if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::db) {
- O << '\t' << "vpush";
- printPredicateOperand(MI, 3, O);
- O << '\t';
- printRegisterList(MI, 5, O);
- return;
- }
+ O << '\t' << "vpush";
+ printPredicateOperand(MI, 2, O);
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ return;
}
// A8.6.354 VPOP
- if ((MI->getOpcode() == ARM::VLDMS_UPD || MI->getOpcode() ==ARM::VLDMD_UPD) &&
+ if ((Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMDIA_UPD) &&
MI->getOperand(0).getReg() == ARM::SP) {
- const MCOperand &MO1 = MI->getOperand(2);
- if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::ia) {
- O << '\t' << "vpop";
- printPredicateOperand(MI, 3, O);
- O << '\t';
- printRegisterList(MI, 5, O);
- return;
- }
+ O << '\t' << "vpop";
+ printPredicateOperand(MI, 2, O);
+ O << '\t';
+ printRegisterList(MI, 4, O);
+ return;
}
printInstruction(MI, O);
- }
+}
void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier) {
+ raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
unsigned Reg = Op.getReg();
- if (Modifier && strcmp(Modifier, "dregpair") == 0) {
- O << '{' << getRegisterName(Reg) << ", "
- << getRegisterName(NextReg(Reg)) << '}';
-#if 0
- // FIXME: Breaks e.g. ARM/vmul.ll.
- assert(0);
- /*
- unsigned DRegLo = TRI->getSubReg(Reg, ARM::dsub_0);
- unsigned DRegHi = TRI->getSubReg(Reg, ARM::dsub_1);
- O << '{'
- << getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
- << '}';*/
-#endif
- } else if (Modifier && strcmp(Modifier, "lane") == 0) {
- assert(0);
- /*
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
- unsigned DReg = TRI->getMatchingSuperReg(Reg, RegNum & 1 ? 2 : 1,
- &ARM::DPR_VFP2RegClass);
- O << getRegisterName(DReg) << '[' << (RegNum & 1) << ']';
- */
- } else {
- O << getRegisterName(Reg);
- }
+ O << getRegisterName(Reg);
} else if (Op.isImm()) {
- assert((Modifier && !strcmp(Modifier, "call")) ||
- ((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported"));
O << '#' << Op.getImm();
} else {
- if (Modifier && Modifier[0] != 0 && strcmp(Modifier, "call") != 0)
- llvm_unreachable("Unsupported modifier");
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << *Op.getExpr();
}
}
-static void printSOImm(raw_ostream &O, int64_t V, bool VerboseAsm,
+static void printSOImm(raw_ostream &O, int64_t V, raw_ostream *CommentStream,
const MCAsmInfo *MAI) {
// Break it up into two parts that make up a shifter immediate.
V = ARM_AM::getSOImmVal(V);
assert(V != -1 && "Not a valid so_imm value!");
-
+
unsigned Imm = ARM_AM::getSOImmValImm(V);
unsigned Rot = ARM_AM::getSOImmValRot(V);
-
+
// Print low-level immediate formation info, per
// A5.1.3: "Data-processing operands - Immediate".
if (Rot) {
O << "#" << Imm << ", " << Rot;
// Pretty printed version.
- if (VerboseAsm)
- O << ' ' << MAI->getCommentString()
- << ' ' << (int)ARM_AM::rotr32(Imm, Rot);
+ if (CommentStream)
+ *CommentStream << (int)ARM_AM::rotr32(Imm, Rot) << "\n";
} else {
O << "#" << Imm;
}
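The pretty-printed comment value comes from ARM_AM::rotr32(Imm, Rot). If the rotation is unfamiliar, a minimal 32-bit rotate-right with the same effect:

#include <cstdint>

static inline uint32_t rotateRight32(uint32_t Val, unsigned Amt) {
  Amt &= 31; // rotation amount is taken mod 32
  return Amt ? (Val >> Amt) | (Val << (32 - Amt)) : Val;
}
// e.g. rotateRight32(0xFF, 8) == 0xFF000000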
@@ -253,15 +151,7 @@ void ARMInstPrinter::printSOImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);
assert(MO.isImm() && "Not a valid so_imm value!");
- printSOImm(O, MO.getImm(), VerboseAsm, &MAI);
-}
-
-/// printSOImm2PartOperand - SOImm is broken into two pieces using a 'mov'
-/// followed by an 'orr' to materialize.
-void ARMInstPrinter::printSOImm2PartOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- // FIXME: REMOVE this method.
- abort();
+ printSOImm(O, MO.getImm(), CommentStream, &MAI);
}
// so_reg is a 4-operand unit corresponding to register forms of the A5.1
@@ -274,9 +164,9 @@ void ARMInstPrinter::printSORegOperand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
const MCOperand &MO3 = MI->getOperand(OpNum+2);
-
+
O << getRegisterName(MO1.getReg());
-
+
// Print the shift opc.
ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO3.getImm());
O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
@@ -294,14 +184,14 @@ void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op,
const MCOperand &MO1 = MI->getOperand(Op);
const MCOperand &MO2 = MI->getOperand(Op+1);
const MCOperand &MO3 = MI->getOperand(Op+2);
-
+
if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
printOperand(MI, Op, O);
return;
}
-
+
O << "[" << getRegisterName(MO1.getReg());
-
+
if (!MO2.getReg()) {
if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
O << ", #"
@@ -310,24 +200,24 @@ void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op,
O << "]";
return;
}
-
+
O << ", "
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
<< getRegisterName(MO2.getReg());
-
+
if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
O << ", "
<< ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
<< " #" << ShImm;
O << "]";
-}
+}
void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
+
if (!MO1.getReg()) {
unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
O << '#'
@@ -335,10 +225,10 @@ void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI,
<< ImmOffs;
return;
}
-
+
O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
<< getRegisterName(MO1.getReg());
-
+
if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
O << ", "
<< ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm()))
@@ -350,15 +240,15 @@ void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned OpNum,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
const MCOperand &MO3 = MI->getOperand(OpNum+2);
-
+
O << '[' << getRegisterName(MO1.getReg());
-
+
if (MO2.getReg()) {
O << ", " << (char)ARM_AM::getAM3Op(MO3.getImm())
<< getRegisterName(MO2.getReg()) << ']';
return;
}
-
+
if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
O << ", #"
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
@@ -371,53 +261,42 @@ void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
+
if (MO1.getReg()) {
O << (char)ARM_AM::getAM3Op(MO2.getImm())
<< getRegisterName(MO1.getReg());
return;
}
-
+
unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
O << '#'
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
<< ImmOffs;
}
-
-void ARMInstPrinter::printAddrMode4Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O,
- const char *Modifier) {
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- O << ARM_AM::getAMSubModeStr(Mode);
- } else if (Modifier && strcmp(Modifier, "wide") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Mode == ARM_AM::ia)
- O << ".w";
- } else {
- printOperand(MI, OpNum, O);
- }
+void ARMInstPrinter::printLdStmModeOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(OpNum)
+ .getImm());
+ O << ARM_AM::getAMSubModeStr(Mode);
}
void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O,
- const char *Modifier) {
+ raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
+
if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
printOperand(MI, OpNum, O);
return;
}
-
+
O << "[" << getRegisterName(MO1.getReg());
-
+
if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
O << ", #"
<< ARM_AM::getAddrOpcStr(ARM_AM::getAM5Op(MO2.getImm()))
- << ImmOffs*4;
+ << ImmOffs * 4;
}
O << "]";
}
@@ -426,7 +305,7 @@ void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
+
O << "[" << getRegisterName(MO1.getReg());
if (MO2.getImm()) {
// FIXME: Both darwin as and GNU as violate ARM docs here.
@@ -445,12 +324,6 @@ void ARMInstPrinter::printAddrMode6OffsetOperand(const MCInst *MI,
O << ", " << getRegisterName(MO.getReg());
}
-void ARMInstPrinter::printAddrModePCOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O,
- const char *Modifier) {
- assert(0 && "FIXME: Implement printAddrModePCOperand");
-}
-
void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
@@ -497,33 +370,41 @@ void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum,
O << "}";
}
-void ARMInstPrinter::printCPSOptionOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
+void ARMInstPrinter::printSetendOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNum);
- unsigned option = Op.getImm();
- unsigned mode = option & 31;
- bool changemode = option >> 5 & 1;
- unsigned AIF = option >> 6 & 7;
- unsigned imod = option >> 9 & 3;
- if (imod == 2)
- O << "ie";
- else if (imod == 3)
- O << "id";
- O << '\t';
- if (imod > 1) {
- if (AIF & 4) O << 'a';
- if (AIF & 2) O << 'i';
- if (AIF & 1) O << 'f';
- if (AIF > 0 && changemode) O << ", ";
- }
- if (changemode)
- O << '#' << mode;
+ if (Op.getImm())
+ O << "be";
+ else
+ O << "le";
+}
+
+void ARMInstPrinter::printCPSIMod(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+ O << ARM_PROC::IModToString(Op.getImm());
+}
+
+void ARMInstPrinter::printCPSIFlag(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNum);
+ unsigned IFlags = Op.getImm();
+ for (int i=2; i >= 0; --i)
+ if (IFlags & (1 << i))
+ O << ARM_PROC::IFlagsToString(1 << i);
}
void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNum);
- unsigned Mask = Op.getImm();
+ unsigned SpecRegRBit = Op.getImm() >> 4;
+ unsigned Mask = Op.getImm() & 0xf;
+
+ if (SpecRegRBit)
+ O << "spsr";
+ else
+ O << "cpsr";
+
if (Mask) {
O << '_';
if (Mask & 8) O << 'f';
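The hunk cuts off after the 'f' case; assuming the usual f/s/x/c mask-bit assignment (8/4/2/1) and the (R << 4) | fields operand layout built by the disassembler earlier, the full rendering would look like this sketch:

#include <string>

static std::string msrMaskToString(unsigned Imm) {
  std::string S = (Imm >> 4) ? "spsr" : "cpsr"; // R bit selects SPSR
  unsigned Mask = Imm & 0xF;
  if (Mask) {
    S += '_';
    if (Mask & 8) S += 'f';
    if (Mask & 4) S += 's';
    if (Mask & 2) S += 'x';
    if (Mask & 1) S += 'c';
  }
  return S;
}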
@@ -550,7 +431,7 @@ void ARMInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNum,
O << ARMCondCodeToString(CC);
}
-void ARMInstPrinter::printMandatoryPredicateOperand(const MCInst *MI,
+void ARMInstPrinter::printMandatoryPredicateOperand(const MCInst *MI,
unsigned OpNum,
raw_ostream &O) {
ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
@@ -566,25 +447,24 @@ void ARMInstPrinter::printSBitModifierOperand(const MCInst *MI, unsigned OpNum,
}
}
-
-
-void ARMInstPrinter::printCPInstOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O,
- const char *Modifier) {
- // FIXME: remove this.
- abort();
-}
-
void ARMInstPrinter::printNoHashImmediate(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
O << MI->getOperand(OpNum).getImm();
}
+void ARMInstPrinter::printPImmediate(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << "p" << MI->getOperand(OpNum).getImm();
+}
+
+void ARMInstPrinter::printCImmediate(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << "c" << MI->getOperand(OpNum).getImm();
+}
void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- // FIXME: remove this.
- abort();
+ llvm_unreachable("Unhandled PC-relative pseudo-instruction!");
}
void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum,
@@ -611,17 +491,25 @@ void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum,
void ARMInstPrinter::printThumbAddrModeRROperand(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(Op);
- const MCOperand &MO2 = MI->getOperand(Op+1);
+ const MCOperand &MO2 = MI->getOperand(Op + 1);
+
+ if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
+ printOperand(MI, Op, O);
+ return;
+ }
+
O << "[" << getRegisterName(MO1.getReg());
- O << ", " << getRegisterName(MO2.getReg()) << "]";
+ if (unsigned RegNum = MO2.getReg())
+ O << ", " << getRegisterName(RegNum);
+ O << "]";
}
-void ARMInstPrinter::printThumbAddrModeRI5Operand(const MCInst *MI, unsigned Op,
- raw_ostream &O,
- unsigned Scale) {
+void ARMInstPrinter::printThumbAddrModeImm5SOperand(const MCInst *MI,
+ unsigned Op,
+ raw_ostream &O,
+ unsigned Scale) {
const MCOperand &MO1 = MI->getOperand(Op);
- const MCOperand &MO2 = MI->getOperand(Op+1);
- const MCOperand &MO3 = MI->getOperand(Op+2);
+ const MCOperand &MO2 = MI->getOperand(Op + 1);
if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
printOperand(MI, Op, O);
@@ -629,44 +517,32 @@ void ARMInstPrinter::printThumbAddrModeRI5Operand(const MCInst *MI, unsigned Op,
}
O << "[" << getRegisterName(MO1.getReg());
- if (MO3.getReg())
- O << ", " << getRegisterName(MO3.getReg());
- else if (unsigned ImmOffs = MO2.getImm())
+ if (unsigned ImmOffs = MO2.getImm())
O << ", #" << ImmOffs * Scale;
O << "]";
}
-void ARMInstPrinter::printThumbAddrModeS1Operand(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 1);
+void ARMInstPrinter::printThumbAddrModeImm5S1Operand(const MCInst *MI,
+ unsigned Op,
+ raw_ostream &O) {
+ printThumbAddrModeImm5SOperand(MI, Op, O, 1);
}
-void ARMInstPrinter::printThumbAddrModeS2Operand(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 2);
+void ARMInstPrinter::printThumbAddrModeImm5S2Operand(const MCInst *MI,
+ unsigned Op,
+ raw_ostream &O) {
+ printThumbAddrModeImm5SOperand(MI, Op, O, 2);
}
-void ARMInstPrinter::printThumbAddrModeS4Operand(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- printThumbAddrModeRI5Operand(MI, Op, O, 4);
+void ARMInstPrinter::printThumbAddrModeImm5S4Operand(const MCInst *MI,
+ unsigned Op,
+ raw_ostream &O) {
+ printThumbAddrModeImm5SOperand(MI, Op, O, 4);
}
void ARMInstPrinter::printThumbAddrModeSPOperand(const MCInst *MI, unsigned Op,
raw_ostream &O) {
- const MCOperand &MO1 = MI->getOperand(Op);
- const MCOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg());
- if (unsigned ImmOffs = MO2.getImm())
- O << ", #" << ImmOffs*4;
- O << "]";
-}
-
-void ARMInstPrinter::printTBAddrMode(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "[pc, " << getRegisterName(MI->getOperand(OpNum).getReg());
- if (MI->getOpcode() == ARM::t2TBH)
- O << ", lsl #1";
- O << ']';
+ printThumbAddrModeImm5SOperand(MI, Op, O, 4);
}
// Constant shifts t2_so_reg is a 2-operand unit corresponding to the Thumb2
@@ -689,16 +565,26 @@ void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum,
O << " #" << ARM_AM::getSORegOffset(MO2.getImm());
}
-void ARMInstPrinter::printT2AddrModeImm12Operand(const MCInst *MI,
- unsigned OpNum,
- raw_ostream &O) {
+void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
+ if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
+ printOperand(MI, OpNum, O);
+ return;
+ }
+
O << "[" << getRegisterName(MO1.getReg());
- unsigned OffImm = MO2.getImm();
- if (OffImm) // Don't print +0.
+ int32_t OffImm = (int32_t)MO2.getImm();
+ bool isSub = OffImm < 0;
+ // Special value for #-0. All others are normal.
+ if (OffImm == INT32_MIN)
+ OffImm = 0;
+ if (isSub)
+ O << ", #-" << -OffImm;
+ else if (OffImm > 0)
O << ", #" << OffImm;
O << "]";
}
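The INT32_MIN sentinel above marks an encoded negative-zero offset, letting the printer distinguish "#-0" from an absent offset. The offset-to-text step in isolation (illustrative helper):

#include <climits>
#include <cstdint>
#include <string>

static std::string imm12OffsetToString(int32_t OffImm) {
  bool IsSub = OffImm < 0;
  if (OffImm == INT32_MIN) // sentinel for #-0
    OffImm = 0;
  if (IsSub)
    return ", #-" + std::to_string(-OffImm);
  if (OffImm > 0)
    return ", #" + std::to_string(OffImm);
  return ""; // don't print +0
}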
@@ -783,12 +669,37 @@ void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
void ARMInstPrinter::printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << '#' << MI->getOperand(OpNum).getImm();
+ const MCOperand &MO = MI->getOperand(OpNum);
+ O << '#';
+ if (MO.isFPImm()) {
+ O << (float)MO.getFPImm();
+ } else {
+ union {
+ uint32_t I;
+ float F;
+ } FPUnion;
+
+ FPUnion.I = MO.getImm();
+ O << FPUnion.F;
+ }
}
void ARMInstPrinter::printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
- O << '#' << MI->getOperand(OpNum).getImm();
+ const MCOperand &MO = MI->getOperand(OpNum);
+ O << '#';
+ if (MO.isFPImm()) {
+ O << MO.getFPImm();
+ } else {
+ // We expect the binary encoding of a floating point number here.
+ union {
+ uint64_t I;
+ double D;
+ } FPUnion;
+
+ FPUnion.I = MO.getImm();
+ O << FPUnion.D;
+ }
}
void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
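
The VFP immediate printers above recover an IEEE-754 value from the operand's raw bit pattern through a uint32_t/float (or uint64_t/double) union. A standalone sketch of the same reinterpretation, using memcpy rather than a union to sidestep type-punning concerns (decodeF32 is a hypothetical name, not an LLVM API):

#include <cstdint>
#include <cstring>
#include <iostream>

// Reinterpret a raw 32-bit encoding as a float, as printVFPf32ImmOperand's
// union does for non-FPImm operands.
static float decodeF32(uint32_t Bits) {
  float F;
  std::memcpy(&F, &Bits, sizeof F);
  return F;
}

int main() {
  std::cout << decodeF32(0x3f800000) << '\n'; // 1
  std::cout << decodeF32(0xbf000000) << '\n'; // -0.5
  return 0;
}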
diff --git a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index e5ad0d0..679d313 100644
--- a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -18,26 +18,25 @@
namespace llvm {
class MCOperand;
-
+
class ARMInstPrinter : public MCInstPrinter {
- bool VerboseAsm;
public:
- ARMInstPrinter(const MCAsmInfo &MAI, bool verboseAsm)
- : MCInstPrinter(MAI), VerboseAsm(verboseAsm) {}
+ ARMInstPrinter(const MCAsmInfo &MAI) : MCInstPrinter(MAI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O);
-
+ virtual StringRef getOpcodeName(unsigned Opcode) const;
+
+ static const char *getInstructionName(unsigned Opcode);
+
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
- const char *Modifier = 0);
-
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
void printSOImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printSOImm2PartOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
+
void printSORegOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAddrMode2Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAddrMode2OffsetOperand(const MCInst *MI, unsigned OpNum,
@@ -45,15 +44,11 @@ public:
void printAddrMode3Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAddrMode3OffsetOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
- void printAddrMode4Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O,
- const char *Modifier = 0);
- void printAddrMode5Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O,
- const char *Modifier = 0);
+ void printLdStmModeOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printAddrMode5Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAddrMode6Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAddrMode6OffsetOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
- void printAddrModePCOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O,
- const char *Modifier = 0);
void printBitfieldInvMaskImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
@@ -64,20 +59,20 @@ public:
void printThumbITMask(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printThumbAddrModeRROperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
- void printThumbAddrModeRI5Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O, unsigned Scale);
- void printThumbAddrModeS1Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
- void printThumbAddrModeS2Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
- void printThumbAddrModeS4Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
+ void printThumbAddrModeImm5SOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O, unsigned Scale);
+ void printThumbAddrModeImm5S1Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printThumbAddrModeImm5S2Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printThumbAddrModeImm5S4Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
void printThumbAddrModeSPOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
-
+
void printT2SOOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printT2AddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O);
+ void printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum,
@@ -88,7 +83,10 @@ public:
raw_ostream &O);
void printT2AddrModeSoRegOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
-
+
+ void printSetendOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printCPSIMod(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printCPSIFlag(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printCPSOptionOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMSRMaskOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printNegZeroOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
@@ -98,21 +96,16 @@ public:
void printSBitModifierOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O);
void printRegisterList(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printCPInstOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O,
- const char *Modifier);
- void printJTBlockOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {}
- void printJT2BlockOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {}
- void printTBAddrMode(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printNoHashImmediate(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printPImmediate(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printCImmediate(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printNEONModImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- // FIXME: Implement.
- void PrintSpecial(const MCInst *MI, raw_ostream &O, const char *Kind) {}
+ void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
};
-
-}
+
+} // end namespace llvm
#endif
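
The reworked header pairs a virtual printInst entry point with static, tblgen-generated lookups such as getInstructionName and getRegisterName. A toy model of that shape, with stand-in types for MCInst and raw_ostream (every name below is illustrative only):

#include <iostream>

struct Inst { unsigned Opcode; };  // Stand-in for MCInst.

// Base class supplying the virtual printing hook, as MCInstPrinter does.
struct InstPrinterBase {
  virtual ~InstPrinterBase() {}
  virtual void printInst(const Inst *MI, std::ostream &O) = 0;
};

// Target printer: static per-opcode name lookup (tblgen-generated in LLVM)
// plus the virtual entry point that drives it.
struct ToyInstPrinter : InstPrinterBase {
  static const char *getInstructionName(unsigned Opcode) {
    return Opcode == 0 ? "nop" : "unknown";
  }
  virtual void printInst(const Inst *MI, std::ostream &O) {
    O << '\t' << getInstructionName(MI->Opcode) << '\n';
  }
};

int main() {
  Inst I = { 0 };
  ToyInstPrinter P;
  P.printInst(&I, std::cout);
  return 0;
}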
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/CMakeLists.txt b/contrib/llvm/lib/Target/ARM/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..18645c0
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/CMakeLists.txt
@@ -0,0 +1,6 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMARMAsmPrinter
+ ARMInstPrinter.cpp
+ )
+add_dependencies(LLVMARMAsmPrinter ARMCodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/Makefile b/contrib/llvm/lib/Target/ARM/InstPrinter/Makefile
new file mode 100644
index 0000000..65d372e
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/ARM/InstPrinter/Makefile -----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMARMAsmPrinter
+
+# Hack: we need to include the 'main' ARM target directory to grab private headers.
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
new file mode 100644
index 0000000..f9e86eb
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -0,0 +1,321 @@
+//===-- MLxExpansionPass.cpp - Expand MLx instrs to avoid hazards ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Expand VFP / NEON floating point MLA / MLS instructions (each to a pair of
+// multiply and add / sub instructions) when special VMLx hazards are detected.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mlx-expansion"
+#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+static cl::opt<bool>
+ForceExpand("expand-all-fp-mlx", cl::init(false), cl::Hidden);
+static cl::opt<unsigned>
+ExpandLimit("expand-limit", cl::init(~0U), cl::Hidden);
+
+STATISTIC(NumExpand, "Number of fp MLA / MLS instructions expanded");
+
+namespace {
+ struct MLxExpansion : public MachineFunctionPass {
+ static char ID;
+ MLxExpansion() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &Fn);
+
+ virtual const char *getPassName() const {
+ return "ARM MLA / MLS expansion pass";
+ }
+
+ private:
+ const ARMBaseInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+ unsigned MIIdx;
+ MachineInstr* LastMIs[4];
+
+ void clearStack();
+ void pushStack(MachineInstr *MI);
+ MachineInstr *getAccDefMI(MachineInstr *MI) const;
+ unsigned getDefReg(MachineInstr *MI) const;
+ bool hasRAWHazard(unsigned Reg, MachineInstr *MI) const;
+ bool FindMLxHazard(MachineInstr *MI) const;
+ void ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
+ unsigned MulOpc, unsigned AddSubOpc,
+ bool NegAcc, bool HasLane);
+ bool ExpandFPMLxInstructions(MachineBasicBlock &MBB);
+ };
+ char MLxExpansion::ID = 0;
+}
+
+void MLxExpansion::clearStack() {
+ std::fill(LastMIs, LastMIs + 4, (MachineInstr*)0);
+ MIIdx = 0;
+}
+
+void MLxExpansion::pushStack(MachineInstr *MI) {
+ LastMIs[MIIdx] = MI;
+ if (++MIIdx == 4)
+ MIIdx = 0;
+}
+
+MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const {
+ // Look past COPY and INSERT_SUBREG instructions to find the
+ // real definition MI. This is important for _sfp instructions.
+ unsigned Reg = MI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return 0;
+
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *DefMI = MRI->getVRegDef(Reg);
+ while (true) {
+ if (DefMI->getParent() != MBB)
+ break;
+ if (DefMI->isCopyLike()) {
+ Reg = DefMI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ DefMI = MRI->getVRegDef(Reg);
+ continue;
+ }
+ } else if (DefMI->isInsertSubreg()) {
+ Reg = DefMI->getOperand(2).getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ DefMI = MRI->getVRegDef(Reg);
+ continue;
+ }
+ }
+ break;
+ }
+ return DefMI;
+}
+
+unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ !MRI->hasOneNonDBGUse(Reg))
+ return Reg;
+
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *UseMI = &*MRI->use_nodbg_begin(Reg);
+ if (UseMI->getParent() != MBB)
+ return Reg;
+
+ while (UseMI->isCopy() || UseMI->isInsertSubreg()) {
+ Reg = UseMI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ !MRI->hasOneNonDBGUse(Reg))
+ return Reg;
+ UseMI = &*MRI->use_nodbg_begin(Reg);
+ if (UseMI->getParent() != MBB)
+ return Reg;
+ }
+
+ return Reg;
+}
+
+bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
+ const TargetInstrDesc &TID = MI->getDesc();
+ // FIXME: Detect integer instructions properly.
+ unsigned Domain = TID.TSFlags & ARMII::DomainMask;
+ if (Domain == ARMII::DomainVFP) {
+ unsigned Opcode = TID.getOpcode();
+ if (Opcode == ARM::VSTRS || Opcode == ARM::VSTRD ||
+ Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
+ return false;
+ } else if (Domain == ARMII::DomainNEON) {
+ if (TID.mayStore() || TID.mayLoad())
+ return false;
+ } else {
+ return false;
+ }
+
+  return MI->readsRegister(Reg, TRI);
+}
+
+
+bool MLxExpansion::FindMLxHazard(MachineInstr *MI) const {
+ if (NumExpand >= ExpandLimit)
+ return false;
+
+  if (ForceExpand)
+ return true;
+
+ MachineInstr *DefMI = getAccDefMI(MI);
+ if (TII->isFpMLxInstruction(DefMI->getOpcode()))
+ // r0 = vmla
+ // r3 = vmla r0, r1, r2
+ // takes 16 - 17 cycles
+ //
+ // r0 = vmla
+ // r4 = vmul r1, r2
+ // r3 = vadd r0, r4
+ // takes about 14 - 15 cycles even with vmul stalling for 4 cycles.
+ return true;
+
+  // If a VMLA.F is followed by a VADD.F or VMUL.F with no RAW hazard, the
+  // VADD.F or VMUL.F will stall 4 cycles before issue. The 4-cycle stall
+  // preserves the in-order retirement of the instructions.
+  // Look at the next few instructions; if *most* of them can cause hazards,
+  // the scheduler can't *fix* this, so we'd better break up the VMLA.
+ for (unsigned i = 1; i <= 4; ++i) {
+ int Idx = ((int)MIIdx - i + 4) % 4;
+ MachineInstr *NextMI = LastMIs[Idx];
+ if (!NextMI)
+ continue;
+
+ if (TII->canCauseFpMLxStall(NextMI->getOpcode()))
+ return true;
+
+ // Look for VMLx RAW hazard.
+ if (hasRAWHazard(getDefReg(MI), NextMI))
+ return true;
+ }
+
+ return false;
+}
+
+/// ExpandFPMLxInstruction - Expand an MLA / MLS instruction into a pair
+/// of MUL + ADD / SUB instructions.
+void
+MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
+ unsigned MulOpc, unsigned AddSubOpc,
+ bool NegAcc, bool HasLane) {
+ unsigned DstReg = MI->getOperand(0).getReg();
+ bool DstDead = MI->getOperand(0).isDead();
+ unsigned AccReg = MI->getOperand(1).getReg();
+ unsigned Src1Reg = MI->getOperand(2).getReg();
+ unsigned Src2Reg = MI->getOperand(3).getReg();
+ bool Src1Kill = MI->getOperand(2).isKill();
+ bool Src2Kill = MI->getOperand(3).isKill();
+ unsigned LaneImm = HasLane ? MI->getOperand(4).getImm() : 0;
+ unsigned NextOp = HasLane ? 5 : 4;
+ ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NextOp).getImm();
+ unsigned PredReg = MI->getOperand(++NextOp).getReg();
+
+ const TargetInstrDesc &TID1 = TII->get(MulOpc);
+ const TargetInstrDesc &TID2 = TII->get(AddSubOpc);
+ unsigned TmpReg = MRI->createVirtualRegister(TID1.getRegClass(0, TRI));
+
+ MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID1, TmpReg)
+ .addReg(Src1Reg, getKillRegState(Src1Kill))
+ .addReg(Src2Reg, getKillRegState(Src2Kill));
+ if (HasLane)
+ MIB.addImm(LaneImm);
+ MIB.addImm(Pred).addReg(PredReg);
+
+ MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID2)
+ .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstDead));
+
+ if (NegAcc) {
+ bool AccKill = MRI->hasOneNonDBGUse(AccReg);
+ MIB.addReg(TmpReg, getKillRegState(true))
+ .addReg(AccReg, getKillRegState(AccKill));
+ } else {
+ MIB.addReg(AccReg).addReg(TmpReg, getKillRegState(true));
+ }
+ MIB.addImm(Pred).addReg(PredReg);
+
+ DEBUG({
+ dbgs() << "Expanding: " << *MI;
+ dbgs() << " to:\n";
+ MachineBasicBlock::iterator MII = MI;
+ MII = llvm::prior(MII);
+ MachineInstr &MI2 = *MII;
+ MII = llvm::prior(MII);
+ MachineInstr &MI1 = *MII;
+ dbgs() << " " << MI1;
+ dbgs() << " " << MI2;
+ });
+
+ MI->eraseFromParent();
+ ++NumExpand;
+}
+
+bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
+ bool Changed = false;
+
+ clearStack();
+
+ unsigned Skip = 0;
+ MachineBasicBlock::reverse_iterator MII = MBB.rbegin(), E = MBB.rend();
+ while (MII != E) {
+ MachineInstr *MI = &*MII;
+
+ if (MI->isLabel() || MI->isImplicitDef() || MI->isCopy()) {
+ ++MII;
+ continue;
+ }
+
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (TID.isBarrier()) {
+ clearStack();
+ Skip = 0;
+ ++MII;
+ continue;
+ }
+
+ unsigned Domain = TID.TSFlags & ARMII::DomainMask;
+ if (Domain == ARMII::DomainGeneral) {
+ if (++Skip == 2)
+ // Assume dual issues of non-VFP / NEON instructions.
+ pushStack(0);
+ } else {
+ Skip = 0;
+
+ unsigned MulOpc, AddSubOpc;
+ bool NegAcc, HasLane;
+ if (!TII->isFpMLxInstruction(TID.getOpcode(),
+ MulOpc, AddSubOpc, NegAcc, HasLane) ||
+ !FindMLxHazard(MI))
+ pushStack(MI);
+ else {
+ ExpandFPMLxInstruction(MBB, MI, MulOpc, AddSubOpc, NegAcc, HasLane);
+ E = MBB.rend(); // May have changed if MI was the 1st instruction.
+ Changed = true;
+ continue;
+ }
+ }
+
+ ++MII;
+ }
+
+ return Changed;
+}
+
+bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
+ TII = static_cast<const ARMBaseInstrInfo*>(Fn.getTarget().getInstrInfo());
+ TRI = Fn.getTarget().getRegisterInfo();
+ MRI = &Fn.getRegInfo();
+
+ bool Modified = false;
+ for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
+ ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ Modified |= ExpandFPMLxInstructions(MBB);
+ }
+
+ return Modified;
+}
+
+FunctionPass *llvm::createMLxExpansionPass() {
+ return new MLxExpansion();
+}
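
MLxExpansion keeps the last four issue slots in LastMIs, a fixed ring indexed by MIIdx, and FindMLxHazard walks it newest-first with ((int)MIIdx - i + 4) % 4. A self-contained sketch of just that ring discipline (History is a hypothetical stand-in holding ints instead of MachineInstr pointers):

#include <algorithm>
#include <iostream>

// Four-entry history mirroring LastMIs/MIIdx: push overwrites the oldest
// slot, and scan visits entries newest-first using the same
// ((Idx - i + 4) % 4) arithmetic as FindMLxHazard.
struct History {
  int Last[4];
  unsigned Idx;

  History() { clear(); }
  void clear() {
    std::fill(Last, Last + 4, -1);
    Idx = 0;
  }
  void push(int V) {
    Last[Idx] = V;
    if (++Idx == 4)
      Idx = 0;
  }
  void scan() const {
    for (unsigned i = 1; i <= 4; ++i)
      std::cout << Last[((int)Idx - (int)i + 4) % 4] << ' ';
    std::cout << '\n';
  }
};

int main() {
  History H;
  for (int V = 0; V < 6; ++V)
    H.push(V);
  H.scan(); // Prints: 5 4 3 2
  return 0;
}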
diff --git a/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp b/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
deleted file mode 100644
index 3407ac6..0000000
--- a/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
+++ /dev/null
@@ -1,406 +0,0 @@
-//===-- NEONPreAllocPass.cpp - Allocate adjacent NEON registers--*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "neon-prealloc"
-#include "ARM.h"
-#include "ARMInstrInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-using namespace llvm;
-
-namespace {
- class NEONPreAllocPass : public MachineFunctionPass {
- const TargetInstrInfo *TII;
- MachineRegisterInfo *MRI;
-
- public:
- static char ID;
- NEONPreAllocPass() : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "NEON register pre-allocation pass";
- }
-
- private:
- bool FormsRegSequence(MachineInstr *MI,
- unsigned FirstOpnd, unsigned NumRegs,
- unsigned Offset, unsigned Stride) const;
- bool PreAllocNEONRegisters(MachineBasicBlock &MBB);
- };
-
- char NEONPreAllocPass::ID = 0;
-}
-
-static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
- unsigned &Offset, unsigned &Stride) {
- // Default to unit stride with no offset.
- Stride = 1;
- Offset = 0;
-
- switch (Opcode) {
- default:
- break;
-
- case ARM::VLD2LNd8:
- case ARM::VLD2LNd16:
- case ARM::VLD2LNd32:
- FirstOpnd = 0;
- NumRegs = 2;
- return true;
-
- case ARM::VLD2LNq16:
- case ARM::VLD2LNq32:
- FirstOpnd = 0;
- NumRegs = 2;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD2LNq16odd:
- case ARM::VLD2LNq32odd:
- FirstOpnd = 0;
- NumRegs = 2;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD3LNd8:
- case ARM::VLD3LNd16:
- case ARM::VLD3LNd32:
- FirstOpnd = 0;
- NumRegs = 3;
- return true;
-
- case ARM::VLD3LNq16:
- case ARM::VLD3LNq32:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD3LNq16odd:
- case ARM::VLD3LNq32odd:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD4LNd8:
- case ARM::VLD4LNd16:
- case ARM::VLD4LNd32:
- FirstOpnd = 0;
- NumRegs = 4;
- return true;
-
- case ARM::VLD4LNq16:
- case ARM::VLD4LNq32:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD4LNq16odd:
- case ARM::VLD4LNq32odd:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST2LNd8:
- case ARM::VST2LNd16:
- case ARM::VST2LNd32:
- FirstOpnd = 2;
- NumRegs = 2;
- return true;
-
- case ARM::VST2LNq16:
- case ARM::VST2LNq32:
- FirstOpnd = 2;
- NumRegs = 2;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST2LNq16odd:
- case ARM::VST2LNq32odd:
- FirstOpnd = 2;
- NumRegs = 2;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST3LNd8:
- case ARM::VST3LNd16:
- case ARM::VST3LNd32:
- FirstOpnd = 2;
- NumRegs = 3;
- return true;
-
- case ARM::VST3LNq16:
- case ARM::VST3LNq32:
- FirstOpnd = 2;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST3LNq16odd:
- case ARM::VST3LNq32odd:
- FirstOpnd = 2;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST4LNd8:
- case ARM::VST4LNd16:
- case ARM::VST4LNd32:
- FirstOpnd = 2;
- NumRegs = 4;
- return true;
-
- case ARM::VST4LNq16:
- case ARM::VST4LNq32:
- FirstOpnd = 2;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST4LNq16odd:
- case ARM::VST4LNq32odd:
- FirstOpnd = 2;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VTBL2:
- FirstOpnd = 1;
- NumRegs = 2;
- return true;
-
- case ARM::VTBL3:
- FirstOpnd = 1;
- NumRegs = 3;
- return true;
-
- case ARM::VTBL4:
- FirstOpnd = 1;
- NumRegs = 4;
- return true;
-
- case ARM::VTBX2:
- FirstOpnd = 2;
- NumRegs = 2;
- return true;
-
- case ARM::VTBX3:
- FirstOpnd = 2;
- NumRegs = 3;
- return true;
-
- case ARM::VTBX4:
- FirstOpnd = 2;
- NumRegs = 4;
- return true;
- }
-
- return false;
-}
-
-bool
-NEONPreAllocPass::FormsRegSequence(MachineInstr *MI,
- unsigned FirstOpnd, unsigned NumRegs,
- unsigned Offset, unsigned Stride) const {
- MachineOperand &FMO = MI->getOperand(FirstOpnd);
- assert(FMO.isReg() && FMO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = FMO.getReg();
- (void)VirtReg;
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
-
- unsigned LastSubIdx = 0;
- if (FMO.isDef()) {
- MachineInstr *RegSeq = 0;
- for (unsigned R = 0; R < NumRegs; ++R) {
- const MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
- // Feeding into a REG_SEQUENCE.
- if (!MRI->hasOneNonDBGUse(VirtReg))
- return false;
- MachineInstr *UseMI = &*MRI->use_nodbg_begin(VirtReg);
- if (!UseMI->isRegSequence())
- return false;
- if (RegSeq && RegSeq != UseMI)
- return false;
- unsigned OpIdx = 1 + (Offset + R * Stride) * 2;
- if (UseMI->getOperand(OpIdx).getReg() != VirtReg)
- llvm_unreachable("Malformed REG_SEQUENCE instruction!");
- unsigned SubIdx = UseMI->getOperand(OpIdx + 1).getImm();
- if (LastSubIdx) {
- if (LastSubIdx != SubIdx-Stride)
- return false;
- } else {
- // Must start from dsub_0 or qsub_0.
- if (SubIdx != (ARM::dsub_0+Offset) &&
- SubIdx != (ARM::qsub_0+Offset))
- return false;
- }
- RegSeq = UseMI;
- LastSubIdx = SubIdx;
- }
-
- // In the case of vld3, etc., make sure the trailing operand of
- // REG_SEQUENCE is an undef.
- if (NumRegs == 3) {
- unsigned OpIdx = 1 + (Offset + 3 * Stride) * 2;
- const MachineOperand &MO = RegSeq->getOperand(OpIdx);
- unsigned VirtReg = MO.getReg();
- MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
- if (!DefMI || !DefMI->isImplicitDef())
- return false;
- }
- return true;
- }
-
- unsigned LastSrcReg = 0;
- SmallVector<unsigned, 4> SubIds;
- for (unsigned R = 0; R < NumRegs; ++R) {
- const MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
- // Extracting from a Q or QQ register.
- MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
- if (!DefMI || !DefMI->isCopy() || !DefMI->getOperand(1).getSubReg())
- return false;
- VirtReg = DefMI->getOperand(1).getReg();
- if (LastSrcReg && LastSrcReg != VirtReg)
- return false;
- LastSrcReg = VirtReg;
- const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
- if (RC != ARM::QPRRegisterClass &&
- RC != ARM::QQPRRegisterClass &&
- RC != ARM::QQQQPRRegisterClass)
- return false;
- unsigned SubIdx = DefMI->getOperand(1).getSubReg();
- if (LastSubIdx) {
- if (LastSubIdx != SubIdx-Stride)
- return false;
- } else {
- // Must start from dsub_0 or qsub_0.
- if (SubIdx != (ARM::dsub_0+Offset) &&
- SubIdx != (ARM::qsub_0+Offset))
- return false;
- }
- SubIds.push_back(SubIdx);
- LastSubIdx = SubIdx;
- }
-
- // FIXME: Update the uses of EXTRACT_SUBREG from REG_SEQUENCE is
- // currently required for correctness. e.g.
- // %reg1041<def> = REG_SEQUENCE %reg1040<kill>, 5, %reg1035<kill>, 6
- // %reg1042<def> = EXTRACT_SUBREG %reg1041, 6
- // %reg1043<def> = EXTRACT_SUBREG %reg1041, 5
- // VST1q16 %reg1025<kill>, 0, %reg1043<kill>, %reg1042<kill>,
- // reg1042 and reg1043 should be replaced with reg1041:6 and reg1041:5
- // respectively.
- // We need to change how we model uses of REG_SEQUENCE.
- for (unsigned R = 0; R < NumRegs; ++R) {
- MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- unsigned OldReg = MO.getReg();
- MachineInstr *DefMI = MRI->getVRegDef(OldReg);
- assert(DefMI->isCopy());
- MO.setReg(LastSrcReg);
- MO.setSubReg(SubIds[R]);
- MO.setIsKill(false);
- // Delete the EXTRACT_SUBREG if its result is now dead.
- if (MRI->use_empty(OldReg))
- DefMI->eraseFromParent();
- }
-
- return true;
-}
-
-bool NEONPreAllocPass::PreAllocNEONRegisters(MachineBasicBlock &MBB) {
- bool Modified = false;
-
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- for (; MBBI != E; ++MBBI) {
- MachineInstr *MI = &*MBBI;
- unsigned FirstOpnd, NumRegs, Offset, Stride;
- if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
- continue;
- if (FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
- continue;
-
- MachineBasicBlock::iterator NextI = llvm::next(MBBI);
- for (unsigned R = 0; R < NumRegs; ++R) {
- MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
-
- // For now, just assign a fixed set of adjacent registers.
- // This leaves plenty of room for future improvements.
- static const unsigned NEONDRegs[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7
- };
- MO.setReg(NEONDRegs[Offset + R * Stride]);
-
- if (MO.isUse()) {
- // Insert a copy from VirtReg.
- BuildMI(MBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),MO.getReg())
- .addReg(VirtReg, getKillRegState(MO.isKill()));
- MO.setIsKill();
- } else if (MO.isDef() && !MO.isDead()) {
- // Add a copy to VirtReg.
- BuildMI(MBB, NextI, DebugLoc(), TII->get(TargetOpcode::COPY), VirtReg)
- .addReg(MO.getReg());
- }
- }
- }
-
- return Modified;
-}
-
-bool NEONPreAllocPass::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- MRI = &MF.getRegInfo();
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
- Modified |= PreAllocNEONRegisters(MBB);
- }
-
- return Modified;
-}
-
-/// createNEONPreAllocPass - returns an instance of the NEON register
-/// pre-allocation pass.
-FunctionPass *llvm::createNEONPreAllocPass() {
- return new NEONPreAllocPass();
-}
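
The deleted FormsRegSequence required each per-lane virtual register to feed a single REG_SEQUENCE with consecutive subregister indices: the run must start at dsub_0 or qsub_0 plus the offset, then step by the stride. That index discipline in isolation, with hypothetical names and a numeric stand-in for the base index:

#include <cassert>

// True if SubIdx validly continues a run that must start at Base + Offset
// and step by Stride; LastSubIdx == 0 means no index has been seen yet.
static bool continuesRun(unsigned SubIdx, unsigned LastSubIdx,
                         unsigned Base, unsigned Offset, unsigned Stride) {
  if (LastSubIdx)
    return LastSubIdx == SubIdx - Stride;
  return SubIdx == Base + Offset; // First index anchors the run.
}

int main() {
  // Base 1 (stand-in for dsub_0), offset 1, stride 2: expect 2, 4, 6, ...
  assert(continuesRun(2, 0, 1, 1, 2));
  assert(continuesRun(4, 2, 1, 1, 2));
  assert(!continuesRun(5, 2, 1, 1, 2));
  return 0;
}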
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
new file mode 100644
index 0000000..233e165
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -0,0 +1,352 @@
+//===-- Thumb1FrameLowering.cpp - Thumb1 Frame Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Thumb1 implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Thumb1FrameLowering.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+bool Thumb1FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ const MachineFrameInfo *FFI = MF.getFrameInfo();
+ unsigned CFSize = FFI->getMaxCallFrameSize();
+  // It's not always a good idea to include the call frame as part of the
+  // stack frame. ARM (especially Thumb) has small immediate offsets for
+  // addressing the stack frame, so a large call frame can cause poor codegen
+  // and may even make it impossible to scavenge a register.
+ if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
+ return false;
+
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+static void emitSPUpdate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const TargetInstrInfo &TII, DebugLoc dl,
+ const Thumb1RegisterInfo &MRI,
+ int NumBytes) {
+ emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII,
+ MRI, dl);
+}
+
+void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const Thumb1RegisterInfo *RegInfo =
+ static_cast<const Thumb1RegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const Thumb1InstrInfo &TII =
+ *static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo());
+
+ unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
+ unsigned NumBytes = MFI->getStackSize();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned BasePtr = RegInfo->getBaseRegister();
+
+ // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
+ NumBytes = (NumBytes + 3) & ~3;
+ MFI->setStackSize(NumBytes);
+
+  // Determine the sizes of the callee-save spill areas and record which frame
+  // index belongs to which area.
+ unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
+ int FramePtrSpillFI = 0;
+
+ if (VARegSaveSize)
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -VARegSaveSize);
+
+ if (!AFI->hasStackFrame()) {
+ if (NumBytes != 0)
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -NumBytes);
+ return;
+ }
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ int FI = CSI[i].getFrameIdx();
+ switch (Reg) {
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ case ARM::LR:
+ if (Reg == FramePtr)
+ FramePtrSpillFI = FI;
+ AFI->addGPRCalleeSavedArea1Frame(FI);
+ GPRCS1Size += 4;
+ break;
+ case ARM::R8:
+ case ARM::R9:
+ case ARM::R10:
+ case ARM::R11:
+ if (Reg == FramePtr)
+ FramePtrSpillFI = FI;
+ if (STI.isTargetDarwin()) {
+ AFI->addGPRCalleeSavedArea2Frame(FI);
+ GPRCS2Size += 4;
+ } else {
+ AFI->addGPRCalleeSavedArea1Frame(FI);
+ GPRCS1Size += 4;
+ }
+ break;
+ default:
+ AFI->addDPRCalleeSavedAreaFrame(FI);
+ DPRCSSize += 8;
+ }
+ }
+
+ if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
+ ++MBBI;
+ if (MBBI != MBB.end())
+ dl = MBBI->getDebugLoc();
+ }
+
+ // Determine starting offsets of spill areas.
+ unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
+ unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
+ unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
+ AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
+ AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
+ AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
+ AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
+ NumBytes = DPRCSOffset;
+
+  // Adjust FP so it points to the stack slot that contains the previous FP.
+ if (hasFP(MF)) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
+ .addFrameIndex(FramePtrSpillFI).addImm(0);
+ if (NumBytes > 7)
+      // If the offset is > 7, sp cannot be adjusted in a single instruction;
+      // restore it from fp instead.
+ AFI->setShouldRestoreSPFromFP(true);
+ }
+
+ if (NumBytes)
+ // Insert it after all the callee-save spills.
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -NumBytes);
+
+ if (STI.isTargetELF() && hasFP(MF))
+ MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
+ AFI->getFramePtrSpillOffset());
+
+ AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
+ AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
+ AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
+
+ // If we need a base pointer, set it up here. It's whatever the value
+ // of the stack pointer is at this point. Any variable size objects
+ // will be allocated after this, so we can still use the base pointer
+ // to reference locals.
+ if (RegInfo->hasBasePointer(MF))
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr).addReg(ARM::SP);
+
+ // If the frame has variable sized objects then the epilogue must restore
+ // the sp from fp. We can assume there's an FP here since hasFP already
+ // checks for hasVarSizedObjects.
+ if (MFI->hasVarSizedObjects())
+ AFI->setShouldRestoreSPFromFP(true);
+}
+
+static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ if (Reg == CSRegs[i])
+ return true;
+ return false;
+}
+
+static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
+ if (MI->getOpcode() == ARM::tRestore &&
+ MI->getOperand(1).isFI() &&
+ isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
+ return true;
+ else if (MI->getOpcode() == ARM::tPOP) {
+ // The first two operands are predicates. The last two are
+ // imp-def and imp-use of SP. Check everything in between.
+ for (int i = 2, e = MI->getNumOperands() - 2; i != e; ++i)
+ if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert((MBBI->getOpcode() == ARM::tBX_RET ||
+ MBBI->getOpcode() == ARM::tPOP_RET) &&
+ "Can only insert epilog into returning blocks");
+ DebugLoc dl = MBBI->getDebugLoc();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const Thumb1RegisterInfo *RegInfo =
+ static_cast<const Thumb1RegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const Thumb1InstrInfo &TII =
+ *static_cast<const Thumb1InstrInfo*>(MF.getTarget().getInstrInfo());
+
+ unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
+ int NumBytes = (int)MFI->getStackSize();
+ const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+
+ if (!AFI->hasStackFrame()) {
+ if (NumBytes != 0)
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
+ } else {
+ // Unwind MBBI to point to first LDR / VLDRD.
+ if (MBBI != MBB.begin()) {
+ do
+ --MBBI;
+ while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
+ if (!isCSRestore(MBBI, CSRegs))
+ ++MBBI;
+ }
+
+ // Move SP to start of FP callee save spill area.
+ NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
+ AFI->getGPRCalleeSavedArea2Size() +
+ AFI->getDPRCalleeSavedAreaSize());
+
+ if (AFI->shouldRestoreSPFromFP()) {
+ NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
+      // Reset SP based on the frame pointer only if the stack frame extends
+      // beyond the frame pointer stack slot, the target is ELF and the
+      // function has an FP, or the target uses variable-sized objects.
+ if (NumBytes) {
+ assert(MF.getRegInfo().isPhysRegUsed(ARM::R4) &&
+ "No scratch register to restore SP from FP!");
+ emitThumbRegPlusImmediate(MBB, MBBI, ARM::R4, FramePtr, -NumBytes,
+ TII, *RegInfo, dl);
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
+ .addReg(ARM::R4);
+ } else
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
+ .addReg(FramePtr);
+ } else {
+ if (MBBI->getOpcode() == ARM::tBX_RET &&
+ &MBB.front() != MBBI &&
+ prior(MBBI)->getOpcode() == ARM::tPOP) {
+ MachineBasicBlock::iterator PMBBI = prior(MBBI);
+ emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
+ } else
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
+ }
+ }
+
+ if (VARegSaveSize) {
+ // Unlike T2 and ARM mode, the T1 pop instruction cannot restore
+ // to LR, and we can't pop the value directly to the PC since
+ // we need to update the SP after popping the value. Therefore, we
+ // pop the old LR into R3 as a temporary.
+
+ // Move back past the callee-saved register restoration
+ while (MBBI != MBB.end() && isCSRestore(MBBI, CSRegs))
+ ++MBBI;
+ // Epilogue for vararg functions: pop LR to R3 and branch off it.
+ AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
+ .addReg(ARM::R3, RegState::Define);
+
+ emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, VARegSaveSize);
+
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg))
+ .addReg(ARM::R3, RegState::Kill);
+    // Erase the old tBX_RET instruction.
+ MBB.erase(MBBI);
+ }
+}
+
+bool Thumb1FrameLowering::
+spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL;
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(ARM::tPUSH));
+ AddDefaultPred(MIB);
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ bool isKill = true;
+
+ // Add the callee-saved register as live-in unless it's LR and
+ // @llvm.returnaddress is called. If LR is returned for @llvm.returnaddress
+ // then it's already added to the function and entry block live-in sets.
+ if (Reg == ARM::LR) {
+ MachineFunction &MF = *MBB.getParent();
+ if (MF.getFrameInfo()->isReturnAddressTaken() &&
+ MF.getRegInfo().isLiveIn(Reg))
+ isKill = false;
+ }
+
+ if (isKill)
+ MBB.addLiveIn(Reg);
+
+ MIB.addReg(Reg, getKillRegState(isKill));
+ }
+ return true;
+}
+
+bool Thumb1FrameLowering::
+restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
+ DebugLoc DL = MI->getDebugLoc();
+ MachineInstrBuilder MIB = BuildMI(MF, DL, TII.get(ARM::tPOP));
+ AddDefaultPred(MIB);
+
+ bool NumRegs = false;
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ if (Reg == ARM::LR) {
+ // Special epilogue for vararg functions. See emitEpilogue
+ if (isVarArg)
+ continue;
+ Reg = ARM::PC;
+ (*MIB).setDesc(TII.get(ARM::tPOP_RET));
+ MI = MBB.erase(MI);
+ }
+ MIB.addReg(Reg, getDefRegState(true));
+ NumRegs = true;
+ }
+
+  // It's illegal to emit a pop instruction without operands.
+ if (NumRegs)
+ MBB.insert(MI, &*MIB);
+ else
+ MF.DeleteMachineInstr(MIB);
+
+ return true;
+}
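
Both the prologue's (NumBytes + 3) & ~3 rounding and the call-frame alignment in Thumb1RegisterInfo further down use the same power-of-two align-up, since Thumb add/sub sp, imm8 scales its immediate by 4. A minimal sketch of the idiom (alignUp is an illustrative helper, not an LLVM API):

#include <cassert>

// Round Bytes up to a multiple of Align, where Align is a power of two.
// With Align == 4 this is exactly (Bytes + 3) & ~3, as in emitPrologue.
static unsigned alignUp(unsigned Bytes, unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "Align must be a power of two");
  return (Bytes + Align - 1) & ~(Align - 1);
}

int main() {
  assert(alignUp(13, 4) == 16);
  assert(alignUp(16, 4) == 16);
  assert(alignUp(0, 4) == 0);
  return 0;
}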
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h
new file mode 100644
index 0000000..c592e12
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.h
@@ -0,0 +1,52 @@
+//===-- Thumb1FrameLowering.h - Thumb1-specific frame info stuff -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Thumb1 declaration of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __THUMB_FRAMEINFO_H_
+#define __THUMB_FRAMEINFO_H_
+
+#include "ARM.h"
+#include "ARMFrameLowering.h"
+#include "ARMSubtarget.h"
+#include "Thumb1InstrInfo.h"
+#include "Thumb1RegisterInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class ARMSubtarget;
+
+class Thumb1FrameLowering : public ARMFrameLowering {
+public:
+ explicit Thumb1FrameLowering(const ARMSubtarget &sti)
+ : ARMFrameLowering(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index af630ac..3fbb433 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -71,8 +71,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tSpill))
@@ -99,85 +100,12 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
}
}
-
-bool Thumb1InstrInfo::
-spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, get(ARM::tPUSH));
- AddDefaultPred(MIB);
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- bool isKill = true;
-
- // Add the callee-saved register as live-in unless it's LR and
- // @llvm.returnaddress is called. If LR is returned for @llvm.returnaddress
- // then it's already added to the function and entry block live-in sets.
- if (Reg == ARM::LR) {
- MachineFunction &MF = *MBB.getParent();
- if (MF.getFrameInfo()->isReturnAddressTaken() &&
- MF.getRegInfo().isLiveIn(Reg))
- isKill = false;
- }
-
- if (isKill)
- MBB.addLiveIn(Reg);
-
- MIB.addReg(Reg, getKillRegState(isKill));
- }
- return true;
-}
-
-bool Thumb1InstrInfo::
-restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- MachineFunction &MF = *MBB.getParent();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- if (CSI.empty())
- return false;
-
- bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
- DebugLoc DL = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::tPOP));
- AddDefaultPred(MIB);
-
- bool NumRegs = false;
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- if (Reg == ARM::LR) {
- // Special epilogue for vararg functions. See emitEpilogue
- if (isVarArg)
- continue;
- Reg = ARM::PC;
- (*MIB).setDesc(get(ARM::tPOP_RET));
- MI = MBB.erase(MI);
- }
- MIB.addReg(Reg, getDefRegState(true));
- NumRegs = true;
- }
-
- // It's illegal to emit pop instruction without operands.
- if (NumRegs)
- MBB.insert(MI, &*MIB);
- else
- MF.DeleteMachineInstr(MIB);
-
- return true;
-}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 555135a..17ef2f7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -37,28 +37,19 @@ public:
///
const Thumb1RegisterInfo &getRegisterInfo() const { return RI; }
- bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
index a21a3da..f62a13e 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -29,7 +29,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -63,24 +63,11 @@ void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRcp))
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
.addReg(DestReg, getDefRegState(true), SubIdx)
.addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg);
}
-bool Thumb1RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- unsigned CFSize = FFI->getMaxCallFrameSize();
- // It's not always a good idea to include the call frame as part of the
- // stack frame. ARM (especially Thumb) has small immediate offset to
- // address the stack frame. So a large call frame can cause poor codegen
- // and may even makes it impossible to scavenge a register.
- if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
- return false;
-
- return !MF.getFrameInfo()->hasVarSizedObjects();
-}
-
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
@@ -92,7 +79,7 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned BaseReg,
int NumBytes, bool CanChangeCC,
const TargetInstrInfo &TII,
- const Thumb1RegisterInfo& MRI,
+ const ARMBaseRegisterInfo& MRI,
DebugLoc dl) {
MachineFunction &MF = *MBB.getParent();
bool isHigh = !isARMLowRegister(DestReg) ||
@@ -162,13 +149,12 @@ static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code.
-static
-void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, const TargetInstrInfo &TII,
- const Thumb1RegisterInfo& MRI,
- DebugLoc dl) {
+void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned DestReg, unsigned BaseReg,
+ int NumBytes, const TargetInstrInfo &TII,
+ const ARMBaseRegisterInfo& MRI,
+ DebugLoc dl) {
bool isSub = NumBytes < 0;
unsigned Bytes = (unsigned)NumBytes;
if (isSub) Bytes = -NumBytes;
@@ -304,7 +290,9 @@ static void emitSPUpdate(MachineBasicBlock &MBB,
void Thumb1RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
@@ -315,7 +303,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned Align = TFI->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
// Replace the pseudo instruction with a new instruction...
@@ -363,6 +351,22 @@ static void removeOperands(MachineInstr &MI, unsigned i) {
MI.RemoveOperand(Op);
}
+/// convertToNonSPOpcode - Change the opcode to the non-SP version, because
+/// we're replacing the frame index with a non-SP register.
+static unsigned convertToNonSPOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case ARM::tLDRspi:
+ case ARM::tRestore: // FIXME: Should this opcode be here?
+ return ARM::tLDRi;
+
+ case ARM::tSTRspi:
+ case ARM::tSpill: // FIXME: Should this opcode be here?
+ return ARM::tSTRi;
+ }
+
+ return Opcode;
+}
+
bool Thumb1RegisterInfo::
rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
@@ -464,55 +468,51 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
}
return true;
} else {
- unsigned ImmIdx = 0;
- int InstrOffs = 0;
- unsigned NumBits = 0;
- unsigned Scale = 1;
- switch (AddrMode) {
- case ARMII::AddrModeT1_s: {
- ImmIdx = FrameRegIdx+1;
- InstrOffs = MI.getOperand(ImmIdx).getImm();
- NumBits = (FrameReg == ARM::SP) ? 8 : 5;
- Scale = 4;
- break;
- }
- default:
+ if (AddrMode != ARMII::AddrModeT1_s)
llvm_unreachable("Unsupported addressing mode!");
- break;
- }
+
+ unsigned ImmIdx = FrameRegIdx + 1;
+ int InstrOffs = MI.getOperand(ImmIdx).getImm();
+ unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
+ unsigned Scale = 4;
Offset += InstrOffs * Scale;
- assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
+ assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");
// Common case: small offset, fits into instruction.
MachineOperand &ImmOp = MI.getOperand(ImmIdx);
int ImmedOffset = Offset / Scale;
unsigned Mask = (1 << NumBits) - 1;
+
if ((unsigned)Offset <= Mask * Scale) {
- // Replace the FrameIndex with sp
+ // Replace the FrameIndex with the frame register (e.g., sp).
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
ImmOp.ChangeToImmediate(ImmedOffset);
+
+ // If we're using a register where sp was stored, convert the instruction
+ // to the non-SP version.
+ unsigned NewOpc = convertToNonSPOpcode(Opcode);
+ if (NewOpc != Opcode && FrameReg != ARM::SP)
+ MI.setDesc(TII.get(NewOpc));
+
return true;
}
- bool isThumSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
- if (AddrMode == ARMII::AddrModeT1_s) {
- // Thumb tLDRspi, tSTRspi. These will change to instructions that use
- // a different base register.
- NumBits = 5;
- Mask = (1 << NumBits) - 1;
- }
+ NumBits = 5;
+ Mask = (1 << NumBits) - 1;
+
// If this is a thumb spill / restore, we will be using a constpool load to
// materialize the offset.
- if (AddrMode == ARMII::AddrModeT1_s && isThumSpillRestore)
+ if (Opcode == ARM::tRestore || Opcode == ARM::tSpill) {
ImmOp.ChangeToImmediate(0);
- else {
+ } else {
// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
ImmedOffset = ImmedOffset & Mask;
ImmOp.ChangeToImmediate(ImmedOffset);
- Offset &= ~(Mask*Scale);
+ Offset &= ~(Mask * Scale);
}
}
+
return Offset == 0;
}
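
The simplified AddrModeT1_s path above reduces to one question: does the byte offset fit the immediate field, which holds NumBits bits (8 against SP, otherwise 5) scaled by 4? A standalone sketch of that encodability check (offsetFits is a hypothetical helper):

#include <cassert>

// True if Offset can be encoded in a NumBits-wide immediate scaled by Scale,
// mirroring the (unsigned)Offset <= Mask * Scale test in rewriteFrameIndex.
static bool offsetFits(unsigned Offset, unsigned NumBits, unsigned Scale) {
  unsigned Mask = (1u << NumBits) - 1;
  return Offset % Scale == 0 && Offset <= Mask * Scale;
}

int main() {
  assert(offsetFits(1020, 8, 4));  // sp-relative: up to 255 * 4 bytes.
  assert(!offsetFits(1024, 8, 4)); // One word too far.
  assert(!offsetFits(1020, 5, 4)); // Non-SP base: only 31 * 4 = 124 bytes.
  return 0;
}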
@@ -602,7 +602,8 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
Offset -= AFI->getGPRCalleeSavedArea2Offset();
else if (MF.getFrameInfo()->hasVarSizedObjects()) {
- assert(SPAdj == 0 && hasFP(MF) && "Unexpected");
+ assert(SPAdj == 0 && MF.getTarget().getFrameLowering()->hasFP(MF) &&
+ "Unexpected");
// There are alloca()'s in this function, must reference off the frame
// pointer or base pointer instead.
if (!hasBasePointer(MF)) {
@@ -655,13 +656,12 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
*this, dl);
}
- MI.setDesc(TII.get(ARM::tLDR));
+ MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR)
- // Use [reg, reg] addrmode.
- MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
- else // tLDR has an extra register operand.
- MI.addOperand(MachineOperand::CreateReg(0, false));
+ // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
+ // register. The offset is already handled in the vreg value.
+ MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
} else if (Desc.mayStore()) {
VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
bool UseRR = false;
@@ -677,14 +677,15 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
} else
emitThumbRegPlusImmediate(MBB, II, VReg, FrameReg, Offset, TII,
*this, dl);
- MI.setDesc(TII.get(ARM::tSTR));
+ MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
MI.getOperand(i).ChangeToRegister(VReg, false, false, true);
- if (UseRR) // Use [reg, reg] addrmode.
- MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
- else // tSTR has an extra register operand.
- MI.addOperand(MachineOperand::CreateReg(0, false));
- } else
+ if (UseRR)
+ // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
+ // register. The offset is already handled in the vreg value.
+ MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
+ } else {
assert(false && "Unexpected opcode!");
+ }
// Add predicate back if it's needed.
if (MI.getDesc().isPredicable()) {
@@ -692,206 +693,3 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
AddDefaultPred(MIB);
}
}
-
-void Thumb1RegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- unsigned NumBytes = MFI->getStackSize();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
- NumBytes = (NumBytes + 3) & ~3;
- MFI->setStackSize(NumBytes);
-
- // Determine the sizes of each callee-save spill areas and record which frame
- // belongs to which callee-save spill areas.
- unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
- int FramePtrSpillFI = 0;
-
- if (VARegSaveSize)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -VARegSaveSize);
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
- return;
- }
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- int FI = CSI[i].getFrameIdx();
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
- GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- }
- break;
- default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
- DPRCSSize += 8;
- }
- }
-
- if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
- ++MBBI;
- if (MBBI != MBB.end())
- dl = MBBI->getDebugLoc();
- }
-
- // Adjust FP so it point to the stack slot that contains the previous FP.
- if (hasFP(MF)) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0);
- AFI->setShouldRestoreSPFromFP(true);
- }
-
- // Determine starting offsets of spill areas.
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
- unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
- unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
- AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
- AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
- AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
- AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
-
- NumBytes = DPRCSOffset;
- if (NumBytes) {
- // Insert it after all the callee-save spills.
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
- }
-
- if (STI.isTargetELF() && hasFP(MF))
- MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
- AFI->getFramePtrSpillOffset());
-
- AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
- AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
- AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
-
- // If we need a base pointer, set it up here. It's whatever the value
- // of the stack pointer is at this point. Any variable size objects
- // will be allocated after this, so we can still use the base pointer
- // to reference locals.
- if (hasBasePointer(MF))
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), BasePtr).addReg(ARM::SP);
-}
-
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
-static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
- if (MI->getOpcode() == ARM::tRestore &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
- return true;
- else if (MI->getOpcode() == ARM::tPOP) {
- // The first two operands are predicates. The last two are
- // imp-def and imp-use of SP. Check everything in between.
- for (int i = 2, e = MI->getNumOperands() - 2; i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
- return false;
- return true;
- }
- return false;
-}
-
-void Thumb1RegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert((MBBI->getOpcode() == ARM::tBX_RET ||
- MBBI->getOpcode() == ARM::tPOP_RET) &&
- "Can only insert epilog into returning blocks");
- DebugLoc dl = MBBI->getDebugLoc();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- int NumBytes = (int)MFI->getStackSize();
- const unsigned *CSRegs = getCalleeSavedRegs();
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
- } else {
- // Unwind MBBI to point to first LDR / VLDRD.
- if (MBBI != MBB.begin()) {
- do
- --MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
- if (!isCSRestore(MBBI, CSRegs))
- ++MBBI;
- }
-
- // Move SP to start of FP callee save spill area.
- NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
- AFI->getGPRCalleeSavedArea2Size() +
- AFI->getDPRCalleeSavedAreaSize());
-
- if (AFI->shouldRestoreSPFromFP()) {
- NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
- // Reset SP based on the frame pointer only if the stack frame extends beyond
- // the frame pointer stack slot, or the target is ELF and the function has FP.
- if (NumBytes)
- emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
- TII, *this, dl);
- else
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
- .addReg(FramePtr);
- } else {
- if (MBBI->getOpcode() == ARM::tBX_RET &&
- &MBB.front() != MBBI &&
- prior(MBBI)->getOpcode() == ARM::tPOP) {
- MachineBasicBlock::iterator PMBBI = prior(MBBI);
- emitSPUpdate(MBB, PMBBI, TII, dl, *this, NumBytes);
- } else
- emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
- }
- }
-
- if (VARegSaveSize) {
- // Unlike T2 and ARM mode, the T1 pop instruction cannot restore
- // to LR, and we can't pop the value directly to the PC since
- // we need to update the SP after popping the value. Therefore, we
- // pop the old LR into R3 as a temporary.
-
- // Move back past the callee-saved register restoration
- while (MBBI != MBB.end() && isCSRestore(MBBI, CSRegs))
- ++MBBI;
- // Epilogue for vararg functions: pop LR to R3 and branch off it.
- AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
- .addReg(ARM::R3, RegState::Define);
-
- emitSPUpdate(MBB, MBBI, TII, dl, *this, VARegSaveSize);
-
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg))
- .addReg(ARM::R3, RegState::Kill);
- // erase the old tBX_RET instruction
- MBB.erase(MBBI);
- }
-}
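
The removed prologue rounds the frame size up to a multiple of four because, as its comment notes, the Thumb SP add/sub immediate is implicitly scaled by four. A minimal sketch of that rounding, plus a split of an oversized adjustment into encodable chunks, is below; the emitSPAdjust name and the 1020-byte per-instruction limit (an 8-bit field scaled by 4, per the comment above) are illustrative assumptions, not LLVM's actual emitSPUpdate.

#include <cassert>
#include <cstdio>

// Round a byte count up to the next multiple of 4, exactly as the removed
// prologue does with "NumBytes = (NumBytes + 3) & ~3".
static unsigned roundUpTo4(unsigned bytes) { return (bytes + 3) & ~3u; }

// Hypothetical splitter: assuming an 8-bit immediate scaled by 4, each
// instruction can adjust SP by at most 255 * 4 = 1020 bytes.
static void emitSPAdjust(unsigned bytes) {
  bytes = roundUpTo4(bytes);
  while (bytes > 0) {
    unsigned chunk = bytes > 1020 ? 1020 : bytes;
    std::printf("sub sp, sp, #%u\n", chunk);
    bytes -= chunk;
  }
}

int main() {
  assert(roundUpTo4(13) == 16);
  emitSPAdjust(2048); // three instructions: 1020 + 1020 + 8
  return 0;
}
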
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
index c578054..8a87cc5 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -38,8 +38,6 @@ public:
unsigned PredReg = 0) const;
/// Code Generation virtual methods...
- bool hasReservedCallFrame(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -59,9 +57,6 @@ public:
unsigned Reg) const;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
};
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp
deleted file mode 100644
index 172908d..0000000
--- a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//===-- Thumb2HazardRecognizer.cpp - Thumb2 postra hazard recognizer ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "Thumb2HazardRecognizer.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-using namespace llvm;
-
-ScheduleHazardRecognizer::HazardType
-Thumb2HazardRecognizer::getHazardType(SUnit *SU) {
- if (ITBlockSize) {
- MachineInstr *MI = SU->getInstr();
- if (!MI->isDebugValue() && MI != ITBlockMIs[ITBlockSize-1])
- return Hazard;
- }
-
- return PostRAHazardRecognizer::getHazardType(SU);
-}
-
-void Thumb2HazardRecognizer::Reset() {
- ITBlockSize = 0;
- PostRAHazardRecognizer::Reset();
-}
-
-void Thumb2HazardRecognizer::EmitInstruction(SUnit *SU) {
- MachineInstr *MI = SU->getInstr();
- unsigned Opcode = MI->getOpcode();
- if (ITBlockSize) {
- --ITBlockSize;
- } else if (Opcode == ARM::t2IT) {
- unsigned Mask = MI->getOperand(1).getImm();
- unsigned NumTZ = CountTrailingZeros_32(Mask);
- assert(NumTZ <= 3 && "Invalid IT mask!");
- ITBlockSize = 4 - NumTZ;
- MachineBasicBlock::iterator I = MI;
- for (unsigned i = 0; i < ITBlockSize; ++i) {
- // Advance to the next instruction, skipping any dbg_value instructions.
- do {
- ++I;
- } while (I->isDebugValue());
- ITBlockMIs[ITBlockSize-1-i] = &*I;
- }
- }
-
- PostRAHazardRecognizer::EmitInstruction(SU);
-}
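
The deleted recognizer derives the IT block length from the t2IT mask: the 4-bit mask terminates in a single trailing 1 bit, so the block covers 4 minus the trailing-zero count instructions. A standalone sketch of that decoding follows; CountTrailingZeros_32 is LLVM's helper, and a portable loop stands in for it here.

#include <cassert>

static unsigned countTrailingZeros(unsigned x) {
  assert(x != 0 && "mask must have a terminating 1 bit");
  unsigned n = 0;
  while ((x & 1u) == 0) { x >>= 1; ++n; }
  return n;
}

// 4-bit IT mask -> number of instructions in the IT block.
static unsigned itBlockSize(unsigned mask) {
  unsigned tz = countTrailingZeros(mask & 0xFu);
  assert(tz <= 3 && "Invalid IT mask!");
  return 4 - tz;
}

int main() {
  assert(itBlockSize(0x8) == 1); // IT    (mask 1000)
  assert(itBlockSize(0x2) == 3); // ITxy  (mask xy10)
  assert(itBlockSize(0x1) == 4); // ITxyz (mask xyz1)
  return 0;
}
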
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h
deleted file mode 100644
index 4726658..0000000
--- a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- Thumb2HazardRecognizer.h - Thumb2 Hazard Recognizers ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines hazard recognizers for scheduling Thumb2 functions on
-// ARM processors.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef THUMB2HAZARDRECOGNIZER_H
-#define THUMB2HAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/PostRAHazardRecognizer.h"
-
-namespace llvm {
-
-class MachineInstr;
-
-class Thumb2HazardRecognizer : public PostRAHazardRecognizer {
- unsigned ITBlockSize; // No. of MIs in current IT block yet to be scheduled.
- MachineInstr *ITBlockMIs[4];
-
-public:
- Thumb2HazardRecognizer(const InstrItineraryData &ItinData) :
- PostRAHazardRecognizer(ItinData) {}
-
- virtual HazardType getHazardType(SUnit *SU);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
-};
-
-
-} // end namespace llvm
-
-#endif // THUMB2HAZARDRECOGNIZER_H
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 442f41d..2f67257 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -17,7 +17,6 @@
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
-#include "Thumb2HazardRecognizer.h"
#include "Thumb2InstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -28,15 +27,10 @@
using namespace llvm;
-static cl::opt<unsigned>
-IfCvtLimit("thumb2-ifcvt-limit", cl::Hidden,
- cl::desc("Thumb2 if-conversion limit (default 3)"),
- cl::init(3));
-
-static cl::opt<unsigned>
-IfCvtDiamondLimit("thumb2-ifcvt-diamond-limit", cl::Hidden,
- cl::desc("Thumb2 diamond if-conversion limit (default 3)"),
- cl::init(3));
+static cl::opt<bool>
+OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
+ cl::desc("Use old-style Thumb2 if-conversion heuristics"),
+ cl::init(false));
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
@@ -105,21 +99,6 @@ Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
return llvm::getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
}
-bool Thumb2InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs) const {
- return NumInstrs && NumInstrs <= IfCvtLimit;
-}
-
-bool Thumb2InstrInfo::
-isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
- MachineBasicBlock &FMBB, unsigned NumF) const {
- // FIXME: Catch optimization such as:
- // r0 = movne
- // r0 = moveq
- return NumT && NumF &&
- NumT <= (IfCvtDiamondLimit) && NumF <= (IfCvtDiamondLimit);
-}
-
void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -155,8 +134,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12))
@@ -181,8 +161,9 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
@@ -193,11 +174,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
-ScheduleHazardRecognizer *Thumb2InstrInfo::
-CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
- return (ScheduleHazardRecognizer *)new Thumb2HazardRecognizer(II);
-}
-
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 3a9f8b1..f2637d7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -38,11 +38,6 @@ public:
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const;
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const;
-
- bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
- MachineBasicBlock &FMBB, unsigned NumFInstrs) const;
-
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -70,9 +65,6 @@ public:
/// always be able to get register info as well (through this method).
///
const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
-
- ScheduleHazardRecognizer *
- CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const;
};
/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
index 07dd0be..099b8f7 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
@@ -29,7 +29,6 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 0c3962d..cc8f61c 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -58,7 +58,7 @@ namespace {
{ ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0 },
{ ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0 },
// Note: immediate scale is 4.
- { ARM::t2ADDrSPi,ARM::tADDrSPi,0, 8, 0, 1, 0, 1,0, 0 },
+ { ARM::t2ADDrSPi,ARM::tADDrSPi,0, 8, 0, 1, 0, 1,0, 1 },
{ ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 1 },
{ ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 1 },
{ ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 0 },
@@ -68,9 +68,7 @@ namespace {
//FIXME: Disable CMN, as CCodes are backwards from compare expectations
//{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0 },
{ ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0 },
- { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0 },
- { ARM::t2CMPzri,ARM::tCMPzi8, 0, 8, 0, 1, 0, 2,0, 0 },
- { ARM::t2CMPzrr,ARM::tCMPzhir,0, 0, 0, 0, 0, 2,0, 0 },
+ { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 1 },
{ ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 0 },
// FIXME: adr.n immediate offset must be multiple of 4.
//{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0 },
@@ -106,26 +104,27 @@ namespace {
// FIXME: Clean this up after splitting each Thumb load / store opcode
// into multiple ones.
- { ARM::t2LDRi12,ARM::tLDR, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 1 },
- { ARM::t2LDRs, ARM::tLDR, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRBi12,ARM::tLDRB, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRBs, ARM::tLDRB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRHi12,ARM::tLDRH, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRHs, ARM::tLDRH, 0, 0, 0, 1, 0, 0,0, 1 },
+ { ARM::t2LDRi12,ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 1 },
+ { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 1 },
+ { ARM::t2LDRBi12,ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 1 },
+ { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 1 },
+ { ARM::t2LDRHi12,ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 1 },
+ { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 1 },
{ ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 1 },
{ ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRi12,ARM::tSTR, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 1 },
- { ARM::t2STRs, ARM::tSTR, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRBi12,ARM::tSTRB, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRBs, ARM::tSTRB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRHi12,ARM::tSTRH, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRHs, ARM::tSTRH, 0, 0, 0, 1, 0, 0,0, 1 },
-
- { ARM::t2LDM, ARM::tLDM, 0, 0, 0, 1, 1, 1,1, 1 },
- { ARM::t2LDM_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 1 },
- { ARM::t2LDM_UPD,ARM::tLDM_UPD,ARM::tPOP, 0, 0, 1, 1, 1,1, 1 },
+ { ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 1 },
+ { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 1 },
+ { ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 1 },
+ { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 1 },
+ { ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 1 },
+ { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 1 },
+
+ { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 1 },
+ { ARM::t2LDMIA_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 1 },
+ { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0, 0, 1, 1, 1,1, 1 },
// ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent
- { ARM::t2STM_UPD,ARM::tSTM_UPD,ARM::tPUSH, 0, 0, 1, 1, 1,1, 1 },
+ { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 1 },
+ { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 1 },
};
class Thumb2SizeReduce : public MachineFunctionPass {
@@ -217,8 +216,8 @@ Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
/// Old opcode has an optional def of CPSR.
if (HasCC)
return true;
- // If both old opcode does not implicit CPSR def, then it's not ok since
- // these new opcodes CPSR def is not meant to be thrown away. e.g. CMP.
+ // If old opcode does not implicitly define CPSR, then it's not ok since
+ // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
if (!HasImplicitCPSRDef(MI->getDesc()))
return false;
HasCC = true;
@@ -233,9 +232,10 @@ Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
static bool VerifyLowRegs(MachineInstr *MI) {
unsigned Opc = MI->getOpcode();
- bool isPCOk = (Opc == ARM::t2LDM_RET || Opc == ARM::t2LDM ||
- Opc == ARM::t2LDM_UPD);
- bool isLROk = (Opc == ARM::t2STM_UPD);
+ bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA ||
+ Opc == ARM::t2LDMDB || Opc == ARM::t2LDMIA_UPD ||
+ Opc == ARM::t2LDMDB_UPD);
+ bool isLROk = (Opc == ARM::t2STMIA_UPD || Opc == ARM::t2STMDB_UPD);
bool isSPOk = isPCOk || isLROk || (Opc == ARM::t2ADDrSPi);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
@@ -275,29 +275,32 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
unsigned Opc = Entry.NarrowOpc1;
unsigned OpNum = 3; // First 'rest' of operands.
uint8_t ImmLimit = Entry.Imm1Limit;
+
switch (Entry.WideOpc) {
default:
llvm_unreachable("Unexpected Thumb2 load / store opcode!");
case ARM::t2LDRi12:
- case ARM::t2STRi12: {
- unsigned BaseReg = MI->getOperand(1).getReg();
- if (BaseReg == ARM::SP) {
+ case ARM::t2STRi12:
+ if (MI->getOperand(1).getReg() == ARM::SP) {
Opc = Entry.NarrowOpc2;
ImmLimit = Entry.Imm2Limit;
HasOffReg = false;
}
+
Scale = 4;
HasImmOffset = true;
+ HasOffReg = false;
break;
- }
case ARM::t2LDRBi12:
case ARM::t2STRBi12:
HasImmOffset = true;
+ HasOffReg = false;
break;
case ARM::t2LDRHi12:
case ARM::t2STRHi12:
Scale = 2;
HasImmOffset = true;
+ HasOffReg = false;
break;
case ARM::t2LDRs:
case ARM::t2LDRBs:
@@ -310,11 +313,12 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
HasShift = true;
OpNum = 4;
break;
- case ARM::t2LDM: {
+ case ARM::t2LDMIA:
+ case ARM::t2LDMDB: {
unsigned BaseReg = MI->getOperand(0).getReg();
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
- if (!isARMLowRegister(BaseReg) || Mode != ARM_AM::ia)
+ if (!isARMLowRegister(BaseReg) || Entry.WideOpc != ARM::t2LDMIA)
return false;
+
// For the non-writeback version (this one), the base register must be
// one of the registers being loaded.
bool isOK = false;
@@ -324,6 +328,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
break;
}
}
+
if (!isOK)
return false;
@@ -331,28 +336,33 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
isLdStMul = true;
break;
}
- case ARM::t2LDM_RET: {
+ case ARM::t2LDMIA_RET: {
unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg != ARM::SP)
return false;
Opc = Entry.NarrowOpc2; // tPOP_RET
- OpNum = 3;
+ OpNum = 2;
isLdStMul = true;
break;
}
- case ARM::t2LDM_UPD:
- case ARM::t2STM_UPD: {
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD: {
OpNum = 0;
+
unsigned BaseReg = MI->getOperand(1).getReg();
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(2).getImm());
if (BaseReg == ARM::SP &&
- ((Entry.WideOpc == ARM::t2LDM_UPD && Mode == ARM_AM::ia) ||
- (Entry.WideOpc == ARM::t2STM_UPD && Mode == ARM_AM::db))) {
+ (Entry.WideOpc == ARM::t2LDMIA_UPD ||
+ Entry.WideOpc == ARM::t2STMDB_UPD)) {
Opc = Entry.NarrowOpc2; // tPOP or tPUSH
- OpNum = 3;
- } else if (!isARMLowRegister(BaseReg) || Mode != ARM_AM::ia) {
+ OpNum = 2;
+ } else if (!isARMLowRegister(BaseReg) ||
+ (Entry.WideOpc != ARM::t2LDMIA_UPD &&
+ Entry.WideOpc != ARM::t2STMIA_UPD)) {
return false;
}
+
isLdStMul = true;
break;
}
@@ -363,6 +373,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
if (HasShift) {
OffsetReg = MI->getOperand(2).getReg();
OffsetKill = MI->getOperand(2).isKill();
+
if (MI->getOperand(3).getImm())
// Thumb1 addressing mode doesn't support shift.
return false;
@@ -372,23 +383,22 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
if (HasImmOffset) {
OffsetImm = MI->getOperand(2).getImm();
unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
- if ((OffsetImm & (Scale-1)) || OffsetImm > MaxOffset)
+
+ if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
// Make sure the immediate field fits.
return false;
}
// Add the 16-bit load / store instruction.
- // FIXME: Thumb1 addressing mode encodes both immediate and register offset.
DebugLoc dl = MI->getDebugLoc();
MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Opc));
if (!isLdStMul) {
- MIB.addOperand(MI->getOperand(0)).addOperand(MI->getOperand(1));
- if (Opc != ARM::tLDRSB && Opc != ARM::tLDRSH) {
- // tLDRSB and tLDRSH do not have an immediate offset field. On the other
- // hand, it must have an offset register.
- // FIXME: Remove this special case.
- MIB.addImm(OffsetImm/Scale);
- }
+ MIB.addOperand(MI->getOperand(0));
+ MIB.addOperand(MI->getOperand(1));
+
+ if (HasImmOffset)
+ MIB.addImm(OffsetImm / Scale);
+
assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");
if (HasOffReg)
@@ -423,7 +433,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
unsigned Opc = MI->getOpcode();
switch (Opc) {
default: break;
- case ARM::t2ADDSri:
+ case ARM::t2ADDSri:
case ARM::t2ADDSrr: {
unsigned PredReg = 0;
if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
@@ -451,6 +461,25 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
if (MI->getOperand(1).isImm())
return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
break;
+ case ARM::t2CMPrr: {
+ // Try to reduce to the lo-reg only version first. Why there are two
+ // versions of the instruction is a mystery.
+ // It would be nice to just have two entries in the master table that
+ // are prioritized, but the table assumes a unique entry for each
+ // source insn opcode. So for now, we hack a local entry record to use.
+ static const ReduceEntry NarrowEntry =
+ { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 1 };
+ if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR))
+ return true;
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
+ }
+ case ARM::t2ADDrSPi: {
+ static const ReduceEntry NarrowEntry =
+ { ARM::t2ADDrSPi,ARM::tADDspi, 0, 7, 0, 1, 0, 1, 0, 1 };
+ if (MI->getOperand(0).getReg() == ARM::SP)
+ return ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
+ }
}
return false;
}
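
The immediate check in ReduceLoadStore above gates narrowing on two conditions: the byte offset must be a multiple of the access scale, and after scaling it must fit in the narrow opcode's Imm1Limit-bit field. A small self-contained version of that test (the fitsNarrowImm name is illustrative; scale is assumed to be a power of two, as in the table):

#include <cassert>

static bool fitsNarrowImm(unsigned offsetImm, unsigned immLimit,
                          unsigned scale) {
  unsigned maxOffset = ((1u << immLimit) - 1) * scale;
  if ((offsetImm & (scale - 1)) || offsetImm > maxOffset)
    return false; // misaligned or out of range for the narrow encoding
  return true;
}

int main() {
  // tLDRi: 5-bit immediate, scale 4 => byte offsets 0..124, multiple of 4.
  assert(fitsNarrowImm(124, 5, 4));
  assert(!fitsNarrowImm(126, 5, 4)); // not a multiple of 4
  assert(!fitsNarrowImm(128, 5, 4)); // exceeds 124
  return 0;
}
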
diff --git a/contrib/llvm/lib/Target/Alpha/Alpha.h b/contrib/llvm/lib/Target/Alpha/Alpha.h
index 5cf4866..2c359da 100644
--- a/contrib/llvm/lib/Target/Alpha/Alpha.h
+++ b/contrib/llvm/lib/Target/Alpha/Alpha.h
@@ -18,6 +18,13 @@
#include "llvm/Target/TargetMachine.h"
namespace llvm {
+ namespace Alpha {
+ // These describe LDAx
+
+ static const int IMM_LOW = -32768;
+ static const int IMM_HIGH = 32767;
+ static const int IMM_MULT = 65536;
+ }
class AlphaTargetMachine;
class FunctionPass;
diff --git a/contrib/llvm/lib/Target/Alpha/AsmPrinter/AlphaAsmPrinter.cpp b/contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp
index 5428cb9..46ae286 100644
--- a/contrib/llvm/lib/Target/Alpha/AsmPrinter/AlphaAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp
@@ -91,7 +91,7 @@ void AlphaAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
return;
case MachineOperand::MO_Immediate:
- llvm_unreachable("printOp() does not handle immediate values");
+ assert(0 && "printOp() does not handle immediate values");
return;
case MachineOperand::MO_MachineBasicBlock:
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaCodeEmitter.cpp b/contrib/llvm/lib/Target/Alpha/AlphaCodeEmitter.cpp
deleted file mode 100644
index 3aec070..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaCodeEmitter.cpp
+++ /dev/null
@@ -1,222 +0,0 @@
-//===-- Alpha/AlphaCodeEmitter.cpp - Convert Alpha code to machine code ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the pass that transforms the Alpha machine instructions
-// into relocatable machine code.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "alpha-emitter"
-#include "AlphaTargetMachine.h"
-#include "AlphaRelocations.h"
-#include "Alpha.h"
-#include "llvm/PassManager.h"
-#include "llvm/CodeGen/JITCodeEmitter.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Function.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
- class AlphaCodeEmitter : public MachineFunctionPass {
- JITCodeEmitter &MCE;
- const AlphaInstrInfo *II;
- public:
- static char ID;
-
- AlphaCodeEmitter(JITCodeEmitter &mce) : MachineFunctionPass(ID),
- MCE(mce) {}
-
- /// getBinaryCodeForInstr - This function, generated by the
- /// CodeEmitterGenerator using TableGen, produces the binary encoding for
- /// machine instructions.
-
- unsigned getBinaryCodeForInstr(const MachineInstr &MI);
-
- /// getMachineOpValue - evaluates the MachineOperand of a given MachineInstr
-
- unsigned getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO);
-
- bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "Alpha Machine Code Emitter";
- }
-
- private:
- void emitBasicBlock(MachineBasicBlock &MBB);
- };
-}
-
-char AlphaCodeEmitter::ID = 0;
-
-
-/// createAlphaCodeEmitterPass - Return a pass that emits the collected Alpha
-/// code to the specified MCE object.
-
-FunctionPass *llvm::createAlphaJITCodeEmitterPass(AlphaTargetMachine &TM,
- JITCodeEmitter &JCE) {
- return new AlphaCodeEmitter(JCE);
-}
-
-bool AlphaCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
- II = ((AlphaTargetMachine&)MF.getTarget()).getInstrInfo();
-
- do {
- MCE.startFunction(MF);
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- emitBasicBlock(*I);
- } while (MCE.finishFunction(MF));
-
- return false;
-}
-
-void AlphaCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
- MCE.StartMachineBasicBlock(&MBB);
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- const MachineInstr &MI = *I;
- MCE.processDebugLoc(MI.getDebugLoc(), true);
- switch(MI.getOpcode()) {
- default:
- MCE.emitWordLE(getBinaryCodeForInstr(*I));
- break;
- case Alpha::ALTENT:
- case Alpha::PCLABEL:
- case Alpha::MEMLABEL:
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- break; //skip these
- }
- MCE.processDebugLoc(MI.getDebugLoc(), false);
- }
-}
-
-static unsigned getAlphaRegNumber(unsigned Reg) {
- switch (Reg) {
- case Alpha::R0 : case Alpha::F0 : return 0;
- case Alpha::R1 : case Alpha::F1 : return 1;
- case Alpha::R2 : case Alpha::F2 : return 2;
- case Alpha::R3 : case Alpha::F3 : return 3;
- case Alpha::R4 : case Alpha::F4 : return 4;
- case Alpha::R5 : case Alpha::F5 : return 5;
- case Alpha::R6 : case Alpha::F6 : return 6;
- case Alpha::R7 : case Alpha::F7 : return 7;
- case Alpha::R8 : case Alpha::F8 : return 8;
- case Alpha::R9 : case Alpha::F9 : return 9;
- case Alpha::R10 : case Alpha::F10 : return 10;
- case Alpha::R11 : case Alpha::F11 : return 11;
- case Alpha::R12 : case Alpha::F12 : return 12;
- case Alpha::R13 : case Alpha::F13 : return 13;
- case Alpha::R14 : case Alpha::F14 : return 14;
- case Alpha::R15 : case Alpha::F15 : return 15;
- case Alpha::R16 : case Alpha::F16 : return 16;
- case Alpha::R17 : case Alpha::F17 : return 17;
- case Alpha::R18 : case Alpha::F18 : return 18;
- case Alpha::R19 : case Alpha::F19 : return 19;
- case Alpha::R20 : case Alpha::F20 : return 20;
- case Alpha::R21 : case Alpha::F21 : return 21;
- case Alpha::R22 : case Alpha::F22 : return 22;
- case Alpha::R23 : case Alpha::F23 : return 23;
- case Alpha::R24 : case Alpha::F24 : return 24;
- case Alpha::R25 : case Alpha::F25 : return 25;
- case Alpha::R26 : case Alpha::F26 : return 26;
- case Alpha::R27 : case Alpha::F27 : return 27;
- case Alpha::R28 : case Alpha::F28 : return 28;
- case Alpha::R29 : case Alpha::F29 : return 29;
- case Alpha::R30 : case Alpha::F30 : return 30;
- case Alpha::R31 : case Alpha::F31 : return 31;
- default:
- llvm_unreachable("Unhandled reg");
- }
-}
-
-unsigned AlphaCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) {
-
- unsigned rv = 0; // Return value; defaults to 0 for unhandled cases
- // or things that get fixed up later by the JIT.
-
- if (MO.isReg()) {
- rv = getAlphaRegNumber(MO.getReg());
- } else if (MO.isImm()) {
- rv = MO.getImm();
- } else if (MO.isGlobal() || MO.isSymbol() || MO.isCPI()) {
- DEBUG(errs() << MO << " is a relocated op for " << MI << "\n");
- unsigned Reloc = 0;
- int Offset = 0;
- bool useGOT = false;
- switch (MI.getOpcode()) {
- case Alpha::BSR:
- Reloc = Alpha::reloc_bsr;
- break;
- case Alpha::LDLr:
- case Alpha::LDQr:
- case Alpha::LDBUr:
- case Alpha::LDWUr:
- case Alpha::LDSr:
- case Alpha::LDTr:
- case Alpha::LDAr:
- case Alpha::STQr:
- case Alpha::STLr:
- case Alpha::STWr:
- case Alpha::STBr:
- case Alpha::STSr:
- case Alpha::STTr:
- Reloc = Alpha::reloc_gprellow;
- break;
- case Alpha::LDAHr:
- Reloc = Alpha::reloc_gprelhigh;
- break;
- case Alpha::LDQl:
- Reloc = Alpha::reloc_literal;
- useGOT = true;
- break;
- case Alpha::LDAg:
- case Alpha::LDAHg:
- Reloc = Alpha::reloc_gpdist;
- Offset = MI.getOperand(3).getImm();
- break;
- default:
- llvm_unreachable("unknown relocatable instruction");
- }
- if (MO.isGlobal())
- MCE.addRelocation(MachineRelocation::getGV(
- MCE.getCurrentPCOffset(),
- Reloc,
- const_cast<GlobalValue *>(MO.getGlobal()),
- Offset,
- isa<Function>(MO.getGlobal()),
- useGOT));
- else if (MO.isSymbol())
- MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
- Reloc, MO.getSymbolName(),
- Offset, true));
- else
- MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
- Reloc, MO.getIndex(), Offset));
- } else if (MO.isMBB()) {
- MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
- Alpha::reloc_bsr, MO.getMBB()));
- } else {
-#ifndef NDEBUG
- errs() << "ERROR: Unknown type of MachineOperand: " << MO << "\n";
-#endif
- llvm_unreachable(0);
- }
-
- return rv;
-}
-
-#include "AlphaGenCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp
new file mode 100644
index 0000000..690cd1d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp
@@ -0,0 +1,143 @@
+//=====- AlphaFrameLowering.cpp - Alpha Frame Information ------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Alpha implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AlphaFrameLowering.h"
+#include "AlphaInstrInfo.h"
+#include "AlphaMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/ADT/Twine.h"
+
+using namespace llvm;
+
+static long getUpper16(long l) {
+ long y = l / Alpha::IMM_MULT;
+ if (l % Alpha::IMM_MULT > Alpha::IMM_HIGH)
+ ++y;
+ return y;
+}
+
+static long getLower16(long l) {
+ long h = getUpper16(l);
+ return l - h * Alpha::IMM_MULT;
+}
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+//
+bool AlphaFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return MFI->hasVarSizedObjects();
+}
+
+void AlphaFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ DebugLoc dl = (MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc());
+ bool FP = hasFP(MF);
+
+ // Handle GP offset
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAHg), Alpha::R29)
+ .addGlobalAddress(MF.getFunction()).addReg(Alpha::R27).addImm(++curgpdist);
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAg), Alpha::R29)
+ .addGlobalAddress(MF.getFunction()).addReg(Alpha::R29).addImm(curgpdist);
+
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::ALTENT))
+ .addGlobalAddress(MF.getFunction());
+
+ // Get the number of bytes to allocate from the FrameInfo
+ long NumBytes = MFI->getStackSize();
+
+ if (FP)
+ NumBytes += 8; //reserve space for the old FP
+
+ // Do we need to allocate space on the stack?
+ if (NumBytes == 0) return;
+
+ unsigned Align = getStackAlignment();
+ NumBytes = (NumBytes+Align-1)/Align*Align;
+
+ // Update frame info to pretend that this is part of the stack...
+ MFI->setStackSize(NumBytes);
+
+ // adjust stack pointer: r30 -= numbytes
+ NumBytes = -NumBytes;
+ if (NumBytes >= Alpha::IMM_LOW) {
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
+ .addReg(Alpha::R30);
+ } else if (getUpper16(NumBytes) >= Alpha::IMM_LOW) {
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
+ .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
+ .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
+ } else {
+ report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
+ }
+
+ // Now if we need to, save the old FP and set the new
+ if (FP) {
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::STQ))
+ .addReg(Alpha::R15).addImm(0).addReg(Alpha::R30);
+ // This must be the last instr in the prolog
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R15)
+ .addReg(Alpha::R30).addReg(Alpha::R30);
+ }
+
+}
+
+void AlphaFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ assert((MBBI->getOpcode() == Alpha::RETDAG ||
+ MBBI->getOpcode() == Alpha::RETDAGp)
+ && "Can only insert epilog into returning blocks");
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ bool FP = hasFP(MF);
+
+ // Get the number of bytes allocated from the FrameInfo...
+ long NumBytes = MFI->getStackSize();
+
+ //now if we need to, restore the old FP
+ if (FP) {
+ //copy the FP into the SP (discards allocas)
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R30).addReg(Alpha::R15)
+ .addReg(Alpha::R15);
+ //restore the FP
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDQ), Alpha::R15)
+ .addImm(0).addReg(Alpha::R15);
+ }
+
+ if (NumBytes != 0) {
+ if (NumBytes <= Alpha::IMM_HIGH) {
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
+ .addReg(Alpha::R30);
+ } else if (getUpper16(NumBytes) <= Alpha::IMM_HIGH) {
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
+ .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
+ BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
+ .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
+ } else {
+ report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
+ }
+ }
+}
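
Both the prologue and epilogue above split a displacement that a single LDA cannot reach into an LDAH/LDA pair: the high half is scaled by 65536 and the low half is a signed 16-bit remainder. The sketch below mirrors that split, following the slightly fuller getUpper16/getLower16 pair in the removed AlphaJITInfo.cpp (which also corrects when the remainder falls below IMM_LOW); main() is only an illustrative check.

#include <cassert>
#include <initializer_list>

static const int IMM_LOW  = -32768; // same LDAx constants as Alpha.h
static const int IMM_HIGH = 32767;
static const int IMM_MULT = 65536;

static long getUpper16(long l) {
  long y = l / IMM_MULT;
  if (l % IMM_MULT > IMM_HIGH) ++y; // remainder too big for LDA: carry up
  if (l % IMM_MULT < IMM_LOW)  --y; // remainder too small: borrow down
  return y;
}

static long getLower16(long l) { return l - getUpper16(l) * IMM_MULT; }

int main() {
  for (long d : {-100000L, -40000L, -8L, 0L, 32768L, 100000L}) {
    long hi = getUpper16(d), lo = getLower16(d);
    assert(hi * (long)IMM_MULT + lo == d);   // LDAH + LDA reassemble d
    assert(lo >= IMM_LOW && lo <= IMM_HIGH); // lo fits LDA's 16-bit field
  }
  return 0;
}
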
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h
new file mode 100644
index 0000000..ebd9e1b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h
@@ -0,0 +1,43 @@
+//==-- AlphaFrameLowering.h - Define frame lowering for Alpha --*- C++ -*---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ALPHA_FRAMEINFO_H
+#define ALPHA_FRAMEINFO_H
+
+#include "Alpha.h"
+#include "AlphaSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class AlphaSubtarget;
+
+class AlphaFrameLowering : public TargetFrameLowering {
+ const AlphaSubtarget &STI;
+ // FIXME: This should end in MachineFunctionInfo, not here!
+ mutable int curgpdist;
+public:
+ explicit AlphaFrameLowering(const AlphaSubtarget &sti)
+ : TargetFrameLowering(StackGrowsDown, 16, 0), STI(sti), curgpdist(0) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
index d197bd1..7b91fea 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
@@ -130,19 +130,6 @@ namespace {
return (x - y) == r;
}
- static bool isFPZ(SDValue N) {
- ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
- return (CN && (CN->getValueAPF().isZero()));
- }
- static bool isFPZn(SDValue N) {
- ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
- return (CN && CN->getValueAPF().isNegZero());
- }
- static bool isFPZp(SDValue N) {
- ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
- return (CN && CN->getValueAPF().isPosZero());
- }
-
public:
explicit AlphaDAGToDAGISel(AlphaTargetMachine &TM)
: SelectionDAGISel(TM)
@@ -253,7 +240,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDNode *N) {
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, N0,
Chain.getValue(1));
SDNode *CNode =
- CurDAG->getMachineNode(Alpha::JSRs, dl, MVT::Other, MVT::Flag,
+ CurDAG->getMachineNode(Alpha::JSRs, dl, MVT::Other, MVT::Glue,
Chain, Chain.getValue(1));
Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, MVT::i64,
SDValue(CNode, 1));
@@ -416,13 +403,13 @@ void AlphaDAGToDAGISel::SelectCALL(SDNode *N) {
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R29, GOT, InFlag);
InFlag = Chain.getValue(1);
Chain = SDValue(CurDAG->getMachineNode(Alpha::BSR, dl, MVT::Other,
- MVT::Flag, Addr.getOperand(0),
+ MVT::Glue, Addr.getOperand(0),
Chain, InFlag), 0);
} else {
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, Addr, InFlag);
InFlag = Chain.getValue(1);
Chain = SDValue(CurDAG->getMachineNode(Alpha::JSR, dl, MVT::Other,
- MVT::Flag, Chain, InFlag), 0);
+ MVT::Glue, Chain, InFlag), 0);
}
InFlag = Chain.getValue(1);
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
index ea78bf3..9137d65 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -27,6 +27,7 @@
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -124,7 +125,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::f32, Promote);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
+ setOperationAction(ISD::BITCAST, MVT::f32, Promote);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
@@ -284,8 +285,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
DAG.getIntPtrConstant(VA.getLocMemOffset()));
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), 0,
- false, false, 0));
+ MachinePointerInfo(),false, false, 0));
}
}
@@ -306,7 +306,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@@ -431,7 +431,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0,
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
false, false, 0);
}
InVals.push_back(ArgVal);
@@ -448,7 +448,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
int FI = MFI->CreateFixedObject(8, -8 * (6 - i), true);
if (i == 0) FuncInfo->setVarArgsBase(FI);
SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
- LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0,
+ LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, MachinePointerInfo(),
false, false, 0));
if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
@@ -456,7 +456,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
FI = MFI->CreateFixedObject(8, - 8 * (12 - i), true);
SDFI = DAG.getFrameIndex(FI, MVT::i64);
- LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0,
+ LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, MachinePointerInfo(),
false, false, 0));
}
@@ -537,12 +537,14 @@ void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
DebugLoc dl = N->getDebugLoc();
- SDValue Base = DAG.getLoad(MVT::i64, dl, Chain, VAListP, VAListS, 0,
+ SDValue Base = DAG.getLoad(MVT::i64, dl, Chain, VAListP,
+ MachinePointerInfo(VAListS),
false, false, 0);
SDValue Tmp = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
DAG.getConstant(8, MVT::i64));
- SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Base.getValue(1),
- Tmp, NULL, 0, MVT::i32, false, false, 0);
+ SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Base.getValue(1),
+ Tmp, MachinePointerInfo(),
+ MVT::i32, false, false, 0);
DataPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Base, Offset);
if (N->getValueType(0).isFloatingPoint())
{
@@ -556,7 +558,8 @@ void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
SDValue NewOffset = DAG.getNode(ISD::ADD, dl, MVT::i64, Offset,
DAG.getConstant(8, MVT::i64));
- Chain = DAG.getTruncStore(Offset.getValue(1), dl, NewOffset, Tmp, NULL, 0,
+ Chain = DAG.getTruncStore(Offset.getValue(1), dl, NewOffset, Tmp,
+ MachinePointerInfo(),
MVT::i32, false, false, 0);
}
@@ -613,7 +616,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
"Unhandled SINT_TO_FP type in custom expander!");
SDValue LD;
bool isDouble = Op.getValueType() == MVT::f64;
- LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0));
+ LD = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
isDouble?MVT::f64:MVT::f32, LD);
return FP;
@@ -627,7 +630,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i64, src);
}
case ISD::ConstantPool: {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
@@ -645,11 +648,11 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
GSDN->getOffset());
// FIXME there isn't really any debug info here
- // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
+ // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
// && !GV->hasLinkOnceLinkage()) {
if (GV->hasLocalLinkage()) {
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
@@ -706,10 +709,11 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
SDValue Result;
if (Op.getValueType() == MVT::i32)
- Result = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Chain, DataPtr,
- NULL, 0, MVT::i32, false, false, 0);
+ Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Chain, DataPtr,
+ MachinePointerInfo(), MVT::i32, false, false, 0);
else
- Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr, NULL, 0,
+ Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr,
+ MachinePointerInfo(),
false, false, 0);
return Result;
}
@@ -720,17 +724,20 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
const Value *DestS = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcS = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP, SrcS, 0,
+ SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP,
+ MachinePointerInfo(SrcS),
false, false, 0);
- SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP, DestS, 0,
+ SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
+ MachinePointerInfo(DestS),
false, false, 0);
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
DAG.getConstant(8, MVT::i64));
- Val = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Result,
- NP, NULL,0, MVT::i32, false, false, 0);
+ Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Result,
+ NP, MachinePointerInfo(), MVT::i32, false, false, 0);
SDValue NPD = DAG.getNode(ISD::ADD, dl, MVT::i64, DestP,
DAG.getConstant(8, MVT::i64));
- return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD, NULL, 0, MVT::i32,
+ return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD,
+ MachinePointerInfo(), MVT::i32,
false, false, 0);
}
case ISD::VASTART: {
@@ -743,14 +750,15 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
// vastart stores the address of the VarArgsBase and VarArgsOffset
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsBase(), MVT::i64);
- SDValue S1 = DAG.getStore(Chain, dl, FR, VAListP, VAListS, 0,
- false, false, 0);
+ SDValue S1 = DAG.getStore(Chain, dl, FR, VAListP,
+ MachinePointerInfo(VAListS), false, false, 0);
SDValue SA2 = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
DAG.getConstant(8, MVT::i64));
return DAG.getTruncStore(S1, dl,
DAG.getConstant(FuncInfo->getVarArgsOffset(),
MVT::i64),
- SA2, NULL, 0, MVT::i32, false, false, 0);
+ SA2, MachinePointerInfo(),
+ MVT::i32, false, false, 0);
}
case ISD::RETURNADDR:
return DAG.getNode(AlphaISD::GlobalRetAddr, DebugLoc(), MVT::i64);
@@ -771,7 +779,8 @@ void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Chain, DataPtr;
LowerVAARG(N, Chain, DataPtr, DAG);
- SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr, NULL, 0,
+ SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
+ MachinePointerInfo(),
false, false, 0);
Results.push_back(Res);
Results.push_back(SDValue(Res.getNode(), 1));
@@ -795,6 +804,30 @@ AlphaTargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+AlphaTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'f':
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
+
std::vector<unsigned> AlphaTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const {
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
index 46e0c7d..b429e9f 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
@@ -87,6 +87,11 @@ namespace llvm {
ConstraintType getConstraintType(const std::string &Constraint) const;
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
index 92de78a..099d715 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
+++ b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
@@ -27,7 +27,7 @@ def Alpha_gprelhi : SDNode<"AlphaISD::GPRelHi", SDTIntBinOp, []>;
def Alpha_rellit : SDNode<"AlphaISD::RelLit", SDTIntBinOp, [SDNPMayLoad]>;
def retflag : SDNode<"AlphaISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
// These are target-independent nodes, but have target-specific formats.
def SDT_AlphaCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i64> ]>;
@@ -35,9 +35,9 @@ def SDT_AlphaCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i64>,
SDTCisVT<1, i64> ]>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AlphaCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AlphaCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
//********************
//Patterns for matching
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.cpp
deleted file mode 100644
index 12685ed..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-//===-- AlphaJITInfo.cpp - Implement the JIT interfaces for the Alpha ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the JIT interfaces for the Alpha target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "jit"
-#include "AlphaJITInfo.h"
-#include "AlphaRelocations.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/JITCodeEmitter.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstdlib>
-using namespace llvm;
-
-#define BUILD_OFormatI(Op, RA, LIT, FUN, RC) \
- ((Op << 26) | (RA << 21) | (LIT << 13) | (1 << 12) | (FUN << 5) | (RC))
-#define BUILD_OFormat(Op, RA, RB, FUN, RC) \
- ((Op << 26) | (RA << 21) | (RB << 16) | (FUN << 5) | (RC))
-
-#define BUILD_LDA(RD, RS, IMM16) \
- ((0x08 << 26) | ((RD) << 21) | ((RS) << 16) | ((IMM16) & 65535))
-#define BUILD_LDAH(RD, RS, IMM16) \
- ((0x09 << 26) | ((RD) << 21) | ((RS) << 16) | ((IMM16) & 65535))
-
-#define BUILD_LDQ(RD, RS, IMM16) \
- ((0x29 << 26) | ((RD) << 21) | ((RS) << 16) | ((IMM16) & 0xFFFF))
-
-#define BUILD_JMP(RD, RS, IMM16) \
- ((0x1A << 26) | ((RD) << 21) | ((RS) << 16) | (0x00 << 14) | ((IMM16) & 0x3FFF))
-#define BUILD_JSR(RD, RS, IMM16) \
- ((0x1A << 26) | ((RD) << 21) | ((RS) << 16) | (0x01 << 14) | ((IMM16) & 0x3FFF))
-
-#define BUILD_SLLi(RD, RS, IMM8) \
- (BUILD_OFormatI(0x12, RS, IMM8, 0x39, RD))
-
-#define BUILD_ORi(RD, RS, IMM8) \
- (BUILD_OFormatI(0x11, RS, IMM8, 0x20, RD))
-
-#define BUILD_OR(RD, RS, RT) \
- (BUILD_OFormat(0x11, RS, RT, 0x20, RD))
-
-
-
-static void EmitBranchToAt(void *At, void *To) {
- unsigned long Fn = (unsigned long)To;
-
- unsigned *AtI = (unsigned*)At;
-
- AtI[0] = BUILD_OR(0, 27, 27);
-
- DEBUG(errs() << "Stub targeting " << To << "\n");
-
- for (int x = 1; x <= 8; ++x) {
- AtI[2*x - 1] = BUILD_SLLi(27,27,8);
- unsigned d = (Fn >> (64 - 8 * x)) & 0x00FF;
- //DEBUG(errs() << "outputing " << hex << d << dec << "\n");
- AtI[2*x] = BUILD_ORi(27, 27, d);
- }
- AtI[17] = BUILD_JMP(31,27,0); //jump, preserving ra, and setting pv
- AtI[18] = 0x00FFFFFF; //mark this as a stub
-}
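
The stub built by EmitBranchToAt above materializes the 64-bit target address in R27 one byte at a time: eight rounds of shift-left-8 then OR-in-the-next-byte, most significant byte first, followed by the indirect jump. A host-side sketch of just that accumulation (the materialize name is illustrative):

#include <cassert>
#include <cstdint>

static uint64_t materialize(uint64_t fn) {
  uint64_t r27 = 0;
  for (int x = 1; x <= 8; ++x) {
    r27 <<= 8;                          // SLLi r27, r27, 8
    r27 |= (fn >> (64 - 8 * x)) & 0xFF; // ORi  r27, r27, next byte
  }
  return r27;
}

int main() {
  assert(materialize(0x123456789abcdef0ull) == 0x123456789abcdef0ull);
  return 0;
}
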
-
-void AlphaJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
- //FIXME
- llvm_unreachable(0);
-}
-
-static TargetJITInfo::JITCompilerFn JITCompilerFunction;
-//static AlphaJITInfo* AlphaJTI;
-
-extern "C" {
-#ifdef __alpha
-
- void AlphaCompilationCallbackC(long* oldpv, void* CameFromStub)
- {
- void* Target = JITCompilerFunction(CameFromStub);
-
- //rewrite the stub to an unconditional branch
- if (((unsigned*)CameFromStub)[18] == 0x00FFFFFF) {
- DEBUG(errs() << "Came from a stub, rewriting\n");
- EmitBranchToAt(CameFromStub, Target);
- } else {
- DEBUG(errs() << "confused, didn't come from stub at " << CameFromStub
- << " old jump vector " << oldpv
- << " new jump vector " << Target << "\n");
- }
-
- //Change pv to new Target
- *oldpv = (long)Target;
- }
-
- void AlphaCompilationCallback(void);
-
- asm(
- ".text\n"
- ".globl AlphaCompilationCallbackC\n"
- ".align 4\n"
- ".globl AlphaCompilationCallback\n"
- ".ent AlphaCompilationCallback\n"
-"AlphaCompilationCallback:\n"
- // //get JIT's GOT
- "ldgp $29, 0($27)\n"
- //Save args, callee saved, and perhaps others?
- //args: $16-$21 $f16-$f21 (12)
- //callee: $9-$14 $f2-$f9 (14)
- //others: fp:$15 ra:$26 pv:$27 (3)
- "lda $30, -232($30)\n"
- "stq $16, 0($30)\n"
- "stq $17, 8($30)\n"
- "stq $18, 16($30)\n"
- "stq $19, 24($30)\n"
- "stq $20, 32($30)\n"
- "stq $21, 40($30)\n"
- "stt $f16, 48($30)\n"
- "stt $f17, 56($30)\n"
- "stt $f18, 64($30)\n"
- "stt $f19, 72($30)\n"
- "stt $f20, 80($30)\n"
- "stt $f21, 88($30)\n"
- "stq $9, 96($30)\n"
- "stq $10, 104($30)\n"
- "stq $11, 112($30)\n"
- "stq $12, 120($30)\n"
- "stq $13, 128($30)\n"
- "stq $14, 136($30)\n"
- "stt $f2, 144($30)\n"
- "stt $f3, 152($30)\n"
- "stt $f4, 160($30)\n"
- "stt $f5, 168($30)\n"
- "stt $f6, 176($30)\n"
- "stt $f7, 184($30)\n"
- "stt $f8, 192($30)\n"
- "stt $f9, 200($30)\n"
- "stq $15, 208($30)\n"
- "stq $26, 216($30)\n"
- "stq $27, 224($30)\n"
-
- "addq $30, 224, $16\n" //pass the addr of saved pv as the first arg
- "bis $0, $0, $17\n" //pass the roughly stub addr in second arg
- "jsr $26, AlphaCompilationCallbackC\n" //call without saving ra
-
- "ldq $16, 0($30)\n"
- "ldq $17, 8($30)\n"
- "ldq $18, 16($30)\n"
- "ldq $19, 24($30)\n"
- "ldq $20, 32($30)\n"
- "ldq $21, 40($30)\n"
- "ldt $f16, 48($30)\n"
- "ldt $f17, 56($30)\n"
- "ldt $f18, 64($30)\n"
- "ldt $f19, 72($30)\n"
- "ldt $f20, 80($30)\n"
- "ldt $f21, 88($30)\n"
- "ldq $9, 96($30)\n"
- "ldq $10, 104($30)\n"
- "ldq $11, 112($30)\n"
- "ldq $12, 120($30)\n"
- "ldq $13, 128($30)\n"
- "ldq $14, 136($30)\n"
- "ldt $f2, 144($30)\n"
- "ldt $f3, 152($30)\n"
- "ldt $f4, 160($30)\n"
- "ldt $f5, 168($30)\n"
- "ldt $f6, 176($30)\n"
- "ldt $f7, 184($30)\n"
- "ldt $f8, 192($30)\n"
- "ldt $f9, 200($30)\n"
- "ldq $15, 208($30)\n"
- "ldq $26, 216($30)\n"
- "ldq $27, 224($30)\n" //this was updated in the callback with the target
-
- "lda $30, 232($30)\n" //restore sp
- "jmp $31, ($27)\n" //jump to the new function
- ".end AlphaCompilationCallback\n"
- );
-#else
- void AlphaCompilationCallback() {
- llvm_unreachable("Cannot call AlphaCompilationCallback() on a non-Alpha arch!");
- }
-#endif
-}
-
-TargetJITInfo::StubLayout AlphaJITInfo::getStubLayout() {
- // The stub contains 19 4-byte instructions, aligned at 4 bytes:
- // R0 = R27
- // 8 x "R27 <<= 8; R27 |= 8-bits-of-Target" == 16 instructions
- // JMP R27
- // Magic number so the compilation callback can recognize the stub.
- StubLayout Result = {19 * 4, 4};
- return Result;
-}
-
-void *AlphaJITInfo::emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE) {
- //assert(Fn == AlphaCompilationCallback && "Where are you going?\n");
- //Do things in a stupid slow way!
- void* Addr = (void*)(intptr_t)JCE.getCurrentPCValue();
- for (int x = 0; x < 19; ++ x)
- JCE.emitWordLE(0);
- EmitBranchToAt(Addr, Fn);
- DEBUG(errs() << "Emitting Stub to " << Fn << " at [" << Addr << "]\n");
- return Addr;
-}
-
-TargetJITInfo::LazyResolverFn
-AlphaJITInfo::getLazyResolverFunction(JITCompilerFn F) {
- JITCompilerFunction = F;
- // setZerothGOTEntry((void*)AlphaCompilationCallback);
- return AlphaCompilationCallback;
-}
-
-//These describe LDAx
-static const int IMM_LOW = -32768;
-static const int IMM_HIGH = 32767;
-static const int IMM_MULT = 65536;
-
-static long getUpper16(long l)
-{
- long y = l / IMM_MULT;
- if (l % IMM_MULT > IMM_HIGH)
- ++y;
- if (l % IMM_MULT < IMM_LOW)
- --y;
- assert((short)y == y && "displacement out of range");
- return y;
-}
-
-static long getLower16(long l)
-{
- long h = getUpper16(l);
- long y = l - h * IMM_MULT;
- assert(y == (short)y && "Displacement out of range");
- return y;
-}
-
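[Editor's note, not part of the diff: getUpper16/getLower16 split a displacement into two signed 16-bit immediates for an LDAH/LDA pair such that high * 65536 + low reproduces the original value; the rounding adjustments keep the low half inside the signed range. A quick exhaustive check of that invariant, mirroring the constants above:]

    #include <cassert>

    static const int IMM_LOW = -32768, IMM_HIGH = 32767, IMM_MULT = 65536;

    static long upper16(long l) {
      long y = l / IMM_MULT;
      if (l % IMM_MULT > IMM_HIGH) ++y;  // push the low half down into range
      if (l % IMM_MULT < IMM_LOW) --y;   // pull it up for negative remainders
      return y;
    }

    int main() {
      for (long l = -300000; l <= 300000; l += 7) {
        long hi = upper16(l), lo = l - hi * IMM_MULT;
        assert(lo >= IMM_LOW && lo <= IMM_HIGH); // both halves fit LDAH/LDA
        assert(hi * IMM_MULT + lo == l);         // and they reconstruct l
      }
      return 0;
    }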
-void AlphaJITInfo::relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase) {
- for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
- unsigned *RelocPos = (unsigned*)Function + MR->getMachineCodeOffset()/4;
- long idx = 0;
- bool doCommon = true;
- switch ((Alpha::RelocationType)MR->getRelocationType()) {
- default: llvm_unreachable("Unknown relocation type!");
- case Alpha::reloc_literal:
- //This is a LDQl
- idx = MR->getGOTIndex();
- DEBUG(errs() << "Literal relocation to slot " << idx);
- idx = (idx - GOToffset) * 8;
- DEBUG(errs() << " offset " << idx << "\n");
- break;
- case Alpha::reloc_gprellow:
- idx = (unsigned char*)MR->getResultPointer() - &GOTBase[GOToffset * 8];
- idx = getLower16(idx);
- DEBUG(errs() << "gprellow relocation offset " << idx << "\n");
- DEBUG(errs() << " Pointer is " << (void*)MR->getResultPointer()
- << " GOT is " << (void*)&GOTBase[GOToffset * 8] << "\n");
- break;
- case Alpha::reloc_gprelhigh:
- idx = (unsigned char*)MR->getResultPointer() - &GOTBase[GOToffset * 8];
- idx = getUpper16(idx);
- DEBUG(errs() << "gprelhigh relocation offset " << idx << "\n");
- DEBUG(errs() << " Pointer is " << (void*)MR->getResultPointer()
- << " GOT is " << (void*)&GOTBase[GOToffset * 8] << "\n");
- break;
- case Alpha::reloc_gpdist:
- switch (*RelocPos >> 26) {
- case 0x09: //LDAH
- idx = &GOTBase[GOToffset * 8] - (unsigned char*)RelocPos;
- idx = getUpper16(idx);
- DEBUG(errs() << "LDAH: " << idx << "\n");
- //add the relocation to the map
- gpdistmap[std::make_pair(Function, MR->getConstantVal())] = RelocPos;
- break;
- case 0x08: //LDA
- assert(gpdistmap[std::make_pair(Function, MR->getConstantVal())] &&
- "LDAg without seeing LDAHg");
- idx = &GOTBase[GOToffset * 8] -
- (unsigned char*)gpdistmap[std::make_pair(Function, MR->getConstantVal())];
- idx = getLower16(idx);
- DEBUG(errs() << "LDA: " << idx << "\n");
- break;
- default:
- llvm_unreachable("Cannot handle gpdist yet");
- }
- break;
- case Alpha::reloc_bsr: {
- idx = (((unsigned char*)MR->getResultPointer() -
- (unsigned char*)RelocPos) >> 2) + 1; //skip first 2 inst of fun
- *RelocPos |= (idx & ((1 << 21)-1));
- doCommon = false;
- break;
- }
- }
- if (doCommon) {
- short x = (short)idx;
- assert(x == idx);
- *(short*)RelocPos = x;
- }
- }
-}
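[Editor's note, not part of the diff: the common relocation path above ends with *(short*)RelocPos = x, which overwrites only the instruction's low 16 bits — the memory-format displacement field — leaving the opcode and register fields intact. A sketch with a hypothetical instruction word, assuming a little-endian host as on Alpha itself:]

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint32_t insn = 0xA45D0000u;            // hypothetical load word, disp = 0
      int16_t disp = -32;                     // displacement computed by relocate
      std::memcpy(&insn, &disp, sizeof disp); // same effect as *(short*)RelocPos = x
      assert((insn & 0xFFFFu) == 0xFFE0u);    // low half now holds -32
      assert((insn >> 16) == 0xA45Du);        // opcode/register fields untouched
      return 0;
    }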
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.h
deleted file mode 100644
index bd358a4..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaJITInfo.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===- AlphaJITInfo.h - Alpha impl. of the JIT interface ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of the TargetJITInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHA_JITINFO_H
-#define ALPHA_JITINFO_H
-
-#include "llvm/Target/TargetJITInfo.h"
-#include <map>
-
-namespace llvm {
- class TargetMachine;
-
- class AlphaJITInfo : public TargetJITInfo {
- protected:
- TargetMachine &TM;
-
- //because gpdist are paired and relative to the pc of the first inst,
- //we need to have some state
- std::map<std::pair<void*, int>, void*> gpdistmap;
- public:
- explicit AlphaJITInfo(TargetMachine &tm) : TM(tm)
- { useGOT = true; }
-
- virtual StubLayout getStubLayout();
- virtual void *emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE);
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
-
- /// replaceMachineCodeForFunction - Make it so that calling the function
- /// whose machine code is at OLD turns into a call to NEW, perhaps by
- /// overwriting OLD with a branch to NEW. This is used for self-modifying
- /// code.
- ///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
- private:
- static const unsigned GOToffset = 4096;
-
- };
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
index 327ddb4..7667fd8 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -35,29 +35,21 @@
#include <cstdlib>
using namespace llvm;
-//These describe LDAx
-static const int IMM_LOW = -32768;
-static const int IMM_HIGH = 32767;
-static const int IMM_MULT = 65536;
+AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
+ : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
+ TII(tii) {
+}
-static long getUpper16(long l)
-{
- long y = l / IMM_MULT;
- if (l % IMM_MULT > IMM_HIGH)
+static long getUpper16(long l) {
+ long y = l / Alpha::IMM_MULT;
+ if (l % Alpha::IMM_MULT > Alpha::IMM_HIGH)
++y;
return y;
}
-static long getLower16(long l)
-{
+static long getLower16(long l) {
long h = getUpper16(l);
- return l - h * IMM_MULT;
-}
-
-AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
- : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
- TII(tii), curgpdist(0)
-{
+ return l - h * Alpha::IMM_MULT;
}
const unsigned* AlphaRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
@@ -86,19 +78,12 @@ BitVector AlphaRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-//
-bool AlphaRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->hasVarSizedObjects();
-}
-
void AlphaRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (hasFP(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (TFI->hasFP(MF)) {
// If we have a frame pointer, turn the adjcallstackup instruction into a
// 'sub ESP, <amt>' and the adjcallstackdown instruction into 'add ESP,
// <amt>'
@@ -108,7 +93,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned Align = TFI->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
MachineInstr *New;
@@ -146,7 +131,9 @@ AlphaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
- bool FP = hasFP(MF);
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ bool FP = TFI->hasFP(MF);
while (!MI.getOperand(i).isFI()) {
++i;
@@ -168,7 +155,7 @@ AlphaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
DEBUG(errs() << "Corrected Offset " << Offset
<< " for stack size: " << MF.getFrameInfo()->getStackSize() << "\n");
- if (Offset > IMM_HIGH || Offset < IMM_LOW) {
+ if (Offset > Alpha::IMM_HIGH || Offset < Alpha::IMM_LOW) {
DEBUG(errs() << "Unconditionally using R28 for evil purposes Offset: "
<< Offset << "\n");
//so in this case, we need to use a temporary register, and move the
@@ -186,111 +173,14 @@ AlphaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
-
-void AlphaRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- DebugLoc dl = (MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc());
- bool FP = hasFP(MF);
-
- //handle GP offset
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAHg), Alpha::R29)
- .addGlobalAddress(MF.getFunction())
- .addReg(Alpha::R27).addImm(++curgpdist);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAg), Alpha::R29)
- .addGlobalAddress(MF.getFunction())
- .addReg(Alpha::R29).addImm(curgpdist);
-
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::ALTENT))
- .addGlobalAddress(MF.getFunction());
-
- // Get the number of bytes to allocate from the FrameInfo
- long NumBytes = MFI->getStackSize();
-
- if (FP)
- NumBytes += 8; //reserve space for the old FP
-
- // Do we need to allocate space on the stack?
- if (NumBytes == 0) return;
-
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
- NumBytes = (NumBytes+Align-1)/Align*Align;
-
- // Update frame info to pretend that this is part of the stack...
- MFI->setStackSize(NumBytes);
-
- // adjust stack pointer: r30 -= numbytes
- NumBytes = -NumBytes;
- if (NumBytes >= IMM_LOW) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
- .addReg(Alpha::R30);
- } else if (getUpper16(NumBytes) >= IMM_LOW) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
- .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
- .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
- } else {
- report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
- }
-
- //now if we need to, save the old FP and set the new
- if (FP)
- {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::STQ))
- .addReg(Alpha::R15).addImm(0).addReg(Alpha::R30);
- //this must be the last instr in the prolog
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R15)
- .addReg(Alpha::R30).addReg(Alpha::R30);
- }
-
-}
-
-void AlphaRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert((MBBI->getOpcode() == Alpha::RETDAG ||
- MBBI->getOpcode() == Alpha::RETDAGp)
- && "Can only insert epilog into returning blocks");
- DebugLoc dl = MBBI->getDebugLoc();
-
- bool FP = hasFP(MF);
-
- // Get the number of bytes allocated from the FrameInfo...
- long NumBytes = MFI->getStackSize();
-
- //now if we need to, restore the old FP
- if (FP) {
- //copy the FP into the SP (discards allocas)
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R30).addReg(Alpha::R15)
- .addReg(Alpha::R15);
- //restore the FP
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDQ), Alpha::R15)
- .addImm(0).addReg(Alpha::R15);
- }
-
- if (NumBytes != 0) {
- if (NumBytes <= IMM_HIGH) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
- .addReg(Alpha::R30);
- } else if (getUpper16(NumBytes) <= IMM_HIGH) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
- .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
- .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
- } else {
- report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
- }
- }
-}
-
unsigned AlphaRegisterInfo::getRARegister() const {
return Alpha::R26;
}
unsigned AlphaRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? Alpha::R15 : Alpha::R30;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? Alpha::R15 : Alpha::R30;
}
unsigned AlphaRegisterInfo::getEHExceptionRegister() const {
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
index b164979..b0d4dd0 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
@@ -32,8 +32,6 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -41,11 +39,6 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- //void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
@@ -57,9 +50,6 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
static std::string getPrettyName(unsigned reg);
-
-private:
- mutable int curgpdist;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td b/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td
index 4dc04b8..3703dd4 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td
+++ b/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td
@@ -50,11 +50,11 @@ def s_ftoi : InstrItinClass;
def s_itof : InstrItinClass;
def s_pseudo : InstrItinClass;
-//Table 2­4 Instruction Class Latency in Cycles
+//Table 2-4 Instruction Class Latency in Cycles
//modified some
def Alpha21264Itineraries : ProcessorItineraries<
- [L0, L1, FST0, FST1, U0, U1, FA, FM], [
+ [L0, L1, FST0, FST1, U0, U1, FA, FM], [], [
InstrItinData<s_ild , [InstrStage<3, [L0, L1]>]>,
InstrItinData<s_fld , [InstrStage<4, [L0, L1]>]>,
InstrItinData<s_ist , [InstrStage<0, [L0, L1]>]>,
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp
index fc9be03..b53533b 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "Alpha.h"
-#include "AlphaJITInfo.h"
#include "AlphaMCAsmInfo.h"
#include "AlphaTargetMachine.h"
#include "llvm/PassManager.h"
@@ -29,8 +28,7 @@ AlphaTargetMachine::AlphaTargetMachine(const Target &T, const std::string &TT,
const std::string &FS)
: LLVMTargetMachine(T, TT),
DataLayout("e-f128:128:128-n64"),
- FrameInfo(TargetFrameInfo::StackGrowsDown, 16, 0),
- JITInfo(*this),
+ FrameLowering(Subtarget),
Subtarget(TT, FS),
TLInfo(*this),
TSInfo(*this) {
@@ -54,9 +52,3 @@ bool AlphaTargetMachine::addPreEmitPass(PassManagerBase &PM,
PM.add(createAlphaLLRPPass(*this));
return false;
}
-bool AlphaTargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
- JITCodeEmitter &JCE) {
- PM.add(createAlphaJITCodeEmitterPass(*this, JCE));
- return false;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h
index 153944e..26238fb 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h
@@ -14,14 +14,14 @@
#ifndef ALPHA_TARGETMACHINE_H
#define ALPHA_TARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "AlphaInstrInfo.h"
-#include "AlphaJITInfo.h"
#include "AlphaISelLowering.h"
+#include "AlphaFrameLowering.h"
#include "AlphaSelectionDAGInfo.h"
#include "AlphaSubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -30,8 +30,7 @@ class GlobalValue;
class AlphaTargetMachine : public LLVMTargetMachine {
const TargetData DataLayout; // Calculates type size & alignment
AlphaInstrInfo InstrInfo;
- TargetFrameInfo FrameInfo;
- AlphaJITInfo JITInfo;
+ AlphaFrameLowering FrameLowering;
AlphaSubtarget Subtarget;
AlphaTargetLowering TLInfo;
AlphaSelectionDAGInfo TSInfo;
@@ -41,7 +40,9 @@ public:
const std::string &FS);
virtual const AlphaInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const AlphaSubtarget *getSubtargetImpl() const{ return &Subtarget; }
virtual const AlphaRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
@@ -53,15 +54,10 @@ public:
return &TSInfo;
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
- virtual AlphaJITInfo* getJITInfo() {
- return &JITInfo;
- }
// Pass Pipeline Configuration
virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
- JITCodeEmitter &JCE);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Blackfin/AsmPrinter/BlackfinAsmPrinter.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp
index 6ba258b..6ba258b 100644
--- a/contrib/llvm/lib/Target/Blackfin/AsmPrinter/BlackfinAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp
new file mode 100644
index 0000000..08bb952
--- /dev/null
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp
@@ -0,0 +1,124 @@
+//====- BlackfinFrameLowering.cpp - Blackfin Frame Information --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Blackfin implementation of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BlackfinFrameLowering.h"
+#include "BlackfinInstrInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas,
+// adjusts its stack, or if frame pointer elimination is disabled.
+bool BlackfinFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return DisableFramePointerElim(MF) ||
+ MFI->adjustsStack() || MFI->hasVarSizedObjects();
+}
+
+// Emit a prologue that sets up a stack frame.
+// On function entry, R0-R2 and P0 may hold arguments.
+// R3, P1, and P2 may be used as scratch registers
+void BlackfinFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const BlackfinRegisterInfo *RegInfo =
+ static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const BlackfinInstrInfo &TII =
+ *static_cast<const BlackfinInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ int FrameSize = MFI->getStackSize();
+ if (FrameSize%4) {
+ FrameSize = (FrameSize+3) & ~3;
+ MFI->setStackSize(FrameSize);
+ }
+
+ if (!hasFP(MF)) {
+ assert(!MFI->adjustsStack() &&
+ "FP elimination on a non-leaf function is not supported");
+ RegInfo->adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, -FrameSize);
+ return;
+ }
+
+ // emit a LINK instruction
+ if (FrameSize <= 0x3ffff) {
+ BuildMI(MBB, MBBI, dl, TII.get(BF::LINK)).addImm(FrameSize);
+ return;
+ }
+
+ // Frame is too big, do a manual LINK:
+ // [--SP] = RETS;
+ // [--SP] = FP;
+ // FP = SP;
+ // P1 = -FrameSize;
+ // SP = SP + P1;
+ BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
+ .addReg(BF::RETS, RegState::Kill);
+ BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
+ .addReg(BF::FP, RegState::Kill);
+ BuildMI(MBB, MBBI, dl, TII.get(BF::MOVE), BF::FP)
+ .addReg(BF::SP);
+ RegInfo->loadConstant(MBB, MBBI, dl, BF::P1, -FrameSize);
+ BuildMI(MBB, MBBI, dl, TII.get(BF::ADDpp), BF::SP)
+ .addReg(BF::SP, RegState::Kill)
+ .addReg(BF::P1, RegState::Kill);
+
+}
+
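[Editor's note, not part of the diff: two alignment idioms appear in this change — the Blackfin prologue above rounds the frame size with the mask form (FrameSize + 3) & ~3, while the Alpha call-frame code earlier uses (Amount + Align - 1) / Align * Align. For a power-of-two alignment the two agree on non-negative sizes; a quick check:]

    #include <cassert>

    int main() {
      const unsigned Align = 4; // both targets keep these sizes 4-byte aligned
      for (unsigned n = 0; n <= 64; ++n)
        assert(((n + 3) & ~3u) == (n + Align - 1) / Align * Align);
      return 0;
    }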
+void BlackfinFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const BlackfinRegisterInfo *RegInfo =
+ static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const BlackfinInstrInfo &TII =
+ *static_cast<const BlackfinInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ int FrameSize = MFI->getStackSize();
+ assert(FrameSize%4 == 0 && "Misaligned frame size");
+
+ if (!hasFP(MF)) {
+ assert(!MFI->adjustsStack() &&
+ "FP elimination on a non-leaf function is not supported");
+ RegInfo->adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, FrameSize);
+ return;
+ }
+
+ // emit an UNLINK instruction
+ BuildMI(MBB, MBBI, dl, TII.get(BF::UNLINK));
+}
+
+void BlackfinFrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const BlackfinRegisterInfo *RegInfo =
+ static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const TargetRegisterClass *RC = BF::DPRegisterClass;
+
+ if (RegInfo->requiresRegisterScavenging(MF)) {
+ // Reserve a slot close to SP or frame pointer.
+ RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ }
+}
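[Editor's note, not part of the diff: processFunctionBeforeCalleeSavedScan reserves an emergency spill slot sized and aligned for one register of the scavenging class (BF::DP here, 32-bit registers). A schematic of what CreateStackObject contributes to the frame, with stand-in types and made-up sizes rather than the real MachineFrameInfo API:]

    #include <cassert>

    // Stand-ins for TargetRegisterClass::getSize()/getAlignment(); the real
    // slot is created through MachineFrameInfo::CreateStackObject.
    struct RegClassInfo { unsigned Size, Alignment; };

    static unsigned reserveSlot(unsigned &stackSize, RegClassInfo rc) {
      stackSize = (stackSize + rc.Alignment - 1) & ~(rc.Alignment - 1); // align up
      unsigned offset = stackSize;   // the scavenger's emergency slot
      stackSize += rc.Size;
      return offset;
    }

    int main() {
      unsigned frame = 6;            // hypothetical frame size so far
      RegClassInfo dp = {4, 4};      // one 32-bit DP register
      assert(reserveSlot(frame, dp) == 8 && frame == 12);
      return 0;
    }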
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h
new file mode 100644
index 0000000..3d2ee25
--- /dev/null
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h
@@ -0,0 +1,46 @@
+//=- BlackfinFrameLowering.h - Define frame lowering for Blackfin -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Blackfin-specific subclass of TargetFrameLowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BLACKFIN_FRAMELOWERING_H
+#define BLACKFIN_FRAMELOWERING_H
+
+#include "Blackfin.h"
+#include "BlackfinSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class BlackfinSubtarget;
+
+class BlackfinFrameLowering : public TargetFrameLowering {
+protected:
+ const BlackfinSubtarget &STI;
+
+public:
+ explicit BlackfinFrameLowering(const BlackfinSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
index 80ee107..9df2aee 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
@@ -51,8 +51,7 @@ namespace {
private:
SDNode *Select(SDNode *N);
- bool SelectADDRspii(SDNode *Op, SDValue Addr,
- SDValue &Base, SDValue &Offset);
+ bool SelectADDRspii(SDValue Addr, SDValue &Base, SDValue &Offset);
// Walk the DAG after instruction selection, fixing register class issues.
void FixRegisterClasses(SelectionDAG &DAG);
@@ -94,8 +93,7 @@ SDNode *BlackfinDAGToDAGISel::Select(SDNode *N) {
return SelectCode(N);
}
-bool BlackfinDAGToDAGISel::SelectADDRspii(SDNode *Op,
- SDValue Addr,
+bool BlackfinDAGToDAGISel::SelectADDRspii(SDValue Addr,
SDValue &Base,
SDValue &Offset) {
FrameIndexSDNode *FIN = 0;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
index 6e828e1..dd27d0a 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
@@ -15,6 +15,7 @@
#include "BlackfinISelLowering.h"
#include "BlackfinTargetMachine.h"
#include "llvm/Function.h"
+#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -207,7 +208,8 @@ BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned ObjSize = VA.getLocVT().getStoreSize();
int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0,
+ InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ MachinePointerInfo(),
false, false, 0));
}
}
@@ -332,8 +334,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue OffsetN = DAG.getIntPtrConstant(Offset);
OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN,
- PseudoSourceValue::getStack(),
- Offset, false, false, 0));
+ MachinePointerInfo(),false, false, 0));
}
}
@@ -364,7 +365,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
std::vector<EVT> NodeTys;
NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+ NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
SDValue Ops[] = { Chain, Callee, InFlag };
Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops,
InFlag.getNode() ? 3 : 2);
@@ -431,7 +432,7 @@ SDValue BlackfinTargetLowering::LowerADDE(SDValue Op, SelectionDAG &DAG) const {
SDValue(CarryIn, 0));
// Add operands, produce sum and carry flag
- SDNode *Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Flag,
+ SDNode *Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Glue,
Op.getOperand(0), Op.getOperand(1));
// Store intermediate carry from Sum
@@ -439,11 +440,11 @@ SDValue BlackfinTargetLowering::LowerADDE(SDValue Op, SelectionDAG &DAG) const {
/* flag= */ SDValue(Sum, 1));
// Add incoming carry, again producing an output flag
- Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Flag,
+ Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Glue,
SDValue(Sum, 0), SDValue(CarryIn, 0));
// Update AC0 with the intermediate carry, producing a flag.
- SDNode *CarryOut = DAG.getMachineNode(BF::OR_ac0_cc, dl, MVT::Flag,
+ SDNode *CarryOut = DAG.getMachineNode(BF::OR_ac0_cc, dl, MVT::Glue,
SDValue(Carry1, 0));
// Compose (i32, flag) pair
@@ -549,6 +550,52 @@ BlackfinTargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+BlackfinTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+
+ // Blackfin-specific constraints
+ case 'a':
+ case 'd':
+ case 'z':
+ case 'D':
+ case 'W':
+ case 'e':
+ case 'b':
+ case 'v':
+ case 'f':
+ case 'c':
+ case 't':
+ case 'u':
+ case 'k':
+ case 'x':
+ case 'y':
+ case 'w':
+ return CW_Register;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'Z':
+ case 'Y':
+ return CW_SpecificReg;
+ }
+ return weight;
+}
+
/// getRegForInlineAsmConstraint - Return register no and class for a C_Register
/// constraint.
std::pair<unsigned, const TargetRegisterClass*> BlackfinTargetLowering::
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
index 6bebcc3..15a745f 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
@@ -39,6 +39,12 @@ namespace llvm {
SelectionDAG &DAG) const;
ConstraintType getConstraintType(const std::string &Constraint) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
std::vector<unsigned>
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
index 8034a7f..5b59d77 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
@@ -23,17 +23,17 @@ def SDT_BfinCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
def BfinCallseqStart : SDNode<"ISD::CALLSEQ_START", SDT_BfinCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def BfinCallseqEnd : SDNode<"ISD::CALLSEQ_END", SDT_BfinCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def SDT_BfinCall : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def BfinCall : SDNode<"BFISD::CALL", SDT_BfinCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def BfinRet: SDNode<"BFISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def BfinWrapper: SDNode<"BFISD::Wrapper", SDTIntUnaryOp>;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
index a518312..b4a9b84 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -50,6 +50,8 @@ BlackfinRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
BitVector
BlackfinRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
using namespace BF;
BitVector Reserved(getNumRegs());
Reserved.set(AZ);
@@ -70,20 +72,11 @@ BlackfinRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(L3);
Reserved.set(SP);
Reserved.set(RETS);
- if (hasFP(MF))
+ if (TFI->hasFP(MF))
Reserved.set(FP);
return Reserved;
}
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool BlackfinRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) ||
- MFI->adjustsStack() || MFI->hasVarSizedObjects();
-}
-
bool BlackfinRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
return true;
@@ -161,7 +154,9 @@ void BlackfinRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
int64_t Amount = I->getOperand(0).getImm();
if (Amount != 0) {
assert(Amount%4 == 0 && "Unaligned call frame size");
@@ -196,6 +191,7 @@ BlackfinRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc DL = MI.getDebugLoc();
unsigned FIPos;
@@ -208,7 +204,7 @@ BlackfinRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex)
+ MI.getOperand(FIPos+1).getImm();
unsigned BaseReg = BF::FP;
- if (hasFP(MF)) {
+ if (TFI->hasFP(MF)) {
assert(SPAdj==0 && "Unexpected SP adjust in function with frame pointer");
} else {
BaseReg = BF::SP;
@@ -329,93 +325,15 @@ BlackfinRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
-void BlackfinRegisterInfo::
-processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterClass *RC = BF::DPRegisterClass;
- if (requiresRegisterScavenging(MF)) {
- // Reserve a slot close to SP or frame pointer.
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
-}
-
-// Emit a prologue that sets up a stack frame.
-// On function entry, R0-R2 and P0 may hold arguments.
-// R3, P1, and P2 may be used as scratch registers
-void BlackfinRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- int FrameSize = MFI->getStackSize();
- if (FrameSize%4) {
- FrameSize = (FrameSize+3) & ~3;
- MFI->setStackSize(FrameSize);
- }
-
- if (!hasFP(MF)) {
- assert(!MFI->adjustsStack() &&
- "FP elimination on a non-leaf function is not supported");
- adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, -FrameSize);
- return;
- }
-
- // emit a LINK instruction
- if (FrameSize <= 0x3ffff) {
- BuildMI(MBB, MBBI, dl, TII.get(BF::LINK)).addImm(FrameSize);
- return;
- }
-
- // Frame is too big, do a manual LINK:
- // [--SP] = RETS;
- // [--SP] = FP;
- // FP = SP;
- // P1 = -FrameSize;
- // SP = SP + P1;
- BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
- .addReg(BF::RETS, RegState::Kill);
- BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
- .addReg(BF::FP, RegState::Kill);
- BuildMI(MBB, MBBI, dl, TII.get(BF::MOVE), BF::FP)
- .addReg(BF::SP);
- loadConstant(MBB, MBBI, dl, BF::P1, -FrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(BF::ADDpp), BF::SP)
- .addReg(BF::SP, RegState::Kill)
- .addReg(BF::P1, RegState::Kill);
-
-}
-
-void BlackfinRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- DebugLoc dl = MBBI->getDebugLoc();
-
- int FrameSize = MFI->getStackSize();
- assert(FrameSize%4 == 0 && "Misaligned frame size");
-
- if (!hasFP(MF)) {
- assert(!MFI->adjustsStack() &&
- "FP elimination on a non-leaf function is not supported");
- adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, FrameSize);
- return;
- }
-
- // emit an UNLINK instruction
- BuildMI(MBB, MBBI, dl, TII.get(BF::UNLINK));
-}
-
unsigned BlackfinRegisterInfo::getRARegister() const {
return BF::RETS;
}
unsigned
BlackfinRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? BF::FP : BF::SP;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? BF::FP : BF::SP;
}
unsigned BlackfinRegisterInfo::getEHExceptionRegister() const {
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
index bb83c34..642b8ad 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
@@ -41,8 +41,6 @@ namespace llvm {
return &BF::PRegClass;
}
- bool hasFP(const MachineFunction &MF) const;
-
// bool hasReservedCallFrame(MachineFunction &MF) const;
bool requiresRegisterScavenging(const MachineFunction &MF) const;
@@ -54,12 +52,6 @@ namespace llvm {
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getRARegister() const;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td
index e1cfae9..f5dd439 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td
@@ -252,9 +252,9 @@ def P : RegisterClass<"BF", [i32], 32, [P0, P1, P2, P3, P4, P5, FP, SP]> {
PClass::iterator
PClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
return allocation_order_begin(MF)
- + (RI->hasFP(MF) ? 7 : 6);
+ + (TFI->hasFP(MF) ? 7 : 6);
}
}];
}
@@ -275,9 +275,9 @@ def DP : RegisterClass<"BF", [i32], 32,
DPClass::iterator
DPClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
return allocation_order_begin(MF)
- + (RI->hasFP(MF) ? 15 : 14);
+ + (TFI->hasFP(MF) ? 15 : 14);
}
}];
}
@@ -295,9 +295,9 @@ def GR : RegisterClass<"BF", [i32], 32,
GRClass::iterator
GRClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
return allocation_order_begin(MF)
- + (RI->hasFP(MF) ? 31 : 30);
+ + (TFI->hasFP(MF) ? 31 : 30);
}
}];
}
@@ -318,9 +318,9 @@ def ALL : RegisterClass<"BF", [i32], 32,
ALLClass::iterator
ALLClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
return allocation_order_begin(MF)
- + (RI->hasFP(MF) ? 31 : 30);
+ + (TFI->hasFP(MF) ? 31 : 30);
}
}];
}
@@ -334,9 +334,9 @@ def PI : RegisterClass<"BF", [i32], 32,
PIClass::iterator
PIClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
return allocation_order_begin(MF)
- + (RI->hasFP(MF) ? 11 : 10);
+ + (TFI->hasFP(MF) ? 11 : 10);
}
}];
}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp
index 66a2f68..e11920f 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp
@@ -33,7 +33,7 @@ BlackfinTargetMachine::BlackfinTargetMachine(const Target &T,
TLInfo(*this),
TSInfo(*this),
InstrInfo(Subtarget),
- FrameInfo(TargetFrameInfo::StackGrowsDown, 4, 0) {
+ FrameLowering(Subtarget) {
}
bool BlackfinTargetMachine::addInstSelector(PassManagerBase &PM,
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h
index a63aa54..29b2b17 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h
@@ -14,14 +14,15 @@
#ifndef BLACKFINTARGETMACHINE_H
#define BLACKFINTARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "BlackfinInstrInfo.h"
-#include "BlackfinSubtarget.h"
+#include "BlackfinIntrinsicInfo.h"
#include "BlackfinISelLowering.h"
+#include "BlackfinFrameLowering.h"
+#include "BlackfinSubtarget.h"
#include "BlackfinSelectionDAGInfo.h"
-#include "BlackfinIntrinsicInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -31,14 +32,16 @@ namespace llvm {
BlackfinTargetLowering TLInfo;
BlackfinSelectionDAGInfo TSInfo;
BlackfinInstrInfo InstrInfo;
- TargetFrameInfo FrameInfo;
+ BlackfinFrameLowering FrameLowering;
BlackfinIntrinsicInfo IntrinsicInfo;
public:
BlackfinTargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
virtual const BlackfinInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const BlackfinSubtarget *getSubtargetImpl() const {
return &Subtarget;
}
diff --git a/contrib/llvm/lib/Target/CBackend/CBackend.cpp b/contrib/llvm/lib/Target/CBackend/CBackend.cpp
index 270fff6..6c555a3 100644
--- a/contrib/llvm/lib/Target/CBackend/CBackend.cpp
+++ b/contrib/llvm/lib/Target/CBackend/CBackend.cpp
@@ -47,12 +47,16 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include "llvm/Config/config.h"
#include <algorithm>
+// Some MS header decided to define setjmp as _setjmp; undo that for this file.
+#ifdef _MSC_VER
+#undef setjmp
+#endif
using namespace llvm;
-extern "C" void LLVMInitializeCBackendTarget() {
+extern "C" void LLVMInitializeCBackendTarget() {
// Register the target.
RegisterTargetMachine<CTargetMachine> X(TheCBackendTarget);
}
@@ -72,8 +76,10 @@ namespace {
class CBackendNameAllUsedStructsAndMergeFunctions : public ModulePass {
public:
static char ID;
- CBackendNameAllUsedStructsAndMergeFunctions()
- : ModulePass(ID) {}
+ CBackendNameAllUsedStructsAndMergeFunctions()
+ : ModulePass(ID) {
+ initializeFindUsedTypesPass(*PassRegistry::getPassRegistry());
+ }
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<FindUsedTypes>();
}
@@ -110,9 +116,10 @@ namespace {
public:
static char ID;
explicit CWriter(formatted_raw_ostream &o)
- : FunctionPass(ID), Out(o), IL(0), Mang(0), LI(0),
+ : FunctionPass(ID), Out(o), IL(0), Mang(0), LI(0),
TheModule(0), TAsm(0), TCtx(0), TD(0), OpaqueCounter(0),
NextAnonValueNumber(0) {
+ initializeLoopInfoPass(*PassRegistry::getPassRegistry());
FPCounter = 0;
}
@@ -183,7 +190,7 @@ namespace {
Out << ")";
}
}
-
+
void writeOperand(Value *Operand, bool Static = false);
void writeInstComputationInline(Instruction &I);
void writeOperandInternal(Value *Operand, bool Static = false);
@@ -224,7 +231,7 @@ namespace {
return ByValParams.count(A);
return isa<GlobalVariable>(V) || isDirectAlloca(V);
}
-
+
// isInlinableInst - Attempt to inline instructions into their uses to build
// trees as much as possible. To do this, we have to consistently decide
// what is acceptable to inline, so that variable declarations don't get
@@ -233,7 +240,7 @@ namespace {
static bool isInlinableInst(const Instruction &I) {
// Always inline cmp instructions, even if they are shared by multiple
// expressions. GCC generates horrible code if we don't.
- if (isa<CmpInst>(I))
+ if (isa<CmpInst>(I))
return true;
// Must be an expression, must be used exactly once. If it is dead, we
@@ -270,14 +277,14 @@ namespace {
return 0;
return AI;
}
-
+
// isInlineAsm - Check if the instruction is a call to an inline asm chunk
static bool isInlineAsm(const Instruction& I) {
if (const CallInst *CI = dyn_cast<CallInst>(&I))
return isa<InlineAsm>(CI->getCalledValue());
return false;
}
-
+
// Instruction visitation functions
friend class InstVisitor<CWriter>;
@@ -310,7 +317,7 @@ namespace {
void visitStoreInst (StoreInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
void visitVAArgInst (VAArgInst &I);
-
+
void visitInsertElementInst(InsertElementInst &I);
void visitExtractElementInst(ExtractElementInst &I);
void visitShuffleVectorInst(ShuffleVectorInst &SVI);
@@ -346,7 +353,7 @@ char CWriter::ID = 0;
static std::string CBEMangle(const std::string &S) {
std::string Result;
-
+
for (unsigned i = 0, e = S.size(); i != e; ++i)
if (isalnum(S[i]) || S[i] == '_') {
Result += S[i];
@@ -375,7 +382,7 @@ bool CBackendNameAllUsedStructsAndMergeFunctions::runOnModule(Module &M) {
for (TypeSymbolTable::iterator TI = TST.begin(), TE = TST.end();
TI != TE; ) {
TypeSymbolTable::iterator I = TI++;
-
+
// If this isn't a struct or array type, remove it from our set of types
// to name. This simplifies emission later.
if (!I->second->isStructTy() && !I->second->isOpaqueTy() &&
@@ -403,8 +410,8 @@ bool CBackendNameAllUsedStructsAndMergeFunctions::runOnModule(Module &M) {
++RenameCounter;
Changed = true;
}
-
-
+
+
// Loop over all external functions and globals. If we have two with
// identical names, merge them.
// FIXME: This code should disappear when we don't allow values with the same
@@ -440,7 +447,7 @@ bool CBackendNameAllUsedStructsAndMergeFunctions::runOnModule(Module &M) {
}
}
}
-
+
return Changed;
}
@@ -479,20 +486,20 @@ void CWriter::printStructReturnPointerFunctionType(raw_ostream &Out,
FunctionInnards << "void";
}
FunctionInnards << ')';
- printType(Out, RetTy,
+ printType(Out, RetTy,
/*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt), FunctionInnards.str());
}
raw_ostream &
CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
const std::string &NameSoFar) {
- assert((Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) &&
+ assert((Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) &&
"Invalid type for printSimpleType");
switch (Ty->getTypeID()) {
case Type::VoidTyID: return Out << "void " << NameSoFar;
case Type::IntegerTyID: {
unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
- if (NumBits == 1)
+ if (NumBits == 1)
return Out << "bool " << NameSoFar;
else if (NumBits <= 8)
return Out << (isSigned?"signed":"unsigned") << " char " << NameSoFar;
@@ -502,7 +509,7 @@ CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
return Out << (isSigned?"signed":"unsigned") << " int " << NameSoFar;
else if (NumBits <= 64)
return Out << (isSigned?"signed":"unsigned") << " long long "<< NameSoFar;
- else {
+ else {
assert(NumBits <= 128 && "Bit widths > 128 not implemented yet");
return Out << (isSigned?"llvmInt128":"llvmUInt128") << " " << NameSoFar;
}
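[Editor's note, not part of the diff: the IntegerTyID branch shown above picks the smallest conventional C type that holds the LLVM integer width, falling back to an emulated 128-bit type. A distilled standalone sketch using the same cut-offs:]

    #include <cassert>
    #include <string>

    // Distilled from printSimpleType's integer branch: smallest conventional
    // C type holding NumBits, with 1 -> bool and >64 -> emulated 128-bit types.
    static std::string cIntType(unsigned NumBits, bool isSigned) {
      const std::string s = isSigned ? "signed " : "unsigned ";
      if (NumBits == 1)  return "bool";
      if (NumBits <= 8)  return s + "char";
      if (NumBits <= 16) return s + "short";
      if (NumBits <= 32) return s + "int";
      if (NumBits <= 64) return s + "long long";
      return isSigned ? "llvmInt128" : "llvmUInt128";
    }

    int main() {
      assert(cIntType(17, true)  == "signed int");
      assert(cIntType(64, false) == "unsigned long long");
      assert(cIntType(1,  true)  == "bool");
      return 0;
    }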
@@ -514,14 +521,18 @@ CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
case Type::X86_FP80TyID:
case Type::PPC_FP128TyID:
case Type::FP128TyID: return Out << "long double " << NameSoFar;
-
+
+ case Type::X86_MMXTyID:
+ return printSimpleType(Out, Type::getInt32Ty(Ty->getContext()), isSigned,
+ " __attribute__((vector_size(64))) " + NameSoFar);
+
case Type::VectorTyID: {
const VectorType *VTy = cast<VectorType>(Ty);
return printSimpleType(Out, VTy->getElementType(), isSigned,
" __attribute__((vector_size(" +
utostr(TD->getTypeAllocSize(VTy)) + " ))) " + NameSoFar);
}
-
+
default:
#ifndef NDEBUG
errs() << "Unknown primitive type: " << *Ty << "\n";
@@ -575,7 +586,7 @@ raw_ostream &CWriter::printType(raw_ostream &Out, const Type *Ty,
FunctionInnards << "void";
}
FunctionInnards << ')';
- printType(Out, FTy->getReturnType(),
+ printType(Out, FTy->getReturnType(),
/*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt), FunctionInnards.str());
return Out;
}
@@ -759,7 +770,7 @@ static bool isFPCSafeToPrint(const ConstantFP *CFP) {
}
/// Print out the casting for a cast operation. This does the double casting
-/// necessary for conversion to the destination type, if necessary.
+/// necessary for conversion to the destination type, if necessary.
/// @brief Print a cast
void CWriter::printCast(unsigned opc, const Type *SrcTy, const Type *DstTy) {
// Print the destination type cast
@@ -782,7 +793,7 @@ void CWriter::printCast(unsigned opc, const Type *SrcTy, const Type *DstTy) {
printSimpleType(Out, DstTy, false);
Out << ')';
break;
- case Instruction::SExt:
+ case Instruction::SExt:
case Instruction::FPToSI: // For these, make sure we get a signed dest
Out << '(';
printSimpleType(Out, DstTy, true);
@@ -803,7 +814,7 @@ void CWriter::printCast(unsigned opc, const Type *SrcTy, const Type *DstTy) {
case Instruction::SIToFP:
case Instruction::SExt:
Out << '(';
- printSimpleType(Out, SrcTy, true);
+ printSimpleType(Out, SrcTy, true);
Out << ')';
break;
case Instruction::IntToPtr:
@@ -895,7 +906,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
case Instruction::AShr:
{
Out << '(';
- bool NeedsClosingParens = printConstExprCast(CE, Static);
+ bool NeedsClosingParens = printConstExprCast(CE, Static);
printConstantWithCast(CE->getOperand(0), CE->getOpcode());
switch (CE->getOpcode()) {
case Instruction::Add:
@@ -905,10 +916,10 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
case Instruction::Mul:
case Instruction::FMul: Out << " * "; break;
case Instruction::URem:
- case Instruction::SRem:
+ case Instruction::SRem:
case Instruction::FRem: Out << " % "; break;
- case Instruction::UDiv:
- case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
case Instruction::FDiv: Out << " / "; break;
case Instruction::And: Out << " & "; break;
case Instruction::Or: Out << " | "; break;
@@ -920,7 +931,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
switch (CE->getPredicate()) {
case ICmpInst::ICMP_EQ: Out << " == "; break;
case ICmpInst::ICMP_NE: Out << " != "; break;
- case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_ULT: Out << " < "; break;
case ICmpInst::ICMP_SLE:
case ICmpInst::ICMP_ULE: Out << " <= "; break;
@@ -940,8 +951,8 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
return;
}
case Instruction::FCmp: {
- Out << '(';
- bool NeedsClosingParens = printConstExprCast(CE, Static);
+ Out << '(';
+ bool NeedsClosingParens = printConstExprCast(CE, Static);
if (CE->getPredicate() == FCmpInst::FCMP_FALSE)
Out << "0";
else if (CE->getPredicate() == FCmpInst::FCMP_TRUE)
@@ -1006,18 +1017,18 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
else {
Out << "((";
printSimpleType(Out, Ty, false) << ')';
- if (CI->isMinValue(true))
+ if (CI->isMinValue(true))
Out << CI->getZExtValue() << 'u';
else
Out << CI->getSExtValue();
Out << ')';
}
return;
- }
+ }
switch (CPV->getType()->getTypeID()) {
case Type::FloatTyID:
- case Type::DoubleTyID:
+ case Type::DoubleTyID:
case Type::X86_FP80TyID:
case Type::PPC_FP128TyID:
case Type::FP128TyID: {
@@ -1027,8 +1038,8 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
// Because of FP precision problems we must load from a stack allocated
// value that holds the value in hex.
Out << "(*(" << (FPC->getType() == Type::getFloatTy(CPV->getContext()) ?
- "float" :
- FPC->getType() == Type::getDoubleTy(CPV->getContext()) ?
+ "float" :
+ FPC->getType() == Type::getDoubleTy(CPV->getContext()) ?
"double" :
"long double")
<< "*)&FPConstant" << I->second << ')';
@@ -1047,7 +1058,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
Tmp.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &LosesInfo);
V = Tmp.convertToDouble();
}
-
+
if (IsNAN(V)) {
// The value is NaN
@@ -1211,10 +1222,10 @@ bool CWriter::printConstExprCast(const ConstantExpr* CE, bool Static) {
// We need to cast integer arithmetic so that it is always performed
// as unsigned, to avoid undefined behavior on overflow.
case Instruction::LShr:
- case Instruction::URem:
+ case Instruction::URem:
case Instruction::UDiv: NeedsExplicitCast = true; break;
case Instruction::AShr:
- case Instruction::SRem:
+ case Instruction::SRem:
case Instruction::SDiv: NeedsExplicitCast = true; TypeIsSigned = true; break;
case Instruction::SExt:
Ty = CE->getType();
@@ -1267,7 +1278,7 @@ void CWriter::printConstantWithCast(Constant* CPV, unsigned Opcode) {
switch (Opcode) {
default:
// for most instructions, it doesn't matter
- break;
+ break;
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
@@ -1294,7 +1305,7 @@ void CWriter::printConstantWithCast(Constant* CPV, unsigned Opcode) {
Out << ")";
printConstant(CPV, false);
Out << ")";
- } else
+ } else
printConstant(CPV, false);
}
@@ -1312,16 +1323,16 @@ std::string CWriter::GetValueName(const Value *Operand) {
Mang->getNameWithPrefix(Str, GV, false);
return CBEMangle(Str.str().str());
}
-
+
std::string Name = Operand->getName();
-
+
if (Name.empty()) { // Assign unique names to local temporaries.
unsigned &No = AnonValueNumbers[Operand];
if (No == 0)
No = ++NextAnonValueNumber;
Name = "tmp__" + utostr(No);
}
-
+
std::string VarName;
VarName.reserve(Name.capacity());
@@ -1348,7 +1359,7 @@ void CWriter::writeInstComputationInline(Instruction &I) {
// Validate this.
const Type *Ty = I.getType();
if (Ty->isIntegerTy() && (Ty!=Type::getInt1Ty(I.getContext()) &&
- Ty!=Type::getInt8Ty(I.getContext()) &&
+ Ty!=Type::getInt8Ty(I.getContext()) &&
Ty!=Type::getInt16Ty(I.getContext()) &&
Ty!=Type::getInt32Ty(I.getContext()) &&
Ty!=Type::getInt64Ty(I.getContext()))) {
@@ -1364,12 +1375,12 @@ void CWriter::writeInstComputationInline(Instruction &I) {
if (I.getType() == Type::getInt1Ty(I.getContext()) &&
!isa<ICmpInst>(I) && !isa<FCmpInst>(I))
NeedBoolTrunc = true;
-
+
if (NeedBoolTrunc)
Out << "((";
-
+
visit(I);
-
+
if (NeedBoolTrunc)
Out << ")&1)";
}
@@ -1404,9 +1415,9 @@ void CWriter::writeOperand(Value *Operand, bool Static) {
Out << ')';
}
-// Some instructions need to have their result value casted back to the
-// original types because their operands were casted to the expected type.
-// This function takes care of detecting that case and printing the cast
+// Some instructions need to have their result value casted back to the
+// original types because their operands were casted to the expected type.
+// This function takes care of detecting that case and printing the cast
// for the Instruction.
bool CWriter::writeInstructionCast(const Instruction &I) {
const Type *Ty = I.getOperand(0)->getType();
@@ -1417,15 +1428,15 @@ bool CWriter::writeInstructionCast(const Instruction &I) {
// We need to cast integer arithmetic so that it is always performed
// as unsigned, to avoid undefined behavior on overflow.
case Instruction::LShr:
- case Instruction::URem:
- case Instruction::UDiv:
+ case Instruction::URem:
+ case Instruction::UDiv:
Out << "((";
printSimpleType(Out, Ty, false);
Out << ")(";
return true;
case Instruction::AShr:
- case Instruction::SRem:
- case Instruction::SDiv:
+ case Instruction::SRem:
+ case Instruction::SDiv:
Out << "((";
printSimpleType(Out, Ty, true);
Out << ")(";
@@ -1437,7 +1448,7 @@ bool CWriter::writeInstructionCast(const Instruction &I) {
// Write the operand with a cast to another type based on the Opcode being used.
// This will be used in cases where an instruction has specific type
-// requirements (usually signedness) for its operands.
+// requirements (usually signedness) for its operands.
void CWriter::writeOperandWithCast(Value* Operand, unsigned Opcode) {
// Extract the operand's type, we'll need it.
@@ -1455,7 +1466,7 @@ void CWriter::writeOperandWithCast(Value* Operand, unsigned Opcode) {
switch (Opcode) {
default:
// for most instructions, it doesn't matter
- break;
+ break;
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
@@ -1484,14 +1495,14 @@ void CWriter::writeOperandWithCast(Value* Operand, unsigned Opcode) {
Out << ")";
writeOperand(Operand);
Out << ")";
- } else
+ } else
writeOperand(Operand);
}
-// Write the operand with a cast to another type based on the icmp predicate
-// being used.
+// Write the operand with a cast to another type based on the icmp predicate
+// being used.
void CWriter::writeOperandWithCast(Value* Operand, const ICmpInst &Cmp) {
- // This has to do a cast to ensure the operand has the right signedness.
+ // This has to do a cast to ensure the operand has the right signedness.
// Also, if the operand is a pointer, we make sure to cast to an integer when
// doing the comparison both for signedness and so that the C compiler doesn't
// optimize things like "p < NULL" to false (p may contain an integer value
@@ -1504,7 +1515,7 @@ void CWriter::writeOperandWithCast(Value* Operand, const ICmpInst &Cmp) {
writeOperand(Operand);
return;
}
-
+
// Should this be a signed comparison? If so, convert to signed.
bool castIsSigned = Cmp.isSigned();
@@ -1512,7 +1523,7 @@ void CWriter::writeOperandWithCast(Value* Operand, const ICmpInst &Cmp) {
const Type* OpTy = Operand->getType();
if (OpTy->isPointerTy())
OpTy = TD->getIntPtrType(Operand->getContext());
-
+
Out << "((";
printSimpleType(Out, OpTy, castIsSigned);
Out << ")";
@@ -1579,7 +1590,7 @@ static void generateCompilerSpecificCode(formatted_raw_ostream& Out,
Out << "#if defined(__GNUC__)\n"
<< "#define __HIDDEN__ __attribute__((visibility(\"hidden\")))\n"
<< "#endif\n\n";
-
+
// Define NaN and Inf as GCC builtins if using GCC, as 0 otherwise
// From the GCC documentation:
//
@@ -1635,7 +1646,7 @@ static void generateCompilerSpecificCode(formatted_raw_ostream& Out,
<< "#define __ATTRIBUTE_DTOR__\n"
<< "#define LLVM_ASM(X)\n"
<< "#endif\n\n";
-
+
Out << "#if __GNUC__ < 4 /* Old GCC's, or compilers not GCC */ \n"
<< "#define __builtin_stack_save() 0 /* not implemented */\n"
<< "#define __builtin_stack_restore(X) /* noop */\n"
@@ -1658,11 +1669,11 @@ static void generateCompilerSpecificCode(formatted_raw_ostream& Out,
static void FindStaticTors(GlobalVariable *GV, std::set<Function*> &StaticTors){
ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
if (!InitList) return;
-
+
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i))){
if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
-
+
if (CS->getOperand(1)->isNullValue())
return; // Found a null terminator, exit printing.
Constant *FP = CS->getOperand(1);
@@ -1690,12 +1701,12 @@ static SpecialGlobalClass getGlobalVariableClass(const GlobalVariable *GV) {
else if (GV->getName() == "llvm.global_dtors")
return GlobalDtors;
}
-
+
// Otherwise, if it is other metadata, don't print it. This catches things
// like debug information.
if (GV->getSection() == "llvm.metadata")
return NotPrinted;
-
+
return NotSpecial;
}
@@ -1726,7 +1737,7 @@ static void PrintEscapedString(const std::string &Str, raw_ostream &Out) {
bool CWriter::doInitialization(Module &M) {
FunctionPass::doInitialization(M);
-
+
// Initialize
TheModule = &M;
@@ -1738,13 +1749,13 @@ bool CWriter::doInitialization(Module &M) {
std::string Triple = TheModule->getTargetTriple();
if (Triple.empty())
Triple = llvm::sys::getHostTriple();
-
+
std::string E;
if (const Target *Match = TargetRegistry::lookupTarget(Triple, E))
TAsm = Match->createAsmInfo(Triple);
-#endif
+#endif
TAsm = new CBEMCAsmInfo();
- TCtx = new MCContext(*TAsm);
+ TCtx = new MCContext(*TAsm, NULL);
Mang = new Mangler(*TCtx, *TD);
// Keep track of which functions are static ctors/dtors so they can have
@@ -1762,7 +1773,7 @@ bool CWriter::doInitialization(Module &M) {
break;
}
}
-
+
// get declaration for alloca
Out << "/* Provide Declarations */\n";
Out << "#include <stdarg.h>\n"; // Varargs support
@@ -1819,7 +1830,7 @@ bool CWriter::doInitialization(Module &M) {
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
- if (I->hasExternalLinkage() || I->hasExternalWeakLinkage() ||
+ if (I->hasExternalLinkage() || I->hasExternalWeakLinkage() ||
I->hasCommonLinkage())
Out << "extern ";
else if (I->hasDLLImportLinkage())
@@ -1844,7 +1855,7 @@ bool CWriter::doInitialization(Module &M) {
Out << "double fmod(double, double);\n"; // Support for FP rem
Out << "float fmodf(float, float);\n";
Out << "long double fmodl(long double, long double);\n";
-
+
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
// Don't print declarations for intrinsic functions.
if (!I->isIntrinsic() && I->getName() != "setjmp" &&
@@ -1852,7 +1863,7 @@ bool CWriter::doInitialization(Module &M) {
if (I->hasExternalWeakLinkage())
Out << "extern ";
printFunctionSignature(I, true);
- if (I->hasWeakLinkage() || I->hasLinkOnceLinkage())
+ if (I->hasWeakLinkage() || I->hasLinkOnceLinkage())
Out << " __ATTRIBUTE_WEAK__";
if (I->hasExternalWeakLinkage())
Out << " __EXTERNAL_WEAK__";
@@ -1862,10 +1873,10 @@ bool CWriter::doInitialization(Module &M) {
Out << " __ATTRIBUTE_DTOR__";
if (I->hasHiddenVisibility())
Out << " __HIDDEN__";
-
+
if (I->hasName() && I->getName()[0] == 1)
Out << " LLVM_ASM(\"" << I->getName().substr(1) << "\")";
-
+
Out << ";\n";
}
}
@@ -1889,7 +1900,7 @@ bool CWriter::doInitialization(Module &M) {
if (I->isThreadLocal())
Out << "__thread ";
- printType(Out, I->getType()->getElementType(), false,
+ printType(Out, I->getType()->getElementType(), false,
GetValueName(I));
if (I->hasLinkOnceLinkage())
@@ -1909,7 +1920,7 @@ bool CWriter::doInitialization(Module &M) {
// Output the global variable definitions and contents...
if (!M.global_empty()) {
Out << "\n\n/* Global Variable Definitions and Initialization */\n";
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
+ for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I)
if (!I->isDeclaration()) {
// Ignore special globals, such as debug info.
@@ -1927,7 +1938,7 @@ bool CWriter::doInitialization(Module &M) {
if (I->isThreadLocal())
Out << "__thread ";
- printType(Out, I->getType()->getElementType(), false,
+ printType(Out, I->getType()->getElementType(), false,
GetValueName(I));
if (I->hasLinkOnceLinkage())
Out << " __attribute__((common))";
@@ -1938,7 +1949,7 @@ bool CWriter::doInitialization(Module &M) {
if (I->hasHiddenVisibility())
Out << " __HIDDEN__";
-
+
// If the initializer is not null, emit the initializer. If it is null,
// we try to avoid emitting large amounts of zeros. The problem with
// this, however, occurs when the variable has weak linkage. In this
@@ -1972,7 +1983,7 @@ bool CWriter::doInitialization(Module &M) {
if (!M.empty())
Out << "\n\n/* Function Bodies */\n";
- // Emit some helper functions for dealing with FCMP instruction's
+ // Emit some helper functions for dealing with FCMP instruction's
// predicates
Out << "static inline int llvm_fcmp_ord(double X, double Y) { ";
Out << "return X == X && Y == Y; }\n";
@@ -2027,7 +2038,7 @@ void CWriter::printFloatingPointConstants(const Constant *C) {
printFloatingPointConstants(CE->getOperand(i));
return;
}
-
+
// Otherwise, check for a FP constant that we need to print.
const ConstantFP *FPC = dyn_cast<ConstantFP>(C);
if (FPC == 0 ||
@@ -2038,7 +2049,7 @@ void CWriter::printFloatingPointConstants(const Constant *C) {
return;
FPConstantMap[FPC] = FPCounter; // Number the FP constants
-
+
if (FPC->getType() == Type::getDoubleTy(FPC->getContext())) {
double Val = FPC->getValueAPF().convertToDouble();
uint64_t i = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
@@ -2057,7 +2068,7 @@ void CWriter::printFloatingPointConstants(const Constant *C) {
APInt api = FPC->getValueAPF().bitcastToAPInt();
const uint64_t *p = api.getRawData();
Out << "static const ConstantFP80Ty FPConstant" << FPCounter++
- << " = { 0x" << utohexstr(p[0])
+ << " = { 0x" << utohexstr(p[0])
<< "ULL, 0x" << utohexstr((uint16_t)p[1]) << ",{0,0,0}"
<< "}; /* Long double constant */\n";
} else if (FPC->getType() == Type::getPPC_FP128Ty(FPC->getContext()) ||
@@ -2068,7 +2079,7 @@ void CWriter::printFloatingPointConstants(const Constant *C) {
<< " = { 0x"
<< utohexstr(p[0]) << ", 0x" << utohexstr(p[1])
<< "}; /* Long double constant */\n";
-
+
} else {
llvm_unreachable("Unknown float type!");
}
@@ -2140,12 +2151,12 @@ void CWriter::printContainedStructs(const Type *Ty,
// Don't walk through pointers.
if (Ty->isPointerTy() || Ty->isPrimitiveType() || Ty->isIntegerTy())
return;
-
+
// Print all contained types first.
for (Type::subtype_iterator I = Ty->subtype_begin(),
E = Ty->subtype_end(); I != E; ++I)
printContainedStructs(*I, StructPrinted);
-
+
if (Ty->isStructTy() || Ty->isArrayTy()) {
// Check to see if we have already printed this struct.
if (StructPrinted.insert(Ty).second) {
@@ -2160,10 +2171,10 @@ void CWriter::printContainedStructs(const Type *Ty,
void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
/// isStructReturn - Should this function actually return a struct by-value?
bool isStructReturn = F->hasStructRetAttr();
-
+
if (F->hasLocalLinkage()) Out << "static ";
if (F->hasDLLImportLinkage()) Out << "__declspec(dllimport) ";
- if (F->hasDLLExportLinkage()) Out << "__declspec(dllexport) ";
+ if (F->hasDLLExportLinkage()) Out << "__declspec(dllexport) ";
switch (F->getCallingConv()) {
case CallingConv::X86_StdCall:
Out << "__attribute__((stdcall)) ";
@@ -2177,7 +2188,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
default:
break;
}
-
+
// Loop over the arguments, printing them...
const FunctionType *FT = cast<FunctionType>(F->getFunctionType());
const AttrListPtr &PAL = F->getAttributes();
@@ -2193,7 +2204,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
if (!F->arg_empty()) {
Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
unsigned Idx = 1;
-
+
// If this is a struct-return function, don't print the hidden
// struct-return argument.
if (isStructReturn) {
@@ -2201,7 +2212,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
++I;
++Idx;
}
-
+
std::string ArgName;
for (; I != E; ++I) {
if (PrintedArg) FunctionInnards << ", ";
@@ -2225,7 +2236,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
// Loop over the arguments, printing them.
FunctionType::param_iterator I = FT->param_begin(), E = FT->param_end();
unsigned Idx = 1;
-
+
// If this is a struct-return function, don't print the hidden
// struct-return argument.
if (isStructReturn) {
@@ -2233,7 +2244,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
++I;
++Idx;
}
-
+
for (; I != E; ++I) {
if (PrintedArg) FunctionInnards << ", ";
const Type *ArgTy = *I;
@@ -2262,7 +2273,7 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
FunctionInnards << "void"; // ret() -> ret(void) in C.
}
FunctionInnards << ')';
-
+
  // Get the return type for the function.
const Type *RetTy;
if (!isStructReturn)
@@ -2271,9 +2282,9 @@ void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
// If this is a struct-return function, print the struct-return type.
RetTy = cast<PointerType>(FT->getParamType(0))->getElementType();
}
-
+
// Print out the return type and the signature built above.
- printType(Out, RetTy,
+ printType(Out, RetTy,
/*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt),
FunctionInnards.str());
}
@@ -2293,7 +2304,7 @@ void CWriter::printFunction(Function &F) {
printFunctionSignature(&F, false);
Out << " {\n";
-
+
// If this is a struct return function, handle the result with magic.
if (isStructReturn) {
const Type *StructTy =
@@ -2303,13 +2314,13 @@ void CWriter::printFunction(Function &F) {
Out << "; /* Struct return temporary */\n";
Out << " ";
- printType(Out, F.arg_begin()->getType(), false,
+ printType(Out, F.arg_begin()->getType(), false,
GetValueName(F.arg_begin()));
Out << " = &StructReturn;\n";
}
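With this lowering, a function that returns a struct by value writes through the hidden sret pointer and then returns the temporary by value. Roughly, for a hypothetical struct S (the agg_result name is illustrative):

    struct S { int a, b; };
    struct S f(void) {
      struct S StructReturn;                /* Struct return temporary */
      struct S *agg_result = &StructReturn; /* hidden sret argument */
      agg_result->a = 1;
      agg_result->b = 2;
      return StructReturn;
    }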
bool PrintedVar = false;
-
+
// print local variable information for the function
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
if (const AllocaInst *AI = isDirectAlloca(&*I)) {
@@ -2317,7 +2328,7 @@ void CWriter::printFunction(Function &F) {
printType(Out, AI->getAllocatedType(), false, GetValueName(AI));
Out << "; /* Address-exposed local */\n";
PrintedVar = true;
- } else if (I->getType() != Type::getVoidTy(F.getContext()) &&
+ } else if (I->getType() != Type::getVoidTy(F.getContext()) &&
!isInlinableInst(*I)) {
Out << " ";
printType(Out, I->getType(), false, GetValueName(&*I));
@@ -2333,7 +2344,7 @@ void CWriter::printFunction(Function &F) {
}
// We need a temporary for the BitCast to use so it can pluck a value out
// of a union to do the BitCast. This is separate from the need for a
- // variable to hold the result of the BitCast.
+ // variable to hold the result of the BitCast.
if (isFPIntBitCast(*I)) {
Out << " llvmBitCastUnion " << GetValueName(&*I)
<< "__BITCAST_TEMPORARY;\n";
@@ -2421,7 +2432,7 @@ void CWriter::visitReturnInst(ReturnInst &I) {
Out << " return StructReturn;\n";
return;
}
-
+
// Don't output a void return if this is the last basic block in the function
if (I.getNumOperands() == 0 &&
&*--I.getParent()->getParent()->end() == I.getParent() &&
@@ -2578,7 +2589,7 @@ void CWriter::visitBinaryOperator(Instruction &I) {
// We must cast the results of binary operations which might be promoted.
bool needsCast = false;
if ((I.getType() == Type::getInt8Ty(I.getContext())) ||
- (I.getType() == Type::getInt16Ty(I.getContext()))
+ (I.getType() == Type::getInt16Ty(I.getContext()))
|| (I.getType() == Type::getFloatTy(I.getContext()))) {
needsCast = true;
Out << "((";
@@ -2630,7 +2641,7 @@ void CWriter::visitBinaryOperator(Instruction &I) {
case Instruction::SRem:
case Instruction::FRem: Out << " % "; break;
case Instruction::UDiv:
- case Instruction::SDiv:
+ case Instruction::SDiv:
case Instruction::FDiv: Out << " / "; break;
case Instruction::And: Out << " & "; break;
case Instruction::Or: Out << " | "; break;
@@ -2638,7 +2649,7 @@ void CWriter::visitBinaryOperator(Instruction &I) {
case Instruction::Shl : Out << " << "; break;
case Instruction::LShr:
case Instruction::AShr: Out << " >> "; break;
- default:
+ default:
#ifndef NDEBUG
errs() << "Invalid operator type!" << I;
#endif
@@ -2681,7 +2692,7 @@ void CWriter::visitICmpInst(ICmpInst &I) {
case ICmpInst::ICMP_SGT: Out << " > "; break;
default:
#ifndef NDEBUG
- errs() << "Invalid icmp predicate!" << I;
+ errs() << "Invalid icmp predicate!" << I;
#endif
llvm_unreachable(0);
}
@@ -2754,7 +2765,7 @@ void CWriter::visitCastInst(CastInst &I) {
if (isFPIntBitCast(I)) {
Out << '(';
// These int<->float and long<->double casts need to be handled specially
- Out << GetValueName(&I) << "__BITCAST_TEMPORARY."
+ Out << GetValueName(&I) << "__BITCAST_TEMPORARY."
<< getFloatBitCastField(I.getOperand(0)->getType()) << " = ";
writeOperand(I.getOperand(0));
Out << ", " << GetValueName(&I) << "__BITCAST_TEMPORARY."
@@ -2762,7 +2773,7 @@ void CWriter::visitCastInst(CastInst &I) {
Out << ')';
return;
}
-
+
Out << '(';
printCast(I.getOpcode(), SrcTy, DstTy);
@@ -2770,15 +2781,15 @@ void CWriter::visitCastInst(CastInst &I) {
if (SrcTy == Type::getInt1Ty(I.getContext()) &&
I.getOpcode() == Instruction::SExt)
Out << "0-";
-
+
writeOperand(I.getOperand(0));
-
- if (DstTy == Type::getInt1Ty(I.getContext()) &&
+
+ if (DstTy == Type::getInt1Ty(I.getContext()) &&
(I.getOpcode() == Instruction::Trunc ||
I.getOpcode() == Instruction::FPToUI ||
I.getOpcode() == Instruction::FPToSI ||
I.getOpcode() == Instruction::PtrToInt)) {
- // Make sure we really get a trunc to bool by anding the operand with 1
+ // Make sure we really get a trunc to bool by anding the operand with 1
Out << "&1u";
}
Out << ')';
@@ -2835,7 +2846,7 @@ void CWriter::lowerIntrinsics(Function &F) {
#undef GET_GCC_BUILTIN_NAME
// If we handle it, don't lower it.
if (BuiltinName[0]) break;
-
+
// All other intrinsic calls we must lower.
Instruction *Before = 0;
if (CI != &BB->front())
@@ -2858,7 +2869,7 @@ void CWriter::lowerIntrinsics(Function &F) {
break;
}
- // We may have collected some prototypes to emit in the loop above.
+ // We may have collected some prototypes to emit in the loop above.
// Emit them now, before the function that uses them is emitted. But,
// be careful not to emit them twice.
std::vector<Function*>::iterator I = prototypesToGen.begin();
@@ -2898,9 +2909,9 @@ void CWriter::visitCallInst(CallInst &I) {
writeOperandDeref(I.getArgOperand(0));
Out << " = ";
}
-
+
if (I.isTailCall()) Out << " /*tail*/ ";
-
+
if (!WroteCallee) {
// If this is an indirect call to a struct return function, we need to cast
// the pointer. Ditto for indirect calls with byval arguments.
@@ -2924,7 +2935,7 @@ void CWriter::visitCallInst(CallInst &I) {
NeedsCast = true;
Callee = RF;
}
-
+
if (NeedsCast) {
// Ok, just cast the pointer type.
Out << "((";
@@ -2957,14 +2968,14 @@ void CWriter::visitCallInst(CallInst &I) {
++AI;
++ArgNo;
}
-
+
for (; AI != AE; ++AI, ++ArgNo) {
if (PrintedArg) Out << ", ";
if (ArgNo < NumDeclaredParams &&
(*AI)->getType() != FTy->getParamType(ArgNo)) {
Out << '(';
- printType(Out, FTy->getParamType(ArgNo),
+ printType(Out, FTy->getParamType(ArgNo),
/*isSigned=*/PAL.paramHasAttr(ArgNo+1, Attribute::SExt));
Out << ')';
}
@@ -2993,7 +3004,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
#include "llvm/Intrinsics.gen"
#undef GET_GCC_BUILTIN_NAME
assert(BuiltinName[0] && "Unknown LLVM intrinsic!");
-
+
Out << BuiltinName;
WroteCallee = true;
return false;
@@ -3003,7 +3014,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
return true;
case Intrinsic::vastart:
Out << "0; ";
-
+
Out << "va_start(*(va_list*)";
writeOperand(I.getArgOperand(0));
Out << ", ";
@@ -3081,7 +3092,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
case Intrinsic::x86_sse2_cmp_pd:
Out << '(';
printType(Out, I.getType());
- Out << ')';
+ Out << ')';
// Multiple GCC builtins multiplex onto this intrinsic.
switch (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue()) {
default: llvm_unreachable("Invalid llvm.x86.sse.cmp!");
@@ -3102,7 +3113,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
Out << 's';
else
Out << 'd';
-
+
Out << "(";
writeOperand(I.getArgOperand(0));
Out << ", ";
@@ -3112,7 +3123,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
case Intrinsic::ppc_altivec_lvsl:
Out << '(';
printType(Out, I.getType());
- Out << ')';
+ Out << ')';
Out << "__builtin_altivec_lvsl(0, (void*)";
writeOperand(I.getArgOperand(0));
Out << ")";
@@ -3132,13 +3143,13 @@ std::string CWriter::InterpretASMConstraint(InlineAsm::ConstraintInfo& c) {
std::string Triple = TheModule->getTargetTriple();
if (Triple.empty())
Triple = llvm::sys::getHostTriple();
-
+
std::string E;
if (const Target *Match = TargetRegistry::lookupTarget(Triple, E))
TargetAsm = Match->createAsmInfo(Triple);
else
return c.Codes[0];
-
+
const char *const *table = TargetAsm->getAsmCBE();
// Search the translation table if it exists.
@@ -3164,7 +3175,7 @@ static std::string gccifyAsm(std::string asmstr) {
if (asmstr[i + 1] == '{') {
std::string::size_type a = asmstr.find_first_of(':', i + 1);
std::string::size_type b = asmstr.find_first_of('}', i + 1);
- std::string n = "%" +
+ std::string n = "%" +
asmstr.substr(a + 1, b - a - 1) +
asmstr.substr(i + 2, a - i - 2);
asmstr.replace(i, b - i + 1, n);
@@ -3174,7 +3185,7 @@ static std::string gccifyAsm(std::string asmstr) {
}
else if (asmstr[i] == '%')//grr
{ asmstr.replace(i, 1, "%%"); ++i;}
-
+
return asmstr;
}
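Tracing the substr arithmetic above: for "${0:h}", the text between ':' and '}' (the modifier) is emitted first and the operand number second, so the reference becomes "%h0"; a bare '%' is doubled. A worked example (the unbraced "$N" form is handled by a branch outside this hunk):

    /* LLVM asm string:  "add ${0:h}, ${1:l}"  ->  "add %h0, %l1";
       a literal '%' in the string is rewritten to "%%".          */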
@@ -3182,8 +3193,8 @@ static std::string gccifyAsm(std::string asmstr) {
// handle commutativity
void CWriter::visitInlineAsm(CallInst &CI) {
InlineAsm* as = cast<InlineAsm>(CI.getCalledValue());
- std::vector<InlineAsm::ConstraintInfo> Constraints = as->ParseConstraints();
-
+ InlineAsm::ConstraintInfoVector Constraints = as->ParseConstraints();
+
std::vector<std::pair<Value*, int> > ResultVals;
if (CI.getType() == Type::getVoidTy(CI.getContext()))
;
@@ -3193,27 +3204,27 @@ void CWriter::visitInlineAsm(CallInst &CI) {
} else {
ResultVals.push_back(std::make_pair(&CI, -1));
}
-
+
// Fix up the asm string for gcc and emit it.
Out << "__asm__ volatile (\"" << gccifyAsm(as->getAsmString()) << "\"\n";
Out << " :";
unsigned ValueCount = 0;
bool IsFirst = true;
-
+
// Convert over all the output constraints.
- for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
+ for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
E = Constraints.end(); I != E; ++I) {
-
+
if (I->Type != InlineAsm::isOutput) {
++ValueCount;
continue; // Ignore non-output constraints.
}
-
+
assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
std::string C = InterpretASMConstraint(*I);
if (C.empty()) continue;
-
+
if (!IsFirst) {
Out << ", ";
IsFirst = false;
@@ -3222,7 +3233,7 @@ void CWriter::visitInlineAsm(CallInst &CI) {
// Unpack the dest.
Value *DestVal;
int DestValNo = -1;
-
+
if (ValueCount < ResultVals.size()) {
DestVal = ResultVals[ValueCount].first;
DestValNo = ResultVals[ValueCount].second;
@@ -3231,38 +3242,38 @@ void CWriter::visitInlineAsm(CallInst &CI) {
if (I->isEarlyClobber)
C = "&"+C;
-
+
Out << "\"=" << C << "\"(" << GetValueName(DestVal);
if (DestValNo != -1)
Out << ".field" << DestValNo; // Multiple retvals.
Out << ")";
++ValueCount;
}
-
-
+
+
// Convert over all the input constraints.
Out << "\n :";
IsFirst = true;
ValueCount = 0;
- for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
+ for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
E = Constraints.end(); I != E; ++I) {
if (I->Type != InlineAsm::isInput) {
++ValueCount;
continue; // Ignore non-input constraints.
}
-
+
assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
std::string C = InterpretASMConstraint(*I);
if (C.empty()) continue;
-
+
if (!IsFirst) {
Out << ", ";
IsFirst = false;
}
-
+
assert(ValueCount >= ResultVals.size() && "Input can't refer to result");
Value *SrcVal = CI.getArgOperand(ValueCount-ResultVals.size());
-
+
Out << "\"" << C << "\"(";
if (!I->isIndirect)
writeOperand(SrcVal);
@@ -3270,10 +3281,10 @@ void CWriter::visitInlineAsm(CallInst &CI) {
writeOperandDeref(SrcVal);
Out << ")";
}
-
+
// Convert over the clobber constraints.
IsFirst = true;
- for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
+ for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
E = Constraints.end(); I != E; ++I) {
if (I->Type != InlineAsm::isClobber)
continue; // Ignore non-input constraints.
@@ -3281,15 +3292,15 @@ void CWriter::visitInlineAsm(CallInst &CI) {
assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
std::string C = InterpretASMConstraint(*I);
if (C.empty()) continue;
-
+
if (!IsFirst) {
Out << ", ";
IsFirst = false;
}
-
+
Out << '\"' << C << '"';
}
-
+
Out << ")";
}
@@ -3308,13 +3319,13 @@ void CWriter::visitAllocaInst(AllocaInst &I) {
void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
gep_type_iterator E, bool Static) {
-
+
// If there are no indices, just print out the pointer.
if (I == E) {
writeOperand(Ptr);
return;
}
-
+
// Find out if the last index is into a vector. If so, we have to print this
// specially. Since vectors can't have elements of indexable type, only the
// last index could possibly be of a vector element.
@@ -3323,9 +3334,9 @@ void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
for (gep_type_iterator TmpI = I; TmpI != E; ++TmpI)
LastIndexIsVector = dyn_cast<VectorType>(*TmpI);
}
-
+
Out << "(";
-
+
// If the last index is into a vector, we can't print it as &a[i][j] because
// we can't index into a vector with j in GCC. Instead, emit this as
// (((float*)&a[i])+j)
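The element-pointer cast is needed because GCC's vector extension does not accept a variable subscript on a vector value here. A sketch of the emitted form for a hypothetical v4sf array:

    typedef float v4sf __attribute__((vector_size(16)));
    v4sf a[4];
    /* &a[1][2] is not valid here; cast to an element pointer instead. */
    float *elt = ((float *)&a[1]) + 2;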
@@ -3334,7 +3345,7 @@ void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
printType(Out, PointerType::getUnqual(LastIndexIsVector->getElementType()));
Out << ")(";
}
-
+
Out << '&';
// If the first index is 0 (very typical) we can do a number of
@@ -3444,7 +3455,7 @@ void CWriter::visitStoreInst(StoreInst &I) {
if (BitMask) {
Out << ") & ";
printConstant(BitMask, false);
- Out << ")";
+ Out << ")";
}
}
@@ -3477,7 +3488,7 @@ void CWriter::visitInsertElementInst(InsertElementInst &I) {
void CWriter::visitExtractElementInst(ExtractElementInst &I) {
// We know that our operand is not inlined.
Out << "((";
- const Type *EltTy =
+ const Type *EltTy =
cast<VectorType>(I.getOperand(0)->getType())->getElementType();
printType(Out, PointerType::getUnqual(EltTy));
Out << ")(&" << GetValueName(I.getOperand(0)) << "))[";
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU.h b/contrib/llvm/lib/Target/CellSPU/SPU.h
index 1f21511..72f8430 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPU.h
@@ -23,6 +23,7 @@ namespace llvm {
class formatted_raw_ostream;
FunctionPass *createSPUISelDag(SPUTargetMachine &TM);
+ FunctionPass *createSPUNopFillerPass(SPUTargetMachine &tm);
extern Target TheCellSPUTarget;
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td b/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
index 069a182..5ef5716 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
@@ -54,8 +54,8 @@ class I64SETCCNegCond<PatFrag cond, CodeFrag compare>:
// The i64 seteq fragment that does the scalar->vector conversion and
// comparison:
def CEQr64compare:
- CodeFrag<(CGTIv4i32 (GBv4i32 (CEQv4i32 (ORv2i64_i64 R64C:$rA),
- (ORv2i64_i64 R64C:$rB))), 0xb)>;
+ CodeFrag<(CGTIv4i32 (GBv4i32 (CEQv4i32 (COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG))), 0xb)>;
// The i64 seteq fragment that does the vector comparison
def CEQv2i64compare:
@@ -67,12 +67,14 @@ def CEQv2i64compare:
// v2i64 seteq (equality): the setcc result is v4i32
multiclass CompareEqual64 {
// Plain old comparison, converts back to i32 scalar
- def r64: CodeFrag<(ORi32_v4i32 CEQr64compare.Fragment)>;
- def v2i64: CodeFrag<(ORi32_v4i32 CEQv2i64compare.Fragment)>;
+ def r64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQr64compare.Fragment, R32C))>;
+ def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
// SELB mask from FSM:
- def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CEQr64compare.Fragment))>;
- def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CEQv2i64compare.Fragment))>;
+ def r64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CEQr64compare.Fragment), R32C))>;
+ def v2i64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CEQv2i64compare.Fragment), R32C))>;
}
defm I64EQ: CompareEqual64;
@@ -89,10 +91,12 @@ def : I64SELECTNegCond<setne, I64EQr64>;
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
def CLGTr64ugt:
- CodeFrag<(CLGTv4i32 (ORv2i64_i64 R64C:$rA), (ORv2i64_i64 R64C:$rB))>;
+ CodeFrag<(CLGTv4i32 (COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG))>;
def CLGTr64eq:
- CodeFrag<(CEQv4i32 (ORv2i64_i64 R64C:$rA), (ORv2i64_i64 R64C:$rB))>;
+ CodeFrag<(CEQv4i32 (COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG))>;
def CLGTr64compare:
CodeFrag<(SELBv2i64 CLGTr64ugt.Fragment,
@@ -112,12 +116,14 @@ def CLGTv2i64compare:
multiclass CompareLogicalGreaterThan64 {
// Plain old comparison, converts back to i32 scalar
- def r64: CodeFrag<(ORi32_v4i32 CLGTr64compare.Fragment)>;
+ def r64: CodeFrag<(i32 (COPY_TO_REGCLASS CLGTr64compare.Fragment, R32C))>;
def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
// SELB mask from FSM:
- def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CLGTr64compare.Fragment))>;
- def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CLGTv2i64compare.Fragment))>;
+ def r64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CLGTr64compare.Fragment), R32C))>;
+ def v2i64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CLGTv2i64compare.Fragment), R32C))>;
}
defm I64LGT: CompareLogicalGreaterThan64;
@@ -144,12 +150,14 @@ def CLGEv2i64compare:
multiclass CompareLogicalGreaterEqual64 {
// Plain old comparison, converts back to i32 scalar
- def r64: CodeFrag<(ORi32_v4i32 CLGEr64compare.Fragment)>;
+ def r64: CodeFrag<(i32 (COPY_TO_REGCLASS CLGEr64compare.Fragment, R32C))>;
def v2i64: CodeFrag<CLGEv2i64compare.Fragment>;
// SELB mask from FSM:
- def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CLGEr64compare.Fragment))>;
- def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CLGEv2i64compare.Fragment))>;
+ def r64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CLGEr64compare.Fragment), R32C))>;
+ def v2i64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CLGEv2i64compare.Fragment),R32C))>;
}
defm I64LGE: CompareLogicalGreaterEqual64;
@@ -168,10 +176,12 @@ def : I64SELECTNegCond<setult, I64LGEr64>;
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
def CGTr64sgt:
- CodeFrag<(CGTv4i32 (ORv2i64_i64 R64C:$rA), (ORv2i64_i64 R64C:$rB))>;
+ CodeFrag<(CGTv4i32 (COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG))>;
def CGTr64eq:
- CodeFrag<(CEQv4i32 (ORv2i64_i64 R64C:$rA), (ORv2i64_i64 R64C:$rB))>;
+ CodeFrag<(CEQv4i32 (COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG))>;
def CGTr64compare:
CodeFrag<(SELBv2i64 CGTr64sgt.Fragment,
@@ -191,12 +201,14 @@ def CGTv2i64compare:
multiclass CompareGreaterThan64 {
// Plain old comparison, converts back to i32 scalar
- def r64: CodeFrag<(ORi32_v4i32 CGTr64compare.Fragment)>;
+ def r64: CodeFrag<(i32 (COPY_TO_REGCLASS CGTr64compare.Fragment, R32C))>;
def v2i64: CodeFrag<CGTv2i64compare.Fragment>;
// SELB mask from FSM:
- def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CGTr64compare.Fragment))>;
- def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CGTv2i64compare.Fragment))>;
+ def r64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CGTr64compare.Fragment), R32C))>;
+ def v2i64mask: CodeFrag<(i32 (COPY_TO_REGCLASS
+ (FSMv4i32 CGTv2i64compare.Fragment), R32C))>;
}
defm I64GT: CompareLogicalGreaterThan64;
@@ -223,12 +235,12 @@ def CGEv2i64compare:
multiclass CompareGreaterEqual64 {
// Plain old comparison, converts back to i32 scalar
- def r64: CodeFrag<(ORi32_v4i32 CGEr64compare.Fragment)>;
+ def r64: CodeFrag<(i32 (COPY_TO_REGCLASS CGEr64compare.Fragment, R32C))>;
def v2i64: CodeFrag<CGEv2i64compare.Fragment>;
// SELB mask from FSM:
- def r64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CGEr64compare.Fragment))>;
- def v2i64mask: CodeFrag<(ORi32_v4i32 (FSMv4i32 CGEv2i64compare.Fragment))>;
+ def r64mask: CodeFrag<(i32 (COPY_TO_REGCLASS (FSMv4i32 CGEr64compare.Fragment),R32C))>;
+ def v2i64mask: CodeFrag<(i32 (COPY_TO_REGCLASS (FSMv4i32 CGEv2i64compare.Fragment),R32C))>;
}
defm I64GE: CompareGreaterEqual64;
@@ -255,9 +267,9 @@ class v2i64_add<dag lhs, dag rhs, dag cg_mask>:
v2i64_add_1<lhs, rhs, v2i64_add_cg<lhs, rhs>.Fragment, cg_mask>;
def : Pat<(SPUadd64 R64C:$rA, R64C:$rB, (v4i32 VECREG:$rCGmask)),
- (ORi64_v2i64 v2i64_add<(ORv2i64_i64 R64C:$rA),
- (ORv2i64_i64 R64C:$rB),
- (v4i32 VECREG:$rCGmask)>.Fragment)>;
+ (COPY_TO_REGCLASS v2i64_add<(COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG),
+ (v4i32 VECREG:$rCGmask)>.Fragment, R64C)>;
def : Pat<(SPUadd64 (v2i64 VECREG:$rA), (v2i64 VECREG:$rB),
(v4i32 VECREG:$rCGmask)),
@@ -275,11 +287,12 @@ class v2i64_sub<dag lhs, dag rhs, dag bg, dag bg_mask>:
CodeFrag<(SFXv4i32 lhs, rhs, (SHUFBv4i32 bg, bg, bg_mask))>;
def : Pat<(SPUsub64 R64C:$rA, R64C:$rB, (v4i32 VECREG:$rCGmask)),
- (ORi64_v2i64 v2i64_sub<(ORv2i64_i64 R64C:$rA),
- (ORv2i64_i64 R64C:$rB),
- v2i64_sub_bg<(ORv2i64_i64 R64C:$rA),
- (ORv2i64_i64 R64C:$rB)>.Fragment,
- (v4i32 VECREG:$rCGmask)>.Fragment)>;
+ (COPY_TO_REGCLASS
+ v2i64_sub<(COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG),
+ v2i64_sub_bg<(COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG)>.Fragment,
+ (v4i32 VECREG:$rCGmask)>.Fragment, R64C)>;
def : Pat<(SPUsub64 (v2i64 VECREG:$rA), (v2i64 VECREG:$rB),
(v4i32 VECREG:$rCGmask)),
@@ -374,9 +387,9 @@ class v2i64_mul<dag rA, dag rB, dag rCGmask>:
rCGmask>;
def : Pat<(SPUmul64 R64C:$rA, R64C:$rB, (v4i32 VECREG:$rCGmask)),
- (ORi64_v2i64 v2i64_mul<(ORv2i64_i64 R64C:$rA),
- (ORv2i64_i64 R64C:$rB),
- (v4i32 VECREG:$rCGmask)>.Fragment)>;
+ (COPY_TO_REGCLASS v2i64_mul<(COPY_TO_REGCLASS R64C:$rA, VECREG),
+ (COPY_TO_REGCLASS R64C:$rB, VECREG),
+ (v4i32 VECREG:$rCGmask)>.Fragment, R64C)>;
def : Pat<(SPUmul64 (v2i64 VECREG:$rA), (v2i64 VECREG:$rB),
(v4i32 VECREG:$rCGmask)),
diff --git a/contrib/llvm/lib/Target/CellSPU/AsmPrinter/SPUAsmPrinter.cpp b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 3e95531..4040461 100644
--- a/contrib/llvm/lib/Target/CellSPU/AsmPrinter/SPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -46,10 +46,6 @@ namespace {
return "STI CBEA SPU Assembly Printer";
}
- SPUTargetMachine &getTM() {
- return static_cast<SPUTargetMachine&>(TM);
- }
-
/// printInstruction - This method is automatically generated by tablegen
/// from the instruction set description.
void printInstruction(const MachineInstr *MI, raw_ostream &OS);
@@ -64,15 +60,6 @@ namespace {
}
void printOp(const MachineOperand &MO, raw_ostream &OS);
- /// printRegister - Print register according to target requirements.
- ///
- void printRegister(const MachineOperand &MO, bool R0AsZero, raw_ostream &O){
- unsigned RegNo = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(RegNo) &&
- "Not physreg??");
- O << getRegisterName(RegNo);
- }
-
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
if (MO.isReg()) {
@@ -93,17 +80,6 @@ namespace {
void
- printS7ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O)
- {
- int value = MI->getOperand(OpNo).getImm();
- value = (value << (32 - 7)) >> (32 - 7);
-
- assert((value >= -(1 << 8) && value <= (1 << 7) - 1)
- && "Invalid s7 argument");
- O << value;
- }
-
- void
printU7ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O)
{
unsigned int value = MI->getOperand(OpNo).getImm();
@@ -134,12 +110,6 @@ namespace {
}
void
- printU32ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O)
- {
- O << (unsigned)MI->getOperand(OpNo).getImm();
- }
-
- void
printMemRegReg(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
// When used as the base register, r0 reads constant zero rather than
// the value contained in the register. For this reason, the darwin
@@ -221,13 +191,6 @@ namespace {
printOp(MI->getOperand(OpNo), O);
}
- void printHBROperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- // HBR operands are generated in front of branches, hence, the
- // program counter plus the target.
- O << ".+";
- printOp(MI->getOperand(OpNo), O);
- }
-
void printSymbolHi(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
if (MI->getOperand(OpNo).isImm()) {
printS16ImmOperand(MI, OpNo, O);
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.cpp
deleted file mode 100644
index 60d7ba7..0000000
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//===-- SPUTargetMachine.cpp - Define TargetMachine for Cell SPU ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Top-level implementation for the Cell SPU target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SPU.h"
-#include "SPUFrameInfo.h"
-#include "SPURegisterNames.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// SPUFrameInfo:
-//===----------------------------------------------------------------------===//
-
-SPUFrameInfo::SPUFrameInfo(const TargetMachine &tm):
- TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 16, 0),
- TM(tm)
-{
- LR[0].first = SPU::R0;
- LR[0].second = 16;
-}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
new file mode 100644
index 0000000..432f4a1
--- /dev/null
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
@@ -0,0 +1,276 @@
+//===-- SPUFrameLowering.cpp - Frame lowering for the Cell SPU -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Frame lowering implementation for the Cell SPU target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPU.h"
+#include "SPUFrameLowering.h"
+#include "SPURegisterNames.h"
+#include "SPUInstrBuilder.h"
+#include "SPUInstrInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// SPUFrameLowering:
+//===----------------------------------------------------------------------===//
+
+SPUFrameLowering::SPUFrameLowering(const SPUSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 16, 0),
+ Subtarget(sti) {
+ LR[0].first = SPU::R0;
+ LR[0].second = 16;
+}
+
+
+//--------------------------------------------------------------------------
+// hasFP - Return true if the specified function actually has a dedicated frame
+// pointer register. This is true if the function needs a frame pointer and has
+// a non-zero stack size.
+bool SPUFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ return MFI->getStackSize() &&
+ (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects());
+}
+
+
+/// determineFrameLayout - Determine the size of the frame and maximum call
+/// frame size.
+void SPUFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo
+ unsigned FrameSize = MFI->getStackSize();
+
+ // Get the alignments provided by the target, and the maximum alignment
+ // (if any) of the fixed frame objects.
+ unsigned TargetAlign = getStackAlignment();
+ unsigned Align = std::max(TargetAlign, MFI->getMaxAlignment());
+ assert(isPowerOf2_32(Align) && "Alignment is not power of 2");
+ unsigned AlignMask = Align - 1;
+
+ // Get the maximum call frame size of all the calls.
+ unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
+
+ // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
+ // that allocations will be aligned.
+ if (MFI->hasVarSizedObjects())
+ maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
+
+ // Update maximum call frame size.
+ MFI->setMaxCallFrameSize(maxCallFrameSize);
+
+ // Include call frame size in total.
+ FrameSize += maxCallFrameSize;
+
+ // Make sure the frame is aligned.
+ FrameSize = (FrameSize + AlignMask) & ~AlignMask;
+
+ // Update frame info.
+ MFI->setStackSize(FrameSize);
+}
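The mask arithmetic rounds a size up to the next multiple of a power-of-two alignment. A worked instance, assuming a 16-byte alignment:

    unsigned Align = 16, AlignMask = Align - 1;       /* 0xf */
    unsigned FrameSize = 40;
    FrameSize = (FrameSize + AlignMask) & ~AlignMask; /* 48  */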
+
+void SPUFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const SPUInstrInfo &TII =
+ *static_cast<const SPUInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineModuleInfo &MMI = MF.getMMI();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Prepare for debug frame info.
+ bool hasDebugInfo = MMI.hasDebugInfo();
+ MCSymbol *FrameLabel = 0;
+
+ // Move MBBI back to the beginning of the function.
+ MBBI = MBB.begin();
+
+ // Work out frame sizes.
+ determineFrameLayout(MF);
+ int FrameSize = MFI->getStackSize();
+
+ assert((FrameSize & 0xf) == 0
+ && "SPURegisterInfo::emitPrologue: FrameSize not aligned");
+
+ // the "empty" frame size is 16 - just the register scavenger spill slot
+ if (FrameSize > 16 || MFI->adjustsStack()) {
+ FrameSize = -(FrameSize + SPUFrameLowering::minStackSize());
+ if (hasDebugInfo) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL)).addSym(FrameLabel);
+ }
+
+ // Adjust stack pointer, spilling $lr -> 16($sp) and $sp -> -FrameSize($sp)
+ // for the ABI
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr32), SPU::R0).addImm(16)
+ .addReg(SPU::R1);
+ if (isInt<10>(FrameSize)) {
+ // Spill $sp to adjusted $sp
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr32), SPU::R1).addImm(FrameSize)
+ .addReg(SPU::R1);
+      // Adjust $sp by required amount
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::AIr32), SPU::R1).addReg(SPU::R1)
+ .addImm(FrameSize);
+ } else if (isInt<16>(FrameSize)) {
+ // Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
+ // $r2 to adjust $sp:
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr128), SPU::R2)
+ .addImm(-16)
+ .addReg(SPU::R1);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::ILr32), SPU::R2)
+ .addImm(FrameSize);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::STQXr32), SPU::R1)
+ .addReg(SPU::R2)
+ .addReg(SPU::R1);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::Ar32), SPU::R1)
+ .addReg(SPU::R1)
+ .addReg(SPU::R2);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::SFIr32), SPU::R2)
+ .addReg(SPU::R2)
+ .addImm(16);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::LQXr128), SPU::R2)
+ .addReg(SPU::R2)
+ .addReg(SPU::R1);
+ } else {
+ report_fatal_error("Unhandled frame size: " + Twine(FrameSize));
+ }
+
+ if (hasDebugInfo) {
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+
+ // Show update of SP.
+ MachineLocation SPDst(MachineLocation::VirtualFP);
+ MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize);
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
+ unsigned Reg = CSI[I].getReg();
+ if (Reg == SPU::R0) continue;
+ MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
+ MachineLocation CSSrc(Reg);
+ Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
+ }
+
+ // Mark effective beginning of when frame pointer is ready.
+ MCSymbol *ReadyLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL)).addSym(ReadyLabel);
+
+ MachineLocation FPDst(SPU::R1);
+ MachineLocation FPSrc(MachineLocation::VirtualFP);
+ Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
+ }
+ } else {
+ // This is a leaf function -- insert a branch hint iff there are
+    // a sufficient number of instructions in the basic block. Note that
+ // this is just a best guess based on the basic block's size.
+ if (MBB.size() >= (unsigned) SPUFrameLowering::branchHintPenalty()) {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ dl = MBBI->getDebugLoc();
+
+ // Insert terminator label
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL))
+ .addSym(MMI.getContext().CreateTempSymbol());
+ }
+ }
+}
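The isInt<10>/isInt<16> checks decide whether the frame delta fits a d-form immediate or must be materialized in a scratch register. LLVM provides these helpers in Support/MathExtras.h; a re-implementation of the range test, for illustration only:

    // True iff x fits in an N-bit signed immediate; N=10 gives [-512, 511].
    template <unsigned N> static bool isIntN(long long x) {
      return x >= -(1LL << (N - 1)) && x < (1LL << (N - 1));
    }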
+
+void SPUFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const SPUInstrInfo &TII =
+ *static_cast<const SPUInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int FrameSize = MFI->getStackSize();
+ int LinkSlotOffset = SPUFrameLowering::stackSlotSize();
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ assert(MBBI->getOpcode() == SPU::RET &&
+ "Can only insert epilog into returning blocks");
+ assert((FrameSize & 0xf) == 0 && "FrameSize not aligned");
+
+ // the "empty" frame size is 16 - just the register scavenger spill slot
+ if (FrameSize > 16 || MFI->adjustsStack()) {
+ FrameSize = FrameSize + SPUFrameLowering::minStackSize();
+ if (isInt<10>(FrameSize + LinkSlotOffset)) {
+ // Reload $lr, adjust $sp by required amount
+ // Note: We do this to slightly improve dual issue -- not by much, but it
+ // is an opportunity for dual issue.
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::LQDr128), SPU::R0)
+ .addImm(FrameSize + LinkSlotOffset)
+ .addReg(SPU::R1);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::AIr32), SPU::R1)
+ .addReg(SPU::R1)
+ .addImm(FrameSize);
+ } else if (FrameSize <= (1 << 16) - 1 && FrameSize >= -(1 << 16)) {
+ // Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
+ // $r2 to adjust $sp:
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr128), SPU::R2)
+ .addImm(16)
+ .addReg(SPU::R1);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::ILr32), SPU::R2)
+ .addImm(FrameSize);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::Ar32), SPU::R1)
+ .addReg(SPU::R1)
+ .addReg(SPU::R2);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::LQDr128), SPU::R0)
+ .addImm(16)
+ .addReg(SPU::R1);
+    BuildMI(MBB, MBBI, dl, TII.get(SPU::SFIr32), SPU::R2)
+      .addReg(SPU::R2)
+ .addImm(16);
+ BuildMI(MBB, MBBI, dl, TII.get(SPU::LQXr128), SPU::R2)
+ .addReg(SPU::R2)
+ .addReg(SPU::R1);
+ } else {
+ report_fatal_error("Unhandled frame size: " + Twine(FrameSize));
+ }
+ }
+}
+
+void SPUFrameLowering::getInitialFrameState(std::vector<MachineMove> &Moves)
+ const {
+ // Initial state of the frame pointer is R1.
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(SPU::R1, 0);
+ Moves.push_back(MachineMove(0, Dst, Src));
+}
+
+void SPUFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+                                                     RegScavenger *RS) const {
+ // Mark LR and SP unused, since the prolog spills them to stack and
+ // we don't want anyone else to spill them for us.
+ //
+ // Also, unless R2 is really used someday, don't spill it automatically.
+ MF.getRegInfo().setPhysRegUnused(SPU::R0);
+ MF.getRegInfo().setPhysRegUnused(SPU::R1);
+ MF.getRegInfo().setPhysRegUnused(SPU::R2);
+
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterClass *RC = &SPU::R32CRegClass;
+ RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h
index f511acd..4fee72d 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h
@@ -1,4 +1,4 @@
-//===-- SPUFrameInfo.h - Top-level interface for Cell SPU Target -*- C++ -*-==//
+//===-- SPUFrameLowering.h - SPU frame lowering interface ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,19 +12,39 @@
//
//===----------------------------------------------------------------------===//
-#if !defined(SPUFRAMEINFO_H)
+#ifndef SPU_FRAMEINFO_H
+#define SPU_FRAMEINFO_H
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
#include "SPURegisterInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
namespace llvm {
- class SPUFrameInfo: public TargetFrameInfo {
- const TargetMachine &TM;
+ class SPUSubtarget;
+
+ class SPUFrameLowering: public TargetFrameLowering {
+ const SPUSubtarget &Subtarget;
std::pair<unsigned, int> LR[1];
public:
- SPUFrameInfo(const TargetMachine &tm);
+ SPUFrameLowering(const SPUSubtarget &sti);
+
+    //! Determine the frame's layout
+ void determineFrameLayout(MachineFunction &MF) const;
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+    //! Predicate: Target has a dedicated frame pointer
+ bool hasFP(const MachineFunction &MF) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const;
+
+ //! Perform target-specific stack frame setup.
+ void getInitialFrameState(std::vector<MachineMove> &Moves) const;
//! Return a function's saved spill slots
/*!
@@ -71,5 +91,4 @@ namespace llvm {
};
}
-#define SPUFRAMEINFO_H 1
#endif
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
index 9dbab1d..403d7ef 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.cpp
@@ -41,12 +41,14 @@ SPUHazardRecognizer::SPUHazardRecognizer(const TargetInstrInfo &tii) :
///
/// \return NoHazard
ScheduleHazardRecognizer::HazardType
-SPUHazardRecognizer::getHazardType(SUnit *SU)
+SPUHazardRecognizer::getHazardType(SUnit *SU, int Stalls)
{
// Initial thoughts on how to do this, but this code cannot work unless the
// function's prolog and epilog code are also being scheduled so that we can
// accurately determine which pipeline is being scheduled.
#if 0
+ assert(Stalls == 0 && "SPU hazards don't yet support scoreboard lookahead");
+
const SDNode *Node = SU->getNode()->getFlaggedMachineNode();
ScheduleHazardRecognizer::HazardType retval = NoHazard;
bool mustBeOdd = false;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
index d0ae2d8..675632c 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUHazardRecognizers.h
@@ -20,7 +20,7 @@
namespace llvm {
class TargetInstrInfo;
-
+
/// SPUHazardRecognizer
class SPUHazardRecognizer : public ScheduleHazardRecognizer
{
@@ -30,7 +30,7 @@ private:
public:
SPUHazardRecognizer(const TargetInstrInfo &TII);
- virtual HazardType getHazardType(SUnit *SU);
+ virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
virtual void EmitNoop();
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index 2f15984..d226156 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -15,7 +15,7 @@
#include "SPU.h"
#include "SPUTargetMachine.h"
#include "SPUHazardRecognizers.h"
-#include "SPUFrameInfo.h"
+#include "SPUFrameLowering.h"
#include "SPURegisterNames.h"
#include "SPUTargetMachine.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -111,55 +111,6 @@ namespace {
return false;
}
- //===------------------------------------------------------------------===//
- //! EVT to "useful stuff" mapping structure:
-
- struct valtype_map_s {
- EVT VT;
- unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined)
- bool ldresult_imm; /// LDRESULT instruction requires immediate?
- unsigned lrinst; /// LR instruction
- };
-
- const valtype_map_s valtype_map[] = {
- { MVT::i8, SPU::ORBIr8, true, SPU::LRr8 },
- { MVT::i16, SPU::ORHIr16, true, SPU::LRr16 },
- { MVT::i32, SPU::ORIr32, true, SPU::LRr32 },
- { MVT::i64, SPU::ORr64, false, SPU::LRr64 },
- { MVT::f32, SPU::ORf32, false, SPU::LRf32 },
- { MVT::f64, SPU::ORf64, false, SPU::LRf64 },
- // vector types... (sigh!)
- { MVT::v16i8, 0, false, SPU::LRv16i8 },
- { MVT::v8i16, 0, false, SPU::LRv8i16 },
- { MVT::v4i32, 0, false, SPU::LRv4i32 },
- { MVT::v2i64, 0, false, SPU::LRv2i64 },
- { MVT::v4f32, 0, false, SPU::LRv4f32 },
- { MVT::v2f64, 0, false, SPU::LRv2f64 }
- };
-
- const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
-
- const valtype_map_s *getValueTypeMapEntry(EVT VT)
- {
- const valtype_map_s *retval = 0;
- for (size_t i = 0; i < n_valtype_map; ++i) {
- if (valtype_map[i].VT == VT) {
- retval = valtype_map + i;
- break;
- }
- }
-
-
-#ifndef NDEBUG
- if (retval == 0) {
- report_fatal_error("SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns"
- "NULL for " + Twine(VT.getEVTString()));
- }
-#endif
-
- return retval;
- }
-
//! Generate the carry-generate shuffle mask.
SDValue getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) {
SmallVector<SDValue, 16 > ShufBytes;
@@ -221,16 +172,10 @@ namespace {
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
- /// getI64Imm - Return a target constant with the specified value, of type
- /// i64.
- inline SDValue getI64Imm(uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i64);
- }
-
/// getSmallIPtrImm - Return a target constant of pointer type.
inline SDValue getSmallIPtrImm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, SPUtli.getPointerTy());
- }
+ }
SDNode *emitBuildVector(SDNode *bvNode) {
EVT vecVT = bvNode->getValueType(0);
@@ -268,10 +213,10 @@ namespace {
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue CGPoolOffset =
SPU::LowerConstantPool(CPIdx, *CurDAG, TM);
-
+
HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl,
CurDAG->getEntryNode(), CGPoolOffset,
- PseudoSourceValue::getConstantPool(),0,
+ MachinePointerInfo::getConstantPool(),
false, false, Alignment));
CurDAG->ReplaceAllUsesWith(SDValue(bvNode, 0), Dummy.getValue());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
@@ -356,13 +301,8 @@ namespace {
return "Cell SPU DAG->DAG Pattern Instruction Selection";
}
- /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
- /// this target when scheduling the DAG.
- virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer() {
- const TargetInstrInfo *II = TM.getInstrInfo();
- assert(II && "No InstrInfo?");
- return new SPUHazardRecognizer(*II);
- }
+ private:
+ SDValue getRC( MVT );
// Include the pieces autogenerated from the target description.
#include "SPUGenDAGISel.inc"
@@ -450,8 +390,8 @@ bool
SPUDAGToDAGISel::SelectDFormAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Index) {
return DFormAddressPredicate(Op, N, Base, Index,
- SPUFrameInfo::minFrameOffset(),
- SPUFrameInfo::maxFrameOffset());
+ SPUFrameLowering::minFrameOffset(),
+ SPUFrameLowering::maxFrameOffset());
}
bool
@@ -467,7 +407,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
int FI = int(FIN->getIndex());
DEBUG(errs() << "SelectDFormAddr: ISD::FrameIndex = "
<< FI << "\n");
- if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
+ if (SPUFrameLowering::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(0, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
@@ -493,7 +433,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
DEBUG(errs() << "SelectDFormAddr: ISD::ADD offset = " << offset
<< " frame index = " << FI << "\n");
- if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
+ if (SPUFrameLowering::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
@@ -514,7 +454,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
DEBUG(errs() << "SelectDFormAddr: ISD::ADD offset = " << offset
<< " frame index = " << FI << "\n");
- if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
+ if (SPUFrameLowering::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
@@ -564,8 +504,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
Base = CurDAG->getTargetConstant(0, N.getValueType());
Index = N;
return true;
- } else if (Opc == ISD::Register
- ||Opc == ISD::CopyFromReg
+ } else if (Opc == ISD::Register
+ ||Opc == ISD::CopyFromReg
||Opc == ISD::UNDEF
||Opc == ISD::Constant) {
unsigned OpOpc = Op->getOpcode();
@@ -625,6 +565,46 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base,
return false;
}
+/*!
+ Utility function to use with COPY_TO_REGCLASS instructions. Returns an
+ SDValue to be used as the last parameter of a
+ CurDAG->getMachineNode(COPY_TO_REGCLASS, ...) call.
+ \arg VT the value type for which we want a register class
+*/
+SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
+ switch( VT.SimpleTy ) {
+ case MVT::i8:
+ return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
+ break;
+ case MVT::i16:
+ return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
+ break;
+ case MVT::i32:
+ return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
+ break;
+ case MVT::f32:
+ return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
+ break;
+ case MVT::i64:
+ return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
+ break;
+ case MVT::i128:
+ return CurDAG->getTargetConstant(SPU::GPRCRegClass.getID(), MVT::i32);
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v4f32:
+ case MVT::v2i64:
+ case MVT::v2f64:
+ return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
+ break;
+ default:
+ assert( false && "add a new case here" );
+ }
+ return SDValue();
+}
+
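+// A minimal usage sketch for getRC(): the register-class id it returns goes
+// in as the trailing operand of a COPY_TO_REGCLASS pseudo, e.g. to
+// reinterpret an i64 value as a v2i64 vector register (names here are
+// illustrative, mirroring the uses elsewhere in this file):
+//
+//   SDNode *VecOp0 =
+//     CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VecVT,
+//                            Op0, getRC(MVT::v2i64));
+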
//! Convert the operand from a target-independent to a target-specific node
/*!
*/
@@ -632,7 +612,7 @@ SDNode *
SPUDAGToDAGISel::Select(SDNode *N) {
unsigned Opc = N->getOpcode();
int n_ops = -1;
- unsigned NewOpc;
+ unsigned NewOpc = 0;
EVT OpVT = N->getValueType(0);
SDValue Ops[8];
DebugLoc dl = N->getDebugLoc();
@@ -654,7 +634,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
NewOpc = SPU::Ar32;
Ops[0] = CurDAG->getRegister(SPU::R1, N->getValueType(0));
Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILAr32, dl,
- N->getValueType(0), TFI, Imm0),
+ N->getValueType(0), TFI),
0);
n_ops = 2;
}
@@ -669,7 +649,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
EVT Op0VT = Op0.getValueType();
EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(),
Op0VT, (128 / Op0VT.getSizeInBits()));
- EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue shufMask;
@@ -703,19 +683,19 @@ SPUDAGToDAGISel::Select(SDNode *N) {
}
SDNode *shufMaskLoad = emitBuildVector(shufMask.getNode());
-
+
HandleSDNode PromoteScalar(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl,
Op0VecVT, Op0));
-
+
SDValue PromScalar;
if (SDNode *N = SelectCode(PromoteScalar.getValue().getNode()))
PromScalar = SDValue(N, 0);
else
PromScalar = PromoteScalar.getValue();
-
+
SDValue zextShuffle =
CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
- PromScalar, PromScalar,
+ PromScalar, PromScalar,
SDValue(shufMaskLoad, 0));
HandleSDNode Dummy2(zextShuffle);
@@ -725,7 +705,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
zextShuffle = Dummy2.getValue();
HandleSDNode Dummy(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
zextShuffle));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
SelectCode(Dummy.getValue().getNode());
return Dummy.getValue().getNode();
@@ -736,7 +716,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0)));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N;
@@ -748,7 +728,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0)));
-
+
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N;
@@ -779,8 +759,8 @@ SPUDAGToDAGISel::Select(SDNode *N) {
if (shift_amt >= 32) {
SDNode *hi32 =
- CurDAG->getMachineNode(SPU::ORr32_r64, dl, OpVT,
- Op0.getOperand(0));
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,
+ Op0.getOperand(0), getRC(MVT::i32));
shift_amt -= 32;
if (shift_amt > 0) {
@@ -862,23 +842,12 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDValue Arg = N->getOperand(0);
SDValue Chain = N->getOperand(1);
SDNode *Result;
- const valtype_map_s *vtm = getValueTypeMapEntry(VT);
-
- if (vtm->ldresult_ins == 0) {
- report_fatal_error("LDRESULT for unsupported type: " +
- Twine(VT.getEVTString()));
- }
-
- Opc = vtm->ldresult_ins;
- if (vtm->ldresult_imm) {
- SDValue Zero = CurDAG->getTargetConstant(0, VT);
-
- Result = CurDAG->getMachineNode(Opc, dl, VT, MVT::Other, Arg, Zero, Chain);
- } else {
- Result = CurDAG->getMachineNode(Opc, dl, VT, MVT::Other, Arg, Arg, Chain);
- }
+ Result = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VT,
+ MVT::Other, Arg,
+ getRC( VT.getSimpleVT()), Chain);
return Result;
+
} else if (Opc == SPUISD::IndirectAddr) {
// Look at the operands: SelectCode() will catch the cases that aren't
// specifically handled here.
@@ -904,10 +873,10 @@ SPUDAGToDAGISel::Select(SDNode *N) {
NewOpc = SPU::AIr32;
Ops[1] = Op1;
} else {
- Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
- N->getValueType(0),
+ Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
+ N->getValueType(0),
Op1),
- 0);
+ 0);
}
}
Ops[0] = Op0;
@@ -939,7 +908,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDNode *
SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue Op0 = N->getOperand(0);
- EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType();
@@ -947,7 +916,8 @@ SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue SelMaskVal;
DebugLoc dl = N->getDebugLoc();
- VecOp0 = CurDAG->getMachineNode(SPU::ORv2i64_i64, dl, VecVT, Op0);
+ VecOp0 = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VecVT,
+ Op0, getRC(MVT::v2i64) );
SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, MVT::i16);
SelMask = CurDAG->getMachineNode(SPU::FSMBIv2i64, dl, VecVT, SelMaskVal);
ZeroFill = CurDAG->getMachineNode(SPU::ILv2i64, dl, VecVT,
@@ -991,7 +961,8 @@ SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0));
}
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
/*!
@@ -1012,7 +983,8 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDNode *VecOp0, *Shift = 0;
DebugLoc dl = N->getDebugLoc();
- VecOp0 = CurDAG->getMachineNode(SPU::ORv2i64_i64, dl, VecVT, Op0);
+ VecOp0 = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VecVT,
+ Op0, getRC(MVT::v2i64) );
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(ShiftAmt)) {
unsigned bytes = unsigned(CN->getZExtValue()) >> 3;
@@ -1058,7 +1030,8 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0));
}
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
/*!
@@ -1072,21 +1045,23 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDNode *
SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
// Promote Op0 to vector
- EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
+ EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType();
DebugLoc dl = N->getDebugLoc();
SDNode *VecOp0 =
- CurDAG->getMachineNode(SPU::ORv2i64_i64, dl, VecVT, N->getOperand(0));
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ VecVT, N->getOperand(0), getRC(MVT::v2i64));
SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
SDNode *SignRot =
CurDAG->getMachineNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
SDValue(VecOp0, 0), SignRotAmt);
SDNode *UpperHalfSign =
- CurDAG->getMachineNode(SPU::ORi32_v4i32, dl, MVT::i32, SDValue(SignRot, 0));
+ CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ MVT::i32, SDValue(SignRot, 0), getRC(MVT::i32));
SDNode *UpperHalfSignMask =
CurDAG->getMachineNode(SPU::FSM64r32, dl, VecVT, SDValue(UpperHalfSign, 0));
@@ -1133,7 +1108,8 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(NegShift, 0));
}
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0));
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ OpVT, SDValue(Shift, 0), getRC(MVT::i64));
}
/*!
@@ -1154,20 +1130,21 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
// Here's where it gets interesting, because we have to parse out the
// subtree handed back in i64vec:
- if (i64vec.getOpcode() == ISD::BIT_CONVERT) {
+ if (i64vec.getOpcode() == ISD::BITCAST) {
// The degenerate case where the upper and lower bits in the splat are
// identical:
SDValue Op0 = i64vec.getOperand(0);
ReplaceUses(i64vec, Op0);
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT,
- SDValue(emitBuildVector(Op0.getNode()), 0));
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,
+ SDValue(emitBuildVector(Op0.getNode()), 0),
+ getRC(MVT::i64));
} else if (i64vec.getOpcode() == SPUISD::SHUFB) {
SDValue lhs = i64vec.getOperand(0);
SDValue rhs = i64vec.getOperand(1);
SDValue shufmask = i64vec.getOperand(2);
- if (lhs.getOpcode() == ISD::BIT_CONVERT) {
+ if (lhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(lhs, lhs.getOperand(0));
lhs = lhs.getOperand(0);
}
@@ -1176,7 +1153,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? lhs.getNode()
: emitBuildVector(lhs.getNode()));
- if (rhs.getOpcode() == ISD::BIT_CONVERT) {
+ if (rhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(rhs, rhs.getOperand(0));
rhs = rhs.getOperand(0);
}
@@ -1185,7 +1162,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? rhs.getNode()
: emitBuildVector(rhs.getNode()));
- if (shufmask.getOpcode() == ISD::BIT_CONVERT) {
+ if (shufmask.getOpcode() == ISD::BITCAST) {
ReplaceUses(shufmask, shufmask.getOperand(0));
shufmask = shufmask.getOperand(0);
}
@@ -1201,11 +1178,13 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
HandleSDNode Dummy(shufNode);
SDNode *SN = SelectCode(Dummy.getValue().getNode());
if (SN == 0) SN = Dummy.getValue().getNode();
-
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(SN, 0));
+
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
+ OpVT, SDValue(SN, 0), getRC(MVT::i64));
} else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) {
- return CurDAG->getMachineNode(SPU::ORi64_v2i64, dl, OpVT,
- SDValue(emitBuildVector(i64vec.getNode()), 0));
+ return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,
+ SDValue(emitBuildVector(i64vec.getNode()), 0),
+ getRC(MVT::i64));
} else {
report_fatal_error("SPUDAGToDAGISel::SelectI64Constant: Unhandled i64vec"
"condition");
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
index 46f3189..e6511d0 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -1,4 +1,3 @@
-//
//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
// The LLVM Compiler Infrastructure
//
@@ -14,12 +13,13 @@
#include "SPURegisterNames.h"
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
-#include "SPUFrameInfo.h"
+#include "SPUFrameLowering.h"
#include "SPUMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
+#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -41,41 +41,12 @@ using namespace llvm;
namespace {
std::map<unsigned, const char *> node_names;
- //! EVT mapping to useful data for Cell SPU
- struct valtype_map_s {
- EVT valtype;
- int prefslot_byte;
- };
-
- const valtype_map_s valtype_map[] = {
- { MVT::i1, 3 },
- { MVT::i8, 3 },
- { MVT::i16, 2 },
- { MVT::i32, 0 },
- { MVT::f32, 0 },
- { MVT::i64, 0 },
- { MVT::f64, 0 },
- { MVT::i128, 0 }
- };
-
- const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
-
- const valtype_map_s *getValueTypeMapEntry(EVT VT) {
- const valtype_map_s *retval = 0;
-
- for (size_t i = 0; i < n_valtype_map; ++i) {
- if (valtype_map[i].valtype == VT) {
- retval = valtype_map + i;
- break;
- }
- }
-
-#ifndef NDEBUG
- if (retval == 0) {
- report_fatal_error("getValueTypeMapEntry returns NULL for " +
- Twine(VT.getEVTString()));
- }
-#endif
+ // Byte offset of the preferred slot (counted from the MSB)
+ int prefslotOffset(EVT VT) {
+ int retval=0;
+ if (VT==MVT::i1) retval=3;
+ if (VT==MVT::i8) retval=3;
+ if (VT==MVT::i16) retval=2;
return retval;
}
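// Worked example: an i16 value occupies bytes 2..3 of its 16-byte register,
// so prefslotOffset(MVT::i16) == 2. For an aligned load at offset 6 within a
// quadword, the rotate amount computed below works out to
// (6 & 0xf) - 2 == 4, i.e. the quad is rotated left by 4 bytes to bring the
// halfword into its preferred slot; i32 and wider types use offset 0.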
@@ -125,8 +96,6 @@ namespace {
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()),
SPUTM(TM) {
- // Fold away setcc operations if possible.
- setPow2DivIsCheap();
// Use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(true);
@@ -376,10 +345,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
- setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);
+ setOperationAction(ISD::BITCAST, MVT::i32, Legal);
+ setOperationAction(ISD::BITCAST, MVT::f32, Legal);
+ setOperationAction(ISD::BITCAST, MVT::i64, Legal);
+ setOperationAction(ISD::BITCAST, MVT::f64, Legal);
// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -439,9 +408,9 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::AND, VT, Legal);
setOperationAction(ISD::OR, VT, Legal);
setOperationAction(ISD::XOR, VT, Legal);
- setOperationAction(ISD::LOAD, VT, Legal);
+ setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::SELECT, VT, Legal);
- setOperationAction(ISD::STORE, VT, Legal);
+ setOperationAction(ISD::STORE, VT, Custom);
// These operations need to be expanded:
setOperationAction(ISD::SDIV, VT, Expand);
@@ -502,8 +471,8 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
- node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
- node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
+ node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
+ node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
@@ -531,10 +500,20 @@ unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
//===----------------------------------------------------------------------===//
MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
- // i16 and i32 are valid SETCC result types
- return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
- VT.getSimpleVT().SimpleTy :
- MVT::i32);
+ // i8, i16 and i32 are valid SETCC result types
+ MVT::SimpleValueType retval;
+
+ switch(VT.getSimpleVT().SimpleTy){
+ case MVT::i1:
+ case MVT::i8:
+ retval = MVT::i8; break;
+ case MVT::i16:
+ retval = MVT::i16; break;
+ case MVT::i32:
+ default:
+ retval = MVT::i32;
+ }
+ return retval;
}
//===----------------------------------------------------------------------===//
@@ -572,113 +551,174 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
EVT OutVT = Op.getValueType();
ISD::LoadExtType ExtType = LN->getExtensionType();
unsigned alignment = LN->getAlignment();
- const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
+ int pso = prefslotOffset(InVT);
DebugLoc dl = Op.getDebugLoc();
-
- switch (LN->getAddressingMode()) {
- case ISD::UNINDEXED: {
- SDValue result;
- SDValue basePtr = LN->getBasePtr();
- SDValue rotate;
-
- if (alignment == 16) {
- ConstantSDNode *CN;
-
- // Special cases for a known aligned load to simplify the base pointer
- // and the rotation amount:
- if (basePtr.getOpcode() == ISD::ADD
- && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
- // Known offset into basePtr
- int64_t offset = CN->getSExtValue();
- int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);
-
- if (rotamt < 0)
- rotamt += 16;
-
- rotate = DAG.getConstant(rotamt, MVT::i16);
-
- // Simplify the base pointer for this case:
- basePtr = basePtr.getOperand(0);
- if ((offset & ~0xf) > 0) {
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
- basePtr,
- DAG.getConstant((offset & ~0xf), PtrVT));
- }
- } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
- || (basePtr.getOpcode() == SPUISD::IndirectAddr
- && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
- && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
- // Plain aligned a-form address: rotate into preferred slot
- // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
- int64_t rotamt = -vtm->prefslot_byte;
- if (rotamt < 0)
- rotamt += 16;
- rotate = DAG.getConstant(rotamt, MVT::i16);
- } else {
- // Offset the rotate amount by the basePtr and the preferred slot
- // byte offset
- int64_t rotamt = -vtm->prefslot_byte;
- if (rotamt < 0)
- rotamt += 16;
- rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
- basePtr,
- DAG.getConstant(rotamt, PtrVT));
- }
- } else {
- // Unaligned load: must be more pessimistic about addressing modes:
- if (basePtr.getOpcode() == ISD::ADD) {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
- unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
- SDValue Flag;
-
- SDValue Op0 = basePtr.getOperand(0);
- SDValue Op1 = basePtr.getOperand(1);
-
- if (isa<ConstantSDNode>(Op1)) {
- // Convert the (add <ptr>, <const>) to an indirect address contained
- // in a register. Note that this is done because we need to avoid
- // creating a 0(reg) d-form address due to the SPU's block loads.
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
- the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
- basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
- } else {
- // Convert the (add <arg1>, <arg2>) to an indirect address, which
- // will likely be lowered as a reg(reg) x-form address.
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
- }
- } else {
+ EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
+ (128 / InVT.getSizeInBits()));
+
+ // two sanity checks
+ assert( LN->getAddressingMode() == ISD::UNINDEXED
+          && "we should get only UNINDEXED addresses");
+  // cleanly aligned loads can be selected as-is
+ if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
+ return SDValue();
+
+  // Get pointer infos to the memory chunk(s) that contain the data to load
+ uint64_t mpi_offset = LN->getPointerInfo().Offset;
+ mpi_offset -= mpi_offset%16;
+ MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
+ MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
+
+ SDValue result;
+ SDValue basePtr = LN->getBasePtr();
+ SDValue rotate;
+
+ if ((alignment%16) == 0) {
+ ConstantSDNode *CN;
+
+ // Special cases for a known aligned load to simplify the base pointer
+ // and the rotation amount:
+ if (basePtr.getOpcode() == ISD::ADD
+ && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
+ // Known offset into basePtr
+ int64_t offset = CN->getSExtValue();
+ int64_t rotamt = int64_t((offset & 0xf) - pso);
+
+ if (rotamt < 0)
+ rotamt += 16;
+
+ rotate = DAG.getConstant(rotamt, MVT::i16);
+
+ // Simplify the base pointer for this case:
+ basePtr = basePtr.getOperand(0);
+ if ((offset & ~0xf) > 0) {
basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
basePtr,
- DAG.getConstant(0, PtrVT));
+ DAG.getConstant((offset & ~0xf), PtrVT));
}
-
+ } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
+ || (basePtr.getOpcode() == SPUISD::IndirectAddr
+ && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
+ && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
+ // Plain aligned a-form address: rotate into preferred slot
+ // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
+ int64_t rotamt = -pso;
+ if (rotamt < 0)
+ rotamt += 16;
+ rotate = DAG.getConstant(rotamt, MVT::i16);
+ } else {
// Offset the rotate amount by the basePtr and the preferred slot
// byte offset
+ int64_t rotamt = -pso;
+ if (rotamt < 0)
+ rotamt += 16;
rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
basePtr,
- DAG.getConstant(-vtm->prefslot_byte, PtrVT));
+ DAG.getConstant(rotamt, PtrVT));
}
+ } else {
+ // Unaligned load: must be more pessimistic about addressing modes:
+ if (basePtr.getOpcode() == ISD::ADD) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
+ SDValue Flag;
+
+ SDValue Op0 = basePtr.getOperand(0);
+ SDValue Op1 = basePtr.getOperand(1);
+
+ if (isa<ConstantSDNode>(Op1)) {
+ // Convert the (add <ptr>, <const>) to an indirect address contained
+ // in a register. Note that this is done because we need to avoid
+ // creating a 0(reg) d-form address due to the SPU's block loads.
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
+ the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
+ basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
+ } else {
+ // Convert the (add <arg1>, <arg2>) to an indirect address, which
+ // will likely be lowered as a reg(reg) x-form address.
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
+ }
+ } else {
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(0, PtrVT));
+ }
+
+ // Offset the rotate amount by the basePtr and the preferred slot
+ // byte offset
+ rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(-pso, PtrVT));
+ }
- // Re-emit as a v16i8 vector load
- result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
- LN->getSrcValue(), LN->getSrcValueOffset(),
- LN->isVolatile(), LN->isNonTemporal(), 16);
+  // Do the load as an i128 to allow possible shifting
+ SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
+ lowMemPtr,
+ LN->isVolatile(), LN->isNonTemporal(), 16);
+  // When the size is not greater than the alignment, a single load fetches
+  // all the data
+ if (alignment >= InVT.getSizeInBits()/8) {
// Update the chain
- the_chain = result.getValue(1);
+ the_chain = low.getValue(1);
// Rotate into the preferred slot:
- result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
- result.getValue(0), rotate);
+ result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
+ low.getValue(0), rotate);
// Convert the loaded v16i8 vector to the appropriate vector type
// specified by the operand:
- EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
InVT, (128 / InVT.getSizeInBits()));
result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
- DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));
+ DAG.getNode(ISD::BITCAST, dl, vecVT, result));
+ }
+  // When the alignment is less than the size, we might need two loads
+  // (knowable only at run time)
+  // TODO: if the memory address is composed only of constants, we have
+  // extra knowledge, and might avoid the second load
+ else {
+    // storage position offset from the lower 16-byte-aligned memory chunk
+ SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
+ basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
+    // get a register full of ones. (this implementation is a workaround:
+    // LLVM cannot handle 128-bit signed int constants)
+ SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
+ ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
+
+ SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
+ DAG.getNode(ISD::ADD, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(16, PtrVT)),
+ highMemPtr,
+ LN->isVolatile(), LN->isNonTemporal(), 16);
+
+ the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
+ high.getValue(1));
+
+    // Shift the (possible) high part right to compensate for the
+    // misalignment. If there is no high part (i.e. the value is i64 and the
+    // offset is 4), this will zero out the high value.
+ high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
+ DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant( 16, MVT::i32),
+ offset
+ ));
+
+    // Shift the low part similarly
+ // TODO: add SPUISD::SHL_BYTES
+ low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
+
+ // Merge the two parts
+ result = DAG.getNode(ISD::BITCAST, dl, vecVT,
+ DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
+
+ if (!InVT.isVector()) {
+ result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
+ }
+ }
// Handle extending loads by extending the scalar result:
if (ExtType == ISD::SEXTLOAD) {
result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
@@ -702,21 +742,6 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
retops, sizeof(retops) / sizeof(retops[0]));
return result;
- }
- case ISD::PRE_INC:
- case ISD::PRE_DEC:
- case ISD::POST_INC:
- case ISD::POST_DEC:
- case ISD::LAST_INDEXED_MODE:
- {
- report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
- "than UNINDEXED\n" +
- Twine((unsigned)LN->getAddressingMode()));
- /*NOTREACHED*/
- }
- }
-
- return SDValue();
}
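// A sketch of the merge performed above for a misaligned scalar: with
// offset = basePtr & 0xf and two aligned 16-byte chunks low/high,
//
//   low    = SHL_BYTES(low,  offset)        // start of value moves to byte 0
//   high   = SRL_BYTES(high, 16 - offset)   // zero when nothing spills over
//   result = low | high                     // value left-justified in a quad
//
// after which VEC2PREFSLOT extracts the scalar from the preferred slot.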
/// Custom lower stores for CellSPU
@@ -734,93 +759,103 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
DebugLoc dl = Op.getDebugLoc();
unsigned alignment = SN->getAlignment();
+ SDValue result;
+ EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
+ (128 / StVT.getSizeInBits()));
+  // Get pointer infos to the memory chunk(s) touched by the store
+ uint64_t mpi_offset = SN->getPointerInfo().Offset;
+ mpi_offset -= mpi_offset%16;
+ MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
+ MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
+
+
+ // two sanity checks
+ assert( SN->getAddressingMode() == ISD::UNINDEXED
+          && "we should get only UNINDEXED addresses");
+  // cleanly aligned stores can be selected as-is
+ if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
+ return SDValue();
+
+ SDValue alignLoadVec;
+ SDValue basePtr = SN->getBasePtr();
+ SDValue the_chain = SN->getChain();
+ SDValue insertEltOffs;
+
+ if ((alignment%16) == 0) {
+ ConstantSDNode *CN;
+ // Special cases for a known aligned load to simplify the base pointer
+ // and insertion byte:
+ if (basePtr.getOpcode() == ISD::ADD
+ && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
+ // Known offset into basePtr
+ int64_t offset = CN->getSExtValue();
+
+ // Simplify the base pointer for this case:
+ basePtr = basePtr.getOperand(0);
+ insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+ basePtr,
+ DAG.getConstant((offset & 0xf), PtrVT));
- switch (SN->getAddressingMode()) {
- case ISD::UNINDEXED: {
- // The vector type we really want to load from the 16-byte chunk.
- EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
- VT, (128 / VT.getSizeInBits()));
-
- SDValue alignLoadVec;
- SDValue basePtr = SN->getBasePtr();
- SDValue the_chain = SN->getChain();
- SDValue insertEltOffs;
-
- if (alignment == 16) {
- ConstantSDNode *CN;
- // Special cases for a known aligned load to simplify the base pointer
- // and insertion byte:
- if (basePtr.getOpcode() == ISD::ADD
- && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
- // Known offset into basePtr
- int64_t offset = CN->getSExtValue();
-
- // Simplify the base pointer for this case:
- basePtr = basePtr.getOperand(0);
- insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
- basePtr,
- DAG.getConstant((offset & 0xf), PtrVT));
-
- if ((offset & ~0xf) > 0) {
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
- basePtr,
- DAG.getConstant((offset & ~0xf), PtrVT));
- }
- } else {
- // Otherwise, assume it's at byte 0 of basePtr
- insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
- basePtr,
- DAG.getConstant(0, PtrVT));
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
- basePtr,
- DAG.getConstant(0, PtrVT));
- }
- } else {
- // Unaligned load: must be more pessimistic about addressing modes:
- if (basePtr.getOpcode() == ISD::ADD) {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
- unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
- SDValue Flag;
-
- SDValue Op0 = basePtr.getOperand(0);
- SDValue Op1 = basePtr.getOperand(1);
-
- if (isa<ConstantSDNode>(Op1)) {
- // Convert the (add <ptr>, <const>) to an indirect address contained
- // in a register. Note that this is done because we need to avoid
- // creating a 0(reg) d-form address due to the SPU's block loads.
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
- the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
- basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
- } else {
- // Convert the (add <arg1>, <arg2>) to an indirect address, which
- // will likely be lowered as a reg(reg) x-form address.
- basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
- }
- } else {
+ if ((offset & ~0xf) > 0) {
basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
basePtr,
- DAG.getConstant(0, PtrVT));
+ DAG.getConstant((offset & ~0xf), PtrVT));
}
-
- // Insertion point is solely determined by basePtr's contents
- insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
+ } else {
+ // Otherwise, assume it's at byte 0 of basePtr
+ insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(0, PtrVT));
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
basePtr,
DAG.getConstant(0, PtrVT));
}
+ } else {
+ // Unaligned load: must be more pessimistic about addressing modes:
+ if (basePtr.getOpcode() == ISD::ADD) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
+ SDValue Flag;
+
+ SDValue Op0 = basePtr.getOperand(0);
+ SDValue Op1 = basePtr.getOperand(1);
+
+ if (isa<ConstantSDNode>(Op1)) {
+ // Convert the (add <ptr>, <const>) to an indirect address contained
+ // in a register. Note that this is done because we need to avoid
+ // creating a 0(reg) d-form address due to the SPU's block loads.
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
+ the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
+ basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
+ } else {
+ // Convert the (add <arg1>, <arg2>) to an indirect address, which
+ // will likely be lowered as a reg(reg) x-form address.
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
+ }
+ } else {
+ basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(0, PtrVT));
+ }
- // Load the memory to which to store.
- alignLoadVec = DAG.getLoad(vecVT, dl, the_chain, basePtr,
- SN->getSrcValue(), SN->getSrcValueOffset(),
- SN->isVolatile(), SN->isNonTemporal(), 16);
+ // Insertion point is solely determined by basePtr's contents
+ insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
+ basePtr,
+ DAG.getConstant(0, PtrVT));
+ }
+ // Load the lower part of the memory to which to store.
+ SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
+ lowMemPtr, SN->isVolatile(), SN->isNonTemporal(), 16);
+
+  // If we don't need to store across the 16-byte boundary, one store suffices
+ if (alignment >= StVT.getSizeInBits()/8) {
// Update the chain
- the_chain = alignLoadVec.getValue(1);
+ the_chain = low.getValue(1);
- LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
+ LoadSDNode *LN = cast<LoadSDNode>(low);
SDValue theValue = SN->getValue();
- SDValue result;
if (StVT != VT
&& (theValue.getOpcode() == ISD::AssertZext
@@ -844,48 +879,114 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
insertEltOffs);
- SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
+ SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
theValue);
result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
- vectorizeOp, alignLoadVec,
- DAG.getNode(ISD::BIT_CONVERT, dl,
+ vectorizeOp, low,
+ DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, insertEltOp));
result = DAG.getStore(the_chain, dl, result, basePtr,
- LN->getSrcValue(), LN->getSrcValueOffset(),
+ lowMemPtr,
LN->isVolatile(), LN->isNonTemporal(),
- LN->getAlignment());
-
-#if 0 && !defined(NDEBUG)
- if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
- const SDValue &currentRoot = DAG.getRoot();
-
- DAG.setRoot(result);
- errs() << "------- CellSPU:LowerStore result:\n";
- DAG.dump();
- errs() << "-------\n";
- DAG.setRoot(currentRoot);
- }
-#endif
-
- return result;
- /*UNREACHED*/
- }
- case ISD::PRE_INC:
- case ISD::PRE_DEC:
- case ISD::POST_INC:
- case ISD::POST_DEC:
- case ISD::LAST_INDEXED_MODE:
- {
- report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
- "than UNINDEXED\n" +
- Twine((unsigned)SN->getAddressingMode()));
- /*NOTREACHED*/
- }
+ 16);
+
+ }
+  // Do the store when it might cross the 16-byte memory access boundary.
+ else {
+    // TODO: issue a warning if SN->isVolatile() == true? This is likely not
+    // what the user wanted.
+
+    // address offset from the nearest lower 16-byte-aligned address
+ SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
+ SN->getBasePtr(),
+ DAG.getConstant(0xf, MVT::i32));
+ // 16 - offset
+ SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant( 16, MVT::i32),
+ offset);
+ // 16 - sizeof(Value)
+ SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant( 16, MVT::i32),
+ DAG.getConstant( VT.getSizeInBits()/8,
+ MVT::i32));
+    // get a register full of ones
+ SDValue ones = DAG.getConstant(-1, MVT::v4i32);
+ ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
+
+    // Create the 128-bit masks that have ones where the data to store is
+    // located.
+ SDValue lowmask, himask;
+    // If the value to store doesn't fill an entire 128 bits, zero out the
+    // last bits of the mask so that it covers only the value we want to
+    // store. This is the case e.g. for a store of i32 with align 2.
+ if (!VT.isVector()){
+ Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
+ lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
+ lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
+ surplus);
+ Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
+ Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
+
+ }
+ else {
+ lowmask = ones;
+ Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
+ }
+    // this will be zero if no data goes into the high quad
+ himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
+ offset_compl);
+ lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
+ offset);
+
+ // Load in the old data and zero out the parts that will be overwritten with
+ // the new data to store.
+ SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
+ DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
+ DAG.getConstant( 16, PtrVT)),
+ highMemPtr,
+ SN->isVolatile(), SN->isNonTemporal(), 16);
+ the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
+ hi.getValue(1));
+
+ low = DAG.getNode(ISD::AND, dl, MVT::i128,
+ DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
+ DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
+ hi = DAG.getNode(ISD::AND, dl, MVT::i128,
+ DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
+ DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
+
+ // Shift the Value to store into place. rlow contains the parts that go to
+ // the lower memory chunk, rhi has the parts that go to the upper one.
+ SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
+ rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
+ SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
+ offset_compl);
+
+ // Merge the old data and the new data and store the results
+    // Need to convert vectors to integers here, as 'OR'ing floats asserts
+ rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
+ rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
+ DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
+
+ low = DAG.getStore(the_chain, dl, rlow, basePtr,
+ lowMemPtr,
+ SN->isVolatile(), SN->isNonTemporal(), 16);
+ hi = DAG.getStore(the_chain, dl, rhi,
+ DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
+ DAG.getConstant( 16, PtrVT)),
+ highMemPtr,
+ SN->isVolatile(), SN->isNonTemporal(), 16);
+ result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
+ hi.getValue(0));
}
- return SDValue();
+ return result;
}
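// Worked example of the read-modify-write store above, for an i32 stored at
// offset 14 (offset = basePtr & 0xf), where two bytes spill into the high
// quad. With the value left-justified in a quad:
//
//   lowmask = value_mask >> 14 bytes   // ones at bytes 14..15 of low quad
//   himask  = value_mask <<  2 bytes   // ones at bytes  0..1 of high quad
//   low     = (old_low  & ~lowmask) | (Value >> 14 bytes)
//   hi      = (old_high & ~himask)  | (Value <<  2 bytes)
//
// and both quads are stored back, their chains merged by a TokenFactor.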
//! Generate the address of a constant pool entry.
@@ -993,7 +1094,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(dbits, MVT::i64);
SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
}
return SDValue();
@@ -1013,9 +1114,9 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
MachineRegisterInfo &RegInfo = MF.getRegInfo();
SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
- unsigned ArgOffset = SPUFrameInfo::minStackSize();
+ unsigned ArgOffset = SPUFrameLowering::minStackSize();
unsigned ArgRegIdx = 0;
- unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
+ unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -1080,7 +1181,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
// or we're forced to do vararg
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
+ false, false, 0);
ArgOffset += StackSlotSize;
}
@@ -1091,8 +1193,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
// vararg handling:
if (isVarArg) {
- // FIXME: we should be able to query the argument registers from
- // tablegen generated code.
+ // FIXME: we should be able to query the argument registers from
+ // tablegen generated code.
static const unsigned ArgRegs[] = {
SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
@@ -1117,9 +1219,9 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
FuncInfo->setVarArgsFrameIndex(
MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
- unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
+ unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass, dl);
SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
- SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0,
+ SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
false, false, 0);
Chain = Store.getOperand(0);
MemOps.push_back(Store);
@@ -1163,14 +1265,14 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
unsigned NumOps = Outs.size();
- unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
+ unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
- *DAG.getContext());
+ *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
-
+
const unsigned NumArgRegs = ArgLocs.size();
@@ -1184,7 +1286,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Figure out which arguments are going to go in registers, and which in
// memory.
- unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
+ unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
unsigned ArgRegIdx = 0;
// Keep track of registers passing arguments
@@ -1219,7 +1321,8 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (ArgRegIdx != NumArgRegs) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
false, false, 0));
ArgOffset += StackSlotSize;
}
@@ -1230,7 +1333,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Accumulate how many bytes are to be pushed on the stack, including the
// linkage area, and parameter passing area. According to the SPU ABI,
// we minimally need space for [LR] and [SP].
- unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();
+ unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
// Insert a call sequence start
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
@@ -1311,7 +1414,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (InFlag.getNode())
Ops.push_back(InFlag);
// Returns a chain and a flag for retval copy to use.
- Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
+ Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
&Ops[0], Ops.size());
InFlag = Chain.getValue(1);
@@ -1334,7 +1437,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// If the call has results, copy the values out of the ret val registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];
-
+
SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
InFlag);
Chain = Val.getValue(1);
@@ -1567,7 +1670,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(Value32, MVT::i32);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
break;
}
@@ -1577,7 +1680,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(f64val, MVT::i64);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
break;
}
@@ -1587,7 +1690,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 8> Ops;
Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
}
case MVT::v8i16: {
@@ -1621,7 +1724,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
if (upper == lower) {
// Magic constant that can be matched by IL, ILA, et. al.
SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
- return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ return DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Val, Val, Val, Val));
} else {
@@ -1650,7 +1753,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create lower vector if not a special pattern
if (!lower_special) {
SDValue LO32C = DAG.getConstant(lower, MVT::i32);
- LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
LO32C, LO32C, LO32C, LO32C));
}
@@ -1658,7 +1761,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create upper vector if not a special pattern
if (!upper_special) {
SDValue HI32C = DAG.getConstant(upper, MVT::i32);
- HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
+ HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
HI32C, HI32C, HI32C, HI32C));
}
@@ -1735,14 +1838,14 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
unsigned CurrElt = 0;
unsigned MaxElts = VecVT.getVectorNumElements();
unsigned PrevElt = 0;
- unsigned V0Elt = 0;
bool monotonic = true;
bool rotate = true;
+ int rotamt=0;
EVT maskVT; // which of the c?d instructions to use
if (EltVT == MVT::i8) {
V2EltIdx0 = 16;
- maskVT = MVT::v16i8;
+ maskVT = MVT::v16i8;
} else if (EltVT == MVT::i16) {
V2EltIdx0 = 8;
maskVT = MVT::v8i16;
@@ -1758,7 +1861,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0; i != MaxElts; ++i) {
if (SVN->getMaskElt(i) < 0)
continue;
-
+
unsigned SrcElt = SVN->getMaskElt(i);
if (monotonic) {
@@ -1782,13 +1885,12 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if ((PrevElt == SrcElt - 1)
|| (PrevElt == MaxElts - 1 && SrcElt == 0)) {
PrevElt = SrcElt;
- if (SrcElt == 0)
- V0Elt = i;
} else {
rotate = false;
}
- } else if (i == 0) {
- // First time through, need to keep track of previous element
+ } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
+ // First time or after a "wrap around"
+ rotamt = SrcElt-i;
PrevElt = SrcElt;
} else {
// This isn't a rotation, takes elements from vector 2
@@ -1806,15 +1908,16 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(V2EltOffset, MVT::i32));
- SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
+ SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
maskVT, Pointer);
// Use shuffle mask in SHUFB synthetic instruction:
return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
ShufMaskOp);
} else if (rotate) {
- int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;
-
+ if (rotamt < 0)
+ rotamt +=MaxElts;
+ rotamt *= EltVT.getSizeInBits()/8;
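+      // Example: a v4i32 shuffle mask <1,2,3,0> sets rotamt = 1 on the first
+      // iteration (SrcElt - i), then 1 * 4 == 4 here, so the quad is rotated
+      // left by 4 bytes and the whole mask is realized by one rotate.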
return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
V1, DAG.getConstant(rotamt, MVT::i16));
} else {
@@ -1999,7 +2102,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(scaleShift, MVT::i32));
}
- vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
+ vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
// Replicate the bytes starting at byte 0 across the entire vector (for
// consistency with the notion of a unified register set)
@@ -2069,7 +2172,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(Offset, PtrVT));
// widen the mask when dealing with half vectors
- EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
+ EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
128/ VT.getVectorElementType().getSizeInBits());
SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
@@ -2077,7 +2180,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(SPUISD::SHUFB, dl, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
VecOp,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
return result;
}
@@ -2197,12 +2300,12 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
ConstVec = Op.getOperand(0);
Arg = Op.getOperand(1);
if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
- if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
+ if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
} else {
ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0);
- if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
+ if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
}
}
@@ -2243,7 +2346,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
*/
static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
- EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();
@@ -2419,7 +2522,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
// Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
// selected to a NOP:
- SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
+ SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
SDValue lhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2453,7 +2556,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
ISD::SETGT));
}
- SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
+ SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
SDValue rhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2567,7 +2670,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
// Type to truncate to
EVT VT = Op.getValueType();
MVT simpleVT = VT.getSimpleVT();
- EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();
@@ -2575,7 +2678,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
SDValue Op0 = Op.getOperand(0);
EVT Op0VT = Op0.getValueType();
- if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
+ if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
// Create shuffle mask, least significant doubleword of quadword
unsigned maskHigh = 0x08090a0b;
unsigned maskLow = 0x0c0d0e0f;
@@ -2616,6 +2719,12 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
SDValue Op0 = Op.getOperand(0);
MVT Op0VT = Op0.getValueType().getSimpleVT();
+ // extend i8 & i16 via i32
+ if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
+ Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
+ Op0VT = MVT::i32;
+ }
+
// The type to extend to needs to be a i128 and
// the type to extend from needs to be i64 or i32.
assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
@@ -2640,12 +2749,17 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
DAG.getConstant(31, MVT::i32));
+  // reinterpret as an i128 (SHUFB requires it). This gets lowered away.
+ SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
+ dl, Op0VT, Op0,
+ DAG.getTargetConstant(
+ SPU::GPRCRegClass.getID(),
+ MVT::i32)), 0);
// Shuffle bytes - Copy the sign bits into the upper 64 bits
// and the input value into the lower 64 bits.
SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
- DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i128, Op0), sraVal, shufMask);
-
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
+ extended, sraVal, shufMask);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
}
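// Sketch of the extension above: sraVal replicates the sign bit across a
// whole slot (arithmetic shift right by 31), and the SHUFB mask routes sign
// bytes into the upper half of the quadword and the source value into the
// lower half, roughly result = concat(sign_bytes(Op0), Op0) in the SPU's
// big-endian register layout.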
//! Custom (target-specific) lowering entry point
@@ -2903,8 +3017,8 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
}
break;
}
- case SPUISD::SHLQUAD_L_BITS:
- case SPUISD::SHLQUAD_L_BYTES:
+ case SPUISD::SHL_BITS:
+ case SPUISD::SHL_BYTES:
case SPUISD::ROTBYTES_LEFT: {
SDValue Op1 = N->getOperand(1);
@@ -2982,6 +3096,38 @@ SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const
return TargetLowering::getConstraintType(ConstraintLetter);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+SPUTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ //FIXME: Seems like the supported constraint letters were just copied
+ // from PPC, as the following doesn't correspond to the GCC docs.
+ // I'm leaving it so until someone adds the corresponding lowering support.
+ case 'b':
+ case 'r':
+ case 'f':
+ case 'd':
+ case 'v':
+ case 'y':
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
+
std::pair<unsigned, const TargetRegisterClass*>
SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const
@@ -3086,3 +3232,28 @@ SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The SPU target isn't yet aware of offsets.
return false;
}
+
+// can we compare to Imm without writing it into a register?
+bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+  // ceqi, cgti, etc. all take an s10 operand
+ return isInt<10>(Imm);
+}
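+// A signed 10-bit immediate spans [-512, 511], so isLegalICmpImmediate(300)
+// holds (ceqi/cgti can encode it directly) while isLegalICmpImmediate(600)
+// does not and the constant has to be materialized into a register first.
+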
+
+bool
+SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ const Type * ) const{
+
+ // A-form: 18bit absolute address.
+ if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
+ return true;
+
+ // D-form: reg + 14bit offset
+ if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
+ return true;
+
+ // X-form: reg+reg
+ if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)
+ return true;
+
+ return false;
+}
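+
+// Illustrative AddrMode instances under the rules above (field names from
+// TargetLowering::AddrMode; the values are examples only):
+//
+//   {BaseGV=&g, HasBaseReg=0, Scale=0, BaseOffs=0}     -> A-form, legal
+//   {BaseGV=0,  HasBaseReg=1, Scale=0, BaseOffs=32}    -> D-form, legal
+//   {BaseGV=0,  HasBaseReg=1, Scale=1, BaseOffs=0}     -> X-form, legal
+//   {BaseGV=0,  HasBaseReg=1, Scale=0, BaseOffs=1<<20} -> illegal (offset
+//                                                         exceeds 14 bits)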
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
index 6d3c90b..95d44af 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
@@ -41,8 +41,9 @@ namespace llvm {
CNTB, ///< Count leading ones in bytes
PREFSLOT2VEC, ///< Promote scalar->vector
VEC2PREFSLOT, ///< Extract element 0
- SHLQUAD_L_BITS, ///< Rotate quad left, by bits
- SHLQUAD_L_BYTES, ///< Rotate quad left, by bytes
+ SHL_BITS, ///< Shift quad left, by bits
+ SHL_BYTES, ///< Shift quad left, by bytes
+ SRL_BYTES, ///< Shift quad right, by bytes. Insert zeros.
VEC_ROTL, ///< Vector rotate left
VEC_ROTR, ///< Vector rotate right
ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
@@ -129,6 +130,11 @@ namespace llvm {
ConstraintType getConstraintType(const std::string &ConstraintLetter) const;
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -170,6 +176,19 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
+
+ virtual bool isLegalICmpImmediate(int64_t Imm) const;
+
+ virtual bool isLegalAddressingMode(const AddrMode &AM,
+ const Type *Ty) const;
+
+ /// After allocating this many registers, the allocator should feel
+ /// register pressure. The value is a somewhat random guess, based on the
+    /// number of non-callee-saved registers in the C calling convention.
+ virtual unsigned getRegPressureLimit( const TargetRegisterClass *RC,
+ MachineFunction &MF) const{
+ return 50;
+ }
};
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
index 26d6b4f..f9e6c72 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -16,6 +16,7 @@
#include "SPUInstrBuilder.h"
#include "SPUTargetMachine.h"
#include "SPUGenInstrInfo.inc"
+#include "SPUHazardRecognizers.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -54,6 +55,16 @@ SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
+/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
+/// this target when scheduling the DAG.
+ScheduleHazardRecognizer *SPUInstrInfo::CreateTargetHazardRecognizer(
+ const TargetMachine *TM,
+ const ScheduleDAG *DAG) const {
+ const TargetInstrInfo *TII = TM->getInstrInfo();
+ assert(TII && "No InstrInfo?");
+ return new SPUHazardRecognizer(*TII);
+}
+
unsigned
SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
@@ -129,7 +140,7 @@ SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const
{
unsigned opc;
- bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
+ bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
if (RC == SPU::GPRCRegisterClass) {
opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
} else if (RC == SPU::R64CRegisterClass) {
@@ -164,7 +175,7 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const
{
unsigned opc;
- bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
+ bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
if (RC == SPU::GPRCRegisterClass) {
opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
} else if (RC == SPU::R64CRegisterClass) {
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
index 191e55d..e5e9148 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
@@ -32,6 +32,10 @@ namespace llvm {
///
virtual const SPURegisterInfo &getRegisterInfo() const { return RI; }
+ ScheduleHazardRecognizer *
+ CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const;
+
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
index ca0fe00..25f6fd0 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.td
@@ -416,7 +416,7 @@ multiclass ImmLoadAddress
def lo: ILARegInst<R32C, symbolLo, imm18>;
def lsa: ILAInst<(outs R32C:$rT), (ins symbolLSA:$val),
- [/* no pattern */]>;
+ [(set R32C:$rT, imm18:$val)]>;
}
defm ILA : ImmLoadAddress;
@@ -1167,10 +1167,10 @@ class XSHWRegInst<RegisterClass rclass>:
[(set rclass:$rDest, (sext R16C:$rSrc))]>;
multiclass ExtendHalfwordWord {
- def v4i32: XSHWVecInst<v4i32, v8i16>;
-
+ def v4i32: XSHWVecInst<v8i16, v4i32>;
+
def r16: XSHWRegInst<R32C>;
-
+
def r32: XSHWInRegInst<R32C,
[(set R32C:$rDest, (sext_inreg R32C:$rSrc, i16))]>;
def r64: XSHWInRegInst<R64C, [/* no pattern */]>;
@@ -1385,59 +1385,6 @@ class ORRegInst<RegisterClass rclass>:
ORInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
[(set rclass:$rT, (or rclass:$rA, rclass:$rB))]>;
-// ORCvtForm: OR conversion form
-//
-// This is used to "convert" the preferred slot to its vector equivalent, as
-// well as convert a vector back to its preferred slot.
-//
-// These are effectively no-ops, but need to exist for proper type conversion
-// and type coercion.
-
-class ORCvtForm<dag OOL, dag IOL, list<dag> pattern = [/* no pattern */]>
- : SPUInstr<OOL, IOL, "or\t$rT, $rA, $rA", IntegerOp> {
- bits<7> RA;
- bits<7> RT;
-
- let Pattern = pattern;
-
- let Inst{0-10} = 0b10000010000;
- let Inst{11-17} = RA;
- let Inst{18-24} = RA;
- let Inst{25-31} = RT;
-}
-
-class ORPromoteScalar<RegisterClass rclass>:
- ORCvtForm<(outs VECREG:$rT), (ins rclass:$rA)>;
-
-class ORExtractElt<RegisterClass rclass>:
- ORCvtForm<(outs rclass:$rT), (ins VECREG:$rA)>;
-
-/* class ORCvtRegGPRC<RegisterClass rclass>:
- ORCvtForm<(outs GPRC:$rT), (ins rclass:$rA)>; */
-
-/* class ORCvtGPRCReg<RegisterClass rclass>:
- ORCvtForm<(outs rclass:$rT), (ins GPRC:$rA)>; */
-
-class ORCvtFormR32Reg<RegisterClass rclass, list<dag> pattern = [ ]>:
- ORCvtForm<(outs rclass:$rT), (ins R32C:$rA), pattern>;
-
-class ORCvtFormRegR32<RegisterClass rclass, list<dag> pattern = [ ]>:
- ORCvtForm<(outs R32C:$rT), (ins rclass:$rA), pattern>;
-
-class ORCvtFormR64Reg<RegisterClass rclass, list<dag> pattern = [ ]>:
- ORCvtForm<(outs rclass:$rT), (ins R64C:$rA), pattern>;
-
-class ORCvtFormRegR64<RegisterClass rclass, list<dag> pattern = [ ]>:
- ORCvtForm<(outs R64C:$rT), (ins rclass:$rA), pattern>;
-
-class ORCvtGPRCVec:
- ORCvtForm<(outs VECREG:$rT), (ins GPRC:$rA)>;
-
-class ORCvtVecGPRC:
- ORCvtForm<(outs GPRC:$rT), (ins VECREG:$rA)>;
-
-class ORCvtVecVec:
- ORCvtForm<(outs VECREG:$rT), (ins VECREG:$rA)>;
multiclass BitwiseOr
{
@@ -1468,119 +1415,48 @@ multiclass BitwiseOr
def f64: ORInst<(outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
[/* no pattern */]>;
-
- // scalar->vector promotion, prefslot2vec:
- def v16i8_i8: ORPromoteScalar<R8C>;
- def v8i16_i16: ORPromoteScalar<R16C>;
- def v4i32_i32: ORPromoteScalar<R32C>;
- def v2i64_i64: ORPromoteScalar<R64C>;
- def v4f32_f32: ORPromoteScalar<R32FP>;
- def v2f64_f64: ORPromoteScalar<R64FP>;
-
- // vector->scalar demotion, vec2prefslot:
- def i8_v16i8: ORExtractElt<R8C>;
- def i16_v8i16: ORExtractElt<R16C>;
- def i32_v4i32: ORExtractElt<R32C>;
- def i64_v2i64: ORExtractElt<R64C>;
- def f32_v4f32: ORExtractElt<R32FP>;
- def f64_v2f64: ORExtractElt<R64FP>;
-
- // Conversion from vector to GPRC
- def i128_vec: ORCvtVecGPRC;
-
- // Conversion from GPRC to vector
- def vec_i128: ORCvtGPRCVec;
-
-/*
- // Conversion from register to GPRC
- def i128_r64: ORCvtRegGPRC<R64C>;
- def i128_f64: ORCvtRegGPRC<R64FP>;
- def i128_r32: ORCvtRegGPRC<R32C>;
- def i128_f32: ORCvtRegGPRC<R32FP>;
- def i128_r16: ORCvtRegGPRC<R16C>;
- def i128_r8: ORCvtRegGPRC<R8C>;
-
- // Conversion from GPRC to register
- def r64_i128: ORCvtGPRCReg<R64C>;
- def f64_i128: ORCvtGPRCReg<R64FP>;
- def r32_i128: ORCvtGPRCReg<R32C>;
- def f32_i128: ORCvtGPRCReg<R32FP>;
- def r16_i128: ORCvtGPRCReg<R16C>;
- def r8_i128: ORCvtGPRCReg<R8C>;
-*/
-/*
- // Conversion from register to R32C:
- def r32_r16: ORCvtFormRegR32<R16C>;
- def r32_r8: ORCvtFormRegR32<R8C>;
-
- // Conversion from R32C to register
- def r32_r16: ORCvtFormR32Reg<R16C>;
- def r32_r8: ORCvtFormR32Reg<R8C>;
-*/
-
- // Conversion from R64C to register:
- def r32_r64: ORCvtFormR64Reg<R32C>;
- // def r16_r64: ORCvtFormR64Reg<R16C>;
- // def r8_r64: ORCvtFormR64Reg<R8C>;
-
- // Conversion to R64C from register:
- def r64_r32: ORCvtFormRegR64<R32C>;
- // def r64_r16: ORCvtFormRegR64<R16C>;
- // def r64_r8: ORCvtFormRegR64<R8C>;
-
- // bitconvert patterns:
- def r32_f32: ORCvtFormR32Reg<R32FP,
- [(set R32FP:$rT, (bitconvert R32C:$rA))]>;
- def f32_r32: ORCvtFormRegR32<R32FP,
- [(set R32C:$rT, (bitconvert R32FP:$rA))]>;
-
- def r64_f64: ORCvtFormR64Reg<R64FP,
- [(set R64FP:$rT, (bitconvert R64C:$rA))]>;
- def f64_r64: ORCvtFormRegR64<R64FP,
- [(set R64C:$rT, (bitconvert R64FP:$rA))]>;
}
defm OR : BitwiseOr;
-// scalar->vector promotion patterns (preferred slot to vector):
+//===----------------------------------------------------------------------===//
+// SPU::PREFSLOT2VEC and VEC2PREFSLOT re-interpretations of registers
+//===----------------------------------------------------------------------===//
def : Pat<(v16i8 (SPUprefslot2vec R8C:$rA)),
- (ORv16i8_i8 R8C:$rA)>;
+ (COPY_TO_REGCLASS R8C:$rA, VECREG)>;
def : Pat<(v8i16 (SPUprefslot2vec R16C:$rA)),
- (ORv8i16_i16 R16C:$rA)>;
+ (COPY_TO_REGCLASS R16C:$rA, VECREG)>;
def : Pat<(v4i32 (SPUprefslot2vec R32C:$rA)),
- (ORv4i32_i32 R32C:$rA)>;
+ (COPY_TO_REGCLASS R32C:$rA, VECREG)>;
def : Pat<(v2i64 (SPUprefslot2vec R64C:$rA)),
- (ORv2i64_i64 R64C:$rA)>;
+ (COPY_TO_REGCLASS R64C:$rA, VECREG)>;
def : Pat<(v4f32 (SPUprefslot2vec R32FP:$rA)),
- (ORv4f32_f32 R32FP:$rA)>;
+ (COPY_TO_REGCLASS R32FP:$rA, VECREG)>;
def : Pat<(v2f64 (SPUprefslot2vec R64FP:$rA)),
- (ORv2f64_f64 R64FP:$rA)>;
-
-// ORi*_v*: Used to extract vector element 0 (the preferred slot), otherwise
-// known as converting the vector back to its preferred slot
-
-def : Pat<(SPUvec2prefslot (v16i8 VECREG:$rA)),
- (ORi8_v16i8 VECREG:$rA)>;
+ (COPY_TO_REGCLASS R64FP:$rA, VECREG)>;
+
+def : Pat<(i8 (SPUvec2prefslot (v16i8 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v16i8 VECREG:$rA), R8C)>;
-def : Pat<(SPUvec2prefslot (v8i16 VECREG:$rA)),
- (ORi16_v8i16 VECREG:$rA)>;
+def : Pat<(i16 (SPUvec2prefslot (v8i16 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v8i16 VECREG:$rA), R16C)>;
-def : Pat<(SPUvec2prefslot (v4i32 VECREG:$rA)),
- (ORi32_v4i32 VECREG:$rA)>;
+def : Pat<(i32 (SPUvec2prefslot (v4i32 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v4i32 VECREG:$rA), R32C)>;
-def : Pat<(SPUvec2prefslot (v2i64 VECREG:$rA)),
- (ORi64_v2i64 VECREG:$rA)>;
+def : Pat<(i64 (SPUvec2prefslot (v2i64 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v2i64 VECREG:$rA), R64C)>;
-def : Pat<(SPUvec2prefslot (v4f32 VECREG:$rA)),
- (ORf32_v4f32 VECREG:$rA)>;
+def : Pat<(f32 (SPUvec2prefslot (v4f32 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v4f32 VECREG:$rA), R32FP)>;
-def : Pat<(SPUvec2prefslot (v2f64 VECREG:$rA)),
- (ORf64_v2f64 VECREG:$rA)>;
+def : Pat<(f64 (SPUvec2prefslot (v2f64 VECREG:$rA))),
+ (COPY_TO_REGCLASS (v2f64 VECREG:$rA), R64FP)>;
// Load Register: This is an assembler alias for a bitwise OR of a register
// against itself. It's here because it brings some clarity to assembly
@@ -2093,7 +1969,7 @@ defm EQV: BitEquivalence;
class SHUFBInst<dag OOL, dag IOL, list<dag> pattern>:
RRRForm<0b1000, OOL, IOL, "shufb\t$rT, $rA, $rB, $rC",
- IntegerOp, pattern>;
+ ShuffleOp, pattern>;
class SHUFBVecInst<ValueType resultvec, ValueType maskvec>:
SHUFBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
@@ -2134,7 +2010,7 @@ defm SHUFB : ShuffleBytes;
class SHLHInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b11111010000, OOL, IOL, "shlh\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class SHLHVecInst<ValueType vectype>:
SHLHInst<(outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
@@ -2156,7 +2032,7 @@ defm SHLH : ShiftLeftHalfword;
class SHLHIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b11111010000, OOL, IOL, "shlhi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class SHLHIVecInst<ValueType vectype>:
SHLHIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
@@ -2182,7 +2058,7 @@ def : Pat<(shl R16C:$rA, (i32 uimm7:$val)),
class SHLInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b11111010000, OOL, IOL, "shl\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
multiclass ShiftLeftWord
{
@@ -2201,7 +2077,7 @@ defm SHL: ShiftLeftWord;
class SHLIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b11111010000, OOL, IOL, "shli\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
multiclass ShiftLeftWordImm
{
@@ -2230,7 +2106,7 @@ defm SHLI : ShiftLeftWordImm;
class SHLQBIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b11011011100, OOL, IOL, "shlqbi\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class SHLQBIVecInst<ValueType vectype>:
SHLQBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2259,7 +2135,7 @@ defm SHLQBI : ShiftLeftQuadByBits;
// enforcement, whereas with SHLQBI, we have to "take it on faith."
class SHLQBIIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b11011111100, OOL, IOL, "shlqbii\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class SHLQBIIVecInst<ValueType vectype>:
SHLQBIIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
@@ -2283,7 +2159,7 @@ defm SHLQBII : ShiftLeftQuadByBitsImm;
class SHLQBYInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b11111011100, OOL, IOL, "shlqby\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class SHLQBYVecInst<ValueType vectype>:
SHLQBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2306,7 +2182,7 @@ defm SHLQBY: ShiftLeftQuadBytes;
class SHLQBYIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b11111111100, OOL, IOL, "shlqbyi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class SHLQBYIVecInst<ValueType vectype>:
SHLQBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
@@ -2330,7 +2206,7 @@ defm SHLQBYI : ShiftLeftQuadBytesImm;
class SHLQBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b00111001111, OOL, IOL, "shlqbybi\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class SHLQBYBIVecInst<ValueType vectype>:
SHLQBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2359,7 +2235,7 @@ defm SHLQBYBI : ShiftLeftQuadBytesBitCount;
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
class ROTHInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b00111010000, OOL, IOL, "roth\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class ROTHVecInst<ValueType vectype>:
ROTHInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
@@ -2386,7 +2262,7 @@ def ROTHr16_r32: ROTHInst<(outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
class ROTHIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b00111110000, OOL, IOL, "rothi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class ROTHIVecInst<ValueType vectype>:
ROTHIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
@@ -2413,7 +2289,7 @@ def : Pat<(SPUvec_rotl (v8i16 VECREG:$rA), (i32 uimm7:$val)),
class ROTInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b00011010000, OOL, IOL, "rot\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class ROTVecInst<ValueType vectype>:
ROTInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2461,7 +2337,7 @@ def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))),
class ROTIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b00011110000, OOL, IOL, "roti\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class ROTIVecInst<ValueType vectype, Operand optype, ValueType inttype, PatLeaf pred>:
ROTIInst<(outs VECREG:$rT), (ins VECREG:$rA, optype:$val),
@@ -2491,12 +2367,15 @@ defm ROTI : RotateLeftWordImm;
class ROTQBYInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b00111011100, OOL, IOL, "rotqby\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
-class ROTQBYVecInst<ValueType vectype>:
- ROTQBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
- [(set (vectype VECREG:$rT),
- (SPUrotbytes_left (vectype VECREG:$rA), R32C:$rB))]>;
+class ROTQBYGenInst<ValueType type, RegisterClass rc>:
+ ROTQBYInst<(outs rc:$rT), (ins rc:$rA, R32C:$rB),
+ [(set (type rc:$rT),
+ (SPUrotbytes_left (type rc:$rA), R32C:$rB))]>;
+
+class ROTQBYVecInst<ValueType type>:
+ ROTQBYGenInst<type, VECREG>;
multiclass RotateQuadLeftByBytes
{
@@ -2506,6 +2385,7 @@ multiclass RotateQuadLeftByBytes
def v4f32: ROTQBYVecInst<v4f32>;
def v2i64: ROTQBYVecInst<v2i64>;
def v2f64: ROTQBYVecInst<v2f64>;
+ def i128: ROTQBYGenInst<i128, GPRC>;
}
defm ROTQBY: RotateQuadLeftByBytes;
@@ -2516,12 +2396,15 @@ defm ROTQBY: RotateQuadLeftByBytes;
class ROTQBYIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b00111111100, OOL, IOL, "rotqbyi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
+
+class ROTQBYIGenInst<ValueType type, RegisterClass rclass>:
+ ROTQBYIInst<(outs rclass:$rT), (ins rclass:$rA, u7imm:$val),
+ [(set (type rclass:$rT),
+ (SPUrotbytes_left (type rclass:$rA), (i16 uimm7:$val)))]>;
class ROTQBYIVecInst<ValueType vectype>:
- ROTQBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
- [(set (vectype VECREG:$rT),
- (SPUrotbytes_left (vectype VECREG:$rA), (i16 uimm7:$val)))]>;
+ ROTQBYIGenInst<vectype, VECREG>;
multiclass RotateQuadByBytesImm
{
@@ -2531,6 +2414,7 @@ multiclass RotateQuadByBytesImm
def v4f32: ROTQBYIVecInst<v4f32>;
def v2i64: ROTQBYIVecInst<v2i64>;
def vfi64: ROTQBYIVecInst<v2f64>;
+ def i128: ROTQBYIGenInst<i128, GPRC>;
}
defm ROTQBYI: RotateQuadByBytesImm;
@@ -2539,7 +2423,7 @@ defm ROTQBYI: RotateQuadByBytesImm;
class ROTQBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b00110011100, OOL, IOL,
"rotqbybi\t$rT, $rA, $shift",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQBYBIVecInst<ValueType vectype, RegisterClass rclass>:
ROTQBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, rclass:$shift),
@@ -2564,7 +2448,7 @@ defm ROTQBYBI : RotateQuadByBytesByBitshift;
class ROTQBIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b00011011100, OOL, IOL, "rotqbi\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQBIVecInst<ValueType vectype>:
ROTQBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2589,7 +2473,7 @@ defm ROTQBI: RotateQuadByBitCount;
class ROTQBIIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b00011111100, OOL, IOL, "rotqbii\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQBIIVecInst<ValueType vectype, Operand optype, ValueType inttype,
PatLeaf pred>:
@@ -2624,7 +2508,7 @@ defm ROTQBII : RotateQuadByBitCountImm;
class ROTHMInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b10111010000, OOL, IOL, "rothm\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
def ROTHMv8i16:
ROTHMInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2666,7 +2550,7 @@ def : Pat<(srl R16C:$rA, R8C:$rB),
class ROTHMIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b10111110000, OOL, IOL, "rothmi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
def ROTHMIv8i16:
ROTHMIInst<(outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
@@ -2697,7 +2581,7 @@ def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
// ROTM v4i32 form: See the ROTHM v8i16 comments.
class ROTMInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b10011010000, OOL, IOL, "rotm\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
def ROTMv4i32:
ROTMInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2732,7 +2616,7 @@ def : Pat<(srl R32C:$rA, R8C:$rB),
// ROTMI v4i32 form: See the comment for ROTHM v8i16.
def ROTMIv4i32:
RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
- "rotmi\t$rT, $rA, $val", RotateShift,
+ "rotmi\t$rT, $rA, $val", RotShiftVec,
[(set (v4i32 VECREG:$rT),
(SPUvec_srl VECREG:$rA, (i32 uimm7:$val)))]>;
@@ -2745,7 +2629,7 @@ def : Pat<(SPUvec_srl (v4i32 VECREG:$rA), (i8 uimm7:$val)),
// ROTMI r32 form: know how to complement the immediate value.
def ROTMIr32:
RI7Form<0b10011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
- "rotmi\t$rT, $rA, $val", RotateShift,
+ "rotmi\t$rT, $rA, $val", RotShiftVec,
[(set R32C:$rT, (srl R32C:$rA, (i32 uimm7:$val)))]>;
def : Pat<(srl R32C:$rA, (i16 imm:$val)),
@@ -2762,7 +2646,7 @@ def : Pat<(srl R32C:$rA, (i8 imm:$val)),
class ROTQMBYInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b10111011100, OOL, IOL, "rotqmby\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQMBYVecInst<ValueType vectype>:
ROTQMBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2785,9 +2669,13 @@ multiclass RotateQuadBytes
defm ROTQMBY : RotateQuadBytes;
+def : Pat<(SPUsrl_bytes GPRC:$rA, R32C:$rB),
+ (ROTQMBYr128 GPRC:$rA,
+ (SFIr32 R32C:$rB, 0))>;
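
SFI is "subtract from immediate" (rT = imm - rA), so pairing it with a zero immediate hands rotqmby the negated byte count that the rotate-and-mask instructions treat as a right shift; a one-line check of the identity the pattern relies on (assumed semantics):

    #include <cassert>
    #include <cstdint>

    // SFI computes rT = imm - rA; with imm == 0 this simply negates the count.
    int32_t sfi(int32_t ra, int32_t imm) { return imm - ra; }

    int main() { assert(sfi(5, 0) == -5); }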
+
class ROTQMBYIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b10111111100, OOL, IOL, "rotqmbyi\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQMBYIVecInst<ValueType vectype>:
ROTQMBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
@@ -2827,7 +2715,7 @@ defm ROTQMBYI : RotateQuadBytesImm;
class ROTQMBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b10110011100, OOL, IOL, "rotqmbybi\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQMBYBIVecInst<ValueType vectype>:
ROTQMBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2839,6 +2727,8 @@ multiclass RotateMaskQuadByBitCount
def v8i16: ROTQMBYBIVecInst<v8i16>;
def v4i32: ROTQMBYBIVecInst<v4i32>;
def v2i64: ROTQMBYBIVecInst<v2i64>;
+ def r128: ROTQMBYBIInst<(outs GPRC:$rT), (ins GPRC:$rA, R32C:$rB),
+ [/*no pattern*/]>;
}
defm ROTQMBYBI: RotateMaskQuadByBitCount;
@@ -2850,7 +2740,7 @@ defm ROTQMBYBI: RotateMaskQuadByBitCount;
class ROTQMBIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b10011011100, OOL, IOL, "rotqmbi\t$rT, $rA, $rB",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQMBIVecInst<ValueType vectype>:
ROTQMBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
@@ -2873,13 +2763,19 @@ multiclass RotateMaskQuadByBits
defm ROTQMBI: RotateMaskQuadByBits;
+def : Pat<(srl GPRC:$rA, R32C:$rB),
+ (ROTQMBYBIr128 (ROTQMBIr128 GPRC:$rA,
+ (SFIr32 R32C:$rB, 0)),
+ (SFIr32 R32C:$rB, 0))>;
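
A sketch of the decomposition this pattern relies on: the negated amount -n is handed to both instructions, rotqmbi consuming the bit part (n mod 8) and rotqmbybi the byte part (n / 8), so their composition is a full logical right shift. The helpers below are a simplified model of those semantics, not the ISA's exact encoding:

    #include <cassert>
    #include <cstdint>

    struct Quad { uint64_t hi, lo; };  // a 128-bit value as two 64-bit halves

    // reference 128-bit logical right shift
    Quad srl(Quad v, unsigned n) {
      if (n == 0)   return v;
      if (n >= 128) return Quad{0, 0};
      if (n >= 64)  return Quad{0, v.hi >> (n - 64)};
      return Quad{v.hi >> n, (v.lo >> n) | (v.hi << (64 - n))};
    }

    // rotqmbi shifts right by ((0 - t) & 7) bits,
    // rotqmbybi by (((0 - t) >> 3) & 0x1f) bytes; zeros are shifted in
    Quad rotqmbi(Quad v, int32_t t)   { return srl(v, (0 - t) & 7); }
    Quad rotqmbybi(Quad v, int32_t t) { return srl(v, 8u * (((0 - t) >> 3) & 0x1f)); }

    int main() {
      Quad v = Quad{0x0123456789abcdefULL, 0xfedcba9876543210ULL};
      for (unsigned n = 0; n < 128; ++n) {
        int32_t t = -(int32_t)n;                 // (SFIr32 $rB, 0)  ==>  -n
        Quad got  = rotqmbybi(rotqmbi(v, t), t); // bit part, then byte part
        Quad want = srl(v, n);
        assert(got.hi == want.hi && got.lo == want.lo);
      }
    }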
+
+
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// Rotate quad and mask by bits, immediate
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
class ROTQMBIIInst<dag OOL, dag IOL, list<dag> pattern>:
RI7Form<0b10011111100, OOL, IOL, "rotqmbii\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftQuad, pattern>;
class ROTQMBIIVecInst<ValueType vectype>:
ROTQMBIIInst<(outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
@@ -2907,7 +2803,7 @@ defm ROTQMBII: RotateMaskQuadByBitsImm;
def ROTMAHv8i16:
RRForm<0b01111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
- "rotmah\t$rT, $rA, $rB", RotateShift,
+ "rotmah\t$rT, $rA, $rB", RotShiftVec,
[/* see patterns below - $rB must be negated */]>;
def : Pat<(SPUvec_sra (v8i16 VECREG:$rA), R32C:$rB),
@@ -2923,7 +2819,7 @@ def : Pat<(SPUvec_sra (v8i16 VECREG:$rA), R8C:$rB),
def ROTMAHr16:
RRForm<0b01111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
- "rotmah\t$rT, $rA, $rB", RotateShift,
+ "rotmah\t$rT, $rA, $rB", RotShiftVec,
[/* see patterns below - $rB must be negated */]>;
def : Pat<(sra R16C:$rA, R32C:$rB),
@@ -2939,7 +2835,7 @@ def : Pat<(sra R16C:$rA, R8C:$rB),
def ROTMAHIv8i16:
RRForm<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
- "rotmahi\t$rT, $rA, $val", RotateShift,
+ "rotmahi\t$rT, $rA, $val", RotShiftVec,
[(set (v8i16 VECREG:$rT),
(SPUvec_sra (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;
@@ -2951,7 +2847,7 @@ def : Pat<(SPUvec_sra (v8i16 VECREG:$rA), (i8 uimm7:$val)),
def ROTMAHIr16:
RRForm<0b01111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm_i16:$val),
- "rotmahi\t$rT, $rA, $val", RotateShift,
+ "rotmahi\t$rT, $rA, $val", RotShiftVec,
[(set R16C:$rT, (sra R16C:$rA, (i16 uimm7:$val)))]>;
def : Pat<(sra R16C:$rA, (i32 imm:$val)),
@@ -2962,7 +2858,7 @@ def : Pat<(sra R16C:$rA, (i8 imm:$val)),
def ROTMAv4i32:
RRForm<0b01011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
- "rotma\t$rT, $rA, $rB", RotateShift,
+ "rotma\t$rT, $rA, $rB", RotShiftVec,
[/* see patterns below - $rB must be negated */]>;
def : Pat<(SPUvec_sra (v4i32 VECREG:$rA), R32C:$rB),
@@ -2978,7 +2874,7 @@ def : Pat<(SPUvec_sra (v4i32 VECREG:$rA), R8C:$rB),
def ROTMAr32:
RRForm<0b01011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
- "rotma\t$rT, $rA, $rB", RotateShift,
+ "rotma\t$rT, $rA, $rB", RotShiftVec,
[/* see patterns below - $rB must be negated */]>;
def : Pat<(sra R32C:$rA, R32C:$rB),
@@ -2995,7 +2891,7 @@ def : Pat<(sra R32C:$rA, R8C:$rB),
class ROTMAIInst<dag OOL, dag IOL, list<dag> pattern>:
RRForm<0b01011110000, OOL, IOL,
"rotmai\t$rT, $rA, $val",
- RotateShift, pattern>;
+ RotShiftVec, pattern>;
class ROTMAIVecInst<ValueType vectype, Operand intop, ValueType inttype>:
ROTMAIInst<(outs VECREG:$rT), (ins VECREG:$rA, intop:$val),
@@ -4010,7 +3906,7 @@ def FCGTf32 :
"fcgt\t$rT, $rA, $rB", SPrecFP,
[(set R32C:$rT, (setugt R32FP:$rA, R32FP:$rB))]>;
-def : Pat<(setugt R32FP:$rA, R32FP:$rB),
+def : Pat<(setogt R32FP:$rA, R32FP:$rB),
(FCGTf32 R32FP:$rA, R32FP:$rB)>;
def FCMGTf32 :
@@ -4018,7 +3914,7 @@ def FCMGTf32 :
"fcmgt\t$rT, $rA, $rB", SPrecFP,
[(set R32C:$rT, (setugt (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
-def : Pat<(setugt (fabs R32FP:$rA), (fabs R32FP:$rB)),
+def : Pat<(setogt (fabs R32FP:$rA), (fabs R32FP:$rB)),
(FCMGTf32 R32FP:$rA, R32FP:$rB)>;
//--------------------------------------------------------------------------
@@ -4320,7 +4216,7 @@ def : Pat<(fabs (v4f32 VECREG:$rA)),
// in the odd pipeline)
//===----------------------------------------------------------------------===//
-def ENOP : SPUInstr<(outs), (ins), "enop", ExecNOP> {
+def ENOP : SPUInstr<(outs), (ins), "nop", ExecNOP> {
let Pattern = [];
let Inst{0-10} = 0b10000000010;
@@ -4379,30 +4275,43 @@ def : Pat<(v2f64 (bitconvert (v2i64 VECREG:$src))), (v2f64 VECREG:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 VECREG:$src))), (v2f64 VECREG:$src)>;
def : Pat<(i128 (bitconvert (v16i8 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(i128 (bitconvert (v8i16 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(i128 (bitconvert (v4i32 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(i128 (bitconvert (v2i64 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(i128 (bitconvert (v4f32 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(i128 (bitconvert (v2f64 VECREG:$src))),
- (ORi128_vec VECREG:$src)>;
+ (COPY_TO_REGCLASS VECREG:$src, GPRC)>;
def : Pat<(v16i8 (bitconvert (i128 GPRC:$src))),
- (v16i8 (ORvec_i128 GPRC:$src))>;
+ (v16i8 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
def : Pat<(v8i16 (bitconvert (i128 GPRC:$src))),
- (v8i16 (ORvec_i128 GPRC:$src))>;
+ (v8i16 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
def : Pat<(v4i32 (bitconvert (i128 GPRC:$src))),
- (v4i32 (ORvec_i128 GPRC:$src))>;
+ (v4i32 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
def : Pat<(v2i64 (bitconvert (i128 GPRC:$src))),
- (v2i64 (ORvec_i128 GPRC:$src))>;
+ (v2i64 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
def : Pat<(v4f32 (bitconvert (i128 GPRC:$src))),
- (v4f32 (ORvec_i128 GPRC:$src))>;
+ (v4f32 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
def : Pat<(v2f64 (bitconvert (i128 GPRC:$src))),
- (v2f64 (ORvec_i128 GPRC:$src))>;
+ (v2f64 (COPY_TO_REGCLASS GPRC:$src, VECREG))>;
+
+def : Pat<(i32 (bitconvert R32FP:$rA)),
+ (COPY_TO_REGCLASS R32FP:$rA, R32C)>;
+
+def : Pat<(f32 (bitconvert R32C:$rA)),
+ (COPY_TO_REGCLASS R32C:$rA, R32FP)>;
+
+def : Pat<(i64 (bitconvert R64FP:$rA)),
+ (COPY_TO_REGCLASS R64FP:$rA, R64C)>;
+
+def : Pat<(f64 (bitconvert R64C:$rA)),
+ (COPY_TO_REGCLASS R64C:$rA, R64FP)>;
+
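These scalar bitconvert patterns lower to plain register-class copies because a bitcast changes only the type, never the bits, and the coalescer can then usually eliminate the copy entirely. The same fact at the source level, as a small sketch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // a bitcast moves the bit pattern unchanged between integer and FP views
    float bitsToFloat(uint32_t u) {
      float f;
      std::memcpy(&f, &u, sizeof f);
      return f;
    }

    int main() {
      assert(bitsToFloat(0x3f800000u) == 1.0f);  // IEEE-754 single for 1.0
    }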
//===----------------------------------------------------------------------===//
// Instruction patterns:
@@ -4453,11 +4362,12 @@ def : Pat<(i32 (zext R8C:$rSrc)),
// zext 8->64: Zero extend bytes to double words
def : Pat<(i64 (zext R8C:$rSrc)),
- (ORi64_v2i64 (SELBv4i32 (ROTQMBYv4i32
- (ORv4i32_i32 (ANDIi8i32 R8C:$rSrc, 0xff)),
+ (COPY_TO_REGCLASS (SELBv4i32 (ROTQMBYv4i32
+ (COPY_TO_REGCLASS
+ (ANDIi8i32 R8C:$rSrc,0xff), VECREG),
0x4),
(ILv4i32 0x0),
- (FSMBIv4i32 0x0f0f)))>;
+ (FSMBIv4i32 0x0f0f)), R64C)>;
// anyext 8->16: Extend 8->16 bits, irrespective of sign, preserves high bits
def : Pat<(i16 (anyext R8C:$rSrc)),
@@ -4465,7 +4375,7 @@ def : Pat<(i16 (anyext R8C:$rSrc)),
// anyext 8->32: Extend 8->32 bits, irrespective of sign, preserves high bits
def : Pat<(i32 (anyext R8C:$rSrc)),
- (ORIi8i32 R8C:$rSrc, 0)>;
+ (COPY_TO_REGCLASS R8C:$rSrc, R32C)>;
// sext 16->64: Sign extend halfword to double word
def : Pat<(sext_inreg R64C:$rSrc, i16),
@@ -4489,7 +4399,7 @@ def : Pat<(i32 (zext (and R16C:$rSrc, 0xfff))),
// anyext 16->32: Extend 16->32 bits, irrespective of sign
def : Pat<(i32 (anyext R16C:$rSrc)),
- (ORIi16i32 R16C:$rSrc, 0)>;
+ (COPY_TO_REGCLASS R16C:$rSrc, R32C)>;
//===----------------------------------------------------------------------===//
// Truncates:
@@ -4498,61 +4408,61 @@ def : Pat<(i32 (anyext R16C:$rSrc)),
//===----------------------------------------------------------------------===//
def : Pat<(i8 (trunc GPRC:$src)),
- (ORi8_v16i8
+ (COPY_TO_REGCLASS
(SHUFBgprc GPRC:$src, GPRC:$src,
- (IOHLv4i32 (ILHUv4i32 0x0f0f), 0x0f0f)))>;
+ (IOHLv4i32 (ILHUv4i32 0x0f0f), 0x0f0f)), R8C)>;
def : Pat<(i8 (trunc R64C:$src)),
- (ORi8_v16i8
+ (COPY_TO_REGCLASS
(SHUFBv2i64_m32
- (ORv2i64_i64 R64C:$src),
- (ORv2i64_i64 R64C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0707), 0x0707)))>;
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0707), 0x0707)), R8C)>;
def : Pat<(i8 (trunc R32C:$src)),
- (ORi8_v16i8
+ (COPY_TO_REGCLASS
(SHUFBv4i32_m32
- (ORv4i32_i32 R32C:$src),
- (ORv4i32_i32 R32C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0303), 0x0303)))>;
+ (COPY_TO_REGCLASS R32C:$src, VECREG),
+ (COPY_TO_REGCLASS R32C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0303), 0x0303)), R8C)>;
def : Pat<(i8 (trunc R16C:$src)),
- (ORi8_v16i8
+ (COPY_TO_REGCLASS
(SHUFBv4i32_m32
- (ORv8i16_i16 R16C:$src),
- (ORv8i16_i16 R16C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0303), 0x0303)))>;
+ (COPY_TO_REGCLASS R16C:$src, VECREG),
+ (COPY_TO_REGCLASS R16C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0303), 0x0303)), R8C)>;
def : Pat<(i16 (trunc GPRC:$src)),
- (ORi16_v8i16
+ (COPY_TO_REGCLASS
(SHUFBgprc GPRC:$src, GPRC:$src,
- (IOHLv4i32 (ILHUv4i32 0x0e0f), 0x0e0f)))>;
+ (IOHLv4i32 (ILHUv4i32 0x0e0f), 0x0e0f)), R16C)>;
def : Pat<(i16 (trunc R64C:$src)),
- (ORi16_v8i16
+ (COPY_TO_REGCLASS
(SHUFBv2i64_m32
- (ORv2i64_i64 R64C:$src),
- (ORv2i64_i64 R64C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0607), 0x0607)))>;
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0607), 0x0607)), R16C)>;
def : Pat<(i16 (trunc R32C:$src)),
- (ORi16_v8i16
+ (COPY_TO_REGCLASS
(SHUFBv4i32_m32
- (ORv4i32_i32 R32C:$src),
- (ORv4i32_i32 R32C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0203), 0x0203)))>;
+ (COPY_TO_REGCLASS R32C:$src, VECREG),
+ (COPY_TO_REGCLASS R32C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0203), 0x0203)), R16C)>;
def : Pat<(i32 (trunc GPRC:$src)),
- (ORi32_v4i32
+ (COPY_TO_REGCLASS
(SHUFBgprc GPRC:$src, GPRC:$src,
- (IOHLv4i32 (ILHUv4i32 0x0c0d), 0x0e0f)))>;
+ (IOHLv4i32 (ILHUv4i32 0x0c0d), 0x0e0f)), R32C)>;
def : Pat<(i32 (trunc R64C:$src)),
- (ORi32_v4i32
+ (COPY_TO_REGCLASS
(SHUFBv2i64_m32
- (ORv2i64_i64 R64C:$src),
- (ORv2i64_i64 R64C:$src),
- (IOHLv4i32 (ILHUv4i32 0x0405), 0x0607)))>;
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (COPY_TO_REGCLASS R64C:$src, VECREG),
+ (IOHLv4i32 (ILHUv4i32 0x0405), 0x0607)), R32C)>;
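
In these rewritten truncate patterns, the ILHU/IOHL pair materializes a 32-bit shuffle-control word (0x0c0d0e0f for the GPRC-to-i32 case above) whose bytes index into the 32-byte concatenation of the two shufb sources; selecting bytes 12-15 extracts the low word of the big-endian 128-bit value into the preferred slot. A simplified model of shufb's byte selection, ignoring the special control codes >= 0x80:

    #include <cassert>
    #include <cstdint>

    // result byte i = concat(A, B)[c & 0x1f], where c is control byte i
    uint8_t shufbByte(const uint8_t A[16], const uint8_t B[16], uint8_t c) {
      return (c & 0x10) ? B[c & 0x0f] : A[c & 0x0f];
    }

    int main() {
      uint8_t A[16], B[16];
      for (int i = 0; i < 16; ++i) { A[i] = i; B[i] = 16 + i; }
      const uint8_t ctl[4] = {0x0c, 0x0d, 0x0e, 0x0f};  // the 0x0c0d0e0f word
      for (int i = 0; i < 4; ++i)
        assert(shufbByte(A, B, ctl[i]) == 12 + i);      // bytes 12..15 of A
    }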
//===----------------------------------------------------------------------===//
// Address generation: SPU, like PPC, has to split addresses into high and
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
index 25ba88a..99aaeb0 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
@@ -24,9 +24,8 @@ SPULinuxMCAsmInfo::SPULinuxMCAsmInfo(const Target &T, StringRef TT) {
GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
- // Has leb128, .loc and .file
+ // Has leb128
HasLEB128 = true;
- HasDotLocAndDotFile = true;
SupportsDebugInformation = true;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
index 647da30..a6e621f 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
@@ -19,16 +19,16 @@ def SPU_GenControl : SDTypeProfile<1, 1, []>;
def SPUshufmask : SDNode<"SPUISD::SHUFFLE_MASK", SPU_GenControl, []>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPUCallSeq,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPUCallSeq,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// Operand constraints:
//===----------------------------------------------------------------------===//
def SDT_SPUCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SPUcall : SDNode<"SPUISD::CALL", SDT_SPUCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
// Operand type constraints for vector shuffle/permute operations
@@ -83,10 +83,6 @@ def SPUcntb : SDNode<"SPUISD::CNTB", SDTIntUnaryOp>;
// SPUISelLowering.h):
def SPUshuffle: SDNode<"SPUISD::SHUFB", SDT_SPUshuffle, []>;
-// Shift left quadword by bits and bytes
-def SPUshlquad_l_bits: SDNode<"SPUISD::SHLQUAD_L_BITS", SPUvecshift_type, []>;
-def SPUshlquad_l_bytes: SDNode<"SPUISD::SHLQUAD_L_BYTES", SPUvecshift_type, []>;
-
// Vector shifts (ISD::SHL,SRL,SRA are for _integers_ only):
def SPUvec_shl: SDNode<"ISD::SHL", SPUvecshift_type, []>;
def SPUvec_srl: SDNode<"ISD::SRL", SPUvecshift_type, []>;
@@ -105,6 +101,12 @@ def SPUrotbytes_left: SDNode<"SPUISD::ROTBYTES_LEFT",
def SPUrotbytes_left_bits : SDNode<"SPUISD::ROTBYTES_LEFT_BITS",
SPUvecshift_type>;
+// Shift the entire quadword left by bytes/bits. Zeros are shifted in on the right.
+// SHL_BITS is the same as SHL for i128, but ISD::SHL is not implemented for i128.
+def SPUshlquad_l_bytes: SDNode<"SPUISD::SHL_BYTES", SPUvecshift_type, []>;
+def SPUshlquad_l_bits: SDNode<"SPUISD::SHL_BITS", SPUvecshift_type, []>;
+def SPUsrl_bytes: SDNode<"SPUISD::SRL_BYTES", SPUvecshift_type, []>;
+
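These nodes exist because a full i128 shift is built from two machine operations, one byte-granular and one bit-granular: a left shift by n becomes a byte shift by n/8 followed by a bit shift by n%8. A quick check of that decomposition (plain C++ model, not the ISA encoding):

    #include <cassert>
    #include <cstdint>

    struct Quad { uint64_t hi, lo; };  // a 128-bit value as two 64-bit halves

    Quad shl(Quad v, unsigned n) {     // reference 128-bit left shift
      if (n == 0)   return v;
      if (n >= 128) return Quad{0, 0};
      if (n >= 64)  return Quad{v.lo << (n - 64), 0};
      return Quad{(v.hi << n) | (v.lo >> (64 - n)), v.lo << n};
    }

    int main() {
      Quad v = Quad{0x0123456789abcdefULL, 0xfedcba9876543210ULL};
      for (unsigned n = 0; n < 128; ++n) {
        Quad bytesThenBits = shl(shl(v, 8 * (n / 8)), n % 8); // SHL_BYTES, SHL_BITS
        Quad direct = shl(v, n);
        assert(bytesThenBits.hi == direct.hi && bytesThenBits.lo == direct.lo);
      }
    }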
// SPU form select mask for bytes, immediate
def SPUselmask: SDNode<"SPUISD::SELECT_MASK", SPUselmask_type, []>;
@@ -154,4 +156,4 @@ class NoEncode<string E> {
//===----------------------------------------------------------------------===//
def retflag : SDNode<"SPUISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp b/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp
new file mode 100644
index 0000000..e2bd2d7
--- /dev/null
+++ b/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp
@@ -0,0 +1,153 @@
+//===-- SPUNopFiller.cpp - Add nops/lnops to align the pipelines---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The final pass just before assembly printing. This pass is the last
+// checkpoint where nops and lnops are added to the instruction stream to
+// satisfy the dual-issue requirements. The actual dual-issue scheduling is
+// done (TODO: nowhere, currently)
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPU.h"
+#include "SPUTargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+ struct SPUNopFiller : public MachineFunctionPass {
+
+ TargetMachine &TM;
+ const TargetInstrInfo *TII;
+ const InstrItineraryData *IID;
+ bool isEvenPlace; // the instruction slot (mem address) at hand is even/odd
+
+ static char ID;
+ SPUNopFiller(TargetMachine &tm)
+ : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()),
+ IID(tm.getInstrItineraryData())
+ {
+ DEBUG( dbgs() << "********** SPU Nop filler **********\n" ; );
+ }
+
+ virtual const char *getPassName() const {
+ return "SPU nop/lnop Filler";
+ }
+
+ void runOnMachineBasicBlock(MachineBasicBlock &MBB);
+
+ bool runOnMachineFunction(MachineFunction &F) {
+ isEvenPlace = true; // all functions get an .align 3 directive at start
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end();
+ FI != FE; ++FI)
+ runOnMachineBasicBlock(*FI);
+ return true; // no further modifications are made after this pass; just print it!
+ }
+
+ typedef enum { none = 0, // no more instructions in this function / BB
+ pseudo = 1, // this does not get executed
+ even = 2,
+ odd = 3 } SPUOpPlace;
+ SPUOpPlace getOpPlacement( MachineInstr &instr );
+
+ };
+ char SPUNopFiller::ID = 0;
+
+}
+
+// Fill a BasicBlock to alignment.
+// In the assembly we align the functions to 'even' addresses, but
+// basic blocks have an implicit alignment. We define basic blocks
+// to have the same, even, alignment.
+void SPUNopFiller::
+runOnMachineBasicBlock(MachineBasicBlock &MBB)
+{
+ assert( isEvenPlace && "basic block starts at an odd address");
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
+ {
+ SPUOpPlace this_optype, next_optype;
+ MachineBasicBlock::iterator J = I;
+ J++;
+
+ this_optype = getOpPlacement( *I );
+ next_optype = none;
+ while (J!=MBB.end()){
+ next_optype = getOpPlacement( *J );
+ ++J;
+ if (next_optype != pseudo )
+ break;
+ }
+
+ // pad: odd(wrong), even(wrong), ...
+ // to: nop(corr), odd(corr), even(corr)...
+ if( isEvenPlace && this_optype == odd && next_optype == even ) {
+ DEBUG( dbgs() <<"Adding NOP before: "; );
+ DEBUG( I->dump(); );
+ BuildMI(MBB, I, I->getDebugLoc(), TII->get(SPU::ENOP));
+ isEvenPlace=false;
+ }
+
+ // pad: even(wrong), odd(wrong), ...
+ // to: lnop(corr), even(corr), odd(corr)...
+ else if ( !isEvenPlace && this_optype == even && next_optype == odd){
+ DEBUG( dbgs() <<"Adding LNOP before: "; );
+ DEBUG( I->dump(); );
+ BuildMI(MBB, I, I->getDebugLoc(), TII->get(SPU::LNOP));
+ isEvenPlace=true;
+ }
+
+ // now go to next mem slot
+ if( this_optype != pseudo )
+ isEvenPlace = !isEvenPlace;
+
+ }
+
+ // pad the end of the basic block
+ if( !isEvenPlace ){
+ MachineBasicBlock::iterator J = MBB.end();
+ J--;
+ if (getOpPlacement( *J ) == odd) {
+ DEBUG( dbgs() <<"Padding basic block with NOP\n"; );
+ BuildMI(MBB, J, J->getDebugLoc(), TII->get(SPU::ENOP));
+ }
+ else {
+ J++;
+ DEBUG( dbgs() <<"Padding basic block with LNOP\n"; );
+ BuildMI(MBB, J, DebugLoc(), TII->get(SPU::LNOP));
+ }
+ isEvenPlace=true;
+ }
+}
+
+FunctionPass *llvm::createSPUNopFillerPass(SPUTargetMachine &tm) {
+ return new SPUNopFiller(tm);
+}
+
+// Figure out if 'instr' is executed in the even or odd pipeline
+SPUNopFiller::SPUOpPlace
+SPUNopFiller::getOpPlacement( MachineInstr &instr ) {
+ int sc = instr.getDesc().getSchedClass();
+ const InstrStage *stage = IID->beginStage(sc);
+ unsigned FUs = stage->getUnits();
+ SPUOpPlace retval;
+
+ switch( FUs ) {
+ case 0: retval = pseudo; break;
+ case 1: retval = odd; break;
+ case 2: retval = even; break;
+ default: retval = pseudo;
+ assert( false && "got unknown FuncUnit\n");
+ break;
+ };
+ return retval;
+}
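
A toy version of the parity rule the pass enforces (simplified: it pads on every mismatch, whereas the pass above also peeks at the following instruction before inserting a nop/lnop):

    #include <cassert>
    #include <string>
    #include <vector>

    enum Pipe { EVEN, ODD };

    // pad with "nop" (even pipe) / "lnop" (odd pipe) so each op's pipe
    // matches the parity of the slot it lands in
    std::vector<std::string> fill(const std::vector<Pipe> &ops) {
      std::vector<std::string> out;
      bool evenSlot = true;                   // functions start aligned
      for (Pipe p : ops) {
        if (evenSlot && p == ODD)   { out.push_back("nop");  evenSlot = false; }
        if (!evenSlot && p == EVEN) { out.push_back("lnop"); evenSlot = true;  }
        out.push_back(p == EVEN ? "even-op" : "odd-op");
        evenSlot = !evenSlot;
      }
      return out;
    }

    int main() {
      auto got = fill({ODD, EVEN});           // both start in the wrong slot
      assert((got == std::vector<std::string>{"nop", "odd-op", "even-op"}));
    }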
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUOperands.td b/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
index e1a0358..96cde51 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
@@ -143,7 +143,7 @@ def immU16 : PatLeaf<(imm), [{
def imm18 : PatLeaf<(imm), [{
// imm18 predicate: True if the immediate fits into an 18-bit unsigned field.
int Value = (int) N->getZExtValue();
- return ((Value & ((1 << 19) - 1)) == Value);
+ return isUInt<18>(Value);
}]>;
def lo16 : PatLeaf<(imm), [{
@@ -203,7 +203,7 @@ def FPimm_sext16 : SDNodeXForm<fpimm, [{
def FPimm_u18 : SDNodeXForm<fpimm, [{
float fval = N->getValueAPF().convertToFloat();
- return getI32Imm(FloatToBits(fval) & ((1 << 19) - 1));
+ return getI32Imm(FloatToBits(fval) & ((1 << 18) - 1));
}]>;
def fpimmSExt16 : PatLeaf<(fpimm), [{
@@ -225,7 +225,7 @@ def hi16_f32 : PatLeaf<(fpimm), [{
def fpimm18 : PatLeaf<(fpimm), [{
if (N->getValueType(0) == MVT::f32) {
uint32_t Value = FloatToBits(N->getValueAPF().convertToFloat());
- return ((Value & ((1 << 19) - 1)) == Value);
+ return isUInt<18>(Value);
}
return false;
@@ -654,7 +654,11 @@ def memrr : Operand<iPTR> {
// A-form : abs (256K LSA offset)
// D-form(2): [r+I7] (7-bit signed offset + reg)
-def dform_addr : ComplexPattern<iPTR, 2, "SelectDFormAddr", [], []>;
-def xform_addr : ComplexPattern<iPTR, 2, "SelectXFormAddr", [], []>;
-def aform_addr : ComplexPattern<iPTR, 2, "SelectAFormAddr", [], []>;
-def dform2_addr : ComplexPattern<iPTR, 2, "SelectDForm2Addr", [], []>;
+def dform_addr : ComplexPattern<iPTR, 2, "SelectDFormAddr",
+ [], [SDNPWantRoot]>;
+def xform_addr : ComplexPattern<iPTR, 2, "SelectXFormAddr",
+ [], [SDNPWantRoot]>;
+def aform_addr : ComplexPattern<iPTR, 2, "SelectAFormAddr",
+ [], [SDNPWantRoot]>;
+def dform2_addr : ComplexPattern<iPTR, 2, "SelectDForm2Addr",
+ [], [SDNPWantRoot]>;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
index cf71891..0bdd50a 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -18,7 +18,7 @@
#include "SPUInstrBuilder.h"
#include "SPUSubtarget.h"
#include "SPUMachineFunction.h"
-#include "SPUFrameInfo.h"
+#include "SPUFrameLowering.h"
#include "llvm/Constants.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -30,7 +30,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -240,25 +240,6 @@ BitVector SPURegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
-// needsFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-//
-static bool needsFP(const MachineFunction &MF) {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
-}
-
-//--------------------------------------------------------------------------
-// hasFP - Return true if the specified function actually has a dedicated frame
-// pointer register. This is true if the function needs a frame pointer and has
-// a non-zero stack size.
-bool
-SPURegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->getStackSize() && needsFP(MF);
-}
-
//--------------------------------------------------------------------------
void
SPURegisterInfo::eliminateCallFramePseudoInstr(MachineFunction &MF,
@@ -302,7 +283,7 @@ SPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
MachineOperand &MO = MI.getOperand(OpNo);
// Offset is biased by $lr's slot at the bottom.
- Offset += MO.getImm() + MFI->getStackSize() + SPUFrameInfo::minStackSize();
+ Offset += MO.getImm() + MFI->getStackSize() + SPUFrameLowering::minStackSize();
assert((Offset & 0xf) == 0
&& "16-byte alignment violated in eliminateFrameIndex");
@@ -329,225 +310,6 @@ SPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
}
}
-/// determineFrameLayout - Determine the size of the frame and maximum call
-/// frame size.
-void
-SPURegisterInfo::determineFrameLayout(MachineFunction &MF) const
-{
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Get the number of bytes to allocate from the FrameInfo
- unsigned FrameSize = MFI->getStackSize();
-
- // Get the alignments provided by the target, and the maximum alignment
- // (if any) of the fixed frame objects.
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- unsigned Align = std::max(TargetAlign, MFI->getMaxAlignment());
- assert(isPowerOf2_32(Align) && "Alignment is not power of 2");
- unsigned AlignMask = Align - 1;
-
- // Get the maximum call frame size of all the calls.
- unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
-
- // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
- // that allocations will be aligned.
- if (MFI->hasVarSizedObjects())
- maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
-
- // Update maximum call frame size.
- MFI->setMaxCallFrameSize(maxCallFrameSize);
-
- // Include call frame size in total.
- FrameSize += maxCallFrameSize;
-
- // Make sure the frame is aligned.
- FrameSize = (FrameSize + AlignMask) & ~AlignMask;
-
- // Update frame info.
- MFI->setStackSize(FrameSize);
-}
-
-void SPURegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS)
- const {
- // Mark LR and SP unused, since the prolog spills them to stack and
- // we don't want anyone else to spill them for us.
- //
- // Also, unless R2 is really used someday, don't spill it automatically.
- MF.getRegInfo().setPhysRegUnused(SPU::R0);
- MF.getRegInfo().setPhysRegUnused(SPU::R1);
- MF.getRegInfo().setPhysRegUnused(SPU::R2);
-
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterClass *RC = &SPU::R32CRegClass;
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
-
-
-}
-
-void SPURegisterInfo::emitPrologue(MachineFunction &MF) const
-{
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo &MMI = MF.getMMI();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Prepare for debug frame info.
- bool hasDebugInfo = MMI.hasDebugInfo();
- MCSymbol *FrameLabel = 0;
-
- // Move MBBI back to the beginning of the function.
- MBBI = MBB.begin();
-
- // Work out frame sizes.
- determineFrameLayout(MF);
- int FrameSize = MFI->getStackSize();
-
- assert((FrameSize & 0xf) == 0
- && "SPURegisterInfo::emitPrologue: FrameSize not aligned");
-
- // the "empty" frame size is 16 - just the register scavenger spill slot
- if (FrameSize > 16 || MFI->adjustsStack()) {
- FrameSize = -(FrameSize + SPUFrameInfo::minStackSize());
- if (hasDebugInfo) {
- // Mark effective beginning of when frame pointer becomes valid.
- FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL)).addSym(FrameLabel);
- }
-
- // Adjust stack pointer, spilling $lr -> 16($sp) and $sp -> -FrameSize($sp)
- // for the ABI
- BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr32), SPU::R0).addImm(16)
- .addReg(SPU::R1);
- if (isInt<10>(FrameSize)) {
- // Spill $sp to adjusted $sp
- BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr32), SPU::R1).addImm(FrameSize)
- .addReg(SPU::R1);
- // Adjust $sp by required amout
- BuildMI(MBB, MBBI, dl, TII.get(SPU::AIr32), SPU::R1).addReg(SPU::R1)
- .addImm(FrameSize);
- } else if (isInt<16>(FrameSize)) {
- // Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
- // $r2 to adjust $sp:
- BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr128), SPU::R2)
- .addImm(-16)
- .addReg(SPU::R1);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::ILr32), SPU::R2)
- .addImm(FrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::STQXr32), SPU::R1)
- .addReg(SPU::R2)
- .addReg(SPU::R1);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::Ar32), SPU::R1)
- .addReg(SPU::R1)
- .addReg(SPU::R2);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::SFIr32), SPU::R2)
- .addReg(SPU::R2)
- .addImm(16);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::LQXr128), SPU::R2)
- .addReg(SPU::R2)
- .addReg(SPU::R1);
- } else {
- report_fatal_error("Unhandled frame size: " + Twine(FrameSize));
- }
-
- if (hasDebugInfo) {
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-
- // Show update of SP.
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
-
- // Add callee saved registers to move list.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
- unsigned Reg = CSI[I].getReg();
- if (Reg == SPU::R0) continue;
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
- }
-
- // Mark effective beginning of when frame pointer is ready.
- MCSymbol *ReadyLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL)).addSym(ReadyLabel);
-
- MachineLocation FPDst(SPU::R1);
- MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
- }
- } else {
- // This is a leaf function -- insert a branch hint iff there are
- // sufficient number instructions in the basic block. Note that
- // this is just a best guess based on the basic block's size.
- if (MBB.size() >= (unsigned) SPUFrameInfo::branchHintPenalty()) {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- dl = MBBI->getDebugLoc();
-
- // Insert terminator label
- BuildMI(MBB, MBBI, dl, TII.get(SPU::PROLOG_LABEL))
- .addSym(MMI.getContext().CreateTempSymbol());
- }
- }
-}
-
-void
-SPURegisterInfo::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
-{
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- int FrameSize = MFI->getStackSize();
- int LinkSlotOffset = SPUFrameInfo::stackSlotSize();
- DebugLoc dl = MBBI->getDebugLoc();
-
- assert(MBBI->getOpcode() == SPU::RET &&
- "Can only insert epilog into returning blocks");
- assert((FrameSize & 0xf) == 0
- && "SPURegisterInfo::emitEpilogue: FrameSize not aligned");
-
- // the "empty" frame size is 16 - just the register scavenger spill slot
- if (FrameSize > 16 || MFI->adjustsStack()) {
- FrameSize = FrameSize + SPUFrameInfo::minStackSize();
- if (isInt<10>(FrameSize + LinkSlotOffset)) {
- // Reload $lr, adjust $sp by required amount
- // Note: We do this to slightly improve dual issue -- not by much, but it
- // is an opportunity for dual issue.
- BuildMI(MBB, MBBI, dl, TII.get(SPU::LQDr128), SPU::R0)
- .addImm(FrameSize + LinkSlotOffset)
- .addReg(SPU::R1);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::AIr32), SPU::R1)
- .addReg(SPU::R1)
- .addImm(FrameSize);
- } else if (FrameSize <= (1 << 16) - 1 && FrameSize >= -(1 << 16)) {
- // Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
- // $r2 to adjust $sp:
- BuildMI(MBB, MBBI, dl, TII.get(SPU::STQDr128), SPU::R2)
- .addImm(16)
- .addReg(SPU::R1);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::ILr32), SPU::R2)
- .addImm(FrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::Ar32), SPU::R1)
- .addReg(SPU::R1)
- .addReg(SPU::R2);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::LQDr128), SPU::R0)
- .addImm(16)
- .addReg(SPU::R1);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::SFIr32), SPU::R2).
- addReg(SPU::R2)
- .addImm(16);
- BuildMI(MBB, MBBI, dl, TII.get(SPU::LQXr128), SPU::R2)
- .addReg(SPU::R2)
- .addReg(SPU::R1);
- } else {
- report_fatal_error("Unhandled frame size: " + Twine(FrameSize));
- }
- }
-}
-
unsigned
SPURegisterInfo::getRARegister() const
{
@@ -560,26 +322,16 @@ SPURegisterInfo::getFrameRegister(const MachineFunction &MF) const
return SPU::R1;
}
-void
-SPURegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const
-{
- // Initial state of the frame pointer is R1.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(SPU::R1, 0);
- Moves.push_back(MachineMove(0, Dst, Src));
-}
-
-
int
SPURegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
// FIXME: Most probably dwarf numbers differs for Linux and Darwin
return SPUGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}
-int
+int
SPURegisterInfo::convertDFormToXForm(int dFormOpcode) const
{
- switch(dFormOpcode)
+ switch(dFormOpcode)
{
case SPU::AIr32: return SPU::Ar32;
case SPU::LQDr32: return SPU::LQXr32;
@@ -602,10 +354,10 @@ SPURegisterInfo::convertDFormToXForm(int dFormOpcode) const
// TODO this is already copied from PPC. Could this convenience function
// be moved to the RegScavenger class?
-unsigned
-SPURegisterInfo::findScratchRegister(MachineBasicBlock::iterator II,
+unsigned
+SPURegisterInfo::findScratchRegister(MachineBasicBlock::iterator II,
RegScavenger *RS,
- const TargetRegisterClass *RC,
+ const TargetRegisterClass *RC,
int SPAdj) const
{
assert(RS && "Register scavenging must be on");
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
index aedb769..641da04 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
@@ -33,7 +33,7 @@ namespace llvm {
public:
SPURegisterInfo(const SPUSubtarget &subtarget, const TargetInstrInfo &tii);
-
+
//! Translate a register's enum value to a register number
/*!
This method translates a register's enum value to it's regiser number,
@@ -56,8 +56,6 @@ namespace llvm {
//! Return the reserved registers
BitVector getReservedRegs(const MachineFunction &MF) const;
- //! Prediate: Target has dedicated frame pointer
- bool hasFP(const MachineFunction &MF) const;
//! Eliminate the call frame setup pseudo-instructions
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
@@ -65,21 +63,11 @@ namespace llvm {
//! Convert frame indicies into machine operands
void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
RegScavenger *RS = NULL) const;
- //! Determine the frame's layour
- void determineFrameLayout(MachineFunction &MF) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
- //! Emit the function prologue
- void emitPrologue(MachineFunction &MF) const;
- //! Emit the function epilogue
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
//! Get return address register (LR, aka R0)
unsigned getRARegister() const;
//! Get the stack frame register (SP, aka R1)
unsigned getFrameRegister(const MachineFunction &MF) const;
- //! Perform target-specific stack frame setup.
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
//------------------------------------------------------------------------
// New methods added:
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td b/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
index a0b581f..9cd3c23 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
@@ -32,11 +32,12 @@ def FPInt : InstrItinClass; // EVEN_UNIT (FP<->integer)
def ByteOp : InstrItinClass; // EVEN_UNIT
def IntegerOp : InstrItinClass; // EVEN_UNIT
def IntegerMulDiv: InstrItinClass; // EVEN_UNIT
-def RotateShift : InstrItinClass; // EVEN_UNIT
+def RotShiftVec : InstrItinClass; // EVEN_UNIT Inter vector
+def RotShiftQuad : InstrItinClass; // ODD_UNIT Entire quad
def ImmLoad : InstrItinClass; // EVEN_UNIT
/* Note: The itinerary for the Cell SPU is somewhat contrived... */
-def SPUItineraries : ProcessorItineraries<[ODD_UNIT, EVEN_UNIT], [
+def SPUItineraries : ProcessorItineraries<[ODD_UNIT, EVEN_UNIT], [], [
InstrItinData<LoadStore , [InstrStage<6, [ODD_UNIT]>]>,
InstrItinData<BranchHints , [InstrStage<6, [ODD_UNIT]>]>,
InstrItinData<BranchResolv, [InstrStage<4, [ODD_UNIT]>]>,
@@ -51,7 +52,8 @@ def SPUItineraries : ProcessorItineraries<[ODD_UNIT, EVEN_UNIT], [
InstrItinData<FPInt , [InstrStage<2, [EVEN_UNIT]>]>,
InstrItinData<ByteOp , [InstrStage<4, [EVEN_UNIT]>]>,
InstrItinData<IntegerOp , [InstrStage<2, [EVEN_UNIT]>]>,
- InstrItinData<RotateShift , [InstrStage<4, [EVEN_UNIT]>]>,
+ InstrItinData<RotShiftVec , [InstrStage<4, [EVEN_UNIT]>]>,
+ InstrItinData<RotShiftQuad, [InstrStage<4, [ODD_UNIT]>]>,
InstrItinData<IntegerMulDiv,[InstrStage<7, [EVEN_UNIT]>]>,
InstrItinData<ImmLoad , [InstrStage<2, [EVEN_UNIT]>]>
]>;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
index 0f18b7f..07c8352 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
@@ -14,6 +14,8 @@
#include "SPUSubtarget.h"
#include "SPU.h"
#include "SPUGenSubtarget.inc"
+#include "llvm/ADT/SmallVector.h"
+#include "SPURegisterInfo.h"
using namespace llvm;
@@ -34,3 +36,22 @@ SPUSubtarget::SPUSubtarget(const std::string &TT, const std::string &FS) :
/// producing code for the JIT.
void SPUSubtarget::SetJITMode() {
}
+
+/// Enable PostRA scheduling for optimization levels -O2 and -O3.
+bool SPUSubtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const {
+ Mode = TargetSubtarget::ANTIDEP_CRITICAL;
+ // CriticalPathRCs seems to be the set of
+ // register classes that anti-dependency breaking is performed for.
+ // Do it for all register classes.
+ CriticalPathRCs.clear();
+ CriticalPathRCs.push_back(&SPU::R8CRegClass);
+ CriticalPathRCs.push_back(&SPU::R16CRegClass);
+ CriticalPathRCs.push_back(&SPU::R32CRegClass);
+ CriticalPathRCs.push_back(&SPU::R32FPRegClass);
+ CriticalPathRCs.push_back(&SPU::R64CRegClass);
+ CriticalPathRCs.push_back(&SPU::VECREGRegClass);
+ return OptLevel >= CodeGenOpt::Default;
+}
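
The hook requests critical-path anti-dependency breaking and gates post-RA scheduling on the optimization level; CodeGenOpt::Default corresponds to -O2. A minimal sketch of the gate:

```cpp
// Toy model of the gate above: CodeGenOpt levels are ordered
// None < Less (-O1) < Default (-O2) < Aggressive (-O3), so
// "OptLevel >= Default" enables the post-RA scheduler at -O2 and -O3 only.
enum Level { None = 0, Less = 1, Default = 2, Aggressive = 3 };

bool enablePostRAScheduler(Level OptLevel) {
  return OptLevel >= Default;
}
```
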
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
index 88201c6..d7929302 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.h
@@ -81,9 +81,13 @@ namespace llvm {
/// properties of this subtarget.
const char *getTargetDataString() const {
return "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128"
- "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v64:128:128-v128:128:128"
+ "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v64:64:128-v128:128:128"
"-s:128:128-n32:64";
}
+
+ bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
index 480ec3f..3ed7361 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -29,7 +29,7 @@ extern "C" void LLVMInitializeCellSPUTarget() {
}
const std::pair<unsigned, int> *
-SPUFrameInfo::getCalleeSaveSpillSlots(unsigned &NumEntries) const {
+SPUFrameLowering::getCalleeSaveSpillSlots(unsigned &NumEntries) const {
NumEntries = 1;
return &LR[0];
}
@@ -40,7 +40,7 @@ SPUTargetMachine::SPUTargetMachine(const Target &T, const std::string &TT,
Subtarget(TT, FS),
DataLayout(Subtarget.getTargetDataString()),
InstrInfo(*this),
- FrameInfo(*this),
+ FrameLowering(Subtarget),
TLInfo(*this),
TSInfo(*this),
InstrItins(Subtarget.getInstrItineraryData()) {
@@ -59,3 +59,12 @@ bool SPUTargetMachine::addInstSelector(PassManagerBase &PM,
PM.add(createSPUISelDag(*this));
return false;
}
+
+// Passes to run just before printing the assembly.
+bool SPUTargetMachine::
+addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
+{
+ // Align instructions with nops/lnops for dual issue.
+ PM.add(createSPUNopFillerPass(*this));
+ return true;
+}
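
Returning true from addPreEmitPass tells the common backend driver that a pass was installed; the nop filler pads instruction pairs so they can dual-issue. An illustrative model of the hook protocol (names are stand-ins, not the LLVMTargetMachine internals):

```cpp
#include <memory>
#include <vector>

// Sketch of the hook protocol: the common code-gen driver calls each
// addXXXPass hook; a return value of true signals that a pass was added.
struct Pass { virtual ~Pass() = default; };
struct PassManagerBase {
  std::vector<std::unique_ptr<Pass>> Passes;
  void add(Pass *P) { Passes.emplace_back(P); }
};

struct NopFillerPass : Pass {};  // stands in for createSPUNopFillerPass(TM)

bool addPreEmitPass(PassManagerBase &PM) {
  PM.add(new NopFillerPass());   // pad with nops/lnops for dual issue
  return true;                   // a pass was added
}
```
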
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
index 7e02701..75abd5e 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
@@ -18,14 +18,14 @@
#include "SPUInstrInfo.h"
#include "SPUISelLowering.h"
#include "SPUSelectionDAGInfo.h"
-#include "SPUFrameInfo.h"
+#include "SPUFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
namespace llvm {
class PassManager;
class GlobalValue;
-class TargetFrameInfo;
+class TargetFrameLowering;
/// SPUTargetMachine
///
@@ -33,7 +33,7 @@ class SPUTargetMachine : public LLVMTargetMachine {
SPUSubtarget Subtarget;
const TargetData DataLayout;
SPUInstrInfo InstrInfo;
- SPUFrameInfo FrameInfo;
+ SPUFrameLowering FrameLowering;
SPUTargetLowering TLInfo;
SPUSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
@@ -48,8 +48,8 @@ public:
virtual const SPUInstrInfo *getInstrInfo() const {
return &InstrInfo;
}
- virtual const SPUFrameInfo *getFrameInfo() const {
- return &FrameInfo;
+ virtual const SPUFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
}
/*!
\note Cell SPU does not support JIT today. It could support JIT at some
@@ -75,13 +75,14 @@ public:
return &DataLayout;
}
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItins;
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return &InstrItins;
}
// Pass Pipeline Configuration
virtual bool addInstSelector(PassManagerBase &PM,
CodeGenOpt::Level OptLevel);
+ virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index f08559f..71d6049 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -358,6 +358,7 @@ std::string CppWriter::getCppName(const Type* Ty) {
case Type::FloatTyID: return "Type::getFloatTy(mod->getContext())";
case Type::DoubleTyID: return "Type::getDoubleTy(mod->getContext())";
case Type::LabelTyID: return "Type::getLabelTy(mod->getContext())";
+ case Type::X86_MMXTyID: return "Type::getX86_MMXTy(mod->getContext())";
default:
error("Invalid primitive type");
break;
@@ -1563,11 +1564,25 @@ void CppWriter::printFunctionUses(const Function* F) {
// If the operand references a GVal or Constant, make a note of it
if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
gvs.insert(GV);
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->hasInitializer())
- consts.insert(GVar->getInitializer());
- } else if (Constant* C = dyn_cast<Constant>(operand))
+ if (GenerationType != GenFunction)
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->hasInitializer())
+ consts.insert(GVar->getInitializer());
+ } else if (Constant* C = dyn_cast<Constant>(operand)) {
consts.insert(C);
+ for (unsigned j = 0; j < C->getNumOperands(); ++j) {
+ // If the operand references a GVal or Constant, make a note of it
+ Value* operand = C->getOperand(j);
+ printType(operand->getType());
+ if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
+ gvs.insert(GV);
+ if (GenerationType != GenFunction)
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->hasInitializer())
+ consts.insert(GVar->getInitializer());
+ }
+ }
+ }
}
}
}
@@ -1590,7 +1605,7 @@ void CppWriter::printFunctionUses(const Function* F) {
printVariableHead(F);
}
-// Print the constants found
+ // Print the constants found
nl(Out) << "// Constant Definitions"; nl(Out);
for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
E = consts.end(); I != E; ++I) {
@@ -1600,11 +1615,13 @@ void CppWriter::printFunctionUses(const Function* F) {
// Process the global variables definitions now that all the constants have
// been emitted. These definitions just couple the gvars with their constant
// initializers.
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
- printVariableBody(GV);
+ if (GenerationType != GenFunction) {
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
+ printVariableBody(GV);
+ }
}
}
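
The new inner loop also records globals reached through a constant's operands (for instance a constant GEP of a global), and GenFunction mode now skips emitting global-variable definitions altogether. A simplified sketch of the one-level operand walk, using stand-in types:

```cpp
#include <set>
#include <vector>

// Simplified model of the hunk above: when an instruction operand is a
// Constant, its own operands are scanned one level deep so that globals
// referenced through the constant are recorded as well.
struct Value {
  bool IsGlobal = false;
  std::vector<Value *> Operands;
};

void noteOperand(Value *Op, std::set<Value *> &GVs, std::set<Value *> &Consts) {
  if (Op->IsGlobal) {
    GVs.insert(Op);
    return;
  }
  Consts.insert(Op);
  for (Value *Sub : Op->Operands)  // one level deep, mirroring the patch
    if (Sub->IsGlobal)
      GVs.insert(Sub);
}
```
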
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/CMakeLists.txt b/contrib/llvm/lib/Target/MBlaze/AsmParser/CMakeLists.txt
new file mode 100644
index 0000000..87e7cb5
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/CMakeLists.txt
@@ -0,0 +1,8 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
+ ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMMBlazeAsmParser
+ MBlazeAsmLexer.cpp
+ MBlazeAsmParser.cpp
+ )
+
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
new file mode 100644
index 0000000..1903796
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
@@ -0,0 +1,127 @@
+//===-- MBlazeAsmLexer.cpp - Tokenize MBlaze assembly to AsmTokens --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlaze.h"
+#include "MBlazeTargetMachine.h"
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+
+#include "llvm/Target/TargetAsmLexer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegistry.h"
+
+#include <string>
+#include <map>
+
+using namespace llvm;
+
+namespace {
+
+ class MBlazeBaseAsmLexer : public TargetAsmLexer {
+ const MCAsmInfo &AsmInfo;
+
+ const AsmToken &lexDefinite() {
+ return getLexer()->Lex();
+ }
+
+ AsmToken LexTokenUAL();
+ protected:
+ typedef std::map <std::string, unsigned> rmap_ty;
+
+ rmap_ty RegisterMap;
+
+ void InitRegisterMap(const TargetRegisterInfo *info) {
+ unsigned numRegs = info->getNumRegs();
+
+ for (unsigned i = 0; i < numRegs; ++i) {
+ const char *regName = info->getName(i);
+ if (regName)
+ RegisterMap[regName] = i;
+ }
+ }
+
+ unsigned MatchRegisterName(StringRef Name) {
+ rmap_ty::iterator iter = RegisterMap.find(Name.str());
+ if (iter != RegisterMap.end())
+ return iter->second;
+ else
+ return 0;
+ }
+
+ AsmToken LexToken() {
+ if (!Lexer) {
+ SetError(SMLoc(), "No MCAsmLexer installed");
+ return AsmToken(AsmToken::Error, "", 0);
+ }
+
+ switch (AsmInfo.getAssemblerDialect()) {
+ default:
+ SetError(SMLoc(), "Unhandled dialect");
+ return AsmToken(AsmToken::Error, "", 0);
+ case 0:
+ return LexTokenUAL();
+ }
+ }
+ public:
+ MBlazeBaseAsmLexer(const Target &T, const MCAsmInfo &MAI)
+ : TargetAsmLexer(T), AsmInfo(MAI) {
+ }
+ };
+
+ class MBlazeAsmLexer : public MBlazeBaseAsmLexer {
+ public:
+ MBlazeAsmLexer(const Target &T, const MCAsmInfo &MAI)
+ : MBlazeBaseAsmLexer(T, MAI) {
+ std::string tripleString("mblaze-unknown-unknown");
+ std::string featureString;
+ OwningPtr<const TargetMachine>
+ targetMachine(T.createTargetMachine(tripleString, featureString));
+ InitRegisterMap(targetMachine->getRegisterInfo());
+ }
+ };
+}
+
+AsmToken MBlazeBaseAsmLexer::LexTokenUAL() {
+ const AsmToken &lexedToken = lexDefinite();
+
+ switch (lexedToken.getKind()) {
+ default:
+ return AsmToken(lexedToken);
+ case AsmToken::Error:
+ SetError(Lexer->getErrLoc(), Lexer->getErr());
+ return AsmToken(lexedToken);
+ case AsmToken::Identifier:
+ {
+ std::string upperCase = lexedToken.getString().str();
+ std::string lowerCase = LowercaseString(upperCase);
+ StringRef lowerRef(lowerCase);
+
+ unsigned regID = MatchRegisterName(lowerRef);
+
+ if (regID) {
+ return AsmToken(AsmToken::Register,
+ lexedToken.getString(),
+ static_cast<int64_t>(regID));
+ } else {
+ return AsmToken(lexedToken);
+ }
+ }
+ }
+}
+
+extern "C" void LLVMInitializeMBlazeAsmLexer() {
+ RegisterAsmLexer<MBlazeAsmLexer> X(TheMBlazeTarget);
+}
+
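The lexer upgrades identifier tokens to register tokens by lowercasing the spelling and probing a name-to-number map built from TargetRegisterInfo, with 0 reserved to mean "not a register". A standalone sketch of the lookup, assuming illustrative map contents:

```cpp
#include <algorithm>
#include <cctype>
#include <map>
#include <string>

// Standalone model of the identifier upgrade in LexTokenUAL: lowercase the
// identifier, then probe a name->number map. Map contents are illustrative.
static std::map<std::string, unsigned> RegisterMap = {
    {"r0", 1}, {"r1", 2}, {"r2", 3}};  // 0 is reserved for "no register"

unsigned matchRegisterName(std::string Name) {
  std::transform(Name.begin(), Name.end(), Name.begin(),
                 [](unsigned char C) { return std::tolower(C); });
  auto It = RegisterMap.find(Name);
  return It == RegisterMap.end() ? 0 : It->second;  // 0 => not a register
}
```
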
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
new file mode 100644
index 0000000..524f33d
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
@@ -0,0 +1,568 @@
+//===-- MBlazeAsmParser.cpp - Parse MBlaze asm to MCInst instructions -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlaze.h"
+#include "MBlazeSubtarget.h"
+#include "MBlazeRegisterInfo.h"
+#include "MBlazeISelLowering.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmParser.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+using namespace llvm;
+
+namespace {
+struct MBlazeOperand;
+
+class MBlazeAsmParser : public TargetAsmParser {
+ MCAsmParser &Parser;
+ TargetMachine &TM;
+
+ MCAsmParser &getParser() const { return Parser; }
+ MCAsmLexer &getLexer() const { return Parser.getLexer(); }
+
+ void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
+ bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
+
+ MBlazeOperand *ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ MBlazeOperand *ParseRegister(unsigned &RegNo);
+ MBlazeOperand *ParseImmediate();
+ MBlazeOperand *ParseFsl();
+ MBlazeOperand* ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+
+ bool ParseDirectiveWord(unsigned Size, SMLoc L);
+
+ bool MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out);
+
+ /// @name Auto-generated Match Functions
+ /// {
+
+#define GET_ASSEMBLER_HEADER
+#include "MBlazeGenAsmMatcher.inc"
+
+ /// }
+
+
+public:
+ MBlazeAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &_TM)
+ : TargetAsmParser(T), Parser(_Parser), TM(_TM) {}
+
+ virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+
+ virtual bool ParseDirective(AsmToken DirectiveID);
+};
+
+/// MBlazeOperand - Instances of this class represent a parsed MBlaze machine
+/// instruction.
+struct MBlazeOperand : public MCParsedAsmOperand {
+ enum KindTy {
+ Token,
+ Immediate,
+ Register,
+ Memory,
+ Fsl
+ } Kind;
+
+ SMLoc StartLoc, EndLoc;
+
+ union {
+ struct {
+ const char *Data;
+ unsigned Length;
+ } Tok;
+
+ struct {
+ unsigned RegNum;
+ } Reg;
+
+ struct {
+ const MCExpr *Val;
+ } Imm;
+
+ struct {
+ unsigned Base;
+ unsigned OffReg;
+ const MCExpr *Off;
+ } Mem;
+
+ struct {
+ const MCExpr *Val;
+ } FslImm;
+ };
+
+ MBlazeOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+public:
+ MBlazeOperand(const MBlazeOperand &o) : MCParsedAsmOperand() {
+ Kind = o.Kind;
+ StartLoc = o.StartLoc;
+ EndLoc = o.EndLoc;
+ switch (Kind) {
+ case Register:
+ Reg = o.Reg;
+ break;
+ case Immediate:
+ Imm = o.Imm;
+ break;
+ case Token:
+ Tok = o.Tok;
+ break;
+ case Memory:
+ Mem = o.Mem;
+ break;
+ case Fsl:
+ FslImm = o.FslImm;
+ break;
+ }
+ }
+
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const { return StartLoc; }
+
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const { return EndLoc; }
+
+ unsigned getReg() const {
+ assert(Kind == Register && "Invalid access!");
+ return Reg.RegNum;
+ }
+
+ const MCExpr *getImm() const {
+ assert(Kind == Immediate && "Invalid access!");
+ return Imm.Val;
+ }
+
+ const MCExpr *getFslImm() const {
+ assert(Kind == Fsl && "Invalid access!");
+ return FslImm.Val;
+ }
+
+ unsigned getMemBase() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.Base;
+ }
+
+ const MCExpr* getMemOff() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.Off;
+ }
+
+ unsigned getMemOffReg() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.OffReg;
+ }
+
+ bool isToken() const { return Kind == Token; }
+ bool isImm() const { return Kind == Immediate; }
+ bool isMem() const { return Kind == Memory; }
+ bool isFsl() const { return Kind == Fsl; }
+ bool isReg() const { return Kind == Register; }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates when possible. Null MCExpr = 0.
+ if (Expr == 0)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
+
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addFslOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getFslImm());
+ }
+
+ void addMemOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::CreateReg(getMemBase()));
+
+ unsigned RegOff = getMemOffReg();
+ if (RegOff)
+ Inst.addOperand(MCOperand::CreateReg(RegOff));
+ else
+ addExpr(Inst, getMemOff());
+ }
+
+ StringRef getToken() const {
+ assert(Kind == Token && "Invalid access!");
+ return StringRef(Tok.Data, Tok.Length);
+ }
+
+ virtual void dump(raw_ostream &OS) const;
+
+ static MBlazeOperand *CreateToken(StringRef Str, SMLoc S) {
+ MBlazeOperand *Op = new MBlazeOperand(Token);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static MBlazeOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
+ MBlazeOperand *Op = new MBlazeOperand(Register);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MBlazeOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ MBlazeOperand *Op = new MBlazeOperand(Immediate);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MBlazeOperand *CreateFslImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ MBlazeOperand *Op = new MBlazeOperand(Fsl);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MBlazeOperand *CreateMem(unsigned Base, const MCExpr *Off, SMLoc S,
+ SMLoc E) {
+ MBlazeOperand *Op = new MBlazeOperand(Memory);
+ Op->Mem.Base = Base;
+ Op->Mem.Off = Off;
+ Op->Mem.OffReg = 0;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static MBlazeOperand *CreateMem(unsigned Base, unsigned Off, SMLoc S,
+ SMLoc E) {
+ MBlazeOperand *Op = new MBlazeOperand(Memory);
+ Op->Mem.Base = Base;
+ Op->Mem.OffReg = Off;
+ Op->Mem.Off = 0;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+};
+
+} // end anonymous namespace.
+
+void MBlazeOperand::dump(raw_ostream &OS) const {
+ switch (Kind) {
+ case Immediate:
+ getImm()->print(OS);
+ break;
+ case Register:
+ OS << "<register R";
+ OS << MBlazeRegisterInfo::getRegisterNumbering(getReg()) << ">";
+ break;
+ case Token:
+ OS << "'" << getToken() << "'";
+ break;
+ case Memory: {
+ OS << "<memory R";
+ OS << MBlazeRegisterInfo::getRegisterNumbering(getMemBase());
+ OS << ", ";
+
+ unsigned RegOff = getMemOffReg();
+ if (RegOff)
+ OS << "R" << MBlazeRegisterInfo::getRegisterNumbering(RegOff);
+ else
+ OS << getMemOff();
+ OS << ">";
+ }
+ break;
+ case Fsl:
+ getFslImm()->print(OS);
+ break;
+ }
+}
+
+/// @name Auto-generated Match Functions
+/// {
+
+static unsigned MatchRegisterName(StringRef Name);
+
+/// }
+//
+bool MBlazeAsmParser::
+MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) {
+ MCInst Inst;
+ SMLoc ErrorLoc;
+ unsigned ErrorInfo;
+
+ switch (MatchInstructionImpl(Operands, Inst, ErrorInfo)) {
+ case Match_Success:
+ Out.EmitInstruction(Inst);
+ return false;
+ case Match_MissingFeature:
+ return Error(IDLoc, "instruction use requires an option to be enabled");
+ case Match_MnemonicFail:
+ return Error(IDLoc, "unrecognized instruction mnemonic");
+ case Match_ConversionFail:
+ return Error(IDLoc, "unable to convert operands to instruction");
+ case Match_InvalidOperand:
+ ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((MBlazeOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+
+ llvm_unreachable("Implement any new match types added!");
+ return true;
+}
+
+MBlazeOperand *MBlazeAsmParser::
+ParseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ if (Operands.size() != 4)
+ return 0;
+
+ MBlazeOperand &Base = *(MBlazeOperand*)Operands[2];
+ MBlazeOperand &Offset = *(MBlazeOperand*)Operands[3];
+
+ SMLoc S = Base.getStartLoc();
+ SMLoc O = Offset.getStartLoc();
+ SMLoc E = Offset.getEndLoc();
+
+ if (!Base.isReg()) {
+ Error(S, "base address must be a register");
+ return 0;
+ }
+
+ if (!Offset.isReg() && !Offset.isImm()) {
+ Error(O, "offset must be a register or immediate");
+ return 0;
+ }
+
+ MBlazeOperand *Op;
+ if (Offset.isReg())
+ Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getReg(), S, E);
+ else
+ Op = MBlazeOperand::CreateMem(Base.getReg(), Offset.getImm(), S, E);
+
+ delete Operands.pop_back_val();
+ delete Operands.pop_back_val();
+ Operands.push_back(Op);
+
+ return Op;
+}
+
+bool MBlazeAsmParser::ParseRegister(unsigned &RegNo,
+ SMLoc &StartLoc, SMLoc &EndLoc) {
+ return (ParseRegister(RegNo) == 0);
+}
+
+MBlazeOperand *MBlazeAsmParser::ParseRegister(unsigned &RegNo) {
+ SMLoc S = Parser.getTok().getLoc();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ switch (getLexer().getKind()) {
+ default: return 0;
+ case AsmToken::Identifier:
+ RegNo = MatchRegisterName(getLexer().getTok().getIdentifier());
+ if (RegNo == 0)
+ return 0;
+
+ getLexer().Lex();
+ return MBlazeOperand::CreateReg(RegNo, S, E);
+ }
+}
+
+static unsigned MatchFslRegister(StringRef String) {
+ if (!String.startswith("rfsl"))
+ return -1;
+
+ unsigned regNum;
+ if (String.substr(4).getAsInteger(10,regNum))
+ return -1;
+
+ return regNum;
+}
+
+MBlazeOperand *MBlazeAsmParser::ParseFsl() {
+ SMLoc S = Parser.getTok().getLoc();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ switch (getLexer().getKind()) {
+ default: return 0;
+ case AsmToken::Identifier:
+ unsigned reg = MatchFslRegister(getLexer().getTok().getIdentifier());
+ if (reg >= 16)
+ return 0;
+
+ getLexer().Lex();
+ const MCExpr *EVal = MCConstantExpr::Create(reg,getContext());
+ return MBlazeOperand::CreateFslImm(EVal,S,E);
+ }
+}
+
+MBlazeOperand *MBlazeAsmParser::ParseImmediate() {
+ SMLoc S = Parser.getTok().getLoc();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ const MCExpr *EVal;
+ switch (getLexer().getKind()) {
+ default: return 0;
+ case AsmToken::LParen:
+ case AsmToken::Plus:
+ case AsmToken::Minus:
+ case AsmToken::Integer:
+ case AsmToken::Identifier:
+ if (getParser().ParseExpression(EVal))
+ return 0;
+
+ return MBlazeOperand::CreateImm(EVal, S, E);
+ }
+}
+
+MBlazeOperand *MBlazeAsmParser::
+ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ MBlazeOperand *Op;
+
+ // Attempt to parse the next token as a register name
+ unsigned RegNo;
+ Op = ParseRegister(RegNo);
+
+ // Attempt to parse the next token as an FSL immediate
+ if (!Op)
+ Op = ParseFsl();
+
+ // Attempt to parse the next token as an immediate
+ if (!Op)
+ Op = ParseImmediate();
+
+ // If the token could not be parsed then fail
+ if (!Op) {
+ Error(Parser.getTok().getLoc(), "unknown operand");
+ return 0;
+ }
+
+ // Push the parsed operand into the list of operands
+ Operands.push_back(Op);
+ return Op;
+}
+
+/// Parse an MBlaze instruction mnemonic followed by its operands.
+bool MBlazeAsmParser::
+ParseInstruction(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // The first operand is the token for the instruction name
+ size_t dotLoc = Name.find('.');
+ Operands.push_back(MBlazeOperand::CreateToken(Name.substr(0,dotLoc),NameLoc));
+ if (dotLoc < Name.size())
+ Operands.push_back(MBlazeOperand::CreateToken(Name.substr(dotLoc),NameLoc));
+
+ // If there are no more operands then finish
+ if (getLexer().is(AsmToken::EndOfStatement))
+ return false;
+
+ // Parse the first operand
+ if (!ParseOperand(Operands))
+ return true;
+
+ while (getLexer().isNot(AsmToken::EndOfStatement) &&
+ getLexer().is(AsmToken::Comma)) {
+ // Consume the comma token
+ getLexer().Lex();
+
+ // Parse the next operand
+ if (!ParseOperand(Operands))
+ return true;
+ }
+
+ // If the instruction requires a memory operand then we need to
+ // replace the last two operands (base+offset) with a single
+ // memory operand.
+ if (Name.startswith("lw") || Name.startswith("sw") ||
+ Name.startswith("lh") || Name.startswith("sh") ||
+ Name.startswith("lb") || Name.startswith("sb"))
+ return (ParseMemory(Operands) == NULL);
+
+ return false;
+}
+
+/// ParseDirective parses the MBlaze-specific directives
+bool MBlazeAsmParser::ParseDirective(AsmToken DirectiveID) {
+ StringRef IDVal = DirectiveID.getIdentifier();
+ if (IDVal == ".word")
+ return ParseDirectiveWord(2, DirectiveID.getLoc());
+ return true;
+}
+
+/// ParseDirectiveWord
+/// ::= .word [ expression (, expression)* ]
+bool MBlazeAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ for (;;) {
+ const MCExpr *Value;
+ if (getParser().ParseExpression(Value))
+ return true;
+
+ getParser().getStreamer().EmitValue(Value, Size, 0 /*addrspace*/);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ // FIXME: Improve diagnostic.
+ if (getLexer().isNot(AsmToken::Comma))
+ return Error(L, "unexpected token in directive");
+ Parser.Lex();
+ }
+ }
+
+ Parser.Lex();
+ return false;
+}
+
+extern "C" void LLVMInitializeMBlazeAsmLexer();
+
+/// Force static initialization.
+extern "C" void LLVMInitializeMBlazeAsmParser() {
+ RegisterAsmParser<MBlazeAsmParser> X(TheMBlazeTarget);
+ LLVMInitializeMBlazeAsmLexer();
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
+#include "MBlazeGenAsmMatcher.inc"
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/Makefile b/contrib/llvm/lib/Target/MBlaze/AsmParser/Makefile
new file mode 100644
index 0000000..611a0f4
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/MBlaze/AsmParser/Makefile ----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMMBlazeAsmParser
+
+# Hack: we need to include 'main' MBlaze target directory for private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/CMakeLists.txt b/contrib/llvm/lib/Target/MBlaze/Disassembler/CMakeLists.txt
new file mode 100644
index 0000000..9376e68
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/CMakeLists.txt
@@ -0,0 +1,16 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
+ ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMMBlazeDisassembler
+ MBlazeDisassembler.cpp
+ )
+
+# workaround for hanging compilation on MSVC9 and 10
+if( MSVC_VERSION EQUAL 1500 OR MSVC_VERSION EQUAL 1600 )
+set_property(
+ SOURCE MBlazeDisassembler.cpp
+ PROPERTY COMPILE_FLAGS "/Od"
+ )
+endif()
+
+add_dependencies(LLVMMBlazeDisassembler MBlazeCodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
new file mode 100644
index 0000000..3379ac2
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
@@ -0,0 +1,647 @@
+//===- MBlazeDisassembler.cpp - Disassembler for MicroBlaze ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the MBlaze Disassembler. It contains code to translate
+// the data produced by the decoder into MCInsts.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlaze.h"
+#include "MBlazeInstrInfo.h"
+#include "MBlazeDisassembler.h"
+
+#include "llvm/MC/EDInstInfo.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/raw_ostream.h"
+
+// #include "MBlazeGenDecoderTables.inc"
+// #include "MBlazeGenRegisterNames.inc"
+#include "MBlazeGenInstrInfo.inc"
+#include "MBlazeGenEDInfo.inc"
+
+using namespace llvm;
+
+const unsigned UNSUPPORTED = -1;
+
+static unsigned mblazeBinary2Opcode[] = {
+ MBlaze::ADD, MBlaze::RSUB, MBlaze::ADDC, MBlaze::RSUBC, //00,01,02,03
+ MBlaze::ADDK, MBlaze::RSUBK, MBlaze::ADDKC, MBlaze::RSUBKC, //04,05,06,07
+ MBlaze::ADDI, MBlaze::RSUBI, MBlaze::ADDIC, MBlaze::RSUBIC, //08,09,0A,0B
+ MBlaze::ADDIK, MBlaze::RSUBIK, MBlaze::ADDIKC, MBlaze::RSUBIKC, //0C,0D,0E,0F
+
+ MBlaze::MUL, MBlaze::BSRL, MBlaze::IDIV, MBlaze::GETD, //10,11,12,13
+ UNSUPPORTED, UNSUPPORTED, MBlaze::FADD, UNSUPPORTED, //14,15,16,17
+ MBlaze::MULI, MBlaze::BSRLI, UNSUPPORTED, MBlaze::GET, //18,19,1A,1B
+ UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, UNSUPPORTED, //1C,1D,1E,1F
+
+ MBlaze::OR, MBlaze::AND, MBlaze::XOR, MBlaze::ANDN, //20,21,22,23
+ MBlaze::SEXT8, MBlaze::MFS, MBlaze::BR, MBlaze::BEQ, //24,25,26,27
+ MBlaze::ORI, MBlaze::ANDI, MBlaze::XORI, MBlaze::ANDNI, //28,29,2A,2B
+ MBlaze::IMM, MBlaze::RTSD, MBlaze::BRI, MBlaze::BEQI, //2C,2D,2E,2F
+
+ MBlaze::LBU, MBlaze::LHU, MBlaze::LW, UNSUPPORTED, //30,31,32,33
+ MBlaze::SB, MBlaze::SH, MBlaze::SW, UNSUPPORTED, //34,35,36,37
+ MBlaze::LBUI, MBlaze::LHUI, MBlaze::LWI, UNSUPPORTED, //38,39,3A,3B
+ MBlaze::SBI, MBlaze::SHI, MBlaze::SWI, UNSUPPORTED, //3C,3D,3E,3F
+};
+
+static unsigned getRD(uint32_t insn) {
+ return MBlazeRegisterInfo::getRegisterFromNumbering((insn>>21)&0x1F);
+}
+
+static unsigned getRA(uint32_t insn) {
+ return MBlazeRegisterInfo::getRegisterFromNumbering((insn>>16)&0x1F);
+}
+
+static unsigned getRB(uint32_t insn) {
+ return MBlazeRegisterInfo::getRegisterFromNumbering((insn>>11)&0x1F);
+}
+
+static int64_t getRS(uint32_t insn) {
+ return MBlazeRegisterInfo::getSpecialRegisterFromNumbering(insn&0x3FFF);
+}
+
+static int64_t getIMM(uint32_t insn) {
+ int16_t val = (insn & 0xFFFF);
+ return val;
+}
+
+static int64_t getSHT(uint32_t insn) {
+ int16_t val = (insn & 0x1F);
+ return val;
+}
+
+static unsigned getFLAGS(int32_t insn) {
+ return (insn & 0x7FF);
+}
+
+static int64_t getFSL(uint32_t insn) {
+ int16_t val = (insn & 0xF);
+ return val;
+}
+
+static unsigned decodeMUL(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0: return MBlaze::MUL;
+ case 1: return MBlaze::MULH;
+ case 2: return MBlaze::MULHSU;
+ case 3: return MBlaze::MULHU;
+ }
+}
+
+static unsigned decodeSEXT(uint32_t insn) {
+ switch (insn&0x7FF) {
+ default: return UNSUPPORTED;
+ case 0x60: return MBlaze::SEXT8;
+ case 0x68: return MBlaze::WIC;
+ case 0x64: return MBlaze::WDC;
+ case 0x66: return MBlaze::WDCC;
+ case 0x74: return MBlaze::WDCF;
+ case 0x61: return MBlaze::SEXT16;
+ case 0x41: return MBlaze::SRL;
+ case 0x21: return MBlaze::SRC;
+ case 0x01: return MBlaze::SRA;
+ }
+}
+
+static unsigned decodeBEQ(uint32_t insn) {
+ switch ((insn>>21)&0x1F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::BEQ;
+ case 0x10: return MBlaze::BEQD;
+ case 0x05: return MBlaze::BGE;
+ case 0x15: return MBlaze::BGED;
+ case 0x04: return MBlaze::BGT;
+ case 0x14: return MBlaze::BGTD;
+ case 0x03: return MBlaze::BLE;
+ case 0x13: return MBlaze::BLED;
+ case 0x02: return MBlaze::BLT;
+ case 0x12: return MBlaze::BLTD;
+ case 0x01: return MBlaze::BNE;
+ case 0x11: return MBlaze::BNED;
+ }
+}
+
+static unsigned decodeBEQI(uint32_t insn) {
+ switch ((insn>>21)&0x1F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::BEQI;
+ case 0x10: return MBlaze::BEQID;
+ case 0x05: return MBlaze::BGEI;
+ case 0x15: return MBlaze::BGEID;
+ case 0x04: return MBlaze::BGTI;
+ case 0x14: return MBlaze::BGTID;
+ case 0x03: return MBlaze::BLEI;
+ case 0x13: return MBlaze::BLEID;
+ case 0x02: return MBlaze::BLTI;
+ case 0x12: return MBlaze::BLTID;
+ case 0x01: return MBlaze::BNEI;
+ case 0x11: return MBlaze::BNEID;
+ }
+}
+
+static unsigned decodeBR(uint32_t insn) {
+ switch ((insn>>16)&0x1F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::BR;
+ case 0x08: return MBlaze::BRA;
+ case 0x0C: return MBlaze::BRK;
+ case 0x10: return MBlaze::BRD;
+ case 0x14: return MBlaze::BRLD;
+ case 0x18: return MBlaze::BRAD;
+ case 0x1C: return MBlaze::BRALD;
+ }
+}
+
+static unsigned decodeBRI(uint32_t insn) {
+ switch ((insn>>16)&0x1F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::BRI;
+ case 0x08: return MBlaze::BRAI;
+ case 0x0C: return MBlaze::BRKI;
+ case 0x10: return MBlaze::BRID;
+ case 0x14: return MBlaze::BRLID;
+ case 0x18: return MBlaze::BRAID;
+ case 0x1C: return MBlaze::BRALID;
+ }
+}
+
+static unsigned decodeBSRL(uint32_t insn) {
+ switch ((insn>>9)&0x3) {
+ default: return UNSUPPORTED;
+ case 0x2: return MBlaze::BSLL;
+ case 0x1: return MBlaze::BSRA;
+ case 0x0: return MBlaze::BSRL;
+ }
+}
+
+static unsigned decodeBSRLI(uint32_t insn) {
+ switch ((insn>>9)&0x3) {
+ default: return UNSUPPORTED;
+ case 0x2: return MBlaze::BSLLI;
+ case 0x1: return MBlaze::BSRAI;
+ case 0x0: return MBlaze::BSRLI;
+ }
+}
+
+static unsigned decodeRSUBK(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::RSUBK;
+ case 0x1: return MBlaze::CMP;
+ case 0x3: return MBlaze::CMPU;
+ }
+}
+
+static unsigned decodeFADD(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0x000: return MBlaze::FADD;
+ case 0x080: return MBlaze::FRSUB;
+ case 0x100: return MBlaze::FMUL;
+ case 0x180: return MBlaze::FDIV;
+ case 0x200: return MBlaze::FCMP_UN;
+ case 0x210: return MBlaze::FCMP_LT;
+ case 0x220: return MBlaze::FCMP_EQ;
+ case 0x230: return MBlaze::FCMP_LE;
+ case 0x240: return MBlaze::FCMP_GT;
+ case 0x250: return MBlaze::FCMP_NE;
+ case 0x260: return MBlaze::FCMP_GE;
+ case 0x280: return MBlaze::FLT;
+ case 0x300: return MBlaze::FINT;
+ case 0x380: return MBlaze::FSQRT;
+ }
+}
+
+static unsigned decodeGET(uint32_t insn) {
+ switch ((insn>>10)&0x3F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::GET;
+ case 0x01: return MBlaze::EGET;
+ case 0x02: return MBlaze::AGET;
+ case 0x03: return MBlaze::EAGET;
+ case 0x04: return MBlaze::TGET;
+ case 0x05: return MBlaze::TEGET;
+ case 0x06: return MBlaze::TAGET;
+ case 0x07: return MBlaze::TEAGET;
+ case 0x08: return MBlaze::CGET;
+ case 0x09: return MBlaze::ECGET;
+ case 0x0A: return MBlaze::CAGET;
+ case 0x0B: return MBlaze::ECAGET;
+ case 0x0C: return MBlaze::TCGET;
+ case 0x0D: return MBlaze::TECGET;
+ case 0x0E: return MBlaze::TCAGET;
+ case 0x0F: return MBlaze::TECAGET;
+ case 0x10: return MBlaze::NGET;
+ case 0x11: return MBlaze::NEGET;
+ case 0x12: return MBlaze::NAGET;
+ case 0x13: return MBlaze::NEAGET;
+ case 0x14: return MBlaze::TNGET;
+ case 0x15: return MBlaze::TNEGET;
+ case 0x16: return MBlaze::TNAGET;
+ case 0x17: return MBlaze::TNEAGET;
+ case 0x18: return MBlaze::NCGET;
+ case 0x19: return MBlaze::NECGET;
+ case 0x1A: return MBlaze::NCAGET;
+ case 0x1B: return MBlaze::NECAGET;
+ case 0x1C: return MBlaze::TNCGET;
+ case 0x1D: return MBlaze::TNECGET;
+ case 0x1E: return MBlaze::TNCAGET;
+ case 0x1F: return MBlaze::TNECAGET;
+ case 0x20: return MBlaze::PUT;
+ case 0x22: return MBlaze::APUT;
+ case 0x24: return MBlaze::TPUT;
+ case 0x26: return MBlaze::TAPUT;
+ case 0x28: return MBlaze::CPUT;
+ case 0x2A: return MBlaze::CAPUT;
+ case 0x2C: return MBlaze::TCPUT;
+ case 0x2E: return MBlaze::TCAPUT;
+ case 0x30: return MBlaze::NPUT;
+ case 0x32: return MBlaze::NAPUT;
+ case 0x34: return MBlaze::TNPUT;
+ case 0x36: return MBlaze::TNAPUT;
+ case 0x38: return MBlaze::NCPUT;
+ case 0x3A: return MBlaze::NCAPUT;
+ case 0x3C: return MBlaze::TNCPUT;
+ case 0x3E: return MBlaze::TNCAPUT;
+ }
+}
+
+static unsigned decodeGETD(uint32_t insn) {
+ switch ((insn>>5)&0x3F) {
+ default: return UNSUPPORTED;
+ case 0x00: return MBlaze::GETD;
+ case 0x01: return MBlaze::EGETD;
+ case 0x02: return MBlaze::AGETD;
+ case 0x03: return MBlaze::EAGETD;
+ case 0x04: return MBlaze::TGETD;
+ case 0x05: return MBlaze::TEGETD;
+ case 0x06: return MBlaze::TAGETD;
+ case 0x07: return MBlaze::TEAGETD;
+ case 0x08: return MBlaze::CGETD;
+ case 0x09: return MBlaze::ECGETD;
+ case 0x0A: return MBlaze::CAGETD;
+ case 0x0B: return MBlaze::ECAGETD;
+ case 0x0C: return MBlaze::TCGETD;
+ case 0x0D: return MBlaze::TECGETD;
+ case 0x0E: return MBlaze::TCAGETD;
+ case 0x0F: return MBlaze::TECAGETD;
+ case 0x10: return MBlaze::NGETD;
+ case 0x11: return MBlaze::NEGETD;
+ case 0x12: return MBlaze::NAGETD;
+ case 0x13: return MBlaze::NEAGETD;
+ case 0x14: return MBlaze::TNGETD;
+ case 0x15: return MBlaze::TNEGETD;
+ case 0x16: return MBlaze::TNAGETD;
+ case 0x17: return MBlaze::TNEAGETD;
+ case 0x18: return MBlaze::NCGETD;
+ case 0x19: return MBlaze::NECGETD;
+ case 0x1A: return MBlaze::NCAGETD;
+ case 0x1B: return MBlaze::NECAGETD;
+ case 0x1C: return MBlaze::TNCGETD;
+ case 0x1D: return MBlaze::TNECGETD;
+ case 0x1E: return MBlaze::TNCAGETD;
+ case 0x1F: return MBlaze::TNECAGETD;
+ case 0x20: return MBlaze::PUTD;
+ case 0x22: return MBlaze::APUTD;
+ case 0x24: return MBlaze::TPUTD;
+ case 0x26: return MBlaze::TAPUTD;
+ case 0x28: return MBlaze::CPUTD;
+ case 0x2A: return MBlaze::CAPUTD;
+ case 0x2C: return MBlaze::TCPUTD;
+ case 0x2E: return MBlaze::TCAPUTD;
+ case 0x30: return MBlaze::NPUTD;
+ case 0x32: return MBlaze::NAPUTD;
+ case 0x34: return MBlaze::TNPUTD;
+ case 0x36: return MBlaze::TNAPUTD;
+ case 0x38: return MBlaze::NCPUTD;
+ case 0x3A: return MBlaze::NCAPUTD;
+ case 0x3C: return MBlaze::TNCPUTD;
+ case 0x3E: return MBlaze::TNCAPUTD;
+ }
+}
+
+static unsigned decodeIDIV(uint32_t insn) {
+ switch (insn&0x3) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::IDIV;
+ case 0x2: return MBlaze::IDIVU;
+ }
+}
+
+static unsigned decodeLBU(uint32_t insn) {
+ switch ((insn>>9)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::LBU;
+ case 0x1: return MBlaze::LBUR;
+ }
+}
+
+static unsigned decodeLHU(uint32_t insn) {
+ switch ((insn>>9)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::LHU;
+ case 0x1: return MBlaze::LHUR;
+ }
+}
+
+static unsigned decodeLW(uint32_t insn) {
+ switch ((insn>>9)&0x3) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::LW;
+ case 0x1: return MBlaze::LWR;
+ case 0x2: return MBlaze::LWX;
+ }
+}
+
+static unsigned decodeSB(uint32_t insn) {
+ switch ((insn>>9)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::SB;
+ case 0x1: return MBlaze::SBR;
+ }
+}
+
+static unsigned decodeSH(uint32_t insn) {
+ switch ((insn>>9)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::SH;
+ case 0x1: return MBlaze::SHR;
+ }
+}
+
+static unsigned decodeSW(uint32_t insn) {
+ switch ((insn>>9)&0x3) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::SW;
+ case 0x1: return MBlaze::SWR;
+ case 0x2: return MBlaze::SWX;
+ }
+}
+
+static unsigned decodeMFS(uint32_t insn) {
+ switch ((insn>>15)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0:
+ switch ((insn>>16)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::MSRSET;
+ case 0x1: return MBlaze::MSRCLR;
+ }
+ case 0x1:
+ switch ((insn>>14)&0x1) {
+ default: return UNSUPPORTED;
+ case 0x0: return MBlaze::MFS;
+ case 0x1: return MBlaze::MTS;
+ }
+ }
+}
+
+static unsigned decodeOR(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0x000: return MBlaze::OR;
+ case 0x400: return MBlaze::PCMPBF;
+ }
+}
+
+static unsigned decodeXOR(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0x000: return MBlaze::XOR;
+ case 0x400: return MBlaze::PCMPEQ;
+ }
+}
+
+static unsigned decodeANDN(uint32_t insn) {
+ switch (getFLAGS(insn)) {
+ default: return UNSUPPORTED;
+ case 0x000: return MBlaze::ANDN;
+ case 0x400: return MBlaze::PCMPNE;
+ }
+}
+
+static unsigned decodeRTSD(uint32_t insn) {
+ switch ((insn>>21)&0x1F) {
+ default: return UNSUPPORTED;
+ case 0x10: return MBlaze::RTSD;
+ case 0x11: return MBlaze::RTID;
+ case 0x12: return MBlaze::RTBD;
+ case 0x14: return MBlaze::RTED;
+ }
+}
+
+static unsigned getOPCODE(uint32_t insn) {
+ unsigned opcode = mblazeBinary2Opcode[ (insn>>26)&0x3F ];
+ switch (opcode) {
+ case MBlaze::MUL: return decodeMUL(insn);
+ case MBlaze::SEXT8: return decodeSEXT(insn);
+ case MBlaze::BEQ: return decodeBEQ(insn);
+ case MBlaze::BEQI: return decodeBEQI(insn);
+ case MBlaze::BR: return decodeBR(insn);
+ case MBlaze::BRI: return decodeBRI(insn);
+ case MBlaze::BSRL: return decodeBSRL(insn);
+ case MBlaze::BSRLI: return decodeBSRLI(insn);
+ case MBlaze::RSUBK: return decodeRSUBK(insn);
+ case MBlaze::FADD: return decodeFADD(insn);
+ case MBlaze::GET: return decodeGET(insn);
+ case MBlaze::GETD: return decodeGETD(insn);
+ case MBlaze::IDIV: return decodeIDIV(insn);
+ case MBlaze::LBU: return decodeLBU(insn);
+ case MBlaze::LHU: return decodeLHU(insn);
+ case MBlaze::LW: return decodeLW(insn);
+ case MBlaze::SB: return decodeSB(insn);
+ case MBlaze::SH: return decodeSH(insn);
+ case MBlaze::SW: return decodeSW(insn);
+ case MBlaze::MFS: return decodeMFS(insn);
+ case MBlaze::OR: return decodeOR(insn);
+ case MBlaze::XOR: return decodeXOR(insn);
+ case MBlaze::ANDN: return decodeANDN(insn);
+ case MBlaze::RTSD: return decodeRTSD(insn);
+ default: return opcode;
+ }
+}
+
+EDInstInfo *MBlazeDisassembler::getEDInfo() const {
+ return instInfoMBlaze;
+}
+
+//
+// Public interface for the disassembler
+//
+
+bool MBlazeDisassembler::getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream) const {
+ // The machine instruction.
+ uint32_t insn;
+ uint8_t bytes[4];
+
+ // We always consume 4 bytes of data
+ size = 4;
+
+ // We want to read exactly 4 bytes of data.
+ if (region.readBytes(address, 4, (uint8_t*)bytes, NULL) == -1)
+ return false;
+
+ // Encoded as a big-endian 32-bit word in the stream.
+ insn = (bytes[0]<<24) | (bytes[1]<<16) | (bytes[2]<< 8) | (bytes[3]<<0);
+
+ // Get the MCInst opcode from the binary instruction and make sure
+ // that it is a valid instruction.
+ unsigned opcode = getOPCODE(insn);
+ if (opcode == UNSUPPORTED)
+ return false;
+
+ instr.setOpcode(opcode);
+
+ uint64_t tsFlags = MBlazeInsts[opcode].TSFlags;
+ switch ((tsFlags & MBlazeII::FormMask)) {
+ default: llvm_unreachable("unknown instruction encoding");
+
+ case MBlazeII::FRRRR:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ break;
+
+ case MBlazeII::FRRR:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ break;
+
+ case MBlazeII::FRI:
+ switch (opcode) {
+ default: llvm_unreachable("unknown instruction encoding");
+ case MBlaze::MFS:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateImm(insn&0x3FFF));
+ break;
+ case MBlaze::MTS:
+ instr.addOperand(MCOperand::CreateImm(insn&0x3FFF));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ break;
+ case MBlaze::MSRSET:
+ case MBlaze::MSRCLR:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateImm(insn&0x7FFF));
+ break;
+ }
+ break;
+
+ case MBlazeII::FRRI:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ switch (opcode) {
+ default:
+ instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
+ break;
+ case MBlaze::BSRLI:
+ case MBlaze::BSRAI:
+ case MBlaze::BSLLI:
+ instr.addOperand(MCOperand::CreateImm(insn&0x1F));
+ break;
+ }
+ break;
+
+ case MBlazeII::FCRR:
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ break;
+
+ case MBlazeII::FCRI:
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
+ break;
+
+ case MBlazeII::FRCR:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ break;
+
+ case MBlazeII::FRCI:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
+ break;
+
+ case MBlazeII::FCCR:
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ break;
+
+ case MBlazeII::FCCI:
+ instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
+ break;
+
+ case MBlazeII::FRRCI:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ instr.addOperand(MCOperand::CreateImm(getSHT(insn)));
+ break;
+
+ case MBlazeII::FRRC:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ break;
+
+ case MBlazeII::FRCX:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
+ break;
+
+ case MBlazeII::FRCS:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRS(insn)));
+ break;
+
+ case MBlazeII::FCRCS:
+ instr.addOperand(MCOperand::CreateReg(getRS(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ break;
+
+ case MBlazeII::FCRCX:
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
+ break;
+
+ case MBlazeII::FCX:
+ instr.addOperand(MCOperand::CreateImm(getFSL(insn)));
+ break;
+
+ case MBlazeII::FCR:
+ instr.addOperand(MCOperand::CreateReg(getRB(insn)));
+ break;
+
+ case MBlazeII::FRIR:
+ instr.addOperand(MCOperand::CreateReg(getRD(insn)));
+ instr.addOperand(MCOperand::CreateImm(getIMM(insn)));
+ instr.addOperand(MCOperand::CreateReg(getRA(insn)));
+ break;
+ }
+
+ return true;
+}
+
+static MCDisassembler *createMBlazeDisassembler(const Target &T) {
+ return new MBlazeDisassembler;
+}
+
+extern "C" void LLVMInitializeMBlazeDisassembler() {
+ // Register the disassembler.
+ TargetRegistry::RegisterMCDisassembler(TheMBlazeTarget,
+ createMBlazeDisassembler);
+}
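
The disassembler always consumes four bytes, assembles them as a big-endian word, and decodes the primary opcode from the top six bits, with rd/ra/rb as consecutive 5-bit fields below it. A standalone illustration of the field extraction:

```cpp
#include <cstdint>
#include <cstdio>

// Standalone illustration of the decoding above: MicroBlaze instruction
// words are big-endian, the primary opcode is bits 31..26, and rd/ra/rb are
// the 5-bit fields at bits 25..21, 20..16, and 15..11.
static uint32_t readBigEndian(const uint8_t B[4]) {
  return (uint32_t(B[0]) << 24) | (uint32_t(B[1]) << 16) |
         (uint32_t(B[2]) << 8)  |  uint32_t(B[3]);
}

int main() {
  const uint8_t Bytes[4] = {0x10, 0x43, 0x18, 0x00};  // example word
  uint32_t Insn = readBigEndian(Bytes);
  std::printf("opcode=%u rd=%u ra=%u rb=%u\n",
              (Insn >> 26) & 0x3F, (Insn >> 21) & 0x1F,
              (Insn >> 16) & 0x1F, (Insn >> 11) & 0x1F);
}
```
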
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
new file mode 100644
index 0000000..d05eced
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
@@ -0,0 +1,55 @@
+//===- MBlazeDisassembler.h - Disassembler for MicroBlaze ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the MBlaze Disassembler. It is the header for
+// MBlazeDisassembler, a subclass of MCDisassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MBLAZEDISASSEMBLER_H
+#define MBLAZEDISASSEMBLER_H
+
+#include "llvm/MC/MCDisassembler.h"
+
+struct InternalInstruction;
+
+namespace llvm {
+
+class MCInst;
+class MemoryObject;
+class raw_ostream;
+
+struct EDInstInfo;
+
+/// MBlazeDisassembler - Disassembler for all MBlaze platforms.
+class MBlazeDisassembler : public MCDisassembler {
+public:
+ /// Constructor - Initializes the disassembler.
+ ///
+ MBlazeDisassembler() :
+ MCDisassembler() {
+ }
+
+ ~MBlazeDisassembler() {
+ }
+
+ /// getInstruction - See MCDisassembler.
+ bool getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream) const;
+
+ /// getEDInfo - See MCDisassembler.
+ EDInstInfo *getEDInfo() const;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/Makefile b/contrib/llvm/lib/Target/MBlaze/Disassembler/Makefile
new file mode 100644
index 0000000..0530b32
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/MBlaze/Disassembler/Makefile -------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMMBlazeDisassembler
+
+# Hack: we need to include 'main' MBlaze target directory to grab headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/MBlaze/InstPrinter/CMakeLists.txt b/contrib/llvm/lib/Target/MBlaze/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..242a573
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/InstPrinter/CMakeLists.txt
@@ -0,0 +1,8 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/..
+ ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMMBlazeAsmPrinter
+ MBlazeInstPrinter.cpp
+ )
+
+add_dependencies(LLVMMBlazeAsmPrinter MBlazeCodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp
new file mode 100644
index 0000000..a7fd287
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.cpp
@@ -0,0 +1,69 @@
+//===-- MBlazeInstPrinter.cpp - Convert MBlaze MCInst to assembly syntax --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints an MBlaze MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "MBlaze.h"
+#include "MBlazeInstPrinter.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+
+// Include the auto-generated portion of the assembly writer.
+#include "MBlazeGenAsmWriter.inc"
+
+void MBlazeInstPrinter::printInst(const MCInst *MI, raw_ostream &O) {
+ printInstruction(MI, O);
+}
+
+void MBlazeInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, const char *Modifier) {
+ assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ O << getRegisterName(Op.getReg());
+ } else if (Op.isImm()) {
+ O << (int32_t)Op.getImm();
+ } else {
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ O << *Op.getExpr();
+ }
+}
+
+void MBlazeInstPrinter::printFSLImm(const MCInst *MI, int OpNo,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNo);
+ if (MO.isImm())
+ O << "rfsl" << MO.getImm();
+ else
+ printOperand(MI, OpNo, O, NULL);
+}
+
+void MBlazeInstPrinter::printUnsignedImm(const MCInst *MI, int OpNo,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNo);
+ if (MO.isImm())
+ O << (uint32_t)MO.getImm();
+ else
+ printOperand(MI, OpNo, O, NULL);
+}
+
+void MBlazeInstPrinter::printMemOperand(const MCInst *MI, int OpNo,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, OpNo, O, NULL);
+ O << ", ";
+ printOperand(MI, OpNo+1, O, NULL);
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
new file mode 100644
index 0000000..bebc6c8
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
@@ -0,0 +1,43 @@
+//===-- MBlazeInstPrinter.h - Convert MBlaze MCInst to assembly syntax ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints an MBlaze MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MBLAZEINSTPRINTER_H
+#define MBLAZEINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+ class MCOperand;
+
+ class MBlazeInstPrinter : public MCInstPrinter {
+ public:
+ MBlazeInstPrinter(const MCAsmInfo &MAI) : MCInstPrinter(MAI) {
+ }
+
+ virtual void printInst(const MCInst *MI, raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+ static const char *getInstructionName(unsigned Opcode);
+
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void printFSLImm(const MCInst *MI, int OpNo, raw_ostream &O);
+ void printUnsignedImm(const MCInst *MI, int OpNo, raw_ostream &O);
+ void printMemOperand(const MCInst *MI, int OpNo,raw_ostream &O,
+ const char *Modifier = 0);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/MBlaze/InstPrinter/Makefile b/contrib/llvm/lib/Target/MBlaze/InstPrinter/Makefile
new file mode 100644
index 0000000..9fb6e86
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/InstPrinter/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/MBlaze/InstPrinter/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMMBlazeAsmPrinter
+
+# Hack: we need to include 'main' MBlaze target directory to grab
+# private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlaze.h b/contrib/llvm/lib/Target/MBlaze/MBlaze.h
index f9d828b..00c73f0 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlaze.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlaze.h
@@ -21,8 +21,16 @@ namespace llvm {
class MBlazeTargetMachine;
class FunctionPass;
class MachineCodeEmitter;
+ class MCCodeEmitter;
+ class TargetAsmBackend;
class formatted_raw_ostream;
+ MCCodeEmitter *createMBlazeMCCodeEmitter(const Target &,
+ TargetMachine &TM,
+ MCContext &Ctx);
+
+ TargetAsmBackend *createMBlazeAsmBackend(const Target &, const std::string &);
+
FunctionPass *createMBlazeISelDag(MBlazeTargetMachine &TM);
FunctionPass *createMBlazeDelaySlotFillerPass(MBlazeTargetMachine &TM);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlaze.td b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
index 3815b6d..1fa1e4d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlaze.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
@@ -32,35 +32,35 @@ def MBlazeInstrInfo : InstrInfo;
//===----------------------------------------------------------------------===//
def FeaturePipe3 : SubtargetFeature<"pipe3", "HasPipe3", "true",
- "Implements 3-stage pipeline.">;
+ "Implements 3-stage pipeline">;
def FeatureBarrel : SubtargetFeature<"barrel", "HasBarrel", "true",
- "Implements barrel shifter.">;
+ "Implements barrel shifter">;
def FeatureDiv : SubtargetFeature<"div", "HasDiv", "true",
- "Implements hardware divider.">;
+ "Implements hardware divider">;
def FeatureMul : SubtargetFeature<"mul", "HasMul", "true",
- "Implements hardware multiplier.">;
+ "Implements hardware multiplier">;
def FeatureFSL : SubtargetFeature<"fsl", "HasFSL", "true",
- "Implements FSL instructions.">;
+ "Implements FSL instructions">;
def FeatureEFSL : SubtargetFeature<"efsl", "HasEFSL", "true",
- "Implements extended FSL instructions.">;
+ "Implements extended FSL instructions">;
def FeatureMSRSet : SubtargetFeature<"msrset", "HasMSRSet", "true",
- "Implements MSR register set and clear.">;
+ "Implements MSR register set and clear">;
def FeatureException : SubtargetFeature<"exception", "HasException", "true",
- "Implements hardware exception support.">;
+ "Implements hardware exception support">;
def FeaturePatCmp : SubtargetFeature<"patcmp", "HasPatCmp", "true",
- "Implements pattern compare instruction.">;
+ "Implements pattern compare instruction">;
def FeatureFPU : SubtargetFeature<"fpu", "HasFPU", "true",
- "Implements floating point unit.">;
+ "Implements floating point unit">;
def FeatureESR : SubtargetFeature<"esr", "HasESR", "true",
"Implements ESR and EAR registers">;
def FeaturePVR : SubtargetFeature<"pvr", "HasPVR", "true",
- "Implements processor version register.">;
+ "Implements processor version register">;
def FeatureMul64 : SubtargetFeature<"mul64", "HasMul64", "true",
"Implements multiplier with 64-bit result">;
def FeatureSqrt : SubtargetFeature<"sqrt", "HasSqrt", "true",
- "Implements sqrt and floating point convert.">;
+ "Implements sqrt and floating point convert">;
def FeatureMMU : SubtargetFeature<"mmu", "HasMMU", "true",
- "Implements memory management unit.">;
+ "Implements memory management unit">;
//===----------------------------------------------------------------------===//
// MBlaze processors supported.
@@ -69,13 +69,26 @@ def FeatureMMU : SubtargetFeature<"mmu", "HasMMU", "true",
class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, MBlazeGenericItineraries, Features>;
-
def : Proc<"v400", []>;
def : Proc<"v500", []>;
def : Proc<"v600", []>;
def : Proc<"v700", []>;
def : Proc<"v710", []>;
+//===----------------------------------------------------------------------===//
+// Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+def MBlazeAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
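+// (Note: isMCAsmWriter presumably selects the newer MCInst-based printer
+// path, so the generated printInstruction takes an MCInst rather than a
+// MachineInstr.)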
+
+//===----------------------------------------------------------------------===//
+// Target Declaration
+//===----------------------------------------------------------------------===//
+
def MBlaze : Target {
let InstructionSet = MBlazeInstrInfo;
+ let AssemblyWriters = [MBlazeAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmBackend.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmBackend.cpp
new file mode 100644
index 0000000..a4b21af
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmBackend.cpp
@@ -0,0 +1,163 @@
+//===-- MBlazeAsmBackend.cpp - MBlaze Assembler Backend -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetAsmBackend.h"
+#include "MBlaze.h"
+#include "MBlazeELFWriterInfo.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCELFSymbolFlags.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmBackend.h"
+using namespace llvm;
+
+static unsigned getFixupKindSize(unsigned Kind) {
+ switch (Kind) {
+ default: assert(0 && "invalid fixup kind!");
+ case FK_Data_1: return 1;
+ case FK_PCRel_2:
+ case FK_Data_2: return 2;
+ case FK_PCRel_4:
+ case FK_Data_4: return 4;
+ case FK_Data_8: return 8;
+ }
+}
+
+
+namespace {
+class MBlazeELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ MBlazeELFObjectWriter(Triple::OSType OSType)
+ : MCELFObjectTargetWriter(/*is64Bit*/ false, OSType, ELF::EM_MBLAZE,
+ /*HasRelocationAddend*/ true) {}
+};
+
+class MBlazeAsmBackend : public TargetAsmBackend {
+public:
+ MBlazeAsmBackend(const Target &T)
+ : TargetAsmBackend() {
+ }
+
+ unsigned getNumFixupKinds() const {
+ return 2;
+ }
+
+ bool MayNeedRelaxation(const MCInst &Inst) const;
+
+ void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+
+ bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
+
+ unsigned getPointerSize() const {
+ return 4;
+ }
+};
+
+static unsigned getRelaxedOpcode(unsigned Op) {
+ switch (Op) {
+ default: return Op;
+ case MBlaze::ADDIK: return MBlaze::ADDIK32;
+ case MBlaze::ORI: return MBlaze::ORI32;
+ case MBlaze::BRLID: return MBlaze::BRLID32;
+ }
+}
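+// Note: the 32-bit opcode variants above are assumed to be the relaxed,
+// two-word forms that carry a full 32-bit immediate via an IMM prefix;
+// relaxation simply swaps in the wider-immediate counterpart.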
+
+bool MBlazeAsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+ if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
+ return false;
+
+ bool hasExprOrImm = false;
+ for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
+ hasExprOrImm |= Inst.getOperand(i).isExpr();
+
+ return hasExprOrImm;
+}
+
+void MBlazeAsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ Res = Inst;
+ Res.setOpcode(getRelaxedOpcode(Inst.getOpcode()));
+}
+
+bool MBlazeAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+ if ((Count % 4) != 0)
+ return false;
+
+ for (uint64_t i = 0; i < Count; i += 4)
+ OW->Write32(0x00000000);
+
+ return true;
+}
+} // end anonymous namespace
+
+namespace {
+class ELFMBlazeAsmBackend : public MBlazeAsmBackend {
+public:
+ Triple::OSType OSType;
+ ELFMBlazeAsmBackend(const Target &T, Triple::OSType _OSType)
+ : MBlazeAsmBackend(T), OSType(_OSType) { }
+
+ void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value) const;
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return createELFObjectWriter(new MBlazeELFObjectWriter(OSType), OS,
+ /*IsLittleEndian*/ false);
+ }
+};
+
+void ELFMBlazeAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value) const {
+ unsigned Size = getFixupKindSize(Fixup.getKind());
+
+ assert(Fixup.getOffset() + Size <= DataSize &&
+ "Invalid fixup offset!");
+
+ char *data = Data + Fixup.getOffset();
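+  // The byte offsets below look odd because MBlaze is big-endian and,
+  // presumably, a 32-bit fixup is split across the low 16 bits of an
+  // IMM/instruction pair (two consecutive 4-byte words), so only bytes
+  // 2-3 and 6-7 of the 8-byte region are patched.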
+ switch (Size) {
+ default: llvm_unreachable("Cannot fixup unknown value.");
+ case 1: llvm_unreachable("Cannot fixup 1 byte value.");
+ case 8: llvm_unreachable("Cannot fixup 8 byte value.");
+
+ case 4:
+ *(data+7) = uint8_t(Value);
+ *(data+6) = uint8_t(Value >> 8);
+ *(data+3) = uint8_t(Value >> 16);
+ *(data+2) = uint8_t(Value >> 24);
+ break;
+
+ case 2:
+ *(data+3) = uint8_t(Value >> 0);
+ *(data+2) = uint8_t(Value >> 8);
+ }
+}
+} // end anonymous namespace
+
+TargetAsmBackend *llvm::createMBlazeAsmBackend(const Target &T,
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ assert(0 && "Mac not supported on MBlaze");
+ case Triple::MinGW32:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ assert(0 && "Windows not supported on MBlaze");
+ default:
+ return new ELFMBlazeAsmBackend(T, Triple(TT).getOS());
+ }
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
index f4b30ad..0016df5 100644
--- a/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
@@ -19,6 +19,8 @@
#include "MBlazeInstrInfo.h"
#include "MBlazeTargetMachine.h"
#include "MBlazeMachineFunction.h"
+#include "MBlazeMCInstLower.h"
+#include "InstPrinter/MBlazeInstPrinter.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
@@ -27,6 +29,7 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
@@ -57,6 +60,15 @@ namespace {
return "MBlaze Assembly Printer";
}
+ void printSavedRegsBitmask();
+ void emitFrameDirective();
+ virtual void EmitFunctionBodyStart();
+ virtual void EmitFunctionBodyEnd();
+ virtual void EmitFunctionEntryLabel();
+
+ virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
+ const;
+
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O);
@@ -65,26 +77,12 @@ namespace {
void printFSLImm(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
const char *Modifier = 0);
- void printSavedRegsBitmask(raw_ostream &OS);
- void emitFrameDirective();
-
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
- void EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
- }
- virtual void EmitFunctionBodyStart();
- virtual void EmitFunctionBodyEnd();
- static const char *getRegisterName(unsigned RegNo);
-
- virtual void EmitFunctionEntryLabel();
+ void EmitInstruction(const MachineInstr *MI);
};
} // end of anonymous namespace
-#include "MBlazeGenAsmWriter.inc"
+// #include "MBlazeGenAsmWriter.inc"
//===----------------------------------------------------------------------===//
//
@@ -117,10 +115,6 @@ namespace {
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Mask directives
-//===----------------------------------------------------------------------===//
-
// Print a 32 bit hex number with all numbers.
static void printHex32(unsigned int Value, raw_ostream &O) {
O << "0x";
@@ -128,12 +122,11 @@ static void printHex32(unsigned int Value, raw_ostream &O) {
O << utohexstr((Value & (0xF << (i*4))) >> (i*4));
}
-
// Create a bitmask with all callee saved registers for CPU or Floating Point
// registers. For CPU registers consider RA, GP and FP for saving if necessary.
-void MBlazeAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
+void MBlazeAsmPrinter::printSavedRegsBitmask() {
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const TargetRegisterInfo &RI = *TM.getRegisterInfo();
- const MBlazeFunctionInfo *MBlazeFI = MF->getInfo<MBlazeFunctionInfo>();
// CPU Saved Registers Bitmasks
unsigned int CPUBitmask = 0;
@@ -144,12 +137,12 @@ void MBlazeAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
unsigned RegNum = MBlazeRegisterInfo::getRegisterNumbering(Reg);
- if (MBlaze::CPURegsRegisterClass->contains(Reg))
+ if (MBlaze::GPRRegisterClass->contains(Reg))
CPUBitmask |= (1 << RegNum);
}
// Return Address and Frame registers must also be set in CPUBitmask.
- if (RI.hasFP(*MF))
+ if (TFI->hasFP(*MF))
CPUBitmask |= (1 << MBlazeRegisterInfo::
getRegisterNumbering(RI.getFrameRegister(*MF)));
@@ -158,48 +151,51 @@ void MBlazeAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
getRegisterNumbering(RI.getRARegister()));
// Print CPUBitmask
- O << "\t.mask \t"; printHex32(CPUBitmask, O);
- O << ',' << MBlazeFI->getCPUTopSavedRegOff() << '\n';
+ OutStreamer.EmitRawText("\t.mask\t0x" + Twine::utohexstr(CPUBitmask));
}
-//===----------------------------------------------------------------------===//
-// Frame and Set directives
-//===----------------------------------------------------------------------===//
-
/// Frame Directive
void MBlazeAsmPrinter::emitFrameDirective() {
- const TargetRegisterInfo &RI = *TM.getRegisterInfo();
-
- unsigned stackReg = RI.getFrameRegister(*MF);
- unsigned returnReg = RI.getRARegister();
- unsigned stackSize = MF->getFrameInfo()->getStackSize();
-
+ if (!OutStreamer.hasRawTextSupport())
+ return;
- OutStreamer.EmitRawText("\t.frame\t" + Twine(getRegisterName(stackReg)) +
- "," + Twine(stackSize) + "," +
- Twine(getRegisterName(returnReg)));
+ const TargetRegisterInfo &RI = *TM.getRegisterInfo();
+ unsigned stkReg = RI.getFrameRegister(*MF);
+ unsigned retReg = RI.getRARegister();
+ unsigned stkSze = MF->getFrameInfo()->getStackSize();
+
+ OutStreamer.EmitRawText("\t.frame\t" +
+ Twine(MBlazeInstPrinter::getRegisterName(stkReg)) +
+ "," + Twine(stkSze) + "," +
+ Twine(MBlazeInstPrinter::getRegisterName(retReg)));
}
void MBlazeAsmPrinter::EmitFunctionEntryLabel() {
- OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
- OutStreamer.EmitLabel(CurrentFnSym);
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
+ AsmPrinter::EmitFunctionEntryLabel();
}
-/// EmitFunctionBodyStart - Targets can override this to emit stuff before
-/// the first basic block in the function.
void MBlazeAsmPrinter::EmitFunctionBodyStart() {
+ if (!OutStreamer.hasRawTextSupport())
+ return;
+
emitFrameDirective();
-
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printSavedRegsBitmask(OS);
- OutStreamer.EmitRawText(OS.str());
+ printSavedRegsBitmask();
}
-/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
-/// the last basic block in the function.
void MBlazeAsmPrinter::EmitFunctionBodyEnd() {
- OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
+}
+
+//===----------------------------------------------------------------------===//
+void MBlazeAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ MBlazeMCInstLower MCInstLowering(OutContext, *Mang, *this);
+
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ OutStreamer.EmitInstruction(TmpInst);
}
// Print out an operand for an inline asm expression.
@@ -220,11 +216,11 @@ void MBlazeAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
switch (MO.getType()) {
case MachineOperand::MO_Register:
- O << getRegisterName(MO.getReg());
+ O << MBlazeInstPrinter::getRegisterName(MO.getReg());
break;
case MachineOperand::MO_Immediate:
- O << (int)MO.getImm();
+ O << (int32_t)MO.getImm();
break;
case MachineOperand::MO_FPImmediate: {
@@ -248,7 +244,7 @@ void MBlazeAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
case MachineOperand::MO_JumpTableIndex:
O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
+ << '_' << MO.getIndex();
break;
case MachineOperand::MO_ConstantPoolIndex:
@@ -267,7 +263,7 @@ void MBlazeAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(opNum);
if (MO.isImm())
- O << (unsigned int)MO.getImm();
+ O << (uint32_t)MO.getImm();
else
printOperand(MI, opNum, O);
}
@@ -284,12 +280,56 @@ void MBlazeAsmPrinter::printFSLImm(const MachineInstr *MI, int opNum,
void MBlazeAsmPrinter::
printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
const char *Modifier) {
- printOperand(MI, opNum+1, O);
- O << ", ";
printOperand(MI, opNum, O);
+ O << ", ";
+ printOperand(MI, opNum+1, O);
+}
+
+/// isBlockOnlyReachableByFallthrough - Return true if the basic block has
+/// exactly one predecessor and the control transfer mechanism between
+/// the predecessor and this block is a fall-through.
+bool MBlazeAsmPrinter::
+isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
+ // If this is a landing pad, it isn't a fall through. If it has no preds,
+ // then nothing falls through to it.
+ if (MBB->isLandingPad() || MBB->pred_empty())
+ return false;
+
+ // If there isn't exactly one predecessor, it can't be a fall through.
+ MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
+ ++PI2;
+ if (PI2 != MBB->pred_end())
+ return false;
+
+ // The predecessor has to be immediately before this block.
+ const MachineBasicBlock *Pred = *PI;
+
+ if (!Pred->isLayoutSuccessor(MBB))
+ return false;
+
+ // If the block is completely empty, then it definitely does fall through.
+ if (Pred->empty())
+ return true;
+
+ // Check if the last terminator is an unconditional branch.
+ MachineBasicBlock::const_iterator I = Pred->end();
+ while (I != Pred->begin() && !(--I)->getDesc().isTerminator())
+ ; // Noop
+ return I == Pred->end() || !I->getDesc().isBarrier();
+}
+
+static MCInstPrinter *createMBlazeMCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI) {
+ if (SyntaxVariant == 0)
+ return new MBlazeInstPrinter(MAI);
+ return 0;
}
// Force static initialization.
extern "C" void LLVMInitializeMBlazeAsmPrinter() {
RegisterAsmPrinter<MBlazeAsmPrinter> X(TheMBlazeTarget);
+ TargetRegistry::RegisterMCInstPrinter(TheMBlazeTarget,
+ createMBlazeMCInstPrinter);
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeCallingConv.td b/contrib/llvm/lib/Target/MBlaze/MBlazeCallingConv.td
index 8622e0d..4962573 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeCallingConv.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeCallingConv.td
@@ -1,16 +1,16 @@
//===- MBlazeCallingConv.td - Calling Conventions for MBlaze -*- tablegen -*-=//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
// This describes the calling conventions for MBlaze architecture.
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
-class CCIfSubtarget<string F, CCAction A>:
+class CCIfSubtarget<string F, CCAction A>:
CCIf<!strconcat("State.getTarget().getSubtarget<MBlazeSubtarget>().", F), A>;
//===----------------------------------------------------------------------===//
@@ -19,8 +19,10 @@ class CCIfSubtarget<string F, CCAction A>:
def RetCC_MBlaze : CallingConv<[
// i32 are returned in registers R3, R4
- CCIfType<[i32], CCAssignToReg<[R3, R4]>>,
+ CCIfType<[i32,f32], CCAssignToReg<[R3, R4]>>
+]>;
- // f32 are returned in registers F3, F4
- CCIfType<[f32], CCAssignToReg<[F3, F4]>>
+def CC_MBlaze : CallingConv<[
+ CCIfType<[i32,f32], CCCustom<"CC_MBlaze_AssignReg">>,
+ CCIfType<[i32,f32], CCAssignToStack<4, 4>>
]>;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
index b551b79..4399ee2 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
@@ -7,7 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// Simple pass to fills delay slots with NOPs.
+// A pass that attempts to fill delay slots with useful instructions. If no
+// instruction can be moved into the delay slot then a NOP is placed there.
//
//===----------------------------------------------------------------------===//
@@ -19,11 +20,23 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
STATISTIC(FilledSlots, "Number of delay slots filled");
+namespace llvm {
+cl::opt<bool> DisableDelaySlotFiller(
+ "disable-mblaze-delay-filler",
+ cl::init(false),
+    cl::desc("Disable the MBlaze delay slot filler."),
+ cl::Hidden);
+}
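+// (For example, passing -disable-mblaze-delay-filler to llc would force
+// every delay slot to be filled with a NOP.)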
+
namespace {
struct Filler : public MachineFunctionPass {
@@ -31,7 +44,7 @@ namespace {
const TargetInstrInfo *TII;
static char ID;
- Filler(TargetMachine &tm)
+ Filler(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
virtual const char *getPassName() const {
@@ -51,6 +64,168 @@ namespace {
char Filler::ID = 0;
} // end of anonymous namespace
+static bool hasImmInstruction(MachineBasicBlock::iterator &candidate) {
+  // Any instruction with an immediate mode operand wider than
+  // 16 bits requires an implicit IMM instruction.
+ unsigned numOper = candidate->getNumOperands();
+ for (unsigned op = 0; op < numOper; ++op) {
+ MachineOperand &mop = candidate->getOperand(op);
+
+    // The operand requires more than 16 bits to represent.
+ if (mop.isImm() && (mop.getImm() < -0x8000 || mop.getImm() > 0x7fff))
+ return true;
+
+ // We must assume that unknown immediate values require more than
+    // 16 bits to represent.
+ if (mop.isGlobal() || mop.isSymbol())
+ return true;
+
+ // FIXME: we could probably check to see if the FP value happens
+ // to not need an IMM instruction. For now we just always
+ // assume that FP values do.
+ if (mop.isFPImm())
+ return true;
+ }
+
+ return false;
+}
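+// Example: an ADDIK whose immediate does not fit in 16 bits (or that
+// references a symbol) is emitted as an IMM/ADDIK pair, so it cannot be
+// moved into the single delay slot.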
+
+static unsigned getLastRealOperand(MachineBasicBlock::iterator &instr) {
+ switch (instr->getOpcode()) {
+ default: return instr->getNumOperands();
+
+ // These instructions have a variable number of operands but the first two
+ // are the "real" operands that we care about during hazard detection.
+ case MBlaze::BRLID:
+ case MBlaze::BRALID:
+ case MBlaze::BRLD:
+ case MBlaze::BRALD:
+ return 2;
+ }
+}
+
+static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
+ MachineBasicBlock::iterator &slot) {
+ // Hazard check
+ MachineBasicBlock::iterator a = candidate;
+ MachineBasicBlock::iterator b = slot;
+ TargetInstrDesc desc = candidate->getDesc();
+
+  // MBB layout:
+ // candidate := a0 = operation(a1, a2)
+ // ...middle bit...
+ // slot := b0 = operation(b1, b2)
+
+  // Possible hazards:
+ // 1. a1 or a2 was written during the middle bit
+ // 2. a0 was read or written during the middle bit
+ // 3. a0 is one or more of {b0, b1, b2}
+ // 4. b0 is one or more of {a1, a2}
+ // 5. a accesses memory, and the middle bit
+ // contains a store operation.
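+  //
+  // For example, moving "add a0, a1, a2" past a middle instruction that
+  // redefines a1 is a type 1 hazard, while one that reads or writes a0 is
+  // a type 2 hazard.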
+ bool a_is_memory = desc.mayLoad() || desc.mayStore();
+
+ // Determine the number of operands in the slot instruction and in the
+ // candidate instruction.
+ const unsigned aend = getLastRealOperand(a);
+ const unsigned bend = getLastRealOperand(b);
+
+ // Check hazards type 1, 2 and 5 by scanning the middle bit
+ MachineBasicBlock::iterator m = a;
+ for (++m; m != b; ++m) {
+ for (unsigned aop = 0; aop<aend; ++aop) {
+ bool aop_is_reg = a->getOperand(aop).isReg();
+ if (!aop_is_reg) continue;
+
+ bool aop_is_def = a->getOperand(aop).isDef();
+ unsigned aop_reg = a->getOperand(aop).getReg();
+
+ const unsigned mend = getLastRealOperand(m);
+ for (unsigned mop = 0; mop<mend; ++mop) {
+ bool mop_is_reg = m->getOperand(mop).isReg();
+ if (!mop_is_reg) continue;
+
+ bool mop_is_def = m->getOperand(mop).isDef();
+ unsigned mop_reg = m->getOperand(mop).getReg();
+
+ if (aop_is_def && (mop_reg == aop_reg))
+ return true; // Hazard type 2, because aop = a0
+ else if (mop_is_def && (mop_reg == aop_reg))
+ return true; // Hazard type 1, because aop in {a1, a2}
+ }
+ }
+
+ // Check hazard type 5
+ if (a_is_memory && m->getDesc().mayStore())
+ return true;
+ }
+
+ // Check hazard type 3 & 4
+ for (unsigned aop = 0; aop<aend; ++aop) {
+ if (a->getOperand(aop).isReg()) {
+ unsigned aop_reg = a->getOperand(aop).getReg();
+
+ for (unsigned bop = 0; bop<bend; ++bop) {
+ if (b->getOperand(bop).isReg() && !b->getOperand(bop).isImplicit()) {
+ unsigned bop_reg = b->getOperand(bop).getReg();
+ if (aop_reg == bop_reg)
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool isDelayFiller(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator candidate) {
+ if (candidate == MBB.begin())
+ return false;
+
+ TargetInstrDesc brdesc = (--candidate)->getDesc();
+ return (brdesc.hasDelaySlot());
+}
+
+static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) {
+ if (!I->hasUnmodeledSideEffects())
+ return false;
+
+ unsigned op = I->getOpcode();
+ if (op == MBlaze::ADDK || op == MBlaze::ADDIK ||
+ op == MBlaze::ADDC || op == MBlaze::ADDIC ||
+ op == MBlaze::ADDKC || op == MBlaze::ADDIKC ||
+ op == MBlaze::RSUBK || op == MBlaze::RSUBIK ||
+ op == MBlaze::RSUBC || op == MBlaze::RSUBIC ||
+ op == MBlaze::RSUBKC || op == MBlaze::RSUBIKC)
+ return false;
+
+ return true;
+}
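+// Note: the arithmetic opcodes listed above are modeled with unmodeled side
+// effects (presumably the carry flag), but for delay slot filling purposes
+// they are treated as safe to move.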
+
+static MachineBasicBlock::iterator
+findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
+ MachineBasicBlock::iterator I = slot;
+ while (true) {
+ if (I == MBB.begin())
+ break;
+
+ --I;
+ TargetInstrDesc desc = I->getDesc();
+ if (desc.hasDelaySlot() || desc.isBranch() || isDelayFiller(MBB,I) ||
+ desc.isCall() || desc.isReturn() || desc.isBarrier() ||
+ hasUnknownSideEffects(I))
+ break;
+
+ if (hasImmInstruction(I) || delayHasHazard(I,slot))
+ continue;
+
+ return I;
+ }
+
+ return MBB.end();
+}
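+// Note: the search walks backwards from the delay slot towards the top of
+// the block and stops at the first instruction that cannot be crossed;
+// returning MBB.end() tells the caller to fall back to a NOP.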
+
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// Currently, we fill delay slots with NOPs. We assume there is only one
/// delay slot per delayed instruction.
@@ -58,11 +233,19 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
if (I->getDesc().hasDelaySlot()) {
+ MachineBasicBlock::iterator D = MBB.end();
MachineBasicBlock::iterator J = I;
- ++J;
- BuildMI(MBB, J, I->getDebugLoc(), TII->get(MBlaze::NOP));
+
+ if (!DisableDelaySlotFiller)
+ D = findDelayInstr(MBB,I);
+
++FilledSlots;
Changed = true;
+
+ if (D == MBB.end())
+ BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(MBlaze::NOP));
+ else
+ MBB.splice(++J, &MBB, D);
}
return Changed;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
new file mode 100644
index 0000000..3f26ed1
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
@@ -0,0 +1,111 @@
+//===-- MBlazeELFWriterInfo.cpp - ELF Writer Info for the MBlaze backend --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF writer information for the MBlaze backend.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlazeELFWriterInfo.h"
+#include "MBlazeRelocations.h"
+#include "llvm/Function.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Implementation of the MBlazeELFWriterInfo class
+//===----------------------------------------------------------------------===//
+
+MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM)
+ : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
+ TM.getTargetData()->isLittleEndian()) {
+}
+
+MBlazeELFWriterInfo::~MBlazeELFWriterInfo() {}
+
+unsigned MBlazeELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
+ switch (MachineRelTy) {
+ case MBlaze::reloc_pcrel_word:
+ return ELF::R_MICROBLAZE_64_PCREL;
+ case MBlaze::reloc_absolute_word:
+ return ELF::R_MICROBLAZE_NONE;
+ default:
+ llvm_unreachable("unknown mblaze machine relocation type");
+ }
+ return 0;
+}
+
+long int MBlazeELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
+ long int Modifier) const {
+ switch (RelTy) {
+ case ELF::R_MICROBLAZE_32_PCREL:
+ return Modifier - 4;
+ case ELF::R_MICROBLAZE_32:
+ return Modifier;
+ default:
+ llvm_unreachable("unknown mblaze relocation type");
+ }
+ return 0;
+}
+
+unsigned MBlazeELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
+ // FIXME: Most of these sizes are guesses based on the name
+ switch (RelTy) {
+ case ELF::R_MICROBLAZE_32:
+ case ELF::R_MICROBLAZE_32_PCREL:
+ case ELF::R_MICROBLAZE_32_PCREL_LO:
+ case ELF::R_MICROBLAZE_32_LO:
+ case ELF::R_MICROBLAZE_SRO32:
+ case ELF::R_MICROBLAZE_SRW32:
+ case ELF::R_MICROBLAZE_32_SYM_OP_SYM:
+ case ELF::R_MICROBLAZE_GOTOFF_32:
+ return 32;
+
+ case ELF::R_MICROBLAZE_64_PCREL:
+ case ELF::R_MICROBLAZE_64:
+ case ELF::R_MICROBLAZE_GOTPC_64:
+ case ELF::R_MICROBLAZE_GOT_64:
+ case ELF::R_MICROBLAZE_PLT_64:
+ case ELF::R_MICROBLAZE_GOTOFF_64:
+ return 64;
+ }
+
+ return 0;
+}
+
+bool MBlazeELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
+ // FIXME: Most of these are guesses based on the name
+ switch (RelTy) {
+ case ELF::R_MICROBLAZE_32_PCREL:
+ case ELF::R_MICROBLAZE_64_PCREL:
+ case ELF::R_MICROBLAZE_32_PCREL_LO:
+ case ELF::R_MICROBLAZE_GOTPC_64:
+ return true;
+ }
+
+ return false;
+}
+
+unsigned MBlazeELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
+ return MBlaze::reloc_absolute_word;
+}
+
+long int MBlazeELFWriterInfo::computeRelocation(unsigned SymOffset,
+ unsigned RelOffset,
+ unsigned RelTy) const {
+  if (RelTy == ELF::R_MICROBLAZE_32_PCREL || RelTy == ELF::R_MICROBLAZE_64_PCREL)
+    return SymOffset - (RelOffset + 4);
+  else
+    assert(0 && "computeRelocation unknown for this relocation type");
+
+ return 0;
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
new file mode 100644
index 0000000..63bfc0d
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
@@ -0,0 +1,58 @@
+//===-- MBlazeELFWriterInfo.h - ELF Writer Info for MBlaze ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF writer information for the MBlaze backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MBLAZE_ELF_WRITER_INFO_H
+#define MBLAZE_ELF_WRITER_INFO_H
+
+#include "llvm/Target/TargetELFWriterInfo.h"
+
+namespace llvm {
+
+ class MBlazeELFWriterInfo : public TargetELFWriterInfo {
+ public:
+ MBlazeELFWriterInfo(TargetMachine &TM);
+ virtual ~MBlazeELFWriterInfo();
+
+ /// getRelocationType - Returns the target specific ELF Relocation type.
+ /// 'MachineRelTy' contains the object code independent relocation type
+ virtual unsigned getRelocationType(unsigned MachineRelTy) const;
+
+ /// hasRelocationAddend - True if the target uses an addend in the
+ /// ELF relocation entry.
+ virtual bool hasRelocationAddend() const { return false; }
+
+ /// getDefaultAddendForRelTy - Gets the default addend value for a
+ /// relocation entry based on the target ELF relocation type.
+ virtual long int getDefaultAddendForRelTy(unsigned RelTy,
+ long int Modifier = 0) const;
+
+ /// getRelTySize - Returns the size of relocatable field in bits
+ virtual unsigned getRelocationTySize(unsigned RelTy) const;
+
+ /// isPCRelativeRel - True if the relocation type is pc relative
+ virtual bool isPCRelativeRel(unsigned RelTy) const;
+
+ /// getJumpTableRelocationTy - Returns the machine relocation type used
+ /// to reference a jumptable.
+ virtual unsigned getAbsoluteLabelMachineRelTy() const;
+
+ /// computeRelocation - Some relocatable fields could be relocated
+ /// directly, avoiding the relocation symbol emission, compute the
+ /// final relocation value for this symbol.
+ virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
+ unsigned RelTy) const;
+ };
+
+} // end llvm namespace
+
+#endif // MBLAZE_ELF_WRITER_INFO_H
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
new file mode 100644
index 0000000..e763902
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
@@ -0,0 +1,450 @@
+//===-- MBlazeFrameLowering.cpp - MBlaze Frame Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MBlaze implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mblaze-frame-lowering"
+
+#include "MBlazeFrameLowering.h"
+#include "MBlazeInstrInfo.h"
+#include "MBlazeMachineFunction.h"
+#include "InstPrinter/MBlazeInstPrinter.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace llvm {
+ cl::opt<bool> DisableStackAdjust(
+ "disable-mblaze-stack-adjust",
+ cl::init(false),
+ cl::desc("Disable MBlaze stack layout adjustment."),
+ cl::Hidden);
+}
+
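+// replaceFrameIndexes - For each (FI, offset) pair in FR, replace the stack
+// object FI with a fixed object at the recorded offset and rewrite every
+// machine operand in the function that referenced the old index.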
+static void replaceFrameIndexes(MachineFunction &MF,
+ SmallVector<std::pair<int,int64_t>, 16> &FR) {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ const SmallVector<std::pair<int,int64_t>, 16>::iterator FRB = FR.begin();
+ const SmallVector<std::pair<int,int64_t>, 16>::iterator FRE = FR.end();
+
+ SmallVector<std::pair<int,int64_t>, 16>::iterator FRI = FRB;
+ for (; FRI != FRE; ++FRI) {
+ MFI->RemoveStackObject(FRI->first);
+ int NFI = MFI->CreateFixedObject(4, FRI->second, true);
+ MBlazeFI->recordReplacement(FRI->first, NFI);
+
+ for (MachineFunction::iterator MB=MF.begin(), ME=MF.end(); MB!=ME; ++MB) {
+ MachineBasicBlock::iterator MBB = MB->begin();
+ const MachineBasicBlock::iterator MBE = MB->end();
+
+ for (; MBB != MBE; ++MBB) {
+ MachineInstr::mop_iterator MIB = MBB->operands_begin();
+ const MachineInstr::mop_iterator MIE = MBB->operands_end();
+
+ for (MachineInstr::mop_iterator MII = MIB; MII != MIE; ++MII) {
+ if (!MII->isFI() || MII->getIndex() != FRI->first) continue;
+ DEBUG(dbgs() << "FOUND FI#" << MII->getIndex() << "\n");
+ MII->setIndex(NFI);
+ }
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Stack Frame Processing methods
+// +----------------------------+
+//
+// The stack is allocated decrementing the stack pointer on
+// the first instruction of a function prologue. Once decremented,
+// all stack references are done through a positive offset
+// from the stack/frame pointer, so the stack is considered
+// to grow up.
+//
+//===----------------------------------------------------------------------===//
+
+static void analyzeFrameIndexes(MachineFunction &MF) {
+ if (DisableStackAdjust) return;
+
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ MachineRegisterInfo::livein_iterator LII = MRI.livein_begin();
+ MachineRegisterInfo::livein_iterator LIE = MRI.livein_end();
+ const SmallVector<int, 16> &LiveInFI = MBlazeFI->getLiveIn();
+ SmallVector<MachineInstr*, 16> EraseInstr;
+ SmallVector<std::pair<int,int64_t>, 16> FrameRelocate;
+
+ MachineBasicBlock *MBB = MF.getBlockNumbered(0);
+ MachineBasicBlock::iterator MIB = MBB->begin();
+ MachineBasicBlock::iterator MIE = MBB->end();
+
+ int StackAdjust = 0;
+ int StackOffset = -28;
+
+  // In this loop we are searching for frame indexes that correspond to
+  // incoming arguments that are already on the stack. We look for instruction
+  // sequences like the following:
+ //
+ // LWI REG, FI1, 0
+ // ...
+ // SWI REG, FI2, 0
+ //
+ // As long as there are no defs of REG in the ... part, we can eliminate
+ // the SWI instruction because the value has already been stored to the
+ // stack by the caller. All we need to do is locate FI at the correct
+  // stack location according to the calling conventions.
+ //
+ // Additionally, if the SWI operation kills the def of REG then we don't
+ // need the LWI operation so we can erase it as well.
+ for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i) {
+ for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
+ if (I->getOpcode() != MBlaze::LWI || I->getNumOperands() != 3 ||
+ !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
+ I->getOperand(1).getIndex() != LiveInFI[i]) continue;
+
+ unsigned FIReg = I->getOperand(0).getReg();
+ MachineBasicBlock::iterator SI = I;
+ for (SI++; SI != MIE; ++SI) {
+ if (!SI->getOperand(0).isReg() ||
+ !SI->getOperand(1).isFI() ||
+ SI->getOpcode() != MBlaze::SWI) continue;
+
+ int FI = SI->getOperand(1).getIndex();
+ if (SI->getOperand(0).getReg() != FIReg ||
+ MFI->isFixedObjectIndex(FI) ||
+ MFI->getObjectSize(FI) != 4) continue;
+
+ if (SI->getOperand(0).isDef()) break;
+
+ if (SI->getOperand(0).isKill()) {
+ DEBUG(dbgs() << "LWI for FI#" << I->getOperand(1).getIndex()
+ << " removed\n");
+ EraseInstr.push_back(I);
+ }
+
+ EraseInstr.push_back(SI);
+ DEBUG(dbgs() << "SWI for FI#" << FI << " removed\n");
+
+ FrameRelocate.push_back(std::make_pair(FI,StackOffset));
+ DEBUG(dbgs() << "FI#" << FI << " relocated to " << StackOffset << "\n");
+
+ StackOffset -= 4;
+ StackAdjust += 4;
+ break;
+ }
+ }
+ }
+
+  // In this loop we are searching for frame indexes that correspond to
+ // incoming arguments that are in registers. We look for instruction
+ // sequences like the following:
+ //
+ // ... SWI REG, FI, 0
+ //
+ // As long as the ... part does not define REG and if REG is an incoming
+  // parameter register then we know that, according to ABI conventions, the
+  // caller has allocated stack space for it already. Instead of allocating
+  // stack space on our frame, we record the correct location in the caller's
+  // frame.
+ for (MachineRegisterInfo::livein_iterator LI = LII; LI != LIE; ++LI) {
+ for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
+ if (I->definesRegister(LI->first))
+ break;
+
+ if (I->getOpcode() != MBlaze::SWI || I->getNumOperands() != 3 ||
+ !I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
+ I->getOperand(1).getIndex() < 0) continue;
+
+ if (I->getOperand(0).getReg() == LI->first) {
+ int FI = I->getOperand(1).getIndex();
+ MBlazeFI->recordLiveIn(FI);
+
+ int FILoc = 0;
+ switch (LI->first) {
+ default: llvm_unreachable("invalid incoming parameter!");
+ case MBlaze::R5: FILoc = -4; break;
+ case MBlaze::R6: FILoc = -8; break;
+ case MBlaze::R7: FILoc = -12; break;
+ case MBlaze::R8: FILoc = -16; break;
+ case MBlaze::R9: FILoc = -20; break;
+ case MBlaze::R10: FILoc = -24; break;
+ }
+
+ StackAdjust += 4;
+ FrameRelocate.push_back(std::make_pair(FI,FILoc));
+ DEBUG(dbgs() << "FI#" << FI << " relocated to " << FILoc << "\n");
+ break;
+ }
+ }
+ }
+
+ // Go ahead and erase all of the instructions that we determined were
+ // no longer needed.
+ for (int i = 0, e = EraseInstr.size(); i < e; ++i)
+ MBB->erase(EraseInstr[i]);
+
+ // Replace all of the frame indexes that we have relocated with new
+ // fixed object frame indexes.
+ replaceFrameIndexes(MF, FrameRelocate);
+}
+
+static void interruptFrameLayout(MachineFunction &MF) {
+ const Function *F = MF.getFunction();
+ llvm::CallingConv::ID CallConv = F->getCallingConv();
+
+ // If this function is not using either the interrupt_handler
+ // calling convention or the save_volatiles calling convention
+ // then we don't need to do any additional frame layout.
+ if (CallConv != llvm::CallingConv::MBLAZE_INTR &&
+ CallConv != llvm::CallingConv::MBLAZE_SVOL)
+ return;
+
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const MBlazeInstrInfo &TII =
+ *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ // Determine if the calling convention is the interrupt_handler
+ // calling convention. Some pieces of the prologue and epilogue
+  // only need to be emitted if we are lowering an interrupt handler.
+ bool isIntr = CallConv == llvm::CallingConv::MBLAZE_INTR;
+
+ // Determine where to put prologue and epilogue additions
+ MachineBasicBlock &MENT = MF.front();
+ MachineBasicBlock &MEXT = MF.back();
+
+ MachineBasicBlock::iterator MENTI = MENT.begin();
+ MachineBasicBlock::iterator MEXTI = prior(MEXT.end());
+
+ DebugLoc ENTDL = MENTI != MENT.end() ? MENTI->getDebugLoc() : DebugLoc();
+ DebugLoc EXTDL = MEXTI != MEXT.end() ? MEXTI->getDebugLoc() : DebugLoc();
+
+ // Store the frame indexes generated during prologue additions for use
+ // when we are generating the epilogue additions.
+ SmallVector<int, 10> VFI;
+
+ // Build the prologue SWI for R3 - R12 if needed. Note that R11 must
+ // always have a SWI because it is used when processing RMSR.
+ for (unsigned r = MBlaze::R3; r <= MBlaze::R12; ++r) {
+ if (!MRI.isPhysRegUsed(r) && !(isIntr && r == MBlaze::R11)) continue;
+
+ int FI = MFI->CreateStackObject(4,4,false,false);
+ VFI.push_back(FI);
+
+ BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), r)
+ .addFrameIndex(FI).addImm(0);
+ }
+
+ // Build the prologue SWI for R17, R18
+ int R17FI = MFI->CreateStackObject(4,4,false,false);
+ int R18FI = MFI->CreateStackObject(4,4,false,false);
+
+ BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R17)
+ .addFrameIndex(R17FI).addImm(0);
+
+ BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R18)
+ .addFrameIndex(R18FI).addImm(0);
+
+  // Build the prologue SWI and the epilogue LWI for RMSR if needed
+ if (isIntr) {
+ int MSRFI = MFI->CreateStackObject(4,4,false,false);
+ BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::MFS), MBlaze::R11)
+ .addReg(MBlaze::RMSR);
+ BuildMI(MENT, MENTI, ENTDL, TII.get(MBlaze::SWI), MBlaze::R11)
+ .addFrameIndex(MSRFI).addImm(0);
+
+ BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R11)
+ .addFrameIndex(MSRFI).addImm(0);
+ BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::MTS), MBlaze::RMSR)
+ .addReg(MBlaze::R11);
+ }
+
+ // Build the epilogue LWI for R17, R18
+ BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R18)
+ .addFrameIndex(R18FI).addImm(0);
+
+ BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), MBlaze::R17)
+ .addFrameIndex(R17FI).addImm(0);
+
+ // Build the epilogue LWI for R3 - R12 if needed
+ for (unsigned r = MBlaze::R12, i = VFI.size(); r >= MBlaze::R3; --r) {
+    // Mirror the prologue's skip condition so the VFI index stays in sync.
+    if (!MRI.isPhysRegUsed(r) && !(isIntr && r == MBlaze::R11)) continue;
+ BuildMI(MEXT, MEXTI, EXTDL, TII.get(MBlaze::LWI), r)
+ .addFrameIndex(VFI[--i]).addImm(0);
+ }
+}
+
+static void determineFrameLayout(MachineFunction &MF) {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+
+  // Replace the dummy '0' SPOffset with the negative offsets, as explained in
+  // LowerFORMAL_ARGUMENTS. Leaving '0' until now is necessary to keep
+  // calculateFrameObjectOffsets from applying its own layout to these objects.
+ MBlazeFI->adjustLoadArgsFI(MFI);
+ MBlazeFI->adjustStoreVarArgsFI(MFI);
+
+ // Get the number of bytes to allocate from the FrameInfo
+ unsigned FrameSize = MFI->getStackSize();
+ DEBUG(dbgs() << "Original Frame Size: " << FrameSize << "\n" );
+
+ // Get the alignments provided by the target, and the maximum alignment
+ // (if any) of the fixed frame objects.
+ // unsigned MaxAlign = MFI->getMaxAlignment();
+ unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
+ unsigned AlignMask = TargetAlign - 1;
+
+ // Make sure the frame is aligned.
+ FrameSize = (FrameSize + AlignMask) & ~AlignMask;
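+  // e.g. with a 4-byte TargetAlign, a 13-byte frame is rounded up to 16.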
+ MFI->setStackSize(FrameSize);
+ DEBUG(dbgs() << "Aligned Frame Size: " << FrameSize << "\n" );
+}
+
+int MBlazeFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI)
+ const {
+ const MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ if (MBlazeFI->hasReplacement(FI))
+ FI = MBlazeFI->getReplacement(FI);
+ return TargetFrameLowering::getFrameIndexOffset(MF,FI);
+}
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool MBlazeFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
+}
+
+void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MBlazeInstrInfo &TII =
+ *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+
+ // Determine the correct frame layout
+ determineFrameLayout(MF);
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned StackSize = MFI->getStackSize();
+
+ // No need to allocate space on the stack.
+ if (StackSize == 0 && !MFI->adjustsStack() && !requiresRA) return;
+
+ int FPOffset = MBlazeFI->getFPStackOffset();
+ int RAOffset = MBlazeFI->getRAStackOffset();
+
+ // Adjust stack : addi R1, R1, -imm
+ BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADDIK), MBlaze::R1)
+ .addReg(MBlaze::R1).addImm(-StackSize);
+
+ // swi R15, R1, stack_loc
+ if (MFI->adjustsStack() || requiresRA) {
+ BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
+ .addReg(MBlaze::R15).addReg(MBlaze::R1).addImm(RAOffset);
+ }
+
+ if (hasFP(MF)) {
+ // swi R19, R1, stack_loc
+ BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
+ .addReg(MBlaze::R19).addReg(MBlaze::R1).addImm(FPOffset);
+
+ // add R19, R1, R0
+ BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADD), MBlaze::R19)
+ .addReg(MBlaze::R1).addReg(MBlaze::R0);
+ }
+}
+
+void MBlazeFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ const MBlazeInstrInfo &TII =
+ *static_cast<const MBlazeInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+
+ // Get the FI's where RA and FP are saved.
+ int FPOffset = MBlazeFI->getFPStackOffset();
+ int RAOffset = MBlazeFI->getRAStackOffset();
+
+ if (hasFP(MF)) {
+ // add R1, R19, R0
+ BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADD), MBlaze::R1)
+ .addReg(MBlaze::R19).addReg(MBlaze::R0);
+
+ // lwi R19, R1, stack_loc
+ BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R19)
+ .addReg(MBlaze::R1).addImm(FPOffset);
+ }
+
+ // lwi R15, R1, stack_loc
+ if (MFI->adjustsStack() || requiresRA) {
+ BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R15)
+ .addReg(MBlaze::R1).addImm(RAOffset);
+ }
+
+ // Get the number of bytes from FrameInfo
+ int StackSize = (int) MFI->getStackSize();
+
+ // addi R1, R1, imm
+ if (StackSize) {
+ BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADDIK), MBlaze::R1)
+ .addReg(MBlaze::R1).addImm(StackSize);
+ }
+}
+
+void MBlazeFrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+
+ if (MFI->adjustsStack() || requiresRA) {
+ MBlazeFI->setRAStackOffset(0);
+ MFI->CreateFixedObject(4,0,true);
+ }
+
+ if (hasFP(MF)) {
+ MBlazeFI->setFPStackOffset(4);
+ MFI->CreateFixedObject(4,4,true);
+ }
+
+ interruptFrameLayout(MF);
+ analyzeFrameIndexes(MF);
+}
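+// Note: the return address slot is reserved at offset 0 and the frame
+// pointer slot at offset 4, matching the offsets recorded in
+// MBlazeFunctionInfo by the function above.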
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h
new file mode 100644
index 0000000..8be15bf
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h
@@ -0,0 +1,53 @@
+//=- MBlazeFrameLowering.h - Define frame lowering for MicroBlaze -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MBlaze-specific subclass of TargetFrameLowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MBLAZE_FRAMEINFO_H
+#define MBLAZE_FRAMEINFO_H
+
+#include "MBlaze.h"
+#include "MBlazeSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class MBlazeSubtarget;
+
+class MBlazeFrameLowering : public TargetFrameLowering {
+protected:
+ const MBlazeSubtarget &STI;
+
+public:
+ explicit MBlazeFrameLowering(const MBlazeSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsUp, 4, 0), STI(sti) {
+ }
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ bool targetHandlesStackFrameRounding() const { return true; }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+
+ virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp
index e64dd0e..6b43497 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp
@@ -81,13 +81,9 @@ private:
SDNode *getGlobalBaseReg();
SDNode *Select(SDNode *N);
- // Complex Pattern.
- bool SelectAddr(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset);
-
// Address Selection
- bool SelectAddrRegReg(SDNode *Op, SDValue N, SDValue &Base, SDValue &Index);
- bool SelectAddrRegImm(SDNode *Op, SDValue N, SDValue &Disp, SDValue &Base);
+ bool SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index);
+ bool SelectAddrRegImm(SDValue N, SDValue &Disp, SDValue &Base);
// getI32Imm - Return a target constant with the specified value, of type i32.
inline SDValue getI32Imm(unsigned Imm) {
@@ -122,7 +118,7 @@ static bool isIntS32Immediate(SDValue Op, int32_t &Imm) {
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool MBlazeDAGToDAGISel::
-SelectAddrRegReg(SDNode *Op, SDValue N, SDValue &Base, SDValue &Index) {
+SelectAddrRegReg(SDValue N, SDValue &Base, SDValue &Index) {
if (N.getOpcode() == ISD::FrameIndex) return false;
if (N.getOpcode() == ISD::TargetExternalSymbol ||
N.getOpcode() == ISD::TargetGlobalAddress)
@@ -137,8 +133,8 @@ SelectAddrRegReg(SDNode *Op, SDValue N, SDValue &Base, SDValue &Index) {
N.getOperand(1).getOpcode() == ISD::TargetJumpTable)
return false; // jump tables.
- Base = N.getOperand(1);
- Index = N.getOperand(0);
+ Base = N.getOperand(0);
+ Index = N.getOperand(1);
return true;
}
@@ -149,9 +145,9 @@ SelectAddrRegReg(SDNode *Op, SDValue N, SDValue &Base, SDValue &Index) {
/// a signed 32-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool MBlazeDAGToDAGISel::
-SelectAddrRegImm(SDNode *Op, SDValue N, SDValue &Disp, SDValue &Base) {
+SelectAddrRegImm(SDValue N, SDValue &Base, SDValue &Disp) {
// If this can be more profitably realized as r+r, fail.
- if (SelectAddrRegReg(Op, N, Disp, Base))
+ if (SelectAddrRegReg(N, Base, Disp))
return false;
if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
@@ -163,7 +159,6 @@ SelectAddrRegImm(SDNode *Op, SDValue N, SDValue &Disp, SDValue &Base) {
} else {
Base = N.getOperand(0);
}
- DEBUG( errs() << "WESLEY: Using Operand Immediate\n" );
return true; // [r+i]
}
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
@@ -171,7 +166,6 @@ SelectAddrRegImm(SDNode *Op, SDValue N, SDValue &Disp, SDValue &Base) {
uint32_t Imm = CN->getZExtValue();
Disp = CurDAG->getTargetConstant(Imm, CN->getValueType(0));
Base = CurDAG->getRegister(MBlaze::R0, CN->getValueType(0));
- DEBUG( errs() << "WESLEY: Using Constant Node\n" );
return true;
}
@@ -190,76 +184,21 @@ SDNode *MBlazeDAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
-/// ComplexPattern used on MBlazeInstrInfo
-/// Used on MBlaze Load/Store instructions
-bool MBlazeDAGToDAGISel::
-SelectAddr(SDNode *Op, SDValue Addr, SDValue &Offset, SDValue &Base) {
- // if Address is FI, get the TargetFrameIndex.
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
-
- // on PIC code Load GA
- if (TM.getRelocationModel() == Reloc::PIC_) {
- if ((Addr.getOpcode() == ISD::TargetGlobalAddress) ||
- (Addr.getOpcode() == ISD::TargetConstantPool) ||
- (Addr.getOpcode() == ISD::TargetJumpTable)){
- Base = CurDAG->getRegister(MBlaze::R15, MVT::i32);
- Offset = Addr;
- return true;
- }
- } else {
- if ((Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress))
- return false;
- }
-
- // Operand is a result from an ADD.
- if (Addr.getOpcode() == ISD::ADD) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
- if (isUInt<16>(CN->getZExtValue())) {
-
- // If the first operand is a FI, get the TargetFI Node
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
- (Addr.getOperand(0))) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- } else {
- Base = Addr.getOperand(0);
- }
-
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
- return true;
- }
- }
- }
-
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
DebugLoc dl = Node->getDebugLoc();
- // Dump information about the Node being selected
- DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
-
// If we have a custom node, we already have selected!
- if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ if (Node->isMachineOpcode())
return NULL;
- }
///
// Instruction Selection not handled by the auto-generated
// tablegen selection should be handled here.
///
- switch(Opcode) {
+ switch (Opcode) {
default: break;
// Get target GOT address.
@@ -271,7 +210,7 @@ SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) {
int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
EVT VT = Node->getValueType(0);
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
- unsigned Opc = MBlaze::ADDI;
+ unsigned Opc = MBlaze::ADDIK;
if (Node->hasOneUse())
return CurDAG->SelectNodeTo(Node, Opc, VT, TFI, imm);
return CurDAG->getMachineNode(Opc, dl, VT, TFI, imm);
@@ -289,8 +228,8 @@ SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) {
SDValue R20Reg = CurDAG->getRegister(MBlaze::R20, MVT::i32);
SDValue InFlag(0, 0);
- if ( (isa<GlobalAddressSDNode>(Callee)) ||
- (isa<ExternalSymbolSDNode>(Callee)) )
+ if ((isa<GlobalAddressSDNode>(Callee)) ||
+ (isa<ExternalSymbolSDNode>(Callee)))
{
/// Direct call for global addresses and external symbols
SDValue GPReg = CurDAG->getRegister(MBlaze::R15, MVT::i32);
@@ -309,7 +248,7 @@ SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) {
// Emit Jump and Link Register
SDNode *ResNode = CurDAG->getMachineNode(MBlaze::BRLID, dl, MVT::Other,
- MVT::Flag, R20Reg, Chain);
+ MVT::Glue, R20Reg, Chain);
Chain = SDValue(ResNode, 0);
InFlag = SDValue(ResNode, 1);
ReplaceUses(SDValue(Node, 0), Chain);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 1730b68..2f40bfc 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -35,6 +35,11 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+
const char *MBlazeTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
case MBlazeISD::JmpLink : return "MBlazeISD::JmpLink";
@@ -56,9 +61,9 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
setBooleanContents(ZeroOrOneBooleanContent);
// Set up the register classes
- addRegisterClass(MVT::i32, MBlaze::CPURegsRegisterClass);
+ addRegisterClass(MVT::i32, MBlaze::GPRRegisterClass);
if (Subtarget->hasFPU()) {
- addRegisterClass(MVT::f32, MBlaze::FGR32RegisterClass);
+ addRegisterClass(MVT::f32, MBlaze::GPRRegisterClass);
setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
}
@@ -86,6 +91,10 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ // Sign extended loads must be expanded
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
+
// MBlaze has no REM or DIVREM operations.
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
@@ -112,8 +121,8 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
}
// Expand unsupported conversions
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::f32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i32, Expand);
// Expand SELECT_CC
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
@@ -166,7 +175,6 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
// Use the default for now
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
// MBlaze doesn't have extending float->double load/store
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
@@ -204,172 +212,353 @@ SDValue MBlazeTargetLowering::LowerOperation(SDValue Op,
//===----------------------------------------------------------------------===//
MachineBasicBlock*
MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
+ MachineBasicBlock *MBB)
+ const {
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
+
case MBlaze::ShiftRL:
case MBlaze::ShiftRA:
- case MBlaze::ShiftL: {
- // To "insert" a shift left instruction, we actually have to insert a
- // simple loop. The incoming instruction knows the destination vreg to
- // set, the source vreg to operate over and the shift amount.
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It;
-
- // start:
- // andi samt, samt, 31
- // beqid samt, finish
- // add dst, src, r0
- // loop:
- // addik samt, samt, -1
- // sra dst, dst
- // bneid samt, loop
- // nop
- // finish:
- MachineFunction *F = BB->getParent();
- MachineRegisterInfo &R = F->getRegInfo();
- MachineBasicBlock *loop = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *finish = F->CreateMachineBasicBlock(LLVM_BB);
- F->insert(It, loop);
- F->insert(It, finish);
-
- // Update machine-CFG edges by transfering adding all successors and
- // remaining instructions from the current block to the new block which
- // will contain the Phi node for the select.
- finish->splice(finish->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- finish->transferSuccessorsAndUpdatePHIs(BB);
-
- // Add the true and fallthrough blocks as its successors.
- BB->addSuccessor(loop);
- BB->addSuccessor(finish);
-
- // Next, add the finish block as a successor of the loop block
- loop->addSuccessor(finish);
- loop->addSuccessor(loop);
-
- unsigned IAMT = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- BuildMI(BB, dl, TII->get(MBlaze::ANDI), IAMT)
- .addReg(MI->getOperand(2).getReg())
- .addImm(31);
-
- unsigned IVAL = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- BuildMI(BB, dl, TII->get(MBlaze::ADDI), IVAL)
- .addReg(MI->getOperand(1).getReg())
- .addImm(0);
-
- BuildMI(BB, dl, TII->get(MBlaze::BEQID))
- .addReg(IAMT)
- .addMBB(finish);
-
- unsigned DST = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- unsigned NDST = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- BuildMI(loop, dl, TII->get(MBlaze::PHI), DST)
- .addReg(IVAL).addMBB(BB)
- .addReg(NDST).addMBB(loop);
-
- unsigned SAMT = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- unsigned NAMT = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT)
- .addReg(IAMT).addMBB(BB)
- .addReg(NAMT).addMBB(loop);
-
- if (MI->getOpcode() == MBlaze::ShiftL)
- BuildMI(loop, dl, TII->get(MBlaze::ADD), NDST).addReg(DST).addReg(DST);
- else if (MI->getOpcode() == MBlaze::ShiftRA)
- BuildMI(loop, dl, TII->get(MBlaze::SRA), NDST).addReg(DST);
- else if (MI->getOpcode() == MBlaze::ShiftRL)
- BuildMI(loop, dl, TII->get(MBlaze::SRL), NDST).addReg(DST);
- else
- llvm_unreachable( "Cannot lower unknown shift instruction" );
-
- BuildMI(loop, dl, TII->get(MBlaze::ADDI), NAMT)
- .addReg(SAMT)
- .addImm(-1);
-
- BuildMI(loop, dl, TII->get(MBlaze::BNEID))
- .addReg(NAMT)
- .addMBB(loop);
-
- BuildMI(*finish, finish->begin(), dl,
- TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- .addReg(IVAL).addMBB(BB)
- .addReg(NDST).addMBB(loop);
-
- // The pseudo instruction is no longer needed so remove it
+ case MBlaze::ShiftL:
+ return EmitCustomShift(MI, MBB);
+
+ case MBlaze::Select_FCC:
+ case MBlaze::Select_CC:
+ return EmitCustomSelect(MI, MBB);
+
+ case MBlaze::CAS32:
+ case MBlaze::SWP32:
+ case MBlaze::LAA32:
+ case MBlaze::LAS32:
+ case MBlaze::LAD32:
+ case MBlaze::LAO32:
+ case MBlaze::LAX32:
+ case MBlaze::LAN32:
+ return EmitCustomAtomic(MI, MBB);
+
+ case MBlaze::MEMBARRIER:
+    // The MicroBlaze does not need memory barriers. Just delete the pseudo
+ // instruction and finish.
MI->eraseFromParent();
- return finish;
+ return MBB;
+ }
+}
+
+MachineBasicBlock*
+MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc dl = MI->getDebugLoc();
+
+ // To "insert" a shift left instruction, we actually have to insert a
+ // simple loop. The incoming instruction knows the destination vreg to
+ // set, the source vreg to operate over and the shift amount.
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction::iterator It = MBB;
+ ++It;
+
+ // start:
+ // andi samt, samt, 31
+ // beqid samt, finish
+ // add dst, src, r0
+ // loop:
+ // addik samt, samt, -1
+ // sra dst, dst
+ // bneid samt, loop
+ // nop
+ // finish:
+ MachineFunction *F = MBB->getParent();
+ MachineRegisterInfo &R = F->getRegInfo();
+ MachineBasicBlock *loop = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *finish = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, loop);
+ F->insert(It, finish);
+
+  // Update machine-CFG edges by transferring all successors and remaining
+  // instructions from the current block to the new block, which will
+  // contain the PHI node for the shift result.
+ finish->splice(finish->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ finish->transferSuccessorsAndUpdatePHIs(MBB);
+
+ // Add the true and fallthrough blocks as its successors.
+ MBB->addSuccessor(loop);
+ MBB->addSuccessor(finish);
+
+ // Next, add the finish block as a successor of the loop block
+ loop->addSuccessor(finish);
+ loop->addSuccessor(loop);
+
+ unsigned IAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(MBB, dl, TII->get(MBlaze::ANDI), IAMT)
+ .addReg(MI->getOperand(2).getReg())
+ .addImm(31);
+
+ unsigned IVAL = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(MBB, dl, TII->get(MBlaze::ADDIK), IVAL)
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(0);
+
+ BuildMI(MBB, dl, TII->get(MBlaze::BEQID))
+ .addReg(IAMT)
+ .addMBB(finish);
+
+ unsigned DST = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned NDST = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(loop, dl, TII->get(MBlaze::PHI), DST)
+ .addReg(IVAL).addMBB(MBB)
+ .addReg(NDST).addMBB(loop);
+
+ unsigned SAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ unsigned NAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT)
+ .addReg(IAMT).addMBB(MBB)
+ .addReg(NAMT).addMBB(loop);
+
+ if (MI->getOpcode() == MBlaze::ShiftL)
+ BuildMI(loop, dl, TII->get(MBlaze::ADD), NDST).addReg(DST).addReg(DST);
+ else if (MI->getOpcode() == MBlaze::ShiftRA)
+ BuildMI(loop, dl, TII->get(MBlaze::SRA), NDST).addReg(DST);
+ else if (MI->getOpcode() == MBlaze::ShiftRL)
+ BuildMI(loop, dl, TII->get(MBlaze::SRL), NDST).addReg(DST);
+ else
+ llvm_unreachable("Cannot lower unknown shift instruction");
+
+ BuildMI(loop, dl, TII->get(MBlaze::ADDIK), NAMT)
+ .addReg(SAMT)
+ .addImm(-1);
+
+ BuildMI(loop, dl, TII->get(MBlaze::BNEID))
+ .addReg(NAMT)
+ .addMBB(loop);
+
+ BuildMI(*finish, finish->begin(), dl,
+ TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
+ .addReg(IVAL).addMBB(MBB)
+ .addReg(NDST).addMBB(loop);
+
+ // The pseudo instruction is no longer needed so remove it
+ MI->eraseFromParent();
+ return finish;
+}
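
The loop this inserter builds is just a variable-count shift done one bit
at a time; a minimal C sketch of the same computation for ShiftL
(illustrative names, not LLVM API), mirroring the block comment above:

    unsigned shift_left(unsigned src, unsigned samt) {
        unsigned dst = src;        /* add   dst, src, r0    */
        samt &= 31;                /* andi  samt, samt, 31  */
        while (samt != 0) {        /* beqid / bneid         */
            dst += dst;            /* add   dst, dst, dst   */
            samt -= 1;             /* addik samt, samt, -1  */
        }
        return dst;
    }
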
+
+MachineBasicBlock*
+MBlazeTargetLowering::EmitCustomSelect(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc dl = MI->getDebugLoc();
+
+ // To "insert" a SELECT_CC instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction::iterator It = MBB;
+ ++It;
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // setcc r1, r2, r3
+ // bNE r1, r0, copy1MBB
+ // fallthrough --> copy0MBB
+ MachineFunction *F = MBB->getParent();
+ MachineBasicBlock *flsBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *dneBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ unsigned Opc;
+ switch (MI->getOperand(4).getImm()) {
+ default: llvm_unreachable("Unknown branch condition");
+ case MBlazeCC::EQ: Opc = MBlaze::BEQID; break;
+ case MBlazeCC::NE: Opc = MBlaze::BNEID; break;
+ case MBlazeCC::GT: Opc = MBlaze::BGTID; break;
+ case MBlazeCC::LT: Opc = MBlaze::BLTID; break;
+ case MBlazeCC::GE: Opc = MBlaze::BGEID; break;
+ case MBlazeCC::LE: Opc = MBlaze::BLEID; break;
+ }
+
+ F->insert(It, flsBB);
+ F->insert(It, dneBB);
+
+ // Transfer the remainder of MBB and its successor edges to dneBB.
+ dneBB->splice(dneBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ dneBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ MBB->addSuccessor(flsBB);
+ MBB->addSuccessor(dneBB);
+ flsBB->addSuccessor(dneBB);
+
+ BuildMI(MBB, dl, TII->get(Opc))
+ .addReg(MI->getOperand(3).getReg())
+ .addMBB(dneBB);
+
+  // dneBB:
+  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, flsBB ]
+  //   ...
+
+ BuildMI(*dneBB, dneBB->begin(), dl,
+ TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg()).addMBB(flsBB)
+ .addReg(MI->getOperand(1).getReg()).addMBB(MBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return dneBB;
+}
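
A C sketch of the diamond EmitCustomSelect builds: the branch jumps
straight to dneBB carrying the true value, the fallthrough block flsBB
supplies the false value, and the PHI merges the two (names are
illustrative, not LLVM API):

    /* 'cmp_holds' models the condition register; the real code tests it
       with one of BEQID/BNEID/BGTID/BLTID/BGEID/BLEID chosen above */
    int select_cc(int cmp_holds, int tval, int fval) {
        if (cmp_holds)        /* thisMBB: bCCid cmp, dneBB   */
            return tval;      /* reaches dneBB from thisMBB  */
        return fval;          /* flsBB fallthrough -> dneBB  */
    }
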
+
+MachineBasicBlock*
+MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc dl = MI->getDebugLoc();
+
+  // All atomic instructions on the MicroBlaze are implemented using the
+ // load-linked / store-conditional style atomic instruction sequences.
+ // Thus, all operations will look something like the following:
+ //
+ // start:
+ // lwx RV, RP, 0
+ // <do stuff>
+ // swx RV, RP, 0
+ // addic RC, R0, 0
+ // bneid RC, start
+ //
+ // exit:
+ //
+  // To "insert" one of these atomic operations we therefore build the
+  // load-linked / store-conditional retry loop shown above around the
+  // incoming pseudo instruction's operands.
+  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+  MachineFunction::iterator It = MBB;
+  ++It;
+
+ MachineFunction *F = MBB->getParent();
+ MachineRegisterInfo &R = F->getRegInfo();
+
+ // Create the start and exit basic blocks for the atomic operation
+ MachineBasicBlock *start = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *exit = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, start);
+ F->insert(It, exit);
+
+  // Update machine-CFG edges by transferring all successors and remaining
+  // instructions from the current block to the exit block.
+ exit->splice(exit->begin(), MBB, llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ exit->transferSuccessorsAndUpdatePHIs(MBB);
+
+  // Add the start block as a successor of the current block.
+ MBB->addSuccessor(start);
+
+ BuildMI(start, dl, TII->get(MBlaze::LWX), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(MBlaze::R0);
+
+ MachineBasicBlock *final = start;
+ unsigned finalReg = 0;
+
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Cannot lower unknown atomic instruction!");
+
+ case MBlaze::SWP32:
+ finalReg = MI->getOperand(2).getReg();
+ start->addSuccessor(exit);
+ start->addSuccessor(start);
+ break;
+
+ case MBlaze::LAN32:
+ case MBlaze::LAX32:
+ case MBlaze::LAO32:
+ case MBlaze::LAD32:
+ case MBlaze::LAS32:
+ case MBlaze::LAA32: {
+ unsigned opcode = 0;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Cannot lower unknown atomic load!");
+ case MBlaze::LAA32: opcode = MBlaze::ADDIK; break;
+ case MBlaze::LAS32: opcode = MBlaze::RSUBIK; break;
+ case MBlaze::LAD32: opcode = MBlaze::AND; break;
+ case MBlaze::LAO32: opcode = MBlaze::OR; break;
+ case MBlaze::LAX32: opcode = MBlaze::XOR; break;
+ case MBlaze::LAN32: opcode = MBlaze::AND; break;
}
- case MBlaze::Select_FCC:
- case MBlaze::Select_CC: {
- // To "insert" a SELECT_CC instruction, we actually have to insert the
- // diamond control-flow pattern. The incoming instruction knows the
- // destination vreg to set, the condition code register to branch on, the
- // true/false values to select between, and a branch opcode to use.
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // setcc r1, r2, r3
- // bNE r1, r0, copy1MBB
- // fallthrough --> copy0MBB
- MachineFunction *F = BB->getParent();
- MachineBasicBlock *flsBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *dneBB = F->CreateMachineBasicBlock(LLVM_BB);
-
- unsigned Opc;
- switch (MI->getOperand(4).getImm()) {
- default: llvm_unreachable( "Unknown branch condition" );
- case MBlazeCC::EQ: Opc = MBlaze::BNEID; break;
- case MBlazeCC::NE: Opc = MBlaze::BEQID; break;
- case MBlazeCC::GT: Opc = MBlaze::BLEID; break;
- case MBlazeCC::LT: Opc = MBlaze::BGEID; break;
- case MBlazeCC::GE: Opc = MBlaze::BLTID; break;
- case MBlazeCC::LE: Opc = MBlaze::BGTID; break;
+ finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ start->addSuccessor(exit);
+ start->addSuccessor(start);
+
+ BuildMI(start, dl, TII->get(opcode), finalReg)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg());
+
+ if (MI->getOpcode() == MBlaze::LAN32) {
+ unsigned tmp = finalReg;
+ finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(start, dl, TII->get(MBlaze::XORI), finalReg)
+ .addReg(tmp)
+ .addImm(-1);
}
+ break;
+ }
+
+ case MBlaze::CAS32: {
+ finalReg = MI->getOperand(3).getReg();
+ final = F->CreateMachineBasicBlock(LLVM_BB);
+
+ F->insert(It, final);
+ start->addSuccessor(exit);
+ start->addSuccessor(final);
+ final->addSuccessor(exit);
+ final->addSuccessor(start);
+
+ unsigned CMP = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(start, dl, TII->get(MBlaze::CMP), CMP)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg());
- F->insert(It, flsBB);
- F->insert(It, dneBB);
-
- // Transfer the remainder of BB and its successor edges to dneBB.
- dneBB->splice(dneBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- dneBB->transferSuccessorsAndUpdatePHIs(BB);
-
- BB->addSuccessor(flsBB);
- BB->addSuccessor(dneBB);
- flsBB->addSuccessor(dneBB);
-
- BuildMI(BB, dl, TII->get(Opc))
- .addReg(MI->getOperand(3).getReg())
- .addMBB(dneBB);
-
- // sinkMBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- //BuildMI(dneBB, dl, TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- // .addReg(MI->getOperand(1).getReg()).addMBB(flsBB)
- // .addReg(MI->getOperand(2).getReg()).addMBB(BB);
-
- BuildMI(*dneBB, dneBB->begin(), dl,
- TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(flsBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(BB);
-
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return dneBB;
+ BuildMI(start, dl, TII->get(MBlaze::BNEID))
+ .addReg(CMP)
+ .addMBB(exit);
+
+ final->moveAfter(start);
+ exit->moveAfter(final);
+ break;
}
}
+
+ unsigned CHK = R.createVirtualRegister(MBlaze::GPRRegisterClass);
+ BuildMI(final, dl, TII->get(MBlaze::SWX))
+ .addReg(finalReg)
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(MBlaze::R0);
+
+ BuildMI(final, dl, TII->get(MBlaze::ADDIC), CHK)
+ .addReg(MBlaze::R0)
+ .addImm(0);
+
+ BuildMI(final, dl, TII->get(MBlaze::BNEID))
+ .addReg(CHK)
+ .addMBB(start);
+
+ // The pseudo instruction is no longer needed so remove it
+ MI->eraseFromParent();
+ return exit;
}
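
A pseudo-C sketch of the retry loop for one of these pseudos, taking
fetch-and-add (LAA32) as the example; lwx()/swx() are illustrative helpers
standing in for the load-exclusive / store-exclusive instructions, with swx
reporting failure through the carry bit as the addic/bneid pair assumes:

    extern int lwx(volatile int *p);         /* illustrative stand-in     */
    extern int swx(volatile int *p, int v);  /* nonzero on failed store   */

    int atomic_fetch_add(volatile int *p, int v) {
        int old, failed;
        do {                            /* start:             */
            old = lwx(p);               /*   lwx   RV, RP, 0  */
            failed = swx(p, old + v);   /*   swx   RV2, RP, 0 */
        } while (failed);               /*   addic RC, R0, 0  */
        return old;                     /*   bneid RC, start  */
    }
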
//===----------------------------------------------------------------------===//
@@ -392,9 +581,9 @@ SDValue MBlazeTargetLowering::LowerSELECT_CC(SDValue Op,
CompareFlag = DAG.getNode(MBlazeISD::ICmp, dl, MVT::i32, LHS, RHS)
.getValue(1);
} else {
- llvm_unreachable( "Cannot lower select_cc with unknown type" );
+ llvm_unreachable("Cannot lower select_cc with unknown type");
}
-
+
return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
CompareFlag);
}
@@ -421,15 +610,12 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
SDValue HiPart;
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
- bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- unsigned char OpFlag = IsPIC ? MBlazeII::MO_GOT : MBlazeII::MO_ABS_HILO;
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
+ SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 0);
return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, JTI);
- //return JTI;
}
SDValue MBlazeTargetLowering::
@@ -440,7 +626,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
- N->getOffset(), MBlazeII::MO_ABS_HILO);
+ N->getOffset(), 0);
return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, CP);
}
@@ -456,7 +642,8 @@ SDValue MBlazeTargetLowering::LowerVASTART(SDValue Op,
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), SV, 0,
+ return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
+ MachinePointerInfo(SV),
false, false, 0);
}
@@ -466,52 +653,24 @@ SDValue MBlazeTargetLowering::LowerVASTART(SDValue Op,
#include "MBlazeGenCallingConv.inc"
-static bool CC_MBlaze2(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- static const unsigned RegsSize=6;
- static const unsigned IntRegs[] = {
+static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ static const unsigned ArgRegs[] = {
MBlaze::R5, MBlaze::R6, MBlaze::R7,
MBlaze::R8, MBlaze::R9, MBlaze::R10
};
- static const unsigned FltRegs[] = {
- MBlaze::F5, MBlaze::F6, MBlaze::F7,
- MBlaze::F8, MBlaze::F9, MBlaze::F10
- };
+ const unsigned NumArgRegs = array_lengthof(ArgRegs);
+ unsigned Reg = State.AllocateReg(ArgRegs, NumArgRegs);
+ if (!Reg) return false;
- unsigned Reg=0;
-
- // Promote i8 and i16
- if (LocVT == MVT::i8 || LocVT == MVT::i16) {
- LocVT = MVT::i32;
- if (ArgFlags.isSExt())
- LocInfo = CCValAssign::SExt;
- else if (ArgFlags.isZExt())
- LocInfo = CCValAssign::ZExt;
- else
- LocInfo = CCValAssign::AExt;
- }
-
- if (ValVT == MVT::i32) {
- Reg = State.AllocateReg(IntRegs, RegsSize);
- LocVT = MVT::i32;
- } else if (ValVT == MVT::f32) {
- Reg = State.AllocateReg(FltRegs, RegsSize);
- LocVT = MVT::f32;
- }
+ unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
+ State.AllocateStack(SizeInBytes, SizeInBytes);
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- if (!Reg) {
- unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
- unsigned Offset = State.AllocateStack(SizeInBytes, SizeInBytes);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
- } else {
- unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
- State.AllocateStack(SizeInBytes, SizeInBytes);
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- }
-
- return false; // CC must always match
+ return true;
}
//===----------------------------------------------------------------------===//
@@ -532,31 +691,35 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// MBlaze does not yet support tail call optimization
isTailCall = false;
+  // The MBlaze ABI requires stack slots for arguments passed to vararg
+  // functions even if those arguments are passed in registers.
+ bool needsRegArgSlots = isVarArg;
+
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
*DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze2);
+ CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
+
+  // Variable-argument calls require a minimum of 24 bytes of stack:
+  // one 4-byte slot for each of the six argument registers R5-R10.
+ if (isVarArg && NumBytes < 24) NumBytes = 24;
+
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
- // First/LastArgStackLoc contains the first/last
- // "at stack" argument location.
- int LastArgStackLoc = 0;
- unsigned FirstStackArgLoc = 0;
-
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- EVT RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
SDValue Arg = OutVals[i];
// Promote the value if needed.
@@ -582,20 +745,31 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// Register can't get to this point...
assert(VA.isMemLoc());
+      // Since we are already passing values on the stack we don't
+      // need to create additional slots for the values passed via
+      // registers.
+ needsRegArgSlots = false;
+
// Create the frame index object for this incoming parameter
- LastArgStackLoc = (FirstStackArgLoc + VA.getLocMemOffset());
- int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
- LastArgStackLoc, true);
+ unsigned ArgSize = VA.getValVT().getSizeInBits()/8;
+ unsigned StackLoc = VA.getLocMemOffset() + 4;
+ int FI = MFI->CreateFixedObject(ArgSize, StackLoc, true);
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
      // Emit an ISD::STORE which stores the
      // parameter value to a stack location.
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
false, false, 0));
}
}
+ // If we need to reserve stack space for the arguments passed via registers
+ // then create a fixed stack object at the beginning of the stack.
+ if (needsRegArgSlots && TFI.hasReservedCallFrame(MF))
+    MFI->CreateFixedObject(28, 0, true);
+
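
A worked example of the vararg rules above (the layout is inferred from
this hunk, so treat it as an assumption): for a call like printf(fmt, a, b)
all three arguments travel in R5-R7, yet the caller still reserves outgoing
stack space so the callee can spill its register arguments, and NumBytes is
raised to the 24-byte floor, i.e. six 4-byte slots covering R5-R10.

    #include <stdio.h>
    void example(int a, int b) {
        /* fmt -> R5, a -> R6, b -> R7; at least 24 bytes of outgoing
           stack are still reserved because printf is vararg */
        printf("%d %d\n", a, b);
    }
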
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
if (!MemOpChains.empty())
@@ -616,19 +790,18 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- unsigned char OpFlag = MBlazeII::MO_NO_FLAG;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
- getPointerTy(), 0, OpFlag);
+ getPointerTy(), 0, 0);
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
- getPointerTy(), OpFlag);
+ getPointerTy(), 0);
// MBlazeJmpLink = #chain, #target_address, #opt_in_flags...
// = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@@ -678,7 +851,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv,
RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2);
InVals.push_back(Chain.getValue(0));
- }
+ }
return Chain;
}
@@ -713,30 +886,28 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze2);
+ CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze);
SDValue StackPtr;
- unsigned FirstStackArgLoc = 0;
-
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
// Arguments stored on registers
if (VA.isRegLoc()) {
- EVT RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
ArgRegEnd = VA.getLocReg();
TargetRegisterClass *RC = 0;
if (RegVT == MVT::i32)
- RC = MBlaze::CPURegsRegisterClass;
+ RC = MBlaze::GPRRegisterClass;
else if (RegVT == MVT::f32)
- RC = MBlaze::FGR32RegisterClass;
+ RC = MBlaze::GPRRegisterClass;
else
llvm_unreachable("RegVT not supported by LowerFormalArguments");
// Transform the arguments stored on
// physical registers into virtual ones
- unsigned Reg = MF.addLiveIn(ArgRegEnd, RC);
+ unsigned Reg = MF.addLiveIn(ArgRegEnd, RC, dl);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it has been passed promoted
@@ -756,9 +927,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
}
InVals.push_back(ArgValue);
-
} else { // VA.isRegLoc()
-
// sanity check
assert(VA.isMemLoc());
@@ -774,41 +943,44 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
+ unsigned StackLoc = VA.getLocMemOffset() + 4;
int FI = MFI->CreateFixedObject(ArgSize, 0, true);
- MBlazeFI->recordLoadArgsFI(FI, -(ArgSize+
- (FirstStackArgLoc + VA.getLocMemOffset())));
+ MBlazeFI->recordLoadArgsFI(FI, -StackLoc);
+ MBlazeFI->recordLiveIn(FI);
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0,
+ InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
  // To meet the ABI, when varargs are passed in registers, the registers
  // must have their values written to the caller's stack frame. If the last
- // argument was placed in the stack, there's no need to save any register.
+ // argument was placed in the stack, there's no need to save any register.
if ((isVarArg) && ArgRegEnd) {
if (StackPtr.getNode() == 0)
StackPtr = DAG.getRegister(StackReg, getPointerTy());
// The last register argument that must be saved is MBlaze::R10
- TargetRegisterClass *RC = MBlaze::CPURegsRegisterClass;
+ TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
unsigned Begin = MBlazeRegisterInfo::getRegisterNumbering(MBlaze::R5);
unsigned Start = MBlazeRegisterInfo::getRegisterNumbering(ArgRegEnd+1);
unsigned End = MBlazeRegisterInfo::getRegisterNumbering(MBlaze::R10);
- unsigned StackLoc = ArgLocs.size()-1 + (Start - Begin);
+ unsigned StackLoc = Start - Begin + 1;
for (; Start <= End; ++Start, ++StackLoc) {
unsigned Reg = MBlazeRegisterInfo::getRegisterFromNumbering(Start);
- unsigned LiveReg = MF.addLiveIn(Reg, RC);
+ unsigned LiveReg = MF.addLiveIn(Reg, RC, dl);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, LiveReg, MVT::i32);
int FI = MFI->CreateFixedObject(4, 0, true);
- MBlazeFI->recordStoreVarArgsFI(FI, -(4+(StackLoc*4)));
+ MBlazeFI->recordStoreVarArgsFI(FI, -(StackLoc*4));
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
- OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, NULL, 0,
+ OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
+ MachinePointerInfo(),
false, false, 0));
// Record the frame index of the first variable argument
@@ -818,7 +990,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
}
}
- // All stores are grouped in one node to allow the matching between
+ // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
if (!OutChains.empty()) {
OutChains.push_back(Chain);
@@ -872,13 +1044,18 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Flag = Chain.getValue(1);
}
- // Return on MBlaze is always a "rtsd R15, 8"
+  // If this function uses the interrupt_handler calling convention
+  // then return with "rtid r14, 0"; otherwise use "rtsd r15, 8".
+ unsigned Ret = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
+ : MBlazeISD::Ret;
+ unsigned Reg = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlaze::R14
+ : MBlaze::R15;
+ SDValue DReg = DAG.getRegister(Reg, MVT::i32);
+
if (Flag.getNode())
- return DAG.getNode(MBlazeISD::Ret, dl, MVT::Other,
- Chain, DAG.getRegister(MBlaze::R15, MVT::i32), Flag);
- else // Return Void
- return DAG.getNode(MBlazeISD::Ret, dl, MVT::Other,
- Chain, DAG.getRegister(MBlaze::R15, MVT::i32));
+ return DAG.getNode(Ret, dl, MVT::Other, Chain, DReg, Flag);
+
+ return DAG.getNode(Ret, dl, MVT::Other, Chain, DReg);
}
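
A hypothetical use of the new convention (the attribute spelling follows
the MicroBlaze GCC convention and is an assumption here): such a function
now returns with "rtid r14, 0" instead of "rtsd r15, 8".

    void __attribute__((interrupt_handler)) timer_isr(void) {
        /* ... acknowledge the interrupt ... */
    }
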
//===----------------------------------------------------------------------===//
@@ -909,6 +1086,37 @@ getConstraintType(const std::string &Constraint) const
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+MBlazeTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ const Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'd':
+ case 'y':
+ if (type->isIntegerTy())
+ weight = CW_Register;
+ break;
+ case 'f':
+ if (type->isFloatTy())
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
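
A small example of the constraint letters this function weighs; 'd' names a
general register holding an integer per the GCC MBlaze convention noted
later in this file (the asm body is illustrative):

    int add3(int a, int b) {
        int r;
        __asm__("add %0, %1, %2" : "=d"(r) : "d"(a), "d"(b));
        return r;
    }
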
+
/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
@@ -917,10 +1125,10 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
- return std::make_pair(0U, MBlaze::CPURegsRegisterClass);
+ return std::make_pair(0U, MBlaze::GPRRegisterClass);
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, MBlaze::FGR32RegisterClass);
+ return std::make_pair(0U, MBlaze::GPRRegisterClass);
}
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
@@ -940,6 +1148,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
// GCC MBlaze Constraint Letters
case 'd':
case 'y':
+ case 'f':
return make_vector<unsigned>(
MBlaze::R3, MBlaze::R4, MBlaze::R5, MBlaze::R6,
MBlaze::R7, MBlaze::R9, MBlaze::R10, MBlaze::R11,
@@ -947,15 +1156,6 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
MBlaze::R22, MBlaze::R23, MBlaze::R24, MBlaze::R25,
MBlaze::R26, MBlaze::R27, MBlaze::R28, MBlaze::R29,
MBlaze::R30, MBlaze::R31, 0);
-
- case 'f':
- return make_vector<unsigned>(
- MBlaze::F3, MBlaze::F4, MBlaze::F5, MBlaze::F6,
- MBlaze::F7, MBlaze::F9, MBlaze::F10, MBlaze::F11,
- MBlaze::F12, MBlaze::F19, MBlaze::F20, MBlaze::F21,
- MBlaze::F22, MBlaze::F23, MBlaze::F24, MBlaze::F25,
- MBlaze::F26, MBlaze::F27, MBlaze::F28, MBlaze::F29,
- MBlaze::F30, MBlaze::F31, 0);
}
return std::vector<unsigned>();
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
index 5ec2563..91649bc 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
@@ -15,6 +15,7 @@
#ifndef MBlazeISELLOWERING_H
#define MBlazeISELLOWERING_H
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "MBlaze.h"
@@ -31,6 +32,30 @@ namespace llvm {
GE,
LE
};
+
+ inline static CC getOppositeCondition(CC cc) {
+ switch (cc) {
+ default: llvm_unreachable("Unknown condition code");
+ case EQ: return NE;
+ case NE: return EQ;
+ case GT: return LE;
+ case LT: return GE;
+ case GE: return LT;
+    case LE: return GT;
+ }
+ }
+
+ inline static const char *MBlazeCCToString(CC cc) {
+ switch (cc) {
+ default: llvm_unreachable("Unknown condition code");
+ case EQ: return "eq";
+ case NE: return "ne";
+ case GT: return "gt";
+ case LT: return "lt";
+ case GE: return "ge";
+ case LE: return "le";
+ }
+ }
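
A trivial usage sketch of these helpers (assuming they live in the
MBlazeCC namespace alongside the CC enum):

    MBlazeCC::CC inv = MBlazeCC::getOppositeCondition(MBlazeCC::LE); // GT
    const char *name = MBlazeCC::MBlazeCCToString(inv);              // "gt"
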
}
namespace MBlazeISD {
@@ -53,8 +78,11 @@ namespace llvm {
// Integer Compare
ICmp,
- // Return
- Ret
+ // Return from subroutine
+ Ret,
+
+ // Return from interrupt
+ IRet
};
}
@@ -121,6 +149,15 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
+ virtual MachineBasicBlock*
+ EmitCustomShift(MachineInstr *MI, MachineBasicBlock *MBB) const;
+
+ virtual MachineBasicBlock*
+ EmitCustomSelect(MachineInstr *MI, MachineBasicBlock *MBB) const;
+
+ virtual MachineBasicBlock*
+ EmitCustomAtomic(MachineInstr *MI, MachineBasicBlock *MBB) const;
+
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
@@ -128,6 +165,11 @@ namespace llvm {
// Inline asm support
ConstraintType getConstraintType(const std::string &Constraint) const;
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
index 657b1d4..094de5c 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
@@ -19,72 +19,72 @@
// Memory Access Instructions
//===----------------------------------------------------------------------===//
class LoadFM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs FGR32:$dst), (ins memrr:$addr),
+ TA<op, 0x000, (outs GPR:$dst), (ins memrr:$addr),
!strconcat(instr_asm, " $dst, $addr"),
- [(set FGR32:$dst, (OpNode xaddr:$addr))], IILoad>;
+ [(set (f32 GPR:$dst), (OpNode xaddr:$addr))], IILoad>;
class LoadFMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TAI<op, (outs FGR32:$dst), (ins memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(set FGR32:$dst, (OpNode iaddr:$addr))], IILoad>;
+ TB<op, (outs GPR:$dst), (ins memri:$addr),
+ !strconcat(instr_asm, " $dst, $addr"),
+ [(set (f32 GPR:$dst), (OpNode iaddr:$addr))], IILoad>;
class StoreFM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs), (ins FGR32:$dst, memrr:$addr),
+ TA<op, 0x000, (outs), (ins GPR:$dst, memrr:$addr),
!strconcat(instr_asm, " $dst, $addr"),
- [(OpNode FGR32:$dst, xaddr:$addr)], IIStore>;
+ [(OpNode (f32 GPR:$dst), xaddr:$addr)], IIStore>;
class StoreFMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TAI<op, (outs), (ins FGR32:$dst, memrr:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(OpNode FGR32:$dst, iaddr:$addr)], IIStore>;
+ TB<op, (outs), (ins GPR:$dst, memrr:$addr),
+ !strconcat(instr_asm, " $dst, $addr"),
+ [(OpNode (f32 GPR:$dst), iaddr:$addr)], IIStore>;
class ArithF<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
InstrItinClass itin> :
- TA<op, flags, (outs FGR32:$dst), (ins FGR32:$b, FGR32:$c),
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
- [(set FGR32:$dst, (OpNode FGR32:$b, FGR32:$c))], itin>;
+ [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
class CmpFN<bits<6> op, bits<11> flags, string instr_asm,
InstrItinClass itin> :
- TA<op, flags, (outs CPURegs:$dst), (ins FGR32:$b, FGR32:$c),
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
[], itin>;
class ArithFR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
InstrItinClass itin> :
- TA<op, flags, (outs FGR32:$dst), (ins FGR32:$b, FGR32:$c),
- !strconcat(instr_asm, " $dst, $c, $b"),
- [(set FGR32:$dst, (OpNode FGR32:$b, FGR32:$c))], itin>;
-
-class ArithF2<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TF<op, flags, (outs FGR32:$dst), (ins FGR32:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
-
-class ArithIF<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TF<op, flags, (outs FGR32:$dst), (ins CPURegs:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
-
-class ArithFI<bits<6> op, bits<11> flags, string instr_asm,
- InstrItinClass itin> :
- TF<op, flags, (outs CPURegs:$dst), (ins FGR32:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [], itin>;
+ TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
+ !strconcat(instr_asm, " $dst, $c, $b"),
+ [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
class LogicF<bits<6> op, string instr_asm> :
- TAI<op, (outs FGR32:$dst), (ins FGR32:$b, FGR32:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [],
- IIAlu>;
+ TB<op, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
class LogicFI<bits<6> op, string instr_asm> :
- TAI<op, (outs FGR32:$dst), (ins FGR32:$b, fimm:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [],
- IIAlu>;
+ TB<op, (outs GPR:$dst), (ins GPR:$b, fimm:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
+
+let rb=0 in {
+ class ArithF2<bits<6> op, bits<11> flags, string instr_asm,
+ InstrItinClass itin> :
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
+ !strconcat(instr_asm, " $dst, $b"),
+ [], itin>;
+
+ class ArithIF<bits<6> op, bits<11> flags, string instr_asm,
+ InstrItinClass itin> :
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
+ !strconcat(instr_asm, " $dst, $b"),
+ [], itin>;
+
+ class ArithFI<bits<6> op, bits<11> flags, string instr_asm,
+ InstrItinClass itin> :
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b),
+ !strconcat(instr_asm, " $dst, $b"),
+ [], itin>;
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -94,24 +94,25 @@ class LogicFI<bits<6> op, string instr_asm> :
// FPU Arithmetic Instructions
//===----------------------------------------------------------------------===//
let Predicates=[HasFPU] in {
- def FOR : LogicF<0x28, "or ">;
def FORI : LogicFI<0x28, "ori ">;
def FADD : ArithF<0x16, 0x000, "fadd ", fadd, IIAlu>;
def FRSUB : ArithFR<0x16, 0x080, "frsub ", fsub, IIAlu>;
def FMUL : ArithF<0x16, 0x100, "fmul ", fmul, IIAlu>;
def FDIV : ArithF<0x16, 0x180, "fdiv ", fdiv, IIAlu>;
+}
- def LWF : LoadFM<0x32, "lw ", load>;
- def LWFI : LoadFMI<0x32, "lwi ", load>;
+let Predicates=[HasFPU], isCodeGenOnly=1 in {
+ def LWF : LoadFM<0x32, "lw ", load>;
+ def LWFI : LoadFMI<0x3A, "lwi ", load>;
- def SWF : StoreFM<0x32, "sw ", store>;
- def SWFI : StoreFMI<0x32, "swi ", store>;
+ def SWF : StoreFM<0x36, "sw ", store>;
+ def SWFI : StoreFMI<0x3E, "swi ", store>;
}
let Predicates=[HasFPU,HasSqrt] in {
def FLT : ArithIF<0x16, 0x280, "flt ", IIAlu>;
def FINT : ArithFI<0x16, 0x300, "fint ", IIAlu>;
- def FSQRT : ArithF2<0x16, 0x300, "fsqrt ", IIAlu>;
+ def FSQRT : ArithF2<0x16, 0x380, "fsqrt ", IIAlu>;
}
let isAsCheapAsAMove = 1 in {
@@ -126,98 +127,98 @@ let isAsCheapAsAMove = 1 in {
let usesCustomInserter = 1 in {
- def Select_FCC : MBlazePseudo<(outs FGR32:$dst),
- (ins FGR32:$T, FGR32:$F, CPURegs:$CMP, i32imm:$CC),
+ def Select_FCC : MBlazePseudo<(outs GPR:$dst),
+ (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC),
"; SELECT_FCC PSEUDO!",
[]>;
}
// Floating point conversions
let Predicates=[HasFPU] in {
- def : Pat<(sint_to_fp CPURegs:$V), (FLT CPURegs:$V)>;
- def : Pat<(fp_to_sint FGR32:$V), (FINT FGR32:$V)>;
- def : Pat<(fsqrt FGR32:$V), (FSQRT FGR32:$V)>;
+ def : Pat<(sint_to_fp GPR:$V), (FLT GPR:$V)>;
+ def : Pat<(fp_to_sint GPR:$V), (FINT GPR:$V)>;
+ def : Pat<(fsqrt GPR:$V), (FSQRT GPR:$V)>;
}
// SET_CC operations
let Predicates=[HasFPU] in {
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETEQ),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_EQ FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETNE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_EQ FGR32:$L, FGR32:$R), 1)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETOEQ),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_EQ FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETONE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (XOR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_EQ FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETONE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_EQ FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETGT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_GT FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETLT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_LT FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETGE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_GE FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETLE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_LE FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETOGT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_GT FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETOLT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_LT FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETOGE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_GE FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETOLE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_LE FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETUEQ),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_EQ FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETUNE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_NE FGR32:$L, FGR32:$R), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETUGT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_GT FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETULT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_LT FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETUGE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_GE FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETULE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (OR (FCMP_UN FGR32:$L, FGR32:$R),
- (FCMP_LE FGR32:$L, FGR32:$R)), 2)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETO),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_UN FGR32:$L, FGR32:$R), 1)>;
- def : Pat<(setcc FGR32:$L, FGR32:$R, SETUO),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (FCMP_UN FGR32:$L, FGR32:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_EQ GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETNE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_EQ GPR:$L, GPR:$R), 1)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_EQ GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (XOR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETONE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_GT GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_LT GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_GE GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETLE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_LE GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_GT GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_LT GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_GE GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETOLE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_LE GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_EQ GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUNE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_NE GPR:$L, GPR:$R), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_GT GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_LT GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_GE GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETULE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (OR (FCMP_UN GPR:$L, GPR:$R),
+ (FCMP_LE GPR:$L, GPR:$R)), 2)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETO),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_UN GPR:$L, GPR:$R), 1)>;
+ def : Pat<(setcc (f32 GPR:$L), (f32 GPR:$R), SETUO),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (FCMP_UN GPR:$L, GPR:$R), 2)>;
}
// SELECT operations
-def : Pat<(select CPURegs:$C, FGR32:$T, FGR32:$F),
- (Select_FCC FGR32:$T, FGR32:$F, CPURegs:$C, 2)>;
+def : Pat<(select (i32 GPR:$C), (f32 GPR:$T), (f32 GPR:$F)),
+ (Select_FCC GPR:$T, GPR:$F, GPR:$C, 2)>;
//===----------------------------------------------------------------------===//
// Patterns for Floating Point Instructions
//===----------------------------------------------------------------------===//
-def : Pat<(f32 fpimm:$imm), (FORI F0, fpimm:$imm)>;
+def : Pat<(f32 fpimm:$imm), (FORI (i32 R0), fpimm:$imm)>;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
index 5158411..3209845 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
@@ -10,144 +10,220 @@
//===----------------------------------------------------------------------===//
// FSL Instruction Formats
//===----------------------------------------------------------------------===//
-class FSLGetD<bits<6> op, bits<11> flags, string instr_asm, Intrinsic OpNode> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b))], IIAlu>;
-
-class FSLGet<bits<6> op, string instr_asm, Intrinsic OpNode> :
- TAI<op, (outs CPURegs:$dst), (ins fslimm:$b),
- !strconcat(instr_asm, " $dst, $b"),
- [(set CPURegs:$dst, (OpNode immZExt4:$b))], IIAlu>;
-
-class FSLPutD<bits<6> op, bits<11> flags, string instr_asm, Intrinsic OpNode> :
- TA<op, flags, (outs), (ins CPURegs:$v, CPURegs:$b),
- !strconcat(instr_asm, " $v, $b"),
- [(OpNode CPURegs:$v, CPURegs:$b)], IIAlu>;
-
-class FSLPut<bits<6> op, string instr_asm, Intrinsic OpNode> :
- TAI<op, (outs), (ins CPURegs:$v, fslimm:$b),
- !strconcat(instr_asm, " $v, $b"),
- [(OpNode CPURegs:$v, immZExt4:$b)], IIAlu>;
-
-class FSLPutTD<bits<6> op, bits<11> flags, string instr_asm, Intrinsic OpNode> :
- TA<op, flags, (outs), (ins CPURegs:$b),
- !strconcat(instr_asm, " $b"),
- [(OpNode CPURegs:$b)], IIAlu>;
-
-class FSLPutT<bits<6> op, string instr_asm, Intrinsic OpNode> :
- TAI<op, (outs), (ins fslimm:$b),
- !strconcat(instr_asm, " $b"),
- [(OpNode immZExt4:$b)], IIAlu>;
+class FSLGet<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FRCX, (outs GPR:$dst), (ins fslimm:$b),
+ !strconcat(instr_asm, " $dst, $b"),
+      [(set GPR:$dst, (OpNode immZExt4:$b))], IIAlu>
+{
+ bits<5> rd;
+ bits<4> fslno;
+
+ let Inst{6-10} = rd;
+ let Inst{11-15} = 0x0;
+ let Inst{16} = 0x0;
+ let Inst{17-21} = flags; // NCTAE
+ let Inst{22-27} = 0x0;
+ let Inst{28-31} = fslno;
+}
+
+class FSLGetD<bits<6> op, bits<5> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FRCR, (outs GPR:$dst), (ins GPR:$b),
+ !strconcat(instr_asm, " $dst, $b"),
+ [(set GPR:$dst, (OpNode GPR:$b))], IIAlu>
+{
+ bits<5> rd;
+ bits<5> rb;
+
+ let Inst{6-10} = rd;
+ let Inst{11-15} = 0x0;
+ let Inst{16-20} = rb;
+ let Inst{21} = 0x0;
+ let Inst{22-26} = flags; // NCTAE
+ let Inst{27-31} = 0x0;
+}
+
+class FSLPut<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FCRCX, (outs), (ins GPR:$v, fslimm:$b),
+ !strconcat(instr_asm, " $v, $b"),
+ [(OpNode GPR:$v, immZExt4:$b)], IIAlu>
+{
+ bits<5> ra;
+ bits<4> fslno;
+
+ let Inst{6-10} = 0x0;
+ let Inst{11-15} = ra;
+ let Inst{16} = 0x1;
+ let Inst{17-20} = flags; // NCTA
+ let Inst{21-27} = 0x0;
+ let Inst{28-31} = fslno;
+}
+
+class FSLPutD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FCRR, (outs), (ins GPR:$v, GPR:$b),
+ !strconcat(instr_asm, " $v, $b"),
+ [(OpNode GPR:$v, GPR:$b)], IIAlu>
+{
+ bits<5> ra;
+ bits<5> rb;
+
+ let Inst{6-10} = 0x0;
+ let Inst{11-15} = ra;
+ let Inst{16-20} = rb;
+ let Inst{21} = 0x1;
+ let Inst{22-25} = flags; // NCTA
+ let Inst{26-31} = 0x0;
+}
+
+class FSLPutT<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FCX, (outs), (ins fslimm:$b),
+ !strconcat(instr_asm, " $b"),
+ [(OpNode immZExt4:$b)], IIAlu>
+{
+ bits<4> fslno;
+
+ let Inst{6-10} = 0x0;
+ let Inst{11-15} = 0x0;
+ let Inst{16} = 0x1;
+ let Inst{17-20} = flags; // NCTA
+ let Inst{21-27} = 0x0;
+ let Inst{28-31} = fslno;
+}
+
+class FSLPutTD<bits<6> op, bits<4> flags, string instr_asm, Intrinsic OpNode> :
+ MBlazeInst<op, FCR, (outs), (ins GPR:$b),
+ !strconcat(instr_asm, " $b"),
+ [(OpNode GPR:$b)], IIAlu>
+{
+ bits<5> rb;
+
+ let Inst{6-10} = 0x0;
+ let Inst{11-15} = 0x0;
+ let Inst{16-20} = rb;
+ let Inst{21} = 0x1;
+ let Inst{22-25} = flags; // NCTA
+ let Inst{26-31} = 0x0;
+}
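
The Inst{} ranges above pin down the full 32-bit encodings. A sketch of the
immediate-form get, reading the ranges with the MicroBlaze manual's
numbering where bit 0 is the most significant (that numbering is an
assumption; the helper is illustrative):

    #include <stdint.h>
    /* assemble "get rd, fslN": op=0x1B | rd | NCTAE flags | fslno */
    static uint32_t encode_fsl_get(uint32_t rd, uint32_t flags,
                                   uint32_t fslno) {
        uint32_t inst = 0;
        inst |= 0x1Bu << 26;   /* Inst{0-5}   opcode               */
        inst |= rd    << 21;   /* Inst{6-10}  destination register */
                               /* Inst{11-15} = 0, Inst{16} = 0    */
        inst |= flags << 10;   /* Inst{17-21} NCTAE flag bits      */
                               /* Inst{22-27} = 0                  */
        inst |= fslno;         /* Inst{28-31} FSL port number      */
        return inst;
    }
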
//===----------------------------------------------------------------------===//
// FSL Get Instructions
//===----------------------------------------------------------------------===//
-def GET : FSLGet<0x1B, "get ", int_mblaze_fsl_get>;
-def AGET : FSLGet<0x1B, "aget ", int_mblaze_fsl_aget>;
-def CGET : FSLGet<0x1B, "cget ", int_mblaze_fsl_cget>;
-def CAGET : FSLGet<0x1B, "caget ", int_mblaze_fsl_caget>;
-def EGET : FSLGet<0x1B, "eget ", int_mblaze_fsl_eget>;
-def EAGET : FSLGet<0x1B, "eaget ", int_mblaze_fsl_eaget>;
-def ECGET : FSLGet<0x1B, "ecget ", int_mblaze_fsl_ecget>;
-def ECAGET : FSLGet<0x1B, "ecaget ", int_mblaze_fsl_ecaget>;
-def NGET : FSLGet<0x1B, "nget ", int_mblaze_fsl_nget>;
-def NAGET : FSLGet<0x1B, "naget ", int_mblaze_fsl_naget>;
-def NCGET : FSLGet<0x1B, "ncget ", int_mblaze_fsl_ncget>;
-def NCAGET : FSLGet<0x1B, "ncaget ", int_mblaze_fsl_ncaget>;
-def NEGET : FSLGet<0x1B, "neget ", int_mblaze_fsl_neget>;
-def NEAGET : FSLGet<0x1B, "neaget ", int_mblaze_fsl_neaget>;
-def NECGET : FSLGet<0x1B, "necget ", int_mblaze_fsl_necget>;
-def NECAGET : FSLGet<0x1B, "necaget ", int_mblaze_fsl_necaget>;
-def TGET : FSLGet<0x1B, "tget ", int_mblaze_fsl_tget>;
-def TAGET : FSLGet<0x1B, "taget ", int_mblaze_fsl_taget>;
-def TCGET : FSLGet<0x1B, "tcget ", int_mblaze_fsl_tcget>;
-def TCAGET : FSLGet<0x1B, "tcaget ", int_mblaze_fsl_tcaget>;
-def TEGET : FSLGet<0x1B, "teget ", int_mblaze_fsl_teget>;
-def TEAGET : FSLGet<0x1B, "teaget ", int_mblaze_fsl_teaget>;
-def TECGET : FSLGet<0x1B, "tecget ", int_mblaze_fsl_tecget>;
-def TECAGET : FSLGet<0x1B, "tecaget ", int_mblaze_fsl_tecaget>;
-def TNGET : FSLGet<0x1B, "tnget ", int_mblaze_fsl_tnget>;
-def TNAGET : FSLGet<0x1B, "tnaget ", int_mblaze_fsl_tnaget>;
-def TNCGET : FSLGet<0x1B, "tncget ", int_mblaze_fsl_tncget>;
-def TNCAGET : FSLGet<0x1B, "tncaget ", int_mblaze_fsl_tncaget>;
-def TNEGET : FSLGet<0x1B, "tneget ", int_mblaze_fsl_tneget>;
-def TNEAGET : FSLGet<0x1B, "tneaget ", int_mblaze_fsl_tneaget>;
-def TNECGET : FSLGet<0x1B, "tnecget ", int_mblaze_fsl_tnecget>;
-def TNECAGET : FSLGet<0x1B, "tnecaget ", int_mblaze_fsl_tnecaget>;
+def GET : FSLGet<0x1B, 0x00, "get ", int_mblaze_fsl_get>;
+def AGET : FSLGet<0x1B, 0x02, "aget ", int_mblaze_fsl_aget>;
+def CGET : FSLGet<0x1B, 0x08, "cget ", int_mblaze_fsl_cget>;
+def CAGET : FSLGet<0x1B, 0x0A, "caget ", int_mblaze_fsl_caget>;
+def EGET : FSLGet<0x1B, 0x01, "eget ", int_mblaze_fsl_eget>;
+def EAGET : FSLGet<0x1B, 0x03, "eaget ", int_mblaze_fsl_eaget>;
+def ECGET : FSLGet<0x1B, 0x09, "ecget ", int_mblaze_fsl_ecget>;
+def ECAGET : FSLGet<0x1B, 0x0B, "ecaget ", int_mblaze_fsl_ecaget>;
+def TGET : FSLGet<0x1B, 0x04, "tget ", int_mblaze_fsl_tget>;
+def TAGET : FSLGet<0x1B, 0x06, "taget ", int_mblaze_fsl_taget>;
+def TCGET : FSLGet<0x1B, 0x0C, "tcget ", int_mblaze_fsl_tcget>;
+def TCAGET : FSLGet<0x1B, 0x0E, "tcaget ", int_mblaze_fsl_tcaget>;
+def TEGET : FSLGet<0x1B, 0x05, "teget ", int_mblaze_fsl_teget>;
+def TEAGET : FSLGet<0x1B, 0x07, "teaget ", int_mblaze_fsl_teaget>;
+def TECGET : FSLGet<0x1B, 0x0D, "tecget ", int_mblaze_fsl_tecget>;
+def TECAGET : FSLGet<0x1B, 0x0F, "tecaget ", int_mblaze_fsl_tecaget>;
+
+let Defs = [CARRY] in {
+ def NGET : FSLGet<0x1B, 0x10, "nget ", int_mblaze_fsl_nget>;
+ def NAGET : FSLGet<0x1B, 0x12, "naget ", int_mblaze_fsl_naget>;
+ def NCGET : FSLGet<0x1B, 0x18, "ncget ", int_mblaze_fsl_ncget>;
+ def NCAGET : FSLGet<0x1B, 0x1A, "ncaget ", int_mblaze_fsl_ncaget>;
+ def NEGET : FSLGet<0x1B, 0x11, "neget ", int_mblaze_fsl_neget>;
+ def NEAGET : FSLGet<0x1B, 0x13, "neaget ", int_mblaze_fsl_neaget>;
+ def NECGET : FSLGet<0x1B, 0x19, "necget ", int_mblaze_fsl_necget>;
+ def NECAGET : FSLGet<0x1B, 0x1B, "necaget ", int_mblaze_fsl_necaget>;
+ def TNGET : FSLGet<0x1B, 0x14, "tnget ", int_mblaze_fsl_tnget>;
+ def TNAGET : FSLGet<0x1B, 0x16, "tnaget ", int_mblaze_fsl_tnaget>;
+ def TNCGET : FSLGet<0x1B, 0x1C, "tncget ", int_mblaze_fsl_tncget>;
+ def TNCAGET : FSLGet<0x1B, 0x1E, "tncaget ", int_mblaze_fsl_tncaget>;
+ def TNEGET : FSLGet<0x1B, 0x15, "tneget ", int_mblaze_fsl_tneget>;
+ def TNEAGET : FSLGet<0x1B, 0x17, "tneaget ", int_mblaze_fsl_tneaget>;
+ def TNECGET : FSLGet<0x1B, 0x1D, "tnecget ", int_mblaze_fsl_tnecget>;
+ def TNECAGET : FSLGet<0x1B, 0x1F, "tnecaget ", int_mblaze_fsl_tnecaget>;
+}
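
Comparing the opcodes in the table pins down the NCTAE bits individually:
e = 0x01, a = 0x02, t = 0x04, c = 0x08, n = 0x10, so TNECAGET = 0x1F sets
all five. The mnemonic meanings below follow the usual MicroBlaze FSL
naming and are an assumption; note the n* forms sit in the Defs = [CARRY]
block because a non-blocking get reports failure through the carry bit.

    enum fsl_get_flags {
        FSL_E = 1 << 0,   /* e: raise an exception on error      */
        FSL_A = 1 << 1,   /* a: atomic                           */
        FSL_T = 1 << 2,   /* t: test only, do not pop the FIFO   */
        FSL_C = 1 << 3,   /* c: control-word access              */
        FSL_N = 1 << 4    /* n: non-blocking, failure sets carry */
    };
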
//===----------------------------------------------------------------------===//
// FSL Dynamic Get Instructions
//===----------------------------------------------------------------------===//
-def GETD : FSLGetD<0x1B, 0x00, "getd ", int_mblaze_fsl_get>;
-def AGETD : FSLGetD<0x1B, 0x00, "agetd ", int_mblaze_fsl_aget>;
-def CGETD : FSLGetD<0x1B, 0x00, "cgetd ", int_mblaze_fsl_cget>;
-def CAGETD : FSLGetD<0x1B, 0x00, "cagetd ", int_mblaze_fsl_caget>;
-def EGETD : FSLGetD<0x1B, 0x00, "egetd ", int_mblaze_fsl_eget>;
-def EAGETD : FSLGetD<0x1B, 0x00, "eagetd ", int_mblaze_fsl_eaget>;
-def ECGETD : FSLGetD<0x1B, 0x00, "ecgetd ", int_mblaze_fsl_ecget>;
-def ECAGETD : FSLGetD<0x1B, 0x00, "ecagetd ", int_mblaze_fsl_ecaget>;
-def NGETD : FSLGetD<0x1B, 0x00, "ngetd ", int_mblaze_fsl_nget>;
-def NAGETD : FSLGetD<0x1B, 0x00, "nagetd ", int_mblaze_fsl_naget>;
-def NCGETD : FSLGetD<0x1B, 0x00, "ncgetd ", int_mblaze_fsl_ncget>;
-def NCAGETD : FSLGetD<0x1B, 0x00, "ncagetd ", int_mblaze_fsl_ncaget>;
-def NEGETD : FSLGetD<0x1B, 0x00, "negetd ", int_mblaze_fsl_neget>;
-def NEAGETD : FSLGetD<0x1B, 0x00, "neagetd ", int_mblaze_fsl_neaget>;
-def NECGETD : FSLGetD<0x1B, 0x00, "necgetd ", int_mblaze_fsl_necget>;
-def NECAGETD : FSLGetD<0x1B, 0x00, "necagetd ", int_mblaze_fsl_necaget>;
-def TGETD : FSLGetD<0x1B, 0x00, "tgetd ", int_mblaze_fsl_tget>;
-def TAGETD : FSLGetD<0x1B, 0x00, "tagetd ", int_mblaze_fsl_taget>;
-def TCGETD : FSLGetD<0x1B, 0x00, "tcgetd ", int_mblaze_fsl_tcget>;
-def TCAGETD : FSLGetD<0x1B, 0x00, "tcagetd ", int_mblaze_fsl_tcaget>;
-def TEGETD : FSLGetD<0x1B, 0x00, "tegetd ", int_mblaze_fsl_teget>;
-def TEAGETD : FSLGetD<0x1B, 0x00, "teagetd ", int_mblaze_fsl_teaget>;
-def TECGETD : FSLGetD<0x1B, 0x00, "tecgetd ", int_mblaze_fsl_tecget>;
-def TECAGETD : FSLGetD<0x1B, 0x00, "tecagetd ", int_mblaze_fsl_tecaget>;
-def TNGETD : FSLGetD<0x1B, 0x00, "tngetd ", int_mblaze_fsl_tnget>;
-def TNAGETD : FSLGetD<0x1B, 0x00, "tnagetd ", int_mblaze_fsl_tnaget>;
-def TNCGETD : FSLGetD<0x1B, 0x00, "tncgetd ", int_mblaze_fsl_tncget>;
-def TNCAGETD : FSLGetD<0x1B, 0x00, "tncagetd ", int_mblaze_fsl_tncaget>;
-def TNEGETD : FSLGetD<0x1B, 0x00, "tnegetd ", int_mblaze_fsl_tneget>;
-def TNEAGETD : FSLGetD<0x1B, 0x00, "tneagetd ", int_mblaze_fsl_tneaget>;
-def TNECGETD : FSLGetD<0x1B, 0x00, "tnecgetd ", int_mblaze_fsl_tnecget>;
-def TNECAGETD : FSLGetD<0x1B, 0x00, "tnecagetd", int_mblaze_fsl_tnecaget>;
+def GETD : FSLGetD<0x13, 0x00, "getd ", int_mblaze_fsl_get>;
+def AGETD : FSLGetD<0x13, 0x02, "agetd ", int_mblaze_fsl_aget>;
+def CGETD : FSLGetD<0x13, 0x08, "cgetd ", int_mblaze_fsl_cget>;
+def CAGETD : FSLGetD<0x13, 0x0A, "cagetd ", int_mblaze_fsl_caget>;
+def EGETD : FSLGetD<0x13, 0x01, "egetd ", int_mblaze_fsl_eget>;
+def EAGETD : FSLGetD<0x13, 0x03, "eagetd ", int_mblaze_fsl_eaget>;
+def ECGETD : FSLGetD<0x13, 0x09, "ecgetd ", int_mblaze_fsl_ecget>;
+def ECAGETD : FSLGetD<0x13, 0x0B, "ecagetd ", int_mblaze_fsl_ecaget>;
+def TGETD : FSLGetD<0x13, 0x04, "tgetd ", int_mblaze_fsl_tget>;
+def TAGETD : FSLGetD<0x13, 0x06, "tagetd ", int_mblaze_fsl_taget>;
+def TCGETD : FSLGetD<0x13, 0x0C, "tcgetd ", int_mblaze_fsl_tcget>;
+def TCAGETD : FSLGetD<0x13, 0x0E, "tcagetd ", int_mblaze_fsl_tcaget>;
+def TEGETD : FSLGetD<0x13, 0x05, "tegetd ", int_mblaze_fsl_teget>;
+def TEAGETD : FSLGetD<0x13, 0x07, "teagetd ", int_mblaze_fsl_teaget>;
+def TECGETD : FSLGetD<0x13, 0x0D, "tecgetd ", int_mblaze_fsl_tecget>;
+def TECAGETD : FSLGetD<0x13, 0x0F, "tecagetd ", int_mblaze_fsl_tecaget>;
+
+let Defs = [CARRY] in {
+ def NGETD : FSLGetD<0x13, 0x10, "ngetd ", int_mblaze_fsl_nget>;
+ def NAGETD : FSLGetD<0x13, 0x12, "nagetd ", int_mblaze_fsl_naget>;
+ def NCGETD : FSLGetD<0x13, 0x18, "ncgetd ", int_mblaze_fsl_ncget>;
+ def NCAGETD : FSLGetD<0x13, 0x1A, "ncagetd ", int_mblaze_fsl_ncaget>;
+ def NEGETD : FSLGetD<0x13, 0x11, "negetd ", int_mblaze_fsl_neget>;
+ def NEAGETD : FSLGetD<0x13, 0x13, "neagetd ", int_mblaze_fsl_neaget>;
+ def NECGETD : FSLGetD<0x13, 0x19, "necgetd ", int_mblaze_fsl_necget>;
+ def NECAGETD : FSLGetD<0x13, 0x1B, "necagetd ", int_mblaze_fsl_necaget>;
+ def TNGETD : FSLGetD<0x13, 0x14, "tngetd ", int_mblaze_fsl_tnget>;
+ def TNAGETD : FSLGetD<0x13, 0x16, "tnagetd ", int_mblaze_fsl_tnaget>;
+ def TNCGETD : FSLGetD<0x13, 0x1C, "tncgetd ", int_mblaze_fsl_tncget>;
+ def TNCAGETD : FSLGetD<0x13, 0x1E, "tncagetd ", int_mblaze_fsl_tncaget>;
+ def TNEGETD : FSLGetD<0x13, 0x15, "tnegetd ", int_mblaze_fsl_tneget>;
+ def TNEAGETD : FSLGetD<0x13, 0x17, "tneagetd ", int_mblaze_fsl_tneaget>;
+ def TNECGETD : FSLGetD<0x13, 0x1D, "tnecgetd ", int_mblaze_fsl_tnecget>;
+ def TNECAGETD : FSLGetD<0x13, 0x1F, "tnecagetd", int_mblaze_fsl_tnecaget>;
+}
//===----------------------------------------------------------------------===//
// FSL Put Instructions
//===----------------------------------------------------------------------===//
-def PUT : FSLPut<0x1B, "put ", int_mblaze_fsl_put>;
-def APUT : FSLPut<0x1B, "aput ", int_mblaze_fsl_aput>;
-def CPUT : FSLPut<0x1B, "cput ", int_mblaze_fsl_cput>;
-def CAPUT : FSLPut<0x1B, "caput ", int_mblaze_fsl_caput>;
-def NPUT : FSLPut<0x1B, "nput ", int_mblaze_fsl_nput>;
-def NAPUT : FSLPut<0x1B, "naput ", int_mblaze_fsl_naput>;
-def NCPUT : FSLPut<0x1B, "ncput ", int_mblaze_fsl_ncput>;
-def NCAPUT : FSLPut<0x1B, "ncaput ", int_mblaze_fsl_ncaput>;
-def TPUT : FSLPutT<0x1B, "tput ", int_mblaze_fsl_tput>;
-def TAPUT : FSLPutT<0x1B, "taput ", int_mblaze_fsl_taput>;
-def TCPUT : FSLPutT<0x1B, "tcput ", int_mblaze_fsl_tcput>;
-def TCAPUT : FSLPutT<0x1B, "tcaput ", int_mblaze_fsl_tcaput>;
-def TNPUT : FSLPutT<0x1B, "tnput ", int_mblaze_fsl_tnput>;
-def TNAPUT : FSLPutT<0x1B, "tnaput ", int_mblaze_fsl_tnaput>;
-def TNCPUT : FSLPutT<0x1B, "tncput ", int_mblaze_fsl_tncput>;
-def TNCAPUT : FSLPutT<0x1B, "tncaput ", int_mblaze_fsl_tncaput>;
+def PUT : FSLPut<0x1B, 0x0, "put ", int_mblaze_fsl_put>;
+def APUT : FSLPut<0x1B, 0x1, "aput ", int_mblaze_fsl_aput>;
+def CPUT : FSLPut<0x1B, 0x4, "cput ", int_mblaze_fsl_cput>;
+def CAPUT : FSLPut<0x1B, 0x5, "caput ", int_mblaze_fsl_caput>;
+def TPUT : FSLPutT<0x1B, 0x2, "tput ", int_mblaze_fsl_tput>;
+def TAPUT : FSLPutT<0x1B, 0x3, "taput ", int_mblaze_fsl_taput>;
+def TCPUT : FSLPutT<0x1B, 0x6, "tcput ", int_mblaze_fsl_tcput>;
+def TCAPUT : FSLPutT<0x1B, 0x7, "tcaput ", int_mblaze_fsl_tcaput>;
+
+let Defs = [CARRY] in {
+ def NPUT : FSLPut<0x1B, 0x8, "nput ", int_mblaze_fsl_nput>;
+ def NAPUT : FSLPut<0x1B, 0x9, "naput ", int_mblaze_fsl_naput>;
+ def NCPUT : FSLPut<0x1B, 0xC, "ncput ", int_mblaze_fsl_ncput>;
+ def NCAPUT : FSLPut<0x1B, 0xD, "ncaput ", int_mblaze_fsl_ncaput>;
+ def TNPUT : FSLPutT<0x1B, 0xA, "tnput ", int_mblaze_fsl_tnput>;
+ def TNAPUT : FSLPutT<0x1B, 0xB, "tnaput ", int_mblaze_fsl_tnaput>;
+ def TNCPUT : FSLPutT<0x1B, 0xE, "tncput ", int_mblaze_fsl_tncput>;
+ def TNCAPUT : FSLPutT<0x1B, 0xF, "tncaput ", int_mblaze_fsl_tncaput>;
+}
//===----------------------------------------------------------------------===//
// FSL Dynamic Put Instructions
//===----------------------------------------------------------------------===//
-def PUTD : FSLPutD<0x1B, 0x00, "putd ", int_mblaze_fsl_put>;
-def APUTD : FSLPutD<0x1B, 0x00, "aputd ", int_mblaze_fsl_aput>;
-def CPUTD : FSLPutD<0x1B, 0x00, "cputd ", int_mblaze_fsl_cput>;
-def CAPUTD : FSLPutD<0x1B, 0x00, "caputd ", int_mblaze_fsl_caput>;
-def NPUTD : FSLPutD<0x1B, 0x00, "nputd ", int_mblaze_fsl_nput>;
-def NAPUTD : FSLPutD<0x1B, 0x00, "naputd ", int_mblaze_fsl_naput>;
-def NCPUTD : FSLPutD<0x1B, 0x00, "ncputd ", int_mblaze_fsl_ncput>;
-def NCAPUTD : FSLPutD<0x1B, 0x00, "ncaputd ", int_mblaze_fsl_ncaput>;
-def TPUTD : FSLPutTD<0x1B, 0x00, "tputd ", int_mblaze_fsl_tput>;
-def TAPUTD : FSLPutTD<0x1B, 0x00, "taputd ", int_mblaze_fsl_taput>;
-def TCPUTD : FSLPutTD<0x1B, 0x00, "tcputd ", int_mblaze_fsl_tcput>;
-def TCAPUTD : FSLPutTD<0x1B, 0x00, "tcaputd ", int_mblaze_fsl_tcaput>;
-def TNPUTD : FSLPutTD<0x1B, 0x00, "tnputd ", int_mblaze_fsl_tnput>;
-def TNAPUTD : FSLPutTD<0x1B, 0x00, "tnaputd ", int_mblaze_fsl_tnaput>;
-def TNCPUTD : FSLPutTD<0x1B, 0x00, "tncputd ", int_mblaze_fsl_tncput>;
-def TNCAPUTD : FSLPutTD<0x1B, 0x00, "tncaputd ", int_mblaze_fsl_tncaput>;
+def PUTD : FSLPutD<0x13, 0x0, "putd ", int_mblaze_fsl_put>;
+def APUTD : FSLPutD<0x13, 0x1, "aputd ", int_mblaze_fsl_aput>;
+def CPUTD : FSLPutD<0x13, 0x4, "cputd ", int_mblaze_fsl_cput>;
+def CAPUTD : FSLPutD<0x13, 0x5, "caputd ", int_mblaze_fsl_caput>;
+def TPUTD : FSLPutTD<0x13, 0x2, "tputd ", int_mblaze_fsl_tput>;
+def TAPUTD : FSLPutTD<0x13, 0x3, "taputd ", int_mblaze_fsl_taput>;
+def TCPUTD : FSLPutTD<0x13, 0x6, "tcputd ", int_mblaze_fsl_tcput>;
+def TCAPUTD : FSLPutTD<0x13, 0x7, "tcaputd ", int_mblaze_fsl_tcaput>;
+
+let Defs = [CARRY] in {
+ def NPUTD : FSLPutD<0x13, 0x8, "nputd ", int_mblaze_fsl_nput>;
+ def NAPUTD : FSLPutD<0x13, 0x9, "naputd ", int_mblaze_fsl_naput>;
+ def NCPUTD : FSLPutD<0x13, 0xC, "ncputd ", int_mblaze_fsl_ncput>;
+ def NCAPUTD : FSLPutD<0x13, 0xD, "ncaputd ", int_mblaze_fsl_ncaput>;
+ def TNPUTD : FSLPutTD<0x13, 0xA, "tnputd ", int_mblaze_fsl_tnput>;
+ def TNAPUTD : FSLPutTD<0x13, 0xB, "tnaputd ", int_mblaze_fsl_tnaput>;
+ def TNCPUTD : FSLPutTD<0x13, 0xE, "tncputd ", int_mblaze_fsl_tncput>;
+ def TNCAPUTD : FSLPutTD<0x13, 0xF, "tncaputd ", int_mblaze_fsl_tncaput>;
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
index 28e8e44..d62574d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
@@ -7,6 +7,35 @@
//
//===----------------------------------------------------------------------===//
+// Format specifies the encoding used by the instruction. This is part of the
+// ad-hoc solution used to emit machine instruction encodings by our machine
+// code emitter.
+class Format<bits<6> val> {
+ bits<6> Value = val;
+}
+
+def FPseudo : Format<0>;
+def FRRR : Format<1>; // ADD, OR, etc.
+def FRRI : Format<2>; // ADDI, ORI, etc.
+def FCRR : Format<3>; // PUTD, WDC, WIC, BEQ, BNE, BGE, etc.
+def FCRI : Format<4>; // RTID, RTED, RTSD, BEQI, BNEI, BGEI, etc.
+def FRCR : Format<5>; // BRLD, BRALD, GETD
+def FRCI : Format<6>; // BRLID, BRALID, MSRCLR, MSRSET
+def FCCR : Format<7>; // BR, BRA, BRD, etc.
+def FCCI : Format<8>; // IMM, BRI, BRAI, BRID, etc.
+def FRRCI : Format<9>; // BSRLI, BSRAI, BSLLI
+def FRRC : Format<10>; // SEXT8, SEXT16, SRA, SRC, SRL, FLT, FINT, FSQRT
+def FRCX : Format<11>; // GET
+def FRCS : Format<12>; // MFS
+def FCRCS : Format<13>; // MTS
+def FCRCX : Format<14>; // PUT
+def FCX : Format<15>; // TPUT
+def FCR : Format<16>; // TPUTD
+def FRIR : Format<17>; // RSUBI
+def FRRRR : Format<18>; // RSUB, FRSUB
+def FRI : Format<19>; // RSUB, FRSUB
+def FC : Format<20>; // NOP
+
//===----------------------------------------------------------------------===//
// Describe MBlaze instructions format
//
@@ -21,226 +50,155 @@
//===----------------------------------------------------------------------===//
// Generic MBlaze Format
-class MBlazeInst<dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin> : Instruction
-{
- field bits<32> Inst;
-
+class MBlazeInst<bits<6> op, Format form, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> : Instruction {
let Namespace = "MBlaze";
+ field bits<32> Inst;
- bits<6> opcode;
+ bits<6> opcode = op;
+ Format Form = form;
+ bits<6> FormBits = Form.Value;
// Top 6 bits are the 'opcode' field
- let Inst{0-5} = opcode;
-
+ let Inst{0-5} = opcode;
+
+ // If the instruction is marked as a pseudo, set isCodeGenOnly so that the
+  // assembler and disassembler ignore it.
+ let isCodeGenOnly = !eq(!cast<string>(form), "FPseudo");
+
dag OutOperandList = outs;
dag InOperandList = ins;
let AsmString = asmstr;
let Pattern = pattern;
let Itinerary = itin;
+
+ // TSFlags layout should be kept in sync with MBlazeInstrInfo.h.
+ let TSFlags{5-0} = FormBits;
}
//===----------------------------------------------------------------------===//
// Pseudo instruction class
//===----------------------------------------------------------------------===//
class MBlazePseudo<dag outs, dag ins, string asmstr, list<dag> pattern>:
- MBlazeInst<outs, ins, asmstr, pattern, IIPseudo>;
+ MBlazeInst<0x0, FPseudo, outs, ins, asmstr, pattern, IIPseudo>;
//===----------------------------------------------------------------------===//
// Type A instruction class in MBlaze : <|opcode|rd|ra|rb|flags|>
//===----------------------------------------------------------------------===//
class TA<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op,FRRR,outs, ins, asmstr, pattern, itin>
{
bits<5> rd;
bits<5> ra;
bits<5> rb;
- let opcode = op;
-
let Inst{6-10} = rd;
- let Inst{11-15} = ra;
+ let Inst{11-15} = ra;
let Inst{16-20} = rb;
let Inst{21-31} = flags;
}
-class TAI<bits<6> op, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> rd;
- bits<5> ra;
- bits<16> imm16;
-
- let opcode = op;
-
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-31} = imm16;
-}
-
-class TIMM<bits<6> op, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
- bits<16> imm16;
-
- let opcode = op;
-
- let Inst{6-15} = 0;
- let Inst{16-31} = imm16;
-}
-
-class TADDR<bits<6> op, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<26> addr;
-
- let opcode = op;
-
- let Inst{6-31} = addr;
-}
-
//===----------------------------------------------------------------------===//
// Type B instruction class in MBlaze : <|opcode|rd|ra|immediate|>
//===----------------------------------------------------------------------===//
class TB<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
+ InstrItinClass itin> :
+ MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin>
{
bits<5> rd;
bits<5> ra;
bits<16> imm16;
- let opcode = op;
-
let Inst{6-10} = rd;
- let Inst{11-15} = ra;
+ let Inst{11-15} = ra;
let Inst{16-31} = imm16;
}
//===----------------------------------------------------------------------===//
-// Float instruction class in MBlaze : <|opcode|rd|ra|flags|>
+// Type A instruction class in MBlaze but with the operands reversed
+// in the LLVM DAG : <|opcode|rd|ra|rb|flags|>
//===----------------------------------------------------------------------===//
-class TF<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr,
- list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
+class TAR<bits<6> op, bits<11> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ TA<op, flags, outs, ins, asmstr, pattern, itin>
{
- bits<5> rd;
- bits<5> ra;
+ bits<5> rrd;
+ bits<5> rrb;
+ bits<5> rra;
- let opcode = op;
+ let Form = FRRRR;
- let Inst{6-10} = rd;
- let Inst{11-15} = ra;
- let Inst{16-20} = 0;
- let Inst{21-31} = flags;
+ let rd = rrd;
+ let ra = rra;
+ let rb = rrb;
}
//===----------------------------------------------------------------------===//
-// Branch instruction class in MBlaze : <|opcode|rd|br|ra|flags|>
+// Type B instruction class in MBlaze but with the operands reversed in
+// the LLVM DAG : <|opcode|rd|ra|immediate|>
//===----------------------------------------------------------------------===//
-
-class TBR<bits<6> op, bits<5> br, bits<11> flags, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
-
- let opcode = op;
-
- let Inst{6-10} = 0;
- let Inst{11-15} = br;
- let Inst{16-20} = ra;
- let Inst{21-31} = flags;
-}
-
-class TBRC<bits<6> op, bits<5> br, bits<11> flags, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
- bits<5> rb;
-
- let opcode = op;
-
- let Inst{6-10} = br;
- let Inst{11-15} = ra;
- let Inst{16-20} = rb;
- let Inst{21-31} = flags;
-}
-
-class TBRL<bits<6> op, bits<5> br, bits<11> flags, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
-
- let opcode = op;
-
- let Inst{6-10} = 0xF;
- let Inst{11-15} = br;
- let Inst{16-20} = ra;
- let Inst{21-31} = flags;
+class TBR<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin> :
+ TB<op, outs, ins, asmstr, pattern, itin> {
+ bits<5> rrd;
+ bits<16> rimm16;
+ bits<5> rra;
+
+ let Form = FRIR;
+
+ let rd = rrd;
+ let ra = rra;
+ let imm16 = rimm16;
}
-class TBRI<bits<6> op, bits<5> br, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<16> imm16;
-
- let opcode = op;
-
- let Inst{6-10} = 0;
- let Inst{11-15} = br;
- let Inst{16-31} = imm16;
-}
-
-class TBRLI<bits<6> op, bits<5> br, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<16> imm16;
-
- let opcode = op;
+//===----------------------------------------------------------------------===//
+// Shift immediate instruction class in MBlaze : <|opcode|rd|ra|immediate|>
+//===----------------------------------------------------------------------===//
+class SHT<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op, FRRI, outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+ bits<5> ra;
+ bits<5> imm5;
- let Inst{6-10} = 0xF;
- let Inst{11-15} = br;
- let Inst{16-31} = imm16;
+ let Inst{6-10} = rd;
+ let Inst{11-15} = ra;
+ let Inst{16-20} = 0x0;
+ let Inst{21-22} = flags;
+ let Inst{23-26} = 0x0;
+ let Inst{27-31} = imm5;
}
-class TBRCI<bits<6> op, bits<5> br, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
- bits<16> imm16;
-
- let opcode = op;
+//===----------------------------------------------------------------------===//
+// Special instruction class in MBlaze : <|opcode|rd|imm14|>
+//===----------------------------------------------------------------------===//
+class SPC<bits<6> op, bits<2> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+ bits<14> imm14;
- let Inst{6-10} = br;
- let Inst{11-15} = ra;
- let Inst{16-31} = imm16;
+ let Inst{6-10} = rd;
+ let Inst{11-15} = 0x0;
+ let Inst{16-17} = flags;
+ let Inst{18-31} = imm14;
}
-class TRET<bits<6> op, dag outs, dag ins,
- string asmstr, list<dag> pattern, InstrItinClass itin> :
- MBlazeInst<outs, ins, asmstr, pattern, itin>
-{
- bits<5> ra;
- bits<16> imm16;
-
- let opcode = op;
+//===----------------------------------------------------------------------===//
+// MSR instruction class in MBlaze : <|opcode|rd|imm15|>
+//===----------------------------------------------------------------------===//
+class MSR<bits<6> op, bits<6> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op, FRI, outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+ bits<15> imm15;
- let Inst{6-10} = 0x10;
- let Inst{11-15} = ra;
- let Inst{16-31} = imm16;
+ let Inst{6-10} = rd;
+ let Inst{11-16} = flags;
+ let Inst{17-31} = imm15;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
index b590c09..b353dcd 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
@@ -38,10 +38,10 @@ static bool isZeroImm(const MachineOperand &op) {
unsigned MBlazeInstrInfo::
isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const {
if (MI->getOpcode() == MBlaze::LWI) {
- if ((MI->getOperand(2).isFI()) && // is a stack slot
- (MI->getOperand(1).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(1)))) {
- FrameIndex = MI->getOperand(2).getIndex();
+ if ((MI->getOperand(1).isFI()) && // is a stack slot
+ (MI->getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI->getOperand(2)))) {
+ FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
@@ -57,10 +57,10 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const {
unsigned MBlazeInstrInfo::
isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const {
if (MI->getOpcode() == MBlaze::SWI) {
- if ((MI->getOperand(2).isFI()) && // is a stack slot
- (MI->getOperand(1).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(1)))) {
- FrameIndex = MI->getOperand(2).getIndex();
+ if ((MI->getOperand(1).isFI()) && // is a stack slot
+ (MI->getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI->getOperand(2)))) {
+ FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
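
Both hunks swap the operand indices because, later in this diff, the memri operand is redefined as (ops GPR, simm16): the frame index is now operand 1 and the zero offset operand 2. A sketch of building a matching stack load (mirroring loadRegFromStackSlot below):

```c++
// Stack load with the new operand order: base (frame index) first,
// then the immediate offset, as isLoadFromStackSlot now expects.
BuildMI(MBB, I, DL, get(MBlaze::LWI), DestReg)
    .addFrameIndex(FI)    // operand 1: the stack slot
    .addImm(0);           // operand 2: zero offset
```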
@@ -80,7 +80,7 @@ copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
- llvm::BuildMI(MBB, I, DL, get(MBlaze::ADD), DestReg)
+ llvm::BuildMI(MBB, I, DL, get(MBlaze::ADDK), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc)).addReg(MBlaze::R0);
}
@@ -91,7 +91,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const TargetRegisterInfo *TRI) const {
DebugLoc DL;
BuildMI(MBB, I, DL, get(MBlaze::SWI)).addReg(SrcReg,getKillRegState(isKill))
- .addImm(0).addFrameIndex(FI);
+ .addFrameIndex(FI).addImm(0);
}
void MBlazeInstrInfo::
@@ -101,21 +101,168 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const TargetRegisterInfo *TRI) const {
DebugLoc DL;
BuildMI(MBB, I, DL, get(MBlaze::LWI), DestReg)
- .addImm(0).addFrameIndex(FI);
+ .addFrameIndex(FI).addImm(0);
}
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
+bool MBlazeInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // If the block has no terminators, it just falls into the block after it.
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin())
+ return false;
+ --I;
+ while (I->isDebugValue()) {
+ if (I == MBB.begin())
+ return false;
+ --I;
+ }
+ if (!isUnpredicatedTerminator(I))
+ return false;
+
+ // Get the last instruction in the block.
+ MachineInstr *LastInst = I;
+
+ // If there is only one terminator instruction, process it.
+ unsigned LastOpc = LastInst->getOpcode();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (MBlaze::isUncondBranchOpcode(LastOpc)) {
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+ if (MBlaze::isCondBranchOpcode(LastOpc)) {
+ // Block ends with fall-through condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
+ }
+ // Otherwise, don't know what this is.
+ return true;
+ }
+
+ // Get the instruction before it if it's a terminator.
+ MachineInstr *SecondLastInst = I;
+
+ // If there are three terminators, we don't know what sort of block this is.
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
+ return true;
+
+ // If the block ends with something like BEQID then BRID, handle it.
+ if (MBlaze::isCondBranchOpcode(SecondLastInst->getOpcode()) &&
+ MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) {
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block ends with two unconditional branches, handle it.
+ // The second one is not executed, so remove it.
+ if (MBlaze::isUncondBranchOpcode(SecondLastInst->getOpcode()) &&
+ MBlaze::isUncondBranchOpcode(LastInst->getOpcode())) {
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ I = LastInst;
+ if (AllowModify)
+ I->eraseFromParent();
+ return false;
+ }
+
+ // Otherwise, can't handle this.
+ return true;
+}
+
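AnalyzeBranch encodes a conditional branch as a two-entry Cond vector: the branch opcode as an immediate in Cond[0] and the tested register in Cond[1], which InsertBranch below consumes. A sketch of a caller round-tripping that convention (names are illustrative):

```c++
SmallVector<MachineOperand, 2> Cond;
MachineBasicBlock *TBB = 0, *FBB = 0;
if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false) &&
    Cond.size() == 2) {
  unsigned Opc = Cond[0].getImm();  // e.g. MBlaze::BEQID
  unsigned Reg = Cond[1].getReg();  // register the branch tests
  // ReverseBranchCondition(Cond) would rewrite Cond[0],
  // e.g. BEQID -> BNEID.
  (void)Opc; (void)Reg;
}
```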
unsigned MBlazeInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const {
- // Can only insert uncond branches so far.
- assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, DL, get(MBlaze::BRI)).addMBB(TBB);
- return 1;
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
+ "MBlaze branch conditions have two components!");
+
+ unsigned Opc = MBlaze::BRID;
+ if (!Cond.empty())
+ Opc = (unsigned)Cond[0].getImm();
+
+ if (FBB == 0) {
+ if (Cond.empty()) // Unconditional branch
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
+ else // Conditional branch
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB);
+ return 1;
+ }
+
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg()).addMBB(TBB);
+ BuildMI(&MBB, DL, get(MBlaze::BRID)).addMBB(FBB);
+ return 2;
+}
+
+unsigned MBlazeInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin()) return 0;
+ --I;
+ while (I->isDebugValue()) {
+ if (I == MBB.begin())
+ return 0;
+ --I;
+ }
+
+ if (!MBlaze::isUncondBranchOpcode(I->getOpcode()) &&
+ !MBlaze::isCondBranchOpcode(I->getOpcode()))
+ return 0;
+
+ // Remove the branch.
+ I->eraseFromParent();
+
+ I = MBB.end();
+
+ if (I == MBB.begin()) return 1;
+ --I;
+ if (!MBlaze::isCondBranchOpcode(I->getOpcode()))
+ return 1;
+
+ // Remove the branch.
+ I->eraseFromParent();
+ return 2;
+}
+
+bool MBlazeInstrInfo::
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 2 && "Invalid MBlaze branch opcode!");
+ switch (Cond[0].getImm()) {
+ default: return true;
+ case MBlaze::BEQ: Cond[0].setImm(MBlaze::BNE); return false;
+ case MBlaze::BNE: Cond[0].setImm(MBlaze::BEQ); return false;
+ case MBlaze::BGT: Cond[0].setImm(MBlaze::BLE); return false;
+ case MBlaze::BGE: Cond[0].setImm(MBlaze::BLT); return false;
+ case MBlaze::BLT: Cond[0].setImm(MBlaze::BGE); return false;
+ case MBlaze::BLE: Cond[0].setImm(MBlaze::BGT); return false;
+ case MBlaze::BEQI: Cond[0].setImm(MBlaze::BNEI); return false;
+ case MBlaze::BNEI: Cond[0].setImm(MBlaze::BEQI); return false;
+ case MBlaze::BGTI: Cond[0].setImm(MBlaze::BLEI); return false;
+ case MBlaze::BGEI: Cond[0].setImm(MBlaze::BLTI); return false;
+ case MBlaze::BLTI: Cond[0].setImm(MBlaze::BGEI); return false;
+ case MBlaze::BLEI: Cond[0].setImm(MBlaze::BGTI); return false;
+ case MBlaze::BEQD: Cond[0].setImm(MBlaze::BNED); return false;
+ case MBlaze::BNED: Cond[0].setImm(MBlaze::BEQD); return false;
+ case MBlaze::BGTD: Cond[0].setImm(MBlaze::BLED); return false;
+ case MBlaze::BGED: Cond[0].setImm(MBlaze::BLTD); return false;
+ case MBlaze::BLTD: Cond[0].setImm(MBlaze::BGED); return false;
+ case MBlaze::BLED: Cond[0].setImm(MBlaze::BGTD); return false;
+ case MBlaze::BEQID: Cond[0].setImm(MBlaze::BNEID); return false;
+ case MBlaze::BNEID: Cond[0].setImm(MBlaze::BEQID); return false;
+ case MBlaze::BGTID: Cond[0].setImm(MBlaze::BLEID); return false;
+ case MBlaze::BGEID: Cond[0].setImm(MBlaze::BLTID); return false;
+ case MBlaze::BLTID: Cond[0].setImm(MBlaze::BGEID); return false;
+ case MBlaze::BLEID: Cond[0].setImm(MBlaze::BGTID); return false;
+ }
}
/// getGlobalBaseReg - Return a virtual register initialized with the
@@ -134,7 +281,7 @@ unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- GlobalBaseReg = RegInfo.createVirtualRegister(MBlaze::CPURegsRegisterClass);
+ GlobalBaseReg = RegInfo.createVirtualRegister(MBlaze::GPRRegisterClass);
BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
GlobalBaseReg).addReg(MBlaze::R20);
RegInfo.addLiveIn(MBlaze::R20);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
index b3dba0e..b7300c1 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
@@ -73,59 +73,92 @@ namespace MBlaze {
FCOND_GT,
// Only integer conditions
- COND_E,
- COND_GZ,
- COND_GEZ,
- COND_LZ,
- COND_LEZ,
+ COND_EQ,
+ COND_GT,
+ COND_GE,
+ COND_LT,
+ COND_LE,
COND_NE,
COND_INVALID
};
// Turn condition code into conditional branch opcode.
- unsigned GetCondBranchFromCond(CondCode CC);
+ inline static unsigned GetCondBranchFromCond(CondCode CC) {
+ switch (CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case COND_EQ: return MBlaze::BEQID;
+ case COND_NE: return MBlaze::BNEID;
+ case COND_GT: return MBlaze::BGTID;
+ case COND_GE: return MBlaze::BGEID;
+ case COND_LT: return MBlaze::BLTID;
+ case COND_LE: return MBlaze::BLEID;
+ }
+ }
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
- CondCode GetOppositeBranchCondition(MBlaze::CondCode CC);
+ // CondCode GetOppositeBranchCondition(MBlaze::CondCode CC);
/// MBlazeCCToString - Map each FP condition code to its string
- inline static const char *MBlazeFCCToString(MBlaze::CondCode CC)
- {
+ inline static const char *MBlazeFCCToString(MBlaze::CondCode CC) {
switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case FCOND_F:
- case FCOND_T: return "f";
- case FCOND_UN:
- case FCOND_OR: return "un";
- case FCOND_EQ:
- case FCOND_NEQ: return "eq";
- case FCOND_UEQ:
- case FCOND_OGL: return "ueq";
- case FCOND_OLT:
- case FCOND_UGE: return "olt";
- case FCOND_ULT:
- case FCOND_OGE: return "ult";
- case FCOND_OLE:
- case FCOND_UGT: return "ole";
- case FCOND_ULE:
- case FCOND_OGT: return "ule";
- case FCOND_SF:
- case FCOND_ST: return "sf";
- case FCOND_NGLE:
- case FCOND_GLE: return "ngle";
- case FCOND_SEQ:
- case FCOND_SNE: return "seq";
- case FCOND_NGL:
- case FCOND_GL: return "ngl";
- case FCOND_LT:
- case FCOND_NLT: return "lt";
- case FCOND_NGE:
- case FCOND_GE: return "ge";
- case FCOND_LE:
- case FCOND_NLE: return "nle";
- case FCOND_NGT:
- case FCOND_GT: return "gt";
+ default: llvm_unreachable("Unknown condition code");
+ case FCOND_F:
+ case FCOND_T: return "f";
+ case FCOND_UN:
+ case FCOND_OR: return "un";
+ case FCOND_EQ:
+ case FCOND_NEQ: return "eq";
+ case FCOND_UEQ:
+ case FCOND_OGL: return "ueq";
+ case FCOND_OLT:
+ case FCOND_UGE: return "olt";
+ case FCOND_ULT:
+ case FCOND_OGE: return "ult";
+ case FCOND_OLE:
+ case FCOND_UGT: return "ole";
+ case FCOND_ULE:
+ case FCOND_OGT: return "ule";
+ case FCOND_SF:
+ case FCOND_ST: return "sf";
+ case FCOND_NGLE:
+ case FCOND_GLE: return "ngle";
+ case FCOND_SEQ:
+ case FCOND_SNE: return "seq";
+ case FCOND_NGL:
+ case FCOND_GL: return "ngl";
+ case FCOND_LT:
+ case FCOND_NLT: return "lt";
+ case FCOND_NGE:
+ case FCOND_GE: return "ge";
+ case FCOND_LE:
+ case FCOND_NLE: return "nle";
+ case FCOND_NGT:
+ case FCOND_GT: return "gt";
+ }
+ }
+
+ inline static bool isUncondBranchOpcode(int Opc) {
+ switch (Opc) {
+ default: return false;
+ case MBlaze::BRI:
+ case MBlaze::BRAI:
+ case MBlaze::BRID:
+ case MBlaze::BRAID:
+ return true;
+ }
+ }
+
+ inline static bool isCondBranchOpcode(int Opc) {
+ switch (Opc) {
+ default: return false;
+ case MBlaze::BEQI: case MBlaze::BEQID:
+ case MBlaze::BNEI: case MBlaze::BNEID:
+ case MBlaze::BGTI: case MBlaze::BGTID:
+ case MBlaze::BGEI: case MBlaze::BGEID:
+ case MBlaze::BLTI: case MBlaze::BLTID:
+ case MBlaze::BLEI: case MBlaze::BLEID:
+ return true;
}
}
}
@@ -134,29 +167,54 @@ namespace MBlaze {
/// instruction info tracks.
///
namespace MBlazeII {
- /// Target Operand Flag enum.
- enum TOF {
+ enum {
+ // FPseudo - This represents an instruction that is a pseudo instruction
+ // or one that has not been implemented yet. It is illegal to code-generate
+ // it, but tolerated for intermediate implementation stages.
+ FPseudo = 0,
+ FRRR,
+ FRRI,
+ FCRR,
+ FCRI,
+ FRCR,
+ FRCI,
+ FCCR,
+ FCCI,
+ FRRCI,
+ FRRC,
+ FRCX,
+ FRCS,
+ FCRCS,
+ FCRCX,
+ FCX,
+ FCR,
+ FRIR,
+ FRRRR,
+ FRI,
+ FC,
+ FormMask = 63
+
//===------------------------------------------------------------------===//
// MBlaze Specific MachineOperand flags.
- MO_NO_FLAG,
+ // MO_NO_FLAG,
/// MO_GOT - Represents the offset into the global offset table at which
/// the address the relocation entry symbol resides during execution.
- MO_GOT,
+ // MO_GOT,
/// MO_GOT_CALL - Represents the offset into the global offset table at
/// which the address of a call site relocation entry symbol resides
/// during execution. This is different from the above since this flag
/// can only be present in call instructions.
- MO_GOT_CALL,
+ // MO_GOT_CALL,
/// MO_GPREL - Represents the offset from the current gp value to be used
/// for the relocatable object file being produced.
- MO_GPREL,
+ // MO_GPREL,
/// MO_ABS_HILO - Represents the hi or low part of an absolute symbol
/// address.
- MO_ABS_HILO
+ // MO_ABS_HILO
};
}
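
Taken together with "let TSFlags{5-0} = FormBits" in MBlazeInstrFormats.td, this enum lets a consumer recover an instruction's format with a single mask. A sketch of that consumer side, assuming the 2.9-era TargetInstrDesc API:

```c++
#include "MBlazeInstrInfo.h"            // MBlazeII::FormMask et al.
#include "llvm/Target/TargetInstrDesc.h"

// The low six bits of TSFlags carry the Format value assigned in
// MBlazeInstrFormats.td (e.g. MBlazeII::FRRR for ADDK).
static unsigned getMBlazeFormat(const llvm::TargetInstrDesc &Desc) {
  return Desc.TSFlags & llvm::MBlazeII::FormMask;
}
```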
@@ -190,10 +248,20 @@ public:
int &FrameIndex) const;
/// Branch Analysis
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const;
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+
+ virtual bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
+ const;
+
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
index e5d1534..7b8f70a 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
@@ -13,35 +13,36 @@
include "MBlazeInstrFormats.td"
//===----------------------------------------------------------------------===//
-// MBlaze profiles and nodes
+// MBlaze type profiles
//===----------------------------------------------------------------------===//
+
+// def SDTMBlazeSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>]>;
def SDT_MBlazeRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_MBlazeJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def SDT_MBlazeIRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_MBlazeJmpLink : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
+def SDT_MBCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_MBCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-// Call
-def MBlazeJmpLink : SDNode<"MBlazeISD::JmpLink",SDT_MBlazeJmpLink,
- [SDNPHasChain,SDNPOptInFlag,SDNPOutFlag]>;
+//===----------------------------------------------------------------------===//
+// MBlaze specific nodes
+//===----------------------------------------------------------------------===//
-// Return
-def MBlazeRet : SDNode<"MBlazeISD::Ret", SDT_MBlazeRet,
- [SDNPHasChain, SDNPOptInFlag]>;
+def MBlazeRet : SDNode<"MBlazeISD::Ret", SDT_MBlazeRet,
+ [SDNPHasChain, SDNPOptInGlue]>;
+def MBlazeIRet : SDNode<"MBlazeISD::IRet", SDT_MBlazeIRet,
+ [SDNPHasChain, SDNPOptInGlue]>;
-// Hi and Lo nodes are used to handle global addresses. Used on
-// MBlazeISelLowering to lower stuff like GlobalAddress, ExternalSymbol
-// static model.
-def MBWrapper : SDNode<"MBlazeISD::Wrap", SDTIntUnaryOp>;
-def MBlazeGPRel : SDNode<"MBlazeISD::GPRel", SDTIntUnaryOp>;
+def MBlazeJmpLink : SDNode<"MBlazeISD::JmpLink",SDT_MBlazeJmpLink,
+ [SDNPHasChain,SDNPOptInGlue,SDNPOutGlue,
+ SDNPVariadic]>;
-def SDT_MBCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_MBCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+def MBWrapper : SDNode<"MBlazeISD::Wrap", SDTIntUnaryOp>;
-// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MBCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MBCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
-def SDTMBlazeSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MBCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// MBlaze Instruction Predicate Definitions.
@@ -67,11 +68,22 @@ def HasMMU : Predicate<"Subtarget.hasMMU()">;
// MBlaze Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
+def MBlazeMemAsmOperand : AsmOperandClass {
+ let Name = "Mem";
+ let SuperClasses = [];
+}
+
+def MBlazeFslAsmOperand : AsmOperandClass {
+ let Name = "Fsl";
+ let SuperClasses = [];
+}
+
// Instruction operand types
def brtarget : Operand<OtherVT>;
def calltarget : Operand<i32>;
def simm16 : Operand<i32>;
def uimm5 : Operand<i32>;
+def uimm15 : Operand<i32>;
def fimm : Operand<f32>;
// Unsigned Operand
@@ -82,31 +94,23 @@ def uimm16 : Operand<i32> {
// FSL Operand
def fslimm : Operand<i32> {
let PrintMethod = "printFSLImm";
+ let ParserMatchClass = MBlazeFslAsmOperand;
}
// Address operand
def memri : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops simm16, CPURegs);
+ let MIOperandInfo = (ops GPR, simm16);
+ let ParserMatchClass = MBlazeMemAsmOperand;
}
def memrr : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPURegs, CPURegs);
+ let MIOperandInfo = (ops GPR, GPR);
+ let ParserMatchClass = MBlazeMemAsmOperand;
}
-// Transformation Function - get the lower 16 bits.
-def LO16 : SDNodeXForm<imm, [{
- return getI32Imm((unsigned)N->getZExtValue() & 0xFFFF);
-}]>;
-
-// Transformation Function - get the higher 16 bits.
-def HI16 : SDNodeXForm<imm, [{
- return getI32Imm((unsigned)N->getZExtValue() >> 16);
-}]>;
-
// Node immediate fits as 16-bit sign extended on target immediate.
-// e.g. addi, andi
def immSExt16 : PatLeaf<(imm), [{
return (N->getZExtValue() >> 16) == 0;
}]>;
@@ -117,19 +121,19 @@ def immSExt16 : PatLeaf<(imm), [{
// e.g. addiu, sltiu
def immZExt16 : PatLeaf<(imm), [{
return (N->getZExtValue() >> 16) == 0;
-}], LO16>;
+}]>;
// FSL immediate field must fit in 4 bits.
def immZExt4 : PatLeaf<(imm), [{
- return N->getZExtValue() == ((N->getZExtValue()) & 0xf) ;
+ return N->getZExtValue() == ((N->getZExtValue()) & 0xf);
}]>;
// shamt field must fit in 5 bits.
def immZExt5 : PatLeaf<(imm), [{
- return N->getZExtValue() == ((N->getZExtValue()) & 0x1f) ;
+ return N->getZExtValue() == ((N->getZExtValue()) & 0x1f);
}]>;
-// MBlaze Address Mode! SDNode frameindex could possibily be a match
+// MBlaze Address Mode. SDNode frameindex could possibly be a match
// since load and store instructions that access the stack use it.
def iaddr : ComplexPattern<i32, 2, "SelectAddrRegImm", [frameindex], []>;
def xaddr : ComplexPattern<i32, 2, "SelectAddrRegReg", [], []>;
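
iaddr names a SelectAddrRegImm hook in the target's DAG selector that is not shown in this diff; a plausible shape for it, under the usual base+simm16 convention (the in-tree version may differ):

```c++
// Member-style sketch for the MBlaze DAGToDAG selector: split an
// address into a register/frame-index base plus a simm16 displacement;
// anything else falls through as a plain base with offset 0.
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Disp) {
  if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
    Disp = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }
  if (Addr.getOpcode() == ISD::ADD)
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      if (isInt<16>(CN->getSExtValue())) {
        Base = Addr.getOperand(0);
        Disp = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
        return true;
      }
  Base = Addr;
  Disp = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
```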
@@ -141,28 +145,14 @@ def xaddr : ComplexPattern<i32, 2, "SelectAddrRegReg", [], []>;
// As stack alignment is always done with addiu, we need a 16-bit immediate
let Defs = [R1], Uses = [R1] in {
def ADJCALLSTACKDOWN : MBlazePseudo<(outs), (ins simm16:$amt),
- "${:comment} ADJCALLSTACKDOWN $amt",
+ "#ADJCALLSTACKDOWN $amt",
[(callseq_start timm:$amt)]>;
def ADJCALLSTACKUP : MBlazePseudo<(outs),
(ins uimm16:$amt1, simm16:$amt2),
- "${:comment} ADJCALLSTACKUP $amt1",
+ "#ADJCALLSTACKUP $amt1",
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
-// Some assembly macros need to avoid pseudoinstructions and assembler
-// automatic reodering, we should reorder ourselves.
-def MACRO : MBlazePseudo<(outs), (ins), ".set macro", []>;
-def REORDER : MBlazePseudo<(outs), (ins), ".set reorder", []>;
-def NOMACRO : MBlazePseudo<(outs), (ins), ".set nomacro", []>;
-def NOREORDER : MBlazePseudo<(outs), (ins), ".set noreorder", []>;
-
-// When handling PIC code the assembler needs .cpload and .cprestore
-// directives. If the real instructions corresponding these directives
-// are used, we have the same behavior, but get also a bunch of warnings
-// from the assembler.
-def CPLOAD : MBlazePseudo<(outs), (ins CPURegs:$reg), ".cpload $reg", []>;
-def CPRESTORE : MBlazePseudo<(outs), (ins uimm16:$l), ".cprestore $l\n", []>;
-
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
@@ -172,47 +162,58 @@ def CPRESTORE : MBlazePseudo<(outs), (ins uimm16:$l), ".cprestore $l\n", []>;
//===----------------------------------------------------------------------===//
class Arith<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
InstrItinClass itin> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], itin>;
+ [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
Operand Od, PatLeaf imm_type> :
- TAI<op, (outs CPURegs:$dst), (ins CPURegs:$b, Od:$c),
+ TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIAlu>;
+
+class ArithI32<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
+ TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
+
+class ShiftI<bits<6> op, bits<2> flags, string instr_asm, SDNode OpNode,
+ Operand Od, PatLeaf imm_type> :
+ SHT<op, flags, (outs GPR:$dst), (ins GPR:$b, Od:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, imm_type:$c))], IIAlu>;
+ [(set GPR:$dst, (OpNode GPR:$b, imm_type:$c))], IIAlu>;
class ArithR<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode,
InstrItinClass itin> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$c, CPURegs:$b),
- !strconcat(instr_asm, " $dst, $c, $b"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], itin>;
+ TAR<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
+ !strconcat(instr_asm, " $dst, $c, $b"),
+ [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], itin>;
class ArithRI<bits<6> op, string instr_asm, SDNode OpNode,
Operand Od, PatLeaf imm_type> :
- TAI<op, (outs CPURegs:$dst), (ins Od:$b, CPURegs:$c),
+ TBR<op, (outs GPR:$dst), (ins Od:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $c, $b"),
- [(set CPURegs:$dst, (OpNode imm_type:$b, CPURegs:$c))], IIAlu>;
+ [(set GPR:$dst, (OpNode imm_type:$b, GPR:$c))], IIAlu>;
class ArithN<bits<6> op, bits<11> flags, string instr_asm,
InstrItinClass itin> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
[], itin>;
class ArithNI<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
- TAI<op, (outs CPURegs:$dst), (ins CPURegs:$b, Od:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], IIAlu>;
+ TB<op, (outs GPR:$dst), (ins GPR:$b, Od:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
class ArithRN<bits<6> op, bits<11> flags, string instr_asm,
InstrItinClass itin> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$c, CPURegs:$b),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [], itin>;
+ TAR<op, flags, (outs GPR:$dst), (ins GPR:$c, GPR:$b),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], itin>;
class ArithRNI<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
- TAI<op, (outs CPURegs:$dst), (ins Od:$c, CPURegs:$b),
+ TBR<op, (outs GPR:$dst), (ins Od:$c, GPR:$b),
!strconcat(instr_asm, " $dst, $b, $c"),
[], IIAlu>;
@@ -221,135 +222,179 @@ class ArithRNI<bits<6> op, string instr_asm,Operand Od, PatLeaf imm_type> :
//===----------------------------------------------------------------------===//
class Logic<bits<6> op, bits<11> flags, string instr_asm, SDNode OpNode> :
- TA<op, flags, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], IIAlu>;
+ [(set GPR:$dst, (OpNode GPR:$b, GPR:$c))], IIAlu>;
class LogicI<bits<6> op, string instr_asm, SDNode OpNode> :
- TAI<op, (outs CPURegs:$dst), (ins CPURegs:$b, uimm16:$c),
- !strconcat(instr_asm, " $dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt16:$c))],
- IIAlu>;
-
-class EffectiveAddress<string instr_asm> :
- TAI<0x08, (outs CPURegs:$dst), (ins memri:$addr),
- instr_asm, [(set CPURegs:$dst, iaddr:$addr)], IIAlu>;
+ TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [(set GPR:$dst, (OpNode GPR:$b, immZExt16:$c))],
+ IIAlu>;
+
+class LogicI32<bits<6> op, string instr_asm> :
+ TB<op, (outs GPR:$dst), (ins GPR:$b, uimm16:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
+
+class PatCmp<bits<6> op, bits<11> flags, string instr_asm> :
+ TA<op, flags, (outs GPR:$dst), (ins GPR:$b, GPR:$c),
+ !strconcat(instr_asm, " $dst, $b, $c"),
+ [], IIAlu>;
//===----------------------------------------------------------------------===//
// Memory Access Instructions
//===----------------------------------------------------------------------===//
-class LoadM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs CPURegs:$dst), (ins memrr:$addr),
+class LoadM<bits<6> op, bits<11> flags, string instr_asm> :
+ TA<op, flags, (outs GPR:$dst), (ins memrr:$addr),
!strconcat(instr_asm, " $dst, $addr"),
- [(set CPURegs:$dst, (OpNode xaddr:$addr))], IILoad>;
+ [], IILoad>;
class LoadMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TAI<op, (outs CPURegs:$dst), (ins memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(set CPURegs:$dst, (OpNode iaddr:$addr))], IILoad>;
+ TB<op, (outs GPR:$dst), (ins memri:$addr),
+ !strconcat(instr_asm, " $dst, $addr"),
+ [(set (i32 GPR:$dst), (OpNode iaddr:$addr))], IILoad>;
-class StoreM<bits<6> op, string instr_asm, PatFrag OpNode> :
- TA<op, 0x000, (outs), (ins CPURegs:$dst, memrr:$addr),
+class StoreM<bits<6> op, bits<11> flags, string instr_asm> :
+ TA<op, flags, (outs), (ins GPR:$dst, memrr:$addr),
!strconcat(instr_asm, " $dst, $addr"),
- [(OpNode CPURegs:$dst, xaddr:$addr)], IIStore>;
+ [], IIStore>;
class StoreMI<bits<6> op, string instr_asm, PatFrag OpNode> :
- TAI<op, (outs), (ins CPURegs:$dst, memri:$addr),
- !strconcat(instr_asm, " $dst, $addr"),
- [(OpNode CPURegs:$dst, iaddr:$addr)], IIStore>;
+ TB<op, (outs), (ins GPR:$dst, memri:$addr),
+ !strconcat(instr_asm, " $dst, $addr"),
+ [(OpNode (i32 GPR:$dst), iaddr:$addr)], IIStore>;
//===----------------------------------------------------------------------===//
// Branch Instructions
//===----------------------------------------------------------------------===//
class Branch<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TBR<op, br, flags, (outs), (ins CPURegs:$target),
- !strconcat(instr_asm, " $target"),
- [(brind CPURegs:$target)], IIBranch>;
+ TA<op, flags, (outs), (ins GPR:$target),
+ !strconcat(instr_asm, " $target"),
+ [], IIBranch> {
+ let rd = 0x0;
+ let ra = br;
+ let Form = FCCR;
+}
-class BranchI<bits<6> op, bits<5> brf, string instr_asm> :
- TBRI<op, brf, (outs), (ins brtarget:$target),
- !strconcat(instr_asm, " $target"),
- [(br bb:$target)], IIBranch>;
+class BranchI<bits<6> op, bits<5> br, string instr_asm> :
+ TB<op, (outs), (ins brtarget:$target),
+ !strconcat(instr_asm, " $target"),
+ [], IIBranch> {
+ let rd = 0;
+ let ra = br;
+ let Form = FCCI;
+}
//===----------------------------------------------------------------------===//
// Branch and Link Instructions
//===----------------------------------------------------------------------===//
class BranchL<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
- TBRL<op, br, flags, (outs), (ins CPURegs:$target),
- !strconcat(instr_asm, " r15, $target"),
- [], IIBranch>;
+ TA<op, flags, (outs), (ins GPR:$link, GPR:$target, variable_ops),
+ !strconcat(instr_asm, " $link, $target"),
+ [], IIBranch> {
+ let ra = br;
+ let Form = FRCR;
+}
class BranchLI<bits<6> op, bits<5> br, string instr_asm> :
- TBRLI<op, br, (outs), (ins calltarget:$target),
- !strconcat(instr_asm, " r15, $target"),
- [], IIBranch>;
+ TB<op, (outs), (ins GPR:$link, calltarget:$target, variable_ops),
+ !strconcat(instr_asm, " $link, $target"),
+ [], IIBranch> {
+ let ra = br;
+ let Form = FRCI;
+}
//===----------------------------------------------------------------------===//
// Conditional Branch Instructions
//===----------------------------------------------------------------------===//
-class BranchC<bits<6> op, bits<5> br, bits<11> flags, string instr_asm,
- PatFrag cond_op> :
- TBRC<op, br, flags, (outs),
- (ins CPURegs:$a, CPURegs:$b, brtarget:$offset),
- !strconcat(instr_asm, " $a, $b, $offset"),
- [], IIBranch>;
- //(brcond (cond_op CPURegs:$a, CPURegs:$b), bb:$offset)],
- //IIBranch>;
+class BranchC<bits<6> op, bits<5> br, bits<11> flags, string instr_asm> :
+ TA<op, flags, (outs),
+ (ins GPR:$a, GPR:$b),
+ !strconcat(instr_asm, " $a, $b"),
+ [], IIBranch> {
+ let rd = br;
+ let Form = FCRR;
+}
-class BranchCI<bits<6> op, bits<5> br, string instr_asm, PatFrag cond_op> :
- TBRCI<op, br, (outs), (ins CPURegs:$a, brtarget:$offset),
- !strconcat(instr_asm, " $a, $offset"),
- [], IIBranch>;
+class BranchCI<bits<6> op, bits<5> br, string instr_asm> :
+ TB<op, (outs), (ins GPR:$a, brtarget:$offset),
+ !strconcat(instr_asm, " $a, $offset"),
+ [], IIBranch> {
+ let rd = br;
+ let Form = FCRI;
+}
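
Given MBlazeInst's big-endian bit numbering (Inst{0-5} is the opcode, i.e. the most significant six bits), a Type-B word such as these branch-immediate forms can be assembled by hand. A sketch, not the in-tree emitter:

```c++
#include <cstdint>

// Pack a Type-B instruction with bit 0 as the most significant bit:
// Inst{0-5}=opcode, {6-10}=rd, {11-15}=ra, {16-31}=imm16.
static uint32_t encodeTB(unsigned opcode, unsigned rd, unsigned ra,
                         unsigned imm16) {
  return (opcode & 0x3F) << 26 | (rd & 0x1F) << 21 |
         (ra & 0x1F) << 16 | (imm16 & 0xFFFF);
}
// BranchI fixes rd = 0 and puts the 5-bit selector in ra, so
// encodeTB(0x2E, 0x00, 0x10, imm) would be a "brid imm" word.
```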
//===----------------------------------------------------------------------===//
// MBlaze arithmetic instructions
//===----------------------------------------------------------------------===//
let isCommutable = 1, isAsCheapAsAMove = 1 in {
- def ADD : Arith<0x00, 0x000, "add ", add, IIAlu>;
- def ADDC : Arith<0x02, 0x000, "addc ", adde, IIAlu>;
- def ADDK : Arith<0x04, 0x000, "addk ", addc, IIAlu>;
+ def ADDK : Arith<0x04, 0x000, "addk ", add, IIAlu>;
+ def AND : Logic<0x21, 0x000, "and ", and>;
+ def OR : Logic<0x20, 0x000, "or ", or>;
+ def XOR : Logic<0x22, 0x000, "xor ", xor>;
+ def PCMPBF : PatCmp<0x20, 0x400, "pcmpbf ">;
+ def PCMPEQ : PatCmp<0x22, 0x400, "pcmpeq ">;
+ def PCMPNE : PatCmp<0x23, 0x400, "pcmpne ">;
+
+ let Defs = [CARRY] in {
+ def ADD : Arith<0x00, 0x000, "add ", addc, IIAlu>;
+
+ let Uses = [CARRY] in {
+ def ADDC : Arith<0x02, 0x000, "addc ", adde, IIAlu>;
+ }
+ }
+
+ let Uses = [CARRY] in {
def ADDKC : ArithN<0x06, 0x000, "addkc ", IIAlu>;
- def AND : Logic<0x21, 0x000, "and ", and>;
- def OR : Logic<0x20, 0x000, "or ", or>;
- def XOR : Logic<0x22, 0x000, "xor ", xor>;
+ }
}
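
The regrouping above retargets LLVM's carry nodes onto the MicroBlaze flavours: plain add now selects ADDK, ADD matches addc and defines CARRY, and ADDC matches adde, using and redefining it. A 64-bit add therefore splits into an ADD/ADDC pair; a hand-expanded sketch (register names are placeholders):

```c++
// Low halves: ADD sets CARRY (implicit def); high halves: ADDC
// consumes and redefines it. Arith operands are (dst, b, c).
BuildMI(MBB, I, DL, TII->get(MBlaze::ADD), LoDst)
    .addReg(LoA).addReg(LoB);
BuildMI(MBB, I, DL, TII->get(MBlaze::ADDC), HiDst)
    .addReg(HiA).addReg(HiB);
```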
let isAsCheapAsAMove = 1 in {
- def ANDN : ArithN<0x23, 0x000, "andn ", IIAlu>;
- def CMP : ArithN<0x05, 0x001, "cmp ", IIAlu>;
- def CMPU : ArithN<0x05, 0x003, "cmpu ", IIAlu>;
- def RSUB : ArithR<0x01, 0x000, "rsub ", sub, IIAlu>;
- def RSUBC : ArithR<0x03, 0x000, "rsubc ", sube, IIAlu>;
- def RSUBK : ArithR<0x05, 0x000, "rsubk ", subc, IIAlu>;
+ def ANDN : ArithN<0x23, 0x000, "andn ", IIAlu>;
+ def CMP : ArithN<0x05, 0x001, "cmp ", IIAlu>;
+ def CMPU : ArithN<0x05, 0x003, "cmpu ", IIAlu>;
+ def RSUBK : ArithR<0x05, 0x000, "rsubk ", sub, IIAlu>;
+
+ let Defs = [CARRY] in {
+ def RSUB : ArithR<0x01, 0x000, "rsub ", subc, IIAlu>;
+
+ let Uses = [CARRY] in {
+ def RSUBC : ArithR<0x03, 0x000, "rsubc ", sube, IIAlu>;
+ }
+ }
+
+ let Uses = [CARRY] in {
def RSUBKC : ArithRN<0x07, 0x000, "rsubkc ", IIAlu>;
+ }
}
let isCommutable = 1, Predicates=[HasMul] in {
- def MUL : Arith<0x10, 0x000, "mul ", mul, IIAlu>;
+ def MUL : Arith<0x10, 0x000, "mul ", mul, IIAlu>;
}
let isCommutable = 1, Predicates=[HasMul,HasMul64] in {
- def MULH : Arith<0x10, 0x001, "mulh ", mulhs, IIAlu>;
- def MULHU : Arith<0x10, 0x003, "mulhu ", mulhu, IIAlu>;
+ def MULH : Arith<0x10, 0x001, "mulh ", mulhs, IIAlu>;
+ def MULHU : Arith<0x10, 0x003, "mulhu ", mulhu, IIAlu>;
}
let Predicates=[HasMul,HasMul64] in {
- def MULHSU : ArithN<0x10, 0x002, "mulhsu ", IIAlu>;
+ def MULHSU : ArithN<0x10, 0x002, "mulhsu ", IIAlu>;
}
let Predicates=[HasBarrel] in {
- def BSRL : Arith<0x11, 0x000, "bsrl ", srl, IIAlu>;
- def BSRA : Arith<0x11, 0x200, "bsra ", sra, IIAlu>;
- def BSLL : Arith<0x11, 0x400, "bsll ", shl, IIAlu>;
- def BSRLI : ArithI<0x11, "bsrli ", srl, uimm5, immZExt5>;
- def BSRAI : ArithI<0x11, "bsrai ", sra, uimm5, immZExt5>;
- def BSLLI : ArithI<0x11, "bslli ", shl, uimm5, immZExt5>;
+ def BSRL : Arith<0x11, 0x000, "bsrl ", srl, IIAlu>;
+ def BSRA : Arith<0x11, 0x200, "bsra ", sra, IIAlu>;
+ def BSLL : Arith<0x11, 0x400, "bsll ", shl, IIAlu>;
+ def BSRLI : ShiftI<0x19, 0x0, "bsrli ", srl, uimm5, immZExt5>;
+ def BSRAI : ShiftI<0x19, 0x1, "bsrai ", sra, uimm5, immZExt5>;
+ def BSLLI : ShiftI<0x19, 0x2, "bslli ", shl, uimm5, immZExt5>;
}
let Predicates=[HasDiv] in {
- def IDIV : Arith<0x12, 0x000, "idiv ", sdiv, IIAlu>;
- def IDIVU : Arith<0x12, 0x002, "idivu ", udiv, IIAlu>;
+ def IDIV : ArithR<0x12, 0x000, "idiv ", sdiv, IIAlu>;
+ def IDIVU : ArithR<0x12, 0x002, "idivu ", udiv, IIAlu>;
}
//===----------------------------------------------------------------------===//
@@ -357,22 +402,31 @@ let Predicates=[HasDiv] in {
//===----------------------------------------------------------------------===//
let isAsCheapAsAMove = 1 in {
- def ADDI : ArithI<0x08, "addi ", add, simm16, immSExt16>;
- def ADDIC : ArithNI<0x0A, "addic ", simm16, immSExt16>;
- def ADDIK : ArithNI<0x0C, "addik ", simm16, immSExt16>;
- def ADDIKC : ArithI<0x0E, "addikc ", addc, simm16, immSExt16>;
- def RSUBI : ArithRI<0x09, "rsubi ", sub, simm16, immSExt16>;
- def RSUBIC : ArithRNI<0x0B, "rsubi ", simm16, immSExt16>;
- def RSUBIK : ArithRNI<0x0E, "rsubic ", simm16, immSExt16>;
- def RSUBIKC : ArithRI<0x0F, "rsubikc", subc, simm16, immSExt16>;
- def ANDNI : ArithNI<0x2B, "andni ", uimm16, immZExt16>;
- def ANDI : LogicI<0x29, "andi ", and>;
- def ORI : LogicI<0x28, "ori ", or>;
- def XORI : LogicI<0x2A, "xori ", xor>;
+ def ADDIK : ArithI<0x0C, "addik ", add, simm16, immSExt16>;
+ def RSUBIK : ArithRI<0x0D, "rsubik ", sub, simm16, immSExt16>;
+ def ANDNI : ArithNI<0x2B, "andni ", uimm16, immZExt16>;
+ def ANDI : LogicI<0x29, "andi ", and>;
+ def ORI : LogicI<0x28, "ori ", or>;
+ def XORI : LogicI<0x2A, "xori ", xor>;
+
+ let Defs = [CARRY] in {
+ def ADDI : ArithI<0x08, "addi ", addc, simm16, immSExt16>;
+ def RSUBI : ArithRI<0x09, "rsubi ", subc, simm16, immSExt16>;
+
+ let Uses = [CARRY] in {
+ def ADDIC : ArithI<0x0A, "addic ", adde, simm16, immSExt16>;
+ def RSUBIC : ArithRI<0x0B, "rsubic ", sube, simm16, immSExt16>;
+ }
+ }
+
+ let Uses = [CARRY] in {
+ def ADDIKC : ArithNI<0x0E, "addikc ", simm16, immSExt16>;
+ def RSUBIKC : ArithRNI<0x0F, "rsubikc", simm16, immSExt16>;
+ }
}
let Predicates=[HasMul] in {
- def MULI : ArithI<0x18, "muli ", mul, simm16, immSExt16>;
+ def MULI : ArithI<0x18, "muli ", mul, simm16, immSExt16>;
}
//===----------------------------------------------------------------------===//
@@ -380,290 +434,445 @@ let Predicates=[HasMul] in {
//===----------------------------------------------------------------------===//
let canFoldAsLoad = 1, isReMaterializable = 1 in {
- def LBU : LoadM<0x30, "lbu ", zextloadi8>;
- def LHU : LoadM<0x31, "lhu ", zextloadi16>;
- def LW : LoadM<0x32, "lw ", load>;
+ def LBU : LoadM<0x30, 0x000, "lbu ">;
+ def LBUR : LoadM<0x30, 0x200, "lbur ">;
+
+ def LHU : LoadM<0x31, 0x000, "lhu ">;
+ def LHUR : LoadM<0x31, 0x200, "lhur ">;
+
+ def LW : LoadM<0x32, 0x000, "lw ">;
+ def LWR : LoadM<0x32, 0x200, "lwr ">;
- def LBUI : LoadMI<0x30, "lbui ", zextloadi8>;
- def LHUI : LoadMI<0x31, "lhui ", zextloadi16>;
- def LWI : LoadMI<0x32, "lwi ", load>;
+ let Defs = [CARRY] in {
+ def LWX : LoadM<0x32, 0x400, "lwx ">;
+ }
+
+ def LBUI : LoadMI<0x38, "lbui ", zextloadi8>;
+ def LHUI : LoadMI<0x39, "lhui ", zextloadi16>;
+ def LWI : LoadMI<0x3A, "lwi ", load>;
}
- def SB : StoreM<0x34, "sb ", truncstorei8>;
- def SH : StoreM<0x35, "sh ", truncstorei16>;
- def SW : StoreM<0x36, "sw ", store>;
+def SB : StoreM<0x34, 0x000, "sb ">;
+def SBR : StoreM<0x34, 0x200, "sbr ">;
+
+def SH : StoreM<0x35, 0x000, "sh ">;
+def SHR : StoreM<0x35, 0x200, "shr ">;
+
+def SW : StoreM<0x36, 0x000, "sw ">;
+def SWR : StoreM<0x36, 0x200, "swr ">;
- def SBI : StoreMI<0x34, "sbi ", truncstorei8>;
- def SHI : StoreMI<0x35, "shi ", truncstorei16>;
- def SWI : StoreMI<0x36, "swi ", store>;
+let Defs = [CARRY] in {
+ def SWX : StoreM<0x36, 0x400, "swx ">;
+}
+
+def SBI : StoreMI<0x3C, "sbi ", truncstorei8>;
+def SHI : StoreMI<0x3D, "shi ", truncstorei16>;
+def SWI : StoreMI<0x3E, "swi ", store>;
//===----------------------------------------------------------------------===//
// MBlaze branch instructions
//===----------------------------------------------------------------------===//
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
+ def BRI : BranchI<0x2E, 0x00, "bri ">;
+ def BRAI : BranchI<0x2E, 0x08, "brai ">;
+}
+
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
- def BRI : BranchI<0x2E, 0x00, "bri ">;
- def BRAI : BranchI<0x2E, 0x08, "brai ">;
- def BEQI : BranchCI<0x2F, 0x00, "beqi ", seteq>;
- def BNEI : BranchCI<0x2F, 0x01, "bnei ", setne>;
- def BLTI : BranchCI<0x2F, 0x02, "blti ", setlt>;
- def BLEI : BranchCI<0x2F, 0x03, "blei ", setle>;
- def BGTI : BranchCI<0x2F, 0x04, "bgti ", setgt>;
- def BGEI : BranchCI<0x2F, 0x05, "bgei ", setge>;
+ def BEQI : BranchCI<0x2F, 0x00, "beqi ">;
+ def BNEI : BranchCI<0x2F, 0x01, "bnei ">;
+ def BLTI : BranchCI<0x2F, 0x02, "blti ">;
+ def BLEI : BranchCI<0x2F, 0x03, "blei ">;
+ def BGTI : BranchCI<0x2F, 0x04, "bgti ">;
+ def BGEI : BranchCI<0x2F, 0x05, "bgei ">;
+}
+
+let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1,
+ isBarrier = 1 in {
+ def BR : Branch<0x26, 0x00, 0x000, "br ">;
+ def BRA : Branch<0x26, 0x08, 0x000, "bra ">;
}
let isBranch = 1, isIndirectBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
- def BR : Branch<0x26, 0x00, 0x000, "br ">;
- def BRA : Branch<0x26, 0x08, 0x000, "bra ">;
- def BEQ : BranchC<0x27, 0x00, 0x000, "beq ", seteq>;
- def BNE : BranchC<0x27, 0x01, 0x000, "bne ", setne>;
- def BLT : BranchC<0x27, 0x02, 0x000, "blt ", setlt>;
- def BLE : BranchC<0x27, 0x03, 0x000, "ble ", setle>;
- def BGT : BranchC<0x27, 0x04, 0x000, "bgt ", setgt>;
- def BGE : BranchC<0x27, 0x05, 0x000, "bge ", setge>;
+ def BEQ : BranchC<0x27, 0x00, 0x000, "beq ">;
+ def BNE : BranchC<0x27, 0x01, 0x000, "bne ">;
+ def BLT : BranchC<0x27, 0x02, 0x000, "blt ">;
+ def BLE : BranchC<0x27, 0x03, 0x000, "ble ">;
+ def BGT : BranchC<0x27, 0x04, 0x000, "bgt ">;
+ def BGE : BranchC<0x27, 0x05, 0x000, "bge ">;
+}
+
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1,
+ isBarrier = 1 in {
+ def BRID : BranchI<0x2E, 0x10, "brid ">;
+ def BRAID : BranchI<0x2E, 0x18, "braid ">;
}
let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasCtrlDep = 1 in {
- def BRID : BranchI<0x2E, 0x10, "brid ">;
- def BRAID : BranchI<0x2E, 0x18, "braid ">;
- def BEQID : BranchCI<0x2F, 0x10, "beqid ", seteq>;
- def BNEID : BranchCI<0x2F, 0x11, "bneid ", setne>;
- def BLTID : BranchCI<0x2F, 0x12, "bltid ", setlt>;
- def BLEID : BranchCI<0x2F, 0x13, "bleid ", setle>;
- def BGTID : BranchCI<0x2F, 0x14, "bgtid ", setgt>;
- def BGEID : BranchCI<0x2F, 0x15, "bgeid ", setge>;
+ def BEQID : BranchCI<0x2F, 0x10, "beqid ">;
+ def BNEID : BranchCI<0x2F, 0x11, "bneid ">;
+ def BLTID : BranchCI<0x2F, 0x12, "bltid ">;
+ def BLEID : BranchCI<0x2F, 0x13, "bleid ">;
+ def BGTID : BranchCI<0x2F, 0x14, "bgtid ">;
+ def BGEID : BranchCI<0x2F, 0x15, "bgeid ">;
+}
+
+let isBranch = 1, isIndirectBranch = 1, isTerminator = 1,
+ hasDelaySlot = 1, hasCtrlDep = 1, isBarrier = 1 in {
+ def BRD : Branch<0x26, 0x10, 0x000, "brd ">;
+ def BRAD : Branch<0x26, 0x18, 0x000, "brad ">;
}
let isBranch = 1, isIndirectBranch = 1, isTerminator = 1,
hasDelaySlot = 1, hasCtrlDep = 1 in {
- def BRD : Branch<0x26, 0x10, 0x000, "brd ">;
- def BRAD : Branch<0x26, 0x18, 0x000, "brad ">;
- def BEQD : BranchC<0x27, 0x10, 0x000, "beqd ", seteq>;
- def BNED : BranchC<0x27, 0x11, 0x000, "bned ", setne>;
- def BLTD : BranchC<0x27, 0x12, 0x000, "bltd ", setlt>;
- def BLED : BranchC<0x27, 0x13, 0x000, "bled ", setle>;
- def BGTD : BranchC<0x27, 0x14, 0x000, "bgtd ", setgt>;
- def BGED : BranchC<0x27, 0x15, 0x000, "bged ", setge>;
+ def BEQD : BranchC<0x27, 0x10, 0x000, "beqd ">;
+ def BNED : BranchC<0x27, 0x11, 0x000, "bned ">;
+ def BLTD : BranchC<0x27, 0x12, 0x000, "bltd ">;
+ def BLED : BranchC<0x27, 0x13, 0x000, "bled ">;
+ def BGTD : BranchC<0x27, 0x14, 0x000, "bgtd ">;
+ def BGED : BranchC<0x27, 0x15, 0x000, "bged ">;
+}
+
+let isCall = 1, hasDelaySlot = 1,
+ Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY],
+ Uses = [R1] in {
+ def BRLID : BranchLI<0x2E, 0x14, "brlid ">;
+ def BRALID : BranchLI<0x2E, 0x1C, "bralid ">;
+}
+
+let isCall = 1, hasDelaySlot = 1,
+ Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,CARRY],
+ Uses = [R1] in {
+ def BRLD : BranchL<0x26, 0x14, 0x000, "brld ">;
+ def BRALD : BranchL<0x26, 0x1C, 0x000, "brald ">;
}
-let isCall = 1, hasCtrlDep = 1, isIndirectBranch = 1,
- Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12],
- Uses = [R1,R5,R6,R7,R8,R9,R10] in {
- def BRL : BranchL<0x26, 0x04, 0x000, "brl ">;
- def BRAL : BranchL<0x26, 0x0C, 0x000, "bral ">;
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
+ rd=0x10, Form=FCRI in {
+ def RTSD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
+ "rtsd $target, $imm",
+ [],
+ IIBranch>;
}
-let isCall = 1, hasDelaySlot = 1, hasCtrlDep = 1,
- Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12],
- Uses = [R1,R5,R6,R7,R8,R9,R10] in {
- def BRLID : BranchLI<0x2E, 0x14, "brlid ">;
- def BRALID : BranchLI<0x2E, 0x1C, "bralid ">;
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
+ rd=0x11, Form=FCRI in {
+ def RTID : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
+ "rtid $target, $imm",
+ [],
+ IIBranch>;
}
-let isCall = 1, hasDelaySlot = 1, hasCtrlDep = 1, isIndirectBranch = 1,
- Defs = [R3,R4,R5,R6,R7,R8,R9,R10,R11,R12],
- Uses = [R1,R5,R6,R7,R8,R9,R10] in {
- def BRLD : BranchL<0x26, 0x14, 0x000, "brld ">;
- def BRALD : BranchL<0x26, 0x1C, 0x000, "brald ">;
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
+ rd=0x12, Form=FCRI in {
+ def RTBD : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
+ "rtbd $target, $imm",
+ [],
+ IIBranch>;
}
-let isReturn=1, isTerminator=1, hasDelaySlot=1,
- isBarrier=1, hasCtrlDep=1, imm16=0x8 in {
- def RTSD : TRET<0x2D, (outs), (ins CPURegs:$target),
- "rtsd $target, 8",
- [(MBlazeRet CPURegs:$target)],
- IIBranch>;
+let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
+ rd=0x14, Form=FCRI in {
+ def RTED : TB<0x2D, (outs), (ins GPR:$target, simm16:$imm),
+ "rted $target, $imm",
+ [],
+ IIBranch>;
}
//===----------------------------------------------------------------------===//
// MBlaze misc instructions
//===----------------------------------------------------------------------===//
-let addr = 0 in {
- def NOP : TADDR<0x00, (outs), (ins), "nop ", [], IIAlu>;
+let neverHasSideEffects = 1 in {
+ def NOP : MBlazeInst< 0x20, FC, (outs), (ins), "nop ", [], IIAlu>;
}
let usesCustomInserter = 1 in {
- //class PseudoSelCC<RegisterClass RC, string asmstr>:
- // MBlazePseudo<(outs RC:$D), (ins RC:$T, RC:$F, CPURegs:$CMP), asmstr,
- // [(set RC:$D, (MBlazeSelectCC RC:$T, RC:$F, CPURegs:$CMP))]>;
- //def Select_CC : PseudoSelCC<CPURegs, "# MBlazeSelect_CC">;
-
- def Select_CC : MBlazePseudo<(outs CPURegs:$dst),
- (ins CPURegs:$T, CPURegs:$F, CPURegs:$CMP, i32imm:$CC),
+ def Select_CC : MBlazePseudo<(outs GPR:$dst),
+ (ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC), // F T reversed
"; SELECT_CC PSEUDO!",
[]>;
- def ShiftL : MBlazePseudo<(outs CPURegs:$dst),
- (ins CPURegs:$L, CPURegs:$R),
+ def ShiftL : MBlazePseudo<(outs GPR:$dst),
+ (ins GPR:$L, GPR:$R),
"; ShiftL PSEUDO!",
[]>;
- def ShiftRA : MBlazePseudo<(outs CPURegs:$dst),
- (ins CPURegs:$L, CPURegs:$R),
+ def ShiftRA : MBlazePseudo<(outs GPR:$dst),
+ (ins GPR:$L, GPR:$R),
"; ShiftRA PSEUDO!",
[]>;
- def ShiftRL : MBlazePseudo<(outs CPURegs:$dst),
- (ins CPURegs:$L, CPURegs:$R),
+ def ShiftRL : MBlazePseudo<(outs GPR:$dst),
+ (ins GPR:$L, GPR:$R),
"; ShiftRL PSEUDO!",
[]>;
}
-
let rb = 0 in {
- def SEXT16 : TA<0x24, 0x061, (outs CPURegs:$dst), (ins CPURegs:$src),
- "sext16 $dst, $src", [], IIAlu>;
- def SEXT8 : TA<0x24, 0x060, (outs CPURegs:$dst), (ins CPURegs:$src),
- "sext8 $dst, $src", [], IIAlu>;
- def SRL : TA<0x24, 0x041, (outs CPURegs:$dst), (ins CPURegs:$src),
- "srl $dst, $src", [], IIAlu>;
- def SRA : TA<0x24, 0x001, (outs CPURegs:$dst), (ins CPURegs:$src),
- "sra $dst, $src", [], IIAlu>;
- def SRC : TA<0x24, 0x021, (outs CPURegs:$dst), (ins CPURegs:$src),
- "src $dst, $src", [], IIAlu>;
+ def SEXT16 : TA<0x24, 0x061, (outs GPR:$dst), (ins GPR:$src),
+ "sext16 $dst, $src", [], IIAlu>;
+ def SEXT8 : TA<0x24, 0x060, (outs GPR:$dst), (ins GPR:$src),
+ "sext8 $dst, $src", [], IIAlu>;
+ let Defs = [CARRY] in {
+ def SRL : TA<0x24, 0x041, (outs GPR:$dst), (ins GPR:$src),
+ "srl $dst, $src", [], IIAlu>;
+ def SRA : TA<0x24, 0x001, (outs GPR:$dst), (ins GPR:$src),
+ "sra $dst, $src", [], IIAlu>;
+ let Uses = [CARRY] in {
+ def SRC : TA<0x24, 0x021, (outs GPR:$dst), (ins GPR:$src),
+ "src $dst, $src", [], IIAlu>;
+ }
+ }
+}
+
+let isCodeGenOnly=1 in {
+ def ADDIK32 : ArithI32<0x08, "addik ", simm16, immSExt16>;
+ def ORI32 : LogicI32<0x28, "ori ">;
+ def BRLID32 : BranchLI<0x2E, 0x14, "brlid ">;
+}
+
+//===----------------------------------------------------------------------===//
+// Misc. instructions
+//===----------------------------------------------------------------------===//
+let Form=FRCS in {
+ def MFS : SPC<0x25, 0x2, (outs GPR:$dst), (ins SPR:$src),
+ "mfs $dst, $src", [], IIAlu>;
+}
+
+let Form=FCRCS in {
+ def MTS : SPC<0x25, 0x3, (outs SPR:$dst), (ins GPR:$src),
+ "mts $dst, $src", [], IIAlu>;
+}
+
+def MSRSET : MSR<0x25, 0x20, (outs GPR:$dst), (ins uimm15:$set),
+ "msrset $dst, $set", [], IIAlu>;
+
+def MSRCLR : MSR<0x25, 0x22, (outs GPR:$dst), (ins uimm15:$clr),
+ "msrclr $dst, $clr", [], IIAlu>;
+
+let rd=0x0, Form=FCRR in {
+ def WDC : TA<0x24, 0x64, (outs), (ins GPR:$a, GPR:$b),
+ "wdc $a, $b", [], IIAlu>;
+ def WDCF : TA<0x24, 0x74, (outs), (ins GPR:$a, GPR:$b),
+ "wdc.flush $a, $b", [], IIAlu>;
+ def WDCC : TA<0x24, 0x66, (outs), (ins GPR:$a, GPR:$b),
+ "wdc.clear $a, $b", [], IIAlu>;
+ def WIC : TA<0x24, 0x68, (outs), (ins GPR:$a, GPR:$b),
+ "wic $a, $b", [], IIAlu>;
}
-def LEA_ADDI : EffectiveAddress<"addi $dst, ${addr:stackloc}">;
+def BRK : BranchL<0x26, 0x0C, 0x000, "brk ">;
+def BRKI : BranchLI<0x2E, 0x0C, "brki ">;
+
+def IMM : MBlazeInst<0x2C, FCCI, (outs), (ins simm16:$imm),
+ "imm $imm", [], IIAlu>;
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions for atomic operations
+//===----------------------------------------------------------------------===//
+let usesCustomInserter=1 in {
+ def CAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$cmp, GPR:$swp),
+ "# atomic compare and swap",
+ [(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$cmp, GPR:$swp))]>;
+
+ def SWP32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$swp),
+ "# atomic swap",
+ [(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$swp))]>;
+
+ def LAA32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and add",
+ [(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$val))]>;
+
+ def LAS32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and sub",
+ [(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$val))]>;
+
+ def LAD32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and and",
+ [(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$val))]>;
+
+ def LAO32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and or",
+ [(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$val))]>;
+
+ def LAX32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and xor",
+ [(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$val))]>;
+
+ def LAN32 : MBlazePseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$val),
+ "# atomic load and nand",
+ [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$val))]>;
+
+ def MEMBARRIER : MBlazePseudo<(outs), (ins),
+ "# memory barrier",
+ [(membarrier (i32 imm), (i32 imm), (i32 imm), (i32 imm), (i32 imm))]>;
+}
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
// Small immediates
-def : Pat<(i32 0), (ADD R0, R0)>;
-def : Pat<(i32 immSExt16:$imm), (ADDI R0, imm:$imm)>;
-def : Pat<(i32 immZExt16:$imm), (ORI R0, imm:$imm)>;
+def : Pat<(i32 0), (ADDK (i32 R0), (i32 R0))>;
+def : Pat<(i32 immSExt16:$imm), (ADDIK (i32 R0), imm:$imm)>;
+def : Pat<(i32 immZExt16:$imm), (ORI (i32 R0), imm:$imm)>;
// Arbitrary immediates
-def : Pat<(i32 imm:$imm), (ADDI R0, imm:$imm)>;
+def : Pat<(i32 imm:$imm), (ADDIK (i32 R0), imm:$imm)>;
// In register sign extension
-def : Pat<(sext_inreg CPURegs:$src, i16), (SEXT16 CPURegs:$src)>;
-def : Pat<(sext_inreg CPURegs:$src, i8), (SEXT8 CPURegs:$src)>;
+def : Pat<(sext_inreg GPR:$src, i16), (SEXT16 GPR:$src)>;
+def : Pat<(sext_inreg GPR:$src, i8), (SEXT8 GPR:$src)>;
// Call
-def : Pat<(MBlazeJmpLink (i32 tglobaladdr:$dst)), (BRLID tglobaladdr:$dst)>;
-def : Pat<(MBlazeJmpLink (i32 texternalsym:$dst)),(BRLID texternalsym:$dst)>;
-def : Pat<(MBlazeJmpLink CPURegs:$dst), (BRLD CPURegs:$dst)>;
+def : Pat<(MBlazeJmpLink (i32 tglobaladdr:$dst)),
+ (BRLID (i32 R15), tglobaladdr:$dst)>;
+
+def : Pat<(MBlazeJmpLink (i32 texternalsym:$dst)),
+ (BRLID (i32 R15), texternalsym:$dst)>;
+
+def : Pat<(MBlazeJmpLink GPR:$dst),
+ (BRALD (i32 R15), GPR:$dst)>;
// Shift Instructions
-def : Pat<(shl CPURegs:$L, CPURegs:$R), (ShiftL CPURegs:$L, CPURegs:$R)>;
-def : Pat<(sra CPURegs:$L, CPURegs:$R), (ShiftRA CPURegs:$L, CPURegs:$R)>;
-def : Pat<(srl CPURegs:$L, CPURegs:$R), (ShiftRL CPURegs:$L, CPURegs:$R)>;
+def : Pat<(shl GPR:$L, GPR:$R), (ShiftL GPR:$L, GPR:$R)>;
+def : Pat<(sra GPR:$L, GPR:$R), (ShiftRA GPR:$L, GPR:$R)>;
+def : Pat<(srl GPR:$L, GPR:$R), (ShiftRL GPR:$L, GPR:$R)>;
// SET_CC operations
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETEQ),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 1)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETNE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 2)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETGT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 3)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETLT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 4)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETGE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 5)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETLE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMP CPURegs:$L, CPURegs:$R), 6)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETUGT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMPU CPURegs:$L, CPURegs:$R), 3)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETULT),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMPU CPURegs:$L, CPURegs:$R), 4)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETUGE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMPU CPURegs:$L, CPURegs:$R), 5)>;
-def : Pat<(setcc CPURegs:$L, CPURegs:$R, SETULE),
- (Select_CC (ADDI R0, 1), (ADDI R0, 0),
- (CMPU CPURegs:$L, CPURegs:$R), 6)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 1)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETNE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 2)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 3)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETLT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 4)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 5)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETLE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMP GPR:$R, GPR:$L), 6)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, GPR:$L), 3)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, GPR:$L), 4)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, GPR:$L), 5)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETULE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, GPR:$L), 6)>;
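
For context (not part of the patch): each setcc lowers to a compare feeding the Select_CC pseudo, with the trailing integer encoding the condition (1=eq, 2=ne, 3=gt, 4=lt, 5=ge, 6=le), so a setcc becomes "select 1 or 0". A minimal C++ sketch of that selection, assuming the compare here yields the sign of L - R:

    #include <cassert>

    // Select_CC picks T or F from a compare result and a condition code.
    static int selectCC(int cmp, int cc, int T, int F) {
      bool take;
      switch (cc) {
      case 1:  take = cmp == 0; break;
      case 2:  take = cmp != 0; break;
      case 3:  take = cmp >  0; break;
      case 4:  take = cmp <  0; break;
      case 5:  take = cmp >= 0; break;
      default: take = cmp <= 0; break; // cc == 6
      }
      return take ? T : F;
    }

    int main() {
      // setcc(L, R, SETGT) lowers to Select_CC(1, 0, CMP(R, L), 3); with
      // L = 7, R = 3 the compare is positive, so the setcc result is 1.
      assert(selectCC(7 - 3, 3, /*T=*/1, /*F=*/0) == 1);
      assert(selectCC(3 - 7, 3, 1, 0) == 0);
      return 0;
    }
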
// SELECT operations
-def : Pat<(select CPURegs:$C, CPURegs:$T, CPURegs:$F),
- (Select_CC CPURegs:$T, CPURegs:$F, CPURegs:$C, 2)>;
-
-// SELECT_CC
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETEQ),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 1)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETNE),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 2)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETGT),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 3)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETLT),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 4)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETGE),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 5)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETLE),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMP CPURegs:$L, CPURegs:$R), 6)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETUGT),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMPU CPURegs:$L, CPURegs:$R), 3)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETULT),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMPU CPURegs:$L, CPURegs:$R), 4)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETUGE),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMPU CPURegs:$L, CPURegs:$R), 5)>;
-def : Pat<(selectcc CPURegs:$L, CPURegs:$R, CPURegs:$T, CPURegs:$F, SETULE),
- (Select_CC CPURegs:$T, CPURegs:$F, (CMPU CPURegs:$L, CPURegs:$R), 6)>;
+def : Pat<(select (i32 GPR:$C), (i32 GPR:$T), (i32 GPR:$F)),
+ (Select_CC GPR:$T, GPR:$F, GPR:$C, 2)>;
+
+// SELECT_CC
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETEQ),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 1)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETNE),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 2)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETGT),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 3)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETLT),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 4)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETGE),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 5)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETLE),
+ (Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 6)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 3)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETULT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 4)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 5)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETULE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, GPR:$L), 6)>;
+
+// Ret instructions
+def : Pat<(MBlazeRet GPR:$target), (RTSD GPR:$target, 0x8)>;
+def : Pat<(MBlazeIRet GPR:$target), (RTID GPR:$target, 0x0)>;
+
+// BR instructions
+def : Pat<(br bb:$T), (BRID bb:$T)>;
+def : Pat<(brind GPR:$T), (BRAD GPR:$T)>;
// BRCOND instructions
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETEQ), bb:$T),
- (BEQID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETNE), bb:$T),
- (BNEID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETGT), bb:$T),
- (BGTID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETLT), bb:$T),
- (BLTID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETGE), bb:$T),
- (BGEID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETLE), bb:$T),
- (BLEID (CMP CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETUGT), bb:$T),
- (BGTID (CMPU CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETULT), bb:$T),
- (BLTID (CMPU CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETUGE), bb:$T),
- (BGEID (CMPU CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond (setcc CPURegs:$L, CPURegs:$R, SETULE), bb:$T),
- (BLEID (CMPU CPURegs:$R, CPURegs:$L), bb:$T)>;
-def : Pat<(brcond CPURegs:$C, bb:$T),
- (BNEID CPURegs:$C, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ), bb:$T),
+ (BEQID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETNE), bb:$T),
+ (BNEID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGT), bb:$T),
+ (BGTID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLT), bb:$T),
+ (BLTID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETGE), bb:$T),
+ (BGEID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETLE), bb:$T),
+ (BLEID (CMP GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGT), bb:$T),
+ (BGTID (CMPU GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULT), bb:$T),
+ (BLTID (CMPU GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETUGE), bb:$T),
+ (BGEID (CMPU GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETULE), bb:$T),
+ (BLEID (CMPU GPR:$R, GPR:$L), bb:$T)>;
+def : Pat<(brcond (i32 GPR:$C), bb:$T),
+ (BNEID GPR:$C, bb:$T)>;
// Jump tables, global addresses, and constant pools
-def : Pat<(MBWrapper tglobaladdr:$in), (ORI R0, tglobaladdr:$in)>;
-def : Pat<(MBWrapper tjumptable:$in), (ORI R0, tjumptable:$in)>;
-def : Pat<(MBWrapper tconstpool:$in), (ORI R0, tconstpool:$in)>;
+def : Pat<(MBWrapper tglobaladdr:$in), (ORI (i32 R0), tglobaladdr:$in)>;
+def : Pat<(MBWrapper tjumptable:$in), (ORI (i32 R0), tjumptable:$in)>;
+def : Pat<(MBWrapper tconstpool:$in), (ORI (i32 R0), tconstpool:$in)>;
// Misc instructions
-def : Pat<(and CPURegs:$lh, (not CPURegs:$rh)),(ANDN CPURegs:$lh, CPURegs:$rh)>;
+def : Pat<(and (i32 GPR:$lh), (not (i32 GPR:$rh))),(ANDN GPR:$lh, GPR:$rh)>;
// Arithmetic with immediates
-def : Pat<(add CPURegs:$in, imm:$imm),(ADDI CPURegs:$in, imm:$imm)>;
-def : Pat<(or CPURegs:$in, imm:$imm),(ORI CPURegs:$in, imm:$imm)>;
-def : Pat<(xor CPURegs:$in, imm:$imm),(XORI CPURegs:$in, imm:$imm)>;
-
-// extended load and stores
-def : Pat<(extloadi1 iaddr:$src), (LBUI iaddr:$src)>;
-def : Pat<(extloadi8 iaddr:$src), (LBUI iaddr:$src)>;
-def : Pat<(extloadi16 iaddr:$src), (LHUI iaddr:$src)>;
-def : Pat<(extloadi1 xaddr:$src), (LBU xaddr:$src)>;
-def : Pat<(extloadi8 xaddr:$src), (LBU xaddr:$src)>;
-def : Pat<(extloadi16 xaddr:$src), (LHU xaddr:$src)>;
-
-def : Pat<(sextloadi1 iaddr:$src), (SEXT8 (LBUI iaddr:$src))>;
-def : Pat<(sextloadi8 iaddr:$src), (SEXT8 (LBUI iaddr:$src))>;
-def : Pat<(sextloadi16 iaddr:$src), (SEXT16 (LHUI iaddr:$src))>;
-def : Pat<(sextloadi1 xaddr:$src), (SEXT8 (LBU xaddr:$src))>;
-def : Pat<(sextloadi8 xaddr:$src), (SEXT8 (LBU xaddr:$src))>;
-def : Pat<(sextloadi16 xaddr:$src), (SEXT16 (LHU xaddr:$src))>;
-
-// peepholes
-def : Pat<(store (i32 0), iaddr:$dst), (SWI R0, iaddr:$dst)>;
+def : Pat<(add (i32 GPR:$in), imm:$imm),(ADDIK GPR:$in, imm:$imm)>;
+def : Pat<(or (i32 GPR:$in), imm:$imm),(ORI GPR:$in, imm:$imm)>;
+def : Pat<(xor (i32 GPR:$in), imm:$imm),(XORI GPR:$in, imm:$imm)>;
+
+// Convert any extend loads into zero extend loads
+def : Pat<(extloadi8 iaddr:$src), (i32 (LBUI iaddr:$src))>;
+def : Pat<(extloadi16 iaddr:$src), (i32 (LHUI iaddr:$src))>;
+def : Pat<(extloadi8 xaddr:$src), (i32 (LBU xaddr:$src))>;
+def : Pat<(extloadi16 xaddr:$src), (i32 (LHU xaddr:$src))>;
+
+// 32-bit load and store
+def : Pat<(store (i32 GPR:$dst), xaddr:$addr), (SW GPR:$dst, xaddr:$addr)>;
+def : Pat<(load xaddr:$addr), (i32 (LW xaddr:$addr))>;
+
+// 16-bit load and store
+def : Pat<(truncstorei16 (i32 GPR:$dst), xaddr:$addr), (SH GPR:$dst, xaddr:$addr)>;
+def : Pat<(zextloadi16 xaddr:$addr), (i32 (LHU xaddr:$addr))>;
+
+// 8-bit load and store
+def : Pat<(truncstorei8 (i32 GPR:$dst), xaddr:$addr), (SB GPR:$dst, xaddr:$addr)>;
+def : Pat<(zextloadi8 xaddr:$addr), (i32 (LBU xaddr:$addr))>;
+
+// Peepholes
+def : Pat<(store (i32 0), iaddr:$dst), (SWI (i32 R0), iaddr:$dst)>;
//===----------------------------------------------------------------------===//
// Floating Point Support
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
index 4931860..7e4a2f5 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
@@ -48,7 +48,7 @@ std::string MBlazeIntrinsicInfo::getName(unsigned IntrID, const Type **Tys,
assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded");
if (IntrID < Intrinsic::num_intrinsics)
return 0;
- assert(IntrID < mblazeIntrinsic::num_mblaze_intrinsics &&
+ assert(IntrID < mblazeIntrinsic::num_mblaze_intrinsics &&
"Invalid intrinsic ID");
std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
@@ -94,12 +94,12 @@ static const FunctionType *getType(LLVMContext &Context, unsigned id) {
const Type *ResultTy = NULL;
std::vector<const Type*> ArgTys;
bool IsVarArg = false;
-
+
#define GET_INTRINSIC_GENERATOR
#include "MBlazeGenIntrinsics.inc"
#undef GET_INTRINSIC_GENERATOR
- return FunctionType::get(ResultTy, ArgTys, IsVarArg);
+ return FunctionType::get(ResultTy, ArgTys, IsVarArg);
}
Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
index a27cb5b..278afbe 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
@@ -1,10 +1,10 @@
//===- IntrinsicsMBlaze.td - Defines MBlaze intrinsics -----*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines all of the MicroBlaze-specific intrinsics.
@@ -16,7 +16,7 @@
//
// MBlaze intrinsic classes.
-let TargetPrefix = "mblaze", isTarget = 1 in {
+let TargetPrefix = "mblaze", isTarget = 1 in {
class MBFSL_Get_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
class MBFSL_Put_Intrinsic : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
index 4abeb2e..1467141 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
@@ -14,14 +14,9 @@
#include "MBlazeMCAsmInfo.h"
using namespace llvm;
-MBlazeMCAsmInfo::MBlazeMCAsmInfo(const Target &T, StringRef TT) {
+MBlazeMCAsmInfo::MBlazeMCAsmInfo() {
+ SupportsDebugInformation = true;
AlignmentIsInBytes = false;
- Data16bitsDirective = "\t.half\t";
- Data32bitsDirective = "\t.word\t";
- Data64bitsDirective = 0;
PrivateGlobalPrefix = "$";
- CommentString = "#";
- ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
- HasSetDirective = false;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
index 9d6ff3a..e68dd58 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
@@ -19,10 +19,10 @@
namespace llvm {
class Target;
-
+
class MBlazeMCAsmInfo : public MCAsmInfo {
public:
- explicit MBlazeMCAsmInfo(const Target &T, StringRef TT);
+ explicit MBlazeMCAsmInfo();
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCCodeEmitter.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMCCodeEmitter.cpp
new file mode 100644
index 0000000..3ece1a8
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCCodeEmitter.cpp
@@ -0,0 +1,223 @@
+//===-- MBlazeMCCodeEmitter.cpp - Convert MBlaze code to machine code -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MBlazeMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mccodeemitter"
+#include "MBlaze.h"
+#include "MBlazeInstrInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
+
+namespace {
+class MBlazeMCCodeEmitter : public MCCodeEmitter {
+ MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT
+ const TargetMachine &TM;
+ const TargetInstrInfo &TII;
+ MCContext &Ctx;
+
+public:
+ MBlazeMCCodeEmitter(TargetMachine &tm, MCContext &ctx)
+ : TM(tm), TII(*TM.getInstrInfo()), Ctx(ctx) {
+ }
+
+ ~MBlazeMCCodeEmitter() {}
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ unsigned getBinaryCodeForInstr(const MCInst &MI) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO) const;
+ unsigned getMachineOpValue(const MCInst &MI, unsigned OpIdx) const {
+ return getMachineOpValue(MI, MI.getOperand(OpIdx));
+ }
+
+ static unsigned GetMBlazeRegNum(const MCOperand &MO) {
+ // FIXME: getMBlazeRegisterNumbering() is sufficient?
+ assert(0 && "MBlazeMCCodeEmitter::GetMBlazeRegNum() not yet implemented.");
+ return 0;
+ }
+
+ void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
+ // The MicroBlaze uses a bit reversed format so we need to reverse the
+ // order of the bits. Taken from:
+ // http://graphics.stanford.edu/~seander/bithacks.html
+ C = ((C * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32;
+
+ OS << (char)C;
+ ++CurByte;
+ }
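
For context (not part of the patch): the multiply-and-mask line above is the cited bit hack that reverses the eight bits of a byte with a single 64-bit multiply, needed because MicroBlaze instruction words are emitted bit-reversed here. A self-contained C++ sketch:

    #include <cassert>
    #include <cstdint>

    // Reverse the bit order of one byte with a 64-bit multiply: the
    // multiply makes spaced copies of the byte, the AND keeps one bit per
    // copy in reversed positions, and the final multiply-and-shift
    // gathers them back into the low byte.
    static uint8_t reverseByte(uint8_t C) {
      return ((C * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32;
    }

    int main() {
      assert(reverseByte(0x01) == 0x80);
      assert(reverseByte(0xF0) == 0x0F);
      assert(reverseByte(0xA5) == 0xA5); // 10100101 is a palindrome
      return 0;
    }
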
+
+ void EmitRawByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
+ OS << (char)C;
+ ++CurByte;
+ }
+
+ void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
+ raw_ostream &OS) const {
+ assert(Size <= 8 && "size too big in emit constant");
+
+ for (unsigned i = 0; i != Size; ++i) {
+ EmitByte(Val & 255, CurByte, OS);
+ Val >>= 8;
+ }
+ }
+
+ void EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const;
+ void EmitIMM(const MCInst &MI, unsigned &CurByte, raw_ostream &OS) const;
+
+ void EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel,
+ unsigned &CurByte, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+};
+
+} // end anonymous namespace
+
+
+MCCodeEmitter *llvm::createMBlazeMCCodeEmitter(const Target &,
+ TargetMachine &TM,
+ MCContext &Ctx) {
+ return new MBlazeMCCodeEmitter(TM, Ctx);
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned MBlazeMCCodeEmitter::getMachineOpValue(const MCInst &MI,
+ const MCOperand &MO) const {
+ if (MO.isReg())
+ return MBlazeRegisterInfo::getRegisterNumbering(MO.getReg());
+ else if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+ else if (MO.isExpr())
+ return 0; // The relocation has already been recorded at this point.
+ else {
+#ifndef NDEBUG
+ errs() << MO;
+#endif
+ llvm_unreachable(0);
+ }
+ return 0;
+}
+
+void MBlazeMCCodeEmitter::
+EmitIMM(const MCOperand &imm, unsigned &CurByte, raw_ostream &OS) const {
+ int32_t val = (int32_t)imm.getImm();
+ if (val > 32767 || val < -32768) {
+ EmitByte(0x0D, CurByte, OS);
+ EmitByte(0x00, CurByte, OS);
+ EmitRawByte((val >> 24) & 0xFF, CurByte, OS);
+ EmitRawByte((val >> 16) & 0xFF, CurByte, OS);
+ }
+}
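
For context (not part of the patch): when a 32-bit immediate does not fit the signed 16-bit field, MicroBlaze prefixes the instruction with an IMM instruction carrying the upper 16 bits, which is what the EmitByte/EmitRawByte sequence above encodes. A hedged sketch of the split (the printed mnemonics are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // The IMM prefix carries the upper half; the following instruction's
      // 16-bit immediate field carries the lower half.
      int32_t val = 0x12345678;
      uint16_t upper = (uint32_t)val >> 16;
      uint16_t lower = (uint32_t)val & 0xFFFF;
      std::printf("imm   0x%04X\naddik rD, rA, 0x%04X\n", upper, lower);
      return 0;
    }
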
+
+void MBlazeMCCodeEmitter::
+EmitIMM(const MCInst &MI, unsigned &CurByte,raw_ostream &OS) const {
+ switch (MI.getOpcode()) {
+ default: break;
+
+ case MBlaze::ADDIK32:
+ case MBlaze::ORI32:
+ case MBlaze::BRLID32:
+ EmitByte(0x0D, CurByte, OS);
+ EmitByte(0x00, CurByte, OS);
+ EmitRawByte(0, CurByte, OS);
+ EmitRawByte(0, CurByte, OS);
+ }
+}
+
+void MBlazeMCCodeEmitter::
+EmitImmediate(const MCInst &MI, unsigned opNo, bool pcrel, unsigned &CurByte,
+ raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const {
+  assert(MI.getNumOperands() > opNo && "Not enough operands for instruction");
+
+ MCOperand oper = MI.getOperand(opNo);
+
+ if (oper.isImm()) {
+ EmitIMM(oper, CurByte, OS);
+ } else if (oper.isExpr()) {
+ MCFixupKind FixupKind;
+ switch (MI.getOpcode()) {
+ default:
+ FixupKind = pcrel ? FK_PCRel_2 : FK_Data_2;
+ Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind));
+ break;
+ case MBlaze::ORI32:
+ case MBlaze::ADDIK32:
+ case MBlaze::BRLID32:
+ FixupKind = pcrel ? FK_PCRel_4 : FK_Data_4;
+ Fixups.push_back(MCFixup::Create(0,oper.getExpr(),FixupKind));
+ break;
+ }
+ }
+}
+
+
+
+void MBlazeMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned Opcode = MI.getOpcode();
+ const TargetInstrDesc &Desc = TII.get(Opcode);
+ uint64_t TSFlags = Desc.TSFlags;
+ // Keep track of the current byte being emitted.
+ unsigned CurByte = 0;
+
+ // Emit an IMM instruction if the instruction we are encoding requires it
+ EmitIMM(MI,CurByte,OS);
+
+ switch ((TSFlags & MBlazeII::FormMask)) {
+ default: break;
+ case MBlazeII::FPseudo:
+ // Pseudo instructions don't get encoded.
+ return;
+ case MBlazeII::FRRI:
+ EmitImmediate(MI, 2, false, CurByte, OS, Fixups);
+ break;
+ case MBlazeII::FRIR:
+ EmitImmediate(MI, 1, false, CurByte, OS, Fixups);
+ break;
+ case MBlazeII::FCRI:
+ EmitImmediate(MI, 1, true, CurByte, OS, Fixups);
+ break;
+  case MBlazeII::FRCI:
+    EmitImmediate(MI, 1, true, CurByte, OS, Fixups);
+    break;
+ case MBlazeII::FCCI:
+ EmitImmediate(MI, 0, true, CurByte, OS, Fixups);
+ break;
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted
+ unsigned Value = getBinaryCodeForInstr(MI);
+ EmitConstant(Value, 4, CurByte, OS);
+}
+
+// FIXME: These #defines shouldn't be necessary. Instead, tblgen should
+// be able to generate code emitter helpers for either variant, like it
+// does for the AsmWriter.
+#define MBlazeCodeEmitter MBlazeMCCodeEmitter
+#define MachineInstr MCInst
+#include "MBlazeGenCodeEmitter.inc"
+#undef MBlazeCodeEmitter
+#undef MachineInstr
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp
new file mode 100644
index 0000000..a7e400b
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp
@@ -0,0 +1,166 @@
+//===-- MBlazeMCInstLower.cpp - Convert MBlaze MachineInstr to an MCInst--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower MBlaze MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlazeMCInstLower.h"
+#include "MBlazeInstrInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+using namespace llvm;
+
+MCSymbol *MBlazeMCInstLower::
+GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ switch (MO.getTargetFlags()) {
+ default: llvm_unreachable("Unknown target flag on GV operand");
+ case 0: break;
+ }
+
+ return Printer.Mang->getSymbol(MO.getGlobal());
+}
+
+MCSymbol *MBlazeMCInstLower::
+GetExternalSymbolSymbol(const MachineOperand &MO) const {
+ switch (MO.getTargetFlags()) {
+ default: llvm_unreachable("Unknown target flag on GV operand");
+ case 0: break;
+ }
+
+ return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
+}
+
+MCSymbol *MBlazeMCInstLower::
+GetJumpTableSymbol(const MachineOperand &MO) const {
+ SmallString<256> Name;
+ raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
+ << Printer.getFunctionNumber() << '_'
+ << MO.getIndex();
+ switch (MO.getTargetFlags()) {
+ default: llvm_unreachable("Unknown target flag on GV operand");
+ case 0: break;
+ }
+
+ // Create a symbol for the name.
+ return Ctx.GetOrCreateSymbol(Name.str());
+}
+
+MCSymbol *MBlazeMCInstLower::
+GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
+ SmallString<256> Name;
+ raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
+ << Printer.getFunctionNumber() << '_'
+ << MO.getIndex();
+
+ switch (MO.getTargetFlags()) {
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+
+ case 0: break;
+ }
+
+ // Create a symbol for the name.
+ return Ctx.GetOrCreateSymbol(Name.str());
+}
+
+MCSymbol *MBlazeMCInstLower::
+GetBlockAddressSymbol(const MachineOperand &MO) const {
+ switch (MO.getTargetFlags()) {
+ default:
+ assert(0 && "Unknown target flag on GV operand");
+
+ case 0: break;
+ }
+
+ return Printer.GetBlockAddressSymbol(MO.getBlockAddress());
+}
+
+MCOperand MBlazeMCInstLower::
+LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const {
+ // FIXME: We would like an efficient form for this, so we don't have to do a
+ // lot of extra uniquing.
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+
+ switch (MO.getTargetFlags()) {
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+
+ case 0: break;
+ }
+
+ if (!MO.isJTI() && MO.getOffset())
+ Expr = MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(MO.getOffset(), Ctx),
+ Ctx);
+ return MCOperand::CreateExpr(Expr);
+}
+
+void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default: llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit()) continue;
+ MCOp = MCOperand::CreateReg(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
+ MO.getMBB()->getSymbol(), Ctx));
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
+ break;
+ case MachineOperand::MO_JumpTableIndex:
+ MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_FPImmediate:
+ bool ignored;
+ APFloat FVal = MO.getFPImm()->getValueAPF();
+ FVal.convert(APFloat::IEEEsingle, APFloat::rmTowardZero, &ignored);
+
+ APInt IVal = FVal.bitcastToAPInt();
+ uint64_t Val = *IVal.getRawData();
+ MCOp = MCOperand::CreateImm(Val);
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
index b81a306..92196f2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
@@ -1,4 +1,4 @@
-//===-- ARMMCInstLower.h - Lower MachineInstr to MCInst -------------------===//
+//===-- MBlazeMCInstLower.h - Lower MachineInstr to MCInst ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef ARM_MCINSTLOWER_H
-#define ARM_MCINSTLOWER_H
+#ifndef MBLAZE_MCINSTLOWER_H
+#define MBLAZE_MCINSTLOWER_H
#include "llvm/Support/Compiler.h"
@@ -23,32 +23,26 @@ namespace llvm {
class MachineModuleInfoMachO;
class MachineOperand;
class Mangler;
- //class ARMSubtarget;
-
-/// ARMMCInstLower - This class is used to lower an MachineInstr into an MCInst.
-class LLVM_LIBRARY_VISIBILITY ARMMCInstLower {
+
+  /// MBlazeMCInstLower - This class is used to lower a MachineInstr
+ /// into an MCInst.
+class LLVM_LIBRARY_VISIBILITY MBlazeMCInstLower {
MCContext &Ctx;
Mangler &Mang;
- AsmPrinter &Printer;
- //const ARMSubtarget &getSubtarget() const;
+ AsmPrinter &Printer;
public:
- ARMMCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer)
+ MBlazeMCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer)
: Ctx(ctx), Mang(mang), Printer(printer) {}
-
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
- //MCSymbol *GetPICBaseSymbol() const;
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const;
MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const;
- MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
-/*
-private:
- MachineModuleInfoMachO &getMachOMMI() const;
- */
+ MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const;
};
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
index 1f956c1..df39509 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
@@ -14,6 +14,7 @@
#ifndef MBLAZE_MACHINE_FUNCTION_INFO_H
#define MBLAZE_MACHINE_FUNCTION_INFO_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -26,20 +27,14 @@ namespace llvm {
class MBlazeFunctionInfo : public MachineFunctionInfo {
private:
- /// Holds for each function where on the stack the Frame Pointer must be
+ /// Holds for each function where on the stack the Frame Pointer must be
/// saved. This is used on Prologue and Epilogue to emit FP save/restore
int FPStackOffset;
- /// Holds for each function where on the stack the Return Address must be
+ /// Holds for each function where on the stack the Return Address must be
/// saved. This is used on Prologue and Epilogue to emit RA save/restore
int RAStackOffset;
- /// At each function entry a special bitmask directive must be emitted
- /// to help in debugging CPU callee saved registers. It needs a negative
- /// offset from the final stack size and its higher register location on
- /// the stack.
- int CPUTopSavedRegOff;
-
 /// MBlazeFIHolder - Holds a FrameIndex and its Stack Pointer Offset
struct MBlazeFIHolder {
@@ -50,25 +45,30 @@ private:
: FI(FrameIndex), SPOffset(StackPointerOffset) {}
};
- /// When PIC is used the GP must be saved on the stack on the function
- /// prologue and must be reloaded from this stack location after every
- /// call. A reference to its stack location and frame index must be kept
+ /// When PIC is used the GP must be saved on the stack on the function
+ /// prologue and must be reloaded from this stack location after every
+ /// call. A reference to its stack location and frame index must be kept
/// to be used on emitPrologue and processFunctionBeforeFrameFinalized.
MBlazeFIHolder GPHolder;
/// On LowerFormalArguments the stack size is unknown, so the Stack
- /// Pointer Offset calculation of "not in register arguments" must be
- /// postponed to emitPrologue.
+ /// Pointer Offset calculation of "not in register arguments" must be
+ /// postponed to emitPrologue.
SmallVector<MBlazeFIHolder, 16> FnLoadArgs;
bool HasLoadArgs;
- // When VarArgs, we must write registers back to caller stack, preserving
- // on register arguments. Since the stack size is unknown on
+ // When VarArgs, we must write registers back to caller stack, preserving
+ // on register arguments. Since the stack size is unknown on
// LowerFormalArguments, the Stack Pointer Offset calculation must be
- // postponed to emitPrologue.
+ // postponed to emitPrologue.
SmallVector<MBlazeFIHolder, 4> FnStoreVarArgs;
bool HasStoreVarArgs;
+ // When determining the final stack layout some of the frame indexes may
+ // be replaced by new frame indexes that reside in the caller's stack
+ // frame. The replacements are recorded in this structure.
+ DenseMap<int,int> FIReplacements;
+
/// SRetReturnReg - Some subtargets require that sret lowering includes
/// returning the value of the returned struct in a register. This field
/// holds the virtual register into which the sret argument is passed.
@@ -82,11 +82,15 @@ private:
// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex;
+  /// LiveInFI - keeps track of the frame indexes in a caller's stack
+ /// frame that are live into a function.
+ SmallVector<int, 16> LiveInFI;
+
public:
- MBlazeFunctionInfo(MachineFunction& MF)
- : FPStackOffset(0), RAStackOffset(0), CPUTopSavedRegOff(0),
- GPHolder(-1,-1), HasLoadArgs(false), HasStoreVarArgs(false),
- SRetReturnReg(0), GlobalBaseReg(0), VarArgsFrameIndex(0)
+ MBlazeFunctionInfo(MachineFunction& MF)
+ : FPStackOffset(0), RAStackOffset(0), GPHolder(-1,-1), HasLoadArgs(false),
+ HasStoreVarArgs(false), SRetReturnReg(0), GlobalBaseReg(0),
+ VarArgsFrameIndex(0), LiveInFI()
{}
int getFPStackOffset() const { return FPStackOffset; }
@@ -95,9 +99,6 @@ public:
int getRAStackOffset() const { return RAStackOffset; }
void setRAStackOffset(int Off) { RAStackOffset = Off; }
- int getCPUTopSavedRegOff() const { return CPUTopSavedRegOff; }
- void setCPUTopSavedRegOff(int Off) { CPUTopSavedRegOff = Off; }
-
int getGPStackOffset() const { return GPHolder.SPOffset; }
int getGPFI() const { return GPHolder.FI; }
void setGPStackOffset(int Off) { GPHolder.SPOffset = Off; }
@@ -105,12 +106,38 @@ public:
bool needGPSaveRestore() const { return GPHolder.SPOffset != -1; }
bool hasLoadArgs() const { return HasLoadArgs; }
- bool hasStoreVarArgs() const { return HasStoreVarArgs; }
+ bool hasStoreVarArgs() const { return HasStoreVarArgs; }
+
+ void recordLiveIn(int FI) {
+ LiveInFI.push_back(FI);
+ }
+
+ bool isLiveIn(int FI) {
+ for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i)
+ if (FI == LiveInFI[i]) return true;
+
+ return false;
+ }
+
+ const SmallVector<int, 16>& getLiveIn() const { return LiveInFI; }
+
+ void recordReplacement(int OFI, int NFI) {
+ FIReplacements.insert(std::make_pair(OFI,NFI));
+ }
+
+ bool hasReplacement(int OFI) const {
+ return FIReplacements.find(OFI) != FIReplacements.end();
+ }
+
+ int getReplacement(int OFI) const {
+ return FIReplacements.lookup(OFI);
+ }
void recordLoadArgsFI(int FI, int SPOffset) {
if (!HasLoadArgs) HasLoadArgs=true;
FnLoadArgs.push_back(MBlazeFIHolder(FI, SPOffset));
}
+
void recordStoreVarArgsFI(int FI, int SPOffset) {
if (!HasStoreVarArgs) HasStoreVarArgs=true;
FnStoreVarArgs.push_back(MBlazeFIHolder(FI, SPOffset));
@@ -118,13 +145,14 @@ public:
void adjustLoadArgsFI(MachineFrameInfo *MFI) const {
if (!hasLoadArgs()) return;
- for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i)
- MFI->setObjectOffset( FnLoadArgs[i].FI, FnLoadArgs[i].SPOffset );
+ for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i)
+ MFI->setObjectOffset(FnLoadArgs[i].FI, FnLoadArgs[i].SPOffset);
}
+
void adjustStoreVarArgsFI(MachineFrameInfo *MFI) const {
- if (!hasStoreVarArgs()) return;
- for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i)
- MFI->setObjectOffset( FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset );
+ if (!hasStoreVarArgs()) return;
+ for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i)
+ MFI->setObjectOffset(FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset);
}
unsigned getSRetReturnReg() const { return SRetReturnReg; }
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
index 22b6a30..fa9140d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mblaze-reg-info"
+#define DEBUG_TYPE "mblaze-frame-info"
#include "MBlaze.h"
#include "MBlazeSubtarget.h"
@@ -26,7 +26,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -48,38 +48,62 @@ MBlazeRegisterInfo(const MBlazeSubtarget &ST, const TargetInstrInfo &tii)
/// MBlaze::R0, return the number that it corresponds to (e.g. 0).
unsigned MBlazeRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
switch (RegEnum) {
- case MBlaze::R0 : case MBlaze::F0 : return 0;
- case MBlaze::R1 : case MBlaze::F1 : return 1;
- case MBlaze::R2 : case MBlaze::F2 : return 2;
- case MBlaze::R3 : case MBlaze::F3 : return 3;
- case MBlaze::R4 : case MBlaze::F4 : return 4;
- case MBlaze::R5 : case MBlaze::F5 : return 5;
- case MBlaze::R6 : case MBlaze::F6 : return 6;
- case MBlaze::R7 : case MBlaze::F7 : return 7;
- case MBlaze::R8 : case MBlaze::F8 : return 8;
- case MBlaze::R9 : case MBlaze::F9 : return 9;
- case MBlaze::R10 : case MBlaze::F10 : return 10;
- case MBlaze::R11 : case MBlaze::F11 : return 11;
- case MBlaze::R12 : case MBlaze::F12 : return 12;
- case MBlaze::R13 : case MBlaze::F13 : return 13;
- case MBlaze::R14 : case MBlaze::F14 : return 14;
- case MBlaze::R15 : case MBlaze::F15 : return 15;
- case MBlaze::R16 : case MBlaze::F16 : return 16;
- case MBlaze::R17 : case MBlaze::F17 : return 17;
- case MBlaze::R18 : case MBlaze::F18 : return 18;
- case MBlaze::R19 : case MBlaze::F19 : return 19;
- case MBlaze::R20 : case MBlaze::F20 : return 20;
- case MBlaze::R21 : case MBlaze::F21 : return 21;
- case MBlaze::R22 : case MBlaze::F22 : return 22;
- case MBlaze::R23 : case MBlaze::F23 : return 23;
- case MBlaze::R24 : case MBlaze::F24 : return 24;
- case MBlaze::R25 : case MBlaze::F25 : return 25;
- case MBlaze::R26 : case MBlaze::F26 : return 26;
- case MBlaze::R27 : case MBlaze::F27 : return 27;
- case MBlaze::R28 : case MBlaze::F28 : return 28;
- case MBlaze::R29 : case MBlaze::F29 : return 29;
- case MBlaze::R30 : case MBlaze::F30 : return 30;
- case MBlaze::R31 : case MBlaze::F31 : return 31;
+ case MBlaze::R0 : return 0;
+ case MBlaze::R1 : return 1;
+ case MBlaze::R2 : return 2;
+ case MBlaze::R3 : return 3;
+ case MBlaze::R4 : return 4;
+ case MBlaze::R5 : return 5;
+ case MBlaze::R6 : return 6;
+ case MBlaze::R7 : return 7;
+ case MBlaze::R8 : return 8;
+ case MBlaze::R9 : return 9;
+ case MBlaze::R10 : return 10;
+ case MBlaze::R11 : return 11;
+ case MBlaze::R12 : return 12;
+ case MBlaze::R13 : return 13;
+ case MBlaze::R14 : return 14;
+ case MBlaze::R15 : return 15;
+ case MBlaze::R16 : return 16;
+ case MBlaze::R17 : return 17;
+ case MBlaze::R18 : return 18;
+ case MBlaze::R19 : return 19;
+ case MBlaze::R20 : return 20;
+ case MBlaze::R21 : return 21;
+ case MBlaze::R22 : return 22;
+ case MBlaze::R23 : return 23;
+ case MBlaze::R24 : return 24;
+ case MBlaze::R25 : return 25;
+ case MBlaze::R26 : return 26;
+ case MBlaze::R27 : return 27;
+ case MBlaze::R28 : return 28;
+ case MBlaze::R29 : return 29;
+ case MBlaze::R30 : return 30;
+ case MBlaze::R31 : return 31;
+ case MBlaze::RPC : return 0x0000;
+ case MBlaze::RMSR : return 0x0001;
+ case MBlaze::REAR : return 0x0003;
+ case MBlaze::RESR : return 0x0005;
+ case MBlaze::RFSR : return 0x0007;
+ case MBlaze::RBTR : return 0x000B;
+ case MBlaze::REDR : return 0x000D;
+ case MBlaze::RPID : return 0x1000;
+ case MBlaze::RZPR : return 0x1001;
+ case MBlaze::RTLBX : return 0x1002;
+ case MBlaze::RTLBLO : return 0x1003;
+ case MBlaze::RTLBHI : return 0x1004;
+ case MBlaze::RPVR0 : return 0x2000;
+ case MBlaze::RPVR1 : return 0x2001;
+ case MBlaze::RPVR2 : return 0x2002;
+ case MBlaze::RPVR3 : return 0x2003;
+ case MBlaze::RPVR4 : return 0x2004;
+ case MBlaze::RPVR5 : return 0x2005;
+ case MBlaze::RPVR6 : return 0x2006;
+ case MBlaze::RPVR7 : return 0x2007;
+ case MBlaze::RPVR8 : return 0x2008;
+ case MBlaze::RPVR9 : return 0x2009;
+ case MBlaze::RPVR10 : return 0x200A;
+ case MBlaze::RPVR11 : return 0x200B;
default: llvm_unreachable("Unknown register number!");
}
return 0; // Not reached
@@ -126,6 +150,37 @@ unsigned MBlazeRegisterInfo::getRegisterFromNumbering(unsigned Reg) {
return 0; // Not reached
}
+unsigned MBlazeRegisterInfo::getSpecialRegisterFromNumbering(unsigned Reg) {
+ switch (Reg) {
+ case 0x0000 : return MBlaze::RPC;
+ case 0x0001 : return MBlaze::RMSR;
+ case 0x0003 : return MBlaze::REAR;
+ case 0x0005 : return MBlaze::RESR;
+ case 0x0007 : return MBlaze::RFSR;
+ case 0x000B : return MBlaze::RBTR;
+ case 0x000D : return MBlaze::REDR;
+ case 0x1000 : return MBlaze::RPID;
+ case 0x1001 : return MBlaze::RZPR;
+ case 0x1002 : return MBlaze::RTLBX;
+ case 0x1003 : return MBlaze::RTLBLO;
+ case 0x1004 : return MBlaze::RTLBHI;
+ case 0x2000 : return MBlaze::RPVR0;
+ case 0x2001 : return MBlaze::RPVR1;
+ case 0x2002 : return MBlaze::RPVR2;
+ case 0x2003 : return MBlaze::RPVR3;
+ case 0x2004 : return MBlaze::RPVR4;
+ case 0x2005 : return MBlaze::RPVR5;
+ case 0x2006 : return MBlaze::RPVR6;
+ case 0x2007 : return MBlaze::RPVR7;
+ case 0x2008 : return MBlaze::RPVR8;
+ case 0x2009 : return MBlaze::RPVR9;
+ case 0x200A : return MBlaze::RPVR10;
+ case 0x200B : return MBlaze::RPVR11;
+ default: llvm_unreachable("Unknown register number!");
+ }
+ return 0; // Not reached
+}
+
unsigned MBlazeRegisterInfo::getPICCallReg() {
return MBlaze::R20;
}
@@ -164,77 +219,40 @@ getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-//===----------------------------------------------------------------------===//
-//
-// Stack Frame Processing methods
-// +----------------------------+
-//
-// The stack is allocated decrementing the stack pointer on
-// the first instruction of a function prologue. Once decremented,
-// all stack references are are done through a positive offset
-// from the stack/frame pointer, so the stack is considered
-// to grow up.
-//
-//===----------------------------------------------------------------------===//
-
-void MBlazeRegisterInfo::adjustMBlazeStackFrame(MachineFunction &MF) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
-
- // See the description at MicroBlazeMachineFunction.h
- int TopCPUSavedRegOff = -1;
-
- // Adjust CPU Callee Saved Registers Area. Registers RA and FP must
- // be saved in this CPU Area there is the need. This whole Area must
- // be aligned to the default Stack Alignment requirements.
- unsigned StackOffset = MFI->getStackSize();
- unsigned RegSize = 4;
-
-  // Replace the dummy '0' SPOffset by the negative offsets, as explained in
-  // LowerFORMAL_ARGUMENTS. Leaving '0' for a while is necessary to avoid
-  // the layout calculateFrameObjectOffsets would otherwise apply to the frame.
- MBlazeFI->adjustLoadArgsFI(MFI);
- MBlazeFI->adjustStoreVarArgsFI(MFI);
-
- if (hasFP(MF)) {
- MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
- StackOffset);
- MBlazeFI->setFPStackOffset(StackOffset);
- TopCPUSavedRegOff = StackOffset;
- StackOffset += RegSize;
- }
-
- if (MFI->adjustsStack()) {
- MBlazeFI->setRAStackOffset(0);
- MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
- StackOffset);
- TopCPUSavedRegOff = StackOffset;
- StackOffset += RegSize;
- }
-
- // Update frame info
- MFI->setStackSize(StackOffset);
-
- // Recalculate the final tops offset. The final values must be '0'
- // if there isn't a callee saved register for CPU or FPU, otherwise
- // a negative offset is needed.
- if (TopCPUSavedRegOff >= 0)
- MBlazeFI->setCPUTopSavedRegOff(TopCPUSavedRegOff-StackOffset);
-}
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool MBlazeRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
-}
-
-// This function eliminate ADJCALLSTACKDOWN,
-// ADJCALLSTACKUP pseudo instructions
+// This function eliminates ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudo instructions
void MBlazeRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
+ // If we have a frame pointer, turn the adjcallstackup instruction into a
+ // 'addi r1, r1, -<amt>' and the adjcallstackdown instruction into
+ // 'addi r1, r1, <amt>'
+ MachineInstr *Old = I;
+ int Amount = Old->getOperand(0).getImm() + 4;
+ if (Amount != 0) {
+ // We need to keep the stack aligned properly. To do this, we round the
+ // amount of space needed for the outgoing arguments up to the next
+ // alignment boundary.
+ unsigned Align = TFI->getStackAlignment();
+ Amount = (Amount+Align-1)/Align*Align;
+
+ MachineInstr *New;
+ if (Old->getOpcode() == MBlaze::ADJCALLSTACKDOWN) {
+ New = BuildMI(MF,Old->getDebugLoc(),TII.get(MBlaze::ADDIK),MBlaze::R1)
+ .addReg(MBlaze::R1).addImm(-Amount);
+ } else {
+ assert(Old->getOpcode() == MBlaze::ADJCALLSTACKUP);
+ New = BuildMI(MF,Old->getDebugLoc(),TII.get(MBlaze::ADDIK),MBlaze::R1)
+ .addReg(MBlaze::R1).addImm(Amount);
+ }
+
+ // Replace the pseudo instruction with a new instruction...
+ MBB.insert(I, New);
+ }
+ }
+
// Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
MBB.erase(I);
}
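The expression (Amount+Align-1)/Align*Align used above is the usual round-up-to-multiple idiom for sizing the outgoing-argument area. A standalone sketch of the arithmetic:

// Round Amount up to the next multiple of Align (integer division truncates).
// Example with Align = 4: 10 -> (10+3)/4*4 = 12; 12 -> (12+3)/4*4 = 12.
static unsigned roundUpToAlignment(unsigned Amount, unsigned Align) {
  return (Amount + Align - 1) / Align * Align;
}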
@@ -247,6 +265,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
RegScavenger *RS) const {
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned i = 0;
while (!MI.getOperand(i).isFI()) {
@@ -257,117 +276,34 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned oi = i == 2 ? 1 : 2;
- DEBUG(errs() << "\nFunction : " << MF.getFunction()->getName() << "\n";
- errs() << "<--------->\n" << MI);
+ DEBUG(dbgs() << "\nFunction : " << MF.getFunction()->getName() << "\n";
+ dbgs() << "<--------->\n" << MI);
int FrameIndex = MI.getOperand(i).getIndex();
- int stackSize = MF.getFrameInfo()->getStackSize();
- int spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
+ int stackSize = MFI->getStackSize();
+ int spOffset = MFI->getObjectOffset(FrameIndex);
- DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
+ DEBUG(MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
+ dbgs() << "FrameIndex : " << FrameIndex << "\n"
<< "spOffset : " << spOffset << "\n"
- << "stackSize : " << stackSize << "\n");
+ << "stackSize : " << stackSize << "\n"
+ << "isFixed : " << MFI->isFixedObjectIndex(FrameIndex) << "\n"
+ << "isLiveIn : " << MBlazeFI->isLiveIn(FrameIndex) << "\n"
+ << "isSpill : " << MFI->isSpillSlotObjectIndex(FrameIndex)
+ << "\n" );
// as explained in LowerFormalArguments, detect negative offsets
// and adjust SPOffsets considering the final stack size.
- int Offset = (spOffset < 0) ? (stackSize - spOffset) : (spOffset + 4);
- Offset += MI.getOperand(oi).getImm();
+ int Offset = (spOffset < 0) ? (stackSize - spOffset) : spOffset;
+ Offset += MI.getOperand(oi).getImm();
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+ DEBUG(dbgs() << "Offset : " << Offset << "\n" << "<--------->\n");
MI.getOperand(oi).ChangeToImmediate(Offset);
MI.getOperand(i).ChangeToRegister(getFrameRegister(MF), false);
}
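After this change, negative SP offsets (objects laid out below the incoming stack pointer) are rebased against the final stack size, while non-negative offsets are used directly; the operand's existing immediate is then added on top. A worked restatement with illustrative numbers:

// Illustrative only: mirrors the Offset computation above.
// stackSize = 32, spOffset = -4 -> Offset = 32 - (-4) = 36
// stackSize = 32, spOffset =  8 -> Offset = 8
static int mblazeFrameIndexOffset(int stackSize, int spOffset, int imm) {
  int Offset = (spOffset < 0) ? (stackSize - spOffset) : spOffset;
  return Offset + imm;
}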
void MBlazeRegisterInfo::
-emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Get the right frame order for MBlaze.
- adjustMBlazeStackFrame(MF);
-
- // Get the number of bytes to allocate from the FrameInfo.
- unsigned StackSize = MFI->getStackSize();
-
- // No need to allocate space on the stack.
- if (StackSize == 0 && !MFI->adjustsStack()) return;
- if (StackSize < 28 && MFI->adjustsStack()) StackSize = 28;
-
- int FPOffset = MBlazeFI->getFPStackOffset();
- int RAOffset = MBlazeFI->getRAStackOffset();
-
- // Adjust stack : addi R1, R1, -imm
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADDI), MBlaze::R1)
- .addReg(MBlaze::R1).addImm(-StackSize);
-
-  // Save the return address only if the function isn't a leaf one.
- // swi R15, R1, stack_loc
- if (MFI->adjustsStack()) {
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
- .addReg(MBlaze::R15).addImm(RAOffset).addReg(MBlaze::R1);
- }
-
- // if framepointer enabled, save it and set it
- // to point to the stack pointer
- if (hasFP(MF)) {
- // swi R19, R1, stack_loc
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::SWI))
- .addReg(MBlaze::R19).addImm(FPOffset).addReg(MBlaze::R1);
-
- // add R19, R1, R0
- BuildMI(MBB, MBBI, DL, TII.get(MBlaze::ADD), MBlaze::R19)
- .addReg(MBlaze::R1).addReg(MBlaze::R0);
- }
-}
-
-void MBlazeRegisterInfo::
-emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- DebugLoc dl = MBBI->getDebugLoc();
-
- // Get the FI's where RA and FP are saved.
- int FPOffset = MBlazeFI->getFPStackOffset();
- int RAOffset = MBlazeFI->getRAStackOffset();
-
- // if framepointer enabled, restore it and restore the
- // stack pointer
- if (hasFP(MF)) {
- // add R1, R19, R0
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADD), MBlaze::R1)
- .addReg(MBlaze::R19).addReg(MBlaze::R0);
-
- // lwi R19, R1, stack_loc
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R19)
- .addImm(FPOffset).addReg(MBlaze::R1);
- }
-
-  // Restore the return address only if the function isn't a leaf one.
- // lwi R15, R1, stack_loc
- if (MFI->adjustsStack()) {
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::LWI), MBlaze::R15)
- .addImm(RAOffset).addReg(MBlaze::R1);
- }
-
- // Get the number of bytes from FrameInfo
- int StackSize = (int) MFI->getStackSize();
- if (StackSize < 28 && MFI->adjustsStack()) StackSize = 28;
-
- // adjust stack.
- // addi R1, R1, imm
- if (StackSize) {
- BuildMI(MBB, MBBI, dl, TII.get(MBlaze::ADDI), MBlaze::R1)
- .addReg(MBlaze::R1).addImm(StackSize);
- }
-}
-
-
-void MBlazeRegisterInfo::
processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
// Set the stack offset where GP must be saved/loaded from.
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -381,7 +317,9 @@ unsigned MBlazeRegisterInfo::getRARegister() const {
}
unsigned MBlazeRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? MBlaze::R19 : MBlaze::R1;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? MBlaze::R19 : MBlaze::R1;
}
unsigned MBlazeRegisterInfo::getEHExceptionRegister() const {
@@ -394,9 +332,8 @@ unsigned MBlazeRegisterInfo::getEHHandlerRegister() const {
return 0;
}
-int MBlazeRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
- llvm_unreachable("What is the dwarf register number");
- return -1;
+int MBlazeRegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
+ return MBlazeGenRegisterInfo::getDwarfRegNumFull(RegNo,0);
}
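getDwarfRegNumFull is TableGen-generated from the DwarfRegNum<[...]> annotations in MBlazeRegisterInfo.td (diffed below). A sketch of the expected behavior, where RI is an illustrative name for some MBlazeRegisterInfo instance:

// def R5 : MBlazeGPRReg<5, "r5">, DwarfRegNum<[5]>;   // from the .td file
int DwarfNum = RI.getDwarfRegNum(MBlaze::R5, /*isEH=*/false); // expected: 5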
#include "MBlazeGenRegisterInfo.inc"
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
index 1e1fde1..839536d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
@@ -25,8 +25,8 @@ class TargetInstrInfo;
class Type;
namespace MBlaze {
- /// SubregIndex - The index of various sized subregister classes. Note that
- /// these indices must be kept in sync with the class indices in the
+ /// SubregIndex - The index of various sized subregister classes. Note that
+ /// these indices must be kept in sync with the class indices in the
/// MBlazeRegisterInfo.td file.
enum SubregIndex {
SUBREG_FPEVEN = 1, SUBREG_FPODD = 2
@@ -36,7 +36,7 @@ namespace MBlaze {
struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
const MBlazeSubtarget &Subtarget;
const TargetInstrInfo &TII;
-
+
MBlazeRegisterInfo(const MBlazeSubtarget &Subtarget,
const TargetInstrInfo &tii);
@@ -44,20 +44,16 @@ struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
/// MBlaze::RA, return the number that it corresponds to (e.g. 31).
static unsigned getRegisterNumbering(unsigned RegEnum);
static unsigned getRegisterFromNumbering(unsigned RegEnum);
+ static unsigned getSpecialRegisterFromNumbering(unsigned RegEnum);
/// Get PIC indirect call register
static unsigned getPICCallReg();
- /// Adjust the MBlaze stack frame.
- void adjustMBlazeStackFrame(MachineFunction &MF) const;
-
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -68,9 +64,6 @@ struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
/// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
@@ -79,11 +72,6 @@ struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
unsigned getEHExceptionRegister() const;
unsigned getEHHandlerRegister() const;
- /// targetHandlesStackFrameRounding - Returns true if the target is
- /// responsible for rounding up the stack frame (probably at emitPrologue
- /// time).
- bool targetHandlesStackFrameRounding() const { return true; }
-
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
};
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
index 5e93510..fbefb22 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
@@ -17,15 +17,20 @@ class MBlazeReg<string n> : Register<n> {
let Namespace = "MBlaze";
}
-// MBlaze CPU Registers
+// Special purpose registers have 15-bit register numbers
+class MBlazeSReg<string n> : Register<n> {
+ field bits<15> Num;
+ let Namespace = "MBlaze";
+}
+
+// MBlaze general purpose registers
class MBlazeGPRReg<bits<5> num, string n> : MBlazeReg<n> {
let Num = num;
}
-// MBlaze 32-bit (aliased) FPU Registers
-class FPR<bits<5> num, string n, list<Register> aliases> : MBlazeReg<n> {
+// MBlaze special purpose registers
+class MBlazeSPRReg<bits<15> num, string n> : MBlazeSReg<n> {
let Num = num;
- let Aliases = aliases;
}
//===----------------------------------------------------------------------===//
@@ -33,7 +38,6 @@ class FPR<bits<5> num, string n, list<Register> aliases> : MBlazeReg<n> {
//===----------------------------------------------------------------------===//
let Namespace = "MBlaze" in {
-
// General Purpose Registers
def R0 : MBlazeGPRReg< 0, "r0">, DwarfRegNum<[0]>;
def R1 : MBlazeGPRReg< 1, "r1">, DwarfRegNum<[1]>;
@@ -68,46 +72,43 @@ let Namespace = "MBlaze" in {
def R30 : MBlazeGPRReg< 30, "r30">, DwarfRegNum<[30]>;
def R31 : MBlazeGPRReg< 31, "r31">, DwarfRegNum<[31]>;
- /// MBlaze Single point precision FPU Registers
- def F0 : FPR< 0, "r0", [R0]>, DwarfRegNum<[32]>;
- def F1 : FPR< 1, "r1", [R1]>, DwarfRegNum<[33]>;
- def F2 : FPR< 2, "r2", [R2]>, DwarfRegNum<[34]>;
- def F3 : FPR< 3, "r3", [R3]>, DwarfRegNum<[35]>;
- def F4 : FPR< 4, "r4", [R4]>, DwarfRegNum<[36]>;
- def F5 : FPR< 5, "r5", [R5]>, DwarfRegNum<[37]>;
- def F6 : FPR< 6, "r6", [R6]>, DwarfRegNum<[38]>;
- def F7 : FPR< 7, "r7", [R7]>, DwarfRegNum<[39]>;
- def F8 : FPR< 8, "r8", [R8]>, DwarfRegNum<[40]>;
- def F9 : FPR< 9, "r9", [R9]>, DwarfRegNum<[41]>;
- def F10 : FPR<10, "r10", [R10]>, DwarfRegNum<[42]>;
- def F11 : FPR<11, "r11", [R11]>, DwarfRegNum<[43]>;
- def F12 : FPR<12, "r12", [R12]>, DwarfRegNum<[44]>;
- def F13 : FPR<13, "r13", [R13]>, DwarfRegNum<[45]>;
- def F14 : FPR<14, "r14", [R14]>, DwarfRegNum<[46]>;
- def F15 : FPR<15, "r15", [R15]>, DwarfRegNum<[47]>;
- def F16 : FPR<16, "r16", [R16]>, DwarfRegNum<[48]>;
- def F17 : FPR<17, "r17", [R17]>, DwarfRegNum<[49]>;
- def F18 : FPR<18, "r18", [R18]>, DwarfRegNum<[50]>;
- def F19 : FPR<19, "r19", [R19]>, DwarfRegNum<[51]>;
- def F20 : FPR<20, "r20", [R20]>, DwarfRegNum<[52]>;
- def F21 : FPR<21, "r21", [R21]>, DwarfRegNum<[53]>;
- def F22 : FPR<22, "r22", [R22]>, DwarfRegNum<[54]>;
- def F23 : FPR<23, "r23", [R23]>, DwarfRegNum<[55]>;
- def F24 : FPR<24, "r24", [R24]>, DwarfRegNum<[56]>;
- def F25 : FPR<25, "r25", [R25]>, DwarfRegNum<[57]>;
- def F26 : FPR<26, "r26", [R26]>, DwarfRegNum<[58]>;
- def F27 : FPR<27, "r27", [R27]>, DwarfRegNum<[59]>;
- def F28 : FPR<28, "r28", [R28]>, DwarfRegNum<[60]>;
- def F29 : FPR<29, "r29", [R29]>, DwarfRegNum<[61]>;
- def F30 : FPR<30, "r30", [R30]>, DwarfRegNum<[62]>;
- def F31 : FPR<31, "r31", [R31]>, DwarfRegNum<[63]>;
+ // Special Purpose Registers
+ def RPC : MBlazeSPRReg<0x0000, "rpc">, DwarfRegNum<[32]>;
+ def RMSR : MBlazeSPRReg<0x0001, "rmsr">, DwarfRegNum<[33]>;
+ def REAR : MBlazeSPRReg<0x0003, "rear">, DwarfRegNum<[34]>;
+ def RESR : MBlazeSPRReg<0x0005, "resr">, DwarfRegNum<[35]>;
+ def RFSR : MBlazeSPRReg<0x0007, "rfsr">, DwarfRegNum<[36]>;
+ def RBTR : MBlazeSPRReg<0x000B, "rbtr">, DwarfRegNum<[37]>;
+ def REDR : MBlazeSPRReg<0x000D, "redr">, DwarfRegNum<[38]>;
+ def RPID : MBlazeSPRReg<0x1000, "rpid">, DwarfRegNum<[39]>;
+ def RZPR : MBlazeSPRReg<0x1001, "rzpr">, DwarfRegNum<[40]>;
+ def RTLBX : MBlazeSPRReg<0x1002, "rtlbx">, DwarfRegNum<[41]>;
+ def RTLBLO : MBlazeSPRReg<0x1003, "rtlblo">, DwarfRegNum<[42]>;
+ def RTLBHI : MBlazeSPRReg<0x1004, "rtlbhi">, DwarfRegNum<[43]>;
+ def RPVR0 : MBlazeSPRReg<0x2000, "rpvr0">, DwarfRegNum<[44]>;
+ def RPVR1 : MBlazeSPRReg<0x2001, "rpvr1">, DwarfRegNum<[45]>;
+ def RPVR2 : MBlazeSPRReg<0x2002, "rpvr2">, DwarfRegNum<[46]>;
+ def RPVR3 : MBlazeSPRReg<0x2003, "rpvr3">, DwarfRegNum<[47]>;
+ def RPVR4 : MBlazeSPRReg<0x2004, "rpvr4">, DwarfRegNum<[48]>;
+ def RPVR5 : MBlazeSPRReg<0x2005, "rpvr5">, DwarfRegNum<[49]>;
+ def RPVR6 : MBlazeSPRReg<0x2006, "rpvr6">, DwarfRegNum<[50]>;
+ def RPVR7 : MBlazeSPRReg<0x2007, "rpvr7">, DwarfRegNum<[51]>;
+ def RPVR8 : MBlazeSPRReg<0x2008, "rpvr8">, DwarfRegNum<[52]>;
+ def RPVR9 : MBlazeSPRReg<0x2009, "rpvr9">, DwarfRegNum<[53]>;
+ def RPVR10 : MBlazeSPRReg<0x200A, "rpvr10">, DwarfRegNum<[54]>;
+ def RPVR11 : MBlazeSPRReg<0x200B, "rpvr11">, DwarfRegNum<[55]>;
+
+  // The carry bit. On the MicroBlaze this is really bit 29 of the
+  // MSR register, but it is the only bit of that register that we
+  // are interested in modeling.
+ def CARRY : MBlazeSPRReg<0x0000, "rmsr[c]">, DwarfRegNum<[33]>;
}
//===----------------------------------------------------------------------===//
// Register Classes
//===----------------------------------------------------------------------===//
-def CPURegs : RegisterClass<"MBlaze", [i32], 32,
+def GPR : RegisterClass<"MBlaze", [i32,f32], 32,
[
// Return Values and Arguments
R3, R4, R5, R6, R7, R8, R9, R10,
@@ -135,46 +136,55 @@ def CPURegs : RegisterClass<"MBlaze", [i32], 32,
iterator allocation_order_end(const MachineFunction &MF) const;
}];
let MethodBodies = [{
- CPURegsClass::iterator
- CPURegsClass::allocation_order_end(const MachineFunction &MF) const {
+ GPRClass::iterator
+ GPRClass::allocation_order_end(const MachineFunction &MF) const {
// The last 10 registers on the list above are reserved
return end()-10;
}
}];
}
-def FGR32 : RegisterClass<"MBlaze", [f32], 32,
+def SPR : RegisterClass<"MBlaze", [i32], 32,
[
- // Return Values and Arguments
- F3, F4, F5, F6, F7, F8, F9, F10,
-
- // Not preserved across procedure calls
- F11, F12,
-
- // Callee save
- F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30, F31,
-
// Reserved
- F0, // Always zero
- F1, // The stack pointer
- F2, // Read-only small data area anchor
- F13, // Read-write small data area anchor
- F14, // Return address for interrupts
- F15, // Return address for sub-routines
- F16, // Return address for trap
- F17, // Return address for exceptions
- F18, // Reserved for assembler
- F19 // The frame pointer
+ RPC,
+ RMSR,
+ REAR,
+ RESR,
+ RFSR,
+ RBTR,
+ REDR,
+ RPID,
+ RZPR,
+ RTLBX,
+ RTLBLO,
+ RTLBHI,
+ RPVR0,
+ RPVR1,
+ RPVR2,
+ RPVR3,
+ RPVR4,
+ RPVR5,
+ RPVR6,
+ RPVR7,
+ RPVR8,
+ RPVR9,
+ RPVR10,
+ RPVR11
]>
{
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
let MethodBodies = [{
- FGR32Class::iterator
- FGR32Class::allocation_order_end(const MachineFunction &MF) const {
- // The last 10 registers on the list above are reserved
- return end()-10;
+ SPRClass::iterator
+ SPRClass::allocation_order_end(const MachineFunction &MF) const {
+ // None of the special purpose registers are allocatable.
+ return end()-24;
}
}];
}
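The SPR class lists exactly 24 registers, so end()-24 == begin() and the allocatable range is empty: special purpose registers are only ever referenced explicitly, never handed out by the register allocator. A sketch of the iterator arithmetic, assuming the allocation_order_begin/allocation_order_end interface of this LLVM vintage:

// Empty allocation order <=> no register in the class is allocatable.
static bool noneAllocatable(const TargetRegisterClass &RC,
                            const MachineFunction &MF) {
  return RC.allocation_order_begin(MF) == RC.allocation_order_end(MF);
}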
+
+def CRC : RegisterClass<"MBlaze", [i32], 32, [CARRY]> {
+ let CopyCost = -1;
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h b/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h
new file mode 100644
index 0000000..c298eda
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h
@@ -0,0 +1,47 @@
+//===- MBlazeRelocations.h - MBlaze Code Relocations ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MBlaze target-specific relocation types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MBLAZERELOCATIONS_H
+#define MBLAZERELOCATIONS_H
+
+#include "llvm/CodeGen/MachineRelocation.h"
+
+namespace llvm {
+ namespace MBlaze {
+ enum RelocationType {
+ /// reloc_pcrel_word - PC relative relocation, add the relocated value to
+ /// the value already in memory, after we adjust it for where the PC is.
+ reloc_pcrel_word = 0,
+
+ /// reloc_picrel_word - PIC base relative relocation, add the relocated
+ /// value to the value already in memory, after we adjust it for where the
+ /// PIC base is.
+ reloc_picrel_word = 1,
+
+ /// reloc_absolute_word - absolute relocation, just add the relocated
+ /// value to the value already in memory.
+ reloc_absolute_word = 2,
+
+ /// reloc_absolute_word_sext - absolute relocation, just add the relocated
+ /// value to the value already in memory. In object files, it represents a
+ /// value which must be sign-extended when resolving the relocation.
+ reloc_absolute_word_sext = 3,
+
+ /// reloc_absolute_dword - absolute relocation, just add the relocated
+ /// value to the value already in memory.
+ reloc_absolute_dword = 4
+ };
+ }
+}
+
+#endif
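The comments above all describe add-in-place semantics. A hedged sketch of how a relocation resolver might apply reloc_absolute_word per that description; this is not the actual LLVM JIT resolver:

#include <stdint.h>
// Add the relocated value to the 32-bit word already in memory.
static void applyAbsoluteWord(uint8_t *Address, intptr_t RelocatedValue) {
  uint32_t *Word = reinterpret_cast<uint32_t *>(Address);
  *Word += static_cast<uint32_t>(RelocatedValue);
}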
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
index 4a65542..ac4d98c 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
@@ -14,7 +14,7 @@ def ALU : FuncUnit;
def IMULDIV : FuncUnit;
//===----------------------------------------------------------------------===//
-// Instruction Itinerary classes used for MBlaze
+// Instruction Itinerary classes used for MBlaze
//===----------------------------------------------------------------------===//
def IIAlu : InstrItinClass;
def IILoad : InstrItinClass;
@@ -41,7 +41,7 @@ def IIPseudo : InstrItinClass;
// MBlaze Generic instruction itineraries.
//===----------------------------------------------------------------------===//
def MBlazeGenericItineraries : ProcessorItineraries<
- [ALU, IMULDIV], [
+ [ALU, IMULDIV], [], [
InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>,
InstrItinData<IILoad , [InstrStage<3, [ALU]>]>,
InstrItinData<IIStore , [InstrStage<1, [ALU]>]>,
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
index 4252953..cd949e1 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
@@ -15,13 +15,62 @@
#include "MBlazeMCAsmInfo.h"
#include "MBlazeTargetMachine.h"
#include "llvm/PassManager.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
using namespace llvm;
+static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
+ Triple TheTriple(TT);
+ switch (TheTriple.getOS()) {
+ default:
+ return new MBlazeMCAsmInfo();
+ }
+}
+
+static MCStreamer *createMCStreamer(const Target &T, const std::string &TT,
+ MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &_OS,
+ MCCodeEmitter *_Emitter,
+ bool RelaxAll,
+ bool NoExecStack) {
+ Triple TheTriple(TT);
+ switch (TheTriple.getOS()) {
+ case Triple::Darwin:
+ llvm_unreachable("MBlaze does not support Darwin MACH-O format");
+ return NULL;
+ case Triple::MinGW32:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ llvm_unreachable("MBlaze does not support Windows COFF format");
+ return NULL;
+ default:
+ return createELFStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll,
+ NoExecStack);
+ }
+}
+
+
extern "C" void LLVMInitializeMBlazeTarget() {
// Register the target.
RegisterTargetMachine<MBlazeTargetMachine> X(TheMBlazeTarget);
- RegisterAsmInfo<MBlazeMCAsmInfo> A(TheMBlazeTarget);
+
+ // Register the target asm info.
+ RegisterAsmInfoFn A(TheMBlazeTarget, createMCAsmInfo);
+
+ // Register the MC code emitter
+ TargetRegistry::RegisterCodeEmitter(TheMBlazeTarget,
+ llvm::createMBlazeMCCodeEmitter);
+
+ // Register the asm backend
+ TargetRegistry::RegisterAsmBackend(TheMBlazeTarget,
+ createMBlazeAsmBackend);
+
+ // Register the object streamer
+ TargetRegistry::RegisterObjectStreamer(TheMBlazeTarget,
+ createMCStreamer);
+
}
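With these hooks registered, a driver can initialize the target and then resolve it through the registry. A minimal usage sketch; the triple string is illustrative:

// Sketch: register MBlaze, then look it up by triple name.
LLVMInitializeMBlazeTarget();
std::string Error;
const Target *T = TargetRegistry::lookupTarget("mblaze-unknown-elf", Error);
if (!T)
  report_fatal_error(Error);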
// DataLayout --> Big-endian, 32-bit pointer/ABI/alignment
@@ -35,11 +84,10 @@ MBlazeTargetMachine(const Target &T, const std::string &TT,
const std::string &FS):
LLVMTargetMachine(T, TT),
Subtarget(TT, FS),
- DataLayout("E-p:32:32-i8:8:8-i16:16:16-i64:32:32-"
- "f64:32:32-v64:32:32-v128:32:32-n32"),
+ DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"),
InstrInfo(*this),
- FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0),
- TLInfo(*this), TSInfo(*this) {
+ FrameLowering(Subtarget),
+ TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this) {
if (getRelocationModel() == Reloc::Default) {
setRelocationModel(Reloc::Static);
}
@@ -50,8 +98,8 @@ MBlazeTargetMachine(const Target &T, const std::string &TT,
// Install an instruction selector pass using
// the ISelDag to gen MBlaze code.
-bool MBlazeTargetMachine::
-addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
+bool MBlazeTargetMachine::addInstSelector(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
PM.add(createMBlazeISelDag(*this));
return false;
}
@@ -59,8 +107,8 @@ addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
// Implemented by targets that want to run passes immediately before
// machine code is emitted. return true if -print-machineinstrs should
// print out the code after the passes.
-bool MBlazeTargetMachine::
-addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
+bool MBlazeTargetMachine::addPreEmitPass(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
PM.add(createMBlazeDelaySlotFillerPass(*this));
return true;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
index 6a57e58..45ad078 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
@@ -19,21 +19,25 @@
#include "MBlazeISelLowering.h"
#include "MBlazeSelectionDAGInfo.h"
#include "MBlazeIntrinsicInfo.h"
+#include "MBlazeFrameLowering.h"
+#include "MBlazeELFWriterInfo.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
class formatted_raw_ostream;
class MBlazeTargetMachine : public LLVMTargetMachine {
- MBlazeSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
- MBlazeInstrInfo InstrInfo;
- TargetFrameInfo FrameInfo;
- MBlazeTargetLowering TLInfo;
+ MBlazeSubtarget Subtarget;
+ const TargetData DataLayout; // Calculates type size & alignment
+ MBlazeInstrInfo InstrInfo;
+ MBlazeFrameLowering FrameLowering;
+ MBlazeTargetLowering TLInfo;
MBlazeSelectionDAGInfo TSInfo;
- MBlazeIntrinsicInfo IntrinsicInfo;
+ MBlazeIntrinsicInfo IntrinsicInfo;
+ MBlazeELFWriterInfo ELFWriterInfo;
public:
MBlazeTargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
@@ -41,8 +45,8 @@ namespace llvm {
virtual const MBlazeInstrInfo *getInstrInfo() const
{ return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const
- { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const
+ { return &FrameLowering; }
virtual const MBlazeSubtarget *getSubtargetImpl() const
{ return &Subtarget; }
@@ -62,12 +66,13 @@ namespace llvm {
const TargetIntrinsicInfo *getIntrinsicInfo() const
{ return &IntrinsicInfo; }
- // Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
+ virtual const MBlazeELFWriterInfo *getELFWriterInfo() const {
+ return &ELFWriterInfo;
+ }
- virtual bool addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
+ // Pass Pipeline Configuration
+ virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level Opt);
+ virtual bool addPreEmitPass(PassManagerBase &PM,CodeGenOpt::Level Opt);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
index 05c01ef..abd1b0b 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
@@ -16,6 +16,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
void MBlazeTargetObjectFile::
@@ -23,13 +24,13 @@ Initialize(MCContext &Ctx, const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
SmallDataSection =
- getContext().getELFSection(".sdata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getDataRel());
SmallBSSSection =
- getContext().getELFSection(".sbss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getBSS());
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.h b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.h
index 20e7702..c313722 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetObjectFile.h
@@ -18,10 +18,9 @@ namespace llvm {
const MCSection *SmallDataSection;
const MCSection *SmallBSSSection;
public:
-
+
void Initialize(MCContext &Ctx, const TargetMachine &TM);
-
/// IsGlobalInSmallSection - Return true if this global address should be
/// placed into small data/bss section.
bool IsGlobalInSmallSection(const GlobalValue *GV,
@@ -29,8 +28,8 @@ namespace llvm {
SectionKind Kind) const;
bool IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM) const;
-
+ const TargetMachine &TM) const;
+
const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
SectionKind Kind,
Mangler *Mang,
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/CMakeLists.txt b/contrib/llvm/lib/Target/MSP430/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..f5458d5
--- /dev/null
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/CMakeLists.txt
@@ -0,0 +1,6 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMMSP430AsmPrinter
+ MSP430InstPrinter.cpp
+ )
+add_dependencies(LLVMMSP430AsmPrinter MSP430CodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.cpp b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
index c15d408..e10d4fe 100644
--- a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
@@ -13,7 +13,6 @@
#define DEBUG_TYPE "asm-printer"
#include "MSP430.h"
-#include "MSP430InstrInfo.h"
#include "MSP430InstPrinter.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -24,9 +23,7 @@ using namespace llvm;
// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
#include "MSP430GenAsmWriter.inc"
-#undef MachineInstr
void MSP430InstPrinter::printInst(const MCInst *MI, raw_ostream &O) {
printInstruction(MI, O);
diff --git a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.h b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
index f0e1ce2..f0e1ce2 100644
--- a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430InstPrinter.h
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/Makefile b/contrib/llvm/lib/Target/MSP430/InstPrinter/Makefile
new file mode 100644
index 0000000..a5293ab
--- /dev/null
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/MSP430/InstPrinter/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMMSP430AsmPrinter
+
+# Hack: we need to include 'main' MSP430 target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430.td b/contrib/llvm/lib/Target/MSP430/MSP430.td
index 0f08e3d..5cc5e6e 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430.td
@@ -52,6 +52,7 @@ def MSP430InstrInfo : InstrInfo;
def MSP430InstPrinter : AsmWriter {
string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430AsmPrinter.cpp b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 56f72bb..a1a7f44 100644
--- a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -15,10 +15,10 @@
#define DEBUG_TYPE "asm-printer"
#include "MSP430.h"
#include "MSP430InstrInfo.h"
-#include "MSP430InstPrinter.h"
#include "MSP430MCAsmInfo.h"
#include "MSP430MCInstLower.h"
#include "MSP430TargetMachine.h"
+#include "InstPrinter/MSP430InstPrinter.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
new file mode 100644
index 0000000..c99f4ab
--- /dev/null
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -0,0 +1,223 @@
+//===-- MSP430FrameLowering.cpp - MSP430 Frame Information ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MSP430 implementation of the TargetFrameLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MSP430FrameLowering.h"
+#include "MSP430InstrInfo.h"
+#include "MSP430MachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+bool MSP430FrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ return (DisableFramePointerElim(MF) ||
+ MF.getFrameInfo()->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken());
+}
+
+bool MSP430FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+void MSP430FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MSP430MachineFunctionInfo *MSP430FI = MF.getInfo<MSP430MachineFunctionInfo>();
+ const MSP430InstrInfo &TII =
+ *static_cast<const MSP430InstrInfo*>(MF.getTarget().getInstrInfo());
+
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI->getStackSize();
+
+ uint64_t NumBytes = 0;
+ if (hasFP(MF)) {
+ // Calculate required stack adjustment
+ uint64_t FrameSize = StackSize - 2;
+ NumBytes = FrameSize - MSP430FI->getCalleeSavedFrameSize();
+
+ // Get the offset of the stack slot for the EBP register... which is
+ // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+ // Update the frame offset adjustment.
+ MFI->setOffsetAdjustment(-NumBytes);
+
+ // Save FPW into the appropriate stack slot...
+ BuildMI(MBB, MBBI, DL, TII.get(MSP430::PUSH16r))
+ .addReg(MSP430::FPW, RegState::Kill);
+
+ // Update FPW with the new base value...
+ BuildMI(MBB, MBBI, DL, TII.get(MSP430::MOV16rr), MSP430::FPW)
+ .addReg(MSP430::SPW);
+
+ // Mark the FramePtr as live-in in every block except the entry.
+ for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ I != E; ++I)
+ I->addLiveIn(MSP430::FPW);
+
+ } else
+ NumBytes = StackSize - MSP430FI->getCalleeSavedFrameSize();
+
+ // Skip the callee-saved push instructions.
+ while (MBBI != MBB.end() && (MBBI->getOpcode() == MSP430::PUSH16r))
+ ++MBBI;
+
+ if (MBBI != MBB.end())
+ DL = MBBI->getDebugLoc();
+
+ if (NumBytes) { // adjust stack pointer: SPW -= numbytes
+ // If there is an SUB16ri of SPW immediately before this instruction, merge
+ // the two.
+ //NumBytes -= mergeSPUpdates(MBB, MBBI, true);
+ // If there is an ADD16ri or SUB16ri of SPW immediately after this
+ // instruction, merge the two instructions.
+ // mergeSPUpdatesDown(MBB, MBBI, &NumBytes);
+
+ if (NumBytes) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(MSP430::SUB16ri), MSP430::SPW)
+ .addReg(MSP430::SPW).addImm(NumBytes);
+ // The SRW implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+ }
+}
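The stack adjustment above can be checked with concrete, illustrative numbers: with StackSize = 12, a frame pointer, and two callee-saved pushes (CalleeSavedFrameSize = 4), FrameSize = 12 - 2 = 10 (2 bytes go to the saved FPW slot) and NumBytes = 10 - 4 = 6, so the prologue emits SUB16ri SPW, 6.

// Illustrative arithmetic for the hasFP path of emitPrologue above.
uint64_t StackSize = 12, CalleeSavedFrameSize = 4;
uint64_t FrameSize = StackSize - 2;                    // 2 bytes: saved FPW
uint64_t NumBytes  = FrameSize - CalleeSavedFrameSize; // 6 -> SUB16ri SPW, 6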
+
+void MSP430FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ MSP430MachineFunctionInfo *MSP430FI = MF.getInfo<MSP430MachineFunctionInfo>();
+ const MSP430InstrInfo &TII =
+ *static_cast<const MSP430InstrInfo*>(MF.getTarget().getInstrInfo());
+
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc DL = MBBI->getDebugLoc();
+
+ switch (RetOpcode) {
+ case MSP430::RET:
+ case MSP430::RETI: break; // These are ok
+ default:
+ llvm_unreachable("Can only insert epilog into returning blocks");
+ }
+
+ // Get the number of bytes to allocate from the FrameInfo
+ uint64_t StackSize = MFI->getStackSize();
+ unsigned CSSize = MSP430FI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = 0;
+
+ if (hasFP(MF)) {
+ // Calculate required stack adjustment
+ uint64_t FrameSize = StackSize - 2;
+ NumBytes = FrameSize - CSSize;
+
+ // pop FPW.
+ BuildMI(MBB, MBBI, DL, TII.get(MSP430::POP16r), MSP430::FPW);
+ } else
+ NumBytes = StackSize - CSSize;
+
+ // Skip the callee-saved pop instructions.
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if (Opc != MSP430::POP16r && !PI->getDesc().isTerminator())
+ break;
+ --MBBI;
+ }
+
+ DL = MBBI->getDebugLoc();
+
+ // If there is an ADD16ri or SUB16ri of SPW immediately before this
+ // instruction, merge the two instructions.
+ //if (NumBytes || MFI->hasVarSizedObjects())
+ // mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
+
+ if (MFI->hasVarSizedObjects()) {
+ BuildMI(MBB, MBBI, DL,
+ TII.get(MSP430::MOV16rr), MSP430::SPW).addReg(MSP430::FPW);
+ if (CSSize) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(MSP430::SUB16ri), MSP430::SPW)
+ .addReg(MSP430::SPW).addImm(CSSize);
+ // The SRW implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+ } else {
+ // adjust stack pointer back: SPW += numbytes
+ if (NumBytes) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(MSP430::ADD16ri), MSP430::SPW)
+ .addReg(MSP430::SPW).addImm(NumBytes);
+ // The SRW implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+ }
+}
+
+// FIXME: Can we eliminate these in favour of generic code?
+bool
+MSP430FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ MSP430MachineFunctionInfo *MFI = MF.getInfo<MSP430MachineFunctionInfo>();
+ MFI->setCalleeSavedFrameSize(CSI.size() * 2);
+
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ BuildMI(MBB, MI, DL, TII.get(MSP430::PUSH16r))
+ .addReg(Reg, RegState::Kill);
+ }
+ return true;
+}
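Each PUSH16r spills one 16-bit register, hence the CSI.size() * 2 frame-size bookkeeping; saving three registers, for example, reserves 6 bytes.

// 16-bit registers: 2 bytes of callee-saved area per spilled register.
static unsigned calleeSavedBytes(const std::vector<CalleeSavedInfo> &CSI) {
  return CSI.size() * 2;
}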
+
+bool
+MSP430FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i)
+ BuildMI(MBB, MI, DL, TII.get(MSP430::POP16r), CSI[i].getReg());
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
new file mode 100644
index 0000000..b636827
--- /dev/null
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.h
@@ -0,0 +1,53 @@
+//==- MSP430FrameLowering.h - Define frame lowering for MSP430 --*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MSP430 implementation of the TargetFrameLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSP430_FRAMEINFO_H
+#define MSP430_FRAMEINFO_H
+
+#include "MSP430.h"
+#include "MSP430Subtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class MSP430Subtarget;
+
+class MSP430FrameLowering : public TargetFrameLowering {
+protected:
+ const MSP430Subtarget &STI;
+
+public:
+ explicit MSP430FrameLowering(const MSP430Subtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
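The base-class constructor arguments encode the MSP430 stack model: the stack grows down, is 2-byte aligned, and the local area begins at offset -2 from the entry stack pointer (the slot above it holds the 2-byte return address pushed by CALL). An annotated restatement, hedged as a reading of the constructor call rather than documented API semantics:

// TargetFrameLowering(StackDirection, StackAlignment, LocalAreaOffset)
//   StackGrowsDown : MSP430 pushes decrement SPW
//   2              : 16-bit (2-byte) stack alignment
//   -2             : locals start below the 2-byte saved return address
MSP430FrameLowering FL(STI); // uses StackGrowsDown, 2, -2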
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 3395e9f..5430d43 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -60,15 +60,6 @@ namespace {
return GV != 0 || CP != 0 || ES != 0 || JT != -1;
}
- bool hasBaseReg() const {
- return Base.Reg.getNode() != 0;
- }
-
- void setBaseReg(SDValue Reg) {
- BaseType = RegBase;
- Base.Reg = Reg;
- }
-
void dump() {
errs() << "MSP430ISelAddressMode " << this << '\n';
if (BaseType == RegBase && Base.Reg.getNode() != 0) {
@@ -129,7 +120,7 @@ namespace {
SDNode *SelectIndexedBinOp(SDNode *Op, SDValue N1, SDValue N2,
unsigned Opc8, unsigned Opc16);
- bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Disp);
+ bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Disp);
};
} // end anonymous namespace
@@ -254,7 +245,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) {
/// SelectAddr - returns true if it is able pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
-bool MSP430DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N,
+bool MSP430DAGToDAGISel::SelectAddr(SDValue N,
SDValue &Base, SDValue &Disp) {
MSP430ISelAddressMode AM;
@@ -272,7 +263,7 @@ bool MSP430DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N,
AM.Base.Reg;
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, Op->getDebugLoc(),
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, N->getDebugLoc(),
MVT::i16, AM.Disp,
0/*AM.SymbolFlags*/);
else if (AM.CP)
@@ -298,7 +289,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
switch (ConstraintCode) {
default: return true;
case 'm': // memory
- if (!SelectAddr(Op.getNode(), Op, Op0, Op1))
+ if (!SelectAddr(Op, Op0, Op1))
return true;
break;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index a1703a3..30ef4f5 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -366,7 +366,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > 2) {
errs() << "LowerFormalArguments Unhandled argument type: "
- << VA.getLocVT().getSimpleVT().SimpleTy
+ << EVT(VA.getLocVT()).getEVTString()
<< "\n";
}
// Create the frame index object for this incoming parameter...
@@ -376,7 +376,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
@@ -507,8 +507,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(),
- VA.getLocMemOffset(), false, false, 0));
+ MachinePointerInfo(),false, false, 0));
}
}
@@ -537,7 +536,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16);
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@@ -748,7 +747,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
}
TargetCC = DAG.getConstant(TCC, MVT::i8);
- return DAG.getNode(MSP430ISD::CMP, dl, MVT::Flag, LHS, RHS);
+ return DAG.getNode(MSP430ISD::CMP, dl, MVT::Glue, LHS, RHS);
}
@@ -837,7 +836,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
return SR;
} else {
SDValue Zero = DAG.getConstant(0, VT);
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(One);
Ops.push_back(Zero);
@@ -859,7 +858,7 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op,
SDValue TargetCC;
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(TrueV);
Ops.push_back(FalseV);
@@ -914,13 +913,13 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- NULL, 0, false, false, 0);
+ MachinePointerInfo(), false, false, 0);
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, NULL, 0, false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, 0);
}
SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -934,7 +933,8 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
MSP430::FPW, VT);
while (Depth--)
- FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
false, false, 0);
return FrameAddr;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index bfab844..424df13 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -40,8 +40,9 @@ void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
- MachineMemOperand::MOStore, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MachineMemOperand::MOStore,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
@@ -68,8 +69,9 @@ void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
- MachineMemOperand::MOLoad, 0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
@@ -99,48 +101,6 @@ void MSP430InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(KillSrc));
}
-bool
-MSP430InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineFunction &MF = *MBB.getParent();
- MSP430MachineFunctionInfo *MFI = MF.getInfo<MSP430MachineFunctionInfo>();
- MFI->setCalleeSavedFrameSize(CSI.size() * 2);
-
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, DL, get(MSP430::PUSH16r))
- .addReg(Reg, RegState::Kill);
- }
- return true;
-}
-
-bool
-MSP430InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i)
- BuildMI(MBB, MI, DL, get(MSP430::POP16r), CSI[i].getReg());
-
- return true;
-}
-
unsigned MSP430InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 49ccc03..e885cd3 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -66,15 +66,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
// Branch folding goodness
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
index 8792b22..59cb598 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
@@ -40,28 +40,28 @@ def SDT_MSP430Shift : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
// MSP430 Specific Node Definitions.
//===----------------------------------------------------------------------===//
def MSP430retflag : SDNode<"MSP430ISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def MSP430retiflag : SDNode<"MSP430ISD::RETI_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def MSP430rra : SDNode<"MSP430ISD::RRA", SDTIntUnaryOp, []>;
def MSP430rla : SDNode<"MSP430ISD::RLA", SDTIntUnaryOp, []>;
def MSP430rrc : SDNode<"MSP430ISD::RRC", SDTIntUnaryOp, []>;
def MSP430call : SDNode<"MSP430ISD::CALL", SDT_MSP430Call,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag, SDNPVariadic]>;
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
def MSP430callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_MSP430CallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def MSP430callseq_end :
SDNode<"ISD::CALLSEQ_END", SDT_MSP430CallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def MSP430Wrapper : SDNode<"MSP430ISD::Wrapper", SDT_MSP430Wrapper>;
-def MSP430cmp : SDNode<"MSP430ISD::CMP", SDT_MSP430Cmp, [SDNPOutFlag]>;
+def MSP430cmp : SDNode<"MSP430ISD::CMP", SDT_MSP430Cmp, [SDNPOutGlue]>;
def MSP430brcc : SDNode<"MSP430ISD::BR_CC", SDT_MSP430BrCC,
- [SDNPHasChain, SDNPInFlag]>;
+ [SDNPHasChain, SDNPInGlue]>;
def MSP430selectcc: SDNode<"MSP430ISD::SELECT_CC", SDT_MSP430SelectCC,
- [SDNPInFlag]>;
+ [SDNPInGlue]>;
def MSP430shl : SDNode<"MSP430ISD::SHL", SDT_MSP430Shift, []>;
def MSP430sra : SDNode<"MSP430ISD::SRA", SDT_MSP430Shift, []>;
def MSP430srl : SDNode<"MSP430ISD::SRL", SDT_MSP430Shift, []>;
diff --git a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
index d1d9a11..d1d9a11 100644
--- a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
diff --git a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.h b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
index e937696..e937696 100644
--- a/contrib/llvm/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 3c3fa73..1da6d8d 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -33,11 +33,12 @@ MSP430RegisterInfo::MSP430RegisterInfo(MSP430TargetMachine &tm,
const TargetInstrInfo &tii)
: MSP430GenRegisterInfo(MSP430::ADJCALLSTACKDOWN, MSP430::ADJCALLSTACKUP),
TM(tm), TII(tii) {
- StackAlign = TM.getFrameInfo()->getStackAlignment();
+ StackAlign = TM.getFrameLowering()->getStackAlignment();
}
const unsigned*
MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ const TargetFrameLowering *TFI = MF->getTarget().getFrameLowering();
const Function* F = MF->getFunction();
static const unsigned CalleeSavedRegs[] = {
MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W,
@@ -62,7 +63,7 @@ MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
0
};
- if (hasFP(*MF))
+ if (TFI->hasFP(*MF))
return (F->getCallingConv() == CallingConv::MSP430_INTR ?
CalleeSavedRegsIntrFP : CalleeSavedRegsFP);
else
@@ -73,6 +74,7 @@ MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
// Mark 4 special registers as reserved.
Reserved.set(MSP430::PCW);
@@ -81,7 +83,7 @@ BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(MSP430::CGW);
// Mark frame pointer as reserved if needed.
- if (hasFP(MF))
+ if (TFI->hasFP(MF))
Reserved.set(MSP430::FPW);
return Reserved;
@@ -92,23 +94,12 @@ MSP430RegisterInfo::getPointerRegClass(unsigned Kind) const {
return &MSP430::GR16RegClass;
}
-
-bool MSP430RegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
-
- return (DisableFramePointerElim(MF) ||
- MF.getFrameInfo()->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken());
-}
-
-bool MSP430RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
- return !MF.getFrameInfo()->hasVarSizedObjects();
-}
-
void MSP430RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
// If the stack pointer can be changed after prologue, turn the
// adjcallstackup instruction into a 'sub SPW, <amt>' and the
// adjcallstackdown instruction into 'add SPW, <amt>'
@@ -172,6 +163,7 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc dl = MI.getDebugLoc();
while (!MI.getOperand(i).isFI()) {
++i;
@@ -180,13 +172,13 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(i).getIndex();
- unsigned BasePtr = (hasFP(MF) ? MSP430::FPW : MSP430::SPW);
+ unsigned BasePtr = (TFI->hasFP(MF) ? MSP430::FPW : MSP430::SPW);
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
// Skip the saved PC
Offset += 2;
- if (!hasFP(MF))
+ if (!TFI->hasFP(MF))
Offset += MF.getFrameInfo()->getStackSize();
else
Offset += 2; // Skip the saved FPW
@@ -224,8 +216,10 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
void
MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
// Create a frame entry for the FPW register that must be saved.
- if (hasFP(MF)) {
+ if (TFI->hasFP(MF)) {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true);
(void)FrameIdx;
assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
@@ -233,144 +227,14 @@ MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
}
}
-
-void MSP430RegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MSP430MachineFunctionInfo *MSP430FI = MF.getInfo<MSP430MachineFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Get the number of bytes to allocate from the FrameInfo.
- uint64_t StackSize = MFI->getStackSize();
-
- uint64_t NumBytes = 0;
- if (hasFP(MF)) {
- // Calculate required stack adjustment
- uint64_t FrameSize = StackSize - 2;
- NumBytes = FrameSize - MSP430FI->getCalleeSavedFrameSize();
-
- // Get the offset of the stack slot for the EBP register... which is
- // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
- // Update the frame offset adjustment.
- MFI->setOffsetAdjustment(-NumBytes);
-
- // Save FPW into the appropriate stack slot...
- BuildMI(MBB, MBBI, DL, TII.get(MSP430::PUSH16r))
- .addReg(MSP430::FPW, RegState::Kill);
-
- // Update FPW with the new base value...
- BuildMI(MBB, MBBI, DL, TII.get(MSP430::MOV16rr), MSP430::FPW)
- .addReg(MSP430::SPW);
-
- // Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
- I != E; ++I)
- I->addLiveIn(MSP430::FPW);
-
- } else
- NumBytes = StackSize - MSP430FI->getCalleeSavedFrameSize();
-
- // Skip the callee-saved push instructions.
- while (MBBI != MBB.end() && (MBBI->getOpcode() == MSP430::PUSH16r))
- ++MBBI;
-
- if (MBBI != MBB.end())
- DL = MBBI->getDebugLoc();
-
- if (NumBytes) { // adjust stack pointer: SPW -= numbytes
- // If there is an SUB16ri of SPW immediately before this instruction, merge
- // the two.
- //NumBytes -= mergeSPUpdates(MBB, MBBI, true);
- // If there is an ADD16ri or SUB16ri of SPW immediately after this
- // instruction, merge the two instructions.
- // mergeSPUpdatesDown(MBB, MBBI, &NumBytes);
-
- if (NumBytes) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(MSP430::SUB16ri), MSP430::SPW)
- .addReg(MSP430::SPW).addImm(NumBytes);
- // The SRW implicit def is dead.
- MI->getOperand(3).setIsDead();
- }
- }
-}
-
-void MSP430RegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- MSP430MachineFunctionInfo *MSP430FI = MF.getInfo<MSP430MachineFunctionInfo>();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- unsigned RetOpcode = MBBI->getOpcode();
- DebugLoc DL = MBBI->getDebugLoc();
-
- switch (RetOpcode) {
- case MSP430::RET:
- case MSP430::RETI: break; // These are ok
- default:
- llvm_unreachable("Can only insert epilog into returning blocks");
- }
-
- // Get the number of bytes to allocate from the FrameInfo
- uint64_t StackSize = MFI->getStackSize();
- unsigned CSSize = MSP430FI->getCalleeSavedFrameSize();
- uint64_t NumBytes = 0;
-
- if (hasFP(MF)) {
- // Calculate required stack adjustment
- uint64_t FrameSize = StackSize - 2;
- NumBytes = FrameSize - CSSize;
-
- // pop FPW.
- BuildMI(MBB, MBBI, DL, TII.get(MSP430::POP16r), MSP430::FPW);
- } else
- NumBytes = StackSize - CSSize;
-
- // Skip the callee-saved pop instructions.
- while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
- unsigned Opc = PI->getOpcode();
- if (Opc != MSP430::POP16r && !PI->getDesc().isTerminator())
- break;
- --MBBI;
- }
-
- DL = MBBI->getDebugLoc();
-
- // If there is an ADD16ri or SUB16ri of SPW immediately before this
- // instruction, merge the two instructions.
- //if (NumBytes || MFI->hasVarSizedObjects())
- // mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
-
- if (MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, DL,
- TII.get(MSP430::MOV16rr), MSP430::SPW).addReg(MSP430::FPW);
- if (CSSize) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(MSP430::SUB16ri), MSP430::SPW)
- .addReg(MSP430::SPW).addImm(CSSize);
- // The SRW implicit def is dead.
- MI->getOperand(3).setIsDead();
- }
- } else {
- // adjust stack pointer back: SPW += numbytes
- if (NumBytes) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(MSP430::ADD16ri), MSP430::SPW)
- .addReg(MSP430::SPW).addImm(NumBytes);
- // The SRW implicit def is dead.
- MI->getOperand(3).setIsDead();
- }
- }
-}
-
unsigned MSP430RegisterInfo::getRARegister() const {
return MSP430::PCW;
}
unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? MSP430::FPW : MSP430::SPW;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? MSP430::FPW : MSP430::SPW;
}
int MSP430RegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
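The MSP430 hunks above all follow one mechanical pattern: frame-related queries move off the register-info class and are routed through the target's TargetFrameLowering object. A minimal sketch of the new call-site shape, assuming the LLVM API of this era (this mirrors getFrameRegister above; it is not additional upstream code):

    static unsigned getBaseReg(const MachineFunction &MF) {
      // Fetch the frame-lowering object from the owning target machine.
      const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
      // Use the frame pointer when one exists, else the stack pointer.
      return TFI->hasFP(MF) ? MSP430::FPW : MSP430::SPW;
    }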
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index 4d2795b..56744fa 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -39,9 +39,6 @@ public:
BitVector getReservedRegs(const MachineFunction &MF) const;
const TargetRegisterClass* getPointerRegClass(unsigned Kind = 0) const;
- bool hasFP(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -49,9 +46,6 @@ public:
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
// Debug information queries.
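The methods deleted here presumably reappear on a new MSP430FrameLowering class whose files are not part of this section. A hedged sketch of that declaration, inferred from the removed members and from the FrameLowering(Subtarget) initializer in MSP430TargetMachine.cpp below:

    class MSP430FrameLowering : public TargetFrameLowering {
      const MSP430Subtarget &STI;
    public:
      explicit MSP430FrameLowering(const MSP430Subtarget &sti)
        // Same geometry the removed TargetFrameInfo member used:
        // stack grows down, 2-byte alignment, local area offset -2.
        : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2),
          STI(sti) {}

      void emitPrologue(MachineFunction &MF) const;
      void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
      bool hasFP(const MachineFunction &MF) const;
      bool hasReservedCallFrame(const MachineFunction &MF) const;
    };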
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
index f8aec66..ab7b59b 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
@@ -79,10 +79,10 @@ def GR8 : RegisterClass<"MSP430", [i8], 8,
GR8Class::iterator
GR8Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
// Depending on whether the function uses frame pointer or not, last 5 or 4
// registers on the list above are reserved
- if (RI->hasFP(MF))
+ if (TFI->hasFP(MF))
return end()-5;
else
return end()-4;
@@ -106,10 +106,10 @@ def GR16 : RegisterClass<"MSP430", [i16], 16,
GR16Class::iterator
GR16Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
// Depending on whether the function uses frame pointer or not, last 5 or 4
// registers on the list above are reserved
- if (RI->hasFP(MF))
+ if (TFI->hasFP(MF))
return end()-5;
else
return end()-4;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 99877c8..fba9536 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -28,13 +28,13 @@ extern "C" void LLVMInitializeMSP430Target() {
MSP430TargetMachine::MSP430TargetMachine(const Target &T,
const std::string &TT,
- const std::string &FS) :
- LLVMTargetMachine(T, TT),
- Subtarget(TT, FS),
- // FIXME: Check TargetData string.
- DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameInfo(TargetFrameInfo::StackGrowsDown, 2, -2) { }
+ const std::string &FS)
+ : LLVMTargetMachine(T, TT),
+ Subtarget(TT, FS),
+ // FIXME: Check TargetData string.
+ DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
+ InstrInfo(*this), TLInfo(*this), TSInfo(*this),
+ FrameLowering(Subtarget) { }
bool MSP430TargetMachine::addInstSelector(PassManagerBase &PM,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
index b93edfd..cee3b04 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
@@ -17,11 +17,12 @@
#include "MSP430InstrInfo.h"
#include "MSP430ISelLowering.h"
+#include "MSP430FrameLowering.h"
#include "MSP430SelectionDAGInfo.h"
#include "MSP430RegisterInfo.h"
#include "MSP430Subtarget.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -34,16 +35,15 @@ class MSP430TargetMachine : public LLVMTargetMachine {
MSP430InstrInfo InstrInfo;
MSP430TargetLowering TLInfo;
MSP430SelectionDAGInfo TSInfo;
-
- // MSP430 does not have any call stack frame, therefore not having
- // any MSP430 specific FrameInfo class.
- TargetFrameInfo FrameInfo;
+ MSP430FrameLowering FrameLowering;
public:
MSP430TargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const MSP430InstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetData *getTargetData() const { return &DataLayout;}
virtual const MSP430Subtarget *getSubtargetImpl() const { return &Subtarget; }
diff --git a/contrib/llvm/lib/Target/Mangler.cpp b/contrib/llvm/lib/Target/Mangler.cpp
index 49efe75..46c687b 100644
--- a/contrib/llvm/lib/Target/Mangler.cpp
+++ b/contrib/llvm/lib/Target/Mangler.cpp
@@ -224,16 +224,6 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
}
}
-/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
-/// and the specified global variable's name. If the global variable doesn't
-/// have a name, this fills in a unique name for the global.
-std::string Mangler::getNameWithPrefix(const GlobalValue *GV,
- bool isImplicitlyPrivate) {
- SmallString<64> Buf;
- getNameWithPrefix(Buf, GV, isImplicitlyPrivate);
- return std::string(Buf.begin(), Buf.end());
-}
-
/// getSymbol - Return the MCSymbol for the specified global value. This
/// symbol is the main label that is the address of the global.
MCSymbol *Mangler::getSymbol(const GlobalValue *GV) {
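With the std::string overload removed, a caller that wants an owned string goes through the SmallVector overload itself — a sketch mirroring the deleted body:

    SmallString<64> Buf;
    Mang.getNameWithPrefix(Buf, GV, /*isImplicitlyPrivate=*/false);
    std::string Name(Buf.begin(), Buf.end());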
diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td
index a51c377..3e6437b 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.td
+++ b/contrib/llvm/lib/Target/Mips/Mips.td
@@ -36,19 +36,15 @@ def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true",
"Support 64-bit FP registers.">;
def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat",
"true", "Only supports single precision float">;
-def FeatureMips1 : SubtargetFeature<"mips1", "MipsArchVersion", "Mips1",
- "Mips1 ISA Support">;
-def FeatureMips2 : SubtargetFeature<"mips2", "MipsArchVersion", "Mips2",
- "Mips2 ISA Support">;
def FeatureO32 : SubtargetFeature<"o32", "MipsABI", "O32",
"Enable o32 ABI">;
def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI",
"Enable eabi ABI">;
-def FeatureVFPU : SubtargetFeature<"vfpu", "HasVFPU",
+def FeatureVFPU : SubtargetFeature<"vfpu", "HasVFPU",
"true", "Enable vector FPU instructions.">;
-def FeatureSEInReg : SubtargetFeature<"seinreg", "HasSEInReg", "true",
+def FeatureSEInReg : SubtargetFeature<"seinreg", "HasSEInReg", "true",
"Enable 'signext in register' instructions.">;
-def FeatureCondMov : SubtargetFeature<"condmov", "HasCondMov", "true",
+def FeatureCondMov : SubtargetFeature<"condmov", "HasCondMov", "true",
"Enable 'conditional move' instructions.">;
def FeatureMulDivAdd : SubtargetFeature<"muldivadd", "HasMulDivAdd", "true",
"Enable 'multiply add/sub' instructions.">;
@@ -58,6 +54,16 @@ def FeatureSwap : SubtargetFeature<"swap", "HasSwap", "true",
"Enable 'byte/half swap' instructions.">;
def FeatureBitCount : SubtargetFeature<"bitcount", "HasBitCount", "true",
"Enable 'count leading bits' instructions.">;
+def FeatureMips1 : SubtargetFeature<"mips1", "MipsArchVersion", "Mips1",
+ "Mips1 ISA Support">;
+def FeatureMips2 : SubtargetFeature<"mips2", "MipsArchVersion", "Mips2",
+ "Mips2 ISA Support">;
+def FeatureMips32 : SubtargetFeature<"mips32", "MipsArchVersion", "Mips32",
+ "Mips32 ISA Support",
+ [FeatureCondMov, FeatureBitCount]>;
+def FeatureMips32r2 : SubtargetFeature<"mips32r2", "MipsArchVersion",
+ "Mips32r2", "Mips32r2 ISA Support",
+ [FeatureMips32, FeatureSEInReg]>;
//===----------------------------------------------------------------------===//
// Mips processors supported.
@@ -73,10 +79,12 @@ def : Proc<"r3000", [FeatureMips1]>;
def : Proc<"mips2", [FeatureMips2]>;
def : Proc<"r6000", [FeatureMips2]>;
-// Allegrex is a 32bit subset of r4000, both for interger and fp registers,
-// but much more similar to Mips2 than Mips3. It also contains some of
-// Mips32/Mips32r2 instructions and a custom vector fpu processor.
-def : Proc<"allegrex", [FeatureMips2, FeatureSingleFloat, FeatureEABI,
+def : Proc<"4ke", [FeatureMips32r2]>;
+
+// Allegrex is a 32-bit subset of r4000, both for integer and fp registers,
+// but much more similar to Mips2 than Mips3. It also contains some of
+// Mips32/Mips32r2 instructions and a custom vector fpu processor.
+def : Proc<"allegrex", [FeatureMips2, FeatureSingleFloat, FeatureEABI,
FeatureVFPU, FeatureSEInReg, FeatureCondMov, FeatureMulDivAdd,
FeatureMinMax, FeatureSwap, FeatureBitCount]>;
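The new FeatureMips32/FeatureMips32r2 entries raise MipsArchVersion and pull in their implied features; the C++ side then keys off subtarget predicates (isMips32() in MipsISelDAGToDAG.cpp and isMips32r2() in MipsISelLowering.cpp below). A hedged sketch of those accessors — their real definitions live in MipsSubtarget.h, which this section does not show:

    // Assumed shape, given an ordered arch enum { Mips1, Mips2, Mips32, Mips32r2 }.
    bool isMips32()   const { return MipsArchVersion >= Mips32;   }
    bool isMips32r2() const { return MipsArchVersion == Mips32r2; }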
diff --git a/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 6660f6b..bd28a9b 100644
--- a/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -125,9 +125,10 @@ namespace {
// Create a bitmask with all callee saved registers for CPU or Floating Point
// registers. For CPU registers consider RA, GP and FP for saving if necessary.
void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
- const TargetRegisterInfo &RI = *TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
const MipsFunctionInfo *MipsFI = MF->getInfo<MipsFunctionInfo>();
-
+
// CPU and FPU Saved Registers Bitmasks
unsigned int CPUBitmask = 0;
unsigned int FPUBitmask = 0;
@@ -145,13 +146,15 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
}
// Return Address and Frame registers must also be set in CPUBitmask.
- if (RI.hasFP(*MF))
+ // FIXME: Do we really need hasFP() call here? When no FP is present SP is
+ // just returned -- will it be ok?
+ if (TFI->hasFP(*MF))
CPUBitmask |= (1 << MipsRegisterInfo::
- getRegisterNumbering(RI.getFrameRegister(*MF)));
-
- if (MFI->adjustsStack())
+ getRegisterNumbering(RI->getFrameRegister(*MF)));
+
+ if (MFI->adjustsStack())
CPUBitmask |= (1 << MipsRegisterInfo::
- getRegisterNumbering(RI.getRARegister()));
+ getRegisterNumbering(RI->getRARegister()));
// Print CPUBitmask
O << "\t.mask \t"; printHex32(CPUBitmask, O);
@@ -270,12 +273,16 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
switch(MO.getTargetFlags()) {
case MipsII::MO_GPREL: O << "%gp_rel("; break;
case MipsII::MO_GOT_CALL: O << "%call16("; break;
- case MipsII::MO_GOT:
- if (MI->getOpcode() == Mips::LW)
+ case MipsII::MO_GOT: {
+ const MachineOperand &LastMO = MI->getOperand(opNum-1);
+ bool LastMOIsGP = LastMO.getType() == MachineOperand::MO_Register
+ && LastMO.getReg() == Mips::GP;
+ if (MI->getOpcode() == Mips::LW || LastMOIsGP)
O << "%got(";
else
O << "%lo(";
break;
+ }
case MipsII::MO_ABS_HILO:
if (MI->getOpcode() == Mips::LUi)
O << "%hi(";
diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 597ea0d..b44a0af 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -31,7 +31,7 @@ namespace {
const TargetInstrInfo *TII;
static char ID;
- Filler(TargetMachine &tm)
+ Filler(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
virtual const char *getPassName() const {
@@ -55,17 +55,22 @@ namespace {
/// Currently, we fill delay slots with NOPs. We assume there is only one
/// delay slot per delayed instruction.
bool Filler::
-runOnMachineBasicBlock(MachineBasicBlock &MBB)
+runOnMachineBasicBlock(MachineBasicBlock &MBB)
{
bool Changed = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->getDesc().hasDelaySlot()) {
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
+ const TargetInstrDesc& Tid = I->getDesc();
+ if (Tid.hasDelaySlot() &&
+ (TM.getSubtarget<MipsSubtarget>().isMips1() ||
+ Tid.isCall() || Tid.isBranch() || Tid.isReturn())) {
MachineBasicBlock::iterator J = I;
++J;
BuildMI(MBB, J, I->getDebugLoc(), TII->get(Mips::NOP));
++FilledSlots;
Changed = true;
}
+ }
+
return Changed;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
new file mode 100644
index 0000000..87a097a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -0,0 +1,314 @@
+//=======- MipsFrameLowering.cpp - Mips Frame Information ------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsFrameLowering.h"
+#include "MipsInstrInfo.h"
+#include "MipsMachineFunction.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+
+//===----------------------------------------------------------------------===//
+//
+// Stack Frame Processing methods
+// +----------------------------+
+//
+// The stack is allocated decrementing the stack pointer on
+// the first instruction of a function prologue. Once decremented,
+// all stack references are done through a positive offset
+// from the stack/frame pointer, so the stack is considered
+// to grow up! Otherwise terrible hacks would have to be made
+// to get this stack ABI compliant :)
+//
+// The stack frame required by the ABI (after call):
+// Offset
+//
+// 0 ----------
+// 4 Args to pass
+// . saved $GP (used in PIC)
+// . Alloca allocations
+// . Local Area
+// . CPU "Callee Saved" Registers
+// . saved FP
+// . saved RA
+// . FPU "Callee Saved" Registers
+// StackSize -----------
+//
+// Offset - offset from sp after stack allocation on function prologue
+//
+// The sp is the stack pointer subtracted/added from the stack size
+// at the Prologue/Epilogue
+//
+// References to the previous stack (to obtain arguments) are done
+// with offsets that exceed the stack size: (stacksize+(4*(num_arg-1)))
+//
+// Examples:
+// - reference to the actual stack frame
+// for any local area var there is something like: FI >= 0, StackOffset: 4
+// sw REGX, 4(SP)
+//
+// - reference to previous stack frame
+// suppose there's a load of the 5th argument: FI < 0, StackOffset: 16.
+// The emitted instruction will be something like:
+// lw REGX, 16+StackSize(SP)
+//
+// Since the total stack size is unknown on LowerFormalArguments, all
+// stack references (ObjectOffset) created to reference the function
+// arguments, are negative numbers. This way, on eliminateFrameIndex it's
+// possible to detect those references and the offsets are adjusted to
+// their real location.
+//
+//===----------------------------------------------------------------------===//
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
+}
+
+void MipsFrameLowering::adjustMipsStackFrame(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ unsigned StackAlign = getStackAlignment();
+ unsigned RegSize = STI.isGP32bit() ? 4 : 8;
+ bool HasGP = MipsFI->needGPSaveRestore();
+
+ // Min and Max CSI FrameIndex.
+ int MinCSFI = -1, MaxCSFI = -1;
+
+ // See the description at MipsMachineFunction.h
+ int TopCPUSavedRegOff = -1, TopFPUSavedRegOff = -1;
+
+ // Replace the dummy '0' SPOffset with the negative offsets, as explained in
+ // LowerFormalArguments. Leaving '0' for a while is necessary to keep
+ // calculateFrameObjectOffsets from applying its default layout here.
+ MipsFI->adjustLoadArgsFI(MFI);
+ MipsFI->adjustStoreVarArgsFI(MFI);
+
+ // It happens that the default stack frame allocation order does not directly
+ // map to the convention used for mips. So we must fix it. We move the callee
+ // save register slots after the local variables area, as described in the
+ // stack frame above.
+ unsigned CalleeSavedAreaSize = 0;
+ if (!CSI.empty()) {
+ MinCSFI = CSI[0].getFrameIdx();
+ MaxCSFI = CSI[CSI.size()-1].getFrameIdx();
+ }
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i)
+ CalleeSavedAreaSize += MFI->getObjectAlignment(CSI[i].getFrameIdx());
+
+ unsigned StackOffset = HasGP ? (MipsFI->getGPStackOffset()+RegSize)
+ : (STI.isABI_O32() ? 16 : 0);
+
+ // Adjust local variables. They should come on the stack right
+ // after the arguments.
+ int LastOffsetFI = -1;
+ for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (i >= MinCSFI && i <= MaxCSFI)
+ continue;
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+ unsigned Offset =
+ StackOffset + MFI->getObjectOffset(i) - CalleeSavedAreaSize;
+ if (LastOffsetFI == -1)
+ LastOffsetFI = i;
+ if (Offset > MFI->getObjectOffset(LastOffsetFI))
+ LastOffsetFI = i;
+ MFI->setObjectOffset(i, Offset);
+ }
+
+ // Adjust CPU Callee Saved Registers Area. Registers RA and FP must
+ // be saved in this CPU Area. This whole area must be aligned to the
+ // default Stack Alignment requirements.
+ if (LastOffsetFI >= 0)
+ StackOffset = MFI->getObjectOffset(LastOffsetFI)+
+ MFI->getObjectSize(LastOffsetFI);
+ StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
+
+ for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (!Mips::CPURegsRegisterClass->contains(Reg))
+ break;
+ MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
+ TopCPUSavedRegOff = StackOffset;
+ StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
+ }
+
+ // Stack locations for FP and RA. If only one of them is used,
+ // the space must be allocated for both, otherwise no space at all.
+ if (hasFP(MF) || MFI->adjustsStack()) {
+ // FP stack location
+ MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
+ StackOffset);
+ MipsFI->setFPStackOffset(StackOffset);
+ TopCPUSavedRegOff = StackOffset;
+ StackOffset += RegSize;
+
+ // RA stack location
+ MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
+ StackOffset);
+ MipsFI->setRAStackOffset(StackOffset);
+ StackOffset += RegSize;
+
+ if (MFI->adjustsStack())
+ TopCPUSavedRegOff += RegSize;
+ }
+
+ StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
+
+ // Adjust FPU Callee Saved Registers Area. This Area must be
+ // aligned to the default Stack Alignment requirements.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (Mips::CPURegsRegisterClass->contains(Reg))
+ continue;
+ MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
+ TopFPUSavedRegOff = StackOffset;
+ StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
+ }
+ StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
+
+ // Update frame info
+ MFI->setStackSize(StackOffset);
+
+ // Recalculate the final tops offset. The final values must be '0'
+ // if there isn't a callee saved register for CPU or FPU, otherwise
+ // a negative offset is needed.
+ if (TopCPUSavedRegOff >= 0)
+ MipsFI->setCPUTopSavedRegOff(TopCPUSavedRegOff-StackOffset);
+
+ if (TopFPUSavedRegOff >= 0)
+ MipsFI->setFPUTopSavedRegOff(TopFPUSavedRegOff-StackOffset);
+}
+
+void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ const MipsRegisterInfo *RegInfo =
+ static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const MipsInstrInfo &TII =
+ *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ bool isPIC = (MF.getTarget().getRelocationModel() == Reloc::PIC_);
+
+ // Get the right frame order for Mips.
+ adjustMipsStackFrame(MF);
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned StackSize = MFI->getStackSize();
+
+ // No need to allocate space on the stack.
+ if (StackSize == 0 && !MFI->adjustsStack()) return;
+
+ int FPOffset = MipsFI->getFPStackOffset();
+ int RAOffset = MipsFI->getRAStackOffset();
+
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
+
+ // TODO: check whether GP is actually needed here.
+ if (isPIC && STI.isABI_O32())
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::CPLOAD))
+ .addReg(RegInfo->getPICCallReg());
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
+
+ // Adjust stack : addi sp, sp, (-imm)
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(-StackSize);
+
+ // Save the return address only if the function isn't a leaf one.
+ // sw $ra, stack_loc($sp)
+ if (MFI->adjustsStack()) {
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::SW))
+ .addReg(Mips::RA).addImm(RAOffset).addReg(Mips::SP);
+ }
+
+ // if framepointer enabled, save it and set it
+ // to point to the stack pointer
+ if (hasFP(MF)) {
+ // sw $fp,stack_loc($sp)
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::SW))
+ .addReg(Mips::FP).addImm(FPOffset).addReg(Mips::SP);
+
+ // move $fp, $sp
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDu), Mips::FP)
+ .addReg(Mips::SP).addReg(Mips::ZERO);
+ }
+
+ // Restore GP from the saved stack location
+ if (MipsFI->needGPSaveRestore())
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE))
+ .addImm(MipsFI->getGPStackOffset());
+}
+
+void MipsFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ const MipsInstrInfo &TII =
+ *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ // Get the number of bytes from FrameInfo
+ int NumBytes = (int) MFI->getStackSize();
+
+ // Get the FI's where RA and FP are saved.
+ int FPOffset = MipsFI->getFPStackOffset();
+ int RAOffset = MipsFI->getRAStackOffset();
+
+ // if framepointer enabled, restore it and restore the
+ // stack pointer
+ if (hasFP(MF)) {
+ // move $sp, $fp
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDu), Mips::SP)
+ .addReg(Mips::FP).addReg(Mips::ZERO);
+
+ // lw $fp,stack_loc($sp)
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::LW), Mips::FP)
+ .addImm(FPOffset).addReg(Mips::SP);
+ }
+
+ // Restore the return address only if the function isn't a leaf one.
+ // lw $ra, stack_loc($sp)
+ if (MFI->adjustsStack()) {
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::LW), Mips::RA)
+ .addImm(RAOffset).addReg(Mips::SP);
+ }
+
+ // adjust stack : insert addi sp, sp, (imm)
+ if (NumBytes) {
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(NumBytes);
+ }
+}
+
+void MipsFrameLowering::
+processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
+ const MipsRegisterInfo *RegInfo =
+ static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ RegInfo->processFunctionBeforeFrameFinalized(MF);
+}
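adjustMipsStackFrame above rounds three different areas up to the stack alignment with the same inline expression. Factored out for clarity (a sketch; the in-tree code deliberately inlines it):

    static unsigned RoundUpToAlign(unsigned Offset, unsigned Align) {
      // Round Offset up to the next multiple of Align.
      return (Offset + Align - 1) / Align * Align;
    }
    // e.g. RoundUpToAlign(10, 8) == 16 and RoundUpToAlign(16, 8) == 16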
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
new file mode 100644
index 0000000..a8426c1
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -0,0 +1,48 @@
+//==--- MipsFrameLowering.h - Define frame lowering for Mips --*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Mips implementation of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPS_FRAMELOWERING_H
+#define MIPS_FRAMELOWERING_H
+
+#include "Mips.h"
+#include "MipsSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class MipsSubtarget;
+
+class MipsFrameLowering : public TargetFrameLowering {
+protected:
+ const MipsSubtarget &STI;
+
+public:
+ explicit MipsFrameLowering(const MipsSubtarget &sti)
+ // FIXME: Is this correct at all?
+ : TargetFrameLowering(StackGrowsUp, 8, 0), STI(sti) {
+ }
+
+ void adjustMipsStackFrame(MachineFunction &MF) const;
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
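Presumably MipsTargetMachine is rewired the same way the MSP430 target machine is above: a MipsFrameLowering member plus a getFrameLowering() override. A sketch, since the Mips target-machine files are not part of this section:

    class MipsTargetMachine : public LLVMTargetMachine {
      MipsFrameLowering FrameLowering;  // assumed member, mirroring MSP430
    public:
      virtual const TargetFrameLowering *getFrameLowering() const {
        return &FrameLowering;
      }
      // ...
    };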
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index a47cf7b..755e04d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -84,8 +84,7 @@ private:
SDNode *Select(SDNode *N);
// Complex Pattern.
- bool SelectAddr(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset);
+ bool SelectAddr(SDValue N, SDValue &Base, SDValue &Offset);
SDNode *SelectLoadFp64(SDNode *N);
SDNode *SelectStoreFp64(SDNode *N);
@@ -110,8 +109,7 @@ SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
/// ComplexPattern used on MipsInstrInfo
/// Used on Mips Load/Store instructions
bool MipsDAGToDAGISel::
-SelectAddr(SDNode *Op, SDValue Addr, SDValue &Offset, SDValue &Base)
-{
+SelectAddr(SDValue Addr, SDValue &Offset, SDValue &Base) {
// if Address is FI, get the TargetFrameIndex.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
@@ -193,7 +191,7 @@ SDNode *MipsDAGToDAGISel::SelectLoadFp64(SDNode *N) {
SDValue N1 = N->getOperand(1);
SDValue Offset0, Offset1, Base;
- if (!SelectAddr(N, N1, Offset0, Base) ||
+ if (!SelectAddr(N1, Offset0, Base) ||
N1.getValueType() != MVT::i32)
return NULL;
@@ -257,7 +255,7 @@ SDNode *MipsDAGToDAGISel::SelectStoreFp64(SDNode *N) {
SDValue N2 = N->getOperand(2);
SDValue Offset0, Offset1, Base;
- if (!SelectAddr(N, N2, Offset0, Base) ||
+ if (!SelectAddr(N2, Offset0, Base) ||
N1.getValueType() != MVT::f64 ||
N2.getValueType() != MVT::i32)
return NULL;
@@ -327,7 +325,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
case ISD::SUBE:
case ISD::ADDE: {
SDValue InFlag = Node->getOperand(2), CmpLHS;
- unsigned Opc = InFlag.getOpcode(); Opc=Opc;
+ unsigned Opc = InFlag.getOpcode(); (void)Opc;
assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
(Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
"(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
@@ -351,7 +349,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT,
SDValue(Carry,0), RHS);
- return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Flag,
+ return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue,
LHS, SDValue(AddCarry,0));
}
@@ -369,11 +367,11 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
else
Op = (Opcode == ISD::UDIVREM ? Mips::DIVu : Mips::DIV);
- SDNode *MulDiv = CurDAG->getMachineNode(Op, dl, MVT::Flag, Op1, Op2);
+ SDNode *MulDiv = CurDAG->getMachineNode(Op, dl, MVT::Glue, Op1, Op2);
SDValue InFlag = SDValue(MulDiv, 0);
SDNode *Lo = CurDAG->getMachineNode(Mips::MFLO, dl, MVT::i32,
- MVT::Flag, InFlag);
+ MVT::Glue, InFlag);
InFlag = SDValue(Lo,1);
SDNode *Hi = CurDAG->getMachineNode(Mips::MFHI, dl, MVT::i32, InFlag);
@@ -388,6 +386,8 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
/// Special Muls
case ISD::MUL:
+ if (Subtarget.isMips32())
+ break;
case ISD::MULHS:
case ISD::MULHU: {
SDValue MulOp1 = Node->getOperand(0);
@@ -395,7 +395,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
unsigned MulOp = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
SDNode *MulNode = CurDAG->getMachineNode(MulOp, dl,
- MVT::Flag, MulOp1, MulOp2);
+ MVT::Glue, MulOp1, MulOp2);
SDValue InFlag = SDValue(MulNode, 0);
@@ -421,7 +421,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
Op = (Opcode == ISD::SREM ? Mips::DIV : Mips::DIVu);
MOp = Mips::MFHI;
}
- SDNode *Node = CurDAG->getMachineNode(Op, dl, MVT::Flag, Op1, Op2);
+ SDNode *Node = CurDAG->getMachineNode(Op, dl, MVT::Glue, Op1, Op2);
SDValue InFlag = SDValue(Node, 0);
return CurDAG->getMachineNode(MOp, dl, MVT::i32, InFlag);
@@ -474,7 +474,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
SDValue InFlag;
// Skip the incoming flag if present
- if (Node->getOperand(LastOpNum).getValueType() == MVT::Flag)
+ if (Node->getOperand(LastOpNum).getValueType() == MVT::Glue)
LastOpNum--;
if ( (isa<GlobalAddressSDNode>(Callee)) ||
@@ -496,7 +496,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
Chain = CurDAG->getCopyToReg(Chain, dl, Mips::T9, Callee, InFlag);
// Map the JmpLink operands to JALR
- SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(CurDAG->getRegister(Mips::T9, MVT::i32));
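The MVT::Flag to MVT::Glue renames in this file are mechanical; glue is still what ties a MULT/DIV to the MFLO/MFHI nodes that read HI and LO. Restated as a sketch of the pattern the hunks above use:

    // Tie the multiply/divide to its result readers through a glue value.
    SDNode *MulDiv = CurDAG->getMachineNode(Op, dl, MVT::Glue, Op1, Op2);
    SDValue InGlue = SDValue(MulDiv, 0);
    SDNode *Lo = CurDAG->getMachineNode(Mips::MFLO, dl, MVT::i32, MVT::Glue, InGlue);
    SDNode *Hi = CurDAG->getMachineNode(Mips::MFHI, dl, MVT::i32, SDValue(Lo, 1));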
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index b0b99ba..1d7a1c0 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -41,12 +41,15 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::Lo : return "MipsISD::Lo";
case MipsISD::GPRel : return "MipsISD::GPRel";
case MipsISD::Ret : return "MipsISD::Ret";
- case MipsISD::CMov : return "MipsISD::CMov";
case MipsISD::SelectCC : return "MipsISD::SelectCC";
case MipsISD::FPSelectCC : return "MipsISD::FPSelectCC";
case MipsISD::FPBrcond : return "MipsISD::FPBrcond";
case MipsISD::FPCmp : return "MipsISD::FPCmp";
case MipsISD::FPRound : return "MipsISD::FPRound";
+ case MipsISD::MAdd : return "MipsISD::MAdd";
+ case MipsISD::MAddu : return "MipsISD::MAddu";
+ case MipsISD::MSub : return "MipsISD::MSub";
+ case MipsISD::MSubu : return "MipsISD::MSubu";
default : return NULL;
}
}
@@ -57,7 +60,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
Subtarget = &TM.getSubtarget<MipsSubtarget>();
// Mips does not have i1 type, so use i32 for
- // setcc operations results (slt, sgt, ...).
+ // setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);
// Set up the register classes
@@ -69,7 +72,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (!Subtarget->isFP64bit())
addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
- // Load extented operations for i1 types must be promoted
+ // Load extended operations for i1 types must be promoted
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -78,9 +81,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
- // Used by legalize types to correctly generate the setcc result.
- // Without this, every float setcc comes with a AND/OR with the result,
- // we don't want this, since the fpcmp result goes to a flag register,
+ // Used by legalize types to correctly generate the setcc result.
+ // Without this, every float setcc comes with an AND/OR with the result;
+ // we don't want this, since the fpcmp result goes to a flag register,
// which is used implicitly by brcond and select operations.
AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
@@ -100,8 +103,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom);
- // We custom lower AND/OR to handle the case where the DAG contain 'ands/ors'
- // with operands comming from setcc fp comparions. This is necessary since
+ // We custom lower AND/OR to handle the case where the DAG contains 'ands/ors'
+ // with operands coming from setcc fp comparisons. This is necessary since
// the result from these setcc are in a flag registers (FCR31).
setOperationAction(ISD::AND, MVT::i32, Custom);
setOperationAction(ISD::OR, MVT::i32, Custom);
@@ -116,7 +119,10 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTR, MVT::i32, Expand);
+
+ if (!Subtarget->isMips32r2())
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
@@ -152,6 +158,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (!Subtarget->hasSwap())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setTargetDAGCombine(ISD::ADDE);
+ setTargetDAGCombine(ISD::SUBE);
+
setStackPointerRegisterToSaveRestore(Mips::SP);
computeRegisterProperties();
}
@@ -165,10 +174,198 @@ unsigned MipsTargetLowering::getFunctionAlignment(const Function *) const {
return 2;
}
+// SelectMadd -
+// Transforms a subgraph in CurDAG if the following pattern is found:
+// (addc multLo, Lo0), (adde multHi, Hi0),
+// where,
+// multHi/Lo: product of multiplication
+// Lo0: initial value of Lo register
+// Hi0: initial value of Hi register
+// Return true if pattern matching was successful.
+static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) {
+ // ADDENode's second operand must be a flag output of an ADDC node in order
+ // for the matching to be successful.
+ SDNode* ADDCNode = ADDENode->getOperand(2).getNode();
+
+ if (ADDCNode->getOpcode() != ISD::ADDC)
+ return false;
+
+ SDValue MultHi = ADDENode->getOperand(0);
+ SDValue MultLo = ADDCNode->getOperand(0);
+ SDNode* MultNode = MultHi.getNode();
+ unsigned MultOpc = MultHi.getOpcode();
+
+ // MultHi and MultLo must be generated by the same node,
+ if (MultLo.getNode() != MultNode)
+ return false;
+
+ // and it must be a multiplication.
+ if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
+ return false;
+
+ // MultLo and MultHi must be the first and second output of MultNode
+ // respectively.
+ if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
+ return false;
+
+ // Transform this to a MADD only if ADDENode and ADDCNode are the only users
+ // of the values of MultNode, in which case MultNode will be removed in later
+ // phases.
+ // If there exist users other than ADDENode or ADDCNode, this function returns
+ // here, which will result in MultNode being mapped to a single MULT
+ // instruction node rather than a pair of MULT and MADD instructions being
+ // produced.
+ if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
+ return false;
+
+ SDValue Chain = CurDAG->getEntryNode();
+ DebugLoc dl = ADDENode->getDebugLoc();
+
+ // create MipsMAdd(u) node
+ MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
+
+ SDValue MAdd = CurDAG->getNode(MultOpc, dl,
+ MVT::Glue,
+ MultNode->getOperand(0),// Factor 0
+ MultNode->getOperand(1),// Factor 1
+ ADDCNode->getOperand(1),// Lo0
+ ADDENode->getOperand(1));// Hi0
+
+ // create CopyFromReg nodes
+ SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32,
+ MAdd);
+ SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl,
+ Mips::HI, MVT::i32,
+ CopyFromLo.getValue(2));
+
+ // replace uses of adde and addc here
+ if (!SDValue(ADDCNode, 0).use_empty())
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), CopyFromLo);
+
+ if (!SDValue(ADDENode, 0).use_empty())
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), CopyFromHi);
+
+ return true;
+}
+
+// SelectMsub -
+// Transforms a subgraph in CurDAG if the following pattern is found:
+// (subc Lo0, multLo), (sube Hi0, multHi),
+// where,
+// multHi/Lo: product of multiplication
+// Lo0: initial value of Lo register
+// Hi0: initial value of Hi register
+// Return true if pattern matching was successful.
+static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) {
+ // SUBENode's second operand must be a flag output of an SUBC node in order
+ // for the matching to be successful.
+ SDNode* SUBCNode = SUBENode->getOperand(2).getNode();
+
+ if (SUBCNode->getOpcode() != ISD::SUBC)
+ return false;
+
+ SDValue MultHi = SUBENode->getOperand(1);
+ SDValue MultLo = SUBCNode->getOperand(1);
+ SDNode* MultNode = MultHi.getNode();
+ unsigned MultOpc = MultHi.getOpcode();
+
+ // MultHi and MultLo must be generated by the same node,
+ if (MultLo.getNode() != MultNode)
+ return false;
+
+ // and it must be a multiplication.
+ if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
+ return false;
+
+ // MultLo and MultHi must be the first and second output of MultNode
+ // respectively.
+ if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
+ return false;
+
+ // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
+ // of the values of MultNode, in which case MultNode will be removed in later
+ // phases.
+ // If there exist users other than SUBENode or SUBCNode, this function returns
+ // here, which will result in MultNode being mapped to a single MULT
+ // instruction node rather than a pair of MULT and MSUB instructions being
+ // produced.
+ if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
+ return false;
+
+ SDValue Chain = CurDAG->getEntryNode();
+ DebugLoc dl = SUBENode->getDebugLoc();
+
+ // create MipsSub(u) node
+ MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
+
+ SDValue MSub = CurDAG->getNode(MultOpc, dl,
+ MVT::Glue,
+ MultNode->getOperand(0),// Factor 0
+ MultNode->getOperand(1),// Factor 1
+ SUBCNode->getOperand(0),// Lo0
+ SUBENode->getOperand(0));// Hi0
+
+ // create CopyFromReg nodes
+ SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32,
+ MSub);
+ SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl,
+ Mips::HI, MVT::i32,
+ CopyFromLo.getValue(2));
+
+ // replace uses of sube and subc here
+ if (!SDValue(SUBCNode, 0).use_empty())
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), CopyFromLo);
+
+ if (!SDValue(SUBENode, 0).use_empty())
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), CopyFromHi);
+
+ return true;
+}
+
+static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget* Subtarget) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ if (Subtarget->isMips32() && SelectMadd(N, &DAG))
+ return SDValue(N, 0);
+
+ return SDValue();
+}
+
+static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget* Subtarget) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ if (Subtarget->isMips32() && SelectMsub(N, &DAG))
+ return SDValue(N, 0);
+
+ return SDValue();
+}
+
+SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
+ const {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned opc = N->getOpcode();
+
+ switch (opc) {
+ default: break;
+ case ISD::ADDE:
+ return PerformADDECombine(N, DAG, DCI, Subtarget);
+ case ISD::SUBE:
+ return PerformSUBECombine(N, DAG, DCI, Subtarget);
+ }
+
+ return SDValue();
+}
+
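+// Worked example of the ADDE combine above (a hypothetical DAG, i32 values):
+//   (addc (umul_lohi:lo a, b), c)
+//   (adde (umul_lohi:hi a, b), d)
+// is rewritten into a single glued MAddu(a, b, c, d), whose Lo/Hi results
+// are read back through CopyFromReg from Mips::LO and Mips::HI.
+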
SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
- switch (Op.getOpcode())
+ switch (Op.getOpcode())
{
case ISD::AND: return LowerANDOR(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
@@ -194,7 +391,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
-AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
+AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
{
assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -212,7 +409,7 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
return Mips::BRANCH_INVALID;
}
-
+
static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
switch(BC) {
default:
@@ -227,24 +424,24 @@ static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Unknown fp condition code!");
- case ISD::SETEQ:
+ case ISD::SETEQ:
case ISD::SETOEQ: return Mips::FCOND_EQ;
case ISD::SETUNE: return Mips::FCOND_OGL;
- case ISD::SETLT:
+ case ISD::SETLT:
case ISD::SETOLT: return Mips::FCOND_OLT;
- case ISD::SETGT:
+ case ISD::SETGT:
case ISD::SETOGT: return Mips::FCOND_OGT;
- case ISD::SETLE:
- case ISD::SETOLE: return Mips::FCOND_OLE;
+ case ISD::SETLE:
+ case ISD::SETOLE: return Mips::FCOND_OLE;
case ISD::SETGE:
case ISD::SETOGE: return Mips::FCOND_OGE;
case ISD::SETULT: return Mips::FCOND_ULT;
- case ISD::SETULE: return Mips::FCOND_ULE;
+ case ISD::SETULE: return Mips::FCOND_ULE;
case ISD::SETUGT: return Mips::FCOND_UGT;
case ISD::SETUGE: return Mips::FCOND_UGE;
- case ISD::SETUO: return Mips::FCOND_UN;
+ case ISD::SETUO: return Mips::FCOND_UN;
case ISD::SETO: return Mips::FCOND_OR;
- case ISD::SETNE:
+ case ISD::SETNE:
case ISD::SETONE: return Mips::FCOND_NEQ;
case ISD::SETUEQ: return Mips::FCOND_UEQ;
}
@@ -364,7 +561,7 @@ LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const
// Emit the round instruction and bit convert to integer
SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32,
Src, CondReg.getValue(1));
- SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc);
+ SDValue BitCvt = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Trunc);
return BitCvt;
}
@@ -382,11 +579,11 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
// obtain the new stack size.
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
- // The Sub result contains the new stack start address, so it
+ // The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub);
-
- // This node always has two return values: a new stack pointer
+
+ // This node always has two return values: a new stack pointer
// value and a chain
SDValue Ops[2] = { Sub, Chain };
return DAG.getMergeValues(Ops, 2, dl);
@@ -405,9 +602,9 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32);
- SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
+ SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
LHS, True, False, LHS.getOperand(2));
- SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
+ SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
RHS, True, False, RHS.getOperand(2));
return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL);
@@ -416,7 +613,7 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering::
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{
- // The first operand is the chain, the second is the condition, the third is
+ // The first operand is the chain, the second is the condition, the third is
// the block to branch to if the condition is true.
SDValue Chain = Op.getOperand(0);
SDValue Dest = Op.getOperand(2);
@@ -424,55 +621,55 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
if (Op.getOperand(1).getOpcode() != MipsISD::FPCmp)
return Op;
-
+
SDValue CondRes = Op.getOperand(1);
SDValue CCNode = CondRes.getOperand(2);
Mips::CondCode CC =
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
- SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);
+ SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);
- return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
+ return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
Dest, CondRes);
}
SDValue MipsTargetLowering::
LowerSETCC(SDValue Op, SelectionDAG &DAG) const
{
- // The operands to this are the left and right operands to compare (ops #0,
- // and #1) and the condition code to compare them with (op #2) as a
+ // The operands to this are the left and right operands to compare (ops #0,
+ // and #1) and the condition code to compare them with (op #2) as a
// CondCodeSDNode.
- SDValue LHS = Op.getOperand(0);
+ SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
DebugLoc dl = Op.getDebugLoc();
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
-
- return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS,
+
+ return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS,
DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
}
SDValue MipsTargetLowering::
LowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
- SDValue Cond = Op.getOperand(0);
+ SDValue Cond = Op.getOperand(0);
SDValue True = Op.getOperand(1);
SDValue False = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc();
- // if the incomming condition comes from a integer compare, the select
- // operation must be SelectCC or a conditional move if the subtarget
+ // if the incoming condition comes from an integer compare, the select
+ // operation must be SelectCC or a conditional move if the subtarget
// supports it.
if (Cond.getOpcode() != MipsISD::FPCmp) {
if (Subtarget->hasCondMov() && !True.getValueType().isFloatingPoint())
return Op;
- return DAG.getNode(MipsISD::SelectCC, dl, True.getValueType(),
+ return DAG.getNode(MipsISD::SelectCC, dl, True.getValueType(),
Cond, True, False);
}
// if the incoming condition comes from fpcmp, the select
// operation must use FPSelectCC.
SDValue CCNode = Cond.getOperand(2);
- return DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
+ return DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
Cond, True, False, CCNode);
}
@@ -484,16 +681,16 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDVTList VTs = DAG.getVTList(MVT::i32);
-
+
MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering();
-
+
// %gp_rel relocation
- if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
+ if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
- return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
+ return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
}
// %hi/%lo relocation
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
@@ -505,8 +702,8 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
} else {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GOT);
- SDValue ResNode = DAG.getLoad(MVT::i32, dl,
- DAG.getEntryNode(), GA, NULL, 0,
+ SDValue ResNode = DAG.getLoad(MVT::i32, dl,
+ DAG.getEntryNode(), GA, MachinePointerInfo(),
false, false, 0);
// On functions and global targets not internal linked only
// a load from got/GP is necessary for PIC to work.
@@ -531,7 +728,7 @@ SDValue MipsTargetLowering::
LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
SDValue ResNode;
- SDValue HiPart;
+ SDValue HiPart;
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
@@ -546,7 +743,8 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
SDValue Ops[] = { JTI };
HiPart = DAG.getNode(MipsISD::Hi, dl, DAG.getVTList(MVT::i32), Ops, 1);
} else // Emit Load from Global Pointer
- HiPart = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), JTI, NULL, 0,
+ HiPart = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), JTI,
+ MachinePointerInfo(),
false, false, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, JTI);
@@ -565,26 +763,27 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
// gp_rel relocation
- // FIXME: we should reference the constant pool using small data sections,
+ // FIXME: we should reference the constant pool using small data sections,
// but the asm printer currently doens't support this feature without
- // hacking it. This feature should come soon so we can uncomment the
+ // hacking it. This feature should come soon so we can uncomment the
// stuff below.
//if (IsInSmallSection(C->getType())) {
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
- // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
+ // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
- SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
+ SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_ABS_HILO);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else {
- SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
+ SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_GOT);
- SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(),
- CP, NULL, 0, false, false, 0);
+ SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(),
+ CP, MachinePointerInfo::getConstantPool(),
+ false, false, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, Load, Lo);
}
@@ -603,7 +802,8 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), SV, 0,
+ return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
+ MachinePointerInfo(SV),
false, false, 0);
}
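The recurring memory-operand change in this file: the old (const Value*, offset) pair on loads and stores becomes a MachinePointerInfo. Before/after at a store call site, as a sketch:

    // before: DAG.getStore(Chain, dl, Val, Ptr, SV, 0, false, false, 0);
    SDValue Store = DAG.getStore(Chain, dl, Val, Ptr,
                                 MachinePointerInfo(SV),  // was: SV, 0
                                 false, false, 0);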
@@ -614,23 +814,23 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
#include "MipsGenCallingConv.inc"
//===----------------------------------------------------------------------===//
-// TODO: Implement a generic logic using tblgen that can support this.
+// TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules:
// ---
// i32 - Passed in A0, A1, A2, A3 and stack
-// f32 - Only passed in f32 registers if no int reg has been used yet to hold
+// f32 - Only passed in f32 registers if no int reg has been used yet to hold
// an argument. Otherwise, passed in A1, A2, A3 and stack.
-// f64 - Only passed in two aliased f32 registers if no int reg has been used
-// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
+// f64 - Only passed in two aliased f32 registers if no int reg has been used
+// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
// not used, it must be shadowed. If only A3 is available, shadow it and
// go to stack.
//===----------------------------------------------------------------------===//
-static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
+static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
- static const unsigned IntRegsSize=4, FloatRegsSize=2;
+ static const unsigned IntRegsSize=4, FloatRegsSize=2;
static const unsigned IntRegs[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
@@ -642,9 +842,15 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
Mips::D6, Mips::D7
};
- unsigned Reg=0;
- unsigned UnallocIntReg = State.getFirstUnallocated(IntRegs, IntRegsSize);
- bool IntRegUsed = (IntRegs[UnallocIntReg] != (unsigned (Mips::A0)));
+ unsigned Reg = 0;
+ static bool IntRegUsed = false;
+
+ // This must be the first arg of the call if no regs have been allocated.
+ // Initialize IntRegUsed in that case.
+ if (IntRegs[State.getFirstUnallocated(IntRegs, IntRegsSize)] == Mips::A0 &&
+ F32Regs[State.getFirstUnallocated(F32Regs, FloatRegsSize)] == Mips::F12 &&
+ F64Regs[State.getFirstUnallocated(F64Regs, FloatRegsSize)] == Mips::D6)
+ IntRegUsed = false;
// Promote i8 and i16
if (LocVT == MVT::i8 || LocVT == MVT::i16) {
@@ -657,30 +863,48 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
LocInfo = CCValAssign::AExt;
}
- if (ValVT == MVT::i32 || (ValVT == MVT::f32 && IntRegUsed)) {
+ if (ValVT == MVT::i32) {
Reg = State.AllocateReg(IntRegs, IntRegsSize);
IntRegUsed = true;
- LocVT = MVT::i32;
- }
-
- if (ValVT.isFloatingPoint() && !IntRegUsed) {
- if (ValVT == MVT::f32)
- Reg = State.AllocateReg(F32Regs, FloatRegsSize);
- else
- Reg = State.AllocateReg(F64Regs, FloatRegsSize);
- }
+ } else if (ValVT == MVT::f32) {
+ // An int reg has to be marked allocated regardless of whether or not
+ // IntRegUsed is true.
+ Reg = State.AllocateReg(IntRegs, IntRegsSize);
- if (ValVT == MVT::f64 && IntRegUsed) {
- if (UnallocIntReg != IntRegsSize) {
- // If we hit register A3 as the first not allocated, we must
- // mark it as allocated (shadow) and use the stack instead.
- if (IntRegs[UnallocIntReg] != (unsigned (Mips::A3)))
- Reg = Mips::A2;
- for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg)
- State.AllocateReg(UnallocIntReg);
- }
- LocVT = MVT::i32;
- }
+ if (IntRegUsed) {
+ if (Reg) // Int reg is available
+ LocVT = MVT::i32;
+ } else {
+ unsigned FReg = State.AllocateReg(F32Regs, FloatRegsSize);
+ if (FReg) // F32 reg is available
+ Reg = FReg;
+ else if (Reg) // No F32 regs are available, but an int reg is available.
+ LocVT = MVT::i32;
+ }
+ } else if (ValVT == MVT::f64) {
+ // Int regs have to be marked allocated regardless of whether or not
+ // IntRegUsed is true.
+ Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ if (Reg == Mips::A1)
+ Reg = State.AllocateReg(IntRegs, IntRegsSize);
+ else if (Reg == Mips::A3)
+ Reg = 0;
+ State.AllocateReg(IntRegs, IntRegsSize);
+
+ // At this point, Reg is A0, A2 or 0, and all the unavailable integer regs
+ // are marked as allocated.
+ if (IntRegUsed) {
+ if (Reg)// if int reg is available
+ LocVT = MVT::i32;
+ } else {
+ unsigned FReg = State.AllocateReg(F64Regs, FloatRegsSize);
+ if (FReg) // F64 reg is available.
+ Reg = FReg;
+ else if (Reg) // No F64 regs are available, but an int reg is available.
+ LocVT = MVT::i32;
+ }
+ } else
+ assert(false && "cannot handle this ValVT");
if (!Reg) {
unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
@@ -692,8 +916,8 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
return false; // CC must always match
}
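
Hand-derived examples of the assignments the rules above produce
(hypothetical signatures; this assumes CCState tracks the F/D register
aliasing, so allocating F12 also consumes the overlapping D6):

    // i32 only: fills A0..A3, then the stack.
    void f1(int a, int b, int c, int d, int e);
    //   a->A0, b->A1, c->A2, d->A3,
    //   e->stack (first slot past the 16-byte register-save area)

    // Leading floats: FP registers, with the int regs shadowed.
    void f2(float a, double b);
    //   a->F12 (A0 shadowed), b->D7 (D6 aliases F12; A1..A3 shadowed)

    // An int first: every later float travels in int regs.
    void f3(int a, double b);
    //   a->A0, b->A2:A3 (A1 skipped to keep the pair even-aligned)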
-static bool CC_MipsO32_VarArgs(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
+static bool CC_MipsO32_VarArgs(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
static const unsigned IntRegsSize=4;
@@ -736,7 +960,7 @@ static bool CC_MipsO32_VarArgs(unsigned ValNo, EVT ValVT,
IntRegs[UnallocIntReg] == (unsigned (Mips::A2))) {
unsigned Reg = State.AllocateReg(IntRegs, IntRegsSize);
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
- // Shadow the next register so it can be used
+ // Shadow the next register so it can be used
// later to get the other 32-bit part.
State.AllocateReg(IntRegs, IntRegsSize);
return false;
@@ -786,13 +1010,13 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// To meet O32 ABI, Mips must always allocate 16 bytes on
// the stack (even if less than 4 are used as arguments)
if (Subtarget->isABI_O32()) {
- int VTsize = EVT(MVT::i32).getSizeInBits()/8;
+ int VTsize = MVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3), true);
- CCInfo.AnalyzeCallOperands(Outs,
+ CCInfo.AnalyzeCallOperands(Outs,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
} else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
-
+
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
@@ -801,7 +1025,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
- // First/LastArgStackLoc contains the first/last
+ // First/LastArgStackLoc contains the first/last
// "at stack" argument location.
int LastArgStackLoc = 0;
unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
@@ -814,12 +1038,12 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
+ case CCValAssign::Full:
if (Subtarget->isABI_O32() && VA.isRegLoc()) {
if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
DAG.getConstant(0, getPointerTy()));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
@@ -827,7 +1051,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi));
continue;
- }
+ }
}
break;
case CCValAssign::SExt:
@@ -840,17 +1064,17 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
}
-
- // Arguments that can be passed on register must be kept at
+
+ // Arguments that can be passed in registers must be kept in the
// RegsToPass vector
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
continue;
}
-
+
// Register can't get to this point...
assert(VA.isMemLoc());
-
+
// Create the frame index object for this incoming parameter
// This guarantees that when allocating the Local Area the first
// 16 bytes, which are always reserved, won't be overwritten.
@@ -861,50 +1085,51 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
- // emit ISD::STORE whichs stores the
+ // emit ISD::STORE which stores the
// parameter value to a stack Location
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
false, false, 0));
}
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Build a sequence of copy-to-reg nodes chained together with token
+ // Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
// The InFlag is necessary since all emitted instructions must be
// stuck together.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
- // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
- // node so that legalize doesn't hack it.
+ // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+ // node so that legalize doesn't hack it.
unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
getPointerTy(), 0, OpFlag);
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
getPointerTy(), OpFlag);
// MipsJmpLink = #chain, #target_address, #opt_in_flags...
- // = Chain, Callee, Reg#1, Reg#2, ...
+ // = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
- // Add argument registers to the end of the list so that they are
+ // Add argument registers to the end of the list so that they are
// known live into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
@@ -916,17 +1141,17 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
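
The MVT::Flag to MVT::Glue change here (and SDNPOptInFlag to SDNPOptInGlue in
the .td hunks below) is the LLVM 2.9 rename of the "flag" edge that keeps
glued nodes adjacent during scheduling; only the spelling changed. The idiom,
as a fragment assuming an in-scope DAG, dl and Ops vector:

    // A call node producing a chain plus a glue value; the glue result
    // (value #1) is threaded into nodes that must stay attached to the call.
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Call = DAG.getNode(MipsISD::JmpLink, dl, VTs, &Ops[0], Ops.size());
    SDValue Ch   = Call.getValue(0); // chain result
    SDValue Glue = Call.getValue(1); // glue result, formerly MVT::Flag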
- // Create a stack location to hold GP when PIC is used. This stack
- // location is used on function prologue to save GP and also after all
- // emited CALL's to restore GP.
+ // Create a stack location to hold GP when PIC is used. This stack
+ // location is used on function prologue to save GP and also after all
+ // emitted calls to restore GP.
if (IsPIC) {
- // Function can have an arbitrary number of calls, so
+ // Function can have an arbitrary number of calls, so
// hold the LastArgStackLoc with the biggest offset.
int FI;
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
if (LastArgStackLoc >= MipsFI->getGPStackOffset()) {
LastArgStackLoc = (!LastArgStackLoc) ? (16) : (LastArgStackLoc+4);
- // Create the frame index only once. SPOffset here can be anything
+ // Create the frame index only once. SPOffset here can be anything
// (this will be fixed on processFunctionBeforeFrameFinalized)
if (MipsFI->getGPStackOffset() == -1) {
FI = MFI->CreateFixedObject(4, 0, true);
@@ -937,14 +1162,15 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Reload GP value.
FI = MipsFI->getGPFI();
- SDValue FIN = DAG.getFrameIndex(FI,getPointerTy());
- SDValue GPLoad = DAG.getLoad(MVT::i32, dl, Chain, FIN, NULL, 0,
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue GPLoad = DAG.getLoad(MVT::i32, dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
Chain = GPLoad.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
+ Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
GPLoad, SDValue(0,0));
InFlag = Chain.getValue(1);
- }
+ }
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
@@ -988,7 +1214,7 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
-/// LowerFormalArguments - transform physical registers into virtual registers
+/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments placed on the stack.
SDValue
MipsTargetLowering::LowerFormalArguments(SDValue Chain,
@@ -1018,7 +1244,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext());
if (Subtarget->isABI_O32())
- CCInfo.AnalyzeFormalArguments(Ins,
+ CCInfo.AnalyzeFormalArguments(Ins,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
else
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
@@ -1037,22 +1263,22 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
TargetRegisterClass *RC = 0;
if (RegVT == MVT::i32)
- RC = Mips::CPURegsRegisterClass;
- else if (RegVT == MVT::f32)
+ RC = Mips::CPURegsRegisterClass;
+ else if (RegVT == MVT::f32)
RC = Mips::FGR32RegisterClass;
else if (RegVT == MVT::f64) {
- if (!Subtarget->isSingleFloat())
+ if (!Subtarget->isSingleFloat())
RC = Mips::AFGR64RegisterClass;
- } else
+ } else
llvm_unreachable("RegVT not supported by FormalArguments Lowering");
- // Transform the arguments stored on
+ // Transform the arguments stored on
// physical registers into virtual ones
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegEnd, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
-
- // If this is an 8 or 16-bit value, it has been passed promoted
- // to 32 bits. Insert an assert[sz]ext to capture this, then
+
+ // If this is an 8 or 16-bit value, it has been passed promoted
+ // to 32 bits. Insert an assert[sz]ext to capture this, then
// truncate to the right size.
if (VA.getLocInfo() != CCValAssign::Full) {
unsigned Opcode = 0;
@@ -1061,22 +1287,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
else if (VA.getLocInfo() == CCValAssign::ZExt)
Opcode = ISD::AssertZext;
if (Opcode)
- ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
+ ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
- // Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
+ // Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
if (Subtarget->isABI_O32()) {
- if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
+ if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
- unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
+ unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
VA.getLocReg()+1, RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
- SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
- SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2);
- ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
+ SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, ArgValue2, ArgValue);
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Pair);
}
}
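
The rewritten f64 case assembles the double from its two i32 register halves
with one BUILD_PAIR plus one BITCAST instead of the previous per-half f32
bitcasts. The equivalent scalar arithmetic, as a sketch where lo/hi stand for
ArgValue2/ArgValue:

    #include <cstdint>
    #include <cstring>

    // BUILD_PAIR(lo, hi) : i64, then BITCAST i64 -> f64.
    double buildF64(uint32_t lo, uint32_t hi) {
      uint64_t pair = ((uint64_t)hi << 32) | lo;
      double d;
      std::memcpy(&d, &pair, sizeof d);   // the bitcast
      return d;
    }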
@@ -1088,13 +1313,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// The last argument is not a register anymore
ArgRegEnd = 0;
-
- // The stack pointer offset is relative to the caller stack frame.
- // Since the real stack size is unknown here, a negative SPOffset
+
+ // The stack pointer offset is relative to the caller stack frame.
+ // Since the real stack size is unknown here, a negative SPOffset
// is used so there's a way to adjust these offsets when the stack
- // size get known (on EliminateFrameIndex). A dummy SPOffset is
+ // size gets known (on EliminateFrameIndex). A dummy SPOffset is
// used instead of a direct negative address (which is recorded to
- // be used on emitPrologue) to avoid mis-calc of the first stack
+ // be used on emitPrologue) to avoid miscalculating the first stack
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
@@ -1104,7 +1329,8 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0,
+ InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
@@ -1124,11 +1350,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// To meet ABI, when VARARGS are passed on registers, the registers
// must have their values written to the caller stack frame. If the last
- // argument was placed in the stack, there's no need to save any register.
+ // argument was placed in the stack, there's no need to save any register.
if ((isVarArg) && (Subtarget->isABI_O32() && ArgRegEnd)) {
if (StackPtr.getNode() == 0)
StackPtr = DAG.getRegister(StackReg, getPointerTy());
-
+
// The last register argument that must be saved is Mips::A3
TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
unsigned StackLoc = ArgLocs.size()-1;
@@ -1140,7 +1366,8 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
int FI = MFI->CreateFixedObject(4, 0, true);
MipsFI->recordStoreVarArgsFI(FI, -(4+(StackLoc*4)));
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
- OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, NULL, 0,
+ OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
+ MachinePointerInfo(),
false, false, 0));
// Record the frame index of the first variable argument
@@ -1150,7 +1377,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
}
}
- // All stores are grouped in one node to allow the matching between
+ // All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens on vararg functions.
if (!OutChains.empty()) {
OutChains.push_back(Chain);
@@ -1183,7 +1410,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
- // If this is the first return lowered for this function, add
+ // If this is the first return lowered for this function, add
// the regs to the liveout set for the function.
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
@@ -1198,7 +1425,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag);
// guarantee that all emitted copies are
@@ -1215,7 +1442,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned Reg = MipsFI->getSRetReturnReg();
- if (!Reg)
+ if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
@@ -1225,10 +1452,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Return on Mips is always a "jr $ra"
if (Flag.getNode())
- return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
+ return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
else // Return Void
- return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
+ return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32));
}
@@ -1239,21 +1466,21 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
MipsTargetLowering::ConstraintType MipsTargetLowering::
-getConstraintType(const std::string &Constraint) const
+getConstraintType(const std::string &Constraint) const
{
- // Mips specific constrainy
+ // Mips-specific constraints
// GCC config/mips/constraints.md
//
- // 'd' : An address register. Equivalent to r
- // unless generating MIPS16 code.
- // 'y' : Equivalent to r; retained for
- // backwards compatibility.
- // 'f' : Floating Point registers.
+ // 'd' : An address register. Equivalent to r
+ // unless generating MIPS16 code.
+ // 'y' : Equivalent to r; retained for
+ // backwards compatibility.
+ // 'f' : Floating Point registers.
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
- case 'd':
- case 'y':
+ case 'd':
+ case 'y':
case 'f':
return C_RegisterClass;
break;
@@ -1262,6 +1489,37 @@ getConstraintType(const std::string &Constraint) const
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+MipsTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ const Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'd':
+ case 'y':
+ if (type->isIntegerTy())
+ weight = CW_Register;
+ break;
+ case 'f':
+ if (type->isFloatTy())
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
+
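
The new hook ranks how well an operand fits a constraint letter when an asm
string offers alternatives. For context, the letters in question as they show
up in user code (hypothetical GCC-style inline asm, MIPS target assumed):

    float add_s(float a, float b) {
      float r;
      // 'f' requests an FPU register; 'd' and 'y' request a GPR, like 'r'.
      asm("add.s %0, %1, %2" : "=f"(r) : "f"(a), "f"(b));
      return r;
    }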
/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
@@ -1275,7 +1533,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'f':
if (VT == MVT::f32)
return std::make_pair(0U, Mips::FGR32RegisterClass);
- if (VT == MVT::f64)
+ if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return std::make_pair(0U, Mips::AFGR64RegisterClass);
}
@@ -1293,15 +1551,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
if (Constraint.size() != 1)
return std::vector<unsigned>();
- switch (Constraint[0]) {
+ switch (Constraint[0]) {
default : break;
case 'r':
// GCC Mips Constraint Letters
- case 'd':
- case 'y':
- return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3,
- Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1,
- Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7,
+ case 'd':
+ case 'y':
+ return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3,
+ Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1,
+ Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7,
Mips::T8, 0);
case 'f':
@@ -1313,15 +1571,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29,
Mips::F30, Mips::F31, 0);
else
- return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8,
- Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26,
+ return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8,
+ Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26,
Mips::F28, Mips::F30, 0);
}
- if (VT == MVT::f64)
+ if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
- return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
- Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,
+ return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
+ Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,
Mips::D14, Mips::D15, 0);
}
return std::vector<unsigned>();
@@ -1336,5 +1594,7 @@ MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
if (VT != MVT::f32 && VT != MVT::f64)
return false;
+ if (Imm.isNegZero())
+ return false;
return Imm.isZero();
}
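
The rationale for the extra check: +0.0 is free on Mips because its bit
pattern is all zeros (mtc1 $zero, $fN), while -0.0 has the sign bit set and
would need a real constant to be materialized. A sketch of the bit patterns:

    #include <cstdint>
    #include <cstring>

    uint32_t bitsOf(float f) {
      uint32_t b;
      std::memcpy(&b, &f, sizeof b);
      return b;
    }
    // bitsOf(0.0f)  == 0x00000000 -> materialize from the zero register
    // bitsOf(-0.0f) == 0x80000000 -> needs a constant-pool load or lui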
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index 460747b..9d6b9f3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -40,9 +40,6 @@ namespace llvm {
// Handle gp_rel (small data/bss sections) relocation.
GPRel,
- // Conditional Move
- CMov,
-
// Select CC Pseudo Instruction
SelectCC,
@@ -59,7 +56,13 @@ namespace llvm {
FPRound,
// Return
- Ret
+ Ret,
+
+ // MAdd/Sub nodes
+ MAdd,
+ MAddu,
+ MSub,
+ MSubu
};
}
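
The new ISD nodes model the Mips multiply-accumulate instructions, which fold
a multiply into the 64-bit HI/LO accumulator. Their semantics as plain
integer arithmetic (signed variant shown; MAddu/MSubu are the unsigned
analogues, and msub subtracts the product instead):

    #include <cstdint>

    // madd rs, rt : (HI,LO) += rs * rt
    void madd(uint32_t &hi, uint32_t &lo, int32_t rs, int32_t rt) {
      int64_t acc = ((int64_t)hi << 32) | lo;
      acc += (int64_t)rs * (int64_t)rt;
      hi = (uint32_t)(acc >> 32);
      lo = (uint32_t)acc;
    }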
@@ -83,6 +86,8 @@ namespace llvm {
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
+
+ virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
private:
// Subtarget Info
const MipsSubtarget *Subtarget;
@@ -139,6 +144,11 @@ namespace llvm {
// Inline asm support
ConstraintType getConstraintType(const std::string &Constraint) const;
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
index cff79966d..977e0df 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -32,7 +32,7 @@ def SDT_MipsFPCmp : SDTypeProfile<1, 3, [SDTCisVT<0, i32>,
def SDT_MipsFPSelectCC : SDTypeProfile<1, 4, [SDTCisInt<1>, SDTCisInt<4>,
SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>]>;
-def MipsFPRound : SDNode<"MipsISD::FPRound", SDTFPRoundOp, [SDNPOptInFlag]>;
+def MipsFPRound : SDNode<"MipsISD::FPRound", SDTFPRoundOp, [SDNPOptInGlue]>;
def MipsFPBrcond : SDNode<"MipsISD::FPBrcond", SDT_MipsFPBrcond,
[SDNPHasChain]>;
def MipsFPCmp : SDNode<"MipsISD::FPCmp", SDT_MipsFPCmp>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index 320c5b8..b70266a 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -19,41 +19,53 @@ include "MipsInstrFormats.td"
def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
-def SDT_MipsSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 2>,
+def SDT_MipsSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 2>,
SDTCisSameAs<2, 3>, SDTCisInt<1>]>;
-def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
+def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>,
SDTCisInt<4>]>;
def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+def SDT_MipsMAddMSub : SDTypeProfile<0, 4,
+ [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisSameAs<2, 3>]>;
+
// Call
-def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag,
+def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
-// Hi and Lo nodes are used to handle global addresses. Used on
-// MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol
+// Hi and Lo nodes are used to handle global addresses. Used in
+// MipsISelLowering to lower things like GlobalAddress and ExternalSymbol in the
// static model. (nothing to do with Mips Registers Hi and Lo)
def MipsHi : SDNode<"MipsISD::Hi", SDTIntUnaryOp>;
def MipsLo : SDNode<"MipsISD::Lo", SDTIntUnaryOp>;
def MipsGPRel : SDNode<"MipsISD::GPRel", SDTIntUnaryOp>;
// Return
-def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
- SDNPOptInFlag]>;
+def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
+ SDNPOptInGlue]>;
// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// Select Condition Code
def MipsSelectCC : SDNode<"MipsISD::SelectCC", SDT_MipsSelectCC>;
-// Conditional Move
-def MipsCMov : SDNode<"MipsISD::CMov", SDT_MipsCMov>;
+// MAdd*/MSub* nodes
+def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub,
+ [SDNPOptInGlue, SDNPOutGlue]>;
+def MipsMAddu : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub,
+ [SDNPOptInGlue, SDNPOutGlue]>;
+def MipsMSub : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub,
+ [SDNPOptInGlue, SDNPOutGlue]>;
+def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub,
+ [SDNPOptInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// Mips Instruction Predicate Definitions.
@@ -62,6 +74,8 @@ def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">;
def HasBitCount : Predicate<"Subtarget.hasBitCount()">;
def HasSwap : Predicate<"Subtarget.hasSwap()">;
def HasCondMov : Predicate<"Subtarget.hasCondMov()">;
+def IsMips32 : Predicate<"Subtarget.isMips32()">;
+def IsMips32r2 : Predicate<"Subtarget.isMips32r2()">;
//===----------------------------------------------------------------------===//
// Mips Operand, Complex Patterns and Transformations Definitions.
@@ -126,90 +140,66 @@ def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], []>;
let isCommutable = 1 in
class ArithR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode,
InstrItinClass itin>:
- FR< op,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], itin>;
+ FR<op, func, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], itin>;
let isCommutable = 1 in
class ArithOverflowR<bits<6> op, bits<6> func, string instr_asm>:
- FR< op,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [], IIAlu>;
+ FR<op, func, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"), [], IIAlu>;
// Arithmetic 2 register operands
class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
Operand Od, PatLeaf imm_type> :
- FI< op,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, Od:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, imm_type:$c))], IIAlu>;
+ FI<op, (outs CPURegs:$dst), (ins CPURegs:$b, Od:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, imm_type:$c))], IIAlu>;
class ArithOverflowI<bits<6> op, string instr_asm, SDNode OpNode,
Operand Od, PatLeaf imm_type> :
- FI< op,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, Od:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [], IIAlu>;
+ FI<op, (outs CPURegs:$dst), (ins CPURegs:$b, Od:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"), [], IIAlu>;
// Arithmetic Multiply ADD/SUB
-let rd=0 in
-class MArithR<bits<6> func, string instr_asm> :
- FR< 0x1c,
- func,
- (outs CPURegs:$rs),
- (ins CPURegs:$rt),
- !strconcat(instr_asm, "\t$rs, $rt"),
- [], IIImul>;
+let rd = 0, shamt = 0, Defs = [HI, LO], Uses = [HI, LO] in
+class MArithR<bits<6> func, string instr_asm, SDNode op> :
+ FR<0x1c, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
+ !strconcat(instr_asm, "\t$rs, $rt"),
+ [(op CPURegs:$rs, CPURegs:$rt, LO, HI)], IIImul>;
// Logical
class LogicR<bits<6> func, string instr_asm, SDNode OpNode>:
- FR< 0x00,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], IIAlu>;
+ FR<0x00, func, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], IIAlu>;
class LogicI<bits<6> op, string instr_asm, SDNode OpNode>:
- FI< op,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, uimm16:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt16:$c))], IIAlu>;
+ FI<op, (outs CPURegs:$dst), (ins CPURegs:$b, uimm16:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt16:$c))], IIAlu>;
class LogicNOR<bits<6> op, bits<6> func, string instr_asm>:
- FR< op,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (not (or CPURegs:$b, CPURegs:$c)))], IIAlu>;
+ FR<op, func, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (not (or CPURegs:$b, CPURegs:$c)))], IIAlu>;
// Shifts
-let rt = 0 in
-class LogicR_shift_imm<bits<6> func, string instr_asm, SDNode OpNode>:
- FR< 0x00,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, shamt:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt5:$c))], IIAlu>;
+class LogicR_shift_rotate_imm<bits<6> func, bits<5> _rs, string instr_asm,
+ SDNode OpNode>:
+ FR<0x00, func, (outs CPURegs:$dst), (ins CPURegs:$b, shamt:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt5:$c))], IIAlu> {
+ let rs = _rs;
+}
-class LogicR_shift_reg<bits<6> func, string instr_asm, SDNode OpNode>:
- FR< 0x00,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], IIAlu>;
+class LogicR_shift_rotate_reg<bits<6> func, bits<5> _shamt, string instr_asm,
+ SDNode OpNode>:
+ FR<0x00, func, (outs CPURegs:$dst), (ins CPURegs:$c, CPURegs:$b),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, CPURegs:$c))], IIAlu> {
+ let shamt = _shamt;
+}
// Load Upper Immediate
class LoadUpper<bits<6> op, string instr_asm>:
@@ -222,76 +212,55 @@ class LoadUpper<bits<6> op, string instr_asm>:
// Memory Load/Store
let canFoldAsLoad = 1, hasDelaySlot = 1 in
class LoadM<bits<6> op, string instr_asm, PatFrag OpNode>:
- FI< op,
- (outs CPURegs:$dst),
- (ins mem:$addr),
- !strconcat(instr_asm, "\t$dst, $addr"),
- [(set CPURegs:$dst, (OpNode addr:$addr))], IILoad>;
+ FI<op, (outs CPURegs:$dst), (ins mem:$addr),
+ !strconcat(instr_asm, "\t$dst, $addr"),
+ [(set CPURegs:$dst, (OpNode addr:$addr))], IILoad>;
class StoreM<bits<6> op, string instr_asm, PatFrag OpNode>:
- FI< op,
- (outs),
- (ins CPURegs:$dst, mem:$addr),
- !strconcat(instr_asm, "\t$dst, $addr"),
- [(OpNode CPURegs:$dst, addr:$addr)], IIStore>;
+ FI<op, (outs), (ins CPURegs:$dst, mem:$addr),
+ !strconcat(instr_asm, "\t$dst, $addr"),
+ [(OpNode CPURegs:$dst, addr:$addr)], IIStore>;
// Conditional Branch
let isBranch = 1, isTerminator=1, hasDelaySlot = 1 in {
class CBranch<bits<6> op, string instr_asm, PatFrag cond_op>:
- FI< op,
- (outs),
- (ins CPURegs:$a, CPURegs:$b, brtarget:$offset),
- !strconcat(instr_asm, "\t$a, $b, $offset"),
- [(brcond (cond_op CPURegs:$a, CPURegs:$b), bb:$offset)],
- IIBranch>;
-
+ FI<op, (outs), (ins CPURegs:$a, CPURegs:$b, brtarget:$offset),
+ !strconcat(instr_asm, "\t$a, $b, $offset"),
+ [(brcond (cond_op CPURegs:$a, CPURegs:$b), bb:$offset)],
+ IIBranch>;
class CBranchZero<bits<6> op, string instr_asm, PatFrag cond_op>:
- FI< op,
- (outs),
- (ins CPURegs:$src, brtarget:$offset),
- !strconcat(instr_asm, "\t$src, $offset"),
- [(brcond (cond_op CPURegs:$src, 0), bb:$offset)],
- IIBranch>;
+ FI<op, (outs), (ins CPURegs:$src, brtarget:$offset),
+ !strconcat(instr_asm, "\t$src, $offset"),
+ [(brcond (cond_op CPURegs:$src, 0), bb:$offset)],
+ IIBranch>;
}
// SetCC
class SetCC_R<bits<6> op, bits<6> func, string instr_asm,
PatFrag cond_op>:
- FR< op,
- func,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, CPURegs:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (cond_op CPURegs:$b, CPURegs:$c))],
- IIAlu>;
+ FR<op, func, (outs CPURegs:$dst), (ins CPURegs:$b, CPURegs:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (cond_op CPURegs:$b, CPURegs:$c))],
+ IIAlu>;
class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op,
Operand Od, PatLeaf imm_type>:
- FI< op,
- (outs CPURegs:$dst),
- (ins CPURegs:$b, Od:$c),
- !strconcat(instr_asm, "\t$dst, $b, $c"),
- [(set CPURegs:$dst, (cond_op CPURegs:$b, imm_type:$c))],
- IIAlu>;
+ FI<op, (outs CPURegs:$dst), (ins CPURegs:$b, Od:$c),
+ !strconcat(instr_asm, "\t$dst, $b, $c"),
+ [(set CPURegs:$dst, (cond_op CPURegs:$b, imm_type:$c))],
+ IIAlu>;
// Unconditional branch
let isBranch=1, isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
class JumpFJ<bits<6> op, string instr_asm>:
- FJ< op,
- (outs),
- (ins brtarget:$target),
- !strconcat(instr_asm, "\t$target"),
- [(br bb:$target)], IIBranch>;
+ FJ<op, (outs), (ins brtarget:$target),
+ !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch>;
let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1 in
class JumpFR<bits<6> op, bits<6> func, string instr_asm>:
- FR< op,
- func,
- (outs),
- (ins CPURegs:$target),
- !strconcat(instr_asm, "\t$target"),
- [(brind CPURegs:$target)], IIBranch>;
+ FR<op, func, (outs), (ins CPURegs:$target),
+ !strconcat(instr_asm, "\t$target"), [(brind CPURegs:$target)], IIBranch>;
// Jump and Link (Call)
let isCall=1, hasDelaySlot=1,
@@ -299,86 +268,64 @@ let isCall=1, hasDelaySlot=1,
Defs = [AT, V0, V1, A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
K0, K1, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9], Uses = [GP] in {
class JumpLink<bits<6> op, string instr_asm>:
- FJ< op,
- (outs),
- (ins calltarget:$target, variable_ops),
- !strconcat(instr_asm, "\t$target"),
- [(MipsJmpLink imm:$target)], IIBranch>;
+ FJ<op, (outs), (ins calltarget:$target, variable_ops),
+ !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
+ IIBranch>;
let rd=31 in
class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm>:
- FR< op,
- func,
- (outs),
- (ins CPURegs:$rs, variable_ops),
- !strconcat(instr_asm, "\t$rs"),
- [(MipsJmpLink CPURegs:$rs)], IIBranch>;
+ FR<op, func, (outs), (ins CPURegs:$rs, variable_ops),
+ !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink CPURegs:$rs)], IIBranch>;
class BranchLink<string instr_asm>:
- FI< 0x1,
- (outs),
- (ins CPURegs:$rs, brtarget:$target, variable_ops),
- !strconcat(instr_asm, "\t$rs, $target"),
- [], IIBranch>;
+ FI<0x1, (outs), (ins CPURegs:$rs, brtarget:$target, variable_ops),
+ !strconcat(instr_asm, "\t$rs, $target"), [], IIBranch>;
}
// Mul, Div
class MulDiv<bits<6> func, string instr_asm, InstrItinClass itin>:
- FR< 0x00,
- func,
- (outs),
- (ins CPURegs:$a, CPURegs:$b),
- !strconcat(instr_asm, "\t$a, $b"),
- [], itin>;
+ FR<0x00, func, (outs), (ins CPURegs:$a, CPURegs:$b),
+ !strconcat(instr_asm, "\t$a, $b"), [], itin>;
// Move from Hi/Lo
class MoveFromLOHI<bits<6> func, string instr_asm>:
- FR< 0x00,
- func,
- (outs CPURegs:$dst),
- (ins),
- !strconcat(instr_asm, "\t$dst"),
- [], IIHiLo>;
+ FR<0x00, func, (outs CPURegs:$dst), (ins),
+ !strconcat(instr_asm, "\t$dst"), [], IIHiLo>;
class MoveToLOHI<bits<6> func, string instr_asm>:
- FR< 0x00,
- func,
- (outs),
- (ins CPURegs:$src),
- !strconcat(instr_asm, "\t$src"),
- [], IIHiLo>;
+ FR<0x00, func, (outs), (ins CPURegs:$src),
+ !strconcat(instr_asm, "\t$src"), [], IIHiLo>;
class EffectiveAddress<string instr_asm> :
- FI<0x09,
- (outs CPURegs:$dst),
- (ins mem:$addr),
- instr_asm,
- [(set CPURegs:$dst, addr:$addr)], IIAlu>;
+ FI<0x09, (outs CPURegs:$dst), (ins mem:$addr),
+ instr_asm, [(set CPURegs:$dst, addr:$addr)], IIAlu>;
// Count Leading Ones/Zeros in Word
-class CountLeading<bits<6> func, string instr_asm, SDNode CountOp>:
- FR< 0x1c, func, (outs CPURegs:$dst), (ins CPURegs:$src),
- !strconcat(instr_asm, "\t$dst, $src"),
- [(set CPURegs:$dst, (CountOp CPURegs:$src))], IIAlu>;
+class CountLeading<bits<6> func, string instr_asm, list<dag> pattern>:
+ FR<0x1c, func, (outs CPURegs:$dst), (ins CPURegs:$src),
+ !strconcat(instr_asm, "\t$dst, $src"), pattern, IIAlu>,
+ Requires<[HasBitCount]> {
+ let shamt = 0;
+ let rt = rd;
+}
// Sign Extend in Register.
class SignExtInReg<bits<6> func, string instr_asm, ValueType vt>:
- FR< 0x3f, func, (outs CPURegs:$dst), (ins CPURegs:$src),
- !strconcat(instr_asm, "\t$dst, $src"),
- [(set CPURegs:$dst, (sext_inreg CPURegs:$src, vt))], NoItinerary>;
+ FR<0x3f, func, (outs CPURegs:$dst), (ins CPURegs:$src),
+ !strconcat(instr_asm, "\t$dst, $src"),
+ [(set CPURegs:$dst, (sext_inreg CPURegs:$src, vt))], NoItinerary>;
// Byte Swap
class ByteSwap<bits<6> func, string instr_asm>:
- FR< 0x1f, func, (outs CPURegs:$dst), (ins CPURegs:$src),
- !strconcat(instr_asm, "\t$dst, $src"),
- [(set CPURegs:$dst, (bswap CPURegs:$src))], NoItinerary>;
+ FR<0x1f, func, (outs CPURegs:$dst), (ins CPURegs:$src),
+ !strconcat(instr_asm, "\t$dst, $src"),
+ [(set CPURegs:$dst, (bswap CPURegs:$src))], NoItinerary>;
// Conditional Move
class CondMov<bits<6> func, string instr_asm, PatLeaf MovCode>:
- FR< 0x00, func, (outs CPURegs:$dst), (ins CPURegs:$F, CPURegs:$T,
- CPURegs:$cond), !strconcat(instr_asm, "\t$dst, $T, $cond"),
- [(set CPURegs:$dst, (MipsCMov CPURegs:$F, CPURegs:$T,
- CPURegs:$cond, MovCode))], NoItinerary>;
+ FR<0x00, func, (outs CPURegs:$dst), (ins CPURegs:$F, CPURegs:$T,
+ CPURegs:$cond), !strconcat(instr_asm, "\t$dst, $T, $cond"),
+ [], NoItinerary>;
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -408,13 +355,13 @@ def NOREORDER : MipsPseudo<(outs), (ins), ".set\tnoreorder", []>;
def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$picreg), ".cpload\t$picreg", []>;
def CPRESTORE : MipsPseudo<(outs), (ins uimm16:$loc), ".cprestore\t$loc\n", []>;
-// The supported Mips ISAs dont have any instruction close to the SELECT_CC
+// The supported Mips ISAs don't have any instruction close to the SELECT_CC
// operation. The solution is to create a Mips pseudo SELECT_CC instruction
-// (MipsSelectCC), use LowerSELECT_CC to generate this instruction and finally
+// (MipsSelectCC), use LowerSELECT_CC to generate this instruction and finally
// replace it with actually supported nodes in EmitInstrWithCustomInserter.
let usesCustomInserter = 1 in {
- class PseudoSelCC<RegisterClass RC, string asmstr>:
- MipsPseudo<(outs RC:$dst), (ins CPURegs:$CmpRes, RC:$T, RC:$F), asmstr,
+ class PseudoSelCC<RegisterClass RC, string asmstr>:
+ MipsPseudo<(outs RC:$dst), (ins CPURegs:$CmpRes, RC:$T, RC:$F), asmstr,
[(set RC:$dst, (MipsSelectCC CPURegs:$CmpRes, RC:$T, RC:$F))]>;
}
@@ -451,12 +398,18 @@ def XOR : LogicR<0x26, "xor", xor>;
def NOR : LogicNOR<0x00, 0x27, "nor">;
/// Shift Instructions
-def SLL : LogicR_shift_imm<0x00, "sll", shl>;
-def SRL : LogicR_shift_imm<0x02, "srl", srl>;
-def SRA : LogicR_shift_imm<0x03, "sra", sra>;
-def SLLV : LogicR_shift_reg<0x04, "sllv", shl>;
-def SRLV : LogicR_shift_reg<0x06, "srlv", srl>;
-def SRAV : LogicR_shift_reg<0x07, "srav", sra>;
+def SLL : LogicR_shift_rotate_imm<0x00, 0x00, "sll", shl>;
+def SRL : LogicR_shift_rotate_imm<0x02, 0x00, "srl", srl>;
+def SRA : LogicR_shift_rotate_imm<0x03, 0x00, "sra", sra>;
+def SLLV : LogicR_shift_rotate_reg<0x04, 0x00, "sllv", shl>;
+def SRLV : LogicR_shift_rotate_reg<0x06, 0x00, "srlv", srl>;
+def SRAV : LogicR_shift_rotate_reg<0x07, 0x00, "srav", sra>;
+
+// Rotate Instructions
+let Predicates = [IsMips32r2] in {
+ def ROTR : LogicR_shift_rotate_imm<0x02, 0x01, "rotr", rotr>;
+ def ROTRV : LogicR_shift_rotate_reg<0x06, 0x01, "rotrv", rotr>;
+}
/// Load and Store Instructions
def LB : LoadM<0x20, "lb", sextloadi8>;
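
Why the shift classes grew an extra field: in MIPS32r2 the rotates reuse the
shift encodings and are distinguished only by a formerly-zero field (rs for
the immediate form, shamt for the register form). A sketch of the R-type
encoding that shows the overlap:

    #include <cstdint>

    uint32_t encodeR(unsigned op, unsigned rs, unsigned rt, unsigned rd,
                     unsigned shamt, unsigned funct) {
      return (op << 26) | (rs << 21) | (rt << 16) | (rd << 11) |
             (shamt << 6) | funct;
    }
    // srl  $1, $2, 3 : encodeR(0, /*rs=*/0, 2, 1, 3, 0x02)
    // rotr $1, $2, 3 : encodeR(0, /*rs=*/1, 2, 1, 3, 0x02)  // same funct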
@@ -493,7 +446,7 @@ let isReturn=1, isTerminator=1, hasDelaySlot=1,
def RET : FR <0x00, 0x02, (outs), (ins CPURegs:$target),
"jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>;
-/// Multiply and Divide Instructions.
+/// Multiply and Divide Instructions.
let Defs = [HI, LO] in {
def MULT : MulDiv<0x18, "mult", IIImul>;
def MULTu : MulDiv<0x19, "multu", IIImul>;
@@ -521,10 +474,10 @@ let Predicates = [HasSEInReg] in {
}
/// Count Leading
-let Predicates = [HasBitCount] in {
- let rt = 0 in
- def CLZ : CountLeading<0b010110, "clz", ctlz>;
-}
+def CLZ : CountLeading<0b100000, "clz",
+ [(set CPURegs:$dst, (ctlz CPURegs:$src))]>;
+def CLO : CountLeading<0b100001, "clo",
+ [(set CPURegs:$dst, (ctlz (not CPURegs:$src)))]>;
/// Byte Swap
let Predicates = [HasSwap] in {
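
The rewritten CLO definition leans on the identity clo(x) == clz(~x), which
is exactly the pattern handed to the selector above. As plain code (sketch;
the guard covers __builtin_clz being undefined at zero, where the hardware
instruction simply returns 32):

    unsigned clz32(unsigned x) { return x ? __builtin_clz(x) : 32; }
    unsigned clo32(unsigned x) { return clz32(~x); } // count leading ones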
@@ -551,15 +504,15 @@ let addr=0 in
// can be matched. It's similar to Sparc LEA_ADDRi
def LEA_ADDiu : EffectiveAddress<"addiu\t$dst, ${addr:stackloc}">;
-// MADD*/MSUB* are not part of MipsI either.
-//def MADD : MArithR<0x00, "madd">;
-//def MADDU : MArithR<0x01, "maddu">;
-//def MSUB : MArithR<0x04, "msub">;
-//def MSUBU : MArithR<0x05, "msubu">;
+// MADD*/MSUB*
+def MADD : MArithR<0, "madd", MipsMAdd>;
+def MADDU : MArithR<1, "maddu", MipsMAddu>;
+def MSUB : MArithR<4, "msub", MipsMSub>;
+def MSUBU : MArithR<5, "msubu", MipsMSubu>;
// MUL is an assembly macro in the currently used ISAs. In recent ISAs
// it is a real instruction.
-//def MUL : ArithR<0x1c, 0x02, "mul", mul, IIImul>;
+def MUL : ArithR<0x1c, 0x02, "mul", mul, IIImul>, Requires<[IsMips32]>;
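
MULT writes the full 64-bit product to HI/LO and needs an mflo to reach a
GPR; the MIPS32 MUL enabled here writes the low 32 bits of the product
directly to a GPR, hence the Requires<[IsMips32]> guard. The difference in
plain arithmetic (sketch):

    #include <cstdint>

    // mult $a, $b   : HI:LO = full 64-bit product (read via mfhi/mflo)
    // mul  $d,$a,$b : $d    = low 32 bits of the product, directly
    uint32_t mul_lo(int32_t a, int32_t b) {
      int64_t prod = (int64_t)a * (int64_t)b;
      return (uint32_t)prod;   // what MUL leaves in the destination GPR
    }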
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
@@ -605,9 +558,9 @@ def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
(ADDiu CPURegs:$hi, tconstpool:$lo)>;
// gp_rel relocs
-def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
+def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
(ADDiu CPURegs:$gp, tglobaladdr:$in)>;
-def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
+def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
(ADDiu CPURegs:$gp, tconstpool:$in)>;
// Mips does not have "not", so we expand our way
@@ -665,9 +618,15 @@ def : Pat<(select (seteq CPURegs:$lhs, CPURegs:$rhs), CPURegs:$T, CPURegs:$F),
def : Pat<(select (setne CPURegs:$lhs, CPURegs:$rhs), CPURegs:$T, CPURegs:$F),
(MOVN CPURegs:$F, CPURegs:$T, (XOR CPURegs:$lhs, CPURegs:$rhs))>;
-def : Pat<(select CPURegs:$cond, CPURegs:$T, CPURegs:$F),
+def : Pat<(select CPURegs:$cond, CPURegs:$T, CPURegs:$F),
(MOVN CPURegs:$F, CPURegs:$T, CPURegs:$cond)>;
+// select patterns with got access
+def : Pat<(select (setne CPURegs:$lhs, CPURegs:$rhs),
+ (i32 tglobaladdr:$T), CPURegs:$F),
+ (MOVN CPURegs:$F, (ADDiu GP, tglobaladdr:$T),
+ (XOR CPURegs:$lhs, CPURegs:$rhs))>;
+
// setcc patterns
def : Pat<(seteq CPURegs:$lhs, CPURegs:$rhs),
(SLTu (XOR CPURegs:$lhs, CPURegs:$rhs), 1)>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
index 5723f9e..1e8e4fe 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -26,11 +26,11 @@ namespace llvm {
class MipsFunctionInfo : public MachineFunctionInfo {
private:
- /// Holds for each function where on the stack the Frame Pointer must be
+ /// Holds, for each function, the stack offset at which the Frame Pointer must be
/// saved. This is used on Prologue and Epilogue to emit FP save/restore
int FPStackOffset;
- /// Holds for each function where on the stack the Return Address must be
+ /// Holds, for each function, the stack offset at which the Return Address must be
/// saved. This is used on Prologue and Epilogue to emit RA save/restore
int RAStackOffset;
@@ -51,22 +51,22 @@ private:
: FI(FrameIndex), SPOffset(StackPointerOffset) {}
};
- /// When PIC is used the GP must be saved on the stack on the function
- /// prologue and must be reloaded from this stack location after every
- /// call. A reference to its stack location and frame index must be kept
+ /// When PIC is used, the GP must be saved on the stack in the function
+ /// prologue and must be reloaded from this stack location after every
+ /// call. A reference to its stack location and frame index must be kept
/// to be used on emitPrologue and processFunctionBeforeFrameFinalized.
MipsFIHolder GPHolder;
/// On LowerFormalArguments the stack size is unknown, so the Stack
- /// Pointer Offset calculation of "not in register arguments" must be
- /// postponed to emitPrologue.
+ /// Pointer Offset calculation of "not in register arguments" must be
+ /// postponed to emitPrologue.
SmallVector<MipsFIHolder, 16> FnLoadArgs;
bool HasLoadArgs;
- // When VarArgs, we must write registers back to caller stack, preserving
- // on register arguments. Since the stack size is unknown on
+ // When VarArgs are used, we must write the registers back to the caller's
+ // stack, preserving the register arguments. Since the stack size is unknown in
// LowerFormalArguments, the Stack Pointer Offset calculation must be
- // postponed to emitPrologue.
+ // postponed to emitPrologue.
SmallVector<MipsFIHolder, 4> FnStoreVarArgs;
bool HasStoreVarArgs;
@@ -84,9 +84,9 @@ private:
int VarArgsFrameIndex;
public:
- MipsFunctionInfo(MachineFunction& MF)
- : FPStackOffset(0), RAStackOffset(0), CPUTopSavedRegOff(0),
- FPUTopSavedRegOff(0), GPHolder(-1,-1), HasLoadArgs(false),
+ MipsFunctionInfo(MachineFunction& MF)
+ : FPStackOffset(0), RAStackOffset(0), CPUTopSavedRegOff(0),
+ FPUTopSavedRegOff(0), GPHolder(-1,-1), HasLoadArgs(false),
HasStoreVarArgs(false), SRetReturnReg(0), GlobalBaseReg(0),
VarArgsFrameIndex(0)
{}
@@ -110,7 +110,7 @@ public:
bool needGPSaveRestore() const { return GPHolder.SPOffset != -1; }
bool hasLoadArgs() const { return HasLoadArgs; }
- bool hasStoreVarArgs() const { return HasStoreVarArgs; }
+ bool hasStoreVarArgs() const { return HasStoreVarArgs; }
void recordLoadArgsFI(int FI, int SPOffset) {
if (!HasLoadArgs) HasLoadArgs=true;
@@ -123,12 +123,12 @@ public:
void adjustLoadArgsFI(MachineFrameInfo *MFI) const {
if (!hasLoadArgs()) return;
- for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i)
+ for (unsigned i = 0, e = FnLoadArgs.size(); i != e; ++i)
MFI->setObjectOffset( FnLoadArgs[i].FI, FnLoadArgs[i].SPOffset );
}
void adjustStoreVarArgsFI(MachineFrameInfo *MFI) const {
- if (!hasStoreVarArgs()) return;
- for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i)
+ if (!hasStoreVarArgs()) return;
+ for (unsigned i = 0, e = FnStoreVarArgs.size(); i != e; ++i)
MFI->setObjectOffset( FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset );
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 69436d2..3719e58 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -25,7 +25,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -117,8 +117,7 @@ getCalleeSavedRegs(const MachineFunction *MF) const
}
BitVector MipsRegisterInfo::
-getReservedRegs(const MachineFunction &MF) const
-{
+getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(Mips::ZERO);
Reserved.set(Mips::AT);
@@ -137,184 +136,6 @@ getReservedRegs(const MachineFunction &MF) const
return Reserved;
}
-//===----------------------------------------------------------------------===//
-//
-// Stack Frame Processing methods
-// +----------------------------+
-//
-// The stack is allocated decrementing the stack pointer on
-// the first instruction of a function prologue. Once decremented,
-// all stack references are done thought a positive offset
-// from the stack/frame pointer, so the stack is considering
-// to grow up! Otherwise terrible hacks would have to be made
-// to get this stack ABI compliant :)
-//
-// The stack frame required by the ABI (after call):
-// Offset
-//
-// 0 ----------
-// 4 Args to pass
-// . saved $GP (used in PIC)
-// . Alloca allocations
-// . Local Area
-// . CPU "Callee Saved" Registers
-// . saved FP
-// . saved RA
-// . FPU "Callee Saved" Registers
-// StackSize -----------
-//
-// Offset - offset from sp after stack allocation on function prologue
-//
-// The sp is the stack pointer subtracted/added from the stack size
-// at the Prologue/Epilogue
-//
-// References to the previous stack (to obtain arguments) are done
-// with offsets that exceeds the stack size: (stacksize+(4*(num_arg-1))
-//
-// Examples:
-// - reference to the actual stack frame
-// for any local area var there is smt like : FI >= 0, StackOffset: 4
-// sw REGX, 4(SP)
-//
-// - reference to previous stack frame
-// suppose there's a load to the 5th arguments : FI < 0, StackOffset: 16.
-// The emitted instruction will be something like:
-// lw REGX, 16+StackSize(SP)
-//
-// Since the total stack size is unknown on LowerFormalArguments, all
-// stack references (ObjectOffset) created to reference the function
-// arguments, are negative numbers. This way, on eliminateFrameIndex it's
-// possible to detect those references and the offsets are adjusted to
-// their real location.
-//
-//===----------------------------------------------------------------------===//
-
-void MipsRegisterInfo::adjustMipsStackFrame(MachineFunction &MF) const
-{
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- unsigned RegSize = Subtarget.isGP32bit() ? 4 : 8;
- bool HasGP = MipsFI->needGPSaveRestore();
-
- // Min and Max CSI FrameIndex.
- int MinCSFI = -1, MaxCSFI = -1;
-
- // See the description at MipsMachineFunction.h
- int TopCPUSavedRegOff = -1, TopFPUSavedRegOff = -1;
-
- // Replace the dummy '0' SPOffset by the negative offsets, as explained on
- // LowerFormalArguments. Leaving '0' for while is necessary to avoid
- // the approach done by calculateFrameObjectOffsets to the stack frame.
- MipsFI->adjustLoadArgsFI(MFI);
- MipsFI->adjustStoreVarArgsFI(MFI);
-
- // It happens that the default stack frame allocation order does not directly
- // map to the convention used for mips. So we must fix it. We move the callee
- // save register slots after the local variables area, as described in the
- // stack frame above.
- unsigned CalleeSavedAreaSize = 0;
- if (!CSI.empty()) {
- MinCSFI = CSI[0].getFrameIdx();
- MaxCSFI = CSI[CSI.size()-1].getFrameIdx();
- }
- for (unsigned i = 0, e = CSI.size(); i != e; ++i)
- CalleeSavedAreaSize += MFI->getObjectAlignment(CSI[i].getFrameIdx());
-
- unsigned StackOffset = HasGP ? (MipsFI->getGPStackOffset()+RegSize)
- : (Subtarget.isABI_O32() ? 16 : 0);
-
- // Adjust local variables. They should come on the stack right
- // after the arguments.
- int LastOffsetFI = -1;
- for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
- if (i >= MinCSFI && i <= MaxCSFI)
- continue;
- if (MFI->isDeadObjectIndex(i))
- continue;
- unsigned Offset =
- StackOffset + MFI->getObjectOffset(i) - CalleeSavedAreaSize;
- if (LastOffsetFI == -1)
- LastOffsetFI = i;
- if (Offset > MFI->getObjectOffset(LastOffsetFI))
- LastOffsetFI = i;
- MFI->setObjectOffset(i, Offset);
- }
-
- // Adjust CPU Callee Saved Registers Area. Registers RA and FP must
- // be saved in this CPU Area. This whole area must be aligned to the
- // default Stack Alignment requirements.
- if (LastOffsetFI >= 0)
- StackOffset = MFI->getObjectOffset(LastOffsetFI)+
- MFI->getObjectSize(LastOffsetFI);
- StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
-
- for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (!Mips::CPURegsRegisterClass->contains(Reg))
- break;
- MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
- TopCPUSavedRegOff = StackOffset;
- StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
- }
-
- // Stack locations for FP and RA. If only one of them is used,
- // the space must be allocated for both, otherwise no space at all.
- if (hasFP(MF) || MFI->adjustsStack()) {
- // FP stack location
- MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
- StackOffset);
- MipsFI->setFPStackOffset(StackOffset);
- TopCPUSavedRegOff = StackOffset;
- StackOffset += RegSize;
-
- // RA stack location
- MFI->setObjectOffset(MFI->CreateStackObject(RegSize, RegSize, true),
- StackOffset);
- MipsFI->setRAStackOffset(StackOffset);
- StackOffset += RegSize;
-
- if (MFI->adjustsStack())
- TopCPUSavedRegOff += RegSize;
- }
-
- StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
-
- // Adjust FPU Callee Saved Registers Area. This Area must be
- // aligned to the default Stack Alignment requirements.
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (Mips::CPURegsRegisterClass->contains(Reg))
- continue;
- MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
- TopFPUSavedRegOff = StackOffset;
- StackOffset += MFI->getObjectAlignment(CSI[i].getFrameIdx());
- }
- StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
-
- // Update frame info
- MFI->setStackSize(StackOffset);
-
- // Recalculate the final top offsets. The final values must be '0'
- // if there is no callee-saved register for the CPU or FPU; otherwise
- // a negative offset is needed.
- if (TopCPUSavedRegOff >= 0)
- MipsFI->setCPUTopSavedRegOff(TopCPUSavedRegOff-StackOffset);
-
- if (TopFPUSavedRegOff >= 0)
- MipsFI->setFPUTopSavedRegOff(TopFPUSavedRegOff-StackOffset);
-}
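The rounding expression used three times in the function above is the standard round-up-to-alignment idiom; as a standalone sketch (illustrative, valid for any Align > 0):

    unsigned RoundUpToAlignment(unsigned Value, unsigned Align) {
      // e.g. (20 + 8 - 1) / 8 * 8 == 24; already-aligned values are unchanged.
      return (Value + Align - 1) / Align * Align;
    }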
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-bool MipsRegisterInfo::
-hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
-}
-
// This function eliminates the ADJCALLSTACKDOWN and
// ADJCALLSTACKUP pseudo instructions.
void MipsRegisterInfo::
@@ -363,106 +184,6 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
}
void MipsRegisterInfo::
-emitPrologue(MachineFunction &MF) const
-{
- MachineBasicBlock &MBB = MF.front();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
- bool isPIC = (MF.getTarget().getRelocationModel() == Reloc::PIC_);
-
- // Get the right frame order for Mips.
- adjustMipsStackFrame(MF);
-
- // Get the number of bytes to allocate from the FrameInfo.
- unsigned StackSize = MFI->getStackSize();
-
- // No need to allocate space on the stack.
- if (StackSize == 0 && !MFI->adjustsStack()) return;
-
- int FPOffset = MipsFI->getFPStackOffset();
- int RAOffset = MipsFI->getRAStackOffset();
-
- BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
-
- // TODO: check whether GP is actually needed here.
- if (isPIC && Subtarget.isABI_O32())
- BuildMI(MBB, MBBI, dl, TII.get(Mips::CPLOAD)).addReg(getPICCallReg());
- BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
-
- // Adjust stack : addi sp, sp, (-imm)
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
- .addReg(Mips::SP).addImm(-StackSize);
-
- // Save the return address only if the function isn't a leaf one.
- // sw $ra, stack_loc($sp)
- if (MFI->adjustsStack()) {
- BuildMI(MBB, MBBI, dl, TII.get(Mips::SW))
- .addReg(Mips::RA).addImm(RAOffset).addReg(Mips::SP);
- }
-
- // If the frame pointer is enabled, save it and set it
- // to point to the stack pointer.
- if (hasFP(MF)) {
- // sw $fp,stack_loc($sp)
- BuildMI(MBB, MBBI, dl, TII.get(Mips::SW))
- .addReg(Mips::FP).addImm(FPOffset).addReg(Mips::SP);
-
- // move $fp, $sp
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDu), Mips::FP)
- .addReg(Mips::SP).addReg(Mips::ZERO);
- }
-
- // Restore GP from the saved stack location
- if (MipsFI->needGPSaveRestore())
- BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE))
- .addImm(MipsFI->getGPStackOffset());
-}
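Taken together, for a non-leaf function with a frame pointer and, say, a 24-byte frame, the sequence above emits roughly the following (offsets illustrative):

    addiu $sp, $sp, -24    # allocate the frame
    sw    $ra, 20($sp)     # spill the return address (non-leaf only)
    sw    $fp, 16($sp)     # spill the old frame pointer
    addu  $fp, $sp, $zero  # move $fp, $sp

emitEpilogue below mirrors this sequence in reverse.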
-
-void MipsRegisterInfo::
-emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
-{
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- DebugLoc dl = MBBI->getDebugLoc();
-
- // Get the number of bytes from FrameInfo
- int NumBytes = (int) MFI->getStackSize();
-
- // Get the FI's where RA and FP are saved.
- int FPOffset = MipsFI->getFPStackOffset();
- int RAOffset = MipsFI->getRAStackOffset();
-
- // If the frame pointer is enabled, restore it and then
- // restore the stack pointer.
- if (hasFP(MF)) {
- // move $sp, $fp
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDu), Mips::SP)
- .addReg(Mips::FP).addReg(Mips::ZERO);
-
- // lw $fp,stack_loc($sp)
- BuildMI(MBB, MBBI, dl, TII.get(Mips::LW), Mips::FP)
- .addImm(FPOffset).addReg(Mips::SP);
- }
-
- // Restore the return address only if the function isn't a leaf one.
- // lw $ra, stack_loc($sp)
- if (MFI->adjustsStack()) {
- BuildMI(MBB, MBBI, dl, TII.get(Mips::LW), Mips::RA)
- .addImm(RAOffset).addReg(Mips::SP);
- }
-
- // Adjust stack: insert addi sp, sp, (imm)
- if (NumBytes) {
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
- .addReg(Mips::SP).addImm(NumBytes);
- }
-}
-
-
-void MipsRegisterInfo::
processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
// Set the stack offset where GP must be saved/loaded from.
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -478,7 +199,9 @@ getRARegister() const {
unsigned MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? Mips::FP : Mips::SP;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? Mips::FP : Mips::SP;
}
unsigned MipsRegisterInfo::
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 89282f8..a7f4bf9 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -44,8 +44,6 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -56,9 +54,6 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
/// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsSchedule.td b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
index 055ff32..49ca5d1 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSchedule.td
+++ b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
@@ -40,7 +40,7 @@ def IIPseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
// Mips Generic instruction itineraries.
//===----------------------------------------------------------------------===//
-def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [
+def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>,
InstrItinData<IILoad , [InstrStage<3, [ALU]>]>,
InstrItinData<IIStore , [InstrStage<1, [ALU]>]>,
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
index 2d5fd22..e4f4b33 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -31,7 +31,7 @@ public:
protected:
enum MipsArchEnum {
- Mips1, Mips2, Mips3, Mips4, Mips32, Mips32r2, Mips64, Mips64r2
+ Mips1, Mips2, Mips3, Mips4, Mips32, Mips32r2
};
// Mips architecture version
@@ -100,6 +100,8 @@ public:
const std::string &CPU);
bool isMips1() const { return MipsArchVersion == Mips1; }
+ bool isMips32() const { return MipsArchVersion >= Mips32; }
+ bool isMips32r2() const { return MipsArchVersion == Mips32r2; }
bool isLittle() const { return IsLittle; }
bool isFP64bit() const { return IsFP64bit; }
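One subtlety in the predicates added above: isMips32() is implemented with >=, so it is also true on Mips32r2 subtargets, while isMips32r2() matches exactly. A standalone illustration (assuming only the enum ordering shown in this hunk):

    enum MipsArchEnum { Mips1, Mips2, Mips3, Mips4, Mips32, Mips32r2 };
    bool isMips32(MipsArchEnum V)   { return V >= Mips32; }   // Mips32 and Mips32r2
    bool isMips32r2(MipsArchEnum V) { return V == Mips32r2; } // Mips32r2 only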
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index ad3eb9e..7a2dd1f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -30,18 +30,18 @@ extern "C" void LLVMInitializeMipsTarget() {
// The stack is always 8 byte aligned
// On function prologue, the stack is created by decrementing
// its pointer. Once decremented, all references are done with positive
-// offset from the stack/frame pointer, using StackGrowsUp enables
+// offsets from the stack/frame pointer; using StackGrowsUp enables
// easier handling.
// Using CodeModel::Large enables different CALL behavior.
MipsTargetMachine::
MipsTargetMachine(const Target &T, const std::string &TT, const std::string &FS,
bool isLittle=false):
LLVMTargetMachine(T, TT),
- Subtarget(TT, FS, isLittle),
+ Subtarget(TT, FS, isLittle),
DataLayout(isLittle ? std::string("e-p:32:32:32-i8:8:32-i16:16:32-n32") :
- std::string("E-p:32:32:32-i8:8:32-i16:16:32-n32")),
- InstrInfo(*this),
- FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0),
+ std::string("E-p:32:32:32-i8:8:32-i16:16:32-n32")),
+ InstrInfo(*this),
+ FrameLowering(Subtarget),
TLInfo(*this), TSInfo(*this) {
// Abicall enables PIC by default
if (getRelocationModel() == Reloc::Default) {
@@ -57,20 +57,20 @@ MipselTargetMachine(const Target &T, const std::string &TT,
const std::string &FS) :
MipsTargetMachine(T, TT, FS, true) {}
-// Install an instruction selector pass using
+// Install an instruction selector pass using
// the ISelDag to gen Mips code.
bool MipsTargetMachine::
-addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
+addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
{
PM.add(createMipsISelDag(*this));
return false;
}
-// Implemented by targets that want to run passes immediately before
-// machine code is emitted. return true if -print-machineinstrs should
+// Implemented by targets that want to run passes immediately before
+// machine code is emitted. Return true if -print-machineinstrs should
// print out the code after the passes.
bool MipsTargetMachine::
-addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
+addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
{
PM.add(createMipsDelaySlotFillerPass(*this));
return true;
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
index d63976f..43ab798 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -17,39 +17,40 @@
#include "MipsSubtarget.h"
#include "MipsInstrInfo.h"
#include "MipsISelLowering.h"
+#include "MipsFrameLowering.h"
#include "MipsSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
class formatted_raw_ostream;
-
+
class MipsTargetMachine : public LLVMTargetMachine {
MipsSubtarget Subtarget;
const TargetData DataLayout; // Calculates type size & alignment
MipsInstrInfo InstrInfo;
- TargetFrameInfo FrameInfo;
+ MipsFrameLowering FrameLowering;
MipsTargetLowering TLInfo;
MipsSelectionDAGInfo TSInfo;
public:
MipsTargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool isLittle);
-
- virtual const MipsInstrInfo *getInstrInfo() const
+
+ virtual const MipsInstrInfo *getInstrInfo() const
{ return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const
- { return &FrameInfo; }
- virtual const MipsSubtarget *getSubtargetImpl() const
+ virtual const TargetFrameLowering *getFrameLowering() const
+ { return &FrameLowering; }
+ virtual const MipsSubtarget *getSubtargetImpl() const
{ return &Subtarget; }
- virtual const TargetData *getTargetData() const
+ virtual const TargetData *getTargetData() const
{ return &DataLayout;}
virtual const MipsRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
- virtual const MipsTargetLowering *getTargetLowering() const {
+ virtual const MipsTargetLowering *getTargetLowering() const {
return &TLInfo;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
index 405f419..cf5d1b5 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -16,6 +16,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
static cl::opt<unsigned>
@@ -25,21 +26,21 @@ SSThreshold("mips-ssection-threshold", cl::Hidden,
void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
-
+
SmallDataSection =
- getContext().getELFSection(".sdata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getDataRel());
-
+
SmallBSSSection =
- getContext().getELFSection(".sbss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
SectionKind::getBSS());
-
+
}
-// A address must be loaded from a small section if its size is less than the
-// small section size threshold. Data in this section must be addressed using
+// An address must be loaded from a small section if its size is less than the
+// small section size threshold. Data in this section must be addressed using
// gp_rel operator.
static bool IsInSmallSection(uint64_t Size) {
return Size > 0 && Size <= SSThreshold;
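Since SSThreshold is a cl::opt, the small-section cutoff can be overridden when invoking the code generator; a hypothetical invocation (only the flag name is taken from the option above, everything else is assumed):

    llc -march=mipsel -mips-ssection-threshold=16 foo.ll -o foo.s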
@@ -49,7 +50,7 @@ bool MipsTargetObjectFile::IsGlobalInSmallSection(const GlobalValue *GV,
const TargetMachine &TM) const {
if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
return false;
-
+
return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
}
@@ -68,11 +69,11 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
if (!GVA)
return false;
-
+
// We can only do this for datarel or BSS objects for now.
if (!Kind.isBSS() && !Kind.isDataRel())
return false;
-
+
// If this is an internal constant string, there is a special
// section for it, but not in small data/bss.
if (Kind.isMergeable1ByteCString())
@@ -89,13 +90,13 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
// TODO: Could also support "weak" symbols with ".gnu.linkonce.s.*"
// sections?
-
+
// Handle Small Section classification here.
if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
return SmallBSSSection;
if (Kind.isDataNoRel() && IsGlobalInSmallSection(GV, TM, Kind))
return SmallDataSection;
-
+
// Otherwise, we work the same as ELF.
return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM);
}
diff --git a/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp b/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp
deleted file mode 100644
index b665817..0000000
--- a/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp
+++ /dev/null
@@ -1,512 +0,0 @@
-//===-- PIC16AsmPrinter.cpp - PIC16 LLVM assembly writer ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to PIC16 assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16ABINames.h"
-#include "PIC16AsmPrinter.h"
-#include "PIC16Section.h"
-#include "PIC16MCAsmInfo.h"
-#include "PIC16MachineFunctionInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallString.h"
-#include <cstring>
-using namespace llvm;
-
-#include "PIC16GenAsmWriter.inc"
-
-PIC16AsmPrinter::PIC16AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
-: AsmPrinter(TM, Streamer), DbgInfo(Streamer, TM.getMCAsmInfo()) {
- PMAI = static_cast<const PIC16MCAsmInfo*>(TM.getMCAsmInfo());
- PTOF = &getObjFileLowering();
-}
-
-void PIC16AsmPrinter::EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printInstruction(MI, OS);
-
- OutStreamer.EmitRawText(OS.str());
-}
-
-static int getFunctionColor(const Function *F) {
- if (F->hasSection()) {
- std::string Sectn = F->getSection();
- std::string StrToFind = "Overlay=";
- std::string::size_type Pos = Sectn.find(StrToFind);
-
- // Retrieve the color number if the key is found.
- if (Pos != std::string::npos) {
- Pos += StrToFind.length();
- std::string Color = "";
- char c = Sectn.at(Pos);
- // A Color can only consist of digits.
- while (c >= '0' && c<= '9') {
- Color.append(1,c);
- Pos++;
- if (Pos >= Sectn.length())
- break;
- c = Sectn.at(Pos);
- }
- return atoi(Color.c_str());
- }
- }
-
- // Color was not set for function, so return -1.
- return -1;
-}
-
-// Color the Auto section of the given function.
-void PIC16AsmPrinter::ColorAutoSection(const Function *F) {
- std::string SectionName = PAN::getAutosSectionName(CurrentFnSym->getName());
- PIC16Section* Section = PTOF->findPIC16Section(SectionName);
- if (Section != NULL) {
- int Color = getFunctionColor(F);
- if (Color >= 0)
- Section->setColor(Color);
- }
-}
-
-
-/// runOnMachineFunction - This emits the frame section, autos section and
-/// assembly for each instruction. Also takes care of function begin debug
-/// directive and file begin debug directive (if required) for the function.
-///
-bool PIC16AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- // This calls the base class function required to be called at the beginning
- // of runOnMachineFunction.
- SetupMachineFunction(MF);
-
- // Put the color information from function to its auto section.
- const Function *F = MF.getFunction();
- ColorAutoSection(F);
-
- // Emit the function frame (args and temps).
- EmitFunctionFrame(MF);
-
- DbgInfo.BeginFunction(MF);
-
- // Now emit the instructions of function in its code section.
- const MCSection *fCodeSection =
- getObjFileLowering().SectionForCode(CurrentFnSym->getName(),
- PAN::isISR(F->getSection()));
-
- // Start the Code Section.
- OutStreamer.SwitchSection(fCodeSection);
-
- // Emit the frame address of the function at the beginning of code.
- OutStreamer.EmitRawText("\tretlw low(" +
- Twine(PAN::getFrameLabel(CurrentFnSym->getName())) +
- ")");
- OutStreamer.EmitRawText("\tretlw high(" +
- Twine(PAN::getFrameLabel(CurrentFnSym->getName())) +
- ")");
-
- // Emit function start label.
- OutStreamer.EmitLabel(CurrentFnSym);
-
- DebugLoc CurDL;
- // Print out code for the function.
- for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
- I != E; ++I) {
-
- // Print a label for the basic block.
- if (I != MF.begin())
- EmitBasicBlockStart(I);
-
- // Print a basic block.
- for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
- II != E; ++II) {
- // Emit the line directive if source line changed.
- DebugLoc DL = II->getDebugLoc();
- if (!DL.isUnknown() && DL != CurDL) {
- DbgInfo.ChangeDebugLoc(MF, DL);
- CurDL = DL;
- }
-
- // Print the assembly for the instruction.
- EmitInstruction(II);
- }
- }
-
- // Emit function end debug directives.
- DbgInfo.EndFunction(MF);
-
- return false; // we didn't modify anything.
-}
-
-
-// printOperand - Print an operand of the instruction.
-void PIC16AsmPrinter::printOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- const Function *F = MI->getParent()->getParent()->getFunction();
-
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- {
- // For indirect load/store insns, the fsr name is printed as INDF.
- std::string RegName = getRegisterName(MO.getReg());
- if ((MI->getOpcode() == PIC16::load_indirect) ||
- (MI->getOpcode() == PIC16::store_indirect))
- RegName.replace (0, 3, "INDF");
- O << RegName;
- }
- return;
-
- case MachineOperand::MO_Immediate:
- O << (int)MO.getImm();
- return;
-
- case MachineOperand::MO_GlobalAddress: {
- MCSymbol *Sym = Mang->getSymbol(MO.getGlobal());
- // FIXME: currently no memcpy def ever comes into the module, since we do
- // not link those in as a .bc lib. So these calls are always external and
- // it is safe to emit an extern.
- if (PAN::isMemIntrinsic(Sym->getName()))
- LibcallDecls.insert(Sym->getName());
-
- O << *Sym;
- break;
- }
- case MachineOperand::MO_ExternalSymbol: {
- const char *Sname = MO.getSymbolName();
- std::string Printname = Sname;
-
- // Intrinsic-related names need to be renamed if we are printing the IL fn.
- if (PAN::isIntrinsicStuff(Printname)) {
- if (PAN::isISR(F->getSection())) {
- Printname = PAN::Rename(Sname);
- }
- // Record these decls, we need to print them in asm as extern.
- LibcallDecls.insert(Printname);
- }
-
- O << Printname;
- break;
- }
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
-
- default:
- llvm_unreachable(" Operand type not supported.");
- }
-}
-
-/// printCCOperand - Print the cond code operand.
-///
-void PIC16AsmPrinter::printCCOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- int CC = (int)MI->getOperand(opNum).getImm();
- O << PIC16CondCodeToString((PIC16CC::CondCodes)CC);
-}
-
-/// printLibcallDecls - print the extern declarations for compiler
-/// intrinsics.
-///
-void PIC16AsmPrinter::printLibcallDecls() {
- // If no libcalls used, return.
- if (LibcallDecls.empty()) return;
-
- OutStreamer.AddComment("External decls for libcalls - BEGIN");
- OutStreamer.AddBlankLine();
-
- for (std::set<std::string>::const_iterator I = LibcallDecls.begin(),
- E = LibcallDecls.end(); I != E; I++)
- OutStreamer.EmitRawText(MAI->getExternDirective() + Twine(*I));
-
- OutStreamer.AddComment("External decls for libcalls - END");
- OutStreamer.AddBlankLine();
-}
-
-/// doInitialization - Perform Module level initializations here.
-/// One task that we do here is to sectionize all global variables.
-/// The MemSelOptimizer pass depends on the sectionizing.
-///
-bool PIC16AsmPrinter::doInitialization(Module &M) {
- bool Result = AsmPrinter::doInitialization(M);
-
- // Every assembly file contains these standard headers.
- OutStreamer.EmitRawText(StringRef("\n#include p16f1xxx.inc"));
- OutStreamer.EmitRawText(StringRef("#include stdmacros.inc"));
-
- // Set the section names for all globals.
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
-
- // Record External Var Decls.
- if (I->isDeclaration()) {
- ExternalVarDecls.push_back(I);
- continue;
- }
-
- // Record External Var Defs.
- if (I->hasExternalLinkage() || I->hasCommonLinkage()) {
- ExternalVarDefs.push_back(I);
- }
-
- // Sectionify actual data.
- if (!I->hasAvailableExternallyLinkage()) {
- const MCSection *S = getObjFileLowering().SectionForGlobal(I, Mang, TM);
-
- I->setSection(((const PIC16Section *)S)->getName());
- }
- }
-
- DbgInfo.BeginModule(M);
- EmitFunctionDecls(M);
- EmitUndefinedVars(M);
- EmitDefinedVars(M);
- EmitIData(M);
- EmitUData(M);
- EmitRomData(M);
- EmitSharedUdata(M);
- EmitUserSections(M);
- return Result;
-}
-
-/// Emit extern decls for functions imported from other modules, and emit
-/// global declarations for functions defined in this module which are
-/// available to other modules.
-///
-void PIC16AsmPrinter::EmitFunctionDecls(Module &M) {
- // Emit declarations for external functions.
- OutStreamer.AddComment("Function Declarations - BEGIN");
- OutStreamer.AddBlankLine();
- for (Module::iterator I = M.begin(), E = M.end(); I != E; I++) {
- if (I->isIntrinsic() || I->getName() == "@abort")
- continue;
-
- if (!I->isDeclaration() && !I->hasExternalLinkage())
- continue;
-
- MCSymbol *Sym = Mang->getSymbol(I);
-
- // Do not emit memcpy, memset, and memmove here.
- // Calls to these routines can be generated in two ways,
- // 1. User calling the standard lib function
- // 2. Codegen generating these calls for llvm intrinsics.
- // In the first case a prototype is already available, while in the
- // second case the call is via an external symbol and the prototype is
- // missing. So declarations for these currently always get printed by
- // tracking both kinds of references in printInstruction.
- if (I->isDeclaration() && PAN::isMemIntrinsic(Sym->getName())) continue;
-
- const char *directive = I->isDeclaration() ? MAI->getExternDirective() :
- MAI->getGlobalDirective();
-
- OutStreamer.EmitRawText(directive + Twine(Sym->getName()));
- OutStreamer.EmitRawText(directive +
- Twine(PAN::getRetvalLabel(Sym->getName())));
- OutStreamer.EmitRawText(directive +
- Twine(PAN::getArgsLabel(Sym->getName())));
- }
-
- OutStreamer.AddComment("Function Declarations - END");
- OutStreamer.AddBlankLine();
-
-}
-
-// Emit variables imported from other Modules.
-void PIC16AsmPrinter::EmitUndefinedVars(Module &M) {
- std::vector<const GlobalVariable*> Items = ExternalVarDecls;
- if (!Items.size()) return;
-
- OutStreamer.AddComment("Imported Variables - BEGIN");
- OutStreamer.AddBlankLine();
- for (unsigned j = 0; j < Items.size(); j++)
- OutStreamer.EmitRawText(MAI->getExternDirective() +
- Twine(Mang->getSymbol(Items[j])->getName()));
-
- OutStreamer.AddComment("Imported Variables - END");
- OutStreamer.AddBlankLine();
-}
-
-// Emit variables defined in this module that are available to other modules.
-void PIC16AsmPrinter::EmitDefinedVars(Module &M) {
- std::vector<const GlobalVariable*> Items = ExternalVarDefs;
- if (!Items.size()) return;
-
- OutStreamer.AddComment("Exported Variables - BEGIN");
- OutStreamer.AddBlankLine();
-
- for (unsigned j = 0; j < Items.size(); j++)
- OutStreamer.EmitRawText(MAI->getGlobalDirective() +
- Twine(Mang->getSymbol(Items[j])->getName()));
- OutStreamer.AddComment("Exported Variables - END");
- OutStreamer.AddBlankLine();
-}
-
-// Emit initialized data placed in ROM.
-void PIC16AsmPrinter::EmitRomData(Module &M) {
- EmitSingleSection(PTOF->ROMDATASection());
-}
-
-// Emit Shared section udata.
-void PIC16AsmPrinter::EmitSharedUdata(Module &M) {
- EmitSingleSection(PTOF->SHAREDUDATASection());
-}
-
-bool PIC16AsmPrinter::doFinalization(Module &M) {
- EmitAllAutos(M);
- printLibcallDecls();
- DbgInfo.EndModule(M);
- OutStreamer.EmitRawText(StringRef("\tEND"));
- return AsmPrinter::doFinalization(M);
-}
-
-void PIC16AsmPrinter::EmitFunctionFrame(MachineFunction &MF) {
- const Function *F = MF.getFunction();
- const TargetData *TD = TM.getTargetData();
- PIC16MachineFunctionInfo *FuncInfo = MF.getInfo<PIC16MachineFunctionInfo>();
-
- // Emit the data section name.
-
- PIC16Section *fPDataSection =
- const_cast<PIC16Section *>(getObjFileLowering().
- SectionForFrame(CurrentFnSym->getName()));
-
- fPDataSection->setColor(getFunctionColor(F));
- OutStreamer.SwitchSection(fPDataSection);
-
- // Emit function frame label
- OutStreamer.EmitRawText(PAN::getFrameLabel(CurrentFnSym->getName()) +
- Twine(":"));
-
- const Type *RetType = F->getReturnType();
- unsigned RetSize = 0;
- if (RetType->getTypeID() != Type::VoidTyID)
- RetSize = TD->getTypeAllocSize(RetType);
-
- // Emit function return value space.
- // FIXME: Do not emit RetvalLabel when RetSize is zero. To do this
- // we will need to avoid printing a global directive for the Retval label
- // in emitExternandGloblas.
- if(RetSize > 0)
- OutStreamer.EmitRawText(PAN::getRetvalLabel(CurrentFnSym->getName()) +
- Twine(" RES ") + Twine(RetSize));
- else
- OutStreamer.EmitRawText(PAN::getRetvalLabel(CurrentFnSym->getName()) +
- Twine(":"));
-
- // Emit variable to hold the space for function arguments
- unsigned ArgSize = 0;
- for (Function::const_arg_iterator argi = F->arg_begin(),
- arge = F->arg_end(); argi != arge ; ++argi) {
- const Type *Ty = argi->getType();
- ArgSize += TD->getTypeAllocSize(Ty);
- }
-
- OutStreamer.EmitRawText(PAN::getArgsLabel(CurrentFnSym->getName()) +
- Twine(" RES ") + Twine(ArgSize));
-
- // Emit temporary space
- int TempSize = FuncInfo->getTmpSize();
- if (TempSize > 0)
- OutStreamer.EmitRawText(PAN::getTempdataLabel(CurrentFnSym->getName()) +
- Twine(" RES ") + Twine(TempSize));
-}
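As a worked example (illustrative; the label shapes follow the PAN naming scheme quoted later in this diff, and the sizes assume an 8-bit char and a 16-bit int): for int foo(char a, int b), EmitFunctionFrame emits roughly

    @foo.frame.:
    @foo.ret. RES 2
    @foo.args. RES 3

plus a @foo.temp. RES line when the function needs temporary space.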
-
-
-void PIC16AsmPrinter::EmitInitializedDataSection(const PIC16Section *S) {
- /// Emit Section header.
- OutStreamer.SwitchSection(S);
-
- std::vector<const GlobalVariable*> Items = S->Items;
- for (unsigned j = 0; j < Items.size(); j++) {
- Constant *C = Items[j]->getInitializer();
- int AddrSpace = Items[j]->getType()->getAddressSpace();
- OutStreamer.EmitRawText(Mang->getSymbol(Items[j])->getName());
- EmitGlobalConstant(C, AddrSpace);
- }
-}
-
-// Print all IDATA sections.
-void PIC16AsmPrinter::EmitIData(Module &M) {
- EmitSectionList (M, PTOF->IDATASections());
-}
-
-void PIC16AsmPrinter::
-EmitUninitializedDataSection(const PIC16Section *S) {
- const TargetData *TD = TM.getTargetData();
- OutStreamer.SwitchSection(S);
- std::vector<const GlobalVariable*> Items = S->Items;
- for (unsigned j = 0; j < Items.size(); j++) {
- Constant *C = Items[j]->getInitializer();
- const Type *Ty = C->getType();
- unsigned Size = TD->getTypeAllocSize(Ty);
- OutStreamer.EmitRawText(Mang->getSymbol(Items[j])->getName() +
- Twine(" RES ") + Twine(Size));
- }
-}
-
-// Print all UDATA sections.
-void PIC16AsmPrinter::EmitUData(Module &M) {
- EmitSectionList (M, PTOF->UDATASections());
-}
-
-// Print all USER sections.
-void PIC16AsmPrinter::EmitUserSections(Module &M) {
- EmitSectionList (M, PTOF->USERSections());
-}
-
-// Print all AUTO sections.
-void PIC16AsmPrinter::EmitAllAutos(Module &M) {
- EmitSectionList (M, PTOF->AUTOSections());
-}
-
-extern "C" void LLVMInitializePIC16AsmPrinter() {
- RegisterAsmPrinter<PIC16AsmPrinter> X(ThePIC16Target);
-}
-
-// Emit one data section using correct section emitter based on section type.
-void PIC16AsmPrinter::EmitSingleSection(const PIC16Section *S) {
- if (S == NULL) return;
-
- switch (S->getType()) {
- default: llvm_unreachable ("unknown user section type");
- case UDATA:
- case UDATA_SHR:
- case UDATA_OVR:
- EmitUninitializedDataSection(S);
- break;
- case IDATA:
- case ROMDATA:
- EmitInitializedDataSection(S);
- break;
- }
-}
-
-// Emit a list of sections.
-void PIC16AsmPrinter::
-EmitSectionList(Module &M, const std::vector<PIC16Section *> &SList) {
- for (unsigned i = 0; i < SList.size(); i++) {
- // Exclude llvm specific metadata sections.
- if (SList[i]->getName().find("llvm.") != std::string::npos)
- continue;
- OutStreamer.AddBlankLine();
- EmitSingleSection(SList[i]);
- }
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.h b/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.h
deleted file mode 100644
index aa2e1f4..0000000
--- a/contrib/llvm/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- PIC16AsmPrinter.h - PIC16 LLVM assembly writer ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to PIC16 assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16ASMPRINTER_H
-#define PIC16ASMPRINTER_H
-
-#include "PIC16.h"
-#include "PIC16TargetMachine.h"
-#include "PIC16DebugInfo.h"
-#include "PIC16MCAsmInfo.h"
-#include "PIC16TargetObjectFile.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetMachine.h"
-#include <list>
-#include <set>
-#include <string>
-
-namespace llvm {
- class LLVM_LIBRARY_VISIBILITY PIC16AsmPrinter : public AsmPrinter {
- public:
- explicit PIC16AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
- private:
- virtual const char *getPassName() const {
- return "PIC16 Assembly Printer";
- }
-
- const PIC16TargetObjectFile &getObjFileLowering() const {
- return (const PIC16TargetObjectFile &)AsmPrinter::getObjFileLowering();
- }
-
- bool runOnMachineFunction(MachineFunction &F);
- void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
-
- void EmitInstruction(const MachineInstr *MI);
- void EmitFunctionDecls (Module &M);
- void EmitUndefinedVars (Module &M);
- void EmitDefinedVars (Module &M);
- void EmitIData (Module &M);
- void EmitUData (Module &M);
- void EmitAllAutos (Module &M);
- void EmitRomData (Module &M);
- void EmitSharedUdata(Module &M);
- void EmitUserSections (Module &M);
- void EmitFunctionFrame(MachineFunction &MF);
- void printLibcallDecls();
- void EmitUninitializedDataSection(const PIC16Section *S);
- void EmitInitializedDataSection(const PIC16Section *S);
- void EmitSingleSection(const PIC16Section *S);
- void EmitSectionList(Module &M,
- const std::vector< PIC16Section *> &SList);
- void ColorAutoSection(const Function *F);
- protected:
- bool doInitialization(Module &M);
- bool doFinalization(Module &M);
-
- /// EmitGlobalVariable - Emit the specified global variable and its
- /// initializer to the output stream.
- virtual void EmitGlobalVariable(const GlobalVariable *GV) {
- // PIC16 doesn't use normal hooks for this.
- }
-
- private:
- const PIC16TargetObjectFile *PTOF;
- PIC16DbgInfo DbgInfo;
- const PIC16MCAsmInfo *PMAI;
- std::set<std::string> LibcallDecls; // Sorted & uniqued set of extern decls.
- std::vector<const GlobalVariable *> ExternalVarDecls;
- std::vector<const GlobalVariable *> ExternalVarDefs;
- };
-} // end of namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16.h b/contrib/llvm/lib/Target/PIC16/PIC16.h
deleted file mode 100644
index 08bb3e6..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//===-- PIC16.h - Top-level interface for PIC16 representation --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in
-// the LLVM PIC16 back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_PIC16_H
-#define LLVM_TARGET_PIC16_H
-
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetMachine.h"
-#include <cassert>
-#include <sstream>
-#include <cstring>
-#include <string>
-#include <vector>
-
-namespace llvm {
- class PIC16TargetMachine;
- class FunctionPass;
- class MachineCodeEmitter;
- class formatted_raw_ostream;
-
-namespace PIC16CC {
- enum CondCodes {
- EQ,
- NE,
- LT,
- LE,
- GT,
- GE,
- ULT,
- UGT,
- ULE,
- UGE
- };
-}
-
- enum PIC16SectionType {
- CODE,
- UDATA,
- IDATA,
- ROMDATA,
- UDATA_OVR,
- UDATA_SHR
- };
-
- class ESNames {
- std::vector<char*> stk;
- ESNames() {}
- public:
- ~ESNames() {
- while (!stk.empty())
- {
- char* p = stk.back();
- delete [] p;
- stk.pop_back();
- }
- }
-
- // External symbol names require memory that lives until the program ends,
- // so we have to allocate it and keep it. Push all such allocations into a
- // vector so that they get freed up on termination.
- inline static const char *createESName (const std::string &name) {
- static ESNames esn;
- char *tmpName = new char[name.size() + 1];
- memcpy(tmpName, name.c_str(), name.size() + 1);
- esn.stk.push_back(tmpName);
- return tmpName;
- }
-
- };
-
- inline static const char *PIC16CondCodeToString(PIC16CC::CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case PIC16CC::NE: return "ne";
- case PIC16CC::EQ: return "eq";
- case PIC16CC::LT: return "lt";
- case PIC16CC::ULT: return "lt";
- case PIC16CC::LE: return "le";
- case PIC16CC::ULE: return "le";
- case PIC16CC::GT: return "gt";
- case PIC16CC::UGT: return "gt";
- case PIC16CC::GE: return "ge";
- case PIC16CC::UGE: return "ge";
- }
- }
-
- inline static bool isSignedComparison(PIC16CC::CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case PIC16CC::NE:
- case PIC16CC::EQ:
- case PIC16CC::LT:
- case PIC16CC::LE:
- case PIC16CC::GE:
- case PIC16CC::GT:
- return true;
- case PIC16CC::ULT:
- case PIC16CC::UGT:
- case PIC16CC::ULE:
- case PIC16CC::UGE:
- return false; // condition codes for unsigned comparison.
- }
- }
-
-
-
- FunctionPass *createPIC16ISelDag(PIC16TargetMachine &TM);
- // Banksel optimizer pass.
- FunctionPass *createPIC16MemSelOptimizerPass();
-
- extern Target ThePIC16Target;
- extern Target TheCooperTarget;
-
-} // end namespace llvm;
-
-// Defines symbolic names for PIC16 registers. This defines a mapping from
-// register name to register number.
-#include "PIC16GenRegisterNames.inc"
-
-// Defines symbolic names for the PIC16 instructions.
-#include "PIC16GenInstrNames.inc"
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16.td b/contrib/llvm/lib/Target/PIC16/PIC16.td
deleted file mode 100644
index b2b9b1c..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16.td
+++ /dev/null
@@ -1,40 +0,0 @@
-//===- PIC16.td - Describe the PIC16 Target Machine -----------*- tblgen -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This is the top level entry point for the PIC16 target.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-include "PIC16RegisterInfo.td"
-include "PIC16InstrInfo.td"
-
-//===----------------------------------------------------------------------===//
-// Subtarget Features.
-//===----------------------------------------------------------------------===//
-def FeatureCooper : SubtargetFeature<"cooper", "IsCooper", "true",
- "PIC16 Cooper ISA Support">;
-
-//===----------------------------------------------------------------------===//
-// PIC16 supported processors.
-//===----------------------------------------------------------------------===//
-
-def : Processor<"generic", NoItineraries, []>;
-def : Processor<"cooper", NoItineraries, [FeatureCooper]>;
-
-
-def PIC16InstrInfo : InstrInfo {}
-
-def PIC16 : Target {
- let InstructionSet = PIC16InstrInfo;
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ABINames.h b/contrib/llvm/lib/Target/PIC16/PIC16ABINames.h
deleted file mode 100644
index 4c1a8da..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16ABINames.h
+++ /dev/null
@@ -1,399 +0,0 @@
-//===-- PIC16ABINames.h - PIC16 Naming Conventions for ABI -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the functions to manage ABI Naming conventions for PIC16.
-//
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_PIC16ABINAMES_H
-#define LLVM_TARGET_PIC16ABINAMES_H
-
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetMachine.h"
-#include <cassert>
-#include <sstream>
-#include <cstring>
-#include <string>
-
-namespace llvm {
- class PIC16TargetMachine;
- class FunctionPass;
- class MachineCodeEmitter;
- class formatted_raw_ostream;
-
- // A Central class to manage all ABI naming conventions.
- // PAN - [P]ic16 [A]BI [N]ames
- class PAN {
- public:
- // Map the name of the symbol to its section name.
- // Current ABI:
- // -----------------------------------------------------
- // ALL Names are prefixed with the symbol '@'.
- // ------------------------------------------------------
- // Global variables do not have any '.' in their names.
- // These are mainly function names and global variable names.
- // Example - @foo, @i
- // Static local variables - @<func>.<var>
- // -------------------------------------------------------
- // Functions and auto variables.
- // Names are mangled as <prefix><funcname>.<tag>.<varname>
- // Where <prefix> is '@' and <tag> is any one of
- // the following
- // .auto. - an automatic var of a function.
- // .temp. - temporary data of a function.
- // .ret. - return value label for a function.
- // .frame. - Frame label for a function where retval, args
- // and temps are stored.
- // .args. - Label used to pass arguments to a direct call.
- // Example - Function name: @foo
- // Its frame: @foo.frame.
- // Its retval: @foo.ret.
- // Its local vars: @foo.auto.a
- // Its temp data: @foo.temp.
- // Its arg passing: @foo.args.
- //----------------------------------------------
- // Libcall - compiler generated libcall names must start with .lib.
- // This id will be used to emit extern decls for libcalls.
- // Example - libcall name: @.lib.sra.i8
- // To pass args: @.lib.sra.i8.args.
- // To return val: @.lib.sra.i8.ret.
- //----------------------------------------------
- // SECTION Names
- // uninitialized globals - @udata.<num>.#
- // initialized globals - @idata.<num>.#
- // Program memory data - @romdata.#
- // Variables with user defined section name - <user_defined_section>
- // Variables with user defined address - @<var>.user_section.<address>.#
- // Function frame - @<func>.frame_section.
- // Function autos - @<func>.autos_section.
- // Overlay sections - @<color>.##
- // Declarations - Enclosed in comments. No section for them.
- //----------------------------------------------------------
-
- // Tags used to mangle different names.
- enum TAGS {
- PREFIX_SYMBOL,
- GLOBAL,
- STATIC_LOCAL,
- AUTOS_LABEL,
- FRAME_LABEL,
- RET_LABEL,
- ARGS_LABEL,
- TEMPS_LABEL,
-
- LIBCALL,
-
- FRAME_SECTION,
- AUTOS_SECTION,
- CODE_SECTION,
- USER_SECTION
- };
-
- // Textual names of the tags.
- inline static const char *getTagName(TAGS tag) {
- switch (tag) {
- default: return "";
- case PREFIX_SYMBOL: return "@";
- case AUTOS_LABEL: return ".auto.";
- case FRAME_LABEL: return ".frame.";
- case TEMPS_LABEL: return ".temp.";
- case ARGS_LABEL: return ".args.";
- case RET_LABEL: return ".ret.";
- case LIBCALL: return ".lib.";
- case FRAME_SECTION: return ".frame_section.";
- case AUTOS_SECTION: return ".autos_section.";
- case CODE_SECTION: return ".code_section.";
- case USER_SECTION: return ".user_section.";
- }
- }
-
- // Get tag type for the Symbol.
- inline static TAGS getSymbolTag(const std::string &Sym) {
- if (Sym.find(getTagName(TEMPS_LABEL)) != std::string::npos)
- return TEMPS_LABEL;
-
- if (Sym.find(getTagName(FRAME_LABEL)) != std::string::npos)
- return FRAME_LABEL;
-
- if (Sym.find(getTagName(RET_LABEL)) != std::string::npos)
- return RET_LABEL;
-
- if (Sym.find(getTagName(ARGS_LABEL)) != std::string::npos)
- return ARGS_LABEL;
-
- if (Sym.find(getTagName(AUTOS_LABEL)) != std::string::npos)
- return AUTOS_LABEL;
-
- if (Sym.find(getTagName(LIBCALL)) != std::string::npos)
- return LIBCALL;
-
- // It does not have any Tag. So it's a true global or static local.
- if (Sym.find(".") == std::string::npos)
- return GLOBAL;
-
- // If a '.' is there, then it may be a static local.
- // We should mangle these as well in clang.
- if (Sym.find(".") != std::string::npos)
- return STATIC_LOCAL;
-
- assert (0 && "Could not determine Symbol's tag");
- return PREFIX_SYMBOL; // Silence warning when assertions are turned off.
- }
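A few concrete lookups, derived from the substring checks above (illustrative):

    // getSymbolTag("@foo.temp.")  == TEMPS_LABEL
    // getSymbolTag("@foo.auto.a") == AUTOS_LABEL
    // getSymbolTag("@i")          == GLOBAL   (no '.' in the name)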
-
- // addPrefix - add prefix symbol to a name if there isn't one already.
- inline static std::string addPrefix (const std::string &Name) {
- std::string prefix = getTagName (PREFIX_SYMBOL);
-
- // If this name already has a prefix, nothing to do.
- if (Name.compare(0, prefix.size(), prefix) == 0)
- return Name;
-
- return prefix + Name;
- }
-
- // Get mangled func name from a mangled sym name.
- // In all cases func name is the first component before a '.'.
- static inline std::string getFuncNameForSym(const std::string &Sym1) {
- assert (getSymbolTag(Sym1) != GLOBAL && "does not belong to a function");
-
- std::string Sym = addPrefix(Sym1);
-
- // Position of the . after func name. That's where func name ends.
- size_t func_name_end = Sym.find ('.');
-
- return Sym.substr (0, func_name_end);
- }
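For instance (illustrative):

    // getFuncNameForSym("@foo.auto.a") == "@foo" (the first '.' ends the name)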
-
- // Get Frame start label for a func.
- static std::string getFrameLabel(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(FRAME_LABEL);
- return Func1 + tag;
- }
-
- // Get the retval label for the given function.
- static std::string getRetvalLabel(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(RET_LABEL);
- return Func1 + tag;
- }
-
- // Get the argument label for the given function.
- static std::string getArgsLabel(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(ARGS_LABEL);
- return Func1 + tag;
- }
-
- // Get the tempdata label for the given function.
- static std::string getTempdataLabel(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(TEMPS_LABEL);
- return Func1 + tag;
- }
-
- static std::string getFrameSectionName(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(FRAME_SECTION);
- return Func1 + tag + "#";
- }
-
- static std::string getAutosSectionName(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(AUTOS_SECTION);
- return Func1 + tag + "#";
- }
-
- static std::string getCodeSectionName(const std::string &Func) {
- std::string Func1 = addPrefix(Func);
- std::string tag = getTagName(CODE_SECTION);
- return Func1 + tag + "#";
- }
-
- static std::string getUserSectionName(const std::string &Name) {
- std::string sname = addPrefix(Name);
- std::string tag = getTagName(USER_SECTION);
- return sname + tag + "#";
- }
-
- // udata, romdata and idata section names are generated from a given number.
- // @udata.<num>.#
- static std::string getUdataSectionName(unsigned num,
- std::string prefix = "") {
- std::ostringstream o;
- o << getTagName(PREFIX_SYMBOL) << prefix << "udata." << num
- << ".#";
- return o.str();
- }
-
- static std::string getRomdataSectionName() {
- return "romdata.#";
- }
-
- static std::string getSharedUDataSectionName() {
- std::ostringstream o;
- o << getTagName(PREFIX_SYMBOL) << "udata_shr" << ".#";
- return o.str();
- }
-
- static std::string getRomdataSectionName(unsigned num,
- std::string prefix = "") {
- std::ostringstream o;
- o << getTagName(PREFIX_SYMBOL) << prefix << "romdata." << num
- << ".#";
- return o.str();
- }
-
- static std::string getIdataSectionName(unsigned num,
- std::string prefix = "") {
- std::ostringstream o;
- o << getTagName(PREFIX_SYMBOL) << prefix << "idata." << num
- << ".#";
- return o.str();
- }
-
- inline static bool isLocalName (const std::string &Name) {
- if (getSymbolTag(Name) == AUTOS_LABEL)
- return true;
-
- return false;
- }
-
-
- inline static bool isMemIntrinsic (const std::string &Name) {
- if (Name.compare("@memcpy") == 0 || Name.compare("@memset") == 0 ||
- Name.compare("@memmove") == 0) {
- return true;
- }
-
- return false;
- }
-
- // Currently the names of libcalls are assigned during TargetLowering
- // object construction. There is no provision to change them when the
- // code for an IL function is being generated.
- // So we have to change these names while printing assembly.
- // We need to do that mainly for names related to intrinsics. This
- // function returns true if a name needs to be cloned.
- inline static bool isIntrinsicStuff(const std::string &Name) {
- // Return true if the name contains the LIBCALL marker or a MemIntrinsic;
- // these are mainly ARGS_LABEL, RET_LABEL, and the LIBCALL name itself.
- if ((Name.find(getTagName(LIBCALL)) != std::string::npos)
- || isMemIntrinsic(Name))
- return true;
-
- return false;
- }
-
- // Rename the name for IL.
- inline static std::string Rename(const std::string &Name) {
- std::string Newname;
- // If it's a label (LIBCALL+Func+LABEL), change it to
- // (LIBCALL+Func+IL+LABEL).
- TAGS id = getSymbolTag(Name);
- if (id == ARGS_LABEL || id == RET_LABEL) {
- std::size_t pos = Name.find(getTagName(id));
- Newname = Name.substr(0, pos) + ".IL" + getTagName(id);
- return Newname;
- }
-
- // Else, just append IL to name.
- return Name + ".IL";
- }
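Concretely (illustrative, using the libcall naming from the class comment above):

    // Rename("@.lib.sra.i8.args.") == "@.lib.sra.i8.IL.args."  (label: insert .IL)
    // Rename("@.lib.sra.i8")       == "@.lib.sra.i8.IL"        (plain name: append .IL)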
-
-
-
-
- inline static bool isLocalToFunc (std::string &Func, std::string &Var) {
- if (! isLocalName(Var)) return false;
-
- std::string Func1 = addPrefix(Func);
- // Extract the func name of the variable.
- const std::string &fname = getFuncNameForSym(Var);
-
- if (fname.compare(Func1) == 0)
- return true;
-
- return false;
- }
-
-
- // Get the section for the given external symbol names.
- // This tries to find the type (Tag) of the symbol from its mangled name
- // and return appropriate section name for it.
- static inline std::string getSectionNameForSym(const std::string &Sym1) {
- std::string Sym = addPrefix(Sym1);
-
- std::string SectionName;
-
- std::string Fname = getFuncNameForSym (Sym);
- TAGS id = getSymbolTag (Sym);
-
- switch (id) {
- default : assert (0 && "Could not determine external symbol type");
- case FRAME_LABEL:
- case RET_LABEL:
- case TEMPS_LABEL:
- case ARGS_LABEL: {
- return getFrameSectionName(Fname);
- }
- case AUTOS_LABEL: {
- return getAutosSectionName(Fname);
- }
- }
- }
-
- /// Return Overlay Name for the section.
- /// The ABI Convention is: @<Color>.##.<section_tag>
- /// The section_tag is retrieved from the SectName parameter,
- /// and Color is passed in as a parameter.
- static inline std::string getOverlayName(std::string SectName, int Color) {
- // FIXME: Only autos_section and frame_section are colored.
- // So check and assert if the passed SectName does not have AUTOS_SECTION
- // or FRAME_SECTION tag in it.
- std::ostringstream o;
- o << getTagName(PREFIX_SYMBOL) << Color << ".##"
- << SectName.substr(SectName.find("."));
-
- return o.str();
- }
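For example (illustrative):

    // getOverlayName("@foo.autos_section.#", 5) == "@5.##.autos_section.#"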
-
- // Return true if the current function is an ISR
- inline static bool isISR(const std::string SectName) {
- if (SectName.find("interrupt") != std::string::npos)
- return true;
-
- return false;
- }
-
- // Return the address where the ISR starts in ROM.
- inline static std::string getISRAddr(void) {
- return "0x4";
- }
-
- // Returns the name of clone of a function.
- static std::string getCloneFnName(const std::string &Func) {
- return (Func + ".IL");
- }
-
- // Returns the name of clone of a variable.
- static std::string getCloneVarName(const std::string &Fn,
- const std::string &Var) {
- std::string cloneVarName = Var;
- // These vars are named like fun.auto.var.
- // Just replace the function name, with clone function name.
- std::string cloneFnName = getCloneFnName(Fn);
- cloneVarName.replace(cloneVarName.find(Fn), Fn.length(), cloneFnName);
- return cloneVarName;
- }
- }; // class PAN.
-} // end namespace llvm;
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp
deleted file mode 100644
index 7a948de..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp
+++ /dev/null
@@ -1,490 +0,0 @@
-
-//===-- PIC16DebugInfo.cpp - Implementation for PIC16 Debug Information --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the helper functions for representing debug information.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "PIC16DebugInfo.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/DebugLoc.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-using namespace llvm;
-
-/// PopulateDebugInfo - Populate the TypeNo, Aux[] and TagName from Ty.
-///
-void PIC16DbgInfo::PopulateDebugInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TagName) {
- if (Ty.isBasicType())
- PopulateBasicTypeInfo (Ty, TypeNo);
- else if (Ty.isCompositeType())
- PopulateCompositeTypeInfo (Ty, TypeNo, HasAux, Aux, TagName);
- else if (Ty.isDerivedType())
- PopulateDerivedTypeInfo (Ty, TypeNo, HasAux, Aux, TagName);
- else {
- TypeNo = PIC16Dbg::T_NULL;
- HasAux = false;
- }
- return;
-}
-
-/// PopulateBasicTypeInfo- Populate TypeNo for basic type from Ty.
-///
-void PIC16DbgInfo::PopulateBasicTypeInfo (DIType Ty, unsigned short &TypeNo) {
- std::string Name = Ty.getName();
- unsigned short BaseTy = GetTypeDebugNumber(Name);
- TypeNo = TypeNo << PIC16Dbg::S_BASIC;
- TypeNo = TypeNo | (0xffff & BaseTy);
-}
-
-/// PopulateDerivedTypeInfo - Populate TypeNo, Aux[], TagName for derived type
-/// from Ty. Derived types are mostly pointers.
-///
-void PIC16DbgInfo::PopulateDerivedTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TagName) {
-
- switch(Ty.getTag())
- {
- case dwarf::DW_TAG_pointer_type:
- TypeNo = TypeNo << PIC16Dbg::S_DERIVED;
- TypeNo = TypeNo | PIC16Dbg::DT_PTR;
- break;
- default:
- TypeNo = TypeNo << PIC16Dbg::S_DERIVED;
- }
-
- // We also need to encode the information about the base type of
- // pointer in TypeNo.
- DIType BaseType = DIDerivedType(Ty).getTypeDerivedFrom();
- PopulateDebugInfo(BaseType, TypeNo, HasAux, Aux, TagName);
-}
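To make the shift-and-or encoding concrete (illustrative; the symbolic constants are the PIC16Dbg values used above): starting from TypeNo = 0, a 'char *' first shifts by S_DERIVED and ors in DT_PTR, then the recursive call shifts by S_BASIC and ors in T_CHAR, so the base type lands in the low bits:

    // final TypeNo for 'char *': (PIC16Dbg::DT_PTR << PIC16Dbg::S_BASIC) | PIC16Dbg::T_CHAR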
-
-/// PopulateArrayTypeInfo - Populate TypeNo, Aux[] for array from Ty.
-void PIC16DbgInfo::PopulateArrayTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TagName) {
-
- DICompositeType CTy = DICompositeType(Ty);
- DIArray Elements = CTy.getTypeArray();
- unsigned short size = 1;
- unsigned short Dimension[4]={0,0,0,0};
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- if (Element.getTag() == dwarf::DW_TAG_subrange_type) {
- TypeNo = TypeNo << PIC16Dbg::S_DERIVED;
- TypeNo = TypeNo | PIC16Dbg::DT_ARY;
- DISubrange SubRange = DISubrange(Element);
- Dimension[i] = SubRange.getHi() - SubRange.getLo() + 1;
- // Each dimension is represented by 2 bytes starting at byte 9.
- Aux[8+i*2+0] = Dimension[i];
- Aux[8+i*2+1] = Dimension[i] >> 8;
- size = size * Dimension[i];
- }
- }
- HasAux = true;
- // In the auxiliary entry for an array, the 7th and 8th bytes represent
- // the array size.
- Aux[6] = size & 0xff;
- Aux[7] = size >> 8;
- DIType BaseType = CTy.getTypeDerivedFrom();
- PopulateDebugInfo(BaseType, TypeNo, HasAux, Aux, TagName);
-}
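A worked instance of the size encoding above (illustrative): an array with 300 total elements gets Aux[6] = 44 and Aux[7] = 1, since 300 = 1*256 + 44; each dimension is likewise stored as a two-byte pair starting at Aux[8].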
-
-/// PopulateStructOrUnionTypeInfo - Populate TypeNo, Aux[] , TagName for
-/// structure or union.
-///
-void PIC16DbgInfo::PopulateStructOrUnionTypeInfo (DIType Ty,
- unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TagName) {
- DICompositeType CTy = DICompositeType(Ty);
- TypeNo = TypeNo << PIC16Dbg::S_BASIC;
- if (Ty.getTag() == dwarf::DW_TAG_structure_type)
- TypeNo = TypeNo | PIC16Dbg::T_STRUCT;
- else
- TypeNo = TypeNo | PIC16Dbg::T_UNION;
- TagName = CTy.getName();
- // UniqueSuffix is .number where number is obtained from
- // llvm.dbg.composite<number>.
- // FIXME: This will break when composite type is not represented by
- // llvm.dbg.composite* global variable. Since we need to revisit
- // PIC16DebugInfo implementation anyways after the MDNodes based
- // framework is done, let us continue with the way it is.
- std::string UniqueSuffix = "." + Ty->getNameStr().substr(18);
- TagName += UniqueSuffix;
- unsigned short size = CTy.getSizeInBits()/8;
-  // The 7th and 8th bytes represent the size.
- HasAux = true;
- Aux[6] = size & 0xff;
- Aux[7] = size >> 8;
-}
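-
-// Note: strlen("llvm.dbg.composite") == 18, which is why substr(18) above
-// strips exactly that prefix. For a global named "llvm.dbg.composite5"
-// (an assumed example) it leaves "5", so UniqueSuffix becomes ".5" and
-// TagName something like "mystruct.5".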
-
-/// PopulateEnumTypeInfo - Populate TypeNo for enum from Ty.
-void PIC16DbgInfo::PopulateEnumTypeInfo (DIType Ty, unsigned short &TypeNo) {
- TypeNo = TypeNo << PIC16Dbg::S_BASIC;
- TypeNo = TypeNo | PIC16Dbg::T_ENUM;
-}
-
-/// PopulateCompositeTypeInfo - Populate TypeNo, Aux[] and TagName for
-/// composite types from Ty.
-///
-void PIC16DbgInfo::PopulateCompositeTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TagName) {
- switch (Ty.getTag()) {
- case dwarf::DW_TAG_array_type: {
- PopulateArrayTypeInfo (Ty, TypeNo, HasAux, Aux, TagName);
- break;
- }
-    case dwarf::DW_TAG_union_type:
- case dwarf::DW_TAG_structure_type: {
- PopulateStructOrUnionTypeInfo (Ty, TypeNo, HasAux, Aux, TagName);
- break;
- }
- case dwarf::DW_TAG_enumeration_type: {
- PopulateEnumTypeInfo (Ty, TypeNo);
- break;
- }
- default:
- TypeNo = TypeNo << PIC16Dbg::S_DERIVED;
- }
-}
-
-/// GetTypeDebugNumber - Get debug type number for given type.
-///
-unsigned PIC16DbgInfo::GetTypeDebugNumber(std::string &type) {
- if (type == "char")
- return PIC16Dbg::T_CHAR;
- else if (type == "short")
- return PIC16Dbg::T_SHORT;
- else if (type == "int")
- return PIC16Dbg::T_INT;
- else if (type == "long")
- return PIC16Dbg::T_LONG;
- else if (type == "unsigned char")
- return PIC16Dbg::T_UCHAR;
- else if (type == "unsigned short")
- return PIC16Dbg::T_USHORT;
- else if (type == "unsigned int")
- return PIC16Dbg::T_UINT;
- else if (type == "unsigned long")
- return PIC16Dbg::T_ULONG;
- else
- return 0;
-}
-
-/// getStorageClass - Get the storage class for a given debug variable.
-///
-short PIC16DbgInfo::getStorageClass(DIGlobalVariable DIGV) {
- short ClassNo;
- if (PAN::isLocalName(DIGV.getName())) {
-    // Generating C_AUTO here fails due to an error in the linker. Change it
-    // once the linker is fixed.
- ClassNo = PIC16Dbg::C_STAT;
- }
- else if (DIGV.isLocalToUnit())
- ClassNo = PIC16Dbg::C_STAT;
- else
- ClassNo = PIC16Dbg::C_EXT;
- return ClassNo;
-}
-
-/// BeginModule - Emit necessary debug info to start a Module and do other
-/// required initializations.
-void PIC16DbgInfo::BeginModule(Module &M) {
- // Emit file directive for module.
- DebugInfoFinder DbgFinder;
- DbgFinder.processModule(M);
- if (DbgFinder.compile_unit_count() != 0) {
-    // FIXME: What if more than one CU is present in a module?
- MDNode *CU = *DbgFinder.compile_unit_begin();
- EmitDebugDirectives = true;
- SwitchToCU(CU);
- }
- // Emit debug info for decls of composite types.
- EmitCompositeTypeDecls(M);
-}
-
-/// Helper to find first valid debug loc for a function.
-///
-static const DebugLoc GetDebugLocForFunction(const MachineFunction &MF) {
- DebugLoc DL;
- for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
- II != E; ++II) {
- DL = II->getDebugLoc();
- if (!DL.isUnknown())
- return DL;
- }
- }
- return DL;
-}
-
-/// BeginFunction - Emit necessary debug info to start a function.
-///
-void PIC16DbgInfo::BeginFunction(const MachineFunction &MF) {
- if (! EmitDebugDirectives) return;
-
-  // Retrieve the first valid debug loc and process it.
- const DebugLoc &DL = GetDebugLocForFunction(MF);
- // Emit debug info only if valid debug info is available.
- if (!DL.isUnknown()) {
- ChangeDebugLoc(MF, DL, true);
- EmitFunctBeginDI(MF.getFunction());
- }
-  // Set current line to 0 so that a .line directive is generated after .bf.
- CurLine = 0;
-}
-
-/// ChangeDebugLoc - Take necessary steps when DebugLoc changes.
-/// CurFile and CurLine may change as a result of this.
-///
-void PIC16DbgInfo::ChangeDebugLoc(const MachineFunction &MF,
- const DebugLoc &DL, bool IsInBeginFunction) {
- if (!EmitDebugDirectives) return;
- assert(!DL.isUnknown() && "can't change to invalid debug loc");
-
- SwitchToCU(DL.getScope(MF.getFunction()->getContext()));
- SwitchToLine(DL.getLine(), IsInBeginFunction);
-}
-
-/// SwitchToLine - Emit line directive for a new line.
-///
-void PIC16DbgInfo::SwitchToLine(unsigned Line, bool IsInBeginFunction) {
- if (CurLine == Line) return;
- if (!IsInBeginFunction)
- OS.EmitRawText("\n\t.line " + Twine(Line));
- CurLine = Line;
-}
-
-/// EndFunction - Emit .ef for end of function.
-///
-void PIC16DbgInfo::EndFunction(const MachineFunction &MF) {
- if (! EmitDebugDirectives) return;
- const DebugLoc &DL = GetDebugLocForFunction(MF);
- // Emit debug info only if valid debug info is available.
- if (!DL.isUnknown())
- EmitFunctEndDI(MF.getFunction(), CurLine);
-}
-
-/// EndModule - Emit .eof for end of module.
-///
-void PIC16DbgInfo::EndModule(Module &M) {
- if (! EmitDebugDirectives) return;
- EmitVarDebugInfo(M);
- if (CurFile != "") OS.EmitRawText(StringRef("\n\t.eof"));
-}
-
-/// EmitCompositeTypeElements - Emit debug information for members of a
-/// composite type.
-///
-void PIC16DbgInfo::EmitCompositeTypeElements (DICompositeType CTy,
- std::string SuffixNo) {
- unsigned long Value = 0;
- DIArray Elements = CTy.getTypeArray();
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; i++) {
- DIDescriptor Element = Elements.getElement(i);
- unsigned short TypeNo = 0;
- bool HasAux = false;
- int ElementAux[PIC16Dbg::AuxSize] = { 0 };
- std::string TagName = "";
- DIDerivedType DITy(Element);
- unsigned short ElementSize = DITy.getSizeInBits()/8;
-    // Get the mangled name for this structure/union element.
- std::string MangMemName = DITy.getName().str() + SuffixNo;
- PopulateDebugInfo(DITy, TypeNo, HasAux, ElementAux, TagName);
- short Class = 0;
- if( CTy.getTag() == dwarf::DW_TAG_union_type)
- Class = PIC16Dbg::C_MOU;
- else if (CTy.getTag() == dwarf::DW_TAG_structure_type)
- Class = PIC16Dbg::C_MOS;
- EmitSymbol(MangMemName.c_str(), Class, TypeNo, Value);
- if (CTy.getTag() == dwarf::DW_TAG_structure_type)
- Value += ElementSize;
- if (HasAux)
- EmitAuxEntry(MangMemName.c_str(), ElementAux, PIC16Dbg::AuxSize, TagName);
- }
-}
-
-/// EmitCompositeTypeDecls - Emit composite type declarations like structure
-/// and union declarations.
-///
-void PIC16DbgInfo::EmitCompositeTypeDecls(Module &M) {
- DebugInfoFinder DbgFinder;
- DbgFinder.processModule(M);
- for (DebugInfoFinder::iterator I = DbgFinder.type_begin(),
- E = DbgFinder.type_end(); I != E; ++I) {
- DICompositeType CTy(*I);
- if (!CTy.Verify())
- continue;
- if (CTy.getTag() == dwarf::DW_TAG_union_type ||
- CTy.getTag() == dwarf::DW_TAG_structure_type ) {
- // Get the number after llvm.dbg.composite and make UniqueSuffix from
- // it.
- std::string DIVar = CTy->getNameStr();
- std::string UniqueSuffix = "." + DIVar.substr(18);
- std::string MangledCTyName = CTy.getName().str() + UniqueSuffix;
- unsigned short size = CTy.getSizeInBits()/8;
- int Aux[PIC16Dbg::AuxSize] = {0};
-      // The 7th and 8th bytes represent the size of the structure/union.
- Aux[6] = size & 0xff;
- Aux[7] = size >> 8;
- // Emit .def for structure/union tag.
- if( CTy.getTag() == dwarf::DW_TAG_union_type)
- EmitSymbol(MangledCTyName.c_str(), PIC16Dbg::C_UNTAG);
- else if (CTy.getTag() == dwarf::DW_TAG_structure_type)
- EmitSymbol(MangledCTyName.c_str(), PIC16Dbg::C_STRTAG);
-
- // Emit auxiliary debug information for structure/union tag.
- EmitAuxEntry(MangledCTyName.c_str(), Aux, PIC16Dbg::AuxSize);
-
- // Emit members.
- EmitCompositeTypeElements (CTy, UniqueSuffix);
-
- // Emit mangled Symbol for end of structure/union.
- std::string EOSSymbol = ".eos" + UniqueSuffix;
- EmitSymbol(EOSSymbol.c_str(), PIC16Dbg::C_EOS);
- EmitAuxEntry(EOSSymbol.c_str(), Aux, PIC16Dbg::AuxSize,
- MangledCTyName.c_str());
- }
- }
-}
-
-
-/// EmitFunctBeginDI - Emit .bf for function.
-///
-void PIC16DbgInfo::EmitFunctBeginDI(const Function *F) {
- std::string FunctName = F->getName();
- if (EmitDebugDirectives) {
- std::string FunctBeginSym = ".bf." + FunctName;
- std::string BlockBeginSym = ".bb." + FunctName;
-
- int BFAux[PIC16Dbg::AuxSize] = {0};
- BFAux[4] = CurLine;
- BFAux[5] = CurLine >> 8;
-
- // Emit debug directives for beginning of function.
- EmitSymbol(FunctBeginSym, PIC16Dbg::C_FCN);
- EmitAuxEntry(FunctBeginSym, BFAux, PIC16Dbg::AuxSize);
-
- EmitSymbol(BlockBeginSym, PIC16Dbg::C_BLOCK);
- EmitAuxEntry(BlockBeginSym, BFAux, PIC16Dbg::AuxSize);
- }
-}
-
-/// EmitFunctEndDI - Emit .ef for function end.
-///
-void PIC16DbgInfo::EmitFunctEndDI(const Function *F, unsigned Line) {
- std::string FunctName = F->getName();
- if (EmitDebugDirectives) {
- std::string FunctEndSym = ".ef." + FunctName;
- std::string BlockEndSym = ".eb." + FunctName;
-
- // Emit debug directives for end of function.
- EmitSymbol(BlockEndSym, PIC16Dbg::C_BLOCK);
- int EFAux[PIC16Dbg::AuxSize] = {0};
-    // The 5th and 6th bytes hold the line number.
- EFAux[4] = CurLine;
- EFAux[5] = CurLine >> 8;
- EmitAuxEntry(BlockEndSym, EFAux, PIC16Dbg::AuxSize);
- EmitSymbol(FunctEndSym, PIC16Dbg::C_FCN);
- EmitAuxEntry(FunctEndSym, EFAux, PIC16Dbg::AuxSize);
- }
-}
-
-/// EmitAuxEntry - Emit Auxiliary debug information.
-///
-void PIC16DbgInfo::EmitAuxEntry(const std::string VarName, int Aux[], int Num,
- std::string TagName) {
- std::string Tmp;
- // TagName is emitted in case of structure/union objects.
- if (!TagName.empty()) Tmp += ", " + TagName;
-
- for (int i = 0; i<Num; i++)
- Tmp += "," + utostr(Aux[i] & 0xff);
-
- OS.EmitRawText("\n\t.dim " + Twine(VarName) + ", 1" + Tmp);
-}
-
-/// EmitSymbol - Emit .def for a symbol. Value is the offset of the member.
-///
-void PIC16DbgInfo::EmitSymbol(std::string Name, short Class,
- unsigned short Type, unsigned long Value) {
- std::string Tmp;
- if (Value > 0)
- Tmp = ", value = " + utostr(Value);
-
- OS.EmitRawText("\n\t.def " + Twine(Name) + ", type = " + utostr(Type) +
- ", class = " + utostr(Class) + Tmp);
-}
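-
-// Illustration of the emitted directives (the symbol name is an assumed
-// example): EmitSymbol("mystruct.5", PIC16Dbg::C_STRTAG) emits
-//   .def mystruct.5, type = 0, class = 10
-// (T_NULL == 0, C_STRTAG == 10, and Value == 0 adds no value clause),
-// while EmitAuxEntry("mystruct.5", Aux, PIC16Dbg::AuxSize) follows with a
-//   .dim mystruct.5, 1,...
-// line listing all twenty Aux bytes in decimal.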
-
-/// EmitVarDebugInfo - Emit debug information for all variables.
-///
-void PIC16DbgInfo::EmitVarDebugInfo(Module &M) {
- DebugInfoFinder DbgFinder;
- DbgFinder.processModule(M);
-
- for (DebugInfoFinder::iterator I = DbgFinder.global_variable_begin(),
- E = DbgFinder.global_variable_end(); I != E; ++I) {
- DIGlobalVariable DIGV(*I);
- DIType Ty = DIGV.getType();
- unsigned short TypeNo = 0;
- bool HasAux = false;
- int Aux[PIC16Dbg::AuxSize] = { 0 };
- std::string TagName = "";
- std::string VarName = DIGV.getName();
- VarName = MAI->getGlobalPrefix() + VarName;
- PopulateDebugInfo(Ty, TypeNo, HasAux, Aux, TagName);
-    // Emit debug info only if type information is available.
- if (TypeNo != PIC16Dbg::T_NULL) {
- OS.EmitRawText("\t.type " + Twine(VarName) + ", " + Twine(TypeNo));
- short ClassNo = getStorageClass(DIGV);
- OS.EmitRawText("\t.class " + Twine(VarName) + ", " + Twine(ClassNo));
- if (HasAux)
- EmitAuxEntry(VarName, Aux, PIC16Dbg::AuxSize, TagName);
- }
- }
-}
-
-/// SwitchToCU - Switch to a new compilation unit.
-///
-void PIC16DbgInfo::SwitchToCU(MDNode *CU) {
- // Get the file path from CU.
- DICompileUnit cu(CU);
- std::string DirName = cu.getDirectory();
- std::string FileName = cu.getFilename();
- std::string FilePath = DirName + "/" + FileName;
-
-  // Nothing to do if the source file is still the same.
-  if (FilePath == CurFile) return;
-
-  // Else, close the current one and start a new one.
- if (CurFile != "")
- OS.EmitRawText(StringRef("\t.eof"));
- OS.EmitRawText("\n\t.file\t\"" + Twine(FilePath) + "\"");
- CurFile = FilePath;
- CurLine = 0;
-}
-
-/// EmitEOF - Emit .eof for end of file.
-///
-void PIC16DbgInfo::EmitEOF() {
- if (CurFile != "")
- OS.EmitRawText(StringRef("\t.EOF"));
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.h
deleted file mode 100644
index 031dcf0..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//===-- PIC16DebugInfo.h - Interfaces for PIC16 Debug Information --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the helper functions for representing debug information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16DBG_H
-#define PIC16DBG_H
-
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Module.h"
-
-namespace llvm {
- class MachineFunction;
- class DebugLoc;
- class MCStreamer;
-
- namespace PIC16Dbg {
- enum VarType {
- T_NULL,
- T_VOID,
- T_CHAR,
- T_SHORT,
- T_INT,
- T_LONG,
- T_FLOAT,
- T_DOUBLE,
- T_STRUCT,
- T_UNION,
- T_ENUM,
- T_MOE,
- T_UCHAR,
- T_USHORT,
- T_UINT,
- T_ULONG
- };
- enum DerivedType {
- DT_NONE,
- DT_PTR,
- DT_FCN,
- DT_ARY
- };
- enum TypeSize {
- S_BASIC = 5,
- S_DERIVED = 3
- };
- enum DbgClass {
- C_NULL,
- C_AUTO,
- C_EXT,
- C_STAT,
- C_REG,
- C_EXTDEF,
- C_LABEL,
- C_ULABEL,
- C_MOS,
- C_ARG,
- C_STRTAG,
- C_MOU,
- C_UNTAG,
- C_TPDEF,
- C_USTATIC,
- C_ENTAG,
- C_MOE,
- C_REGPARM,
- C_FIELD,
- C_AUTOARG,
- C_LASTENT,
- C_BLOCK = 100,
- C_FCN,
- C_EOS,
- C_FILE,
- C_LINE,
- C_ALIAS,
- C_HIDDEN,
- C_EOF,
- C_LIST,
- C_SECTION,
- C_EFCN = 255
- };
- enum SymbolSize {
-      AuxSize = 20
- };
- }
-
- class PIC16DbgInfo {
- MCStreamer &OS;
- const MCAsmInfo *MAI;
- std::string CurFile;
- unsigned CurLine;
-
- // EmitDebugDirectives is set if debug information is available. Default
- // value for it is false.
- bool EmitDebugDirectives;
-
- public:
- PIC16DbgInfo(MCStreamer &os, const MCAsmInfo *T) : OS(os), MAI(T) {
- CurFile = "";
- CurLine = 0;
- EmitDebugDirectives = false;
- }
-
- void BeginModule (Module &M);
- void BeginFunction (const MachineFunction &MF);
- void ChangeDebugLoc (const MachineFunction &MF, const DebugLoc &DL,
- bool IsInBeginFunction = false);
- void EndFunction (const MachineFunction &MF);
- void EndModule (Module &M);
-
-
- private:
- void SwitchToCU (MDNode *CU);
- void SwitchToLine (unsigned Line, bool IsInBeginFunction = false);
-
- void PopulateDebugInfo (DIType Ty, unsigned short &TypeNo, bool &HasAux,
- int Aux[], std::string &TypeName);
- void PopulateBasicTypeInfo (DIType Ty, unsigned short &TypeNo);
- void PopulateDerivedTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TypeName);
-
- void PopulateCompositeTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TypeName);
- void PopulateArrayTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TypeName);
-
- void PopulateStructOrUnionTypeInfo (DIType Ty, unsigned short &TypeNo,
- bool &HasAux, int Aux[],
- std::string &TypeName);
- void PopulateEnumTypeInfo (DIType Ty, unsigned short &TypeNo);
-
- unsigned GetTypeDebugNumber(std::string &Type);
- short getStorageClass(DIGlobalVariable DIGV);
- void EmitFunctBeginDI(const Function *F);
- void EmitCompositeTypeDecls(Module &M);
- void EmitCompositeTypeElements (DICompositeType CTy, std::string Suffix);
- void EmitFunctEndDI(const Function *F, unsigned Line);
- void EmitAuxEntry(const std::string VarName, int Aux[],
- int num = PIC16Dbg::AuxSize, std::string TagName = "");
- inline void EmitSymbol(std::string Name, short Class,
- unsigned short Type = PIC16Dbg::T_NULL,
- unsigned long Value = 0);
- void EmitVarDebugInfo(Module &M);
- void EmitEOF();
- };
-} // end namespace llvm
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp
deleted file mode 100644
index 6cbd002..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//===-- PIC16ISelDAGToDAG.cpp - A dag to dag inst selector for PIC16 ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the PIC16 target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-isel"
-
-#include "llvm/Support/ErrorHandling.h"
-#include "PIC16ISelDAGToDAG.h"
-using namespace llvm;
-
-/// createPIC16ISelDag - This pass converts a legalized DAG into a
-/// PIC16-specific DAG, ready for instruction scheduling.
-FunctionPass *llvm::createPIC16ISelDag(PIC16TargetMachine &TM) {
- return new PIC16DAGToDAGISel(TM);
-}
-
-
-/// Select - Select instructions not customized! Used for
-/// expanded, promoted and normal instructions.
-SDNode* PIC16DAGToDAGISel::Select(SDNode *N) {
-
- // Select the default instruction.
- SDNode *ResNode = SelectCode(N);
-
- return ResNode;
-}
-
-
-// SelectDirectAddr - Match a direct address for DAG.
-// A direct address could be a globaladdress or externalsymbol.
-bool PIC16DAGToDAGISel::SelectDirectAddr(SDNode *Op, SDValue N,
- SDValue &Address) {
- // Return true if TGA or ES.
- if (N.getOpcode() == ISD::TargetGlobalAddress
- || N.getOpcode() == ISD::TargetExternalSymbol) {
- Address = N;
- return true;
- }
-
- return false;
-}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.h b/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.h
deleted file mode 100644
index ecaddd3..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelDAGToDAG.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- PIC16ISelDAGToDAG.h - A dag to dag inst selector for PIC16 --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the PIC16 target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-isel"
-
-#include "PIC16.h"
-#include "PIC16RegisterInfo.h"
-#include "PIC16TargetMachine.h"
-#include "PIC16MachineFunctionInfo.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Intrinsics.h"
-using namespace llvm;
-
-namespace {
-
-class LLVM_LIBRARY_VISIBILITY PIC16DAGToDAGISel : public SelectionDAGISel {
-
- /// TM - Keep a reference to PIC16TargetMachine.
- const PIC16TargetMachine &TM;
-
- /// PIC16Lowering - This object fully describes how to lower LLVM code to an
- /// PIC16-specific SelectionDAG.
- const PIC16TargetLowering &PIC16Lowering;
-
-public:
- explicit PIC16DAGToDAGISel(PIC16TargetMachine &tm) :
- SelectionDAGISel(tm),
- TM(tm), PIC16Lowering(*TM.getTargetLowering()) {}
-
- // Pass Name
- virtual const char *getPassName() const {
- return "PIC16 DAG->DAG Pattern Instruction Selection";
- }
-
-private:
- // Include the pieces autogenerated from the target description.
-#include "PIC16GenDAGISel.inc"
-
- SDNode *Select(SDNode *N);
-
- // Match direct address complex pattern.
- bool SelectDirectAddr(SDNode *Op, SDValue N, SDValue &Address);
-
-};
-
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp
deleted file mode 100644
index 527b31d..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp
+++ /dev/null
@@ -1,2000 +0,0 @@
-//===-- PIC16ISelLowering.cpp - PIC16 DAG Lowering Implementation --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that PIC16 uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-lower"
-#include "PIC16ABINames.h"
-#include "PIC16ISelLowering.h"
-#include "PIC16TargetObjectFile.h"
-#include "PIC16TargetMachine.h"
-#include "PIC16MachineFunctionInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Function.h"
-#include "llvm/CallingConv.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/ErrorHandling.h"
-
-
-using namespace llvm;
-
-static const char *getIntrinsicName(unsigned opcode) {
- std::string Basename;
- switch(opcode) {
- default: llvm_unreachable("do not know intrinsic name");
- // Arithmetic Right shift for integer types.
- case PIC16ISD::SRA_I8: Basename = "sra.i8"; break;
- case RTLIB::SRA_I16: Basename = "sra.i16"; break;
- case RTLIB::SRA_I32: Basename = "sra.i32"; break;
-
- // Left shift for integer types.
- case PIC16ISD::SLL_I8: Basename = "sll.i8"; break;
- case RTLIB::SHL_I16: Basename = "sll.i16"; break;
- case RTLIB::SHL_I32: Basename = "sll.i32"; break;
-
- // Logical Right Shift for integer types.
- case PIC16ISD::SRL_I8: Basename = "srl.i8"; break;
- case RTLIB::SRL_I16: Basename = "srl.i16"; break;
- case RTLIB::SRL_I32: Basename = "srl.i32"; break;
-
- // Multiply for integer types.
- case PIC16ISD::MUL_I8: Basename = "mul.i8"; break;
- case RTLIB::MUL_I16: Basename = "mul.i16"; break;
- case RTLIB::MUL_I32: Basename = "mul.i32"; break;
-
- // Signed division for integers.
- case RTLIB::SDIV_I16: Basename = "sdiv.i16"; break;
- case RTLIB::SDIV_I32: Basename = "sdiv.i32"; break;
-
- // Unsigned division for integers.
- case RTLIB::UDIV_I16: Basename = "udiv.i16"; break;
- case RTLIB::UDIV_I32: Basename = "udiv.i32"; break;
-
-    // Signed modulus for integers.
- case RTLIB::SREM_I16: Basename = "srem.i16"; break;
- case RTLIB::SREM_I32: Basename = "srem.i32"; break;
-
-    // Unsigned modulus for integers.
- case RTLIB::UREM_I16: Basename = "urem.i16"; break;
- case RTLIB::UREM_I32: Basename = "urem.i32"; break;
-
- //////////////////////
- // LIBCALLS FOR FLOATS
- //////////////////////
-
- // Float to signed integrals
- case RTLIB::FPTOSINT_F32_I8: Basename = "f32_to_si32"; break;
- case RTLIB::FPTOSINT_F32_I16: Basename = "f32_to_si32"; break;
- case RTLIB::FPTOSINT_F32_I32: Basename = "f32_to_si32"; break;
-
- // Signed integrals to float. char and int are first sign extended to i32
- // before being converted to float, so an I8_F32 or I16_F32 isn't required.
- case RTLIB::SINTTOFP_I32_F32: Basename = "si32_to_f32"; break;
-
- // Float to Unsigned conversions.
- // Signed conversion can be used for unsigned conversion as well.
- // In signed and unsigned versions only the interpretation of the
- // MSB is different. Bit representation remains the same.
- case RTLIB::FPTOUINT_F32_I8: Basename = "f32_to_si32"; break;
- case RTLIB::FPTOUINT_F32_I16: Basename = "f32_to_si32"; break;
- case RTLIB::FPTOUINT_F32_I32: Basename = "f32_to_si32"; break;
-
- // Unsigned to Float conversions. char and int are first zero extended
- // before being converted to float.
- case RTLIB::UINTTOFP_I32_F32: Basename = "ui32_to_f32"; break;
-
- // Floating point add, sub, mul, div.
- case RTLIB::ADD_F32: Basename = "add.f32"; break;
- case RTLIB::SUB_F32: Basename = "sub.f32"; break;
- case RTLIB::MUL_F32: Basename = "mul.f32"; break;
- case RTLIB::DIV_F32: Basename = "div.f32"; break;
-
- // Floating point comparison
- case RTLIB::O_F32: Basename = "unordered.f32"; break;
- case RTLIB::UO_F32: Basename = "unordered.f32"; break;
- case RTLIB::OLE_F32: Basename = "le.f32"; break;
- case RTLIB::OGE_F32: Basename = "ge.f32"; break;
- case RTLIB::OLT_F32: Basename = "lt.f32"; break;
- case RTLIB::OGT_F32: Basename = "gt.f32"; break;
- case RTLIB::OEQ_F32: Basename = "eq.f32"; break;
- case RTLIB::UNE_F32: Basename = "neq.f32"; break;
- }
-
- std::string prefix = PAN::getTagName(PAN::PREFIX_SYMBOL);
- std::string tagname = PAN::getTagName(PAN::LIBCALL);
- std::string Fullname = prefix + tagname + Basename;
-
- // The name has to live through program life.
- return ESNames::createESName(Fullname);
-}
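-
-// Note: the full libcall symbol is assembled as
-//   <PREFIX_SYMBOL tag><LIBCALL tag><Basename>
-// e.g. for PIC16ISD::SRA_I8 the Basename is "sra.i8"; the exact tag
-// strings come from PAN::getTagName() in PIC16ABINames.h and are not
-// reproduced here.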
-
-// getStdLibCallName - Get the name for the standard library function.
-static const char *getStdLibCallName(unsigned opcode) {
- std::string BaseName;
- switch(opcode) {
- case RTLIB::COS_F32: BaseName = "cos";
- break;
- case RTLIB::SIN_F32: BaseName = "sin";
- break;
- case RTLIB::MEMCPY: BaseName = "memcpy";
- break;
- case RTLIB::MEMSET: BaseName = "memset";
- break;
- case RTLIB::MEMMOVE: BaseName = "memmove";
- break;
- default: llvm_unreachable("do not know std lib call name");
- }
- std::string prefix = PAN::getTagName(PAN::PREFIX_SYMBOL);
- std::string LibCallName = prefix + BaseName;
-
- // The name has to live through program life.
- return ESNames::createESName(LibCallName);
-}
-
-// PIC16TargetLowering Constructor.
-PIC16TargetLowering::PIC16TargetLowering(PIC16TargetMachine &TM)
- : TargetLowering(TM, new PIC16TargetObjectFile()) {
-
- Subtarget = &TM.getSubtarget<PIC16Subtarget>();
-
- addRegisterClass(MVT::i8, PIC16::GPRRegisterClass);
-
- setShiftAmountType(MVT::i8);
-
- // Std lib call names
- setLibcallName(RTLIB::COS_F32, getStdLibCallName(RTLIB::COS_F32));
- setLibcallName(RTLIB::SIN_F32, getStdLibCallName(RTLIB::SIN_F32));
- setLibcallName(RTLIB::MEMCPY, getStdLibCallName(RTLIB::MEMCPY));
- setLibcallName(RTLIB::MEMSET, getStdLibCallName(RTLIB::MEMSET));
- setLibcallName(RTLIB::MEMMOVE, getStdLibCallName(RTLIB::MEMMOVE));
-
- // SRA library call names
- setPIC16LibcallName(PIC16ISD::SRA_I8, getIntrinsicName(PIC16ISD::SRA_I8));
- setLibcallName(RTLIB::SRA_I16, getIntrinsicName(RTLIB::SRA_I16));
- setLibcallName(RTLIB::SRA_I32, getIntrinsicName(RTLIB::SRA_I32));
-
- // SHL library call names
- setPIC16LibcallName(PIC16ISD::SLL_I8, getIntrinsicName(PIC16ISD::SLL_I8));
- setLibcallName(RTLIB::SHL_I16, getIntrinsicName(RTLIB::SHL_I16));
- setLibcallName(RTLIB::SHL_I32, getIntrinsicName(RTLIB::SHL_I32));
-
- // SRL library call names
- setPIC16LibcallName(PIC16ISD::SRL_I8, getIntrinsicName(PIC16ISD::SRL_I8));
- setLibcallName(RTLIB::SRL_I16, getIntrinsicName(RTLIB::SRL_I16));
- setLibcallName(RTLIB::SRL_I32, getIntrinsicName(RTLIB::SRL_I32));
-
- // MUL Library call names
- setPIC16LibcallName(PIC16ISD::MUL_I8, getIntrinsicName(PIC16ISD::MUL_I8));
- setLibcallName(RTLIB::MUL_I16, getIntrinsicName(RTLIB::MUL_I16));
- setLibcallName(RTLIB::MUL_I32, getIntrinsicName(RTLIB::MUL_I32));
-
- // Signed division lib call names
- setLibcallName(RTLIB::SDIV_I16, getIntrinsicName(RTLIB::SDIV_I16));
- setLibcallName(RTLIB::SDIV_I32, getIntrinsicName(RTLIB::SDIV_I32));
-
- // Unsigned division lib call names
- setLibcallName(RTLIB::UDIV_I16, getIntrinsicName(RTLIB::UDIV_I16));
- setLibcallName(RTLIB::UDIV_I32, getIntrinsicName(RTLIB::UDIV_I32));
-
- // Signed remainder lib call names
- setLibcallName(RTLIB::SREM_I16, getIntrinsicName(RTLIB::SREM_I16));
- setLibcallName(RTLIB::SREM_I32, getIntrinsicName(RTLIB::SREM_I32));
-
- // Unsigned remainder lib call names
- setLibcallName(RTLIB::UREM_I16, getIntrinsicName(RTLIB::UREM_I16));
- setLibcallName(RTLIB::UREM_I32, getIntrinsicName(RTLIB::UREM_I32));
-
- // Floating point to signed int conversions.
- setLibcallName(RTLIB::FPTOSINT_F32_I8,
- getIntrinsicName(RTLIB::FPTOSINT_F32_I8));
- setLibcallName(RTLIB::FPTOSINT_F32_I16,
- getIntrinsicName(RTLIB::FPTOSINT_F32_I16));
- setLibcallName(RTLIB::FPTOSINT_F32_I32,
- getIntrinsicName(RTLIB::FPTOSINT_F32_I32));
-
- // Signed int to floats.
- setLibcallName(RTLIB::SINTTOFP_I32_F32,
- getIntrinsicName(RTLIB::SINTTOFP_I32_F32));
-
- // Floating points to unsigned ints.
- setLibcallName(RTLIB::FPTOUINT_F32_I8,
- getIntrinsicName(RTLIB::FPTOUINT_F32_I8));
- setLibcallName(RTLIB::FPTOUINT_F32_I16,
- getIntrinsicName(RTLIB::FPTOUINT_F32_I16));
- setLibcallName(RTLIB::FPTOUINT_F32_I32,
- getIntrinsicName(RTLIB::FPTOUINT_F32_I32));
-
- // Unsigned int to floats.
- setLibcallName(RTLIB::UINTTOFP_I32_F32,
- getIntrinsicName(RTLIB::UINTTOFP_I32_F32));
-
-  // Floating point add, sub, mul, div.
- setLibcallName(RTLIB::ADD_F32, getIntrinsicName(RTLIB::ADD_F32));
- setLibcallName(RTLIB::SUB_F32, getIntrinsicName(RTLIB::SUB_F32));
- setLibcallName(RTLIB::MUL_F32, getIntrinsicName(RTLIB::MUL_F32));
- setLibcallName(RTLIB::DIV_F32, getIntrinsicName(RTLIB::DIV_F32));
-
-  // Floating point comparison
- setLibcallName(RTLIB::O_F32, getIntrinsicName(RTLIB::O_F32));
- setLibcallName(RTLIB::UO_F32, getIntrinsicName(RTLIB::UO_F32));
- setLibcallName(RTLIB::OLE_F32, getIntrinsicName(RTLIB::OLE_F32));
- setLibcallName(RTLIB::OGE_F32, getIntrinsicName(RTLIB::OGE_F32));
- setLibcallName(RTLIB::OLT_F32, getIntrinsicName(RTLIB::OLT_F32));
- setLibcallName(RTLIB::OGT_F32, getIntrinsicName(RTLIB::OGT_F32));
- setLibcallName(RTLIB::OEQ_F32, getIntrinsicName(RTLIB::OEQ_F32));
- setLibcallName(RTLIB::UNE_F32, getIntrinsicName(RTLIB::UNE_F32));
-
- // Return value comparisons of floating point calls.
- setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
-
- setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
- setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
-
- setOperationAction(ISD::LOAD, MVT::i8, Legal);
- setOperationAction(ISD::LOAD, MVT::i16, Custom);
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
-
- setOperationAction(ISD::STORE, MVT::i8, Legal);
- setOperationAction(ISD::STORE, MVT::i16, Custom);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::i64, Custom);
-
- setOperationAction(ISD::ADDE, MVT::i8, Custom);
- setOperationAction(ISD::ADDC, MVT::i8, Custom);
- setOperationAction(ISD::SUBE, MVT::i8, Custom);
- setOperationAction(ISD::SUBC, MVT::i8, Custom);
- setOperationAction(ISD::SUB, MVT::i8, Custom);
- setOperationAction(ISD::ADD, MVT::i8, Custom);
- setOperationAction(ISD::ADD, MVT::i16, Custom);
-
- setOperationAction(ISD::OR, MVT::i8, Custom);
- setOperationAction(ISD::AND, MVT::i8, Custom);
- setOperationAction(ISD::XOR, MVT::i8, Custom);
-
- setOperationAction(ISD::FrameIndex, MVT::i16, Custom);
-
- setOperationAction(ISD::MUL, MVT::i8, Custom);
-
- setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
- setOperationAction(ISD::MULHU, MVT::i8, Expand);
- setOperationAction(ISD::MULHS, MVT::i8, Expand);
-
- setOperationAction(ISD::SRA, MVT::i8, Custom);
- setOperationAction(ISD::SHL, MVT::i8, Custom);
- setOperationAction(ISD::SRL, MVT::i8, Custom);
-
- setOperationAction(ISD::ROTL, MVT::i8, Expand);
- setOperationAction(ISD::ROTR, MVT::i8, Expand);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- // PIC16 does not support shift parts
- setOperationAction(ISD::SRA_PARTS, MVT::i8, Expand);
- setOperationAction(ISD::SHL_PARTS, MVT::i8, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i8, Expand);
-
-
- // PIC16 does not have a SETCC, expand it to SELECT_CC.
- setOperationAction(ISD::SETCC, MVT::i8, Expand);
- setOperationAction(ISD::SELECT, MVT::i8, Expand);
- setOperationAction(ISD::BRCOND, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
-
- setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
- setOperationAction(ISD::BR_CC, MVT::i8, Custom);
-
- //setOperationAction(ISD::TRUNCATE, MVT::i16, Custom);
- setTruncStoreAction(MVT::i16, MVT::i8, Custom);
-
-  // Now deduce the register properties based on the above-mentioned
-  // actions.
- computeRegisterProperties();
-}
-
-std::pair<const TargetRegisterClass*, uint8_t>
-PIC16TargetLowering::findRepresentativeClass(EVT VT) const {
- switch (VT.getSimpleVT().SimpleTy) {
- default:
- return TargetLowering::findRepresentativeClass(VT);
- case MVT::i16:
- return std::make_pair(PIC16::FSR16RegisterClass, 1);
- }
-}
-
-// getOutFlag - Extract the flag result if the Op has it.
-static SDValue getOutFlag(SDValue &Op) {
- // Flag is the last value of the node.
- SDValue Flag = Op.getValue(Op.getNode()->getNumValues() - 1);
-
- assert (Flag.getValueType() == MVT::Flag
- && "Node does not have an out Flag");
-
- return Flag;
-}
-// Get the TmpOffset for FrameIndex
-unsigned PIC16TargetLowering::GetTmpOffsetForFI(unsigned FI, unsigned size,
- MachineFunction &MF) const {
- PIC16MachineFunctionInfo *FuncInfo = MF.getInfo<PIC16MachineFunctionInfo>();
- std::map<unsigned, unsigned> &FiTmpOffsetMap = FuncInfo->getFiTmpOffsetMap();
-
- std::map<unsigned, unsigned>::iterator
- MapIt = FiTmpOffsetMap.find(FI);
- if (MapIt != FiTmpOffsetMap.end())
- return MapIt->second;
-
- // This FI (FrameIndex) is not yet mapped, so map it
- FiTmpOffsetMap[FI] = FuncInfo->getTmpSize();
- FuncInfo->setTmpSize(FuncInfo->getTmpSize() + size);
- return FiTmpOffsetMap[FI];
-}
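-
-// Worked example (assuming an empty map and TmpSize == 0):
-//   GetTmpOffsetForFI(2, 1, MF); // maps FI 2 -> 0, TmpSize becomes 1
-//   GetTmpOffsetForFI(5, 2, MF); // maps FI 5 -> 1, TmpSize becomes 3
-//   GetTmpOffsetForFI(2, 1, MF); // already mapped, returns 0 again
-// so temporaries are packed back to back in the per-function temp area.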
-
-void PIC16TargetLowering::ResetTmpOffsetMap(SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- PIC16MachineFunctionInfo *FuncInfo = MF.getInfo<PIC16MachineFunctionInfo>();
- FuncInfo->getFiTmpOffsetMap().clear();
- FuncInfo->setTmpSize(0);
-}
-
-// Extract the chain value from an SDValue node. This helper keeps the
-// chain-extraction code in one place, so any future change needs to be
-// made only here.
-static SDValue getChain(SDValue &Op) {
- SDValue Chain = Op.getValue(Op.getNode()->getNumValues() - 1);
-
-  // If the last value returned is a Flag, then the chain is the
-  // second-to-last value returned.
- if (Chain.getValueType() == MVT::Flag)
- Chain = Op.getValue(Op.getNode()->getNumValues() - 2);
-
-  // Not all nodes produce a chain, so the following assert verifies
-  // that the node really is returning a chain.
- assert (Chain.getValueType() == MVT::Other
- && "Node does not have a chain");
-
- return Chain;
-}
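-
-// Note: for a node producing (i8 value, chain, flag), NumValues == 3; the
-// last value is the Flag, so getChain() falls back to value index
-// NumValues - 2 == 1, which is the chain, and the assert in getChain holds.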
-
-/// PopulateResults - Helper function to LowerOperation.
-/// If a node wants to return multiple results after lowering,
-/// it stuffs them into an array of SDValue called Results.
-
-static void PopulateResults(SDValue N, SmallVectorImpl<SDValue>&Results) {
- if (N.getOpcode() == ISD::MERGE_VALUES) {
- int NumResults = N.getNumOperands();
- for( int i = 0; i < NumResults; i++)
- Results.push_back(N.getOperand(i));
- }
- else
- Results.push_back(N);
-}
-
-MVT::SimpleValueType
-PIC16TargetLowering::getSetCCResultType(EVT ValType) const {
- return MVT::i8;
-}
-
-MVT::SimpleValueType
-PIC16TargetLowering::getCmpLibcallReturnType() const {
- return MVT::i8;
-}
-
-/// The type legalizer framework can generate libcalls only when the
-/// operand/result types are illegal.
-/// PIC16 needs to generate libcalls even for the legal types (i8) for some
-/// ops, for example an arithmetic right shift. These functions are used to
-/// lower such operations, generating libcalls for legal types.
-
-void
-PIC16TargetLowering::setPIC16LibcallName(PIC16ISD::PIC16Libcall Call,
- const char *Name) {
- PIC16LibcallNames[Call] = Name;
-}
-
-const char *
-PIC16TargetLowering::getPIC16LibcallName(PIC16ISD::PIC16Libcall Call) const {
- return PIC16LibcallNames[Call];
-}
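-
-// Usage sketch (both lines appear elsewhere in this file): the constructor
-// registers
-//   setPIC16LibcallName(PIC16ISD::MUL_I8, getIntrinsicName(PIC16ISD::MUL_I8));
-// and MakePIC16Libcall() later resolves the same enum via
-//   DAG.getExternalSymbol(getPIC16LibcallName(Call), MVT::i16);
-// which is how an i8 multiply on a legal type still becomes a libcall.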
-
-SDValue
-PIC16TargetLowering::MakePIC16Libcall(PIC16ISD::PIC16Libcall Call,
- EVT RetVT, const SDValue *Ops,
- unsigned NumOps, bool isSigned,
- SelectionDAG &DAG, DebugLoc dl) const {
-
- TargetLowering::ArgListTy Args;
- Args.reserve(NumOps);
-
- TargetLowering::ArgListEntry Entry;
- for (unsigned i = 0; i != NumOps; ++i) {
- Entry.Node = Ops[i];
- Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
- Entry.isSExt = isSigned;
- Entry.isZExt = !isSigned;
- Args.push_back(Entry);
- }
-
- SDValue Callee = DAG.getExternalSymbol(getPIC16LibcallName(Call), MVT::i16);
-
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
- std::pair<SDValue,SDValue> CallInfo =
- LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, CallingConv::C, false,
- /*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
-
- return CallInfo.first;
-}
-
-const char *PIC16TargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return NULL;
- case PIC16ISD::Lo: return "PIC16ISD::Lo";
- case PIC16ISD::Hi: return "PIC16ISD::Hi";
- case PIC16ISD::MTLO: return "PIC16ISD::MTLO";
- case PIC16ISD::MTHI: return "PIC16ISD::MTHI";
- case PIC16ISD::MTPCLATH: return "PIC16ISD::MTPCLATH";
- case PIC16ISD::PIC16Connect: return "PIC16ISD::PIC16Connect";
- case PIC16ISD::Banksel: return "PIC16ISD::Banksel";
- case PIC16ISD::PIC16Load: return "PIC16ISD::PIC16Load";
- case PIC16ISD::PIC16LdArg: return "PIC16ISD::PIC16LdArg";
- case PIC16ISD::PIC16LdWF: return "PIC16ISD::PIC16LdWF";
- case PIC16ISD::PIC16Store: return "PIC16ISD::PIC16Store";
- case PIC16ISD::PIC16StWF: return "PIC16ISD::PIC16StWF";
- case PIC16ISD::BCF: return "PIC16ISD::BCF";
- case PIC16ISD::LSLF: return "PIC16ISD::LSLF";
- case PIC16ISD::LRLF: return "PIC16ISD::LRLF";
- case PIC16ISD::RLF: return "PIC16ISD::RLF";
- case PIC16ISD::RRF: return "PIC16ISD::RRF";
- case PIC16ISD::CALL: return "PIC16ISD::CALL";
- case PIC16ISD::CALLW: return "PIC16ISD::CALLW";
- case PIC16ISD::SUBCC: return "PIC16ISD::SUBCC";
- case PIC16ISD::SELECT_ICC: return "PIC16ISD::SELECT_ICC";
- case PIC16ISD::BRCOND: return "PIC16ISD::BRCOND";
- case PIC16ISD::RET: return "PIC16ISD::RET";
- case PIC16ISD::Dummy: return "PIC16ISD::Dummy";
- }
-}
-
-void PIC16TargetLowering::ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const {
-
- switch (N->getOpcode()) {
- case ISD::GlobalAddress:
- Results.push_back(ExpandGlobalAddress(N, DAG));
- return;
- case ISD::ExternalSymbol:
- Results.push_back(ExpandExternalSymbol(N, DAG));
- return;
- case ISD::STORE:
- Results.push_back(ExpandStore(N, DAG));
- return;
- case ISD::LOAD:
- PopulateResults(ExpandLoad(N, DAG), Results);
- return;
- case ISD::ADD:
- // Results.push_back(ExpandAdd(N, DAG));
- return;
- case ISD::FrameIndex:
- Results.push_back(ExpandFrameIndex(N, DAG));
- return;
- default:
- assert (0 && "not implemented");
- return;
- }
-}
-
-SDValue PIC16TargetLowering::ExpandFrameIndex(SDNode *N,
- SelectionDAG &DAG) const {
-
-  // Currently we handle only FrameIndex nodes of type MVT::i16.
-  // One example of this scenario is when the return value is written to
-  // FrameIndex #0.
-
- if (N->getValueType(0) != MVT::i16)
- return SDValue();
-
- // Expand the FrameIndex into ExternalSymbol and a Constant node
- // The constant will represent the frame index number
- // Get the current function frame
- MachineFunction &MF = DAG.getMachineFunction();
- const Function *Func = MF.getFunction();
- const std::string Name = Func->getName();
-
- FrameIndexSDNode *FR = dyn_cast<FrameIndexSDNode>(SDValue(N,0));
- // FIXME there isn't really debug info here
- DebugLoc dl = FR->getDebugLoc();
-
- // Expand FrameIndex like GlobalAddress and ExternalSymbol
- // Also use Offset field for lo and hi parts. The default
- // offset is zero.
-
- SDValue ES;
- int FrameOffset;
- SDValue FI = SDValue(N,0);
- LegalizeFrameIndex(FI, DAG, ES, FrameOffset);
- SDValue Offset = DAG.getConstant(FrameOffset, MVT::i8);
- SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, ES, Offset);
- SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, ES, Offset);
- return DAG.getNode(ISD::BUILD_PAIR, dl, N->getValueType(0), Lo, Hi);
-}
-
-
-SDValue PIC16TargetLowering::ExpandStore(SDNode *N, SelectionDAG &DAG) const {
- StoreSDNode *St = cast<StoreSDNode>(N);
- SDValue Chain = St->getChain();
- SDValue Src = St->getValue();
- SDValue Ptr = St->getBasePtr();
- EVT ValueType = Src.getValueType();
- unsigned StoreOffset = 0;
- DebugLoc dl = N->getDebugLoc();
-
- SDValue PtrLo, PtrHi;
- LegalizeAddress(Ptr, DAG, PtrLo, PtrHi, StoreOffset, dl);
-
- if (ValueType == MVT::i8) {
- return DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, Src,
- PtrLo, PtrHi,
- DAG.getConstant (0 + StoreOffset, MVT::i8));
- }
- else if (ValueType == MVT::i16) {
- // Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
- SDValue SrcLo, SrcHi;
- GetExpandedParts(Src, DAG, SrcLo, SrcHi);
- SDValue ChainLo = Chain, ChainHi = Chain;
- // FIXME: This makes unsafe assumptions. The Chain may be a TokenFactor
- // created for an unrelated purpose, in which case it may not have
- // exactly two operands. Also, even if it does have two operands, they
- // may not be the low and high parts of an aligned load that was split.
- if (Chain.getOpcode() == ISD::TokenFactor) {
- ChainLo = Chain.getOperand(0);
- ChainHi = Chain.getOperand(1);
- }
- SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
- ChainLo,
- SrcLo, PtrLo, PtrHi,
- DAG.getConstant (0 + StoreOffset, MVT::i8));
-
- SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi,
- SrcHi, PtrLo, PtrHi,
- DAG.getConstant (1 + StoreOffset, MVT::i8));
-
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, getChain(Store1),
- getChain(Store2));
- }
- else if (ValueType == MVT::i32) {
- // Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
- SDValue SrcLo, SrcHi;
- GetExpandedParts(Src, DAG, SrcLo, SrcHi);
-
- // Get the expanded parts of each of SrcLo and SrcHi.
- SDValue SrcLo1, SrcLo2, SrcHi1, SrcHi2;
- GetExpandedParts(SrcLo, DAG, SrcLo1, SrcLo2);
- GetExpandedParts(SrcHi, DAG, SrcHi1, SrcHi2);
-
- SDValue ChainLo = Chain, ChainHi = Chain;
- // FIXME: This makes unsafe assumptions; see the FIXME above.
- if (Chain.getOpcode() == ISD::TokenFactor) {
- ChainLo = Chain.getOperand(0);
- ChainHi = Chain.getOperand(1);
- }
- SDValue ChainLo1 = ChainLo, ChainLo2 = ChainLo, ChainHi1 = ChainHi,
- ChainHi2 = ChainHi;
- // FIXME: This makes unsafe assumptions; see the FIXME above.
- if (ChainLo.getOpcode() == ISD::TokenFactor) {
- ChainLo1 = ChainLo.getOperand(0);
- ChainLo2 = ChainLo.getOperand(1);
- }
- // FIXME: This makes unsafe assumptions; see the FIXME above.
- if (ChainHi.getOpcode() == ISD::TokenFactor) {
- ChainHi1 = ChainHi.getOperand(0);
- ChainHi2 = ChainHi.getOperand(1);
- }
- SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
- ChainLo1,
- SrcLo1, PtrLo, PtrHi,
- DAG.getConstant (0 + StoreOffset, MVT::i8));
-
- SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainLo2,
- SrcLo2, PtrLo, PtrHi,
- DAG.getConstant (1 + StoreOffset, MVT::i8));
-
- SDValue Store3 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi1,
- SrcHi1, PtrLo, PtrHi,
- DAG.getConstant (2 + StoreOffset, MVT::i8));
-
- SDValue Store4 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi2,
- SrcHi2, PtrLo, PtrHi,
- DAG.getConstant (3 + StoreOffset, MVT::i8));
-
- SDValue RetLo = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(Store1), getChain(Store2));
- SDValue RetHi = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(Store3), getChain(Store4));
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, RetLo, RetHi);
-
- } else if (ValueType == MVT::i64) {
- SDValue SrcLo, SrcHi;
- GetExpandedParts(Src, DAG, SrcLo, SrcHi);
- SDValue ChainLo = Chain, ChainHi = Chain;
- // FIXME: This makes unsafe assumptions; see the FIXME above.
- if (Chain.getOpcode() == ISD::TokenFactor) {
- ChainLo = Chain.getOperand(0);
- ChainHi = Chain.getOperand(1);
- }
- SDValue Store1 = DAG.getStore(ChainLo, dl, SrcLo, Ptr, NULL,
- 0 + StoreOffset, false, false, 0);
-
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
- DAG.getConstant(4, Ptr.getValueType()));
- SDValue Store2 = DAG.getStore(ChainHi, dl, SrcHi, Ptr, NULL,
- 1 + StoreOffset, false, false, 0);
-
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1,
- Store2);
- } else {
- assert (0 && "value type not supported");
- return SDValue();
- }
-}
-
-SDValue PIC16TargetLowering::ExpandExternalSymbol(SDNode *N,
- SelectionDAG &DAG)
- const {
- ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(SDValue(N, 0));
- // FIXME there isn't really debug info here
- DebugLoc dl = ES->getDebugLoc();
-
- SDValue TES = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8);
- SDValue Offset = DAG.getConstant(0, MVT::i8);
- SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TES, Offset);
- SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TES, Offset);
-
- return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
-}
-
-// ExpandGlobalAddress - Expand a GlobalAddress into its Lo/Hi parts.
-SDValue PIC16TargetLowering::ExpandGlobalAddress(SDNode *N,
- SelectionDAG &DAG) const {
- GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(SDValue(N, 0));
- // FIXME there isn't really debug info here
- DebugLoc dl = G->getDebugLoc();
-
- SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), N->getDebugLoc(),
- MVT::i8,
- G->getOffset());
-
- SDValue Offset = DAG.getConstant(0, MVT::i8);
- SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TGA, Offset);
- SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TGA, Offset);
-
- return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
-}
-
-bool PIC16TargetLowering::isDirectAddress(const SDValue &Op) const {
- assert (Op.getNode() != NULL && "Can't operate on NULL SDNode!!");
-
- if (Op.getOpcode() == ISD::BUILD_PAIR) {
- if (Op.getOperand(0).getOpcode() == PIC16ISD::Lo)
- return true;
- }
- return false;
-}
-
-// Return true if DirectAddress is in ROM_SPACE
-bool PIC16TargetLowering::isRomAddress(const SDValue &Op) const {
-
-  // A ROM address is a GlobalAddress in ROM_SPACE.
-  // If the Op is not a GlobalAddress, return false without checking
-  // anything further.
- if (!isDirectAddress(Op))
- return false;
-
-  // It's a GlobalAddress.
-  // Op is BUILD_PAIR((PIC16Lo TGA), (PIC16Hi TGA)).
- SDValue TGA = Op.getOperand(0).getOperand(0);
- GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(TGA);
-
- if (GSDN->getAddressSpace() == PIC16ISD::ROM_SPACE)
- return true;
-
-  // For any other address space, return false.
- return false;
-}
-
-
-// GetExpandedParts - This function works along the same lines as
-// GetExpandedInteger in the type legalizer. It returns the expanded
-// parts of Op in Lo and Hi.
-
-void PIC16TargetLowering::GetExpandedParts(SDValue Op, SelectionDAG &DAG,
- SDValue &Lo, SDValue &Hi) const {
- SDNode *N = Op.getNode();
- DebugLoc dl = N->getDebugLoc();
- EVT NewVT = getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
-
- // Extract the lo component.
- Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
- DAG.getConstant(0, MVT::i8));
-
-  // Extract the hi component.
- Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
- DAG.getConstant(1, MVT::i8));
-}
-
-// Legalize FrameIndex into ExternalSymbol and offset.
-void
-PIC16TargetLowering::LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG,
- SDValue &ES, int &Offset) const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- const Function *Func = MF.getFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- PIC16MachineFunctionInfo *FuncInfo = MF.getInfo<PIC16MachineFunctionInfo>();
- const std::string Name = Func->getName();
-
- FrameIndexSDNode *FR = dyn_cast<FrameIndexSDNode>(Op);
-
-  // FrameIndices are not stack offsets, but they represent requests for
-  // space on the stack. The space requested may be more than one byte.
-  // Therefore, to calculate the stack offset that a FrameIndex aligns
-  // with, we need to traverse all the FrameIndices created earlier in
-  // the list and add their requested sizes.
- unsigned FIndex = FR->getIndex();
- const char *tmpName;
- if (FIndex < FuncInfo->getReservedFrameCount()) {
- tmpName = ESNames::createESName(PAN::getFrameLabel(Name));
- ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
- Offset = 0;
- for (unsigned i=0; i<FIndex ; ++i) {
- Offset += MFI->getObjectSize(i);
- }
- } else {
- // FrameIndex has been made for some temporary storage
- tmpName = ESNames::createESName(PAN::getTempdataLabel(Name));
- ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
- Offset = GetTmpOffsetForFI(FIndex, MFI->getObjectSize(FIndex), MF);
- }
-
- return;
-}
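-
-// Worked example (with assumed sizes): for a reserved FrameIndex with
-// FIndex == 2 where objects 0 and 1 have sizes 1 and 2, the loop above
-// yields Offset == 3, the byte offset of this object from the function's
-// frame label; non-reserved indices use the temp-data label instead.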
-
-// This function legalizes PIC16 addresses. If the pointer is
-// -- a direct address variable
-//    --> then a banksel for that variable will be created.
-// -- a ROM variable
-//    --> then it will be treated as an indirect address.
-// -- an indirect address
-//    --> then the address will be loaded into the FSR.
-// -- an ADD with a constant operand
-//    --> then the constant operand of the ADD will be returned as Offset,
-//        and the non-constant operand of the ADD will be treated as the
-//        pointer.
-// Returns the hi and lo parts of the address, and the offset (in the case
-// of ADD).
-
-void PIC16TargetLowering::LegalizeAddress(SDValue Ptr, SelectionDAG &DAG,
- SDValue &Lo, SDValue &Hi,
- unsigned &Offset, DebugLoc dl) const {
-
- // Offset, by default, should be 0
- Offset = 0;
-
- // If the pointer is ADD with constant,
- // return the constant value as the offset
- if (Ptr.getOpcode() == ISD::ADD) {
- SDValue OperLeft = Ptr.getOperand(0);
- SDValue OperRight = Ptr.getOperand(1);
- if ((OperLeft.getOpcode() == ISD::Constant) &&
- (dyn_cast<ConstantSDNode>(OperLeft)->getZExtValue() < 32 )) {
- Offset = dyn_cast<ConstantSDNode>(OperLeft)->getZExtValue();
- Ptr = OperRight;
- } else if ((OperRight.getOpcode() == ISD::Constant) &&
- (dyn_cast<ConstantSDNode>(OperRight)->getZExtValue() < 32 )){
- Offset = dyn_cast<ConstantSDNode>(OperRight)->getZExtValue();
- Ptr = OperLeft;
- }
- }
-
-  // If the pointer is of type i8 and is an external symbol, then treat it
-  // as a direct address. One example of such a case is storing to and
-  // loading from the function frame during a call.
- if (Ptr.getValueType() == MVT::i8) {
- switch (Ptr.getOpcode()) {
- case ISD::TargetExternalSymbol:
- Lo = Ptr;
- Hi = DAG.getConstant(1, MVT::i8);
- return;
- }
- }
-
- // Expansion of FrameIndex has Lo/Hi parts
- if (isDirectAddress(Ptr)) {
- SDValue TFI = Ptr.getOperand(0).getOperand(0);
- int FrameOffset;
- if (TFI.getOpcode() == ISD::TargetFrameIndex) {
- LegalizeFrameIndex(TFI, DAG, Lo, FrameOffset);
- Hi = DAG.getConstant(1, MVT::i8);
- Offset += FrameOffset;
- return;
- } else if (TFI.getOpcode() == ISD::TargetExternalSymbol) {
- // FrameIndex has already been expanded.
- // Now just make use of its expansion
- Lo = TFI;
- Hi = DAG.getConstant(1, MVT::i8);
- SDValue FOffset = Ptr.getOperand(0).getOperand(1);
- assert (FOffset.getOpcode() == ISD::Constant &&
- "Invalid operand of PIC16ISD::Lo");
- Offset += dyn_cast<ConstantSDNode>(FOffset)->getZExtValue();
- return;
- }
- }
-
- if (isDirectAddress(Ptr) && !isRomAddress(Ptr)) {
- // Direct addressing case for RAM variables. The Hi part is constant
- // and the Lo part is the TGA itself.
- Lo = Ptr.getOperand(0).getOperand(0);
-
-    // For direct addresses Hi is a constant. A value of 1 for the constant
-    // signifies that a banksel needs to be generated for it. A value of 0
-    // signifies that a banksel does not need to be generated for it. Mark
-    // it as 1 now and optimize later.
- Hi = DAG.getConstant(1, MVT::i8);
- return;
- }
-
- // Indirect addresses. Get the hi and lo parts of ptr.
- GetExpandedParts(Ptr, DAG, Lo, Hi);
-
- // Put the hi and lo parts into FSR.
- Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Lo);
- Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Hi);
-
- return;
-}
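-
-// Illustration of the ADD-with-constant case above: for a pointer of the
-// form (ADD BasePtr, Constant 5), since 5 < 32 the code sets Offset = 5
-// and legalizes BasePtr instead, so the constant is later folded into the
-// per-byte offsets of the expanded loads/stores.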
-
-SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) const {
- LoadSDNode *LD = dyn_cast<LoadSDNode>(SDValue(N, 0));
- SDValue Chain = LD->getChain();
- SDValue Ptr = LD->getBasePtr();
- DebugLoc dl = LD->getDebugLoc();
-
- SDValue Load, Offset;
- SDVTList Tys;
- EVT VT, NewVT;
- SDValue PtrLo, PtrHi;
- unsigned LoadOffset;
-
- // Legalize direct/indirect addresses. This will give the lo and hi parts
- // of the address and the offset.
- LegalizeAddress(Ptr, DAG, PtrLo, PtrHi, LoadOffset, dl);
-
- // Load from the pointer (direct address or FSR)
- VT = N->getValueType(0);
- unsigned NumLoads = VT.getSizeInBits() / 8;
- std::vector<SDValue> PICLoads;
- unsigned iter;
- EVT MemVT = LD->getMemoryVT();
- if(ISD::isNON_EXTLoad(N)) {
- for (iter=0; iter<NumLoads ; ++iter) {
- // Add the pointer offset if any
- Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
- Tys = DAG.getVTList(MVT::i8, MVT::Other);
- Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
- Offset);
- PICLoads.push_back(Load);
- }
- } else {
-    // If it is an extended load, then use PIC16Load for the memory bytes,
-    // and for all extended bytes perform an action based on the type of
-    // extension, i.e. a sign-extended or zero-extended load.
-
-
- // For extended loads this is the memory value type
- // i.e. without any extension
- EVT MemVT = LD->getMemoryVT();
- unsigned MemBytes = MemVT.getSizeInBits() / 8;
-    // If MVT::i1 is extended to MVT::i8 then MemBytes will be zero,
-    // so set it to one.
- if (MemBytes == 0) MemBytes = 1;
-
- unsigned ExtdBytes = VT.getSizeInBits() / 8;
- Offset = DAG.getConstant(LoadOffset, MVT::i8);
-
- Tys = DAG.getVTList(MVT::i8, MVT::Other);
- // For MemBytes generate PIC16Load with proper offset
- for (iter=0; iter < MemBytes; ++iter) {
- // Add the pointer offset if any
- Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
- Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
- Offset);
- PICLoads.push_back(Load);
- }
-
- // For SignExtendedLoad
- if (ISD::isSEXTLoad(N)) {
-      // For all ExtdBytes, use the arithmetically right-shifted value of
-      // the highest MemByte.
- SDValue SRA = DAG.getNode(ISD::SRA, dl, MVT::i8, Load,
- DAG.getConstant(7, MVT::i8));
- for (iter=MemBytes; iter<ExtdBytes; ++iter) {
- PICLoads.push_back(SRA);
- }
- } else if (ISD::isZEXTLoad(N) || ISD::isEXTLoad(N)) {
- //} else if (ISD::isZEXTLoad(N)) {
- // ZeroExtendedLoad -- For all ExtdBytes use constant 0
- SDValue ConstZero = DAG.getConstant(0, MVT::i8);
- for (iter=MemBytes; iter<ExtdBytes; ++iter) {
- PICLoads.push_back(ConstZero);
- }
- }
- }
- SDValue BP;
-
- if (VT == MVT::i8) {
- // Operand of Load is illegal -- Load itself is legal
- return PICLoads[0];
- }
- else if (VT == MVT::i16) {
- BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, PICLoads[0], PICLoads[1]);
- if ((MemVT == MVT::i8) || (MemVT == MVT::i1))
- Chain = getChain(PICLoads[0]);
- else
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(PICLoads[0]), getChain(PICLoads[1]));
- } else if (VT == MVT::i32) {
- SDValue BPs[2];
- BPs[0] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
- PICLoads[0], PICLoads[1]);
- BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
- PICLoads[2], PICLoads[3]);
- BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, BPs[0], BPs[1]);
- if ((MemVT == MVT::i8) || (MemVT == MVT::i1))
- Chain = getChain(PICLoads[0]);
- else if (MemVT == MVT::i16)
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(PICLoads[0]), getChain(PICLoads[1]));
- else {
- SDValue Chains[2];
- Chains[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(PICLoads[0]), getChain(PICLoads[1]));
- Chains[1] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- getChain(PICLoads[2]), getChain(PICLoads[3]));
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- Chains[0], Chains[1]);
- }
- }
- Tys = DAG.getVTList(VT, MVT::Other);
- return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, BP, Chain);
-}
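-
-// Summary of the i16 non-extending path above: two PIC16Load nodes are
-// created at offsets LoadOffset and LoadOffset + 1, their i8 results are
-// recombined with ISD::BUILD_PAIR, their chains are joined with
-// ISD::TokenFactor, and the pair is returned as a MERGE_VALUES node.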
-
-SDValue PIC16TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
- // We should have handled larger operands in type legalizer itself.
- assert (Op.getValueType() == MVT::i8 && "illegal shift to lower");
-
- SDNode *N = Op.getNode();
- SDValue Value = N->getOperand(0);
- SDValue Amt = N->getOperand(1);
- PIC16ISD::PIC16Libcall CallCode;
- switch (N->getOpcode()) {
- case ISD::SRA:
- CallCode = PIC16ISD::SRA_I8;
- break;
- case ISD::SHL:
- CallCode = PIC16ISD::SLL_I8;
- break;
- case ISD::SRL:
- CallCode = PIC16ISD::SRL_I8;
- break;
- default:
- assert ( 0 && "This shift is not implemented yet.");
- return SDValue();
- }
- SmallVector<SDValue, 2> Ops(2);
- Ops[0] = Value;
- Ops[1] = Amt;
- SDValue Call = MakePIC16Libcall(CallCode, N->getValueType(0), &Ops[0], 2,
- true, DAG, N->getDebugLoc());
- return Call;
-}
-
-SDValue PIC16TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
- // We should have handled larger operands in type legalizer itself.
- assert (Op.getValueType() == MVT::i8 && "illegal multiply to lower");
-
- SDNode *N = Op.getNode();
- SmallVector<SDValue, 2> Ops(2);
- Ops[0] = N->getOperand(0);
- Ops[1] = N->getOperand(1);
- SDValue Call = MakePIC16Libcall(PIC16ISD::MUL_I8, N->getValueType(0),
- &Ops[0], 2, true, DAG, N->getDebugLoc());
- return Call;
-}
-
-void
-PIC16TargetLowering::LowerOperationWrapper(SDNode *N,
- SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const {
- SDValue Op = SDValue(N, 0);
- SDValue Res;
- unsigned i;
- switch (Op.getOpcode()) {
- case ISD::LOAD:
- Res = ExpandLoad(Op.getNode(), DAG); break;
- default: {
- // All other operations are handled in LowerOperation.
- Res = LowerOperation(Op, DAG);
- if (Res.getNode())
- Results.push_back(Res);
-
- return;
- }
- }
-
- N = Res.getNode();
- unsigned NumValues = N->getNumValues();
- for (i = 0; i < NumValues ; i++) {
- Results.push_back(SDValue(N, i));
- }
-}
-
-SDValue PIC16TargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- case ISD::ADD:
- case ISD::ADDC:
- case ISD::ADDE:
- return LowerADD(Op, DAG);
- case ISD::SUB:
- case ISD::SUBC:
- case ISD::SUBE:
- return LowerSUB(Op, DAG);
- case ISD::LOAD:
- return ExpandLoad(Op.getNode(), DAG);
- case ISD::STORE:
- return ExpandStore(Op.getNode(), DAG);
- case ISD::MUL:
- return LowerMUL(Op, DAG);
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
- return LowerShift(Op, DAG);
- case ISD::OR:
- case ISD::AND:
- case ISD::XOR:
- return LowerBinOp(Op, DAG);
- case ISD::BR_CC:
- return LowerBR_CC(Op, DAG);
- case ISD::SELECT_CC:
- return LowerSELECT_CC(Op, DAG);
- }
- return SDValue();
-}
-
-SDValue PIC16TargetLowering::ConvertToMemOperand(SDValue Op,
- SelectionDAG &DAG,
- DebugLoc dl) const {
- assert (Op.getValueType() == MVT::i8
- && "illegal value type to store on stack.");
-
- MachineFunction &MF = DAG.getMachineFunction();
- const Function *Func = MF.getFunction();
- const std::string FuncName = Func->getName();
-
-
- // Put the value on stack.
- // Get a stack slot index and convert to es.
- int FI = MF.getFrameInfo()->CreateStackObject(1, 1, false);
- const char *tmpName = ESNames::createESName(PAN::getTempdataLabel(FuncName));
- SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
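- // (The external symbol names the function's temp-data section; the frame
- // index is mapped to a byte offset within it by GetTmpOffsetForFI.)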
-
- // Store the value to ES.
- SDValue Store = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other,
- DAG.getEntryNode(),
- Op, ES,
- DAG.getConstant (1, MVT::i8), // Banksel.
- DAG.getConstant (GetTmpOffsetForFI(FI, 1, MF),
- MVT::i8));
-
- // Load the value from ES.
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other);
- SDValue Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Store,
- ES, DAG.getConstant (1, MVT::i8),
- DAG.getConstant (GetTmpOffsetForFI(FI, 1, MF),
- MVT::i8));
-
- return Load.getValue(0);
-}
-
-SDValue PIC16TargetLowering::
-LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
- SDValue DataAddr_Lo, SDValue DataAddr_Hi,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG) const {
- unsigned NumOps = Outs.size();
-
- // If call has no arguments then do nothing and return.
- if (NumOps == 0)
- return Chain;
-
- std::vector<SDValue> Ops;
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Arg, StoreRet;
-
- // In the PIC16 ABI the arguments come after the return value.
- unsigned RetVals = Ins.size();
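- // For example, a call returning i16 with one i8 argument lays out the
- // callee frame as [ret.0, ret.1, arg.0], so the first argument is stored
- // at offset Ins.size() (here 2).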
- for (unsigned i = 0, ArgOffset = RetVals; i < NumOps; i++) {
- // Get the arguments
- Arg = OutVals[i];
-
- Ops.clear();
- Ops.push_back(Chain);
- Ops.push_back(Arg);
- Ops.push_back(DataAddr_Lo);
- Ops.push_back(DataAddr_Hi);
- Ops.push_back(DAG.getConstant(ArgOffset, MVT::i8));
- Ops.push_back(InFlag);
-
- StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
-
- Chain = getChain(StoreRet);
- InFlag = getOutFlag(StoreRet);
- ArgOffset++;
- }
- return Chain;
-}
-
-SDValue PIC16TargetLowering::
-LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
- unsigned NumOps = Outs.size();
- std::string Name;
- SDValue Arg, StoreAt;
- EVT ArgVT;
- unsigned Size=0;
-
- // If call has no arguments then do nothing and return.
- if (NumOps == 0)
- return Chain;
-
- // FIXME: This portion of code currently assumes that only
- // primitive types are passed as arguments.
-
- // Legalize the address before use
- SDValue PtrLo, PtrHi;
- unsigned AddressOffset;
- int StoreOffset = 0;
- LegalizeAddress(ArgLabel, DAG, PtrLo, PtrHi, AddressOffset, dl);
- SDValue StoreRet;
-
- std::vector<SDValue> Ops;
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- for (unsigned i=0, Offset = 0; i<NumOps; i++) {
- // Get the argument
- Arg = OutVals[i];
- StoreOffset = (Offset + AddressOffset);
-
- // Store the argument on frame
-
- Ops.clear();
- Ops.push_back(Chain);
- Ops.push_back(Arg);
- Ops.push_back(PtrLo);
- Ops.push_back(PtrHi);
- Ops.push_back(DAG.getConstant(StoreOffset, MVT::i8));
- Ops.push_back(InFlag);
-
- StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
-
- Chain = getChain(StoreRet);
- InFlag = getOutFlag(StoreRet);
-
- // Update the frame offset to be used for next argument
- ArgVT = Arg.getValueType();
- Size = ArgVT.getSizeInBits();
- Size = Size/8; // Calculate size in bytes
- Offset += Size; // Increase the frame offset
- }
- return Chain;
-}
-
-SDValue PIC16TargetLowering::
-LowerIndirectCallReturn(SDValue Chain, SDValue InFlag,
- SDValue DataAddr_Lo, SDValue DataAddr_Hi,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- unsigned RetVals = Ins.size();
-
- // If call does not have anything to return
- // then do nothing and go back.
- if (RetVals == 0)
- return Chain;
-
- // Call has something to return
- SDValue LoadRet;
-
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
- for(unsigned i=0;i<RetVals;i++) {
- LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, DataAddr_Lo,
- DataAddr_Hi, DAG.getConstant(i, MVT::i8),
- InFlag);
- InFlag = getOutFlag(LoadRet);
- Chain = getChain(LoadRet);
- InVals.push_back(LoadRet);
- }
- return Chain;
-}
-
-SDValue PIC16TargetLowering::
-LowerDirectCallReturn(SDValue RetLabel, SDValue Chain, SDValue InFlag,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- // Currently handling primitive types only. They will come in
- // i8 parts.
- unsigned RetVals = Ins.size();
-
- // Return immediately if the return type is void
- if (RetVals == 0)
- return Chain;
-
- // Call has something to return
-
- // Legalize the address before use
- SDValue LdLo, LdHi;
- unsigned LdOffset;
- LegalizeAddress(RetLabel, DAG, LdLo, LdHi, LdOffset, dl);
-
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
- SDValue LoadRet;
-
- for(unsigned i=0, Offset=0;i<RetVals;i++) {
-
- LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, LdLo, LdHi,
- DAG.getConstant(LdOffset + Offset, MVT::i8),
- InFlag);
-
- InFlag = getOutFlag(LoadRet);
-
- Chain = getChain(LoadRet);
- Offset++;
- InVals.push_back(LoadRet);
- }
-
- return Chain;
-}
-
-SDValue
-PIC16TargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
-
- // Number of values to return
- unsigned NumRet = Outs.size();
-
- // The function always returns its value on the stack, with the
- // offset starting from 0.
- MachineFunction &MF = DAG.getMachineFunction();
- const Function *F = MF.getFunction();
- std::string FuncName = F->getName();
-
- const char *tmpName = ESNames::createESName(PAN::getFrameLabel(FuncName));
- SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
- SDValue BS = DAG.getConstant(1, MVT::i8);
- SDValue RetVal;
- for(unsigned i=0;i<NumRet; ++i) {
- RetVal = OutVals[i];
- Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal,
- ES, BS,
- DAG.getConstant (i, MVT::i8));
-
- }
- return DAG.getNode(PIC16ISD::RET, dl, MVT::Other, Chain);
-}
-
-void PIC16TargetLowering::
-GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
- SDValue &DataAddr_Lo, SDValue &DataAddr_Hi,
- SelectionDAG &DAG) const {
- assert (Callee.getOpcode() == PIC16ISD::PIC16Connect
- && "Don't know what to do with such a callee!!");
- SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
- SDValue SeqStart = DAG.getCALLSEQ_START(Chain, ZeroOperand);
- Chain = getChain(SeqStart);
- SDValue OperFlag = getOutFlag(SeqStart); // To manage the data dependency
-
- // Get the Lo and Hi part of code address
- SDValue Lo = Callee.getOperand(0);
- SDValue Hi = Callee.getOperand(1);
-
- SDValue Data_Lo, Data_Hi;
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
- // Subtract 2 from Address to get the Lower part of DataAddress.
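- // (Assumed ABI convention: calling the two program words just below the
- // function's code address returns the low and high bytes of its frame
- // address via CALLW, hence the calls at address-2 and address-1.)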
- SDVTList VTList = DAG.getVTList(MVT::i8, MVT::Flag);
- Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo,
- DAG.getConstant(2, MVT::i8));
- SDValue Ops[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
- Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, Ops, 3);
- SDValue PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
- Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
- SDValue Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee,
- OperFlag);
- Chain = getChain(Call);
- OperFlag = getOutFlag(Call);
- SDValue SeqEnd = DAG.getCALLSEQ_END(Chain, ZeroOperand, ZeroOperand,
- OperFlag);
- Chain = getChain(SeqEnd);
- OperFlag = getOutFlag(SeqEnd);
-
- // Low part of Data Address
- DataAddr_Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Call, OperFlag);
-
- // Make the second call.
- SeqStart = DAG.getCALLSEQ_START(Chain, ZeroOperand);
- Chain = getChain(SeqStart);
- OperFlag = getOutFlag(SeqStart); // To manage the data dependency
-
- // Subtract 1 from Address to get high part of data address.
- Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo,
- DAG.getConstant(1, MVT::i8));
- SDValue HiOps[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
- Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
- PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
-
- // Use new Lo to make another CALLW
- Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
- Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee, OperFlag);
- Chain = getChain(Call);
- OperFlag = getOutFlag(Call);
- SeqEnd = DAG.getCALLSEQ_END(Chain, ZeroOperand, ZeroOperand,
- OperFlag);
- Chain = getChain(SeqEnd);
- OperFlag = getOutFlag(SeqEnd);
- // Hi part of Data Address
- DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Call, OperFlag);
-}
-
-SDValue
-PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // PIC16 target does not yet support tail call optimization.
- isTailCall = false;
-
- assert(Callee.getValueType() == MVT::i16 &&
- "Don't know how to legalize this call node!!!");
-
- // The flag to track if this is a direct or indirect call.
- bool IsDirectCall = true;
- unsigned RetVals = Ins.size();
- unsigned NumArgs = Outs.size();
-
- SDValue DataAddr_Lo, DataAddr_Hi;
- if (!isa<GlobalAddressSDNode>(Callee) &&
- !isa<ExternalSymbolSDNode>(Callee)) {
- IsDirectCall = false; // This is indirect call
-
- // For an indirect call, to pass the arguments and read the return
- // value back we need the data address of the function being called.
- // Getting that address requires two more calls (see GetDataAddress).
-
- SDValue Lo, Hi;
- // Indirect addresses. Get the hi and lo parts of ptr.
- GetExpandedParts(Callee, DAG, Lo, Hi);
- // Connect Lo and Hi parts of the callee with the PIC16Connect
- Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Lo, Hi);
-
- // Read DataAddress only if we have to pass arguments or
- // read return value.
- if ((RetVals > 0) || (NumArgs > 0))
- GetDataAddress(dl, Callee, Chain, DataAddr_Lo, DataAddr_Hi, DAG);
- }
-
- SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
-
- // Start the call sequence.
- // Carrying the constant 0 along the CALLSEQ_START
- // because there is nothing else to carry.
- SDValue SeqStart = DAG.getCALLSEQ_START(Chain, ZeroOperand);
- Chain = getChain(SeqStart);
- SDValue OperFlag = getOutFlag(SeqStart); // To manage the data dependency
- std::string Name;
-
- // For any direct call - callee will be GlobalAddressNode or
- // ExternalSymbol
- SDValue ArgLabel, RetLabel;
- if (IsDirectCall) {
- // Considering the GlobalAddressNode case here.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, dl, MVT::i8);
- Name = G->getGlobal()->getName();
- } else {// Considering the ExternalSymbol case here
- ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Callee);
- Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8);
- Name = ES->getSymbol();
- }
-
- // Label for argument passing
- const char *argFrame = ESNames::createESName(PAN::getArgsLabel(Name));
- ArgLabel = DAG.getTargetExternalSymbol(argFrame, MVT::i8);
-
- // Label for reading return value
- const char *retName = ESNames::createESName(PAN::getRetvalLabel(Name));
- RetLabel = DAG.getTargetExternalSymbol(retName, MVT::i8);
- } else {
- // if indirect call
- SDValue CodeAddr_Lo = Callee.getOperand(0);
- SDValue CodeAddr_Hi = Callee.getOperand(1);
-
- /*CodeAddr_Lo = DAG.getNode(ISD::ADD, dl, MVT::i8, CodeAddr_Lo,
- DAG.getConstant(2, MVT::i8));*/
-
- // move Hi part in PCLATH
- CodeAddr_Hi = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, CodeAddr_Hi);
- Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, CodeAddr_Lo,
- CodeAddr_Hi);
- }
-
- // Pass the argument to function before making the call.
- SDValue CallArgs;
- if (IsDirectCall) {
- CallArgs = LowerDirectCallArguments(ArgLabel, Chain, OperFlag,
- Outs, OutVals, dl, DAG);
- Chain = getChain(CallArgs);
- OperFlag = getOutFlag(CallArgs);
- } else {
- CallArgs = LowerIndirectCallArguments(Chain, OperFlag, DataAddr_Lo,
- DataAddr_Hi, Outs, OutVals, Ins,
- dl, DAG);
- Chain = getChain(CallArgs);
- OperFlag = getOutFlag(CallArgs);
- }
-
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue PICCall = DAG.getNode(PIC16ISD::CALL, dl, Tys, Chain, Callee,
- OperFlag);
- Chain = getChain(PICCall);
- OperFlag = getOutFlag(PICCall);
-
-
- // Carrying the constant 0 along the CALLSEQ_END
- // because there is nothing else to carry.
- SDValue SeqEnd = DAG.getCALLSEQ_END(Chain, ZeroOperand, ZeroOperand,
- OperFlag);
- Chain = getChain(SeqEnd);
- OperFlag = getOutFlag(SeqEnd);
-
- // Lower the return value reading after the call.
- if (IsDirectCall)
- return LowerDirectCallReturn(RetLabel, Chain, OperFlag,
- Ins, dl, DAG, InVals);
- else
- return LowerIndirectCallReturn(Chain, OperFlag, DataAddr_Lo,
- DataAddr_Hi, Ins, dl, DAG, InVals);
-}
-
-bool PIC16TargetLowering::isDirectLoad(const SDValue Op) const {
- if (Op.getOpcode() == PIC16ISD::PIC16Load)
- if (Op.getOperand(1).getOpcode() == ISD::TargetGlobalAddress
- || Op.getOperand(1).getOpcode() == ISD::TargetExternalSymbol)
- return true;
- return false;
-}
-
-// NeedToConvertToMemOp - Returns true if one of the operands of the
-// operation 'Op' needs to be put into memory. Also returns the
-// operand number of the operand to be converted in 'MemOp'. Remember, PIC16
-// has no instruction that can operate on two registers. Most insns take
-// one register and one memory operand (addwf) / constant (addlw).
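-// For example (hypothetical): for (add %a, %b) with both operands live in
-// registers, one operand must first be spilled to a temp slot so that the
-// add can be selected as an addwf reading that slot from memory.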
-bool PIC16TargetLowering::NeedToConvertToMemOp(SDValue Op, unsigned &MemOp,
- SelectionDAG &DAG) const {
- // If one of the operands is a constant, return false.
- if (Op.getOperand(0).getOpcode() == ISD::Constant ||
- Op.getOperand(1).getOpcode() == ISD::Constant)
- return false;
-
- // If an operand is already a direct load with only one use, try to
- // fold it into the operation; return false only if the fold is legal.
- if (isDirectLoad(Op.getOperand(0))) {
- if (Op.getOperand(0).hasOneUse()) {
- // Legal and profitable folding check uses the NodeId of DAG nodes.
- // This NodeId is assigned by topological order. Therefore first
- // assign topological order then perform legal and profitable check.
- // Note: though this ordering is done before beginning legalization,
- // nodes newly added during legalization have NodeId = -1 (NewNode),
- // so proper ordering of the nodes is required before performing any
- // check.
- DAG.AssignTopologicalOrder();
-
- // Direct load operands can be folded into binary operations, but first
- // verify that the fold is legal. Fold only if it is; otherwise convert
- // this direct load to a separate memory operation.
- if (SelectionDAGISel::IsLegalToFold(Op.getOperand(0),
- Op.getNode(), Op.getNode(),
- CodeGenOpt::Default))
- return false;
- else
- MemOp = 0;
- }
- }
-
- // For non-commutative operations there is no need to check the right
- // operand, because folding the right operand could produce an
- // incorrect result.
- if (! SelectionDAG::isCommutativeBinOp(Op.getOpcode()))
- return true;
-
- if (isDirectLoad(Op.getOperand(1))) {
- if (Op.getOperand(1).hasOneUse()) {
- // Legal and profitable folding check uses the NodeId of DAG nodes.
- // This NodeId is assigned by topological order. Therefore first
- // assign topological order then perform legal and profitable check.
- // Note: though this ordering is done before beginning legalization,
- // nodes newly added during legalization have NodeId = -1 (NewNode),
- // so proper ordering of the nodes is required before performing any
- // check.
- DAG.AssignTopologicalOrder();
-
- // Direct load operands can be folded into binary operations, but first
- // verify that the fold is legal. Fold only if it is; otherwise convert
- // this direct load to a separate memory operation.
- if (SelectionDAGISel::IsLegalToFold(Op.getOperand(1),
- Op.getNode(), Op.getNode(),
- CodeGenOpt::Default))
- return false;
- else
- MemOp = 1;
- }
- }
- return true;
-}
-
-// LowerBinOp - Lower a commutative binary operation that does not
-// affect the carry status flag.
-SDValue PIC16TargetLowering::LowerBinOp(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
-
- // We should have handled larger operands in type legalizer itself.
- assert (Op.getValueType() == MVT::i8 && "illegal Op to lower");
-
- unsigned MemOp = 1;
- if (NeedToConvertToMemOp(Op, MemOp, DAG)) {
- // Put one value on stack.
- SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
-
- return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
- NewVal);
- }
- else {
- return Op;
- }
-}
-
-// LowerADD - Lower all types of ADD operations including the ones
-// that affect carry.
-SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const {
- // We should have handled larger operands in type legalizer itself.
- assert (Op.getValueType() == MVT::i8 && "illegal add to lower");
- DebugLoc dl = Op.getDebugLoc();
- unsigned MemOp = 1;
- if (NeedToConvertToMemOp(Op, MemOp, DAG)) {
- // Put one value on stack.
- SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
-
- // ADDC and ADDE produce two results.
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
-
- // ADDE has three operands, the last one is the carry bit.
- if (Op.getOpcode() == ISD::ADDE)
- return DAG.getNode(Op.getOpcode(), dl, Tys, Op.getOperand(MemOp ^ 1),
- NewVal, Op.getOperand(2));
- // ADDC has two operands.
- else if (Op.getOpcode() == ISD::ADDC)
- return DAG.getNode(Op.getOpcode(), dl, Tys, Op.getOperand(MemOp ^ 1),
- NewVal);
- // ADD it is. It produces only one result.
- else
- return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
- NewVal);
- }
- else
- return Op;
-}
-
-SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
- // We should have handled larger operands in type legalizer itself.
- assert (Op.getValueType() == MVT::i8 && "illegal sub to lower");
- unsigned MemOp = 1;
- SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
-
- // Since we don't have an instruction for X - c,
- // we can change it to X + (-c).
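- // For example, X - 3 becomes X + 0xFD (i.e. X + (-3) modulo 256).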
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
- if (C && (Op.getOpcode() == ISD::SUB))
- {
- return DAG.getNode(ISD::ADD,
- dl, MVT::i8, Op.getOperand(0),
- DAG.getConstant(0-(C->getZExtValue()), MVT::i8));
- }
-
- if (NeedToConvertToMemOp(Op, MemOp, DAG) ||
- (isDirectLoad(Op.getOperand(1)) &&
- (!isDirectLoad(Op.getOperand(0))) &&
- (Op.getOperand(0).getOpcode() != ISD::Constant)))
- {
- // Put first operand on stack.
- SDValue NewVal = ConvertToMemOperand (Op.getOperand(0), DAG, dl);
-
- switch (Op.getOpcode()) {
- default:
- assert (0 && "Opcode unknown.");
- case ISD::SUBE:
- return DAG.getNode(Op.getOpcode(),
- dl, Tys, NewVal, Op.getOperand(1),
- Op.getOperand(2));
- break;
- case ISD::SUBC:
- return DAG.getNode(Op.getOpcode(),
- dl, Tys, NewVal, Op.getOperand(1));
- break;
- case ISD::SUB:
- return DAG.getNode(Op.getOpcode(),
- dl, MVT::i8, NewVal, Op.getOperand(1));
- break;
- }
- }
- else
- return Op;
-}
-
-void PIC16TargetLowering::InitReservedFrameCount(const Function *F,
- SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- PIC16MachineFunctionInfo *FuncInfo = MF.getInfo<PIC16MachineFunctionInfo>();
-
- unsigned NumArgs = F->arg_size();
-
- bool isVoidFunc = (F->getReturnType()->getTypeID() == Type::VoidTyID);
-
- if (isVoidFunc)
- FuncInfo->setReservedFrameCount(NumArgs);
- else
- FuncInfo->setReservedFrameCount(NumArgs + 1);
-}
-
-// LowerFormalArguments - Argument values are loaded from the
-// <fname>.args + offset. All arguments are already broken into legalized
-// types, so the offset just runs from 0 to NumArgVals - 1.
-
-SDValue
-PIC16TargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
- unsigned NumArgVals = Ins.size();
-
- // Get the current function's name to create the <fname>.args label.
- MachineFunction &MF = DAG.getMachineFunction();
- const Function *F = MF.getFunction();
- std::string FuncName = F->getName();
-
- // Reset the map of FI and TmpOffset
- ResetTmpOffsetMap(DAG);
- // Initialize the ReserveFrameCount
- InitReservedFrameCount(F, DAG);
-
- // Create the <fname>.args external symbol.
- const char *tmpName = ESNames::createESName(PAN::getArgsLabel(FuncName));
- SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
-
- // Load arg values from the label + offset.
- SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Other);
- SDValue BS = DAG.getConstant(1, MVT::i8);
- for (unsigned i = 0; i < NumArgVals ; ++i) {
- SDValue Offset = DAG.getConstant(i, MVT::i8);
- SDValue PICLoad = DAG.getNode(PIC16ISD::PIC16LdArg, dl, VTs, Chain, ES, BS,
- Offset);
- Chain = getChain(PICLoad);
- InVals.push_back(PICLoad);
- }
-
- return Chain;
-}
-
-// Perform DAGCombine of PIC16Load.
-// FIXME - Need a more elaborate comment here.
-SDValue PIC16TargetLowering::
-PerformPIC16LoadCombine(SDNode *N, DAGCombinerInfo &DCI) const {
- SelectionDAG &DAG = DCI.DAG;
- SDValue Chain = N->getOperand(0);
- if (N->hasNUsesOfValue(0, 0)) {
- DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), Chain);
- }
- return SDValue();
-}
-
-// For all functions with arguments, some STORE nodes are generated that
-// store the argument on the frame index. However, on PIC16 arguments are
-// passed on the stack only, so these STORE nodes are redundant. They will
-// be removed in PerformStoreCombine.
-//
-// Currently this function does nothing and will be updated to remove the
-// unwanted store operations.
-SDValue PIC16TargetLowering::
-PerformStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const {
- return SDValue(N, 0);
- /*
- // Storing an undef value is of no use, so remove it
- if (isStoringUndef(N, Chain, DAG)) {
- return Chain; // remove the store and return the chain
- }
- //else everything is ok.
- return SDValue(N, 0);
- */
-}
-
-SDValue PIC16TargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- switch (N->getOpcode()) {
- case ISD::STORE:
- return PerformStoreCombine(N, DCI);
- case PIC16ISD::PIC16Load:
- return PerformPIC16LoadCombine(N, DCI);
- }
- return SDValue();
-}
-
-static PIC16CC::CondCodes IntCCToPIC16CC(ISD::CondCode CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code!");
- case ISD::SETNE: return PIC16CC::NE;
- case ISD::SETEQ: return PIC16CC::EQ;
- case ISD::SETGT: return PIC16CC::GT;
- case ISD::SETGE: return PIC16CC::GE;
- case ISD::SETLT: return PIC16CC::LT;
- case ISD::SETLE: return PIC16CC::LE;
- case ISD::SETULT: return PIC16CC::ULT;
- case ISD::SETULE: return PIC16CC::ULE;
- case ISD::SETUGE: return PIC16CC::UGE;
- case ISD::SETUGT: return PIC16CC::UGT;
- }
-}
-
-// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
-// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
-static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
- ISD::CondCode CC, unsigned &SPCC) {
- if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->isNullValue() &&
- CC == ISD::SETNE &&
- (LHS.getOpcode() == PIC16ISD::SELECT_ICC &&
- LHS.getOperand(3).getOpcode() == PIC16ISD::SUBCC) &&
- isa<ConstantSDNode>(LHS.getOperand(0)) &&
- isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
- cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
- SDValue CMPCC = LHS.getOperand(3);
- SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
- LHS = CMPCC.getOperand(0);
- RHS = CMPCC.getOperand(1);
- }
-}
-
-// Returns appropriate CMP insn and corresponding condition code in PIC16CC
-SDValue PIC16TargetLowering::getPIC16Cmp(SDValue LHS, SDValue RHS,
- unsigned CC, SDValue &PIC16CC,
- SelectionDAG &DAG, DebugLoc dl) const {
- PIC16CC::CondCodes CondCode = (PIC16CC::CondCodes) CC;
-
- // PIC16 subtract computes (literal - W), so swap the operands and the
- // condition if needed; i.e. a < 12 can be rewritten as 12 > a.
- if (RHS.getOpcode() == ISD::Constant) {
-
- SDValue Tmp = LHS;
- LHS = RHS;
- RHS = Tmp;
-
- switch (CondCode) {
- default: break;
- case PIC16CC::LT:
- CondCode = PIC16CC::GT;
- break;
- case PIC16CC::GT:
- CondCode = PIC16CC::LT;
- break;
- case PIC16CC::ULT:
- CondCode = PIC16CC::UGT;
- break;
- case PIC16CC::UGT:
- CondCode = PIC16CC::ULT;
- break;
- case PIC16CC::GE:
- CondCode = PIC16CC::LE;
- break;
- case PIC16CC::LE:
- CondCode = PIC16CC::GE;
- break;
- case PIC16CC::ULE:
- CondCode = PIC16CC::UGE;
- break;
- case PIC16CC::UGE:
- CondCode = PIC16CC::ULE;
- break;
- }
- }
-
- PIC16CC = DAG.getConstant(CondCode, MVT::i8);
-
- // For signed comparisons, flip the sign bit of both operands so that a
- // plain unsigned compare produces the correct signed ordering.
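- // For example, comparing -1 (0xFF) with 1 (0x01): after XOR with 0x80
- // they become 0x7F and 0x81, and the unsigned compare orders them
- // correctly.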
- SDValue Mask = DAG.getConstant(128, MVT::i8);
- if (isSignedComparison(CondCode)) {
- LHS = DAG.getNode (ISD::XOR, dl, MVT::i8, LHS, Mask);
- RHS = DAG.getNode (ISD::XOR, dl, MVT::i8, RHS, Mask);
- }
-
- SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Flag);
- // We can use a subtract operation to set the condition codes. But
- // we need to put one operand in memory if required.
- // Nothing to do if the first operand is already a valid type (direct load
- // for subwf and literal for sublw) and it is used by this operation only.
- if ((LHS.getOpcode() == ISD::Constant || isDirectLoad(LHS))
- && LHS.hasOneUse())
- return DAG.getNode(PIC16ISD::SUBCC, dl, VTs, LHS, RHS);
-
- // else convert the first operand to mem.
- LHS = ConvertToMemOperand (LHS, DAG, dl);
- return DAG.getNode(PIC16ISD::SUBCC, dl, VTs, LHS, RHS);
-}
-
-
-SDValue PIC16TargetLowering::LowerSELECT_CC(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- SDValue TrueVal = Op.getOperand(2);
- SDValue FalseVal = Op.getOperand(3);
- unsigned ORIGCC = ~0;
- DebugLoc dl = Op.getDebugLoc();
-
- // If this is a select_cc of a "setcc", and if the setcc got lowered into
- // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
- // i.e.
- // A setcc: lhs, rhs, cc is expanded by llvm to
- // select_cc: result of setcc, 0, 1, 0, setne
- // We can think of it as:
- // select_cc: lhs, rhs, 1, 0, cc
- LookThroughSetCC(LHS, RHS, CC, ORIGCC);
- if (ORIGCC == ~0U) ORIGCC = IntCCToPIC16CC (CC);
-
- SDValue PIC16CC;
- SDValue Cmp = getPIC16Cmp(LHS, RHS, ORIGCC, PIC16CC, DAG, dl);
-
- return DAG.getNode (PIC16ISD::SELECT_ICC, dl, TrueVal.getValueType(), TrueVal,
- FalseVal, PIC16CC, Cmp.getValue(1));
-}
-
-MachineBasicBlock *
-PIC16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
- unsigned CC = (PIC16CC::CondCodes)MI->getOperand(3).getImm();
- DebugLoc dl = MI->getDebugLoc();
-
- // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
- // control-flow pattern. The incoming instruction knows the destination vreg
- // to set, the condition code register to branch on, the true/false values to
- // select between, and a branch opcode to use.
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // [f]bCC sinkMBB
- // fallthrough --> copy0MBB
- MachineBasicBlock *thisMBB = BB;
- MachineFunction *F = BB->getParent();
- MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- BuildMI(BB, dl, TII.get(PIC16::pic16brcond)).addMBB(sinkMBB).addImm(CC);
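- // (If the condition holds, control jumps straight to sinkMBB and the PHI
- // picks the true value; otherwise it falls through to copy0MBB, which
- // supplies the false value.)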
- F->insert(It, copy0MBB);
- F->insert(It, sinkMBB);
-
- // Transfer the remainder of BB and its successor edges to sinkMBB.
- sinkMBB->splice(sinkMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
-
- // Next, add the true and fallthrough blocks as its successors.
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(sinkMBB);
-
- // copy0MBB:
- // %FalseValue = ...
- // # fallthrough to sinkMBB
- BB = copy0MBB;
-
- // Update machine-CFG edges
- BB->addSuccessor(sinkMBB);
-
- // sinkMBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- BB = sinkMBB;
- BuildMI(*BB, BB->begin(), dl,
- TII.get(PIC16::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
-
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return BB;
-}
-
-
-SDValue PIC16TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- SDValue Chain = Op.getOperand(0);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2); // LHS of the condition.
- SDValue RHS = Op.getOperand(3); // RHS of the condition.
- SDValue Dest = Op.getOperand(4); // BB to jump to
- unsigned ORIGCC = ~0;
- DebugLoc dl = Op.getDebugLoc();
-
- // If this is a br_cc of a "setcc", and if the setcc got lowered into
- // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
- LookThroughSetCC(LHS, RHS, CC, ORIGCC);
- if (ORIGCC == ~0U) ORIGCC = IntCCToPIC16CC (CC);
-
- // Get the Compare insn and condition code.
- SDValue PIC16CC;
- SDValue Cmp = getPIC16Cmp(LHS, RHS, ORIGCC, PIC16CC, DAG, dl);
-
- return DAG.getNode(PIC16ISD::BRCOND, dl, MVT::Other, Chain, Dest, PIC16CC,
- Cmp.getValue(1));
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h
deleted file mode 100644
index d942af4..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h
+++ /dev/null
@@ -1,253 +0,0 @@
-//===-- PIC16ISelLowering.h - PIC16 DAG Lowering Interface ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that PIC16 uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16ISELLOWERING_H
-#define PIC16ISELLOWERING_H
-
-#include "PIC16.h"
-#include "PIC16Subtarget.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetLowering.h"
-#include <map>
-
-namespace llvm {
- namespace PIC16ISD {
- enum NodeType {
- // Start the numbering from where ISD NodeType finishes.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- Lo, // Low 8-bits of GlobalAddress.
- Hi, // High 8-bits of GlobalAddress.
- PIC16Load,
- PIC16LdArg, // A replica of PIC16Load, used to load function
- // arguments; it facilitates some store removal
- // optimizations.
-
- PIC16LdWF,
- PIC16Store,
- PIC16StWF,
- Banksel,
- MTLO, // Move to low part of FSR
- MTHI, // Move to high part of FSR
- MTPCLATH, // Move to PCLATH
- PIC16Connect, // General connector for PIC16 nodes
- BCF,
- LSLF, // PIC16 Logical shift left
- LRLF, // PIC16 Logical shift right
- RLF, // Rotate left through carry
- RRF, // Rotate right through carry
- CALL, // PIC16 Call instruction
- CALLW, // PIC16 CALLW instruction
- SUBCC, // Compare for equality or inequality.
- SELECT_ICC, // Pseudo to be caught in scheduler and expanded to brcond.
- BRCOND, // Conditional branch.
- RET, // Return.
- Dummy
- };
-
- // Keep track of different address spaces.
- enum AddressSpace {
- RAM_SPACE = 0, // RAM address space
- ROM_SPACE = 1 // ROM address space number is 1
- };
- enum PIC16Libcall {
- MUL_I8 = RTLIB::UNKNOWN_LIBCALL + 1,
- SRA_I8,
- SLL_I8,
- SRL_I8,
- PIC16UnknownCall
- };
- }
-
-
- //===--------------------------------------------------------------------===//
- // TargetLowering Implementation
- //===--------------------------------------------------------------------===//
- class PIC16TargetLowering : public TargetLowering {
- public:
- explicit PIC16TargetLowering(PIC16TargetMachine &TM);
-
- /// getTargetNodeName - This method returns the name of a target specific
- /// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
- /// getSetCCResultType - Return the ISD::SETCC ValueType
- virtual MVT::SimpleValueType getSetCCResultType(EVT ValType) const;
- virtual MVT::SimpleValueType getCmpLibcallReturnType() const;
- SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerBinOp(SDValue Op, SelectionDAG &DAG) const;
- // Call returns
- SDValue
- LowerDirectCallReturn(SDValue RetLabel, SDValue Chain, SDValue InFlag,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- SDValue
- LowerIndirectCallReturn(SDValue Chain, SDValue InFlag,
- SDValue DataAddr_Lo, SDValue DataAddr_Hi,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- // Call arguments
- SDValue
- LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
-
- SDValue
- LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
- SDValue DataAddr_Lo, SDValue DataAddr_Hi,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG) const;
-
- SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue getPIC16Cmp(SDValue LHS, SDValue RHS, unsigned OrigCC, SDValue &CC,
- SelectionDAG &DAG, DebugLoc dl) const;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
-
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
- virtual void ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
- virtual void LowerOperationWrapper(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
-
- SDValue ExpandStore(SDNode *N, SelectionDAG &DAG) const;
- SDValue ExpandLoad(SDNode *N, SelectionDAG &DAG) const;
- SDValue ExpandGlobalAddress(SDNode *N, SelectionDAG &DAG) const;
- SDValue ExpandExternalSymbol(SDNode *N, SelectionDAG &DAG) const;
- SDValue ExpandFrameIndex(SDNode *N, SelectionDAG &DAG) const;
-
- SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- SDValue PerformPIC16LoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- SDValue PerformStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
- // This function returns the tmp offset for a FrameIndex. If a TmpOffset
- // already exists for the FI, it is returned; otherwise a new offset is
- // created and returned.
- unsigned GetTmpOffsetForFI(unsigned FI, unsigned slot_size,
- MachineFunction &MF) const;
- void ResetTmpOffsetMap(SelectionDAG &DAG) const;
- void InitReservedFrameCount(const Function *F,
- SelectionDAG &DAG) const;
-
- /// getFunctionAlignment - Return the Log2 alignment of this function.
- virtual unsigned getFunctionAlignment(const Function *) const {
- // FIXME: The function never seems to be aligned.
- return 1;
- }
- protected:
- std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(EVT VT) const;
- private:
- // If the Node is a BUILD_PAIR representing a direct Address,
- // then this function will return true.
- bool isDirectAddress(const SDValue &Op) const;
-
- // If the Node is a DirectAddress in ROM_SPACE then this
- // function will return true
- bool isRomAddress(const SDValue &Op) const;
-
- // Extract the Lo and Hi component of Op.
- void GetExpandedParts(SDValue Op, SelectionDAG &DAG, SDValue &Lo,
- SDValue &Hi) const;
-
-
- // A load pointer can be a direct or an indirect address. On PIC16,
- // direct addresses need a banksel and indirect addresses must be
- // loaded into FSR first. Handle the address-specific cases here.
- void LegalizeAddress(SDValue Ptr, SelectionDAG &DAG, SDValue &Chain,
- SDValue &NewPtr, unsigned &Offset, DebugLoc dl) const;
-
- // FrameIndex should be broken down into ExternalSymbol and FrameOffset.
- void LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG, SDValue &ES,
- int &Offset) const;
-
- // For indirect calls the data address of the callee frame needs to
- // be extracted. This function fills the arguments DataAddr_Lo and
- // DataAddr_Hi with the address of the callee frame.
- void GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
- SDValue &DataAddr_Lo, SDValue &DataAddr_Hi,
- SelectionDAG &DAG) const;
-
- // We cannot have both operands of a binary operation in W.
- // This function is used to put one operand on stack and generate a load.
- SDValue ConvertToMemOperand(SDValue Op, SelectionDAG &DAG,
- DebugLoc dl) const;
-
- // This function checks if we need to put an operand of an operation on
- // stack and generate a load or not.
- // DAG parameter is required to access DAG information during
- // analysis.
- bool NeedToConvertToMemOp(SDValue Op, unsigned &MemOp,
- SelectionDAG &DAG) const;
-
- /// Subtarget - Keep a pointer to the PIC16Subtarget around so that we can
- /// make the right decision when generating code for different targets.
- const PIC16Subtarget *Subtarget;
-
-
- // Extending the libcall framework of LLVM
- // to hold the names of PIC16Libcalls.
- const char *PIC16LibcallNames[PIC16ISD::PIC16UnknownCall];
-
- // To set and retrieve the lib call names.
- void setPIC16LibcallName(PIC16ISD::PIC16Libcall Call, const char *Name);
- const char *getPIC16LibcallName(PIC16ISD::PIC16Libcall Call) const;
-
- // Make PIC16 Libcall.
- SDValue MakePIC16Libcall(PIC16ISD::PIC16Libcall Call, EVT RetVT,
- const SDValue *Ops, unsigned NumOps, bool isSigned,
- SelectionDAG &DAG, DebugLoc dl) const;
-
- // Check if operation has a direct load operand.
- inline bool isDirectLoad(const SDValue Op) const;
- };
-} // namespace llvm
-
-#endif // PIC16ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrFormats.td b/contrib/llvm/lib/Target/PIC16/PIC16InstrFormats.td
deleted file mode 100644
index e213ea8..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrFormats.td
+++ /dev/null
@@ -1,117 +0,0 @@
-//===- PIC16InstrFormats.td - PIC16 Instruction Formats-------*- tblgen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Describe PIC16 instructions format
-//
-// All the possible PIC16 fields are:
-//
-// opcode - operation code.
-// f - 7-bit register file address.
-// d - 1-bit direction specifier
-// k - 8/11-bit literal
-// b - 3-bit bit-number specifier
-//
-//===----------------------------------------------------------------------===//
-
-// Generic PIC16 Format
-// PIC16 instructions are 14 bits wide.
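-// For example, "addwf f, d" packs a 6-bit opcode, the 1-bit d field, and
-// the 7-bit file-register address f into a single 14-bit word (see the
-// ByteFormat class below).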
-
-// FIXME: Add Cooper Specific Formats if any.
-
-class PIC16Inst<dag outs, dag ins, string asmstr, list<dag> pattern>
- : Instruction {
- field bits<14> Inst;
-
- let Namespace = "PIC16";
- dag OutOperandList = outs;
- dag InOperandList = ins;
- let AsmString = asmstr;
- let Pattern = pattern;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Byte Oriented instruction class in PIC16 : <|opcode|d|f|>
-// opcode = 6 bits.
-// d = direction = 1 bit.
-// f = file register address = 7 bits.
-//===----------------------------------------------------------------------===//
-
-class ByteFormat<bits<6> opcode, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- :PIC16Inst<outs, ins, asmstr, pattern> {
- bits<1> d;
- bits<7> f;
-
- let Inst{13-8} = opcode;
-
- let Inst{7} = d;
- let Inst{6-0} = f;
-}
-
-//===----------------------------------------------------------------------===//
-// Bit Oriented instruction class in PIC16 : <|opcode|b|f|>
-// opcode = 4 bits.
-// b = bit specifier = 3 bits.
-// f = file register address = 7 bits.
-//===----------------------------------------------------------------------===//
-
-class BitFormat<bits<4> opcode, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : PIC16Inst<outs, ins, asmstr, pattern> {
- bits<3> b;
- bits<7> f;
-
- let Inst{13-10} = opcode;
-
- let Inst{9-7} = b;
- let Inst{6-0} = f;
-}
-
-//===----------------------------------------------------------------------===//
-// Literal Format instruction class in PIC16 : <|opcode|k|>
-// opcode = 6 bits
-// k = literal = 8 bits
-//===----------------------------------------------------------------------===//
-
-class LiteralFormat<bits<6> opcode, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : PIC16Inst<outs, ins, asmstr, pattern> {
- bits<8> k;
-
- let Inst{13-8} = opcode;
-
- let Inst{7-0} = k;
-}
-
-//===----------------------------------------------------------------------===//
-// Control Format instruction class in PIC16 : <|opcode|k|>
-// opcode = 3 bits.
-// k = jump address = 11 bits.
-//===----------------------------------------------------------------------===//
-
-class ControlFormat<bits<3> opcode, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : PIC16Inst<outs, ins, asmstr, pattern> {
- bits<11> k;
-
- let Inst{13-11} = opcode;
-
- let Inst{10-0} = k;
-}
-
-//===----------------------------------------------------------------------===//
-// Pseudo instruction class in PIC16
-//===----------------------------------------------------------------------===//
-
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : PIC16Inst<outs, ins, asmstr, pattern> {
- let Inst{13-6} = 0;
-}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp
deleted file mode 100644
index 81257f3..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-//===- PIC16InstrInfo.cpp - PIC16 Instruction Information -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "PIC16InstrInfo.h"
-#include "PIC16TargetMachine.h"
-#include "PIC16GenInstrInfo.inc"
-#include "llvm/Function.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <cstdio>
-
-
-using namespace llvm;
-
-// FIXME: Add subtarget support to this constructor.
-PIC16InstrInfo::PIC16InstrInfo(PIC16TargetMachine &tm)
- : TargetInstrInfoImpl(PIC16Insts, array_lengthof(PIC16Insts)),
- TM(tm),
- RegInfo(*this, *TM.getSubtargetImpl()) {}
-
-
-/// isStoreToStackSlot - If the specified machine instruction is a direct
-/// store to a stack slot, return the virtual or physical register number of
-/// the source reg along with the FrameIndex of the stack slot stored to.
-/// If not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than storing to the stack slot.
-unsigned PIC16InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- if (MI->getOpcode() == PIC16::movwf
- && MI->getOperand(0).isReg()
- && MI->getOperand(1).isSymbol()) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- return 0;
-}
-
-/// isLoadFromStackSlot - If the specified machine instruction is a direct
-/// load from a stack slot, return the virtual or physical register number of
-/// the dest reg along with the FrameIndex of the stack slot.
-/// If not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than loading from the stack slot.
-unsigned PIC16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- if (MI->getOpcode() == PIC16::movf
- && MI->getOperand(0).isReg()
- && MI->getOperand(1).isSymbol()) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- return 0;
-}
-
-
-void PIC16InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- const PIC16TargetLowering *PTLI = TM.getTargetLowering();
- DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- const Function *Func = MBB.getParent()->getFunction();
- const std::string FuncName = Func->getName();
-
- const char *tmpName = ESNames::createESName(PAN::getTempdataLabel(FuncName));
-
- // On the order of operands here: think "movwf SrcReg, tmp_slot, offset".
- if (RC == PIC16::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(PIC16::movwf))
- .addReg(SrcReg, getKillRegState(isKill))
- .addImm(PTLI->GetTmpOffsetForFI(FI, 1, *MBB.getParent()))
- .addExternalSymbol(tmpName)
- .addImm(1); // Emit banksel for it.
- }
- else if (RC == PIC16::FSR16RegisterClass) {
- // This is a 16-bit register and the frame index given by llvm is of
- // size two here. Break this index N into two zero-based indexes and
- // put one into the map; the second is always obtained by adding 1 to
- // the first. In fact three slots are used, since saving an FSR also
- // corrupts W, and hence W needs to be saved/restored as well.
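- // (Assumed slot layout: [FSRxL, FSRxH, saved W].)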
-
- unsigned opcode = (SrcReg == PIC16::FSR0) ? PIC16::save_fsr0
- : PIC16::save_fsr1;
- BuildMI(MBB, I, DL, get(opcode))
- .addReg(SrcReg, getKillRegState(isKill))
- .addImm(PTLI->GetTmpOffsetForFI(FI, 3, *MBB.getParent()))
- .addExternalSymbol(tmpName)
- .addImm(1); // Emit banksel for it.
- }
- else
- llvm_unreachable("Can't store this register to stack slot");
-}
-
-void PIC16InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- const PIC16TargetLowering *PTLI = TM.getTargetLowering();
- DebugLoc DL;
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- const Function *Func = MBB.getParent()->getFunction();
- const std::string FuncName = Func->getName();
-
- const char *tmpName = ESNames::createESName(PAN::getTempdataLabel(FuncName));
-
- // On the order of operands here: think "movf FrameIndex, W".
- if (RC == PIC16::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(PIC16::movf), DestReg)
- .addImm(PTLI->GetTmpOffsetForFI(FI, 1, *MBB.getParent()))
- .addExternalSymbol(tmpName)
- .addImm(1); // Emit banksel for it.
- }
- else if (RC == PIC16::FSR16RegisterClass) {
- // This is a 16-bit register and the frame index given by llvm is of
- // size two here. Break this index N into two zero-based indexes and
- // put one into the map; the second is always obtained by adding 1 to
- // the first. In fact three slots are used, since saving an FSR also
- // corrupts W, and hence W needs to be saved/restored as well.
-
- unsigned opcode = (DestReg == PIC16::FSR0) ? PIC16::restore_fsr0
- : PIC16::restore_fsr1;
- BuildMI(MBB, I, DL, get(opcode), DestReg)
- .addImm(PTLI->GetTmpOffsetForFI(FI, 3, *MBB.getParent()))
- .addExternalSymbol(tmpName)
- .addImm(1); // Emit banksel for it.
- }
- else
- llvm_unreachable("Can't load this register from stack slot");
-}
-
-void PIC16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- unsigned Opc;
- if (PIC16::FSR16RegClass.contains(DestReg, SrcReg))
- Opc = PIC16::copy_fsr;
- else if (PIC16::GPRRegClass.contains(DestReg, SrcReg))
- Opc = PIC16::copy_w;
- else
- llvm_unreachable("Impossible reg-to-reg copy");
-
- BuildMI(MBB, I, DL, get(Opc), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
-}
-
-/// InsertBranch - Insert a branch into the end of the specified
-/// MachineBasicBlock. The operands to this method are the same as those
-/// returned by AnalyzeBranch. This is invoked in cases where AnalyzeBranch
-/// returns success and when an unconditional branch (TBB is non-null, FBB is
-/// null, Cond is empty) needs to be inserted. It returns the number of
-/// instructions inserted.
-unsigned PIC16InstrInfo::
-InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- // Shouldn't be a fall through.
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
-
- if (FBB == 0) { // One way branch.
- if (Cond.empty()) {
- // Unconditional branch?
- BuildMI(&MBB, DL, get(PIC16::br_uncond)).addMBB(TBB);
- }
- return 1;
- }
-
- // FIXME: If there are conditions specified then a conditional branch
- // should be generated. For the time being no instruction is generated,
- // therefore returning 0.
- return 0;
-}
-
-bool PIC16InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin())
- return true;
-
- // Get the terminator instruction.
- --I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return true;
- --I;
- }
- // Handle unconditional branches. If the unconditional branch's target is
- // the successor basic block, then remove the unconditional branch.
- if (I->getOpcode() == PIC16::br_uncond && AllowModify) {
- if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
- I->eraseFromParent();
- }
- }
- return true;
-}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h
deleted file mode 100644
index 661b335..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h
+++ /dev/null
@@ -1,76 +0,0 @@
-//===- PIC16InstrInfo.h - PIC16 Instruction Information----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16INSTRUCTIONINFO_H
-#define PIC16INSTRUCTIONINFO_H
-
-#include "PIC16.h"
-#include "PIC16RegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-namespace llvm {
-
-
-class PIC16InstrInfo : public TargetInstrInfoImpl
-{
- PIC16TargetMachine &TM;
- const PIC16RegisterInfo RegInfo;
-public:
- explicit PIC16InstrInfo(PIC16TargetMachine &TM);
-
- virtual const PIC16RegisterInfo &getRegisterInfo() const { return RegInfo; }
-
- /// isLoadFromStackSlot - If the specified machine instruction is a direct
- /// load from a stack slot, return the virtual or physical register number of
- /// the destination along with the FrameIndex of the loaded stack slot. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- /// isStoreToStackSlot - If the specified machine instruction is a direct
- /// store to a stack slot, return the virtual or physical register number of
- /// the source reg along with the FrameIndex of the stack slot stored to. If
- /// not, return 0. This predicate must return 0 if the instruction has
- /// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
- virtual
- unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- };
-} // namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td
deleted file mode 100644
index 86d36cb..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td
+++ /dev/null
@@ -1,540 +0,0 @@
-//===- PIC16InstrInfo.td - PIC16 Instruction defs -------------*- tblgen-*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PIC16 instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// PIC16 Specific Type Constraints.
-//===----------------------------------------------------------------------===//
-class SDTCisI8<int OpNum> : SDTCisVT<OpNum, i8>;
-class SDTCisI16<int OpNum> : SDTCisVT<OpNum, i16>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 Specific Type Profiles.
-//===----------------------------------------------------------------------===//
-
-// Generic type profiles for i8/i16 unary/binary operations.
-// Taking one i8 or i16 and producing void.
-def SDTI8VoidOp : SDTypeProfile<0, 1, [SDTCisI8<0>]>;
-def SDTI16VoidOp : SDTypeProfile<0, 1, [SDTCisI16<0>]>;
-
-// Taking one value and producing an output of same type.
-def SDTI8UnaryOp : SDTypeProfile<1, 1, [SDTCisI8<0>, SDTCisI8<1>]>;
-def SDTI16UnaryOp : SDTypeProfile<1, 1, [SDTCisI16<0>, SDTCisI16<1>]>;
-
-// Taking two values and producing an output of same type.
-def SDTI8BinOp : SDTypeProfile<1, 2, [SDTCisI8<0>, SDTCisI8<1>, SDTCisI8<2>]>;
-def SDTI16BinOp : SDTypeProfile<1, 2, [SDTCisI16<0>, SDTCisI16<1>,
- SDTCisI16<2>]>;
-
-// Node specific type profiles.
-def SDT_PIC16Load : SDTypeProfile<1, 3, [SDTCisI8<0>, SDTCisI8<1>,
- SDTCisI8<2>, SDTCisI8<3>]>;
-
-def SDT_PIC16Store : SDTypeProfile<0, 4, [SDTCisI8<0>, SDTCisI8<1>,
- SDTCisI8<2>, SDTCisI8<3>]>;
-
-def SDT_PIC16Connect : SDTypeProfile<1, 2, [SDTCisI8<0>, SDTCisI8<1>,
- SDTCisI8<2>]>;
-
-// PIC16ISD::CALL type profile
-def SDT_PIC16call : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
-def SDT_PIC16callw : SDTypeProfile<1, -1, [SDTCisInt<0>]>;
-
-// PIC16ISD::BRCOND
-def SDT_PIC16Brcond: SDTypeProfile<0, 2,
- [SDTCisVT<0, OtherVT>, SDTCisI8<1>]>;
-
-// PIC16ISD::SELECT_ICC
-def SDT_PIC16Selecticc: SDTypeProfile<1, 3,
- [SDTCisI8<0>, SDTCisI8<1>, SDTCisI8<2>,
- SDTCisI8<3>]>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 addressing modes matching via DAG.
-//===----------------------------------------------------------------------===//
-def diraddr : ComplexPattern<i8, 1, "SelectDirectAddr", [], []>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 Specific Node Definitions.
-//===----------------------------------------------------------------------===//
-def PIC16callseq_start : SDNode<"ISD::CALLSEQ_START", SDTI8VoidOp,
- [SDNPHasChain, SDNPOutFlag]>;
-def PIC16callseq_end : SDNode<"ISD::CALLSEQ_END", SDTI8VoidOp,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-
-// Low 8-bits of GlobalAddress.
-def PIC16Lo : SDNode<"PIC16ISD::Lo", SDTI8BinOp>;
-
-// High 8-bits of GlobalAddress.
-def PIC16Hi : SDNode<"PIC16ISD::Hi", SDTI8BinOp>;
-
-// The MTHI and MTLO nodes are used only to match them in the incoming
-// DAG for replacement by the corresponding set_fsrhi, set_fsrlo instructions.
-// These nodes are not used for defining any instructions.
-def MTLO : SDNode<"PIC16ISD::MTLO", SDTI8UnaryOp>;
-def MTHI : SDNode<"PIC16ISD::MTHI", SDTI8UnaryOp>;
-def MTPCLATH : SDNode<"PIC16ISD::MTPCLATH", SDTI8UnaryOp>;
-
-// Node to generate Bank Select for a GlobalAddress.
-def Banksel : SDNode<"PIC16ISD::Banksel", SDTI8UnaryOp>;
-
-// Node to match a direct store operation.
-def PIC16Store : SDNode<"PIC16ISD::PIC16Store", SDT_PIC16Store, [SDNPHasChain]>;
-def PIC16StWF : SDNode<"PIC16ISD::PIC16StWF", SDT_PIC16Store,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
-
-// Node to match a direct load operation.
-def PIC16Load : SDNode<"PIC16ISD::PIC16Load", SDT_PIC16Load, [SDNPHasChain]>;
-def PIC16LdArg : SDNode<"PIC16ISD::PIC16LdArg", SDT_PIC16Load, [SDNPHasChain]>;
-def PIC16LdWF : SDNode<"PIC16ISD::PIC16LdWF", SDT_PIC16Load,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
-def PIC16Connect: SDNode<"PIC16ISD::PIC16Connect", SDT_PIC16Connect, []>;
-
-// Node to match PIC16 call
-def PIC16call : SDNode<"PIC16ISD::CALL", SDT_PIC16call,
- [SDNPHasChain , SDNPOptInFlag, SDNPOutFlag]>;
-def PIC16callw : SDNode<"PIC16ISD::CALLW", SDT_PIC16callw,
- [SDNPHasChain , SDNPOptInFlag, SDNPOutFlag]>;
-
-// Node to match a comparison instruction.
-def PIC16Subcc : SDNode<"PIC16ISD::SUBCC", SDTI8BinOp, [SDNPOutFlag]>;
-
-// Node to match a conditional branch.
-def PIC16Brcond : SDNode<"PIC16ISD::BRCOND", SDT_PIC16Brcond,
- [SDNPHasChain, SDNPInFlag]>;
-
-def PIC16Selecticc : SDNode<"PIC16ISD::SELECT_ICC", SDT_PIC16Selecticc,
- [SDNPInFlag]>;
-
-def PIC16ret : SDNode<"PIC16ISD::RET", SDTNone, [SDNPHasChain]>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 Operand Definitions.
-//===----------------------------------------------------------------------===//
-def i8mem : Operand<i8>;
-def brtarget: Operand<OtherVT>;
-
-// Operand for printing out a condition code.
-let PrintMethod = "printCCOperand" in
- def CCOp : Operand<i8>;
-
-include "PIC16InstrFormats.td"
-
-//===----------------------------------------------------------------------===//
-// PIC16 Common Classes.
-//===----------------------------------------------------------------------===//
-
-// W = W Op F : Load the value from F and do Op to W.
-let Constraints = "$src = $dst", mayLoad = 1 in
-class BinOpFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
- ByteFormat<OpCode, (outs GPR:$dst),
- (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo + $offset, W"),
- [(set GPR:$dst, (OpNode GPR:$src, (PIC16Load diraddr:$ptrlo,
- (i8 imm:$ptrhi),
- (i8 imm:$offset))))]>;
-
-// F = F Op W : Load the value from F, do op with W and store in F.
-// This insn class is not marked as TwoAddress because the reg is
-// being used as a source operand only. (Remember a TwoAddress insn
-// needs a copy.)
-let mayStore = 1 in
-class BinOpWF<bits<6> OpCode, string OpcStr, SDNode OpNode>:
- ByteFormat<OpCode, (outs),
- (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo + $offset, F"),
- [(PIC16Store (OpNode GPR:$src, (PIC16Load diraddr:$ptrlo,
- (i8 imm:$ptrhi),
- (i8 imm:$offset))),
- diraddr:$ptrlo,
- (i8 imm:$ptrhi), (i8 imm:$offset)
- )]>;
-
-// W = W Op L : Do Op of L with W and place result in W.
-let Constraints = "$src = $dst" in
-class BinOpWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
- LiteralFormat<opcode, (outs GPR:$dst),
- (ins GPR:$src, i8imm:$literal),
- !strconcat(OpcStr, " $literal"),
- [(set GPR:$dst, (OpNode GPR:$src, (i8 imm:$literal)))]>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 Instructions.
-//===----------------------------------------------------------------------===//
-
-// Pseudo-instructions.
-def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i8imm:$amt),
- "!ADJCALLSTACKDOWN $amt",
- [(PIC16callseq_start imm:$amt)]>;
-
-def ADJCALLSTACKUP : Pseudo<(outs), (ins i8imm:$amt),
- "!ADJCALLSTACKUP $amt",
- [(PIC16callseq_end imm:$amt)]>;
-
-//-----------------------------------
-// Various movlw insn patterns.
-//-----------------------------------
-let isReMaterializable = 1 in {
-// Move 8-bit literal to W.
-def movlw : BitFormat<12, (outs GPR:$dst), (ins i8imm:$src),
- "movlw $src",
- [(set GPR:$dst, (i8 imm:$src))]>;
-
-// Move a Lo(TGA) to W.
-def movlw_lo_1 : BitFormat<12, (outs GPR:$dst), (ins i8imm:$src, i8imm:$src2),
- "movlw LOW(${src} + ${src2})",
- [(set GPR:$dst, (PIC16Lo tglobaladdr:$src, imm:$src2 ))]>;
-
-// Move a Lo(TES) to W.
-def movlw_lo_2 : BitFormat<12, (outs GPR:$dst), (ins i8imm:$src, i8imm:$src2),
- "movlw LOW(${src} + ${src2})",
- [(set GPR:$dst, (PIC16Lo texternalsym:$src, imm:$src2 ))]>;
-
-// Move a Hi(TGA) to W.
-def movlw_hi_1 : BitFormat<12, (outs GPR:$dst), (ins i8imm:$src, i8imm:$src2),
- "movlw HIGH(${src} + ${src2})",
- [(set GPR:$dst, (PIC16Hi tglobaladdr:$src, imm:$src2))]>;
-
-// Move a Hi(TES) to W.
-def movlw_hi_2 : BitFormat<12, (outs GPR:$dst), (ins i8imm:$src, i8imm:$src2),
- "movlw HIGH(${src} + ${src2})",
- [(set GPR:$dst, (PIC16Hi texternalsym:$src, imm:$src2))]>;
-}
-
-//-------------------
-// FSR setting insns.
-//-------------------
-// These insns are matched via a DAG replacement pattern.
-def set_fsrlo:
- ByteFormat<0, (outs FSR16:$fsr),
- (ins GPR:$val),
- "movwf ${fsr}L",
- []>;
-
-let Constraints = "$src = $dst" in
-def set_fsrhi:
- ByteFormat<0, (outs FSR16:$dst),
- (ins FSR16:$src, GPR:$val),
- "movwf ${dst}H",
- []>;
-
-def set_pclath:
- ByteFormat<0, (outs PCLATHR:$dst),
- (ins GPR:$val),
- "movwf ${dst}",
- [(set PCLATHR:$dst , (MTPCLATH GPR:$val))]>;
-
-//----------------------------
-// copyPhysReg
-// copyPhysReg insns. These are dummy. They should always be deleted
-// by the optimizer and never be present in the final generated code.
-// If they are, then we have to write correct macros for these insns.
-//----------------------------
-def copy_fsr:
- Pseudo<(outs FSR16:$dst), (ins FSR16:$src), "copy_fsr $dst, $src", []>;
-
-def copy_w:
- Pseudo<(outs GPR:$dst), (ins GPR:$src), "copy_w $dst, $src", []>;
-
-class SAVE_FSR<string OpcStr>:
- Pseudo<(outs),
- (ins FSR16:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo, $offset"),
- []>;
-
-def save_fsr0: SAVE_FSR<"save_fsr0">;
-def save_fsr1: SAVE_FSR<"save_fsr1">;
-
-class RESTORE_FSR<string OpcStr>:
- Pseudo<(outs FSR16:$dst),
- (ins i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo, $offset"),
- []>;
-
-def restore_fsr0: RESTORE_FSR<"restore_fsr0">;
-def restore_fsr1: RESTORE_FSR<"restore_fsr1">;
-
-//--------------------------
-// Store to memory
-//-------------------------
-
-// Direct store.
-// Input operands are: val = W, ptrlo = GA, offset = offset, ptrhi = banksel.
-let mayStore = 1 in
-class MOVWF_INSN<bits<6> OpCode, SDNode OpNodeDest, SDNode Op>:
- ByteFormat<0, (outs),
- (ins GPR:$val, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- "movwf ${ptrlo} + ${offset}",
- [(Op GPR:$val, OpNodeDest:$ptrlo, (i8 imm:$ptrhi),
- (i8 imm:$offset))]>;
-
-// Store W to a Global Address.
-def movwf : MOVWF_INSN<0, tglobaladdr, PIC16Store>;
-
-// Store W to an External Symbol.
-def movwf_1 : MOVWF_INSN<0, texternalsym, PIC16Store>;
-
-// Store with InFlag and OutFlag
-// This is the same as movwf_1 but has a flag. A flag is required to
-// order the stores while passing the params to a function.
-def movwf_2 : MOVWF_INSN<0, texternalsym, PIC16StWF>;
-
-// Indirect store. Matched via a DAG replacement pattern.
-def store_indirect :
- ByteFormat<0, (outs),
- (ins GPR:$val, FSR16:$fsr, i8imm:$offset),
- "movwi $offset[$fsr]",
- []>;
-
-//----------------------------
-// Load from memory
-//----------------------------
-// Direct load.
-// Input Operands are: ptrlo = GA, offset = offset, ptrhi = banksel.
-// Output: dst = W
-let Defs = [STATUS], mayLoad = 1 in
-class MOVF_INSN<bits<6> OpCode, SDNode OpNodeSrc, SDNode Op>:
- ByteFormat<0, (outs GPR:$dst),
- (ins i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- "movf ${ptrlo} + ${offset}, W",
- [(set GPR:$dst,
- (Op OpNodeSrc:$ptrlo, (i8 imm:$ptrhi),
- (i8 imm:$offset)))]>;
-
-// Load from a GA.
-def movf : MOVF_INSN<0, tglobaladdr, PIC16Load>;
-
-// Load from an ES.
-def movf_1 : MOVF_INSN<0, texternalsym, PIC16Load>;
-def movf_1_1 : MOVF_INSN<0, texternalsym, PIC16LdArg>;
-
-// Load with InFlag and OutFlag
-// This is the same as movf_1 but has a flag. A flag is required to
-// order the loads while copying the return value of a function.
-def movf_2 : MOVF_INSN<0, texternalsym, PIC16LdWF>;
-
-// Indirect load. Matched via a DAG replacement pattern.
-def load_indirect :
- ByteFormat<0, (outs GPR:$dst),
- (ins FSR16:$fsr, i8imm:$offset),
- "moviw $offset[$fsr]",
- []>;
-
-//-------------------------
-// Bitwise operations patterns
-//--------------------------
-// W = W op [F]
-let Defs = [STATUS] in {
-def OrFW : BinOpFW<0, "iorwf", or>;
-def XOrFW : BinOpFW<0, "xorwf", xor>;
-def AndFW : BinOpFW<0, "andwf", and>;
-
-// F = W op [F]
-def OrWF : BinOpWF<0, "iorwf", or>;
-def XOrWF : BinOpWF<0, "xorwf", xor>;
-def AndWF : BinOpWF<0, "andwf", and>;
-
-//-------------------------
-// Various add/sub patterns.
-//-------------------------
-
-// W = W + [F]
-def addfw_1: BinOpFW<0, "addwf", add>;
-def addfw_2: BinOpFW<0, "addwf", addc>;
-
-let Uses = [STATUS] in
-def addfwc: BinOpFW<0, "addwfc", adde>; // With Carry.
-
-// F = W + [F]
-def addwf_1: BinOpWF<0, "addwf", add>;
-def addwf_2: BinOpWF<0, "addwf", addc>;
-let Uses = [STATUS] in
-def addwfc: BinOpWF<0, "addwfc", adde>; // With Carry.
-}
-
-// W -= [F] ; load from F and sub the value from W.
-let Constraints = "$src = $dst", mayLoad = 1 in
-class SUBFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
- ByteFormat<OpCode, (outs GPR:$dst),
- (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo + $offset, W"),
- [(set GPR:$dst, (OpNode (PIC16Load diraddr:$ptrlo,
- (i8 imm:$ptrhi), (i8 imm:$offset)),
- GPR:$src))]>;
-let Defs = [STATUS] in {
-def subfw_1: SUBFW<0, "subwf", sub>;
-def subfw_2: SUBFW<0, "subwf", subc>;
-
-let Uses = [STATUS] in
-def subfwb: SUBFW<0, "subwfb", sube>; // With Borrow.
-
-}
-let Defs = [STATUS], isTerminator = 1 in
-def subfw_cc: SUBFW<0, "subwf", PIC16Subcc>;
-
-// [F] -= W ;
-let mayStore = 1 in
-class SUBWF<bits<6> OpCode, string OpcStr, SDNode OpNode>:
- ByteFormat<OpCode, (outs),
- (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
- !strconcat(OpcStr, " $ptrlo + $offset"),
- [(PIC16Store (OpNode (PIC16Load diraddr:$ptrlo,
- (i8 imm:$ptrhi), (i8 imm:$offset)),
- GPR:$src), diraddr:$ptrlo,
- (i8 imm:$ptrhi), (i8 imm:$offset))]>;
-
-let Defs = [STATUS] in {
-def subwf_1: SUBWF<0, "subwf", sub>;
-def subwf_2: SUBWF<0, "subwf", subc>;
-
-let Uses = [STATUS] in
- def subwfb: SUBWF<0, "subwfb", sube>; // With Borrow.
-
-def subwf_cc: SUBWF<0, "subwf", PIC16Subcc>;
-}
-
-// addlw
-let Defs = [STATUS] in {
-def addlw_1 : BinOpWL<0, "addlw", add>;
-def addlw_2 : BinOpWL<0, "addlw", addc>;
-
-let Uses = [STATUS] in
-def addlwc : BinOpWL<0, "addlwc", adde>; // With Carry. (Assembler macro).
-
-// Bitwise operations involving a literal and W.
-def andlw : BinOpWL<0, "andlw", and>;
-def xorlw : BinOpWL<0, "xorlw", xor>;
-def orlw : BinOpWL<0, "iorlw", or>;
-}
-
-// sublw
-// W = C - W ; sub W from literal. (Without borrow).
-let Constraints = "$src = $dst" in
-class SUBLW<bits<6> opcode, string OpcStr, SDNode OpNode> :
- LiteralFormat<opcode, (outs GPR:$dst),
- (ins GPR:$src, i8imm:$literal),
- !strconcat(OpcStr, " $literal"),
- [(set GPR:$dst, (OpNode (i8 imm:$literal), GPR:$src))]>;
-// subwl
-// W = W - C ; sub literal from W (Without borrow).
-let Constraints = "$src = $dst" in
-class SUBWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
- LiteralFormat<opcode, (outs GPR:$dst),
- (ins GPR:$src, i8imm:$literal),
- !strconcat(OpcStr, " $literal"),
- [(set GPR:$dst, (OpNode GPR:$src, (i8 imm:$literal)))]>;
-
-let Defs = [STATUS] in {
-def sublw_1 : SUBLW<0, "sublw", sub>;
-def sublw_2 : SUBLW<0, "sublw", subc>;
-def sublw_3 : SUBLW<0, "sublwb", sube>; // With borrow (Assembler macro).
-
-def sublw_4 : SUBWL<0, "subwl", sub>; // Assembler macro replace with addlw
-def sublw_5 : SUBWL<0, "subwl", subc>; // Assembler macro replace with addlw
-def sublw_6 : SUBWL<0, "subwlb", sube>; // With borrow (Assembler macro).
-}
-let Defs = [STATUS], isTerminator = 1 in
-def sublw_cc : SUBLW<0, "sublw", PIC16Subcc>;
-
-// Call instruction.
-let isCall = 1,
- Defs = [W, FSR0, FSR1] in {
- def CALL: LiteralFormat<0x1, (outs), (ins i8imm:$func),
- //"call ${func} + 2",
- "call ${func}",
- [(PIC16call diraddr:$func)]>;
-}
-
-let isCall = 1,
- Defs = [W, FSR0, FSR1] in {
- def CALL_1: LiteralFormat<0x1, (outs), (ins GPR:$func, PCLATHR:$pc),
- "callw",
- [(PIC16call (PIC16Connect GPR:$func, PCLATHR:$pc))]>;
-}
-
-let isCall = 1,
- Defs = [FSR0, FSR1] in {
- def CALLW: LiteralFormat<0x1, (outs GPR:$dest),
- (ins GPR:$func, PCLATHR:$pc),
- "callw",
- [(set GPR:$dest, (PIC16callw (PIC16Connect GPR:$func, PCLATHR:$pc)))]>;
-}
-
-let Uses = [STATUS], isBranch = 1, isTerminator = 1, hasDelaySlot = 0 in
-def pic16brcond: ControlFormat<0x0, (outs), (ins brtarget:$dst, CCOp:$cc),
- "b$cc $dst",
- [(PIC16Brcond bb:$dst, imm:$cc)]>;
-
-// Unconditional branch.
-let isBranch = 1, isTerminator = 1, hasDelaySlot = 0 in
-def br_uncond: ControlFormat<0x0, (outs), (ins brtarget:$dst),
- "goto $dst",
- [(br bb:$dst)]>;
-
-// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
-// instruction selection into a branch sequence.
-let usesCustomInserter = 1 in { // Expanded after instruction selection.
- def SELECT_CC_Int_ICC
- : Pseudo<(outs GPR:$dst), (ins GPR:$T, GPR:$F, i8imm:$Cond),
- "; SELECT_CC_Int_ICC PSEUDO!",
- [(set GPR:$dst, (PIC16Selecticc GPR:$T, GPR:$F,
- imm:$Cond))]>;
-}
-
-
-// Banksel.
-def banksel :
- Pseudo<(outs),
- (ins i8mem:$ptr),
- "banksel $ptr",
- []>;
-
-def pagesel :
- Pseudo<(outs),
- (ins i8mem:$ptr),
- "movlp $ptr",
- []>;
-
-
-// Return insn.
-let isTerminator = 1, isBarrier = 1, isReturn = 1 in
-def Return :
- ControlFormat<0, (outs), (ins), "return", [(PIC16ret)]>;
-
-//===----------------------------------------------------------------------===//
-// PIC16 Replacement Patterns.
-//===----------------------------------------------------------------------===//
-
-// Identify an indirect store and select insns for it.
-def : Pat<(PIC16Store GPR:$val, (MTLO GPR:$loaddr), (MTHI GPR:$hiaddr),
- imm:$offset),
- (store_indirect GPR:$val,
- (set_fsrhi (set_fsrlo GPR:$loaddr), GPR:$hiaddr),
- imm:$offset)>;
-
-def : Pat<(PIC16StWF GPR:$val, (MTLO GPR:$loaddr), (MTHI GPR:$hiaddr),
- imm:$offset),
- (store_indirect GPR:$val,
- (set_fsrhi (set_fsrlo GPR:$loaddr), GPR:$hiaddr),
- imm:$offset)>;
-
-// Identify an indirect load and select insns for it.
-def : Pat<(PIC16Load (MTLO GPR:$loaddr), (MTHI GPR:$hiaddr),
- imm:$offset),
- (load_indirect (set_fsrhi (set_fsrlo GPR:$loaddr), GPR:$hiaddr),
- imm:$offset)>;
-
-def : Pat<(PIC16LdWF (MTLO GPR:$loaddr), (MTHI GPR:$hiaddr),
- imm:$offset),
- (load_indirect (set_fsrhi (set_fsrlo GPR:$loaddr), GPR:$hiaddr),
- imm:$offset)>;
-
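
The diraddr ComplexPattern above delegates operand matching to a C++ hook
named SelectDirectAddr in the instruction selector. A minimal sketch of the
shape such a hook takes in this era's API; the body is an assumption, since
the real logic lived in the deleted PIC16ISelDAGToDAG.cpp:

    bool PIC16DAGToDAGISel::SelectDirectAddr(SDNode *Op, SDValue N,
                                             SDValue &Address) {
      // Fold only operands that are directly addressable, e.g. lowered
      // globals and external symbols; everything else stays indirect.
      if (N.getOpcode() == ISD::TargetGlobalAddress ||
          N.getOpcode() == ISD::TargetExternalSymbol) {
        Address = N;
        return true;
      }
      return false;
    }
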
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp
deleted file mode 100644
index 1bcc497..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- PIC16MCAsmInfo.cpp - PIC16 asm properties -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the PIC16MCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16MCAsmInfo.h"
-
-// FIXME: Layering violation to get enums and static function, should be moved
-// to separate headers.
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "PIC16ISelLowering.h"
-using namespace llvm;
-
-PIC16MCAsmInfo::PIC16MCAsmInfo(const Target &T, StringRef TT) {
- CommentString = ";";
- GlobalPrefix = PAN::getTagName(PAN::PREFIX_SYMBOL);
- GlobalDirective = "\tglobal\t";
- ExternDirective = "\textern\t";
-
- Data8bitsDirective = " db ";
- Data16bitsDirective = " dw ";
- Data32bitsDirective = " dl ";
- Data64bitsDirective = NULL;
- ZeroDirective = NULL;
- AsciiDirective = " dt ";
- AscizDirective = NULL;
-
- RomData8bitsDirective = " dw ";
- RomData16bitsDirective = " rom_di ";
- RomData32bitsDirective = " rom_dl ";
- HasSetDirective = false;
-
- // Set it to false because we need to generate the .c file name and not
- // the .bc file name.
- HasSingleParameterDotFile = false;
-}
-
-const char *PIC16MCAsmInfo::getDataASDirective(unsigned Size,
- unsigned AS) const {
- if (AS != PIC16ISD::ROM_SPACE)
- return 0;
-
- switch (Size) {
- case 8: return RomData8bitsDirective;
- case 16: return RomData16bitsDirective;
- case 32: return RomData32bitsDirective;
- default: return NULL;
- }
-}
-
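
A hypothetical caller of the getDataASDirective method defined above
(EmitRomData is an invented name, not part of the deleted tree), choosing the
directive for a 32-bit datum placed in ROM space:

    void EmitRomData(const PIC16MCAsmInfo &MAI, llvm::raw_ostream &O) {
      if (const char *Dir = MAI.getDataASDirective(32, PIC16ISD::ROM_SPACE))
        O << Dir; // " rom_dl " per the table above
      // A null result means the size has no ROM directive and the value
      // must be emitted in smaller chunks.
    }
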
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h
deleted file mode 100644
index 6e1c111..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//=====-- PIC16MCAsmInfo.h - PIC16 asm properties -------------*- C++ -*--====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the PIC16MCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16TARGETASMINFO_H
-#define PIC16TARGETASMINFO_H
-
-#include "llvm/MC/MCAsmInfo.h"
-
-namespace llvm {
- class Target;
- class StringRef;
-
- class PIC16MCAsmInfo : public MCAsmInfo {
- const char *RomData8bitsDirective;
- const char *RomData16bitsDirective;
- const char *RomData32bitsDirective;
- public:
- PIC16MCAsmInfo(const Target &T, StringRef TT);
-
- virtual const char *getDataASDirective(unsigned size, unsigned AS) const;
- };
-
-} // namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MachineFunctionInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16MachineFunctionInfo.h
deleted file mode 100644
index bdf5086..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16MachineFunctionInfo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//====- PIC16MachineFunctionInfo.h - PIC16 machine function info -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares PIC16-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16MACHINEFUNCTIONINFO_H
-#define PIC16MACHINEFUNCTIONINFO_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// PIC16MachineFunctionInfo - This class is derived from MachineFunctionInfo
-/// and contains private PIC16 target-specific information for each
-/// MachineFunction.
-class PIC16MachineFunctionInfo : public MachineFunctionInfo {
- // The frame indexes generated for spill/reload are stack based.
- // This map maintains zero-based indexes for these FIs.
- std::map<unsigned, unsigned> FiTmpOffsetMap;
- unsigned TmpSize;
-
- // These are the frames for return value and argument passing.
- // These FrameIndices will be expanded to the foo.frame external symbol,
- // and all others will be expanded to the foo.tmp external symbol.
- unsigned ReservedFrameCount;
-
-public:
- PIC16MachineFunctionInfo()
- : TmpSize(0), ReservedFrameCount(0) {}
-
- explicit PIC16MachineFunctionInfo(MachineFunction &MF)
- : TmpSize(0), ReservedFrameCount(0) {}
-
- std::map<unsigned, unsigned> &getFiTmpOffsetMap() { return FiTmpOffsetMap; }
-
- unsigned getTmpSize() const { return TmpSize; }
- void setTmpSize(unsigned Size) { TmpSize = Size; }
-
- unsigned getReservedFrameCount() const { return ReservedFrameCount; }
- void setReservedFrameCount(unsigned Count) { ReservedFrameCount = Count; }
-};
-
-} // End llvm namespace
-
-#endif
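
Target code typically reaches this per-function object through the standard
MachineFunction::getInfo<T>() accessor; a brief usage sketch, with the
surrounding helper invented for illustration:

    void reserveOneMoreFrame(llvm::MachineFunction &MF) {
      PIC16MachineFunctionInfo *FI = MF.getInfo<PIC16MachineFunctionInfo>();
      // Account for one more reserved frame, e.g. the return-value frame.
      FI->setReservedFrameCount(FI->getReservedFrameCount() + 1);
    }
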
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp b/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp
deleted file mode 100644
index b6aa38f..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp
+++ /dev/null
@@ -1,254 +0,0 @@
-//===-- PIC16MemSelOpt.cpp - PIC16 banksel optimizer --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the pass which optimizes the emitting of banksel
-// instructions before accessing data memory. This currently works within
-// a basic block only and keeps track of the last accessed memory bank.
-// If memory access continues to be in the same bank, it just changes the
-// banksel immediate, which is a part of the insn accessing the data memory,
-// from 1 to zero. The asm printer emits a banksel only if that immediate is 1.
-//
-// FIXME: this is not implemented yet. The banksel pass only works on local
-// basic blocks.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-codegen"
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "PIC16InstrInfo.h"
-#include "PIC16MCAsmInfo.h"
-#include "PIC16TargetMachine.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/DerivedTypes.h"
-
-using namespace llvm;
-
-namespace {
- struct MemSelOpt : public MachineFunctionPass {
- static char ID;
- MemSelOpt() : MachineFunctionPass(ID) {}
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreservedID(MachineLoopInfoID);
- AU.addPreservedID(MachineDominatorsID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "PIC16 Memsel Optimizer";
- }
-
- bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
- bool processInstruction(MachineInstr *MI);
-
- private:
- const TargetInstrInfo *TII; // Machine instruction info.
- MachineBasicBlock *MBB; // Current basic block
- std::string CurBank;
- int PageChanged;
-
- };
- char MemSelOpt::ID = 0;
-}
-
-FunctionPass *llvm::createPIC16MemSelOptimizerPass() {
- return new MemSelOpt();
-}
-
-
-/// runOnMachineFunction - Loop over all of the basic blocks, inserting
-/// banksel and pagesel instructions where needed.
-///
-bool MemSelOpt::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- bool Changed = false;
- for (MachineFunction::iterator I = MF.begin(), E = MF.end();
- I != E; ++I) {
- Changed |= processBasicBlock(MF, *I);
- }
-
- return Changed;
-}
-
-/// processBasicBlock - Loop over all of the instructions in the basic block,
-/// inserting banksel and pagesel instructions where the bank or page changes.
-///
-bool MemSelOpt::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
- bool Changed = false;
- MBB = &BB;
-
- // Let us assume that when entering a basic block no bank is selected.
- // Ideally we should look at the predecessors for this information.
- CurBank="";
- PageChanged=0;
-
- MachineBasicBlock::iterator I;
- for (I = BB.begin(); I != BB.end(); ++I) {
- Changed |= processInstruction(I);
-
- // If the page has changed, insert a pagesel before
- // any instruction that needs one.
- if (PageChanged == 1)
- {
- // Restore the page if it was changed, before leaving the basic block,
- // because it may be required by the goto terminator or the fall thru
- // basic block.
- // If the terminator is return, we don't need to restore since there
- // is no goto or fall thru basic block.
- if ((I->getOpcode() == PIC16::sublw_3) || //macro has goto
- (I->getOpcode() == PIC16::sublw_6) || //macro has goto
- (I->getOpcode() == PIC16::addlwc) || //macro has goto
- (TII->get(I->getOpcode()).isBranch()))
- {
- DebugLoc dl = I->getDebugLoc();
- BuildMI(*MBB, I, dl, TII->get(PIC16::pagesel)).addExternalSymbol("$");
- Changed = true;
- PageChanged = 0;
- }
- }
- }
-
- // The basic block is over, but if we did not find any goto yet,
- // we haven't restored the page.
- // Restore the page if it was changed, before leaving the basic block,
- // because it may be required by the fall thru basic block.
- // If the terminator is return, we don't need to restore since there
- // is no fall thru basic block.
- if (PageChanged == 1) {
- // save the end pointer before we move back to last insn.
- MachineBasicBlock::iterator J = I;
- I--;
- const TargetInstrDesc &TID = TII->get(I->getOpcode());
- if (! TID.isReturn())
- {
- DebugLoc dl = I->getDebugLoc();
- BuildMI(*MBB, J, dl,
- TII->get(PIC16::pagesel)).addExternalSymbol("$");
- Changed = true;
- PageChanged = 0;
- }
- }
-
-
- return Changed;
-}
-
-bool MemSelOpt::processInstruction(MachineInstr *MI) {
- bool Changed = false;
-
- unsigned NumOperands = MI->getNumOperands();
- if (NumOperands == 0) return false;
-
-
- // If this insn is not going to access any memory, return.
- const TargetInstrDesc &TID = TII->get(MI->getOpcode());
- if (!(TID.isBranch() || TID.isCall() || TID.mayLoad() || TID.mayStore()))
- return false;
-
- // The first thing we should do is record whether banksel/pagesel are
- // changed in an unknown way. This can happen via any type of call.
- // We do it here first before scanning of MemOp / BBOp as the indirect
- // call insns do not have any operands, but they still may change bank/page.
- if (TID.isCall()) {
- // Record that we have changed the page, so that we can restore it
- // before basic block ends.
- // We need to signal that a page and bank change happened even for
- // indirect calls.
- PageChanged = 1;
-
- // When a call is made, there may be banksel for variables in callee.
- // Hence the banksel in caller needs to be reset.
- CurBank = "";
- }
-
- // Scan for the memory address operand.
- // FIXME: Should we use standard interfaces like memoperands_iterator,
- // hasMemOperand() etc ?
- int MemOpPos = -1;
- int BBOpPos = -1;
- for (unsigned i = 0; i < NumOperands; i++) {
- MachineOperand Op = MI->getOperand(i);
- if (Op.getType() == MachineOperand::MO_GlobalAddress ||
- Op.getType() == MachineOperand::MO_ExternalSymbol) {
- // We found one mem operand. Next one may be BS.
- MemOpPos = i;
- }
- if (Op.getType() == MachineOperand::MO_MachineBasicBlock) {
- // We found one BB operand. Next one may be pagesel.
- BBOpPos = i;
- }
- }
-
- // If we did not find an insn accessing memory, continue.
- if ((MemOpPos == -1) &&
- (BBOpPos == -1))
- return false;
- assert ((BBOpPos != MemOpPos) && "operand can only be of one type");
-
-
- // If this is pagesel material, handle it first.
- // CALL and br_ucond insns use MemOp (GA or ES) and not BBOp.
- // Pagesel is required only for a direct call.
- if ((MI->getOpcode() == PIC16::CALL)) {
- // Get the MemOp.
- MachineOperand &MemOp = MI->getOperand(MemOpPos);
- DebugLoc dl = MI->getDebugLoc();
- BuildMI(*MBB, MI, dl, TII->get(PIC16::pagesel)).addOperand(MemOp);
-
- // CALL and br_ucond need only pagesel, so we are done.
- return true;
- }
-
- // Pagesel is handled. Now, add a Banksel if needed.
- if (MemOpPos == -1) return Changed;
- // Get the MemOp.
- MachineOperand &Op = MI->getOperand(MemOpPos);
-
- // Get the section name (NewBank) for MemOp.
- // This assumes that the section names for globals are already set by
- // AsmPrinter->doInitialization.
- std::string NewBank = CurBank;
- bool hasExternalLinkage = false;
- if (Op.getType() == MachineOperand::MO_GlobalAddress &&
- Op.getGlobal()->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE) {
- if (Op.getGlobal()->hasExternalLinkage())
- hasExternalLinkage= true;
- NewBank = Op.getGlobal()->getSection();
- } else if (Op.getType() == MachineOperand::MO_ExternalSymbol) {
- // External Symbol is generated for temp data and arguments. They are
- // in fpdata.<functionname>.# section.
- std::string Sym = Op.getSymbolName();
- NewBank = PAN::getSectionNameForSym(Sym);
- }
-
- // If the section is a shared section, do not emit banksel.
- if (NewBank == PAN::getSharedUDataSectionName())
- return Changed;
-
- // If the previous and new section names are the same, we don't need to
- // emit banksel.
- if (NewBank.compare(CurBank) != 0 || hasExternalLinkage) {
- DebugLoc dl = MI->getDebugLoc();
- BuildMI(*MBB, MI, dl, TII->get(PIC16::banksel)).
- addOperand(Op);
- Changed = true;
- CurBank = NewBank;
- }
-
- return Changed;
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
deleted file mode 100644
index 56f0211..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-//===-- PIC16Cloner.cpp - PIC16 LLVM Cloner for shared functions -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to clone all functions that are shared between
-// the main line code (ML) and interrupt line code (IL). It clones all such
-// shared functions and their automatic global vars by adding the .IL suffix.
-//
-// This pass is supposed to be run on the linked .bc module.
-// It traverses the module call graph twice: once starting from the main function
-// and marking each reached function as "ML". Again, starting from the ISR
-// and cloning any reachable function that was marked as "ML". After cloning
-// the function, it remaps all the call sites in IL functions to call the
-// cloned functions.
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Pass.h"
-#include "llvm/Module.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "PIC16Cloner.h"
-#include "../PIC16ABINames.h"
-#include <vector>
-
-using namespace llvm;
-using std::vector;
-using std::string;
-using std::map;
-
-namespace llvm {
- char PIC16Cloner::ID = 0;
-
- ModulePass *createPIC16ClonerPass() { return new PIC16Cloner(); }
-}
-
-// We currently intend to run these passes in opt, which does not have any
-// diagnostic support. So use these functions for now. In future
-// we will probably write our own driver tool.
-//
-void PIC16Cloner::reportError(string ErrorString) {
- errs() << "ERROR : " << ErrorString << "\n";
- exit(1);
-}
-
-void PIC16Cloner::
-reportError (string ErrorString, vector<string> &Values) {
- unsigned ValCount = Values.size();
- string TargetString;
- for (unsigned i=0; i<ValCount; ++i) {
- TargetString = "%";
- TargetString += ((char)i + '0');
- ErrorString.replace(ErrorString.find(TargetString), TargetString.length(),
- Values[i]);
- }
- errs() << "ERROR : " << ErrorString << "\n";
- exit(1);
-}
-
-
-// Entry point
-//
-bool PIC16Cloner::runOnModule(Module &M) {
- CallGraph &CG = getAnalysis<CallGraph>();
-
- // Search for the "main" and "ISR" functions.
- CallGraphNode *mainCGN = NULL, *isrCGN = NULL;
- for (CallGraph::iterator it = CG.begin() ; it != CG.end(); it++)
- {
- // External calling node doesn't have any function associated with it.
- if (! it->first)
- continue;
-
- if (it->first->getName().str() == "main") {
- mainCGN = it->second;
- }
-
- if (PAN::isISR(it->first->getSection())) {
- isrCGN = it->second;
- }
-
- // Don't search further if we've found both.
- if (mainCGN && isrCGN)
- break;
- }
-
- // We have nothing to do if either main or the ISR is missing.
- if (! mainCGN || ! isrCGN) return false;
-
- // Time for some diagnostics.
- // If main itself is an interrupt function, report an error.
- if (PAN::isISR(mainCGN->getFunction()->getSection())) {
- reportError("Function 'main' can't be interrupt function");
- }
-
-
- // Mark all reachable functions from main as ML.
- markCallGraph(mainCGN, "ML");
-
- // And then all the functions reachable from ISR will be cloned.
- cloneSharedFunctions(isrCGN);
-
- return true;
-}
-
-// Mark all reachable functions from the given node, with the given mark.
-//
-void PIC16Cloner::markCallGraph(CallGraphNode *CGN, string StringMark) {
- // Mark the top node first.
- Function *thisF = CGN->getFunction();
-
- thisF->setSection(StringMark);
-
- // Mark all the called functions
- for(CallGraphNode::iterator cgn_it = CGN->begin();
- cgn_it != CGN->end(); ++cgn_it) {
- Function *CalledF = cgn_it->second->getFunction();
-
- // If calling an external function then CallGraphNode
- // will not be associated with any function.
- if (! CalledF)
- continue;
-
- // Issue diagnostic if interrupt function is being called.
- if (PAN::isISR(CalledF->getSection())) {
- vector<string> Values;
- Values.push_back(CalledF->getName().str());
- reportError("Interrupt function (%0) can't be called", Values);
- }
-
- // Has already been marked.
- if (CalledF->getSection().find(StringMark) != string::npos) {
- // Should we do anything here?
- } else {
- // Mark now
- CalledF->setSection(StringMark);
- }
-
- // Before going any further, mark all the functions called by the
- // current function.
- markCallGraph(cgn_it->second ,StringMark);
- } // end of loop of all called functions.
-}
-
-
-// For PIC16, automatic variables of a function are emitted as globals.
-// Clone the auto variables of a function and put them in VMap;
-// this VMap will be used while cloning the code of the function itself.
-//
-void PIC16Cloner::CloneAutos(Function *F) {
- // We'll need to update module's globals list as well. So keep a reference
- // handy.
- Module *M = F->getParent();
- Module::GlobalListType &Globals = M->getGlobalList();
-
- // Clear the leftovers in VMap by any previous cloning.
- VMap.clear();
-
- // Find the auto globals for this function, clone them, and put them
- // in VMap.
- std::string FnName = F->getName().str();
- std::string VarName, ClonedVarName;
- for (Module::global_iterator I = M->global_begin(), E = M->global_end();
- I != E; ++I) {
- VarName = I->getName().str();
- if (PAN::isLocalToFunc(FnName, VarName)) {
- // Auto variable for current function found. Clone it.
- const GlobalVariable *GV = I;
-
- const Type *InitTy = GV->getInitializer()->getType();
- GlobalVariable *ClonedGV =
- new GlobalVariable(InitTy, false, GV->getLinkage(),
- GV->getInitializer());
- ClonedGV->setName(PAN::getCloneVarName(FnName, VarName));
- // Add these new globals to module's globals list.
- Globals.push_back(ClonedGV);
-
- // Update VMap.
- VMap[GV] = ClonedGV;
- }
- }
-}
-
-
-// Clone all functions that are reachable from ISR and are already
-// marked as ML.
-//
-void PIC16Cloner::cloneSharedFunctions(CallGraphNode *CGN) {
-
- // Check all the called functions from ISR.
- for(CallGraphNode::iterator cgn_it = CGN->begin();
- cgn_it != CGN->end(); ++cgn_it) {
- Function *CalledF = cgn_it->second->getFunction();
-
- // If calling an external function then CallGraphNode
- // will not be associated with any function.
- if (!CalledF)
- continue;
-
- // Issue diagnostic if interrupt function is being called.
- if (PAN::isISR(CalledF->getSection())) {
- vector<string> Values;
- Values.push_back(CalledF->getName().str());
- reportError("Interrupt function (%0) can't be called", Values);
- }
-
- if (CalledF->getSection().find("ML") != string::npos) {
- // Function is alternatively marked. It should be a shared one.
- // Create IL copy. Passing called function as first argument
- // and the caller as the second argument.
-
- // Before making the IL copy, first ensure that this function has a
- // body. If the function does not have a body, it can't be cloned.
- // Such a case may occur when the function has been declared
- // in the C source code but its body exists in an assembly file.
- if (!CalledF->isDeclaration()) {
- Function *cf = cloneFunction(CalledF);
- remapAllSites(CGN->getFunction(), CalledF, cf);
- } else {
- // It is called only from the ISR. Still mark it, as we need this info
- // in code gen while calling intrinsics; the function itself is not cloned.
- CalledF->setSection("IL");
- }
- }
- // Before going any further, clone all the shared functions reachable
- // from the current function.
- cloneSharedFunctions(cgn_it->second);
- } // end of loop of all called functions.
-}
-
-// Clone the given function and return it.
-// Note: it uses the VMap member of the class, which is already populated
-// by cloneAutos by the time we reach here.
-// FIXME: Should we just pass VMap's ref as a parameter here? rather
-// than keeping the VMap as a member.
-Function *
-PIC16Cloner::cloneFunction(Function *OrgF) {
- Function *ClonedF;
-
- // See if we already cloned it. Return that.
- cloned_map_iterator cm_it = ClonedFunctionMap.find(OrgF);
- if(cm_it != ClonedFunctionMap.end()) {
- ClonedF = cm_it->second;
- return ClonedF;
- }
-
- // Clone does not exist.
- // First clone the autos, and populate VMap.
- CloneAutos(OrgF);
-
- // Now create the clone.
- ClonedF = CloneFunction(OrgF, VMap, /*ModuleLevelChanges=*/false);
-
- // The new function is for the interrupt line. Therefore it should have
- // its name suffixed with IL and its section attribute marked with IL.
- ClonedF->setName(PAN::getCloneFnName(OrgF->getName()));
- ClonedF->setSection("IL");
-
- // Add the newly created function to the module.
- OrgF->getParent()->getFunctionList().push_back(ClonedF);
-
- // Update the ClonedFunctionMap to record this cloning activity.
- ClonedFunctionMap[OrgF] = ClonedF;
-
- return ClonedF;
-}
-
-
-// Remap the call sites of shared functions, that are in IL.
-// Change the IL call site of a shared function to its clone.
-//
-void PIC16Cloner::
-remapAllSites(Function *Caller, Function *OrgF, Function *Clone) {
- // First find the caller to update. If the caller itself is cloned
- // then use the cloned caller. Otherwise use the caller as is.
- cloned_map_iterator cm_it = ClonedFunctionMap.find(Caller);
- if (cm_it != ClonedFunctionMap.end())
- Caller = cm_it->second;
-
- // For lack of a better call-site finding mechanism, iterate over
- // all insns to find the uses of original fn.
- for (Function::iterator BI = Caller->begin(); BI != Caller->end(); ++BI) {
- BasicBlock &BB = *BI;
- for (BasicBlock::iterator II = BB.begin(); II != BB.end(); ++II) {
- if (II->getNumOperands() > 0 && II->getOperand(0) == OrgF)
- II->setOperand(0, Clone);
- }
- }
-}
-
-
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
deleted file mode 100644
index e7d67ce..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
+++ /dev/null
@@ -1,83 +0,0 @@
-//===-- PIC16Cloner.h - PIC16 LLVM Cloner for shared functions --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of a cloner class that clones all
-// functions shared between the main line code (ML) and interrupt line code (IL).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16CLONER_H
-#define PIC16CLONER_H
-
-#include "llvm/ADT/ValueMap.h"
-
-using namespace llvm;
-using std::vector;
-using std::string;
-using std::map;
-
-namespace llvm {
- // forward classes.
- class Value;
- class Function;
- class Module;
- class ModulePass;
- class CallGraph;
- class CallGraphNode;
- class AnalysisUsage;
-
- class PIC16Cloner : public ModulePass {
- public:
- static char ID; // Class identification
- PIC16Cloner() : ModulePass(ID) {}
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<CallGraph>();
- }
- virtual bool runOnModule(Module &M);
-
- private: // Functions
- // Mark reachable functions for the MainLine or InterruptLine.
- void markCallGraph(CallGraphNode *CGN, string StringMark);
-
- // Clone auto variables of function specified.
- void CloneAutos(Function *F);
-
- // Clone the body of a function.
- Function *cloneFunction(Function *F);
-
- // Clone all shared functions.
- void cloneSharedFunctions(CallGraphNode *isrCGN);
-
- // Remap all call sites to the shared function.
- void remapAllSites(Function *Caller, Function *OrgF, Function *Clone);
-
- // Error reporting for PIC16Pass
- void reportError(string ErrorString, vector<string> &Values);
- void reportError(string ErrorString);
-
- private: //data
- // Records if the interrupt function has already been found.
- // If more than one interrupt function is found then an error
- // should be thrown.
- bool foundISR;
-
- // This ValueMap maps the auto variables of the original functions to
- // the corresponding cloned auto variables of the cloned function.
- // This value map is passed during the function cloning so that all the
- // uses of auto variables get updated properly.
- ValueMap<const Value*, Value*> VMap;
-
- // Map of already cloned functions.
- map<Function *, Function *> ClonedFunctionMap;
- typedef map<Function *, Function *>::iterator cloned_map_iterator;
- };
-} // End of llvm namespace
-
-#endif
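
Since the pass is meant to run on the linked .bc module (see the header
comment above), a driver would add it to a PassManager. A minimal sketch,
assuming the createPIC16ClonerPass factory this pass provides; the wiring
itself is illustrative, not the project's actual driver:

    #include "llvm/PassManager.h"
    void runCloner(llvm::Module &M) {
      llvm::PassManager PM;
      PM.add(llvm::createPIC16ClonerPass()); // clones ML/IL shared functions
      PM.run(M);
    }
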
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.cpp b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.cpp
deleted file mode 100644
index 0f8928a..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-//===-- PIC16Overlay.cpp - Implementation for PIC16 Frame Overlay --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 Frame Overlay implementation.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Pass.h"
-#include "llvm/Module.h"
-#include "llvm/Instructions.h"
-#include "llvm/Value.h"
-#include "PIC16Overlay.h"
-#include "llvm/Function.h"
-#include <cstdlib>
-#include <sstream>
-using namespace llvm;
-
-namespace llvm {
- char PIC16Overlay::ID = 0;
- ModulePass *createPIC16OverlayPass() { return new PIC16Overlay(); }
-}
-
-void PIC16Overlay::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<CallGraph>();
-}
-
-void PIC16Overlay::DFSTraverse(CallGraphNode *CGN, unsigned Depth) {
- // Do not set any color for external calling node.
- if (Depth != 0 && CGN->getFunction()) {
- unsigned Color = getColor(CGN->getFunction());
-
- // Handle indirectly called functions
- if (Color >= PIC16OVERLAY::StartIndirectCallColor ||
- Depth >= PIC16OVERLAY::StartIndirectCallColor) {
- // All functions called from an indirectly called function are given
- // a unique color.
- if (Color < PIC16OVERLAY::StartIndirectCallColor &&
- Depth >= PIC16OVERLAY::StartIndirectCallColor)
- setColor(CGN->getFunction(), Depth);
-
- for (unsigned int i = 0; i < CGN->size(); i++)
- DFSTraverse((*CGN)[i], ++IndirectCallColor);
- return;
- }
- // Just return if the node already has a color greater than the current
- // depth. A node must be colored with the maximum depth that it has.
- if (Color >= Depth)
- return;
-
- Depth = ModifyDepthForInterrupt(CGN, Depth);
- setColor(CGN->getFunction(), Depth);
- }
-
- // Color all children of this node with color depth+1.
- for (unsigned int i = 0; i < CGN->size(); i++)
- DFSTraverse((*CGN)[i], Depth+1);
-}
-
-unsigned PIC16Overlay::ModifyDepthForInterrupt(CallGraphNode *CGN,
- unsigned Depth) {
- Function *Fn = CGN->getFunction();
-
- // Return the original Depth if the function or its section does not exist.
- if (!Fn || !Fn->hasSection())
- return Depth;
-
- // Return original Depth if this function is not marked as interrupt.
- if (Fn->getSection().find("interrupt") == string::npos)
- return Depth;
-
- Depth = Depth + InterruptDepth;
- return Depth;
-}
-
-void PIC16Overlay::setColor(Function *Fn, unsigned Color) {
- std::string Section = "";
- if (Fn->hasSection())
- Section = Fn->getSection();
-
- size_t Pos = Section.find(OverlayStr);
-
- // Convert Color to string.
- std::stringstream ss;
- ss << Color;
- std::string ColorString = ss.str();
-
- // If color is already set then reset it with the new value. Else append
- // the Color string to section.
- if (Pos != std::string::npos) {
- Pos += OverlayStr.length();
- char c = Section.at(Pos);
- unsigned OldColorLength = 0;
- while (c >= '0' && c<= '9') {
- OldColorLength++;
- if (Pos < Section.length() - 1)
- Pos++;
- else
- break;
- c = Section.at(Pos);
- }
- // Replace old color with new one.
- Section.replace(Pos-OldColorLength +1, OldColorLength, ColorString);
- }
- else {
- // Append Color information to section string.
- if (Fn->hasSection())
- Section.append(" ");
- Section.append(OverlayStr + ColorString);
- }
- Fn->setSection(Section);
-}
-
-unsigned PIC16Overlay::getColor(Function *Fn) {
- int Color = 0;
- if (!Fn->hasSection())
- return 0;
-
- std::string Section = Fn->getSection();
- size_t Pos = Section.find(OverlayStr);
-
- // Return 0 if Color is not set.
- if (Pos == std::string::npos)
- return 0;
-
- // Set Pos to after "Overlay=".
- Pos += OverlayStr.length();
- char c = Section.at(Pos);
- std::string ColorString = "";
-
- // Find the string representing Color. A Color can only consist of digits.
- while (c >= '0' && c<= '9') {
- ColorString.append(1,c);
- if (Pos < Section.length() - 1)
- Pos++;
- else
- break;
- c = Section.at(Pos);
- }
- Color = atoi(ColorString.c_str());
-
- return Color;
-}
-
-bool PIC16Overlay::runOnModule(Module &M) {
- CallGraph &CG = getAnalysis<CallGraph>();
- CallGraphNode *ECN = CG.getExternalCallingNode();
-
- MarkIndirectlyCalledFunctions(M);
- // Since External Calling Node is the base function, do a depth first
- // traversal of the CallGraph with ECN as root. Each node will be marked with
- // a color that is max(color(callers)) + 1.
- if(ECN) {
- DFSTraverse(ECN, 0);
- }
- return false;
-}
-
-void PIC16Overlay::MarkIndirectlyCalledFunctions(Module &M) {
- // If the use of a function is not a call instruction then this
- // function might be called indirectly. In that case give it
- // a unique color.
- for (Module::iterator MI = M.begin(), E = M.end(); MI != E; ++MI) {
- for (Value::use_iterator I = MI->use_begin(), E = MI->use_end(); I != E;
- ++I) {
- User *U = *I;
- if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
- || !CallSite(cast<Instruction>(U)).isCallee(I)) {
- setColor(MI, ++IndirectCallColor);
- break;
- }
- }
- }
-}
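
The color encoding that setColor and getColor above agree on is just the
substring "Overlay=<n>" inside the function's section attribute. A
stand-alone mirror of the decoding side, runnable outside LLVM, makes the
round trip concrete (parseOverlayColor is an invented name for illustration):

    #include <cassert>
    #include <cstdlib>
    #include <string>
    // Mirrors getColor: parse the digits following "Overlay=", or 0 if unset.
    static unsigned parseOverlayColor(const std::string &Section) {
      std::string::size_type Pos = Section.find("Overlay=");
      if (Pos == std::string::npos)
        return 0;
      return static_cast<unsigned>(std::atoi(Section.c_str() + Pos + 8));
    }
    int main() {
      assert(parseOverlayColor("interrupt Overlay=201") == 201); // ISR depth
      assert(parseOverlayColor("fcn.main.section") == 0);        // color unset
      return 0;
    }
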
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.h b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.h
deleted file mode 100644
index 2f611e6..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Overlay.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- PIC16Overlay.h - Interface for PIC16 Frame Overlay -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 Overlay infrastructure.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16FRAMEOVERLAY_H
-#define PIC16FRAMEOVERLAY_H
-
-
-using std::string;
-using namespace llvm;
-
-namespace llvm {
- // Forward declarations.
- class Function;
- class Module;
- class ModulePass;
- class AnalysisUsage;
- class CallGraphNode;
- class CallGraph;
-
- namespace PIC16OVERLAY {
- enum OverlayConsts {
- StartInterruptColor = 200,
- StartIndirectCallColor = 300
- };
- }
- class PIC16Overlay : public ModulePass {
- std::string OverlayStr;
- unsigned InterruptDepth;
- unsigned IndirectCallColor;
- public:
- static char ID; // Class identification
- PIC16Overlay() : ModulePass(ID) {
- OverlayStr = "Overlay=";
- InterruptDepth = PIC16OVERLAY::StartInterruptColor;
- IndirectCallColor = PIC16OVERLAY::StartIndirectCallColor;
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnModule(Module &M);
-
- private:
- unsigned getColor(Function *Fn);
- void setColor(Function *Fn, unsigned Color);
- unsigned ModifyDepthForInterrupt(CallGraphNode *CGN, unsigned Depth);
- void MarkIndirectlyCalledFunctions(Module &M);
- void DFSTraverse(CallGraphNode *CGN, unsigned Depth);
- };
-} // End of namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp
deleted file mode 100644
index 76de47f..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//===- PIC16RegisterInfo.cpp - PIC16 Register Information -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-reg-info"
-
-#include "PIC16.h"
-#include "PIC16RegisterInfo.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/Support/ErrorHandling.h"
-
-using namespace llvm;
-
-PIC16RegisterInfo::PIC16RegisterInfo(const TargetInstrInfo &tii,
- const PIC16Subtarget &st)
- : PIC16GenRegisterInfo(PIC16::ADJCALLSTACKDOWN, PIC16::ADJCALLSTACKUP),
- TII(tii),
- ST(st) {}
-
-#include "PIC16GenRegisterInfo.inc"
-
-/// PIC16 Callee Saved Registers
-const unsigned* PIC16RegisterInfo::
-getCalleeSavedRegs(const MachineFunction *MF) const {
- static const unsigned CalleeSavedRegs[] = { 0 };
- return CalleeSavedRegs;
-}
-
-BitVector PIC16RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
- return Reserved;
-}
-
-bool PIC16RegisterInfo::hasFP(const MachineFunction &MF) const {
- return false;
-}
-
-void PIC16RegisterInfo::
-eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
- RegScavenger *RS) const
-{ /* NOT YET IMPLEMENTED */ }
-
-void PIC16RegisterInfo::emitPrologue(MachineFunction &MF) const
-{ /* NOT YET IMPLEMENTED */ }
-
-void PIC16RegisterInfo::
-emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
-{ /* NOT YET IMPLEMENTED */ }
-
-int PIC16RegisterInfo::
-getDwarfRegNum(unsigned RegNum, bool isEH) const {
- llvm_unreachable("Not keeping track of debug information yet!!");
- return -1;
-}
-
-unsigned PIC16RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- llvm_unreachable("PIC16 Does not have any frame register");
- return 0;
-}
-
-unsigned PIC16RegisterInfo::getRARegister() const {
- llvm_unreachable("PIC16 Does not have any return address register");
- return 0;
-}
-
-// This function eliminates ADJCALLSTACKDOWN,
-// ADJCALLSTACKUP pseudo instructions
-void PIC16RegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- // Simply discard ADJCALLSTACKDOWN,
- // ADJCALLSTACKUP instructions.
- MBB.erase(I);
-}
-
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h
deleted file mode 100644
index 20052b0..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//===- PIC16RegisterInfo.h - PIC16 Register Information Impl ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the PIC16 implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16REGISTERINFO_H
-#define PIC16REGISTERINFO_H
-
-#include "PIC16GenRegisterInfo.h.inc"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-namespace llvm {
-
-// Forward Declarations.
- class PIC16Subtarget;
- class TargetInstrInfo;
-
-class PIC16RegisterInfo : public PIC16GenRegisterInfo {
- private:
- const TargetInstrInfo &TII;
- const PIC16Subtarget &ST;
-
- public:
- PIC16RegisterInfo(const TargetInstrInfo &tii,
- const PIC16Subtarget &st);
-
-
- //------------------------------------------------------
- // Pure virtual functions from TargetRegisterInfo
- //------------------------------------------------------
-
- // PIC16 callee saved registers
- virtual const unsigned*
- getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- virtual BitVector getReservedRegs(const MachineFunction &MF) const;
- virtual bool hasFP(const MachineFunction &MF) const;
-
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, RegScavenger *RS=NULL) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
- virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const;
- virtual unsigned getFrameRegister(const MachineFunction &MF) const;
- virtual unsigned getRARegister() const;
-
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.td b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.td
deleted file mode 100644
index 2959d91..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.td
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- PIC16RegisterInfo.td - PIC16 Register defs ------------*- tblgen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Declarations that describe the PIC16 register file
-//===----------------------------------------------------------------------===//
-
-class PIC16Reg<string n> : Register<n> {
- let Namespace = "PIC16";
-}
-
-// PIC16 Registers.
-def W : PIC16Reg<"W">;
-def FSR0 : PIC16Reg<"FSR0">;
-def FSR1 : PIC16Reg<"FSR1">;
-def BS : PIC16Reg<"BS">;
-def PCLATH : PIC16Reg<"PCLATH">;
-
-def STATUS : PIC16Reg<"STATUS">;
-
-// PIC16 Register classes.
-def GPR : RegisterClass<"PIC16", [i8], 8, [W]>;
-def FSR16 : RegisterClass<"PIC16", [i16], 8, [FSR0, FSR1]>;
-def BSR : RegisterClass<"PIC16", [i8], 8, [BS]>;
-def PCLATHR : RegisterClass<"PIC16", [i8], 8, [PCLATH]>;
-def STATUSR : RegisterClass<"PIC16", [i8], 8, [STATUS]>;
-
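
For context: tblgen expands register and register-class defs like the ones above into the generated PIC16GenRegisterInfo files included elsewhere in this diff. A rough, hypothetical sketch of the shape of that output (enumerator names follow the Namespace = "PIC16" setting; the ordering and exact form are illustrative, not the actual generated code):

namespace PIC16 {
  // -gen-register-enums: one enumerator per Register def above.
  enum {
    NoRegister = 0,
    BS, FSR0, FSR1, PCLATH, STATUS, W
  };
}
// Backend C++ then names registers symbolically (PIC16::W, PIC16::FSR0)
// and reaches the GPR/FSR16/BSR/... classes through the generated
// TargetRegisterClass instances.
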
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Section.cpp b/contrib/llvm/lib/Target/PIC16/PIC16Section.cpp
deleted file mode 100644
index 2505b11..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Section.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-//===-- PIC16Section.cpp - PIC16 Section ----------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "PIC16Section.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-
-// This is the only way to create a PIC16Section. Sections created here
-// do not need to be explicitly deleted as they are managed by auto_ptrs.
-PIC16Section *PIC16Section::Create(StringRef Name, PIC16SectionType Ty,
- StringRef Address, int Color,
- MCContext &Ctx) {
-
- /// Determine the internal SectionKind info.
- /// Users of PIC16Section class should not need to know the internal
- /// SectionKind. They should work only with PIC16SectionType.
- ///
- /// PIC16 Terminology for section kinds is as below.
- /// UDATA - BSS
-  ///  IDATA - initialized data (equivalent to Metadata)
-  ///  ROMDATA - ReadOnly.
-  ///  UDATA_OVR - Sections that can be overlaid. A section of this type is
-  ///              used to contain function autos and frame. We can think
-  ///              of it as equivalent to LLVM's ThreadBSS.
- /// UDATA_SHR - Shared RAM. Memory area that is mapped to all banks.
-
- SectionKind K;
- switch (Ty) {
-  default: llvm_unreachable ("cannot create unknown section type");
- case UDATA_OVR: {
- K = SectionKind::getThreadBSS();
- break;
- }
- case UDATA_SHR:
- case UDATA: {
- K = SectionKind::getBSS();
- break;
- }
- case ROMDATA:
- case IDATA: {
- K = SectionKind::getMetadata();
- break;
- }
- case CODE: {
- K = SectionKind::getText();
- break;
- }
-
- }
-
-  // Copy strings into context-allocated memory so they get freed when the
-  // context is destroyed.
- char *NameCopy = static_cast<char*>(Ctx.Allocate(Name.size(), 1));
- memcpy(NameCopy, Name.data(), Name.size());
- char *AddressCopy = static_cast<char*>(Ctx.Allocate(Address.size(), 1));
- memcpy(AddressCopy, Address.data(), Address.size());
-
- // Create the Section.
- PIC16Section *S =
- new (Ctx) PIC16Section(StringRef(NameCopy, Name.size()), K,
- StringRef(AddressCopy, Address.size()), Color);
- S->T = Ty;
- return S;
-}
-
-// A generic way to print all types of sections.
-void PIC16Section::PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS) const {
-
-  // If the section is overlaid (i.e. it has a color), print the overlay
-  // name for it. Otherwise print its normal name.
- if (Color != -1)
- OS << PAN::getOverlayName(getName(), Color) << '\t';
- else
- OS << getName() << '\t';
-
- // Print type.
- switch (getType()) {
- default : llvm_unreachable ("unknown section type");
- case UDATA: OS << "UDATA"; break;
- case IDATA: OS << "IDATA"; break;
- case ROMDATA: OS << "ROMDATA"; break;
- case UDATA_SHR: OS << "UDATA_SHR"; break;
- case UDATA_OVR: OS << "UDATA_OVR"; break;
- case CODE: OS << "CODE"; break;
- }
-
- OS << '\t';
-
- // Print Address.
- OS << Address;
-
- OS << '\n';
-}
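
To make the emitted format concrete, here is a hedged example of the directive PrintSwitchToSection produces; the section name and address below are invented for illustration:

// For a section created as
//   PIC16Section::Create("fun.frame.section", UDATA_OVR, "0x120", -1, Ctx)
// the switch directive comes out as one tab-separated line:
//   fun.frame.section	UDATA_OVR	0x120
// If the section had a color (Color != -1), the first field would instead
// be PAN::getOverlayName(getName(), Color).
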
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Section.h b/contrib/llvm/lib/Target/PIC16/PIC16Section.h
deleted file mode 100644
index 5b33b51..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Section.h
+++ /dev/null
@@ -1,99 +0,0 @@
-//===- PIC16Section.h - PIC16-specific section representation -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the PIC16Section class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_PIC16SECTION_H
-#define LLVM_PIC16SECTION_H
-
-#include "llvm/MC/MCSection.h"
-#include "llvm/GlobalVariable.h"
-#include <vector>
-
-namespace llvm {
- /// PIC16Section - Represents a physical section in PIC16 COFF.
- /// Contains data objects.
- ///
- class PIC16Section : public MCSection {
-    /// PIC16 sections do not really use the SectionKind class to
-    /// distinguish between various types of sections. PIC16 maintains
-    /// its own section type info. See the PIC16SectionType enum in PIC16.h
-    /// for the various section types.
- PIC16SectionType T;
-
- /// Name of the section to uniquely identify it.
- StringRef Name;
-
- /// User can specify an address at which a section should be placed.
- /// Negative value here means user hasn't specified any.
- StringRef Address;
-
- /// Overlay information - Sections with same color can be overlaid on
- /// one another.
- int Color;
-
- /// Total size of all data objects contained here.
- unsigned Size;
-
- PIC16Section(StringRef name, SectionKind K, StringRef addr, int color)
- : MCSection(SV_PIC16, K), Name(name), Address(addr),
- Color(color), Size(0) {
- }
-
- public:
- /// Return the name of the section.
- StringRef getName() const { return Name; }
-
- /// Return the Address of the section.
- StringRef getAddress() const { return Address; }
-
- /// Return the Color of the section.
- int getColor() const { return Color; }
- void setColor(int color) { Color = color; }
-
- /// Return the size of the section.
- unsigned getSize() const { return Size; }
- void setSize(unsigned size) { Size = size; }
-
-    /// Contained data objects.
- // FIXME: This vector is leaked because sections are allocated with a
- // BumpPtrAllocator.
- std::vector<const GlobalVariable *>Items;
-
- /// Check section type.
- bool isUDATA_Type() const { return T == UDATA; }
- bool isIDATA_Type() const { return T == IDATA; }
- bool isROMDATA_Type() const { return T == ROMDATA; }
- bool isUDATA_OVR_Type() const { return T == UDATA_OVR; }
- bool isUDATA_SHR_Type() const { return T == UDATA_SHR; }
- bool isCODE_Type() const { return T == CODE; }
-
- PIC16SectionType getType() const { return T; }
-
- /// This would be the only way to create a section.
- static PIC16Section *Create(StringRef Name, PIC16SectionType Ty,
- StringRef Address, int Color,
- MCContext &Ctx);
-
- /// Override this as PIC16 has its own way of printing switching
- /// to a section.
- virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS) const;
-
- static bool classof(const MCSection *S) {
- return S->getVariant() == SV_PIC16;
- }
- static bool classof(const PIC16Section *) { return true; }
- };
-
-} // end namespace llvm
-
-#endif
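
The classof overloads above hook PIC16Section into LLVM's cast machinery, so a generic MCSection pointer can be downcast safely. A minimal sketch (function name hypothetical):

// Sketch: the SV_PIC16 variant tag drives dyn_cast<>.
void noteColor(const llvm::MCSection *S) {
  if (const llvm::PIC16Section *PS = llvm::dyn_cast<llvm::PIC16Section>(S)) {
    // Only reached when S->getVariant() == SV_PIC16.
    (void)PS->getColor();
  }
}
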
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.cpp
deleted file mode 100644
index 995955a..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- PIC16SelectionDAGInfo.cpp - PIC16 SelectionDAG Info ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PIC16SelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pic16-selectiondag-info"
-#include "PIC16TargetMachine.h"
-using namespace llvm;
-
-PIC16SelectionDAGInfo::PIC16SelectionDAGInfo(const PIC16TargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
-
-PIC16SelectionDAGInfo::~PIC16SelectionDAGInfo() {
-}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.h
deleted file mode 100644
index c67fd8b..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16SelectionDAGInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- PIC16SelectionDAGInfo.h - PIC16 SelectionDAG Info -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the PIC16 subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16SELECTIONDAGINFO_H
-#define PIC16SELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class PIC16TargetMachine;
-
-class PIC16SelectionDAGInfo : public TargetSelectionDAGInfo {
-public:
- explicit PIC16SelectionDAGInfo(const PIC16TargetMachine &TM);
- ~PIC16SelectionDAGInfo();
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.cpp b/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.cpp
deleted file mode 100644
index 33fc3fb..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//===- PIC16Subtarget.cpp - PIC16 Subtarget Information -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PIC16 specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16Subtarget.h"
-#include "PIC16GenSubtarget.inc"
-
-using namespace llvm;
-
-PIC16Subtarget::PIC16Subtarget(const std::string &TT, const std::string &FS,
- bool Cooper)
- :IsCooper(Cooper)
-{
- std::string CPU = "generic";
-
- // Parse features string.
- ParseSubtargetFeatures(FS, CPU);
-}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.h b/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.h
deleted file mode 100644
index 81e3783..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16Subtarget.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//=====-- PIC16Subtarget.h - Define Subtarget for the PIC16 ---*- C++ -*--====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the PIC16 specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef PIC16SUBTARGET_H
-#define PIC16SUBTARGET_H
-
-#include "llvm/Target/TargetSubtarget.h"
-
-#include <string>
-
-namespace llvm {
-
-class PIC16Subtarget : public TargetSubtarget {
-
- // IsCooper - Target ISA is Cooper.
- bool IsCooper;
-
-public:
- /// This constructor initializes the data members to match that
- /// of the specified triple.
- ///
- PIC16Subtarget(const std::string &TT, const std::string &FS, bool Cooper);
-
- /// isCooper - Returns true if the target ISA is Cooper.
- bool isCooper() const { return IsCooper; }
-
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- std::string ParseSubtargetFeatures(const std::string &FS,
- const std::string &CPU);
-};
-} // End llvm namespace
-
-#endif // PIC16SUBTARGET_H
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.cpp b/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.cpp
deleted file mode 100644
index 82b69be..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-//===-- PIC16TargetMachine.cpp - Define TargetMachine for PIC16 -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Top-level implementation for the PIC16 target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16.h"
-#include "PIC16MCAsmInfo.h"
-#include "PIC16TargetMachine.h"
-#include "llvm/PassManager.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Target/TargetRegistry.h"
-
-using namespace llvm;
-
-extern "C" void LLVMInitializePIC16Target() {
-  // Register the target. Currently the codegen works for the
-  // enhanced PIC16 mid-range.
- RegisterTargetMachine<PIC16TargetMachine> X(ThePIC16Target);
- RegisterAsmInfo<PIC16MCAsmInfo> A(ThePIC16Target);
-}
-
-
-// PIC16TargetMachine - Enhanced PIC16 mid-range Machine. May also represent
-// a Traditional Machine if 'Trad' is true.
-PIC16TargetMachine::PIC16TargetMachine(const Target &T, const std::string &TT,
- const std::string &FS, bool Trad)
-: LLVMTargetMachine(T, TT),
- Subtarget(TT, FS, Trad),
- DataLayout("e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-n8"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0) { }
-
-
-bool PIC16TargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // Install an instruction selector.
- PM.add(createPIC16ISelDag(*this));
- return false;
-}
-
-bool PIC16TargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createPIC16MemSelOptimizerPass());
- return true; // -print-machineinstr should print after this.
-}
-
-
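
A gloss of the data layout string handed to TargetData above, read off the standard LLVM datalayout grammar (my annotation, not part of the commit):

// "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-n8"
//   e         little-endian
//   p:16:8:8  pointers are 16 bits wide, with 8-bit ABI/preferred alignment
//   iN:8:8    i8, i16 and i32 are all byte-aligned (an 8-bit machine)
//   n8        8 bits is the only native integer width
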
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.h b/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.h
deleted file mode 100644
index dae5d31..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16TargetMachine.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//===-- PIC16TargetMachine.h - Define TargetMachine for PIC16 ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the PIC16 specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef PIC16_TARGETMACHINE_H
-#define PIC16_TARGETMACHINE_H
-
-#include "PIC16InstrInfo.h"
-#include "PIC16ISelLowering.h"
-#include "PIC16SelectionDAGInfo.h"
-#include "PIC16RegisterInfo.h"
-#include "PIC16Subtarget.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-/// PIC16TargetMachine
-///
-class PIC16TargetMachine : public LLVMTargetMachine {
- PIC16Subtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
- PIC16InstrInfo InstrInfo;
- PIC16TargetLowering TLInfo;
- PIC16SelectionDAGInfo TSInfo;
-
-  // PIC16 does not have any call stack frame, so there is no
-  // PIC16-specific FrameInfo class.
- TargetFrameInfo FrameInfo;
-
-public:
- PIC16TargetMachine(const Target &T, const std::string &TT,
- const std::string &FS, bool Cooper = false);
-
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual const PIC16InstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout;}
- virtual const PIC16Subtarget *getSubtargetImpl() const { return &Subtarget; }
-
- virtual const PIC16RegisterInfo *getRegisterInfo() const {
- return &(InstrInfo.getRegisterInfo());
- }
-
- virtual const PIC16TargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
-
- virtual const PIC16SelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
-
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
-}; // PIC16TargetMachine.
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.cpp b/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.cpp
deleted file mode 100644
index ff0f971..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.cpp
+++ /dev/null
@@ -1,384 +0,0 @@
-//===-- PIC16TargetObjectFile.cpp - PIC16 object files --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16TargetObjectFile.h"
-#include "PIC16TargetMachine.h"
-#include "PIC16Section.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/MC/MCSection.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-
-PIC16TargetObjectFile::PIC16TargetObjectFile() {
-}
-
-PIC16TargetObjectFile::~PIC16TargetObjectFile() {
-}
-
-/// Find a pic16 section. Return null if not found. Do not create one.
-PIC16Section *PIC16TargetObjectFile::
-findPIC16Section(const std::string &Name) const {
- /// Return if we have an already existing one.
- PIC16Section *Entry = SectionsByName[Name];
- if (Entry)
- return Entry;
-
- return NULL;
-}
-
-
-/// Find a pic16 section. If not found, create one.
-PIC16Section *PIC16TargetObjectFile::
-getPIC16Section(const std::string &Name, PIC16SectionType Ty,
- const std::string &Address, int Color) const {
-
- /// Return if we have an already existing one.
- PIC16Section *&Entry = SectionsByName[Name];
- if (Entry)
- return Entry;
-
-
- Entry = PIC16Section::Create(Name, Ty, Address, Color, getContext());
- return Entry;
-}
-
-/// Find a standard pic16 data section. If not found, create one and keep
-/// track of it by adding it to appropriate std section list.
-PIC16Section *PIC16TargetObjectFile::
-getPIC16DataSection(const std::string &Name, PIC16SectionType Ty,
- const std::string &Address, int Color) const {
-
- /// Return if we have an already existing one.
- PIC16Section *&Entry = SectionsByName[Name];
- if (Entry)
- return Entry;
-
-
- /// Else create a new one and add it to appropriate section list.
- Entry = PIC16Section::Create(Name, Ty, Address, Color, getContext());
-
- switch (Ty) {
-  default: llvm_unreachable ("unknown standard section type.");
- case UDATA: UDATASections_.push_back(Entry); break;
- case IDATA: IDATASections_.push_back(Entry); break;
- case ROMDATA: ROMDATASection_ = Entry; break;
- case UDATA_SHR: SHAREDUDATASection_ = Entry; break;
- }
-
- return Entry;
-}
-
-
-/// Find a standard pic16 autos section. If not found, create one and keep
-/// track of it by adding it to appropriate std section list.
-PIC16Section *PIC16TargetObjectFile::
-getPIC16AutoSection(const std::string &Name, PIC16SectionType Ty,
- const std::string &Address, int Color) const {
-
- /// Return if we have an already existing one.
- PIC16Section *&Entry = SectionsByName[Name];
- if (Entry)
- return Entry;
-
-
- /// Else create a new one and add it to appropriate section list.
- Entry = PIC16Section::Create(Name, Ty, Address, Color, getContext());
-
- assert (Ty == UDATA_OVR && "incorrect section type for autos");
- AUTOSections_.push_back(Entry);
-
- return Entry;
-}
-
-/// Find a pic16 user section. If not found, create one and keep
-/// track of it by adding it to appropriate std section list.
-PIC16Section *PIC16TargetObjectFile::
-getPIC16UserSection(const std::string &Name, PIC16SectionType Ty,
- const std::string &Address, int Color) const {
-
- /// Return if we have an already existing one.
- PIC16Section *&Entry = SectionsByName[Name];
- if (Entry)
- return Entry;
-
-
- /// Else create a new one and add it to appropriate section list.
- Entry = PIC16Section::Create(Name, Ty, Address, Color, getContext());
-
- USERSections_.push_back(Entry);
-
- return Entry;
-}
-
-/// Do some standard initialization.
-void PIC16TargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &tm){
- TargetLoweringObjectFile::Initialize(Ctx, tm);
- TM = &tm;
-
- ROMDATASection_ = NULL;
- SHAREDUDATASection_ = NULL;
-}
-
-/// allocateUDATA - Allocate an uninitialized global to an existing or new
-/// UDATA section and return that section.
-const MCSection *
-PIC16TargetObjectFile::allocateUDATA(const GlobalVariable *GV) const {
- assert(GV->hasInitializer() && "This global doesn't need space");
- const Constant *C = GV->getInitializer();
-  assert(C->isNullValue() && "Uninitialized global has non-zero initializer");
-
- // Find how much space this global needs.
- const TargetData *TD = TM->getTargetData();
- const Type *Ty = C->getType();
- unsigned ValSize = TD->getTypeAllocSize(Ty);
-
- // Go through all UDATA Sections and assign this variable
- // to the first available section having enough space.
- PIC16Section *Found = NULL;
- for (unsigned i = 0; i < UDATASections_.size(); i++) {
- if (DataBankSize - UDATASections_[i]->getSize() >= ValSize) {
- Found = UDATASections_[i];
- break;
- }
- }
-
-  // No UDATA section spacious enough was found. Create a new one.
- if (!Found) {
- std::string name = PAN::getUdataSectionName(UDATASections_.size());
- Found = getPIC16DataSection(name.c_str(), UDATA);
- }
-
- // Insert the GV into this UDATA section.
- Found->Items.push_back(GV);
- Found->setSize(Found->getSize() + ValSize);
- return Found;
-}
-
-/// allocateIDATA - allocate an initialized global into an existing
-/// or new section and return that section.
-const MCSection *
-PIC16TargetObjectFile::allocateIDATA(const GlobalVariable *GV) const{
- assert(GV->hasInitializer() && "This global doesn't need space");
- const Constant *C = GV->getInitializer();
-  assert(!C->isNullValue() && "Initialized global has zero initializer");
- assert(GV->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE &&
- "can allocate initialized RAM data only");
-
- // Find how much space this global needs.
- const TargetData *TD = TM->getTargetData();
- const Type *Ty = C->getType();
- unsigned ValSize = TD->getTypeAllocSize(Ty);
-
- // Go through all IDATA Sections and assign this variable
- // to the first available section having enough space.
- PIC16Section *Found = NULL;
- for (unsigned i = 0; i < IDATASections_.size(); i++) {
- if (DataBankSize - IDATASections_[i]->getSize() >= ValSize) {
- Found = IDATASections_[i];
- break;
- }
- }
-
-  // No IDATA section spacious enough was found. Create a new one.
- if (!Found) {
- std::string name = PAN::getIdataSectionName(IDATASections_.size());
- Found = getPIC16DataSection(name.c_str(), IDATA);
- }
-
-  // Insert the GV into this IDATA section.
- Found->Items.push_back(GV);
- Found->setSize(Found->getSize() + ValSize);
- return Found;
-}
-
-// Allocate a program memory variable into ROMDATA section.
-const MCSection *
-PIC16TargetObjectFile::allocateROMDATA(const GlobalVariable *GV) const {
-
- std::string name = PAN::getRomdataSectionName();
- PIC16Section *S = getPIC16DataSection(name.c_str(), ROMDATA);
-
- S->Items.push_back(GV);
- return S;
-}
-
-// Get the section for an automatic variable of a function.
-// For PIC16 they are globals only with mangled names.
-const MCSection *
-PIC16TargetObjectFile::allocateAUTO(const GlobalVariable *GV) const {
-
- const std::string name = PAN::getSectionNameForSym(GV->getName());
- PIC16Section *S = getPIC16AutoSection(name.c_str());
-
- S->Items.push_back(GV);
- return S;
-}
-
-
-// Override default implementation to put the true globals into
-// multiple data sections if required.
-const MCSection *
-PIC16TargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV1,
- SectionKind Kind,
- Mangler *Mang,
- const TargetMachine &TM) const {
- // We select the section based on the initializer here, so it really
- // has to be a GlobalVariable.
- const GlobalVariable *GV = dyn_cast<GlobalVariable>(GV1);
- if (!GV)
- return TargetLoweringObjectFile::SelectSectionForGlobal(GV1, Kind, Mang,TM);
-
- assert(GV->hasInitializer() && "A def without initializer?");
-
- // First, if this is an automatic variable for a function, get the section
- // name for it and return.
- std::string name = GV->getName();
- if (PAN::isLocalName(name))
- return allocateAUTO(GV);
-
- // See if this is an uninitialized global.
- const Constant *C = GV->getInitializer();
- if (C->isNullValue())
- return allocateUDATA(GV);
-
-  // If this is initialized data in RAM, put it in the correct IDATA section.
- if (GV->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE)
- return allocateIDATA(GV);
-
-  // This is initialized data in ROM; put it in the read-only section.
- if (GV->getType()->getAddressSpace() == PIC16ISD::ROM_SPACE)
- return allocateROMDATA(GV);
-
- // Else let the default implementation take care of it.
- return TargetLoweringObjectFile::SelectSectionForGlobal(GV, Kind, Mang,TM);
-}
-
-
-
-
-/// getExplicitSectionGlobal - Allow the target to completely override
-/// section assignment of a global.
-const MCSection *PIC16TargetObjectFile::
-getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const {
- assert(GV->hasSection());
-
- if (const GlobalVariable *GVar = cast<GlobalVariable>(GV)) {
- std::string SectName = GVar->getSection();
-    // If an address for a variable is specified, get the address and
-    // create a section.
-    // FIXME: move this attribute checking into PAN.
- std::string AddrStr = "Address=";
- if (SectName.compare(0, AddrStr.length(), AddrStr) == 0) {
- std::string SectAddr = SectName.substr(AddrStr.length());
- if (SectAddr.compare("NEAR") == 0)
- return allocateSHARED(GVar, Mang);
- else
- return allocateAtGivenAddress(GVar, SectAddr);
- }
-
- // Create the section specified with section attribute.
- return allocateInGivenSection(GVar);
- }
-
- return getPIC16DataSection(GV->getSection().c_str(), UDATA);
-}
-
-const MCSection *
-PIC16TargetObjectFile::allocateSHARED(const GlobalVariable *GV,
- Mangler *Mang) const {
- // Make sure that this is an uninitialized global.
- assert(GV->hasInitializer() && "This global doesn't need space");
- if (!GV->getInitializer()->isNullValue()) {
- // FIXME: Generate a warning in this case that near qualifier will be
- // ignored.
- return SelectSectionForGlobal(GV, SectionKind::getDataRel(), Mang, *TM);
- }
- std::string Name = PAN::getSharedUDataSectionName();
-
- PIC16Section *SharedUDataSect = getPIC16DataSection(Name.c_str(), UDATA_SHR);
- // Insert the GV into shared section.
- SharedUDataSect->Items.push_back(GV);
- return SharedUDataSect;
-}
-
-
-// Interface used by AsmPrinter to get a code section for a function.
-const PIC16Section *
-PIC16TargetObjectFile::SectionForCode(const std::string &FnName,
- bool isISR) const {
- const std::string &sec_name = PAN::getCodeSectionName(FnName);
- // If it is ISR, its code section starts at a specific address.
- if (isISR)
- return getPIC16Section(sec_name, CODE, PAN::getISRAddr());
- return getPIC16Section(sec_name, CODE);
-}
-
-// Interface used by AsmPrinter to get a frame section for a function.
-const PIC16Section *
-PIC16TargetObjectFile::SectionForFrame(const std::string &FnName) const {
- const std::string &sec_name = PAN::getFrameSectionName(FnName);
- return getPIC16Section(sec_name, UDATA_OVR);
-}
-
-// Allocate a global var in existing or new section of given name.
-const MCSection *
-PIC16TargetObjectFile::allocateInGivenSection(const GlobalVariable *GV) const {
- // Determine the type of section that we need to create.
- PIC16SectionType SecTy;
-
- // See if this is an uninitialized global.
- const Constant *C = GV->getInitializer();
- if (C->isNullValue())
- SecTy = UDATA;
-  // If this is initialized data in RAM, put it in the correct IDATA section.
- else if (GV->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE)
- SecTy = IDATA;
-  // This is initialized data in ROM; put it in the read-only section.
- else if (GV->getType()->getAddressSpace() == PIC16ISD::ROM_SPACE)
- SecTy = ROMDATA;
- else
- llvm_unreachable ("Could not determine section type for global");
-
- PIC16Section *S = getPIC16UserSection(GV->getSection().c_str(), SecTy);
- S->Items.push_back(GV);
- return S;
-}
-
-// Allocate a global var in a new absolute section at a given address.
-const MCSection *
-PIC16TargetObjectFile::allocateAtGivenAddress(const GlobalVariable *GV,
- const std::string &Addr) const {
- // Determine the type of section that we need to create.
- PIC16SectionType SecTy;
-
- // See if this is an uninitialized global.
- const Constant *C = GV->getInitializer();
- if (C->isNullValue())
- SecTy = UDATA;
-  // If this is initialized data in RAM, put it in the correct IDATA section.
- else if (GV->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE)
- SecTy = IDATA;
-  // This is initialized data in ROM; put it in the read-only section.
- else if (GV->getType()->getAddressSpace() == PIC16ISD::ROM_SPACE)
- SecTy = ROMDATA;
- else
- llvm_unreachable ("Could not determine section type for global");
-
- std::string Prefix = GV->getNameStr() + "." + Addr + ".";
- std::string SName = PAN::getUserSectionName(Prefix);
- PIC16Section *S = getPIC16UserSection(SName.c_str(), SecTy, Addr.c_str());
- S->Items.push_back(GV);
- return S;
-}
-
-
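
The allocateUDATA/allocateIDATA routines above implement a plain first-fit packing of globals into fixed-size RAM banks (DataBankSize is 80, per the header below). A standalone sketch of that policy, with hypothetical names standing in for PIC16Section and the PAN helpers:

#include <string>
#include <vector>

struct Bank { std::string Name; unsigned Size; }; // stand-in for PIC16Section
static const unsigned BankCapacity = 80;          // mirrors DataBankSize

// First fit: reuse the first bank with enough room left; otherwise open a
// new one, as the real code does via getPIC16DataSection(getUdataSectionName).
Bank &allocate(std::vector<Bank> &Banks, unsigned ValSize) {
  for (unsigned i = 0; i < Banks.size(); ++i)
    if (BankCapacity - Banks[i].Size >= ValSize) {
      Banks[i].Size += ValSize;
      return Banks[i];
    }
  Bank B;
  B.Name = "udata." + std::to_string(Banks.size());
  B.Size = ValSize;
  Banks.push_back(B);
  return Banks.back();
}
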
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.h b/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.h
deleted file mode 100644
index b1eb9f9..0000000
--- a/contrib/llvm/lib/Target/PIC16/PIC16TargetObjectFile.h
+++ /dev/null
@@ -1,168 +0,0 @@
-//===-- PIC16TargetObjectFile.h - PIC16 Object Info -------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_PIC16_TARGETOBJECTFILE_H
-#define LLVM_TARGET_PIC16_TARGETOBJECTFILE_H
-
-#include "PIC16.h"
-#include "PIC16ABINames.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/ADT/StringMap.h"
-#include <vector>
-#include <string>
-
-namespace llvm {
- class GlobalVariable;
- class Module;
- class PIC16TargetMachine;
- class PIC16Section;
-
- enum { DataBankSize = 80 };
-
-  /// PIC16 splits the global data into multiple udata and idata sections.
-  /// Each udata and idata section needs to keep a list of the globals it
-  /// contains, in order to avoid scanning over all the global values
-  /// again and printing only those that match the current section.
-  /// Keeping values inside the sections makes printing a section much easier.
- ///
- /// FIXME: MOVE ALL THIS STUFF TO PIC16Section.
- ///
-
- /// PIC16TargetObjectFile - PIC16 Object file. Contains data and code
- /// sections.
- // PIC16 Object File has two types of sections.
- // 1. Standard Sections
- // 1.1 un-initialized global data
- // 1.2 initialized global data
- // 1.3 program memory data
- // 1.4 local variables of functions.
- // 2. User defined sections
- // 2.1 Objects placed in a specific section. (By _Section() macro)
- // 2.2 Objects placed at a specific address. (By _Address() macro)
- class PIC16TargetObjectFile : public TargetLoweringObjectFile {
- /// SectionsByName - Bindings of names to allocated sections.
- mutable StringMap<PIC16Section*> SectionsByName;
-
- const TargetMachine *TM;
-
- /// Lists of sections.
- /// Standard Data Sections.
- mutable std::vector<PIC16Section *> UDATASections_;
- mutable std::vector<PIC16Section *> IDATASections_;
- mutable PIC16Section * ROMDATASection_;
- mutable PIC16Section * SHAREDUDATASection_;
-
- /// Standard Auto Sections.
- mutable std::vector<PIC16Section *> AUTOSections_;
-
- /// User specified sections.
- mutable std::vector<PIC16Section *> USERSections_;
-
-
- /// Find or Create a PIC16 Section, without adding it to any
- /// section list.
- PIC16Section *getPIC16Section(const std::string &Name,
- PIC16SectionType Ty,
- const std::string &Address = "",
- int Color = -1) const;
-
- /// Convenience functions. These wrappers also take care of adding
- /// the newly created section to the appropriate sections list.
-
- /// Find or Create PIC16 Standard Data Section.
- PIC16Section *getPIC16DataSection(const std::string &Name,
- PIC16SectionType Ty,
- const std::string &Address = "",
- int Color = -1) const;
-
- /// Find or Create PIC16 Standard Auto Section.
- PIC16Section *getPIC16AutoSection(const std::string &Name,
- PIC16SectionType Ty = UDATA_OVR,
- const std::string &Address = "",
- int Color = -1) const;
-
-    /// Find or Create PIC16 User Section.
- PIC16Section *getPIC16UserSection(const std::string &Name,
- PIC16SectionType Ty,
- const std::string &Address = "",
- int Color = -1) const;
-
- /// Allocate Un-initialized data to a standard UDATA section.
- const MCSection *allocateUDATA(const GlobalVariable *GV) const;
-
- /// Allocate Initialized data to a standard IDATA section.
- const MCSection *allocateIDATA(const GlobalVariable *GV) const;
-
- /// Allocate ROM data to the standard ROMDATA section.
- const MCSection *allocateROMDATA(const GlobalVariable *GV) const;
-
- /// Allocate an AUTO variable to an AUTO section.
- const MCSection *allocateAUTO(const GlobalVariable *GV) const;
-
- /// Allocate DATA in user specified section.
- const MCSection *allocateInGivenSection(const GlobalVariable *GV) const;
-
- /// Allocate DATA at user specified address.
- const MCSection *allocateAtGivenAddress(const GlobalVariable *GV,
- const std::string &Addr) const;
-
- /// Allocate a shared variable to SHARED section.
- const MCSection *allocateSHARED(const GlobalVariable *GV,
- Mangler *Mang) const;
-
- public:
- PIC16TargetObjectFile();
- ~PIC16TargetObjectFile();
- void Initialize(MCContext &Ctx, const TargetMachine &TM);
-
- /// Return the section with the given Name. Null if not found.
- PIC16Section *findPIC16Section(const std::string &Name) const;
-
- /// Override section allocations for user specified sections.
- virtual const MCSection *
- getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler *Mang, const TargetMachine &TM) const;
-
- /// Select sections for Data and Auto variables(globals).
- virtual const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind,
- Mangler *Mang,
- const TargetMachine&) const;
-
-
- /// Return a code section for a function.
- const PIC16Section *SectionForCode (const std::string &FnName,
- bool isISR) const;
-
- /// Return a frame section for a function.
- const PIC16Section *SectionForFrame (const std::string &FnName) const;
-
- /// Accessors for various section lists.
- const std::vector<PIC16Section *> &UDATASections() const {
- return UDATASections_;
- }
- const std::vector<PIC16Section *> &IDATASections() const {
- return IDATASections_;
- }
- const PIC16Section *ROMDATASection() const {
- return ROMDATASection_;
- }
- const PIC16Section *SHAREDUDATASection() const {
- return SHAREDUDATASection_;
- }
- const std::vector<PIC16Section *> &AUTOSections() const {
- return AUTOSections_;
- }
- const std::vector<PIC16Section *> &USERSections() const {
- return USERSections_;
- }
- };
-} // end namespace llvm
-
-#endif
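
getExplicitSectionGlobal (in the .cpp above) keys user placement off a section string that may begin with "Address=": "Address=NEAR" requests shared RAM, and anything else after the prefix is taken as an absolute address. A hedged sketch of just that dispatch, with hypothetical names:

#include <string>

enum class Placement { SharedNear, AbsoluteAddress, NamedSection };

// Mirrors the prefix test in getExplicitSectionGlobal; the real code then
// calls allocateSHARED / allocateAtGivenAddress / allocateInGivenSection.
Placement classify(const std::string &SectName, std::string &AddrOut) {
  const std::string AddrStr = "Address=";
  if (SectName.compare(0, AddrStr.length(), AddrStr) == 0) {
    AddrOut = SectName.substr(AddrStr.length());
    return AddrOut == "NEAR" ? Placement::SharedNear
                             : Placement::AbsoluteAddress;
  }
  return Placement::NamedSection; // plain section attribute
}
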
diff --git a/contrib/llvm/lib/Target/PIC16/TargetInfo/PIC16TargetInfo.cpp b/contrib/llvm/lib/Target/PIC16/TargetInfo/PIC16TargetInfo.cpp
deleted file mode 100644
index f1bdb12..0000000
--- a/contrib/llvm/lib/Target/PIC16/TargetInfo/PIC16TargetInfo.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//===-- PIC16TargetInfo.cpp - PIC16 Target Implementation -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "PIC16.h"
-#include "llvm/Module.h"
-#include "llvm/Target/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::ThePIC16Target, llvm::TheCooperTarget;
-
-extern "C" void LLVMInitializePIC16TargetInfo() {
- RegisterTarget<Triple::pic16> X(ThePIC16Target, "pic16",
- "PIC16 14-bit [experimental]");
-
- RegisterTarget<> Y(TheCooperTarget, "cooper", "PIC16 Cooper [experimental]");
-}
diff --git a/contrib/llvm/lib/Target/PTX/CMakeLists.txt b/contrib/llvm/lib/Target/PTX/CMakeLists.txt
new file mode 100644
index 0000000..331266d
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/CMakeLists.txt
@@ -0,0 +1,26 @@
+set(LLVM_TARGET_DEFINITIONS PTX.td)
+
+tablegen(PTXGenAsmWriter.inc -gen-asm-writer)
+tablegen(PTXGenDAGISel.inc -gen-dag-isel)
+tablegen(PTXGenInstrInfo.inc -gen-instr-desc)
+tablegen(PTXGenInstrNames.inc -gen-instr-enums)
+tablegen(PTXGenRegisterInfo.inc -gen-register-desc)
+tablegen(PTXGenRegisterInfo.h.inc -gen-register-desc-header)
+tablegen(PTXGenRegisterNames.inc -gen-register-enums)
+tablegen(PTXGenSubtarget.inc -gen-subtarget)
+
+add_llvm_target(PTXCodeGen
+ PTXAsmPrinter.cpp
+ PTXISelDAGToDAG.cpp
+ PTXISelLowering.cpp
+ PTXInstrInfo.cpp
+ PTXFrameLowering.cpp
+ PTXMCAsmInfo.cpp
+ PTXMCAsmStreamer.cpp
+ PTXMFInfoExtract.cpp
+ PTXRegisterInfo.cpp
+ PTXSubtarget.cpp
+ PTXTargetMachine.cpp
+ )
+
+add_subdirectory(TargetInfo)
diff --git a/contrib/llvm/lib/Target/PTX/Makefile b/contrib/llvm/lib/Target/PTX/Makefile
new file mode 100644
index 0000000..2c40d69
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/Makefile
@@ -0,0 +1,26 @@
+##===- lib/Target/PTX/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMPTXCodeGen
+TARGET = PTX
+
+# Make sure that tblgen is run, first thing.
+BUILT_SOURCES = PTXGenAsmWriter.inc \
+ PTXGenDAGISel.inc \
+ PTXGenInstrInfo.inc \
+ PTXGenInstrNames.inc \
+ PTXGenRegisterInfo.inc \
+ PTXGenRegisterInfo.h.inc \
+ PTXGenRegisterNames.inc \
+ PTXGenSubtarget.inc
+
+DIRS = TargetInfo
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/PTX/PTX.h b/contrib/llvm/lib/Target/PTX/PTX.h
new file mode 100644
index 0000000..19385ba
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTX.h
@@ -0,0 +1,49 @@
+//===-- PTX.h - Top-level interface for PTX representation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// PTX back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_H
+#define PTX_H
+
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+ class PTXTargetMachine;
+ class FunctionPass;
+
+ namespace PTX {
+ enum StateSpace {
+ GLOBAL = 0, // default to global state space
+ CONSTANT = 1,
+ LOCAL = 2,
+ PARAMETER = 3,
+ SHARED = 4
+ };
+ } // namespace PTX
+
+ FunctionPass *createPTXISelDag(PTXTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+
+ FunctionPass *createPTXMFInfoExtract(PTXTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+
+ extern Target ThePTXTarget;
+} // namespace llvm
+
+// Defines symbolic names for PTX registers.
+#include "PTXGenRegisterNames.inc"
+
+// Defines symbolic names for the PTX instructions.
+#include "PTXGenInstrNames.inc"
+
+#endif // PTX_H
diff --git a/contrib/llvm/lib/Target/PTX/PTX.td b/contrib/llvm/lib/Target/PTX/PTX.td
new file mode 100644
index 0000000..8b1a1b1
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTX.td
@@ -0,0 +1,54 @@
+//===- PTX.td - Describe the PTX Target Machine ---------------*- tblgen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This is the top level entry point for the PTX target.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// Subtarget Features.
+//===----------------------------------------------------------------------===//
+
+def FeatureSM20 : SubtargetFeature<"sm20", "is_sm20", "true",
+ "Enable sm_20 target architecture">;
+
+//===----------------------------------------------------------------------===//
+// PTX supported processors.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"generic", []>;
+
+//===----------------------------------------------------------------------===//
+// Register File Description
+//===----------------------------------------------------------------------===//
+
+include "PTXRegisterInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "PTXInstrInfo.td"
+
+def PTXInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// Target Declaration
+//===----------------------------------------------------------------------===//
+
+def PTX : Target {
+ let InstructionSet = PTXInstrInfo;
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
new file mode 100644
index 0000000..a605997
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
@@ -0,0 +1,347 @@
+//===-- PTXAsmPrinter.cpp - PTX LLVM assembly writer ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to PTX assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ptx-asm-printer"
+
+#include "PTX.h"
+#include "PTXMachineFunctionInfo.h"
+#include "PTXTargetMachine.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<std::string>
+OptPTXVersion("ptx-version", cl::desc("Set PTX version"), cl::init("1.4"));
+
+static cl::opt<std::string>
+OptPTXTarget("ptx-target", cl::desc("Set GPU target (comma-separated list)"),
+ cl::init("sm_10"));
+
+namespace {
+class PTXAsmPrinter : public AsmPrinter {
+public:
+ explicit PTXAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {}
+
+ const char *getPassName() const { return "PTX Assembly Printer"; }
+
+ bool doFinalization(Module &M);
+
+ virtual void EmitStartOfAsmFile(Module &M);
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual void EmitFunctionBodyStart();
+ virtual void EmitFunctionBodyEnd() { OutStreamer.EmitRawText(Twine("}")); }
+
+ virtual void EmitInstruction(const MachineInstr *MI);
+
+ void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
+ void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS,
+ const char *Modifier = 0);
+ void printParamOperand(const MachineInstr *MI, int opNum, raw_ostream &OS,
+ const char *Modifier = 0);
+
+ // autogen'd.
+ void printInstruction(const MachineInstr *MI, raw_ostream &OS);
+ static const char *getRegisterName(unsigned RegNo);
+
+private:
+ void EmitVariableDeclaration(const GlobalVariable *gv);
+ void EmitFunctionDeclaration();
+}; // class PTXAsmPrinter
+} // namespace
+
+static const char PARAM_PREFIX[] = "__param_";
+
+static const char *getRegisterTypeName(unsigned RegNo) {
+#define TEST_REGCLS(cls, clsstr) \
+ if (PTX::cls ## RegisterClass->contains(RegNo)) return # clsstr;
+ TEST_REGCLS(RRegs32, s32);
+ TEST_REGCLS(Preds, pred);
+#undef TEST_REGCLS
+
+ llvm_unreachable("Not in any register class!");
+ return NULL;
+}
+
+static const char *getInstructionTypeName(const MachineInstr *MI) {
+ for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.getType() == MachineOperand::MO_Register)
+ return getRegisterTypeName(MO.getReg());
+ }
+
+ llvm_unreachable("No reg operand found in instruction!");
+ return NULL;
+}
+
+static const char *getStateSpaceName(unsigned addressSpace) {
+ switch (addressSpace) {
+ default: llvm_unreachable("Unknown state space");
+ case PTX::GLOBAL: return "global";
+ case PTX::CONSTANT: return "const";
+ case PTX::LOCAL: return "local";
+ case PTX::PARAMETER: return "param";
+ case PTX::SHARED: return "shared";
+ }
+ return NULL;
+}
+
+bool PTXAsmPrinter::doFinalization(Module &M) {
+  // XXX Temporarily remove global variables so that doFinalization() will not
+  // emit them again (global variables are emitted at the beginning).
+
+ Module::GlobalListType &global_list = M.getGlobalList();
+ int i, n = global_list.size();
+ GlobalVariable **gv_array = new GlobalVariable* [n];
+
+ // first, back-up GlobalVariable in gv_array
+ i = 0;
+ for (Module::global_iterator I = global_list.begin(), E = global_list.end();
+ I != E; ++I)
+ gv_array[i++] = &*I;
+
+ // second, empty global_list
+ while (!global_list.empty())
+ global_list.remove(global_list.begin());
+
+ // call doFinalization
+ bool ret = AsmPrinter::doFinalization(M);
+
+ // now we restore global variables
+ for (i = 0; i < n; i ++)
+ global_list.insert(global_list.end(), gv_array[i]);
+
+ delete[] gv_array;
+ return ret;
+}
+
+void PTXAsmPrinter::EmitStartOfAsmFile(Module &M)
+{
+ OutStreamer.EmitRawText(Twine("\t.version " + OptPTXVersion));
+ OutStreamer.EmitRawText(Twine("\t.target " + OptPTXTarget));
+ OutStreamer.AddBlankLine();
+
+ // declare global variables
+ for (Module::const_global_iterator i = M.global_begin(), e = M.global_end();
+ i != e; ++i)
+ EmitVariableDeclaration(i);
+}
+
+bool PTXAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ SetupMachineFunction(MF);
+ EmitFunctionDeclaration();
+ EmitFunctionBody();
+ return false;
+}
+
+void PTXAsmPrinter::EmitFunctionBodyStart() {
+ OutStreamer.EmitRawText(Twine("{"));
+
+ const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
+
+ // Print local variable definition
+ for (PTXMachineFunctionInfo::reg_iterator
+ i = MFI->localVarRegBegin(), e = MFI->localVarRegEnd(); i != e; ++ i) {
+ unsigned reg = *i;
+
+ std::string def = "\t.reg .";
+ def += getRegisterTypeName(reg);
+ def += ' ';
+ def += getRegisterName(reg);
+ def += ';';
+ OutStreamer.EmitRawText(Twine(def));
+ }
+}
+
+void PTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ std::string str;
+ str.reserve(64);
+
+ // Write instruction to str
+ raw_string_ostream OS(str);
+ printInstruction(MI, OS);
+ OS << ';';
+ OS.flush();
+
+ // Replace "%type" if found
+ size_t pos;
+ if ((pos = str.find("%type")) != std::string::npos)
+ str.replace(pos, /*strlen("%type")==*/5, getInstructionTypeName(MI));
+
+ StringRef strref = StringRef(str);
+ OutStreamer.EmitRawText(strref);
+}
+
+void PTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &OS) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+
+ switch (MO.getType()) {
+ default:
+ llvm_unreachable("<unknown operand type>");
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ OS << *Mang->getSymbol(MO.getGlobal());
+ break;
+ case MachineOperand::MO_Immediate:
+ OS << (int) MO.getImm();
+ break;
+ case MachineOperand::MO_Register:
+ OS << getRegisterName(MO.getReg());
+ break;
+ }
+}
+
+void PTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &OS, const char *Modifier) {
+ printOperand(MI, opNum, OS);
+
+ if (MI->getOperand(opNum+1).isImm() && MI->getOperand(opNum+1).getImm() == 0)
+ return; // don't print "+0"
+
+ OS << "+";
+ printOperand(MI, opNum+1, OS);
+}
+
+void PTXAsmPrinter::printParamOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &OS, const char *Modifier) {
+ OS << PARAM_PREFIX << (int) MI->getOperand(opNum).getImm() + 1;
+}
+
+void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
+  // Check to see if this is a special global used by LLVM; if so, emit it.
+ if (EmitSpecialLLVMGlobal(gv))
+ return;
+
+ MCSymbol *gvsym = Mang->getSymbol(gv);
+
+ assert(gvsym->isUndefined() && "Cannot define a symbol twice!");
+
+ std::string decl;
+
+ // check if it is defined in some other translation unit
+ if (gv->isDeclaration())
+ decl += ".extern ";
+
+ // state space: e.g., .global
+ decl += ".";
+ decl += getStateSpaceName(gv->getType()->getAddressSpace());
+ decl += " ";
+
+ // alignment (optional)
+ unsigned alignment = gv->getAlignment();
+ if (alignment != 0) {
+ decl += ".align ";
+ decl += utostr(Log2_32(gv->getAlignment()));
+ decl += " ";
+ }
+
+ // TODO: add types
+ decl += ".s32 ";
+
+ decl += gvsym->getName();
+
+ if (ArrayType::classof(gv->getType()) || PointerType::classof(gv->getType()))
+ decl += "[]";
+
+ decl += ";";
+
+ OutStreamer.EmitRawText(Twine(decl));
+
+ OutStreamer.AddBlankLine();
+}
+
+void PTXAsmPrinter::EmitFunctionDeclaration() {
+ // The function label could have already been emitted if two symbols end up
+ // conflicting due to asm renaming. Detect this and emit an error.
+ if (!CurrentFnSym->isUndefined()) {
+ report_fatal_error("'" + Twine(CurrentFnSym->getName()) +
+ "' label emitted multiple times to assembly file");
+ return;
+ }
+
+ const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
+ const bool isKernel = MFI->isKernel();
+ unsigned reg;
+
+ std::string decl = isKernel ? ".entry" : ".func";
+
+ // Print return register
+ reg = MFI->retReg();
+ if (!isKernel && reg != PTX::NoRegister) {
+ decl += " (.reg ."; // FIXME: could it return in .param space?
+ decl += getRegisterTypeName(reg);
+ decl += " ";
+ decl += getRegisterName(reg);
+ decl += ")";
+ }
+
+ // Print function name
+ decl += " ";
+ decl += CurrentFnSym->getName().str();
+
+ // Print parameter list
+ if (!MFI->argRegEmpty()) {
+ decl += " (";
+ if (isKernel) {
+ for (int i = 0, e = MFI->getNumArg(); i != e; ++i) {
+ if (i != 0)
+ decl += ", ";
+ decl += ".param .s32 "; // TODO: add types
+ decl += PARAM_PREFIX;
+ decl += utostr(i + 1);
+ }
+ } else {
+ for (PTXMachineFunctionInfo::reg_iterator
+ i = MFI->argRegBegin(), e = MFI->argRegEnd(), b = i; i != e; ++i) {
+ reg = *i;
+ assert(reg != PTX::NoRegister && "Not a valid register!");
+ if (i != b)
+ decl += ", ";
+ decl += ".reg .";
+ decl += getRegisterTypeName(reg);
+ decl += " ";
+ decl += getRegisterName(reg);
+ }
+ }
+ decl += ")";
+ }
+
+ OutStreamer.EmitRawText(Twine(decl));
+}
+
+#include "PTXGenAsmWriter.inc"
+
+// Force static initialization.
+extern "C" void LLVMInitializePTXAsmPrinter() {
+ RegisterAsmPrinter<PTXAsmPrinter> X(ThePTXTarget);
+}
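
EmitInstruction above post-processes the auto-generated instruction text, replacing a literal "%type" placeholder with a suffix derived from the first register operand. In isolation the substitution is just this (a sketch; the helper name is mine):

#include <string>

// E.g. patchType("add.%type %r1, %r2, %r3", "s32")
//   -> "add.s32 %r1, %r2, %r3"   (when the register is in RRegs32)
std::string patchType(std::string Str, const char *TypeName) {
  std::string::size_type Pos = Str.find("%type");
  if (Pos != std::string::npos)
    Str.replace(Pos, 5 /* strlen("%type") */, TypeName);
  return Str;
}
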
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
new file mode 100644
index 0000000..b621b9d
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
@@ -0,0 +1,24 @@
+//=======- PTXFrameLowering.cpp - PTX Frame Information -------*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PTX implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTXFrameLowering.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+using namespace llvm;
+
+void PTXFrameLowering::emitPrologue(MachineFunction &MF) const {
+}
+
+void PTXFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
new file mode 100644
index 0000000..574ae7a
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
@@ -0,0 +1,43 @@
+//===--- PTXFrameLowering.h - Define frame lowering for PTX --*- C++ -*----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PTX implementation of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_FRAMEINFO_H
+#define PTX_FRAMEINFO_H
+
+#include "PTX.h"
+#include "PTXSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class PTXSubtarget;
+
+class PTXFrameLowering : public TargetFrameLowering {
+protected:
+ const PTXSubtarget &STI;
+
+public:
+ explicit PTXFrameLowering(const PTXSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 2, -2), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const { return false; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp
new file mode 100644
index 0000000..efb0e8b
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXISelDAGToDAG.cpp
@@ -0,0 +1,151 @@
+//===-- PTXISelDAGToDAG.cpp - A dag to dag inst selector for PTX ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the PTX target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTX.h"
+#include "PTXTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/DerivedTypes.h"
+
+using namespace llvm;
+
+namespace {
+// PTXDAGToDAGISel - PTX-specific code to select PTX machine
+// instructions for SelectionDAG operations.
+class PTXDAGToDAGISel : public SelectionDAGISel {
+ public:
+ PTXDAGToDAGISel(PTXTargetMachine &TM, CodeGenOpt::Level OptLevel);
+
+ virtual const char *getPassName() const {
+ return "PTX DAG->DAG Pattern Instruction Selection";
+ }
+
+ SDNode *Select(SDNode *Node);
+
+ // Complex Pattern Selectors.
+ bool SelectADDRrr(SDValue &Addr, SDValue &R1, SDValue &R2);
+ bool SelectADDRri(SDValue &Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRii(SDValue &Addr, SDValue &Base, SDValue &Offset);
+
+ // Include the pieces auto-generated from the target description
+#include "PTXGenDAGISel.inc"
+
+ private:
+ SDNode *SelectREAD_PARAM(SDNode *Node);
+
+ bool isImm(const SDValue &operand);
+ bool SelectImm(const SDValue &operand, SDValue &imm);
+}; // class PTXDAGToDAGISel
+} // namespace
+
+// createPTXISelDag - This pass converts a legalized DAG into a
+// PTX-specific DAG, ready for instruction scheduling.
+FunctionPass *llvm::createPTXISelDag(PTXTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new PTXDAGToDAGISel(TM, OptLevel);
+}
+
+PTXDAGToDAGISel::PTXDAGToDAGISel(PTXTargetMachine &TM,
+ CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(TM, OptLevel) {}
+
+SDNode *PTXDAGToDAGISel::Select(SDNode *Node) {
+ if (Node->getOpcode() == PTXISD::READ_PARAM)
+ return SelectREAD_PARAM(Node);
+ else
+ return SelectCode(Node);
+}
+
+SDNode *PTXDAGToDAGISel::SelectREAD_PARAM(SDNode *Node) {
+ SDValue index = Node->getOperand(1);
+ DebugLoc dl = Node->getDebugLoc();
+
+ if (index.getOpcode() != ISD::TargetConstant)
+ llvm_unreachable("READ_PARAM: index is not ISD::TargetConstant");
+
+ return PTXInstrInfo::
+ GetPTXMachineNode(CurDAG, PTX::LDpi, dl, MVT::i32, index);
+}
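+
+// READ_PARAM carries the parameter index as a TargetConstant; it is
+// selected by hand here into LDpi, which prints as an ld.param access of
+// the corresponding kernel parameter (see LDpi in PTXInstrInfo.td).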
+
+// Match memory operands of the form [reg+reg]; the sum is kept in a single
+// base register with a zero offset rather than split into two registers.
+bool PTXDAGToDAGISel::SelectADDRrr(SDValue &Addr, SDValue &R1, SDValue &R2) {
+ if (Addr.getOpcode() != ISD::ADD || Addr.getNumOperands() < 2 ||
+ isImm(Addr.getOperand(0)) || isImm(Addr.getOperand(1)))
+ return false;
+
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+// Match memory operand of the form [reg], [imm+reg], and [reg+imm]
+bool PTXDAGToDAGISel::SelectADDRri(SDValue &Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() != ISD::ADD) {
+ // let SelectADDRii handle the [imm] case
+ if (isImm(Addr))
+ return false;
+ // it is [reg]
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (Addr.getNumOperands() < 2)
+ return false;
+
+ // let SelectADDRii handle the [imm+imm] case
+ if (isImm(Addr.getOperand(0)) && isImm(Addr.getOperand(1)))
+ return false;
+
+ // try [reg+imm] and [imm+reg]
+ for (int i = 0; i < 2; i ++)
+ if (SelectImm(Addr.getOperand(1-i), Offset)) {
+ Base = Addr.getOperand(i);
+ return true;
+ }
+
+ // neither [reg+imm] nor [imm+reg]
+ return false;
+}
+
+// Match memory operand of the form [imm+imm] and [imm]
+bool PTXDAGToDAGISel::SelectADDRii(SDValue &Addr, SDValue &Base,
+ SDValue &Offset) {
+ // is [imm+imm]?
+ if (Addr.getOpcode() == ISD::ADD) {
+ return SelectImm(Addr.getOperand(0), Base) &&
+ SelectImm(Addr.getOperand(1), Offset);
+ }
+
+ // is [imm]?
+ if (SelectImm(Addr, Base)) {
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ return false;
+}
+
+bool PTXDAGToDAGISel::isImm(const SDValue &operand) {
+ return isa<ConstantSDNode>(operand.getNode());
+}
+
+bool PTXDAGToDAGISel::SelectImm(const SDValue &operand, SDValue &imm) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(operand.getNode());
+ if (!CN)
+ return false;
+
+ imm = CurDAG->getTargetConstant(*CN->getConstantIntValue(), MVT::i32);
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
new file mode 100644
index 0000000..e6d4490
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
@@ -0,0 +1,210 @@
+//===-- PTXISelLowering.cpp - PTX DAG Lowering Implementation -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PTXTargetLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTX.h"
+#include "PTXISelLowering.h"
+#include "PTXMachineFunctionInfo.h"
+#include "PTXRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+using namespace llvm;
+
+PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
+ : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
+ // Set up the register classes.
+ addRegisterClass(MVT::i1, PTX::PredsRegisterClass);
+ addRegisterClass(MVT::i32, PTX::RRegs32RegisterClass);
+
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+
+ // Customize translation of memory addresses
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+
+ // Compute derived properties from the register classes
+ computeRegisterProperties();
+}
+
+SDValue PTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default: llvm_unreachable("Unimplemented operand");
+ case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ }
+}
+
+const char *PTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Unknown opcode");
+ case PTXISD::READ_PARAM:
+ return "PTXISD::READ_PARAM";
+ case PTXISD::EXIT:
+ return "PTXISD::EXIT";
+ case PTXISD::RET:
+ return "PTXISD::RET";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Lower Operation
+//===----------------------------------------------------------------------===//
+
+SDValue PTXTargetLowering::
+LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+ EVT PtrVT = getPointerTy();
+ DebugLoc dl = Op.getDebugLoc();
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ return DAG.getTargetGlobalAddress(GV, dl, PtrVT);
+}
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct argmap_entry {
+ MVT::SimpleValueType VT;
+ TargetRegisterClass *RC;
+ TargetRegisterClass::iterator loc;
+
+ argmap_entry(MVT::SimpleValueType _VT, TargetRegisterClass *_RC)
+ : VT(_VT), RC(_RC), loc(_RC->begin()) {}
+
+ void reset() { loc = RC->begin(); }
+ bool operator==(MVT::SimpleValueType _VT) const { return VT == _VT; }
+} argmap[] = {
+ argmap_entry(MVT::i1, PTX::PredsRegisterClass),
+ argmap_entry(MVT::i32, PTX::RRegs32RegisterClass)
+};
+} // end anonymous namespace
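+
+// argmap pairs each supported value type with its register class plus a
+// cursor into that class. For example, an argument list (i32, i1, i32) is
+// assigned (r1, p1, r2), since the allocation below skips the first
+// register of each class.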
+
+SDValue PTXTargetLowering::
+ LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ if (isVarArg) llvm_unreachable("PTX does not support varargs");
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
+
+ switch (CallConv) {
+ default:
+ llvm_unreachable("Unsupported calling convention");
+ break;
+ case CallingConv::PTX_Kernel:
+ MFI->setKernel(true);
+ break;
+ case CallingConv::PTX_Device:
+ MFI->setKernel(false);
+ break;
+ }
+
+ // Make sure we don't add argument registers twice
+ if (MFI->isDoneAddArg())
+ llvm_unreachable("cannot add argument registers twice");
+
+ // Reset argmap before allocation
+ for (struct argmap_entry *i = argmap, *e = argmap + array_lengthof(argmap);
+ i != e; ++ i)
+ i->reset();
+
+ for (int i = 0, e = Ins.size(); i != e; ++ i) {
+ MVT::SimpleValueType VT = Ins[i].VT.SimpleTy;
+
+ struct argmap_entry *entry = std::find(argmap,
+ argmap + array_lengthof(argmap), VT);
+ if (entry == argmap + array_lengthof(argmap))
+ llvm_unreachable("Type of argument is not supported");
+
+ if (MFI->isKernel() && entry->RC == PTX::PredsRegisterClass)
+ llvm_unreachable("cannot pass preds to kernel");
+
+ MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
+
+ unsigned preg = *++(entry->loc); // allocation starts from register 1
+ unsigned vreg = RegInfo.createVirtualRegister(entry->RC);
+ RegInfo.addLiveIn(preg, vreg);
+
+ MFI->addArgReg(preg);
+
+ SDValue inval;
+ if (MFI->isKernel())
+ inval = DAG.getNode(PTXISD::READ_PARAM, dl, VT, Chain,
+ DAG.getTargetConstant(i, MVT::i32));
+ else
+ inval = DAG.getCopyFromReg(Chain, dl, vreg, VT);
+ InVals.push_back(inval);
+ }
+
+ MFI->doneAddArg();
+
+ return Chain;
+}
+
+SDValue PTXTargetLowering::
+ LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl,
+ SelectionDAG &DAG) const {
+ if (isVarArg) llvm_unreachable("PTX does not support varargs");
+
+ switch (CallConv) {
+ default:
+ llvm_unreachable("Unsupported calling convention.");
+ case CallingConv::PTX_Kernel:
+ assert(Outs.size() == 0 && "Kernel must return void.");
+ return DAG.getNode(PTXISD::EXIT, dl, MVT::Other, Chain);
+ case CallingConv::PTX_Device:
+ assert(Outs.size() <= 1 && "Can at most return one value.");
+ break;
+ }
+
+ // PTX_Device
+
+ // return void
+ if (Outs.size() == 0)
+ return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain);
+
+ assert(Outs[0].VT == MVT::i32 && "Can return only basic types");
+
+ SDValue Flag;
+ unsigned reg = PTX::R0;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
+ MFI->setRetReg(reg);
+
+ // If this is the first return lowered for this function, add the regs to the
+ // liveout set for the function
+ if (MF.getRegInfo().liveout_empty())
+ MF.getRegInfo().addLiveOut(reg);
+
+ // Copy the result values into the output registers
+ Chain = DAG.getCopyToReg(Chain, dl, reg, OutVals[0], Flag);
+
+ // Glue the copy to the return so the scheduler cannot separate the
+ // emitted copies from the return.
+ Flag = Chain.getValue(1);
+
+ return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain, Flag);
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.h b/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
new file mode 100644
index 0000000..b03a9f6
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
@@ -0,0 +1,67 @@
+//==-- PTXISelLowering.h - PTX DAG Lowering Interface ------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that PTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_ISEL_LOWERING_H
+#define PTX_ISEL_LOWERING_H
+
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+class PTXSubtarget;
+class PTXTargetMachine;
+
+namespace PTXISD {
+ enum NodeType {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ READ_PARAM,
+ EXIT,
+ RET
+ };
+} // namespace PTXISD
+
+class PTXTargetLowering : public TargetLowering {
+ public:
+ explicit PTXTargetLowering(TargetMachine &TM);
+
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+
+ virtual unsigned getFunctionAlignment(const Function *F) const { return 2; }
+
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+ virtual SDValue
+ LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl,
+ SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ virtual SDValue
+ LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl,
+ SelectionDAG &DAG) const;
+
+ private:
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+}; // class PTXTargetLowering
+} // namespace llvm
+
+#endif // PTX_ISEL_LOWERING_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td b/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
new file mode 100644
index 0000000..e4e0999
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
@@ -0,0 +1,24 @@
+//===- PTXInstrFormats.td - PTX Instruction Formats ----------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// PTX Predicate operand, default to (0, 0) = (zero-reg, always).
+// Leave PrintMethod empty; predicate printing is defined elsewhere.
+def pred : PredicateOperand<OtherVT, (ops Preds, i32imm),
+ (ops (i1 zero_reg), (i32 0))>;
+
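+// The InstPTX class below appends this default predicate pair to every
+// instruction's input operand list via !con; for example,
+// (ins RRegs32:$a, RRegs32:$b) becomes (ins RRegs32:$a, RRegs32:$b, pred:$_p),
+// which defaults to "always execute".
+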
+let Namespace = "PTX" in {
+ class InstPTX<dag oops, dag iops, string asmstr, list<dag> pattern>
+ : Instruction {
+ dag OutOperandList = oops;
+ dag InOperandList = !con(iops, (ins pred:$_p));
+ let AsmString = asmstr; // Predicate printing is defined elsewhere.
+ let Pattern = pattern;
+ let isPredicable = 1;
+ }
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
new file mode 100644
index 0000000..805759b
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
@@ -0,0 +1,87 @@
+//===- PTXInstrInfo.cpp - PTX Instruction Information ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PTX implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTX.h"
+#include "PTXInstrInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#include "PTXGenInstrInfo.inc"
+
+PTXInstrInfo::PTXInstrInfo(PTXTargetMachine &_TM)
+ : TargetInstrInfoImpl(PTXInsts, array_lengthof(PTXInsts)),
+ RI(_TM, *this), TM(_TM) {}
+
+static const struct map_entry {
+ const TargetRegisterClass *cls;
+ const int opcode;
+} map[] = {
+ { &PTX::RRegs32RegClass, PTX::MOVrr },
+ { &PTX::PredsRegClass, PTX::MOVpp }
+};
+
+void PTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg, unsigned SrcReg,
+ bool KillSrc) const {
+ for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++ i)
+ if (map[i].cls->contains(DstReg, SrcReg)) {
+ BuildMI(MBB, I, DL,
+ get(map[i].opcode), DstReg).addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+
+ llvm_unreachable("Impossible reg-to-reg copy");
+}
+
+bool PTXInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg,
+ const TargetRegisterClass *DstRC,
+ const TargetRegisterClass *SrcRC,
+ DebugLoc DL) const {
+ if (DstRC != SrcRC)
+ return false;
+
+ for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++ i)
+ if (DstRC == map[i].cls) {
+ MachineInstr *MI = BuildMI(MBB, I, DL, get(map[i].opcode),
+ DstReg).addReg(SrcReg);
+ if (MI->findFirstPredOperandIdx() == -1) {
+ MI->addOperand(MachineOperand::CreateReg(0, false));
+ MI->addOperand(MachineOperand::CreateImm(/*IsInv=*/0));
+ }
+ return true;
+ }
+
+ return false;
+}
+
+bool PTXInstrInfo::isMoveInstr(const MachineInstr& MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case PTX::MOVpp:
+ case PTX::MOVrr:
+ assert(MI.getNumOperands() >= 2 &&
+ MI.getOperand(0).isReg() && MI.getOperand(1).isReg() &&
+ "Invalid register-register move instruction");
+ SrcSubIdx = DstSubIdx = 0; // No sub-registers
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ return true;
+ }
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
new file mode 100644
index 0000000..e7f00f0
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
@@ -0,0 +1,75 @@
+//===- PTXInstrInfo.h - PTX Instruction Information -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PTX implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_INSTR_INFO_H
+#define PTX_INSTR_INFO_H
+
+#include "PTXRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+namespace llvm {
+class PTXTargetMachine;
+
+class PTXInstrInfo : public TargetInstrInfoImpl {
+ private:
+ const PTXRegisterInfo RI;
+ PTXTargetMachine &TM;
+
+ public:
+ explicit PTXInstrInfo(PTXTargetMachine &_TM);
+
+ virtual const PTXRegisterInfo &getRegisterInfo() const { return RI; }
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg, unsigned SrcReg,
+ bool KillSrc) const;
+
+ virtual bool copyRegToReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg,
+ const TargetRegisterClass *DstRC,
+ const TargetRegisterClass *SrcRC,
+ DebugLoc DL) const;
+
+ virtual bool isMoveInstr(const MachineInstr& MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
+
+ // static helper routines
+
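+ // These overloads append the default predicate operand pair expected by
+ // InstPTX (register 0 = no guard predicate, immediate 0 = not inverted),
+ // so hand-built machine nodes match the TableGen-generated operand lists.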
+ static MachineSDNode *GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
+ DebugLoc dl, EVT VT,
+ SDValue Op1) {
+ SDValue pred_reg = DAG->getRegister(0, MVT::i1);
+ SDValue pred_imm = DAG->getTargetConstant(0, MVT::i32);
+ SDValue ops[] = { Op1, pred_reg, pred_imm };
+ return DAG->getMachineNode(Opcode, dl, VT, ops, array_lengthof(ops));
+ }
+
+ static MachineSDNode *GetPTXMachineNode(SelectionDAG *DAG, unsigned Opcode,
+ DebugLoc dl, EVT VT,
+ SDValue Op1,
+ SDValue Op2) {
+ SDValue pred_reg = DAG->getRegister(0, MVT::i1);
+ SDValue pred_imm = DAG->getTargetConstant(0, MVT::i32);
+ SDValue ops[] = { Op1, Op2, pred_reg, pred_imm };
+ return DAG->getMachineNode(Opcode, dl, VT, ops, array_lengthof(ops));
+ }
+
+ }; // class PTXInstrInfo
+} // namespace llvm
+
+#endif // PTX_INSTR_INFO_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
new file mode 100644
index 0000000..9a74778
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
@@ -0,0 +1,257 @@
+//===- PTXInstrInfo.td - PTX Instruction defs -----------------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction format superclass
+//===----------------------------------------------------------------------===//
+
+include "PTXInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Pattern Stuff
+//===----------------------------------------------------------------------===//
+
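+// Each load/store fragment below matches only when the pointer operand's
+// LLVM address space names the corresponding PTX state space (global,
+// const, local, param or shared).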
+def load_global : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::GLOBAL;
+ return false;
+}]>;
+
+def load_constant : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::CONSTANT;
+ return false;
+}]>;
+
+def load_local : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::LOCAL;
+ return false;
+}]>;
+
+def load_parameter : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::PARAMETER;
+ return false;
+}]>;
+
+def load_shared : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<LoadSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::SHARED;
+ return false;
+}]>;
+
+def store_global
+ : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::GLOBAL;
+ return false;
+}]>;
+
+def store_local
+ : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::LOCAL;
+ return false;
+}]>;
+
+def store_parameter
+ : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::PARAMETER;
+ return false;
+}]>;
+
+def store_shared
+ : PatFrag<(ops node:$d, node:$ptr), (store node:$d, node:$ptr), [{
+ const Value *Src;
+ const PointerType *PT;
+ if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
+ (PT = dyn_cast<PointerType>(Src->getType())))
+ return PT->getAddressSpace() == PTX::SHARED;
+ return false;
+}]>;
+
+// Addressing modes.
+def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [], []>;
+def ADDRii : ComplexPattern<i32, 2, "SelectADDRii", [], []>;
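+// These correspond to the SelectADDR* routines in PTXISelDAGToDAG.cpp:
+// ADDRrr covers [reg+reg], ADDRri covers [reg] and [reg+imm], and ADDRii
+// covers [imm] and [imm+imm].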
+
+// Address operands
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops RRegs32, i32imm);
+}
+def MEMii : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops i32imm, i32imm);
+}
+def MEMpi : Operand<i32> {
+ let PrintMethod = "printParamOperand";
+ let MIOperandInfo = (ops i32imm);
+}
+
+//===----------------------------------------------------------------------===//
+// PTX Specific Node Definitions
+//===----------------------------------------------------------------------===//
+
+// PTX allows generic 3-register shifts like shl r0, r1, r2
+def PTXshl : SDNode<"ISD::SHL", SDTIntBinOp>;
+def PTXsrl : SDNode<"ISD::SRL", SDTIntBinOp>;
+def PTXsra : SDNode<"ISD::SRA", SDTIntBinOp>;
+
+def PTXexit
+ : SDNode<"PTXISD::EXIT", SDTNone, [SDNPHasChain]>;
+def PTXret
+ : SDNode<"PTXISD::RET", SDTNone, [SDNPHasChain]>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+multiclass INT3<string opcstr, SDNode opnode> {
+ def rr : InstPTX<(outs RRegs32:$d),
+ (ins RRegs32:$a, RRegs32:$b),
+ !strconcat(opcstr, ".%type\t$d, $a, $b"),
+ [(set RRegs32:$d, (opnode RRegs32:$a, RRegs32:$b))]>;
+ def ri : InstPTX<(outs RRegs32:$d),
+ (ins RRegs32:$a, i32imm:$b),
+ !strconcat(opcstr, ".%type\t$d, $a, $b"),
+ [(set RRegs32:$d, (opnode RRegs32:$a, imm:$b))]>;
+}
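+
+// For example, defm ADD : INT3<"add", add> below expands into ADDrr and
+// ADDri instructions that print as add.%type with either two registers or
+// a register and an immediate.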
+
+// no %type directive, non-commutable
+multiclass INT3ntnc<string opcstr, SDNode opnode> {
+ def rr : InstPTX<(outs RRegs32:$d),
+ (ins RRegs32:$a, RRegs32:$b),
+ !strconcat(opcstr, "\t$d, $a, $b"),
+ [(set RRegs32:$d, (opnode RRegs32:$a, RRegs32:$b))]>;
+ def ri : InstPTX<(outs RRegs32:$d),
+ (ins RRegs32:$a, i32imm:$b),
+ !strconcat(opcstr, "\t$d, $a, $b"),
+ [(set RRegs32:$d, (opnode RRegs32:$a, imm:$b))]>;
+ def ir : InstPTX<(outs RRegs32:$d),
+ (ins i32imm:$a, RRegs32:$b),
+ !strconcat(opcstr, "\t$d, $a, $b"),
+ [(set RRegs32:$d, (opnode imm:$a, RRegs32:$b))]>;
+}
+
+multiclass PTX_LD<string opstr, RegisterClass RC, PatFrag pat_load> {
+ def rr : InstPTX<(outs RC:$d),
+ (ins MEMri:$a),
+ !strconcat(opstr, ".%type\t$d, [$a]"),
+ [(set RC:$d, (pat_load ADDRrr:$a))]>;
+ def ri : InstPTX<(outs RC:$d),
+ (ins MEMri:$a),
+ !strconcat(opstr, ".%type\t$d, [$a]"),
+ [(set RC:$d, (pat_load ADDRri:$a))]>;
+ def ii : InstPTX<(outs RC:$d),
+ (ins MEMii:$a),
+ !strconcat(opstr, ".%type\t$d, [$a]"),
+ [(set RC:$d, (pat_load ADDRii:$a))]>;
+}
+
+multiclass PTX_ST<string opstr, RegisterClass RC, PatFrag pat_store> {
+ def rr : InstPTX<(outs),
+ (ins RC:$d, MEMri:$a),
+ !strconcat(opstr, ".%type\t[$a], $d"),
+ [(pat_store RC:$d, ADDRrr:$a)]>;
+ def ri : InstPTX<(outs),
+ (ins RC:$d, MEMri:$a),
+ !strconcat(opstr, ".%type\t[$a], $d"),
+ [(pat_store RC:$d, ADDRri:$a)]>;
+ def ii : InstPTX<(outs),
+ (ins RC:$d, MEMii:$a),
+ !strconcat(opstr, ".%type\t[$a], $d"),
+ [(pat_store RC:$d, ADDRii:$a)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+///===- Integer Arithmetic Instructions -----------------------------------===//
+
+defm ADD : INT3<"add", add>;
+defm SUB : INT3<"sub", sub>;
+
+///===- Logic and Shift Instructions --------------------------------------===//
+
+defm SHL : INT3ntnc<"shl.b32", PTXshl>;
+defm SRL : INT3ntnc<"shr.u32", PTXsrl>;
+defm SRA : INT3ntnc<"shr.s32", PTXsra>;
+
+///===- Data Movement and Conversion Instructions -------------------------===//
+
+let neverHasSideEffects = 1 in {
+ // rely on isMoveInstr to separate MOVpp, MOVrr, etc.
+ def MOVpp
+ : InstPTX<(outs Preds:$d), (ins Preds:$a), "mov.pred\t$d, $a", []>;
+ def MOVrr
+ : InstPTX<(outs RRegs32:$d), (ins RRegs32:$a), "mov.%type\t$d, $a", []>;
+}
+
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
+ def MOVpi
+ : InstPTX<(outs Preds:$d), (ins i1imm:$a), "mov.pred\t$d, $a",
+ [(set Preds:$d, imm:$a)]>;
+ def MOVri
+ : InstPTX<(outs RRegs32:$d), (ins i32imm:$a), "mov.s32\t$d, $a",
+ [(set RRegs32:$d, imm:$a)]>;
+}
+
+defm LDg : PTX_LD<"ld.global", RRegs32, load_global>;
+defm LDc : PTX_LD<"ld.const", RRegs32, load_constant>;
+defm LDl : PTX_LD<"ld.local", RRegs32, load_local>;
+defm LDp : PTX_LD<"ld.param", RRegs32, load_parameter>;
+defm LDs : PTX_LD<"ld.shared", RRegs32, load_shared>;
+
+def LDpi : InstPTX<(outs RRegs32:$d), (ins MEMpi:$a),
+ "ld.param.%type\t$d, [$a]", []>;
+
+defm STg : PTX_ST<"st.global", RRegs32, store_global>;
+defm STl : PTX_ST<"st.local", RRegs32, store_local>;
+// Stores to the parameter state space appear to require PTX ISA 2.0 or
+// higher, so STp is left disabled for now.
+// defm STp : PTX_ST<"st.param", RRegs32, store_parameter>;
+defm STs : PTX_ST<"st.shared", RRegs32, store_shared>;
+
+///===- Control Flow Instructions -----------------------------------------===//
+
+let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
+ def EXIT : InstPTX<(outs), (ins), "exit", [(PTXexit)]>;
+ def RET : InstPTX<(outs), (ins), "ret", [(PTXret)]>;
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.cpp
new file mode 100644
index 0000000..b670abd
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.cpp
@@ -0,0 +1,30 @@
+//===-- PTXMCAsmInfo.cpp - PTX asm properties -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the definitions of the PTXMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTXMCAsmInfo.h"
+
+using namespace llvm;
+
+PTXMCAsmInfo::PTXMCAsmInfo(const Target &T, const StringRef &TT) {
+ CommentString = "//";
+
+ PrivateGlobalPrefix = "$L__";
+
+ AllowPeriodsInName = false;
+
+ HasSetDirective = false;
+
+ HasDotTypeDotSizeDirective = false;
+
+ HasSingleParameterDotFile = false;
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.h b/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.h
new file mode 100644
index 0000000..03f5d66
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMCAsmInfo.h
@@ -0,0 +1,28 @@
+//=====-- PTXMCAsmInfo.h - PTX asm properties -----------------*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PTXMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_MCASM_INFO_H
+#define PTX_MCASM_INFO_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+ class Target;
+ class StringRef;
+
+ struct PTXMCAsmInfo : public MCAsmInfo {
+ explicit PTXMCAsmInfo(const Target &T, const StringRef &TT);
+ };
+} // namespace llvm
+
+#endif // PTX_MCASM_INFO_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp b/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
new file mode 100644
index 0000000..0886ba8
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
@@ -0,0 +1,542 @@
+//===- lib/Target/PTX/PTXMCAsmStreamer.cpp - PTX Text Assembly Output -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmInfo.h"
+
+using namespace llvm;
+
+namespace {
+class PTXMCAsmStreamer : public MCStreamer {
+ formatted_raw_ostream &OS;
+ const MCAsmInfo &MAI;
+ OwningPtr<MCInstPrinter> InstPrinter;
+ OwningPtr<MCCodeEmitter> Emitter;
+
+ SmallString<128> CommentToEmit;
+ raw_svector_ostream CommentStream;
+
+ unsigned IsVerboseAsm : 1;
+ unsigned ShowInst : 1;
+
+public:
+ PTXMCAsmStreamer(MCContext &Context,
+ formatted_raw_ostream &os,
+ bool isVerboseAsm, bool useLoc,
+ MCInstPrinter *printer,
+ MCCodeEmitter *emitter,
+ bool showInst)
+ : MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
+ InstPrinter(printer), Emitter(emitter), CommentStream(CommentToEmit),
+ IsVerboseAsm(isVerboseAsm),
+ ShowInst(showInst) {
+ if (InstPrinter && IsVerboseAsm)
+ InstPrinter->setCommentStream(CommentStream);
+ }
+
+ ~PTXMCAsmStreamer() {}
+
+ inline void EmitEOL() {
+ // If we don't have any comments, just emit a \n.
+ if (!IsVerboseAsm) {
+ OS << '\n';
+ return;
+ }
+ EmitCommentsAndEOL();
+ }
+ void EmitCommentsAndEOL();
+
+ /// isVerboseAsm - Return true if this streamer supports verbose assembly at
+ /// all.
+ virtual bool isVerboseAsm() const { return IsVerboseAsm; }
+
+ /// hasRawTextSupport - We support EmitRawText.
+ virtual bool hasRawTextSupport() const { return true; }
+
+ /// AddComment - Add a comment that can be emitted to the generated .s
+ /// file if applicable as a QoI issue to make the output of the compiler
+ /// more readable. This only affects the MCAsmStreamer, and only when
+ /// verbose assembly output is enabled.
+ virtual void AddComment(const Twine &T);
+
+ /// AddEncodingComment - Add a comment showing the encoding of an instruction.
+ virtual void AddEncodingComment(const MCInst &Inst);
+
+ /// GetCommentOS - Return a raw_ostream that comments can be written to.
+ /// Unlike AddComment, you are required to terminate comments with \n if you
+ /// use this method.
+ virtual raw_ostream &GetCommentOS() {
+ if (!IsVerboseAsm)
+ return nulls(); // Discard comments unless in verbose asm mode.
+ return CommentStream;
+ }
+
+ /// AddBlankLine - Emit a blank line to a .s file to pretty it up.
+ virtual void AddBlankLine() {
+ EmitEOL();
+ }
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void ChangeSection(const MCSection *Section);
+ virtual void InitSections() {}
+
+ virtual void EmitLabel(MCSymbol *Symbol);
+
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+
+ virtual void EmitThumbFunc(MCSymbol *Func);
+
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+
+ virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label);
+
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
+
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+ virtual void EmitCOFFSymbolType(int Type);
+ virtual void EndCOFFSymbolDef();
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+
+ /// EmitLocalCommonSymbol - Emit a local common (.lcomm) symbol.
+ ///
+ /// @param Symbol - The common symbol to emit.
+ /// @param Size - The size of the common symbol.
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size);
+
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ unsigned Size = 0, unsigned ByteAlignment = 0);
+
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0);
+
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace);
+ virtual void EmitULEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+ virtual void EmitSLEB128Value(const MCExpr *Value, unsigned AddrSpace = 0);
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+
+
+ virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace);
+
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
+
+ virtual void EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0);
+
+ virtual void EmitFileDirective(StringRef Filename);
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+
+ virtual void EmitInstruction(const MCInst &Inst);
+
+ /// EmitRawText - If this file is backed by an assembly streamer, this dumps
+ /// the specified string in the output .s file. This capability is
+ /// indicated by the hasRawTextSupport() predicate.
+ virtual void EmitRawText(StringRef String);
+
+ virtual void Finish();
+
+ /// @}
+
+}; // class PTXMCAsmStreamer
+
+} // end anonymous namespace
+
+/// TODO: Add appropriate implementation of Emit*() methods when needed
+
+void PTXMCAsmStreamer::AddComment(const Twine &T) {
+ if (!IsVerboseAsm) return;
+
+ // Make sure that CommentStream is flushed.
+ CommentStream.flush();
+
+ T.toVector(CommentToEmit);
+ // Each comment goes on its own line.
+ CommentToEmit.push_back('\n');
+
+ // Tell the comment stream that the vector changed underneath it.
+ CommentStream.resync();
+}
+
+void PTXMCAsmStreamer::EmitCommentsAndEOL() {
+ if (CommentToEmit.empty() && CommentStream.GetNumBytesInBuffer() == 0) {
+ OS << '\n';
+ return;
+ }
+
+ CommentStream.flush();
+ StringRef Comments = CommentToEmit.str();
+
+ assert(Comments.back() == '\n' &&
+ "Comment array not newline terminated");
+ do {
+ // Emit a line of comments.
+ OS.PadToColumn(MAI.getCommentColumn());
+ size_t Position = Comments.find('\n');
+ OS << MAI.getCommentString() << ' ' << Comments.substr(0, Position) << '\n';
+
+ Comments = Comments.substr(Position+1);
+ } while (!Comments.empty());
+
+ CommentToEmit.clear();
+ // Tell the comment stream that the vector changed underneath it.
+ CommentStream.resync();
+}
+
+static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
+ assert(Bytes && "Invalid size!");
+ return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8));
+}
+
+void PTXMCAsmStreamer::ChangeSection(const MCSection *Section) {
+ assert(Section && "Cannot switch to a null section!");
+}
+
+void PTXMCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
+ assert(getCurrentSection() && "Cannot emit before setting section!");
+
+ OS << *Symbol << MAI.getLabelSuffix();
+ EmitEOL();
+ Symbol->setSection(*getCurrentSection());
+}
+
+void PTXMCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {}
+
+void PTXMCAsmStreamer::EmitThumbFunc(MCSymbol *Func) {}
+
+void PTXMCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ OS << *Symbol << " = " << *Value;
+ EmitEOL();
+
+ // FIXME: Lift context changes into super class.
+ Symbol->setVariableValue(Value);
+}
+
+void PTXMCAsmStreamer::EmitWeakReference(MCSymbol *Alias,
+ const MCSymbol *Symbol) {
+ OS << ".weakref " << *Alias << ", " << *Symbol;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitDwarfAdvanceLineAddr(int64_t LineDelta,
+ const MCSymbol *LastLabel,
+ const MCSymbol *Label) {
+ report_fatal_error("Unimplemented.");
+}
+
+void PTXMCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {}
+
+void PTXMCAsmStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
+
+void PTXMCAsmStreamer::BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
+
+void PTXMCAsmStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {}
+
+void PTXMCAsmStreamer::EmitCOFFSymbolType(int Type) {}
+
+void PTXMCAsmStreamer::EndCOFFSymbolDef() {}
+
+void PTXMCAsmStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
+
+void PTXMCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {}
+
+void PTXMCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {}
+
+void PTXMCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size, unsigned ByteAlignment) {}
+
+void PTXMCAsmStreamer::EmitTBSSSymbol(const MCSection *Section,
+ MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {}
+
+static inline char toOctal(int X) { return (X&7)+'0'; }
+
+static void PrintQuotedString(StringRef Data, raw_ostream &OS) {
+ OS << '"';
+
+ for (unsigned i = 0, e = Data.size(); i != e; ++i) {
+ unsigned char C = Data[i];
+ if (C == '"' || C == '\\') {
+ OS << '\\' << (char)C;
+ continue;
+ }
+
+ if (isprint((unsigned char)C)) {
+ OS << (char)C;
+ continue;
+ }
+
+ switch (C) {
+ case '\b': OS << "\\b"; break;
+ case '\f': OS << "\\f"; break;
+ case '\n': OS << "\\n"; break;
+ case '\r': OS << "\\r"; break;
+ case '\t': OS << "\\t"; break;
+ default:
+ OS << '\\';
+ OS << toOctal(C >> 6);
+ OS << toOctal(C >> 3);
+ OS << toOctal(C >> 0);
+ break;
+ }
+ }
+
+ OS << '"';
+}
+
+void PTXMCAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
+ if (Data.empty()) return;
+
+ if (Data.size() == 1) {
+ OS << MAI.getData8bitsDirective(AddrSpace);
+ OS << (unsigned)(unsigned char)Data[0];
+ EmitEOL();
+ return;
+ }
+
+ // If the data ends with 0 and the target supports .asciz, use it, otherwise
+ // use .ascii
+ if (MAI.getAscizDirective() && Data.back() == 0) {
+ OS << MAI.getAscizDirective();
+ Data = Data.substr(0, Data.size()-1);
+ } else {
+ OS << MAI.getAsciiDirective();
+ }
+
+ OS << ' ';
+ PrintQuotedString(Data, OS);
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ bool isPCRel, unsigned AddrSpace) {
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
+ assert(!isPCRel && "Cannot emit pc relative relocations!");
+ const char *Directive = 0;
+ switch (Size) {
+ default: break;
+ case 1: Directive = MAI.getData8bitsDirective(AddrSpace); break;
+ case 2: Directive = MAI.getData16bitsDirective(AddrSpace); break;
+ case 4: Directive = MAI.getData32bitsDirective(AddrSpace); break;
+ case 8:
+ Directive = MAI.getData64bitsDirective(AddrSpace);
+ // If the target doesn't support 64-bit data, emit as two 32-bit halves.
+ if (Directive) break;
+ int64_t IntValue;
+ if (!Value->EvaluateAsAbsolute(IntValue))
+ report_fatal_error("Don't know how to emit this value.");
+ if (getContext().getTargetAsmInfo().isLittleEndian()) {
+ EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
+ EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
+ } else {
+ EmitIntValue((uint32_t)(IntValue >> 32), 4, AddrSpace);
+ EmitIntValue((uint32_t)(IntValue >> 0 ), 4, AddrSpace);
+ }
+ return;
+ }
+
+ assert(Directive && "Invalid size for machine code value!");
+ OS << Directive << *Value;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitULEB128Value(const MCExpr *Value,
+ unsigned AddrSpace) {
+ assert(MAI.hasLEB128() && "Cannot print a .uleb");
+ OS << ".uleb128 " << *Value;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitSLEB128Value(const MCExpr *Value,
+ unsigned AddrSpace) {
+ assert(MAI.hasLEB128() && "Cannot print a .sleb");
+ OS << ".sleb128 " << *Value;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitGPRel32Value(const MCExpr *Value) {
+ assert(MAI.getGPRel32Directive() != 0);
+ OS << MAI.getGPRel32Directive() << *Value;
+ EmitEOL();
+}
+
+
+/// EmitFill - Emit NumBytes bytes worth of the value specified by
+/// FillValue. This implements directives such as '.space'.
+void PTXMCAsmStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace) {
+ if (NumBytes == 0) return;
+
+ if (AddrSpace == 0)
+ if (const char *ZeroDirective = MAI.getZeroDirective()) {
+ OS << ZeroDirective << NumBytes;
+ if (FillValue != 0)
+ OS << ',' << (int)FillValue;
+ EmitEOL();
+ return;
+ }
+
+ // Emit a byte at a time.
+ MCStreamer::EmitFill(NumBytes, FillValue, AddrSpace);
+}
+
+void PTXMCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
+ unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+ // Some assemblers don't support non-power of two alignments, so we always
+ // emit alignments as a power of two if possible.
+ if (isPowerOf2_32(ByteAlignment)) {
+ switch (ValueSize) {
+ default: llvm_unreachable("Invalid size for machine code value!");
+ case 1: OS << MAI.getAlignDirective(); break;
+ // FIXME: use MAI for this!
+ case 2: OS << ".p2alignw "; break;
+ case 4: OS << ".p2alignl "; break;
+ case 8: llvm_unreachable("Unsupported alignment size!");
+ }
+
+ if (MAI.getAlignmentIsInBytes())
+ OS << ByteAlignment;
+ else
+ OS << Log2_32(ByteAlignment);
+
+ if (Value || MaxBytesToEmit) {
+ OS << ", 0x";
+ OS.write_hex(truncateToSize(Value, ValueSize));
+
+ if (MaxBytesToEmit)
+ OS << ", " << MaxBytesToEmit;
+ }
+ EmitEOL();
+ return;
+ }
+
+ // Non-power of two alignment. This is not widely supported by assemblers.
+ // FIXME: Parameterize this based on MAI.
+ switch (ValueSize) {
+ default: llvm_unreachable("Invalid size for machine code value!");
+ case 1: OS << ".balign"; break;
+ case 2: OS << ".balignw"; break;
+ case 4: OS << ".balignl"; break;
+ case 8: llvm_unreachable("Unsupported alignment size!");
+ }
+
+ OS << ' ' << ByteAlignment;
+ OS << ", " << truncateToSize(Value, ValueSize);
+ if (MaxBytesToEmit)
+ OS << ", " << MaxBytesToEmit;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {}
+
+void PTXMCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {}
+
+
+void PTXMCAsmStreamer::EmitFileDirective(StringRef Filename) {
+ assert(MAI.hasSingleParameterDotFile());
+ OS << "\t.file\t";
+ PrintQuotedString(Filename, OS);
+ EmitEOL();
+}
+
+// FIXME: should we inherit from MCAsmStreamer?
+bool PTXMCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Filename){
+ OS << "\t.file\t" << FileNo << ' ';
+ PrintQuotedString(Filename, OS);
+ EmitEOL();
+ return this->MCStreamer::EmitDwarfFileDirective(FileNo, Filename);
+}
+
+void PTXMCAsmStreamer::AddEncodingComment(const MCInst &Inst) {}
+
+void PTXMCAsmStreamer::EmitInstruction(const MCInst &Inst) {
+ assert(getCurrentSection() && "Cannot emit contents before setting section!");
+
+ // Show the encoding in a comment if we have a code emitter.
+ if (Emitter)
+ AddEncodingComment(Inst);
+
+ // Show the MCInst if enabled.
+ if (ShowInst) {
+ Inst.dump_pretty(GetCommentOS(), &MAI, InstPrinter.get(), "\n ");
+ GetCommentOS() << "\n";
+ }
+
+ // If we have an AsmPrinter, use that to print, otherwise print the MCInst.
+ if (InstPrinter)
+ InstPrinter->printInst(&Inst, OS);
+ else
+ Inst.print(OS, &MAI);
+ EmitEOL();
+}
+
+/// EmitRawText - If this file is backed by an assembly streamer, this dumps
+/// the specified string in the output .s file. This capability is
+/// indicated by the hasRawTextSupport() predicate.
+void PTXMCAsmStreamer::EmitRawText(StringRef String) {
+ if (!String.empty() && String.back() == '\n')
+ String = String.substr(0, String.size()-1);
+ OS << String;
+ EmitEOL();
+}
+
+void PTXMCAsmStreamer::Finish() {}
+
+namespace llvm {
+ MCStreamer *createPTXAsmStreamer(MCContext &Context,
+ formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc,
+ MCInstPrinter *IP,
+ MCCodeEmitter *CE, TargetAsmBackend *TAB,
+ bool ShowInst) {
+ return new PTXMCAsmStreamer(Context, OS, isVerboseAsm, useLoc,
+ IP, CE, ShowInst);
+ }
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp b/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
new file mode 100644
index 0000000..b37c740
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
@@ -0,0 +1,96 @@
+//===-- PTXMFInfoExtract.cpp - Extract PTX machine function info ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an information extractor for PTX machine functions.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ptx-mf-info-extract"
+
+#include "PTX.h"
+#include "PTXTargetMachine.h"
+#include "PTXMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+// NOTE: PTXMFInfoExtract must run after register allocation, since it
+// scans which physical registers are actually used.
+
+namespace llvm {
+ /// PTXMFInfoExtract - PTX-specific code to extract PTX machine function
+ /// information for PTXAsmPrinter.
+ ///
+ class PTXMFInfoExtract : public MachineFunctionPass {
+ private:
+ static char ID;
+
+ public:
+ PTXMFInfoExtract(PTXTargetMachine &TM, CodeGenOpt::Level OptLevel)
+ : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual const char *getPassName() const {
+ return "PTX Machine Function Info Extractor";
+ }
+ }; // class PTXMFInfoExtract
+} // namespace llvm
+
+using namespace llvm;
+
+char PTXMFInfoExtract::ID = 0;
+
+bool PTXMFInfoExtract::runOnMachineFunction(MachineFunction &MF) {
+ PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ DEBUG(dbgs() << "******** PTX FUNCTION LOCAL VAR REG DEF ********\n");
+
+ unsigned retreg = MFI->retReg();
+
+ DEBUG(dbgs()
+ << "PTX::NoRegister == " << PTX::NoRegister << "\n"
+ << "PTX::NUM_TARGET_REGS == " << PTX::NUM_TARGET_REGS << "\n");
+
+ DEBUG(for (unsigned reg = PTX::NoRegister + 1;
+ reg < PTX::NUM_TARGET_REGS; ++reg)
+ if (MRI.isPhysRegUsed(reg))
+ dbgs() << "Used Reg: " << reg << "\n";);
+
+ // FIXME: This is a slow linear scan over all target registers.
+ for (unsigned reg = PTX::NoRegister + 1; reg < PTX::NUM_TARGET_REGS; ++reg)
+ if (MRI.isPhysRegUsed(reg) &&
+ reg != retreg &&
+ (MFI->isKernel() || !MFI->isArgReg(reg)))
+ MFI->addLocalVarReg(reg);
+
+ // Notify MachineFunctionInfo that we are done adding local variable regs
+ MFI->doneAddLocalVar();
+
+ DEBUG(dbgs() << "Return Reg: " << retreg << "\n");
+
+ DEBUG(for (PTXMachineFunctionInfo::reg_iterator
+ i = MFI->argRegBegin(), e = MFI->argRegEnd();
+ i != e; ++i)
+ dbgs() << "Arg Reg: " << *i << "\n";);
+
+ DEBUG(for (PTXMachineFunctionInfo::reg_iterator
+ i = MFI->localVarRegBegin(), e = MFI->localVarRegEnd();
+ i != e; ++i)
+ dbgs() << "Local Var Reg: " << *i << "\n";);
+
+ return false;
+}
+
+FunctionPass *llvm::createPTXMFInfoExtract(PTXTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new PTXMFInfoExtract(TM, OptLevel);
+}
diff --git a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
new file mode 100644
index 0000000..56d044b
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
@@ -0,0 +1,79 @@
+//===- PTXMachineFunctionInfo.h - PTX machine function info ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares PTX-specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_MACHINE_FUNCTION_INFO_H
+#define PTX_MACHINE_FUNCTION_INFO_H
+
+#include "PTX.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+/// PTXMachineFunctionInfo - This class is derived from MachineFunction and
+/// contains private PTX target-specific information for each MachineFunction.
+///
+class PTXMachineFunctionInfo : public MachineFunctionInfo {
+private:
+ bool is_kernel;
+ std::vector<unsigned> reg_arg, reg_local_var;
+ unsigned reg_ret;
+ bool _isDoneAddArg;
+
+public:
+ PTXMachineFunctionInfo(MachineFunction &MF)
+ : is_kernel(false), reg_ret(PTX::NoRegister), _isDoneAddArg(false) {
+ reg_arg.reserve(8);
+ reg_local_var.reserve(32);
+ }
+
+ void setKernel(bool _is_kernel=true) { is_kernel = _is_kernel; }
+
+ void addArgReg(unsigned reg) { reg_arg.push_back(reg); }
+ void addLocalVarReg(unsigned reg) { reg_local_var.push_back(reg); }
+ void setRetReg(unsigned reg) { reg_ret = reg; }
+
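+ // doneAddArg/doneAddLocalVar keep these vectors sorted so that the
+ // isArgReg/isLocalVarReg queries below can use std::binary_search.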
+ void doneAddArg(void) {
+ std::sort(reg_arg.begin(), reg_arg.end());
+ _isDoneAddArg = true;
+ }
+ void doneAddLocalVar(void) {
+ std::sort(reg_local_var.begin(), reg_local_var.end());
+ }
+
+ bool isDoneAddArg(void) { return _isDoneAddArg; }
+
+ bool isKernel() const { return is_kernel; }
+
+ typedef std::vector<unsigned>::const_iterator reg_iterator;
+
+ bool argRegEmpty() const { return reg_arg.empty(); }
+ int getNumArg() const { return reg_arg.size(); }
+ reg_iterator argRegBegin() const { return reg_arg.begin(); }
+ reg_iterator argRegEnd() const { return reg_arg.end(); }
+
+ bool localVarRegEmpty() const { return reg_local_var.empty(); }
+ reg_iterator localVarRegBegin() const { return reg_local_var.begin(); }
+ reg_iterator localVarRegEnd() const { return reg_local_var.end(); }
+
+ unsigned retReg() const { return reg_ret; }
+
+ bool isArgReg(unsigned reg) const {
+ return std::binary_search(reg_arg.begin(), reg_arg.end(), reg);
+ }
+
+ bool isLocalVarReg(unsigned reg) const {
+ return std::binary_search(reg_local_var.begin(), reg_local_var.end(), reg);
+ }
+}; // class PTXMachineFunctionInfo
+} // namespace llvm
+
+#endif // PTX_MACHINE_FUNCTION_INFO_H
diff --git a/contrib/llvm/lib/Target/TargetFrameInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
index 873d60a..0f3e7bc 100644
--- a/contrib/llvm/lib/Target/TargetFrameInfo.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===-- TargetFrameInfo.cpp - Implement machine frame interface -*- C++ -*-===//
+//===- PTXRegisterInfo.cpp - PTX Register Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
//
-// Implements the layout of a stack frame on the target machine.
+// This file contains the PTX implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetFrameInfo.h"
-#include <cstdlib>
+#include "PTX.h"
+#include "PTXRegisterInfo.h"
+
using namespace llvm;
-TargetFrameInfo::~TargetFrameInfo() {
-}
+#include "PTXGenRegisterInfo.inc"
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
new file mode 100644
index 0000000..67e130f
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
@@ -0,0 +1,63 @@
+//===- PTXRegisterInfo.h - PTX Register Information Impl --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PTX implementation of the MRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_REGISTER_INFO_H
+#define PTX_REGISTER_INFO_H
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/BitVector.h"
+
+#include "PTXGenRegisterInfo.h.inc"
+
+namespace llvm {
+class PTXTargetMachine;
+class MachineFunction;
+
+struct PTXRegisterInfo : public PTXGenRegisterInfo {
+ PTXRegisterInfo(PTXTargetMachine &TM,
+ const TargetInstrInfo &TII) {}
+
+ virtual const unsigned
+ *getCalleeSavedRegs(const MachineFunction *MF = 0) const {
+ static const unsigned CalleeSavedRegs[] = { 0 };
+ return CalleeSavedRegs; // save nothing
+ }
+
+ virtual BitVector getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ return Reserved; // reserve no regs
+ }
+
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj,
+ RegScavenger *RS = NULL) const {
+ llvm_unreachable("PTX does not support general function call");
+ }
+
+ virtual unsigned getFrameRegister(const MachineFunction &MF) const {
+ llvm_unreachable("PTX does not have a frame register");
+ return 0;
+ }
+
+ virtual unsigned getRARegister() const {
+ llvm_unreachable("PTX does not have a return address register");
+ return 0;
+ }
+
+ virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ return PTXGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
+ }
+}; // struct PTXRegisterInfo
+} // namespace llvm
+
+#endif // PTX_REGISTER_INFO_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
new file mode 100644
index 0000000..22e2b34
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
@@ -0,0 +1,102 @@
+//===- PTXRegisterInfo.td - PTX Register defs ----------------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the PTX register file
+//===----------------------------------------------------------------------===//
+
+class PTXReg<string n> : Register<n> {
+ let Namespace = "PTX";
+}
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+def P0 : PTXReg<"p0">;
+def P1 : PTXReg<"p1">;
+def P2 : PTXReg<"p2">;
+def P3 : PTXReg<"p3">;
+def P4 : PTXReg<"p4">;
+def P5 : PTXReg<"p5">;
+def P6 : PTXReg<"p6">;
+def P7 : PTXReg<"p7">;
+def P8 : PTXReg<"p8">;
+def P9 : PTXReg<"p9">;
+def P10 : PTXReg<"p10">;
+def P11 : PTXReg<"p11">;
+def P12 : PTXReg<"p12">;
+def P13 : PTXReg<"p13">;
+def P14 : PTXReg<"p14">;
+def P15 : PTXReg<"p15">;
+def P16 : PTXReg<"p16">;
+def P17 : PTXReg<"p17">;
+def P18 : PTXReg<"p18">;
+def P19 : PTXReg<"p19">;
+def P20 : PTXReg<"p20">;
+def P21 : PTXReg<"p21">;
+def P22 : PTXReg<"p22">;
+def P23 : PTXReg<"p23">;
+def P24 : PTXReg<"p24">;
+def P25 : PTXReg<"p25">;
+def P26 : PTXReg<"p26">;
+def P27 : PTXReg<"p27">;
+def P28 : PTXReg<"p28">;
+def P29 : PTXReg<"p29">;
+def P30 : PTXReg<"p30">;
+def P31 : PTXReg<"p31">;
+
+def R0 : PTXReg<"r0">;
+def R1 : PTXReg<"r1">;
+def R2 : PTXReg<"r2">;
+def R3 : PTXReg<"r3">;
+def R4 : PTXReg<"r4">;
+def R5 : PTXReg<"r5">;
+def R6 : PTXReg<"r6">;
+def R7 : PTXReg<"r7">;
+def R8 : PTXReg<"r8">;
+def R9 : PTXReg<"r9">;
+def R10 : PTXReg<"r10">;
+def R11 : PTXReg<"r11">;
+def R12 : PTXReg<"r12">;
+def R13 : PTXReg<"r13">;
+def R14 : PTXReg<"r14">;
+def R15 : PTXReg<"r15">;
+def R16 : PTXReg<"r16">;
+def R17 : PTXReg<"r17">;
+def R18 : PTXReg<"r18">;
+def R19 : PTXReg<"r19">;
+def R20 : PTXReg<"r20">;
+def R21 : PTXReg<"r21">;
+def R22 : PTXReg<"r22">;
+def R23 : PTXReg<"r23">;
+def R24 : PTXReg<"r24">;
+def R25 : PTXReg<"r25">;
+def R26 : PTXReg<"r26">;
+def R27 : PTXReg<"r27">;
+def R28 : PTXReg<"r28">;
+def R29 : PTXReg<"r29">;
+def R30 : PTXReg<"r30">;
+def R31 : PTXReg<"r31">;
+
+//===----------------------------------------------------------------------===//
+// Register classes
+//===----------------------------------------------------------------------===//
+
+def Preds : RegisterClass<"PTX", [i1], 8,
+ [P0, P1, P2, P3, P4, P5, P6, P7,
+ P8, P9, P10, P11, P12, P13, P14, P15,
+ P16, P17, P18, P19, P20, P21, P22, P23,
+ P24, P25, P26, P27, P28, P29, P30, P31]>;
+
+def RRegs32 : RegisterClass<"PTX", [i32], 32,
+ [R0, R1, R2, R3, R4, R5, R6, R7,
+ R8, R9, R10, R11, R12, R13, R14, R15,
+ R16, R17, R18, R19, R20, R21, R22, R23,
+ R24, R25, R26, R27, R28, R29, R30, R31]>;
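The 64 register definitions above are fully mechanical: p0..p31 and r0..r31 with matching def names. A throwaway generator along these lines (a sketch, not part of the build) could emit the whole block:

    #include <cstdio>

    // Prints the 'def Pn' / 'def Rn' lines in the same shape as the .td above.
    int main() {
      for (int i = 0; i < 32; ++i)
        std::printf("def P%d : PTXReg<\"p%d\">;\n", i, i);
      for (int i = 0; i < 32; ++i)
        std::printf("def R%d : PTXReg<\"r%d\">;\n", i, i);
      return 0;
    }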
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp b/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
new file mode 100644
index 0000000..00e2c88
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
@@ -0,0 +1,23 @@
+//===- PTXSubtarget.cpp - PTX Subtarget Information ---------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PTX specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTXSubtarget.h"
+
+using namespace llvm;
+
+PTXSubtarget::PTXSubtarget(const std::string &TT, const std::string &FS) {
+ std::string TARGET = "sm_20";
+ // TODO: call ParseSubtargetFeatures(FS, TARGET);
+}
+
+#include "PTXGenSubtarget.inc"
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.h b/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
new file mode 100644
index 0000000..7fd85f8
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
@@ -0,0 +1,32 @@
+//====-- PTXSubtarget.h - Define Subtarget for the PTX ---------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PTX specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_SUBTARGET_H
+#define PTX_SUBTARGET_H
+
+#include "llvm/Target/TargetSubtarget.h"
+
+namespace llvm {
+ class PTXSubtarget : public TargetSubtarget {
+ private:
+ bool is_sm20;
+
+ public:
+ PTXSubtarget(const std::string &TT, const std::string &FS);
+
+ std::string ParseSubtargetFeatures(const std::string &FS,
+ const std::string &CPU);
+ }; // class PTXSubtarget
+} // namespace llvm
+
+#endif // PTX_SUBTARGET_H
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
new file mode 100644
index 0000000..b263813
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
@@ -0,0 +1,60 @@
+//===-- PTXTargetMachine.cpp - Define TargetMachine for PTX ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Top-level implementation for the PTX target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTX.h"
+#include "PTXMCAsmInfo.h"
+#include "PTXTargetMachine.h"
+#include "llvm/PassManager.h"
+#include "llvm/Target/TargetRegistry.h"
+
+using namespace llvm;
+
+namespace llvm {
+ MCStreamer *createPTXAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst);
+}
+
+extern "C" void LLVMInitializePTXTarget() {
+ RegisterTargetMachine<PTXTargetMachine> X(ThePTXTarget);
+ RegisterAsmInfo<PTXMCAsmInfo> Y(ThePTXTarget);
+ TargetRegistry::RegisterAsmStreamer(ThePTXTarget, createPTXAsmStreamer);
+}
+
+// DataLayout and FrameLowering are filled with dummy data
+PTXTargetMachine::PTXTargetMachine(const Target &T,
+ const std::string &TT,
+ const std::string &FS)
+ : LLVMTargetMachine(T, TT),
+ DataLayout("e-p:32:32-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64"),
+ FrameLowering(Subtarget),
+ InstrInfo(*this),
+ TLInfo(*this),
+ Subtarget(TT, FS) {
+}
+
+bool PTXTargetMachine::addInstSelector(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
+ PM.add(createPTXISelDag(*this, OptLevel));
+ return false;
+}
+
+bool PTXTargetMachine::addPostRegAlloc(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
+  // PTXMFInfoExtract must run after register allocation!
+ PM.add(createPTXMFInfoExtract(*this, OptLevel));
+ return false;
+}
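One subtlety in the constructor above: C++ constructs members in declaration order, and PTXTargetMachine.h (next diff) declares FrameLowering before Subtarget, so FrameLowering(Subtarget) binds a reference to a Subtarget whose constructor has not yet run. That is harmless only as long as PTXFrameLowering merely stores the reference. A minimal sketch of the hazard, with hypothetical names:

    struct Sub { int Val; Sub() : Val(42) {} };

    struct FrameLoweringLike {
      // Storing the reference is fine; reading S.Val here would be
      // undefined behavior, because S is not constructed yet.
      explicit FrameLoweringLike(const Sub &S) : Ref(S) {}
      const Sub &Ref;
    };

    struct MachineLike {
      FrameLoweringLike FL; // declared first, constructed first
      Sub ST;               // declared second, constructed second
      MachineLike() : FL(ST), ST() {}
    };

    int main() { MachineLike M; return M.FL.Ref.Val == 42 ? 0 : 1; }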
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
new file mode 100644
index 0000000..728e36f
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
@@ -0,0 +1,60 @@
+//===-- PTXTargetMachine.h - Define TargetMachine for PTX -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PTX specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTX_TARGET_MACHINE_H
+#define PTX_TARGET_MACHINE_H
+
+#include "PTXISelLowering.h"
+#include "PTXInstrInfo.h"
+#include "PTXFrameLowering.h"
+#include "PTXSubtarget.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class PTXTargetMachine : public LLVMTargetMachine {
+ private:
+ const TargetData DataLayout;
+ PTXFrameLowering FrameLowering;
+ PTXInstrInfo InstrInfo;
+ PTXTargetLowering TLInfo;
+ PTXSubtarget Subtarget;
+
+ public:
+ PTXTargetMachine(const Target &T, const std::string &TT,
+ const std::string &FS);
+
+ virtual const TargetData *getTargetData() const { return &DataLayout; }
+
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+
+ virtual const PTXInstrInfo *getInstrInfo() const { return &InstrInfo; }
+  virtual const TargetRegisterInfo *getRegisterInfo() const {
+    return &InstrInfo.getRegisterInfo();
+  }
+
+  virtual const PTXTargetLowering *getTargetLowering() const {
+    return &TLInfo;
+  }
+
+ virtual const PTXSubtarget *getSubtargetImpl() const { return &Subtarget; }
+
+ virtual bool addInstSelector(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel);
+ virtual bool addPostRegAlloc(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel);
+}; // class PTXTargetMachine
+} // namespace llvm
+
+#endif // PTX_TARGET_MACHINE_H
diff --git a/contrib/llvm/lib/Target/PTX/TargetInfo/CMakeLists.txt b/contrib/llvm/lib/Target/PTX/TargetInfo/CMakeLists.txt
new file mode 100644
index 0000000..4b09cf5
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/TargetInfo/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMPTXInfo
+ PTXTargetInfo.cpp
+ )
+
+add_dependencies(LLVMPTXInfo PTXCodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/PTX/TargetInfo/Makefile b/contrib/llvm/lib/Target/PTX/TargetInfo/Makefile
new file mode 100644
index 0000000..8619785
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/TargetInfo/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/PTX/TargetInfo/Makefile ------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMPTXInfo
+
+# Hack: we need to include the 'main' target directory to grab private headers
+CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp b/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp
new file mode 100644
index 0000000..a577d77
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/TargetInfo/PTXTargetInfo.cpp
@@ -0,0 +1,21 @@
+//===-- PTXTargetInfo.cpp - PTX Target Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTX.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetRegistry.h"
+
+using namespace llvm;
+
+Target llvm::ThePTXTarget;
+
+extern "C" void LLVMInitializePTXTargetInfo() {
+ // see llvm/ADT/Triple.h
+ RegisterTarget<Triple::ptx> X(ThePTXTarget, "ptx", "PTX");
+}
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/CMakeLists.txt b/contrib/llvm/lib/Target/PowerPC/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..389ea77
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/CMakeLists.txt
@@ -0,0 +1,6 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMPowerPCAsmPrinter
+ PPCInstPrinter.cpp
+ )
+add_dependencies(LLVMPowerPCAsmPrinter PowerPCCodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/Makefile b/contrib/llvm/lib/Target/PowerPC/InstPrinter/Makefile
new file mode 100644
index 0000000..f097e84
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/PowerPC/InstPrinter/Makefile -------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMPowerPCAsmPrinter
+
+# Hack: we need to include the 'main' PowerPC target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
new file mode 100644
index 0000000..c8db0c4
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -0,0 +1,292 @@
+//===-- PPCInstPrinter.cpp - Convert PPC MCInst to assembly syntax --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a PPC MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "PPCInstPrinter.h"
+#include "PPCPredicates.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define GET_INSTRUCTION_NAME
+#include "PPCGenAsmWriter.inc"
+
+StringRef PPCInstPrinter::getOpcodeName(unsigned Opcode) const {
+ return getInstructionName(Opcode);
+}
+
+
+void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O) {
+ // Check for slwi/srwi mnemonics.
+ if (MI->getOpcode() == PPC::RLWINM) {
+ unsigned char SH = MI->getOperand(2).getImm();
+ unsigned char MB = MI->getOperand(3).getImm();
+ unsigned char ME = MI->getOperand(4).getImm();
+ bool useSubstituteMnemonic = false;
+ if (SH <= 31 && MB == 0 && ME == (31-SH)) {
+ O << "\tslwi "; useSubstituteMnemonic = true;
+ }
+ if (SH <= 31 && MB == (32-SH) && ME == 31) {
+ O << "\tsrwi "; useSubstituteMnemonic = true;
+ SH = 32-SH;
+ }
+ if (useSubstituteMnemonic) {
+ printOperand(MI, 0, O);
+ O << ", ";
+ printOperand(MI, 1, O);
+ O << ", " << (unsigned int)SH;
+ return;
+ }
+ }
+
+ if ((MI->getOpcode() == PPC::OR || MI->getOpcode() == PPC::OR8) &&
+ MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
+ O << "\tmr ";
+ printOperand(MI, 0, O);
+ O << ", ";
+ printOperand(MI, 1, O);
+ return;
+ }
+
+ if (MI->getOpcode() == PPC::RLDICR) {
+ unsigned char SH = MI->getOperand(2).getImm();
+ unsigned char ME = MI->getOperand(3).getImm();
+ // rldicr RA, RS, SH, 63-SH == sldi RA, RS, SH
+ if (63-SH == ME) {
+ O << "\tsldi ";
+ printOperand(MI, 0, O);
+ O << ", ";
+ printOperand(MI, 1, O);
+ O << ", " << (unsigned int)SH;
+ return;
+ }
+ }
+
+ printInstruction(MI, O);
+}
+
+
+void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O,
+ const char *Modifier) {
+ assert(Modifier && "Must specify 'cc' or 'reg' as predicate op modifier!");
+ unsigned Code = MI->getOperand(OpNo).getImm();
+ if (StringRef(Modifier) == "cc") {
+ switch ((PPC::Predicate)Code) {
+ default: assert(0 && "Invalid predicate");
+ case PPC::PRED_ALWAYS: return; // Don't print anything for always.
+ case PPC::PRED_LT: O << "lt"; return;
+ case PPC::PRED_LE: O << "le"; return;
+ case PPC::PRED_EQ: O << "eq"; return;
+ case PPC::PRED_GE: O << "ge"; return;
+ case PPC::PRED_GT: O << "gt"; return;
+ case PPC::PRED_NE: O << "ne"; return;
+ case PPC::PRED_UN: O << "un"; return;
+ case PPC::PRED_NU: O << "nu"; return;
+ }
+ }
+
+ assert(StringRef(Modifier) == "reg" &&
+ "Need to specify 'cc' or 'reg' as predicate op modifier!");
+ // Don't print the register for 'always'.
+ if (Code == PPC::PRED_ALWAYS) return;
+ printOperand(MI, OpNo+1, O);
+}
+
+void PPCInstPrinter::printS5ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ char Value = MI->getOperand(OpNo).getImm();
+ Value = (Value << (32-5)) >> (32-5);
+ O << (int)Value;
+}
+
+void PPCInstPrinter::printU5ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned char Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 31 && "Invalid u5imm argument!");
+ O << (unsigned int)Value;
+}
+
+void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned char Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 63 && "Invalid u6imm argument!");
+ O << (unsigned int)Value;
+}
+
+void PPCInstPrinter::printS16ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << (short)MI->getOperand(OpNo).getImm();
+}
+
+void PPCInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << (unsigned short)MI->getOperand(OpNo).getImm();
+}
+
+void PPCInstPrinter::printS16X4ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (MI->getOperand(OpNo).isImm())
+ O << (short)(MI->getOperand(OpNo).getImm()*4);
+ else
+ printOperand(MI, OpNo, O);
+}
+
+void PPCInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (!MI->getOperand(OpNo).isImm())
+ return printOperand(MI, OpNo, O);
+
+ // Branches can take an immediate operand. This is used by the branch
+  // selection pass to print $+8, an eight-byte displacement from the PC.
+ O << "$+";
+ printAbsAddrOperand(MI, OpNo, O);
+}
+
+void PPCInstPrinter::printAbsAddrOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << (int)MI->getOperand(OpNo).getImm()*4;
+}
+
+
+void PPCInstPrinter::printcrbitm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned CCReg = MI->getOperand(OpNo).getReg();
+ unsigned RegNo;
+ switch (CCReg) {
+ default: assert(0 && "Unknown CR register");
+ case PPC::CR0: RegNo = 0; break;
+ case PPC::CR1: RegNo = 1; break;
+ case PPC::CR2: RegNo = 2; break;
+ case PPC::CR3: RegNo = 3; break;
+ case PPC::CR4: RegNo = 4; break;
+ case PPC::CR5: RegNo = 5; break;
+ case PPC::CR6: RegNo = 6; break;
+ case PPC::CR7: RegNo = 7; break;
+ }
+ O << (0x80 >> RegNo);
+}
+
+void PPCInstPrinter::printMemRegImm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printSymbolLo(MI, OpNo, O);
+ O << '(';
+ if (MI->getOperand(OpNo+1).getReg() == PPC::R0)
+ O << "0";
+ else
+ printOperand(MI, OpNo+1, O);
+ O << ')';
+}
+
+void PPCInstPrinter::printMemRegImmShifted(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (MI->getOperand(OpNo).isImm())
+ printS16X4ImmOperand(MI, OpNo, O);
+ else
+ printSymbolLo(MI, OpNo, O);
+ O << '(';
+
+ if (MI->getOperand(OpNo+1).getReg() == PPC::R0)
+ O << "0";
+ else
+ printOperand(MI, OpNo+1, O);
+ O << ')';
+}
+
+
+void PPCInstPrinter::printMemRegReg(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ // When used as the base register, r0 reads constant zero rather than
+ // the value contained in the register. For this reason, the darwin
+ // assembler requires that we print r0 as 0 (no r) when used as the base.
+ if (MI->getOperand(OpNo).getReg() == PPC::R0)
+ O << "0";
+ else
+ printOperand(MI, OpNo, O);
+ O << ", ";
+ printOperand(MI, OpNo+1, O);
+}
+
+
+
+/// stripRegisterPrefix - This method strips the character prefix from a
+/// register name so that only the number is left. Used for Linux asm.
+static const char *stripRegisterPrefix(const char *RegName) {
+ switch (RegName[0]) {
+ case 'r':
+ case 'f':
+ case 'v': return RegName + 1;
+ case 'c': if (RegName[1] == 'r') return RegName + 2;
+ }
+
+ return RegName;
+}
+
+void PPCInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ const char *RegName = getRegisterName(Op.getReg());
+    // The Linux and AIX assemblers do not accept register prefixes.
+ if (!isDarwinSyntax())
+ RegName = stripRegisterPrefix(RegName);
+
+ O << RegName;
+ return;
+ }
+
+ if (Op.isImm()) {
+ O << Op.getImm();
+ return;
+ }
+
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ O << *Op.getExpr();
+}
+
+void PPCInstPrinter::printSymbolLo(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (MI->getOperand(OpNo).isImm())
+ return printS16ImmOperand(MI, OpNo, O);
+
+ // FIXME: This is a terrible hack because we can't encode lo16() as an operand
+ // flag of a subtraction. See the FIXME in GetSymbolRef in PPCMCInstLower.
+ if (MI->getOperand(OpNo).isExpr() &&
+ isa<MCBinaryExpr>(MI->getOperand(OpNo).getExpr())) {
+ O << "lo16(";
+ printOperand(MI, OpNo, O);
+ O << ')';
+ } else {
+ printOperand(MI, OpNo, O);
+ }
+}
+
+void PPCInstPrinter::printSymbolHi(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (MI->getOperand(OpNo).isImm())
+ return printS16ImmOperand(MI, OpNo, O);
+
+  // FIXME: This is a terrible hack because we can't encode ha16() as an operand
+ // flag of a subtraction. See the FIXME in GetSymbolRef in PPCMCInstLower.
+ if (MI->getOperand(OpNo).isExpr() &&
+ isa<MCBinaryExpr>(MI->getOperand(OpNo).getExpr())) {
+ O << "ha16(";
+ printOperand(MI, OpNo, O);
+ O << ')';
+ } else {
+ printOperand(MI, OpNo, O);
+ }
+}
+
+
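The mnemonic substitutions in printInst above rest on two rotate-and-mask identities: rlwinm RA, RS, SH, 0, 31-SH is slwi RA, RS, SH, and rlwinm RA, RS, 32-n, n, 31 is srwi RA, RS, n (likewise rldicr with ME == 63-SH is sldi). A standalone check of the 32-bit identities (a sketch, not part of this commit):

    #include <cassert>
    #include <cstdint>

    // rlwinm: rotate left by SH, then keep bits MB..ME, where bit 0 is the
    // most-significant bit (IBM numbering).
    static uint32_t rlwinm(uint32_t RS, unsigned SH, unsigned MB, unsigned ME) {
      uint32_t Rot = SH ? (RS << SH) | (RS >> (32 - SH)) : RS;
      uint32_t Mask = (0xFFFFFFFFu >> MB) & (0xFFFFFFFFu << (31 - ME));
      return Rot & Mask;
    }

    int main() {
      uint32_t RS = 0xDEADBEEF;
      for (unsigned N = 1; N < 32; ++N) {
        assert(rlwinm(RS, N, 0, 31 - N) == RS << N);  // slwi N
        assert(rlwinm(RS, 32 - N, N, 31) == RS >> N); // srwi N
      }
      return 0;
    }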
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
new file mode 100644
index 0000000..ebc10da
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -0,0 +1,69 @@
+//===-- PPCInstPrinter.h - Convert PPC MCInst to assembly syntax ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a PPC MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PPCINSTPRINTER_H
+#define PPCINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+ class MCOperand;
+
+class PPCInstPrinter : public MCInstPrinter {
+ // 0 -> AIX, 1 -> Darwin.
+ unsigned SyntaxVariant;
+public:
+ PPCInstPrinter(const MCAsmInfo &MAI, unsigned syntaxVariant)
+ : MCInstPrinter(MAI), SyntaxVariant(syntaxVariant) {}
+
+ bool isDarwinSyntax() const {
+ return SyntaxVariant == 1;
+ }
+
+ virtual void printInst(const MCInst *MI, raw_ostream &O);
+ virtual StringRef getOpcodeName(unsigned Opcode) const;
+
+ static const char *getInstructionName(unsigned Opcode);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+
+
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printPredicateOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, const char *Modifier);
+
+
+ void printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printS16X4ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printAbsAddrOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
+ void printcrbitm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
+ void printMemRegImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printMemRegImmShifted(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printMemRegReg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
+ // FIXME: Remove
+ void printSymbolLo(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSymbolHi(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+};
+} // end namespace llvm
+
+#endif
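printS5ImmOperand in the .cpp above sign-extends a 5-bit immediate with a shift pair. An equivalent standalone sketch (it relies on two's-complement arithmetic right shift, just as the printer's code does):

    #include <cassert>

    static int signExtend5(unsigned Bits) {
      unsigned Shifted = (Bits & 0x1Fu) << 27; // move the field to the top
      return (int)Shifted >> 27;               // arithmetic shift copies bit 4 down
    }

    int main() {
      assert(signExtend5(0x1F) == -1);  // 0b11111 -> -1
      assert(signExtend5(0x10) == -16); // 0b10000 -> -16
      assert(signExtend5(0x0F) == 15);  // 0b01111 -> +15
      return 0;
    }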
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.h b/contrib/llvm/lib/Target/PowerPC/PPC.h
index 67e3a4a..7242f3a 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.h
@@ -15,24 +15,70 @@
#ifndef LLVM_TARGET_POWERPC_H
#define LLVM_TARGET_POWERPC_H
+#include <string>
+
// GCC #defines PPC on Linux but we use it as our namespace name
#undef PPC
-#include "llvm/Target/TargetMachine.h"
-
namespace llvm {
class PPCTargetMachine;
class FunctionPass;
class formatted_raw_ostream;
+ class JITCodeEmitter;
+ class Target;
+ class MachineInstr;
+ class AsmPrinter;
+ class MCInst;
+ class MCCodeEmitter;
+ class MCContext;
+ class TargetMachine;
+ class TargetAsmBackend;
+
+ FunctionPass *createPPCBranchSelectionPass();
+ FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
+ FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM,
+ JITCodeEmitter &MCE);
+ MCCodeEmitter *createPPCMCCodeEmitter(const Target &, TargetMachine &TM,
+ MCContext &Ctx);
+ TargetAsmBackend *createPPCAsmBackend(const Target &, const std::string &);
-FunctionPass *createPPCBranchSelectionPass();
-FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
-FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM,
- JITCodeEmitter &MCE);
+ void LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ AsmPrinter &AP);
+
+ extern Target ThePPC32Target;
+ extern Target ThePPC64Target;
+
+ namespace PPCII {
+
+ /// Target Operand Flag enum.
+ enum TOF {
+ //===------------------------------------------------------------------===//
+ // PPC Specific MachineOperand flags.
+ MO_NO_FLAG,
+
+ /// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
+ /// reference is actually to the "FOO$stub" symbol. This is used for calls
+ /// and jumps to external functions on Tiger and earlier.
+ MO_DARWIN_STUB = 1,
+
+ /// MO_LO16, MO_HA16 - lo16(symbol) and ha16(symbol)
+ MO_LO16 = 4, MO_HA16 = 8,
-extern Target ThePPC32Target;
-extern Target ThePPC64Target;
+ /// MO_PIC_FLAG - If this bit is set, the symbol reference is relative to
+ /// the function's picbase, e.g. lo16(symbol-picbase).
+ MO_PIC_FLAG = 16,
+ /// MO_NLP_FLAG - If this bit is set, the symbol reference is actually to
+ /// the non_lazy_ptr for the global, e.g. lo16(symbol$non_lazy_ptr-picbase).
+ MO_NLP_FLAG = 32,
+
+ /// MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a
+ /// symbol with hidden visibility. This causes a different kind of
+ /// non-lazy-pointer to be generated.
+ MO_NLP_HIDDEN_FLAG = 64
+ };
+ } // end namespace PPCII
+
} // end namespace llvm;
// Defines symbolic names for PowerPC registers. This defines a mapping from
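The PPCII::TOF values added above are powers of two precisely so one operand can carry several properties at once; lo16(symbol - picbase), for instance, is MO_LO16 | MO_PIC_FLAG. A small sketch of composing and testing the flags (values copied from the enum):

    #include <cassert>

    enum { MO_LO16 = 4, MO_HA16 = 8, MO_PIC_FLAG = 16 };

    int main() {
      unsigned TargetFlags = MO_LO16 | MO_PIC_FLAG; // lo16(sym - picbase)
      assert(TargetFlags & MO_LO16);
      assert(TargetFlags & MO_PIC_FLAG);
      assert(!(TargetFlags & MO_HA16));
      return 0;
    }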
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.td b/contrib/llvm/lib/Target/PowerPC/PPC.td
index 27644b2..aabf494 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.td
@@ -99,8 +99,14 @@ def PPCInstrInfo : InstrInfo {
let isLittleEndianEncoding = 1;
}
+def PPCAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
def PPC : Target {
// Information about the instructions.
let InstructionSet = PPCInstrInfo;
+
+ let AssemblyWriters = [PPCAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCAsmBackend.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmBackend.cpp
new file mode 100644
index 0000000..c4d4ac9
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmBackend.cpp
@@ -0,0 +1,119 @@
+//===-- PPCAsmBackend.cpp - PPC Assembler Backend -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetAsmBackend.h"
+#include "PPC.h"
+#include "PPCFixupKinds.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Object/MachOFormat.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace llvm;
+
+namespace {
+class PPCMachObjectWriter : public MCMachObjectTargetWriter {
+public:
+ PPCMachObjectWriter(bool Is64Bit, uint32_t CPUType,
+ uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype) {}
+};
+
+class PPCAsmBackend : public TargetAsmBackend {
+const Target &TheTarget;
+public:
+ PPCAsmBackend(const Target &T) : TargetAsmBackend(), TheTarget(T) {}
+
+ unsigned getNumFixupKinds() const { return PPC::NumTargetFixupKinds; }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[PPC::NumTargetFixupKinds] = {
+ // name offset bits flags
+ { "fixup_ppc_br24", 6, 24, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_ppc_lo16", 16, 16, 0 },
+ { "fixup_ppc_ha16", 16, 16, 0 },
+ { "fixup_ppc_lo14", 16, 14, 0 }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return TargetAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ bool MayNeedRelaxation(const MCInst &Inst) const {
+ // FIXME.
+ return false;
+ }
+
+ void RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ // FIXME.
+ assert(0 && "RelaxInstruction() unimplemented");
+ }
+
+ bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+ // FIXME: Zero fill for now. That's not right, but at least will get the
+ // section size right.
+ for (uint64_t i = 0; i != Count; ++i)
+ OW->Write8(0);
+ return true;
+ }
+
+ unsigned getPointerSize() const {
+ StringRef Name = TheTarget.getName();
+ if (Name == "ppc64") return 8;
+ assert(Name == "ppc32" && "Unknown target name!");
+ return 4;
+ }
+};
+} // end anonymous namespace
+
+
+// FIXME: This should be in a separate file.
+namespace {
+ class DarwinPPCAsmBackend : public PPCAsmBackend {
+ public:
+ DarwinPPCAsmBackend(const Target &T) : PPCAsmBackend(T) { }
+
+ void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value) const {
+ assert(0 && "UNIMP");
+ }
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ bool is64 = getPointerSize() == 8;
+ return createMachObjectWriter(new PPCMachObjectWriter(
+ /*Is64Bit=*/is64,
+ (is64 ? object::mach::CTM_PowerPC64 :
+ object::mach::CTM_PowerPC),
+ object::mach::CSPPC_ALL),
+ OS, /*IsLittleEndian=*/false);
+ }
+
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ return false;
+ }
+ };
+} // end anonymous namespace
+
+
+
+
+TargetAsmBackend *llvm::createPPCAsmBackend(const Target &T,
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinPPCAsmBackend(T);
+ default:
+ return 0;
+ }
+}
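Each getFixupKindInfo entry above is a (name, offset, bits, flags) tuple: the resolved value occupies a field of the given width starting that many bits from the most-significant end of the instruction word. A sketch of patching such a field, using fixup_ppc_br24's offset 6 / width 24 (not from this commit; assumes 0 < Bits < 32 and Offset + Bits <= 32):

    #include <cassert>
    #include <cstdint>

    static uint32_t patchField(uint32_t Inst, uint32_t Value,
                               unsigned Offset, unsigned Bits) {
      uint32_t Mask = ((1u << Bits) - 1u) << (32 - Offset - Bits);
      return (Inst & ~Mask) | ((Value << (32 - Offset - Bits)) & Mask);
    }

    int main() {
      uint32_t B = 18u << 26;                        // 'b' major opcode
      uint32_t P = patchField(B, 0x100 >> 2, 6, 24); // word displacement
      assert((P & 0x03FFFFFC) == 0x100);             // LI field holds +0x100
      return 0;
    }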
diff --git a/contrib/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index c1a5663..8ed5d7f 100644
--- a/contrib/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -35,6 +35,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
@@ -43,6 +44,7 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ErrorHandling.h"
@@ -50,6 +52,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/SmallString.h"
+#include "InstPrinter/PPCInstPrinter.h"
using namespace llvm;
namespace {
@@ -57,88 +60,20 @@ namespace {
protected:
DenseMap<MCSymbol*, MCSymbol*> TOC;
const PPCSubtarget &Subtarget;
- uint64_t LabelID;
+ uint64_t TOCLabelID;
public:
explicit PPCAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer),
- Subtarget(TM.getSubtarget<PPCSubtarget>()), LabelID(0) {}
+ Subtarget(TM.getSubtarget<PPCSubtarget>()), TOCLabelID(0) {}
virtual const char *getPassName() const {
return "PowerPC Assembly Printer";
}
- PPCTargetMachine &getTM() {
- return static_cast<PPCTargetMachine&>(TM);
- }
-
- unsigned enumRegToMachineReg(unsigned enumReg) {
- switch (enumReg) {
- default: llvm_unreachable("Unhandled register!");
- case PPC::CR0: return 0;
- case PPC::CR1: return 1;
- case PPC::CR2: return 2;
- case PPC::CR3: return 3;
- case PPC::CR4: return 4;
- case PPC::CR5: return 5;
- case PPC::CR6: return 6;
- case PPC::CR7: return 7;
- }
- llvm_unreachable(0);
- }
-
- /// printInstruction - This method is automatically generated by tablegen
- /// from the instruction set description. This method returns true if the
- /// machine instruction was sufficiently described to print it, otherwise it
- /// returns false.
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
-
virtual void EmitInstruction(const MachineInstr *MI);
- void printOp(const MachineOperand &MO, raw_ostream &O);
-
- /// stripRegisterPrefix - This method strips the character prefix from a
- /// register name so that only the number is left. Used by for linux asm.
- const char *stripRegisterPrefix(const char *RegName) {
- switch (RegName[0]) {
- case 'r':
- case 'f':
- case 'v': return RegName + 1;
- case 'c': if (RegName[1] == 'r') return RegName + 2;
- }
- return RegName;
- }
-
- /// printRegister - Print register according to target requirements.
- ///
- void printRegister(const MachineOperand &MO, bool R0AsZero, raw_ostream &O){
- unsigned RegNo = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(RegNo) && "Not physreg??");
-
- // If we should use 0 for R0.
- if (R0AsZero && RegNo == PPC::R0) {
- O << "0";
- return;
- }
-
- const char *RegName = getRegisterName(RegNo);
- // Linux assembler (Others?) does not take register mnemonics.
- // FIXME - What about special registers used in mfspr/mtspr?
- if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
- O << RegName;
- }
-
- void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- if (MO.isReg()) {
- printRegister(MO, false, O);
- } else if (MO.isImm()) {
- O << MO.getImm();
- } else {
- printOp(MO, O);
- }
- }
+ void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
@@ -147,192 +82,9 @@ namespace {
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O);
-
- void printS5ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- char value = MI->getOperand(OpNo).getImm();
- value = (value << (32-5)) >> (32-5);
- O << (int)value;
- }
- void printU5ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- unsigned char value = MI->getOperand(OpNo).getImm();
- assert(value <= 31 && "Invalid u5imm argument!");
- O << (unsigned int)value;
- }
- void printU6ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- unsigned char value = MI->getOperand(OpNo).getImm();
- assert(value <= 63 && "Invalid u6imm argument!");
- O << (unsigned int)value;
- }
- void printS16ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- O << (short)MI->getOperand(OpNo).getImm();
- }
- void printU16ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- O << (unsigned short)MI->getOperand(OpNo).getImm();
- }
- void printS16X4ImmOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm()) {
- O << (short)(MI->getOperand(OpNo).getImm()*4);
- } else {
- O << "lo16(";
- printOp(MI->getOperand(OpNo), O);
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\")";
- else
- O << ')';
- }
- }
- void printBranchOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- // Branches can take an immediate operand. This is used by the branch
- // selection pass to print $+8, an eight byte displacement from the PC.
- if (MI->getOperand(OpNo).isImm()) {
- O << "$+" << MI->getOperand(OpNo).getImm()*4;
- } else {
- printOp(MI->getOperand(OpNo), O);
- }
- }
- void printCallOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- if (TM.getRelocationModel() != Reloc::Static) {
- if (MO.isGlobal()) {
- const GlobalValue *GV = MO.getGlobal();
- if (GV->isDeclaration() || GV->isWeakForLinker()) {
- // Dynamically-resolved functions need a stub for the function.
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym.getPointer() == 0)
- StubSym = MachineModuleInfoImpl::
- StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
- O << *Sym;
- return;
- }
- }
- if (MO.isSymbol()) {
- SmallString<128> TempNameStr;
- TempNameStr += StringRef(MO.getSymbolName());
- TempNameStr += StringRef("$stub");
-
- MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym.getPointer() == 0)
- StubSym = MachineModuleInfoImpl::
- StubValueTy(GetExternalSymbolSymbol(MO.getSymbolName()), true);
- O << *Sym;
- return;
- }
- }
-
- printOp(MI->getOperand(OpNo), O);
- }
- void printAbsAddrOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- O << (int)MI->getOperand(OpNo).getImm()*4;
- }
- void printPICLabel(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- O << "\"L" << getFunctionNumber() << "$pb\"\n";
- O << "\"L" << getFunctionNumber() << "$pb\":";
- }
- void printSymbolHi(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm()) {
- printS16ImmOperand(MI, OpNo, O);
- } else {
- if (Subtarget.isDarwin()) O << "ha16(";
- printOp(MI->getOperand(OpNo), O);
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (Subtarget.isDarwin())
- O << ')';
- else
- O << "@ha";
- }
- }
- void printSymbolLo(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm()) {
- printS16ImmOperand(MI, OpNo, O);
- } else {
- if (Subtarget.isDarwin()) O << "lo16(";
- printOp(MI->getOperand(OpNo), O);
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (Subtarget.isDarwin())
- O << ')';
- else
- O << "@l";
- }
- }
- void printcrbitm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- unsigned CCReg = MI->getOperand(OpNo).getReg();
- unsigned RegNo = enumRegToMachineReg(CCReg);
- O << (0x80 >> RegNo);
- }
- // The new addressing mode printers.
- void printMemRegImm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- printSymbolLo(MI, OpNo, O);
- O << '(';
- if (MI->getOperand(OpNo+1).isReg() &&
- MI->getOperand(OpNo+1).getReg() == PPC::R0)
- O << "0";
- else
- printOperand(MI, OpNo+1, O);
- O << ')';
- }
- void printMemRegImmShifted(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- if (MI->getOperand(OpNo).isImm())
- printS16X4ImmOperand(MI, OpNo, O);
- else
- printSymbolLo(MI, OpNo, O);
- O << '(';
- if (MI->getOperand(OpNo+1).isReg() &&
- MI->getOperand(OpNo+1).getReg() == PPC::R0)
- O << "0";
- else
- printOperand(MI, OpNo+1, O);
- O << ')';
- }
-
- void printMemRegReg(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
- // When used as the base register, r0 reads constant zero rather than
- // the value contained in the register. For this reason, the darwin
- // assembler requires that we print r0 as 0 (no r) when used as the base.
- const MachineOperand &MO = MI->getOperand(OpNo);
- printRegister(MO, true, O);
- O << ", ";
- printOperand(MI, OpNo+1, O);
- }
-
- void printTOCEntryLabel(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- assert(MO.isGlobal());
- MCSymbol *Sym = Mang->getSymbol(MO.getGlobal());
-
- // Map symbol -> label of TOC entry.
- MCSymbol *&TOCEntry = TOC[Sym];
- if (TOCEntry == 0)
- TOCEntry = OutContext.
- GetOrCreateSymbol(StringRef(MAI->getPrivateGlobalPrefix()) +
- "C" + Twine(LabelID++));
-
- O << *TOCEntry << "@toc";
- }
-
- void printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier);
-
MachineLocation getDebugValueLocation(const MachineInstr *MI) const {
-
MachineLocation Location;
- assert (MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
+ assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
// Frame address. Currently handles register +- offset only.
if (MI->getOperand(0).isReg() && MI->getOperand(2).isImm())
Location.set(MI->getOperand(0).getReg(), MI->getOperand(2).getImm());
@@ -376,13 +128,35 @@ namespace {
};
} // end of anonymous namespace
-// Include the auto-generated portion of the assembly writer
-#include "PPCGenAsmWriter.inc"
+/// stripRegisterPrefix - This method strips the character prefix from a
+/// register name so that only the number is left. Used for Linux asm.
+static const char *stripRegisterPrefix(const char *RegName) {
+ switch (RegName[0]) {
+ case 'r':
+ case 'f':
+ case 'v': return RegName + 1;
+ case 'c': if (RegName[1] == 'r') return RegName + 2;
+ }
+
+ return RegName;
+}
-void PPCAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
+void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+
switch (MO.getType()) {
+ case MachineOperand::MO_Register: {
+ const char *RegName = PPCInstPrinter::getRegisterName(MO.getReg());
+    // The Linux assembler (others?) does not accept register mnemonics.
+ // FIXME - What about special registers used in mfspr/mtspr?
+ if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
+ O << RegName;
+ return;
+ }
case MachineOperand::MO_Immediate:
- llvm_unreachable("printOp() does not handle immediate values");
+ O << MO.getImm();
+ return;
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
@@ -475,9 +249,7 @@ bool PPCAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
switch (ExtraCode[0]) {
default: return true; // Unknown modifier.
case 'c': // Don't print "$" before a global var name or constant.
- // PPC never has a prefix.
- printOperand(MI, OpNo, O);
- return false;
+ break; // PPC never has a prefix.
case 'L': // Write second word of DImode reference.
// Verify that this operand has two consecutive registers.
if (!MI->getOperand(OpNo).isReg() ||
@@ -509,48 +281,28 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
if (ExtraCode && ExtraCode[0])
return true; // Unknown modifier.
- assert (MI->getOperand(OpNo).isReg());
+ assert(MI->getOperand(OpNo).isReg());
O << "0(";
printOperand(MI, OpNo, O);
O << ")";
return false;
}
-void PPCAsmPrinter::printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
- raw_ostream &O, const char *Modifier){
- assert(Modifier && "Must specify 'cc' or 'reg' as predicate op modifier!");
- unsigned Code = MI->getOperand(OpNo).getImm();
- if (!strcmp(Modifier, "cc")) {
- switch ((PPC::Predicate)Code) {
- case PPC::PRED_ALWAYS: return; // Don't print anything for always.
- case PPC::PRED_LT: O << "lt"; return;
- case PPC::PRED_LE: O << "le"; return;
- case PPC::PRED_EQ: O << "eq"; return;
- case PPC::PRED_GE: O << "ge"; return;
- case PPC::PRED_GT: O << "gt"; return;
- case PPC::PRED_NE: O << "ne"; return;
- case PPC::PRED_UN: O << "un"; return;
- case PPC::PRED_NU: O << "nu"; return;
- }
-
- } else {
- assert(!strcmp(Modifier, "reg") &&
- "Need to specify 'cc' or 'reg' as predicate op modifier!");
- // Don't print the register for 'always'.
- if (Code == PPC::PRED_ALWAYS) return;
- printOperand(MI, OpNo+1, O);
- }
-}
-
/// EmitInstruction -- Print out a single PowerPC MI in Darwin syntax to
/// the current output stream.
///
void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream O(Str);
-
- if (MI->getOpcode() == TargetOpcode::DBG_VALUE) {
+ MCInst TmpInst;
+
+ // Lower multi-instruction pseudo operations.
+ switch (MI->getOpcode()) {
+ default: break;
+ case TargetOpcode::DBG_VALUE: {
+ if (!isVerbose() || !OutStreamer.hasRawTextSupport()) return;
+
+ SmallString<32> Str;
+ raw_svector_ostream O(Str);
unsigned NOps = MI->getNumOperands();
assert(NOps==4);
O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
@@ -567,56 +319,65 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutStreamer.EmitRawText(O.str());
return;
}
- // Check for slwi/srwi mnemonics.
- if (MI->getOpcode() == PPC::RLWINM) {
- unsigned char SH = MI->getOperand(2).getImm();
- unsigned char MB = MI->getOperand(3).getImm();
- unsigned char ME = MI->getOperand(4).getImm();
- bool useSubstituteMnemonic = false;
- if (SH <= 31 && MB == 0 && ME == (31-SH)) {
- O << "\tslwi "; useSubstituteMnemonic = true;
- }
- if (SH <= 31 && MB == (32-SH) && ME == 31) {
- O << "\tsrwi "; useSubstituteMnemonic = true;
- SH = 32-SH;
- }
- if (useSubstituteMnemonic) {
- printOperand(MI, 0, O);
- O << ", ";
- printOperand(MI, 1, O);
- O << ", " << (unsigned int)SH;
- OutStreamer.EmitRawText(O.str());
- return;
- }
+
+ case PPC::MovePCtoLR:
+ case PPC::MovePCtoLR8: {
+ // Transform %LR = MovePCtoLR
+ // Into this, where the label is the PIC base:
+ // bl L1$pb
+ // L1$pb:
+ MCSymbol *PICBase = MF->getPICBaseSymbol();
+
+ // Emit the 'bl'.
+ TmpInst.setOpcode(PPC::BL_Darwin); // Darwin vs SVR4 doesn't matter here.
+
+
+ // FIXME: We would like an efficient form for this, so we don't have to do
+ // a lot of extra uniquing.
+ TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::
+ Create(PICBase, OutContext)));
+ OutStreamer.EmitInstruction(TmpInst);
+
+ // Emit the label.
+ OutStreamer.EmitLabel(PICBase);
+ return;
}
-
- if ((MI->getOpcode() == PPC::OR || MI->getOpcode() == PPC::OR8) &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
- O << "\tmr ";
- printOperand(MI, 0, O);
- O << ", ";
- printOperand(MI, 1, O);
- OutStreamer.EmitRawText(O.str());
+ case PPC::LDtoc: {
+ // Transform %X3 = LDtoc <ga:@min1>, %X2
+ LowerPPCMachineInstrToMCInst(MI, TmpInst, *this);
+
+ // Change the opcode to LD, and the global address operand to be a
+ // reference to the TOC entry we will synthesize later.
+ TmpInst.setOpcode(PPC::LD);
+ const MachineOperand &MO = MI->getOperand(1);
+ assert(MO.isGlobal());
+
+ // Map symbol -> label of TOC entry.
+ MCSymbol *&TOCEntry = TOC[Mang->getSymbol(MO.getGlobal())];
+ if (TOCEntry == 0)
+ TOCEntry = GetTempSymbol("C", TOCLabelID++);
+
+ const MCExpr *Exp =
+ MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC,
+ OutContext);
+ TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
+ OutStreamer.EmitInstruction(TmpInst);
return;
}
-
- if (MI->getOpcode() == PPC::RLDICR) {
- unsigned char SH = MI->getOperand(2).getImm();
- unsigned char ME = MI->getOperand(3).getImm();
- // rldicr RA, RS, SH, 63-SH == sldi RA, RS, SH
- if (63-SH == ME) {
- O << "\tsldi ";
- printOperand(MI, 0, O);
- O << ", ";
- printOperand(MI, 1, O);
- O << ", " << (unsigned int)SH;
- OutStreamer.EmitRawText(O.str());
- return;
- }
+
+ case PPC::MFCRpseud:
+ // Transform: %R3 = MFCRpseud %CR7
+ // Into: %R3 = MFCR ;; cr7
+ OutStreamer.AddComment(PPCInstPrinter::
+ getRegisterName(MI->getOperand(1).getReg()));
+ TmpInst.setOpcode(PPC::MFCR);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
}
- printInstruction(MI, O);
- OutStreamer.EmitRawText(O.str());
+ LowerPPCMachineInstrToMCInst(MI, TmpInst, *this);
+ OutStreamer.EmitInstruction(TmpInst);
}
void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
@@ -677,7 +438,10 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
if (Subtarget.isPPC64() && Directive < PPC::DIR_970)
Directive = PPC::DIR_64;
assert(Directive <= PPC::DIR_64 && "Directive out of range.");
- OutStreamer.EmitRawText("\t.machine " + Twine(CPUDirectives[Directive]));
+
+ // FIXME: This is a total hack, finish mc'izing the PPC backend.
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.machine " + Twine(CPUDirectives[Directive]));
// Prime text sections so they are adjacent. This reduces the likelihood a
// large data or debug section causes a branch to exceed 16M limit.
@@ -915,8 +679,18 @@ static AsmPrinter *createPPCAsmPrinterPass(TargetMachine &tm,
return new PPCLinuxAsmPrinter(tm, Streamer);
}
+static MCInstPrinter *createPPCMCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI) {
+ return new PPCInstPrinter(MAI, SyntaxVariant);
+}
+
+
// Force static initialization.
extern "C" void LLVMInitializePowerPCAsmPrinter() {
TargetRegistry::RegisterAsmPrinter(ThePPC32Target, createPPCAsmPrinterPass);
TargetRegistry::RegisterAsmPrinter(ThePPC64Target, createPPCAsmPrinterPass);
+
+ TargetRegistry::RegisterMCInstPrinter(ThePPC32Target, createPPCMCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(ThePPC64Target, createPPCMCInstPrinter);
}
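The LDtoc case above uniques TOC entries by taking a reference into the DenseMap and minting a temporary label only on first use. The same lazy-uniquing shape with standard-library types (hypothetical names, not the LLVM API):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, std::string> TOC;
      unsigned TOCLabelID = 0;

      auto getTOCEntry = [&](const std::string &Sym) -> const std::string & {
        std::string &Entry = TOC[Sym]; // slot is created on first access
        if (Entry.empty())
          Entry = "LC" + std::to_string(TOCLabelID++);
        return Entry;
      };

      assert(getTOCEntry("min1") == "LC0");
      assert(getTOCEntry("max1") == "LC1");
      assert(getTOCEntry("min1") == "LC0"); // repeat lookup reuses the label
      return 0;
    }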
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
index df9ab52..42232a0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
@@ -50,13 +50,24 @@ namespace {
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
/// machine instructions.
+ unsigned getBinaryCodeForInstr(const MachineInstr &MI) const;
- unsigned getBinaryCodeForInstr(const MachineInstr &MI);
-
+
+ MachineRelocation GetRelocation(const MachineOperand &MO,
+ unsigned RelocID) const;
+
/// getMachineOpValue - evaluates the MachineOperand of a given MachineInstr
-
unsigned getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO);
+ const MachineOperand &MO) const;
+
+ unsigned get_crbitm_encoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getDirectBrEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getCondBrEncoding(const MachineInstr &MI, unsigned OpNo) const;
+
+ unsigned getHA16Encoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getLO16Encoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getMemRIEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getMemRIXEncoding(const MachineInstr &MI, unsigned OpNo) const;
const char *getPassName() const { return "PowerPC Machine Code Emitter"; }
@@ -67,10 +78,6 @@ namespace {
/// emitBasicBlock - emits the given MachineBasicBlock to memory
///
void emitBasicBlock(MachineBasicBlock &MBB);
-
- /// getValueBit - return the particular bit of Val
- ///
- unsigned getValueBit(int64_t Val, unsigned bit) { return (Val >> bit) & 1; }
};
}
@@ -128,125 +135,127 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
}
}
-unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) {
+unsigned PPCCodeEmitter::get_crbitm_encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) &&
+ (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
+ return 0x80 >> PPCRegisterInfo::getRegisterNumbering(MO.getReg());
+}
- unsigned rv = 0; // Return value; defaults to 0 for unhandled cases
- // or things that get fixed up later by the JIT.
- if (MO.isReg()) {
- rv = PPCRegisterInfo::getRegisterNumbering(MO.getReg());
+MachineRelocation PPCCodeEmitter::GetRelocation(const MachineOperand &MO,
+ unsigned RelocID) const {
+ // If in PIC mode, we need to encode the negated address of the
+ // 'movepctolr' into the unrelocated field. After relocation, we'll have
+ // &gv-&movepctolr-4 in the imm field. Once &movepctolr is added to the imm
+ // field, we get &gv. This doesn't happen for branch relocations, which are
+ // always implicitly pc relative.
+ intptr_t Cst = 0;
+ if (TM.getRelocationModel() == Reloc::PIC_) {
+ assert(MovePCtoLROffset && "MovePCtoLR not seen yet?");
+ Cst = -(intptr_t)MovePCtoLROffset - 4;
+ }
+
+ if (MO.isGlobal())
+ return MachineRelocation::getGV(MCE.getCurrentPCOffset(), RelocID,
+ const_cast<GlobalValue *>(MO.getGlobal()),
+ Cst, isa<Function>(MO.getGlobal()));
+ if (MO.isSymbol())
+ return MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
+ RelocID, MO.getSymbolName(), Cst);
+ if (MO.isCPI())
+ return MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
+ RelocID, MO.getIndex(), Cst);
- // Special encoding for MTCRF and MFOCRF, which uses a bit mask for the
- // register, not the register number directly.
- if ((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) &&
- (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)) {
- rv = 0x80 >> rv;
- }
- } else if (MO.isImm()) {
- rv = MO.getImm();
- } else if (MO.isGlobal() || MO.isSymbol() ||
- MO.isCPI() || MO.isJTI()) {
- unsigned Reloc = 0;
- if (MI.getOpcode() == PPC::BL_Darwin || MI.getOpcode() == PPC::BL8_Darwin ||
- MI.getOpcode() == PPC::BL_SVR4 || MI.getOpcode() == PPC::BL8_ELF ||
- MI.getOpcode() == PPC::TAILB || MI.getOpcode() == PPC::TAILB8)
- Reloc = PPC::reloc_pcrel_bx;
- else {
- if (TM.getRelocationModel() == Reloc::PIC_) {
- assert(MovePCtoLROffset && "MovePCtoLR not seen yet?");
- }
- switch (MI.getOpcode()) {
- default: MI.dump(); llvm_unreachable("Unknown instruction for relocation!");
- case PPC::LIS:
- case PPC::LIS8:
- case PPC::ADDIS:
- case PPC::ADDIS8:
- Reloc = PPC::reloc_absolute_high; // Pointer to symbol
- break;
- case PPC::LI:
- case PPC::LI8:
- case PPC::LA:
- // Loads.
- case PPC::LBZ:
- case PPC::LBZ8:
- case PPC::LHA:
- case PPC::LHA8:
- case PPC::LHZ:
- case PPC::LHZ8:
- case PPC::LWZ:
- case PPC::LWZ8:
- case PPC::LFS:
- case PPC::LFD:
-
- // Stores.
- case PPC::STB:
- case PPC::STB8:
- case PPC::STH:
- case PPC::STH8:
- case PPC::STW:
- case PPC::STW8:
- case PPC::STFS:
- case PPC::STFD:
- Reloc = PPC::reloc_absolute_low;
- break;
-
- case PPC::LWA:
- case PPC::LD:
- case PPC::STD:
- case PPC::STD_32:
- Reloc = PPC::reloc_absolute_low_ix;
- break;
- }
- }
+ if (MO.isMBB())
+ return MachineRelocation::getBB(MCE.getCurrentPCOffset(),
+ RelocID, MO.getMBB());
+
+ assert(MO.isJTI());
+ return MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
+ RelocID, MO.getIndex(), Cst);
+}
- MachineRelocation R;
- if (MO.isGlobal()) {
- R = MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- const_cast<GlobalValue *>(MO.getGlobal()), 0,
- isa<Function>(MO.getGlobal()));
- } else if (MO.isSymbol()) {
- R = MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
- Reloc, MO.getSymbolName(), 0);
- } else if (MO.isCPI()) {
- R = MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
- Reloc, MO.getIndex(), 0);
- } else {
- assert(MO.isJTI());
- R = MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
- Reloc, MO.getIndex(), 0);
- }
+unsigned PPCCodeEmitter::getDirectBrEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO);
+
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_pcrel_bx));
+ return 0;
+}
- // If in PIC mode, we need to encode the negated address of the
- // 'movepctolr' into the unrelocated field. After relocation, we'll have
- // &gv-&movepctolr-4 in the imm field. Once &movepctolr is added to the imm
- // field, we get &gv. This doesn't happen for branch relocations, which are
- // always implicitly pc relative.
- if (TM.getRelocationModel() == Reloc::PIC_ && Reloc != PPC::reloc_pcrel_bx){
- assert(MovePCtoLROffset && "MovePCtoLR not seen yet?");
- R.setConstantVal(-(intptr_t)MovePCtoLROffset - 4);
- }
- MCE.addRelocation(R);
-
- } else if (MO.isMBB()) {
- unsigned Reloc = 0;
- unsigned Opcode = MI.getOpcode();
- if (Opcode == PPC::B || Opcode == PPC::BL_Darwin ||
- Opcode == PPC::BLA_Darwin|| Opcode == PPC::BL_SVR4 ||
- Opcode == PPC::BLA_SVR4)
- Reloc = PPC::reloc_pcrel_bx;
- else // BCC instruction
- Reloc = PPC::reloc_pcrel_bcx;
-
- MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
- Reloc, MO.getMBB()));
- } else {
-#ifndef NDEBUG
- errs() << "ERROR: Unknown type of MachineOperand: " << MO << "\n";
-#endif
- llvm_unreachable(0);
- }
+unsigned PPCCodeEmitter::getCondBrEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_pcrel_bcx));
+ return 0;
+}
- return rv;
+unsigned PPCCodeEmitter::getHA16Encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO);
+
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_high));
+ return 0;
+}
+
+unsigned PPCCodeEmitter::getLO16Encoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO);
+
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low));
+ return 0;
+}
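
A quick sanity check on the ha16/lo16 pairing used by the two encoders above: the
low half is eventually consumed as a signed 16-bit displacement, so (assuming the
relocator applies the usual PowerPC high-adjusted rounding) the high half must be
bumped by one whenever bit 15 of the final address is set. A minimal sketch, with
ha16/lo16 as illustrative helpers that are not part of this patch:

  // Hedged sketch: recombining an address from its high-adjusted and low halves.
  static unsigned ha16(unsigned Addr) { return (Addr + 0x8000) >> 16; }
  static unsigned lo16(unsigned Addr) { return Addr & 0xFFFF; }
  // Addr = 0x1234F000: ha16 = 0x1235, lo16 = 0xF000 (sign-extends to -0x1000),
  // and (0x1235 << 16) + (int16_t)0xF000 == 0x1234F000 again.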
+
+unsigned PPCCodeEmitter::getMemRIEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+  // Encode (imm, reg) as a memri, which has the low 16 bits as the
+  // displacement and the next 5 bits as the register #.
+ assert(MI.getOperand(OpNo+1).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1)) << 16;
+
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm())
+ return (getMachineOpValue(MI, MO) & 0xFFFF) | RegBits;
+
+ // Add a fixup for the displacement field.
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low));
+ return RegBits;
+}
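
The bit packing above is easy to verify with concrete values; a hedged
illustration (encodeMemRI is a hypothetical helper, not part of the patch):

  // The base register number sits directly above the 16 displacement bits.
  static unsigned encodeMemRI(unsigned RegNo, int Disp) {
    return (RegNo << 16) | (unsigned(Disp) & 0xFFFF);
  }
  // encodeMemRI(1, -8) == 0x1FFF8: base r1, 16-bit displacement 0xFFF8 (-8).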
+
+unsigned PPCCodeEmitter::getMemRIXEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+  // Encode (imm, reg) as a memrix, which has the low 14 bits as the
+  // displacement and the next 5 bits as the register #.
+ assert(MI.getOperand(OpNo+1).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1)) << 14;
+
+ const MachineOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm())
+ return (getMachineOpValue(MI, MO) & 0x3FFF) | RegBits;
+
+ MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low_ix));
+ return RegBits;
+}
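
Note the memrix case masks with 0x3FFF but performs no shift: DS-form operands
arrive already divided by 4 (see the FPOffset/4 immediates built in
emitPrologue below). A hedged companion sketch (encodeMemRIX is hypothetical):

  static unsigned encodeMemRIX(unsigned RegNo, int ScaledDisp) {
    return (RegNo << 14) | (unsigned(ScaledDisp) & 0x3FFF);
  }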
+
+
+unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
+ const MachineOperand &MO) const {
+
+ if (MO.isReg()) {
+ // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
+ // The GPR operand should come through here though.
+ assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) ||
+ MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7);
+ return PPCRegisterInfo::getRegisterNumbering(MO.getReg());
+ }
+
+ assert(MO.isImm() &&
+ "Relocation required in an instruction that we cannot encode!");
+ return MO.getImm();
}
#include "PPCGenCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFixupKinds.h b/contrib/llvm/lib/Target/PowerPC/PPCFixupKinds.h
new file mode 100644
index 0000000..b3c889e
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFixupKinds.h
@@ -0,0 +1,45 @@
+//===-- PPCFixupKinds.h - PPC Specific Fixup Entries ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PPC_PPCFIXUPKINDS_H
+#define LLVM_PPC_PPCFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace PPC {
+enum Fixups {
+  /// fixup_ppc_br24 - 24-bit PC relative relocation for direct branches like
+  /// 'b' and 'bl'.
+  fixup_ppc_br24 = FirstTargetFixupKind,
+
+ /// fixup_ppc_brcond14 - 14-bit PC relative relocation for conditional
+ /// branches.
+ fixup_ppc_brcond14,
+
+ /// fixup_ppc_lo16 - A 16-bit fixup corresponding to lo16(_foo) for instrs
+ /// like 'li'.
+ fixup_ppc_lo16,
+
+ /// fixup_ppc_ha16 - A 16-bit fixup corresponding to ha16(_foo) for instrs
+ /// like 'lis'.
+ fixup_ppc_ha16,
+
+  /// fixup_ppc_lo14 - A 14-bit fixup corresponding to lo16(_foo) with implied
+  /// 2 zero bits, for instrs like 'std'.
+ fixup_ppc_lo14,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+}
+}
+
+#endif
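
These enumerators are intended to be cast to MCFixupKind when a fixup is
recorded against an instruction operand. A minimal sketch, assuming the
MCFixup::Create(Offset, Expr, Kind) signature of this LLVM vintage:

  // Hedged example: attach a 24-bit branch fixup to a symbolic expression.
  MCFixup makeBr24Fixup(const MCExpr *Expr) {
    return MCFixup::Create(0, Expr, (MCFixupKind)PPC::fixup_ppc_br24);
  }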
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
new file mode 100644
index 0000000..6aca6b0
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -0,0 +1,971 @@
+//=====- PPCFrameLowering.cpp - PPC Frame Information -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PPC implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCFrameLowering.h"
+#include "PPCInstrInfo.h"
+#include "PPCMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+// FIXME This disables some code that aligns the stack to a boundary bigger than
+// the default (16 bytes on Darwin) when there is a stack local of greater
+// alignment. This does not currently work, because the delta between old and
+// new stack pointers is added to offsets that reference incoming parameters
+// after the prolog is generated, and the code that does that doesn't handle a
+// variable delta. You don't want to do that anyway; a better approach is to
+// reserve another register that retains the incoming stack pointer, and
+// reference parameters relative to that.
+#define ALIGN_STACK 0
+
+
+/// VRRegNo - Map from a numbered VR register to its enum value.
+///
+static const unsigned short VRRegNo[] = {
+ PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
+ PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, PPC::V12, PPC::V13, PPC::V14, PPC::V15,
+ PPC::V16, PPC::V17, PPC::V18, PPC::V19, PPC::V20, PPC::V21, PPC::V22, PPC::V23,
+ PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
+};
+
+/// RemoveVRSaveCode - We have found that this function does not need any code
+/// to manipulate the VRSAVE register, even though it uses vector registers.
+/// This can happen when the only registers used are known to be live in or out
+/// of the function. Remove all of the VRSAVE related code from the function.
+static void RemoveVRSaveCode(MachineInstr *MI) {
+ MachineBasicBlock *Entry = MI->getParent();
+ MachineFunction *MF = Entry->getParent();
+
+ // We know that the MTVRSAVE instruction immediately follows MI. Remove it.
+ MachineBasicBlock::iterator MBBI = MI;
+ ++MBBI;
+ assert(MBBI != Entry->end() && MBBI->getOpcode() == PPC::MTVRSAVE);
+ MBBI->eraseFromParent();
+
+ bool RemovedAllMTVRSAVEs = true;
+ // See if we can find and remove the MTVRSAVE instruction from all of the
+ // epilog blocks.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
+    // If the last instruction is a return, this block ends in an epilog;
+    // look for the MTVRSAVE to remove there.
+ if (!I->empty() && I->back().getDesc().isReturn()) {
+ bool FoundIt = false;
+ for (MBBI = I->end(); MBBI != I->begin(); ) {
+ --MBBI;
+ if (MBBI->getOpcode() == PPC::MTVRSAVE) {
+ MBBI->eraseFromParent(); // remove it.
+ FoundIt = true;
+ break;
+ }
+ }
+ RemovedAllMTVRSAVEs &= FoundIt;
+ }
+ }
+
+ // If we found and removed all MTVRSAVE instructions, remove the read of
+ // VRSAVE as well.
+ if (RemovedAllMTVRSAVEs) {
+ MBBI = MI;
+ assert(MBBI != Entry->begin() && "UPDATE_VRSAVE is first instr in block?");
+ --MBBI;
+ assert(MBBI->getOpcode() == PPC::MFVRSAVE && "VRSAVE instrs wandered?");
+ MBBI->eraseFromParent();
+ }
+
+ // Finally, nuke the UPDATE_VRSAVE.
+ MI->eraseFromParent();
+}
+
+// HandleVRSaveUpdate - MI is the UPDATE_VRSAVE instruction introduced by the
+// instruction selector. Based on the vector registers that have been used,
+// transform this into the appropriate ORI/ORIS instructions.
+static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) {
+ MachineFunction *MF = MI->getParent()->getParent();
+ DebugLoc dl = MI->getDebugLoc();
+
+ unsigned UsedRegMask = 0;
+ for (unsigned i = 0; i != 32; ++i)
+ if (MF->getRegInfo().isPhysRegUsed(VRRegNo[i]))
+ UsedRegMask |= 1 << (31-i);
+
+  // Live-in and live-out values must already be in the mask, so don't bother
+  // marking them.
+ for (MachineRegisterInfo::livein_iterator
+ I = MF->getRegInfo().livein_begin(),
+ E = MF->getRegInfo().livein_end(); I != E; ++I) {
+ unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(I->first);
+ if (VRRegNo[RegNo] == I->first) // If this really is a vector reg.
+ UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
+ }
+ for (MachineRegisterInfo::liveout_iterator
+ I = MF->getRegInfo().liveout_begin(),
+ E = MF->getRegInfo().liveout_end(); I != E; ++I) {
+ unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(*I);
+ if (VRRegNo[RegNo] == *I) // If this really is a vector reg.
+ UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
+ }
+
+ // If no registers are used, turn this into a copy.
+ if (UsedRegMask == 0) {
+ // Remove all VRSAVE code.
+ RemoveVRSaveCode(MI);
+ return;
+ }
+
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ unsigned DstReg = MI->getOperand(0).getReg();
+
+ if ((UsedRegMask & 0xFFFF) == UsedRegMask) {
+ if (DstReg != SrcReg)
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
+ .addReg(SrcReg)
+ .addImm(UsedRegMask);
+ else
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
+ .addReg(SrcReg, RegState::Kill)
+ .addImm(UsedRegMask);
+ } else if ((UsedRegMask & 0xFFFF0000) == UsedRegMask) {
+ if (DstReg != SrcReg)
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
+ .addReg(SrcReg)
+ .addImm(UsedRegMask >> 16);
+ else
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
+ .addReg(SrcReg, RegState::Kill)
+ .addImm(UsedRegMask >> 16);
+ } else {
+ if (DstReg != SrcReg)
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
+ .addReg(SrcReg)
+ .addImm(UsedRegMask >> 16);
+ else
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
+ .addReg(SrcReg, RegState::Kill)
+ .addImm(UsedRegMask >> 16);
+
+ BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
+ .addReg(DstReg, RegState::Kill)
+ .addImm(UsedRegMask & 0xFFFF);
+ }
+
+ // Remove the old UPDATE_VRSAVE instruction.
+ MI->eraseFromParent();
+}
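
The ORIS/ORI split above can be sanity-checked with a mask that touches both
halves; a hedged example with illustrative values only:

  static const unsigned UsedRegMaskEx = (1u << 31) | (1u << 0); // V0 and V31
  static const unsigned OrisImm = UsedRegMaskEx >> 16;          // 0x8000
  static const unsigned OriImm  = UsedRegMaskEx & 0xFFFF;       // 0x0001
  // => oris Dst, Src, 0x8000 followed by ori Dst, Dst, 0x0001, since
  //    ori/oris each take only a 16-bit immediate.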
+
+/// determineFrameLayout - Determine the size of the frame and maximum call
+/// frame size.
+void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo
+ unsigned FrameSize = MFI->getStackSize();
+
+ // Get the alignments provided by the target, and the maximum alignment
+ // (if any) of the fixed frame objects.
+ unsigned MaxAlign = MFI->getMaxAlignment();
+ unsigned TargetAlign = getStackAlignment();
+  unsigned AlignMask = TargetAlign - 1;
+
+  // If this is a leaf function that uses at most 224 bytes of stack, has no
+  // frame pointer, no calls, and no dynamic allocas, then we do not need to
+  // adjust the stack pointer (we fit in the Red Zone).
+ bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone);
+ // FIXME SVR4 The 32-bit SVR4 ABI has no red zone.
+ if (!DisableRedZone &&
+ FrameSize <= 224 && // Fits in red zone.
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
+ // No need for frame
+ MFI->setStackSize(0);
+ return;
+ }
+
+ // Get the maximum call frame size of all the calls.
+ unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
+
+ // Maximum call frame needs to be at least big enough for linkage and 8 args.
+ unsigned minCallFrameSize = getMinCallFrameSize(Subtarget.isPPC64(),
+ Subtarget.isDarwinABI());
+ maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
+
+ // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
+ // that allocations will be aligned.
+ if (MFI->hasVarSizedObjects())
+ maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
+
+ // Update maximum call frame size.
+ MFI->setMaxCallFrameSize(maxCallFrameSize);
+
+ // Include call frame size in total.
+ FrameSize += maxCallFrameSize;
+
+ // Make sure the frame is aligned.
+ FrameSize = (FrameSize + AlignMask) & ~AlignMask;
+
+ // Update frame info.
+ MFI->setStackSize(FrameSize);
+}
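
A hedged worked example of the rounding above, with the default 16-byte
TargetAlign (so AlignMask == 15):

  // 228 bytes of locals plus a 64-byte maximum call frame:
  //   FrameSize = 228 + 64 = 292
  //   (292 + 15) & ~15 == 304, the next multiple of 16.
  static unsigned roundUpTo16(unsigned Size) { return (Size + 15u) & ~15u; }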
+
+// hasFP - Return true if the specified function actually has a dedicated frame
+// pointer register.
+bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+  // FIXME: This is broken by design: hasFP() can be called before the stack
+  // layout has been calculated, so its result depends on when it is called.
+ return (MFI->getStackSize()) && needsFP(MF);
+}
+
+// needsFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Naked functions have no stack frame pushed, so we don't have a frame
+ // pointer.
+ if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+ return false;
+
+ return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() ||
+ (GuaranteedTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
+}
+
+
+void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ MachineModuleInfo &MMI = MF.getMMI();
+ DebugLoc dl;
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
+ !MF.getFunction()->doesNotThrow() ||
+ UnwindTablesMandatory;
+
+ // Prepare for frame info.
+ MCSymbol *FrameLabel = 0;
+
+ // Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
+ // process it.
+ for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
+ if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
+ HandleVRSaveUpdate(MBBI, TII);
+ break;
+ }
+ }
+
+ // Move MBBI back to the beginning of the function.
+ MBBI = MBB.begin();
+
+ // Work out frame sizes.
+  // FIXME: determineFrameLayout() may change the frame size. This should be
+  // hoisted higher up, into some earlier hook.
+ determineFrameLayout(MF);
+ unsigned FrameSize = MFI->getStackSize();
+
+ int NegFrameSize = -FrameSize;
+
+ // Get processor type.
+ bool isPPC64 = Subtarget.isPPC64();
+  // Get the operating system ABI.
+ bool isDarwinABI = Subtarget.isDarwinABI();
+ // Check if the link register (LR) must be saved.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ bool MustSaveLR = FI->mustSaveLR();
+ // Do we have a frame pointer for this function?
+ bool HasFP = hasFP(MF);
+
+ int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
+
+ int FPOffset = 0;
+ if (HasFP) {
+ if (Subtarget.isSVR4ABI()) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int FPIndex = FI->getFramePointerSaveIndex();
+ assert(FPIndex && "No Frame Pointer Save Slot!");
+ FPOffset = FFI->getObjectOffset(FPIndex);
+ } else {
+ FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ }
+ }
+
+ if (isPPC64) {
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR8), PPC::X0);
+
+ if (HasFP)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
+ .addReg(PPC::X31)
+ .addImm(FPOffset/4)
+ .addReg(PPC::X1);
+
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
+ .addReg(PPC::X0)
+ .addImm(LROffset / 4)
+ .addReg(PPC::X1);
+ } else {
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR), PPC::R0);
+
+ if (HasFP)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
+ .addReg(PPC::R31)
+ .addImm(FPOffset)
+ .addReg(PPC::R1);
+
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
+ .addReg(PPC::R0)
+ .addImm(LROffset)
+ .addReg(PPC::R1);
+ }
+
+ // Skip if a leaf routine.
+ if (!FrameSize) return;
+
+ // Get stack alignments.
+ unsigned TargetAlign = getStackAlignment();
+ unsigned MaxAlign = MFI->getMaxAlignment();
+
+ // Adjust stack pointer: r1 += NegFrameSize.
+ // If there is a preferred stack alignment, align R1 now
+ if (!isPPC64) {
+ // PPC32.
+ if (ALIGN_STACK && MaxAlign > TargetAlign) {
+ assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+ "Invalid alignment!");
+ assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
+
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0)
+ .addReg(PPC::R1)
+ .addImm(0)
+ .addImm(32 - Log2_32(MaxAlign))
+ .addImm(31);
+      BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC), PPC::R0)
+ .addReg(PPC::R0, RegState::Kill)
+ .addImm(NegFrameSize);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
+ .addReg(PPC::R1)
+ .addReg(PPC::R1)
+ .addReg(PPC::R0);
+ } else if (isInt<16>(NegFrameSize)) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
+ .addReg(PPC::R1)
+ .addImm(NegFrameSize)
+ .addReg(PPC::R1);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
+ .addImm(NegFrameSize >> 16);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
+ .addReg(PPC::R0, RegState::Kill)
+ .addImm(NegFrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
+ .addReg(PPC::R1)
+ .addReg(PPC::R1)
+ .addReg(PPC::R0);
+ }
+ } else { // PPC64.
+ if (ALIGN_STACK && MaxAlign > TargetAlign) {
+ assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+ "Invalid alignment!");
+ assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
+
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0)
+ .addReg(PPC::X1)
+ .addImm(0)
+ .addImm(64 - Log2_32(MaxAlign));
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0)
+ .addReg(PPC::X0)
+ .addImm(NegFrameSize);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
+ .addReg(PPC::X1)
+ .addReg(PPC::X1)
+ .addReg(PPC::X0);
+ } else if (isInt<16>(NegFrameSize)) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
+ .addReg(PPC::X1)
+ .addImm(NegFrameSize / 4)
+ .addReg(PPC::X1);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
+ .addImm(NegFrameSize >> 16);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
+ .addReg(PPC::X0, RegState::Kill)
+ .addImm(NegFrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
+ .addReg(PPC::X1)
+ .addReg(PPC::X1)
+ .addReg(PPC::X0);
+ }
+ }
+
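A hedged check of the lis/ori path above: ori zero-extends its 16-bit
immediate, so any 32-bit value splits losslessly into the pair. For an
illustrative NegFrameSize = -70000 (0xFFFEEE90):

  // lis r0, 0xFFFE      -> r0 = 0xFFFE0000
  // ori r0, r0, 0xEE90  -> r0 = 0xFFFEEE90 == -70000
  // stwux/stdux then stores the back chain at r1+r0 and updates r1.
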
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+
+ // Add the "machine moves" for the instructions we generated above, but in
+ // reverse order.
+ if (needsFrameMoves) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(FrameLabel);
+
+ // Show update of SP.
+ if (NegFrameSize) {
+ MachineLocation SPDst(MachineLocation::VirtualFP);
+ MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ } else {
+ MachineLocation SP(isPPC64 ? PPC::X31 : PPC::R31);
+ Moves.push_back(MachineMove(FrameLabel, SP, SP));
+ }
+
+ if (HasFP) {
+ MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
+ MachineLocation FPSrc(isPPC64 ? PPC::X31 : PPC::R31);
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ }
+
+ if (MustSaveLR) {
+ MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
+ MachineLocation LRSrc(isPPC64 ? PPC::LR8 : PPC::LR);
+ Moves.push_back(MachineMove(FrameLabel, LRDst, LRSrc));
+ }
+ }
+
+ MCSymbol *ReadyLabel = 0;
+
+ // If there is a frame pointer, copy R1 into R31
+ if (HasFP) {
+ if (!isPPC64) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::OR), PPC::R31)
+ .addReg(PPC::R1)
+ .addReg(PPC::R1);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::OR8), PPC::X31)
+ .addReg(PPC::X1)
+ .addReg(PPC::X1);
+ }
+
+ if (needsFrameMoves) {
+ ReadyLabel = MMI.getContext().CreateTempSymbol();
+
+ // Mark effective beginning of when frame pointer is ready.
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(ReadyLabel);
+
+ MachineLocation FPDst(HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) :
+ (isPPC64 ? PPC::X1 : PPC::R1));
+ MachineLocation FPSrc(MachineLocation::VirtualFP);
+ Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
+ }
+ }
+
+ if (needsFrameMoves) {
+ MCSymbol *Label = HasFP ? ReadyLabel : FrameLabel;
+
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
+ unsigned Reg = CSI[I].getReg();
+ if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
+ MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
+ MachineLocation CSSrc(Reg);
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ }
+ }
+}
+
+void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI != MBB.end() && "Returning block has no terminator");
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo*>(MF.getTarget().getInstrInfo());
+
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc dl;
+
+ assert((RetOpcode == PPC::BLR ||
+ RetOpcode == PPC::TCRETURNri ||
+ RetOpcode == PPC::TCRETURNdi ||
+ RetOpcode == PPC::TCRETURNai ||
+ RetOpcode == PPC::TCRETURNri8 ||
+ RetOpcode == PPC::TCRETURNdi8 ||
+ RetOpcode == PPC::TCRETURNai8) &&
+ "Can only insert epilog into returning blocks");
+
+ // Get alignment info so we know how to restore r1
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned TargetAlign = getStackAlignment();
+ unsigned MaxAlign = MFI->getMaxAlignment();
+
+ // Get the number of bytes allocated from the FrameInfo.
+ int FrameSize = MFI->getStackSize();
+
+ // Get processor type.
+ bool isPPC64 = Subtarget.isPPC64();
+  // Get the operating system ABI.
+ bool isDarwinABI = Subtarget.isDarwinABI();
+ // Check if the link register (LR) has been saved.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ bool MustSaveLR = FI->mustSaveLR();
+ // Do we have a frame pointer for this function?
+ bool HasFP = hasFP(MF);
+
+ int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
+
+ int FPOffset = 0;
+ if (HasFP) {
+ if (Subtarget.isSVR4ABI()) {
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ int FPIndex = FI->getFramePointerSaveIndex();
+ assert(FPIndex && "No Frame Pointer Save Slot!");
+ FPOffset = FFI->getObjectOffset(FPIndex);
+ } else {
+ FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ }
+ }
+
+ bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
+ RetOpcode == PPC::TCRETURNdi ||
+ RetOpcode == PPC::TCRETURNai ||
+ RetOpcode == PPC::TCRETURNri8 ||
+ RetOpcode == PPC::TCRETURNdi8 ||
+ RetOpcode == PPC::TCRETURNai8;
+
+ if (UsesTCRet) {
+ int MaxTCRetDelta = FI->getTailCallSPDelta();
+ MachineOperand &StackAdjust = MBBI->getOperand(1);
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
+ // Adjust stack pointer.
+ int StackAdj = StackAdjust.getImm();
+ int Delta = StackAdj - MaxTCRetDelta;
+    assert((Delta >= 0) && "Delta must be non-negative");
+    if (MaxTCRetDelta > 0)
+      FrameSize += (StackAdj + Delta);
+    else
+      FrameSize += StackAdj;
+ }
+
+ if (FrameSize) {
+ // The loaded (or persistent) stack pointer value is offset by the 'stwu'
+ // on entry to the function. Add this offset back now.
+ if (!isPPC64) {
+ // If this function contained a fastcc call and GuaranteedTailCallOpt is
+ // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
+ // call which invalidates the stack pointer value in SP(0). So we use the
+ // value of R31 in this case.
+ if (FI->hasFastCall() && isInt<16>(FrameSize)) {
+        assert(hasFP(MF) && "Expecting a valid frame pointer.");
+        BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
+          .addReg(PPC::R31).addImm(FrameSize);
+      } else if (FI->hasFastCall()) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
+ .addImm(FrameSize >> 16);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
+ .addReg(PPC::R0, RegState::Kill)
+ .addImm(FrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4))
+ .addReg(PPC::R1)
+ .addReg(PPC::R31)
+ .addReg(PPC::R0);
+ } else if (isInt<16>(FrameSize) &&
+ (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
+ !MFI->hasVarSizedObjects()) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
+ .addReg(PPC::R1).addImm(FrameSize);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ),PPC::R1)
+ .addImm(0).addReg(PPC::R1);
+ }
+ } else {
+ if (FI->hasFastCall() && isInt<16>(FrameSize)) {
+        assert(hasFP(MF) && "Expecting a valid frame pointer.");
+        BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
+          .addReg(PPC::X31).addImm(FrameSize);
+      } else if (FI->hasFastCall()) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
+ .addImm(FrameSize >> 16);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
+ .addReg(PPC::X0, RegState::Kill)
+ .addImm(FrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD8))
+ .addReg(PPC::X1)
+ .addReg(PPC::X31)
+ .addReg(PPC::X0);
+ } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign &&
+ !MFI->hasVarSizedObjects()) {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
+ .addReg(PPC::X1).addImm(FrameSize);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X1)
+ .addImm(0).addReg(PPC::X1);
+ }
+ }
+ }
+
+ if (isPPC64) {
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X0)
+ .addImm(LROffset/4).addReg(PPC::X1);
+
+ if (HasFP)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31)
+ .addImm(FPOffset/4).addReg(PPC::X1);
+
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR8)).addReg(PPC::X0);
+ } else {
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R0)
+ .addImm(LROffset).addReg(PPC::R1);
+
+ if (HasFP)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R31)
+ .addImm(FPOffset).addReg(PPC::R1);
+
+ if (MustSaveLR)
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR)).addReg(PPC::R0);
+ }
+
+  // Callee-pop calling convention: pop the parameter/linkage area. Used for
+  // guaranteed tail-call optimization.
+ if (GuaranteedTailCallOpt && RetOpcode == PPC::BLR &&
+ MF.getFunction()->getCallingConv() == CallingConv::Fast) {
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ unsigned CallerAllocatedAmt = FI->getMinReservedArea();
+ unsigned StackReg = isPPC64 ? PPC::X1 : PPC::R1;
+ unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31;
+ unsigned TmpReg = isPPC64 ? PPC::X0 : PPC::R0;
+ unsigned ADDIInstr = isPPC64 ? PPC::ADDI8 : PPC::ADDI;
+ unsigned ADDInstr = isPPC64 ? PPC::ADD8 : PPC::ADD4;
+ unsigned LISInstr = isPPC64 ? PPC::LIS8 : PPC::LIS;
+ unsigned ORIInstr = isPPC64 ? PPC::ORI8 : PPC::ORI;
+
+ if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
+ BuildMI(MBB, MBBI, dl, TII.get(ADDIInstr), StackReg)
+ .addReg(StackReg).addImm(CallerAllocatedAmt);
+ } else {
+ BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
+ .addImm(CallerAllocatedAmt >> 16);
+ BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
+ .addReg(TmpReg, RegState::Kill)
+ .addImm(CallerAllocatedAmt & 0xFFFF);
+ BuildMI(MBB, MBBI, dl, TII.get(ADDInstr))
+ .addReg(StackReg)
+ .addReg(FPReg)
+ .addReg(TmpReg);
+ }
+ } else if (RetOpcode == PPC::TCRETURNdi) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri) {
+ MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
+ } else if (RetOpcode == PPC::TCRETURNai) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
+ } else if (RetOpcode == PPC::TCRETURNdi8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
+ } else if (RetOpcode == PPC::TCRETURNai8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
+ }
+}
+
+void PPCFrameLowering::getInitialFrameState(std::vector<MachineMove> &Moves) const {
+ // Initial state of the frame pointer is R1.
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(PPC::R1, 0);
+ Moves.push_back(MachineMove(0, Dst, Src));
+}
+
+static bool spillsCR(const MachineFunction &MF) {
+ const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ return FuncInfo->isCRSpilled();
+}
+
+/// MustSaveLR - Return true if this function requires that we save the LR
+/// register onto the stack in the prolog and restore it in the epilog of the
+/// function.
+static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
+ const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
+
+ // We need a save/restore of LR if there is any def of LR (which is
+ // defined by calls, including the PIC setup sequence), or if there is
+ // some use of the LR stack slot (e.g. for builtin_return_address).
+ // (LR comes in 32 and 64 bit versions.)
+ MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
+  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
+}
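
A hedged illustration of the second condition: a leaf function can require the
LR save slot without any instruction defining LR. For example (hypothetical
source, assuming the return-address lowering sets the isLRStoreRequired flag):

  // No call defines LR here, but reading the return address forces the
  // LR stack slot to exist, so MustSaveLR() still returns true.
  void *caller_pc(void) { return __builtin_return_address(0); }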
+
+void
+PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+
+ // Save and clear the LR state.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ unsigned LR = RegInfo->getRARegister();
+ FI->setMustSaveLR(MustSaveLR(MF, LR));
+ MF.getRegInfo().setPhysRegUnused(LR);
+
+ // Save R31 if necessary
+ int FPSI = FI->getFramePointerSaveIndex();
+ bool isPPC64 = Subtarget.isPPC64();
+ bool isDarwinABI = Subtarget.isDarwinABI();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+  // If the frame pointer save index hasn't been defined yet, create it now.
+  if (!FPSI && needsFP(MF)) {
+    // Find out the fixed offset of the frame pointer save area.
+ int FPOffset = getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ // Allocate the frame index for frame pointer save area.
+ FPSI = MFI->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
+ // Save the result.
+ FI->setFramePointerSaveIndex(FPSI);
+ }
+
+ // Reserve stack space to move the linkage area to in case of a tail call.
+ int TCSPDelta = 0;
+ if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
+ MFI->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
+ }
+
+ // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
+ // a large stack, which will require scavenging a register to materialize a
+ // large offset.
+ // FIXME: this doesn't actually check stack size, so is a bit pessimistic
+ // FIXME: doesn't detect whether or not we need to spill vXX, which requires
+ // r0 for now.
+
+ if (RegInfo->requiresRegisterScavenging(MF)) // FIXME (64-bit): Enable.
+ if (needsFP(MF) || spillsCR(MF)) {
+ const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+ const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
+ const TargetRegisterClass *RC = isPPC64 ? G8RC : GPRC;
+ RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ }
+}
+
+void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
+ const {
+ // Early exit if not using the SVR4 ABI.
+ if (!Subtarget.isSVR4ABI())
+ return;
+
+ // Get callee saved register information.
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ const std::vector<CalleeSavedInfo> &CSI = FFI->getCalleeSavedInfo();
+
+ // Early exit if no callee saved registers are modified!
+ if (CSI.empty() && !needsFP(MF)) {
+ return;
+ }
+
+ unsigned MinGPR = PPC::R31;
+ unsigned MinG8R = PPC::X31;
+ unsigned MinFPR = PPC::F31;
+ unsigned MinVR = PPC::V31;
+
+ bool HasGPSaveArea = false;
+ bool HasG8SaveArea = false;
+ bool HasFPSaveArea = false;
+ bool HasCRSaveArea = false;
+ bool HasVRSAVESaveArea = false;
+ bool HasVRSaveArea = false;
+
+ SmallVector<CalleeSavedInfo, 18> GPRegs;
+ SmallVector<CalleeSavedInfo, 18> G8Regs;
+ SmallVector<CalleeSavedInfo, 18> FPRegs;
+ SmallVector<CalleeSavedInfo, 18> VRegs;
+
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (PPC::GPRCRegisterClass->contains(Reg)) {
+ HasGPSaveArea = true;
+
+ GPRegs.push_back(CSI[i]);
+
+ if (Reg < MinGPR) {
+ MinGPR = Reg;
+ }
+ } else if (PPC::G8RCRegisterClass->contains(Reg)) {
+ HasG8SaveArea = true;
+
+ G8Regs.push_back(CSI[i]);
+
+ if (Reg < MinG8R) {
+ MinG8R = Reg;
+ }
+ } else if (PPC::F8RCRegisterClass->contains(Reg)) {
+ HasFPSaveArea = true;
+
+ FPRegs.push_back(CSI[i]);
+
+ if (Reg < MinFPR) {
+ MinFPR = Reg;
+ }
+// FIXME SVR4: Disable CR save area for now.
+ } else if (PPC::CRBITRCRegisterClass->contains(Reg)
+ || PPC::CRRCRegisterClass->contains(Reg)) {
+// HasCRSaveArea = true;
+ } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ HasVRSAVESaveArea = true;
+ } else if (PPC::VRRCRegisterClass->contains(Reg)) {
+ HasVRSaveArea = true;
+
+ VRegs.push_back(CSI[i]);
+
+ if (Reg < MinVR) {
+ MinVR = Reg;
+ }
+ } else {
+ llvm_unreachable("Unknown RegisterClass!");
+ }
+ }
+
+ PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
+
+ int64_t LowerBound = 0;
+
+ // Take into account stack space reserved for tail calls.
+ int TCSPDelta = 0;
+ if (GuaranteedTailCallOpt && (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
+ LowerBound = TCSPDelta;
+ }
+
+ // The Floating-point register save area is right below the back chain word
+ // of the previous stack frame.
+ if (HasFPSaveArea) {
+ for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
+ int FI = FPRegs[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
+ LowerBound -= (31 - PPCRegisterInfo::getRegisterNumbering(MinFPR) + 1) * 8;
+ }
+
+ // Check whether the frame pointer register is allocated. If so, make sure it
+ // is spilled to the correct offset.
+ if (needsFP(MF)) {
+ HasGPSaveArea = true;
+
+ int FI = PFI->getFramePointerSaveIndex();
+ assert(FI && "No Frame Pointer Save Slot!");
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
+ // General register save area starts right below the Floating-point
+ // register save area.
+ if (HasGPSaveArea || HasG8SaveArea) {
+ // Move general register save area spill slots down, taking into account
+ // the size of the Floating-point register save area.
+ for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
+ int FI = GPRegs[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
+    // Likewise for the 64-bit (G8) register save area spill slots.
+ for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
+ int FI = G8Regs[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+
+ unsigned MinReg =
+ std::min<unsigned>(PPCRegisterInfo::getRegisterNumbering(MinGPR),
+ PPCRegisterInfo::getRegisterNumbering(MinG8R));
+
+ if (Subtarget.isPPC64()) {
+ LowerBound -= (31 - MinReg + 1) * 8;
+ } else {
+ LowerBound -= (31 - MinReg + 1) * 4;
+ }
+ }
+
+ // The CR save area is below the general register save area.
+ if (HasCRSaveArea) {
+ // FIXME SVR4: Is it actually possible to have multiple elements in CSI
+ // which have the CR/CRBIT register class?
+ // Adjust the frame index of the CR spill slot.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ if (PPC::CRBITRCRegisterClass->contains(Reg) ||
+ PPC::CRRCRegisterClass->contains(Reg)) {
+ int FI = CSI[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+ }
+
+ LowerBound -= 4; // The CR save area is always 4 bytes long.
+ }
+
+ if (HasVRSAVESaveArea) {
+ // FIXME SVR4: Is it actually possible to have multiple elements in CSI
+ // which have the VRSAVE register class?
+ // Adjust the frame index of the VRSAVE spill slot.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
+ int FI = CSI[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+ }
+
+ LowerBound -= 4; // The VRSAVE save area is always 4 bytes long.
+ }
+
+ if (HasVRSaveArea) {
+    // Insert alignment padding; the VR save area needs 16-byte alignment.
+    LowerBound = (LowerBound - 15) & ~15;
+
+ for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
+ int FI = VRegs[i].getFrameIdx();
+
+ FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+ }
+ }
+}
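
A hedged worked example of the LowerBound bookkeeping in this function,
assuming r28-r31 are the callee-saved GPRs of a 32-bit SVR4 function (so
MinGPR = PPC::R28):

  // GPR save area: registers MinGPR..31, 4 bytes each on PPC32.
  unsigned GPRAreaSize = (31 - 28 + 1) * 4;  // == 16 bytes
  // LowerBound drops by 16 here, before the CR (4 bytes), VRSAVE (4 bytes),
  // and 16-byte-aligned VR areas are laid out below it.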
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
index 7587b03..0c18de1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
@@ -1,4 +1,4 @@
-//===-- PPCFrameInfo.h - Define TargetFrameInfo for PowerPC -----*- C++ -*-===//
+//==-- PPCFrameLowering.h - Define frame lowering for PowerPC ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -15,20 +15,42 @@
#include "PPC.h"
#include "PPCSubtarget.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
namespace llvm {
+ class PPCSubtarget;
-class PPCFrameInfo: public TargetFrameInfo {
- const TargetMachine &TM;
+class PPCFrameLowering: public TargetFrameLowering {
+ const PPCSubtarget &Subtarget;
public:
- PPCFrameInfo(const TargetMachine &tm, bool LP64)
- : TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 16, 0), TM(tm) {
+ PPCFrameLowering(const PPCSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 16, 0),
+ Subtarget(sti) {
}
+ void determineFrameLayout(MachineFunction &MF) const;
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+ bool needsFP(const MachineFunction &MF) const;
+ void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const;
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ bool targetHandlesStackFrameRounding() const { return true; }
+
/// getReturnSaveOffset - Return the previous frame offset to save the
/// return address.
static unsigned getReturnSaveOffset(bool isPPC64, bool isDarwinABI) {
@@ -48,17 +70,17 @@ public:
// around that does use it, and that needs to continue to work.
if (isDarwinABI)
return isPPC64 ? -8U : -4U;
-
+
// SVR4 ABI: First slot in the general register save area.
return isPPC64 ? -8U : -4U;
}
-
+
/// getLinkageSize - Return the size of the PowerPC ABI linkage area.
///
static unsigned getLinkageSize(bool isPPC64, bool isDarwinABI) {
if (isDarwinABI || isPPC64)
return 6 * (isPPC64 ? 8 : 4);
-
+
// SVR4 ABI:
return 8;
}
@@ -74,7 +96,7 @@ public:
// least enough stack space for the caller to store the 8 GPRs.
if (isDarwinABI || isPPC64)
return 8 * (isPPC64 ? 8 : 4);
-
+
// 32-bit SVR4 ABI:
// There is no default stack allocated for the 8 first GPR arguments.
return 0;
@@ -91,9 +113,9 @@ public:
// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const {
- if (TM.getSubtarget<PPCSubtarget>().isDarwinABI()) {
+ if (Subtarget.isDarwinABI()) {
NumEntries = 1;
- if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
+ if (Subtarget.isPPC64()) {
static const SpillSlot darwin64Offsets = {PPC::X31, -8};
return &darwin64Offsets;
} else {
@@ -103,7 +125,7 @@ public:
}
// Early exit if not using the SVR4 ABI.
- if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()) {
+ if (!Subtarget.isSVR4ABI()) {
NumEntries = 0;
return 0;
}
@@ -283,7 +305,7 @@ public:
{PPC::V20, -192}
};
- if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
+ if (Subtarget.isPPC64()) {
NumEntries = array_lengthof(Offsets64);
return Offsets64;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index db11fde..0de5844 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
//
// This models the dispatch group formation of the PPC970 processor. Dispatch
// groups are bundles of up to five instructions that can contain various mixes
-// of instructions. The PPC970 can dispatch a peak of 4 non-branch and one
+// of instructions. The PPC970 can dispatch a peak of 4 non-branch and one
// branch instruction per cycle.
//
// There are a number of restrictions to dispatch group formation: some
@@ -55,14 +55,14 @@ PPCHazardRecognizer970::PPCHazardRecognizer970(const TargetInstrInfo &tii)
void PPCHazardRecognizer970::EndDispatchGroup() {
DEBUG(errs() << "=== Start of dispatch group\n");
NumIssued = 0;
-
+
// Structural hazard info.
HasCTRSet = false;
NumStores = 0;
}
-PPCII::PPC970_Unit
+PPCII::PPC970_Unit
PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,
bool &isCracked,
@@ -72,14 +72,14 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
return PPCII::PPC970_Pseudo;
}
Opcode = ~Opcode;
-
+
const TargetInstrDesc &TID = TII.get(Opcode);
-
+
isLoad = TID.mayLoad();
isStore = TID.mayStore();
-
+
uint64_t TSFlags = TID.TSFlags;
-
+
isFirst = TSFlags & PPCII::PPC970_First;
isSingle = TSFlags & PPCII::PPC970_Single;
isCracked = TSFlags & PPCII::PPC970_Cracked;
@@ -96,7 +96,7 @@ isLoadOfStoredAddress(unsigned LoadSize, SDValue Ptr1, SDValue Ptr2) const {
return true;
if (Ptr2 == StorePtr1[i] && Ptr1 == StorePtr2[i])
return true;
-
+
    // Okay, we don't have an exact match; if this is an indexed offset, see if
    // we have overlap (which happens during fp->int conversion, for example).
if (StorePtr2[i] == Ptr2) {
@@ -122,26 +122,28 @@ isLoadOfStoredAddress(unsigned LoadSize, SDValue Ptr1, SDValue Ptr2) const {
/// instructions that wouldn't terminate the dispatch group that would cause a
/// pipeline flush.
ScheduleHazardRecognizer::HazardType PPCHazardRecognizer970::
-getHazardType(SUnit *SU) {
- const SDNode *Node = SU->getNode()->getFlaggedMachineNode();
+getHazardType(SUnit *SU, int Stalls) {
+ assert(Stalls == 0 && "PPC hazards don't support scoreboard lookahead");
+
+ const SDNode *Node = SU->getNode()->getGluedMachineNode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
- PPCII::PPC970_Unit InstrType =
+ PPCII::PPC970_Unit InstrType =
GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
isLoad, isStore);
- if (InstrType == PPCII::PPC970_Pseudo) return NoHazard;
+ if (InstrType == PPCII::PPC970_Pseudo) return NoHazard;
unsigned Opcode = Node->getMachineOpcode();
// We can only issue a PPC970_First/PPC970_Single instruction (such as
// crand/mtspr/etc) if this is the first cycle of the dispatch group.
if (NumIssued != 0 && (isFirst || isSingle))
return Hazard;
-
+
// If this instruction is cracked into two ops by the decoder, we know that
// it is not a branch and that it cannot issue if 3 other instructions are
// already in the dispatch group.
if (isCracked && NumIssued > 2)
return Hazard;
-
+
switch (InstrType) {
default: llvm_unreachable("Unknown instruction type!");
case PPCII::PPC970_FXU:
@@ -159,11 +161,11 @@ getHazardType(SUnit *SU) {
case PPCII::PPC970_BRU:
break;
}
-
+
// Do not allow MTCTR and BCTRL to be in the same dispatch group.
if (HasCTRSet && (Opcode == PPC::BCTRL_Darwin || Opcode == PPC::BCTRL_SVR4))
return NoopHazard;
-
+
// If this is a load following a store, make sure it's not to the same or
// overlapping address.
if (isLoad && NumStores) {
@@ -212,27 +214,27 @@ getHazardType(SUnit *SU) {
LoadSize = 16;
break;
}
-
- if (isLoadOfStoredAddress(LoadSize,
+
+ if (isLoadOfStoredAddress(LoadSize,
Node->getOperand(0), Node->getOperand(1)))
return NoopHazard;
}
-
+
return NoHazard;
}
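
A hedged summary of the accounting shared by getHazardType above and
EmitInstruction below: a PPC970 dispatch group has five slots, branches and
PPC970_Single instructions terminate it, and a cracked instruction occupies
two slots, which is why NumIssued is compared against 0, 2, and 5:

  // NumIssued == 0            -> PPC970_First/Single may issue
  // isCracked && NumIssued>2  -> Hazard (a cracked op needs two free slots)
  // NumIssued == 5            -> EndDispatchGroup()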
void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
- const SDNode *Node = SU->getNode()->getFlaggedMachineNode();
+ const SDNode *Node = SU->getNode()->getGluedMachineNode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
- PPCII::PPC970_Unit InstrType =
+ PPCII::PPC970_Unit InstrType =
GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
isLoad, isStore);
- if (InstrType == PPCII::PPC970_Pseudo) return;
+ if (InstrType == PPCII::PPC970_Pseudo) return;
unsigned Opcode = Node->getMachineOpcode();
// Update structural hazard information.
if (Opcode == PPC::MTCTR) HasCTRSet = true;
-
+
// Track the address stored to.
if (isStore) {
unsigned ThisStoreSize;
@@ -278,22 +280,22 @@ void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
ThisStoreSize = 16;
break;
}
-
+
StoreSize[NumStores] = ThisStoreSize;
StorePtr1[NumStores] = Node->getOperand(1);
StorePtr2[NumStores] = Node->getOperand(2);
++NumStores;
}
-
+
if (InstrType == PPCII::PPC970_BRU || isSingle)
NumIssued = 4; // Terminate a d-group.
++NumIssued;
-
+
// If this instruction is cracked into two ops by the decoder, remember that
// we issued two pieces.
if (isCracked)
++NumIssued;
-
+
if (NumIssued == 5)
EndDispatchGroup();
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
index 74bf8e5..2f81f0f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
@@ -19,7 +19,7 @@
#include "PPCInstrInfo.h"
namespace llvm {
-
+
/// PPCHazardRecognizer970 - This class defines a finite state automata that
/// models the dispatch logic on the PowerPC 970 (aka G5) processor. This
/// promotes good dispatch group formation and implements noop insertion to
@@ -28,14 +28,14 @@ namespace llvm {
/// or storing then loading from the same address within a dispatch group.
class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
const TargetInstrInfo &TII;
-
+
unsigned NumIssued; // Number of insts issued, including advanced cycles.
-
+
// Various things that can cause a structural hazard.
-
+
// HasCTRSet - If the CTR register is set in this group, disallow BCTRL.
bool HasCTRSet;
-
+
// StoredPtr - Keep track of the address of any store. If we see a load from
  // the same address (or one that aliases it), disallow the load. We can have
// up to four stores in one dispatch group, hence we track up to 4.
@@ -45,24 +45,24 @@ class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
SDValue StorePtr1[4], StorePtr2[4];
unsigned StoreSize[4];
unsigned NumStores;
-
+
public:
PPCHazardRecognizer970(const TargetInstrInfo &TII);
- virtual HazardType getHazardType(SUnit *SU);
+ virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
-
+
private:
/// EndDispatchGroup - Called when we are finishing a new dispatch group.
///
void EndDispatchGroup();
-
+
/// GetInstrType - Classify the specified powerpc opcode according to its
/// pipeline.
PPCII::PPC970_Unit GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,bool &isCracked,
bool &isLoad, bool &isStore);
-
+
bool isLoadOfStoredAddress(unsigned LoadSize,
SDValue Ptr1, SDValue Ptr2) const;
};
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 00eebb8..faae9b2 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -16,7 +16,6 @@
#include "PPC.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
-#include "PPCHazardRecognizers.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
@@ -49,16 +48,16 @@ namespace {
: SelectionDAGISel(tm), TM(tm),
PPCLowering(*TM.getTargetLowering()),
PPCSubTarget(*TM.getSubtargetImpl()) {}
-
+
virtual bool runOnMachineFunction(MachineFunction &MF) {
// Make sure we re-emit a set of the global base reg if necessary
GlobalBaseReg = 0;
SelectionDAGISel::runOnMachineFunction(MF);
-
+
InsertVRSaveCode(MF);
return true;
}
-
+
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
@@ -70,13 +69,13 @@ namespace {
inline SDValue getI64Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
-
+
/// getSmallIPtrImm - Return a target constant of pointer type.
inline SDValue getSmallIPtrImm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, PPCLowering.getPointerTy());
}
-
- /// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
+
+ /// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
/// with any number of 0s on either side. The 1s are allowed to wrap from
    /// LSB to MSB, so 0x0000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.
/// 0x0F0F0000 is not, since all 1s are not contiguous.
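
A hedged stand-alone equivalent of the predicate described here: a value is a
single (possibly wrapped) run of ones iff either it or its complement is a
contiguous shifted mask. Sketch, with hypothetical helper names:

  static bool isContiguousMask(unsigned V) {
    // Filling V's trailing zeros and adding 1 must clear every bit of V.
    return V != 0 && (((V | (V - 1)) + 1) & V) == 0;
  }
  static bool isRunOfOnesSketch(unsigned Val) {
    return Val != 0 && (isContiguousMask(Val) || isContiguousMask(~Val));
  }
  // 0x0000FFF0 -> true, 0xFF0000FF -> true (wrapped), 0x0F0F0000 -> false.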
@@ -87,15 +86,15 @@ namespace {
/// rotate and mask opcode and mask operation.
static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
unsigned &SH, unsigned &MB, unsigned &ME);
-
+
/// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
/// base register. Return the virtual register that holds this value.
SDNode *getGlobalBaseReg();
-
+
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *Select(SDNode *N);
-
+
SDNode *SelectBitfieldInsert(SDNode *N);
/// SelectCC - Select a comparison of the specified values with the
@@ -104,42 +103,39 @@ namespace {
/// SelectAddrImm - Returns true if the address N can be represented by
/// a base register plus a signed 16-bit displacement [r+imm].
- bool SelectAddrImm(SDNode *Op, SDValue N, SDValue &Disp,
+ bool SelectAddrImm(SDValue N, SDValue &Disp,
SDValue &Base) {
return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG);
}
-
+
/// SelectAddrImmOffs - Return true if the operand is valid for a preinc
/// immediate field. Because preinc imms have already been validated, just
/// accept it.
- bool SelectAddrImmOffs(SDNode *Op, SDValue N, SDValue &Out) const {
+ bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
Out = N;
return true;
}
-
+
    /// SelectAddrIdx - Given the specified address, check to see if it can be
/// represented as an indexed [r+r] operation. Returns false if it can
/// be represented by [r+imm], which are preferred.
- bool SelectAddrIdx(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Index) {
+ bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
return PPCLowering.SelectAddressRegReg(N, Base, Index, *CurDAG);
}
-
+
    /// SelectAddrIdxOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
- bool SelectAddrIdxOnly(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Index) {
+ bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
return PPCLowering.SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
}
/// SelectAddrImmShift - Returns true if the address N can be represented by
/// a base register plus a signed 14-bit displacement [r+imm*4]. Suitable
/// for use by STD and friends.
- bool SelectAddrImmShift(SDNode *Op, SDValue N, SDValue &Disp,
- SDValue &Base) {
+ bool SelectAddrImmShift(SDValue N, SDValue &Disp, SDValue &Base) {
return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG);
}
-
+
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions. It is always correct to compute the value into
/// a register. The case of adding a (possibly relocatable) constant to a
@@ -151,29 +147,16 @@ namespace {
OutOps.push_back(Op);
return false;
}
-
- SDValue BuildSDIVSequence(SDNode *N);
- SDValue BuildUDIVSequence(SDNode *N);
-
+
void InsertVRSaveCode(MachineFunction &MF);
virtual const char *getPassName() const {
return "PowerPC DAG->DAG Pattern Instruction Selection";
- }
-
- /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
- /// this target when scheduling the DAG.
- virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer() {
- // Should use subtarget info to pick the right hazard recognizer. For
- // now, always return a PPC970 recognizer.
- const TargetInstrInfo *II = TM.getInstrInfo();
- assert(II && "No InstrInfo?");
- return new PPCHazardRecognizer970(*II);
}
// Include the pieces autogenerated from the target description.
#include "PPCGenDAGISel.inc"
-
+
private:
SDNode *SelectSETCC(SDNode *N);
};
@@ -184,19 +167,20 @@ private:
/// check to see if we need to save/restore VRSAVE. If so, do it.
void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// Check to see if this function uses vector registers, which means we have to
- // save and restore the VRSAVE register and update it with the regs we use.
+ // save and restore the VRSAVE register and update it with the regs we use.
//
// In this case, there will be virtual registers of vector type created
// by the scheduler. Detect them now.
bool HasVectorVReg = false;
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = RegInfo->getLastVirtReg()+1; i != e; ++i)
- if (RegInfo->getRegClass(i) == &PPC::VRRCRegClass) {
+ for (unsigned i = 0, e = RegInfo->getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (RegInfo->getRegClass(Reg) == &PPC::VRRCRegClass) {
HasVectorVReg = true;
break;
}
+ }
if (!HasVectorVReg) return; // nothing to do.
-
+
// If we have a vector register, we want to emit code into the entry and exit
// blocks to save and restore the VRSAVE register. We do this here (instead
// of marking all vector instructions as clobbering VRSAVE) for two reasons:
@@ -211,7 +195,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// function and one for the value after having bits or'd into it.
unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
-
+
const TargetInstrInfo &TII = *TM.getInstrInfo();
MachineBasicBlock &EntryBB = *Fn.begin();
DebugLoc dl;
@@ -224,21 +208,21 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
BuildMI(EntryBB, IP, dl, TII.get(PPC::UPDATE_VRSAVE),
UpdatedVRSAVE).addReg(InVRSAVE);
BuildMI(EntryBB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(UpdatedVRSAVE);
-
+
// Find all return blocks, outputting a restore in each epilog.
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
if (!BB->empty() && BB->back().getDesc().isReturn()) {
IP = BB->end(); --IP;
-
+
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = IP;
while (I2 != BB->begin() && (--I2)->getDesc().isTerminator())
IP = I2;
-
+
// Emit: MTVRSAVE InVRSave
BuildMI(*BB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(InVRSAVE);
- }
+ }
}
}
@@ -344,8 +328,8 @@ bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
return false;
}
-bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
- bool isShiftMask, unsigned &SH,
+bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
+ bool isShiftMask, unsigned &SH,
unsigned &MB, unsigned &ME) {
// Don't even go down this path for i64, since different logic will be
// necessary for rldicl/rldicr/rldimi.
@@ -358,13 +342,13 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
if (N->getNumOperands() != 2 ||
!isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
return false;
-
+
if (Opcode == ISD::SHL) {
// apply shift left to mask if it comes first
if (isShiftMask) Mask = Mask << Shift;
      // determine which bits are made indeterminate by the shift
Indeterminant = ~(0xFFFFFFFFu << Shift);
- } else if (Opcode == ISD::SRL) {
+ } else if (Opcode == ISD::SRL) {
// apply shift right to mask if it comes first
if (isShiftMask) Mask = Mask >> Shift;
// determine which bits are made indeterminate by shift
@@ -376,7 +360,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
} else {
return false;
}
-
+
// if the mask doesn't intersect any Indeterminant bits
if (Mask && !(Mask & Indeterminant)) {
SH = Shift & 31;
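The check concluding here rests on a simple identity: a 32-bit shift followed by a mask equals a rotate followed by the same mask whenever the mask avoids the bits the shift zeroes but a rotate would wrap back in. A small self-contained check of that equivalence (values chosen arbitrarily):

#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

int main() {
  uint32_t X = 0xDEADBEEFu, Mask = 0xFFFF0000u;
  unsigned Shift = 8;
  uint32_t Indeterminate = ~(0xFFFFFFFFu << Shift);  // bits rotl wraps in
  assert((Mask & Indeterminate) == 0);               // mask qualifies
  assert(((X << Shift) & Mask) == (rotl32(X, Shift) & Mask));
}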
@@ -392,14 +376,14 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
-
+
APInt LKZ, LKO, RKZ, RKO;
CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO);
CurDAG->ComputeMaskedBits(Op1, APInt::getAllOnesValue(32), RKZ, RKO);
-
+
unsigned TargetMask = LKZ.getZExtValue();
unsigned InsertMask = RKZ.getZExtValue();
-
+
if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
unsigned Op0Opc = Op0.getOpcode();
unsigned Op1Opc = Op1.getOpcode();
@@ -427,7 +411,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
std::swap(TargetMask, InsertMask);
}
}
-
+
unsigned MB, ME;
if (InsertMask && isRunOfOnes(InsertMask, MB, ME)) {
SDValue Tmp1, Tmp2;
@@ -463,7 +447,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
ISD::CondCode CC, DebugLoc dl) {
// Always select the LHS.
unsigned Opc;
-
+
if (LHS.getValueType() == MVT::i32) {
unsigned Imm;
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
@@ -476,11 +460,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (isInt<16>((int)Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
-
+
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
// lis r2, 4660
- // ori r2, r2, 22136
+ // ori r2, r2, 22136
// cmpw cr0, r3, r2
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
@@ -517,11 +501,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (isInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
-
+
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
// lis r2, 4660
- // ori r2, r2, 22136
+ // ori r2, r2, 22136
// cmpd cr0, r3, r2
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
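The trick quoted above (for both the 32- and 64-bit compares) is easy to validate in isolation: xor-ing out the high halfword turns a full-width equality against a constant into a 16-bit unsigned compare, so no temporary register is materialized. A sketch using the same constant as the comments (0x1234 : 0x5678):

#include <cassert>
#include <cstdint>

// x == C  iff  (x ^ (hi16(C) << 16)) == lo16(C): the xoris immediate
// cancels the high half, the cmplwi immediate checks the low half.
bool eqViaXoris(uint32_t X, uint32_t C) {
  uint32_t Hi = C >> 16;     // xoris immediate
  uint32_t Lo = C & 0xFFFF;  // cmplwi immediate
  return (X ^ (Hi << 16)) == Lo;
}

int main() {
  assert(eqViaXoris(0x12345678u, 0x12345678u));
  assert(!eqViaXoris(0x12345679u, 0x12345678u));
  assert(!eqViaXoris(0x00005678u, 0x12345678u));
}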
@@ -610,9 +594,9 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) {
case ISD::SETUNE:
case ISD::SETNE: Invert = true; return 2; // !Bit #2 = SETUNE
case ISD::SETO: Invert = true; return 3; // !Bit #3 = SETO
- case ISD::SETUEQ:
- case ISD::SETOGE:
- case ISD::SETOLE:
+ case ISD::SETUEQ:
+ case ISD::SETOGE:
+ case ISD::SETOLE:
case ISD::SETONE:
llvm_unreachable("Invalid branch code: should be expanded by legalize");
// These are invalid for floating point. Assume integer.
@@ -641,9 +625,9 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
case ISD::SETNE: {
SDValue AD =
- SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+ SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(~0U)), 0);
- return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
+ return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
AD.getValue(1));
}
case ISD::SETLT: {
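The SETNE lowering just above (addic with -1, then subfe against the original operand) is a carry trick; here is a standalone model of the quoted instructions, under the usual definitions of addic and subfe, showing why the result is exactly 0 or 1:

#include <cassert>
#include <cstdint>

// addic X, -1 computes X-1 and sets CA iff X != 0; subfe then yields
// ~(X-1) + X + CA, which is 0 when X == 0 and 1 otherwise.
uint32_t setne0(uint32_t X) {
  uint64_t Sum = (uint64_t)X + 0xFFFFFFFFu;  // addic X, -1
  uint32_t AD = (uint32_t)Sum;
  uint32_t CA = (uint32_t)(Sum >> 32);       // carry out of the add
  return ~AD + X + CA;                       // subfe AD, X
}

int main() {
  assert(setne0(0) == 0);
  assert(setne0(1) == 1);
  assert(setne0(0xDEADBEEFu) == 1);
}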
@@ -663,16 +647,16 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
switch (CC) {
default: break;
case ISD::SETEQ:
- Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+ Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(1)), 0);
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
- SDValue(CurDAG->getMachineNode(PPC::LI, dl,
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ SDValue(CurDAG->getMachineNode(PPC::LI, dl,
MVT::i32,
getI32Imm(0)), 0),
Op.getValue(1));
case ISD::SETNE: {
Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
- SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+ SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(~0U));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0),
Op, SDValue(AD, 1));
@@ -687,35 +671,35 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
case ISD::SETGT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4),
+ Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4),
0);
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
+ return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
getI32Imm(1));
}
}
}
}
-
+
bool Inv;
int OtherCondIdx;
unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx);
SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
SDValue IntCR;
-
+
// Force the ccreg into CR7.
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
-
+
SDValue InFlag(0, 0); // Null incoming flag value.
- CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
+ CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
-
+
if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
else
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
CR7Reg, CCReg), 0);
-
+
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
if (OtherCondIdx == -1 && !Inv)
@@ -734,7 +718,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// Get the other bit of the comparison.
Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31);
- SDValue OtherCond =
+ SDValue OtherCond =
SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond);
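The rlwinm shift amount (32-(3-Idx)) & 31 built above rotates the requested bit of CR7 down to the least-significant position. A standalone check of that arithmetic, assuming the architected CR layout (CR7 is the low nibble of the 32-bit CR image; field bit 0 is the most significant bit of the nibble):

#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

// CR7 bit Idx (0=LT, 1=GT, 2=EQ, 3=SO) sits at value bit 3-Idx, so
// rotating left by 32-(3-Idx) parks it at the LSB (bit 31 in PPC terms).
unsigned cr7Bit(uint32_t CRImage, unsigned Idx) {
  return rotl32(CRImage, (32 - (3 - Idx)) & 31) & 1;
}

int main() {
  uint32_t CR = 0x2;  // only CR7's EQ bit set
  assert(cr7Bit(CR, 2) == 1);
  assert(cr7Bit(CR, 0) == 0 && cr7Bit(CR, 3) == 0);
}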
@@ -750,7 +734,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
-
+
case ISD::Constant: {
if (N->getValueType(0) == MVT::i64) {
// Get 64 bit value.
@@ -759,12 +743,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Remainder = 0;
// Assume no shift required.
unsigned Shift = 0;
-
+
// If it can't be represented as a 32 bit value.
if (!isInt<32>(Imm)) {
Shift = CountTrailingZeros_64(Imm);
int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
-
+
// If the shifted value fits 32 bits.
if (isInt<32>(ImmSh)) {
// Go with the shifted value.
@@ -776,14 +760,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
Imm >>= 32;
}
}
-
+
// Intermediate operand.
SDNode *Result;
// Handle first 32 bits.
unsigned Lo = Imm & 0xFFFF;
unsigned Hi = (Imm >> 16) & 0xFFFF;
-
+
// Simple value.
if (isInt<16>(Imm)) {
// Just the Lo bits.
@@ -799,7 +783,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Just the Hi bits.
Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
}
-
+
// If no shift, we're done.
if (!Shift) return Result;
@@ -815,22 +799,22 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if ((Hi = (Remainder >> 16) & 0xFFFF)) {
Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Hi));
- }
+ }
if ((Lo = Remainder & 0xFFFF)) {
Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Lo));
}
-
+
return Result;
}
break;
}
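The ISD::Constant case above assembles a 64-bit immediate from 16-bit pieces: li or lis/ori for a 32-bit seed, a shift into place (rldicr in the full function), then oris/ori for the remaining halfwords. A compact model of just the 32-bit seed, with the instructions folded into plain arithmetic; a sketch, not the DAG code:

#include <cassert>
#include <cstdint>

static int64_t lis(uint16_t Hi)            { return (int64_t)(int32_t)((uint32_t)Hi << 16); }
static int64_t ori(int64_t V, uint16_t Lo) { return V | Lo; }

// Returns the sign-extended i64 the li or lis/ori sequence produces.
int64_t materialize32(int32_t Imm) {
  if (Imm >= -0x8000 && Imm < 0x8000)
    return Imm;                                      // li alone suffices
  int64_t V = lis((uint16_t)((uint32_t)Imm >> 16));  // high halfword
  if (uint16_t Lo = (uint16_t)(uint32_t)Imm)
    V = ori(V, Lo);                                  // low halfword, if nonzero
  return V;
}

int main() {
  assert(materialize32(0x12345678) == 0x12345678);
  assert(materialize32(-2) == -2);
  assert((uint64_t)materialize32((int32_t)0x80000001u) == 0xFFFFFFFF80000001ull);
}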
-
+
case ISD::SETCC:
return SelectSETCC(N);
case PPCISD::GlobalBaseReg:
return getGlobalBaseReg();
-
+
case ISD::FrameIndex: {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
@@ -852,11 +836,11 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
N->getOperand(0), InFlag);
}
-
+
case ISD::SDIV: {
// FIXME: since this depends on the setting of the carry flag from the srawi
// we should really be making notes about that for the scheduler.
- // FIXME: It sure would be nice if we could cheaply recognize the
+ // FIXME: It sure would be nice if we could cheaply recognize the
// srl/add/sra pattern the dag combiner will generate for this as
// sra/addze rather than having to handle sdiv ourselves. oh well.
unsigned Imm;
@@ -864,13 +848,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue N0 = N->getOperand(0);
if ((signed)Imm > 0 && isPowerOf2_32(Imm)) {
SDNode *Op =
- CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
+ CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
N0, getI32Imm(Log2_32(Imm)));
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
SDValue(Op, 0), SDValue(Op, 1));
} else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) {
SDNode *Op =
- CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
+ CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
N0, getI32Imm(Log2_32(-Imm)));
SDValue PT =
SDValue(CurDAG->getMachineNode(PPC::ADDZE, dl, MVT::i32,
@@ -879,24 +863,24 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT);
}
}
-
+
// Other cases are autogenerated.
break;
}
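The power-of-two SDIV selection above relies on srawi's carry: an arithmetic shift right alone rounds toward negative infinity, while srawi sets CA exactly when a negative input shifts out nonzero bits, so the following addze corrects the result to C-style truncating division. A standalone model (arithmetic right shift of negative values is assumed, as on all mainstream compilers):

#include <cassert>
#include <cstdint>

int32_t sdivPow2(int32_t N, unsigned Log2) {
  int32_t Shifted = N >> Log2;  // srawi: floor division by 2^Log2
  int32_t CA = (N < 0 && (N & ((1 << Log2) - 1)) != 0) ? 1 : 0;
  return Shifted + CA;          // addze: bump the floor up to truncation
}

int main() {
  const int32_t Ns[] = {7, -7, 8, -8, -9};
  for (int32_t N : Ns)
    for (unsigned L = 1; L <= 3; ++L)
      assert(sdivPow2(N, L) == N / (1 << L));
}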
-
+
case ISD::LOAD: {
// Handle preincrement loads.
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
-
+
// Normal loads are handled by code generated from the .td file.
if (LD->getAddressingMode() != ISD::PRE_INC)
break;
-
+
SDValue Offset = LD->getOffset();
if (isa<ConstantSDNode>(Offset) ||
Offset.getOpcode() == ISD::TargetGlobalAddress) {
-
+
unsigned Opcode;
bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
if (LD->getValueType(0) != MVT::i64) {
@@ -923,7 +907,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case MVT::i8: Opcode = PPC::LBZU8; break;
}
}
-
+
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
@@ -935,7 +919,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
llvm_unreachable("R+R preindex loads not supported yet!");
}
}
-
+
case ISD::AND: {
unsigned Imm, Imm2, SH, MB, ME;
@@ -950,7 +934,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// If this is just a masked value where the input is not handled above, and
// is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
if (isInt32Immediate(N->getOperand(1), Imm) &&
- isRunOfOnes(Imm, MB, ME) &&
+ isRunOfOnes(Imm, MB, ME) &&
N->getOperand(0).getOpcode() != ISD::ROTL) {
SDValue Val = N->getOperand(0);
SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
@@ -963,7 +947,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
// ISD::OR doesn't get all the bitfield insertion fun.
// (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) is a bitfield insert
- if (isInt32Immediate(N->getOperand(1), Imm) &&
+ if (isInt32Immediate(N->getOperand(1), Imm) &&
N->getOperand(0).getOpcode() == ISD::OR &&
isInt32Immediate(N->getOperand(0).getOperand(1), Imm2)) {
unsigned MB, ME;
@@ -975,7 +959,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
}
}
-
+
// Other cases are autogenerated.
break;
}
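The RLWIMI patterns above turn (and (or x, c1), c2) and friends into a single rotate-then-insert when the insert mask is a run of ones. A standalone model of rlwimi's semantics, remembering that PowerPC numbers bit 0 as the most significant:

#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

static uint32_t maskMBME(unsigned MB, unsigned ME) {  // assumes MB <= ME
  uint32_t M = 0;
  for (unsigned i = MB; i <= ME; ++i)
    M |= 0x80000000u >> i;  // PPC bit i counts from the MSB
  return M;
}

// rlwimi: rotate Src left by SH, insert the MB..ME field into Target.
uint32_t rlwimi(uint32_t Target, uint32_t Src, unsigned SH,
                unsigned MB, unsigned ME) {
  uint32_t M = maskMBME(MB, ME);
  return (rotl32(Src, SH) & M) | (Target & ~M);
}

int main() {
  // Insert the low byte of Src into PPC bits 8..15 of Target.
  assert(rlwimi(0xAABBCCDDu, 0x000000EEu, 16, 8, 15) == 0xAAEECCDDu);
}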
@@ -983,7 +967,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (N->getValueType(0) == MVT::i32)
if (SDNode *I = SelectBitfieldInsert(N))
return I;
-
+
// Other cases are autogenerated.
break;
case ISD::SHL: {
@@ -994,25 +978,25 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
-
+
// Other cases are autogenerated.
break;
}
case ISD::SRL: {
unsigned Imm, SH, MB, ME;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
- isRotateAndMask(N, Imm, true, SH, MB, ME)) {
+ isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
-
+
// Other cases are autogenerated.
break;
}
case ISD::SELECT_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
-
+
// Handle the setcc cases here. select_cc lhs, 0, 1, 0, cc
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
@@ -1022,7 +1006,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// FIXME: Implement this optzn for PPC64.
N->getValueType(0) == MVT::i32) {
SDNode *Tmp =
- CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+ CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
N->getOperand(0), getI32Imm(~0U));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32,
SDValue(Tmp, 0), N->getOperand(0),
@@ -1064,7 +1048,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
- SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
+ SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
N->getOperand(4), N->getOperand(0) };
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4);
}
@@ -1078,13 +1062,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain);
}
}
-
+
return SelectCode(N);
}
-/// createPPCISelDag - This pass converts a legalized DAG into a
+/// createPPCISelDag - This pass converts a legalized DAG into a
/// PowerPC-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM) {
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 14d1b15..8f623b8 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -38,17 +38,17 @@
#include "llvm/DerivedTypes.h"
using namespace llvm;
-static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
-static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
- EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
-static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
- EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
@@ -73,6 +73,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(true);
+ // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
+ // arguments are at least 4/8 bytes aligned.
+ setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);
+
// Set up the register classes.
addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
@@ -174,10 +178,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
+ setOperationAction(ISD::BITCAST, MVT::f32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i64, Expand);
+ setOperationAction(ISD::BITCAST, MVT::f64, Expand);
// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -545,7 +549,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
-bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 8, 24);
@@ -554,7 +558,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
-bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 0, 16);
@@ -569,7 +573,7 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
"PPC only supports shuffles by bytes!");
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-
+
// Find the first non-undef value in the shuffle mask.
unsigned i;
for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
@@ -607,7 +611,7 @@ bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
// This is a splat operation if each element of the permute is the same, and
// if the value doesn't reference the second vector.
unsigned ElementBase = N->getMaskElt(0);
-
+
// FIXME: Handle UNDEF elements too!
if (ElementBase >= 16)
return false;
@@ -635,7 +639,7 @@ bool PPC::isAllNegativeZeroVector(SDNode *N) {
APInt APVal, APUndef;
unsigned BitSize;
bool HasAnyUndefs;
-
+
if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
return CFP->getValueAPF().isNegZero();
@@ -1054,7 +1058,6 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
VT = LD->getMemoryVT();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ST = ST;
Ptr = ST->getBasePtr();
VT = ST->getMemoryVT();
} else
@@ -1094,158 +1097,126 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
// LowerOperation implementation
//===----------------------------------------------------------------------===//
-SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
- SelectionDAG &DAG) const {
- EVT PtrVT = Op.getValueType();
- ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- const Constant *C = CP->getConstVal();
- SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
- SDValue Zero = DAG.getConstant(0, PtrVT);
- // FIXME there isn't really any debug info here
- DebugLoc dl = Op.getDebugLoc();
-
- const TargetMachine &TM = DAG.getTarget();
-
- SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero);
-
- // If this is a non-darwin platform, we don't support non-static relo models
- // yet.
- if (TM.getRelocationModel() == Reloc::Static ||
- !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
- // Generate non-pic code that has direct accesses to the constant pool.
- // The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
+/// GetLabelAccessInfo - Return true if we should reference labels using a
+/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
+static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
+ unsigned &LoOpFlags, const GlobalValue *GV = 0) {
+ HiOpFlags = PPCII::MO_HA16;
+ LoOpFlags = PPCII::MO_LO16;
+
+ // Don't use the pic base if not in PIC relocation model. Or if we are on a
+ // non-darwin platform. We don't support PIC on other platforms yet.
+ bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
+ TM.getSubtarget<PPCSubtarget>().isDarwin();
+ if (isPIC) {
+ HiOpFlags |= PPCII::MO_PIC_FLAG;
+ LoOpFlags |= PPCII::MO_PIC_FLAG;
}
- if (TM.getRelocationModel() == Reloc::PIC_) {
- // With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc(), PtrVT), Hi);
+ // If this is a reference to a global value that requires a non-lazy-ptr, make
+ // sure that instruction lowering adds it.
+ if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
+ HiOpFlags |= PPCII::MO_NLP_FLAG;
+ LoOpFlags |= PPCII::MO_NLP_FLAG;
+
+ if (GV->hasHiddenVisibility()) {
+ HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
+ LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
+ }
}
- Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
- return Lo;
+ return isPIC;
}
-SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
- EVT PtrVT = Op.getValueType();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
+static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
+ SelectionDAG &DAG) {
+ EVT PtrVT = HiPart.getValueType();
SDValue Zero = DAG.getConstant(0, PtrVT);
- // FIXME there isn't really any debug loc here
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = HiPart.getDebugLoc();
- const TargetMachine &TM = DAG.getTarget();
+ SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
+ SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
- SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, JTI, Zero);
+ // With PIC, the first instruction is actually "GR+hi(&G)".
+ if (isPIC)
+ Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
+ DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
- // If this is a non-darwin platform, we don't support non-static relo models
- // yet.
- if (TM.getRelocationModel() == Reloc::Static ||
- !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
- // Generate non-pic code that has direct accesses to the constant pool.
- // The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
- }
+ // Generate non-pic code that has direct accesses to the constant pool.
+ // The address of the global is just (hi(&g)+lo(&g)).
+ return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
+}
- if (TM.getRelocationModel() == Reloc::PIC_) {
- // With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc(), PtrVT), Hi);
- }
+SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT PtrVT = Op.getValueType();
+ ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+ const Constant *C = CP->getConstVal();
- Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
- return Lo;
+ unsigned MOHiFlag, MOLoFlag;
+ bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ SDValue CPIHi =
+ DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
+ SDValue CPILo =
+ DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
+ return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}
-SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) const {
- llvm_unreachable("TLS not implemented for PPC.");
- return SDValue(); // Not reached
+SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
+ EVT PtrVT = Op.getValueType();
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+
+ unsigned MOHiFlag, MOLoFlag;
+ bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
+ SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
+ return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- SDValue TgtBA = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true);
- SDValue Zero = DAG.getConstant(0, PtrVT);
- SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, TgtBA, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, TgtBA, Zero);
-
- // If this is a non-darwin platform, we don't support non-static relo models
- // yet.
- const TargetMachine &TM = DAG.getTarget();
- if (TM.getRelocationModel() == Reloc::Static ||
- !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
- // Generate non-pic code that has direct accesses to globals.
- // The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
- }
- if (TM.getRelocationModel() == Reloc::PIC_) {
- // With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc(), PtrVT), Hi);
- }
-
- return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
+ unsigned MOHiFlag, MOLoFlag;
+ bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
+ SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
+ SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag);
+ return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
- // FIXME there isn't really any debug info here
- DebugLoc dl = GSDN->getDebugLoc();
+ DebugLoc DL = GSDN->getDebugLoc();
const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GSDN->getOffset());
- SDValue Zero = DAG.getConstant(0, PtrVT);
-
- const TargetMachine &TM = DAG.getTarget();
// 64-bit SVR4 ABI code is always position-independent.
// The actual address of the GlobalValue is stored in the TOC.
if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
- return DAG.getNode(PPCISD::TOC_ENTRY, dl, MVT::i64, GA,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
+ return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
DAG.getRegister(PPC::X2, MVT::i64));
}
- SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
- SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero);
-
- // If this is a non-darwin platform, we don't support non-static relo models
- // yet.
- if (TM.getRelocationModel() == Reloc::Static ||
- !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
- // Generate non-pic code that has direct accesses to globals.
- // The address of the global is just (hi(&g)+lo(&g)).
- return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
- }
-
- if (TM.getRelocationModel() == Reloc::PIC_) {
- // With PIC, the first instruction is actually "GR+hi(&G)".
- Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
- DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc(), PtrVT), Hi);
- }
+ unsigned MOHiFlag, MOLoFlag;
+ bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
- Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
+ SDValue GAHi =
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
+ SDValue GALo =
+ DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
- if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM))
- return Lo;
+ SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
- // If the global is weak or external, we have to go through the lazy
- // resolution stub.
- return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0,
- false, false, 0);
+ // If the global reference is actually to a non-lazy-pointer, we have to do an
+ // extra load to get the address of the global.
+ if (MOHiFlag & PPCII::MO_NLP_FLAG)
+ Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
+ false, false, 0);
+ return Ptr;
}
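The refactor above centralizes hi/lo label addressing in GetLabelAccessInfo and LowerLabelRef, emitting an MO_HA16 half and an MO_LO16 half that are recombined by an add. Since the low half is sign-extended when added, the high half must be the adjusted ha16 value for the pair to recompose; a self-contained check of that arithmetic:

#include <cassert>
#include <cstdint>

uint32_t ha16(uint32_t Addr) { return (Addr + 0x8000) >> 16; }    // carries when lo16 is negative
int32_t  lo16(uint32_t Addr) { return (int16_t)(Addr & 0xFFFF); } // sign-extended by the add

int main() {
  const uint32_t Tests[] = {0x12345678u, 0x1234FFFFu, 0x0000FFFFu, 0xFFFF8000u};
  for (uint32_t A : Tests)
    assert((uint32_t)((ha16(A) << 16) + lo16(A)) == A);
}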
SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
@@ -1353,7 +1324,8 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
+ return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
+ MachinePointerInfo(SV),
false, false, 0);
}
@@ -1406,43 +1378,47 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
// Store first byte : number of int regs
SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
- Op.getOperand(1), SV, 0, MVT::i8,
- false, false, 0);
+ Op.getOperand(1),
+ MachinePointerInfo(SV),
+ MVT::i8, false, false, 0);
uint64_t nextOffset = FPROffset;
SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
ConstFPROffset);
// Store second byte : number of float regs
SDValue secondStore =
- DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, MVT::i8,
+ DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
+ MachinePointerInfo(SV, nextOffset), MVT::i8,
false, false, 0);
nextOffset += StackOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
// Store second word : arguments given on stack
SDValue thirdStore =
- DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset,
+ DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
+ MachinePointerInfo(SV, nextOffset),
false, false, 0);
nextOffset += FrameOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
// Store third word : arguments given in registers
- return DAG.getStore(thirdStore, dl, FR, nextPtr, SV, nextOffset,
+ return DAG.getStore(thirdStore, dl, FR, nextPtr,
+ MachinePointerInfo(SV, nextOffset),
false, false, 0);
}
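The four stores above lay down the 32-bit SVR4 va_list record: one byte counting fixed GPRs used, one byte counting fixed FPRs used, a word pointing at the stack overflow area, and a word pointing at the register save area. A descriptive sketch of the layout they assume; the struct and field names are illustrative, not from the source:

#include <cstdint>

struct PPC32SVR4VAList {
  uint8_t  NumGPRsUsed;   // first byte stored above
  uint8_t  NumFPRsUsed;   // second byte stored above
  uint16_t Reserved;      // pad to the next word
  uint32_t OverflowArea;  // arguments passed on the stack
  uint32_t RegSaveArea;   // spilled argument registers
};

static_assert(sizeof(PPC32SVR4VAList) == 12,
              "three words, matching the offsets used above");

int main() {}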
#include "PPCGenCallingConv.inc"
-static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
return true;
}
-static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
- EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
@@ -1451,7 +1427,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
const unsigned NumArgRegs = array_lengthof(ArgRegs);
-
+
unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
// Skip one register if the first unallocated register has an even register
@@ -1461,15 +1437,15 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
if (RegNum != NumArgRegs && RegNum % 2 == 1) {
State.AllocateReg(ArgRegs[RegNum]);
}
-
+
// Always return false here, as this function only makes sure that the first
// unallocated register has an odd register number and does not actually
// allocate a register for the current argument.
return false;
}
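The register-skipping rule above keeps 64-bit arguments in aligned pairs: r3..r10 are handed out by index, a 64-bit value must start at r3, r5, r7 or r9, and an odd free index is simply burned. A tiny model of that decision:

#include <cassert>

unsigned alignToGPRPair(unsigned FirstFree, unsigned NumArgRegs) {
  if (FirstFree != NumArgRegs && FirstFree % 2 == 1)
    ++FirstFree;  // burn the odd slot, as the AllocateReg call above does
  return FirstFree;
}

int main() {
  assert(alignToGPRPair(0, 8) == 0);  // r3/r4 directly
  assert(alignToGPRPair(1, 8) == 2);  // skip r4, use r5/r6
  assert(alignToGPRPair(8, 8) == 8);  // no registers left
}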
-static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
- EVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
@@ -1479,7 +1455,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
};
const unsigned NumArgRegs = array_lengthof(ArgRegs);
-
+
unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
// If there is only one Floating-point register left we need to put both f64
@@ -1487,7 +1463,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
State.AllocateReg(ArgRegs[RegNum]);
}
-
+
// Always return false here, as this function only makes sure that the two f64
// values a ppc_fp128 value is split into are both passed in registers or both
// passed on the stack and does not actually allocate a register for the
@@ -1572,7 +1548,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Specifications:
// System V Application Binary Interface PowerPC Processor Supplement
// AltiVec Technology Programming Interface Manual
-
+
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
@@ -1588,18 +1564,18 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
*DAG.getContext());
// Reserve space for the linkage area on the stack.
- CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
+ CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
-
+
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
-
+
// Arguments stored in registers.
if (VA.isRegLoc()) {
TargetRegisterClass *RC;
EVT ValVT = VA.getValVT();
-
+
switch (ValVT.getSimpleVT().SimpleTy) {
default:
llvm_unreachable("ValVT not supported by formal arguments Lowering");
@@ -1619,9 +1595,9 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
RC = PPC::VRRCRegisterClass;
break;
}
-
+
// Transform the arguments stored in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC, dl);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
InVals.push_back(ArgValue);
@@ -1635,7 +1611,8 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0,
+ InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ MachinePointerInfo(),
false, false, 0));
}
}
@@ -1654,7 +1631,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Area that is at least reserved in the caller of this function.
unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
-
+
// Set the size that is at least reserved in caller of this function. Tail
// call optimized function's reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned
@@ -1663,17 +1640,17 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
MinReservedArea =
std::max(MinReservedArea,
- PPCFrameInfo::getMinCallFrameSize(false, false));
-
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+ PPCFrameLowering::getMinCallFrameSize(false, false));
+
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
getStackAlignment();
unsigned AlignMask = TargetAlign-1;
MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
-
+
FI->setMinReservedArea(MinReservedArea);
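The rounding applied to MinReservedArea above is the standard power-of-two align-up: add alignment-1, then clear the low bits. In isolation:

#include <cassert>

unsigned alignUp(unsigned Value, unsigned Align) {  // Align: power of two
  unsigned Mask = Align - 1;
  return (Value + Mask) & ~Mask;
}

int main() {
  assert(alignUp(0, 16) == 0);
  assert(alignUp(1, 16) == 16);
  assert(alignUp(16, 16) == 16);
  assert(alignUp(17, 16) == 32);
}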
SmallVector<SDValue, 8> MemOps;
-
+
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
@@ -1705,28 +1682,18 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
- // The fixed integer arguments of a variadic function are
- // stored to the VarArgsFrameIndex on the stack.
- unsigned GPRIndex = 0;
- for (; GPRIndex != FuncInfo->getVarArgsNumGPR(); ++GPRIndex) {
- SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT);
- SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0,
- false, false, 0);
- MemOps.push_back(Store);
- // Increment the address by four for the next argument to store
- SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
- FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
- }
-
- // If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by deferencing the
- // result of va_next.
- for (; GPRIndex != NumGPArgRegs; ++GPRIndex) {
- unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
+ // The fixed integer arguments of a variadic function are stored to the
+ // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
+ // the result of va_next.
+ for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
+ // Get an existing live-in vreg, or add a new one.
+ unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
+ if (!VReg)
+ VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
@@ -1735,27 +1702,17 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
// is set.
-
// The double arguments are stored to the VarArgsFrameIndex
// on the stack.
- unsigned FPRIndex = 0;
- for (FPRIndex = 0; FPRIndex != FuncInfo->getVarArgsNumFPR(); ++FPRIndex) {
- SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64);
- SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0,
- false, false, 0);
- MemOps.push_back(Store);
- // Increment the address by eight for the next argument to store
- SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
- PtrVT);
- FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
- }
-
- for (; FPRIndex != NumFPArgRegs; ++FPRIndex) {
- unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
+ for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
+ // Get an existing live-in vreg, or add a new one.
+ unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
+ if (!VReg)
+ VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
@@ -1791,7 +1748,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4;
- unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true);
+ unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
// Area that is at least reserved in caller of this function.
unsigned MinReservedArea = ArgOffset;
@@ -1915,18 +1872,18 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
InVals.push_back(FIN);
if (ObjSize==1 || ObjSize==2) {
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
- NULL, 0,
+ MachinePointerInfo(),
ObjSize==1 ? MVT::i8 : MVT::i16,
false, false, 0);
MemOps.push_back(Store);
++GPR_idx;
}
-
+
ArgOffset += PtrByteSize;
-
+
continue;
}
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
@@ -1934,11 +1891,12 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// to memory. ArgVal will be address of the beginning of
// the object.
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass, dl);
int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(),
false, false, 0);
MemOps.push_back(Store);
++GPR_idx;
@@ -1956,7 +1914,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
case MVT::i32:
if (!isPPC64) {
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass, dl);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
++GPR_idx;
} else {
@@ -1970,7 +1928,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// FALLTHROUGH
case MVT::i64: // PPC64
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass, dl);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32) {
@@ -2008,9 +1966,9 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned VReg;
if (ObjectVT == MVT::f32)
- VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
+ VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass, dl);
else
- VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
+ VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass, dl);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++FPR_idx;
@@ -2028,7 +1986,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// Note that vector arguments in registers don't reserve stack space,
// except in varargs functions.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass, dl);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
if (isVarArg) {
while ((ArgOffset % 16) != 0) {
@@ -2063,7 +2021,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
CurArgOffset + (ArgSize - ObjSize),
isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0,
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
false, false, 0);
}
@@ -2082,8 +2040,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
}
MinReservedArea =
std::max(MinReservedArea,
- PPCFrameInfo::getMinCallFrameSize(isPPC64, true));
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+ PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
getStackAlignment();
unsigned AlignMask = TargetAlign-1;
MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
@@ -2104,15 +2062,15 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// result of va_next.
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
unsigned VReg;
-
+
if (isPPC64)
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass, dl);
else
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
+ VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
@@ -2141,7 +2099,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
- unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, true);
+ unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
unsigned NumOps = Outs.size();
unsigned PtrByteSize = isPPC64 ? 8 : 4;
@@ -2153,7 +2111,6 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// 16-byte aligned.
nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
EVT ArgVT = Outs[i].VT;
// Varargs Altivec parameters are padded to a 16 byte boundary.
@@ -2183,11 +2140,11 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// conservatively assume that it is needed. As such, make sure we have at
// least enough stack space for the caller to store the 8 GPRs.
NumBytes = std::max(NumBytes,
- PPCFrameInfo::getMinCallFrameSize(isPPC64, true));
+ PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
// Tail call needs the stack to be aligned.
if (CC==CallingConv::Fast && GuaranteedTailCallOpt) {
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
getStackAlignment();
unsigned AlignMask = TargetAlign-1;
NumBytes = (NumBytes + AlignMask) & ~AlignMask;
@@ -2292,8 +2249,8 @@ StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
int FI = TailCallArgs[i].FrameIdx;
// Store relative to framepointer.
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
- PseudoSourceValue::getFixedStack(FI),
- 0, false, false, 0));
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, 0));
}
}
@@ -2311,26 +2268,26 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
if (SPDiff) {
// Calculate the new stack slot for the return address.
int SlotSize = isPPC64 ? 8 : 4;
- int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
+ int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
isDarwinABI);
int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
- PseudoSourceValue::getFixedStack(NewRetAddr), 0,
+ MachinePointerInfo::getFixedStack(NewRetAddr),
false, false, 0);
// When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
// slot as the FP is never overwritten.
if (isDarwinABI) {
int NewFPLoc =
- SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
+ SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
true);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
- PseudoSourceValue::getFixedStack(NewFPIdx), 0,
+ MachinePointerInfo::getFixedStack(NewFPIdx),
false, false, 0);
}
}
@@ -2369,15 +2326,15 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
// Load the LR and FP stack slot for later adjusting.
EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
- LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0,
+ LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
false, false, 0);
Chain = SDValue(LROpOut.getNode(), 1);
-
+
// When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
// slot as the FP is never overwritten.
if (isDarwinABI) {
FPOpOut = getFramePointerFrameIndex(DAG);
- FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0,
+ FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
false, false, 0);
Chain = SDValue(FPOpOut.getNode(), 1);
}
@@ -2397,7 +2354,8 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- false, false, NULL, 0, NULL, 0);
+ false, false, MachinePointerInfo(0),
+ MachinePointerInfo(0));
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
@@ -2407,7 +2365,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
SDValue Arg, SDValue PtrOff, int SPDiff,
unsigned ArgOffset, bool isPPC64, bool isTailCall,
bool isVector, SmallVector<SDValue, 8> &MemOpChains,
- SmallVector<TailCallArgumentInfo, 8>& TailCallArguments,
+ SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
DebugLoc dl) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
if (!isTailCall) {
@@ -2420,8 +2378,8 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, PtrVT));
}
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0));
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(), false, false, 0));
// Calculate and remember argument location.
} else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
TailCallArguments);
@@ -2460,10 +2418,14 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
- bool isPPC64, bool isSVR4ABI) {
+ const PPCSubtarget &PPCSubTarget) {
+
+ bool isPPC64 = PPCSubTarget.isPPC64();
+ bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
+
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+ NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin;
@@ -2473,24 +2435,49 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
Callee = SDValue(Dest, 0);
needIndirectCall = false;
}
- // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
- // Use indirect calls for ALL functions calls in JIT mode, since the
- // far-call stubs may be outside relocation limits for a BL instruction.
- if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
- // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
- // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
- // node so that legalize doesn't hack it.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
+ // Use indirect calls for ALL functions calls in JIT mode, since the
+ // far-call stubs may be outside relocation limits for a BL instruction.
+ if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
+ unsigned OpFlags = 0;
+ if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
+ PPCSubTarget.getDarwinVers() < 9 &&
+ (G->getGlobal()->isDeclaration() ||
+ G->getGlobal()->isWeakForLinker())) {
+ // PC-relative references to external symbols should go through $stub,
+ // unless we're building with the leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = PPCII::MO_DARWIN_STUB;
+ }
+
+ // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
+ // every direct call is) turn it into a TargetGlobalAddress /
+ // TargetExternalSymbol node so that legalize doesn't hack it.
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
- Callee.getValueType());
+ Callee.getValueType(),
+ 0, OpFlags);
needIndirectCall = false;
}
}
+
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
- Callee.getValueType());
- needIndirectCall = false;
+ unsigned char OpFlags = 0;
+
+ if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
+ PPCSubTarget.getDarwinVers() < 9) {
+ // PC-relative references to external symbols should go through $stub,
+ // unless we're building with the leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = PPCII::MO_DARWIN_STUB;
+ }
+
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
+ OpFlags);
+ needIndirectCall = false;
}
+
if (needIndirectCall) {
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
// to do the call, we can't use PPCISD::CALL.
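The two blocks added above gate direct calls on the same condition; a hedged distillation (the helper and parameter names are mine, not from the source):

#include <cassert>

// PC-relative references go through a $stub on Darwin before version 9
// (pre-Leopard) under non-static relocation, when the callee may resolve
// outside the current image (declaration, weak, or external symbol).
bool needsDarwinStub(bool IsStaticReloc, unsigned DarwinVers,
                     bool MayResolveExternally) {
  return !IsStaticReloc && DarwinVers < 9 && MayResolveExternally;
}

int main() {
  assert(needsDarwinStub(false, 8, true));   // pre-Leopard PIC: use the stub
  assert(!needsDarwinStub(false, 9, true));  // Leopard linker synthesizes stubs
  assert(!needsDarwinStub(true, 8, true));   // static relocation: direct call
}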
@@ -2525,7 +2512,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// Load the address of the function entry point from the function
// descriptor.
- SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Flag);
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps,
InFlag.getNode() ? 3 : 2);
Chain = LoadFuncPtr.getValue(1);
@@ -2552,7 +2539,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// prevents the register allocator from allocating it), resulting in an
// additional register being allocated and an unnecessary move instruction
// being generated.
- VTs = DAG.getVTList(MVT::Other, MVT::Flag);
+ VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
Callee, InFlag);
Chain = LoadTOCPtr.getValue(0);
@@ -2569,7 +2556,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
NodeTys.clear();
NodeTys.push_back(MVT::Other);
- NodeTys.push_back(MVT::Flag);
+ NodeTys.push_back(MVT::Glue);
Ops.push_back(Chain);
CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin;
Callee.setNode(0);
@@ -2637,8 +2624,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
isTailCall, RegsToPass, Ops, NodeTys,
- PPCSubTarget.isPPC64(),
- PPCSubTarget.isSVR4ABI());
+ PPCSubTarget);
// When performing tail call optimization the callee pops its arguments off
// the stack. Account for this here so these bytes can be pushed back on in
@@ -2684,7 +2670,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// stack frame. If caller and callee belong to the same module (and have the
// same TOC), the NOP will remain unchanged.
if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) {
- SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
if (CallOpc == PPCISD::BCTRL_SVR4) {
// This is a call through a function pointer.
// Restore the caller TOC from the save area into R2.
@@ -2699,7 +2685,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
InFlag = Chain.getValue(1);
} else {
// Otherwise insert NOP.
- InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Flag, InFlag);
+ InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Glue, InFlag);
}
}
@@ -2726,15 +2712,14 @@ PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
- if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
+ if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
- } else {
- return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, OutVals, Ins,
- dl, DAG, InVals);
- }
+
+ return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
+ isTailCall, Outs, OutVals, Ins,
+ dl, DAG, InVals);
}
SDValue
@@ -2763,7 +2748,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// in this function's (MF) stack pointer stack slot 0(SP).
if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
-
+
// Count how many bytes are to be pushed on the stack, including the linkage
// area, parameter list area and the part of the local variable space which
// contains copies of aggregates which are passed by value.
@@ -2774,19 +2759,19 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
- CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
+ CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
if (isVarArg) {
// Handle fixed and variable vector arguments differently.
// Fixed vector arguments go into registers as long as registers are
// available. Variable vector arguments always go into memory.
unsigned NumArgs = Outs.size();
-
+
for (unsigned i = 0; i != NumArgs; ++i) {
- EVT ArgVT = Outs[i].VT;
+ MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result;
-
+
if (Outs[i].IsFixed) {
Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
CCInfo);
@@ -2794,11 +2779,11 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo);
}
-
+
if (Result) {
#ifndef NDEBUG
errs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString() << "\n";
+ << EVT(ArgVT).getEVTString() << "\n";
#endif
llvm_unreachable(0);
}
@@ -2807,7 +2792,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// All arguments are treated the same.
CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
}
-
+
// Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs,
@@ -2822,7 +2807,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// space variable where copies of aggregates which are passed by value are
// stored.
unsigned NumBytes = CCByValInfo.getNextStackOffset();
-
+
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
@@ -2842,7 +2827,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// arguments that may not fit in the registers available for argument
// passing.
SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
-
+
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
SmallVector<SDValue, 8> MemOpChains;
@@ -2854,7 +2839,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
-
+
if (Flags.isByVal()) {
// Argument is an aggregate which is passed by value, thus we need to
// create a copy of it in the local variable space of the current stack
@@ -2863,33 +2848,33 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
assert((j < ByValArgLocs.size()) && "Index out of bounds!");
CCValAssign &ByValVA = ByValArgLocs[j++];
assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
-
+
// Memory reserved in the local variable space of the callers stack frame.
unsigned LocMemOffset = ByValVA.getLocMemOffset();
-
+
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
-
+
// Create a copy of the argument in the local area of the current
// stack frame.
SDValue MemcpyCall =
CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl);
-
+
// This must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
CallSeqStart.getNode()->getOperand(1));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode());
Chain = CallSeqStart = NewCallSeqStart;
-
+
// Pass the address of the aggregate copy on the stack either in a
// physical register or in the parameter list area of the current stack
// frame to the callee.
Arg = PtrOff;
}
-
+
if (VA.isRegLoc()) {
// Put argument in a physical register.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
@@ -2903,7 +2888,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), LocMemOffset,
+ MachinePointerInfo(),
false, false, 0));
} else {
// Calculate and remember argument location.
@@ -2912,11 +2897,11 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
}
}
}
-
+
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
-
+
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
@@ -2925,7 +2910,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
-
+
// Set CR6 to true if this is a vararg call.
if (isVarArg) {
SDValue SetCR(DAG.getMachineNode(PPC::CRSET, dl, MVT::i32), 0);
@@ -2933,10 +2918,9 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
InFlag = Chain.getValue(1);
}
- if (isTailCall) {
+ if (isTailCall)
PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
false, TailCallArguments);
- }
return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
@@ -3012,7 +2996,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// memory. Also, if this is a vararg function, floating point operations
// must be stored to our stack, and loaded into integer regs as well, if
// any integer regs are available for argument passing.
- unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true);
+ unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
static const unsigned GPR_32[] = { // 32-bit registers.
@@ -3066,8 +3050,9 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Everything else is passed left-justified.
EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, dl, Chain, Arg,
- NULL, 0, VT, false, false, 0);
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
+ MachinePointerInfo(), VT,
+ false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3104,7 +3089,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, NULL, 0,
+ SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
+ MachinePointerInfo(),
false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3136,21 +3122,22 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
if (isVarArg) {
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Store);
// Float varargs are always shadowed in available integer registers
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0,
- false, false, 0);
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
+ MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0,
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
+ MachinePointerInfo(),
false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3194,11 +3181,12 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// entirely in R registers. Maybe later.
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, PtrVT));
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Store);
if (VR_idx != NumVRs) {
- SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0,
+ SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
+ MachinePointerInfo(),
false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
@@ -3209,7 +3197,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, PtrVT));
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, NULL, 0,
+ SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3275,14 +3263,14 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// TOC save area offset.
SDValue PtrOff = DAG.getIntPtrConstant(40);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
- Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, NULL, 0,
+ Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
false, false, 0);
}
// On Darwin, R12 must contain the address of an indirect callee. This does
// not mean the MTCTR instruction must use R12; it's easier to model this as
// an extra parameter, so do that.
- if (!isTailCall &&
+ if (!isTailCall &&
!dyn_cast<GlobalAddressSDNode>(Callee) &&
!dyn_cast<ExternalSymbolSDNode>(Callee) &&
!isBLACompatibleAddress(Callee, DAG))
@@ -3298,10 +3286,9 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
InFlag = Chain.getValue(1);
}
- if (isTailCall) {
+ if (isTailCall)
PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
FPOp, true, TailCallArguments);
- }
return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
@@ -3362,14 +3349,15 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
SDValue SaveSP = Op.getOperand(1);
// Load the old link SP.
- SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0,
+ SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
+ MachinePointerInfo(),
false, false, 0);
// Restore the stack pointer.
Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
// Store the old link SP.
- return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0,
+ return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
false, false, 0);
}
@@ -3390,7 +3378,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
// If the frame pointer save index hasn't been defined yet.
if (!RASI) {
// Find out what the fix offset of the frame pointer save area.
- int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
+ int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
// Allocate the frame index for frame pointer save area.
RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
// Save the result.
@@ -3414,7 +3402,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
// If the frame pointer save index hasn't been defined yet.
if (!FPSI) {
// Find out what the fix offset of the frame pointer save area.
- int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
+ int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
isDarwinABI);
// Allocate the frame index for frame pointer save area.
@@ -3533,7 +3521,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32:
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
- PPCISD::FCTIDZ,
+ PPCISD::FCTIDZ,
dl, MVT::f64, Src);
break;
case MVT::i64:
@@ -3545,15 +3533,15 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
// Emit a store to the stack slot.
- SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0,
- false, false, 0);
+ SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
+ MachinePointerInfo(), false, false, 0);
// Result is a load from the stack slot. If loading 4 bytes, make sure to
// add in a bias.
if (Op.getValueType() == MVT::i32)
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, FIPtr.getValueType()));
- return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0,
+ return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(),
false, false, 0);
}
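
A note on the +4 bias above: FCTIWZ/FCTIDZ leave the converted integer in the low word of an f64 register, which LowerFP_TO_INT spills to an 8-byte stack slot; PPC is big-endian, so a 32-bit result sits at byte offset 4 of that slot. A minimal sketch of the layout (the helper is mine, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // On big-endian PPC, the i32 produced by fctiwz occupies bytes 4..7 of
    // the 8-byte f64 conversion slot, hence the FIPtr + 4 adjustment when
    // the result type is MVT::i32.
    int32_t readConvertedWord(const unsigned char slot[8]) {
      int32_t v;
      std::memcpy(&v, slot + 4, sizeof v);  // byte offset 4 = low word (BE)
      return v;
    }
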
@@ -3565,8 +3553,7 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
return SDValue();
if (Op.getOperand(0).getValueType() == MVT::i64) {
- SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
- MVT::f64, Op.getOperand(0));
+ SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
if (Op.getValueType() == MVT::f32)
FP = DAG.getNode(ISD::FP_ROUND, dl,
@@ -3591,14 +3578,15 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
// STD the extended value into the stack slot.
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
- MachineMemOperand::MOStore, 0, 8, 8);
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
+ MachineMemOperand::MOStore, 8, 8);
SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx };
SDValue Store =
DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other),
Ops, 4, MVT::i64, MMO);
// Load the value as a double.
- SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0, false, false, 0);
+ SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(),
+ false, false, 0);
// FCFID it and return it.
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
@@ -3637,19 +3625,19 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// Save FP Control Word to register
NodeTys.push_back(MVT::f64); // return register
- NodeTys.push_back(MVT::Flag); // unused in this context
+ NodeTys.push_back(MVT::Glue); // unused in this context
SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
// Save FP register to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
- StackSlot, NULL, 0, false, false, 0);
+ StackSlot, MachinePointerInfo(), false, false,0);
// Load FP Control Word from low 32 bits of stack slot.
SDValue Four = DAG.getConstant(4, PtrVT);
SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
- SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0,
+ SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
false, false, 0);
// Transform as necessary
@@ -3786,7 +3774,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
&Ops[0], Ops.size());
- return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
+ return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
@@ -3815,14 +3803,14 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
EVT VT, SelectionDAG &DAG, DebugLoc dl) {
// Force LHS/RHS to be the right type.
- LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
- RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
+ LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
+ RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
int Ops[16];
for (unsigned i = 0; i != 16; ++i)
Ops[i] = i + Amt;
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
+ return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
// If this is a case we can't handle, return null and let the default
@@ -3856,7 +3844,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
SDValue Z = DAG.getConstant(0, MVT::i32);
Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
+ Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
}
return Op;
}
@@ -3875,7 +3863,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
@@ -3891,7 +3879,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// xor by OnesV to invert it.
Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// Check to see if this is a wide variety of vsplti*, binop self cases.
@@ -3917,7 +3905,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vslw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// vsplti + srl self.
@@ -3928,7 +3916,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vsrw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// vsplti + sra self.
@@ -3939,7 +3927,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vsraw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// vsplti + rol self.
@@ -3951,7 +3939,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vrlw
};
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
// t = vsplti c, result = vsldoi t, t, 1
@@ -3978,14 +3966,14 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
}
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) {
SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
}
return SDValue();
@@ -4062,10 +4050,10 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
}
EVT VT = OpLHS.getValueType();
- OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS);
- OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS);
+ OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
+ OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
+ return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
@@ -4118,7 +4106,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// perfect shuffle table to emit an optimal matching sequence.
SmallVector<int, 16> PermMask;
SVOp->getMask(PermMask);
-
+
unsigned PFIndexes[4];
bool isFourElementShuffle = true;
for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
@@ -4253,7 +4241,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(CompareOpc, MVT::i32));
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
}
// Create the PPCISD altivec 'dot' comparison node.
@@ -4264,7 +4252,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
};
std::vector<EVT> VTs;
VTs.push_back(Op.getOperand(2).getValueType());
- VTs.push_back(MVT::Flag);
+ VTs.push_back(MVT::Glue);
SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
// Now that we have the comparison, emit a copy from the CR to a GPR.
@@ -4317,10 +4305,10 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
// Store the input value into Value#0 of the stack slot.
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
- Op.getOperand(0), FIdx, NULL, 0,
+ Op.getOperand(0), FIdx, MachinePointerInfo(),
false, false, 0);
// Load it out.
- return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, NULL, 0,
+ return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
false, false, 0);
}
@@ -4336,9 +4324,9 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
// Shrinkify inputs to v8i16.
- LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
- RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
- RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
+ LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
+ RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
+ RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
// Low parts multiplied together, generating 32-bit results (we ignore the
// top parts).
@@ -4364,12 +4352,12 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
// Multiply the even 8-bit parts, producing 16-bit sums.
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
LHS, RHS, DAG, dl, MVT::v8i16);
- EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
+ EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
// Multiply the odd 8-bit parts, producing 16-bit sums.
SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
LHS, RHS, DAG, dl, MVT::v8i16);
- OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
+ OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
// Merge the results together.
int Ops[16];
@@ -4391,7 +4379,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
+ case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for PPC");
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
@@ -4456,20 +4444,20 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
NodeTys.push_back(MVT::f64); // Return register
- NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
+ NodeTys.push_back(MVT::Glue); // Returns a flag for later insns
Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
MFFSreg = Result.getValue(0);
InFlag = Result.getValue(1);
NodeTys.clear();
- NodeTys.push_back(MVT::Flag); // Returns a flag
+ NodeTys.push_back(MVT::Glue); // Returns a flag
Ops[0] = DAG.getConstant(31, MVT::i32);
Ops[1] = InFlag;
Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
InFlag = Result.getValue(0);
NodeTys.clear();
- NodeTys.push_back(MVT::Flag); // Returns a flag
+ NodeTys.push_back(MVT::Glue); // Returns a flag
Ops[0] = DAG.getConstant(30, MVT::i32);
Ops[1] = InFlag;
Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
@@ -4477,7 +4465,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
NodeTys.clear();
NodeTys.push_back(MVT::f64); // result of add
- NodeTys.push_back(MVT::Flag); // Returns a flag
+ NodeTys.push_back(MVT::Glue); // Returns a flag
Ops[0] = Lo;
Ops[1] = Hi;
Ops[2] = InFlag;
@@ -5283,7 +5271,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAG.getConstant(CompareOpc, MVT::i32)
};
VTs.push_back(LHS.getOperand(2).getValueType());
- VTs.push_back(MVT::Flag);
+ VTs.push_back(MVT::Glue);
SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
// Unpack the result based on how the target uses it.
@@ -5377,6 +5365,47 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+PPCTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ const Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'b':
+ if (type->isIntegerTy())
+ weight = CW_Register;
+ break;
+ case 'f':
+ if (type->isFloatTy())
+ weight = CW_Register;
+ break;
+ case 'd':
+ if (type->isDoubleTy())
+ weight = CW_Register;
+ break;
+ case 'v':
+ if (type->isVectorTy())
+ weight = CW_Register;
+ break;
+ case 'y':
+ weight = CW_Register;
+ break;
+ }
+ return weight;
+}
+
std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const {
@@ -5536,19 +5565,19 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
-
- DAG.getConstant(PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI),
+
+ DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
isPPC64? MVT::i64 : MVT::i32);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- NULL, 0, false, false, 0);
+ MachinePointerInfo(), false, false, 0);
}
// Just load the return address off the stack.
SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, NULL, 0, false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, 0);
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -5571,7 +5600,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
PtrVT);
while (Depth--)
FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
- FrameAddr, NULL, 0, false, false, 0);
+ FrameAddr, MachinePointerInfo(), false, false, 0);
return FrameAddr;
}
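
Two mechanical LLVM-wide API migrations run through the PPCISelLowering.cpp hunks above: the value type MVT::Flag was renamed MVT::Glue (same semantics, the type that glues scheduled nodes together), and the old (const Value*, offset) pair on getLoad/getStore was replaced by a MachinePointerInfo describing the memory location. A before/after fragment in the shape used throughout this diff; it assumes a SelectionDAG DAG, a chain, a pointer SDValue Ptr, and a frame index FrameIdx are in scope, and mirrors the 2.9-era signatures visible in the hunks rather than any later API:

    // Old: DAG.getLoad(VT, dl, Chain, Ptr, NULL, 0, false, false, 0);
    // New: the memory operand is described by a MachinePointerInfo.
    SDValue Ld = DAG.getLoad(MVT::i32, dl, Chain, Ptr,
                             MachinePointerInfo(),   // unknown location
                             false, false, 0);       // volatile, nontemporal, align
    SDValue St = DAG.getStore(Ld.getValue(1), dl, Ld, Ptr,
                              MachinePointerInfo::getFixedStack(FrameIdx),
                              false, false, 0);

MachinePointerInfo() (no arguments) stands for "unknown location" and is the direct replacement for the old NULL, 0 pair, which is why it appears verbatim in so many hunks above.
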
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 700816f..80cab75 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -308,6 +308,12 @@ namespace llvm {
bool is8bit, unsigned Opcode) const;
ConstraintType getConstraintType(const std::string &Constraint) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -383,7 +389,6 @@ namespace llvm {
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
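
The getSingleConstraintMatchWeight overload declared above lets the inline-asm constraint matcher rank alternative constraints by operand type: per the implementation earlier in this diff, 'f' meeting a float operand, 'd' a double, 'b' an integer, or 'v' a vector each score CW_Register. A hypothetical user-level example that would exercise the 'f' path; the function name is mine and the mnemonic is ordinary PPC assembly, not part of the patch:

    // '=f' / 'f' request floating-point registers; with the new hook a
    // float operand weights 'f' as CW_Register.
    float fmadd_ppc(float a, float b, float c) {
      float r;
      // fmadds frD, frA, frC, frB computes frA * frC + frB, i.e. r = a*b + c.
      asm("fmadds %0, %1, %2, %3" : "=f"(r) : "f"(a), "f"(b), "f"(c));
      return r;
    }
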
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index a0781b9..6636b69 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -23,9 +23,11 @@ def u16imm64 : Operand<i64> {
}
def symbolHi64 : Operand<i64> {
let PrintMethod = "printSymbolHi";
+ let EncoderMethod = "getHA16Encoding";
}
def symbolLo64 : Operand<i64> {
let PrintMethod = "printSymbolLo";
+ let EncoderMethod = "getLO16Encoding";
}
//===----------------------------------------------------------------------===//
@@ -58,7 +60,7 @@ def HI48_64 : SDNodeXForm<imm, [{
//
let Defs = [LR8] in
- def MovePCtoLR8 : Pseudo<(outs), (ins piclabel:$label), "bl $label", []>,
+ def MovePCtoLR8 : Pseudo<(outs), (ins piclabel:$label), "", []>,
PPC970_Unit_BRU;
// Darwin ABI Calls.
@@ -130,39 +132,31 @@ def : Pat<(PPCnop),
let usesCustomInserter = 1 in {
let Uses = [CR0] in {
def ATOMIC_LOAD_ADD_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_ADD_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_SUB_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_SUB_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_OR_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_OR_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_XOR_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_XOR_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_AND_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_AND_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_NAND_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
- "${:comment} ATOMIC_LOAD_NAND_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "",
[(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_CMP_SWAP_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new),
- "${:comment} ATOMIC_CMP_SWAP_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "",
[(set G8RC:$dst,
(atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>;
def ATOMIC_SWAP_I64 : Pseudo<
- (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new),
- "${:comment} ATOMIC_SWAP_I64 PSEUDO!",
+ (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "",
[(set G8RC:$dst, (atomic_swap_64 xoaddr:$ptr, G8RC:$new))]>;
}
}
@@ -240,8 +234,7 @@ def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins G8RC:$rS),
}
let Defs = [X1], Uses = [X1] in
-def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),
- "${:comment} DYNALLOC8 $result, $negsize, $fpsi",
+def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"",
[(set G8RC:$result,
(PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>;
@@ -500,7 +493,7 @@ def LWAX : XForm_1<31, 341, (outs G8RC:$rD), (ins memrr:$src),
// Update forms.
let mayLoad = 1 in
-def LHAU8 : DForm_1<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp,
+def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp,
ptr_rc:$rA),
"lhau $rD, $disp($rA)", LdStGeneral,
[]>, RegConstraint<"$rA = $ea_result">,
@@ -555,18 +548,20 @@ let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src),
"ld $rD, $src", LdStLD,
[(set G8RC:$rD, (load ixaddr:$src))]>, isPPC64;
-def LDtoc: DSForm_1<58, 0, (outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
- "ld $rD, $disp($reg)", LdStLD,
- [(set G8RC:$rD,
+def LDtoc: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
+ "",
+ [(set G8RC:$rD,
(PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64;
-let RST = 2, DS = 8 in
+
+let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
def LDinto_toc: DSForm_1<58, 0, (outs), (ins G8RC:$reg),
"ld 2, 8($reg)", LdStLD,
[(PPCload_toc G8RC:$reg)]>, isPPC64;
-let RST = 2, DS = 40, RA = 1 in
+
+let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
def LDtoc_restore : DSForm_1<58, 0, (outs), (ins),
"ld 2, 40(1)", LdStLD,
- []>, isPPC64;
+ [(PPCtoc_restore)]>, isPPC64;
def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src),
"ldx $rD, $src", LdStLD,
[(set G8RC:$rD, (load xaddr:$src))]>, isPPC64;
@@ -579,8 +574,6 @@ def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr
}
-def : Pat<(PPCtoc_restore),
- (LDtoc_restore)>;
def : Pat<(PPCload ixaddr:$src),
(LD ixaddr:$src)>;
def : Pat<(PPCload xaddr:$src),
@@ -621,14 +614,14 @@ def STDX : XForm_8<31, 149, (outs), (ins G8RC:$rS, memrr:$dst),
let PPC970_Unit = 2 in {
-def STBU8 : DForm_1<38, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
+def STBU8 : DForm_1a<38, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"stbu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res,
(pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STHU8 : DForm_1<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
+def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"sthu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res,
@@ -636,8 +629,8 @@ def STHU8 : DForm_1<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STDU : DSForm_1<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
- s16immX4:$ptroff, ptr_rc:$ptrreg),
+def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
+ s16immX4:$ptroff, ptr_rc:$ptrreg),
"stdu $rS, $ptroff($ptrreg)", LdStSTD,
[(set ptr_rc:$ea_res, (pre_store G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
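
The new EncoderMethod hooks on symbolHi/symbolLo (getHA16Encoding/getLO16Encoding) feed the MC binary encoder for the addis/addi-style high/low relocation pair. The split itself is the usual sign-compensated one; a sketch, with helper names of my choosing:

    #include <cstdint>

    // Split a 32-bit value so that (ha16 << 16) + sign_extend(lo16) == x,
    // which is the invariant the ha/lo operand pair must satisfy because
    // the low half is added as a signed 16-bit immediate.
    uint16_t lo16(uint32_t x) { return (uint16_t)(x & 0xFFFF); }
    uint16_t ha16(uint32_t x) { return (uint16_t)((x + 0x8000) >> 16); }
    // Check: x == ((uint32_t)ha16(x) << 16) + (int32_t)(int16_t)lo16(x)
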
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index 4357bdc..84a15b1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -102,6 +102,19 @@ class DForm_1<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
bits<5> A;
+ bits<21> Addr;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = A;
+ let Inst{11-15} = Addr{20-16}; // Base Reg
+ let Inst{16-31} = Addr{15-0}; // Displacement
+}
+
+class DForm_1a<bits<6> opcode, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> A;
bits<16> C;
bits<5> B;
@@ -112,6 +125,7 @@ class DForm_1<bits<6> opcode, dag OOL, dag IOL, string asmstr,
let Inst{16-31} = C;
}
+
class DForm_2<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: DForm_base<opcode, OOL, IOL, asmstr, itin, pattern>;
@@ -147,8 +161,7 @@ class DForm_4_zero<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: DForm_1<opcode, OOL, IOL, asmstr, itin, pattern> {
let A = 0;
- let B = 0;
- let C = 0;
+ let Addr = 0;
}
class DForm_5<bits<6> opcode, dag OOL, dag IOL, string asmstr,
@@ -188,17 +201,31 @@ class DSForm_1<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
bits<5> RST;
- bits<14> DS;
- bits<5> RA;
+ bits<19> DS_RA;
let Pattern = pattern;
let Inst{6-10} = RST;
- let Inst{11-15} = RA;
- let Inst{16-29} = DS;
+ let Inst{11-15} = DS_RA{18-14}; // Register #
+ let Inst{16-29} = DS_RA{13-0}; // Displacement.
let Inst{30-31} = xo;
}
+class DSForm_1a<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> RST;
+ bits<14> DS;
+ bits<5> RA;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = RST;
+ let Inst{11-15} = RA;
+ let Inst{16-29} = DS;
+ let Inst{30-31} = xo;
+}
+
// 1.7.6 X-Form
class XForm_base_r3xo<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
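
The reworked DForm_1 and DSForm_1 above fold register and displacement into one operand (a 21-bit Addr, a 19-bit DS_RA) so a single EncoderMethod can emit both fields; the old separate-field layouts live on as DForm_1a/DSForm_1a for the update-form instructions that still need them. A sketch of the packing the bit slices imply (function names are mine):

    #include <cstdint>

    // DForm_1: Addr{20-16} = base register, Addr{15-0} = 16-bit displacement.
    uint32_t packDFormAddr(unsigned BaseReg, uint16_t Disp) {
      return ((BaseReg & 0x1Fu) << 16) | Disp;
    }

    // DSForm_1: DS_RA{18-14} = base register, DS_RA{13-0} = DS displacement
    // (byte offset >> 2; DS-form addresses are 4-byte aligned, and the low
    // two bits of the word are the xo opcode extension).
    uint32_t packDSFormAddr(unsigned BaseReg, uint32_t DispBy4) {
      return ((BaseReg & 0x1Fu) << 14) | (DispBy4 & 0x3FFFu);
    }
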
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index c17108f..53b0491 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -17,6 +17,7 @@
#include "PPCPredicates.h"
#include "PPCGenInstrInfo.inc"
#include "PPCTargetMachine.h"
+#include "PPCHazardRecognizers.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -39,7 +40,19 @@ PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
: TargetInstrInfoImpl(PPCInsts, array_lengthof(PPCInsts)), TM(tm),
RI(*TM.getSubtargetImpl(), *this) {}
-unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
+/// this target when scheduling the DAG.
+ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
+ const TargetMachine *TM,
+ const ScheduleDAG *DAG) const {
+ // Should use subtarget info to pick the right hazard recognizer. For
+ // now, always return a PPC970 recognizer.
+ const TargetInstrInfo *TII = TM->getInstrInfo();
+ assert(TII && "No InstrInfo?");
+ return new PPCHazardRecognizer970(*TII);
+}
+
+unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
@@ -57,7 +70,7 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
@@ -84,11 +97,11 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI)
return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
-
+
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
return 0;
-
+
// If we have a zero rotate count, we have:
// M = mask(MB,ME)
// Op0 = (Op1 & ~M) | (Op2 & M)
@@ -135,14 +148,14 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
MI->getOperand(1).setReg(Reg2);
MI->getOperand(2).setIsKill(Reg1IsKill);
MI->getOperand(1).setIsKill(Reg2IsKill);
-
+
// Swap the mask around.
MI->getOperand(4).setImm((ME+1) & 31);
MI->getOperand(5).setImm((MB-1) & 31);
return MI;
}
-void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
+void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
DebugLoc DL;
BuildMI(MBB, MI, DL, get(PPC::NOP));
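
The mask rewrite in commuteInstruction above leans on PPC's wrap-around mask convention: mask(MB, ME) covers bits MB..ME in big-endian numbering and wraps when MB > ME, so for a non-full mask its complement is mask(ME+1, MB-1) mod 32 — exactly the (ME+1)&31 / (MB-1)&31 swap applied to operands 4 and 5. A sketch of the identity:

    #include <cstdint>

    // PPC rotate-mask: selects bits MB..ME (bit 0 = MSB), wrapping if MB > ME.
    uint32_t ppcMask(unsigned MB, unsigned ME) {
      uint32_t m = 0;
      for (unsigned i = MB & 31; ; i = (i + 1) & 31) {
        m |= 0x80000000u >> i;
        if (i == (ME & 31)) break;
      }
      return m;
    }
    // For a non-full mask M = ppcMask(MB, ME):
    //   ~M == ppcMask((ME + 1) & 31, (MB - 1) & 31)
    // so with a zero rotate count,
    //   (Op1 & ~M) | (Op2 & M) == (Op2 & ~M') | (Op1 & M')  where M' = ~M,
    // which is why swapping the operands and complementing the mask commutes
    // the rlwimi.
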
@@ -169,7 +182,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Get the last instruction in the block.
MachineInstr *LastInst = I;
-
+
// If there is only one terminator instruction, process it.
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
if (LastInst->getOpcode() == PPC::B) {
@@ -189,7 +202,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Otherwise, don't know what this is.
return true;
}
-
+
// Get the instruction before it if it's a terminator.
MachineInstr *SecondLastInst = I;
@@ -197,9 +210,9 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
if (SecondLastInst && I != MBB.begin() &&
isUnpredicatedTerminator(--I))
return true;
-
+
// If the block ends with PPC::B and PPC:BCC, handle it.
- if (SecondLastInst->getOpcode() == PPC::BCC &&
+ if (SecondLastInst->getOpcode() == PPC::BCC &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(2).isMBB() ||
!LastInst->getOperand(0).isMBB())
@@ -210,10 +223,10 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
FBB = LastInst->getOperand(0).getMBB();
return false;
}
-
+
// If the block ends with two PPC:Bs, handle it. The second one is not
// executed, so remove it.
- if (SecondLastInst->getOpcode() == PPC::B &&
+ if (SecondLastInst->getOpcode() == PPC::B &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(0).isMBB())
return true;
@@ -239,17 +252,17 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC)
return 0;
-
+
// Remove the branch.
I->eraseFromParent();
-
+
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (I->getOpcode() != PPC::BCC)
return 1;
-
+
// Remove the branch.
I->eraseFromParent();
return 2;
@@ -262,9 +275,9 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 2 || Cond.size() == 0) &&
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
"PPC branch conditions have two components!");
-
+
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
@@ -274,7 +287,7 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
-
+
// Two-way Conditional Branch.
BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
@@ -377,11 +390,11 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
// We need to store the CR in the low 4-bits of the saved value. First,
// issue a MFCR to save all of the CRBits.
- unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
+ unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
PPC::R2 : PPC::R0;
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFCRpseud), ScratchReg)
.addReg(SrcReg, getKillRegState(isKill)));
-
+
// If the saved register wasn't CR0, shift the bits left so that they are
// in CR0's slot.
if (SrcReg != PPC::CR0) {
@@ -391,7 +404,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
.addReg(ScratchReg).addImm(ShiftBits)
.addImm(0).addImm(31));
}
-
+
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
.addReg(ScratchReg,
getKillRegState(isKill)),
@@ -428,14 +441,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
Reg = PPC::CR7;
- return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
+ return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
PPC::CRRCRegisterClass, NewMIs);
} else if (RC == PPC::VRRCRegisterClass) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// STVX VAL, 0, R0
- //
+ //
// FIXME: We use R0 here, because it isn't available for RA.
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
FrameIdx, 0, 0));
@@ -469,8 +482,9 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
- MachineMemOperand::MOStore, /*Offset=*/0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MachineMemOperand::MOStore,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
NewMIs.back()->addMemOperand(MF, MMO);
@@ -513,9 +527,9 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
// at the moment.
unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
PPC::R2 : PPC::R0;
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
ScratchReg), FrameIdx));
-
+
// If the reloaded register isn't CR0, shift the bits right so that they are
// in the right CR's slot.
if (DestReg != PPC::CR0) {
@@ -525,11 +539,11 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
.addReg(ScratchReg).addImm(32-ShiftBits).addImm(0)
.addImm(31));
}
-
+
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTCRF), DestReg)
.addReg(ScratchReg));
} else if (RC == PPC::CRBITRCRegisterClass) {
-
+
unsigned Reg = 0;
if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
DestReg == PPC::CR0EQ || DestReg == PPC::CR0UN)
@@ -556,14 +570,14 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
DestReg == PPC::CR7EQ || DestReg == PPC::CR7UN)
Reg = PPC::CR7;
- return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
+ return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
PPC::CRRCRegisterClass, NewMIs);
} else if (RC == PPC::VRRCRegisterClass) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// Dest = LVX 0, R0
- //
+ //
// FIXME: We use R0 here, because it isn't available for RA.
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
FrameIdx, 0, 0));
@@ -590,8 +604,9 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
- MachineMemOperand::MOLoad, /*Offset=*/0,
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
NewMIs.back()->addMemOperand(MF, MMO);
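
The CreateTargetHazardRecognizer hook added in this file moves hazard-recognizer construction behind TargetInstrInfo, so scheduler drivers no longer hard-code a target's recognizer type. A sketch of the call site, assuming a TargetMachine *TM and ScheduleDAG *DAG are at hand (and noting, per the FIXME in the hunk above, that PPC currently always hands back a PPC970 recognizer regardless of subtarget):

    // Scheduler drivers ask the target for its recognizer:
    ScheduleHazardRecognizer *HR =
        TM->getInstrInfo()->CreateTargetHazardRecognizer(TM, DAG);
    // The list scheduler then consults HR for hazards before issuing each
    // SUnit and notifies it via EmitInstruction/AdvanceCycle;
    // PPCHazardRecognizer970 uses this to model PPC970 dispatch groups.
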
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index fc7b7b3..b5249ae 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -32,7 +32,7 @@ enum {
/// PPC970_First - This instruction starts a new dispatch group, so it will
/// always be the first one in the group.
PPC970_First = 0x1,
-
+
/// PPC970_Single - This instruction starts a new dispatch group and
/// terminates it, so it will be the sole instruction in the group.
PPC970_Single = 0x2,
@@ -40,7 +40,7 @@ enum {
/// PPC970_Cracked - This instruction is cracked into two pieces, requiring
/// two dispatch pipes to be available to issue.
PPC970_Cracked = 0x4,
-
+
/// PPC970_Mask/Shift - This is a bitmask that selects the pipeline type that
/// an instruction is issued to.
PPC970_Shift = 3,
@@ -58,9 +58,9 @@ enum PPC970_Unit {
PPC970_VPERM = 6 << PPC970_Shift, // Vector Permute Unit
PPC970_BRU = 7 << PPC970_Shift // Branch Unit
};
-}
-
-
+} // end namespace PPCII
+
+
class PPCInstrInfo : public TargetInstrInfoImpl {
PPCTargetMachine &TM;
const PPCRegisterInfo RI;
@@ -69,7 +69,7 @@ class PPCInstrInfo : public TargetInstrInfoImpl {
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- void LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
+ void LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
@@ -82,6 +82,10 @@ public:
///
virtual const PPCRegisterInfo &getRegisterInfo() const { return RI; }
+ ScheduleHazardRecognizer *
+ CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const;
+
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
@@ -90,8 +94,8 @@ public:
// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
-
- virtual void insertNoop(MachineBasicBlock &MBB,
+
+ virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
@@ -109,7 +113,7 @@ public:
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
-
+
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -121,7 +125,7 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
-
+
virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx,
uint64_t Offset,
@@ -130,7 +134,7 @@ public:
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
+
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index eb100ec..82aadeb 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -68,17 +68,17 @@ def PPCstfiwx : SDNode<"PPCISD::STFIWX", SDT_PPCstfiwx,
// This sequence is used for long double->int conversions. It changes the
// bits in the FPSCR which is not modelled.
def PPCmffs : SDNode<"PPCISD::MFFS", SDTypeProfile<1, 0, [SDTCisVT<0, f64>]>,
- [SDNPOutFlag]>;
+ [SDNPOutGlue]>;
def PPCmtfsb0 : SDNode<"PPCISD::MTFSB0", SDTypeProfile<0, 1, [SDTCisInt<0>]>,
- [SDNPInFlag, SDNPOutFlag]>;
+ [SDNPInGlue, SDNPOutGlue]>;
def PPCmtfsb1 : SDNode<"PPCISD::MTFSB1", SDTypeProfile<0, 1, [SDTCisInt<0>]>,
- [SDNPInFlag, SDNPOutFlag]>;
+ [SDNPInGlue, SDNPOutGlue]>;
def PPCfaddrtz: SDNode<"PPCISD::FADDRTZ", SDTFPBinOp,
- [SDNPInFlag, SDNPOutFlag]>;
+ [SDNPInGlue, SDNPOutGlue]>;
def PPCmtfsf : SDNode<"PPCISD::MTFSF", SDTypeProfile<1, 3,
[SDTCisVT<0, f64>, SDTCisInt<1>, SDTCisVT<2, f64>,
SDTCisVT<3, f64>]>,
- [SDNPInFlag]>;
+ [SDNPInGlue]>;
def PPCfsel : SDNode<"PPCISD::FSEL",
// Type constraint for fsel.
@@ -105,45 +105,45 @@ def PPCstd_32 : SDNode<"PPCISD::STD_32" , SDTStore,
// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_PPCCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PPCCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
def PPCcall_Darwin : SDNode<"PPCISD::CALL_Darwin", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def PPCcall_SVR4 : SDNode<"PPCISD::CALL_SVR4", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
-def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInFlag, SDNPOutFlag]>;
+def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInGlue, SDNPOutGlue]>;
def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def PPCbctrl_SVR4 : SDNode<"PPCISD::BCTRL_SVR4", SDTNone,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret,
- [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
-def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutFlag]>;
+def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutGlue]>;
def PPCcondbranch : SDNode<"PPCISD::COND_BRANCH", SDT_PPCcondbr,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def PPClbrx : SDNode<"PPCISD::LBRX", SDT_PPClbrx,
[SDNPHasChain, SDNPMayLoad]>;
@@ -286,31 +286,38 @@ def u16imm : Operand<i32> {
def s16immX4 : Operand<i32> { // Multiply imm by 4 before printing.
let PrintMethod = "printS16X4ImmOperand";
}
-def target : Operand<OtherVT> {
+def directbrtarget : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
+ let EncoderMethod = "getDirectBrEncoding";
+}
+def condbrtarget : Operand<OtherVT> {
+ let PrintMethod = "printBranchOperand";
+ let EncoderMethod = "getCondBrEncoding";
}
def calltarget : Operand<iPTR> {
- let PrintMethod = "printCallOperand";
+ let EncoderMethod = "getDirectBrEncoding";
}
def aaddr : Operand<iPTR> {
let PrintMethod = "printAbsAddrOperand";
}
-def piclabel: Operand<iPTR> {
- let PrintMethod = "printPICLabel";
-}
+def piclabel: Operand<iPTR> {}
def symbolHi: Operand<i32> {
let PrintMethod = "printSymbolHi";
+ let EncoderMethod = "getHA16Encoding";
}
def symbolLo: Operand<i32> {
let PrintMethod = "printSymbolLo";
+ let EncoderMethod = "getLO16Encoding";
}
def crbitm: Operand<i8> {
let PrintMethod = "printcrbitm";
+ let EncoderMethod = "get_crbitm_encoding";
}
// Address operands
def memri : Operand<iPTR> {
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops i32imm:$imm, ptr_rc:$reg);
+ let EncoderMethod = "getMemRIEncoding";
}
def memrr : Operand<iPTR> {
let PrintMethod = "printMemRegReg";
@@ -319,9 +326,9 @@ def memrr : Operand<iPTR> {
def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits.
let PrintMethod = "printMemRegImmShifted";
let MIOperandInfo = (ops i32imm:$imm, ptr_rc:$reg);
+ let EncoderMethod = "getMemRIXEncoding";
}
def tocentry : Operand<iPTR> {
- let PrintMethod = "printTOCEntryLabel";
let MIOperandInfo = (ops i32imm:$imm);
}
@@ -355,11 +362,9 @@ def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">;
let hasCtrlDep = 1 in {
let Defs = [R1], Uses = [R1] in {
-def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
- "${:comment} ADJCALLSTACKDOWN",
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "",
[(callseq_start timm:$amt)]>;
-def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2),
- "${:comment} ADJCALLSTACKUP",
+def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "",
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
@@ -368,8 +373,7 @@ def UPDATE_VRSAVE : Pseudo<(outs GPRC:$rD), (ins GPRC:$rS),
}
let Defs = [R1], Uses = [R1] in
-def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi),
- "${:comment} DYNALLOC $result, $negsize, $fpsi",
+def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "",
[(set GPRC:$result,
(PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>;
@@ -378,26 +382,26 @@ def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi),
let usesCustomInserter = 1, // Expanded after instruction selection.
PPC970_Single = 1 in {
def SELECT_CC_I4 : Pseudo<(outs GPRC:$dst), (ins CRRC:$cond, GPRC:$T, GPRC:$F,
- i32imm:$BROPC), "${:comment} SELECT_CC PSEUDO!",
+ i32imm:$BROPC), "",
[]>;
def SELECT_CC_I8 : Pseudo<(outs G8RC:$dst), (ins CRRC:$cond, G8RC:$T, G8RC:$F,
- i32imm:$BROPC), "${:comment} SELECT_CC PSEUDO!",
+ i32imm:$BROPC), "",
[]>;
def SELECT_CC_F4 : Pseudo<(outs F4RC:$dst), (ins CRRC:$cond, F4RC:$T, F4RC:$F,
- i32imm:$BROPC), "${:comment} SELECT_CC PSEUDO!",
+ i32imm:$BROPC), "",
[]>;
def SELECT_CC_F8 : Pseudo<(outs F8RC:$dst), (ins CRRC:$cond, F8RC:$T, F8RC:$F,
- i32imm:$BROPC), "${:comment} SELECT_CC PSEUDO!",
+ i32imm:$BROPC), "",
[]>;
def SELECT_CC_VRRC: Pseudo<(outs VRRC:$dst), (ins CRRC:$cond, VRRC:$T, VRRC:$F,
- i32imm:$BROPC), "${:comment} SELECT_CC PSEUDO!",
+ i32imm:$BROPC), "",
[]>;
}
// SPILL_CR - Indicate that we're dumping the CR register, so we'll need to
// scavenge a register for it.
def SPILL_CR : Pseudo<(outs), (ins GPRC:$cond, memri:$F),
- "${:comment} SPILL_CR $cond $F", []>;
+ "", []>;
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
let isReturn = 1, Uses = [LR, RM] in
@@ -409,12 +413,12 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
}
let Defs = [LR] in
- def MovePCtoLR : Pseudo<(outs), (ins piclabel:$label), "bl $label", []>,
+ def MovePCtoLR : Pseudo<(outs), (ins piclabel:$label), "", []>,
PPC970_Unit_BRU;
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
let isBarrier = 1 in {
- def B : IForm<18, 0, 0, (outs), (ins target:$dst),
+ def B : IForm<18, 0, 0, (outs), (ins directbrtarget:$dst),
"b $dst", BrB,
[(br bb:$dst)]>;
}
@@ -422,7 +426,7 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
// BCC represents an arbitrary conditional branch on a predicate.
// FIXME: should be able to write a pattern for PPCcondbranch, but can't use
// a two-value operand where a dag node expects two operands. :(
- def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, target:$dst),
+ def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst),
"b${cond:cc} ${cond:reg}, $dst"
/*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>;
}
@@ -548,105 +552,81 @@ def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst),
let usesCustomInserter = 1 in {
let Uses = [CR0] in {
def ATOMIC_LOAD_ADD_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_ADD_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_add_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_SUB_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_sub_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_AND_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_and_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_OR_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_or_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_XOR_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_xor_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_NAND_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_nand_8 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_ADD_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_ADD_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_add_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_SUB_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_sub_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_AND_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_and_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_OR_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_or_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_XOR_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_xor_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_NAND_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_nand_16 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_ADD_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_ADD_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_SUB_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_SUB_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_AND_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_OR_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_XOR_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
- "${:comment} ATOMIC_LOAD_NAND_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "",
[(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_CMP_SWAP_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new),
- "${:comment} ATOMIC_CMP_SWAP_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
[(set GPRC:$dst,
(atomic_cmp_swap_8 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_CMP_SWAP_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new),
- "${:comment} ATOMIC_CMP_SWAP_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
[(set GPRC:$dst,
(atomic_cmp_swap_16 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_CMP_SWAP_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new),
- "${:comment} ATOMIC_CMP_SWAP_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "",
[(set GPRC:$dst,
(atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_SWAP_I8 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new),
- "${:comment} ATOMIC_SWAP_I8 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
[(set GPRC:$dst, (atomic_swap_8 xoaddr:$ptr, GPRC:$new))]>;
def ATOMIC_SWAP_I16 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new),
- "${:comment} ATOMIC_SWAP_I16 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
[(set GPRC:$dst, (atomic_swap_16 xoaddr:$ptr, GPRC:$new))]>;
def ATOMIC_SWAP_I32 : Pseudo<
- (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new),
- "${:comment} ATOMIC_SWAP_I32 PSEUDO!",
+ (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "",
[(set GPRC:$dst, (atomic_swap_32 xoaddr:$ptr, GPRC:$new))]>;
}
}
@@ -785,33 +765,33 @@ def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst),
// Unindexed (r+i) Stores with Update (preinc).
let PPC970_Unit = 2 in {
-def STBU : DForm_1<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
+def STBU : DForm_1a<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"stbu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res,
(pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STHU : DForm_1<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
+def STHU : DForm_1a<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"sthu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res,
(pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STWU : DForm_1<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
+def STWU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"stwu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STFSU : DForm_1<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS,
+def STFSU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"stfsu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
-def STFDU : DForm_1<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS,
+def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
"stfdu $rS, $ptroff($ptrreg)", LdStGeneral,
[(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg,
@@ -1120,9 +1100,16 @@ def MTCRF : XFXForm_5<31, 144, (outs), (ins crbitm:$FXM, GPRC:$rS),
// As it turns out, in all cases where we currently use this,
// we're only interested in one subregister of it. Represent this in the
// instruction to keep the register allocator from becoming confused.
+//
+// FIXME: Make this a real Pseudo instruction when the JIT switches to MC.
def MFCRpseud: XFXForm_3<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
- "mfcr $rT ${:comment} $FXM", SprMFCR>,
+ "", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
+
+def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins),
+ "mfcr $rT", SprMFCR>,
+ PPC970_MicroCode, PPC970_Unit_CRU;
+
def MFOCRF: XFXForm_5a<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
"mfcr $rT, $FXM", SprMFCR>,
PPC970_DGroup_First, PPC970_Unit_CRU;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
index daf4ec6..78383e0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
@@ -16,7 +16,7 @@
#include "PPCRelocations.h"
#include "PPCTargetMachine.h"
#include "llvm/Function.h"
-#include "llvm/System/Memory.h"
+#include "llvm/Support/Memory.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
index 3644c79..d1178dd 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
@@ -17,10 +17,11 @@ using namespace llvm;
PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit) {
PCSymbol = ".";
CommentString = ";";
- ExceptionsType = ExceptionHandling::Dwarf;
+ ExceptionsType = ExceptionHandling::DwarfTable;
if (!is64Bit)
Data64bitsDirective = 0; // We can't emit a 64-bit unit in PPC32 mode.
+
AssemblerDialect = 1; // New-Style mnemonics.
SupportsDebugInformation = true; // Debug information.
}
@@ -47,7 +48,7 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
// Exceptions handling
if (!is64Bit)
- ExceptionsType = ExceptionHandling::Dwarf;
+ ExceptionsType = ExceptionHandling::DwarfTable;
ZeroDirective = "\t.space\t";
Data64bitsDirective = is64Bit ? "\t.quad\t" : 0;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCCodeEmitter.cpp
new file mode 100644
index 0000000..65c2c82
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCCodeEmitter.cpp
@@ -0,0 +1,195 @@
+//===-- PPCMCCodeEmitter.cpp - Convert PPC code to machine code -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PPCMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mccodeemitter"
+#include "PPC.h"
+#include "PPCRegisterInfo.h"
+#include "PPCFixupKinds.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
+
+namespace {
+class PPCMCCodeEmitter : public MCCodeEmitter {
+ PPCMCCodeEmitter(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT
+ const TargetMachine &TM;
+ MCContext &Ctx;
+
+public:
+ PPCMCCodeEmitter(TargetMachine &tm, MCContext &ctx)
+ : TM(tm), Ctx(ctx) {
+ }
+
+ ~PPCMCCodeEmitter() {}
+
+ unsigned getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getHA16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getLO16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getMemRIEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ unsigned getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned Bits = getBinaryCodeForInstr(MI, Fixups);
+
+ // Output the constant in big endian byte order.
+ for (unsigned i = 0; i != 4; ++i) {
+ OS << (char)(Bits >> 24);
+ Bits <<= 8;
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+ }
+
+};
+
+} // end anonymous namespace
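EncodeInstruction above streams the 32-bit instruction word most-significant
byte first, matching the big-endian PPC targets this emitter serves. A
minimal standalone sketch of the same loop (the example word is opcode 18,
the I-form "b" defined earlier in this diff):

    #include <cstdint>
    #include <cstdio>

    // Emit a 32-bit word MSB-first, as EncodeInstruction does above.
    static void emitBE32(uint32_t Bits) {
      for (int i = 0; i != 4; ++i) {
        std::printf("%02x ", (Bits >> 24) & 0xFF);
        Bits <<= 8;
      }
      std::printf("\n");
    }

    int main() {
      emitBE32(0x48000000); // "b" with zero offset: prints 48 00 00 00
      return 0;
    }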
+
+MCCodeEmitter *llvm::createPPCMCCodeEmitter(const Target &, TargetMachine &TM,
+ MCContext &Ctx) {
+ return new PPCMCCodeEmitter(TM, Ctx);
+}
+
+unsigned PPCMCCodeEmitter::
+getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+
+ // Add a fixup for the branch target.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_br24));
+ return 0;
+}
+
+unsigned PPCMCCodeEmitter::getCondBrEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+
+ // Add a fixup for the branch target.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_brcond14));
+ return 0;
+}
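When the branch target is still a symbolic expression, both encoders above
return zero bits and record a fixup; the assembler backend later writes the
resolved offset into the instruction word. A hedged sketch of that later
step, assuming the usual PPC layout of a signed 24-bit word offset in bits
25..2 (the fixup kinds themselves live in PPCFixupKinds.h, not shown here):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical resolution of fixup_ppc_br24: fold the byte offset
    // (low two bits zero) into the zeroed displacement field.
    static uint32_t applyBr24(uint32_t Word, int32_t ByteOffset) {
      return Word | (static_cast<uint32_t>(ByteOffset) & 0x03FFFFFC);
    }

    int main() {
      std::printf("0x%08x\n", applyBr24(0x48000000, 16)); // 0x48000010
      return 0;
    }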
+
+unsigned PPCMCCodeEmitter::getHA16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+
+  // Add a fixup for the immediate field.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_ha16));
+ return 0;
+}
+
+unsigned PPCMCCodeEmitter::getLO16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups);
+
+  // Add a fixup for the immediate field.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_lo16));
+ return 0;
+}
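The HA16/LO16 pair exists because a 32-bit address is rebuilt as
(ha16 << 16) + sign-extended lo16, so ha16 must be biased up by one whenever
the low half has its sign bit set. A small self-contained check of that
identity (the address value is illustrative):

    #include <cstdint>
    #include <cstdio>

    static uint16_t lo16(uint32_t A) { return A & 0xFFFF; }
    // Bias by 0x8000 so the sign-extended low half reassembles exactly.
    static uint16_t ha16(uint32_t A) { return (A + 0x8000) >> 16; }

    int main() {
      uint32_t Addr = 0x10008004; // low half is "negative"
      uint32_t R = (uint32_t(ha16(Addr)) << 16) + int16_t(lo16(Addr));
      std::printf("ha16=0x%04x lo16=0x%04x %s\n", (unsigned)ha16(Addr),
                  (unsigned)lo16(Addr), R == Addr ? "ok" : "mismatch");
      return 0;
    }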
+
+unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+  // Encode (imm, reg) as a memri, which has the low 16 bits as the
+  // displacement and the next 5 bits as the register #.
+ assert(MI.getOperand(OpNo+1).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups) << 16;
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm())
+ return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits;
+
+ // Add a fixup for the displacement field.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_lo16));
+ return RegBits;
+}
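Concretely, the (displacement, register) pair lands in the low 21 bits of the
returned value: base register number in bits 20..16, 16-bit displacement
below it. A standalone sketch (register and displacement values chosen for
illustration only):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Re-implementation of the memri packing above, for illustration.
    static uint32_t packMemRI(uint32_t RegNo, int32_t Disp) {
      assert(RegNo < 32 && "PPC has 32 GPRs");
      return (RegNo << 16) | (static_cast<uint32_t>(Disp) & 0xFFFF);
    }

    int main() {
      std::printf("0x%08x\n", packMemRI(3, 8)); // 8(r3) -> 0x00030008
      return 0;
    }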
+
+
+unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+  // Encode (imm, reg) as a memrix, which has the low 14 bits as the
+  // displacement and the next 5 bits as the register #.
+ assert(MI.getOperand(OpNo+1).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups) << 14;
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isImm())
+ return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits;
+
+  // Add a fixup for the displacement field.
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(),
+ (MCFixupKind)PPC::fixup_ppc_lo14));
+ return RegBits;
+}
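The memrix variant is identical in spirit but keeps only 14 displacement
bits, since DS-form instructions store the byte offset already shifted right
by two (see the memrix operand definition earlier in this diff). Sketch:

    #include <cstdint>
    #include <cstdio>

    // memrix packing: register in bits 18..14, 14-bit shifted displacement.
    static uint32_t packMemRIX(uint32_t RegNo, int32_t ShiftedDisp) {
      return (RegNo << 14) | (static_cast<uint32_t>(ShiftedDisp) & 0x3FFF);
    }

    int main() {
      // Byte offset 16 arrives pre-shifted as 4.
      std::printf("0x%08x\n", packMemRIX(1, 4)); // 16(r1) -> 0x00004004
      return 0;
    }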
+
+
+unsigned PPCMCCodeEmitter::
+get_crbitm_encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) &&
+ (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
+ return 0x80 >> PPCRegisterInfo::getRegisterNumbering(MO.getReg());
+}
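get_crbitm_encoding produces the one-hot FXM field, with CR0 mapping to the
most significant of the eight mask bits. Enumerating all eight fields:

    #include <cstdio>

    // FXM mask values as computed above: 0x80 >> (CR field number).
    int main() {
      for (unsigned CR = 0; CR != 8; ++CR)
        std::printf("CR%u -> 0x%02x\n", CR, 0x80u >> CR); // CR2 -> 0x20
      return 0;
    }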
+
+
+unsigned PPCMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ // MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
+ // The GPR operand should come through here though.
+ assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) ||
+ MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7);
+ return PPCRegisterInfo::getRegisterNumbering(MO.getReg());
+ }
+
+ assert(MO.isImm() &&
+ "Relocation required in an instruction that we cannot encode!");
+ return MO.getImm();
+}
+
+
+#include "PPCGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
new file mode 100644
index 0000000..6082587
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -0,0 +1,172 @@
+//===-- PPCMCInstLower.cpp - Convert PPC MachineInstr to an MCInst --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower PPC MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPC.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/ADT/SmallString.h"
+using namespace llvm;
+
+static MachineModuleInfoMachO &getMachOMMI(AsmPrinter &AP) {
+ return AP.MMI->getObjFileInfo<MachineModuleInfoMachO>();
+}
+
+
+static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
+ MCContext &Ctx = AP.OutContext;
+
+ SmallString<128> Name;
+ if (!MO.isGlobal()) {
+ assert(MO.isSymbol() && "Isn't a symbol reference");
+ Name += AP.MAI->getGlobalPrefix();
+ Name += MO.getSymbolName();
+ } else {
+ const GlobalValue *GV = MO.getGlobal();
+ bool isImplicitlyPrivate = false;
+ if (MO.getTargetFlags() == PPCII::MO_DARWIN_STUB ||
+ (MO.getTargetFlags() & PPCII::MO_NLP_FLAG))
+ isImplicitlyPrivate = true;
+
+ AP.Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ }
+
+  // If the target flags on the operand change the name of the symbol, do that
+  // before we return the symbol.
+ if (MO.getTargetFlags() == PPCII::MO_DARWIN_STUB) {
+ Name += "$stub";
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI(AP).getFnStubEntry(Sym);
+ if (StubSym.getPointer())
+ return Sym;
+
+ if (MO.isGlobal()) {
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(AP.Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
+ } else {
+ Name.erase(Name.end()-5, Name.end());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
+ }
+ return Sym;
+ }
+
+ // If the symbol reference is actually to a non_lazy_ptr, not to the symbol,
+ // then add the suffix.
+ if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG) {
+ Name += "$non_lazy_ptr";
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+
+ MachineModuleInfoMachO &MachO = getMachOMMI(AP);
+
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ (MO.getTargetFlags() & PPCII::MO_NLP_HIDDEN_FLAG) ?
+ MachO.getHiddenGVStubEntry(Sym) : MachO.getGVStubEntry(Sym);
+
+ if (StubSym.getPointer() == 0) {
+ assert(MO.isGlobal() && "Extern symbol not handled yet");
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(AP.Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
+ }
+ return Sym;
+ }
+
+ return Ctx.GetOrCreateSymbol(Name.str());
+}
+
+static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
+ AsmPrinter &Printer) {
+ MCContext &Ctx = Printer.OutContext;
+ MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
+
+ if (MO.getTargetFlags() & PPCII::MO_LO16)
+ RefKind = MCSymbolRefExpr::VK_PPC_LO16;
+ else if (MO.getTargetFlags() & PPCII::MO_HA16)
+ RefKind = MCSymbolRefExpr::VK_PPC_HA16;
+
+  // FIXME: This isn't right, but we don't have a good way to express this at
+  // the MC level; see below.
+ if (MO.getTargetFlags() & PPCII::MO_PIC_FLAG)
+ RefKind = MCSymbolRefExpr::VK_None;
+
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, RefKind, Ctx);
+
+ if (!MO.isJTI() && MO.getOffset())
+ Expr = MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(MO.getOffset(), Ctx),
+ Ctx);
+
+ // Subtract off the PIC base if required.
+ if (MO.getTargetFlags() & PPCII::MO_PIC_FLAG) {
+ const MachineFunction *MF = MO.getParent()->getParent()->getParent();
+
+ const MCExpr *PB = MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
+ Expr = MCBinaryExpr::CreateSub(Expr, PB, Ctx);
+ // FIXME: We have no way to make the result be VK_PPC_LO16/VK_PPC_HA16,
+ // since it is not a symbol!
+ }
+
+ return MCOperand::CreateExpr(Expr);
+}
+
+void llvm::LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+ AsmPrinter &AP) {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default:
+ MI->dump();
+ assert(0 && "unknown operand type");
+ case MachineOperand::MO_Register:
+ assert(!MO.getSubReg() && "Subregs should be eliminated!");
+ MCOp = MCOperand::CreateReg(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
+ MO.getMBB()->getSymbol(), AP.OutContext));
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = GetSymbolRef(MO, GetSymbolFromOperand(MO, AP), AP);
+ break;
+ case MachineOperand::MO_JumpTableIndex:
+ MCOp = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP);
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCOp = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP);
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = GetSymbolRef(MO,AP.GetBlockAddressSymbol(MO.getBlockAddress()),AP);
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+}
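LowerPPCMachineInstrToMCInst is the bridge the MC-based printer drives once
per instruction. Its caller is not part of this hunk; a plausible sketch
under the AsmPrinter API of the period (class and streamer member names are
assumed, not taken from this diff):

    // Hypothetical call site in PPCAsmPrinter (names assumed):
    void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
      MCInst TmpInst;
      LowerPPCMachineInstrToMCInst(MI, TmpInst, *this);
      OutStreamer.EmitInstruction(TmpInst);
    }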
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 653e143..45d8b6b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -17,7 +17,7 @@
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCRegisterInfo.h"
-#include "PPCFrameInfo.h"
+#include "PPCFrameLowering.h"
#include "PPCSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
@@ -31,7 +31,7 @@
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -44,16 +44,6 @@
#include "llvm/ADT/STLExtras.h"
#include <cstdlib>
-// FIXME This disables some code that aligns the stack to a boundary
-// bigger than the default (16 bytes on Darwin) when there is a stack local
-// of greater alignment. This does not currently work, because the delta
-// between old and new stack pointers is added to offsets that reference
-// incoming parameters after the prolog is generated, and the code that
-// does that doesn't handle a variable delta. You don't want to do that
-// anyway; a better approach is to reserve another register that points
-// to the incoming stack pointer, and reference parameters relative to that.
-#define ALIGN_STACK 0
-
// FIXME (64-bit): Eventually enable by default.
namespace llvm {
cl::opt<bool> EnablePPC32RS("enable-ppc32-regscavenger",
@@ -68,14 +58,11 @@ cl::opt<bool> EnablePPC64RS("enable-ppc64-regscavenger",
using namespace llvm;
-#define EnableRegisterScavenging \
- ((EnablePPC32RS && !Subtarget.isPPC64()) || \
- (EnablePPC64RS && Subtarget.isPPC64()))
-
// FIXME (64-bit): Should be inlined.
bool
PPCRegisterInfo::requiresRegisterScavenging(const MachineFunction &) const {
- return EnableRegisterScavenging;
+ return ((EnablePPC32RS && !Subtarget.isPPC64()) ||
+ (EnablePPC64RS && Subtarget.isPPC64()));
}
/// getRegisterNumbering - Given the enum value for some register, e.g.
@@ -269,26 +256,11 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegs : SVR4_CalleeSavedRegs;
}
-// needsFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-//
-static bool needsFP(const MachineFunction &MF) {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- // Naked functions have no stack frame pushed, so we don't have a frame pointer.
- if (MF.getFunction()->hasFnAttr(Attribute::Naked))
- return false;
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() ||
- (GuaranteedTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
-}
-
-static bool spillsCR(const MachineFunction &MF) {
- const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- return FuncInfo->isCRSpilled();
-}
-
BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const PPCFrameLowering *PPCFI =
+ static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
+
Reserved.set(PPC::R0);
Reserved.set(PPC::R1);
Reserved.set(PPC::LR);
@@ -314,7 +286,7 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(PPC::R13);
Reserved.set(PPC::R31);
- if (!EnableRegisterScavenging)
+ if (!requiresRegisterScavenging(MF))
Reserved.set(PPC::R0); // FIXME (64-bit): Remove
Reserved.set(PPC::X0);
@@ -334,7 +306,7 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
}
- if (needsFP(MF))
+ if (PPCFI->needsFP(MF))
Reserved.set(PPC::R31);
return Reserved;
@@ -344,30 +316,6 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
-// hasFP - Return true if the specified function actually has a dedicated frame
-// pointer register. This is true if the function needs a frame pointer and has
-// a non-zero stack size.
-bool PPCRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->getStackSize() && needsFP(MF);
-}
-
-/// MustSaveLR - Return true if this function requires that we save the LR
-/// register onto the stack in the prolog and restore it in the epilog of the
-/// function.
-static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
- const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
-
- // We need a save/restore of LR if there is any def of LR (which is
- // defined by calls, including the PIC setup sequence), or if there is
- // some use of the LR stack slot (e.g. for builtin_return_address).
- // (LR comes in 32 and 64 bit versions.)
- MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
-  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
-}
-
-
-
void PPCRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
@@ -447,7 +395,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
unsigned FrameSize = MFI->getStackSize();
// Get stack alignments.
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
unsigned MaxAlign = MFI->getMaxAlignment();
if (MaxAlign > TargetAlign)
report_fatal_error("Dynamic alloca with large aligns not supported");
@@ -464,7 +412,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
// FIXME (64-bit): Use "findScratchRegister"
unsigned Reg;
- if (EnableRegisterScavenging)
+ if (requiresRegisterScavenging(MF))
Reg = findScratchRegister(II, RS, RC, SPAdj);
else
Reg = PPC::R0;
@@ -474,7 +422,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
.addReg(PPC::R31)
.addImm(FrameSize);
} else if (LP64) {
- if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
+ if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Use "true" part.
BuildMI(MBB, II, dl, TII.get(PPC::LD), Reg)
.addImm(0)
.addReg(PPC::X1);
@@ -491,7 +439,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
// Grow the stack and update the stack pointer link, then determine the
// address of new allocated space.
if (LP64) {
- if (EnableRegisterScavenging) // FIXME (64-bit): Use "true" part.
+ if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Use "true" part.
BuildMI(MBB, II, dl, TII.get(PPC::STDUX))
.addReg(Reg, RegState::Kill)
.addReg(PPC::X1)
@@ -593,6 +541,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineFunction &MF = *MBB.getParent();
// Get the frame info.
MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc dl = MI.getDebugLoc();
// Find out which operand is the frame index.
@@ -625,14 +574,15 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
// Special case for pseudo-op SPILL_CR.
- if (EnableRegisterScavenging) // FIXME (64-bit): Enable by default.
+ if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Enable by default.
if (OpC == PPC::SPILL_CR) {
lowerCRSpilling(II, FrameIndex, SPAdj, RS);
return;
}
// Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
- MI.getOperand(FIOperandNo).ChangeToRegister(hasFP(MF) ? PPC::R31 : PPC::R1,
+ MI.getOperand(FIOperandNo).ChangeToRegister(TFI->hasFP(MF) ?
+ PPC::R31 : PPC::R1,
false);
// Figure out if the offset in the instruction is shifted right two bits. This
@@ -682,7 +632,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// FIXME (64-bit): Use "findScratchRegister".
unsigned SReg;
- if (EnableRegisterScavenging)
+ if (requiresRegisterScavenging(MF))
SReg = findScratchRegister(II, RS, &PPC::GPRCRegClass, SPAdj);
else
SReg = PPC::R0;
@@ -715,898 +665,17 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false);
}
-/// VRRegNo - Map from a numbered VR register to its enum value.
-///
-static const unsigned short VRRegNo[] = {
- PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
- PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, PPC::V12, PPC::V13, PPC::V14, PPC::V15,
- PPC::V16, PPC::V17, PPC::V18, PPC::V19, PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31
-};
-
-/// RemoveVRSaveCode - We have found that this function does not need any code
-/// to manipulate the VRSAVE register, even though it uses vector registers.
-/// This can happen when the only registers used are known to be live in or out
-/// of the function. Remove all of the VRSAVE related code from the function.
-static void RemoveVRSaveCode(MachineInstr *MI) {
- MachineBasicBlock *Entry = MI->getParent();
- MachineFunction *MF = Entry->getParent();
-
- // We know that the MTVRSAVE instruction immediately follows MI. Remove it.
- MachineBasicBlock::iterator MBBI = MI;
- ++MBBI;
- assert(MBBI != Entry->end() && MBBI->getOpcode() == PPC::MTVRSAVE);
- MBBI->eraseFromParent();
-
- bool RemovedAllMTVRSAVEs = true;
- // See if we can find and remove the MTVRSAVE instruction from all of the
- // epilog blocks.
- for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
-    // If the block ends in a return, it is an epilog block; look for the
-    // MTVRSAVE there and remove it.
- if (!I->empty() && I->back().getDesc().isReturn()) {
- bool FoundIt = false;
- for (MBBI = I->end(); MBBI != I->begin(); ) {
- --MBBI;
- if (MBBI->getOpcode() == PPC::MTVRSAVE) {
- MBBI->eraseFromParent(); // remove it.
- FoundIt = true;
- break;
- }
- }
- RemovedAllMTVRSAVEs &= FoundIt;
- }
- }
-
- // If we found and removed all MTVRSAVE instructions, remove the read of
- // VRSAVE as well.
- if (RemovedAllMTVRSAVEs) {
- MBBI = MI;
- assert(MBBI != Entry->begin() && "UPDATE_VRSAVE is first instr in block?");
- --MBBI;
- assert(MBBI->getOpcode() == PPC::MFVRSAVE && "VRSAVE instrs wandered?");
- MBBI->eraseFromParent();
- }
-
- // Finally, nuke the UPDATE_VRSAVE.
- MI->eraseFromParent();
-}
-
-// HandleVRSaveUpdate - MI is the UPDATE_VRSAVE instruction introduced by the
-// instruction selector. Based on the vector registers that have been used,
-// transform this into the appropriate ORI instruction.
-static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) {
- MachineFunction *MF = MI->getParent()->getParent();
- DebugLoc dl = MI->getDebugLoc();
-
- unsigned UsedRegMask = 0;
- for (unsigned i = 0; i != 32; ++i)
- if (MF->getRegInfo().isPhysRegUsed(VRRegNo[i]))
- UsedRegMask |= 1 << (31-i);
-
- // Live in and live out values already must be in the mask, so don't bother
- // marking them.
- for (MachineRegisterInfo::livein_iterator
- I = MF->getRegInfo().livein_begin(),
- E = MF->getRegInfo().livein_end(); I != E; ++I) {
- unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(I->first);
- if (VRRegNo[RegNo] == I->first) // If this really is a vector reg.
- UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
- }
- for (MachineRegisterInfo::liveout_iterator
- I = MF->getRegInfo().liveout_begin(),
- E = MF->getRegInfo().liveout_end(); I != E; ++I) {
- unsigned RegNo = PPCRegisterInfo::getRegisterNumbering(*I);
- if (VRRegNo[RegNo] == *I) // If this really is a vector reg.
- UsedRegMask &= ~(1 << (31-RegNo)); // Doesn't need to be marked.
- }
-
- // If no registers are used, turn this into a copy.
- if (UsedRegMask == 0) {
- // Remove all VRSAVE code.
- RemoveVRSaveCode(MI);
- return;
- }
-
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
-
- if ((UsedRegMask & 0xFFFF) == UsedRegMask) {
- if (DstReg != SrcReg)
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
- .addReg(SrcReg)
- .addImm(UsedRegMask);
- else
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addImm(UsedRegMask);
- } else if ((UsedRegMask & 0xFFFF0000) == UsedRegMask) {
- if (DstReg != SrcReg)
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
- .addReg(SrcReg)
- .addImm(UsedRegMask >> 16);
- else
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addImm(UsedRegMask >> 16);
- } else {
- if (DstReg != SrcReg)
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
- .addReg(SrcReg)
- .addImm(UsedRegMask >> 16);
- else
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORIS), DstReg)
- .addReg(SrcReg, RegState::Kill)
- .addImm(UsedRegMask >> 16);
-
- BuildMI(*MI->getParent(), MI, dl, TII.get(PPC::ORI), DstReg)
- .addReg(DstReg, RegState::Kill)
- .addImm(UsedRegMask & 0xFFFF);
- }
-
- // Remove the old UPDATE_VRSAVE instruction.
- MI->eraseFromParent();
-}
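For reference while reading the removed logic: VRSAVE uses bit (31 - N) for
vector register VN, so a mask that touches both 16-bit halves forces the
ORIS/ORI pair above. A tiny standalone check (register choice illustrative):

    #include <cstdio>

    int main() {
      unsigned UsedRegMask = 0;
      UsedRegMask |= 1u << (31 - 0);        // pretend V0 is used
      UsedRegMask |= 1u << (31 - 2);        // pretend V2 is used
      UsedRegMask |= 1u << (31 - 31);       // pretend V31 is used
      std::printf("0x%08x\n", UsedRegMask); // 0xa0000001: needs ORIS and ORI
      return 0;
    }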
-
-/// determineFrameLayout - Determine the size of the frame and maximum call
-/// frame size.
-void PPCRegisterInfo::determineFrameLayout(MachineFunction &MF) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Get the number of bytes to allocate from the FrameInfo
- unsigned FrameSize = MFI->getStackSize();
-
- // Get the alignments provided by the target, and the maximum alignment
- // (if any) of the fixed frame objects.
- unsigned MaxAlign = MFI->getMaxAlignment();
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
-  unsigned AlignMask = TargetAlign - 1;
-
- // If we are a leaf function, and use up to 224 bytes of stack space,
- // don't have a frame pointer, calls, or dynamic alloca then we do not need
- // to adjust the stack pointer (we fit in the Red Zone).
- bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone);
- // FIXME SVR4 The 32-bit SVR4 ABI has no red zone.
- if (!DisableRedZone &&
- FrameSize <= 224 && // Fits in red zone.
- !MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->adjustsStack() && // No calls.
- (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
- // No need for frame
- MFI->setStackSize(0);
- return;
- }
-
- // Get the maximum call frame size of all the calls.
- unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
-
- // Maximum call frame needs to be at least big enough for linkage and 8 args.
- unsigned minCallFrameSize =
- PPCFrameInfo::getMinCallFrameSize(Subtarget.isPPC64(),
- Subtarget.isDarwinABI());
- maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);
-
- // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
- // that allocations will be aligned.
- if (MFI->hasVarSizedObjects())
- maxCallFrameSize = (maxCallFrameSize + AlignMask) & ~AlignMask;
-
- // Update maximum call frame size.
- MFI->setMaxCallFrameSize(maxCallFrameSize);
-
- // Include call frame size in total.
- FrameSize += maxCallFrameSize;
-
- // Make sure the frame is aligned.
- FrameSize = (FrameSize + AlignMask) & ~AlignMask;
-
- // Update frame info.
- MFI->setStackSize(FrameSize);
-}
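The frame-size rounding in the removed determineFrameLayout is the standard
power-of-two idiom (Size + Align - 1) & ~(Align - 1). A quick check:

    #include <cstdio>

    int main() {
      unsigned TargetAlign = 16, AlignMask = TargetAlign - 1;
      unsigned Sizes[] = {0, 1, 100, 112};
      for (unsigned Size : Sizes)
        std::printf("%3u -> %3u\n", Size, (Size + AlignMask) & ~AlignMask);
      return 0; // 0->0, 1->16, 100->112, 112->112
    }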
-
-void
-PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- // Save and clear the LR state.
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- unsigned LR = getRARegister();
- FI->setMustSaveLR(MustSaveLR(MF, LR));
- MF.getRegInfo().setPhysRegUnused(LR);
-
- // Save R31 if necessary
- int FPSI = FI->getFramePointerSaveIndex();
- bool isPPC64 = Subtarget.isPPC64();
- bool isDarwinABI = Subtarget.isDarwinABI();
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // If the frame pointer save index hasn't been defined yet.
- if (!FPSI && needsFP(MF)) {
-    // Find out what the fixed offset of the frame pointer save area is.
- int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
- isDarwinABI);
- // Allocate the frame index for frame pointer save area.
- FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
- // Save the result.
- FI->setFramePointerSaveIndex(FPSI);
- }
-
- // Reserve stack space to move the linkage area to in case of a tail call.
- int TCSPDelta = 0;
- if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
- MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
- }
-
- // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
- // a large stack, which will require scavenging a register to materialize a
- // large offset.
- // FIXME: this doesn't actually check stack size, so is a bit pessimistic
- // FIXME: doesn't detect whether or not we need to spill vXX, which requires
- // r0 for now.
-
- if (EnableRegisterScavenging) // FIXME (64-bit): Enable.
- if (needsFP(MF) || spillsCR(MF)) {
- const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
- const TargetRegisterClass *RC = isPPC64 ? G8RC : GPRC;
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
-}
-
-void
-PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
- const {
- // Early exit if not using the SVR4 ABI.
- if (!Subtarget.isSVR4ABI()) {
- return;
- }
-
- // Get callee saved register information.
- MachineFrameInfo *FFI = MF.getFrameInfo();
- const std::vector<CalleeSavedInfo> &CSI = FFI->getCalleeSavedInfo();
-
- // Early exit if no callee saved registers are modified!
- if (CSI.empty() && !needsFP(MF)) {
- return;
- }
-
- unsigned MinGPR = PPC::R31;
- unsigned MinG8R = PPC::X31;
- unsigned MinFPR = PPC::F31;
- unsigned MinVR = PPC::V31;
-
- bool HasGPSaveArea = false;
- bool HasG8SaveArea = false;
- bool HasFPSaveArea = false;
- bool HasCRSaveArea = false;
- bool HasVRSAVESaveArea = false;
- bool HasVRSaveArea = false;
-
- SmallVector<CalleeSavedInfo, 18> GPRegs;
- SmallVector<CalleeSavedInfo, 18> G8Regs;
- SmallVector<CalleeSavedInfo, 18> FPRegs;
- SmallVector<CalleeSavedInfo, 18> VRegs;
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (PPC::GPRCRegisterClass->contains(Reg)) {
- HasGPSaveArea = true;
-
- GPRegs.push_back(CSI[i]);
-
- if (Reg < MinGPR) {
- MinGPR = Reg;
- }
- } else if (PPC::G8RCRegisterClass->contains(Reg)) {
- HasG8SaveArea = true;
-
- G8Regs.push_back(CSI[i]);
-
- if (Reg < MinG8R) {
- MinG8R = Reg;
- }
- } else if (PPC::F8RCRegisterClass->contains(Reg)) {
- HasFPSaveArea = true;
-
- FPRegs.push_back(CSI[i]);
-
- if (Reg < MinFPR) {
- MinFPR = Reg;
- }
-// FIXME SVR4: Disable CR save area for now.
- } else if (PPC::CRBITRCRegisterClass->contains(Reg)
- || PPC::CRRCRegisterClass->contains(Reg)) {
-// HasCRSaveArea = true;
- } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
- HasVRSAVESaveArea = true;
- } else if (PPC::VRRCRegisterClass->contains(Reg)) {
- HasVRSaveArea = true;
-
- VRegs.push_back(CSI[i]);
-
- if (Reg < MinVR) {
- MinVR = Reg;
- }
- } else {
- llvm_unreachable("Unknown RegisterClass!");
- }
- }
-
- PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
-
- int64_t LowerBound = 0;
-
- // Take into account stack space reserved for tail calls.
- int TCSPDelta = 0;
- if (GuaranteedTailCallOpt && (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
- LowerBound = TCSPDelta;
- }
-
- // The Floating-point register save area is right below the back chain word
- // of the previous stack frame.
- if (HasFPSaveArea) {
- for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
- int FI = FPRegs[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
-
- LowerBound -= (31 - getRegisterNumbering(MinFPR) + 1) * 8;
- }
-
- // Check whether the frame pointer register is allocated. If so, make sure it
- // is spilled to the correct offset.
- if (needsFP(MF)) {
- HasGPSaveArea = true;
-
- int FI = PFI->getFramePointerSaveIndex();
- assert(FI && "No Frame Pointer Save Slot!");
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
-
- // General register save area starts right below the Floating-point
- // register save area.
- if (HasGPSaveArea || HasG8SaveArea) {
- // Move general register save area spill slots down, taking into account
- // the size of the Floating-point register save area.
- for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
- int FI = GPRegs[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
-
- // Move general register save area spill slots down, taking into account
- // the size of the Floating-point register save area.
- for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
- int FI = G8Regs[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
-
- unsigned MinReg = std::min<unsigned>(getRegisterNumbering(MinGPR),
- getRegisterNumbering(MinG8R));
-
- if (Subtarget.isPPC64()) {
- LowerBound -= (31 - MinReg + 1) * 8;
- } else {
- LowerBound -= (31 - MinReg + 1) * 4;
- }
- }
-
- // The CR save area is below the general register save area.
- if (HasCRSaveArea) {
- // FIXME SVR4: Is it actually possible to have multiple elements in CSI
- // which have the CR/CRBIT register class?
- // Adjust the frame index of the CR spill slot.
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
-
- if (PPC::CRBITRCRegisterClass->contains(Reg) ||
- PPC::CRRCRegisterClass->contains(Reg)) {
- int FI = CSI[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
- }
-
- LowerBound -= 4; // The CR save area is always 4 bytes long.
- }
-
- if (HasVRSAVESaveArea) {
- // FIXME SVR4: Is it actually possible to have multiple elements in CSI
- // which have the VRSAVE register class?
- // Adjust the frame index of the VRSAVE spill slot.
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
-
- if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
- int FI = CSI[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
- }
-
- LowerBound -= 4; // The VRSAVE save area is always 4 bytes long.
- }
-
- if (HasVRSaveArea) {
- // Insert alignment padding, we need 16-byte alignment.
- LowerBound = (LowerBound - 15) & ~(15);
-
- for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
- int FI = VRegs[i].getFrameIdx();
-
- FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
- }
- }
-}
-
-void
-PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo &MMI = MF.getMMI();
- DebugLoc dl;
- bool needsFrameMoves = MMI.hasDebugInfo() ||
- !MF.getFunction()->doesNotThrow() ||
- UnwindTablesMandatory;
-
- // Prepare for frame info.
- MCSymbol *FrameLabel = 0;
-
- // Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
- // process it.
- for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) {
- if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) {
- HandleVRSaveUpdate(MBBI, TII);
- break;
- }
- }
-
- // Move MBBI back to the beginning of the function.
- MBBI = MBB.begin();
-
- // Work out frame sizes.
- determineFrameLayout(MF);
- unsigned FrameSize = MFI->getStackSize();
-
- int NegFrameSize = -FrameSize;
-
- // Get processor type.
- bool isPPC64 = Subtarget.isPPC64();
- // Get operating system
- bool isDarwinABI = Subtarget.isDarwinABI();
- // Check if the link register (LR) must be saved.
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- bool MustSaveLR = FI->mustSaveLR();
- // Do we have a frame pointer for this function?
- bool HasFP = hasFP(MF) && FrameSize;
-
- int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
-
- int FPOffset = 0;
- if (HasFP) {
- if (Subtarget.isSVR4ABI()) {
- MachineFrameInfo *FFI = MF.getFrameInfo();
- int FPIndex = FI->getFramePointerSaveIndex();
- assert(FPIndex && "No Frame Pointer Save Slot!");
- FPOffset = FFI->getObjectOffset(FPIndex);
- } else {
- FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
- }
- }
-
- if (isPPC64) {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR8), PPC::X0);
-
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
- .addReg(PPC::X31)
- .addImm(FPOffset/4)
- .addReg(PPC::X1);
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
- .addReg(PPC::X0)
- .addImm(LROffset / 4)
- .addReg(PPC::X1);
- } else {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFLR), PPC::R0);
-
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
- .addReg(PPC::R31)
- .addImm(FPOffset)
- .addReg(PPC::R1);
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STW))
- .addReg(PPC::R0)
- .addImm(LROffset)
- .addReg(PPC::R1);
- }
-
- // Skip if a leaf routine.
- if (!FrameSize) return;
-
- // Get stack alignments.
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- unsigned MaxAlign = MFI->getMaxAlignment();
-
- // Adjust stack pointer: r1 += NegFrameSize.
- // If there is a preferred stack alignment, align R1 now
- if (!isPPC64) {
- // PPC32.
- if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
- "Invalid alignment!");
- assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
-
- BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0)
- .addReg(PPC::R1)
- .addImm(0)
- .addImm(32 - Log2_32(MaxAlign))
- .addImm(31);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC) ,PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
- .addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
- .addReg(PPC::R1)
- .addReg(PPC::R1)
- .addReg(PPC::R0);
- } else if (isInt<16>(NegFrameSize)) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
- .addReg(PPC::R1)
- .addImm(NegFrameSize)
- .addReg(PPC::R1);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
- .addImm(NegFrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
- .addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
- .addReg(PPC::R1)
- .addReg(PPC::R1)
- .addReg(PPC::R0);
- }
- } else { // PPC64.
- if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
- "Invalid alignment!");
- assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
-
- BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0)
- .addReg(PPC::X1)
- .addImm(0)
- .addImm(64 - Log2_32(MaxAlign));
- BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBFIC8), PPC::X0)
- .addReg(PPC::X0)
- .addImm(NegFrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
- .addReg(PPC::X1)
- .addReg(PPC::X1)
- .addReg(PPC::X0);
- } else if (isInt<16>(NegFrameSize)) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
- .addReg(PPC::X1)
- .addImm(NegFrameSize / 4)
- .addReg(PPC::X1);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
- .addImm(NegFrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
- .addReg(PPC::X0, RegState::Kill)
- .addImm(NegFrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
- .addReg(PPC::X1)
- .addReg(PPC::X1)
- .addReg(PPC::X0);
- }
- }
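When -FrameSize does not fit the signed 16-bit field of stwu/stdu, the
removed prologue rebuilds it with a lis/ori pair and uses the indexed
stwux/stdux form instead. The split is exact for any 32-bit value, since lis
places the high half and ori ORs in the zero-extended low half (the frame
size below is illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t NegFrameSize = -80000;          // outside the int16 range
      uint32_t U = static_cast<uint32_t>(NegFrameSize);
      uint16_t Hi = U >> 16, Lo = U & 0xFFFF; // lis and ori immediates
      uint32_t Rebuilt = (uint32_t(Hi) << 16) | Lo;
      std::printf("hi=0x%04x lo=0x%04x %s\n", (unsigned)Hi, (unsigned)Lo,
                  Rebuilt == U ? "ok" : "bad");
      return 0;
    }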
-
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-
- // Add the "machine moves" for the instructions we generated above, but in
- // reverse order.
- if (needsFrameMoves) {
- // Mark effective beginning of when frame pointer becomes valid.
- FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(FrameLabel);
-
- // Show update of SP.
- if (NegFrameSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- } else {
- MachineLocation SP(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, SP, SP));
- }
-
- if (HasFP) {
- MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
- MachineLocation FPSrc(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
- }
-
- if (MustSaveLR) {
- MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
- MachineLocation LRSrc(isPPC64 ? PPC::LR8 : PPC::LR);
- Moves.push_back(MachineMove(FrameLabel, LRDst, LRSrc));
- }
- }
-
- MCSymbol *ReadyLabel = 0;
-
- // If there is a frame pointer, copy R1 into R31
- if (HasFP) {
- if (!isPPC64) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::OR), PPC::R31)
- .addReg(PPC::R1)
- .addReg(PPC::R1);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::OR8), PPC::X31)
- .addReg(PPC::X1)
- .addReg(PPC::X1);
- }
-
- if (needsFrameMoves) {
- ReadyLabel = MMI.getContext().CreateTempSymbol();
-
- // Mark effective beginning of when frame pointer is ready.
- BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(ReadyLabel);
-
- MachineLocation FPDst(HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) :
- (isPPC64 ? PPC::X1 : PPC::R1));
- MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
- }
- }
-
- if (needsFrameMoves) {
- MCSymbol *Label = HasFP ? ReadyLabel : FrameLabel;
-
- // Add callee saved registers to move list.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
- unsigned Reg = CSI[I].getReg();
- if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
- }
- }
-}
-
-void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- unsigned RetOpcode = MBBI->getOpcode();
- DebugLoc dl;
-
- assert( (RetOpcode == PPC::BLR ||
- RetOpcode == PPC::TCRETURNri ||
- RetOpcode == PPC::TCRETURNdi ||
- RetOpcode == PPC::TCRETURNai ||
- RetOpcode == PPC::TCRETURNri8 ||
- RetOpcode == PPC::TCRETURNdi8 ||
- RetOpcode == PPC::TCRETURNai8) &&
- "Can only insert epilog into returning blocks");
-
- // Get alignment info so we know how to restore r1
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- unsigned MaxAlign = MFI->getMaxAlignment();
-
- // Get the number of bytes allocated from the FrameInfo.
- int FrameSize = MFI->getStackSize();
-
- // Get processor type.
- bool isPPC64 = Subtarget.isPPC64();
- // Get operating system
- bool isDarwinABI = Subtarget.isDarwinABI();
- // Check if the link register (LR) has been saved.
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- bool MustSaveLR = FI->mustSaveLR();
- // Do we have a frame pointer for this function?
- bool HasFP = hasFP(MF) && FrameSize;
-
- int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
-
- int FPOffset = 0;
- if (HasFP) {
- if (Subtarget.isSVR4ABI()) {
- MachineFrameInfo *FFI = MF.getFrameInfo();
- int FPIndex = FI->getFramePointerSaveIndex();
- assert(FPIndex && "No Frame Pointer Save Slot!");
- FPOffset = FFI->getObjectOffset(FPIndex);
- } else {
- FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
- }
- }
-
- bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
- RetOpcode == PPC::TCRETURNdi ||
- RetOpcode == PPC::TCRETURNai ||
- RetOpcode == PPC::TCRETURNri8 ||
- RetOpcode == PPC::TCRETURNdi8 ||
- RetOpcode == PPC::TCRETURNai8;
-
- if (UsesTCRet) {
- int MaxTCRetDelta = FI->getTailCallSPDelta();
- MachineOperand &StackAdjust = MBBI->getOperand(1);
- assert(StackAdjust.isImm() && "Expecting immediate value.");
- // Adjust stack pointer.
- int StackAdj = StackAdjust.getImm();
- int Delta = StackAdj - MaxTCRetDelta;
-    assert((Delta >= 0) && "Delta must be non-negative");
-    if (MaxTCRetDelta > 0)
-      FrameSize += (StackAdj + Delta);
-    else
-      FrameSize += StackAdj;
- }
-
- if (FrameSize) {
- // The loaded (or persistent) stack pointer value is offset by the 'stwu'
- // on entry to the function. Add this offset back now.
- if (!isPPC64) {
- // If this function contained a fastcc call and GuaranteedTailCallOpt is
- // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
- // call which invalidates the stack pointer value in SP(0). So we use the
- // value of R31 in this case.
- if (FI->hasFastCall() && isInt<16>(FrameSize)) {
-        assert(hasFP(MF) && "Expecting a valid frame pointer.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
- .addReg(PPC::R31).addImm(FrameSize);
-      } else if (FI->hasFastCall()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS), PPC::R0)
- .addImm(FrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI), PPC::R0)
- .addReg(PPC::R0, RegState::Kill)
- .addImm(FrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4))
- .addReg(PPC::R1)
- .addReg(PPC::R31)
- .addReg(PPC::R0);
- } else if (isInt<16>(FrameSize) &&
- (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
- !MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
- .addReg(PPC::R1).addImm(FrameSize);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ),PPC::R1)
- .addImm(0).addReg(PPC::R1);
- }
- } else {
- if (FI->hasFastCall() && isInt<16>(FrameSize)) {
-      assert(hasFP(MF) && "Expecting a valid frame pointer.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
- .addReg(PPC::X31).addImm(FrameSize);
- } else if(FI->hasFastCall()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
- .addImm(FrameSize >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ORI8), PPC::X0)
- .addReg(PPC::X0, RegState::Kill)
- .addImm(FrameSize & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD8))
- .addReg(PPC::X1)
- .addReg(PPC::X31)
- .addReg(PPC::X0);
- } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign &&
- !MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
- .addReg(PPC::X1).addImm(FrameSize);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X1)
- .addImm(0).addReg(PPC::X1);
- }
- }
- }
-
- if (isPPC64) {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X0)
- .addImm(LROffset/4).addReg(PPC::X1);
-
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31)
- .addImm(FPOffset/4).addReg(PPC::X1);
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR8)).addReg(PPC::X0);
- } else {
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R0)
- .addImm(LROffset).addReg(PPC::R1);
-
- if (HasFP)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ), PPC::R31)
- .addImm(FPOffset).addReg(PPC::R1);
-
- if (MustSaveLR)
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MTLR)).addReg(PPC::R0);
- }
-
- // Callee pop calling convention. Pop parameter/linkage area. Used for tail
- // call optimization
- if (GuaranteedTailCallOpt && RetOpcode == PPC::BLR &&
- MF.getFunction()->getCallingConv() == CallingConv::Fast) {
- PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
- unsigned CallerAllocatedAmt = FI->getMinReservedArea();
- unsigned StackReg = isPPC64 ? PPC::X1 : PPC::R1;
- unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31;
- unsigned TmpReg = isPPC64 ? PPC::X0 : PPC::R0;
- unsigned ADDIInstr = isPPC64 ? PPC::ADDI8 : PPC::ADDI;
- unsigned ADDInstr = isPPC64 ? PPC::ADD8 : PPC::ADD4;
- unsigned LISInstr = isPPC64 ? PPC::LIS8 : PPC::LIS;
- unsigned ORIInstr = isPPC64 ? PPC::ORI8 : PPC::ORI;
-
- if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
- BuildMI(MBB, MBBI, dl, TII.get(ADDIInstr), StackReg)
- .addReg(StackReg).addImm(CallerAllocatedAmt);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
- .addImm(CallerAllocatedAmt >> 16);
- BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
- .addReg(TmpReg, RegState::Kill)
- .addImm(CallerAllocatedAmt & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(ADDInstr))
- .addReg(StackReg)
- .addReg(FPReg)
- .addReg(TmpReg);
- }
- } else if (RetOpcode == PPC::TCRETURNdi) {
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
- } else if (RetOpcode == PPC::TCRETURNri) {
- MBBI = prior(MBB.end());
- assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
- } else if (RetOpcode == PPC::TCRETURNai) {
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
- } else if (RetOpcode == PPC::TCRETURNdi8) {
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
- } else if (RetOpcode == PPC::TCRETURNri8) {
- MBBI = prior(MBB.end());
- assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
- } else if (RetOpcode == PPC::TCRETURNai8) {
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
- }
-}
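
The deleted epilogue above is worth a note for readers following the refactor: when the frame size does not fit the signed 16-bit ADDI immediate, it is rebuilt in a scratch register with a LIS/ORI pair. A standalone sketch of that split follows (splitForLisOri is an illustrative name, not an LLVM API):

#include <cstdint>
#include <cstdio>

// hi goes into lis (which shifts it left 16); lo is or'd in by ori, which
// zero-extends, so no sign correction is needed -- exactly the
// (FrameSize >> 16) / (FrameSize & 0xFFFF) pair used in the code above.
void splitForLisOri(int32_t FrameSize, int32_t &hi, uint32_t &lo) {
  hi = FrameSize >> 16;
  lo = FrameSize & 0xFFFF;
}

int main() {
  int32_t hi; uint32_t lo;
  splitForLisOri(0x12345678, hi, lo);
  std::printf("%x %x\n", hi, lo);                  // 1234 5678
  std::printf("%x\n", (hi << 16) | (int32_t)lo);   // 12345678 round-trips
}
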
-
unsigned PPCRegisterInfo::getRARegister() const {
return !Subtarget.isPPC64() ? PPC::LR : PPC::LR8;
}
unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
if (!Subtarget.isPPC64())
- return hasFP(MF) ? PPC::R31 : PPC::R1;
+ return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
else
- return hasFP(MF) ? PPC::X31 : PPC::X1;
-}
-
-void PPCRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
- const {
- // Initial state of the frame pointer is R1.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(PPC::R1, 0);
- Moves.push_back(MachineMove(0, Dst, Src));
+ return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}
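
This hunk is the consumer side of the refactor: frame-pointer policy now lives in a TargetFrameLowering object that RegisterInfo merely queries. A toy model of the split, with invented class names rather than LLVM's:

#include <iostream>

// Toy stand-in for TargetFrameLowering: it owns all frame-layout policy.
struct FrameLowering {
  virtual ~FrameLowering() = default;
  virtual bool hasFP() const = 0;
};

struct PPCFrame : FrameLowering {
  bool NeedsFP;
  explicit PPCFrame(bool N) : NeedsFP(N) {}
  bool hasFP() const override { return NeedsFP; }
};

// Mirrors getFrameRegister above: FP register if a frame pointer exists,
// otherwise the stack pointer (register numbers are illustrative).
unsigned frameRegister(const FrameLowering &TFI, bool IsPPC64) {
  if (!IsPPC64) return TFI.hasFP() ? 31 /*R31*/ : 1 /*R1*/;
  return TFI.hasFP() ? 31 /*X31*/ : 1 /*X1*/;
}

int main() {
  PPCFrame F(true);
  std::cout << frameRegister(F, false) << "\n"; // prints 31
}
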
unsigned PPCRegisterInfo::getEHExceptionRegister() const {
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 890b24b..aa29ffe 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -44,17 +44,10 @@ public:
BitVector getReservedRegs(const MachineFunction &MF) const;
- /// targetHandlesStackFrameRounding - Returns true if the target is
- /// responsible for rounding up the stack frame (probably at emitPrologue
- /// time).
- bool targetHandlesStackFrameRounding() const { return true; }
-
/// requiresRegisterScavenging - We require a register scavenger.
/// FIXME (64-bit): Should be inlined.
bool requiresRegisterScavenging(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -66,21 +59,9 @@ public:
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- /// determineFrameLayout - Determine the size of the frame and maximum call
- /// frame size.
- void determineFrameLayout(MachineFunction &MF) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
- void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
// Exception handling queries.
unsigned getEHExceptionRegister() const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 8604f54..2639165 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -300,13 +300,14 @@ def GPRC : RegisterClass<"PPC", [i32], 32,
// R31 when the FP is not needed.
// When using the 32-bit SVR4 ABI, r13 is reserved for the Small Data Area
// pointer.
- const PPCSubtarget &Subtarget
- = MF.getTarget().getSubtarget<PPCSubtarget>();
-
+ const PPCSubtarget &Subtarget = MF.getTarget().getSubtarget<PPCSubtarget>();
+ const PPCFrameLowering *PPCFI =
+ static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
+
if (Subtarget.isPPC64() || Subtarget.isSVR4ABI())
return end()-5; // don't allocate R13, R31, R0, R1, LR
- if (needsFP(MF))
+ if (PPCFI->needsFP(MF))
return end()-4; // don't allocate R31, R0, R1, LR
else
return end()-3; // don't allocate R0, R1, LR
@@ -331,7 +332,9 @@ def G8RC : RegisterClass<"PPC", [i64], 64,
}
G8RCClass::iterator
G8RCClass::allocation_order_end(const MachineFunction &MF) const {
- if (needsFP(MF))
+ const PPCFrameLowering *PPCFI =
+ static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
+ if (PPCFI->needsFP(MF))
return end()-5;
else
return end()-4;
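
The allocation-order hunks above work by trimming the end iterator of a fixed register array whose tail holds the never- or conditionally-allocatable registers. A standalone sketch of that idiom (register names are illustrative, not the real GPRC order):

#include <array>
#include <cstdio>

int main() {
  // Tail of the array holds the registers the allocator must not hand out.
  std::array<const char*, 8> Order = {"R3","R4","R5","R12","R31","R0","R1","LR"};
  bool NeedsFP = true;
  // Drop R31 as well when a frame pointer is needed, as in the code above.
  auto End = Order.end() - (NeedsFP ? 4 : 3);
  for (auto I = Order.begin(); I != End; ++I)
    std::printf("%s ", *I);   // R3 R4 R5 R12
}
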
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index 7344763..ad4da1f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -13,7 +13,7 @@
def G3Itineraries : ProcessorItineraries<
- [IU1, IU2, FPU1, BPU, SRU, SLU], [
+ [IU1, IU2, FPU1, BPU, SRU, SLU], [], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index 7efc693..03c3b29 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
def G4Itineraries : ProcessorItineraries<
- [IU1, IU2, SLU, SRU, BPU, FPU1, VIU1, VIU2, VPU, VFPU], [
+ [IU1, IU2, SLU, SRU, BPU, FPU1, VIU1, VIU2, VPU, VFPU], [], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index 15056c0..00cac3c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -15,7 +15,7 @@ def IU3 : FuncUnit; // integer unit 3 (7450 simple)
def IU4 : FuncUnit; // integer unit 4 (7450 simple)
def G4PlusItineraries : ProcessorItineraries<
- [IU1, IU2, IU3, IU4, BPU, SLU, FPU1, VFPU, VIU1, VIU2, VPU], [
+ [IU1, IU2, IU3, IU4, BPU, SLU, FPU1, VFPU, VIU1, VIU2, VPU], [], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntDivW , [InstrStage<23, [IU2]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index 2dffc48..1671f22 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
def G5Itineraries : ProcessorItineraries<
- [IU1, IU2, SLU, BPU, FPU1, FPU2, VFPU, VIU1, VIU2, VPU], [
+ [IU1, IU2, SLU, BPU, FPU1, FPU2, VFPU, VIU1, VIU2, VPU], [], [
InstrItinData<IntGeneral , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<3, [IU1, IU2]>]>,
InstrItinData<IntDivD , [InstrStage<68, [IU1]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
index 5d46065..72a1dee 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -129,7 +129,7 @@ void PPCSubtarget::SetJITMode() {
/// is required to get the address of the global.
bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV,
const TargetMachine &TM) const {
- // We never hae stubs if HasLazyResolverStubs=false or if in static mode.
+ // We never have stubs if HasLazyResolverStubs=false or if in static mode.
if (!HasLazyResolverStubs || TM.getRelocationModel() == Reloc::Static)
return false;
// If symbol visibility is hidden, the extra load is not needed if
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index 10cd10b..212b450 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -15,6 +15,7 @@
#include "PPCMCAsmInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/PassManager.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Support/FormattedStream.h"
@@ -29,6 +30,21 @@ static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
}
+// This is duplicated code. Refactor this.
+static MCStreamer *createMCStreamer(const Target &T, const std::string &TT,
+ MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll,
+ bool NoExecStack) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll);
+ default:
+ return NULL;
+ }
+}
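
A minimal model of the dispatch in createMCStreamer, simplified to plain C++ (the enum and names here are assumptions, not the real Triple API): the object streamer is chosen from the triple's OS, and at this point PowerPC only knows how to emit Mach-O, so every other OS gets no streamer.

#include <cstdio>

enum class OS { Darwin, Linux, Other };

const char *objectStreamerFor(OS TheOS) {
  switch (TheOS) {
  case OS::Darwin: return "MachOStreamer";
  default:         return nullptr; // no direct object emission yet
  }
}

int main() {
  std::printf("%s\n", objectStreamerFor(OS::Darwin));        // MachOStreamer
  std::printf("%p\n", (void*)objectStreamerFor(OS::Linux));  // (nil)
}
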
+
extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
@@ -36,6 +52,19 @@ extern "C" void LLVMInitializePowerPCTarget() {
RegisterAsmInfoFn C(ThePPC32Target, createMCAsmInfo);
RegisterAsmInfoFn D(ThePPC64Target, createMCAsmInfo);
+
+ // Register the MC Code Emitter
+ TargetRegistry::RegisterCodeEmitter(ThePPC32Target, createPPCMCCodeEmitter);
+ TargetRegistry::RegisterCodeEmitter(ThePPC64Target, createPPCMCCodeEmitter);
+
+
+ // Register the asm backend.
+ TargetRegistry::RegisterAsmBackend(ThePPC32Target, createPPCAsmBackend);
+ TargetRegistry::RegisterAsmBackend(ThePPC64Target, createPPCAsmBackend);
+
+ // Register the object streamer.
+ TargetRegistry::RegisterObjectStreamer(ThePPC32Target, createMCStreamer);
+ TargetRegistry::RegisterObjectStreamer(ThePPC64Target, createMCStreamer);
}
@@ -44,7 +73,7 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, const std::string &TT,
: LLVMTargetMachine(T, TT),
Subtarget(TT, FS, is64Bit),
DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this),
- FrameInfo(*this, is64Bit), JITInfo(*this, is64Bit),
+ FrameLowering(Subtarget), JITInfo(*this, is64Bit),
TLInfo(*this), TSInfo(*this),
InstrItins(Subtarget.getInstrItineraryData()) {
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
index 626ddbb..2d24989 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
@@ -14,7 +14,7 @@
#ifndef PPC_TARGETMACHINE_H
#define PPC_TARGETMACHINE_H
-#include "PPCFrameInfo.h"
+#include "PPCFrameLowering.h"
#include "PPCSubtarget.h"
#include "PPCJITInfo.h"
#include "PPCInstrInfo.h"
@@ -33,7 +33,7 @@ class PPCTargetMachine : public LLVMTargetMachine {
PPCSubtarget Subtarget;
const TargetData DataLayout; // Calculates type size & alignment
PPCInstrInfo InstrInfo;
- PPCFrameInfo FrameInfo;
+ PPCFrameLowering FrameLowering;
PPCJITInfo JITInfo;
PPCTargetLowering TLInfo;
PPCSelectionDAGInfo TSInfo;
@@ -43,23 +43,25 @@ public:
PPCTargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool is64Bit);
- virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const PPCFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
+ virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ virtual const PPCFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
virtual const PPCTargetLowering *getTargetLowering() const {
return &TLInfo;
}
virtual const PPCSelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
- virtual const PPCRegisterInfo *getRegisterInfo() const {
+ virtual const PPCRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
virtual const PPCSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItins;
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return &InstrItins;
}
// Pass Pipeline Configuration
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index aae5da8..ee29275 100644
--- a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -7,21 +7,32 @@
//
//===----------------------------------------------------------------------===//
//
-// This is a simple local pass that fills delay slots with NOPs.
-//
+// This is a simple local pass that attempts to fill delay slots with useful
+// instructions. If no instructions can be moved into the delay slot, then a
+// NOP is placed.
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "delayslotfiller"
+#define DEBUG_TYPE "delay-slot-filler"
#include "Sparc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+
using namespace llvm;
STATISTIC(FilledSlots, "Number of delay slots filled");
+static cl::opt<bool> DisableDelaySlotFiller(
+ "disable-sparc-delay-filler",
+ cl::init(false),
+ cl::desc("Disable the Sparc delay slot filler."),
+ cl::Hidden);
+
namespace {
struct Filler : public MachineFunctionPass {
/// Target machine description which we query for reg. names, data
@@ -47,6 +58,28 @@ namespace {
return Changed;
}
+ bool isDelayFiller(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator candidate);
+
+ void insertCallUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegUses);
+
+ void insertDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses);
+
+ bool IsRegInSet(SmallSet<unsigned, 32>& RegSet,
+ unsigned Reg);
+
+ bool delayHasHazard(MachineBasicBlock::iterator candidate,
+ bool &sawLoad, bool &sawStore,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
+
+ MachineBasicBlock::iterator
+ findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot);
+
+
};
char Filler::ID = 0;
} // end of anonymous namespace
@@ -59,18 +92,201 @@ FunctionPass *llvm::createSparcDelaySlotFillerPass(TargetMachine &tm) {
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
-/// Currently, we fill delay slots with NOPs. We assume there is only one
-/// delay slot per delayed instruction.
+/// We assume there is only one delay slot per delayed instruction.
///
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
+
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
if (I->getDesc().hasDelaySlot()) {
+ MachineBasicBlock::iterator D = MBB.end();
MachineBasicBlock::iterator J = I;
- ++J;
- BuildMI(MBB, J, DebugLoc(), TII->get(SP::NOP));
+
+ if (!DisableDelaySlotFiller)
+ D = findDelayInstr(MBB, I);
+
++FilledSlots;
Changed = true;
+
+ if (D == MBB.end())
+ BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(SP::NOP));
+ else
+ MBB.splice(++J, &MBB, D);
}
return Changed;
}
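
The rewritten loop either splices a useful instruction into the delay slot or falls back to a NOP. A toy model using std::list, whose splice has the same move-without-copy semantics as MachineBasicBlock::splice:

#include <iostream>
#include <iterator>
#include <list>
#include <string>

int main() {
  std::list<std::string> MBB = {"add", "ld", "retl"};
  auto Slot = std::prev(MBB.end());    // the delayed instruction: retl
  auto D = MBB.begin();                // pretend findDelayInstr chose "add"
  MBB.splice(std::next(Slot), MBB, D); // move it into the delay slot
  for (auto &I : MBB)
    std::cout << I << "\n";            // ld, retl, add
}
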
+
+MachineBasicBlock::iterator
+Filler::findDelayInstr(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator slot)
+{
+ SmallSet<unsigned, 32> RegDefs;
+ SmallSet<unsigned, 32> RegUses;
+ bool sawLoad = false;
+ bool sawStore = false;
+
+ MachineBasicBlock::iterator I = slot;
+
+ if (slot->getOpcode() == SP::RET)
+ return MBB.end();
+
+ if (slot->getOpcode() == SP::RETL) {
+ --I;
+ if (I->getOpcode() != SP::RESTORErr)
+ return MBB.end();
+    // Change RETL to RET.
+ slot->setDesc(TII->get(SP::RET));
+ return I;
+ }
+
+ //Call's delay filler can def some of call's uses.
+ if (slot->getDesc().isCall())
+ insertCallUses(slot, RegUses);
+ else
+ insertDefsUses(slot, RegDefs, RegUses);
+
+ bool done = false;
+
+ while (!done) {
+ done = (I == MBB.begin());
+
+ if (!done)
+ --I;
+
+ // skip debug value
+ if (I->isDebugValue())
+ continue;
+
+
+ if (I->hasUnmodeledSideEffects()
+ || I->isInlineAsm()
+ || I->isLabel()
+ || I->getDesc().hasDelaySlot()
+ || isDelayFiller(MBB, I))
+ break;
+
+ if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
+ insertDefsUses(I, RegDefs, RegUses);
+ continue;
+ }
+
+ return I;
+ }
+ return MBB.end();
+}
+
+bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
+ bool &sawLoad,
+ bool &sawStore,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses)
+{
+
+ if (candidate->isImplicitDef() || candidate->isKill())
+ return true;
+
+ if (candidate->getDesc().mayLoad()) {
+ sawLoad = true;
+ if (sawStore)
+ return true;
+ }
+
+ if (candidate->getDesc().mayStore()) {
+ if (sawStore)
+ return true;
+ sawStore = true;
+ if (sawLoad)
+ return true;
+ }
+
+ for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
+ const MachineOperand &MO = candidate->getOperand(i);
+ if (!MO.isReg())
+ continue; // skip
+
+ unsigned Reg = MO.getReg();
+
+ if (MO.isDef()) {
+ //check whether Reg is defined or used before delay slot.
+ if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))
+ return true;
+ }
+ if (MO.isUse()) {
+ //check whether Reg is defined before delay slot.
+ if (IsRegInSet(RegDefs, Reg))
+ return true;
+ }
+ }
+ return false;
+}
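
For readers auditing the hazard rules, here is a self-contained model of delayHasHazard (simplified: the implicit-def/kill early-out and register aliasing are omitted). A candidate cannot define a register the delayed instruction defines or uses, cannot use one it defines, and load/store reordering is handled conservatively:

#include <cstdio>
#include <set>

struct Cand { std::set<unsigned> defs, uses; bool load, store; };

bool hasHazard(const Cand &C, bool &sawLoad, bool &sawStore,
               const std::set<unsigned> &RegDefs,
               const std::set<unsigned> &RegUses) {
  if (C.load)  { sawLoad = true;  if (sawStore) return true; }
  if (C.store) { if (sawStore) return true;
                 sawStore = true; if (sawLoad) return true; }
  for (unsigned R : C.defs)   // would clobber something the slot owner needs
    if (RegDefs.count(R) || RegUses.count(R)) return true;
  for (unsigned R : C.uses)   // would read a value the slot owner defines
    if (RegDefs.count(R)) return true;
  return false;
}

int main() {
  std::set<unsigned> Defs = {8}, Uses; // the branch defines reg 8 (e.g. %o0)
  bool SawLoad = false, SawStore = false;
  Cand C{{8}, {}, false, false};       // candidate also defines reg 8
  std::printf("%d\n", hasHazard(C, SawLoad, SawStore, Defs, Uses)); // 1
}
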
+
+
+void Filler::insertCallUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegUses)
+{
+
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown opcode.");
+ case SP::CALL: break;
+ case SP::JMPLrr:
+ case SP::JMPLri:
+ assert(MI->getNumOperands() >= 2);
+ const MachineOperand &Reg = MI->getOperand(0);
+ assert(Reg.isReg() && "JMPL first operand is not a register.");
+ assert(Reg.isUse() && "JMPL first operand is not a use.");
+ RegUses.insert(Reg.getReg());
+
+ const MachineOperand &RegOrImm = MI->getOperand(1);
+ if (RegOrImm.isImm())
+ break;
+ assert(RegOrImm.isReg() && "JMPLrr second operand is not a register.");
+ assert(RegOrImm.isUse() && "JMPLrr second operand is not a use.");
+ RegUses.insert(RegOrImm.getReg());
+ break;
+ }
+}
+
+//Insert Defs and Uses of MI into the sets RegDefs and RegUses.
+void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses)
+{
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+
+ unsigned Reg = MO.getReg();
+ if (Reg == 0)
+ continue;
+ if (MO.isDef())
+ RegDefs.insert(Reg);
+ if (MO.isUse())
+ RegUses.insert(Reg);
+
+ }
+}
+
+//returns true if the Reg or its alias is in the RegSet.
+bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
+{
+ if (RegSet.count(Reg))
+ return true;
+ // check Aliased Registers
+ for (const unsigned *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
+ *Alias; ++ Alias)
+ if (RegSet.count(*Alias))
+ return true;
+
+ return false;
+}
+
+// return true if the candidate is a delay filler.
+bool Filler::isDelayFiller(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator candidate)
+{
+ if (candidate == MBB.begin())
+ return false;
+ const TargetInstrDesc &prevdesc = (--candidate)->getDesc();
+ return prevdesc.hasDelaySlot();
+}
diff --git a/contrib/llvm/lib/Target/Sparc/AsmPrinter/SparcAsmPrinter.cpp b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index ab948bb..edde842 100644
--- a/contrib/llvm/lib/Target/Sparc/AsmPrinter/SparcAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -145,6 +145,8 @@ bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
case MachineOperand::MO_Register:
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
"Operand is not a physical register ");
+ assert(MO.getReg() != SP::O7 &&
+ "%o7 is assigned as destination for getpcx!");
operand = "%" + LowercaseString(getRegisterName(MO.getReg()));
break;
}
@@ -156,8 +158,8 @@ bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
O << "\tcall\t.LLGETPC" << mfNum << '_' << bbNum << '\n' ;
O << "\t sethi\t"
- << "%hi(_GLOBAL_OFFSET_TABLE_+(.-.LLGETPCH" << mfNum << '_' << bbNum << ")), "
- << operand << '\n' ;
+ << "%hi(_GLOBAL_OFFSET_TABLE_+(.-.LLGETPCH" << mfNum << '_' << bbNum
+ << ")), " << operand << '\n' ;
O << ".LLGETPC" << mfNum << '_' << bbNum << ":\n" ;
O << "\tor\t" << operand
diff --git a/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
index 33ecfdf..856f87a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
@@ -24,9 +24,13 @@ def RetCC_Sparc32 : CallingConv<[
// Sparc 32-bit C Calling convention.
def CC_Sparc32 : CallingConv<[
- // All arguments get passed in integer registers if there is space.
- CCIfType<[i32, f32, f64], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
-
+ //Custom assign SRet to [sp+64].
+ CCIfSRet<CCCustom<"CC_Sparc_Assign_SRet">>,
+ // i32 f32 arguments get passed in integer registers if there is space.
+ CCIfType<[i32, f32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
+ // f64 arguments are split and passed through registers or through stack.
+ CCIfType<[f64], CCCustom<"CC_Sparc_Assign_f64">>,
+
// Alternatively, they are assigned to the stack in 4-byte aligned units.
CCAssignToStack<4, 4>
]>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
new file mode 100644
index 0000000..320c8ca
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -0,0 +1,80 @@
+//====- SparcFrameLowering.cpp - Sparc Frame Information -------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Sparc implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcFrameLowering.h"
+#include "SparcInstrInfo.h"
+#include "SparcMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+void SparcFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Get the number of bytes to allocate from the FrameInfo
+ int NumBytes = (int) MFI->getStackSize();
+
+ // Emit the correct save instruction based on the number of bytes in
+ // the frame. Minimum stack frame size according to V8 ABI is:
+ // 16 words for register window spill
+ // 1 word for address of returned aggregate-value
+ // + 6 words for passing parameters on the stack
+ // ----------
+ // 23 words * 4 bytes per word = 92 bytes
+ NumBytes += 92;
+
+ // Round up to next doubleword boundary -- a double-word boundary
+ // is required by the ABI.
+ NumBytes = (NumBytes + 7) & ~7;
+ NumBytes = -NumBytes;
+
+ if (NumBytes >= -4096) {
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SAVEri), SP::O6)
+ .addReg(SP::O6).addImm(NumBytes);
+ } else {
+ // Emit this the hard way. This clobbers G1 which we always know is
+ // available here.
+ unsigned OffHi = (unsigned)NumBytes >> 10U;
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1).addImm(OffHi);
+    // OR in the low 10 bits: G1 = G1 | (NumBytes & 0x3ff)
+ BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
+ .addReg(SP::G1).addImm(NumBytes & ((1 << 10)-1));
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SAVErr), SP::O6)
+ .addReg(SP::O6).addReg(SP::G1);
+ }
+}
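
The prologue arithmetic above compresses to a few lines. A sketch under the same V8 assumptions (92 reserved bytes, doubleword alignment, 13-bit signed SAVE immediate):

#include <cstdio>

int saveImmediate(int StackSize) {
  int NumBytes = StackSize + 92;   // 23 reserved words * 4 bytes
  NumBytes = (NumBytes + 7) & ~7;  // keep the frame doubleword aligned
  return -NumBytes;                // stack grows down
}

int main() {
  std::printf("%d\n", saveImmediate(0));    // -96: a single SAVEri suffices
  std::printf("%d\n", saveImmediate(8000)); // -8096: below -4096, needs SETHI/ORri
}
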
+
+void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+ assert(MBBI->getOpcode() == SP::RETL &&
+ "Can only put epilog before 'retl' instruction!");
+ BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
+ .addReg(SP::G0);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
new file mode 100644
index 0000000..9a2ddc8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -0,0 +1,41 @@
+//===- SparcFrameLowering.h - Define frame lowering for Sparc --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARC_FRAMEINFO_H
+#define SPARC_FRAMEINFO_H
+
+#include "Sparc.h"
+#include "SparcSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class SparcSubtarget;
+
+class SparcFrameLowering : public TargetFrameLowering {
+ const SparcSubtarget &STI;
+public:
+ explicit SparcFrameLowering(const SparcSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, 0), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool hasFP(const MachineFunction &MF) const { return false; }
+};
+
+} // End llvm namespace
+
+#endif
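
For reference, a toy mirror of the three constructor arguments passed to TargetFrameLowering above (the parameter meanings here are our reading of that constructor: growth direction, stack alignment, local-area offset):

#include <cstdio>

enum class Growth { Up, Down };

struct FrameTraits {
  Growth Dir;        // StackGrowsDown on Sparc
  unsigned Align;    // 8: frames stay doubleword aligned
  int LocalAreaOff;  // 0: locals start at the stack pointer
};

int main() {
  FrameTraits Sparc{Growth::Down, 8, 0};
  std::printf("align=%u lao=%d\n", Sparc.Align, Sparc.LocalAreaOff);
}
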
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 4ea94c4..8c6103d 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -44,9 +44,8 @@ public:
SDNode *Select(SDNode *N);
// Complex Pattern Selectors.
- bool SelectADDRrr(SDNode *Op, SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDRri(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset);
+ bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
+ bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
@@ -71,7 +70,7 @@ SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
-bool SparcDAGToDAGISel::SelectADDRri(SDNode *Op, SDValue Addr,
+bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
@@ -112,8 +111,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDNode *Op, SDValue Addr,
return true;
}
-bool SparcDAGToDAGISel::SelectADDRrr(SDNode *Op, SDValue Addr,
- SDValue &R1, SDValue &R2) {
+bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
if (Addr.getOpcode() == ISD::FrameIndex) return false;
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
Addr.getOpcode() == ISD::TargetGlobalAddress)
@@ -160,7 +158,7 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
} else {
TopPart = CurDAG->getRegister(SP::G0, MVT::i32);
}
- TopPart = SDValue(CurDAG->getMachineNode(SP::WRYrr, dl, MVT::Flag, TopPart,
+ TopPart = SDValue(CurDAG->getMachineNode(SP::WRYrr, dl, MVT::Glue, TopPart,
CurDAG->getRegister(SP::G0, MVT::i32)), 0);
// FIXME: Handle div by immediate.
@@ -174,7 +172,7 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
SDValue MulLHS = N->getOperand(0);
SDValue MulRHS = N->getOperand(1);
unsigned Opcode = N->getOpcode() == ISD::MULHU ? SP::UMULrr : SP::SMULrr;
- SDNode *Mul = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Flag,
+ SDNode *Mul = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Glue,
MulLHS, MulRHS);
// The high part is in the Y register.
return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDValue(Mul, 1));
@@ -196,8 +194,8 @@ SparcDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
switch (ConstraintCode) {
default: return true;
case 'm': // memory
- if (!SelectADDRrr(Op.getNode(), Op, Op0, Op1))
- SelectADDRri(Op.getNode(), Op, Op0, Op1);
+ if (!SelectADDRrr(Op, Op0, Op1))
+ SelectADDRri(Op, Op0, Op1);
break;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 4099a62..196b87d 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1,3 +1,4 @@
+
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// The LLVM Compiler Infrastructure
@@ -32,6 +33,47 @@ using namespace llvm;
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
+static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ assert (ArgFlags.isSRet());
+
+ //Assign SRet argument
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ 0,
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ static const unsigned RegList[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+ //Try to get first reg
+ if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ } else {
+ //Assign whole thing in stack
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(8,4),
+ LocVT, LocInfo));
+ return true;
+ }
+
+ //Try to get second reg
+ if (unsigned Reg = State.AllocateReg(RegList, 6))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(4,4),
+ LocVT, LocInfo));
+ return true;
+}
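
A toy model of the custom f64 rule (simplified: CCState bookkeeping is dropped, and the all-stack case, which the real code allocates as one 8-byte chunk, is modeled as two independent 4-byte slots): the two halves are assigned high word first, registers before stack, so a double can straddle the last register and the first stack slot.

#include <cstdio>

struct Loc { bool InReg; unsigned RegOrOff; };

struct Alloc {
  unsigned NextReg = 0, NextStackOff = 0;
  Loc take() {
    if (NextReg < 6) return {true, NextReg++};     // I0..I5 first
    Loc L{false, NextStackOff}; NextStackOff += 4; // then 4-byte stack slots
    return L;
  }
};

int main() {
  Alloc A;
  A.NextReg = 5;                       // pretend I0..I4 are already taken
  Loc Hi = A.take(), Lo = A.take();    // big-endian: high word first
  std::printf("hi: %s%u\n", Hi.InReg ? "reg I" : "stack+", Hi.RegOrOff);
  std::printf("lo: %s%u\n", Lo.InReg ? "reg I" : "stack+", Lo.RegOrOff);
}
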
+
#include "SparcGenCallingConv.inc"
SDValue
@@ -41,6 +83,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -53,10 +97,10 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
// If this is the first return lowered for this function, add the regs to the
// liveout set for the function.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
+ if (MF.getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
if (RVLocs[i].isRegLoc())
- DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
+ MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
SDValue Flag;
@@ -66,12 +110,24 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1);
}
+ // If the function returns a struct, copy the SRetReturnReg to I0
+ if (MF.getFunction()->hasStructRetAttr()) {
+ SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
+ unsigned Reg = SFI->getSRetReturnReg();
+ if (!Reg)
+ llvm_unreachable("sret virtual register not created in the entry block");
+ SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
+ Chain = DAG.getCopyToReg(Chain, dl, SP::I0, Val, Flag);
+ Flag = Chain.getValue(1);
+ if (MF.getRegInfo().liveout_empty())
+ MF.getRegInfo().addLiveOut(SP::I0);
+ }
if (Flag.getNode())
return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
@@ -100,135 +156,159 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
- static const unsigned ArgRegs[] = {
- SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
- };
- const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6;
- unsigned ArgOffset = 68;
+ const unsigned StackOffset = 92;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- SDValue ArgValue;
CCValAssign &VA = ArgLocs[i];
- // FIXME: We ignore the register assignments of AnalyzeFormalArguments
- // because it doesn't know how to split a double into two i32 registers.
- EVT ObjectVT = VA.getValVT();
- switch (ObjectVT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unhandled argument type!");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- if (!Ins[i].Used) { // Argument is dead.
- if (CurArgReg < ArgRegEnd) ++CurArgReg;
- InVals.push_back(DAG.getUNDEF(ObjectVT));
- } else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
- unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
- MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
- SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
- if (ObjectVT != MVT::i32) {
- unsigned AssertOp = ISD::AssertSext;
- Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
- DAG.getValueType(ObjectVT));
- Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
- }
- InVals.push_back(Arg);
- } else {
- int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true);
- SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- SDValue Load;
- if (ObjectVT == MVT::i32) {
- Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
- false, false, 0);
- } else {
- ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
-
- // Sparc is big endian, so add an offset based on the ObjectVT.
- unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
- FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
- DAG.getConstant(Offset, MVT::i32));
- Load = DAG.getExtLoad(LoadOp, MVT::i32, dl, Chain, FIPtr,
- NULL, 0, ObjectVT, false, false, 0);
- Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
- }
- InVals.push_back(Load);
- }
- ArgOffset += 4;
- break;
- case MVT::f32:
- if (!Ins[i].Used) { // Argument is dead.
- if (CurArgReg < ArgRegEnd) ++CurArgReg;
- InVals.push_back(DAG.getUNDEF(ObjectVT));
- } else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
- // FP value is passed in an integer register.
- unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
- MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
- SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
-
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
- InVals.push_back(Arg);
- } else {
- int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true);
- SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0,
- false, false, 0);
- InVals.push_back(Load);
- }
- ArgOffset += 4;
- break;
+ if (i == 0 && Ins[i].Flags.isSRet()) {
+ //Get SRet from [%fp+64]
+ int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
+ SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+ SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
+ MachinePointerInfo(),
+ false, false, 0);
+ InVals.push_back(Arg);
+ continue;
+ }
- case MVT::i64:
- case MVT::f64:
- if (!Ins[i].Used) { // Argument is dead.
- if (CurArgReg < ArgRegEnd) ++CurArgReg;
- if (CurArgReg < ArgRegEnd) ++CurArgReg;
- InVals.push_back(DAG.getUNDEF(ObjectVT));
- } else {
- SDValue HiVal;
- if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
- unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
- MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
- HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
- } else {
- int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true);
- SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
- false, false, 0);
- }
+ if (VA.isRegLoc()) {
+ EVT RegVT = VA.getLocVT();
+
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::f64);
+ unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
+ MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
+ SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
+
+ assert(i+1 < e);
+ CCValAssign &NextVA = ArgLocs[++i];
SDValue LoVal;
- if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
- unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
- MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
- LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
- } else {
- int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4,
- true);
+ if (NextVA.isMemLoc()) {
+ int FrameIdx = MF.getFrameInfo()->
+ CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
+ LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
+ MachinePointerInfo(),
false, false, 0);
+ } else {
+ unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
+ &SP::IntRegsRegClass, dl);
+ LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
}
-
- // Compose the two halves together into an i64 unit.
SDValue WholeValue =
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
+ InVals.push_back(WholeValue);
+ continue;
+ }
+ unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
+ MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
+ SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+ if (VA.getLocVT() == MVT::f32)
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
+ else if (VA.getLocVT() != MVT::i32) {
+ Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
+ DAG.getValueType(VA.getLocVT()));
+ Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
+ }
+ InVals.push_back(Arg);
+ continue;
+ }
- // If we want a double, do a bit convert.
- if (ObjectVT == MVT::f64)
- WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
+ assert(VA.isMemLoc());
- InVals.push_back(WholeValue);
+ unsigned Offset = VA.getLocMemOffset()+StackOffset;
+
+ if (VA.needsCustom()) {
+ assert(VA.getValVT() == MVT::f64);
+ //If it is double-word aligned, just load.
+ if (Offset % 8 == 0) {
+ int FI = MF.getFrameInfo()->CreateFixedObject(8,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
+ MachinePointerInfo(),
+ false,false, 0);
+ InVals.push_back(Load);
+ continue;
}
- ArgOffset += 8;
- break;
+
+ int FI = MF.getFrameInfo()->CreateFixedObject(4,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
+ MachinePointerInfo(),
+ false, false, 0);
+ int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
+ Offset+4,
+ true);
+ SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy());
+
+ SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
+ MachinePointerInfo(),
+ false, false, 0);
+
+ SDValue WholeValue =
+ DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
+ InVals.push_back(WholeValue);
+ continue;
+ }
+
+ int FI = MF.getFrameInfo()->CreateFixedObject(4,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
+    SDValue Load;
+ if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
+ Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
+ MachinePointerInfo(),
+ false, false, 0);
+ } else {
+ ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
+ // Sparc is big endian, so add an offset based on the ObjectVT.
+ unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
+ FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
+ DAG.getConstant(Offset, MVT::i32));
+ Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
+ MachinePointerInfo(),
+ VA.getValVT(), false, false,0);
+ Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
}
+ InVals.push_back(Load);
+ }
+
+ if (MF.getFunction()->hasStructRetAttr()) {
+ //Copy the SRet Argument to SRetReturnReg
+ SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
+ unsigned Reg = SFI->getSRetReturnReg();
+ if (!Reg) {
+ Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
+ SFI->setSRetReturnReg(Reg);
+ }
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
// Store remaining ArgRegs to the stack if this is a varargs function.
if (isVarArg) {
+ static const unsigned ArgRegs[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+ unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
+ const unsigned *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
+ unsigned ArgOffset = CCInfo.getNextStackOffset();
+ if (NumAllocated == 6)
+ ArgOffset += StackOffset;
+ else {
+ assert(!ArgOffset);
+ ArgOffset = 68+4*NumAllocated;
+ }
+
// Remember the vararg offset for the va_start implementation.
FuncInfo->setVarArgsFrameOffset(ArgOffset);
@@ -243,7 +323,8 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0,
+ OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
+ MachinePointerInfo(),
false, false, 0));
ArgOffset += 4;
}
@@ -270,191 +351,180 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Sparc target does not yet support tail call optimization.
isTailCall = false;
-#if 0
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs);
+ CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs,
+ *DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
// Get the size of the outgoing arguments stack space requirement.
unsigned ArgsSize = CCInfo.getNextStackOffset();
- // FIXME: We can't use this until f64 is known to take two GPRs.
-#else
- (void)CC_Sparc32;
-
- // Count the size of the outgoing arguments.
- unsigned ArgsSize = 0;
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- switch (Outs[i].VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unknown value type!");
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::f32:
- ArgsSize += 4;
- break;
- case MVT::i64:
- case MVT::f64:
- ArgsSize += 8;
- break;
- }
- }
- if (ArgsSize > 4*6)
- ArgsSize -= 4*6; // Space for first 6 arguments is prereserved.
- else
- ArgsSize = 0;
-#endif
// Keep stack frames 8-byte aligned.
ArgsSize = (ArgsSize+7) & ~7;
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+
+ //Create local copies for byval args.
+ SmallVector<SDValue, 8> ByValArgs;
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ SDValue Arg = OutVals[i];
+ unsigned Size = Flags.getByValSize();
+ unsigned Align = Flags.getByValAlign();
+
+ int FI = MFI->CreateStackObject(Size, Align, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
+ SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
+
+ Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
+ false, //isVolatile,
+ (Size <= 32), //AlwaysInline if size <= 32
+ MachinePointerInfo(), MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+ }
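
Why the local copies: a byval argument is the callee's private writable copy, so the caller must pass fresh memory. A plain C++ sketch of the same contract the memcpy above implements:

#include <cstdio>
#include <cstring>

struct Big { char Bytes[24]; };

void callee(Big *ByVal) { ByVal->Bytes[0] = 'X'; } // may scribble on its copy

void caller(const Big &Orig) {
  Big Tmp;                               // like CreateStackObject
  std::memcpy(&Tmp, &Orig, sizeof(Big)); // like DAG.getMemcpy above
  callee(&Tmp);                          // callee sees a private copy
  std::printf("%c\n", Orig.Bytes[0]);    // original stays untouched
}

int main() { Big B = {{'a'}}; caller(B); } // prints 'a'
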
+
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
-#if 0
+ const unsigned StackOffset = 92;
// Walk the register/memloc assignments, inserting copies/loads.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
+ i != e;
+ ++i, ++realArgIdx) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = OutVals[i];
+ SDValue Arg = OutVals[realArgIdx];
+
+ ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
+
+ //Use local copy if it is a byval arg.
+ if (Flags.isByVal())
+ Arg = ByValArgs[byvalArgIdx++];
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
break;
case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break;
}
- // Arguments that can be passed on register must be kept at
- // RegsToPass vector
- if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ if (Flags.isSRet()) {
+ assert(VA.needsCustom());
+ // store SRet argument in %sp+64
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(64);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
continue;
}
- assert(VA.isMemLoc());
-
- // Create a store off the stack pointer for this argument.
- SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- // FIXME: VERIFY THAT 68 IS RIGHT.
- SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68);
- PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0,
- false, false, 0));
- }
-
-#else
- static const unsigned ArgRegs[] = {
- SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
- };
- unsigned ArgOffset = 68;
-
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- SDValue Val = OutVals[i];
- EVT ObjectVT = Outs[i].VT;
- SDValue ValToStore(0, 0);
- unsigned ObjSize;
- switch (ObjectVT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unhandled argument type!");
- case MVT::i32:
- ObjSize = 4;
-
- if (RegsToPass.size() >= 6) {
- ValToStore = Val;
- } else {
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
- }
- break;
- case MVT::f32:
- ObjSize = 4;
- if (RegsToPass.size() >= 6) {
- ValToStore = Val;
- } else {
- // Convert this to a FP value in an int reg.
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val);
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
- }
- break;
- case MVT::f64: {
- ObjSize = 8;
- if (RegsToPass.size() >= 6) {
- ValToStore = Val; // Whole thing is passed in memory.
- break;
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::f64);
+
+ if (VA.isMemLoc()) {
+ unsigned Offset = VA.getLocMemOffset() + StackOffset;
+ //if it is double-word aligned, just store.
+ if (Offset % 8 == 0) {
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
+ continue;
+ }
}
- // Break into top and bottom parts by storing to the stack and loading
- // out the parts as integers. Top part goes in a reg.
SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
- Val, StackPtr, NULL, 0,
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
+ Arg, StackPtr, MachinePointerInfo(),
false, false, 0);
// Sparc is big-endian, so the high part comes first.
- SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0,
- false, false, 0);
+ SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Increment the pointer to the other half.
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getIntPtrConstant(4));
// Load the low part.
- SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0,
- false, false, 0);
-
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
-
- if (RegsToPass.size() >= 6) {
- ValToStore = Lo;
- ArgOffset += 4;
- ObjSize = 4;
+ SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
+ MachinePointerInfo(), false, false, 0);
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
+ assert(i+1 != e);
+ CCValAssign &NextVA = ArgLocs[++i];
+ if (NextVA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
+ } else {
+ //Store the low part in stack.
+ unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
+ }
} else {
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Lo));
+ unsigned Offset = VA.getLocMemOffset() + StackOffset;
+ // Store the high part.
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
+ // Store the low part.
+ PtrOff = DAG.getIntPtrConstant(Offset+4);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
}
- break;
+ continue;
}
- case MVT::i64: {
- ObjSize = 8;
- if (RegsToPass.size() >= 6) {
- ValToStore = Val; // Whole thing is passed in memory.
- break;
- }
- // Split the value into top and bottom part. Top part goes in a reg.
- SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
- DAG.getConstant(1, MVT::i32));
- SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
- DAG.getConstant(0, MVT::i32));
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
-
- if (RegsToPass.size() >= 6) {
- ValToStore = Lo;
- ArgOffset += 4;
- ObjSize = 4;
- } else {
- RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Lo));
+ // Arguments that can be passed on register must be kept at
+ // RegsToPass vector
+ if (VA.isRegLoc()) {
+ if (VA.getLocVT() != MVT::f32) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ continue;
}
- break;
- }
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ continue;
}
- if (ValToStore.getNode()) {
- SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
- PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
- PtrOff, NULL, 0,
- false, false, 0));
- }
- ArgOffset += ObjSize;
+ assert(VA.isMemLoc());
+
+ // Create a store off the stack pointer for this argument.
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),
+ false, false, 0));
}
-#endif
+
// Emit all stores, make sure the occur before any copies into physregs.
if (!MemOpChains.empty())
@@ -484,11 +554,22 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
- std::vector<EVT> NodeTys;
- NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
- SDValue Ops[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops, InFlag.getNode() ? 3 : 2);
+ // Returns a chain & a flag for retval copy to use
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ unsigned Reg = RegsToPass[i].first;
+ if (Reg >= SP::I0 && Reg <= SP::I7)
+ Reg = Reg-SP::I0+SP::O0;
+
+ Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
+ }
+ if (InFlag.getNode())
+ Ops.push_back(InFlag);
+
+ Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
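
The Reg >= SP::I0 remapping above reflects SPARC register windows: what the callee will see as %i0-%i7 are the caller's %o0-%o7, so argument locations assigned as in-registers by the calling convention must be reported to the CALL node as the corresponding out-registers. A hedged sketch of the same index arithmetic (the enum values are stand-ins, not LLVM's real register numbers):

    #include <cassert>

    // Hypothetical contiguous register numbering, mirroring Reg - SP::I0 + SP::O0.
    enum { O0 = 8, O7 = 15, I0 = 24, I7 = 31 };

    int calleeToCallerView(int Reg) {
      if (Reg >= I0 && Reg <= I7)
        return Reg - I0 + O0; // %i(n) in the callee is %o(n) in the caller
      return Reg;
    }

    int main() {
      assert(calleeToCallerView(I0) == O0);
      assert(calleeToCallerView(I7) == O7);
      assert(calleeToCallerView(O0) == O0); // non-window regs pass through
      return 0;
    }
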
@@ -610,8 +691,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::f32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i32, Expand);
// Sparc has no select or setcc: expand to SELECT_CC.
setOperationAction(ISD::SELECT, MVT::i32, Expand);
@@ -701,6 +782,8 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
case SPISD::ITOF: return "SPISD::ITOF";
case SPISD::CALL: return "SPISD::CALL";
case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
+ case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
+ case SPISD::FLUSHW: return "SPISD::FLUSHW";
}
}
@@ -756,7 +839,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
}
}
-SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
+SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
// FIXME there isn't really any debug info here
@@ -765,16 +848,16 @@ SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
-
+
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
getPointerTy());
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
- SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
+ SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, NULL, 0, false, false, 0);
+ return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ AbsAddr, MachinePointerInfo(), false, false, 0);
}
SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
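
Both lowerings here build an address from a SETHI/%lo pair: SETHI materializes the top 22 bits of the symbol's address and the %lo relocation supplies the remaining 10, so an ADD (equivalently, OR) of the two halves reconstructs the full 32-bit value; under PIC the same sum is instead an offset that is added to GLOBAL_BASE_REG and loaded through. A standalone check of the 22/10-bit split (plain C++; the address is arbitrary):

    #include <cassert>
    #include <cstdint>

    // SPARC %hi/%lo split: SETHI loads the top 22 bits, %lo supplies the low 10.
    uint32_t hi22(uint32_t Addr) { return Addr & ~0x3ffu; } // what SETHI materializes
    uint32_t lo10(uint32_t Addr) { return Addr & 0x3ffu; }

    int main() {
      uint32_t Sym = 0xdeadbeef;
      assert((hi22(Sym) | lo10(Sym)) == Sym); // ADD Lo, Hi rebuilds the address
      assert((hi22(Sym) + lo10(Sym)) == Sym); // disjoint bits, so ADD == OR
      return 0;
    }
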
@@ -786,16 +869,16 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
- SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
+ SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
getPointerTy());
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr);
- return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, NULL, 0, false, false, 0);
+ return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ AbsAddr, MachinePointerInfo(), false, false, 0);
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
@@ -803,13 +886,13 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
// Convert the fp value to integer in an FP register.
assert(Op.getValueType() == MVT::i32);
Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
assert(Op.getOperand(0).getValueType() == MVT::i32);
- SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
+ SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
// Convert the int value to FP in an FP register.
return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
}
@@ -832,13 +915,13 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
if (LHS.getValueType() == MVT::i32) {
std::vector<EVT> VTs;
VTs.push_back(MVT::i32);
- VTs.push_back(MVT::Flag);
+ VTs.push_back(MVT::Glue);
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
Opc = SPISD::BRICC;
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
Opc = SPISD::BRFCC;
}
@@ -863,13 +946,13 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
if (LHS.getValueType() == MVT::i32) {
std::vector<EVT> VTs;
VTs.push_back(LHS.getValueType()); // subcc returns a value
- VTs.push_back(MVT::Flag);
+ VTs.push_back(MVT::Glue);
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
Opc = SPISD::SELECT_ICC;
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
} else {
- CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
Opc = SPISD::SELECT_FCC;
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
}
@@ -891,8 +974,8 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(FuncInfo->getVarArgsFrameOffset(),
MVT::i32));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1), SV, 0,
- false, false, 0);
+ return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1),
+ MachinePointerInfo(SV), false, false, 0);
}
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
@@ -902,27 +985,28 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
SDValue VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
DebugLoc dl = Node->getDebugLoc();
- SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr, SV, 0,
- false, false, 0);
+ SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr,
+ MachinePointerInfo(SV), false, false, 0);
// Increment the pointer, VAList, to the next vaarg
SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
DAG.getConstant(VT.getSizeInBits()/8,
MVT::i32));
// Store the incremented VAList to the legalized pointer
InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr,
- VAListPtr, SV, 0, false, false, 0);
+ VAListPtr, MachinePointerInfo(SV), false, false, 0);
// Load the actual argument out of the pointer VAList, unless this is an
// f64 load.
if (VT != MVT::f64)
- return DAG.getLoad(VT, dl, InChain, VAList, NULL, 0, false, false, 0);
+ return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
+ false, false, 0);
// Otherwise, load it as i64, then do a bitconvert.
- SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, NULL, 0,
+ SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, MachinePointerInfo(),
false, false, 0);
// Bit-Convert the value to f64.
SDValue Ops[2] = {
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V),
+ DAG.getNode(ISD::BITCAST, dl, MVT::f64, V),
V.getValue(1)
};
return DAG.getMergeValues(Ops, 2, dl);
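
The f64 tail of LowerVAARG above loads the va_list slot as an i64 and then reinterprets the bits with ISD::BITCAST rather than performing a numeric conversion. A minimal standalone model of that load-then-bitcast sequence:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Mirror of the LowerVAARG f64 path: fetch the slot as a 64-bit integer,
    // then reinterpret the bits as a double (ISD::BITCAST, not a conversion).
    double loadF64ViaI64(const unsigned char *Slot) {
      uint64_t Bits;
      std::memcpy(&Bits, Slot, sizeof Bits); // the i64 load
      double D;
      std::memcpy(&D, &Bits, sizeof D);      // the bitcast
      return D;
    }

    int main() {
      double In = 3.5;
      unsigned char Slot[8];
      std::memcpy(Slot, &In, sizeof In);
      std::printf("%g\n", loadF64ViaI64(Slot)); // prints 3.5
      return 0;
    }
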
@@ -947,13 +1031,82 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
}
+static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
+ SDValue Chain = DAG.getNode(SPISD::FLUSHW,
+ dl, MVT::Other, DAG.getEntryNode());
+ return Chain;
+}
+
+static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned FrameReg = SP::I6;
+
+ uint64_t depth = Op.getConstantOperandVal(0);
+
+ SDValue FrameAddr;
+ if (depth == 0)
+ FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
+ else {
+ // Flush first to make sure the windowed registers' values are on the stack.
+ SDValue Chain = getFLUSHW(Op, DAG);
+ FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
+
+ for (uint64_t i = 0; i != depth; ++i) {
+ SDValue Ptr = DAG.getNode(ISD::ADD,
+ dl, MVT::i32,
+ FrameAddr, DAG.getIntPtrConstant(56));
+ FrameAddr = DAG.getLoad(MVT::i32, dl,
+ Chain,
+ Ptr,
+ MachinePointerInfo(), false, false, 0);
+ }
+ }
+ return FrameAddr;
+}
+
+static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned RetReg = SP::I7;
+
+ uint64_t depth = Op.getConstantOperandVal(0);
+
+ SDValue RetAddr;
+ if (depth == 0)
+ RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
+ else {
+ // Flush first to make sure the windowed registers' values are on the stack.
+ SDValue Chain = getFLUSHW(Op, DAG);
+ RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT);
+
+ for (uint64_t i = 0; i != depth; ++i) {
+ SDValue Ptr = DAG.getNode(ISD::ADD,
+ dl, MVT::i32,
+ RetAddr,
+ DAG.getIntPtrConstant((i == depth-1)?60:56));
+ RetAddr = DAG.getLoad(MVT::i32, dl,
+ Chain,
+ Ptr,
+ MachinePointerInfo(), false, false, 0);
+ }
+ }
+ return RetAddr;
+}
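
The constants 56 and 60 above come from the V8 register-window save area: each frame reserves 16 words at the stack pointer, %l0-%l7 at offsets 0-28 and %i0-%i7 at offsets 32-60, so the previous frame's saved frame pointer (%i6) sits at offset 56 and its return address (%i7) at offset 60. That is also why getFLUSHW runs first: unflushed windows must be forced into those slots before they can be read. A standalone check of the offset arithmetic:

    #include <cassert>

    // Offset of a windowed register inside the 16-word save area.
    int localOffset(int N) { return 4 * N; }      // %l0..%l7 -> 0..28
    int inOffset(int N)    { return 32 + 4 * N; } // %i0..%i7 -> 32..60

    int main() {
      assert(inOffset(6) == 56); // saved %fp, walked by LowerFRAMEADDR
      assert(inOffset(7) == 60); // saved return address, read by LowerRETURNADDR
      return 0;
    }
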
+
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
- // Frame & Return address. Currently unimplemented
- case ISD::RETURNADDR: return SDValue();
- case ISD::FRAMEADDR: return SDValue();
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::GlobalTLSAddress:
llvm_unreachable("TLS not implemented for Sparc.");
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
@@ -1009,6 +1162,8 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, copy0MBB);
+ F->insert(It, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
@@ -1021,8 +1176,6 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(sinkMBB);
BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);
- F->insert(It, copy0MBB);
- F->insert(It, sinkMBB);
// copy0MBB:
// %FalseValue = ...
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index db39e08..849e401 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -36,7 +36,8 @@ namespace llvm {
CALL, // A call instruction.
RET_FLAG, // Return with a flag operand.
- GLOBAL_BASE_REG // Global base reg for PIC
+ GLOBAL_BASE_REG, // Global base reg for PIC
+ FLUSHW // FLUSH register windows to stack
};
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 7ede8e7..afa3c1f 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -66,15 +66,200 @@ unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
+static bool IsIntegerCC(unsigned CC)
+{
+ return (CC <= SPCC::ICC_VC);
+}
+
+
+static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
+{
+ switch(CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case SPCC::ICC_NE: return SPCC::ICC_E;
+ case SPCC::ICC_E: return SPCC::ICC_NE;
+ case SPCC::ICC_G: return SPCC::ICC_LE;
+ case SPCC::ICC_LE: return SPCC::ICC_G;
+ case SPCC::ICC_GE: return SPCC::ICC_L;
+ case SPCC::ICC_L: return SPCC::ICC_GE;
+ case SPCC::ICC_GU: return SPCC::ICC_LEU;
+ case SPCC::ICC_LEU: return SPCC::ICC_GU;
+ case SPCC::ICC_CC: return SPCC::ICC_CS;
+ case SPCC::ICC_CS: return SPCC::ICC_CC;
+ case SPCC::ICC_POS: return SPCC::ICC_NEG;
+ case SPCC::ICC_NEG: return SPCC::ICC_POS;
+ case SPCC::ICC_VC: return SPCC::ICC_VS;
+ case SPCC::ICC_VS: return SPCC::ICC_VC;
+
+ case SPCC::FCC_U: return SPCC::FCC_O;
+ case SPCC::FCC_O: return SPCC::FCC_U;
+ case SPCC::FCC_G: return SPCC::FCC_LE;
+ case SPCC::FCC_LE: return SPCC::FCC_G;
+ case SPCC::FCC_UG: return SPCC::FCC_ULE;
+ case SPCC::FCC_ULE: return SPCC::FCC_UG;
+ case SPCC::FCC_L: return SPCC::FCC_GE;
+ case SPCC::FCC_GE: return SPCC::FCC_L;
+ case SPCC::FCC_UL: return SPCC::FCC_UGE;
+ case SPCC::FCC_UGE: return SPCC::FCC_UL;
+ case SPCC::FCC_LG: return SPCC::FCC_UE;
+ case SPCC::FCC_UE: return SPCC::FCC_LG;
+ case SPCC::FCC_NE: return SPCC::FCC_E;
+ case SPCC::FCC_E: return SPCC::FCC_NE;
+ }
+}
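
GetOppositeBranchCondition is an involution: applying it twice returns the original code. AnalyzeBranch (below) depends on the pairing being exact when it rewrites "brCC L1; ba L2; L1:" into "brnCC L2; L1:". A standalone sketch over a small subset of the table:

    #include <cassert>

    // A slice of the opposite-condition table (illustrative subset).
    enum CC { ICC_E, ICC_NE, ICC_G, ICC_LE };

    CC opposite(CC C) {
      switch (C) {
      case ICC_E:  return ICC_NE;
      case ICC_NE: return ICC_E;
      case ICC_G:  return ICC_LE;
      case ICC_LE: return ICC_G;
      }
      return C; // not reached
    }

    int main() {
      const CC All[] = { ICC_E, ICC_NE, ICC_G, ICC_LE };
      for (CC C : All)
        assert(opposite(opposite(C)) == C); // involution: safe to invert twice
      return 0;
    }
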
+
+
+bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const
+{
+
+ MachineBasicBlock::iterator I = MBB.end();
+ MachineBasicBlock::iterator UnCondBrIter = MBB.end();
+ while (I != MBB.begin()) {
+ --I;
+
+ if (I->isDebugValue())
+ continue;
+
+ // When we see a non-terminator, we are done.
+ if (!isUnpredicatedTerminator(I))
+ break;
+
+ // Terminator is not a branch.
+ if (!I->getDesc().isBranch())
+ return true;
+
+ // Handle unconditional branches.
+ if (I->getOpcode() == SP::BA) {
+ UnCondBrIter = I;
+
+ if (!AllowModify) {
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+
+ while (llvm::next(I) != MBB.end())
+ llvm::next(I)->eraseFromParent();
+
+ Cond.clear();
+ FBB = 0;
+
+ if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
+ TBB = 0;
+ I->eraseFromParent();
+ I = MBB.end();
+ UnCondBrIter = MBB.end();
+ continue;
+ }
+
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+
+ unsigned Opcode = I->getOpcode();
+ if (Opcode != SP::BCOND && Opcode != SP::FBCOND)
+ return true; // Unknown opcode
+
+ SPCC::CondCodes BranchCode = (SPCC::CondCodes)I->getOperand(1).getImm();
+
+ if (Cond.empty()) {
+ MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
+ if (AllowModify && UnCondBrIter != MBB.end() &&
+ MBB.isLayoutSuccessor(TargetBB)) {
+
+ // Transform the code
+ //
+ // brCC L1
+ // ba L2
+ // L1:
+ // ..
+ // L2:
+ //
+ // into
+ //
+ // brnCC L2
+ // L1:
+ // ...
+ // L2:
+ //
+ BranchCode = GetOppositeBranchCondition(BranchCode);
+ MachineBasicBlock::iterator OldInst = I;
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(Opcode))
+ .addMBB(UnCondBrIter->getOperand(0).getMBB()).addImm(BranchCode);
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(SP::BA))
+ .addMBB(TargetBB);
+ MBB.addSuccessor(TargetBB);
+ OldInst->eraseFromParent();
+ UnCondBrIter->eraseFromParent();
+
+ UnCondBrIter = MBB.end();
+ I = MBB.end();
+ continue;
+ }
+ FBB = TBB;
+ TBB = I->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(BranchCode));
+ continue;
+ }
+ // FIXME: Handle subsequent conditional branches.
+ // For now, we can't handle multiple conditional branches.
+ return true;
+ }
+ return false;
+}
+
unsigned
SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL)const{
- // Can only insert uncond branches so far.
- assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
- return 1;
+ DebugLoc DL) const {
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 1 || Cond.size() == 0) &&
+ "Sparc branch conditions should have one component!");
+
+ if (Cond.empty()) {
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
+ return 1;
+ }
+
+ // Conditional branch.
+ unsigned CC = Cond[0].getImm();
+
+ if (IsIntegerCC(CC))
+ BuildMI(&MBB, DL, get(SP::BCOND)).addMBB(TBB).addImm(CC);
+ else
+ BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
+ if (!FBB)
+ return 1;
+
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
+ return 2;
+}
+
+unsigned SparcInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const
+{
+ MachineBasicBlock::iterator I = MBB.end();
+ unsigned Count = 0;
+ while (I != MBB.begin()) {
+ --I;
+
+ if (I->isDebugValue())
+ continue;
+
+ if (I->getOpcode() != SP::BA
+ && I->getOpcode() != SP::BCOND
+ && I->getOpcode() != SP::FBCOND)
+ break; // Not a branch
+
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+ return Count;
}
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
index c00bd21..b2d24f5 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -58,8 +58,15 @@ public:
/// any side effects other than storing to the stack slot.
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
-
-
+
+
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const;
+
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 467ed48..1072323 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -95,10 +95,10 @@ SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDTSPITOF :
SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
-def SPcmpicc : SDNode<"SPISD::CMPICC", SDTIntBinOp, [SDNPOutFlag]>;
-def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutFlag]>;
-def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInFlag]>;
-def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInFlag]>;
+def SPcmpicc : SDNode<"SPISD::CMPICC", SDTIntBinOp, [SDNPOutGlue]>;
+def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
+def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
def SPhi : SDNode<"SPISD::Hi", SDTIntUnaryOp>;
def SPlo : SDNode<"SPISD::Lo", SDTIntUnaryOp>;
@@ -106,8 +106,8 @@ def SPlo : SDNode<"SPISD::Lo", SDTIntUnaryOp>;
def SPftoi : SDNode<"SPISD::FTOI", SDTSPFTOI>;
def SPitof : SDNode<"SPISD::ITOF", SDTSPITOF>;
-def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInFlag]>;
-def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInFlag]>;
+def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>;
+def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInGlue]>;
// These are target-independent nodes, but have target-specific formats.
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
@@ -115,16 +115,20 @@ def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
def call : SDNode<"SPISD::CALL", SDT_SPCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
def retflag : SDNode<"SPISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
+ [SDNPHasChain]>;
def getPCX : Operand<i32> {
let PrintMethod = "printGetPCX";
@@ -204,7 +208,7 @@ class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSP<outs, ins, asmstr, pattern>;
// GETPCX for PIC
-let Defs = [O7], Uses = [O7] in {
+let Defs = [O7] in {
def GETPCX : Pseudo<(outs getPCX:$getpcseq), (ins), "$getpcseq", [] >;
}
@@ -217,6 +221,17 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
+let hasSideEffects = 1, mayStore = 1 in {
+ let rd = 0, rs1 = 0, rs2 = 0 in
+ def FLUSHW : F3_1<0b10, 0b101011, (outs), (ins),
+ "flushw",
+ [(flushw)]>, Requires<[HasV9]>;
+ let rd = 0, rs1 = 1, simm13 = 3 in
+ def TA3 : F3_2<0b10, 0b111010, (outs), (ins),
+ "ta 3",
+ [(flushw)]>;
+}
+
// FpMOVD/FpNEGD/FpABSD - These are lowered to single-precision ops by the
// fpmover pass.
let Predicates = [HasNoV9] in { // Only emit these in V8 mode.
@@ -233,32 +248,39 @@ let Predicates = [HasNoV9] in { // Only emit these in V8 mode.
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
// instruction selection into a branch sequence. This has to handle all
// permutations of selection between i32/f32/f64 on ICC and FCC.
-let usesCustomInserter = 1 in { // Expanded after instruction selection.
+ // Expanded after instruction selection.
+let Uses = [ICC], usesCustomInserter = 1 in {
def SELECT_CC_Int_ICC
: Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
"; SELECT_CC_Int_ICC PSEUDO!",
[(set IntRegs:$dst, (SPselecticc IntRegs:$T, IntRegs:$F,
imm:$Cond))]>;
- def SELECT_CC_Int_FCC
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
- "; SELECT_CC_Int_FCC PSEUDO!",
- [(set IntRegs:$dst, (SPselectfcc IntRegs:$T, IntRegs:$F,
- imm:$Cond))]>;
def SELECT_CC_FP_ICC
: Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
"; SELECT_CC_FP_ICC PSEUDO!",
[(set FPRegs:$dst, (SPselecticc FPRegs:$T, FPRegs:$F,
imm:$Cond))]>;
- def SELECT_CC_FP_FCC
- : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
- "; SELECT_CC_FP_FCC PSEUDO!",
- [(set FPRegs:$dst, (SPselectfcc FPRegs:$T, FPRegs:$F,
- imm:$Cond))]>;
+
def SELECT_CC_DFP_ICC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_ICC PSEUDO!",
[(set DFPRegs:$dst, (SPselecticc DFPRegs:$T, DFPRegs:$F,
imm:$Cond))]>;
+}
+
+let usesCustomInserter = 1, Uses = [FCC] in {
+
+ def SELECT_CC_Int_FCC
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_Int_FCC PSEUDO!",
+ [(set IntRegs:$dst, (SPselectfcc IntRegs:$T, IntRegs:$F,
+ imm:$Cond))]>;
+
+ def SELECT_CC_FP_FCC
+ : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_FP_FCC PSEUDO!",
+ [(set FPRegs:$dst, (SPselectfcc FPRegs:$T, FPRegs:$F,
+ imm:$Cond))]>;
def SELECT_CC_DFP_FCC
: Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
"; SELECT_CC_DFP_FCC PSEUDO!",
@@ -272,6 +294,9 @@ let usesCustomInserter = 1 in { // Expanded after instruction selection.
let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
let rd = O7.Num, rs1 = G0.Num, simm13 = 8 in
def RETL: F3_2<2, 0b111000, (outs), (ins), "retl", [(retflag)]>;
+
+ let rd = I7.Num, rs1 = G0.Num, simm13 = 8 in
+ def RET: F3_2<2, 0b111000, (outs), (ins), "ret", []>;
}
// Section B.1 - Load Integer Instructions, p. 90
@@ -436,28 +461,34 @@ def LEA_ADDri : F3_2<2, 0b000000,
let Defs = [ICC] in
defm ADDCC : F3_12<"addcc", 0b010000, addc>;
-defm ADDX : F3_12<"addx", 0b001000, adde>;
+let Uses = [ICC] in
+ defm ADDX : F3_12<"addx", 0b001000, adde>;
// Section B.15 - Subtract Instructions, p. 110
defm SUB : F3_12 <"sub" , 0b000100, sub>;
-defm SUBX : F3_12 <"subx" , 0b001100, sube>;
+let Uses = [ICC] in
+ defm SUBX : F3_12 <"subx" , 0b001100, sube>;
-let Defs = [ICC] in {
+let Defs = [ICC] in
defm SUBCC : F3_12 <"subcc", 0b010100, SPcmpicc>;
+let Uses = [ICC], Defs = [ICC] in
def SUBXCCrr: F3_1<2, 0b011100,
(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
"subxcc $b, $c, $dst", []>;
-}
-// Section B.18 - Multiply Instructions, p. 113
-defm UMUL : F3_12np<"umul", 0b001010>;
-defm SMUL : F3_12 <"smul", 0b001011, mul>;
+// Section B.18 - Multiply Instructions, p. 113
+let Defs = [Y] in {
+ defm UMUL : F3_12np<"umul", 0b001010>;
+ defm SMUL : F3_12 <"smul", 0b001011, mul>;
+}
// Section B.19 - Divide Instructions, p. 115
-defm UDIV : F3_12np<"udiv", 0b001110>;
-defm SDIV : F3_12np<"sdiv", 0b001111>;
+let Defs = [Y] in {
+ defm UDIV : F3_12np<"udiv", 0b001110>;
+ defm SDIV : F3_12np<"sdiv", 0b001111>;
+}
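
The new Defs = [Y] annotations model the SPARC Y register: umul/smul write the low 32 bits of the 64-bit product to the destination register and the high 32 bits to %y (read back with "rd %y", hence the Uses = [Y] on RDY later in this patch), and the divide instructions involve %y as the upper half of the 64-bit dividend. A standalone sketch of the multiply split (values illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      // SPARC umul: low 32 bits of the product go to rd, high 32 bits to %y.
      uint32_t A = 0x12345678, B = 0x9abcdef0;
      uint64_t Prod = uint64_t(A) * uint64_t(B);
      uint32_t Rd = uint32_t(Prod);       // destination register
      uint32_t Y  = uint32_t(Prod >> 32); // the Y register
      assert(((uint64_t(Y) << 32) | Rd) == Prod);
      return 0;
    }
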
// Section B.20 - SAVE and RESTORE, p. 117
defm SAVE : F3_12np<"save" , 0b111100>;
@@ -504,11 +535,12 @@ let Uses = [FCC] in
// Section B.24 - Call and Link Instruction, p. 125
// This is the only Format 1 instruction
-let Uses = [O0, O1, O2, O3, O4, O5],
+let Uses = [O6],
hasDelaySlot = 1, isCall = 1,
Defs = [O0, O1, O2, O3, O4, O5, O7, G1, G2, G3, G4, G5, G6, G7,
- D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15] in {
- def CALL : InstSP<(outs), (ins calltarget:$dst),
+ D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
+ ICC, FCC, Y] in {
+ def CALL : InstSP<(outs), (ins calltarget:$dst, variable_ops),
"call $dst", []> {
bits<30> disp;
let op = 1;
@@ -517,28 +549,30 @@ let Uses = [O0, O1, O2, O3, O4, O5],
// indirect calls
def JMPLrr : F3_1<2, 0b111000,
- (outs), (ins MEMrr:$ptr),
+ (outs), (ins MEMrr:$ptr, variable_ops),
"call $ptr",
[(call ADDRrr:$ptr)]>;
def JMPLri : F3_2<2, 0b111000,
- (outs), (ins MEMri:$ptr),
+ (outs), (ins MEMri:$ptr, variable_ops),
"call $ptr",
[(call ADDRri:$ptr)]>;
}
// Section B.28 - Read State Register Instructions
-def RDY : F3_1<2, 0b101000,
- (outs IntRegs:$dst), (ins),
- "rd %y, $dst", []>;
+let Uses = [Y] in
+ def RDY : F3_1<2, 0b101000,
+ (outs IntRegs:$dst), (ins),
+ "rd %y, $dst", []>;
// Section B.29 - Write State Register Instructions
-def WRYrr : F3_1<2, 0b110000,
- (outs), (ins IntRegs:$b, IntRegs:$c),
- "wr $b, $c, %y", []>;
-def WRYri : F3_2<2, 0b110000,
- (outs), (ins IntRegs:$b, i32imm:$c),
- "wr $b, $c, %y", []>;
-
+let Defs = [Y] in {
+ def WRYrr : F3_1<2, 0b110000,
+ (outs), (ins IntRegs:$b, IntRegs:$c),
+ "wr $b, $c, %y", []>;
+ def WRYri : F3_2<2, 0b110000,
+ (outs), (ins IntRegs:$b, i32imm:$c),
+ "wr $b, $c, %y", []>;
+}
// Convert Integer to Floating-point Instructions, p. 141
def FITOS : F3_3<2, 0b110100, 0b011000100,
(outs FPRegs:$dst), (ins FPRegs:$src),
@@ -660,48 +694,57 @@ let Defs = [FCC] in {
let Predicates = [HasV9], Constraints = "$T = $dst" in {
// Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
// FIXME: Add instruction encodings for the JIT some day.
- def MOVICCrr
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
- "mov$cc %icc, $F, $dst",
- [(set IntRegs:$dst,
- (SPselecticc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
- def MOVICCri
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
- "mov$cc %icc, $F, $dst",
- [(set IntRegs:$dst,
- (SPselecticc simm11:$F, IntRegs:$T, imm:$cc))]>;
-
- def MOVFCCrr
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
- "mov$cc %fcc0, $F, $dst",
- [(set IntRegs:$dst,
- (SPselectfcc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
- def MOVFCCri
- : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
- "mov$cc %fcc0, $F, $dst",
- [(set IntRegs:$dst,
- (SPselectfcc simm11:$F, IntRegs:$T, imm:$cc))]>;
-
- def FMOVS_ICC
- : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
- "fmovs$cc %icc, $F, $dst",
- [(set FPRegs:$dst,
- (SPselecticc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
- def FMOVD_ICC
- : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
- "fmovd$cc %icc, $F, $dst",
- [(set DFPRegs:$dst,
- (SPselecticc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
- def FMOVS_FCC
- : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
- "fmovs$cc %fcc0, $F, $dst",
- [(set FPRegs:$dst,
- (SPselectfcc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
- def FMOVD_FCC
- : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
- "fmovd$cc %fcc0, $F, $dst",
- [(set DFPRegs:$dst,
- (SPselectfcc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
+ let Uses = [ICC] in {
+ def MOVICCrr
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
+ "mov$cc %icc, $F, $dst",
+ [(set IntRegs:$dst,
+ (SPselecticc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
+ def MOVICCri
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
+ "mov$cc %icc, $F, $dst",
+ [(set IntRegs:$dst,
+ (SPselecticc simm11:$F, IntRegs:$T, imm:$cc))]>;
+ }
+
+ let Uses = [FCC] in {
+ def MOVFCCrr
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, CCOp:$cc),
+ "mov$cc %fcc0, $F, $dst",
+ [(set IntRegs:$dst,
+ (SPselectfcc IntRegs:$F, IntRegs:$T, imm:$cc))]>;
+ def MOVFCCri
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, i32imm:$F, CCOp:$cc),
+ "mov$cc %fcc0, $F, $dst",
+ [(set IntRegs:$dst,
+ (SPselectfcc simm11:$F, IntRegs:$T, imm:$cc))]>;
+ }
+
+ let Uses = [ICC] in {
+ def FMOVS_ICC
+ : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
+ "fmovs$cc %icc, $F, $dst",
+ [(set FPRegs:$dst,
+ (SPselecticc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
+ def FMOVD_ICC
+ : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
+ "fmovd$cc %icc, $F, $dst",
+ [(set DFPRegs:$dst,
+ (SPselecticc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
+ }
+
+ let Uses = [FCC] in {
+ def FMOVS_FCC
+ : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, CCOp:$cc),
+ "fmovs$cc %fcc0, $F, $dst",
+ [(set FPRegs:$dst,
+ (SPselectfcc FPRegs:$F, FPRegs:$T, imm:$cc))]>;
+ def FMOVD_FCC
+ : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, CCOp:$cc),
+ "fmovd$cc %fcc0, $F, $dst",
+ [(set DFPRegs:$dst,
+ (SPselectfcc DFPRegs:$F, DFPRegs:$T, imm:$cc))]>;
+ }
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
index e34c131..0b74308 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
@@ -24,16 +24,23 @@ namespace llvm {
/// VarArgsFrameOffset - Frame offset to start of varargs area.
int VarArgsFrameOffset;
+ /// SRetReturnReg - Holds the virtual register into which the sret
+ /// argument is passed.
+ unsigned SRetReturnReg;
public:
- SparcMachineFunctionInfo() : GlobalBaseReg(0), VarArgsFrameOffset(0) {}
+ SparcMachineFunctionInfo()
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0) {}
explicit SparcMachineFunctionInfo(MachineFunction &MF)
- : GlobalBaseReg(0), VarArgsFrameOffset(0) {}
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0) {}
unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
int getVarArgsFrameOffset() const { return VarArgsFrameOffset; }
void setVarArgsFrameOffset(int Offset) { VarArgsFrameOffset = Offset; }
+
+ unsigned getSRetReturnReg() const { return SRetReturnReg; }
+ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
};
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
index c85db20..b010d04 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -52,10 +52,6 @@ BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-bool SparcRegisterInfo::hasFP(const MachineFunction &MF) const {
- return false;
-}
-
void SparcRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
@@ -112,55 +108,6 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
void SparcRegisterInfo::
processFunctionBeforeFrameFinalized(MachineFunction &MF) const {}
-void SparcRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Get the number of bytes to allocate from the FrameInfo
- int NumBytes = (int) MFI->getStackSize();
-
- // Emit the correct save instruction based on the number of bytes in
- // the frame. Minimum stack frame size according to V8 ABI is:
- // 16 words for register window spill
- // 1 word for address of returned aggregate-value
- // + 6 words for passing parameters on the stack
- // ----------
- // 23 words * 4 bytes per word = 92 bytes
- NumBytes += 92;
-
- // Round up to next doubleword boundary -- a double-word boundary
- // is required by the ABI.
- NumBytes = (NumBytes + 7) & ~7;
- NumBytes = -NumBytes;
-
- if (NumBytes >= -4096) {
- BuildMI(MBB, MBBI, dl, TII.get(SP::SAVEri), SP::O6)
- .addReg(SP::O6).addImm(NumBytes);
- } else {
- // Emit this the hard way. This clobbers G1 which we always know is
- // available here.
- unsigned OffHi = (unsigned)NumBytes >> 10U;
- BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1).addImm(OffHi);
- // Emit G1 = G1 + I6
- BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
- .addReg(SP::G1).addImm(NumBytes & ((1 << 10)-1));
- BuildMI(MBB, MBBI, dl, TII.get(SP::SAVErr), SP::O6)
- .addReg(SP::O6).addReg(SP::G1);
- }
-}
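
The removed prologue (its replacement lives in the new SparcFrameLowering) encodes the V8 minimum frame spelled out in its comment: 16 words of register-window spill, 1 word for the aggregate-return address, and 6 words of outgoing parameters, i.e. 23 words = 92 bytes, doubleword-rounded. A standalone check of that arithmetic:

    #include <cassert>

    int main() {
      int WindowSpill = 16 * 4; // register window save area
      int AggrRetPtr  = 1 * 4;  // address of returned aggregate value
      int OutParams   = 6 * 4;  // stack slots for the first six parameters
      assert(WindowSpill + AggrRetPtr + OutParams == 92);

      // Doubleword rounding used by the prologue: (NumBytes + 7) & ~7.
      int NumBytes = 92 + 13; // 13 hypothetical bytes of locals
      assert(((NumBytes + 7) & ~7) == 112);
      return 0;
    }
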
-
-void SparcRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- DebugLoc dl = MBBI->getDebugLoc();
- assert(MBBI->getOpcode() == SP::RETL &&
- "Can only put epilog before 'retl' instruction!");
- BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
- .addReg(SP::G0);
-}
-
unsigned SparcRegisterInfo::getRARegister() const {
return SP::I7;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
index 020ce56..d930b53 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -26,16 +26,14 @@ class Type;
struct SparcRegisterInfo : public SparcGenRegisterInfo {
SparcSubtarget &Subtarget;
const TargetInstrInfo &TII;
-
+
SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);
- /// Code Generation virtual methods...
+ /// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -45,9 +43,6 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
index fede929..5ef4dae 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
@@ -45,6 +45,9 @@ class Rd<bits<5> num, string n, list<Register> subregs> : SparcReg<n> {
def ICC : SparcCtrlReg<"ICC">;
def FCC : SparcCtrlReg<"FCC">;
+// Y register
+def Y : SparcCtrlReg<"Y">;
+
// Integer registers
def G0 : Ri< 0, "G0">, DwarfRegNum<[0]>;
def G1 : Ri< 1, "G1">, DwarfRegNum<[1]>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index b58d6ba..b84eab5 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -10,9 +10,9 @@
//
//===----------------------------------------------------------------------===//
+#include "Sparc.h"
#include "SparcMCAsmInfo.h"
#include "SparcTargetMachine.h"
-#include "Sparc.h"
#include "llvm/PassManager.h"
#include "llvm/Target/TargetRegistry.h"
using namespace llvm;
@@ -34,8 +34,8 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, const std::string &TT,
: LLVMTargetMachine(T, TT),
Subtarget(TT, FS, is64bit),
DataLayout(Subtarget.getDataLayout()),
- TLInfo(*this), TSInfo(*this), InstrInfo(Subtarget),
- FrameInfo(TargetFrameInfo::StackGrowsDown, 8, 0) {
+ TLInfo(*this), TSInfo(*this), InstrInfo(Subtarget),
+ FrameLowering(Subtarget) {
}
bool SparcTargetMachine::addInstSelector(PassManagerBase &PM,
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
index 322c82a..c4bb6bd 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -14,13 +14,14 @@
#ifndef SPARCTARGETMACHINE_H
#define SPARCTARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "SparcInstrInfo.h"
-#include "SparcSubtarget.h"
#include "SparcISelLowering.h"
+#include "SparcFrameLowering.h"
#include "SparcSelectionDAGInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -30,13 +31,15 @@ class SparcTargetMachine : public LLVMTargetMachine {
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
SparcInstrInfo InstrInfo;
- TargetFrameInfo FrameInfo;
+ SparcFrameLowering FrameLowering;
public:
SparcTargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool is64bit);
virtual const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const SparcSubtarget *getSubtargetImpl() const{ return &Subtarget; }
virtual const SparcRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
diff --git a/contrib/llvm/lib/Target/SubtargetFeature.cpp b/contrib/llvm/lib/Target/SubtargetFeature.cpp
index b35190a..3cf95b5 100644
--- a/contrib/llvm/lib/Target/SubtargetFeature.cpp
+++ b/contrib/llvm/lib/Target/SubtargetFeature.cpp
@@ -18,6 +18,7 @@
#include <algorithm>
#include <cassert>
#include <cctype>
+#include <cstdlib>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -162,7 +163,7 @@ static void Help(const SubtargetFeatureKV *CPUTable, size_t CPUTableSize,
errs() << "Use +feature to enable a feature, or -feature to disable it.\n"
<< "For example, llc -mcpu=mycpu -mattr=+feature1,-feature2\n";
- exit(1);
+ std::exit(1);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index d7ac8f5..fd4d8b7 100644
--- a/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -55,9 +55,15 @@ namespace {
void printS16ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
O << (int16_t)MI->getOperand(OpNum).getImm();
}
+ void printU16ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
+ O << (uint16_t)MI->getOperand(OpNum).getImm();
+ }
void printS32ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
O << (int32_t)MI->getOperand(OpNum).getImm();
}
+ void printU32ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
+ O << (uint32_t)MI->getOperand(OpNum).getImm();
+ }
void printInstruction(const MachineInstr *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
new file mode 100644
index 0000000..2ad84a2
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -0,0 +1,386 @@
+//===-- SystemZFrameLowering.cpp - SystemZ Frame Information -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SystemZ implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZFrameLowering.h"
+#include "SystemZInstrBuilder.h"
+#include "SystemZInstrInfo.h"
+#include "SystemZMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+SystemZFrameLowering::SystemZFrameLowering(const SystemZSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, -160), STI(sti) {
+ // Fill the spill offsets map
+ static const unsigned SpillOffsTab[][2] = {
+ { SystemZ::R2D, 0x10 },
+ { SystemZ::R3D, 0x18 },
+ { SystemZ::R4D, 0x20 },
+ { SystemZ::R5D, 0x28 },
+ { SystemZ::R6D, 0x30 },
+ { SystemZ::R7D, 0x38 },
+ { SystemZ::R8D, 0x40 },
+ { SystemZ::R9D, 0x48 },
+ { SystemZ::R10D, 0x50 },
+ { SystemZ::R11D, 0x58 },
+ { SystemZ::R12D, 0x60 },
+ { SystemZ::R13D, 0x68 },
+ { SystemZ::R14D, 0x70 },
+ { SystemZ::R15D, 0x78 }
+ };
+
+ RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
+
+ for (unsigned i = 0, e = array_lengthof(SpillOffsTab); i != e; ++i)
+ RegSpillOffsets[SpillOffsTab[i][0]] = SpillOffsTab[i][1];
+}
+
+/// needsFP - Return true if the specified function should have a dedicated
+/// frame pointer register. This is true if the function has variable sized
+/// allocas or if frame pointer elimination is disabled.
+bool SystemZFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
+}
+
+/// emitSPUpdate - Emit a series of instructions to increment / decrement the
+/// stack pointer by a constant value.
+static
+void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ int64_t NumBytes, const TargetInstrInfo &TII) {
+ unsigned Opc; uint64_t Chunk;
+ bool isSub = NumBytes < 0;
+ uint64_t Offset = isSub ? -NumBytes : NumBytes;
+
+ if (Offset >= (1LL << 15) - 1) {
+ Opc = SystemZ::ADD64ri32;
+ Chunk = (1LL << 31) - 1;
+ } else {
+ Opc = SystemZ::ADD64ri16;
+ Chunk = (1LL << 15) - 1;
+ }
+
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ while (Offset) {
+ uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), SystemZ::R15D)
+ .addReg(SystemZ::R15D).addImm(isSub ? -ThisVal : ThisVal);
+ // The PSW implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ Offset -= ThisVal;
+ }
+}
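
emitSPUpdate above picks the add with a 16-bit immediate when the offset is small and otherwise the 32-bit-immediate form, then loops so that arbitrarily large adjustments come out as a sequence of maximal chunks. The same loop in standalone form (one counter per would-be instruction; the adjustment size is illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t NumBytes = -(1LL << 20); // a large downward adjustment
      bool isSub = NumBytes < 0;
      uint64_t Offset = isSub ? -NumBytes : NumBytes;
      uint64_t Chunk = (Offset >= (1ULL << 15) - 1) ? (1ULL << 31) - 1
                                                    : (1ULL << 15) - 1;
      int Insns = 0;
      while (Offset) {
        uint64_t ThisVal = Offset > Chunk ? Chunk : Offset;
        Offset -= ThisVal; // one ADD r15, +/-ThisVal per iteration
        ++Insns;
      }
      assert(Insns == 1); // 2^20 fits in a single 31-bit-immediate add
      return 0;
    }
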
+
+void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const SystemZInstrInfo &TII =
+ *static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
+ SystemZMachineFunctionInfo *SystemZMFI =
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ // Note that the area for callee-saved registers is already allocated, so we
+ // need to 'undo' that part of the stack movement.
+ uint64_t StackSize = MFI->getStackSize();
+ StackSize -= SystemZMFI->getCalleeSavedFrameSize();
+
+ uint64_t NumBytes = StackSize - getOffsetOfLocalArea();
+
+ // Skip the callee-saved push instructions.
+ while (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == SystemZ::MOV64mr ||
+ MBBI->getOpcode() == SystemZ::MOV64mrm))
+ ++MBBI;
+
+ if (MBBI != MBB.end())
+ DL = MBBI->getDebugLoc();
+
+ // adjust stack pointer: R15 -= numbytes
+ if (StackSize || MFI->hasCalls()) {
+ assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
+ "Invalid stack frame calculation!");
+ emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, TII);
+ }
+
+ if (hasFP(MF)) {
+ // Update R11 with the new base value...
+ BuildMI(MBB, MBBI, DL, TII.get(SystemZ::MOV64rr), SystemZ::R11D)
+ .addReg(SystemZ::R15D);
+
+ // Mark the FramePtr as live-in in every block except the entry.
+ for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ I != E; ++I)
+ I->addLiveIn(SystemZ::R11D);
+
+ }
+}
+
+void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const SystemZInstrInfo &TII =
+ *static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
+ SystemZMachineFunctionInfo *SystemZMFI =
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ unsigned RetOpcode = MBBI->getOpcode();
+
+ switch (RetOpcode) {
+ case SystemZ::RET: break; // These are ok
+ default:
+ assert(0 && "Can only insert epilog into returning blocks");
+ }
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ // Note that the area for callee-saved registers is already allocated, so we
+ // need to 'undo' that part of the stack movement.
+ uint64_t StackSize =
+ MFI->getStackSize() - SystemZMFI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = StackSize - getOffsetOfLocalArea();
+
+ // Skip the final terminator instruction.
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ --MBBI;
+ if (!PI->getDesc().isTerminator())
+ break;
+ }
+
+ // When the callee-saved restores were emitted, the stack frame was not yet
+ // finalized (and thus the stack size was unknown). Adjust the offset now
+ // that the full stack size is known.
+ if (StackSize || MFI->hasCalls()) {
+ assert((MBBI->getOpcode() == SystemZ::MOV64rmm ||
+ MBBI->getOpcode() == SystemZ::MOV64rm) &&
+ "Expected to see callee-save register restore code");
+ assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
+ "Invalid stack frame calculation!");
+
+ unsigned i = 0;
+ MachineInstr &MI = *MBBI;
+ while (!MI.getOperand(i).isImm()) {
+ ++i;
+ assert(i < MI.getNumOperands() && "Unexpected restore code!");
+ }
+
+ uint64_t Offset = NumBytes + MI.getOperand(i).getImm();
+ // If Offset does not fit into the 20-bit signed displacement field, we need
+ // to emit some additional code...
+ if (Offset > 524287) {
+ // Fold as much of the displacement as possible into the load instruction.
+ NumBytes = Offset - 524287;
+ Offset = 524287;
+ emitSPUpdate(MBB, MBBI, NumBytes, TII);
+ }
+
+ MI.getOperand(i).ChangeToImmediate(Offset);
+ }
+}
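
The epilogue above folds the frame-teardown offset into the displacement of the callee-saved restore, but a z/Architecture long displacement is a signed 20-bit field, so anything beyond 524287 (2^19 - 1) is peeled off into a separate stack-pointer update. Numerically, with a hypothetical offset:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Offset = 600000;        // hypothetical NumBytes + existing disp
      uint64_t SPAdjust = 0;
      const uint64_t MaxDisp = 524287; // 2^19 - 1, signed 20-bit displacement
      if (Offset > MaxDisp) {
        SPAdjust = Offset - MaxDisp;   // emitted via emitSPUpdate
        Offset = MaxDisp;              // folded into the load instruction
      }
      assert(SPAdjust + Offset == 600000); // nothing is lost in the split
      assert(Offset <= MaxDisp);
      return 0;
    }
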
+
+int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const SystemZMachineFunctionInfo *SystemZMFI =
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ int Offset = MFI->getObjectOffset(FI) + MFI->getOffsetAdjustment();
+ uint64_t StackSize = MFI->getStackSize();
+
+ // Fixed objects are really located in the "previous" frame.
+ if (FI < 0)
+ StackSize -= SystemZMFI->getCalleeSavedFrameSize();
+
+ Offset += StackSize - getOffsetOfLocalArea();
+
+ // Skip the register save area if we generated the stack frame.
+ if (StackSize || MFI->hasCalls())
+ Offset -= getOffsetOfLocalArea();
+
+ return Offset;
+}
+
+bool
+SystemZFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
+ unsigned CalleeFrameSize = 0;
+
+ // Scan the callee-saved registers and find the bounds of the register spill
+ // area.
+ unsigned LowReg = 0, HighReg = 0, StartOffset = -1U, EndOffset = 0;
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (!SystemZ::FP64RegClass.contains(Reg)) {
+ unsigned Offset = RegSpillOffsets[Reg];
+ CalleeFrameSize += 8;
+ if (StartOffset > Offset) {
+ LowReg = Reg; StartOffset = Offset;
+ }
+ if (EndOffset < Offset) {
+ HighReg = Reg; EndOffset = RegSpillOffsets[Reg];
+ }
+ }
+ }
+
+ // Save information for epilogue inserter.
+ MFI->setCalleeSavedFrameSize(CalleeFrameSize);
+ MFI->setLowReg(LowReg); MFI->setHighReg(HighReg);
+
+ // Save GPRs
+ if (StartOffset) {
+ // Build a store instruction. Use the STORE MULTIPLE instruction if there is
+ // more than one register to store; otherwise use a plain STORE.
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MI, DL, TII.get((LowReg == HighReg ?
+ SystemZ::MOV64mr : SystemZ::MOV64mrm)));
+
+ // Add store operands.
+ MIB.addReg(SystemZ::R15D).addImm(StartOffset);
+ if (LowReg == HighReg)
+ MIB.addReg(0);
+ MIB.addReg(LowReg, RegState::Kill);
+ if (LowReg != HighReg)
+ MIB.addReg(HighReg, RegState::Kill);
+
+ // Do a second scan, marking the remaining regs as killed by the instruction.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ if (Reg != LowReg && Reg != HighReg)
+ MIB.addReg(Reg, RegState::ImplicitKill);
+ }
+ }
+
+ // Save FPRs
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (SystemZ::FP64RegClass.contains(Reg)) {
+ MBB.addLiveIn(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(),
+ &SystemZ::FP64RegClass, TRI);
+ }
+ }
+
+ return true;
+}
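
The bounds scan above finds the lowest- and highest-offset GPRs so that a single store-multiple (MOV64mrm) can cover the whole contiguous save area, with the registers in between added as implicit kills. A standalone sketch of the scan over a subset of the offsets from the constructor's table (register names are stand-ins):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      // Subset of the R2D..R15D save-area offsets from SpillOffsTab above.
      std::map<std::string, unsigned> SpillOffs = {
        {"R6D", 0x30}, {"R11D", 0x58}, {"R14D", 0x70}, {"R15D", 0x78}};

      std::string LowReg, HighReg;
      unsigned StartOffset = ~0u, EndOffset = 0;
      for (const auto &P : SpillOffs) {
        if (P.second < StartOffset) { LowReg = P.first;  StartOffset = P.second; }
        if (P.second > EndOffset)   { HighReg = P.first; EndOffset = P.second; }
      }
      // One store-multiple from LowReg to HighReg covers the whole range.
      assert(LowReg == "R6D" && HighReg == "R15D");
      return 0;
    }
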
+
+bool
+SystemZFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
+
+ // Restore FP registers
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (SystemZ::FP64RegClass.contains(Reg))
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
+ &SystemZ::FP64RegClass, TRI);
+ }
+
+ // Restore GP registers
+ unsigned LowReg = MFI->getLowReg(), HighReg = MFI->getHighReg();
+ unsigned StartOffset = RegSpillOffsets[LowReg];
+
+ if (StartOffset) {
+ // Build a load instruction. Use the LOAD MULTIPLE instruction if there is
+ // more than one register to load; otherwise use a plain LOAD.
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MI, DL, TII.get((LowReg == HighReg ?
+ SystemZ::MOV64rm : SystemZ::MOV64rmm)));
+ // Add load operands.
+ MIB.addReg(LowReg, RegState::Define);
+ if (LowReg != HighReg)
+ MIB.addReg(HighReg, RegState::Define);
+
+ MIB.addReg(hasFP(MF) ? SystemZ::R11D : SystemZ::R15D);
+ MIB.addImm(StartOffset);
+ if (LowReg == HighReg)
+ MIB.addReg(0);
+
+ // Do a second scan, marking the remaining regs as defined by the instruction.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (Reg != LowReg && Reg != HighReg)
+ MIB.addReg(Reg, RegState::ImplicitDefine);
+ }
+ }
+
+ return true;
+}
+
+void
+SystemZFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ // Determine whether R14/R15 will ever be clobbered inside the function and,
+ // if so, mark them as callee-saved.
+ MachineFrameInfo *FFI = MF.getFrameInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // Check whether high FPRs are ever used; if so, we need to save R15 as
+ // well.
+ static const unsigned HighFPRs[] = {
+ SystemZ::F8L, SystemZ::F9L, SystemZ::F10L, SystemZ::F11L,
+ SystemZ::F12L, SystemZ::F13L, SystemZ::F14L, SystemZ::F15L,
+ SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
+ SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S,
+ };
+
+ bool HighFPRsUsed = false;
+ for (unsigned i = 0, e = array_lengthof(HighFPRs); i != e; ++i)
+ HighFPRsUsed |= MRI.isPhysRegUsed(HighFPRs[i]);
+
+ if (FFI->hasCalls())
+ /* FIXME: function is varargs */
+ /* FIXME: function grabs RA */
+ /* FIXME: function calls eh_return */
+ MRI.setPhysRegUsed(SystemZ::R14D);
+
+ if (HighFPRsUsed ||
+ FFI->hasCalls() ||
+ FFI->getObjectIndexEnd() != 0 || // Contains automatic variables
+ FFI->hasVarSizedObjects() // Function calls dynamic alloca's
+ /* FIXME: function is varargs */)
+ MRI.setPhysRegUsed(SystemZ::R15D);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
new file mode 100644
index 0000000..1284b68
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -0,0 +1,57 @@
+//=- SystemZFrameLowering.h - Define frame lowering for z/System -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SystemZ declaration of the TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZ_FRAMEINFO_H
+#define SYSTEMZ_FRAMEINFO_H
+
+#include "SystemZ.h"
+#include "SystemZSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/ADT/IndexedMap.h"
+
+namespace llvm {
+ class SystemZSubtarget;
+
+class SystemZFrameLowering : public TargetFrameLowering {
+ IndexedMap<unsigned> RegSpillOffsets;
+protected:
+ const SystemZSubtarget &STI;
+
+public:
+ explicit SystemZFrameLowering(const SystemZSubtarget &sti);
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const { return true; }
+ bool hasFP(const MachineFunction &MF) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index ed290ca..2186ff1 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -120,18 +120,17 @@ namespace {
#include "SystemZGenDAGISel.inc"
private:
- bool SelectAddrRI12Only(SDNode *Op, SDValue& Addr,
+ bool SelectAddrRI12Only(SDValue& Addr,
SDValue &Base, SDValue &Disp);
- bool SelectAddrRI12(SDNode *Op, SDValue& Addr,
+ bool SelectAddrRI12(SDValue& Addr,
SDValue &Base, SDValue &Disp,
bool is12BitOnly = false);
- bool SelectAddrRI(SDNode *Op, SDValue& Addr,
- SDValue &Base, SDValue &Disp);
- bool SelectAddrRRI12(SDNode *Op, SDValue Addr,
+ bool SelectAddrRI(SDValue& Addr, SDValue &Base, SDValue &Disp);
+ bool SelectAddrRRI12(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index);
- bool SelectAddrRRI20(SDNode *Op, SDValue Addr,
+ bool SelectAddrRRI20(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index);
- bool SelectLAAddr(SDNode *Op, SDValue Addr,
+ bool SelectLAAddr(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index);
SDNode *Select(SDNode *Node);
@@ -142,8 +141,6 @@ namespace {
bool MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
bool is12Bit, unsigned Depth = 0);
bool MatchAddressBase(SDValue N, SystemZRRIAddressMode &AM);
- bool MatchAddressRI(SDValue N, SystemZRRIAddressMode &AM,
- bool is12Bit);
};
} // end anonymous namespace
@@ -355,12 +352,12 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM,
/// Returns true if the address can be represented by a base register plus
/// an unsigned 12-bit displacement [r+imm].
-bool SystemZDAGToDAGISel::SelectAddrRI12Only(SDNode *Op, SDValue& Addr,
+bool SystemZDAGToDAGISel::SelectAddrRI12Only(SDValue &Addr,
SDValue &Base, SDValue &Disp) {
- return SelectAddrRI12(Op, Addr, Base, Disp, /*is12BitOnly*/true);
+ return SelectAddrRI12(Addr, Base, Disp, /*is12BitOnly*/true);
}
-bool SystemZDAGToDAGISel::SelectAddrRI12(SDNode *Op, SDValue& Addr,
+bool SystemZDAGToDAGISel::SelectAddrRI12(SDValue &Addr,
SDValue &Base, SDValue &Disp,
bool is12BitOnly) {
SystemZRRIAddressMode AM20(/*isRI*/true), AM12(/*isRI*/true);
@@ -410,7 +407,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI12(SDNode *Op, SDValue& Addr,
/// Returns true if the address can be represented by a base register plus
/// a signed 20-bit displacement [r+imm].
-bool SystemZDAGToDAGISel::SelectAddrRI(SDNode *Op, SDValue& Addr,
+bool SystemZDAGToDAGISel::SelectAddrRI(SDValue& Addr,
SDValue &Base, SDValue &Disp) {
SystemZRRIAddressMode AM(/*isRI*/true);
bool Done = false;
@@ -453,7 +450,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI(SDNode *Op, SDValue& Addr,
/// Returns true if the address can be represented by a base register plus
/// index register plus an unsigned 12-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI12(SDNode *Op, SDValue Addr,
+bool SystemZDAGToDAGISel::SelectAddrRRI12(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index) {
SystemZRRIAddressMode AM20, AM12;
bool Done = false;
@@ -502,7 +499,7 @@ bool SystemZDAGToDAGISel::SelectAddrRRI12(SDNode *Op, SDValue Addr,
/// Returns true if the address can be represented by a base register plus
/// index register plus a signed 20-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI20(SDNode *Op, SDValue Addr,
+bool SystemZDAGToDAGISel::SelectAddrRRI20(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index) {
SystemZRRIAddressMode AM;
bool Done = false;
@@ -546,7 +543,7 @@ bool SystemZDAGToDAGISel::SelectAddrRRI20(SDNode *Op, SDValue Addr,
/// SelectLAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LA/LAY instruction.
-bool SystemZDAGToDAGISel::SelectLAAddr(SDNode *Op, SDValue Addr,
+bool SystemZDAGToDAGISel::SelectLAAddr(SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index) {
SystemZRRIAddressMode AM;
@@ -583,7 +580,7 @@ bool SystemZDAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
SDValue &Base, SDValue &Disp, SDValue &Index) {
if (ISD::isNON_EXTLoad(N.getNode()) &&
IsLegalToFold(N, P, P, OptLevel))
- return SelectAddrRRI20(P, N.getOperand(1), Base, Disp, Index);
+ return SelectAddrRRI20(N.getOperand(1), Base, Disp, Index);
return false;
}
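
The changes above track an LLVM-wide interface cleanup: ComplexPattern
selector callbacks no longer receive the root SDNode, only the address
operand to decompose. A hedged sketch of the new callback shape (the
enclosing class and the frame-index case are illustrative, not part of
this patch):

bool SketchDAGToDAGISel::SelectAddrRI(SDValue &Addr,
                                      SDValue &Base, SDValue &Disp) {
  // Split Addr into base register + signed 20-bit displacement.
  if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Addr.getValueType());
    Disp = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }
  return false; // let a simpler addressing pattern handle it
}
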
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 67f739f..d694f2e 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -147,8 +147,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
setOperationAction(ISD::FREM, MVT::f64, Expand);
// We have only 64-bit bitconverts
- setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
- setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::f32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
@@ -341,7 +341,7 @@ SystemZTargetLowering::LowerCCCArguments(SDValue Chain,
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue = DAG.getLoad(LocVT, dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
}
@@ -377,8 +377,8 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
-
MachineFunction &MF = DAG.getMachineFunction();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
// Offset to first argument stack slot.
const unsigned FirstArgOffset = 160;
@@ -431,7 +431,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
if (StackPtr.getNode() == 0)
StackPtr =
DAG.getCopyFromReg(Chain, dl,
- (RegInfo->hasFP(MF) ?
+ (TFI->hasFP(MF) ?
SystemZ::R11D : SystemZ::R15D),
getPointerTy());
@@ -441,7 +441,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
DAG.getIntPtrConstant(Offset));
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), Offset,
+ MachinePointerInfo(),
false, false, 0));
}
}
@@ -471,7 +471,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@@ -710,7 +710,7 @@ SDValue SystemZTargetLowering::LowerSELECT_CC(SDValue Op,
SDValue SystemZCC;
SDValue Flag = EmitCmp(LHS, RHS, CC, SystemZCC, DAG);
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(TrueV);
Ops.push_back(FalseV);
@@ -747,7 +747,7 @@ SDValue SystemZTargetLowering::LowerGlobalAddress(SDValue Op,
if (ExtraLoadRequired)
Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
- PseudoSourceValue::getGOT(), 0, false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, 0);
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
index fa87061..2f2ef08 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
@@ -115,9 +115,9 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
if (TID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- Flags, Offset,
- MFI.getObjectSize(FI),
+ MF.getMachineMemOperand(MachinePointerInfo(
+ PseudoSourceValue::getFixedStack(FI), Offset),
+ Flags, MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
return addOffset(MIB.addFrameIndex(FI), Offset)
.addMemOperand(MMO);
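
This hunk follows the new MachinePointerInfo API: instead of passing a
PseudoSourceValue and an offset as separate arguments to
getMachineMemOperand(), callers bundle them into one descriptor, and the
common cases get factory helpers. A sketch of the two equivalent spellings
for a fixed stack slot (the wrapper function is hypothetical):

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;

void pointerInfoSketch(int FI, int64_t Offset) {
  // Explicit form, as in addFrameReference() above:
  MachinePointerInfo PI(PseudoSourceValue::getFixedStack(FI), Offset);
  // Factory shorthand for the same thing, as in SystemZISelLowering.cpp:
  MachinePointerInfo PI2 = MachinePointerInfo::getFixedStack(FI, Offset);
  (void)PI; (void)PI2;
}
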
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 367bed3..be52803 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -28,28 +28,6 @@ using namespace llvm;
SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
: TargetInstrInfoImpl(SystemZInsts, array_lengthof(SystemZInsts)),
RI(tm, *this), TM(tm) {
- // Fill the spill offsets map
- static const unsigned SpillOffsTab[][2] = {
- { SystemZ::R2D, 0x10 },
- { SystemZ::R3D, 0x18 },
- { SystemZ::R4D, 0x20 },
- { SystemZ::R5D, 0x28 },
- { SystemZ::R6D, 0x30 },
- { SystemZ::R7D, 0x38 },
- { SystemZ::R8D, 0x40 },
- { SystemZ::R9D, 0x48 },
- { SystemZ::R10D, 0x50 },
- { SystemZ::R11D, 0x58 },
- { SystemZ::R12D, 0x60 },
- { SystemZ::R13D, 0x68 },
- { SystemZ::R14D, 0x70 },
- { SystemZ::R15D, 0x78 }
- };
-
- RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
-
- for (unsigned i = 0, e = array_lengthof(SpillOffsTab); i != e; ++i)
- RegSpillOffsets[SpillOffsTab[i][0]] = SpillOffsTab[i][1];
}
/// isGVStub - Return true if the GV requires an extra load to get the
@@ -211,134 +189,6 @@ unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
-bool
-SystemZInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineFunction &MF = *MBB.getParent();
- SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
- unsigned CalleeFrameSize = 0;
-
- // Scan the callee-saved and find the bounds of register spill area.
- unsigned LowReg = 0, HighReg = 0, StartOffset = -1U, EndOffset = 0;
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (!SystemZ::FP64RegClass.contains(Reg)) {
- unsigned Offset = RegSpillOffsets[Reg];
- CalleeFrameSize += 8;
- if (StartOffset > Offset) {
- LowReg = Reg; StartOffset = Offset;
- }
- if (EndOffset < Offset) {
- HighReg = Reg; EndOffset = RegSpillOffsets[Reg];
- }
- }
- }
-
- // Save information for epilogue inserter.
- MFI->setCalleeSavedFrameSize(CalleeFrameSize);
- MFI->setLowReg(LowReg); MFI->setHighReg(HighReg);
-
- // Save GPRs
- if (StartOffset) {
- // Build a store instruction. Use STORE MULTIPLE instruction if there are many
- // registers to store, otherwise - just STORE.
- MachineInstrBuilder MIB =
- BuildMI(MBB, MI, DL, get((LowReg == HighReg ?
- SystemZ::MOV64mr : SystemZ::MOV64mrm)));
-
- // Add store operands.
- MIB.addReg(SystemZ::R15D).addImm(StartOffset);
- if (LowReg == HighReg)
- MIB.addReg(0);
- MIB.addReg(LowReg, RegState::Kill);
- if (LowReg != HighReg)
- MIB.addReg(HighReg, RegState::Kill);
-
- // Do a second scan adding regs as being killed by instruction
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- if (Reg != LowReg && Reg != HighReg)
- MIB.addReg(Reg, RegState::ImplicitKill);
- }
- }
-
- // Save FPRs
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (SystemZ::FP64RegClass.contains(Reg)) {
- MBB.addLiveIn(Reg);
- storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(),
- &SystemZ::FP64RegClass, &RI);
- }
- }
-
- return true;
-}
-
-bool
-SystemZInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineFunction &MF = *MBB.getParent();
- const TargetRegisterInfo *RegInfo= MF.getTarget().getRegisterInfo();
- SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
-
- // Restore FP registers
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (SystemZ::FP64RegClass.contains(Reg))
- loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
- &SystemZ::FP64RegClass, &RI);
- }
-
- // Restore GP registers
- unsigned LowReg = MFI->getLowReg(), HighReg = MFI->getHighReg();
- unsigned StartOffset = RegSpillOffsets[LowReg];
-
- if (StartOffset) {
- // Build a load instruction. Use LOAD MULTIPLE instruction if there are many
- // registers to load, otherwise - just LOAD.
- MachineInstrBuilder MIB =
- BuildMI(MBB, MI, DL, get((LowReg == HighReg ?
- SystemZ::MOV64rm : SystemZ::MOV64rmm)));
- // Add store operands.
- MIB.addReg(LowReg, RegState::Define);
- if (LowReg != HighReg)
- MIB.addReg(HighReg, RegState::Define);
-
- MIB.addReg((RegInfo->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D));
- MIB.addImm(StartOffset);
- if (LowReg == HighReg)
- MIB.addReg(0);
-
- // Do a second scan adding regs as being defined by instruction
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (Reg != LowReg && Reg != HighReg)
- MIB.addReg(Reg, RegState::ImplicitDefine);
- }
- }
-
- return true;
-}
-
bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid Xbranch condition!");
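
The spill/restore logic deleted here is re-homed in SystemZFrameLowering.cpp
(added by this commit). Its core trick: scan the callee-saved list for the
GPRs with the smallest and largest save-area offsets so that a single
STORE MULTIPLE / LOAD MULTIPLE covers the whole contiguous range. A
self-contained restatement of that bound scan:

#include <utility>
#include <vector>

// Given (register, fixed save-area offset) pairs, find the registers at
// the ends of the range; everything between them is covered by one
// store-multiple/load-multiple instruction.
std::pair<unsigned, unsigned>
findSpillBounds(const std::vector<std::pair<unsigned, unsigned> > &RegOffs) {
  unsigned LowReg = 0, HighReg = 0;
  unsigned StartOffset = ~0u, EndOffset = 0;
  for (unsigned i = 0, e = RegOffs.size(); i != e; ++i) {
    unsigned Reg = RegOffs[i].first, Offset = RegOffs[i].second;
    if (Offset < StartOffset) { LowReg = Reg;  StartOffset = Offset; }
    if (Offset > EndOffset)   { HighReg = Reg; EndOffset = Offset; }
  }
  return std::make_pair(LowReg, HighReg);
}
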
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index c248f24..6cb7200 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -50,7 +50,6 @@ namespace SystemZII {
class SystemZInstrInfo : public TargetInstrInfoImpl {
const SystemZRegisterInfo RI;
SystemZTargetMachine &TM;
- IndexedMap<unsigned> RegSpillOffsets;
public:
explicit SystemZInstrInfo(SystemZTargetMachine &TM);
@@ -80,15 +79,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 8df07c0..11a39fc 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -46,15 +46,15 @@ def SDT_Address : SDTypeProfile<1, 1,
// SystemZ Specific Node Definitions.
//===----------------------------------------------------------------------===//
def SystemZretflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def SystemZcall : SDNode<"SystemZISD::CALL", SDT_SystemZCall,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag, SDNPVariadic]>;
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
def SystemZcallseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_SystemZCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def SystemZcallseq_end :
SDNode<"ISD::CALLSEQ_END", SDT_SystemZCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def SystemZcmp : SDNode<"SystemZISD::CMP", SDT_CmpTest>;
def SystemZucmp : SDNode<"SystemZISD::UCMP", SDT_CmpTest>;
def SystemZbrcond : SDNode<"SystemZISD::BRCOND", SDT_BrCond,
@@ -229,19 +229,19 @@ def MOV64ri16 : RII<0x9A7,
[(set GR64:$dst, immSExt16:$src)]>;
def MOV64rill16 : RII<0xFA5,
- (outs GR64:$dst), (ins i64imm:$src),
+ (outs GR64:$dst), (ins u16imm:$src),
"llill\t{$dst, $src}",
[(set GR64:$dst, i64ll16:$src)]>;
def MOV64rilh16 : RII<0xEA5,
- (outs GR64:$dst), (ins i64imm:$src),
+ (outs GR64:$dst), (ins u16imm:$src),
"llilh\t{$dst, $src}",
[(set GR64:$dst, i64lh16:$src)]>;
def MOV64rihl16 : RII<0xDA5,
- (outs GR64:$dst), (ins i64imm:$src),
+ (outs GR64:$dst), (ins u16imm:$src),
"llihl\t{$dst, $src}",
[(set GR64:$dst, i64hl16:$src)]>;
def MOV64rihh16 : RII<0xCA5,
- (outs GR64:$dst), (ins i64imm:$src),
+ (outs GR64:$dst), (ins u16imm:$src),
"llihh\t{$dst, $src}",
[(set GR64:$dst, i64hh16:$src)]>;
@@ -250,10 +250,10 @@ def MOV64ri32 : RILI<0x1C0,
"lgfi\t{$dst, $src}",
[(set GR64:$dst, immSExt32:$src)]>;
def MOV64rilo32 : RILI<0xFC0,
- (outs GR64:$dst), (ins i64imm:$src),
+ (outs GR64:$dst), (ins u32imm:$src),
"llilf\t{$dst, $src}",
[(set GR64:$dst, i64lo32:$src)]>;
-def MOV64rihi32 : RILI<0xEC0, (outs GR64:$dst), (ins i64imm:$src),
+def MOV64rihi32 : RILI<0xEC0, (outs GR64:$dst), (ins u32imm:$src),
"llihf\t{$dst, $src}",
[(set GR64:$dst, i64hi32:$src)]>;
}
@@ -642,42 +642,42 @@ def AND64rm : RXYI<0xE360, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
(implicit PSW)]>;
def AND32rill16 : RII<0xA57,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u16imm:$src2),
"nill\t{$dst, $src2}",
[(set GR32:$dst, (and GR32:$src1, i32ll16c:$src2))]>;
def AND64rill16 : RII<0xA57,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"nill\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64ll16c:$src2))]>;
def AND32rilh16 : RII<0xA56,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u16imm:$src2),
"nilh\t{$dst, $src2}",
[(set GR32:$dst, (and GR32:$src1, i32lh16c:$src2))]>;
def AND64rilh16 : RII<0xA56,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"nilh\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64lh16c:$src2))]>;
def AND64rihl16 : RII<0xA55,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"nihl\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64hl16c:$src2))]>;
def AND64rihh16 : RII<0xA54,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"nihh\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64hh16c:$src2))]>;
def AND32ri : RILI<0xC0B,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
"nilf\t{$dst, $src2}",
[(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
def AND64rilo32 : RILI<0xC0B,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
"nilf\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64lo32c:$src2))]>;
def AND64rihi32 : RILI<0xC0A,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
"nihf\t{$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64hi32c:$src2))]>;
@@ -707,41 +707,41 @@ def OR64rm : RXYI<0xE381, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
// FIXME: Provide proper encoding!
def OR32ri16 : RII<0xA5B,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
"oill\t{$dst, $src2}",
[(set GR32:$dst, (or GR32:$src1, i32ll16:$src2))]>;
def OR32ri16h : RII<0xA5A,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
"oilh\t{$dst, $src2}",
[(set GR32:$dst, (or GR32:$src1, i32lh16:$src2))]>;
def OR32ri : RILI<0xC0D,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
"oilf\t{$dst, $src2}",
[(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
def OR64rill16 : RII<0xA5B,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"oill\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64ll16:$src2))]>;
def OR64rilh16 : RII<0xA5A,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"oilh\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64lh16:$src2))]>;
def OR64rihl16 : RII<0xA59,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"oihl\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64hl16:$src2))]>;
def OR64rihh16 : RII<0xA58,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
"oihh\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64hh16:$src2))]>;
def OR64rilo32 : RILI<0xC0D,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
"oilf\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64lo32:$src2))]>;
def OR64rihi32 : RILI<0xC0C,
- (outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
+ (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
"oihf\t{$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64hi32:$src2))]>;
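
The operand-type tightening above matches what these instructions encode:
the llill/llilh/llihl/llihh and nill/nilh/oill/oilh families operate on one
16-bit halfword of a 64-bit register, and nilf/oilf/nihf/oihf on one 32-bit
word, so i64imm narrows to the new u16imm/u32imm classes. A self-contained
illustration of the halfword slicing the ll/lh/hl/hh suffixes denote:

#include <cassert>
#include <stdint.h>

// Index 0 = ll (bits 0-15), 1 = lh, 2 = hl, 3 = hh (bits 48-63).
uint16_t halfword(uint64_t V, unsigned Idx) {
  assert(Idx < 4 && "halfword index out of range");
  return (uint16_t)(V >> (16 * Idx));
}

int main() {
  // llihh materializes the 'hh' slice into bits 48-63 of the result.
  assert(halfword(0x0123456789ABCDEFULL, 3) == 0x0123); // hh
  assert(halfword(0x0123456789ABCDEFULL, 0) == 0xCDEF); // ll
  return 0;
}
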
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
index 4f7f70b..2dc7e7b 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
@@ -14,6 +14,7 @@
#include "SystemZMCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
@@ -24,6 +25,6 @@ SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
const MCSection *SystemZMCAsmInfo::
getNonexecutableStackSection(MCContext &Ctx) const{
- return Ctx.getELFSection(".note.GNU-stack", MCSectionELF::SHT_PROGBITS,
- 0, SectionKind::getMetadata(), false);
+ return Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS,
+ 0, SectionKind::getMetadata());
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
index 0de50fd..8b835cc 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
@@ -246,6 +246,14 @@ def s16imm : Operand<i32> {
def s16imm64 : Operand<i64> {
let PrintMethod = "printS16ImmOperand";
}
+// Unsigned i16
+def u16imm : Operand<i32> {
+ let PrintMethod = "printU16ImmOperand";
+}
+def u16imm64 : Operand<i64> {
+ let PrintMethod = "printU16ImmOperand";
+}
+
// Signed i20
def s20imm : Operand<i32> {
let PrintMethod = "printS20ImmOperand";
@@ -260,6 +268,13 @@ def s32imm : Operand<i32> {
def s32imm64 : Operand<i64> {
let PrintMethod = "printS32ImmOperand";
}
+// Unsigned i32
+def u32imm : Operand<i32> {
+ let PrintMethod = "printU32ImmOperand";
+}
+def u32imm64 : Operand<i64> {
+ let PrintMethod = "printU32ImmOperand";
+}
def imm_pcrel : Operand<i64> {
let PrintMethod = "printPCRelImmOperand";
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index f8d3e6a..28f94f4 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -49,49 +49,21 @@ SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
BitVector SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
- if (hasFP(MF))
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (TFI->hasFP(MF))
Reserved.set(SystemZ::R11D);
Reserved.set(SystemZ::R14D);
Reserved.set(SystemZ::R15D);
return Reserved;
}
-/// needsFP - Return true if the specified function should have a dedicated
-/// frame pointer register. This is true if the function has variable sized
-/// allocas or if frame pointer elimination is disabled.
-bool SystemZRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
-}
-
void SystemZRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
MBB.erase(I);
}
-int SystemZRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
- int FI) const {
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- int Offset = MFI->getObjectOffset(FI) + MFI->getOffsetAdjustment();
- uint64_t StackSize = MFI->getStackSize();
-
- // Fixed objects are really located in the "previous" frame.
- if (FI < 0)
- StackSize -= SystemZMFI->getCalleeSavedFrameSize();
-
- Offset += StackSize - TFI.getOffsetOfLocalArea();
-
- // Skip the register save area if we generated the stack frame.
- if (StackSize || MFI->hasCalls())
- Offset -= TFI.getOffsetOfLocalArea();
-
- return Offset;
-}
-
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS) const {
@@ -100,6 +72,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned i = 0;
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
while (!MI.getOperand(i).isFI()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
@@ -107,7 +81,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(i).getIndex();
- unsigned BasePtr = (hasFP(MF) ? SystemZ::R11D : SystemZ::R15D);
+ unsigned BasePtr = (TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D);
// This must be part of an rri or ri operand memory reference. Replace the
// FrameIndex with the base register BasePtr. Add an offset to the
@@ -117,7 +91,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Offset is either a 12-bit unsigned or a 20-bit signed integer.
// FIXME: handle "too long" displacements.
int Offset =
- getFrameIndexOffset(MF, FrameIndex) + MI.getOperand(i+1).getImm();
+ TFI->getFrameIndexOffset(MF, FrameIndex) + MI.getOperand(i+1).getImm();
// Check whether displacement is too long to fit into 12 bit zext field.
MI.setDesc(TII.getMemoryInstr(MI.getOpcode(), Offset));
@@ -125,178 +99,6 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i+1).ChangeToImmediate(Offset);
}
-void
-SystemZRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- // Determine whether R15/R14 will ever be clobbered inside the function. And
- // if yes - mark it as 'callee' saved.
- MachineFrameInfo *FFI = MF.getFrameInfo();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- // Check whether high FPRs are ever used, if yes - we need to save R15 as
- // well.
- static const unsigned HighFPRs[] = {
- SystemZ::F8L, SystemZ::F9L, SystemZ::F10L, SystemZ::F11L,
- SystemZ::F12L, SystemZ::F13L, SystemZ::F14L, SystemZ::F15L,
- SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
- SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S,
- };
-
- bool HighFPRsUsed = false;
- for (unsigned i = 0, e = array_lengthof(HighFPRs); i != e; ++i)
- HighFPRsUsed |= MRI.isPhysRegUsed(HighFPRs[i]);
-
- if (FFI->hasCalls())
- /* FIXME: function is varargs */
- /* FIXME: function grabs RA */
- /* FIXME: function calls eh_return */
- MRI.setPhysRegUsed(SystemZ::R14D);
-
- if (HighFPRsUsed ||
- FFI->hasCalls() ||
- FFI->getObjectIndexEnd() != 0 || // Contains automatic variables
- FFI->hasVarSizedObjects() // Function calls dynamic alloca's
- /* FIXME: function is varargs */)
- MRI.setPhysRegUsed(SystemZ::R15D);
-}
-
-/// emitSPUpdate - Emit a series of instructions to increment / decrement the
-/// stack pointer by a constant value.
-static
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- int64_t NumBytes, const TargetInstrInfo &TII) {
- unsigned Opc; uint64_t Chunk;
- bool isSub = NumBytes < 0;
- uint64_t Offset = isSub ? -NumBytes : NumBytes;
-
- if (Offset >= (1LL << 15) - 1) {
- Opc = SystemZ::ADD64ri32;
- Chunk = (1LL << 31) - 1;
- } else {
- Opc = SystemZ::ADD64ri16;
- Chunk = (1LL << 15) - 1;
- }
-
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- while (Offset) {
- uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(Opc), SystemZ::R15D)
- .addReg(SystemZ::R15D).addImm(isSub ? -ThisVal : ThisVal);
- // The PSW implicit def is dead.
- MI->getOperand(3).setIsDead();
- Offset -= ThisVal;
- }
-}
-
-void SystemZRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Get the number of bytes to allocate from the FrameInfo.
- // Note that area for callee-saved stuff is already allocated, thus we need to
- // 'undo' the stack movement.
- uint64_t StackSize = MFI->getStackSize();
- StackSize -= SystemZMFI->getCalleeSavedFrameSize();
-
- uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();
-
- // Skip the callee-saved push instructions.
- while (MBBI != MBB.end() &&
- (MBBI->getOpcode() == SystemZ::MOV64mr ||
- MBBI->getOpcode() == SystemZ::MOV64mrm))
- ++MBBI;
-
- if (MBBI != MBB.end())
- DL = MBBI->getDebugLoc();
-
- // adjust stack pointer: R15 -= numbytes
- if (StackSize || MFI->hasCalls()) {
- assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
- "Invalid stack frame calculation!");
- emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, TII);
- }
-
- if (hasFP(MF)) {
- // Update R11 with the new base value...
- BuildMI(MBB, MBBI, DL, TII.get(SystemZ::MOV64rr), SystemZ::R11D)
- .addReg(SystemZ::R15D);
-
- // Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
- I != E; ++I)
- I->addLiveIn(SystemZ::R11D);
-
- }
-}
-
-void SystemZRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- unsigned RetOpcode = MBBI->getOpcode();
-
- switch (RetOpcode) {
- case SystemZ::RET: break; // These are ok
- default:
- assert(0 && "Can only insert epilog into returning blocks");
- }
-
- // Get the number of bytes to allocate from the FrameInfo
- // Note that area for callee-saved stuff is already allocated, thus we need to
- // 'undo' the stack movement.
- uint64_t StackSize =
- MFI->getStackSize() - SystemZMFI->getCalleeSavedFrameSize();
- uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();
-
- // Skip the final terminator instruction.
- while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
- --MBBI;
- if (!PI->getDesc().isTerminator())
- break;
- }
-
- // During callee-saved restores emission stack frame was not yet finialized
- // (and thus - the stack size was unknown). Tune the offset having full stack
- // size in hands.
- if (StackSize || MFI->hasCalls()) {
- assert((MBBI->getOpcode() == SystemZ::MOV64rmm ||
- MBBI->getOpcode() == SystemZ::MOV64rm) &&
- "Expected to see callee-save register restore code");
- assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
- "Invalid stack frame calculation!");
-
- unsigned i = 0;
- MachineInstr &MI = *MBBI;
- while (!MI.getOperand(i).isImm()) {
- ++i;
- assert(i < MI.getNumOperands() && "Unexpected restore code!");
- }
-
- uint64_t Offset = NumBytes + MI.getOperand(i).getImm();
- // If Offset does not fit into 20-bit signed displacement field we need to
- // emit some additional code...
- if (Offset > 524287) {
- // Fold the displacement into load instruction as much as possible.
- NumBytes = Offset - 524287;
- Offset = 524287;
- emitSPUpdate(MBB, MBBI, NumBytes, TII);
- }
-
- MI.getOperand(i).ChangeToImmediate(Offset);
- }
-}
-
unsigned SystemZRegisterInfo::getRARegister() const {
assert(0 && "What is the return address register");
return 0;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
index 5dae865..b450798 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -34,11 +34,6 @@ struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const { return true; }
- bool hasFP(const MachineFunction &MF) const;
-
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -46,13 +41,6 @@ struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
index 33be8dd..0028c85 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -190,8 +190,8 @@ def GR32 : RegisterClass<"SystemZ", [i32], 32,
GR32Class::iterator
GR32Class::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG32_nofp;
else
return SystemZ_REG32;
@@ -199,8 +199,8 @@ def GR32 : RegisterClass<"SystemZ", [i32], 32,
GR32Class::iterator
GR32Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG32_nofp + (sizeof(SystemZ_REG32_nofp) / sizeof(unsigned));
else
return SystemZ_REG32 + (sizeof(SystemZ_REG32) / sizeof(unsigned));
@@ -237,8 +237,8 @@ def ADDR32 : RegisterClass<"SystemZ", [i32], 32,
ADDR32Class::iterator
ADDR32Class::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_ADDR32_nofp;
else
return SystemZ_ADDR32;
@@ -246,8 +246,8 @@ def ADDR32 : RegisterClass<"SystemZ", [i32], 32,
ADDR32Class::iterator
ADDR32Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_ADDR32_nofp + (sizeof(SystemZ_ADDR32_nofp) / sizeof(unsigned));
else
return SystemZ_ADDR32 + (sizeof(SystemZ_ADDR32) / sizeof(unsigned));
@@ -284,8 +284,8 @@ def GR64 : RegisterClass<"SystemZ", [i64], 64,
GR64Class::iterator
GR64Class::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG64_nofp;
else
return SystemZ_REG64;
@@ -293,8 +293,8 @@ def GR64 : RegisterClass<"SystemZ", [i64], 64,
GR64Class::iterator
GR64Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG64_nofp + (sizeof(SystemZ_REG64_nofp) / sizeof(unsigned));
else
return SystemZ_REG64 + (sizeof(SystemZ_REG64) / sizeof(unsigned));
@@ -331,8 +331,8 @@ def ADDR64 : RegisterClass<"SystemZ", [i64], 64,
ADDR64Class::iterator
ADDR64Class::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_ADDR64_nofp;
else
return SystemZ_ADDR64;
@@ -340,8 +340,8 @@ def ADDR64 : RegisterClass<"SystemZ", [i64], 64,
ADDR64Class::iterator
ADDR64Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_ADDR64_nofp + (sizeof(SystemZ_ADDR64_nofp) / sizeof(unsigned));
else
return SystemZ_ADDR64 + (sizeof(SystemZ_ADDR64) / sizeof(unsigned));
@@ -368,8 +368,8 @@ def GR64P : RegisterClass<"SystemZ", [v2i32], 64,
GR64PClass::iterator
GR64PClass::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG64P_nofp;
else
return SystemZ_REG64P;
@@ -377,8 +377,8 @@ def GR64P : RegisterClass<"SystemZ", [v2i32], 64,
GR64PClass::iterator
GR64PClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG64P_nofp + (sizeof(SystemZ_REG64P_nofp) / sizeof(unsigned));
else
return SystemZ_REG64P + (sizeof(SystemZ_REG64P) / sizeof(unsigned));
@@ -405,8 +405,8 @@ def GR128 : RegisterClass<"SystemZ", [v2i64], 128,
GR128Class::iterator
GR128Class::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG128_nofp;
else
return SystemZ_REG128;
@@ -414,8 +414,8 @@ def GR128 : RegisterClass<"SystemZ", [v2i64], 128,
GR128Class::iterator
GR128Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return SystemZ_REG128_nofp + (sizeof(SystemZ_REG128_nofp) / sizeof(unsigned));
else
return SystemZ_REG128 + (sizeof(SystemZ_REG128) / sizeof(unsigned));
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index f45827b..1603899 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -30,7 +30,7 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T,
DataLayout("E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
"-f64:64:64-f128:128:128-a0:16:16-n32:64"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameInfo(TargetFrameInfo::StackGrowsDown, 8, -160) {
+ FrameLowering(Subtarget) {
if (getRelocationModel() == Reloc::Default)
setRelocationModel(Reloc::Static);
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
index 6af829b..524f83d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -17,11 +17,12 @@
#include "SystemZInstrInfo.h"
#include "SystemZISelLowering.h"
+#include "SystemZFrameLowering.h"
#include "SystemZSelectionDAGInfo.h"
#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -34,15 +35,14 @@ class SystemZTargetMachine : public LLVMTargetMachine {
SystemZInstrInfo InstrInfo;
SystemZTargetLowering TLInfo;
SystemZSelectionDAGInfo TSInfo;
-
- // SystemZ does not have any call stack frame, therefore not having
- // any SystemZ specific FrameInfo class.
- TargetFrameInfo FrameInfo;
+ SystemZFrameLowering FrameLowering;
public:
SystemZTargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const SystemZInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetData *getTargetData() const { return &DataLayout;}
virtual const SystemZSubtarget *getSubtargetImpl() const { return &Subtarget; }
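
With the TargetFrameInfo member gone, frame queries route through the
target machine's new getFrameLowering() accessor; the allocation-order
snippets in SystemZRegisterInfo.td above all repeat the same pattern. A
sketch of the query (the free function is hypothetical):

#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

bool needsFramePointer(const TargetMachine &TM, const MachineFunction &MF) {
  // hasFP() moved from TargetRegisterInfo to TargetFrameLowering.
  const TargetFrameLowering *TFI = TM.getFrameLowering();
  return TFI->hasFP(MF);
}
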
diff --git a/contrib/llvm/lib/Target/Target.cpp b/contrib/llvm/lib/Target/Target.cpp
index f5c969a..0919fe4 100644
--- a/contrib/llvm/lib/Target/Target.cpp
+++ b/contrib/llvm/lib/Target/Target.cpp
@@ -7,12 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the C bindings for libLLVMTarget.a, which implements
-// target information.
+// This file implements the common infrastructure (including C bindings) for
+// libLLVMTarget.a, which implements target information.
//
//===----------------------------------------------------------------------===//
#include "llvm-c/Target.h"
+#include "llvm-c/Initialization.h"
+#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"
#include "llvm/Target/TargetData.h"
#include "llvm/LLVMContext.h"
@@ -20,6 +22,15 @@
using namespace llvm;
+void llvm::initializeTarget(PassRegistry &Registry) {
+ initializeTargetDataPass(Registry);
+ initializeTargetLibraryInfoPass(Registry);
+}
+
+void LLVMInitializeTarget(LLVMPassRegistryRef R) {
+ initializeTarget(*unwrap(R));
+}
+
LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
return wrap(new TargetData(StringRep));
}
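
The initializeTarget/LLVMInitializeTarget pair follows the per-library pass
initialization scheme this LLVM revision introduces (see the new
llvm-c/Initialization.h in this commit). A minimal C-API caller, assuming
the global pass registry accessor from llvm-c/Core.h:

#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"

int main(void) {
  // Register TargetData/TargetLibraryInfo with the global registry before
  // building pass pipelines that depend on them.
  LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
  LLVMInitializeTarget(R);
  return 0;
}
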
diff --git a/contrib/llvm/lib/Target/TargetAsmInfo.cpp b/contrib/llvm/lib/Target/TargetAsmInfo.cpp
new file mode 100644
index 0000000..6fa5420
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetAsmInfo.cpp
@@ -0,0 +1,27 @@
+//===-- llvm/Target/TargetAsmInfo.cpp - Target Assembly Info --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+using namespace llvm;
+
+TargetAsmInfo::TargetAsmInfo(const TargetMachine &TM) {
+ TLOF = &TM.getTargetLowering()->getObjFileLowering();
+ const TargetData &TD = *TM.getTargetData();
+ IsLittleEndian = TD.isLittleEndian();
+ PointerSize = TD.getPointerSize();
+ const TargetFrameLowering &TFI = *TM.getFrameLowering();
+ StackDir = TFI.getStackGrowthDirection();
+ TRI = TM.getRegisterInfo();
+ TFI.getInitialFrameState(InitialFrameState);
+}
diff --git a/contrib/llvm/lib/Target/TargetData.cpp b/contrib/llvm/lib/Target/TargetData.cpp
index f35c96d..c628df0 100644
--- a/contrib/llvm/lib/Target/TargetData.cpp
+++ b/contrib/llvm/lib/Target/TargetData.cpp
@@ -25,7 +25,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/ADT/DenseMap.h"
#include <algorithm>
#include <cstdlib>
@@ -34,7 +34,7 @@ using namespace llvm;
// Handle the Pass registration stuff necessary to use TargetData's.
// Register the default SparcV9 implementation...
-INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true);
+INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true)
char TargetData::ID = 0;
//===----------------------------------------------------------------------===//
@@ -83,7 +83,7 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) &&
(SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) &&
"Upper bound didn't work!");
-
+
// Multiple fields can have the same offset if any of them are zero sized.
// For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
// at the i32 element, because it is the last element at that offset. This is
@@ -131,6 +131,8 @@ static unsigned getInt(StringRef R) {
}
void TargetData::init(StringRef Desc) {
+ initializeTargetDataPass(*PassRegistry::getPassRegistry());
+
LayoutMap = 0;
LittleEndian = false;
PointerMemSize = 8;
@@ -153,16 +155,16 @@ void TargetData::init(StringRef Desc) {
std::pair<StringRef, StringRef> Split = Desc.split('-');
StringRef Token = Split.first;
Desc = Split.second;
-
+
if (Token.empty())
continue;
-
+
Split = Token.split(':');
StringRef Specifier = Split.first;
Token = Split.second;
-
+
assert(!Specifier.empty() && "Can't be empty here");
-
+
switch (Specifier[0]) {
case 'E':
LittleEndian = false;
@@ -197,7 +199,7 @@ void TargetData::init(StringRef Desc) {
unsigned Size = getInt(Specifier.substr(1));
Split = Token.split(':');
unsigned ABIAlign = getInt(Split.first) / 8;
-
+
Split = Split.second.split(':');
unsigned PrefAlign = getInt(Split.first) / 8;
if (PrefAlign == 0)
@@ -215,7 +217,7 @@ void TargetData::init(StringRef Desc) {
Token = Split.second;
} while (!Specifier.empty() || !Token.empty());
break;
-
+
default:
break;
}
@@ -231,7 +233,7 @@ TargetData::TargetData() : ImmutablePass(ID) {
"Tool did not specify a TargetData to use?");
}
-TargetData::TargetData(const Module *M)
+TargetData::TargetData(const Module *M)
: ImmutablePass(ID) {
init(M->getDataLayout());
}
@@ -249,14 +251,14 @@ TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
return;
}
}
-
+
Alignments.push_back(TargetAlignElem::get(align_type, abi_align,
pref_align, bit_width));
}
-/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
+/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
/// preferred if ABIInfo = false) the target wants for the specified datatype.
-unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
+unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
uint32_t BitWidth, bool ABIInfo,
const Type *Ty) const {
// Check to see if we have an exact match and remember the best match we see.
@@ -266,18 +268,18 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
if (Alignments[i].AlignType == AlignType &&
Alignments[i].TypeBitWidth == BitWidth)
return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
-
+
// The best match so far depends on what we're looking for.
- if (AlignType == INTEGER_ALIGN &&
+ if (AlignType == INTEGER_ALIGN &&
Alignments[i].AlignType == INTEGER_ALIGN) {
// The "best match" for integers is the smallest size that is larger than
// the BitWidth requested.
- if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
+ if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
BestMatchIdx = i;
// However, if there isn't one that's larger, then we must use the
// largest one we have (see below)
- if (LargestInt == -1 ||
+ if (LargestInt == -1 ||
Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth)
LargestInt = i;
}
@@ -322,8 +324,8 @@ class StructLayoutMap : public AbstractTypeUser {
I->first->removeAbstractTypeUser(this);
LayoutInfo.erase(I);
}
-
-
+
+
/// refineAbstractType - The callback method invoked when an abstract type is
/// resolved to another type. An object must override this method to update
/// its internal state to reference NewType instead of OldType.
@@ -385,21 +387,21 @@ TargetData::~TargetData() {
const StructLayout *TargetData::getStructLayout(const StructType *Ty) const {
if (!LayoutMap)
LayoutMap = new StructLayoutMap();
-
+
StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap);
StructLayout *&SL = (*STM)[Ty];
if (SL) return SL;
- // Otherwise, create the struct layout. Because it is variable length, we
+ // Otherwise, create the struct layout. Because it is variable length, we
// malloc it, then use placement new.
int NumElts = Ty->getNumElements();
StructLayout *L =
(StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t));
-
+
// Set SL before calling StructLayout's ctor. The ctor could cause other
// entries to be added to TheMap, invalidating our reference.
SL = L;
-
+
new (L) StructLayout(Ty, *this);
if (Ty->isAbstract())
@@ -414,14 +416,14 @@ const StructLayout *TargetData::getStructLayout(const StructType *Ty) const {
/// avoid a dangling pointer in this cache.
void TargetData::InvalidateStructLayoutInfo(const StructType *Ty) const {
if (!LayoutMap) return; // No cache.
-
+
static_cast<StructLayoutMap*>(LayoutMap)->InvalidateEntry(Ty);
}
std::string TargetData::getStringRepresentation() const {
std::string Result;
raw_string_ostream OS(Result);
-
+
OS << (LittleEndian ? "e" : "E")
<< "-p:" << PointerMemSize*8 << ':' << PointerABIAlign*8
<< ':' << PointerPrefAlign*8;
@@ -430,10 +432,10 @@ std::string TargetData::getStringRepresentation() const {
OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':'
<< AI.ABIAlign*8 << ':' << AI.PrefAlign*8;
}
-
+
if (!LegalIntWidths.empty()) {
OS << "-n" << (unsigned)LegalIntWidths[0];
-
+
for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
OS << ':' << (unsigned)LegalIntWidths[i];
}
@@ -461,6 +463,7 @@ uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
case Type::FloatTyID:
return 32;
case Type::DoubleTyID:
+ case Type::X86_MMXTyID:
return 64;
case Type::PPC_FP128TyID:
case Type::FP128TyID:
@@ -523,6 +526,7 @@ unsigned TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
case Type::X86_FP80TyID:
AlignType = FLOAT_ALIGN;
break;
+ case Type::X86_MMXTyID:
case Type::VectorTyID:
AlignType = VECTOR_ALIGN;
break;
diff --git a/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp b/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp
index 3631b35..a661ee9 100644
--- a/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetELFWriterInfo.cpp
@@ -17,9 +17,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-TargetELFWriterInfo::TargetELFWriterInfo(TargetMachine &tm) : TM(tm) {
- is64Bit = TM.getTargetData()->getPointerSizeInBits() == 64;
- isLittleEndian = TM.getTargetData()->isLittleEndian();
+TargetELFWriterInfo::TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_) :
+ is64Bit(is64Bit_), isLittleEndian(isLittleEndian_) {
}
TargetELFWriterInfo::~TargetELFWriterInfo() {}
diff --git a/contrib/llvm/lib/Target/TargetFrameLowering.cpp b/contrib/llvm/lib/Target/TargetFrameLowering.cpp
new file mode 100644
index 0000000..19fd581
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetFrameLowering.cpp
@@ -0,0 +1,53 @@
+//===----- TargetFrameLowering.cpp - Implement target frame interface ------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#include <cstdlib>
+using namespace llvm;
+
+TargetFrameLowering::~TargetFrameLowering() {
+}
+
+/// getInitialFrameState - Returns a list of machine moves that are assumed
+/// on entry to a function.
+void
+TargetFrameLowering::getInitialFrameState(std::vector<MachineMove> &Moves)
+ const {
+ // Default is to do nothing.
+}
+
+/// getFrameIndexOffset - Returns the displacement from the frame register to
+/// the stack frame of the specified index. This is the default implementation
+/// which is overridden for some targets.
+int TargetFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return MFI->getObjectOffset(FI) + MFI->getStackSize() -
+ getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
+}
+
+int TargetFrameLowering::getFrameIndexReference(const MachineFunction &MF,
+ int FI, unsigned &FrameReg) const {
+ const TargetRegisterInfo *RI = MF.getTarget().getRegisterInfo();
+
+ // By default, assume all frame indices are referenced via whatever
+ // getFrameRegister() says. The target can override this if it's doing
+ // something different.
+ FrameReg = RI->getFrameRegister(MF);
+ return getFrameIndexOffset(MF, FI);
+}
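
The default getFrameIndexOffset() combines four quantities. A self-contained
restatement with the MachineFrameInfo queries replaced by plain parameters,
plus a worked example with made-up numbers:

// object's offset within the frame + total frame size
// - offset of the local area from the frame register + any adjustment
int frameIndexOffset(int ObjectOffset, int StackSize,
                     int OffsetOfLocalArea, int OffsetAdjustment) {
  return ObjectOffset + StackSize - OffsetOfLocalArea + OffsetAdjustment;
}
// e.g. frameIndexOffset(-8, 32, 0, 0) == 24: on a downward-growing stack
// the object sits 24 bytes above where the frame register points.
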
diff --git a/contrib/llvm/lib/Target/TargetInstrInfo.cpp b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
index c099a7e..97f3bf6 100644
--- a/contrib/llvm/lib/Target/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
@@ -12,9 +12,12 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetInstrItineraries.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -47,9 +50,85 @@ TargetInstrInfo::TargetInstrInfo(const TargetInstrDesc* Desc,
TargetInstrInfo::~TargetInstrInfo() {
}
+unsigned
+TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ unsigned Class = MI->getDesc().getSchedClass();
+ unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
+ if (UOps)
+ return UOps;
+
+ // The # of u-ops is dynamically determined. The specific target should
+ // override this function to return the right number.
+ return 1;
+}
+
+int
+TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return -1;
+
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ unsigned UseClass = UseMI->getDesc().getSchedClass();
+ return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
+}
+
+int
+TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return -1;
+
+ if (!DefNode->isMachineOpcode())
+ return -1;
+
+ unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
+ if (!UseNode->isMachineOpcode())
+ return ItinData->getOperandCycle(DefClass, DefIdx);
+ unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
+ return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
+}
+
+int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+}
+
+int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *N) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ if (!N->isMachineOpcode())
+ return 1;
+
+ return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
+}
+
+bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI,
+ unsigned DefIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return false;
+
+ unsigned DefClass = DefMI->getDesc().getSchedClass();
+ int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
+ return (DefCycle != -1 && DefCycle <= 1);
+}
+
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
-void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
+void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
llvm_unreachable("Target didn't implement insertNoop!");
}
@@ -58,7 +137,7 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
-
+
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
return true;
@@ -78,15 +157,15 @@ bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
/// may be overloaded in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const {
-
-
+
+
// Count the number of instructions in the asm.
bool atInsnStart = true;
unsigned Length = 0;
for (; *Str; ++Str) {
if (*Str == '\n' || *Str == MAI.getSeparatorChar())
atInsnStart = true;
- if (atInsnStart && !isspace(*Str)) {
+ if (atInsnStart && !std::isspace(*Str)) {
Length += MAI.getMaxInstLength();
atInsnStart = false;
}
@@ -94,6 +173,6 @@ unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
strlen(MAI.getCommentString())) == 0)
atInsnStart = false;
}
-
+
return Length;
}
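The estimate is deliberately conservative: every statement in the asm string is charged the target's maximum instruction length. A hedged illustration (the TII pointer and a MaxInstLength of 4, typical of RISC targets, are assumptions):

  // Two statements separated by '\n' => 2 * MAI.getMaxInstLength() == 8.
  unsigned Bytes = TII->getInlineAsmLength("add r0, r1\nmul r2, r3", MAI);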
diff --git a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
new file mode 100644
index 0000000..c8bed18
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
@@ -0,0 +1,55 @@
+//===-- TargetLibraryInfo.cpp - Runtime library information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetLibraryInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/ADT/Triple.h"
+using namespace llvm;
+
+// Register the default implementation.
+INITIALIZE_PASS(TargetLibraryInfo, "targetlibinfo",
+ "Target Library Information", false, true)
+char TargetLibraryInfo::ID = 0;
+
+/// initialize - Initialize the set of available library functions based on the
+/// specified target triple. This should be carefully written so that a missing
+/// target triple gets a sane set of defaults.
+static void initialize(TargetLibraryInfo &TLI, const Triple &T) {
+ initializeTargetLibraryInfoPass(*PassRegistry::getPassRegistry());
+
+
+ // memset_pattern16 is only available on iOS 3.0 and Mac OS X 10.5 and later.
+ if (T.getOS() != Triple::Darwin || T.getDarwinMajorNumber() < 9)
+ TLI.setUnavailable(LibFunc::memset_pattern16);
+
+}
+
+
+TargetLibraryInfo::TargetLibraryInfo() : ImmutablePass(ID) {
+ // Default to everything being available.
+ memset(AvailableArray, -1, sizeof(AvailableArray));
+
+ initialize(*this, Triple());
+}
+
+TargetLibraryInfo::TargetLibraryInfo(const Triple &T) : ImmutablePass(ID) {
+ // Default to everything being available.
+ memset(AvailableArray, -1, sizeof(AvailableArray));
+
+ initialize(*this, T);
+}
+
+/// disableAllFunctions - This disables all builtins, which is used for options
+/// like -fno-builtin.
+void TargetLibraryInfo::disableAllFunctions() {
+ memset(AvailableArray, 0, sizeof(AvailableArray));
+}
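A minimal sketch of a client of the new pass; the triple string and the has() query (declared in the class's header, not shown in this patch) are illustrative assumptions:

  TargetLibraryInfo TLI(Triple("x86_64-apple-darwin10"));
  if (TLI.has(LibFunc::memset_pattern16)) {
    // ... safe to form a call to memset_pattern16 on this target ...
  }
  TLI.disableAllFunctions();  // what a front end's -fno-builtin would request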
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index dd7b532..5d34c7d 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -43,8 +43,8 @@ TargetLoweringObjectFile::TargetLoweringObjectFile() : Ctx(0) {
StaticCtorSection = 0;
StaticDtorSection = 0;
LSDASection = 0;
- EHFrameSection = 0;
+ CommDirectiveSupportsAlignment = true;
DwarfAbbrevSection = 0;
DwarfInfoSection = 0;
DwarfLineSection = 0;
@@ -168,6 +168,12 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
switch (C->getRelocationInfo()) {
default: assert(0 && "unknown relocation info kind");
case Constant::NoRelocation:
+ // If the global is required to have a unique address, it can't be put
+ // into a mergable section: just drop it into the general read-only
+ // section instead.
+ if (!GVar->hasUnnamedAddr())
+ return SectionKind::getReadOnly();
+
// If initializer is a null-terminated string, put it in a "cstring"
// section of the right width.
if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
diff --git a/contrib/llvm/lib/Target/TargetMachine.cpp b/contrib/llvm/lib/Target/TargetMachine.cpp
index 705b1c0..d579d95 100644
--- a/contrib/llvm/lib/Target/TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/TargetMachine.cpp
@@ -219,7 +219,9 @@ FunctionSections("ffunction-sections",
TargetMachine::TargetMachine(const Target &T)
: TheTarget(T), AsmInfo(0),
- MCRelaxAll(false) {
+ MCRelaxAll(false),
+ MCNoExecStack(false),
+ MCUseLoc(true) {
// Typically it will be subtargets that will adjust FloatABIType from Default
// to Soft or Hard.
if (UseSoftFloat)
diff --git a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
index 55f222c..4811ba5 100644
--- a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -13,10 +13,10 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -30,7 +30,7 @@ TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
AliasesHash(aliases), AliasesHashSize(aliasessize),
Desc(D), SubRegIndexNames(subregindexnames), NumRegs(NR),
RegClassBegin(RCB), RegClassEnd(RCE) {
- assert(NumRegs < FirstVirtualRegister &&
+ assert(isPhysicalRegister(NumRegs) &&
"Target has too many physical registers!");
CallFrameSetupOpcode = CFSO;
@@ -39,6 +39,25 @@ TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
TargetRegisterInfo::~TargetRegisterInfo() {}
+void PrintReg::print(raw_ostream &OS) const {
+ if (!Reg)
+ OS << "%noreg";
+ else if (TargetRegisterInfo::isStackSlot(Reg))
+ OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
+ else if (TargetRegisterInfo::isVirtualRegister(Reg))
+ OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
+ else if (TRI && Reg < TRI->getNumRegs())
+ OS << '%' << TRI->getName(Reg);
+ else
+ OS << "%physreg" << Reg;
+ if (SubIdx) {
+ if (TRI)
+ OS << ':' << TRI->getSubRegIndexName(SubIdx);
+ else
+ OS << ":sub(" << SubIdx << ')';
+ }
+}
+
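PrintReg is a stream helper, so callers can format any register kind without checking whether it is physical, virtual, or a stack slot. Sketch of intended use (dbgs() and the two register values are assumed context, not patch code):

  dbgs() << "assigning " << PrintReg(VirtReg, TRI) << " to "
         << PrintReg(PhysReg, TRI) << '\n';  // e.g. "%vreg7" and "%EAX"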
/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the most specific register class of
/// the right type that contains this physreg.
@@ -82,29 +101,11 @@ BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
// Mask out the reserved registers
BitVector Reserved = getReservedRegs(MF);
- Allocatable ^= Reserved & Allocatable;
+ Allocatable &= Reserved.flip();
return Allocatable;
}
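Both the old and the new form compute Allocatable &= ~Reserved; flip() simply takes the complement in place on the BitVector. A toy equivalence check (standalone assumption, not patch code):

  BitVector A(4), R(4);
  A.set(0); A.set(1);   // candidate registers {0,1}
  R.set(1);             // register 1 is reserved
  A &= R.flip();        // A == {0}: the reserved register is masked out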
-/// getFrameIndexOffset - Returns the displacement from the frame register to
-/// the stack frame of the specified index. This is the default implementation
-/// which is overridden for some targets.
-int TargetRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
- int FI) const {
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->getObjectOffset(FI) + MFI->getStackSize() -
- TFI.getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
-}
-
-/// getInitialFrameState - Returns a list of machine moves that are assumed
-/// on entry to a function.
-void
-TargetRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const{
- // Default is to do nothing.
-}
-
const TargetRegisterClass *
llvm::getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B) {
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
index 26797ab..ec73087 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
@@ -65,9 +65,10 @@ public:
}
};
-}
+} // end anonymous namespace
-static unsigned MatchRegisterName(StringRef Name);
+#define GET_REGISTER_MATCHER
+#include "X86GenAsmMatcher.inc"
AsmToken X86AsmLexer::LexTokenATT() {
AsmToken lexedToken = lexDefinite();
@@ -162,7 +163,3 @@ extern "C" void LLVMInitializeX86AsmLexer() {
RegisterAsmLexer<X86AsmLexer> X(TheX86_32Target);
RegisterAsmLexer<X86AsmLexer> Y(TheX86_64Target);
}
-
-#define REGISTERS_ONLY
-#include "X86GenAsmMatcher.inc"
-#undef REGISTERS_ONLY
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index f8588d8..1cac07a 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -10,20 +10,21 @@
#include "llvm/Target/TargetAsmParser.h"
#include "X86.h"
#include "X86Subtarget.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetAsmParser.h"
using namespace llvm;
namespace {
@@ -43,35 +44,32 @@ private:
bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
-
X86Operand *ParseOperand();
X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
- bool MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst);
+ bool MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out);
/// @name Auto-generated Matcher Functions
/// {
- unsigned ComputeAvailableFeatures(const X86Subtarget *Subtarget) const;
-
- bool MatchInstructionImpl(
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCInst &Inst);
+#define GET_ASSEMBLER_HEADER
+#include "X86GenAsmMatcher.inc"
/// }
public:
- X86ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM)
- : TargetAsmParser(T), Parser(_Parser), TM(TM) {
+ X86ATTAsmParser(const Target &T, MCAsmParser &parser, TargetMachine &TM)
+ : TargetAsmParser(T), Parser(parser), TM(TM) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(
&TM.getSubtarget<X86Subtarget>()));
}
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
@@ -81,16 +79,16 @@ public:
class X86_32ATTAsmParser : public X86ATTAsmParser {
public:
- X86_32ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM)
- : X86ATTAsmParser(T, _Parser, TM) {
+ X86_32ATTAsmParser(const Target &T, MCAsmParser &Parser, TargetMachine &TM)
+ : X86ATTAsmParser(T, Parser, TM) {
Is64Bit = false;
}
};
class X86_64ATTAsmParser : public X86ATTAsmParser {
public:
- X86_64ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM)
- : X86ATTAsmParser(T, _Parser, TM) {
+ X86_64ATTAsmParser(const Target &T, MCAsmParser &Parser, TargetMachine &TM)
+ : X86ATTAsmParser(T, Parser, TM) {
Is64Bit = true;
}
};
@@ -375,14 +373,18 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
// validation later, so maybe there is no need for this here.
RegNo = MatchRegisterName(Tok.getString());
+ // If the match failed, try the register name as lowercase.
+ if (RegNo == 0)
+ RegNo = MatchRegisterName(LowercaseString(Tok.getString()));
+
// FIXME: This should be done using Requires<In32BitMode> and
// Requires<In64BitMode> so "eiz" usage in 64-bit instructions
// can be also checked.
if (RegNo == X86::RIZ && !Is64Bit)
return Error(Tok.getLoc(), "riz register in 64-bit mode only");
- // Parse %st(1) and "%st" as "%st(0)"
- if (RegNo == 0 && Tok.getString() == "st") {
+ // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
+ if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
RegNo = X86::ST0;
EndLoc = Tok.getLoc();
Parser.Lex(); // Eat 'st'
@@ -617,88 +619,13 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
bool X86ATTAsmParser::
ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // The various flavors of pushf and popf use Requires<In32BitMode> and
- // Requires<In64BitMode>, but the assembler doesn't yet implement that.
- // For now, just do a manual check to prevent silent misencoding.
- if (Is64Bit) {
- if (Name == "popfl")
- return Error(NameLoc, "popfl cannot be encoded in 64-bit mode");
- else if (Name == "pushfl")
- return Error(NameLoc, "pushfl cannot be encoded in 64-bit mode");
- else if (Name == "pusha")
- return Error(NameLoc, "pusha cannot be encoded in 64-bit mode");
- } else {
- if (Name == "popfq")
- return Error(NameLoc, "popfq cannot be encoded in 32-bit mode");
- else if (Name == "pushfq")
- return Error(NameLoc, "pushfq cannot be encoded in 32-bit mode");
- }
-
- // The "Jump if rCX Zero" form jcxz is not allowed in 64-bit mode and
- // the form jrcxz is not allowed in 32-bit mode.
- if (Is64Bit) {
- if (Name == "jcxz")
- return Error(NameLoc, "jcxz cannot be encoded in 64-bit mode");
- } else {
- if (Name == "jrcxz")
- return Error(NameLoc, "jrcxz cannot be encoded in 32-bit mode");
- }
-
- // FIXME: Hack to recognize "sal..." and "rep..." for now. We need a way to
- // represent alternative syntaxes in the .td file, without requiring
- // instruction duplication.
- StringRef PatchedName = StringSwitch<StringRef>(Name)
- .Case("sal", "shl")
- .Case("salb", "shlb")
- .Case("sall", "shll")
- .Case("salq", "shlq")
- .Case("salw", "shlw")
- .Case("repe", "rep")
- .Case("repz", "rep")
- .Case("repnz", "repne")
- .Case("pushf", Is64Bit ? "pushfq" : "pushfl")
- .Case("popf", Is64Bit ? "popfq" : "popfl")
- .Case("retl", Is64Bit ? "retl" : "ret")
- .Case("retq", Is64Bit ? "ret" : "retq")
- .Case("setz", "sete")
- .Case("setnz", "setne")
- .Case("jz", "je")
- .Case("jnz", "jne")
- .Case("jc", "jb")
- // FIXME: in 32-bit mode jcxz requires an AdSize prefix. In 64-bit mode
- // jecxz requires an AdSize prefix but jecxz does not have a prefix in
- // 32-bit mode.
- .Case("jecxz", "jcxz")
- .Case("jrcxz", "jcxz")
- .Case("jna", "jbe")
- .Case("jnae", "jb")
- .Case("jnb", "jae")
- .Case("jnbe", "ja")
- .Case("jnc", "jae")
- .Case("jng", "jle")
- .Case("jnge", "jl")
- .Case("jnl", "jge")
- .Case("jnle", "jg")
- .Case("jpe", "jp")
- .Case("jpo", "jnp")
- .Case("cmovcl", "cmovbl")
- .Case("cmovcl", "cmovbl")
- .Case("cmovnal", "cmovbel")
- .Case("cmovnbl", "cmovael")
- .Case("cmovnbel", "cmoval")
- .Case("cmovncl", "cmovael")
- .Case("cmovngl", "cmovlel")
- .Case("cmovnl", "cmovgel")
- .Case("cmovngl", "cmovlel")
- .Case("cmovngel", "cmovll")
- .Case("cmovnll", "cmovgel")
- .Case("cmovnlel", "cmovgl")
- .Case("cmovnzl", "cmovnel")
- .Case("cmovzl", "cmovel")
- .Case("fwait", "wait")
- .Case("movzx", "movzb")
- .Default(Name);
+ StringRef PatchedName = Name;
+ // FIXME: Hack to recognize setneb as setne.
+ if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
+ PatchedName != "setb" && PatchedName != "setnb")
+ PatchedName = PatchedName.substr(0, Name.size()-1);
+
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
const MCExpr *ExtraImmOp = 0;
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
@@ -773,12 +700,26 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
PatchedName = "vpclmulqdq";
}
}
+
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
if (ExtraImmOp)
Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
+
+ // Determine whether this is an instruction prefix.
+ bool isPrefix =
+ Name == "lock" || Name == "rep" ||
+ Name == "repe" || Name == "repz" ||
+ Name == "repne" || Name == "repnz" ||
+ Name == "rex64" || Name == "data16";
+
+
+ // This does the actual operand parsing. Don't parse any more if we have a
+ // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
+ // just want to parse the "lock" as the first instruction and the "incl" as
+ // the next one.
+ if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
// Parse '*' modifier.
if (getLexer().is(AsmToken::Star)) {
@@ -790,8 +731,10 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
// Read the first operand.
if (X86Operand *Op = ParseOperand())
Operands.push_back(Op);
- else
+ else {
+ Parser.EatToEndOfStatement();
return true;
+ }
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
@@ -799,23 +742,27 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
// Parse and remember the operand.
if (X86Operand *Op = ParseOperand())
Operands.push_back(Op);
- else
+ else {
+ Parser.EatToEndOfStatement();
return true;
+ }
}
- }
- // FIXME: Hack to handle recognizing s{hr,ar,hl}? $1.
- if ((Name.startswith("shr") || Name.startswith("sar") ||
- Name.startswith("shl")) &&
- Operands.size() == 3 &&
- static_cast<X86Operand*>(Operands[1])->isImm() &&
- isa<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm()) &&
- cast<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm())->getValue() == 1) {
- delete Operands[1];
- Operands.erase(Operands.begin() + 1);
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.EatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
}
- // FIXME: Hack to handle "out[bwl]? %al, (%dx)" -> "outb %al, %dx".
+ if (getLexer().is(AsmToken::EndOfStatement))
+ Parser.Lex(); // Consume the EndOfStatement
+ else if (isPrefix && getLexer().is(AsmToken::Slash))
+ Parser.Lex(); // Consume the prefix separator Slash
+
+ // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
+ // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
+ // documented form in various unofficial manuals, so a lot of code uses it.
if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
Operands.size() == 3) {
X86Operand &Op = *(X86Operand*)Operands.back();
@@ -829,76 +776,80 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
}
}
- // FIXME: Hack to handle "f{mul*,add*,sub*,div*} $op, st(0)" the same as
- // "f{mul*,add*,sub*,div*} $op"
- if ((Name.startswith("fmul") || Name.startswith("fadd") ||
- Name.startswith("fsub") || Name.startswith("fdiv")) &&
- Operands.size() == 3 &&
- static_cast<X86Operand*>(Operands[2])->isReg() &&
- static_cast<X86Operand*>(Operands[2])->getReg() == X86::ST0) {
- delete Operands[2];
- Operands.erase(Operands.begin() + 2);
- }
-
- // FIXME: Hack to handle "imul <imm>, B" which is an alias for "imul <imm>, B,
- // B".
- if (Name.startswith("imul") && Operands.size() == 3 &&
- static_cast<X86Operand*>(Operands[1])->isImm() &&
- static_cast<X86Operand*>(Operands.back())->isReg()) {
- X86Operand *Op = static_cast<X86Operand*>(Operands.back());
- Operands.push_back(X86Operand::CreateReg(Op->getReg(), Op->getStartLoc(),
- Op->getEndLoc()));
- }
-
- return false;
-}
-
-bool X86ATTAsmParser::ParseDirective(AsmToken DirectiveID) {
- StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal == ".word")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- return true;
-}
-
-/// ParseDirectiveWord
-/// ::= .word [ expression (, expression)* ]
-bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for (;;) {
- const MCExpr *Value;
- if (getParser().ParseExpression(Value))
- return true;
-
- getParser().getStreamer().EmitValue(Value, Size, 0 /*addrspace*/);
-
- if (getLexer().is(AsmToken::EndOfStatement))
- break;
-
- // FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
+ // FIXME: Hack to handle recognizing s{hr,ar,hl} $1, <op>. Canonicalize to
+ // "shift <op>".
+ if ((Name.startswith("shr") || Name.startswith("sar") ||
+ Name.startswith("shl") || Name.startswith("sal") ||
+ Name.startswith("rcl") || Name.startswith("rcr") ||
+ Name.startswith("rol") || Name.startswith("ror")) &&
+ Operands.size() == 3) {
+ X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
+ if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[1];
+ Operands.erase(Operands.begin() + 1);
}
}
- Parser.Lex();
return false;
}
-
-bool
-X86ATTAsmParser::MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*>
- &Operands,
- MCInst &Inst) {
+bool X86ATTAsmParser::
+MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) {
assert(!Operands.empty() && "Unexpected empty operand list!");
-
X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
assert(Op->isToken() && "Leading operand should always be a mnemonic!");
+ // First, handle aliases that expand to multiple instructions.
+ // FIXME: This should be replaced with a real .td file alias mechanism.
+ // Also, MatchInstructionImpl should actually *do* the EmitInstruction
+ // call.
+ if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" ||
+ Op->getToken() == "fstsww" || Op->getToken() == "fstcww" ||
+ Op->getToken() == "finit" || Op->getToken() == "fsave" ||
+ Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
+ MCInst Inst;
+ Inst.setOpcode(X86::WAIT);
+ Out.EmitInstruction(Inst);
+
+ const char *Repl =
+ StringSwitch<const char*>(Op->getToken())
+ .Case("finit", "fninit")
+ .Case("fsave", "fnsave")
+ .Case("fstcw", "fnstcw")
+ .Case("fstcww", "fnstcw")
+ .Case("fstenv", "fnstenv")
+ .Case("fstsw", "fnstsw")
+ .Case("fstsww", "fnstsw")
+ .Case("fclex", "fnclex")
+ .Default(0);
+ assert(Repl && "Unknown wait-prefixed instruction");
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
+ }
+
+ bool WasOriginallyInvalidOperand = false;
+ unsigned OrigErrorInfo;
+ MCInst Inst;
+
// First, try a direct match.
- if (!MatchInstructionImpl(Operands, Inst))
+ switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo)) {
+ case Match_Success:
+ Out.EmitInstruction(Inst);
return false;
+ case Match_MissingFeature:
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ case Match_ConversionFail:
+ return Error(IDLoc, "unable to convert operands to instruction");
+ case Match_InvalidOperand:
+ WasOriginallyInvalidOperand = true;
+ break;
+ case Match_MnemonicFail:
+ break;
+ }
// FIXME: Ideally, we would only attempt suffix matches for things which are
// valid prefixes, and we could just infer the right unambiguous
@@ -912,15 +863,26 @@ X86ATTAsmParser::MatchInstruction(SMLoc IDLoc,
Tmp += ' ';
Op->setTokenValue(Tmp.str());
+ // If this instruction starts with an 'f', then it is a floating point stack
+ // instruction. These come in up to three forms for 32-bit, 64-bit, and
+ // 80-bit floating point, which use the suffixes s,l,t respectively.
+ //
+ // Otherwise, we assume that this may be an integer instruction, which comes
+ // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
+ const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
+
// Check for the various suffix matches.
- Tmp[Base.size()] = 'b';
- bool MatchB = MatchInstructionImpl(Operands, Inst);
- Tmp[Base.size()] = 'w';
- bool MatchW = MatchInstructionImpl(Operands, Inst);
- Tmp[Base.size()] = 'l';
- bool MatchL = MatchInstructionImpl(Operands, Inst);
- Tmp[Base.size()] = 'q';
- bool MatchQ = MatchInstructionImpl(Operands, Inst);
+ Tmp[Base.size()] = Suffixes[0];
+ unsigned ErrorInfoIgnore;
+ MatchResultTy Match1, Match2, Match3, Match4;
+
+ Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Tmp[Base.size()] = Suffixes[1];
+ Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Tmp[Base.size()] = Suffixes[2];
+ Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
+ Tmp[Base.size()] = Suffixes[3];
+ Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore);
// Restore the old token.
Op->setTokenValue(Base);
@@ -928,24 +890,25 @@ X86ATTAsmParser::MatchInstruction(SMLoc IDLoc,
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
- if (MatchB + MatchW + MatchL + MatchQ == 3)
+ unsigned NumSuccessfulMatches =
+ (Match1 == Match_Success) + (Match2 == Match_Success) +
+ (Match3 == Match_Success) + (Match4 == Match_Success);
+ if (NumSuccessfulMatches == 1) {
+ Out.EmitInstruction(Inst);
return false;
+ }
- // Otherwise, the match failed.
+ // Otherwise the match failed; try to produce a decent error message.
// If we had multiple suffix matches, then identify this as an ambiguous
// match.
- if (MatchB + MatchW + MatchL + MatchQ != 4) {
+ if (NumSuccessfulMatches > 1) {
char MatchChars[4];
unsigned NumMatches = 0;
- if (!MatchB)
- MatchChars[NumMatches++] = 'b';
- if (!MatchW)
- MatchChars[NumMatches++] = 'w';
- if (!MatchL)
- MatchChars[NumMatches++] = 'l';
- if (!MatchQ)
- MatchChars[NumMatches++] = 'q';
+ if (Match1 == Match_Success) MatchChars[NumMatches++] = Suffixes[0];
+ if (Match2 == Match_Success) MatchChars[NumMatches++] = Suffixes[1];
+ if (Match3 == Match_Success) MatchChars[NumMatches++] = Suffixes[2];
+ if (Match4 == Match_Success) MatchChars[NumMatches++] = Suffixes[3];
SmallString<126> Msg;
raw_svector_ostream OS(Msg);
@@ -959,14 +922,90 @@ X86ATTAsmParser::MatchInstruction(SMLoc IDLoc,
}
OS << ")";
Error(IDLoc, OS.str());
- } else {
- // FIXME: We should give nicer diagnostics about the exact failure.
- Error(IDLoc, "unrecognized instruction");
+ return true;
}
+ // Okay, we know that none of the variants matched successfully.
+
+ // If all of the instructions reported an invalid mnemonic, then the original
+ // mnemonic was invalid.
+ if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
+ (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
+ if (!WasOriginallyInvalidOperand) {
+ Error(IDLoc, "invalid instruction mnemonic '" + Base + "'");
+ return true;
+ }
+
+ // Recover location info for the operand if we know which was the problem.
+ SMLoc ErrorLoc = IDLoc;
+ if (OrigErrorInfo != ~0U) {
+ if (OrigErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((X86Operand*)Operands[OrigErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+
+ // If one instruction matched with a missing feature, report this as a
+ // missing feature.
+ if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) +
+ (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ }
+
+ // If one instruction matched with an invalid operand, report this as an
+ // operand failure.
+ if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) +
+ (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){
+ Error(IDLoc, "invalid operand for instruction");
+ return true;
+ }
+
+ // If all of these were an outright failure, report it in a useless way.
+ // FIXME: We should give nicer diagnostics about the exact failure.
+ Error(IDLoc, "unknown use of instruction mnemonic without a size suffix");
+ return true;
+}
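The net effect is a diagnostic ladder: direct match, then suffix inference, then the most specific error available. Assumed sample inputs (the messages follow the format strings above):

  // "incq %rax"  -> direct match, instruction emitted
  // "inc %eax"   -> suffix inference finds exactly one match (incl)
  // "inc (%rax)" -> all of b/w/l/q match, so an explicit suffix is required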
+
+
+bool X86ATTAsmParser::ParseDirective(AsmToken DirectiveID) {
+ StringRef IDVal = DirectiveID.getIdentifier();
+ if (IDVal == ".word")
+ return ParseDirectiveWord(2, DirectiveID.getLoc());
return true;
}
+/// ParseDirectiveWord
+/// ::= .word [ expression (, expression)* ]
+bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ for (;;) {
+ const MCExpr *Value;
+ if (getParser().ParseExpression(Value))
+ return true;
+
+ getParser().getStreamer().EmitValue(Value, Size, 0 /*addrspace*/);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ // FIXME: Improve diagnostic.
+ if (getLexer().isNot(AsmToken::Comma))
+ return Error(L, "unexpected token in directive");
+ Parser.Lex();
+ }
+ }
+
+ Parser.Lex();
+ return false;
+}
+
+
+
extern "C" void LLVMInitializeX86AsmLexer();
@@ -977,4 +1016,6 @@ extern "C" void LLVMInitializeX86AsmParser() {
LLVMInitializeX86AsmLexer();
}
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
#include "X86GenAsmMatcher.inc"
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 09f1584..691e2d7 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -157,9 +157,8 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
/// @param immediate - The immediate value to append.
/// @param operand - The operand, as stored in the descriptor table.
/// @param insn - The internal instruction.
-static void translateImmediate(MCInst &mcInst,
- uint64_t immediate,
- OperandSpecifier &operand,
+static void translateImmediate(MCInst &mcInst, uint64_t immediate,
+ const OperandSpecifier &operand,
InternalInstruction &insn) {
// Sign-extend the immediate if necessary.
@@ -392,9 +391,8 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
/// @param insn - The instruction to extract Mod, R/M, and SIB fields
/// from.
/// @return - 0 on success; nonzero otherwise
-static bool translateRM(MCInst &mcInst,
- OperandSpecifier &operand,
- InternalInstruction &insn) {
+static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
+ InternalInstruction &insn) {
switch (operand.type) {
default:
debug("Unexpected type for a R/M operand");
@@ -461,9 +459,8 @@ static bool translateFPRegister(MCInst &mcInst,
/// @param operand - The operand, as stored in the descriptor table.
/// @param insn - The internal instruction.
/// @return - false on success; true otherwise.
-static bool translateOperand(MCInst &mcInst,
- OperandSpecifier &operand,
- InternalInstruction &insn) {
+static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
+ InternalInstruction &insn) {
switch (operand.encoding) {
default:
debug("Unhandled operand encoding during translation");
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
index 9c54262..550cf9d 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
@@ -78,7 +78,7 @@
const char* name;
#define INSTRUCTION_IDS \
- InstrUID* instructionIDs;
+ const InstrUID *instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index 6c3ff6b..b6546fc 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -27,12 +27,6 @@
typedef int8_t bool;
-#ifdef __GNUC__
-#define NORETURN __attribute__((noreturn))
-#else
-#define NORETURN
-#endif
-
#ifndef NDEBUG
#define debug(s) do { x86DisassemblerDebug(__FILE__, __LINE__, s); } while (0)
#else
@@ -103,7 +97,7 @@ static InstrUID decode(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
uint8_t modRM) {
- struct ModRMDecision* dec;
+ const struct ModRMDecision* dec;
switch (type) {
default:
@@ -147,7 +141,7 @@ static InstrUID decode(OpcodeType type,
* decode(); specifierForUID will not check bounds.
* @return - A pointer to the specification for that instruction.
*/
-static struct InstructionSpecifier* specifierForUID(InstrUID uid) {
+static const struct InstructionSpecifier *specifierForUID(InstrUID uid) {
return &INSTRUCTIONS_SYM[uid];
}
@@ -296,7 +290,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
BOOL isPrefix = TRUE;
BOOL prefixGroups[4] = { FALSE };
uint64_t prefixLocation;
- uint8_t byte;
+ uint8_t byte = 0;
BOOL hasAdSize = FALSE;
BOOL hasOpSize = FALSE;
@@ -394,6 +388,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
}
} else {
unconsumeByte(insn);
+ insn->necessaryPrefixLocation = insn->readerCursor - 1;
}
if (insn->mode == MODE_16BIT) {
@@ -405,7 +400,7 @@ static int readPrefixes(struct InternalInstruction* insn) {
insn->registerSize = (hasOpSize ? 2 : 4);
insn->addressSize = (hasAdSize ? 2 : 4);
insn->displacementSize = (hasAdSize ? 2 : 4);
- insn->immediateSize = (hasAdSize ? 2 : 4);
+ insn->immediateSize = (hasOpSize ? 2 : 4);
} else if (insn->mode == MODE_64BIT) {
if (insn->rexPrefix && wFromREX(insn->rexPrefix)) {
insn->registerSize = 8;
@@ -517,7 +512,8 @@ static int getIDWithAttrMask(uint16_t* instructionID,
insn->opcode);
if (hasModRMExtension) {
- readModRM(insn);
+ if (readModRM(insn))
+ return -1;
*instructionID = decode(insn->opcodeType,
instructionClass,
@@ -632,9 +628,9 @@ static int getID(struct InternalInstruction* insn) {
* instead of F2 changes a 32 to a 64, we adopt the new encoding.
*/
- struct InstructionSpecifier* spec;
+ const struct InstructionSpecifier *spec;
uint16_t instructionIDWithREXw;
- struct InstructionSpecifier* specWithREXw;
+ const struct InstructionSpecifier *specWithREXw;
spec = specifierForUID(instructionID);
@@ -672,9 +668,9 @@ static int getID(struct InternalInstruction* insn) {
* in the right place we check if there's a 16-bit operation.
*/
- struct InstructionSpecifier* spec;
+ const struct InstructionSpecifier *spec;
uint16_t instructionIDWithOpsize;
- struct InstructionSpecifier* specWithOpsize;
+ const struct InstructionSpecifier *specWithOpsize;
spec = specifierForUID(instructionID);
@@ -866,7 +862,8 @@ static int readModRM(struct InternalInstruction* insn) {
if (insn->consumedModRM)
return 0;
- consumeByte(insn, &insn->modRM);
+ if (consumeByte(insn, &insn->modRM))
+ return -1;
insn->consumedModRM = TRUE;
mod = modFromModRM(insn->modRM);
@@ -1067,7 +1064,7 @@ GENERIC_FIXUP_FUNC(fixupRMValue, insn->eaRegBase, EA_REG)
* invalid for its class.
*/
static int fixupReg(struct InternalInstruction *insn,
- struct OperandSpecifier *op) {
+ const struct OperandSpecifier *op) {
uint8_t valid;
dbgprintf(insn, "fixupReg()");
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 28ba86b..4f4fbcd 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -24,7 +24,7 @@ extern "C" {
const char* name;
#define INSTRUCTION_IDS \
- InstrUID* instructionIDs;
+ const InstrUID *instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
@@ -423,7 +423,7 @@ struct InternalInstruction {
/* The instruction ID, extracted from the decode table */
uint16_t instructionID;
/* The specifier for the instruction, from the instruction info table */
- struct InstructionSpecifier* spec;
+ const struct InstructionSpecifier *spec;
/* state for additional bytes, consumed during operand decode. Pattern:
consumed___ indicates that the byte was already consumed and does not
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 0f33f52..1425b86 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -22,7 +22,7 @@
#ifndef X86DISASSEMBLERDECODERCOMMON_H
#define X86DISASSEMBLERDECODERCOMMON_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers
#define CONTEXTS_SYM x86DisassemblerContexts
@@ -248,6 +248,7 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_M64, "8-byte") \
ENUM_ENTRY(TYPE_LEA, "Effective address") \
ENUM_ENTRY(TYPE_M128, "16-byte (SSE/SSE2)") \
+ ENUM_ENTRY(TYPE_M256, "32-byte (AVX)") \
ENUM_ENTRY(TYPE_M1616, "2+2-byte segment+offset address") \
ENUM_ENTRY(TYPE_M1632, "2+4-byte") \
ENUM_ENTRY(TYPE_M1664, "2+8-byte") \
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/CMakeLists.txt b/contrib/llvm/lib/Target/X86/InstPrinter/CMakeLists.txt
new file mode 100644
index 0000000..033973e
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/CMakeLists.txt
@@ -0,0 +1,8 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMX86AsmPrinter
+ X86ATTInstPrinter.cpp
+ X86IntelInstPrinter.cpp
+ X86InstComments.cpp
+ )
+add_dependencies(LLVMX86AsmPrinter X86CodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/Makefile b/contrib/llvm/lib/Target/X86/InstPrinter/Makefile
new file mode 100644
index 0000000..c82aa33
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/X86/InstPrinter/Makefile ----------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMX86AsmPrinter
+
+# Hack: we need to include 'main' x86 target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 554b96c..d6950f4 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -25,10 +25,8 @@
using namespace llvm;
// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
#define GET_INSTRUCTION_NAME
#include "X86GenAsmWriter.inc"
-#undef MachineInstr
void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS) {
printInstruction(MI, OS);
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index eb98664..eb98664 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
index da9d5a3..12144e3 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -16,7 +16,7 @@
#include "X86GenInstrNames.inc"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/raw_ostream.h"
-#include "../X86ShuffleDecode.h"
+#include "../Utils/X86ShuffleDecode.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h
index 6b86db4..6b86db4 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86InstComments.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index 5625b0e..0484529 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -21,13 +21,12 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "X86GenInstrNames.inc"
+#include <cctype>
using namespace llvm;
// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
#define GET_INSTRUCTION_NAME
#include "X86GenAsmWriter1.inc"
-#undef MachineInstr
void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS) {
printInstruction(MI, OS);
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 6f12032..6f12032 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
diff --git a/contrib/llvm/lib/Target/X86/Utils/CMakeLists.txt b/contrib/llvm/lib/Target/X86/Utils/CMakeLists.txt
new file mode 100644
index 0000000..3ad5f99
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/Utils/CMakeLists.txt
@@ -0,0 +1,6 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMX86Utils
+ X86ShuffleDecode.cpp
+ )
+add_dependencies(LLVMX86Utils X86CodeGenTable_gen)
diff --git a/contrib/llvm/lib/Target/X86/Utils/Makefile b/contrib/llvm/lib/Target/X86/Utils/Makefile
new file mode 100644
index 0000000..1df6f0f
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/Utils/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/X86/Utils/Makefile -----------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMX86Utils
+
+# Hack: we need to include 'main' x86 target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Target/X86/X86ShuffleDecode.h b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index df04052..1287977 100644
--- a/contrib/llvm/lib/Target/X86/X86ShuffleDecode.h
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -12,21 +12,14 @@
//
//===----------------------------------------------------------------------===//
-#ifndef X86_SHUFFLE_DECODE_H
-#define X86_SHUFFLE_DECODE_H
-
-#include "llvm/ADT/SmallVector.h"
-using namespace llvm;
+#include "X86ShuffleDecode.h"
//===----------------------------------------------------------------------===//
// Vector Mask Decoding
//===----------------------------------------------------------------------===//
-enum {
- SM_SentinelZero = ~0U
-};
+namespace llvm {
-static inline
void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) {
// Default to copying the dest value.
ShuffleMask.push_back(0);
@@ -51,8 +44,8 @@ void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) {
}
// <3,1> or <6,7,2,3>
-static void DecodeMOVHLPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeMOVHLPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = NElts/2; i != NElts; ++i)
ShuffleMask.push_back(NElts+i);
@@ -61,8 +54,8 @@ static void DecodeMOVHLPSMask(unsigned NElts,
}
// <0,2> or <0,1,4,5>
-static void DecodeMOVLHPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeMOVLHPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i)
ShuffleMask.push_back(i);
@@ -70,16 +63,16 @@ static void DecodeMOVLHPSMask(unsigned NElts,
ShuffleMask.push_back(NElts+i);
}
-static void DecodePSHUFMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePSHUFMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts; ++i) {
ShuffleMask.push_back(Imm % NElts);
Imm /= NElts;
}
}
-static void DecodePSHUFHWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePSHUFHWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
ShuffleMask.push_back(0);
ShuffleMask.push_back(1);
ShuffleMask.push_back(2);
@@ -90,8 +83,8 @@ static void DecodePSHUFHWMask(unsigned Imm,
}
}
-static void DecodePSHUFLWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePSHUFLWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != 4; ++i) {
ShuffleMask.push_back((Imm & 3));
Imm >>= 2;
@@ -102,24 +95,24 @@ static void DecodePSHUFLWMask(unsigned Imm,
ShuffleMask.push_back(7);
}
-static void DecodePUNPCKLMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePUNPCKLMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i) {
ShuffleMask.push_back(i);
ShuffleMask.push_back(i+NElts);
}
}
-static void DecodePUNPCKHMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePUNPCKHMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i) {
ShuffleMask.push_back(i+NElts/2);
ShuffleMask.push_back(i+NElts+NElts/2);
}
}
-static void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
// Part that reads from dest.
for (unsigned i = 0; i != NElts/2; ++i) {
ShuffleMask.push_back(Imm % NElts);
@@ -132,8 +125,8 @@ static void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
}
}
-static void DecodeUNPCKHPMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeUNPCKHPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i) {
ShuffleMask.push_back(i+NElts/2); // Reads from dest
ShuffleMask.push_back(i+NElts+NElts/2); // Reads from src
@@ -144,12 +137,12 @@ static void DecodeUNPCKHPMask(unsigned NElts,
/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd
/// etc. NElts indicates the number of elements in the vector allowing it to
/// handle different datatypes and vector widths.
-static void DecodeUNPCKLPMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeUNPCKLPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i) {
ShuffleMask.push_back(i); // Reads from dest
ShuffleMask.push_back(i+NElts); // Reads from src
}
}
-#endif
+} // llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
new file mode 100644
index 0000000..50d9ccb
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -0,0 +1,69 @@
+//===-- X86ShuffleDecode.h - X86 shuffle decode logic -----------*-C++-*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define several functions to decode x86 specific shuffle semantics into a
+// generic vector mask.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_SHUFFLE_DECODE_H
+#define X86_SHUFFLE_DECODE_H
+
+#include "llvm/ADT/SmallVector.h"
+
+//===----------------------------------------------------------------------===//
+// Vector Mask Decoding
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+enum {
+ SM_SentinelZero = ~0U
+};
+
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask);
+
+// <3,1> or <6,7,2,3>
+void DecodeMOVHLPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+// <0,2> or <0,1,4,5>
+void DecodeMOVLHPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodePSHUFMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodePSHUFHWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodePSHUFLWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodePUNPCKLMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodePUNPCKHMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+void DecodeUNPCKHPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+
+/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// etc. NElts indicates the number of elements in the vector allowing it to
+/// handle different datatypes and vector widths.
+void DecodeUNPCKLPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask);
+
+} // llvm namespace
+
+#endif
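A hypothetical driver for these decoders, showing the mask convention (element i of the result reads source element ShuffleMask[i]):

  SmallVector<unsigned, 4> Mask;
  DecodePSHUFMask(4, 0x1B, Mask);  // pshufd $0x1b reverses a 4-wide vector
  // Mask now holds 3, 2, 1, 0.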
diff --git a/contrib/llvm/lib/Target/X86/X86.h b/contrib/llvm/lib/Target/X86/X86.h
index 27e8850..0ca4366 100644
--- a/contrib/llvm/lib/Target/X86/X86.h
+++ b/contrib/llvm/lib/Target/X86/X86.h
@@ -15,6 +15,7 @@
#ifndef TARGET_X86_H
#define TARGET_X86_H
+#include "llvm/Support/DataTypes.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -23,11 +24,13 @@ class FunctionPass;
class JITCodeEmitter;
class MCCodeEmitter;
class MCContext;
+class MCObjectWriter;
class MachineCodeEmitter;
class Target;
class TargetAsmBackend;
class X86TargetMachine;
class formatted_raw_ostream;
+class raw_ostream;
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
@@ -74,6 +77,13 @@ FunctionPass *createEmitX86CodeToMemory();
///
FunctionPass *createX86MaxStackAlignmentHeuristicPass();
+
+/// createX86MachObjectWriter - Construct an X86 Mach-O object writer.
+MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint32_t CPUType,
+ uint32_t CPUSubtype);
+
extern Target TheX86_32Target, TheX86_64Target;
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td
index a19f1ac..efb6c8c 100644
--- a/contrib/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm/lib/Target/X86/X86.td
@@ -23,6 +23,9 @@ include "llvm/Target/Target.td"
def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
"Enable conditional move instructions">;
+def FeaturePOPCNT : SubtargetFeature<"popcnt", "HasPOPCNT", "true",
+ "Support POPCNT instruction">;
+
def FeatureMMX : SubtargetFeature<"mmx","X86SSELevel", "MMX",
"Enable MMX instructions">;
@@ -45,7 +48,7 @@ def FeatureSSE41 : SubtargetFeature<"sse41", "X86SSELevel", "SSE41",
[FeatureSSSE3]>;
def FeatureSSE42 : SubtargetFeature<"sse42", "X86SSELevel", "SSE42",
"Enable SSE 4.2 instructions",
- [FeatureSSE41]>;
+ [FeatureSSE41, FeaturePOPCNT]>;
def Feature3DNow : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
"Enable 3DNow! instructions">;
def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
@@ -63,7 +66,8 @@ def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
"IsUAMemFast", "true",
"Fast unaligned memory access">;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
- "Support SSE 4a instructions">;
+ "Support SSE 4a instructions",
+ [FeaturePOPCNT]>;
def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true",
"Enable AVX instructions">;
@@ -112,11 +116,13 @@ def : Proc<"nehalem", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
FeatureFastUAMem]>;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
-def : Proc<"westmere", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
- FeatureFastUAMem, FeatureAES]>;
-// Sandy Bridge does not have FMA
-// FIXME: Wikipedia says it does... it should have AES as well.
-def : Proc<"sandybridge", [FeatureSSE42, FeatureAVX, Feature64Bit]>;
+def : Proc<"westmere", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
+ FeatureFastUAMem, FeatureAES, FeatureCLMUL]>;
+// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
+// rather than a superset.
+// FIXME: Disabling AVX for now since it's not ready.
+def : Proc<"sandybridge", [FeatureSSE42, Feature64Bit,
+ FeatureAES, FeatureCLMUL]>;
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [FeatureMMX, Feature3DNow]>;
@@ -176,7 +182,7 @@ include "X86CallingConv.td"
//===----------------------------------------------------------------------===//
-// Assembly Printers
+// Assembly Parser
//===----------------------------------------------------------------------===//
// Currently the X86 assembly parser only supports ATT syntax.
@@ -191,15 +197,21 @@ def ATTAsmParser : AsmParser {
string RegisterPrefix = "%";
}
+//===----------------------------------------------------------------------===//
+// Assembly Printers
+//===----------------------------------------------------------------------===//
+
// The X86 target supports two different syntaxes for emitting machine code.
// This is controlled by the -x86-asm-syntax={att|intel}
def ATTAsmWriter : AsmWriter {
string AsmWriterClassName = "ATTInstPrinter";
int Variant = 0;
+ bit isMCAsmWriter = 1;
}
def IntelAsmWriter : AsmWriter {
string AsmWriterClassName = "IntelInstPrinter";
int Variant = 1;
+ bit isMCAsmWriter = 1;
}
def X86 : Target {
diff --git a/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
index 69dc967..da5f5b1 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
@@ -11,50 +11,83 @@
#include "X86.h"
#include "X86FixupKinds.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/MC/ELFObjectWriter.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MachObjectWriter.h"
+#include "llvm/Object/MachOFormat.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
-
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
default: assert(0 && "invalid fixup kind!");
- case X86::reloc_pcrel_1byte:
+ case FK_PCRel_1:
case FK_Data_1: return 0;
- case X86::reloc_pcrel_2byte:
+ case FK_PCRel_2:
case FK_Data_2: return 1;
- case X86::reloc_pcrel_4byte:
+ case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_signed_4byte:
+ case X86::reloc_global_offset_table:
case FK_Data_4: return 2;
+ case FK_PCRel_8:
case FK_Data_8: return 3;
}
}
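The log2 encoding keeps the fixup-application loop branch-free: the number of bytes to patch is always 1 << getFixupKindLog2Size(Kind). Illustration (values follow directly from the switch above):

  unsigned Size = 1 << getFixupKindLog2Size(FK_Data_4);  // == 4 bytes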
namespace {
+
+class X86ELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ X86ELFObjectWriter(bool is64Bit, Triple::OSType OSType, uint16_t EMachine,
+ bool HasRelocationAddend)
+ : MCELFObjectTargetWriter(is64Bit, OSType, EMachine, HasRelocationAddend) {}
+};
+
class X86AsmBackend : public TargetAsmBackend {
public:
X86AsmBackend(const Target &T)
- : TargetAsmBackend(T) {}
+ : TargetAsmBackend() {}
+
+ unsigned getNumFixupKinds() const {
+ return X86::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
+ { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
+ { "reloc_signed_4byte", 0, 4 * 8, 0},
+ { "reloc_global_offset_table", 0, 4 * 8, 0}
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return TargetAsmBackend::getFixupKindInfo(Kind);
- void ApplyFixup(const MCFixup &Fixup, MCDataFragment &DF,
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
- assert(Fixup.getOffset() + Size <= DF.getContents().size() &&
+ assert(Fixup.getOffset() + Size <= DataSize &&
"Invalid fixup offset!");
for (unsigned i = 0; i != Size; ++i)
- DF.getContents()[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
+ Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
bool MayNeedRelaxation(const MCInst &Inst) const;
@@ -63,9 +96,9 @@ public:
bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
-} // end anonymous namespace
+} // end anonymous namespace
-static unsigned getRelaxedOpcode(unsigned Op) {
+static unsigned getRelaxedOpcodeBranch(unsigned Op) {
switch (Op) {
default:
return Op;
@@ -90,16 +123,104 @@ static unsigned getRelaxedOpcode(unsigned Op) {
}
}
+static unsigned getRelaxedOpcodeArith(unsigned Op) {
+ switch (Op) {
+ default:
+ return Op;
+
+ // IMUL
+ case X86::IMUL16rri8: return X86::IMUL16rri;
+ case X86::IMUL16rmi8: return X86::IMUL16rmi;
+ case X86::IMUL32rri8: return X86::IMUL32rri;
+ case X86::IMUL32rmi8: return X86::IMUL32rmi;
+ case X86::IMUL64rri8: return X86::IMUL64rri32;
+ case X86::IMUL64rmi8: return X86::IMUL64rmi32;
+
+ // AND
+ case X86::AND16ri8: return X86::AND16ri;
+ case X86::AND16mi8: return X86::AND16mi;
+ case X86::AND32ri8: return X86::AND32ri;
+ case X86::AND32mi8: return X86::AND32mi;
+ case X86::AND64ri8: return X86::AND64ri32;
+ case X86::AND64mi8: return X86::AND64mi32;
+
+ // OR
+ case X86::OR16ri8: return X86::OR16ri;
+ case X86::OR16mi8: return X86::OR16mi;
+ case X86::OR32ri8: return X86::OR32ri;
+ case X86::OR32mi8: return X86::OR32mi;
+ case X86::OR64ri8: return X86::OR64ri32;
+ case X86::OR64mi8: return X86::OR64mi32;
+
+ // XOR
+ case X86::XOR16ri8: return X86::XOR16ri;
+ case X86::XOR16mi8: return X86::XOR16mi;
+ case X86::XOR32ri8: return X86::XOR32ri;
+ case X86::XOR32mi8: return X86::XOR32mi;
+ case X86::XOR64ri8: return X86::XOR64ri32;
+ case X86::XOR64mi8: return X86::XOR64mi32;
+
+ // ADD
+ case X86::ADD16ri8: return X86::ADD16ri;
+ case X86::ADD16mi8: return X86::ADD16mi;
+ case X86::ADD32ri8: return X86::ADD32ri;
+ case X86::ADD32mi8: return X86::ADD32mi;
+ case X86::ADD64ri8: return X86::ADD64ri32;
+ case X86::ADD64mi8: return X86::ADD64mi32;
+
+ // SUB
+ case X86::SUB16ri8: return X86::SUB16ri;
+ case X86::SUB16mi8: return X86::SUB16mi;
+ case X86::SUB32ri8: return X86::SUB32ri;
+ case X86::SUB32mi8: return X86::SUB32mi;
+ case X86::SUB64ri8: return X86::SUB64ri32;
+ case X86::SUB64mi8: return X86::SUB64mi32;
+
+ // CMP
+ case X86::CMP16ri8: return X86::CMP16ri;
+ case X86::CMP16mi8: return X86::CMP16mi;
+ case X86::CMP32ri8: return X86::CMP32ri;
+ case X86::CMP32mi8: return X86::CMP32mi;
+ case X86::CMP64ri8: return X86::CMP64ri32;
+ case X86::CMP64mi8: return X86::CMP64mi32;
+
+ // PUSH
+ case X86::PUSHi8: return X86::PUSHi32;
+ }
+}
+
+static unsigned getRelaxedOpcode(unsigned Op) {
+ unsigned R = getRelaxedOpcodeArith(Op);
+ if (R != Op)
+ return R;
+ return getRelaxedOpcodeBranch(Op);
+}
+
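Splitting the table lets MayNeedRelaxation treat branches and arithmetic differently while getRelaxedOpcode keeps one entry point: try the arithmetic map first, and only if it is the identity fall back to the branch map. A table-driven sketch of that dispatch (hypothetical maps, plain C++):

#include <unordered_map>

// Two relaxation tables: imm8 arithmetic forms -> imm32 forms, and short
// branches -> near branches. Unknown opcodes relax to themselves.
static unsigned relaxOpcode(
    const std::unordered_map<unsigned, unsigned> &Arith,
    const std::unordered_map<unsigned, unsigned> &Branch, unsigned Op) {
  auto A = Arith.find(Op);
  if (A != Arith.end())
    return A->second;                          // e.g. ADD32ri8 -> ADD32ri
  auto B = Branch.find(Op);
  return B != Branch.end() ? B->second : Op;   // short jcc -> near jcc, else Op
}
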
bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+ // Branches can always be relaxed.
+ if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
+ return true;
+
// Check if this instruction is ever relaxable.
- if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
+ if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
return false;
- // If so, just assume it can be relaxed. Once we support relaxing more complex
- // instructions we should check that the instruction actually has symbolic
- // operands before doing this, but we need to be careful about things like
- // PCrel.
- return true;
+
+ // Check if it has an expression and is not RIP relative.
+ bool hasExp = false;
+ bool hasRIP = false;
+ for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
+ const MCOperand &Op = Inst.getOperand(i);
+ if (Op.isExpr())
+ hasExp = true;
+
+ if (Op.isReg() && Op.getReg() == X86::RIP)
+ hasRIP = true;
+ }
+
+ // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
+ // how we do relaxations?
+ return hasExp && !hasRIP;
}
// FIXME: Can tblgen help at all here to verify there aren't other instructions
@@ -123,10 +244,8 @@ void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
/// WriteNopData - Write optimal nops to the output file for the \arg Count
/// bytes. This returns the number of bytes written. It may return 0 if
/// the \arg Count is more than the maximum optimal nops.
-///
-/// FIXME this is X86 32-bit specific and should move to a better place.
bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
- static const uint8_t Nops[16][16] = {
+ static const uint8_t Nops[10][10] = {
// nop
{0x90},
// xchg %ax,%ax
@@ -147,32 +266,16 @@ bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
{0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
// nopw %cs:0L(%[re]ax,%[re]ax,1)
{0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
- // nopl 0(%[re]ax,%[re]ax,1)
- // nopw 0(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopw 0(%[re]ax,%[re]ax,1)
- // nopw 0(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopw 0(%[re]ax,%[re]ax,1)
- // nopl 0L(%[re]ax) */
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
- // nopl 0L(%[re]ax)
- // nopl 0L(%[re]ax)
- {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
- // nopl 0L(%[re]ax)
- // nopl 0L(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}
};
// Write an optimal sequence for the first 15 bytes.
- uint64_t OptimalCount = (Count < 16) ? Count : 15;
- for (uint64_t i = 0, e = OptimalCount; i != e; i++)
- OW->Write8(Nops[OptimalCount - 1][i]);
+ const uint64_t OptimalCount = (Count < 16) ? Count : 15;
+ const uint64_t Prefixes = OptimalCount <= 10 ? 0 : OptimalCount - 10;
+ for (uint64_t i = 0, e = Prefixes; i != e; i++)
+ OW->Write8(0x66);
+ const uint64_t Rest = OptimalCount - Prefixes;
+ for (uint64_t i = 0, e = Rest; i != e; i++)
+ OW->Write8(Nops[Rest - 1][i]);
// Finish with single byte nops.
for (uint64_t i = OptimalCount, e = Count; i != e; ++i)
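
With the table capped at ten-byte nops, lengths 11-15 are built by prepending 0x66 operand-size prefixes to the longest entry. The byte accounting from the loops above, reduced to a sketch (hypothetical helper):

#include <cstdint>

// Split a padding request into 0x66 prefixes plus one table nop of at most
// 10 bytes. E.g. Count == 13 -> 3 prefixes + Nops[9], the 10-byte nop.
static void splitNopPadding(uint64_t Count, uint64_t &Prefixes, uint64_t &Rest) {
  const uint64_t Optimal = Count < 16 ? Count : 15; // at most 15 bytes at once
  Prefixes = Optimal <= 10 ? 0 : Optimal - 10;      // extra 0x66 bytes
  Rest = Optimal - Prefixes;                        // table nop length (1..10)
}
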
@@ -186,75 +289,60 @@ bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
namespace {
class ELFX86AsmBackend : public X86AsmBackend {
public:
- ELFX86AsmBackend(const Target &T)
- : X86AsmBackend(T) {
- HasAbsolutizedSet = true;
- HasScatteredSymbols = true;
+ Triple::OSType OSType;
+ ELFX86AsmBackend(const Target &T, Triple::OSType _OSType)
+ : X86AsmBackend(T), OSType(_OSType) {
+ HasReliableSymbolDifference = true;
}
- bool isVirtualSection(const MCSection &Section) const {
- const MCSectionELF &SE = static_cast<const MCSectionELF&>(Section);
- return SE.getType() == MCSectionELF::SHT_NOBITS;;
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
+ return ES.getFlags() & ELF::SHF_MERGE;
}
};
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
- ELFX86_32AsmBackend(const Target &T)
- : ELFX86AsmBackend(T) {}
+ ELFX86_32AsmBackend(const Target &T, Triple::OSType OSType)
+ : ELFX86AsmBackend(T, OSType) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return new ELFObjectWriter(OS, /*Is64Bit=*/false,
- /*IsLittleEndian=*/true,
- /*HasRelocationAddend=*/false);
+ return createELFObjectWriter(new X86ELFObjectWriter(false, OSType,
+ ELF::EM_386, false),
+ OS, /*IsLittleEndian*/ true);
}
};
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
- ELFX86_64AsmBackend(const Target &T)
- : ELFX86AsmBackend(T) {}
+ ELFX86_64AsmBackend(const Target &T, Triple::OSType OSType)
+ : ELFX86AsmBackend(T, OSType) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return new ELFObjectWriter(OS, /*Is64Bit=*/true,
- /*IsLittleEndian=*/true,
- /*HasRelocationAddend=*/true);
+ return createELFObjectWriter(new X86ELFObjectWriter(true, OSType,
+ ELF::EM_X86_64, true),
+ OS, /*IsLittleEndian*/ true);
}
};
class WindowsX86AsmBackend : public X86AsmBackend {
bool Is64Bit;
+
public:
WindowsX86AsmBackend(const Target &T, bool is64Bit)
: X86AsmBackend(T)
, Is64Bit(is64Bit) {
- HasScatteredSymbols = true;
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
return createWinCOFFObjectWriter(OS, Is64Bit);
}
-
- bool isVirtualSection(const MCSection &Section) const {
- const MCSectionCOFF &SE = static_cast<const MCSectionCOFF&>(Section);
- return SE.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
- }
};
class DarwinX86AsmBackend : public X86AsmBackend {
public:
DarwinX86AsmBackend(const Target &T)
- : X86AsmBackend(T) {
- HasAbsolutizedSet = true;
- HasScatteredSymbols = true;
- }
-
- bool isVirtualSection(const MCSection &Section) const {
- const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
- return (SMO.getType() == MCSectionMachO::S_ZEROFILL ||
- SMO.getType() == MCSectionMachO::S_GB_ZEROFILL ||
- SMO.getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
- }
+ : X86AsmBackend(T) { }
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
@@ -263,7 +351,9 @@ public:
: DarwinX86AsmBackend(T) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return new MachObjectWriter(OS, /*Is64Bit=*/false);
+ return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
+ object::mach::CTM_i386,
+ object::mach::CSX86_ALL);
}
};
@@ -275,7 +365,9 @@ public:
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return new MachObjectWriter(OS, /*Is64Bit=*/true);
+ return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
+ object::mach::CTM_x86_64,
+ object::mach::CSX86_ALL);
}
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -312,7 +404,7 @@ public:
}
};
-} // end anonymous namespace
+} // end anonymous namespace
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
const std::string &TT) {
@@ -322,9 +414,12 @@ TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
case Triple::MinGW32:
case Triple::Cygwin:
case Triple::Win32:
- return new WindowsX86AsmBackend(T, false);
+ if (Triple(TT).getEnvironment() == Triple::MachO)
+ return new DarwinX86_32AsmBackend(T);
+ else
+ return new WindowsX86AsmBackend(T, false);
default:
- return new ELFX86_32AsmBackend(T);
+ return new ELFX86_32AsmBackend(T, Triple(TT).getOS());
}
}
@@ -333,11 +428,14 @@ TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
switch (Triple(TT).getOS()) {
case Triple::Darwin:
return new DarwinX86_64AsmBackend(T);
- case Triple::MinGW64:
+ case Triple::MinGW32:
case Triple::Cygwin:
case Triple::Win32:
- return new WindowsX86AsmBackend(T, true);
+ if (Triple(TT).getEnvironment() == Triple::MachO)
+ return new DarwinX86_64AsmBackend(T);
+ else
+ return new WindowsX86AsmBackend(T, true);
default:
- return new ELFX86_64AsmBackend(T);
+ return new ELFX86_64AsmBackend(T, Triple(TT).getOS());
}
}
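
Both factory functions now branch on OS and environment: a Windows triple with a MachO environment still gets the Darwin backend, and anything that is neither Darwin nor Windows falls through to ELF with the OS recorded for the object writer. The selection logic, condensed into a standalone sketch (hypothetical enums and names):

enum class OS { Darwin, MinGW32, Cygwin, Win32, Other };
enum class Env { MachO, Other };

// Mirrors createX86_32/64AsmBackend above: Darwin wins, MachO-on-Windows is
// treated as Darwin, other Windows flavors get COFF, everything else ELF.
static const char *pickX86Backend(OS os, Env env, bool is64Bit) {
  switch (os) {
  case OS::Darwin:
    return is64Bit ? "DarwinX86_64" : "DarwinX86_32";
  case OS::MinGW32:
  case OS::Cygwin:
  case OS::Win32:
    if (env == Env::MachO)
      return is64Bit ? "DarwinX86_64" : "DarwinX86_32";
    return "WindowsX86";
  default:
    return is64Bit ? "ELFX86_64" : "ELFX86_32";
  }
}
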
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 20110ad..99b4479 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "X86AsmPrinter.h"
-#include "AsmPrinter/X86ATTInstPrinter.h"
-#include "AsmPrinter/X86IntelInstPrinter.h"
+#include "InstPrinter/X86ATTInstPrinter.h"
+#include "InstPrinter/X86IntelInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86.h"
#include "X86COFFMachineModuleInfo.h"
@@ -48,21 +48,15 @@ using namespace llvm;
// Primitive Helper Functions.
//===----------------------------------------------------------------------===//
-void X86AsmPrinter::PrintPICBaseSymbol(raw_ostream &O) const {
- const TargetLowering *TLI = TM.getTargetLowering();
- O << *static_cast<const X86TargetLowering*>(TLI)->getPICBaseSymbol(MF,
- OutContext);
-}
-
/// runOnMachineFunction - Emit the function body.
///
bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
SetupMachineFunction(MF);
- if (Subtarget->isTargetCOFF()) {
+ if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) {
bool Intrn = MF.getFunction()->hasInternalLinkage();
OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
- OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::IMAGE_SYM_CLASS_STATIC
+ OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::IMAGE_SYM_CLASS_STATIC
: COFF::IMAGE_SYM_CLASS_EXTERNAL);
OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
<< COFF::SCT_COMPLEX_TYPE_SHIFT);
@@ -95,7 +89,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
break;
case MachineOperand::MO_GlobalAddress: {
const GlobalValue *GV = MO.getGlobal();
-
+
MCSymbol *GVSym;
if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
GVSym = GetSymbolWithGlobalValueBase(GV, "$stub");
@@ -109,11 +103,11 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
// Handle dllimport linkage.
if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
GVSym = OutContext.GetOrCreateSymbol(Twine("__imp_") + GVSym->getName());
-
+
if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE) {
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MachineModuleInfoImpl::StubValueTy &StubSym =
+ MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
if (StubSym.getPointer() == 0)
StubSym = MachineModuleInfoImpl::
@@ -133,7 +127,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
StubSym = MachineModuleInfoImpl::
StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
}
-
+
// If the name begins with a dollar-sign, enclose it in parens. We do this
// to avoid having it look like an integer immediate to the assembler.
if (GVSym->getName()[0] != '$')
@@ -149,7 +143,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
SmallString<128> TempNameStr;
TempNameStr += StringRef(MO.getSymbolName());
TempNameStr += StringRef("$stub");
-
+
MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
@@ -163,17 +157,17 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
} else {
SymToPrint = GetExternalSymbolSymbol(MO.getSymbolName());
}
-
+
// If the name begins with a dollar-sign, enclose it in parens. We do this
// to avoid having it look like an integer immediate to the assembler.
- if (SymToPrint->getName()[0] != '$')
+ if (SymToPrint->getName()[0] != '$')
O << *SymToPrint;
else
O << '(' << *SymToPrint << '(';
break;
}
}
-
+
switch (MO.getTargetFlags()) {
default:
llvm_unreachable("Unknown target flag on GV operand");
@@ -185,15 +179,12 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
// These affect the name of the symbol, not any suffix.
break;
case X86II::MO_GOT_ABSOLUTE_ADDRESS:
- O << " + [.-";
- PrintPICBaseSymbol(O);
- O << ']';
- break;
+ O << " + [.-" << *MF->getPICBaseSymbol() << ']';
+ break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
- O << '-';
- PrintPICBaseSymbol(O);
+ O << '-' << *MF->getPICBaseSymbol();
break;
case X86II::MO_TLSGD: O << "@TLSGD"; break;
case X86II::MO_GOTTPOFF: O << "@GOTTPOFF"; break;
@@ -206,8 +197,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
case X86II::MO_PLT: O << "@PLT"; break;
case X86II::MO_TLVP: O << "@TLVP"; break;
case X86II::MO_TLVP_PIC_BASE:
- O << "@TLVP" << '-';
- PrintPICBaseSymbol(O);
+ O << "@TLVP" << '-' << *MF->getPICBaseSymbol();
break;
}
}
@@ -262,7 +252,7 @@ void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
case MachineOperand::MO_JumpTableIndex:
case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_GlobalAddress:
case MachineOperand::MO_ExternalSymbol: {
O << '$';
printSymbolOperand(MO, O);
@@ -298,10 +288,10 @@ void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
if (HasBaseReg && Modifier && !strcmp(Modifier, "no-rip") &&
BaseReg.getReg() == X86::RIP)
HasBaseReg = false;
-
+
// HasParenPart - True if we will print out the () part of the mem ref.
bool HasParenPart = IndexReg.getReg() || HasBaseReg;
-
+
if (DispSpec.isImm()) {
int DispVal = DispSpec.getImm();
if (DispVal || !HasParenPart)
@@ -312,6 +302,9 @@ void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
printSymbolOperand(MI->getOperand(Op+3), O);
}
+ if (Modifier && strcmp(Modifier, "H") == 0)
+ O << "+8";
+
if (HasParenPart) {
assert(IndexReg.getReg() != X86::ESP &&
"X86 doesn't allow scaling by ESP");
@@ -344,10 +337,8 @@ void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
void X86AsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op,
raw_ostream &O) {
- PrintPICBaseSymbol(O);
- O << '\n';
- PrintPICBaseSymbol(O);
- O << ':';
+ O << *MF->getPICBaseSymbol() << '\n';
+ O << *MF->getPICBaseSymbol() << ':';
}
bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
@@ -386,14 +377,14 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
const MachineOperand &MO = MI->getOperand(OpNo);
-
+
switch (ExtraCode[0]) {
default: return true; // Unknown modifier.
case 'a': // This is an address. Currently only 'i' and 'r' are expected.
if (MO.isImm()) {
O << MO.getImm();
return false;
- }
+ }
if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol()) {
printSymbolOperand(MO, O);
if (Subtarget->isPICStyleRIPRel())
@@ -470,6 +461,9 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
case 'q': // Print SImode register
// These only apply to registers, ignore on mem.
break;
+ case 'H':
+ printMemReference(MI, OpNo, O, "H");
+ return false;
case 'P': // Don't print @PLT, but do print as memory.
printMemReference(MI, OpNo, O, "no-rip");
return false;
@@ -480,23 +474,23 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
}
void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin())
+ if (Subtarget->isTargetEnvMacho())
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
}
void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget->isTargetEnvMacho()) {
// All darwin targets use mach-o.
MachineModuleInfoMachO &MMIMacho =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
-
+
// Output stubs for dynamically-linked functions.
MachineModuleInfoMachO::SymbolListTy Stubs;
Stubs = MMIMacho.GetFnStubList();
if (!Stubs.empty()) {
- const MCSection *TheSection =
+ const MCSection *TheSection =
OutContext.getMachOSection("__IMPORT", "__jump_table",
MCSectionMachO::S_SYMBOL_STUBS |
MCSectionMachO::S_ATTR_SELF_MODIFYING_CODE |
@@ -514,7 +508,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
const char HltInsts[] = { -12, -12, -12, -12, -12 };
OutStreamer.EmitBytes(StringRef(HltInsts, 5), 0/*addrspace*/);
}
-
+
Stubs.clear();
OutStreamer.AddBlankLine();
}
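
The five -12 bytes in HltInsts above are not magic: as an unsigned octet, -12 is 0xF4, the x86 hlt opcode, so each stub slot is padded with five hlt instructions. A one-line check (C++11):

static_assert((unsigned char)-12 == 0xF4, "-12 is the hlt opcode byte");
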
@@ -522,7 +516,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// Output stubs for external and common global variables.
Stubs = MMIMacho.GetGVStubList();
if (!Stubs.empty()) {
- const MCSection *TheSection =
+ const MCSection *TheSection =
OutContext.getMachOSection("__IMPORT", "__pointers",
MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
SectionKind::getMetadata());
@@ -580,7 +574,14 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
- if (Subtarget->isTargetCOFF()) {
+ if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing() &&
+ MMI->callsExternalVAFunctionWithFloatingPointArguments()) {
+ StringRef SymbolName = Subtarget->is64Bit() ? "_fltused" : "__fltused";
+ MCSymbol *S = MMI->getContext().GetOrCreateSymbol(SymbolName);
+ OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
+ }
+
+ if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) {
X86COFFMachineModuleInfo &COFFMMI =
MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
@@ -661,12 +662,12 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
-MachineLocation
+MachineLocation
X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
MachineLocation Location;
assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!");
// Frame address. Currently handles register +- offset only.
-
+
if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm())
Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm());
else {
@@ -690,9 +691,9 @@ void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
O << V.getName();
O << " <- ";
// Frame address. Currently handles register +- offset only.
- O << '[';
+ O << '[';
if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg())
- printOperand(MI, 0, O);
+ printOperand(MI, 0, O);
else
O << "undef";
O << '+'; printOperand(MI, 3, O);
@@ -718,10 +719,10 @@ static MCInstPrinter *createX86MCInstPrinter(const Target &T,
}
// Force static initialization.
-extern "C" void LLVMInitializeX86AsmPrinter() {
+extern "C" void LLVMInitializeX86AsmPrinter() {
RegisterAsmPrinter<X86AsmPrinter> X(TheX86_32Target);
RegisterAsmPrinter<X86AsmPrinter> Y(TheX86_64Target);
-
+
TargetRegistry::RegisterMCInstPrinter(TheX86_32Target,createX86MCInstPrinter);
TargetRegistry::RegisterMCInstPrinter(TheX86_64Target,createX86MCInstPrinter);
}
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index e61be66..3a50435 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -75,8 +75,6 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void printPICLabel(const MachineInstr *MI, unsigned Op, raw_ostream &O);
- void PrintPICBaseSymbol(raw_ostream &O) const;
-
bool runOnMachineFunction(MachineFunction &F);
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index e3409ef..a44fb69 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -48,7 +48,7 @@ def RetCC_X86Common : CallingConv<[
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>,
+ CCIfType<[x86mmx, v1i64], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -61,7 +61,7 @@ def RetCC_X86_32_C : CallingConv<[
// weirdly; this is really the sse-regparm calling convention) in which
// case they use XMM0, otherwise it is the same as the common X86 calling
// conv.
- CCIfInReg<CCIfSubtarget<"hasSSE2()",
+ CCIfInReg<CCIfSubtarget<"hasXMMInt()",
CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
CCDelegateTo<RetCC_X86Common>
@@ -73,8 +73,8 @@ def RetCC_X86_32_Fast : CallingConv<[
// SSE2.
// This can happen when a float, 2 x float, or 3 x float vector is split by
// target lowering, and is returned in 1-3 sse regs.
- CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
- CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+ CCIfType<[f32], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+ CCIfType<[f64], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
// For integers, ECX can be used as an extra return register
CCIfType<[i8], CCAssignToReg<[AL, DL, CL]>>,
@@ -95,14 +95,14 @@ def RetCC_X86_64_C : CallingConv<[
// returned in RAX. This disagrees with ABI documentation but is bug
// compatible with gcc.
CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
- CCIfType<[v8i8, v4i16, v2i32], CCAssignToReg<[XMM0, XMM1]>>,
+ CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common>
]>;
// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
// The X86-Win64 calling convention always returns __m64 values in RAX.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCBitConvertToType<i64>>,
+ CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
// And FP in XMM0 only.
CCIfType<[f32], CCAssignToReg<[XMM0]>>,
@@ -161,14 +161,14 @@ def CC_X86_64_C : CallingConv<[
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin.
- CCIfType<[v8i8, v4i16, v2i32],
+ CCIfType<[x86mmx],
CCIfSubtarget<"isTargetDarwin()",
- CCIfSubtarget<"hasSSE2()",
+ CCIfSubtarget<"hasXMMInt()",
CCPromoteToType<v2i64>>>>,
// The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfSubtarget<"hasSSE1()",
+ CCIfSubtarget<"hasXMM()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
// The first 8 256-bit vector arguments are passed in YMM registers.
@@ -192,7 +192,7 @@ def CC_X86_64_C : CallingConv<[
CCAssignToStack<32, 32>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
+ CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
]>;
// Calling convention used on Win64
@@ -210,8 +210,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
// The first 4 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64],
- CCBitConvertToType<i64>>,
+ CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
@@ -233,7 +232,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[f80], CCAssignToStack<0, 0>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
+ CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
]>;
def CC_X86_64_GHC : CallingConv<[
@@ -246,7 +245,7 @@ def CC_X86_64_GHC : CallingConv<[
// Pass in STG registers: F1, F2, F3, F4, D1, D2
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfSubtarget<"hasSSE1()",
+ CCIfSubtarget<"hasXMM()",
CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;
@@ -264,12 +263,12 @@ def CC_X86_32_Common : CallingConv<[
// The first 3 float or double arguments, if marked 'inreg' and if the call
// is not a vararg call and if SSE2 is available, are passed in SSE registers.
CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
- CCIfSubtarget<"hasSSE2()",
+ CCIfSubtarget<"hasXMMInt()",
CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
// The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
// registers if the call is not a vararg call.
- CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32],
+ CCIfNotVarArg<CCIfType<[x86mmx],
CCAssignToReg<[MM0, MM1, MM2]>>>,
// Integer/Float values get stored in stack slots that are 4 bytes in
@@ -300,7 +299,7 @@ def CC_X86_32_Common : CallingConv<[
// __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
// passed in the parameter area.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>;
+ CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 4>>]>;
def CC_X86_32_C : CallingConv<[
// Promote i8/i16 arguments to i32.
@@ -363,7 +362,7 @@ def CC_X86_32_FastCC : CallingConv<[
// The first 3 float or double arguments, if the call is not a vararg
// call and if SSE2 is available, are passed in SSE registers.
CCIfNotVarArg<CCIfType<[f32,f64],
- CCIfSubtarget<"hasSSE2()",
+ CCIfSubtarget<"hasXMMInt()",
CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
// Doubles get 8-byte slots that are 8-byte aligned.
@@ -380,3 +379,35 @@ def CC_X86_32_GHC : CallingConv<[
// Pass in STG registers: Base, Sp, Hp, R1
CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;
+
+//===----------------------------------------------------------------------===//
+// X86 Root Argument Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// This is the root argument convention for the X86-32 backend.
+def CC_X86_32 : CallingConv<[
+ CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
+ CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
+ CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
+ CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
+
+ // Otherwise, drop to normal X86-32 CC
+ CCDelegateTo<CC_X86_32_C>
+]>;
+
+// This is the root argument convention for the X86-64 backend.
+def CC_X86_64 : CallingConv<[
+ CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
+
+ // Mingw64 and native Win64 use Win64 CC
+ CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
+
+ // Otherwise, drop to normal X86-64 CC
+ CCDelegateTo<CC_X86_64_C>
+]>;
+
+// This is the argument convention used for the entire X86 backend.
+def CC_X86 : CallingConv<[
+ CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
+ CCDelegateTo<CC_X86_32>
+]>;
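
These root conventions replace hand-written dispatchers (the CCAssignFnForCall/CCAssignFnForRet helpers deleted from X86FastISel.cpp below). Their net effect for argument assignment is equivalent to this C++ selector sketch, where the strings stand in for the generated CCAssignFns:

enum class CallConv { C, Fast, GHC, X86_FastCall, X86_ThisCall };

// What the CC_X86 delegation above boils down to.
static const char *rootArgCC(bool is64Bit, bool isTargetWin64, CallConv CC) {
  if (is64Bit) {                                   // CC_X86 -> CC_X86_64
    if (CC == CallConv::GHC) return "CC_X86_64_GHC";
    return isTargetWin64 ? "CC_X86_Win64_C" : "CC_X86_64_C";
  }
  switch (CC) {                                    // CC_X86 -> CC_X86_32
  case CallConv::X86_FastCall: return "CC_X86_32_FastCall";
  case CallConv::X86_ThisCall: return "CC_X86_32_ThisCall";
  case CallConv::Fast:         return "CC_X86_32_FastCC";
  case CallConv::GHC:          return "CC_X86_32_GHC";
  default:                     return "CC_X86_32_C";
  }
}
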
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index 824021c..60d9d4a 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -68,8 +68,7 @@ namespace {
return "X86 Machine Code Emitter";
}
- void emitInstruction(const MachineInstr &MI,
- const TargetInstrDesc *Desc);
+ void emitInstruction(MachineInstr &MI, const TargetInstrDesc *Desc);
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -131,7 +130,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
MCE.StartMachineBasicBlock(MBB);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I) {
const TargetInstrDesc &Desc = I->getDesc();
emitInstruction(*I, &Desc);
@@ -598,9 +597,23 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
}
template<class CodeEmitter>
-void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
+void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
const TargetInstrDesc *Desc) {
DEBUG(dbgs() << MI);
+
+ // If this is a pseudo instruction, lower it.
+ switch (Desc->getOpcode()) {
+ case X86::ADD16rr_DB: Desc = &II->get(X86::OR16rr); MI.setDesc(*Desc);break;
+ case X86::ADD32rr_DB: Desc = &II->get(X86::OR32rr); MI.setDesc(*Desc);break;
+ case X86::ADD64rr_DB: Desc = &II->get(X86::OR64rr); MI.setDesc(*Desc);break;
+ case X86::ADD16ri_DB: Desc = &II->get(X86::OR16ri); MI.setDesc(*Desc);break;
+ case X86::ADD32ri_DB: Desc = &II->get(X86::OR32ri); MI.setDesc(*Desc);break;
+ case X86::ADD64ri32_DB:Desc = &II->get(X86::OR64ri32);MI.setDesc(*Desc);break;
+ case X86::ADD16ri8_DB: Desc = &II->get(X86::OR16ri8);MI.setDesc(*Desc);break;
+ case X86::ADD32ri8_DB: Desc = &II->get(X86::OR32ri8);MI.setDesc(*Desc);break;
+ case X86::ADD64ri8_DB: Desc = &II->get(X86::OR64ri8);MI.setDesc(*Desc);break;
+ }
+
MCE.processDebugLoc(MI.getDebugLoc(), true);
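
The _DB pseudos mark adds whose operands are known to have no set bits in common, which is why the emitter may lower them to plain ORs without changing the result. Why that is sound, as a tiny illustration:

#include <cassert>
#include <cstdint>

// With no common set bits there are no carries, so a + b == a | b.
static uint64_t addDisjointBits(uint64_t a, uint64_t b) {
  assert((a & b) == 0 && "operands must not share set bits");
  return a | b; // identical to a + b under the assertion
}
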
diff --git a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
index f84995d..f1d7ede 100644
--- a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
@@ -14,6 +14,7 @@
#include "X86ELFWriterInfo.h"
#include "X86Relocations.h"
#include "llvm/Function.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
@@ -24,8 +25,8 @@ using namespace llvm;
// Implementation of the X86ELFWriterInfo class
//===----------------------------------------------------------------------===//
-X86ELFWriterInfo::X86ELFWriterInfo(TargetMachine &TM)
- : TargetELFWriterInfo(TM) {
+X86ELFWriterInfo::X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_)
+ : TargetELFWriterInfo(is64Bit_, isLittleEndian_) {
EMachine = is64Bit ? EM_X86_64 : EM_386;
}
@@ -35,13 +36,13 @@ unsigned X86ELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
if (is64Bit) {
switch(MachineRelTy) {
case X86::reloc_pcrel_word:
- return R_X86_64_PC32;
+ return ELF::R_X86_64_PC32;
case X86::reloc_absolute_word:
- return R_X86_64_32;
+ return ELF::R_X86_64_32;
case X86::reloc_absolute_word_sext:
- return R_X86_64_32S;
+ return ELF::R_X86_64_32S;
case X86::reloc_absolute_dword:
- return R_X86_64_64;
+ return ELF::R_X86_64_64;
case X86::reloc_picrel_word:
default:
llvm_unreachable("unknown x86_64 machine relocation type");
@@ -49,9 +50,9 @@ unsigned X86ELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
} else {
switch(MachineRelTy) {
case X86::reloc_pcrel_word:
- return R_386_PC32;
+ return ELF::R_386_PC32;
case X86::reloc_absolute_word:
- return R_386_32;
+ return ELF::R_386_32;
case X86::reloc_absolute_word_sext:
case X86::reloc_absolute_dword:
case X86::reloc_picrel_word:
@@ -66,18 +67,18 @@ long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
long int Modifier) const {
if (is64Bit) {
switch(RelTy) {
- case R_X86_64_PC32: return Modifier - 4;
- case R_X86_64_32:
- case R_X86_64_32S:
- case R_X86_64_64:
+ case ELF::R_X86_64_PC32: return Modifier - 4;
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
+ case ELF::R_X86_64_64:
return Modifier;
default:
llvm_unreachable("unknown x86_64 relocation type");
}
} else {
switch(RelTy) {
- case R_386_PC32: return Modifier - 4;
- case R_386_32: return Modifier;
+ case ELF::R_386_PC32: return Modifier - 4;
+ case ELF::R_386_32: return Modifier;
default:
llvm_unreachable("unknown x86 relocation type");
}
@@ -88,19 +89,19 @@ long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
if (is64Bit) {
switch(RelTy) {
- case R_X86_64_PC32:
- case R_X86_64_32:
- case R_X86_64_32S:
+ case ELF::R_X86_64_PC32:
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
return 32;
- case R_X86_64_64:
+ case ELF::R_X86_64_64:
return 64;
default:
llvm_unreachable("unknown x86_64 relocation type");
}
} else {
switch(RelTy) {
- case R_386_PC32:
- case R_386_32:
+ case ELF::R_386_PC32:
+ case ELF::R_386_32:
return 32;
default:
llvm_unreachable("unknown x86 relocation type");
@@ -112,20 +113,20 @@ unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
bool X86ELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
if (is64Bit) {
switch(RelTy) {
- case R_X86_64_PC32:
+ case ELF::R_X86_64_PC32:
return true;
- case R_X86_64_32:
- case R_X86_64_32S:
- case R_X86_64_64:
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
+ case ELF::R_X86_64_64:
return false;
default:
llvm_unreachable("unknown x86_64 relocation type");
}
} else {
switch(RelTy) {
- case R_386_PC32:
+ case ELF::R_386_PC32:
return true;
- case R_386_32:
+ case ELF::R_386_32:
return false;
default:
llvm_unreachable("unknown x86 relocation type");
@@ -143,7 +144,7 @@ long int X86ELFWriterInfo::computeRelocation(unsigned SymOffset,
unsigned RelOffset,
unsigned RelTy) const {
- if (RelTy == R_X86_64_PC32 || RelTy == R_386_PC32)
+ if (RelTy == ELF::R_X86_64_PC32 || RelTy == ELF::R_386_PC32)
return SymOffset - (RelOffset + 4);
else
assert("computeRelocation unknown for this relocation type");
diff --git a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h
index 342e6e6..a45b5bb 100644
--- a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.h
@@ -20,25 +20,8 @@ namespace llvm {
class X86ELFWriterInfo : public TargetELFWriterInfo {
- // ELF Relocation types for X86
- enum X86RelocationType {
- R_386_NONE = 0,
- R_386_32 = 1,
- R_386_PC32 = 2
- };
-
- // ELF Relocation types for X86_64
- enum X86_64RelocationType {
- R_X86_64_NONE = 0,
- R_X86_64_64 = 1,
- R_X86_64_PC32 = 2,
- R_X86_64_32 = 10,
- R_X86_64_32S = 11,
- R_X86_64_PC64 = 24
- };
-
public:
- X86ELFWriterInfo(TargetMachine &TM);
+ X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
virtual ~X86ELFWriterInfo();
/// getRelocationType - Returns the target specific ELF Relocation type.
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index 0c70eec..9d42ac2 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -36,7 +36,7 @@
using namespace llvm;
namespace {
-
+
class X86FastISel : public FastISel {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -46,7 +46,7 @@ class X86FastISel : public FastISel {
///
unsigned StackPtr;
- /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
+ /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
/// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations.
@@ -63,11 +63,18 @@ public:
virtual bool TargetSelectInstruction(const Instruction *I);
+ /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+  /// vreg is being provided by the specified load instruction. If possible,
+  /// try to fold the load as an operand to the instruction, returning true
+  /// on success.
+ virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
+
#include "X86GenFastISel.inc"
private:
bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
-
+
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
bool X86FastEmitStore(EVT VT, const Value *Val,
@@ -77,12 +84,12 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
-
+
bool X86SelectAddress(const Value *V, X86AddressMode &AM);
bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
bool X86SelectLoad(const Instruction *I);
-
+
bool X86SelectStore(const Instruction *I);
bool X86SelectRet(const Instruction *I);
@@ -98,7 +105,7 @@ private:
bool X86SelectSelect(const Instruction *I);
bool X86SelectTrunc(const Instruction *I);
-
+
bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I);
@@ -107,9 +114,6 @@ private:
bool X86VisitIntrinsicCall(const IntrinsicInst &I);
bool X86SelectCall(const Instruction *I);
- CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
- CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
-
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine()->getInstrInfo();
}
@@ -128,17 +132,18 @@ private:
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
+ bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
};
-
+
} // end anonymous namespace.
-bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
- VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
- if (VT == MVT::Other || !VT.isSimple())
+bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+ EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
+ if (evt == MVT::Other || !evt.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
-
+
+ VT = evt.getSimpleVT();
// For now, require SSE/SSE2 for performing floating-point operations,
// since x87 requires additional work.
if (VT == MVT::f64 && !X86ScalarSSEf64)
@@ -157,45 +162,6 @@ bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
#include "X86GenCallingConv.inc"
-/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
- bool isTaillCall) {
- if (Subtarget->is64Bit()) {
- if (CC == CallingConv::GHC)
- return CC_X86_64_GHC;
- else if (Subtarget->isTargetWin64())
- return CC_X86_Win64_C;
- else
- return CC_X86_64_C;
- }
-
- if (CC == CallingConv::X86_FastCall)
- return CC_X86_32_FastCall;
- else if (CC == CallingConv::X86_ThisCall)
- return CC_X86_32_ThisCall;
- else if (CC == CallingConv::Fast)
- return CC_X86_32_FastCC;
- else if (CC == CallingConv::GHC)
- return CC_X86_32_GHC;
- else
- return CC_X86_32_C;
-}
-
-/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
- bool isTaillCall) {
- if (Subtarget->is64Bit()) {
- if (Subtarget->isTargetWin64())
- return RetCC_X86_Win64_C;
- else
- return RetCC_X86_64_C;
- }
-
- return RetCC_X86_32_C;
-}
-
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
@@ -284,7 +250,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
break;
}
-
+
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(Opc)), AM).addReg(Val);
return true;
@@ -295,7 +261,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
-
+
// If this is a store of a simple constant, fold the constant into the store.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0;
@@ -312,7 +278,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
Opc = X86::MOV64mi32;
break;
}
-
+
if (Opc) {
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(Opc)), AM)
@@ -321,11 +287,11 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
return true;
}
}
-
+
unsigned ValReg = getRegForValue(Val);
if (ValReg == 0)
- return false;
-
+ return false;
+
return X86FastEmitStore(VT, ValReg, AM);
}
@@ -337,7 +303,7 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned &ResultReg) {
unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
Src, /*TODO: Kill=*/false);
-
+
if (RR != 0) {
ResultReg = RR;
return true;
@@ -354,11 +320,11 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Don't walk into other basic blocks; it's possible we haven't
// visited them yet, so the instructions may not yet be assigned
// virtual registers.
- if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
- return false;
-
- Opcode = I->getOpcode();
- U = I;
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
@@ -472,7 +438,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
AM.Disp = (uint32_t)Disp;
if (X86SelectAddress(U->getOperand(0), AM))
return true;
-
+
// If we couldn't merge the sub value into this addr mode, revert back to
// our address and just match the value instead of completely failing.
AM = SavedAM;
@@ -501,7 +467,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Okay, we've committed to selecting this global. Set up the basic address.
AM.GV = GV;
-
+
// Allow the subtarget to classify the global.
unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
@@ -510,7 +476,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// FIXME: How do we know Base.Reg is free??
AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
}
-
+
// Unless the ABI requires an extra load, return a direct reference to
// the global.
if (!isGlobalStubReference(GVFlags)) {
@@ -523,7 +489,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
AM.GVOpFlags = GVFlags;
return true;
}
-
+
// Ok, we need to do a load from a stub. If we've already loaded from this
// stub, reuse the loaded pointer, otherwise emit the load now.
DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
@@ -545,14 +511,14 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
-
+
if (Subtarget->isPICStyleRIPRel())
StubAM.Base.Reg = X86::RIP;
} else {
Opc = X86::MOV32rm;
RC = X86::GR32RegisterClass;
}
-
+
LoadReg = createResultReg(RC);
MachineInstrBuilder LoadMI =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
@@ -564,7 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg;
}
-
+
// Now construct the final address. Note that the Disp, Scale,
// and Index values may already be set here.
AM.Base.Reg = LoadReg;
@@ -638,7 +604,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
// Okay, we've committed to selecting this global. Set up the basic address.
AM.GV = GV;
-
+
// No ABI requires an extra load for anything other than DLLImport, which
// we rejected above. Return a direct reference to the global.
if (Subtarget->isPICStyleRIPRel()) {
@@ -651,7 +617,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
} else if (Subtarget->isPICStyleGOT()) {
AM.GVOpFlags = X86II::MO_GOTOFF;
}
-
+
return true;
}
@@ -674,7 +640,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
- EVT VT;
+ MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
@@ -724,7 +690,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
- CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
+ CCInfo.AnalyzeReturn(Outs, RetCC_X86);
const Value *RV = Ret->getOperand(0);
unsigned Reg = getRegForValue(RV);
@@ -736,7 +702,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
return false;
CCValAssign &VA = ValLocs[0];
-
+
// Don't bother handling odd stuff for now.
if (VA.getLocInfo() != CCValAssign::Full)
return false;
@@ -745,7 +711,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
return false;
// TODO: For now, don't try to handle cases where getLocInfo()
// says Full but the types don't match.
- if (VA.getValVT() != TLI.getValueType(RV->getType()))
+ if (TLI.getValueType(RV->getType()) != VA.getValVT())
return false;
// The calling-convention tables for x87 returns don't tell
@@ -775,7 +741,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
- EVT VT;
+ MVT VT;
if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
return false;
@@ -826,11 +792,11 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
EVT VT) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
-
+
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Op1))
Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
-
+
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
@@ -842,23 +808,23 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
return true;
}
}
-
+
unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false;
-
+
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
.addReg(Op0Reg)
.addReg(Op1Reg);
-
+
return true;
}
bool X86FastISel::X86SelectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
- EVT VT;
+ MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
return false;
@@ -869,13 +835,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
case CmpInst::FCMP_OEQ: {
if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
return false;
-
+
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::SETNPr), NPReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -908,7 +874,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
case CmpInst::FCMP_UGE: SwapArgs = true; SetCCOpc = X86::SETBEr; break;
case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break;
case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
-
+
case CmpInst::ICMP_EQ: SwapArgs = false; SetCCOpc = X86::SETEr; break;
case CmpInst::ICMP_NE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr; break;
@@ -930,7 +896,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
// Emit a compare of Op0/Op1.
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
-
+
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -995,7 +961,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
-
+
case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
@@ -1009,7 +975,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
default:
return false;
}
-
+
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
@@ -1017,7 +983,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Emit a compare of the LHS and RHS, setting the flags.
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
-
+
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
.addMBB(TrueMBB);
@@ -1070,8 +1036,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
}
const TargetInstrDesc &TID = MI.getDesc();
- if (TID.hasUnmodeledSideEffects() ||
- TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
+ if (TID.hasImplicitDefOfPhysReg(X86::EFLAGS) ||
+ MI.hasUnmodeledSideEffects())
break;
}
@@ -1147,22 +1113,22 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
return false;
}
- EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
- if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
return false;
unsigned Op0Reg = getRegForValue(I->getOperand(0));
if (Op0Reg == 0) return false;
-
+
// Fold immediate in shl(x,3).
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
}
-
+
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
@@ -1183,23 +1149,26 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
}
bool X86FastISel::X86SelectSelect(const Instruction *I) {
- EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
- if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+ MVT VT;
+ if (!isTypeLegal(I->getType(), VT))
return false;
-
+
+    // We only use cmov here; if we don't have a cmov instruction, bail.
+ if (!Subtarget->hasCMov()) return false;
+
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
- if (VT.getSimpleVT() == MVT::i16) {
+ if (VT == MVT::i16) {
Opc = X86::CMOVE16rr;
RC = &X86::GR16RegClass;
- } else if (VT.getSimpleVT() == MVT::i32) {
+ } else if (VT == MVT::i32) {
Opc = X86::CMOVE32rr;
RC = &X86::GR32RegClass;
- } else if (VT.getSimpleVT() == MVT::i64) {
+ } else if (VT == MVT::i64) {
Opc = X86::CMOVE64rr;
RC = &X86::GR64RegClass;
} else {
- return false;
+ return false;
}
unsigned Op0Reg = getRegForValue(I->getOperand(0));
@@ -1264,7 +1233,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
return false;
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
-
+
// This code only handles truncation to byte right now.
if (DstVT != MVT::i8 && DstVT != MVT::i1)
// All other cases should be handled by the tblgen generated code.
@@ -1335,21 +1304,21 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// Grab the frame index.
X86AddressMode AM;
if (!X86SelectAddress(Slot, AM)) return false;
-
+
if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
-
+
return true;
}
case Intrinsic::objectsize: {
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
const Type *Ty = I.getCalledFunction()->getReturnType();
-
+
assert(CI && "Non-constant type in Intrinsic::objectsize?");
-
- EVT VT;
+
+ MVT VT;
if (!isTypeLegal(Ty, VT))
return false;
-
+
unsigned OpC = 0;
if (VT == MVT::i32)
OpC = X86::MOV32ri;
@@ -1357,7 +1326,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
OpC = X86::MOV64ri;
else
return false;
-
+
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
addImm(CI->isZero() ? -1ULL : 0);
@@ -1392,7 +1361,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
- EVT VT;
+ MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
@@ -1429,7 +1398,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
ResultReg = DestReg1+1;
else
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
-
+
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
@@ -1476,7 +1445,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Handle *simple* calls for now.
const Type *RetTy = CS.getType();
- EVT RetVT;
+ MVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
else if (!isTypeLegal(RetTy, RetVT, true))
@@ -1506,7 +1475,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Deal with call operands first.
SmallVector<const Value *, 8> ArgVals;
SmallVector<unsigned, 8> Args;
- SmallVector<EVT, 8> ArgVTs;
+ SmallVector<MVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
Args.reserve(CS.arg_size());
ArgVals.reserve(CS.arg_size());
@@ -1532,7 +1501,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
return false;
const Type *ArgTy = (*i)->getType();
- EVT ArgVT;
+ MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT))
return false;
unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
@@ -1547,13 +1516,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
-
+
// Allocate shadow area for Win64
- if (Subtarget->isTargetWin64()) {
- CCInfo.AllocateStack(32, 8);
+ if (Subtarget->isTargetWin64()) {
+ CCInfo.AllocateStack(32, 8);
}
- CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
+ CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
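
Win64 always reserves a 32-byte shadow area (four 8-byte spill slots for the register arguments) ahead of any stack arguments, so CCInfo pre-allocates it before AnalyzeCallOperands runs. The resulting stack budget, as a sketch (hypothetical helper):

// Stack bytes a call needs: the Win64 shadow area, if any, plus whatever
// AnalyzeCallOperands assigned to stack slots. Even a no-arg Win64 call
// reserves 32 bytes.
static unsigned callFrameBytes(bool isTargetWin64, unsigned ArgStackBytes) {
  const unsigned Shadow = isTargetWin64 ? 32 : 0; // 4 slots, 8-byte aligned
  return Shadow + ArgStackBytes;
}
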
@@ -1570,7 +1539,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CCValAssign &VA = ArgLocs[i];
unsigned Arg = Args[VA.getValNo()];
EVT ArgVT = ArgVTs[VA.getValNo()];
-
+
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
@@ -1578,20 +1547,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
case CCValAssign::SExt: {
bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a sext!"); Emitted=Emitted;
- Emitted = true;
+ assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
}
case CCValAssign::ZExt: {
bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a zext!"); Emitted=Emitted;
- Emitted = true;
+ assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
}
case CCValAssign::AExt: {
+ // We don't handle MMX parameters yet.
+ if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() == 128)
+ return false;
bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg);
if (!Emitted)
@@ -1600,21 +1570,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (!Emitted)
Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg);
-
- assert(Emitted && "Failed to emit a aext!"); Emitted=Emitted;
+
+ assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
ArgVT = VA.getLocVT();
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT().getSimpleVT(),
- ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
+ unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
+ ISD::BITCAST, Arg, /*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
ArgVT = VA.getLocVT();
break;
}
}
-
+
if (VA.isRegLoc()) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
VA.getLocReg()).addReg(Arg);
@@ -1625,7 +1595,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset;
const Value *ArgVal = ArgVals[VA.getValNo()];
-
+
// If this is a really simple value, emit this with the Value* version of
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it
// can cause us to reevaluate the argument.
@@ -1637,13 +1607,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
}
// ELF / PIC requires GOT in the EBX register before function calls via PLT
- // GOT pointer.
+ // GOT pointer.
if (Subtarget->isPICStyleGOT()) {
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
X86::EBX).addReg(Base);
}
-
+
// Issue the call.
MachineInstrBuilder MIB;
if (CalleeOp) {
@@ -1657,7 +1627,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CallOpc = X86::CALL32r;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
.addReg(CalleeOp);
-
+
} else {
// Direct call.
assert(GV && "Not a direct call");
@@ -1668,10 +1638,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CallOpc = X86::CALL64pcrel32;
else
CallOpc = X86::CALLpcrel32;
-
+
// See if we need any target-specific flags on the GV operand.
unsigned char OpFlags = 0;
-
+
// On ELF targets, in both X86-64 and X86-32 mode, direct calls to
// external symbols must go through the PLT in PIC mode. If the symbol
// has hidden or protected visibility, or if it is static or local, then
@@ -1688,8 +1658,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// automatically synthesizes these stubs.
OpFlags = X86II::MO_DARWIN_STUB;
}
-
-
+
+
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
.addGlobalAddress(GV, 0, OpFlags);
}
@@ -1709,7 +1679,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Now handle call return value (if any).
SmallVector<unsigned, 4> UsedRegs;
- if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
+ if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
@@ -1718,7 +1688,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
EVT CopyVT = RVLocs[0].getValVT();
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
-
+
// If this is a call to a function that returns an fp value on the x87 fp
// stack, but where we prefer to use the value in xmm registers, copy it
// out as F80 and use a truncate to move it from fp stack reg to xmm reg.
@@ -1756,7 +1726,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (AndToI1) {
// Mask out all but the lowest bit for a call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}
@@ -1823,14 +1793,14 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
}
unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
- EVT VT;
+ MVT VT;
if (!isTypeLegal(C->getType(), VT))
return false;
-
+
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
- switch (VT.getSimpleVT().SimpleTy) {
+ switch (VT.SimpleTy) {
default: return false;
case MVT::i8:
Opc = X86::MOV8rm;
@@ -1871,7 +1841,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// No f80 support yet.
return false;
}
-
+
// Materialize addresses with LEA instructions.
if (isa<GlobalValue>(C)) {
X86AddressMode AM;
@@ -1887,14 +1857,14 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
}
return 0;
}
-
+
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// Alignment of vector types. FIXME!
Align = TD.getTypeAllocSize(C->getType());
}
-
+
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
unsigned char OpFlag = 0;
@@ -1941,6 +1911,34 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
return ResultReg;
}
+/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+/// vreg is being provided by the specified load instruction. If possible,
+/// try to fold the load as an operand to the instruction, returning true
+/// on success.
+bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ X86AddressMode AM;
+ if (!X86SelectAddress(LI->getOperand(0), AM))
+ return false;
+
+ X86InstrInfo &XII = (X86InstrInfo&)TII;
+
+ unsigned Size = TD.getTypeAllocSize(LI->getType());
+ unsigned Alignment = LI->getAlignment();
+
+ SmallVector<MachineOperand, 8> AddrOps;
+ AM.getFullAddress(AddrOps);
+
+ MachineInstr *Result =
+ XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
+ if (Result == 0) return false;
+
+ FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
+ MI->eraseFromParent();
+ return true;
+}
+
+
namespace llvm {
llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
return new X86FastISel(funcInfo);
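
The X86FastISel hunks above consistently narrow type queries from EVT to MVT: fast-isel only ever selects simple, register-sized value types, so isTypeLegal can hand back an MVT directly and callers drop the getSimpleVT() unwrapping step. A minimal standalone sketch of that distinction (illustrative types, not the LLVM API):

    #include <cassert>

    // Simple, register-sized types only -- all that fast-isel handles.
    enum SimpleVT { isVoid, i8, i16, i32, i64, f32, f64 };

    // An "extended" type may or may not wrap a simple type.
    struct ExtVT {
      SimpleVT Simple;
      bool IsSimple;
      SimpleVT getSimpleVT() const { assert(IsSimple); return Simple; }
    };

    // Legality check that yields the simple type directly, so callers
    // never need the extra getSimpleVT() step the old EVT code used.
    bool isTypeLegal(ExtVT VT, SimpleVT &Out) {
      if (!VT.IsSimple)
        return false; // fast-isel punts on non-simple types
      Out = VT.Simple;
      return true;
    }
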
diff --git a/contrib/llvm/lib/Target/X86/X86FixupKinds.h b/contrib/llvm/lib/Target/X86/X86FixupKinds.h
index 96e0aae..17d242a 100644
--- a/contrib/llvm/lib/Target/X86/X86FixupKinds.h
+++ b/contrib/llvm/lib/Target/X86/X86FixupKinds.h
@@ -15,11 +15,17 @@
namespace llvm {
namespace X86 {
enum Fixups {
- reloc_pcrel_4byte = FirstTargetFixupKind, // 32-bit pcrel, e.g. a branch.
- reloc_pcrel_1byte, // 8-bit pcrel, e.g. branch_1
- reloc_pcrel_2byte, // 16-bit pcrel, e.g. callw
- reloc_riprel_4byte, // 32-bit rip-relative
- reloc_riprel_4byte_movq_load // 32-bit rip-relative in movq
+ reloc_riprel_4byte = FirstTargetFixupKind, // 32-bit rip-relative
+ reloc_riprel_4byte_movq_load, // 32-bit rip-relative in movq
+ reloc_signed_4byte, // 32-bit signed. Unlike FK_Data_4
+ // this will be sign extended at
+ // runtime.
+ reloc_global_offset_table, // 32-bit, relative to the start
+ // of the instruction. Used only
+ // for _GLOBAL_OFFSET_TABLE_.
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
}
}
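
The rewritten enum drops the generic pcrel fixups (now expressible with target-independent kinds) and appends marker entries so the count of target fixups falls out of the enumerator arithmetic. A sketch of that counting idiom, with an illustrative base value standing in for the real FirstTargetFixupKind:

    enum Fixups {
      FirstTargetFixupKind = 128, // illustrative; defined elsewhere in MC
      reloc_riprel_4byte = FirstTargetFixupKind,
      reloc_riprel_4byte_movq_load,
      reloc_signed_4byte,
      reloc_global_offset_table,
      // Markers only -- not real fixup kinds.
      LastTargetFixupKind,
      NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
    };

    static_assert(NumTargetFixupKinds == 4, "four target-specific fixups");
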
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index e6ebf66..3aaa693 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -32,6 +32,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -51,6 +52,7 @@ namespace {
struct FPS : public MachineFunctionPass {
static char ID;
FPS() : MachineFunctionPass(ID) {
+ initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
// This is really only to keep valgrind quiet.
// The logic in isLive() is too much for it.
memset(Stack, 0, sizeof(Stack));
@@ -59,6 +61,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<EdgeBundles>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
MachineFunctionPass::getAnalysisUsage(AU);
@@ -94,7 +97,7 @@ namespace {
// FixStack[i] == getStackEntry(i) for all i < FixCount.
unsigned char FixStack[8];
- LiveBundle(unsigned m = 0) : Mask(m), FixCount(0) {}
+ LiveBundle() : Mask(0), FixCount(0) {}
// Have the live registers been assigned a stack order yet?
bool isFixed() const { return !Mask || FixCount; }
@@ -104,10 +107,8 @@ namespace {
// with no live FP registers.
SmallVector<LiveBundle, 8> LiveBundles;
- // Map each MBB in the current function to an (ingoing, outgoing) index into
- // LiveBundles. Blocks with no FP registers live in or out map to (0, 0)
- // and are not actually stored in the map.
- DenseMap<MachineBasicBlock*, std::pair<unsigned, unsigned> > BlockBundle;
+ // The edge bundle analysis provides indices into the LiveBundles vector.
+ EdgeBundles *Bundles;
// Return a bitmask of FP registers in block's live-in list.
unsigned calcLiveInMask(MachineBasicBlock *MBB) {
@@ -167,7 +168,8 @@ namespace {
/// getStackEntry - Return the X86::FP<n> register in register ST(i).
unsigned getStackEntry(unsigned STi) const {
- assert(STi < StackTop && "Access past stack top!");
+ if (STi >= StackTop)
+ report_fatal_error("Access past stack top!");
return Stack[StackTop-1-STi];
}
@@ -180,7 +182,8 @@ namespace {
// pushReg - Push the specified FP<n> register onto the stack.
void pushReg(unsigned Reg) {
assert(Reg < 8 && "Register number out of range!");
- assert(StackTop < 8 && "Stack overflow!");
+ if (StackTop >= 8)
+ report_fatal_error("Stack overflow!");
Stack[StackTop] = Reg;
RegMap[Reg] = StackTop++;
}
@@ -197,7 +200,8 @@ namespace {
std::swap(RegMap[RegNo], RegMap[RegOnTop]);
// Swap stack slot contents.
- assert(RegMap[RegOnTop] < StackTop);
+ if (RegMap[RegOnTop] >= StackTop)
+ report_fatal_error("Access past stack top!");
std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);
// Emit an fxch to update the runtime processor's version of the state.
@@ -281,6 +285,7 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
// Early exit.
if (!FPIsUsed) return false;
+ Bundles = &getAnalysis<EdgeBundles>();
TII = MF.getTarget().getInstrInfo();
// Prepare cross-MBB liveness.
@@ -305,7 +310,6 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
if (Processed.insert(BB))
Changed |= processBasicBlock(MF, *BB);
- BlockBundle.clear();
LiveBundles.clear();
return Changed;
@@ -318,90 +322,16 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
/// registers may be implicitly defined, or not used by all successors.
void FPS::bundleCFG(MachineFunction &MF) {
assert(LiveBundles.empty() && "Stale data in LiveBundles");
- assert(BlockBundle.empty() && "Stale data in BlockBundle");
- SmallPtrSet<MachineBasicBlock*, 8> PropDown, PropUp;
+ LiveBundles.resize(Bundles->getNumBundles());
- // LiveBundle[0] is the empty live-in set.
- LiveBundles.resize(1);
-
- // First gather the actual live-in masks for all MBBs.
+ // Gather the actual live-in masks for all MBBs.
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = I;
const unsigned Mask = calcLiveInMask(MBB);
if (!Mask)
continue;
- // Ingoing bundle index.
- unsigned &Idx = BlockBundle[MBB].first;
- // Already assigned an ingoing bundle?
- if (Idx)
- continue;
- // Allocate a new LiveBundle struct for this block's live-ins.
- const unsigned BundleIdx = Idx = LiveBundles.size();
- DEBUG(dbgs() << "Creating LB#" << BundleIdx << ": in:BB#"
- << MBB->getNumber());
- LiveBundles.push_back(Mask);
- LiveBundle &Bundle = LiveBundles.back();
-
- // Make sure all predecessors have the same live-out set.
- PropUp.insert(MBB);
-
- // Keep pushing liveness up and down the CFG until convergence.
- // Only critical edges cause iteration here, but when they do, multiple
- // blocks can be assigned to the same LiveBundle index.
- do {
- // Assign BundleIdx as liveout from predecessors in PropUp.
- for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropUp.begin(),
- E = PropUp.end(); I != E; ++I) {
- MachineBasicBlock *MBB = *I;
- for (MachineBasicBlock::const_pred_iterator LinkI = MBB->pred_begin(),
- LinkE = MBB->pred_end(); LinkI != LinkE; ++LinkI) {
- MachineBasicBlock *PredMBB = *LinkI;
- // PredMBB's liveout bundle should be set to LIIdx.
- unsigned &Idx = BlockBundle[PredMBB].second;
- if (Idx) {
- assert(Idx == BundleIdx && "Inconsistent CFG");
- continue;
- }
- Idx = BundleIdx;
- DEBUG(dbgs() << " out:BB#" << PredMBB->getNumber());
- // Propagate to siblings.
- if (PredMBB->succ_size() > 1)
- PropDown.insert(PredMBB);
- }
- }
- PropUp.clear();
-
- // Assign BundleIdx as livein to successors in PropDown.
- for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropDown.begin(),
- E = PropDown.end(); I != E; ++I) {
- MachineBasicBlock *MBB = *I;
- for (MachineBasicBlock::const_succ_iterator LinkI = MBB->succ_begin(),
- LinkE = MBB->succ_end(); LinkI != LinkE; ++LinkI) {
- MachineBasicBlock *SuccMBB = *LinkI;
- // LinkMBB's livein bundle should be set to BundleIdx.
- unsigned &Idx = BlockBundle[SuccMBB].first;
- if (Idx) {
- assert(Idx == BundleIdx && "Inconsistent CFG");
- continue;
- }
- Idx = BundleIdx;
- DEBUG(dbgs() << " in:BB#" << SuccMBB->getNumber());
- // Propagate to siblings.
- if (SuccMBB->pred_size() > 1)
- PropUp.insert(SuccMBB);
- // Also accumulate the bundle liveness mask from the liveins here.
- Bundle.Mask |= calcLiveInMask(SuccMBB);
- }
- }
- PropDown.clear();
- } while (!PropUp.empty());
- DEBUG({
- dbgs() << " live:";
- for (unsigned i = 0; i < 8; ++i)
- if (Bundle.Mask & (1<<i))
- dbgs() << " %FP" << i;
- dbgs() << '\n';
- });
+ // Update MBB ingoing bundle mask.
+ LiveBundles[Bundles->getBundle(MBB->getNumber(), false)].Mask |= Mask;
}
}
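
With EdgeBundles supplying the CFG grouping, bundleCFG reduces to OR-ing each block's FP live-in mask into the bundle that covers the block's ingoing edges. A standalone model of that reduction (plain indices instead of the LLVM classes):

    #include <vector>

    struct LiveBundle { unsigned Mask = 0; unsigned FixCount = 0; };

    // InBundle[BB] is the edge-bundle index covering BB's ingoing edges,
    // as an EdgeBundles-style analysis would compute it; blocks joined by
    // critical edges share an index, so their masks merge automatically.
    void mergeLiveInMasks(const std::vector<unsigned> &LiveInMask,
                          const std::vector<unsigned> &InBundle,
                          std::vector<LiveBundle> &LiveBundles) {
      for (unsigned BB = 0; BB != LiveInMask.size(); ++BB)
        if (unsigned Mask = LiveInMask[BB])
          LiveBundles[InBundle[BB]].Mask |= Mask;
    }
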
@@ -489,13 +419,15 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
return Changed;
}
-/// setupBlockStack - Use the BlockBundle map to set up our model of the stack
+/// setupBlockStack - Use the live bundles to set up our model of the stack
/// to match predecessors' live out stack.
void FPS::setupBlockStack() {
DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber()
<< " derived from " << MBB->getName() << ".\n");
StackTop = 0;
- const LiveBundle &Bundle = LiveBundles[BlockBundle.lookup(MBB).first];
+ // Get the live-in bundle for MBB.
+ const LiveBundle &Bundle =
+ LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
if (!Bundle.Mask) {
DEBUG(dbgs() << "Block has no FP live-ins.\n");
@@ -532,7 +464,8 @@ void FPS::finishBlockStack() {
DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber()
<< " derived from " << MBB->getName() << ".\n");
- unsigned BundleIdx = BlockBundle.lookup(MBB).second;
+ // Get MBB's live-out bundle.
+ unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
LiveBundle &Bundle = LiveBundles[BundleIdx];
// We may need to kill and define some registers to match successors.
@@ -572,7 +505,8 @@ namespace {
friend bool operator<(const TableEntry &TE, unsigned V) {
return TE.from < V;
}
- friend bool ATTRIBUTE_USED operator<(unsigned V, const TableEntry &TE) {
+ friend bool LLVM_ATTRIBUTE_USED operator<(unsigned V,
+ const TableEntry &TE) {
return V < TE.from;
}
};
@@ -824,7 +758,8 @@ void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
MachineInstr* MI = I;
DebugLoc dl = MI->getDebugLoc();
ASSERT_SORTED(PopTable);
- assert(StackTop > 0 && "Cannot pop empty stack!");
+ if (StackTop == 0)
+ report_fatal_error("Cannot pop empty stack!");
RegMap[Stack[--StackTop]] = ~0; // Update state
// Check to see if there is a popping version of this instruction...
@@ -1016,7 +951,8 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
MI->getOpcode() == X86::ISTT_FP32m ||
MI->getOpcode() == X86::ISTT_FP64m ||
MI->getOpcode() == X86::ST_FP80m) {
- assert(StackTop > 0 && "Stack empty??");
+ if (StackTop == 0)
+ report_fatal_error("Stack empty??");
--StackTop;
} else if (KillsSrc) { // Last use of operand?
popStackAfter(I);
@@ -1047,7 +983,8 @@ void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
// If this is the last use of the source register, just make sure it's on
// the top of the stack.
moveToTop(Reg, I);
- assert(StackTop > 0 && "Stack cannot be empty!");
+ if (StackTop == 0)
+ report_fatal_error("Stack cannot be empty!");
--StackTop;
pushReg(getFPReg(MI->getOperand(0)));
} else {
@@ -1300,7 +1237,6 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
- DebugLoc dl = MI->getDebugLoc();
switch (MI->getOpcode()) {
default: llvm_unreachable("Unknown SpecialFP instruction!");
case X86::FpGET_ST0_32:// Appears immediately after a call returning FP type!
@@ -1341,7 +1277,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
std::swap(RegMap[RegNo], RegMap[RegOnTop]);
// Swap stack slot contents.
- assert(RegMap[RegOnTop] < StackTop);
+ if (RegMap[RegOnTop] >= StackTop)
+ report_fatal_error("Access past stack top!");
std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);
break;
}
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
new file mode 100644
index 0000000..0a3f931
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -0,0 +1,994 @@
+//===-- X86FrameLowering.cpp - X86 Frame Information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86FrameLowering.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86TargetMachine.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/SmallSet.h"
+
+using namespace llvm;
+
+// FIXME: completely move here.
+extern cl::opt<bool> ForceStackAlign;
+
+bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+/// hasFP - Return true if the specified function should have a dedicated frame
+/// pointer register. This is true if the function has variable sized allocas
+/// or if frame pointer elimination is disabled.
+bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineModuleInfo &MMI = MF.getMMI();
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+
+ return (DisableFramePointerElim(MF) ||
+ RI->needsStackRealignment(MF) ||
+ MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
+ MMI.callsUnwindInit());
+}
+
+static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
+ if (is64Bit) {
+ if (isInt<8>(Imm))
+ return X86::SUB64ri8;
+ return X86::SUB64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::SUB32ri8;
+ return X86::SUB32ri;
+ }
+}
+
+static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
+ if (is64Bit) {
+ if (isInt<8>(Imm))
+ return X86::ADD64ri8;
+ return X86::ADD64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::ADD32ri8;
+ return X86::ADD32ri;
+ }
+}
+
+/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
+/// when it reaches the "return" instruction. We can then pop a stack object
+/// to this register without worrying about clobbering it.
+static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const TargetRegisterInfo &TRI,
+ bool Is64Bit) {
+ const MachineFunction *MF = MBB.getParent();
+ const Function *F = MF->getFunction();
+ if (!F || MF->getMMI().callsEHReturn())
+ return 0;
+
+ static const unsigned CallerSavedRegs32Bit[] = {
+ X86::EAX, X86::EDX, X86::ECX
+ };
+
+ static const unsigned CallerSavedRegs64Bit[] = {
+ X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
+ X86::R8, X86::R9, X86::R10, X86::R11
+ };
+
+ unsigned Opc = MBBI->getOpcode();
+ switch (Opc) {
+ default: return 0;
+ case X86::RET:
+ case X86::RETI:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64: {
+ SmallSet<unsigned, 8> Uses;
+ for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MBBI->getOperand(i);
+ if (!MO.isReg() || MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ for (const unsigned *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
+ Uses.insert(*AsI);
+ }
+
+ const unsigned *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
+ for (; *CS; ++CS)
+ if (!Uses.count(*CS))
+ return *CS;
+ }
+ }
+
+ return 0;
+}
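
The helper works by collecting every register the return instruction reads (including overlapping sub- and super-registers) and returning the first caller-saved candidate not in that set. The same scan in standalone form (plain register numbers; names are illustrative):

    #include <set>
    #include <vector>

    // Uses must already contain overlapping registers, mirroring what
    // TRI.getOverlaps() adds in the real pass. Returns 0 if nothing is free.
    unsigned firstDeadReg(const std::set<unsigned> &Uses,
                          const std::vector<unsigned> &CallerSaved) {
      for (unsigned Reg : CallerSaved)
        if (!Uses.count(Reg))
          return Reg;
      return 0; // caller falls back to an ADD/SUB of the stack pointer
    }
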
+
+
+/// emitSPUpdate - Emit a series of instructions to increment / decrement the
+/// stack pointer by a constant value.
+static
+void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, int64_t NumBytes,
+ bool Is64Bit, const TargetInstrInfo &TII,
+ const TargetRegisterInfo &TRI) {
+ bool isSub = NumBytes < 0;
+ uint64_t Offset = isSub ? -NumBytes : NumBytes;
+ unsigned Opc = isSub ?
+ getSUBriOpcode(Is64Bit, Offset) :
+ getADDriOpcode(Is64Bit, Offset);
+ uint64_t Chunk = (1LL << 31) - 1;
+ DebugLoc DL = MBB.findDebugLoc(MBBI);
+
+ while (Offset) {
+ uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
+ if (ThisVal == (Is64Bit ? 8 : 4)) {
+ // Use push / pop instead.
+ unsigned Reg = isSub
+ ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
+ : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
+ if (Reg) {
+ Opc = isSub
+ ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
+ : (Is64Bit ? X86::POP64r : X86::POP32r);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc))
+ .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
+ Offset -= ThisVal;
+ continue;
+ }
+ }
+
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(ThisVal);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ Offset -= ThisVal;
+ }
+}
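
emitSPUpdate splits a large adjustment into pieces that each fit a signed 32-bit immediate, and prefers a push/pop when the remaining piece is exactly pointer-sized and a register is available. The chunking arithmetic in isolation (a sketch; the push/pop case is reduced to a comment):

    #include <cstdint>
    #include <vector>

    // Split an adjustment into immediates that fit a signed 32-bit field,
    // using the same (1LL << 31) - 1 chunk size as the pass.
    std::vector<uint64_t> splitSPAdjustment(int64_t NumBytes) {
      uint64_t Offset = NumBytes < 0 ? -NumBytes : NumBytes;
      const uint64_t Chunk = (1LL << 31) - 1;
      std::vector<uint64_t> Pieces;
      while (Offset) {
        uint64_t ThisVal = Offset > Chunk ? Chunk : Offset;
        Pieces.push_back(ThisVal); // one ADD/SUB (or push/pop if == 4/8)
        Offset -= ThisVal;
      }
      return Pieces;
    }
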
+
+/// mergeSPUpdatesUp - Fold the stack adjustment made by the instruction just
+/// before the iterator into *NumBytes, and erase that instruction.
+static
+void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = NULL) {
+ if (MBBI == MBB.begin()) return;
+
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ }
+}
+
+/// mergeSPUpdatesDown - Fold the stack adjustment made by the instruction just
+/// after the iterator into *NumBytes, and erase that instruction.
+static
+void mergeSPUpdatesDown(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = NULL) {
+ // FIXME: THIS ISN'T RUN!!!
+ return;
+
+ if (MBBI == MBB.end()) return;
+
+ MachineBasicBlock::iterator NI = llvm::next(MBBI);
+ if (NI == MBB.end()) return;
+
+ unsigned Opc = NI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ }
+}
+
+/// mergeSPUpdates - Checks the instruction before/after the passed
+/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
+/// the stack adjustment is returned as a positive value for ADD and a
+/// negative one for SUB.
+static int mergeSPUpdates(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr,
+ bool doMergeWithPrevious) {
+ if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
+ (!doMergeWithPrevious && MBBI == MBB.end()))
+ return 0;
+
+ MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
+ MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
+ unsigned Opc = PI->getOpcode();
+ int Offset = 0;
+
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr){
+ Offset += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ Offset -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ }
+
+ return Offset;
+}
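
Callers fold the returned value straight into their own adjustment (the prologue below does NumBytes -= mergeSPUpdates(..., true)), so adjacent updates collapse into one instruction. A toy model of the delete-and-return contract (illustrative types, not MachineInstr):

    #include <cstddef>
    #include <vector>

    struct Inst { bool IsSPAdjust; bool IsAdd; long Imm; };

    // If the instruction at Idx adjusts the stack pointer, erase it and
    // return its effect: positive for ADD, negative for SUB, 0 otherwise.
    long mergeSPUpdate(std::vector<Inst> &MBB, std::size_t Idx) {
      if (Idx >= MBB.size() || !MBB[Idx].IsSPAdjust)
        return 0;
      long Offset = MBB[Idx].IsAdd ? MBB[Idx].Imm : -MBB[Idx].Imm;
      MBB.erase(MBB.begin() + Idx);
      return Offset;
    }
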
+
+static bool isEAXLiveIn(MachineFunction &MF) {
+ for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
+ EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
+ unsigned Reg = II->first;
+
+ if (Reg == X86::EAX || Reg == X86::AX ||
+ Reg == X86::AH || Reg == X86::AL)
+ return true;
+ }
+
+ return false;
+}
+
+void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
+ MCSymbol *Label,
+ unsigned FramePtr) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ if (CSI.empty()) return;
+
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const TargetData *TD = TM.getTargetData();
+ bool HasFP = hasFP(MF);
+
+ // Calculate the number of bytes used to store the return address.
+ int stackGrowth = -TD->getPointerSize();
+
+ // FIXME: This is a dirty hack. The code itself is a mess right now.
+ // It should be rewritten from scratch and generalized at some point.
+
+ // Determine maximum offset (minimum due to stack growth).
+ int64_t MaxOffset = 0;
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I)
+ MaxOffset = std::min(MaxOffset,
+ MFI->getObjectOffset(I->getFrameIdx()));
+
+ // Calculate offsets.
+ int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+ Offset = MaxOffset - Offset + saveAreaOffset;
+
+ // Don't output a new machine move if we're re-saving the frame
+ // pointer. This happens when the PrologEpilogInserter has inserted an extra
+ // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
+ // generates one when frame pointers are used. If we generate a "machine
+ // move" for this extra "PUSH", the linker will lose track of the fact that
+ // the frame pointer should have the value of the first "PUSH" when it's
+ // trying to unwind.
+ //
+ // FIXME: This looks inelegant. It's possibly correct, but it's covering up
+ // another bug. I.e., one where we generate a prolog like this:
+ //
+ // pushl %ebp
+ // movl %esp, %ebp
+ // pushl %ebp
+ // pushl %esi
+ // ...
+ //
+ // The immediate re-push of EBP is unnecessary. At the least, it's an
+ // optimization bug. EBP can be used as a scratch register in certain
+ // cases, but probably not when we have a frame pointer.
+ if (HasFP && FramePtr == Reg)
+ continue;
+
+ MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
+ MachineLocation CSSrc(Reg);
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ }
+}
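
The offset math above rebases each callee-saved slot against the most negative frame object offset, then shifts by the save area: two or three slots of stack growth, depending on whether a frame pointer push is also present. Distilled into a pure function (illustrative, assuming the same sign conventions):

    #include <algorithm>
    #include <vector>

    // ObjOffsets are MachineFrameInfo offsets (stack grows down, so the
    // deepest slot is the minimum); stackGrowth is -PointerSize.
    std::vector<long> csrFrameOffsets(const std::vector<long> &ObjOffsets,
                                      long stackGrowth, bool HasFP) {
      long MaxOffset = 0;
      for (long O : ObjOffsets)
        MaxOffset = std::min(MaxOffset, O);
      long saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
      std::vector<long> Result;
      for (long O : ObjOffsets)
        Result.push_back(MaxOffset - O + saveAreaOffset);
      return Result;
    }
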
+
+/// emitPrologue - Push callee-saved registers onto the stack, which
+/// automatically adjusts the stack pointer. Adjust the stack pointer to
+/// allocate space for local variables. Also emit labels used by the
+/// exception handler to generate the exception handling frames.
+void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *Fn = MF.getFunction();
+ const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
+ const X86InstrInfo &TII = *TM.getInstrInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
+ !Fn->doesNotThrow() || UnwindTablesMandatory;
+ uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+ uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
+ bool HasFP = hasFP(MF);
+ bool Is64Bit = STI.is64Bit();
+ bool IsWin64 = STI.isTargetWin64();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned StackPtr = RegInfo->getStackRegister();
+
+ DebugLoc DL;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum SlotSize.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else if (MaxAlign < SlotSize)
+ MaxAlign = SlotSize;
+ }
+
+ // Add RETADDR move area to callee saved frame size.
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ X86FI->setCalleeSavedFrameSize(
+ X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
+
+ // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
+ // function, and use up to 128 bytes of stack space, don't have a frame
+ // pointer, calls, or dynamic alloca then we do not need to adjust the
+ // stack pointer (we fit in the Red Zone).
+ if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
+ !RegInfo->needsStackRealignment(MF) &&
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64) { // Win64 has no Red Zone
+ uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
+ if (HasFP) MinSize += SlotSize;
+ StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
+ MFI->setStackSize(StackSize);
+ }
+
+ // Insert stack pointer adjustment for later moving of return addr. Only
+ // applies to tail call optimized functions where the callee argument stack
+ // size is bigger than the caller's.
+ if (TailCallReturnAddrDelta < 0) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(-TailCallReturnAddrDelta);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
+ // Mapping for machine moves:
+ //
+ // DST: VirtualFP AND
+ // SRC: VirtualFP => DW_CFA_def_cfa_offset
+ // ELSE => DW_CFA_def_cfa
+ //
+ // SRC: VirtualFP AND
+ // DST: Register => DW_CFA_def_cfa_register
+ //
+ // ELSE
+ // OFFSET < 0 => DW_CFA_offset_extended_sf
+ // REG < 64 => DW_CFA_offset + Reg
+ // ELSE => DW_CFA_offset_extended
+
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const TargetData *TD = MF.getTarget().getTargetData();
+ uint64_t NumBytes = 0;
+ int stackGrowth = -TD->getPointerSize();
+
+ if (HasFP) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ if (RegInfo->needsStackRealignment(MF))
+ FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+
+ NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+
+ // Get the offset of the stack slot for the EBP register, which is
+ // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+ // Update the frame offset adjustment.
+ MFI->setOffsetAdjustment(-NumBytes);
+
+ // Save EBP/RBP into the appropriate stack slot.
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(FramePtr, RegState::Kill);
+
+ if (needsFrameMoves) {
+ // Mark the place where EBP/RBP was saved.
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
+
+ // Define the current CFA rule to use the provided offset.
+ if (StackSize) {
+ MachineLocation SPDst(MachineLocation::VirtualFP);
+ MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ } else {
+ MachineLocation SPDst(StackPtr);
+ MachineLocation SPSrc(StackPtr, stackGrowth);
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ }
+
+ // Change the rule for the FramePtr to be an "offset" rule.
+ MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
+ MachineLocation FPSrc(FramePtr);
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ }
+
+ // Update EBP with the new base value...
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+ .addReg(StackPtr);
+
+ if (needsFrameMoves) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
+
+ // Define the current CFA to use the EBP/RBP register.
+ MachineLocation FPDst(FramePtr);
+ MachineLocation FPSrc(MachineLocation::VirtualFP);
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ }
+
+ // Mark the FramePtr as live-in in every block except the entry.
+ for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+ I != E; ++I)
+ I->addLiveIn(FramePtr);
+
+ // Realign stack
+ if (RegInfo->needsStackRealignment(MF)) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
+ StackPtr).addReg(StackPtr).addImm(-MaxAlign);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+ } else {
+ NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
+ }
+
+ // Skip the callee-saved push instructions.
+ bool PushedRegs = false;
+ int StackOffset = 2 * stackGrowth;
+
+ while (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == X86::PUSH32r ||
+ MBBI->getOpcode() == X86::PUSH64r)) {
+ PushedRegs = true;
+ ++MBBI;
+
+ if (!HasFP && needsFrameMoves) {
+ // Mark callee-saved push instruction.
+ MCSymbol *Label = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
+
+ // Define the current CFA rule to use the provided offset.
+ unsigned Ptr = StackSize ?
+ MachineLocation::VirtualFP : StackPtr;
+ MachineLocation SPDst(Ptr);
+ MachineLocation SPSrc(Ptr, StackOffset);
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ StackOffset += stackGrowth;
+ }
+ }
+
+ DL = MBB.findDebugLoc(MBBI);
+
+ // If there is a SUB32ri of ESP immediately before this instruction, merge
+ // the two. This can be the case when tail call elimination is enabled and
+ // the callee has more arguments than the caller.
+ NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately after this
+ // instruction, merge the two instructions.
+ mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
+
+ // Adjust stack pointer: ESP -= numbytes.
+
+ // Windows and cygwin/mingw require a prologue helper routine when allocating
+ // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
+ // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
+ // stack and adjust the stack pointer in one go. The 64-bit version of
+ // __chkstk is only responsible for probing the stack. The 64-bit prologue is
+ // responsible for adjusting the stack pointer. Touching the stack at 4K
+ // increments is necessary to ensure that the guard pages used by the OS
+ // virtual memory manager are allocated in the correct sequence.
+ if (NumBytes >= 4096 &&
+ (STI.isTargetCygMing() || STI.isTargetWin32()) &&
+ !STI.isTargetEnvMacho()) {
+ // Check whether EAX is livein for this function.
+ bool isEAXAlive = isEAXLiveIn(MF);
+
+ const char *StackProbeSymbol =
+ STI.isTargetWindows() ? "_chkstk" : "_alloca";
+ if (Is64Bit && STI.isTargetCygMing())
+ StackProbeSymbol = "__chkstk";
+ unsigned CallOp = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
+ if (!isEAXAlive) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(NumBytes);
+ BuildMI(MBB, MBBI, DL, TII.get(CallOp))
+ .addExternalSymbol(StackProbeSymbol)
+ .addReg(StackPtr, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+ } else {
+ // Save EAX
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
+ .addReg(X86::EAX, RegState::Kill);
+
+ // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
+ // allocated bytes for EAX.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(NumBytes - 4);
+ BuildMI(MBB, MBBI, DL, TII.get(CallOp))
+ .addExternalSymbol(StackProbeSymbol)
+ .addReg(StackPtr, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+
+ // Restore EAX
+ MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
+ X86::EAX),
+ StackPtr, false, NumBytes - 4);
+ MBB.insert(MBBI, MI);
+ }
+ } else if (NumBytes >= 4096 &&
+ STI.isTargetWin64() &&
+ !STI.isTargetEnvMacho()) {
+ // Sanity check that EAX is not live-in for this function. It should
+ // not be, so assert if it is.
+ assert(!isEAXLiveIn(MF) && "EAX is livein in the Win64 case!");
+
+ // Handle the 64-bit Windows ABI case where we need to call __chkstk.
+ // Function prologue is responsible for adjusting the stack pointer.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(NumBytes);
+ BuildMI(MBB, MBBI, DL, TII.get(X86::WINCALL64pcrel32))
+ .addExternalSymbol("__chkstk")
+ .addReg(StackPtr, RegState::Define | RegState::Implicit);
+ emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
+ TII, *RegInfo);
+ } else if (NumBytes)
+ emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
+ TII, *RegInfo);
+
+ if ((NumBytes || PushedRegs) && needsFrameMoves) {
+ // Mark end of stack pointer adjustment.
+ MCSymbol *Label = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
+
+ if (!HasFP && NumBytes) {
+ // Define the current CFA rule to use the provided offset.
+ if (StackSize) {
+ MachineLocation SPDst(MachineLocation::VirtualFP);
+ MachineLocation SPSrc(MachineLocation::VirtualFP,
+ -StackSize + stackGrowth);
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ } else {
+ MachineLocation SPDst(StackPtr);
+ MachineLocation SPSrc(StackPtr, stackGrowth);
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ }
+ }
+
+ // Emit DWARF info specifying the offsets of the callee-saved registers.
+ if (PushedRegs)
+ emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
+ }
+}
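
One arithmetic detail from the prologue worth isolating: on x86-64, a leaf function with no calls, no dynamic allocas, and no realignment may use the 128-byte red zone below RSP, so the allocated stack size shrinks by up to 128 bytes, never below the callee-saved area. As a pure function (a sketch of just the size computation):

    #include <algorithm>
    #include <cstdint>

    // Red-zone shrink from emitPrologue; not applied on Win64, which has
    // no red zone.
    uint64_t redZoneStackSize(uint64_t StackSize, uint64_t CalleeSavedSize,
                              bool HasFP, uint64_t SlotSize) {
      uint64_t MinSize = CalleeSavedSize + (HasFP ? SlotSize : 0);
      return std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    }
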
+
+void X86FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
+ const X86InstrInfo &TII = *TM.getInstrInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI != MBB.end() && "Returning block has no instructions");
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc DL = MBBI->getDebugLoc();
+ bool Is64Bit = STI.is64Bit();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned StackPtr = RegInfo->getStackRegister();
+
+ switch (RetOpcode) {
+ default:
+ llvm_unreachable("Can only insert epilog into returning blocks");
+ case X86::RET:
+ case X86::RETI:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64:
+ break; // These are ok
+ }
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI->getStackSize();
+ uint64_t MaxAlign = MFI->getMaxAlignment();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = 0;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else
+ MaxAlign = MaxAlign ? MaxAlign : 4;
+ }
+
+ if (hasFP(MF)) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ if (RegInfo->needsStackRealignment(MF))
+ FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
+
+ NumBytes = FrameSize - CSSize;
+
+ // Pop EBP.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
+ } else {
+ NumBytes = StackSize - CSSize;
+ }
+
+ // Skip the callee-saved pop instructions.
+ MachineBasicBlock::iterator LastCSPop = MBBI;
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ unsigned Opc = PI->getOpcode();
+
+ if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
+ !PI->getDesc().isTerminator())
+ break;
+
+ --MBBI;
+ }
+
+ DL = MBBI->getDebugLoc();
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately before this
+ // instruction, merge the two instructions.
+ if (NumBytes || MFI->hasVarSizedObjects())
+ mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
+
+ // If dynamic alloca is used, then reset esp to point to the last callee-saved
+ // slot before popping them off! The same applies when the stack was
+ // realigned.
+ if (RegInfo->needsStackRealignment(MF)) {
+ // We cannot use LEA here, because stack pointer was realigned. We need to
+ // deallocate local frame back.
+ if (CSSize) {
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
+ MBBI = prior(LastCSPop);
+ }
+
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
+ StackPtr).addReg(FramePtr);
+ } else if (MFI->hasVarSizedObjects()) {
+ if (CSSize) {
+ unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
+ MachineInstr *MI =
+ addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
+ MBB.insert(MBBI, MI);
+ } else {
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
+ .addReg(FramePtr);
+ }
+ } else if (NumBytes) {
+ // Adjust stack pointer back: ESP += numbytes.
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
+ }
+
+ // We're returning from function via eh_return.
+ if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &DestAddr = MBBI->getOperand(0);
+ assert(DestAddr.isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
+ StackPtr).addReg(DestAddr.getReg());
+ } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
+ RetOpcode == X86::TCRETURNmi ||
+ RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
+ RetOpcode == X86::TCRETURNmi64) {
+ bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
+
+ // Adjust stack pointer.
+ int StackAdj = StackAdjust.getImm();
+ int MaxTCDelta = X86FI->getTCReturnAddrDelta();
+ int Offset = 0;
+ assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+ // Incorporate the retaddr area.
+ Offset = StackAdj-MaxTCDelta;
+ assert(Offset >= 0 && "Offset should never be negative");
+
+ if (Offset) {
+ // Check for a possible merge with a preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo);
+ }
+
+ // Jump to label or value in register.
+ if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd : X86::TAILJMPd64));
+ if (JumpTarget.isGlobal())
+ MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ else {
+ assert(JumpTarget.isSymbol());
+ MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+ JumpTarget.getTargetFlags());
+ }
+ } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm : X86::TAILJMPm64));
+ for (unsigned i = 0; i != 5; ++i)
+ MIB.addOperand(MBBI->getOperand(i));
+ } else if (RetOpcode == X86::TCRETURNri64) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ } else {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = prior(MBBI);
+ for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
+ NewMI->addOperand(MBBI->getOperand(i));
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
+ (X86FI->getTCReturnAddrDelta() < 0)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int delta = -1*X86FI->getTCReturnAddrDelta();
+ MBBI = MBB.getLastNonDebugInstr();
+
+ // Check for a possible merge with a preceding ADD instruction.
+ delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo);
+ }
+}
+
+void
+X86FrameLowering::getInitialFrameState(std::vector<MachineMove> &Moves) const {
+ // Calculate the number of bytes used to store the return address.
+ int stackGrowth = (STI.is64Bit() ? -8 : -4);
+ const X86RegisterInfo *RI = TM.getRegisterInfo();
+
+ // Initial state of the frame pointer is esp+stackGrowth.
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(RI->getStackRegister(), stackGrowth);
+ Moves.push_back(MachineMove(0, Dst, Src));
+
+ // Add return address to move list
+ MachineLocation CSDst(RI->getStackRegister(), stackGrowth);
+ MachineLocation CSSrc(RI->getRARegister());
+ Moves.push_back(MachineMove(0, CSDst, CSSrc));
+}
+
+int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
+ const X86RegisterInfo *RI =
+ static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+ uint64_t StackSize = MFI->getStackSize();
+
+ if (RI->needsStackRealignment(MF)) {
+ if (FI < 0) {
+ // Skip the saved EBP.
+ Offset += RI->getSlotSize();
+ } else {
+ unsigned Align = MFI->getObjectAlignment(FI);
+ assert((-(Offset + StackSize)) % Align == 0);
+ Align = 0;
+ return Offset + StackSize;
+ }
+ // FIXME: Support tail calls
+ } else {
+ if (!hasFP(MF))
+ return Offset + StackSize;
+
+ // Skip the saved EBP.
+ Offset += RI->getSlotSize();
+
+ // Skip the RETADDR move area
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ Offset -= TailCallReturnAddrDelta;
+ }
+
+ return Offset;
+}
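
Ignoring the realignment branch, the lookup is plain offset arithmetic: SP-relative addressing adds the whole stack size, while FP-relative addressing skips the saved EBP slot and any RETADDR move area reserved for tail calls. Distilled (a sketch, not the full function):

    // ObjOffset already has the local area offset folded out; TCDelta is
    // the non-positive tail-call return-address delta.
    long frameIndexOffset(long ObjOffset, long StackSize, bool HasFP,
                          long SlotSize, long TCDelta) {
      if (!HasFP)
        return ObjOffset + StackSize;   // SP-relative
      long Offset = ObjOffset + SlotSize; // skip the saved EBP
      if (TCDelta < 0)
        Offset -= TCDelta;              // skip the RETADDR move area
      return Offset;
    }
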
+
+bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+
+ bool isWin64 = STI.isTargetWin64();
+ unsigned SlotSize = STI.is64Bit() ? 8 : 4;
+ unsigned FPReg = TRI->getFrameRegister(MF);
+ unsigned CalleeFrameSize = 0;
+
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+
+ unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ if (Reg == FPReg)
+ // X86RegisterInfo::emitPrologue will handle spilling of frame register.
+ continue;
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
+ CalleeFrameSize += SlotSize;
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill);
+ } else {
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
+ RC, TRI);
+ }
+ }
+
+ X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
+ return true;
+}
+
+bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ unsigned FPReg = TRI->getFrameRegister(MF);
+ bool isWin64 = STI.isTargetWin64();
+ unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (Reg == FPReg)
+ // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
+ continue;
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
+ BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
+ } else {
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
+ RC, TRI);
+ }
+ }
+ return true;
+}
+
+void
+X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
+ unsigned SlotSize = RegInfo->getSlotSize();
+
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+
+ if (TailCallReturnAddrDelta < 0) {
+ // create RETURNADDR area
+ // arg
+ // arg
+ // RETADDR
+ // { ...
+ // RETADDR area
+ // ...
+ // }
+ // [EBP]
+ MFI->CreateFixedObject(-TailCallReturnAddrDelta,
+ (-1U*SlotSize)+TailCallReturnAddrDelta, true);
+ }
+
+ if (hasFP(MF)) {
+ assert((TailCallReturnAddrDelta <= 0) &&
+ "The Delta should always be zero or negative");
+ const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
+
+ // Create a frame entry for the EBP register that must be saved.
+ int FrameIdx = MFI->CreateFixedObject(SlotSize,
+ -(int)SlotSize +
+ TFI.getOffsetOfLocalArea() +
+ TailCallReturnAddrDelta,
+ true);
+ assert(FrameIdx == MFI->getObjectIndexBegin() &&
+ "Slot for EBP register must be last in order to be found!");
+ FrameIdx = 0;
+ }
+}
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
new file mode 100644
index 0000000..d71108c
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
@@ -0,0 +1,65 @@
+//===-- X86FrameLowering.h - Define frame lowering for X86 -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements X86-specific bits of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_FRAMELOWERING_H
+#define X86_FRAMELOWERING_H
+
+#include "X86Subtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+ class MCSymbol;
+ class X86TargetMachine;
+
+class X86FrameLowering : public TargetFrameLowering {
+ const X86TargetMachine &TM;
+ const X86Subtarget &STI;
+public:
+ explicit X86FrameLowering(const X86TargetMachine &tm, const X86Subtarget &sti)
+ : TargetFrameLowering(StackGrowsDown,
+ sti.getStackAlignment(),
+ (sti.is64Bit() ? -8 : -4)),
+ TM(tm), STI(sti) {
+ }
+
+ void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label,
+ unsigned FramePtr) const;
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
+
+ void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+};
+
+} // End llvm namespace
+
+#endif
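
The constructor forwards three facts to the TargetFrameLowering base: the stack grows down, the subtarget's stack alignment, and a local-area offset of -8 or -4 (the return-address slot). A toy base class holding the same parameters (not the LLVM class, just its shape):

    class FrameLoweringBase {
    public:
      enum StackDirection { StackGrowsUp, StackGrowsDown };
      FrameLoweringBase(StackDirection D, unsigned Align, int LAO)
          : Dir(D), StackAlign(Align), LocalAreaOffset(LAO) {}
      StackDirection getStackGrowthDirection() const { return Dir; }
      unsigned getStackAlignment() const { return StackAlign; }
      int getOffsetOfLocalArea() const { return LocalAreaOffset; }
    private:
      StackDirection Dir;
      unsigned StackAlign;
      int LocalAreaOffset; // -8 on x86-64, -4 on x86-32: the retaddr slot
    };
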
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c523441..9b0ec6e 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -190,20 +190,19 @@ namespace {
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
- bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
- bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
+ bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth);
bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
- bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
- bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectLEAAddr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
- bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
+ bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectScalarSSELoad(SDNode *Root, SDValue N,
@@ -264,12 +263,6 @@ namespace {
return CurDAG->getTargetConstant(Imm, MVT::i8);
}
- /// getI16Imm - Return a target constant with the specified value, of type
- /// i16.
- inline SDValue getI16Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i16);
- }
-
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
@@ -511,10 +504,11 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
// FIXME: optimize the case where the src/dest is a load or store?
SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
N->getOperand(0),
- MemTmp, NULL, 0, MemVT,
+ MemTmp, MachinePointerInfo(), MemVT,
false, false, 0);
- SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, dl, Store, MemTmp,
- NULL, 0, MemVT, false, false, 0);
+ SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
+ MachinePointerInfo(),
+ MemVT, false, false, 0);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havok on the dag because
@@ -536,9 +530,12 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
MachineFrameInfo *MFI) {
const TargetInstrInfo *TII = TM.getInstrInfo();
- if (Subtarget->isTargetCygMing())
+ if (Subtarget->isTargetCygMing()) {
+ unsigned CallOp =
+ Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
BuildMI(BB, DebugLoc(),
- TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
+ TII->get(CallOp)).addExternalSymbol("__main");
+ }
}
void X86DAGToDAGISel::EmitFunctionEntryCode() {
@@ -549,29 +546,27 @@ void X86DAGToDAGISel::EmitFunctionEntryCode() {
}
-bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
- X86ISelAddressMode &AM) {
- assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
- SDValue Segment = N.getOperand(0);
-
- if (AM.Segment.getNode() == 0) {
- AM.Segment = Segment;
- return false;
- }
-
- return true;
-}
-
-bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
+bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
+ SDValue Address = N->getOperand(1);
+
+ // load gs:0 -> GS segment register.
+ // load fs:0 -> FS segment register.
+ //
// This optimization is valid because the GNU TLS model defines that
// gs:0 (or fs:0 on X86-64) contains its own address.
// For more information see http://people.redhat.com/drepper/tls.pdf
-
- SDValue Address = N.getOperand(1);
- if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
- !MatchSegmentBaseAddress (Address, AM))
- return false;
-
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
+ if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
+ Subtarget->isTargetELF())
+ switch (N->getPointerInfo().getAddrSpace()) {
+ case 256:
+ AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
+ return false;
+ case 257:
+ AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ return false;
+ }
+
return true;
}
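
MatchLoadInAddress above folds a load of constant address 0 into a bare segment register: under the GNU TLS model, %gs:0 (or %fs:0 on x86-64) holds its own address, and LLVM encodes GS- and FS-relative x86 memory as address spaces 256 and 257. A minimal standalone sketch of that mapping (the enum and function are ours, for illustration only):

    #include <cstdio>

    // Toy model of the address-space convention the patch relies on:
    // 256 means GS-relative, 257 means FS-relative. A zero displacement
    // in one of those spaces is exactly the TLS self-pointer described
    // in Drepper's TLS paper, so it folds to a segment register.
    enum Segment { SEG_NONE, SEG_GS, SEG_FS };

    Segment segmentForAddrSpace(unsigned AddrSpace) {
      switch (AddrSpace) {
      case 256: return SEG_GS;
      case 257: return SEG_FS;
      default:  return SEG_NONE;
      }
    }

    int main() {
      std::printf("%d %d %d\n", segmentForAddrSpace(256),
                  segmentForAddrSpace(257), segmentForAddrSpace(0));
    }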
@@ -690,25 +685,6 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
return false;
}
-/// isLogicallyAddWithConstant - Return true if this node is semantically an
-/// add of a value with a constantint.
-static bool isLogicallyAddWithConstant(SDValue V, SelectionDAG *CurDAG) {
- // Check for (add x, Cst)
- if (V->getOpcode() == ISD::ADD)
- return isa<ConstantSDNode>(V->getOperand(1));
-
- // Check for (or x, Cst), where Cst & x == 0.
- if (V->getOpcode() != ISD::OR ||
- !isa<ConstantSDNode>(V->getOperand(1)))
- return false;
-
- // Handle "X | C" as "X + C" iff X is known to have C bits clear.
- ConstantSDNode *CN = cast<ConstantSDNode>(V->getOperand(1));
-
- // Check to see if the LHS & C is zero.
- return CurDAG->MaskedValueIsZero(V->getOperand(0), CN->getAPIntValue());
-}
-
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
bool is64Bit = Subtarget->is64Bit();
@@ -756,11 +732,6 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
break;
}
- case X86ISD::SegmentBaseAddress:
- if (!MatchSegmentBaseAddress(N, AM))
- return false;
- break;
-
case X86ISD::Wrapper:
case X86ISD::WrapperRIP:
if (!MatchWrapper(N, AM))
@@ -768,7 +739,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
break;
case ISD::LOAD:
- if (!MatchLoad(N, AM))
+ if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
return false;
break;
@@ -799,7 +770,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
- if (isLogicallyAddWithConstant(ShVal, CurDAG)) {
+ if (CurDAG->isBaseWithConstantOffset(ShVal)) {
AM.IndexReg = ShVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
@@ -943,24 +914,18 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// Add an artificial use to this node so that we can keep track of
// it if it gets CSE'd with a different node.
HandleSDNode Handle(N);
- SDValue LHS = Handle.getValue().getNode()->getOperand(0);
- SDValue RHS = Handle.getValue().getNode()->getOperand(1);
X86ISelAddressMode Backup = AM;
- if (!MatchAddressRecursively(LHS, AM, Depth+1) &&
- !MatchAddressRecursively(RHS, AM, Depth+1))
+ if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
+ !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
return false;
AM = Backup;
- LHS = Handle.getValue().getNode()->getOperand(0);
- RHS = Handle.getValue().getNode()->getOperand(1);
-
+
// Try again after commuting the operands.
- if (!MatchAddressRecursively(RHS, AM, Depth+1) &&
- !MatchAddressRecursively(LHS, AM, Depth+1))
+ if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
+ !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
return false;
AM = Backup;
- LHS = Handle.getValue().getNode()->getOperand(0);
- RHS = Handle.getValue().getNode()->getOperand(1);
// If we couldn't fold both operands into the address at the same time,
// see if we can just put each operand into a register and fold at least
@@ -968,17 +933,19 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
if (AM.BaseType == X86ISelAddressMode::RegBase &&
!AM.Base_Reg.getNode() &&
!AM.IndexReg.getNode()) {
- AM.Base_Reg = LHS;
- AM.IndexReg = RHS;
+ N = Handle.getValue();
+ AM.Base_Reg = N.getOperand(0);
+ AM.IndexReg = N.getOperand(1);
AM.Scale = 1;
return false;
}
+ N = Handle.getValue();
break;
}
case ISD::OR:
// Handle "X | C" as "X + C" iff X is known to have C bits clear.
- if (isLogicallyAddWithConstant(N, CurDAG)) {
+ if (CurDAG->isBaseWithConstantOffset(N)) {
X86ISelAddressMode Backup = AM;
ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
uint64_t Offset = CN->getSExtValue();
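
The removed isLogicallyAddWithConstant is subsumed by SelectionDAG::isBaseWithConstantOffset, which performs the same test: an OR with a constant behaves like an ADD exactly when the constant's set bits are known to be clear in the other operand, because no carry can then be generated. A minimal sketch of that bit-level argument, with a KnownZero mask standing in for MaskedValueIsZero:

    #include <cassert>
    #include <cstdint>

    // "X | C == X + C" holds when KnownZero covers every set bit of C:
    // OR cannot produce a carry, so it equals ADD.
    bool orIsAdd(uint64_t C, uint64_t KnownZero) {
      return (C & ~KnownZero) == 0;
    }

    int main() {
      // x known 4-byte aligned: low two bits are zero, so x|3 == x+3.
      uint64_t x = 0x1000;
      assert(orIsAdd(3, /*KnownZero=*/3) && (x | 3) == (x + 3));
      return 0;
    }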
@@ -1148,10 +1115,30 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
/// SelectAddr - returns true if it is able to pattern-match an addressing
/// mode. It returns by reference the operands which make up the maximal
/// addressing mode it can match.
-bool X86DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
+///
+/// Parent is the parent node of the addr operand that is being matched. It
+/// is always a load, store, atomic node, or null. It is only null when
+/// checking memory operands for inline asm nodes.
+bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
X86ISelAddressMode AM;
+
+ if (Parent &&
+ // This list of opcodes covers all the nodes that have an "addr:$ptr" operand
+ // that are not a MemSDNode, and thus don't have proper addrspace info.
+ Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
+ Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
+ Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
+ unsigned AddrSpace =
+ cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
+ // AddrSpace 256 -> GS, 257 -> FS.
+ if (AddrSpace == 256)
+ AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
+ if (AddrSpace == 257)
+ AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ }
+
if (MatchAddress(N, AM))
return false;
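
SelectAddr decomposes an address into the five operands the x86 addressing modes allow -- base, scale, index, displacement, plus an optional segment -- and returns them by reference. A hand-rolled model of the matched form (the struct and names are ours, not LLVM's):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the [Base + Scale*Index + Disp] form that SelectAddr
    // materializes into its five output operands.
    struct ToyAddrMode {
      uint64_t Base;
      unsigned Scale;   // 1, 2, 4 or 8 on x86
      uint64_t Index;
      int32_t  Disp;
      uint64_t effective() const { return Base + Scale * Index + Disp; }
    };

    int main() {
      ToyAddrMode AM = {0x4000, 4, 7, 12}; // e.g. &arr[7].field
      std::printf("0x%llx\n", (unsigned long long)AM.effective());
    }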
@@ -1187,7 +1174,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
- if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp,Segment))
+ if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
return false;
return true;
}
@@ -1205,7 +1192,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
// Okay, this is a zero extending load. Fold it.
LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
- if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
+ if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
return false;
PatternNodeWithChain = SDValue(LD, 0);
return true;
@@ -1216,7 +1203,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
/// SelectLEAAddr - calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
-bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
+bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
@@ -1278,7 +1265,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
}
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
-bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
+bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
@@ -1311,7 +1298,8 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
!IsLegalToFold(N, P, P, OptLevel))
return false;
- return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
+ return SelectAddr(N.getNode(),
+ N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
@@ -1329,7 +1317,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue In2L = Node->getOperand(2);
SDValue In2H = Node->getOperand(3);
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
- if (!SelectAddr(In1.getNode(), In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
+ if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
return NULL;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
@@ -1355,7 +1343,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
SDValue Ptr = Node->getOperand(1);
SDValue Val = Node->getOperand(2);
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
- if (!SelectAddr(Ptr.getNode(), Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
+ if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
return 0;
bool isInc = false, isDec = false, isSub = false, isCN = false;
@@ -1592,7 +1580,32 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
return RetVal;
break;
}
-
+ case X86ISD::UMUL: {
+ SDValue N0 = Node->getOperand(0);
+ SDValue N1 = Node->getOperand(1);
+
+ unsigned LoReg;
+ switch (NVT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unsupported VT!");
+ case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break;
+ case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
+ case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
+ case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
+ }
+
+ SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
+ N0, SDValue()).getValue(1);
+
+ SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
+ SDValue Ops[] = {N1, InFlag};
+ SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);
+
+ ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
+ ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
+ ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
+ return NULL;
+ }
+
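
The new X86ISD::UMUL case pins one operand in AL/AX/EAX/RAX, emits the one-operand MUL, and replaces the node's three results (low product, high product, flags). What MUL32r computes, modeled in plain C++ with a 64-bit widening standing in for the EAX:EDX result pair:

    #include <cstdint>
    #include <cstdio>

    // EAX gets the low 32 bits of the product, EDX the high 32 bits,
    // and EFLAGS reflects whether the high half is nonzero.
    void umul32(uint32_t a, uint32_t b, uint32_t &lo, uint32_t &hi) {
      uint64_t wide = (uint64_t)a * b;
      lo = (uint32_t)wide;
      hi = (uint32_t)(wide >> 32);
    }

    int main() {
      uint32_t lo, hi;
      umul32(0xFFFFFFFFu, 2, lo, hi);
      std::printf("lo=%08x hi=%08x\n", lo, hi); // lo=fffffffe hi=00000001
    }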
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
SDValue N0 = Node->getOperand(0);
@@ -1642,14 +1655,15 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
SDNode *CNode =
- CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+ CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
+
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
- InFlag =
- SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
+ SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
+ InFlag = SDValue(CNode, 0);
}
// Prevent use of AH in a REX instruction by referencing AX instead.
@@ -1688,7 +1702,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
-
+
return NULL;
}
@@ -1773,7 +1787,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (isSigned && !signBitIsZero) {
// Sign extend the low part into the high part.
InFlag =
- SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
+ SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
} else {
// Zero out the high part, effectively zero extending the input.
SDValue ClrNode =
@@ -1787,14 +1801,14 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
SDNode *CNode =
- CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+ CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
InFlag =
- SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
+ SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
}
// Prevent use of AH in a REX instruction by referencing AX instead.
@@ -1971,7 +1985,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
case 'v': // not offsetable ??
default: return true;
case 'm': // memory
- if (!SelectAddr(Op.getNode(), Op, Op0, Op1, Op2, Op3, Op4))
+ if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
return true;
break;
}
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index a6db979..27024b4 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16,9 +16,9 @@
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
-#include "X86ShuffleDecode.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
+#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -28,6 +28,7 @@
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -56,39 +57,172 @@ using namespace dwarf;
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool>
-DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));
+Disable256Bit("disable-256bit", cl::Hidden,
+ cl::desc("Disable use of 256-bit vectors"));
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2);
+static SDValue Insert128BitVector(SDValue Result,
+ SDValue Vec,
+ SDValue Idx,
+ SelectionDAG &DAG,
+ DebugLoc dl);
+
+static SDValue Extract128BitVector(SDValue Vec,
+ SDValue Idx,
+ SelectionDAG &DAG,
+ DebugLoc dl);
+
+static SDValue ConcatVectors(SDValue Lower, SDValue Upper, SelectionDAG &DAG);
+
+
+/// Generate a DAG to grab 128 bits from a vector > 128 bits. This
+/// sets things up to match to an AVX VEXTRACTF128 instruction or a
+/// simple subregister reference. Idx is an index in the 128 bits we
+/// want. It need not be aligned to a 128-bit boundary. That makes
+/// lowering EXTRACT_VECTOR_ELT operations easier.
+static SDValue Extract128BitVector(SDValue Vec,
+ SDValue Idx,
+ SelectionDAG &DAG,
+ DebugLoc dl) {
+ EVT VT = Vec.getValueType();
+ assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
+
+ EVT ElVT = VT.getVectorElementType();
+
+ int Factor = VT.getSizeInBits() / 128;
+
+ EVT ResultVT = EVT::getVectorVT(*DAG.getContext(),
+ ElVT,
+ VT.getVectorNumElements() / Factor);
+
+ // Extract from UNDEF is UNDEF.
+ if (Vec.getOpcode() == ISD::UNDEF)
+ return DAG.getNode(ISD::UNDEF, dl, ResultVT);
+
+ if (isa<ConstantSDNode>(Idx)) {
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
+ // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR
+ // we can match to VEXTRACTF128.
+ unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();
+
+ // This is the index of the first element of the 128-bit chunk
+ // we want.
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
+ * ElemsPerChunk);
+
+ SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+
+ SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
+ VecIdx);
+
+ return Result;
+ }
+
+ return SDValue();
+}
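
The NormalizedIdxVal computation above rounds an arbitrary element index down to the first element of the 128-bit lane that contains it, so any index can be matched to a VEXTRACTF128. The same arithmetic in standalone form:

    #include <cassert>

    // Round an element index down to the start of the 128-bit chunk
    // that contains it, given the element width in bits.
    unsigned normalizeTo128BitChunk(unsigned IdxVal, unsigned ElBits) {
      unsigned ElemsPerChunk = 128 / ElBits;
      return ((IdxVal * ElBits) / 128) * ElemsPerChunk;
    }

    int main() {
      // v8f32 (32-bit elements): index 5 lives in the upper lane,
      // whose first element is 4.
      assert(normalizeTo128BitChunk(5, 32) == 4);
      assert(normalizeTo128BitChunk(3, 32) == 0);
      return 0;
    }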
+
+/// Generate a DAG to put 128 bits into a vector > 128 bits. This
+/// sets things up to match to an AVX VINSERTF128 instruction or a
+/// simple superregister reference. Idx is an index in the 128 bits
+/// we want. It need not be aligned to a 128-bit boundary. That makes
+/// lowering INSERT_VECTOR_ELT operations easier.
+static SDValue Insert128BitVector(SDValue Result,
+ SDValue Vec,
+ SDValue Idx,
+ SelectionDAG &DAG,
+ DebugLoc dl) {
+ if (isa<ConstantSDNode>(Idx)) {
+ EVT VT = Vec.getValueType();
+ assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");
+
+ EVT ElVT = VT.getVectorElementType();
+
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
+ EVT ResultVT = Result.getValueType();
+
+ // Insert the relevant 128 bits.
+ unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();
+
+ // This is the index of the first element of the 128-bit chunk
+ // we want.
+ unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
+ * ElemsPerChunk);
+
+ SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
+
+ Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
+ VecIdx);
+ return Result;
+ }
+
+ return SDValue();
+}
+
+/// Given two vectors, concatenate them.
+static SDValue ConcatVectors(SDValue Lower, SDValue Upper, SelectionDAG &DAG) {
+ DebugLoc dl = Lower.getDebugLoc();
+
+ assert(Lower.getValueType() == Upper.getValueType() && "Mismatched vectors!");
+
+ EVT VT = EVT::getVectorVT(*DAG.getContext(),
+ Lower.getValueType().getVectorElementType(),
+ Lower.getValueType().getVectorNumElements() * 2);
+
+ // TODO: Generalize to arbitrary vector length (this assumes 256-bit vectors).
+ assert(VT.getSizeInBits() == 256 && "Unsupported vector concat!");
+
+ // Insert the upper subvector.
+ SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Upper,
+ DAG.getConstant(
+ // This is half the length of the result
+ // vector. Start inserting the upper 128
+ // bits here.
+ Lower.getValueType().getVectorNumElements(),
+ MVT::i32),
+ DAG, dl);
+
+ // Insert the lower subvector.
+ Vec = Insert128BitVector(Vec, Lower, DAG.getConstant(0, MVT::i32), DAG, dl);
+ return Vec;
+}
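
Note the insertion order in ConcatVectors: the upper half goes into an UNDEF vector at element offset NumElements first, then the lower half at offset 0. A plain-array model of those two Insert128BitVector calls (sizes fixed at 4+4 floats for illustration):

    #include <array>
    #include <cassert>

    // Write the upper half at offset 4 first, then the lower half at
    // offset 0, mirroring the two Insert128BitVector calls above.
    std::array<float, 8> concat(const std::array<float, 4> &Lo,
                                const std::array<float, 4> &Hi) {
      std::array<float, 8> Out{}; // stands in for the ISD::UNDEF start
      for (int i = 0; i < 4; ++i) Out[4 + i] = Hi[i]; // upper first
      for (int i = 0; i < 4; ++i) Out[i] = Lo[i];     // then lower
      return Out;
    }

    int main() {
      auto R = concat({0, 1, 2, 3}, {4, 5, 6, 7});
      assert(R[0] == 0 && R[4] == 4 && R[7] == 7);
      return 0;
    }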
+
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
-
- bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
-
- if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
- if (is64Bit) return new X8664_MachoTargetObjectFile();
+ const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
+ bool is64Bit = Subtarget->is64Bit();
+
+ if (Subtarget->isTargetEnvMacho()) {
+ if (is64Bit)
+ return new X8664_MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
- } else if (TM.getSubtarget<X86Subtarget>().isTargetELF() ){
- if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
+ }
+
+ if (Subtarget->isTargetELF()) {
+ if (is64Bit)
+ return new X8664_ELFTargetObjectFile(TM);
return new X8632_ELFTargetObjectFile(TM);
- } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
+ }
+ if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
return new TargetLoweringObjectFileCOFF();
- }
llvm_unreachable("unknown subtarget type");
}
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
: TargetLowering(TM, createTLOF(TM)) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
- X86ScalarSSEf64 = Subtarget->hasSSE2();
- X86ScalarSSEf32 = Subtarget->hasSSE1();
+ X86ScalarSSEf64 = Subtarget->hasXMMInt();
+ X86ScalarSSEf32 = Subtarget->hasXMM();
X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
RegInfo = TM.getRegisterInfo();
TD = getTargetData();
// Set up the TargetLowering object.
+ static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setShiftAmountType(MVT::i8);
@@ -96,6 +230,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
+ if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
+ // Setup Windows compiler runtime calls.
+ setLibcallName(RTLIB::SDIV_I64, "_alldiv");
+ setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
+ setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
+ setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
+ setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
+ }
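
The Windows block above redirects a few RTLIB entries to MSVC runtime helpers, so, for example, a 64-bit signed division on 32-bit Windows becomes a call to _alldiv. A toy version of the resulting libcall table; the enum and map here are ours, and only the helper names come from the patch:

    #include <cstdio>
    #include <map>
    #include <string>

    // Conceptual libcall table: an operation enum mapped to the runtime
    // symbol the lowered code will call.
    enum RTLibOp { SDIV_I64, UDIV_I64, FPTOUINT_F64_I64 };

    int main() {
      std::map<RTLibOp, std::string> LibcallName = {
        {SDIV_I64, "_alldiv"},
        {UDIV_I64, "_aulldiv"},
        {FPTOUINT_F64_I64, "_ftol2"},
      };
      // i64 / i64 on 32-bit Windows lowers to: call _alldiv
      std::printf("call %s\n", LibcallName[SDIV_I64].c_str());
    }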
+
if (Subtarget->isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
@@ -213,16 +359,13 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
- if (!X86ScalarSSEf64) {
- setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
- setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
+ if (!X86ScalarSSEf64) {
+ setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
+ setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
if (Subtarget->is64Bit()) {
- setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand);
- // Without SSE, i64->f64 goes through memory; i64->MMX is Legal.
- if (Subtarget->hasMMX() && !DisableMMX)
- setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Custom);
- else
- setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand);
+ setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
+ // Without SSE, i64->f64 goes through memory.
+ setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
}
}
@@ -236,30 +379,21 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// (low) operations are left as Legal, as there are single-result
// instructions for this in x86. Using the two-result multiply instructions
// when both high and low results are needed must be arranged by dagcombine.
- setOperationAction(ISD::MULHS , MVT::i8 , Expand);
- setOperationAction(ISD::MULHU , MVT::i8 , Expand);
- setOperationAction(ISD::SDIV , MVT::i8 , Expand);
- setOperationAction(ISD::UDIV , MVT::i8 , Expand);
- setOperationAction(ISD::SREM , MVT::i8 , Expand);
- setOperationAction(ISD::UREM , MVT::i8 , Expand);
- setOperationAction(ISD::MULHS , MVT::i16 , Expand);
- setOperationAction(ISD::MULHU , MVT::i16 , Expand);
- setOperationAction(ISD::SDIV , MVT::i16 , Expand);
- setOperationAction(ISD::UDIV , MVT::i16 , Expand);
- setOperationAction(ISD::SREM , MVT::i16 , Expand);
- setOperationAction(ISD::UREM , MVT::i16 , Expand);
- setOperationAction(ISD::MULHS , MVT::i32 , Expand);
- setOperationAction(ISD::MULHU , MVT::i32 , Expand);
- setOperationAction(ISD::SDIV , MVT::i32 , Expand);
- setOperationAction(ISD::UDIV , MVT::i32 , Expand);
- setOperationAction(ISD::SREM , MVT::i32 , Expand);
- setOperationAction(ISD::UREM , MVT::i32 , Expand);
- setOperationAction(ISD::MULHS , MVT::i64 , Expand);
- setOperationAction(ISD::MULHU , MVT::i64 , Expand);
- setOperationAction(ISD::SDIV , MVT::i64 , Expand);
- setOperationAction(ISD::UDIV , MVT::i64 , Expand);
- setOperationAction(ISD::SREM , MVT::i64 , Expand);
- setOperationAction(ISD::UREM , MVT::i64 , Expand);
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ MVT VT = IntVTs[i];
+ setOperationAction(ISD::MULHS, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+
+ // Add/Sub overflow ops with MVT::Glue are lowered to EFLAGS dependences.
+ setOperationAction(ISD::ADDC, VT, Custom);
+ setOperationAction(ISD::ADDE, VT, Custom);
+ setOperationAction(ISD::SUBC, VT, Custom);
+ setOperationAction(ISD::SUBE, VT, Custom);
+ }
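
Making ADDC/ADDE/SUBC/SUBE custom lets multi-word arithmetic ride the EFLAGS carry directly. What an ADDC/ADDE pair computes, written out by hand as the classic add/adc sequence:

    #include <cstdint>
    #include <cstdio>

    // ADDC adds the low limbs and produces a carry; ADDE folds that
    // carry into the high limbs -- add/adc on x86.
    void add128(uint64_t aLo, uint64_t aHi, uint64_t bLo, uint64_t bHi,
                uint64_t &rLo, uint64_t &rHi) {
      rLo = aLo + bLo;            // ADDC
      uint64_t carry = rLo < aLo; // carry out of the low add
      rHi = aHi + bHi + carry;    // ADDE
    }

    int main() {
      uint64_t lo, hi;
      add128(~0ull, 0, 1, 0, lo, hi);
      std::printf("hi=%llu lo=%llu\n", (unsigned long long)hi,
                  (unsigned long long)lo); // hi=1 lo=0
    }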
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
@@ -276,21 +410,27 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
- setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
- setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
- setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
if (Subtarget->is64Bit()) {
- setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
}
+ if (Subtarget->hasPOPCNT()) {
+ setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
+ } else {
+ setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
+ setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
+ setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
+ }
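
With POPCNT available, i8 CTPOP is promoted so the wider popcnt instruction can be used; without it, every width expands. A sketch of the kind of shift-and-mask sequence the Expand path amounts to (the standard parallel bit count, shown here for 32 bits):

    #include <cassert>
    #include <cstdint>

    // Classic parallel popcount: sum bits in pairs, nibbles, then bytes.
    unsigned popcount32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;
      return (v * 0x01010101u) >> 24;
    }

    int main() {
      assert(popcount32(0) == 0);
      assert(popcount32(0xFFu) == 8);
      assert(popcount32(0x80000001u) == 2);
      return 0;
    }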
+
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
@@ -298,7 +438,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
setOperationAction(ISD::SELECT , MVT::i8 , Custom);
- setOperationAction(ISD::SELECT , MVT::i16 , Custom);
+ setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
@@ -341,12 +481,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
}
- if (Subtarget->hasSSE1())
+ if (Subtarget->hasXMM())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
// We may not have a libcall for MEMBARRIER so we should lower this.
setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
-
+
// On X86 and X86-64, atomic operations are lowered to locked instructions.
// Locked instructions, in turn, have implicit fence semantics (all memory
// operations are flushed before issuing the locked instruction, and they
@@ -355,15 +495,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setShouldFoldAtomicFences(true);
// Expand certain atomics
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
-
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
+ for (unsigned i = 0, e = 4; i != e; ++i) {
+ MVT VT = IntVTs[i];
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
+ }
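
ATOMIC_LOAD_SUB stays custom-lowered because x86 has LOCK XADD but no fetch-and-sub; the usual rewrite, sketched below under that assumption, negates the operand and reuses the atomic-add path:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Subtract becomes an atomic add of the negated value; on x86 this
    // typically compiles to a single lock xadd.
    int32_t fetchSubViaAdd(std::atomic<int32_t> &a, int32_t v) {
      return a.fetch_add(-v);
    }

    int main() {
      std::atomic<int32_t> a(10);
      int32_t old = fetchSubViaAdd(a, 3);
      assert(old == 10 && a.load() == 7);
      return 0;
    }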
if (!Subtarget->is64Bit()) {
setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
@@ -415,7 +551,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- if (Subtarget->isTargetCygMing())
+ if (Subtarget->isTargetCygMing() || Subtarget->isTargetWindows())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
else
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
@@ -512,13 +648,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
- bool ignored;
- APFloat TmpFlt(+0.0);
- TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
- &ignored);
+ APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
addLegalFPImmediate(TmpFlt); // FLD0
TmpFlt.changeSign();
addLegalFPImmediate(TmpFlt); // FLD0/FCHS
+
+ bool ignored;
APFloat TmpFlt2(+1.0);
TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
&ignored);
@@ -564,8 +699,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand);
- setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand);
+ setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand);
setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
@@ -613,91 +749,44 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
- if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
- addRegisterClass(MVT::v8i8, X86::VR64RegisterClass, false);
- addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
- addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
-
- addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
-
- setOperationAction(ISD::ADD, MVT::v8i8, Legal);
- setOperationAction(ISD::ADD, MVT::v4i16, Legal);
- setOperationAction(ISD::ADD, MVT::v2i32, Legal);
- setOperationAction(ISD::ADD, MVT::v1i64, Legal);
-
- setOperationAction(ISD::SUB, MVT::v8i8, Legal);
- setOperationAction(ISD::SUB, MVT::v4i16, Legal);
- setOperationAction(ISD::SUB, MVT::v2i32, Legal);
- setOperationAction(ISD::SUB, MVT::v1i64, Legal);
-
- setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
- setOperationAction(ISD::MUL, MVT::v4i16, Legal);
-
- setOperationAction(ISD::AND, MVT::v8i8, Promote);
- AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
- setOperationAction(ISD::AND, MVT::v4i16, Promote);
- AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
- setOperationAction(ISD::AND, MVT::v2i32, Promote);
- AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::AND, MVT::v1i64, Legal);
-
- setOperationAction(ISD::OR, MVT::v8i8, Promote);
- AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
- setOperationAction(ISD::OR, MVT::v4i16, Promote);
- AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
- setOperationAction(ISD::OR, MVT::v2i32, Promote);
- AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::OR, MVT::v1i64, Legal);
-
- setOperationAction(ISD::XOR, MVT::v8i8, Promote);
- AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
- setOperationAction(ISD::XOR, MVT::v4i16, Promote);
- AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
- setOperationAction(ISD::XOR, MVT::v2i32, Promote);
- AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::XOR, MVT::v1i64, Legal);
-
- setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
-
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
-
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
-
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
-
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
-
- setOperationAction(ISD::SELECT, MVT::v8i8, Promote);
- setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
- setOperationAction(ISD::SELECT, MVT::v2i32, Promote);
- setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
- setOperationAction(ISD::VSETCC, MVT::v8i8, Custom);
- setOperationAction(ISD::VSETCC, MVT::v4i16, Custom);
- setOperationAction(ISD::VSETCC, MVT::v2i32, Custom);
-
- if (!X86ScalarSSEf64 && Subtarget->is64Bit()) {
- setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
- }
- }
-
- if (!UseSoftFloat && Subtarget->hasSSE1()) {
+ if (!UseSoftFloat && Subtarget->hasMMX()) {
+ addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
+ // No operations on x86mmx supported, everything uses intrinsics.
+ }
+
+ // MMX-sized vectors (other than x86mmx) are expected to be expanded
+ // into smaller operations.
+ setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
+ setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
+ setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
+ setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
+ setOperationAction(ISD::AND, MVT::v8i8, Expand);
+ setOperationAction(ISD::AND, MVT::v4i16, Expand);
+ setOperationAction(ISD::AND, MVT::v2i32, Expand);
+ setOperationAction(ISD::AND, MVT::v1i64, Expand);
+ setOperationAction(ISD::OR, MVT::v8i8, Expand);
+ setOperationAction(ISD::OR, MVT::v4i16, Expand);
+ setOperationAction(ISD::OR, MVT::v2i32, Expand);
+ setOperationAction(ISD::OR, MVT::v1i64, Expand);
+ setOperationAction(ISD::XOR, MVT::v8i8, Expand);
+ setOperationAction(ISD::XOR, MVT::v4i16, Expand);
+ setOperationAction(ISD::XOR, MVT::v2i32, Expand);
+ setOperationAction(ISD::XOR, MVT::v1i64, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
+ setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
+ setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
+ setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
+ setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
+ setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
+
+ if (!UseSoftFloat && Subtarget->hasXMM()) {
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
@@ -714,7 +803,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
}
- if (!UseSoftFloat && Subtarget->hasSSE2()) {
+ if (!UseSoftFloat && Subtarget->hasXMMInt()) {
addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
// FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
@@ -795,7 +884,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Do not attempt to promote non-128-bit vectors
if (!VT.is128BitVector())
continue;
-
+
setOperationAction(ISD::AND, SVT, Promote);
AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
setOperationAction(ISD::OR, SVT, Promote);
@@ -818,10 +907,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
- if (!DisableMMX && Subtarget->hasMMX()) {
- setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
- setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
- }
}
if (Subtarget->hasSSE41()) {
@@ -863,9 +948,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
}
- if (Subtarget->hasSSE42()) {
+ if (Subtarget->hasSSE42())
setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);
- }
if (!UseSoftFloat && Subtarget->hasAVX()) {
addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
@@ -878,27 +962,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::LOAD, MVT::v8i32, Legal);
setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
+
setOperationAction(ISD::FADD, MVT::v8f32, Legal);
setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom);
- //setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Custom);
- //setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom);
- //setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
- //setOperationAction(ISD::VSETCC, MVT::v8f32, Custom);
-
- // Operations to consider commented out -v16i16 v32i8
- //setOperationAction(ISD::ADD, MVT::v16i16, Legal);
- setOperationAction(ISD::ADD, MVT::v8i32, Custom);
- setOperationAction(ISD::ADD, MVT::v4i64, Custom);
- //setOperationAction(ISD::SUB, MVT::v32i8, Legal);
- //setOperationAction(ISD::SUB, MVT::v16i16, Legal);
- setOperationAction(ISD::SUB, MVT::v8i32, Custom);
- setOperationAction(ISD::SUB, MVT::v4i64, Custom);
- //setOperationAction(ISD::MUL, MVT::v16i16, Legal);
+
setOperationAction(ISD::FADD, MVT::v4f64, Legal);
setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
@@ -906,85 +977,66 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
- setOperationAction(ISD::VSETCC, MVT::v4f64, Custom);
- // setOperationAction(ISD::VSETCC, MVT::v32i8, Custom);
- // setOperationAction(ISD::VSETCC, MVT::v16i16, Custom);
- setOperationAction(ISD::VSETCC, MVT::v8i32, Custom);
-
- // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i8, Custom);
- // setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i16, Custom);
- // setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i16, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i32, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f32, Custom);
-
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i64, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Custom);
-
-#if 0
- // Not sure we want to do this since there are no 256-bit integer
- // operations in AVX
-
- // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
- // This includes 256-bit vectors
- for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; ++i) {
- EVT VT = (MVT::SimpleValueType)i;
-
- // Do not attempt to custom lower non-power-of-2 vectors
- if (!isPowerOf2_32(VT.getVectorNumElements()))
+ // Custom lower build_vector, vector_shuffle, scalar_to_vector,
+ // insert_vector_elt extract_subvector and extract_vector_elt for
+ // 256-bit types.
+ for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE;
+ ++i) {
+ MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
+ // Do not attempt to custom lower non-256-bit vectors
+ if (!isPowerOf2_32(MVT(VT).getVectorNumElements())
+ || (MVT(VT).getSizeInBits() < 256))
continue;
-
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- }
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ }
+ // Custom-lower insert_subvector and extract_subvector based on
+ // the result type.
+ for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE;
+ ++i) {
+ MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
+ // Do not attempt to custom lower non-256-bit vectors
+ if (!isPowerOf2_32(MVT(VT).getVectorNumElements()))
+ continue;
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i64, Custom);
+ if (MVT(VT).getSizeInBits() == 128) {
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ }
+ else if (MVT(VT).getSizeInBits() == 256) {
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ }
}
-#endif
-#if 0
- // Not sure we want to do this since there are no 256-bit integer
- // operations in AVX
-
- // Promote v32i8, v16i16, v8i32 load, select, and, or, xor to v4i64.
- // Including 256-bit vectors
- for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; i++) {
- EVT VT = (MVT::SimpleValueType)i;
+ // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
+ // Don't promote loads because we need them for VPERM vector index versions.
- if (!VT.is256BitVector()) {
+ for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE;
+ VT++) {
+ if (!isPowerOf2_32(MVT((MVT::SimpleValueType)VT).getVectorNumElements())
+ || (MVT((MVT::SimpleValueType)VT).getSizeInBits() < 256))
continue;
- }
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v4i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v4i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
+ setOperationAction(ISD::AND, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::AND, (MVT::SimpleValueType)VT, MVT::v4i64);
+ setOperationAction(ISD::OR, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::OR, (MVT::SimpleValueType)VT, MVT::v4i64);
+ setOperationAction(ISD::XOR, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::XOR, (MVT::SimpleValueType)VT, MVT::v4i64);
+ //setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Promote);
+ //AddPromotedToType (ISD::LOAD, (MVT::SimpleValueType)VT, MVT::v4i64);
+ setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v4i64);
}
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-#endif
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- // Add/Sub/Mul with overflow operations are custom lowered.
- setOperationAction(ISD::SADDO, MVT::i32, Custom);
- setOperationAction(ISD::UADDO, MVT::i32, Custom);
- setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- setOperationAction(ISD::USUBO, MVT::i32, Custom);
- setOperationAction(ISD::SMULO, MVT::i32, Custom);
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
// handle type legalization for these operations here.
@@ -992,14 +1044,21 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: We really should do custom legalization for addition and
// subtraction on x86-32 once PR3203 is fixed. We really can't do much better
// than generic legalization for 64-bit multiplication-with-overflow, though.
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::SADDO, MVT::i64, Custom);
- setOperationAction(ISD::UADDO, MVT::i64, Custom);
- setOperationAction(ISD::SSUBO, MVT::i64, Custom);
- setOperationAction(ISD::USUBO, MVT::i64, Custom);
- setOperationAction(ISD::SMULO, MVT::i64, Custom);
+ for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
+ // Add/Sub/Mul with overflow operations are custom lowered.
+ MVT VT = IntVTs[i];
+ setOperationAction(ISD::SADDO, VT, Custom);
+ setOperationAction(ISD::UADDO, VT, Custom);
+ setOperationAction(ISD::SSUBO, VT, Custom);
+ setOperationAction(ISD::USUBO, VT, Custom);
+ setOperationAction(ISD::SMULO, VT, Custom);
+ setOperationAction(ISD::UMULO, VT, Custom);
}
+ // There are no 8-bit 3-address imul/mul instructions.
+ setOperationAction(ISD::SMULO, MVT::i8, Expand);
+ setOperationAction(ISD::UMULO, MVT::i8, Expand);
+
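
Custom-lowering the SADDO/UADDO/SSUBO/USUBO/SMULO/UMULO nodes lets the overflow test come straight out of EFLAGS rather than being recomputed. The conditions the flags encode, written as plain C++ (CF is unsigned wrap for UADDO, OF is signed wrap for SADDO):

    #include <cassert>
    #include <cstdint>

    // Carry flag: unsigned result wrapped below an operand.
    bool uaddOverflow(uint32_t a, uint32_t b, uint32_t &r) {
      r = a + b;
      return r < a;
    }
    // Overflow flag: operands agree in sign, result disagrees.
    bool saddOverflow(int32_t a, int32_t b, int32_t &r) {
      uint32_t ur = (uint32_t)a + (uint32_t)b;
      r = (int32_t)ur;
      return ((a ^ r) & (b ^ r)) < 0;
    }

    int main() {
      uint32_t ur; int32_t sr;
      assert(uaddOverflow(0xFFFFFFFFu, 1, ur) && ur == 0);
      assert(saddOverflow(INT32_MAX, 1, sr));
      return 0;
    }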
if (!Subtarget->is64Bit()) {
// These libcalls are not available in 32-bit.
setLibcallName(RTLIB::SHL_I128, 0);
@@ -1016,6 +1075,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
if (Subtarget->is64Bit())
@@ -1023,11 +1085,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
computeRegisterProperties();
- // FIXME: These should be based on subtarget info. Plus, the values should
- // be smaller when we are in optimizing for size mode.
+ // On Darwin, -Os means optimize for size without hurting performance, so
+ // do not reduce the limit.
maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
+ maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
- maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
+ maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
+ maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
+ maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
setPrefLoopAlignment(16);
benefitFromCodePlacementOpt = true;
}
@@ -1078,7 +1143,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
}
unsigned Align = 4;
- if (Subtarget->hasSSE1())
+ if (Subtarget->hasXMM())
getMaxByValAlign(Ty, Align);
return Align;
}
@@ -1119,7 +1184,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
} else if (!MemcpyStrSrc && Size >= 8 &&
!Subtarget->is64Bit() &&
Subtarget->getStackAlignment() >= 8 &&
- Subtarget->hasSSE2()) {
+ Subtarget->hasXMMInt()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
return MVT::f64;
@@ -1139,21 +1204,11 @@ unsigned X86TargetLowering::getJumpTableEncoding() const {
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
Subtarget->isPICStyleGOT())
return MachineJumpTableInfo::EK_Custom32;
-
+
// Otherwise, use the normal jump table encoding heuristics.
return TargetLowering::getJumpTableEncoding();
}
-/// getPICBaseSymbol - Return the X86-32 PIC base.
-MCSymbol *
-X86TargetLowering::getPICBaseSymbol(const MachineFunction *MF,
- MCContext &Ctx) const {
- const MCAsmInfo &MAI = *getTargetMachine().getMCAsmInfo();
- return Ctx.GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix())+
- Twine(MF->getFunctionNumber())+"$pb");
-}
-
-
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
@@ -1188,7 +1243,7 @@ getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
// Otherwise, the reference is relative to the PIC base.
- return MCSymbolRefExpr::Create(getPICBaseSymbol(MF, Ctx), Ctx);
+ return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}
/// getFunctionAlignment - Return the Log2 alignment of this function.
@@ -1196,6 +1251,7 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}
+// FIXME: Why is this routine here? Move to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(EVT VT) const{
const TargetRegisterClass *RRC = 0;
@@ -1207,8 +1263,7 @@ X86TargetLowering::findRepresentativeClass(EVT VT) const{
RRC = (Subtarget->is64Bit()
? X86::GR64RegisterClass : X86::GR32RegisterClass);
break;
- case MVT::v8i8: case MVT::v4i16:
- case MVT::v2i32: case MVT::v1i64:
+ case MVT::x86mmx:
RRC = X86::VR64RegisterClass;
break;
case MVT::f32: case MVT::f64:
@@ -1222,10 +1277,13 @@ X86TargetLowering::findRepresentativeClass(EVT VT) const{
return std::make_pair(RRC, Cost);
}
+// FIXME: Why is this routine here? Move to RegInfo!
unsigned
X86TargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
- unsigned FPDiff = RegInfo->hasFP(MF) ? 1 : 0;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
switch (RC->getID()) {
default:
return 0;
@@ -1267,7 +1325,7 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
#include "X86GenCallingConv.inc"
-bool
+bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
@@ -1312,16 +1370,18 @@ X86TargetLowering::LowerReturn(SDValue Chain,
SDValue ValToCopy = OutVals[i];
EVT ValVT = ValToCopy.getValueType();
- // If this is x86-64, and we disabled SSE, we can't return FP values
- if ((ValVT == MVT::f32 || ValVT == MVT::f64) &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
+ // If this is x86-64, and we disabled SSE, we can't return FP values,
+ // or SSE or MMX vectors.
+ if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
+ VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
+ (Subtarget->is64Bit() && !Subtarget->hasXMM())) {
report_fatal_error("SSE register return with SSE disabled");
}
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
if (ValVT == MVT::f64 &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
+ (Subtarget->is64Bit() && !Subtarget->hasXMMInt()))
report_fatal_error("SSE2 register return with SSE2 disabled");
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
@@ -1340,20 +1400,19 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
if (Subtarget->is64Bit()) {
- if (ValVT.isVector() && ValVT.getSizeInBits() == 64) {
- ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
+ if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
+ ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
ValToCopy);
-
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
if (!Subtarget->hasSSE2())
- ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy);
+ ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
}
}
}
-
+
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
Flag = Chain.getValue(1);
}
@@ -1367,7 +1426,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
- assert(Reg &&
+ assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments().");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
@@ -1388,6 +1447,28 @@ X86TargetLowering::LowerReturn(SDValue Chain,
MVT::Other, &RetOps[0], RetOps.size());
}
+bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const {
+ if (N->getNumValues() != 1)
+ return false;
+ if (!N->hasNUsesOfValue(1, 0))
+ return false;
+
+ SDNode *Copy = *N->use_begin();
+ if (Copy->getOpcode() != ISD::CopyToReg &&
+ Copy->getOpcode() != ISD::FP_EXTEND)
+ return false;
+
+ bool HasRet = false;
+ for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
+ UI != UE; ++UI) {
+ if (UI->getOpcode() != X86ISD::RET_FLAG)
+ return false;
+ HasRet = true;
+ }
+
+ return HasRet;
+}
+
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
@@ -1412,7 +1493,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasXMM())) {
report_fatal_error("SSE register return with SSE disabled");
}
@@ -1433,7 +1514,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64;
if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80;
SDValue Ops[] = { Chain, InFlag };
- Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag,
+ Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Glue,
Ops, 2), 1);
Val = Chain.getValue(0);
@@ -1456,7 +1537,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
MVT::i64, InFlag).getValue(1);
Val = Chain.getValue(0);
}
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val);
+ Val = DAG.getNode(ISD::BITCAST, dl, CopyVT, Val);
} else {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
CopyVT, InFlag).getValue(1);
@@ -1499,30 +1580,6 @@ ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
return Ins[0].Flags.isSRet();
}
-/// CCAssignFnForNode - Selects the correct CCAssignFn for a the
-/// given CallingConvention value.
-CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
- if (Subtarget->is64Bit()) {
- if (CC == CallingConv::GHC)
- return CC_X86_64_GHC;
- else if (Subtarget->isTargetWin64())
- return CC_X86_Win64_C;
- else
- return CC_X86_64_C;
- }
-
- if (CC == CallingConv::X86_FastCall)
- return CC_X86_32_FastCall;
- else if (CC == CallingConv::X86_ThisCall)
- return CC_X86_32_ThisCall;
- else if (CC == CallingConv::Fast)
- return CC_X86_32_FastCC;
- else if (CC == CallingConv::GHC)
- return CC_X86_32_GHC;
- else
- return CC_X86_32_C;
-}
-
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" with size and alignment information specified by
/// the specific parameter attribute. The copy will be passed as a byval
@@ -1531,10 +1588,11 @@ static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
DebugLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
- NULL, 0, NULL, 0);
+ MachinePointerInfo(), MachinePointerInfo());
}
/// IsTailCallConvention - Return true if the calling convention is one that
@@ -1583,7 +1641,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
VA.getLocMemOffset(), isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
return DAG.getLoad(ValVT, dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
}
}
@@ -1617,7 +1675,13 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
+
+ // Allocate shadow area for Win64
+ if (IsWin64) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
+ CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
unsigned LastVal = ~0U;
SDValue ArgValue;
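
The AllocateStack(32, 8) call above reserves the Win64 shadow area: the caller must provide four 8-byte home slots, one per register parameter, so the first stack-passed argument begins at offset 32. A sketch of that layout:

    #include <cstdio>

    // Win64 argument area as seen by the callee: four home slots for
    // the register parameters, then any stack-passed arguments.
    int main() {
      const char *homeRegs[] = {"RCX", "RDX", "R8", "R9"};
      for (int i = 0; i < 4; ++i)
        std::printf("[rsp+%2d] home slot for %s\n", 8 * i, homeRegs[i]);
      std::printf("[rsp+32] first stack argument\n");
    }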
@@ -1644,12 +1708,12 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
RC = X86::VR256RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass;
- else if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
+ else if (RegVT == MVT::x86mmx)
RC = X86::VR64RegisterClass;
else
llvm_unreachable("Unknown argument type!");
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC, dl);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32
@@ -1662,14 +1726,13 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::BCvt)
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
if (VA.isExtInLoc()) {
// Handle MMX values passed in XMM regs.
if (RegVT.isVector()) {
- ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
- ArgValue, DAG.getConstant(0, MVT::i64));
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(),
+ ArgValue);
} else
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
@@ -1680,8 +1743,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// If value is passed via pointer - do a load.
if (VA.getLocInfo() == CCValAssign::Indirect)
- ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, NULL, 0,
- false, false, 0);
+ ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
+ MachinePointerInfo(), false, false, 0);
InVals.push_back(ArgValue);
}
@@ -1708,8 +1771,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- if (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
- CallConv != CallingConv::X86_ThisCall)) {
+ if (!IsWin64 && (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
+ CallConv != CallingConv::X86_ThisCall))) {
FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true));
}
if (Is64Bit) {
@@ -1719,9 +1782,6 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
static const unsigned GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
- static const unsigned XMMArgRegsWin64[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
- };
static const unsigned GPR64ArgRegs64Bit[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
@@ -1729,40 +1789,52 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- const unsigned *GPR64ArgRegs, *XMMArgRegs;
+ const unsigned *GPR64ArgRegs;
+ unsigned NumXMMRegs = 0;
if (IsWin64) {
- TotalNumIntRegs = 4; TotalNumXMMRegs = 4;
+ // The XMM registers which might contain var arg parameters are shadowed
+ // in their paired GPR. So we only need to save the GPR to their home
+ // slots.
+ TotalNumIntRegs = 4;
GPR64ArgRegs = GPR64ArgRegsWin64;
- XMMArgRegs = XMMArgRegsWin64;
} else {
TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
GPR64ArgRegs = GPR64ArgRegs64Bit;
- XMMArgRegs = XMMArgRegs64Bit;
+
+ NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs);
}
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
TotalNumIntRegs);
- unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
- TotalNumXMMRegs);
bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
- assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
+ assert(!(NumXMMRegs && !Subtarget->hasXMM()) &&
"SSE register cannot be used when SSE is disabled!");
assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
- if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
+ if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM())
// Kernel mode asks for SSE to be disabled, so don't push them
// on the stack.
TotalNumXMMRegs = 0;
- // For X86-64, if there are vararg parameters that are passed via
- // registers, then we must store them to their spots on the stack so they
- // may be loaded by deferencing the result of va_next.
- FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
- FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
- FuncInfo->setRegSaveFrameIndex(
- MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
+ if (IsWin64) {
+ const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
+ // Get to the caller-allocated home save location. Add 8 to account
+ // for the return address.
+ int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
+ FuncInfo->setRegSaveFrameIndex(
+ MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
+ FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
+ } else {
+ // For X86-64, if there are vararg parameters that are passed via
+ // registers, then we must store them to their spots on the stack so they
+ // may be loaded by dereferencing the result of va_next.
+ FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
+ FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
+ FuncInfo->setRegSaveFrameIndex(
+ MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
false));
+ }
// Store the integer parameter registers.
SmallVector<SDValue, 8> MemOps;
@@ -1773,13 +1845,13 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
DAG.getIntPtrConstant(Offset));
unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
- X86::GR64RegisterClass);
+ X86::GR64RegisterClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
- PseudoSourceValue::getFixedStack(
- FuncInfo->getRegSaveFrameIndex()),
- Offset, false, false, 0);
+ MachinePointerInfo::getFixedStack(
+ FuncInfo->getRegSaveFrameIndex(), Offset),
+ false, false, 0);
MemOps.push_back(Store);
Offset += 8;
}
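
A worked pass through the Win64 arithmetic above, assuming the conventional frame layout where the return address sits at the incoming stack pointer (concrete numbers are illustrative):

    // Home slots: 32 bytes the caller reserves just past the return address,
    // one 8-byte slot per register argument (RCX, RDX, R8, R9).
    int HomeOffset = TFI.getOffsetOfLocalArea() + 8;  // step over ret addr
    unsigned NumIntRegs = 2;      // say RCX and RDX already carry named args
    int SaveStart = NumIntRegs * 8 + HomeOffset;      // R8's home slot
    // The fixed object created above aliases those caller-allocated slots,
    // so Win64 needs no separate register-save stack object and the varargs
    // frame index can simply reuse it.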
@@ -1789,7 +1861,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<SDValue, 11> SaveXMMOps;
SaveXMMOps.push_back(Chain);
- unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass);
+ unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass, dl);
SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
SaveXMMOps.push_back(ALVal);
@@ -1799,8 +1871,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
FuncInfo->getVarArgsFPOffset()));
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
- unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
- X86::VR128RegisterClass);
+ unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs],
+ X86::VR128RegisterClass, dl);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
SaveXMMOps.push_back(Val);
}
@@ -1843,15 +1915,14 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
- const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0);
- unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset();
+ unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
- if (Flags.isByVal()) {
+ if (Flags.isByVal())
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
- }
+
return DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), LocMemOffset,
+ MachinePointerInfo::getStack(LocMemOffset),
false, false, 0);
}
@@ -1867,7 +1938,8 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
- OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, NULL, 0, false, false, 0);
+ OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
+ false, false, 0);
return SDValue(OutRetAddr.getNode(), 1);
}
@@ -1886,7 +1958,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
- PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0,
+ MachinePointerInfo::getFixedStack(NewReturnAddrFI),
false, false, 0);
return Chain;
}
@@ -1902,6 +1974,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
+ bool IsWin64 = Subtarget->isTargetWin64();
bool IsStructRet = CallIsStructReturn(Outs);
bool IsSibcall = false;
@@ -1927,7 +2000,13 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
+
+ // Allocate shadow area for Win64
+ if (IsWin64) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
+ CCInfo.AnalyzeCallOperands(Outs, CC_X86);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -1986,21 +2065,21 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case CCValAssign::AExt:
if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
// Special case: passing MMX values in XMM registers.
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg);
+ Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
break;
case CCValAssign::Indirect: {
// Store the argument.
SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0);
Arg = SpillSlot;
break;
@@ -2009,7 +2088,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- if (isVarArg && Subtarget->isTargetWin64()) {
+ if (isVarArg && IsWin64) {
// Win64 ABI requires argument XMM reg to be copied to the corresponding
// shadow reg if callee is a varargs function.
unsigned ShadowReg = 0;
@@ -2075,7 +2154,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
}
- if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) {
+ if (Is64Bit && isVarArg && !IsWin64) {
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
// (prototype-less calls or calls to functions containing ellipsis (...) in
@@ -2090,7 +2169,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
- assert((Subtarget->hasSSE1() || !NumXMMRegs)
+ assert((Subtarget->hasXMM() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
@@ -2143,7 +2222,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Store relative to framepointer.
MemOpChains2.push_back(
DAG.getStore(ArgChain, dl, Arg, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
@@ -2192,8 +2271,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
OpFlags = X86II::MO_PLT;
} else if (Subtarget->isPICStyleStubAny() &&
- (GV->isDeclaration() || GV->isWeakForLinker()) &&
- Subtarget->getDarwinVers() < 9) {
+ (GV->isDeclaration() || GV->isWeakForLinker()) &&
+ Subtarget->getDarwinVers() < 9) {
// PC-relative references to external symbols should go through $stub,
// unless we're building with the leopard linker or later, which
// automatically synthesizes these stubs.
@@ -2206,13 +2285,13 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
unsigned char OpFlags = 0;
- // On ELF targets, in either X86-64 or X86-32 mode, direct calls to external
- // symbols should go through the PLT.
+ // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
+ // external symbols should go through the PLT.
if (Subtarget->isTargetELF() &&
getTargetMachine().getRelocationModel() == Reloc::PIC_) {
OpFlags = X86II::MO_PLT;
} else if (Subtarget->isPICStyleStubAny() &&
- Subtarget->getDarwinVers() < 9) {
+ Subtarget->getDarwinVers() < 9) {
// PC-relative references to external symbols should go through $stub,
// unless we're building with the leopard linker or later, which
// automatically synthesizes these stubs.
@@ -2224,7 +2303,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
if (!IsSibcall && isTailCall) {
@@ -2250,7 +2329,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
// Add an implicit use of AL for non-Windows x86 64-bit vararg functions.
- if (Is64Bit && isVarArg && !Subtarget->isTargetWin64())
+ if (Is64Bit && isVarArg && !IsWin64)
Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
if (InFlag.getNode())
@@ -2337,7 +2416,7 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const TargetMachine &TM = MF.getTarget();
- const TargetFrameInfo &TFI = *TM.getFrameInfo();
+ const TargetFrameLowering &TFI = *TM.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
@@ -2364,7 +2443,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
- if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
+ if (!TargetRegisterInfo::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
@@ -2510,14 +2589,17 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
+
+ // Allocate shadow area for Win64
+ if (Subtarget->isTargetWin64()) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
+ CCInfo.AnalyzeCallOperands(Outs, CC_X86);
if (CCInfo.getNextStackOffset()) {
MachineFunction &MF = DAG.getMachineFunction();
if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
return false;
- if (Subtarget->isTargetWin64())
- // Win64 ABI has additional complications.
- return false;
// Check if the arguments are already laid out in the right way as
// the caller's fixed stack objects.
@@ -2564,6 +2646,11 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
}
+ // A stdcall caller is expected to clean up its arguments; the callee
+ // isn't going to do that.
+ if (!CCMatch && CallerCC==CallingConv::X86_StdCall)
+ return false;
+
return true;
}
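
The removed CCAssignFnForNode dispatch is replaced by the single table-generated CC_X86 entry point; the only per-target adjustment left is pre-allocating the Win64 shadow area, a pattern this patch repeats in LowerFormalArguments, LowerCall, and here. A sketch of the common shape:

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
                   ArgLocs, *DAG.getContext());
    if (Subtarget->isTargetWin64())
      CCInfo.AllocateStack(32, 8);            // reserve 32-byte shadow area
    CCInfo.AnalyzeCallOperands(Outs, CC_X86); // one convention table for all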
@@ -2592,6 +2679,7 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::SHUFPD:
+ case X86ISD::PALIGN:
case X86ISD::SHUFPS:
case X86ISD::MOVLHPS:
case X86ISD::MOVLHPD:
@@ -2600,6 +2688,7 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::MOVLPD:
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
+ case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
case X86ISD::UNPCKLPS:
@@ -2625,6 +2714,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
default: llvm_unreachable("Unknown x86 shuffle node");
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
+ case X86ISD::MOVDDUP:
return DAG.getNode(Opc, dl, VT, V1);
}
@@ -2648,6 +2738,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
+ case X86ISD::PALIGN:
case X86ISD::SHUFPD:
case X86ISD::SHUFPS:
return DAG.getNode(Opc, dl, VT, V1, V2,
@@ -2770,8 +2861,8 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
// First determine if it is required or is profitable to flip the operands.
// If LHS is a foldable load, but RHS is not, flip the condition.
- if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
- !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
+ if (ISD::isNON_EXTLoad(LHS.getNode()) &&
+ !ISD::isNON_EXTLoad(RHS.getNode())) {
SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
std::swap(LHS, RHS);
}
@@ -2865,7 +2956,7 @@ static bool isUndefOrEqual(int Val, int CmpVal) {
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
- if (VT == MVT::v4f32 || VT == MVT::v4i32 || VT == MVT::v4i16)
+ if (VT == MVT::v4f32 || VT == MVT::v4i32 )
return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
if (VT == MVT::v2f64 || VT == MVT::v2i64)
return (Mask[0] < 2 && Mask[1] < 2);
@@ -2933,15 +3024,15 @@ bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
bool hasSSSE3) {
int i, e = VT.getVectorNumElements();
-
+
// Do not handle v2i64 / v2f64 shuffles with palignr.
if (e < 4 || !hasSSSE3)
return false;
-
+
for (i = 0; i != e; ++i)
if (Mask[i] >= 0)
break;
-
+
// All undef, not a palignr.
if (i == e)
return false;
@@ -2952,13 +3043,13 @@ static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
bool NeedsUnary = false;
int s = Mask[i] - i;
-
+
// Check the rest of the elements to see if they are consecutive.
for (++i; i != e; ++i) {
int m = Mask[i];
- if (m < 0)
+ if (m < 0)
continue;
-
+
Unary = Unary && (m < (int)e);
NeedsUnary = NeedsUnary || (m < s);
@@ -3046,10 +3137,10 @@ bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
unsigned NumElems = N->getValueType(0).getVectorNumElements();
-
+
if (NumElems != 4)
return false;
-
+
return isUndefOrEqual(N->getMaskElt(0), 2) &&
isUndefOrEqual(N->getMaskElt(1), 3) &&
isUndefOrEqual(N->getMaskElt(2), 2) &&
@@ -3320,6 +3411,44 @@ bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
return true;
}
+/// isVEXTRACTF128Index - Return true if the specified
+/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
+/// suitable for input to VEXTRACTF128.
+bool X86::isVEXTRACTF128Index(SDNode *N) {
+ if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
+ return false;
+
+ // The index should be aligned on a 128-bit boundary.
+ uint64_t Index =
+ cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
+
+ unsigned VL = N->getValueType(0).getVectorNumElements();
+ unsigned VBits = N->getValueType(0).getSizeInBits();
+ unsigned ElSize = VBits / VL;
+ bool Result = (Index * ElSize) % 128 == 0;
+
+ return Result;
+}
+
+/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR
+/// operand specifies a subvector insert that is suitable for input to
+/// VINSERTF128.
+bool X86::isVINSERTF128Index(SDNode *N) {
+ if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
+ return false;
+
+ // The index should be aligned on a 128-bit boundary.
+ uint64_t Index =
+ cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
+
+ unsigned VL = N->getValueType(0).getVectorNumElements();
+ unsigned VBits = N->getValueType(0).getSizeInBits();
+ unsigned ElSize = VBits / VL;
+ bool Result = (Index * ElSize) % 128 == 0;
+
+ return Result;
+}
+
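
The alignment test shared by both predicates above can be restated as a tiny standalone helper; a sketch with a worked case (the helper name is hypothetical):

    // Does element Index start a 128-bit lane of a vector with NumElts
    // elements and VecBits total bits?
    static bool startsXmmLane(unsigned Index, unsigned NumElts,
                              unsigned VecBits) {
      unsigned ElSize = VecBits / NumElts;  // element size in bits
      return (Index * ElSize) % 128 == 0;
    }
    // v8f32 (VecBits = 256, NumElts = 8, ElSize = 32): indices 0 and 4
    // qualify; index 2 starts 64 bits into a lane and is rejected.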
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
@@ -3388,6 +3517,42 @@ unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
return (Val - i) * EltSize;
}
+/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate
+/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
+/// instructions.
+unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) {
+ if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
+ llvm_unreachable("Illegal extract subvector for VEXTRACTF128");
+
+ uint64_t Index =
+ cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
+
+ EVT VecVT = N->getOperand(0).getValueType();
+ EVT ElVT = VecVT.getVectorElementType();
+
+ unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
+
+ return Index / NumElemsPerChunk;
+}
+
+/// getInsertVINSERTF128Immediate - Return the appropriate immediate
+/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
+/// instructions.
+unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) {
+ if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
+ llvm_unreachable("Illegal insert subvector for VINSERTF128");
+
+ uint64_t Index =
+ cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
+
+ EVT VecVT = N->getValueType(0);
+ EVT ElVT = VecVT.getVectorElementType();
+
+ unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
+
+ return Index / NumElemsPerChunk;
+}
+
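
The returned immediate is just the 128-bit lane number; a worked case, assuming the v8f32 layout from the sketch above:

    // 128 / 32 = 4 elements per lane, so a subvector beginning at element 4
    // selects lane 4 / 4 = 1:
    //   vextractf128 $1, %ymm0, %xmm1   // high lane (element index 4)
    //   vextractf128 $0, %ymm0, %xmm1   // low lane  (element index 0)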
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
@@ -3537,13 +3702,10 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
- // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted
+ // Always build SSE zero vectors as <4 x i32> bitcasted
// to their dest type. This ensures they get CSE'd.
SDValue Vec;
- if (VT.getSizeInBits() == 64) { // MMX
- SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
- } else if (VT.getSizeInBits() == 128) {
+ if (VT.getSizeInBits() == 128) { // SSE
if (HasSSE2) { // SSE2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
@@ -3559,7 +3721,7 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
/// getOnesVector - Returns a vector of specified type with all bits set.
@@ -3571,11 +3733,8 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
// type. This ensures they get CSE'd.
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec;
- if (VT.getSizeInBits() == 64) // MMX
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
- else // SSE
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
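
Dropping the 64-bit (MMX) paths means every ones-vector is now materialized through the same canonical v4i32 node; a sketch of what getOnesVector now produces for, say, v8i16:

    SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
    SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                              Cst, Cst, Cst, Cst);  // pcmpeqd-style pattern
    SDValue Ones = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Vec);
    // Identical all-ones constants now CSE regardless of the requested type.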
@@ -3640,9 +3799,6 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32.
static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
- if (SV->getValueType(0).getVectorNumElements() <= 4)
- return SDValue(SV, 0);
-
EVT PVT = MVT::v4f32;
EVT VT = SV->getValueType(0);
DebugLoc dl = SV->getDebugLoc();
@@ -3663,9 +3819,9 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// Perform the splat.
int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1);
+ V1 = DAG.getNode(ISD::BITCAST, dl, PVT, V1);
V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1);
+ return DAG.getNode(ISD::BITCAST, dl, VT, V1);
}
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
@@ -3789,7 +3945,7 @@ SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
}
// Actual nodes that may contain scalar elements
- if (Opcode == ISD::BIT_CONVERT) {
+ if (Opcode == ISD::BITCAST) {
V = V.getOperand(0);
EVT SrcVT = V.getValueType();
unsigned NumElems = VT.getVectorNumElements();
@@ -3978,7 +4134,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
}
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
}
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
@@ -4017,11 +4173,10 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG,
const TargetLowering &TLI, DebugLoc dl) {
- bool isMMX = VT.getSizeInBits() == 64;
- EVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
+ EVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
- SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp,
DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
}
@@ -4029,7 +4184,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
SelectionDAG &DAG) const {
-
+
// Check if the scalar load can be widened into a vector load. And if
// the address is "base + cst" see if the cst can be "absorbed" into
// the shuffle mask.
@@ -4046,8 +4201,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
FI = FINode->getIndex();
Offset = 0;
- } else if (Ptr.getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(Ptr.getOperand(1)) &&
+ } else if (DAG.isBaseWithConstantOffset(Ptr) &&
isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
Offset = Ptr.getConstantOperandVal(1);
@@ -4084,41 +4238,42 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
int EltNo = (Offset - StartOffset) >> 2;
int Mask[4] = { EltNo, EltNo, EltNo, EltNo };
EVT VT = (PVT == MVT::i32) ? MVT::v4i32 : MVT::v4f32;
- SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr,LD->getSrcValue(),0,
+ SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr,
+ LD->getPointerInfo().getWithOffset(StartOffset),
false, false, 0);
// Canonicalize it to a v4i32 shuffle.
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, V1);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getVectorShuffle(MVT::v4i32, dl, V1,
- DAG.getUNDEF(MVT::v4i32), &Mask[0]));
+ DAG.getUNDEF(MVT::v4i32),&Mask[0]));
}
return SDValue();
}
-/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
-/// vector of type 'VT', see if the elements can be replaced by a single large
+/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
+/// vector of type 'VT', see if the elements can be replaced by a single large
/// load which has the same value as a build_vector whose operands are 'elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
-///
+///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
- DebugLoc &dl, SelectionDAG &DAG) {
+ DebugLoc &DL, SelectionDAG &DAG) {
EVT EltVT = VT.getVectorElementType();
unsigned NumElems = Elts.size();
-
+
LoadSDNode *LDBase = NULL;
unsigned LastLoadedElt = -1U;
-
+
// For each element in the initializer, see if we've found a load or an undef.
- // If we don't find an initial load element, or later load elements are
+ // If we don't find an initial load element, or later load elements are
// non-consecutive, bail out.
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Elts[i];
-
+
if (!Elt.getNode() ||
(Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
return SDValue();
@@ -4143,18 +4298,20 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
// consecutive loads for the low half, generate a vzext_load node.
if (LastLoadedElt == NumElems - 1) {
if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
- return DAG.getLoad(VT, dl, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getSrcValue(), LDBase->getSrcValueOffset(),
+ return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(),
LDBase->isVolatile(), LDBase->isNonTemporal(), 0);
- return DAG.getLoad(VT, dl, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getSrcValue(), LDBase->getSrcValueOffset(),
+ return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(),
LDBase->isVolatile(), LDBase->isNonTemporal(),
LDBase->getAlignment());
} else if (NumElems == 4 && LastLoadedElt == 1) {
SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
- SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ResNode);
+ SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys,
+ Ops, 2, MVT::i32,
+ LDBase->getMemOperand());
+ return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
}
return SDValue();
}
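
The two success paths of EltsFromConsecutiveLoads, spelled out for a v4i32 build vector (sketch):

    // All four elements are consecutive loads of a[0..3]:
    //   build_vector (load a+0), (load a+4), (load a+8), (load a+12)
    //     -> single v4i32 load of a       (LastLoadedElt == NumElems - 1)
    // Only the low half is loaded, rest undef:
    //   build_vector (load a+0), (load a+4), undef, undef
    //     -> X86ISD::VZEXT_LOAD of a      (NumElems == 4, LastLoadedElt == 1)
    // Using getMemIntrinsicNode with LDBase->getMemOperand() keeps the alias
    // information that the old plain getNode form discarded.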
@@ -4162,6 +4319,35 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
+
+ EVT VT = Op.getValueType();
+ EVT ExtVT = VT.getVectorElementType();
+
+ unsigned NumElems = Op.getNumOperands();
+
+ // For AVX-length vectors, build the individual 128-bit pieces and
+ // use shuffles to put them in place.
+ if (VT.getSizeInBits() > 256 &&
+ Subtarget->hasAVX() &&
+ !Disable256Bit &&
+ !ISD::isBuildVectorAllZeros(Op.getNode())) {
+ SmallVector<SDValue, 8> V;
+ V.resize(NumElems);
+ for (unsigned i = 0; i < NumElems; ++i) {
+ V[i] = Op.getOperand(i);
+ }
+
+ EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
+
+ // Build the lower subvector.
+ SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2);
+ // Build the upper subvector.
+ SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2],
+ NumElems/2);
+
+ return ConcatVectors(Lower, Upper, DAG);
+ }
+
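
The AVX path added above builds a wide vector as two 128-bit halves; schematically, for a hypothetical v8f32 build (ConcatVectors is the helper named in this patch, presumably defined elsewhere in the file):

    // BUILD_VECTOR x0..x7 : v8f32 becomes
    //   Lower = BUILD_VECTOR x0, x1, x2, x3 : v4f32
    //   Upper = BUILD_VECTOR x4, x5, x6, x7 : v4f32
    //   ConcatVectors(Lower, Upper, DAG)  // lowers to a vinsertf128 pattern
    // so only the 128-bit build_vector lowering needs the messy details.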
// All zero's are handled with pxor in SSE2 and above, xorps in SSE1.
// All one's are handled with pcmpeqd. In AVX, zero's are handled with
// vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256 version of pcmpeqd
@@ -4169,10 +4355,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
(Op.getValueType().getSizeInBits() != 256 &&
ISD::isBuildVectorAllOnes(Op.getNode()))) {
- // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
+ // Canonicalize this to <4 x i32> (SSE) to
// 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
// eliminated on x86-32 hosts.
- if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
+ if (Op.getValueType() == MVT::v4i32)
return Op;
if (ISD::isBuildVectorAllOnes(Op.getNode()))
@@ -4180,11 +4366,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
}
- EVT VT = Op.getValueType();
- EVT ExtVT = VT.getVectorElementType();
unsigned EVTBits = ExtVT.getSizeInBits();
- unsigned NumElems = Op.getNumOperands();
unsigned NumZero = 0;
unsigned NumNonZero = 0;
unsigned NonZeros = 0;
@@ -4223,9 +4406,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
(!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
- // Handle MMX and SSE both.
- EVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
- unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
+ // Handle SSE only.
+ assert(VT == MVT::v2i64 && "Expected an SSE value type!");
+ EVT VecVT = MVT::v4i32;
+ unsigned VecElts = 4;
// Truncate the value (which may itself be a constant) to i32, and
// convert it to a vector with movd (S2V+shuffle to zero extend).
@@ -4245,7 +4429,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG.getUNDEF(Item.getValueType()),
&Mask[0]);
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item);
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
}
}
@@ -4264,11 +4448,12 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG);
} else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- EVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32;
+ assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
+ EVT MiddleVT = MVT::v4i32;
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true,
Subtarget->hasSSE2(), DAG);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Item);
}
}
@@ -4394,20 +4579,20 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Check for a build vector of consecutive loads.
for (unsigned i = 0; i < NumElems; ++i)
V[i] = Op.getOperand(i);
-
+
// Check for elements which are consecutive loads.
SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
if (LD.getNode())
return LD;
-
- // For SSE 4.1, use insertps to put the high elements into the low element.
+
+ // For SSE 4.1, use insertps to put the high elements into the low element.
if (getSubtarget()->hasSSE41()) {
SDValue Result;
if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
-
+
for (unsigned i = 1; i < NumElems; ++i) {
if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
@@ -4415,7 +4600,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
}
return Result;
}
-
+
// Otherwise, expand into a number of unpckl*, start by extending each of
// our (non-undef) elements to the full vector width with the element in the
// bottom slot of the vector (which generates no code for SSE).
@@ -4441,7 +4626,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
EltStride == NumElems/2)
continue;
-
+
V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
}
EltStride >>= 1;
@@ -4461,21 +4646,21 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
int Mask[2];
- SDValue InVec = DAG.getNode(ISD::BIT_CONVERT,dl, MVT::v1i64, Op.getOperand(0));
+ SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0));
SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
InVec = Op.getOperand(1);
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
unsigned NumElts = ResVT.getVectorNumElements();
- VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
+ VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
} else {
- InVec = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v1i64, InVec);
+ InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
Mask[0] = 0; Mask[1] = 2;
VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
+ return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
}
// v8i16 shuffles - Prefer shuffles in the following order:
@@ -4557,9 +4742,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]);
- NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV);
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
+ NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
// Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
// source words for the shuffle, to aid later transformations.
@@ -4628,12 +4813,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
}
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1);
+ V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
if (!TwoInputs)
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
// Calculate the shuffle mask for the second input, shuffle it, and
// OR it with the first shuffled input.
@@ -4648,12 +4833,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
}
- V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2);
+ V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
}
// If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
@@ -4820,8 +5005,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// No SSSE3 - Calculate in place words and then fix all out of place words
// With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
// the 16 different words that comprise the two doublequadword input vectors.
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
- V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
+ V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
SDValue NewV = V2Only ? V2 : V1;
for (int i = 0; i != 8; ++i) {
int Elt0 = MaskVals[i*2];
@@ -4883,25 +5068,23 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
DAG.getIntPtrConstant(i));
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v2i32 as 2 wide ones if possible. This can be
+/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
-/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
+/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG,
- const TargetLowering &TLI, DebugLoc dl) {
+ SelectionDAG &DAG, DebugLoc dl) {
EVT VT = SVOp->getValueType(0);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
unsigned NumElems = VT.getVectorNumElements();
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
- EVT MaskVT = (NewWidth == 4) ? MVT::v4i16 : MVT::v2i32;
- EVT NewVT = MaskVT;
+ EVT NewVT;
switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Unexpected!");
case MVT::v4f32: NewVT = MVT::v2f64; break;
@@ -4910,12 +5093,6 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
case MVT::v16i8: NewVT = MVT::v4i32; break;
}
- if (NewWidth == 2) {
- if (VT.isInteger())
- NewVT = MVT::v2i64;
- else
- NewVT = MVT::v2f64;
- }
int Scale = NumElems / NewWidth;
SmallVector<int, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i += Scale) {
@@ -4935,8 +5112,8 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
MaskVec.push_back(StartIdx / Scale);
}
- V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
- V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
+ V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}
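
A worked run of the narrowing above on the v8i16 mask from the doc comment:

    // Mask <2, 3, | 10, 11, | 0, 1, | 14, 15>, NumElems = 8, NewVT = v4i32,
    // so Scale = 8 / 4 = 2. Each pair starts at an even index and is
    // consecutive, so every group passes; StartIdx / Scale gives
    //   <2/2, 10/2, 0/2, 14/2> = <1, 5, 0, 7>
    // and both operands are bitcast to v4i32 before the new shuffle is built.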
@@ -4953,13 +5130,13 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
// movssrr and movsdrr do not clear top bits. Try to use movd, movq
// instead.
MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
- if ((ExtVT.SimpleTy != MVT::i64 || Subtarget->is64Bit()) &&
+ if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
- SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
+ SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
// PR2108
OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
OpVT,
@@ -4969,9 +5146,9 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
}
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
- DAG.getNode(ISD::BIT_CONVERT, dl,
+ DAG.getNode(ISD::BITCAST, dl,
OpVT, SrcOp)));
}
@@ -5125,7 +5302,7 @@ LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
static bool MayFoldVectorLoad(SDValue V) {
- if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
+ if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0);
@@ -5134,6 +5311,110 @@ static bool MayFoldVectorLoad(SDValue V) {
return false;
}
+// FIXME: the version above should always be used. Since there's
+// a bug where several vector shuffles can't be folded because the
+// DAG is not updated during lowering and a node claims to have two
+// uses while it only has one, use this version, and let isel match
+// another instruction if the load really happens to have more than
+// one use. Remove this version after this bug gets fixed.
+// rdar://8434668, PR8156
+static bool RelaxedMayFoldVectorLoad(SDValue V) {
+ if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+ if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ V = V.getOperand(0);
+ if (ISD::isNormalLoad(V.getNode()))
+ return true;
+ return false;
+}
+
+/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by
+/// a vector extract, and if both can be later optimized into a single load.
+/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked
+/// here because otherwise a target specific shuffle node is going to be
+/// emitted for this shuffle, and the optimization not done.
+/// FIXME: This is probably not the best approach, but fix the problem
+/// until the right path is decided.
+static
+bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ EVT VT = V.getValueType();
+ ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);
+
+ // Be sure that the vector shuffle is present in a pattern like this:
+ // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
+ if (!V.hasOneUse())
+ return false;
+
+ SDNode *N = *V.getNode()->use_begin();
+ if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return false;
+
+ SDValue EltNo = N->getOperand(1);
+ if (!isa<ConstantSDNode>(EltNo))
+ return false;
+
+ // If the bit convert changed the number of elements, it is unsafe
+ // to examine the mask.
+ bool HasShuffleIntoBitcast = false;
+ if (V.getOpcode() == ISD::BITCAST) {
+ EVT SrcVT = V.getOperand(0).getValueType();
+ if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
+ return false;
+ V = V.getOperand(0);
+ HasShuffleIntoBitcast = true;
+ }
+
+ // Select the input vector, guarding against out of range extract vector.
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
+ V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
+
+ // Skip one more bit_convert if necessary
+ if (V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+
+ if (ISD::isNormalLoad(V.getNode())) {
+ // Is the original load suitable?
+ LoadSDNode *LN0 = cast<LoadSDNode>(V);
+
+    // FIXME: avoid the multi-use bug that is preventing lots of
+    // foldings from being detected; this is still wrong of course, but
+    // it gives the temporarily desired behavior, and if it happens that
+    // the load really has more uses, during isel it will not fold, and
+    // will generate poor code.
+ if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
+ return false;
+
+ if (!HasShuffleIntoBitcast)
+ return true;
+
+ // If there's a bitcast before the shuffle, check if the load type and
+ // alignment is valid.
+ unsigned Align = LN0->getAlignment();
+ unsigned NewAlign =
+ TLI.getTargetData()->getABITypeAlignment(
+ VT.getTypeForEVT(*DAG.getContext()));
+
+ if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
+ return false;
+ }
+
+ return true;
+}
+
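
The guarded pattern, spelled out (sketch; the element numbers are illustrative):

    // (extract_elt (v4f32 shuffle (load $addr), undef, <1,u,u,u>), 0)
    // Extract index 0 maps through mask element 1 of the loaded vector, so
    // visitEXTRACT_VECTOR_ELT can later collapse the whole chain into one
    // scalar f32 load of that element -- provided the load is not volatile
    // and any intervening bitcast keeps the element count, which is exactly
    // what the checks above verify before refusing to emit a target-specific
    // shuffle node.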
+static
+SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+
+  // Canonicalize to v2f64.
+ V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
+ return DAG.getNode(ISD::BITCAST, dl, VT,
+ getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
+ V1, DAG));
+}
+
static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
bool HasSSE2) {
@@ -5191,6 +5472,10 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
CanFoldLoad = true;
+ // Both of them can't be memory operations though.
+ if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
+ CanFoldLoad = false;
+
if (CanFoldLoad) {
if (HasSSE2 && NumElems == 2)
return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
@@ -5228,7 +5513,7 @@ static inline unsigned getUNPCKLOpcode(EVT VT) {
case MVT::v16i8: return X86ISD::PUNPCKLBW;
case MVT::v8i16: return X86ISD::PUNPCKLWD;
default:
- llvm_unreachable("Unknow type for unpckl");
+ llvm_unreachable("Unknown type for unpckl");
}
return 0;
}
@@ -5242,63 +5527,111 @@ static inline unsigned getUNPCKHOpcode(EVT VT) {
case MVT::v16i8: return X86ISD::PUNPCKHBW;
case MVT::v8i16: return X86ISD::PUNPCKHWD;
default:
- llvm_unreachable("Unknow type for unpckh");
+ llvm_unreachable("Unknown type for unpckh");
}
return 0;
}
-SDValue
-X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
+static
+SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
+ const TargetLowering &TLI,
+ const X86Subtarget *Subtarget) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
- unsigned NumElems = VT.getVectorNumElements();
- bool isMMX = VT.getSizeInBits() == 64;
- bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
- bool V1IsSplat = false;
- bool V2IsSplat = false;
- bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
- bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
- MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
if (isZeroShuffle(SVOp))
return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
- // Promote splats to v4f32.
+ // Handle splat operations
if (SVOp->isSplat()) {
- if (isMMX || NumElems < 4)
+  // Special case: this is the only place now where it's
+ // allowed to return a vector_shuffle operation without
+ // using a target specific node, because *hopefully* it
+ // will be optimized away by the dag combiner.
+ if (VT.getVectorNumElements() <= 4 &&
+ CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
return Op;
+
+ // Handle splats by matching through known masks
+ if (VT.getVectorNumElements() <= 4)
+ return SDValue();
+
+ // Canonicalize all of the remaining to v4f32.
return PromoteSplat(SVOp, DAG);
}
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
if (VT == MVT::v8i16 || VT == MVT::v16i8) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
+ SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode())
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
- LowerVECTOR_SHUFFLE(NewOp, DAG));
+ return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
} else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
// FIXME: Figure out a cleaner way to do this.
// Try to make use of movq to zero out the top part.
if (ISD::isBuildVectorAllZeros(V2.getNode())) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
+ SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode()) {
if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
DAG, Subtarget, dl);
}
} else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
- SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
+ SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
DAG, Subtarget, dl);
}
}
+ return SDValue();
+}
+
+SDValue
+X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned NumElems = VT.getVectorNumElements();
+ bool isMMX = VT.getSizeInBits() == 64;
+ bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
+ bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ bool V1IsSplat = false;
+ bool V2IsSplat = false;
+ bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
+ bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
+ bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+
+  // Shuffle operations on MMX are not supported.
+ if (isMMX)
+ return Op;
+
+ // Vector shuffle lowering takes 3 steps:
+ //
+ // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
+ // narrowing and commutation of operands should be handled.
+ // 2) Matching of shuffles with known shuffle masks to x86 target specific
+ // shuffle nodes.
+ // 3) Rewriting of unmatched masks into new generic shuffle operations,
+ // so the shuffle can be broken into other shuffles and the legalizer can
+ // try the lowering again.
+ //
+  // The general idea is that no vector_shuffle operation should be left to
+  // be matched during isel; all of them must be converted to a target specific
+  // node here.
+
+ // Normalize the input vectors. Here splats, zeroed vectors, profitable
+ // narrowing and commutation of operands should be handled. The actual code
+ // doesn't include all of those, work in progress...
+ SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
+ if (NewOp.getNode())
+ return NewOp;
// NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
// unpckh_undef). Only use pshufd if speed is more important than size.
@@ -5309,6 +5642,18 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
if (VT != MVT::v2i64 && VT != MVT::v2f64)
return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+ if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
+ RelaxedMayFoldVectorLoad(V1))
+ return getMOVDDup(Op, dl, V1, DAG);
+
+ if (X86::isMOVHLPS_v_undef_Mask(SVOp))
+ return getMOVHighToLow(Op, dl, DAG);
+
+  // Used to match splats
+ if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
+ (VT == MVT::v2f64 || VT == MVT::v2i64))
+ return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+
if (X86::isPSHUFDMask(SVOp)) {
// The actual implementation will match the mask in the if above and then
// during isel it can match several different instructions, not only pshufd
@@ -5349,7 +5694,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return V2;
if (ISD::isBuildVectorAllZeros(V1.getNode()))
return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
- if (!isMMX && !X86::isMOVLPMask(SVOp)) {
+ if (!X86::isMOVLPMask(SVOp)) {
if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
@@ -5359,22 +5704,20 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
}
// FIXME: fold these into legal mask.
- if (!isMMX) {
- if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
- return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
+ if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
+ return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
- if (X86::isMOVHLPSMask(SVOp))
- return getMOVHighToLow(Op, dl, DAG);
+ if (X86::isMOVHLPSMask(SVOp))
+ return getMOVHighToLow(Op, dl, DAG);
- if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
- return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
+ if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
+ return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
- if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
- return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
+ if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
+ return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
- if (X86::isMOVLPMask(SVOp))
- return getMOVLP(Op, dl, DAG, HasSSE2);
- }
+ if (X86::isMOVLPMask(SVOp))
+ return getMOVLP(Op, dl, DAG, HasSSE2);
if (ShouldXformToMOVHLPS(SVOp) ||
ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
@@ -5414,13 +5757,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getMOVL(DAG, dl, VT, V2, V1);
}
- if (X86::isUNPCKL_v_undef_Mask(SVOp) || X86::isUNPCKLMask(SVOp))
- return (isMMX) ?
- Op : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
+ if (X86::isUNPCKLMask(SVOp))
+ return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
- if (X86::isUNPCKH_v_undef_Mask(SVOp) || X86::isUNPCKHMask(SVOp))
- return (isMMX) ?
- Op : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
+ if (X86::isUNPCKHMask(SVOp))
+ return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
if (V2IsSplat) {
// Normalize mask so all entries that point to V2 points to its first
@@ -5443,19 +5784,15 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
- if (X86::isUNPCKL_v_undef_Mask(NewSVOp) || X86::isUNPCKLMask(NewSVOp))
- return (isMMX) ?
- NewOp : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
+ if (X86::isUNPCKLMask(NewSVOp))
+ return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
- if (X86::isUNPCKH_v_undef_Mask(NewSVOp) || X86::isUNPCKHMask(NewSVOp))
- return (isMMX) ?
- NewOp : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
+ if (X86::isUNPCKHMask(NewSVOp))
+ return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
}
- // FIXME: for mmx, bitcast v2i32 to v4i16 for shuffle.
-
// Normalize the node to match x86 shuffle ops if needed
- if (!isMMX && V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
+ if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
return CommuteVectorShuffle(SVOp, DAG);
// The checks below are all present in isShuffleMaskLegal, but they are
@@ -5464,15 +5801,18 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
SmallVector<int, 16> M;
SVOp->getMask(M);
- // Very little shuffling can be done for 64-bit vectors right now.
- if (VT.getSizeInBits() == 64)
- return isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ? Op : SDValue();
+ if (isPALIGNRMask(M, VT, HasSSSE3))
+ return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
+ X86::getShufflePALIGNRImmediate(SVOp),
+ DAG);
- // FIXME: pshufb, blends, shifts.
- if (VT.getVectorNumElements() == 2 ||
- ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
- isPALIGNRMask(M, VT, Subtarget->hasSSSE3()))
- return Op;
+ if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
+ SVOp->getSplatIndex() == 0 && V2IsUndef) {
+ if (VT == MVT::v2f64)
+ return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG);
+ if (VT == MVT::v2i64)
+ return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG);
+ }
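// A standalone sketch of the splat case above (assumes an SSE2 host and
// <emmintrin.h>): splatting lane 0 of a v2f64 is exactly unpcklpd v,v.
#include <emmintrin.h>
static inline __m128d splat_lane0(__m128d v) {
  return _mm_unpacklo_pd(v, v);   // { v[0], v[0] }, the zero-index splat
}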
if (isPSHUFHWMask(M, VT))
return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
@@ -5494,6 +5834,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
TargetMask, DAG);
}
+ if (X86::isUNPCKL_v_undef_Mask(SVOp))
+ if (VT != MVT::v2i64 && VT != MVT::v2f64)
+ return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
+ if (X86::isUNPCKH_v_undef_Mask(SVOp))
+ if (VT != MVT::v2i64 && VT != MVT::v2f64)
+ return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+
// Handle v8i16 specifically since SSE can do byte extraction and insertion.
if (VT == MVT::v8i16) {
SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
@@ -5507,8 +5854,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return NewOp;
}
- // Handle all 4 wide cases with a number of shuffles except for MMX.
- if (NumElems == 4 && !isMMX)
+ // Handle all 4 wide cases with a number of shuffles.
+ if (NumElems == 4)
return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG);
return SDValue();
@@ -5531,7 +5878,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- DAG.getNode(ISD::BIT_CONVERT, dl,
+ DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32,
Op.getOperand(0)),
Op.getOperand(1)));
@@ -5552,14 +5899,14 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if ((User->getOpcode() != ISD::STORE ||
(isa<ConstantSDNode>(Op.getOperand(1)) &&
cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
- (User->getOpcode() != ISD::BIT_CONVERT ||
+ (User->getOpcode() != ISD::BITCAST ||
User->getValueType(0) != MVT::i32))
return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
Op.getOperand(0)),
Op.getOperand(1));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
} else if (VT == MVT::i32) {
// ExtractPS works with constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
@@ -5575,6 +5922,38 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (!isa<ConstantSDNode>(Op.getOperand(1)))
return SDValue();
+ SDValue Vec = Op.getOperand(0);
+ EVT VecVT = Vec.getValueType();
+
+ // If this is a 256-bit vector result, first extract the 128-bit
+ // vector and then extract from the 128-bit vector.
+ if (VecVT.getSizeInBits() > 128) {
+ DebugLoc dl = Op.getNode()->getDebugLoc();
+ unsigned NumElems = VecVT.getVectorNumElements();
+ SDValue Idx = Op.getOperand(1);
+
+ if (!isa<ConstantSDNode>(Idx))
+ return SDValue();
+
+ unsigned ExtractNumElems = NumElems / (VecVT.getSizeInBits() / 128);
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
+ // Get the 128-bit vector.
+ bool Upper = IdxVal >= ExtractNumElems;
+ Vec = Extract128BitVector(Vec, Idx, DAG, dl);
+
+ // Extract from it.
+ SDValue ScaledIdx = Idx;
+ if (Upper)
+ ScaledIdx = DAG.getNode(ISD::SUB, dl, Idx.getValueType(), Idx,
+ DAG.getConstant(ExtractNumElems,
+ Idx.getValueType()));
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
+ ScaledIdx);
+ }
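// The index math above, scalarized as a sketch (rebase_index is an
// illustrative name): choose the 128-bit half, rebase the index into it.
static unsigned rebase_index(unsigned NumElems, unsigned SizeInBits,
                             unsigned IdxVal, bool &Upper) {
  unsigned ElemsPer128 = NumElems / (SizeInBits / 128);
  Upper = IdxVal >= ElemsPer128;
  return Upper ? IdxVal - ElemsPer128 : IdxVal;
}
// e.g. a v8i32 extract at index 5: upper half, index 1 within the half.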
+
+ assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length");
+
if (Subtarget->hasSSE41()) {
SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
if (Res.getNode())
@@ -5590,7 +5969,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- DAG.getNode(ISD::BIT_CONVERT, dl,
+ DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, Vec),
Op.getOperand(1)));
// Transform it so it matches pextrw, which produces a 32-bit result.
@@ -5650,8 +6029,6 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
unsigned Opc;
if (VT == MVT::v8i16)
Opc = X86ISD::PINSRW;
- else if (VT == MVT::v4i16)
- Opc = X86ISD::MMX_PINSRW;
else if (VT == MVT::v16i8)
Opc = X86ISD::PINSRB;
else
@@ -5689,17 +6066,45 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT EltVT = VT.getVectorElementType();
+ DebugLoc dl = Op.getDebugLoc();
+ SDValue N0 = Op.getOperand(0);
+ SDValue N1 = Op.getOperand(1);
+ SDValue N2 = Op.getOperand(2);
+
+ // If this is a 256-bit vector result, first insert into a 128-bit
+ // vector and then insert into the 256-bit vector.
+ if (VT.getSizeInBits() > 128) {
+ if (!isa<ConstantSDNode>(N2))
+ return SDValue();
+
+ // Get the 128-bit vector.
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
+ bool Upper = IdxVal >= NumElems / 2;
+
+ SDValue SubN0 = Extract128BitVector(N0, N2, DAG, dl);
+
+ // Insert into it.
+ SDValue ScaledN2 = N2;
+ if (Upper)
+ ScaledN2 = DAG.getNode(ISD::SUB, dl, N2.getValueType(), N2,
+ DAG.getConstant(NumElems /
+ (VT.getSizeInBits() / 128),
+ N2.getValueType()));
+ Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubN0.getValueType(), SubN0,
+ N1, ScaledN2);
+
+ // Insert the 128-bit vector
+ // FIXME: Why UNDEF?
+ return Insert128BitVector(N0, Op, N2, DAG, dl);
+ }
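// Semantics of the 256-bit element insert above, modeled on a plain
// array standing in for a v8i32 (a sketch; the helper is illustrative):
static void insert_elt_v8i32(int v[8], unsigned idx, int val) {
  int half[4];
  unsigned base = (idx >= 4) ? 4 : 0;               // Extract128BitVector
  for (unsigned i = 0; i != 4; ++i) half[i] = v[base + i];
  half[idx - base] = val;                           // insert into the half
  for (unsigned i = 0; i != 4; ++i) v[base + i] = half[i]; // Insert128BitVector
}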
+
if (Subtarget->hasSSE41())
return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
if (EltVT == MVT::i8)
return SDValue();
- DebugLoc dl = Op.getDebugLoc();
- SDValue N0 = Op.getOperand(0);
- SDValue N1 = Op.getOperand(1);
- SDValue N2 = Op.getOperand(2);
-
if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
// Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
// as its second argument.
@@ -5707,31 +6112,79 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
- return DAG.getNode(VT == MVT::v8i16 ? X86ISD::PINSRW : X86ISD::MMX_PINSRW,
- dl, VT, N0, N1, N2);
+ return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
}
return SDValue();
}
SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
+ LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
-
+ EVT OpVT = Op.getValueType();
+
+ // If this is a 256-bit vector result, first insert into a 128-bit
+ // vector and then insert into the 256-bit vector.
+ if (OpVT.getSizeInBits() > 128) {
+ // Insert into a 128-bit vector.
+ EVT VT128 = EVT::getVectorVT(*Context,
+ OpVT.getVectorElementType(),
+ OpVT.getVectorNumElements() / 2);
+
+ Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
+
+ // Insert the 128-bit vector.
+ return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op,
+ DAG.getConstant(0, MVT::i32),
+ DAG, dl);
+ }
+
if (Op.getValueType() == MVT::v1i64 &&
Op.getOperand(0).getValueType() == MVT::i64)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
- EVT VT = MVT::v2i32;
- switch (Op.getValueType().getSimpleVT().SimpleTy) {
- default: break;
- case MVT::v16i8:
- case MVT::v8i16:
- VT = MVT::v4i32;
- break;
+ assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
+ "Expected an SSE type!");
+ return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
+}
+
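// The 128-bit tail of this function in intrinsic form, as a sketch
// (assumes an SSE2 host and <emmintrin.h>): the any-extended scalar is
// moved into lane 0 with a single movd, and the result is then bitcast.
#include <emmintrin.h>
static inline __m128i scalar_to_v8i16(short s) {
  return _mm_cvtsi32_si128((int)s);   // movd: s in lane 0, upper lanes 0
}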
+// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
+// a simple subregister reference or explicit instructions to grab
+// upper bits of a vector.
+SDValue
+X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
+ if (Subtarget->hasAVX()) {
+ DebugLoc dl = Op.getNode()->getDebugLoc();
+ SDValue Vec = Op.getNode()->getOperand(0);
+ SDValue Idx = Op.getNode()->getOperand(1);
+
+ if (Op.getNode()->getValueType(0).getSizeInBits() == 128
+ && Vec.getNode()->getValueType(0).getSizeInBits() == 256) {
+ return Extract128BitVector(Vec, Idx, DAG, dl);
+ }
}
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, AnyExt));
+ return SDValue();
+}
+
+// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
+// simple superregister reference or explicit instructions to insert
+// the upper bits of a vector.
+SDValue
+X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
+ if (Subtarget->hasAVX()) {
+ DebugLoc dl = Op.getNode()->getDebugLoc();
+ SDValue Vec = Op.getNode()->getOperand(0);
+ SDValue SubVec = Op.getNode()->getOperand(1);
+ SDValue Idx = Op.getNode()->getOperand(2);
+
+ if (Op.getNode()->getValueType(0).getSizeInBits() == 256
+ && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) {
+ return Insert128BitVector(Vec, SubVec, Idx, DAG, dl);
+ }
+ }
+ return SDValue();
}
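// What the two subvector lowerings above become on AVX, sketched with
// intrinsics (assumes an AVX host and <immintrin.h>): the low half is a
// plain subregister; the high half needs vextractf128/vinsertf128.
#include <immintrin.h>
static inline __m128i get_high128(__m256i v) {
  return _mm256_extractf128_si256(v, 1);
}
static inline __m256i set_high128(__m256i v, __m128i h) {
  return _mm256_insertf128_si256(v, h, 1);
}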
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
@@ -5797,12 +6250,11 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
- if (OpFlag) {
+ if (OpFlag)
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc(), getPointerTy()),
Result);
- }
return Result;
}
@@ -5906,7 +6358,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
- PseudoSourceValue::getGOT(), 0, false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, 0);
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
@@ -5929,7 +6381,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
unsigned char OperandFlags) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
DebugLoc dl = GA->getDebugLoc();
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
@@ -5978,14 +6430,14 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT, TLSModel::Model model,
bool is64Bit) {
DebugLoc dl = GA->getDebugLoc();
- // Get the Thread Pointer
- SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
- DebugLoc(), PtrVT,
- DAG.getRegister(is64Bit? X86::FS : X86::GS,
- MVT::i32));
- SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Base,
- NULL, 0, false, false, 0);
+ // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
+ Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
+ is64Bit ? 257 : 256));
+
+ SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
+ DAG.getIntPtrConstant(0),
+ MachinePointerInfo(Ptr), false, false, 0);
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
@@ -6004,14 +6456,14 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
// exec)
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
if (model == TLSModel::InitialExec)
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- PseudoSourceValue::getGOT(), 0, false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, 0);
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
@@ -6020,29 +6472,29 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
-
+
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GA->getGlobal();
if (Subtarget->isTargetELF()) {
// TODO: implement the "local dynamic" model
// TODO: implement the "initial exec" model for PIC executables
-
+
// If GV is an alias then use the aliasee for determining
// thread-localness.
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
GV = GA->resolveAliasedGlobal(false);
-
- TLSModel::Model model
+
+ TLSModel::Model model
= getTLSModel(GV, getTargetMachine().getRelocationModel());
-
+
switch (model) {
case TLSModel::GeneralDynamic:
case TLSModel::LocalDynamic: // not implemented
if (Subtarget->is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
-
+
case TLSModel::InitialExec:
case TLSModel::LocalExec:
return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
@@ -6053,7 +6505,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
unsigned char OpFlag = 0;
unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
X86ISD::WrapperRIP : X86ISD::Wrapper;
-
+
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
@@ -6062,24 +6514,26 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
OpFlag = X86II::MO_TLVP;
- DebugLoc DL = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
- getPointerTy(),
+ GA->getValueType(0),
GA->getOffset(), OpFlag);
SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
-
+
// With PIC32, the address is actually $g + Offset.
if (PIC32)
Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc(), getPointerTy()),
Offset);
-
+
// Lowering the machine isd will make sure everything is in the right
// location.
- SDValue Args[] = { Offset };
- SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1);
-
+ SDValue Chain = DAG.getEntryNode();
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Args[] = { Chain, Offset };
+ Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);
+
// TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setAdjustsStack(true);
@@ -6089,7 +6543,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
}
-
+
assert(false &&
"TLS not implemented for this target.");
@@ -6148,12 +6602,8 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
- if (SrcVT.isVector()) {
- if (SrcVT == MVT::v2i32 && Op.getValueType() == MVT::v2f64) {
- return Op;
- }
+ if (SrcVT.isVector())
return SDValue();
- }
assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
@@ -6174,25 +6624,36 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
StackSlot,
- PseudoSourceValue::getFixedStack(SSFI), 0,
+ MachinePointerInfo::getFixedStack(SSFI),
false, false, 0);
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
- SDValue StackSlot,
+ SDValue StackSlot,
SelectionDAG &DAG) const {
// Build the FILD
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
SDVTList Tys;
bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
if (useSSE)
- Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
+ Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
else
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
+
+ unsigned ByteSize = SrcVT.getSizeInBits()/8;
+
+ int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
+ MachineMemOperand *MMO =
+ DAG.getMachineFunction()
+ .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOLoad, ByteSize, ByteSize);
+
SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
- SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, dl,
- Tys, Ops, array_lengthof(Ops));
+ SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
+ X86ISD::FILD, DL,
+ Tys, Ops, array_lengthof(Ops),
+ SrcVT, MMO);
if (useSSE) {
Chain = Result.getValue(1);
@@ -6202,15 +6663,23 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
MachineFunction &MF = DAG.getMachineFunction();
- int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
+ unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
+ int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Tys = DAG.getVTList(MVT::Other);
SDValue Ops[] = {
Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
};
- Chain = DAG.getNode(X86ISD::FST, dl, Tys, Ops, array_lengthof(Ops));
- Result = DAG.getLoad(Op.getValueType(), dl, Chain, StackSlot,
- PseudoSourceValue::getFixedStack(SSFI), 0,
+ MachineMemOperand *MMO =
+ DAG.getMachineFunction()
+ .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOStore, SSFISize, SSFISize);
+
+ Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
+ Ops, array_lengthof(Ops),
+ Op.getValueType(), MMO);
+ Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
+ MachinePointerInfo::getFixedStack(SSFI),
false, false, 0);
}
@@ -6284,12 +6753,12 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
DAG.getIntPtrConstant(0)));
SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
- SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2);
+ SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
@@ -6317,19 +6786,19 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
DAG.getIntPtrConstant(0)));
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load),
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
DAG.getIntPtrConstant(0));
// Or the load with the bias.
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Load)),
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Bias)));
Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or),
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
DAG.getIntPtrConstant(0));
// Subtract the bias.
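// The bias trick above in scalar form, as a sketch: OR the u32 into the
// mantissa of the double 2^52 (0x4330000000000000), then subtract 2^52.
#include <cstdint>
#include <cstring>
static double u32_to_f64(uint32_t x) {
  uint64_t bits = 0x4330000000000000ULL | x;  // double(2^52 + x), exact
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 4503599627370496.0;              // - 2^52  ==>  (double)x
}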
@@ -6374,24 +6843,34 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
getPointerTy(), StackSlot, WordOff);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot, NULL, 0, false, false, 0);
+ StackSlot, MachinePointerInfo(),
+ false, false, 0);
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
- OffsetSlot, NULL, 0, false, false, 0);
+ OffsetSlot, MachinePointerInfo(),
+ false, false, 0);
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
return Fild;
}
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot, NULL, 0, false, false, 0);
+ StackSlot, MachinePointerInfo(),
+ false, false, 0);
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
// DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
// we must be careful to do the computation in x87 extended precision, not
// in SSE. (The generic code can't know it's OK to do this, or how to.)
+ int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
+ MachineMemOperand *MMO =
+ DAG.getMachineFunction()
+ .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOLoad, 8, 8);
+
SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
- SDValue Fild = DAG.getNode(X86ISD::FILD, dl, Tys, Ops, 3);
+ SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
+ MVT::i64, MMO);
APInt FF(32, 0x5F800000ULL);
@@ -6414,9 +6893,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// Load the value out, extending it from f32 to f80.
// FIXME: Avoid the extend by constructing the right constant pool?
- SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, MVT::f80, dl, DAG.getEntryNode(),
- FudgePtr, PseudoSourceValue::getConstantPool(),
- 0, MVT::f32, false, false, 4);
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
+ FudgePtr, MachinePointerInfo::getConstantPool(),
+ MVT::f32, false, false, 4);
// Extend everything to 80 bits to force it to be done on x87.
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
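// The fudge constant 0x5F800000 is the float 2^64: the signed FILD reads
// the u64 bit pattern as i64, so adding 2^64 back when the sign bit was
// set recovers the unsigned value (a sketch in x87-width long double).
#include <cstdint>
static long double u64_to_fp(uint64_t x) {
  long double r = (long double)(int64_t)x;   // signed convert (FILD)
  if ((int64_t)x < 0)
    r += 18446744073709551616.0L;            // + 2^64, the fudge factor
  return r;
}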
@@ -6424,7 +6903,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
std::pair<SDValue,SDValue> X86TargetLowering::
FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
EVT DstTy = Op.getValueType();
@@ -6453,6 +6932,8 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
+
+
unsigned Opc;
switch (DstTy.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
@@ -6463,37 +6944,43 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
SDValue Chain = DAG.getEntryNode();
SDValue Value = Op.getOperand(0);
- if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
+ EVT TheVT = Op.getOperand(0).getValueType();
+ if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
- Chain = DAG.getStore(Chain, dl, Value, StackSlot,
- PseudoSourceValue::getFixedStack(SSFI), 0,
+ Chain = DAG.getStore(Chain, DL, Value, StackSlot,
+ MachinePointerInfo::getFixedStack(SSFI),
false, false, 0);
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDValue Ops[] = {
- Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
+ Chain, StackSlot, DAG.getValueType(TheVT)
};
- Value = DAG.getNode(X86ISD::FLD, dl, Tys, Ops, 3);
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOLoad, MemSize, MemSize);
+ Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3,
+ DstTy, MMO);
Chain = Value.getValue(1);
SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
}
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOStore, MemSize, MemSize);
+
// Build the FP_TO_INT*_IN_MEM
SDValue Ops[] = { Chain, Value, StackSlot };
- SDValue FIST = DAG.getNode(Opc, dl, MVT::Other, Ops, 3);
+ SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
+ Ops, 3, DstTy, MMO);
return std::make_pair(FIST, StackSlot);
}
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const {
- if (Op.getValueType().isVector()) {
- if (Op.getValueType() == MVT::v2i32 &&
- Op.getOperand(0).getValueType() == MVT::v2f64) {
- return Op;
- }
+ if (Op.getValueType().isVector())
return SDValue();
- }
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
@@ -6502,7 +6989,7 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
// Load the result.
return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
- FIST, StackSlot, NULL, 0, false, false, 0);
+ FIST, StackSlot, MachinePointerInfo(), false, false, 0);
}
SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
@@ -6513,7 +7000,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
// Load the result.
return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
- FIST, StackSlot, NULL, 0, false, false, 0);
+ FIST, StackSlot, MachinePointerInfo(), false, false, 0);
}
SDValue X86TargetLowering::LowerFABS(SDValue Op,
@@ -6539,7 +7026,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op,
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}
@@ -6566,14 +7053,14 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
if (VT.isVector()) {
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::XOR, dl, MVT::v2i64,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
Op.getOperand(0)),
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask)));
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask)));
} else {
return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
@@ -6615,7 +7102,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
@@ -6625,7 +7112,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
DAG.getConstant(32, MVT::i32));
- SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit);
+ SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
DAG.getIntPtrConstant(0));
}
@@ -6644,7 +7131,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
C = ConstantVector::get(CV);
CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);
@@ -6884,8 +7371,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// Lower (X & (1 << N)) == 0 to BT(X, N).
// Lower ((X >>u N) & 1) != 0 to BT(X, N).
// Lower ((X >>s N) & 1) != 0 to BT(X, N).
- if (Op0.getOpcode() == ISD::AND &&
- Op0.hasOneUse() &&
+ if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(Op1)->isNullValue() &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
@@ -6894,19 +7380,25 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
return NewSetCC;
}
- // Look for "(setcc) == / != 1" to avoid unncessary setcc.
- if (Op0.getOpcode() == X86ISD::SETCC &&
- Op1.getOpcode() == ISD::Constant &&
+ // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
+ // these.
+ if (Op1.getOpcode() == ISD::Constant &&
(cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
cast<ConstantSDNode>(Op1)->isNullValue()) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
- X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
- bool Invert = (CC == ISD::SETNE) ^
- cast<ConstantSDNode>(Op1)->isNullValue();
- if (Invert)
+
+ // If the input is a setcc, then reuse the input setcc or use a new one with
+ // the inverted condition.
+ if (Op0.getOpcode() == X86ISD::SETCC) {
+ X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
+ bool Invert = (CC == ISD::SETNE) ^
+ cast<ConstantSDNode>(Op1)->isNullValue();
+ if (!Invert) return Op0;
+
CCode = X86::GetOppositeBranchCondition(CCode);
- return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
+ return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
+ }
}
bool isFP = Op1.getValueType().isFloatingPoint();
@@ -6914,17 +7406,9 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (X86CC == X86::COND_INVALID)
return SDValue();
- SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);
-
- // Use sbb x, x to materialize carry bit into a GPR.
- if (X86CC == X86::COND_B)
- return DAG.getNode(ISD::AND, dl, MVT::i8,
- DAG.getNode(X86ISD::SETCC_CARRY, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond),
- DAG.getConstant(1, MVT::i8));
-
+ SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond);
+ DAG.getConstant(X86CC, MVT::i8), EFLAGS);
}
SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
@@ -6996,11 +7480,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
switch (VT.getSimpleVT().SimpleTy) {
default: break;
- case MVT::v8i8:
case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
- case MVT::v4i16:
case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
- case MVT::v2i32:
case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
}
@@ -7051,6 +7532,8 @@ static bool isX86LogicalCmp(SDValue Op) {
if (Op.getResNo() == 1 &&
(Opc == X86ISD::ADD ||
Opc == X86ISD::SUB ||
+ Opc == X86ISD::ADC ||
+ Opc == X86ISD::SBB ||
Opc == X86ISD::SMUL ||
Opc == X86ISD::UMUL ||
Opc == X86ISD::INC ||
@@ -7060,13 +7543,28 @@ static bool isX86LogicalCmp(SDValue Op) {
Opc == X86ISD::AND))
return true;
+ if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
+ return true;
+
return false;
}
+static bool isZero(SDValue V) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
+ return C && C->isNullValue();
+}
+
+static bool isAllOnes(SDValue V) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
+ return C && C->isAllOnesValue();
+}
+
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Cond = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
+ SDValue Op1 = Op.getOperand(1);
+ SDValue Op2 = Op.getOperand(2);
+ DebugLoc DL = Op.getDebugLoc();
SDValue CC;
if (Cond.getOpcode() == ISD::SETCC) {
@@ -7075,34 +7573,44 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Cond = NewCond;
}
- // (select (x == 0), -1, 0) -> (sign_bit (x - 1))
- SDValue Op1 = Op.getOperand(1);
- SDValue Op2 = Op.getOperand(2);
+ // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
+ // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
+ // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
+ // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
if (Cond.getOpcode() == X86ISD::SETCC &&
- cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue() == X86::COND_E) {
+ Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
+ isZero(Cond.getOperand(1).getOperand(1))) {
SDValue Cmp = Cond.getOperand(1);
- if (Cmp.getOpcode() == X86ISD::CMP) {
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op1);
+
+ unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
+
+ if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
+ (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
+ SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
+
+ SDValue CmpOp0 = Cmp.getOperand(0);
+ Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
+ CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
+
+ SDValue Res = // Res = 0 or -1.
+ DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
+ DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
+
+ if (isAllOnes(Op1) != (CondCode == X86::COND_E))
+ Res = DAG.getNOT(DL, Res, Res.getValueType());
+
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
- ConstantSDNode *RHSC =
- dyn_cast<ConstantSDNode>(Cmp.getOperand(1).getNode());
- if (N1C && N1C->isAllOnesValue() &&
- N2C && N2C->isNullValue() &&
- RHSC && RHSC->isNullValue()) {
- SDValue CmpOp0 = Cmp.getOperand(0);
- Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
- CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
- return DAG.getNode(X86ISD::SETCC_CARRY, dl, Op.getValueType(),
- DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
- }
+ if (N2C == 0 || !N2C->isNullValue())
+ Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
+ return Res;
}
}
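// The fold above, scalarized as a sketch: cmp x,1 sets the carry flag
// exactly when x == 0; sbb turns that carry into 0 or -1; OR merges in y.
#include <cstdint>
static uint32_t select_eq0_m1_y(uint32_t x, uint32_t y) {
  uint32_t borrow = (x < 1u) ? 1u : 0u;  // CF after cmp x, 1
  uint32_t mask = 0u - borrow;           // sbb r,r: 0 or 0xFFFFFFFF
  return mask | y;                       // (x == 0) ? -1 : y
}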
- // Look pass (and (setcc_carry (cmp ...)), 1).
+ // Look past (and (setcc_carry (cmp ...)), 1).
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
- if (C && C->getAPIntValue() == 1)
+ if (C && C->getAPIntValue() == 1)
Cond = Cond.getOperand(0);
}
@@ -7135,8 +7643,8 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// We know the result of AND is compared against zero. Try to match
// it to BT.
- if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
- SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
+ if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
+ SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
if (NewSetCC.getNode()) {
CC = NewSetCC.getOperand(0);
Cond = NewSetCC.getOperand(1);
@@ -7150,11 +7658,28 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
+ // a < b ? -1 : 0 -> RES = ~setcc_carry
+ // a < b ? 0 : -1 -> RES = setcc_carry
+ // a >= b ? -1 : 0 -> RES = setcc_carry
+ // a >= b ? 0 : -1 -> RES = ~setcc_carry
+ if (Cond.getOpcode() == X86ISD::CMP) {
+ unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+
+ if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
+ (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
+ SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
+ DAG.getConstant(X86::COND_B, MVT::i8), Cond);
+ if (isAllOnes(Op1) != (CondCode == X86::COND_B))
+ return DAG.getNOT(DL, Res, Res.getValueType());
+ return Res;
+ }
+ }
+
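// The unsigned-compare variants above, scalarized as a sketch: after
// cmp a,b the sbb idiom materializes -(a < b); complementing gives a >= b.
#include <cstdint>
static uint32_t ult_mask(uint32_t a, uint32_t b) {
  return 0u - (uint32_t)(a < b);   // a < b ? -1 : 0, one cmp + sbb
}
static uint32_t uge_mask(uint32_t a, uint32_t b) {
  return ~ult_mask(a, b);          // a >= b ? -1 : 0, sbb + not
}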
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SDValue Ops[] = { Op2, Op1, CC, Cond };
- return DAG.getNode(X86ISD::CMOV, dl, VTs, Ops, array_lengthof(Ops));
+ return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
}
// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
@@ -7209,7 +7734,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
- if (C && C->getAPIntValue() == 1)
+ if (C && C->getAPIntValue() == 1)
Cond = Cond.getOperand(0);
}
@@ -7310,7 +7835,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// We know the result of AND is compared against zero. Try to match
// it to BT.
- if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
+ if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
if (NewSetCC.getNode()) {
CC = NewSetCC.getOperand(0);
@@ -7337,8 +7862,8 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
- assert(Subtarget->isTargetCygMing() &&
- "This should be used only on Cygwin/Mingw targets");
+ assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) &&
+ "This should be used only on Windows targets");
DebugLoc dl = Op.getDebugLoc();
// Get the inputs.
@@ -7353,9 +7878,9 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
Flag = Chain.getValue(1);
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(X86ISD::MINGW_ALLOCA, dl, NodeTys, Chain, Flag);
+ Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
Flag = Chain.getValue(1);
Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);
@@ -7369,15 +7894,15 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
- if (!Subtarget->is64Bit()) {
+ if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
- return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
- false, false, 0);
+ return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
+ MachinePointerInfo(SV), false, false, 0);
}
// __va_list_tag:
@@ -7388,48 +7913,107 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SmallVector<SDValue, 8> MemOps;
SDValue FIN = Op.getOperand(1);
// Store gp_offset
- SDValue Store = DAG.getStore(Op.getOperand(0), dl,
+ SDValue Store = DAG.getStore(Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
MVT::i32),
- FIN, SV, 0, false, false, 0);
+ FIN, MachinePointerInfo(SV), false, false, 0);
MemOps.push_back(Store);
// Store fp_offset
- FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
FIN, DAG.getIntPtrConstant(4));
- Store = DAG.getStore(Op.getOperand(0), dl,
+ Store = DAG.getStore(Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
MVT::i32),
- FIN, SV, 4, false, false, 0);
+ FIN, MachinePointerInfo(SV, 4), false, false, 0);
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
- FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
FIN, DAG.getIntPtrConstant(4));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 8,
+ Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
+ MachinePointerInfo(SV, 8),
false, false, 0);
MemOps.push_back(Store);
// Store ptr to reg_save_area.
- FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
FIN, DAG.getIntPtrConstant(8));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 16,
- false, false, 0);
+ Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
+ MachinePointerInfo(SV, 16), false, false, 0);
MemOps.push_back(Store);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
&MemOps[0], MemOps.size());
}
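// The four stores above fill in the SysV AMD64 __va_list_tag; its field
// offsets (0, 4, 8, 16) are the constants FIN is bumped by (layout
// sketch, LP64 pointers assumed):
struct VAListTagX86_64 {
  unsigned GPOffset;       // offset 0: next GPR slot in RegSaveArea
  unsigned FPOffset;       // offset 4: next XMM slot in RegSaveArea
  void *OverflowArgArea;   // offset 8: stack-passed arguments
  void *RegSaveArea;       // offset 16: spilled argument registers
};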
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
- // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
- assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!");
+ assert(Subtarget->is64Bit() &&
+ "LowerVAARG only handles 64-bit va_arg!");
+ assert((Subtarget->isTargetLinux() ||
+ Subtarget->isTargetDarwin()) &&
+ "Unhandled target in LowerVAARG");
+ assert(Op.getNode()->getNumOperands() == 4);
+ SDValue Chain = Op.getOperand(0);
+ SDValue SrcPtr = Op.getOperand(1);
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ unsigned Align = Op.getConstantOperandVal(3);
+ DebugLoc dl = Op.getDebugLoc();
- report_fatal_error("VAArgInst is not yet implemented for x86-64!");
- return SDValue();
+ EVT ArgVT = Op.getNode()->getValueType(0);
+ const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
+ uint8_t ArgMode;
+
+ // Decide which area this value should be read from.
+ // TODO: Implement the AMD64 ABI in its entirety. This simple
+ // selection mechanism works only for the basic types.
+ if (ArgVT == MVT::f80) {
+ llvm_unreachable("va_arg for f80 not yet implemented");
+ } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
+ ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
+ } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
+ ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
+ } else {
+ llvm_unreachable("Unhandled argument type in LowerVAARG");
+ }
+
+ if (ArgMode == 2) {
+ // Sanity Check: Make sure using fp_offset makes sense.
+ assert(!UseSoftFloat &&
+ !(DAG.getMachineFunction()
+ .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) &&
+ Subtarget->hasXMM());
+ }
+
+ // Insert VAARG_64 node into the DAG
+ // VAARG_64 returns two values: Variable Argument Address, Chain
+ SmallVector<SDValue, 11> InstOps;
+ InstOps.push_back(Chain);
+ InstOps.push_back(SrcPtr);
+ InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
+ InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
+ InstOps.push_back(DAG.getConstant(Align, MVT::i32));
+ SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
+ SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
+ VTs, &InstOps[0], InstOps.size(),
+ MVT::i64,
+ MachinePointerInfo(SV),
+ /*Align=*/0,
+ /*Volatile=*/false,
+ /*ReadMem=*/true,
+ /*WriteMem=*/true);
+ Chain = VAARG.getValue(1);
+
+ // Load the next argument and return it
+ return DAG.getLoad(ArgVT, dl,
+ Chain,
+ VAARG,
+ MachinePointerInfo(),
+ false, false, 0);
}
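// The ArgMode selection above, as a tiny classifier sketch covering only
// the basic cases the TODO mentions (f80 and aggregates are rejected):
static int VAArgMode(bool IsFP, unsigned ArgSize) {
  if (IsFP && ArgSize <= 16) return 2;    // fp_offset: XMM save area
  if (!IsFP && ArgSize <= 32) return 1;   // gp_offset: GPR save area
  return -1;                              // unhandled by this lowering
}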
SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
@@ -7440,11 +8024,12 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
- return DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
+ return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
- false, DstSV, 0, SrcSV, 0);
+ false,
+ MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
SDValue
@@ -7713,10 +8298,11 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
} else {
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
+ // FIXME: this must be lowered to get rid of the invalid type.
}
EVT VT = Op.getValueType();
- ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT, ShAmt);
+ ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
DAG.getConstant(NewIntNo, MVT::i32),
Op.getOperand(1), ShAmt);
@@ -7740,13 +8326,13 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- NULL, 0, false, false, 0);
+ MachinePointerInfo(), false, false, 0);
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, NULL, 0, false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, 0);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
@@ -7759,7 +8345,8 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
- FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
false, false, 0);
return FrameAddr;
}
@@ -7784,7 +8371,8 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
DAG.getIntPtrConstant(TD->getPointerSize()));
StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
- Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, NULL, 0, false, false, 0);
+ Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
+ false, false, 0);
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
MF.getRegInfo().addLiveOut(StoreAddrReg);
@@ -7819,11 +8407,13 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
- Addr, TrmpAddr, 0, false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr),
+ false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(2, MVT::i64));
- OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, TrmpAddr, 2,
+ OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
+ MachinePointerInfo(TrmpAddr, 2),
false, false, 2);
// Load the 'nest' parameter value into R10.
@@ -7832,11 +8422,13 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(10, MVT::i64));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
- Addr, TrmpAddr, 10, false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 10),
+ false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(12, MVT::i64));
- OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 12,
+ OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
+ MachinePointerInfo(TrmpAddr, 12),
false, false, 2);
// Jump to the nested function.
@@ -7844,13 +8436,15 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(20, MVT::i64));
OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
- Addr, TrmpAddr, 20, false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 20),
+ false, false, 0);
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(22, MVT::i64));
OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
- TrmpAddr, 22, false, false, 0);
+ MachinePointerInfo(TrmpAddr, 22),
+ false, false, 0);
SDValue Ops[] =
{ Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) };
@@ -7912,22 +8506,26 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
OutChains[0] = DAG.getStore(Root, dl,
DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
- Trmp, TrmpAddr, 0, false, false, 0);
+ Trmp, MachinePointerInfo(TrmpAddr),
+ false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(1, MVT::i32));
- OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 1,
+ OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
+ MachinePointerInfo(TrmpAddr, 1),
false, false, 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(5, MVT::i32));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
- TrmpAddr, 5, false, false, 1);
+ MachinePointerInfo(TrmpAddr, 5),
+ false, false, 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(6, MVT::i32));
- OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, TrmpAddr, 6,
+ OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
+ MachinePointerInfo(TrmpAddr, 6),
false, false, 1);
SDValue Ops[] =
@@ -7959,44 +8557,51 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
const TargetMachine &TM = MF.getTarget();
- const TargetFrameInfo &TFI = *TM.getFrameInfo();
+ const TargetFrameLowering &TFI = *TM.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
// Save FP Control Word to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
- SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, dl, MVT::Other,
- DAG.getEntryNode(), StackSlot);
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
+ MachineMemOperand::MOStore, 2, 2);
+
+ SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
+ SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
+ DAG.getVTList(MVT::Other),
+ Ops, 2, MVT::i16, MMO);
// Load FP Control Word from stack slot
- SDValue CWD = DAG.getLoad(MVT::i16, dl, Chain, StackSlot, NULL, 0,
- false, false, 0);
+ SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
+ MachinePointerInfo(), false, false, 0);
// Transform as necessary
SDValue CWD1 =
- DAG.getNode(ISD::SRL, dl, MVT::i16,
- DAG.getNode(ISD::AND, dl, MVT::i16,
+ DAG.getNode(ISD::SRL, DL, MVT::i16,
+ DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x800, MVT::i16)),
DAG.getConstant(11, MVT::i8));
SDValue CWD2 =
- DAG.getNode(ISD::SRL, dl, MVT::i16,
- DAG.getNode(ISD::AND, dl, MVT::i16,
+ DAG.getNode(ISD::SRL, DL, MVT::i16,
+ DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x400, MVT::i16)),
DAG.getConstant(9, MVT::i8));
SDValue RetVal =
- DAG.getNode(ISD::AND, dl, MVT::i16,
- DAG.getNode(ISD::ADD, dl, MVT::i16,
- DAG.getNode(ISD::OR, dl, MVT::i16, CWD1, CWD2),
+ DAG.getNode(ISD::AND, DL, MVT::i16,
+ DAG.getNode(ISD::ADD, DL, MVT::i16,
+ DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
DAG.getConstant(1, MVT::i16)),
DAG.getConstant(3, MVT::i16));
return DAG.getNode((VT.getSizeInBits() < 16 ?
- ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
+ ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
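// The bit twiddling above, scalarized as a sketch: map the x87 control
// word's rounding-control bits (10-11) onto the C FLT_ROUNDS encoding.
static int FPCWToFLTRounds(unsigned short CWD) {
  unsigned Hi = (CWD & 0x800) >> 11;   // 0 or 1
  unsigned Lo = (CWD & 0x400) >> 9;    // 0 or 2
  return ((Hi | Lo) + 1) & 3;          // 00->1  01->3  10->2  11->0
}
// nearest -> 1, down -> 3, up -> 2, toward zero -> 0, as FLT_ROUNDS wants.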
SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
@@ -8122,16 +8727,16 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
Op.getOperand(1), DAG.getConstant(23, MVT::i32));
ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));
-
+
std::vector<Constant*> CV(4, CI);
Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op);
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
return DAG.getNode(ISD::MUL, dl, VT, Op, R);
}
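// The v4i32 variable-shift trick above, scalarized as a sketch:
// (amt << 23) + 0x3f800000 builds the float 2^amt directly in the
// exponent field; converting back and multiplying performs r << amt.
#include <cstdint>
#include <cstring>
static uint32_t shl_via_float(uint32_t r, uint32_t amt) { // amt in 0..31
  uint32_t bits = (amt << 23) + 0x3f800000u;  // exponent = 127 + amt
  float p;
  std::memcpy(&p, &bits, sizeof p);           // p == 2^amt exactly
  return r * (uint32_t)p;                     // wraps just like a shift
}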
@@ -8149,7 +8754,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
Constant *C = ConstantVector::get(CVM1);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0,
+ MachinePointerInfo::getConstantPool(),
false, false, 16);
// r = pblendv(r, psllw(r & (char16)15, 4), a);
@@ -8157,31 +8762,27 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
DAG.getConstant(4, MVT::i32));
- R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
- R, M, Op);
+ R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op);
// a += a
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-
+
C = ConstantVector::get(CVM2);
CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- PseudoSourceValue::getConstantPool(), 0, false, false, 16);
-
+ MachinePointerInfo::getConstantPool(),
+ false, false, 16);
+
// r = pblendv(r, psllw(r & (char16)63, 2), a);
M = DAG.getNode(ISD::AND, dl, VT, R, M);
M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
DAG.getConstant(2, MVT::i32));
- R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
- R, M, Op);
+ R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op);
// a += a
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-
+
// return pblendv(r, r+r, a);
- R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
+ R = DAG.getNode(X86ISD::PBLENDVB, dl, VT,
R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op);
return R;
}
@@ -8198,8 +8799,7 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
SDValue RHS = N->getOperand(1);
unsigned BaseOp = 0;
unsigned Cond = 0;
- DebugLoc dl = Op.getDebugLoc();
-
+ DebugLoc DL = Op.getDebugLoc();
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown ovf instruction!");
case ISD::SADDO:
@@ -8238,19 +8838,29 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
BaseOp = X86ISD::SMUL;
Cond = X86::COND_O;
break;
- case ISD::UMULO:
- BaseOp = X86ISD::UMUL;
- Cond = X86::COND_B;
- break;
+ case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
+ SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
+ MVT::i32);
+ SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
+
+ SDValue SetCC =
+ DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
+ DAG.getConstant(X86::COND_O, MVT::i32),
+ SDValue(Sum.getNode(), 2));
+
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
+ return Sum;
+ }
}
// Also sets EFLAGS.
SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
- SDValue Sum = DAG.getNode(BaseOp, dl, VTs, LHS, RHS);
+ SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC =
- DAG.getNode(X86ISD::SETCC, dl, N->getValueType(1),
- DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1));
+ DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
+ DAG.getConstant(Cond, MVT::i32),
+ SDValue(Sum.getNode(), 1));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
return Sum;
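// The UMULO case above, scalarized as a sketch: widen, keep the low
// half, and report overflow when the high half is nonzero (which is what
// MUL's overflow flag, consumed by the SETCC, indicates).
#include <cstdint>
static bool umulo32(uint32_t a, uint32_t b, uint32_t &lo) {
  uint64_t p = (uint64_t)a * b;
  lo = (uint32_t)p;
  return (p >> 32) != 0;
}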
@@ -8258,10 +8868,10 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
DebugLoc dl = Op.getDebugLoc();
-
+
if (!Subtarget->hasSSE2()) {
SDValue Chain = Op.getOperand(0);
- SDValue Zero = DAG.getConstant(0,
+ SDValue Zero = DAG.getConstant(0,
Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
SDValue Ops[] = {
DAG.getRegister(X86::ESP, MVT::i32), // Base
@@ -8272,37 +8882,37 @@ SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
Zero,
Chain
};
- SDNode *Res =
+ SDNode *Res =
DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
array_lengthof(Ops));
return SDValue(Res, 0);
}
-
+
unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
if (!isDev)
return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
-
+
unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
-
+
// def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
if (!Op1 && !Op2 && !Op3 && Op4)
return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));
-
+
// def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
if (Op1 && !Op2 && !Op3 && !Op4)
return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));
-
- // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
+
+ // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
// (MFENCE)>;
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
}
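The fence selection above reduces to a small decision function; a sketch, assuming the four ordering operands are the booleans decoded as Op1-Op4, with the device bit already checked:

#include <cassert>

enum Fence { SFENCE, LFENCE, MFENCE };

static Fence pickFence(bool Op1, bool Op2, bool Op3, bool Op4) {
  if (!Op1 && !Op2 && !Op3 && Op4) return SFENCE; // pattern (0,0,0,1)
  if (Op1 && !Op2 && !Op3 && !Op4) return LFENCE; // pattern (1,0,0,0)
  return MFENCE;                                  // any other combination
}

int main() {
  assert(pickFence(false, false, false, true) == SFENCE);
  assert(pickFence(true, false, false, false) == LFENCE);
  assert(pickFence(true, true, true, true) == MFENCE);
  return 0;
}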
SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
EVT T = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
unsigned Reg = 0;
unsigned size = 0;
switch(T.getSimpleVT().SimpleTy) {
@@ -8316,24 +8926,26 @@ SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
Reg = X86::RAX; size = 8;
break;
}
- SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), dl, Reg,
+ SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
Op.getOperand(2), SDValue());
SDValue Ops[] = { cpIn.getValue(0),
Op.getOperand(1),
Op.getOperand(3),
DAG.getTargetConstant(size, MVT::i8),
cpIn.getValue(1) };
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, dl, Tys, Ops, 5);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
+ SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
+ Ops, 5, T, MMO);
SDValue cpOut =
- DAG.getCopyFromReg(Result.getValue(0), dl, Reg, T, Result.getValue(1));
+ DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
return cpOut;
}
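A minimal sketch of the compare-and-swap contract that LCMPXCHG_DAG now carries a memory operand for, using C++11 std::atomic: the comparand travels in EAX/RAX and comes back holding the old memory value on failure, matching the cpIn/cpOut copies above.

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> mem(42);
  int expected = 42;                                // cpIn: loaded into EAX
  bool ok = mem.compare_exchange_strong(expected, 7);
  assert(ok && mem.load() == 7);                    // swap happened
  expected = 42;                                    // stale comparand
  ok = mem.compare_exchange_strong(expected, 9);
  assert(!ok && expected == 7);                     // cpOut: EAX = old value
  return 0;
}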
SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
SelectionDAG &DAG) const {
assert(Subtarget->is64Bit() && "Result not type legalized?");
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue TheChain = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
@@ -8349,16 +8961,15 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl);
}
-SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
+SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
EVT DstVT = Op.getValueType();
- assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
- Subtarget->hasMMX() && !DisableMMX) &&
- "Unexpected custom BIT_CONVERT");
- assert((DstVT == MVT::i64 ||
+ assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
+ Subtarget->hasMMX() && "Unexpected custom BITCAST");
+ assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
- "Unexpected custom BIT_CONVERT");
+ "Unexpected custom BITCAST");
// i64 <=> MMX conversions are Legal.
if (SrcVT==MVT::i64 && DstVT.isVector())
return Op;
@@ -8370,6 +8981,7 @@ SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
// All other conversions need to be expanded.
return SDValue();
}
+
SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
@@ -8384,6 +8996,32 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
cast<AtomicSDNode>(Node)->getAlignment());
}
+static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getNode()->getValueType(0);
+
+ // Let legalize expand this if it isn't a legal type yet.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return SDValue();
+
+ SDVTList VTs = DAG.getVTList(VT, MVT::i32);
+
+ unsigned Opc;
+ bool ExtraOp = false;
+ switch (Op.getOpcode()) {
+ default: assert(0 && "Invalid code");
+ case ISD::ADDC: Opc = X86ISD::ADD; break;
+ case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
+ case ISD::SUBC: Opc = X86ISD::SUB; break;
+ case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
+ }
+
+ if (!ExtraOp)
+ return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ Op.getOperand(1));
+ return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
+ Op.getOperand(1), Op.getOperand(2));
+}
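What ADDC/ADDE buy is carry propagation across limbs; a small sketch of the two-limb add that the X86ISD::ADD/ADC pair above implements:

#include <cassert>
#include <cstdint>

static void add128(uint64_t aLo, uint64_t aHi, uint64_t bLo, uint64_t bHi,
                   uint64_t &rLo, uint64_t &rHi) {
  rLo = aLo + bLo;               // ADDC: the add that sets the carry flag
  uint64_t carry = (rLo < aLo);  // carry out of the low limb
  rHi = aHi + bHi + carry;       // ADDE/ADC: the add that consumes it
}

int main() {
  uint64_t lo, hi;
  add128(~0ull, 0, 1, 0, lo, hi);
  assert(lo == 0 && hi == 1);    // the carry crossed into the high limb
  return 0;
}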
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -8397,6 +9035,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
+ case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
@@ -8441,7 +9081,11 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
- case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG);
+ case ISD::BITCAST: return LowerBITCAST(Op, DAG);
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::SUBC:
+ case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
}
}
@@ -8478,6 +9122,12 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
default:
assert(false && "Do not know how to custom type legalize this operation!");
return;
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::SUBC:
+ case ISD::SUBE:
+ // We don't want to expand or promote these.
+ return;
case ISD::FP_TO_SINT: {
std::pair<SDValue,SDValue> Vals =
FP_TO_INTHelper(SDValue(N, 0), DAG, true);
@@ -8485,13 +9135,13 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
if (FIST.getNode() != 0) {
EVT VT = N->getValueType(0);
// Return a load from the stack slot.
- Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, NULL, 0,
- false, false, 0));
+ Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
+ MachinePointerInfo(), false, false, 0));
}
return;
}
case ISD::READCYCLECOUNTER: {
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue TheChain = N->getOperand(0);
SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
@@ -8527,8 +9177,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Ops[] = { swapInH.getValue(0),
N->getOperand(1),
swapInH.getValue(1) };
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, 3);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
+ SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys,
+ Ops, 3, T, MMO);
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX,
MVT::i32, Result.getValue(1));
SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX,
@@ -8601,15 +9253,18 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
case X86ISD::PINSRB: return "X86ISD::PINSRB";
case X86ISD::PINSRW: return "X86ISD::PINSRW";
- case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
+ case X86ISD::PANDN: return "X86ISD::PANDN";
+ case X86ISD::PSIGNB: return "X86ISD::PSIGNB";
+ case X86ISD::PSIGNW: return "X86ISD::PSIGNW";
+ case X86ISD::PSIGND: return "X86ISD::PSIGND";
+ case X86ISD::PBLENDVB: return "X86ISD::PBLENDVB";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
- case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -8637,6 +9292,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
+ case X86ISD::ADC: return "X86ISD::ADC";
+ case X86ISD::SBB: return "X86ISD::SBB";
case X86ISD::SMUL: return "X86ISD::SMUL";
case X86ISD::UMUL: return "X86ISD::UMUL";
case X86ISD::INC: return "X86ISD::INC";
@@ -8681,7 +9338,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ";
case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
- case X86ISD::MINGW_ALLOCA: return "X86ISD::MINGW_ALLOCA";
+ case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
+ case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
}
}
@@ -9203,15 +9861,12 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
unsigned numArgs, bool memArg) const {
-
assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
"Target must have SSE4.2 or AVX features enabled");
DebugLoc dl = MI->getDebugLoc();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
unsigned Opc;
-
if (!Subtarget->hasAVX()) {
if (memArg)
Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
@@ -9224,24 +9879,318 @@ X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
}
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(Opc));
-
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
for (unsigned i = 0; i < numArgs; ++i) {
MachineOperand &Op = MI->getOperand(i+1);
-
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
-
- BuildMI(BB, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
+ BuildMI(*BB, MI, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
.addReg(X86::XMM0);
MI->eraseFromParent();
+ return BB;
+}
+MachineBasicBlock *
+X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
+ DebugLoc dl = MI->getDebugLoc();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+ // Address into RAX/EAX, other two args into ECX, EDX.
+ unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
+ unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
+ for (int i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ unsigned ValOps = X86::AddrNumOperands;
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
+ .addReg(MI->getOperand(ValOps).getReg());
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
+ .addReg(MI->getOperand(ValOps+1).getReg());
+
+ // The instruction doesn't actually take any operands though.
+ BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
+
+ MI->eraseFromParent(); // The pseudo is gone now.
+ return BB;
+}
+
+MachineBasicBlock *
+X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
+ DebugLoc dl = MI->getDebugLoc();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+ // First arg in ECX, the second in EAX.
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
+ .addReg(MI->getOperand(0).getReg());
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
+ .addReg(MI->getOperand(1).getReg());
+
+ // The instruction doesn't actually take any operands though.
+ BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));
+
+ MI->eraseFromParent(); // The pseudo is gone now.
return BB;
}
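A usage sketch of the register convention these two expanders set up, via the SSE3 intrinsics (assumes <pmmintrin.h>; MONITOR/MWAIT are privileged on most operating systems, so this is illustrative only): the monitored address goes in RAX/EAX, extensions in ECX, hints in EDX.

#include <pmmintrin.h>

void wait_for_write(volatile int *flag) {
  while (*flag == 0) {
    _mm_monitor((const void *)flag, /*extensions=*/0, /*hints=*/0);
    if (*flag == 0)          // re-check: the store may have raced MONITOR
      _mm_mwait(/*extensions=*/0, /*hints=*/0);
  }
}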
MachineBasicBlock *
+X86TargetLowering::EmitVAARG64WithCustomInserter(
+ MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ // Emit va_arg instruction on X86-64.
+
+ // Operands to this pseudo-instruction:
+ // 0 ) Output : destination address (reg)
+ // 1-5) Input : va_list address (addr, i64mem)
+ // 6 ) ArgSize : Size (in bytes) of vararg type
+ // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
+ // 8 ) Align : Alignment of type
+ // 9 ) EFLAGS (implicit-def)
+
+ assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
+ assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
+
+ unsigned DestReg = MI->getOperand(0).getReg();
+ MachineOperand &Base = MI->getOperand(1);
+ MachineOperand &Scale = MI->getOperand(2);
+ MachineOperand &Index = MI->getOperand(3);
+ MachineOperand &Disp = MI->getOperand(4);
+ MachineOperand &Segment = MI->getOperand(5);
+ unsigned ArgSize = MI->getOperand(6).getImm();
+ unsigned ArgMode = MI->getOperand(7).getImm();
+ unsigned Align = MI->getOperand(8).getImm();
+
+ // Memory Reference
+ assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
+ MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+
+ // Machine Information
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
+ const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
+ DebugLoc DL = MI->getDebugLoc();
+
+ // struct va_list {
+ // i32 gp_offset
+ // i32 fp_offset
+ // i64 overflow_area (address)
+ // i64 reg_save_area (address)
+ // }
+ // sizeof(va_list) = 24
+ // alignment(va_list) = 8
+
+ unsigned TotalNumIntRegs = 6;
+ unsigned TotalNumXMMRegs = 8;
+ bool UseGPOffset = (ArgMode == 1);
+ bool UseFPOffset = (ArgMode == 2);
+ unsigned MaxOffset = TotalNumIntRegs * 8 +
+ (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
+
+  // Align ArgSize to a multiple of 8.
+ unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
+ bool NeedsAlign = (Align > 8);
+
+ MachineBasicBlock *thisMBB = MBB;
+ MachineBasicBlock *overflowMBB;
+ MachineBasicBlock *offsetMBB;
+ MachineBasicBlock *endMBB;
+
+ unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
+ unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
+ unsigned OffsetReg = 0;
+
+ if (!UseGPOffset && !UseFPOffset) {
+ // If we only pull from the overflow region, we don't create a branch.
+ // We don't need to alter control flow.
+ OffsetDestReg = 0; // unused
+ OverflowDestReg = DestReg;
+
+ offsetMBB = NULL;
+ overflowMBB = thisMBB;
+ endMBB = thisMBB;
+ } else {
+ // First emit code to check if gp_offset (or fp_offset) is below the bound.
+ // If so, pull the argument from reg_save_area. (branch to offsetMBB)
+ // If not, pull from overflow_area. (branch to overflowMBB)
+ //
+    //            thisMBB
+    //            /     \
+    //           /       \
+    //    offsetMBB   overflowMBB
+    //           \       /
+    //            \     /
+    //             endMBB
+
+ // Registers for the PHI in endMBB
+ OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
+ OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
+
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction *MF = MBB->getParent();
+ overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+ endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
+
+ MachineFunction::iterator MBBIter = MBB;
+ ++MBBIter;
+
+ // Insert the new basic blocks
+ MF->insert(MBBIter, offsetMBB);
+ MF->insert(MBBIter, overflowMBB);
+ MF->insert(MBBIter, endMBB);
+
+ // Transfer the remainder of MBB and its successor edges to endMBB.
+ endMBB->splice(endMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ thisMBB->end());
+ endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
+
+ // Make offsetMBB and overflowMBB successors of thisMBB
+ thisMBB->addSuccessor(offsetMBB);
+ thisMBB->addSuccessor(overflowMBB);
+
+ // endMBB is a successor of both offsetMBB and overflowMBB
+ offsetMBB->addSuccessor(endMBB);
+ overflowMBB->addSuccessor(endMBB);
+
+ // Load the offset value into a register
+ OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
+ BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
+ .addOperand(Base)
+ .addOperand(Scale)
+ .addOperand(Index)
+ .addDisp(Disp, UseFPOffset ? 4 : 0)
+ .addOperand(Segment)
+ .setMemRefs(MMOBegin, MMOEnd);
+
+ // Check if there is enough room left to pull this argument.
+ BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
+ .addReg(OffsetReg)
+ .addImm(MaxOffset + 8 - ArgSizeA8);
+
+ // Branch to "overflowMBB" if offset >= max
+ // Fall through to "offsetMBB" otherwise
+ BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
+ .addMBB(overflowMBB);
+ }
+
+ // In offsetMBB, emit code to use the reg_save_area.
+ if (offsetMBB) {
+ assert(OffsetReg != 0);
+
+ // Read the reg_save_area address.
+ unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
+ BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
+ .addOperand(Base)
+ .addOperand(Scale)
+ .addOperand(Index)
+ .addDisp(Disp, 16)
+ .addOperand(Segment)
+ .setMemRefs(MMOBegin, MMOEnd);
+
+ // Zero-extend the offset
+ unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
+ BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
+ .addImm(0)
+ .addReg(OffsetReg)
+ .addImm(X86::sub_32bit);
+
+ // Add the offset to the reg_save_area to get the final address.
+ BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
+ .addReg(OffsetReg64)
+ .addReg(RegSaveReg);
+
+ // Compute the offset for the next argument
+ unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
+ BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
+ .addReg(OffsetReg)
+ .addImm(UseFPOffset ? 16 : 8);
+
+ // Store it back into the va_list.
+ BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
+ .addOperand(Base)
+ .addOperand(Scale)
+ .addOperand(Index)
+ .addDisp(Disp, UseFPOffset ? 4 : 0)
+ .addOperand(Segment)
+ .addReg(NextOffsetReg)
+ .setMemRefs(MMOBegin, MMOEnd);
+
+ // Jump to endMBB
+ BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
+ .addMBB(endMBB);
+ }
+
+ //
+ // Emit code to use overflow area
+ //
+
+ // Load the overflow_area address into a register.
+ unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
+ BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
+ .addOperand(Base)
+ .addOperand(Scale)
+ .addOperand(Index)
+ .addDisp(Disp, 8)
+ .addOperand(Segment)
+ .setMemRefs(MMOBegin, MMOEnd);
+
+ // If we need to align it, do so. Otherwise, just copy the address
+ // to OverflowDestReg.
+ if (NeedsAlign) {
+ // Align the overflow address
+ assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
+ unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
+
+ // aligned_addr = (addr + (align-1)) & ~(align-1)
+ BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
+ .addReg(OverflowAddrReg)
+ .addImm(Align-1);
+
+ BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
+ .addReg(TmpReg)
+ .addImm(~(uint64_t)(Align-1));
+ } else {
+ BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
+ .addReg(OverflowAddrReg);
+ }
+
+ // Compute the next overflow address after this argument.
+ // (the overflow address should be kept 8-byte aligned)
+ unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
+ BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
+ .addReg(OverflowDestReg)
+ .addImm(ArgSizeA8);
+
+ // Store the new overflow address.
+ BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
+ .addOperand(Base)
+ .addOperand(Scale)
+ .addOperand(Index)
+ .addDisp(Disp, 8)
+ .addOperand(Segment)
+ .addReg(NextAddrReg)
+ .setMemRefs(MMOBegin, MMOEnd);
+
+ // If we branched, emit the PHI to the front of endMBB.
+ if (offsetMBB) {
+ BuildMI(*endMBB, endMBB->begin(), DL,
+ TII->get(X86::PHI), DestReg)
+ .addReg(OffsetDestReg).addMBB(offsetMBB)
+ .addReg(OverflowDestReg).addMBB(overflowMBB);
+ }
+
+ // Erase the pseudo instruction
+ MI->eraseFromParent();
+
+ return endMBB;
+}
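A plain-C++ sketch of the logic the inserter above emits, for the GP-register case (a hypothetical helper; the field displacements 0/4/8/16 are exactly the addDisp() offsets used in the loads and stores):

#include <cstdint>

struct VaList {               // sizeof == 24, alignment == 8
  uint32_t gp_offset;         // disp 0
  uint32_t fp_offset;         // disp 4
  void    *overflow_arg_area; // disp 8
  void    *reg_save_area;     // disp 16
};

static void *va_arg_gp(VaList &ap) {
  if (ap.gp_offset + 8 <= 6 * 8) {              // room among the 6 GP regs?
    void *addr = (char *)ap.reg_save_area + ap.gp_offset;
    ap.gp_offset += 8;                          // advance to the next slot
    return addr;                                // the offsetMBB path
  }
  uintptr_t p = (uintptr_t)ap.overflow_arg_area;
  ap.overflow_arg_area = (void *)(p + 8);       // stays 8-byte aligned
  return (void *)p;                             // the overflowMBB path
}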
+
+MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
MachineInstr *MI,
MachineBasicBlock *MBB) const {
@@ -9296,8 +10245,8 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
MachineMemOperand *MMO =
F->getMachineMemOperand(
- PseudoSourceValue::getFixedStack(RegSaveFrameIndex),
- MachineMemOperand::MOStore, Offset,
+ MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
+ MachineMemOperand::MOStore,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr))
.addFrameIndex(RegSaveFrameIndex)
@@ -9389,7 +10338,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
+X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -9399,8 +10348,11 @@ X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
  // FIXME: The code should be tweaked as soon as we try to do codegen for
  // mingw-w64.
+ const char *StackProbeSymbol =
+ Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
+
BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
- .addExternalSymbol("_alloca")
+ .addExternalSymbol(StackProbeSymbol)
.addReg(X86::EAX, RegState::Implicit)
.addReg(X86::ESP, RegState::Implicit)
.addReg(X86::EAX, RegState::Define | RegState::Implicit)
@@ -9418,30 +10370,30 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
- const X86InstrInfo *TII
+ const X86InstrInfo *TII
= static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
DebugLoc DL = MI->getDebugLoc();
MachineFunction *F = BB->getParent();
- bool IsWin64 = Subtarget->isTargetWin64();
-
+
+ assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
assert(MI->getOperand(3).isGlobal() && "This should be a global");
-
+
if (Subtarget->is64Bit()) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV64rm), X86::RDI)
.addReg(X86::RIP)
.addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
MI->getOperand(3).getTargetFlags())
.addReg(0);
- MIB = BuildMI(*BB, MI, DL, TII->get(IsWin64 ? X86::WINCALL64m : X86::CALL64m));
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
} else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)
.addReg(0)
.addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
MI->getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
@@ -9451,13 +10403,13 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
TII->get(X86::MOV32rm), X86::EAX)
.addReg(TII->getGlobalBaseReg(F))
.addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
MI->getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
}
-
+
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -9467,13 +10419,36 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
- case X86::MINGW_ALLOCA:
- return EmitLoweredMingwAlloca(MI, BB);
+ case X86::TAILJMPd64:
+ case X86::TAILJMPr64:
+ case X86::TAILJMPm64:
+ assert(!"TAILJMP64 would not be touched here.");
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+    // The defs of TCRETURNxx64 include Win64's callee-saved registers as a
+    // subset; on AMD64 the additional defs must be added before register
+    // allocation.
+ if (!Subtarget->isTargetWin64()) {
+ MI->addRegisterDefined(X86::RSI);
+ MI->addRegisterDefined(X86::RDI);
+ MI->addRegisterDefined(X86::XMM6);
+ MI->addRegisterDefined(X86::XMM7);
+ MI->addRegisterDefined(X86::XMM8);
+ MI->addRegisterDefined(X86::XMM9);
+ MI->addRegisterDefined(X86::XMM10);
+ MI->addRegisterDefined(X86::XMM11);
+ MI->addRegisterDefined(X86::XMM12);
+ MI->addRegisterDefined(X86::XMM13);
+ MI->addRegisterDefined(X86::XMM14);
+ MI->addRegisterDefined(X86::XMM15);
+ }
+ return BB;
+ case X86::WIN_ALLOCA:
+ return EmitLoweredWinAlloca(MI, BB);
case X86::TLSCall_32:
case X86::TLSCall_64:
return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_GR8:
- case X86::CMOV_V1I64:
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_V4F32:
@@ -9583,6 +10558,12 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRM128MEM:
return EmitPCMP(MI, BB, 5, true /* in mem */);
+ // Thread synchronization.
+ case X86::MONITOR:
+ return EmitMonitor(MI, BB);
+ case X86::MWAIT:
+ return EmitMwait(MI, BB);
+
// Atomic Lowering.
case X86::ATOMAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
@@ -9747,6 +10728,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
false);
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
+
+ case X86::VAARG_64:
+ return EmitVAARG64WithCustomInserter(MI, BB);
}
}
@@ -9773,6 +10757,8 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
default: break;
case X86ISD::ADD:
case X86ISD::SUB:
+ case X86ISD::ADC:
+ case X86ISD::SBB:
case X86ISD::SMUL:
case X86ISD::UMUL:
case X86ISD::INC:
@@ -9791,6 +10777,16 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
}
}
+unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
+ unsigned Depth) const {
+ // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
+ if (Op.getOpcode() == X86ISD::SETCC_CARRY)
+ return Op.getValueType().getScalarType().getSizeInBits();
+
+ // Fallback case.
+ return 1;
+}
+
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
@@ -9811,13 +10807,18 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
- const TargetLowering &TLI) {
+ TargetLowering::DAGCombinerInfo &DCI) {
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
if (VT.getSizeInBits() != 128)
return SDValue();
+ // Don't create instructions with illegal types after legalize types has run.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
+ return SDValue();
+
SmallVector<SDValue, 16> Elts;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
@@ -9877,8 +10878,8 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Store the value to a temporary stack slot.
SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
- SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, NULL,
- 0, false, false, 0);
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
+ MachinePointerInfo(), false, false, 0);
// Replace each use (extract) with a load of the appropriate element.
for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
@@ -9893,11 +10894,12 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(),
- OffsetVal, StackPtr);
+ StackPtr, OffsetVal);
// Load the scalar.
SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
- ScalarAddr, NULL, 0, false, false, 0);
+ ScalarAddr, MachinePointerInfo(),
+ false, false, 0);
    // Replace the extract with the load.
DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
@@ -10473,6 +11475,36 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return SDValue();
}
+
+static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ // Want to form PANDN nodes, in the hopes of then easily combining them with
+ // OR and AND nodes to form PBLEND/PSIGN.
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::v2i64)
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ DebugLoc DL = N->getDebugLoc();
+
+ // Check LHS for vnot
+ if (N0.getOpcode() == ISD::XOR &&
+ ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
+ return DAG.getNode(X86ISD::PANDN, DL, VT, N0.getOperand(0), N1);
+
+ // Check RHS for vnot
+ if (N1.getOpcode() == ISD::XOR &&
+ ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
+ return DAG.getNode(X86ISD::PANDN, DL, VT, N1.getOperand(0), N0);
+
+ return SDValue();
+}
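PANDN computes ~a & b in a single instruction; the combine above merely pattern-matches the (xor x, all-ones) spelling of the NOT. A scalar sketch of the identity:

#include <cassert>
#include <cstdint>

static uint64_t pandn(uint64_t mask, uint64_t val) {
  return ~mask & val;          // X86ISD::PANDN, lane-wise on v2i64
}

int main() {
  assert(pandn(0xff00ff00ff00ff00ull, ~0ull) == 0x00ff00ff00ff00ffull);
  return 0;
}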
+
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
@@ -10480,12 +11512,99 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT VT = N->getValueType(0);
- if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
+ if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64 && VT != MVT::v2i64)
return SDValue();
- // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
+
+ // look for psign/blend
+ if (Subtarget->hasSSSE3()) {
+ if (VT == MVT::v2i64) {
+ // Canonicalize pandn to RHS
+ if (N0.getOpcode() == X86ISD::PANDN)
+ std::swap(N0, N1);
+ // or (and (m, x), (pandn m, y))
+ if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::PANDN) {
+ SDValue Mask = N1.getOperand(0);
+ SDValue X = N1.getOperand(1);
+ SDValue Y;
+ if (N0.getOperand(0) == Mask)
+ Y = N0.getOperand(1);
+ if (N0.getOperand(1) == Mask)
+ Y = N0.getOperand(0);
+
+      // Check that the mask appeared in both the AND and the PANDN; bail if not.
+ if (!Y.getNode())
+ return SDValue();
+
+      // Validate that X, Y, and Mask are BITCASTs, and see through them.
+ if (Mask.getOpcode() != ISD::BITCAST ||
+ X.getOpcode() != ISD::BITCAST ||
+ Y.getOpcode() != ISD::BITCAST)
+ return SDValue();
+
+ // Look through mask bitcast.
+ Mask = Mask.getOperand(0);
+ EVT MaskVT = Mask.getValueType();
+
+ // Validate that the Mask operand is a vector sra node. The sra node
+ // will be an intrinsic.
+ if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
+ return SDValue();
+
+ // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
+ // there is no psrai.b
+ switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) {
+ case Intrinsic::x86_sse2_psrai_w:
+ case Intrinsic::x86_sse2_psrai_d:
+ break;
+ default: return SDValue();
+ }
+
+ // Check that the SRA is all signbits.
+ SDValue SraC = Mask.getOperand(2);
+ unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
+ unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ if ((SraAmt + 1) != EltBits)
+ return SDValue();
+
+ DebugLoc DL = N->getDebugLoc();
+
+      // Now we know we at least have a pblendvb with the mask val. See if
+ // we can form a psignb/w/d.
+ // psign = x.type == y.type == mask.type && y = sub(0, x);
+ X = X.getOperand(0);
+ Y = Y.getOperand(0);
+ if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
+ ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
+ X.getValueType() == MaskVT && X.getValueType() == Y.getValueType()){
+ unsigned Opc = 0;
+ switch (EltBits) {
+ case 8: Opc = X86ISD::PSIGNB; break;
+ case 16: Opc = X86ISD::PSIGNW; break;
+ case 32: Opc = X86ISD::PSIGND; break;
+ default: break;
+ }
+ if (Opc) {
+ SDValue Sign = DAG.getNode(Opc, DL, MaskVT, X, Mask.getOperand(1));
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Sign);
+ }
+ }
+ // PBLENDVB only available on SSE 4.1
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X);
+ Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y);
+ Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask);
+ Mask = DAG.getNode(X86ISD::PBLENDVB, DL, MVT::v16i8, X, Y, Mask);
+ return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Mask);
+ }
+ }
+ }
+
+ // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
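The identity driving the SSSE3 combine above, in scalar form; a sketch where m is the all-sign-bits mask validated by the psrai check, so (m & x) | (~m & y) is a lane-wise select (PBLENDVB), and picking y = 0 - x turns the select into the conditional negation PSIGN implements:

#include <cassert>
#include <cstdint>

static int32_t blend(int32_t m, int32_t x, int32_t y) {
  return (m & x) | (~m & y);      // and/pandn/or, one lane
}

int main() {
  int32_t v = 1234;
  assert(blend(-1, -v, v) == -v); // mask all ones: take the negated value
  assert(blend( 0, -v, v) ==  v); // mask all zeros: take the original
  return 0;
}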
@@ -10600,9 +11719,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// pair instead.
if (Subtarget->is64Bit() || F64IsLegal) {
EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
- SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(),
- Ld->getBasePtr(), Ld->getSrcValue(),
- Ld->getSrcValueOffset(), Ld->isVolatile(),
+ SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
+ Ld->getPointerInfo(), Ld->isVolatile(),
Ld->isNonTemporal(), Ld->getAlignment());
SDValue NewChain = NewLd.getValue(1);
if (TokenFactorIndex != -1) {
@@ -10611,7 +11729,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
Ops.size());
}
return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
- St->getSrcValue(), St->getSrcValueOffset(),
+ St->getPointerInfo(),
St->isVolatile(), St->isNonTemporal(),
St->getAlignment());
}
@@ -10622,11 +11740,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(4, MVT::i32));
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
- Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
Ld->getAlignment());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
- Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
+ Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
MinAlign(Ld->getAlignment(), 4));
@@ -10643,12 +11761,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(4, MVT::i32));
SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
- St->getSrcValue(), St->getSrcValueOffset(),
+ St->getPointerInfo(),
St->isVolatile(), St->isNonTemporal(),
St->getAlignment());
SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
- St->getSrcValue(),
- St->getSrcValueOffset() + 4,
+ St->getPointerInfo().getWithOffset(4),
St->isVolatile(),
St->isNonTemporal(),
MinAlign(St->getAlignment(), 4));
@@ -10706,13 +11823,13 @@ static SDValue PerformBTCombine(SDNode *N,
static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Op = N->getOperand(0);
- if (Op.getOpcode() == ISD::BIT_CONVERT)
+ if (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
- return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
+ return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
}
return SDValue();
}
@@ -10743,19 +11860,106 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
+static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
+ unsigned X86CC = N->getConstantOperandVal(0);
+ SDValue EFLAG = N->getOperand(1);
+ DebugLoc DL = N->getDebugLoc();
+
+ // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
+ // a zext and produces an all-ones bit which is more useful than 0/1 in some
+ // cases.
+ if (X86CC == X86::COND_B)
+ return DAG.getNode(ISD::AND, DL, MVT::i8,
+ DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), EFLAG),
+ DAG.getConstant(1, MVT::i8));
+
+ return SDValue();
+}
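A scalar sketch of why the sbb materialization is profitable: subtracting a register from itself with borrow yields all-ones exactly when the carry flag is set, and the AND with 1 recovers the original 0/1 setb result without a zext.

#include <cassert>
#include <cstdint>

int main() {
  unsigned a = 3, b = 5;
  uint32_t cf = (a < b) ? 1u : 0u;   // CF after "cmp a, b"
  uint32_t mask = 0u - cf;           // "sbb r, r": ~0 if CF, else 0
  assert(mask == 0xffffffffu);       // usable directly as a mask
  assert((mask & 1) == cf);          // the AND above recovers SETB
  return 0;
}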
+
+// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
+static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
+ X86TargetLowering::DAGCombinerInfo &DCI) {
+ // If the LHS and RHS of the ADC node are zero, then it can't overflow and
+ // the result is either zero or one (depending on the input carry bit).
+ // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
+ if (X86::isZeroNode(N->getOperand(0)) &&
+ X86::isZeroNode(N->getOperand(1)) &&
+ // We don't have a good way to replace an EFLAGS use, so only do this when
+ // dead right now.
+ SDValue(N, 1).use_empty()) {
+ DebugLoc DL = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+ SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
+ SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
+ DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
+ DAG.getConstant(X86::COND_B,MVT::i8),
+ N->getOperand(2)),
+ DAG.getConstant(1, VT));
+ return DCI.CombineTo(N, Res1, CarryOut);
+ }
+
+ return SDValue();
+}
+
+// fold (add Y, (sete X, 0)) -> adc 0, Y
+// (add Y, (setne X, 0)) -> sbb -1, Y
+//      (sub Y, (sete  X, 0)) -> sbb  0, Y
+//      (sub Y, (setne X, 0)) -> adc -1, Y
+static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
+ DebugLoc DL = N->getDebugLoc();
+
+ // Look through ZExts.
+ SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
+ if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
+ return SDValue();
+
+ SDValue SetCC = Ext.getOperand(0);
+ if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
+ return SDValue();
+
+ X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
+ if (CC != X86::COND_E && CC != X86::COND_NE)
+ return SDValue();
+
+ SDValue Cmp = SetCC.getOperand(1);
+ if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
+ !X86::isZeroNode(Cmp.getOperand(1)) ||
+ !Cmp.getOperand(0).getValueType().isInteger())
+ return SDValue();
+
+ SDValue CmpOp0 = Cmp.getOperand(0);
+ SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
+ DAG.getConstant(1, CmpOp0.getValueType()));
+
+ SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
+ if (CC == X86::COND_NE)
+ return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
+ DL, OtherVal.getValueType(), OtherVal,
+ DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
+ return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
+ DL, OtherVal.getValueType(), OtherVal,
+ DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
+}
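A quick check of the folds listed in the comment above: "cmp x, 1" sets the carry flag exactly when x == 0 (unsigned x < 1), so an adc/sbb against the right constant reproduces the sete/setne adjustment.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x < 4; ++x) {
    uint32_t y = 100;
    uint32_t cf = (x < 1) ? 1u : 0u;              // CF of "cmp x, 1"
    // (add Y, (sete X, 0))  ->  adc 0, Y  ==  Y + 0 + CF
    assert(y + (x == 0 ? 1u : 0u) == y + 0 + cf);
    // (add Y, (setne X, 0)) ->  sbb -1, Y ==  Y - (-1) - CF
    assert(y + (x != 0 ? 1u : 0u) == y - (uint32_t)-1 - cf);
  }
  return 0;
}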
+
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::EXTRACT_VECTOR_ELT:
- return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
+ return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
+ case ISD::ADD:
+  case ISD::SUB:            return OptimizeConditionalInDecrement(N, DAG);
+ case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
+ case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
case X86ISD::FXOR:
@@ -10764,8 +11968,10 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
+ case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
case X86ISD::SHUFPS: // Handle all target specific shuffles
case X86ISD::SHUFPD:
+ case X86ISD::PALIGN:
case X86ISD::PUNPCKHBW:
case X86ISD::PUNPCKHWD:
case X86ISD::PUNPCKHDQ:
@@ -10785,7 +11991,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PSHUFLW:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
- case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
+ case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI);
}
return SDValue();
@@ -10892,44 +12098,14 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
-static bool LowerToBSwap(CallInst *CI) {
- // FIXME: this should verify that we are targetting a 486 or better. If not,
- // we will turn this bswap into something that will be lowered to logical ops
- // instead of emitting the bswap asm. For now, we don't support 486 or lower
- // so don't worry about this.
-
- // Verify this is a simple bswap.
- if (CI->getNumArgOperands() != 1 ||
- CI->getType() != CI->getArgOperand(0)->getType() ||
- !CI->getType()->isIntegerTy())
- return false;
-
- const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
- if (!Ty || Ty->getBitWidth() % 16 != 0)
- return false;
-
- // Okay, we can do this xform, do so now.
- const Type *Tys[] = { Ty };
- Module *M = CI->getParent()->getParent()->getParent();
- Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
-
- Value *Op = CI->getArgOperand(0);
- Op = CallInst::Create(Int, Op, CI->getName(), CI);
-
- CI->replaceAllUsesWith(Op);
- CI->eraseFromParent();
- return true;
-}
-
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
- std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
std::string AsmStr = IA->getAsmString();
// TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
SmallVector<StringRef, 4> AsmPieces;
- SplitString(AsmStr, AsmPieces, "\n"); // ; as separator?
+ SplitString(AsmStr, AsmPieces, ";\n");
switch (AsmPieces.size()) {
default: return false;
@@ -10938,6 +12114,10 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces.clear();
SplitString(AsmStr, AsmPieces, " \t"); // Split with whitespace.
+    // FIXME: this should verify that we are targeting a 486 or better. If not,
+    // we will turn this bswap into something that will be lowered to logical
+    // ops instead of emitting the bswap asm. For now, we don't support 486 or
+    // lower, so don't worry about this.
// bswap $0
if (AsmPieces.size() == 2 &&
(AsmPieces[0] == "bswap" ||
@@ -10947,7 +12127,10 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[1] == "${0:q}")) {
// No need to check constraints, nothing other than the equivalent of
// "=r,0" would be valid here.
- return LowerToBSwap(CI);
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
// rorw $$8, ${0:w} --> llvm.bswap.i16
if (CI->getType()->isIntegerTy(16) &&
@@ -10957,35 +12140,76 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[2] == "${0:w}" &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
AsmPieces.clear();
- const std::string &Constraints = IA->getConstraintString();
- SplitString(StringRef(Constraints).substr(5), AsmPieces, ",");
+ const std::string &ConstraintsStr = IA->getConstraintString();
+ SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
std::sort(AsmPieces.begin(), AsmPieces.end());
if (AsmPieces.size() == 4 &&
AsmPieces[0] == "~{cc}" &&
AsmPieces[1] == "~{dirflag}" &&
AsmPieces[2] == "~{flags}" &&
AsmPieces[3] == "~{fpsr}") {
- return LowerToBSwap(CI);
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
}
break;
case 3:
- if (CI->getType()->isIntegerTy(64) &&
- Constraints.size() >= 2 &&
- Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
- Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
- // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
+ if (CI->getType()->isIntegerTy(32) &&
+ IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
SmallVector<StringRef, 4> Words;
- SplitString(AsmPieces[0], Words, " \t");
- if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
+ SplitString(AsmPieces[0], Words, " \t,");
+ if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
+ Words[2] == "${0:w}") {
Words.clear();
- SplitString(AsmPieces[1], Words, " \t");
- if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
+ SplitString(AsmPieces[1], Words, " \t,");
+ if (Words.size() == 3 && Words[0] == "rorl" && Words[1] == "$$16" &&
+ Words[2] == "$0") {
Words.clear();
SplitString(AsmPieces[2], Words, " \t,");
- if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
- Words[2] == "%edx") {
- return LowerToBSwap(CI);
+ if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
+ Words[2] == "${0:w}") {
+ AsmPieces.clear();
+ const std::string &ConstraintsStr = IA->getConstraintString();
+ SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
+ std::sort(AsmPieces.begin(), AsmPieces.end());
+ if (AsmPieces.size() == 4 &&
+ AsmPieces[0] == "~{cc}" &&
+ AsmPieces[1] == "~{dirflag}" &&
+ AsmPieces[2] == "~{flags}" &&
+ AsmPieces[3] == "~{fpsr}") {
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+ return IntrinsicLowering::LowerToByteSwap(CI);
+ }
+ }
+ }
+ }
+ }
+
+ if (CI->getType()->isIntegerTy(64)) {
+ InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
+ if (Constraints.size() >= 2 &&
+ Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
+ Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
+ // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
+ SmallVector<StringRef, 4> Words;
+ SplitString(AsmPieces[0], Words, " \t");
+ if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
+ Words.clear();
+ SplitString(AsmPieces[1], Words, " \t");
+ if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
+ Words.clear();
+ SplitString(AsmPieces[2], Words, " \t,");
+ if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
+ Words[2] == "%edx") {
+ const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+ return IntrinsicLowering::LowerToByteSwap(CI);
+ }
}
}
}
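For reference, why the recognized three-instruction rotate sequence is a 32-bit byte swap: rorw $8 swaps the bytes of the low 16-bit half, rorl $16 swaps the halves, and the final rorw $8 fixes the bytes of the (new) low half. A sketch:

#include <cassert>
#include <cstdint>

static uint32_t rorw8_low(uint32_t v) {      // rorw $8, ${0:w}
  uint16_t w = (uint16_t)v;
  w = (uint16_t)((w >> 8) | (w << 8));
  return (v & 0xffff0000u) | w;
}

int main() {
  uint32_t v = 0x11223344u;
  v = rorw8_low(v);                          // 0x11224433
  v = (v >> 16) | (v << 16);                 // rorl $16: 0x44331122
  v = rorw8_low(v);                          // 0x44332211
  assert(v == 0x44332211u);                  // == llvm.bswap.i32(0x11223344)
  return 0;
}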
@@ -11003,18 +12227,32 @@ X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
- case 'A':
- return C_Register;
- case 'f':
- case 'r':
case 'R':
- case 'l':
case 'q':
case 'Q':
- case 'x':
+ case 'f':
+ case 't':
+ case 'u':
case 'y':
+ case 'x':
case 'Y':
return C_RegisterClass;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'S':
+ case 'D':
+ case 'A':
+ return C_Register;
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'G':
+ case 'C':
case 'e':
case 'Z':
return C_Other;
@@ -11025,6 +12263,110 @@ X86TargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+ X86TargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ const Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+    break;
+ case 'R':
+ case 'q':
+ case 'Q':
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'S':
+ case 'D':
+ case 'A':
+ if (CallOperandVal->getType()->isIntegerTy())
+ weight = CW_SpecificReg;
+ break;
+ case 'f':
+ case 't':
+ case 'u':
+ if (type->isFloatingPointTy())
+ weight = CW_SpecificReg;
+ break;
+ case 'y':
+ if (type->isX86_MMXTy() && Subtarget->hasMMX())
+ weight = CW_SpecificReg;
+ break;
+ case 'x':
+ case 'Y':
+ if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasXMM())
+ weight = CW_Register;
+ break;
+ case 'I':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
+ if (C->getZExtValue() <= 31)
+ weight = CW_Constant;
+ }
+ break;
+ case 'J':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if (C->getZExtValue() <= 63)
+ weight = CW_Constant;
+ }
+ break;
+ case 'K':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
+ weight = CW_Constant;
+ }
+ break;
+ case 'L':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
+ weight = CW_Constant;
+ }
+ break;
+ case 'M':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if (C->getZExtValue() <= 3)
+ weight = CW_Constant;
+ }
+ break;
+ case 'N':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if (C->getZExtValue() <= 0xff)
+ weight = CW_Constant;
+ }
+ break;
+ case 'G':
+ case 'C':
+ if (dyn_cast<ConstantFP>(CallOperandVal)) {
+ weight = CW_Constant;
+ }
+ break;
+ case 'e':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if ((C->getSExtValue() >= -0x80000000LL) &&
+ (C->getSExtValue() <= 0x7fffffffLL))
+ weight = CW_Constant;
+ }
+ break;
+ case 'Z':
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
+ if (C->getZExtValue() <= 0xffffffff)
+ weight = CW_Constant;
+ }
+ break;
+ }
+ return weight;
+}
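A usage sketch of the immediate-range constraints weighted above (hypothetical inline asm, x86 AT&T syntax under GCC/Clang): "I" accepts 0..31, which is why a constant shift count matches it as CW_Constant instead of occupying a register.

static inline unsigned shl5(unsigned x) {
  asm("shll %1, %0" : "+r"(x) : "I"(5)); // 5 is in [0,31], satisfies 'I'
  return x;
}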
+
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
@@ -11033,9 +12375,9 @@ LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
- if (Subtarget->hasSSE2())
+ if (Subtarget->hasXMMInt())
return "Y";
- if (Subtarget->hasSSE1())
+ if (Subtarget->hasXMM())
return "x";
}
@@ -11265,10 +12607,10 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
if (!Subtarget->hasMMX()) break;
return std::make_pair(0U, X86::VR64RegisterClass);
case 'Y': // SSE_REGS if SSE2 allowed
- if (!Subtarget->hasSSE2()) break;
+ if (!Subtarget->hasXMMInt()) break;
// FALL THROUGH.
case 'x': // SSE_REGS if SSE1 allowed
- if (!Subtarget->hasSSE1()) break;
+ if (!Subtarget->hasXMM()) break;
switch (VT.getSimpleVT().SimpleTy) {
default: break;
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index d2d9b28..419da37 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -57,35 +57,6 @@ namespace llvm {
/// corresponds to X86::PSRLDQ.
FSRL,
- /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
- /// integer source in memory and FP reg result. This corresponds to the
- /// X86::FILD*m instructions. It has three inputs (token chain, address,
- /// and source type) and two outputs (FP value and token chain). FILD_FLAG
- /// also produces a flag).
- FILD,
- FILD_FLAG,
-
- /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
- /// integer destination in memory and a FP reg source. This corresponds
- /// to the X86::FIST*m instructions and the rounding mode change stuff. It
- /// has two inputs (token chain and address) and two outputs (int value
- /// and token chain).
- FP_TO_INT16_IN_MEM,
- FP_TO_INT32_IN_MEM,
- FP_TO_INT64_IN_MEM,
-
- /// FLD - This instruction implements an extending load to FP stack slots.
- /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
- /// operand, ptr to load from, and a ValueType node indicating the type
- /// to load to.
- FLD,
-
- /// FST - This instruction implements a truncating store to FP stack
- /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
- /// chain operand, value to store, address, and a ValueType to store it
- /// as.
- FST,
-
/// CALL - These operations represent an abstract X86 call
/// instruction, which includes a bunch of information. In particular the
/// operands of these node are:
@@ -105,7 +76,7 @@ namespace llvm {
///
CALL,
- /// RDTSC_DAG - This operation implements the lowering for
+ /// RDTSC_DAG - This operation implements the lowering for
/// readcyclecounter
RDTSC_DAG,
@@ -115,13 +86,13 @@ namespace llvm {
/// X86 bit-test instructions.
BT,
- /// X86 SetCC. Operand 0 is condition code, and operand 1 is the flag
- /// operand produced by a CMP instruction.
+ /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
+ /// operand, usually produced by a CMP instruction.
SETCC,
// Same as SETCC except it's materialized with a sbb and the value is all
// one's or all zero's.
- SETCC_CARRY,
+ SETCC_CARRY, // R = carry_bit ? ~0 : 0
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
@@ -157,11 +128,15 @@ namespace llvm {
/// relative displacements.
WrapperRIP,
- /// MOVQ2DQ - Copies a 64-bit value from a vector to another vector.
- /// Can be used to move a vector value from a MMX register to a XMM
- /// register.
+ /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
+ /// of an XMM vector, with the high word zero filled.
MOVQ2DQ,
+ /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
+ /// to an MMX vector. If you think this is too close to the previous
+ /// mnemonic, so do I; blame Intel.
+ MOVDQ2Q,
+
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB.
PEXTRB,
@@ -184,7 +159,16 @@ namespace llvm {
/// PSHUFB - Shuffle 16 8-bit values within a vector.
PSHUFB,
-
+
+ /// PANDN - and with not'd value.
+ PANDN,
+
+ /// PSIGNB/W/D - Copy integer sign.
+ PSIGNB, PSIGNW, PSIGND,
+
+ /// PBLENDVB - Variable blend
+ PBLENDVB,
+
/// FMAX, FMIN - Floating point max and min.
///
FMAX, FMIN,
@@ -196,17 +180,14 @@ namespace llvm {
// TLSADDR - Thread Local Storage.
TLSADDR,
-
+
// TLSCALL - Thread Local Storage. When calling to an OS provided
// thunk at the address from an earlier relocation.
TLSCALL,
- // SegmentBaseAddress - The address segment:0
- SegmentBaseAddress,
-
// EH_RETURN - Exception Handling helpers.
EH_RETURN,
-
+
/// TC_RETURN - Tail call return.
/// operand #0 chain
/// operand #1 callee (register or absolute)
@@ -214,37 +195,29 @@ namespace llvm {
/// operand #3 optional in flag
TC_RETURN,
- // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
- LCMPXCHG_DAG,
- LCMPXCHG8_DAG,
-
- // FNSTCW16m - Store FP control world into i16 memory.
- FNSTCW16m,
-
// VZEXT_MOVL - Vector move low and zero extend.
VZEXT_MOVL,
- // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
- VZEXT_LOAD,
-
// VSHL, VSRL - Vector logical left / right shift.
VSHL, VSRL,
// CMPPD, CMPPS - Vector double/float comparison.
CMPPD, CMPPS,
-
+
// PCMP* - Vector integer comparisons.
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
- // ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
- ADD, SUB, SMUL, UMUL,
+ // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
+ ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
+
+ UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
MUL_IMM,
-
+
// PTEST - Vector bitwise comparisons
PTEST,
@@ -291,11 +264,17 @@ namespace llvm {
// with control flow.
VASTART_SAVE_XMM_REGS,
- // MINGW_ALLOCA - MingW's __alloca call to do stack probing.
- MINGW_ALLOCA,
+    // WIN_ALLOCA - Windows' _chkstk call to do stack probing.
+ WIN_ALLOCA,
+
+ // Memory barrier
+ MEMBARRIER,
+ MFENCE,
+ SFENCE,
+ LFENCE,
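+    // (MFENCE orders both loads and stores, SFENCE orders stores, and
+    // LFENCE orders loads.)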
- // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
- // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
+ // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
+ // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
// Atomic 64-bit binary operations.
ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
ATOMSUB64_DAG,
@@ -304,12 +283,49 @@ namespace llvm {
ATOMAND64_DAG,
ATOMNAND64_DAG,
ATOMSWAP64_DAG,
-
- // Memory barrier
- MEMBARRIER,
- MFENCE,
- SFENCE,
- LFENCE
+
+ // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
+ LCMPXCHG_DAG,
+ LCMPXCHG8_DAG,
+
+ // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
+ VZEXT_LOAD,
+
+    // FNSTCW16m - Store FP control word into i16 memory.
+ FNSTCW16m,
+
+ /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
+    /// integer destination in memory and an FP reg source. This corresponds
+    /// to the X86::FIST*m instructions and the accompanying rounding-mode
+    /// changes (the x87 control word is temporarily set to round toward zero
+    /// so that the conversion truncates). It has two inputs (token chain and
+    /// address) and two outputs (int value and token chain).
+ FP_TO_INT16_IN_MEM,
+ FP_TO_INT32_IN_MEM,
+ FP_TO_INT64_IN_MEM,
+
+ /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
+ /// integer source in memory and FP reg result. This corresponds to the
+ /// X86::FILD*m instructions. It has three inputs (token chain, address,
+    /// and source type) and two outputs (FP value and token chain).
+    /// FILD_FLAG also produces a flag.
+ FILD,
+ FILD_FLAG,
+
+ /// FLD - This instruction implements an extending load to FP stack slots.
+ /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
+ /// operand, ptr to load from, and a ValueType node indicating the type
+ /// to load to.
+ FLD,
+
+ /// FST - This instruction implements a truncating store to FP stack
+ /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
+ /// chain operand, value to store, address, and a ValueType to store it
+ /// as.
+ FST,
+
+ /// VAARG_64 - This instruction grabs the address of the next argument
+    /// from a va_list (it reads and modifies the va_list in memory).
+ VAARG_64
// WARNING: Do not add anything in the end unless you want the node to
// have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
@@ -392,6 +408,16 @@ namespace llvm {
/// specifies a shuffle of elements that is suitable for input to PALIGNR.
bool isPALIGNRMask(ShuffleVectorSDNode *N);
+ /// isVEXTRACTF128Index - Return true if the specified
+ /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
+ /// suitable for input to VEXTRACTF128.
+ bool isVEXTRACTF128Index(SDNode *N);
+
+ /// isVINSERTF128Index - Return true if the specified
+ /// INSERT_SUBVECTOR operand specifies a subvector insert that is
+ /// suitable for input to VINSERTF128.
+ bool isVINSERTF128Index(SDNode *N);
+
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
@@ -409,6 +435,16 @@ namespace llvm {
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
unsigned getShufflePALIGNRImmediate(SDNode *N);
+ /// getExtractVEXTRACTF128Immediate - Return the appropriate
+ /// immediate to extract the specified EXTRACT_SUBVECTOR index
+ /// with VEXTRACTF128 instructions.
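+    /// (For example, extracting elements starting at index 4 of a v8f32,
+    /// i.e. the upper 128-bit half, yields immediate 1.)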
+ unsigned getExtractVEXTRACTF128Immediate(SDNode *N);
+
+ /// getInsertVINSERTF128Immediate - Return the appropriate
+ /// immediate to insert at the specified INSERT_SUBVECTOR index
+ /// with VINSERTF128 instructions.
+ unsigned getInsertVINSERTF128Immediate(SDNode *N);
+
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool isZeroNode(SDValue Elt);
@@ -425,16 +461,13 @@ namespace llvm {
public:
explicit X86TargetLowering(X86TargetMachine &TM);
- /// getPICBaseSymbol - Return the X86-32 PIC base.
- MCSymbol *getPICBaseSymbol(const MachineFunction *MF, MCContext &Ctx) const;
-
virtual unsigned getJumpTableEncoding() const;
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
MCContext &Ctx) const;
-
+
    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -442,7 +475,7 @@ namespace llvm {
virtual const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
-
+
/// getStackPtrReg - Return the stack pointer register we are using: either
/// ESP or RSP.
unsigned getStackPtrReg() const { return X86StackPtr; }
@@ -486,7 +519,7 @@ namespace llvm {
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const;
-
+
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
/// isTypeDesirableForOp - Return true if the target has native support for
@@ -505,7 +538,7 @@ namespace llvm {
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
-
+
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
@@ -513,26 +546,36 @@ namespace llvm {
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
- /// in Mask are known to be either zero or one and return them in the
+ /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
+    // ComputeNumSignBitsForTargetNode - Determine the number of high bits of
+    // the operation's result that are known to be copies of the sign bit.
+ virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ unsigned Depth) const;
+
virtual bool
isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
-
+
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
virtual bool ExpandInlineAsm(CallInst *CI) const;
-
+
ConstraintType getConstraintType(const std::string &Constraint) const;
-
- std::vector<unsigned>
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
+ std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -546,15 +589,15 @@ namespace llvm {
char ConstraintLetter,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
-
+
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
/// register. This should only be used for C_Register constraints. On
/// error, this returns a register number of 0.
- std::pair<unsigned, const TargetRegisterClass*>
+ std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
-
+
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
@@ -609,7 +652,7 @@ namespace llvm {
// shrink long double fp constant since fldt is very slow.
return !X86ScalarSSEf64 || VT == MVT::f80;
}
-
+
const X86Subtarget* getSubtarget() const {
return Subtarget;
}
@@ -650,8 +693,8 @@ namespace llvm {
/// X86StackPtr - X86 physical register used as stack ptr.
unsigned X86StackPtr;
-
- /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
+
+ /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops.
/// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations.
@@ -702,7 +745,6 @@ namespace llvm {
SDValue Chain, bool IsTailCall, bool Is64Bit,
int FPDiff, DebugLoc dl) const;
- CCAssignFn *CCAssignFnForNode(CallingConv::ID CallConv) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
@@ -719,6 +761,8 @@ namespace llvm {
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
@@ -729,7 +773,7 @@ namespace llvm {
SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
- SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const;
+ SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
@@ -794,6 +838,8 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
+ virtual bool isUsedByReturnOnly(SDNode *N) const;
+
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -810,6 +856,13 @@ namespace llvm {
MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
unsigned argNum, bool inMem) const;
+ /// Utility functions to emit monitor and mwait instructions. These
+ /// need to make sure that the arguments to the intrinsic are in the
+ /// correct registers.
+ MachineBasicBlock *EmitMonitor(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;
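+    /// (monitor takes its address in RAX/EAX with extensions and hints in
+    /// ECX and EDX; mwait takes its extensions in ECX and hints in EAX.)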
+
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
/// block, and the associated X86 opcodes for reg/reg and reg/imm.
@@ -833,7 +886,7 @@ namespace llvm {
unsigned immOpcL,
unsigned immOpcH,
bool invSrc = false) const;
-
+
/// Utility function to emit atomic min and max. It takes the min/max
/// instruction to expand, the associated basic block, and the associated
/// cmov opcode for moving the min or max value.
@@ -841,6 +894,11 @@ namespace llvm {
MachineBasicBlock *BB,
unsigned cmovOpc) const;
+ // Utility function to emit the low-level va_arg code for X86-64.
+ MachineBasicBlock *EmitVAARG64WithCustomInserter(
+ MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
/// Utility function to emit the xmm reg save portion of va_start.
MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
MachineInstr *BInstr,
@@ -849,12 +907,15 @@ namespace llvm {
MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredMingwAlloca(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const;
-
+
MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/X86/X86Instr3DNow.td b/contrib/llvm/lib/Target/X86/X86Instr3DNow.td
new file mode 100644
index 0000000..45d1c6b
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86Instr3DNow.td
@@ -0,0 +1,77 @@
+//====- X86Instr3DNow.td - The 3DNow! Instruction Set ------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the 3DNow! instruction set, which extends MMX to support
+// floating point and also adds a few more random instructions for good measure.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: We don't support any intrinsics for these instructions yet.
+
+class I3DNow<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, asm, pattern>, TB, Requires<[Has3DNow]> {
+}
+
+class I3DNow_binop<bits<8> o, Format F, dag ins, string Mnemonic>
+ : I<o, F, (outs VR64:$dst), ins,
+ !strconcat(Mnemonic, "\t{$src2, $dst|$dst, $src2}"), []>,
+ TB, Requires<[Has3DNow]>, Has3DNow0F0FOpcode {
+ // FIXME: The disassembler doesn't support Has3DNow0F0FOpcode yet.
+ let isAsmParserOnly = 1;
+}
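+
+// Note: 3DNow! arithmetic instructions are encoded as 0F 0F <ModRM> <opcode>,
+// with the opcode byte trailing the operands like an immediate; that is what
+// Has3DNow0F0FOpcode conveys to the encoder.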
+
+
+let Constraints = "$src1 = $dst" in {
+  // I3DNow_binop_rm - Simple 3DNow! binary operator with register and
+  // memory forms.
+ // When this is cleaned up, remove the FIXME from X86RecognizableInstr.cpp.
+ multiclass I3DNow_binop_rm<bits<8> opc, string Mn> {
+ def rr : I3DNow_binop<opc, MRMSrcReg, (ins VR64:$src1, VR64:$src2), Mn>;
+ def rm : I3DNow_binop<opc, MRMSrcMem, (ins VR64:$src1, i64mem:$src2), Mn>;
+ }
+}
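+
+// For example, "defm PFADD : I3DNow_binop_rm<0x9E, "pfadd">" below expands to
+// PFADDrr (VR64, VR64) and PFADDrm (VR64, i64mem).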
+
+defm PAVGUSB : I3DNow_binop_rm<0xBF, "pavgusb">;
+defm PF2ID : I3DNow_binop_rm<0x1D, "pf2id">;
+defm PFACC : I3DNow_binop_rm<0xAE, "pfacc">;
+defm PFADD : I3DNow_binop_rm<0x9E, "pfadd">;
+defm PFCMPEQ : I3DNow_binop_rm<0xB0, "pfcmpeq">;
+defm PFCMPGE : I3DNow_binop_rm<0x90, "pfcmpge">;
+defm PFCMPGT : I3DNow_binop_rm<0xA0, "pfcmpgt">;
+defm PFMAX : I3DNow_binop_rm<0xA4, "pfmax">;
+defm PFMIN : I3DNow_binop_rm<0x94, "pfmin">;
+defm PFMUL : I3DNow_binop_rm<0xB4, "pfmul">;
+defm PFRCP : I3DNow_binop_rm<0x96, "pfrcp">;
+defm PFRCPIT1 : I3DNow_binop_rm<0xA6, "pfrcpit1">;
+defm PFRCPIT2 : I3DNow_binop_rm<0xB6, "pfrcpit2">;
+defm PFRSQIT1 : I3DNow_binop_rm<0xA7, "pfrsqit1">;
+defm PFRSQRT : I3DNow_binop_rm<0x97, "pfrsqrt">;
+defm PFSUB : I3DNow_binop_rm<0x9A, "pfsub">;
+defm PFSUBR : I3DNow_binop_rm<0xAA, "pfsubr">;
+defm PI2FD : I3DNow_binop_rm<0x0D, "pi2fd">;
+defm PMULHRW : I3DNow_binop_rm<0xB7, "pmulhrw">;
+
+
+def FEMMS : I3DNow<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)]>;
+
+def PREFETCH : I3DNow<0x0D, MRM0m, (outs), (ins i32mem:$addr),
+ "prefetch $addr", []>;
+
+// FIXME: Disassembler gets a bogus decode conflict.
+let isAsmParserOnly = 1 in {
+def PREFETCHW : I3DNow<0x0D, MRM1m, (outs), (ins i16mem:$addr),
+ "prefetchw $addr", []>;
+}
+
+// "3DNowA" instructions
+defm PF2IW : I3DNow_binop_rm<0x1C, "pf2iw">;
+defm PI2FW : I3DNow_binop_rm<0x0C, "pi2fw">;
+defm PFNACC : I3DNow_binop_rm<0x8A, "pfnacc">;
+defm PFPNACC : I3DNow_binop_rm<0x8E, "pfpnacc">;
+defm PSWAPD : I3DNow_binop_rm<0xBB, "pswapd">;
diff --git a/contrib/llvm/lib/Target/X86/X86Instr64bit.td b/contrib/llvm/lib/Target/X86/X86Instr64bit.td
deleted file mode 100644
index 0884b61..0000000
--- a/contrib/llvm/lib/Target/X86/X86Instr64bit.td
+++ /dev/null
@@ -1,2250 +0,0 @@
-//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the X86-64 instruction set, defining the instructions,
-// and properties of the instructions which are needed for code generation,
-// machine code emission, and analysis.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Operand Definitions.
-//
-
-// 64-bits but only 32 bits are significant.
-def i64i32imm : Operand<i64> {
- let ParserMatchClass = ImmSExti64i32AsmOperand;
-}
-
-// 64-bits but only 32 bits are significant, and those bits are treated as being
-// pc relative.
-def i64i32imm_pcrel : Operand<i64> {
- let PrintMethod = "print_pcrel_imm";
- let ParserMatchClass = X86AbsMemAsmOperand;
-}
-
-
-// 64-bits but only 8 bits are significant.
-def i64i8imm : Operand<i64> {
- let ParserMatchClass = ImmSExti64i8AsmOperand;
-}
-
-def lea64_32mem : Operand<i32> {
- let PrintMethod = "printi32mem";
- let AsmOperandLowerMethod = "lower_lea64_32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-
-
-// Special i64mem for addresses of load folding tail calls. These are not
-// allowed to use callee-saved registers since they must be scheduled
-// after callee-saved register are popped.
-def i64mem_TC : Operand<i64> {
- let PrintMethod = "printi64mem";
- let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-
-//===----------------------------------------------------------------------===//
-// Complex Pattern Definitions.
-//
-def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
- [add, sub, mul, X86mul_imm, shl, or, frameindex,
- X86WrapperRIP], []>;
-
-def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
- [tglobaltlsaddr], []>;
-
-//===----------------------------------------------------------------------===//
-// Pattern fragments.
-//
-
-def i64immSExt8 : PatLeaf<(i64 immSext8)>;
-
-def GetLo32XForm : SDNodeXForm<imm, [{
- // Transformation function: get the low 32 bits.
- return getI32Imm((unsigned)N->getZExtValue());
-}]>;
-
-def i64immSExt32 : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>;
-
-
-def i64immZExt32 : PatLeaf<(i64 imm), [{
- // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // unsigned field.
- return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
-}]>;
-
-def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
-def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
-def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
-
-def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
-def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
-def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
-def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
-
-def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
-def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
-def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
-def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
-
-//===----------------------------------------------------------------------===//
-// Instruction list...
-//
-
-// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
-// a stack adjustment and the codegen must know that they may modify the stack
-// pointer before prolog-epilog rewriting occurs.
-// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
-// sub / add which can clobber EFLAGS.
-let Defs = [RSP, EFLAGS], Uses = [RSP] in {
-def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
- "#ADJCALLSTACKDOWN",
- [(X86callseq_start timm:$amt)]>,
- Requires<[In64BitMode]>;
-def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
- "#ADJCALLSTACKUP",
- [(X86callseq_end timm:$amt1, timm:$amt2)]>,
- Requires<[In64BitMode]>;
-}
-
-// Interrupt Instructions
-def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>;
-
-//===----------------------------------------------------------------------===//
-// Call Instructions...
-//
-let isCall = 1 in
- // All calls clobber the non-callee saved registers. RSP is marked as
- // a use to prevent stack-pointer assignments that appear immediately
- // before calls from potentially appearing dead. Uses for argument
- // registers are added manually.
- let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [RSP] in {
-
- // NOTE: this pattern doesn't match "X86call imm", because we do not know
- // that the offset between an arbitrary immediate and the call will fit in
- // the 32-bit pcrel field that we have.
- def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
- "call{q}\t$dst", []>,
- Requires<[In64BitMode, NotWin64]>;
- def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
- "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
- Requires<[NotWin64]>;
- def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
- "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
- Requires<[NotWin64]>;
-
- def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
- "lcall{q}\t{*}$dst", []>;
- }
-
- // FIXME: We need to teach codegen about single list of call-clobbered
- // registers.
-let isCall = 1, isCodeGenOnly = 1 in
- // All calls clobber the non-callee saved registers. RSP is marked as
- // a use to prevent stack-pointer assignments that appear immediately
- // before calls from potentially appearing dead. Uses for argument
- // registers are added manually.
- let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
- Uses = [RSP] in {
- def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
- "call\t$dst", []>,
- Requires<[IsWin64]>;
- def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
- "call\t{*}$dst",
- [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
- def WINCALL64m : I<0xFF, MRM2m, (outs),
- (ins i64mem:$dst, variable_ops), "call\t{*}$dst",
- [(X86call (loadi64 addr:$dst))]>,
- Requires<[IsWin64]>;
- }
-
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
- isCodeGenOnly = 1 in
- let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [RSP] in {
- def TCRETURNdi64 : I<0, Pseudo, (outs),
- (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset", []>;
- def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
- variable_ops),
- "#TC_RETURN $dst $offset", []>;
- let mayLoad = 1 in
- def TCRETURNmi64 : I<0, Pseudo, (outs),
- (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset", []>;
-
- def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
- (ins i64i32imm_pcrel:$dst, variable_ops),
- "jmp\t$dst # TAILCALL", []>;
- def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL", []>;
-
- let mayLoad = 1 in
- def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL", []>;
-}
-
-// Branches
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
- def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp{q}\t$dst", []>;
- def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
- [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
- def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
- [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
- def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
- "ljmp{q}\t{*}$dst", []>;
-}
-
-//===----------------------------------------------------------------------===//
-// EH Pseudo Instructions
-//
-let isTerminator = 1, isReturn = 1, isBarrier = 1,
- hasCtrlDep = 1, isCodeGenOnly = 1 in {
-def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
- "ret\t#eh_return, addr: $addr",
- [(X86ehret GR64:$addr)]>;
-
-}
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions...
-//
-
-def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
-let mayLoad = 1 in
-def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
-
-let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
-def LEAVE64 : I<0xC9, RawFrm,
- (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
-let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
-let mayLoad = 1 in {
-def POP64r : I<0x58, AddRegFrm,
- (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
-def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
-def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
-}
-let mayStore = 1 in {
-def PUSH64r : I<0x50, AddRegFrm,
- (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
-def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
-def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
-}
-}
-
-let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
-def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
- "push{q}\t$imm", []>;
-def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
- "push{q}\t$imm", []>;
-def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
- "push{q}\t$imm", []>;
-}
-
-let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
-def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
- Requires<[In64BitMode]>;
-let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
-def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
- Requires<[In64BitMode]>;
-
-def LEA64_32r : I<0x8D, MRMSrcMem,
- (outs GR32:$dst), (ins lea64_32mem:$src),
- "lea{l}\t{$src|$dst}, {$dst|$src}",
- [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
-
-let isReMaterializable = 1 in
-def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "lea{q}\t{$src|$dst}, {$dst|$src}",
- [(set GR64:$dst, lea64addr:$src)]>;
-
-let Constraints = "$src = $dst" in
-def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
- "bswap{q}\t$dst",
- [(set GR64:$dst, (bswap GR64:$src))]>, TB;
-
-// Bit scan instructions.
-let Defs = [EFLAGS] in {
-def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
-def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;
-
-def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
-def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
-} // Defs = [EFLAGS]
-
-// Repeat string ops
-let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
-def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
- [(X86rep_movs i64)]>, REP;
-let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
-def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
- [(X86rep_stos i64)]>, REP;
-
-let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in
-def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;
-
-let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
-def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;
-
-def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;
-
-def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
-
-// Fast system-call instructions
-def SYSEXIT64 : RI<0x35, RawFrm,
- (outs), (ins), "sysexit", []>, TB, Requires<[In64BitMode]>;
-
-//===----------------------------------------------------------------------===//
-// Move Instructions...
-//
-
-let neverHasSideEffects = 1 in
-def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
-def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
- "movabs{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, imm:$src)]>;
-def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, i64immSExt32:$src)]>;
-}
-
-// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
-// movabsq.
-let isAsmParserOnly = 1 in {
-def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-}
-
-let isCodeGenOnly = 1 in {
-def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-}
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (load addr:$src))]>;
-
-def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(store GR64:$src, addr:$dst)]>;
-def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- [(store i64immSExt32:$src, addr:$dst)]>;
-
-/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
-let isCodeGenOnly = 1 in {
-let neverHasSideEffects = 1 in
-def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-
-let mayLoad = 1,
- canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- []>;
-
-let mayStore = 1 in
-def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
- "mov{q}\t{$src, $dst|$dst, $src}",
- []>;
-}
-
-// FIXME: These definitions are utterly broken
-// Just leave them commented out for now because they're useless outside
-// of the large code model, and most compilers won't generate the instructions
-// in question.
-/*
-def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
- "mov{q}\t{$src, %rax|%rax, $src}", []>;
-def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
- "mov{q}\t{$src, %rax|%rax, $src}", []>;
-def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
- "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
-def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
- "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
-*/
-
-// Moves to and from segment registers
-def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>;
-
-// Moves to and from debug registers
-def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-// Moves to and from control registers
-def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-// Sign/Zero extenders
-
-// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
-// operand, which makes it a rare instruction with an 8-bit register
-// operand that can never access an h register. If support for h registers
-// were generalized, this would require a special register class.
-def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
- "movs{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR8:$src))]>, TB;
-def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
- "movs{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
-def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "movs{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR16:$src))]>, TB;
-def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "movs{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
-def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
- "movs{lq|xd}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR32:$src))]>;
-def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
- "movs{lq|xd}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
-
-// movzbq and movzwq encodings for the disassembler
-def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
-
-// Use movzbl instead of movzbq when the destination is a register; it's
-// equivalent due to implicit zero-extending, and it has a smaller encoding.
-def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
- "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
-def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
- "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
-// Use movzwl instead of movzwq when the destination is a register; it's
-// equivalent due to implicit zero-extending, and it has a smaller encoding.
-def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
-def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
-
-// There's no movzlq instruction, but movl can be used for this purpose, using
-// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
-// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
-// zero-extension, however this isn't possible when the 32-bit value is
-// defined by a truncate or is copied from something where the high bits aren't
-// necessarily all zero. In such cases, we fall back to these explicit zext
-// instructions.
-def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
- "", [(set GR64:$dst, (zext GR32:$src))]>;
-def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
- "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
-
-// Any instruction that defines a 32-bit result leaves the high half of the
-// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
-// be copying from a truncate. And x86's cmov doesn't do anything if the
-// condition is false. But any other 32-bit operation will zero-extend
-// up to 64 bits.
-def def32 : PatLeaf<(i32 GR32:$src), [{
- return N->getOpcode() != ISD::TRUNCATE &&
- N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
- N->getOpcode() != ISD::CopyFromReg &&
- N->getOpcode() != X86ISD::CMOV;
-}]>;
-
-// In the case of a 32-bit def that is known to implicitly zero-extend,
-// we can use a SUBREG_TO_REG.
-def : Pat<(i64 (zext def32:$src)),
- (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
-
-let neverHasSideEffects = 1 in {
- let Defs = [RAX], Uses = [EAX] in
- def CDQE : RI<0x98, RawFrm, (outs), (ins),
- "{cltq|cdqe}", []>; // RAX = signext(EAX)
-
- let Defs = [RAX,RDX], Uses = [RAX] in
- def CQO : RI<0x99, RawFrm, (outs), (ins),
- "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
-}
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions...
-//
-
-let Defs = [EFLAGS] in {
-
-def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
- "add{q}\t{$src, %rax|%rax, $src}", []>;
-
-let Constraints = "$src1 = $dst" in {
-let isConvertibleToThreeAddress = 1 in {
-let isCommutable = 1 in
-// Register-Register Addition
-def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86add_flag GR64:$src1, GR64:$src2))]>;
-
-// These are alternate spellings for use by the disassembler, we mark them as
-// code gen only to ensure they aren't matched by the assembler.
-let isCodeGenOnly = 1 in {
- def ADD64rr_alt : RI<0x03, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-// Register-Integer Addition
-def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
-def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
- (ins GR64:$src1, i64i32imm:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isConvertibleToThreeAddress
-
-// Register-Memory Addition
-def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86add_flag GR64:$src1, (load addr:$src2)))]>;
-
-} // Constraints = "$src1 = $dst"
-
-// Memory-Register Addition
-def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-
-let Uses = [EFLAGS] in {
-
-def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
- "adc{q}\t{$src, %rax|%rax, $src}", []>;
-
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in
-def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
-
-let isCodeGenOnly = 1 in {
-def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR32:$dst),
- (ins GR64:$src1, GR64:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
-
-def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
-def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
- (ins GR64:$src1, i64i32imm:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), i64immSExt8:$src2),
- addr:$dst)]>;
-def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), i64immSExt32:$src2),
- addr:$dst)]>;
-} // Uses = [EFLAGS]
-
-let Constraints = "$src1 = $dst" in {
-// Register-Register Subtraction
-def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86sub_flag GR64:$src1, GR64:$src2))]>;
-
-let isCodeGenOnly = 1 in {
-def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-// Register-Memory Subtraction
-def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86sub_flag GR64:$src1, (load addr:$src2)))]>;
-
-// Register-Integer Subtraction
-def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
-def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
- (ins GR64:$src1, i64i32imm:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
- "sub{q}\t{$src, %rax|%rax, $src}", []>;
-
-// Memory-Register Subtraction
-def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-
-// Memory-Integer Subtraction
-def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)]>;
-def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)]>;
-
-let Uses = [EFLAGS] in {
-let Constraints = "$src1 = $dst" in {
-def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
-
-let isCodeGenOnly = 1 in {
-def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
-
-def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
-def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
- (ins GR64:$src1, i64i32imm:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
- "sbb{q}\t{$src, %rax|%rax, $src}", []>;
-
-def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
-def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
-} // Uses = [EFLAGS]
-} // Defs = [EFLAGS]
-
-// Unsigned multiplication
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
-def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
- "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
-let mayLoad = 1 in
-def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
- "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
-
-// Signed multiplication
-def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
- "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
-let mayLoad = 1 in
-def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
- "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
-}
-
-let Defs = [EFLAGS] in {
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in
-// Register-Register Signed Integer Multiplication
-def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;
-
-// Register-Memory Signed Integer Multiplication
-def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
-} // Constraints = "$src1 = $dst"
-
- // Surprisingly enough, these are not two-address instructions!
-
-// Register-Integer Signed Integer Multiplication
-def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
- (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
-def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;
-
-// Memory-Integer Signed Integer Multiplication
-def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
- (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1),
- i64immSExt8:$src2))]>;
-def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
- (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1),
- i64immSExt32:$src2))]>;
-} // Defs = [EFLAGS]
-
-// Unsigned division / remainder
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
-// RDX:RAX/r64 = RAX,RDX
-def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
- "div{q}\t$src", []>;
-// Signed division / remainder
-// RDX:RAX/r64 = RAX,RDX
-def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
- "idiv{q}\t$src", []>;
-let mayLoad = 1 in {
-// RDX:RAX/[mem64] = RAX,RDX
-def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
- "div{q}\t$src", []>;
-// RDX:RAX/[mem64] = RAX,RDX
-def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
- "idiv{q}\t$src", []>;
-}
-}
-
-// Unary instructions
-let Defs = [EFLAGS], CodeSize = 2 in {
-let Constraints = "$src = $dst" in
-def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
- [(set GR64:$dst, (ineg GR64:$src)),
- (implicit EFLAGS)]>;
-def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
- [(store (ineg (loadi64 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
-
-let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
-def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
-def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
- [(store (add (loadi64 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>;
-
-let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
-def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
-def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
- [(store (add (loadi64 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>;
-
-// In 64-bit mode, single byte INC and DEC cannot be encoded.
-let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
-// Can transform into LEA.
-def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
- "inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
- OpSize, Requires<[In64BitMode]>;
-def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
- "inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
- Requires<[In64BitMode]>;
-def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
- "dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
- OpSize, Requires<[In64BitMode]>;
-def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
- "dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
- Requires<[In64BitMode]>;
-} // Constraints = "$src = $dst", isConvertibleToThreeAddress
-
-// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
-// how to unfold them.
-def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
-def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
-def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
-def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
-} // Defs = [EFLAGS], CodeSize
-
-
-let Defs = [EFLAGS] in {
-// Shift instructions
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in
-def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
- "shl{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (shl GR64:$src1, CL))]>;
-let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
-def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
- (ins GR64:$src1, i8imm:$src2),
- "shl{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
-// NOTE: We don't include patterns for shifts of a register by one, because
-// 'add reg,reg' is cheaper.
-def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
- "shl{q}\t$dst", []>;
-} // Constraints = "$src1 = $dst"
-
-let Uses = [CL] in
-def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
- "shl{q}\t{%cl, $dst|$dst, %CL}",
- [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
-def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
- "shl{q}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
- "shl{q}\t$dst",
- [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in
-def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
- "shr{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (srl GR64:$src1, CL))]>;
-def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
- "shr{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
-def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
- "shr{q}\t$dst",
- [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
-} // Constraints = "$src1 = $dst"
-
-let Uses = [CL] in
-def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
- "shr{q}\t{%cl, $dst|$dst, %CL}",
- [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
-def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
- "shr{q}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
- "shr{q}\t$dst",
- [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in
-def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
- "sar{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (sra GR64:$src1, CL))]>;
-def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
- (ins GR64:$src1, i8imm:$src2),
- "sar{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
-def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
- "sar{q}\t$dst",
- [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
-} // Constraints = "$src = $dst"
-
-let Uses = [CL] in
-def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
- "sar{q}\t{%cl, $dst|$dst, %CL}",
- [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
-def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
- "sar{q}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
- "sar{q}\t$dst",
- [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-
-// Rotate instructions
-
-let Constraints = "$src = $dst" in {
-def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
- "rcl{q}\t{1, $dst|$dst, 1}", []>;
-def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
- "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src),
- "rcr{q}\t{1, $dst|$dst, 1}", []>;
-def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
- "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-let Uses = [CL] in {
-def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
- "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
-def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
- "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
-}
-} // Constraints = "$src = $dst"
-
-def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
- "rcl{q}\t{1, $dst|$dst, 1}", []>;
-def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
- "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
- "rcr{q}\t{1, $dst|$dst, 1}", []>;
-def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
- "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-let Uses = [CL] in {
-def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
- "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
-def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
- "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
-}
-
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in
-def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
- "rol{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
-def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
- (ins GR64:$src1, i8imm:$src2),
- "rol{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
-def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
- "rol{q}\t$dst",
- [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
-} // Constraints = "$src1 = $dst"
-
-let Uses = [CL] in
-def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
- "rol{q}\t{%cl, $dst|$dst, %CL}",
- [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
-def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
- "rol{q}\t{$src, $dst|$dst, $src}",
- [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
- "rol{q}\t$dst",
- [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in
-def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
- "ror{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
-def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
- (ins GR64:$src1, i8imm:$src2),
- "ror{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
-def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
- "ror{q}\t$dst",
- [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
-} // Constraints = "$src1 = $dst"
-
-let Uses = [CL] in
-def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
- "ror{q}\t{%cl, $dst|$dst, %CL}",
- [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
-def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
- "ror{q}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
- "ror{q}\t$dst",
- [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-
-// Double shift instructions (generalizations of rotate)
-let Constraints = "$src1 = $dst" in {
-let Uses = [CL] in {
-def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
- TB;
-def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
- TB;
-}
-
-let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
-def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
- (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2, i8imm:$src3),
- "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
- (i8 imm:$src3)))]>,
- TB;
-def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
- (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2, i8imm:$src3),
- "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
- (i8 imm:$src3)))]>,
- TB;
-} // isCommutable
-} // Constraints = "$src1 = $dst"
-
-let Uses = [CL] in {
-def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
- addr:$dst)]>, TB;
-def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
- addr:$dst)]>, TB;
-}
-def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
- (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
- "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB;
-def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
- (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
- "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB;
-} // Defs = [EFLAGS]
-
-//===----------------------------------------------------------------------===//
-// Logical Instructions...
-//
-
-let Constraints = "$src = $dst", AddedComplexity = 15 in
-def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
- [(set GR64:$dst, (not GR64:$src))]>;
-def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
- [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
-
-let Defs = [EFLAGS] in {
-def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
- "and{q}\t{$src, %rax|%rax, $src}", []>;
-
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in
-def AND64rr : RI<0x21, MRMDestReg,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86and_flag GR64:$src1, GR64:$src2))]>;
-let isCodeGenOnly = 1 in {
-def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-def AND64rm : RI<0x23, MRMSrcMem,
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86and_flag GR64:$src1, (load addr:$src2)))]>;
-def AND64ri8 : RIi8<0x83, MRM4r,
- (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
-def AND64ri32 : RIi32<0x81, MRM4r,
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def AND64mr : RI<0x21, MRMDestMem,
- (outs), (ins i64mem:$dst, GR64:$src),
- "and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR64:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def AND64mi8 : RIi8<0x83, MRM4m,
- (outs), (ins i64mem:$dst, i64i8imm :$src),
- "and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def AND64mi32 : RIi32<0x81, MRM4m,
- (outs), (ins i64mem:$dst, i64i32imm:$src),
- "and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in
-def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86or_flag GR64:$src1, GR64:$src2))]>;
-let isCodeGenOnly = 1 in {
-def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86or_flag GR64:$src1, (load addr:$src2)))]>;
-def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
-def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
- (ins GR64:$src1, i64i32imm:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR64:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
- "or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
-def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
- "or{q}\t{$src, %rax|%rax, $src}", []>;
-
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in
-def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86xor_flag GR64:$src1, GR64:$src2))]>;
-let isCodeGenOnly = 1 in {
-def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}", []>;
-}
-def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
-def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
- (ins GR64:$src1, i64i8imm:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
-def XOR64ri32 : RIi32<0x81, MRM6r,
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, EFLAGS,
- (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // Constraints = "$src1 = $dst"
-
-def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
- "xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
-def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
- "xor{q}\t{$src, %rax|%rax, $src}", []>;
-
-} // Defs = [EFLAGS]
-
-//===----------------------------------------------------------------------===//
-// Comparison Instructions...
-//
-
-// Integer comparison
-let Defs = [EFLAGS] in {
-def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
- "test{q}\t{$src, %rax|%rax, $src}", []>;
-let isCommutable = 1 in
-def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
- "test{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
-def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
- "test{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
- 0))]>;
-def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
- (ins GR64:$src1, i64i32imm:$src2),
- "test{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
- 0))]>;
-def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
- (ins i64mem:$src1, i64i32imm:$src2),
- "test{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
- i64immSExt32:$src2), 0))]>;
-
-
-def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
- "cmp{q}\t{$src, %rax|%rax, $src}", []>;
-def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;
-
-// These are alternate spellings for use by the disassembler; we mark them as
-// code gen only to ensure they aren't matched by the assembler.
-let isCodeGenOnly = 1 in {
- def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
-}
-
-def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
-def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
-def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
-def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
-def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
- i64immSExt8:$src2))]>;
-def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
- (ins i64mem:$src1, i64i32imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
- i64immSExt32:$src2))]>;
-} // Defs = [EFLAGS]
-
-// Bit tests.
-// TODO: add selection patterns for BTC, BTR, and BTS.
-let Defs = [EFLAGS] in {
-def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "bt{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
-
-// Unlike with the register+register form, the memory+register form of the
-// bt instruction does not ignore the high bits of the index. From ISel's
-// perspective, this is pretty bizarre, so the folded-load pattern below is
-// disabled for now.
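-// (Illustrative: hardware treats "bt mem, reg" as a bit-string operation, so
-// a 64-bit bit index of 100 accesses the qword at $src1 + 8, bit 36.)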
-def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "bt{q}\t{$src2, $src1|$src1, $src2}",
-// [(X86bt (loadi64 addr:$src1), GR64:$src2),
-// (implicit EFLAGS)]
- []
- >, TB;
-
-def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "bt{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
-// Note that these instructions don't need FastBTMem because that
-// only applies when the other operand is in a register. When it's
-// an immediate, bt is still fast.
-def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "bt{q}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86bt (loadi64 addr:$src1),
- i64immSExt8:$src2))]>, TB;
-
-def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-
-def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-
-def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
-} // Defs = [EFLAGS]
-
-// Conditional moves
-let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
-def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovae{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmove{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovne{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_NE, EFLAGS))]>, TB;
-def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_BE, EFLAGS))]>, TB;
-def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmova{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovl{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovge{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovle{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovg{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovs{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovns{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovp{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovo{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "cmovno{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
- X86_COND_NO, EFLAGS))]>, TB;
-} // isCommutable = 1
-
-def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovae{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmove{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovne{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_NE, EFLAGS))]>, TB;
-def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_BE, EFLAGS))]>, TB;
-def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmova{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovl{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovge{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovle{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovg{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovs{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovns{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovp{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovo{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
- (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "cmovno{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_NO, EFLAGS))]>, TB;
-} // Constraints = "$src1 = $dst"
-
-// Use sbb to materialize carry flag into a GPR.
-// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
-// However, Pat<> can't replicate the destination reg into the inputs of the
-// result.
-// FIXME: Change this to have encoding Pseudo when X86MCCodeEmitter replaces
-// X86CodeEmitter.
-let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
-def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
-
-def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
- (SETB_C64r)>;
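-// Illustrative semantics (not an extra pattern): "sbb %rax, %rax" computes
-// rax - rax - CF, i.e. 0 when the carry flag is clear and -1 when it is set.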
-
-//===----------------------------------------------------------------------===//
-// Descriptor-table support instructions
-
-// LLDT is not interpreted specially in 64-bit mode because there is no sign
-// extension.
-def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
- "sldt{q}\t$dst", []>, TB;
-def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
- "sldt{q}\t$dst", []>, TB;
-
-//===----------------------------------------------------------------------===//
-// Alias Instructions
-//===----------------------------------------------------------------------===//
-
-// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
-// smaller encoding, but doing so at isel time interferes with rematerialization
-// in the current register allocator. For now, this is rewritten when the
-// instruction is lowered to an MCInst.
-// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
-// when we have a better way to specify isel priority.
-let Defs = [EFLAGS],
- AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, 0)]>;
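-// (Illustrative: opcode 0x31 is a 32-bit xor; clearing the 32-bit
-// subregister implicitly zeroes all 64 bits and encodes shorter than a
-// 64-bit move of zero.)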
-
-// Materialize an i64 constant whose top 32 bits are zero. This could
-// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
-// zero-extension; however, that would make it more difficult to rematerialize.
-let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
- "", [(set GR64:$dst, i64immZExt32:$src)]>;
-
-//===----------------------------------------------------------------------===//
-// Thread Local Storage Instructions
-//===----------------------------------------------------------------------===//
-
-// ELF TLS Support
-// All calls clobber the non-callee saved registers. RSP is marked as
-// a use to prevent stack-pointer assignments that appear immediately
-// before calls from potentially appearing dead.
-let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [RSP] in
-def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
- ".byte\t0x66; "
- "leaq\t$sym(%rip), %rdi; "
- ".word\t0x6666; "
- "rex64; "
- "call\t__tls_get_addr@PLT",
- [(X86tlsaddr tls64addr:$sym)]>,
- Requires<[In64BitMode]>;
-
-// Darwin TLS Support
-// For x86_64, the address of the thunk is passed in %rdi, on return
-// the address of the variable is in %rax. All other registers are preserved.
-let Defs = [RAX],
- Uses = [RDI],
- usesCustomInserter = 1 in
-def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
- "# TLSCall_64",
- [(X86TLSCall addr:$sym)]>,
- Requires<[In64BitMode]>;
-
-let AddedComplexity = 5, isCodeGenOnly = 1 in
-def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "movq\t%gs:$src, $dst",
- [(set GR64:$dst, (gsload addr:$src))]>, SegGS;
-
-let AddedComplexity = 5, isCodeGenOnly = 1 in
-def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "movq\t%fs:$src, $dst",
- [(set GR64:$dst, (fsload addr:$src))]>, SegFS;
-
-//===----------------------------------------------------------------------===//
-// Atomic Instructions
-//===----------------------------------------------------------------------===//
-
-// TODO: Get this to fold the constant into the instruction.
-let hasSideEffects = 1, Defs = [ESP] in
-def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
- "lock\n\t"
- "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
- [(X86MemBarrierNoSSE GR64:$zero)]>,
- Requires<[In64BitMode]>, LOCK;
-
-let Defs = [RAX, EFLAGS], Uses = [RAX] in {
-def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
- "lock\n\t"
- "cmpxchgq\t$swap,$ptr",
- [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
-}
-
-let Constraints = "$val = $dst" in {
-let Defs = [EFLAGS] in
-def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
- "lock\n\t"
- "xadd\t$val, $ptr",
- [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
- TB, LOCK;
-
-def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$val,i64mem:$ptr),
- "xchg{q}\t{$val, $ptr|$ptr, $val}",
- [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
-
-def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
- "xchg{q}\t{$val, $src|$src, $val}", []>;
-}
-
-def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
-let mayLoad = 1, mayStore = 1 in
-def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
-let mayLoad = 1, mayStore = 1 in
-def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
-def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
- "cmpxchg16b\t$dst", []>, TB;
-
-def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
- "xchg{q}\t{$src, %rax|%rax, $src}", []>;
-
-// Optimized codegen when the non-memory output is not used.
-let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
-// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
-def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "lock\n\t"
- "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
- (ins i64mem:$dst, i64i8imm :$src2),
- "lock\n\t"
- "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
- (ins i64mem:$dst, i64i32imm :$src2),
- "lock\n\t"
- "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "lock\n\t"
- "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
- (ins i64mem:$dst, i64i8imm :$src2),
- "lock\n\t"
- "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
- (ins i64mem:$dst, i64i32imm:$src2),
- "lock\n\t"
- "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
- "lock\n\t"
- "inc{q}\t$dst", []>, LOCK;
-def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
- "lock\n\t"
- "dec{q}\t$dst", []>, LOCK;
-}
-// Atomic exchange, and, or, xor, nand, and signed/unsigned min/max
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomInserter = 1 in {
-def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMAND64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
-def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMOR64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
-def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMXOR64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
-def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMNAND64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
-def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
- "#ATOMMIN64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
-def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMMAX64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
-def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMUMIN64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
-def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
- "#ATOMUMAX64 PSEUDO!",
- [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
-}
-
-// Segmentation support instructions
-
-// The i16mem operand in LAR64rm and the GR32 operand in LAR64rr are not typos.
-def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
- "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;
-
-def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
- "push{q}\t%fs", []>, TB;
-def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
- "push{q}\t%gs", []>, TB;
-
-def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
- "pop{q}\t%fs", []>, TB;
-def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
- "pop{q}\t%gs", []>, TB;
-
-def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
- "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;
-
-// Specialized register support
-
-// no m form encodable; use SMSW16m
-def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
- "smsw{q}\t$dst", []>, TB;
-
-// String manipulation instructions
-
-def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable, when not in the
-// small code model, should use 'movabs'. FIXME: This is really a hack; the
-// 'movabs' predicate should handle this sort of thing.
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
- (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
- (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
- (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
- (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
- (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
-
-// In static codegen with the small code model, we can get the address of a
-// label into a register with 'movl'. FIXME: This is a hack; the 'imm'
-// predicate of MOV64ri64i32 should accept these.
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
- (MOV64ri64i32 tconstpool :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
- (MOV64ri64i32 tjumptable :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
- (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
- (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
- (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;
-
-// In the kernel code model, we can get the address of a label into a
-// register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
-// MOV64ri32 should accept these.
-def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
- (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
-def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
- (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
-def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
- (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
-def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
- (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
-def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
- (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
-
-// With the small code model and -static mode, it is safe to store global
-// addresses directly as immediates. FIXME: This is really a hack; the 'imm'
-// predicate for MOV64mi32 should handle this sort of thing.
-def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tconstpool:$src)>,
- Requires<[NearData, IsStatic]>;
-def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tjumptable:$src)>,
- Requires<[NearData, IsStatic]>;
-def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
- Requires<[NearData, IsStatic]>;
-def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, texternalsym:$src)>,
- Requires<[NearData, IsStatic]>;
-def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tblockaddress:$src)>,
- Requires<[NearData, IsStatic]>;
-
-// Calls
-// Direct PC-relative function call for the small code model. The 32-bit
-// displacement is sign-extended to 64 bits.
-def : Pat<(X86call (i64 tglobaladdr:$dst)),
- (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
-def : Pat<(X86call (i64 texternalsym:$dst)),
- (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;
-
-def : Pat<(X86call (i64 tglobaladdr:$dst)),
- (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
-def : Pat<(X86call (i64 texternalsym:$dst)),
- (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
-
-// Tail call patterns.
-def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
- (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
-
-def : Pat<(X86tcret (load addr:$dst), imm:$off),
- (TCRETURNmi64 addr:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
-
-def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
-
-def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
- (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
- Requires<[In64BitMode]>;
-
-// TLS addressing requires a few special patterns:
-// This corresponds to movabs $foo@tpoff, %rax
-def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
- (MOV64ri tglobaltlsaddr :$dst)>;
-// This corresponds to add $foo@tpoff, %rax
-def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
- (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
-// This corresponds to mov foo@tpoff(%rbx), %eax
-def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
- (MOV64rm tglobaltlsaddr :$dst)>;
-
-// Comparisons.
-
-// TEST R,R is smaller than CMP R,0
-def : Pat<(X86cmp GR64:$src1, 0),
- (TEST64rr GR64:$src1, GR64:$src1)>;
-
-// Conditional moves with folded loads, with the operands swapped and the
-// conditions inverted.
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
- (CMOVAE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
- (CMOVB64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
- (CMOVNE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
- (CMOVE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
- (CMOVA64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
- (CMOVBE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
- (CMOVGE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
- (CMOVL64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
- (CMOVG64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
- (CMOVLE64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
- (CMOVNP64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
- (CMOVP64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
- (CMOVNS64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
- (CMOVS64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
- (CMOVNO64rm GR64:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
- (CMOVO64rm GR64:$src2, addr:$src1)>;
-
-// zextload bool -> zextload byte
-def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
-
-// extload
-// When extloading from 16-bit and smaller memory locations into 64-bit
-// registers, use zero-extending loads so that the entire 64-bit register is
-// defined, avoiding partial-register updates.
-def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
-def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
-def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
-// For other extloads, use subregs, since the high contents of the register are
-// defined after an extload.
-def : Pat<(extloadi64i32 addr:$src),
- (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
- sub_32bit)>;
-
-// anyext. Define these to do an explicit zero-extend to
-// avoid partial-register updates.
-def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
-def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
-def : Pat<(i64 (anyext GR32:$src)),
- (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
-
-//===----------------------------------------------------------------------===//
-// Some peepholes
-//===----------------------------------------------------------------------===//
-
-// Odd encoding trick: -128 fits into an 8-bit immediate field while
-// +128 doesn't, so in this special case use a sub instead of an add.
-def : Pat<(add GR64:$src1, 128),
- (SUB64ri8 GR64:$src1, -128)>;
-def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
- (SUB64mi8 addr:$dst, -128)>;
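-// For example, "addq $128, %rdi" needs a 32-bit immediate encoding, while
-// the equivalent "subq $-128, %rdi" fits the sign-extended 8-bit form.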
-
-// The same trick applies for 32-bit immediate fields in 64-bit
-// instructions.
-def : Pat<(add GR64:$src1, 0x0000000080000000),
- (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-def : Pat<(store (add (loadi64 addr:$dst), 0x00000000800000000), addr:$dst),
- (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
-
-// Use a 32-bit and with implicit zero-extension instead of a 64-bit and if it
-// has an immediate with at least 32 bits of leading zeros, to avoid needing to
-// materialize that immediate in a register first.
-def : Pat<(and GR64:$src, i64immZExt32:$imm),
- (SUBREG_TO_REG
- (i64 0),
- (AND32ri
- (EXTRACT_SUBREG GR64:$src, sub_32bit),
- (i32 (GetLo32XForm imm:$imm))),
- sub_32bit)>;
-
-// r & (2^32-1) ==> movz
-def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
- (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
-// r & (2^16-1) ==> movz
-def : Pat<(and GR64:$src, 0xffff),
- (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
-// r & (2^8-1) ==> movz
-def : Pat<(and GR64:$src, 0xff),
- (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
-// r & (2^8-1) ==> movz
-def : Pat<(and GR32:$src1, 0xff),
- (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
- Requires<[In64BitMode]>;
-// r & (2^8-1) ==> movz
-def : Pat<(and GR16:$src1, 0xff),
- (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
- Requires<[In64BitMode]>;
-
-// sext_inreg patterns
-def : Pat<(sext_inreg GR64:$src, i32),
- (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
-def : Pat<(sext_inreg GR64:$src, i16),
- (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
-def : Pat<(sext_inreg GR64:$src, i8),
- (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
-def : Pat<(sext_inreg GR32:$src, i8),
- (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
- Requires<[In64BitMode]>;
-def : Pat<(sext_inreg GR16:$src, i8),
- (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
- Requires<[In64BitMode]>;
-
-// trunc patterns
-def : Pat<(i32 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
-def : Pat<(i16 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
-def : Pat<(i8 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
-def : Pat<(i8 (trunc GR32:$src)),
- (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
- Requires<[In64BitMode]>;
-def : Pat<(i8 (trunc GR16:$src)),
- (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
- Requires<[In64BitMode]>;
-
-// h-register tricks.
-// For now, be conservative on x86-64 and use an h-register extract only if the
-// value is immediately zero-extended or stored, which are somewhat common
-// cases. This uses a bunch of code to prevent a register requiring a REX prefix
-// from being allocated in the same instruction as the h register, as there's
-// currently no way to describe this requirement to the register allocator.
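-// (For example, bits 15:8 of %eax are readable as "movzbl %ah, %ecx" only
-// when no REX prefix is present, hence the GR*_ABCD register-class copies
-// below.)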
-
-// h-register extract and zero-extend.
-def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
- (SUBREG_TO_REG
- (i64 0),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
- sub_8bit_hi)),
- sub_32bit)>;
-def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
- (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
- GR32_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-def : Pat<(srl GR16:$src, (i8 8)),
- (EXTRACT_SUBREG
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi)),
- sub_16bit)>,
- Requires<[In64BitMode]>;
-def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
- (SUBREG_TO_REG
- (i64 0),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi)),
- sub_32bit)>;
-def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
- (SUBREG_TO_REG
- (i64 0),
- (MOVZX32_NOREXrr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi)),
- sub_32bit)>;
-
-// h-register extract and store.
-def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
- (MOV8mr_NOREX
- addr:$dst,
- (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
- sub_8bit_hi))>;
-def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
- (MOV8mr_NOREX
- addr:$dst,
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
- (MOV8mr_NOREX
- addr:$dst,
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi))>,
- Requires<[In64BitMode]>;
-
-// (shl x, 1) ==> (add x, x)
-def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
-
-// (shl x (and y, 63)) ==> (shl x, y)
-def : Pat<(shl GR64:$src1, (and CL, 63)),
- (SHL64rCL GR64:$src1)>;
-def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SHL64mCL addr:$dst)>;
-
-def : Pat<(srl GR64:$src1, (and CL, 63)),
- (SHR64rCL GR64:$src1)>;
-def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SHR64mCL addr:$dst)>;
-
-def : Pat<(sra GR64:$src1, (and CL, 63)),
- (SAR64rCL GR64:$src1)>;
-def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
- (SAR64mCL addr:$dst)>;
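-// (These are safe because hardware uses only the low 6 bits of the count for
-// 64-bit shifts, so the explicit mask by 63 is redundant.)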
-
-// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
-let AddedComplexity = 5 in { // Try these before selecting to an OR.
-def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
- (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
- (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(or_is_add GR64:$src1, GR64:$src2),
- (ADD64rr GR64:$src1, GR64:$src2)>;
-} // AddedComplexity
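-// (For example, (or $x, 4) with bit 2 of $x known zero is really an add,
-// which later passes can turn into a non-clobbering LEA.)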
-
-// X86-specific add and sub which produce a flag.
-def : Pat<(addc GR64:$src1, GR64:$src2),
- (ADD64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(addc GR64:$src1, (load addr:$src2)),
- (ADD64rm GR64:$src1, addr:$src2)>;
-def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
- (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
- (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-def : Pat<(subc GR64:$src1, GR64:$src2),
- (SUB64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(subc GR64:$src1, (load addr:$src2)),
- (SUB64rm GR64:$src1, addr:$src2)>;
-def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
- (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(subc GR64:$src1, imm:$src2),
- (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-//===----------------------------------------------------------------------===//
-// EFLAGS-defining Patterns
-//===----------------------------------------------------------------------===//
-
-// addition
-def : Pat<(add GR64:$src1, GR64:$src2),
- (ADD64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(add GR64:$src1, i64immSExt8:$src2),
- (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(add GR64:$src1, i64immSExt32:$src2),
- (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
- (ADD64rm GR64:$src1, addr:$src2)>;
-
-// subtraction
-def : Pat<(sub GR64:$src1, GR64:$src2),
- (SUB64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
- (SUB64rm GR64:$src1, addr:$src2)>;
-def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
- (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
- (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Multiply
-def : Pat<(mul GR64:$src1, GR64:$src2),
- (IMUL64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
- (IMUL64rm GR64:$src1, addr:$src2)>;
-def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
- (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
- (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
- (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
-def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
- (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
-
-// inc/dec
-def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
-def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
-
-// or
-def : Pat<(or GR64:$src1, GR64:$src2),
- (OR64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(or GR64:$src1, i64immSExt8:$src2),
- (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(or GR64:$src1, i64immSExt32:$src2),
- (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
- (OR64rm GR64:$src1, addr:$src2)>;
-
-// xor
-def : Pat<(xor GR64:$src1, GR64:$src2),
- (XOR64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
- (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
- (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
- (XOR64rm GR64:$src1, addr:$src2)>;
-
-// and
-def : Pat<(and GR64:$src1, GR64:$src2),
- (AND64rr GR64:$src1, GR64:$src2)>;
-def : Pat<(and GR64:$src1, i64immSExt8:$src2),
- (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(and GR64:$src1, i64immSExt32:$src2),
- (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
- (AND64rm GR64:$src1, addr:$src2)>;
-
-//===----------------------------------------------------------------------===//
-// X86-64 SSE Instructions
-//===----------------------------------------------------------------------===//
-
-// Move instructions...
-
-def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))]>;
-def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
- (iPTR 0)))]>;
-
-def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert GR64:$src))]>;
-def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
-
-def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))]>;
-def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
-
diff --git a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
new file mode 100644
index 0000000..f0ea068
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -0,0 +1,1125 @@
+//===- X86InstrArithmetic.td - Integer Arithmetic Instrs ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the integer arithmetic instructions in the X86
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// LEA - Load Effective Address
+
+let neverHasSideEffects = 1 in
+def LEA16r : I<0x8D, MRMSrcMem,
+ (outs GR16:$dst), (ins i32mem:$src),
+ "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
+let isReMaterializable = 1 in
+def LEA32r : I<0x8D, MRMSrcMem,
+ (outs GR32:$dst), (ins i32mem:$src),
+ "lea{l}\t{$src|$dst}, {$dst|$src}",
+ [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
+
+def LEA64_32r : I<0x8D, MRMSrcMem,
+ (outs GR32:$dst), (ins lea64_32mem:$src),
+ "lea{l}\t{$src|$dst}, {$dst|$src}",
+ [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
+
+let isReMaterializable = 1 in
+def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "lea{q}\t{$src|$dst}, {$dst|$src}",
+ [(set GR64:$dst, lea64addr:$src)]>;
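+// (LEA evaluates the addressing-mode arithmetic without touching memory or
+// EFLAGS; e.g. "leaq (%rdi,%rsi,4), %rax" sets rax = rdi + rsi*4.)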
+
+
+
+//===----------------------------------------------------------------------===//
+// Fixed-Register Multiplication and Division Instructions.
+//
+
+// Extra precision multiplication
+
+// AL is really implied by AX, but the registers in Defs must match the
+// SDNode results (i8, i32).
+let Defs = [AL,EFLAGS,AX], Uses = [AL] in
+def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
+ // FIXME: Used for 8-bit mul; ignores the upper 8 bits of the result.
+ // This probably ought to be moved to a def : Pat<> if the
+ // syntax can be accepted.
+ [(set AL, (mul AL, GR8:$src)),
+ (implicit EFLAGS)]>; // AL,AH = AL*GR8
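+// For example, "mulb %bl" computes AX = AL * BL, leaving the high half of
+// the product in AH.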
+
+let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
+def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
+ "mul{w}\t$src",
+ []>, OpSize; // AX,DX = AX*GR16
+
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
+def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
+ "mul{l}\t$src", // EAX,EDX = EAX*GR32
+ [/*(set EAX, EDX, EFLAGS, (X86umul_flag EAX, GR32:$src))*/]>;
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
+ "mul{q}\t$src", // RAX,RDX = RAX*GR64
+ [/*(set RAX, RDX, EFLAGS, (X86umul_flag RAX, GR64:$src))*/]>;
+
+let Defs = [AL,EFLAGS,AX], Uses = [AL] in
+def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
+ "mul{b}\t$src",
+ // FIXME: Used for 8-bit mul; ignores the upper 8 bits of the result.
+ // This probably ought to be moved to a def : Pat<> if the
+ // syntax can be accepted.
+ [(set AL, (mul AL, (loadi8 addr:$src))),
+ (implicit EFLAGS)]>; // AL,AH = AL*[mem8]
+
+let mayLoad = 1, neverHasSideEffects = 1 in {
+let Defs = [AX,DX,EFLAGS], Uses = [AX] in
+def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
+ "mul{w}\t$src",
+ []>, OpSize; // AX,DX = AX*[mem16]
+
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
+def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
+ "mul{l}\t$src",
+ []>; // EAX,EDX = EAX*[mem32]
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
+ "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
+}
+
+let neverHasSideEffects = 1 in {
+let Defs = [AL,EFLAGS,AX], Uses = [AL] in
+def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>;
+ // AL,AH = AL*GR8
+let Defs = [AX,DX,EFLAGS], Uses = [AX] in
+def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>,
+ OpSize; // AX,DX = AX*GR16
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
+def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>;
+ // EAX,EDX = EAX*GR32
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src), "imul{q}\t$src", []>;
+ // RAX,RDX = RAX*GR64
+
+let mayLoad = 1 in {
+let Defs = [AL,EFLAGS,AX], Uses = [AL] in
+def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
+ "imul{b}\t$src", []>; // AL,AH = AL*[mem8]
+let Defs = [AX,DX,EFLAGS], Uses = [AX] in
+def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
+ "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
+def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
+ "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
+ "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
+}
+} // neverHasSideEffects
+
+
+let Defs = [EFLAGS] in {
+let Constraints = "$src1 = $dst" in {
+
+let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
+// Register-Register Signed Integer Multiply
+def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
+ "imul{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, GR16:$src2))]>, TB, OpSize;
+def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
+ "imul{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, GR32:$src2))]>, TB;
+def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "imul{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;
+}
+
+// Register-Memory Signed Integer Multiply
+def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
+ (ins GR16:$src1, i16mem:$src2),
+ "imul{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, (load addr:$src2)))]>,
+ TB, OpSize;
+def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "imul{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, (load addr:$src2)))]>, TB;
+def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i64mem:$src2),
+ "imul{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
+} // Constraints = "$src1 = $dst"
+
+} // Defs = [EFLAGS]
+
+// Surprisingly enough, these are not two-address instructions!
+let Defs = [EFLAGS] in {
+// Register-Integer Signed Integer Multiply
+def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
+ (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, imm:$src2))]>, OpSize;
+def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
+ (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize;
+def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
+ (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, imm:$src2))]>;
+def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
+ (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, i32immSExt8:$src2))]>;
+def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;
+def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
+ (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
+
+
+// Memory-Integer Signed Integer Multiply
+def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
+ (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1), imm:$src2))]>,
+ OpSize;
+def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
+ (outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
+ "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i16immSExt8:$src2))]>, OpSize;
+def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
+ (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1), imm:$src2))]>;
+def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
+ (outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
+ "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i32immSExt8:$src2))]>;
+def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
+ (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i64immSExt32:$src2))]>;
+def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
+ (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i64immSExt8:$src2))]>;
+} // Defs = [EFLAGS]
+
+
+
+
+// Unsigned division/remainder.
+let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
+ "div{b}\t$src", []>;
+let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
+def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
+ "div{w}\t$src", []>, OpSize;
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
+def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
+ "div{l}\t$src", []>;
+// RDX:RAX/r64 = RAX,RDX
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
+def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
+ "div{q}\t$src", []>;
+
+let mayLoad = 1 in {
+let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
+ "div{b}\t$src", []>;
+let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
+def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
+ "div{w}\t$src", []>, OpSize;
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
+def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src),
+ "div{l}\t$src", []>;
+// RDX:RAX/[mem64] = RAX,RDX
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
+def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
+ "div{q}\t$src", []>;
+}
+
+// Signed division/remainder.
+let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
+ "idiv{b}\t$src", []>;
+let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
+def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
+ "idiv{w}\t$src", []>, OpSize;
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
+def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
+ "idiv{l}\t$src", []>;
+// RDX:RAX/r64 = RAX,RDX
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
+def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
+ "idiv{q}\t$src", []>;
+
+let mayLoad = 1 in {
+let Defs = [AL,EFLAGS,AX], Uses = [AX] in
+def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
+ "idiv{b}\t$src", []>;
+let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
+def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
+ "idiv{w}\t$src", []>, OpSize;
+let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
+def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
+ "idiv{l}\t$src", []>;
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in // RDX:RAX/[mem64] = RAX,RDX
+def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
+ "idiv{q}\t$src", []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Two address Instructions.
+//
+
+// Unary instructions.
+let CodeSize = 2 in {
+let Defs = [EFLAGS] in {
+let Constraints = "$src1 = $dst" in {
+def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "neg{b}\t$dst",
+ [(set GR8:$dst, (ineg GR8:$src1)),
+ (implicit EFLAGS)]>;
+def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
+ "neg{w}\t$dst",
+ [(set GR16:$dst, (ineg GR16:$src1)),
+ (implicit EFLAGS)]>, OpSize;
+def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
+ "neg{l}\t$dst",
+ [(set GR32:$dst, (ineg GR32:$src1)),
+ (implicit EFLAGS)]>;
+def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src1), "neg{q}\t$dst",
+ [(set GR64:$dst, (ineg GR64:$src1)),
+ (implicit EFLAGS)]>;
+} // Constraints = "$src1 = $dst"
+
+def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
+ "neg{b}\t$dst",
+ [(store (ineg (loadi8 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
+def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
+ "neg{w}\t$dst",
+ [(store (ineg (loadi16 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
+def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
+ "neg{l}\t$dst",
+ [(store (ineg (loadi32 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
+def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
+ [(store (ineg (loadi64 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
+
+// Note: NOT does not set EFLAGS!
+
+let Constraints = "$src1 = $dst" in {
+// Match xor -1 to not. Favor these over a move imm + xor to save code size.
+let AddedComplexity = 15 in {
+def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "not{b}\t$dst",
+ [(set GR8:$dst, (not GR8:$src1))]>;
+def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
+ "not{w}\t$dst",
+ [(set GR16:$dst, (not GR16:$src1))]>, OpSize;
+def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
+ "not{l}\t$dst",
+ [(set GR32:$dst, (not GR32:$src1))]>;
+def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src1), "not{q}\t$dst",
+ [(set GR64:$dst, (not GR64:$src1))]>;
+}
+} // Constraints = "$src1 = $dst"
+
+def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
+ "not{b}\t$dst",
+ [(store (not (loadi8 addr:$dst)), addr:$dst)]>;
+def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
+ "not{w}\t$dst",
+ [(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
+def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
+ "not{l}\t$dst",
+ [(store (not (loadi32 addr:$dst)), addr:$dst)]>;
+def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
+ [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
+} // CodeSize
+
+// TODO: inc/dec is slow for P4, but fast for Pentium-M.
+let Defs = [EFLAGS] in {
+let Constraints = "$src1 = $dst" in {
+let CodeSize = 2 in
+def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "inc{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
+
+let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
+def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+ "inc{w}\t$dst",
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
+ OpSize, Requires<[In32BitMode]>;
+def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ "inc{l}\t$dst",
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
+ Requires<[In32BitMode]>;
+def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "inc{q}\t$dst",
+ [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))]>;
+} // isConvertibleToThreeAddress = 1, CodeSize = 1
+
+
+// In 64-bit mode, single-byte INC and DEC cannot be encoded: the 0x40-0x4F
+// opcodes are REX prefixes there.
+let isConvertibleToThreeAddress = 1, CodeSize = 2 in {
+// Can transform into LEA.
+def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
+ "inc{w}\t$dst",
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
+ OpSize, Requires<[In64BitMode]>;
+def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
+ "inc{l}\t$dst",
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
+ Requires<[In64BitMode]>;
+def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
+ "dec{w}\t$dst",
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
+ OpSize, Requires<[In64BitMode]>;
+def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
+ "dec{l}\t$dst",
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
+ Requires<[In64BitMode]>;
+} // isConvertibleToThreeAddress = 1, CodeSize = 2
+
+} // Constraints = "$src1 = $dst"
+
+let CodeSize = 2 in {
+ def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
+ [(store (add (loadi8 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>;
+ def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In32BitMode]>;
+ def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In32BitMode]>;
+ def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
+ [(store (add (loadi64 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// These are duplicates of their 32-bit counterparts; they are only needed so
+// X86 knows how to unfold them.
+// FIXME: What is this for?
+def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
+def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
+} // CodeSize = 2
+
+let Constraints = "$src1 = $dst" in {
+let CodeSize = 2 in
+def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "dec{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
+let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
+def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
+ "dec{w}\t$dst",
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
+ OpSize, Requires<[In32BitMode]>;
+def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
+ "dec{l}\t$dst",
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
+ Requires<[In32BitMode]>;
+def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
+ [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))]>;
+} // CodeSize = 2
+} // Constraints = "$src1 = $dst"
+
+
+let CodeSize = 2 in {
+ def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
+ [(store (add (loadi8 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>;
+ def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In32BitMode]>;
+ def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In32BitMode]>;
+ def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
+ [(store (add (loadi64 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>;
+} // CodeSize = 2
+} // Defs = [EFLAGS]
+
+
+/// X86TypeInfo - This is a bunch of information that describes X86-relevant
+/// properties of a value type. For example, it can tell you which register
+/// class and preferred load node to use.
+class X86TypeInfo<ValueType vt, string instrsuffix, RegisterClass regclass,
+ PatFrag loadnode, X86MemOperand memoperand, ImmType immkind,
+ Operand immoperand, SDPatternOperator immoperator,
+ Operand imm8operand, SDPatternOperator imm8operator,
+ bit hasOddOpcode, bit hasOpSizePrefix, bit hasREX_WPrefix> {
+ /// VT - This is the value type itself.
+ ValueType VT = vt;
+
+ /// InstrSuffix - This is the suffix used on instructions with this type. For
+ /// example, i8 -> "b", i16 -> "w", i32 -> "l", i64 -> "q".
+ string InstrSuffix = instrsuffix;
+
+ /// RegClass - This is the register class associated with this type. For
+ /// example, i8 -> GR8, i16 -> GR16, i32 -> GR32, i64 -> GR64.
+ RegisterClass RegClass = regclass;
+
+ /// LoadNode - This is the load node associated with this type. For
+ /// example, i8 -> loadi8, i16 -> loadi16, i32 -> loadi32, i64 -> loadi64.
+ PatFrag LoadNode = loadnode;
+
+ /// MemOperand - This is the memory operand associated with this type. For
+ /// example, i8 -> i8mem, i16 -> i16mem, i32 -> i32mem, i64 -> i64mem.
+ X86MemOperand MemOperand = memoperand;
+
+ /// ImmEncoding - This is the encoding of an immediate of this type. For
+ /// example, i8 -> Imm8, i16 -> Imm16, i32 -> Imm32. Note that i64 -> Imm32
+ /// since the immediate field of i64 instructions is a 32-bit sign-extended
+ /// value.
+ ImmType ImmEncoding = immkind;
+
+ /// ImmOperand - This is the operand kind of an immediate of this type. For
+ /// example, i8 -> i8imm, i16 -> i16imm, i32 -> i32imm. Note that i64 ->
+ /// i64i32imm since the immediate field of i64 instructions is a 32-bit
+ /// sign-extended value.
+ Operand ImmOperand = immoperand;
+
+ /// ImmOperator - This is the operator that should be used to match an
+ /// immediate of this kind in a pattern (e.g. imm, or i64immSExt32).
+ SDPatternOperator ImmOperator = immoperator;
+
+ /// Imm8Operand - This is the operand kind to use for an imm8 of this type.
+ /// For example, i8 -> <invalid>, i16 -> i16i8imm, i32 -> i32i8imm. This is
+ /// only used for instructions that have a sign-extended imm8 field form.
+ Operand Imm8Operand = imm8operand;
+
+ /// Imm8Operator - This is the operator that should be used to match an 8-bit
+ /// sign-extended immediate of this kind in a pattern (e.g. i16immSExt8).
+ SDPatternOperator Imm8Operator = imm8operator;
+
+ /// HasOddOpcode - This bit is true if the instruction should have an odd (as
+ /// opposed to even) opcode. Operations on i8 are usually even, operations on
+ /// other datatypes are odd.
+ bit HasOddOpcode = hasOddOpcode;
+
+ /// HasOpSizePrefix - This bit is set to true if the instruction should have
+ /// the 0x66 operand size prefix. This is set for i16 types.
+ bit HasOpSizePrefix = hasOpSizePrefix;
+
+ /// HasREX_WPrefix - This bit is set to true if the instruction should have
+ /// the 0x48 REX.W prefix. This is set for i64 types.
+ bit HasREX_WPrefix = hasREX_WPrefix;
+}
+
+def invalid_node : SDNode<"<<invalid_node>>", SDTIntLeaf,[],"<<invalid_node>>">;
+
+
+def Xi8 : X86TypeInfo<i8 , "b", GR8 , loadi8 , i8mem ,
+ Imm8 , i8imm , imm, i8imm , invalid_node,
+ 0, 0, 0>;
+def Xi16 : X86TypeInfo<i16, "w", GR16, loadi16, i16mem,
+ Imm16, i16imm, imm, i16i8imm, i16immSExt8,
+ 1, 1, 0>;
+def Xi32 : X86TypeInfo<i32, "l", GR32, loadi32, i32mem,
+ Imm32, i32imm, imm, i32i8imm, i32immSExt8,
+ 1, 0, 0>;
+def Xi64 : X86TypeInfo<i64, "q", GR64, loadi64, i64mem,
+ Imm32, i64i32imm, i64immSExt32, i64i8imm, i64immSExt8,
+ 1, 0, 1>;
+
+/// ITy - This instruction base class takes the type info for the instruction.
+/// Using this, it:
+/// 1. Concatenates together the instruction mnemonic with the appropriate
+/// suffix letter, a tab, and the arguments.
+/// 2. Infers whether the instruction should have a 0x66 prefix byte.
+/// 3. Infers whether the instruction should have a REX.W prefix (0x48).
+/// 4. Infers whether the low bit of the opcode should be 0 (for i8 operations)
+/// or 1 (for i16,i32,i64 operations).
+class ITy<bits<8> opcode, Format f, X86TypeInfo typeinfo, dag outs, dag ins,
+ string mnemonic, string args, list<dag> pattern>
+ : I<{opcode{7}, opcode{6}, opcode{5}, opcode{4},
+ opcode{3}, opcode{2}, opcode{1}, typeinfo.HasOddOpcode },
+ f, outs, ins,
+ !strconcat(mnemonic, "{", typeinfo.InstrSuffix, "}\t", args), pattern> {
+
+ // Infer instruction prefixes from type info.
+ let hasOpSizePrefix = typeinfo.HasOpSizePrefix;
+ let hasREX_WPrefix = typeinfo.HasREX_WPrefix;
+}
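+
+// For illustration only: instantiating ITy over Xi16 infers the "w" suffix
+// and the 0x66 prefix, and forces the opcode's low bit to 1 (so a base
+// opcode of 0x00 would be emitted as 0x01). A hypothetical sketch, not a
+// real instruction:
+//
+// def FOO16rr : ITy<0x00, MRMDestReg, Xi16, (outs GR16:$dst),
+//                   (ins GR16:$src1, GR16:$src2), "foo",
+//                   "{$src2, $src1|$src1, $src2}", []>;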
+
+// BinOpRR - Instructions like "add reg, reg, reg".
+class BinOpRR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ dag outlist, list<dag> pattern, Format f = MRMDestReg>
+ : ITy<opcode, f, typeinfo, outlist,
+ (ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern>;
+
+// BinOpRR_R - Instructions like "add reg, reg, reg", where the pattern has
+// just a regclass (no eflags) as a result.
+class BinOpRR_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst,
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))]>;
+
+// BinOpRR_F - Instructions like "cmp reg, reg", where the pattern has
+// just EFLAGS as a result.
+class BinOpRR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDPatternOperator opnode, Format f = MRMDestReg>
+ : BinOpRR<opcode, mnemonic, typeinfo, (outs),
+ [(set EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))],
+ f>;
+
+// BinOpRR_RF - Instructions like "add reg, reg, reg", where the pattern has
+// both a regclass and EFLAGS as a result.
+class BinOpRR_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))]>;
+
+// BinOpRR_RFF - Instructions like "adc reg, reg, reg", where the pattern has
+// both a regclass and EFLAGS as a result, and has EFLAGS as input.
+class BinOpRR_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2,
+ EFLAGS))]>;
+
+// BinOpRR_Rev - Instructions like "add reg, reg, reg" (reversed encoding).
+class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
+ : ITy<opcode, MRMSrcReg, typeinfo,
+ (outs typeinfo.RegClass:$dst),
+ (ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
+ mnemonic, "{$src2, $dst|$dst, $src2}", []> {
+ // The disassembler should know about this, but not the asmparser.
+ let isCodeGenOnly = 1;
+}
+
+// BinOpRM - Instructions like "add reg, reg, [mem]".
+class BinOpRM<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ dag outlist, list<dag> pattern>
+ : ITy<opcode, MRMSrcMem, typeinfo, outlist,
+ (ins typeinfo.RegClass:$src1, typeinfo.MemOperand:$src2),
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern>;
+
+// BinOpRM_R - Instructions like "add reg, reg, [mem]".
+class BinOpRM_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRM<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst,
+ (opnode typeinfo.RegClass:$src1, (typeinfo.LoadNode addr:$src2)))]>;
+
+// BinOpRM_F - Instructions like "cmp reg, [mem]".
+class BinOpRM_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDPatternOperator opnode>
+ : BinOpRM<opcode, mnemonic, typeinfo, (outs),
+ [(set EFLAGS,
+ (opnode typeinfo.RegClass:$src1, (typeinfo.LoadNode addr:$src2)))]>;
+
+// BinOpRM_RF - Instructions like "add reg, reg, [mem]".
+class BinOpRM_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRM<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, (typeinfo.LoadNode addr:$src2)))]>;
+
+// BinOpRM_RFF - Instructions like "adc reg, reg, [mem]".
+class BinOpRM_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpRM<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, (typeinfo.LoadNode addr:$src2),
+ EFLAGS))]>;
+
+// BinOpRI - Instructions like "add reg, reg, imm".
+class BinOpRI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ Format f, dag outlist, list<dag> pattern>
+ : ITy<opcode, f, typeinfo, outlist,
+ (ins typeinfo.RegClass:$src1, typeinfo.ImmOperand:$src2),
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern> {
+ let ImmT = typeinfo.ImmEncoding;
+}
+
+// BinOpRI_R - Instructions like "add reg, reg, imm".
+class BinOpRI_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst,
+ (opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2))]>;
+
+// BinOpRI_F - Instructions like "cmp reg, imm".
+class BinOpRI_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDPatternOperator opnode, Format f>
+ : BinOpRI<opcode, mnemonic, typeinfo, f, (outs),
+ [(set EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2))]>;
+
+// BinOpRI_RF - Instructions like "add reg, reg, imm".
+class BinOpRI_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2))]>;
+
+// BinOpRI_RFF - Instructions like "adc reg, reg, imm".
+class BinOpRI_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2,
+ EFLAGS))]>;
+
+// BinOpRI8 - Instructions like "add reg, reg, imm8".
+class BinOpRI8<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ Format f, dag outlist, list<dag> pattern>
+ : ITy<opcode, f, typeinfo, outlist,
+ (ins typeinfo.RegClass:$src1, typeinfo.Imm8Operand:$src2),
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern> {
+ let ImmT = Imm8; // Always 8-bit immediate.
+}
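+
+// Note that the 0x82 base opcode passed to these ri8 forms below picks up
+// the odd low bit from ITy for the non-i8 types, so they are actually
+// emitted with opcode 0x83, the x86 sign-extended-imm8 group.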
+
+// BinOpRI8_R - Instructions like "add reg, reg, imm8".
+class BinOpRI8_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst,
+ (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
+
+// BinOpRI8_F - Instructions like "cmp reg, imm8".
+class BinOpRI8_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs),
+ [(set EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
+
+// BinOpRI8_RF - Instructions like "add reg, reg, imm8".
+class BinOpRI8_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
+
+// BinOpRI8_RFF - Instructions like "adc reg, reg, imm8".
+class BinOpRI8_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
+ [(set typeinfo.RegClass:$dst, EFLAGS,
+ (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2,
+ EFLAGS))]>;
+
+// BinOpMR - Instructions like "add [mem], reg".
+class BinOpMR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ list<dag> pattern>
+ : ITy<opcode, MRMDestMem, typeinfo,
+ (outs), (ins typeinfo.MemOperand:$dst, typeinfo.RegClass:$src),
+ mnemonic, "{$src, $dst|$dst, $src}", pattern>;
+
+// BinOpMR_RMW - Instructions like "add [mem], reg".
+class BinOpMR_RMW<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpMR<opcode, mnemonic, typeinfo,
+ [(store (opnode (load addr:$dst), typeinfo.RegClass:$src), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMR_RMW_FF - Instructions like "adc [mem], reg".
+class BinOpMR_RMW_FF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpMR<opcode, mnemonic, typeinfo,
+ [(store (opnode (load addr:$dst), typeinfo.RegClass:$src, EFLAGS),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMR_F - Instructions like "cmp [mem], reg".
+class BinOpMR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode>
+ : BinOpMR<opcode, mnemonic, typeinfo,
+ [(set EFLAGS, (opnode (load addr:$dst), typeinfo.RegClass:$src))]>;
+
+// BinOpMI - Instructions like "add [mem], imm".
+class BinOpMI<string mnemonic, X86TypeInfo typeinfo,
+ Format f, list<dag> pattern, bits<8> opcode = 0x80>
+ : ITy<opcode, f, typeinfo,
+ (outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
+ mnemonic, "{$src, $dst|$dst, $src}", pattern> {
+ let ImmT = typeinfo.ImmEncoding;
+}
+
+// BinOpMI_RMW - Instructions like "add [mem], imm".
+class BinOpMI_RMW<string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI<mnemonic, typeinfo, f,
+ [(store (opnode (typeinfo.VT (load addr:$dst)),
+ typeinfo.ImmOperator:$src), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMI_RMW_FF - Instructions like "adc [mem], imm".
+class BinOpMI_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI<mnemonic, typeinfo, f,
+ [(store (opnode (typeinfo.VT (load addr:$dst)),
+ typeinfo.ImmOperator:$src, EFLAGS), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMI_F - Instructions like "cmp [mem], imm".
+class BinOpMI_F<string mnemonic, X86TypeInfo typeinfo,
+ SDPatternOperator opnode, Format f, bits<8> opcode = 0x80>
+ : BinOpMI<mnemonic, typeinfo, f,
+ [(set EFLAGS, (opnode (typeinfo.VT (load addr:$dst)),
+ typeinfo.ImmOperator:$src))],
+ opcode>;
+
+// BinOpMI8 - Instructions like "add [mem], imm8".
+class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
+ Format f, list<dag> pattern>
+ : ITy<0x82, f, typeinfo,
+ (outs), (ins typeinfo.MemOperand:$dst, typeinfo.Imm8Operand:$src),
+ mnemonic, "{$src, $dst|$dst, $src}", pattern> {
+ let ImmT = Imm8; // Always 8-bit immediate.
+}
+
+// BinOpMI8_RMW - Instructions like "add [mem], imm8".
+class BinOpMI8_RMW<string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f,
+ [(store (opnode (load addr:$dst),
+ typeinfo.Imm8Operator:$src), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMI8_RMW_FF - Instructions like "adc [mem], imm8".
+class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f,
+ [(store (opnode (load addr:$dst),
+ typeinfo.Imm8Operator:$src, EFLAGS), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// BinOpMI8_F - Instructions like "cmp [mem], imm8".
+class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo,
+ SDNode opnode, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f,
+ [(set EFLAGS, (opnode (load addr:$dst),
+ typeinfo.Imm8Operator:$src))]>;
+
+// BinOpAI - Instructions like "add %eax, %eax, imm".
+class BinOpAI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
+ Register areg>
+ : ITy<opcode, RawFrm, typeinfo,
+ (outs), (ins typeinfo.ImmOperand:$src),
+ mnemonic, !strconcat("{$src, %", areg.AsmName, "|%",
+ areg.AsmName, ", $src}"), []> {
+ let ImmT = typeinfo.ImmEncoding;
+ let Uses = [areg];
+ let Defs = [areg];
+}
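+
+// For example, BinOpAI<0x04, "add", Xi8, AL> would produce the accumulator
+// form "add{b}\t{$src, %al|%al, $src}" with an Imm8 payload.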
+
+/// ArithBinOp_RF - This is an arithmetic binary operator where the pattern is
+/// defined with "(set GPR:$dst, EFLAGS, (...".
+///
+/// It would be nice to get rid of the second and third argument here, but
+/// tblgen can't handle dependent type references aggressively enough: PR8330
+multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
+ string mnemonic, Format RegMRM, Format MemMRM,
+ SDNode opnodeflag, SDNode opnode,
+ bit CommutableRR, bit ConvertibleToThreeAddress> {
+ let Defs = [EFLAGS] in {
+ let Constraints = "$src1 = $dst" in {
+ let isCommutable = CommutableRR,
+ isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def #NAME#8rr : BinOpRR_RF<BaseOpc, mnemonic, Xi8 , opnodeflag>;
+ def #NAME#16rr : BinOpRR_RF<BaseOpc, mnemonic, Xi16, opnodeflag>;
+ def #NAME#32rr : BinOpRR_RF<BaseOpc, mnemonic, Xi32, opnodeflag>;
+ def #NAME#64rr : BinOpRR_RF<BaseOpc, mnemonic, Xi64, opnodeflag>;
+ } // isCommutable
+
+ def #NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
+ def #NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>;
+ def #NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>;
+ def #NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>;
+
+ def #NAME#8rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi8 , opnodeflag>;
+ def #NAME#16rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi16, opnodeflag>;
+ def #NAME#32rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi32, opnodeflag>;
+ def #NAME#64rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi64, opnodeflag>;
+
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+    // NOTE: These are order-specific; we want the ri8 forms to be listed
+    // first so that they are slightly preferred to the ri forms.
+ def #NAME#16ri8 : BinOpRI8_RF<0x82, mnemonic, Xi16, opnodeflag, RegMRM>;
+ def #NAME#32ri8 : BinOpRI8_RF<0x82, mnemonic, Xi32, opnodeflag, RegMRM>;
+ def #NAME#64ri8 : BinOpRI8_RF<0x82, mnemonic, Xi64, opnodeflag, RegMRM>;
+
+ def #NAME#8ri : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM>;
+ def #NAME#16ri : BinOpRI_RF<0x80, mnemonic, Xi16, opnodeflag, RegMRM>;
+ def #NAME#32ri : BinOpRI_RF<0x80, mnemonic, Xi32, opnodeflag, RegMRM>;
+ def #NAME#64ri32: BinOpRI_RF<0x80, mnemonic, Xi64, opnodeflag, RegMRM>;
+ }
+ } // Constraints = "$src1 = $dst"
+
+ def #NAME#8mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi8 , opnode>;
+ def #NAME#16mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi16, opnode>;
+ def #NAME#32mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi32, opnode>;
+ def #NAME#64mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi64, opnode>;
+
+  // NOTE: These are order-specific; we want the mi8 forms to be listed
+  // first so that they are slightly preferred to the mi forms.
+ def #NAME#16mi8 : BinOpMI8_RMW<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi8 : BinOpMI8_RMW<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi8 : BinOpMI8_RMW<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8mi : BinOpMI_RMW<mnemonic, Xi8 , opnode, MemMRM>;
+ def #NAME#16mi : BinOpMI_RMW<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi : BinOpMI_RMW<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi32 : BinOpMI_RMW<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL>;
+ def #NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX>;
+ def #NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX>;
+ def #NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX>;
+ }
+}
+
+/// ArithBinOp_RFF - This is an arithmetic binary operator where the pattern is
+/// defined with "(set GPR:$dst, EFLAGS, (node LHS, RHS, EFLAGS))" like ADC and
+/// SBB.
+///
+/// It would be nice to get rid of the second and third argument here, but
+/// tblgen can't handle dependent type references aggressively enough: PR8330
+multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
+ string mnemonic, Format RegMRM, Format MemMRM,
+ SDNode opnode, bit CommutableRR,
+ bit ConvertibleToThreeAddress> {
+ let Defs = [EFLAGS] in {
+ let Constraints = "$src1 = $dst" in {
+ let isCommutable = CommutableRR,
+ isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def #NAME#8rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi8 , opnode>;
+ def #NAME#16rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi16, opnode>;
+ def #NAME#32rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi32, opnode>;
+ def #NAME#64rr : BinOpRR_RFF<BaseOpc, mnemonic, Xi64, opnode>;
+ } // isCommutable
+
+ def #NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
+ def #NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>;
+ def #NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>;
+ def #NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>;
+
+ def #NAME#8rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi8 , opnode>;
+ def #NAME#16rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi16, opnode>;
+ def #NAME#32rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi32, opnode>;
+ def #NAME#64rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi64, opnode>;
+
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+    // NOTE: These are order-specific; we want the ri8 forms to be listed
+    // first so that they are slightly preferred to the ri forms.
+ def #NAME#16ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi16, opnode, RegMRM>;
+ def #NAME#32ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi32, opnode, RegMRM>;
+ def #NAME#64ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi64, opnode, RegMRM>;
+
+ def #NAME#8ri : BinOpRI_RFF<0x80, mnemonic, Xi8 , opnode, RegMRM>;
+ def #NAME#16ri : BinOpRI_RFF<0x80, mnemonic, Xi16, opnode, RegMRM>;
+ def #NAME#32ri : BinOpRI_RFF<0x80, mnemonic, Xi32, opnode, RegMRM>;
+ def #NAME#64ri32: BinOpRI_RFF<0x80, mnemonic, Xi64, opnode, RegMRM>;
+ }
+ } // Constraints = "$src1 = $dst"
+
+ def #NAME#8mr : BinOpMR_RMW_FF<BaseOpc, mnemonic, Xi8 , opnode>;
+ def #NAME#16mr : BinOpMR_RMW_FF<BaseOpc, mnemonic, Xi16, opnode>;
+ def #NAME#32mr : BinOpMR_RMW_FF<BaseOpc, mnemonic, Xi32, opnode>;
+ def #NAME#64mr : BinOpMR_RMW_FF<BaseOpc, mnemonic, Xi64, opnode>;
+
+  // NOTE: These are order-specific; we want the mi8 forms to be listed
+  // first so that they are slightly preferred to the mi forms.
+ def #NAME#16mi8 : BinOpMI8_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi8 : BinOpMI8_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi8 : BinOpMI8_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8mi : BinOpMI_RMW_FF<mnemonic, Xi8 , opnode, MemMRM>;
+ def #NAME#16mi : BinOpMI_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi : BinOpMI_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi32 : BinOpMI_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL>;
+ def #NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX>;
+ def #NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX>;
+ def #NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX>;
+ }
+}
+
+/// ArithBinOp_F - This is an arithmetic binary operator where the pattern is
+/// defined with "(set EFLAGS, (...". It would be really nice to find a way
+/// to factor this with the other ArithBinOp_*.
+///
+multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
+ string mnemonic, Format RegMRM, Format MemMRM,
+ SDNode opnode,
+ bit CommutableRR, bit ConvertibleToThreeAddress> {
+ let Defs = [EFLAGS] in {
+ let isCommutable = CommutableRR,
+ isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ def #NAME#8rr : BinOpRR_F<BaseOpc, mnemonic, Xi8 , opnode>;
+ def #NAME#16rr : BinOpRR_F<BaseOpc, mnemonic, Xi16, opnode>;
+ def #NAME#32rr : BinOpRR_F<BaseOpc, mnemonic, Xi32, opnode>;
+ def #NAME#64rr : BinOpRR_F<BaseOpc, mnemonic, Xi64, opnode>;
+ } // isCommutable
+
+ def #NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
+ def #NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>;
+ def #NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>;
+ def #NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>;
+
+ def #NAME#8rm : BinOpRM_F<BaseOpc2, mnemonic, Xi8 , opnode>;
+ def #NAME#16rm : BinOpRM_F<BaseOpc2, mnemonic, Xi16, opnode>;
+ def #NAME#32rm : BinOpRM_F<BaseOpc2, mnemonic, Xi32, opnode>;
+ def #NAME#64rm : BinOpRM_F<BaseOpc2, mnemonic, Xi64, opnode>;
+
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+    // NOTE: These are order-specific; we want the ri8 forms to be listed
+    // first so that they are slightly preferred to the ri forms.
+ def #NAME#16ri8 : BinOpRI8_F<0x82, mnemonic, Xi16, opnode, RegMRM>;
+ def #NAME#32ri8 : BinOpRI8_F<0x82, mnemonic, Xi32, opnode, RegMRM>;
+ def #NAME#64ri8 : BinOpRI8_F<0x82, mnemonic, Xi64, opnode, RegMRM>;
+
+ def #NAME#8ri : BinOpRI_F<0x80, mnemonic, Xi8 , opnode, RegMRM>;
+ def #NAME#16ri : BinOpRI_F<0x80, mnemonic, Xi16, opnode, RegMRM>;
+ def #NAME#32ri : BinOpRI_F<0x80, mnemonic, Xi32, opnode, RegMRM>;
+ def #NAME#64ri32: BinOpRI_F<0x80, mnemonic, Xi64, opnode, RegMRM>;
+ }
+
+ def #NAME#8mr : BinOpMR_F<BaseOpc, mnemonic, Xi8 , opnode>;
+ def #NAME#16mr : BinOpMR_F<BaseOpc, mnemonic, Xi16, opnode>;
+ def #NAME#32mr : BinOpMR_F<BaseOpc, mnemonic, Xi32, opnode>;
+ def #NAME#64mr : BinOpMR_F<BaseOpc, mnemonic, Xi64, opnode>;
+
+  // NOTE: These are order-specific; we want the mi8 forms to be listed
+  // first so that they are slightly preferred to the mi forms.
+ def #NAME#16mi8 : BinOpMI8_F<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi8 : BinOpMI8_F<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi8 : BinOpMI8_F<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8mi : BinOpMI_F<mnemonic, Xi8 , opnode, MemMRM>;
+ def #NAME#16mi : BinOpMI_F<mnemonic, Xi16, opnode, MemMRM>;
+ def #NAME#32mi : BinOpMI_F<mnemonic, Xi32, opnode, MemMRM>;
+ def #NAME#64mi32 : BinOpMI_F<mnemonic, Xi64, opnode, MemMRM>;
+
+ def #NAME#8i8 : BinOpAI<BaseOpc4, mnemonic, Xi8 , AL>;
+ def #NAME#16i16 : BinOpAI<BaseOpc4, mnemonic, Xi16, AX>;
+ def #NAME#32i32 : BinOpAI<BaseOpc4, mnemonic, Xi32, EAX>;
+ def #NAME#64i32 : BinOpAI<BaseOpc4, mnemonic, Xi64, RAX>;
+ }
+}
+
+
+defm AND : ArithBinOp_RF<0x20, 0x22, 0x24, "and", MRM4r, MRM4m,
+ X86and_flag, and, 1, 0>;
+defm OR : ArithBinOp_RF<0x08, 0x0A, 0x0C, "or", MRM1r, MRM1m,
+ X86or_flag, or, 1, 0>;
+defm XOR : ArithBinOp_RF<0x30, 0x32, 0x34, "xor", MRM6r, MRM6m,
+ X86xor_flag, xor, 1, 0>;
+defm ADD : ArithBinOp_RF<0x00, 0x02, 0x04, "add", MRM0r, MRM0m,
+ X86add_flag, add, 1, 1>;
+defm SUB : ArithBinOp_RF<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
+ X86sub_flag, sub, 0, 0>;
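+
+// Each defm above expands to the full matrix of forms; e.g. ADD generates
+// ADD8rr..ADD64rr, the rr_REV variants, ADD8rm..ADD64rm, the ri8/ri/ri32
+// immediate forms, the mr/mi8/mi memory forms, and the accumulator forms
+// ADD8i8, ADD16i16, ADD32i32 and ADD64i32.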
+
+// Arithmetic.
+let Uses = [EFLAGS] in {
+ defm ADC : ArithBinOp_RFF<0x10, 0x12, 0x14, "adc", MRM2r, MRM2m, X86adc_flag,
+ 1, 0>;
+ defm SBB : ArithBinOp_RFF<0x18, 0x1A, 0x1C, "sbb", MRM3r, MRM3m, X86sbb_flag,
+ 0, 0>;
+}
+
+defm CMP : ArithBinOp_F<0x38, 0x3A, 0x3C, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
+
+
+//===----------------------------------------------------------------------===//
+// Semantically, test instructions are similar to AND, except they don't
+// generate a result. From an encoding perspective, they are very different:
+// they don't have all the usual imm8 and REV forms, and are encoded into a
+// different space.
+def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
+ (X86cmp (and_su node:$lhs, node:$rhs), 0)>;
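+
+// i.e. TEST is modeled as an AND whose result is compared against zero only
+// for its effect on EFLAGS; the AND value itself is discarded.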
+
+let Defs = [EFLAGS] in {
+ let isCommutable = 1 in {
+ def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , X86testpat, MRMSrcReg>;
+ def TEST16rr : BinOpRR_F<0x84, "test", Xi16, X86testpat, MRMSrcReg>;
+ def TEST32rr : BinOpRR_F<0x84, "test", Xi32, X86testpat, MRMSrcReg>;
+ def TEST64rr : BinOpRR_F<0x84, "test", Xi64, X86testpat, MRMSrcReg>;
+ } // isCommutable
+
+ def TEST8rm : BinOpRM_F<0x84, "test", Xi8 , X86testpat>;
+ def TEST16rm : BinOpRM_F<0x84, "test", Xi16, X86testpat>;
+ def TEST32rm : BinOpRM_F<0x84, "test", Xi32, X86testpat>;
+ def TEST64rm : BinOpRM_F<0x84, "test", Xi64, X86testpat>;
+
+ def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>;
+ def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>;
+ def TEST32ri : BinOpRI_F<0xF6, "test", Xi32, X86testpat, MRM0r>;
+ def TEST64ri32 : BinOpRI_F<0xF6, "test", Xi64, X86testpat, MRM0r>;
+
+ def TEST8mi : BinOpMI_F<"test", Xi8 , X86testpat, MRM0m, 0xF6>;
+ def TEST16mi : BinOpMI_F<"test", Xi16, X86testpat, MRM0m, 0xF6>;
+ def TEST32mi : BinOpMI_F<"test", Xi32, X86testpat, MRM0m, 0xF6>;
+ def TEST64mi32 : BinOpMI_F<"test", Xi64, X86testpat, MRM0m, 0xF6>;
+
+ def TEST8i8 : BinOpAI<0xA8, "test", Xi8 , AL>;
+ def TEST16i16 : BinOpAI<0xA8, "test", Xi16, AX>;
+ def TEST32i32 : BinOpAI<0xA8, "test", Xi32, EAX>;
+ def TEST64i32 : BinOpAI<0xA8, "test", Xi64, RAX>;
+}
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
index 2a6a71d..1ea8071 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -56,6 +56,31 @@ struct X86AddressMode {
: BaseType(RegBase), Scale(1), IndexReg(0), Disp(0), GV(0), GVOpFlags(0) {
Base.Reg = 0;
}
+
+
+ void getFullAddress(SmallVectorImpl<MachineOperand> &MO) {
+ assert(Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8);
+
+ if (BaseType == X86AddressMode::RegBase)
+ MO.push_back(MachineOperand::CreateReg(Base.Reg, false, false,
+ false, false, false, 0, false));
+ else {
+ assert(BaseType == X86AddressMode::FrameIndexBase);
+ MO.push_back(MachineOperand::CreateFI(Base.FrameIndex));
+ }
+
+ MO.push_back(MachineOperand::CreateImm(Scale));
+ MO.push_back(MachineOperand::CreateReg(IndexReg, false, false,
+ false, false, false, 0, false));
+
+ if (GV)
+ MO.push_back(MachineOperand::CreateGA(GV, Disp, GVOpFlags));
+ else
+ MO.push_back(MachineOperand::CreateImm(Disp));
+
+ MO.push_back(MachineOperand::CreateReg(0, false, false,
+ false, false, false, 0, false));
+ }
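+
+  // A hypothetical usage sketch (not part of this patch): collect the five
+  // address operands and append them to an instruction being built:
+  //   SmallVector<MachineOperand, 5> AddrOps;
+  //   AM.getFullAddress(AddrOps);
+  //   for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
+  //     MIB.addOperand(AddrOps[i]);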
};
/// addDirectMem - This function is used to add a direct memory reference to the
@@ -101,10 +126,11 @@ addFullAddress(const MachineInstrBuilder &MIB,
if (AM.BaseType == X86AddressMode::RegBase)
MIB.addReg(AM.Base.Reg);
- else if (AM.BaseType == X86AddressMode::FrameIndexBase)
+ else {
+ assert(AM.BaseType == X86AddressMode::FrameIndexBase);
MIB.addFrameIndex(AM.Base.FrameIndex);
- else
- assert (0);
+ }
+
MIB.addImm(AM.Scale).addReg(AM.IndexReg);
if (AM.GV)
MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
@@ -131,9 +157,8 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
if (TID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- Flags, Offset,
- MFI.getObjectSize(FI),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI, Offset),
+ Flags, MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
return addOffset(MIB.addFrameIndex(FI), Offset)
.addMemOperand(MMO);
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
new file mode 100644
index 0000000..3a43b22
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
@@ -0,0 +1,104 @@
+//===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the X86 conditional move and set on condition
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+
+// CMOV instructions.
+multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
+ let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
+ isCommutable = 1 in {
+ def #NAME#16rr
+ : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR16:$dst,
+ (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,TB,OpSize;
+ def #NAME#32rr
+ : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR32:$dst,
+ (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>, TB;
+ def #NAME#64rr
+ :RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR64:$dst,
+ (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
+ }
+
+ let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
+ def #NAME#16rm
+ : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+ CondNode, EFLAGS))]>, TB, OpSize;
+ def #NAME#32rm
+ : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+ CondNode, EFLAGS))]>, TB;
+ def #NAME#64rm
+ :RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
+ [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+ CondNode, EFLAGS))]>, TB;
+ } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
+} // end multiclass
+
+
+// Conditional Moves.
+defm CMOVO : CMOV<0x40, "cmovo" , X86_COND_O>;
+defm CMOVNO : CMOV<0x41, "cmovno", X86_COND_NO>;
+defm CMOVB : CMOV<0x42, "cmovb" , X86_COND_B>;
+defm CMOVAE : CMOV<0x43, "cmovae", X86_COND_AE>;
+defm CMOVE : CMOV<0x44, "cmove" , X86_COND_E>;
+defm CMOVNE : CMOV<0x45, "cmovne", X86_COND_NE>;
+defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
+defm CMOVA : CMOV<0x47, "cmova" , X86_COND_A>;
+defm CMOVS : CMOV<0x48, "cmovs" , X86_COND_S>;
+defm CMOVNS : CMOV<0x49, "cmovns", X86_COND_NS>;
+defm CMOVP : CMOV<0x4A, "cmovp" , X86_COND_P>;
+defm CMOVNP : CMOV<0x4B, "cmovnp", X86_COND_NP>;
+defm CMOVL : CMOV<0x4C, "cmovl" , X86_COND_L>;
+defm CMOVGE : CMOV<0x4D, "cmovge", X86_COND_GE>;
+defm CMOVLE : CMOV<0x4E, "cmovle", X86_COND_LE>;
+defm CMOVG : CMOV<0x4F, "cmovg" , X86_COND_G>;
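+
+// Each defm above yields the rr and rm forms in all three widths; e.g.
+// CMOVE expands to CMOVE16rr/CMOVE32rr/CMOVE64rr and
+// CMOVE16rm/CMOVE32rm/CMOVE64rm.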
+
+
+// SetCC instructions.
+multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
+ let Uses = [EFLAGS] in {
+ def r : I<opc, MRM0r, (outs GR8:$dst), (ins),
+ !strconcat(Mnemonic, "\t$dst"),
+ [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>, TB;
+ def m : I<opc, MRM0m, (outs), (ins i8mem:$dst),
+ !strconcat(Mnemonic, "\t$dst"),
+ [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>, TB;
+ } // Uses = [EFLAGS]
+}
+
+defm SETO : SETCC<0x90, "seto", X86_COND_O>; // is overflow bit set
+defm SETNO : SETCC<0x91, "setno", X86_COND_NO>; // is overflow bit not set
+defm SETB : SETCC<0x92, "setb", X86_COND_B>; // unsigned less than
+defm SETAE : SETCC<0x93, "setae", X86_COND_AE>; // unsigned greater or equal
+defm SETE : SETCC<0x94, "sete", X86_COND_E>; // equal to
+defm SETNE : SETCC<0x95, "setne", X86_COND_NE>; // not equal to
+defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>; // unsigned less than or equal
+defm SETA : SETCC<0x97, "seta", X86_COND_A>; // unsigned greater than
+defm SETS  : SETCC<0x98, "sets" , X86_COND_S>;  // is sign bit set
+defm SETNS : SETCC<0x99, "setns", X86_COND_NS>; // is sign bit not set
+defm SETP : SETCC<0x9A, "setp", X86_COND_P>; // is parity bit set
+defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>; // is parity bit not set
+defm SETL : SETCC<0x9C, "setl", X86_COND_L>; // signed less than
+defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>; // signed greater or equal
+defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>; // signed less than or equal
+defm SETG : SETCC<0x9F, "setg", X86_COND_G>; // signed greater than
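+
+// Each defm above yields a register and a memory form; e.g. SETE expands to
+// SETEr (set a GR8 register) and SETEm (store the result to i8 memory).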
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
new file mode 100644
index 0000000..4c915d9
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -0,0 +1,1626 @@
+//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the various pseudo instructions used by the compiler,
+// as well as Pat patterns used during instruction selection.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Pattern Matching Support
+
+def GetLo32XForm : SDNodeXForm<imm, [{
+ // Transformation function: get the low 32 bits.
+ return getI32Imm((unsigned)N->getZExtValue());
+}]>;
+
+def GetLo8XForm : SDNodeXForm<imm, [{
+ // Transformation function: get the low 8 bits.
+ return getI8Imm((uint8_t)N->getZExtValue());
+}]>;
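+
+// A sketch (illustrative only) of the kind of pattern these transforms feed:
+// narrowing a 64-bit AND with a zero-extended 32-bit immediate to a 32-bit
+// AND on the low subregister:
+//
+// def : Pat<(and GR64:$src, i64immZExt32:$imm),
+//           (SUBREG_TO_REG (i64 0),
+//             (AND32ri (EXTRACT_SUBREG GR64:$src, sub_32bit),
+//                      (i32 (GetLo32XForm imm:$imm))),
+//             sub_32bit)>;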
+
+
+//===----------------------------------------------------------------------===//
+// Random Pseudo Instructions.
+
+// PIC base construction. This expands to code that looks like this:
+// call $next_inst
+// popl %destreg
+let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
+ def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
+ "", []>;
+
+
+// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [ESP, EFLAGS], Uses = [ESP] in {
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start timm:$amt)]>,
+ Requires<[In32BitMode]>;
+def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[In32BitMode]>;
+}
+
+// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [RSP, EFLAGS], Uses = [RSP] in {
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start timm:$amt)]>,
+ Requires<[In64BitMode]>;
+def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end timm:$amt1, timm:$amt2)]>,
+ Requires<[In64BitMode]>;
+}
+
+
+
+// x86-64 va_start lowering magic.
+let usesCustomInserter = 1 in {
+def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
+ (outs),
+ (ins GR8:$al,
+ i64imm:$regsavefi, i64imm:$offset,
+ variable_ops),
+ "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
+ [(X86vastart_save_xmm_regs GR8:$al,
+ imm:$regsavefi,
+ imm:$offset)]>;
+
+// The VAARG_64 pseudo-instruction takes the address of the va_list,
+// and places the address of the next argument into a register.
+let Defs = [EFLAGS] in
+def VAARG_64 : I<0, Pseudo,
+ (outs GR64:$dst),
+ (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
+ "#VAARG_64 $dst, $ap, $size, $mode, $align",
+ [(set GR64:$dst,
+ (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
+ (implicit EFLAGS)]>;
+
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more than
+// 4K bytes in one go. Touching the stack at 4K increments is necessary to
+// ensure that the guard pages used by the OS virtual memory manager are
+// allocated in the correct sequence.
+// The main point of having a separate instruction is the extra unmodelled
+// effects it has (compared to ordinary calls), such as the stack pointer
+// change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+ def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
+ "# dynamic stack allocation",
+ [(X86WinAlloca)]>;
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// EH Pseudo Instructions
+//
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1, isCodeGenOnly = 1 in {
+def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
+ "ret\t#eh_return, addr: $addr",
+ [(X86ehret GR32:$addr)]>;
+
+}
+
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1, isCodeGenOnly = 1 in {
+def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
+ "ret\t#eh_return, addr: $addr",
+ [(X86ehret GR64:$addr)]>;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Alias Instructions
+//===----------------------------------------------------------------------===//
+
+// Alias instructions that map movr0 to xor.
+// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
+// FIXME: Set encoding to pseudo.
+let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
+ isCodeGenOnly = 1 in {
+def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
+ [(set GR8:$dst, 0)]>;
+
+// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
+// encoding and avoids a partial-register update sometimes, but doing so
+// at isel time interferes with rematerialization in the current register
+// allocator. For now, this is rewritten when the instruction is lowered
+// to an MCInst.
+def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
+ "",
+ [(set GR16:$dst, 0)]>, OpSize;
+
+// FIXME: Set encoding to pseudo.
+def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
+ [(set GR32:$dst, 0)]>;
+}
+
+// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
+// smaller encoding, but doing so at isel time interferes with rematerialization
+// in the current register allocator. For now, this is rewritten when the
+// instruction is lowered to an MCInst.
+// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
+// when we have a better way to specify isel priority.
+let Defs = [EFLAGS], isCodeGenOnly=1,
+ AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
+ [(set GR64:$dst, 0)]>;
+
+// Materialize an i64 constant whose top 32 bits are zero. This could
+// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
+// zero-extension, however that would make it more difficult to rematerialize.
+let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
+ isCodeGenOnly = 1 in
+def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
+ "", [(set GR64:$dst, i64immZExt32:$src)]>;
+
+// Use sbb to materialize carry bit.
+let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
+// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
+// However, Pat<> can't replicate the destination reg into the inputs of the
+// result.
+// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
+// X86CodeEmitter.
+def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
+ [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
+ [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
+ OpSize;
+def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
+ [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
+ [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+} // isCodeGenOnly
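+
+// A brief illustration of the trick: 'sbb %reg, %reg' computes
+// reg - reg - CF, which is 0 when the carry flag is clear and -1 (all ones)
+// when it is set, so it materializes the carry bit across the whole register
+// without a branch.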
+
+
+def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C64r)>;
+
+def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C64r)>;
+
+// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
+// will be eliminated and that the sbb can be extended up to a wider type, which
+// is the profitable case. However, if we are left with an 8-bit sbb and an
+// and, we might as well just match it as a setb.
+def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
+ (SETBr)>;
+
+//===----------------------------------------------------------------------===//
+// String Pseudo Instructions
+//
+let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
+def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
+ [(X86rep_movs i8)]>, REP;
+def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
+ [(X86rep_movs i16)]>, REP, OpSize;
+def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
+ [(X86rep_movs i32)]>, REP;
+}
+
+let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
+def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
+ [(X86rep_movs i64)]>, REP;
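+
+// As a reminder of the underlying semantics: 'rep movsb' copies ECX (RCX in
+// 64-bit mode) bytes from [ESI] to [EDI], so a fixed-size memcpy can be
+// selected as, roughly:
+//     movl  $count, %ecx
+//     rep movsb          # or movsw/movsl/movsq for wider elements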
+
+
+// FIXME: Should use "(X86rep_stos AL)" as the pattern.
+let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
+def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
+ [(X86rep_stos i8)]>, REP;
+let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
+def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
+ [(X86rep_stos i16)]>, REP, OpSize;
+let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
+def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
+ [(X86rep_stos i32)]>, REP;
+
+let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
+def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
+ [(X86rep_stos i64)]>, REP;
+
+
+//===----------------------------------------------------------------------===//
+// Thread Local Storage Instructions
+//
+
+// ELF TLS Support
+// All calls clobber the non-callee saved registers. ESP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
+let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in
+def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLS_addr32",
+ [(X86tlsaddr tls32addr:$sym)]>,
+ Requires<[In32BitMode]>;
+
+// All calls clobber the non-callee saved registers. RSP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
+let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [RSP] in
+def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLS_addr64",
+ [(X86tlsaddr tls64addr:$sym)]>,
+ Requires<[In64BitMode]>;
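+
+// For reference, a typical ELF general-dynamic lowering of TLS_addr64 is a
+// call to __tls_get_addr (hence the call-clobbered register list above),
+// roughly:
+//     leaq  sym@TLSGD(%rip), %rdi
+//     callq __tls_get_addr@PLT
+// with the variable's address returned in %rax. The ABI mandates an exact
+// padded byte sequence here, so treat this as a sketch only.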
+
+// Darwin TLS Support
+// For i386, the address of the thunk is passed on the stack, on return the
+// address of the variable is in %eax. %ecx is trashed during the function
+// call. All other registers are preserved.
+let Defs = [EAX, ECX, EFLAGS],
+ Uses = [ESP],
+ usesCustomInserter = 1 in
+def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLSCall_32",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In32BitMode]>;
+
+// For x86_64, the address of the thunk is passed in %rdi, on return
+// the address of the variable is in %rax. All other registers are preserved.
+let Defs = [RAX, EFLAGS],
+ Uses = [RSP, RDI],
+ usesCustomInserter = 1 in
+def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLSCall_64",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In64BitMode]>;
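+
+// Schematically, the Darwin lowering loads the thunk pointer from the
+// variable's TLV descriptor and calls through it, roughly:
+//     movq  _var@TLVP(%rip), %rdi
+//     callq *(%rdi)
+// leaving the variable's address in %rax, which matches the Defs/Uses
+// constraints above.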
+
+
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions
+
+let Constraints = "$src1 = $dst" in {
+
+// Conditional moves
+let Uses = [EFLAGS] in {
+
+// X86 doesn't have 8-bit conditional moves. Use a custom inserter to
+// emit control flow. An alternative is to mark i8 SELECT as Promote;
+// however, that requires promoting the operands and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+ (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+ "#CMOV_GR8 PSEUDO!",
+ [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+ imm:$cond, EFLAGS))]>;
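+
+// The custom inserter expands CMOV_GR8 into a small control-flow diamond:
+// a conditional branch on $cond, a block that provides one operand, and a
+// PHI in the join block merging $src1 and $src2 (a sketch of the expansion
+// done in X86ISelLowering's custom inserter).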
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+ "#CMOV_GR32* PSEUDO!",
+ [(set GR32:$dst,
+ (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+ "#CMOV_GR16* PSEUDO!",
+ [(set GR16:$dst,
+ (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_RFP32 : I<0, Pseudo,
+ (outs RFP32:$dst),
+ (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+ "#CMOV_RFP32 PSEUDO!",
+ [(set RFP32:$dst,
+ (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP64 : I<0, Pseudo,
+ (outs RFP64:$dst),
+ (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+ "#CMOV_RFP64 PSEUDO!",
+ [(set RFP64:$dst,
+ (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+ (outs RFP80:$dst),
+ (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+ "#CMOV_RFP80 PSEUDO!",
+ [(set RFP80:$dst,
+ (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+ EFLAGS))]>;
+} // Predicates = [NoCMov]
+} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
+} // Uses = [EFLAGS]
+
+} // Constraints = "$src1 = $dst"
+
+
+//===----------------------------------------------------------------------===//
+// Atomic Instruction Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
+// Atomic and, or, xor, nand, and signed/unsigned min/max operations.
+let Constraints = "$val = $dst", Defs = [EFLAGS],
+ usesCustomInserter = 1 in {
+
+def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
+ "#ATOMAND8 PSEUDO!",
+ [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
+def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
+ "#ATOMOR8 PSEUDO!",
+ [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
+def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
+ "#ATOMXOR8 PSEUDO!",
+ [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
+def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
+ "#ATOMNAND8 PSEUDO!",
+ [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
+
+def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMAND16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
+def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMOR16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
+def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMXOR16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
+def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMNAND16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
+def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
+ "#ATOMMIN16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
+def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMMAX16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
+def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMUMIN16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
+def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
+ "#ATOMUMAX16 PSEUDO!",
+ [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
+
+
+def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMAND32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
+def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMOR32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
+def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMXOR32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
+def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMNAND32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
+def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
+ "#ATOMMIN32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
+def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMMAX32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
+def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMUMIN32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
+def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
+ "#ATOMUMAX32 PSEUDO!",
+ [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
+
+
+
+def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMAND64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
+def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMOR64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
+def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMXOR64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
+def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMNAND64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
+def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+ "#ATOMMIN64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
+def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMMAX64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
+def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMUMIN64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
+def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
+ "#ATOMUMAX64 PSEUDO!",
+ [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
+}
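+
+// None of these operations has a single-instruction x86 form; the custom
+// inserter expands each pseudo into a compare-and-swap loop, roughly (shown
+// for ATOMAND32; labels and operands illustrative):
+//     movl  (ptr), %eax
+//   .Lretry:
+//     movl  %eax, %ecx
+//     andl  %val, %ecx
+//     lock cmpxchgl %ecx, (ptr)    # fails and reloads %eax if (ptr) changed
+//     jne   .Lretry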
+
+let Constraints = "$val1 = $dst1, $val2 = $dst2",
+ Defs = [EFLAGS, EAX, EBX, ECX, EDX],
+ Uses = [EAX, EBX, ECX, EDX],
+ mayLoad = 1, mayStore = 1,
+ usesCustomInserter = 1 in {
+def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMAND6432 PSEUDO!", []>;
+def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMOR6432 PSEUDO!", []>;
+def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMXOR6432 PSEUDO!", []>;
+def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMNAND6432 PSEUDO!", []>;
+def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMADD6432 PSEUDO!", []>;
+def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMSUB6432 PSEUDO!", []>;
+def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+ (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+ "#ATOMSWAP6432 PSEUDO!", []>;
+}
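+
+// The 64-bit-on-32-bit variants are expanded to an analogous loop around
+// 'lock cmpxchg8b', which expects the current value in EDX:EAX and the
+// desired value in ECX:EBX; that is why those registers are pinned above.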
+
+//===----------------------------------------------------------------------===//
+// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
+// FIXME: Use normal instructions and add lock prefix dynamically.
+
+// Memory barriers
+
+// TODO: Get this to fold the constant into the instruction.
+let isCodeGenOnly = 1 in
+def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
+ "lock\n\t"
+ "or{l}\t{$zero, $dst|$dst, $zero}",
+ []>, Requires<[In32BitMode]>, LOCK;
+
+let hasSideEffects = 1 in
+def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
+ "#MEMBARRIER",
+ [(X86MemBarrier)]>, Requires<[HasSSE2]>;
+
+// TODO: Get this to fold the constant into the instruction.
+let hasSideEffects = 1, Defs = [ESP], isCodeGenOnly = 1 in
+def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
+ "lock\n\t"
+ "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
+ [(X86MemBarrierNoSSE GR64:$zero)]>,
+ Requires<[In64BitMode]>, LOCK;
+
+
+// Optimized codegen when the non-memory output is not used.
+let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
+def LOCK_ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
+ "lock\n\t"
+ "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "lock\n\t"
+ "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
+def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "lock\n\t"
+ "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "lock\n\t"
+ "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+def LOCK_ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
+ "lock\n\t"
+ "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
+ "lock\n\t"
+ "add{w}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
+ "lock\n\t"
+ "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
+ (ins i64mem:$dst, i64i32imm :$src2),
+ "lock\n\t"
+ "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
+ "lock\n\t"
+ "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
+def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
+ "lock\n\t"
+ "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
+ (ins i64mem:$dst, i64i8imm :$src2),
+ "lock\n\t"
+ "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+def LOCK_SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
+ "lock\n\t"
+ "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "lock\n\t"
+ "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
+def LOCK_SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "lock\n\t"
+ "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "lock\n\t"
+ "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+
+def LOCK_SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
+ "lock\n\t"
+ "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
+ "lock\n\t"
+ "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
+def LOCK_SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
+ "lock\n\t"
+ "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
+ (ins i64mem:$dst, i64i32imm:$src2),
+ "lock\n\t"
+ "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+
+def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
+ "lock\n\t"
+ "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
+def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
+ "lock\n\t"
+ "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
+ (ins i64mem:$dst, i64i8imm :$src2),
+ "lock\n\t"
+ "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
+
+def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
+ "lock\n\t"
+ "inc{b}\t$dst", []>, LOCK;
+def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
+ "lock\n\t"
+ "inc{w}\t$dst", []>, OpSize, LOCK;
+def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
+ "lock\n\t"
+ "inc{l}\t$dst", []>, LOCK;
+def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
+ "lock\n\t"
+ "inc{q}\t$dst", []>, LOCK;
+
+def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
+ "lock\n\t"
+ "dec{b}\t$dst", []>, LOCK;
+def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
+ "lock\n\t"
+ "dec{w}\t$dst", []>, OpSize, LOCK;
+def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
+ "lock\n\t"
+ "dec{l}\t$dst", []>, LOCK;
+def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
+ "lock\n\t"
+ "dec{q}\t$dst", []>, LOCK;
+}
+
+// Atomic compare and swap.
+let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
+ isCodeGenOnly = 1 in {
+def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
+ "lock\n\t"
+ "cmpxchg8b\t$ptr",
+ [(X86cas8 addr:$ptr)]>, TB, LOCK;
+}
+let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
+def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
+ "lock\n\t"
+ "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
+ [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
+}
+
+let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
+def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
+ "lock\n\t"
+ "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
+ [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
+}
+
+let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
+def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
+ "lock\n\t"
+ "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
+ [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
+}
+
+let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
+def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
+ "lock\n\t"
+ "cmpxchgq\t$swap,$ptr",
+ [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
+}
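+
+// Recall the cmpxchg semantics these patterns rely on: the accumulator
+// (AL/AX/EAX/RAX) is compared with the memory operand; on a match the $swap
+// value is stored and ZF is set, otherwise the memory value is loaded into
+// the accumulator and ZF is cleared.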
+
+// Atomic exchange and add
+let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
+def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
+ "lock\n\t"
+ "xadd{b}\t{$val, $ptr|$ptr, $val}",
+ [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
+ TB, LOCK;
+def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
+ "lock\n\t"
+ "xadd{w}\t{$val, $ptr|$ptr, $val}",
+ [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
+ TB, OpSize, LOCK;
+def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
+ "lock\n\t"
+ "xadd{l}\t{$val, $ptr|$ptr, $val}",
+ [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
+ TB, LOCK;
+def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
+ "lock\n\t"
+ "xadd\t$val, $ptr",
+ [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
+ TB, LOCK;
+}
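+
+// xadd writes the old memory value into the register operand and stores the
+// sum back to memory, so with a lock prefix it is precisely an atomic
+// fetch-and-add: the previous value of (ptr) ends up in $dst, matching the
+// atomic_load_add_* patterns above.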
+
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions.
+//===----------------------------------------------------------------------===//
+
+
+// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
+// instruction selection into a branch sequence.
+let Uses = [EFLAGS], usesCustomInserter = 1 in {
+ def CMOV_FR32 : I<0, Pseudo,
+ (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
+ "#CMOV_FR32 PSEUDO!",
+ [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
+ EFLAGS))]>;
+ def CMOV_FR64 : I<0, Pseudo,
+ (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
+ "#CMOV_FR64 PSEUDO!",
+ [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
+ EFLAGS))]>;
+ def CMOV_V4F32 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V4F32 PSEUDO!",
+ [(set VR128:$dst,
+ (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V2F64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2F64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def CMOV_V2I64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2I64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DAG Pattern Matching Rules
+//===----------------------------------------------------------------------===//
+
+// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
+def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
+def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
+def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
+def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
+def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;
+
+def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
+ (ADD32ri GR32:$src1, tconstpool:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
+ (ADD32ri GR32:$src1, tjumptable:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
+ (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
+ (ADD32ri GR32:$src1, texternalsym:$src2)>;
+def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
+ (ADD32ri GR32:$src1, tblockaddress:$src2)>;
+
+def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tglobaladdr:$src)>;
+def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
+ (MOV32mi addr:$dst, texternalsym:$src)>;
+def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
+ (MOV32mi addr:$dst, tblockaddress:$src)>;
+
+
+
+// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable references,
+// when not in the small code model, should use 'movabs'. FIXME: This is
+// really a hack; the 'movabs' predicate should handle this sort of thing.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
+
+// In static codegen with the small code model, we can get the address of a
+// label into a register with 'movl'. FIXME: This is a hack; the 'imm'
+// predicate of MOV64ri64i32 should accept these.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri64i32 tconstpool :$dst)>, Requires<[SmallCode]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri64i32 tjumptable :$dst)>, Requires<[SmallCode]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;
+
+// In the kernel code model, we can get the address of a label
+// into a register with 'movq'. FIXME: This is a hack; the 'imm'
+// predicate of MOV64ri32 should accept these.
+def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
+ (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
+ (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
+ (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
+ (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
+def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
+ (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
+
+// With the small code model and -static, it is safe to store global
+// addresses directly as immediates. FIXME: This is really a hack; the 'imm'
+// predicate for MOV64mi32 should handle this sort of thing.
+def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tconstpool:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tjumptable:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, texternalsym:$src)>,
+ Requires<[NearData, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tblockaddress:$src)>,
+ Requires<[NearData, IsStatic]>;
+
+
+
+// Calls
+
+// TLS has some funny stuff here...
+// This corresponds to movabs $foo@tpoff, %rax
+def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
+ (MOV64ri tglobaltlsaddr :$dst)>;
+// This corresponds to add $foo@tpoff, %rax
+def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
+ (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
+// This corresponds to mov foo@tpoff(%rbx), %eax
+def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
+ (MOV64rm tglobaltlsaddr :$dst)>;
+
+
+// Direct PC relative function call for small code model. 32-bit displacement
+// sign extended to 64-bit.
+def : Pat<(X86call (i64 tglobaladdr:$dst)),
+ (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
+def : Pat<(X86call (i64 texternalsym:$dst)),
+ (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;
+
+def : Pat<(X86call (i64 tglobaladdr:$dst)),
+ (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
+def : Pat<(X86call (i64 texternalsym:$dst)),
+ (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
+
+// tailcall stuff
+def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
+ (TCRETURNri GR32_TC:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
+
+// FIXME: This is disabled for 32-bit PIC mode because the global base
+// register, which is part of the address mode, may be assigned a
+// callee-saved register.
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi addr:$dst, imm:$off)>,
+ Requires<[In32BitMode, IsNotPIC]>;
+
+def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
+
+def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
+
+def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
+ (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi64 addr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
+ (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
+ (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+// Normal calls, with various flavors of addresses.
+def : Pat<(X86call (i32 tglobaladdr:$dst)),
+ (CALLpcrel32 tglobaladdr:$dst)>;
+def : Pat<(X86call (i32 texternalsym:$dst)),
+ (CALLpcrel32 texternalsym:$dst)>;
+def : Pat<(X86call (i32 imm:$dst)),
+ (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
+
+// Comparisons.
+
+// TEST R,R is smaller than CMP R,0
+def : Pat<(X86cmp GR8:$src1, 0),
+ (TEST8rr GR8:$src1, GR8:$src1)>;
+def : Pat<(X86cmp GR16:$src1, 0),
+ (TEST16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(X86cmp GR32:$src1, 0),
+ (TEST32rr GR32:$src1, GR32:$src1)>;
+def : Pat<(X86cmp GR64:$src1, 0),
+ (TEST64rr GR64:$src1, GR64:$src1)>;
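+
+// Size illustration: 'testl %eax, %eax' encodes as 85 C0 (2 bytes), while
+// 'cmpl $0, %eax' needs an immediate byte on top (83 F8 00, 3 bytes); both
+// set SF/ZF/PF from the value and clear CF/OF.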
+
+// Conditional moves with folded loads with operands swapped and conditions
+// inverted.
+multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
+ Instruction Inst64> {
+ def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
+ (Inst16 GR16:$src2, addr:$src1)>;
+ def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
+ (Inst32 GR32:$src2, addr:$src1)>;
+ def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
+ (Inst64 GR64:$src2, addr:$src1)>;
+}
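+
+// Example of what this multiclass achieves: the memory form of cmov
+// conditionally loads its memory operand, so a select whose other arm comes
+// from memory is matched by swapping the operands and inverting the
+// condition, e.g. (X86cmov (load m), r, COND_B) becomes 'cmovae m, r' with
+// r tied to the destination.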
+
+defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
+defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
+defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
+defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
+defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
+defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
+defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
+defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
+defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
+defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
+defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
+defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
+defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
+defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
+defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
+defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
+
+// zextload bool -> zextload byte
+def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
+
+// extload bool -> extload byte
+// When extloading from 16-bit and smaller memory locations into 64-bit
+// registers, use zero-extending loads so that the entire 64-bit register is
+// defined, avoiding partial-register updates.
+
+def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
+def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
+
+def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
+def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
+def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
+// For other extloads, use subregs: the extload is implemented as a plain
+// 32-bit load, which leaves the high half of the 64-bit register in a
+// defined (zeroed) state.
+def : Pat<(extloadi64i32 addr:$src),
+ (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
+ sub_32bit)>;
+
+// anyext: define these to do an explicit zero-extend to
+// avoid partial-register updates.
+def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
+def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
+
+// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
+def : Pat<(i32 (anyext GR16:$src)),
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
+
+def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
+def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
+def : Pat<(i64 (anyext GR32:$src)),
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
+
+
+// Most instructions that define a 32-bit result implicitly zero the high
+// half of the 64-bit register. The exceptions checked for below: Truncate
+// can be lowered to EXTRACT_SUBREG, CopyFromReg may be copying from a
+// truncate, and x86's cmov doesn't do anything if the condition is false.
+// Any other 32-bit operation will zero-extend up to 64 bits.
+def def32 : PatLeaf<(i32 GR32:$src), [{
+ return N->getOpcode() != ISD::TRUNCATE &&
+ N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+ N->getOpcode() != ISD::CopyFromReg &&
+ N->getOpcode() != X86ISD::CMOV;
+}]>;
+
+// In the case of a 32-bit def that is known to implicitly zero-extend,
+// we can use a SUBREG_TO_REG.
+def : Pat<(i64 (zext def32:$src)),
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
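+
+// For example, (zext (add GR32:$a, GR32:$b)) to i64 needs no extra
+// instruction: the 32-bit 'addl' already zeroed the upper half of the
+// destination, so the SUBREG_TO_REG above merely records that fact.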
+
+//===----------------------------------------------------------------------===//
+// Pattern match OR as ADD
+//===----------------------------------------------------------------------===//
+
+// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
+// 3-addressified into an LEA instruction to avoid copies. However, we also
+// want to finally emit these instructions as an or at the end of the code
+// generator to make the generated code easier to read. To do this, we select
+// into "disjoint bits" pseudo ops.
+
+// Treat an 'or' node as an 'add' when it is provable that the operands share
+// no set bits, so the or can never produce a carry.
+def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+
+ unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+ APInt Mask = APInt::getAllOnesValue(BitWidth);
+ APInt KnownZero0, KnownOne0;
+ CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
+}]>;
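+
+// Worked example: if x is known to have its low bit clear (say it came from
+// 'shl x, 1'), then (or x, 1) cannot produce a carry out of any bit, so it
+// equals (add x, 1) and can be 3-addressified as 'leal 1(%src), %dst'.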
+
+
+// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
+let AddedComplexity = 5 in { // Try this before the selecting to OR
+
+let isConvertibleToThreeAddress = 1,
+ Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
+let isCommutable = 1 in {
+def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "", // orw/addw REG, REG
+ [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
+def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "", // orl/addl REG, REG
+ [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
+def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "", // orq/addq REG, REG
+ [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
+} // isCommutable
+
+// NOTE: These are order-specific: we want the ri8 forms to be listed
+// first so that they are slightly preferred over the ri forms.
+
+def ADD16ri8_DB : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
+ "", // orw/addw REG, imm8
+ [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
+def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ "", // orw/addw REG, imm
+ [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
+
+def ADD32ri8_DB : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
+ "", // orl/addl REG, imm8
+ [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
+def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
+ "", // orl/addl REG, imm
+ [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
+
+
+def ADD64ri8_DB : I<0, Pseudo,
+ (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "", // orq/addq REG, imm8
+ [(set GR64:$dst, (or_is_add GR64:$src1,
+ i64immSExt8:$src2))]>;
+def ADD64ri32_DB : I<0, Pseudo,
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "", // orq/addq REG, imm
+ [(set GR64:$dst, (or_is_add GR64:$src1,
+ i64immSExt32:$src2))]>;
+}
+} // AddedComplexity
+
+
+//===----------------------------------------------------------------------===//
+// Some peepholes
+//===----------------------------------------------------------------------===//
+
+// Odd encoding trick: -128 fits into an 8-bit immediate field while
+// +128 doesn't, so in this special case use a sub instead of an add.
+def : Pat<(add GR16:$src1, 128),
+ (SUB16ri8 GR16:$src1, -128)>;
+def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
+ (SUB16mi8 addr:$dst, -128)>;
+
+def : Pat<(add GR32:$src1, 128),
+ (SUB32ri8 GR32:$src1, -128)>;
+def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
+ (SUB32mi8 addr:$dst, -128)>;
+
+def : Pat<(add GR64:$src1, 128),
+ (SUB64ri8 GR64:$src1, -128)>;
+def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
+ (SUB64mi8 addr:$dst, -128)>;
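+
+// Size illustration: for EAX, 'addl $128, %eax' needs a 32-bit immediate
+// (05 80 00 00 00, 5 bytes), while 'subl $-128, %eax' fits the sign-extended
+// 8-bit immediate form (83 E8 80, 3 bytes).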
+
+// The same trick applies for 32-bit immediate fields in 64-bit
+// instructions.
+def : Pat<(add GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
+ (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
+
+// To avoid needing to materialize an immediate in a register, use a 32-bit
+// 'and' with implicit zero-extension instead of a 64-bit 'and' if the
+// immediate has at least 32 bits of leading zeros. If in addition the low 32
+// bits can be represented with a sign extension of an 8-bit constant, use
+// that.
+
+def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri8
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo8XForm imm:$imm))),
+ sub_32bit)>;
+
+def : Pat<(and GR64:$src, i64immZExt32:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo32XForm imm:$imm))),
+ sub_32bit)>;
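+
+// For example, (and GR64:$x, 0x7f) can be selected as 'andl $0x7f, %esi' on
+// the 32-bit subregister: the 32-bit 'and' implicitly zeroes bits 63:32, so
+// neither a REX-prefixed 'and' nor a separately materialized 64-bit
+// immediate is needed.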
+
+
+// r & (2^16-1) ==> movz
+def : Pat<(and GR32:$src1, 0xffff),
+ (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[In32BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
+ GR16_ABCD)),
+ sub_8bit))>,
+ Requires<[In32BitMode]>;
+
+// r & (2^32-1) ==> movz
+def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
+ (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
+// r & (2^16-1) ==> movz
+def : Pat<(and GR64:$src, 0xffff),
+ (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR64:$src, 0xff),
+ (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+ (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
+ Requires<[In64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+ (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
+ Requires<[In64BitMode]>;
+
+
+// sext_inreg patterns
+def : Pat<(sext_inreg GR32:$src, i16),
+ (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit))>,
+ Requires<[In32BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+ (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit))>,
+ Requires<[In32BitMode]>;
+
+def : Pat<(sext_inreg GR64:$src, i32),
+ (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
+def : Pat<(sext_inreg GR64:$src, i16),
+ (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
+def : Pat<(sext_inreg GR64:$src, i8),
+ (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+ (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
+ Requires<[In64BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+ (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
+ Requires<[In64BitMode]>;
+
+
+// trunc patterns
+def : Pat<(i16 (trunc GR32:$src)),
+ (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
+def : Pat<(i8 (trunc GR32:$src)),
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit)>,
+ Requires<[In32BitMode]>;
+def : Pat<(i8 (trunc GR16:$src)),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit)>,
+ Requires<[In32BitMode]>;
+def : Pat<(i32 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
+def : Pat<(i16 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
+def : Pat<(i8 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
+def : Pat<(i8 (trunc GR32:$src)),
+ (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i8 (trunc GR16:$src)),
+ (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
+ Requires<[In64BitMode]>;
+
+// h-register tricks
+def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[In32BitMode]>;
+def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[In32BitMode]>;
+def : Pat<(srl GR16:$src, (i8 8)),
+ (EXTRACT_SUBREG
+ (MOVZX32rr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[In32BitMode]>;
+def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In32BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
+ GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In32BitMode]>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In32BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In32BitMode]>;
+
+// h-register tricks (x86-64).
+// For now, be conservative on x86-64 and use an h-register extract only if the
+// value is immediately zero-extended or stored, which are somewhat common
+// cases. This uses a bunch of code to prevent a register requiring a REX prefix
+// from being allocated in the same instruction as the h register, as there's
+// currently no way to describe this requirement to the register allocator.
+
+// h-register extract and zero-extend.
+def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl GR16:$src, (i8 8)),
+ (EXTRACT_SUBREG
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_16bit)>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
+ (SUBREG_TO_REG
+ (i64 0),
+ (MOVZX32_NOREXrr8
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)),
+ sub_32bit)>;
+
+// h-register extract and store.
+def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
+ sub_8bit_hi))>;
+def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
+ (MOV8mr_NOREX
+ addr:$dst,
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
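+
+// For instance, storing bits 15:8 of a 64-bit value becomes
+// 'movb %ah, (mem)' after the copy into GR64_ABCD; MOV8mr_NOREX guarantees
+// an encoding with no REX prefix, since AH/BH/CH/DH are unencodable in any
+// REX-prefixed instruction.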
+
+
+// (shl x, 1) ==> (add x, x)
+def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
+def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
+def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+
+// (shl x (and y, 31)) ==> (shl x, y)
+def : Pat<(shl GR8:$src1, (and CL, 31)),
+ (SHL8rCL GR8:$src1)>;
+def : Pat<(shl GR16:$src1, (and CL, 31)),
+ (SHL16rCL GR16:$src1)>;
+def : Pat<(shl GR32:$src1, (and CL, 31)),
+ (SHL32rCL GR32:$src1)>;
+def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHL8mCL addr:$dst)>;
+def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHL16mCL addr:$dst)>;
+def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHL32mCL addr:$dst)>;
+
+def : Pat<(srl GR8:$src1, (and CL, 31)),
+ (SHR8rCL GR8:$src1)>;
+def : Pat<(srl GR16:$src1, (and CL, 31)),
+ (SHR16rCL GR16:$src1)>;
+def : Pat<(srl GR32:$src1, (and CL, 31)),
+ (SHR32rCL GR32:$src1)>;
+def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHR8mCL addr:$dst)>;
+def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHR16mCL addr:$dst)>;
+def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+ (SHR32mCL addr:$dst)>;
+
+def : Pat<(sra GR8:$src1, (and CL, 31)),
+ (SAR8rCL GR8:$src1)>;
+def : Pat<(sra GR16:$src1, (and CL, 31)),
+ (SAR16rCL GR16:$src1)>;
+def : Pat<(sra GR32:$src1, (and CL, 31)),
+ (SAR32rCL GR32:$src1)>;
+def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+ (SAR8mCL addr:$dst)>;
+def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+ (SAR16mCL addr:$dst)>;
+def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+ (SAR32mCL addr:$dst)>;
+
+// (shl x (and y, 63)) ==> (shl x, y)
+def : Pat<(shl GR64:$src1, (and CL, 63)),
+ (SHL64rCL GR64:$src1)>;
+def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
+ (SHL64mCL addr:$dst)>;
+
+def : Pat<(srl GR64:$src1, (and CL, 63)),
+ (SHR64rCL GR64:$src1)>;
+def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
+ (SHR64mCL addr:$dst)>;
+
+def : Pat<(sra GR64:$src1, (and CL, 63)),
+ (SAR64rCL GR64:$src1)>;
+def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
+ (SAR64mCL addr:$dst)>;
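+
+// These patterns are justified by the hardware behavior: x86 shifts already
+// mask the CL count to 5 bits (6 bits for 64-bit operands), so an explicit
+// (and CL, 31) or (and CL, 63) in the DAG is redundant and can be dropped
+// when selecting the rCL/mCL forms.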
+
+
+// (anyext (setcc_carry)) -> (setcc_carry)
+def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C16r)>;
+def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
+
+
+
+
+//===----------------------------------------------------------------------===//
+// EFLAGS-defining Patterns
+//===----------------------------------------------------------------------===//
+
+// add reg, reg
+def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
+
+// add reg, mem
+def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
+ (ADD8rm GR8:$src1, addr:$src2)>;
+def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
+ (ADD16rm GR16:$src1, addr:$src2)>;
+def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
+ (ADD32rm GR32:$src1, addr:$src2)>;
+
+// add reg, imm
+def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
+def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
+def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
+def : Pat<(add GR16:$src1, i16immSExt8:$src2),
+ (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(add GR32:$src1, i32immSExt8:$src2),
+ (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// sub reg, reg
+def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
+
+// sub reg, mem
+def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
+ (SUB8rm GR8:$src1, addr:$src2)>;
+def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
+ (SUB16rm GR16:$src1, addr:$src2)>;
+def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
+ (SUB32rm GR32:$src1, addr:$src2)>;
+
+// sub reg, imm
+def : Pat<(sub GR8:$src1, imm:$src2),
+ (SUB8ri GR8:$src1, imm:$src2)>;
+def : Pat<(sub GR16:$src1, imm:$src2),
+ (SUB16ri GR16:$src1, imm:$src2)>;
+def : Pat<(sub GR32:$src1, imm:$src2),
+ (SUB32ri GR32:$src1, imm:$src2)>;
+def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
+ (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
+ (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// mul reg, reg
+def : Pat<(mul GR16:$src1, GR16:$src2),
+ (IMUL16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(mul GR32:$src1, GR32:$src2),
+ (IMUL32rr GR32:$src1, GR32:$src2)>;
+
+// mul reg, mem
+def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
+ (IMUL16rm GR16:$src1, addr:$src2)>;
+def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
+ (IMUL32rm GR32:$src1, addr:$src2)>;
+
+// mul reg, imm
+def : Pat<(mul GR16:$src1, imm:$src2),
+ (IMUL16rri GR16:$src1, imm:$src2)>;
+def : Pat<(mul GR32:$src1, imm:$src2),
+ (IMUL32rri GR32:$src1, imm:$src2)>;
+def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
+ (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
+ (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
+
+// reg = mul mem, imm
+def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
+ (IMUL16rmi addr:$src1, imm:$src2)>;
+def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
+ (IMUL32rmi addr:$src1, imm:$src2)>;
+def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
+ (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
+def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
+ (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
+
+// Optimize multiply by 2 with EFLAGS result.
+let AddedComplexity = 2 in {
+def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
+}
+
+// Patterns for nodes that do not produce flags, for instructions that do.
+
+// addition
+def : Pat<(add GR64:$src1, GR64:$src2),
+ (ADD64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(add GR64:$src1, i64immSExt8:$src2),
+ (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(add GR64:$src1, i64immSExt32:$src2),
+ (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
+def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
+ (ADD64rm GR64:$src1, addr:$src2)>;
+
+// subtraction
+def : Pat<(sub GR64:$src1, GR64:$src2),
+ (SUB64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
+ (SUB64rm GR64:$src1, addr:$src2)>;
+def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
+ (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
+ (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Multiply
+def : Pat<(mul GR64:$src1, GR64:$src2),
+ (IMUL64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
+ (IMUL64rm GR64:$src1, addr:$src2)>;
+def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
+ (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
+ (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
+ (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
+ (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
+
+// Increment reg.
+def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
+def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
+
+// Decrement reg.
+def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
+def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
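+
+// The separate INC64_16r/INC64_32r and DEC64_16r/DEC64_32r forms exist
+// because in 64-bit mode the short one-byte inc/dec encodings (0x40-0x4F)
+// are repurposed as REX prefixes, so inc/dec must use the longer FF /0 and
+// FF /1 encodings there.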
+
+// or reg/reg.
+def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;
+
+// or reg/mem
+def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
+ (OR8rm GR8:$src1, addr:$src2)>;
+def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
+ (OR16rm GR16:$src1, addr:$src2)>;
+def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
+ (OR32rm GR32:$src1, addr:$src2)>;
+def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
+ (OR64rm GR64:$src1, addr:$src2)>;
+
+// or reg/imm
+def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
+def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, i16immSExt8:$src2),
+ (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(or GR32:$src1, i32immSExt8:$src2),
+ (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(or GR64:$src1, i64immSExt8:$src2),
+ (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(or GR64:$src1, i64immSExt32:$src2),
+ (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// xor reg/reg
+def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;
+
+// xor reg/mem
+def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
+ (XOR8rm GR8:$src1, addr:$src2)>;
+def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
+ (XOR16rm GR16:$src1, addr:$src2)>;
+def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
+ (XOR32rm GR32:$src1, addr:$src2)>;
+def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
+ (XOR64rm GR64:$src1, addr:$src2)>;
+
+// xor reg/imm
+def : Pat<(xor GR8:$src1, imm:$src2),
+ (XOR8ri GR8:$src1, imm:$src2)>;
+def : Pat<(xor GR16:$src1, imm:$src2),
+ (XOR16ri GR16:$src1, imm:$src2)>;
+def : Pat<(xor GR32:$src1, imm:$src2),
+ (XOR32ri GR32:$src1, imm:$src2)>;
+def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
+ (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
+ (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
+ (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
+ (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// and reg/reg
+def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
+def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;
+
+// and reg/mem
+def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
+ (AND8rm GR8:$src1, addr:$src2)>;
+def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
+ (AND16rm GR16:$src1, addr:$src2)>;
+def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
+ (AND32rm GR32:$src1, addr:$src2)>;
+def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
+ (AND64rm GR64:$src1, addr:$src2)>;
+
+// and reg/imm
+def : Pat<(and GR8:$src1, imm:$src2),
+ (AND8ri GR8:$src1, imm:$src2)>;
+def : Pat<(and GR16:$src1, imm:$src2),
+ (AND16ri GR16:$src1, imm:$src2)>;
+def : Pat<(and GR32:$src1, imm:$src2),
+ (AND32ri GR32:$src1, imm:$src2)>;
+def : Pat<(and GR16:$src1, i16immSExt8:$src2),
+ (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
+def : Pat<(and GR32:$src1, i32immSExt8:$src2),
+ (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
+def : Pat<(and GR64:$src1, i64immSExt8:$src2),
+ (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(and GR64:$src1, i64immSExt32:$src2),
+ (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrControl.td b/contrib/llvm/lib/Target/X86/X86InstrControl.td
new file mode 100644
index 0000000..77f4725
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrControl.td
@@ -0,0 +1,294 @@
+//===- X86InstrControl.td - Control Flow Instructions ------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the X86 jump, return, call, and related instructions.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Control Flow Instructions.
+//
+
+// Return instructions.
+let isTerminator = 1, isReturn = 1, isBarrier = 1,
+ hasCtrlDep = 1, FPForm = SpecialFP in {
+ def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ "ret",
+ [(X86retflag 0)]>;
+ def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ "ret\t$amt",
+ [(X86retflag timm:$amt)]>;
+ def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+ "retw\t$amt",
+ []>, OpSize;
+ def LRETL : I <0xCB, RawFrm, (outs), (ins),
+ "lretl", []>;
+ def LRETQ : RI <0xCB, RawFrm, (outs), (ins),
+ "lretq", []>;
+ def LRETI : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+ "lret\t$amt", []>;
+ def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
+ "lretw\t$amt", []>, OpSize;
+}
+
+// Unconditional branches.
+let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
+ def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+ "jmp\t$dst", [(br bb:$dst)]>;
+ def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
+ "jmp\t$dst", []>;
+ def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+ "jmp{q}\t$dst", []>;
+}
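JMP_1 is the two-byte EB rel8 encoding and JMP_4 the five-byte E9 rel32 encoding; which one can be used depends on whether the displacement, measured from the end of the instruction, fits in a signed byte. A sketch of that test, assuming the final layout is already known:

    #include <cstdint>

    // True if a jump ending at InstEnd can reach Target with an 8-bit
    // displacement (the EB / JMP_1 form).
    static bool fitsRel8(int64_t Target, int64_t InstEnd) {
      int64_t Delta = Target - InstEnd;
      return Delta >= -128 && Delta <= 127;
    }
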
+
+// Conditional Branches.
+let isBranch = 1, isTerminator = 1, Uses = [EFLAGS] in {
+ multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
+ def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, []>;
+ def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
+ [(X86brcond bb:$dst, Cond, EFLAGS)]>, TB;
+ }
+}
+
+defm JO : ICBr<0x70, 0x80, "jo\t$dst" , X86_COND_O>;
+defm JNO : ICBr<0x71, 0x81, "jno\t$dst" , X86_COND_NO>;
+defm JB : ICBr<0x72, 0x82, "jb\t$dst" , X86_COND_B>;
+defm JAE : ICBr<0x73, 0x83, "jae\t$dst", X86_COND_AE>;
+defm JE : ICBr<0x74, 0x84, "je\t$dst" , X86_COND_E>;
+defm JNE : ICBr<0x75, 0x85, "jne\t$dst", X86_COND_NE>;
+defm JBE : ICBr<0x76, 0x86, "jbe\t$dst", X86_COND_BE>;
+defm JA : ICBr<0x77, 0x87, "ja\t$dst" , X86_COND_A>;
+defm JS : ICBr<0x78, 0x88, "js\t$dst" , X86_COND_S>;
+defm JNS : ICBr<0x79, 0x89, "jns\t$dst", X86_COND_NS>;
+defm JP : ICBr<0x7A, 0x8A, "jp\t$dst" , X86_COND_P>;
+defm JNP : ICBr<0x7B, 0x8B, "jnp\t$dst", X86_COND_NP>;
+defm JL : ICBr<0x7C, 0x8C, "jl\t$dst" , X86_COND_L>;
+defm JGE : ICBr<0x7D, 0x8D, "jge\t$dst", X86_COND_GE>;
+defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>;
+defm JG : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>;
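The ICBr multiclass works because the Jcc encodings are completely regular: the short form is 0x70 plus the 4-bit condition code, and the near form is 0x0F followed by 0x80 plus the same code, exactly what the opc1/opc4 columns above spell out. Restated in C++:

    // Condition codes in encoding order (matches the defm list above).
    enum X86CC { CC_O, CC_NO, CC_B, CC_AE, CC_E, CC_NE, CC_BE, CC_A,
                 CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_GE, CC_LE, CC_G };

    static unsigned shortJccOpcode(X86CC CC) { return 0x70 + CC; } // rel8
    static unsigned nearJccOpcode(X86CC CC)  { return 0x80 + CC; } // 0x0F-prefixed, rel32
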
+
+// jcxz/jecxz/jrcxz instructions.
+let isAsmParserOnly = 1, isBranch = 1, isTerminator = 1 in {
+ // These are the 32-bit versions of this instruction for the asmparser. In
+ // 32-bit mode, the form with the address-size prefix is jcxz and the
+ // unprefixed form is jecxz.
+ let Uses = [CX] in
+ def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jcxz\t$dst", []>, AdSize, Requires<[In32BitMode]>;
+ let Uses = [ECX] in
+ def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jecxz\t$dst", []>, Requires<[In32BitMode]>;
+
+ // J*CXZ instructions: 64-bit versions of this instruction for the asmparser.
+ // In 64-bit mode, the form with the address-size prefix is jecxz and the
+ // unprefixed form is jrcxz.
+ let Uses = [ECX] in
+ def JECXZ_64 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jecxz\t$dst", []>, AdSize, Requires<[In64BitMode]>;
+ let Uses = [RCX] in
+ def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
+ "jrcxz\t$dst", []>, Requires<[In64BitMode]>;
+}
+
+// Indirect branches
+let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
+ def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
+ [(brind GR32:$dst)]>, Requires<[In32BitMode]>;
+ def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
+ [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>;
+
+ def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
+ [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
+ def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
+ [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
+
+ def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
+ def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
+ "ljmp{q}\t{*}$dst", []>;
+
+ def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
+ "ljmp{w}\t{*}$dst", []>, OpSize;
+ def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
+ "ljmp{l}\t{*}$dst", []>;
+}
+
+
+// Loop instructions
+
+def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
+def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
+def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
+
+//===----------------------------------------------------------------------===//
+// Call Instructions...
+//
+let isCall = 1 in
+ // All calls clobber the non-callee-saved registers. ESP is marked as
+ // a use to prevent stack-pointer assignments that appear immediately
+ // before calls from potentially appearing dead. Uses for argument
+ // registers are added manually.
+ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in {
+ def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
+ (outs), (ins i32imm_pcrel:$dst,variable_ops),
+ "call{l}\t$dst", []>, Requires<[In32BitMode]>;
+ def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
+ "call{l}\t{*}$dst", [(X86call GR32:$dst)]>,
+ Requires<[In32BitMode]>;
+ def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
+ "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
+ Requires<[In32BitMode]>;
+
+ def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "lcall{l}\t{$seg, $off|$off, $seg}", []>;
+
+ def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
+ "lcall{w}\t{*}$dst", []>, OpSize;
+ def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
+ "lcall{l}\t{*}$dst", []>;
+
+ // callw for 16-bit code, for the assembler.
+ let isAsmParserOnly = 1 in
+ def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
+ (outs), (ins i16imm_pcrel:$dst, variable_ops),
+ "callw\t$dst", []>, OpSize;
+ }
+
+
+// Tail call stuff.
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+ isCodeGenOnly = 1 in
+ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in {
+ def TCRETURNdi : PseudoI<(outs),
+ (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
+ def TCRETURNri : PseudoI<(outs),
+ (ins GR32_TC:$dst, i32imm:$offset, variable_ops), []>;
+ let mayLoad = 1 in
+ def TCRETURNmi : PseudoI<(outs),
+ (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+
+ // FIXME: These should be pseudo instructions that are lowered when going to
+ // MCInst.
+ def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i32imm_pcrel:$dst, variable_ops),
+ "jmp\t$dst # TAILCALL",
+ []>;
+ def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
+ "", []>; // FIXME: Remove encoding when JIT is dead.
+ let mayLoad = 1 in
+ def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
+ "jmp{l}\t{*}$dst # TAILCALL", []>;
+}
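For context: the TCRETURN* pseudos never reach the encoder; frame lowering rewrites each one into its TAILJMP* counterpart after folding in the stack adjustment carried by $offset. A rough sketch of the mapping (illustrative only, not the actual lowering code):

    #include <cstring>

    static const char *loweredTailCall(const char *Pseudo) {
      if (!strcmp(Pseudo, "TCRETURNdi")) return "TAILJMPd";
      if (!strcmp(Pseudo, "TCRETURNri")) return "TAILJMPr";
      if (!strcmp(Pseudo, "TCRETURNmi")) return "TAILJMPm";
      return nullptr; // not a tail-call pseudo
    }
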
+
+
+//===----------------------------------------------------------------------===//
+// Call Instructions...
+//
+let isCall = 1 in
+ // All calls clobber the non-callee-saved registers. RSP is marked as
+ // a use to prevent stack-pointer assignments that appear immediately
+ // before calls from potentially appearing dead. Uses for argument
+ // registers are added manually.
+ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [RSP] in {
+
+ // NOTE: this pattern doesn't match "X86call imm", because we do not know
+ // that the offset between an arbitrary immediate and the call will fit in
+ // the 32-bit pcrel field that we have.
+ def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
+ (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+ "call{q}\t$dst", []>,
+ Requires<[In64BitMode, NotWin64]>;
+ def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+ "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
+ Requires<[In64BitMode, NotWin64]>;
+ def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
+ "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
+ Requires<[In64BitMode, NotWin64]>;
+
+ def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
+ "lcall{q}\t{*}$dst", []>;
+ }
+
+ // FIXME: We need to teach codegen about a single list of call-clobbered
+ // registers.
+let isCall = 1, isCodeGenOnly = 1 in
+ // All calls clobber the non-callee-saved registers. RSP is marked as
+ // a use to prevent stack-pointer assignments that appear immediately
+ // before calls from potentially appearing dead. Uses for argument
+ // registers are added manually.
+ let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
+ Uses = [RSP] in {
+ def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
+ (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+ "call{q}\t$dst", []>,
+ Requires<[IsWin64]>;
+ def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+ "call{q}\t{*}$dst",
+ [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
+ def WINCALL64m : I<0xFF, MRM2m, (outs),
+ (ins i64mem:$dst,variable_ops),
+ "call{q}\t{*}$dst",
+ [(X86call (loadi64 addr:$dst))]>,
+ Requires<[IsWin64]>;
+ }
+
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+ isCodeGenOnly = 1 in
+ // AMD64 cc clobbers RSI, RDI, XMM6-XMM15.
+ let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
+ Uses = [RSP],
+ usesCustomInserter = 1 in {
+ def TCRETURNdi64 : PseudoI<(outs),
+ (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ []>;
+ def TCRETURNri64 : PseudoI<(outs),
+ (ins ptr_rc_tailcall:$dst, i32imm:$offset, variable_ops), []>;
+ let mayLoad = 1 in
+ def TCRETURNmi64 : PseudoI<(outs),
+ (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+
+ def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i64i32imm_pcrel:$dst, variable_ops),
+ "jmp\t$dst # TAILCALL", []>;
+ def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+
+ let mayLoad = 1 in
+ def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrExtension.td b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
new file mode 100644
index 0000000..867c0f8
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
@@ -0,0 +1,172 @@
+//===- X86InstrExtension.td - Sign and Zero Extensions -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the sign and zero extension operations.
+//
+//===----------------------------------------------------------------------===//
+
+let neverHasSideEffects = 1 in {
+ let Defs = [AX], Uses = [AL] in
+ def CBW : I<0x98, RawFrm, (outs), (ins),
+ "{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
+ let Defs = [EAX], Uses = [AX] in
+ def CWDE : I<0x98, RawFrm, (outs), (ins),
+ "{cwtl|cwde}", []>; // EAX = signext(AX)
+
+ let Defs = [AX,DX], Uses = [AX] in
+ def CWD : I<0x99, RawFrm, (outs), (ins),
+ "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
+ let Defs = [EAX,EDX], Uses = [EAX] in
+ def CDQ : I<0x99, RawFrm, (outs), (ins),
+ "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
+
+
+ let Defs = [RAX], Uses = [EAX] in
+ def CDQE : RI<0x98, RawFrm, (outs), (ins),
+ "{cltq|cdqe}", []>; // RAX = signext(EAX)
+
+ let Defs = [RAX,RDX], Uses = [RAX] in
+ def CQO : RI<0x99, RawFrm, (outs), (ins),
+ "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
+}
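The trailing comments give the semantics; CDQ, for instance, broadcasts the sign bit of EAX into EDX. Modeled in plain C++ for clarity:

    #include <cstdint>

    // CDQ: EDX:EAX = sign-extension of EAX.
    static void cdq(uint32_t EAX, uint32_t &EDX) {
      EDX = ((int32_t)EAX < 0) ? 0xFFFFFFFFu : 0u;
    }

    // CDQE: RAX = sign-extension of EAX.
    static uint64_t cdqe(uint32_t EAX) {
      return (uint64_t)(int64_t)(int32_t)EAX;
    }
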
+
+
+// Sign/Zero extenders
+// Use movsbl instead of movsbw; we don't care about the high 16 bits
+// of the register here. This has a smaller encoding and avoids a
+// partial-register update. Actual movsbw included for the disassembler.
+def MOVSX16rr8W : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
+ "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
+ "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+
+// FIXME: Use a Pat pattern or define a syntax here.
+let isCodeGenOnly=1 in {
+def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
+ "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
+def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
+ "", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
+}
+def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
+ "movs{bl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (sext GR8:$src))]>, TB;
+def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
+ "movs{bl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
+def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
+ "movs{wl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (sext GR16:$src))]>, TB;
+def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
+ "movs{wl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
+
+// Use movzbl instead of movzbw; we don't care about the high 16 bits
+// of the register here. This has a smaller encoding and avoids a
+// partial-register update. Actual movzbw included for the disassembler.
+def MOVZX16rr8W : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
+ "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
+ "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+// FIXME: Use a Pat pattern or define a syntax here.
+let isCodeGenOnly=1 in {
+def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
+ "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
+def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
+ "", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
+}
+def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
+ "movz{bl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (zext GR8:$src))]>, TB;
+def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
+ "movz{bl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
+def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
+ "movz{wl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (zext GR16:$src))]>, TB;
+def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
+ "movz{wl|x}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
+
+// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
+// except that they use GR32_NOREX for the output operand register class
+// instead of GR32. This allows them to operate on h registers on x86-64.
+def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
+ (outs GR32_NOREX:$dst), (ins GR8:$src),
+ "movz{bl|x}\t{$src, $dst|$dst, $src}",
+ []>, TB;
+let mayLoad = 1 in
+def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
+ (outs GR32_NOREX:$dst), (ins i8mem:$src),
+ "movz{bl|x}\t{$src, $dst|$dst, $src}",
+ []>, TB;
+
+// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
+// operand, which makes it a rare instruction with an 8-bit register
+// operand that can never access an h register. If support for h registers
+// were generalized, this would require a special register class.
+def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
+ "movs{bq|x}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sext GR8:$src))]>, TB;
+def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
+ "movs{bq|x}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
+def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+ "movs{wq|x}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sext GR16:$src))]>, TB;
+def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+ "movs{wq|x}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
+def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
+ "movs{lq|xd}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sext GR32:$src))]>;
+def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
+ "movs{lq|xd}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
+
+// movzbq and movzwq encodings for the disassembler
+def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
+ "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
+ "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+ "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+ "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+
+// FIXME: These should be Pat patterns.
+let isCodeGenOnly = 1 in {
+
+// Use movzbl instead of movzbq when the destination is a register; it's
+// equivalent due to implicit zero-extending, and it has a smaller encoding.
+def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
+ "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
+def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
+ "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
+// Use movzwl instead of movzwq when the destination is a register; it's
+// equivalent due to implicit zero-extending, and it has a smaller encoding.
+def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+ "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
+def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+ "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
+
+// There's no movzlq instruction, but movl can be used for this purpose, using
+// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
+// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
+// zero-extension; however, this isn't possible when the 32-bit value is
+// defined by a truncate or is copied from something where the high bits aren't
+// necessarily all zero. In such cases, we fall back to these explicit zext
+// instructions.
+def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
+ "", [(set GR64:$dst, (zext GR32:$src))]>;
+def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
+ "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
+
+
+}
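The rule the MOVZX64rr32 comment relies on: on x86-64, any write to a 32-bit register implicitly zero-extends into the containing 64-bit register, so a plain movl already is a 32-to-64-bit zext. In C++ semantics:

    #include <cstdint>

    // Effect of "movl %src32, %dst32" on the containing 64-bit register.
    static uint64_t mov32ZeroExtends(uint32_t Src) {
      return (uint64_t)Src; // upper 32 bits are implicitly cleared
    }
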
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
index 9c9bcc7..b506f5e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -32,21 +32,24 @@ def SDTX86FpToIMem : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld,
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst,
- [SDNPHasChain, SDNPInFlag, SDNPMayStore]>;
+ [SDNPHasChain, SDNPInGlue, SDNPMayStore,
+ SDNPMemOperand]>;
def X86fild : SDNode<"X86ISD::FILD", SDTX86Fild,
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fildflag : SDNode<"X86ISD::FILD_FLAG", SDTX86Fild,
- [SDNPHasChain, SDNPOutFlag, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
+ SDNPMemOperand]>;
def X86fp_to_i16mem : SDNode<"X86ISD::FP_TO_INT16_IN_MEM", SDTX86FpToIMem,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_i32mem : SDNode<"X86ISD::FP_TO_INT32_IN_MEM", SDTX86FpToIMem,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_i64mem : SDNode<"X86ISD::FP_TO_INT64_IN_MEM", SDTX86FpToIMem,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_cwd_get16 : SDNode<"X86ISD::FNSTCW16m", SDTX86CwdStore,
- [SDNPHasChain, SDNPMayStore, SDNPSideEffect]>;
+ [SDNPHasChain, SDNPMayStore, SDNPSideEffect,
+ SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
// FPStack pattern fragments
@@ -70,41 +73,23 @@ def fpimmneg1 : PatLeaf<(fpimm), [{
// Some 'special' instructions
let usesCustomInserter = 1 in { // Expanded after instruction selection.
- def FP32_TO_INT16_IN_MEM : I<0, Pseudo,
- (outs), (ins i16mem:$dst, RFP32:$src),
- "##FP32_TO_INT16_IN_MEM PSEUDO!",
+ def FP32_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP32:$src),
[(X86fp_to_i16mem RFP32:$src, addr:$dst)]>;
- def FP32_TO_INT32_IN_MEM : I<0, Pseudo,
- (outs), (ins i32mem:$dst, RFP32:$src),
- "##FP32_TO_INT32_IN_MEM PSEUDO!",
+ def FP32_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP32:$src),
[(X86fp_to_i32mem RFP32:$src, addr:$dst)]>;
- def FP32_TO_INT64_IN_MEM : I<0, Pseudo,
- (outs), (ins i64mem:$dst, RFP32:$src),
- "##FP32_TO_INT64_IN_MEM PSEUDO!",
+ def FP32_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP32:$src),
[(X86fp_to_i64mem RFP32:$src, addr:$dst)]>;
- def FP64_TO_INT16_IN_MEM : I<0, Pseudo,
- (outs), (ins i16mem:$dst, RFP64:$src),
- "##FP64_TO_INT16_IN_MEM PSEUDO!",
+ def FP64_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP64:$src),
[(X86fp_to_i16mem RFP64:$src, addr:$dst)]>;
- def FP64_TO_INT32_IN_MEM : I<0, Pseudo,
- (outs), (ins i32mem:$dst, RFP64:$src),
- "##FP64_TO_INT32_IN_MEM PSEUDO!",
+ def FP64_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP64:$src),
[(X86fp_to_i32mem RFP64:$src, addr:$dst)]>;
- def FP64_TO_INT64_IN_MEM : I<0, Pseudo,
- (outs), (ins i64mem:$dst, RFP64:$src),
- "##FP64_TO_INT64_IN_MEM PSEUDO!",
+ def FP64_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP64:$src),
[(X86fp_to_i64mem RFP64:$src, addr:$dst)]>;
- def FP80_TO_INT16_IN_MEM : I<0, Pseudo,
- (outs), (ins i16mem:$dst, RFP80:$src),
- "##FP80_TO_INT16_IN_MEM PSEUDO!",
+ def FP80_TO_INT16_IN_MEM : PseudoI<(outs), (ins i16mem:$dst, RFP80:$src),
[(X86fp_to_i16mem RFP80:$src, addr:$dst)]>;
- def FP80_TO_INT32_IN_MEM : I<0, Pseudo,
- (outs), (ins i32mem:$dst, RFP80:$src),
- "##FP80_TO_INT32_IN_MEM PSEUDO!",
+ def FP80_TO_INT32_IN_MEM : PseudoI<(outs), (ins i32mem:$dst, RFP80:$src),
[(X86fp_to_i32mem RFP80:$src, addr:$dst)]>;
- def FP80_TO_INT64_IN_MEM : I<0, Pseudo,
- (outs), (ins i64mem:$dst, RFP80:$src),
- "##FP80_TO_INT64_IN_MEM PSEUDO!",
+ def FP80_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP80:$src),
[(X86fp_to_i64mem RFP80:$src, addr:$dst)]>;
}
@@ -212,11 +197,11 @@ def _Fp80m64: FpI_<(outs RFP80:$dst),
[(set RFP80:$dst,
(OpNode RFP80:$src1, (f80 (extloadf64 addr:$src2))))]>;
def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src),
- !strconcat("f", !strconcat(asmstring, "{s}\t$src"))> {
+ !strconcat("f", asmstring, "{s}\t$src")> {
let mayLoad = 1;
}
def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src),
- !strconcat("f", !strconcat(asmstring, "{l}\t$src"))> {
+ !strconcat("f", asmstring, "{l}\t$src")> {
let mayLoad = 1;
}
// ST(0) = ST(0) + [memint]
@@ -245,11 +230,11 @@ def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2),
[(set RFP80:$dst, (OpNode RFP80:$src1,
(X86fild addr:$src2, i32)))]>;
def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src),
- !strconcat("fi", !strconcat(asmstring, "{s}\t$src"))> {
+ !strconcat("fi", asmstring, "{s}\t$src")> {
let mayLoad = 1;
}
def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src),
- !strconcat("fi", !strconcat(asmstring, "{l}\t$src"))> {
+ !strconcat("fi", asmstring, "{l}\t$src")> {
let mayLoad = 1;
}
}
@@ -580,16 +565,16 @@ def UCOM_FPPr : FPI<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop
def UCOM_FIr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i)
(outs), (ins RST:$reg),
- "fucomi\t{$reg, %st(0)|%ST(0), $reg}">, DB;
+ "fucomi\t$reg">, DB;
def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
(outs), (ins RST:$reg),
- "fucomip\t{$reg, %st(0)|%ST(0), $reg}">, DF;
+ "fucompi\t$reg">, DF;
}
def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcomi\t{$reg, %st(0)|%ST(0), $reg}">, DB;
+ "fcomi\t$reg">, DB;
def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg),
- "fcomip\t{$reg, %st(0)|%ST(0), $reg}">, DF;
+ "fcompi\t$reg">, DF;
// Floating point flag ops.
let Defs = [AX] in
@@ -604,8 +589,8 @@ let mayLoad = 1 in
def FLDCW16m : I<0xD9, MRM5m, // X87 control word = [mem16]
(outs), (ins i16mem:$dst), "fldcw\t$dst", []>;
-// Register free
-
+// FPU control instructions
+def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", []>, DB;
def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg),
"ffree\t$reg">, DD;
@@ -613,7 +598,8 @@ def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg),
def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", []>, DB;
-// Operandless floating-point instructions for the disassembler
+// Operandless floating-point instructions for the disassembler.
+def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>;
def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", []>, D9;
def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", []>, D9;
@@ -639,8 +625,12 @@ def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", []>, DE;
def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
"fxsave\t$dst", []>, TB;
+def FXSAVE64 : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins),
+ "fxsaveq\t$dst", []>, TB, REX_W, Requires<[In64BitMode]>;
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
"fxrstor\t$src", []>, TB;
+def FXRSTOR64 : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src),
+ "fxrstorq\t$src", []>, TB, REX_W, Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index 79187e9..344c14c 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -39,7 +39,8 @@ def MRM_E8 : Format<39>;
def MRM_F0 : Format<40>;
def MRM_F8 : Format<41>;
def MRM_F9 : Format<42>;
-def RawFrmImm16 : Format<43>;
+def RawFrmImm8 : Format<43>;
+def RawFrmImm16 : Format<44>;
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
@@ -108,6 +109,7 @@ class VEX_W { bit hasVEX_WPrefix = 1; }
class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
class VEX_L { bit hasVEX_L = 1; }
+class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
string AsmStr, Domain d = GenericDomain>
@@ -123,6 +125,9 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
dag InOperandList = ins;
string AsmString = AsmStr;
+ // If this is a pseudo instruction, mark it isCodeGenOnly.
+ let isCodeGenOnly = !eq(!cast<string>(f), "Pseudo");
+
//
// Attributes specific to X86 instructions...
//
@@ -130,17 +135,18 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasAdSizePrefix = 0; // Does this inst have a 0x67 prefix?
bits<4> Prefix = 0; // Which prefix byte does this inst have?
- bit hasREX_WPrefix = 0; // Does this inst requires the REX.W prefix?
+ bit hasREX_WPrefix = 0; // Does this inst require the REX.W prefix?
FPFormat FPForm = NotFP; // What flavor of FP instruction is this?
bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix?
bits<2> SegOvrBits = 0; // Segment override prefix.
Domain ExeDomain = d;
- bit hasVEXPrefix = 0; // Does this inst requires a VEX prefix?
+ bit hasVEXPrefix = 0; // Does this inst require a VEX prefix?
bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
- bit hasVEX_4VPrefix = 0; // Does this inst requires the VEX.VVVV field?
- bit hasVEX_i8ImmReg = 0; // Does this inst requires the last source register
+ bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
+ bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
+ // to be encoded in an immediate field?
- bit hasVEX_L = 0; // Does this inst uses large (256-bit) registers?
+ bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
+ bit has3DNow0F0FOpcode = 0; // Wacky 3DNow! encoding?
// TSFlags layout should be kept in sync with X86InstrInfo.h.
let TSFlags{5-0} = FormBits;
@@ -159,6 +165,12 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{34} = hasVEX_4VPrefix;
let TSFlags{35} = hasVEX_i8ImmReg;
let TSFlags{36} = hasVEX_L;
+ let TSFlags{37} = has3DNow0F0FOpcode;
+}
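On the C++ side this new bit is read back out of TSFlags; a minimal sketch of the corresponding test (bit position taken from the let above, with the layout kept in sync with X86InstrInfo.h):

    #include <cstdint>

    static bool has3DNow0F0FOpcode(uint64_t TSFlags) {
      return (TSFlags >> 37) & 1;
    }
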
+
+class PseudoI<dag oops, dag iops, list<dag> pattern>
+ : X86Inst<0, Pseudo, NoImm, oops, iops, ""> {
+ let Pattern = pattern;
}
class I<bits<8> o, Format f, dag outs, dag ins, string asm,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 01149b6..5016c0f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -15,51 +15,8 @@
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//
-def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;
-
-def bc_v8i8 : PatFrag<(ops node:$in), (v8i8 (bitconvert node:$in))>;
-def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
-def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
-def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
-
-//===----------------------------------------------------------------------===//
-// MMX Masks
-//===----------------------------------------------------------------------===//
-
-// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
-// PSHUFW imm.
-def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
-}]>;
-
-// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
-def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
-def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
-def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
-def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-}], MMX_SHUFFLE_get_shuf_imm>;
+def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
+def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
@@ -86,6 +43,21 @@ def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
+def X86pandn : SDNode<"X86ISD::PANDN",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v2i64>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86psignb : SDNode<"X86ISD::PSIGNB",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86psignw : SDNode<"X86ISD::PSIGNW",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86psignd : SDNode<"X86ISD::PSIGND",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v4i32>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86pblendv : SDNode<"X86ISD::PBLENDVB",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
@@ -102,7 +74,7 @@ def X86insrtps : SDNode<"X86ISD::INSERTPS",
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
@@ -134,18 +106,12 @@ def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>, SDTCisInt<3>]>;
-def SDTShuff2OpLdI : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
- SDTCisInt<2>]>;
-
def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;
def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;
-def X86PShufhwLd : SDNode<"X86ISD::PSHUFHW_LD", SDTShuff2OpLdI>;
-def X86PShuflwLd : SDNode<"X86ISD::PSHUFLW_LD", SDTShuff2OpLdI>;
-
def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>;
def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>;
@@ -187,9 +153,11 @@ def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>;
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
+ SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
+ SDNPWantRoot]>;
def ssmem : Operand<v4f32> {
let PrintMethod = "printf32mem";
@@ -273,6 +241,7 @@ def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
+def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
// 256-bit memop pattern fragments
@@ -289,10 +258,7 @@ def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;
-def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
-def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
-def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
-def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
+def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;
// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
@@ -376,6 +342,18 @@ def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;
+// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index
+// to VEXTRACTF128 imm.
+def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{
+ return getI8Imm(X86::getExtractVEXTRACTF128Immediate(N));
+}]>;
+
+// INSERT_get_vinsertf128_imm xform function: convert insert_subvector index to
+// VINSERTF128 imm.
+def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{
+ return getI8Imm(X86::getInsertVINSERTF128Immediate(N));
+}]>;
+
def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
@@ -466,3 +444,16 @@ def palign : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;
+
+def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index),
+ (extract_subvector node:$bigvec,
+ node:$index), [{
+ return X86::isVEXTRACTF128Index(N);
+}], EXTRACT_get_vextractf128_imm>;
+
+def vinsertf128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
+ node:$index),
+ (insert_subvector node:$bigvec, node:$smallvec,
+ node:$index), [{
+ return X86::isVINSERTF128Index(N);
+}], INSERT_get_vinsertf128_imm>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index 5280940..ceb1b65 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -34,7 +34,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/MC/MCAsmInfo.h"
-
#include <limits>
using namespace llvm;
@@ -55,7 +54,11 @@ ReMatPICStubLoad("remat-pic-stub-load",
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
: TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
TM(tm), RI(tm, *this) {
- SmallVector<unsigned,16> AmbEntries;
+ enum {
+ TB_NOT_REVERSABLE = 1U << 31,
+ TB_FLAGS = TB_NOT_REVERSABLE
+ };
+
static const unsigned OpTbl2Addr[][2] = {
{ X86::ADC32ri, X86::ADC32mi },
{ X86::ADC32ri8, X86::ADC32mi8 },
@@ -65,13 +68,22 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ADC64rr, X86::ADC64mr },
{ X86::ADD16ri, X86::ADD16mi },
{ X86::ADD16ri8, X86::ADD16mi8 },
+ { X86::ADD16ri_DB, X86::ADD16mi | TB_NOT_REVERSABLE },
+ { X86::ADD16ri8_DB, X86::ADD16mi8 | TB_NOT_REVERSABLE },
{ X86::ADD16rr, X86::ADD16mr },
+ { X86::ADD16rr_DB, X86::ADD16mr | TB_NOT_REVERSABLE },
{ X86::ADD32ri, X86::ADD32mi },
{ X86::ADD32ri8, X86::ADD32mi8 },
+ { X86::ADD32ri_DB, X86::ADD32mi | TB_NOT_REVERSABLE },
+ { X86::ADD32ri8_DB, X86::ADD32mi8 | TB_NOT_REVERSABLE },
{ X86::ADD32rr, X86::ADD32mr },
+ { X86::ADD32rr_DB, X86::ADD32mr | TB_NOT_REVERSABLE },
{ X86::ADD64ri32, X86::ADD64mi32 },
{ X86::ADD64ri8, X86::ADD64mi8 },
+ { X86::ADD64ri32_DB,X86::ADD64mi32 | TB_NOT_REVERSABLE },
+ { X86::ADD64ri8_DB, X86::ADD64mi8 | TB_NOT_REVERSABLE },
{ X86::ADD64rr, X86::ADD64mr },
+ { X86::ADD64rr_DB, X86::ADD64mr | TB_NOT_REVERSABLE },
{ X86::ADD8ri, X86::ADD8mi },
{ X86::ADD8rr, X86::ADD8mr },
{ X86::AND16ri, X86::AND16mi },
@@ -216,16 +228,21 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
unsigned RegOp = OpTbl2Addr[i][0];
- unsigned MemOp = OpTbl2Addr[i][1];
- if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
- std::make_pair(MemOp,0))).second)
- assert(false && "Duplicated entries?");
+ unsigned MemOp = OpTbl2Addr[i][1] & ~TB_FLAGS;
+ assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");
+ RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U);
+
+ // If this is not a reversible operation (because there is a many->one
+ // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)
+ continue;
+
// Index 0, folded load and store, no alignment requirement.
unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
- if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp,
- AuxInfo))).second)
- AmbEntries.push_back(MemOp);
+
+ assert(!MemOp2RegOpTable.count(MemOp) &&
+ "Duplicated entries in unfolding maps?");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
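The trick introduced across these tables: opcode enumerators are far smaller than 2^31, so the top bit of the memory-form column doubles as a flag and is masked off before use. Grounded in the enum above:

    enum { TB_NOT_REVERSABLE = 1U << 31, TB_FLAGS = TB_NOT_REVERSABLE };

    // Recover the real memory-form opcode from a table entry.
    static unsigned memOpcode(unsigned Entry) { return Entry & ~(unsigned)TB_FLAGS; }
    // Entries in a many->one mapping must not enter the unfold table.
    static bool isReversible(unsigned Entry)  { return (Entry & TB_NOT_REVERSABLE) == 0; }
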
// If the third value is 1, then it's folding either a load or a store.
@@ -252,8 +269,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::DIV64r, X86::DIV64m, 1, 0 },
{ X86::DIV8r, X86::DIV8m, 1, 0 },
{ X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0, 16 },
- { X86::FsMOVAPDrr, X86::MOVSDmr, 0, 0 },
- { X86::FsMOVAPSrr, X86::MOVSSmr, 0, 0 },
+ { X86::FsMOVAPDrr, X86::MOVSDmr | TB_NOT_REVERSABLE , 0, 0 },
+ { X86::FsMOVAPSrr, X86::MOVSSmr | TB_NOT_REVERSABLE , 0, 0 },
{ X86::IDIV16r, X86::IDIV16m, 1, 0 },
{ X86::IDIV32r, X86::IDIV32m, 1, 0 },
{ X86::IDIV64r, X86::IDIV64m, 1, 0 },
@@ -268,7 +285,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOV16rr, X86::MOV16mr, 0, 0 },
{ X86::MOV32ri, X86::MOV32mi, 0, 0 },
{ X86::MOV32rr, X86::MOV32mr, 0, 0 },
- { X86::MOV32rr_TC, X86::MOV32mr_TC, 0, 0 },
{ X86::MOV64ri32, X86::MOV64mi32, 0, 0 },
{ X86::MOV64rr, X86::MOV64mr, 0, 0 },
{ X86::MOV8ri, X86::MOV8mi, 0, 0 },
@@ -312,19 +328,22 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
};
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
- unsigned RegOp = OpTbl0[i][0];
- unsigned MemOp = OpTbl0[i][1];
- unsigned Align = OpTbl0[i][3];
- if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
- std::make_pair(MemOp,Align))).second)
- assert(false && "Duplicated entries?");
+ unsigned RegOp = OpTbl0[i][0];
+ unsigned MemOp = OpTbl0[i][1] & ~TB_FLAGS;
unsigned FoldedLoad = OpTbl0[i][2];
+ unsigned Align = OpTbl0[i][3];
+ assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?");
+ RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align);
+
+ // If this is not a reversible operation (because there is a many->one
+ // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl0[i][1] & TB_NOT_REVERSABLE)
+ continue;
+
// Index 0, folded load or store.
unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
- if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
- if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))).second)
- AmbEntries.push_back(MemOp);
+ assert(!MemOp2RegOpTable.count(MemOp) && "Duplicated entries?");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
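All four table-initialization loops pack the same AuxInfo word: the low bits hold the index of the operand being folded, bit 4 marks a folded load, and bit 5 marks a folded store (table 0 uses index 0 with one or both bits set). As a helper:

    // AuxInfo layout shared by the fold/unfold tables (sketch).
    static unsigned makeAuxInfo(unsigned OpIndex, bool FoldedLoad, bool FoldedStore) {
      return OpIndex | (unsigned(FoldedLoad) << 4) | (unsigned(FoldedStore) << 5);
    }
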
static const unsigned OpTbl1[][3] = {
@@ -342,8 +361,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 },
{ X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 },
{ X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 },
- { X86::FsMOVAPDrr, X86::MOVSDrm, 0 },
- { X86::FsMOVAPSrr, X86::MOVSSrm, 0 },
+ { X86::FsMOVAPDrr, X86::MOVSDrm | TB_NOT_REVERSABLE , 0 },
+ { X86::FsMOVAPSrr, X86::MOVSSrm | TB_NOT_REVERSABLE , 0 },
{ X86::IMUL16rri, X86::IMUL16rmi, 0 },
{ X86::IMUL16rri8, X86::IMUL16rmi8, 0 },
{ X86::IMUL32rri, X86::IMUL32rmi, 0 },
@@ -360,8 +379,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm, 16 },
{ X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm, 16 },
{ X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm, 0 },
- { X86::Int_CVTSD2SI64rr,X86::Int_CVTSD2SI64rm, 0 },
- { X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm, 0 },
+ { X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 },
+ { X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
{ X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
{ X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm, 0 },
{ X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
@@ -370,8 +389,8 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
{ X86::Int_CVTSS2SI64rr,X86::Int_CVTSS2SI64rm, 0 },
{ X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm, 0 },
- { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm, 16 },
- { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm, 16 },
+ { X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, 16 },
+ { X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, 16 },
{ X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm, 0 },
{ X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 },
{ X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm, 0 },
@@ -380,7 +399,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
{ X86::MOV16rr, X86::MOV16rm, 0 },
{ X86::MOV32rr, X86::MOV32rm, 0 },
- { X86::MOV32rr_TC, X86::MOV32rm_TC, 0 },
{ X86::MOV64rr, X86::MOV64rm, 0 },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
{ X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
@@ -439,25 +457,31 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
unsigned RegOp = OpTbl1[i][0];
- unsigned MemOp = OpTbl1[i][1];
+ unsigned MemOp = OpTbl1[i][1] & ~TB_FLAGS;
unsigned Align = OpTbl1[i][2];
- if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
- std::make_pair(MemOp,Align))).second)
- assert(false && "Duplicated entries?");
+ assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries");
+ RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align);
+
+ // If this is not a reversible operation (because there is a many->one
+ // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl1[i][1] & TB_NOT_REVERSABLE)
+ continue;
+
// Index 1, folded load
unsigned AuxInfo = 1 | (1 << 4);
- if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
- if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))).second)
- AmbEntries.push_back(MemOp);
+ assert(!MemOp2RegOpTable.count(MemOp) && "Duplicate entries");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
static const unsigned OpTbl2[][3] = {
{ X86::ADC32rr, X86::ADC32rm, 0 },
{ X86::ADC64rr, X86::ADC64rm, 0 },
{ X86::ADD16rr, X86::ADD16rm, 0 },
+ { X86::ADD16rr_DB, X86::ADD16rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD32rr, X86::ADD32rm, 0 },
+ { X86::ADD32rr_DB, X86::ADD32rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD64rr, X86::ADD64rm, 0 },
+ { X86::ADD64rr_DB, X86::ADD64rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD8rr, X86::ADD8rm, 0 },
{ X86::ADDPDrr, X86::ADDPDrm, 16 },
{ X86::ADDPSrr, X86::ADDPSrm, 16 },
@@ -652,20 +676,23 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
unsigned RegOp = OpTbl2[i][0];
- unsigned MemOp = OpTbl2[i][1];
+ unsigned MemOp = OpTbl2[i][1] & ~TB_FLAGS;
unsigned Align = OpTbl2[i][2];
- if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
- std::make_pair(MemOp,Align))).second)
- assert(false && "Duplicated entries?");
+
+ assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");
+ RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align);
+
+ // If this is not a reversible operation (because there is a many->one
+ // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl2[i][1] & TB_NOT_REVERSABLE)
+ continue;
+
// Index 2, folded load
unsigned AuxInfo = 2 | (1 << 4);
- if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))).second)
- AmbEntries.push_back(MemOp);
+ assert(!MemOp2RegOpTable.count(MemOp) &&
+ "Duplicated entries in unfolding maps?");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
-
- // Remove ambiguous entries.
- assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
bool
@@ -745,9 +772,7 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
- case X86::MOV32rm_TC:
case X86::MOV64rm:
- case X86::MOV64rm_TC:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
@@ -768,9 +793,7 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::MOV8mr:
case X86::MOV16mr:
case X86::MOV32mr:
- case X86::MOV32mr_TC:
case X86::MOV64mr:
- case X86::MOV64mr_TC:
case X86::ST_FpP64m:
case X86::MOVSSmr:
case X86::MOVSDmr:
@@ -785,7 +808,7 @@ static bool isFrameStoreOpcode(int Opcode) {
return false;
}
-unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (isFrameLoadOpcode(MI->getOpcode()))
if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
@@ -793,7 +816,7 @@ unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
if (isFrameLoadOpcode(MI->getOpcode())) {
unsigned Reg;
@@ -923,10 +946,10 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
isPICBase = true;
}
return isPICBase;
- }
+ }
return false;
}
-
+
case X86::LEA32r:
case X86::LEA64r: {
if (MI->getOperand(2).isImm() &&
@@ -1099,11 +1122,11 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
? X86::LEA64_32r : X86::LEA32r;
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
- unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
+ unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
-
+
// Build and insert into an implicit UNDEF value. This is OK because
- // well be shifting and then extracting the lower 16-bits.
+ // we'll be shifting and then extracting the lower 16 bits.
// This has the potential to cause partial register stall. e.g.
// movw (%rbp,%rcx,2), %dx
// leal -65(%rdx), %esi
@@ -1137,9 +1160,12 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
break;
case X86::ADD16ri:
case X86::ADD16ri8:
- addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
+ case X86::ADD16ri_DB:
+ case X86::ADD16ri8_DB:
+ addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
break;
- case X86::ADD16rr: {
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB: {
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
unsigned leaInReg2 = 0;
@@ -1149,9 +1175,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// just a single insert_subreg.
addRegReg(MIB, leaInReg, true, leaInReg, false);
} else {
- leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32RegClass);
+ leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
- // well be shifting and then extracting the lower 16-bits.
+ // we'll be shifting and then extracting the lower 16 bits.
BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
InsMI2 =
BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
@@ -1218,7 +1244,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::SHUFPSrri: {
assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
-
+
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
if (B != C) return 0;
@@ -1236,6 +1262,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
+ // LEA can't handle RSP.
+ if (TargetRegisterInfo::isVirtualRegister(Src) &&
+ !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass))
+ return 0;
+
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
@@ -1250,6 +1281,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
+ // LEA can't handle ESP.
+ if (TargetRegisterInfo::isVirtualRegister(Src) &&
+ !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass))
+ return 0;
+
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
@@ -1288,6 +1324,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+
+ // LEA can't handle RSP.
+ if (TargetRegisterInfo::isVirtualRegister(Src) &&
+ !MF.getRegInfo().constrainRegClass(Src,
+ MIOpc == X86::INC64r ? X86::GR64_NOSPRegisterClass :
+ X86::GR32_NOSPRegisterClass))
+ return 0;
+
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
@@ -1310,6 +1354,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ // LEA can't handle RSP.
+ if (TargetRegisterInfo::isVirtualRegister(Src) &&
+ !MF.getRegInfo().constrainRegClass(Src,
+ MIOpc == X86::DEC64r ? X86::GR64_NOSPRegisterClass :
+ X86::GR32_NOSPRegisterClass))
+ return 0;
+
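The reason "LEA can't handle RSP": these conversions may place the source register in the index slot of the LEA address, and in a SIB byte the index encoding 4 (RSP) is reserved to mean "no index", so the source is constrained to a *_NOSP register class up front. The underlying encoding fact:

    // SIB index encoding 4 (= RSP) means "no index"; RSP can never be an
    // index register, though it is fine as a base.
    static bool canBeLEAIndex(unsigned HwEncoding) { return HwEncoding != 4; }
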
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
@@ -1327,12 +1378,29 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
Src, isKill, -1);
break;
case X86::ADD64rr:
- case X86::ADD32rr: {
+ case X86::ADD64rr_DB:
+ case X86::ADD32rr:
+ case X86::ADD32rr_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
- : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ unsigned Opc;
+ TargetRegisterClass *RC;
+ if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
+ Opc = X86::LEA64r;
+ RC = X86::GR64_NOSPRegisterClass;
+ } else {
+ Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
+ RC = X86::GR32_NOSPRegisterClass;
+ }
+
+
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
+
+ // LEA can't handle RSP.
+ if (TargetRegisterInfo::isVirtualRegister(Src2) &&
+ !MF.getRegInfo().constrainRegClass(Src2, RC))
+ return 0;
+
NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
@@ -1341,7 +1409,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
}
- case X86::ADD16rr: {
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB: {
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
@@ -1357,6 +1426,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::ADD64ri32:
case X86::ADD64ri8:
+ case X86::ADD64ri32_DB:
+ case X86::ADD64ri8_DB:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addReg(Dest, RegState::Define |
@@ -1364,7 +1435,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
Src, isKill, MI->getOperand(2).getImm());
break;
case X86::ADD32ri:
- case X86::ADD32ri8: {
+ case X86::ADD32ri8:
+ case X86::ADD32ri_DB:
+ case X86::ADD32ri8_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
@@ -1375,6 +1448,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::ADD16ri:
case X86::ADD16ri8:
+ case X86::ADD16ri_DB:
+ case X86::ADD16ri8_DB:
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
@@ -1396,7 +1471,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
LV->replaceKillInstruction(Dest, MI, NewMI);
}
- MFI->insert(MBBI, NewMI); // Insert the new inst
+ MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
@@ -1617,7 +1692,7 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
-
+
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
return true;
@@ -1626,7 +1701,7 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -1787,7 +1862,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
I = MBB.end();
++Count;
}
-
+
return Count;
}
@@ -1945,13 +2020,23 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
default:
llvm_unreachable("Unknown regclass");
case X86::GR64RegClassID:
+ case X86::GR64_ABCDRegClassID:
+ case X86::GR64_NOREXRegClassID:
+ case X86::GR64_NOREX_NOSPRegClassID:
case X86::GR64_NOSPRegClassID:
+ case X86::GR64_TCRegClassID:
+ case X86::GR64_TCW64RegClassID:
return load ? X86::MOV64rm : X86::MOV64mr;
case X86::GR32RegClassID:
- case X86::GR32_NOSPRegClassID:
+ case X86::GR32_ABCDRegClassID:
case X86::GR32_ADRegClassID:
+ case X86::GR32_NOREXRegClassID:
+ case X86::GR32_NOSPRegClassID:
+ case X86::GR32_TCRegClassID:
return load ? X86::MOV32rm : X86::MOV32mr;
case X86::GR16RegClassID:
+ case X86::GR16_ABCDRegClassID:
+ case X86::GR16_NOREXRegClassID:
return load ? X86::MOV16rm : X86::MOV16mr;
case X86::GR8RegClassID:
// Copying to or from a physical H register on x86-64 requires a NOREX
@@ -1961,32 +2046,14 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
return load ? X86::MOV8rm : X86::MOV8mr;
- case X86::GR64_ABCDRegClassID:
- return load ? X86::MOV64rm : X86::MOV64mr;
- case X86::GR32_ABCDRegClassID:
- return load ? X86::MOV32rm : X86::MOV32mr;
- case X86::GR16_ABCDRegClassID:
- return load ? X86::MOV16rm : X86::MOV16mr;
case X86::GR8_ABCD_LRegClassID:
+ case X86::GR8_NOREXRegClassID:
return load ? X86::MOV8rm : X86::MOV8mr;
case X86::GR8_ABCD_HRegClassID:
if (TM.getSubtarget<X86Subtarget>().is64Bit())
return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
return load ? X86::MOV8rm : X86::MOV8mr;
- case X86::GR64_NOREXRegClassID:
- case X86::GR64_NOREX_NOSPRegClassID:
- return load ? X86::MOV64rm : X86::MOV64mr;
- case X86::GR32_NOREXRegClassID:
- return load ? X86::MOV32rm : X86::MOV32mr;
- case X86::GR16_NOREXRegClassID:
- return load ? X86::MOV16rm : X86::MOV16mr;
- case X86::GR8_NOREXRegClassID:
- return load ? X86::MOV8rm : X86::MOV8mr;
- case X86::GR64_TCRegClassID:
- return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
- case X86::GR32_TCRegClassID:
- return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
case X86::RFP80RegClassID:
return load ? X86::LD_Fp80m : X86::ST_FpP80m;
case X86::RFP64RegClassID:
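The case consolidation above relies on one invariant: every general-purpose subclass of a given width now spills and reloads with the same plain MOV, including the tailcall (_TC) classes, so the opcode choice reduces to a size lookup. Sketched as a hypothetical helper (not in the tree; the x86-64 H-register case still needs its own NOREX branch):

    static unsigned gprLoadOpcodeForSize(unsigned Bytes) {
      switch (Bytes) {
      case 8: return X86::MOV64rm;
      case 4: return X86::MOV32rm;
      case 2: return X86::MOV16rm;
      default: return X86::MOV8rm;
      }
    }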
@@ -2085,76 +2152,6 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
}
-bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL = MBB.findDebugLoc(MI);
-
- bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
- bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
- unsigned SlotSize = is64Bit ? 8 : 4;
-
- MachineFunction &MF = *MBB.getParent();
- unsigned FPReg = RI.getFrameRegister(MF);
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- unsigned CalleeFrameSize = 0;
-
- unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- if (Reg == FPReg)
- // X86RegisterInfo::emitPrologue will handle spilling of frame register.
- continue;
- if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
- CalleeFrameSize += SlotSize;
- BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
- } else {
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
- RC, &RI);
- }
- }
-
- X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
- return true;
-}
-
-bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL = MBB.findDebugLoc(MI);
-
- MachineFunction &MF = *MBB.getParent();
- unsigned FPReg = RI.getFrameRegister(MF);
- bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
- bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
- unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (Reg == FPReg)
- // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
- continue;
- if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
- BuildMI(MBB, MI, DL, get(Opc), Reg);
- } else {
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
- RC, &RI);
- }
- }
- return true;
-}
-
MachineInstr*
X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx, uint64_t Offset,
@@ -2181,7 +2178,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
addOffset(MIB, 0);
-
+
// Loop over the rest of the ri operands, converting them over.
unsigned NumOps = MI->getDesc().getNumOperands()-2;
for (unsigned i = 0; i != NumOps; ++i) {
@@ -2202,7 +2199,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
MI->getDebugLoc(), true);
MachineInstrBuilder MIB(NewMI);
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
@@ -2238,7 +2235,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI, unsigned i,
const SmallVectorImpl<MachineOperand> &MOs,
unsigned Size, unsigned Align) const {
- const DenseMap<unsigned*, std::pair<unsigned,unsigned> > *OpcodeTablePtr=NULL;
+ const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
bool isTwoAddrFold = false;
unsigned NumOps = MI->getDesc().getNumOperands();
bool isTwoAddr = NumOps > 1 &&
@@ -2251,7 +2248,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
if (isTwoAddr && NumOps >= 2 && i < 2 &&
MI->getOperand(0).isReg() &&
MI->getOperand(1).isReg() &&
- MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
+ MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (i == 0) { // If operand 0
@@ -2265,19 +2262,19 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
if (NewMI)
return NewMI;
-
+
OpcodeTablePtr = &RegOp2MemOpTable0;
} else if (i == 1) {
OpcodeTablePtr = &RegOp2MemOpTable1;
} else if (i == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
}
-
+
// If table selected...
if (OpcodeTablePtr) {
// Find the Opcode to fuse
- DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
- OpcodeTablePtr->find((unsigned*)MI->getOpcode());
+ DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
+ OpcodeTablePtr->find(MI->getOpcode());
if (I != OpcodeTablePtr->end()) {
unsigned Opcode = I->second.first;
unsigned MinAlign = I->second.second;
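Keying the folding tables on the raw opcode removes the old unsigned-to-pointer casts while keeping the lookup shape identical. A self-contained sketch of the new usage (helper name and table contents are illustrative):

    #include "llvm/ADT/DenseMap.h"
    #include <utility>

    static unsigned lookupFoldedOpcode(
        const llvm::DenseMap<unsigned, std::pair<unsigned, unsigned> > &Table,
        unsigned Opc, unsigned &MinAlign) {
      llvm::DenseMap<unsigned, std::pair<unsigned, unsigned> >::const_iterator
          I = Table.find(Opc);       // the key is now the opcode itself
      if (I == Table.end())
        return 0;                    // no memory form registered
      MinAlign = I->second.second;   // required operand alignment
      return I->second.first;        // fused, memory-operand opcode
    }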
@@ -2320,8 +2317,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return NewMI;
}
}
-
- // No fusion
+
+ // No fusion
if (PrintFailedFusing && !MI->isCopy())
dbgs() << "We failed to fuse operand " << i << " in " << *MI;
return NULL;
@@ -2332,7 +2329,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
- // Check switch flag
+ // Check switch flag
if (NoFusing) return NULL;
if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2343,8 +2340,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
case X86::Int_CVTSS2SDrr:
case X86::RCPSSr:
case X86::RCPSSr_Int:
- case X86::ROUNDSDr_Int:
- case X86::ROUNDSSr_Int:
+ case X86::ROUNDSDr:
+ case X86::ROUNDSSr:
case X86::RSQRTSSr:
case X86::RSQRTSSr_Int:
case X86::SQRTSSr:
@@ -2384,7 +2381,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
- // Check switch flag
+ // Check switch flag
if (NoFusing) return NULL;
if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2395,8 +2392,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
case X86::Int_CVTSS2SDrr:
case X86::RCPSSr:
case X86::RCPSSr_Int:
- case X86::ROUNDSDr_Int:
- case X86::ROUNDSSr_Int:
+ case X86::ROUNDSDr:
+ case X86::ROUNDSSr:
case X86::RSQRTSSr:
case X86::RSQRTSSr_Int:
case X86::SQRTSSr:
@@ -2424,9 +2421,11 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Alignment = 16;
break;
case X86::FsFLD0SD:
+ case X86::VFsFLD0SD:
Alignment = 8;
break;
case X86::FsFLD0SS:
+ case X86::VFsFLD0SS:
Alignment = 4;
break;
default:
@@ -2490,9 +2489,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineConstantPool &MCP = *MF.getConstantPool();
const Type *Ty;
unsigned Opc = LoadMI->getOpcode();
- if (Opc == X86::FsFLD0SS)
+ if (Opc == X86::FsFLD0SS || Opc == X86::VFsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
- else if (Opc == X86::FsFLD0SD)
+ else if (Opc == X86::FsFLD0SD || Opc == X86::VFsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
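Once the AVX aliases are recognized, the fold proceeds as for the SSE pseudos: materialize a constant of the chosen type in the constant pool and rewrite the pseudo's load to reference it. The core of that step is roughly (shown for the zero-value cases):

    const Constant *C = Constant::getNullValue(Ty);        // FsFLD0SS/SD etc.
    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); // pool slot
    // The folded instruction's address operands then point at CPI
    // instead of a register base.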
@@ -2525,13 +2524,13 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const {
- // Check switch flag
+ // Check switch flag
if (NoFusing) return 0;
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
switch (MI->getOpcode()) {
default: return false;
- case X86::TEST8rr:
+ case X86::TEST8rr:
case X86::TEST16rr:
case X86::TEST32rr:
case X86::TEST64rr:
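Folding both register operands ({0, 1}) of a TEST is only meaningful because the two uses are the same value, so the result is a compare of the spilled location against zero. Schematically (operands shown for illustration):

    // testl %eax, %eax       ; register form, operands 0 and 1 alias
    // cmpl  $0, 16(%esp)     ; after folding %eax through its spill slot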
@@ -2551,16 +2550,15 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
- const DenseMap<unsigned*, std::pair<unsigned,unsigned> > *OpcodeTablePtr=NULL;
- if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
+ const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
+ if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
} else if (OpNum == 0) { // If operand 0
switch (Opc) {
case X86::MOV8r0:
case X86::MOV16r0:
case X86::MOV32r0:
- case X86::MOV64r0:
- return true;
+ case X86::MOV64r0: return true;
default: break;
}
OpcodeTablePtr = &RegOp2MemOpTable0;
@@ -2569,22 +2567,17 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
} else if (OpNum == 2) {
OpcodeTablePtr = &RegOp2MemOpTable2;
}
-
- if (OpcodeTablePtr) {
- // Find the Opcode to fuse
- DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
- OpcodeTablePtr->find((unsigned*)Opc);
- if (I != OpcodeTablePtr->end())
- return true;
- }
+
+ if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
+ return true;
return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
}
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
+ DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
+ MemOp2RegOpTable.find(MI->getOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -2644,7 +2637,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the data processing instruction.
MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
MachineInstrBuilder MIB(DataMI);
-
+
if (FoldedStore)
MIB.addReg(Reg, RegState::Define);
for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
@@ -2712,8 +2705,8 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
if (!N->isMachineOpcode())
return false;
- DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
+ DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
+ MemOp2RegOpTable.find(N->getMachineOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -2813,8 +2806,8 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex) const {
- DenseMap<unsigned*, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find((unsigned*)Opc);
+ DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
+ MemOp2RegOpTable.find(Opc);
if (I == MemOp2RegOpTable.end())
return 0;
bool FoldedLoad = I->second.second & (1 << 4);
@@ -2993,6 +2986,8 @@ bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) {
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
+ case X86::CR8: case X86::CR9: case X86::CR10: case X86::CR11:
+ case X86::CR12: case X86::CR13: case X86::CR14: case X86::CR15:
return true;
}
return false;
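CR8-CR15 join the list because "extended" here simply means the hardware register number is 8-15 and therefore needs one of the REX prefix bits (R, X, or B) to encode. As a one-line sketch (illustrative helper, not the LLVM API):

    static bool hwEncodingNeedsREX(unsigned HWNum) { return HWNum >= 8; }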
@@ -3090,6 +3085,41 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
+bool X86InstrInfo::
+hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ switch (DefMI->getOpcode()) {
+ default: return false;
+ case X86::DIVSDrm:
+ case X86::DIVSDrm_Int:
+ case X86::DIVSDrr:
+ case X86::DIVSDrr_Int:
+ case X86::DIVSSrm:
+ case X86::DIVSSrm_Int:
+ case X86::DIVSSrr:
+ case X86::DIVSSrr_Int:
+ case X86::SQRTPDm:
+ case X86::SQRTPDm_Int:
+ case X86::SQRTPDr:
+ case X86::SQRTPDr_Int:
+ case X86::SQRTPSm:
+ case X86::SQRTPSm_Int:
+ case X86::SQRTPSr:
+ case X86::SQRTPSr_Int:
+ case X86::SQRTSDm:
+ case X86::SQRTSDm_Int:
+ case X86::SQRTSDr:
+ case X86::SQRTSDr_Int:
+ case X86::SQRTSSm:
+ case X86::SQRTSSm_Int:
+ case X86::SQRTSSr:
+ case X86::SQRTSSr_Int:
+ return true;
+ }
+}
+
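The new hook gives schedulers a cheap way to keep distance between these divide/sqrt defs and their uses. A hypothetical caller (all names illustrative) might bias its priority like this:

    if (TII.hasHighOperandLatency(ItinData, &MRI, DefMI, DefIdx,
                                  UseMI, UseIdx))
      Priority += HighLatencyBoost; // prefer independent work in between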
namespace {
/// CGBR - Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
@@ -3108,6 +3138,13 @@ namespace {
if (TM->getRelocationModel() != Reloc::PIC_)
return false;
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
+
+ // If we didn't need a GlobalBaseReg, don't insert code.
+ if (GlobalBaseReg == 0)
+ return false;
+
// Insert the set of GlobalBaseReg into the first MBB of the function
MachineBasicBlock &FirstMBB = MF.front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
@@ -3119,16 +3156,15 @@ namespace {
if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
else
- PC = TII->getGlobalBaseReg(&MF);
-
+ PC = GlobalBaseReg;
+
// Operand of MovePCtoStack is completely ignored by asm printer. It's
// only used in JIT code emission as displacement to pc.
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-
+
// If we're using vanilla 'GOT' PIC style, we should use relative addressing
// not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
- unsigned GlobalBaseReg = TII->getGlobalBaseReg(&MF);
// Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
.addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
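For the GOT-style path, the code built here amounts to the classic three-instruction PIC base sequence (label name illustrative):

    //   calll .L0$pb                                   ; MOVPC32r
    // .L0$pb:
    //   popl  %reg                                     ; PC of .L0$pb
    //   addl  $_GLOBAL_OFFSET_TABLE_+(.-.L0$pb), %reg  ; GOT base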
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index f336206..1d44207 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -174,7 +174,7 @@ namespace X86II {
/// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
/// reference is actually to the "FOO$stub" symbol. This is used for calls
- /// and jumps to external functions on Tiger and before.
+ /// and jumps to external functions on Tiger and earlier.
MO_DARWIN_STUB,
/// MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
@@ -311,12 +311,17 @@ namespace X86II {
MRM_F0 = 40,
MRM_F8 = 41,
MRM_F9 = 42,
+
+ /// RawFrmImm8 - This is used for the ENTER instruction, which has two
+ /// immediates, the first of which is a 16-bit immediate (specified by
+ /// the imm encoding) and the second is an 8-bit fixed value.
+ RawFrmImm8 = 43,
/// RawFrmImm16 - This is used for CALL FAR instructions, which have two
/// immediates, the first of which is a 16- or 32-bit immediate (specified by
/// the imm encoding) and the second is a 16-bit fixed value. In the AMD
/// manual, this operand is described as pntr16:32 and pntr16:16.
- RawFrmImm16 = 43,
+ RawFrmImm16 = 44,
FormMask = 63,
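ENTER is the motivating case for RawFrmImm8: opcode C8, a 16-bit frame-allocation immediate, then a fixed 8-bit nesting level. A plain-C++ sketch of the byte layout (not the MCCodeEmitter API):

    #include <cstdint>
    #include <vector>

    static void encodeEnter(uint16_t FrameSize, uint8_t NestingLevel,
                            std::vector<uint8_t> &Out) {
      Out.push_back(0xC8);                      // opcode
      Out.push_back(uint8_t(FrameSize & 0xFF)); // iw, little-endian
      Out.push_back(uint8_t(FrameSize >> 8));
      Out.push_back(NestingLevel);              // trailing fixed ib
    }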
@@ -444,28 +449,36 @@ namespace X86II {
OpcodeMask = 0xFF << OpcodeShift,
//===------------------------------------------------------------------===//
- // VEX - The opcode prefix used by AVX instructions
+ /// VEX - The opcode prefix used by AVX instructions
VEX = 1U << 0,
- // VEX_W - Has a opcode specific functionality, but is used in the same
- // way as REX_W is for regular SSE instructions.
+ /// VEX_W - Has an opcode-specific functionality, but is used in the same
+ /// way as REX_W is for regular SSE instructions.
VEX_W = 1U << 1,
- // VEX_4V - Used to specify an additional AVX/SSE register. Several 2
- // address instructions in SSE are represented as 3 address ones in AVX
- // and the additional register is encoded in VEX_VVVV prefix.
+ /// VEX_4V - Used to specify an additional AVX/SSE register. Several 2
+ /// address instructions in SSE are represented as 3 address ones in AVX
+ /// and the additional register is encoded in VEX_VVVV prefix.
VEX_4V = 1U << 2,
- // VEX_I8IMM - Specifies that the last register used in a AVX instruction,
- // must be encoded in the i8 immediate field. This usually happens in
- // instructions with 4 operands.
+ /// VEX_I8IMM - Specifies that the last register used in an AVX instruction
+ /// must be encoded in the i8 immediate field. This usually happens in
+ /// instructions with 4 operands.
VEX_I8IMM = 1U << 3,
- // VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
- // instruction uses 256-bit wide registers. This is usually auto detected if
- // a VR256 register is used, but some AVX instructions also have this field
- // marked when using a f256 memory references.
- VEX_L = 1U << 4
+ /// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
+ /// instruction uses 256-bit wide registers. This is usually auto-detected
+ /// if a VR256 register is used, but some AVX instructions also have this
+ /// field marked when using f256 memory references.
+ VEX_L = 1U << 4,
+
+ /// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
+ /// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
+ /// this as having a 0x0F prefix with a 0x0F opcode, and each instruction
+ /// storing a classifier in the imm8 field. To simplify our implementation,
+ /// we handle this by storing the classifier in the opcode field and using
+ /// this flag to indicate that the encoder should do the wacky 3DNow! thing.
+ Has3DNow0F0FOpcode = 1U << 5
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
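For Has3DNow0F0FOpcode the encoder-side consequence is that the byte stored in the opcode field is emitted last, as the classifier. For the register-register form the stream is roughly (illustrative sketch; memory forms insert SIB/displacement bytes before the classifier):

    #include <cstdint>
    #include <vector>

    static void encode3DNowRR(uint8_t ModRM, uint8_t Classifier,
                              std::vector<uint8_t> &Out) {
      Out.push_back(0x0F);       // fixed two-byte prefix, documented as
      Out.push_back(0x0F);       // a 0x0F prefix with a 0x0F opcode
      Out.push_back(ModRM);      // operands come before the "opcode"
      Out.push_back(Classifier); // e.g. 0xBF selects PAVGUSB
    }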
@@ -528,6 +541,7 @@ namespace X86II {
case X86II::AddRegFrm:
case X86II::MRMDestReg:
case X86II::MRMSrcReg:
+ case X86II::RawFrmImm8:
case X86II::RawFrmImm16:
return -1;
case X86II::MRMDestMem:
@@ -599,14 +613,14 @@ class X86InstrInfo : public TargetInstrInfoImpl {
/// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
/// RegOp2MemOpTable2 - Load / store folding opcode maps.
///
- DenseMap<unsigned*, std::pair<unsigned,unsigned> > RegOp2MemOpTable2Addr;
- DenseMap<unsigned*, std::pair<unsigned,unsigned> > RegOp2MemOpTable0;
- DenseMap<unsigned*, std::pair<unsigned,unsigned> > RegOp2MemOpTable1;
- DenseMap<unsigned*, std::pair<unsigned,unsigned> > RegOp2MemOpTable2;
+ DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2Addr;
+ DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable0;
+ DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable1;
+ DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2;
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
- DenseMap<unsigned*, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
+ DenseMap<unsigned, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
public:
explicit X86InstrInfo(X86TargetMachine &tm);
@@ -728,17 +742,6 @@ public:
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
virtual
MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx, uint64_t Offset,
@@ -845,18 +848,23 @@ public:
/// SetSSEDomain - Set the SSEDomain of MI.
void SetSSEDomain(MachineInstr *MI, unsigned Domain) const;
+ MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ unsigned OpNum,
+ const SmallVectorImpl<MachineOperand> &MOs,
+ unsigned Size, unsigned Alignment) const;
+
+ bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
- MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- unsigned OpNum,
- const SmallVectorImpl<MachineOperand> &MOs,
- unsigned Size, unsigned Alignment) const;
-
/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and follow operands form a reference to the stack frame.
bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index 09b7721..87dc4be 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1,10 +1,10 @@
-//===----------------------------------------------------------------------===//
-//
+//===- X86InstrInfo.td - Main X86 Instruction Definition ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instruction set, defining the instructions, and
@@ -35,6 +35,20 @@ def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>, SDTCisVT<1, i32>]>;
+
+// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
+def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
+ [SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>,
+ SDTCisInt<0>,
+ SDTCisVT<1, i32>,
+ SDTCisVT<4, i32>]>;
+// SDT2ResultBinaryArithWithFlags - RES1, RES2, FLAGS = op LHS, RHS
+def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>,
+ SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
@@ -46,7 +60,7 @@ def SDTX86SetCC_C : SDTypeProfile<1, 2,
[SDTCisInt<0>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
-def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
+def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
SDTCisVT<2, i8>]>;
def SDTX86cas8 : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
@@ -64,6 +78,12 @@ def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
SDTCisVT<1, iPTR>,
SDTCisVT<2, iPTR>]>;
+def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i32>,
+ SDTCisVT<3, i8>,
+ SDTCisVT<4, i32>]>;
+
def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
def SDTX86Void : SDTypeProfile<0, 0, []>;
@@ -72,9 +92,7 @@ def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
-
-def SDT_X86SegmentBaseAddress : SDTypeProfile<1, 1, [SDTCisPtrTy<0>]>;
+def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
@@ -110,82 +128,85 @@ def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
- SDNPMayLoad]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
+ SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
- SDNPMayLoad]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
+ SDNPMayLoad, SDNPMemOperand]>;
def X86AtomAdd64 : SDNode<"X86ISD::ATOMADD64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomSub64 : SDNode<"X86ISD::ATOMSUB64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomOr64 : SDNode<"X86ISD::ATOMOR64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomXor64 : SDNode<"X86ISD::ATOMXOR64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomAnd64 : SDNode<"X86ISD::ATOMAND64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomNand64 : SDNode<"X86ISD::ATOMNAND64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
- [SDNPHasChain, SDNPMayStore,
+ [SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
- [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86vastart_save_xmm_regs :
SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
SDT_X86VASTART_SAVE_XMM_REGS,
[SDNPHasChain, SDNPVariadic]>;
-
+def X86vaarg64 :
+ SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
def X86callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
SDNPVariadic]>;
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad]>;
def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
- [SDNPHasChain, SDNPOutFlag, SDNPSideEffect]>;
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-def X86SegmentBaseAddress : SDNode<"X86ISD::SegmentBaseAddress",
- SDT_X86SegmentBaseAddress, []>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
[SDNPHasChain]>;
-def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
- [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
-def X86umul_flag : SDNode<"X86ISD::UMUL", SDTUnaryArithWithFlags,
+def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
[SDNPCommutative]>;
-
+def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
+def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
+
def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>;
def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>;
def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
@@ -197,11 +218,11 @@ def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
-def X86MingwAlloca : SDNode<"X86ISD::MINGW_ALLOCA", SDTX86Void,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
-
+def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+
def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
- []>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
@@ -252,6 +273,10 @@ def i8mem_NOREX : Operand<i64> {
let ParserMatchClass = X86MemAsmOperand;
}
+// GPRs available for tailcall.
+// It represents GR64_TC or GR64_TCW64.
+def ptr_rc_tailcall : PointerLikeRegClass<2>;
+
// Special i32mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
@@ -261,6 +286,15 @@ def i32mem_TC : Operand<i32> {
let ParserMatchClass = X86MemAsmOperand;
}
+// Special i64mem for addresses of load folding tail calls. These are not
+// allowed to use callee-saved registers since they must be scheduled
+// after callee-saved registers are popped.
+def i64mem_TC : Operand<i64> {
+ let PrintMethod = "printi64mem";
+ let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
+ ptr_rc_tailcall, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
let ParserMatchClass = X86AbsMemAsmOperand,
PrintMethod = "print_pcrel_imm" in {
@@ -332,43 +366,77 @@ def i32i8imm : Operand<i32> {
let ParserMatchClass = ImmSExti32i8AsmOperand;
}
+// 64 bits, but only 32 bits are significant.
+def i64i32imm : Operand<i64> {
+ let ParserMatchClass = ImmSExti64i32AsmOperand;
+}
+
+// 64 bits, but only 32 bits are significant, and those bits are treated as being
+// pc relative.
+def i64i32imm_pcrel : Operand<i64> {
+ let PrintMethod = "print_pcrel_imm";
+ let ParserMatchClass = X86AbsMemAsmOperand;
+}
+
+// 64 bits, but only 8 bits are significant.
+def i64i8imm : Operand<i64> {
+ let ParserMatchClass = ImmSExti64i8AsmOperand;
+}
+
+def lea64_32mem : Operand<i32> {
+ let PrintMethod = "printi32mem";
+ let AsmOperandLowerMethod = "lower_lea64_32mem";
+ let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
+
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//
// Define X86 specific addressing mode.
-def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
+def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
+def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
+ [add, sub, mul, X86mul_imm, shl, or, frameindex,
+ X86WrapperRIP], []>;
+
+def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
+ [tglobaltlsaddr], []>;
+
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def HasCMov : Predicate<"Subtarget->hasCMov()">;
def NoCMov : Predicate<"!Subtarget->hasCMov()">;
-// FIXME: temporary hack to let codegen assert or generate poor code in case
-// no AVX version of the desired intructions is present, this is better for
-// incremental dev (without fallbacks it's easier to spot what's missing)
-def HasMMX : Predicate<"Subtarget->hasMMX() && !Subtarget->hasAVX()">;
-def HasSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
-def HasSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
-def HasSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
-def HasSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
-def HasSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
-def HasSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
-def HasSSE4A : Predicate<"Subtarget->hasSSE4A() && !Subtarget->hasAVX()">;
+def HasMMX : Predicate<"Subtarget->hasMMX()">;
+def Has3DNow : Predicate<"Subtarget->has3DNow()">;
+def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
+def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
+def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
+def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
+def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
+def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
+def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
+def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
+def HasXMMInt : Predicate<"Subtarget->hasXMMInt()">;
+
+def HasAES : Predicate<"Subtarget->hasAES()">;
def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">;
def HasFMA3 : Predicate<"Subtarget->hasFMA3()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
-def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
-def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
-def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
-def In64BitMode : Predicate<"Subtarget->is64Bit()">;
+def FPStackf32 : Predicate<"!Subtarget->hasXMM()">;
+def FPStackf64 : Predicate<"!Subtarget->hasXMMInt()">;
+def In32BitMode : Predicate<"!Subtarget->is64Bit()">, AssemblerPredicate;
+def In64BitMode : Predicate<"Subtarget->is64Bit()">, AssemblerPredicate;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
@@ -383,7 +451,6 @@ def OptForSize : Predicate<"OptForSize">;
def OptForSpeed : Predicate<"!OptForSize">;
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
-def HasAES : Predicate<"Subtarget->hasAES()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -418,40 +485,24 @@ def immSext8 : PatLeaf<(imm), [{ return immSext8(N); }]>;
def i16immSExt8 : PatLeaf<(i16 immSext8)>;
def i32immSExt8 : PatLeaf<(i32 immSext8)>;
-
-/// Load patterns: these constraint the match to the right address space.
-def dsload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- return true;
-}]>;
-
-def gsload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- return PT->getAddressSpace() == 256;
- return false;
+def i64immSExt8 : PatLeaf<(i64 immSext8)>;
+def i64immSExt32 : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>;
+def i64immZExt32 : PatLeaf<(i64 imm), [{
+ // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
+ // unsigned field.
+ return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
-def fsload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- return PT->getAddressSpace() == 257;
- return false;
+def i64immZExt32SExt8 : PatLeaf<(i64 imm), [{
+ uint64_t v = N->getZExtValue();
+ return v == (uint32_t)v && (int32_t)v == (int8_t)v;
}]>;
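Restated outside TableGen, the two new zero-extension predicates are (plain C++ equivalents, for clarity only):

    #include <cstdint>

    static bool isZExt32(uint64_t V) {      // i64immZExt32
      return V == uint32_t(V);
    }
    static bool isZExt32SExt8(uint64_t V) { // i64immZExt32SExt8
      return V == uint32_t(V) && int32_t(V) == int8_t(V);
    }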
-
// Helper fragments for loads.
// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD)
return true;
@@ -462,10 +513,6 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)),[{
LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::EXTLOAD)
return LD->getAlignment() >= 2 && !LD->isVolatile();
@@ -474,10 +521,6 @@ def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)),[{
def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD)
return true;
@@ -486,15 +529,18 @@ def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
return false;
}]>;
-def loadi8 : PatFrag<(ops node:$ptr), (i8 (dsload node:$ptr))>;
-def loadi64 : PatFrag<(ops node:$ptr), (i64 (dsload node:$ptr))>;
-def loadf32 : PatFrag<(ops node:$ptr), (f32 (dsload node:$ptr))>;
-def loadf64 : PatFrag<(ops node:$ptr), (f64 (dsload node:$ptr))>;
-def loadf80 : PatFrag<(ops node:$ptr), (f80 (dsload node:$ptr))>;
+def loadi8 : PatFrag<(ops node:$ptr), (i8 (load node:$ptr))>;
+def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
+def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
+def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
+def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
+def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
+def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
+def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
@@ -502,6 +548,10 @@ def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
+def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
+def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
+def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
+def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
@@ -509,6 +559,10 @@ def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
+def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
+def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
+def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
+def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
// An 'and' node with a single use.
@@ -524,66 +578,10 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
return N->hasOneUse();
}]>;
-// Treat an 'or' node is as an 'add' if the or'ed bits are known to be zero.
-def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
- return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
-
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
- APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
- return (~KnownZero0 & ~KnownZero1) == 0;
-}]>;
-
//===----------------------------------------------------------------------===//
-// Instruction list...
+// Instruction list.
//
-// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
-// a stack adjustment and the codegen must know that they may modify the stack
-// pointer before prolog-epilog rewriting occurs.
-// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
-// sub / add which can clobber EFLAGS.
-let Defs = [ESP, EFLAGS], Uses = [ESP] in {
-def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
- "#ADJCALLSTACKDOWN",
- [(X86callseq_start timm:$amt)]>,
- Requires<[In32BitMode]>;
-def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
- "#ADJCALLSTACKUP",
- [(X86callseq_end timm:$amt1, timm:$amt2)]>,
- Requires<[In32BitMode]>;
-}
-
-// x86-64 va_start lowering magic.
-let usesCustomInserter = 1 in {
-def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
- (outs),
- (ins GR8:$al,
- i64imm:$regsavefi, i64imm:$offset,
- variable_ops),
- "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
- [(X86vastart_save_xmm_regs GR8:$al,
- imm:$regsavefi,
- imm:$offset)]>;
-
-// Dynamic stack allocation yields _alloca call for Cygwin/Mingw targets. Calls
-// to _alloca is needed to probe the stack when allocating more than 4k bytes in
-// one go. Touching the stack at 4K increments is necessary to ensure that the
-// guard pages used by the OS virtual memory manager are allocated in correct
-// sequence.
-// The main point of having separate instruction are extra unmodelled effects
-// (compared to ordinary calls) like stack pointer change.
-
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
- def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins),
- "# dynamic stack allocation",
- [(X86MingwAlloca)]>;
-}
-
// Nop
let neverHasSideEffects = 1 in {
def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
@@ -593,206 +591,22 @@ let neverHasSideEffects = 1 in {
"nop{l}\t$zero", []>, TB;
}
-// Trap
-let Uses = [EFLAGS] in {
- def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>;
-}
-def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
- [(int_x86_int (i8 3))]>;
-// FIXME: need to make sure that "int $3" matches int3
-def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap",
- [(int_x86_int imm:$trap)]>;
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize;
-def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l}", []>;
-
-// PIC base construction. This expands to code that looks like this:
-// call $next_inst
-// popl %destreg"
-let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
- def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
- "", []>;
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions.
-//
-
-// Return instructions.
-let isTerminator = 1, isReturn = 1, isBarrier = 1,
- hasCtrlDep = 1, FPForm = SpecialFP in {
- def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret",
- [(X86retflag 0)]>;
- def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
- "ret\t$amt",
- [(X86retflag timm:$amt)]>;
- def LRET : I <0xCB, RawFrm, (outs), (ins),
- "lret", []>;
- def LRETI : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
- "lret\t$amt", []>;
-}
-
-// Unconditional branches.
-let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
- def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp\t$dst", [(br bb:$dst)]>;
- def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
- "jmp\t$dst", []>;
-}
-
-// Conditional Branches.
-let isBranch = 1, isTerminator = 1, Uses = [EFLAGS] in {
- multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
- def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, []>;
- def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
- [(X86brcond bb:$dst, Cond, EFLAGS)]>, TB;
- }
-}
-
-defm JO : ICBr<0x70, 0x80, "jo\t$dst" , X86_COND_O>;
-defm JNO : ICBr<0x71, 0x81, "jno\t$dst" , X86_COND_NO>;
-defm JB : ICBr<0x72, 0x82, "jb\t$dst" , X86_COND_B>;
-defm JAE : ICBr<0x73, 0x83, "jae\t$dst", X86_COND_AE>;
-defm JE : ICBr<0x74, 0x84, "je\t$dst" , X86_COND_E>;
-defm JNE : ICBr<0x75, 0x85, "jne\t$dst", X86_COND_NE>;
-defm JBE : ICBr<0x76, 0x86, "jbe\t$dst", X86_COND_BE>;
-defm JA : ICBr<0x77, 0x87, "ja\t$dst" , X86_COND_A>;
-defm JS : ICBr<0x78, 0x88, "js\t$dst" , X86_COND_S>;
-defm JNS : ICBr<0x79, 0x89, "jns\t$dst", X86_COND_NS>;
-defm JP : ICBr<0x7A, 0x8A, "jp\t$dst" , X86_COND_P>;
-defm JNP : ICBr<0x7B, 0x8B, "jnp\t$dst", X86_COND_NP>;
-defm JL : ICBr<0x7C, 0x8C, "jl\t$dst" , X86_COND_L>;
-defm JGE : ICBr<0x7D, 0x8D, "jge\t$dst", X86_COND_GE>;
-defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>;
-defm JG : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>;
-
-// FIXME: What about the CX/RCX versions of this instruction?
-let Uses = [ECX], isBranch = 1, isTerminator = 1 in
- def JCXZ8 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jcxz\t$dst", []>;
-
-
-// Indirect branches
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
- def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
- [(brind GR32:$dst)]>, Requires<[In32BitMode]>;
- def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
- [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>;
-
- def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
- (ins i16imm:$off, i16imm:$seg),
- "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
- def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
- (ins i32imm:$off, i16imm:$seg),
- "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
-
- def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
- "ljmp{w}\t{*}$dst", []>, OpSize;
- def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
- "ljmp{l}\t{*}$dst", []>;
-}
-
-
-// Loop instructions
-
-def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
-def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
-def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
-
-//===----------------------------------------------------------------------===//
-// Call Instructions...
-//
-let isCall = 1 in
- // All calls clobber the non-callee saved registers. ESP is marked as
- // a use to prevent stack-pointer assignments that appear immediately
- // before calls from potentially appearing dead. Uses for argument
- // registers are added manually.
- let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in {
- def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i32imm_pcrel:$dst,variable_ops),
- "call\t$dst", []>;
- def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
- "call\t{*}$dst", [(X86call GR32:$dst)]>;
- def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
- "call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>;
-
- def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
- (ins i16imm:$off, i16imm:$seg),
- "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
- def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
- (ins i32imm:$off, i16imm:$seg),
- "lcall{l}\t{$seg, $off|$off, $seg}", []>;
-
- def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
- "lcall{w}\t{*}$dst", []>, OpSize;
- def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
- "lcall{l}\t{*}$dst", []>;
-
- // callw for 16 bit code for the assembler.
- let isAsmParserOnly = 1 in
- def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
- (outs), (ins i16imm_pcrel:$dst, variable_ops),
- "callw\t$dst", []>, OpSize;
- }
// Constructing a stack frame.
+def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
+ "enter\t$len, $lvl", []>;
-def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl),
- "enter\t$len, $lvl", []>;
-
-// Tail call stuff.
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
- isCodeGenOnly = 1 in
- let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in {
- def TCRETURNdi : I<0, Pseudo, (outs),
- (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset", []>;
- def TCRETURNri : I<0, Pseudo, (outs),
- (ins GR32_TC:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset", []>;
- let mayLoad = 1 in
- def TCRETURNmi : I<0, Pseudo, (outs),
- (ins i32mem_TC:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset", []>;
-
- // FIXME: The should be pseudo instructions that are lowered when going to
- // mcinst.
- def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
- (ins i32imm_pcrel:$dst, variable_ops),
- "jmp\t$dst # TAILCALL",
- []>;
- def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
- "", []>; // FIXME: Remove encoding when JIT is dead.
- let mayLoad = 1 in
- def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
- "jmp{l}\t{*}$dst # TAILCALL", []>;
-}
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions...
-//
let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
def LEAVE : I<0xC9, RawFrm,
(outs), (ins), "leave", []>, Requires<[In32BitMode]>;
-def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
-let mayLoad = 1 in
-def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
-def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
-let mayLoad = 1 in
-def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
+let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
+def LEAVE64 : I<0xC9, RawFrm,
+ (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instructions.
+//
let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
@@ -805,6 +619,10 @@ def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", []>,
OpSize;
def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", []>;
+
+def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
+def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>,
+ Requires<[In32BitMode]>;
}
let mayStore = 1 in {
@@ -817,29 +635,54 @@ def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[]>,
OpSize;
def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[]>;
-}
-}
-let Defs = [ESP], Uses = [ESP], neverHasSideEffects = 1, mayStore = 1 in {
-def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
+def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
"push{l}\t$imm", []>;
-def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
+def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
"push{w}\t$imm", []>, OpSize;
-def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
+def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
"push{l}\t$imm", []>;
-}
-let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in {
-def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
-def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>,
- Requires<[In32BitMode]>;
-}
-let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in {
def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize;
def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
Requires<[In32BitMode]>;
+
+}
+}
+
+let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
+let mayLoad = 1 in {
+def POP64r : I<0x58, AddRegFrm,
+ (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
+def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
+def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
+}
+let mayStore = 1 in {
+def PUSH64r : I<0x50, AddRegFrm,
+ (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
+def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
+def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
+}
}
+let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
+def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
+ "push{q}\t$imm", []>;
+def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
+ "push{q}\t$imm", []>;
+def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
+ "push{q}\t$imm", []>;
+}
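// Illustrative note (our gloss): in 64-bit mode the stack width is fixed at
// 64 bits, so the imm8 and imm32 push forms sign-extend the immediate to a
// full quadword before it is stored at the new RSP.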
+
+let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
+def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
+ Requires<[In64BitMode]>;
+let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
+def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
+ Requires<[In64BitMode]>;
+
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad=1, neverHasSideEffects=1 in {
def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", []>,
@@ -851,12 +694,16 @@ def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", []>,
Requires<[In32BitMode]>;
}
-let Uses = [EFLAGS], Constraints = "$src = $dst" in // GR32 = bswap GR32
- def BSWAP32r : I<0xC8, AddRegFrm,
- (outs GR32:$dst), (ins GR32:$src),
- "bswap{l}\t$dst",
- [(set GR32:$dst, (bswap GR32:$src))]>, TB;
+let Constraints = "$src = $dst" in { // GR32 = bswap GR32
+def BSWAP32r : I<0xC8, AddRegFrm,
+ (outs GR32:$dst), (ins GR32:$src),
+ "bswap{l}\t$dst",
+ [(set GR32:$dst, (bswap GR32:$src))]>, TB;
+def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
+ "bswap{q}\t$dst",
+ [(set GR64:$dst, (bswap GR64:$src))]>, TB;
+} // Constraints = "$src = $dst"
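// Worked example (illustrative): bswap of 0x12345678 in a GR32 yields
// 0x78563412; all four (or, for GR64, eight) bytes are reversed, so applying
// bswap twice is the identity.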
// Bit scan instructions.
let Defs = [EFLAGS] in {
@@ -873,6 +720,12 @@ def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>, TB;
+def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "bsf{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
+def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "bsf{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
@@ -887,44 +740,23 @@ def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>, TB;
+def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "bsr{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
+def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "bsr{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]
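// Illustrative semantics (our gloss): bsf scans from bit 0 upward and bsr
// from the top bit downward; for 0x12 (0b10010), bsf yields 1 and bsr yields
// 4. A zero source sets ZF and leaves the destination undefined, which is
// why these define EFLAGS.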
-let neverHasSideEffects = 1 in
-def LEA16r : I<0x8D, MRMSrcMem,
- (outs GR16:$dst), (ins i32mem:$src),
- "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
-let isReMaterializable = 1 in
-def LEA32r : I<0x8D, MRMSrcMem,
- (outs GR32:$dst), (ins i32mem:$src),
- "lea{l}\t{$src|$dst}, {$dst|$src}",
- [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
-
-let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
-def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
- [(X86rep_movs i8)]>, REP;
-def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
- [(X86rep_movs i16)]>, REP, OpSize;
-def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
- [(X86rep_movs i32)]>, REP;
-}
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
def MOVSB : I<0xA4, RawFrm, (outs), (ins), "{movsb}", []>;
def MOVSW : I<0xA5, RawFrm, (outs), (ins), "{movsw}", []>, OpSize;
def MOVSD : I<0xA5, RawFrm, (outs), (ins), "{movsl|movsd}", []>;
+def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;
}
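// Behavior sketch (not part of this patch): after "cld" (DF = 0) movsb
// copies the byte at (ESI) to (EDI) and increments both registers; after
// "std" (DF = 1) both are decremented, which permits overlapping backward
// copies.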
-let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
- [(X86rep_stos i8)]>, REP;
-let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
- [(X86rep_stos i16)]>, REP, OpSize;
-let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
- [(X86rep_stos i32)]>, REP;
-
// These use the DF flag in the EFLAGS register to inc or dec EDI (and, for
// cmps, also ESI)
let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
def STOSB : I<0xAA, RawFrm, (outs), (ins), "{stosb}", []>;
@@ -932,91 +764,24 @@ let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
def STOSW : I<0xAB, RawFrm, (outs), (ins), "{stosw}", []>, OpSize;
let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
def STOSD : I<0xAB, RawFrm, (outs), (ins), "{stosl|stosd}", []>;
+let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
+def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;
def SCAS8 : I<0xAE, RawFrm, (outs), (ins), "scas{b}", []>;
def SCAS16 : I<0xAF, RawFrm, (outs), (ins), "scas{w}", []>, OpSize;
def SCAS32 : I<0xAF, RawFrm, (outs), (ins), "scas{l}", []>;
+def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;
def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmps{b}", []>;
def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmps{w}", []>, OpSize;
def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l}", []>;
-
-let Defs = [RAX, RDX] in
-def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>,
- TB;
-
-let Defs = [RAX, RCX, RDX] in
-def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", []>, TB;
-
-let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
-def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
-}
-
-def SYSCALL : I<0x05, RawFrm,
- (outs), (ins), "syscall", []>, TB;
-def SYSRET : I<0x07, RawFrm,
- (outs), (ins), "sysret", []>, TB;
-def SYSENTER : I<0x34, RawFrm,
- (outs), (ins), "sysenter", []>, TB;
-def SYSEXIT : I<0x35, RawFrm,
- (outs), (ins), "sysexit", []>, TB, Requires<[In32BitMode]>;
-
-def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>;
+def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
//===----------------------------------------------------------------------===//
-// Input/Output Instructions...
+// Move Instructions.
//
-let Defs = [AL], Uses = [DX] in
-def IN8rr : I<0xEC, RawFrm, (outs), (ins),
- "in{b}\t{%dx, %al|%AL, %DX}", []>;
-let Defs = [AX], Uses = [DX] in
-def IN16rr : I<0xED, RawFrm, (outs), (ins),
- "in{w}\t{%dx, %ax|%AX, %DX}", []>, OpSize;
-let Defs = [EAX], Uses = [DX] in
-def IN32rr : I<0xED, RawFrm, (outs), (ins),
- "in{l}\t{%dx, %eax|%EAX, %DX}", []>;
-
-let Defs = [AL] in
-def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i16i8imm:$port),
- "in{b}\t{$port, %al|%AL, $port}", []>;
-let Defs = [AX] in
-def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
- "in{w}\t{$port, %ax|%AX, $port}", []>, OpSize;
-let Defs = [EAX] in
-def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
- "in{l}\t{$port, %eax|%EAX, $port}", []>;
-
-let Uses = [DX, AL] in
-def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
- "out{b}\t{%al, %dx|%DX, %AL}", []>;
-let Uses = [DX, AX] in
-def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
- "out{w}\t{%ax, %dx|%DX, %AX}", []>, OpSize;
-let Uses = [DX, EAX] in
-def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
- "out{l}\t{%eax, %dx|%DX, %EAX}", []>;
-
-let Uses = [AL] in
-def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i16i8imm:$port),
- "out{b}\t{%al, $port|$port, %AL}", []>;
-let Uses = [AX] in
-def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
- "out{w}\t{%ax, $port|$port, %AX}", []>, OpSize;
-let Uses = [EAX] in
-def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
- "out{l}\t{%eax, $port|$port, %EAX}", []>;
-
-def IN8 : I<0x6C, RawFrm, (outs), (ins),
- "ins{b}", []>;
-def IN16 : I<0x6D, RawFrm, (outs), (ins),
- "ins{w}", []>, OpSize;
-def IN32 : I<0x6D, RawFrm, (outs), (ins),
- "ins{l}", []>;
-//===----------------------------------------------------------------------===//
-// Move Instructions...
-//
let neverHasSideEffects = 1 in {
def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>;
@@ -1024,6 +789,8 @@ def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
@@ -1035,6 +802,12 @@ def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, imm:$src)]>;
+def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
+ "movabs{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, imm:$src)]>;
+def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, i64immSExt32:$src)]>;
}
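// Selection sketch (approximate sizes): an immediate that fits when
// sign-extended from 32 bits, e.g. 0x7fffffff, can use the 7-byte MOV64ri32
// (REX.W C7 /0), while 0x100000000 needs the 10-byte movabs form MOV64ri
// (REX.W B8+rd with a full imm64).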
def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
@@ -1046,6 +819,9 @@ def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store (i32 imm:$src), addr:$dst)]>;
+def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(store i64immSExt32:$src, addr:$dst)]>;
/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
/// 32-bit absolute offset from the segment base rather than a PC-relative
/// displacement. These are only valid in x86-32 mode.
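// Encoding sketch (illustrative): "movl 0x12345678, %eax" can be encoded as
// A1 78 56 34 12, i.e. the opcode followed directly by the little-endian
// absolute address with no ModRM byte.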
@@ -1067,24 +843,22 @@ def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
"mov{l}\t{%eax, $dst|$dst, %eax}", []>,
Requires<[In32BitMode]>;
-
-// Moves to and from segment registers
-def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
-def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
-def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
-def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
+
+// FIXME: These definitions are utterly broken
+// Just leave them commented out for now because they're useless outside
+// of the large code model, and most compilers won't generate the instructions
+// in question.
+/*
+def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
+ "mov{q}\t{$src, %rax|%rax, $src}", []>;
+def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
+ "mov{q}\t{$src, %rax|%rax, $src}", []>;
+def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
+ "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
+def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
+ "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
+*/
+
let isCodeGenOnly = 1 in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
@@ -1093,6 +867,8 @@ def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
@@ -1105,6 +881,9 @@ def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (loadi32 addr:$src))]>;
+def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (load addr:$src))]>;
}
def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
@@ -1116,24 +895,9 @@ def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store GR32:$src, addr:$dst)]>;
-
-/// Versions of MOV32rr, MOV32rm, and MOV32mr for i32mem_TC and GR32_TC.
-let isCodeGenOnly = 1 in {
-let neverHasSideEffects = 1 in
-def MOV32rr_TC : I<0x89, MRMDestReg, (outs GR32_TC:$dst), (ins GR32_TC:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>;
-
-let mayLoad = 1,
- canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOV32rm_TC : I<0x8B, MRMSrcMem, (outs GR32_TC:$dst), (ins i32mem_TC:$src),
- "mov{l}\t{$src, $dst|$dst, $src}",
- []>;
-
-let mayStore = 1 in
-def MOV32mr_TC : I<0x89, MRMDestMem, (outs), (ins i32mem_TC:$dst, GR32_TC:$src),
- "mov{l}\t{$src, $dst|$dst, $src}",
- []>;
-}
+def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ [(store GR64:$src, addr:$dst)]>;
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
@@ -1154,2219 +918,6 @@ def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
}
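// Background note (our gloss): once a REX prefix is present, the 8-bit
// register encodings 4-7 select SPL/BPL/SIL/DIL rather than AH/CH/DH/BH, so
// the h registers become unreachable; the _NOREX operands guarantee the
// register allocator never picks an operand that would force a REX prefix.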
-// Moves to and from debug registers
-def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
-
-// Moves to and from control registers
-def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
-
-//===----------------------------------------------------------------------===//
-// Fixed-Register Multiplication and Division Instructions...
-//
-
-// Extra precision multiplication
-
-// AL is really implied by AX, but the registers in Defs must match the
-// SDNode results (i8, i32).
-let Defs = [AL,EFLAGS,AX], Uses = [AL] in
-def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
- // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
- // This probably ought to be moved to a def : Pat<> if the
- // syntax can be accepted.
- [(set AL, (mul AL, GR8:$src)),
- (implicit EFLAGS)]>; // AL,AH = AL*GR8
-
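// Worked example (illustrative): "mulb %bl" computes AX = AL * BL, with the
// low byte of the product in AL and the high byte in AH, matching the
// combined Defs of AL and AX above.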
-let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
-def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
- "mul{w}\t$src",
- []>, OpSize; // AX,DX = AX*GR16
-
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
-def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
- "mul{l}\t$src",
- []>; // EAX,EDX = EAX*GR32
-
-let Defs = [AL,EFLAGS,AX], Uses = [AL] in
-def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
- "mul{b}\t$src",
- // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
- // This probably ought to be moved to a def : Pat<> if the
- // syntax can be accepted.
- [(set AL, (mul AL, (loadi8 addr:$src))),
- (implicit EFLAGS)]>; // AL,AH = AL*[mem8]
-
-let mayLoad = 1, neverHasSideEffects = 1 in {
-let Defs = [AX,DX,EFLAGS], Uses = [AX] in
-def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
- "mul{w}\t$src",
- []>, OpSize; // AX,DX = AX*[mem16]
-
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
-def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
- "mul{l}\t$src",
- []>; // EAX,EDX = EAX*[mem32]
-}
-
-let neverHasSideEffects = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AL] in
-def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>;
- // AL,AH = AL*GR8
-let Defs = [AX,DX,EFLAGS], Uses = [AX] in
-def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>,
- OpSize; // AX,DX = AX*GR16
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
-def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>;
- // EAX,EDX = EAX*GR32
-let mayLoad = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AL] in
-def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
- "imul{b}\t$src", []>; // AL,AH = AL*[mem8]
-let Defs = [AX,DX,EFLAGS], Uses = [AX] in
-def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
- "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
-def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
- "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
-}
-} // neverHasSideEffects
-
-// unsigned division/remainder
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
-def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
- "div{b}\t$src", []>;
-let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
-def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "div{w}\t$src", []>, OpSize;
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
-def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "div{l}\t$src", []>;
-let mayLoad = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
-def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
- "div{b}\t$src", []>;
-let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
-def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "div{w}\t$src", []>, OpSize;
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
- // EDX:EAX/[mem32] = EAX,EDX
-def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src),
- "div{l}\t$src", []>;
-}
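// Illustrative semantics (our gloss): "divb %bl" divides AX by BL, leaving
// the quotient in AL and the remainder in AH; division by zero, or a
// quotient that does not fit the destination, raises #DE rather than
// setting flags.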
-
-// Signed division/remainder.
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
-def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
- "idiv{b}\t$src", []>;
-let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
-def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "idiv{w}\t$src", []>, OpSize;
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
-def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "idiv{l}\t$src", []>;
-let mayLoad = 1 in {
-let Defs = [AL,EFLAGS,AX], Uses = [AX] in
-def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
- "idiv{b}\t$src", []>;
-let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
-def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "idiv{w}\t$src", []>, OpSize;
-let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
-def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
- // EDX:EAX/[mem32] = EAX,EDX
- "idiv{l}\t$src", []>;
-}
-
-//===----------------------------------------------------------------------===//
-// Two address Instructions.
-//
-let Constraints = "$src1 = $dst" in {
-
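// Note on the constraint (our gloss): "$src1 = $dst" ties the first source
// to the destination register, matching x86's read-modify-write encodings;
// e.g. "and %ebx, %eax" both reads and writes %eax.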
-// Conditional moves
-let Uses = [EFLAGS] in {
-
-let Predicates = [HasCMov] in {
-let isCommutable = 1 in {
-def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_B, EFLAGS))]>,
- TB, OpSize;
-def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_B, EFLAGS))]>,
- TB;
-def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovae{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_AE, EFLAGS))]>,
- TB, OpSize;
-def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovae{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_AE, EFLAGS))]>,
- TB;
-def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmove{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_E, EFLAGS))]>,
- TB, OpSize;
-def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmove{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_E, EFLAGS))]>,
- TB;
-def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovne{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_NE, EFLAGS))]>,
- TB, OpSize;
-def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovne{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_NE, EFLAGS))]>,
- TB;
-def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_BE, EFLAGS))]>,
- TB, OpSize;
-def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_BE, EFLAGS))]>,
- TB;
-def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmova{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_A, EFLAGS))]>,
- TB, OpSize;
-def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmova{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_A, EFLAGS))]>,
- TB;
-def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovl{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_L, EFLAGS))]>,
- TB, OpSize;
-def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovl{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_L, EFLAGS))]>,
- TB;
-def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovge{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_GE, EFLAGS))]>,
- TB, OpSize;
-def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovge{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_GE, EFLAGS))]>,
- TB;
-def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovle{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_LE, EFLAGS))]>,
- TB, OpSize;
-def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovle{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_LE, EFLAGS))]>,
- TB;
-def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovg{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_G, EFLAGS))]>,
- TB, OpSize;
-def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovg{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_G, EFLAGS))]>,
- TB;
-def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovs{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_S, EFLAGS))]>,
- TB, OpSize;
-def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovs{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_S, EFLAGS))]>,
- TB;
-def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovns{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_NS, EFLAGS))]>,
- TB, OpSize;
-def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovns{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_NS, EFLAGS))]>,
- TB;
-def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovp{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_P, EFLAGS))]>,
- TB, OpSize;
-def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovp{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_P, EFLAGS))]>,
- TB;
-def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_NP, EFLAGS))]>,
- TB, OpSize;
-def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_NP, EFLAGS))]>,
- TB;
-def CMOVO16rr : I<0x40, MRMSrcReg, // if overflow, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovo{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_O, EFLAGS))]>,
- TB, OpSize;
-def CMOVO32rr : I<0x40, MRMSrcReg, // if overflow, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovo{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_O, EFLAGS))]>,
- TB;
-def CMOVNO16rr : I<0x41, MRMSrcReg, // if !overflow, GR16 = GR16
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "cmovno{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
- X86_COND_NO, EFLAGS))]>,
- TB, OpSize;
-def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "cmovno{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
- X86_COND_NO, EFLAGS))]>,
- TB;
-} // isCommutable = 1
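// Encoding observation (not from this change): the CMOVcc opcodes are
// 0F 40+cc, mirroring the Jcc (0F 80+cc) and SETcc (0F 90+cc) families;
// commuting the two register operands is legal because the condition code
// can be inverted to compensate.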
-
-def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_B, EFLAGS))]>,
- TB, OpSize;
-def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_B, EFLAGS))]>,
- TB;
-def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovae{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_AE, EFLAGS))]>,
- TB, OpSize;
-def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovae{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_AE, EFLAGS))]>,
- TB;
-def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmove{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_E, EFLAGS))]>,
- TB, OpSize;
-def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmove{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_E, EFLAGS))]>,
- TB;
-def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovne{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_NE, EFLAGS))]>,
- TB, OpSize;
-def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovne{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_NE, EFLAGS))]>,
- TB;
-def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_BE, EFLAGS))]>,
- TB, OpSize;
-def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_BE, EFLAGS))]>,
- TB;
-def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmova{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_A, EFLAGS))]>,
- TB, OpSize;
-def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmova{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_A, EFLAGS))]>,
- TB;
-def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovl{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_L, EFLAGS))]>,
- TB, OpSize;
-def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovl{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_L, EFLAGS))]>,
- TB;
-def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovge{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_GE, EFLAGS))]>,
- TB, OpSize;
-def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovge{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_GE, EFLAGS))]>,
- TB;
-def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovle{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_LE, EFLAGS))]>,
- TB, OpSize;
-def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovle{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_LE, EFLAGS))]>,
- TB;
-def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovg{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_G, EFLAGS))]>,
- TB, OpSize;
-def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovg{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_G, EFLAGS))]>,
- TB;
-def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovs{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_S, EFLAGS))]>,
- TB, OpSize;
-def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovs{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_S, EFLAGS))]>,
- TB;
-def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovns{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_NS, EFLAGS))]>,
- TB, OpSize;
-def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovns{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_NS, EFLAGS))]>,
- TB;
-def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovp{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_P, EFLAGS))]>,
- TB, OpSize;
-def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovp{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_P, EFLAGS))]>,
- TB;
-def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_NP, EFLAGS))]>,
- TB, OpSize;
-def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_NP, EFLAGS))]>,
- TB;
-def CMOVO16rm : I<0x40, MRMSrcMem, // if overflow, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovo{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_O, EFLAGS))]>,
- TB, OpSize;
-def CMOVO32rm : I<0x40, MRMSrcMem, // if overflow, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovo{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_O, EFLAGS))]>,
- TB;
-def CMOVNO16rm : I<0x41, MRMSrcMem, // if !overflow, GR16 = [mem16]
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "cmovno{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- X86_COND_NO, EFLAGS))]>,
- TB, OpSize;
-def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "cmovno{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- X86_COND_NO, EFLAGS))]>,
- TB;
-} // Predicates = [HasCMov]
-
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote;
-// however, that requires promoting the operands and can induce additional
-// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
-// clobber EFLAGS, because if one of the operands is zero, the expansion
-// could involve an xor.
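// Expansion sketch (illustrative, not the verbatim inserter): the custom
// inserter lowers "CMOV_GR8 dst, a, b, cc" to a branch diamond, roughly
//   copy dst, a;  jCC join;  copy dst, b;  join:
// with the two copies merged by a phi once the blocks are split.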
-let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
-def CMOV_GR8 : I<0, Pseudo,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
- "#CMOV_GR8 PSEUDO!",
- [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
- imm:$cond, EFLAGS))]>;
-
-let Predicates = [NoCMov] in {
-def CMOV_GR32 : I<0, Pseudo,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
- "#CMOV_GR32* PSEUDO!",
- [(set GR32:$dst,
- (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_GR16 : I<0, Pseudo,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
- "#CMOV_GR16* PSEUDO!",
- [(set GR16:$dst,
- (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_RFP32 : I<0, Pseudo,
- (outs RFP32:$dst),
- (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
- "#CMOV_RFP32 PSEUDO!",
- [(set RFP32:$dst,
- (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
- EFLAGS))]>;
-def CMOV_RFP64 : I<0, Pseudo,
- (outs RFP64:$dst),
- (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
- "#CMOV_RFP64 PSEUDO!",
- [(set RFP64:$dst,
- (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
- EFLAGS))]>;
-def CMOV_RFP80 : I<0, Pseudo,
- (outs RFP80:$dst),
- (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
- "#CMOV_RFP80 PSEUDO!",
- [(set RFP80:$dst,
- (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
- EFLAGS))]>;
-} // Predicates = [NoCMov]
-} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
-} // Uses = [EFLAGS]
-
-
-// Unary instructions
-let CodeSize = 2 in {
-let Defs = [EFLAGS] in {
-def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
- "neg{b}\t$dst",
- [(set GR8:$dst, (ineg GR8:$src1)),
- (implicit EFLAGS)]>;
-def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "neg{w}\t$dst",
- [(set GR16:$dst, (ineg GR16:$src1)),
- (implicit EFLAGS)]>, OpSize;
-def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "neg{l}\t$dst",
- [(set GR32:$dst, (ineg GR32:$src1)),
- (implicit EFLAGS)]>;
-
-let Constraints = "" in {
- def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
- "neg{b}\t$dst",
- [(store (ineg (loadi8 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
- def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
- "neg{w}\t$dst",
- [(store (ineg (loadi16 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
- "neg{l}\t$dst",
- [(store (ineg (loadi32 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
-} // Constraints = ""
-} // Defs = [EFLAGS]
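// Semantics note (our gloss): ineg computes 0 - x in two's complement; CF is
// set for every nonzero input and cleared only for x == 0, which is why
// these sit under Defs = [EFLAGS].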
-
-// Match xor -1 to not. Favors these over a move imm + xor to save code size.
-let AddedComplexity = 15 in {
-def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
- "not{b}\t$dst",
- [(set GR8:$dst, (not GR8:$src1))]>;
-def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "not{w}\t$dst",
- [(set GR16:$dst, (not GR16:$src1))]>, OpSize;
-def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "not{l}\t$dst",
- [(set GR32:$dst, (not GR32:$src1))]>;
-}
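// Size sketch (approximate): "not %eax" is 2 bytes (F7 D0), while the
// move-immediate-plus-xor alternative ("mov $-1, %ecx"; "xor %ecx, %eax")
// costs about 7 bytes, hence the AddedComplexity preference above.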
-let Constraints = "" in {
- def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
- "not{b}\t$dst",
- [(store (not (loadi8 addr:$dst)), addr:$dst)]>;
- def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
- "not{w}\t$dst",
- [(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
- def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
- "not{l}\t$dst",
- [(store (not (loadi32 addr:$dst)), addr:$dst)]>;
-} // Constraints = ""
-} // CodeSize
-
-// TODO: inc/dec is slow for P4, but fast for Pentium-M.
-let Defs = [EFLAGS] in {
-let CodeSize = 2 in
-def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
- "inc{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
-
-let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
- "inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
- OpSize, Requires<[In32BitMode]>;
-def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
- "inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
- Requires<[In32BitMode]>;
-}
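// Illustrative rewrite (our gloss): when the tied-operand form is
// inconvenient, "inc %esi" can become "lea 1(%esi), %edi", producing the
// result in a fresh register at the cost of not updating EFLAGS. The
// one-byte 40+rd/48+rd encodings require In32BitMode because those bytes
// are REX prefixes in 64-bit mode.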
-let Constraints = "", CodeSize = 2 in {
- def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
- [(store (add (loadi8 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>;
- def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In32BitMode]>;
- def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In32BitMode]>;
-} // Constraints = "", CodeSize = 2
-
-let CodeSize = 2 in
-def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
- "dec{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
-let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
- "dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
- OpSize, Requires<[In32BitMode]>;
-def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
- "dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
- Requires<[In32BitMode]>;
-} // CodeSize = 2
-
-let Constraints = "", CodeSize = 2 in {
- def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
- [(store (add (loadi8 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>;
- def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In32BitMode]>;
- def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In32BitMode]>;
-} // Constraints = "", CodeSize = 2
-} // Defs = [EFLAGS]
-
-// Logical operators...
-let Defs = [EFLAGS] in {
-let isCommutable = 1 in { // X = AND Y, Z --> X = AND Z, Y
-def AND8rr : I<0x20, MRMDestReg,
- (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
- "and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1, GR8:$src2))]>;
-def AND16rr : I<0x21, MRMDestReg,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
- GR16:$src2))]>, OpSize;
-def AND32rr : I<0x21, MRMDestReg,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
- GR32:$src2))]>;
-}
-
-// AND instructions with the destination register in REG and the source register
-// in R/M. Included for the disassembler.
-let isCodeGenOnly = 1 in {
-def AND8rr_REV : I<0x22, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "and{b}\t{$src2, $dst|$dst, $src2}", []>;
-def AND16rr_REV : I<0x23, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def AND32rr_REV : I<0x23, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
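// Encoding note (our gloss): reg-reg AND can be encoded in either direction
// (20/21 with the destination in ModRM.r/m, 22/23 with it in ModRM.reg); the
// compiler only emits one form, but the disassembler must accept both, hence
// these isCodeGenOnly _REV definitions.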
-
-def AND8rm : I<0x22, MRMSrcMem,
- (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
- "and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1,
- (loadi8 addr:$src2)))]>;
-def AND16rm : I<0x23, MRMSrcMem,
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
- (loadi16 addr:$src2)))]>,
- OpSize;
-def AND32rm : I<0x23, MRMSrcMem,
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
- (loadi32 addr:$src2)))]>;
-
-def AND8ri : Ii8<0x80, MRM4r,
- (outs GR8 :$dst), (ins GR8 :$src1, i8imm :$src2),
- "and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1,
- imm:$src2))]>;
-def AND16ri : Ii16<0x81, MRM4r,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
- imm:$src2))]>, OpSize;
-def AND32ri : Ii32<0x81, MRM4r,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
- imm:$src2))]>;
-def AND16ri8 : Ii8<0x83, MRM4r,
- (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
- i16immSExt8:$src2))]>,
- OpSize;
-def AND32ri8 : Ii8<0x83, MRM4r,
- (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
- i32immSExt8:$src2))]>;
-
-let Constraints = "" in {
- def AND8mr : I<0x20, MRMDestMem,
- (outs), (ins i8mem :$dst, GR8 :$src),
- "and{b}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR8:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def AND16mr : I<0x21, MRMDestMem,
- (outs), (ins i16mem:$dst, GR16:$src),
- "and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR16:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def AND32mr : I<0x21, MRMDestMem,
- (outs), (ins i32mem:$dst, GR32:$src),
- "and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR32:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def AND8mi : Ii8<0x80, MRM4m,
- (outs), (ins i8mem :$dst, i8imm :$src),
- "and{b}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi8 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def AND16mi : Ii16<0x81, MRM4m,
- (outs), (ins i16mem:$dst, i16imm:$src),
- "and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi16 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def AND32mi : Ii32<0x81, MRM4m,
- (outs), (ins i32mem:$dst, i32imm:$src),
- "and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi32 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def AND16mi8 : Ii8<0x83, MRM4m,
- (outs), (ins i16mem:$dst, i16i8imm :$src),
- "and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i16immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def AND32mi8 : Ii8<0x83, MRM4m,
- (outs), (ins i32mem:$dst, i32i8imm :$src),
- "and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
- def AND8i8 : Ii8<0x24, RawFrm, (outs), (ins i8imm:$src),
- "and{b}\t{$src, %al|%al, $src}", []>;
- def AND16i16 : Ii16<0x25, RawFrm, (outs), (ins i16imm:$src),
- "and{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def AND32i32 : Ii32<0x25, RawFrm, (outs), (ins i32imm:$src),
- "and{l}\t{$src, %eax|%eax, $src}", []>;
-
-} // Constraints = ""
-
-
-let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
-def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst),
- (ins GR8 :$src1, GR8 :$src2),
- "or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1, GR8:$src2))]>;
-def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,GR16:$src2))]>,
- OpSize;
-def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,GR32:$src2))]>;
-}
-
-// OR instructions with the destination register in REG and the source register
-// in R/M. Included for the disassembler.
-let isCodeGenOnly = 1 in {
-def OR8rr_REV : I<0x0A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "or{b}\t{$src2, $dst|$dst, $src2}", []>;
-def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "or{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def OR32rr_REV : I<0x0B, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "or{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def OR8rm : I<0x0A, MRMSrcMem, (outs GR8 :$dst),
- (ins GR8 :$src1, i8mem :$src2),
- "or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1,
- (load addr:$src2)))]>;
-def OR16rm : I<0x0B, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
- (load addr:$src2)))]>,
- OpSize;
-def OR32rm : I<0x0B, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
- (load addr:$src2)))]>;
-
-def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst),
- (ins GR8 :$src1, i8imm:$src2),
- "or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst,EFLAGS, (X86or_flag GR8:$src1, imm:$src2))]>;
-def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst),
- (ins GR16:$src1, i16imm:$src2),
- "or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
- imm:$src2))]>, OpSize;
-def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst),
- (ins GR32:$src1, i32imm:$src2),
- "or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
- imm:$src2))]>;
-
-def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst),
- (ins GR16:$src1, i16i8imm:$src2),
- "or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
- i16immSExt8:$src2))]>, OpSize;
-def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst),
- (ins GR32:$src1, i32i8imm:$src2),
- "or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
- i32immSExt8:$src2))]>;
-let Constraints = "" in {
- def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
- "or{b}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR8:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def OR16mr : I<0x09, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
- "or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR16:$src), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def OR32mr : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR32:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def OR8mi : Ii8<0x80, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
- "or{b}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi8 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def OR16mi : Ii16<0x81, MRM1m, (outs), (ins i16mem:$dst, i16imm:$src),
- "or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi16 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def OR32mi : Ii32<0x81, MRM1m, (outs), (ins i32mem:$dst, i32imm:$src),
- "or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi32 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def OR16mi8 : Ii8<0x83, MRM1m, (outs), (ins i16mem:$dst, i16i8imm:$src),
- "or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def OR32mi8 : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$src),
- "or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
- def OR8i8 : Ii8 <0x0C, RawFrm, (outs), (ins i8imm:$src),
- "or{b}\t{$src, %al|%al, $src}", []>;
- def OR16i16 : Ii16 <0x0D, RawFrm, (outs), (ins i16imm:$src),
- "or{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def OR32i32 : Ii32 <0x0D, RawFrm, (outs), (ins i32imm:$src),
- "or{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-
-
-let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
- def XOR8rr : I<0x30, MRMDestReg,
- (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
- "xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
- GR8:$src2))]>;
- def XOR16rr : I<0x31, MRMDestReg,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
- GR16:$src2))]>, OpSize;
- def XOR32rr : I<0x31, MRMDestReg,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
- GR32:$src2))]>;
-} // isCommutable = 1
-
-// XOR instructions with the destination register in REG and the source register
-// in R/M. Included for the disassembler.
-let isCodeGenOnly = 1 in {
-def XOR8rr_REV : I<0x32, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "xor{b}\t{$src2, $dst|$dst, $src2}", []>;
-def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def XOR32rr_REV : I<0x33, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "xor{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def XOR8rm : I<0x32, MRMSrcMem,
- (outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
- "xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
- (load addr:$src2)))]>;
-def XOR16rm : I<0x33, MRMSrcMem,
- (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
- (load addr:$src2)))]>,
- OpSize;
-def XOR32rm : I<0x33, MRMSrcMem,
- (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
- "xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
- (load addr:$src2)))]>;
-
-def XOR8ri : Ii8<0x80, MRM6r,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1, imm:$src2))]>;
-def XOR16ri : Ii16<0x81, MRM6r,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
- imm:$src2))]>, OpSize;
-def XOR32ri : Ii32<0x81, MRM6r,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
- "xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
- imm:$src2))]>;
-def XOR16ri8 : Ii8<0x83, MRM6r,
- (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
- i16immSExt8:$src2))]>,
- OpSize;
-def XOR32ri8 : Ii8<0x83, MRM6r,
- (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
- "xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
- i32immSExt8:$src2))]>;
-
-let Constraints = "" in {
- def XOR8mr : I<0x30, MRMDestMem,
- (outs), (ins i8mem :$dst, GR8 :$src),
- "xor{b}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR8:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def XOR16mr : I<0x31, MRMDestMem,
- (outs), (ins i16mem:$dst, GR16:$src),
- "xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR16:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def XOR32mr : I<0x31, MRMDestMem,
- (outs), (ins i32mem:$dst, GR32:$src),
- "xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR32:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def XOR8mi : Ii8<0x80, MRM6m,
- (outs), (ins i8mem :$dst, i8imm :$src),
- "xor{b}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def XOR16mi : Ii16<0x81, MRM6m,
- (outs), (ins i16mem:$dst, i16imm:$src),
- "xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def XOR32mi : Ii32<0x81, MRM6m,
- (outs), (ins i32mem:$dst, i32imm:$src),
- "xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst),
- (implicit EFLAGS)]>;
- def XOR16mi8 : Ii8<0x83, MRM6m,
- (outs), (ins i16mem:$dst, i16i8imm :$src),
- "xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize;
- def XOR32mi8 : Ii8<0x83, MRM6m,
- (outs), (ins i32mem:$dst, i32i8imm :$src),
- "xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst),
- (implicit EFLAGS)]>;
-
- def XOR8i8 : Ii8 <0x34, RawFrm, (outs), (ins i8imm:$src),
- "xor{b}\t{$src, %al|%al, $src}", []>;
- def XOR16i16 : Ii16<0x35, RawFrm, (outs), (ins i16imm:$src),
- "xor{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def XOR32i32 : Ii32<0x35, RawFrm, (outs), (ins i32imm:$src),
- "xor{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-} // Defs = [EFLAGS]
-
-// Shift instructions
-let Defs = [EFLAGS] in {
-let Uses = [CL] in {
-def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
- "shl{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (shl GR8:$src1, CL))]>;
-def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
- "shl{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
-def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
- "shl{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (shl GR32:$src1, CL))]>;
-} // Uses = [CL]
-
-def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
- "shl{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
-
-let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
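-// (Illustrative sketch, registers chosen arbitrarily: shift counts 1-3 map
-// onto LEA's 2/4/8 address scales, so the two-address 'shl $3, %eax' can
-// become the three-address 'lea (,%eax,8), %ecx'.)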
-def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
- "shl{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
-def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
- "shl{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
-
-// NOTE: We don't include patterns for shifts of a register by one, because
-// 'add reg,reg' is cheaper.
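-// (Illustrative example: 'shl $1, %eax' and 'add %eax, %eax' compute the
-// same value, since x << 1 == x + x, so selection prefers the add form.)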
-
-def SHL8r1 : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
- "shl{b}\t$dst", []>;
-def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
- "shl{w}\t$dst", []>, OpSize;
-def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
- "shl{l}\t$dst", []>;
-
-} // isConvertibleToThreeAddress = 1
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
- "shl{b}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
- def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
- "shl{w}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
- def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
- "shl{l}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>;
- }
- def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
- "shl{b}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
- def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
- "shl{w}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
- OpSize;
- def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
- "shl{l}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-
- // Shift by 1
- def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
- "shl{b}\t$dst",
- [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
- def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
- "shl{w}\t$dst",
- [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
- OpSize;
- def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
- "shl{l}\t$dst",
- [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-} // Constraints = ""
-
-let Uses = [CL] in {
-def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
- "shr{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (srl GR8:$src1, CL))]>;
-def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
- "shr{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize;
-def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
- "shr{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (srl GR32:$src1, CL))]>;
-}
-
-def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "shr{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
-def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
- "shr{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
-def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
- "shr{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>;
-
-// Shift by 1
-def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
- "shr{b}\t$dst",
- [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
-def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
- "shr{w}\t$dst",
- [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize;
-def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
- "shr{l}\t$dst",
- [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
- "shr{b}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
- def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
- "shr{w}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
- OpSize;
- def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
- "shr{l}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>;
- }
- def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
- "shr{b}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
- def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
- "shr{w}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
- OpSize;
- def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
- "shr{l}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-
- // Shift by 1
- def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
- "shr{b}\t$dst",
- [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
- def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
- "shr{w}\t$dst",
- [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,OpSize;
- def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
- "shr{l}\t$dst",
- [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-} // Constraints = ""
-
-let Uses = [CL] in {
-def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
- "sar{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (sra GR8:$src1, CL))]>;
-def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
- "sar{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (sra GR16:$src1, CL))]>, OpSize;
-def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
- "sar{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (sra GR32:$src1, CL))]>;
-}
-
-def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
- "sar{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
-def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
- "sar{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
- OpSize;
-def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
- "sar{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>;
-
-// Shift by 1
-def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
- "sar{b}\t$dst",
- [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
-def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
- "sar{w}\t$dst",
- [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize;
-def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
- "sar{l}\t$dst",
- [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
- "sar{b}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
- def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
- "sar{w}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
- def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
- "sar{l}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>;
- }
- def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
- "sar{b}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
- def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
- "sar{w}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
- OpSize;
- def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
- "sar{l}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-
- // Shift by 1
- def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
- "sar{b}\t$dst",
- [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
- def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
- "sar{w}\t$dst",
- [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
- OpSize;
- def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
- "sar{l}\t$dst",
- [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-} // Constraints = ""
-
-// Rotate instructions
-
-def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
- "rcl{b}\t{1, $dst|$dst, 1}", []>;
-let Uses = [CL] in {
-def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
- "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
-}
-def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
- "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t{1, $dst|$dst, 1}", []>, OpSize;
-let Uses = [CL] in {
-def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
-}
-def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-
-def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t{1, $dst|$dst, 1}", []>;
-let Uses = [CL] in {
-def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
-}
-def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
- "rcr{b}\t{1, $dst|$dst, 1}", []>;
-let Uses = [CL] in {
-def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
- "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
-}
-def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
- "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t{1, $dst|$dst, 1}", []>, OpSize;
-let Uses = [CL] in {
-def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
-}
-def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-
-def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t{1, $dst|$dst, 1}", []>;
-let Uses = [CL] in {
-def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
-}
-def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-let Constraints = "" in {
-def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
- "rcl{b}\t{1, $dst|$dst, 1}", []>;
-def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
- "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t{1, $dst|$dst, 1}", []>, OpSize;
-def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t{1, $dst|$dst, 1}", []>;
-def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
- "rcr{b}\t{1, $dst|$dst, 1}", []>;
-def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, i8imm:$cnt),
- "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t{1, $dst|$dst, 1}", []>, OpSize;
-def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t{1, $dst|$dst, 1}", []>;
-def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-
-let Uses = [CL] in {
-def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
- "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
-def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
-def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
-def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
- "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
-def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
-def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
-}
-} // Constraints = ""
-
-// FIXME: provide shorter instructions when imm8 == 1
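-// (Sketch: the 0xD0/0xD1 rotate-by-1 forms omit the immediate byte, so e.g.
-// 'rol $1, %al' could be encoded one byte shorter than via the 0xC0 form.)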
-let Uses = [CL] in {
-def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
- "rol{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
-def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
- "rol{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize;
-def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
- "rol{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotl GR32:$src1, CL))]>;
-}
-
-def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
- "rol{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
-def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
- "rol{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>,
- OpSize;
-def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
- "rol{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>;
-
-// Rotate by 1
-def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
- "rol{b}\t$dst",
- [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
-def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
- "rol{w}\t$dst",
- [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize;
-def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
- "rol{l}\t$dst",
- [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
- "rol{b}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
- def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
- "rol{w}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
- def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
- "rol{l}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>;
- }
- def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src),
- "rol{b}\t{$src, $dst|$dst, $src}",
- [(store (rotl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
- def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src),
- "rol{w}\t{$src, $dst|$dst, $src}",
- [(store (rotl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
- OpSize;
- def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src),
- "rol{l}\t{$src, $dst|$dst, $src}",
- [(store (rotl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-
- // Rotate by 1
- def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
- "rol{b}\t$dst",
- [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
- def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
- "rol{w}\t$dst",
- [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
- OpSize;
- def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
- "rol{l}\t$dst",
- [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-} // Constraints = ""
-
-let Uses = [CL] in {
-def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
- "ror{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
-def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
- "ror{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize;
-def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
- "ror{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotr GR32:$src1, CL))]>;
-}
-
-def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
- "ror{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
-def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
- "ror{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>,
- OpSize;
-def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
- "ror{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>;
-
-// Rotate by 1
-def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
- "ror{b}\t$dst",
- [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
-def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
- "ror{w}\t$dst",
- [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize;
-def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
- "ror{l}\t$dst",
- [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
- "ror{b}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
- def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
- "ror{w}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
- def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
- "ror{l}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>;
- }
- def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
- "ror{b}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
- def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
- "ror{w}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
- OpSize;
- def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
- "ror{l}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
-
- // Rotate by 1
- def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
- "ror{b}\t$dst",
- [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
- def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
- "ror{w}\t$dst",
- [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
- OpSize;
- def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
- "ror{l}\t$dst",
- [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-} // Constraints = ""
-
-
-// Double shift instructions (generalizations of rotate)
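-// Semantics sketch (AT&T syntax, counts 1-31, registers chosen arbitrarily):
-//   shld %cl, %ebx, %eax   ; %eax = (%eax << %cl) | (%ebx >> (32 - %cl))
-//   shrd %cl, %ebx, %eax   ; %eax = (%eax >> %cl) | (%ebx << (32 - %cl))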
-let Uses = [CL] in {
-def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB;
-def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB;
-def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
- TB, OpSize;
-def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
- TB, OpSize;
-}
-
-let isCommutable = 1 in { // These instructions commute to each other.
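-// (E.g. 'shld $8, %ebx, %eax' computes the same value as
-// 'shrd $24, %eax, %ebx': operands swapped, count complemented to 32 - c.)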
-def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
- (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2, i8imm:$src3),
- "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
- (i8 imm:$src3)))]>,
- TB;
-def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
- (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2, i8imm:$src3),
- "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
- (i8 imm:$src3)))]>,
- TB;
-def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
- (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2, i8imm:$src3),
- "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
- (i8 imm:$src3)))]>,
- TB, OpSize;
-def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
- (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2, i8imm:$src3),
- "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
- (i8 imm:$src3)))]>,
- TB, OpSize;
-}
-
-let Constraints = "" in {
- let Uses = [CL] in {
- def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)]>, TB;
- def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)]>, TB;
- }
- def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
- (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
- "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB;
- def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
- (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
- "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB;
-
- let Uses = [CL] in {
- def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)]>, TB, OpSize;
- def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)]>, TB, OpSize;
- }
- def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
- (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
- "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB, OpSize;
- def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
- (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
- "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
- TB, OpSize;
-} // Constraints = ""
-} // Defs = [EFLAGS]
-
-
-// Arithmetic.
-let Defs = [EFLAGS] in {
-let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
-// Register-Register Addition
-def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
- (ins GR8 :$src1, GR8 :$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86add_flag GR8:$src1, GR8:$src2))]>;
-
-let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
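-// (Sketch, registers chosen arbitrarily: the two-address 'add %ebx, %eax'
-// overwrites %eax, while the three-address 'lea (%eax,%ebx), %ecx' leaves
-// both sources intact; note LEA does not update EFLAGS.)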
-// Register-Register Addition
-def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86add_flag GR16:$src1,
- GR16:$src2))]>, OpSize;
-def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86add_flag GR32:$src1,
- GR32:$src2))]>;
-} // end isConvertibleToThreeAddress
-} // end isCommutable
-
-// These are alternate spellings for use by the disassembler; we mark them as
-// code gen only to ensure they aren't matched by the assembler.
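-// (Background sketch: x86 encodes reg-reg ADD two ways, opcode 0x00/0x01
-// with the destination in ModRM's r/m field and 0x02/0x03 with it in the
-// reg field, so 'add %bl, %al' has two equivalent byte sequences.)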
-let isCodeGenOnly = 1 in {
- def ADD8rr_alt: I<0x02, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}", []>;
- def ADD16rr_alt: I<0x03, MRMSrcReg,(outs GR16:$dst),(ins GR16:$src1, GR16:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
- def ADD32rr_alt: I<0x03, MRMSrcReg,(outs GR32:$dst),(ins GR32:$src1, GR32:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-// Register-Memory Addition
-def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
- (ins GR8 :$src1, i8mem :$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS, (X86add_flag GR8:$src1,
- (load addr:$src2)))]>;
-def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS, (X86add_flag GR16:$src1,
- (load addr:$src2)))]>, OpSize;
-def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS, (X86add_flag GR32:$src1,
- (load addr:$src2)))]>;
-
-// Register-Integer Addition
-def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS,
- (X86add_flag GR8:$src1, imm:$src2))]>;
-
-let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
-// Register-Integer Addition
-def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
- (ins GR16:$src1, i16imm:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86add_flag GR16:$src1, imm:$src2))]>, OpSize;
-def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
- (ins GR32:$src1, i32imm:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86add_flag GR32:$src1, imm:$src2))]>;
-def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
- (ins GR16:$src1, i16i8imm:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86add_flag GR16:$src1, i16immSExt8:$src2))]>, OpSize;
-def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
- (ins GR32:$src1, i32i8imm:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86add_flag GR32:$src1, i32immSExt8:$src2))]>;
-}
-
-let Constraints = "" in {
- // Memory-Register Addition
- def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR16:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), GR32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)]>;
-
- // addition to rAX
- def ADD8i8 : Ii8<0x04, RawFrm, (outs), (ins i8imm:$src),
- "add{b}\t{$src, %al|%al, $src}", []>;
- def ADD16i16 : Ii16<0x05, RawFrm, (outs), (ins i16imm:$src),
- "add{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def ADD32i32 : Ii32<0x05, RawFrm, (outs), (ins i32imm:$src),
- "add{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-
-let Uses = [EFLAGS] in {
-let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
-def ADC8rr : I<0x10, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (adde GR8:$src1, GR8:$src2))]>;
-def ADC16rr : I<0x11, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (adde GR16:$src1, GR16:$src2))]>, OpSize;
-def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>;
-}
-
-let isCodeGenOnly = 1 in {
-def ADC8rr_REV : I<0x12, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}", []>;
-def ADC16rr_REV : I<0x13, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def ADC32rr_REV : I<0x13, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def ADC8rm : I<0x12, MRMSrcMem , (outs GR8:$dst),
- (ins GR8:$src1, i8mem:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (adde GR8:$src1, (load addr:$src2)))]>;
-def ADC16rm : I<0x13, MRMSrcMem , (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (adde GR16:$src1, (load addr:$src2)))]>,
- OpSize;
-def ADC32rm : I<0x13, MRMSrcMem , (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (adde GR32:$src1, (load addr:$src2)))]>;
-def ADC8ri : Ii8<0x80, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (adde GR8:$src1, imm:$src2))]>;
-def ADC16ri : Ii16<0x81, MRM2r, (outs GR16:$dst),
- (ins GR16:$src1, i16imm:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (adde GR16:$src1, imm:$src2))]>, OpSize;
-def ADC16ri8 : Ii8<0x83, MRM2r, (outs GR16:$dst),
- (ins GR16:$src1, i16i8imm:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (adde GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
-def ADC32ri : Ii32<0x81, MRM2r, (outs GR32:$dst),
- (ins GR32:$src1, i32imm:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (adde GR32:$src1, imm:$src2))]>;
-def ADC32ri8 : Ii8<0x83, MRM2r, (outs GR32:$dst),
- (ins GR32:$src1, i32i8imm:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (adde GR32:$src1, i32immSExt8:$src2))]>;
-
-let Constraints = "" in {
- def ADC8mr : I<0x10, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), GR8:$src2), addr:$dst)]>;
- def ADC16mr : I<0x11, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), GR16:$src2), addr:$dst)]>,
- OpSize;
- def ADC32mr : I<0x11, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), GR32:$src2), addr:$dst)]>;
- def ADC8mi : Ii8<0x80, MRM2m, (outs), (ins i8mem:$dst, i8imm:$src2),
- "adc{b}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
- def ADC16mi : Ii16<0x81, MRM2m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
- OpSize;
- def ADC16mi8 : Ii8<0x83, MRM2m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "adc{w}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
- OpSize;
- def ADC32mi : Ii32<0x81, MRM2m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
- def ADC32mi8 : Ii8<0x83, MRM2m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "adc{l}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
-
- def ADC8i8 : Ii8<0x14, RawFrm, (outs), (ins i8imm:$src),
- "adc{b}\t{$src, %al|%al, $src}", []>;
- def ADC16i16 : Ii16<0x15, RawFrm, (outs), (ins i16imm:$src),
- "adc{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def ADC32i32 : Ii32<0x15, RawFrm, (outs), (ins i32imm:$src),
- "adc{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-} // Uses = [EFLAGS]
-
-// Register-Register Subtraction
-def SUB8rr : I<0x28, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS,
- (X86sub_flag GR8:$src1, GR8:$src2))]>;
-def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86sub_flag GR16:$src1, GR16:$src2))]>, OpSize;
-def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86sub_flag GR32:$src1, GR32:$src2))]>;
-
-let isCodeGenOnly = 1 in {
-def SUB8rr_REV : I<0x2A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}", []>;
-def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def SUB32rr_REV : I<0x2B, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-// Register-Memory Subtraction
-def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst),
- (ins GR8 :$src1, i8mem :$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS,
- (X86sub_flag GR8:$src1, (load addr:$src2)))]>;
-def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86sub_flag GR16:$src1, (load addr:$src2)))]>, OpSize;
-def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86sub_flag GR32:$src1, (load addr:$src2)))]>;
-
-// Register-Integer Subtraction
-def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
- (ins GR8:$src1, i8imm:$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, EFLAGS,
- (X86sub_flag GR8:$src1, imm:$src2))]>;
-def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
- (ins GR16:$src1, i16imm:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86sub_flag GR16:$src1, imm:$src2))]>, OpSize;
-def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
- (ins GR32:$src1, i32imm:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86sub_flag GR32:$src1, imm:$src2))]>;
-def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
- (ins GR16:$src1, i16i8imm:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86sub_flag GR16:$src1, i16immSExt8:$src2))]>, OpSize;
-def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
- (ins GR32:$src1, i32i8imm:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86sub_flag GR32:$src1, i32immSExt8:$src2))]>;
-
-let Constraints = "" in {
- // Memory-Register Subtraction
- def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR8:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR16:$src2), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
-
- // Memory-Integer Subtraction
- def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
- "sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst),
- (implicit EFLAGS)]>;
- def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi16 addr:$dst), imm:$src2),addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi32 addr:$dst), imm:$src2),addr:$dst),
- (implicit EFLAGS)]>;
- def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)]>, OpSize;
- def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)]>;
-
- def SUB8i8 : Ii8<0x2C, RawFrm, (outs), (ins i8imm:$src),
- "sub{b}\t{$src, %al|%al, $src}", []>;
- def SUB16i16 : Ii16<0x2D, RawFrm, (outs), (ins i16imm:$src),
- "sub{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def SUB32i32 : Ii32<0x2D, RawFrm, (outs), (ins i32imm:$src),
- "sub{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-
-let Uses = [EFLAGS] in {
-def SBB8rr : I<0x18, MRMDestReg, (outs GR8:$dst),
- (ins GR8:$src1, GR8:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sube GR8:$src1, GR8:$src2))]>;
-def SBB16rr : I<0x19, MRMDestReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sube GR16:$src1, GR16:$src2))]>, OpSize;
-def SBB32rr : I<0x19, MRMDestReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
-
-let Constraints = "" in {
- def SBB8mr : I<0x18, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), GR8:$src2), addr:$dst)]>;
- def SBB16mr : I<0x19, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), GR16:$src2), addr:$dst)]>,
- OpSize;
- def SBB32mr : I<0x19, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), GR32:$src2), addr:$dst)]>;
- def SBB8mi : Ii8<0x80, MRM3m, (outs), (ins i8mem:$dst, i8imm:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
- def SBB16mi : Ii16<0x81, MRM3m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
- OpSize;
- def SBB16mi8 : Ii8<0x83, MRM3m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
- OpSize;
- def SBB32mi : Ii32<0x81, MRM3m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
- def SBB32mi8 : Ii8<0x83, MRM3m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
-
- def SBB8i8 : Ii8<0x1C, RawFrm, (outs), (ins i8imm:$src),
- "sbb{b}\t{$src, %al|%al, $src}", []>;
- def SBB16i16 : Ii16<0x1D, RawFrm, (outs), (ins i16imm:$src),
- "sbb{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def SBB32i32 : Ii32<0x1D, RawFrm, (outs), (ins i32imm:$src),
- "sbb{l}\t{$src, %eax|%eax, $src}", []>;
-} // Constraints = ""
-
-let isCodeGenOnly = 1 in {
-def SBB8rr_REV : I<0x1A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}", []>;
-def SBB16rr_REV : I<0x1B, MRMSrcReg, (outs GR16:$dst),
- (ins GR16:$src1, GR16:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def SBB32rr_REV : I<0x1B, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}", []>;
-}
-
-def SBB8rm : I<0x1A, MRMSrcMem, (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sube GR8:$src1, (load addr:$src2)))]>;
-def SBB16rm : I<0x1B, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sube GR16:$src1, (load addr:$src2)))]>,
- OpSize;
-def SBB32rm : I<0x1B, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sube GR32:$src1, (load addr:$src2)))]>;
-def SBB8ri : Ii8<0x80, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "sbb{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sube GR8:$src1, imm:$src2))]>;
-def SBB16ri : Ii16<0x81, MRM3r, (outs GR16:$dst),
- (ins GR16:$src1, i16imm:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sube GR16:$src1, imm:$src2))]>, OpSize;
-def SBB16ri8 : Ii8<0x83, MRM3r, (outs GR16:$dst),
- (ins GR16:$src1, i16i8imm:$src2),
- "sbb{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sube GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
-def SBB32ri : Ii32<0x81, MRM3r, (outs GR32:$dst),
- (ins GR32:$src1, i32imm:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sube GR32:$src1, imm:$src2))]>;
-def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst),
- (ins GR32:$src1, i32i8imm:$src2),
- "sbb{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sube GR32:$src1, i32immSExt8:$src2))]>;
-} // Uses = [EFLAGS]
-} // Defs = [EFLAGS]
-
-let Defs = [EFLAGS] in {
-let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
-// Register-Register Signed Integer Multiply
-def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
- "imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, GR16:$src2))]>, TB, OpSize;
-def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
- "imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, GR32:$src2))]>, TB;
-}
-
-// Register-Memory Signed Integer Multiply
-def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$src1, i16mem:$src2),
- "imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, (load addr:$src2)))]>,
- TB, OpSize;
-def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, (load addr:$src2)))]>, TB;
-} // Defs = [EFLAGS]
-} // end Two Address instructions
-
-// Surprisingly enough, these are not two address instructions!
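-// (E.g. the three-operand 'imul $3, %ecx, %eax' writes %ecx * 3 to %eax;
-// the destination is not tied to a source operand.)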
-let Defs = [EFLAGS] in {
-// Register-Integer Signed Integer Multiply
-def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
- "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, imm:$src2))]>, OpSize;
-def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
- "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, imm:$src2))]>;
-def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
- (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
- "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
-def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
- (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
- "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, i32immSExt8:$src2))]>;
-
-// Memory-Integer Signed Integer Multiply
-def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
- (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
- "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1), imm:$src2))]>,
- OpSize;
-def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
- (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
- "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1), imm:$src2))]>;
-def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
- (outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
- "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1),
- i16immSExt8:$src2))]>, OpSize;
-def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
- (outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
- "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1),
- i32immSExt8:$src2))]>;
-} // Defs = [EFLAGS]
-
-//===----------------------------------------------------------------------===//
-// Test instructions are just like AND, except they don't generate a result.
-//
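-// (E.g. 'test %eax, %eax' sets ZF/SF/PF from %eax & %eax and discards the
-// AND result: the usual idiom for comparing a register against zero.)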
-let Defs = [EFLAGS] in {
-let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
-def TEST8rr : I<0x84, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
- "test{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR8:$src1, GR8:$src2), 0))]>;
-def TEST16rr : I<0x85, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
- "test{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR16:$src1, GR16:$src2),
- 0))]>,
- OpSize;
-def TEST32rr : I<0x85, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
- "test{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR32:$src1, GR32:$src2),
- 0))]>;
-}
-
-def TEST8i8 : Ii8<0xA8, RawFrm, (outs), (ins i8imm:$src),
- "test{b}\t{$src, %al|%al, $src}", []>;
-def TEST16i16 : Ii16<0xA9, RawFrm, (outs), (ins i16imm:$src),
- "test{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
-def TEST32i32 : Ii32<0xA9, RawFrm, (outs), (ins i32imm:$src),
- "test{l}\t{$src, %eax|%eax, $src}", []>;
-
-def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
- "test{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR8:$src1, (loadi8 addr:$src2)),
- 0))]>;
-def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
- "test{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR16:$src1,
- (loadi16 addr:$src2)), 0))]>, OpSize;
-def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
- "test{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and GR32:$src1,
- (loadi32 addr:$src2)), 0))]>;
-
-def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
- (outs), (ins GR8:$src1, i8imm:$src2),
- "test{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR8:$src1, imm:$src2), 0))]>;
-def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
- (outs), (ins GR16:$src1, i16imm:$src2),
- "test{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR16:$src1, imm:$src2), 0))]>,
- OpSize;
-def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
- (outs), (ins GR32:$src1, i32imm:$src2),
- "test{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and_su GR32:$src1, imm:$src2), 0))]>;
-
-def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
- (outs), (ins i8mem:$src1, i8imm:$src2),
- "test{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and (loadi8 addr:$src1), imm:$src2),
- 0))]>;
-def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
- (outs), (ins i16mem:$src1, i16imm:$src2),
- "test{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and (loadi16 addr:$src1), imm:$src2),
- 0))]>, OpSize;
-def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
- (outs), (ins i32mem:$src1, i32imm:$src2),
- "test{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (and (loadi32 addr:$src1), imm:$src2),
- 0))]>;
-} // Defs = [EFLAGS]
-
// Condition code ops, incl. set if equal/not equal/...
let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
@@ -3374,305 +925,10 @@ def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
-let Uses = [EFLAGS] in {
-// Use sbb to materialize carry bit.
-let Defs = [EFLAGS], isCodeGenOnly = 1 in {
-// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
-// However, Pat<> can't replicate the destination reg into the inputs of the
-// result.
-// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
-// X86CodeEmitter.
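-// (Sketch of the trick: 'sbb %eax, %eax' computes %eax - %eax - CF, which
-// is 0 when the carry flag is clear and 0xFFFFFFFF when it is set.)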
-def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
- [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
-def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
- [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
- OpSize;
-def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
-} // isCodeGenOnly
-
-def SETEr : I<0x94, MRM0r,
- (outs GR8 :$dst), (ins),
- "sete\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
- TB; // GR8 = ==
-def SETEm : I<0x94, MRM0m,
- (outs), (ins i8mem:$dst),
- "sete\t$dst",
- [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = ==
-
-def SETNEr : I<0x95, MRM0r,
- (outs GR8 :$dst), (ins),
- "setne\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
- TB; // GR8 = !=
-def SETNEm : I<0x95, MRM0m,
- (outs), (ins i8mem:$dst),
- "setne\t$dst",
- [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = !=
-
-def SETLr : I<0x9C, MRM0r,
- (outs GR8 :$dst), (ins),
- "setl\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
- TB; // GR8 = < signed
-def SETLm : I<0x9C, MRM0m,
- (outs), (ins i8mem:$dst),
- "setl\t$dst",
- [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = < signed
-
-def SETGEr : I<0x9D, MRM0r,
- (outs GR8 :$dst), (ins),
- "setge\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
- TB; // GR8 = >= signed
-def SETGEm : I<0x9D, MRM0m,
- (outs), (ins i8mem:$dst),
- "setge\t$dst",
- [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = >= signed
-
-def SETLEr : I<0x9E, MRM0r,
- (outs GR8 :$dst), (ins),
- "setle\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
- TB; // GR8 = <= signed
-def SETLEm : I<0x9E, MRM0m,
- (outs), (ins i8mem:$dst),
- "setle\t$dst",
- [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = <= signed
-
-def SETGr : I<0x9F, MRM0r,
- (outs GR8 :$dst), (ins),
- "setg\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
- TB; // GR8 = > signed
-def SETGm : I<0x9F, MRM0m,
- (outs), (ins i8mem:$dst),
- "setg\t$dst",
- [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = > signed
-
-def SETBr : I<0x92, MRM0r,
- (outs GR8 :$dst), (ins),
- "setb\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
- TB; // GR8 = < unsign
-def SETBm : I<0x92, MRM0m,
- (outs), (ins i8mem:$dst),
- "setb\t$dst",
- [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = < unsign
-
-def SETAEr : I<0x93, MRM0r,
- (outs GR8 :$dst), (ins),
- "setae\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
- TB; // GR8 = >= unsign
-def SETAEm : I<0x93, MRM0m,
- (outs), (ins i8mem:$dst),
- "setae\t$dst",
- [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = >= unsign
-
-def SETBEr : I<0x96, MRM0r,
- (outs GR8 :$dst), (ins),
- "setbe\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
- TB; // GR8 = <= unsign
-def SETBEm : I<0x96, MRM0m,
- (outs), (ins i8mem:$dst),
- "setbe\t$dst",
- [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = <= unsign
-
-def SETAr : I<0x97, MRM0r,
- (outs GR8 :$dst), (ins),
- "seta\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
-               TB; // GR8 = > unsign
-def SETAm : I<0x97, MRM0m,
- (outs), (ins i8mem:$dst),
- "seta\t$dst",
- [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
-               TB; // [mem8] = > unsign
-
-def SETSr : I<0x98, MRM0r,
- (outs GR8 :$dst), (ins),
- "sets\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
- TB; // GR8 = <sign bit>
-def SETSm : I<0x98, MRM0m,
- (outs), (ins i8mem:$dst),
- "sets\t$dst",
- [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = <sign bit>
-def SETNSr : I<0x99, MRM0r,
- (outs GR8 :$dst), (ins),
- "setns\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
- TB; // GR8 = !<sign bit>
-def SETNSm : I<0x99, MRM0m,
- (outs), (ins i8mem:$dst),
- "setns\t$dst",
- [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = !<sign bit>
-
-def SETPr : I<0x9A, MRM0r,
- (outs GR8 :$dst), (ins),
- "setp\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
- TB; // GR8 = parity
-def SETPm : I<0x9A, MRM0m,
- (outs), (ins i8mem:$dst),
- "setp\t$dst",
- [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = parity
-def SETNPr : I<0x9B, MRM0r,
- (outs GR8 :$dst), (ins),
- "setnp\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
- TB; // GR8 = not parity
-def SETNPm : I<0x9B, MRM0m,
- (outs), (ins i8mem:$dst),
- "setnp\t$dst",
- [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = not parity
-
-def SETOr : I<0x90, MRM0r,
- (outs GR8 :$dst), (ins),
- "seto\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_O, EFLAGS))]>,
- TB; // GR8 = overflow
-def SETOm : I<0x90, MRM0m,
- (outs), (ins i8mem:$dst),
- "seto\t$dst",
- [(store (X86setcc X86_COND_O, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = overflow
-def SETNOr : I<0x91, MRM0r,
- (outs GR8 :$dst), (ins),
- "setno\t$dst",
- [(set GR8:$dst, (X86setcc X86_COND_NO, EFLAGS))]>,
- TB; // GR8 = not overflow
-def SETNOm : I<0x91, MRM0m,
- (outs), (ins i8mem:$dst),
- "setno\t$dst",
- [(store (X86setcc X86_COND_NO, EFLAGS), addr:$dst)]>,
- TB; // [mem8] = not overflow
-} // Uses = [EFLAGS]
-
-
-// Integer comparisons
-let Defs = [EFLAGS] in {
-def CMP8i8 : Ii8<0x3C, RawFrm, (outs), (ins i8imm:$src),
- "cmp{b}\t{$src, %al|%al, $src}", []>;
-def CMP16i16 : Ii16<0x3D, RawFrm, (outs), (ins i16imm:$src),
- "cmp{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
-def CMP32i32 : Ii32<0x3D, RawFrm, (outs), (ins i32imm:$src),
- "cmp{l}\t{$src, %eax|%eax, $src}", []>;
-
-def CMP8rr : I<0x38, MRMDestReg,
- (outs), (ins GR8 :$src1, GR8 :$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR8:$src1, GR8:$src2))]>;
-def CMP16rr : I<0x39, MRMDestReg,
- (outs), (ins GR16:$src1, GR16:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR16:$src1, GR16:$src2))]>, OpSize;
-def CMP32rr : I<0x39, MRMDestReg,
- (outs), (ins GR32:$src1, GR32:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR32:$src1, GR32:$src2))]>;
-def CMP8mr : I<0x38, MRMDestMem,
- (outs), (ins i8mem :$src1, GR8 :$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi8 addr:$src1), GR8:$src2))]>;
-def CMP16mr : I<0x39, MRMDestMem,
- (outs), (ins i16mem:$src1, GR16:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi16 addr:$src1), GR16:$src2))]>,
- OpSize;
-def CMP32mr : I<0x39, MRMDestMem,
- (outs), (ins i32mem:$src1, GR32:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi32 addr:$src1), GR32:$src2))]>;
-def CMP8rm : I<0x3A, MRMSrcMem,
- (outs), (ins GR8 :$src1, i8mem :$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR8:$src1, (loadi8 addr:$src2)))]>;
-def CMP16rm : I<0x3B, MRMSrcMem,
- (outs), (ins GR16:$src1, i16mem:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR16:$src1, (loadi16 addr:$src2)))]>,
- OpSize;
-def CMP32rm : I<0x3B, MRMSrcMem,
- (outs), (ins GR32:$src1, i32mem:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR32:$src1, (loadi32 addr:$src2)))]>;
-
-// These are alternate spellings for use by the disassembler; we mark them as
-// code gen only to ensure they aren't matched by the assembler.
-let isCodeGenOnly = 1 in {
- def CMP8rr_alt : I<0x3A, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}", []>;
- def CMP16rr_alt : I<0x3B, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize;
- def CMP32rr_alt : I<0x3B, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}", []>;
-}
-def CMP8ri : Ii8<0x80, MRM7r,
- (outs), (ins GR8:$src1, i8imm:$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR8:$src1, imm:$src2))]>;
-def CMP16ri : Ii16<0x81, MRM7r,
- (outs), (ins GR16:$src1, i16imm:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR16:$src1, imm:$src2))]>, OpSize;
-def CMP32ri : Ii32<0x81, MRM7r,
- (outs), (ins GR32:$src1, i32imm:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR32:$src1, imm:$src2))]>;
-def CMP8mi : Ii8 <0x80, MRM7m,
- (outs), (ins i8mem :$src1, i8imm :$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi8 addr:$src1), imm:$src2))]>;
-def CMP16mi : Ii16<0x81, MRM7m,
- (outs), (ins i16mem:$src1, i16imm:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi16 addr:$src1), imm:$src2))]>,
- OpSize;
-def CMP32mi : Ii32<0x81, MRM7m,
- (outs), (ins i32mem:$src1, i32imm:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi32 addr:$src1), imm:$src2))]>;
-def CMP16ri8 : Ii8<0x83, MRM7r,
- (outs), (ins GR16:$src1, i16i8imm:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
-def CMP16mi8 : Ii8<0x83, MRM7m,
- (outs), (ins i16mem:$src1, i16i8imm:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi16 addr:$src1),
- i16immSExt8:$src2))]>, OpSize;
-def CMP32mi8 : Ii8<0x83, MRM7m,
- (outs), (ins i32mem:$src1, i32i8imm:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp (loadi32 addr:$src1),
- i32immSExt8:$src2))]>;
-def CMP32ri8 : Ii8<0x83, MRM7r,
- (outs), (ins GR32:$src1, i32i8imm:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp GR32:$src1, i32immSExt8:$src2))]>;
-} // Defs = [EFLAGS]
-// Bit tests.
-// TODO: BTC, BTR, and BTS
+//===----------------------------------------------------------------------===//
+// Bit test instructions: BT, BTS, BTR, BTC.
let Defs = [EFLAGS] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
@@ -3680,6 +936,9 @@ def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>, TB;
+def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
@@ -3687,17 +946,23 @@ def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
// only for now.
def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
- "bt{w}\t{$src2, $src1|$src1, $src2}",
+ "bt{w}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi16 addr:$src1), GR16:$src2),
// (implicit EFLAGS)]
[]
>, OpSize, TB, Requires<[FastBTMem]>;
def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
- "bt{l}\t{$src2, $src1|$src1, $src2}",
+ "bt{l}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi32 addr:$src1), GR32:$src2),
// (implicit EFLAGS)]
[]
>, TB, Requires<[FastBTMem]>;
+def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+// [(X86bt (loadi64 addr:$src1), GR64:$src2),
+// (implicit EFLAGS)]
+ []
+ >, TB;
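// A C-level sketch (an assumption for exposition, not part of this patch) of
// why the memory+register form cannot ignore the high bits of the index:
//   int bt_reg(uint64_t v, uint64_t idx)      { return (v >> (idx & 63)) & 1; }
//   int bt_mem(const uint8_t *p, int64_t idx) {
//     return (p[idx >> 3] >> (idx & 7)) & 1;   /* idx may select any byte */
//   }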
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
@@ -3706,6 +971,10 @@ def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>, TB;
+def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
+
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
@@ -3717,307 +986,129 @@ def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
]>, TB;
+def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86bt (loadi64 addr:$src1),
+ i64immSExt8:$src2))]>, TB;
+
def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
} // Defs = [EFLAGS]
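// For reference, a C sketch (an assumption, exposition only; uses <stdint.h>)
// of the read-modify-write the set/reset/complement forms above perform;
// CF receives the old bit value:
//   int bts(uint32_t *v, unsigned i) { int b = (*v >> i) & 1; *v |=  1u << i; return b; }
//   int btr(uint32_t *v, unsigned i) { int b = (*v >> i) & 1; *v &= ~(1u << i); return b; }
//   int btc(uint32_t *v, unsigned i) { int b = (*v >> i) & 1; *v ^=  1u << i; return b; }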
-// Sign/Zero extenders
-// Use movsbl instead of movsbw; we don't care about the high 16 bits
-// of the register here. This has a smaller encoding and avoids a
-// partial-register update. Actual movsbw included for the disassembler.
-def MOVSX16rr8W : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
- "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
- "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
- "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
-def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
- "", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
-def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
- "movs{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sext GR8:$src))]>, TB;
-def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
- "movs{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
-def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
- "movs{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sext GR16:$src))]>, TB;
-def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
- "movs{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
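// Encoding comparison (illustrative bytes, assuming the standard x86
// encoding; not part of this patch):
//   movsbw %al, %cx   -> 66 0F BE C8  (OpSize prefix, partial-reg write)
//   movsbl %al, %ecx  -> 0F BE C8     (one byte shorter, full-reg write)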
-
-// Use movzbl instead of movzbw; we don't care about the high 16 bits
-// of the register here. This has a smaller encoding and avoids a
-// partial-register update. Actual movzbw included for the disassembler.
-def MOVZX16rr8W : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
- "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
- "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
- "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
-def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
- "", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
-def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zext GR8:$src))]>, TB;
-def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
-def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
- "movz{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zext GR16:$src))]>, TB;
-def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
- "movz{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
-
-// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
-// except that they use GR32_NOREX for the output operand register class
-// instead of GR32. This allows them to operate on h registers on x86-64.
-def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
- (outs GR32_NOREX:$dst), (ins GR8:$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- []>, TB;
-let mayLoad = 1 in
-def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
- (outs GR32_NOREX:$dst), (ins i8mem:$src),
- "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
- []>, TB;
-
-let neverHasSideEffects = 1 in {
- let Defs = [AX], Uses = [AL] in
- def CBW : I<0x98, RawFrm, (outs), (ins),
- "{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
- let Defs = [EAX], Uses = [AX] in
- def CWDE : I<0x98, RawFrm, (outs), (ins),
- "{cwtl|cwde}", []>; // EAX = signext(AX)
-
- let Defs = [AX,DX], Uses = [AX] in
- def CWD : I<0x99, RawFrm, (outs), (ins),
- "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
- let Defs = [EAX,EDX], Uses = [EAX] in
- def CDQ : I<0x99, RawFrm, (outs), (ins),
- "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
-}
-
-//===----------------------------------------------------------------------===//
-// Alias Instructions
-//===----------------------------------------------------------------------===//
-
-// Alias instructions that map movr0 to xor.
-// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
-// FIXME: Set encoding to pseudo.
-let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isCodeGenOnly = 1 in {
-def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
- [(set GR8:$dst, 0)]>;
-
-// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
-// encoding and avoids a partial-register update sometimes, but doing so
-// at isel time interferes with rematerialization in the current register
-// allocator. For now, this is rewritten when the instruction is lowered
-// to an MCInst.
-def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
- "",
- [(set GR16:$dst, 0)]>, OpSize;
-
-// FIXME: Set encoding to pseudo.
-def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, 0)]>;
-}
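// Size comparison behind the movr0 -> xor mapping (illustrative bytes,
// assuming the standard x86 encoding):
//   xorl %eax, %eax  -> 31 C0           (2 bytes, clobbers EFLAGS)
//   movl $0, %eax    -> B8 00 00 00 00  (5 bytes, EFLAGS preserved)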
-
-//===----------------------------------------------------------------------===//
-// Thread Local Storage Instructions
-//
-
-// ELF TLS Support
-// All calls clobber the non-callee saved registers. ESP is marked as
-// a use to prevent stack-pointer assignments that appear immediately
-// before calls from potentially appearing dead.
-let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in
-def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
- "leal\t$sym, %eax; "
- "call\t___tls_get_addr@PLT",
- [(X86tlsaddr tls32addr:$sym)]>,
- Requires<[In32BitMode]>;
-
-// Darwin TLS Support
-// For i386, the address of the thunk is passed on the stack; on return the
-// address of the variable is in %eax. %ecx is trashed during the function
-// call. All other registers are preserved.
-let Defs = [EAX, ECX],
- Uses = [ESP],
- usesCustomInserter = 1 in
-def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
- "# TLSCall_32",
- [(X86TLSCall addr:$sym)]>,
- Requires<[In32BitMode]>;
-
-let AddedComplexity = 5, isCodeGenOnly = 1 in
-def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "movl\t%gs:$src, $dst",
- [(set GR32:$dst, (gsload addr:$src))]>, SegGS;
-
-let AddedComplexity = 5, isCodeGenOnly = 1 in
-def FS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "movl\t%fs:$src, $dst",
- [(set GR32:$dst, (fsload addr:$src))]>, SegFS;
-
-//===----------------------------------------------------------------------===//
-// EH Pseudo Instructions
-//
-let isTerminator = 1, isReturn = 1, isBarrier = 1,
- hasCtrlDep = 1, isCodeGenOnly = 1 in {
-def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
- "ret\t#eh_return, addr: $addr",
- [(X86ehret GR32:$addr)]>;
-
-}
//===----------------------------------------------------------------------===//
// Atomic support
//
-// Memory barriers
-
-// TODO: Get this to fold the constant into the instruction.
-def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
- "lock\n\t"
- "or{l}\t{$zero, $dst|$dst, $zero}",
- []>, Requires<[In32BitMode]>, LOCK;
-
-let hasSideEffects = 1 in {
-def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
- "#MEMBARRIER",
- [(X86MemBarrier)]>, Requires<[HasSSE2]>;
-}
// Atomic swap. These are just normal xchg instructions; since xchg with a
// memory operand implicitly asserts the bus lock, atomicity is ensured.
let Constraints = "$val = $dst" in {
-def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$val, i32mem:$ptr),
- "xchg{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
-def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),
- (ins GR16:$val, i16mem:$ptr),
- "xchg{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
- OpSize;
def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
- "xchg{b}\t{$val, $ptr|$ptr, $val}",
+ "xchg{b}\t{$val, $ptr|$ptr, $val}",
[(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
+def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),(ins GR16:$val, i16mem:$ptr),
+ "xchg{w}\t{$val, $ptr|$ptr, $val}",
+ [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
+ OpSize;
+def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),(ins GR32:$val, i32mem:$ptr),
+ "xchg{l}\t{$val, $ptr|$ptr, $val}",
+ [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
+def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),(ins GR64:$val,i64mem:$ptr),
+ "xchg{q}\t{$val, $ptr|$ptr, $val}",
+ [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
-def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
- "xchg{l}\t{$val, $src|$src, $val}", []>;
-def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
- "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize;
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
"xchg{b}\t{$val, $src|$src, $val}", []>;
+def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
+ "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize;
+def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
+ "xchg{l}\t{$val, $src|$src, $val}", []>;
+def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
+ "xchg{q}\t{$val, $src|$src, $val}", []>;
}
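// Illustrative mapping (a C11-style assumption, exposition only):
//   uint32_t old = atomic_exchange(&x, v);
// can lower to a single xchg with a memory operand; no explicit LOCK prefix
// is required because the processor locks the bus for memory-form xchg.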
def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
"xchg{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
"xchg{l}\t{$src, %eax|%eax, $src}", []>;
+def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
+ "xchg{q}\t{$src, %rax|%rax, $src}", []>;
-// Atomic compare and swap.
-let Defs = [EAX, EFLAGS], Uses = [EAX] in {
-def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
- "lock\n\t"
- "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
-}
-let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
-def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
- "lock\n\t"
- "cmpxchg8b\t$ptr",
- [(X86cas8 addr:$ptr)]>, TB, LOCK;
-}
-
-let Defs = [AX, EFLAGS], Uses = [AX] in {
-def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
- "lock\n\t"
- "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
-}
-let Defs = [AL, EFLAGS], Uses = [AL] in {
-def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
- "lock\n\t"
- "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
-}
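// C sketch (an assumption, exposition only) of the compare-and-swap the
// lock cmpxchg forms above implement; the accumulator (AL/AX/EAX) holds the
// expected value and receives the memory value on failure, ZF reports success:
//   int cas32(uint32_t *p, uint32_t *expected, uint32_t desired) {
//     uint32_t old = *p;          /* load, compare, store run atomically */
//     if (old == *expected) { *p = desired; return 1; }
//     *expected = old;
//     return 0;
//   }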
-// Atomic exchange and add
-let Constraints = "$val = $dst", Defs = [EFLAGS] in {
-def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
- "lock\n\t"
- "xadd{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
- TB, LOCK;
-def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
- "lock\n\t"
- "xadd{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
- TB, OpSize, LOCK;
-def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
- "lock\n\t"
- "xadd{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
- TB, LOCK;
-}
def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
"xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
@@ -4025,6 +1116,8 @@ def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
+ "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in {
def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
@@ -4033,6 +1126,9 @@ def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
+ "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
}
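// xadd exchanges and adds in one step (C sketch, an assumption for
// exposition); the source operand receives the old destination value:
//   uint32_t xadd32(uint32_t *dst, uint32_t *src) {
//     uint32_t old = *dst;
//     *dst = old + *src;
//     *src = old;
//     return old;
//   }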
def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
@@ -4041,6 +1137,8 @@ def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
+ "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
let mayLoad = 1, mayStore = 1 in {
def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
@@ -4049,284 +1147,29 @@ def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
+ "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
}
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
"cmpxchg8b\t$dst", []>, TB;
-// Optimized codegen when the non-memory output is not used.
-// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
-let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
-def LOCK_ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
- "lock\n\t"
- "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "lock\n\t"
- "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
-def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "lock\n\t"
- "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
- "lock\n\t"
- "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "lock\n\t"
- "add{w}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "lock\n\t"
- "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "lock\n\t"
- "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
-def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "lock\n\t"
- "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-
-def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
- "lock\n\t"
- "inc{b}\t$dst", []>, LOCK;
-def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
- "lock\n\t"
- "inc{w}\t$dst", []>, OpSize, LOCK;
-def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
- "lock\n\t"
- "inc{l}\t$dst", []>, LOCK;
-
-def LOCK_SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
- "lock\n\t"
- "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
- "lock\n\t"
- "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
-def LOCK_SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
- "lock\n\t"
- "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
- "lock\n\t"
- "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
- "lock\n\t"
- "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
-def LOCK_SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
- "lock\n\t"
- "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
- "lock\n\t"
- "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
-def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
- "lock\n\t"
- "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
-
-def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
- "lock\n\t"
- "dec{b}\t$dst", []>, LOCK;
-def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
- "lock\n\t"
- "dec{w}\t$dst", []>, OpSize, LOCK;
-def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
- "lock\n\t"
- "dec{l}\t$dst", []>, LOCK;
-}
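// Rationale (exposition, an assumption): when the fetched value is unused,
// e.g.
//   __sync_fetch_and_add(&x, 1);   /* result discarded */
// a plain "lock add" is sufficient and cheaper than a lock xadd.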
+let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
+def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
+ "cmpxchg16b\t$dst", []>, TB;
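// cmpxchg16b compares RDX:RAX with the 16-byte memory operand; on a match it
// stores RCX:RBX there, otherwise it loads the memory value into RDX:RAX
// (hence the Defs/Uses above).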
-// Atomic exchange, and, or, xor
-let Constraints = "$val = $dst", Defs = [EFLAGS],
- usesCustomInserter = 1 in {
-def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMAND32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
-def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMOR32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
-def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMXOR32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
-def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMNAND32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
-def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
- "#ATOMMIN32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
-def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMMAX32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
-def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMUMIN32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
-def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMUMAX32 PSEUDO!",
- [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
-
-def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMAND16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
-def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMOR16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
-def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMXOR16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
-def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMNAND16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
-def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
- "#ATOMMIN16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
-def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMMAX16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
-def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMUMIN16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
-def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
- "#ATOMUMAX16 PSEUDO!",
- [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
-
-def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMAND8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
-def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMOR8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
-def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMXOR8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
-def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
- "#ATOMNAND8 PSEUDO!",
- [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
-}
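// These pseudos are expanded by the custom inserter into a cmpxchg retry
// loop, sketched in C for the AND case (an assumption, exposition only;
// cas32 as sketched above):
//   uint32_t old, desired;
//   do {
//     old = *p;
//     desired = old & val;
//   } while (!cas32(p, &old, desired));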
-let Constraints = "$val1 = $dst1, $val2 = $dst2",
- Defs = [EFLAGS, EAX, EBX, ECX, EDX],
- Uses = [EAX, EBX, ECX, EDX],
- mayLoad = 1, mayStore = 1,
- usesCustomInserter = 1 in {
-def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMAND6432 PSEUDO!", []>;
-def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMOR6432 PSEUDO!", []>;
-def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMXOR6432 PSEUDO!", []>;
-def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMNAND6432 PSEUDO!", []>;
-def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMADD6432 PSEUDO!", []>;
-def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMSUB6432 PSEUDO!", []>;
-def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
- (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
- "#ATOMSWAP6432 PSEUDO!", []>;
-}
-// Segmentation support instructions.
-
-def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-
-// The i16mem operand in LAR32rm and the GR32 operand in LAR32rr are not typos.
-def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
-
-def STRr : I<0x00, MRM1r, (outs GR16:$dst), (ins),
- "str{w}\t{$dst}", []>, TB;
-def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
- "str{w}\t{$dst}", []>, TB;
-def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
- "ltr{w}\t{$src}", []>, TB;
-def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
- "ltr{w}\t{$src}", []>, TB;
-
-def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins),
- "push{w}\t%fs", []>, OpSize, TB;
-def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins),
- "push{l}\t%fs", []>, TB;
-def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins),
- "push{w}\t%gs", []>, OpSize, TB;
-def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins),
- "push{l}\t%gs", []>, TB;
-
-def POPFS16 : I<0xa1, RawFrm, (outs), (ins),
- "pop{w}\t%fs", []>, OpSize, TB;
-def POPFS32 : I<0xa1, RawFrm, (outs), (ins),
- "pop{l}\t%fs", []>, TB;
-def POPGS16 : I<0xa9, RawFrm, (outs), (ins),
- "pop{w}\t%gs", []>, OpSize, TB;
-def POPGS32 : I<0xa9, RawFrm, (outs), (ins),
- "pop{l}\t%gs", []>, TB;
-
-def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lds{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lds{l}\t{$src, $dst|$dst, $src}", []>;
-def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lss{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lss{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "les{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "les{l}\t{$src, $dst|$dst, $src}", []>;
-def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lfs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lfs{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lgs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
-def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lgs{l}\t{$src, $dst|$dst, $src}", []>, TB;
-
-def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg),
- "verr\t$seg", []>, TB;
-def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg),
- "verr\t$seg", []>, TB;
-def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg),
- "verw\t$seg", []>, TB;
-def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
- "verw\t$seg", []>, TB;
-
-// Descriptor-table support instructions
-
-def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
- "sgdt\t$dst", []>, TB;
-def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
- "sidt\t$dst", []>, TB;
-def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
- "sldt{w}\t$dst", []>, TB;
-def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
- "sldt{w}\t$dst", []>, TB;
-def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
- "lgdt\t$src", []>, TB;
-def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
- "lidt\t$src", []>, TB;
-def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src),
- "lldt{w}\t$src", []>, TB;
-def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
- "lldt{w}\t$src", []>, TB;
-
// Lock instruction prefix
def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
+// Rex64 instruction prefix
+def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>;
+
+// Data16 instruction prefix
+def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
+
// Repeat string operation instruction prefixes
// These use and decrement ECX as the repeat count, and read the DF flag in
// the EFLAGS register to select the direction of the string operation
let Defs = [ECX], Uses = [ECX,EFLAGS] in {
@@ -4336,35 +1179,19 @@ def REP_PREFIX : I<0xF3, RawFrm, (outs), (ins), "rep", []>;
def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
}
-// Segment override instruction prefixes
-def CS_PREFIX : I<0x2E, RawFrm, (outs), (ins), "cs", []>;
-def SS_PREFIX : I<0x36, RawFrm, (outs), (ins), "ss", []>;
-def DS_PREFIX : I<0x3E, RawFrm, (outs), (ins), "ds", []>;
-def ES_PREFIX : I<0x26, RawFrm, (outs), (ins), "es", []>;
-def FS_PREFIX : I<0x64, RawFrm, (outs), (ins), "fs", []>;
-def GS_PREFIX : I<0x65, RawFrm, (outs), (ins), "gs", []>;
// String manipulation instructions
-
def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>;
def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize;
def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", []>;
+def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;
def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", []>;
def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", []>, OpSize;
def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", []>;
-// CPU flow control instructions
-
-def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", []>;
-def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", []>, TB;
-
-// FPU control instructions
-
-def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", []>, DB;
// Flag instructions
-
def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", []>;
@@ -4376,620 +1203,423 @@ def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", []>, TB;
// Table lookup instructions
-
def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>;
-// Specialized register support
-
-def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB;
-def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB;
-def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB;
-
-def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
- "smsw{w}\t$dst", []>, OpSize, TB;
-def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
- "smsw{l}\t$dst", []>, TB;
-// For memory operands, there is only a 16-bit form
-def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins),
- "smsw{w}\t$dst", []>, TB;
-
-def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
- "lmsw{w}\t$src", []>, TB;
-def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src),
- "lmsw{w}\t$src", []>, TB;
-
-def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB;
-
-// Cache instructions
-
-def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB;
-def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", []>, TB;
-
-// VMX instructions
-
-// 66 0F 38 80
-def INVEPT : I<0x80, RawFrm, (outs), (ins), "invept", []>, OpSize, T8;
-// 66 0F 38 81
-def INVVPID : I<0x81, RawFrm, (outs), (ins), "invvpid", []>, OpSize, T8;
-// 0F 01 C1
-def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
-def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmclear\t$vmcs", []>, OpSize, TB;
-// 0F 01 C2
-def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
-// 0F 01 C3
-def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
-def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmptrld\t$vmcs", []>, TB;
-def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
- "vmptrst\t$vmcs", []>, TB;
-def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
-def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
-// 0F 01 C4
-def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
-def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
- "vmxon\t{$vmxon}", []>, XS;
+// ASCII Adjust After Addition
+// sets AL, AH, and the CF and AF flags in EFLAGS; uses AL and the AF flag
+def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>, Requires<[In32BitMode]>;
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
+// ASCII Adjust AX Before Division
+// sets AL, AH and EFLAGS and uses AL and AH
+def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
+ "aad\t$src", []>, Requires<[In32BitMode]>;
-// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
-def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
-def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
-def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
-def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
-def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
-def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;
-
-def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
- (ADD32ri GR32:$src1, tconstpool:$src2)>;
-def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
- (ADD32ri GR32:$src1, tjumptable:$src2)>;
-def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
- (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
-def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
- (ADD32ri GR32:$src1, texternalsym:$src2)>;
-def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
- (ADD32ri GR32:$src1, tblockaddress:$src2)>;
-
-def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
- (MOV32mi addr:$dst, tglobaladdr:$src)>;
-def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
- (MOV32mi addr:$dst, texternalsym:$src)>;
-def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
- (MOV32mi addr:$dst, tblockaddress:$src)>;
-
-// Calls
-// tailcall stuff
-def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
- (TCRETURNri GR32_TC:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
-
-// FIXME: This is disabled for 32-bit PIC mode because the global base
-// register which is part of the address mode may be assigned a
-// callee-saved register.
-def : Pat<(X86tcret (load addr:$dst), imm:$off),
- (TCRETURNmi addr:$dst, imm:$off)>,
- Requires<[In32BitMode, IsNotPIC]>;
-
-def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
-
-def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>,
- Requires<[In32BitMode]>;
-
-// Normal calls, with various flavors of addresses.
-def : Pat<(X86call (i32 tglobaladdr:$dst)),
- (CALLpcrel32 tglobaladdr:$dst)>;
-def : Pat<(X86call (i32 texternalsym:$dst)),
- (CALLpcrel32 texternalsym:$dst)>;
-def : Pat<(X86call (i32 imm:$dst)),
- (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
-
-// X86 specific add which produces a flag.
-def : Pat<(addc GR32:$src1, GR32:$src2),
- (ADD32rr GR32:$src1, GR32:$src2)>;
-def : Pat<(addc GR32:$src1, (load addr:$src2)),
- (ADD32rm GR32:$src1, addr:$src2)>;
-def : Pat<(addc GR32:$src1, imm:$src2),
- (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
- (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-def : Pat<(subc GR32:$src1, GR32:$src2),
- (SUB32rr GR32:$src1, GR32:$src2)>;
-def : Pat<(subc GR32:$src1, (load addr:$src2)),
- (SUB32rm GR32:$src1, addr:$src2)>;
-def : Pat<(subc GR32:$src1, imm:$src2),
- (SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
- (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// Comparisons.
-
-// TEST R,R is smaller than CMP R,0
-def : Pat<(X86cmp GR8:$src1, 0),
- (TEST8rr GR8:$src1, GR8:$src1)>;
-def : Pat<(X86cmp GR16:$src1, 0),
- (TEST16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(X86cmp GR32:$src1, 0),
- (TEST32rr GR32:$src1, GR32:$src1)>;
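// Size comparison (illustrative bytes, assuming the standard x86 encoding):
//   testl %eax, %eax  -> 85 C0     (2 bytes)
//   cmpl  $0, %eax    -> 83 F8 00  (3 bytes)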
-
-// Conditional moves with folded loads, with the operands swapped and the
-// conditions inverted.
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_B, EFLAGS),
- (CMOVAE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_B, EFLAGS),
- (CMOVAE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_AE, EFLAGS),
- (CMOVB16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_AE, EFLAGS),
- (CMOVB32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_E, EFLAGS),
- (CMOVNE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_E, EFLAGS),
- (CMOVNE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NE, EFLAGS),
- (CMOVE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NE, EFLAGS),
- (CMOVE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_BE, EFLAGS),
- (CMOVA16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_BE, EFLAGS),
- (CMOVA32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_A, EFLAGS),
- (CMOVBE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_A, EFLAGS),
- (CMOVBE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_L, EFLAGS),
- (CMOVGE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_L, EFLAGS),
- (CMOVGE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_GE, EFLAGS),
- (CMOVL16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_GE, EFLAGS),
- (CMOVL32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_LE, EFLAGS),
- (CMOVG16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_LE, EFLAGS),
- (CMOVG32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_G, EFLAGS),
- (CMOVLE16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_G, EFLAGS),
- (CMOVLE32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_P, EFLAGS),
- (CMOVNP16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_P, EFLAGS),
- (CMOVNP32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NP, EFLAGS),
- (CMOVP16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NP, EFLAGS),
- (CMOVP32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_S, EFLAGS),
- (CMOVNS16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_S, EFLAGS),
- (CMOVNS32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NS, EFLAGS),
- (CMOVS16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NS, EFLAGS),
- (CMOVS32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_O, EFLAGS),
- (CMOVNO16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_O, EFLAGS),
- (CMOVNO32rm GR32:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NO, EFLAGS),
- (CMOVO16rm GR16:$src2, addr:$src1)>;
-def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NO, EFLAGS),
- (CMOVO32rm GR32:$src2, addr:$src1)>;
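// In other words (exposition): to fold the load when it is the true operand,
// the operands are swapped and the condition inverted, so a B condition
// selects CMOVAE, E selects CMOVNE, and so on.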
-
-// zextload bool -> zextload byte
-def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
-def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
-def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
-
-// extload bool -> extload byte
-def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
-def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
-def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
-def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
-def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
-def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
-
-// anyext. Define these to do an explicit zero-extend to
-// avoid partial-register updates.
-def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
-def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
-
-// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
-def : Pat<(i32 (anyext GR16:$src)),
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
+// ASCII Adjust AX After Multiply
+// sets AL, AH and EFLAGS and uses AL
+def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
+ "aam\t$src", []>, Requires<[In32BitMode]>;
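// e.g. with the usual operand of 10: AH = AL / 10, AL = AL % 10.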
+// ASCII Adjust AL After Subtraction
+// sets AL, AH, and the CF and AF flags in EFLAGS; uses AL and the AF flag
+def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>, Requires<[In32BitMode]>;
-//===----------------------------------------------------------------------===//
-// Some peepholes
-//===----------------------------------------------------------------------===//
-
-// Odd encoding trick: -128 fits into an 8-bit immediate field while
-// +128 doesn't, so in this special case use a sub instead of an add.
-def : Pat<(add GR16:$src1, 128),
- (SUB16ri8 GR16:$src1, -128)>;
-def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
- (SUB16mi8 addr:$dst, -128)>;
-def : Pat<(add GR32:$src1, 128),
- (SUB32ri8 GR32:$src1, -128)>;
-def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
- (SUB32mi8 addr:$dst, -128)>;
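// Example (illustrative bytes, assuming the standard x86 encoding):
//   addl $128, %eax   -> 05 80 00 00 00  (no imm8 form fits +128)
//   subl $-128, %eax  -> 83 E8 80        (sign-extended imm8 fits -128)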
-
-// r & (2^16-1) ==> movz
-def : Pat<(and GR32:$src1, 0xffff),
- (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
-// r & (2^8-1) ==> movz
-def : Pat<(and GR32:$src1, 0xff),
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
- GR32_ABCD)),
- sub_8bit))>,
- Requires<[In32BitMode]>;
-// r & (2^8-1) ==> movz
-def : Pat<(and GR16:$src1, 0xff),
- (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
- GR16_ABCD)),
- sub_8bit))>,
- Requires<[In32BitMode]>;
-
-// sext_inreg patterns
-def : Pat<(sext_inreg GR32:$src, i16),
- (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
-def : Pat<(sext_inreg GR32:$src, i8),
- (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
- GR32_ABCD)),
- sub_8bit))>,
- Requires<[In32BitMode]>;
-def : Pat<(sext_inreg GR16:$src, i8),
- (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
- GR16_ABCD)),
- sub_8bit))>,
- Requires<[In32BitMode]>;
-
-// trunc patterns
-def : Pat<(i16 (trunc GR32:$src)),
- (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
-def : Pat<(i8 (trunc GR32:$src)),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- sub_8bit)>,
- Requires<[In32BitMode]>;
-def : Pat<(i8 (trunc GR16:$src)),
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit)>,
- Requires<[In32BitMode]>;
-
-// h-register tricks
-def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi)>,
- Requires<[In32BitMode]>;
-def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- sub_8bit_hi)>,
- Requires<[In32BitMode]>;
-def : Pat<(srl GR16:$src, (i8 8)),
- (EXTRACT_SUBREG
- (MOVZX32rr8
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- sub_8bit_hi)),
- sub_16bit)>,
- Requires<[In32BitMode]>;
-def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
- GR16_ABCD)),
- sub_8bit_hi))>,
- Requires<[In32BitMode]>;
-def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
- (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
- GR16_ABCD)),
- sub_8bit_hi))>,
- Requires<[In32BitMode]>;
-def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
- GR32_ABCD)),
- sub_8bit_hi))>,
- Requires<[In32BitMode]>;
-def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
- (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
- GR32_ABCD)),
- sub_8bit_hi))>,
- Requires<[In32BitMode]>;
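// e.g. (x >> 8) & 0xff with x in EAX can be a single "movzbl %ah, %ecx"
// (register choice illustrative).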
-
-// (shl x, 1) ==> (add x, x)
-def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
-def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
-
-// (shl x (and y, 31)) ==> (shl x, y)
-def : Pat<(shl GR8:$src1, (and CL, 31)),
- (SHL8rCL GR8:$src1)>;
-def : Pat<(shl GR16:$src1, (and CL, 31)),
- (SHL16rCL GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (and CL, 31)),
- (SHL32rCL GR32:$src1)>;
-def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
- (SHL8mCL addr:$dst)>;
-def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
- (SHL16mCL addr:$dst)>;
-def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
- (SHL32mCL addr:$dst)>;
-
-def : Pat<(srl GR8:$src1, (and CL, 31)),
- (SHR8rCL GR8:$src1)>;
-def : Pat<(srl GR16:$src1, (and CL, 31)),
- (SHR16rCL GR16:$src1)>;
-def : Pat<(srl GR32:$src1, (and CL, 31)),
- (SHR32rCL GR32:$src1)>;
-def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
- (SHR8mCL addr:$dst)>;
-def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
- (SHR16mCL addr:$dst)>;
-def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
- (SHR32mCL addr:$dst)>;
-
-def : Pat<(sra GR8:$src1, (and CL, 31)),
- (SAR8rCL GR8:$src1)>;
-def : Pat<(sra GR16:$src1, (and CL, 31)),
- (SAR16rCL GR16:$src1)>;
-def : Pat<(sra GR32:$src1, (and CL, 31)),
- (SAR32rCL GR32:$src1)>;
-def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
- (SAR8mCL addr:$dst)>;
-def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
- (SAR16mCL addr:$dst)>;
-def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
- (SAR32mCL addr:$dst)>;
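// These are legal because the hardware masks 8/16/32-bit shift counts in CL
// to the low 5 bits, so the explicit "and" with 31 is redundant.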
-
-// (anyext (setcc_carry)) -> (setcc_carry)
-def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
- (SETB_C16r)>;
-def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
- (SETB_C32r)>;
-def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
- (SETB_C32r)>;
-
-// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
-let AddedComplexity = 5 in { // Try this before the selecting to OR
-def : Pat<(or_is_add GR16:$src1, imm:$src2),
- (ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(or_is_add GR32:$src1, imm:$src2),
- (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(or_is_add GR16:$src1, i16immSExt8:$src2),
- (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(or_is_add GR32:$src1, i32immSExt8:$src2),
- (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(or_is_add GR16:$src1, GR16:$src2),
- (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(or_is_add GR32:$src1, GR32:$src2),
- (ADD32rr GR32:$src1, GR32:$src2)>;
-} // AddedComplexity
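// e.g. (x << 4) | 3 equals (x << 4) + 3 since the low bits are known zero;
// one motivation (an assumption) is that add, unlike or, can be formed as a
// three-address lea.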
+// Decimal Adjust AL after Addition
+// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
+def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>, Requires<[In32BitMode]>;
-//===----------------------------------------------------------------------===//
-// EFLAGS-defining Patterns
-//===----------------------------------------------------------------------===//
+// Decimal Adjust AL after Subtraction
+// sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
+def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>, Requires<[In32BitMode]>;
-// add reg, reg
-def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
-
-// add reg, mem
-def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
- (ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
- (ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
- (ADD32rm GR32:$src1, addr:$src2)>;
-
-// add reg, imm
-def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
-def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(add GR16:$src1, i16immSExt8:$src2),
- (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(add GR32:$src1, i32immSExt8:$src2),
- (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// sub reg, reg
-def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
-
-// sub reg, mem
-def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
- (SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
- (SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
- (SUB32rm GR32:$src1, addr:$src2)>;
-
-// sub reg, imm
-def : Pat<(sub GR8:$src1, imm:$src2),
- (SUB8ri GR8:$src1, imm:$src2)>;
-def : Pat<(sub GR16:$src1, imm:$src2),
- (SUB16ri GR16:$src1, imm:$src2)>;
-def : Pat<(sub GR32:$src1, imm:$src2),
- (SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
- (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
- (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// mul reg, reg
-def : Pat<(mul GR16:$src1, GR16:$src2),
- (IMUL16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(mul GR32:$src1, GR32:$src2),
- (IMUL32rr GR32:$src1, GR32:$src2)>;
-
-// mul reg, mem
-def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
- (IMUL16rm GR16:$src1, addr:$src2)>;
-def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
- (IMUL32rm GR32:$src1, addr:$src2)>;
-
-// mul reg, imm
-def : Pat<(mul GR16:$src1, imm:$src2),
- (IMUL16rri GR16:$src1, imm:$src2)>;
-def : Pat<(mul GR32:$src1, imm:$src2),
- (IMUL32rri GR32:$src1, imm:$src2)>;
-def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
- (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
- (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// reg = mul mem, imm
-def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
- (IMUL16rmi addr:$src1, imm:$src2)>;
-def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
- (IMUL32rmi addr:$src1, imm:$src2)>;
-def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
- (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
-def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
- (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
-
-// Optimize multiply by 2 with EFLAGS result.
-let AddedComplexity = 2 in {
-def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
-}
+// Check Array Index Against Bounds
+def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+ "bound\t{$src, $dst|$dst, $src}", []>, OpSize,
+ Requires<[In32BitMode]>;
+def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+ "bound\t{$src, $dst|$dst, $src}", []>,
+ Requires<[In32BitMode]>;
-// Patterns for nodes that do not produce flags, for instructions that do.
-
-// Increment reg.
-def : Pat<(add GR8:$src1 , 1), (INC8r GR8:$src1)>;
-def : Pat<(add GR16:$src1, 1), (INC16r GR16:$src1)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src1, 1), (INC32r GR32:$src1)>, Requires<[In32BitMode]>;
-
-// Decrement reg.
-def : Pat<(add GR8:$src1 , -1), (DEC8r GR8:$src1)>;
-def : Pat<(add GR16:$src1, -1), (DEC16r GR16:$src1)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src1, -1), (DEC32r GR32:$src1)>, Requires<[In32BitMode]>;
-
-// or reg/reg.
-def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
-
-// or reg/mem
-def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
- (OR8rm GR8:$src1, addr:$src2)>;
-def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
- (OR16rm GR16:$src1, addr:$src2)>;
-def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
- (OR32rm GR32:$src1, addr:$src2)>;
-
-// or reg/imm
-def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
-def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
-def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(or GR16:$src1, i16immSExt8:$src2),
- (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(or GR32:$src1, i32immSExt8:$src2),
- (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// xor reg/reg
-def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
-
-// xor reg/mem
-def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
- (XOR8rm GR8:$src1, addr:$src2)>;
-def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
- (XOR16rm GR16:$src1, addr:$src2)>;
-def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
- (XOR32rm GR32:$src1, addr:$src2)>;
-
-// xor reg/imm
-def : Pat<(xor GR8:$src1, imm:$src2),
- (XOR8ri GR8:$src1, imm:$src2)>;
-def : Pat<(xor GR16:$src1, imm:$src2),
- (XOR16ri GR16:$src1, imm:$src2)>;
-def : Pat<(xor GR32:$src1, imm:$src2),
- (XOR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
- (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
- (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-
-// and reg/reg
-def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
-
-// and reg/mem
-def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
- (AND8rm GR8:$src1, addr:$src2)>;
-def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
- (AND16rm GR16:$src1, addr:$src2)>;
-def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
- (AND32rm GR32:$src1, addr:$src2)>;
-
-// and reg/imm
-def : Pat<(and GR8:$src1, imm:$src2),
- (AND8ri GR8:$src1, imm:$src2)>;
-def : Pat<(and GR16:$src1, imm:$src2),
- (AND16ri GR16:$src1, imm:$src2)>;
-def : Pat<(and GR32:$src1, imm:$src2),
- (AND32ri GR32:$src1, imm:$src2)>;
-def : Pat<(and GR16:$src1, i16immSExt8:$src2),
- (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(and GR32:$src1, i32immSExt8:$src2),
- (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
+// Adjust RPL Field of Segment Selector
+def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$src), (ins GR16:$dst),
+ "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
+def ARPL16mr : I<0x63, MRMSrcMem, (outs GR16:$src), (ins i16mem:$dst),
+ "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
//===----------------------------------------------------------------------===//
-// Floating Point Stack Support
+// Subsystems.
//===----------------------------------------------------------------------===//
-include "X86InstrFPStack.td"
-
-//===----------------------------------------------------------------------===//
-// X86-64 Support
-//===----------------------------------------------------------------------===//
+include "X86InstrArithmetic.td"
+include "X86InstrCMovSetCC.td"
+include "X86InstrExtension.td"
+include "X86InstrControl.td"
+include "X86InstrShiftRotate.td"
-include "X86Instr64bit.td"
+// X87 Floating Point Stack.
+include "X86InstrFPStack.td"
-//===----------------------------------------------------------------------===//
// SIMD support (SSE, MMX and AVX)
-//===----------------------------------------------------------------------===//
-
include "X86InstrFragmentsSIMD.td"
-//===----------------------------------------------------------------------===//
// FMA - Fused Multiply-Add support (requires FMA)
-//===----------------------------------------------------------------------===//
-
include "X86InstrFMA.td"
+// SSE, MMX and 3DNow! vector support.
+include "X86InstrSSE.td"
+include "X86InstrMMX.td"
+include "X86Instr3DNow.td"
+
+include "X86InstrVMX.td"
+
+// System instructions.
+include "X86InstrSystem.td"
+
+// Compiler Pseudo Instructions and Pat Patterns
+include "X86InstrCompiler.td"
+
//===----------------------------------------------------------------------===//
-// XMM Floating point support (requires SSE / SSE2)
+// Assembler Mnemonic Aliases
//===----------------------------------------------------------------------===//
-include "X86InstrSSE.td"
+def : MnemonicAlias<"call", "calll">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"call", "callq">, Requires<[In64BitMode]>;
+
+def : MnemonicAlias<"cbw", "cbtw">;
+def : MnemonicAlias<"cwd", "cwtd">;
+def : MnemonicAlias<"cdq", "cltd">;
+def : MnemonicAlias<"cwde", "cwtl">;
+def : MnemonicAlias<"cdqe", "cltq">;
+
+// lret maps to lretl; it is not ambiguous with lretq.
+def : MnemonicAlias<"lret", "lretl">;
+
+def : MnemonicAlias<"leavel", "leave">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"leaveq", "leave">, Requires<[In64BitMode]>;
+
+def : MnemonicAlias<"pop", "popl">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pop", "popq">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"popf", "popfl">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"popf", "popfq">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"popfd", "popfl">;
+
+// FIXME: This is wrong for "push reg": "push %bx" should turn into pushw in
+// all modes. However, "push (addr)" and "push $42" should default to
+// pushl/pushq depending on the current mode. The same applies to "pop %bx".
+def : MnemonicAlias<"push", "pushl">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"push", "pushq">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"pushfd", "pushfl">;
+
+def : MnemonicAlias<"repe", "rep">;
+def : MnemonicAlias<"repz", "rep">;
+def : MnemonicAlias<"repnz", "repne">;
+
+def : MnemonicAlias<"retl", "ret">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"retq", "ret">, Requires<[In64BitMode]>;
+
+def : MnemonicAlias<"salb", "shlb">;
+def : MnemonicAlias<"salw", "shlw">;
+def : MnemonicAlias<"sall", "shll">;
+def : MnemonicAlias<"salq", "shlq">;
+
+def : MnemonicAlias<"smovb", "movsb">;
+def : MnemonicAlias<"smovw", "movsw">;
+def : MnemonicAlias<"smovl", "movsl">;
+def : MnemonicAlias<"smovq", "movsq">;
+
+def : MnemonicAlias<"ud2a", "ud2">;
+def : MnemonicAlias<"verrw", "verr">;
+
+// System instruction aliases.
+def : MnemonicAlias<"iret", "iretl">;
+def : MnemonicAlias<"sysret", "sysretl">;
+
+def : MnemonicAlias<"lgdtl", "lgdt">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"lgdtq", "lgdt">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"lidtl", "lidt">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"lidtq", "lidt">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"sgdtl", "sgdt">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"sgdtq", "sgdt">, Requires<[In64BitMode]>;
+def : MnemonicAlias<"sidtl", "sidt">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"sidtq", "sidt">, Requires<[In64BitMode]>;
+
+
+// Floating point stack aliases.
+def : MnemonicAlias<"fcmovz", "fcmove">;
+def : MnemonicAlias<"fcmova", "fcmovnbe">;
+def : MnemonicAlias<"fcmovnae", "fcmovb">;
+def : MnemonicAlias<"fcmovna", "fcmovbe">;
+def : MnemonicAlias<"fcmovae", "fcmovnb">;
+def : MnemonicAlias<"fcomip", "fcompi">;
+def : MnemonicAlias<"fildq", "fildll">;
+def : MnemonicAlias<"fldcww", "fldcw">;
+def : MnemonicAlias<"fnstcww", "fnstcw">;
+def : MnemonicAlias<"fnstsww", "fnstsw">;
+def : MnemonicAlias<"fucomip", "fucompi">;
+def : MnemonicAlias<"fwait", "wait">;
+
+
+class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond>
+ : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
+ !strconcat(Prefix, NewCond, Suffix)>;
+
+/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
+/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
+/// example "setz" -> "sete".
+multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix> {
+ def C : CondCodeAlias<Prefix, Suffix, "c", "b">; // setc -> setb
+ def Z : CondCodeAlias<Prefix, Suffix, "z" , "e">; // setz -> sete
+ def NA : CondCodeAlias<Prefix, Suffix, "na", "be">; // setna -> setbe
+ def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae">; // setnb -> setae
+ def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae">; // setnc -> setae
+ def NG : CondCodeAlias<Prefix, Suffix, "ng", "le">; // setng -> setle
+ def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge">; // setnl -> setge
+ def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne">; // setnz -> setne
+ def PE : CondCodeAlias<Prefix, Suffix, "pe", "p">; // setpe -> setp
+ def PO : CondCodeAlias<Prefix, Suffix, "po", "np">; // setpo -> setnp
+
+ def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b">; // setnae -> setb
+ def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a">; // setnbe -> seta
+ def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l">; // setnge -> setl
+ def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g">; // setnle -> setg
+}
+
+// Aliases for set<CC>
+defm : IntegerCondCodeMnemonicAlias<"set", "">;
+// Aliases for j<CC>
+defm : IntegerCondCodeMnemonicAlias<"j", "">;
+// Aliases for cmov<CC>{w,l,q}
+defm : IntegerCondCodeMnemonicAlias<"cmov", "w">;
+defm : IntegerCondCodeMnemonicAlias<"cmov", "l">;
+defm : IntegerCondCodeMnemonicAlias<"cmov", "q">;
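+
+// Illustrative expansion (not part of the original patch): with Prefix="cmov"
+// and Suffix="w", the Z entry of IntegerCondCodeMnemonicAlias concatenates to
+//   def : MnemonicAlias<"cmovzw", "cmovew">;
+// so "cmovzw" is accepted and canonicalized to "cmovew".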
+
//===----------------------------------------------------------------------===//
-// MMX and XMM Packed Integer support (requires MMX, SSE, and SSE2)
+// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//
-include "X86InstrMMX.td"
+// aad/aam default to base 10 if no operand is specified.
+def : InstAlias<"aad", (AAD8i8 10)>;
+def : InstAlias<"aam", (AAM8i8 10)>;
+
+// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
+def : InstAlias<"bt $imm, $mem", (BT32mi8 i32mem:$mem, i32i8imm:$imm)>;
+
+// clr aliases.
+def : InstAlias<"clrb $reg", (XOR8rr GR8 :$reg, GR8 :$reg)>;
+def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg)>;
+def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg)>;
+def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg)>;
+
+// div and idiv aliases for explicit A register.
+def : InstAlias<"divb $src, %al", (DIV8r GR8 :$src)>;
+def : InstAlias<"divw $src, %ax", (DIV16r GR16:$src)>;
+def : InstAlias<"divl $src, %eax", (DIV32r GR32:$src)>;
+def : InstAlias<"divq $src, %rax", (DIV64r GR64:$src)>;
+def : InstAlias<"divb $src, %al", (DIV8m i8mem :$src)>;
+def : InstAlias<"divw $src, %ax", (DIV16m i16mem:$src)>;
+def : InstAlias<"divl $src, %eax", (DIV32m i32mem:$src)>;
+def : InstAlias<"divq $src, %rax", (DIV64m i64mem:$src)>;
+def : InstAlias<"idivb $src, %al", (IDIV8r GR8 :$src)>;
+def : InstAlias<"idivw $src, %ax", (IDIV16r GR16:$src)>;
+def : InstAlias<"idivl $src, %eax", (IDIV32r GR32:$src)>;
+def : InstAlias<"idivq $src, %rax", (IDIV64r GR64:$src)>;
+def : InstAlias<"idivb $src, %al", (IDIV8m i8mem :$src)>;
+def : InstAlias<"idivw $src, %ax", (IDIV16m i16mem:$src)>;
+def : InstAlias<"idivl $src, %eax", (IDIV32m i32mem:$src)>;
+def : InstAlias<"idivq $src, %rax", (IDIV64m i64mem:$src)>;
+
+
+
+// Various unary fpstack operations default to operating on ST1.
+// For example, "fxch" -> "fxch %st(1)".
+def : InstAlias<"faddp", (ADD_FPrST0 ST1)>;
+def : InstAlias<"fsubp", (SUBR_FPrST0 ST1)>;
+def : InstAlias<"fsubrp", (SUB_FPrST0 ST1)>;
+def : InstAlias<"fmulp", (MUL_FPrST0 ST1)>;
+def : InstAlias<"fdivp", (DIVR_FPrST0 ST1)>;
+def : InstAlias<"fdivrp", (DIV_FPrST0 ST1)>;
+def : InstAlias<"fxch", (XCH_F ST1)>;
+def : InstAlias<"fcomi", (COM_FIr ST1)>;
+def : InstAlias<"fcompi", (COM_FIPr ST1)>;
+def : InstAlias<"fucom", (UCOM_Fr ST1)>;
+def : InstAlias<"fucomp", (UCOM_FPr ST1)>;
+def : InstAlias<"fucomi", (UCOM_FIr ST1)>;
+def : InstAlias<"fucompi", (UCOM_FIPr ST1)>;
+
+// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
+// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
+// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
+// gas.
+multiclass FpUnaryAlias<string Mnemonic, Instruction Inst> {
+ def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"), (Inst RST:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"), (Inst ST0)>;
+}
+
+defm : FpUnaryAlias<"fadd", ADD_FST0r>;
+defm : FpUnaryAlias<"faddp", ADD_FPrST0>;
+defm : FpUnaryAlias<"fsub", SUB_FST0r>;
+defm : FpUnaryAlias<"fsubp", SUBR_FPrST0>;
+defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
+defm : FpUnaryAlias<"fsubrp", SUB_FPrST0>;
+defm : FpUnaryAlias<"fmul", MUL_FST0r>;
+defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
+defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
+defm : FpUnaryAlias<"fdivp", DIVR_FPrST0>;
+defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
+defm : FpUnaryAlias<"fdivrp", DIV_FPrST0>;
+defm : FpUnaryAlias<"fcomi", COM_FIr>;
+defm : FpUnaryAlias<"fucomi", UCOM_FIr>;
+defm : FpUnaryAlias<"fcompi", COM_FIPr>;
+defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
+
+
+// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
+// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
+// solely because gas supports it.
+def : InstAlias<"faddp %st(0), $op", (ADD_FPrST0 RST:$op)>;
+def : InstAlias<"fmulp %st(0), $op", (MUL_FPrST0 RST:$op)>;
+def : InstAlias<"fsubrp %st(0), $op", (SUB_FPrST0 RST:$op)>;
+def : InstAlias<"fdivp %st(0), $op", (DIVR_FPrST0 RST:$op)>;
+def : InstAlias<"fdivrp %st(0), $op", (DIV_FPrST0 RST:$op)>;
+
+// We accept "fnstsw %eax" even though it only writes %ax.
+def : InstAlias<"fnstsw %eax", (FNSTSW8r)>;
+def : InstAlias<"fnstsw %al" , (FNSTSW8r)>;
+def : InstAlias<"fnstsw" , (FNSTSW8r)>;
+
+// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
+// it is compatible with what GAS does.
+def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"ljmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"lcall *$dst", (FARCALL32m opaque48mem:$dst)>;
+def : InstAlias<"ljmp *$dst", (FARJMP32m opaque48mem:$dst)>;
+
+// "imul <imm>, B" is an alias for "imul <imm>, B, B".
+def : InstAlias<"imulw $imm, $r", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm)>;
+def : InstAlias<"imulw $imm, $r", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm)>;
+def : InstAlias<"imull $imm, $r", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm)>;
+def : InstAlias<"imull $imm, $r", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm)>;
+def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>;
+def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>;
+
+// inb %dx -> inb %al, %dx
+def : InstAlias<"inb %dx", (IN8rr)>;
+def : InstAlias<"inw %dx", (IN16rr)>;
+def : InstAlias<"inl %dx", (IN32rr)>;
+def : InstAlias<"inb $port", (IN8ri i8imm:$port)>;
+def : InstAlias<"inw $port", (IN16ri i8imm:$port)>;
+def : InstAlias<"inl $port", (IN32ri i8imm:$port)>;
+
+
+// jmp and call aliases for lcall and ljmp: "jmp $42, $5" -> ljmp.
+def : InstAlias<"call $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"jmp $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"callw $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>;
+def : InstAlias<"jmpw $seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>;
+def : InstAlias<"calll $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"jmpl $seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+
+// Force a mov without a suffix that has segment and mem operands to prefer the
+// 'l' form of the move. All segment/mem forms are equivalent; this one has the
+// shortest encoding.
+def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem)>;
+def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg)>;
+
+// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
+def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm)>;
+
+// Match 'movq GR64, MMX' as an alias for movd.
+def : InstAlias<"movq $src, $dst", (MMX_MOVD64to64rr VR64:$dst, GR64:$src)>;
+def : InstAlias<"movq $src, $dst", (MMX_MOVD64from64rr GR64:$dst, VR64:$src)>;
+
+// movsd with no operands (as opposed to the SSE scalar move of a double) is an
+// alias for movsl (as in "rep; movsd").
+def : InstAlias<"movsd", (MOVSD)>;
+
+// movsx aliases
+def : InstAlias<"movsx $src, $dst", (MOVSX16rr8W GR16:$dst, GR8:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX16rm8W GR16:$dst, i8mem:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX32rr8 GR32:$dst, GR8:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX32rr16 GR32:$dst, GR16:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX64rr8 GR64:$dst, GR8:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX64rr16 GR64:$dst, GR16:$src)>;
+def : InstAlias<"movsx $src, $dst", (MOVSX64rr32 GR64:$dst, GR32:$src)>;
+
+// movzx aliases
+def : InstAlias<"movzx $src, $dst", (MOVZX16rr8W GR16:$dst, GR8:$src)>;
+def : InstAlias<"movzx $src, $dst", (MOVZX16rm8W GR16:$dst, i8mem:$src)>;
+def : InstAlias<"movzx $src, $dst", (MOVZX32rr8 GR32:$dst, GR8:$src)>;
+def : InstAlias<"movzx $src, $dst", (MOVZX32rr16 GR32:$dst, GR16:$src)>;
+def : InstAlias<"movzx $src, $dst", (MOVZX64rr8_Q GR64:$dst, GR8:$src)>;
+def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src)>;
+// Note: No GR32->GR64 movzx form.
+
+// outb %dx -> outb %al, %dx
+def : InstAlias<"outb %dx", (OUT8rr)>;
+def : InstAlias<"outw %dx", (OUT16rr)>;
+def : InstAlias<"outl %dx", (OUT32rr)>;
+def : InstAlias<"outb $port", (OUT8ir i8imm:$port)>;
+def : InstAlias<"outw $port", (OUT16ir i8imm:$port)>;
+def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>;
+
+// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
+// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
+// errors, since its encoding is the most compact.
+def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;
+
+// shld/shrd op,op -> shld op, op, 1
+def : InstAlias<"shldw $r1, $r2", (SHLD16rri8 GR16:$r1, GR16:$r2, 1)>;
+def : InstAlias<"shldl $r1, $r2", (SHLD32rri8 GR32:$r1, GR32:$r2, 1)>;
+def : InstAlias<"shldq $r1, $r2", (SHLD64rri8 GR64:$r1, GR64:$r2, 1)>;
+def : InstAlias<"shrdw $r1, $r2", (SHRD16rri8 GR16:$r1, GR16:$r2, 1)>;
+def : InstAlias<"shrdl $r1, $r2", (SHRD32rri8 GR32:$r1, GR32:$r2, 1)>;
+def : InstAlias<"shrdq $r1, $r2", (SHRD64rri8 GR64:$r1, GR64:$r2, 1)>;
+
+def : InstAlias<"shldw $mem, $reg", (SHLD16mri8 i16mem:$mem, GR16:$reg, 1)>;
+def : InstAlias<"shldl $mem, $reg", (SHLD32mri8 i32mem:$mem, GR32:$reg, 1)>;
+def : InstAlias<"shldq $mem, $reg", (SHLD64mri8 i64mem:$mem, GR64:$reg, 1)>;
+def : InstAlias<"shrdw $mem, $reg", (SHRD16mri8 i16mem:$mem, GR16:$reg, 1)>;
+def : InstAlias<"shrdl $mem, $reg", (SHRD32mri8 i32mem:$mem, GR32:$reg, 1)>;
+def : InstAlias<"shrdq $mem, $reg", (SHRD64mri8 i64mem:$mem, GR64:$reg, 1)>;
+
+/* FIXME: This is disabled because the asm matcher is currently incapable of
+ * matching a fixed immediate like $1.
+// "shl X, $1" is an alias for "shl X".
+multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
+ def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
+ def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
+ (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
+}
+
+defm : ShiftRotateByOneAlias<"rcl", "RCL">;
+defm : ShiftRotateByOneAlias<"rcr", "RCR">;
+defm : ShiftRotateByOneAlias<"rol", "ROL">;
+defm : ShiftRotateByOneAlias<"ror", "ROR">;
+FIXME */
+
+// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
+def : InstAlias<"testb $val, $mem", (TEST8rm GR8 :$val, i8mem :$mem)>;
+def : InstAlias<"testw $val, $mem", (TEST16rm GR16:$val, i16mem:$mem)>;
+def : InstAlias<"testl $val, $mem", (TEST32rm GR32:$val, i32mem:$mem)>;
+def : InstAlias<"testq $val, $mem", (TEST64rm GR64:$val, i64mem:$mem)>;
+
+// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
+def : InstAlias<"xchgb $mem, $val", (XCHG8rm GR8 :$val, i8mem :$mem)>;
+def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>;
+def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>;
+def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index 11d4179..bb2165a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -1,4 +1,4 @@
-//====- X86InstrMMX.td - Describe the X86 Instruction Set --*- tablegen -*-===//
+//====- X86InstrMMX.td - Describe the MMX Instruction Set --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,6 +11,9 @@
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
+// All instructions that use MMX should be in this file, even if they also use
+// SSE.
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -18,58 +21,23 @@
//===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in {
- // MMXI_binop_rm - Simple MMX binary operator.
- multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit Commutable = 0> {
- def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> {
- let isCommutable = Commutable;
- }
- def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (OpVT (OpNode VR64:$src1,
- (bitconvert
- (load_mmx addr:$src2)))))]>;
- }
-
+ // MMXI_binop_rm_int - Simple MMX binary operator based on intrinsic.
+ // When this is cleaned up, remove the FIXME from X86RecognizableInstr.cpp.
multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
bit Commutable = 0> {
- def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
+ def irr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> {
let isCommutable = Commutable;
}
- def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
+ def irm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (IntId VR64:$src1,
(bitconvert (load_mmx addr:$src2))))]>;
}
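+ // Illustrative note (not part of the original patch): an instantiation such
+ // as
+ //   defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b, 1>;
+ // (see the Arithmetic Instructions section below) now yields MMX_PADDBirr
+ // and MMX_PADDBirm, following the renamed "irr"/"irm" defs.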
- // MMXI_binop_rm_v1i64 - Simple MMX binary operator whose type is v1i64.
- //
- // FIXME: we could eliminate this and use MMXI_binop_rm instead if tblgen knew
- // to collapse (bitconvert VT to VT) into its operand.
- //
- multiclass MMXI_binop_rm_v1i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit Commutable = 0> {
- def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (v1i64 (OpNode VR64:$src1, VR64:$src2)))]> {
- let isCommutable = Commutable;
- }
- def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (OpNode VR64:$src1,(load_mmx addr:$src2)))]>;
- }
-
multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, Intrinsic IntId,
Intrinsic IntId2> {
@@ -89,14 +57,75 @@ let Constraints = "$src1 = $dst" in {
}
}
+/// Unary MMX instructions requiring SSSE3.
+multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId64> {
+ def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR64:$dst, (IntId64 VR64:$src))]>;
+
+ def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR64:$dst,
+ (IntId64 (bitconvert (memopmmx addr:$src))))]>;
+}
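+// (Illustrative note, not part of the original patch: defm MMX_PABSB below
+// instantiates this multiclass, yielding MMX_PABSBrr64 and MMX_PABSBrm64.)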
+
+/// Binary MMX instructions requiring SSSE3.
+let ImmT = NoImm, Constraints = "$src1 = $dst" in {
+multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId64> {
+ let isCommutable = 0 in
+ def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
+ (ins VR64:$src1, VR64:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
+ def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
+ (ins VR64:$src1, i64mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR64:$dst,
+ (IntId64 VR64:$src1,
+ (bitconvert (memopmmx addr:$src2))))]>;
+}
+}
+
+/// PALIGN MMX instructions (require SSSE3).
+multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
+ def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
+ (ins VR64:$src1, VR64:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2, (i8 imm:$src3)))]>;
+ def R64irm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
+ (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [(set VR64:$dst, (IntId VR64:$src1,
+ (bitconvert (load_mmx addr:$src2)), (i8 imm:$src3)))]>;
+}
+
+multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+ def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (Int SrcRC:$src))], d>;
+ def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
+}
+
+multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
+ RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm, Domain d> {
+ def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst),(ins DstRC:$src1, SrcRC:$src2),
+ asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
+ def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
+}
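+// (Illustrative note, not part of the original patch: the MMX_CVTPS2PI defm in
+// the Conversion Instructions section below instantiates sse12_cvt_pint,
+// yielding MMX_CVTPS2PIirr/irm; the 3-addr variant is used only by
+// MMX_CVTPI2PS, whose result is tied to $src1.)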
+
//===----------------------------------------------------------------------===//
-// MMX EMMS & FEMMS Instructions
+// MMX EMMS Instruction
//===----------------------------------------------------------------------===//
def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms",
[(int_x86_mmx_emms)]>;
-def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms",
- [(int_x86_mmx_femms)]>;
//===----------------------------------------------------------------------===//
// MMX Scalar Instructions
@@ -106,12 +135,12 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms",
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v2i32 (scalar_to_vector GR32:$src)))]>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
+ (x86mmx (scalar_to_vector GR32:$src)))]>;
+let canFoldAsLoad = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+ (x86mmx (scalar_to_vector (loadi32 addr:$src))))]>;
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>;
@@ -123,42 +152,41 @@ def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[]>;
-let neverHasSideEffects = 1 in
// These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as
// movd.
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
(outs GR64:$dst), (ins VR64:$src),
- "movd\t{$src, $dst|$dst, $src}", []>;
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst,
+ (bitconvert VR64:$src))]>;
def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v1i64 (scalar_to_vector GR64:$src)))]>;
-
+ (bitconvert GR64:$src))]>;
let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
+let canFoldAsLoad = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (load_mmx addr:$src))]>;
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (v1i64 VR64:$src), addr:$dst)]>;
+ [(store (x86mmx VR64:$src), addr:$dst)]>;
def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"movdq2q\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v1i64 (bitconvert
+ (x86mmx (bitconvert
(i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))))]>;
def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"movq2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (movl immAllZerosV,
- (v2i64 (scalar_to_vector
- (i64 (bitconvert (v1i64 VR64:$src)))))))]>;
+ (v2i64 (scalar_to_vector
+ (i64 (bitconvert (x86mmx VR64:$src))))))]>;
let neverHasSideEffects = 1 in
def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src),
@@ -176,34 +204,40 @@ let AddedComplexity = 15 in
def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v2i32 (X86vzmovl (v2i32 (scalar_to_vector GR32:$src)))))]>;
+ (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))]>;
let AddedComplexity = 20 in
def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst),
(ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
- (v2i32 (X86vzmovl (v2i32
+ (x86mmx (X86vzmovl (x86mmx
(scalar_to_vector (loadi32 addr:$src))))))]>;
// Arithmetic Instructions
-
+defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b>;
+defm MMX_PABSW : SS3I_unop_rm_int_mm<0x1D, "pabsw", int_x86_ssse3_pabs_w>;
+defm MMX_PABSD : SS3I_unop_rm_int_mm<0x1E, "pabsd", int_x86_ssse3_pabs_d>;
// -- Addition
-defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8, 1>;
-defm MMX_PADDW : MMXI_binop_rm<0xFD, "paddw", add, v4i16, 1>;
-defm MMX_PADDD : MMXI_binop_rm<0xFE, "paddd", add, v2i32, 1>;
-defm MMX_PADDQ : MMXI_binop_rm<0xD4, "paddq", add, v1i64, 1>;
-
+defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b, 1>;
+defm MMX_PADDW : MMXI_binop_rm_int<0xFD, "paddw", int_x86_mmx_padd_w, 1>;
+defm MMX_PADDD : MMXI_binop_rm_int<0xFE, "paddd", int_x86_mmx_padd_d, 1>;
+defm MMX_PADDQ : MMXI_binop_rm_int<0xD4, "paddq", int_x86_mmx_padd_q, 1>;
defm MMX_PADDSB : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>;
defm MMX_PADDSW : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>;
defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>;
defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>;
+defm MMX_PHADDW : SS3I_binop_rm_int_mm<0x01, "phaddw", int_x86_ssse3_phadd_w>;
+defm MMX_PHADD : SS3I_binop_rm_int_mm<0x02, "phaddd", int_x86_ssse3_phadd_d>;
+defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw>;
+
+
// -- Subtraction
-defm MMX_PSUBB : MMXI_binop_rm<0xF8, "psubb", sub, v8i8>;
-defm MMX_PSUBW : MMXI_binop_rm<0xF9, "psubw", sub, v4i16>;
-defm MMX_PSUBD : MMXI_binop_rm<0xFA, "psubd", sub, v2i32>;
-defm MMX_PSUBQ : MMXI_binop_rm<0xFB, "psubq", sub, v1i64>;
+defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b>;
+defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w>;
+defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d>;
+defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q>;
defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>;
defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;
@@ -211,16 +245,25 @@ defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;
defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>;
defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>;
+defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w>;
+defm MMX_PHSUBD : SS3I_binop_rm_int_mm<0x06, "phsubd", int_x86_ssse3_phsub_d>;
+defm MMX_PHSUBSW : SS3I_binop_rm_int_mm<0x07, "phsubsw",int_x86_ssse3_phsub_sw>;
+
// -- Multiplication
-defm MMX_PMULLW : MMXI_binop_rm<0xD5, "pmullw", mul, v4i16, 1>;
+defm MMX_PMULLW : MMXI_binop_rm_int<0xD5, "pmullw", int_x86_mmx_pmull_w, 1>;
defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw", int_x86_mmx_pmulh_w, 1>;
defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>;
defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>;
+let isCommutable = 1 in
+defm MMX_PMULHRSW : SS3I_binop_rm_int_mm<0x0B, "pmulhrsw",
+ int_x86_ssse3_pmul_hr_sw>;
// -- Miscellanea
defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;
+defm MMX_PMADDUBSW : SS3I_binop_rm_int_mm<0x04, "pmaddubsw",
+ int_x86_ssse3_pmadd_ub_sw>;
defm MMX_PAVGB : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>;
defm MMX_PAVGW : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>;
@@ -232,23 +275,17 @@ defm MMX_PMAXSW : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w, 1>;
defm MMX_PSADBW : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>;
-// Logical Instructions
-defm MMX_PAND : MMXI_binop_rm_v1i64<0xDB, "pand", and, 1>;
-defm MMX_POR : MMXI_binop_rm_v1i64<0xEB, "por" , or, 1>;
-defm MMX_PXOR : MMXI_binop_rm_v1i64<0xEF, "pxor", xor, 1>;
+defm MMX_PSIGNB : SS3I_binop_rm_int_mm<0x08, "psignb", int_x86_ssse3_psign_b>;
+defm MMX_PSIGNW : SS3I_binop_rm_int_mm<0x09, "psignw", int_x86_ssse3_psign_w>;
+defm MMX_PSIGND : SS3I_binop_rm_int_mm<0x0A, "psignd", int_x86_ssse3_psign_d>;
+let Constraints = "$src1 = $dst" in
+ defm MMX_PALIGN : ssse3_palign_mm<"palignr", int_x86_mmx_palignr_b>;
-let Constraints = "$src1 = $dst" in {
- def MMX_PANDNrr : MMXI<0xDF, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
- VR64:$src2)))]>;
- def MMX_PANDNrm : MMXI<0xDF, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
- (load addr:$src2))))]>;
-}
+// Logical Instructions
+defm MMX_PAND : MMXI_binop_rm_int<0xDB, "pand", int_x86_mmx_pand, 1>;
+defm MMX_POR : MMXI_binop_rm_int<0xEB, "por" , int_x86_mmx_por, 1>;
+defm MMX_PXOR : MMXI_binop_rm_int<0xEF, "pxor", int_x86_mmx_pxor, 1>;
+defm MMX_PANDN : MMXI_binop_rm_int<0xDF, "pandn", int_x86_mmx_pandn, 1>;
// Shift Instructions
defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
@@ -270,12 +307,6 @@ defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
-// Shift up / down and insert zero's.
-def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
- (MMX_PSLLQri VR64:$src, (GetLo32XForm imm:$amt))>;
-def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
- (MMX_PSRLQri VR64:$src, (GetLo32XForm imm:$amt))>;
-
// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>;
@@ -285,84 +316,19 @@ defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b>;
defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>;
defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>;
-// Conversion Instructions
-
// -- Unpack Instructions
-let Constraints = "$src1 = $dst" in {
- // Unpack High Packed Data Instructions
- def MMX_PUNPCKHBWrr : MMXI<0x68, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v8i8 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKHBWrm : MMXI<0x68, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v8i8 (mmx_unpckh VR64:$src1,
- (bc_v8i8 (load_mmx addr:$src2)))))]>;
-
- def MMX_PUNPCKHWDrr : MMXI<0x69, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v4i16 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKHWDrm : MMXI<0x69, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v4i16 (mmx_unpckh VR64:$src1,
- (bc_v4i16 (load_mmx addr:$src2)))))]>;
-
- def MMX_PUNPCKHDQrr : MMXI<0x6A, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v2i32 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKHDQrm : MMXI<0x6A, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v2i32 (mmx_unpckh VR64:$src1,
- (bc_v2i32 (load_mmx addr:$src2)))))]>;
-
- // Unpack Low Packed Data Instructions
- def MMX_PUNPCKLBWrr : MMXI<0x60, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v8i8 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKLBWrm : MMXI<0x60, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v8i8 (mmx_unpckl VR64:$src1,
- (bc_v8i8 (load_mmx addr:$src2)))))]>;
-
- def MMX_PUNPCKLWDrr : MMXI<0x61, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v4i16 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKLWDrm : MMXI<0x61, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v4i16 (mmx_unpckl VR64:$src1,
- (bc_v4i16 (load_mmx addr:$src2)))))]>;
-
- def MMX_PUNPCKLDQrr : MMXI<0x62, MRMSrcReg,
- (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v2i32 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
- def MMX_PUNPCKLDQrm : MMXI<0x62, MRMSrcMem,
- (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (v2i32 (mmx_unpckl VR64:$src1,
- (bc_v2i32 (load_mmx addr:$src2)))))]>;
-}
+defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw",
+ int_x86_mmx_punpckhbw>;
+defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd",
+ int_x86_mmx_punpckhwd>;
+defm MMX_PUNPCKHDQ : MMXI_binop_rm_int<0x6A, "punpckhdq",
+ int_x86_mmx_punpckhdq>;
+defm MMX_PUNPCKLBW : MMXI_binop_rm_int<0x60, "punpcklbw",
+ int_x86_mmx_punpcklbw>;
+defm MMX_PUNPCKLWD : MMXI_binop_rm_int<0x61, "punpcklwd",
+ int_x86_mmx_punpcklwd>;
+defm MMX_PUNPCKLDQ : MMXI_binop_rm_int<0x62, "punpckldq",
+ int_x86_mmx_punpckldq>;
// -- Pack Instructions
defm MMX_PACKSSWB : MMXI_binop_rm_int<0x63, "packsswb", int_x86_mmx_packsswb>;
@@ -370,93 +336,80 @@ defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw>;
defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>;
// -- Shuffle Instructions
+defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", int_x86_ssse3_pshuf_b>;
+
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
- (v4i16 (mmx_pshufw:$src2 VR64:$src1, (undef))))]>;
+ (int_x86_sse_pshuf_w VR64:$src1, imm:$src2))]>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
(outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
- (mmx_pshufw:$src2 (bc_v4i16 (load_mmx addr:$src1)),
- (undef)))]>;
+ (int_x86_sse_pshuf_w (load_mmx addr:$src1),
+ imm:$src2))]>;
-// -- Conversion Instructions
-let neverHasSideEffects = 1 in {
-def MMX_CVTPD2PIrr : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTPD2PIrm : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst),
- (ins f128mem:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;
-
-def MMX_CVTPI2PDrr : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTPI2PDrm : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst),
- (ins i64mem:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
-
-def MMX_CVTPI2PSrr : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
- "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTPI2PSrm : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst),
- (ins i64mem:$src),
- "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;
-
-def MMX_CVTPS2PIrr : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTPS2PIrm : MMXI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}", []>;
-
-def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst),
- (ins f128mem:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;
-
-def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
-def MMX_CVTTPS2PIrm : MMXI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}", []>;
-} // end neverHasSideEffects
-// Extract / Insert
-def MMX_X86pinsrw : SDNode<"X86ISD::MMX_PINSRW",
- SDTypeProfile<1, 3, [SDTCisVT<0, v4i16>, SDTCisSameAs<0,1>,
- SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def MMX_PEXTRWri : MMXIi8<0xC5, MRMSrcReg,
- (outs GR32:$dst), (ins VR64:$src1, i16i8imm:$src2),
+// -- Conversion Instructions
+defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
+ f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
+ f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
+ f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
+ f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm MMX_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
+ i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+let Constraints = "$src1 = $dst" in {
+ defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
+ int_x86_sse_cvtpi2ps,
+ i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+}
+
+// Extract / Insert
+def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
+ (outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (X86pextrw (v4i16 VR64:$src1),
+ [(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1,
(iPTR imm:$src2)))]>;
let Constraints = "$src1 = $dst" in {
- def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
+ def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst),
- (ins VR64:$src1, GR32:$src2,i16i8imm:$src3),
+ (ins VR64:$src1, GR32:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
- GR32:$src2,(iPTR imm:$src3))))]>;
- def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
+ [(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
+ GR32:$src2, (iPTR imm:$src3)))]>;
+
+ def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem,
(outs VR64:$dst),
- (ins VR64:$src1, i16mem:$src2, i16i8imm:$src3),
+ (ins VR64:$src1, i16mem:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR64:$dst,
- (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
- (i32 (anyext (loadi16 addr:$src2))),
- (iPTR imm:$src3))))]>;
+ [(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
+ (i32 (anyext (loadi16 addr:$src2))),
+ (iPTR imm:$src3)))]>;
}
+// Mask creation
+def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst,
+ (int_x86_mmx_pmovmskb VR64:$src))]>;
+
+
// MMX to XMM for vector types
def MMX_X86movq2dq : SDNode<"X86ISD::MOVQ2DQ", SDTypeProfile<1, 1,
- [SDTCisVT<0, v2i64>, SDTCisVT<1, v1i64>]>>;
+ [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
@@ -464,14 +417,19 @@ def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
(v2i64 (MOVQI2PQIrm addr:$src))>;
-def : Pat<(v2i64 (MMX_X86movq2dq (v1i64 (bitconvert
- (v2i32 (scalar_to_vector (loadi32 addr:$src))))))),
+def : Pat<(v2i64 (MMX_X86movq2dq
+ (x86mmx (scalar_to_vector (loadi32 addr:$src))))),
(v2i64 (MOVDI2PDIrm addr:$src))>;
-// Mask creation
-def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>;
+// Low word of XMM to MMX.
+def MMX_X86movdq2q : SDNode<"X86ISD::MOVDQ2Q", SDTypeProfile<1, 1,
+ [SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
+
+def : Pat<(x86mmx (MMX_X86movdq2q VR128:$src)),
+ (x86mmx (MMX_MOVDQ2Qrr VR128:$src))>;
+
+def : Pat<(x86mmx (MMX_X86movdq2q (loadv2i64 addr:$src))),
+ (x86mmx (MMX_MOVQ64rm addr:$src))>;
// Misc.
let Uses = [EDI] in
@@ -483,181 +441,14 @@ def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
"maskmovq\t{$mask, $src|$src, $mask}",
[(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)]>;
-//===----------------------------------------------------------------------===//
-// Alias Instructions
-//===----------------------------------------------------------------------===//
-
-// Alias instructions that map zero vector to pxor.
-let isReMaterializable = 1, isCodeGenOnly = 1 in {
- // FIXME: Change encoding to pseudo.
- def MMX_V_SET0 : MMXI<0xEF, MRMInitReg, (outs VR64:$dst), (ins), "",
- [(set VR64:$dst, (v2i32 immAllZerosV))]>;
- def MMX_V_SETALLONES : MMXI<0x76, MRMInitReg, (outs VR64:$dst), (ins), "",
- [(set VR64:$dst, (v2i32 immAllOnesV))]>;
-}
-
-let Predicates = [HasMMX] in {
- def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>;
- def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>;
- def : Pat<(v8i8 immAllZerosV), (MMX_V_SET0)>;
-}
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// Store 64-bit integer vector values.
-def : Pat<(store (v8i8 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v4i16 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v2i32 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v1i64 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-
-// Bit convert.
-def : Pat<(v8i8 (bitconvert (v1i64 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2i32 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v4i16 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v8i8 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v8i8 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v8i8 VR64:$src))), (v1i64 VR64:$src)>;
-
// 64-bit bit convert.
-def : Pat<(v1i64 (bitconvert (i64 GR64:$src))),
+def : Pat<(x86mmx (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
+def : Pat<(i64 (bitconvert (x86mmx VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(f64 (bitconvert (v1i64 VR64:$src))),
- (MMX_MOVQ2FR64rr VR64:$src)>;
-def : Pat<(f64 (bitconvert (v2i32 VR64:$src))),
- (MMX_MOVQ2FR64rr VR64:$src)>;
-def : Pat<(f64 (bitconvert (v4i16 VR64:$src))),
+def : Pat<(f64 (bitconvert (x86mmx VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
-def : Pat<(f64 (bitconvert (v8i8 VR64:$src))),
- (MMX_MOVQ2FR64rr VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (f64 FR64:$src))),
- (MMX_MOVFR642Qrr FR64:$src)>;
-def : Pat<(v2i32 (bitconvert (f64 FR64:$src))),
- (MMX_MOVFR642Qrr FR64:$src)>;
-def : Pat<(v4i16 (bitconvert (f64 FR64:$src))),
+def : Pat<(x86mmx (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>;
-def : Pat<(v8i8 (bitconvert (f64 FR64:$src))),
- (MMX_MOVFR642Qrr FR64:$src)>;
-
-let AddedComplexity = 20 in {
- def : Pat<(v2i32 (X86vzmovl (bc_v2i32 (load_mmx addr:$src)))),
- (MMX_MOVZDI2PDIrm addr:$src)>;
-}
-
-// Clear top half.
-let AddedComplexity = 15 in {
- def : Pat<(v2i32 (X86vzmovl VR64:$src)),
- (MMX_PUNPCKLDQrr VR64:$src, (v2i32 (MMX_V_SET0)))>;
-}
-
-// Patterns to perform canonical versions of vector shuffling.
-let AddedComplexity = 10 in {
- def : Pat<(v8i8 (mmx_unpckl_undef VR64:$src, (undef))),
- (MMX_PUNPCKLBWrr VR64:$src, VR64:$src)>;
- def : Pat<(v4i16 (mmx_unpckl_undef VR64:$src, (undef))),
- (MMX_PUNPCKLWDrr VR64:$src, VR64:$src)>;
- def : Pat<(v2i32 (mmx_unpckl_undef VR64:$src, (undef))),
- (MMX_PUNPCKLDQrr VR64:$src, VR64:$src)>;
-}
-let AddedComplexity = 10 in {
- def : Pat<(v8i8 (mmx_unpckh_undef VR64:$src, (undef))),
- (MMX_PUNPCKHBWrr VR64:$src, VR64:$src)>;
- def : Pat<(v4i16 (mmx_unpckh_undef VR64:$src, (undef))),
- (MMX_PUNPCKHWDrr VR64:$src, VR64:$src)>;
- def : Pat<(v2i32 (mmx_unpckh_undef VR64:$src, (undef))),
- (MMX_PUNPCKHDQrr VR64:$src, VR64:$src)>;
-}
-// Some special case PANDN patterns.
-// FIXME: Get rid of these.
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
- VR64:$src2)),
- (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
- (load addr:$src2))),
- (MMX_PANDNrm VR64:$src1, addr:$src2)>;
-
-// Move MMX to lower 64-bit of XMM
-def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v8i8 VR64:$src))))),
- (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
-def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v4i16 VR64:$src))))),
- (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
-def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v2i32 VR64:$src))))),
- (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
-def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v1i64 VR64:$src))))),
- (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
-
-// Move lower 64-bit of XMM to MMX.
-def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))))),
- (v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
-def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))))),
- (v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
-def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))))),
- (v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
-
-// Patterns for vector comparisons
-def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, VR64:$src2)),
- (MMX_PCMPEQBrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPEQBrm VR64:$src1, addr:$src2)>;
-def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, VR64:$src2)),
- (MMX_PCMPEQWrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPEQWrm VR64:$src1, addr:$src2)>;
-def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, VR64:$src2)),
- (MMX_PCMPEQDrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPEQDrm VR64:$src1, addr:$src2)>;
-
-def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, VR64:$src2)),
- (MMX_PCMPGTBrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPGTBrm VR64:$src1, addr:$src2)>;
-def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, VR64:$src2)),
- (MMX_PCMPGTWrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPGTWrm VR64:$src1, addr:$src2)>;
-def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, VR64:$src2)),
- (MMX_PCMPGTDrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
- (MMX_PCMPGTDrm VR64:$src1, addr:$src2)>;
-
-// CMOV* - Used to implement the SELECT DAG operation. Expanded after
-// instruction selection into a branch sequence.
-let Uses = [EFLAGS], usesCustomInserter = 1 in {
- def CMOV_V1I64 : I<0, Pseudo,
- (outs VR64:$dst), (ins VR64:$t, VR64:$f, i8imm:$cond),
- "#CMOV_V1I64 PSEUDO!",
- [(set VR64:$dst,
- (v1i64 (X86cmov VR64:$t, VR64:$f, imm:$cond,
- EFLAGS)))]>;
-}
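// For context on the pseudos above: usesCustomInserter routes them to
// EmitInstrWithCustomInserter after selection, which expands each select into
// a small diamond of basic blocks. A sketch of the resulting MachineIR shape
// (illustrative only, not taken from this commit):
//
//   thisMBB:  ...                      ; EFLAGS already live here
//             jcc $cond, sinkMBB       ; take the $t path on the condition
//   copy0MBB: ...                      ; fall-through picks $f
//   sinkMBB:  $dst = PHI [$f, copy0MBB], [$t, thisMBB]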
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index f5466f8..b912949 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -15,43 +15,6 @@
//===----------------------------------------------------------------------===//
-// SSE scalar FP Instructions
-//===----------------------------------------------------------------------===//
-
-// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
-// instruction selection into a branch sequence.
-let Uses = [EFLAGS], usesCustomInserter = 1 in {
- def CMOV_FR32 : I<0, Pseudo,
- (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
- "#CMOV_FR32 PSEUDO!",
- [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
- EFLAGS))]>;
- def CMOV_FR64 : I<0, Pseudo,
- (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
- "#CMOV_FR64 PSEUDO!",
- [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
- EFLAGS))]>;
- def CMOV_V4F32 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V4F32 PSEUDO!",
- [(set VR128:$dst,
- (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V2F64 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V2F64 PSEUDO!",
- [(set VR128:$dst,
- (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
- def CMOV_V2I64 : I<0, Pseudo,
- (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
- "#CMOV_V2I64 PSEUDO!",
- [(set VR128:$dst,
- (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
- EFLAGS)))]>;
-}
-
-//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
@@ -82,17 +45,15 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
- !strconcat(SSEVer, !strconcat("_",
- !strconcat(OpcodeStr, FPSizeStr))))
+ [(set RC:$dst, (!cast<Intrinsic>(
+ !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
RC:$src1, RC:$src2))]>;
def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
- !strconcat(SSEVer, !strconcat("_",
- !strconcat(OpcodeStr, FPSizeStr))))
+ [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
+ SSEVer, "_", OpcodeStr, FPSizeStr))
RC:$src1, mem_cpat:$src2))]>;
}
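// The !cast<Intrinsic>(!strconcat(...)) rewrite above resolves the intrinsic
// record by name at TableGen time. With hypothetical instantiation values
// SSEVer = "2", OpcodeStr = "min", FPSizeStr = "_sd":
//
//   !strconcat("int_x86_sse", "2", "_", "min", "_sd") // "int_x86_sse2_min_sd"
//
// !cast<Intrinsic> then looks that record up (from IntrinsicsX86.td); a name
// with no matching record is a hard TableGen error rather than a silent
// mismatch.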
@@ -142,17 +103,15 @@ multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
- !strconcat(SSEVer, !strconcat("_",
- !strconcat(OpcodeStr, FPSizeStr))))
+ [(set RC:$dst, (!cast<Intrinsic>(
+ !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
RC:$src1, RC:$src2))], d>;
def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
- !strconcat(SSEVer, !strconcat("_",
- !strconcat(OpcodeStr, FPSizeStr))))
+ [(set RC:$dst, (!cast<Intrinsic>(
+ !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
RC:$src1, (mem_frag addr:$src2)))], d>;
}
@@ -221,6 +180,12 @@ def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
(INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
+// Implicitly promote a 32-bit scalar to a vector.
+def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
+ (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
+// Implicitly promote a 64-bit scalar to a vector.
+def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
+ (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
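+// In both new promotes the upper YMM lanes stay undefined: IMPLICIT_DEF
+// supplies an unconstrained 256-bit value and INSERT_SUBREG only defines the
+// sub_ss / sub_sd piece, which is all scalar_to_vector guarantees.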
let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
@@ -403,7 +368,7 @@ multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
string asm_opr> {
def PSrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- !strconcat(!strconcat(base_opc,"s"), asm_opr),
+ !strconcat(base_opc, "s", asm_opr),
[(set RC:$dst,
(mov_frag RC:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
@@ -411,7 +376,7 @@ multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
def PDrm : PI<opc, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, f64mem:$src2),
- !strconcat(!strconcat(base_opc,"d"), asm_opr),
+ !strconcat(base_opc, "d", asm_opr),
[(set RC:$dst, (v2f64 (mov_frag RC:$src1,
(scalar_to_vector (loadf64 addr:$src2)))))],
SSEPackedDouble>, TB, OpSize;
@@ -598,14 +563,6 @@ defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).
-multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
- Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
- string asm, Domain d> {
- def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (Int SrcRC:$src))], d>;
- def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
-}
multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
@@ -618,16 +575,6 @@ multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
[(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}
-multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
- RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
- PatFrag ld_frag, string asm, Domain d> {
- def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
- asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
- def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
- (ins DstRC:$src1, x86memop:$src2), asm,
- [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
-}
-
multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
PatFrag ld_frag, string asm, bit Is2Addr = 1> {
@@ -669,13 +616,11 @@ defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
f32mem, load, "cvtss2si">, XS;
defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
f32mem, load, "cvtss2si{q}">, XS, REX_W;
-defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
- f128mem, load, "cvtsd2si">, XD;
-defm Int_CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
- f128mem, load, "cvtsd2si">, XD, REX_W;
+defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si{l}">, XD;
+defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
+ f128mem, load, "cvtsd2si{q}">, XD, REX_W;
-defm CVTSD2SI64 : sse12_cvt_s_np<0x2D, VR128, GR64, f64mem, "cvtsd2si{q}">, XD,
- REX_W;
let isAsmParserOnly = 1 in {
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
@@ -705,29 +650,6 @@ let Constraints = "$src1 = $dst" in {
"cvtsi2sd">, XD, REX_W;
}
-// Instructions below don't have an AVX form.
-defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
- f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB;
-defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
- f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
-defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
- f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB;
-defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
- f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
-defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
- i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
- SSEPackedDouble>, TB, OpSize;
-let Constraints = "$src1 = $dst" in {
- defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
- int_x86_sse_cvtpi2ps,
- i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
-}
-
/// SSE 1 Only
// Aliases for intrinsics
@@ -738,10 +660,10 @@ defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse_cvttss2si64, f32mem, load,
"cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttss2si">, XD, VEX;
+ f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttss2si">, XD, VEX, VEX_W;
+ "cvttsd2si">, XD, VEX, VEX_W;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
f32mem, load, "cvttss2si">, XS;
@@ -749,10 +671,10 @@ defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse_cvttss2si64, f32mem, load,
"cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttss2si">, XD;
+ f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttss2si{q}">, XD, REX_W;
+ "cvttsd2si{q}">, XD, REX_W;
let isAsmParserOnly = 1, Pattern = []<dag> in {
defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
@@ -790,6 +712,9 @@ def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
}
+def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
+ Requires<[HasAVX]>;
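+// $src is deliberately passed twice: the VEX form of cvtsd2ss merges the
+// upper bits of its first source into the result, and reusing $src there
+// avoids a false dependency on an unrelated register.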
+
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround FR64:$src))]>;
@@ -817,6 +742,9 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
}
+def : Pat<(f64 (fextend FR32:$src)), (VCVTSS2SDrr FR32:$src, FR32:$src)>,
+ Requires<[HasAVX]>;
+
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend FR32:$src))]>, XS,
@@ -973,9 +901,13 @@ def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
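+// (The cvttps2dq intrinsic patterns move onto the primary defs here; the
+// separate Int_CVTTPS2DQ* aliases are deleted just below.)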
let isAsmParserOnly = 1 in {
@@ -990,16 +922,6 @@ def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
(memop addr:$src)))]>,
XS, VEX, Requires<[HasAVX]>;
}
-def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))]>,
- XS, Requires<[HasSSE2]>;
-def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (memop addr:$src)))]>,
- XS, Requires<[HasSSE2]>;
let isAsmParserOnly = 1 in {
def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
@@ -1013,13 +935,13 @@ def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
(memop addr:$src)))]>, VEX;
}
-def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
-def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memop addr:$src)))]>;
+def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
+def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
+ (memop addr:$src)))]>;
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
@@ -1469,9 +1391,11 @@ let AddedComplexity = 10 in {
/// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
Domain d> {
- def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set GR32:$dst, (Int RC:$src))], d>;
+ def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
}
// Mask creation
@@ -1522,6 +1446,12 @@ def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
[(set FR64:$dst, fpimm0)]>,
Requires<[HasSSE2]>, TB, OpSize;
+def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
+ [(set FR32:$dst, fp32imm0)]>,
+ Requires<[HasAVX]>, TB, OpSize, VEX_4V;
+def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
+ [(set FR64:$dst, fpimm0)]>,
+ Requires<[HasAVX]>, TB, OpSize, VEX_4V;
}
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
@@ -1654,19 +1584,13 @@ defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
// single r+r
- [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
- (bc_v2i64 (v4i32 immAllOnesV))),
- VR128:$src2)))],
+ [(set VR128:$dst, (X86pandn VR128:$src1, VR128:$src2))],
// double r+r
- [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (bc_v2i64 (v2f64 VR128:$src2))))],
+ [],
// single r+m
- [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
- (bc_v2i64 (v4i32 immAllOnesV))),
- (memopv2i64 addr:$src2))))],
+ [(set VR128:$dst, (X86pandn VR128:$src1, (memopv2i64 addr:$src2)))],
// double r+m
- [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (memopv2i64 addr:$src2)))]]>;
+ []]>;
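+// Net effect of this hunk: ANDN selection now funnels through the single
+// X86pandn node (see also the PANDN changes below), so the per-type
+// and/xor/immAllOnesV and vnot spellings go away and the double-precision
+// r+r / r+m slots can simply stay empty.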
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
@@ -2170,7 +2094,7 @@ def : Pat<(X86SFence), (SFENCE)>;
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
-// JIT implementatioan, it does not expand the instructions below like
+// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
isCodeGenOnly = 1 in {
@@ -2277,6 +2201,10 @@ let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>;
+def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "movdqu\t{$src, $dst|$dst, $src}",
+ []>, XS, Requires<[HasSSE2]>;
+
let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
@@ -2606,15 +2534,11 @@ let ExeDomain = SSEPackedInt in {
}
def PANDNrr : PDI<0xDF, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- VR128:$src2)))]>;
+ "pandn\t{$src2, $dst|$dst, $src2}", []>;
def PANDNrm : PDI<0xDF, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- (memopv2i64 addr:$src2))))]>;
+ "pandn\t{$src2, $dst|$dst, $src2}", []>;
}
} // Constraints = "$src1 = $dst"
@@ -3009,6 +2933,13 @@ def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector GR64:$src)))]>;
+def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert GR64:$src))]>;
// Move Int Doubleword to Single Scalar
@@ -3051,6 +2982,21 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)]>;
+def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
+ (iPTR 0)))]>;
+def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
+
+def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (bitconvert FR64:$src))]>;
+def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
+
// Move Scalar Single to Double Int
let isAsmParserOnly = 1 in {
def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
@@ -3532,18 +3478,6 @@ let Constraints = "$src1 = $dst" in {
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//
-/// SS3I_unop_rm_int_mm - Simple SSSE3 unary whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
- PatFrag mem_frag64, Intrinsic IntId64> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
-
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst,
- (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
-}
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
@@ -3572,19 +3506,11 @@ let isAsmParserOnly = 1, Predicates = [HasAVX] in {
}
defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
- int_x86_ssse3_pabs_b_128>,
- SS3I_unop_rm_int_mm<0x1C, "pabsb", memopv8i8,
- int_x86_ssse3_pabs_b>;
-
+ int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
- int_x86_ssse3_pabs_w_128>,
- SS3I_unop_rm_int_mm<0x1D, "pabsw", memopv4i16,
- int_x86_ssse3_pabs_w>;
-
+ int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
- int_x86_ssse3_pabs_d_128>,
- SS3I_unop_rm_int_mm<0x1E, "pabsd", memopv2i32,
- int_x86_ssse3_pabs_d>;
+ int_x86_ssse3_pabs_d_128>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
@@ -3611,20 +3537,6 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
(IntId128 VR128:$src1,
(bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
- PatFrag mem_frag64, Intrinsic IntId64> {
- let isCommutable = 1 in
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv8i8 addr:$src2))))]>;
-}
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
let isCommutable = 0 in {
@@ -3659,54 +3571,30 @@ defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
- int_x86_ssse3_phadd_w_128>,
- SS3I_binop_rm_int_mm<0x01, "phaddw", memopv4i16,
- int_x86_ssse3_phadd_w>;
+ int_x86_ssse3_phadd_w_128>;
defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
- int_x86_ssse3_phadd_d_128>,
- SS3I_binop_rm_int_mm<0x02, "phaddd", memopv2i32,
- int_x86_ssse3_phadd_d>;
+ int_x86_ssse3_phadd_d_128>;
defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
- int_x86_ssse3_phadd_sw_128>,
- SS3I_binop_rm_int_mm<0x03, "phaddsw", memopv4i16,
- int_x86_ssse3_phadd_sw>;
+ int_x86_ssse3_phadd_sw_128>;
defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
- int_x86_ssse3_phsub_w_128>,
- SS3I_binop_rm_int_mm<0x05, "phsubw", memopv4i16,
- int_x86_ssse3_phsub_w>;
+ int_x86_ssse3_phsub_w_128>;
defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
- int_x86_ssse3_phsub_d_128>,
- SS3I_binop_rm_int_mm<0x06, "phsubd", memopv2i32,
- int_x86_ssse3_phsub_d>;
+ int_x86_ssse3_phsub_d_128>;
defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
- int_x86_ssse3_phsub_sw_128>,
- SS3I_binop_rm_int_mm<0x07, "phsubsw", memopv4i16,
- int_x86_ssse3_phsub_sw>;
+ int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
- int_x86_ssse3_pmadd_ub_sw_128>,
- SS3I_binop_rm_int_mm<0x04, "pmaddubsw", memopv8i8,
- int_x86_ssse3_pmadd_ub_sw>;
- defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8,
- int_x86_ssse3_pshuf_b_128>,
- SS3I_binop_rm_int_mm<0x00, "pshufb", memopv8i8,
- int_x86_ssse3_pshuf_b>;
+ int_x86_ssse3_pmadd_ub_sw_128>;
+ defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
+ int_x86_ssse3_pshuf_b_128>;
defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
- int_x86_ssse3_psign_b_128>,
- SS3I_binop_rm_int_mm<0x08, "psignb", memopv8i8,
- int_x86_ssse3_psign_b>;
+ int_x86_ssse3_psign_b_128>;
defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
- int_x86_ssse3_psign_w_128>,
- SS3I_binop_rm_int_mm<0x09, "psignw", memopv4i16,
- int_x86_ssse3_psign_w>;
+ int_x86_ssse3_psign_w_128>;
defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
- int_x86_ssse3_psign_d_128>,
- SS3I_binop_rm_int_mm<0x0A, "psignd", memopv2i32,
- int_x86_ssse3_psign_d>;
+ int_x86_ssse3_psign_d_128>;
}
defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
- int_x86_ssse3_pmul_hr_sw_128>,
- SS3I_binop_rm_int_mm<0x0B, "pmulhrsw", memopv4i16,
- int_x86_ssse3_pmul_hr_sw>;
+ int_x86_ssse3_pmul_hr_sw_128>;
}
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
@@ -3714,19 +3602,17 @@ def : Pat<(X86pshufb VR128:$src, VR128:$mask),
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
(PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
+def : Pat<(X86psignb VR128:$src1, VR128:$src2),
+ (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
+def : Pat<(X86psignw VR128:$src1, VR128:$src2),
+ (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
+def : Pat<(X86psignd VR128:$src1, VR128:$src2),
+ (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
+
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//
-multiclass ssse3_palign_mm<string asm> {
- def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2, i8imm:$src3),
- !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
- def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
- !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
-}
-
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
@@ -3747,28 +3633,9 @@ multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
let isAsmParserOnly = 1, Predicates = [HasAVX] in
defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
- defm PALIGN : ssse3_palign<"palignr">,
- ssse3_palign_mm<"palignr">;
+ defm PALIGN : ssse3_palign<"palignr">;
let AddedComplexity = 5 in {
-
-def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
-def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
-def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
-def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
-
def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
(PALIGNR128rr VR128:$src2, VR128:$src1,
(SHUFFLE_get_palign_imm VR128:$src3))>,
@@ -3792,10 +3659,27 @@ def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
//===---------------------------------------------------------------------===//
// Thread synchronization
-def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
- [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
-def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
- [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
+let usesCustomInserter = 1 in {
+def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
+ [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
+def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
+ [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
+}
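+// The custom inserter is expected to marshal these virtual-register operands
+// into the fixed EAX/ECX/EDX (monitor) and ECX/EAX (mwait) registers before
+// emitting the real MONITORrrr / MWAITrr defined below.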
+
+let Uses = [EAX, ECX, EDX] in
+def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
+ Requires<[HasSSE3]>;
+let Uses = [ECX, EAX] in
+def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
+ Requires<[HasSSE3]>;
+
+def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
+def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
+
+def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
+ Requires<[In32BitMode]>;
+def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
+ Requires<[In64BitMode]>;
//===---------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -3811,7 +3695,7 @@ let Predicates = [HasSSE2] in
(CVTSS2SDrm addr:$src)>;
// bit_convert
-let Predicates = [HasSSE2] in {
+let Predicates = [HasXMMInt] in {
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
@@ -3844,6 +3728,10 @@ let Predicates = [HasSSE2] in {
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
+let Predicates = [HasAVX] in {
+ def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
+}
+
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
@@ -4017,36 +3905,11 @@ def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
-// Some special case pandn patterns.
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
- VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
- VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
- VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
-
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
- (memop addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
- (memop addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
- (memop addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-
// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
(Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
- (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
- (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
- (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
+ (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
let Predicates = [HasSSE1] in {
@@ -4504,7 +4367,7 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
Intrinsic V4F32Int, Intrinsic V2F64Int> {
// Intrinsic operation, reg.
// Vector intrinsic operation, reg
- def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
+ def PSr : SS4AIi8<opcps, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4512,7 +4375,7 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
OpSize;
// Vector intrinsic operation, mem
- def PSm_Int : Ii8<opcps, MRMSrcMem,
+ def PSm : Ii8<opcps, MRMSrcMem,
(outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4522,7 +4385,7 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
Requires<[HasSSE41]>;
// Vector intrinsic operation, reg
- def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
+ def PDr : SS4AIi8<opcpd, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4530,7 +4393,7 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
OpSize;
// Vector intrinsic operation, mem
- def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
+ def PDm : SS4AIi8<opcpd, MRMSrcMem,
(outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4543,28 +4406,28 @@ multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
// Intrinsic operation, reg.
// Vector intrinsic operation, reg
- def PSr : SS4AIi8<opcps, MRMSrcReg,
+ def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, OpSize;
// Vector intrinsic operation, mem
- def PSm : Ii8<opcps, MRMSrcMem,
+ def PSm_AVX : Ii8<opcps, MRMSrcMem,
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, TA, OpSize, Requires<[HasSSE41]>;
// Vector intrinsic operation, reg
- def PDr : SS4AIi8<opcpd, MRMSrcReg,
+ def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, OpSize;
// Vector intrinsic operation, mem
- def PDm : SS4AIi8<opcpd, MRMSrcMem,
+ def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4576,7 +4439,7 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
Intrinsic F32Int,
Intrinsic F64Int, bit Is2Addr = 1> {
// Intrinsic operation, reg.
- def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
+ def SSr : SS4AIi8<opcss, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -4587,7 +4450,7 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
OpSize;
// Intrinsic operation, mem.
- def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
+ def SSm : SS4AIi8<opcss, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -4599,7 +4462,7 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
OpSize;
// Intrinsic operation, reg.
- def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
+ def SDr : SS4AIi8<opcsd, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -4610,7 +4473,7 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
OpSize;
// Intrinsic operation, mem.
- def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
+ def SDm : SS4AIi8<opcsd, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -4625,28 +4488,28 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
string OpcodeStr> {
// Intrinsic operation, reg.
- def SSr : SS4AIi8<opcss, MRMSrcReg,
+ def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
// Intrinsic operation, mem.
- def SSm : SS4AIi8<opcss, MRMSrcMem,
+ def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
// Intrinsic operation, reg.
- def SDr : SS4AIi8<opcsd, MRMSrcReg,
+ def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
// Intrinsic operation, mem.
- def SDm : SS4AIi8<opcsd, MRMSrcMem,
+ def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
@@ -4743,6 +4606,29 @@ defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//
+def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+ "popcnt{w}\t{$src, $dst|$dst, $src}",
+ [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
+def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+ "popcnt{w}\t{$src, $dst|$dst, $src}",
+ [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
+
+def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+ "popcnt{l}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
+def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+ "popcnt{l}\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
+
+def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "popcnt{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
+def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "popcnt{q}\t{$src, $dst|$dst, $src}",
+ [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
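+// All six popcnt forms share opcode 0xB8 under the XS (F3) mandatory prefix,
+// i.e. the F3 0F B8 encoding; OpSize and REX_W select the 16- and 64-bit
+// operand sizes.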
+
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
Intrinsic IntId128> {
@@ -4981,6 +4867,9 @@ defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
+def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
+ (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
+
let isAsmParserOnly = 1, Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
@@ -5032,12 +4921,12 @@ def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
- def REG : Ii8<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
+ def REG : PseudoI<(outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
imm:$src3))]>;
- def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
+ def MEM : PseudoI<(outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
[(set VR128:$dst, (int_x86_sse42_pcmpistrm128
VR128:$src1, (load addr:$src2), imm:$src3))]>;
}
@@ -5068,12 +4957,12 @@ let Defs = [XMM0, EFLAGS] in {
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
- def REG : Ii8<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
+ def REG : PseudoI<(outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
[(set VR128:$dst, (int_x86_sse42_pcmpestrm128
VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
- def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
+ def MEM : PseudoI<(outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
[(set VR128:$dst, (int_x86_sse42_pcmpestrm128
VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}
@@ -5555,6 +5444,23 @@ def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
(VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
+def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
+ (i32 imm)),
+ (VINSERTF128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
+ (i32 imm)),
+ (VINSERTF128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
+ (i32 imm)),
+ (VINSERTF128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
+ (i32 imm)),
+ (VINSERTF128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+
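+// INSERT_get_vinsertf128_imm is an SDNodeXForm that rescales the element
+// index carried by vinsertf128_insert into the 0/1 128-bit-lane immediate
+// VINSERTF128rr expects (EXTRACT_get_vextractf128_imm below is its
+// extract-side counterpart).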
def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
(VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
@@ -5562,6 +5468,23 @@ def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
(VEXTRACTF128rr VR256:$src1, imm:$src2)>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v4f32 (VEXTRACTF128rr
+ (v8f32 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v2f64 (VEXTRACTF128rr
+ (v4f64 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v4i32 (VEXTRACTF128rr
+ (v8i32 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v2i64 (VEXTRACTF128rr
+ (v4i64 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
(VBROADCASTF128 addr:$src)>;
@@ -5673,19 +5596,14 @@ def : Pat<(X86Movddup (memopv2f64 addr:$src)),
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
(MOVDDUPrm addr:$src)>;
-def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
- (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
-def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
- (MOVDDUPrm addr:$src)>;
-
-def : Pat<(X86Movddup (memopv2i64 addr:$src)),
+def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
-def : Pat<(X86Movddup (memopv2i64 addr:$src)),
+def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
(MOVDDUPrm addr:$src)>;
-def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
+def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
-def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
+def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
(MOVDDUPrm addr:$src)>;
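// (These bitconverts now run in the right direction: the load is viewed as
// the v2f64 that X86Movddup actually operates on, rather than through the
// old v4f32/v4i32 views.)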
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
@@ -5700,6 +5618,7 @@ def : Pat<(X86Movddup (bc_v2f64
(v2i64 (scalar_to_vector (loadi64 addr:$src))))),
(MOVDDUPrm addr:$src)>;
+
// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
(VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
@@ -5724,9 +5643,9 @@ def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
- (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+ (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
- (UNPCKLPSrm VR128:$src1, addr:$src2)>;
+ (UNPCKLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
(VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
@@ -5735,9 +5654,9 @@ def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
- (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+ (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
- (UNPCKLPSrm VR128:$src1, addr:$src2)>;
+ (UNPCKHPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
(VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
@@ -5812,10 +5731,18 @@ def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
(MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
+// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
+// problem is during lowering, where it's not possible to recognize the load
+// fold because it has two uses through a bitcast. One use disappears at isel
+// time and the fold opportunity reappears.
+def : Pat<(v2f64 (X86Movddup VR128:$src)),
+ (UNPCKLPDrr VR128:$src, VR128:$src)>;
+
// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))),
(MOVHPDrm VR128:$src1, addr:$src2)>;
+
// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the problem
// is during lowering, where it's not possible to recognize the load fold because
// it has two uses through a bitcast. One use disappears at isel time and the
@@ -5878,31 +5805,18 @@ def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
(MOVSLDUPrm addr:$src)>;
// Shuffle with PSHUFHW
-def : Pat<(v8i16 (X86PShufhwLd addr:$src, (i8 imm:$imm))),
- (PSHUFHWmi addr:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
(PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
(PSHUFHWmi addr:$src, imm:$imm)>;
// Shuffle with PSHUFLW
-def : Pat<(v8i16 (X86PShuflwLd addr:$src, (i8 imm:$imm))),
- (PSHUFLWmi addr:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
(PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
(PSHUFLWmi addr:$src, imm:$imm)>;
// Shuffle with PALIGN
-def : Pat<(v1i64 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
- (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
-def : Pat<(v2i32 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
- (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
-def : Pat<(v4i16 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
- (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
-def : Pat<(v8i8 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
- (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
-
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
@@ -5920,6 +5834,15 @@ def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
def : Pat<(X86Movlps VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
(MOVLPSrm VR128:$src1, addr:$src2)>;
+// FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
+// problem is during lowering, where it's not possible to recognize the load
+// fold because it has two uses through a bitcast. One use disappears at isel
+// time and the fold opportunity reappears.
+def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
+
+def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
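+// The EXTRACT_SUBREG here reinterprets the low 64 bits of $src2 as the FR64
+// operand MOVSDrr takes, so the register-register X86Movlps collapses to a
+// plain movsd of the low qword.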
// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
diff --git a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
new file mode 100644
index 0000000..8278568
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -0,0 +1,746 @@
+//===- X86InstrShiftRotate.td - Shift and Rotate Instrs ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the shift and rotate instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: Someone needs to smear multipattern goodness all over this file.
+
+let Defs = [EFLAGS] in {
+
+let Constraints = "$src1 = $dst" in {
+let Uses = [CL] in {
+def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "shl{b}\t{%cl, $dst|$dst, CL}",
+ [(set GR8:$dst, (shl GR8:$src1, CL))]>;
+def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
+ "shl{w}\t{%cl, $dst|$dst, CL}",
+ [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
+def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
+ "shl{l}\t{%cl, $dst|$dst, CL}",
+ [(set GR32:$dst, (shl GR32:$src1, CL))]>;
+def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
+ "shl{q}\t{%cl, $dst|$dst, %CL}",
+ [(set GR64:$dst, (shl GR64:$src1, CL))]>;
+} // Uses = [CL]
+
+def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
+ "shl{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
+
+let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
+def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
+ "shl{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
+def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
+ "shl{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
+def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
+ (ins GR64:$src1, i8imm:$src2),
+ "shl{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
+
+// NOTE: We don't include patterns for shifts of a register by one, because
+// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
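+// That Pat lives elsewhere (with the ADD defs); schematically, per width:
+//   def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;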
+def SHL8r1 : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
+ "shl{b}\t$dst", []>;
+def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
+ "shl{w}\t$dst", []>, OpSize;
+def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
+ "shl{l}\t$dst", []>;
+def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
+ "shl{q}\t$dst", []>;
+} // isConvertibleToThreeAddress = 1
+} // Constraints = "$src = $dst"
+
+
+// FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
+// using CL?
+let Uses = [CL] in {
+def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
+ "shl{b}\t{%cl, $dst|$dst, CL}",
+ [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
+def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
+ "shl{w}\t{%cl, $dst|$dst, CL}",
+ [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
+ "shl{l}\t{%cl, $dst|$dst, CL}",
+ [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>;
+def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
+ "shl{q}\t{%cl, $dst|$dst, %CL}",
+ [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
+}
+def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
+ "shl{b}\t{$src, $dst|$dst, $src}",
+ [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
+ "shl{w}\t{$src, $dst|$dst, $src}",
+ [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ OpSize;
+def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
+ "shl{l}\t{$src, $dst|$dst, $src}",
+ [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
+ "shl{q}\t{$src, $dst|$dst, $src}",
+ [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+
+// Shift by 1
+def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
+ "shl{b}\t$dst",
+ [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
+ "shl{w}\t$dst",
+ [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ OpSize;
+def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
+ "shl{l}\t$dst",
+ [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
+ "shl{q}\t$dst",
+ [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+
+let Constraints = "$src1 = $dst" in {
+let Uses = [CL] in {
+def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "shr{b}\t{%cl, $dst|$dst, CL}",
+ [(set GR8:$dst, (srl GR8:$src1, CL))]>;
+def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
+ "shr{w}\t{%cl, $dst|$dst, CL}",
+ [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize;
+def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
+ "shr{l}\t{%cl, $dst|$dst, CL}",
+ [(set GR32:$dst, (srl GR32:$src1, CL))]>;
+def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
+ "shr{q}\t{%cl, $dst|$dst, %CL}",
+ [(set GR64:$dst, (srl GR64:$src1, CL))]>;
+}
+
+def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ "shr{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
+def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
+ "shr{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
+def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
+ "shr{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>;
+def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
+ "shr{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
+
+// Shift right by 1
+def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
+ "shr{b}\t$dst",
+ [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
+def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
+ "shr{w}\t$dst",
+ [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize;
+def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
+ "shr{l}\t$dst",
+ [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
+def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
+ "shr{q}\t$dst",
+ [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
+} // Constraints = "$src = $dst"
+
+
+let Uses = [CL] in {
+def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
+ "shr{b}\t{%cl, $dst|$dst, CL}",
+ [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
+def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
+ "shr{w}\t{%cl, $dst|$dst, CL}",
+ [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
+ OpSize;
+def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
+ "shr{l}\t{%cl, $dst|$dst, CL}",
+ [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>;
+def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
+ "shr{q}\t{%cl, $dst|$dst, %CL}",
+ [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
+}
+def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
+ "shr{b}\t{$src, $dst|$dst, $src}",
+ [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
+ "shr{w}\t{$src, $dst|$dst, $src}",
+ [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ OpSize;
+def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
+ "shr{l}\t{$src, $dst|$dst, $src}",
+ [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
+ "shr{q}\t{$src, $dst|$dst, $src}",
+ [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+
+// Shift by 1
+def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
+ "shr{b}\t$dst",
+ [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
+ "shr{w}\t$dst",
+ [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,OpSize;
+def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
+ "shr{l}\t$dst",
+ [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
+ "shr{q}\t$dst",
+ [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+
+let Constraints = "$src1 = $dst" in {
+let Uses = [CL] in {
+def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "sar{b}\t{%cl, $dst|$dst, CL}",
+ [(set GR8:$dst, (sra GR8:$src1, CL))]>;
+def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
+ "sar{w}\t{%cl, $dst|$dst, CL}",
+ [(set GR16:$dst, (sra GR16:$src1, CL))]>, OpSize;
+def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
+ "sar{l}\t{%cl, $dst|$dst, CL}",
+ [(set GR32:$dst, (sra GR32:$src1, CL))]>;
+def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
+ "sar{q}\t{%cl, $dst|$dst, %CL}",
+ [(set GR64:$dst, (sra GR64:$src1, CL))]>;
+}
+
+def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
+ "sar{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
+def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
+ "sar{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
+ OpSize;
+def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
+ "sar{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>;
+def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
+ (ins GR64:$src1, i8imm:$src2),
+ "sar{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
+
+// Shift by 1
+def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "sar{b}\t$dst",
+ [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
+def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
+ "sar{w}\t$dst",
+ [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize;
+def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
+ "sar{l}\t$dst",
+ [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
+def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
+ "sar{q}\t$dst",
+ [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
+} // Constraints = "$src1 = $dst"
+
+
+let Uses = [CL] in {
+def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
+ "sar{b}\t{%cl, $dst|$dst, CL}",
+ [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
+def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
+ "sar{w}\t{%cl, $dst|$dst, CL}",
+ [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
+ "sar{l}\t{%cl, $dst|$dst, CL}",
+ [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>;
+def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
+ "sar{q}\t{%cl, $dst|$dst, %CL}",
+ [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
+}
+def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
+ "sar{b}\t{$src, $dst|$dst, $src}",
+ [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
+ "sar{w}\t{$src, $dst|$dst, $src}",
+ [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ OpSize;
+def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
+ "sar{l}\t{$src, $dst|$dst, $src}",
+ [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
+ "sar{q}\t{$src, $dst|$dst, $src}",
+ [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+
+// Shift by 1
+def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
+ "sar{b}\t$dst",
+ [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
+ "sar{w}\t$dst",
+ [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ OpSize;
+def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
+ "sar{l}\t$dst",
+ [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
+ "sar{q}\t$dst",
+ [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+
+//===----------------------------------------------------------------------===//
+// Rotate instructions
+//===----------------------------------------------------------------------===//
+
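+// Note: the RCL/RCR definitions below have empty pattern lists ([]) because
+// they rotate through the carry flag, which has no SelectionDAG node to
+// match; they exist for the assembler and disassembler only. Informally, a
+// 1-bit rcl computes: new CF = old MSB, dst = (dst << 1) | old CF.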
+let Constraints = "$src1 = $dst" in {
+def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
+ "rcl{b}\t$dst", []>;
+def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
+ "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
+ "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
+
+def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
+ "rcl{w}\t$dst", []>, OpSize;
+def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+let Uses = [CL] in
+def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
+ "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+
+def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
+ "rcl{l}\t$dst", []>;
+def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
+ "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
+
+
+def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
+ "rcl{q}\t$dst", []>;
+def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
+ "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
+ "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
+
+
+def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
+ "rcr{b}\t$dst", []>;
+def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
+ "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
+ "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
+
+def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
+ "rcr{w}\t$dst", []>, OpSize;
+def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+let Uses = [CL] in
+def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
+ "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+
+def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
+ "rcr{l}\t$dst", []>;
+def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
+ "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
+
+def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
+ "rcr{q}\t$dst", []>;
+def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
+ "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+let Uses = [CL] in
+def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
+ "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
+
+} // Constraints = "$src1 = $dst"
+
+def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
+ "rcl{b}\t$dst", []>;
+def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
+ "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
+ "rcl{w}\t$dst", []>, OpSize;
+def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, i8imm:$cnt),
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
+ "rcl{l}\t$dst", []>;
+def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, i8imm:$cnt),
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
+ "rcl{q}\t$dst", []>;
+def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
+ "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+
+def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
+ "rcr{b}\t$dst", []>;
+def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, i8imm:$cnt),
+ "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
+ "rcr{w}\t$dst", []>, OpSize;
+def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, i8imm:$cnt),
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
+ "rcr{l}\t$dst", []>;
+def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, i8imm:$cnt),
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
+ "rcr{q}\t$dst", []>;
+def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
+ "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+
+let Uses = [CL] in {
+def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
+ "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
+def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
+ "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
+ "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
+def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
+ "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
+
+def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
+ "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
+def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
+ "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
+ "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
+def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
+ "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
+}
+
+let Constraints = "$src1 = $dst" in {
+// FIXME: provide shorter instructions when imm8 == 1
+let Uses = [CL] in {
+def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "rol{b}\t{%cl, $dst|$dst, CL}",
+ [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
+def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
+ "rol{w}\t{%cl, $dst|$dst, CL}",
+ [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize;
+def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
+ "rol{l}\t{%cl, $dst|$dst, CL}",
+ [(set GR32:$dst, (rotl GR32:$src1, CL))]>;
+def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
+ "rol{q}\t{%cl, $dst|$dst, %CL}",
+ [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
+}
+
+def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
+ "rol{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
+def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
+ "rol{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>,
+ OpSize;
+def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
+ "rol{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>;
+def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
+ (ins GR64:$src1, i8imm:$src2),
+ "rol{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
+
+// Rotate by 1
+def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "rol{b}\t$dst",
+ [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
+def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
+ "rol{w}\t$dst",
+ [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize;
+def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
+ "rol{l}\t$dst",
+ [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
+def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
+ "rol{q}\t$dst",
+ [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
+} // Constraints = "$src1 = $dst"
+
+let Uses = [CL] in {
+def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
+ "rol{b}\t{%cl, $dst|$dst, CL}",
+ [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
+def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
+ "rol{w}\t{%cl, $dst|$dst, CL}",
+ [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
+ "rol{l}\t{%cl, $dst|$dst, CL}",
+ [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>;
+def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
+ "rol{q}\t{%cl, $dst|$dst, %CL}",
+ [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
+}
+def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src1),
+ "rol{b}\t{$src1, $dst|$dst, $src1}",
+ [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src1),
+ "rol{w}\t{$src1, $dst|$dst, $src1}",
+ [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
+ OpSize;
+def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src1),
+ "rol{l}\t{$src1, $dst|$dst, $src1}",
+ [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src1),
+ "rol{q}\t{$src1, $dst|$dst, $src1}",
+ [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+
+// Rotate by 1
+def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
+ "rol{b}\t$dst",
+ [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
+ "rol{w}\t$dst",
+ [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ OpSize;
+def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
+ "rol{l}\t$dst",
+ [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
+ "rol{q}\t$dst",
+ [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+
+let Constraints = "$src1 = $dst" in {
+let Uses = [CL] in {
+def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "ror{b}\t{%cl, $dst|$dst, CL}",
+ [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
+def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
+ "ror{w}\t{%cl, $dst|$dst, CL}",
+ [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize;
+def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
+ "ror{l}\t{%cl, $dst|$dst, CL}",
+ [(set GR32:$dst, (rotr GR32:$src1, CL))]>;
+def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
+ "ror{q}\t{%cl, $dst|$dst, %CL}",
+ [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
+}
+
+def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
+ "ror{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
+def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
+ "ror{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>,
+ OpSize;
+def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
+ "ror{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>;
+def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
+ (ins GR64:$src1, i8imm:$src2),
+ "ror{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
+
+// Rotate by 1
+def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "ror{b}\t$dst",
+ [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
+def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
+ "ror{w}\t$dst",
+ [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize;
+def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
+ "ror{l}\t$dst",
+ [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
+def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
+ "ror{q}\t$dst",
+ [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
+} // Constraints = "$src1 = $dst"
+
+let Uses = [CL] in {
+def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
+ "ror{b}\t{%cl, $dst|$dst, CL}",
+ [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
+def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
+ "ror{w}\t{%cl, $dst|$dst, CL}",
+ [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
+ "ror{l}\t{%cl, $dst|$dst, CL}",
+ [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>;
+def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
+ "ror{q}\t{%cl, $dst|$dst, %CL}",
+ [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
+}
+def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
+ "ror{b}\t{$src, $dst|$dst, $src}",
+ [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
+ "ror{w}\t{$src, $dst|$dst, $src}",
+ [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ OpSize;
+def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
+ "ror{l}\t{$src, $dst|$dst, $src}",
+ [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
+ "ror{q}\t{$src, $dst|$dst, $src}",
+ [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+
+// Rotate by 1
+def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
+ "ror{b}\t$dst",
+ [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
+ "ror{w}\t$dst",
+ [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ OpSize;
+def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
+ "ror{l}\t$dst",
+ [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
+ "ror{q}\t$dst",
+ [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+
+
+//===----------------------------------------------------------------------===//
+// Double shift instructions (generalizations of rotate)
+//===----------------------------------------------------------------------===//
+
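+// Informally, shld/shrd shift across a register pair:
+//   shld dst, src, cnt: dst = (dst << cnt) | (src >> (size - cnt))
+//   shrd dst, src, cnt: dst = (dst >> cnt) | (src << (size - cnt))
+// With src == dst these reduce to rotl/rotr, hence "generalizations of
+// rotate" above.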
+let Constraints = "$src1 = $dst" in {
+
+let Uses = [CL] in {
+def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2),
+ "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
+ TB, OpSize;
+def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2),
+ "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
+ TB, OpSize;
+def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB;
+def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB;
+def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
+ TB;
+def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
+ TB;
+}
+
+let isCommutable = 1 in { // These instructions commute to each other.
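+// Informally (16-bit case): shld(a, b, c) == shrd(b, a, 16 - c), since both
+// compute (a << c) | (b >> (16 - c)).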
+def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
+ (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2, i8imm:$src3),
+ "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
+ (i8 imm:$src3)))]>,
+ TB, OpSize;
+def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
+ (outs GR16:$dst),
+ (ins GR16:$src1, GR16:$src2, i8imm:$src3),
+ "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
+ (i8 imm:$src3)))]>,
+ TB, OpSize;
+def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
+ (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2, i8imm:$src3),
+ "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
+ (i8 imm:$src3)))]>,
+ TB;
+def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
+ (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2, i8imm:$src3),
+ "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
+ (i8 imm:$src3)))]>,
+ TB;
+def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
+ (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2, i8imm:$src3),
+ "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
+ (i8 imm:$src3)))]>,
+ TB;
+def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
+ (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2, i8imm:$src3),
+ "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
+ (i8 imm:$src3)))]>,
+ TB;
+}
+} // Constraints = "$src1 = $dst"
+
+let Uses = [CL] in {
+def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
+ addr:$dst)]>, TB, OpSize;
+def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
+ "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
+ addr:$dst)]>, TB, OpSize;
+
+def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
+ addr:$dst)]>, TB;
+def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
+ "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
+ addr:$dst)]>, TB;
+
+def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
+ addr:$dst)]>, TB;
+def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
+ addr:$dst)]>, TB;
+}
+
+def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
+ (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
+ "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB, OpSize;
+def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
+ (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
+ "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB, OpSize;
+
+def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
+ (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
+ "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB;
+def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
+ (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
+ "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB;
+
+def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
+ (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
+ "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB;
+def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
+ (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
+ "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
+ (i8 imm:$src3)), addr:$dst)]>,
+ TB;
+
+} // Defs = [EFLAGS]
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSystem.td b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
new file mode 100644
index 0000000..1a58ba0
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
@@ -0,0 +1,390 @@
+//===- X86InstrSystem.td - System Instructions -------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the X86 instructions that are generally used in
+// privileged modes. These are not typically used by the compiler, but are
+// supported for the assembler and disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+let Defs = [RAX, RDX] in
+ def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>, TB;
+
+let Defs = [RAX, RCX, RDX] in
+ def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", []>, TB;
+
+// CPU flow control instructions
+
+let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
+ def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
+ def UD2B : I<0xB9, RawFrm, (outs), (ins), "ud2b", []>, TB;
+}
+
+def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", []>;
+def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", []>, TB;
+
+// Interrupt and SysCall Instructions.
+let Uses = [EFLAGS] in
+ def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>;
+def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
+ [(int_x86_int (i8 3))]>;
+def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap",
+ [(int_x86_int imm:$trap)]>;
+
+def SYSCALL : I<0x05, RawFrm, (outs), (ins), "syscall", []>, TB;
+def SYSRETL : I<0x07, RawFrm, (outs), (ins), "sysretl", []>, TB;
+def SYSRETQ : RI<0x07, RawFrm, (outs), (ins), "sysretq", []>, TB,
+ Requires<[In64BitMode]>;
+
+def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", []>, TB;
+
+def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit", []>, TB,
+ Requires<[In32BitMode]>;
+def SYSEXIT64 : RI<0x35, RawFrm, (outs), (ins), "sysexit", []>, TB,
+ Requires<[In64BitMode]>;
+
+def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iretw", []>, OpSize;
+def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", []>;
+def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", []>,
+ Requires<[In64BitMode]>;
+
+
+//===----------------------------------------------------------------------===//
+// Input/Output Instructions.
+//
+let Defs = [AL], Uses = [DX] in
+def IN8rr : I<0xEC, RawFrm, (outs), (ins),
+ "in{b}\t{%dx, %al|%AL, %DX}", []>;
+let Defs = [AX], Uses = [DX] in
+def IN16rr : I<0xED, RawFrm, (outs), (ins),
+ "in{w}\t{%dx, %ax|%AX, %DX}", []>, OpSize;
+let Defs = [EAX], Uses = [DX] in
+def IN32rr : I<0xED, RawFrm, (outs), (ins),
+ "in{l}\t{%dx, %eax|%EAX, %DX}", []>;
+
+let Defs = [AL] in
+def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i8imm:$port),
+ "in{b}\t{$port, %al|%AL, $port}", []>;
+let Defs = [AX] in
+def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
+ "in{w}\t{$port, %ax|%AX, $port}", []>, OpSize;
+let Defs = [EAX] in
+def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i8imm:$port),
+ "in{l}\t{$port, %eax|%EAX, $port}", []>;
+
+let Uses = [DX, AL] in
+def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
+ "out{b}\t{%al, %dx|%DX, %AL}", []>;
+let Uses = [DX, AX] in
+def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
+ "out{w}\t{%ax, %dx|%DX, %AX}", []>, OpSize;
+let Uses = [DX, EAX] in
+def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
+ "out{l}\t{%eax, %dx|%DX, %EAX}", []>;
+
+let Uses = [AL] in
+def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i8imm:$port),
+ "out{b}\t{%al, $port|$port, %AL}", []>;
+let Uses = [AX] in
+def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
+ "out{w}\t{%ax, $port|$port, %AX}", []>, OpSize;
+let Uses = [EAX] in
+def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i8imm:$port),
+ "out{l}\t{%eax, $port|$port, %EAX}", []>;
+
+def IN8 : I<0x6C, RawFrm, (outs), (ins), "ins{b}", []>;
+def IN16 : I<0x6D, RawFrm, (outs), (ins), "ins{w}", []>, OpSize;
+def IN32 : I<0x6D, RawFrm, (outs), (ins), "ins{l}", []>;
+
+//===----------------------------------------------------------------------===//
+// Moves to and from debug registers
+
+def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+//===----------------------------------------------------------------------===//
+// Moves to and from control registers
+
+def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+//===----------------------------------------------------------------------===//
+// Segment override instruction prefixes
+
+def CS_PREFIX : I<0x2E, RawFrm, (outs), (ins), "cs", []>;
+def SS_PREFIX : I<0x36, RawFrm, (outs), (ins), "ss", []>;
+def DS_PREFIX : I<0x3E, RawFrm, (outs), (ins), "ds", []>;
+def ES_PREFIX : I<0x26, RawFrm, (outs), (ins), "es", []>;
+def FS_PREFIX : I<0x64, RawFrm, (outs), (ins), "fs", []>;
+def GS_PREFIX : I<0x65, RawFrm, (outs), (ins), "gs", []>;
+
+
+//===----------------------------------------------------------------------===//
+// Moves to and from segment registers.
+//
+
+def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src),
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+//===----------------------------------------------------------------------===//
+// Segmentation support instructions.
+
+def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;
+
+def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+ "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+ "lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+
+// i16mem operand in LAR32rm and GR32 operand in LAR32rr is not a typo.
+def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
+ "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+ "lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
+// i16mem operand in LAR64rm and GR32 operand in LAR64rr is not a typo.
+def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+ "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
+ "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+ "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+ "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+ "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+ "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
+
+def STRr : I<0x00, MRM1r, (outs GR16:$dst), (ins),
+ "str{w}\t{$dst}", []>, TB;
+def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
+ "str{w}\t{$dst}", []>, TB;
+def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
+ "ltr{w}\t{$src}", []>, TB;
+def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
+ "ltr{w}\t{$src}", []>, TB;
+
+def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
+ "push{w}\t%cs", []>, Requires<[In32BitMode]>, OpSize;
+def PUSHCS32 : I<0x0E, RawFrm, (outs), (ins),
+ "push{l}\t%cs", []>, Requires<[In32BitMode]>;
+def PUSHSS16 : I<0x16, RawFrm, (outs), (ins),
+ "push{w}\t%ss", []>, Requires<[In32BitMode]>, OpSize;
+def PUSHSS32 : I<0x16, RawFrm, (outs), (ins),
+ "push{l}\t%ss", []>, Requires<[In32BitMode]>;
+def PUSHDS16 : I<0x1E, RawFrm, (outs), (ins),
+ "push{w}\t%ds", []>, Requires<[In32BitMode]>, OpSize;
+def PUSHDS32 : I<0x1E, RawFrm, (outs), (ins),
+ "push{l}\t%ds", []>, Requires<[In32BitMode]>;
+def PUSHES16 : I<0x06, RawFrm, (outs), (ins),
+ "push{w}\t%es", []>, Requires<[In32BitMode]>, OpSize;
+def PUSHES32 : I<0x06, RawFrm, (outs), (ins),
+ "push{l}\t%es", []>, Requires<[In32BitMode]>;
+
+def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins),
+ "push{w}\t%fs", []>, OpSize, TB;
+def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins),
+ "push{l}\t%fs", []>, TB, Requires<[In32BitMode]>;
+def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins),
+ "push{w}\t%gs", []>, OpSize, TB;
+def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins),
+ "push{l}\t%gs", []>, TB, Requires<[In32BitMode]>;
+
+def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins),
+ "push{q}\t%fs", []>, TB;
+def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins),
+ "push{q}\t%gs", []>, TB;
+
+// No "pop cs" instruction.
+def POPSS16 : I<0x17, RawFrm, (outs), (ins),
+ "pop{w}\t%ss", []>, OpSize, Requires<[In32BitMode]>;
+def POPSS32 : I<0x17, RawFrm, (outs), (ins),
+ "pop{l}\t%ss", []> , Requires<[In32BitMode]>;
+
+def POPDS16 : I<0x1F, RawFrm, (outs), (ins),
+ "pop{w}\t%ds", []>, OpSize, Requires<[In32BitMode]>;
+def POPDS32 : I<0x1F, RawFrm, (outs), (ins),
+ "pop{l}\t%ds", []> , Requires<[In32BitMode]>;
+
+def POPES16 : I<0x07, RawFrm, (outs), (ins),
+ "pop{w}\t%es", []>, OpSize, Requires<[In32BitMode]>;
+def POPES32 : I<0x07, RawFrm, (outs), (ins),
+ "pop{l}\t%es", []> , Requires<[In32BitMode]>;
+
+def POPFS16 : I<0xa1, RawFrm, (outs), (ins),
+ "pop{w}\t%fs", []>, OpSize, TB;
+def POPFS32 : I<0xa1, RawFrm, (outs), (ins),
+ "pop{l}\t%fs", []>, TB , Requires<[In32BitMode]>;
+def POPFS64 : I<0xa1, RawFrm, (outs), (ins),
+ "pop{q}\t%fs", []>, TB;
+
+def POPGS16 : I<0xa9, RawFrm, (outs), (ins),
+ "pop{w}\t%gs", []>, OpSize, TB;
+def POPGS32 : I<0xa9, RawFrm, (outs), (ins),
+ "pop{l}\t%gs", []>, TB , Requires<[In32BitMode]>;
+def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
+ "pop{q}\t%gs", []>, TB;
+
+
+def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
+ "lds{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
+ "lds{l}\t{$src, $dst|$dst, $src}", []>;
+
+def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
+ "lss{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
+ "lss{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
+ "lss{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
+ "les{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
+ "les{l}\t{$src, $dst|$dst, $src}", []>;
+
+def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
+ "lfs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
+ "lfs{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
+ "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
+ "lgs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
+ "lgs{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
+ "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;
+
+
+def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg),
+ "verr\t$seg", []>, TB;
+def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg),
+ "verr\t$seg", []>, TB;
+def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg),
+ "verw\t$seg", []>, TB;
+def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
+ "verw\t$seg", []>, TB;
+
+//===----------------------------------------------------------------------===//
+// Descriptor-table support instructions
+
+def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+ "sgdtw\t$dst", []>, TB, OpSize, Requires<[In32BitMode]>;
+def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+ "sgdt\t$dst", []>, TB;
+def SIDT16m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+ "sidtw\t$dst", []>, TB, OpSize, Requires<[In32BitMode]>;
+def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+ "sidt\t$dst", []>, TB;
+def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
+ "sldt{w}\t$dst", []>, TB, OpSize;
+def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
+ "sldt{w}\t$dst", []>, TB;
+def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
+ "sldt{l}\t$dst", []>, TB;
+
+// LLDT is not interpreted specially in 64-bit mode because there is no sign
+// extension.
+def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
+ "sldt{q}\t$dst", []>, TB;
+def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
+ "sldt{q}\t$dst", []>, TB;
+
+def LGDT16m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
+ "lgdtw\t$src", []>, TB, OpSize, Requires<[In32BitMode]>;
+def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
+ "lgdt\t$src", []>, TB;
+def LIDT16m : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
+ "lidtw\t$src", []>, TB, OpSize, Requires<[In32BitMode]>;
+def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src),
+ "lidt\t$src", []>, TB;
+def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src),
+ "lldt{w}\t$src", []>, TB;
+def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
+ "lldt{w}\t$src", []>, TB;
+
+//===----------------------------------------------------------------------===//
+// Specialized register support
+def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB;
+def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB;
+def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB;
+
+def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins),
+ "smsw{w}\t$dst", []>, OpSize, TB;
+def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins),
+ "smsw{l}\t$dst", []>, TB;
+// No 64-bit memory form is encodable; use SMSW16m.
+def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
+ "smsw{q}\t$dst", []>, TB;
+
+// For memory operands, there is only a 16-bit form
+def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins),
+ "smsw{w}\t$dst", []>, TB;
+
+def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
+ "lmsw{w}\t$src", []>, TB;
+def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src),
+ "lmsw{w}\t$src", []>, TB;
+
+def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB;
+
+//===----------------------------------------------------------------------===//
+// Cache instructions
+def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB;
+def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", []>, TB;
+
diff --git a/contrib/llvm/lib/Target/X86/X86InstrVMX.td b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
new file mode 100644
index 0000000..daf61e4
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
@@ -0,0 +1,54 @@
+//===- X86InstrVMX.td - VMX Instruction Set Extension ------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the instructions that make up the Intel VMX instruction
+// set.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// VMX instructions
+
+// 66 0F 38 80
+def INVEPT : I<0x80, RawFrm, (outs), (ins), "invept", []>, OpSize, T8;
+// 66 0F 38 81
+def INVVPID : I<0x81, RawFrm, (outs), (ins), "invvpid", []>, OpSize, T8;
+// 0F 01 C1
+def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
+def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
+ "vmclear\t$vmcs", []>, OpSize, TB;
+// 0F 01 C2
+def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
+// 0F 01 C3
+def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
+def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
+ "vmptrld\t$vmcs", []>, TB;
+def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
+ "vmptrst\t$vmcs", []>, TB;
+def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
+// 0F 01 C4
+def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
+def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
+ "vmxon\t{$vmxon}", []>, XS;
+
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
index 6f0a8d9..3f88fa6 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -19,7 +19,7 @@
#include "llvm/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/Valgrind.h"
+#include "llvm/Support/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;
@@ -127,9 +127,17 @@ extern "C" {
"movaps %xmm6, 96(%rsp)\n"
"movaps %xmm7, 112(%rsp)\n"
// JIT callee
+#ifdef _WIN64
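+ // The Win64 ABI passes the first two integer arguments in RCX/RDX (not
+ // RDI/RSI) and requires the caller to reserve 32 bytes of shadow space
+ // on the stack for the callee.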
+ "subq $32, %rsp\n"
+ "movq %rbp, %rcx\n" // Pass prev frame and return address
+ "movq 8(%rbp), %rdx\n"
+ "call " ASMPREFIX "X86CompilationCallback2\n"
+ "addq $32, %rsp\n"
+#else
"movq %rbp, %rdi\n" // Pass prev frame and return address
"movq 8(%rbp), %rsi\n"
"call " ASMPREFIX "X86CompilationCallback2\n"
+#endif
// Restore all XMM arg registers
"movaps 112(%rsp), %xmm7\n"
"movaps 96(%rsp), %xmm6\n"
@@ -333,11 +341,11 @@ extern "C" {
extern "C" {
#if !(defined (X86_64_JIT) && defined(_MSC_VER))
// the following function is called only from this translation unit,
- // unless we are under 64bit Windows with MSC, where there is
+ // unless we are on 64-bit Windows with MSVC, where there is
// no support for inline assembly
static
#endif
-void ATTRIBUTE_USED
+void LLVM_ATTRIBUTE_USED
X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
intptr_t *RetAddrLoc = &StackPtr[1];
assert(*RetAddrLoc == RetAddr &&
@@ -462,7 +470,7 @@ TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
JITCodeEmitter &JCE) {
- // Note, we cast to intptr_t here to silence a -pedantic warning that
+ // Note, we cast to intptr_t here to silence a -pedantic warning that
// complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
diff --git a/contrib/llvm/lib/Target/X86/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/X86MCAsmInfo.cpp
index 36badb4..6686214 100644
--- a/contrib/llvm/lib/Target/X86/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCAsmInfo.cpp
@@ -17,6 +17,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
enum AsmWriterFlavorTy {
@@ -68,7 +69,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &Triple) {
DwarfUsesInlineInfoSection = true;
// Exceptions handling
- ExceptionsType = ExceptionHandling::Dwarf;
+ ExceptionsType = ExceptionHandling::DwarfTable;
}
X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
@@ -88,8 +89,8 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
SupportsDebugInformation = true;
// Exceptions handling
- ExceptionsType = ExceptionHandling::Dwarf;
-
+ ExceptionsType = ExceptionHandling::DwarfTable;
+
// OpenBSD has buggy support for .quad in 32-bit mode, just split into two
// .words.
if (T.getOS() == Triple::OpenBSD && T.getArch() == Triple::x86)
@@ -98,13 +99,15 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
const MCSection *X86ELFMCAsmInfo::
getNonexecutableStackSection(MCContext &Ctx) const {
- return Ctx.getELFSection(".note.GNU-stack", MCSectionELF::SHT_PROGBITS,
- 0, SectionKind::getMetadata(), false);
+ return Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS,
+ 0, SectionKind::getMetadata());
}
X86MCAsmInfoCOFF::X86MCAsmInfoCOFF(const Triple &Triple) {
- if (Triple.getArch() == Triple::x86_64)
+ if (Triple.getArch() == Triple::x86_64) {
GlobalPrefix = "";
+ PrivateGlobalPrefix = ".L";
+ }
AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
diff --git a/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
index 9564fe0..e6dc74e 100644
--- a/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-emitter"
+#define DEBUG_TYPE "mccodeemitter"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86FixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -37,27 +38,6 @@ public:
~X86MCCodeEmitter() {}
- unsigned getNumFixupKinds() const {
- return 5;
- }
-
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
- const static MCFixupKindInfo Infos[] = {
- { "reloc_pcrel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_pcrel_1byte", 0, 1 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_pcrel_2byte", 0, 2 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }
- };
-
- if (Kind < FirstTargetFixupKind)
- return MCCodeEmitter::getFixupKindInfo(Kind);
-
- assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
- "Invalid kind!");
- return Infos[Kind - FirstTargetFixupKind];
- }
-
static unsigned GetX86RegNum(const MCOperand &MO) {
return X86RegisterInfo::getX86RegNum(MO.getReg());
}
@@ -170,41 +150,77 @@ static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
unsigned Size = X86II::getSizeOfImm(TSFlags);
bool isPCRel = X86II::isImmPCRel(TSFlags);
- switch (Size) {
- default: assert(0 && "Unknown immediate size");
- case 1: return isPCRel ? MCFixupKind(X86::reloc_pcrel_1byte) : FK_Data_1;
- case 2: return isPCRel ? MCFixupKind(X86::reloc_pcrel_2byte) : FK_Data_2;
- case 4: return isPCRel ? MCFixupKind(X86::reloc_pcrel_4byte) : FK_Data_4;
- case 8: assert(!isPCRel); return FK_Data_8;
- }
+ return MCFixup::getKindForSize(Size, isPCRel);
+}
+
+/// Is32BitMemOperand - Return true if the specified instruction with a memory
+/// operand should emit the 0x67 prefix byte in 64-bit mode due to a 32-bit
+/// memory operand. Op specifies the operand # of the memory operand.
+static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
+ const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 && X86::GR32RegClass.contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 && X86::GR32RegClass.contains(IndexReg.getReg())))
+ return true;
+ return false;
}
+/// StartsWithGlobalOffsetTable - Return true for the simple cases where this
+/// expression starts with _GLOBAL_OFFSET_TABLE_. This is needed to support
+/// PIC on ELF i386, as that symbol is magic. We check only the simple cases
+/// that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
+/// of a binary expression.
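+/// (For illustration: on i386 the assembler sees forms like
+/// "_GLOBAL_OFFSET_TABLE_" alone, or a binary expression with it on the
+/// left-hand side, e.g. "_GLOBAL_OFFSET_TABLE_+(.Lpc-.L0$pb)".)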
+static bool StartsWithGlobalOffsetTable(const MCExpr *Expr) {
+ if (Expr->getKind() == MCExpr::Binary) {
+ const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
+ Expr = BE->getLHS();
+ }
+
+ if (Expr->getKind() != MCExpr::SymbolRef)
+ return false;
+
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
+ const MCSymbol &S = Ref->getSymbol();
+ return S.getName() == "_GLOBAL_OFFSET_TABLE_";
+}
void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
- // If this is a simple integer displacement that doesn't require a relocation,
- // emit it now.
+ const MCExpr *Expr = NULL;
if (DispOp.isImm()) {
- // FIXME: is this right for pc-rel encoding?? Probably need to emit this as
- // a fixup if so.
- EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
- return;
+ // If this is a simple integer displacement that doesn't require a
+ // relocation, emit it now.
+ if (FixupKind != FK_PCRel_1 &&
+ FixupKind != FK_PCRel_2 &&
+ FixupKind != FK_PCRel_4) {
+ EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
+ return;
+ }
+ Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
+ } else {
+ Expr = DispOp.getExpr();
}
// If we have an immoffset, add it to the expression.
- const MCExpr *Expr = DispOp.getExpr();
+ if (FixupKind == FK_Data_4 && StartsWithGlobalOffsetTable(Expr)) {
+ assert(ImmOffset == 0);
+
+ FixupKind = MCFixupKind(X86::reloc_global_offset_table);
+ ImmOffset = CurByte;
+ }
// If the fixup is pc-relative, we need to bias the value to be relative to
// the start of the field, not the end of the field.
- if (FixupKind == MCFixupKind(X86::reloc_pcrel_4byte) ||
+ if (FixupKind == FK_PCRel_4 ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
ImmOffset -= 4;
- if (FixupKind == MCFixupKind(X86::reloc_pcrel_2byte))
+ if (FixupKind == FK_PCRel_2)
ImmOffset -= 2;
- if (FixupKind == MCFixupKind(X86::reloc_pcrel_1byte))
+ if (FixupKind == FK_PCRel_1)
ImmOffset -= 1;
if (ImmOffset)
@@ -221,10 +237,10 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
uint64_t TSFlags, unsigned &CurByte,
raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const{
- const MCOperand &Disp = MI.getOperand(Op+3);
- const MCOperand &Base = MI.getOperand(Op);
- const MCOperand &Scale = MI.getOperand(Op+1);
- const MCOperand &IndexReg = MI.getOperand(Op+2);
+ const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
+ const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
+ const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
unsigned BaseReg = Base.getReg();
// Handle %rip relative addressing.
@@ -238,8 +254,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// movq loads are handled with a special relocation form which allows the
// linker to eliminate some loads for GOT references which end up in the
// same linkage unit.
- if (MI.getOpcode() == X86::MOV64rm ||
- MI.getOpcode() == X86::MOV64rm_TC)
+ if (MI.getOpcode() == X86::MOV64rm)
FixupKind = X86::reloc_riprel_4byte_movq_load;
// rip-relative addressing is actually relative to the *next* instruction.
@@ -295,7 +310,8 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
+ EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
+ Fixups);
return;
}
@@ -355,7 +371,8 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
if (ForceDisp8)
EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
else if (ForceDisp32 || Disp.getImm() != 0)
- EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
+ EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
+ Fixups);
}
/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
@@ -708,14 +725,15 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if ((TSFlags & X86II::Op0Mask) == X86II::REP)
EmitByte(0xF3, CurByte, OS);
+ // Emit the address size opcode prefix as needed.
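+ // (For example, a 64-bit mode instruction addressing memory through a
+ // 32-bit base or index register, such as "movl (%eax), %ebx", needs the
+ // 0x67 address-size prefix; Is32BitMemOperand detects this case.)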
+ if ((TSFlags & X86II::AdSize) ||
+ (MemOperand != -1 && Is64BitMode && Is32BitMemOperand(MI, MemOperand)))
+ EmitByte(0x67, CurByte, OS);
+
// Emit the operand size opcode prefix as needed.
if (TSFlags & X86II::OpSize)
EmitByte(0x66, CurByte, OS);
- // Emit the address size opcode prefix as needed.
- if (TSFlags & X86II::AdSize)
- EmitByte(0x67, CurByte, OS);
-
bool Need0FPrefix = false;
switch (TSFlags & X86II::Op0Mask) {
default: assert(0 && "Invalid prefix!");
@@ -806,6 +824,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if ((TSFlags >> 32) & X86II::VEX_4V)
HasVEX_4V = true;
+
// Determine where the memory operand starts, if present.
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
if (MemoryOperand != -1) MemoryOperand += CurOp;
@@ -815,7 +834,12 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
else
EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+
unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+
+ if ((TSFlags >> 32) & X86II::Has3DNow0F0FOpcode)
+ BaseOpcode = 0x0F; // Weird 3DNow! encoding.
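+ // (3DNow! instructions are encoded as 0F 0F <modrm> <opcode byte>: the
+ // real opcode is emitted last, like an 8-bit immediate, which is why
+ // BaseOpcode is forced to 0x0F here and the true opcode is appended at
+ // the end of EncodeInstruction.)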
+
unsigned SrcRegNum = 0;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
@@ -828,6 +852,13 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
break;
+ case X86II::RawFrmImm8:
+ EmitByte(BaseOpcode, CurByte, OS);
+ EmitImmediate(MI.getOperand(CurOp++),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ CurByte, OS, Fixups);
+ EmitImmediate(MI.getOperand(CurOp++), 1, FK_Data_1, CurByte, OS, Fixups);
+ break;
case X86II::RawFrmImm16:
EmitByte(BaseOpcode, CurByte, OS);
EmitImmediate(MI.getOperand(CurOp++),
@@ -963,12 +994,24 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
RegNum |= GetX86RegNum(MO) << 4;
EmitImmediate(MCOperand::CreateImm(RegNum), 1, FK_Data_1, CurByte, OS,
Fixups);
- } else
+ } else {
+ unsigned FixupKind;
+ // FIXME: Is there a better way to know that we need a signed relocation?
+ if (MI.getOpcode() == X86::MOV64ri32 ||
+ MI.getOpcode() == X86::MOV64mi32 ||
+ MI.getOpcode() == X86::PUSH64i32)
+ FixupKind = X86::reloc_signed_4byte;
+ else
+ FixupKind = getImmFixupKind(TSFlags);
EmitImmediate(MI.getOperand(CurOp++),
- X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind),
CurByte, OS, Fixups);
+ }
}
+ if ((TSFlags >> 32) & X86II::Has3DNow0F0FOpcode)
+ EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
+
#ifndef NDEBUG
// FIXME: Verify.
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index 8c4620f..cbe6db2 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "InstPrinter/X86ATTInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86AsmPrinter.h"
#include "X86COFFMachineModuleInfo.h"
@@ -38,11 +39,6 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
}
-MCSymbol *X86MCInstLower::GetPICBaseSymbol() const {
- return static_cast<const X86TargetLowering*>(TM.getTargetLowering())->
- getPICBaseSymbol(&MF, Ctx);
-}
-
/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
@@ -154,7 +150,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
// Subtract the pic base.
Expr = MCBinaryExpr::CreateSub(Expr,
- MCSymbolRefExpr::Create(GetPICBaseSymbol(),
+ MCSymbolRefExpr::Create(MF.getPICBaseSymbol(),
Ctx),
Ctx);
break;
@@ -173,7 +169,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
Expr = MCSymbolRefExpr::Create(Sym, Ctx);
// Subtract the pic base.
Expr = MCBinaryExpr::CreateSub(Expr,
- MCSymbolRefExpr::Create(GetPICBaseSymbol(), Ctx),
+ MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx),
Ctx);
if (MO.isJTI() && MAI.hasSetDirective()) {
// If .set directive is supported, use it to reduce the number of
@@ -326,8 +322,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
MO.getMBB()->getSymbol(), Ctx));
break;
case MachineOperand::MO_GlobalAddress:
- MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
- break;
case MachineOperand::MO_ExternalSymbol:
MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
break;
@@ -347,6 +341,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
// Handle a few special cases to eliminate operand modifiers.
+ReSimplify:
switch (OutMI.getOpcode()) {
case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
lower_lea64_32mem(&OutMI, 1);
@@ -377,11 +372,10 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
- case X86::MMX_V_SET0: LowerUnaryToTwoAddr(OutMI, X86::MMX_PXORrr); break;
- case X86::MMX_V_SETALLONES:
- LowerUnaryToTwoAddr(OutMI, X86::MMX_PCMPEQDrr); break;
case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
+ case X86::VFsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
+ case X86::VFsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break;
case X86::V_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::XORPDrr); break;
case X86::V_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
@@ -417,6 +411,13 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64: {
+ OutMI = MCInst();
+ OutMI.setOpcode(X86::RET);
+ break;
+ }
+
// TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
case X86::TAILJMPr:
case X86::TAILJMPd:
@@ -436,6 +437,19 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
+ // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
+ // this with an ugly goto in case the resultant OR uses EAX and needs the
+ // short form.
+ case X86::ADD16rr_DB: OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
+ case X86::ADD32rr_DB: OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
+ case X86::ADD64rr_DB: OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
+ case X86::ADD16ri_DB: OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
+ case X86::ADD32ri_DB: OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
+ case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
+ case X86::ADD16ri8_DB: OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
+ case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
+ case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;
+
// The assembler backend wants to see branches in their small form and relax
// them to their large form. The JIT can only handle the large form because
// it does not do relaxation. For now, translate the large form to the
@@ -513,6 +527,66 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
}
+static void LowerTlsAddr(MCStreamer &OutStreamer,
+ X86MCInstLower &MCInstLowering,
+ const MachineInstr &MI) {
+ bool is64Bits = MI.getOpcode() == X86::TLS_addr64;
+ MCContext &context = OutStreamer.getContext();
+
+ if (is64Bits) {
+ MCInst prefix;
+ prefix.setOpcode(X86::DATA16_PREFIX);
+ OutStreamer.EmitInstruction(prefix);
+ }
+ MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
+ const MCSymbolRefExpr *symRef =
+ MCSymbolRefExpr::Create(sym, MCSymbolRefExpr::VK_TLSGD, context);
+
+ MCInst LEA;
+ if (is64Bits) {
+ LEA.setOpcode(X86::LEA64r);
+ LEA.addOperand(MCOperand::CreateReg(X86::RDI)); // dest
+ LEA.addOperand(MCOperand::CreateReg(X86::RIP)); // base
+ LEA.addOperand(MCOperand::CreateImm(1)); // scale
+ LEA.addOperand(MCOperand::CreateReg(0)); // index
+ LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp
+ LEA.addOperand(MCOperand::CreateReg(0)); // seg
+ } else {
+ LEA.setOpcode(X86::LEA32r);
+ LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest
+ LEA.addOperand(MCOperand::CreateReg(0)); // base
+ LEA.addOperand(MCOperand::CreateImm(1)); // scale
+ LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // index
+ LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp
+ LEA.addOperand(MCOperand::CreateReg(0)); // seg
+ }
+ OutStreamer.EmitInstruction(LEA);
+
+ if (is64Bits) {
+ MCInst prefix;
+ prefix.setOpcode(X86::DATA16_PREFIX);
+ OutStreamer.EmitInstruction(prefix);
+ prefix.setOpcode(X86::DATA16_PREFIX);
+ OutStreamer.EmitInstruction(prefix);
+ prefix.setOpcode(X86::REX64_PREFIX);
+ OutStreamer.EmitInstruction(prefix);
+ }
+
+ MCInst call;
+ if (is64Bits)
+ call.setOpcode(X86::CALL64pcrel32);
+ else
+ call.setOpcode(X86::CALLpcrel32);
+ StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
+ MCSymbol *tlsGetAddr = context.GetOrCreateSymbol(name);
+ const MCSymbolRefExpr *tlsRef =
+ MCSymbolRefExpr::Create(tlsGetAddr,
+ MCSymbolRefExpr::VK_PLT,
+ context);
+
+ call.addOperand(MCOperand::CreateExpr(tlsRef));
+ OutStreamer.EmitInstruction(call);
+}
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(Mang, *MF, *this);
@@ -532,13 +606,26 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
return;
+
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64: {
+ // Lower these as normal, but add some comments.
+ unsigned Reg = MI->getOperand(0).getReg();
+ OutStreamer.AddComment(StringRef("eh_return, addr: %") +
+ X86ATTInstPrinter::getRegisterName(Reg));
+ break;
+ }
case X86::TAILJMPr:
case X86::TAILJMPd:
case X86::TAILJMPd64:
// Lower these as normal, but add some comments.
OutStreamer.AddComment("TAILCALL");
break;
-
+
+ case X86::TLS_addr32:
+ case X86::TLS_addr64:
+ return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);
+
case X86::MOVPC32r: {
MCInst TmpInst;
// This is a pseudo op for a two instruction sequence with a label, which
@@ -548,7 +635,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// popl %esi
// Emit the call.
- MCSymbol *PICBase = MCInstLowering.GetPICBaseSymbol();
+ MCSymbol *PICBase = MF->getPICBaseSymbol();
TmpInst.setOpcode(X86::CALLpcrel32);
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
@@ -586,7 +673,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
const MCExpr *PICBase =
- MCSymbolRefExpr::Create(MCInstLowering.GetPICBaseSymbol(), OutContext);
+ MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);
DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
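One note on the ADD*_DB pseudo-ops lowered in the hunks above: "DB" marks an add whose two operands have provably disjoint set bits, in which case addition and bitwise OR give identical results; lowering to OR and then jumping back to ReSimplify lets the resulting OR pick up any shorter encoding (for example when EAX is involved). A tiny self-contained check of that identity (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // With no overlapping set bits there are no carries, so a + b == a | b.
  uint32_t Base  = 0xFFFF0000u;
  uint32_t Index = 0x0000ABCDu;
  assert((Base & Index) == 0);              // operands are disjoint
  assert((Base + Index) == (Base | Index)); // hence ADD and OR agree
  return 0;
}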
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.h b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
index 539b09b..0210072 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.h
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
@@ -40,8 +40,6 @@ public:
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
- MCSymbol *GetPICBaseSymbol() const;
-
MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
diff --git a/contrib/llvm/lib/Target/X86/X86MachObjectWriter.cpp b/contrib/llvm/lib/Target/X86/X86MachObjectWriter.cpp
new file mode 100644
index 0000000..8f3dd32
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86MachObjectWriter.cpp
@@ -0,0 +1,32 @@
+//===-- X86MachObjectWriter.cpp - X86 Mach-O Writer -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+using namespace llvm;
+
+namespace {
+class X86MachObjectWriter : public MCMachObjectTargetWriter {
+public:
+ X86MachObjectWriter(bool Is64Bit, uint32_t CPUType,
+ uint32_t CPUSubtype)
+ : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
+ /*UseAggressiveSymbolFolding=*/Is64Bit) {}
+};
+}
+
+MCObjectWriter *llvm::createX86MachObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint32_t CPUType,
+ uint32_t CPUSubtype) {
+ return createMachObjectWriter(new X86MachObjectWriter(Is64Bit,
+ CPUType,
+ CPUSubtype),
+ OS, /*IsLittleEndian=*/true);
+}
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index fedd49e..2f6bd88 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -31,7 +31,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -41,7 +41,7 @@
#include "llvm/Support/CommandLine.h"
using namespace llvm;
-static cl::opt<bool>
+cl::opt<bool>
ForceStackAlign("force-align-stack",
cl::desc("Force align the stack to the minimum alignment"
" needed for the function."),
@@ -60,7 +60,7 @@ X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
Is64Bit = Subtarget->is64Bit();
IsWin64 = Subtarget->isTargetWin64();
- StackAlign = TM.getFrameInfo()->getStackAlignment();
+ StackAlign = TM.getFrameLowering()->getStackAlignment();
if (Is64Bit) {
SlotSize = 8;
@@ -159,46 +159,21 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
case X86::YMM7: case X86::YMM15: case X86::MM7:
return 7;
- case X86::ES:
- return 0;
- case X86::CS:
- return 1;
- case X86::SS:
- return 2;
- case X86::DS:
- return 3;
- case X86::FS:
- return 4;
- case X86::GS:
- return 5;
-
- case X86::CR0:
- return 0;
- case X86::CR1:
- return 1;
- case X86::CR2:
- return 2;
- case X86::CR3:
- return 3;
- case X86::CR4:
- return 4;
-
- case X86::DR0:
- return 0;
- case X86::DR1:
- return 1;
- case X86::DR2:
- return 2;
- case X86::DR3:
- return 3;
- case X86::DR4:
- return 4;
- case X86::DR5:
- return 5;
- case X86::DR6:
- return 6;
- case X86::DR7:
- return 7;
+ case X86::ES: return 0;
+ case X86::CS: return 1;
+ case X86::SS: return 2;
+ case X86::DS: return 3;
+ case X86::FS: return 4;
+ case X86::GS: return 5;
+
+ case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
+ case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
+ case X86::CR2: case X86::CR10: case X86::DR2: return 2;
+ case X86::CR3: case X86::CR11: case X86::DR3: return 3;
+ case X86::CR4: case X86::CR12: case X86::DR4: return 4;
+ case X86::CR5: case X86::CR13: case X86::DR5: return 5;
+ case X86::CR6: case X86::CR14: case X86::DR6: return 6;
+ case X86::CR7: case X86::CR15: case X86::DR7: return 7;
// Pseudo index registers are equivalent to a "none"
// scaled index (See Intel Manual 2A, table 2-3)
@@ -295,9 +270,14 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
}
break;
case X86::sub_32bit:
- if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
+ if (B == &X86::GR32RegClass) {
if (A->getSize() == 8)
return A;
+ } else if (B == &X86::GR32_NOSPRegClass) {
+ if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
+ return &X86::GR64_NOSPRegClass;
+ if (A->getSize() == 8)
+ return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
} else if (B == &X86::GR32_ABCDRegClass) {
if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
A == &X86::GR64_NOREXRegClass ||
@@ -336,10 +316,16 @@ X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
if (TM.getSubtarget<X86Subtarget>().is64Bit())
return &X86::GR64RegClass;
return &X86::GR32RegClass;
- case 1: // Normal GRPs except the stack pointer (for encoding reasons).
+ case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (TM.getSubtarget<X86Subtarget>().is64Bit())
return &X86::GR64_NOSPRegClass;
return &X86::GR32_NOSPRegClass;
+ case 2: // Available for tailcall (not callee-saved GPRs).
+ if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
+ return &X86::GR64_TCW64RegClass;
+ if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ return &X86::GR64_TCRegClass;
+ return &X86::GR32_TCRegClass;
}
}
@@ -408,6 +394,8 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
// Set the stack-pointer register and its aliases as reserved.
Reserved.set(X86::RSP);
Reserved.set(X86::ESP);
@@ -420,7 +408,7 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(X86::IP);
// Set the frame-pointer register and its aliases as reserved if needed.
- if (hasFP(MF)) {
+ if (TFI->hasFP(MF)) {
Reserved.set(X86::RBP);
Reserved.set(X86::EBP);
Reserved.set(X86::BP);
@@ -445,21 +433,6 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register. This is true if the function has variable sized allocas
-/// or if frame pointer elimination is disabled.
-bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const MachineModuleInfo &MMI = MF.getMMI();
-
- return (DisableFramePointerElim(MF) ||
- needsStackRealignment(MF) ||
- MFI->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken() ||
- MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
- MMI.callsUnwindInit());
-}
-
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
return (RealignStack &&
@@ -478,62 +451,25 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
if (0 && requiresRealignment && MFI->hasVarSizedObjects())
report_fatal_error(
"Stack realignment in presense of dynamic allocas is not supported");
-
+
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)
return canRealignStack(MF);
-
- return requiresRealignment && canRealignStack(MF);
-}
-bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
- return !MF.getFrameInfo()->hasVarSizedObjects();
+ return requiresRealignment && canRealignStack(MF);
}
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
unsigned Reg, int &FrameIdx) const {
- if (Reg == FramePtr && hasFP(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (Reg == FramePtr && TFI->hasFP(MF)) {
FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
return true;
}
return false;
}
-int
-X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- int Offset = MFI->getObjectOffset(FI) - TFI.getOffsetOfLocalArea();
- uint64_t StackSize = MFI->getStackSize();
-
- if (needsStackRealignment(MF)) {
- if (FI < 0) {
- // Skip the saved EBP.
- Offset += SlotSize;
- } else {
- unsigned Align = MFI->getObjectAlignment(FI);
- assert((-(Offset + StackSize)) % Align == 0);
- Align = 0;
- return Offset + StackSize;
- }
- // FIXME: Support tail calls
- } else {
- if (!hasFP(MF))
- return Offset + StackSize;
-
- // Skip the saved EBP.
- Offset += SlotSize;
-
- // Skip the RETADDR move area
- const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
- if (TailCallReturnAddrDelta < 0)
- Offset -= TailCallReturnAddrDelta;
- }
-
- return Offset;
-}
-
static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
if (is64Bit) {
if (isInt<8>(Imm))
@@ -561,69 +497,70 @@ static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
+ int Opcode = I->getOpcode();
+ bool isDestroy = Opcode == getCallFrameDestroyOpcode();
+ DebugLoc DL = I->getDebugLoc();
+ uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+ uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
+ I = MBB.erase(I);
+
+ if (!reserveCallFrame) {
// If the stack pointer can be changed after prologue, turn the
// adjcallstackup instruction into a 'sub ESP, <amt>' and the
// adjcallstackdown instruction into 'add ESP, <amt>'
// TODO: consider using push / pop instead of sub + store / add
- MachineInstr *Old = I;
- uint64_t Amount = Old->getOperand(0).getImm();
- if (Amount != 0) {
- // We need to keep the stack aligned properly. To do this, we round the
- // amount of space needed for the outgoing arguments up to the next
- // alignment boundary.
- Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
-
- MachineInstr *New = 0;
- if (Old->getOpcode() == getCallFrameSetupOpcode()) {
- New = BuildMI(MF, Old->getDebugLoc(),
- TII.get(getSUBriOpcode(Is64Bit, Amount)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(Amount);
- } else {
- assert(Old->getOpcode() == getCallFrameDestroyOpcode());
-
- // Factor out the amount the callee already popped.
- uint64_t CalleeAmt = Old->getOperand(1).getImm();
- Amount -= CalleeAmt;
-
- if (Amount) {
- unsigned Opc = getADDriOpcode(Is64Bit, Amount);
- New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
- .addReg(StackPtr)
- .addImm(Amount);
- }
- }
+ if (Amount == 0)
+ return;
+
+ // We need to keep the stack aligned properly. To do this, we round the
+ // amount of space needed for the outgoing arguments up to the next
+ // alignment boundary.
+ Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
+
+ MachineInstr *New = 0;
+ if (Opcode == getCallFrameSetupOpcode()) {
+ New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ } else {
+ assert(Opcode == getCallFrameDestroyOpcode());
- if (New) {
- // The EFLAGS implicit def is dead.
- New->getOperand(3).setIsDead();
+ // Factor out the amount the callee already popped.
+ Amount -= CalleeAmt;
- // Replace the pseudo instruction with a new instruction.
- MBB.insert(I, New);
+ if (Amount) {
+ unsigned Opc = getADDriOpcode(Is64Bit, Amount);
+ New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
}
}
- } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
- // If we are performing frame pointer elimination and if the callee pops
- // something off the stack pointer, add it back. We do this until we have
- // more advanced stack pointer tracking ability.
- if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
- unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
- MachineInstr *Old = I;
- MachineInstr *New =
- BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
- StackPtr)
- .addReg(StackPtr)
- .addImm(CalleeAmt);
+ if (New) {
// The EFLAGS implicit def is dead.
New->getOperand(3).setIsDead();
+
+ // Replace the pseudo instruction with a new instruction.
MBB.insert(I, New);
}
+
+ return;
}
- MBB.erase(I);
+ if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
+ // If we are performing frame pointer elimination and if the callee pops
+ // something off the stack pointer, add it back. We do this until we have
+ // more advanced stack pointer tracking ability.
+ unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
+ MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(CalleeAmt);
+
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+ MBB.insert(I, New);
+ }
}
void
@@ -634,6 +571,7 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned i = 0;
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
while (!MI.getOperand(i).isFI()) {
++i;
@@ -650,7 +588,7 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
else if (AfterFPPop)
BasePtr = StackPtr;
else
- BasePtr = (hasFP(MF) ? FramePtr : StackPtr);
+ BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);
// This must be part of a four operand memory reference. Replace the
// FrameIndex with base register with EBP. Add an offset to the offset.
@@ -660,11 +598,10 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FIOffset;
if (AfterFPPop) {
// Tail call jmp happens after FP is popped.
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
const MachineFrameInfo *MFI = MF.getFrameInfo();
- FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
+ FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
} else
- FIOffset = getFrameIndexOffset(MF, FrameIndex);
+ FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
if (MI.getOperand(i+3).isImm()) {
// Offset is a 32-bit integer.
@@ -677,710 +614,14 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
}
}
-void
-X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
-
- if (TailCallReturnAddrDelta < 0) {
- // create RETURNADDR area
- // arg
- // arg
- // RETADDR
- // { ...
- // RETADDR area
- // ...
- // }
- // [EBP]
- MFI->CreateFixedObject(-TailCallReturnAddrDelta,
- (-1U*SlotSize)+TailCallReturnAddrDelta, true);
- }
-
- if (hasFP(MF)) {
- assert((TailCallReturnAddrDelta <= 0) &&
- "The Delta should always be zero or negative");
- const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
-
- // Create a frame entry for the EBP register that must be saved.
- int FrameIdx = MFI->CreateFixedObject(SlotSize,
- -(int)SlotSize +
- TFI.getOffsetOfLocalArea() +
- TailCallReturnAddrDelta,
- true);
- assert(FrameIdx == MFI->getObjectIndexBegin() &&
- "Slot for EBP register must be last in order to be found!");
- FrameIdx = 0;
- }
-}
-
-/// emitSPUpdate - Emit a series of instructions to increment / decrement the
-/// stack pointer by a constant value.
-static
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
- const TargetInstrInfo &TII) {
- bool isSub = NumBytes < 0;
- uint64_t Offset = isSub ? -NumBytes : NumBytes;
- unsigned Opc = isSub ?
- getSUBriOpcode(Is64Bit, Offset) :
- getADDriOpcode(Is64Bit, Offset);
- uint64_t Chunk = (1LL << 31) - 1;
- DebugLoc DL = MBB.findDebugLoc(MBBI);
-
- while (Offset) {
- uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr)
- .addImm(ThisVal);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
- Offset -= ThisVal;
- }
-}
-
-/// mergeSPUpdatesUp - Merge two stack-manipulating instructions, scanning upward from the iterator.
-static
-void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, uint64_t *NumBytes = NULL) {
- if (MBBI == MBB.begin()) return;
-
- MachineBasicBlock::iterator PI = prior(MBBI);
- unsigned Opc = PI->getOpcode();
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- PI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes += PI->getOperand(2).getImm();
- MBB.erase(PI);
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
- PI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes -= PI->getOperand(2).getImm();
- MBB.erase(PI);
- }
-}
-
-/// mergeSPUpdatesDown - Merge two stack-manipulating instructions, scanning downward from the iterator.
-static
-void mergeSPUpdatesDown(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, uint64_t *NumBytes = NULL) {
- // FIXME: THIS ISN'T RUN!!!
- return;
-
- if (MBBI == MBB.end()) return;
-
- MachineBasicBlock::iterator NI = llvm::next(MBBI);
- if (NI == MBB.end()) return;
-
- unsigned Opc = NI->getOpcode();
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- NI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes -= NI->getOperand(2).getImm();
- MBB.erase(NI);
- MBBI = NI;
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
- NI->getOperand(0).getReg() == StackPtr) {
- if (NumBytes)
- *NumBytes += NI->getOperand(2).getImm();
- MBB.erase(NI);
- MBBI = NI;
- }
-}
-
-/// mergeSPUpdates - Check the instruction before/after the passed
-/// instruction. If it is an ADD/SUB instruction, it is deleted and the
-/// stack adjustment is returned as a positive value for ADD and a negative
-/// value for SUB.
-static int mergeSPUpdates(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr,
- bool doMergeWithPrevious) {
- if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
- (!doMergeWithPrevious && MBBI == MBB.end()))
- return 0;
-
- MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
- MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
- unsigned Opc = PI->getOpcode();
- int Offset = 0;
-
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- PI->getOperand(0).getReg() == StackPtr){
- Offset += PI->getOperand(2).getImm();
- MBB.erase(PI);
- if (!doMergeWithPrevious) MBBI = NI;
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
- PI->getOperand(0).getReg() == StackPtr) {
- Offset -= PI->getOperand(2).getImm();
- MBB.erase(PI);
- if (!doMergeWithPrevious) MBBI = NI;
- }
-
- return Offset;
-}
-
-void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
- MCSymbol *Label,
- unsigned FramePtr) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo &MMI = MF.getMMI();
-
- // Add callee saved registers to move list.
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- if (CSI.empty()) return;
-
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
- const TargetData *TD = MF.getTarget().getTargetData();
- bool HasFP = hasFP(MF);
-
- // Calculate the number of bytes used to store the return address.
- int stackGrowth =
- (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize());
-
- // FIXME: This is dirty hack. The code itself is pretty mess right now.
- // It should be rewritten from scratch and generalized sometimes.
-
- // Determine maximum offset (minimum due to stack growth).
- int64_t MaxOffset = 0;
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I)
- MaxOffset = std::min(MaxOffset,
- MFI->getObjectOffset(I->getFrameIdx()));
-
- // Calculate offsets.
- int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
- unsigned Reg = I->getReg();
- Offset = MaxOffset - Offset + saveAreaOffset;
-
- // Don't output a new machine move if we're re-saving the frame
- // pointer. This happens when the PrologEpilogInserter has inserted an extra
- // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
- // generates one when frame pointers are used. If we generate a "machine
- // move" for this extra "PUSH", the linker will lose track of the fact that
- // the frame pointer should have the value of the first "PUSH" when it's
- // trying to unwind.
- //
- // FIXME: This looks inelegant. It's possibly correct, but it's covering up
- // another bug. I.e., one where we generate a prolog like this:
- //
- // pushl %ebp
- // movl %esp, %ebp
- // pushl %ebp
- // pushl %esi
- // ...
- //
- // The immediate re-push of EBP is unnecessary. At the least, it's an
- // optimization bug. EBP can be used as a scratch register in certain
- // cases, but probably not when we have a frame pointer.
- if (HasFP && FramePtr == Reg)
- continue;
-
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
- }
-}
-
-/// emitPrologue - Push callee-saved registers onto the stack, which
-/// automatically adjust the stack pointer. Adjust the stack pointer to allocate
-/// space for local variables. Also emit labels used by the exception handler to
-/// generate the exception handling frames.
-void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
- const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
- MachineModuleInfo &MMI = MF.getMMI();
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- bool needsFrameMoves = MMI.hasDebugInfo() ||
- !Fn->doesNotThrow() || UnwindTablesMandatory;
- uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
- uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
- bool HasFP = hasFP(MF);
- DebugLoc DL;
-
- // If we're forcing a stack realignment we can't rely on just the frame
- // info, we need to know the ABI stack alignment as well in case we
- // have a call out. Otherwise just make sure we have some alignment - we'll
- // go with the minimum SlotSize.
- if (ForceStackAlign) {
- if (MFI->hasCalls())
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
- else if (MaxAlign < SlotSize)
- MaxAlign = SlotSize;
- }
-
- // Add RETADDR move area to callee saved frame size.
- int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
- if (TailCallReturnAddrDelta < 0)
- X86FI->setCalleeSavedFrameSize(
- X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
-
- // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
- // function, and use up to 128 bytes of stack space, don't have a frame
- // pointer, calls, or dynamic alloca then we do not need to adjust the
- // stack pointer (we fit in the Red Zone).
- if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
- !needsStackRealignment(MF) &&
- !MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->adjustsStack() && // No calls.
- !Subtarget->isTargetWin64()) { // Win64 has no Red Zone
- uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
- if (HasFP) MinSize += SlotSize;
- StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
- MFI->setStackSize(StackSize);
- } else if (Subtarget->isTargetWin64()) {
- // We need to always allocate 32 bytes as register spill area.
- // FIXME: We might reuse these 32 bytes for leaf functions.
- StackSize += 32;
- MFI->setStackSize(StackSize);
- }
-
- // Insert stack pointer adjustment for later moving of return addr. Only
- // applies to tail call optimized functions where the callee argument stack
- // size is bigger than the callers.
- if (TailCallReturnAddrDelta < 0) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(-TailCallReturnAddrDelta);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
- }
-
- // Mapping for machine moves:
- //
- // DST: VirtualFP AND
- // SRC: VirtualFP => DW_CFA_def_cfa_offset
- // ELSE => DW_CFA_def_cfa
- //
- // SRC: VirtualFP AND
- // DST: Register => DW_CFA_def_cfa_register
- //
- // ELSE
- // OFFSET < 0 => DW_CFA_offset_extended_sf
- // REG < 64 => DW_CFA_offset + Reg
- // ELSE => DW_CFA_offset_extended
-
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
- const TargetData *TD = MF.getTarget().getTargetData();
- uint64_t NumBytes = 0;
- int stackGrowth = -TD->getPointerSize();
-
- if (HasFP) {
- // Calculate required stack adjustment.
- uint64_t FrameSize = StackSize - SlotSize;
- if (needsStackRealignment(MF))
- FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
-
- NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
-
- // Get the offset of the stack slot for the EBP register, which is
- // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
- // Update the frame offset adjustment.
- MFI->setOffsetAdjustment(-NumBytes);
-
- // Save EBP/RBP into the appropriate stack slot.
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
- .addReg(FramePtr, RegState::Kill);
-
- if (needsFrameMoves) {
- // Mark the place where EBP/RBP was saved.
- MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
-
- // Define the current CFA rule to use the provided offset.
- if (StackSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- } else {
- // FIXME: Verify & implement for FP
- MachineLocation SPDst(StackPtr);
- MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- }
-
- // Change the rule for the FramePtr to be an "offset" rule.
- MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
- MachineLocation FPSrc(FramePtr);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
- }
-
- // Update EBP with the new base value...
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
- .addReg(StackPtr);
-
- if (needsFrameMoves) {
- // Mark effective beginning of when frame pointer becomes valid.
- MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
-
- // Define the current CFA to use the EBP/RBP register.
- MachineLocation FPDst(FramePtr);
- MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
- }
-
- // Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
- I != E; ++I)
- I->addLiveIn(FramePtr);
-
- // Realign stack
- if (needsStackRealignment(MF)) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
- StackPtr).addReg(StackPtr).addImm(-MaxAlign);
-
- // The EFLAGS implicit def is dead.
- MI->getOperand(3).setIsDead();
- }
- } else {
- NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
- }
-
- // Skip the callee-saved push instructions.
- bool PushedRegs = false;
- int StackOffset = 2 * stackGrowth;
-
- while (MBBI != MBB.end() &&
- (MBBI->getOpcode() == X86::PUSH32r ||
- MBBI->getOpcode() == X86::PUSH64r)) {
- PushedRegs = true;
- ++MBBI;
-
- if (!HasFP && needsFrameMoves) {
- // Mark callee-saved push instruction.
- MCSymbol *Label = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
-
- // Define the current CFA rule to use the provided offset.
- unsigned Ptr = StackSize ?
- MachineLocation::VirtualFP : StackPtr;
- MachineLocation SPDst(Ptr);
- MachineLocation SPSrc(Ptr, StackOffset);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
- StackOffset += stackGrowth;
- }
- }
-
- DL = MBB.findDebugLoc(MBBI);
-
- // Adjust stack pointer: ESP -= numbytes.
-
- // Windows and cygwin/mingw require a prologue helper routine when allocating
- // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
- // uses __alloca. __alloca and the 32-bit version of __chkstk will probe
- // the stack and adjust the stack pointer in one go. The 64-bit version
- // of __chkstk is only responsible for probing the stack. The 64-bit
- // prologue is responsible for adjusting the stack pointer. Touching the
- // stack at 4K increments is necessary to ensure that the guard pages used
- // by the OS virtual memory manager are allocated in correct sequence.
- if (NumBytes >= 4096 &&
- (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) {
- // Check, whether EAX is livein for this function.
- bool isEAXAlive = false;
- for (MachineRegisterInfo::livein_iterator
- II = MF.getRegInfo().livein_begin(),
- EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
- unsigned Reg = II->first;
- isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
- Reg == X86::AH || Reg == X86::AL);
- }
-
-
- const char *StackProbeSymbol =
- Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
- if (!isEAXAlive) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
- .addImm(NumBytes);
- BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
- .addExternalSymbol(StackProbeSymbol)
- .addReg(StackPtr, RegState::Define | RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
- } else {
- // Save EAX
- BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
- .addReg(X86::EAX, RegState::Kill);
-
- // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
- // allocated bytes for EAX.
- BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
- .addImm(NumBytes - 4);
- BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
- .addExternalSymbol(StackProbeSymbol)
- .addReg(StackPtr, RegState::Define | RegState::Implicit)
- .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
-
- // Restore EAX
- MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
- X86::EAX),
- StackPtr, false, NumBytes - 4);
- MBB.insert(MBBI, MI);
- }
- } else if (NumBytes) {
- // If there is an SUB32ri of ESP immediately before this instruction, merge
- // the two. This can be the case when tail call elimination is enabled and
- // the callee has more arguments than the caller.
- NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
-
- // If there is an ADD32ri or SUB32ri of ESP immediately after this
- // instruction, merge the two instructions.
- mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
-
- if (NumBytes)
- emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
- }
-
- if ((NumBytes || PushedRegs) && needsFrameMoves) {
- // Mark end of stack pointer adjustment.
- MCSymbol *Label = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
-
- if (!HasFP && NumBytes) {
- // Define the current CFA rule to use the provided offset.
- if (StackSize) {
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP,
- -StackSize + stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
- } else {
- // FIXME: Verify & implement for FP
- MachineLocation SPDst(StackPtr);
- MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
- }
- }
-
- // Emit DWARF info specifying the offsets of the callee-saved registers.
- if (PushedRegs)
- emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
- }
-}
-
-void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- unsigned RetOpcode = MBBI->getOpcode();
- DebugLoc DL = MBBI->getDebugLoc();
-
- switch (RetOpcode) {
- default:
- llvm_unreachable("Can only insert epilog into returning blocks");
- case X86::RET:
- case X86::RETI:
- case X86::TCRETURNdi:
- case X86::TCRETURNri:
- case X86::TCRETURNmi:
- case X86::TCRETURNdi64:
- case X86::TCRETURNri64:
- case X86::TCRETURNmi64:
- case X86::EH_RETURN:
- case X86::EH_RETURN64:
- break; // These are ok
- }
-
- // Get the number of bytes to allocate from the FrameInfo.
- uint64_t StackSize = MFI->getStackSize();
- uint64_t MaxAlign = MFI->getMaxAlignment();
- unsigned CSSize = X86FI->getCalleeSavedFrameSize();
- uint64_t NumBytes = 0;
-
- // If we're forcing a stack realignment we can't rely on just the frame
- // info, we need to know the ABI stack alignment as well in case we
- // have a call out. Otherwise just make sure we have some alignment - we'll
- // go with the minimum.
- if (ForceStackAlign) {
- if (MFI->hasCalls())
- MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
- else
- MaxAlign = MaxAlign ? MaxAlign : 4;
- }
-
- if (hasFP(MF)) {
- // Calculate required stack adjustment.
- uint64_t FrameSize = StackSize - SlotSize;
- if (needsStackRealignment(MF))
- FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
-
- NumBytes = FrameSize - CSSize;
-
- // Pop EBP.
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
- } else {
- NumBytes = StackSize - CSSize;
- }
-
- // Skip the callee-saved pop instructions.
- MachineBasicBlock::iterator LastCSPop = MBBI;
- while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
- unsigned Opc = PI->getOpcode();
-
- if (Opc != X86::POP32r && Opc != X86::POP64r &&
- !PI->getDesc().isTerminator())
- break;
-
- --MBBI;
- }
-
- DL = MBBI->getDebugLoc();
-
- // If there is an ADD32ri or SUB32ri of ESP immediately before this
- // instruction, merge the two instructions.
- if (NumBytes || MFI->hasVarSizedObjects())
- mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
-
- // If dynamic alloca is used, then reset esp to point to the last callee-saved
- // slot before popping them off! Same applies for the case, when stack was
- // realigned.
- if (needsStackRealignment(MF)) {
- // We cannot use LEA here, because stack pointer was realigned. We need to
- // deallocate local frame back.
- if (CSSize) {
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
- MBBI = prior(LastCSPop);
- }
-
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
- StackPtr).addReg(FramePtr);
- } else if (MFI->hasVarSizedObjects()) {
- if (CSSize) {
- unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
- MachineInstr *MI =
- addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
- FramePtr, false, -CSSize);
- MBB.insert(MBBI, MI);
- } else {
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
- .addReg(FramePtr);
- }
- } else if (NumBytes) {
- // Adjust stack pointer back: ESP += numbytes.
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
- }
-
- // We're returning from function via eh_return.
- if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
- MBBI = prior(MBB.end());
- MachineOperand &DestAddr = MBBI->getOperand(0);
- assert(DestAddr.isReg() && "Offset should be in register!");
- BuildMI(MBB, MBBI, DL,
- TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
- StackPtr).addReg(DestAddr.getReg());
- } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
- RetOpcode == X86::TCRETURNmi ||
- RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
- RetOpcode == X86::TCRETURNmi64) {
- bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
- // Tail call return: adjust the stack pointer and jump to callee.
- MBBI = prior(MBB.end());
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
- assert(StackAdjust.isImm() && "Expecting immediate value.");
-
- // Adjust stack pointer.
- int StackAdj = StackAdjust.getImm();
- int MaxTCDelta = X86FI->getTCReturnAddrDelta();
- int Offset = 0;
- assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
-
- // Incorporate the retaddr area.
- Offset = StackAdj-MaxTCDelta;
- assert(Offset >= 0 && "Offset should never be negative");
-
- if (Offset) {
- // Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
- }
-
- // Jump to label or value in register.
- if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
- BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
- ? X86::TAILJMPd : X86::TAILJMPd64)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
- JumpTarget.getTargetFlags());
- } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
- ? X86::TAILJMPm : X86::TAILJMPm64));
- for (unsigned i = 0; i != 5; ++i)
- MIB.addOperand(MBBI->getOperand(i));
- } else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
- addReg(JumpTarget.getReg(), RegState::Kill);
- } else {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
- addReg(JumpTarget.getReg(), RegState::Kill);
- }
-
- MachineInstr *NewMI = prior(MBBI);
- for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
- NewMI->addOperand(MBBI->getOperand(i));
-
- // Delete the pseudo instruction TCRETURN.
- MBB.erase(MBBI);
- } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
- (X86FI->getTCReturnAddrDelta() < 0)) {
- // Add the return addr area delta back since we are not tail calling.
- int delta = -1*X86FI->getTCReturnAddrDelta();
- MBBI = prior(MBB.end());
-
- // Check for possible merge with preceding ADD instruction.
- delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
- }
-}
-
unsigned X86RegisterInfo::getRARegister() const {
return Is64Bit ? X86::RIP // Should have dwarf #16.
: X86::EIP; // Should have dwarf #8.
}
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- return hasFP(MF) ? FramePtr : StackPtr;
-}
-
-void
-X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
- // Calculate the number of bytes used to store the return address
- int stackGrowth = (Is64Bit ? -8 : -4);
-
- // Initial state of the frame pointer is esp+stackGrowth.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(0, Dst, Src));
-
- // Add return address to move list
- MachineLocation CSDst(StackPtr, stackGrowth);
- MachineLocation CSSrc(getRARegister());
- Moves.push_back(MachineMove(0, CSDst, CSSrc));
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
unsigned X86RegisterInfo::getEHExceptionRegister() const {
@@ -1579,13 +820,13 @@ namespace {
// Be over-conservative: scan over all vreg defs and find whether vector
// registers are used. If yes, there is a possibility that vector register
// will be spilled and thus require dynamic stack realignment.
- for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
- RegNum < RI.getLastVirtReg(); ++RegNum)
- if (RI.getRegClass(RegNum)->getAlignment() > StackAlignment) {
+ for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
FuncInfo->setReserveFP(true);
return true;
}
-
+ }
// Nothing to do
return false;
}
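One detail of the rewritten eliminateCallFramePseudoInstr above worth calling out: the outgoing-argument size is rounded up to the stack alignment with the classic (Amount + StackAlign - 1) / StackAlign * StackAlign idiom. A standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Round Amount up to the next multiple of StackAlign (StackAlign > 0; in
// practice a power of two, e.g. 16 for the SysV x86-64 ABI).
static uint64_t RoundUpToAlignment(uint64_t Amount, uint64_t StackAlign) {
  return (Amount + StackAlign - 1) / StackAlign * StackAlign;
}

int main() {
  assert(RoundUpToAlignment(20, 16) == 32);
  assert(RoundUpToAlignment(32, 16) == 32); // already aligned: unchanged
  assert(RoundUpToAlignment(0, 16)  == 0);
  return 0;
}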
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index 527df05..064be64 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -111,14 +111,10 @@ public:
/// register scavenger to determine what registers are free.
BitVector getReservedRegs(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
bool canRealignStack(const MachineFunction &MF) const;
bool needsStackRealignment(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
-
bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const;
@@ -129,19 +125,12 @@ public:
void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, RegScavenger *RS = NULL) const;
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
-
- void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label,
- unsigned FramePtr) const;
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+ unsigned getStackRegister() const { return StackPtr; }
+ // FIXME: Move to FrameInfo
+ unsigned getSlotSize() const { return SlotSize; }
// Exception handling queries.
unsigned getEHExceptionRegister() const;
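The header hunk above completes the theme of this file's changes: frame-related logic (hasFP, hasReservedCallFrame, getFrameIndexOffset, prologue/epilogue emission) moves off X86RegisterInfo onto the TargetFrameLowering object. The resulting call pattern, mirroring the replacements made throughout the patch (a sketch, assuming the usual MF variable and headers are in scope):

const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
if (TFI->hasFP(MF)) {
  // The frame pointer (EBP/RBP) is dedicated, so it and its aliases
  // must be treated as reserved rather than allocatable.
}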
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index 95269b1..612fac2 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -1,10 +1,10 @@
//===- X86RegisterInfo.td - Describe the X86 Register File --*- tablegen -*-==//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 Register file, defining the registers themselves,
@@ -34,8 +34,8 @@ let Namespace = "X86" in {
// because the register file generator is smart enough to figure out that
// AL aliases AX if we tell it that AX aliased AL (for example).
- // Dwarf numbering is different for 32-bit and 64-bit, and there are
- // variations by target as well. Currently the first entry is for X86-64,
+ // Dwarf numbering is different for 32-bit and 64-bit, and there are
+ // variations by target as well. Currently the first entry is for X86-64,
// second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux
// and debug information on X86-32/Darwin)
@@ -81,7 +81,7 @@ let Namespace = "X86" in {
def SP : RegisterWithSubRegs<"sp", [SPL]>, DwarfRegNum<[7, 5, 4]>;
}
def IP : Register<"ip">, DwarfRegNum<[16]>;
-
+
// X86-64 only
let SubRegIndices = [sub_8bit] in {
def R8W : RegisterWithSubRegs<"r8w", [R8B]>, DwarfRegNum<[8, -2, -2]>;
@@ -103,8 +103,8 @@ let Namespace = "X86" in {
def EDI : RegisterWithSubRegs<"edi", [DI]>, DwarfRegNum<[5, 7, 7]>;
def EBP : RegisterWithSubRegs<"ebp", [BP]>, DwarfRegNum<[6, 4, 5]>;
def ESP : RegisterWithSubRegs<"esp", [SP]>, DwarfRegNum<[7, 5, 4]>;
- def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>;
-
+ def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>;
+
// X86-64 only
def R8D : RegisterWithSubRegs<"r8d", [R8W]>, DwarfRegNum<[8, -2, -2]>;
def R9D : RegisterWithSubRegs<"r9d", [R9W]>, DwarfRegNum<[9, -2, -2]>;
@@ -208,7 +208,7 @@ let Namespace = "X86" in {
def ST4 : Register<"st(4)">, DwarfRegNum<[37, 16, 15]>;
def ST5 : Register<"st(5)">, DwarfRegNum<[38, 17, 16]>;
def ST6 : Register<"st(6)">, DwarfRegNum<[39, 18, 17]>;
- def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>;
+ def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>;
// Status flags register
def EFLAGS : Register<"flags">;
@@ -220,7 +220,7 @@ let Namespace = "X86" in {
def ES : Register<"es">;
def FS : Register<"fs">;
def GS : Register<"gs">;
-
+
// Debug registers
def DR0 : Register<"dr0">;
def DR1 : Register<"dr1">;
@@ -230,8 +230,8 @@ let Namespace = "X86" in {
def DR5 : Register<"dr5">;
def DR6 : Register<"dr6">;
def DR7 : Register<"dr7">;
-
- // Condition registers
+
+ // Control registers
def CR0 : Register<"cr0">;
def CR1 : Register<"cr1">;
def CR2 : Register<"cr2">;
@@ -241,6 +241,13 @@ let Namespace = "X86" in {
def CR6 : Register<"cr6">;
def CR7 : Register<"cr7">;
def CR8 : Register<"cr8">;
+ def CR9 : Register<"cr9">;
+ def CR10 : Register<"cr10">;
+ def CR11 : Register<"cr11">;
+ def CR12 : Register<"cr12">;
+ def CR13 : Register<"cr13">;
+ def CR14 : Register<"cr14">;
+ def CR15 : Register<"cr15">;
// Pseudo index registers
def EIZ : Register<"eiz">;
@@ -254,10 +261,10 @@ let Namespace = "X86" in {
// implicitly defined to be the register allocation order.
//
-// List call-clobbered registers before callee-save registers. RBX, RBP, (and
+// List call-clobbered registers before callee-save registers. RBX, RBP, (and
// R12, R13, R14, and R15 for X86-64) are callee-save registers.
// In 64-mode, there are 12 additional i8 registers, SIL, DIL, BPL, SPL, and
-// R8B, ... R15B.
+// R8B, ... R15B.
// Allocate R12 and R13 last, as these require an extra byte when
// encoded in x86_64 instructions.
// FIXME: Allow AH, CH, DH, BH to be used as general-purpose registers in
@@ -292,14 +299,14 @@ def GR8 : RegisterClass<"X86", [i8], 8,
GR8Class::iterator
GR8Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
if (!Subtarget.is64Bit())
// In 32-mode, none of the 8-bit registers aliases EBP or ESP.
return begin() + 8;
- else if (RI->hasFP(MF) || MFI->getReserveFP())
+ else if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SPL or BPL.
return array_endof(X86_GR8_AO_64) - 1;
else
@@ -337,12 +344,12 @@ def GR16 : RegisterClass<"X86", [i16], 16,
GR16Class::iterator
GR16Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return array_endof(X86_GR16_AO_64) - 1;
else
@@ -350,7 +357,7 @@ def GR16 : RegisterClass<"X86", [i16], 16,
return array_endof(X86_GR16_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return begin() + 6;
else
@@ -389,12 +396,12 @@ def GR32 : RegisterClass<"X86", [i32], 32,
GR32Class::iterator
GR32Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return array_endof(X86_GR32_AO_64) - 1;
else
@@ -402,7 +409,7 @@ def GR32 : RegisterClass<"X86", [i32], 32,
return array_endof(X86_GR32_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return begin() + 6;
else
@@ -429,13 +436,13 @@ def GR64 : RegisterClass<"X86", [i64], 64,
GR64Class::iterator
GR64Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (!Subtarget.is64Bit())
return begin(); // None of these are allocatable in 32-bit.
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
return end()-3; // If so, don't allocate RIP, RSP or RBP
else
return end()-2; // If not, just don't allocate RIP or RSP
@@ -446,18 +453,16 @@ def GR64 : RegisterClass<"X86", [i64], 64,
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
// descriptor.
-def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]> {
-}
+def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]>;
// Debug registers.
def DEBUG_REG : RegisterClass<"X86", [i32], 32,
- [DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> {
-}
+ [DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]>;
// Control registers.
def CONTROL_REG : RegisterClass<"X86", [i64], 64,
- [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7, CR8]> {
-}
+ [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7, CR8,
+ CR9, CR10, CR11, CR12, CR13, CR14, CR15]>;
// GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD, GR64_ABCD - Subclasses of
// GR8, GR16, GR32, and GR64 which contain just the "a", "b", "c", and "d"
@@ -465,10 +470,8 @@ def CONTROL_REG : RegisterClass<"X86", [i64], 64,
// that support 8-bit subreg operations. On x86-64, GR16_ABCD, GR32_ABCD,
// and GR64_ABCD are classes for registers that support 8-bit h-register
// operations.
-def GR8_ABCD_L : RegisterClass<"X86", [i8], 8, [AL, CL, DL, BL]> {
-}
-def GR8_ABCD_H : RegisterClass<"X86", [i8], 8, [AH, CH, DH, BH]> {
-}
+def GR8_ABCD_L : RegisterClass<"X86", [i8], 8, [AL, CL, DL, BL]>;
+def GR8_ABCD_H : RegisterClass<"X86", [i8], 8, [AH, CH, DH, BH]>;
def GR16_ABCD : RegisterClass<"X86", [i16], 16, [AX, CX, DX, BX]> {
let SubRegClasses = [(GR8_ABCD_L sub_8bit), (GR8_ABCD_H sub_8bit_hi)];
}
@@ -493,6 +496,9 @@ def GR64_TC : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RSI, RDI,
(GR32_TC sub_32bit)];
}
+def GR64_TCW64 : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX,
+ R8, R9, R11]>;
+
// GR8_NOREX - GR8 registers which do not require a REX prefix.
def GR8_NOREX : RegisterClass<"X86", [i8], 8,
[AL, CL, DL, AH, CH, DH, BL, BH]> {
@@ -538,10 +544,10 @@ def GR16_NOREX : RegisterClass<"X86", [i16], 16,
GR16_NOREXClass::iterator
GR16_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return end() - 2;
else
@@ -562,10 +568,10 @@ def GR32_NOREX : RegisterClass<"X86", [i32], 32,
GR32_NOREXClass::iterator
GR32_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return end() - 2;
else
@@ -587,10 +593,10 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
GR64_NOREXClass::iterator
GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate RIP, RSP or RBP.
return end() - 3;
else
@@ -629,12 +635,12 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32,
GR32_NOSPClass::iterator
GR32_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate EBP.
return array_endof(X86_GR32_NOSP_AO_64) - 1;
else
@@ -642,7 +648,7 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32,
return array_endof(X86_GR32_NOSP_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate EBP.
return begin() + 6;
else
@@ -667,13 +673,13 @@ def GR64_NOSP : RegisterClass<"X86", [i64], 64,
GR64_NOSPClass::iterator
GR64_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (!Subtarget.is64Bit())
return begin(); // None of these are allocatable in 32-bit.
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
return end()-1; // If so, don't allocate RBP
else
return end(); // If not, any reg in this class is ok.
@@ -695,10 +701,10 @@ def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const
{
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF) || MFI->getReserveFP())
+ if (TFI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate RBP.
return end() - 1;
else
@@ -784,7 +790,7 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32,
}
// Generic vector registers: VR64 and VR128.
-def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64], 64,
+def VR64: RegisterClass<"X86", [x86mmx], 64,
[MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
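The hunks above swap TargetRegisterInfo::hasFP for TargetFrameLowering::hasFP in every allocation_order_end override, but the idiom itself is unchanged: when the function reserves a frame pointer, the tail of the allocation order is cut off so the stack, base, and instruction pointers are never handed to the allocator. A condensed, standalone sketch of that idiom (illustrative names, not the LLVM API):

    // Model of the allocation_order_end pattern: the reserved registers
    // sit at the end of the order, so reserving a frame pointer simply
    // returns an earlier end iterator.
    static const char *const Order64[] = { "RAX", "RCX", "RDX", /* ... */
                                           "RIP", "RSP", "RBP" };

    static const char *const *allocationOrderEnd(bool ReservesFP) {
      const char *const *End = Order64 + sizeof(Order64) / sizeof(Order64[0]);
      return ReservesFP ? End - 3   // drop RIP, RSP and RBP
                        : End - 2;  // drop RIP and RSP only
    }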
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 6297a27..42e8193 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -32,10 +32,13 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile,
- const Value *DstSV,
- uint64_t DstSVOff) const {
+ MachinePointerInfo DstPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  // If writing to a segment-relative address space, use the default lowering.
+ if (DstPtrInfo.getAddrSpace() >= 256)
+ return SDValue();
+
// If not DWORD aligned or size is more than the threshold, call the library.
// The libc version is likely to be faster for these cases. It can use the
// address value and run time information about the CPU.
@@ -133,7 +136,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
Dst, InFlag);
InFlag = Chain.getValue(1);
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
@@ -147,7 +150,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
X86::ECX,
Left, InFlag);
InFlag = Chain.getValue(1);
- Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag };
Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
} else if (BytesLeft) {
@@ -161,7 +164,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
DAG.getConstant(Offset, AddrVT)),
Src,
DAG.getConstant(BytesLeft, SizeVT),
- Align, isVolatile, DstSV, DstSVOff + Offset);
+ Align, isVolatile, DstPtrInfo.getWithOffset(Offset));
}
// TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
@@ -173,10 +176,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
- const Value *DstSV,
- uint64_t DstSVOff,
- const Value *SrcSV,
- uint64_t SrcSVOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
// This requires the copy size to be a constant, preferably
// within a subtarget-specific limit.
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -186,14 +187,29 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
if (!AlwaysInline && SizeVal > Subtarget->getMaxInlineSizeThreshold())
return SDValue();
- /// If not DWORD aligned, call the library.
- if ((Align & 3) != 0)
+ /// If not DWORD aligned, it is more efficient to call the library. However
+ /// if calling the library is not allowed (AlwaysInline), then soldier on as
+ /// the code generated here is better than the long load-store sequence we
+ /// would otherwise get.
+ if (!AlwaysInline && (Align & 3) != 0)
+ return SDValue();
+
+  // If either pointer is in a segment-relative address space, use the
+  // default lowering.
+ if (DstPtrInfo.getAddrSpace() >= 256 ||
+ SrcPtrInfo.getAddrSpace() >= 256)
return SDValue();
- // DWORD aligned
- EVT AVT = MVT::i32;
- if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
- AVT = MVT::i64;
+ MVT AVT;
+ if (Align & 1)
+ AVT = MVT::i8;
+ else if (Align & 2)
+ AVT = MVT::i16;
+ else if (Align & 4)
+ // DWORD aligned
+ AVT = MVT::i32;
+ else
+ // QWORD aligned
+ AVT = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
unsigned UBytes = AVT.getSizeInBits() / 8;
unsigned CountVal = SizeVal / UBytes;
@@ -214,7 +230,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Src, InFlag);
InFlag = Chain.getValue(1);
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops,
array_lengthof(Ops));
@@ -234,8 +250,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
DAG.getConstant(Offset, SrcVT)),
DAG.getConstant(BytesLeft, SizeVT),
Align, isVolatile, AlwaysInline,
- DstSV, DstSVOff + Offset,
- SrcSV, SrcSVOff + Offset));
+ DstPtrInfo.getWithOffset(Offset),
+ SrcPtrInfo.getWithOffset(Offset)));
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
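The rewritten memcpy lowering above no longer bails out on sub-DWORD alignment when AlwaysInline forces inlining; instead it derives the widest element type the alignment permits from the low bits of Align. A standalone model of that selection (SimpleVT is a stand-in for MVT, not the LLVM type):

    #include <cstdio>

    // The lowest set bit of the alignment bounds the widest safe move
    // size, mirroring the AVT selection in EmitTargetCodeForMemcpy.
    enum SimpleVT { i8, i16, i32, i64 };

    static SimpleVT pickElementType(unsigned Align, bool Is64Bit) {
      if (Align & 1) return i8;      // byte-aligned only
      if (Align & 2) return i16;     // 2-byte aligned
      if (Align & 4) return i32;     // DWORD aligned
      return Is64Bit ? i64 : i32;    // QWORD aligned
    }

    int main() {
      std::printf("%d\n", pickElementType(8, true)); // prints 3 (i64)
    }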
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
index 4f30f31..d1d66fe 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.h
@@ -39,8 +39,7 @@ public:
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile,
- const Value *DstSV,
- uint64_t DstSVOff) const;
+ MachinePointerInfo DstPtrInfo) const;
virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
@@ -48,10 +47,8 @@ public:
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
- const Value *DstSV,
- uint64_t DstSVOff,
- const Value *SrcSV,
- uint64_t SrcSVOff) const;
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const;
};
}
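Both memset and memcpy now refuse segment-relative operands. In the X86 backend, address spaces 256 and up denote segment-relative memory (256 for GS, 257 for FS), and the rep-string idioms emitted here implicitly address through the default segments, so lowering returns an empty SDValue to fall back on the generic expansion. The guard reduces to a one-line predicate:

    // Sketch of the guard added in both EmitTargetCodeFor* hooks above.
    static bool needsDefaultLowering(unsigned DstAS, unsigned SrcAS = 0) {
      return DstAS >= 256 || SrcAS >= 256;  // GS/FS-relative pointers
    }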
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 0d02e5e..de76856 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -1,4 +1,4 @@
-//===-- X86Subtarget.cpp - X86 Subtarget Information ------------*- C++ -*-===//
+//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,7 +18,7 @@
#include "llvm/GlobalValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallVector.h"
@@ -256,13 +256,14 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
if ((ECX >> 9) & 1) X86SSELevel = SSSE3;
if ((ECX >> 19) & 1) X86SSELevel = SSE41;
if ((ECX >> 20) & 1) X86SSELevel = SSE42;
+ // FIXME: AVX codegen support is not ready.
+ //if ((ECX >> 28) & 1) { HasAVX = true; X86SSELevel = NoMMXSSE; }
bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
HasCLMUL = IsIntel && ((ECX >> 1) & 0x1);
HasFMA3 = IsIntel && ((ECX >> 12) & 0x1);
- HasAVX = ((ECX >> 28) & 0x1);
HasAES = IsIntel && ((ECX >> 25) & 0x1);
if (IsIntel || IsAMD) {
@@ -289,6 +290,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
, X863DNowLevel(NoThreeDNow)
, HasCMov(false)
, HasX86_64(false)
+ , HasPOPCNT(false)
, HasSSE4A(false)
, HasAVX(false)
, HasAES(false)
@@ -315,11 +317,13 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
ParseSubtargetFeatures(FS, CPU);
// All X86-64 CPUs also have SSE2, however user might request no SSE via
// -mattr, so don't force SSELevel here.
+ if (HasAVX)
+ X86SSELevel = NoMMXSSE;
} else {
// Otherwise, use CPUID to auto-detect feature set.
AutoDetectSubtargetFeatures();
// Make sure SSE2 is enabled; it is available on all X86-64 CPUs.
- if (Is64Bit && X86SSELevel < SSE2)
+ if (Is64Bit && !HasAVX && X86SSELevel < SSE2)
X86SSELevel = SSE2;
}
@@ -338,9 +342,9 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
assert((!Is64Bit || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Stack alignment is 16 bytes on Darwin (both 32 and 64 bit) and for all 64
- // bit targets.
- if (isTargetDarwin() || Is64Bit)
+ // Stack alignment is 16 bytes on Darwin and Linux (both 32 and 64 bit) and
+ // for all 64-bit targets.
+ if (isTargetDarwin() || isTargetLinux() || Is64Bit)
stackAlignment = 16;
if (StackAlignment)
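AutoDetectSubtargetFeatures reads feature flags straight out of the CPUID result registers; each test above is a single-bit extraction, e.g. ECX bit 20 for SSE4.2. A trivial helper capturing the pattern (not part of the patch):

    #include <cstdint>

    static bool cpuidBit(uint32_t Reg, unsigned Bit) {
      return (Reg >> Bit) & 1u;   // e.g. cpuidBit(ECX, 20) => SSE4.2
    }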
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index 0ee91ab..8a119b4 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -65,6 +65,9 @@ protected:
///
bool HasX86_64;
+ /// HasPOPCNT - True if the processor supports POPCNT.
+ bool HasPOPCNT;
+
/// HasSSE4A - True if the processor supports SSE4A instructions.
bool HasSSE4A;
@@ -100,7 +103,7 @@ protected:
/// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
///
unsigned MaxInlineSizeThreshold;
-
+
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
@@ -150,7 +153,10 @@ public:
bool hasSSE4A() const { return HasSSE4A; }
bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
+ bool hasPOPCNT() const { return HasPOPCNT; }
bool hasAVX() const { return HasAVX; }
+ bool hasXMM() const { return hasSSE1() || hasAVX(); }
+ bool hasXMMInt() const { return hasSSE2() || hasAVX(); }
bool hasAES() const { return HasAES; }
bool hasCLMUL() const { return HasCLMUL; }
bool hasFMA3() const { return HasFMA3; }
@@ -160,23 +166,21 @@ public:
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool isTargetDarwin() const { return TargetTriple.getOS() == Triple::Darwin; }
-
+
// ELF is a reasonably sane default and the only other X86 targets we
// support are Darwin and Windows. Just use "not those".
- bool isTargetELF() const {
+ bool isTargetELF() const {
return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
}
bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
- bool isTargetMingw() const {
- return TargetTriple.getOS() == Triple::MinGW32 ||
- TargetTriple.getOS() == Triple::MinGW64; }
+ bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
bool isTargetCygMing() const {
return isTargetMingw() || isTargetCygwin();
}
-
+
/// isTargetCOFF - Return true if this is any COFF/Windows target variant.
bool isTargetCOFF() const {
return isTargetMingw() || isTargetCygwin() || isTargetWindows();
@@ -186,22 +190,12 @@ public:
return Is64Bit && (isTargetMingw() || isTargetWindows());
}
- bool isTargetWin32() const {
- return !Is64Bit && (isTargetMingw() || isTargetWindows());
+ bool isTargetEnvMacho() const {
+ return isTargetDarwin() || (TargetTriple.getEnvironment() == Triple::MachO);
}
- std::string getDataLayout() const {
- const char *p;
- if (is64Bit())
- p = "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64";
- else if (isTargetDarwin())
- p = "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-n8:16:32";
- else if (isTargetMingw() || isTargetWindows())
- p = "e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32";
- else
- p = "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32";
-
- return std::string(p);
+ bool isTargetWin32() const {
+ return !Is64Bit && (isTargetMingw() || isTargetWindows());
}
bool isPICStyleSet() const { return PICStyle != PICStyles::None; }
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index ce8636eb..889c824 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -30,10 +30,12 @@ static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
case Triple::Darwin:
return new X86MCAsmInfoDarwin(TheTriple);
case Triple::MinGW32:
- case Triple::MinGW64:
case Triple::Cygwin:
case Triple::Win32:
- return new X86MCAsmInfoCOFF(TheTriple);
+ if (TheTriple.getEnvironment() == Triple::MachO)
+ return new X86MCAsmInfoDarwin(TheTriple);
+ else
+ return new X86MCAsmInfoCOFF(TheTriple);
default:
return new X86ELFMCAsmInfo(TheTriple);
}
@@ -43,22 +45,25 @@ static MCStreamer *createMCStreamer(const Target &T, const std::string &TT,
MCContext &Ctx, TargetAsmBackend &TAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
- bool RelaxAll) {
+ bool RelaxAll,
+ bool NoExecStack) {
Triple TheTriple(TT);
switch (TheTriple.getOS()) {
case Triple::Darwin:
return createMachOStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll);
case Triple::MinGW32:
- case Triple::MinGW64:
case Triple::Cygwin:
case Triple::Win32:
- return createWinCOFFStreamer(Ctx, TAB, *_Emitter, _OS, RelaxAll);
+ if (TheTriple.getEnvironment() == Triple::MachO)
+ return createMachOStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll);
+ else
+ return createWinCOFFStreamer(Ctx, TAB, *_Emitter, _OS, RelaxAll);
default:
- return createELFStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll);
+ return createELFStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll, NoExecStack);
}
}
-extern "C" void LLVMInitializeX86Target() {
+extern "C" void LLVMInitializeX86Target() {
// Register the target.
RegisterTargetMachine<X86_32TargetMachine> X(TheX86_32Target);
RegisterTargetMachine<X86_64TargetMachine> Y(TheX86_64Target);
@@ -89,28 +94,38 @@ extern "C" void LLVMInitializeX86Target() {
X86_32TargetMachine::X86_32TargetMachine(const Target &T, const std::string &TT,
const std::string &FS)
- : X86TargetMachine(T, TT, FS, false) {
+ : X86TargetMachine(T, TT, FS, false),
+ DataLayout(getSubtargetImpl()->isTargetDarwin() ?
+ "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-n8:16:32" :
+ (getSubtargetImpl()->isTargetCygMing() ||
+ getSubtargetImpl()->isTargetWindows()) ?
+ "e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32" :
+ "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32"),
+ InstrInfo(*this),
+ TSInfo(*this),
+ TLInfo(*this),
+ JITInfo(*this) {
}
X86_64TargetMachine::X86_64TargetMachine(const Target &T, const std::string &TT,
const std::string &FS)
- : X86TargetMachine(T, TT, FS, true) {
+ : X86TargetMachine(T, TT, FS, true),
+ DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64"),
+ InstrInfo(*this),
+ TSInfo(*this),
+ TLInfo(*this),
+ JITInfo(*this) {
}
/// X86TargetMachine ctor - Create an X86 target.
///
-X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT,
+X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool is64Bit)
- : LLVMTargetMachine(T, TT),
+ : LLVMTargetMachine(T, TT),
Subtarget(TT, FS, is64Bit),
- DataLayout(Subtarget.getDataLayout()),
- FrameInfo(TargetFrameInfo::StackGrowsDown,
- Subtarget.getStackAlignment(),
- (Subtarget.isTargetWin64() ? -40 :
- (Subtarget.is64Bit() ? -8 : -4))),
- InstrInfo(*this), JITInfo(*this), TLInfo(*this), TSInfo(*this),
- ELFWriterInfo(*this) {
+ FrameLowering(*this, Subtarget),
+ ELFWriterInfo(is64Bit, true) {
DefRelocModel = getRelocationModel();
// If no relocation model was picked, default as appropriate for the target.
@@ -217,12 +232,12 @@ bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM,
JITCodeEmitter &JCE) {
// FIXME: Move this to TargetJITInfo!
// On Darwin, do not override 64-bit setting made in X86TargetMachine().
- if (DefRelocModel == Reloc::Default &&
+ if (DefRelocModel == Reloc::Default &&
(!Subtarget.isTargetDarwin() || !Subtarget.is64Bit())) {
setRelocationModel(Reloc::Static);
Subtarget.setPICStyle(PICStyles::None);
}
-
+
PM.add(createX86JITCodeEmitterPass(*this, JCE));
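The DataLayout strings that used to be computed in X86Subtarget::getDataLayout now live in the 32- and 64-bit TargetMachine constructors. Reading the 64-bit string field by field (per the LLVM data-layout syntax; the 's' field is left undecoded here):

    // "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64"
    //  e            little-endian
    //  p:64:64      64-bit pointers with 64-bit ABI alignment
    //  f64:64:64    double has 64-bit ABI and preferred alignment
    //  i64:64:64    i64 likewise
    //  f80:128:128  x87 long double padded out to 128 bits
    //  n8:16:32:64  native integer widths of the target
    const char *DL64 =
        "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64";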
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.h b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
index f9fb424..5973922 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
@@ -14,16 +14,17 @@
#ifndef X86TARGETMACHINE_H
#define X86TARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "X86.h"
#include "X86ELFWriterInfo.h"
#include "X86InstrInfo.h"
-#include "X86JITInfo.h"
-#include "X86Subtarget.h"
#include "X86ISelLowering.h"
+#include "X86FrameLowering.h"
+#include "X86JITInfo.h"
#include "X86SelectionDAGInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -31,12 +32,7 @@ class formatted_raw_ostream;
class X86TargetMachine : public LLVMTargetMachine {
X86Subtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
- TargetFrameInfo FrameInfo;
- X86InstrInfo InstrInfo;
- X86JITInfo JITInfo;
- X86TargetLowering TLInfo;
- X86SelectionDAGInfo TSInfo;
+ X86FrameLowering FrameLowering;
X86ELFWriterInfo ELFWriterInfo;
Reloc::Model DefRelocModel; // Reloc model before it's overridden.
@@ -49,20 +45,25 @@ public:
X86TargetMachine(const Target &T, const std::string &TT,
const std::string &FS, bool is64Bit);
- virtual const X86InstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual X86JITInfo *getJITInfo() { return &JITInfo; }
+ virtual const X86InstrInfo *getInstrInfo() const {
+ llvm_unreachable("getInstrInfo not implemented");
+ }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ virtual X86JITInfo *getJITInfo() {
+ llvm_unreachable("getJITInfo not implemented");
+ }
  virtual const X86Subtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const X86TargetLowering *getTargetLowering() const {
- return &TLInfo;
+ virtual const X86TargetLowering *getTargetLowering() const {
+ llvm_unreachable("getTargetLowering not implemented");
}
virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
- return &TSInfo;
+ llvm_unreachable("getSelectionDAGInfo not implemented");
}
virtual const X86RegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
+ return &getInstrInfo()->getRegisterInfo();
}
- virtual const TargetData *getTargetData() const { return &DataLayout; }
virtual const X86ELFWriterInfo *getELFWriterInfo() const {
return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
}
@@ -79,17 +80,53 @@ public:
/// X86_32TargetMachine - X86 32-bit target machine.
///
class X86_32TargetMachine : public X86TargetMachine {
+ const TargetData DataLayout; // Calculates type size & alignment
+ X86InstrInfo InstrInfo;
+ X86SelectionDAGInfo TSInfo;
+ X86TargetLowering TLInfo;
+ X86JITInfo JITInfo;
public:
X86_32TargetMachine(const Target &T, const std::string &M,
const std::string &FS);
+ virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const X86TargetLowering *getTargetLowering() const {
+ return &TLInfo;
+ }
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ return &TSInfo;
+ }
+ virtual const X86InstrInfo *getInstrInfo() const {
+ return &InstrInfo;
+ }
+ virtual X86JITInfo *getJITInfo() {
+ return &JITInfo;
+ }
};
/// X86_64TargetMachine - X86 64-bit target machine.
///
class X86_64TargetMachine : public X86TargetMachine {
+ const TargetData DataLayout; // Calculates type size & alignment
+ X86InstrInfo InstrInfo;
+ X86SelectionDAGInfo TSInfo;
+ X86TargetLowering TLInfo;
+ X86JITInfo JITInfo;
public:
X86_64TargetMachine(const Target &T, const std::string &TT,
const std::string &FS);
+ virtual const TargetData *getTargetData() const { return &DataLayout; }
+ virtual const X86TargetLowering *getTargetLowering() const {
+ return &TLInfo;
+ }
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ return &TSInfo;
+ }
+ virtual const X86InstrInfo *getInstrInfo() const {
+ return &InstrInfo;
+ }
+ virtual X86JITInfo *getJITInfo() {
+ return &JITInfo;
+ }
};
} // End llvm namespace
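The header reshuffle moves DataLayout, InstrInfo, TSInfo, TLInfo and JITInfo out of X86TargetMachine and into each concrete subclass, so every member can be constructed with the right pointer width; the base class keeps the virtual getters but traps if one is called without an override. A minimal sketch of that split (hypothetical names; std::abort stands in for llvm_unreachable):

    #include <cstdio>
    #include <cstdlib>

    struct Base {
      virtual ~Base() {}
      virtual const char *getInfo() const {
        std::fprintf(stderr, "getInfo not implemented\n");
        std::abort();                 // base must never be queried directly
      }
    };

    struct Derived32 : Base {
      const char *getInfo() const { return "32-bit info"; }  // real object
    };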
diff --git a/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index 8f06dd3..8f06dd3 100644
--- a/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
diff --git a/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td b/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
index 8107e32..b20d71f 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreCallingConv.td
@@ -24,6 +24,9 @@ def CC_XCore : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
+ // The 'nest' parameter, if any, is passed in R11.
+ CCIfNest<CCAssignToReg<[R11]>>,
+
// The first 4 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
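CCIfNest must appear before the generic i32 rule: calling-convention entries are tried in order, and a nest parameter claimed here lands in R11 (where the trampoline code added below expects it) instead of consuming one of R0-R3. A conceptual, non-LLVM restatement of that ordering:

    // Order-sensitive assignment, mirroring CC_XCore above.
    static unsigned assignXCoreReg(bool IsNest, unsigned NextGPR) {
      if (IsNest) return 11;   // R11, per the CCIfNest rule
      return NextGPR;          // otherwise R0..R3, then the stack
    }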
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.cpp
deleted file mode 100644
index f50dc96..0000000
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//===-- XCoreFrameInfo.cpp - Frame info for XCore Target ---------*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains XCore frame information that doesn't fit anywhere else
-// cleanly...
-//
-//===----------------------------------------------------------------------===//
-
-#include "XCore.h"
-#include "XCoreFrameInfo.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// XCoreFrameInfo:
-//===----------------------------------------------------------------------===//
-
-XCoreFrameInfo::XCoreFrameInfo(const TargetMachine &tm):
- TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 4, 0)
-{
- // Do nothing
-}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.h b/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.h
deleted file mode 100644
index 2c67577..0000000
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameInfo.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===-- XCoreFrameInfo.h - Frame info for XCore Target -----------*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains XCore frame information that doesn't fit anywhere else
-// cleanly...
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef XCOREFRAMEINFO_H
-#define XCOREFRAMEINFO_H
-
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class XCoreFrameInfo: public TargetFrameInfo {
-
- public:
- XCoreFrameInfo(const TargetMachine &tm);
-
- //! Stack slot size (4 bytes)
- static int stackSlotSize() {
- return 4;
- }
- };
-}
-
-#endif // XCOREFRAMEINFO_H
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
new file mode 100644
index 0000000..0578220
--- /dev/null
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -0,0 +1,387 @@
+//===-- XCoreFrameLowering.cpp - Frame info for XCore Target -----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains XCore frame information that doesn't fit anywhere else
+// cleanly...
+//
+//===----------------------------------------------------------------------===//
+
+#include "XCore.h"
+#include "XCoreFrameLowering.h"
+#include "XCoreInstrInfo.h"
+#include "XCoreMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+// helper functions. FIXME: Eliminate.
+static inline bool isImmUs(unsigned val) {
+ return val <= 11;
+}
+
+static inline bool isImmU6(unsigned val) {
+ return val < (1 << 6);
+}
+
+static inline bool isImmU16(unsigned val) {
+ return val < (1 << 16);
+}
+
+static void loadFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, int Offset, DebugLoc dl,
+ const TargetInstrInfo &TII) {
+ assert(Offset%4 == 0 && "Misaligned stack offset");
+ Offset/=4;
+ bool isU6 = isImmU6(Offset);
+ if (!isU6 && !isImmU16(Offset))
+ report_fatal_error("loadFromStack offset too big " + Twine(Offset));
+ int Opcode = isU6 ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
+ BuildMI(MBB, I, dl, TII.get(Opcode), DstReg)
+ .addImm(Offset);
+}
+
+
+static void storeToStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned SrcReg, int Offset, DebugLoc dl,
+ const TargetInstrInfo &TII) {
+ assert(Offset%4 == 0 && "Misaligned stack offset");
+ Offset/=4;
+ bool isU6 = isImmU6(Offset);
+ if (!isU6 && !isImmU16(Offset))
+ report_fatal_error("storeToStack offset too big " + Twine(Offset));
+ int Opcode = isU6 ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
+ BuildMI(MBB, I, dl, TII.get(Opcode))
+ .addReg(SrcReg)
+ .addImm(Offset);
+}
+
+
+//===----------------------------------------------------------------------===//
+// XCoreFrameLowering:
+//===----------------------------------------------------------------------===//
+
+XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0),
+ STI(sti) {
+ // Do nothing
+}
+
+bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
+ return DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo *MMI = &MF.getMMI();
+ const XCoreRegisterInfo *RegInfo =
+ static_cast<const XCoreRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ const XCoreInstrInfo &TII =
+ *static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ bool FP = hasFP(MF);
+ bool Nested = MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::Nest);
+
+ if (Nested) {
+ loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII);
+ }
+
+ // Work out frame sizes.
+ int FrameSize = MFI->getStackSize();
+ assert(FrameSize%4 == 0 && "Misaligned frame size");
+ FrameSize/=4;
+
+ bool isU6 = isImmU6(FrameSize);
+
+ if (!isU6 && !isImmU16(FrameSize)) {
+ // FIXME could emit multiple instructions.
+ report_fatal_error("emitPrologue Frame size too big: " + Twine(FrameSize));
+ }
+ bool emitFrameMoves = RegInfo->needsFrameMoves(MF);
+
+ // Do we need to allocate space on the stack?
+ if (FrameSize) {
+ bool saveLR = XFI->getUsesLR();
+ bool LRSavedOnEntry = false;
+ int Opcode;
+ if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) {
+ Opcode = (isU6) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
+ MBB.addLiveIn(XCore::LR);
+ saveLR = false;
+ LRSavedOnEntry = true;
+ } else {
+ Opcode = (isU6) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
+ }
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
+
+ if (emitFrameMoves) {
+ std::vector<MachineMove> &Moves = MMI->getFrameMoves();
+
+ // Show update of SP.
+ MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
+
+ MachineLocation SPDst(MachineLocation::VirtualFP);
+ MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4);
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+
+ if (LRSavedOnEntry) {
+ MachineLocation CSDst(MachineLocation::VirtualFP, 0);
+ MachineLocation CSSrc(XCore::LR);
+ Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
+ }
+ }
+ if (saveLR) {
+ int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
+ storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl, TII);
+ MBB.addLiveIn(XCore::LR);
+
+ if (emitFrameMoves) {
+ MCSymbol *SaveLRLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
+ MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset);
+ MachineLocation CSSrc(XCore::LR);
+ MMI->getFrameMoves().push_back(MachineMove(SaveLRLabel, CSDst, CSSrc));
+ }
+ }
+ }
+
+ if (FP) {
+ // Save R10 to the stack.
+ int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
+ storeToStack(MBB, MBBI, XCore::R10, FPSpillOffset + FrameSize*4, dl, TII);
+ // R10 is live-in. It is killed at the spill.
+ MBB.addLiveIn(XCore::R10);
+ if (emitFrameMoves) {
+ MCSymbol *SaveR10Label = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveR10Label);
+ MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset);
+ MachineLocation CSSrc(XCore::R10);
+ MMI->getFrameMoves().push_back(MachineMove(SaveR10Label, CSDst, CSSrc));
+ }
+ // Set the FP from the SP.
+ unsigned FramePtr = XCore::R10;
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr)
+ .addImm(0);
+ if (emitFrameMoves) {
+ // Show FP is now valid.
+ MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
+ MachineLocation SPDst(FramePtr);
+ MachineLocation SPSrc(MachineLocation::VirtualFP);
+ MMI->getFrameMoves().push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ }
+ }
+
+ if (emitFrameMoves) {
+ // Frame moves for callee saved.
+ std::vector<MachineMove> &Moves = MMI->getFrameMoves();
+ std::vector<std::pair<MCSymbol*, CalleeSavedInfo> >&SpillLabels =
+ XFI->getSpillLabels();
+ for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) {
+ MCSymbol *SpillLabel = SpillLabels[I].first;
+ CalleeSavedInfo &CSI = SpillLabels[I].second;
+ int Offset = MFI->getObjectOffset(CSI.getFrameIdx());
+ unsigned Reg = CSI.getReg();
+ MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
+ MachineLocation CSSrc(Reg);
+ Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc));
+ }
+ }
+}
+
+void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const XCoreInstrInfo &TII =
+ *static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+
+ bool FP = hasFP(MF);
+ if (FP) {
+ // Restore the stack pointer.
+ unsigned FramePtr = XCore::R10;
+ BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r))
+ .addReg(FramePtr);
+ }
+
+ // Work out frame sizes.
+ int FrameSize = MFI->getStackSize();
+
+ assert(FrameSize%4 == 0 && "Misaligned frame size");
+
+ FrameSize/=4;
+
+ bool isU6 = isImmU6(FrameSize);
+
+ if (!isU6 && !isImmU16(FrameSize)) {
+ // FIXME could emit multiple instructions.
+ report_fatal_error("emitEpilogue Frame size too big: " + Twine(FrameSize));
+ }
+
+ if (FrameSize) {
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+
+ if (FP) {
+ // Restore R10
+ int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
+ FPSpillOffset += FrameSize*4;
+ loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl, TII);
+ }
+ bool restoreLR = XFI->getUsesLR();
+ if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) {
+ int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
+ LRSpillOffset += FrameSize*4;
+ loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl, TII);
+ restoreLR = false;
+ }
+ if (restoreLR) {
+      // Fold the epilogue into the return instruction.
+ assert(MBBI->getOpcode() == XCore::RETSP_u6
+ || MBBI->getOpcode() == XCore::RETSP_lu6);
+ int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
+ MBB.erase(MBBI);
+ } else {
+ int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs;
+ BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize);
+ }
+ }
+}
+
+void XCoreFrameLowering::getInitialFrameState(std::vector<MachineMove> &Moves)
+ const {
+ // Initial state of the frame pointer is SP.
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(XCore::SP, 0);
+ Moves.push_back(MachineMove(0, Dst, Src));
+}
+
+bool XCoreFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return true;
+
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
+ bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);
+
+ DebugLoc DL;
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+ for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
+ it != CSI.end(); ++it) {
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(it->getReg());
+
+ unsigned Reg = it->getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true,
+ it->getFrameIdx(), RC, TRI);
+ if (emitFrameMoves) {
+ MCSymbol *SaveLabel = MF->getContext().CreateTempSymbol();
+ BuildMI(MBB, MI, DL, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLabel);
+ XFI->getSpillLabels().push_back(std::make_pair(SaveLabel, *it));
+ }
+ }
+ return true;
+}
+
+bool XCoreFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const{
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ bool AtStart = MI == MBB.begin();
+ MachineBasicBlock::iterator BeforeI = MI;
+ if (!AtStart)
+ --BeforeI;
+ for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
+ it != CSI.end(); ++it) {
+ unsigned Reg = it->getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, it->getReg(), it->getFrameIdx(),
+ RC, TRI);
+ assert(MI != MBB.begin() &&
+ "loadRegFromStackSlot didn't insert any code!");
+ // Insert in reverse order. loadRegFromStackSlot can insert multiple
+ // instructions.
+ if (AtStart)
+ MI = MBB.begin();
+ else {
+ MI = BeforeI;
+ ++MI;
+ }
+ }
+ return true;
+}
+
+void
+XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
+ const TargetRegisterClass *RC = XCore::GRRegsRegisterClass;
+ XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
+ if (LRUsed) {
+ MF.getRegInfo().setPhysRegUnused(XCore::LR);
+
+ bool isVarArg = MF.getFunction()->isVarArg();
+ int FrameIdx;
+    if (!isVarArg) {
+ // A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
+ FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true);
+ } else {
+ FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(),
+ false);
+ }
+ XFI->setUsesLR(FrameIdx);
+ XFI->setLRSpillSlot(FrameIdx);
+ }
+ if (RegInfo->requiresRegisterScavenging(MF)) {
+ // Reserve a slot close to SP or frame pointer.
+ RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ }
+ if (hasFP(MF)) {
+ // A callee save register is used to hold the FP.
+ // This needs saving / restoring in the epilogue / prologue.
+ XFI->setFPSpillSlot(MFI->CreateStackObject(RC->getSize(),
+ RC->getAlignment(),
+ false));
+ }
+}
+
+void XCoreFrameLowering::
+processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
+
+}
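loadFromStack and storeToStack in the new file scale byte offsets down to words before checking them against the short (u6) and long (lru6) instruction encodings. The range tests reduce to the following standalone checks, assuming the 4-byte stack slots used throughout:

    #include <cassert>

    static bool fitsShortForm(int ByteOffset) {
      assert(ByteOffset % 4 == 0 && "misaligned stack offset");
      unsigned Words = ByteOffset / 4;
      return Words < (1u << 6);     // LDWSP_ru6 / STWSP_ru6 range
    }

    static bool fitsLongForm(int ByteOffset) {
      unsigned Words = ByteOffset / 4;
      return Words < (1u << 16);    // lru6 forms take a u16 immediate
    }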
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
new file mode 100644
index 0000000..7da19f0
--- /dev/null
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
@@ -0,0 +1,59 @@
+//===-- XCoreFrameLowering.h - Frame info for XCore Target -------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains XCore frame information that doesn't fit anywhere else
+// cleanly...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef XCOREFRAMEINFO_H
+#define XCOREFRAMEINFO_H
+
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+ class XCoreSubtarget;
+
+ class XCoreFrameLowering: public TargetFrameLowering {
+ const XCoreSubtarget &STI;
+ public:
+ XCoreFrameLowering(const XCoreSubtarget &STI);
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+
+ void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const;
+
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
+ //! Stack slot size (4 bytes)
+ static int stackSlotSize() {
+ return 4;
+ }
+ };
+}
+
+#endif // XCOREFRAMEINFO_H
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 755ece7..fc8a07a 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -68,12 +68,9 @@ namespace {
}
// Complex Pattern Selectors.
- bool SelectADDRspii(SDNode *Op, SDValue Addr, SDValue &Base,
- SDValue &Offset);
- bool SelectADDRdpii(SDNode *Op, SDValue Addr, SDValue &Base,
- SDValue &Offset);
- bool SelectADDRcpii(SDNode *Op, SDValue Addr, SDValue &Base,
- SDValue &Offset);
+ bool SelectADDRspii(SDValue Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRdpii(SDValue Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRcpii(SDValue Addr, SDValue &Base, SDValue &Offset);
virtual const char *getPassName() const {
return "XCore DAG->DAG Pattern Instruction Selection";
@@ -91,8 +88,8 @@ FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM) {
return new XCoreDAGToDAGISel(TM);
}
-bool XCoreDAGToDAGISel::SelectADDRspii(SDNode *Op, SDValue Addr,
- SDValue &Base, SDValue &Offset) {
+bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
FrameIndexSDNode *FIN = 0;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
@@ -113,8 +110,8 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDNode *Op, SDValue Addr,
return false;
}
-bool XCoreDAGToDAGISel::SelectADDRdpii(SDNode *Op, SDValue Addr,
- SDValue &Base, SDValue &Offset) {
+bool XCoreDAGToDAGISel::SelectADDRdpii(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
if (Addr.getOpcode() == XCoreISD::DPRelativeWrapper) {
Base = Addr.getOperand(0);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
@@ -134,8 +131,8 @@ bool XCoreDAGToDAGISel::SelectADDRdpii(SDNode *Op, SDValue Addr,
return false;
}
-bool XCoreDAGToDAGISel::SelectADDRcpii(SDNode *Op, SDValue Addr,
- SDValue &Base, SDValue &Offset) {
+bool XCoreDAGToDAGISel::SelectADDRcpii(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
if (Addr.getOpcode() == XCoreISD::CPRelativeWrapper) {
Base = Addr.getOperand(0);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index abe7b2f..828d6f9 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -148,9 +148,13 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
-
- maxStoresPerMemset = 4;
- maxStoresPerMemmove = maxStoresPerMemcpy = 2;
+
+ // TRAMPOLINE is custom lowered.
+ setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+
+ maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
+ maxStoresPerMemmove = maxStoresPerMemmoveOptSize
+ = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::STORE);
@@ -177,6 +181,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADD:
case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
return SDValue();
@@ -392,24 +397,23 @@ IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
}
SDValue XCoreTargetLowering::
-LowerLOAD(SDValue Op, SelectionDAG &DAG) const
-{
+LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadSDNode *LD = cast<LoadSDNode>(Op);
assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
"Unexpected extension type");
assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
- if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
return SDValue();
- }
+
unsigned ABIAlignment = getTargetData()->
getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
// Leave aligned load alone.
- if (LD->getAlignment() >= ABIAlignment) {
+ if (LD->getAlignment() >= ABIAlignment)
return SDValue();
- }
+
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
- DebugLoc dl = Op.getDebugLoc();
+ DebugLoc DL = Op.getDebugLoc();
SDValue Base;
int64_t Offset;
@@ -419,10 +423,8 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const
// We've managed to infer better alignment information than the load
// already has. Use an aligned load.
//
- // FIXME: No new alignment information is actually passed here.
- // Should the offset really be 4?
- //
- return DAG.getLoad(getPointerTy(), dl, Chain, BasePtr, NULL, 4,
+ return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
+ MachinePointerInfo(),
false, false, 0);
}
// Lower to
@@ -436,40 +438,40 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const
SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
- SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset);
- SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset);
+ SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
+ SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);
- SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain,
- LowAddr, NULL, 4, false, false, 0);
- SDValue High = DAG.getLoad(getPointerTy(), dl, Chain,
- HighAddr, NULL, 4, false, false, 0);
- SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift);
- SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift);
- SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
+ SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
+ LowAddr, MachinePointerInfo(), false, false, 0);
+ SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
+ HighAddr, MachinePointerInfo(), false, false, 0);
+ SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
+ SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
+ SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, 2, DL);
}
if (LD->getAlignment() == 2) {
- int SVOffset = LD->getSrcValueOffset();
- SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, MVT::i32, dl, Chain,
- BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
+ SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
+ BasePtr, LD->getPointerInfo(), MVT::i16,
LD->isVolatile(), LD->isNonTemporal(), 2);
- SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
+ SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
- SDValue High = DAG.getExtLoad(ISD::EXTLOAD, MVT::i32, dl, Chain,
- HighAddr, LD->getSrcValue(), SVOffset + 2,
+ SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
+ HighAddr,
+ LD->getPointerInfo().getWithOffset(2),
MVT::i16, LD->isVolatile(),
LD->isNonTemporal(), 2);
- SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High,
+ SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
DAG.getConstant(16, MVT::i32));
- SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted);
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
+ SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, 2, DL);
}
// Lower to a call to __misaligned_load(BasePtr).
@@ -486,12 +488,12 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const
false, false, 0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
- Args, DAG, dl);
+ Args, DAG, DL);
SDValue Ops[] =
{ CallResult.first, CallResult.second };
- return DAG.getMergeValues(Ops, 2, dl);
+ return DAG.getMergeValues(Ops, 2, DL);
}
SDValue XCoreTargetLowering::
@@ -515,18 +517,17 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
if (ST->getAlignment() == 2) {
- int SVOffset = ST->getSrcValueOffset();
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
DAG.getConstant(16, MVT::i32));
SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
- ST->getSrcValue(), SVOffset, MVT::i16,
+ ST->getPointerInfo(), MVT::i16,
ST->isVolatile(), ST->isNonTemporal(),
2);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
- ST->getSrcValue(), SVOffset + 2,
+ ST->getPointerInfo().getWithOffset(2),
MVT::i16, ST->isVolatile(),
ST->isNonTemporal(), 2);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
@@ -757,16 +758,18 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
EVT VT = Node->getValueType(0);
SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
- Node->getOperand(1), V, 0, false, false, 0);
+ Node->getOperand(1), MachinePointerInfo(V),
+ false, false, 0);
// Increment the pointer, VAList, to the next vararg
SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
DAG.getConstant(VT.getSizeInBits(),
getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), V, 0,
- false, false, 0);
+ Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
+ MachinePointerInfo(V), false, false, 0);
// Load the actual argument out of the pointer VAList
- return DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0, false, false, 0);
+ return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
+ false, false, 0);
}
SDValue XCoreTargetLowering::
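The LowerVAARG hunk above is the canonical three-step va_arg expansion: load the current pointer out of the va_list slot, bump it past the argument, store it back, then load the argument itself through the old pointer. In plain C terms, a sketch of the shape only, for a 4-byte argument:

    static int vaarg_i32(char **VAListSlot) {
      char *P = *VAListSlot;    // 1. load the current arg pointer
      *VAListSlot = P + 4;      // 2. advance and store it back
      return *(int *)P;         // 3. load the argument itself
    }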
@@ -778,9 +781,8 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
MachineFunction &MF = DAG.getMachineFunction();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0,
- false, false, 0);
+ return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
+ MachinePointerInfo(), false, false, 0);
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -796,6 +798,64 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
RegInfo->getFrameRegister(MF), MVT::i32);
}
+SDValue XCoreTargetLowering::
+LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Trmp = Op.getOperand(1); // trampoline
+ SDValue FPtr = Op.getOperand(2); // nested function
+ SDValue Nest = Op.getOperand(3); // 'nest' parameter value
+
+ const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+
+ // .align 4
+ // LDAPF_u10 r11, nest
+ // LDW_2rus r11, r11[0]
+ // STWSP_ru6 r11, sp[0]
+ // LDAPF_u10 r11, fptr
+ // LDW_2rus r11, r11[0]
+ // BAU_1r r11
+ // nest:
+ // .word nest
+ // fptr:
+ // .word fptr
+ SDValue OutChains[5];
+
+ SDValue Addr = Trmp;
+
+ DebugLoc dl = Op.getDebugLoc();
+ OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
+ Addr, MachinePointerInfo(TrmpAddr), false, false,
+ 0);
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
+ DAG.getConstant(4, MVT::i32));
+ OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
+ Addr, MachinePointerInfo(TrmpAddr, 4), false,
+ false, 0);
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
+ DAG.getConstant(8, MVT::i32));
+ OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
+ Addr, MachinePointerInfo(TrmpAddr, 8), false,
+ false, 0);
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
+ DAG.getConstant(12, MVT::i32));
+ OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
+ MachinePointerInfo(TrmpAddr, 12), false, false,
+ 0);
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
+ DAG.getConstant(16, MVT::i32));
+ OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
+ MachinePointerInfo(TrmpAddr, 16), false, false,
+ 0);
+
+ SDValue Ops[] =
+ { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5) };
+ return DAG.getMergeValues(Ops, 2, dl);
+}
+
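The five stores above build a 20-byte trampoline image: three fixed instruction words (0x0a3cd805, 0xd80456c0, 0x27fb0a3c, matching the assembly sequence in the comment), then the nest value and the nested function address that the code sequence loads through r11. An illustrative layout; the struct name is an assumption:

    #include <stdint.h>

    // Offsets mirror the MachinePointerInfo(TrmpAddr, N) annotations above.
    struct XCoreTrampolineImage {
      uint32_t Code[3];   // offsets 0, 4, 8: fixed instruction words
      uint32_t NestValue; // offset 12: loaded into r11 before the branch
      uint32_t FnAddr;    // offset 16: nested function, reached via bau r11
    };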
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -929,7 +989,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
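MVT::Flag becomes MVT::Glue here and in the .td changes below; upstream renamed it because the type expresses gluing adjacent nodes together rather than a condition flag. The change is mechanical:

    // Before: SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);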
@@ -1035,7 +1095,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
- unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize();
+ unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
unsigned LRSaveSize = StackSlotSize;
@@ -1068,7 +1128,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > StackSlotSize) {
errs() << "LowerFormalArguments Unhandled argument type: "
- << (unsigned)VA.getLocVT().getSimpleVT().SimpleTy
+ << EVT(VA.getLocVT()).getEVTString()
<< "\n";
}
// Create the frame index object for this incoming parameter...
@@ -1079,7 +1139,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0,
+ InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
@@ -1111,8 +1172,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
RegInfo.addLiveIn(ArgRegs[i], VReg);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
// Move argument from virt reg -> stack
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
- false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
}
if (!MemOps.empty())
@@ -1443,9 +1504,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
LD->getBasePtr(),
DAG.getConstant(StoreBits/8, MVT::i32),
- Alignment, false, ST->getSrcValue(),
- ST->getSrcValueOffset(), LD->getSrcValue(),
- LD->getSrcValueOffset());
+ Alignment, false, ST->getPointerInfo(),
+ LD->getPointerInfo());
}
}
break;
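getMemmove likewise drops the two (SrcValue, offset) pairs in favor of the memory operands' own MachinePointerInfo. A sketch restating the call above, assuming the 2.9-era signature; emitMemmove is an illustrative wrapper:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue emitMemmove(SelectionDAG &DAG, DebugLoc dl, SDValue Chain,
                               StoreSDNode *ST, LoadSDNode *LD,
                               unsigned Bytes, unsigned Align) {
      return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                            DAG.getConstant(Bytes, MVT::i32), Align,
                            /*isVolatile=*/false,
                            ST->getPointerInfo(), LD->getPointerInfo());
    }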
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
index febc198..7e5dd2e 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -147,6 +147,7 @@ namespace llvm {
SDValue LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
std::vector<unsigned>
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index ad00046..9cb6a7d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -384,74 +384,10 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
.addImm(0);
}
-bool XCoreInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty()) {
- return true;
- }
- MachineFunction *MF = MBB.getParent();
- XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
-
- bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
- it != CSI.end(); ++it) {
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(it->getReg());
-
- unsigned Reg = it->getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- storeRegToStackSlot(MBB, MI, Reg, true,
- it->getFrameIdx(), RC, &RI);
- if (emitFrameMoves) {
- MCSymbol *SaveLabel = MF->getContext().CreateTempSymbol();
- BuildMI(MBB, MI, DL, get(XCore::PROLOG_LABEL)).addSym(SaveLabel);
- XFI->getSpillLabels().push_back(std::make_pair(SaveLabel, *it));
- }
- }
- return true;
-}
-
-bool XCoreInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const
-{
- bool AtStart = MI == MBB.begin();
- MachineBasicBlock::iterator BeforeI = MI;
- if (!AtStart)
- --BeforeI;
- for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
- it != CSI.end(); ++it) {
- unsigned Reg = it->getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- loadRegFromStackSlot(MBB, MI, it->getReg(),
- it->getFrameIdx(),
- RC, &RI);
- assert(MI != MBB.begin() &&
- "loadRegFromStackSlot didn't insert any code!");
- // Insert in reverse order. loadRegFromStackSlot can insert multiple
- // instructions.
- if (AtStart)
- MI = MBB.begin();
- else {
- MI = BeforeI;
- ++MI;
- }
- }
- return true;
-}
-
/// ReverseBranchCondition - Return the inverse opcode of the
/// specified Branch instruction.
bool XCoreInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
-{
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert((Cond.size() == 2) &&
"Invalid XCore branch condition!");
Cond[0].setImm(GetOppositeBranchCondition((XCore::CondCode)Cond[0].getImm()));
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
index d2b116e..977fe8d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -75,15 +75,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
virtual bool ReverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index 6b3b39b..38cc734 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -29,11 +29,11 @@ include "XCoreInstrFormats.td"
// Call
def SDT_XCoreBranchLink : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTBrind,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInGlue]>;
def SDT_XCoreBR_JT : SDTypeProfile<0, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
@@ -66,9 +66,9 @@ def SDT_XCoreCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
SDTCisVT<1, i32> ]>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_XCoreCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOutGlue]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XCoreCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// Instruction Pattern Stuff
@@ -610,8 +610,15 @@ def LDC_lru6 : _FLRU6<
[(set GRRegs:$dst, immU16:$b)]>;
}
+def SETC_ru6 : _FRU6<(outs), (ins GRRegs:$r, i32imm:$val),
+ "setc res[$r], $val",
+ [(int_xcore_setc GRRegs:$r, immU6:$val)]>;
+
+def SETC_lru6 : _FLRU6<(outs), (ins GRRegs:$r, i32imm:$val),
+ "setc res[$r], $val",
+ [(int_xcore_setc GRRegs:$r, immU16:$val)]>;
+
// Operand register - U6
-// TODO setc
let isBranch = 1, isTerminator = 1 in {
defm BRFT: FRU6_LRU6_branch<"bt">;
defm BRBT: FRU6_LRU6_branch<"bt">;
@@ -720,9 +727,8 @@ def NEG : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b),
"neg $dst, $b",
[(set GRRegs:$dst, (ineg GRRegs:$b))]>;
-// TODO setd, eet, eef, getts, setpt, outct, inct, chkct, outt, intt, out,
-// in, outshr, inshr, testct, testwct, tinitpc, tinitdp, tinitsp, tinitcp,
-// tsetmr, sext (reg), zext (reg)
+// TODO setd, eet, eef, getts, setpt, outshr, inshr, testwct, tinitpc, tinitdp,
+// tinitsp, tinitcp, tsetmr, sext (reg), zext (reg)
let Constraints = "$src1 = $dst" in {
let neverHasSideEffects = 1 in
def SEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2),
@@ -748,6 +754,50 @@ def MKMSK_2r : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$size),
"mkmsk $dst, $size",
[(set GRRegs:$dst, (add (shl 1, GRRegs:$size), 0xffffffff))]>;
+def GETR_rus : _FRUS<(outs GRRegs:$dst), (ins i32imm:$type),
+ "getr $dst, $type",
+ [(set GRRegs:$dst, (int_xcore_getr immUs:$type))]>;
+
+def OUTCT_2r : _F2R<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "outct res[$r], $val",
+ [(int_xcore_outct GRRegs:$r, GRRegs:$val)]>;
+
+def OUTCT_rus : _F2R<(outs), (ins GRRegs:$r, i32imm:$val),
+ "outct res[$r], $val",
+ [(int_xcore_outct GRRegs:$r, immUs:$val)]>;
+
+def OUTT_2r : _F2R<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "outt res[$r], $val",
+ [(int_xcore_outt GRRegs:$r, GRRegs:$val)]>;
+
+def OUT_2r : _F2R<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "out res[$r], $val",
+ [(int_xcore_out GRRegs:$r, GRRegs:$val)]>;
+
+def INCT_2r : _F2R<(outs GRRegs:$dst), (ins GRRegs:$r),
+ "inct $dst, res[$r]",
+ [(set GRRegs:$dst, (int_xcore_inct GRRegs:$r))]>;
+
+def INT_2r : _F2R<(outs GRRegs:$dst), (ins GRRegs:$r),
+ "int $dst, res[$r]",
+ [(set GRRegs:$dst, (int_xcore_int GRRegs:$r))]>;
+
+def IN_2r : _F2R<(outs GRRegs:$dst), (ins GRRegs:$r),
+ "in $dst, res[$r]",
+ [(set GRRegs:$dst, (int_xcore_in GRRegs:$r))]>;
+
+def CHKCT_2r : _F2R<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "chkct res[$r], $val",
+ [(int_xcore_chkct GRRegs:$r, GRRegs:$val)]>;
+
+def CHKCT_rus : _F2R<(outs), (ins GRRegs:$r, i32imm:$val),
+ "chkct res[$r], $val",
+ [(int_xcore_chkct GRRegs:$r, immUs:$val)]>;
+
+def SETD_2r : _F2R<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "setd res[$r], $val",
+ [(int_xcore_setd GRRegs:$r, GRRegs:$val)]>;
+
// Two operand long
// TODO settw, setclk, setrdy, setpsc, endin, peek,
// getd, testlcl, tinitlr, getps, setps
@@ -763,8 +813,12 @@ def CLZ_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src),
"clz $dst, $src",
[(set GRRegs:$dst, (ctlz GRRegs:$src))]>;
+def SETC_l2r : _FRU6<(outs), (ins GRRegs:$r, GRRegs:$val),
+ "setc res[$r], $val",
+ [(int_xcore_setc GRRegs:$r, GRRegs:$val)]>;
+
// One operand short
-// TODO edu, eeu, waitet, waitef, freer, tstart, msync, mjoin, syncr, clrtp
+// TODO edu, eeu, waitet, waitef, tstart, msync, mjoin, syncr, clrtp
// setdp, setcp, setv, setev, kcall
// dgetreg
let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
@@ -805,6 +859,10 @@ def BLA_1r : _F1R<(outs), (ins GRRegs:$addr, variable_ops),
[(XCoreBranchLink GRRegs:$addr)]>;
}
+def FREER_1r : _F1R<(outs), (ins GRRegs:$r),
+ "freer res[$r]",
+ [(int_xcore_freer GRRegs:$r)]>;
+
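These definitions pattern-match the new llvm.xcore.* intrinsics onto XS1 resource instructions. A hypothetical IR-level use and the selections it should produce (the immediates are arbitrary):

    //   %r = call i32 @llvm.xcore.getr(i32 2)      ; getr  r, 2
    //   call void @llvm.xcore.outct(i32 %r, i32 1) ; outct res[r], 1
    //   call void @llvm.xcore.freer(i32 %r)        ; freer res[r]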
// Zero operand short
// TODO waiteu, clre, ssync, freet, ldspc, stspc, ldssr, stssr, ldsed, stsed,
// stet, geted, getet, getkep, getksp, setkep, getid, kret, dcall, dret,
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index f82e598..56c0879 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -21,7 +21,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -84,11 +84,13 @@ const unsigned* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
Reserved.set(XCore::CP);
Reserved.set(XCore::DP);
Reserved.set(XCore::SP);
Reserved.set(XCore::LR);
- if (hasFP(MF)) {
+ if (TFI->hasFP(MF)) {
Reserved.set(XCore::R10);
}
return Reserved;
@@ -96,12 +98,10 @@ BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
bool
XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
- // TODO can we estimate stack size?
- return hasFP(MF);
-}
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-bool XCoreRegisterInfo::hasFP(const MachineFunction &MF) const {
- return DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects();
+ // TODO can we estimate stack size?
+ return TFI->hasFP(MF);
}
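With hasFP() moved from XCoreRegisterInfo onto the frame-lowering object, register-info code reaches it through the target machine. The lookup used throughout the rest of this file, as a self-contained sketch:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Target/TargetFrameLowering.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    static bool usesFramePointer(const MachineFunction &MF) {
      const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
      return TFI->hasFP(MF);  // same test getFrameRegister below performs
    }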
// This function eliminates ADJCALLSTACKDOWN,
@@ -109,7 +109,9 @@ bool XCoreRegisterInfo::hasFP(const MachineFunction &MF) const {
void XCoreRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (!TFI->hasReservedCallFrame(MF)) {
// Turn the adjcallstackdown instruction into 'extsp <amt>' and the
// adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
MachineInstr *Old = I;
@@ -118,14 +120,13 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+ unsigned Align = TFI->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
assert(Amount%4 == 0);
Amount /= 4;
-
+
bool isU6 = isImmU6(Amount);
-
if (!isU6 && !isImmU16(Amount)) {
// FIX could emit multiple instructions in this case.
#ifndef NDEBUG
@@ -172,6 +173,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = FrameOp.getIndex();
MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
int StackSize = MF.getFrameInfo()->getStackSize();
@@ -197,7 +199,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Offset/=4;
- bool FP = hasFP(MF);
+ bool FP = TFI->hasFP(MF);
unsigned Reg = MI.getOperand(0).getReg();
bool isKill = MI.getOpcode() == XCore::STWFI && MI.getOperand(0).isKill();
@@ -292,48 +294,6 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MBB.erase(II);
}
-void
-XCoreRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
- const TargetRegisterClass *RC = XCore::GRRegsRegisterClass;
- XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
- if (LRUsed) {
- MF.getRegInfo().setPhysRegUnused(XCore::LR);
-
- bool isVarArg = MF.getFunction()->isVarArg();
- int FrameIdx;
- if (! isVarArg) {
- // A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
- FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true);
- } else {
- FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(),
- false);
- }
- XFI->setUsesLR(FrameIdx);
- XFI->setLRSpillSlot(FrameIdx);
- }
- if (requiresRegisterScavenging(MF)) {
- // Reserve a slot close to SP or frame pointer.
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
- if (hasFP(MF)) {
- // A callee save register is used to hold the FP.
- // This needs saving / restoring in the epilogue / prologue.
- XFI->setFPSpillSlot(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
-}
-
-void XCoreRegisterInfo::
-processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
-
-}
-
void XCoreRegisterInfo::
loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DstReg, int64_t Value, DebugLoc dl) const {
@@ -346,229 +306,19 @@ loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
BuildMI(MBB, I, dl, TII.get(Opcode), DstReg).addImm(Value);
}
-void XCoreRegisterInfo::
-storeToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, int Offset, DebugLoc dl) const {
- assert(Offset%4 == 0 && "Misaligned stack offset");
- Offset/=4;
- bool isU6 = isImmU6(Offset);
- if (!isU6 && !isImmU16(Offset))
- report_fatal_error("storeToStack offset too big " + Twine(Offset));
- int Opcode = isU6 ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
- BuildMI(MBB, I, dl, TII.get(Opcode))
- .addReg(SrcReg)
- .addImm(Offset);
-}
-
-void XCoreRegisterInfo::
-loadFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DstReg, int Offset, DebugLoc dl) const {
- assert(Offset%4 == 0 && "Misaligned stack offset");
- Offset/=4;
- bool isU6 = isImmU6(Offset);
- if (!isU6 && !isImmU16(Offset))
- report_fatal_error("loadFromStack offset too big " + Twine(Offset));
- int Opcode = isU6 ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
- BuildMI(MBB, I, dl, TII.get(Opcode), DstReg)
- .addImm(Offset);
-}
-
-void XCoreRegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo *MMI = &MF.getMMI();
- XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- bool FP = hasFP(MF);
-
- // Work out frame sizes.
- int FrameSize = MFI->getStackSize();
-
- assert(FrameSize%4 == 0 && "Misaligned frame size");
-
- FrameSize/=4;
-
- bool isU6 = isImmU6(FrameSize);
-
- if (!isU6 && !isImmU16(FrameSize)) {
- // FIXME could emit multiple instructions.
- report_fatal_error("emitPrologue Frame size too big: " + Twine(FrameSize));
- }
- bool emitFrameMoves = needsFrameMoves(MF);
-
- // Do we need to allocate space on the stack?
- if (FrameSize) {
- bool saveLR = XFI->getUsesLR();
- bool LRSavedOnEntry = false;
- int Opcode;
- if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) {
- Opcode = (isU6) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
- MBB.addLiveIn(XCore::LR);
- saveLR = false;
- LRSavedOnEntry = true;
- } else {
- Opcode = (isU6) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
- }
- BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
-
- if (emitFrameMoves) {
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
-
- // Show update of SP.
- MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
-
- MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
-
- if (LRSavedOnEntry) {
- MachineLocation CSDst(MachineLocation::VirtualFP, 0);
- MachineLocation CSSrc(XCore::LR);
- Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
- }
- }
- if (saveLR) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4, dl);
- MBB.addLiveIn(XCore::LR);
-
- if (emitFrameMoves) {
- MCSymbol *SaveLRLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
- MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset);
- MachineLocation CSSrc(XCore::LR);
- MMI->getFrameMoves().push_back(MachineMove(SaveLRLabel, CSDst, CSSrc));
- }
- }
- }
-
- if (FP) {
- // Save R10 to the stack.
- int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
- storeToStack(MBB, MBBI, XCore::R10, FPSpillOffset + FrameSize*4, dl);
- // R10 is live-in. It is killed at the spill.
- MBB.addLiveIn(XCore::R10);
- if (emitFrameMoves) {
- MCSymbol *SaveR10Label = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveR10Label);
- MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset);
- MachineLocation CSSrc(XCore::R10);
- MMI->getFrameMoves().push_back(MachineMove(SaveR10Label, CSDst, CSSrc));
- }
- // Set the FP from the SP.
- unsigned FramePtr = XCore::R10;
- BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr)
- .addImm(0);
- if (emitFrameMoves) {
- // Show FP is now valid.
- MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
- MachineLocation SPDst(FramePtr);
- MachineLocation SPSrc(MachineLocation::VirtualFP);
- MMI->getFrameMoves().push_back(MachineMove(FrameLabel, SPDst, SPSrc));
- }
- }
-
- if (emitFrameMoves) {
- // Frame moves for callee saved.
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
- std::vector<std::pair<MCSymbol*, CalleeSavedInfo> >&SpillLabels =
- XFI->getSpillLabels();
- for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) {
- MCSymbol *SpillLabel = SpillLabels[I].first;
- CalleeSavedInfo &CSI = SpillLabels[I].second;
- int Offset = MFI->getObjectOffset(CSI.getFrameIdx());
- unsigned Reg = CSI.getReg();
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc));
- }
- }
-}
-
-void XCoreRegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- DebugLoc dl = MBBI->getDebugLoc();
-
- bool FP = hasFP(MF);
-
- if (FP) {
- // Restore the stack pointer.
- unsigned FramePtr = XCore::R10;
- BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r))
- .addReg(FramePtr);
- }
-
- // Work out frame sizes.
- int FrameSize = MFI->getStackSize();
-
- assert(FrameSize%4 == 0 && "Misaligned frame size");
-
- FrameSize/=4;
-
- bool isU6 = isImmU6(FrameSize);
-
- if (!isU6 && !isImmU16(FrameSize)) {
- // FIXME could emit multiple instructions.
- report_fatal_error("emitEpilogue Frame size too big: " + Twine(FrameSize));
- }
-
- if (FrameSize) {
- XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
-
- if (FP) {
- // Restore R10
- int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot());
- FPSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset, dl);
- }
- bool restoreLR = XFI->getUsesLR();
- if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) {
- int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot());
- LRSpillOffset += FrameSize*4;
- loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset, dl);
- restoreLR = false;
- }
- if (restoreLR) {
- // Fold prologue into return instruction
- assert(MBBI->getOpcode() == XCore::RETSP_u6
- || MBBI->getOpcode() == XCore::RETSP_lu6);
- int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
- BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
- MBB.erase(MBBI);
- } else {
- int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs;
- BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(FrameSize);
- }
- }
-}
-
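The prologue/epilogue emission deleted here does not vanish: judging from the XCoreFrameLowering.h include and the FrameLowering(Subtarget) member added further down, it moves into a new XCoreFrameLowering class whose file is not part of this excerpt. A plausible declaration, offered purely as an assumption:

    #include "llvm/Target/TargetFrameLowering.h"

    namespace llvm {
    class XCoreSubtarget;

    // Assumed shape; the real XCoreFrameLowering.h is not shown in this diff.
    class XCoreFrameLowering : public TargetFrameLowering {
    public:
      XCoreFrameLowering(const XCoreSubtarget &STI);
      void emitPrologue(MachineFunction &MF) const;
      void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
      bool hasFP(const MachineFunction &MF) const;
      static int stackSlotSize();  // used by LowerCCCArguments above
    };
    } // end namespace llvm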
int XCoreRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
return XCoreGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}
unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- bool FP = hasFP(MF);
-
- return FP ? XCore::R10 : XCore::SP;
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ return TFI->hasFP(MF) ? XCore::R10 : XCore::SP;
}
unsigned XCoreRegisterInfo::getRARegister() const {
return XCore::LR;
}
-void XCoreRegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
- const {
- // Initial state of the frame pointer is SP.
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(XCore::SP, 0);
- Moves.push_back(MachineMove(0, Dst, Src));
-}
-
#include "XCoreGenRegisterInfo.inc"
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index e636c1c..2185755 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -48,8 +48,6 @@ public:
bool requiresRegisterScavenging(const MachineFunction &MF) const;
- bool hasFP(const MachineFunction &MF) const;
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
@@ -57,18 +55,9 @@ public:
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
-
- void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
// Debug information queries.
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
//! Return the array of argument passing registers
/*!
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
index 62daf5d..765f717 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
@@ -61,8 +61,8 @@ def GRRegs : RegisterClass<"XCore", [i32], 32,
GRRegsClass::iterator
GRRegsClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- if (RI->hasFP(MF))
+ const TargetFrameLowering *TFI = TM.getFrameLowering();
+ if (TFI->hasFP(MF))
return end()-1; // don't allocate R10
else
return end();
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index b0013eb..30da2c8 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -27,7 +27,7 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, const std::string &TT,
DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
"i16:16:32-i32:32:32-i64:32:32-n32"),
InstrInfo(),
- FrameInfo(*this),
+ FrameLowering(Subtarget),
TLInfo(*this),
TSInfo(*this) {
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
index 14073ba..24daadc 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
@@ -16,7 +16,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
-#include "XCoreFrameInfo.h"
+#include "XCoreFrameLowering.h"
#include "XCoreSubtarget.h"
#include "XCoreInstrInfo.h"
#include "XCoreISelLowering.h"
@@ -28,7 +28,7 @@ class XCoreTargetMachine : public LLVMTargetMachine {
XCoreSubtarget Subtarget;
const TargetData DataLayout; // Calculates type size & alignment
XCoreInstrInfo InstrInfo;
- XCoreFrameInfo FrameInfo;
+ XCoreFrameLowering FrameLowering;
XCoreTargetLowering TLInfo;
XCoreSelectionDAGInfo TSInfo;
public:
@@ -36,7 +36,9 @@ public:
const std::string &FS);
virtual const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const XCoreFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual const XCoreFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
virtual const XCoreSubtarget *getSubtargetImpl() const { return &Subtarget; }
virtual const XCoreTargetLowering *getTargetLowering() const {
return &TLInfo;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
index cdf5a53..7f4e1c1 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.cpp
@@ -12,6 +12,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
@@ -19,31 +20,31 @@ void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
DataSection =
- Ctx.getELFSection(".dp.data", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE |
- MCSectionELF::XCORE_SHF_DP_SECTION,
- SectionKind::getDataRel(), false);
+ Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getDataRel());
BSSSection =
- Ctx.getELFSection(".dp.bss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE |
- MCSectionELF::XCORE_SHF_DP_SECTION,
- SectionKind::getBSS(), false);
+ Ctx.getELFSection(".dp.bss", ELF::SHT_NOBITS,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE |
+ ELF::XCORE_SHF_DP_SECTION,
+ SectionKind::getBSS());
MergeableConst4Section =
- Ctx.getELFSection(".cp.rodata.cst4", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE |
- MCSectionELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst4(), false);
+ Ctx.getELFSection(".cp.rodata.cst4", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getMergeableConst4());
MergeableConst8Section =
- Ctx.getELFSection(".cp.rodata.cst8", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE |
- MCSectionELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst8(), false);
+ Ctx.getELFSection(".cp.rodata.cst8", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getMergeableConst8());
MergeableConst16Section =
- Ctx.getELFSection(".cp.rodata.cst16", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE |
- MCSectionELF::XCORE_SHF_CP_SECTION,
- SectionKind::getMergeableConst16(), false);
+ Ctx.getELFSection(".cp.rodata.cst16", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_MERGE |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getMergeableConst16());
// TLS globals are lowered in the backend to arrays indexed by the current
// thread id. After lowering they require no special handling by the linker
@@ -52,10 +53,10 @@ void XCoreTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
TLSBSSSection = BSSSection;
ReadOnlySection =
- Ctx.getELFSection(".cp.rodata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::XCORE_SHF_CP_SECTION,
- SectionKind::getReadOnlyWithRel(), false);
+ Ctx.getELFSection(".cp.rodata", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |
+ ELF::XCORE_SHF_CP_SECTION,
+ SectionKind::getReadOnlyWithRel());
// Dynamic linking is not supported. Data with relocations is placed in the
// same section as data without relocations.
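ELF section types and flags move from nested MCSectionELF constants into the llvm::ELF namespace (hence the new Support/ELF.h include), and getELFSection drops its trailing bool. The new call shape, restated as a sketch:

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCSectionELF.h"
    #include "llvm/Support/ELF.h"
    using namespace llvm;

    // Create the XCore data-pointer section the way Initialize() above does.
    static const MCSection *makeDPDataSection(MCContext &Ctx) {
      return Ctx.getELFSection(".dp.data", ELF::SHT_PROGBITS,
                               ELF::SHF_ALLOC | ELF::SHF_WRITE |
                               ELF::XCORE_SHF_DP_SECTION,
                               SectionKind::getDataRel());
    }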
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 0c77e1f..0c650cf 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -39,7 +39,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
@@ -67,7 +66,9 @@ namespace {
virtual bool runOnSCC(CallGraphSCC &SCC);
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
- : CallGraphSCCPass(ID), maxElements(maxElements) {}
+ : CallGraphSCCPass(ID), maxElements(maxElements) {
+ initializeArgPromotionPass(*PassRegistry::getPassRegistry());
+ }
/// A vector used to hold the indices of a single GEP instruction
typedef std::vector<uint64_t> IndicesVector;
@@ -84,8 +85,12 @@ namespace {
}
char ArgPromotion::ID = 0;
-INITIALIZE_PASS(ArgPromotion, "argpromotion",
- "Promote 'by reference' arguments to scalars", false, false);
+INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
+ "Promote 'by reference' arguments to scalars", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
+ "Promote 'by reference' arguments to scalars", false, false)
Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
return new ArgPromotion(maxElements);
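This commit converts passes tree-wide to the new registration idiom: the constructor calls initialize<Pass>Pass(), and INITIALIZE_PASS_BEGIN/_DEPENDENCY/_END spell out the dependency graph that the old single INITIALIZE_PASS macro could not. A skeleton, where MyPass and its strings are hypothetical:

    #include "llvm/InitializePasses.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace llvm {
      void initializeMyPassPass(PassRegistry &);  // the macros define this
    }

    namespace {
      struct MyPass : public ModulePass {
        static char ID;
        MyPass() : ModulePass(ID) {
          initializeMyPassPass(*PassRegistry::getPassRegistry());
        }
        virtual bool runOnModule(Module &M) { return false; }
      };
    }

    char MyPass::ID = 0;
    INITIALIZE_PASS_BEGIN(MyPass, "mypass", "My hypothetical pass",
                          false, false)
    INITIALIZE_PASS_DEPENDENCY(FindUsedTypes)
    INITIALIZE_PASS_END(MyPass, "mypass", "My hypothetical pass",
                        false, false)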
@@ -130,47 +135,74 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
if (PointerArgs.empty()) return 0;
// Second check: make sure that all callers are direct callers. We can't
- // transform functions that have indirect callers.
- if (F->hasAddressTaken())
- return 0;
-
+ // transform functions that have indirect callers. Also see if the function
+ // is self-recursive.
+ bool isSelfRecursive = false;
+ for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
+ UI != E; ++UI) {
+ CallSite CS(*UI);
+ // Must be a direct call.
+ if (CS.getInstruction() == 0 || !CS.isCallee(UI)) return 0;
+
+ if (CS.getInstruction()->getParent()->getParent() == F)
+ isSelfRecursive = true;
+ }
+
// Check to see which arguments are promotable. If an argument is promotable,
// add it to ArgsToPromote.
SmallPtrSet<Argument*, 8> ArgsToPromote;
SmallPtrSet<Argument*, 8> ByValArgsToTransform;
for (unsigned i = 0; i != PointerArgs.size(); ++i) {
bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal);
+ Argument *PtrArg = PointerArgs[i].first;
+ const Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
// If this is a byval argument, and if the aggregate type is small, just
// pass the elements, which is always safe.
- Argument *PtrArg = PointerArgs[i].first;
if (isByVal) {
- const Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
if (maxElements > 0 && STy->getNumElements() > maxElements) {
DEBUG(dbgs() << "argpromotion disable promoting argument '"
<< PtrArg->getName() << "' because it would require adding more"
<< " than " << maxElements << " arguments to the function.\n");
- } else {
- // If all the elements are single-value types, we can promote it.
- bool AllSimple = true;
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- if (!STy->getElementType(i)->isSingleValueType()) {
- AllSimple = false;
- break;
- }
-
- // Safe to transform, don't even bother trying to "promote" it.
- // Passing the elements as a scalar will allow scalarrepl to hack on
- // the new alloca we introduce.
- if (AllSimple) {
- ByValArgsToTransform.insert(PtrArg);
- continue;
+ continue;
+ }
+
+ // If all the elements are single-value types, we can promote it.
+ bool AllSimple = true;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ if (!STy->getElementType(i)->isSingleValueType()) {
+ AllSimple = false;
+ break;
}
}
+
+ // Safe to transform, don't even bother trying to "promote" it.
+ // Passing the elements as a scalar will allow scalarrepl to hack on
+ // the new alloca we introduce.
+ if (AllSimple) {
+ ByValArgsToTransform.insert(PtrArg);
+ continue;
+ }
}
}
+ // If the argument is a recursive type and we're in a recursive
+ // function, we could end up infinitely peeling the function argument.
+ if (isSelfRecursive) {
+ if (const StructType *STy = dyn_cast<StructType>(AgTy)) {
+ bool RecursiveType = false;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ if (STy->getElementType(i) == PtrArg->getType()) {
+ RecursiveType = true;
+ break;
+ }
+ }
+ if (RecursiveType)
+ continue;
+ }
+ }
+
// Otherwise, see if we can promote the pointer to its value.
if (isSafeToPromoteArgument(PtrArg, isByVal))
ArgsToPromote.insert(PtrArg);
@@ -183,22 +215,9 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
return DoPromotion(F, ArgsToPromote, ByValArgsToTransform);
}
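The new isSelfRecursive guard prevents infinite peeling: promoting a pointer argument whose struct contains a field of the same pointer type would introduce a loaded value that is itself promotable, with no fixed point. A hypothetical illustration of the hazard:

    // argpromotion on 'sum' would want to pass N->Next (same type as N) as a
    // fresh argument, which would again be promotable, and so on forever.
    struct Node {
      int Val;
      Node *Next;
    };

    static int sum(Node *N) {
      return N ? N->Val + sum(N->Next) : 0;
    }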
-/// IsAlwaysValidPointer - Return true if the specified pointer is always legal
-/// to load.
-static bool IsAlwaysValidPointer(Value *V) {
- if (isa<AllocaInst>(V) || isa<GlobalVariable>(V)) return true;
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V))
- return IsAlwaysValidPointer(GEP->getOperand(0));
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::GetElementPtr)
- return IsAlwaysValidPointer(CE->getOperand(0));
-
- return false;
-}
-
-/// AllCalleesPassInValidPointerForArgument - Return true if we can prove that
+/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
-/// all callees pass in a valid pointer for the specified function argument.
+/// all callers pass in a valid pointer for the specified function argument.
-static bool AllCalleesPassInValidPointerForArgument(Argument *Arg) {
+static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
Function *Callee = Arg->getParent();
unsigned ArgNo = std::distance(Callee->arg_begin(),
@@ -211,7 +230,7 @@ static bool AllCalleesPassInValidPointerForArgument(Argument *Arg) {
CallSite CS(*UI);
assert(CS && "Should only have direct calls!");
- if (!IsAlwaysValidPointer(CS.getArgument(ArgNo)))
+ if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
return false;
}
return true;
@@ -318,7 +337,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
GEPIndicesSet ToPromote;
// If the pointer is always valid, any load with first index 0 is valid.
- if (isByVal || AllCalleesPassInValidPointerForArgument(Arg))
+ if (isByVal || AllCallersPassInValidPointerForArgument(Arg))
SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
// First, iterate the entry block and mark loads of (geps of) arguments as
@@ -434,8 +453,6 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
SmallPtrSet<BasicBlock*, 16> TranspBlocks;
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
- if (!TD) return false; // Without TargetData, assume the worst.
for (unsigned i = 0, e = Loads.size(); i != e; ++i) {
// Check to see if the load is invalidated from the start of the block to
@@ -443,11 +460,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
LoadInst *Load = Loads[i];
BasicBlock *BB = Load->getParent();
- const PointerType *LoadTy =
- cast<PointerType>(Load->getPointerOperand()->getType());
- unsigned LoadSize =(unsigned)TD->getTypeStoreSize(LoadTy->getElementType());
-
- if (AA.canInstructionRangeModify(BB->front(), *Load, Arg, LoadSize))
+ AliasAnalysis::Location Loc = AA.getLocation(Load);
+ if (AA.canInstructionRangeModify(BB->front(), *Load, Loc))
return false; // Pointer is invalidated!
// Now check every path from the entry block to the load for transparency.
@@ -458,7 +472,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
for (idf_ext_iterator<BasicBlock*, SmallPtrSet<BasicBlock*, 16> >
I = idf_ext_begin(P, TranspBlocks),
E = idf_ext_end(P, TranspBlocks); I != E; ++I)
- if (AA.canBasicBlockModify(**I, Arg, LoadSize))
+ if (AA.canBasicBlockModify(**I, Loc))
return false;
}
}
@@ -694,6 +708,9 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// of the previous load.
LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
newLoad->setAlignment(OrigLoad->getAlignment());
+ // Transfer the TBAA info too.
+ newLoad->setMetadata(LLVMContext::MD_tbaa,
+ OrigLoad->getMetadata(LLVMContext::MD_tbaa));
Args.push_back(newLoad);
AA.copyValue(OrigLoad, Args.back());
}
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index 64e8d79..a21efce 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -33,7 +33,9 @@ STATISTIC(NumMerged, "Number of global constants merged");
namespace {
struct ConstantMerge : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- ConstantMerge() : ModulePass(ID) {}
+ ConstantMerge() : ModulePass(ID) {
+ initializeConstantMergePass(*PassRegistry::getPassRegistry());
+ }
// run - For this pass, process all of the globals in the module,
// eliminating duplicate constants.
@@ -44,7 +46,7 @@ namespace {
char ConstantMerge::ID = 0;
INITIALIZE_PASS(ConstantMerge, "constmerge",
- "Merge Duplicate Global Constants", false, false);
+ "Merge Duplicate Global Constants", false, false)
ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }
@@ -63,6 +65,18 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
UsedValues.insert(GV);
}
+// Returns true if A is a better canonical constant than B.
+static bool IsBetterCanonical(const GlobalVariable &A,
+                              const GlobalVariable &B) {
+ if (!A.hasLocalLinkage() && B.hasLocalLinkage())
+ return true;
+
+ if (A.hasLocalLinkage() && !B.hasLocalLinkage())
+ return false;
+
+ return A.hasUnnamedAddr();
+}
+
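The canonical-selection order this encodes: an externally visible global beats a local one (locals can be deleted after merging, externally visible ones cannot), and among equals a global already marked unnamed_addr is preferred since its address is not significant. A worked example on hypothetical globals:

    // Given three constants with identical initializers:
    //   @a = internal constant ...      ; local
    //   @b = constant ...               ; externally visible
    //   @c = unnamed_addr constant ...  ; externally visible, address-free
    // @c is kept as the canonical copy (falling back to @b without it), and
    // the second loop below rewrites and deletes only local duplicates
    // such as @a.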
bool ConstantMerge::runOnModule(Module &M) {
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
@@ -83,44 +97,76 @@ bool ConstantMerge::runOnModule(Module &M) {
// second level constants have initializers which point to the globals that
// were just merged.
while (1) {
- // First pass: identify all globals that can be merged together, filling in
- // the Replacements vector. We cannot do the replacement in this pass
- // because doing so may cause initializers of other globals to be rewritten,
- // invalidating the Constant* pointers in CMap.
- //
+
+ // First: Find the canonical constants others will be merged with.
for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
GVI != E; ) {
GlobalVariable *GV = GVI++;
-
+
// If this GV is dead, remove it.
GV->removeDeadConstantUsers();
if (GV->use_empty() && GV->hasLocalLinkage()) {
GV->eraseFromParent();
continue;
}
-
- // Only process constants with initializers in the default addres space.
- if (!GV->isConstant() ||!GV->hasDefinitiveInitializer() ||
- GV->getType()->getAddressSpace() != 0 || !GV->getSection().empty() ||
+
+ // Only process constants with initializers in the default address space.
+ if (!GV->isConstant() || !GV->hasDefinitiveInitializer() ||
+ GV->getType()->getAddressSpace() != 0 || GV->hasSection() ||
// Don't touch values marked with attribute(used).
UsedGlobals.count(GV))
continue;
-
-
-
+
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
GlobalVariable *&Slot = CMap[Init];
- if (Slot == 0) { // Nope, add it to the map.
+      // If this is the first constant we find, or if the old one is local,
+      // replace it with the current one. If the current one is externally
+      // visible it cannot be replaced, but it can be the canonical constant
+      // we merge with.
+      if (Slot == 0 || IsBetterCanonical(*GV, *Slot)) {
Slot = GV;
- } else if (GV->hasLocalLinkage()) { // Yup, this is a duplicate!
- // Make all uses of the duplicate constant use the canonical version.
- Replacements.push_back(std::make_pair(GV, Slot));
}
}
+ // Second: identify all globals that can be merged together, filling in
+ // the Replacements vector. We cannot do the replacement in this pass
+ // because doing so may cause initializers of other globals to be rewritten,
+ // invalidating the Constant* pointers in CMap.
+ for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
+ GVI != E; ) {
+ GlobalVariable *GV = GVI++;
+
+ // Only process constants with initializers in the default address space.
+ if (!GV->isConstant() || !GV->hasDefinitiveInitializer() ||
+ GV->getType()->getAddressSpace() != 0 || GV->hasSection() ||
+ // Don't touch values marked with attribute(used).
+ UsedGlobals.count(GV))
+ continue;
+
+    // We can only replace constants that have local linkage.
+ if (!GV->hasLocalLinkage())
+ continue;
+
+ Constant *Init = GV->getInitializer();
+
+ // Check to see if the initializer is already known.
+ GlobalVariable *Slot = CMap[Init];
+
+ if (!Slot || Slot == GV)
+ continue;
+
+ if (!Slot->hasUnnamedAddr() && !GV->hasUnnamedAddr())
+ continue;
+
+ if (!GV->hasUnnamedAddr())
+ Slot->setUnnamedAddr(false);
+
+ // Make all uses of the duplicate constant use the canonical version.
+ Replacements.push_back(std::make_pair(GV, Slot));
+ }
+
if (Replacements.empty())
return MadeChange;
CMap.clear();
@@ -133,6 +179,8 @@ bool ConstantMerge::runOnModule(Module &M) {
Replacements[i].first->replaceAllUsesWith(Replacements[i].second);
// Delete the global value from the module.
+ assert(Replacements[i].first->hasLocalLinkage() &&
+ "Refusing to delete an externally visible global variable.");
Replacements[i].first->eraseFromParent();
}
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 47df235..b423221 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -39,7 +39,8 @@ using namespace llvm;
STATISTIC(NumArgumentsEliminated, "Number of unread args removed");
STATISTIC(NumRetValsEliminated , "Number of unused return values removed");
-
+STATISTIC(NumArgumentsReplacedWithUndef,
+ "Number of unread args replaced with undef");
namespace {
/// DAE - The dead argument elimination pass.
///
@@ -126,7 +127,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- DAE() : ModulePass(ID) {}
+ DAE() : ModulePass(ID) {
+ initializeDAEPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M);
@@ -146,12 +149,13 @@ namespace {
void PropagateLiveness(const RetOrArg &RA);
bool RemoveDeadStuffFromFunction(Function *F);
bool DeleteDeadVarargs(Function &Fn);
+ bool RemoveDeadArgumentsFromCallers(Function &Fn);
};
}
char DAE::ID = 0;
-INITIALIZE_PASS(DAE, "deadargelim", "Dead Argument Elimination", false, false);
+INITIALIZE_PASS(DAE, "deadargelim", "Dead Argument Elimination", false, false)
namespace {
/// DAH - DeadArgumentHacking pass - Same as dead argument elimination, but
@@ -168,7 +172,7 @@ namespace {
char DAH::ID = 0;
INITIALIZE_PASS(DAH, "deadarghaX0r",
"Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)",
- false, false);
+ false, false)
/// createDeadArgEliminationPass - This pass removes arguments from functions
/// which are not used by the body of the function.
@@ -285,6 +289,55 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
return true;
}
+/// RemoveDeadArgumentsFromCallers - Checks if the given function has any
+/// arguments that are unused, and changes the caller parameters to be undefined
+/// instead.
+bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn) {
+ if (Fn.isDeclaration())
+ return false;
+
+ // Functions with local linkage should already have been handled.
+ if (Fn.hasLocalLinkage())
+ return false;
+
+ if (Fn.use_empty())
+ return false;
+
+ llvm::SmallVector<unsigned, 8> UnusedArgs;
+ for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end();
+ I != E; ++I) {
+ Argument *Arg = I;
+
+ if (Arg->use_empty() && !Arg->hasByValAttr())
+ UnusedArgs.push_back(Arg->getArgNo());
+ }
+
+ if (UnusedArgs.empty())
+ return false;
+
+ bool Changed = false;
+
+ for (Function::use_iterator I = Fn.use_begin(), E = Fn.use_end();
+ I != E; ++I) {
+ CallSite CS(*I);
+ if (!CS || !CS.isCallee(I))
+ continue;
+
+ // Now go through all unused args and replace them with "undef".
+ for (unsigned I = 0, E = UnusedArgs.size(); I != E; ++I) {
+ unsigned ArgNo = UnusedArgs[I];
+
+ Value *Arg = CS.getArgument(ArgNo);
+ CS.setArgument(ArgNo, UndefValue::get(Arg->getType()));
+ ++NumArgumentsReplacedWithUndef;
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
+
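Its effect at a call site, shown on hypothetical IR: for an externally visible function the signature cannot change, so unused parameters keep their slot, but callers stop having to materialize the value.

    // Before and after, where @f's second parameter has no uses in its body:
    //   before:  %r = call i32 @f(i32 %x, i32 %expensive)
    //   after:   %r = call i32 @f(i32 %x, i32 undef)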
/// Convenience function that returns the number of return values. It returns 0
/// for void functions and 1 for functions not returning a struct. It returns
/// the number of struct elements for functions returning a struct.
@@ -791,7 +844,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else if (New->getType()->isVoidTy()) {
// Our return value has uses, but they will get removed later on.
// Replace by null for now.
- Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
+ if (!Call->getType()->isX86_MMXTy())
+ Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
} else {
assert(RetTy->isStructTy() &&
"Return type changed, but not into a void. The old return type"
@@ -854,7 +908,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else {
// If this argument is dead, replace any uses of it with null constants
// (these are guaranteed to become unused later on).
- I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
+ if (!I->getType()->isX86_MMXTy())
+ I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
}
// If we change the return value of the function we must rewrite any return
@@ -935,5 +990,14 @@ bool DAE::runOnModule(Module &M) {
Function *F = I++;
Changed |= RemoveDeadStuffFromFunction(F);
}
+
+ // Finally, look for any unused parameters in functions with non-local
+ // linkage and replace the passed in parameters with undef.
+ for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ Function& F = *I;
+
+ Changed |= RemoveDeadArgumentsFromCallers(F);
+ }
+
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
index 5dc50c5..a509931 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -26,7 +26,9 @@ STATISTIC(NumKilled, "Number of unused typenames removed from symtab");
namespace {
struct DTE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- DTE() : ModulePass(ID) {}
+ DTE() : ModulePass(ID) {
+ initializeDTEPass(*PassRegistry::getPassRegistry());
+ }
// doPassInitialization - For this pass, it removes global symbol table
// entries for primitive types. These are never used for linking in GCC and
@@ -45,7 +47,10 @@ namespace {
}
char DTE::ID = 0;
-INITIALIZE_PASS(DTE, "deadtypeelim", "Dead Type Elimination", false, false);
+INITIALIZE_PASS_BEGIN(DTE, "deadtypeelim", "Dead Type Elimination",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(FindUsedTypes)
+INITIALIZE_PASS_END(DTE, "deadtypeelim", "Dead Type Elimination", false, false)
ModulePass *llvm::createDeadTypeEliminationPass() {
return new DTE();
diff --git a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
index 45c5fe7..9d432de 100644
--- a/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -50,24 +50,22 @@ namespace {
// Visit the GlobalVariables.
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- if (!I->isDeclaration()) {
- if (I->hasLocalLinkage())
- I->setVisibility(GlobalValue::HiddenVisibility);
- I->setLinkage(GlobalValue::ExternalLinkage);
- if (deleteStuff == Named.count(I))
- I->setInitializer(0);
- }
+ I != E; ++I) {
+ if (I->hasLocalLinkage())
+ I->setVisibility(GlobalValue::HiddenVisibility);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration())
+ I->setInitializer(0);
+ }
// Visit the Functions.
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (!I->isDeclaration()) {
- if (I->hasLocalLinkage())
- I->setVisibility(GlobalValue::HiddenVisibility);
- I->setLinkage(GlobalValue::ExternalLinkage);
- if (deleteStuff == Named.count(I))
- I->deleteBody();
- }
+ for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ if (I->hasLocalLinkage())
+ I->setVisibility(GlobalValue::HiddenVisibility);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration())
+ I->deleteBody();
+ }
return true;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 6165ba0..95decec 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -23,10 +23,10 @@
#include "llvm/CallGraphSCCPass.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
@@ -41,7 +41,9 @@ STATISTIC(NumNoAlias, "Number of function returns marked noalias");
namespace {
struct FunctionAttrs : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- FunctionAttrs() : CallGraphSCCPass(ID) {}
+ FunctionAttrs() : CallGraphSCCPass(ID), AA(0) {
+ initializeFunctionAttrsPass(*PassRegistry::getPassRegistry());
+ }
// runOnSCC - Analyze the SCC, performing the transformation if possible.
bool runOnSCC(CallGraphSCC &SCC);
@@ -61,67 +63,25 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
CallGraphSCCPass::getAnalysisUsage(AU);
}
- bool PointsToLocalMemory(Value *V);
+ private:
+ AliasAnalysis *AA;
};
}
char FunctionAttrs::ID = 0;
-INITIALIZE_PASS(FunctionAttrs, "functionattrs",
- "Deduce function attributes", false, false);
+INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs",
+ "Deduce function attributes", false, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(FunctionAttrs, "functionattrs",
+ "Deduce function attributes", false, false)
Pass *llvm::createFunctionAttrsPass() { return new FunctionAttrs(); }
-/// PointsToLocalMemory - Returns whether the given pointer value points to
-/// memory that is local to the function. Global constants are considered
-/// local to all functions.
-bool FunctionAttrs::PointsToLocalMemory(Value *V) {
- SmallVector<Value*, 16> Worklist;
- unsigned MaxLookup = 8;
-
- Worklist.push_back(V);
-
- do {
- V = Worklist.pop_back_val()->getUnderlyingObject();
-
- // An alloca instruction defines local memory.
- if (isa<AllocaInst>(V))
- continue;
-
- // A global constant counts as local memory for our purposes.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (!GV->isConstant())
- return false;
- continue;
- }
-
- // If both select values point to local memory, then so does the select.
- if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
- Worklist.push_back(SI->getTrueValue());
- Worklist.push_back(SI->getFalseValue());
- continue;
- }
-
- // If all values incoming to a phi node point to local memory, then so does
- // the phi.
- if (PHINode *PN = dyn_cast<PHINode>(V)) {
- // Don't bother inspecting phi nodes with many operands.
- if (PN->getNumIncomingValues() > MaxLookup)
- return false;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- Worklist.push_back(PN->getIncomingValue(i));
- continue;
- }
-
- return false;
- } while (!Worklist.empty() && --MaxLookup);
-
- return Worklist.empty();
-}
-
/// AddReadAttrs - Deduce readonly/readnone attributes for the SCC.
bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
SmallPtrSet<Function*, 8> SCCNodes;
@@ -141,14 +101,15 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
// External node - may write memory. Just give up.
return false;
- if (F->doesNotAccessMemory())
+ AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(F);
+ if (MRB == AliasAnalysis::DoesNotAccessMemory)
// Already perfect!
continue;
// Definitions with weak linkage may be overridden at linktime with
// something that writes memory, so treat them like declarations.
if (F->isDeclaration() || F->mayBeOverridden()) {
- if (!F->onlyReadsMemory())
+ if (!AliasAnalysis::onlyReadsMemory(MRB))
// May write memory. Just give up.
return false;
@@ -163,32 +124,62 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
// Some instructions can be ignored even if they read or write memory.
// Detect these now, skipping to the next instruction if one is found.
CallSite CS(cast<Value>(I));
- if (CS && CS.getCalledFunction()) {
+ if (CS) {
// Ignore calls to functions in the same SCC.
- if (SCCNodes.count(CS.getCalledFunction()))
+ if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
continue;
- // Ignore intrinsics that only access local memory.
- if (unsigned id = CS.getCalledFunction()->getIntrinsicID())
- if (AliasAnalysis::getIntrinsicModRefBehavior(id) ==
- AliasAnalysis::AccessesArguments) {
- // Check that all pointer arguments point to local memory.
+ AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(CS);
+ // If the call doesn't access arbitrary memory, we may be able to
+ // figure out something.
+ if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
+ // If the call does access argument pointees, check each argument.
+ if (AliasAnalysis::doesAccessArgPointees(MRB))
+ // Check whether all pointer arguments point to local memory, and
+ // ignore calls that only access local memory.
for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI) {
Value *Arg = *CI;
- if (Arg->getType()->isPointerTy() && !PointsToLocalMemory(Arg))
- // Writes memory. Just give up.
- return false;
+ if (Arg->getType()->isPointerTy()) {
+ AliasAnalysis::Location Loc(Arg,
+ AliasAnalysis::UnknownSize,
+ I->getMetadata(LLVMContext::MD_tbaa));
+ if (!AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) {
+ if (MRB & AliasAnalysis::Mod)
+ // Writes non-local memory. Give up.
+ return false;
+ if (MRB & AliasAnalysis::Ref)
+ // Ok, it reads non-local memory.
+ ReadsMemory = true;
+ }
+ }
}
- // Only reads and writes local memory.
- continue;
- }
- } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- // Ignore loads from local memory.
- if (PointsToLocalMemory(LI->getPointerOperand()))
continue;
+ }
+ // The call could access any memory. If that includes writes, give up.
+ if (MRB & AliasAnalysis::Mod)
+ return false;
+ // If it reads, note it.
+ if (MRB & AliasAnalysis::Ref)
+ ReadsMemory = true;
+ continue;
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ // Ignore non-volatile loads from local memory.
+ if (!LI->isVolatile()) {
+ AliasAnalysis::Location Loc = AA->getLocation(LI);
+ if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
+ continue;
+ }
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- // Ignore stores to local memory.
- if (PointsToLocalMemory(SI->getPointerOperand()))
+ // Ignore non-volatile stores to local memory.
+ if (!SI->isVolatile()) {
+ AliasAnalysis::Location Loc = AA->getLocation(SI);
+ if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
+ continue;
+ }
+ } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
+ // Ignore vaargs on local memory.
+ AliasAnalysis::Location Loc = AA->getLocation(VI);
+ if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
}
@@ -198,10 +189,6 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
// Writes memory. Just give up.
return false;
- if (isMalloc(I))
- // malloc claims not to write memory! PR3754.
- return false;
-
// If this instruction may read memory, remember that.
ReadsMemory |= I->mayReadFromMemory();
}
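The loop above folds each instruction's behavior into the two ModRefBehavior bits, giving up on readonly as soon as a write escapes and downgrading readnone to readonly on a read. A self-contained sketch of that bit logic (the flag values are stand-ins, not LLVM's actual encoding):

    #include <cassert>

    // Stand-ins for the AliasAnalysis::Mod / AliasAnalysis::Ref tests above.
    enum ModRefFlags { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

    // Returns false when the deduction must give up; sets ReadsMemory when
    // readnone is no longer possible but readonly still is.
    static bool foldBehavior(unsigned MRB, bool &ReadsMemory) {
      if (MRB & Mod) return false;        // may write arbitrary memory
      if (MRB & Ref) ReadsMemory = true;  // reads, so at best readonly
      return true;
    }

    int main() {
      bool Reads = false;
      assert(foldBehavior(NoModRef, Reads) && !Reads);  // still readnone
      assert(foldBehavior(Ref, Reads) && Reads);        // downgraded to readonly
      assert(!foldBehavior(ModRef, Reads));             // must give up
    }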
@@ -384,6 +371,8 @@ bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) {
}
bool FunctionAttrs::runOnSCC(CallGraphSCC &SCC) {
+ AA = &getAnalysis<AliasAnalysis>();
+
bool Changed = AddReadAttrs(SCC);
Changed |= AddNoCaptureAttrs(SCC);
Changed |= AddNoAliasAttrs(SCC);
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
index aa18601..2b427aa 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -31,7 +31,9 @@ STATISTIC(NumVariables, "Number of global variables removed");
namespace {
struct GlobalDCE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- GlobalDCE() : ModulePass(ID) {}
+ GlobalDCE() : ModulePass(ID) {
+ initializeGlobalDCEPass(*PassRegistry::getPassRegistry());
+ }
// run - Do the GlobalDCE pass on the specified module, optionally updating
// the specified callgraph to reflect the changes.
@@ -52,7 +54,7 @@ namespace {
char GlobalDCE::ID = 0;
INITIALIZE_PASS(GlobalDCE, "globaldce",
- "Dead Global Elimination", false, false);
+ "Dead Global Elimination", false, false)
ModulePass *llvm::createGlobalDCEPass() { return new GlobalDCE(); }
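GlobalDCE's new constructor shows the registration idiom this patch rolls out across the pass library. A sketch with a hypothetical pass (MyPass and initializeMyPassPass are illustrative names; the initialize* function is what the INITIALIZE_PASS macro generates):

    namespace {
      struct MyPass : public ModulePass {
        static char ID;
        MyPass() : ModulePass(ID) {
          // Register eagerly so the pass manager can resolve this pass even
          // when it is constructed directly rather than looked up by ID.
          initializeMyPassPass(*PassRegistry::getPassRegistry());
        }
      };
    }
    char MyPass::ID = 0;
    INITIALIZE_PASS(MyPass, "mypass", "Illustrative pass", false, false)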
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index a77af54..d4cb712 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -40,6 +40,7 @@
using namespace llvm;
STATISTIC(NumMarked , "Number of globals marked constant");
+STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
@@ -55,11 +56,14 @@ STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
namespace {
+ struct GlobalStatus;
struct GlobalOpt : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
}
static char ID; // Pass identification, replacement for typeid
- GlobalOpt() : ModulePass(ID) {}
+ GlobalOpt() : ModulePass(ID) {
+ initializeGlobalOptPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M);
@@ -69,13 +73,16 @@ namespace {
bool OptimizeGlobalVars(Module &M);
bool OptimizeGlobalAliases(Module &M);
bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
- bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI);
+ bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI);
+ bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI,
+ const SmallPtrSet<const PHINode*, 16> &PHIUsers,
+ const GlobalStatus &GS);
};
}
char GlobalOpt::ID = 0;
INITIALIZE_PASS(GlobalOpt, "globalopt",
- "Global Variable Optimizer", false, false);
+ "Global Variable Optimizer", false, false)
ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
@@ -85,6 +92,9 @@ namespace {
/// about it. If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
+ /// isCompared - True if the global's address is used in a comparison.
+ bool isCompared;
+
/// isLoaded - True if the global is ever loaded. If the global isn't ever
/// loaded it can be deleted.
bool isLoaded;
@@ -129,10 +139,11 @@ struct GlobalStatus {
/// HasPHIUser - Set to true if this global has a user that is a PHI node.
bool HasPHIUser;
-
- GlobalStatus() : isLoaded(false), StoredType(NotStored), StoredOnceValue(0),
- AccessingFunction(0), HasMultipleAccessingFunctions(false),
- HasNonInstructionUser(false), HasPHIUser(false) {}
+
+ GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
+ StoredOnceValue(0), AccessingFunction(0),
+ HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
+ HasPHIUser(false) {}
};
}
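For context on the new isCompared flag: any comparison against the global's address observes address identity, so AnalyzeGlobal records it. A portable illustration of the kind of user that sets the flag:

    static const int Sentinel = 0;

    // Uses &Sentinel as a value in an icmp-like comparison, so GS.isCompared
    // would be set; merely loading Sentinel would not set it.
    bool isSentinel(const int *p) { return p == &Sentinel; }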
@@ -165,6 +176,11 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
const User *U = *UI;
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
GS.HasNonInstructionUser = true;
+
+ // If the result of the constantexpr isn't a pointer type, then we won't
+ // know to expect it in various places. Just reject early.
+ if (!isa<PointerType>(CE->getType())) return true;
+
if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
} else if (const Instruction *I = dyn_cast<Instruction>(U)) {
if (!GS.HasMultipleAccessingFunctions) {
@@ -221,7 +237,7 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
GS.HasPHIUser = true;
} else if (isa<CmpInst>(I)) {
- // Nothing to analyse.
+ GS.isCompared = true;
} else if (isa<MemTransferInst>(I)) {
const MemTransferInst *MTI = cast<MemTransferInst>(I);
if (MTI->getArgOperand(0) == V)
@@ -308,7 +324,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
Changed |= CleanupConstantGlobalUsers(CE, SubInit);
- } else if (CE->getOpcode() == Instruction::BitCast &&
+ } else if (CE->getOpcode() == Instruction::BitCast &&
CE->getType()->isPointerTy()) {
// Pointer cast, delete any stores and memsets to the global.
Changed |= CleanupConstantGlobalUsers(CE, 0);
@@ -324,7 +340,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
// and will invalidate our notion of what Init is.
Constant *SubInit = 0;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
- ConstantExpr *CE =
+ ConstantExpr *CE =
dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@@ -361,7 +377,7 @@ static bool isSafeSROAElementUse(Value *V) {
// We might have a dead and dangling constant hanging off of here.
if (Constant *C = dyn_cast<Constant>(V))
return SafeToDestroyConstant(C);
-
+
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
@@ -371,15 +387,15 @@ static bool isSafeSROAElementUse(Value *V) {
// Stores *to* the pointer are ok.
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getOperand(0) != V;
-
+
// Otherwise, it must be a GEP.
GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
if (GEPI == 0) return false;
-
+
if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
return false;
-
+
for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
I != E; ++I)
if (!isSafeSROAElementUse(*I))
@@ -393,11 +409,11 @@ static bool isSafeSROAElementUse(Value *V) {
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
// The user of the global must be a GEP Inst or a ConstantExpr GEP.
- if (!isa<GetElementPtrInst>(U) &&
- (!isa<ConstantExpr>(U) ||
+ if (!isa<GetElementPtrInst>(U) &&
+ (!isa<ConstantExpr>(U) ||
cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
return false;
-
+
// Check to see if this ConstantExpr GEP is SRA'able. In particular, we
// don't like < 3 operand CE's, and we don't like non-constant integer
// indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
@@ -409,18 +425,18 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
++GEPI; // Skip over the pointer index.
-
+
// If this is a use of an array allocation, do a bit more checking for sanity.
if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
uint64_t NumElements = AT->getNumElements();
ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
-
+
// Check to make sure that index falls within the array. If not,
// something funny is going on, so we won't do the optimization.
//
if (Idx->getZExtValue() >= NumElements)
return false;
-
+
// We cannot scalar repl this level of the array unless any array
// sub-indices are in-range constants. In particular, consider:
// A[0][i]. We cannot know that the user isn't doing invalid things like
@@ -441,7 +457,7 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
"Indexed GEP type is not array, vector, or struct!");
continue;
}
-
+
ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
return false;
@@ -465,7 +481,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
}
return true;
}
-
+
/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable. This opens the door for other optimizations by exposing the
@@ -476,7 +492,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
-
+
assert(GV->hasLocalLinkage() && !GV->isConstant());
Constant *Init = GV->getInitializer();
const Type *Ty = Init->getType();
@@ -488,7 +504,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
unsigned StartAlignment = GV->getAlignment();
if (StartAlignment == 0)
StartAlignment = TD.getABITypeAlignment(GV->getType());
-
+
if (const StructType *STy = dyn_cast<StructType>(Ty)) {
NewGlobals.reserve(STy->getNumElements());
const StructLayout &Layout = *TD.getStructLayout(STy);
@@ -503,7 +519,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
GV->getType()->getAddressSpace());
Globals.insert(GV, NGV);
NewGlobals.push_back(NGV);
-
+
// Calculate the known alignment of the field. If the original aggregate
// had 256 byte alignment for example, something might depend on that:
// propagate info to each field.
@@ -522,7 +538,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
if (NumElements > 16 && GV->hasNUsesOrMore(16))
return 0; // It's not worth it.
NewGlobals.reserve(NumElements);
-
+
uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
@@ -537,7 +553,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
GV->getType()->getAddressSpace());
Globals.insert(GV, NGV);
NewGlobals.push_back(NGV);
-
+
// Calculate the known alignment of the field. If the original aggregate
// had 256 byte alignment for example, something might depend on that:
// propagate info to each field.
@@ -549,7 +565,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
if (NewGlobals.empty())
return 0;
-
+
DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);
Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
@@ -615,7 +631,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
}
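What SRAGlobal accomplishes, modeled in portable C++: an internal aggregate global becomes one scalar global per field, each of which later passes can analyze and promote independently. The names below are illustrative, not the pass's actual suffixing:

    struct Pair { int a; int b; };
    static Pair g = {1, 2};      // before: one aggregate global

    static int g_f0 = 1;         // after: one global per field
    static int g_f1 = 2;

    int sumBefore() { return g.a + g.b; }
    int sumAfter()  { return g_f0 + g_f1; }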
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
-/// value will trap if the value is dynamically null. PHIs keeps track of any
+/// value will trap if the value is dynamically null. PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
SmallPtrSet<const PHINode*, 8> &PHIs) {
@@ -757,7 +773,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
// Keep track of whether we are able to remove all the uses of the global
// other than the store that defines it.
bool AllNonStoreUsesGone = true;
-
+
// Replace all uses of loads with uses of uses of the stored value.
for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
User *GlobalUser = *GUI++;
@@ -830,7 +846,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
ConstantInt *NElements,
TargetData* TD) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
-
+
const Type *GlobalType;
if (NElements->getZExtValue() == 1)
GlobalType = AllocTy;
@@ -840,14 +856,14 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// Create the new global variable. The contents of the malloc'd memory is
// undefined, so initialize with an undef value.
- GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
+ GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
GlobalType, false,
GlobalValue::InternalLinkage,
UndefValue::get(GlobalType),
GV->getName()+".body",
GV,
GV->isThreadLocal());
-
+
// If there are bitcast users of the malloc (which is typical, usually we have
// a malloc + bitcast) then replace them with uses of the new global. Update
// other users to use the global as well.
@@ -867,10 +883,10 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
User->replaceUsesOfWith(CI, TheBC);
}
}
-
+
Constant *RepValue = NewGV;
if (NewGV->getType() != GV->getType()->getElementType())
- RepValue = ConstantExpr::getBitCast(RepValue,
+ RepValue = ConstantExpr::getBitCast(RepValue,
GV->getType()->getElementType());
// If there is a comparison against null, we will insert a global bool to
@@ -890,7 +906,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
SI->eraseFromParent();
continue;
}
-
+
LoadInst *LI = cast<LoadInst>(GV->use_back());
while (!LI->use_empty()) {
Use &LoadUse = LI->use_begin().getUse();
@@ -898,7 +914,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
LoadUse = RepValue;
continue;
}
-
+
ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
@@ -963,20 +979,20 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
continue; // Fine, ignore.
}
-
+
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
return false; // Storing the pointer itself... bad.
continue; // Otherwise, storing through it, or storing into GV... fine.
}
-
+
// Must index into the array and into the struct.
if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
return false;
continue;
}
-
+
if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
// PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
// cycles.
@@ -985,13 +1001,13 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
return false;
continue;
}
-
+
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
return false;
continue;
}
-
+
return false;
}
return true;
@@ -1000,9 +1016,9 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere. Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer. Further, delete the store into
-/// GV. This assumes that these value pass the
+/// GV. This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
-static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
+static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
GlobalVariable *GV) {
while (!Alloc->use_empty()) {
Instruction *U = cast<Instruction>(*Alloc->use_begin());
@@ -1035,7 +1051,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
continue;
}
}
-
+
// Insert a load from the global, and use it instead of the malloc.
Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
U->replaceUsesOfWith(Alloc, NL);
@@ -1053,24 +1069,24 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
++UI) {
const Instruction *User = cast<Instruction>(*UI);
-
+
// Comparison against null is ok.
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return false;
continue;
}
-
+
// getelementptr is also ok, but only a simple form.
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
// Must index into the array and into the struct.
if (GEPI->getNumOperands() < 3)
return false;
-
+
// Otherwise the GEP is ok.
continue;
}
-
+
if (const PHINode *PN = dyn_cast<PHINode>(User)) {
if (!LoadUsingPHIsPerLoad.insert(PN))
// This means some phi nodes are dependent on each other.
@@ -1079,19 +1095,19 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
if (!LoadUsingPHIs.insert(PN))
// If we have already analyzed this PHI, then it is safe.
continue;
-
+
// Make sure all uses of the PHI are simple enough to transform.
if (!LoadUsesSimpleEnoughForHeapSRA(PN,
LoadUsingPHIs, LoadUsingPHIsPerLoad))
return false;
-
+
continue;
}
-
+
// Otherwise we don't know what this is, not ok.
return false;
}
-
+
return true;
}
@@ -1110,10 +1126,10 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
return false;
LoadUsingPHIsPerLoad.clear();
}
-
+
// If we reach here, we know that all uses of the loads and transitive uses
// (through PHI nodes) are simple enough to transform. However, we don't know
- // that all inputs the to the PHI nodes are in the same equivalence sets.
+ // that all inputs to the PHI nodes are in the same equivalence sets.
// Check to verify that all operands of the PHIs are either PHIS that can be
// transformed, loads from GV, or MI itself.
for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
@@ -1121,29 +1137,29 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
const PHINode *PN = *I;
for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
Value *InVal = PN->getIncomingValue(op);
-
+
// PHI of the stored value itself is ok.
if (InVal == StoredVal) continue;
-
+
if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
// One of the PHIs in our set is (optimistically) ok.
if (LoadUsingPHIs.count(InPN))
continue;
return false;
}
-
+
// Load from GV is ok.
if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
if (LI->getOperand(0) == GV)
continue;
-
+
// UNDEF? NULL?
-
+
// Anything else is rejected.
return false;
}
}
-
+
return true;
}
@@ -1151,15 +1167,15 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
-
+
if (FieldNo >= FieldVals.size())
FieldVals.resize(FieldNo+1);
-
+
// If we already have this value, just reuse the previously scalarized
// version.
if (Value *FieldVal = FieldVals[FieldNo])
return FieldVal;
-
+
// Depending on what instruction this is, we have several cases.
Value *Result;
if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
@@ -1172,9 +1188,9 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
// PN's type is pointer to struct. Make a new PHI of pointer to struct
// field.
- const StructType *ST =
+ const StructType *ST =
cast<StructType>(cast<PointerType>(PN->getType())->getElementType());
-
+
Result =
PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
PN->getName()+".f"+Twine(FieldNo), PN);
@@ -1183,13 +1199,13 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
llvm_unreachable("Unknown usable value");
Result = 0;
}
-
+
return FieldVals[FieldNo] = Result;
}
/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
-static void RewriteHeapSROALoadUser(Instruction *LoadUser,
+static void RewriteHeapSROALoadUser(Instruction *LoadUser,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
// If this is a comparison against null, handle it.
@@ -1199,30 +1215,30 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
// field.
Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
InsertedScalarizedValues, PHIsToRewrite);
-
+
Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
- Constant::getNullValue(NPtr->getType()),
+ Constant::getNullValue(NPtr->getType()),
SCI->getName());
SCI->replaceAllUsesWith(New);
SCI->eraseFromParent();
return;
}
-
+
// Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
&& "Unexpected GEPI!");
-
+
// Load the pointer for this field.
unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
InsertedScalarizedValues, PHIsToRewrite);
-
+
// Create the new GEP idx vector.
SmallVector<Value*, 8> GEPIdx;
GEPIdx.push_back(GEPI->getOperand(1));
GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());
-
+
Value *NGEPI = GetElementPtrInst::Create(NewPtr,
GEPIdx.begin(), GEPIdx.end(),
GEPI->getName(), GEPI);
@@ -1243,7 +1259,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
tie(InsertPos, Inserted) =
InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
if (!Inserted) return;
-
+
// If this is the first time we've seen this PHI, recursively process all
// users.
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
@@ -1256,7 +1272,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
/// is a value loaded from the global. Eliminate all uses of Ptr, making them
/// use FieldGlobals instead. All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
-static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
+static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
@@ -1264,7 +1280,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
Instruction *User = cast<Instruction>(*UI++);
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
}
-
+
if (Load->use_empty()) {
Load->eraseFromParent();
InsertedScalarizedValues.erase(Load);
@@ -1289,11 +1305,11 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// new mallocs at the same place as CI, and N globals.
std::vector<Value*> FieldGlobals;
std::vector<Value*> FieldMallocs;
-
+
for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
const Type *FieldTy = STy->getElementType(FieldNo);
const PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
-
+
GlobalVariable *NGV =
new GlobalVariable(*GV->getParent(),
PFieldTy, false, GlobalValue::InternalLinkage,
@@ -1301,7 +1317,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
GV->getName() + ".f" + Twine(FieldNo), GV,
GV->isThreadLocal());
FieldGlobals.push_back(NGV);
-
+
unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
if (const StructType *ST = dyn_cast<StructType>(FieldTy))
TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
@@ -1313,7 +1329,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, CI);
}
-
+
// The tricky aspect of this transformation is handling the case when malloc
// fails. In the original code, malloc failing would set the result pointer
// of malloc to null. In this case, some mallocs could succeed and others
@@ -1340,23 +1356,23 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Split the basic block at the old malloc.
BasicBlock *OrigBB = CI->getParent();
BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");
-
+
// Create the block to check the first condition. Put all these blocks at the
// end of the function as they are unlikely to be executed.
BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
"malloc_ret_null",
OrigBB->getParent());
-
+
// Remove the uncond branch from OrigBB to ContBB, turning it into a cond
// branch on RunningOr.
OrigBB->getTerminator()->eraseFromParent();
BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
-
+
// Within the NullPtrBlock, we need to emit a comparison and branch for each
// pointer, because some may be null while others are not.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
- Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
+ Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
Constant::getNullValue(GVVal->getType()),
"tmp");
BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
@@ -1371,10 +1387,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
FreeBlock);
BranchInst::Create(NextBlock, FreeBlock);
-
+
NullPtrBlock = NextBlock;
}
-
+
BranchInst::Create(ContBB, NullPtrBlock);
// CI is no longer needed, remove it.
@@ -1385,25 +1401,25 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
/// inserted for a given load.
DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
InsertedScalarizedValues[GV] = FieldGlobals;
-
+
std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
-
+
// Okay, the malloc site is completely handled. All of the uses of GV are now
// loads, and all uses of those loads are simple. Rewrite them to use loads
// of the per-field globals instead.
for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
-
+
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
continue;
}
-
+
// Must be a store of null.
StoreInst *SI = cast<StoreInst>(User);
assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
"Unexpected heap-sra user!");
-
+
// Insert a store of null into each global.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
@@ -1430,7 +1446,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
}
}
-
+
// Drop all inter-phi links and any loads that made it this far.
for (DenseMap<Value*, std::vector<Value*> >::iterator
I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
@@ -1440,7 +1456,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
LI->dropAllReferences();
}
-
+
// Delete all the phis and loads now that inter-references are dead.
for (DenseMap<Value*, std::vector<Value*> >::iterator
I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
@@ -1450,7 +1466,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
LI->eraseFromParent();
}
-
+
// The old global is now dead, remove it.
GV->eraseFromParent();
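A portable model of the transformation PerformHeapAllocSRoA finishes here: one malloc of an array of structs stored to one global becomes one malloc per field, each stored to a field global (the .f0/.f1 globals created above). The malloc-failure plumbing stitched in by the blocks above is elided:

    #include <cstdlib>

    struct Node { int key; int val; };

    Node *g;                   // before: single global, array of structs
    int *g_f0; int *g_f1;      // after: one field global per member

    void allocBefore(std::size_t n) {
      g = static_cast<Node *>(std::malloc(n * sizeof(Node)));
    }
    void allocAfter(std::size_t n) {  // what the rewritten malloc site does
      g_f0 = static_cast<int *>(std::malloc(n * sizeof(int)));
      g_f1 = static_cast<int *>(std::malloc(n * sizeof(int)));
    }
    int keyAt(std::size_t i) { return g_f0[i]; }  // loads hit one field array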
@@ -1468,7 +1484,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
TargetData *TD) {
if (!TD)
return false;
-
+
// If this is a malloc of an abstract type, don't touch it.
if (!AllocTy->isSized())
return false;
@@ -1508,7 +1524,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
return true;
}
-
+
// If the allocation is an array of structures, consider transforming this
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
@@ -1544,13 +1560,13 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = dyn_cast<BitCastInst>(Malloc) ?
extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
}
-
+
GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
return true;
}
-
+
return false;
-}
+}
// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
@@ -1568,7 +1584,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
GV->getInitializer()->isNullValue()) {
if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
if (GV->getInitializer()->getType() != SOVC->getType())
- SOVC =
+ SOVC =
ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
@@ -1576,7 +1592,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
const Type* MallocType = getMallocAllocatedType(CI);
- if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
+ if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
GVI, TD))
return true;
}
@@ -1591,7 +1607,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
const Type *GVElType = GV->getType()->getElementType();
-
+
// If GVElType is already i1, it is already shrunk. If the type of the GV is
// an FP value, pointer or vector, don't do this optimization because a select
// between them is very expensive and unlikely to lead to later
@@ -1611,11 +1627,11 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
}
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
-
+
// Create the new global, initializing it to false.
GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
false,
- GlobalValue::InternalLinkage,
+ GlobalValue::InternalLinkage,
ConstantInt::getFalse(GV->getContext()),
GV->getName()+".b",
GV->isThreadLocal());
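TryToShrinkGlobalToBoolean, modeled in portable C++: when the only value ever stored to a global (besides its initializer) is one known constant, a single boolean records whether that store happened, and each load becomes a select between the two constants. Names are illustrative:

    static int g = 0;          // before: only 0 (initializer) or 42 ever stored
    int loadBefore() { return g; }

    static bool g_b = false;   // after: the ".b" global tracks the one store
    void storeAfter() { g_b = true; }           // the store of 42
    int loadAfter()   { return g_b ? 42 : 0; }  // the load becomes a select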
@@ -1684,10 +1700,12 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
/// ProcessInternalGlobal - Analyze the specified global variable and optimize
/// it if possible. If we make a change, return true.
-bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
- Module::global_iterator &GVI) {
- SmallPtrSet<const PHINode*, 16> PHIUsers;
- GlobalStatus GS;
+bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
+ Module::global_iterator &GVI) {
+ if (!GV->hasLocalLinkage())
+ return false;
+
+ // Do more involved optimizations if the global is internal.
GV->removeDeadConstantUsers();
if (GV->use_empty()) {
@@ -1697,140 +1715,139 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
return true;
}
- if (!AnalyzeGlobal(GV, GS, PHIUsers)) {
-#if 0
- DEBUG(dbgs() << "Global: " << *GV);
- DEBUG(dbgs() << " isLoaded = " << GS.isLoaded << "\n");
- DEBUG(dbgs() << " StoredType = ");
- switch (GS.StoredType) {
- case GlobalStatus::NotStored: DEBUG(dbgs() << "NEVER STORED\n"); break;
- case GlobalStatus::isInitializerStored: DEBUG(dbgs() << "INIT STORED\n");
- break;
- case GlobalStatus::isStoredOnce: DEBUG(dbgs() << "STORED ONCE\n"); break;
- case GlobalStatus::isStored: DEBUG(dbgs() << "stored\n"); break;
- }
- if (GS.StoredType == GlobalStatus::isStoredOnce && GS.StoredOnceValue)
- DEBUG(dbgs() << " StoredOnceValue = " << *GS.StoredOnceValue << "\n");
- if (GS.AccessingFunction && !GS.HasMultipleAccessingFunctions)
- DEBUG(dbgs() << " AccessingFunction = "
- << GS.AccessingFunction->getName() << "\n");
- DEBUG(dbgs() << " HasMultipleAccessingFunctions = "
- << GS.HasMultipleAccessingFunctions << "\n");
- DEBUG(dbgs() << " HasNonInstructionUser = "
- << GS.HasNonInstructionUser<<"\n");
- DEBUG(dbgs() << "\n");
-#endif
-
- // If this is a first class global and has only one accessing function
- // and this function is main (which we know is not recursive we can make
- // this global a local variable) we replace the global with a local alloca
- // in this function.
- //
- // NOTE: It doesn't make sense to promote non single-value types since we
- // are just replacing static memory to stack memory.
- //
- // If the global is in different address space, don't bring it to stack.
- if (!GS.HasMultipleAccessingFunctions &&
- GS.AccessingFunction && !GS.HasNonInstructionUser &&
- GV->getType()->getElementType()->isSingleValueType() &&
- GS.AccessingFunction->getName() == "main" &&
- GS.AccessingFunction->hasExternalLinkage() &&
- GV->getType()->getAddressSpace() == 0) {
- DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
- Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
- ->getEntryBlock().begin());
- const Type* ElemTy = GV->getType()->getElementType();
- // FIXME: Pass Global's alignment when globals have alignment
- AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
- if (!isa<UndefValue>(GV->getInitializer()))
- new StoreInst(GV->getInitializer(), Alloca, &FirstI);
-
- GV->replaceAllUsesWith(Alloca);
+ SmallPtrSet<const PHINode*, 16> PHIUsers;
+ GlobalStatus GS;
+
+ if (AnalyzeGlobal(GV, GS, PHIUsers))
+ return false;
+
+ if (!GS.isCompared && !GV->hasUnnamedAddr()) {
+ GV->setUnnamedAddr(true);
+ NumUnnamed++;
+ }
+
+ if (GV->isConstant() || !GV->hasInitializer())
+ return false;
+
+ return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
+}
+
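Why ProcessGlobal gates unnamed_addr on !GS.isCompared: unnamed_addr declares that only the global's content matters, which permits the linker or optimizer to merge identical globals, and any code comparing addresses could observe that merge. Illustrative:

    static const int A = 7;    // mergeable candidates once marked unnamed_addr
    static const int B = 7;

    // If a comparison like this exists, AnalyzeGlobal sets isCompared and the
    // globals must keep distinct addresses.
    bool distinct() { return &A != &B; }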
+/// ProcessInternalGlobal - Analyze the specified global variable and optimize
+/// it if possible. If we make a change, return true.
+bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
+ Module::global_iterator &GVI,
+ const SmallPtrSet<const PHINode*, 16> &PHIUsers,
+ const GlobalStatus &GS) {
+ // If this is a first class global that has only one accessing function,
+ // and that function is main (which we know is not recursive), we can
+ // replace the global with a local alloca in that function.
+ //
+ // NOTE: It doesn't make sense to promote non-single-value types since we
+ // would just be replacing static memory with stack memory.
+ //
+ // If the global is in different address space, don't bring it to stack.
+ if (!GS.HasMultipleAccessingFunctions &&
+ GS.AccessingFunction && !GS.HasNonInstructionUser &&
+ GV->getType()->getElementType()->isSingleValueType() &&
+ GS.AccessingFunction->getName() == "main" &&
+ GS.AccessingFunction->hasExternalLinkage() &&
+ GV->getType()->getAddressSpace() == 0) {
+ DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
+ Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
+ ->getEntryBlock().begin());
+ const Type* ElemTy = GV->getType()->getElementType();
+ // FIXME: Pass Global's alignment when globals have alignment
+ AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
+ if (!isa<UndefValue>(GV->getInitializer()))
+ new StoreInst(GV->getInitializer(), Alloca, &FirstI);
+
+ GV->replaceAllUsesWith(Alloca);
+ GV->eraseFromParent();
+ ++NumLocalized;
+ return true;
+ }
+
+ // If the global is never loaded (but may be stored to), it is dead.
+ // Delete it now.
+ if (!GS.isLoaded) {
+ DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
+
+ // Delete any stores we can find to the global. We may not be able to
+ // make it completely dead though.
+ bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
+
+ // If the global is dead now, delete it.
+ if (GV->use_empty()) {
GV->eraseFromParent();
- ++NumLocalized;
- return true;
+ ++NumDeleted;
+ Changed = true;
}
-
- // If the global is never loaded (but may be stored to), it is dead.
- // Delete it now.
- if (!GS.isLoaded) {
- DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
-
- // Delete any stores we can find to the global. We may not be able to
- // make it completely dead though.
- bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
-
- // If the global is dead now, delete it.
- if (GV->use_empty()) {
- GV->eraseFromParent();
- ++NumDeleted;
- Changed = true;
- }
- return Changed;
+ return Changed;
- } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
- DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
- GV->setConstant(true);
+ } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
+ DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
+ GV->setConstant(true);
- // Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ // Clean up any obviously simplifiable users now.
+ CleanupConstantGlobalUsers(GV, GV->getInitializer());
- // If the global is dead now, just nuke it.
- if (GV->use_empty()) {
- DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
- << "all users and delete global!\n");
- GV->eraseFromParent();
- ++NumDeleted;
+ // If the global is dead now, just nuke it.
+ if (GV->use_empty()) {
+ DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
+ << "all users and delete global!\n");
+ GV->eraseFromParent();
+ ++NumDeleted;
+ }
+
+ ++NumMarked;
+ return true;
+ } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
+ if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
+ GVI = FirstNewGV; // Don't skip the newly produced globals!
+ return true;
+ }
+ } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
+ // If the initial value for the global was an undef value, and if only
+ // one other value was stored into it, we can just change the
+ // initializer to be the stored value, then delete all stores to the
+ // global. This allows us to mark it constant.
+ if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
+ if (isa<UndefValue>(GV->getInitializer())) {
+ // Change the initial value here.
+ GV->setInitializer(SOVConstant);
+
+ // Clean up any obviously simplifiable users now.
+ CleanupConstantGlobalUsers(GV, GV->getInitializer());
+
+ if (GV->use_empty()) {
+ DEBUG(dbgs() << " *** Substituting initializer allowed us to "
+ << "simplify all users and delete global!\n");
+ GV->eraseFromParent();
+ ++NumDeleted;
+ } else {
+ GVI = GV;
+ }
+ ++NumSubstitute;
+ return true;
}
- ++NumMarked;
+ // Try to optimize globals based on the knowledge that only one value
+ // (besides its initializer) is ever stored to the global.
+ if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
+ getAnalysisIfAvailable<TargetData>()))
return true;
- } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
- GVI = FirstNewGV; // Don't skip the newly produced globals!
- return true;
- }
- } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
- // If the initial value for the global was an undef value, and if only
- // one other value was stored into it, we can just change the
- // initializer to be the stored value, then delete all stores to the
- // global. This allows us to mark it constant.
- if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
- if (isa<UndefValue>(GV->getInitializer())) {
- // Change the initial value here.
- GV->setInitializer(SOVConstant);
-
- // Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
-
- if (GV->use_empty()) {
- DEBUG(dbgs() << " *** Substituting initializer allowed us to "
- << "simplify all users and delete global!\n");
- GV->eraseFromParent();
- ++NumDeleted;
- } else {
- GVI = GV;
- }
- ++NumSubstitute;
- return true;
- }
- // Try to optimize globals based on the knowledge that only one value
- // (besides its initializer) is ever stored to the global.
- if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
- getAnalysisIfAvailable<TargetData>()))
+ // Otherwise, if the global was not a boolean, we can shrink it to be a
+ // boolean.
+ if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
+ if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
+ ++NumShrunkToBool;
return true;
-
- // Otherwise, if the global was not a boolean, we can shrink it to be a
- // boolean.
- if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
- if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
- ++NumShrunkToBool;
- return true;
- }
- }
+ }
}
+
return false;
}
@@ -1917,10 +1934,8 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
if (New && New != CE)
GV->setInitializer(New);
}
- // Do more involved optimizations if the global is internal.
- if (!GV->isConstant() && GV->hasLocalLinkage() &&
- GV->hasInitializer())
- Changed |= ProcessInternalGlobal(GV, GVI);
+
+ Changed |= ProcessGlobal(GV, GVI);
}
return Changed;
}
@@ -1928,46 +1943,47 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
/// FindGlobalCtors - Find the llvm.globalctors list, verifying that all
/// initializers have an init priority of 65535.
GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- if (I->getName() == "llvm.global_ctors") {
- // Found it, verify it's an array of { int, void()* }.
- const ArrayType *ATy =dyn_cast<ArrayType>(I->getType()->getElementType());
- if (!ATy) return 0;
- const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
- if (!STy || STy->getNumElements() != 2 ||
- !STy->getElementType(0)->isIntegerTy(32)) return 0;
- const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
- if (!PFTy) return 0;
- const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
- if (!FTy || !FTy->getReturnType()->isVoidTy() ||
- FTy->isVarArg() || FTy->getNumParams() != 0)
- return 0;
-
- // Verify that the initializer is simple enough for us to handle.
- if (!I->hasDefinitiveInitializer()) return 0;
- ConstantArray *CA = dyn_cast<ConstantArray>(I->getInitializer());
- if (!CA) return 0;
- for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(*i)) {
- if (isa<ConstantPointerNull>(CS->getOperand(1)))
- continue;
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+ if (GV == 0) return 0;
+
+ // Found it, verify it's an array of { int, void()* }.
+ const ArrayType *ATy = dyn_cast<ArrayType>(GV->getType()->getElementType());
+ if (!ATy) return 0;
+ const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
+ if (!STy || STy->getNumElements() != 2 ||
+ !STy->getElementType(0)->isIntegerTy(32)) return 0;
+ const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
+ if (!PFTy) return 0;
+ const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
+ if (!FTy || !FTy->getReturnType()->isVoidTy() ||
+ FTy->isVarArg() || FTy->getNumParams() != 0)
+ return 0;
- // Must have a function or null ptr.
- if (!isa<Function>(CS->getOperand(1)))
- return 0;
-
- // Init priority must be standard.
- ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0));
- if (!CI || CI->getZExtValue() != 65535)
- return 0;
- } else {
- return 0;
- }
-
- return I;
- }
- return 0;
+ // Verify that the initializer is simple enough for us to handle. We are
+ // only allowed to optimize the initializer if it is unique.
+ if (!GV->hasUniqueInitializer()) return 0;
+
+ ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!CA) return 0;
+
+ for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(*i);
+ if (CS == 0) return 0;
+
+ if (isa<ConstantPointerNull>(CS->getOperand(1)))
+ continue;
+
+ // Must have a function or null ptr.
+ if (!isa<Function>(CS->getOperand(1)))
+ return 0;
+
+ // Init priority must be standard.
+ ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0));
+ if (!CI || CI->getZExtValue() != 65535)
+ return 0;
+ }
+
+ return GV;
}
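The entry shape FindGlobalCtors now verifies field by field, mirrored as a portable struct; any other priority, or a non-null operand that is not a function, makes it return 0:

    // Each llvm.global_ctors entry this code accepts, structurally:
    struct CtorEntry {
      int priority;     // must be exactly 65535, the "standard" priority
      void (*ctor)();   // must be a real function or a null pointer
    };

    static void init() {}  // illustrative constructor
    static CtorEntry ctors[] = { {65535, init}, {65535, nullptr} };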
/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
@@ -1985,13 +2001,13 @@ static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
/// specified array, returning the new global to use.
-static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
+static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
const std::vector<Function*> &Ctors) {
// If we made a change, reassemble the initializer list.
std::vector<Constant*> CSVals;
CSVals.push_back(ConstantInt::get(Type::getInt32Ty(GCL->getContext()),65535));
CSVals.push_back(0);
-
+
// Create the new init list.
std::vector<Constant*> CAList;
for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
@@ -2007,26 +2023,26 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}
CAList.push_back(ConstantStruct::get(GCL->getContext(), CSVals, false));
}
-
+
// Create the array initializer.
const Type *StructTy =
cast<ArrayType>(GCL->getType()->getElementType())->getElementType();
- Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
+ Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
CAList.size()), CAList);
-
+
// If we didn't change the number of elements, don't create a new GV.
if (CA->getType() == GCL->getInitializer()->getType()) {
GCL->setInitializer(CA);
return GCL;
}
-
+
// Create the new global and insert it next to the existing list.
GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
GCL->getLinkage(), CA, "",
GCL->isThreadLocal());
GCL->getParent()->getGlobalList().insert(GCL, NGV);
NGV->takeName(GCL);
-
+
// Nuke the old list, replacing any uses with the new one.
if (!GCL->use_empty()) {
Constant *V = NGV;
@@ -2035,7 +2051,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
GCL->replaceAllUsesWith(V);
}
GCL->eraseFromParent();
-
+
if (Ctors.size())
return NGV;
else
@@ -2043,17 +2059,86 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}
-static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
- Value *V) {
+static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) {
if (Constant *CV = dyn_cast<Constant>(V)) return CV;
Constant *R = ComputedValues[V];
assert(R && "Reference to an uncomputed value!");
return R;
}
+static inline bool
+isSimpleEnoughValueToCommit(Constant *C,
+ SmallPtrSet<Constant*, 8> &SimpleConstants);
+
+
+/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
+/// handled by the code generator. We don't want to generate something like:
+/// void *X = &X/42;
+/// because the code generator doesn't have a relocation that can handle that.
+///
+/// This function should be called if C was not found (but just got inserted)
+/// in SimpleConstants to avoid having to rescan the same constants all the
+/// time.
+static bool isSimpleEnoughValueToCommitHelper(Constant *C,
+ SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ // Simple integer, undef, constant aggregate zero, global addresses, etc are
+ // all supported.
+ if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
+ isa<GlobalValue>(C))
+ return true;
+
+ // Aggregate values are safe if all their elements are.
+ if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
+ isa<ConstantVector>(C)) {
+ for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
+ Constant *Op = cast<Constant>(C->getOperand(i));
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants))
+ return false;
+ }
+ return true;
+ }
+
+ // We don't know exactly what relocations are allowed in constant expressions,
+ // so we allow &global+constantoffset, which is safe and uniformly supported
+ // across targets.
+ ConstantExpr *CE = cast<ConstantExpr>(C);
+ switch (CE->getOpcode()) {
+ case Instruction::BitCast:
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ // These casts are always fine if the casted value is.
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+
+ // GEP is fine if it is simple + constant offset.
+ case Instruction::GetElementPtr:
+ for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
+ if (!isa<ConstantInt>(CE->getOperand(i)))
+ return false;
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+
+ case Instruction::Add:
+ // We allow simple+cst.
+ if (!isa<ConstantInt>(CE->getOperand(1)))
+ return false;
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ }
+ return false;
+}
+
+static inline bool
+isSimpleEnoughValueToCommit(Constant *C,
+ SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ // If we already checked this constant, we win.
+ if (!SimpleConstants.insert(C)) return true;
+ // Check the constant.
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants);
+}
+
+
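The wrapper above caches through SmallPtrSet::insert, which reports whether the element was newly inserted. The same idiom in portable C++ (std::unordered_set stands in for SmallPtrSet; as above, the constant is cached before it is examined, so it is never rescanned):

    #include <unordered_set>

    static bool expensiveCheck(const void *) { return true; }  // placeholder

    static bool checkOnce(const void *C,
                          std::unordered_set<const void *> &Seen) {
      if (!Seen.insert(C).second)
        return true;            // seen before: skip the rescan
      return expensiveCheck(C); // first sighting: do the real work
    }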
/// isSimpleEnoughPointerToCommit - Return true if this constant is simple
-/// enough for us to understand. In particular, if it is a cast of something,
-/// we punt. We basically just support direct accesses to globals and GEP's of
+/// enough for us to understand. In particular, if it is a cast other than
+/// from one pointer type to another pointer type, we punt.
+/// We basically just support direct accesses to globals and GEP's of
/// globals. This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C) {
// Conservatively, avoid aggregate types. This is because we don't
@@ -2062,19 +2147,19 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
- // Do not allow weak/linkonce/dllimport/dllexport linkage or
+ // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
// external globals.
- return GV->hasDefinitiveInitializer();
+ return GV->hasUniqueInitializer();
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
// Handle a constantexpr gep.
if (CE->getOpcode() == Instruction::GetElementPtr &&
isa<GlobalVariable>(CE->getOperand(0)) &&
cast<GEPOperator>(CE)->isInBounds()) {
GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
- // Do not allow weak/linkonce/dllimport/dllexport linkage or
+ // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
// external globals.
- if (!GV->hasDefinitiveInitializer())
+ if (!GV->hasUniqueInitializer())
return false;
// The first index must be zero.
@@ -2087,7 +2172,18 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
+
+ // A constantexpr bitcast from a pointer to another pointer is a no-op,
+ // and we know how to evaluate it by moving the bitcast from the pointer
+ // operand to the value operand.
+ } else if (CE->getOpcode() == Instruction::BitCast &&
+ isa<GlobalVariable>(CE->getOperand(0))) {
+ // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
+ // external globals.
+ return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
}
+ }
+
return false;
}
@@ -2101,7 +2197,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
assert(Val->getType() == Init->getType() && "Type mismatch!");
return Val;
}
-
+
std::vector<Constant*> Elts;
if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
@@ -2119,13 +2215,13 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
llvm_unreachable("This code is out of sync with "
" ConstantFoldLoadThroughGEPConstantExpr");
}
-
+
// Replace the element that we are supposed to.
ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
unsigned Idx = CU->getZExtValue();
assert(Idx < STy->getNumElements() && "Struct index out of range!");
Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
-
+
// Return the modified struct.
return ConstantStruct::get(Init->getContext(), &Elts[0], Elts.size(),
STy->isPacked());
@@ -2138,8 +2234,8 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
NumElts = ATy->getNumElements();
else
NumElts = cast<VectorType>(InitTy)->getNumElements();
-
-
+
+
// Break up the array into elements.
if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
@@ -2154,16 +2250,15 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
" ConstantFoldLoadThroughGEPConstantExpr");
Elts.assign(NumElts, UndefValue::get(InitTy->getElementType()));
}
-
+
assert(CI->getZExtValue() < NumElts);
Elts[CI->getZExtValue()] =
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
-
+
if (Init->getType()->isArrayTy())
return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
- else
- return ConstantVector::get(&Elts[0], Elts.size());
- }
+ return ConstantVector::get(Elts);
+ }
}
/// CommitValueTo - We have decided that Addr (which satisfies the predicate
@@ -2189,14 +2284,14 @@ static Constant *ComputeLoadResult(Constant *P,
// is the most up-to-date.
DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
if (I != Memory.end()) return I->second;
-
+
// Access it.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
if (GV->hasDefinitiveInitializer())
return GV->getInitializer();
return 0;
}
-
+
// Handle a constantexpr getelementptr.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -2216,17 +2311,19 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
const SmallVectorImpl<Constant*> &ActualArgs,
std::vector<Function*> &CallStack,
DenseMap<Constant*, Constant*> &MutatedMemory,
- std::vector<GlobalVariable*> &AllocaTmps) {
+ std::vector<GlobalVariable*> &AllocaTmps,
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD) {
// Check to see if this function is already executing (recursion). If so,
// bail out. TODO: we might want to accept limited recursion.
if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
return false;
-
+
CallStack.push_back(F);
-
+
/// Values - As we compute SSA register values, we store their contents here.
DenseMap<Value*, Constant*> Values;
-
+
// Initialize arguments to the incoming values specified.
unsigned ArgNo = 0;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
@@ -2237,21 +2334,65 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
/// we can only evaluate any one basic block at most once. This set keeps
/// track of what we have executed so we can detect recursive cases etc.
SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
-
+
// CurInst - The current instruction we're evaluating.
BasicBlock::iterator CurInst = F->begin()->begin();
-
+
// This is the main evaluation loop.
while (1) {
Constant *InstResult = 0;
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
if (SI->isVolatile()) return false; // no volatile accesses.
Constant *Ptr = getVal(Values, SI->getOperand(1));
if (!isSimpleEnoughPointerToCommit(Ptr))
// If this is too complex for us to commit, reject it.
return false;
+
Constant *Val = getVal(Values, SI->getOperand(0));
+
+ // If this might be too difficult for the backend to handle (e.g. the addr
+ // of one global variable divided by another) then we can't commit it.
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants))
+ return false;
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ if (CE->getOpcode() == Instruction::BitCast) {
+ // If we're evaluating a store through a bitcast, then we need
+ // to pull the bitcast off the pointer type and push it onto the
+ // stored value.
+ Ptr = CE->getOperand(0);
+
+ const Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
+
+ // In order to push the bitcast onto the stored value, a bitcast
+ // from NewTy to Val's type must be legal. If it's not, we can try
+ // introspecting NewTy to find a legal conversion.
+ while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
+ // If NewTy is a struct, we can convert the pointer to the struct
+ // into a pointer to its first member.
+ // FIXME: This could be extended to support arrays as well.
+ if (const StructType *STy = dyn_cast<StructType>(NewTy)) {
+ NewTy = STy->getTypeAtIndex(0U);
+
+ const IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
+ Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
+ Constant * const IdxList[] = {IdxZero, IdxZero};
+
+ Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList, 2);
+
+ // If we can't improve the situation by introspecting NewTy,
+ // we have to give up.
+ } else {
+ return false;
+ }
+ }
+
+ // If we found compatible types, go ahead and push the bitcast
+ // onto the stored value.
+ Val = ConstantExpr::getBitCast(Val, NewTy);
+ }
+
MutatedMemory[Ptr] = Val;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
InstResult = ConstantExpr::get(BO->getOpcode(),
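
The bitcast-store handling above retargets the pointer one struct level at a time until the stored value can be losslessly bitcast. A standalone sketch of the descent loop, using invented toy types rather than the LLVM API; the {0,0} getelementptr in the real code corresponds to following the struct's first member:

    #include <cstdio>

    struct Ty {
      bool IsStruct;
      const Ty *FirstMember; // first member type if a struct, else 0
      int Bits;              // scalar width if not a struct
    };

    // Stand-in for Value::canLosslesslyBitCastTo: equal-width scalars only.
    static bool losslessBitCast(const Ty *From, const Ty *To) {
      return !From->IsStruct && !To->IsStruct && From->Bits == To->Bits;
    }

    // Returns the type the store should be retargeted to, or 0 on failure.
    static const Ty *descendForStore(const Ty *ValTy, const Ty *PointeeTy) {
      while (!losslessBitCast(ValTy, PointeeTy)) {
        if (PointeeTy->IsStruct && PointeeTy->FirstMember)
          PointeeTy = PointeeTy->FirstMember; // like STy->getTypeAtIndex(0U)
        else
          return 0;                           // no legal conversion exists
      }
      return PointeeTy;
    }

    int main() {
      Ty I32   = { false, 0, 32 };
      Ty Inner = { true, &I32, 0 };
      Ty Outer = { true, &Inner, 0 };
      std::printf("%s\n", descendForStore(&I32, &Outer) ? "found" : "gave up");
      return 0;
    }
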
@@ -2290,7 +2431,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
GlobalValue::InternalLinkage,
UndefValue::get(Ty),
AI->getName()));
- InstResult = AllocaTmps.back();
+ InstResult = AllocaTmps.back();
} else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
// Debug info can safely be ignored here.
@@ -2324,11 +2465,11 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
} else {
if (Callee->getFunctionType()->isVarArg())
return false;
-
+
Constant *RetVal;
// Execute the call, if successful, use the return value.
if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
- MutatedMemory, AllocaTmps))
+ MutatedMemory, AllocaTmps, SimpleConstants, TD))
return false;
InstResult = RetVal;
}
@@ -2342,7 +2483,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
if (!Cond) return false; // Cannot determine.
- NewBB = BI->getSuccessor(!Cond->getZExtValue());
+ NewBB = BI->getSuccessor(!Cond->getZExtValue());
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
ConstantInt *Val =
@@ -2358,20 +2499,20 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
} else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
if (RI->getNumOperands())
RetVal = getVal(Values, RI->getOperand(0));
-
+
CallStack.pop_back(); // return from fn.
return true; // We succeeded at evaluating this ctor!
} else {
// invoke, unwind, unreachable.
return false; // Cannot handle this terminator.
}
-
+
// Okay, we succeeded in evaluating this control flow. See if we have
// executed the new block before. If so, we have a looping function,
// which we cannot evaluate in reasonable time.
if (!ExecutedBlocks.insert(NewBB))
return false; // looped!
-
+
// Okay, we have never been in this block before. Check to see if there
// are any PHI nodes. If so, evaluate them with information about where
// we came from.
@@ -2387,10 +2528,14 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
// Did not know how to evaluate this!
return false;
}
-
- if (!CurInst->use_empty())
+
+ if (!CurInst->use_empty()) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
+ InstResult = ConstantFoldConstantExpression(CE, TD);
+
Values[CurInst] = InstResult;
-
+ }
+
// Advance program counter.
++CurInst;
}
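
Folding each SSA result through ConstantFoldConstantExpression before caching keeps the Values map in canonical form, so later map lookups compare like with like. A minimal fold-then-record sketch with invented stand-ins:

    #include <cstdio>
    #include <map>
    #include <string>

    // Stand-in for ConstantFoldConstantExpression; the real folder simplifies
    // constant expressions into a canonical form before they are cached.
    static int fold(int V) { return V; }

    int main() {
      std::map<std::string, int> Values;    // like DenseMap<Value*, Constant*>
      Values["a"] = fold(2 + 3);            // InstResult folded, then recorded
      Values["b"] = fold(Values["a"] * 4);
      std::printf("b = %d\n", Values["b"]); // 20
      return 0;
    }
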
@@ -2398,7 +2543,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F) {
+static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) {
/// MutatedMemory - For each store we execute, we update this map. Loads
/// check this to get the most up-to-date value. If evaluation is successful,
/// this state is committed to the process.
@@ -2408,17 +2553,23 @@ static bool EvaluateStaticConstructor(Function *F) {
/// to represent its body. This vector is needed so we can delete the
/// temporary globals when we are done.
std::vector<GlobalVariable*> AllocaTmps;
-
+
/// CallStack - This is used to detect recursion. In pathological situations
/// we could hit exponential behavior, but at least there is nothing
/// unbounded.
std::vector<Function*> CallStack;
+ /// SimpleConstants - These are constants we have checked and know to be
+ /// simple enough to live in a static initializer of a global.
+ SmallPtrSet<Constant*, 8> SimpleConstants;
+
// Call the function.
Constant *RetValDummy;
bool EvalSuccess = EvaluateFunction(F, RetValDummy,
SmallVector<Constant*, 0>(), CallStack,
- MutatedMemory, AllocaTmps);
+ MutatedMemory, AllocaTmps,
+ SimpleConstants, TD);
+
if (EvalSuccess) {
// We succeeded at evaluation: commit the result.
DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
@@ -2428,13 +2579,13 @@ static bool EvaluateStaticConstructor(Function *F) {
E = MutatedMemory.end(); I != E; ++I)
CommitValueTo(I->second, I->first);
}
-
+
// At this point, we are done interpreting. If we created any 'alloca'
// temporaries, release them now.
while (!AllocaTmps.empty()) {
GlobalVariable *Tmp = AllocaTmps.back();
AllocaTmps.pop_back();
-
+
// If there are still users of the alloca, the program is doing something
// silly, e.g. storing the address of the alloca somewhere and using it
// later. Since this is undefined, we'll just make it be null.
@@ -2442,7 +2593,7 @@ static bool EvaluateStaticConstructor(Function *F) {
Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
delete Tmp;
}
-
+
return EvalSuccess;
}
@@ -2454,7 +2605,8 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
bool MadeChange = false;
if (Ctors.empty()) return false;
-
+
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
// Loop over global ctors, optimizing them when we can.
for (unsigned i = 0; i != Ctors.size(); ++i) {
Function *F = Ctors[i];
@@ -2467,12 +2619,12 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
}
break;
}
-
+
// We cannot simplify external ctor functions.
if (F->empty()) continue;
-
+
// If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F)) {
+ if (EvaluateStaticConstructor(F, TD)) {
Ctors.erase(Ctors.begin()+i);
MadeChange = true;
--i;
@@ -2480,9 +2632,9 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
continue;
}
}
-
+
if (!MadeChange) return false;
-
+
GCL = InstallGlobalCtors(GCL, Ctors);
return true;
}
@@ -2546,21 +2698,21 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
-
+
// Try to find the llvm.globalctors list.
GlobalVariable *GlobalCtors = FindGlobalCtors(M);
bool LocalChange = true;
while (LocalChange) {
LocalChange = false;
-
+
// Delete functions that are trivially dead, ccc -> fastcc
LocalChange |= OptimizeFunctions(M);
-
+
// Optimize global_ctors list.
if (GlobalCtors)
LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
-
+
// Optimize non-address-taken globals.
LocalChange |= OptimizeGlobalVars(M);
@@ -2568,9 +2720,9 @@ bool GlobalOpt::runOnModule(Module &M) {
LocalChange |= OptimizeGlobalAliases(M);
Changed |= LocalChange;
}
-
+
// TODO: Move all global ctors functions to the end of the module for code
// layout.
-
+
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index 1b3cf78..c7c2939 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -35,7 +35,9 @@ namespace {
///
struct IPCP : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- IPCP() : ModulePass(ID) {}
+ IPCP() : ModulePass(ID) {
+ initializeIPCPPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M);
private:
@@ -46,7 +48,7 @@ namespace {
char IPCP::ID = 0;
INITIALIZE_PASS(IPCP, "ipconstprop",
- "Interprocedural constant propagation", false, false);
+ "Interprocedural constant propagation", false, false)
ModulePass *llvm::createIPConstantPropagationPass() { return new IPCP(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/IPO.cpp b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
index 340b70e..fbe90ce 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPO.cpp
@@ -7,17 +7,51 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the C bindings for libLLVMIPO.a, which implements
-// several transformations over the LLVM intermediate representation.
+// This file implements the common infrastructure (including C bindings) for
+// libLLVMIPO.a, which implements several transformations over the LLVM
+// intermediate representation.
//
//===----------------------------------------------------------------------===//
#include "llvm-c/Transforms/IPO.h"
+#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO.h"
using namespace llvm;
+void llvm::initializeIPO(PassRegistry &Registry) {
+ initializeArgPromotionPass(Registry);
+ initializeConstantMergePass(Registry);
+ initializeDAEPass(Registry);
+ initializeDAHPass(Registry);
+ initializeDTEPass(Registry);
+ initializeFunctionAttrsPass(Registry);
+ initializeGlobalDCEPass(Registry);
+ initializeGlobalOptPass(Registry);
+ initializeIPCPPass(Registry);
+ initializeAlwaysInlinerPass(Registry);
+ initializeSimpleInlinerPass(Registry);
+ initializeInternalizePassPass(Registry);
+ initializeLoopExtractorPass(Registry);
+ initializeBlockExtractorPassPass(Registry);
+ initializeSingleLoopExtractorPass(Registry);
+ initializeLowerSetJmpPass(Registry);
+ initializeMergeFunctionsPass(Registry);
+ initializePartialInlinerPass(Registry);
+ initializePruneEHPass(Registry);
+ initializeStripDeadPrototypesPassPass(Registry);
+ initializeStripSymbolsPass(Registry);
+ initializeStripDebugDeclarePass(Registry);
+ initializeStripDeadDebugInfoPass(Registry);
+ initializeStripNonDebugSymbolsPass(Registry);
+ initializeSRETPromotionPass(Registry);
+}
+
+void LLVMInitializeIPO(LLVMPassRegistryRef R) {
+ initializeIPO(*unwrap(R));
+}
+
void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createArgumentPromotionPass());
}
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index ecc60ad..ce795b7 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -36,7 +36,9 @@ namespace {
InlineCostAnalyzer CA;
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(ID, -2000000000) {}
+ AlwaysInliner() : Inliner(ID, -2000000000) {
+ initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
+ }
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
return CA.getInlineCost(CS, NeverInline);
@@ -61,8 +63,11 @@ namespace {
}
char AlwaysInliner::ID = 0;
-INITIALIZE_PASS(AlwaysInliner, "always-inline",
- "Inliner for always_inline functions", false, false);
+INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline",
+ "Inliner for always_inline functions", false, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
+ "Inliner for always_inline functions", false, false)
Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 9c6637d..0c5b3be 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -33,8 +33,12 @@ namespace {
SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
public:
- SimpleInliner() : Inliner(ID) {}
- SimpleInliner(int Threshold) : Inliner(ID, Threshold) {}
+ SimpleInliner() : Inliner(ID) {
+ initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
+ }
+ SimpleInliner(int Threshold) : Inliner(ID, Threshold) {
+ initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
+ }
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
return CA.getInlineCost(CS, NeverInline);
@@ -56,8 +60,11 @@ namespace {
}
char SimpleInliner::ID = 0;
-INITIALIZE_PASS(SimpleInliner, "inline",
- "Function Integration/Inlining", false, false);
+INITIALIZE_PASS_BEGIN(SimpleInliner, "inline",
+ "Function Integration/Inlining", false, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(SimpleInliner, "inline",
+ "Function Integration/Inlining", false, false)
Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index 4983e8e..37eafd7 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -52,7 +52,8 @@ Inliner::Inliner(char &ID)
: CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
Inliner::Inliner(char &ID, int Threshold)
- : CallGraphSCCPass(ID), InlineThreshold(Threshold) {}
+ : CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ?
+ InlineLimit : Threshold) {}
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
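
The new initializer makes an explicit -inline-threshold on the command line win over the threshold a subclass passes down (note AlwaysInliner's -2000000000 above). A sketch of that precedence rule, with an invented Flag struct standing in for cl::opt<int>:

    #include <cstdio>

    struct Flag { int Value; int Occurrences; }; // cl::opt stand-in
    static Flag InlineLimit = { 225, 0 };        // illustrative default

    static int pickThreshold(int CtorThreshold) {
      return InlineLimit.Occurrences > 0 ? InlineLimit.Value : CtorThreshold;
    }

    int main() {
      std::printf("%d\n", pickThreshold(-2000000000)); // AlwaysInliner's value
      InlineLimit.Value = 50;                          // user passed the flag
      InlineLimit.Occurrences = 1;
      std::printf("%d\n", pickThreshold(-2000000000)); // now 50 wins
      return 0;
    }
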
@@ -74,7 +75,8 @@ InlinedArrayAllocasTy;
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
- InlinedArrayAllocasTy &InlinedArrayAllocas) {
+ InlinedArrayAllocasTy &InlinedArrayAllocas,
+ int InlineHistory) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
@@ -91,7 +93,6 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
!Caller->hasFnAttr(Attribute::StackProtectReq))
Caller->addFnAttr(Attribute::StackProtect);
-
// Look at all of the allocas that we inlined through this call site. If we
// have already inlined other allocas through other calls into this function,
// then we know that they have disjoint lifetimes and that we can merge them.
@@ -115,6 +116,21 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
//
SmallPtrSet<AllocaInst*, 16> UsedAllocas;
+ // When processing our SCC, check to see if CS was inlined from some other
+ // call site. For example, if we're processing "A" in this code:
+ // A() { B() }
+ // B() { x = alloca ... C() }
+ // C() { y = alloca ... }
+ // Assume that C was not inlined into B initially, and so we're processing A
+ // and decide to inline B into A. Doing this makes an alloca available for
+ // reuse and makes a callsite (C) available for inlining. When we process
+ // the C call site we don't want to do any alloca merging between x and y
+ // because their scopes are not disjoint. We could make this smarter by
+ // keeping track of the inline history for each alloca in the
+ // InlinedArrayAllocas but this isn't likely to be a significant win.
+ if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
+ return true;
+
// Loop over all the allocas we have so far and see if they can be merged with
// a previously inlined alloca. If not, remember that we had it.
for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
@@ -152,19 +168,21 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
// success!
- DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI);
+ DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
+ << *AvailableAlloca << '\n');
AI->replaceAllUsesWith(AvailableAlloca);
AI->eraseFromParent();
MergedAwayAlloca = true;
++NumMergedAllocas;
+ IFI.StaticAllocas[AllocaNo] = 0;
break;
}
// If we already nuked the alloca, we're done with it.
if (MergedAwayAlloca)
continue;
-
+
// If we were unable to merge away the alloca either because there are no
// allocas of the right type available or because we reused them all
// already, remember that this alloca came from an inlined function and mark
@@ -234,20 +252,25 @@ bool Inliner::shouldInline(CallSite CS) {
if (Caller->hasLocalLinkage()) {
int TotalSecondaryCost = 0;
bool outerCallsFound = false;
- bool allOuterCallsWillBeInlined = true;
- bool someOuterCallWouldNotBeInlined = false;
+ // This bool tracks what happens if we do NOT inline C into B.
+ bool callerWillBeRemoved = true;
+ // This bool tracks what happens if we DO inline C into B.
+ bool inliningPreventsSomeOuterInline = false;
for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
I != E; ++I) {
CallSite CS2(*I);
// If this isn't a call to Caller (it could be some other sort
- // of reference) skip it.
- if (!CS2 || CS2.getCalledFunction() != Caller)
+ // of reference) skip it. Such references will prevent the caller
+ // from being removed.
+ if (!CS2 || CS2.getCalledFunction() != Caller) {
+ callerWillBeRemoved = false;
continue;
+ }
InlineCost IC2 = getInlineCost(CS2);
if (IC2.isNever())
- allOuterCallsWillBeInlined = false;
+ callerWillBeRemoved = false;
if (IC2.isAlways() || IC2.isNever())
continue;
@@ -257,14 +280,14 @@ bool Inliner::shouldInline(CallSite CS) {
float FudgeFactor2 = getInlineFudgeFactor(CS2);
if (Cost2 >= (int)(CurrentThreshold2 * FudgeFactor2))
- allOuterCallsWillBeInlined = false;
+ callerWillBeRemoved = false;
// See if we have this case. We subtract off the penalty
// for the call instruction, which we would be deleting.
if (Cost2 < (int)(CurrentThreshold2 * FudgeFactor2) &&
Cost2 + Cost - (InlineConstants::CallPenalty + 1) >=
(int)(CurrentThreshold2 * FudgeFactor2)) {
- someOuterCallWouldNotBeInlined = true;
+ inliningPreventsSomeOuterInline = true;
TotalSecondaryCost += Cost2;
}
}
@@ -272,10 +295,10 @@ bool Inliner::shouldInline(CallSite CS) {
// one is set very low by getInlineCost, in anticipation that Caller will
// be removed entirely. We did not account for this above unless there
// is only one caller of Caller.
- if (allOuterCallsWillBeInlined && Caller->use_begin() != Caller->use_end())
+ if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
- if (outerCallsFound && someOuterCallWouldNotBeInlined &&
+ if (outerCallsFound && inliningPreventsSomeOuterInline &&
TotalSecondaryCost < Cost) {
DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
" Cost = " << Cost <<
@@ -401,7 +424,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// If this call site was obtained by inlining another function, verify
// that the include path for the function did not include the callee
- // itself. If so, we'd be recursively inlinling the same function,
+ // itself. If so, we'd be recursively inlining the same function,
// which would provide the same callsites, which would cause us to
// infinitely inline.
int InlineHistoryID = CallSites[CSi].second;
@@ -416,7 +439,8 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
continue;
// Attempt to inline the function.
- if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas))
+ if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
+ InlineHistoryID))
continue;
++NumInlined;
diff --git a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
index a1d919f..9b9ebad 100644
--- a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -64,10 +64,11 @@ namespace {
char InternalizePass::ID = 0;
INITIALIZE_PASS(InternalizePass, "internalize",
- "Internalize Global Symbols", false, false);
+ "Internalize Global Symbols", false, false)
InternalizePass::InternalizePass(bool AllButMain)
: ModulePass(ID), AllButMain(AllButMain){
+ initializeInternalizePassPass(*PassRegistry::getPassRegistry());
if (!APIFile.empty()) // If a filename is specified, use it.
LoadFile(APIFile.c_str());
if (!APIList.empty()) // If a list is specified, use it as well.
@@ -76,6 +77,7 @@ InternalizePass::InternalizePass(bool AllButMain)
InternalizePass::InternalizePass(const std::vector<const char *>&exportList)
: ModulePass(ID), AllButMain(false){
+ initializeInternalizePassPass(*PassRegistry::getPassRegistry());
for(std::vector<const char *>::const_iterator itr = exportList.begin();
itr != exportList.end(); itr++) {
ExternalNames.insert(*itr);
diff --git a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index f88dff6..848944d 100644
--- a/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -37,7 +37,9 @@ namespace {
unsigned NumLoops;
explicit LoopExtractor(unsigned numLoops = ~0)
- : LoopPass(ID), NumLoops(numLoops) {}
+ : LoopPass(ID), NumLoops(numLoops) {
+ initializeLoopExtractorPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -50,8 +52,13 @@ namespace {
}
char LoopExtractor::ID = 0;
-INITIALIZE_PASS(LoopExtractor, "loop-extract",
- "Extract loops into new functions", false, false);
+INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
+ "Extract loops into new functions", false, false)
+INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(LoopExtractor, "loop-extract",
+ "Extract loops into new functions", false, false)
namespace {
/// SingleLoopExtractor - For bugpoint.
@@ -63,7 +70,7 @@ namespace {
char SingleLoopExtractor::ID = 0;
INITIALIZE_PASS(SingleLoopExtractor, "loop-extract-single",
- "Extract at most one loop into a new function", false, false);
+ "Extract at most one loop into a new function", false, false)
// createLoopExtractorPass - This pass extracts all natural loops from the
// program into a function if it can.
@@ -159,7 +166,7 @@ namespace {
char BlockExtractorPass::ID = 0;
INITIALIZE_PASS(BlockExtractorPass, "extract-blocks",
"Extract Basic Blocks From Module (for bugpoint use)",
- false, false);
+ false, false)
// createBlockExtractorPass - This pass extracts all blocks (except those
// specified in the argument list) from the functions in the module.
diff --git a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp b/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
index 6c715de..b545f0b 100644
--- a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -109,7 +109,9 @@ namespace {
bool IsTransformableFunction(StringRef Name);
public:
static char ID; // Pass identification, replacement for typeid
- LowerSetJmp() : ModulePass(ID) {}
+ LowerSetJmp() : ModulePass(ID) {
+ initializeLowerSetJmpPass(*PassRegistry::getPassRegistry());
+ }
void visitCallInst(CallInst& CI);
void visitInvokeInst(InvokeInst& II);
@@ -122,7 +124,7 @@ namespace {
} // end anonymous namespace
char LowerSetJmp::ID = 0;
-INITIALIZE_PASS(LowerSetJmp, "lowersetjmp", "Lower Set Jump", false, false);
+INITIALIZE_PASS(LowerSetJmp, "lowersetjmp", "Lower Set Jump", false, false)
// run - Run the transformation on the program. We grab the function
// prototypes for longjmp and setjmp. If they are used in the program,
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 5d838f9..cccffca 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -67,42 +67,87 @@
using namespace llvm;
STATISTIC(NumFunctionsMerged, "Number of functions merged");
+STATISTIC(NumThunksWritten, "Number of thunks generated");
+STATISTIC(NumAliasesWritten, "Number of aliases generated");
+STATISTIC(NumDoubleWeak, "Number of new functions created");
+
+/// Creates a hash-code for the function which is the same for any two
+/// functions that will compare equal, without looking at the instructions
+/// inside the function.
+static unsigned profileFunction(const Function *F) {
+ const FunctionType *FTy = F->getFunctionType();
-namespace {
- /// MergeFunctions finds functions which will generate identical machine code,
- /// by considering all pointer types to be equivalent. Once identified,
- /// MergeFunctions will fold them by replacing a call to one to a call to a
- /// bitcast of the other.
- ///
- class MergeFunctions : public ModulePass {
- public:
- static char ID;
- MergeFunctions() : ModulePass(ID) {}
-
- bool runOnModule(Module &M);
-
- private:
- /// MergeTwoFunctions - Merge two equivalent functions. Upon completion, G
- /// may be deleted, or may be converted into a thunk. In either case, it
- /// should never be visited again.
- void MergeTwoFunctions(Function *F, Function *G) const;
-
- /// WriteThunk - Replace G with a simple tail call to bitcast(F). Also
- /// replace direct uses of G with bitcast(F).
- void WriteThunk(Function *F, Function *G) const;
-
- TargetData *TD;
- };
+ FoldingSetNodeID ID;
+ ID.AddInteger(F->size());
+ ID.AddInteger(F->getCallingConv());
+ ID.AddBoolean(F->hasGC());
+ ID.AddBoolean(FTy->isVarArg());
+ ID.AddInteger(FTy->getReturnType()->getTypeID());
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ ID.AddInteger(FTy->getParamType(i)->getTypeID());
+ return ID.ComputeHash();
}
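
profileFunction hashes only properties that comparing-equal functions must share, never the instruction stream, so a hash mismatch proves inequality while a match still requires the full FunctionComparator run. A standalone stand-in with an invented djb2-style combiner in place of FoldingSetNodeID:

    #include <cstdio>
    #include <vector>

    struct FnShape {
      unsigned NumBlocks, CallConv;
      bool HasGC, IsVarArg;
      int RetTypeID;
      std::vector<int> ParamTypeIDs;
    };

    static unsigned profile(const FnShape &F) {
      unsigned H = 5381; // invented combiner in place of ID.ComputeHash()
      unsigned Fields[] = { F.NumBlocks, F.CallConv, F.HasGC, F.IsVarArg,
                            (unsigned)F.RetTypeID };
      for (unsigned i = 0; i != 5; ++i) H = H * 33 + Fields[i];
      for (unsigned i = 0; i != F.ParamTypeIDs.size(); ++i)
        H = H * 33 + (unsigned)F.ParamTypeIDs[i];
      return H;
    }

    int main() {
      FnShape A = { 3, 0, false, false, 11, std::vector<int>(2, 11) };
      FnShape B = A; // same shape, possibly different bodies
      std::printf("%s\n", profile(A) == profile(B) ? "same bucket" : "differ");
      return 0;
    }
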
-char MergeFunctions::ID = 0;
-INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false);
+namespace {
+
+/// ComparableFunction - A struct that pairs together functions with a
+/// TargetData so that we can keep them together as elements in the DenseSet.
+class ComparableFunction {
+public:
+ static const ComparableFunction EmptyKey;
+ static const ComparableFunction TombstoneKey;
+ static TargetData * const LookupOnly;
+
+ ComparableFunction(Function *Func, TargetData *TD)
+ : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
+
+ Function *getFunc() const { return Func; }
+ unsigned getHash() const { return Hash; }
+ TargetData *getTD() const { return TD; }
+
+ // Drops AssertingVH reference to the function. Outside of debug mode, this
+ // does nothing.
+ void release() {
+ assert(Func &&
+ "Attempted to release function twice, or release empty/tombstone!");
+ Func = NULL;
+ }
+
+private:
+ explicit ComparableFunction(unsigned Hash)
+ : Func(NULL), Hash(Hash), TD(NULL) {}
+
+ AssertingVH<Function> Func;
+ unsigned Hash;
+ TargetData *TD;
+};
+
+const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
+const ComparableFunction ComparableFunction::TombstoneKey =
+ ComparableFunction(1);
+TargetData * const ComparableFunction::LookupOnly = (TargetData*)(-1);
-ModulePass *llvm::createMergeFunctionsPass() {
- return new MergeFunctions();
+}
+
+namespace llvm {
+ template <>
+ struct DenseMapInfo<ComparableFunction> {
+ static ComparableFunction getEmptyKey() {
+ return ComparableFunction::EmptyKey;
+ }
+ static ComparableFunction getTombstoneKey() {
+ return ComparableFunction::TombstoneKey;
+ }
+ static unsigned getHashValue(const ComparableFunction &CF) {
+ return CF.getHash();
+ }
+ static bool isEqual(const ComparableFunction &LHS,
+ const ComparableFunction &RHS);
+ };
}
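
The DenseMapInfo specialization above must keep its sentinel keys distinct from every real entry, and should only fall through to the expensive comparison after the cheap identity checks. The contract in a standalone sketch with an invented CF type:

    #include <cstdio>

    struct CF { const void *Func; unsigned Hash; };
    static const CF EmptyKey     = { 0, 0 };
    static const CF TombstoneKey = { 0, 1 };

    static bool deepCompare(const CF &, const CF &) {
      return true; // the costly FunctionComparator run would go here
    }

    static bool isEqual(const CF &L, const CF &R) {
      if (L.Func == R.Func && L.Hash == R.Hash) return true; // fast path
      if (!L.Func || !R.Func) return false; // sentinels never match real keys
      return deepCompare(L, R);
    }

    int main() {
      int F;
      CF A = { &F, 42 };
      std::printf("%d %d %d\n", isEqual(A, A), isEqual(EmptyKey, A),
                  isEqual(EmptyKey, TombstoneKey));
      return 0; // prints: 1 0 0
    }
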
namespace {
+
/// FunctionComparator - Compares two functions to determine whether or not
/// they will generate machine code with the same behaviour. TargetData is
/// used if available. The comparator always fails conservatively (erring on the
@@ -111,34 +156,34 @@ class FunctionComparator {
public:
FunctionComparator(const TargetData *TD, const Function *F1,
const Function *F2)
- : F1(F1), F2(F2), TD(TD), IDMap1Count(0), IDMap2Count(0) {}
+ : F1(F1), F2(F2), TD(TD) {}
- /// Compare - test whether the two functions have equivalent behaviour.
- bool Compare();
+ /// Test whether the two functions have equivalent behaviour.
+ bool compare();
private:
- /// Compare - test whether two basic blocks have equivalent behaviour.
- bool Compare(const BasicBlock *BB1, const BasicBlock *BB2);
+ /// Test whether two basic blocks have equivalent behaviour.
+ bool compare(const BasicBlock *BB1, const BasicBlock *BB2);
- /// Enumerate - Assign or look up previously assigned numbers for the two
- /// values, and return whether the numbers are equal. Numbers are assigned in
- /// the order visited.
- bool Enumerate(const Value *V1, const Value *V2);
+ /// Assign or look up previously assigned numbers for the two values, and
+ /// return whether the numbers are equal. Numbers are assigned in the order
+ /// visited.
+ bool enumerate(const Value *V1, const Value *V2);
- /// isEquivalentOperation - Compare two Instructions for equivalence, similar
- /// to Instruction::isSameOperationAs but with modifications to the type
+ /// Compare two Instructions for equivalence, similar to
+ /// Instruction::isSameOperationAs but with modifications to the type
/// comparison.
bool isEquivalentOperation(const Instruction *I1,
const Instruction *I2) const;
- /// isEquivalentGEP - Compare two GEPs for equivalent pointer arithmetic.
+ /// Compare two GEPs for equivalent pointer arithmetic.
bool isEquivalentGEP(const GEPOperator *GEP1, const GEPOperator *GEP2);
bool isEquivalentGEP(const GetElementPtrInst *GEP1,
const GetElementPtrInst *GEP2) {
return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
}
- /// isEquivalentType - Compare two Types, treating all pointer types as equal.
+ /// Compare two Types, treating all pointer types as equal.
bool isEquivalentType(const Type *Ty1, const Type *Ty2) const;
// The two functions undergoing comparison.
@@ -146,20 +191,26 @@ private:
const TargetData *TD;
- typedef DenseMap<const Value *, unsigned long> IDMap;
- IDMap Map1, Map2;
- unsigned long IDMap1Count, IDMap2Count;
+ DenseMap<const Value *, const Value *> id_map;
+ DenseSet<const Value *> seen_values;
};
+
}
-/// isEquivalentType - any two pointers in the same address space are
-/// equivalent. Otherwise, standard type equivalence rules apply.
+// Any two pointers in the same address space are equivalent; intptr_t and
+// pointers are equivalent. Otherwise, standard type equivalence rules apply.
bool FunctionComparator::isEquivalentType(const Type *Ty1,
const Type *Ty2) const {
if (Ty1 == Ty2)
return true;
- if (Ty1->getTypeID() != Ty2->getTypeID())
+ if (Ty1->getTypeID() != Ty2->getTypeID()) {
+ if (TD) {
+ LLVMContext &Ctx = Ty1->getContext();
+ if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
+ if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
+ }
return false;
+ }
switch(Ty1->getTypeID()) {
default:
@@ -167,6 +218,7 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1,
// Fall through in Release mode.
case Type::IntegerTyID:
case Type::OpaqueTyID:
+ case Type::VectorTyID:
// Ty1 == Ty2 would have returned true earlier.
return false;
@@ -225,21 +277,18 @@ bool FunctionComparator::isEquivalentType(const Type *Ty1,
return ATy1->getNumElements() == ATy2->getNumElements() &&
isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
}
-
- case Type::VectorTyID: {
- const VectorType *VTy1 = cast<VectorType>(Ty1);
- const VectorType *VTy2 = cast<VectorType>(Ty2);
- return VTy1->getNumElements() == VTy2->getNumElements() &&
- isEquivalentType(VTy1->getElementType(), VTy2->getElementType());
- }
}
}
-/// isEquivalentOperation - determine whether the two operations are the same
-/// except that pointer-to-A and pointer-to-B are equivalent. This should be
-/// kept in sync with Instruction::isSameOperationAs.
+// Determine whether the two operations are the same except that pointer-to-A
+// and pointer-to-B are equivalent. This should be kept in sync with
+// Instruction::isSameOperationAs.
bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
const Instruction *I2) const {
+ // Differences from Instruction::isSameOperationAs:
+ // * replace type comparison with calls to isEquivalentType.
+ // * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top
+ // * because of the above, we don't test for the tail bit on calls later on
if (I1->getOpcode() != I2->getOpcode() ||
I1->getNumOperands() != I2->getNumOperands() ||
!isEquivalentType(I1->getType(), I2->getType()) ||
@@ -263,14 +312,11 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
- return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
- CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<CallInst>(I2)->getAttributes().getRawPointer();
+ return CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallInst>(I2)->getAttributes();
if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<InvokeInst>(I2)->getAttributes().getRawPointer();
+ CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes();
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1)) {
if (IVI->getNumIndices() != cast<InsertValueInst>(I2)->getNumIndices())
return false;
@@ -291,8 +337,7 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
return true;
}
-/// isEquivalentGEP - determine whether two GEP operations perform the same
-/// underlying arithmetic.
+// Determine whether two GEP operations perform the same underlying arithmetic.
bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
const GEPOperator *GEP2) {
// When we have target data, we can reduce the GEP down to the value in bytes
@@ -315,17 +360,17 @@ bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
return false;
for (unsigned i = 0, e = GEP1->getNumOperands(); i != e; ++i) {
- if (!Enumerate(GEP1->getOperand(i), GEP2->getOperand(i)))
+ if (!enumerate(GEP1->getOperand(i), GEP2->getOperand(i)))
return false;
}
return true;
}
-/// Enumerate - Compare two values used by the two functions under pair-wise
-/// comparison. If this is the first time the values are seen, they're added to
-/// the mapping so that we will detect mismatches on next use.
-bool FunctionComparator::Enumerate(const Value *V1, const Value *V2) {
+// Compare two values used by the two functions under pair-wise comparison. If
+// this is the first time the values are seen, they're added to the mapping so
+// that we will detect mismatches on next use.
+bool FunctionComparator::enumerate(const Value *V1, const Value *V2) {
// Check for function @f1 referring to itself and function @f2 referring to
// itself, or referring to each other, or both referring to either of them.
// They're all equivalent if the two functions are otherwise equivalent.
@@ -334,35 +379,44 @@ bool FunctionComparator::Enumerate(const Value *V1, const Value *V2) {
if (V1 == F2 && V2 == F1)
return true;
- // TODO: constant expressions with GEP or references to F1 or F2.
- if (isa<Constant>(V1))
- return V1 == V2;
-
- if (isa<InlineAsm>(V1) && isa<InlineAsm>(V2)) {
- const InlineAsm *IA1 = cast<InlineAsm>(V1);
- const InlineAsm *IA2 = cast<InlineAsm>(V2);
- return IA1->getAsmString() == IA2->getAsmString() &&
- IA1->getConstraintString() == IA2->getConstraintString();
+ if (const Constant *C1 = dyn_cast<Constant>(V1)) {
+ if (V1 == V2) return true;
+ const Constant *C2 = dyn_cast<Constant>(V2);
+ if (!C2) return false;
+ // TODO: constant expressions with GEP or references to F1 or F2.
+ if (C1->isNullValue() && C2->isNullValue() &&
+ isEquivalentType(C1->getType(), C2->getType()))
+ return true;
+ // Try bitcasting C2 to C1's type. If the bitcast is legal and returns C1
+ // then they must have equal bit patterns.
+ return C1->getType()->canLosslesslyBitCastTo(C2->getType()) &&
+ C1 == ConstantExpr::getBitCast(const_cast<Constant*>(C2), C1->getType());
}
- unsigned long &ID1 = Map1[V1];
- if (!ID1)
- ID1 = ++IDMap1Count;
+ if (isa<InlineAsm>(V1) || isa<InlineAsm>(V2))
+ return V1 == V2;
- unsigned long &ID2 = Map2[V2];
- if (!ID2)
- ID2 = ++IDMap2Count;
+ // Check that V1 maps to V2. If we find a value that V1 maps to then we simply
+ // check whether it's equal to V2. When there is no mapping then we need to
+ // ensure that V2 isn't already equivalent to something else. For this
+ // purpose, we track the V2 values in a set.
- return ID1 == ID2;
+ const Value *&map_elem = id_map[V1];
+ if (map_elem)
+ return map_elem == V2;
+ if (!seen_values.insert(V2).second)
+ return false;
+ map_elem = V2;
+ return true;
}
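
enumerate() accepts a pair only if it is consistent with every pair seen so far, in both directions; the seen-set is what makes the value mapping injective. A standalone sketch with plain std containers in place of DenseMap/DenseSet:

    #include <cstdio>
    #include <map>
    #include <set>

    static std::map<const void*, const void*> id_map;
    static std::set<const void*> seen_values;

    static bool enumerate(const void *V1, const void *V2) {
      const void *&MapElem = id_map[V1];
      if (MapElem) return MapElem == V2;  // V1 seen before: must match its pair
      if (!seen_values.insert(V2).second)
        return false;                     // V2 already paired with another value
      MapElem = V2;
      return true;
    }

    int main() {
      int A, B, C;
      std::printf("%d\n", enumerate(&A, &B)); // 1: fresh pair
      std::printf("%d\n", enumerate(&A, &B)); // 1: consistent reuse
      std::printf("%d\n", enumerate(&C, &B)); // 0: &B is already taken
      return 0;
    }
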
-/// Compare - test whether two basic blocks have equivalent behaviour.
-bool FunctionComparator::Compare(const BasicBlock *BB1, const BasicBlock *BB2) {
+// Test whether two basic blocks have equivalent behaviour.
+bool FunctionComparator::compare(const BasicBlock *BB1, const BasicBlock *BB2) {
BasicBlock::const_iterator F1I = BB1->begin(), F1E = BB1->end();
BasicBlock::const_iterator F2I = BB2->begin(), F2E = BB2->end();
do {
- if (!Enumerate(F1I, F2I))
+ if (!enumerate(F1I, F2I))
return false;
if (const GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(F1I)) {
@@ -370,7 +424,7 @@ bool FunctionComparator::Compare(const BasicBlock *BB1, const BasicBlock *BB2) {
if (!GEP2)
return false;
- if (!Enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
+ if (!enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
return false;
if (!isEquivalentGEP(GEP1, GEP2))
@@ -384,7 +438,7 @@ bool FunctionComparator::Compare(const BasicBlock *BB1, const BasicBlock *BB2) {
Value *OpF1 = F1I->getOperand(i);
Value *OpF2 = F2I->getOperand(i);
- if (!Enumerate(OpF1, OpF2))
+ if (!enumerate(OpF1, OpF2))
return false;
if (OpF1->getValueID() != OpF2->getValueID() ||
@@ -399,8 +453,8 @@ bool FunctionComparator::Compare(const BasicBlock *BB1, const BasicBlock *BB2) {
return F1I == F1E && F2I == F2E;
}
-/// Compare - test whether the two functions have equivalent behaviour.
-bool FunctionComparator::Compare() {
+// Test whether the two functions have equivalent behaviour.
+bool FunctionComparator::compare() {
// We need to recheck everything, but check the things that weren't included
// in the hash first.
@@ -431,14 +485,14 @@ bool FunctionComparator::Compare() {
return false;
assert(F1->arg_size() == F2->arg_size() &&
- "Identical functions have a different number of args.");
+ "Identically typed functions have different numbers of args!");
// Visit the arguments so that they get enumerated in the order they're
// passed in.
for (Function::const_arg_iterator f1i = F1->arg_begin(),
f2i = F2->arg_begin(), f1e = F1->arg_end(); f1i != f1e; ++f1i, ++f2i) {
- if (!Enumerate(f1i, f2i))
- llvm_unreachable("Arguments repeat");
+ if (!enumerate(f1i, f2i))
+ llvm_unreachable("Arguments repeat!");
}
// We do a CFG-ordered walk since the actual ordering of the blocks in the
@@ -456,7 +510,7 @@ bool FunctionComparator::Compare() {
const BasicBlock *F1BB = F1BBs.pop_back_val();
const BasicBlock *F2BB = F2BBs.pop_back_val();
- if (!Enumerate(F1BB, F2BB) || !Compare(F1BB, F2BB))
+ if (!enumerate(F1BB, F2BB) || !compare(F1BB, F2BB))
return false;
const TerminatorInst *F1TI = F1BB->getTerminator();
@@ -474,23 +528,190 @@ bool FunctionComparator::Compare() {
return true;
}
-/// WriteThunk - Replace G with a simple tail call to bitcast(F). Also replace
-/// direct uses of G with bitcast(F).
-void MergeFunctions::WriteThunk(Function *F, Function *G) const {
+namespace {
+
+/// MergeFunctions finds functions which will generate identical machine code,
+/// by considering all pointer types to be equivalent. Once identified,
+/// MergeFunctions will fold them by replacing a call to one to a call to a
+/// bitcast of the other.
+///
+class MergeFunctions : public ModulePass {
+public:
+ static char ID;
+ MergeFunctions()
+ : ModulePass(ID), HasGlobalAliases(false) {
+ initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnModule(Module &M);
+
+private:
+ typedef DenseSet<ComparableFunction> FnSetType;
+
+ /// A work queue of functions that may have been modified and should be
+ /// analyzed again.
+ std::vector<WeakVH> Deferred;
+
+ /// Insert a ComparableFunction into the FnSet, or merge it away if it's
+ /// equal to one that's already present.
+ bool insert(ComparableFunction &NewF);
+
+ /// Remove a Function from the FnSet and queue it up for a second sweep of
+ /// analysis.
+ void remove(Function *F);
+
+ /// Find the functions that use this Value and remove them from FnSet and
+ /// queue the functions.
+ void removeUsers(Value *V);
+
+ /// Replace all direct calls of Old with calls of New. Will bitcast New if
+ /// necessary to make types match.
+ void replaceDirectCallers(Function *Old, Function *New);
+
+ /// Merge two equivalent functions. Upon completion, G may be deleted, or may
+ /// be converted into a thunk. In either case, it should never be visited
+ /// again.
+ void mergeTwoFunctions(Function *F, Function *G);
+
+ /// Replace G with a thunk or an alias to F. Deletes G.
+ void writeThunkOrAlias(Function *F, Function *G);
+
+ /// Replace G with a simple tail call to bitcast(F). Also replace direct uses
+ /// of G with bitcast(F). Deletes G.
+ void writeThunk(Function *F, Function *G);
+
+ /// Replace G with an alias to F. Deletes G.
+ void writeAlias(Function *F, Function *G);
+
+ /// The set of all distinct functions. Use the insert() and remove() methods
+ /// to modify it.
+ FnSetType FnSet;
+
+ /// TargetData for more accurate GEP comparisons. May be NULL.
+ TargetData *TD;
+
+ /// Whether or not the target supports global aliases.
+ bool HasGlobalAliases;
+};
+
+} // end anonymous namespace
+
+char MergeFunctions::ID = 0;
+INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false)
+
+ModulePass *llvm::createMergeFunctionsPass() {
+ return new MergeFunctions();
+}
+
+bool MergeFunctions::runOnModule(Module &M) {
+ bool Changed = false;
+ TD = getAnalysisIfAvailable<TargetData>();
+
+ for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
+ Deferred.push_back(WeakVH(I));
+ }
+ FnSet.resize(Deferred.size());
+
+ do {
+ std::vector<WeakVH> Worklist;
+ Deferred.swap(Worklist);
+
+ DEBUG(dbgs() << "size of module: " << M.size() << '\n');
+ DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
+
+ // Insert only strong functions and merge them. Strong function merging
+ // always deletes one of them.
+ for (std::vector<WeakVH>::iterator I = Worklist.begin(),
+ E = Worklist.end(); I != E; ++I) {
+ if (!*I) continue;
+ Function *F = cast<Function>(*I);
+ if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
+ !F->mayBeOverridden()) {
+ ComparableFunction CF = ComparableFunction(F, TD);
+ Changed |= insert(CF);
+ }
+ }
+
+ // Insert only weak functions and merge them. By doing these second we
+ // create thunks to the strong function when possible. When two weak
+ // functions are identical, we create a new strong function with two weak
+ // thunks to it which are identical but not mergeable.
+ for (std::vector<WeakVH>::iterator I = Worklist.begin(),
+ E = Worklist.end(); I != E; ++I) {
+ if (!*I) continue;
+ Function *F = cast<Function>(*I);
+ if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
+ F->mayBeOverridden()) {
+ ComparableFunction CF = ComparableFunction(F, TD);
+ Changed |= insert(CF);
+ }
+ }
+ DEBUG(dbgs() << "size of FnSet: " << FnSet.size() << '\n');
+ } while (!Deferred.empty());
+
+ FnSet.clear();
+
+ return Changed;
+}
+
+bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
+ const ComparableFunction &RHS) {
+ if (LHS.getFunc() == RHS.getFunc() &&
+ LHS.getHash() == RHS.getHash())
+ return true;
+ if (!LHS.getFunc() || !RHS.getFunc())
+ return false;
+
+ // One of these is a special "underlying pointer comparison only" object.
+ if (LHS.getTD() == ComparableFunction::LookupOnly ||
+ RHS.getTD() == ComparableFunction::LookupOnly)
+ return false;
+
+ assert(LHS.getTD() == RHS.getTD() &&
+ "Comparing functions for different targets");
+
+ return FunctionComparator(LHS.getTD(), LHS.getFunc(),
+ RHS.getFunc()).compare();
+}
+
+// Replace direct callers of Old with New.
+void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
+ Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
+ for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
+ UI != UE;) {
+ Value::use_iterator TheIter = UI;
+ ++UI;
+ CallSite CS(*TheIter);
+ if (CS && CS.isCallee(TheIter)) {
+ remove(CS.getInstruction()->getParent()->getParent());
+ TheIter.getUse().set(BitcastNew);
+ }
+ }
+}
+
+// Replace G with an alias to F if possible, or else a thunk to F. Deletes G.
+void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
+ if (HasGlobalAliases && G->hasUnnamedAddr()) {
+ if (G->hasExternalLinkage() || G->hasLocalLinkage() ||
+ G->hasWeakLinkage()) {
+ writeAlias(F, G);
+ return;
+ }
+ }
+
+ writeThunk(F, G);
+}
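
A small decision sketch for writeThunkOrAlias, with invented booleans standing in for the three predicates it tests; anything that fails a test falls back to the thunk path:

    #include <cstdio>

    static const char *loweringFor(bool HasGlobalAliases, bool UnnamedAddr,
                                   bool LinkageOK) {
      return (HasGlobalAliases && UnnamedAddr && LinkageOK) ? "alias" : "thunk";
    }

    int main() {
      std::printf("%s\n", loweringFor(true, true, true));   // alias
      std::printf("%s\n", loweringFor(true, false, true));  // thunk: addr taken
      std::printf("%s\n", loweringFor(false, true, true));  // thunk: no aliases
      return 0;
    }
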
+
+// Replace G with a simple tail call to bitcast(F). Also replace direct uses
+// of G with bitcast(F). Deletes G.
+void MergeFunctions::writeThunk(Function *F, Function *G) {
if (!G->mayBeOverridden()) {
// Redirect direct callers of G to F.
- Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
- for (Value::use_iterator UI = G->use_begin(), UE = G->use_end();
- UI != UE;) {
- Value::use_iterator TheIter = UI;
- ++UI;
- CallSite CS(*TheIter);
- if (CS && CS.isCallee(TheIter))
- TheIter.getUse().set(BitcastF);
- }
+ replaceDirectCallers(G, F);
}
- // If G was internal then we may have replaced all uses if G with F. If so,
+ // If G was internal then we may have replaced all uses of G with F. If so,
// stop here and delete G. There's no need for a thunk.
if (G->hasLocalLinkage() && G->use_empty()) {
G->eraseFromParent();
@@ -522,131 +743,126 @@ void MergeFunctions::WriteThunk(Function *F, Function *G) const {
NewG->copyAttributesFrom(G);
NewG->takeName(G);
+ removeUsers(G);
G->replaceAllUsesWith(NewG);
G->eraseFromParent();
+
+ DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n');
+ ++NumThunksWritten;
}
-/// MergeTwoFunctions - Merge two equivalent functions. Upon completion,
-/// Function G is deleted.
-void MergeFunctions::MergeTwoFunctions(Function *F, Function *G) const {
- if (F->isWeakForLinker()) {
- assert(G->isWeakForLinker());
+// Replace G with an alias to F and delete G.
+void MergeFunctions::writeAlias(Function *F, Function *G) {
+ Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
+ GlobalAlias *GA = new GlobalAlias(G->getType(), G->getLinkage(), "",
+ BitcastF, G->getParent());
+ F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
+ GA->takeName(G);
+ GA->setVisibility(G->getVisibility());
+ removeUsers(G);
+ G->replaceAllUsesWith(GA);
+ G->eraseFromParent();
+
+ DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n');
+ ++NumAliasesWritten;
+}
+
+// Merge two equivalent functions. Upon completion, Function G is deleted.
+void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
+ if (F->mayBeOverridden()) {
+ assert(G->mayBeOverridden());
+
+ if (HasGlobalAliases) {
+ // Make them both thunks to the same internal function.
+ Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
+ F->getParent());
+ H->copyAttributesFrom(F);
+ H->takeName(F);
+ removeUsers(F);
+ F->replaceAllUsesWith(H);
- // Make them both thunks to the same internal function.
- Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
- F->getParent());
- H->copyAttributesFrom(F);
- H->takeName(F);
- F->replaceAllUsesWith(H);
+ unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
- unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
+ writeAlias(F, G);
+ writeAlias(F, H);
- WriteThunk(F, G);
- WriteThunk(F, H);
+ F->setAlignment(MaxAlignment);
+ F->setLinkage(GlobalValue::PrivateLinkage);
+ } else {
+ // We can't merge them. Instead, pick one and update all direct callers
+ // to call it and hope that we improve the instruction cache hit rate.
+ replaceDirectCallers(G, F);
+ }
- F->setAlignment(MaxAlignment);
- F->setLinkage(GlobalValue::InternalLinkage);
+ ++NumDoubleWeak;
} else {
- WriteThunk(F, G);
+ writeThunkOrAlias(F, G);
}
++NumFunctionsMerged;
}
-static unsigned ProfileFunction(const Function *F) {
- const FunctionType *FTy = F->getFunctionType();
-
- FoldingSetNodeID ID;
- ID.AddInteger(F->size());
- ID.AddInteger(F->getCallingConv());
- ID.AddBoolean(F->hasGC());
- ID.AddBoolean(FTy->isVarArg());
- ID.AddInteger(FTy->getReturnType()->getTypeID());
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- ID.AddInteger(FTy->getParamType(i)->getTypeID());
- return ID.ComputeHash();
-}
-
-class ComparableFunction {
-public:
- ComparableFunction(Function *Func, TargetData *TD)
- : Func(Func), Hash(ProfileFunction(Func)), TD(TD) {}
+// Insert a ComparableFunction into the FnSet, or merge it away if equal to one
+// that was already inserted.
+bool MergeFunctions::insert(ComparableFunction &NewF) {
+ std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
+ if (Result.second) {
+ DEBUG(dbgs() << "Inserting as unique: " << NewF.getFunc()->getName() << '\n');
+ return false;
+ }
- AssertingVH<Function> const Func;
- const unsigned Hash;
- TargetData * const TD;
-};
+ const ComparableFunction &OldF = *Result.first;
-struct MergeFunctionsEqualityInfo {
- static ComparableFunction *getEmptyKey() {
- return reinterpret_cast<ComparableFunction*>(0);
- }
- static ComparableFunction *getTombstoneKey() {
- return reinterpret_cast<ComparableFunction*>(-1);
- }
- static unsigned getHashValue(const ComparableFunction *CF) {
- return CF->Hash;
- }
- static bool isEqual(const ComparableFunction *LHS,
- const ComparableFunction *RHS) {
- if (LHS == RHS)
- return true;
- if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
- RHS == getEmptyKey() || RHS == getTombstoneKey())
- return false;
- assert(LHS->TD == RHS->TD && "Comparing functions for different targets");
- return FunctionComparator(LHS->TD, LHS->Func, RHS->Func).Compare();
- }
-};
+ // Never thunk a strong function to a weak function.
+ assert(!OldF.getFunc()->mayBeOverridden() ||
+ NewF.getFunc()->mayBeOverridden());
-bool MergeFunctions::runOnModule(Module &M) {
- typedef DenseSet<ComparableFunction *, MergeFunctionsEqualityInfo> FnSetType;
+ DEBUG(dbgs() << " " << OldF.getFunc()->getName() << " == "
+ << NewF.getFunc()->getName() << '\n');
- bool Changed = false;
- TD = getAnalysisIfAvailable<TargetData>();
+ Function *DeleteF = NewF.getFunc();
+ NewF.release();
+ mergeTwoFunctions(OldF.getFunc(), DeleteF);
+ return true;
+}
- std::vector<Function *> Funcs;
- for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
- if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage())
- Funcs.push_back(F);
+// Remove a function from FnSet. If it was already in FnSet, add it to Deferred
+// so that we'll look at it in the next round.
+void MergeFunctions::remove(Function *F) {
+ // We need to make sure we remove F, not a function "equal" to F per the
+ // function equality comparator.
+ //
+ // The special "lookup only" ComparableFunction bypasses the expensive
+ // function comparison in favour of a pointer comparison on the underlying
+ // Function*'s.
+ ComparableFunction CF = ComparableFunction(F, ComparableFunction::LookupOnly);
+ if (FnSet.erase(CF)) {
+ DEBUG(dbgs() << "Removed " << F->getName() << " from set and deferred it.\n");
+ Deferred.push_back(F);
}
+}
- bool LocalChanged;
- do {
- LocalChanged = false;
-
- FnSetType FnSet;
- for (unsigned i = 0, e = Funcs.size(); i != e;) {
- Function *F = Funcs[i];
- ComparableFunction *NewF = new ComparableFunction(F, TD);
- std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
- if (!Result.second) {
- ComparableFunction *&OldF = *Result.first;
- assert(OldF && "Expected a hash collision");
-
- // NewF will be deleted in favour of OldF unless NewF is strong and
- // OldF is weak in which case swap them to keep the strong definition.
-
- if (OldF->Func->isWeakForLinker() && !NewF->Func->isWeakForLinker())
- std::swap(OldF, NewF);
-
- DEBUG(dbgs() << " " << OldF->Func->getName() << " == "
- << NewF->Func->getName() << '\n');
-
- Funcs.erase(Funcs.begin() + i);
- --e;
-
- Function *DeleteF = NewF->Func;
- delete NewF;
- MergeTwoFunctions(OldF->Func, DeleteF);
- LocalChanged = true;
- Changed = true;
- } else {
- ++i;
+// For each instruction that uses the value, remove() the function that
+// contains the instruction. This should happen right before a call to RAUW.
+void MergeFunctions::removeUsers(Value *V) {
+ std::vector<Value *> Worklist;
+ Worklist.push_back(V);
+ while (!Worklist.empty()) {
+ Value *V = Worklist.back();
+ Worklist.pop_back();
+
+ for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
+ UI != UE; ++UI) {
+ Use &U = UI.getUse();
+ if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
+ remove(I->getParent()->getParent());
+ } else if (isa<GlobalValue>(U.getUser())) {
+ // do nothing
+ } else if (Constant *C = dyn_cast<Constant>(U.getUser())) {
+ for (Value::use_iterator CUI = C->use_begin(), CUE = C->use_end();
+ CUI != CUE; ++CUI)
+ Worklist.push_back(*CUI);
}
}
- DeleteContainerPointers(FnSet);
- } while (LocalChanged);
-
- return Changed;
+ }
}
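
removeUsers() walks the use graph with an explicit worklist, descending through constants (which can be shared across functions) until it reaches instructions. A standalone sketch with an invented Node type; the real code's GlobalValue case is elided:

    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    struct Node {
      bool IsInstruction;
      std::string ParentFn;        // enclosing function, if an instruction
      std::vector<Node*> Users;
    };

    static void removeUsers(Node *V, std::set<std::string> &Touched) {
      std::vector<Node*> Worklist(1, V);
      while (!Worklist.empty()) {
        Node *N = Worklist.back();
        Worklist.pop_back();
        for (unsigned i = 0, e = N->Users.size(); i != e; ++i) {
          Node *U = N->Users[i];
          if (U->IsInstruction)
            Touched.insert(U->ParentFn); // remove(I->getParent()->getParent())
          else
            Worklist.push_back(U);       // a constant: follow its users too
        }
      }
    }

    int main() {
      Node Use1  = { true, "f", std::vector<Node*>() };
      Node CExpr = { false, "", std::vector<Node*>(1, &Use1) };
      Node G     = { false, "", std::vector<Node*>(1, &CExpr) };
      std::set<std::string> Touched;
      removeUsers(&G, Touched);
      std::printf("touched %u function(s)\n", (unsigned)Touched.size()); // 1
      return 0;
    }
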
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
index 432f7c5..2afd029 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -30,7 +30,9 @@ namespace {
struct PartialInliner : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
static char ID; // Pass identification, replacement for typeid
- PartialInliner() : ModulePass(ID) {}
+ PartialInliner() : ModulePass(ID) {
+ initializePartialInlinerPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module& M);
@@ -41,7 +43,7 @@ namespace {
char PartialInliner::ID = 0;
INITIALIZE_PASS(PartialInliner, "partial-inliner",
- "Partial Inliner", false, false);
+ "Partial Inliner", false, false)
ModulePass* llvm::createPartialInliningPass() { return new PartialInliner(); }
@@ -67,7 +69,7 @@ Function* PartialInliner::unswitchFunction(Function* F) {
return 0;
// Clone the function, so that we can hack away on it.
- ValueMap<const Value*, Value*> VMap;
+ ValueToValueMapTy VMap;
Function* duplicateFunction = CloneFunction(F, VMap,
/*ModuleLevelChanges=*/false);
duplicateFunction->setLinkage(GlobalValue::InternalLinkage);
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp b/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
deleted file mode 100644
index 4a99a41..0000000
--- a/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
+++ /dev/null
@@ -1,216 +0,0 @@
-//===-- PartialSpecialization.cpp - Specialize for common constants--------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass finds function arguments that are often a common constant and
-// specializes a version of the called function for that constant.
-//
-// This pass simply does the cloning for functions it specializes. It depends
-// on IPSCCP and DAE to clean up the results.
-//
-// The initial heuristic favors constant arguments that are used in control
-// flow.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "partialspecialization"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Constant.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/ADT/DenseSet.h"
-#include <map>
-using namespace llvm;
-
-STATISTIC(numSpecialized, "Number of specialized functions created");
-STATISTIC(numReplaced, "Number of callers replaced by specialization");
-
-// Maximum number of arguments that can be marked as interesting
-static const int MaxInterests = 6;
-
-// A function must be called more than this many times to be specialized
-static const int CallsMin = 5;
-
-// At least 10% of calls must pass the same constant for us to specialize on it
-static const double ConstValPercent = .1;
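Read together, these thresholds mean the pass only clones a function that is called more than CallsMin times and where more than ConstValPercent of those calls pass one particular constant. A minimal sketch of that gate (illustrative only, not part of this diff; shouldSpecialize is a made-up name):

    // Mirrors the checks in PartSpec::runOnModule below:
    //   total > CallsMin  and  count > total * ConstValPercent
    bool shouldSpecialize(int totalCalls, int callsWithSameConst) {
      return totalCalls > 5 &&                       // CallsMin
             callsWithSameConst > totalCalls * 0.1;  // ConstValPercent
    }
    // e.g. 50 call sites, 8 of which pass the constant 42:
    // 50 > 5 and 8 > 5.0, so a clone specialized for 42 is created.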
-
-namespace {
- typedef SmallVector<int, MaxInterests> InterestingArgVector;
- class PartSpec : public ModulePass {
- void scanForInterest(Function&, InterestingArgVector&);
- int scanDistribution(Function&, int, std::map<Constant*, int>&);
- public :
- static char ID; // Pass identification, replacement for typeid
- PartSpec() : ModulePass(ID) {}
- bool runOnModule(Module &M);
- };
-}
-
-char PartSpec::ID = 0;
-INITIALIZE_PASS(PartSpec, "partialspecialization",
- "Partial Specialization", false, false);
-
-// Specialize F by replacing the arguments (keys) in replacements with the
-// constants (values). Replace all calls to F that pass those constants with
-// a call to the specialized function. Returns the specialized function.
-static Function*
-SpecializeFunction(Function* F,
- ValueMap<const Value*, Value*>& replacements) {
- // arg numbers of deleted arguments
- DenseMap<unsigned, const Argument*> deleted;
- for (ValueMap<const Value*, Value*>::iterator
- repb = replacements.begin(), repe = replacements.end();
- repb != repe; ++repb) {
- Argument const *arg = cast<const Argument>(repb->first);
- deleted[arg->getArgNo()] = arg;
- }
-
- Function* NF = CloneFunction(F, replacements,
- /*ModuleLevelChanges=*/false);
- NF->setLinkage(GlobalValue::InternalLinkage);
- F->getParent()->getFunctionList().push_back(NF);
-
- for (Value::use_iterator ii = F->use_begin(), ee = F->use_end();
- ii != ee; ) {
- Value::use_iterator i = ii;
- ++ii;
- User *U = *i;
- CallSite CS(U);
- if (CS) {
- if (CS.getCalledFunction() == F) {
- SmallVector<Value*, 6> args;
- // Assemble the non-specialized arguments for the updated callsite.
- // In the process, make sure that the specialized arguments are
- // constant and match the specialization. If that's not the case,
- // this callsite needs to call the original or some other
- // specialization; don't change it here.
- CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
- for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
- DenseMap<unsigned, const Argument*>::iterator delit = deleted.find(
- std::distance(as, ai));
- if (delit == deleted.end())
- args.push_back(cast<Value>(ai));
- else {
- Constant *ci = dyn_cast<Constant>(ai);
- if (!(ci && ci == replacements[delit->second]))
- goto next_use;
- }
- }
- Value* NCall;
- if (CallInst *CI = dyn_cast<CallInst>(U)) {
- NCall = CallInst::Create(NF, args.begin(), args.end(),
- CI->getName(), CI);
- cast<CallInst>(NCall)->setTailCall(CI->isTailCall());
- cast<CallInst>(NCall)->setCallingConv(CI->getCallingConv());
- } else {
- InvokeInst *II = cast<InvokeInst>(U);
- NCall = InvokeInst::Create(NF, II->getNormalDest(),
- II->getUnwindDest(),
- args.begin(), args.end(),
- II->getName(), II);
- cast<InvokeInst>(NCall)->setCallingConv(II->getCallingConv());
- }
- CS.getInstruction()->replaceAllUsesWith(NCall);
- CS.getInstruction()->eraseFromParent();
- ++numReplaced;
- }
- }
- next_use:;
- }
- return NF;
-}
-
-
-bool PartSpec::runOnModule(Module &M) {
- bool Changed = false;
- for (Module::iterator I = M.begin(); I != M.end(); ++I) {
- Function &F = *I;
- if (F.isDeclaration() || F.mayBeOverridden()) continue;
- InterestingArgVector interestingArgs;
- scanForInterest(F, interestingArgs);
-
- // Find the first interesting Argument that we can specialize on
- // If there are multiple interesting Arguments, then those will be found
- // when processing the cloned function.
- bool breakOuter = false;
- for (unsigned int x = 0; !breakOuter && x < interestingArgs.size(); ++x) {
- std::map<Constant*, int> distribution;
- int total = scanDistribution(F, interestingArgs[x], distribution);
- if (total > CallsMin)
- for (std::map<Constant*, int>::iterator ii = distribution.begin(),
- ee = distribution.end(); ii != ee; ++ii)
- if (total > ii->second && ii->first &&
- ii->second > total * ConstValPercent) {
- ValueMap<const Value*, Value*> m;
- Function::arg_iterator arg = F.arg_begin();
- for (int y = 0; y < interestingArgs[x]; ++y)
- ++arg;
- m[&*arg] = ii->first;
- SpecializeFunction(&F, m);
- ++numSpecialized;
- breakOuter = true;
- Changed = true;
- }
- }
- }
- return Changed;
-}
-
-/// scanForInterest - This function decides which arguments would be worth
-/// specializing on.
-void PartSpec::scanForInterest(Function& F, InterestingArgVector& args) {
- for(Function::arg_iterator ii = F.arg_begin(), ee = F.arg_end();
- ii != ee; ++ii) {
- for(Value::use_iterator ui = ii->use_begin(), ue = ii->use_end();
- ui != ue; ++ui) {
-
- bool interesting = false;
- User *U = *ui;
- if (isa<CmpInst>(U)) interesting = true;
- else if (isa<CallInst>(U))
- interesting = ui->getOperand(0) == ii;
- else if (isa<InvokeInst>(U))
- interesting = ui->getOperand(0) == ii;
- else if (isa<SwitchInst>(U)) interesting = true;
- else if (isa<BranchInst>(U)) interesting = true;
-
- if (interesting) {
- args.push_back(std::distance(F.arg_begin(), ii));
- break;
- }
- }
- }
-}
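For intuition, an argument is "interesting" when it feeds a compare, a branch, a switch, or is itself the called value. A hedged C-level picture (dispatch is a hypothetical function, not taken from the pass):

    // 'mode' would be flagged as interesting: it reaches a compare that
    // controls a branch, so specializing on a common constant value of
    // 'mode' lets later cleanup passes fold the branch away entirely.
    int dispatch(int mode, int x) {
      if (mode == 1)     // compare + branch on the argument
        return x + 1;
      return x - 1;
    }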
-
-/// scanDistribution - Construct a histogram of the constants passed to F's
-/// argument number 'arg'.
-int PartSpec::scanDistribution(Function& F, int arg,
- std::map<Constant*, int>& dist) {
- bool hasIndirect = false;
- int total = 0;
- for (Value::use_iterator ii = F.use_begin(), ee = F.use_end();
- ii != ee; ++ii) {
- User *U = *ii;
- CallSite CS(U);
- if (CS && CS.getCalledFunction() == &F) {
- ++dist[dyn_cast<Constant>(CS.getArgument(arg))];
- ++total;
- } else
- hasIndirect = true;
- }
-
- // Preserve the original address taken function even if all other uses
- // will be specialized.
- if (hasIndirect) ++total;
- return total;
-}
-
-ModulePass* llvm::createPartialSpecializationPass() { return new PartSpec(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index 09ac76f..d91c2c4 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -37,7 +37,9 @@ STATISTIC(NumUnreach, "Number of noreturn calls optimized");
namespace {
struct PruneEH : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- PruneEH() : CallGraphSCCPass(ID) {}
+ PruneEH() : CallGraphSCCPass(ID) {
+ initializePruneEHPass(*PassRegistry::getPassRegistry());
+ }
// runOnSCC - Analyze the SCC, performing the transformation if possible.
bool runOnSCC(CallGraphSCC &SCC);
@@ -48,8 +50,11 @@ namespace {
}
char PruneEH::ID = 0;
-INITIALIZE_PASS(PruneEH, "prune-eh",
- "Remove unused exception handling info", false, false);
+INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh",
+ "Remove unused exception handling info", false, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(PruneEH, "prune-eh",
+ "Remove unused exception handling info", false, false)
Pass *llvm::createPruneEHPass() { return new PruneEH(); }
diff --git a/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp b/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
index ee10ad0..b5f09ec 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
@@ -29,7 +29,9 @@ namespace {
class StripDeadPrototypesPass : public ModulePass {
public:
static char ID; // Pass identification, replacement for typeid
- StripDeadPrototypesPass() : ModulePass(ID) { }
+ StripDeadPrototypesPass() : ModulePass(ID) {
+ initializeStripDeadPrototypesPassPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
};
@@ -37,7 +39,7 @@ public:
char StripDeadPrototypesPass::ID = 0;
INITIALIZE_PASS(StripDeadPrototypesPass, "strip-dead-prototypes",
- "Strip Unused Function Prototypes", false, false);
+ "Strip Unused Function Prototypes", false, false)
bool StripDeadPrototypesPass::runOnModule(Module &M) {
bool MadeChange = false;
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index 20b7b8f..a690765 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -39,7 +39,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripSymbols(bool ODI = false)
- : ModulePass(ID), OnlyDebugInfo(ODI) {}
+ : ModulePass(ID), OnlyDebugInfo(ODI) {
+ initializeStripSymbolsPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
@@ -52,7 +54,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripNonDebugSymbols()
- : ModulePass(ID) {}
+ : ModulePass(ID) {
+ initializeStripNonDebugSymbolsPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
@@ -65,7 +69,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripDebugDeclare()
- : ModulePass(ID) {}
+ : ModulePass(ID) {
+ initializeStripDebugDeclarePass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
@@ -78,7 +84,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripDeadDebugInfo()
- : ModulePass(ID) {}
+ : ModulePass(ID) {
+ initializeStripDeadDebugInfoPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnModule(Module &M);
@@ -90,7 +98,7 @@ namespace {
char StripSymbols::ID = 0;
INITIALIZE_PASS(StripSymbols, "strip",
- "Strip all symbols from a module", false, false);
+ "Strip all symbols from a module", false, false)
ModulePass *llvm::createStripSymbolsPass(bool OnlyDebugInfo) {
return new StripSymbols(OnlyDebugInfo);
@@ -99,7 +107,7 @@ ModulePass *llvm::createStripSymbolsPass(bool OnlyDebugInfo) {
char StripNonDebugSymbols::ID = 0;
INITIALIZE_PASS(StripNonDebugSymbols, "strip-nondebug",
"Strip all symbols, except dbg symbols, from a module",
- false, false);
+ false, false)
ModulePass *llvm::createStripNonDebugSymbolsPass() {
return new StripNonDebugSymbols();
@@ -107,7 +115,7 @@ ModulePass *llvm::createStripNonDebugSymbolsPass() {
char StripDebugDeclare::ID = 0;
INITIALIZE_PASS(StripDebugDeclare, "strip-debug-declare",
- "Strip all llvm.dbg.declare intrinsics", false, false);
+ "Strip all llvm.dbg.declare intrinsics", false, false)
ModulePass *llvm::createStripDebugDeclarePass() {
return new StripDebugDeclare();
@@ -115,7 +123,7 @@ ModulePass *llvm::createStripDebugDeclarePass() {
char StripDeadDebugInfo::ID = 0;
INITIALIZE_PASS(StripDeadDebugInfo, "strip-dead-debug-info",
- "Strip debug info for unused symbols", false, false);
+ "Strip debug info for unused symbols", false, false)
ModulePass *llvm::createStripDeadDebugInfoPass() {
return new StripDeadDebugInfo();
diff --git a/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
index b82b03f..584deac 100644
--- a/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
@@ -50,7 +50,9 @@ namespace {
virtual bool runOnSCC(CallGraphSCC &SCC);
static char ID; // Pass identification, replacement for typeid
- SRETPromotion() : CallGraphSCCPass(ID) {}
+ SRETPromotion() : CallGraphSCCPass(ID) {
+ initializeSRETPromotionPass(*PassRegistry::getPassRegistry());
+ }
private:
CallGraphNode *PromoteReturn(CallGraphNode *CGN);
@@ -61,8 +63,11 @@ namespace {
}
char SRETPromotion::ID = 0;
-INITIALIZE_PASS(SRETPromotion, "sretpromotion",
- "Promote sret arguments to multiple ret values", false, false);
+INITIALIZE_PASS_BEGIN(SRETPromotion, "sretpromotion",
+ "Promote sret arguments to multiple ret values", false, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_END(SRETPromotion, "sretpromotion",
+ "Promote sret arguments to multiple ret values", false, false)
Pass *llvm::createStructRetPromotionPass() {
return new SRETPromotion();
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index 6f9609c..9c2969c 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -81,7 +81,9 @@ public:
BuilderTy *Builder;
static char ID; // Pass identification, replacement for typeid
- InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {}
+ InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {
+ initializeInstCombinerPass(*PassRegistry::getPassRegistry());
+ }
public:
virtual bool runOnFunction(Function &F);
@@ -143,6 +145,8 @@ public:
ConstantInt *RHS);
Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
ConstantInt *DivRHS);
+ Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
+ ConstantInt *DivRHS);
Instruction *FoldICmpAddOpCst(ICmpInst &ICI, Value *X, ConstantInt *CI,
ICmpInst::Predicate Pred, Value *TheAdd);
Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
@@ -284,9 +288,16 @@ public:
private:
- /// SimplifyCommutative - This performs a few simplifications for
- /// commutative operators.
- bool SimplifyCommutative(BinaryOperator &I);
+ /// SimplifyAssociativeOrCommutative - This performs a few simplifications for
+ /// operators which are associative or commutative.
+ bool SimplifyAssociativeOrCommutative(BinaryOperator &I);
+
+ /// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
+ /// which some other binary operation distributes over either by factorizing
+ /// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
+ /// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
+ /// a win). Returns the simplified value, or null if it didn't simplify.
+ Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);
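Both directions mentioned in the comment, spelled out as plain expressions (a sketch relying only on the usual algebraic identities; function names are illustrative, not LLVM code):

    // Factorize: (A*B) + (A*C) -> A*(B+C); one multiply instead of two.
    unsigned factored(unsigned A, unsigned B, unsigned C) {
      return A * (B + C);
    }
    // Expand: A & (B | C) -> (A&B) | (A&C); only a win when a term then
    // simplifies further, e.g. when A&B folds to a constant.
    unsigned expanded(unsigned A, unsigned B, unsigned C) {
      return (A & B) | (A & C);
    }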
/// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
/// based on the demanded bits.
@@ -310,10 +321,7 @@ private:
// into the PHI (which is only possible if all operands to the PHI are
// constants).
//
- // If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
- // that would normally be unprofitable because they strongly encourage jump
- // threading.
- Instruction *FoldOpIntoPhi(Instruction &I, bool AllowAggressive = false);
+ Instruction *FoldOpIntoPhi(Instruction &I);
// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
// operator and they all are only used by the PHI, PHI together their
@@ -339,10 +347,6 @@ private:
Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
-
- unsigned GetOrEnforceKnownAlignment(Value *V,
- unsigned PrefAlign = 0);
-
};
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 4d2c89e..c36a955 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -84,43 +84,37 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
}
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(), TD))
return ReplaceInstUsesWith(I, V);
-
- if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
- // X + (signbit) --> X ^ signbit
- const APInt& Val = CI->getValue();
- uint32_t BitWidth = Val.getBitWidth();
- if (Val == APInt::getSignBit(BitWidth))
- return BinaryOperator::CreateXor(LHS, RHS);
-
- // See if SimplifyDemandedBits can simplify this. This handles stuff like
- // (X & 254)+1 -> (X&254)|1
- if (SimplifyDemandedInstructionBits(I))
- return &I;
-
- // zext(bool) + C -> bool ? C + 1 : C
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
- if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
- return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
- }
+ // (A*B)+(A*C) -> A*(B+C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
- if (isa<PHINode>(LHS))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ // X + (signbit) --> X ^ signbit
+ const APInt &Val = CI->getValue();
+ if (Val.isSignBit())
+ return BinaryOperator::CreateXor(LHS, RHS);
+
+ // See if SimplifyDemandedBits can simplify this. This handles stuff like
+ // (X & 254)+1 -> (X&254)|1
+ if (SimplifyDemandedInstructionBits(I))
+ return &I;
+
+ // zext(bool) + C -> bool ? C + 1 : C
+ if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
+ if (ZI->getSrcTy()->isIntegerTy(1))
+ return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
- ConstantInt *XorRHS = 0;
- Value *XorLHS = 0;
- if (isa<ConstantInt>(RHSC) &&
- match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
+ Value *XorLHS = 0; ConstantInt *XorRHS = 0;
+ if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
- const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
+ const APInt &RHSVal = CI->getValue();
unsigned ExtendAmt = 0;
// If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
// If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
@@ -130,13 +124,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
else if (XorRHS->getValue().isPowerOf2())
ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
}
-
+
if (ExtendAmt) {
APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
if (!MaskedValueIsZero(XorLHS, Mask))
ExtendAmt = 0;
}
-
+
if (ExtendAmt) {
Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
@@ -145,34 +139,28 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
+ if (isa<Constant>(RHS) && isa<PHINode>(LHS))
+ if (Instruction *NV = FoldOpIntoPhi(I))
+ return NV;
+
if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(LHS, RHS);
- if (I.getType()->isIntegerTy()) {
- // X + X --> X << 1
- if (LHS == RHS)
- return BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
-
- if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
- if (RHSI->getOpcode() == Instruction::Sub)
- if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
- return ReplaceInstUsesWith(I, RHSI->getOperand(0));
- }
- if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
- if (LHSI->getOpcode() == Instruction::Sub)
- if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
- return ReplaceInstUsesWith(I, LHSI->getOperand(0));
- }
+ // X + X --> X << 1
+ if (LHS == RHS) {
+ BinaryOperator *New =
+ BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
+ New->setHasNoSignedWrap(I.hasNoSignedWrap());
+ New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+ return New;
}
// -A + B --> B - A
// -A + -B --> -(A + B)
if (Value *LHSV = dyn_castNegVal(LHS)) {
- if (LHS->getType()->isIntOrIntVectorTy()) {
- if (Value *RHSV = dyn_castNegVal(RHS)) {
- Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
- return BinaryOperator::CreateNeg(NewAdd);
- }
+ if (Value *RHSV = dyn_castNegVal(RHS)) {
+ Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
+ return BinaryOperator::CreateNeg(NewAdd);
}
return BinaryOperator::CreateSub(RHS, LHSV);
@@ -199,11 +187,6 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (dyn_castFoldableMul(RHS, C2) == LHS)
return BinaryOperator::CreateMul(LHS, AddOne(C2));
- // X + ~X --> -1 since ~X = -X-1
- if (match(LHS, m_Not(m_Specific(RHS))) ||
- match(RHS, m_Not(m_Specific(LHS))))
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
// A+B --> A|B iff A and B have no bits set in common.
if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
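The fold being set up here rests on the absence of carries; a small sketch in plain C++ (packNibbles is an illustrative name):

    // When A & B == 0 the addition can never carry, so A + B and A | B
    // agree bit for bit.
    unsigned packNibbles(unsigned hi, unsigned lo) {
      unsigned A = hi & 0xF0u, B = lo & 0x0Fu;  // provably disjoint bits
      return A + B;                             // identical to A | B
    }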
@@ -222,7 +205,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
// W*X + Y*Z --> W * (X+Z) iff W == Y
- if (I.getType()->isIntOrIntVectorTy()) {
+ {
Value *W, *X, *Y, *Z;
if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
@@ -251,24 +234,22 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// (X & FF00) + xx00 -> (X+xx00) & FF00
if (LHS->hasOneUse() &&
- match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
- Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
- if (Anded == CRHS) {
- // See if all bits from the first bit set in the Add RHS up are included
- // in the mask. First, get the rightmost bit.
- const APInt &AddRHSV = CRHS->getValue();
-
- // Form a mask of all bits from the lowest bit added through the top.
- APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
-
- // See if the and mask includes all of these bits.
- APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
-
- if (AddRHSHighBits == AddRHSHighBitsAnd) {
- // Okay, the xform is safe. Insert the new add pronto.
- Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
- return BinaryOperator::CreateAnd(NewAdd, C2);
- }
+ match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
+ CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
+ // See if all bits from the first bit set in the Add RHS up are included
+ // in the mask. First, get the rightmost bit.
+ const APInt &AddRHSV = CRHS->getValue();
+
+ // Form a mask of all bits from the lowest bit added through the top.
+ APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
+
+ // See if the and mask includes all of these bits.
+ APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
+
+ if (AddRHSHighBits == AddRHSHighBitsAnd) {
+ // Okay, the xform is safe. Insert the new add pronto.
+ Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
+ return BinaryOperator::CreateAnd(NewAdd, C2);
}
}
@@ -293,12 +274,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// Can we fold the add into the argument of the select?
// We check both true and false select arguments for a matching subtract.
- if (match(FV, m_Zero()) &&
- match(TV, m_Sub(m_Value(N), m_Specific(A))))
+ if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
// Fold the add into the true select value.
return SelectInst::Create(SI->getCondition(), N, A);
- if (match(TV, m_Zero()) &&
- match(FV, m_Sub(m_Value(N), m_Specific(A))))
+
+ if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
// Fold the add into the false select value.
return SelectInst::Create(SI->getCondition(), A, N);
}
@@ -342,7 +322,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
@@ -424,6 +404,10 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
Value *Result = Constant::getNullValue(IntPtrTy);
+ // If the GEP is inbounds, we know that none of the addressing operations will
+ // overflow in an unsigned sense.
+ bool isInBounds = cast<GEPOperator>(GEP)->isInBounds();
+
// Build a mask for high order bits.
unsigned IntPtrWidth = TD.getPointerSizeInBits();
uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
@@ -439,16 +423,16 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
- Result = Builder->CreateAdd(Result,
- ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".offs");
+ if (Size)
+ Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".offs");
continue;
}
Constant *Scale = ConstantInt::get(IntPtrTy, Size);
Constant *OC =
ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
- Scale = ConstantExpr::getMul(OC, Scale);
+ Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
// Emit an add instruction.
Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
continue;
@@ -457,9 +441,9 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
if (Op->getType() != IntPtrTy)
Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
if (Size != 1) {
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
// We'll let instcombine(mul) convert this to a shl if possible.
- Op = Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".idx", isInBounds /*NUW*/);
}
// Emit an add instruction.
@@ -545,8 +529,13 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Op0 == Op1) // sub X, X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
+ if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
+ I.hasNoUnsignedWrap(), TD))
+ return ReplaceInstUsesWith(I, V);
+
+ // (A*B)-(A*C) -> A*(B-C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
// If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW.
if (Value *V = dyn_castNegVal(Op1)) {
@@ -556,18 +545,14 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return Res;
}
- if (isa<UndefValue>(Op0))
- return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
- if (isa<UndefValue>(Op1))
- return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(Op0, Op1);
+
+ // Replace (-1 - A) with (~A).
+ if (match(Op0, m_AllOnes()))
+ return BinaryOperator::CreateNot(Op1);
if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
- // Replace (-1 - A) with (~A).
- if (C->isAllOnesValue())
- return BinaryOperator::CreateNot(Op1);
-
// C - ~X == X + (1+C)
Value *X = 0;
if (match(Op1, m_Not(m_Value(X))))
@@ -576,29 +561,16 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// -(X >>u 31) -> (X >>s 31)
// -(X >>s 31) -> (X >>u 31)
if (C->isZero()) {
- if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
- if (SI->getOpcode() == Instruction::LShr) {
- if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
- // Check to see if we are shifting out everything but the sign bit.
- if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
- SI->getType()->getPrimitiveSizeInBits()-1) {
- // Ok, the transformation is safe. Insert AShr.
- return BinaryOperator::Create(Instruction::AShr,
- SI->getOperand(0), CU, SI->getName());
- }
- }
- } else if (SI->getOpcode() == Instruction::AShr) {
- if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
- // Check to see if we are shifting out everything but the sign bit.
- if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
- SI->getType()->getPrimitiveSizeInBits()-1) {
- // Ok, the transformation is safe. Insert LShr.
- return BinaryOperator::CreateLShr(
- SI->getOperand(0), CU, SI->getName());
- }
- }
- }
- }
+ Value *X; ConstantInt *CI;
+ if (match(Op1, m_LShr(m_Value(X), m_ConstantInt(CI))) &&
+ // Verify we are shifting out everything but the sign bit.
+ CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
+ return BinaryOperator::CreateAShr(X, CI);
+
+ if (match(Op1, m_AShr(m_Value(X), m_ConstantInt(CI))) &&
+ // Verify we are shifting out everything but the sign bit.
+ CI->getValue() == I.getType()->getPrimitiveSizeInBits()-1)
+ return BinaryOperator::CreateLShr(X, CI);
}
// Try to fold constant sub into select arguments.
@@ -608,86 +580,80 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// C - zext(bool) -> bool ? C - 1 : C
if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
- if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
+ if (ZI->getSrcTy()->isIntegerTy(1))
return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
+
+ // C-(X+C2) --> (C-C2)-X
+ ConstantInt *C2;
+ if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
+ return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
}
- if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
- if (Op1I->getOpcode() == Instruction::Add) {
- if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
- return BinaryOperator::CreateNeg(Op1I->getOperand(1),
- I.getName());
- else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
- return BinaryOperator::CreateNeg(Op1I->getOperand(0),
- I.getName());
- else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
- if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
- // C1-(X+C2) --> (C1-C2)-X
- return BinaryOperator::CreateSub(
- ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
- }
+
+ { Value *Y;
+ // X-(X+Y) == -Y X-(Y+X) == -Y
+ if (match(Op1, m_Add(m_Specific(Op0), m_Value(Y))) ||
+ match(Op1, m_Add(m_Value(Y), m_Specific(Op0))))
+ return BinaryOperator::CreateNeg(Y);
+
+ // (X-Y)-X == -Y
+ if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
+ return BinaryOperator::CreateNeg(Y);
+ }
+
+ if (Op1->hasOneUse()) {
+ Value *X = 0, *Y = 0, *Z = 0;
+ Constant *C = 0;
+ ConstantInt *CI = 0;
+
+ // (X - (Y - Z)) --> (X + (Z - Y)).
+ if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
+ return BinaryOperator::CreateAdd(Op0,
+ Builder->CreateSub(Z, Y, Op1->getName()));
+
+ // (X - (X & Y)) --> (X & ~Y)
+ //
+ if (match(Op1, m_And(m_Value(Y), m_Specific(Op0))) ||
+ match(Op1, m_And(m_Specific(Op0), m_Value(Y))))
+ return BinaryOperator::CreateAnd(Op0,
+ Builder->CreateNot(Y, Y->getName() + ".not"));
+
+ // 0 - (X sdiv C) -> (X sdiv -C)
+ if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) &&
+ match(Op0, m_Zero()))
+ return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));
+
+ // 0 - (X << Y) -> (-X << Y) when X is freely negatable.
+ if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
+ if (Value *XNeg = dyn_castNegVal(X))
+ return BinaryOperator::CreateShl(XNeg, Y);
+
+ // X - X*C --> X * (1-C)
+ if (match(Op1, m_Mul(m_Specific(Op0), m_ConstantInt(CI)))) {
+ Constant *CP1 = ConstantExpr::getSub(ConstantInt::get(I.getType(),1), CI);
+ return BinaryOperator::CreateMul(Op0, CP1);
}
- if (Op1I->hasOneUse()) {
- // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
- // is not used by anyone else...
- //
- if (Op1I->getOpcode() == Instruction::Sub) {
- // Swap the two operands of the subexpr...
- Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
- Op1I->setOperand(0, IIOp1);
- Op1I->setOperand(1, IIOp0);
-
- // Create the new top level add instruction...
- return BinaryOperator::CreateAdd(Op0, Op1);
- }
-
- // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
- //
- if (Op1I->getOpcode() == Instruction::And &&
- (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
- Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
-
- Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
- return BinaryOperator::CreateAnd(Op0, NewNot);
- }
-
- // 0 - (X sdiv C) -> (X sdiv -C)
- if (Op1I->getOpcode() == Instruction::SDiv)
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
- if (CSI->isZero())
- if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
- return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
- ConstantExpr::getNeg(DivRHS));
-
- // 0 - (C << X) -> (-C << X)
- if (Op1I->getOpcode() == Instruction::Shl)
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
- if (CSI->isZero())
- if (Value *ShlLHSNeg = dyn_castNegVal(Op1I->getOperand(0)))
- return BinaryOperator::CreateShl(ShlLHSNeg, Op1I->getOperand(1));
-
- // X - X*C --> X * (1-C)
- ConstantInt *C2 = 0;
- if (dyn_castFoldableMul(Op1I, C2) == Op0) {
- Constant *CP1 =
- ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
- C2);
- return BinaryOperator::CreateMul(Op0, CP1);
- }
+ // X - X<<C --> X * (1-(1<<C))
+ if (match(Op1, m_Shl(m_Specific(Op0), m_ConstantInt(CI)))) {
+ Constant *One = ConstantInt::get(I.getType(), 1);
+ C = ConstantExpr::getSub(One, ConstantExpr::getShl(One, CI));
+ return BinaryOperator::CreateMul(Op0, C);
}
- }
-
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- if (Op0I->getOpcode() == Instruction::Add) {
- if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
- return ReplaceInstUsesWith(I, Op0I->getOperand(1));
- else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
- return ReplaceInstUsesWith(I, Op0I->getOperand(0));
- } else if (Op0I->getOpcode() == Instruction::Sub) {
- if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
- return BinaryOperator::CreateNeg(Op0I->getOperand(1),
- I.getName());
+
+ // X - A*-B -> X + A*B
+ // X - -A*B -> X + A*B
+ Value *A, *B;
+ if (match(Op1, m_Mul(m_Value(A), m_Neg(m_Value(B)))) ||
+ match(Op1, m_Mul(m_Neg(m_Value(A)), m_Value(B))))
+ return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));
+
+ // X - A*CI -> X + A*-CI
+ // X - CI*A -> X + A*-CI
+ if (match(Op1, m_Mul(m_Value(A), m_ConstantInt(CI))) ||
+ match(Op1, m_Mul(m_ConstantInt(CI), m_Value(A)))) {
+ Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
+ return BinaryOperator::CreateAdd(Op0, NewMul);
}
}
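Two of the rewritten sub folds above, reduced to plain unsigned C++ so the identities are visible (sketch only; unsigned wraparound keeps everything well defined):

    // X - (X & Y) --> X & ~Y: the bits being subtracted are a subset of
    // X's bits, so no borrow occurs and they are simply cleared.
    unsigned subMasked(unsigned x, unsigned y) { return x & ~y; }

    // X - X*C --> X * (1-C): distributivity; with C = 3,
    // x - 3*x == x * (1 - 3), i.e. -2*x modulo 2^32.
    unsigned subTimesThree(unsigned x) { return x * (1u - 3u); }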
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 19a05bf..b6b6b84 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -172,7 +172,9 @@ static Value *getFCmpValue(bool isordered, unsigned code,
case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
- case 7: return ConstantInt::getTrue(LHS->getContext());
+ case 7:
+ if (!isordered) return ConstantInt::getTrue(LHS->getContext());
+ Pred = FCmpInst::FCMP_ORD; break;
}
return Builder->CreateFCmp(Pred, LHS, RHS);
}
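The case-7 change above separates the ordered and unordered forms: only the unordered form is a tautology, while the ordered form must still reject NaNs (FCMP_ORD). A sketch of the two semantics (helper names are made up):

    #include <cmath>
    // Ordered "7" is FCMP_ORD: true iff neither operand is NaN.
    bool ord7(double a, double b) { return !std::isnan(a) && !std::isnan(b); }
    // Unordered "7" is genuinely always true, so a constant is correct.
    bool uno7(double, double) { return true; }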
@@ -207,15 +209,26 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
}
break;
case Instruction::Or:
- if (Together == AndRHS) // (X | C) & C --> C
- return ReplaceInstUsesWith(TheAnd, AndRHS);
-
- if (Op->hasOneUse() && Together != OpRHS) {
- // (X | C1) & C2 --> (X | (C1&C2)) & C2
- Value *Or = Builder->CreateOr(X, Together);
- Or->takeName(Op);
- return BinaryOperator::CreateAnd(Or, AndRHS);
+ if (Op->hasOneUse()){
+ if (Together != OpRHS) {
+ // (X | C1) & C2 --> (X | (C1&C2)) & C2
+ Value *Or = Builder->CreateOr(X, Together);
+ Or->takeName(Op);
+ return BinaryOperator::CreateAnd(Or, AndRHS);
+ }
+
+ ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
+ if (TogetherCI && !TogetherCI->isZero()){
+ // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
+ // NOTE: This reduces the number of bits set in the & mask, which
+ // can expose opportunities for store narrowing.
+ Together = ConstantExpr::getXor(AndRHS, Together);
+ Value *And = Builder->CreateAnd(X, Together);
+ And->takeName(Op);
+ return BinaryOperator::CreateOr(And, OpRHS);
+ }
}
+
break;
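This branch appears to be reachable only when Together == OpRHS, i.e. C1 & C2 == C1, and under that condition the rewrite is an identity. A worked instance (sketch) with C1 = 1, C2 = 15:

    // (X | 1) & 15  ==  (X & 14) | 1,   since 14 == 15 ^ (1 & 15).
    // The new and-mask 14 has fewer bits set than 15, which is what can
    // expose the store-narrowing opportunities the note mentions.
    unsigned orThenAnd(unsigned x) { return (x | 1u) & 15u; }
    unsigned andThenOr(unsigned x) { return (x & 14u) | 1u; }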
case Instruction::Add:
if (Op->hasOneUse()) {
@@ -261,10 +274,11 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
AndRHS->getValue() & ShlMask);
- if (CI->getValue() == ShlMask) {
- // Masking out bits that the shift already masks
+ if (CI->getValue() == ShlMask)
+ // Masking out bits that the shift already masks.
return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
- } else if (CI != AndRHS) { // Reducing bits set in and.
+
+ if (CI != AndRHS) { // Reducing bits set in and.
TheAnd.setOperand(1, CI);
return &TheAnd;
}
@@ -281,10 +295,11 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
ConstantInt *CI = ConstantInt::get(Op->getContext(),
AndRHS->getValue() & ShrMask);
- if (CI->getValue() == ShrMask) {
- // Masking out bits that the shift already masks.
+ if (CI->getValue() == ShrMask)
+ // Masking out bits that the shift already masks.
return ReplaceInstUsesWith(TheAnd, Op);
- } else if (CI != AndRHS) {
+
+ if (CI != AndRHS) {
TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
return &TheAnd;
}
@@ -434,6 +449,270 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
}
+/// Enum for classifying (icmp eq (A & B), C) and (icmp ne (A & B), C).
+/// One of A and B is considered the mask, the other the value. This is
+/// described as the "AMask" or "BMask" part of the enum. If the enum
+/// contains only "Mask", then both A and B can be considered masks.
+/// If A is the mask, then it has been proven that (A & C) == C. This
+/// is trivial if C == A or C == 0; if both A and C are constants, the
+/// proof is also easy.
+/// For the following explanations we assume that A is the mask.
+/// The part "AllOnes" states that the comparison is true only
+/// if (A & B) == A, i.e. all bits of A are set in B.
+/// Example: (icmp eq (A & 3), 3) -> FoldMskICmp_AMask_AllOnes
+/// The part "AllZeroes" states that the comparison is true only
+/// if (A & B) == 0, i.e. all bits of A are cleared in B.
+/// Example: (icmp eq (A & 3), 0) -> FoldMskICmp_Mask_AllZeroes
+/// The part "Mixed" states that (A & B) == C, where C may contain
+/// any number of one bits and zero bits.
+/// Example: (icmp eq (A & 3), 1) -> FoldMskICmp_AMask_Mixed
+/// The part "Not" means that in the descriptions above "==" should be
+/// replaced by "!=".
+/// Example: (icmp ne (A & 3), 3) -> FoldMskICmp_AMask_NotAllOnes
+/// If the mask A contains a single bit, then the following are equivalent:
+/// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
+/// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
+enum MaskedICmpType {
+ FoldMskICmp_AMask_AllOnes = 1,
+ FoldMskICmp_AMask_NotAllOnes = 2,
+ FoldMskICmp_BMask_AllOnes = 4,
+ FoldMskICmp_BMask_NotAllOnes = 8,
+ FoldMskICmp_Mask_AllZeroes = 16,
+ FoldMskICmp_Mask_NotAllZeroes = 32,
+ FoldMskICmp_AMask_Mixed = 64,
+ FoldMskICmp_AMask_NotMixed = 128,
+ FoldMskICmp_BMask_Mixed = 256,
+ FoldMskICmp_BMask_NotMixed = 512
+};
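The single-bit equivalence noted at the end of the comment is quick to check by hand; a sketch with A = 4, a mask with exactly one bit set:

    // x & 4 can only evaluate to 0 or 4, so "== 4" and "!= 0" coincide.
    bool eqForm(unsigned x) { return (x & 4u) == 4u; }  // (A & B) == A
    bool neForm(unsigned x) { return (x & 4u) != 0u; }  // (A & B) != 0
    // eqForm(x) == neForm(x) for every x.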
+
+/// Return the set of pattern classes (from MaskedICmpType)
+/// that (icmp SCC (A & B), C) satisfies.
+static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
+ ICmpInst::Predicate SCC)
+{
+ ConstantInt *ACst = dyn_cast<ConstantInt>(A);
+ ConstantInt *BCst = dyn_cast<ConstantInt>(B);
+ ConstantInt *CCst = dyn_cast<ConstantInt>(C);
+ bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
+ bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
+ ACst->getValue().isPowerOf2());
+ bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
+ BCst->getValue().isPowerOf2());
+ unsigned result = 0;
+ if (CCst != 0 && CCst->isZero()) {
+ // if C is zero, then both A and B qualify as mask
+    result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
+                          FoldMskICmp_AMask_Mixed |
+                          FoldMskICmp_BMask_Mixed)
+                       : (FoldMskICmp_Mask_NotAllZeroes |
+                          FoldMskICmp_AMask_NotMixed |
+                          FoldMskICmp_BMask_NotMixed));
+ if (icmp_abit)
+ result |= (icmp_eq ? (FoldMskICmp_AMask_NotAllOnes |
+ FoldMskICmp_AMask_NotMixed)
+ : (FoldMskICmp_AMask_AllOnes |
+ FoldMskICmp_AMask_Mixed));
+ if (icmp_bbit)
+ result |= (icmp_eq ? (FoldMskICmp_BMask_NotAllOnes |
+ FoldMskICmp_BMask_NotMixed)
+ : (FoldMskICmp_BMask_AllOnes |
+ FoldMskICmp_BMask_Mixed));
+ return result;
+ }
+ if (A == C) {
+ result |= (icmp_eq ? (FoldMskICmp_AMask_AllOnes |
+ FoldMskICmp_AMask_Mixed)
+ : (FoldMskICmp_AMask_NotAllOnes |
+ FoldMskICmp_AMask_NotMixed));
+ if (icmp_abit)
+ result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
+ FoldMskICmp_AMask_NotMixed)
+ : (FoldMskICmp_Mask_AllZeroes |
+ FoldMskICmp_AMask_Mixed));
+ }
+ else if (ACst != 0 && CCst != 0 &&
+ ConstantExpr::getAnd(ACst, CCst) == CCst) {
+ result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
+ : FoldMskICmp_AMask_NotMixed);
+ }
+ if (B == C)
+ {
+ result |= (icmp_eq ? (FoldMskICmp_BMask_AllOnes |
+ FoldMskICmp_BMask_Mixed)
+ : (FoldMskICmp_BMask_NotAllOnes |
+ FoldMskICmp_BMask_NotMixed));
+ if (icmp_bbit)
+ result |= (icmp_eq ? (FoldMskICmp_Mask_NotAllZeroes |
+ FoldMskICmp_BMask_NotMixed)
+ : (FoldMskICmp_Mask_AllZeroes |
+ FoldMskICmp_BMask_Mixed));
+ }
+ else if (BCst != 0 && CCst != 0 &&
+ ConstantExpr::getAnd(BCst, CCst) == CCst) {
+ result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
+ : FoldMskICmp_BMask_NotMixed);
+ }
+ return result;
+}
+
+/// foldLogOpOfMaskedICmpsHelper:
+/// Handle (icmp (A & B) ==/!= C) &/| (icmp (A & D) ==/!= E).
+/// Return the set of pattern classes (from MaskedICmpType)
+/// that both LHS and RHS satisfy.
+static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
+ Value*& B, Value*& C,
+ Value*& D, Value*& E,
+ ICmpInst *LHS, ICmpInst *RHS) {
+ ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
+ if (LHSCC != ICmpInst::ICMP_EQ && LHSCC != ICmpInst::ICMP_NE) return 0;
+ if (RHSCC != ICmpInst::ICMP_EQ && RHSCC != ICmpInst::ICMP_NE) return 0;
+ if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
+ // vectors are not (yet?) supported
+ if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;
+
+  // Here comes the tricky part:
+  // LHS might be of the form (L11 & L12) == X, X == (L21 & L22),
+  // or (L11 & L12) == (L21 & L22). The same goes for RHS.
+  // Now we must find those components L** and R** that are equal, so
+  // that we can extract the parameters A, B, C, D, and E for the
+  // canonical form above.
+ Value *L1 = LHS->getOperand(0);
+ Value *L2 = LHS->getOperand(1);
+ Value *L11,*L12,*L21,*L22;
+ if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
+ if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
+ L21 = L22 = 0;
+ }
+ else {
+ if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
+ return 0;
+ std::swap(L1, L2);
+ L21 = L22 = 0;
+ }
+
+ Value *R1 = RHS->getOperand(0);
+ Value *R2 = RHS->getOperand(1);
+ Value *R11,*R12;
+ bool ok = false;
+ if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
+ if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
+ A = R11; D = R12; E = R2; ok = true;
+ }
+ else
+ if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
+ A = R12; D = R11; E = R2; ok = true;
+ }
+ }
+ if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
+ if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
+ A = R11; D = R12; E = R1; ok = true;
+ }
+ else
+ if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
+ A = R12; D = R11; E = R1; ok = true;
+ }
+ else
+ return 0;
+ }
+ if (!ok)
+ return 0;
+
+ if (L11 == A) {
+ B = L12; C = L2;
+ }
+ else if (L12 == A) {
+ B = L11; C = L2;
+ }
+ else if (L21 == A) {
+ B = L22; C = L1;
+ }
+ else if (L22 == A) {
+ B = L21; C = L1;
+ }
+
+ unsigned left_type = getTypeOfMaskedICmp(A, B, C, LHSCC);
+ unsigned right_type = getTypeOfMaskedICmp(A, D, E, RHSCC);
+ return left_type & right_type;
+}
+/// foldLogOpOfMaskedICmps:
+/// Try to fold (icmp (A & B) ==/!= C) &/| (icmp (A & D) ==/!= E)
+/// into a single (icmp (A & X) ==/!= Y).
+static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
+ ICmpInst::Predicate NEWCC,
+ llvm::InstCombiner::BuilderTy* Builder) {
+ Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
+ unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS);
+ if (mask == 0) return 0;
+
+ if (NEWCC == ICmpInst::ICMP_NE)
+ mask >>= 1; // treat "Not"-states as normal states
+
+ if (mask & FoldMskICmp_Mask_AllZeroes) {
+ // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
+ // -> (icmp eq (A & (B|D)), 0)
+ Value* newOr = Builder->CreateOr(B, D);
+ Value* newAnd = Builder->CreateAnd(A, newOr);
+    // We can't use C as the zero here, because we might actually be handling
+    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
+    // where B and D each have a single bit set.
+ Value* zero = Constant::getNullValue(A->getType());
+ return Builder->CreateICmp(NEWCC, newAnd, zero);
+ }
+ else if (mask & FoldMskICmp_BMask_AllOnes) {
+ // (icmp eq (A & B), B) & (icmp eq (A & D), D)
+ // -> (icmp eq (A & (B|D)), (B|D))
+ Value* newOr = Builder->CreateOr(B, D);
+ Value* newAnd = Builder->CreateAnd(A, newOr);
+ return Builder->CreateICmp(NEWCC, newAnd, newOr);
+ }
+ else if (mask & FoldMskICmp_AMask_AllOnes) {
+ // (icmp eq (A & B), A) & (icmp eq (A & D), A)
+ // -> (icmp eq (A & (B&D)), A)
+ Value* newAnd1 = Builder->CreateAnd(B, D);
+ Value* newAnd = Builder->CreateAnd(A, newAnd1);
+ return Builder->CreateICmp(NEWCC, newAnd, A);
+ }
+ else if (mask & FoldMskICmp_BMask_Mixed) {
+ // (icmp eq (A & B), C) & (icmp eq (A & D), E)
+ // We already know that B & C == C && D & E == E.
+ // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
+ // C and E, which are shared by both the mask B and the mask D, don't
+ // contradict, then we can transform to
+ // -> (icmp eq (A & (B|D)), (C|E))
+ // Currently, we only handle the case of B, C, D, and E being constant.
+ ConstantInt *BCst = dyn_cast<ConstantInt>(B);
+ if (BCst == 0) return 0;
+ ConstantInt *DCst = dyn_cast<ConstantInt>(D);
+ if (DCst == 0) return 0;
+    // We can't simply use C and E, because we might actually be handling
+    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
+    // where B and D each have a single bit set.
+
+ ConstantInt *CCst = dyn_cast<ConstantInt>(C);
+ if (CCst == 0) return 0;
+ if (LHS->getPredicate() != NEWCC)
+ CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
+ ConstantInt *ECst = dyn_cast<ConstantInt>(E);
+ if (ECst == 0) return 0;
+ if (RHS->getPredicate() != NEWCC)
+ ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
+ ConstantInt* MCst = dyn_cast<ConstantInt>(
+ ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
+ ConstantExpr::getXor(CCst, ECst)) );
+    // If there is a conflict, the right answer is actually false for the
+    // whole construct, but for now we just decline to fold.
+ if (!MCst->isZero())
+ return 0;
+ Value *newOr1 = Builder->CreateOr(B, D);
+ Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
+ Value *newAnd = Builder->CreateAnd(A, newOr1);
+ return Builder->CreateICmp(NEWCC, newAnd, newOr2);
+ }
+ return 0;
+}
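A concrete instance of the Mask_AllZeroes case above, with B and D taken as the constants 0x0F and 0xF0 (sketch; function names are illustrative):

    // (icmp eq (A & 0x0F), 0) & (icmp eq (A & 0xF0), 0)
    //     -> (icmp eq (A & 0xFF), 0),   since 0x0F | 0xF0 == 0xFF.
    bool beforeFold(unsigned a) { return (a & 0x0Fu) == 0 && (a & 0xF0u) == 0; }
    bool afterFold(unsigned a)  { return (a & 0xFFu) == 0; }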
+
/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
@@ -451,6 +730,10 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
return getICmpValue(isSigned, Code, Op0, Op1, Builder);
}
}
+
+ // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E)
+ if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_EQ, Builder))
+ return V;
// This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
@@ -472,22 +755,6 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *NewOr = Builder->CreateOr(Val, Val2);
return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
}
-
- // (icmp ne (A & C1), 0) & (icmp ne (A & C2), 0) -->
- // (icmp eq (A & (C1|C2)), (C1|C2)) where C1 and C2 are non-zero POT
- if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
- Value *Op1 = 0, *Op2 = 0;
- ConstantInt *CI1 = 0, *CI2 = 0;
- if (match(LHS->getOperand(0), m_And(m_Value(Op1), m_ConstantInt(CI1))) &&
- match(RHS->getOperand(0), m_And(m_Value(Op2), m_ConstantInt(CI2)))) {
- if (Op1 == Op2 && !CI1->isZero() && !CI2->isZero() &&
- CI1->getValue().isPowerOf2() && CI2->getValue().isPowerOf2()) {
- Constant *ConstOr = ConstantExpr::getOr(CI1, CI2);
- Value *NewAnd = Builder->CreateAnd(Op1, ConstOr);
- return Builder->CreateICmp(ICmpInst::ICMP_EQ, NewAnd, ConstOr);
- }
- }
- }
}
// From here on, we only handle:
@@ -712,12 +979,16 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyAndInst(Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
+ // (A|B)&(A|C) -> A|(B&C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
+
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
@@ -725,7 +996,6 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
const APInt &AndRHSMask = AndRHS->getValue();
- APInt NotAndRHS(~AndRHSMask);
// Optimize a variety of ((val OP C1) & C2) combinations...
if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
@@ -734,10 +1004,11 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
switch (Op0I->getOpcode()) {
default: break;
case Instruction::Xor:
- case Instruction::Or:
+ case Instruction::Or: {
// If the mask is only needed on one incoming arm, push it up.
if (!Op0I->hasOneUse()) break;
+ APInt NotAndRHS(~AndRHSMask);
if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
// Not masking anything out for the LHS, move to RHS.
Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
@@ -753,6 +1024,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
break;
+ }
case Instruction::Add:
// ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
// ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
@@ -772,14 +1044,12 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
// has 1's for all bits that the subtraction with A might affect.
- if (Op0I->hasOneUse()) {
+ if (Op0I->hasOneUse() && !match(Op0LHS, m_Zero())) {
uint32_t BitWidth = AndRHSMask.getBitWidth();
uint32_t Zeros = AndRHSMask.countLeadingZeros();
APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
- ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
- if (!(A && A->isZero()) && // avoid infinite recursion.
- MaskedValueIsZero(Op0LHS, Mask)) {
+ if (MaskedValueIsZero(Op0LHS, Mask)) {
Value *NewNeg = Builder->CreateNeg(Op0RHS);
return BinaryOperator::CreateAnd(NewNeg, AndRHS);
}
@@ -797,39 +1067,25 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
break;
}
-
+
if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
return Res;
- } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
- // If this is an integer truncation or change from signed-to-unsigned, and
- // if the source is an and/or with immediate, transform it. This
- // frequently occurs for bitfield accesses.
- if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
- if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
- CastOp->getNumOperands() == 2)
- if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
- if (CastOp->getOpcode() == Instruction::And) {
- // Change: and (cast (and X, C1) to T), C2
- // into : and (cast X to T), trunc_or_bitcast(C1)&C2
- // This will fold the two constants together, which may allow
- // other simplifications.
- Value *NewCast = Builder->CreateTruncOrBitCast(
- CastOp->getOperand(0), I.getType(),
- CastOp->getName()+".shrunk");
- // trunc_or_bitcast(C1)&C2
- Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
- C3 = ConstantExpr::getAnd(C3, AndRHS);
- return BinaryOperator::CreateAnd(NewCast, C3);
- } else if (CastOp->getOpcode() == Instruction::Or) {
- // Change: and (cast (or X, C1) to T), C2
- // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
- Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
- if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
- // trunc(C1)&C2
- return ReplaceInstUsesWith(I, AndRHS);
- }
- }
+ }
+
+ // If this is an integer truncation, and if the source is an 'and' with
+ // immediate, transform it. This frequently occurs for bitfield accesses.
+ {
+ Value *X = 0; ConstantInt *YC = 0;
+ if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
+ // Change: and (trunc (and X, YC) to T), C2
+ // into : and (trunc X to T), trunc(YC) & C2
+ // This will fold the two constants together, which may allow
+ // other simplifications.
+ Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk");
+ Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
+ C3 = ConstantExpr::getAnd(C3, AndRHS);
+ return BinaryOperator::CreateAnd(NewCast, C3);
}
}
@@ -851,7 +1107,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
I.getName()+".demorgan");
return BinaryOperator::CreateNot(Or);
}
-
+
{
Value *A = 0, *B = 0, *C = 0, *D = 0;
// (A|B) & ~(A&B) -> A^B
@@ -884,7 +1140,11 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
cast<BinaryOperator>(Op1)->swapOperands();
std::swap(A, B);
}
- if (A == Op0) // A&(A^B) -> A & ~B
+    // Notice that the pattern (A&(~B)) is actually (A&(-1^B)), so if
+    // A is originally -1 (or a vector of -1 and undefs), then we enter
+    // an endless loop. By checking that A is non-constant we ensure that
+    // we will never enter the loop.
+ if (A == Op0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
}
@@ -1160,7 +1420,12 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
return getICmpValue(isSigned, Code, Op0, Op1, Builder);
}
}
-
+
+ // handle (roughly):
+ // (icmp ne (A & B), C) | (icmp ne (A & D), E)
+ if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, ICmpInst::ICMP_NE, Builder))
+ return V;
+
// This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
@@ -1173,24 +1438,17 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *NewOr = Builder->CreateOr(Val, Val2);
return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
}
-
- // (icmp eq (A & C1), 0) | (icmp eq (A & C2), 0) -->
- // (icmp ne (A & (C1|C2)), (C1|C2)) where C1 and C2 are non-zero POT
- if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
- Value *Op1 = 0, *Op2 = 0;
- ConstantInt *CI1 = 0, *CI2 = 0;
- if (match(LHS->getOperand(0), m_And(m_Value(Op1), m_ConstantInt(CI1))) &&
- match(RHS->getOperand(0), m_And(m_Value(Op2), m_ConstantInt(CI2)))) {
- if (Op1 == Op2 && !CI1->isZero() && !CI2->isZero() &&
- CI1->getValue().isPowerOf2() && CI2->getValue().isPowerOf2()) {
- Constant *ConstOr = ConstantExpr::getOr(CI1, CI2);
- Value *NewAnd = Builder->CreateAnd(Op1, ConstOr);
- return Builder->CreateICmp(ICmpInst::ICMP_NE, NewAnd, ConstOr);
- }
- }
- }
}
-
+
+ // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
+ // iff C2 + CA == C1.
+ if (LHSCC == ICmpInst::ICMP_ULT && RHSCC == ICmpInst::ICMP_EQ) {
+ ConstantInt *AddCst;
+ if (match(Val, m_Add(m_Specific(Val2), m_ConstantInt(AddCst))))
+ if (RHSCst->getValue() + AddCst->getValue() == LHSCst->getValue())
+ return Builder->CreateICmpULE(Val, LHSCst);
+ }
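A worked instance of this new fold (sketch) with CA = 1, C2 = 9 and C1 = 10, so that C2 + CA == C1 holds:

    // x == 9 makes x + 1 == 10, exactly the one extra value that "ule 10"
    // admits over "ult 10". Unsigned wraparound is covered too: if x + 1
    // wraps to 0, both sides are true.
    bool beforeFold(unsigned x) { return (x + 1u) < 10u || x == 9u; }
    bool afterFold(unsigned x)  { return (x + 1u) <= 10u; }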
+
// From here on, we only handle:
// (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
if (Val != Val2) return 0;
@@ -1429,12 +1687,16 @@ Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
}
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyOrInst(Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
+ // (A&B)|(A&C) -> A&(B|C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
+
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
@@ -1481,8 +1743,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
if (match(Op0, m_Or(m_Value(), m_Value())) ||
match(Op1, m_Or(m_Value(), m_Value())) ||
- (match(Op0, m_Shift(m_Value(), m_Value())) &&
- match(Op1, m_Shift(m_Value(), m_Value())))) {
+ (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
+ match(Op1, m_LogicalShift(m_Value(), m_Value())))) {
if (Instruction *BSwap = MatchBSwap(I))
return BSwap;
}
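+ // Illustrative sketch: on i16 operands, MatchBSwap can turn
+ //   (lshr i16 %x, 8) | (shl i16 %x, 8) --> llvm.bswap.i16(%x)
+ // Restricting the match to logical shifts avoids misreading an ashr,
+ // whose sign-filled high bits are not byte-swap material.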
@@ -1509,7 +1771,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
Value *C = 0, *D = 0;
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
match(Op1, m_And(m_Value(B), m_Value(D)))) {
- Value *V1 = 0, *V2 = 0, *V3 = 0;
+ Value *V1 = 0, *V2 = 0;
C1 = dyn_cast<ConstantInt>(C);
C2 = dyn_cast<ConstantInt>(D);
if (C1 && C2) { // (A & C1)|(B & C2)
@@ -1567,25 +1829,6 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}
}
-
- // Check to see if we have any common things being and'ed. If so, find the
- // terms for V1 & (V2|V3).
- if (Op0->hasOneUse() || Op1->hasOneUse()) {
- V1 = 0;
- if (A == B) // (A & C)|(A & D) == A & (C|D)
- V1 = A, V2 = C, V3 = D;
- else if (A == D) // (A & C)|(B & A) == A & (B|C)
- V1 = A, V2 = B, V3 = C;
- else if (C == B) // (A & C)|(C & D) == C & (A|D)
- V1 = C, V2 = A, V3 = D;
- else if (C == D) // (A & C)|(B & C) == C & (A|B)
- V1 = C, V2 = A, V3 = B;
-
- if (V1) {
- Value *Or = Builder->CreateOr(V2, V3, "tmp");
- return BinaryOperator::CreateAnd(V1, Or);
- }
- }
// (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
// Don't do this for vector select idioms, the code generator doesn't handle
@@ -1667,65 +1910,69 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// fold (or (cast A), (cast B)) -> (cast (or A, B))
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
- if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
- if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVectorTy()) {
- Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
-
- if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
- // Only do this if the casts both really cause code to be
- // generated.
- ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
- ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
- Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
- return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
- }
-
- // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
- if (Value *Res = FoldOrOfICmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
- // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
- if (Value *Res = FoldOrOfFCmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+ CastInst *Op1C = dyn_cast<CastInst>(Op1);
+ if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
+ const Type *SrcTy = Op0C->getOperand(0)->getType();
+ if (SrcTy == Op1C->getOperand(0)->getType() &&
+ SrcTy->isIntOrIntVectorTy()) {
+ Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
+
+ if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
+ // Only do this if the casts both really cause code to be
+ // generated.
+ ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
+ ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
+ Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
+ return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
}
+
+ // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
+ if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
+ if (Value *Res = FoldOrOfICmps(LHS, RHS))
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+
+ // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
+ // cast is otherwise not optimizable. This happens for vector sexts.
+ if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
+ if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
+ if (Value *Res = FoldOrOfFCmps(LHS, RHS))
+ return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
}
+ }
+ }
+
+ // Note: If we've gotten to the point of visiting the outer OR, then the
+ // inner one couldn't be simplified. If it was a constant, then it won't
+ // be simplified by a later pass either, so we try swapping the inner/outer
+ // ORs in the hopes that we'll be able to simplify it this way.
+ // (X|C) | V --> (X|V) | C
+ if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
+ match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
+ Value *Inner = Builder->CreateOr(A, Op1);
+ Inner->takeName(Op0);
+ return BinaryOperator::CreateOr(Inner, C1);
}
return Changed ? &I : 0;
}
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (isa<UndefValue>(Op1)) {
- if (isa<UndefValue>(Op0))
- // Handle undef ^ undef -> 0 special case. This is a common
- // idiom (misuse).
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
- }
+ if (Value *V = SimplifyXorInst(Op0, Op1, TD))
+ return ReplaceInstUsesWith(I, V);
+
+ // (A&B)^(A&C) -> A&(B^C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
- // xor X, X = 0
- if (Op0 == Op1)
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
return &I;
- if (I.getType()->isVectorTy())
- if (isa<ConstantAggregateZero>(Op1))
- return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
// Is this a ~ operation?
if (Value *NotOp = dyn_castNotVal(&I)) {
@@ -1844,15 +2091,6 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
return NV;
}
- if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
- if (X == Op1)
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
- if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
- if (X == Op0)
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
-
BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
if (Op1I) {
Value *A, *B;
@@ -1865,10 +2103,6 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
I.swapOperands(); // Simplified below.
std::swap(Op0, Op1);
}
- } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
- return ReplaceInstUsesWith(I, B); // A^(A^B) == B
- } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
- return ReplaceInstUsesWith(I, A); // A^(B^A) == B
} else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
Op1I->hasOneUse()){
if (A == Op0) { // A^(A&B) -> A^(B&A)
@@ -1891,10 +2125,6 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
std::swap(A, B);
if (B == Op1) // (A|B)^B == A & ~B
return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
- } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
- return ReplaceInstUsesWith(I, B); // (A^B)^A == B
- } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
- return ReplaceInstUsesWith(I, A); // (B^A)^A == B
} else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
Op0I->hasOneUse()){
if (A == Op1) // (A&B)^A -> (B&A)^A
@@ -1932,29 +2162,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if ((A == C && B == D) || (A == D && B == C))
return BinaryOperator::CreateXor(A, B);
}
-
- // (A & B)^(C & D)
- if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
- match(Op0I, m_And(m_Value(A), m_Value(B))) &&
- match(Op1I, m_And(m_Value(C), m_Value(D)))) {
- // (X & Y)^(X & Z) -> (Y^Z) & X
- Value *X = 0, *Y = 0, *Z = 0;
- if (A == C)
- X = A, Y = B, Z = D;
- else if (A == D)
- X = A, Y = B, Z = C;
- else if (B == C)
- X = B, Y = A, Z = D;
- else if (B == D)
- X = B, Y = A, Z = C;
-
- if (X) {
- Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
- return BinaryOperator::CreateAnd(NewOp, X);
- }
- }
}
-
+
// (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 0ebe3b4..8449f7b 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -17,6 +17,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
@@ -29,100 +30,10 @@ static const Type *getPromotedType(const Type *Ty) {
return Ty;
}
-/// EnforceKnownAlignment - If the specified pointer points to an object that
-/// we control, modify the object's alignment to PrefAlign. This isn't
-/// often possible though. If alignment is important, a more reliable approach
-/// is to simply align all global variables and allocation instructions to
-/// their preferred alignment from the beginning.
-///
-static unsigned EnforceKnownAlignment(Value *V,
- unsigned Align, unsigned PrefAlign) {
-
- User *U = dyn_cast<User>(V);
- if (!U) return Align;
-
- switch (Operator::getOpcode(U)) {
- default: break;
- case Instruction::BitCast:
- return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
- case Instruction::GetElementPtr: {
- // If all indexes are zero, it is just the alignment of the base pointer.
- bool AllZeroOperands = true;
- for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
- if (!isa<Constant>(*i) ||
- !cast<Constant>(*i)->isNullValue()) {
- AllZeroOperands = false;
- break;
- }
-
- if (AllZeroOperands) {
- // Treat this like a bitcast.
- return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
- }
- return Align;
- }
- case Instruction::Alloca: {
- AllocaInst *AI = cast<AllocaInst>(V);
- // If there is a requested alignment and if this is an alloca, round up.
- if (AI->getAlignment() >= PrefAlign)
- return AI->getAlignment();
- AI->setAlignment(PrefAlign);
- return PrefAlign;
- }
- }
-
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // If there is a large requested alignment and we can, bump up the alignment
- // of the global.
- if (GV->isDeclaration()) return Align;
-
- if (GV->getAlignment() >= PrefAlign)
- return GV->getAlignment();
- // We can only increase the alignment of the global if it has no alignment
- // specified or if it is not assigned a section. If it is assigned a
- // section, the global could be densely packed with other objects in the
- // section, increasing the alignment could cause padding issues.
- if (!GV->hasSection() || GV->getAlignment() == 0)
- GV->setAlignment(PrefAlign);
- return GV->getAlignment();
- }
-
- return Align;
-}
-
-/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
-/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
-/// and it is more than the alignment of the ultimate object, see if we can
-/// increase the alignment of the ultimate object, making this check succeed.
-unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
- unsigned PrefAlign) {
- assert(V->getType()->isPointerTy() &&
- "GetOrEnforceKnownAlignment expects a pointer!");
- unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
- unsigned TrailZ = KnownZero.countTrailingOnes();
-
- // Avoid trouble with ridiculously large TrailZ values, such as
- // those computed from a null pointer.
- TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
-
- unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
-
- // LLVM doesn't support alignments larger than this currently.
- Align = std::min(Align, +Value::MaximumAlignment);
-
- if (PrefAlign > Align)
- Align = EnforceKnownAlignment(V, Align, PrefAlign);
-
- // We don't need to make any adjustment.
- return Align;
-}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
- unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@@ -211,7 +122,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
+ unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
@@ -234,7 +145,9 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
Value *Dest = MI->getDest();
- Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
+ unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
+ Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
+ Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
// Alignment 0 is identity for alignment 1 for memset, but not store.
if (Alignment == 0) Alignment = 1;
@@ -280,7 +193,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// memmove/cpy/set of zero bytes is a noop.
if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
- if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
+ if (NumBytes->isNullValue())
+ return EraseInstFromFunction(CI);
if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
if (CI->getZExtValue() == 1) {
@@ -289,6 +203,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// alignment is sufficient.
}
}
+
+ // No other transformations apply to volatile transfers.
+ if (MI->isVolatile())
+ return 0;
// If we have a memmove and the source operation is a constant global,
// then the source and dest pointers can't alias, so we can change this
@@ -332,82 +250,73 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (!TD) break;
const Type *ReturnTy = CI.getType();
- bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
+ uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
// Get to the real allocated thing and offset as fast as possible.
Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
-
+
+ uint64_t Offset = 0;
+ uint64_t Size = -1ULL;
+
+ // Try to look through constant GEPs.
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
+ if (!GEP->hasAllConstantIndices()) break;
+
+ // Get the current byte offset into the thing. Use the original
+ // operand in case we're looking through a bitcast.
+ SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
+ Offset = TD->getIndexedOffset(GEP->getPointerOperandType(),
+ Ops.data(), Ops.size());
+
+ Op1 = GEP->getPointerOperand()->stripPointerCasts();
+
+ // Make sure we're not a constant offset from an external
+ // global.
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
+ if (!GV->hasDefinitiveInitializer()) break;
+ }
+
// If we've stripped down to a single global variable that we
// can know the size of then just return that.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
if (GV->hasDefinitiveInitializer()) {
Constant *C = GV->getInitializer();
- uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
+ Size = TD->getTypeAllocSize(C->getType());
} else {
// Can't determine size of the GV.
- Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+ Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
return ReplaceInstUsesWith(CI, RetVal);
}
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
// Get alloca size.
if (AI->getAllocatedType()->isSized()) {
- uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
+ Size = TD->getTypeAllocSize(AI->getAllocatedType());
if (AI->isArrayAllocation()) {
const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
if (!C) break;
- AllocaSize *= C->getZExtValue();
+ Size *= C->getZExtValue();
}
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
}
} else if (CallInst *MI = extractMallocCall(Op1)) {
+ // Get allocation size.
const Type* MallocType = getMallocAllocatedType(MI);
- // Get alloca size.
- if (MallocType && MallocType->isSized()) {
- if (Value *NElems = getMallocArraySize(MI, TD, true)) {
+ if (MallocType && MallocType->isSized())
+ if (Value *NElems = getMallocArraySize(MI, TD, true))
if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
- (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
- }
- }
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
- // Only handle constant GEPs here.
- if (CE->getOpcode() != Instruction::GetElementPtr) break;
- GEPOperator *GEP = cast<GEPOperator>(CE);
-
- // Make sure we're not a constant offset from an external
- // global.
- Value *Operand = GEP->getPointerOperand();
- Operand = Operand->stripPointerCasts();
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
- if (!GV->hasDefinitiveInitializer()) break;
-
- // Get what we're pointing to and its size.
- const PointerType *BaseType =
- cast<PointerType>(Operand->getType());
- uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());
-
- // Get the current byte offset into the thing. Use the original
- // operand in case we're looking through a bitcast.
- SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
- const PointerType *OffsetType =
- cast<PointerType>(GEP->getPointerOperand()->getType());
- uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());
-
- if (Size < Offset) {
- // Out of bound reference? Negative index normalized to large
- // index? Just return "I don't know".
- Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
- return ReplaceInstUsesWith(CI, RetVal);
- }
-
- Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
- return ReplaceInstUsesWith(CI, RetVal);
- }
+ Size = NElements->getZExtValue() * TD->getTypeAllocSize(MallocType);
+ }
// Do not return "I don't know" here. Later optimization passes could
// make it possible to evaluate objectsize to a constant.
- break;
+ if (Size == -1ULL)
+ break;
+
+ if (Size < Offset) {
+ // Out of bound reference? Negative index normalized to large
+ // index? Just return "I don't know".
+ return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
+ }
+ return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
}
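+ // Worked example (illustrative, assumes TD is available): given
+ //   @g = global [10 x i8] ...
+ //   %p = getelementptr [10 x i8]* @g, i32 0, i32 3
+ // an objectsize call on %p yields Size = 10, Offset = 3, i.e. 7 bytes.
+ // If Offset exceeded Size (say, a negative index that normalized to a
+ // huge value), the code above returns DontKnow rather than a bogus size.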
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
@@ -604,7 +513,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_loadu_dq:
// Turn PPC lvx -> load if the pointer is known aligned.
// Turn X86 loadups -> load if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
@@ -613,7 +522,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
const Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
@@ -624,16 +533,23 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
const Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
return new StoreInst(II->getArgOperand(1), Ptr);
}
break;
-
- case Intrinsic::x86_sse_cvttss2si: {
- // These intrinsics only demands the 0th element of its input vector. If
+
+ case Intrinsic::x86_sse_cvtss2si:
+ case Intrinsic::x86_sse_cvtss2si64:
+ case Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse2_cvtsd2si:
+ case Intrinsic::x86_sse2_cvtsd2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse2_cvttsd2si64: {
+ // These intrinsics only demand the 0th element of their input vectors. If
// we can simplify the input based on that, do so now.
unsigned VWidth =
cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
@@ -646,7 +562,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
break;
}
-
+
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
@@ -697,6 +613,32 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
break;
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane:
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
+ unsigned AlignArg = II->getNumArgOperands() - 1;
+ ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
+ if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
+ II->setArgOperand(AlignArg,
+ ConstantInt::get(Type::getInt32Ty(II->getContext()),
+ MemAlign, false));
+ return II;
+ }
+ break;
+ }
+
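+ // Worked example (illustrative): for
+ //   call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %p, i32 1)
+ // where %p is provably 16-byte aligned, the trailing alignment argument
+ // is raised from 1 to 16 so the backend can use an aligned access.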
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
@@ -783,6 +725,8 @@ protected:
NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
}
bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
+ if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
+ return true;
if (ConstantInt *SizeCI =
dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
if (SizeCI->isAllOnesValue())
@@ -819,11 +763,11 @@ Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
Instruction *InstCombiner::visitCallSite(CallSite CS) {
bool Changed = false;
- // If the callee is a constexpr cast of a function, attempt to move the cast
- // to the arguments of the call/invoke.
- if (transformConstExprCastCall(CS)) return 0;
-
+ // If the callee is a pointer to a function, attempt to move any casts to the
+ // arguments of the call/invoke.
Value *Callee = CS.getCalledValue();
+ if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
+ return 0;
if (Function *CalleeF = dyn_cast<Function>(Callee))
// If the call and callee calling conventions don't match, this call must
@@ -917,12 +861,10 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
- if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
- ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
- if (CE->getOpcode() != Instruction::BitCast ||
- !isa<Function>(CE->getOperand(0)))
+ Function *Callee =
+ dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ if (Callee == 0)
return false;
- Function *Callee = cast<Function>(CE->getOperand(0));
Instruction *Caller = CS.getInstruction();
const AttrListPtr &CallerPAL = CS.getAttributes();
@@ -984,9 +926,22 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (!CastInst::isCastable(ActTy, ParamTy))
return false; // Cannot transform this parameter value.
- if (CallerPAL.getParamAttributes(i + 1)
- & Attribute::typeIncompatible(ParamTy))
+ unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
+ if (Attrs & Attribute::typeIncompatible(ParamTy))
return false; // Attribute not compatible with transformed value.
+
+ // If the parameter is passed as a byval argument, then we have to have a
+ // sized type and the sized type has to have the same size as the old type.
+ if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
+ const PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
+ if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
+ return false;
+
+ const Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
+ if (TD->getTypeAllocSize(CurElTy) !=
+ TD->getTypeAllocSize(ParamPTy->getElementType()))
+ return false;
+ }
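+ // Illustrative sketch: if @f takes a %struct.A* byval parameter and the
+ // (cast) call site passes a %struct.B*, the byval copy moves exactly
+ // TypeAllocSize bytes, so stripping the cast is only safe when
+ // sizeof(%struct.A) == sizeof(%struct.B) per the target data.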
// Converting from one pointer type to another or between a pointer and an
// integer of the same size is safe even if we do not have a body.
@@ -1109,8 +1064,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
if (!NV->getType()->isVoidTy()) {
- Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
- OldRetTy, false);
+ Instruction::CastOps opcode =
+ CastInst::getCastOpcode(NC, false, OldRetTy, false);
NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
// If this is an invoke instruction, we should insert it after the first
@@ -1119,7 +1074,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
InsertNewInstBefore(NC, *I);
} else {
- // Otherwise, it's a call, just insert cast right after the call instr
+ // Otherwise, it's a call, just insert cast right after the call.
InsertNewInstBefore(NC, *Caller);
}
Worklist.AddUsersToWorkList(*Caller);
@@ -1128,7 +1083,6 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
}
}
-
if (!Caller->use_empty())
Caller->replaceAllUsesWith(NV);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 79a9b09..b432641 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -462,8 +462,8 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
Value *A = 0; ConstantInt *Cst = 0;
- if (match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst))) &&
- Src->hasOneUse()) {
+ if (Src->hasOneUse() &&
+ match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
// We have three types to worry about here, the type of A, the source of
// the truncate (MidSize), and the destination of the truncate. We know that
// ASize < MidSize and MidSize > ResultSize, but don't know the relation
@@ -482,6 +482,16 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, CI.getType(), false);
}
+
+ // Transform "trunc (and X, cst)" -> "and (trunc X), cst" so long as the dest
+ // type isn't non-native.
+ if (Src->hasOneUse() && isa<IntegerType>(Src->getType()) &&
+ ShouldChangeType(Src->getType(), CI.getType()) &&
+ match(Src, m_And(m_Value(A), m_ConstantInt(Cst)))) {
+ Value *NewTrunc = Builder->CreateTrunc(A, CI.getType(), A->getName()+".tr");
+ return BinaryOperator::CreateAnd(NewTrunc,
+ ConstantExpr::getTrunc(Cst, CI.getType()));
+ }
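+ // Worked example (illustrative, assuming i8 is a native type here):
+ //   %a = and i32 %x, 42
+ //   %t = trunc i32 %a to i8
+ // becomes
+ //   %x.tr = trunc i32 %x to i8
+ //   %t = and i8 %x.tr, 42
+ // shrinking the AND so more of the computation happens in the narrow type.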
return 0;
}
@@ -1019,8 +1029,22 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
}
}
}
-
-
+
+ // vector (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed.
+ if (const VectorType *VTy = dyn_cast<VectorType>(DestTy)) {
+ ICmpInst::Predicate Pred; Value *CmpLHS;
+ if (match(Src, m_ICmp(Pred, m_Value(CmpLHS), m_Zero()))) {
+ if (Pred == ICmpInst::ICMP_SLT && CmpLHS->getType() == DestTy) {
+ const Type *EltTy = VTy->getElementType();
+
+ // splat the shift constant to a constant vector.
+ Constant *VSh = ConstantInt::get(VTy, EltTy->getScalarSizeInBits()-1);
+ Value *In = Builder->CreateAShr(CmpLHS, VSh,CmpLHS->getName()+".lobit");
+ return ReplaceInstUsesWith(CI, In);
+ }
+ }
+ }
+
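+ // Worked example (illustrative):
+ //   %c = icmp slt <4 x i32> %x, zeroinitializer
+ //   %s = sext <4 x i1> %c to <4 x i32>
+ // becomes
+ //   %s = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
+ // since an arithmetic shift by the bit width minus one smears each
+ // lane's sign bit into all-ones or all-zeros.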
// If the input is a shl/ashr pair of a same constant, then this is a sign
// extension from a smaller value. If we could trust arbitrary bitwidth
// integers, we could turn this into a truncate to the smaller bit and then
@@ -1363,8 +1387,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
ConstantInt::get(Int32Ty, SrcElts));
}
- Constant *Mask = ConstantVector::get(ShuffleMask.data(), ShuffleMask.size());
- return new ShuffleVectorInst(InVal, V2, Mask);
+ return new ShuffleVectorInst(InVal, V2, ConstantVector::get(ShuffleMask));
}
static bool isMultipleOfTypeSize(unsigned Value, const Type *Ty) {
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index d7e2b72..999de34 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -22,13 +22,17 @@
using namespace llvm;
using namespace PatternMatch;
+static ConstantInt *getOne(Constant *C) {
+ return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
+}
+
/// AddOne - Add one to a ConstantInt
static Constant *AddOne(Constant *C) {
return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}
/// SubOne - Subtract one from a ConstantInt
-static Constant *SubOne(ConstantInt *C) {
- return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
+static Constant *SubOne(Constant *C) {
+ return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}
static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
@@ -160,8 +164,8 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
Max = KnownOne|UnknownBits;
if (UnknownBits.isNegative()) { // Sign bit is unknown
- Min.set(Min.getBitWidth()-1);
- Max.clear(Max.getBitWidth()-1);
+ Min.setBit(Min.getBitWidth()-1);
+ Max.clearBit(Max.getBitWidth()-1);
}
}
@@ -694,13 +698,6 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
if (Pred == ICmpInst::ICMP_NE)
return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
- // If this is an instruction (as opposed to constantexpr) get NUW/NSW info.
- bool isNUW = false, isNSW = false;
- if (BinaryOperator *Add = dyn_cast<BinaryOperator>(TheAdd)) {
- isNUW = Add->hasNoUnsignedWrap();
- isNSW = Add->hasNoSignedWrap();
- }
-
// From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
// so the values can never be equal. Similarly for all other "or equals"
// operators.
@@ -709,10 +706,6 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
// (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
// (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
- // If this is an NUW add, then this is always false.
- if (isNUW)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
-
Value *R =
ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
@@ -721,12 +714,8 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
// (X+1) >u X --> X <u (0-1) --> X != 255
// (X+2) >u X --> X <u (0-2) --> X <u 254
// (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
- if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
- // If this is an NUW add, then this is always true.
- if (isNUW)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
+ if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
- }
unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
ConstantInt *SMax = ConstantInt::get(X->getContext(),
@@ -738,16 +727,8 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
// (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
// (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
// (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
- if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
- // If this is an NSW add, then we have two cases: if the constant is
- // positive, then this is always false, if negative, this is always true.
- if (isNSW) {
- bool isTrue = CI->getValue().isNegative();
- return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
- }
-
+ if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
- }
// (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
// (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
@@ -756,13 +737,6 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
// (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
// (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
- // If this is an NSW add, then we have two cases: if the constant is
- // positive, then this is always true, if negative, this is always false.
- if (isNSW) {
- bool isTrue = !CI->getValue().isNegative();
- return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
- }
-
assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1);
return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
@@ -782,7 +756,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
// (x /u C1) <u C2. Simply casting the operands and result won't
// work. :( The if statement below tests that condition and bails
- // if it finds it.
+ // if it finds it.
bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
return 0;
@@ -790,9 +764,11 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
return 0; // The ProdOV computation fails on divide by zero.
if (DivIsSigned && DivRHS->isAllOnesValue())
return 0; // The overflow computation also screws up here
- if (DivRHS->isOne())
- return 0; // Not worth bothering, and eliminates some funny cases
- // with INT_MIN.
+ if (DivRHS->isOne()) {
+ // This eliminates some funny cases with INT_MIN.
+ ICI.setOperand(0, DivI->getOperand(0)); // X/1 == X.
+ return &ICI;
+ }
// Compute Prod = CI * DivRHS. We are essentially solving an equation
// of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
@@ -809,6 +785,10 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// Get the ICmp opcode
ICmpInst::Predicate Pred = ICI.getPredicate();
+ /// If the division is known to be exact, then there is no remainder from the
+ /// divide, so the covered range size is one; otherwise it is the divisor.
+ ConstantInt *RangeSize = DivI->isExact() ? getOne(Prod) : DivRHS;
+
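+ // Worked example (illustrative): "X /u 5 == 3" checks X in [15, 20),
+ // since 15..19 all divide down to 3. If the udiv is exact the remainder
+ // is known to be zero, so the range collapses to the single value 15 and
+ // the compare becomes "X == 15".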
// Figure out the interval that is being checked. For example, a comparison
// like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
// Compute this interval based on the constants involved and the signedness of
@@ -818,38 +798,43 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
int LoOverflow = 0, HiOverflow = 0;
Constant *LoBound = 0, *HiBound = 0;
-
+
if (!DivIsSigned) { // udiv
// e.g. X/5 op 3 --> [15, 20)
LoBound = Prod;
HiOverflow = LoOverflow = ProdOV;
- if (!HiOverflow)
- HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, false);
+ if (!HiOverflow) {
+ // If this is not an exact divide, then many values in the range collapse
+ // to the same result value.
+ HiOverflow = AddWithOverflow(HiBound, LoBound, RangeSize, false);
+ }
+
} else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
if (CmpRHSV == 0) { // (X / pos) op 0
// Can't overflow. e.g. X/2 op 0 --> [-1, 2)
- LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
- HiBound = DivRHS;
+ LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
+ HiBound = RangeSize;
} else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
HiOverflow = LoOverflow = ProdOV;
if (!HiOverflow)
- HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, true);
+ HiOverflow = AddWithOverflow(HiBound, Prod, RangeSize, true);
} else { // (X / pos) op neg
// e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
HiBound = AddOne(Prod);
LoOverflow = HiOverflow = ProdOV ? -1 : 0;
if (!LoOverflow) {
- ConstantInt* DivNeg =
- cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
+ ConstantInt *DivNeg =cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
- }
+ }
}
} else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
+ if (DivI->isExact())
+ RangeSize = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
if (CmpRHSV == 0) { // (X / neg) op 0
// e.g. X/-5 op 0 --> [-4, 5)
- LoBound = AddOne(DivRHS);
- HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
+ LoBound = AddOne(RangeSize);
+ HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
if (HiBound == DivRHS) { // -INTMIN = INTMIN
HiOverflow = 1; // [INTMIN+1, overflow)
HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
@@ -859,12 +844,12 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
HiBound = AddOne(Prod);
HiOverflow = LoOverflow = ProdOV ? -1 : 0;
if (!LoOverflow)
- LoOverflow = AddWithOverflow(LoBound, HiBound, DivRHS, true) ? -1 : 0;
+ LoOverflow = AddWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
} else { // (X / neg) op neg
LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
LoOverflow = HiOverflow = ProdOV;
if (!HiOverflow)
- HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, true);
+ HiOverflow = SubWithOverflow(HiBound, Prod, RangeSize, true);
}
// Dividing by a negative swaps the condition. LT <-> GT
@@ -883,9 +868,8 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
if (LoOverflow)
return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
ICmpInst::ICMP_ULT, X, HiBound);
- return ReplaceInstUsesWith(ICI,
- InsertRangeTest(X, LoBound, HiBound, DivIsSigned,
- true));
+ return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
+ DivIsSigned, true));
case ICmpInst::ICMP_NE:
if (LoOverflow && HiOverflow)
return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
@@ -908,13 +892,100 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
case ICmpInst::ICMP_SGT:
if (HiOverflow == +1) // High bound greater than input range.
return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- else if (HiOverflow == -1) // High bound less than input range.
+ if (HiOverflow == -1) // High bound less than input range.
return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
if (Pred == ICmpInst::ICMP_UGT)
return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
- else
- return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
+ return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
+ }
+}
+
+/// FoldICmpShrCst - Handle "icmp(([al]shr X, cst1), cst2)".
+Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
+ ConstantInt *ShAmt) {
+ const APInt &CmpRHSV = cast<ConstantInt>(ICI.getOperand(1))->getValue();
+
+ // Check that the shift amount is in range. If not, don't perform
+ // undefined shifts. When the shift is visited it will be
+ // simplified.
+ uint32_t TypeBits = CmpRHSV.getBitWidth();
+ uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
+ if (ShAmtVal >= TypeBits || ShAmtVal == 0)
+ return 0;
+
+ if (!ICI.isEquality()) {
+ // If we have an unsigned comparison and an ashr, we can't simplify this.
+ // Similarly for signed comparisons with lshr.
+ if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
+ return 0;
+
+ // Otherwise, all lshr and all exact ashr's are equivalent to a udiv/sdiv by
+ // a power of 2. Since we already have logic to simplify these, transform
+ // to div and then simplify the resultant comparison.
+ if (Shr->getOpcode() == Instruction::AShr &&
+ !Shr->isExact())
+ return 0;
+
+ // Revisit the shift (to delete it).
+ Worklist.Add(Shr);
+
+ Constant *DivCst =
+ ConstantInt::get(Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
+
+ Value *Tmp =
+ Shr->getOpcode() == Instruction::AShr ?
+ Builder->CreateSDiv(Shr->getOperand(0), DivCst, "", Shr->isExact()) :
+ Builder->CreateUDiv(Shr->getOperand(0), DivCst, "", Shr->isExact());
+
+ ICI.setOperand(0, Tmp);
+
+ // If the builder folded the binop, just return it.
+ BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
+ if (TheDiv == 0)
+ return &ICI;
+
+ // Otherwise, fold this div/compare.
+ assert(TheDiv->getOpcode() == Instruction::SDiv ||
+ TheDiv->getOpcode() == Instruction::UDiv);
+
+ Instruction *Res = FoldICmpDivCst(ICI, TheDiv, cast<ConstantInt>(DivCst));
+ assert(Res && "This div/cst should have folded!");
+ return Res;
+ }
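+ // Worked example (illustrative):
+ //   (icmp ult (lshr i8 %x, 3), 4)
+ // is rewritten as
+ //   (icmp ult (udiv i8 %x, 8), 4)
+ // which the existing div/cst logic folds further, to (icmp ult %x, 32).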
+
+
+ // If we are comparing against bits always shifted out, the
+ // comparison cannot succeed.
+ APInt Comp = CmpRHSV << ShAmtVal;
+ ConstantInt *ShiftedCmpRHS = ConstantInt::get(ICI.getContext(), Comp);
+ if (Shr->getOpcode() == Instruction::LShr)
+ Comp = Comp.lshr(ShAmtVal);
+ else
+ Comp = Comp.ashr(ShAmtVal);
+
+ if (Comp != CmpRHSV) { // Comparing against a bit that we know is zero.
+ bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
+ Constant *Cst = ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
+ IsICMP_NE);
+ return ReplaceInstUsesWith(ICI, Cst);
+ }
+
+ // Otherwise, check to see if the bits shifted out are known to be zero.
+ // If so, we can compare against the unshifted value:
+ // (X & 4) >> 1 == 2 --> (X & 4) == 4.
+ if (Shr->hasOneUse() && Shr->isExact())
+ return new ICmpInst(ICI.getPredicate(), Shr->getOperand(0), ShiftedCmpRHS);
+
+ if (Shr->hasOneUse()) {
+ // Otherwise strength reduce the shift into an and.
+ APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
+ Constant *Mask = ConstantInt::get(ICI.getContext(), Val);
+
+ Value *And = Builder->CreateAnd(Shr->getOperand(0),
+ Mask, Shr->getName()+".mask");
+ return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
}
+ return 0;
}
@@ -939,8 +1010,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// If all the high bits are known, we can do this xform.
if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
// Pull in the high bits from known-ones set.
- APInt NewRHS(RHS->getValue());
- NewRHS.zext(SrcBits);
+ APInt NewRHS = RHS->getValue().zext(SrcBits);
NewRHS |= KnownOne;
return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
ConstantInt::get(ICI.getContext(), NewRHS));
@@ -1022,10 +1092,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
(AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
uint32_t BitWidth =
cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
- APInt NewCST = AndCST->getValue();
- NewCST.zext(BitWidth);
- APInt NewCI = RHSV;
- NewCI.zext(BitWidth);
+ APInt NewCST = AndCST->getValue().zext(BitWidth);
+ APInt NewCI = RHSV.zext(BitWidth);
Value *NewAnd =
Builder->CreateAnd(Cast->getOperand(0),
ConstantInt::get(ICI.getContext(), NewCST),
@@ -1145,7 +1213,6 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
// Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
// -> and (icmp eq P, null), (icmp eq Q, null).
-
Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
Constant::getNullValue(P->getType()));
Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
@@ -1185,6 +1252,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
return ReplaceInstUsesWith(ICI, Cst);
}
+ // If the shift is NUW, then it is just shifting out zeros, no need for an
+ // AND.
+ if (cast<BinaryOperator>(LHSI)->hasNoUnsignedWrap())
+ return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
+ ConstantExpr::getLShr(RHS, ShAmt));
+
if (LHSI->hasOneUse()) {
// Otherwise strength reduce the shift into an and.
uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
@@ -1195,8 +1268,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
Value *And =
Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
return new ICmpInst(ICI.getPredicate(), And,
- ConstantInt::get(ICI.getContext(),
- RHSV.lshr(ShAmtVal)));
+ ConstantExpr::getLShr(RHS, ShAmt));
}
}
@@ -1205,8 +1277,9 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (LHSI->hasOneUse() &&
isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
// (X << 31) <s 0 --> (X&1) != 0
- Constant *Mask = ConstantInt::get(ICI.getContext(), APInt(TypeBits, 1) <<
- (TypeBits-ShAmt->getZExtValue()-1));
+ Constant *Mask = ConstantInt::get(LHSI->getOperand(0)->getType(),
+ APInt::getOneBitSet(TypeBits,
+ TypeBits-ShAmt->getZExtValue()-1));
Value *And =
Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
@@ -1216,57 +1289,13 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
- case Instruction::AShr: {
+ case Instruction::AShr:
// Only handle equality comparisons of shift-by-constant.
- ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
- if (!ShAmt || !ICI.isEquality()) break;
-
- // Check that the shift amount is in range. If not, don't perform
- // undefined shifts. When the shift is visited it will be
- // simplified.
- uint32_t TypeBits = RHSV.getBitWidth();
- if (ShAmt->uge(TypeBits))
- break;
-
- uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
-
- // If we are comparing against bits always shifted out, the
- // comparison cannot succeed.
- APInt Comp = RHSV << ShAmtVal;
- if (LHSI->getOpcode() == Instruction::LShr)
- Comp = Comp.lshr(ShAmtVal);
- else
- Comp = Comp.ashr(ShAmtVal);
-
- if (Comp != RHSV) { // Comparing against a bit that we know is zero.
- bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
- Constant *Cst = ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
- IsICMP_NE);
- return ReplaceInstUsesWith(ICI, Cst);
- }
-
- // Otherwise, check to see if the bits shifted out are known to be zero.
- // If so, we can compare against the unshifted value:
- // (X & 4) >> 1 == 2 --> (X & 4) == 4.
- if (LHSI->hasOneUse() &&
- MaskedValueIsZero(LHSI->getOperand(0),
- APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
- return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
- ConstantExpr::getShl(RHS, ShAmt));
- }
-
- if (LHSI->hasOneUse()) {
- // Otherwise strength reduce the shift into an and.
- APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
- Constant *Mask = ConstantInt::get(ICI.getContext(), Val);
-
- Value *And = Builder->CreateAnd(LHSI->getOperand(0),
- Mask, LHSI->getName()+".mask");
- return new ICmpInst(ICI.getPredicate(), And,
- ConstantExpr::getShl(RHS, ShAmt));
- }
+ if (ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
+ if (Instruction *Res = FoldICmpShrCst(ICI, cast<BinaryOperator>(LHSI),
+ ShAmt))
+ return Res;
break;
- }
case Instruction::SDiv:
case Instruction::UDiv:
@@ -1543,50 +1572,174 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// The re-extended constant changed so the constant cannot be represented
// in the shorter type. Consequently, we cannot emit a simple comparison.
+ // All the cases that fold to true or false will have already been handled
+ // by SimplifyICmpInst, so only deal with the tricky case.
- // First, handle some easy cases. We know the result cannot be equal at this
- // point so handle the ICI.isEquality() cases
- if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- if (ICI.getPredicate() == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
+ if (isSignedCmp || !isSignedExt)
+ return 0;
// Evaluate the comparison for LT (we invert for GT below). LE and GE cases
// should have been folded away previously and not enter in here.
- Value *Result;
- if (isSignedCmp) {
- // We're performing a signed comparison.
- if (cast<ConstantInt>(CI)->getValue().isNegative())
- Result = ConstantInt::getFalse(ICI.getContext()); // X < (small) --> false
- else
- Result = ConstantInt::getTrue(ICI.getContext()); // X < (large) --> true
- } else {
- // We're performing an unsigned comparison.
- if (isSignedExt) {
- // We're performing an unsigned comp with a sign extended value.
- // This is true if the input is >= 0. [aka >s -1]
- Constant *NegOne = Constant::getAllOnesValue(SrcTy);
- Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
- } else {
- // Unsigned extend & unsigned compare -> always true.
- Result = ConstantInt::getTrue(ICI.getContext());
- }
- }
+
+ // We're performing an unsigned comp with a sign extended value.
+ // This is true if the input is >= 0. [aka >s -1]
+ Constant *NegOne = Constant::getAllOnesValue(SrcTy);
+ Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
// Finally, return the value computed.
- if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
- ICI.getPredicate() == ICmpInst::ICMP_SLT)
+ if (ICI.getPredicate() == ICmpInst::ICMP_ULT)
return ReplaceInstUsesWith(ICI, Result);
- assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
- ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
- "ICmp should be folded!");
- if (Constant *CI = dyn_cast<Constant>(Result))
- return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
+ assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
return BinaryOperator::CreateNot(Result);
}
+/// ProcessUGT_ADDCST_ADD - The caller has matched a pattern of the form:
+/// I = icmp ugt (add (add A, B), CI2), CI1
+/// If this is of the form:
+/// sum = a + b
+/// if (sum+128 >u 255)
+/// Then replace it with llvm.sadd.with.overflow.i8.
+///
+static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
+ ConstantInt *CI2, ConstantInt *CI1,
+ InstCombiner &IC) {
+ // The transformation we're trying to do here is to transform this into an
+ // llvm.sadd.with.overflow. To do this, we have to replace the original add
+ // with a narrower add, and discard the add-with-constant that is part of the
+ // range check (if we can't eliminate it, this isn't profitable).
+
+ // In order to eliminate the add-with-constant, the compare can be its only
+ // use.
+ Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
+ if (!AddWithCst->hasOneUse()) return 0;
+
+ // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
+ if (!CI2->getValue().isPowerOf2()) return 0;
+ unsigned NewWidth = CI2->getValue().countTrailingZeros();
+ if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return 0;
+
+ // The width of the new add formed is 1 more than the bias.
+ ++NewWidth;
+
+ // Check to see that CI1 is an all-ones value with NewWidth bits.
+ if (CI1->getBitWidth() == NewWidth ||
+ CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
+ return 0;
+
+ // In order to replace the original add with a narrower
+ // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
+ // and truncates that discard the high bits of the add. Verify that this is
+ // the case.
+ Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
+ for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end();
+ UI != E; ++UI) {
+ if (*UI == AddWithCst) continue;
+
+ // Only accept truncates for now. We would really like a nice recursive
+ // predicate like SimplifyDemandedBits, but which goes down the use-def
+ // chain to see which bits of a value are actually demanded. If the
+ // original add had another add which was then immediately truncated, we
+ // could still do the transformation.
+ TruncInst *TI = dyn_cast<TruncInst>(*UI);
+ if (TI == 0 ||
+ TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
+ }
+
+ // If the pattern matches, truncate the inputs to the narrower type and
+ // use the sadd_with_overflow intrinsic to efficiently compute both the
+ // result and the overflow bit.
+ Module *M = I.getParent()->getParent()->getParent();
+
+ const Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
+ Value *F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow,
+ &NewType, 1);
+
+ InstCombiner::BuilderTy *Builder = IC.Builder;
+
+ // Put the new code above the original add, in case there are any uses of the
+ // add between the add and the compare.
+ Builder->SetInsertPoint(OrigAdd);
+
+ Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName()+".trunc");
+ Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName()+".trunc");
+ CallInst *Call = Builder->CreateCall2(F, TruncA, TruncB, "sadd");
+ Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
+ Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());
+
+ // The inner add was the result of the narrow add, zero extended to the
+ // wider type. Replace it with the result computed by the intrinsic.
+ IC.ReplaceInstUsesWith(*OrigAdd, ZExt);
+
+ // The original icmp gets replaced with the overflow value.
+ return ExtractValueInst::Create(Call, 1, "sadd.overflow");
+}
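+// Worked example (illustrative, i8 case): the matched source is
+//   %sum = add i32 %a, %b          ; %a, %b sign-extended from i8
+//   %off = add i32 %sum, 128       ; CI2 = 128 = 2^7
+//   %cmp = icmp ugt i32 %off, 255  ; CI1 = 255 = low 8 bits set
+// which tests whether %sum lies outside [-128, 127]. It becomes a call to
+// @llvm.sadd.with.overflow.i8 on the truncated operands, with %cmp
+// replaced by the intrinsic's i1 overflow result.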
+
+static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
+ InstCombiner &IC) {
+ // Don't bother doing this transformation for pointers or vectors.
+ if (!isa<IntegerType>(OrigAddV->getType())) return 0;
+
+ // If the add is a constant expr, then we don't bother transforming it.
+ Instruction *OrigAdd = dyn_cast<Instruction>(OrigAddV);
+ if (OrigAdd == 0) return 0;
+
+ Value *LHS = OrigAdd->getOperand(0), *RHS = OrigAdd->getOperand(1);
+
+ // Put the new code above the original add, in case there are any uses of the
+ // add between the add and the compare.
+ InstCombiner::BuilderTy *Builder = IC.Builder;
+ Builder->SetInsertPoint(OrigAdd);
+
+ Module *M = I.getParent()->getParent()->getParent();
+ const Type *Ty = LHS->getType();
+ Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, &Ty,1);
+ CallInst *Call = Builder->CreateCall2(F, LHS, RHS, "uadd");
+ Value *Add = Builder->CreateExtractValue(Call, 0);
+ IC.ReplaceInstUsesWith(*OrigAdd, Add);
+
+ // The original icmp gets replaced with the overflow value.
+ return ExtractValueInst::Create(Call, 1, "uadd.overflow");
+}
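+// Illustrative note: the idiom this handles (matched by callers not shown
+// in this hunk) is the usual unsigned overflow check
+//   %sum = add i32 %a, %b
+//   %ov = icmp ult i32 %sum, %a   ; true iff the add wrapped
+// which becomes @llvm.uadd.with.overflow.i32, with the add replaced by the
+// intrinsic's value result and the icmp by its i1 overflow result.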
+
+// DemandedBitsLHSMask - When performing a comparison against a constant,
+// it is possible that not all the bits in the LHS are demanded. This helper
+// method computes the mask that IS demanded.
+static APInt DemandedBitsLHSMask(ICmpInst &I,
+ unsigned BitWidth, bool isSignCheck) {
+ if (isSignCheck)
+ return APInt::getSignBit(BitWidth);
+
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
+ if (!CI) return APInt::getAllOnesValue(BitWidth);
+ const APInt &RHS = CI->getValue();
+
+ switch (I.getPredicate()) {
+ // For a UGT comparison, we don't care about any bits that
+ // correspond to the trailing ones of the comparand. The value of these
+ // bits doesn't impact the outcome of the comparison, because any value
+ // greater than the RHS must differ in a bit higher than these due to carry.
+ case ICmpInst::ICMP_UGT: {
+ unsigned trailingOnes = RHS.countTrailingOnes();
+ APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingOnes);
+ return ~lowBitsSet;
+ }
+
+ // Similarly, for a ULT comparison, we don't care about the trailing zeros.
+ // Any value less than the RHS must differ in a higher bit because of carries.
+ case ICmpInst::ICMP_ULT: {
+ unsigned trailingZeros = RHS.countTrailingZeros();
+ APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingZeros);
+ return ~lowBitsSet;
+ }
+
+ default:
+ return APInt::getAllOnesValue(BitWidth);
+ }
+
+}
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
bool Changed = false;
@@ -1649,17 +1802,37 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
unsigned BitWidth = 0;
- if (TD)
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
- else if (Ty->isIntOrIntVectorTy())
+ if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
-
+ else if (TD) // Pointers require TD info to get their size.
+ BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
+
bool isSignBit = false;
// See if we are doing a comparison with a constant.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
Value *A = 0, *B = 0;
+ // Match the following pattern, which is a common idiom when writing
+ // overflow-safe integer arithmetic function. The source performs an
+ // addition in wider type, and explicitly checks for overflow using
+ // comparisons against INT_MIN and INT_MAX. Simplify this by using the
+ // sadd_with_overflow intrinsic.
+ //
+ // TODO: This could probably be generalized to handle other overflow-safe
+ // operations if we worked out the formulas to compute the appropriate
+ // magic constants.
+ //
+ // sum = a + b
+ // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
+ {
+ ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
+ if (I.getPredicate() == ICmpInst::ICMP_UGT &&
+ match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
+ if (Instruction *Res = ProcessUGT_ADDCST_ADD(I, A, B, CI2, CI, *this))
+ return Res;
+ }
+
// (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
if (I.isEquality() && CI->isZero() &&
match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
@@ -1704,8 +1877,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
if (SimplifyDemandedBits(I.getOperandUse(0),
- isSignBit ? APInt::getSignBit(BitWidth)
- : APInt::getAllOnesValue(BitWidth),
+ DemandedBitsLHSMask(I, BitWidth, isSignBit),
Op0KnownZero, Op0KnownOne, 0))
return &I;
if (SimplifyDemandedBits(I.getOperandUse(1),
@@ -1744,14 +1916,80 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// simplify this comparison. For example, (x&4) < 8 is always true.
switch (I.getPredicate()) {
default: llvm_unreachable("Unknown icmp opcode!");
- case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_EQ: {
if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
+
+ // If all bits are known zero except for one, then we know at most one
+ // bit is set. If the comparison is against zero, then this is a check
+ // to see if *that* bit is set.
+ APInt Op0KnownZeroInverted = ~Op0KnownZero;
+ if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ // If the LHS is an AND with the same constant, look through it.
+ Value *LHS = 0;
+ ConstantInt *LHSC = 0;
+ if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
+ LHSC->getValue() != Op0KnownZeroInverted)
+ LHS = Op0;
+
+ // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
+ // then turn "((1 << x)&8) == 0" into "x != 3".
+ Value *X = 0;
+ if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
+ unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_NE, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
+
+ // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
+ // then turn "((8 >>u x)&1) == 0" into "x != 3".
+ const APInt *CI;
+ if (Op0KnownZeroInverted == 1 &&
+ match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
+ return new ICmpInst(ICmpInst::ICMP_NE, X,
+ ConstantInt::get(X->getType(),
+ CI->countTrailingZeros()));
+ }
+
break;
- case ICmpInst::ICMP_NE:
+ }
+ case ICmpInst::ICMP_NE: {
if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
+
+ // If all bits are known zero except for one, then we know at most one
+ // bit is set. If the comparison is against zero, then this is a check
+ // to see if *that* bit is set.
+ APInt Op0KnownZeroInverted = ~Op0KnownZero;
+ if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ // If the LHS is an AND with the same constant, look through it.
+ Value *LHS = 0;
+ ConstantInt *LHSC = 0;
+ if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
+ LHSC->getValue() != Op0KnownZeroInverted)
+ LHS = Op0;
+
+ // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
+ // then turn "((1 << x)&8) != 0" into "x == 3".
+ Value *X = 0;
+ if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
+ unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_EQ, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
+
+ // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
+ // then turn "((8 >>u x)&1) != 0" into "x == 3".
+ const APInt *CI;
+ if (Op0KnownZeroInverted == 1 &&
+ match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
+ return new ICmpInst(ICmpInst::ICMP_EQ, X,
+ ConstantInt::get(X->getType(),
+ CI->countTrailingZeros()));
+ }
+
break;
+ }
case ICmpInst::ICMP_ULT:
if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
@@ -1894,7 +2132,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// block. If in the same block, we're encouraging jump threading. If
// not, we are just pessimizing the code by making an i1 phi.
if (LHSI->getParent() == I.getParent())
- if (Instruction *NV = FoldOpIntoPhi(I, true))
+ if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
break;
case Instruction::Select: {
@@ -1995,79 +2233,163 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (Instruction *R = visitICmpInstWithCastAndCast(I))
return R;
}
-
- // See if it's the same type of instruction on the left and right.
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
- if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
- Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
- switch (Op0I->getOpcode()) {
- default: break;
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Xor:
- if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
- return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
- Op1I->getOperand(0));
- // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
- if (CI->getValue().isSignBit()) {
- ICmpInst::Predicate Pred = I.isSigned()
- ? I.getUnsignedPredicate()
- : I.getSignedPredicate();
- return new ICmpInst(Pred, Op0I->getOperand(0),
- Op1I->getOperand(0));
- }
-
- if (CI->getValue().isMaxSignedValue()) {
- ICmpInst::Predicate Pred = I.isSigned()
- ? I.getUnsignedPredicate()
- : I.getSignedPredicate();
- Pred = I.getSwappedPredicate(Pred);
- return new ICmpInst(Pred, Op0I->getOperand(0),
- Op1I->getOperand(0));
- }
+
+ // Special logic for binary operators.
+ BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
+ BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
+ if (BO0 || BO1) {
+ CmpInst::Predicate Pred = I.getPredicate();
+ bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
+ if (BO0 && isa<OverflowingBinaryOperator>(BO0))
+ NoOp0WrapProblem = ICmpInst::isEquality(Pred) ||
+ (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
+ (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
+ if (BO1 && isa<OverflowingBinaryOperator>(BO1))
+ NoOp1WrapProblem = ICmpInst::isEquality(Pred) ||
+ (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
+ (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
+
+ // Analyze the case when either Op0 or Op1 is an add instruction.
+ // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
+ Value *A = 0, *B = 0, *C = 0, *D = 0;
+ if (BO0 && BO0->getOpcode() == Instruction::Add)
+ A = BO0->getOperand(0), B = BO0->getOperand(1);
+ if (BO1 && BO1->getOpcode() == Instruction::Add)
+ C = BO1->getOperand(0), D = BO1->getOperand(1);
+
+ // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
+ if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
+ return new ICmpInst(Pred, A == Op1 ? B : A,
+ Constant::getNullValue(Op1->getType()));
+
+ // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
+ if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
+ return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
+ C == Op0 ? D : C);
+
+ // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
+ if (A && C && (A == C || A == D || B == C || B == D) &&
+ NoOp0WrapProblem && NoOp1WrapProblem &&
+ // Try not to increase register pressure.
+ BO0->hasOneUse() && BO1->hasOneUse()) {
+ // Determine Y and Z in the form icmp (X+Y), (X+Z).
+ Value *Y = (A == C || A == D) ? B : A;
+ Value *Z = (C == A || C == B) ? D : C;
+ return new ICmpInst(Pred, Y, Z);
+ }
+
+ // Analyze the case when either Op0 or Op1 is a sub instruction.
+ // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
+ A = 0; B = 0; C = 0; D = 0;
+ if (BO0 && BO0->getOpcode() == Instruction::Sub)
+ A = BO0->getOperand(0), B = BO0->getOperand(1);
+ if (BO1 && BO1->getOpcode() == Instruction::Sub)
+ C = BO1->getOperand(0), D = BO1->getOperand(1);
+
+ // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
+ if (A == Op1 && NoOp0WrapProblem)
+ return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
+
+ // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
+ if (C == Op0 && NoOp1WrapProblem)
+ return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
+
+ // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
+ if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
+ // Try not to increase register pressure.
+ BO0->hasOneUse() && BO1->hasOneUse())
+ return new ICmpInst(Pred, A, C);
+
+ // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
+ if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
+ // Try not to increase register pressure.
+ BO0->hasOneUse() && BO1->hasOneUse())
+ return new ICmpInst(Pred, D, B);
+
+ if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
+ BO0->hasOneUse() && BO1->hasOneUse() &&
+ BO0->getOperand(1) == BO1->getOperand(1)) {
+ switch (BO0->getOpcode()) {
+ default: break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Xor:
+ if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
+ return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
+ BO1->getOperand(0));
+ // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
+ if (CI->getValue().isSignBit()) {
+ ICmpInst::Predicate Pred = I.isSigned()
+ ? I.getUnsignedPredicate()
+ : I.getSignedPredicate();
+ return new ICmpInst(Pred, BO0->getOperand(0),
+ BO1->getOperand(0));
+ }
+
+ if (CI->getValue().isMaxSignedValue()) {
+ ICmpInst::Predicate Pred = I.isSigned()
+ ? I.getUnsignedPredicate()
+ : I.getSignedPredicate();
+ Pred = I.getSwappedPredicate(Pred);
+ return new ICmpInst(Pred, BO0->getOperand(0),
+ BO1->getOperand(0));
}
+ }
+ break;
+ case Instruction::Mul:
+ if (!I.isEquality())
break;
- case Instruction::Mul:
- if (!I.isEquality())
- break;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
- // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
- // Mask = -1 >> count-trailing-zeros(Cst).
- if (!CI->isZero() && !CI->isOne()) {
- const APInt &AP = CI->getValue();
- ConstantInt *Mask = ConstantInt::get(I.getContext(),
- APInt::getLowBitsSet(AP.getBitWidth(),
- AP.getBitWidth() -
- AP.countTrailingZeros()));
- Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
- Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
- return new ICmpInst(I.getPredicate(), And1, And2);
- }
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
+ // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
+ // Mask = -1 >> count-trailing-zeros(Cst).
+ if (!CI->isZero() && !CI->isOne()) {
+ const APInt &AP = CI->getValue();
+ ConstantInt *Mask = ConstantInt::get(I.getContext(),
+ APInt::getLowBitsSet(AP.getBitWidth(),
+ AP.getBitWidth() -
+ AP.countTrailingZeros()));
+ Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
+ Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
+ return new ICmpInst(I.getPredicate(), And1, And2);
}
- break;
}
+ break;
}
}
}
- // ~x < ~y --> y < x
{ Value *A, *B;
- if (match(Op0, m_Not(m_Value(A))) &&
- match(Op1, m_Not(m_Value(B))))
- return new ICmpInst(I.getPredicate(), B, A);
+ // ~x < ~y --> y < x
+ // ~x < cst --> ~cst < x
+ if (match(Op0, m_Not(m_Value(A)))) {
+ if (match(Op1, m_Not(m_Value(B))))
+ return new ICmpInst(I.getPredicate(), B, A);
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
+ return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
+ }
+
+ // (a+b) <u a --> llvm.uadd.with.overflow.
+ // (a+b) <u b --> llvm.uadd.with.overflow.
+ if (I.getPredicate() == ICmpInst::ICMP_ULT &&
+ match(Op0, m_Add(m_Value(A), m_Value(B))) &&
+ (Op1 == A || Op1 == B))
+ if (Instruction *R = ProcessUAddIdiom(I, Op0, *this))
+ return R;
+
+ // a >u (a+b) --> llvm.uadd.with.overflow.
+ // b >u (a+b) --> llvm.uadd.with.overflow.
+ if (I.getPredicate() == ICmpInst::ICMP_UGT &&
+ match(Op1, m_Add(m_Value(A), m_Value(B))) &&
+ (Op0 == A || Op0 == B))
+ if (Instruction *R = ProcessUAddIdiom(I, Op1, *this))
+ return R;
}
if (I.isEquality()) {
Value *A, *B, *C, *D;
-
- // -x == -y --> x == y
- if (match(Op0, m_Neg(m_Value(A))) &&
- match(Op1, m_Neg(m_Value(B))))
- return new ICmpInst(I.getPredicate(), A, B);
-
+
if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
Value *OtherVal = A == Op1 ? B : A;
@@ -2102,16 +2424,6 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Constant::getNullValue(A->getType()));
}
- // (A-B) == A -> B == 0
- if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
- return new ICmpInst(I.getPredicate(), B,
- Constant::getNullValue(B->getType()));
-
- // A == (A-B) -> B == 0
- if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
- return new ICmpInst(I.getPredicate(), B,
- Constant::getNullValue(B->getType()));
-
// (X&Z) == (Y&Z) -> (X^Y) & Z == 0
if (Op0->hasOneUse() && Op1->hasOneUse() &&
match(Op0, m_And(m_Value(A), m_Value(B))) &&
@@ -2397,7 +2709,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
// block. If in the same block, we're encouraging jump threading. If
// not, we are just pessimizing the code by making an i1 phi.
if (LHSI->getParent() == I.getParent())
- if (Instruction *NV = FoldOpIntoPhi(I, true))
+ if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
break;
case Instruction::SIToFP:
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index b68fbc2..78ff734 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -145,7 +145,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// Attempt to improve the alignment.
if (TD) {
unsigned KnownAlign =
- GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
+ getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
TD->getABITypeAlignment(LI.getType());
@@ -165,7 +165,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (LI.isVolatile()) return 0;
// Do really simple store-to-load forwarding and load CSE, to catch cases
- // where there are several consequtive memory accesses to the same location,
+ // where there are several consecutive memory accesses to the same location,
// separated by a few arithmetic operations.
BasicBlock::iterator BBI = &LI;
if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
@@ -330,7 +330,9 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
SIOp0->getName()+".c");
- return new StoreInst(NewCast, CastOp);
+ SI.setOperand(0, NewCast);
+ SI.setOperand(1, CastOp);
+ return &SI;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
@@ -414,7 +416,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Attempt to improve the alignment.
if (TD) {
unsigned KnownAlign =
- GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
+ getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
+ TD);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
TD->getABITypeAlignment(Val->getType());
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index b3974e8..d1a1fd6 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -14,26 +14,22 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
-}
-
/// MultiplyOverflows - True if the multiply can not be expressed in an int
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
uint32_t W = C1->getBitWidth();
APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
if (sign) {
- LHSExt.sext(W * 2);
- RHSExt.sext(W * 2);
+ LHSExt = LHSExt.sext(W * 2);
+ RHSExt = RHSExt.sext(W * 2);
} else {
- LHSExt.zext(W * 2);
- RHSExt.zext(W * 2);
+ LHSExt = LHSExt.zext(W * 2);
+ RHSExt = RHSExt.zext(W * 2);
}
APInt MulExt = LHSExt * RHSExt;
@@ -47,62 +43,48 @@ static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
}
Instruction *InstCombiner::visitMul(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (isa<UndefValue>(Op1)) // undef * X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
+ if (Value *V = SimplifyMulInst(Op0, Op1, TD))
+ return ReplaceInstUsesWith(I, V);
- // Simplify mul instructions with a constant RHS.
- if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1C)) {
-
- // ((X << C1)*C2) == (X * (C2 << C1))
- if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
- if (SI->getOpcode() == Instruction::Shl)
- if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
- return BinaryOperator::CreateMul(SI->getOperand(0),
- ConstantExpr::getShl(CI, ShOp));
-
- if (CI->isZero())
- return ReplaceInstUsesWith(I, Op1C); // X * 0 == 0
- if (CI->equalsInt(1)) // X * 1 == X
- return ReplaceInstUsesWith(I, Op0);
- if (CI->isAllOnesValue()) // X * -1 == 0 - X
- return BinaryOperator::CreateNeg(Op0, I.getName());
-
- const APInt& Val = cast<ConstantInt>(CI)->getValue();
- if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
- return BinaryOperator::CreateShl(Op0,
- ConstantInt::get(Op0->getType(), Val.logBase2()));
- }
- } else if (Op1C->getType()->isVectorTy()) {
- if (Op1C->isNullValue())
- return ReplaceInstUsesWith(I, Op1C);
-
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
- if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
- return BinaryOperator::CreateNeg(Op0, I.getName());
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return ReplaceInstUsesWith(I, V);
- // As above, vector X*splat(1.0) -> X in all defined cases.
- if (Constant *Splat = Op1V->getSplatValue()) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
- if (CI->equalsInt(1))
- return ReplaceInstUsesWith(I, Op0);
- }
- }
+ if (match(Op1, m_AllOnes())) // X * -1 == 0 - X
+ return BinaryOperator::CreateNeg(Op0, I.getName());
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+
+ // ((X << C1)*C2) == (X * (C2 << C1))
+ if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
+ if (SI->getOpcode() == Instruction::Shl)
+ if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
+ return BinaryOperator::CreateMul(SI->getOperand(0),
+ ConstantExpr::getShl(CI, ShOp));
+
+ const APInt &Val = CI->getValue();
+ if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
+ Constant *NewCst = ConstantInt::get(Op0->getType(), Val.logBase2());
+ BinaryOperator *Shl = BinaryOperator::CreateShl(Op0, NewCst);
+ if (I.hasNoSignedWrap()) Shl->setHasNoSignedWrap();
+ if (I.hasNoUnsignedWrap()) Shl->setHasNoUnsignedWrap();
+ return Shl;
}
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
- if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
- isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1C)) {
- // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
- Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1C, "tmp");
- Value *C1C2 = Builder->CreateMul(Op1C, Op0I->getOperand(1));
- return BinaryOperator::CreateAdd(Add, C1C2);
-
+ // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
+ { Value *X; ConstantInt *C1;
+ if (Op0->hasOneUse() &&
+ match(Op0, m_Add(m_Value(X), m_ConstantInt(C1)))) {
+ Value *Add = Builder->CreateMul(X, CI, "tmp");
+ return BinaryOperator::CreateAdd(Add, Builder->CreateMul(C1, CI));
}
-
+ }
+ }
+
+ // Simplify mul instructions with a constant RHS.
+ if (isa<Constant>(Op1)) {
// Try to fold constant mul into select arguments.
if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
if (Instruction *R = FoldOpIntoSelect(I, SI))
@@ -135,8 +117,8 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
BO->getOpcode() == Instruction::SDiv)) {
Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
- // If the division is exact, X % Y is zero.
- if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
+ // If the division is exact, X % Y is zero, so we end up with X or -X.
+ if (PossiblyExactOperator *SDiv = dyn_cast<PossiblyExactOperator>(BO))
if (SDiv->isExact()) {
if (Op1BO == Op1C)
return ReplaceInstUsesWith(I, Op0BO);
@@ -194,7 +176,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
+ bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// Simplify mul instructions with a constant RHS...
@@ -304,28 +286,6 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
}
-/// This function implements the transforms on div instructions that work
-/// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
-/// used by the visitors to those instructions.
-/// @brief Transforms common to all three div instructions
-Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // undef / X -> 0 for integer.
- // undef / X -> undef for FP (the undef could be a snan).
- if (isa<UndefValue>(Op0)) {
- if (Op0->getType()->isFPOrFPVectorTy())
- return ReplaceInstUsesWith(I, Op0);
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
-
- // X / undef -> undef
- if (isa<UndefValue>(Op1))
- return ReplaceInstUsesWith(I, Op1);
-
- return 0;
-}
-
/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
@@ -333,31 +293,12 @@ Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- // (sdiv X, X) --> 1 (udiv X, X) --> 1
- if (Op0 == Op1) {
- if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
- Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
- std::vector<Constant*> Elts(Ty->getNumElements(), CI);
- return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
- }
-
- Constant *CI = ConstantInt::get(I.getType(), 1);
- return ReplaceInstUsesWith(I, CI);
- }
-
- if (Instruction *Common = commonDivTransforms(I))
- return Common;
-
// Handle cases involving: [su]div X, (select Cond, Y, Z)
// This does not apply for fdiv.
if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
return &I;
if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // div X, 1 == X
- if (RHS->equalsInt(1))
- return ReplaceInstUsesWith(I, Op0);
-
// (X / C1) / C2 -> X / (C1*C2)
if (Instruction *LHS = dyn_cast<Instruction>(Op0))
if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
@@ -365,9 +306,8 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
if (MultiplyOverflows(RHS, LHSRHS,
I.getOpcode()==Instruction::SDiv))
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- else
- return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
- ConstantExpr::getMul(RHS, LHSRHS));
+ return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
+ ConstantExpr::getMul(RHS, LHSRHS));
}
if (!RHS->isZero()) { // avoid X udiv 0
@@ -380,20 +320,13 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
}
}
- // 0 / X == 0, we don't need to preserve faults!
- if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
- if (LHS->equalsInt(0))
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // It can't be division by zero, hence it must be division by one.
- if (I.getType()->isIntegerTy(1))
- return ReplaceInstUsesWith(I, Op0);
-
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
- if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
- // div X, 1 == X
- if (X->isOne())
- return ReplaceInstUsesWith(I, Op0);
+ // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
+ Value *X = 0, *Z = 0;
+ if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) { // (X - Z) / Y; Y = Op1
+ bool isSigned = I.getOpcode() == Instruction::SDiv;
+ if ((isSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
+ (!isSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
+ return BinaryOperator::Create(I.getOpcode(), X, Op1);
}
return 0;
@@ -402,6 +335,9 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+ if (Value *V = SimplifyUDivInst(Op0, Op1, TD))
+ return ReplaceInstUsesWith(I, V);
+
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))
return Common;
@@ -410,60 +346,59 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
// X udiv 2^C -> X >> C
// Check to see if this is an unsigned division with an exact power of 2,
// if so, convert to a right shift.
- if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
- return BinaryOperator::CreateLShr(Op0,
+ if (C->getValue().isPowerOf2()) { // 0 not included in isPowerOf2
+ BinaryOperator *LShr =
+ BinaryOperator::CreateLShr(Op0,
ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
+ if (I.isExact()) LShr->setIsExact();
+ return LShr;
+ }
// X udiv C, where C >= signbit
if (C->getValue().isNegative()) {
- Value *IC = Builder->CreateICmpULT( Op0, C);
+ Value *IC = Builder->CreateICmpULT(Op0, C);
return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
ConstantInt::get(I.getType(), 1));
}
}
// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
- if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
- if (RHSI->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(RHSI->getOperand(0))) {
- const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
- if (C1.isPowerOf2()) {
- Value *N = RHSI->getOperand(1);
- const Type *NTy = N->getType();
- if (uint32_t C2 = C1.logBase2())
- N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
- return BinaryOperator::CreateLShr(Op0, N);
- }
+ { const APInt *CI; Value *N;
+ if (match(Op1, m_Shl(m_Power2(CI), m_Value(N)))) {
+ if (*CI != 1)
+ N = Builder->CreateAdd(N, ConstantInt::get(I.getType(), CI->logBase2()),
+ "tmp");
+ if (I.isExact())
+ return BinaryOperator::CreateExactLShr(Op0, N);
+ return BinaryOperator::CreateLShr(Op0, N);
}
}
// udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
// where C1&C2 are powers of two.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
- if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
- if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
- const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
- if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
- // Compute the shift amounts
- uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
- // Construct the "on true" case of the select
- Constant *TC = ConstantInt::get(Op0->getType(), TSA);
- Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
+ { Value *Cond; const APInt *C1, *C2;
+ if (match(Op1, m_Select(m_Value(Cond), m_Power2(C1), m_Power2(C2)))) {
+ // Construct the "on true" case of the select
+ Value *TSI = Builder->CreateLShr(Op0, C1->logBase2(), Op1->getName()+".t",
+ I.isExact());
- // Construct the "on false" case of the select
- Constant *FC = ConstantInt::get(Op0->getType(), FSA);
- Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
-
- // construct the select instruction and return it.
- return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
- }
- }
+ // Construct the "on false" case of the select
+ Value *FSI = Builder->CreateLShr(Op0, C2->logBase2(), Op1->getName()+".f",
+ I.isExact());
+
+ // construct the select instruction and return it.
+ return SelectInst::Create(Cond, TSI, FSI);
+ }
+ }
return 0;
}
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+ if (Value *V = SimplifySDivInst(Op0, Op1, TD))
+ return ReplaceInstUsesWith(I, V);
+
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))
return Common;
@@ -473,20 +408,17 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
if (RHS->isAllOnesValue())
return BinaryOperator::CreateNeg(Op0);
- // sdiv X, C --> ashr X, log2(C)
- if (cast<SDivOperator>(&I)->isExact() &&
- RHS->getValue().isNonNegative() &&
+ // sdiv X, C --> ashr exact X, log2(C)
+ if (I.isExact() && RHS->getValue().isNonNegative() &&
RHS->getValue().isPowerOf2()) {
Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
RHS->getValue().exactLogBase2());
- return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
+ return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
}
// -X/C --> X/-C provided the negation doesn't overflow.
if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
- if (isa<Constant>(Sub->getOperand(0)) &&
- cast<Constant>(Sub->getOperand(0))->isNullValue() &&
- Sub->hasNoSignedWrap())
+ if (match(Sub->getOperand(0), m_Zero()) && Sub->hasNoSignedWrap())
return BinaryOperator::CreateSDiv(Sub->getOperand(1),
ConstantExpr::getNeg(RHS));
}
@@ -500,9 +432,8 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
// X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
}
- ConstantInt *ShiftedInt;
- if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
- ShiftedInt->getValue().isPowerOf2()) {
+
+ if (match(Op1, m_Shl(m_Power2(), m_Value()))) {
// X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
// Safe because the only negative value (1 << Y) can take on is
// INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
@@ -516,7 +447,12 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
}
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
- return commonDivTransforms(I);
+ Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+
+ if (Value *V = SimplifyFDivInst(Op0, Op1, TD))
+ return ReplaceInstUsesWith(I, V);
+
+ return 0;
}
/// This function implements the transforms on rem instructions that work
@@ -551,6 +487,10 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
if (Instruction *common = commonRemTransforms(I))
return common;
+ // X % X == 0
+ if (Op0 == Op1)
+ return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
+
// 0 % X == 0 for integer, we don't need to preserve faults!
if (Constant *LHS = dyn_cast<Constant>(Op0))
if (LHS->isNullValue())
@@ -588,42 +528,29 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
if (Instruction *common = commonIRemTransforms(I))
return common;
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // X urem C^2 -> X and C
- // Check to see if this is an unsigned remainder with an exact power of 2,
- // if so, convert to a bitwise and.
- if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
- if (C->getValue().isPowerOf2())
- return BinaryOperator::CreateAnd(Op0, SubOne(C));
+ // X urem C^2 -> X and C-1
+ { const APInt *C;
+ if (match(Op1, m_Power2(C)))
+ return BinaryOperator::CreateAnd(Op0,
+ ConstantInt::get(I.getType(), *C-1));
}
- if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
- // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
- if (RHSI->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(RHSI->getOperand(0))) {
- if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
- Constant *N1 = Constant::getAllOnesValue(I.getType());
- Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
- return BinaryOperator::CreateAnd(Op0, Add);
- }
- }
+ // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
+ if (match(Op1, m_Shl(m_Power2(), m_Value()))) {
+ Constant *N1 = Constant::getAllOnesValue(I.getType());
+ Value *Add = Builder->CreateAdd(Op1, N1, "tmp");
+ return BinaryOperator::CreateAnd(Op0, Add);
}
- // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, C1), (and X, C2)
- // where C1&C2 are powers of two.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
- if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
- if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
- // STO == 0 and SFO == 0 handled above.
- if ((STO->getValue().isPowerOf2()) &&
- (SFO->getValue().isPowerOf2())) {
- Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
- SI->getName()+".t");
- Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
- SI->getName()+".f");
- return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
- }
- }
+ // urem X, (select Cond, 2^C1, 2^C2) -->
+ // select Cond, (and X, C1-1), (and X, C2-1)
+ // when C1&C2 are powers of two.
+ { Value *Cond; const APInt *C1, *C2;
+ if (match(Op1, m_Select(m_Value(Cond), m_Power2(C1), m_Power2(C2)))) {
+ Value *TrueAnd = Builder->CreateAnd(Op0, *C1-1, Op1->getName()+".t");
+ Value *FalseAnd = Builder->CreateAnd(Op0, *C2-1, Op1->getName()+".f");
+ return SelectInst::Create(Cond, TrueAnd, FalseAnd);
+ }
}
return 0;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index f7fc62f..297a18c 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
@@ -30,22 +31,37 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
const Type *LHSType = LHSVal->getType();
const Type *RHSType = RHSVal->getType();
+ bool isNUW = false, isNSW = false, isExact = false;
+ if (OverflowingBinaryOperator *BO =
+ dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
+ isNUW = BO->hasNoUnsignedWrap();
+ isNSW = BO->hasNoSignedWrap();
+ } else if (PossiblyExactOperator *PEO =
+ dyn_cast<PossiblyExactOperator>(FirstInst))
+ isExact = PEO->isExact();
+
// Scan to see if all operands are the same opcode, and all have one use.
for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
// Verify type of the LHS matches so we don't fold cmp's of different
- // types or GEP's with different index types.
+ // types.
I->getOperand(0)->getType() != LHSType ||
I->getOperand(1)->getType() != RHSType)
return 0;
// If they are CmpInst instructions, check their predicates
- if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
- if (cast<CmpInst>(I)->getPredicate() !=
- cast<CmpInst>(FirstInst)->getPredicate())
+ if (CmpInst *CI = dyn_cast<CmpInst>(I))
+ if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
return 0;
+ if (isNUW)
+ isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
+ if (isNSW)
+ isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ if (isExact)
+ isExact = cast<PossiblyExactOperator>(I)->isExact();
+
// Keep track of which operand needs a phi node.
if (I->getOperand(0) != LHSVal) LHSVal = 0;
if (I->getOperand(1) != RHSVal) RHSVal = 0;
@@ -96,11 +112,17 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
}
}
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
- return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
- CmpInst *CIOp = cast<CmpInst>(FirstInst);
- return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
- LHSVal, RHSVal);
+ if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
+ return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
+ LHSVal, RHSVal);
+
+ BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
+ BinaryOperator *NewBinOp =
+ BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
+ if (isNUW) NewBinOp->setHasNoUnsignedWrap();
+ if (isNSW) NewBinOp->setHasNoSignedWrap();
+ if (isExact) NewBinOp->setIsExact();
+ return NewBinOp;
}
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
@@ -117,6 +139,8 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
// especially bad when the PHIs are in the header of a loop.
bool NeededPhi = false;
+ bool AllInBounds = true;
+
// Scan to see if all operands are the same opcode, and all have one use.
for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
@@ -124,6 +148,8 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
GEP->getNumOperands() != FirstInst->getNumOperands())
return 0;
+ AllInBounds &= GEP->isInBounds();
+
// Keep track of whether or not all GEPs are of alloca pointers.
if (AllBasePointersAreAllocas &&
(!isa<AllocaInst>(GEP->getOperand(0)) ||
@@ -201,11 +227,11 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
}
Value *Base = FixedOperands[0];
- return cast<GEPOperator>(FirstInst)->isInBounds() ?
- GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
- FixedOperands.end()) :
+ GetElementPtrInst *NewGEP =
GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
FixedOperands.end());
+ if (AllInBounds) NewGEP->setIsInBounds();
+ return NewGEP;
}
@@ -368,6 +394,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// code size and simplifying code.
Constant *ConstantOp = 0;
const Type *CastSrcTy = 0;
+ bool isNUW = false, isNSW = false, isExact = false;
if (isa<CastInst>(FirstInst)) {
CastSrcTy = FirstInst->getOperand(0)->getType();
@@ -384,6 +411,14 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
if (ConstantOp == 0)
return FoldPHIArgBinOpIntoPHI(PN);
+
+ if (OverflowingBinaryOperator *BO =
+ dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
+ isNUW = BO->hasNoUnsignedWrap();
+ isNSW = BO->hasNoSignedWrap();
+ } else if (PossiblyExactOperator *PEO =
+ dyn_cast<PossiblyExactOperator>(FirstInst))
+ isExact = PEO->isExact();
} else {
return 0; // Cannot fold this operation.
}
@@ -399,6 +434,13 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
} else if (I->getOperand(1) != ConstantOp) {
return 0;
}
+
+ if (isNUW)
+ isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
+ if (isNSW)
+ isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ if (isExact)
+ isExact = cast<PossiblyExactOperator>(I)->isExact();
}
// Okay, they are all the same operation. Create a new PHI node of the
@@ -433,8 +475,13 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
- return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
+ BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
+ if (isNUW) BinOp->setHasNoUnsignedWrap();
+ if (isNSW) BinOp->setHasNoSignedWrap();
+ if (isExact) BinOp->setIsExact();
+ return BinOp;
+ }
CmpInst *CIOp = cast<CmpInst>(FirstInst);
return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
@@ -731,8 +778,8 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// If LCSSA is around, don't mess with Phi nodes
if (MustPreserveLCSSA) return 0;
-
- if (Value *V = PN.hasConstantValue())
+
+ if (Value *V = SimplifyInstruction(&PN, TD))
return ReplaceInstUsesWith(PN, V);
// If all PHI operands are the same operation, pull them through the PHI,
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index c44fe9d..97abc76 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -24,14 +24,14 @@ static SelectPatternFlavor
MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (SI == 0) return SPF_UNKNOWN;
-
+
ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
if (ICI == 0) return SPF_UNKNOWN;
-
+
LHS = ICI->getOperand(0);
RHS = ICI->getOperand(1);
-
- // (icmp X, Y) ? X : Y
+
+ // (icmp X, Y) ? X : Y
if (SI->getTrueValue() == ICI->getOperand(0) &&
SI->getFalseValue() == ICI->getOperand(1)) {
switch (ICI->getPredicate()) {
@@ -46,8 +46,8 @@ MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
case ICmpInst::ICMP_SLE: return SPF_SMIN;
}
}
-
- // (icmp X, Y) ? Y : X
+
+ // (icmp X, Y) ? Y : X
if (SI->getTrueValue() == ICI->getOperand(1) &&
SI->getFalseValue() == ICI->getOperand(0)) {
switch (ICI->getPredicate()) {
@@ -62,9 +62,9 @@ MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
case ICmpInst::ICMP_SLE: return SPF_SMAX;
}
}
-
+
// TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
-
+
return SPF_UNKNOWN;
}
@@ -136,7 +136,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
FI->getOperand(0), SI.getName()+".v");
InsertNewInstBefore(NewSI, SI);
- return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
+ return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
TI->getType());
}
@@ -195,7 +195,10 @@ static bool isSelect01(Constant *C1, Constant *C2) {
ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
if (!C2I)
return false;
- return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
+ if (!C1I->isZero() && !C2I->isZero()) // One side must be zero.
+ return false;
+ return C1I->isOne() || C1I->isAllOnesValue() ||
+ C2I->isOne() || C2I->isAllOnesValue();
}
/// FoldSelectIntoOp - Try fold the select into one of the operands to
@@ -219,7 +222,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
Constant *C = GetSelectFoldableConstant(TVI);
Value *OOp = TVI->getOperand(2-OpToFold);
// Avoid creating select between 2 constants unless it's selecting
- // between 0 and 1.
+ // between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
InsertNewInstBefore(NewSel, SI);
@@ -248,7 +251,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
Constant *C = GetSelectFoldableConstant(FVI);
Value *OOp = FVI->getOperand(2-OpToFold);
// Avoid creating select between 2 constants unless it's selecting
- // between 0 and 1.
+ // between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
InsertNewInstBefore(NewSel, SI);
@@ -278,52 +281,95 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
Value *FalseVal = SI.getFalseValue();
// Check cases where the comparison is with a constant that
- // can be adjusted to fit the min/max idiom. We may edit ICI in
- // place here, so make sure the select is the only user.
+ // can be adjusted to fit the min/max idiom. We may move or edit ICI
+ // here, so make sure the select is the only user.
if (ICI->hasOneUse())
if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
+ // X < MIN ? T : F --> F
+ if ((Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_ULT)
+ && CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
+ return ReplaceInstUsesWith(SI, FalseVal);
+ // X > MAX ? T : F --> F
+ else if ((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_UGT)
+ && CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
+ return ReplaceInstUsesWith(SI, FalseVal);
switch (Pred) {
default: break;
case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_SLT: {
- // X < MIN ? T : F --> F
- if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
- return ReplaceInstUsesWith(SI, FalseVal);
- // X < C ? X : C-1 --> X > C-1 ? C-1 : X
- Constant *AdjustedRHS =
- ConstantInt::get(CI->getContext(), CI->getValue()-1);
- if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
- (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
- Pred = ICmpInst::getSwappedPredicate(Pred);
- CmpRHS = AdjustedRHS;
- std::swap(FalseVal, TrueVal);
- ICI->setPredicate(Pred);
- ICI->setOperand(1, CmpRHS);
- SI.setOperand(1, TrueVal);
- SI.setOperand(2, FalseVal);
- Changed = true;
- }
- break;
- }
+ case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT: {
- // X > MAX ? T : F --> F
- if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
- return ReplaceInstUsesWith(SI, FalseVal);
+ // These transformations only work for selects over integers.
+ const IntegerType *SelectTy = dyn_cast<IntegerType>(SI.getType());
+ if (!SelectTy)
+ break;
+
+ Constant *AdjustedRHS;
+ if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
+ AdjustedRHS = ConstantInt::get(CI->getContext(), CI->getValue() + 1);
+ else // (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
+ AdjustedRHS = ConstantInt::get(CI->getContext(), CI->getValue() - 1);
+
// X > C ? X : C+1 --> X < C+1 ? C+1 : X
- Constant *AdjustedRHS =
- ConstantInt::get(CI->getContext(), CI->getValue()+1);
+ // X < C ? X : C-1 --> X > C-1 ? C-1 : X
if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
- (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
- Pred = ICmpInst::getSwappedPredicate(Pred);
- CmpRHS = AdjustedRHS;
- std::swap(FalseVal, TrueVal);
- ICI->setPredicate(Pred);
- ICI->setOperand(1, CmpRHS);
- SI.setOperand(1, TrueVal);
- SI.setOperand(2, FalseVal);
- Changed = true;
- }
+ (CmpLHS == FalseVal && AdjustedRHS == TrueVal))
+ ; // Nothing to do here. Values match without any sign/zero extension.
+
+ // Types do not match. Instead of calculating this with mixed types
+ // promote all to the larger type. This enables scalar evolution to
+ // analyze this expression.
+ else if (CmpRHS->getType()->getScalarSizeInBits()
+ < SelectTy->getBitWidth()) {
+ Constant *sextRHS = ConstantExpr::getSExt(AdjustedRHS, SelectTy);
+
+ // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
+ // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
+ // X = sext x; x >u c ? X : C+1 --> X = sext x; X <u C+1 ? C+1 : X
+ // X = sext x; x <u c ? X : C-1 --> X = sext x; X >u C-1 ? C-1 : X
+ if (match(TrueVal, m_SExt(m_Specific(CmpLHS))) &&
+ sextRHS == FalseVal) {
+ CmpLHS = TrueVal;
+ AdjustedRHS = sextRHS;
+ } else if (match(FalseVal, m_SExt(m_Specific(CmpLHS))) &&
+ sextRHS == TrueVal) {
+ CmpLHS = FalseVal;
+ AdjustedRHS = sextRHS;
+ } else if (ICI->isUnsigned()) {
+ Constant *zextRHS = ConstantExpr::getZExt(AdjustedRHS, SelectTy);
+ // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
+ // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
+ // zext + signed compare cannot be changed:
+ // 0xff <s 0x00, but 0x00ff >s 0x0000
+ if (match(TrueVal, m_ZExt(m_Specific(CmpLHS))) &&
+ zextRHS == FalseVal) {
+ CmpLHS = TrueVal;
+ AdjustedRHS = zextRHS;
+ } else if (match(FalseVal, m_ZExt(m_Specific(CmpLHS))) &&
+ zextRHS == TrueVal) {
+ CmpLHS = FalseVal;
+ AdjustedRHS = zextRHS;
+ } else
+ break;
+ } else
+ break;
+ } else
+ break;
+
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ CmpRHS = AdjustedRHS;
+ std::swap(FalseVal, TrueVal);
+ ICI->setPredicate(Pred);
+ ICI->setOperand(0, CmpLHS);
+ ICI->setOperand(1, CmpRHS);
+ SI.setOperand(1, TrueVal);
+ SI.setOperand(2, FalseVal);
+
+ // Move ICI instruction right before the select instruction. Otherwise
+ // the sext/zext value may be defined after the ICI instruction uses it.
+ ICI->moveBefore(&SI);
+
+ Changed = true;
break;
}
}
@@ -399,28 +445,28 @@ static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
// can always be mapped.
const Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return true;
-
+
// If V is a PHI node defined in the same block as the condition PHI, we can
// map the arguments.
const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
-
+
if (const PHINode *VP = dyn_cast<PHINode>(I))
if (VP->getParent() == CondPHI->getParent())
return true;
-
+
// Otherwise, if the PHI and select are defined in the same block and if V is
// defined in a different block, then we can transform it.
if (SI.getParent() == CondPHI->getParent() &&
I->getParent() != CondPHI->getParent())
return true;
-
+
// Otherwise we have a 'hard' case and we can't tell without doing more
// detailed dominator based analysis, punt.
return false;
}
/// FoldSPFofSPF - We have an SPF (e.g. a min or max) of an SPF of the form:
-/// SPF2(SPF1(A, B), C)
+/// SPF2(SPF1(A, B), C)
Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
SelectPatternFlavor SPF1,
Value *A, Value *B,
@@ -431,7 +477,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
// MIN(MIN(a, b), a) -> MIN(a, b)
if (SPF1 == SPF2)
return ReplaceInstUsesWith(Outer, Inner);
-
+
// MAX(MIN(a, b), a) -> a
// MIN(MAX(a, b), a) -> a
if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
@@ -440,13 +486,82 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
(SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
return ReplaceInstUsesWith(Outer, C);
}
-
+
// TODO: MIN(MIN(A, 23), 97)
return 0;
}
+/// foldSelectICmpAnd - If one of the constants is zero (we know they can't
+/// both be) and we have an icmp instruction with zero, and we have an 'and'
+/// with the non-constant value and a power of two we can turn the select
+/// into a shift on the result of the 'and'.
+static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
+ ConstantInt *FalseVal,
+ InstCombiner::BuilderTy *Builder) {
+ const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
+ if (!IC || !IC->isEquality())
+ return 0;
+
+ if (ConstantInt *C = dyn_cast<ConstantInt>(IC->getOperand(1)))
+ if (!C->isZero())
+ return 0;
+ ConstantInt *AndRHS;
+ Value *LHS = IC->getOperand(0);
+ if (LHS->getType() != SI.getType() ||
+ !match(LHS, m_And(m_Value(), m_ConstantInt(AndRHS))))
+ return 0;
+
+ // If both select arms are non-zero see if we have a select of the form
+ // 'x ? 2^n + C : C'. Then we can offset both arms by C, use the logic
+ // for 'x ? 2^n : 0' and fix the thing up at the end.
+ ConstantInt *Offset = 0;
+ if (!TrueVal->isZero() && !FalseVal->isZero()) {
+ if ((TrueVal->getValue() - FalseVal->getValue()).isPowerOf2())
+ Offset = FalseVal;
+ else if ((FalseVal->getValue() - TrueVal->getValue()).isPowerOf2())
+ Offset = TrueVal;
+ else
+ return 0;
+
+ // Adjust TrueVal and FalseVal to the offset.
+ TrueVal = ConstantInt::get(Builder->getContext(),
+ TrueVal->getValue() - Offset->getValue());
+ FalseVal = ConstantInt::get(Builder->getContext(),
+ FalseVal->getValue() - Offset->getValue());
+ }
+
+ // Make sure the mask in the 'and' and one of the select arms is a power of 2.
+ if (!AndRHS->getValue().isPowerOf2() ||
+ (!TrueVal->getValue().isPowerOf2() &&
+ !FalseVal->getValue().isPowerOf2()))
+ return 0;
+
+ // Determine which shift is needed to transform result of the 'and' into the
+ // desired result.
+ ConstantInt *ValC = !TrueVal->isZero() ? TrueVal : FalseVal;
+ unsigned ValZeros = ValC->getValue().logBase2();
+ unsigned AndZeros = AndRHS->getValue().logBase2();
+
+ Value *V = LHS;
+ if (ValZeros > AndZeros)
+ V = Builder->CreateShl(V, ValZeros - AndZeros);
+ else if (ValZeros < AndZeros)
+ V = Builder->CreateLShr(V, AndZeros - ValZeros);
+
+ // Okay, now we know that everything is set up, we just don't know whether we
+ // have a icmp_ne or icmp_eq and whether the true or false val is the zero.
+ bool ShouldNotVal = !TrueVal->isZero();
+ ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
+ if (ShouldNotVal)
+ V = Builder->CreateXor(V, ValC);
+
+ // Apply an offset if needed.
+ if (Offset)
+ V = Builder->CreateAdd(V, Offset);
+ return V;
+}
Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *CondVal = SI.getCondition();
@@ -478,7 +593,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
"not."+CondVal->getName()), SI);
return BinaryOperator::CreateOr(NotCond, TrueVal);
}
-
+
// select a, b, a -> a&b
// select a, a, b -> a|b
if (CondVal == TrueVal)
@@ -497,7 +612,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// select C, -1, 0 -> sext C to int
if (FalseValC->isZero() && TrueValC->isAllOnesValue())
return new SExtInst(CondVal, SI.getType());
-
+
// select C, 0, 1 -> zext !C to int
if (TrueValC->isZero() && FalseValC->getValue() == 1) {
Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
@@ -509,32 +624,9 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
return new SExtInst(NotCond, SI.getType());
}
-
- if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
- // If one of the constants is zero (we know they can't both be) and we
- // have an icmp instruction with zero, and we have an 'and' with the
- // non-constant value, eliminate this whole mess. This corresponds to
- // cases like this: ((X & 27) ? 27 : 0)
- if (TrueValC->isZero() || FalseValC->isZero())
- if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
- cast<Constant>(IC->getOperand(1))->isNullValue())
- if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
- if (ICA->getOpcode() == Instruction::And &&
- isa<ConstantInt>(ICA->getOperand(1)) &&
- (ICA->getOperand(1) == TrueValC ||
- ICA->getOperand(1) == FalseValC) &&
- cast<ConstantInt>(ICA->getOperand(1))->getValue().isPowerOf2()) {
- // Okay, now we know that everything is set up, we just don't
- // know whether we have a icmp_ne or icmp_eq and whether the
- // true or false val is the zero.
- bool ShouldNotVal = !TrueValC->isZero();
- ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
- Value *V = ICA;
- if (ShouldNotVal)
- V = Builder->CreateXor(V, ICA->getOperand(1));
- return ReplaceInstUsesWith(SI, V);
- }
- }
+
+ if (Value *V = foldSelectICmpAnd(SI, TrueValC, FalseValC, Builder))
+ return ReplaceInstUsesWith(SI, V);
}
// See if we are selecting two values based on a comparison of the two values.
@@ -542,7 +634,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
// Transform (X == Y) ? X : Y -> Y
if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
- // This is not safe in general for floating point:
+ // This is not safe in general for floating point:
// consider X== -0, Y== +0.
// It becomes safe if either operand is a nonzero constant.
ConstantFP *CFPt, *CFPf;
@@ -554,7 +646,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// Transform (X une Y) ? X : Y -> X
if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {
- // This is not safe in general for floating point:
+ // This is not safe in general for floating point:
// consider X== -0, Y== +0.
// It becomes safe if either operand is a nonzero constant.
ConstantFP *CFPt, *CFPf;
@@ -569,7 +661,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
} else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
// Transform (X == Y) ? Y : X -> X
if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
- // This is not safe in general for floating point:
+ // This is not safe in general for floating point:
// consider X== -0, Y== +0.
// It becomes safe if either operand is a nonzero constant.
ConstantFP *CFPt, *CFPf;
@@ -581,7 +673,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// Transform (X une Y) ? Y : X -> Y
if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {
- // This is not safe in general for floating point:
+ // This is not safe in general for floating point:
// consider X== -0, Y== +0.
// It becomes safe if either operand is a nonzero constant.
ConstantFP *CFPt, *CFPf;
@@ -639,6 +731,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *NegVal; // Compute -Z
if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
NegVal = ConstantExpr::getNeg(C);
+ } else if (SI.getType()->isFloatingPointTy()) {
+ NegVal = InsertNewInstBefore(
+ BinaryOperator::CreateFNeg(SubOp->getOperand(1),
+ "tmp"), SI);
} else {
NegVal = InsertNewInstBefore(
BinaryOperator::CreateNeg(SubOp->getOperand(1),
@@ -654,7 +750,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
NewFalseOp, SI.getName() + ".p");
NewSel = InsertNewInstBefore(NewSel, SI);
- return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
+ if (SI.getType()->isFloatingPointTy())
+ return BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
+ else
+ return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
}
}
}
@@ -663,7 +762,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SI.getType()->isIntegerTy()) {
if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
return FoldI;
-
+
// MAX(MAX(a, b), a) -> MAX(a, b)
// MIN(MIN(a, b), a) -> MIN(a, b)
// MAX(MIN(a, b), a) -> a
@@ -686,13 +785,26 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// See if we can fold the select into a phi node if the condition is a select.
- if (isa<PHINode>(SI.getCondition()))
+ if (isa<PHINode>(SI.getCondition()))
    // The true/false values have to be live in the PHI predecessors' blocks.
if (CanSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
CanSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
if (Instruction *NV = FoldOpIntoPhi(SI))
return NV;
+ if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
+ if (TrueSI->getCondition() == CondVal) {
+ SI.setOperand(1, TrueSI->getTrueValue());
+ return &SI;
+ }
+ }
+ if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
+ if (FalseSI->getCondition() == CondVal) {
+ SI.setOperand(2, FalseSI->getFalseValue());
+ return &SI;
+ }
+ }
+
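
When the inner select shares the outer condition, only one of its arms is
reachable, which is what the two folds above exploit. A standalone exhaustive
check over small values (not part of the patch):

    #include <cassert>

    int main() {
      for (int c = 0; c <= 1; ++c)
        for (int a = 0; a < 4; ++a)
          for (int b = 0; b < 4; ++b)
            for (int d = 0; d < 4; ++d) {
              assert((c ? (c ? a : b) : d) == (c ? a : d)); // fold true arm
              assert((c ? d : (c ? a : b)) == (c ? d : b)); // fold false arm
            }
      return 0;
    }
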
if (BinaryOperator::isNot(CondVal)) {
SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
SI.setOperand(1, FalseVal);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 27716b8..a7f8005 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -13,6 +13,7 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
@@ -21,25 +22,6 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- // shl X, 0 == X and shr X, 0 == X
- // shl 0, X == 0 and shr 0, X == 0
- if (Op1 == Constant::getNullValue(Op1->getType()) ||
- Op0 == Constant::getNullValue(Op0->getType()))
- return ReplaceInstUsesWith(I, Op0);
-
- if (isa<UndefValue>(Op0)) {
- if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
- return ReplaceInstUsesWith(I, Op0);
- else // undef << X -> 0, undef >>u X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
- if (isa<UndefValue>(Op1)) {
- if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
- return ReplaceInstUsesWith(I, Op0);
- else // X << undef, X >>u undef -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
-
// See if we can fold away this shift.
if (SimplifyDemandedInstructionBits(I))
return &I;
@@ -53,6 +35,20 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
return Res;
+
+  // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2,
+  // because shifts by negative values (which could occur if A were
+  // negative) are undefined.
+ Value *A; const APInt *B;
+ if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
+ // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
+    // demand the sign bit (and many others) here?
+ Value *Rem = Builder->CreateAnd(A, ConstantInt::get(I.getType(), *B-1),
+ Op1->getName());
+ I.setOperand(1, Rem);
+ return &I;
+ }
+
return 0;
}
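
The srem-to-and rewrite relies on the usual power-of-two identity for
non-negative operands; a negative operand would produce a negative shift
amount, which is undefined for the enclosing shift anyway. A quick standalone
check (not part of the patch):

    #include <cassert>

    int main() {
      for (int a = 0; a < (1 << 12); ++a)
        assert(a % 16 == (a & 15)); // A srem B == A & (B-1), B a power of 2
      return 0;
    }
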
@@ -81,7 +77,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// if the needed bits are already zero in the input. This allows us to reuse
// the value which means that we don't care if the shift has multiple uses.
// TODO: Handle opposite shift by exact value.
- ConstantInt *CI;
+ ConstantInt *CI = 0;
if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
(!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
if (CI->getZExtValue() == NumBits) {
@@ -131,9 +127,9 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
// profitable unless we know the and'd out bits are already zero.
if (CI->getZExtValue() > NumBits) {
- unsigned HighBits = CI->getZExtValue() - NumBits;
+ unsigned LowBits = TypeWidth - CI->getZExtValue();
if (MaskedValueIsZero(I->getOperand(0),
- APInt::getHighBitsSet(TypeWidth, HighBits)))
+ APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
return true;
}
@@ -157,7 +153,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
if (CI->getZExtValue() > NumBits) {
unsigned LowBits = CI->getZExtValue() - NumBits;
if (MaskedValueIsZero(I->getOperand(0),
- APInt::getLowBitsSet(TypeWidth, LowBits)))
+ APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits))
return true;
}
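
Both corrected masks select the NumBits bits of the input that the rewritten
shift would otherwise smear into the result. A standalone i8 check with
CI = 5 and NumBits = 2, where the mask works out to
getLowBitsSet(8, 2) << 3 = 0x18 in both directions (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned v = 0; v < 256; ++v) {
        uint8_t x = (uint8_t)v;
        if ((x & 0x18) == 0) {  // bits [3,5) of x known zero
          // shl(5) + lshr(2) -> shl(3), no 'and' needed
          assert((uint8_t)((uint8_t)(x << 5) >> 2) == (uint8_t)(x << 3));
          // lshr(5) + shl(2) -> lshr(3), no 'and' needed
          assert((uint8_t)((x >> 5) << 2) == (uint8_t)(x >> 3));
        }
      }
      return 0;
    }
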
@@ -622,16 +618,49 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
}
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
- return commonShiftTransforms(I);
+ if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
+ I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
+ TD))
+ return ReplaceInstUsesWith(I, V);
+
+ if (Instruction *V = commonShiftTransforms(I))
+ return V;
+
+ if (ConstantInt *Op1C = dyn_cast<ConstantInt>(I.getOperand(1))) {
+ unsigned ShAmt = Op1C->getZExtValue();
+
+    // If the shifted-out value is known-zero, then this is an NUW shift.
+ if (!I.hasNoUnsignedWrap() &&
+ MaskedValueIsZero(I.getOperand(0),
+ APInt::getHighBitsSet(Op1C->getBitWidth(), ShAmt))) {
+ I.setHasNoUnsignedWrap();
+ return &I;
+ }
+
+    // If the shifted-out value is all sign bits, this is an NSW shift.
+ if (!I.hasNoSignedWrap() &&
+ ComputeNumSignBits(I.getOperand(0)) > ShAmt) {
+ I.setHasNoSignedWrap();
+ return &I;
+ }
+ }
+
+ return 0;
}
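
The NUW inference states that a shift whose shifted-out high bits are already
zero is reversible. A minimal i8 sketch (not part of the patch) for
ShAmt = 3:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned ShAmt = 3;
      for (unsigned v = 0; v < 256; ++v) {
        uint8_t x = (uint8_t)v;
        if ((uint8_t)(x >> (8 - ShAmt)) == 0) {    // high ShAmt bits zero
          uint8_t shifted = (uint8_t)(x << ShAmt); // no unsigned wrap
          assert((uint8_t)(shifted >> ShAmt) == x);
        }
      }
      return 0;
    }
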
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
+ if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1),
+ I.isExact(), TD))
+ return ReplaceInstUsesWith(I, V);
+
if (Instruction *R = commonShiftTransforms(I))
return R;
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1))
+ if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ unsigned ShAmt = Op1C->getZExtValue();
+
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
// ctlz.i32(x)>>5 --> zext(x == 0)
@@ -640,7 +669,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
if ((II->getIntrinsicID() == Intrinsic::ctlz ||
II->getIntrinsicID() == Intrinsic::cttz ||
II->getIntrinsicID() == Intrinsic::ctpop) &&
- isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == Op1C->getZExtValue()){
+ isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt) {
bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
@@ -648,29 +677,37 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
}
}
+ // If the shifted-out value is known-zero, then this is an exact shift.
+ if (!I.isExact() &&
+ MaskedValueIsZero(Op0,APInt::getLowBitsSet(Op1C->getBitWidth(),ShAmt))){
+ I.setIsExact();
+ return &I;
+ }
+ }
+
return 0;
}
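
An "exact" lshr promises the shifted-out low bits are zero, so the shift
loses no information. A standalone check (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned Sh = 2;
      for (uint32_t x = 0; x < 1024; ++x)
        if ((x & ((1u << Sh) - 1)) == 0)   // shifted-out bits known zero
          assert(((x >> Sh) << Sh) == x);  // exact: round-trip loses nothing
      return 0;
    }
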
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
+ if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1),
+ I.isExact(), TD))
+ return ReplaceInstUsesWith(I, V);
+
if (Instruction *R = commonShiftTransforms(I))
return R;
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0)) {
- // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
- if (CSI->isAllOnesValue())
- return ReplaceInstUsesWith(I, CSI);
- }
-
+
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ unsigned ShAmt = Op1C->getZExtValue();
+
// If the input is a SHL by the same constant (ashr (shl X, C), C), then we
// have a sign-extend idiom.
Value *X;
if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
- // If the input value is known to already be sign extended enough, delete
- // the extension.
- if (ComputeNumSignBits(X) > Op1C->getZExtValue())
+ // If the left shift is just shifting out partial signbits, delete the
+ // extension.
+ if (cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
return ReplaceInstUsesWith(I, X);
// If the input is an extension from the shifted amount value, e.g.
@@ -685,6 +722,13 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
return new SExtInst(ZI->getOperand(0), ZI->getType());
}
}
+
+ // If the shifted-out value is known-zero, then this is an exact shift.
+ if (!I.isExact() &&
+ MaskedValueIsZero(Op0,APInt::getLowBitsSet(Op1C->getBitWidth(),ShAmt))){
+ I.setIsExact();
+ return &I;
+ }
}
// See if we can turn a signed shr into an unsigned shr.
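
The simplified NSW test replaces the ComputeNumSignBits query: an NSW shl
only shifts out copies of the sign bit, so the matching ashr restores the
original value exactly. A standalone i8 sketch (not part of the patch; it
assumes the usual arithmetic behaviour of >> on negative signed values):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int v = -128; v < 128; ++v) {
        int8_t x = (int8_t)v;
        int wide = v * 8;                   // x << 3, computed without wrap
        if (wide >= -128 && wide <= 127) {  // the i8 shl would carry NSW
          int8_t y = (int8_t)wide;
          assert((int8_t)(y >> 3) == x);    // ashr (shl x, 3), 3 == x
        }
      }
      return 0;
    }
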
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index adf7a76..bda8cea 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -34,7 +34,7 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
if (!OpC) return false;
// If there are no bits set that aren't demanded, nothing to do.
- Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
+ Demanded = Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
if ((~Demanded & OpC->getValue()) == 0)
return false;
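
The remaining hunks in this file track an APInt API change: zext/trunc (and
zextOrTrunc) became value-returning instead of mutating in place, and the bit
mutators were renamed (set -> setBit, clear -> clearBit/clearAllBits). A
sketch of the new usage (illustrative only, assuming LLVM headers of this
vintage):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void demo() {
      APInt Mask(8, 0x0f);
      Mask = Mask.zext(16); // value-returning; the in-place zext is gone
      Mask.setBit(15);      // renamed from set(unsigned)
      Mask.clearBit(0);     // renamed from clear(unsigned)
      Mask = Mask.trunc(8); // value-returning, like zext
    }
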
@@ -121,13 +121,13 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
if (isa<ConstantPointerNull>(V)) {
// We know all of the bits for a constant!
- KnownOne.clear();
+ KnownOne.clearAllBits();
KnownZero = DemandedMask;
return 0;
}
- KnownZero.clear();
- KnownOne.clear();
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
if (DemandedMask == 0) { // Not demanding any bits from V.
if (isa<UndefValue>(V))
return 0;
@@ -388,15 +388,15 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
case Instruction::Trunc: {
unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
- DemandedMask.zext(truncBf);
- KnownZero.zext(truncBf);
- KnownOne.zext(truncBf);
+ DemandedMask = DemandedMask.zext(truncBf);
+ KnownZero = KnownZero.zext(truncBf);
+ KnownOne = KnownOne.zext(truncBf);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
return I;
- DemandedMask.trunc(BitWidth);
- KnownZero.trunc(BitWidth);
- KnownOne.trunc(BitWidth);
+ DemandedMask = DemandedMask.trunc(BitWidth);
+ KnownZero = KnownZero.trunc(BitWidth);
+ KnownOne = KnownOne.trunc(BitWidth);
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
break;
}
@@ -426,15 +426,15 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
- DemandedMask.trunc(SrcBitWidth);
- KnownZero.trunc(SrcBitWidth);
- KnownOne.trunc(SrcBitWidth);
+ DemandedMask = DemandedMask.trunc(SrcBitWidth);
+ KnownZero = KnownZero.trunc(SrcBitWidth);
+ KnownOne = KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
return I;
- DemandedMask.zext(BitWidth);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ DemandedMask = DemandedMask.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// The top bits are known to be zero.
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
@@ -451,17 +451,17 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If any of the sign extended bits are demanded, we know that the sign
// bit is demanded.
if ((NewBits & DemandedMask) != 0)
- InputDemandedBits.set(SrcBitWidth-1);
+ InputDemandedBits.setBit(SrcBitWidth-1);
- InputDemandedBits.trunc(SrcBitWidth);
- KnownZero.trunc(SrcBitWidth);
- KnownOne.trunc(SrcBitWidth);
+ InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
+ KnownZero = KnownZero.trunc(SrcBitWidth);
+ KnownOne = KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
KnownZero, KnownOne, Depth+1))
return I;
- InputDemandedBits.zext(BitWidth);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
+ InputDemandedBits = InputDemandedBits.zext(BitWidth);
+ KnownZero = KnownZero.zext(BitWidth);
+ KnownOne = KnownOne.zext(BitWidth);
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
@@ -576,8 +576,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
case Instruction::Shl:
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
+ uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
+
+ // If the shift is NUW/NSW, then it does demand the high bits.
+ ShlOperator *IOp = cast<ShlOperator>(I);
+ if (IOp->hasNoSignedWrap())
+ DemandedMaskIn |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
+ else if (IOp->hasNoUnsignedWrap())
+ DemandedMaskIn |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
+
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
return I;
@@ -592,10 +600,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
case Instruction::LShr:
// For a logical shift right
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
+ uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Unsigned shift right.
APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
+
+ // If the shift is exact, then it does demand the low bits (and knows that
+ // they are zero).
+ if (cast<LShrOperator>(I)->isExact())
+ DemandedMaskIn |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
+
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
return I;
@@ -627,14 +641,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I->getOperand(0);
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
+ uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Signed shift right.
APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
// If any of the "high bits" are demanded, we should set the sign bit as
// demanded.
if (DemandedMask.countLeadingZeros() <= ShiftAmt)
- DemandedMaskIn.set(BitWidth-1);
+ DemandedMaskIn.setBit(BitWidth-1);
+
+ // If the shift is exact, then it does demand the low bits (and knows that
+ // they are zero).
+ if (cast<AShrOperator>(I)->isExact())
+ DemandedMaskIn |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
+
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
return I;
@@ -793,10 +813,10 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
for (unsigned i = 0; i != VWidth; ++i)
if (!DemandedElts[i]) { // If not demanded, set to undef.
Elts.push_back(Undef);
- UndefElts.set(i);
+ UndefElts.setBit(i);
} else if (isa<UndefValue>(CV->getOperand(i))) { // Already undef.
Elts.push_back(Undef);
- UndefElts.set(i);
+ UndefElts.setBit(i);
} else { // Otherwise, defined.
Elts.push_back(CV->getOperand(i));
}
@@ -879,13 +899,13 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// Otherwise, the element inserted overwrites whatever was there, so the
// input demanded set is simpler than the output set.
APInt DemandedElts2 = DemandedElts;
- DemandedElts2.clear(IdxNo);
+ DemandedElts2.clearBit(IdxNo);
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
UndefElts, Depth+1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
// The inserted element is defined.
- UndefElts.clear(IdxNo);
+ UndefElts.clearBit(IdxNo);
break;
}
case Instruction::ShuffleVector: {
@@ -900,9 +920,9 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
assert(MaskVal < LHSVWidth * 2 &&
"shufflevector mask index out of range!");
if (MaskVal < LHSVWidth)
- LeftDemanded.set(MaskVal);
+ LeftDemanded.setBit(MaskVal);
else
- RightDemanded.set(MaskVal - LHSVWidth);
+ RightDemanded.setBit(MaskVal - LHSVWidth);
}
}
}
@@ -921,16 +941,16 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
for (unsigned i = 0; i < VWidth; i++) {
unsigned MaskVal = Shuffle->getMaskValue(i);
if (MaskVal == -1u) {
- UndefElts.set(i);
+ UndefElts.setBit(i);
} else if (MaskVal < LHSVWidth) {
if (UndefElts4[MaskVal]) {
NewUndefElts = true;
- UndefElts.set(i);
+ UndefElts.setBit(i);
}
} else {
if (UndefElts3[MaskVal - LHSVWidth]) {
NewUndefElts = true;
- UndefElts.set(i);
+ UndefElts.setBit(i);
}
}
}
@@ -973,7 +993,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
Ratio = VWidth/InVWidth;
for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
if (DemandedElts[OutIdx])
- InputDemandedElts.set(OutIdx/Ratio);
+ InputDemandedElts.setBit(OutIdx/Ratio);
}
} else {
// Untested so far.
@@ -985,7 +1005,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
Ratio = InVWidth/VWidth;
for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
if (DemandedElts[InIdx/Ratio])
- InputDemandedElts.set(InIdx);
+ InputDemandedElts.setBit(InIdx);
}
// div/rem demand all inputs, because they don't want divide by zero.
@@ -1004,7 +1024,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// undef.
for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
if (UndefElts2[OutIdx/Ratio])
- UndefElts.set(OutIdx);
+ UndefElts.setBit(OutIdx);
} else if (VWidth < InVWidth) {
llvm_unreachable("Unimp");
// If there are more elements in the source than there are in the result,
@@ -1013,7 +1033,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
if (!UndefElts2[InIdx]) // Not undef?
- UndefElts.clear(InIdx/Ratio); // Clear undef bit.
+ UndefElts.clearBit(InIdx/Ratio); // Clear undef bit.
}
break;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index a58124d..5caa12d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
/// is to leave as a vector operation.
static bool CheapToScalarize(Value *V, bool isConstant) {
- if (isa<ConstantAggregateZero>(V))
+ if (isa<ConstantAggregateZero>(V))
return true;
if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
if (isConstant) return true;
@@ -31,7 +31,7 @@ static bool CheapToScalarize(Value *V, bool isConstant) {
}
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
-
+
// Insert element gets simplified to the inserted element or is deleted if
  // this is constant idx extract element and it's a constant idx insertelt.
if (I->getOpcode() == Instruction::InsertElement && isConstant &&
@@ -49,26 +49,24 @@ static bool CheapToScalarize(Value *V, bool isConstant) {
(CheapToScalarize(CI->getOperand(0), isConstant) ||
CheapToScalarize(CI->getOperand(1), isConstant)))
return true;
-
+
return false;
}
-/// Read and decode a shufflevector mask.
-///
-/// It turns undef elements into values that are larger than the number of
-/// elements in the input.
-static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
+/// getShuffleMask - Read and decode a shufflevector mask.
+/// Turn undef elements into negative values.
+static std::vector<int> getShuffleMask(const ShuffleVectorInst *SVI) {
unsigned NElts = SVI->getType()->getNumElements();
if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
- return std::vector<unsigned>(NElts, 0);
+ return std::vector<int>(NElts, 0);
if (isa<UndefValue>(SVI->getOperand(2)))
- return std::vector<unsigned>(NElts, 2*NElts);
-
- std::vector<unsigned> Result;
+ return std::vector<int>(NElts, -1);
+
+ std::vector<int> Result;
const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
if (isa<UndefValue>(*i))
- Result.push_back(NElts*2); // undef -> 8
+ Result.push_back(-1); // undef
else
Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
return Result;
@@ -83,42 +81,41 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
unsigned Width = PTy->getNumElements();
if (EltNo >= Width) // Out of range access.
return UndefValue::get(PTy->getElementType());
-
+
if (isa<UndefValue>(V))
return UndefValue::get(PTy->getElementType());
if (isa<ConstantAggregateZero>(V))
return Constant::getNullValue(PTy->getElementType());
if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
return CP->getOperand(EltNo);
-
+
if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
- if (!isa<ConstantInt>(III->getOperand(2)))
+ if (!isa<ConstantInt>(III->getOperand(2)))
return 0;
unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
-
+
// If this is an insert to the element we are looking for, return the
// inserted value.
- if (EltNo == IIElt)
+ if (EltNo == IIElt)
return III->getOperand(1);
-
+
// Otherwise, the insertelement doesn't modify the value, recurse on its
// vector input.
return FindScalarElement(III->getOperand(0), EltNo);
}
-
+
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
- unsigned InEl = getShuffleMask(SVI)[EltNo];
- if (InEl < LHSWidth)
- return FindScalarElement(SVI->getOperand(0), InEl);
- else if (InEl < LHSWidth*2)
- return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
- else
+ cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+ int InEl = getShuffleMask(SVI)[EltNo];
+ if (InEl < 0)
return UndefValue::get(PTy->getElementType());
+ if (InEl < (int)LHSWidth)
+ return FindScalarElement(SVI->getOperand(0), InEl);
+ return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
}
-
+
// Otherwise, we don't know.
return 0;
}
@@ -127,11 +124,11 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// If vector val is undef, replace extract with scalar undef.
if (isa<UndefValue>(EI.getOperand(0)))
return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
-
+
// If vector val is constant 0, replace extract with scalar 0.
if (isa<ConstantAggregateZero>(EI.getOperand(0)))
return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
-
+
if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
// If vector val is constant with all elements the same, replace EI with
// that element. When the elements are not identical, we cannot replace yet
@@ -139,53 +136,53 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
Constant *op0 = C->getOperand(0);
for (unsigned i = 1; i != C->getNumOperands(); ++i)
if (C->getOperand(i) != op0) {
- op0 = 0;
+ op0 = 0;
break;
}
if (op0)
return ReplaceInstUsesWith(EI, op0);
}
-
+
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
unsigned IndexVal = IdxC->getZExtValue();
unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
-
+
// If this is extracting an invalid index, turn this into undef, to avoid
// crashing the code below.
if (IndexVal >= VectorWidth)
return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
-
+
// This instruction only demands the single element from the input vector.
// If the input vector has a single use, simplify it based on this use
// property.
if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
APInt UndefElts(VectorWidth, 0);
APInt DemandedMask(VectorWidth, 0);
- DemandedMask.set(IndexVal);
+ DemandedMask.setBit(IndexVal);
if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
DemandedMask, UndefElts)) {
EI.setOperand(0, V);
return &EI;
}
}
-
+
if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
return ReplaceInstUsesWith(EI, Elt);
-
+
    // If this extractelement is directly using a bitcast from a vector of
// the same number of elements, see if we can find the source element from
// it. In this case, we will end up needing to bitcast the scalars.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
- if (const VectorType *VT =
+ if (const VectorType *VT =
dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
if (VT->getNumElements() == VectorWidth)
if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
return new BitCastInst(Elt, EI.getType());
}
}
-
+
if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
// Push extractelement into predecessor operation if legal and
// profitable to do so
@@ -193,11 +190,11 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
if (I->hasOneUse() &&
CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
Value *newEI0 =
- Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
- EI.getName()+".lhs");
+ Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
+ EI.getName()+".lhs");
Value *newEI1 =
- Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
- EI.getName()+".rhs");
+ Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
+ EI.getName()+".rhs");
return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
}
} else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
@@ -215,21 +212,22 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// If this is extracting an element from a shufflevector, figure out where
// it came from and extract from the appropriate input element instead.
if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
- unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
+ int SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
Value *Src;
unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
-
- if (SrcIdx < LHSWidth)
+ cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+
+ if (SrcIdx < 0)
+ return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
+ if (SrcIdx < (int)LHSWidth)
Src = SVI->getOperand(0);
- else if (SrcIdx < LHSWidth*2) {
+ else {
SrcIdx -= LHSWidth;
Src = SVI->getOperand(1);
- } else {
- return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
}
+ const Type *Int32Ty = Type::getInt32Ty(EI.getContext());
return ExtractElementInst::Create(Src,
- ConstantInt::get(Type::getInt32Ty(EI.getContext()),
+ ConstantInt::get(Int32Ty,
SrcIdx, false));
}
}
@@ -239,42 +237,42 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
}
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
-/// elements from either LHS or RHS, return the shuffle mask and true.
+/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
std::vector<Constant*> &Mask) {
assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
"Invalid CollectSingleShuffleElements");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
-
+
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
return true;
}
-
+
if (V == LHS) {
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
return true;
}
-
+
if (V == RHS) {
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()),
i+NumElts));
return true;
}
-
+
if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
// If this is an insert of an extract from some other vector, include it.
Value *VecOp = IEI->getOperand(0);
Value *ScalarOp = IEI->getOperand(1);
Value *IdxOp = IEI->getOperand(2);
-
+
if (!isa<ConstantInt>(IdxOp))
return false;
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
+
if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
      // Okay, we can handle this if the vector we are inserting into is
// transitively ok.
@@ -282,13 +280,13 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
// If so, update the mask to reflect the inserted undef.
Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(V->getContext()));
return true;
- }
+ }
} else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
if (isa<ConstantInt>(EI->getOperand(1)) &&
EI->getOperand(0)->getType() == V->getType()) {
unsigned ExtractedIdx =
cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
-
+
// This must be extracting from either LHS or RHS.
if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // Okay, we can handle this if the vector we are inserting into is
@@ -296,15 +294,14 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
// If so, update the mask to reflect the inserted value.
if (EI->getOperand(0) == LHS) {
- Mask[InsertedIdx % NumElts] =
+ Mask[InsertedIdx % NumElts] =
ConstantInt::get(Type::getInt32Ty(V->getContext()),
ExtractedIdx);
} else {
assert(EI->getOperand(0) == RHS);
- Mask[InsertedIdx % NumElts] =
+ Mask[InsertedIdx % NumElts] =
ConstantInt::get(Type::getInt32Ty(V->getContext()),
ExtractedIdx+NumElts);
-
}
return true;
}
@@ -313,7 +310,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
}
}
// TODO: Handle shufflevector here!
-
+
return false;
}
@@ -322,11 +319,11 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
Value *&RHS) {
- assert(V->getType()->isVectorTy() &&
+ assert(V->getType()->isVectorTy() &&
(RHS == 0 || V->getType() == RHS->getType()) &&
"Invalid shuffle!");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
-
+
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
return V;
@@ -338,25 +335,25 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
Value *VecOp = IEI->getOperand(0);
Value *ScalarOp = IEI->getOperand(1);
Value *IdxOp = IEI->getOperand(2);
-
+
if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
EI->getOperand(0)->getType() == V->getType()) {
unsigned ExtractedIdx =
- cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
+ cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
+
// Either the extracted from or inserted into vector must be RHSVec,
// otherwise we'd end up with a shuffle of three inputs.
if (EI->getOperand(0) == RHS || RHS == 0) {
RHS = EI->getOperand(0);
Value *V = CollectShuffleElements(VecOp, Mask, RHS);
- Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::getInt32Ty(V->getContext()),
- NumElts+ExtractedIdx);
+ Mask[InsertedIdx % NumElts] =
+ ConstantInt::get(Type::getInt32Ty(V->getContext()),
+ NumElts+ExtractedIdx);
return V;
}
-
+
if (VecOp == RHS) {
Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
// Everything but the extracted element is replaced with the RHS.
@@ -367,7 +364,7 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
}
return V;
}
-
+
// If this insertelement is a chain that comes from exactly these two
// vectors, return the vector and the effective shuffle.
if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
@@ -376,7 +373,7 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
}
}
// TODO: Handle shufflevector here!
-
+
// Otherwise, can't do anything fancy. Return an identity vector.
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
@@ -387,32 +384,32 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
Value *VecOp = IE.getOperand(0);
Value *ScalarOp = IE.getOperand(1);
Value *IdxOp = IE.getOperand(2);
-
+
// Inserting an undef or into an undefined place, remove this.
if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
ReplaceInstUsesWith(IE, VecOp);
-
- // If the inserted element was extracted from some other vector, and if the
+
+ // If the inserted element was extracted from some other vector, and if the
// indexes are constant, try to turn this into a shufflevector operation.
if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
EI->getOperand(0)->getType() == IE.getType()) {
unsigned NumVectorElts = IE.getType()->getNumElements();
unsigned ExtractedIdx =
- cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
+ cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
+
if (ExtractedIdx >= NumVectorElts) // Out of range extract.
return ReplaceInstUsesWith(IE, VecOp);
-
+
if (InsertedIdx >= NumVectorElts) // Out of range insert.
return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
-
+
// If we are extracting a value from a vector, then inserting it right
// back into the same place, just use the input vector.
if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
- return ReplaceInstUsesWith(IE, VecOp);
-
+ return ReplaceInstUsesWith(IE, VecOp);
+
// If this insertelement isn't used by some other insertelement, turn it
// (and any insertelements it points to), into one big shuffle.
if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
@@ -421,18 +418,20 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
if (RHS == 0) RHS = UndefValue::get(LHS->getType());
// We now have a shuffle of LHS, RHS, Mask.
- return new ShuffleVectorInst(LHS, RHS,
- ConstantVector::get(Mask));
+ return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
}
}
}
-
+
unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
- if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
+ if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
+ if (V != &IE)
+ return ReplaceInstUsesWith(IE, V);
return &IE;
-
+ }
+
return 0;
}
@@ -440,27 +439,29 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Value *LHS = SVI.getOperand(0);
Value *RHS = SVI.getOperand(1);
- std::vector<unsigned> Mask = getShuffleMask(&SVI);
-
+ std::vector<int> Mask = getShuffleMask(&SVI);
+
bool MadeChange = false;
-
+
// Undefined shuffle mask -> undefined value.
if (isa<UndefValue>(SVI.getOperand(2)))
return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
-
+
unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
-
+
if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
return 0;
-
+
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
- if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
+ if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
+ if (V != &SVI)
+ return ReplaceInstUsesWith(SVI, V);
LHS = SVI.getOperand(0);
RHS = SVI.getOperand(1);
MadeChange = true;
}
-
+
// Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
// Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
if (LHS == RHS || isa<UndefValue>(LHS)) {
@@ -468,16 +469,16 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// shuffle(undef,undef,mask) -> undef.
return ReplaceInstUsesWith(SVI, LHS);
}
-
+
// Remap any references to RHS to use LHS.
std::vector<Constant*> Elts;
for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] >= 2*e)
+ if (Mask[i] < 0)
Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
else {
- if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
- (Mask[i] < e && isa<UndefValue>(LHS))) {
- Mask[i] = 2*e; // Turn into undef.
+ if ((Mask[i] >= (int)e && isa<UndefValue>(RHS)) ||
+ (Mask[i] < (int)e && isa<UndefValue>(LHS))) {
+ Mask[i] = -1; // Turn into undef.
Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
} else {
Mask[i] = Mask[i] % e; // Force to LHS.
@@ -493,59 +494,65 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
RHS = SVI.getOperand(1);
MadeChange = true;
}
-
+
  // Analyze the shuffle: are the LHS or RHS identity shuffles?
bool isLHSID = true, isRHSID = true;
-
+
for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] >= e*2) continue; // Ignore undef values.
+ if (Mask[i] < 0) continue; // Ignore undef values.
// Is this an identity shuffle of the LHS value?
- isLHSID &= (Mask[i] == i);
-
+ isLHSID &= (Mask[i] == (int)i);
+
// Is this an identity shuffle of the RHS value?
isRHSID &= (Mask[i]-e == i);
}
-
+
// Eliminate identity shuffles.
if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
-
+
// If the LHS is a shufflevector itself, see if we can combine it with this
// one without producing an unusual shuffle. Here we are really conservative:
// we are absolutely afraid of producing a shuffle mask not in the input
// program, because the code gen may not be smart enough to turn a merged
// shuffle into two specific shuffles: it may produce worse code. As such,
- // we only merge two shuffles if the result is one of the two input shuffle
- // masks. In this case, merging the shuffles just removes one instruction,
- // which we know is safe. This is good for things like turning:
- // (splat(splat)) -> splat.
+ // we only merge two shuffles if the result is either a splat or one of the
+ // two input shuffle masks. In this case, merging the shuffles just removes
+ // one instruction, which we know is safe. This is good for things like
+ // turning: (splat(splat)) -> splat.
if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
if (isa<UndefValue>(RHS)) {
- std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
-
+ std::vector<int> LHSMask = getShuffleMask(LHSSVI);
+
if (LHSMask.size() == Mask.size()) {
- std::vector<unsigned> NewMask;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i)
- if (Mask[i] >= e)
- NewMask.push_back(2*e);
+ std::vector<int> NewMask;
+ bool isSplat = true;
+ int SplatElt = -1; // undef
+ for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
+ int MaskElt;
+ if (Mask[i] < 0 || Mask[i] >= (int)e)
+ MaskElt = -1; // undef
else
- NewMask.push_back(LHSMask[Mask[i]]);
-
+ MaskElt = LHSMask[Mask[i]];
+ // Check if this could still be a splat.
+ if (MaskElt >= 0) {
+ if (SplatElt >=0 && SplatElt != MaskElt)
+ isSplat = false;
+ SplatElt = MaskElt;
+ }
+ NewMask.push_back(MaskElt);
+ }
+
// If the result mask is equal to the src shuffle or this
// shuffle mask, do the replacement.
- if (NewMask == LHSMask || NewMask == Mask) {
- unsigned LHSInNElts =
- cast<VectorType>(LHSSVI->getOperand(0)->getType())->
- getNumElements();
+ if (isSplat || NewMask == LHSMask || NewMask == Mask) {
std::vector<Constant*> Elts;
+ const Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
- if (NewMask[i] >= LHSInNElts*2) {
- Elts.push_back(UndefValue::get(
- Type::getInt32Ty(SVI.getContext())));
+ if (NewMask[i] < 0) {
+ Elts.push_back(UndefValue::get(Int32Ty));
} else {
- Elts.push_back(ConstantInt::get(
- Type::getInt32Ty(SVI.getContext()),
- NewMask[i]));
+ Elts.push_back(ConstantInt::get(Int32Ty, NewMask[i]));
}
}
return new ShuffleVectorInst(LHSSVI->getOperand(0),
@@ -555,7 +562,6 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
}
}
}
-
+
return MadeChange ? &SVI : 0;
}
-
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index e46c679..37123d0 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -48,6 +48,7 @@
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
@@ -57,11 +58,22 @@ STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
+STATISTIC(NumExpand, "Number of expansions");
+STATISTIC(NumFactor , "Number of factorizations");
+STATISTIC(NumReassoc , "Number of reassociations");
+// Initialization Routines
+void llvm::initializeInstCombine(PassRegistry &Registry) {
+ initializeInstCombinerPass(Registry);
+}
+
+void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
+ initializeInstCombine(*unwrap(R));
+}
char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
- "Combine redundant instructions", false, false);
+ "Combine redundant instructions", false, false)
void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreservedID(LCSSAID);
@@ -97,53 +109,326 @@ bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
}
-// SimplifyCommutative - This performs a few simplifications for commutative
-// operators:
+/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
+/// operators which are associative or commutative:
+//
+// Commutative operators:
//
// 1. Order operands such that they are listed from right (least complex) to
// left (most complex). This puts constants before unary operators before
// binary operators.
//
-// 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
-// 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
+// Associative operators:
+//
+// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
+// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
+//
+// Associative and commutative operators:
+//
+// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
+// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
+// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
+// if C1 and C2 are constants.
//
-bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
+bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
+ Instruction::BinaryOps Opcode = I.getOpcode();
bool Changed = false;
- if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
- Changed = !I.swapOperands();
- if (!I.isAssociative()) return Changed;
-
- Instruction::BinaryOps Opcode = I.getOpcode();
- if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
- if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
- if (isa<Constant>(I.getOperand(1))) {
- Constant *Folded = ConstantExpr::get(I.getOpcode(),
- cast<Constant>(I.getOperand(1)),
- cast<Constant>(Op->getOperand(1)));
- I.setOperand(0, Op->getOperand(0));
- I.setOperand(1, Folded);
- return true;
+ do {
+ // Order operands such that they are listed from right (least complex) to
+ // left (most complex). This puts constants before unary operators before
+ // binary operators.
+ if (I.isCommutative() && getComplexity(I.getOperand(0)) <
+ getComplexity(I.getOperand(1)))
+ Changed = !I.swapOperands();
+
+ BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
+ BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
+
+ if (I.isAssociative()) {
+ // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
+ if (Op0 && Op0->getOpcode() == Opcode) {
+ Value *A = Op0->getOperand(0);
+ Value *B = Op0->getOperand(1);
+ Value *C = I.getOperand(1);
+
+ // Does "B op C" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
+ // It simplifies to V. Form "A op V".
+ I.setOperand(0, A);
+ I.setOperand(1, V);
+ // Conservatively clear the optional flags, since they may not be
+ // preserved by the reassociation.
+ I.clearSubclassOptionalData();
+ Changed = true;
+ ++NumReassoc;
+ continue;
+ }
}
-
- if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
- if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
- Op->hasOneUse() && Op1->hasOneUse()) {
- Constant *C1 = cast<Constant>(Op->getOperand(1));
- Constant *C2 = cast<Constant>(Op1->getOperand(1));
-
- // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
- Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
- Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
- Op1->getOperand(0),
- Op1->getName(), &I);
- Worklist.Add(New);
- I.setOperand(0, New);
- I.setOperand(1, Folded);
- return true;
+
+ // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
+ if (Op1 && Op1->getOpcode() == Opcode) {
+ Value *A = I.getOperand(0);
+ Value *B = Op1->getOperand(0);
+ Value *C = Op1->getOperand(1);
+
+ // Does "A op B" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
+ // It simplifies to V. Form "V op C".
+ I.setOperand(0, V);
+ I.setOperand(1, C);
+ // Conservatively clear the optional flags, since they may not be
+ // preserved by the reassociation.
+ I.clearSubclassOptionalData();
+ Changed = true;
+ ++NumReassoc;
+ continue;
}
+ }
}
- return Changed;
+
+ if (I.isAssociative() && I.isCommutative()) {
+ // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
+ if (Op0 && Op0->getOpcode() == Opcode) {
+ Value *A = Op0->getOperand(0);
+ Value *B = Op0->getOperand(1);
+ Value *C = I.getOperand(1);
+
+ // Does "C op A" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ // It simplifies to V. Form "V op B".
+ I.setOperand(0, V);
+ I.setOperand(1, B);
+ // Conservatively clear the optional flags, since they may not be
+ // preserved by the reassociation.
+ I.clearSubclassOptionalData();
+ Changed = true;
+ ++NumReassoc;
+ continue;
+ }
+ }
+
+ // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
+ if (Op1 && Op1->getOpcode() == Opcode) {
+ Value *A = I.getOperand(0);
+ Value *B = Op1->getOperand(0);
+ Value *C = Op1->getOperand(1);
+
+ // Does "C op A" simplify?
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ // It simplifies to V. Form "B op V".
+ I.setOperand(0, B);
+ I.setOperand(1, V);
+ // Conservatively clear the optional flags, since they may not be
+ // preserved by the reassociation.
+ I.clearSubclassOptionalData();
+ Changed = true;
+ ++NumReassoc;
+ continue;
+ }
+ }
+
+ // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
+ // if C1 and C2 are constants.
+ if (Op0 && Op1 &&
+ Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
+ isa<Constant>(Op0->getOperand(1)) &&
+ isa<Constant>(Op1->getOperand(1)) &&
+ Op0->hasOneUse() && Op1->hasOneUse()) {
+ Value *A = Op0->getOperand(0);
+ Constant *C1 = cast<Constant>(Op0->getOperand(1));
+ Value *B = Op1->getOperand(0);
+ Constant *C2 = cast<Constant>(Op1->getOperand(1));
+
+ Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
+ Instruction *New = BinaryOperator::Create(Opcode, A, B, Op1->getName(),
+ &I);
+ Worklist.Add(New);
+ I.setOperand(0, New);
+ I.setOperand(1, Folded);
+ // Conservatively clear the optional flags, since they may not be
+ // preserved by the reassociation.
+ I.clearSubclassOptionalData();
+ Changed = true;
+ continue;
+ }
+ }
+
+ // No further simplifications.
+ return Changed;
+ } while (1);
+}
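
Transform (2) in action: in "(X + 7) + (-7)" the inner "B op C" is
"7 + (-7)", which simplifies to 0, so the whole expression reassociates down
to X. A trivial standalone check (not part of the patch):

    #include <cassert>

    int main() {
      for (int x = -100; x <= 100; ++x)
        assert((x + 7) + (-7) == x); // (A op B) op C ==> A op (B op C) ==> A
      return 0;
    }
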
+
+/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
+/// "(X LOp Y) ROp (X LOp Z)".
+static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
+ Instruction::BinaryOps ROp) {
+ switch (LOp) {
+ default:
+ return false;
+
+ case Instruction::And:
+ // And distributes over Or and Xor.
+ switch (ROp) {
+ default:
+ return false;
+ case Instruction::Or:
+ case Instruction::Xor:
+ return true;
+ }
+
+ case Instruction::Mul:
+ // Multiplication distributes over addition and subtraction.
+ switch (ROp) {
+ default:
+ return false;
+ case Instruction::Add:
+ case Instruction::Sub:
+ return true;
+ }
+
+ case Instruction::Or:
+ // Or distributes over And.
+ switch (ROp) {
+ default:
+ return false;
+ case Instruction::And:
+ return true;
+ }
+ }
+}
+
+/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
+/// "(X ROp Z) LOp (Y ROp Z)".
+static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
+ Instruction::BinaryOps ROp) {
+ if (Instruction::isCommutative(ROp))
+ return LeftDistributesOverRight(ROp, LOp);
+ // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
+ // but this requires knowing that the addition does not overflow and other
+ // such subtleties.
+ return false;
+}
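
The distribution facts encoded above can be checked exhaustively over small
operands; a standalone sketch (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x = 0; x < 16; ++x)
        for (uint32_t y = 0; y < 16; ++y)
          for (uint32_t z = 0; z < 16; ++z) {
            assert((x & (y | z)) == ((x & y) | (x & z))); // And over Or
            assert((x & (y ^ z)) == ((x & y) ^ (x & z))); // And over Xor
            assert((x * (y + z)) == ((x * y) + (x * z))); // Mul over Add
            assert((x | (y & z)) == ((x | y) & (x | z))); // Or over And
          }
      return 0;
    }
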
+
+/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
+/// which some other binary operation distributes over either by factorizing
+/// out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if
+/// this results in simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if
+/// this is a win). Returns the simplified value, or null if it didn't
+/// simplify.
+Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
+ BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
+ Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op
+
+ // Factorization.
+ if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
+ // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
+ // a common term.
+ Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
+ Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
+ Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
+
+ // Does "X op' Y" always equal "Y op' X"?
+ bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
+
+ // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
+ if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
+ // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
+ // commutative case, "(A op' B) op (C op' A)"?
+ if (A == C || (InnerCommutative && A == D)) {
+ if (A != C)
+ std::swap(C, D);
+ // Consider forming "A op' (B op D)".
+ // If "B op D" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
+ // If "B op D" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && Op0->hasOneUse() && Op1->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
+ if (V) {
+ ++NumFactor;
+ V = Builder->CreateBinOp(InnerOpcode, A, V);
+ V->takeName(&I);
+ return V;
+ }
+ }
+
+ // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
+ if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
+ // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
+ // commutative case, "(A op' B) op (B op' D)"?
+ if (B == D || (InnerCommutative && B == C)) {
+ if (B != D)
+ std::swap(C, D);
+ // Consider forming "(A op C) op' B".
+ // If "A op C" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
+ // If "A op C" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && Op0->hasOneUse() && Op1->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
+ if (V) {
+ ++NumFactor;
+ V = Builder->CreateBinOp(InnerOpcode, V, B);
+ V->takeName(&I);
+ return V;
+ }
+ }
+ }
+
+ // Expansion.
+ if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
+ // The instruction has the form "(A op' B) op C". See if expanding it out
+ // to "(A op C) op' (B op C)" results in simplifications.
+ Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
+ Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
+
+ // Do "A op C" and "B op C" both simplify?
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
+ // They do! Return "L op' R".
+ ++NumExpand;
+ // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
+ if ((L == A && R == B) ||
+ (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
+ return Op0;
+ // Otherwise return "L op' R" if it simplifies.
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ return V;
+ // Otherwise, create a new instruction.
+ C = Builder->CreateBinOp(InnerOpcode, L, R);
+ C->takeName(&I);
+ return C;
+ }
+ }
+
+ if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
+ // The instruction has the form "A op (B op' C)". See if expanding it out
+ // to "(A op B) op' (A op C)" results in simplifications.
+ Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
+ Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
+
+ // Do "A op B" and "A op C" both simplify?
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
+ // They do! Return "L op' R".
+ ++NumExpand;
+ // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
+ if ((L == B && R == C) ||
+ (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
+ return Op1;
+ // Otherwise return "L op' R" if it simplifies.
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ return V;
+ // Otherwise, create a new instruction.
+ A = Builder->CreateBinOp(InnerOpcode, L, R);
+ A->takeName(&I);
+ return A;
+ }
+ }
+
+ return 0;
}
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
@@ -185,8 +470,9 @@ Value *InstCombiner::dyn_castFNegVal(Value *V) const {
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
InstCombiner *IC) {
- if (CastInst *CI = dyn_cast<CastInst>(&I))
+ if (CastInst *CI = dyn_cast<CastInst>(&I)) {
return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
+ }
// Figure out if the constant is the left or the right argument.
bool ConstIsRHS = isa<Constant>(I.getOperand(1));
@@ -228,11 +514,24 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
// Bool selects with constant operands can be folded to logical ops.
if (SI->getType()->isIntegerTy(1)) return 0;
+ // If it's a bitcast involving vectors, make sure it has the same number of
+ // elements on both sides.
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
+ const VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
+ const VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
+
+ // Verify that either both or neither are vectors.
+ if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
+ // If vectors, verify that they have the same number of elements.
+ if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
+ return 0;
+ }
+
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
- return SelectInst::Create(SI->getCondition(), SelectTrueVal,
- SelectFalseVal);
+ return SelectInst::Create(SI->getCondition(),
+ SelectTrueVal, SelectFalseVal);
}
return 0;
}
@@ -242,20 +541,25 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
-/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
-/// that would normally be unprofitable because they strongly encourage jump
-/// threading.
-Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
- bool AllowAggressive) {
- AllowAggressive = false;
+Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PHINode *PN = cast<PHINode>(I.getOperand(0));
unsigned NumPHIValues = PN->getNumIncomingValues();
- if (NumPHIValues == 0 ||
- // We normally only transform phis with a single use, unless we're trying
- // hard to make jump threading happen.
- (!PN->hasOneUse() && !AllowAggressive))
+ if (NumPHIValues == 0)
return 0;
+ // We normally only transform phis with a single use. However, if a PHI has
+ // multiple uses and they are all the same operation, we can fold *all* of the
+ // uses into the PHI.
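+ // For example, if three identical "add i32 %phi, 1" instructions are the
+ // only users of %phi, a single new phi of incremented incoming values can
+ // replace all three adds at once.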
+ if (!PN->hasOneUse()) {
+ // Walk the use list for the instruction, comparing them to I.
+ for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
+ UI != E; ++UI) {
+ Instruction *User = cast<Instruction>(*UI);
+ if (User != &I && !I.isIdenticalTo(User))
+ return 0;
+ }
+ // Otherwise, we can replace *all* users with the new PHI we form.
+ }
// Check to see if all of the operands of the PHI are simple constants
// (constantint/constantfp/undef). If there is one non-constant value,
@@ -263,24 +567,34 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
// bail out. We don't do arbitrary constant expressions here because moving
// their computation can be expensive without a cost model.
BasicBlock *NonConstBB = 0;
- for (unsigned i = 0; i != NumPHIValues; ++i)
- if (!isa<Constant>(PN->getIncomingValue(i)) ||
- isa<ConstantExpr>(PN->getIncomingValue(i))) {
- if (NonConstBB) return 0; // More than one non-const value.
- if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
- NonConstBB = PN->getIncomingBlock(i);
-
- // If the incoming non-constant value is in I's block, we have an infinite
- // loop.
- if (NonConstBB == I.getParent())
+ for (unsigned i = 0; i != NumPHIValues; ++i) {
+ Value *InVal = PN->getIncomingValue(i);
+ if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
+ continue;
+
+ if (isa<PHINode>(InVal)) return 0; // Itself a phi.
+ if (NonConstBB) return 0; // More than one non-const value.
+
+ NonConstBB = PN->getIncomingBlock(i);
+
+ // If the InVal is an invoke at the end of the pred block, then we can't
+ // insert a computation after it without breaking the edge.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
+ if (II->getParent() == NonConstBB)
return 0;
- }
+
+ // If the incoming non-constant value is in I's block, we will remove one
+ // instruction, but insert another equivalent one, leading to infinite
+ // instcombine.
+ if (NonConstBB == I.getParent())
+ return 0;
+ }
// If there is exactly one non-constant value, we can insert a copy of the
// operation in that block. However, if this is a critical edge, we would be
// inserting the computation on some other paths (e.g. inside a loop). Only
// do this if the pred block is unconditionally branching into the phi block.
- if (NonConstBB != 0 && !AllowAggressive) {
+ if (NonConstBB != 0) {
BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
if (!BI || !BI->isUnconditional()) return 0;
}
@@ -290,7 +604,12 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
NewPN->reserveOperandSpace(PN->getNumOperands()/2);
InsertNewInstBefore(NewPN, *PN);
NewPN->takeName(PN);
-
+
+ // If we are going to have to insert a new computation, do so right before the
+ // predecessor's terminator.
+ if (NonConstBB)
+ Builder->SetInsertPoint(NonConstBB->getTerminator());
+
// Next, add all of the operands to the PHI.
if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
// We only currently try to fold the condition of a select when it is a phi,
@@ -303,42 +622,36 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
Value *InV = 0;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
+ if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
- FalseVInPred,
- "phitmp", NonConstBB->getTerminator());
- Worklist.Add(cast<Instruction>(InV));
- }
+ else
+ InV = Builder->CreateSelect(PN->getIncomingValue(i),
+ TrueVInPred, FalseVInPred, "phitmp");
NewPN->addIncoming(InV, ThisBB);
}
+ } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
+ Constant *C = cast<Constant>(I.getOperand(1));
+ for (unsigned i = 0; i != NumPHIValues; ++i) {
+ Value *InV = 0;
+ if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
+ InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
+ else if (isa<ICmpInst>(CI))
+ InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
+ C, "phitmp");
+ else
+ InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
+ C, "phitmp");
+ NewPN->addIncoming(InV, PN->getIncomingBlock(i));
+ }
} else if (I.getNumOperands() == 2) {
Constant *C = cast<Constant>(I.getOperand(1));
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InV = 0;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
- if (CmpInst *CI = dyn_cast<CmpInst>(&I))
- InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
- else
- InV = ConstantExpr::get(I.getOpcode(), InC, C);
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
- InV = BinaryOperator::Create(BO->getOpcode(),
- PN->getIncomingValue(i), C, "phitmp",
- NonConstBB->getTerminator());
- else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
- InV = CmpInst::Create(CI->getOpcode(),
- CI->getPredicate(),
- PN->getIncomingValue(i), C, "phitmp",
- NonConstBB->getTerminator());
- else
- llvm_unreachable("Unknown binop!");
-
- Worklist.Add(cast<Instruction>(InV));
- }
+ if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
+ InV = ConstantExpr::get(I.getOpcode(), InC, C);
+ else
+ InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
+ PN->getIncomingValue(i), C, "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
} else {
@@ -346,18 +659,22 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
const Type *RetTy = CI->getType();
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InV;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
+ if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
- I.getType(), "phitmp",
- NonConstBB->getTerminator());
- Worklist.Add(cast<Instruction>(InV));
- }
+ else
+ InV = Builder->CreateCast(CI->getOpcode(),
+ PN->getIncomingValue(i), I.getType(), "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
}
+
+ for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
+ UI != E; ) {
+ Instruction *User = cast<Instruction>(*UI++);
+ if (User == &I) continue;
+ ReplaceInstUsesWith(*User, NewPN);
+ EraseInstFromFunction(*User);
+ }
return ReplaceInstUsesWith(I, NewPN);
}
@@ -432,28 +749,35 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *PtrOp = GEP.getOperand(0);
- if (isa<UndefValue>(GEP.getOperand(0)))
- return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
-
- // Eliminate unneeded casts for indices.
+ // Eliminate unneeded casts for indices, and replace indices which displace
+ // by multiples of a zero size type with zero.
if (TD) {
bool MadeChange = false;
- unsigned PtrSize = TD->getPointerSizeInBits();
-
+ const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
I != E; ++I, ++GTI) {
- if (!isa<SequentialType>(*GTI)) continue;
-
- // If we are using a wider index than needed for this platform, shrink it
- // to what we need. If narrower, sign-extend it to what we need. This
- // explicit cast can make subsequent optimizations more obvious.
- unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
- if (OpBits == PtrSize)
- continue;
-
- *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
- MadeChange = true;
+ // Skip indices into struct types.
+ const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
+ if (!SeqTy) continue;
+
+ // If the element type has zero size then any index over it is equivalent
+ // to an index of zero, so replace it with zero if it is not zero already.
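+ // For example, an index over an [0 x i32] element moves the pointer by
+ // index * 0 == 0 bytes, whatever the index value is.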
+ if (SeqTy->getElementType()->isSized() &&
+ TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
+ if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
+ *I = Constant::getNullValue(IntPtrTy);
+ MadeChange = true;
+ }
+
+ if ((*I)->getType() != IntPtrTy) {
+ // If we are using a wider index than needed for this platform, shrink
+ // it to what we need. If narrower, sign-extend it to what we need.
+ // This explicit cast can make subsequent optimizations more obvious.
+ *I = Builder->CreateIntCast(*I, IntPtrTy, true);
+ MadeChange = true;
+ }
}
if (MadeChange) return &GEP;
}
@@ -940,6 +1264,14 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
EraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
}
+
+ // If the normal result of the add is dead, and the RHS is a constant,
+ // we can transform this into a range comparison.
+ // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
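+ // (The unsigned add wraps iff a > UINT_MAX - C, and UINT_MAX - C == ~C.)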
+ if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
+ return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
+ ConstantExpr::getNot(CI));
break;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -964,10 +1296,37 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
}
}
}
- // Can't simplify extracts from other values. Note that nested extracts are
- // already simplified implicitely by the above (extract ( extract (insert) )
+ if (LoadInst *L = dyn_cast<LoadInst>(Agg))
+ // If the (non-volatile) load only has one use, we can rewrite this to a
+ // load from a GEP. This reduces the size of the load.
+ // FIXME: If a load is used only by extractvalue instructions then this
+ // could be done regardless of having multiple uses.
+ if (!L->isVolatile() && L->hasOneUse()) {
+ // extractvalue has integer indices, getelementptr has Value*s. Convert.
+ SmallVector<Value*, 4> Indices;
+ // Prefix an i32 0 since we need the first element.
+ Indices.push_back(Builder->getInt32(0));
+ for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
+ I != E; ++I)
+ Indices.push_back(Builder->getInt32(*I));
+
+ // We need to insert these at the location of the old load, not at that of
+ // the extractvalue.
+ Builder->SetInsertPoint(L->getParent(), L);
+ Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(),
+ Indices.begin(), Indices.end());
+ // Returning the load directly will cause the main loop to insert it in
+ // the wrong spot, so use ReplaceInstUsesWith().
+ return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
+ }
+ // We could simplify extracts from other values. Note that nested extracts may
+ // already be simplified implicitly by the above: extract (extract (insert) )
// will be translated into extract ( insert ( extract ) ) first and then just
- // the value inserted, if appropriate).
+ // the value inserted, if appropriate. Similarly for extracts from single-use
+ // loads: extract (extract (load)) will be translated to extract (load (gep))
+ // and if again single-use then via load (gep (gep)) to load (gep).
+ // However, double extracts from e.g. function arguments or return values
+ // aren't handled yet.
return 0;
}
@@ -1023,10 +1382,8 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
Worklist.push_back(BB);
-
- std::vector<Instruction*> InstrsForInstCombineWorklist;
- InstrsForInstCombineWorklist.reserve(128);
+ SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
do {
@@ -1231,6 +1588,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
DEBUG(errs() << "IC: Old = " << *I << '\n'
<< " New = " << *Result << '\n');
+ Result->setDebugLoc(I->getDebugLoc());
// Everything uses the new instruction now.
I->replaceAllUsesWith(Result);
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
index a77d70c..1d31fcc 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
@@ -17,6 +17,7 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "insert-edge-profiling"
+
#include "ProfilingUtils.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
@@ -34,7 +35,9 @@ namespace {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
- EdgeProfiler() : ModulePass(ID) {}
+ EdgeProfiler() : ModulePass(ID) {
+ initializeEdgeProfilerPass(*PassRegistry::getPassRegistry());
+ }
virtual const char *getPassName() const {
return "Edge Profiler";
@@ -44,7 +47,7 @@ namespace {
char EdgeProfiler::ID = 0;
INITIALIZE_PASS(EdgeProfiler, "insert-edge-profiling",
- "Insert instrumentation for edge profiling", false, false);
+ "Insert instrumentation for edge profiling", false, false)
ModulePass *llvm::createEdgeProfilerPass() { return new EdgeProfiler(); }
@@ -98,7 +101,7 @@ bool EdgeProfiler::runOnModule(Module &M) {
// otherwise insert it in the successor block.
if (TI->getNumSuccessors() == 1) {
// Insert counter at the start of the block
- IncrementCounterInBlock(BB, i++, Counters);
+ IncrementCounterInBlock(BB, i++, Counters, false);
} else {
// Insert counter at the start of the block
IncrementCounterInBlock(TI->getSuccessor(s), i++, Counters);
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
new file mode 100644
index 0000000..96ed4fa
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -0,0 +1,32 @@
+//===-- Instrumentation.cpp - Instrumentation Infrastructure -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common initialization infrastructure for the
+// Instrumentation library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InitializePasses.h"
+#include "llvm-c/Initialization.h"
+
+using namespace llvm;
+
+/// initializeInstrumentation - Initialize all passes in the Instrumentation
+/// library.
+void llvm::initializeInstrumentation(PassRegistry &Registry) {
+ initializeEdgeProfilerPass(Registry);
+ initializeOptimalEdgeProfilerPass(Registry);
+ initializePathProfilerPass(Registry);
+}
+
+/// LLVMInitializeInstrumentation - C binding for
+/// initializeInstrumentation.
+void LLVMInitializeInstrumentation(LLVMPassRegistryRef R) {
+ initializeInstrumentation(*unwrap(R));
+}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index 8eec987..c85a1a9 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -36,7 +36,9 @@ namespace {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
- OptimalEdgeProfiler() : ModulePass(ID) {}
+ OptimalEdgeProfiler() : ModulePass(ID) {
+ initializeOptimalEdgeProfilerPass(*PassRegistry::getPassRegistry());
+ }
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(ProfileEstimatorPassID);
@@ -50,9 +52,14 @@ namespace {
}
char OptimalEdgeProfiler::ID = 0;
-INITIALIZE_PASS(OptimalEdgeProfiler, "insert-optimal-edge-profiling",
+INITIALIZE_PASS_BEGIN(OptimalEdgeProfiler, "insert-optimal-edge-profiling",
+ "Insert optimal instrumentation for edge profiling",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(ProfileEstimatorPass)
+INITIALIZE_AG_DEPENDENCY(ProfileInfo)
+INITIALIZE_PASS_END(OptimalEdgeProfiler, "insert-optimal-edge-profiling",
"Insert optimal instrumentation for edge profiling",
- false, false);
+ false, false)
ModulePass *llvm::createOptimalEdgeProfilerPass() {
return new OptimalEdgeProfiler();
@@ -125,11 +132,11 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
// Calculate a Maximum Spanning Tree with the edge weights determined by
// ProfileEstimator. ProfileEstimator also assigns weights to the virtual
// edges (0,entry) and (BB,0) (for blocks with no successors) and these
- // edges also participate in the maximum spanning tree calculation.
+ // edges also participate in the maximum spanning tree calculation.
// The third parameter of MaximumSpanningTree() has the effect that not the
// actual MST is returned but the edges _not_ in the MST.
- ProfileInfo::EdgeWeights ECs =
+ ProfileInfo::EdgeWeights ECs =
getAnalysis<ProfileInfo>(*F).getEdgeWeights(F);
std::vector<ProfileInfo::EdgeWeight> EdgeVector(ECs.begin(), ECs.end());
MaximumSpanningTree<BasicBlock> MST (EdgeVector);
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
new file mode 100644
index 0000000..6449b39
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -0,0 +1,1423 @@
+//===- PathProfiling.cpp - Inserts counters for path profiling ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass instruments functions for Ball-Larus path profiling. Ball-Larus
+// profiling converts the CFG into a DAG by replacing backedges with edges
+// from entry to the start block and from the end block to exit. The paths
+// along the new DAG are enumerated, i.e. each path is given a path number.
+// Edges are instrumented to increment the path number register, such that the
+// path number register will equal the path number of the path taken at the
+// exit.
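+// For example, a diamond CFG (entry branching to two blocks that rejoin at
+// the exit) has two DAG paths, numbered 0 and 1, so its counter array only
+// needs two entries.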
+//
+// This file defines classes for building a CFG for use with different stages
+// in the Ball-Larus path profiling instrumentation [Ball96]. The stages
+// are: formatting the llvm CFG into the Ball-Larus DAG, numbering the paths,
+// finding a spanning tree, and moving increments from the spanning tree to
+// the chords.
+//
+// Terms:
+// DAG - Directed Acyclic Graph.
+// Ball-Larus DAG - A CFG with an entry node, an exit node, and backedges
+// removed in the following manner. For every backedge
+// v->w, insert edge ENTRY->w and edge v->EXIT.
+// Path Number - The number corresponding to a specific path through a
+// Ball-Larus DAG.
+// Spanning Tree - A subgraph, S, is a spanning tree if S covers all
+// vertices and is a tree.
+// Chord - An edge not in the spanning tree.
+//
+// [Ball96]
+// T. Ball and J. R. Larus. "Efficient Path Profiling."
+// International Symposium on Microarchitecture, pages 46-57, 1996.
+// http://portal.acm.org/citation.cfm?id=243857
+//
+// [Ball94]
+// Thomas Ball. "Efficiently Counting Program Events with Support for
+// On-line queries."
+// ACM Transactions on Programming Languages and Systems, Vol 16, No 5,
+// September 1994, Pages 1399-1410.
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "insert-path-profiling"
+
+#include "llvm/DerivedTypes.h"
+#include "ProfilingUtils.h"
+#include "llvm/Analysis/PathNumbering.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/InstrTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TypeBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include <map>
+#include <stack>
+#include <vector>
+
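+// Functions with more paths than this threshold keep their counters in a
+// runtime hash table (accessed through calls into the profiling runtime)
+// rather than in a flat array; see insertCounterIncrement below.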
+#define HASH_THRESHHOLD 100000
+
+using namespace llvm;
+
+namespace {
+class BLInstrumentationNode;
+class BLInstrumentationEdge;
+class BLInstrumentationDag;
+
+// ---------------------------------------------------------------------------
+// BLInstrumentationNode extends BallLarusNode with members used by the
+// instrumentation algorithms.
+// ---------------------------------------------------------------------------
+class BLInstrumentationNode : public BallLarusNode {
+public:
+ // Creates a new BLInstrumentationNode from a BasicBlock.
+ BLInstrumentationNode(BasicBlock* BB);
+
+ // Gets/sets the Value corresponding to the pathNumber register,
+ // constant or phinode. Used by the instrumentation code to remember
+ // path number Values.
+ Value* getStartingPathNumber();
+ void setStartingPathNumber(Value* pathNumber);
+
+ Value* getEndingPathNumber();
+ void setEndingPathNumber(Value* pathNumber);
+
+ // Get/set the PHINode Instruction for this node.
+ PHINode* getPathPHI();
+ void setPathPHI(PHINode* pathPHI);
+
+private:
+
+ Value* _startingPathNumber; // The Value for the starting pathNumber.
+ Value* _endingPathNumber; // The Value for the ending pathNumber.
+ PHINode* _pathPHI; // The PHINode for current pathNumber.
+};
+
+// --------------------------------------------------------------------------
+// BLInstrumentationEdge extends BallLarusEdge with data about the
+// instrumentation that will end up on each edge.
+// --------------------------------------------------------------------------
+class BLInstrumentationEdge : public BallLarusEdge {
+public:
+ BLInstrumentationEdge(BLInstrumentationNode* source,
+ BLInstrumentationNode* target);
+
+ // Sets the target node of this edge. Required to split edges.
+ void setTarget(BallLarusNode* node);
+
+ // Get/set whether edge is in the spanning tree.
+ bool isInSpanningTree() const;
+ void setIsInSpanningTree(bool isInSpanningTree);
+
+ // Get/set whether this edge will be instrumented with a path number
+ // initialization.
+ bool isInitialization() const;
+ void setIsInitialization(bool isInitialization);
+
+ // Get/set whether this edge will be instrumented with a path counter
+ // increment. Notice this is incrementing the path counter
+ // corresponding to the path number register. The path number
+ // increment is determined by getIncrement().
+ bool isCounterIncrement() const;
+ void setIsCounterIncrement(bool isCounterIncrement);
+
+ // Get/set the path number increment that this edge will be instrumented
+ // with. This is distinct from the path counter increment and the
+ // weight. The counter increment counts the number of executions of
+ // some path, whereas the path number keeps track of which path number
+ // the program is on.
+ long getIncrement() const;
+ void setIncrement(long increment);
+
+ // Get/set whether the edge has been instrumented.
+ bool hasInstrumentation();
+ void setHasInstrumentation(bool hasInstrumentation);
+
+ // Returns the successor number of this edge in the source.
+ unsigned getSuccessorNumber();
+
+private:
+ // The increment that the code will be instrumented with.
+ long long _increment;
+
+ // Whether this edge is in the spanning tree.
+ bool _isInSpanningTree;
+
+ // Whether this edge is an initialization of the path number.
+ bool _isInitialization;
+
+ // Whether this edge is a path counter increment.
+ bool _isCounterIncrement;
+
+ // Whether this edge has been instrumented.
+ bool _hasInstrumentation;
+};
+
+// ---------------------------------------------------------------------------
+// BLInstrumentationDag extends BallLarusDag with algorithms that
+// determine where instrumentation should be placed.
+// ---------------------------------------------------------------------------
+class BLInstrumentationDag : public BallLarusDag {
+public:
+ BLInstrumentationDag(Function &F);
+
+ // Returns the Exit->Root edge. This edge is required for creating
+ // directed cycles in the algorithm for moving instrumentation off of
+ // the spanning tree
+ BallLarusEdge* getExitRootEdge();
+
+ // Returns an array of phony edges which mark those nodes
+ // with function calls
+ BLEdgeVector getCallPhonyEdges();
+
+ // Gets/sets the path counter array
+ GlobalVariable* getCounterArray();
+ void setCounterArray(GlobalVariable* c);
+
+ // Calculates the increments for the chords, thereby removing
+ // instrumentation from the spanning tree edges. Implementation is based
+ // on the algorithm in Figure 4 of [Ball94]
+ void calculateChordIncrements();
+
+ // Updates the state when an edge has been split
+ void splitUpdate(BLInstrumentationEdge* formerEdge, BasicBlock* newBlock);
+
+ // Calculates a spanning tree of the DAG ignoring cycles. Whichever
+ // edges are in the spanning tree will not be instrumented, but this
+ // implementation does not try to minimize the instrumentation overhead
+ // by trying to find hot edges.
+ void calculateSpanningTree();
+
+ // Pushes initialization further down in order to group the first
+ // increment and initialization.
+ void pushInitialization();
+
+ // Pushes the path counter increments up in order to group the last path
+ // number increment.
+ void pushCounters();
+
+ // Removes phony edges from the successor list of the source, and the
+ // predecessor list of the target.
+ void unlinkPhony();
+
+ // Generate dot graph for the function
+ void generateDotGraph();
+
+protected:
+ // BLInstrumentationDag creates BLInstrumentationNode objects in this
+ // method overriding the creation of BallLarusNode objects.
+ //
+ // Allows subclasses to determine which type of Node is created.
+ // Override this method to produce subclasses of BallLarusNode if
+ // necessary.
+ virtual BallLarusNode* createNode(BasicBlock* BB);
+
+ // BLInstrumentationDag creates BLInstrumentationEdges.
+ //
+ // Allows subclasses to determine which type of Edge is created.
+ // Override this method to produce subclasses of BallLarusEdge if
+ // necessary. Parameters source and target will have been created by
+ // createNode and can be cast to the subclass of BallLarusNode*
+ // returned by createNode.
+ virtual BallLarusEdge* createEdge(
+ BallLarusNode* source, BallLarusNode* target, unsigned edgeNumber);
+
+private:
+ BLEdgeVector _treeEdges; // All edges in the spanning tree.
+ BLEdgeVector _chordEdges; // All edges not in the spanning tree.
+ GlobalVariable* _counterArray; // Array to store path counters
+
+ // Removes the edge from the appropriate predecessor and successor lists.
+ void unlinkEdge(BallLarusEdge* edge);
+
+ // Makes an edge part of the spanning tree.
+ void makeEdgeSpanning(BLInstrumentationEdge* edge);
+
+ // Pushes initialization and calls itself recursively.
+ void pushInitializationFromEdge(BLInstrumentationEdge* edge);
+
+ // Pushes path counter increments up recursively.
+ void pushCountersFromEdge(BLInstrumentationEdge* edge);
+
+ // Depth first algorithm for determining the chord increments.
+ void calculateChordIncrementsDfs(
+ long weight, BallLarusNode* v, BallLarusEdge* e);
+
+ // Determines the relative direction of two edges.
+ int calculateChordIncrementsDir(BallLarusEdge* e, BallLarusEdge* f);
+};
+
+// ---------------------------------------------------------------------------
+// PathProfiler is a module pass which inserts path profiling instrumentation
+// ---------------------------------------------------------------------------
+class PathProfiler : public ModulePass {
+private:
+ // Current context for multi threading support.
+ LLVMContext* Context;
+
+ // Which function are we currently instrumenting
+ unsigned currentFunctionNumber;
+
+ // The function prototype in the profiling runtime for incrementing a
+ // single path counter in a hash table.
+ Constant* llvmIncrementHashFunction;
+ Constant* llvmDecrementHashFunction;
+
+ // Instruments each function with path profiling. 'main' is instrumented
+ // with code to save the profile to disk.
+ bool runOnModule(Module &M);
+
+ // Analyzes the function for Ball-Larus path profiling, and inserts code.
+ void runOnFunction(std::vector<Constant*> &ftInit, Function &F, Module &M);
+
+ // Creates an increment constant representing incr.
+ ConstantInt* createIncrementConstant(long incr, int bitsize);
+
+ // Creates an increment constant representing the value in
+ // edge->getIncrement().
+ ConstantInt* createIncrementConstant(BLInstrumentationEdge* edge);
+
+ // Finds the insertion point after pathNumber in block. PathNumber may
+ // be NULL.
+ BasicBlock::iterator getInsertionPoint(
+ BasicBlock* block, Value* pathNumber);
+
+ // Inserts source's pathNumber Value* into target. Target may or may not
+ // have multiple predecessors, and may or may not have its phiNode
+ // initialized.
+ void pushValueIntoNode(
+ BLInstrumentationNode* source, BLInstrumentationNode* target);
+
+ // Inserts source's pathNumber Value* into the appropriate slot of
+ // target's phiNode.
+ void pushValueIntoPHI(
+ BLInstrumentationNode* target, BLInstrumentationNode* source);
+
+ // The Value* in node, oldVal, is updated with a Value* corresponding to
+ // oldVal + addition.
+ void insertNumberIncrement(BLInstrumentationNode* node, Value* addition,
+ bool atBeginning);
+
+ // Creates a counter increment in the given node. The Value* in node is
+ // taken as the index into a hash table.
+ void insertCounterIncrement(
+ Value* incValue,
+ BasicBlock::iterator insertPoint,
+ BLInstrumentationDag* dag,
+ bool increment = true);
+
+ // A PHINode is created in the node, and its values initialized to -1U.
+ void preparePHI(BLInstrumentationNode* node);
+
+ // Inserts instrumentation for the given edge
+ //
+ // Pre: The edge's source node has pathNumber set if the edge has a
+ // non-zero path number increment.
+ //
+ // Post: Edge's target node has a pathNumber set to the path number Value
+ // corresponding to the value of the path register after edge's
+ // execution.
+ void insertInstrumentationStartingAt(
+ BLInstrumentationEdge* edge,
+ BLInstrumentationDag* dag);
+
+ // If this edge is a critical edge, then inserts a node at this edge.
+ // This edge becomes the first edge, and a new BallLarusEdge is created.
+ bool splitCritical(BLInstrumentationEdge* edge, BLInstrumentationDag* dag);
+
+ // Inserts instrumentation according to the marked edges in dag. Phony
+ // edges must be unlinked from the DAG, but accessible from the
+ // backedges. Dag must have initializations, path number increments, and
+ // counter increments present.
+ //
+ // Counter storage is created here.
+ void insertInstrumentation( BLInstrumentationDag& dag, Module &M);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ PathProfiler() : ModulePass(ID) {
+ initializePathProfilerPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual const char *getPassName() const {
+ return "Path Profiler";
+ }
+};
+} // end anonymous namespace
+
+// Should we print the dot-graphs
+static cl::opt<bool> DotPathDag("path-profile-pathdag", cl::Hidden,
+ cl::desc("Output the path profiling DAG for each function."));
+
+// Register the path profiler as a pass
+char PathProfiler::ID = 0;
+INITIALIZE_PASS(PathProfiler, "insert-path-profiling",
+ "Insert instrumentation for Ball-Larus path profiling",
+ false, false)
+
+ModulePass *llvm::createPathProfilerPass() { return new PathProfiler(); }
+
+namespace llvm {
+ class PathProfilingFunctionTable {};
+
+ // Type for global array storing references to hashes or arrays
+ template<bool xcompile> class TypeBuilder<PathProfilingFunctionTable,
+ xcompile> {
+ public:
+ static const StructType *get(LLVMContext& C) {
+ return( StructType::get(
+ C, TypeBuilder<types::i<32>, xcompile>::get(C), // type
+ TypeBuilder<types::i<32>, xcompile>::get(C), // array size
+ TypeBuilder<types::i<8>*, xcompile>::get(C), // array/hash ptr
+ NULL));
+ }
+ };
+
+ typedef TypeBuilder<PathProfilingFunctionTable, true>
+ ftEntryTypeBuilder;
+
+ // BallLarusEdge << operator overloading
+ raw_ostream& operator<<(raw_ostream& os,
+ const BLInstrumentationEdge& edge) {
+ os << "[" << edge.getSource()->getName() << " -> "
+ << edge.getTarget()->getName() << "] init: "
+ << (edge.isInitialization() ? "yes" : "no")
+ << " incr:" << edge.getIncrement() << " cinc: "
+ << (edge.isCounterIncrement() ? "yes" : "no");
+ return(os);
+ }
+}
+
+// Creates a new BLInstrumentationNode from a BasicBlock.
+BLInstrumentationNode::BLInstrumentationNode(BasicBlock* BB) :
+ BallLarusNode(BB),
+ _startingPathNumber(NULL), _endingPathNumber(NULL), _pathPHI(NULL) {}
+
+// Constructor for BLInstrumentationEdge.
+BLInstrumentationEdge::BLInstrumentationEdge(BLInstrumentationNode* source,
+ BLInstrumentationNode* target)
+ : BallLarusEdge(source, target, 0),
+ _increment(0), _isInSpanningTree(false), _isInitialization(false),
+ _isCounterIncrement(false), _hasInstrumentation(false) {}
+
+// Sets the target node of this edge. Required to split edges.
+void BLInstrumentationEdge::setTarget(BallLarusNode* node) {
+ _target = node;
+}
+
+// Returns whether this edge is in the spanning tree.
+bool BLInstrumentationEdge::isInSpanningTree() const {
+ return(_isInSpanningTree);
+}
+
+// Sets whether this edge is in the spanning tree.
+void BLInstrumentationEdge::setIsInSpanningTree(bool isInSpanningTree) {
+ _isInSpanningTree = isInSpanningTree;
+}
+
+// Returns whether this edge will be instrumented with a path number
+// initialization.
+bool BLInstrumentationEdge::isInitialization() const {
+ return(_isInitialization);
+}
+
+// Sets whether this edge will be instrumented with a path number
+// initialization.
+void BLInstrumentationEdge::setIsInitialization(bool isInitialization) {
+ _isInitialization = isInitialization;
+}
+
+// Returns whether this edge will be instrumented with a path counter
+// increment. Notice this is incrementing the path counter
+// corresponding to the path number register. The path number
+// increment is determined by getIncrement().
+bool BLInstrumentationEdge::isCounterIncrement() const {
+ return(_isCounterIncrement);
+}
+
+// Sets whether this edge will be instrumented with a path counter
+// increment.
+void BLInstrumentationEdge::setIsCounterIncrement(bool isCounterIncrement) {
+ _isCounterIncrement = isCounterIncrement;
+}
+
+// Gets the path number increment that this edge will be instrumented
+// with. This is distinct from the path counter increment and the
+// weight. The counter increment counts the number of executions of
+// some path, whereas the path number keeps track of which path number
+// the program is on.
+long BLInstrumentationEdge::getIncrement() const {
+ return(_increment);
+}
+
+// Set whether this edge will be instrumented with a path number
+// increment.
+void BLInstrumentationEdge::setIncrement(long increment) {
+ _increment = increment;
+}
+
+// True iff the edge has already been instrumented.
+bool BLInstrumentationEdge::hasInstrumentation() {
+ return(_hasInstrumentation);
+}
+
+// Set whether this edge has been instrumented.
+void BLInstrumentationEdge::setHasInstrumentation(bool hasInstrumentation) {
+ _hasInstrumentation = hasInstrumentation;
+}
+
+// Returns the successor number of this edge in the source.
+unsigned BLInstrumentationEdge::getSuccessorNumber() {
+ BallLarusNode* sourceNode = getSource();
+ BallLarusNode* targetNode = getTarget();
+ BasicBlock* source = sourceNode->getBlock();
+ BasicBlock* target = targetNode->getBlock();
+
+ if(source == NULL || target == NULL)
+ return(0);
+
+ TerminatorInst* terminator = source->getTerminator();
+
+ unsigned i;
+ for(i=0; i < terminator->getNumSuccessors(); i++) {
+ if(terminator->getSuccessor(i) == target)
+ break;
+ }
+
+ return(i);
+}
+
+// BLInstrumentationDag constructor initializes a DAG for the given Function.
+BLInstrumentationDag::BLInstrumentationDag(Function &F) : BallLarusDag(F),
+ _counterArray(0) {
+}
+
+// Returns the Exit->Root edge. This edge is required for creating
+// directed cycles in the algorithm for moving instrumentation off of
+// the spanning tree
+BallLarusEdge* BLInstrumentationDag::getExitRootEdge() {
+ BLEdgeIterator erEdge = getExit()->succBegin();
+ return(*erEdge);
+}
+
+BLEdgeVector BLInstrumentationDag::getCallPhonyEdges () {
+ BLEdgeVector callEdges;
+
+ for( BLEdgeIterator edge = _edges.begin(), end = _edges.end();
+ edge != end; edge++ ) {
+ if( (*edge)->getType() == BallLarusEdge::CALLEDGE_PHONY )
+ callEdges.push_back(*edge);
+ }
+
+ return callEdges;
+}
+
+// Gets the path counter array
+GlobalVariable* BLInstrumentationDag::getCounterArray() {
+ return _counterArray;
+}
+
+void BLInstrumentationDag::setCounterArray(GlobalVariable* c) {
+ _counterArray = c;
+}
+
+// Calculates the increments for the chords, thereby removing
+// instrumentation from the spanning tree edges. Implementation is based on
+// the algorithm in Figure 4 of [Ball94]
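+// After this runs, the chord increments along any root-to-exit path sum to
+// that path's number, so edges on the spanning tree itself need no
+// instrumentation.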
+void BLInstrumentationDag::calculateChordIncrements() {
+ calculateChordIncrementsDfs(0, getRoot(), NULL);
+
+ BLInstrumentationEdge* chord;
+ for(BLEdgeIterator chordEdge = _chordEdges.begin(),
+ end = _chordEdges.end(); chordEdge != end; chordEdge++) {
+ chord = (BLInstrumentationEdge*) *chordEdge;
+ chord->setIncrement(chord->getIncrement() + chord->getWeight());
+ }
+}
+
+// Updates the state when an edge has been split
+void BLInstrumentationDag::splitUpdate(BLInstrumentationEdge* formerEdge,
+ BasicBlock* newBlock) {
+ BallLarusNode* oldTarget = formerEdge->getTarget();
+ BallLarusNode* newNode = addNode(newBlock);
+ formerEdge->setTarget(newNode);
+ newNode->addPredEdge(formerEdge);
+
+ DEBUG(dbgs() << " Edge split: " << *formerEdge << "\n");
+
+ oldTarget->removePredEdge(formerEdge);
+ BallLarusEdge* newEdge = addEdge(newNode, oldTarget,0);
+
+ if( formerEdge->getType() == BallLarusEdge::BACKEDGE ||
+ formerEdge->getType() == BallLarusEdge::SPLITEDGE) {
+ newEdge->setType(formerEdge->getType());
+ newEdge->setPhonyRoot(formerEdge->getPhonyRoot());
+ newEdge->setPhonyExit(formerEdge->getPhonyExit());
+ formerEdge->setType(BallLarusEdge::NORMAL);
+ formerEdge->setPhonyRoot(NULL);
+ formerEdge->setPhonyExit(NULL);
+ }
+}
+
+// Calculates a spanning tree of the DAG ignoring cycles. Whichever
+// edges are in the spanning tree will not be instrumented, but this
+// implementation does not try to minimize the instrumentation overhead
+// by trying to find hot edges.
+void BLInstrumentationDag::calculateSpanningTree() {
+ std::stack<BallLarusNode*> dfsStack;
+
+ for(BLNodeIterator nodeIt = _nodes.begin(), end = _nodes.end();
+ nodeIt != end; nodeIt++) {
+ (*nodeIt)->setColor(BallLarusNode::WHITE);
+ }
+
+ dfsStack.push(getRoot());
+ while(dfsStack.size() > 0) {
+ BallLarusNode* node = dfsStack.top();
+ dfsStack.pop();
+
+ if(node->getColor() == BallLarusNode::WHITE)
+ continue;
+
+ BallLarusNode* nextNode;
+ bool forward = true;
+ BLEdgeIterator succEnd = node->succEnd();
+
+ node->setColor(BallLarusNode::WHITE);
+ // first iterate over successors then predecessors
+ for(BLEdgeIterator edge = node->succBegin(), predEnd = node->predEnd();
+ edge != predEnd; edge++) {
+ if(edge == succEnd) {
+ edge = node->predBegin();
+ forward = false;
+ }
+
+ // Ignore split edges
+ if ((*edge)->getType() == BallLarusEdge::SPLITEDGE)
+ continue;
+
+ nextNode = forward? (*edge)->getTarget(): (*edge)->getSource();
+ if(nextNode->getColor() != BallLarusNode::WHITE) {
+ nextNode->setColor(BallLarusNode::WHITE);
+ makeEdgeSpanning((BLInstrumentationEdge*)(*edge));
+ }
+ }
+ }
+
+ for(BLEdgeIterator edge = _edges.begin(), end = _edges.end();
+ edge != end; edge++) {
+ BLInstrumentationEdge* instEdge = (BLInstrumentationEdge*) (*edge);
+ // safe since createEdge is overridden
+ if(!instEdge->isInSpanningTree() && (*edge)->getType()
+ != BallLarusEdge::SPLITEDGE)
+ _chordEdges.push_back(instEdge);
+ }
+}
+
+// Pushes initialization further down in order to group the first
+// increment and initialization.
+void BLInstrumentationDag::pushInitialization() {
+ BLInstrumentationEdge* exitRootEdge =
+ (BLInstrumentationEdge*) getExitRootEdge();
+ exitRootEdge->setIsInitialization(true);
+ pushInitializationFromEdge(exitRootEdge);
+}
+
+// Pushes the path counter increments up in order to group the last path
+// number increment.
+void BLInstrumentationDag::pushCounters() {
+ BLInstrumentationEdge* exitRootEdge =
+ (BLInstrumentationEdge*) getExitRootEdge();
+ exitRootEdge->setIsCounterIncrement(true);
+ pushCountersFromEdge(exitRootEdge);
+}
+
+// Removes phony edges from the successor list of the source, and the
+// predecessor list of the target.
+void BLInstrumentationDag::unlinkPhony() {
+ BallLarusEdge* edge;
+
+ for(BLEdgeIterator next = _edges.begin(),
+ end = _edges.end(); next != end; next++) {
+ edge = (*next);
+
+ if( edge->getType() == BallLarusEdge::BACKEDGE_PHONY ||
+ edge->getType() == BallLarusEdge::SPLITEDGE_PHONY ||
+ edge->getType() == BallLarusEdge::CALLEDGE_PHONY ) {
+ unlinkEdge(edge);
+ }
+ }
+}
+
+// Generate a .dot graph to represent the DAG and pathNumbers
+void BLInstrumentationDag::generateDotGraph() {
+ std::string errorInfo;
+ std::string functionName = getFunction().getNameStr();
+ std::string filename = "pathdag." + functionName + ".dot";
+
+ DEBUG (dbgs() << "Writing '" << filename << "'...\n");
+ raw_fd_ostream dotFile(filename.c_str(), errorInfo);
+
+ if (!errorInfo.empty()) {
+ errs() << "Error opening '" << filename.c_str() <<"' for writing!";
+ errs() << "\n";
+ return;
+ }
+
+ dotFile << "digraph " << functionName << " {\n";
+
+ for( BLEdgeIterator edge = _edges.begin(), end = _edges.end();
+ edge != end; edge++) {
+ std::string sourceName = (*edge)->getSource()->getName();
+ std::string targetName = (*edge)->getTarget()->getName();
+
+ dotFile << "\t\"" << sourceName.c_str() << "\" -> \""
+ << targetName.c_str() << "\" ";
+
+ long inc = ((BLInstrumentationEdge*)(*edge))->getIncrement();
+
+ switch( (*edge)->getType() ) {
+ case BallLarusEdge::NORMAL:
+ dotFile << "[label=" << inc << "] [color=black];\n";
+ break;
+
+ case BallLarusEdge::BACKEDGE:
+ dotFile << "[color=cyan];\n";
+ break;
+
+ case BallLarusEdge::BACKEDGE_PHONY:
+ dotFile << "[label=" << inc
+ << "] [color=blue];\n";
+ break;
+
+ case BallLarusEdge::SPLITEDGE:
+ dotFile << "[color=violet];\n";
+ break;
+
+ case BallLarusEdge::SPLITEDGE_PHONY:
+ dotFile << "[label=" << inc << "] [color=red];\n";
+ break;
+
+ case BallLarusEdge::CALLEDGE_PHONY:
+ dotFile << "[label=" << inc << "] [color=green];\n";
+ break;
+ }
+ }
+
+ dotFile << "}\n";
+}
+
+// Allows subclasses to determine which type of Node is created.
+// Override this method to produce subclasses of BallLarusNode if
+// necessary. The destructor of BallLarusDag will call free on each pointer
+// created.
+BallLarusNode* BLInstrumentationDag::createNode(BasicBlock* BB) {
+ return( new BLInstrumentationNode(BB) );
+}
+
+// Allows subclasses to determine which type of Edge is created.
+// Override this method to produce subclasses of BallLarusEdge if
+// necessary. The destructor of BallLarusDag will call free on each pointer
+// created.
+BallLarusEdge* BLInstrumentationDag::createEdge(BallLarusNode* source,
+ BallLarusNode* target, unsigned edgeNumber) {
+ // One can cast from BallLarusNode to BLInstrumentationNode since createNode
+ // is overridden to produce BLInstrumentationNode.
+ return( new BLInstrumentationEdge((BLInstrumentationNode*)source,
+ (BLInstrumentationNode*)target) );
+}
+
+// Gets the Value corresponding to the pathNumber register, constant,
+// or phinode. Used by the instrumentation code to remember path
+// number Values.
+Value* BLInstrumentationNode::getStartingPathNumber(){
+ return(_startingPathNumber);
+}
+
+// Sets the Value of the pathNumber. Used by the instrumentation code.
+void BLInstrumentationNode::setStartingPathNumber(Value* pathNumber) {
+ DEBUG(dbgs() << " SPN-" << getName() << " <-- " << (pathNumber ?
+ pathNumber->getNameStr() : "unused") << "\n");
+ _startingPathNumber = pathNumber;
+}
+
+Value* BLInstrumentationNode::getEndingPathNumber(){
+ return(_endingPathNumber);
+}
+
+void BLInstrumentationNode::setEndingPathNumber(Value* pathNumber) {
+ DEBUG(dbgs() << " EPN-" << getName() << " <-- "
+ << (pathNumber ? pathNumber->getNameStr() : "unused") << "\n");
+ _endingPathNumber = pathNumber;
+}
+
+// Get the PHINode Instruction for this node. Used by instrumentation
+// code.
+PHINode* BLInstrumentationNode::getPathPHI() {
+ return(_pathPHI);
+}
+
+// Set the PHINode Instruction for this node. Used by instrumentation
+// code.
+void BLInstrumentationNode::setPathPHI(PHINode* pathPHI) {
+ _pathPHI = pathPHI;
+}
+
+// Removes the edge from the appropriate predecessor and successor
+// lists.
+void BLInstrumentationDag::unlinkEdge(BallLarusEdge* edge) {
+ if(edge == getExitRootEdge())
+ DEBUG(dbgs() << " Removing exit->root edge\n");
+
+ edge->getSource()->removeSuccEdge(edge);
+ edge->getTarget()->removePredEdge(edge);
+}
+
+// Makes an edge part of the spanning tree.
+void BLInstrumentationDag::makeEdgeSpanning(BLInstrumentationEdge* edge) {
+ edge->setIsInSpanningTree(true);
+ _treeEdges.push_back(edge);
+}
+
+// Pushes initialization and calls itself recursively.
+void BLInstrumentationDag::pushInitializationFromEdge(
+ BLInstrumentationEdge* edge) {
+ BallLarusNode* target;
+
+ target = edge->getTarget();
+ if( target->getNumberPredEdges() > 1 || target == getExit() ) {
+ return;
+ } else {
+ for(BLEdgeIterator next = target->succBegin(),
+ end = target->succEnd(); next != end; next++) {
+ BLInstrumentationEdge* intoEdge = (BLInstrumentationEdge*) *next;
+
+ // Skip split edges
+ if (intoEdge->getType() == BallLarusEdge::SPLITEDGE)
+ continue;
+
+ intoEdge->setIncrement(intoEdge->getIncrement() +
+ edge->getIncrement());
+ intoEdge->setIsInitialization(true);
+ pushInitializationFromEdge(intoEdge);
+ }
+
+ edge->setIncrement(0);
+ edge->setIsInitialization(false);
+ }
+}
+
+// Pushes path counter increments up recursively.
+void BLInstrumentationDag::pushCountersFromEdge(BLInstrumentationEdge* edge) {
+ BallLarusNode* source;
+
+ source = edge->getSource();
+ if(source->getNumberSuccEdges() > 1 || source == getRoot()
+ || edge->isInitialization()) {
+ return;
+ } else {
+ for(BLEdgeIterator previous = source->predBegin(),
+ end = source->predEnd(); previous != end; previous++) {
+ BLInstrumentationEdge* fromEdge = (BLInstrumentationEdge*) *previous;
+
+ // Skip split edges
+ if (fromEdge->getType() == BallLarusEdge::SPLITEDGE)
+ continue;
+
+ fromEdge->setIncrement(fromEdge->getIncrement() +
+ edge->getIncrement());
+ fromEdge->setIsCounterIncrement(true);
+ pushCountersFromEdge(fromEdge);
+ }
+
+ edge->setIncrement(0);
+ edge->setIsCounterIncrement(false);
+ }
+}
+
+// Depth first algorithm for determining the chord increments.
+void BLInstrumentationDag::calculateChordIncrementsDfs(long weight,
+ BallLarusNode* v, BallLarusEdge* e) {
+ BLInstrumentationEdge* f;
+
+ for(BLEdgeIterator treeEdge = _treeEdges.begin(),
+ end = _treeEdges.end(); treeEdge != end; treeEdge++) {
+ f = (BLInstrumentationEdge*) *treeEdge;
+ if(e != f && v == f->getTarget()) {
+ calculateChordIncrementsDfs(
+ calculateChordIncrementsDir(e,f)*(weight) +
+ f->getWeight(), f->getSource(), f);
+ }
+ if(e != f && v == f->getSource()) {
+ calculateChordIncrementsDfs(
+ calculateChordIncrementsDir(e,f)*(weight) +
+ f->getWeight(), f->getTarget(), f);
+ }
+ }
+
+ for(BLEdgeIterator chordEdge = _chordEdges.begin(),
+ end = _chordEdges.end(); chordEdge != end; chordEdge++) {
+ f = (BLInstrumentationEdge*) *chordEdge;
+ if(v == f->getSource() || v == f->getTarget()) {
+ f->setIncrement(f->getIncrement() +
+ calculateChordIncrementsDir(e,f)*weight);
+ }
+ }
+}
+
+// Determines the relative direction of two edges.
+int BLInstrumentationDag::calculateChordIncrementsDir(BallLarusEdge* e,
+ BallLarusEdge* f) {
+ if( e == NULL)
+ return(1);
+ else if(e->getSource() == f->getTarget()
+ || e->getTarget() == f->getSource())
+ return(1);
+
+ return(-1);
+}
+
+// Creates an increment constant representing incr.
+ConstantInt* PathProfiler::createIncrementConstant(long incr,
+ int bitsize) {
+ return(ConstantInt::get(IntegerType::get(*Context, 32), incr));
+}
+
+// Creates an increment constant representing the value in
+// edge->getIncrement().
+ConstantInt* PathProfiler::createIncrementConstant(
+ BLInstrumentationEdge* edge) {
+ return(createIncrementConstant(edge->getIncrement(), 32));
+}
+
+// Finds the insertion point after pathNumber in block. PathNumber may
+// be NULL.
+BasicBlock::iterator PathProfiler::getInsertionPoint(BasicBlock* block, Value*
+ pathNumber) {
+ if(pathNumber == NULL || isa<ConstantInt>(pathNumber)
+ || (((Instruction*)(pathNumber))->getParent()) != block) {
+ return(block->getFirstNonPHI());
+ } else {
+ Instruction* pathNumberInst = (Instruction*) (pathNumber);
+ BasicBlock::iterator insertPoint;
+ BasicBlock::iterator end = block->end();
+
+ for(insertPoint = block->begin();
+ insertPoint != end; insertPoint++) {
+ Instruction* insertInst = &(*insertPoint);
+
+ if(insertInst == pathNumberInst)
+ return(++insertPoint);
+ }
+
+ return(insertPoint);
+ }
+}
+
+// A PHINode is created in the node, and its values initialized to -1U.
+void PathProfiler::preparePHI(BLInstrumentationNode* node) {
+ BasicBlock* block = node->getBlock();
+ BasicBlock::iterator insertPoint = block->getFirstNonPHI();
+ PHINode* phi = PHINode::Create(Type::getInt32Ty(*Context), "pathNumber",
+ insertPoint );
+ node->setPathPHI(phi);
+ node->setStartingPathNumber(phi);
+ node->setEndingPathNumber(phi);
+
+ for(pred_iterator predIt = pred_begin(node->getBlock()),
+ end = pred_end(node->getBlock()); predIt != end; predIt++) {
+ BasicBlock* pred = (*predIt);
+
+ if(pred != NULL)
+ phi->addIncoming(createIncrementConstant((long)-1, 32), pred);
+ }
+}
+
+// Inserts source's pathNumber Value* into target. Target may or may not
+// have multiple predecessors, and may or may not have its phiNode
+// initialized.
+void PathProfiler::pushValueIntoNode(BLInstrumentationNode* source,
+ BLInstrumentationNode* target) {
+ if(target->getBlock() == NULL)
+ return;
+
+
+ if(target->getNumberPredEdges() <= 1) {
+ assert(target->getStartingPathNumber() == NULL &&
+ "Target already has path number");
+ target->setStartingPathNumber(source->getEndingPathNumber());
+ target->setEndingPathNumber(source->getEndingPathNumber());
+ DEBUG(dbgs() << " Passing path number"
+ << (source->getEndingPathNumber() ? "" : " (null)")
+ << " value through.\n");
+ } else {
+ if(target->getPathPHI() == NULL) {
+ DEBUG(dbgs() << " Initializing PHI node for block '"
+ << target->getName() << "'\n");
+ preparePHI(target);
+ }
+ pushValueIntoPHI(target, source);
+ DEBUG(dbgs() << " Passing number value into PHI for block '"
+ << target->getName() << "'\n");
+ }
+}
+
+// Inserts source's pathNumber Value* into the appropriate slot of
+// target's phiNode.
+void PathProfiler::pushValueIntoPHI(BLInstrumentationNode* target,
+ BLInstrumentationNode* source) {
+ PHINode* phi = target->getPathPHI();
+ assert(phi != NULL && " Tried to push value into node with PHI, but node"
+ " actually had no PHI.");
+ phi->removeIncomingValue(source->getBlock(), false);
+ phi->addIncoming(source->getEndingPathNumber(), source->getBlock());
+}
+
+// The Value* in node, oldVal, is updated with a Value* corresponding to
+// oldVal + addition.
+void PathProfiler::insertNumberIncrement(BLInstrumentationNode* node,
+ Value* addition, bool atBeginning) {
+ BasicBlock* block = node->getBlock();
+ assert(node->getStartingPathNumber() != NULL);
+ assert(node->getEndingPathNumber() != NULL);
+
+ BasicBlock::iterator insertPoint;
+
+ if( atBeginning )
+ insertPoint = block->getFirstNonPHI();
+ else
+ insertPoint = block->getTerminator();
+
+ DEBUG(errs() << " Creating addition instruction.\n");
+ Value* newpn = BinaryOperator::Create(Instruction::Add,
+ node->getStartingPathNumber(),
+ addition, "pathNumber", insertPoint);
+
+ node->setEndingPathNumber(newpn);
+
+ if( atBeginning )
+ node->setStartingPathNumber(newpn);
+}
+
+// Creates a counter increment in the given node. The Value* in node is
+// taken as the index into an array or hash table. The hash table access
+// is a call to the runtime.
+void PathProfiler::insertCounterIncrement(Value* incValue,
+ BasicBlock::iterator insertPoint,
+ BLInstrumentationDag* dag,
+ bool increment) {
+ // Counter increment for array
+ if( dag->getNumberOfPaths() <= HASH_THRESHHOLD ) {
+ // Get pointer to the array location
+ std::vector<Value*> gepIndices(2);
+ gepIndices[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
+ gepIndices[1] = incValue;
+
+ GetElementPtrInst* pcPointer =
+ GetElementPtrInst::Create(dag->getCounterArray(),
+ gepIndices.begin(), gepIndices.end(),
+ "counterInc", insertPoint);
+
+ // Load from the array - call it oldPC
+ LoadInst* oldPc = new LoadInst(pcPointer, "oldPC", insertPoint);
+
+ // Test to see whether adding 1 will overflow the counter
+ ICmpInst* isMax = new ICmpInst(insertPoint, CmpInst::ICMP_ULT, oldPc,
+ createIncrementConstant(0xffffffff, 32),
+ "isMax");
+
+ // Select increment for the path counter based on overflow
+ SelectInst* inc =
+ SelectInst::Create( isMax, createIncrementConstant(increment?1:-1,32),
+ createIncrementConstant(0,32),
+ "pathInc", insertPoint);
+
+ // newPc = oldPc + inc
+ BinaryOperator* newPc = BinaryOperator::Create(Instruction::Add,
+ oldPc, inc, "newPC",
+ insertPoint);
+
+ // Store back in to the array
+ new StoreInst(newPc, pcPointer, insertPoint);
+ } else { // Counter increment for hash
+ std::vector<Value*> args(2);
+ args[0] = ConstantInt::get(Type::getInt32Ty(*Context),
+ currentFunctionNumber);
+ args[1] = incValue;
+
+ CallInst::Create(
+ increment ? llvmIncrementHashFunction : llvmDecrementHashFunction,
+ args.begin(), args.end(), "", insertPoint);
+ }
+}
+
+// Inserts instrumentation for the given edge
+//
+// Pre: The edge's source node has pathNumber set if the edge has a non-zero
+// path number increment.
+//
+// Post: Edge's target node has a pathNumber set to the path number Value
+// corresponding to the value of the path register after edge's
+// execution.
+//
+// FIXME: This should be reworked so it's not recursive.
+void PathProfiler::insertInstrumentationStartingAt(BLInstrumentationEdge* edge,
+ BLInstrumentationDag* dag) {
+ // Mark the edge as instrumented
+ edge->setHasInstrumentation(true);
+ DEBUG(dbgs() << "\nInstrumenting edge: " << (*edge) << "\n");
+
+ // create a new node for this edge's instrumentation
+ splitCritical(edge, dag);
+
+ BLInstrumentationNode* sourceNode = (BLInstrumentationNode*)edge->getSource();
+ BLInstrumentationNode* targetNode = (BLInstrumentationNode*)edge->getTarget();
+ BLInstrumentationNode* instrumentNode;
+ BLInstrumentationNode* nextSourceNode;
+
+ bool atBeginning = false;
+
+ // The source node has only one successor, so any instrumentation can be
+ // simply inserted into it without splitting
+ if( sourceNode->getBlock() && sourceNode->getNumberSuccEdges() <= 1) {
+ DEBUG(dbgs() << " Potential instructions to be placed in: "
+ << sourceNode->getName() << " (at end)\n");
+ instrumentNode = sourceNode;
+ nextSourceNode = targetNode; // ... since we never made any new nodes
+ }
+
+ // The target node only has one predecessor, so we can safely insert edge
+ // instrumentation into it. If there was splitting, it must have been
+ // successful.
+ else if( targetNode->getNumberPredEdges() == 1 ) {
+ DEBUG(dbgs() << " Potential instructions to be placed in: "
+ << targetNode->getName() << " (at beginning)\n");
+ pushValueIntoNode(sourceNode, targetNode);
+ instrumentNode = targetNode;
+ nextSourceNode = NULL; // ... otherwise we'll just keep splitting
+ atBeginning = true;
+ }
+
+ // Somehow, splitting must have failed.
+ else {
+ errs() << "Instrumenting could not split a critical edge.\n";
+ DEBUG(dbgs() << " Couldn't split edge " << (*edge) << ".\n");
+ return;
+ }
+
+ // Insert instrumentation if this is a back or split edge
+ if( edge->getType() == BallLarusEdge::BACKEDGE ||
+ edge->getType() == BallLarusEdge::SPLITEDGE ) {
+ BLInstrumentationEdge* top =
+ (BLInstrumentationEdge*) edge->getPhonyRoot();
+ BLInstrumentationEdge* bottom =
+ (BLInstrumentationEdge*) edge->getPhonyExit();
+
+ assert( top->isInitialization() && " Top phony edge did not"
+ " contain a path number initialization.");
+ assert( bottom->isCounterIncrement() && " Bottom phony edge"
+ " did not contain a path counter increment.");
+
+ // split edge has yet to be initialized
+ if( !instrumentNode->getEndingPathNumber() ) {
+ instrumentNode->setStartingPathNumber(createIncrementConstant(0,32));
+ instrumentNode->setEndingPathNumber(createIncrementConstant(0,32));
+ }
+
+ BasicBlock::iterator insertPoint = atBeginning ?
+ instrumentNode->getBlock()->getFirstNonPHI() :
+ instrumentNode->getBlock()->getTerminator();
+
+ // add information from the bottom edge, if it exists
+ if( bottom->getIncrement() ) {
+ Value* newpn =
+ BinaryOperator::Create(Instruction::Add,
+ instrumentNode->getStartingPathNumber(),
+ createIncrementConstant(bottom),
+ "pathNumber", insertPoint);
+ instrumentNode->setEndingPathNumber(newpn);
+ }
+
+ insertCounterIncrement(instrumentNode->getEndingPathNumber(),
+ insertPoint, dag);
+
+ if( atBeginning )
+ instrumentNode->setStartingPathNumber(createIncrementConstant(top));
+
+ instrumentNode->setEndingPathNumber(createIncrementConstant(top));
+
+ // Check for path counter increments
+ if( top->isCounterIncrement() ) {
+ insertCounterIncrement(instrumentNode->getEndingPathNumber(),
+ instrumentNode->getBlock()->getTerminator(),dag);
+ instrumentNode->setEndingPathNumber(0);
+ }
+ }
+
+ // Insert instrumentation if this is a normal edge
+ else {
+ BasicBlock::iterator insertPoint = atBeginning ?
+ instrumentNode->getBlock()->getFirstNonPHI() :
+ instrumentNode->getBlock()->getTerminator();
+
+ if( edge->isInitialization() ) { // initialize path number
+ instrumentNode->setEndingPathNumber(createIncrementConstant(edge));
+ } else if( edge->getIncrement() ) {// increment path number
+ Value* newpn =
+ BinaryOperator::Create(Instruction::Add,
+ instrumentNode->getStartingPathNumber(),
+ createIncrementConstant(edge),
+ "pathNumber", insertPoint);
+ instrumentNode->setEndingPathNumber(newpn);
+
+ if( atBeginning )
+ instrumentNode->setStartingPathNumber(newpn);
+ }
+
+ // Check for path counter increments
+ if( edge->isCounterIncrement() ) {
+ insertCounterIncrement(instrumentNode->getEndingPathNumber(),
+ insertPoint, dag);
+ instrumentNode->setEndingPathNumber(0);
+ }
+ }
+
+ // Push it along
+ if (nextSourceNode && instrumentNode->getEndingPathNumber())
+ pushValueIntoNode(instrumentNode, nextSourceNode);
+
+ // Add all the successors
+ for( BLEdgeIterator next = targetNode->succBegin(),
+ end = targetNode->succEnd(); next != end; next++ ) {
+ // So long as it is un-instrumented, instrument it recursively
+ if( !((BLInstrumentationEdge*)(*next))->hasInstrumentation() )
+ insertInstrumentationStartingAt((BLInstrumentationEdge*)*next,dag);
+ else
+ DEBUG(dbgs() << " Edge " << *(BLInstrumentationEdge*)(*next)
+ << " already instrumented.\n");
+ }
+}
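
Per the FIXME above, the recursion can reach a stack depth proportional to the
number of edges. A minimal sketch of an iterative replacement, where
instrumentEdge is a hypothetical helper holding the body above minus the
trailing successor loop:

    void PathProfiler::insertInstrumentationStartingAt(BLInstrumentationEdge* root,
                                                       BLInstrumentationDag* dag) {
      std::vector<BLInstrumentationEdge*> worklist(1, root);
      while (!worklist.empty()) {
        BLInstrumentationEdge* edge = worklist.back();
        worklist.pop_back();
        if (edge->hasInstrumentation())
          continue;
        instrumentEdge(edge, dag); // hypothetical: everything above the loop
        BLInstrumentationNode* target = (BLInstrumentationNode*)edge->getTarget();
        for (BLEdgeIterator next = target->succBegin(), end = target->succEnd();
             next != end; next++)
          worklist.push_back((BLInstrumentationEdge*)*next);
      }
    }
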
+
+// Inserts instrumentation according to the marked edges in the DAG. Phony
+// edges must already be unlinked from the DAG but remain reachable from the
+// backedges. The DAG must have initializations, path number increments, and
+// counter increments present.
+//
+// Counter storage is created here.
+void PathProfiler::insertInstrumentation(
+ BLInstrumentationDag& dag, Module &M) {
+
+ BLInstrumentationEdge* exitRootEdge =
+ (BLInstrumentationEdge*) dag.getExitRootEdge();
+ insertInstrumentationStartingAt(exitRootEdge, &dag);
+
+ // Iterate through each call edge and apply the appropriate hash increment
+ // and decrement functions
+ BLEdgeVector callEdges = dag.getCallPhonyEdges();
+ for( BLEdgeIterator edge = callEdges.begin(),
+ end = callEdges.end(); edge != end; edge++ ) {
+ BLInstrumentationNode* node =
+ (BLInstrumentationNode*)(*edge)->getSource();
+ BasicBlock::iterator insertPoint = node->getBlock()->getFirstNonPHI();
+
+ // Find the first function call
+ while( ((Instruction&)(*insertPoint)).getOpcode() != Instruction::Call )
+ insertPoint++;
+
+ DEBUG(dbgs() << "\nInstrumenting method call block '"
+ << node->getBlock()->getNameStr() << "'\n");
+ DEBUG(dbgs() << " Path number initialized: "
+ << ((node->getStartingPathNumber()) ? "yes" : "no") << "\n");
+
+ Value* newpn;
+ if( node->getStartingPathNumber() ) {
+ long inc = ((BLInstrumentationEdge*)(*edge))->getIncrement();
+ if ( inc )
+ newpn = BinaryOperator::Create(Instruction::Add,
+ node->getStartingPathNumber(),
+ createIncrementConstant(inc,32),
+ "pathNumber", insertPoint);
+ else
+ newpn = node->getStartingPathNumber();
+ } else {
+ newpn = (Value*)createIncrementConstant(
+ ((BLInstrumentationEdge*)(*edge))->getIncrement(), 32);
+ }
+
+ insertCounterIncrement(newpn, insertPoint, &dag);
+ insertCounterIncrement(newpn, node->getBlock()->getTerminator(),
+ &dag, false);
+ }
+}
+
+// Per-function body of the pass; invoked by runOnModule for each function
+void PathProfiler::runOnFunction(std::vector<Constant*> &ftInit,
+ Function &F, Module &M) {
+ // Build DAG from CFG
+ BLInstrumentationDag dag = BLInstrumentationDag(F);
+ dag.init();
+
+ // give each path a unique integer value
+ dag.calculatePathNumbers();
+
+ // modify path increments to increase the efficiency
+ // of instrumentation
+ dag.calculateSpanningTree();
+ dag.calculateChordIncrements();
+ dag.pushInitialization();
+ dag.pushCounters();
+ dag.unlinkPhony();
+
+ // potentially generate .dot graph for the dag
+ if (DotPathDag)
+ dag.generateDotGraph ();
+
+ // Should we store the information in an array or a hash table?
+ if( dag.getNumberOfPaths() <= HASH_THRESHHOLD ) {
+ const Type* t = ArrayType::get(Type::getInt32Ty(*Context),
+ dag.getNumberOfPaths());
+
+ dag.setCounterArray(new GlobalVariable(M, t, false,
+ GlobalValue::InternalLinkage,
+ Constant::getNullValue(t), ""));
+ }
+
+ insertInstrumentation(dag, M);
+
+ // Add to global function reference table
+ unsigned type;
+ const Type* voidPtr = TypeBuilder<types::i<8>*, true>::get(*Context);
+
+ if( dag.getNumberOfPaths() <= HASH_THRESHHOLD )
+ type = ProfilingArray;
+ else
+ type = ProfilingHash;
+
+ std::vector<Constant*> entryArray(3);
+ entryArray[0] = createIncrementConstant(type,32);
+ entryArray[1] = createIncrementConstant(dag.getNumberOfPaths(),32);
+ entryArray[2] = dag.getCounterArray() ?
+ ConstantExpr::getBitCast(dag.getCounterArray(), voidPtr) :
+ Constant::getNullValue(voidPtr);
+
+ const StructType* at = ftEntryTypeBuilder::get(*Context);
+ ConstantStruct* functionEntry =
+ (ConstantStruct*)ConstantStruct::get(at, entryArray);
+ ftInit.push_back(functionEntry);
+}
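
Each element pushed onto ftInit is a three-field record; seen from the runtime
side the layout is roughly the following (a sketch; the field names are
illustrative, the real type comes from ftEntryTypeBuilder):

    struct FunctionPathTableEntry {
      uint32_t type;      // ProfilingArray or ProfilingHash
      uint32_t numPaths;  // dag.getNumberOfPaths()
      void    *counters;  // bitcast counter array, or null when hashing is used
    };
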
+
+// Output the bitcode if we want to observe instrumentation changes
+#define PRINT_MODULE dbgs() << \
+ "\n\n============= MODULE BEGIN ===============\n" << M << \
+ "\n============== MODULE END ================\n"
+
+bool PathProfiler::runOnModule(Module &M) {
+ Context = &M.getContext();
+
+ DEBUG(dbgs()
+ << "****************************************\n"
+ << "****************************************\n"
+ << "** **\n"
+ << "** PATH PROFILING INSTRUMENTATION **\n"
+ << "** **\n"
+ << "****************************************\n"
+ << "****************************************\n");
+
+ // No main, no instrumentation!
+ Function *Main = M.getFunction("main");
+
+ // Using Fortran? Its entry point may be MAIN__ instead, so fall back to it
+ if (!Main)
+ Main = M.getFunction("MAIN__");
+
+ if (!Main) {
+ errs() << "WARNING: cannot insert path profiling into a module"
+ << " with no main function!\n";
+ return false;
+ }
+
+ BasicBlock::iterator insertPoint = Main->getEntryBlock().getFirstNonPHI();
+
+ llvmIncrementHashFunction = M.getOrInsertFunction(
+ "llvm_increment_path_count",
+ Type::getVoidTy(*Context), // return type
+ Type::getInt32Ty(*Context), // function number
+ Type::getInt32Ty(*Context), // path number
+ NULL );
+
+ llvmDecrementHashFunction = M.getOrInsertFunction(
+ "llvm_decrement_path_count",
+ Type::getVoidTy(*Context), // return type
+ Type::getInt32Ty(*Context), // function number
+ Type::getInt32Ty(*Context), // path number
+ NULL );
+
+ std::vector<Constant*> ftInit;
+ unsigned functionNumber = 0;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; F++) {
+ if (F->isDeclaration())
+ continue;
+
+ DEBUG(dbgs() << "Function: " << F->getNameStr() << "\n");
+ functionNumber++;
+
+ // set function number
+ currentFunctionNumber = functionNumber;
+ runOnFunction(ftInit, *F, M);
+ }
+
+ const Type *t = ftEntryTypeBuilder::get(*Context);
+ const ArrayType* ftArrayType = ArrayType::get(t, ftInit.size());
+ Constant* ftInitConstant = ConstantArray::get(ftArrayType, ftInit);
+
+ DEBUG(dbgs() << " ftArrayType:" << *ftArrayType << "\n");
+
+ GlobalVariable* functionTable =
+ new GlobalVariable(M, ftArrayType, false, GlobalValue::InternalLinkage,
+ ftInitConstant, "functionPathTable");
+ const Type *eltType = ftArrayType->getTypeAtIndex((unsigned)0);
+ InsertProfilingInitCall(Main, "llvm_start_path_profiling", functionTable,
+ PointerType::getUnqual(eltType));
+
+ DEBUG(PRINT_MODULE);
+
+ return true;
+}
+
+// If the edge is a critical edge, splits it by inserting a new node; the
+// original edge then targets the new node, and a new BallLarusEdge is created
+// from the new node to the original target. Returns true if the edge was split.
+bool PathProfiler::splitCritical(BLInstrumentationEdge* edge,
+ BLInstrumentationDag* dag) {
+ unsigned succNum = edge->getSuccessorNumber();
+ BallLarusNode* sourceNode = edge->getSource();
+ BallLarusNode* targetNode = edge->getTarget();
+ BasicBlock* sourceBlock = sourceNode->getBlock();
+ BasicBlock* targetBlock = targetNode->getBlock();
+
+ if(sourceBlock == NULL || targetBlock == NULL
+ || sourceNode->getNumberSuccEdges() <= 1
+ || targetNode->getNumberPredEdges() == 1 ) {
+ return(false);
+ }
+
+ TerminatorInst* terminator = sourceBlock->getTerminator();
+
+ if( SplitCriticalEdge(terminator, succNum, this, false)) {
+ BasicBlock* newBlock = terminator->getSuccessor(succNum);
+ dag->splitUpdate(edge, newBlock);
+ return(true);
+ } else
+ return(false);
+}
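
Taken together, this file implements the classic Ball-Larus scheme: a path
register starts at zero, each instrumented edge adds a precomputed constant,
and the register's final value uniquely identifies the path taken. The
instrumented function behaves conceptually like this (a sketch, not code from
this patch):

    uint32_t r = 0;                        // initialization edge
    /* r += Inc(e) on each instrumented edge e along the executed path */
    if (numPaths <= HASH_THRESHHOLD)
      counters[r]++;                       // array storage, saturating
    else
      llvm_increment_path_count(fn, r);    // runtime hash table
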
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 1a30e9b..b57bbf6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -22,12 +22,13 @@
#include "llvm/Module.h"
void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
- GlobalValue *Array) {
+ GlobalValue *Array,
+ PointerType *arrayType) {
LLVMContext &Context = MainFn->getContext();
- const Type *ArgVTy =
+ const Type *ArgVTy =
PointerType::getUnqual(Type::getInt8PtrTy(Context));
- const PointerType *UIntPtr =
- Type::getInt32PtrTy(Context);
+ const PointerType *UIntPtr = arrayType ? arrayType :
+ Type::getInt32PtrTy(Context);
Module &M = *MainFn->getParent();
Constant *InitFn = M.getOrInsertFunction(FnName, Type::getInt32Ty(Context),
Type::getInt32Ty(Context),
@@ -71,9 +72,9 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
case 2:
AI = MainFn->arg_begin(); ++AI;
if (AI->getType() != ArgVTy) {
- Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,
+ Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,
false);
- InitCall->setArgOperand(1,
+ InitCall->setArgOperand(1,
CastInst::Create(opcode, AI, ArgVTy, "argv.cast", InitCall));
} else {
InitCall->setArgOperand(1, AI);
@@ -93,7 +94,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
opcode = CastInst::getCastOpcode(AI, true,
Type::getInt32Ty(Context), true);
- InitCall->setArgOperand(0,
+ InitCall->setArgOperand(0,
CastInst::Create(opcode, AI, Type::getInt32Ty(Context),
"argc.cast", InitCall));
} else {
@@ -106,9 +107,10 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
- GlobalValue *CounterArray) {
+ GlobalValue *CounterArray, bool beginning) {
// Insert the increment after any alloca or PHI instructions...
- BasicBlock::iterator InsertPos = BB->getFirstNonPHI();
+ BasicBlock::iterator InsertPos = beginning ? BB->getFirstNonPHI() :
+ BB->getTerminator();
while (isa<AllocaInst>(InsertPos))
++InsertPos;
@@ -118,7 +120,7 @@ void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
std::vector<Constant*> Indices(2);
Indices[0] = Constant::getNullValue(Type::getInt32Ty(Context));
Indices[1] = ConstantInt::get(Type::getInt32Ty(Context), CounterNum);
- Constant *ElementPtr =
+ Constant *ElementPtr =
ConstantExpr::getGetElementPtr(CounterArray, &Indices[0],
Indices.size());
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.h b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.h
index 94efffe..a76e357 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.h
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.h
@@ -21,11 +21,14 @@ namespace llvm {
class Function;
class GlobalValue;
class BasicBlock;
+ class PointerType;
void InsertProfilingInitCall(Function *MainFn, const char *FnName,
- GlobalValue *Arr = 0);
+ GlobalValue *Arr = 0,
+ PointerType *arrayType = 0);
void IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
- GlobalValue *CounterArray);
+ GlobalValue *CounterArray,
+ bool beginning = true);
}
#endif
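
Both new parameters are defaulted so existing callers compile unchanged; the
path profiler is the user of the extended forms. The first call below appears
verbatim in PathProfiling.cpp above, and the second shows the new beginning
flag (the arguments other than the flag are placeholders):

    InsertProfilingInitCall(Main, "llvm_start_path_profiling", functionTable,
                            PointerType::getUnqual(eltType));
    IncrementCounterInBlock(BB, CounterNum, CounterArray, /*beginning=*/false);
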
diff --git a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
index ada086e..a5adb5e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -33,7 +33,9 @@ STATISTIC(NumRemoved, "Number of instructions removed");
namespace {
struct ADCE : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- ADCE() : FunctionPass(ID) {}
+ ADCE() : FunctionPass(ID) {
+ initializeADCEPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function& F);
@@ -45,7 +47,7 @@ namespace {
}
char ADCE::ID = 0;
-INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false);
+INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
bool ADCE::runOnFunction(Function& F) {
SmallPtrSet<Instruction*, 128> alive;
diff --git a/contrib/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp b/contrib/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
index b144678..cee5502 100644
--- a/contrib/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
@@ -41,7 +41,9 @@ STATISTIC(NumMoved, "Number of basic blocks moved");
namespace {
struct BlockPlacement : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- BlockPlacement() : FunctionPass(ID) {}
+ BlockPlacement() : FunctionPass(ID) {
+ initializeBlockPlacementPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -74,8 +76,11 @@ namespace {
}
char BlockPlacement::ID = 0;
-INITIALIZE_PASS(BlockPlacement, "block-placement",
- "Profile Guided Basic Block Placement", false, false);
+INITIALIZE_PASS_BEGIN(BlockPlacement, "block-placement",
+ "Profile Guided Basic Block Placement", false, false)
+INITIALIZE_AG_DEPENDENCY(ProfileInfo)
+INITIALIZE_PASS_END(BlockPlacement, "block-placement",
+ "Profile Guided Basic Block Placement", false, false)
FunctionPass *llvm::createBlockPlacementPass() { return new BlockPlacement(); }
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index e07b761..9536939 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -22,6 +22,8 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
@@ -31,6 +33,7 @@
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
@@ -39,31 +42,59 @@
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+STATISTIC(NumBlocksElim, "Number of blocks eliminated");
+STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
+STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
+STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
+ "sunken Cmps");
+STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
+ "of sunken Casts");
+STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
+ "computations were sunk");
+STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
+STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
+
static cl::opt<bool>
CriticalEdgeSplit("cgp-critical-edge-splitting",
cl::desc("Split critical edges during codegen prepare"),
- cl::init(true), cl::Hidden);
+ cl::init(false), cl::Hidden);
namespace {
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// transformation profitability.
const TargetLowering *TLI;
+ DominatorTree *DT;
ProfileInfo *PFI;
+
+ /// CurInstIterator - As we scan instructions optimizing them, this is the
+ /// next instruction to optimize. Xforms that can invalidate this should
+ /// update it.
+ BasicBlock::iterator CurInstIterator;
/// BackEdges - Keep a set of all the loop back edges.
///
SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
+
+ // Keeps track of non-local addresses that have been sunk into a block. This
+ // allows us to avoid inserting duplicate code for blocks with multiple
+ // load/stores of the same address.
+ DenseMap<Value*, Value*> SunkAddrs;
+
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetLowering *tli = 0)
- : FunctionPass(ID), TLI(tli) {}
+ : FunctionPass(ID), TLI(tli) {
+ initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<DominatorTree>();
AU.addPreserved<ProfileInfo>();
}
@@ -76,10 +107,9 @@ namespace {
bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
void EliminateMostlyEmptyBlock(BasicBlock *BB);
bool OptimizeBlock(BasicBlock &BB);
- bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
- DenseMap<Value*,Value*> &SunkAddrs);
- bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
- DenseMap<Value*,Value*> &SunkAddrs);
+ bool OptimizeInst(Instruction *I);
+ bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy);
+ bool OptimizeInlineAsmInst(CallInst *CS);
bool OptimizeCallInst(CallInst *CI);
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
@@ -89,7 +119,7 @@ namespace {
char CodeGenPrepare::ID = 0;
INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
- "Optimize for code generation", false, false);
+ "Optimize for code generation", false, false)
FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
return new CodeGenPrepare(TLI);
@@ -108,13 +138,16 @@ void CodeGenPrepare::findLoopBackEdges(const Function &F) {
bool CodeGenPrepare::runOnFunction(Function &F) {
bool EverMadeChange = false;
+ DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
// First pass, eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
- // Now find loop back edges.
- findLoopBackEdges(F);
+ // Now find loop back edges, but only if they are being used to decide which
+ // critical edges to split.
+ if (CriticalEdgeSplit)
+ findLoopBackEdges(F);
bool MadeChange = true;
while (MadeChange) {
@@ -123,6 +156,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
MadeChange |= OptimizeBlock(*BB);
EverMadeChange |= MadeChange;
}
+
+ SunkAddrs.clear();
+
return EverMadeChange;
}
@@ -297,11 +333,19 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
// The PHIs are now updated, change everything that refers to BB to use
// DestBB and remove BB.
BB->replaceAllUsesWith(DestBB);
+ if (DT) {
+ BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
+ BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
+ BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
+ DT->changeImmediateDominator(DestBB, NewIDom);
+ DT->eraseNode(BB);
+ }
if (PFI) {
PFI->replaceAllUses(BB, DestBB);
PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
}
BB->eraseFromParent();
+ ++NumBlocksElim;
DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
@@ -480,6 +524,7 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
// Replace a use of the cast with a use of the new cast.
TheUse = InsertedCast;
+ ++NumCastUses;
}
// If we removed all uses, nuke the cast.
@@ -537,6 +582,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
// Replace a use of the cmp with a use of the new cmp.
TheUse = InsertedCmp;
+ ++NumCmpUses;
}
// If we removed all uses, nuke the cmp.
@@ -563,14 +609,45 @@ protected:
} // end anonymous namespace
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
+ BasicBlock *BB = CI->getParent();
+
+ // Lower inline assembly if we can.
+ // If we found an inline asm expression, and if the target knows how to
+ // lower it to normal LLVM code, do so now.
+ if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+ if (TLI->ExpandInlineAsm(CI)) {
+ // Avoid invalidating the iterator.
+ CurInstIterator = BB->begin();
+ // Avoid processing instructions out of order, which could cause
+ // reuse before a value is defined.
+ SunkAddrs.clear();
+ return true;
+ }
+ // Sink address computing for memory operands into the block.
+ if (OptimizeInlineAsmInst(CI))
+ return true;
+ }
+
// Lower all uses of llvm.objectsize.*
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
const Type *ReturnTy = CI->getType();
Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
- CI->replaceAllUsesWith(RetVal);
- CI->eraseFromParent();
+
+ // Substituting this can cause recursive simplifications, which can
+ // invalidate our iterator. Use a WeakVH to hold onto it in case this
+ // happens.
+ WeakVH IterHandle(CurInstIterator);
+
+ ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0, DT);
+
+ // If the iterator instruction was recursively deleted, start over at the
+ // start of the block.
+ if (IterHandle != CurInstIterator) {
+ CurInstIterator = BB->begin();
+ SunkAddrs.clear();
+ }
return true;
}
@@ -588,6 +665,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
CodeGenPrepareFortifiedLibCalls Simplifier;
return Simplifier.fold(CI, TD);
}
+
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
@@ -610,13 +688,69 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
- const Type *AccessTy,
- DenseMap<Value*,Value*> &SunkAddrs) {
- // Figure out what addressing mode will be built up for this operation.
+ const Type *AccessTy) {
+ Value *Repl = Addr;
+
+ // Try to collapse single-value PHI nodes. This is necessary to undo
+ // unprofitable PRE transformations.
+ SmallVector<Value*, 8> worklist;
+ SmallPtrSet<Value*, 16> Visited;
+ worklist.push_back(Addr);
+
+ // Use a worklist to iteratively look through PHI nodes, and ensure that
+ // the addressing mode obtained from the non-PHI roots of the graph
+ // are equivalent.
+ Value *Consensus = 0;
+ unsigned NumUses = 0;
SmallVector<Instruction*, 16> AddrModeInsts;
- ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
- AddrModeInsts, *TLI);
-
+ ExtAddrMode AddrMode;
+ while (!worklist.empty()) {
+ Value *V = worklist.back();
+ worklist.pop_back();
+
+ // Break use-def graph loops.
+ if (Visited.count(V)) {
+ Consensus = 0;
+ break;
+ }
+
+ Visited.insert(V);
+
+ // For a PHI node, push all of its incoming values.
+ if (PHINode *P = dyn_cast<PHINode>(V)) {
+ for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
+ worklist.push_back(P->getIncomingValue(i));
+ continue;
+ }
+
+ // For non-PHIs, determine the addressing mode being computed.
+ SmallVector<Instruction*, 16> NewAddrModeInsts;
+ ExtAddrMode NewAddrMode =
+ AddressingModeMatcher::Match(V, AccessTy,MemoryInst,
+ NewAddrModeInsts, *TLI);
+
+ // Ensure that the obtained addressing mode is equivalent to that obtained
+ // for all other roots of the PHI traversal. Also, when choosing one
+ // such root as representative, select the one with the most uses in order
+ // to keep the cost modeling heuristics in AddressingModeMatcher applicable.
+ if (!Consensus || NewAddrMode == AddrMode) {
+ if (V->getNumUses() > NumUses) {
+ Consensus = V;
+ NumUses = V->getNumUses();
+ AddrMode = NewAddrMode;
+ AddrModeInsts = NewAddrModeInsts;
+ }
+ continue;
+ }
+
+ Consensus = 0;
+ break;
+ }
+
+ // If the addressing mode couldn't be determined, or if multiple different
+ // ones were determined, bail out now.
+ if (!Consensus) return false;
+
// Check to see if any of the instructions subsumed by this addr mode are
// non-local to I's BB.
bool AnyNonLocal = false;
@@ -719,60 +853,39 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
}
- MemoryInst->replaceUsesOfWith(Addr, SunkAddr);
+ MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
- if (Addr->use_empty()) {
- RecursivelyDeleteTriviallyDeadInstructions(Addr);
+ if (Repl->use_empty()) {
+ RecursivelyDeleteTriviallyDeadInstructions(Repl);
// This address is now available for reassignment, so erase the table entry;
// we don't want to match some completely different instruction.
SunkAddrs[Addr] = 0;
}
+ ++NumMemoryInsts;
return true;
}
/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
-bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
- DenseMap<Value*,Value*> &SunkAddrs) {
+bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
bool MadeChange = false;
- InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
-
- // Do a prepass over the constraints, canonicalizing them, and building up the
- // ConstraintOperands list.
- std::vector<InlineAsm::ConstraintInfo>
- ConstraintInfos = IA->ParseConstraints();
-
- /// ConstraintOperands - Information about all of the constraints.
- std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
- unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
- for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
- ConstraintOperands.
- push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
- TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();
-
- // Compute the value type for each operand.
- switch (OpInfo.Type) {
- case InlineAsm::isOutput:
- if (OpInfo.isIndirect)
- OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
- break;
- case InlineAsm::isInput:
- OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
- break;
- case InlineAsm::isClobber:
- // Nothing to do.
- break;
- }
+ TargetLowering::AsmOperandInfoVector
+ TargetConstraints = TLI->ParseConstraints(CS);
+ unsigned ArgNo = 0;
+ for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
+ TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
+
// Compute the constraint code and ConstraintType to use.
TLI->ComputeConstraintToUse(OpInfo, SDValue());
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.isIndirect) {
- Value *OpVal = OpInfo.CallOperandVal;
- MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
- }
+ Value *OpVal = CS->getArgOperand(ArgNo++);
+ MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
+ } else if (OpInfo.Type == InlineAsm::isInput)
+ ArgNo++;
}
return MadeChange;
@@ -794,7 +907,9 @@ bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
// If the load has other users and the truncate is not free, this probably
// isn't worthwhile.
if (!LI->hasOneUse() &&
- TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
+ TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
+ !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
+ !TLI->isTruncateFree(I->getType(), LI->getType()))
return false;
// Check whether the target supports casts folded into loads.
@@ -812,13 +927,14 @@ bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
// can fold it.
I->removeFromParent();
I->insertAfter(LI);
+ ++NumExtsMoved;
return true;
}
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
BasicBlock *DefBB = I->getParent();
- // If both result of the {s|z}xt and its source are live out, rewrite all
+ // If the result of a {s|z}ext and its source are both live out, rewrite all
// other uses of the source with result of extension.
Value *Src = I->getOperand(0);
if (Src->hasOneUse())
@@ -883,13 +999,83 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
// Replace a use of the {s|z}ext source with a use of the result.
TheUse = InsertedTrunc;
-
+ ++NumExtUses;
MadeChange = true;
}
return MadeChange;
}
+bool CodeGenPrepare::OptimizeInst(Instruction *I) {
+ if (PHINode *P = dyn_cast<PHINode>(I)) {
+ // It is possible for very late stage optimizations (such as SimplifyCFG)
+ // to introduce PHI nodes too late to be cleaned up. If we detect such a
+ // trivial PHI, go ahead and zap it here.
+ if (Value *V = SimplifyInstruction(P)) {
+ P->replaceAllUsesWith(V);
+ P->eraseFromParent();
+ ++NumPHIsElim;
+ return true;
+ }
+ return false;
+ }
+
+ if (CastInst *CI = dyn_cast<CastInst>(I)) {
+ // If the source of the cast is a constant, then this should have
+ // already been constant folded. The only reason NOT to constant fold
+ // it is if something (e.g. LSR) was careful to place the constant
+ // evaluation in a block other than the one that uses it (e.g. to hoist
+ // the address of globals out of a loop). If this is the case, we don't
+ // want to forward-subst the cast.
+ if (isa<Constant>(CI->getOperand(0)))
+ return false;
+
+ if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
+ return true;
+
+ if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
+ bool MadeChange = MoveExtToFormExtLoad(I);
+ return MadeChange | OptimizeExtUses(I);
+ }
+ return false;
+ }
+
+ if (CmpInst *CI = dyn_cast<CmpInst>(I))
+ return OptimizeCmpExpression(CI);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (TLI)
+ return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
+ return false;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ if (TLI)
+ return OptimizeMemoryInst(I, SI->getOperand(1),
+ SI->getOperand(0)->getType());
+ return false;
+ }
+
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
+ if (GEPI->hasAllZeroIndices()) {
+ /// The GEP operand must be a pointer, so must its result -> BitCast
+ Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
+ GEPI->getName(), GEPI);
+ GEPI->replaceAllUsesWith(NC);
+ GEPI->eraseFromParent();
+ ++NumGEPsElim;
+ OptimizeInst(NC);
+ return true;
+ }
+ return false;
+ }
+
+ if (CallInst *CI = dyn_cast<CallInst>(I))
+ return OptimizeCallInst(CI);
+
+ return false;
+}
+
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
@@ -908,74 +1094,11 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
}
}
- // Keep track of non-local addresses that have been sunk into this block.
- // This allows us to avoid inserting duplicate code for blocks with multiple
- // load/stores of the same address.
- DenseMap<Value*, Value*> SunkAddrs;
-
- for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
- Instruction *I = BBI++;
+ SunkAddrs.clear();
- if (CastInst *CI = dyn_cast<CastInst>(I)) {
- // If the source of the cast is a constant, then this should have
- // already been constant folded. The only reason NOT to constant fold
- // it is if something (e.g. LSR) was careful to place the constant
- // evaluation in a block other than then one that uses it (e.g. to hoist
- // the address of globals out of a loop). If this is the case, we don't
- // want to forward-subst the cast.
- if (isa<Constant>(CI->getOperand(0)))
- continue;
-
- bool Change = false;
- if (TLI) {
- Change = OptimizeNoopCopyExpression(CI, *TLI);
- MadeChange |= Change;
- }
-
- if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
- MadeChange |= MoveExtToFormExtLoad(I);
- MadeChange |= OptimizeExtUses(I);
- }
- } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
- MadeChange |= OptimizeCmpExpression(CI);
- } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (TLI)
- MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
- SunkAddrs);
- } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (TLI)
- MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
- SI->getOperand(0)->getType(),
- SunkAddrs);
- } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
- if (GEPI->hasAllZeroIndices()) {
- /// The GEP operand must be a pointer, so must its result -> BitCast
- Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
- GEPI->getName(), GEPI);
- GEPI->replaceAllUsesWith(NC);
- GEPI->eraseFromParent();
- MadeChange = true;
- BBI = NC;
- }
- } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
- // If we found an inline asm expession, and if the target knows how to
- // lower it to normal LLVM code, do so now.
- if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
- if (TLI->ExpandInlineAsm(CI)) {
- BBI = BB.begin();
- // Avoid processing instructions out of order, which could cause
- // reuse before a value is defined.
- SunkAddrs.clear();
- } else
- // Sink address computing for memory operands into the block.
- MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
- } else {
- // Other CallInst optimizations that don't need to muck with the
- // enclosing iterator here.
- MadeChange |= OptimizeCallInst(CI);
- }
- }
- }
+ CurInstIterator = BB.begin();
+ for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
+ MadeChange |= OptimizeInst(CurInstIterator++);
return MadeChange;
}
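
The core of the new OptimizeMemoryInst logic is the consensus walk: the
address is traced through PHI nodes to its non-PHI roots, and sinking proceeds
only if every root matches the same addressing mode. Stripped of the use-count
tie-breaking, the walk reduces to this (a sketch; matchMode stands for the
AddressingModeMatcher call):

    Value *Consensus = 0;
    ExtAddrMode Mode;
    while (!worklist.empty()) {
      Value *V = worklist.back(); worklist.pop_back();
      if (Visited.count(V)) { Consensus = 0; break; }   // use-def cycle
      Visited.insert(V);
      if (PHINode *P = dyn_cast<PHINode>(V)) {          // fan out through PHIs
        for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
          worklist.push_back(P->getIncomingValue(i));
        continue;
      }
      ExtAddrMode M = matchMode(V);                     // hypothetical matcher
      if (Consensus && !(M == Mode)) { Consensus = 0; break; }
      if (!Consensus) { Consensus = V; Mode = M; }      // first root seen
    }
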
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index a0ea369..664c3f6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -34,7 +34,9 @@ STATISTIC(NumInstKilled, "Number of instructions killed");
namespace {
struct ConstantPropagation : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- ConstantPropagation() : FunctionPass(ID) {}
+ ConstantPropagation() : FunctionPass(ID) {
+ initializeConstantPropagationPass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F);
@@ -46,7 +48,7 @@ namespace {
char ConstantPropagation::ID = 0;
INITIALIZE_PASS(ConstantPropagation, "constprop",
- "Simple constant propagation", false, false);
+ "Simple constant propagation", false, false)
FunctionPass *llvm::createConstantPropagationPass() {
return new ConstantPropagation();
diff --git a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 0d4e45d..be12973 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -16,6 +16,7 @@
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -30,18 +31,20 @@ STATISTIC(NumCmps, "Number of comparisons propagated");
namespace {
class CorrelatedValuePropagation : public FunctionPass {
LazyValueInfo *LVI;
-
+
bool processSelect(SelectInst *SI);
bool processPHI(PHINode *P);
bool processMemAccess(Instruction *I);
bool processCmp(CmpInst *C);
-
+
public:
static char ID;
- CorrelatedValuePropagation(): FunctionPass(ID) { }
-
+ CorrelatedValuePropagation(): FunctionPass(ID) {
+ initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry());
+ }
+
bool runOnFunction(Function &F);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LazyValueInfo>();
}
@@ -49,8 +52,11 @@ namespace {
}
char CorrelatedValuePropagation::ID = 0;
-INITIALIZE_PASS(CorrelatedValuePropagation, "correlated-propagation",
- "Value Propagation", false, false);
+INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
+ "Value Propagation", false, false)
+INITIALIZE_PASS_DEPENDENCY(LazyValueInfo)
+INITIALIZE_PASS_END(CorrelatedValuePropagation, "correlated-propagation",
+ "Value Propagation", false, false)
// Public interface to the Value Propagation pass
Pass *llvm::createCorrelatedValuePropagationPass() {
@@ -60,46 +66,51 @@ Pass *llvm::createCorrelatedValuePropagationPass() {
bool CorrelatedValuePropagation::processSelect(SelectInst *S) {
if (S->getType()->isVectorTy()) return false;
if (isa<Constant>(S->getOperand(0))) return false;
-
+
Constant *C = LVI->getConstant(S->getOperand(0), S->getParent());
if (!C) return false;
-
+
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return false;
-
- S->replaceAllUsesWith(S->getOperand(CI->isOne() ? 1 : 2));
+
+ Value *ReplaceWith = S->getOperand(1);
+ Value *Other = S->getOperand(2);
+ if (!CI->isOne()) std::swap(ReplaceWith, Other);
+ if (ReplaceWith == S) ReplaceWith = UndefValue::get(S->getType());
+
+ S->replaceAllUsesWith(ReplaceWith);
S->eraseFromParent();
++NumSelects;
-
+
return true;
}
bool CorrelatedValuePropagation::processPHI(PHINode *P) {
bool Changed = false;
-
+
BasicBlock *BB = P->getParent();
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
Value *Incoming = P->getIncomingValue(i);
if (isa<Constant>(Incoming)) continue;
-
+
Constant *C = LVI->getConstantOnEdge(P->getIncomingValue(i),
P->getIncomingBlock(i),
BB);
if (!C) continue;
-
+
P->setIncomingValue(i, C);
Changed = true;
}
-
- if (Value *ConstVal = P->hasConstantValue()) {
- P->replaceAllUsesWith(ConstVal);
+
+ if (Value *V = SimplifyInstruction(P)) {
+ P->replaceAllUsesWith(V);
P->eraseFromParent();
Changed = true;
}
-
+
++NumPhis;
-
+
return Changed;
}
@@ -109,12 +120,12 @@ bool CorrelatedValuePropagation::processMemAccess(Instruction *I) {
Pointer = L->getPointerOperand();
else
Pointer = cast<StoreInst>(I)->getPointerOperand();
-
+
if (isa<Constant>(Pointer)) return false;
-
+
Constant *C = LVI->getConstant(Pointer, I->getParent());
if (!C) return false;
-
+
++NumMemAccess;
I->replaceUsesOfWith(Pointer, C);
return true;
@@ -130,32 +141,32 @@ bool CorrelatedValuePropagation::processCmp(CmpInst *C) {
if (isa<Instruction>(Op0) &&
cast<Instruction>(Op0)->getParent() == C->getParent())
return false;
-
+
Constant *Op1 = dyn_cast<Constant>(C->getOperand(1));
if (!Op1) return false;
-
+
pred_iterator PI = pred_begin(C->getParent()), PE = pred_end(C->getParent());
if (PI == PE) return false;
-
- LazyValueInfo::Tristate Result = LVI->getPredicateOnEdge(C->getPredicate(),
+
+ LazyValueInfo::Tristate Result = LVI->getPredicateOnEdge(C->getPredicate(),
C->getOperand(0), Op1, *PI, C->getParent());
if (Result == LazyValueInfo::Unknown) return false;
++PI;
while (PI != PE) {
- LazyValueInfo::Tristate Res = LVI->getPredicateOnEdge(C->getPredicate(),
+ LazyValueInfo::Tristate Res = LVI->getPredicateOnEdge(C->getPredicate(),
C->getOperand(0), Op1, *PI, C->getParent());
if (Res != Result) return false;
++PI;
}
-
+
++NumCmps;
-
+
if (Result == LazyValueInfo::True)
C->replaceAllUsesWith(ConstantInt::getTrue(C->getContext()));
else
C->replaceAllUsesWith(ConstantInt::getFalse(C->getContext()));
-
+
C->eraseFromParent();
return true;
@@ -163,9 +174,9 @@ bool CorrelatedValuePropagation::processCmp(CmpInst *C) {
bool CorrelatedValuePropagation::runOnFunction(Function &F) {
LVI = &getAnalysis<LazyValueInfo>();
-
+
bool FnChanged = false;
-
+
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
bool BBChanged = false;
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ) {
@@ -187,14 +198,9 @@ bool CorrelatedValuePropagation::runOnFunction(Function &F) {
break;
}
}
-
- // Propagating correlated values might leave cruft around.
- // Try to clean it up before we continue.
- if (BBChanged)
- SimplifyInstructionsInBlock(FI);
-
+
FnChanged |= BBChanged;
}
-
+
return FnChanged;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
index 87ea803..dbb68f3 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DCE.cpp
@@ -35,7 +35,9 @@ namespace {
//
struct DeadInstElimination : public BasicBlockPass {
static char ID; // Pass identification, replacement for typeid
- DeadInstElimination() : BasicBlockPass(ID) {}
+ DeadInstElimination() : BasicBlockPass(ID) {
+ initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnBasicBlock(BasicBlock &BB) {
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
@@ -57,7 +59,7 @@ namespace {
char DeadInstElimination::ID = 0;
INITIALIZE_PASS(DeadInstElimination, "die",
- "Dead Instruction Elimination", false, false);
+ "Dead Instruction Elimination", false, false)
Pass *llvm::createDeadInstEliminationPass() {
return new DeadInstElimination();
@@ -70,7 +72,9 @@ namespace {
//
struct DCE : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- DCE() : FunctionPass(ID) {}
+ DCE() : FunctionPass(ID) {
+ initializeDCEPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -81,7 +85,7 @@ namespace {
}
char DCE::ID = 0;
-INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false);
+INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
bool DCE::runOnFunction(Function &F) {
// Start out with all of the instructions in the worklist...
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index c8fd9d9..867a06a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -19,17 +19,20 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumFastStores, "Number of stores deleted");
@@ -37,58 +40,107 @@ STATISTIC(NumFastOther , "Number of other instrs removed");
namespace {
struct DSE : public FunctionPass {
- TargetData *TD;
+ AliasAnalysis *AA;
+ MemoryDependenceAnalysis *MD;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(ID) {}
+ DSE() : FunctionPass(ID), AA(0), MD(0) {
+ initializeDSEPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F) {
- bool Changed = false;
-
+ AA = &getAnalysis<AliasAnalysis>();
+ MD = &getAnalysis<MemoryDependenceAnalysis>();
DominatorTree &DT = getAnalysis<DominatorTree>();
+ bool Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
// Only check non-dead blocks. Dead blocks may have strange pointer
// cycles that will confuse alias analysis.
if (DT.isReachableFromEntry(I))
Changed |= runOnBasicBlock(*I);
+
+ AA = 0; MD = 0;
return Changed;
}
bool runOnBasicBlock(BasicBlock &BB);
- bool handleFreeWithNonTrivialDependency(const CallInst *F,
- MemDepResult Dep);
+ bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
- bool RemoveUndeadPointers(Value *Ptr, uint64_t killPointerSize,
- BasicBlock::iterator &BBI,
- SmallPtrSet<Value*, 64> &deadPointers);
- void DeleteDeadInstruction(Instruction *I,
- SmallPtrSet<Value*, 64> *deadPointers = 0);
-
+ void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+ SmallPtrSet<Value*, 16> &DeadStackObjects);
- // getAnalysisUsage - We require post dominance frontiers (aka Control
- // Dependence Graph)
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<DominatorTree>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<MemoryDependenceAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
AU.addPreserved<DominatorTree>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
-
- unsigned getPointerSize(Value *V) const;
};
}
char DSE::ID = 0;
-INITIALIZE_PASS(DSE, "dse", "Dead Store Elimination", false, false);
+INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
-/// doesClobberMemory - Does this instruction clobber (write without reading)
-/// some memory?
-static bool doesClobberMemory(Instruction *I) {
+//===----------------------------------------------------------------------===//
+// Helper functions
+//===----------------------------------------------------------------------===//
+
+/// DeleteDeadInstruction - Delete this instruction. Before we do, go through
+/// and zero out all the operands of this instruction. If any of them become
+/// dead, delete them and the computation tree that feeds them.
+///
+/// If ValueSet is non-null, remove any deleted instructions from it as well.
+///
+static void DeleteDeadInstruction(Instruction *I,
+ MemoryDependenceAnalysis &MD,
+ SmallPtrSet<Value*, 16> *ValueSet = 0) {
+ SmallVector<Instruction*, 32> NowDeadInsts;
+
+ NowDeadInsts.push_back(I);
+ --NumFastOther;
+
+ // Before we touch this instruction, remove it from memdep!
+ do {
+ Instruction *DeadInst = NowDeadInsts.pop_back_val();
+ ++NumFastOther;
+
+ // This instruction is dead, zap it, in stages. Start by removing it from
+ // MemDep, which needs to know the operands and needs it to be in the
+ // function.
+ MD.removeInstruction(DeadInst);
+
+ for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
+ Value *Op = DeadInst->getOperand(op);
+ DeadInst->setOperand(op, 0);
+
+ // If this operand just became dead, add it to the NowDeadInsts list.
+ if (!Op->use_empty()) continue;
+
+ if (Instruction *OpI = dyn_cast<Instruction>(Op))
+ if (isInstructionTriviallyDead(OpI))
+ NowDeadInsts.push_back(OpI);
+ }
+
+ DeadInst->eraseFromParent();
+
+ if (ValueSet) ValueSet->erase(DeadInst);
+ } while (!NowDeadInsts.empty());
+}
+
+
+/// hasMemoryWrite - Does this instruction write some memory? This only returns
+/// true for things that we can analyze with other helpers below.
+static bool hasMemoryWrite(Instruction *I) {
if (isa<StoreInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
@@ -106,146 +158,296 @@ static bool doesClobberMemory(Instruction *I) {
return false;
}
-/// isElidable - If the value of this instruction and the memory it writes to is
-/// unused, may we delete this instrtction?
-static bool isElidable(Instruction *I) {
- assert(doesClobberMemory(I));
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
- return II->getIntrinsicID() != Intrinsic::lifetime_end;
+/// getLocForWrite - Return a Location stored to by the specified instruction.
+static AliasAnalysis::Location
+getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ return AA.getLocation(SI);
+
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
+ // memcpy/memmove/memset.
+ AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
+ // If we don't have target data around, an unknown size in Location means
+ // that we should use the size of the pointee type. This isn't valid for
+ // memset/memcpy, which write more than an i8.
+ if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
+ return AliasAnalysis::Location();
+ return Loc;
+ }
+
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
+ if (II == 0) return AliasAnalysis::Location();
+
+ switch (II->getIntrinsicID()) {
+ default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+ case Intrinsic::init_trampoline:
+ // If we don't have target data around, an unknown size in Location means
+ // that we should use the size of the pointee type. This isn't valid for
+ // init.trampoline, which writes more than an i8.
+ if (AA.getTargetData() == 0) return AliasAnalysis::Location();
+
+ // FIXME: We don't know the size of the trampoline, so we can't really
+ // handle it here.
+ return AliasAnalysis::Location(II->getArgOperand(0));
+ case Intrinsic::lifetime_end: {
+ uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+ return AliasAnalysis::Location(II->getArgOperand(1), Len);
+ }
+ }
+}
+
+/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
+/// instruction if any.
+static AliasAnalysis::Location
+getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
+ assert(hasMemoryWrite(Inst) && "Unknown instruction case");
+
+ // The only instructions that both read and write are the mem transfer
+ // instructions (memcpy/memmove).
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
+ return AA.getLocationForSource(MTI);
+ return AliasAnalysis::Location();
+}
+
+
+/// isRemovable - If the value of this instruction and the memory it writes to
+/// is unused, may we delete this instruction?
+static bool isRemovable(Instruction *I) {
+ // Don't remove volatile stores.
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return !SI->isVolatile();
- return true;
+
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
+ default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
+ case Intrinsic::lifetime_end:
+ // Never remove dead lifetime_end's, e.g. because it is followed by a
+ // free.
+ return false;
+ case Intrinsic::init_trampoline:
+ // Always safe to remove init_trampoline.
+ return true;
+
+ case Intrinsic::memset:
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy:
+ // Don't remove volatile memory intrinsics.
+ return !cast<MemIntrinsic>(II)->isVolatile();
+ }
}
-/// getPointerOperand - Return the pointer that is being clobbered.
-static Value *getPointerOperand(Instruction *I) {
- assert(doesClobberMemory(I));
+/// getStoredPointerOperand - Return the pointer that is being written to.
+static Value *getStoredPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
- return MI->getArgOperand(0);
+ return MI->getDest();
IntrinsicInst *II = cast<IntrinsicInst>(I);
switch (II->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
return II->getArgOperand(0);
- case Intrinsic::lifetime_end:
- return II->getArgOperand(1);
}
}
-/// getStoreSize - Return the length in bytes of the write by the clobbering
-/// instruction. If variable or unknown, returns -1.
-static unsigned getStoreSize(Instruction *I, const TargetData *TD) {
- assert(doesClobberMemory(I));
- if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!TD) return -1u;
- return TD->getTypeStoreSize(SI->getOperand(0)->getType());
+static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
+ const TargetData *TD = AA.getTargetData();
+ if (TD == 0)
+ return AliasAnalysis::UnknownSize;
+
+ if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
+ // Get size information for the alloca
+ if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
+ return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
+ return AliasAnalysis::UnknownSize;
}
+
+ assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
+ const PointerType *PT = cast<PointerType>(V->getType());
+ return TD->getTypeAllocSize(PT->getElementType());
+}
- Value *Len;
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
- Len = MI->getLength();
- } else {
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: assert(false && "Unexpected intrinsic!");
- case Intrinsic::init_trampoline:
- return -1u;
- case Intrinsic::lifetime_end:
- Len = II->getArgOperand(0);
- break;
+/// isObjectPointerWithTrustworthySize - Return true if the specified Value* is
+/// pointing to an object with a pointer size we can trust.
+static bool isObjectPointerWithTrustworthySize(const Value *V) {
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
+ return !AI->isArrayAllocation();
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return !GV->mayBeOverridden();
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->hasByValAttr();
+ return false;
+}
+
+/// isCompleteOverwrite - Return true if a store to the 'Later' location
+/// completely overwrites a store to the 'Earlier' location.
+static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
+ const AliasAnalysis::Location &Earlier,
+ AliasAnalysis &AA) {
+ const Value *P1 = Earlier.Ptr->stripPointerCasts();
+ const Value *P2 = Later.Ptr->stripPointerCasts();
+
+ // If the start pointers are the same, we just have to compare sizes to see if
+ // the later store was larger than the earlier store.
+ if (P1 == P2) {
+ // If we don't know the sizes of either access, then we can't do a
+ // comparison.
+ if (Later.Size == AliasAnalysis::UnknownSize ||
+ Earlier.Size == AliasAnalysis::UnknownSize) {
+ // If we have no TargetData information around, then the size of the store
+ // is inferrable from the pointee type. If they are the same type, then
+ // we know that the store is safe.
+ if (AA.getTargetData() == 0)
+ return Later.Ptr->getType() == Earlier.Ptr->getType();
+ return false;
}
+
+ // Make sure that the Later size is >= the Earlier size.
+ if (Later.Size < Earlier.Size)
+ return false;
+ return true;
}
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(Len))
- if (!LenCI->isAllOnesValue())
- return LenCI->getZExtValue();
- return -1u;
+
+ // Otherwise, we have to have size information, and the later store has to be
+ // larger than the earlier one.
+ if (Later.Size == AliasAnalysis::UnknownSize ||
+ Earlier.Size == AliasAnalysis::UnknownSize ||
+ Later.Size <= Earlier.Size || AA.getTargetData() == 0)
+ return false;
+
+ // Check to see if the later store is to the entire object (either a global,
+ // an alloca, or a byval argument). If so, then it clearly overwrites any
+ // other store to the same object.
+ const TargetData &TD = *AA.getTargetData();
+
+ const Value *UO1 = GetUnderlyingObject(P1, &TD),
+ *UO2 = GetUnderlyingObject(P2, &TD);
+
+ // If we can't resolve the same pointers to the same object, then we can't
+ // analyze them at all.
+ if (UO1 != UO2)
+ return false;
+
+ // If the "Later" store is to a recognizable object, get its size.
+ if (isObjectPointerWithTrustworthySize(UO2)) {
+ uint64_t ObjectSize =
+ TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
+ if (ObjectSize == Later.Size)
+ return true;
+ }
+
+ // Okay, we have stores to two completely different pointers. Try to
+ // decompose the pointer into a "base + constant_offset" form. If the base
+ // pointers are equal, then we can reason about the two stores.
+ int64_t Off1 = 0, Off2 = 0;
+ const Value *BP1 = GetPointerBaseWithConstantOffset(P1, Off1, TD);
+ const Value *BP2 = GetPointerBaseWithConstantOffset(P2, Off2, TD);
+
+ // If the base pointers still differ, we have two completely different stores.
+ if (BP1 != BP2)
+ return false;
+
+ // Otherwise, we might have a situation like:
+ // store i16 -> P + 1 Byte
+ // store i32 -> P
+ // In this case, we see if the later store completely overlaps all bytes
+ // stored by the previous store.
+ if (Off1 < Off2 || // Earlier starts before Later.
+ Off1+Earlier.Size > Off2+Later.Size) // Earlier goes beyond Later.
+ return false;
+ // Otherwise, we have complete overlap.
+ return true;
}
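As an illustrative aside (not part of this commit), the final interval check can be read as a tiny standalone predicate; for the comment's example (store i16 at P+1 killed by store i32 at P) it is laterCoversEarlier(1, 2, 0, 4). Names here are illustrative, not LLVM API:

  #include <cstdint>

  // Sketch: the later store [Off2, Off2+LaterSize) must cover the earlier
  // store [Off1, Off1+EarlierSize) for the earlier store to be dead.
  static bool laterCoversEarlier(int64_t Off1, uint64_t EarlierSize,
                                 int64_t Off2, uint64_t LaterSize) {
    return Off1 >= Off2 &&                    // Earlier starts no sooner,
           Off1 + (int64_t)EarlierSize <=
           Off2 + (int64_t)LaterSize;         // and ends no later.
  }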
-/// isStoreAtLeastAsWideAs - Return true if the size of the store in I1 is
-/// greater than or equal to the store in I2. This returns false if we don't
-/// know.
+/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
+/// memory region into an identical pointer) then it doesn't actually make its
+/// input dead in the traditional sense. Consider this case:
+///
+/// memcpy(A <- B)
+/// memcpy(A <- A)
+///
+/// In this case, the second store to A does not make the first store to A dead.
+/// The usual situation isn't an explicit A<-A store like this (which can be
+/// trivially removed) but a case where two pointers may alias.
///
-static bool isStoreAtLeastAsWideAs(Instruction *I1, Instruction *I2,
- const TargetData *TD) {
- const Type *I1Ty = getPointerOperand(I1)->getType();
- const Type *I2Ty = getPointerOperand(I2)->getType();
+/// This function detects when it is unsafe to remove a dependent instruction
+/// because the DSE inducing instruction may be a self-read.
+static bool isPossibleSelfRead(Instruction *Inst,
+ const AliasAnalysis::Location &InstStoreLoc,
+ Instruction *DepWrite, AliasAnalysis &AA) {
+ // Self reads can only happen for instructions that read memory. Get the
+ // location read.
+ AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
+ if (InstReadLoc.Ptr == 0) return false; // Not a reading instruction.
- // Exactly the same type, must have exactly the same size.
- if (I1Ty == I2Ty) return true;
+ // If the read and written loc obviously don't alias, it isn't a read.
+ if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
- int I1Size = getStoreSize(I1, TD);
- int I2Size = getStoreSize(I2, TD);
+ // Okay, 'Inst' may copy over itself. However, we can still remove the
+ // DepWrite instruction if we can prove that it reads from the same location
+ // as Inst. This handles useful cases like:
+ // memcpy(A <- B)
+ // memcpy(A <- B)
+ // Here we don't know if A/B may alias, but we do know that B/B are must
+ // aliases, so removing the first memcpy is safe (assuming it writes no
+ // more bytes than the second one).
+ AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
- return I1Size != -1 && I2Size != -1 && I1Size >= I2Size;
+ if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
+ return false;
+
+ // If DepWrite doesn't read memory or if we can't prove it is a must alias,
+ // then it can't be considered dead.
+ return true;
}
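As an illustrative aside (not part of this commit), the two situations this function distinguishes look roughly like this at the C level; all names are illustrative:

  #include <cstring>

  // Safe: both copies read from B, so the reads must-alias and the first
  // copy is dead even if A and B might alias each other.
  void firstCopyDead(char *A, const char *B) {
    std::memcpy(A, B, 16);
    std::memcpy(A, B, 16);
  }

  // Not safe: if C may alias A, the second copy might read bytes the first
  // copy wrote, so the first copy must stay.
  void firstCopyLive(char *A, const char *B, const char *C) {
    std::memcpy(A, B, 16);
    std::memcpy(A, C, 16);
  }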
-bool DSE::runOnBasicBlock(BasicBlock &BB) {
- MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+//===----------------------------------------------------------------------===//
+// DSE Pass
+//===----------------------------------------------------------------------===//
+
+bool DSE::runOnBasicBlock(BasicBlock &BB) {
bool MadeChange = false;
// Do a top-down walk on the BB.
for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
Instruction *Inst = BBI++;
- // If we find a store or a free, get its memory dependence.
- if (!doesClobberMemory(Inst) && !isFreeCall(Inst))
- continue;
-
- MemDepResult InstDep = MD.getDependency(Inst);
-
- // Ignore non-local stores.
- // FIXME: cross-block DSE would be fun. :)
- if (InstDep.isNonLocal()) continue;
-
- // Handle frees whose dependencies are non-trivial.
- if (const CallInst *F = isFreeCall(Inst)) {
- MadeChange |= handleFreeWithNonTrivialDependency(F, InstDep);
+ // Handle 'free' calls specially.
+ if (CallInst *F = isFreeCall(Inst)) {
+ MadeChange |= HandleFree(F);
continue;
}
- // If not a definite must-alias dependency, ignore it.
- if (!InstDep.isDef())
+ // If we find something that writes memory, get its memory dependence.
+ if (!hasMemoryWrite(Inst))
continue;
-
- // If this is a store-store dependence, then the previous store is dead so
- // long as this store is at least as big as it.
- if (doesClobberMemory(InstDep.getInst())) {
- Instruction *DepStore = InstDep.getInst();
- if (isStoreAtLeastAsWideAs(Inst, DepStore, TD) &&
- isElidable(DepStore)) {
- // Delete the store and now-dead instructions that feed it.
- DeleteDeadInstruction(DepStore);
- ++NumFastStores;
- MadeChange = true;
- // DeleteDeadInstruction can delete the current instruction in loop
- // cases, reset BBI.
- BBI = Inst;
- if (BBI != BB.begin())
- --BBI;
- continue;
- }
- }
+ MemDepResult InstDep = MD->getDependency(Inst);
- if (!isElidable(Inst))
+ // Ignore non-local store liveness.
+ // FIXME: cross-block DSE would be fun. :)
+ if (InstDep.isNonLocal() ||
+ // Ignore self dependence, which happens in the entry block of the
+ // function.
+ InstDep.getInst() == Inst)
continue;
-
+
// If we're storing the same value back to a pointer that we just
// loaded from, then the store can be removed.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
- SI->getOperand(0) == DepLoad) {
+ SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
+ DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n "
+ << "LOAD: " << *DepLoad << "\n STORE: " << *SI << '\n');
+
// DeleteDeadInstruction can delete the current instruction. Save BBI
// in case we need it.
WeakVH NextInst(BBI);
- DeleteDeadInstruction(SI);
+ DeleteDeadInstruction(SI, *MD);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
@@ -258,24 +460,63 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
}
}
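As an illustrative aside (not part of this commit), the store-of-load case removed above corresponds to source like this sketch:

  // The store writes back the value just loaded from the same pointer, so
  // the store is a no-op and is deleted (only when it is non-volatile).
  int roundTrip(int *P) {
    int V = *P; // DepLoad
    *P = V;     // SI: removable
    return V;
  }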
- // If this is a lifetime end marker, we can throw away the store.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(InstDep.getInst())) {
- if (II->getIntrinsicID() == Intrinsic::lifetime_end) {
- // Delete the store and now-dead instructions that feed it.
- // DeleteDeadInstruction can delete the current instruction. Save BBI
- // in case we need it.
- WeakVH NextInst(BBI);
-
- DeleteDeadInstruction(Inst);
+ // Figure out what location is being stored to.
+ AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+
+ // If we didn't get a useful location, fail.
+ if (Loc.Ptr == 0)
+ continue;
+
+ while (!InstDep.isNonLocal()) {
+ // Get the memory clobbered by the instruction we depend on. MemDep will
+ // skip any instructions that 'Loc' clearly doesn't interact with. If we
+ // end up depending on a may- or must-aliased load, then we can't optimize
+ // away the store and we bail out. However, if we depend on something
+ // that overwrites the memory location we *can* potentially optimize it.
+ //
+ // Find out what memory location the dependent instruction stores.
+ Instruction *DepWrite = InstDep.getInst();
+ AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+ // If we didn't get a useful location, bail out.
+ if (DepLoc.Ptr == 0)
+ break;
+
+ // If we find a write that is a) removable (i.e., non-volatile), b) is
+ // completely obliterated by the store to 'Loc', and c) which we know that
+ // 'Inst' doesn't load from, then we can remove it.
+ if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA) &&
+ !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
+ DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
+ << *DepWrite << "\n KILLER: " << *Inst << '\n');
- if (NextInst == 0) // Next instruction deleted.
- BBI = BB.begin();
- else if (BBI != BB.begin()) // Revisit this instruction if possible.
- --BBI;
+ // Delete the store and now-dead instructions that feed it.
+ DeleteDeadInstruction(DepWrite, *MD);
++NumFastStores;
MadeChange = true;
- continue;
+
+ // DeleteDeadInstruction can delete the current instruction in loop
+ // cases, reset BBI.
+ BBI = Inst;
+ if (BBI != BB.begin())
+ --BBI;
+ break;
}
+
+ // If this is a may-aliased store that is clobbering the store value, we
+ // can keep searching past it for another must-aliased pointer that stores
+ // to the same location. For example, in:
+ // store -> P
+ // store -> Q
+ // store -> P
+ // we can remove the first store to P even though we don't know if P and Q
+ // alias.
+ if (DepWrite == &BB.front()) break;
+
+ // Can't look past this instruction if it might read 'Loc'.
+ if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
+ break;
+
+ InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
}
}
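As an illustrative aside (not part of this commit), the store -> P / store -> Q / store -> P pattern that the backward scan handles looks like this in source form:

  // The first store to P is dead: the scan steps past the store to Q (which
  // only writes memory, never reads P) and finds the must-aliased final
  // store to P.
  void overwriteThroughUnknown(int *P, int *Q) {
    *P = 1; // removable even though P and Q may alias
    *Q = 2;
    *P = 3;
  }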
@@ -287,26 +528,36 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
return MadeChange;
}
-/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
-/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(const CallInst *F,
- MemDepResult Dep) {
- AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
- Instruction *Dependency = Dep.getInst();
- if (!Dependency || !doesClobberMemory(Dependency) || !isElidable(Dependency))
- return false;
+/// HandleFree - Handle frees of entire structures whose dependency is a store
+/// to a field of that structure.
+bool DSE::HandleFree(CallInst *F) {
+ MemDepResult Dep = MD->getDependency(F);
+ do {
+ if (Dep.isNonLocal()) return false;
+
+ Instruction *Dependency = Dep.getInst();
+ if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+ return false;
- Value *DepPointer = getPointerOperand(Dependency)->getUnderlyingObject();
+ Value *DepPointer =
+ GetUnderlyingObject(getStoredPointerOperand(Dependency));
- // Check for aliasing.
- if (AA.alias(F->getArgOperand(0), 1, DepPointer, 1) !=
- AliasAnalysis::MustAlias)
- return false;
+ // Check for aliasing.
+ if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
+ return false;
+
+ // DCE instructions only used to calculate that store
+ DeleteDeadInstruction(Dependency, *MD);
+ ++NumFastStores;
+
+ // Inst's old Dependency is now deleted. Compute the next dependency,
+ // which may also be dead, as in
+ // s[0] = 0;
+ // s[1] = 0; // This has just been deleted.
+ // free(s);
+ Dep = MD->getDependency(F);
+ } while (!Dep.isNonLocal());
- // DCE instructions only used to calculate that store
- DeleteDeadInstruction(Dependency);
- ++NumFastStores;
return true;
}
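As an illustrative aside (not part of this commit), the dependency loop's comment corresponds to source like this sketch:

  #include <cstdlib>

  // Both stores are dead: the buffer is freed before anything reads it.
  // The loop above removes the second store first, re-queries the free's
  // dependency, and then removes the first store as well.
  void deadStoresBeforeFree() {
    int *S = static_cast<int *>(std::malloc(2 * sizeof(int)));
    S[0] = 0;
    S[1] = 0;
    std::free(S);
  }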
@@ -317,259 +568,163 @@ bool DSE::handleFreeWithNonTrivialDependency(const CallInst *F,
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
- AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
bool MadeChange = false;
- // Pointers alloca'd in this function are dead in the end block
- SmallPtrSet<Value*, 64> deadPointers;
+ // Keep track of all of the stack objects that are dead at the end of the
+ // function.
+ SmallPtrSet<Value*, 16> DeadStackObjects;
// Find all of the alloca'd pointers in the entry block.
BasicBlock *Entry = BB.getParent()->begin();
for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
- deadPointers.insert(AI);
+ DeadStackObjects.insert(AI);
// Treat byval arguments the same, stores to them are dead at the end of the
// function.
for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
AE = BB.getParent()->arg_end(); AI != AE; ++AI)
if (AI->hasByValAttr())
- deadPointers.insert(AI);
+ DeadStackObjects.insert(AI);
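As an illustrative aside (not part of this commit), a function in which both kinds of dead stack objects appear might look like this sketch ('byval' is the IR-level form of the by-value struct argument):

  struct Big { int Data[8]; };

  // 'Local' (an alloca) and 'Arg' (a byval copy) are both dead at the
  // return, so trailing stores into them can be deleted.
  int endBlockStores(Big Arg) {
    Big Local;
    Local.Data[0] = 1; // dead: Local is never read before returning
    Arg.Data[0] = 2;   // dead: callers never see writes to a byval copy
    return 0;
  }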
// Scan the basic block backwards
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
--BBI;
- // If we find a store whose pointer is dead.
- if (doesClobberMemory(BBI)) {
- if (isElidable(BBI)) {
- // See through pointer-to-pointer bitcasts
- Value *pointerOperand = getPointerOperand(BBI)->getUnderlyingObject();
-
- // Alloca'd pointers or byval arguments (which are functionally like
- // alloca's) are valid candidates for removal.
- if (deadPointers.count(pointerOperand)) {
- // DCE instructions only used to calculate that store.
- Instruction *Dead = BBI;
- ++BBI;
- DeleteDeadInstruction(Dead, &deadPointers);
- ++NumFastStores;
- MadeChange = true;
- continue;
- }
- }
-
- // Because a memcpy or memmove is also a load, we can't skip it if we
- // didn't remove it.
- if (!isa<MemTransferInst>(BBI))
+ // If we find a store, check to see if it points into a dead stack value.
+ if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
+ // See through pointer-to-pointer bitcasts
+ Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));
+
+ // Stores to stack values are valid candidates for removal.
+ if (DeadStackObjects.count(Pointer)) {
+ Instruction *Dead = BBI++;
+
+ DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
+ << *Dead << "\n Object: " << *Pointer << '\n');
+
+ // DCE instructions only used to calculate that store.
+ DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
+ ++NumFastStores;
+ MadeChange = true;
continue;
+ }
}
- Value *killPointer = 0;
- uint64_t killPointerSize = ~0UL;
+ // Remove any dead non-memory-mutating instructions.
+ if (isInstructionTriviallyDead(BBI)) {
+ Instruction *Inst = BBI++;
+ DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
+ ++NumFastOther;
+ MadeChange = true;
+ continue;
+ }
- // If we encounter a use of the pointer, it is no longer considered dead
- if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
- // However, if this load is unused and not volatile, we can go ahead and
- // remove it, and not have to worry about it making our pointer undead!
- if (L->use_empty() && !L->isVolatile()) {
- ++BBI;
- DeleteDeadInstruction(L, &deadPointers);
- ++NumFastOther;
- MadeChange = true;
- continue;
- }
-
- killPointer = L->getPointerOperand();
- } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
- killPointer = V->getOperand(0);
- } else if (isa<MemTransferInst>(BBI) &&
- isa<ConstantInt>(cast<MemTransferInst>(BBI)->getLength())) {
- killPointer = cast<MemTransferInst>(BBI)->getSource();
- killPointerSize = cast<ConstantInt>(
- cast<MemTransferInst>(BBI)->getLength())->getZExtValue();
- } else if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
- deadPointers.erase(A);
-
- // Dead alloca's can be DCE'd when we reach them
- if (A->use_empty()) {
- ++BBI;
- DeleteDeadInstruction(A, &deadPointers);
- ++NumFastOther;
- MadeChange = true;
- }
-
+ if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
+ DeadStackObjects.erase(A);
continue;
- } else if (CallSite CS = cast<Value>(BBI)) {
- // If this call does not access memory, it can't
- // be undeadifying any of our pointers.
- if (AA.doesNotAccessMemory(CS))
+ }
+
+ if (CallSite CS = cast<Value>(BBI)) {
+ // If this call does not access memory, it can't be loading any of our
+ // pointers.
+ if (AA->doesNotAccessMemory(CS))
continue;
- unsigned modRef = 0;
- unsigned other = 0;
+ unsigned NumModRef = 0, NumOther = 0;
- // Remove any pointers made undead by the call from the dead set
- std::vector<Value*> dead;
- for (SmallPtrSet<Value*, 64>::iterator I = deadPointers.begin(),
- E = deadPointers.end(); I != E; ++I) {
- // HACK: if we detect that our AA is imprecise, it's not
- // worth it to scan the rest of the deadPointers set. Just
- // assume that the AA will return ModRef for everything, and
- // go ahead and bail.
- if (modRef >= 16 && other == 0) {
- deadPointers.clear();
+ // If the call might load from any of our allocas, then any store above
+ // the call is live.
+ SmallVector<Value*, 8> LiveAllocas;
+ for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
+ E = DeadStackObjects.end(); I != E; ++I) {
+ // If we detect that our AA is imprecise, it's not worth it to scan the
+ // rest of the DeadPointers set. Just assume that the AA will return
+ // ModRef for everything, and go ahead and bail out.
+ if (NumModRef >= 16 && NumOther == 0)
return MadeChange;
- }
-
- // See if the call site touches it
- AliasAnalysis::ModRefResult A = AA.getModRefInfo(CS, *I,
- getPointerSize(*I));
+
+ // See if the call site touches it.
+ AliasAnalysis::ModRefResult A =
+ AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
if (A == AliasAnalysis::ModRef)
- ++modRef;
+ ++NumModRef;
else
- ++other;
+ ++NumOther;
if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
- dead.push_back(*I);
+ LiveAllocas.push_back(*I);
}
-
- for (std::vector<Value*>::iterator I = dead.begin(), E = dead.end();
- I != E; ++I)
- deadPointers.erase(*I);
- continue;
- } else if (isInstructionTriviallyDead(BBI)) {
- // For any non-memory-affecting non-terminators, DCE them as we reach them
- Instruction *Inst = BBI;
- ++BBI;
- DeleteDeadInstruction(Inst, &deadPointers);
- ++NumFastOther;
- MadeChange = true;
+ for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
+ E = LiveAllocas.end(); I != E; ++I)
+ DeadStackObjects.erase(*I);
+
+ // If all of the allocas were clobbered by the call then we're not going
+ // to find anything else to process.
+ if (DeadStackObjects.empty())
+ return MadeChange;
+
continue;
}
- if (!killPointer)
+ AliasAnalysis::Location LoadedLoc;
+
+ // If we encounter a use of the pointer, it is no longer considered dead
+ if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
+ LoadedLoc = AA->getLocation(L);
+ } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
+ LoadedLoc = AA->getLocation(V);
+ } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
+ LoadedLoc = AA->getLocationForSource(MTI);
+ } else {
+ // Not a loading instruction.
continue;
+ }
- killPointer = killPointer->getUnderlyingObject();
+ // Remove any allocas from the DeadPointer set that are loaded, as this
+ // makes any stores above the access live.
+ RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
- // Deal with undead pointers
- MadeChange |= RemoveUndeadPointers(killPointer, killPointerSize, BBI,
- deadPointers);
+ // If all of the allocas were clobbered by the access then we're not going
+ // to find anything else to process.
+ if (DeadStackObjects.empty())
+ break;
}
return MadeChange;
}
-/// RemoveUndeadPointers - check for uses of a pointer that make it
-/// undead when scanning for dead stores to alloca's.
-bool DSE::RemoveUndeadPointers(Value *killPointer, uint64_t killPointerSize,
- BasicBlock::iterator &BBI,
- SmallPtrSet<Value*, 64> &deadPointers) {
- AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
- // If the kill pointer can be easily reduced to an alloca,
- // don't bother doing extraneous AA queries.
- if (deadPointers.count(killPointer)) {
- deadPointers.erase(killPointer);
- return false;
- }
-
- // A global can't be in the dead pointer set.
- if (isa<GlobalValue>(killPointer))
- return false;
-
- bool MadeChange = false;
+/// RemoveAccessedObjects - Check to see if the specified location may alias any
+/// of the stack objects in the DeadStackObjects set. If so, they become live
+/// because the location is being loaded.
+void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+ SmallPtrSet<Value*, 16> &DeadStackObjects) {
+ const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
+
+ // A constant can't be in the dead pointer set.
+ if (isa<Constant>(UnderlyingPointer))
+ return;
- SmallVector<Value*, 16> undead;
+ // If the kill pointer can be easily reduced to an alloca, don't bother doing
+ // extraneous AA queries.
+ if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
+ DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
+ return;
+ }
- for (SmallPtrSet<Value*, 64>::iterator I = deadPointers.begin(),
- E = deadPointers.end(); I != E; ++I) {
- // See if this pointer could alias it
- AliasAnalysis::AliasResult A = AA.alias(*I, getPointerSize(*I),
- killPointer, killPointerSize);
-
- // If it must-alias and a store, we can delete it
- if (isa<StoreInst>(BBI) && A == AliasAnalysis::MustAlias) {
- StoreInst *S = cast<StoreInst>(BBI);
-
- // Remove it!
- ++BBI;
- DeleteDeadInstruction(S, &deadPointers);
- ++NumFastStores;
- MadeChange = true;
-
- continue;
-
- // Otherwise, it is undead
- } else if (A != AliasAnalysis::NoAlias)
- undead.push_back(*I);
+ SmallVector<Value*, 16> NowLive;
+ for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
+ E = DeadStackObjects.end(); I != E; ++I) {
+ // See if the loaded location could alias the stack location.
+ AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
+ if (!AA->isNoAlias(StackLoc, LoadedLoc))
+ NowLive.push_back(*I);
}
- for (SmallVector<Value*, 16>::iterator I = undead.begin(), E = undead.end();
+ for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
I != E; ++I)
- deadPointers.erase(*I);
-
- return MadeChange;
+ DeadStackObjects.erase(*I);
}
-/// DeleteDeadInstruction - Delete this instruction. Before we do, go through
-/// and zero out all the operands of this instruction. If any of them become
-/// dead, delete them and the computation tree that feeds them.
-///
-/// If ValueSet is non-null, remove any deleted instructions from it as well.
-///
-void DSE::DeleteDeadInstruction(Instruction *I,
- SmallPtrSet<Value*, 64> *ValueSet) {
- SmallVector<Instruction*, 32> NowDeadInsts;
-
- NowDeadInsts.push_back(I);
- --NumFastOther;
-
- // Before we touch this instruction, remove it from memdep!
- MemoryDependenceAnalysis &MDA = getAnalysis<MemoryDependenceAnalysis>();
- do {
- Instruction *DeadInst = NowDeadInsts.pop_back_val();
-
- ++NumFastOther;
-
- // This instruction is dead, zap it, in stages. Start by removing it from
- // MemDep, which needs to know the operands and needs it to be in the
- // function.
- MDA.removeInstruction(DeadInst);
-
- for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
- Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
-
- // If this operand just became dead, add it to the NowDeadInsts list.
- if (!Op->use_empty()) continue;
-
- if (Instruction *OpI = dyn_cast<Instruction>(Op))
- if (isInstructionTriviallyDead(OpI))
- NowDeadInsts.push_back(OpI);
- }
-
- DeadInst->eraseFromParent();
-
- if (ValueSet) ValueSet->erase(DeadInst);
- } while (!NowDeadInsts.empty());
-}
-
-unsigned DSE::getPointerSize(Value *V) const {
- if (TD) {
- if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
- // Get size information for the alloca
- if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
- return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
- } else {
- assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
- const PointerType *PT = cast<PointerType>(V->getType());
- return TD->getTypeAllocSize(PT->getElementType());
- }
- }
- return ~0U;
-}
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
new file mode 100644
index 0000000..3d3f17b
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -0,0 +1,470 @@
+//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs a simple dominator tree walk that eliminates trivially
+// redundant instructions.
+//
+//===----------------------------------------------------------------------===//
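As an illustrative aside (not part of this commit), the kind of trivial redundancy the pass removes looks like this sketch:

  // Both additions compute the same value; the domtree walk replaces the
  // second with the first, and instsimplify folds things like 'X + 0' as
  // it goes.
  int redundantAdd(int A, int B) {
    int X = A + B;
    int Y = A + B; // CSE'd to X
    return X + Y;
  }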
+
+#define DEBUG_TYPE "early-cse"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/ADT/ScopedHashTable.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
+STATISTIC(NumCSE, "Number of instructions CSE'd");
+STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
+STATISTIC(NumCSECall, "Number of call instructions CSE'd");
+STATISTIC(NumDSE, "Number of trivial dead stores removed");
+
+static unsigned getHash(const void *V) {
+ return DenseMapInfo<const void*>::getHashValue(V);
+}
+
+//===----------------------------------------------------------------------===//
+// SimpleValue
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// SimpleValue - Instances of this struct represent available values in the
+ /// scoped hash table.
+ struct SimpleValue {
+ Instruction *Inst;
+
+ SimpleValue(Instruction *I) : Inst(I) {
+ assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
+ }
+
+ bool isSentinel() const {
+ return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
+ Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
+ }
+
+ static bool canHandle(Instruction *Inst) {
+ // This can only handle non-void readnone functions.
+ if (CallInst *CI = dyn_cast<CallInst>(Inst))
+ return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
+ return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
+ isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
+ isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
+ isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
+ isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
+ }
+ };
+}
+
+namespace llvm {
+// SimpleValue is POD.
+template<> struct isPodLike<SimpleValue> {
+ static const bool value = true;
+};
+
+template<> struct DenseMapInfo<SimpleValue> {
+ static inline SimpleValue getEmptyKey() {
+ return DenseMapInfo<Instruction*>::getEmptyKey();
+ }
+ static inline SimpleValue getTombstoneKey() {
+ return DenseMapInfo<Instruction*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(SimpleValue Val);
+ static bool isEqual(SimpleValue LHS, SimpleValue RHS);
+};
+}
+
+unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
+ Instruction *Inst = Val.Inst;
+
+ // Hash in all of the operands as pointers.
+ unsigned Res = 0;
+ for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
+ Res ^= getHash(Inst->getOperand(i)) << i;
+
+ if (CastInst *CI = dyn_cast<CastInst>(Inst))
+ Res ^= getHash(CI->getType());
+ else if (CmpInst *CI = dyn_cast<CmpInst>(Inst))
+ Res ^= CI->getPredicate();
+ else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst)) {
+ for (ExtractValueInst::idx_iterator I = EVI->idx_begin(),
+ E = EVI->idx_end(); I != E; ++I)
+ Res ^= *I;
+ } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst)) {
+ for (InsertValueInst::idx_iterator I = IVI->idx_begin(),
+ E = IVI->idx_end(); I != E; ++I)
+ Res ^= *I;
+ } else {
+ // nothing extra to hash in.
+ assert((isa<CallInst>(Inst) ||
+ isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
+ isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
+ isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst)) &&
+ "Invalid/unknown instruction");
+ }
+
+ // Mix in the opcode.
+ return (Res << 1) ^ Inst->getOpcode();
+}
+
+bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
+ Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
+
+ if (LHS.isSentinel() || RHS.isSentinel())
+ return LHSI == RHSI;
+
+ if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
+ return LHSI->isIdenticalTo(RHSI);
+}
+
+//===----------------------------------------------------------------------===//
+// CallValue
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// CallValue - Instances of this struct represent available call values in
+ /// the scoped hash table.
+ struct CallValue {
+ Instruction *Inst;
+
+ CallValue(Instruction *I) : Inst(I) {
+ assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
+ }
+
+ bool isSentinel() const {
+ return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
+ Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
+ }
+
+ static bool canHandle(Instruction *Inst) {
+ // Don't value number anything that returns void.
+ if (Inst->getType()->isVoidTy())
+ return false;
+
+ CallInst *CI = dyn_cast<CallInst>(Inst);
+ if (CI == 0 || !CI->onlyReadsMemory())
+ return false;
+ return true;
+ }
+ };
+}
+
+namespace llvm {
+ // CallValue is POD.
+ template<> struct isPodLike<CallValue> {
+ static const bool value = true;
+ };
+
+ template<> struct DenseMapInfo<CallValue> {
+ static inline CallValue getEmptyKey() {
+ return DenseMapInfo<Instruction*>::getEmptyKey();
+ }
+ static inline CallValue getTombstoneKey() {
+ return DenseMapInfo<Instruction*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(CallValue Val);
+ static bool isEqual(CallValue LHS, CallValue RHS);
+ };
+}
+unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
+ Instruction *Inst = Val.Inst;
+ // Hash in all of the operands as pointers.
+ unsigned Res = 0;
+ for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) {
+ assert(!Inst->getOperand(i)->getType()->isMetadataTy() &&
+ "Cannot value number calls with metadata operands");
+ Res ^= getHash(Inst->getOperand(i)) << i;
+ }
+
+ // Mix in the opcode.
+ return (Res << 1) ^ Inst->getOpcode();
+}
+
+bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
+ Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
+ if (LHS.isSentinel() || RHS.isSentinel())
+ return LHSI == RHSI;
+ return LHSI->isIdenticalTo(RHSI);
+}
+
+
+//===----------------------------------------------------------------------===//
+// EarlyCSE pass.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+/// EarlyCSE - This pass does a simple depth-first walk over the dominator
+/// tree, eliminating trivially redundant instructions and using instsimplify
+/// to canonicalize things as it goes. It is intended to be fast and catch
+/// obvious cases so that instcombine and other passes are more effective. It
+/// is expected that a later pass of GVN will catch the interesting/hard
+/// cases.
+class EarlyCSE : public FunctionPass {
+public:
+ const TargetData *TD;
+ DominatorTree *DT;
+ typedef RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
+ typedef ScopedHashTable<SimpleValue, Value*, DenseMapInfo<SimpleValue>,
+ AllocatorTy> ScopedHTType;
+
+ /// AvailableValues - This scoped hash table contains the current values of
+ /// all of our simple scalar expressions. As we walk down the domtree, we
+ /// look to see if instructions are in this: if so, we replace them with what
+ /// we find, otherwise we insert them so that dominated values can succeed in
+ /// their lookup.
+ ScopedHTType *AvailableValues;
+
+ /// AvailableLoads - This scoped hash table contains the current values
+ /// of loads. This allows us to get efficient access to dominating loads when
+ /// we have a fully redundant load. In addition to the most recent load, we
+ /// keep track of a generation count of the read, which is compared against
+ /// the current generation count. The current generation count is
+ /// incremented after every possibly-writing memory operation, which ensures
+ /// that we only CSE loads with other loads that have no intervening store.
+ typedef RecyclingAllocator<BumpPtrAllocator,
+ ScopedHashTableVal<Value*, std::pair<Value*, unsigned> > > LoadMapAllocator;
+ typedef ScopedHashTable<Value*, std::pair<Value*, unsigned>,
+ DenseMapInfo<Value*>, LoadMapAllocator> LoadHTType;
+ LoadHTType *AvailableLoads;
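As an illustrative aside (not part of this commit), a minimal sketch of the generation-count idea, using a toy address-to-value map instead of the real scoped hash table (all names illustrative):

  #include <map>
  #include <utility>

  struct ToyLoadCSE {
    std::map<int, std::pair<int, unsigned> > AvailableLoads; // addr -> (val, gen)
    unsigned CurrentGeneration;
    ToyLoadCSE() : CurrentGeneration(0) {}

    // A remembered load is reusable only if no store happened since.
    bool lookupLoad(int Addr, int &Val) {
      std::map<int, std::pair<int, unsigned> >::iterator It =
          AvailableLoads.find(Addr);
      if (It == AvailableLoads.end() || It->second.second != CurrentGeneration)
        return false;
      Val = It->second.first;
      return true;
    }

    void recordStore(int Addr, int Val) {
      ++CurrentGeneration; // every remembered load may now be stale
      AvailableLoads[Addr] = std::make_pair(Val, CurrentGeneration); // forward
    }
  };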
+
+ /// AvailableCalls - This scoped hash table contains the current values
+ /// of read-only call values. It uses the same generation count as loads.
+ typedef ScopedHashTable<CallValue, std::pair<Value*, unsigned> > CallHTType;
+ CallHTType *AvailableCalls;
+
+ /// CurrentGeneration - This is the current generation of the memory value.
+ unsigned CurrentGeneration;
+
+ static char ID;
+ explicit EarlyCSE() : FunctionPass(ID) {
+ initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F);
+
+private:
+
+ bool processNode(DomTreeNode *Node);
+
+ // This transformation requires dominator info
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<DominatorTree>();
+ AU.setPreservesCFG();
+ }
+};
+}
+
+char EarlyCSE::ID = 0;
+
+// createEarlyCSEPass - The public interface to this file.
+FunctionPass *llvm::createEarlyCSEPass() {
+ return new EarlyCSE();
+}
+
+INITIALIZE_PASS_BEGIN(EarlyCSE, "early-cse", "Early CSE", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
+
+bool EarlyCSE::processNode(DomTreeNode *Node) {
+ // Define a scope in the scoped hash table. When we are done processing this
+ // domtree node and recurse back up to our parent domtree node, this will pop
+ // off all the values we install.
+ ScopedHTType::ScopeTy Scope(*AvailableValues);
+
+ // Define a scope for the load values so that anything we add will get
+ // popped when we recurse back up to our parent domtree node.
+ LoadHTType::ScopeTy LoadScope(*AvailableLoads);
+
+ // Define a scope for the call values so that anything we add will get
+ // popped when we recurse back up to our parent domtree node.
+ CallHTType::ScopeTy CallScope(*AvailableCalls);
+
+ BasicBlock *BB = Node->getBlock();
+
+ // If this block has a single predecessor, then the predecessor is the parent
+ // of the domtree node and all of the live out memory values are still current
+ // in this block. If this block has multiple predecessors, then they could
+ // have invalidated the live-out memory values of our parent block. For now,
+ // just be conservative and invalidate memory if this block has multiple
+ // predecessors.
+ if (BB->getSinglePredecessor() == 0)
+ ++CurrentGeneration;
+
+ /// LastStore - Keep track of the last non-volatile store that we saw... for
+ /// as long as there is no instruction that reads memory. If we see a store
+ /// to the same location, we delete the dead store. This zaps trivial dead
+ /// stores which can occur in bitfield code among other things.
+ StoreInst *LastStore = 0;
+
+ bool Changed = false;
+
+ // See if any instructions in the block can be eliminated. If so, do it. If
+ // not, add them to AvailableValues.
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ Instruction *Inst = I++;
+
+ // Dead instructions should just be removed.
+ if (isInstructionTriviallyDead(Inst)) {
+ DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumSimplify;
+ continue;
+ }
+
+ // If the instruction can be simplified (e.g. X+0 = X) then replace it with
+ // its simpler value.
+ if (Value *V = SimplifyInstruction(Inst, TD, DT)) {
+ DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
+ Inst->replaceAllUsesWith(V);
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumSimplify;
+ continue;
+ }
+
+ // If this is a simple instruction that we can value number, process it.
+ if (SimpleValue::canHandle(Inst)) {
+ // See if the instruction has an available value. If so, use it.
+ if (Value *V = AvailableValues->lookup(Inst)) {
+ DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
+ Inst->replaceAllUsesWith(V);
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumCSE;
+ continue;
+ }
+
+ // Otherwise, just remember that this value is available.
+ AvailableValues->insert(Inst, Inst);
+ continue;
+ }
+
+ // If this is a non-volatile load, process it.
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ // Ignore volatile loads.
+ if (LI->isVolatile()) {
+ LastStore = 0;
+ continue;
+ }
+
+ // If we have an available version of this load, and if it is the right
+ // generation, replace this instruction.
+ std::pair<Value*, unsigned> InVal =
+ AvailableLoads->lookup(Inst->getOperand(0));
+ if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: "
+ << *InVal.first << '\n');
+ if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumCSELoad;
+ continue;
+ }
+
+ // Otherwise, remember that we have this instruction.
+ AvailableLoads->insert(Inst->getOperand(0),
+ std::pair<Value*, unsigned>(Inst, CurrentGeneration));
+ LastStore = 0;
+ continue;
+ }
+
+ // If this instruction may read from memory, forget LastStore.
+ if (Inst->mayReadFromMemory())
+ LastStore = 0;
+
+ // If this is a read-only call, process it.
+ if (CallValue::canHandle(Inst)) {
+ // If we have an available version of this call, and if it is the right
+ // generation, replace this instruction.
+ std::pair<Value*, unsigned> InVal = AvailableCalls->lookup(Inst);
+ if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: "
+ << *InVal.first << '\n');
+ if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
+ Inst->eraseFromParent();
+ Changed = true;
+ ++NumCSECall;
+ continue;
+ }
+
+ // Otherwise, remember that we have this instruction.
+ AvailableCalls->insert(Inst,
+ std::pair<Value*, unsigned>(Inst, CurrentGeneration));
+ continue;
+ }
+
+ // Okay, this isn't something we can CSE at all. Check to see if it is
+ // something that could modify memory. If so, our available memory values
+ // cannot be used so bump the generation count.
+ if (Inst->mayWriteToMemory()) {
+ ++CurrentGeneration;
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // We do a trivial form of DSE if there are two stores to the same
+ // location with no intervening loads. Delete the earlier store.
+ if (LastStore &&
+ LastStore->getPointerOperand() == SI->getPointerOperand()) {
+ DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore << " due to: "
+ << *Inst << '\n');
+ LastStore->eraseFromParent();
+ Changed = true;
+ ++NumDSE;
+ LastStore = 0;
+ continue;
+ }
+
+ // Okay, we just invalidated anything we knew about loaded values. Try
+ // to salvage *something* by remembering that the stored value is a live
+ // version of the pointer. It is safe to forward from volatile stores
+ // to non-volatile loads, so we don't have to check for volatility of
+ // the store.
+ AvailableLoads->insert(SI->getPointerOperand(),
+ std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));
+
+ // Remember that this was the last store we saw for DSE.
+ if (!SI->isVolatile())
+ LastStore = SI;
+ }
+ }
+ }
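As an illustrative aside (not part of this commit), the trivial-DSE case at the end of the loop corresponds to source like:

  // The first store is deleted: same pointer, and no instruction that may
  // read memory executes between the two stores.
  void doubleStore(int *P) {
    *P = 0; // LastStore, dead
    *P = 1;
  }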
+
+ unsigned LiveOutGeneration = CurrentGeneration;
+ for (DomTreeNode::iterator I = Node->begin(), E = Node->end(); I != E; ++I) {
+ Changed |= processNode(*I);
+ // Pop any generation changes off the stack from the recursive walk.
+ CurrentGeneration = LiveOutGeneration;
+ }
+ return Changed;
+}
+
+
+bool EarlyCSE::runOnFunction(Function &F) {
+ TD = getAnalysisIfAvailable<TargetData>();
+ DT = &getAnalysis<DominatorTree>();
+
+ // Tables that the pass uses when walking the domtree.
+ ScopedHTType AVTable;
+ AvailableValues = &AVTable;
+ LoadHTType LoadTable;
+ AvailableLoads = &LoadTable;
+ CallHTType CallTable;
+ AvailableCalls = &CallTable;
+
+ CurrentGeneration = 0;
+ return processNode(DT->getRootNode());
+}
diff --git a/contrib/llvm/lib/Transforms/Scalar/GEPSplitter.cpp b/contrib/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
index 53dd06d..4c3d188 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
@@ -27,13 +27,15 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
public:
static char ID; // Pass identification, replacement for typeid
- explicit GEPSplitter() : FunctionPass(ID) {}
+ explicit GEPSplitter() : FunctionPass(ID) {
+ initializeGEPSplitterPass(*PassRegistry::getPassRegistry());
+ }
};
}
char GEPSplitter::ID = 0;
INITIALIZE_PASS(GEPSplitter, "split-geps",
- "split complex GEPs into simple GEPs", false, false);
+ "split complex GEPs into simple GEPs", false, false)
FunctionPass *llvm::createGEPSplitterPass() {
return new GEPSplitter();
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index c62ce1f..a0123f5 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -17,39 +17,30 @@
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/BasicBlock.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
-#include "llvm/Operator.h"
-#include "llvm/Value.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/IRBuilder.h"
using namespace llvm;
STATISTIC(NumGVNInstr, "Number of instructions deleted");
@@ -61,7 +52,6 @@ STATISTIC(NumPRELoad, "Number of loads PRE'd");
static cl::opt<bool> EnablePRE("enable-pre",
cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
-static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
//===----------------------------------------------------------------------===//
// ValueTable Class
@@ -72,76 +62,23 @@ static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
/// two values.
namespace {
struct Expression {
- enum ExpressionOpcode {
- ADD = Instruction::Add,
- FADD = Instruction::FAdd,
- SUB = Instruction::Sub,
- FSUB = Instruction::FSub,
- MUL = Instruction::Mul,
- FMUL = Instruction::FMul,
- UDIV = Instruction::UDiv,
- SDIV = Instruction::SDiv,
- FDIV = Instruction::FDiv,
- UREM = Instruction::URem,
- SREM = Instruction::SRem,
- FREM = Instruction::FRem,
- SHL = Instruction::Shl,
- LSHR = Instruction::LShr,
- ASHR = Instruction::AShr,
- AND = Instruction::And,
- OR = Instruction::Or,
- XOR = Instruction::Xor,
- TRUNC = Instruction::Trunc,
- ZEXT = Instruction::ZExt,
- SEXT = Instruction::SExt,
- FPTOUI = Instruction::FPToUI,
- FPTOSI = Instruction::FPToSI,
- UITOFP = Instruction::UIToFP,
- SITOFP = Instruction::SIToFP,
- FPTRUNC = Instruction::FPTrunc,
- FPEXT = Instruction::FPExt,
- PTRTOINT = Instruction::PtrToInt,
- INTTOPTR = Instruction::IntToPtr,
- BITCAST = Instruction::BitCast,
- ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
- ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
- FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
- FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
- FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
- SHUFFLE, SELECT, GEP, CALL, CONSTANT,
- INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
-
- ExpressionOpcode opcode;
+ uint32_t opcode;
const Type* type;
SmallVector<uint32_t, 4> varargs;
- Value *function;
Expression() { }
- Expression(ExpressionOpcode o) : opcode(o) { }
+ Expression(uint32_t o) : opcode(o) { }
bool operator==(const Expression &other) const {
if (opcode != other.opcode)
return false;
- else if (opcode == EMPTY || opcode == TOMBSTONE)
+ else if (opcode == ~0U || opcode == ~1U)
return true;
else if (type != other.type)
return false;
- else if (function != other.function)
+ else if (varargs != other.varargs)
return false;
- else {
- if (varargs.size() != other.varargs.size())
- return false;
-
- for (size_t i = 0; i < varargs.size(); ++i)
- if (varargs[i] != other.varargs[i])
- return false;
-
- return true;
- }
- }
-
- bool operator!=(const Expression &other) const {
- return !(*this == other);
+ return true;
}
};
@@ -155,19 +92,7 @@ namespace {
uint32_t nextValueNumber;
- Expression::ExpressionOpcode getOpcode(CmpInst* C);
- Expression create_expression(BinaryOperator* BO);
- Expression create_expression(CmpInst* C);
- Expression create_expression(ShuffleVectorInst* V);
- Expression create_expression(ExtractElementInst* C);
- Expression create_expression(InsertElementInst* V);
- Expression create_expression(SelectInst* V);
- Expression create_expression(CastInst* C);
- Expression create_expression(GetElementPtrInst* G);
- Expression create_expression(CallInst* C);
- Expression create_expression(ExtractValueInst* C);
- Expression create_expression(InsertValueInst* C);
-
+ Expression create_expression(Instruction* I);
uint32_t lookup_or_add_call(CallInst* C);
public:
ValueTable() : nextValueNumber(1) { }
@@ -176,7 +101,6 @@ namespace {
void add(Value *V, uint32_t num);
void clear();
void erase(Value *v);
- unsigned size();
void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
AliasAnalysis *getAliasAnalysis() const { return AA; }
void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
@@ -189,11 +113,11 @@ namespace {
namespace llvm {
template <> struct DenseMapInfo<Expression> {
static inline Expression getEmptyKey() {
- return Expression(Expression::EMPTY);
+ return ~0U;
}
static inline Expression getTombstoneKey() {
- return Expression(Expression::TOMBSTONE);
+ return ~1U;
}
static unsigned getHashValue(const Expression e) {
@@ -205,20 +129,13 @@ template <> struct DenseMapInfo<Expression> {
for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
E = e.varargs.end(); I != E; ++I)
hash = *I + hash * 37;
-
- hash = ((unsigned)((uintptr_t)e.function >> 4) ^
- (unsigned)((uintptr_t)e.function >> 9)) +
- hash * 37;
-
+
return hash;
}
static bool isEqual(const Expression &LHS, const Expression &RHS) {
return LHS == RHS;
}
};
-
-template <>
-struct isPodLike<Expression> { static const bool value = true; };
}
@@ -226,185 +143,27 @@ struct isPodLike<Expression> { static const bool value = true; };
// ValueTable Internal Functions
//===----------------------------------------------------------------------===//
-Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
- if (isa<ICmpInst>(C)) {
- switch (C->getPredicate()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Comparison with unknown predicate?");
- case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
- case ICmpInst::ICMP_NE: return Expression::ICMPNE;
- case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
- case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
- case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
- case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
- case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
- case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
- case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
- case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
- }
- } else {
- switch (C->getPredicate()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Comparison with unknown predicate?");
- case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
- case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
- case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
- case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
- case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
- case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
- case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
- case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
- case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
- case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
- case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
- case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
- case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
- case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
- }
- }
-}
-
-Expression ValueTable::create_expression(CallInst* C) {
- Expression e;
-
- e.type = C->getType();
- e.function = C->getCalledFunction();
- e.opcode = Expression::CALL;
-
- CallSite CS(C);
- for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
- I != E; ++I)
- e.varargs.push_back(lookup_or_add(*I));
-
- return e;
-}
-
-Expression ValueTable::create_expression(BinaryOperator* BO) {
- Expression e;
- e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
- e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
- e.function = 0;
- e.type = BO->getType();
- e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());
-
- return e;
-}
-
-Expression ValueTable::create_expression(CmpInst* C) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(C->getOperand(0)));
- e.varargs.push_back(lookup_or_add(C->getOperand(1)));
- e.function = 0;
- e.type = C->getType();
- e.opcode = getOpcode(C);
-
- return e;
-}
-
-Expression ValueTable::create_expression(CastInst* C) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(C->getOperand(0)));
- e.function = 0;
- e.type = C->getType();
- e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());
-
- return e;
-}
-
-Expression ValueTable::create_expression(ShuffleVectorInst* S) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(S->getOperand(0)));
- e.varargs.push_back(lookup_or_add(S->getOperand(1)));
- e.varargs.push_back(lookup_or_add(S->getOperand(2)));
- e.function = 0;
- e.type = S->getType();
- e.opcode = Expression::SHUFFLE;
-
- return e;
-}
-Expression ValueTable::create_expression(ExtractElementInst* E) {
+Expression ValueTable::create_expression(Instruction *I) {
Expression e;
-
- e.varargs.push_back(lookup_or_add(E->getOperand(0)));
- e.varargs.push_back(lookup_or_add(E->getOperand(1)));
- e.function = 0;
- e.type = E->getType();
- e.opcode = Expression::EXTRACT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(InsertElementInst* I) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(I->getOperand(0)));
- e.varargs.push_back(lookup_or_add(I->getOperand(1)));
- e.varargs.push_back(lookup_or_add(I->getOperand(2)));
- e.function = 0;
e.type = I->getType();
- e.opcode = Expression::INSERT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(SelectInst* I) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(I->getCondition()));
- e.varargs.push_back(lookup_or_add(I->getTrueValue()));
- e.varargs.push_back(lookup_or_add(I->getFalseValue()));
- e.function = 0;
- e.type = I->getType();
- e.opcode = Expression::SELECT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(GetElementPtrInst* G) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
- e.function = 0;
- e.type = G->getType();
- e.opcode = Expression::GEP;
-
- for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
- I != E; ++I)
- e.varargs.push_back(lookup_or_add(*I));
-
- return e;
-}
-
-Expression ValueTable::create_expression(ExtractValueInst* E) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
- for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
- II != IE; ++II)
- e.varargs.push_back(*II);
- e.function = 0;
- e.type = E->getType();
- e.opcode = Expression::EXTRACTVALUE;
-
- return e;
-}
-
-Expression ValueTable::create_expression(InsertValueInst* E) {
- Expression e;
-
- e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
- e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
- for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
- II != IE; ++II)
- e.varargs.push_back(*II);
- e.function = 0;
- e.type = E->getType();
- e.opcode = Expression::INSERTVALUE;
-
+ e.opcode = I->getOpcode();
+ for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
+ OI != OE; ++OI)
+ e.varargs.push_back(lookup_or_add(*OI));
+
+ if (CmpInst *C = dyn_cast<CmpInst>(I))
+ e.opcode = (C->getOpcode() << 8) | C->getPredicate();
+ else if (ExtractValueInst *E = dyn_cast<ExtractValueInst>(I)) {
+ for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
+ for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ }
+
return e;
}
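As an illustrative aside (not part of this commit), the packed-opcode trick above keeps compares with different predicates distinct while still fitting in one uint32_t; a standalone sketch of the packing:

  #include <cstdint>

  // The instruction opcode goes in the high bits and the predicate in the
  // low byte, so 'icmp eq' and 'icmp slt' compare and hash as different
  // expressions.
  static uint32_t packCmpOpcode(uint32_t Opcode, uint32_t Predicate) {
    return (Opcode << 8) | Predicate;
  }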
@@ -563,12 +322,8 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
case Instruction::And:
case Instruction::Or :
case Instruction::Xor:
- exp = create_expression(cast<BinaryOperator>(I));
- break;
case Instruction::ICmp:
case Instruction::FCmp:
- exp = create_expression(cast<CmpInst>(I));
- break;
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
@@ -581,28 +336,14 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::BitCast:
- exp = create_expression(cast<CastInst>(I));
- break;
case Instruction::Select:
- exp = create_expression(cast<SelectInst>(I));
- break;
case Instruction::ExtractElement:
- exp = create_expression(cast<ExtractElementInst>(I));
- break;
case Instruction::InsertElement:
- exp = create_expression(cast<InsertElementInst>(I));
- break;
case Instruction::ShuffleVector:
- exp = create_expression(cast<ShuffleVectorInst>(I));
- break;
case Instruction::ExtractValue:
- exp = create_expression(cast<ExtractValueInst>(I));
- break;
case Instruction::InsertValue:
- exp = create_expression(cast<InsertValueInst>(I));
- break;
case Instruction::GetElementPtr:
- exp = create_expression(cast<GetElementPtrInst>(I));
+ exp = create_expression(I);
break;
default:
valueNumbering[V] = nextValueNumber;
@@ -649,30 +390,76 @@ void ValueTable::verifyRemoved(const Value *V) const {
//===----------------------------------------------------------------------===//
namespace {
- struct ValueNumberScope {
- ValueNumberScope* parent;
- DenseMap<uint32_t, Value*> table;
-
- ValueNumberScope(ValueNumberScope* p) : parent(p) { }
- };
-}
-
-namespace {
class GVN : public FunctionPass {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
explicit GVN(bool noloads = false)
- : FunctionPass(ID), NoLoads(noloads), MD(0) { }
+ : FunctionPass(ID), NoLoads(noloads), MD(0) {
+ initializeGVNPass(*PassRegistry::getPassRegistry());
+ }
private:
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
+ const TargetData* TD;
ValueTable VN;
- DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
+
+ /// LeaderTable - A mapping from value numbers to lists of Value*'s that
+ /// have that value number. Use findLeader to query it.
+ struct LeaderTableEntry {
+ Value *Val;
+ BasicBlock *BB;
+ LeaderTableEntry *Next;
+ };
+ DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
+ BumpPtrAllocator TableAllocator;
+
+ /// addToLeaderTable - Push a new Value onto the list that the LeaderTable
+ /// keeps for its value number.
+ void addToLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
+ LeaderTableEntry& Curr = LeaderTable[N];
+ if (!Curr.Val) {
+ Curr.Val = V;
+ Curr.BB = BB;
+ return;
+ }
+
+ LeaderTableEntry* Node = TableAllocator.Allocate<LeaderTableEntry>();
+ Node->Val = V;
+ Node->BB = BB;
+ Node->Next = Curr.Next;
+ Curr.Next = Node;
+ }
+
+ /// removeFromLeaderTable - Scan the list of values corresponding to a given
+ /// value number, and remove the given value if encountered.
+ void removeFromLeaderTable(uint32_t N, Value *V, BasicBlock *BB) {
+ LeaderTableEntry* Prev = 0;
+ LeaderTableEntry* Curr = &LeaderTable[N];
+
+ while (Curr->Val != V || Curr->BB != BB) {
+ Prev = Curr;
+ Curr = Curr->Next;
+ }
+
+ if (Prev) {
+ Prev->Next = Curr->Next;
+ } else {
+ if (!Curr->Next) {
+ Curr->Val = 0;
+ Curr->BB = 0;
+ } else {
+ LeaderTableEntry* Next = Curr->Next;
+ Curr->Val = Next->Val;
+ Curr->BB = Next->BB;
+ Curr->Next = Next->Next;
+ }
+ }
+ }
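
A self-contained model of the leader list above, assuming ints stand in for Value*/BasicBlock* and a deque (stable addresses) plays the BumpPtrAllocator's role; all names here are illustrative. The first entry lives inline in the map, overflow nodes come from the arena, and removing the head copies the second node's fields inward instead of unlinking:

    #include <cstdint>
    #include <deque>
    #include <map>

    struct Entry { int Val; int BB; Entry *Next; };

    static std::map<uint32_t, Entry> Table;  // stands in for DenseMap
    static std::deque<Entry> Arena;          // stands in for the bump arena

    static void add(uint32_t N, int V, int B) {
      Entry &Head = Table[N];                // value-initialized on first use
      if (!Head.Val) { Head.Val = V; Head.BB = B; return; }  // inline slot free
      Arena.push_back(Entry{V, B, Head.Next});               // else chain a node
      Head.Next = &Arena.back();
    }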
// List of critical edges to be split between iterations.
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
@@ -699,9 +486,8 @@ namespace {
bool processBlock(BasicBlock *BB);
void dump(DenseMap<uint32_t, Value*>& d);
bool iterateOnFunction(Function &F);
- Value *CollapsePhi(PHINode* p);
bool performPRE(Function& F);
- Value *lookupNumber(BasicBlock *BB, uint32_t num);
+ Value *findLeader(BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
@@ -715,7 +501,11 @@ FunctionPass *llvm::createGVNPass(bool NoLoads) {
return new GVN(NoLoads);
}
-INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false);
+INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
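
The BEGIN/DEPENDENCY/END macros replace the old single INITIALIZE_PASS so the analyses a pass requires are registered up front; the initializeGVNPass call added to the constructor then resolves them eagerly. A hypothetical pass of this LLVM vintage would follow the same shape (the pass name and strings below are illustrative only):

    INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My Example Pass", false, false)
    INITIALIZE_PASS_DEPENDENCY(DominatorTree)
    INITIALIZE_PASS_END(MyPass, "my-pass", "My Example Pass", false, false)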
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
errs() << "{\n";
@@ -727,33 +517,6 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
errs() << "}\n";
}
-static bool isSafeReplacement(PHINode* p, Instruction *inst) {
- if (!isa<PHINode>(inst))
- return true;
-
- for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
- UI != E; ++UI)
- if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
- if (use_phi->getParent() == inst->getParent())
- return false;
-
- return true;
-}
-
-Value *GVN::CollapsePhi(PHINode *PN) {
- Value *ConstVal = PN->hasConstantValue(DT);
- if (!ConstVal) return 0;
-
- Instruction *Inst = dyn_cast<Instruction>(ConstVal);
- if (!Inst)
- return ConstVal;
-
- if (DT->dominates(Inst, PN))
- if (isSafeReplacement(PN, Inst))
- return Inst;
- return 0;
-}
-
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
@@ -937,47 +700,6 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
-/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
-/// be expressed as a base pointer plus a constant offset. Return the base and
-/// offset to the caller.
-static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const TargetData &TD) {
- Operator *PtrOp = dyn_cast<Operator>(Ptr);
- if (PtrOp == 0) return Ptr;
-
- // Just look through bitcasts.
- if (PtrOp->getOpcode() == Instruction::BitCast)
- return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);
-
- // If this is a GEP with constant indices, we can look through it.
- GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
- if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;
-
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
- ++I, ++GTI) {
- ConstantInt *OpC = cast<ConstantInt>(*I);
- if (OpC->isZero()) continue;
-
- // Handle a struct and array indices which add their offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
- } else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += OpC->getSExtValue()*Size;
- }
- }
-
- // Re-sign extend from the pointer size if needed to get overflow edge cases
- // right.
- unsigned PtrSize = TD.getPointerSizeInBits();
- if (PtrSize < 64)
- Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
-
- return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
-}
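
The re-sign-extension step in the function removed above (now provided by GetPointerBaseWithConstantOffset) is worth a standalone illustration: shifting left and then arithmetically right by (64 - PtrSize) makes an offset accumulated in 64-bit arithmetic wrap exactly as the target's narrower pointers would. A sketch assuming arithmetic right shift on signed types, as the original code also assumes; the helper name is mine:

    #include <cstdint>

    static int64_t signExtendFromPointerWidth(int64_t Offset, unsigned PtrSize) {
      if (PtrSize < 64)
        Offset = (Offset << (64 - PtrSize)) >> (64 - PtrSize);
      return Offset;
    }

    // signExtendFromPointerWidth(0xFFFFFFFFLL, 32) yields -1: an offset that
    // overflowed a 32-bit pointer is seen as the small negative value it
    // would be on target, keeping base+offset overlap checks honest.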
-
-
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
@@ -996,9 +718,8 @@ static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
- Value *LoadBase =
- GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
+ Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
if (StoreBase != LoadBase)
return -1;
@@ -1020,8 +741,6 @@ static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
- // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
- // remove this check, as it is duplicated with what we have below.
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
if ((WriteSizeInBits & 7) | (LoadSize & 7))
@@ -1067,12 +786,12 @@ static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
const TargetData &TD) {
// Cannot handle reading from store of first-class aggregate yet.
- if (DepSI->getOperand(0)->getType()->isStructTy() ||
- DepSI->getOperand(0)->getType()->isArrayTy())
+ if (DepSI->getValueOperand()->getType()->isStructTy() ||
+ DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
Value *StorePtr = DepSI->getPointerOperand();
- uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
+ uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
StorePtr, StoreSize, TD);
}
@@ -1099,7 +818,7 @@ static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (Src == 0) return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
if (GV == 0 || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
@@ -1331,6 +1050,15 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
if (V->getType()->isPointerTy())
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
AA->copyValue(LI, NewPHIs[i]);
+
+ // Now that we've copied information to the new PHIs, scan through
+ // them again and inform alias analysis that we've added potentially
+ // escaping uses to any values that are operands to these PHIs.
+ for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) {
+ PHINode *P = NewPHIs[i];
+ for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii)
+ AA->addEscapingUse(P->getOperandUse(2*ii));
+ }
return V;
}
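
The getOperandUse(2*ii) in the loop above relies on how PHI nodes of this LLVM era store operands: incoming (value, block) pairs are interleaved in a single operand list, so incoming value i sits at operand index 2*i. A tiny model of that layout, with types reduced to void* purely for illustration:

    #include <vector>

    struct PhiOperandModel {
      std::vector<void*> Ops;  // value0, block0, value1, block1, ...
      void *incomingValue(unsigned i) { return Ops[2 * i]; }
      void *incomingBlock(unsigned i) { return Ops[2 * i + 1]; }
    };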
@@ -1347,8 +1075,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
SmallVectorImpl<Instruction*> &toErase) {
// Find the non-local dependencies of the load.
SmallVector<NonLocalDepResult, 64> Deps;
- MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
- Deps);
+ AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
+ MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
//DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
// << Deps.size() << *LI << '\n');
@@ -1376,8 +1104,6 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
SmallVector<BasicBlock*, 16> UnavailableBlocks;
- const TargetData *TD = 0;
-
for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
@@ -1392,14 +1118,12 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (TD == 0)
- TD = getAnalysisIfAvailable<TargetData>();
if (TD && Address) {
int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
DepSI, *TD);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
- DepSI->getOperand(0),
+ DepSI->getValueOperand(),
Offset));
continue;
}
@@ -1409,8 +1133,6 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (TD == 0)
- TD = getAnalysisIfAvailable<TargetData>();
if (TD && Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
DepMI, *TD);
@@ -1440,13 +1162,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
- if (S->getOperand(0)->getType() != LI->getType()) {
- if (TD == 0)
- TD = getAnalysisIfAvailable<TargetData>();
-
+ if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
+ if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
LI->getType(), *TD)) {
UnavailableBlocks.push_back(DepBB);
continue;
@@ -1454,16 +1173,13 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
}
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
- S->getOperand(0)));
+ S->getValueOperand()));
continue;
}
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
if (LD->getType() != LI->getType()) {
- if (TD == 0)
- TD = getAnalysisIfAvailable<TargetData>();
-
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
@@ -1533,26 +1249,19 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
return false;
if (Blockers.count(TmpBB))
return false;
+
+ // If any of these blocks has more than one successor (i.e. if the edge we
+ // just traversed was critical), then there are other paths through this
+ // block along which the load may not be anticipated. Hoisting the load
+ // above this block would be adding the load to execution paths along
+ // which it was not previously executed.
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
- allSingleSucc = false;
+ return false;
}
assert(TmpBB);
LoadBB = TmpBB;
- // If we have a repl set with LI itself in it, this means we have a loop where
- // at least one of the values is LI. Since this means that we won't be able
- // to eliminate LI even if we insert uses in the other predecessors, we will
- // end up increasing code size. Reject this by scanning for LI.
- for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
- if (ValuesPerBlock[i].isSimpleValue() &&
- ValuesPerBlock[i].getSimpleValue() == LI) {
- // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
- if (!EnableFullLoadPRE || e == 1)
- return false;
- }
- }
-
// FIXME: It is extremely unclear what this loop is doing, other than
// artificially restricting loadpre.
if (isSinglePred) {
@@ -1612,14 +1321,13 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
unsigned NumUnavailablePreds = PredLoads.size();
assert(NumUnavailablePreds != 0 &&
"Fully available value should be eliminated above!");
- if (!EnableFullLoadPRE) {
- // If this load is unavailable in multiple predecessors, reject it.
- // FIXME: If we could restructure the CFG, we could make a common pred with
- // all the preds that don't have an available LI and insert a new load into
- // that one block.
- if (NumUnavailablePreds != 1)
+
+ // If this load is unavailable in multiple predecessors, reject it.
+ // FIXME: If we could restructure the CFG, we could make a common pred with
+ // all the preds that don't have an available LI and insert a new load into
+ // that one block.
+ if (NumUnavailablePreds != 1)
return false;
- }
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
@@ -1634,7 +1342,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// If all preds have a single successor, then we know it is safe to insert
// the load on the pred (?!?), so we can insert code to materialize the
// pointer if it is not available.
- PHITransAddr Address(LI->getOperand(0), TD);
+ PHITransAddr Address(LI->getPointerOperand(), TD);
Value *LoadPtr = 0;
if (allSingleSucc) {
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
@@ -1648,7 +1356,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// we fail PRE.
if (LoadPtr == 0) {
DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
- << *LI->getOperand(0) << "\n");
+ << *LI->getPointerOperand() << "\n");
CanDoPRE = false;
break;
}
@@ -1657,8 +1365,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// @1 = getelementptr (i8* p, ...
// test p and branch if == 0
// load @1
- // It is valid to have the getelementptr before the test, even if p can be 0,
- // as getelementptr only does address arithmetic.
+ // It is valid to have the getelementptr before the test, even if p can
+ // be 0, as getelementptr only does address arithmetic.
// If we are not pushing the value through any multiple-successor blocks
// we do not have this case. Otherwise, check that the load is safe to
// put anywhere; this can be improved, but should be conservatively safe.
@@ -1675,8 +1383,11 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
}
if (!CanDoPRE) {
- while (!NewInsts.empty())
- NewInsts.pop_back_val()->eraseFromParent();
+ while (!NewInsts.empty()) {
+ Instruction *I = NewInsts.pop_back_val();
+ if (MD) MD->removeInstruction(I);
+ I->eraseFromParent();
+ }
return false;
}
@@ -1702,9 +1413,13 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
BasicBlock *UnavailablePred = I->first;
Value *LoadPtr = I->second;
- Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
- LI->getAlignment(),
- UnavailablePred->getTerminator());
+ Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
+ LI->getAlignment(),
+ UnavailablePred->getTerminator());
+
+ // Transfer the old load's TBAA tag to the new load.
+ if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
+ NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
// Add the newly created load.
ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
@@ -1753,19 +1468,19 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// access code.
Value *AvailVal = 0;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
- if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
+ if (TD) {
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
L->getPointerOperand(),
DepSI, *TD);
if (Offset != -1)
- AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
+ AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
L->getType(), L, *TD);
}
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
- if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
+ if (TD) {
int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
L->getPointerOperand(),
DepMI, *TD);
@@ -1804,14 +1519,13 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
Instruction *DepInst = Dep.getInst();
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
- Value *StoredVal = DepSI->getOperand(0);
+ Value *StoredVal = DepSI->getValueOperand();
// The store and load are to a must-aliased pointer, but they may not
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
- const TargetData *TD = 0;
if (StoredVal->getType() != L->getType()) {
- if ((TD = getAnalysisIfAvailable<TargetData>())) {
+ if (TD) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
L, *TD);
if (StoredVal == 0)
@@ -1840,9 +1554,8 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// The loads are of a must-aliased pointer, but they may not actually have
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
- const TargetData *TD = 0;
if (DepLI->getType() != L->getType()) {
- if ((TD = getAnalysisIfAvailable<TargetData>())) {
+ if (TD) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
if (AvailableVal == 0)
return false;
@@ -1890,20 +1603,32 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
return false;
}
-Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
- DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
- if (I == localAvail.end())
- return 0;
-
- ValueNumberScope *Locals = I->second;
- while (Locals) {
- DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
- if (I != Locals->table.end())
- return I->second;
- Locals = Locals->parent;
+// findLeader - In order to find a leader for a given value number at a
+// specific basic block, we first obtain the list of all Values for that number,
+// and then scan the list to find one whose block dominates the block in
+// question. This is fast because dominator tree queries consist of only
+// a few comparisons of DFS numbers.
+Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
+ LeaderTableEntry Vals = LeaderTable[num];
+ if (!Vals.Val) return 0;
+
+ Value *Val = 0;
+ if (DT->dominates(Vals.BB, BB)) {
+ Val = Vals.Val;
+ if (isa<Constant>(Val)) return Val;
+ }
+
+ LeaderTableEntry* Next = Vals.Next;
+ while (Next) {
+ if (DT->dominates(Next->BB, BB)) {
+ if (isa<Constant>(Next->Val)) return Next->Val;
+ if (!Val) Val = Next->Val;
+ }
+
+ Next = Next->Next;
}
- return 0;
+ return Val;
}
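
A self-contained model of the policy findLeader implements, with ints standing in for Value*/BasicBlock* and the dominator-tree query abstracted as a callback; names are illustrative. A constant leader wins immediately; otherwise the first dominating definition encountered is kept:

    #include <functional>
    #include <vector>

    struct Leader { int Val; int BB; bool IsConst; };

    static int findLeaderModel(const std::vector<Leader> &Vals, int BB,
                               const std::function<bool(int, int)> &dominates) {
      int Found = 0;                       // 0 plays the role of a null Value*
      for (const Leader &L : Vals) {
        if (!dominates(L.BB, BB)) continue;
        if (L.IsConst) return L.Val;       // constants are always preferred
        if (!Found) Found = L.Val;         // else keep the first dominating def
      }
      return Found;
    }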
@@ -1915,85 +1640,92 @@ bool GVN::processInstruction(Instruction *I,
if (isa<DbgInfoIntrinsic>(I))
return false;
+ // If the instruction can be easily simplified then do so now in preference
+ // to value numbering it. Value numbering often exposes redundancies, for
+ // example if it determines that %y is equal to %x then the instruction
+ // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
+ if (Value *V = SimplifyInstruction(I, TD, DT)) {
+ I->replaceAllUsesWith(V);
+ if (MD && V->getType()->isPointerTy())
+ MD->invalidateCachedPointerInfo(V);
+ VN.erase(I);
+ toErase.push_back(I);
+ return true;
+ }
+
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
bool Changed = processLoad(LI, toErase);
if (!Changed) {
unsigned Num = VN.lookup_or_add(LI);
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
+ addToLeaderTable(Num, LI, LI->getParent());
}
return Changed;
}
- uint32_t NextNum = VN.getNextUnusedValueNumber();
- unsigned Num = VN.lookup_or_add(I);
-
+ // For conditional branches, we can perform simple conditional propagation on
+ // the condition value itself.
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
-
if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
return false;
-
+
Value *BranchCond = BI->getCondition();
uint32_t CondVN = VN.lookup_or_add(BranchCond);
-
+
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
-
+
if (TrueSucc->getSinglePredecessor())
- localAvail[TrueSucc]->table[CondVN] =
- ConstantInt::getTrue(TrueSucc->getContext());
+ addToLeaderTable(CondVN,
+ ConstantInt::getTrue(TrueSucc->getContext()),
+ TrueSucc);
if (FalseSucc->getSinglePredecessor())
- localAvail[FalseSucc]->table[CondVN] =
- ConstantInt::getFalse(TrueSucc->getContext());
-
+ addToLeaderTable(CondVN,
+ ConstantInt::getFalse(TrueSucc->getContext()),
+ FalseSucc);
+
return false;
+ }
+
+ // Instructions with void type don't return a value, so there's
+ // no point in trying to find redundancies in them.
+ if (I->getType()->isVoidTy()) return false;
+
+ uint32_t NextNum = VN.getNextUnusedValueNumber();
+ unsigned Num = VN.lookup_or_add(I);
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
- } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
+ if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
+ addToLeaderTable(Num, I, I->getParent());
return false;
}
- // Collapse PHI nodes
- if (PHINode* p = dyn_cast<PHINode>(I)) {
- Value *constVal = CollapsePhi(p);
-
- if (constVal) {
- p->replaceAllUsesWith(constVal);
- if (MD && constVal->getType()->isPointerTy())
- MD->invalidateCachedPointerInfo(constVal);
- VN.erase(p);
-
- toErase.push_back(p);
- } else {
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
- }
-
// If the number we were assigned was a brand new VN, then we don't
// need to do a lookup to see if the number already exists
// somewhere in the domtree: it can't!
- } else if (Num == NextNum) {
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
-
+ if (Num == NextNum) {
+ addToLeaderTable(Num, I, I->getParent());
+ return false;
+ }
+
// Perform fast-path value-number based elimination of values inherited from
// dominators.
- } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
- // Remove it!
- VN.erase(I);
- I->replaceAllUsesWith(repl);
- if (MD && repl->getType()->isPointerTy())
- MD->invalidateCachedPointerInfo(repl);
- toErase.push_back(I);
- return true;
-
- } else {
- localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
+ Value *repl = findLeader(I->getParent(), Num);
+ if (repl == 0) {
+ // Failure, just remember this instance for future use.
+ addToLeaderTable(Num, I, I->getParent());
+ return false;
}
-
- return false;
+
+ // Remove it!
+ VN.erase(I);
+ I->replaceAllUsesWith(repl);
+ if (MD && repl->getType()->isPointerTy())
+ MD->invalidateCachedPointerInfo(repl);
+ toErase.push_back(I);
+ return true;
}
/// runOnFunction - This is the main transformation entry point for a function.
@@ -2001,6 +1733,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
+ TD = getAnalysisIfAvailable<TargetData>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
VN.setDomTree(DT);
@@ -2011,8 +1744,8 @@ bool GVN::runOnFunction(Function& F) {
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
- BasicBlock *BB = FI;
- ++FI;
+ BasicBlock *BB = FI++;
+
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
if (removedBlock) ++NumGVNBlocks;
@@ -2020,7 +1753,6 @@ bool GVN::runOnFunction(Function& F) {
}
unsigned Iteration = 0;
-
while (ShouldContinue) {
DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
ShouldContinue = iterateOnFunction(F);
@@ -2138,20 +1870,19 @@ bool GVN::performPRE(Function &F) {
if (P == CurrentBlock) {
NumWithout = 2;
break;
- } else if (!localAvail.count(P)) {
+ } else if (!DT->dominates(&F.getEntryBlock(), P)) {
NumWithout = 2;
break;
}
- DenseMap<uint32_t, Value*>::iterator predV =
- localAvail[P]->table.find(ValNo);
- if (predV == localAvail[P]->table.end()) {
+ Value* predV = findLeader(P, ValNo);
+ if (predV == 0) {
PREPred = P;
++NumWithout;
- } else if (predV->second == CurInst) {
+ } else if (predV == CurInst) {
NumWithout = 2;
} else {
- predMap[P] = predV->second;
+ predMap[P] = predV;
++NumWith;
}
}
@@ -2186,7 +1917,7 @@ bool GVN::performPRE(Function &F) {
if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
continue;
- if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
+ if (Value *V = findLeader(PREPred, VN.lookup(Op))) {
PREInstr->setOperand(i, V);
} else {
success = false;
@@ -2210,7 +1941,7 @@ bool GVN::performPRE(Function &F) {
++NumGVNPRE;
// Update the availability map to include the new instruction.
- localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
+ addToLeaderTable(ValNo, PREInstr, PREPred);
// Create a PHI to make the value available in this block.
PHINode* Phi = PHINode::Create(CurInst->getType(),
@@ -2223,12 +1954,21 @@ bool GVN::performPRE(Function &F) {
}
VN.add(Phi, ValNo);
- localAvail[CurrentBlock]->table[ValNo] = Phi;
+ addToLeaderTable(ValNo, Phi, CurrentBlock);
CurInst->replaceAllUsesWith(Phi);
- if (MD && Phi->getType()->isPointerTy())
- MD->invalidateCachedPointerInfo(Phi);
+ if (Phi->getType()->isPointerTy()) {
+ // Because we have added a PHI-use of the pointer value, it has now
+ // "escaped" from alias analysis' perspective. We need to inform
+ // AA of this.
+ for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii)
+ VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(2*ii));
+
+ if (MD)
+ MD->invalidateCachedPointerInfo(Phi);
+ }
VN.erase(CurInst);
+ removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
if (MD) MD->removeInstruction(CurInst);
@@ -2260,16 +2000,7 @@ bool GVN::splitCriticalEdges() {
/// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
cleanupGlobalSets();
-
- for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
- DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
- if (DI->getIDom())
- localAvail[DI->getBlock()] =
- new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
- else
- localAvail[DI->getBlock()] = new ValueNumberScope(0);
- }
-
+
// Top-down walk of the dominator tree
bool Changed = false;
#if 0
@@ -2289,11 +2020,8 @@ bool GVN::iterateOnFunction(Function &F) {
void GVN::cleanupGlobalSets() {
VN.clear();
-
- for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
- I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
- delete I->second;
- localAvail.clear();
+ LeaderTable.clear();
+ TableAllocator.Reset();
}
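
Why this cleanup can be cheap: every overflow LeaderTableEntry node came from the BumpPtrAllocator, so Reset() releases them wholesale without walking any per-number list. A minimal stand-in, assuming a deque plays the arena's role:

    #include <deque>

    struct ArenaNode { int Val; ArenaNode *Next; };
    static std::deque<ArenaNode> ArenaModel;       // stands in for the arena

    static void resetArena() { ArenaModel.clear(); }  // drops all nodes at once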
/// verifyRemoved - Verify that the specified instruction does not occur in our
@@ -2303,17 +2031,14 @@ void GVN::verifyRemoved(const Instruction *Inst) const {
// Walk through the value number scope to make sure the instruction isn't
// ferreted away in it.
- for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
- I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
- const ValueNumberScope *VNS = I->second;
-
- while (VNS) {
- for (DenseMap<uint32_t, Value*>::const_iterator
- II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
- assert(II->second != Inst && "Inst still in value numbering scope!");
- }
-
- VNS = VNS->parent;
+ for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
+ I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
+ const LeaderTableEntry *Node = &I->second;
+ assert(Node->Val != Inst && "Inst still in value numbering scope!");
+
+ while (Node->Next) {
+ Node = Node->Next;
+ assert(Node->Val != Inst && "Inst still in value numbering scope!");
}
}
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index af2eafc..0fb6798 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -77,7 +77,9 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID) {}
+ IndVarSimplify() : LoopPass(ID) {
+ initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -117,8 +119,16 @@ namespace {
}
char IndVarSimplify::ID = 0;
-INITIALIZE_PASS(IndVarSimplify, "indvars",
- "Canonicalize Induction Variables", false, false);
+INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
+ "Canonicalize Induction Variables", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(IVUsers)
+INITIALIZE_PASS_END(IndVarSimplify, "indvars",
+ "Canonicalize Induction Variables", false, false)
Pass *llvm::createIndVarSimplifyPass() {
return new IndVarSimplify();
@@ -190,7 +200,7 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
}
// Expand the code for the iteration count.
- assert(RHS->isLoopInvariant(L) &&
+ assert(SE->isLoopInvariant(RHS, L) &&
"Computed iteration count is not loop invariant!");
Value *ExitCnt = Rewriter.expandCodeFor(RHS, IndVar->getType(), BI);
@@ -233,8 +243,7 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
/// happen later, except that it's more powerful in some cases, because it's
/// able to brute-force evaluate arbitrary instructions as long as they have
/// constant operands at the beginning of the loop.
-void IndVarSimplify::RewriteLoopExitValues(Loop *L,
- SCEVExpander &Rewriter) {
+void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
// Verify the input to the pass in already in LCSSA form.
assert(L->isLCSSAForm(*DT));
@@ -292,7 +301,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,
// and varies predictably *inside* the loop. Evaluate the value it
// contains when the loop exits, if possible.
const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
- if (!ExitValue->isLoopInvariant(L))
+ if (!SE->isLoopInvariant(ExitValue, L))
continue;
Changed = true;
@@ -338,7 +347,7 @@ void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
// If there are, change them into integer recurrences, permitting analysis by
// the SCEV routines.
//
- BasicBlock *Header = L->getHeader();
+ BasicBlock *Header = L->getHeader();
SmallVector<WeakVH, 8> PHIs;
for (BasicBlock::iterator I = Header->begin();
@@ -346,7 +355,7 @@ void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
PHIs.push_back(PN);
for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
- if (PHINode *PN = dyn_cast_or_null<PHINode>(PHIs[i]))
+ if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
HandleFloatingPointIV(L, PN);
// If the loop previously had floating-point IV, ScalarEvolution
@@ -395,7 +404,7 @@ void IndVarSimplify::EliminateIVComparisons() {
// which are now dead.
while (!DeadInsts.empty())
if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
+ dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
RecursivelyDeleteTriviallyDeadInstructions(Inst);
}
@@ -462,7 +471,7 @@ void IndVarSimplify::EliminateIVRemainders() {
// which are now dead.
while (!DeadInsts.empty())
if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
+ dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
RecursivelyDeleteTriviallyDeadInstructions(Inst);
}
@@ -607,9 +616,9 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// currently can only reduce affine polynomials. For now just disable
// indvar subst on anything more complex than an affine addrec, unless
// it can be expanded to a trivial value.
-static bool isSafe(const SCEV *S, const Loop *L) {
+static bool isSafe(const SCEV *S, const Loop *L, ScalarEvolution *SE) {
// Loop-invariant values are safe.
- if (S->isLoopInvariant(L)) return true;
+ if (SE->isLoopInvariant(S, L)) return true;
// Affine addrecs are safe. Non-affine are not, because LSR doesn't know how
// to transform them into efficient code.
@@ -620,18 +629,18 @@ static bool isSafe(const SCEV *S, const Loop *L) {
if (const SCEVCommutativeExpr *Commutative = dyn_cast<SCEVCommutativeExpr>(S)) {
for (SCEVCommutativeExpr::op_iterator I = Commutative->op_begin(),
E = Commutative->op_end(); I != E; ++I)
- if (!isSafe(*I, L)) return false;
+ if (!isSafe(*I, L, SE)) return false;
return true;
}
// A cast is safe if its operand is.
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
- return isSafe(C->getOperand(), L);
+ return isSafe(C->getOperand(), L, SE);
// A udiv is safe if its operands are.
if (const SCEVUDivExpr *UD = dyn_cast<SCEVUDivExpr>(S))
- return isSafe(UD->getLHS(), L) &&
- isSafe(UD->getRHS(), L);
+ return isSafe(UD->getLHS(), L, SE) &&
+ isSafe(UD->getRHS(), L, SE);
// SCEVUnknown is always safe.
if (isa<SCEVUnknown>(S))
@@ -662,7 +671,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// Evaluate the expression out of the loop, if possible.
if (!L->contains(UI->getUser())) {
const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
- if (ExitVal->isLoopInvariant(L))
+ if (SE->isLoopInvariant(ExitVal, L))
AR = ExitVal;
}
@@ -672,7 +681,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// currently can only reduce affine polynomials. For now just disable
// indvar subst on anything more complex than an affine addrec, unless
// it can be expanded to a trivial value.
- if (!isSafe(AR, L))
+ if (!isSafe(AR, L, SE))
continue;
// Determine the insertion point for this user. By default, insert
@@ -725,7 +734,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// which are now dead.
while (!DeadInsts.empty())
if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
+ dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
RecursivelyDeleteTriviallyDeadInstructions(Inst);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 104d5ae..90094a8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -40,20 +40,22 @@ STATISTIC(NumFolds, "Number of terminators folded");
STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");
static cl::opt<unsigned>
-Threshold("jump-threading-threshold",
+Threshold("jump-threading-threshold",
cl::desc("Max block size to duplicate for jump threading"),
cl::init(6), cl::Hidden);
-// Turn on use of LazyValueInfo.
-static cl::opt<bool>
-EnableLVI("enable-jump-threading-lvi",
- cl::desc("Use LVI for jump threading"),
- cl::init(true),
- cl::ReallyHidden);
-
-
-
namespace {
+ // These live outside the JumpThreading class so file-static functions can use them too.
+ typedef SmallVectorImpl<std::pair<Constant*, BasicBlock*> > PredValueInfo;
+ typedef SmallVector<std::pair<Constant*, BasicBlock*>, 8> PredValueInfoTy;
+
+ // This is used to keep track of what kind of constant we're currently hoping
+ // to find.
+ enum ConstantPreference {
+ WantInteger,
+ WantBlockAddress
+ };
+
/// This pass performs 'jump threading', which looks at blocks that have
/// multiple predecessors and multiple successors. If one or more of the
/// predecessors of the block can be proven to always jump to one of the
@@ -79,61 +81,59 @@ namespace {
SmallSet<AssertingVH<BasicBlock>, 16> LoopHeaders;
#endif
DenseSet<std::pair<Value*, BasicBlock*> > RecursionSet;
-
+
// RAII helper for updating the recursion stack.
struct RecursionSetRemover {
DenseSet<std::pair<Value*, BasicBlock*> > &TheSet;
std::pair<Value*, BasicBlock*> ThePair;
-
+
RecursionSetRemover(DenseSet<std::pair<Value*, BasicBlock*> > &S,
std::pair<Value*, BasicBlock*> P)
: TheSet(S), ThePair(P) { }
-
+
~RecursionSetRemover() {
TheSet.erase(ThePair);
}
};
public:
static char ID; // Pass identification
- JumpThreading() : FunctionPass(ID) {}
+ JumpThreading() : FunctionPass(ID) {
+ initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- if (EnableLVI) {
- AU.addRequired<LazyValueInfo>();
- AU.addPreserved<LazyValueInfo>();
- }
+ AU.addRequired<LazyValueInfo>();
+ AU.addPreserved<LazyValueInfo>();
}
-
+
void FindLoopHeaders(Function &F);
bool ProcessBlock(BasicBlock *BB);
bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock*> &PredBBs,
BasicBlock *SuccBB);
bool DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
const SmallVectorImpl<BasicBlock *> &PredBBs);
-
- typedef SmallVectorImpl<std::pair<ConstantInt*,
- BasicBlock*> > PredValueInfo;
-
+
bool ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
- PredValueInfo &Result);
- bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB);
-
-
- bool ProcessBranchOnDuplicateCond(BasicBlock *PredBB, BasicBlock *DestBB);
- bool ProcessSwitchOnDuplicateCond(BasicBlock *PredBB, BasicBlock *DestBB);
+ PredValueInfo &Result,
+ ConstantPreference Preference);
+ bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
+ ConstantPreference Preference);
bool ProcessBranchOnPHI(PHINode *PN);
bool ProcessBranchOnXOR(BinaryOperator *BO);
-
+
bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
};
}
char JumpThreading::ID = 0;
-INITIALIZE_PASS(JumpThreading, "jump-threading",
- "Jump Threading", false, false);
+INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
+ "Jump Threading", false, false)
+INITIALIZE_PASS_DEPENDENCY(LazyValueInfo)
+INITIALIZE_PASS_END(JumpThreading, "jump-threading",
+ "Jump Threading", false, false)
// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
@@ -143,21 +143,21 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
TD = getAnalysisIfAvailable<TargetData>();
- LVI = EnableLVI ? &getAnalysis<LazyValueInfo>() : 0;
-
+ LVI = &getAnalysis<LazyValueInfo>();
+
FindLoopHeaders(F);
-
+
bool Changed, EverChanged = false;
do {
Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
BasicBlock *BB = I;
- // Thread all of the branches we can over this block.
+ // Thread all of the branches we can over this block.
while (ProcessBlock(BB))
Changed = true;
-
+
++I;
-
+
// If the block is trivially dead, zap it. This eliminates the successor
// edges which simplifies the CFG.
if (pred_begin(BB) == pred_end(BB) &&
@@ -165,48 +165,46 @@ bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << " JT: Deleting dead block '" << BB->getName()
<< "' with terminator: " << *BB->getTerminator() << '\n');
LoopHeaders.erase(BB);
- if (LVI) LVI->eraseBlock(BB);
+ LVI->eraseBlock(BB);
DeleteDeadBlock(BB);
Changed = true;
- } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
- // Can't thread an unconditional jump, but if the block is "almost
- // empty", we can replace uses of it with uses of the successor and make
- // this dead.
- if (BI->isUnconditional() &&
- BB != &BB->getParent()->getEntryBlock()) {
- BasicBlock::iterator BBI = BB->getFirstNonPHI();
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(BBI))
- ++BBI;
+ continue;
+ }
+
+ BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+
+ // Can't thread an unconditional jump, but if the block is "almost
+ // empty", we can replace uses of it with uses of the successor and make
+ // this dead.
+ if (BI && BI->isUnconditional() &&
+ BB != &BB->getParent()->getEntryBlock() &&
// If the terminator is the only non-phi instruction, try to nuke it.
- if (BBI->isTerminator()) {
- // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
- // block, we have to make sure it isn't in the LoopHeaders set. We
- // reinsert afterward if needed.
- bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
- BasicBlock *Succ = BI->getSuccessor(0);
-
- // FIXME: It is always conservatively correct to drop the info
- // for a block even if it doesn't get erased. This isn't totally
- // awesome, but it allows us to use AssertingVH to prevent nasty
- // dangling pointer issues within LazyValueInfo.
- if (LVI) LVI->eraseBlock(BB);
- if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
- Changed = true;
- // If we deleted BB and BB was the header of a loop, then the
- // successor is now the header of the loop.
- BB = Succ;
- }
-
- if (ErasedFromLoopHeaders)
- LoopHeaders.insert(BB);
- }
+ BB->getFirstNonPHIOrDbg()->isTerminator()) {
+ // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
+ // block, we have to make sure it isn't in the LoopHeaders set. We
+ // reinsert afterward if needed.
+ bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
+ BasicBlock *Succ = BI->getSuccessor(0);
+
+ // FIXME: It is always conservatively correct to drop the info
+ // for a block even if it doesn't get erased. This isn't totally
+ // awesome, but it allows us to use AssertingVH to prevent nasty
+ // dangling pointer issues within LazyValueInfo.
+ LVI->eraseBlock(BB);
+ if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
+ Changed = true;
+ // If we deleted BB and BB was the header of a loop, then the
+ // successor is now the header of the loop.
+ BB = Succ;
}
+
+ if (ErasedFromLoopHeaders)
+ LoopHeaders.insert(BB);
}
}
EverChanged |= Changed;
} while (Changed);
-
+
LoopHeaders.clear();
return EverChanged;
}
@@ -216,25 +214,25 @@ bool JumpThreading::runOnFunction(Function &F) {
static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB) {
/// Ignore PHI nodes, these will be flattened when duplication happens.
BasicBlock::const_iterator I = BB->getFirstNonPHI();
-
+
// FIXME: THREADING will delete values that are just used to compute the
// branch, so they shouldn't count against the duplication cost.
-
-
+
+
// Sum up the cost of each instruction until we get to the terminator. Don't
// include the terminator because the copy won't include it.
unsigned Size = 0;
for (; !isa<TerminatorInst>(I); ++I) {
// Debugger intrinsics don't incur code size.
if (isa<DbgInfoIntrinsic>(I)) continue;
-
+
// If this is a pointer->pointer bitcast, it is free.
if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
continue;
-
+
// All other instructions count for at least one unit.
++Size;
-
+
// Calls are more expensive. If they are non-intrinsic calls, we model them
// as having cost of 4. If they are a non-vector intrinsic, we model them
// as having cost of 2 total, and if they are a vector intrinsic, we model
@@ -246,12 +244,16 @@ static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB) {
Size += 1;
}
}
-
+
// Threading through a switch statement is particularly profitable. If this
// block ends in a switch, decrease its cost to make it more likely to happen.
if (isa<SwitchInst>(I))
Size = Size > 6 ? Size-6 : 0;
-
+
+ // The same holds for indirect branches, but slightly more so.
+ if (isa<IndirectBrInst>(I))
+ Size = Size > 8 ? Size-8 : 0;
+
return Size;
}
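
The saturating discount at the end of the cost function is the heuristic in miniature, so a standalone sketch may help (the helper name is mine): every countable instruction adds at least one unit, and a block ending in a switch or indirectbr subtracts a flat bonus, clamped at zero rather than going negative.

    static unsigned discountedCost(unsigned Size, unsigned Discount) {
      return Size > Discount ? Size - Discount : 0;
    }

    // discountedCost(10, 8) == 2 for an indirectbr-terminated block, while
    // discountedCost(5, 8) == 0: small such blocks become free to duplicate.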
@@ -273,57 +275,64 @@ static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB) {
void JumpThreading::FindLoopHeaders(Function &F) {
SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
FindFunctionBackedges(F, Edges);
-
+
for (unsigned i = 0, e = Edges.size(); i != e; ++i)
LoopHeaders.insert(const_cast<BasicBlock*>(Edges[i].second));
}
-// Helper method for ComputeValueKnownInPredecessors. If Value is a
-// ConstantInt, push it. If it's an undef, push 0. Otherwise, do nothing.
-static void PushConstantIntOrUndef(SmallVectorImpl<std::pair<ConstantInt*,
- BasicBlock*> > &Result,
- Constant *Value, BasicBlock* BB){
- if (ConstantInt *FoldedCInt = dyn_cast<ConstantInt>(Value))
- Result.push_back(std::make_pair(FoldedCInt, BB));
- else if (isa<UndefValue>(Value))
- Result.push_back(std::make_pair((ConstantInt*)0, BB));
+/// getKnownConstant - Helper method to determine if we can thread over a
+/// terminator with the given value as its condition, and if so what value to
+/// use for that. What kind of value this is depends on whether we want an
+/// integer or a block address, but an undef is always accepted.
+/// Returns null if Val is null or not an appropriate constant.
+static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
+ if (!Val)
+ return 0;
+
+ // Undef is "known" enough.
+ if (UndefValue *U = dyn_cast<UndefValue>(Val))
+ return U;
+
+ if (Preference == WantBlockAddress)
+ return dyn_cast<BlockAddress>(Val->stripPointerCasts());
+
+ return dyn_cast<ConstantInt>(Val);
}
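
A reduced model of the acceptance rule getKnownConstant encodes, with enums standing in for the LLVM types; all names are illustrative. Undef satisfies either preference; otherwise only the requested flavor of constant is accepted:

    enum Pref { WantInt, WantBlockAddr };
    enum Kind { KUndef, KInt, KBlockAddr, KOther };

    static bool isKnownFor(Kind K, Pref P) {
      if (K == KUndef) return true;  // undef is "known" enough either way
      return P == WantBlockAddr ? (K == KBlockAddr) : (K == KInt);
    }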
/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
-/// if we can infer that the value is a known ConstantInt in any of our
-/// predecessors. If so, return the known list of value and pred BB in the
-/// result vector. If a value is known to be undef, it is returned as null.
+/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
+/// in any of our predecessors. If so, return the known list of value and pred
+/// BB in the result vector.
///
/// This returns true if there were any known values.
///
bool JumpThreading::
-ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
+ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
+ ConstantPreference Preference) {
// This method walks up use-def chains recursively. Because of this, we could
// get into an infinite loop going around loops in the use-def chain. To
// prevent this, keep track of what (value, block) pairs we've already visited
// and terminate the search if we loop back to them
if (!RecursionSet.insert(std::make_pair(V, BB)).second)
return false;
-
+
+ // An RAII helper to remove this pair from the recursion set once the recursion
// stack pops back out again.
RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));
-
- // If V is a constantint, then it is known in all predecessors.
- if (isa<ConstantInt>(V) || isa<UndefValue>(V)) {
- ConstantInt *CI = dyn_cast<ConstantInt>(V);
-
+
+ // If V is a constant, then it is known in all predecessors.
+ if (Constant *KC = getKnownConstant(V, Preference)) {
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- Result.push_back(std::make_pair(CI, *PI));
-
+ Result.push_back(std::make_pair(KC, *PI));
+
return true;
}
-
+
// If V is a non-instruction value, or an instruction in a different block,
// then it can't be derived from a PHI.
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0 || I->getParent() != BB) {
-
+
// Okay, if this is a live-in value, see if it has a known value at the end
// of any of our predecessors.
//
@@ -331,82 +340,78 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
/// TODO: Per PR2563, we could infer value range information about a
/// predecessor based on its terminator.
//
- if (LVI) {
- // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
- // "I" is a non-local compare-with-a-constant instruction. This would be
- // able to handle value inequalities better, for example if the compare is
- // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
- // Perhaps getConstantOnEdge should be smart enough to do this?
-
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- BasicBlock *P = *PI;
- // If the value is known by LazyValueInfo to be a constant in a
- // predecessor, use that information to try to thread this block.
- Constant *PredCst = LVI->getConstantOnEdge(V, P, BB);
- if (PredCst == 0 ||
- (!isa<ConstantInt>(PredCst) && !isa<UndefValue>(PredCst)))
- continue;
-
- Result.push_back(std::make_pair(dyn_cast<ConstantInt>(PredCst), P));
- }
-
- return !Result.empty();
+ // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
+ // "I" is a non-local compare-with-a-constant instruction. This would be
+ // able to handle value inequalities better, for example if the compare is
+ // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
+ // Perhaps getConstantOnEdge should be smart enough to do this?
+
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ // If the value is known by LazyValueInfo to be a constant in a
+ // predecessor, use that information to try to thread this block.
+ Constant *PredCst = LVI->getConstantOnEdge(V, P, BB);
+ if (Constant *KC = getKnownConstant(PredCst, Preference))
+ Result.push_back(std::make_pair(KC, P));
}
-
- return false;
+
+ return !Result.empty();
}
-
+
/// If I is a PHI node, then we know the incoming values for any constants.
if (PHINode *PN = dyn_cast<PHINode>(I)) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *InVal = PN->getIncomingValue(i);
- if (isa<ConstantInt>(InVal) || isa<UndefValue>(InVal)) {
- ConstantInt *CI = dyn_cast<ConstantInt>(InVal);
- Result.push_back(std::make_pair(CI, PN->getIncomingBlock(i)));
- } else if (LVI) {
+ if (Constant *KC = getKnownConstant(InVal, Preference)) {
+ Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
+ } else {
Constant *CI = LVI->getConstantOnEdge(InVal,
PN->getIncomingBlock(i), BB);
- // LVI returns null is no value could be determined.
- if (!CI) continue;
- PushConstantIntOrUndef(Result, CI, PN->getIncomingBlock(i));
+ if (Constant *KC = getKnownConstant(CI, Preference))
+ Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
}
}
-
+
return !Result.empty();
}
-
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals, RHSVals;
+
+ PredValueInfoTy LHSVals, RHSVals;
// Handle some boolean conditions.
- if (I->getType()->getPrimitiveSizeInBits() == 1) {
+ if (I->getType()->getPrimitiveSizeInBits() == 1) {
+ assert(Preference == WantInteger && "One-bit non-integer type?");
// X | true -> true
// X & false -> false
if (I->getOpcode() == Instruction::Or ||
I->getOpcode() == Instruction::And) {
- ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals);
- ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals);
-
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
+ WantInteger);
+ ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals,
+ WantInteger);
+
if (LHSVals.empty() && RHSVals.empty())
return false;
-
+
ConstantInt *InterestingVal;
if (I->getOpcode() == Instruction::Or)
InterestingVal = ConstantInt::getTrue(I->getContext());
else
InterestingVal = ConstantInt::getFalse(I->getContext());
-
+
SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;
-
+
// Scan for the sentinel. If we find an undef, force it to the
// interesting value: x|undef -> true and x&undef -> false.
for (unsigned i = 0, e = LHSVals.size(); i != e; ++i)
- if (LHSVals[i].first == InterestingVal || LHSVals[i].first == 0) {
+ if (LHSVals[i].first == InterestingVal ||
+ isa<UndefValue>(LHSVals[i].first)) {
Result.push_back(LHSVals[i]);
Result.back().first = InterestingVal;
LHSKnownBBs.insert(LHSVals[i].second);
}
for (unsigned i = 0, e = RHSVals.size(); i != e; ++i)
- if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0) {
+ if (RHSVals[i].first == InterestingVal ||
+ isa<UndefValue>(RHSVals[i].first)) {
// If we already inferred a value for this block on the LHS, don't
// re-add it.
if (!LHSKnownBBs.count(RHSVals[i].second)) {
@@ -414,48 +419,51 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
Result.back().first = InterestingVal;
}
}
-
+
return !Result.empty();
}
-
+
// Handle the NOT form of XOR.
if (I->getOpcode() == Instruction::Xor &&
isa<ConstantInt>(I->getOperand(1)) &&
cast<ConstantInt>(I->getOperand(1))->isOne()) {
- ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result);
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result,
+ WantInteger);
if (Result.empty())
return false;
// Invert the known values.
for (unsigned i = 0, e = Result.size(); i != e; ++i)
- if (Result[i].first)
- Result[i].first =
- cast<ConstantInt>(ConstantExpr::getNot(Result[i].first));
-
+ Result[i].first = ConstantExpr::getNot(Result[i].first);
+
return true;
}
-
+
// Try to simplify some other binary operator values.
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ assert(Preference != WantBlockAddress
+ && "A binary operator creating a block address?");
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals;
- ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals);
-
+ PredValueInfoTy LHSVals;
+ ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals,
+ WantInteger);
+
// Try to use constant folding to simplify the binary operator.
for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) {
- Constant *V = LHSVals[i].first ? LHSVals[i].first :
- cast<Constant>(UndefValue::get(BO->getType()));
+ Constant *V = LHSVals[i].first;
Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);
-
- PushConstantIntOrUndef(Result, Folded, LHSVals[i].second);
+
+ if (Constant *KC = getKnownConstant(Folded, WantInteger))
+ Result.push_back(std::make_pair(KC, LHSVals[i].second));
}
}
-
+
return !Result.empty();
}
-
+
// Handle compare with phi operand, where the PHI is defined in this block.
if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
+ assert(Preference == WantInteger && "Compares only produce integers");
PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0));
if (PN && PN->getParent() == BB) {
// We can do this simplification if any comparisons fold to true or false.
@@ -464,32 +472,31 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
BasicBlock *PredBB = PN->getIncomingBlock(i);
Value *LHS = PN->getIncomingValue(i);
Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
-
+
Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
if (Res == 0) {
- if (!LVI || !isa<Constant>(RHS))
+ if (!isa<Constant>(RHS))
continue;
-
- LazyValueInfo::Tristate
+
+ LazyValueInfo::Tristate
ResT = LVI->getPredicateOnEdge(Cmp->getPredicate(), LHS,
cast<Constant>(RHS), PredBB, BB);
if (ResT == LazyValueInfo::Unknown)
continue;
Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
}
-
- if (Constant *ConstRes = dyn_cast<Constant>(Res))
- PushConstantIntOrUndef(Result, ConstRes, PredBB);
+
+ if (Constant *KC = getKnownConstant(Res, WantInteger))
+ Result.push_back(std::make_pair(KC, PredBB));
}
-
+
return !Result.empty();
}
-
-
+
+
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
- if (LVI && isa<Constant>(Cmp->getOperand(1)) &&
- Cmp->getType()->isIntegerTy()) {
+ if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) {
if (!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
@@ -505,44 +512,74 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
continue;
Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
- Result.push_back(std::make_pair(cast<ConstantInt>(ResC), P));
+ Result.push_back(std::make_pair(ResC, P));
}
return !Result.empty();
}
-
+
// Try to find a constant value for the LHS of a comparison,
// and evaluate it statically if we can.
if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals;
- ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals);
-
+ PredValueInfoTy LHSVals;
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
+ WantInteger);
+
for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) {
- Constant *V = LHSVals[i].first ? LHSVals[i].first :
- cast<Constant>(UndefValue::get(CmpConst->getType()));
+ Constant *V = LHSVals[i].first;
Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
V, CmpConst);
- PushConstantIntOrUndef(Result, Folded, LHSVals[i].second);
+ if (Constant *KC = getKnownConstant(Folded, WantInteger))
+ Result.push_back(std::make_pair(KC, LHSVals[i].second));
}
-
+
return !Result.empty();
}
}
}
-
- if (LVI) {
- // If all else fails, see if LVI can figure out a constant value for us.
- Constant *CI = LVI->getConstant(V, BB);
- ConstantInt *CInt = dyn_cast_or_null<ConstantInt>(CI);
- if (CInt) {
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- Result.push_back(std::make_pair(CInt, *PI));
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+ // Handle select instructions where at least one operand is a known constant
+ // and we can figure out the condition value for any predecessor block.
+ Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
+ Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
+ PredValueInfoTy Conds;
+ if ((TrueVal || FalseVal) &&
+ ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds,
+ WantInteger)) {
+ for (unsigned i = 0, e = Conds.size(); i != e; ++i) {
+ Constant *Cond = Conds[i].first;
+
+ // Figure out what value to use for the condition.
+ bool KnownCond;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
+ // A known boolean.
+ KnownCond = CI->isOne();
+ } else {
+ assert(isa<UndefValue>(Cond) && "Unexpected condition value");
+ // Either operand will do, so be sure to pick the one that's a known
+ // constant.
+ // FIXME: Do this more cleverly if both values are known constants?
+ KnownCond = (TrueVal != 0);
+ }
+
+ // See if the select has a known constant value for this predecessor.
+ if (Constant *Val = KnownCond ? TrueVal : FalseVal)
+ Result.push_back(std::make_pair(Val, Conds[i].second));
+ }
+
+ return !Result.empty();
}
-
- return !Result.empty();
}
-
- return false;
+
+ // If all else fails, see if LVI can figure out a constant value for us.
+ Constant *CI = LVI->getConstant(V, BB);
+ if (Constant *KC = getKnownConstant(CI, Preference)) {
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ Result.push_back(std::make_pair(KC, *PI));
+ }
+
+ return !Result.empty();
}
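
The getKnownConstant helper that this function now leans on is not shown in this hunk. A sketch reconstructed from its call sites above — it must tolerate a null input from LVI, let undef through, and filter the result type by the caller's ConstantPreference; the patch's actual code may differ in detail:

    static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
      if (Val == 0)
        return 0;                         // e.g. LVI->getConstant found nothing.
      if (UndefValue *U = dyn_cast<UndefValue>(Val))
        return U;                         // Undef counts as "known" here.
      if (Preference == WantBlockAddress)
        return dyn_cast<BlockAddress>(Val);  // indirectbr destinations.
      return dyn_cast<ConstantInt>(Val);     // WantInteger.
    }
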
@@ -565,10 +602,20 @@ static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
if (NumPreds < MinNumPreds)
MinSucc = i;
}
-
+
return MinSucc;
}
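
Only the tail of GetBestDestForJumpOnUndef is visible here; from it, the heuristic is to prefer the successor with the fewest predecessors, so folding the undef branch perturbs the CFG as little as possible. A sketch consistent with the visible tail (reconstructed, not necessarily the file's exact code):

    static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
      TerminatorInst *BBTerm = BB->getTerminator();
      unsigned MinSucc = 0;
      BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
      unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
      for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
        TestBB = BBTerm->getSuccessor(i);
        unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
        if (NumPreds < MinNumPreds)
          MinSucc = i;
      }
      return MinSucc;
    }
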
+static bool hasAddressTakenAndUsed(BasicBlock *BB) {
+ if (!BB->hasAddressTaken()) return false;
+
+ // If the block has its address taken, it may be a tree of dead constants
+ // hanging off of it. These shouldn't keep the block alive.
+ BlockAddress *BA = BlockAddress::get(BB);
+ BA->removeDeadConstantUsers();
+ return !BA->use_empty();
+}
+
/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreading::ProcessBlock(BasicBlock *BB) {
@@ -577,167 +624,122 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
if (pred_begin(BB) == pred_end(BB) &&
BB != &BB->getParent()->getEntryBlock())
return false;
-
+
// If this block has a single predecessor, and if that pred has a single
// successor, merge the blocks. This encourages recursive jump threading
// because now the condition in this block can be threaded through
// predecessors of our predecessor block.
if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
if (SinglePred->getTerminator()->getNumSuccessors() == 1 &&
- SinglePred != BB) {
+ SinglePred != BB && !hasAddressTakenAndUsed(BB)) {
// If SinglePred was a loop header, BB becomes one.
if (LoopHeaders.erase(SinglePred))
LoopHeaders.insert(BB);
-
+
// Remember if SinglePred was the entry block of the function. If so, we
// will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
- if (LVI) LVI->eraseBlock(SinglePred);
+ LVI->eraseBlock(SinglePred);
MergeBasicBlockIntoOnlyPred(BB);
-
+
if (isEntry && BB != &BB->getParent()->getEntryBlock())
BB->moveBefore(&BB->getParent()->getEntryBlock());
return true;
}
}
- // Look to see if the terminator is a branch of switch, if not we can't thread
- // it.
+ // What kind of constant we're looking for.
+ ConstantPreference Preference = WantInteger;
+
+ // Look to see if the terminator is a conditional branch, switch or indirect
+ // branch; if not, we can't thread it.
Value *Condition;
- if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
+ Instruction *Terminator = BB->getTerminator();
+ if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
// Can't thread an unconditional jump.
if (BI->isUnconditional()) return false;
Condition = BI->getCondition();
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
Condition = SI->getCondition();
- else
+ } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
+ Condition = IB->getAddress()->stripPointerCasts();
+ Preference = WantBlockAddress;
+ } else {
return false; // Must be an invoke.
-
- // If the terminator of this block is branching on a constant, simplify the
- // terminator to an unconditional branch. This can occur due to threading in
- // other blocks.
- if (isa<ConstantInt>(Condition)) {
- DEBUG(dbgs() << " In block '" << BB->getName()
- << "' folding terminator: " << *BB->getTerminator() << '\n');
- ++NumFolds;
- ConstantFoldTerminator(BB);
- return true;
}
-
+
// If the terminator is branching on an undef, we can pick any of the
// successors to branch to. Let GetBestDestForJumpOnUndef decide.
if (isa<UndefValue>(Condition)) {
unsigned BestSucc = GetBestDestForJumpOnUndef(BB);
-
+
// Fold the branch/switch.
TerminatorInst *BBTerm = BB->getTerminator();
for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
if (i == BestSucc) continue;
- RemovePredecessorAndSimplify(BBTerm->getSuccessor(i), BB, TD);
+ BBTerm->getSuccessor(i)->removePredecessor(BB, true);
}
-
+
DEBUG(dbgs() << " In block '" << BB->getName()
<< "' folding undef terminator: " << *BBTerm << '\n');
BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
BBTerm->eraseFromParent();
return true;
}
-
- Instruction *CondInst = dyn_cast<Instruction>(Condition);
- // If the condition is an instruction defined in another block, see if a
- // predecessor has the same condition:
- // br COND, BBX, BBY
- // BBX:
- // br COND, BBZ, BBW
- if (!LVI &&
- !Condition->hasOneUse() && // Multiple uses.
- (CondInst == 0 || CondInst->getParent() != BB)) { // Non-local definition.
- pred_iterator PI = pred_begin(BB), E = pred_end(BB);
- if (isa<BranchInst>(BB->getTerminator())) {
- for (; PI != E; ++PI) {
- BasicBlock *P = *PI;
- if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
- if (PBI->isConditional() && PBI->getCondition() == Condition &&
- ProcessBranchOnDuplicateCond(P, BB))
- return true;
- }
- } else {
- assert(isa<SwitchInst>(BB->getTerminator()) && "Unknown jump terminator");
- for (; PI != E; ++PI) {
- BasicBlock *P = *PI;
- if (SwitchInst *PSI = dyn_cast<SwitchInst>(P->getTerminator()))
- if (PSI->getCondition() == Condition &&
- ProcessSwitchOnDuplicateCond(P, BB))
- return true;
- }
- }
+ // If the terminator of this block is branching on a constant, simplify the
+ // terminator to an unconditional branch. This can occur due to threading in
+ // other blocks.
+ if (getKnownConstant(Condition, Preference)) {
+ DEBUG(dbgs() << " In block '" << BB->getName()
+ << "' folding terminator: " << *BB->getTerminator() << '\n');
+ ++NumFolds;
+ ConstantFoldTerminator(BB);
+ return true;
}
+ Instruction *CondInst = dyn_cast<Instruction>(Condition);
+
// All the rest of our checks depend on the condition being an instruction.
if (CondInst == 0) {
// FIXME: Unify this with code below.
- if (LVI && ProcessThreadableEdges(Condition, BB))
+ if (ProcessThreadableEdges(Condition, BB, Preference))
return true;
return false;
- }
-
-
+ }
+
+
if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
- if (!LVI &&
- (!isa<PHINode>(CondCmp->getOperand(0)) ||
- cast<PHINode>(CondCmp->getOperand(0))->getParent() != BB)) {
- // If we have a comparison, loop over the predecessors to see if there is
- // a condition with a lexically identical value.
- pred_iterator PI = pred_begin(BB), E = pred_end(BB);
- for (; PI != E; ++PI) {
- BasicBlock *P = *PI;
- if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
- if (PBI->isConditional() && P != BB) {
- if (CmpInst *CI = dyn_cast<CmpInst>(PBI->getCondition())) {
- if (CI->getOperand(0) == CondCmp->getOperand(0) &&
- CI->getOperand(1) == CondCmp->getOperand(1) &&
- CI->getPredicate() == CondCmp->getPredicate()) {
- // TODO: Could handle things like (x != 4) --> (x == 17)
- if (ProcessBranchOnDuplicateCond(P, BB))
- return true;
- }
- }
- }
- }
- }
-
// For a comparison where the LHS is outside this block, it's possible
// that we've branched on it before. Use LVI to see if we can simplify
// the branch based on that.
BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- if (LVI && CondBr && CondConst && CondBr->isConditional() && PI != PE &&
+ if (CondBr && CondConst && CondBr->isConditional() && PI != PE &&
(!isa<Instruction>(CondCmp->getOperand(0)) ||
cast<Instruction>(CondCmp->getOperand(0))->getParent() != BB)) {
// For each predecessor edge, determine if the comparison is true or false
// on that edge. If they're all true or all false, we can simplify the
// branch.
// FIXME: We could handle mixed true/false by duplicating code.
- LazyValueInfo::Tristate Baseline =
+ LazyValueInfo::Tristate Baseline =
LVI->getPredicateOnEdge(CondCmp->getPredicate(), CondCmp->getOperand(0),
CondConst, *PI, BB);
if (Baseline != LazyValueInfo::Unknown) {
// Check that all remaining incoming values match the first one.
while (++PI != PE) {
- LazyValueInfo::Tristate Ret = LVI->getPredicateOnEdge(
- CondCmp->getPredicate(),
- CondCmp->getOperand(0),
- CondConst, *PI, BB);
+ LazyValueInfo::Tristate Ret =
+ LVI->getPredicateOnEdge(CondCmp->getPredicate(),
+ CondCmp->getOperand(0), CondConst, *PI, BB);
if (Ret != Baseline) break;
}
-
+
// If we terminated early, then one of the values didn't match.
if (PI == PE) {
unsigned ToRemove = Baseline == LazyValueInfo::True ? 1 : 0;
unsigned ToKeep = Baseline == LazyValueInfo::True ? 0 : 1;
- RemovePredecessorAndSimplify(CondBr->getSuccessor(ToRemove), BB, TD);
+ CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
CondBr->eraseFromParent();
return true;
@@ -755,174 +757,37 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
if (isa<Constant>(CondCmp->getOperand(1)))
SimplifyValue = CondCmp->getOperand(0);
-
+
// TODO: There are other places where load PRE would be profitable, such as
// more complex comparisons.
if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
if (SimplifyPartiallyRedundantLoad(LI))
return true;
-
-
+
+
// Handle a variety of cases where we are branching on something derived from
// a PHI node in the current block. If we can prove that any predecessors
// compute a predictable value based on a PHI node, thread those predecessors.
//
- if (ProcessThreadableEdges(CondInst, BB))
+ if (ProcessThreadableEdges(CondInst, BB, Preference))
return true;
-
+
// If this is an otherwise-unfoldable branch on a phi node in the current
// block, see if we can simplify.
if (PHINode *PN = dyn_cast<PHINode>(CondInst))
if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
return ProcessBranchOnPHI(PN);
-
-
+
+
// If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
if (CondInst->getOpcode() == Instruction::Xor &&
CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));
-
-
- // TODO: If we have: "br (X > 0)" and we have a predecessor where we know
- // "(X == 4)", thread through this block.
-
- return false;
-}
-/// ProcessBranchOnDuplicateCond - We found a block and a predecessor of that
-/// block that jump on exactly the same condition. This means that we almost
-/// always know the direction of the edge in the DESTBB:
-/// PREDBB:
-/// br COND, DESTBB, BBY
-/// DESTBB:
-/// br COND, BBZ, BBW
-///
-/// If DESTBB has multiple predecessors, we can't just constant fold the branch
-/// in DESTBB, we have to thread over it.
-bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
- BasicBlock *BB) {
- BranchInst *PredBI = cast<BranchInst>(PredBB->getTerminator());
-
- // If both successors of PredBB go to DESTBB, we don't know anything. We can
- // fold the branch to an unconditional one, which allows other recursive
- // simplifications.
- bool BranchDir;
- if (PredBI->getSuccessor(1) != BB)
- BranchDir = true;
- else if (PredBI->getSuccessor(0) != BB)
- BranchDir = false;
- else {
- DEBUG(dbgs() << " In block '" << PredBB->getName()
- << "' folding terminator: " << *PredBB->getTerminator() << '\n');
- ++NumFolds;
- ConstantFoldTerminator(PredBB);
- return true;
- }
-
- BranchInst *DestBI = cast<BranchInst>(BB->getTerminator());
- // If the dest block has one predecessor, just fix the branch condition to a
- // constant and fold it.
- if (BB->getSinglePredecessor()) {
- DEBUG(dbgs() << " In block '" << BB->getName()
- << "' folding condition to '" << BranchDir << "': "
- << *BB->getTerminator() << '\n');
- ++NumFolds;
- Value *OldCond = DestBI->getCondition();
- DestBI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
- BranchDir));
- // Delete dead instructions before we fold the branch. Folding the branch
- // can eliminate edges from the CFG which can end up deleting OldCond.
- RecursivelyDeleteTriviallyDeadInstructions(OldCond);
- ConstantFoldTerminator(BB);
- return true;
- }
-
-
- // Next, figure out which successor we are threading to.
- BasicBlock *SuccBB = DestBI->getSuccessor(!BranchDir);
-
- SmallVector<BasicBlock*, 2> Preds;
- Preds.push_back(PredBB);
-
- // Ok, try to thread it!
- return ThreadEdge(BB, Preds, SuccBB);
-}
-
-/// ProcessSwitchOnDuplicateCond - We found a block and a predecessor of that
-/// block that switch on exactly the same condition. This means that we almost
-/// always know the direction of the edge in the DESTBB:
-/// PREDBB:
-/// switch COND [... DESTBB, BBY ... ]
-/// DESTBB:
-/// switch COND [... BBZ, BBW ]
-///
-/// Optimizing switches like this is very important, because simplifycfg builds
-/// switches out of repeated 'if' conditions.
-bool JumpThreading::ProcessSwitchOnDuplicateCond(BasicBlock *PredBB,
- BasicBlock *DestBB) {
- // Can't thread edge to self.
- if (PredBB == DestBB)
- return false;
-
- SwitchInst *PredSI = cast<SwitchInst>(PredBB->getTerminator());
- SwitchInst *DestSI = cast<SwitchInst>(DestBB->getTerminator());
-
- // There are a variety of optimizations that we can potentially do on these
- // blocks: we order them from most to least preferable.
-
- // If DESTBB *just* contains the switch, then we can forward edges from PREDBB
- // directly to their destination. This does not introduce *any* code size
- // growth. Skip debug info first.
- BasicBlock::iterator BBI = DestBB->begin();
- while (isa<DbgInfoIntrinsic>(BBI))
- BBI++;
-
- // FIXME: Thread if it just contains a PHI.
- if (isa<SwitchInst>(BBI)) {
- bool MadeChange = false;
- // Ignore the default edge for now.
- for (unsigned i = 1, e = DestSI->getNumSuccessors(); i != e; ++i) {
- ConstantInt *DestVal = DestSI->getCaseValue(i);
- BasicBlock *DestSucc = DestSI->getSuccessor(i);
-
- // Okay, DestSI has a case for 'DestVal' that goes to 'DestSucc'. See if
- // PredSI has an explicit case for it. If so, forward. If it is covered
- // by the default case, we can't update PredSI.
- unsigned PredCase = PredSI->findCaseValue(DestVal);
- if (PredCase == 0) continue;
-
- // If PredSI doesn't go to DestBB on this value, then it won't reach the
- // case on this condition.
- if (PredSI->getSuccessor(PredCase) != DestBB &&
- DestSI->getSuccessor(i) != DestBB)
- continue;
-
- // Do not forward this if it already goes to this destination, this would
- // be an infinite loop.
- if (PredSI->getSuccessor(PredCase) == DestSucc)
- continue;
-
- // Otherwise, we're safe to make the change. Make sure that the edge from
- // DestSI to DestSucc is not critical and has no PHI nodes.
- DEBUG(dbgs() << "FORWARDING EDGE " << *DestVal << " FROM: " << *PredSI);
- DEBUG(dbgs() << "THROUGH: " << *DestSI);
+ // TODO: If we have: "br (X > 0)" and we have a predecessor where we know
+ // "(X == 4)", thread through this block.
- // If the destination has PHI nodes, just split the edge for updating
- // simplicity.
- if (isa<PHINode>(DestSucc->begin()) && !DestSucc->getSinglePredecessor()){
- SplitCriticalEdge(DestSI, i, this);
- DestSucc = DestSI->getSuccessor(i);
- }
- FoldSingleEntryPHINodes(DestSucc);
- PredSI->setSuccessor(PredCase, DestSucc);
- MadeChange = true;
- }
-
- if (MadeChange)
- return true;
- }
-
return false;
}
@@ -934,13 +799,13 @@ bool JumpThreading::ProcessSwitchOnDuplicateCond(BasicBlock *PredBB,
bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Don't hack volatile loads.
if (LI->isVolatile()) return false;
-
+
// If the load is defined in a block with exactly one predecessor, it can't be
// partially redundant.
BasicBlock *LoadBB = LI->getParent();
if (LoadBB->getSinglePredecessor())
return false;
-
+
Value *LoadedPtr = LI->getOperand(0);
// If the loaded operand is defined in the LoadBB, it can't be available.
@@ -948,17 +813,17 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
if (Instruction *PtrOp = dyn_cast<Instruction>(LoadedPtr))
if (PtrOp->getParent() == LoadBB)
return false;
-
+
// Scan a few instructions up from the load, to see if it is obviously live at
// the entry to its block.
BasicBlock::iterator BBIt = LI;
- if (Value *AvailableVal =
+ if (Value *AvailableVal =
FindAvailableLoadedValue(LoadedPtr, LoadBB, BBIt, 6)) {
// If the value of the load is locally available within the block, just use
// it. This frequently occurs for reg2mem'd allocas.
//cerr << "LOAD ELIMINATED:\n" << *BBIt << *LI << "\n";
-
+
// If the returned value is the load itself, replace with an undef. This can
// only happen in dead loops.
if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
@@ -972,13 +837,13 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// might clobber its value.
if (BBIt != LoadBB->begin())
return false;
-
-
+
+
SmallPtrSet<BasicBlock*, 8> PredsScanned;
typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
AvailablePredsTy AvailablePreds;
BasicBlock *OneUnavailablePred = 0;
-
+
// If we got here, the loaded value is transparent through to the start of the
// block. Check to see if it is available in any of the predecessor blocks.
for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
@@ -996,23 +861,23 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
OneUnavailablePred = PredBB;
continue;
}
-
+
// If so, this load is partially redundant. Remember this info so that we
// can create a PHI node.
AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
}
-
+
// If the loaded value isn't available in any predecessor, it isn't partially
// redundant.
if (AvailablePreds.empty()) return false;
-
+
// Okay, the loaded value is available in at least one (and maybe all!)
// predecessors. If the value is unavailable in more than one unique
// predecessor, we want to insert a merge block for those common predecessors.
// This ensures that we only have to insert one reload, thus not increasing
// code size.
BasicBlock *UnavailablePred = 0;
-
+
// If there is exactly one predecessor where the value is unavailable, the
// already computed 'OneUnavailablePred' block is it. If it ends in an
// unconditional branch, we know that it isn't a critical edge.
@@ -1035,17 +900,17 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// If the predecessor is an indirect goto, we can't split the edge.
if (isa<IndirectBrInst>(P->getTerminator()))
return false;
-
+
if (!AvailablePredSet.count(P))
PredsToSplit.push_back(P);
}
-
+
// Split them out to their own block.
UnavailablePred =
SplitBlockPredecessors(LoadBB, &PredsToSplit[0], PredsToSplit.size(),
"thread-pre-split", this);
}
-
+
// If the value isn't available in all predecessors, then there will be
// exactly one where it isn't available. Insert a load on that edge and add
// it to the AvailablePreds list.
@@ -1057,35 +922,35 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
UnavailablePred->getTerminator());
AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
}
-
+
// Now we know that each predecessor of this block has a value in
// AvailablePreds, sort them for efficient access as we're walking the preds.
array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());
-
+
// Create a PHI node at the start of the block for the PRE'd load value.
PHINode *PN = PHINode::Create(LI->getType(), "", LoadBB->begin());
PN->takeName(LI);
-
+
// Insert new entries into the PHI for each predecessor. A single block may
// have multiple entries here.
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E;
++PI) {
BasicBlock *P = *PI;
- AvailablePredsTy::iterator I =
+ AvailablePredsTy::iterator I =
std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
std::make_pair(P, (Value*)0));
-
+
assert(I != AvailablePreds.end() && I->first == P &&
"Didn't find entry for predecessor!");
-
+
PN->addIncoming(I->second, I->first);
}
-
+
//cerr << "PRE: " << *LI << *PN << "\n";
-
+
LI->replaceAllUsesWith(PN);
LI->eraseFromParent();
-
+
return true;
}
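
The PHI-filling loop above depends on a small idiom: AvailablePreds is kept sorted by predecessor pointer, and each lookup pairs the key with a null Value* so std::lower_bound lands on the first entry for that block. A self-contained sketch of the same idiom with generic types (illustration only, not the patch's code):

    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    typedef std::vector<std::pair<int*, const char*> > AvailTy;

    // Avail must already be sorted (the patch uses array_pod_sort for this).
    const char *lookup(AvailTy &Avail, int *Key) {
      // Pairing Key with the minimal possible second element makes lower_bound
      // stop at the first entry whose .first equals Key.
      AvailTy::iterator I =
          std::lower_bound(Avail.begin(), Avail.end(),
                           std::make_pair(Key, (const char*)0));
      assert(I != Avail.end() && I->first == Key && "Key must be present");
      return I->second;
    }
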
@@ -1097,7 +962,7 @@ FindMostPopularDest(BasicBlock *BB,
const SmallVectorImpl<std::pair<BasicBlock*,
BasicBlock*> > &PredToDestList) {
assert(!PredToDestList.empty());
-
+
// Determine popularity. If there are multiple possible destinations, we
// explicitly choose to ignore 'undef' destinations. We prefer to thread
// blocks with known and real destinations to threading undef. We'll handle
@@ -1106,13 +971,13 @@ FindMostPopularDest(BasicBlock *BB,
for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i)
if (PredToDestList[i].second)
DestPopularity[PredToDestList[i].second]++;
-
+
// Find the most popular dest.
DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
BasicBlock *MostPopularDest = DPI->first;
unsigned Popularity = DPI->second;
SmallVector<BasicBlock*, 4> SamePopularity;
-
+
for (++DPI; DPI != DestPopularity.end(); ++DPI) {
// If the popularity of this entry isn't higher than the popularity we've
// seen so far, ignore it.
@@ -1126,10 +991,10 @@ FindMostPopularDest(BasicBlock *BB,
SamePopularity.clear();
MostPopularDest = DPI->first;
Popularity = DPI->second;
- }
+ }
}
-
- // Okay, now we know the most popular destination. If there is more than
+
+ // Okay, now we know the most popular destination. If there is more than one
// destination, we need to determine one. This is arbitrary, but we need
// to make a deterministic decision. Pick the first one that appears in the
// successor list.
@@ -1138,105 +1003,105 @@ FindMostPopularDest(BasicBlock *BB,
TerminatorInst *TI = BB->getTerminator();
for (unsigned i = 0; ; ++i) {
assert(i != TI->getNumSuccessors() && "Didn't find any successor!");
-
+
if (std::find(SamePopularity.begin(), SamePopularity.end(),
TI->getSuccessor(i)) == SamePopularity.end())
continue;
-
+
MostPopularDest = TI->getSuccessor(i);
break;
}
}
-
+
// Okay, we have finally picked the most popular destination.
return MostPopularDest;
}
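
The subtle part of FindMostPopularDest is the tie-break: when several destinations are equally popular, it takes the one appearing first in the terminator's successor list, which keeps the pass deterministic across runs. The same scheme over plain values, as a small self-contained sketch (hypothetical data, not the patch's types):

    #include <map>
    #include <vector>

    // Pick the most frequent value in Counts; break ties by position in Order.
    int mostPopular(const std::map<int, unsigned> &Counts,
                    const std::vector<int> &Order) {
      unsigned Best = 0;
      std::vector<int> Tied;
      for (std::map<int, unsigned>::const_iterator I = Counts.begin(),
           E = Counts.end(); I != E; ++I) {
        if (I->second < Best)
          continue;                      // Strictly less popular: ignore.
        if (I->second > Best) {          // New winner: restart the tie set.
          Tied.clear();
          Best = I->second;
        }
        Tied.push_back(I->first);
      }
      // Deterministic tie-break: the first tied value in the caller's order.
      for (size_t i = 0; i != Order.size(); ++i)
        for (size_t j = 0; j != Tied.size(); ++j)
          if (Order[i] == Tied[j])
            return Order[i];
      return 0;                          // Counts was empty.
    }
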
-bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB) {
+bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
+ ConstantPreference Preference) {
// If threading this would thread across a loop header, don't even try to
// thread the edge.
if (LoopHeaders.count(BB))
return false;
-
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> PredValues;
- if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues))
+
+ PredValueInfoTy PredValues;
+ if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference))
return false;
-
+
assert(!PredValues.empty() &&
"ComputeValueKnownInPredecessors returned true with no values");
DEBUG(dbgs() << "IN BB: " << *BB;
for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
- dbgs() << " BB '" << BB->getName() << "': FOUND condition = ";
- if (PredValues[i].first)
- dbgs() << *PredValues[i].first;
- else
- dbgs() << "UNDEF";
- dbgs() << " for pred '" << PredValues[i].second->getName()
- << "'.\n";
+ dbgs() << " BB '" << BB->getName() << "': FOUND condition = "
+ << *PredValues[i].first
+ << " for pred '" << PredValues[i].second->getName() << "'.\n";
});
-
+
// Decide what we want to thread through. Convert our list of known values to
// a list of known destinations for each pred. This also discards duplicate
// predecessors and keeps track of the undefined inputs (which are represented
// as a null dest in the PredToDestList).
SmallPtrSet<BasicBlock*, 16> SeenPreds;
SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
-
+
BasicBlock *OnlyDest = 0;
BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
-
+
for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
BasicBlock *Pred = PredValues[i].second;
if (!SeenPreds.insert(Pred))
continue; // Duplicate predecessor entry.
-
+
// If the predecessor ends with an indirect goto, we can't change its
// destination.
if (isa<IndirectBrInst>(Pred->getTerminator()))
continue;
-
- ConstantInt *Val = PredValues[i].first;
-
+
+ Constant *Val = PredValues[i].first;
+
BasicBlock *DestBB;
- if (Val == 0) // Undef.
+ if (isa<UndefValue>(Val))
DestBB = 0;
else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
- DestBB = BI->getSuccessor(Val->isZero());
+ DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
+ else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
+ DestBB = SI->getSuccessor(SI->findCaseValue(cast<ConstantInt>(Val)));
else {
- SwitchInst *SI = cast<SwitchInst>(BB->getTerminator());
- DestBB = SI->getSuccessor(SI->findCaseValue(Val));
+ assert(isa<IndirectBrInst>(BB->getTerminator())
+ && "Unexpected terminator");
+ DestBB = cast<BlockAddress>(Val)->getBasicBlock();
}
// If we have exactly one destination, remember it for efficiency below.
- if (i == 0)
+ if (PredToDestList.empty())
OnlyDest = DestBB;
else if (OnlyDest != DestBB)
OnlyDest = MultipleDestSentinel;
-
+
PredToDestList.push_back(std::make_pair(Pred, DestBB));
}
-
+
// If all edges were unthreadable, we fail.
if (PredToDestList.empty())
return false;
-
+
// Determine which is the most common successor. If we have many inputs and
// this block is a switch, we want to start by threading the batch that goes
// to the most popular destination first. If we only know about one
// threadable destination (the common case) we can avoid this.
BasicBlock *MostPopularDest = OnlyDest;
-
+
if (MostPopularDest == MultipleDestSentinel)
MostPopularDest = FindMostPopularDest(BB, PredToDestList);
-
+
// Now that we know what the most popular destination is, factor all
// predecessors that will jump to it into a single predecessor.
SmallVector<BasicBlock*, 16> PredsToFactor;
for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i)
if (PredToDestList[i].second == MostPopularDest) {
BasicBlock *Pred = PredToDestList[i].first;
-
+
// This predecessor may be a switch or something else that has multiple
// edges to the block. Factor each of these edges by listing them
// according to # occurrences in PredsToFactor.
@@ -1251,7 +1116,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB) {
if (MostPopularDest == 0)
MostPopularDest = BB->getTerminator()->
getSuccessor(GetBestDestForJumpOnUndef(BB));
-
+
// Ok, try to thread it!
return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}
@@ -1259,15 +1124,15 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB) {
/// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch on
/// a PHI node in the current block. See if there are any simplifications we
/// can do based on inputs to the phi node.
-///
+///
bool JumpThreading::ProcessBranchOnPHI(PHINode *PN) {
BasicBlock *BB = PN->getParent();
-
+
// TODO: We could make use of this to do it once for blocks with common PHI
// values.
SmallVector<BasicBlock*, 1> PredBBs;
PredBBs.resize(1);
-
+
// If any of the predecessor blocks end in an unconditional branch, we can
// *duplicate* the conditional branch into that block in order to further
// encourage jump threading and to eliminate cases where we have branch on a
@@ -1289,21 +1154,21 @@ bool JumpThreading::ProcessBranchOnPHI(PHINode *PN) {
/// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch on
/// a xor instruction in the current block. See if there are any
/// simplifications we can do based on inputs to the xor.
-///
+///
bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
BasicBlock *BB = BO->getParent();
-
+
// If either the LHS or RHS of the xor is a constant, don't do this
// optimization.
if (isa<ConstantInt>(BO->getOperand(0)) ||
isa<ConstantInt>(BO->getOperand(1)))
return false;
-
+
// If the first instruction in BB isn't a phi, we won't be able to infer
// anything special about any particular predecessor.
if (!isa<PHINode>(BB->front()))
return false;
-
+
// If we have a xor as the branch input to this block, and we know that the
// LHS or RHS of the xor in any predecessor is true/false, then we can clone
// the condition into the predecessor and fix that value to true, saving some
@@ -1322,15 +1187,17 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
// %Y = icmp ne i32 %A, %B
// br i1 %Z, ...
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> XorOpValues;
+ PredValueInfoTy XorOpValues;
bool isLHS = true;
- if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues)) {
+ if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
+ WantInteger)) {
assert(XorOpValues.empty());
- if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues))
+ if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
+ WantInteger))
return false;
isLHS = false;
}
-
+
assert(!XorOpValues.empty() &&
"ComputeValueKnownInPredecessors returned true with no values");
@@ -1338,29 +1205,33 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
// predecessors can be of the set true, false, or undef.
unsigned NumTrue = 0, NumFalse = 0;
for (unsigned i = 0, e = XorOpValues.size(); i != e; ++i) {
- if (!XorOpValues[i].first) continue; // Ignore undefs for the count.
- if (XorOpValues[i].first->isZero())
+ if (isa<UndefValue>(XorOpValues[i].first))
+ // Ignore undefs for the count.
+ continue;
+ if (cast<ConstantInt>(XorOpValues[i].first)->isZero())
++NumFalse;
else
++NumTrue;
}
-
+
// Determine which value to split on, true, false, or undef if neither.
ConstantInt *SplitVal = 0;
if (NumTrue > NumFalse)
SplitVal = ConstantInt::getTrue(BB->getContext());
else if (NumTrue != 0 || NumFalse != 0)
SplitVal = ConstantInt::getFalse(BB->getContext());
-
+
// Collect all of the blocks that this can be folded into so that we can
// factor this once and clone it once.
SmallVector<BasicBlock*, 8> BlocksToFoldInto;
for (unsigned i = 0, e = XorOpValues.size(); i != e; ++i) {
- if (XorOpValues[i].first != SplitVal && XorOpValues[i].first != 0) continue;
+ if (XorOpValues[i].first != SplitVal &&
+ !isa<UndefValue>(XorOpValues[i].first))
+ continue;
BlocksToFoldInto.push_back(XorOpValues[i].second);
}
-
+
// If we inferred a value for all of the predecessors, then duplication won't
// help us. However, we can just replace the LHS or RHS with the constant.
if (BlocksToFoldInto.size() ==
@@ -1377,10 +1248,10 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
// If all preds provide 1, set the computed value to 1.
BO->setOperand(!isLHS, SplitVal);
}
-
+
return true;
}
-
+
// Try to duplicate BB into PredBB.
return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}
@@ -1398,14 +1269,14 @@ static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
// Ok, we have a PHI node. Figure out what the incoming value was for the
// DestBlock.
Value *IV = PN->getIncomingValueForBlock(OldPred);
-
+
// Remap the value if necessary.
if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
if (I != ValueMap.end())
IV = I->second;
}
-
+
PN->addIncoming(IV, NewPred);
}
}
@@ -1413,8 +1284,8 @@ static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
/// ThreadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB. Transform the IR to reflect this change.
-bool JumpThreading::ThreadEdge(BasicBlock *BB,
- const SmallVectorImpl<BasicBlock*> &PredBBs,
+bool JumpThreading::ThreadEdge(BasicBlock *BB,
+ const SmallVectorImpl<BasicBlock*> &PredBBs,
BasicBlock *SuccBB) {
// If threading to the same block as we come from, we would infinite loop.
if (SuccBB == BB) {
@@ -1422,7 +1293,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
<< "' - would thread to self!\n");
return false;
}
-
+
// If threading this would thread across a loop header, don't thread the edge.
// See the comments above FindLoopHeaders for justifications and caveats.
if (LoopHeaders.count(BB)) {
@@ -1438,7 +1309,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
<< "' - Cost is too high: " << JumpThreadCost << "\n");
return false;
}
-
+
// And finally, do it! Start by factoring the predecessors if needed.
BasicBlock *PredBB;
if (PredBBs.size() == 1)
@@ -1449,30 +1320,29 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
PredBB = SplitBlockPredecessors(BB, &PredBBs[0], PredBBs.size(),
".thr_comm", this);
}
-
+
// And finally, do it!
DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() << "' to '"
<< SuccBB->getName() << "' with cost: " << JumpThreadCost
<< ", across block:\n "
<< *BB << "\n");
-
- if (LVI)
- LVI->threadEdge(PredBB, BB, SuccBB);
-
+
+ LVI->threadEdge(PredBB, BB, SuccBB);
+
// We are going to have to map operands from the original BB block to the new
// copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
// account for entry from PredBB.
DenseMap<Instruction*, Value*> ValueMapping;
-
- BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
- BB->getName()+".thread",
+
+ BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
+ BB->getName()+".thread",
BB->getParent(), BB);
NewBB->moveAfter(PredBB);
-
+
BasicBlock::iterator BI = BB->begin();
for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
-
+
// Clone the non-phi instructions of BB into NewBB, keeping track of the
// mapping and using it to remap operands in the cloned instructions.
for (; !isa<TerminatorInst>(BI); ++BI) {
@@ -1480,7 +1350,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
New->setName(BI->getName());
NewBB->getInstList().push_back(New);
ValueMapping[BI] = New;
-
+
// Remap operands to patch up intra-block references.
for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
@@ -1489,15 +1359,15 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
New->setOperand(i, I->second);
}
}
-
+
// We didn't copy the terminator from BB over to NewBB, because there is now
// an unconditional jump to SuccBB. Insert the unconditional jump.
BranchInst::Create(SuccBB, NewBB);
-
+
// Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
// PHI nodes for NewBB now.
AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);
-
+
// If there were values defined in BB that are used outside the block, then we
// now have to update all uses of the value to use either the original value,
// the cloned value, or some PHI derived value. This can require arbitrary
@@ -1515,14 +1385,14 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
continue;
} else if (User->getParent() == BB)
continue;
-
+
UsesToRename.push_back(&UI.getUse());
}
-
+
// If there are no uses outside the block, we're done with this instruction.
if (UsesToRename.empty())
continue;
-
+
DEBUG(dbgs() << "JT: Renaming non-local uses of: " << *I << "\n");
// We found a use of I outside of BB. Rename all uses of I that are outside
@@ -1531,28 +1401,28 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
SSAUpdate.Initialize(I->getType(), I->getName());
SSAUpdate.AddAvailableValue(BB, I);
SSAUpdate.AddAvailableValue(NewBB, ValueMapping[I]);
-
+
while (!UsesToRename.empty())
SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
DEBUG(dbgs() << "\n");
}
-
-
+
+
// Ok, NewBB is good to go. Update the terminator of PredBB to jump to
// NewBB instead of BB. This eliminates predecessors from BB, which requires
// us to simplify any PHI nodes in BB.
TerminatorInst *PredTerm = PredBB->getTerminator();
for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
if (PredTerm->getSuccessor(i) == BB) {
- RemovePredecessorAndSimplify(BB, PredBB, TD);
+ BB->removePredecessor(PredBB, true);
PredTerm->setSuccessor(i, NewBB);
}
-
+
// At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation.
SimplifyInstructionsInBlock(NewBB, TD);
-
+
// Threaded an edge!
++NumThreads;
return true;
@@ -1576,14 +1446,14 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
<< "' - it might create an irreducible loop!\n");
return false;
}
-
+
unsigned DuplicationCost = getJumpThreadDuplicationCost(BB);
if (DuplicationCost > Threshold) {
DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
<< "' - Cost is too high: " << DuplicationCost << "\n");
return false;
}
-
+
// And finally, do it! Start by factoring the predecessors if needed.
BasicBlock *PredBB;
if (PredBBs.size() == 1)
@@ -1594,35 +1464,35 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
PredBB = SplitBlockPredecessors(BB, &PredBBs[0], PredBBs.size(),
".thr_comm", this);
}
-
+
// Okay, we decided to do this! Clone all the instructions in BB onto the end
// of PredBB.
DEBUG(dbgs() << " Duplicating block '" << BB->getName() << "' into end of '"
<< PredBB->getName() << "' to eliminate branch on phi. Cost: "
<< DuplicationCost << " block is:" << *BB << "\n");
-
+
// Unless PredBB ends with an unconditional branch, split the edge so that we
// can just clone the bits from BB into the end of the new PredBB.
BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
-
+
if (OldPredBranch == 0 || !OldPredBranch->isUnconditional()) {
PredBB = SplitEdge(PredBB, BB, this);
OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
}
-
+
// We are going to have to map operands from the original BB block into the
// PredBB block. Evaluate PHI nodes in BB.
DenseMap<Instruction*, Value*> ValueMapping;
-
+
BasicBlock::iterator BI = BB->begin();
for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
-
+
// Clone the non-phi instructions of BB into PredBB, keeping track of the
// mapping and using it to remap operands in the cloned instructions.
for (; BI != BB->end(); ++BI) {
Instruction *New = BI->clone();
-
+
// Remap operands to patch up intra-block references.
for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
@@ -1644,7 +1514,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
ValueMapping[BI] = New;
}
}
-
+
// Check to see if the targets of the branch had PHI nodes. If so, we need to
// add entries to the PHI nodes for branch from PredBB now.
BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
@@ -1652,7 +1522,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
ValueMapping);
AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
ValueMapping);
-
+
// If there were values defined in BB that are used outside the block, then we
// now have to update all uses of the value to use either the original value,
// the cloned value, or some PHI derived value. This can require arbitrary
@@ -1670,35 +1540,35 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
continue;
} else if (User->getParent() == BB)
continue;
-
+
UsesToRename.push_back(&UI.getUse());
}
-
+
// If there are no uses outside the block, we're done with this instruction.
if (UsesToRename.empty())
continue;
-
+
DEBUG(dbgs() << "JT: Renaming non-local uses of: " << *I << "\n");
-
+
// We found a use of I outside of BB. Rename all uses of I that are outside
// its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
// with the two values we know.
SSAUpdate.Initialize(I->getType(), I->getName());
SSAUpdate.AddAvailableValue(BB, I);
SSAUpdate.AddAvailableValue(PredBB, ValueMapping[I]);
-
+
while (!UsesToRename.empty())
SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
DEBUG(dbgs() << "\n");
}
-
+
// PredBB no longer jumps to BB, remove entries in the PHI node for the edge
// that we nuked.
- RemovePredecessorAndSimplify(BB, PredBB, TD);
-
+ BB->removePredecessor(PredBB, true);
+
// Remove the unconditional branch at the end of the PredBB block.
OldPredBranch->eraseFromParent();
-
+
++NumDupes;
return true;
}
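
Both ThreadEdge and DuplicateCondBranchOnPHIIntoPred repair SSA form with the same SSAUpdater recipe, which is worth seeing in one piece. Condensed from the two hunks above, using the names that appear there (BB holds the original definition, NewBB or PredBB its clone):

    SSAUpdater SSAUpdate;
    SSAUpdate.Initialize(I->getType(), I->getName());    // one value, two defs
    SSAUpdate.AddAvailableValue(BB, I);                  // the original instruction
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[I]); // its clone
    while (!UsesToRename.empty())                        // rewrite escaped uses,
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); // adding PHIs as needed
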
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index 2ef8544..0786793 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -36,13 +36,13 @@
#include "llvm/DerivedTypes.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Support/CFG.h"
@@ -66,7 +66,9 @@ DisablePromotion("disable-licm-promotion", cl::Hidden,
namespace {
struct LICM : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LICM() : LoopPass(ID) {}
+ LICM() : LoopPass(ID) {
+ initializeLICMPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -80,7 +82,7 @@ namespace {
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved("scalar-evolution");
AU.addPreservedID(LoopSimplifyID);
}
@@ -129,42 +131,7 @@ namespace {
///
bool inSubLoop(BasicBlock *BB) {
assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
- for (Loop::iterator I = CurLoop->begin(), E = CurLoop->end(); I != E; ++I)
- if ((*I)->contains(BB))
- return true; // A subloop actually contains this block!
- return false;
- }
-
- /// isExitBlockDominatedByBlockInLoop - This method checks to see if the
- /// specified exit block of the loop is dominated by the specified block
- /// that is in the body of the loop. We use these constraints to
- /// dramatically limit the amount of the dominator tree that needs to be
- /// searched.
- bool isExitBlockDominatedByBlockInLoop(BasicBlock *ExitBlock,
- BasicBlock *BlockInLoop) const {
- // If the block in the loop is the loop header, it must be dominated!
- BasicBlock *LoopHeader = CurLoop->getHeader();
- if (BlockInLoop == LoopHeader)
- return true;
-
- DomTreeNode *BlockInLoopNode = DT->getNode(BlockInLoop);
- DomTreeNode *IDom = DT->getNode(ExitBlock);
-
- // Because the exit block is not in the loop, we know we have to get _at
- // least_ its immediate dominator.
- IDom = IDom->getIDom();
-
- while (IDom && IDom != BlockInLoopNode) {
- // If we have got to the header of the loop, then the instructions block
- // did not dominate the exit node, so we can't hoist it.
- if (IDom->getBlock() == LoopHeader)
- return false;
-
- // Get next Immediate Dominator.
- IDom = IDom->getIDom();
- };
-
- return true;
+ return LI->getLoopFor(BB) != CurLoop;
}
/// sink - When an instruction is found to only be used outside of the loop,
@@ -187,13 +154,13 @@ namespace {
/// pointerInvalidatedByLoop - Return true if the body of this loop may
/// store into the memory location pointed to by V.
///
- bool pointerInvalidatedByLoop(Value *V, unsigned Size) {
+ bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
+ const MDNode *TBAAInfo) {
// Check to see if any of the basic blocks in CurLoop invalidate *V.
- return CurAST->getAliasSetForPointer(V, Size).isMod();
+ return CurAST->getAliasSetForPointer(V, Size, TBAAInfo).isMod();
}
bool canSinkOrHoistInst(Instruction &I);
- bool isLoopInvariantInst(Instruction &I);
bool isNotUsedInLoop(Instruction &I);
void PromoteAliasSet(AliasSet &AS);
@@ -201,7 +168,12 @@ namespace {
}
char LICM::ID = 0;
-INITIALIZE_PASS(LICM, "licm", "Loop Invariant Code Motion", false, false);
+INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)
Pass *llvm::createLICMPass() { return new LICM(); }
@@ -369,7 +341,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
// if all of the operands of the instruction are loop invariant and if it
// is safe to hoist the instruction.
//
- if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
+ if (CurLoop->hasLoopInvariantOperands(&I) && canSinkOrHoistInst(I) &&
isSafeToExecuteUnconditionally(I))
hoist(I);
}
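
Loop::hasLoopInvariantOperands subsumes the pass-local isLoopInvariantInst helper that this patch deletes further down. A sketch of what the Loop method has to check, modeled directly on that deleted helper (the exact implementation in Loop may differ):

    // All operands must already be invariant in this loop for the instruction
    // to be a hoisting candidate (modeled on the deleted isLoopInvariantInst).
    bool hasLoopInvariantOperands(Instruction *I) const {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
        if (!isLoopInvariant(I->getOperand(i)))
          return false;
      return true;
    }
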
@@ -394,16 +366,17 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
return true;
// Don't hoist loads which have may-aliased stores in loop.
- unsigned Size = 0;
+ uint64_t Size = 0;
if (LI->getType()->isSized())
Size = AA->getTypeStoreSize(LI->getType());
- return !pointerInvalidatedByLoop(LI->getOperand(0), Size);
+ return !pointerInvalidatedByLoop(LI->getOperand(0), Size,
+ LI->getMetadata(LLVMContext::MD_tbaa));
} else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
// Handle obvious cases efficiently.
AliasAnalysis::ModRefBehavior Behavior = AA->getModRefBehavior(CI);
if (Behavior == AliasAnalysis::DoesNotAccessMemory)
return true;
- else if (Behavior == AliasAnalysis::OnlyReadsMemory) {
+ if (AliasAnalysis::onlyReadsMemory(Behavior)) {
// If this call only reads from memory and there are no writes to memory
// in the loop, we can hoist or sink the call as appropriate.
bool FoundMod = false;
@@ -452,20 +425,6 @@ bool LICM::isNotUsedInLoop(Instruction &I) {
}
-/// isLoopInvariantInst - Return true if all operands of this instruction are
-/// loop invariant. We also filter out non-hoistable instructions here just for
-/// efficiency.
-///
-bool LICM::isLoopInvariantInst(Instruction &I) {
- // The instruction is loop invariant if all of its operands are loop-invariant
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- if (!CurLoop->isLoopInvariant(I.getOperand(i)))
- return false;
-
- // If we got this far, the instruction is loop invariant!
- return true;
-}
-
/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
@@ -486,7 +445,7 @@ void LICM::sink(Instruction &I) {
// enough that we handle it as a special (more efficient) case. It is more
// efficient to handle because there are no PHI nodes that need to be placed.
if (ExitBlocks.size() == 1) {
- if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[0], I.getParent())) {
+ if (!DT->dominates(I.getParent(), ExitBlocks[0])) {
// Instruction is not used, just delete it.
CurAST->deleteValue(&I);
// If I has users in unreachable blocks, eliminate.
@@ -537,7 +496,7 @@ void LICM::sink(Instruction &I) {
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
- if (!isExitBlockDominatedByBlockInLoop(ExitBlock, InstOrigBB))
+ if (!DT->dominates(InstOrigBB, ExitBlock))
continue;
// Insert the code after the last PHI node.
@@ -628,15 +587,61 @@ bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
SmallVector<BasicBlock*, 8> ExitBlocks;
CurLoop->getExitBlocks(ExitBlocks);
- // For each exit block, get the DT node and walk up the DT until the
- // instruction's basic block is found or we exit the loop.
+ // Verify that the block dominates each of the exit blocks of the loop.
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
- if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[i], Inst.getParent()))
+ if (!DT->dominates(Inst.getParent(), ExitBlocks[i]))
return false;
return true;
}
+namespace {
+ class LoopPromoter : public LoadAndStorePromoter {
+ Value *SomePtr; // Designated pointer to store to.
+ SmallPtrSet<Value*, 4> &PointerMustAliases;
+ SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
+ AliasSetTracker &AST;
+ public:
+ LoopPromoter(Value *SP,
+ const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ SmallPtrSet<Value*, 4> &PMA,
+ SmallVectorImpl<BasicBlock*> &LEB, AliasSetTracker &ast)
+ : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
+ LoopExitBlocks(LEB), AST(ast) {}
+
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &) const {
+ Value *Ptr;
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ Ptr = LI->getOperand(0);
+ else
+ Ptr = cast<StoreInst>(I)->getPointerOperand();
+ return PointerMustAliases.count(Ptr);
+ }
+
+ virtual void doExtraRewritesBeforeFinalDeletion() const {
+ // Insert stores in the loop exit blocks. Each exit block gets a
+ // store of the live-out value that feeds it. Since we've already told
+ // the SSA updater about the defs in the loop and the preheader
+ // definition, it is all set and we can start using it.
+ for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
+ BasicBlock *ExitBlock = LoopExitBlocks[i];
+ Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
+ Instruction *InsertPos = ExitBlock->getFirstNonPHI();
+ new StoreInst(LiveInValue, SomePtr, InsertPos);
+ }
+ }
+
+ virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
+ // Update alias analysis.
+ AST.copyValue(LI, V);
+ }
+ virtual void instructionDeleted(Instruction *I) const {
+ AST.deleteValue(I);
+ }
+ };
+} // end anon namespace
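
LoopPromoter is a callback object: the by-hand block scanning that the deleted code further down used to do now happens inside LoadAndStorePromoter::run, which calls back into the virtuals above as it rewrites. The driving sequence, condensed from the PromoteAliasSet hunk below (names as used there):

    SmallVector<PHINode*, 16> NewPHIs;
    SSAUpdater SSA(&NewPHIs);
    LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases,
                          ExitBlocks, *CurAST);

    // Seed the preheader with the one load that dominates the loop, then let
    // run() rewrite every in-loop load/store of the promoted pointer.
    LoadInst *PreheaderLoad =
      new LoadInst(SomePtr, SomePtr->getName()+".promoted",
                   Preheader->getTerminator());
    SSA.AddAvailableValue(Preheader, PreheaderLoad);
    Promoter.run(LoopUses);
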
+
/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
@@ -697,8 +702,11 @@ void LICM::PromoteAliasSet(AliasSet &AS) {
if (isa<LoadInst>(Use))
assert(!cast<LoadInst>(Use)->isVolatile() && "AST broken");
else if (isa<StoreInst>(Use)) {
+ // Stores *of* the pointer are not interesting, only stores *to* the
+ // pointer.
+ if (Use->getOperand(1) != ASIV)
+ continue;
assert(!cast<StoreInst>(Use)->isVolatile() && "AST broken");
- if (Use->getOperand(0) == ASIV) return;
} else
return; // Not a load or store.
@@ -718,179 +726,43 @@ void LICM::PromoteAliasSet(AliasSet &AS) {
Changed = true;
++NumPromoted;
+ SmallVector<BasicBlock*, 8> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+
// We use the SSAUpdater interface to insert phi nodes as required.
SmallVector<PHINode*, 16> NewPHIs;
SSAUpdater SSA(&NewPHIs);
+ LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
+ *CurAST);
- // It wants to know some value of the same type as what we'll be inserting.
- Value *SomeValue;
- if (isa<LoadInst>(LoopUses[0]))
- SomeValue = LoopUses[0];
- else
- SomeValue = cast<StoreInst>(LoopUses[0])->getOperand(0);
- SSA.Initialize(SomeValue->getType(), SomeValue->getName());
-
- // First step: bucket up uses of the pointers by the block they occur in.
- // This is important because we have to handle multiple defs/uses in a block
- // ourselves: SSAUpdater is purely for cross-block references.
- // FIXME: Want a TinyVector<Instruction*> since there is usually 0/1 element.
- DenseMap<BasicBlock*, std::vector<Instruction*> > UsesByBlock;
- for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
- Instruction *User = LoopUses[i];
- UsesByBlock[User->getParent()].push_back(User);
- }
-
- // Okay, now we can iterate over all the blocks in the loop with uses,
- // processing them. Keep track of which loads are loading a live-in value.
- SmallVector<LoadInst*, 32> LiveInLoads;
- DenseMap<Value*, Value*> ReplacedLoads;
-
- for (unsigned LoopUse = 0, e = LoopUses.size(); LoopUse != e; ++LoopUse) {
- Instruction *User = LoopUses[LoopUse];
- std::vector<Instruction*> &BlockUses = UsesByBlock[User->getParent()];
-
- // If this block has already been processed, ignore this repeat use.
- if (BlockUses.empty()) continue;
-
- // Okay, this is the first use in the block. If this block just has a
- // single user in it, we can rewrite it trivially.
- if (BlockUses.size() == 1) {
- // If it is a store, it is a trivial def of the value in the block.
- if (isa<StoreInst>(User)) {
- SSA.AddAvailableValue(User->getParent(),
- cast<StoreInst>(User)->getOperand(0));
- } else {
- // Otherwise it is a load, queue it to rewrite as a live-in load.
- LiveInLoads.push_back(cast<LoadInst>(User));
- }
- BlockUses.clear();
- continue;
- }
-
- // Otherwise, check to see if this block is all loads. If so, we can queue
- // them all as live in loads.
- bool HasStore = false;
- for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
- if (isa<StoreInst>(BlockUses[i])) {
- HasStore = true;
- break;
- }
- }
-
- if (!HasStore) {
- for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
- LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
- BlockUses.clear();
- continue;
- }
-
- // Otherwise, we have mixed loads and stores (or just a bunch of stores).
- // Since SSAUpdater is purely for cross-block values, we need to determine
- // the order of these instructions in the block. If the first use in the
- // block is a load, then it uses the live in value. The last store defines
- // the live out value. We handle this by doing a linear scan of the block.
- BasicBlock *BB = User->getParent();
- Value *StoredValue = 0;
- for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
- if (LoadInst *L = dyn_cast<LoadInst>(II)) {
- // If this is a load from an unrelated pointer, ignore it.
- if (!PointerMustAliases.count(L->getOperand(0))) continue;
-
- // If we haven't seen a store yet, this is a live in use, otherwise
- // use the stored value.
- if (StoredValue) {
- L->replaceAllUsesWith(StoredValue);
- ReplacedLoads[L] = StoredValue;
- } else {
- LiveInLoads.push_back(L);
- }
- continue;
- }
-
- if (StoreInst *S = dyn_cast<StoreInst>(II)) {
- // If this is a store to an unrelated pointer, ignore it.
- if (!PointerMustAliases.count(S->getOperand(1))) continue;
-
- // Remember that this is the active value in the block.
- StoredValue = S->getOperand(0);
- }
- }
-
- // The last stored value that happened is the live-out for the block.
- assert(StoredValue && "Already checked that there is a store in block");
- SSA.AddAvailableValue(BB, StoredValue);
- BlockUses.clear();
- }
-
- // Now that all the intra-loop values are classified, set up the preheader.
- // It gets a load of the pointer we're promoting, and it is the live-out value
- // from the preheader.
- LoadInst *PreheaderLoad = new LoadInst(SomePtr,SomePtr->getName()+".promoted",
- Preheader->getTerminator());
+ // Set up the preheader to have a definition of the value. It is the live-out
+ // value from the preheader that the uses inside the loop will see.
+ LoadInst *PreheaderLoad =
+ new LoadInst(SomePtr, SomePtr->getName()+".promoted",
+ Preheader->getTerminator());
SSA.AddAvailableValue(Preheader, PreheaderLoad);
- // Now that the preheader is good to go, set up the exit blocks. Each exit
- // block gets a store of the live-out values that feed them. Since we've
- // already told the SSA updater about the defs in the loop and the preheader
- // definition, it is all set and we can start using it.
- SmallVector<BasicBlock*, 8> ExitBlocks;
- CurLoop->getUniqueExitBlocks(ExitBlocks);
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *ExitBlock = ExitBlocks[i];
- Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
- Instruction *InsertPos = ExitBlock->getFirstNonPHI();
- new StoreInst(LiveInValue, SomePtr, InsertPos);
+ // Copy any value stored to or loaded from a must-alias of the pointer.
+ if (PreheaderLoad->getType()->isPointerTy()) {
+ Value *SomeValue;
+ if (LoadInst *LI = dyn_cast<LoadInst>(LoopUses[0]))
+ SomeValue = LI;
+ else
+ SomeValue = cast<StoreInst>(LoopUses[0])->getValueOperand();
+
+ CurAST->copyValue(SomeValue, PreheaderLoad);
}
- // Okay, now we rewrite all loads that use live-in values in the loop,
- // inserting PHI nodes as necessary.
- for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
- LoadInst *ALoad = LiveInLoads[i];
- Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
- ALoad->replaceAllUsesWith(NewVal);
- CurAST->copyValue(ALoad, NewVal);
- ReplacedLoads[ALoad] = NewVal;
- }
+ // Rewrite all the loads in the loop and remember all the definitions from
+ // stores in the loop.
+ Promoter.run(LoopUses);
// If the preheader load is itself a pointer, we need to tell alias analysis
// about the new pointer we created in the preheader block and about any PHI
// nodes that just got inserted.
if (PreheaderLoad->getType()->isPointerTy()) {
- // Copy any value stored to or loaded from a must-alias of the pointer.
- CurAST->copyValue(SomeValue, PreheaderLoad);
-
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
- CurAST->copyValue(SomeValue, NewPHIs[i]);
- }
-
- // Now that everything is rewritten, delete the old instructions from the body
- // of the loop. They should all be dead now.
- for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
- Instruction *User = LoopUses[i];
-
- // If this is a load that still has uses, then the load must have been added
-    // as a live value in the SSAUpdater data structure for a block (e.g. because
- // the loaded value was stored later). In this case, we need to recursively
- // propagate the updates until we get to the real value.
- if (!User->use_empty()) {
- Value *NewVal = ReplacedLoads[User];
- assert(NewVal && "not a replaced load?");
-
-      // Propagate down to the ultimate replacee.  The intermediate loads
- // could theoretically already have been deleted, so we don't want to
- // dereference the Value*'s.
- DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
- while (RLI != ReplacedLoads.end()) {
- NewVal = RLI->second;
- RLI = ReplacedLoads.find(NewVal);
- }
-
- User->replaceAllUsesWith(NewVal);
- CurAST->copyValue(User, NewVal);
- }
-
- CurAST->deleteValue(User);
- User->eraseFromParent();
+ CurAST->copyValue(PreheaderLoad, NewPHIs[i]);
}
  // phew, we're done!
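For orientation, a hedged source-level sketch of the scalar promotion this code performs (names invented for the example; it assumes the pointer is accessed only through must-aliases inside the loop):

    // Before: *P is loaded and stored on every iteration.
    void before(int *P, int N) {
      for (int i = 0; i != N; ++i)
        *P += i;
    }

    // After promotion: one preheader load, register traffic in the loop,
    // and the live-out value stored on loop exit.
    void after(int *P, int N) {
      int T = *P;             // preheader load ("P.promoted")
      for (int i = 0; i != N; ++i)
        T += i;
      *P = T;                 // store in the exit block
    }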
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 543dfc1..6d1d344 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -17,6 +17,7 @@
#define DEBUG_TYPE "loop-delete"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
@@ -28,7 +29,9 @@ namespace {
class LoopDeletion : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopDeletion() : LoopPass(ID) {}
+ LoopDeletion() : LoopPass(ID) {
+ initializeLoopDeletionPass(*PassRegistry::getPassRegistry());
+ }
// Possibly eliminate loop L if it is dead.
bool runOnLoop(Loop* L, LPPassManager& LPM);
@@ -49,14 +52,20 @@ namespace {
AU.addPreserved<LoopInfo>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
- AU.addPreserved<DominanceFrontier>();
}
};
}
char LoopDeletion::ID = 0;
-INITIALIZE_PASS(LoopDeletion, "loop-deletion",
- "Delete dead loops", false, false);
+INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
+ "Delete dead loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_END(LoopDeletion, "loop-deletion",
+ "Delete dead loops", false, false)
Pass* llvm::createLoopDeletionPass() {
return new LoopDeletion();
@@ -183,22 +192,19 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
// Update the dominator tree and remove the instructions and blocks that will
// be deleted from the reference counting scheme.
DominatorTree& DT = getAnalysis<DominatorTree>();
- DominanceFrontier* DF = getAnalysisIfAvailable<DominanceFrontier>();
- SmallPtrSet<DomTreeNode*, 8> ChildNodes;
+ SmallVector<DomTreeNode*, 8> ChildNodes;
for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end();
LI != LE; ++LI) {
// Move all of the block's children to be children of the preheader, which
// allows us to remove the domtree entry for the block.
- ChildNodes.insert(DT[*LI]->begin(), DT[*LI]->end());
- for (SmallPtrSet<DomTreeNode*, 8>::iterator DI = ChildNodes.begin(),
+ ChildNodes.insert(ChildNodes.begin(), DT[*LI]->begin(), DT[*LI]->end());
+ for (SmallVector<DomTreeNode*, 8>::iterator DI = ChildNodes.begin(),
DE = ChildNodes.end(); DI != DE; ++DI) {
DT.changeImmediateDominator(*DI, DT[preheader]);
- if (DF) DF->changeImmediateDominator((*DI)->getBlock(), preheader, &DT);
}
ChildNodes.clear();
DT.eraseNode(*LI);
- if (DF) DF->removeBlock(*LI);
// Remove the block from the reference counting scheme, so that we can
// delete it freely later.
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
new file mode 100644
index 0000000..d7fa149
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -0,0 +1,594 @@
+//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements an idiom recognizer that transforms simple loops into a
+// non-loop form. In cases where this kicks in, it can be a significant
+// performance win.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO List:
+//
+// Future loop memory idioms to recognize:
+// memcmp, memmove, strlen, etc.
+// Future floating point idioms to recognize in -ffast-math mode:
+// fpowi
+// Future integer operation idioms to recognize:
+// ctpop, ctlz, cttz
+//
+// Beware that isel's default lowering for ctpop is highly inefficient for
+// i64 and larger types when i64 is legal and the value has few bits set. It
+// would be good to enhance isel to emit a loop for ctpop in this case.
+//
+// We should enhance the memset/memcpy recognition to handle multiple stores in
+// the loop. This would handle things like:
+// void foo(_Complex float *P)
+// for (i) { __real__(*P) = 0; __imag__(*P) = 0; }
+//
+// This could recognize common matrix multiplies and dot product idioms and
+// replace them with calls to BLAS (if linked in??).
+//
+//===----------------------------------------------------------------------===//
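As a hedged, source-level illustration of the kind of rewrite this pass performs (function and variable names are invented for the example, not taken from the pass):

    #include <string.h>

    // Before: a simple counted store loop.
    void zero_bytes(unsigned char *A, unsigned N) {
      for (unsigned i = 0; i != N; ++i)
        A[i] = 0;
    }

    // After idiom recognition (conceptually): a single libc call.
    void zero_bytes_idiom(unsigned char *A, unsigned N) {
      if (N != 0)
        memset(A, 0, N);
    }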
+
+#define DEBUG_TYPE "loop-idiom"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
+STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
+
+namespace {
+ class LoopIdiomRecognize : public LoopPass {
+ Loop *CurLoop;
+ const TargetData *TD;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+ TargetLibraryInfo *TLI;
+ public:
+ static char ID;
+ explicit LoopIdiomRecognize() : LoopPass(ID) {
+ initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
+ SmallVectorImpl<BasicBlock*> &ExitBlocks);
+
+ bool processLoopStore(StoreInst *SI, const SCEV *BECount);
+ bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
+
+ bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
+ unsigned StoreAlignment,
+ Value *SplatValue, Instruction *TheStore,
+ const SCEVAddRecExpr *Ev,
+ const SCEV *BECount);
+ bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
+ const SCEVAddRecExpr *StoreEv,
+ const SCEVAddRecExpr *LoadEv,
+ const SCEV *BECount);
+
+ /// This transformation requires natural loop information & requires that
+ /// loop preheaders be inserted into the CFG.
+ ///
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addRequiredID(LCSSAID);
+ AU.addPreservedID(LCSSAID);
+ AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<DominatorTree>();
+ AU.addRequired<DominatorTree>();
+ AU.addRequired<TargetLibraryInfo>();
+ }
+ };
+}
+
+char LoopIdiomRecognize::ID = 0;
+INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
+ false, false)
+
+Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
+
+/// DeleteDeadInstruction - Delete this instruction. Before we do, go through
+/// and zero out all the operands of this instruction. If any of them become
+/// dead, delete them and the computation tree that feeds them.
+///
+static void DeleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
+ SmallVector<Instruction*, 32> NowDeadInsts;
+
+ NowDeadInsts.push_back(I);
+
+ // Before we touch this instruction, remove it from SE!
+ do {
+ Instruction *DeadInst = NowDeadInsts.pop_back_val();
+
+ // This instruction is dead, zap it, in stages. Start by removing it from
+ // SCEV.
+ SE.forgetValue(DeadInst);
+
+ for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
+ Value *Op = DeadInst->getOperand(op);
+ DeadInst->setOperand(op, 0);
+
+ // If this operand just became dead, add it to the NowDeadInsts list.
+ if (!Op->use_empty()) continue;
+
+ if (Instruction *OpI = dyn_cast<Instruction>(Op))
+ if (isInstructionTriviallyDead(OpI))
+ NowDeadInsts.push_back(OpI);
+ }
+
+ DeadInst->eraseFromParent();
+
+ } while (!NowDeadInsts.empty());
+}
+
+bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
+ CurLoop = L;
+
+ // The trip count of the loop must be analyzable.
+ SE = &getAnalysis<ScalarEvolution>();
+ if (!SE->hasLoopInvariantBackedgeTakenCount(L))
+ return false;
+ const SCEV *BECount = SE->getBackedgeTakenCount(L);
+ if (isa<SCEVCouldNotCompute>(BECount)) return false;
+
+ // If this loop executes exactly one time, then it should be peeled, not
+ // optimized by this pass.
+ if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
+ if (BECst->getValue()->getValue() == 0)
+ return false;
+
+ // We require target data for now.
+ TD = getAnalysisIfAvailable<TargetData>();
+ if (TD == 0) return false;
+
+ DT = &getAnalysis<DominatorTree>();
+ LoopInfo &LI = getAnalysis<LoopInfo>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
+ SmallVector<BasicBlock*, 8> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+
+ DEBUG(dbgs() << "loop-idiom Scanning: F["
+ << L->getHeader()->getParent()->getName()
+ << "] Loop %" << L->getHeader()->getName() << "\n");
+
+ bool MadeChange = false;
+ // Scan all the blocks in the loop that are not in subloops.
+ for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
+ ++BI) {
+ // Ignore blocks in subloops.
+ if (LI.getLoopFor(*BI) != CurLoop)
+ continue;
+
+ MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
+ }
+ return MadeChange;
+}
+
+/// runOnLoopBlock - Process the specified block, which lives in a counted loop
+/// with the specified backedge count. This block is known to be in the current
+/// loop and not in any subloops.
+bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
+ SmallVectorImpl<BasicBlock*> &ExitBlocks) {
+ // We can only promote stores in this block if they are unconditionally
+ // executed in the loop. For a block to be unconditionally executed, it has
+ // to dominate all the exit blocks of the loop. Verify this now.
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
+ if (!DT->dominates(BB, ExitBlocks[i]))
+ return false;
+
+ bool MadeChange = false;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ Instruction *Inst = I++;
+ // Look for store instructions, which may be optimized to memset/memcpy.
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ WeakVH InstPtr(I);
+ if (!processLoopStore(SI, BECount)) continue;
+ MadeChange = true;
+
+ // If processing the store invalidated our iterator, start over from the
+ // top of the block.
+ if (InstPtr == 0)
+ I = BB->begin();
+ continue;
+ }
+
+ // Look for memset instructions, which may be optimized to a larger memset.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
+ WeakVH InstPtr(I);
+ if (!processLoopMemSet(MSI, BECount)) continue;
+ MadeChange = true;
+
+ // If processing the memset invalidated our iterator, start over from the
+ // top of the block.
+ if (InstPtr == 0)
+ I = BB->begin();
+ continue;
+ }
+ }
+
+ return MadeChange;
+}
+
+
+/// processLoopStore - See if this store can be promoted to a memset or memcpy.
+bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
+ if (SI->isVolatile()) return false;
+
+ Value *StoredVal = SI->getValueOperand();
+ Value *StorePtr = SI->getPointerOperand();
+
+ // Reject stores that are so large that they overflow an unsigned.
+ uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
+ if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
+ return false;
+
+ // See if the pointer expression is an AddRec like {base,+,1} on the current
+ // loop, which indicates a strided store. If we have something else, it's a
+ // random store we can't handle.
+ const SCEVAddRecExpr *StoreEv =
+ dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
+ if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
+ return false;
+
+ // Check to see if the stride matches the size of the store. If so, then we
+ // know that every byte is touched in the loop.
+ unsigned StoreSize = (unsigned)SizeInBits >> 3;
+ const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
+
+  // TODO: Could also handle negative stride here someday; that will require
+  // the validity check in mayLoopAccessLocation to be updated, though.
+ if (Stride == 0 || StoreSize != Stride->getValue()->getValue())
+ return false;
+
+ // See if we can optimize just this store in isolation.
+ if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
+ StoredVal, SI, StoreEv, BECount))
+ return true;
+
+  // If the stored value is a strided load in the same loop with the same
+  // stride, this may be transformable into a memcpy. This kicks in for stuff
+  // like: for (i) A[i] = B[i];
+ if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
+ const SCEVAddRecExpr *LoadEv =
+ dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
+ if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
+ StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
+ if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
+ return true;
+ }
+ //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";
+
+ return false;
+}
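For intuition about the stride check above, a hedged example (the loop and names are illustrative):

    // p has type int*, so each store writes 4 bytes.
    void fill_minus1(int *p, unsigned n) {
      for (unsigned i = 0; i != n; ++i)
        p[i] = -1;   // address SCEV: {p,+,4}<loop>; stride == store size
    }
    // The stride (4) matches the 4-byte store size, so the loop touches
    // every byte of the range; i32 -1 is the byte splat 0xFF, so this can
    // conceptually become: memset(p, 0xFF, (size_t)n * 4);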
+
+/// processLoopMemSet - See if this memset can be promoted to a large memset.
+bool LoopIdiomRecognize::
+processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
+ // We can only handle non-volatile memsets with a constant size.
+ if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;
+
+ // If we're not allowed to hack on memset, we fail.
+ if (!TLI->has(LibFunc::memset))
+ return false;
+
+ Value *Pointer = MSI->getDest();
+
+ // See if the pointer expression is an AddRec like {base,+,1} on the current
+ // loop, which indicates a strided store. If we have something else, it's a
+ // random store we can't handle.
+ const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
+ if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
+ return false;
+
+ // Reject memsets that are so large that they overflow an unsigned.
+ uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+ if ((SizeInBytes >> 32) != 0)
+ return false;
+
+ // Check to see if the stride matches the size of the memset. If so, then we
+ // know that every byte is touched in the loop.
+ const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
+
+  // TODO: Could also handle negative stride here someday; that will require
+  // the validity check in mayLoopAccessLocation to be updated, though.
+ if (Stride == 0 || MSI->getLength() != Stride->getValue())
+ return false;
+
+ return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
+ MSI->getAlignment(), MSI->getValue(),
+ MSI, Ev, BECount);
+}
+
+
+/// mayLoopAccessLocation - Return true if the specified loop might access the
+/// specified pointer location, which is a loop-strided access. The 'Access'
+/// argument specifies what the verboten forms of access are (read or write).
+static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
+ Loop *L, const SCEV *BECount,
+ unsigned StoreSize, AliasAnalysis &AA,
+ Instruction *IgnoredStore) {
+ // Get the location that may be stored across the loop. Since the access is
+ // strided positively through memory, we say that the modified location starts
+ // at the pointer and has infinite size.
+ uint64_t AccessSize = AliasAnalysis::UnknownSize;
+
+ // If the loop iterates a fixed number of times, we can refine the access size
+ // to be exactly the size of the memset, which is (BECount+1)*StoreSize
+ if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
+ AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize;
+
+ // TODO: For this to be really effective, we have to dive into the pointer
+  // operand in the store. A store to &A[i] of 100 will always return MayAlias
+  // with a store to &A[100]; we need StoreLoc to be "A" with a size of 100,
+  // which will then no-alias a store to &A[100].
+ AliasAnalysis::Location StoreLoc(Ptr, AccessSize);
+
+ for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
+ ++BI)
+ for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
+ if (&*I != IgnoredStore &&
+ (AA.getModRefInfo(I, StoreLoc) & Access))
+ return true;
+
+ return false;
+}
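As a concrete check of the size refinement above (numbers invented for illustration): if BECount is the constant 99 and StoreSize is 4, the loop performs 100 stores and the refined AccessSize is (99+1)*4 = 400 bytes, so only in-loop accesses that may alias the range [Ptr, Ptr+400) block the transformation.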
+
+/// getMemSetPatternValue - If a strided store of the specified value is safe to
+/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
+/// be passed in. Otherwise, return null.
+///
+/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
+/// just replicate their input array and then pass on to memset_pattern16.
+static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
+ // If the value isn't a constant, we can't promote it to being in a constant
+ // array. We could theoretically do a store to an alloca or something, but
+ // that doesn't seem worthwhile.
+ Constant *C = dyn_cast<Constant>(V);
+ if (C == 0) return 0;
+
+ // Only handle simple values that are a power of two bytes in size.
+ uint64_t Size = TD.getTypeSizeInBits(V->getType());
+ if (Size == 0 || (Size & 7) || (Size & (Size-1)))
+ return 0;
+
+ // Don't care enough about darwin/ppc to implement this.
+ if (TD.isBigEndian())
+ return 0;
+
+ // Convert to size in bytes.
+ Size /= 8;
+
+  // TODO: If C is larger than 16 bytes, we can try slicing it in half to see
+ // if the top and bottom are the same (e.g. for vectors and large integers).
+ if (Size > 16) return 0;
+
+ // If the constant is exactly 16 bytes, just use it.
+ if (Size == 16) return C;
+
+ // Otherwise, we'll use an array of the constants.
+ unsigned ArraySize = 16/Size;
+ ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
+ return ConstantArray::get(AT, std::vector<Constant*>(ArraySize, C));
+}
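A hedged worked example of the replication step (the constant is invented): for an i32 constant 0x01020304, Size is 4 bytes, ArraySize is 16/4 = 4, and the result is a [4 x i32] array holding four copies of the constant, i.e. the 16-byte pattern that memset_pattern16 repeats across the destination.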
+
+
+/// processLoopStridedStore - We see a strided store of some value. If we can
+/// transform this into a memset or memset_pattern in the loop preheader, do so.
+bool LoopIdiomRecognize::
+processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
+ unsigned StoreAlignment, Value *StoredVal,
+ Instruction *TheStore, const SCEVAddRecExpr *Ev,
+ const SCEV *BECount) {
+
+ // If the stored value is a byte-wise value (like i32 -1), then it may be
+ // turned into a memset of i8 -1, assuming that all the consecutive bytes
+ // are stored. A store of i32 0x01020304 can never be turned into a memset,
+ // but it can be turned into memset_pattern if the target supports it.
+ Value *SplatValue = isBytewiseValue(StoredVal);
+ Constant *PatternValue = 0;
+
+ // If we're allowed to form a memset, and the stored value would be acceptable
+ // for memset, use it.
+ if (SplatValue && TLI->has(LibFunc::memset) &&
+ // Verify that the stored value is loop invariant. If not, we can't
+ // promote the memset.
+ CurLoop->isLoopInvariant(SplatValue)) {
+ // Keep and use SplatValue.
+ PatternValue = 0;
+ } else if (TLI->has(LibFunc::memset_pattern16) &&
+ (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
+ // It looks like we can use PatternValue!
+ SplatValue = 0;
+ } else {
+    // Otherwise, this isn't an idiom we can transform; for example, we can't
+    // do anything with a 3-byte store.
+ return false;
+ }
+
+
+ // Okay, we have a strided store "p[i]" of a splattable value. We can turn
+ // this into a memset in the loop preheader now if we want. However, this
+ // would be unsafe to do if there is anything else in the loop that may read
+ // or write to the aliased location. Check for an alias.
+ if (mayLoopAccessLocation(DestPtr, AliasAnalysis::ModRef,
+ CurLoop, BECount,
+ StoreSize, getAnalysis<AliasAnalysis>(), TheStore))
+ return false;
+
+ // Okay, everything looks good, insert the memset.
+ BasicBlock *Preheader = CurLoop->getLoopPreheader();
+
+ IRBuilder<> Builder(Preheader->getTerminator());
+
+  // The trip count of the loop and the base pointer of the addrec SCEV are
+  // guaranteed to be loop invariant, which means they should dominate the
+ // header. Just insert code for it in the preheader.
+ SCEVExpander Expander(*SE);
+
+ unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
+ Value *BasePtr =
+ Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
+ Preheader->getTerminator());
+
+ // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
+ // pointer size if it isn't already.
+ const Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
+ BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
+
+ const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
+ true /*no unsigned overflow*/);
+ if (StoreSize != 1)
+ NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
+ true /*no unsigned overflow*/);
+
+ Value *NumBytes =
+ Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
+
+ Value *NewCall;
+ if (SplatValue)
+ NewCall = Builder.CreateMemSet(BasePtr, SplatValue,NumBytes,StoreAlignment);
+ else {
+ Module *M = TheStore->getParent()->getParent()->getParent();
+ Value *MSP = M->getOrInsertFunction("memset_pattern16",
+ Builder.getVoidTy(),
+ Builder.getInt8PtrTy(),
+ Builder.getInt8PtrTy(), IntPtr,
+ (void*)0);
+
+    // Otherwise we should form a memset_pattern16. PatternValue is known to
+    // be a constant array of 16 bytes. Plop the value into a mergeable global.
+ GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
+ GlobalValue::InternalLinkage,
+ PatternValue, ".memset_pattern");
+ GV->setUnnamedAddr(true); // Ok to merge these.
+ GV->setAlignment(16);
+ Value *PatternPtr = ConstantExpr::getBitCast(GV, Builder.getInt8PtrTy());
+ NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
+ }
+
+ DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
+ << " from store to: " << *Ev << " at: " << *TheStore << "\n");
+ (void)NewCall;
+
+ // Okay, the memset has been formed. Zap the original store and anything that
+ // feeds into it.
+ DeleteDeadInstruction(TheStore, *SE);
+ ++NumMemSet;
+ return true;
+}
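At the source level, the memset_pattern16 path corresponds roughly to the sketch below; it assumes Darwin's memset_pattern16 libc extension, and the names are illustrative:

    #include <stddef.h>
    #include <string.h>   // declares memset_pattern16 on Darwin

    void fill_pattern(int *p, size_t n) {
      // Before: for (size_t i = 0; i != n; ++i) p[i] = 0x01020304;
      // 0x01020304 is not a byte splat, so a plain memset cannot express it.
      static const int pattern[4] = {0x01020304, 0x01020304,
                                     0x01020304, 0x01020304};  // 16 bytes
      memset_pattern16(p, pattern, n * sizeof(int));
    }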
+
+/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
+/// same-strided load.
+bool LoopIdiomRecognize::
+processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
+ const SCEVAddRecExpr *StoreEv,
+ const SCEVAddRecExpr *LoadEv,
+ const SCEV *BECount) {
+ // If we're not allowed to form memcpy, we fail.
+ if (!TLI->has(LibFunc::memcpy))
+ return false;
+
+ LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
+
+ // Okay, we have a strided store "p[i]" of a loaded value. We can turn
+ // this into a memcpy in the loop preheader now if we want. However, this
+ // would be unsafe to do if there is anything else in the loop that may read
+ // or write to the stored location (including the load feeding the stores).
+ // Check for an alias.
+ if (mayLoopAccessLocation(SI->getPointerOperand(), AliasAnalysis::ModRef,
+ CurLoop, BECount, StoreSize,
+ getAnalysis<AliasAnalysis>(), SI))
+ return false;
+
+ // For a memcpy, we have to make sure that the input array is not being
+ // mutated by the loop.
+ if (mayLoopAccessLocation(LI->getPointerOperand(), AliasAnalysis::Mod,
+ CurLoop, BECount, StoreSize,
+ getAnalysis<AliasAnalysis>(), SI))
+ return false;
+
+ // Okay, everything looks good, insert the memcpy.
+ BasicBlock *Preheader = CurLoop->getLoopPreheader();
+
+ IRBuilder<> Builder(Preheader->getTerminator());
+
+  // The trip count of the loop and the base pointer of the addrec SCEV are
+  // guaranteed to be loop invariant, which means they should dominate the
+ // header. Just insert code for it in the preheader.
+ SCEVExpander Expander(*SE);
+
+ Value *LoadBasePtr =
+ Expander.expandCodeFor(LoadEv->getStart(),
+ Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
+ Preheader->getTerminator());
+ Value *StoreBasePtr =
+ Expander.expandCodeFor(StoreEv->getStart(),
+ Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
+ Preheader->getTerminator());
+
+ // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
+ // pointer size if it isn't already.
+ const Type *IntPtr = TD->getIntPtrType(SI->getContext());
+ BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
+
+ const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
+ true /*no unsigned overflow*/);
+ if (StoreSize != 1)
+ NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
+ true /*no unsigned overflow*/);
+
+ Value *NumBytes =
+ Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
+
+ Value *NewCall =
+ Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
+ std::min(SI->getAlignment(), LI->getAlignment()));
+
+ DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
+ << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
+ << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
+ (void)NewCall;
+
+  // Okay, the memcpy has been formed. Zap the original store and anything that
+ // feeds into it.
+ DeleteDeadInstruction(SI, *SE);
+ ++NumMemCpy;
+ return true;
+}
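A hedged source-level view of the memcpy rewrite (names are illustrative; the rewrite is legal only when, as checked above, nothing else in the loop may read or write the store location and nothing may write the load location):

    #include <stddef.h>
    #include <string.h>

    void copy_before(int *dst, const int *src, size_t n) {
      for (size_t i = 0; i != n; ++i)
        dst[i] = src[i];                    // same-strided load and store
    }

    void copy_after(int *dst, const int *src, size_t n) {
      if (n != 0)
        memcpy(dst, src, n * sizeof(int));  // (BECount+1)*StoreSize bytes
    }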
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
deleted file mode 100644
index a433674..0000000
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ /dev/null
@@ -1,1270 +0,0 @@
-//===- LoopIndexSplit.cpp - Loop Index Splitting Pass ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements Loop Index Splitting Pass. This pass handles three
-// kinds of loops.
-//
-// [1] A loop may be eliminated if the body is executed exactly once.
-// For example,
-//
-// for (i = 0; i < N; ++i) {
-// if (i == X) {
-// body;
-// }
-// }
-//
-// is transformed to
-//
-// i = X;
-// body;
-//
-// [2] A loop's iteration space may be shrunk if the loop body is executed
-// for a proper sub-range of the loop's iteration space. For example,
-//
-// for (i = 0; i < N; ++i) {
-// if (i > A && i < B) {
-// ...
-// }
-// }
-//
-//      is transformed to iterate from A to B, if A > 0 and B < N.
-//
-// [3] A loop may be split if the loop body is dominated by a branch.
-// For example,
-//
-// for (i = LB; i < UB; ++i) { if (i < SV) A; else B; }
-//
-// is transformed into
-//
-// AEV = BSV = SV
-// for (i = LB; i < min(UB, AEV); ++i)
-// A;
-//        for (i = max(LB, BSV); i < UB; ++i)
-// B;
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "loop-index-split"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Statistic.h"
-
-using namespace llvm;
-
-STATISTIC(NumIndexSplit, "Number of loop index splits");
-STATISTIC(NumIndexSplitRemoved, "Number of loops eliminated by loop index split");
-STATISTIC(NumRestrictBounds, "Number of loop iteration spaces restricted");
-
-namespace {
-
- class LoopIndexSplit : public LoopPass {
- public:
- static char ID; // Pass ID, replacement for typeid
- LoopIndexSplit() : LoopPass(ID) {}
-
- // Index split Loop L. Return true if loop is split.
- bool runOnLoop(Loop *L, LPPassManager &LPM);
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<ScalarEvolution>();
- AU.addRequiredID(LCSSAID);
- AU.addPreservedID(LCSSAID);
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
- AU.addRequiredID(LoopSimplifyID);
- AU.addPreservedID(LoopSimplifyID);
- AU.addRequired<DominatorTree>();
- AU.addRequired<DominanceFrontier>();
- AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
- }
-
- private:
- /// processOneIterationLoop -- Eliminate loop if loop body is executed
- /// only once. For example,
- /// for (i = 0; i < N; ++i) {
- /// if ( i == X) {
- /// ...
- /// }
- /// }
- ///
- bool processOneIterationLoop();
-
- // -- Routines used by updateLoopIterationSpace();
-
- /// updateLoopIterationSpace -- Update loop's iteration space if loop
- /// body is executed for certain IV range only. For example,
- ///
- /// for (i = 0; i < N; ++i) {
- /// if ( i > A && i < B) {
- /// ...
- /// }
- /// }
-    /// is transformed to iterate from A to B, if A > 0 and B < N.
- ///
- bool updateLoopIterationSpace();
-
- /// restrictLoopBound - Op dominates loop body. Op compares an IV based value
- /// with a loop invariant value. Update loop's lower and upper bound based on
- /// the loop invariant value.
- bool restrictLoopBound(ICmpInst &Op);
-
- // --- Routines used by splitLoop(). --- /
-
- bool splitLoop();
-
- /// removeBlocks - Remove basic block DeadBB and all blocks dominated by
- /// DeadBB. This routine is used to remove split condition's dead branch,
-    /// dominated by DeadBB. LiveBB dominates the split condition's other branch.
- void removeBlocks(BasicBlock *DeadBB, Loop *LP, BasicBlock *LiveBB);
-
- /// moveExitCondition - Move exit condition EC into split condition block.
- void moveExitCondition(BasicBlock *CondBB, BasicBlock *ActiveBB,
- BasicBlock *ExitBB, ICmpInst *EC, ICmpInst *SC,
- PHINode *IV, Instruction *IVAdd, Loop *LP,
- unsigned);
-
- /// updatePHINodes - CFG has been changed.
- /// Before
- /// - ExitBB's single predecessor was Latch
- /// - Latch's second successor was Header
- /// Now
- /// - ExitBB's single predecessor was Header
- /// - Latch's one and only successor was Header
- ///
- /// Update ExitBB PHINodes' to reflect this change.
- void updatePHINodes(BasicBlock *ExitBB, BasicBlock *Latch,
- BasicBlock *Header,
- PHINode *IV, Instruction *IVIncrement, Loop *LP);
-
- // --- Utility routines --- /
-
- /// cleanBlock - A block is considered clean if all non terminal
- /// instructions are either PHINodes or IV based values.
- bool cleanBlock(BasicBlock *BB);
-
-    /// IVisLT - If Op is comparing an IV based value with a loop invariant and
- /// IV based value is less than the loop invariant then return the loop
- /// invariant. Otherwise return NULL.
- Value * IVisLT(ICmpInst &Op);
-
-    /// IVisLE - If Op is comparing an IV based value with a loop invariant and
- /// IV based value is less than or equal to the loop invariant then
- /// return the loop invariant. Otherwise return NULL.
- Value * IVisLE(ICmpInst &Op);
-
-    /// IVisGT - If Op is comparing an IV based value with a loop invariant and
- /// IV based value is greater than the loop invariant then return the loop
- /// invariant. Otherwise return NULL.
- Value * IVisGT(ICmpInst &Op);
-
-    /// IVisGE - If Op is comparing an IV based value with a loop invariant and
- /// IV based value is greater than or equal to the loop invariant then
- /// return the loop invariant. Otherwise return NULL.
- Value * IVisGE(ICmpInst &Op);
-
- private:
-
- // Current Loop information.
- Loop *L;
- LPPassManager *LPM;
- LoopInfo *LI;
- DominatorTree *DT;
- DominanceFrontier *DF;
-
- PHINode *IndVar;
- ICmpInst *ExitCondition;
- ICmpInst *SplitCondition;
- Value *IVStartValue;
- Value *IVExitValue;
- Instruction *IVIncrement;
- SmallPtrSet<Value *, 4> IVBasedValues;
- };
-}
-
-char LoopIndexSplit::ID = 0;
-INITIALIZE_PASS(LoopIndexSplit, "loop-index-split",
- "Index Split Loops", false, false);
-
-Pass *llvm::createLoopIndexSplitPass() {
- return new LoopIndexSplit();
-}
-
-// Index split Loop L. Return true if loop is split.
-bool LoopIndexSplit::runOnLoop(Loop *IncomingLoop, LPPassManager &LPM_Ref) {
- L = IncomingLoop;
- LPM = &LPM_Ref;
-
- // If LoopSimplify form is not available, stay out of trouble.
- if (!L->isLoopSimplifyForm())
- return false;
-
- // FIXME - Nested loops make dominator info updates tricky.
- if (!L->getSubLoops().empty())
- return false;
-
- DT = &getAnalysis<DominatorTree>();
- LI = &getAnalysis<LoopInfo>();
- DF = &getAnalysis<DominanceFrontier>();
-
- // Initialize loop data.
- IndVar = L->getCanonicalInductionVariable();
- if (!IndVar) return false;
-
- bool P1InLoop = L->contains(IndVar->getIncomingBlock(1));
- IVStartValue = IndVar->getIncomingValue(!P1InLoop);
- IVIncrement = dyn_cast<Instruction>(IndVar->getIncomingValue(P1InLoop));
- if (!IVIncrement) return false;
-
- IVBasedValues.clear();
- IVBasedValues.insert(IndVar);
- IVBasedValues.insert(IVIncrement);
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I)
- for(BasicBlock::iterator BI = (*I)->begin(), BE = (*I)->end();
- BI != BE; ++BI) {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BI))
- if (BO != IVIncrement
- && (BO->getOpcode() == Instruction::Add
- || BO->getOpcode() == Instruction::Sub))
- if (IVBasedValues.count(BO->getOperand(0))
- && L->isLoopInvariant(BO->getOperand(1)))
- IVBasedValues.insert(BO);
- }
-
- // Reject loop if loop exit condition is not suitable.
- BasicBlock *ExitingBlock = L->getExitingBlock();
- if (!ExitingBlock)
- return false;
- BranchInst *EBR = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
- if (!EBR) return false;
- ExitCondition = dyn_cast<ICmpInst>(EBR->getCondition());
- if (!ExitCondition) return false;
- if (ExitingBlock != L->getLoopLatch()) return false;
- IVExitValue = ExitCondition->getOperand(1);
- if (!L->isLoopInvariant(IVExitValue))
- IVExitValue = ExitCondition->getOperand(0);
- if (!L->isLoopInvariant(IVExitValue))
- return false;
- if (!IVBasedValues.count(
- ExitCondition->getOperand(IVExitValue == ExitCondition->getOperand(0))))
- return false;
-
-  // If the start value is greater than the exit value and the induction
-  // variable increments by 1, then we are potentially dealing with an
-  // infinite loop. Do not index split this loop.
- if (ConstantInt *SV = dyn_cast<ConstantInt>(IVStartValue))
- if (ConstantInt *EV = dyn_cast<ConstantInt>(IVExitValue))
- if (SV->getSExtValue() > EV->getSExtValue())
- return false;
-
- if (processOneIterationLoop())
- return true;
-
- if (updateLoopIterationSpace())
- return true;
-
- if (splitLoop())
- return true;
-
- return false;
-}
-
-// --- Helper routines ---
-// isUsedOutsideLoop - Returns true iff V is used outside the loop L.
-static bool isUsedOutsideLoop(Value *V, Loop *L) {
- for(Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (!L->contains(cast<Instruction>(*UI)))
- return true;
- return false;
-}
-
-// Return V+1
-static Value *getPlusOne(Value *V, bool Sign, Instruction *InsertPt,
- LLVMContext &Context) {
- Constant *One = ConstantInt::get(V->getType(), 1, Sign);
- return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt);
-}
-
-// Return V-1
-static Value *getMinusOne(Value *V, bool Sign, Instruction *InsertPt,
- LLVMContext &Context) {
- Constant *One = ConstantInt::get(V->getType(), 1, Sign);
- return BinaryOperator::CreateSub(V, One, "lsp", InsertPt);
-}
-
-// Return min(V1, V2)
-static Value *getMin(Value *V1, Value *V2, bool Sign, Instruction *InsertPt) {
-
- Value *C = new ICmpInst(InsertPt,
- Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
- V1, V2, "lsp");
- return SelectInst::Create(C, V1, V2, "lsp", InsertPt);
-}
-
-// Return max(V1, V2)
-static Value *getMax(Value *V1, Value *V2, bool Sign, Instruction *InsertPt) {
-
- Value *C = new ICmpInst(InsertPt,
- Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
- V1, V2, "lsp");
- return SelectInst::Create(C, V2, V1, "lsp", InsertPt);
-}
-
-/// processOneIterationLoop -- Eliminate loop if loop body is executed
-/// only once. For example,
-/// for (i = 0; i < N; ++i) {
-/// if ( i == X) {
-/// ...
-/// }
-/// }
-///
-bool LoopIndexSplit::processOneIterationLoop() {
- SplitCondition = NULL;
- BasicBlock *Latch = L->getLoopLatch();
- BasicBlock *Header = L->getHeader();
- BranchInst *BR = dyn_cast<BranchInst>(Header->getTerminator());
- if (!BR) return false;
- if (!isa<BranchInst>(Latch->getTerminator())) return false;
- if (BR->isUnconditional()) return false;
- SplitCondition = dyn_cast<ICmpInst>(BR->getCondition());
- if (!SplitCondition) return false;
- if (SplitCondition == ExitCondition) return false;
- if (SplitCondition->getPredicate() != ICmpInst::ICMP_EQ) return false;
- if (BR->getOperand(1) != Latch) return false;
- if (!IVBasedValues.count(SplitCondition->getOperand(0))
- && !IVBasedValues.count(SplitCondition->getOperand(1)))
- return false;
-
- // If IV is used outside the loop then this loop traversal is required.
- // FIXME: Calculate and use last IV value.
- if (isUsedOutsideLoop(IVIncrement, L))
- return false;
-
- // If BR operands are not IV or not loop invariants then skip this loop.
- Value *OPV = SplitCondition->getOperand(0);
- Value *SplitValue = SplitCondition->getOperand(1);
- if (!L->isLoopInvariant(SplitValue))
- std::swap(OPV, SplitValue);
- if (!L->isLoopInvariant(SplitValue))
- return false;
- Instruction *OPI = dyn_cast<Instruction>(OPV);
- if (!OPI)
- return false;
- if (OPI->getParent() != Header || isUsedOutsideLoop(OPI, L))
- return false;
- Value *StartValue = IVStartValue;
-  Value *ExitValue = IVExitValue;
-
- if (OPV != IndVar) {
- // If BR operand is IV based then use this operand to calculate
- // effective conditions for loop body.
- BinaryOperator *BOPV = dyn_cast<BinaryOperator>(OPV);
- if (!BOPV)
- return false;
- if (BOPV->getOpcode() != Instruction::Add)
- return false;
- StartValue = BinaryOperator::CreateAdd(OPV, StartValue, "" , BR);
- ExitValue = BinaryOperator::CreateAdd(OPV, ExitValue, "" , BR);
- }
-
- if (!cleanBlock(Header))
- return false;
-
- if (!cleanBlock(Latch))
- return false;
-
- // If the merge point for BR is not loop latch then skip this loop.
- if (BR->getSuccessor(0) != Latch) {
- DominanceFrontier::iterator DF0 = DF->find(BR->getSuccessor(0));
- assert (DF0 != DF->end() && "Unable to find dominance frontier");
- if (!DF0->second.count(Latch))
- return false;
- }
-
- if (BR->getSuccessor(1) != Latch) {
- DominanceFrontier::iterator DF1 = DF->find(BR->getSuccessor(1));
- assert (DF1 != DF->end() && "Unable to find dominance frontier");
- if (!DF1->second.count(Latch))
- return false;
- }
-
-  // Now, the current loop L contains a compare instruction that compares the
-  // induction variable, IndVar, against a loop invariant, and the entire
-  // (i.e. meaningful) loop body is dominated by this compare instruction.
-  // In such a case, eliminate the loop structure surrounding the loop body.
-  // For example,
- // for (int i = start; i < end; ++i) {
- // if ( i == somevalue) {
- // loop_body
- // }
- // }
- // can be transformed into
- // if (somevalue >= start && somevalue < end) {
- // i = somevalue;
- // loop_body
- // }
-
- // Replace index variable with split value in loop body. Loop body is executed
- // only when index variable is equal to split value.
- IndVar->replaceAllUsesWith(SplitValue);
-
- // Replace split condition in header.
- // Transform
- // SplitCondition : icmp eq i32 IndVar, SplitValue
- // into
- // c1 = icmp uge i32 SplitValue, StartValue
- // c2 = icmp ult i32 SplitValue, ExitValue
- // and i32 c1, c2
- Instruction *C1 = new ICmpInst(BR, ExitCondition->isSigned() ?
- ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
- SplitValue, StartValue, "lisplit");
-
- CmpInst::Predicate C2P = ExitCondition->getPredicate();
- BranchInst *LatchBR = cast<BranchInst>(Latch->getTerminator());
- if (LatchBR->getOperand(1) != Header)
- C2P = CmpInst::getInversePredicate(C2P);
- Instruction *C2 = new ICmpInst(BR, C2P, SplitValue, ExitValue, "lisplit");
- Instruction *NSplitCond = BinaryOperator::CreateAnd(C1, C2, "lisplit", BR);
-
- SplitCondition->replaceAllUsesWith(NSplitCond);
- SplitCondition->eraseFromParent();
-
- // Remove Latch to Header edge.
- BasicBlock *LatchSucc = NULL;
- Header->removePredecessor(Latch);
- for (succ_iterator SI = succ_begin(Latch), E = succ_end(Latch);
- SI != E; ++SI) {
- if (Header != *SI)
- LatchSucc = *SI;
- }
-
- // Clean up latch block.
- Value *LatchBRCond = LatchBR->getCondition();
- LatchBR->setUnconditionalDest(LatchSucc);
- RecursivelyDeleteTriviallyDeadInstructions(LatchBRCond);
-
- LPM->deleteLoopFromQueue(L);
-
- // Update Dominator Info.
- // Only CFG change done is to remove Latch to Header edge. This
- // does not change dominator tree because Latch did not dominate
- // Header.
- if (DF) {
- DominanceFrontier::iterator HeaderDF = DF->find(Header);
- if (HeaderDF != DF->end())
- DF->removeFromFrontier(HeaderDF, Header);
-
- DominanceFrontier::iterator LatchDF = DF->find(Latch);
- if (LatchDF != DF->end())
- DF->removeFromFrontier(LatchDF, Header);
- }
-
- ++NumIndexSplitRemoved;
- return true;
-}
-
-/// restrictLoopBound - Op dominates loop body. Op compares an IV based value
-/// with a loop invariant value. Update loop's lower and upper bound based on
-/// the loop invariant value.
-bool LoopIndexSplit::restrictLoopBound(ICmpInst &Op) {
- bool Sign = Op.isSigned();
- Instruction *PHTerm = L->getLoopPreheader()->getTerminator();
-
- if (IVisGT(*ExitCondition) || IVisGE(*ExitCondition)) {
- BranchInst *EBR =
- cast<BranchInst>(ExitCondition->getParent()->getTerminator());
- ExitCondition->setPredicate(ExitCondition->getInversePredicate());
- BasicBlock *T = EBR->getSuccessor(0);
- EBR->setSuccessor(0, EBR->getSuccessor(1));
- EBR->setSuccessor(1, T);
- }
-
- LLVMContext &Context = Op.getContext();
-
- // New upper and lower bounds.
- Value *NLB = NULL;
- Value *NUB = NULL;
- if (Value *V = IVisLT(Op)) {
- // Restrict upper bound.
- if (IVisLE(*ExitCondition))
- V = getMinusOne(V, Sign, PHTerm, Context);
- NUB = getMin(V, IVExitValue, Sign, PHTerm);
- } else if (Value *V = IVisLE(Op)) {
- // Restrict upper bound.
- if (IVisLT(*ExitCondition))
- V = getPlusOne(V, Sign, PHTerm, Context);
- NUB = getMin(V, IVExitValue, Sign, PHTerm);
- } else if (Value *V = IVisGT(Op)) {
- // Restrict lower bound.
- V = getPlusOne(V, Sign, PHTerm, Context);
- NLB = getMax(V, IVStartValue, Sign, PHTerm);
- } else if (Value *V = IVisGE(Op))
- // Restrict lower bound.
- NLB = getMax(V, IVStartValue, Sign, PHTerm);
-
- if (!NLB && !NUB)
- return false;
-
- if (NLB) {
- unsigned i = IndVar->getBasicBlockIndex(L->getLoopPreheader());
- IndVar->setIncomingValue(i, NLB);
- }
-
- if (NUB) {
- unsigned i = (ExitCondition->getOperand(0) != IVExitValue);
- ExitCondition->setOperand(i, NUB);
- }
- return true;
-}
-
-/// updateLoopIterationSpace -- Update loop's iteration space if loop
-/// body is executed for certain IV range only. For example,
-///
-/// for (i = 0; i < N; ++i) {
-/// if ( i > A && i < B) {
-/// ...
-/// }
-/// }
-/// is transformed to iterators from A to B, if A > 0 and B < N.
-///
-bool LoopIndexSplit::updateLoopIterationSpace() {
- SplitCondition = NULL;
- if (ExitCondition->getPredicate() == ICmpInst::ICMP_NE
- || ExitCondition->getPredicate() == ICmpInst::ICMP_EQ)
- return false;
- BasicBlock *Latch = L->getLoopLatch();
- BasicBlock *Header = L->getHeader();
- BranchInst *BR = dyn_cast<BranchInst>(Header->getTerminator());
- if (!BR) return false;
- if (!isa<BranchInst>(Latch->getTerminator())) return false;
- if (BR->isUnconditional()) return false;
- BinaryOperator *AND = dyn_cast<BinaryOperator>(BR->getCondition());
- if (!AND) return false;
- if (AND->getOpcode() != Instruction::And) return false;
- ICmpInst *Op0 = dyn_cast<ICmpInst>(AND->getOperand(0));
- ICmpInst *Op1 = dyn_cast<ICmpInst>(AND->getOperand(1));
- if (!Op0 || !Op1)
- return false;
- IVBasedValues.insert(AND);
- IVBasedValues.insert(Op0);
- IVBasedValues.insert(Op1);
- if (!cleanBlock(Header)) return false;
- BasicBlock *ExitingBlock = ExitCondition->getParent();
- if (!cleanBlock(ExitingBlock)) return false;
-
- // If the merge point for BR is not loop latch then skip this loop.
- if (BR->getSuccessor(0) != Latch) {
- DominanceFrontier::iterator DF0 = DF->find(BR->getSuccessor(0));
- assert (DF0 != DF->end() && "Unable to find dominance frontier");
- if (!DF0->second.count(Latch))
- return false;
- }
-
- if (BR->getSuccessor(1) != Latch) {
- DominanceFrontier::iterator DF1 = DF->find(BR->getSuccessor(1));
- assert (DF1 != DF->end() && "Unable to find dominance frontier");
- if (!DF1->second.count(Latch))
- return false;
- }
-
-  // Verify that the loop exiting block has only two predecessors, where one
-  // pred is the split condition block. The other predecessor will become the
-  // exiting block's dominator after the CFG is updated. TODO: Handle CFGs
-  // where the exiting block has more than two predecessors. This requires
-  // extra work in updating dominator information.
- BasicBlock *ExitingBBPred = NULL;
- for (pred_iterator PI = pred_begin(ExitingBlock), PE = pred_end(ExitingBlock);
- PI != PE; ++PI) {
- BasicBlock *BB = *PI;
- if (Header == BB)
- continue;
- if (ExitingBBPred)
- return false;
- else
- ExitingBBPred = BB;
- }
-
- if (!restrictLoopBound(*Op0))
- return false;
-
- if (!restrictLoopBound(*Op1))
- return false;
-
- // Update CFG.
- if (BR->getSuccessor(0) == ExitingBlock)
- BR->setUnconditionalDest(BR->getSuccessor(1));
- else
- BR->setUnconditionalDest(BR->getSuccessor(0));
-
- AND->eraseFromParent();
- if (Op0->use_empty())
- Op0->eraseFromParent();
- if (Op1->use_empty())
- Op1->eraseFromParent();
-
-  // Update dominator info. Now, ExitingBlock has only one predecessor,
-  // ExitingBBPred, and it is ExitingBlock's immediate dominator.
- DT->changeImmediateDominator(ExitingBlock, ExitingBBPred);
-
- BasicBlock *ExitBlock = ExitingBlock->getTerminator()->getSuccessor(1);
- if (L->contains(ExitBlock))
- ExitBlock = ExitingBlock->getTerminator()->getSuccessor(0);
-
-  // If ExitingBlock is a member of the loop basic blocks' DF list then
-  // replace ExitingBlock with the header and exit block in the DF list.
- DominanceFrontier::iterator ExitingBlockDF = DF->find(ExitingBlock);
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I) {
- BasicBlock *BB = *I;
- if (BB == Header || BB == ExitingBlock)
- continue;
- DominanceFrontier::iterator BBDF = DF->find(BB);
- DominanceFrontier::DomSetType::iterator DomSetI = BBDF->second.begin();
- DominanceFrontier::DomSetType::iterator DomSetE = BBDF->second.end();
- while (DomSetI != DomSetE) {
- DominanceFrontier::DomSetType::iterator CurrentItr = DomSetI;
- ++DomSetI;
- BasicBlock *DFBB = *CurrentItr;
- if (DFBB == ExitingBlock) {
- BBDF->second.erase(DFBB);
- for (DominanceFrontier::DomSetType::iterator
- EBI = ExitingBlockDF->second.begin(),
- EBE = ExitingBlockDF->second.end(); EBI != EBE; ++EBI)
- BBDF->second.insert(*EBI);
- }
- }
- }
- ++NumRestrictBounds;
- return true;
-}
-
-/// removeBlocks - Remove basic block DeadBB and all blocks dominated by DeadBB.
-/// This routine is used to remove split condition's dead branch, dominated by
-/// DeadBB. LiveBB dominates the split condition's other branch.
-void LoopIndexSplit::removeBlocks(BasicBlock *DeadBB, Loop *LP,
- BasicBlock *LiveBB) {
-
- // First update DeadBB's dominance frontier.
- SmallVector<BasicBlock *, 8> FrontierBBs;
- DominanceFrontier::iterator DeadBBDF = DF->find(DeadBB);
- if (DeadBBDF != DF->end()) {
- SmallVector<BasicBlock *, 8> PredBlocks;
-
- DominanceFrontier::DomSetType DeadBBSet = DeadBBDF->second;
- for (DominanceFrontier::DomSetType::iterator DeadBBSetI = DeadBBSet.begin(),
- DeadBBSetE = DeadBBSet.end(); DeadBBSetI != DeadBBSetE; ++DeadBBSetI)
- {
- BasicBlock *FrontierBB = *DeadBBSetI;
- FrontierBBs.push_back(FrontierBB);
-
-      // Remove any PHI incoming edge from blocks dominated by DeadBB.
- PredBlocks.clear();
- for(pred_iterator PI = pred_begin(FrontierBB), PE = pred_end(FrontierBB);
- PI != PE; ++PI) {
- BasicBlock *P = *PI;
- if (DT->dominates(DeadBB, P))
- PredBlocks.push_back(P);
- }
-
- for(BasicBlock::iterator FBI = FrontierBB->begin(), FBE = FrontierBB->end();
- FBI != FBE; ++FBI) {
- if (PHINode *PN = dyn_cast<PHINode>(FBI)) {
- for(SmallVector<BasicBlock *, 8>::iterator PI = PredBlocks.begin(),
- PE = PredBlocks.end(); PI != PE; ++PI) {
- BasicBlock *P = *PI;
- PN->removeIncomingValue(P);
- }
- }
- else
- break;
- }
- }
- }
-
- // Now remove DeadBB and all nodes dominated by DeadBB in df order.
- SmallVector<BasicBlock *, 32> WorkList;
- DomTreeNode *DN = DT->getNode(DeadBB);
- for (df_iterator<DomTreeNode*> DI = df_begin(DN),
- E = df_end(DN); DI != E; ++DI) {
- BasicBlock *BB = DI->getBlock();
- WorkList.push_back(BB);
- BB->replaceAllUsesWith(UndefValue::get(
- Type::getLabelTy(DeadBB->getContext())));
- }
-
- while (!WorkList.empty()) {
- BasicBlock *BB = WorkList.pop_back_val();
- LPM->deleteSimpleAnalysisValue(BB, LP);
- for(BasicBlock::iterator BBI = BB->begin(), BBE = BB->end();
- BBI != BBE; ) {
- Instruction *I = BBI;
- ++BBI;
- I->replaceAllUsesWith(UndefValue::get(I->getType()));
- LPM->deleteSimpleAnalysisValue(I, LP);
- I->eraseFromParent();
- }
- DT->eraseNode(BB);
- DF->removeBlock(BB);
- LI->removeBlock(BB);
- BB->eraseFromParent();
- }
-
- // Update Frontier BBs' dominator info.
- while (!FrontierBBs.empty()) {
- BasicBlock *FBB = FrontierBBs.pop_back_val();
- BasicBlock *NewDominator = FBB->getSinglePredecessor();
- if (!NewDominator) {
- pred_iterator PI = pred_begin(FBB), PE = pred_end(FBB);
- NewDominator = *PI;
- ++PI;
- if (NewDominator != LiveBB) {
- for(; PI != PE; ++PI) {
- BasicBlock *P = *PI;
- if (P == LiveBB) {
- NewDominator = LiveBB;
- break;
- }
- NewDominator = DT->findNearestCommonDominator(NewDominator, P);
- }
- }
- }
- assert (NewDominator && "Unable to fix dominator info.");
- DT->changeImmediateDominator(FBB, NewDominator);
- DF->changeImmediateDominator(FBB, NewDominator, DT);
- }
-
-}
-
-// moveExitCondition - Move exit condition EC into split condition block CondBB.
-void LoopIndexSplit::moveExitCondition(BasicBlock *CondBB, BasicBlock *ActiveBB,
- BasicBlock *ExitBB, ICmpInst *EC,
- ICmpInst *SC, PHINode *IV,
- Instruction *IVAdd, Loop *LP,
- unsigned ExitValueNum) {
-
- BasicBlock *ExitingBB = EC->getParent();
- Instruction *CurrentBR = CondBB->getTerminator();
-
- // Move exit condition into split condition block.
- EC->moveBefore(CurrentBR);
- EC->setOperand(ExitValueNum == 0 ? 1 : 0, IV);
-
- // Move exiting block's branch into split condition block. Update its branch
- // destination.
- BranchInst *ExitingBR = cast<BranchInst>(ExitingBB->getTerminator());
- ExitingBR->moveBefore(CurrentBR);
- BasicBlock *OrigDestBB = NULL;
- if (ExitingBR->getSuccessor(0) == ExitBB) {
- OrigDestBB = ExitingBR->getSuccessor(1);
- ExitingBR->setSuccessor(1, ActiveBB);
- }
- else {
- OrigDestBB = ExitingBR->getSuccessor(0);
- ExitingBR->setSuccessor(0, ActiveBB);
- }
-
- // Remove split condition and current split condition branch.
- SC->eraseFromParent();
- CurrentBR->eraseFromParent();
-
- // Connect exiting block to original destination.
- BranchInst::Create(OrigDestBB, ExitingBB);
-
- // Update PHINodes
- updatePHINodes(ExitBB, ExitingBB, CondBB, IV, IVAdd, LP);
-
- // Fix dominator info.
- // ExitBB is now dominated by CondBB
- DT->changeImmediateDominator(ExitBB, CondBB);
- DF->changeImmediateDominator(ExitBB, CondBB, DT);
-
- // Blocks outside the loop may have been in the dominance frontier of blocks
- // inside the condition; this is now impossible because the blocks inside the
-  // condition no longer dominate the exit. Remove the relevant blocks from
- // the dominance frontiers.
- for (Loop::block_iterator I = LP->block_begin(), E = LP->block_end();
- I != E; ++I) {
- if (!DT->properlyDominates(CondBB, *I)) continue;
- DominanceFrontier::iterator BBDF = DF->find(*I);
- DominanceFrontier::DomSetType::iterator DomSetI = BBDF->second.begin();
- DominanceFrontier::DomSetType::iterator DomSetE = BBDF->second.end();
- while (DomSetI != DomSetE) {
- DominanceFrontier::DomSetType::iterator CurrentItr = DomSetI;
- ++DomSetI;
- BasicBlock *DFBB = *CurrentItr;
- if (!LP->contains(DFBB))
- BBDF->second.erase(DFBB);
- }
- }
-}
-
-/// updatePHINodes - CFG has been changed.
-/// Before
-/// - ExitBB's single predecessor was Latch
-/// - Latch's second successor was Header
-/// Now
-/// - ExitBB's single predecessor is Header
-/// - Latch's one and only successor is Header
-///
-/// Update ExitBB PHINodes' to reflect this change.
-void LoopIndexSplit::updatePHINodes(BasicBlock *ExitBB, BasicBlock *Latch,
- BasicBlock *Header,
- PHINode *IV, Instruction *IVIncrement,
- Loop *LP) {
-
- for (BasicBlock::iterator BI = ExitBB->begin(), BE = ExitBB->end();
- BI != BE; ) {
- PHINode *PN = dyn_cast<PHINode>(BI);
- ++BI;
- if (!PN)
- break;
-
- Value *V = PN->getIncomingValueForBlock(Latch);
- if (PHINode *PHV = dyn_cast<PHINode>(V)) {
-      // PHV is in Latch. PHV has one use in an ExitBB PHINode and one use
-      // in Header, which is the new incoming value for PN.
- Value *NewV = NULL;
- for (Value::use_iterator UI = PHV->use_begin(), E = PHV->use_end();
- UI != E; ++UI)
- if (PHINode *U = dyn_cast<PHINode>(*UI))
- if (LP->contains(U)) {
- NewV = U;
- break;
- }
-
- // Add incoming value from header only if PN has any use inside the loop.
- if (NewV)
- PN->addIncoming(NewV, Header);
-
- } else if (Instruction *PHI = dyn_cast<Instruction>(V)) {
- // If this instruction is IVIncrement then IV is new incoming value
- // from header otherwise this instruction must be incoming value from
- // header because loop is in LCSSA form.
- if (PHI == IVIncrement)
- PN->addIncoming(IV, Header);
- else
- PN->addIncoming(V, Header);
- } else
- // Otherwise this is an incoming value from header because loop is in
- // LCSSA form.
- PN->addIncoming(V, Header);
-
- // Remove incoming value from Latch.
- PN->removeIncomingValue(Latch);
- }
-}
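// [Editorial sketch, not part of this change] updatePHINodes above leans on
// three PHINode APIs that recur throughout this patch. A minimal sketch of
// the same pattern, retargeting every PHI in a block from one predecessor to
// another (helper name hypothetical):
//
//   static void retargetPHIs(BasicBlock *BB, BasicBlock *OldPred,
//                            BasicBlock *NewPred) {
//     for (BasicBlock::iterator I = BB->begin();
//          PHINode *PN = dyn_cast<PHINode>(I); ++I) {
//       Value *V = PN->getIncomingValueForBlock(OldPred);
//       PN->addIncoming(V, NewPred);      // new edge carries the old value
//       PN->removeIncomingValue(OldPred); // then drop the stale edge
//     }
//   }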
-
-bool LoopIndexSplit::splitLoop() {
- SplitCondition = NULL;
- if (ExitCondition->getPredicate() == ICmpInst::ICMP_NE
- || ExitCondition->getPredicate() == ICmpInst::ICMP_EQ)
- return false;
- BasicBlock *Header = L->getHeader();
- BasicBlock *Latch = L->getLoopLatch();
- BranchInst *SBR = NULL; // Split Condition Branch
- BranchInst *EBR = cast<BranchInst>(ExitCondition->getParent()->getTerminator());
- // If the exiting block includes loop-variant instructions then this
- // loop may not be split safely.
- BasicBlock *ExitingBlock = ExitCondition->getParent();
- if (!cleanBlock(ExitingBlock)) return false;
-
- LLVMContext &Context = Header->getContext();
-
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I) {
- BranchInst *BR = dyn_cast<BranchInst>((*I)->getTerminator());
- if (!BR || BR->isUnconditional()) continue;
- ICmpInst *CI = dyn_cast<ICmpInst>(BR->getCondition());
- if (!CI || CI == ExitCondition
- || CI->getPredicate() == ICmpInst::ICMP_NE
- || CI->getPredicate() == ICmpInst::ICMP_EQ)
- continue;
-
- // Unable to handle triangle loops at the moment.
- // In a triangle loop, the split condition is in the header and one of
- // the split destinations is the loop latch. If the split condition is EQ
- // then such loops are already handled in processOneIterationLoop().
- if (Header == (*I)
- && (Latch == BR->getSuccessor(0) || Latch == BR->getSuccessor(1)))
- continue;
-
- // If the block does not dominate the latch then this is not a diamond.
- // Such a loop may not benefit from index splitting.
- if (!DT->dominates((*I), Latch))
- continue;
-
- // If the split condition branch's successors do not have SplitCondBlock
- // as their single predecessor, then it is not possible to remove the
- // inactive branch.
- if (!BR->getSuccessor(0)->getSinglePredecessor()
- || !BR->getSuccessor(1)->getSinglePredecessor())
- return false;
-
- // If the merge point for BR is not loop latch then skip this condition.
- if (BR->getSuccessor(0) != Latch) {
- DominanceFrontier::iterator DF0 = DF->find(BR->getSuccessor(0));
- assert (DF0 != DF->end() && "Unable to find dominance frontier");
- if (!DF0->second.count(Latch))
- continue;
- }
-
- if (BR->getSuccessor(1) != Latch) {
- DominanceFrontier::iterator DF1 = DF->find(BR->getSuccessor(1));
- assert (DF1 != DF->end() && "Unable to find dominance frontier");
- if (!DF1->second.count(Latch))
- continue;
- }
- SplitCondition = CI;
- SBR = BR;
- break;
- }
-
- if (!SplitCondition)
- return false;
-
- // If the predicate sign does not match then skip.
- if (ExitCondition->isSigned() != SplitCondition->isSigned())
- return false;
-
- unsigned EVOpNum = (ExitCondition->getOperand(1) == IVExitValue);
- unsigned SVOpNum = IVBasedValues.count(SplitCondition->getOperand(0));
- Value *SplitValue = SplitCondition->getOperand(SVOpNum);
- if (!L->isLoopInvariant(SplitValue))
- return false;
- if (!IVBasedValues.count(SplitCondition->getOperand(!SVOpNum)))
- return false;
-
- // Check for side effects.
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I) {
- BasicBlock *BB = *I;
-
- assert(DT->dominates(Header, BB));
- if (DT->properlyDominates(SplitCondition->getParent(), BB))
- continue;
-
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE; ++BI) {
- Instruction *Inst = BI;
-
- if (!Inst->isSafeToSpeculativelyExecute() && !isa<PHINode>(Inst)
- && !isa<BranchInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst))
- return false;
- }
- }
-
- // Normalize loop conditions so that it is easier to calculate new loop
- // bounds.
- if (IVisGT(*ExitCondition) || IVisGE(*ExitCondition)) {
- ExitCondition->setPredicate(ExitCondition->getInversePredicate());
- BasicBlock *T = EBR->getSuccessor(0);
- EBR->setSuccessor(0, EBR->getSuccessor(1));
- EBR->setSuccessor(1, T);
- }
-
- if (IVisGT(*SplitCondition) || IVisGE(*SplitCondition)) {
- SplitCondition->setPredicate(SplitCondition->getInversePredicate());
- BasicBlock *T = SBR->getSuccessor(0);
- SBR->setSuccessor(0, SBR->getSuccessor(1));
- SBR->setSuccessor(1, T);
- }
-
- //[*] Calculate new loop bounds.
- Value *AEV = SplitValue;
- Value *BSV = SplitValue;
- bool Sign = SplitCondition->isSigned();
- Instruction *PHTerm = L->getLoopPreheader()->getTerminator();
-
- if (IVisLT(*ExitCondition)) {
- if (IVisLT(*SplitCondition)) {
- /* Do nothing */
- }
- else if (IVisLE(*SplitCondition)) {
- AEV = getPlusOne(SplitValue, Sign, PHTerm, Context);
- BSV = getPlusOne(SplitValue, Sign, PHTerm, Context);
- } else {
- assert (0 && "Unexpected split condition!");
- }
- }
- else if (IVisLE(*ExitCondition)) {
- if (IVisLT(*SplitCondition)) {
- AEV = getMinusOne(SplitValue, Sign, PHTerm, Context);
- }
- else if (IVisLE(*SplitCondition)) {
- BSV = getPlusOne(SplitValue, Sign, PHTerm, Context);
- } else {
- assert (0 && "Unexpected split condition!");
- }
- } else {
- assert (0 && "Unexpected exit condition!");
- }
- AEV = getMin(AEV, IVExitValue, Sign, PHTerm);
- BSV = getMax(BSV, IVStartValue, Sign, PHTerm);
-
- // [*] Clone Loop
- ValueMap<const Value *, Value *> VMap;
- Loop *BLoop = CloneLoop(L, LPM, LI, VMap, this);
- Loop *ALoop = L;
-
- // [*] ALoop's exiting edge enters BLoop's header.
- // ALoop's original exit block becomes BLoop's exit block.
- PHINode *B_IndVar = cast<PHINode>(VMap[IndVar]);
- BasicBlock *A_ExitingBlock = ExitCondition->getParent();
- BranchInst *A_ExitInsn =
- dyn_cast<BranchInst>(A_ExitingBlock->getTerminator());
- assert (A_ExitInsn && "Unable to find suitable loop exit branch");
- BasicBlock *B_ExitBlock = A_ExitInsn->getSuccessor(1);
- BasicBlock *B_Header = BLoop->getHeader();
- if (ALoop->contains(B_ExitBlock)) {
- B_ExitBlock = A_ExitInsn->getSuccessor(0);
- A_ExitInsn->setSuccessor(0, B_Header);
- } else
- A_ExitInsn->setSuccessor(1, B_Header);
-
- // [*] Update ALoop's exit value using new exit value.
- ExitCondition->setOperand(EVOpNum, AEV);
-
- // [*] Update BLoop's header phi nodes. Remove incoming PHINode values
- //     from the original loop's preheader. Add incoming PHINode values
- //     from ALoop's exiting block. Update BLoop header's dominator info.
-
- // Collect inverse map of Header PHINodes.
- DenseMap<Value *, Value *> InverseMap;
- for (BasicBlock::iterator BI = ALoop->getHeader()->begin(),
- BE = ALoop->getHeader()->end(); BI != BE; ++BI) {
- if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PHINode *PNClone = cast<PHINode>(VMap[PN]);
- InverseMap[PNClone] = PN;
- } else
- break;
- }
-
- BasicBlock *A_Preheader = ALoop->getLoopPreheader();
- for (BasicBlock::iterator BI = B_Header->begin(), BE = B_Header->end();
- BI != BE; ++BI) {
- if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- // Remove incoming value from original preheader.
- PN->removeIncomingValue(A_Preheader);
-
- // Add incoming value from A_ExitingBlock.
- if (PN == B_IndVar)
- PN->addIncoming(BSV, A_ExitingBlock);
- else {
- PHINode *OrigPN = cast<PHINode>(InverseMap[PN]);
- Value *V2 = NULL;
- // If the loop header is also the loop exiting block then
- // OrigPN itself is the incoming value for the BLoop header.
- if (A_ExitingBlock == ALoop->getHeader())
- V2 = OrigPN;
- else
- V2 = OrigPN->getIncomingValueForBlock(A_ExitingBlock);
- PN->addIncoming(V2, A_ExitingBlock);
- }
- } else
- break;
- }
-
- DT->changeImmediateDominator(B_Header, A_ExitingBlock);
- DF->changeImmediateDominator(B_Header, A_ExitingBlock, DT);
-
- // [*] Update BLoop's exit block. Its new predecessor is BLoop's exiting
- //     block. Remove incoming PHINode values from ALoop's exiting block.
- //     Add new incoming values from BLoop's exiting block.
- //     Update BLoop exit block's dominator info.
- BasicBlock *B_ExitingBlock = cast<BasicBlock>(VMap[A_ExitingBlock]);
- for (BasicBlock::iterator BI = B_ExitBlock->begin(), BE = B_ExitBlock->end();
- BI != BE; ++BI) {
- if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PN->addIncoming(VMap[PN->getIncomingValueForBlock(A_ExitingBlock)],
- B_ExitingBlock);
- PN->removeIncomingValue(A_ExitingBlock);
- } else
- break;
- }
-
- DT->changeImmediateDominator(B_ExitBlock, B_ExitingBlock);
- DF->changeImmediateDominator(B_ExitBlock, B_ExitingBlock, DT);
-
- //[*] Split ALoop's exit edge. This creates a new block which
- //    serves two purposes. The first is to hold PHINode definitions
- //    that preserve ALoop's LCSSA form. The second is to act
- //    as a preheader for BLoop.
- BasicBlock *A_ExitBlock = SplitEdge(A_ExitingBlock, B_Header, this);
-
- //[*] Preserve ALoop's LCSSA form. Create new forwarding PHINodes
- // in A_ExitBlock to redefine outgoing PHI definitions from ALoop.
- for(BasicBlock::iterator BI = B_Header->begin(), BE = B_Header->end();
- BI != BE; ++BI) {
- if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- Value *V1 = PN->getIncomingValueForBlock(A_ExitBlock);
- PHINode *newPHI = PHINode::Create(PN->getType(), PN->getName());
- newPHI->addIncoming(V1, A_ExitingBlock);
- A_ExitBlock->getInstList().push_front(newPHI);
- PN->removeIncomingValue(A_ExitBlock);
- PN->addIncoming(newPHI, A_ExitBlock);
- } else
- break;
- }
-
- //[*] Eliminate split condition's inactive branch from ALoop.
- BasicBlock *A_SplitCondBlock = SplitCondition->getParent();
- BranchInst *A_BR = cast<BranchInst>(A_SplitCondBlock->getTerminator());
- BasicBlock *A_InactiveBranch = NULL;
- BasicBlock *A_ActiveBranch = NULL;
- A_ActiveBranch = A_BR->getSuccessor(0);
- A_InactiveBranch = A_BR->getSuccessor(1);
- A_BR->setUnconditionalDest(A_ActiveBranch);
- removeBlocks(A_InactiveBranch, L, A_ActiveBranch);
-
- //[*] Eliminate split condition's inactive branch from BLoop.
- BasicBlock *B_SplitCondBlock = cast<BasicBlock>(VMap[A_SplitCondBlock]);
- BranchInst *B_BR = cast<BranchInst>(B_SplitCondBlock->getTerminator());
- BasicBlock *B_InactiveBranch = NULL;
- BasicBlock *B_ActiveBranch = NULL;
- B_ActiveBranch = B_BR->getSuccessor(1);
- B_InactiveBranch = B_BR->getSuccessor(0);
- B_BR->setUnconditionalDest(B_ActiveBranch);
- removeBlocks(B_InactiveBranch, BLoop, B_ActiveBranch);
-
- BasicBlock *A_Header = ALoop->getHeader();
- if (A_ExitingBlock == A_Header)
- return true;
-
- //[*] Move exit condition into split condition block to avoid
- // executing dead loop iteration.
- ICmpInst *B_ExitCondition = cast<ICmpInst>(VMap[ExitCondition]);
- Instruction *B_IndVarIncrement = cast<Instruction>(VMap[IVIncrement]);
- ICmpInst *B_SplitCondition = cast<ICmpInst>(VMap[SplitCondition]);
-
- moveExitCondition(A_SplitCondBlock, A_ActiveBranch, A_ExitBlock, ExitCondition,
- cast<ICmpInst>(SplitCondition), IndVar, IVIncrement,
- ALoop, EVOpNum);
-
- moveExitCondition(B_SplitCondBlock, B_ActiveBranch,
- B_ExitBlock, B_ExitCondition,
- B_SplitCondition, B_IndVar, B_IndVarIncrement,
- BLoop, EVOpNum);
-
- ++NumIndexSplit;
- return true;
-}
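// [Editorial sketch, not part of this change] Schematically, with the bound
// names used above (AEV = ALoop's exit value, BSV = BLoop's start value),
// splitLoop rewrites
//
//   for (i = Start; i < Exit; ++i) { if (i < Split) f(i); else g(i); }
//
// into two loops over disjoint index ranges:
//
//   for (i = Start; i < min(Split, Exit); ++i) f(i);   // ALoop
//   for (i = max(Split, Start); i < Exit; ++i) g(i);   // BLoop
//
// which is exactly the getMin/getMax clamping applied to AEV and BSV.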
-
-/// cleanBlock - A block is considered clean if all non-terminator
-/// instructions are either PHINodes or IV based values.
-bool LoopIndexSplit::cleanBlock(BasicBlock *BB) {
- Instruction *Terminator = BB->getTerminator();
- for(BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE; ++BI) {
- Instruction *I = BI;
-
- if (isa<PHINode>(I) || I == Terminator || I == ExitCondition
- || I == SplitCondition || IVBasedValues.count(I)
- || isa<DbgInfoIntrinsic>(I))
- continue;
-
- if (I->mayHaveSideEffects())
- return false;
-
- // If I is used only inside this block then it is OK.
- bool usedOutsideBB = false;
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI) {
- Instruction *U = cast<Instruction>(*UI);
- if (U->getParent() != BB)
- usedOutsideBB = true;
- }
- if (!usedOutsideBB)
- continue;
-
- // Otherwise we have an instruction that may not allow loop splitting.
- return false;
- }
- return true;
-}
-
-/// IVisLT - If Op compares an IV based value with a loop invariant and
-/// the IV based value is less than the loop invariant then return the
-/// loop invariant. Otherwise return NULL.
-Value * LoopIndexSplit::IVisLT(ICmpInst &Op) {
- ICmpInst::Predicate P = Op.getPredicate();
- if ((P == ICmpInst::ICMP_SLT || P == ICmpInst::ICMP_ULT)
- && IVBasedValues.count(Op.getOperand(0))
- && L->isLoopInvariant(Op.getOperand(1)))
- return Op.getOperand(1);
-
- if ((P == ICmpInst::ICMP_SGT || P == ICmpInst::ICMP_UGT)
- && IVBasedValues.count(Op.getOperand(1))
- && L->isLoopInvariant(Op.getOperand(0)))
- return Op.getOperand(0);
-
- return NULL;
-}
-
-/// IVisLE - If Op compares an IV based value with a loop invariant and
-/// the IV based value is less than or equal to the loop invariant then
-/// return the loop invariant. Otherwise return NULL.
-Value * LoopIndexSplit::IVisLE(ICmpInst &Op) {
- ICmpInst::Predicate P = Op.getPredicate();
- if ((P == ICmpInst::ICMP_SLE || P == ICmpInst::ICMP_ULE)
- && IVBasedValues.count(Op.getOperand(0))
- && L->isLoopInvariant(Op.getOperand(1)))
- return Op.getOperand(1);
-
- if ((P == ICmpInst::ICMP_SGE || P == ICmpInst::ICMP_UGE)
- && IVBasedValues.count(Op.getOperand(1))
- && L->isLoopInvariant(Op.getOperand(0)))
- return Op.getOperand(0);
-
- return NULL;
-}
-
-/// IVisGT - If Op compares an IV based value with a loop invariant and
-/// the IV based value is greater than the loop invariant then return the
-/// loop invariant. Otherwise return NULL.
-Value * LoopIndexSplit::IVisGT(ICmpInst &Op) {
- ICmpInst::Predicate P = Op.getPredicate();
- if ((P == ICmpInst::ICMP_SGT || P == ICmpInst::ICMP_UGT)
- && IVBasedValues.count(Op.getOperand(0))
- && L->isLoopInvariant(Op.getOperand(1)))
- return Op.getOperand(1);
-
- if ((P == ICmpInst::ICMP_SLT || P == ICmpInst::ICMP_ULT)
- && IVBasedValues.count(Op.getOperand(1))
- && L->isLoopInvariant(Op.getOperand(0)))
- return Op.getOperand(0);
-
- return NULL;
-}
-
-/// IVisGE - If Op compares an IV based value with a loop invariant and
-/// the IV based value is greater than or equal to the loop invariant then
-/// return the loop invariant. Otherwise return NULL.
-Value * LoopIndexSplit::IVisGE(ICmpInst &Op) {
- ICmpInst::Predicate P = Op.getPredicate();
- if ((P == ICmpInst::ICMP_SGE || P == ICmpInst::ICMP_UGE)
- && IVBasedValues.count(Op.getOperand(0))
- && L->isLoopInvariant(Op.getOperand(1)))
- return Op.getOperand(1);
-
- if ((P == ICmpInst::ICMP_SLE || P == ICmpInst::ICMP_ULE)
- && IVBasedValues.count(Op.getOperand(1))
- && L->isLoopInvariant(Op.getOperand(0)))
- return Op.getOperand(0);
-
- return NULL;
-}
-
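// [Editorial sketch, not part of this change] The four IVis* predicates
// above share one shape: match "IVBased <pred> Invariant" directly, or the
// operand-swapped form. With CmpInst::getSwappedPredicate (which maps SGT
// back to SLT, UGE back to ULE, and so on) they could be expressed through
// a single hypothetical helper:
//
//   Value *LoopIndexSplit::IVisPred(ICmpInst &Op, ICmpInst::Predicate Want) {
//     ICmpInst::Predicate P = Op.getPredicate();
//     if (P == Want && IVBasedValues.count(Op.getOperand(0))
//         && L->isLoopInvariant(Op.getOperand(1)))
//       return Op.getOperand(1);
//     if (ICmpInst::getSwappedPredicate(P) == Want
//         && IVBasedValues.count(Op.getOperand(1))
//         && L->isLoopInvariant(Op.getOperand(0)))
//       return Op.getOperand(0);
//     return NULL;
//   }
//
// Each IVis* then reduces to two calls, one per signedness of the predicate.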
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
new file mode 100644
index 0000000..af25c5c
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -0,0 +1,170 @@
+//===- LoopInstSimplify.cpp - Loop Instruction Simplification Pass --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs lightweight instruction simplification on loop bodies.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "loop-instsimplify"
+#include "llvm/Instructions.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumSimplified, "Number of redundant instructions simplified");
+
+namespace {
+ class LoopInstSimplify : public LoopPass {
+ public:
+ static char ID; // Pass ID, replacement for typeid
+ LoopInstSimplify() : LoopPass(ID) {
+ initializeLoopInstSimplifyPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnLoop(Loop*, LPPassManager&);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<LoopInfo>();
+ AU.addRequiredID(LoopSimplifyID);
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addPreservedID(LCSSAID);
+ AU.addPreserved("scalar-evolution");
+ }
+ };
+}
+
+char LoopInstSimplify::ID = 0;
+INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
+ "Simplify instructions in loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_END(LoopInstSimplify, "loop-instsimplify",
+ "Simplify instructions in loops", false, false)
+
+Pass *llvm::createLoopInstSimplifyPass() {
+ return new LoopInstSimplify();
+}
+
+bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
+ DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
+ LoopInfo *LI = &getAnalysis<LoopInfo>();
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+
+ SmallVector<BasicBlock*, 8> ExitBlocks;
+ L->getUniqueExitBlocks(ExitBlocks);
+ array_pod_sort(ExitBlocks.begin(), ExitBlocks.end());
+
+ SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
+
+ // The bit we are stealing from the pointer represents whether this basic
+ // block is the header of a subloop, in which case we only process its phis.
+ typedef PointerIntPair<BasicBlock*, 1> WorklistItem;
+ SmallVector<WorklistItem, 16> VisitStack;
+ SmallPtrSet<BasicBlock*, 32> Visited;
+
+ bool Changed = false;
+ bool LocalChanged;
+ do {
+ LocalChanged = false;
+
+ VisitStack.clear();
+ Visited.clear();
+
+ VisitStack.push_back(WorklistItem(L->getHeader(), false));
+
+ while (!VisitStack.empty()) {
+ WorklistItem Item = VisitStack.pop_back_val();
+ BasicBlock *BB = Item.getPointer();
+ bool IsSubloopHeader = Item.getInt();
+
+ // Simplify instructions in the current basic block.
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
+ Instruction *I = BI++;
+
+ // The first time through the loop ToSimplify is empty and we try to
+ // simplify all instructions. On later iterations ToSimplify is not
+ // empty and we only bother simplifying instructions that are in it.
+ if (!ToSimplify->empty() && !ToSimplify->count(I))
+ continue;
+
+ // Don't bother simplifying unused instructions.
+ if (!I->use_empty()) {
+ Value *V = SimplifyInstruction(I, TD, DT);
+ if (V && LI->replacementPreservesLCSSAForm(I, V)) {
+ // Mark all uses for resimplification next time round the loop.
+ for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
+ UI != UE; ++UI)
+ Next->insert(cast<Instruction>(*UI));
+
+ I->replaceAllUsesWith(V);
+ LocalChanged = true;
+ ++NumSimplified;
+ }
+ }
+ LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I);
+
+ if (IsSubloopHeader && !isa<PHINode>(I))
+ break;
+ }
+
+ // Add all successors to the worklist, except for loop exit blocks and the
+ // bodies of subloops. We visit the headers of loops so that we can process
+ // their phis, but we contract the rest of the subloop body and only follow
+ // edges leading back to the original loop.
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
+ ++SI) {
+ BasicBlock *SuccBB = *SI;
+ if (!Visited.insert(SuccBB))
+ continue;
+
+ const Loop *SuccLoop = LI->getLoopFor(SuccBB);
+ if (SuccLoop && SuccLoop->getHeader() == SuccBB
+ && L->contains(SuccLoop)) {
+ VisitStack.push_back(WorklistItem(SuccBB, true));
+
+ SmallVector<BasicBlock*, 8> SubLoopExitBlocks;
+ SuccLoop->getExitBlocks(SubLoopExitBlocks);
+
+ for (unsigned i = 0; i < SubLoopExitBlocks.size(); ++i) {
+ BasicBlock *ExitBB = SubLoopExitBlocks[i];
+ if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB))
+ VisitStack.push_back(WorklistItem(ExitBB, false));
+ }
+
+ continue;
+ }
+
+ bool IsExitBlock = std::binary_search(ExitBlocks.begin(),
+ ExitBlocks.end(), SuccBB);
+ if (IsExitBlock)
+ continue;
+
+ VisitStack.push_back(WorklistItem(SuccBB, false));
+ }
+ }
+
+ // Place the list of instructions to simplify on the next loop iteration
+ // into ToSimplify.
+ std::swap(ToSimplify, Next);
+ Next->clear();
+
+ Changed |= LocalChanged;
+ } while (LocalChanged);
+
+ return Changed;
+}
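// [Editorial sketch, not part of this change] Two idioms above are worth
// noting. First, WorklistItem steals the low alignment bit of the BasicBlock
// pointer to flag subloop headers, keeping each stack entry one word wide.
// Second, S1/S2 form a double-buffered worklist: one set drives the current
// round while the other collects work for the next, and the two are
// exchanged with an O(1) pointer swap instead of a copy. The swap pattern in
// plain C++ (process() hypothetical):
//
//   #include <set>
//   #include <utility>
//   void runToFixpoint(std::set<int> Seed) {
//     std::set<int> A(Seed), B, *Cur = &A, *Next = &B;
//     while (!Cur->empty()) {
//       for (std::set<int>::iterator I = Cur->begin(), E = Cur->end();
//            I != E; ++I)
//         ; // process(*I) may insert follow-up items into *Next
//       Cur->clear();
//       std::swap(Cur, Next); // exchange the pointers, not the sets
//     }
//   }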
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 65acc1d..95e1578 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -15,16 +15,16 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/SmallVector.h"
using namespace llvm;
#define MAX_HEADER_SIZE 16
@@ -35,16 +35,13 @@ namespace {
class LoopRotate : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopRotate() : LoopPass(ID) {}
-
- // Rotate Loop L as many times as possible. Return true if
- // loop is rotated at least once.
- bool runOnLoop(Loop *L, LPPassManager &LPM);
+ LoopRotate() : LoopPass(ID) {
+ initializeLoopRotatePass(*PassRegistry::getPassRegistry());
+ }
// LCSSA form makes instruction renaming easier.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
@@ -54,79 +51,119 @@ namespace {
AU.addPreserved<ScalarEvolution>();
}
- // Helper functions
-
- /// Do actual work
- bool rotateLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool rotateLoop(Loop *L);
- /// Initialize local data
- void initialize();
-
- /// After loop rotation, loop pre-header has multiple successors.
- /// Insert one forwarding basic block to ensure that loop pre-header
- /// has only one successor.
- void preserveCanonicalLoopForm(LPPassManager &LPM);
-
private:
- Loop *L;
- BasicBlock *OrigHeader;
- BasicBlock *OrigPreHeader;
- BasicBlock *OrigLatch;
- BasicBlock *NewHeader;
- BasicBlock *Exit;
- LPPassManager *LPM_Ptr;
+ LoopInfo *LI;
};
}
char LoopRotate::ID = 0;
-INITIALIZE_PASS(LoopRotate, "loop-rotate", "Rotate Loops", false, false);
+INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
Pass *llvm::createLoopRotatePass() { return new LoopRotate(); }
/// Rotate Loop L as many times as possible. Return true if
/// the loop is rotated at least once.
-bool LoopRotate::runOnLoop(Loop *Lp, LPPassManager &LPM) {
-
- bool RotatedOneLoop = false;
- initialize();
- LPM_Ptr = &LPM;
+bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
+ LI = &getAnalysis<LoopInfo>();
// One loop can be rotated multiple times.
- while (rotateLoop(Lp,LPM)) {
- RotatedOneLoop = true;
- initialize();
- }
+ bool MadeChange = false;
+ while (rotateLoop(L))
+ MadeChange = true;
- return RotatedOneLoop;
+ return MadeChange;
}
-/// Rotate loop LP. Return true if the loop is rotated.
-bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
- L = Lp;
-
- OrigPreHeader = L->getLoopPreheader();
- if (!OrigPreHeader) return false;
-
- OrigLatch = L->getLoopLatch();
- if (!OrigLatch) return false;
+/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
+/// old header into the preheader. If there were uses of the values produced by
+/// these instructions that were outside of the loop, we have to insert PHI nodes
+/// to merge the two values. Do this now.
+static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
+ BasicBlock *OrigPreheader,
+ ValueToValueMapTy &ValueMap) {
+ // Remove PHI node entries that are no longer live.
+ BasicBlock::iterator I, E = OrigHeader->end();
+ for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
+ PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));
+
+ // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
+ // as necessary.
+ SSAUpdater SSA;
+ for (I = OrigHeader->begin(); I != E; ++I) {
+ Value *OrigHeaderVal = I;
+
+ // If there are no uses of the value (e.g. because it returns void), there
+ // is nothing to rewrite.
+ if (OrigHeaderVal->use_empty())
+ continue;
+
+ Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];
- OrigHeader = L->getHeader();
+ // The value now exists in two versions: the initial value in the preheader
+ // and the loop "next" value in the original header.
+ SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
+ SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
+ SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);
+
+ // Visit each use of the OrigHeader instruction.
+ for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
+ UE = OrigHeaderVal->use_end(); UI != UE; ) {
+ // Grab the use before incrementing the iterator.
+ Use &U = UI.getUse();
+
+ // Increment the iterator before removing the use from the list.
+ ++UI;
+
+ // SSAUpdater can't handle a non-PHI use in the same block as an
+ // earlier def. We can easily handle those cases manually.
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ if (!isa<PHINode>(UserInst)) {
+ BasicBlock *UserBB = UserInst->getParent();
+
+ // The original users in the OrigHeader are already using the
+ // original definitions.
+ if (UserBB == OrigHeader)
+ continue;
+
+ // Users in the OrigPreHeader need to use the value to which the
+ // original definitions are mapped.
+ if (UserBB == OrigPreheader) {
+ U = OrigPreHeaderVal;
+ continue;
+ }
+ }
+
+ // Anything else can be handled by SSAUpdater.
+ SSA.RewriteUse(U);
+ }
+ }
+}
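// [Editorial note, not part of this change] The function above is the
// canonical SSAUpdater recipe: declare the value being rewritten, register
// one available definition per block, then let RewriteUse insert whatever
// PHI nodes the join points require. Distilled:
//
//   SSAUpdater SSA;
//   SSA.Initialize(V->getType(), V->getName());
//   SSA.AddAvailableValue(BlockA, VersionInA);
//   SSA.AddAvailableValue(BlockB, VersionInB);
//   SSA.RewriteUse(U); // for each interesting Use &U of the old value
//
// The one wrinkle, handled manually above, is a non-PHI use in the same
// block as an earlier definition, which SSAUpdater cannot accept.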
+/// Rotate loop LP. Return true if the loop is rotated.
+bool LoopRotate::rotateLoop(Loop *L) {
// If the loop has only one block then there is not much to rotate.
if (L->getBlocks().size() == 1)
return false;
-
+
+ BasicBlock *OrigHeader = L->getHeader();
+
+ BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
+ if (BI == 0 || BI->isUnconditional())
+ return false;
+
// If the loop header is not one of the loop exiting blocks then
// either this loop is already rotated or it is not
// suitable for loop rotation transformations.
if (!L->isLoopExiting(OrigHeader))
return false;
- BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
- if (!BI)
- return false;
- assert(BI->isConditional() && "Branch Instruction is not conditional");
-
// Updating PHInodes in loops with multiple exits adds complexity.
// Keep it simple, and restrict loop rotation to loops with one exit only.
// In future, lift this restriction and support for multiple exits if
@@ -136,24 +173,18 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
if (ExitBlocks.size() > 1)
return false;
- // Check size of original header and reject
- // loop if it is very big.
- unsigned Size = 0;
-
- // FIXME: Use common api to estimate size.
- for (BasicBlock::const_iterator OI = OrigHeader->begin(),
- OE = OrigHeader->end(); OI != OE; ++OI) {
- if (isa<PHINode>(OI))
- continue; // PHI nodes don't count.
- if (isa<DbgInfoIntrinsic>(OI))
- continue; // Debug intrinsics don't count as size.
- ++Size;
+ // Check size of original header and reject loop if it is very big.
+ {
+ CodeMetrics Metrics;
+ Metrics.analyzeBasicBlock(OrigHeader);
+ if (Metrics.NumInsts > MAX_HEADER_SIZE)
+ return false;
}
- if (Size > MAX_HEADER_SIZE)
- return false;
-
// Now, this loop is suitable for rotation.
+ BasicBlock *OrigPreheader = L->getLoopPreheader();
+ BasicBlock *OrigLatch = L->getLoopLatch();
+ assert(OrigPreheader && OrigLatch && "Loop not in canonical form?");
// Anything ScalarEvolution may know about this loop or the PHI nodes
// in its header will soon be invalidated.
@@ -163,8 +194,8 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// Find new Loop header. NewHeader is a Header's one and only successor
// that is inside loop. Header's other successor is outside the
// loop. Otherwise loop is not suitable for rotation.
- Exit = BI->getSuccessor(0);
- NewHeader = BI->getSuccessor(1);
+ BasicBlock *Exit = BI->getSuccessor(0);
+ BasicBlock *NewHeader = BI->getSuccessor(1);
if (L->contains(Exit))
std::swap(Exit, NewHeader);
assert(NewHeader && "Unable to determine new loop header");
@@ -180,20 +211,54 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// Begin by walking OrigHeader and populating ValueMap with an entry for
// each Instruction.
BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
- DenseMap<const Value *, Value *> ValueMap;
+ ValueToValueMapTy ValueMap;
// For PHI nodes, the value available in OldPreHeader is just the
// incoming value from OldPreHeader.
for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
- ValueMap[PN] = PN->getIncomingValue(PN->getBasicBlockIndex(OrigPreHeader));
+ ValueMap[PN] = PN->getIncomingValue(PN->getBasicBlockIndex(OrigPreheader));
- // For the rest of the instructions, create a clone in the OldPreHeader.
- TerminatorInst *LoopEntryBranch = OrigPreHeader->getTerminator();
- for (; I != E; ++I) {
- Instruction *C = I->clone();
- C->setName(I->getName());
- C->insertBefore(LoopEntryBranch);
- ValueMap[I] = C;
+ // For the rest of the instructions, either hoist to the OrigPreheader if
+ // possible or create a clone in the OldPreHeader if not.
+ TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
+ while (I != E) {
+ Instruction *Inst = I++;
+
+ // If the instruction's operands are invariant and it doesn't read or write
+ // memory, then it is safe to hoist. Doing this doesn't change the order of
+ // execution in the preheader, but does prevent the instruction from
+ // executing in each iteration of the loop. This means it is safe to hoist
+ // something that might trap, but isn't safe to hoist something that reads
+ // memory (without proving that the loop doesn't write).
+ if (L->hasLoopInvariantOperands(Inst) &&
+ !Inst->mayReadFromMemory() && !Inst->mayWriteToMemory() &&
+ !isa<TerminatorInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) {
+ Inst->moveBefore(LoopEntryBranch);
+ continue;
+ }
+
+ // Otherwise, create a duplicate of the instruction.
+ Instruction *C = Inst->clone();
+
+ // Eagerly remap the operands of the instruction.
+ RemapInstruction(C, ValueMap,
+ RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
+
+ // With the operands remapped, see if the instruction constant folds or is
+ // otherwise simplifiable. This commonly occurs because the entry value from
+ // PHI nodes allows icmps and other instructions to fold.
+ Value *V = SimplifyInstruction(C);
+ if (V && LI->replacementPreservesLCSSAForm(C, V)) {
+ // If so, then delete the temporary instruction and stick the folded value
+ // in the map.
+ delete C;
+ ValueMap[Inst] = V;
+ } else {
+ // Otherwise, stick the new instruction into the new block!
+ C->setName(Inst->getName());
+ C->insertBefore(LoopEntryBranch);
+ ValueMap[Inst] = C;
+ }
}
// Along with all the other instructions, we just cloned OrigHeader's
@@ -203,221 +268,81 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
for (BasicBlock::iterator BI = TI->getSuccessor(i)->begin();
PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
- PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreHeader);
+ PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);
// Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
// OrigPreHeader's old terminator (the original branch into the loop), and
// remove the corresponding incoming values from the PHI nodes in OrigHeader.
LoopEntryBranch->eraseFromParent();
- for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
- PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreHeader));
- // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
- // as necessary.
- SSAUpdater SSA;
- for (I = OrigHeader->begin(); I != E; ++I) {
- Value *OrigHeaderVal = I;
- Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];
-
- // The value now exists in two versions: the initial value in the preheader
- // and the loop "next" value in the original header.
- SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
- SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
- SSA.AddAvailableValue(OrigPreHeader, OrigPreHeaderVal);
-
- // Visit each use of the OrigHeader instruction.
- for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
- UE = OrigHeaderVal->use_end(); UI != UE; ) {
- // Grab the use before incrementing the iterator.
- Use &U = UI.getUse();
-
- // Increment the iterator before removing the use from the list.
- ++UI;
-
- // SSAUpdater can't handle a non-PHI use in the same block as an
- // earlier def. We can easily handle those cases manually.
- Instruction *UserInst = cast<Instruction>(U.getUser());
- if (!isa<PHINode>(UserInst)) {
- BasicBlock *UserBB = UserInst->getParent();
-
- // The original users in the OrigHeader are already using the
- // original definitions.
- if (UserBB == OrigHeader)
- continue;
-
- // Users in the OrigPreHeader need to use the value to which the
- // original definitions are mapped.
- if (UserBB == OrigPreHeader) {
- U = OrigPreHeaderVal;
- continue;
- }
- }
-
- // Anything else can be handled by SSAUpdater.
- SSA.RewriteUse(U);
- }
- }
+ // If there were any uses of instructions in the duplicated block outside the
+ // loop, update them, inserting PHI nodes as required.
+ RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap);
// NewHeader is now the header of the loop.
L->moveToHeader(NewHeader);
+ assert(L->getHeader() == NewHeader && "Latch block is our new header");
- // Move the original header to the bottom of the loop, where it now more
- // naturally belongs. This isn't necessary for correctness, and CodeGen can
- // usually reorder blocks on its own to fix things like this up, but it's
- // still nice to keep the IR readable.
- //
- // The original header should have only one predecessor at this point, since
- // we checked that the loop had a proper preheader and unique backedge before
- // we started.
- assert(OrigHeader->getSinglePredecessor() &&
- "Original loop header has too many predecessors after loop rotation!");
- OrigHeader->moveAfter(OrigHeader->getSinglePredecessor());
-
- // Also, since this original header only has one predecessor, zap its
- // PHI nodes, which are now trivial.
- FoldSingleEntryPHINodes(OrigHeader);
-
- // TODO: We could just go ahead and merge OrigHeader into its predecessor
- // at this point, if we don't mind updating dominator info.
-
- // Establish a new preheader, update dominators, etc.
- preserveCanonicalLoopForm(LPM);
-
- ++NumRotated;
- return true;
-}
-
-/// Initialize local data
-void LoopRotate::initialize() {
- L = NULL;
- OrigHeader = NULL;
- OrigPreHeader = NULL;
- NewHeader = NULL;
- Exit = NULL;
-}
-
-/// After loop rotation, loop pre-header has multiple successors.
-/// Insert one forwarding basic block to ensure that loop pre-header
-/// has only one successor.
-void LoopRotate::preserveCanonicalLoopForm(LPPassManager &LPM) {
-
- // Right now original pre-header has two successors, new header and
- // exit block. Insert new block between original pre-header and
- // new header such that loop's new pre-header has only one successor.
- BasicBlock *NewPreHeader = BasicBlock::Create(OrigHeader->getContext(),
- "bb.nph",
- OrigHeader->getParent(),
- NewHeader);
- LoopInfo &LI = getAnalysis<LoopInfo>();
- if (Loop *PL = LI.getLoopFor(OrigPreHeader))
- PL->addBasicBlockToLoop(NewPreHeader, LI.getBase());
- BranchInst::Create(NewHeader, NewPreHeader);
- BranchInst *OrigPH_BI = cast<BranchInst>(OrigPreHeader->getTerminator());
- if (OrigPH_BI->getSuccessor(0) == NewHeader)
- OrigPH_BI->setSuccessor(0, NewPreHeader);
- else {
- assert(OrigPH_BI->getSuccessor(1) == NewHeader &&
- "Unexpected original pre-header terminator");
- OrigPH_BI->setSuccessor(1, NewPreHeader);
- }
-
- PHINode *PN;
- for (BasicBlock::iterator I = NewHeader->begin();
- (PN = dyn_cast<PHINode>(I)); ++I) {
- int index = PN->getBasicBlockIndex(OrigPreHeader);
- assert(index != -1 && "Expected incoming value from Original PreHeader");
- PN->setIncomingBlock(index, NewPreHeader);
- assert(PN->getBasicBlockIndex(OrigPreHeader) == -1 &&
- "Expected only one incoming value from Original PreHeader");
- }
-
- if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
- DT->addNewBlock(NewPreHeader, OrigPreHeader);
- DT->changeImmediateDominator(L->getHeader(), NewPreHeader);
- DT->changeImmediateDominator(Exit, OrigPreHeader);
- for (Loop::block_iterator BI = L->block_begin(), BE = L->block_end();
- BI != BE; ++BI) {
- BasicBlock *B = *BI;
- if (L->getHeader() != B) {
- DomTreeNode *Node = DT->getNode(B);
- if (Node && Node->getBlock() == OrigHeader)
- DT->changeImmediateDominator(*BI, L->getHeader());
- }
- }
- DT->changeImmediateDominator(OrigHeader, OrigLatch);
- }
-
- if (DominanceFrontier *DF = getAnalysisIfAvailable<DominanceFrontier>()) {
- // New Preheader's dominance frontier is Exit block.
- DominanceFrontier::DomSetType NewPHSet;
- NewPHSet.insert(Exit);
- DF->addBasicBlock(NewPreHeader, NewPHSet);
-
- // New Header's dominance frontier now includes itself and Exit block
- DominanceFrontier::iterator HeadI = DF->find(L->getHeader());
- if (HeadI != DF->end()) {
- DominanceFrontier::DomSetType & HeaderSet = HeadI->second;
- HeaderSet.clear();
- HeaderSet.insert(L->getHeader());
- HeaderSet.insert(Exit);
- } else {
- DominanceFrontier::DomSetType HeaderSet;
- HeaderSet.insert(L->getHeader());
- HeaderSet.insert(Exit);
- DF->addBasicBlock(L->getHeader(), HeaderSet);
- }
-
- // Original header (new Loop Latch)'s dominance frontier is Exit.
- DominanceFrontier::iterator LatchI = DF->find(L->getLoopLatch());
- if (LatchI != DF->end()) {
- DominanceFrontier::DomSetType &LatchSet = LatchI->second;
- LatchSet = LatchI->second;
- LatchSet.clear();
- LatchSet.insert(Exit);
- } else {
- DominanceFrontier::DomSetType LatchSet;
- LatchSet.insert(Exit);
- DF->addBasicBlock(L->getHeader(), LatchSet);
+ // At this point, we've finished our major CFG changes. As part of cloning
+ // the loop into the preheader we've simplified instructions and the
+ // duplicated conditional branch may now be branching on a constant. If it is
+ // branching on a constant and if that constant means that we enter the loop,
+ // then we fold away the cond branch to an uncond branch. This simplifies the
+ // loop in cases important for nested loops, and it also means we don't have
+ // to split as many edges.
+ BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
+ assert(PHBI->isConditional() && "Should be clone of BI condbr!");
+ if (!isa<ConstantInt>(PHBI->getCondition()) ||
+ PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero())
+ != NewHeader) {
+ // The conditional branch can't be folded, handle the general case.
+ // Update DominatorTree to reflect the CFG change we just made. Then split
+ // edges as necessary to preserve LoopSimplify form.
+ if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
+ // Since OrigPreheader now has the conditional branch to Exit block, it is
+ // the dominator of Exit.
+ DT->changeImmediateDominator(Exit, OrigPreheader);
+ DT->changeImmediateDominator(NewHeader, OrigPreheader);
+
+ // Update OrigHeader to be dominated by the new header block.
+ DT->changeImmediateDominator(OrigHeader, OrigLatch);
}
-
- // If a loop block dominates new loop latch then add to its frontiers
- // new header and Exit and remove new latch (which is equal to original
- // header).
- BasicBlock *NewLatch = L->getLoopLatch();
-
- assert(NewLatch == OrigHeader && "NewLatch is unequal to OrigHeader");
-
+
+ // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
+ // thus is not a preheader anymore. Split the edge to form a real preheader.
+ BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
+ NewPH->setName(NewHeader->getName() + ".lr.ph");
+
+ // Preserve canonical loop form, which means that 'Exit' should have only one
+ // predecessor.
+ BasicBlock *ExitSplit = SplitCriticalEdge(L->getLoopLatch(), Exit, this);
+ ExitSplit->moveBefore(Exit);
+ } else {
+ // We can fold the conditional branch in the preheader, this makes things
+ // simpler. The first step is to remove the extra edge to the Exit block.
+ Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
+ BranchInst::Create(NewHeader, PHBI);
+ PHBI->eraseFromParent();
+
+ // With our CFG finalized, update DomTree if it is available.
if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
- for (Loop::block_iterator BI = L->block_begin(), BE = L->block_end();
- BI != BE; ++BI) {
- BasicBlock *B = *BI;
- if (DT->dominates(B, NewLatch)) {
- DominanceFrontier::iterator BDFI = DF->find(B);
- if (BDFI != DF->end()) {
- DominanceFrontier::DomSetType &BSet = BDFI->second;
- BSet.erase(NewLatch);
- BSet.insert(L->getHeader());
- BSet.insert(Exit);
- } else {
- DominanceFrontier::DomSetType BSet;
- BSet.insert(L->getHeader());
- BSet.insert(Exit);
- DF->addBasicBlock(B, BSet);
- }
- }
- }
+ // Update OrigHeader to be dominated by the new header block.
+ DT->changeImmediateDominator(NewHeader, OrigPreheader);
+ DT->changeImmediateDominator(OrigHeader, OrigLatch);
}
}
-
- // Preserve canonical loop form, which means Exit block should
- // have only one predecessor.
- SplitEdge(L->getLoopLatch(), Exit, this);
-
- assert(NewHeader && L->getHeader() == NewHeader &&
- "Invalid loop header after loop rotation");
- assert(NewPreHeader && L->getLoopPreheader() == NewPreHeader &&
- "Invalid loop preheader after loop rotation");
- assert(L->getLoopLatch() &&
- "Invalid loop latch after loop rotation");
+
+ assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
+ assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");
+
+ // Now that the CFG and DomTree are in a consistent state again, try to merge
+ // the OrigHeader block into OrigLatch. This will succeed if they are
+ // connected by an unconditional branch. This is just a cleanup so the
+ // emitted code isn't too gross in this common case.
+ MergeBlockIntoPredecessor(OrigHeader, this);
+
+ ++NumRotated;
+ return true;
}
+
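// [Editorial note, not part of this change] At the source level, rotation
// turns a top-tested loop into a guarded bottom-tested one (schematic C):
//
//   while (i < n) { body(i); ++i; }     // header test also guards entry
//
// becomes
//
//   if (i < n) {                        // cloned header test, now in the
//     do { body(i); ++i; }              // preheader; the loop proper is
//     while (i < n);                    // tested at the latch
//   }
//
// This is why the pass clones the header into the preheader, and why a clone
// whose branch folds to a constant (the else-path above) removes the guard
// entirely.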
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index e8dc5d3..ac4aea2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -63,6 +63,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -113,7 +114,7 @@ class RegUseTracker {
public:
void CountRegister(const SCEV *Reg, size_t LUIdx);
void DropRegister(const SCEV *Reg, size_t LUIdx);
- void DropUse(size_t LUIdx);
+ void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);
bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
@@ -152,11 +153,19 @@ RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
}
void
-RegUseTracker::DropUse(size_t LUIdx) {
- // Remove the use index from every register's use list.
+RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
+ assert(LUIdx <= LastLUIdx);
+
+ // Update RegUses. The data structure is not optimized for this purpose;
+ // we must iterate through it and update each of the bit vectors.
for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
- I != E; ++I)
- I->second.UsedByIndices.reset(LUIdx);
+ I != E; ++I) {
+ SmallBitVector &UsedByIndices = I->second.UsedByIndices;
+ if (LUIdx < UsedByIndices.size())
+ UsedByIndices[LUIdx] =
+ LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
+ UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
+ }
}
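// [Editorial note, not part of this change] SwapAndDropUse is the bit-vector
// mirror of the swap-and-pop deletion that DeleteUse performs on the Uses
// vector: the victim index takes over the last element's contents and the
// tail is dropped, so deletion is O(1) but the last use is renumbered to
// LUIdx. The plain-vector form of the idiom:
//
//   #include <utility>
//   #include <vector>
//   template <typename T>
//   void swapAndPop(std::vector<T> &V, size_t Idx) {
//     std::swap(V[Idx], V.back()); // victim trades places with the tail
//     V.pop_back();                // only the old last index is invalidated
//   }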
bool
@@ -202,8 +211,7 @@ struct Formula {
Formula() : ScaledReg(0) {}
- void InitialMatch(const SCEV *S, Loop *L,
- ScalarEvolution &SE, DominatorTree &DT);
+ void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
unsigned getNumRegs() const;
const Type *getType() const;
@@ -224,9 +232,9 @@ struct Formula {
static void DoInitialMatch(const SCEV *S, Loop *L,
SmallVectorImpl<const SCEV *> &Good,
SmallVectorImpl<const SCEV *> &Bad,
- ScalarEvolution &SE, DominatorTree &DT) {
+ ScalarEvolution &SE) {
// Collect expressions which properly dominate the loop header.
- if (S->properlyDominates(L->getHeader(), &DT)) {
+ if (SE.properlyDominates(S, L->getHeader())) {
Good.push_back(S);
return;
}
@@ -235,18 +243,18 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
I != E; ++I)
- DoInitialMatch(*I, L, Good, Bad, SE, DT);
+ DoInitialMatch(*I, L, Good, Bad, SE);
return;
}
// Look at addrec operands.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
if (!AR->getStart()->isZero()) {
- DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
+ DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
AR->getLoop()),
- L, Good, Bad, SE, DT);
+ L, Good, Bad, SE);
return;
}
@@ -258,7 +266,7 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
SmallVector<const SCEV *, 4> MyGood;
SmallVector<const SCEV *, 4> MyBad;
- DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
+ DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
SE.getEffectiveSCEVType(NewMul->getType())));
for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
@@ -278,11 +286,10 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
-void Formula::InitialMatch(const SCEV *S, Loop *L,
- ScalarEvolution &SE, DominatorTree &DT) {
+void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
SmallVector<const SCEV *, 4> Good;
SmallVector<const SCEV *, 4> Bad;
- DoInitialMatch(S, L, Good, Bad, SE, DT);
+ DoInitialMatch(S, L, Good, Bad, SE);
if (!Good.empty()) {
const SCEV *Sum = SE.getAddExpr(Good);
if (!Sum->isZero())
@@ -608,7 +615,7 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
bool Changed = false;
while (!DeadInsts.empty()) {
- Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());
+ Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());
if (I == 0 || !isInstructionTriviallyDead(I))
continue;
@@ -645,8 +652,6 @@ public:
: NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
SetupCost(0) {}
- unsigned getNumRegs() const { return NumRegs; }
-
bool operator<(const Cost &Other) const;
void Loose();
@@ -722,6 +727,9 @@ void Cost::RateRegister(const SCEV *Reg,
(isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
++SetupCost;
+
+ NumIVMuls += isa<SCEVMulExpr>(Reg) &&
+ SE.hasComputableLoopEvolution(Reg, L);
}
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
@@ -756,9 +764,6 @@ void Cost::RateFormula(const Formula &F,
return;
}
RatePrimaryRegister(BaseReg, Regs, L, SE, DT);
-
- NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
- BaseReg->hasComputableLoopEvolution(L);
}
if (F.BaseRegs.size() > 1)
@@ -1257,32 +1262,6 @@ struct UseMapDenseMapInfo {
}
};
-/// FormulaSorter - This class implements an ordering for formulae which sorts
-/// them by their standalone cost.
-class FormulaSorter {
- /// These two sets are kept empty, so that we compute standalone costs.
- DenseSet<const SCEV *> VisitedRegs;
- SmallPtrSet<const SCEV *, 16> Regs;
- Loop *L;
- LSRUse *LU;
- ScalarEvolution &SE;
- DominatorTree &DT;
-
-public:
- FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
- : L(l), LU(&lu), SE(se), DT(dt) {}
-
- bool operator()(const Formula &A, const Formula &B) {
- Cost CostA;
- CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
- Regs.clear();
- Cost CostB;
- CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
- Regs.clear();
- return CostA < CostB;
- }
-};
-
/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
@@ -1341,7 +1320,7 @@ class LSRInstance {
LSRUse::KindType Kind,
const Type *AccessTy);
- void DeleteUse(LSRUse &LU);
+ void DeleteUse(LSRUse &LU, size_t LUIdx);
LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
@@ -1925,10 +1904,13 @@ LSRInstance::getUse(const SCEV *&Expr,
}
/// DeleteUse - Delete the given use from the Uses list.
-void LSRInstance::DeleteUse(LSRUse &LU) {
+void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
if (&LU != &Uses.back())
std::swap(LU, Uses.back());
Uses.pop_back();
+
+ // Update RegUses.
+ RegUses.SwapAndDropUse(LUIdx, Uses.size());
}
/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
@@ -2073,7 +2055,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
// x == y --> x - y == 0
const SCEV *N = SE.getSCEV(NV);
- if (N->isLoopInvariant(L)) {
+ if (SE.isLoopInvariant(N, L)) {
Kind = LSRUse::ICmpZero;
S = SE.getMinusSCEV(N, S);
}
@@ -2113,7 +2095,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
Formula F;
- F.InitialMatch(S, L, SE, DT);
+ F.InitialMatch(S, L, SE);
bool Inserted = InsertFormula(LU, LUIdx, F);
assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}
@@ -2213,7 +2195,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
unsigned OtherIdx = !UI.getOperandNo();
Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
- if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
+ if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
continue;
}
@@ -2296,7 +2278,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
// Loop-variant "unknown" values are uninteresting; we won't be able to
// do anything meaningful with them.
- if (isa<SCEVUnknown>(*J) && !(*J)->isLoopInvariant(L))
+ if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
continue;
// Don't pull a constant into a register if the constant could be folded
@@ -2347,8 +2329,8 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
for (SmallVectorImpl<const SCEV *>::const_iterator
I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
const SCEV *BaseReg = *I;
- if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
- !BaseReg->hasComputableLoopEvolution(L))
+ if (SE.properlyDominates(BaseReg, L->getHeader()) &&
+ !SE.hasComputableLoopEvolution(BaseReg, L))
Ops.push_back(BaseReg);
else
F.BaseRegs.push_back(BaseReg);
@@ -2813,9 +2795,11 @@ LSRInstance::GenerateAllReuseFormulae() {
print_uses(dbgs()));
}
-/// If their are multiple formulae with the same set of registers used
+/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
+ DenseSet<const SCEV *> VisitedRegs;
+ SmallPtrSet<const SCEV *, 16> Regs;
#ifndef NDEBUG
bool ChangedFormulae = false;
#endif
@@ -2828,7 +2812,6 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
- FormulaSorter Sorter(L, LU, SE, DT);
DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
bool Any = false;
@@ -2854,7 +2837,14 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
BestFormulae.insert(std::make_pair(Key, FIdx));
if (!P.second) {
Formula &Best = LU.Formulae[P.first->second];
- if (Sorter.operator()(F, Best))
+
+ Cost CostF;
+ CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
+ Regs.clear();
+ Cost CostBest;
+ CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
+ Regs.clear();
+ if (CostF < CostBest)
std::swap(F, Best);
DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
dbgs() << "\n"
@@ -2894,7 +2884,7 @@ static const size_t ComplexityLimit = UINT16_MAX;
/// this many solutions because it prunes the search space, but the pruning
/// isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
- uint32_t Power = 1;
+ size_t Power = 1;
for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
E = Uses.end(); I != E; ++I) {
size_t FSize = I->Formulae.size();
@@ -3001,6 +2991,28 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
+ // Update the relocs to reference the new use.
+ for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
+ E = Fixups.end(); I != E; ++I) {
+ LSRFixup &Fixup = *I;
+ if (Fixup.LUIdx == LUIdx) {
+ Fixup.LUIdx = LUThatHas - &Uses.front();
+ Fixup.Offset += F.AM.BaseOffs;
+ // Add the new offset to LUThatHas' offset list.
+ if (LUThatHas->Offsets.back() != Fixup.Offset) {
+ LUThatHas->Offsets.push_back(Fixup.Offset);
+ if (Fixup.Offset > LUThatHas->MaxOffset)
+ LUThatHas->MaxOffset = Fixup.Offset;
+ if (Fixup.Offset < LUThatHas->MinOffset)
+ LUThatHas->MinOffset = Fixup.Offset;
+ }
+ DEBUG(dbgs() << "New fixup has offset "
+ << Fixup.Offset << '\n');
+ }
+ if (Fixup.LUIdx == NumUses-1)
+ Fixup.LUIdx = LUIdx;
+ }
+
// Delete formulae from the new use which are no longer legal.
bool Any = false;
for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
@@ -3019,22 +3031,8 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
if (Any)
LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
- // Update the relocs to reference the new use.
- for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
- E = Fixups.end(); I != E; ++I) {
- LSRFixup &Fixup = *I;
- if (Fixup.LUIdx == LUIdx) {
- Fixup.LUIdx = LUThatHas - &Uses.front();
- Fixup.Offset += F.AM.BaseOffs;
- DEBUG(dbgs() << "New fixup has offset "
- << Fixup.Offset << '\n');
- }
- if (Fixup.LUIdx == NumUses-1)
- Fixup.LUIdx = LUIdx;
- }
-
// Delete the old use.
- DeleteUse(LU);
+ DeleteUse(LU, LUIdx);
--LUIdx;
--NumUses;
break;
@@ -3546,21 +3544,23 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
// is the canonical backedge for this loop, which complicates post-inc
// users.
if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
- !isa<IndirectBrInst>(BB->getTerminator()) &&
- (PN->getParent() != L->getHeader() || !L->contains(BB))) {
- // Split the critical edge.
- BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);
-
- // If PN is outside of the loop and BB is in the loop, we want to
- // move the block to be immediately before the PHI block, not
- // immediately after BB.
- if (L->contains(BB) && !L->contains(PN))
- NewBB->moveBefore(PN->getParent());
-
- // Splitting the edge can reduce the number of PHI entries we have.
- e = PN->getNumIncomingValues();
- BB = NewBB;
- i = PN->getBasicBlockIndex(BB);
+ !isa<IndirectBrInst>(BB->getTerminator())) {
+ Loop *PNLoop = LI.getLoopFor(PN->getParent());
+ if (!PNLoop || PN->getParent() != PNLoop->getHeader()) {
+ // Split the critical edge.
+ BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);
+
+ // If PN is outside of the loop and BB is in the loop, we want to
+ // move the block to be immediately before the PHI block, not
+ // immediately after BB.
+ if (L->contains(BB) && !L->contains(PN))
+ NewBB->moveBefore(PN->getParent());
+
+ // Splitting the edge can reduce the number of PHI entries we have.
+ e = PN->getNumIncomingValues();
+ BB = NewBB;
+ i = PN->getBasicBlockIndex(BB);
+ }
}
std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
@@ -3792,21 +3792,30 @@ private:
}
char LoopStrengthReduce::ID = 0;
-INITIALIZE_PASS(LoopStrengthReduce, "loop-reduce",
- "Loop Strength Reduction", false, false);
+INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
+ "Loop Strength Reduction", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(IVUsers)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
+ "Loop Strength Reduction", false, false)
+
Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
return new LoopStrengthReduce(TLI);
}
LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
- : LoopPass(ID), TLI(tli) {}
+ : LoopPass(ID), TLI(tli) {
+ initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
+ }
void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
// We split critical edges, so we change the CFG. However, we do update
// many analyses if they are around.
AU.addPreservedID(LoopSimplifyID);
- AU.addPreserved("domfrontier");
AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
@@ -3815,6 +3824,9 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<ScalarEvolution>();
+ // Requiring LoopSimplify a second time here prevents IVUsers from running
+ // twice, since LoopSimplify was invalidated by running ScalarEvolution.
+ AU.addRequiredID(LoopSimplifyID);
AU.addRequired<IVUsers>();
AU.addPreserved<IVUsers>();
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index d0edfa2..80b263a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -16,7 +16,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -27,7 +27,7 @@
using namespace llvm;
static cl::opt<unsigned>
-UnrollThreshold("unroll-threshold", cl::init(200), cl::Hidden,
+UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
cl::desc("The cut-off point for automatic loop unrolling"));
static cl::opt<unsigned>
@@ -43,12 +43,20 @@ namespace {
class LoopUnroll : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopUnroll() : LoopPass(ID) {}
+ LoopUnroll() : LoopPass(ID) {
+ initializeLoopUnrollPass(*PassRegistry::getPassRegistry());
+ }
/// A magic value for use with the Threshold parameter to indicate
/// that the loop unroll should be performed regardless of how much
/// code expansion would result.
static const unsigned NoThreshold = UINT_MAX;
+
+ // Threshold to use when optsize is specified (and there is no
+ // explicit -unroll-threshold).
+ static const unsigned OptSizeUnrollThreshold = 50;
+
+ unsigned CurrentThreshold;
bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -73,7 +81,11 @@ namespace {
}
char LoopUnroll::ID = 0;
-INITIALIZE_PASS(LoopUnroll, "loop-unroll", "Unroll loops", false, false);
+INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
Pass *llvm::createLoopUnrollPass() { return new LoopUnroll(); }
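This diff repeatedly replaces the single INITIALIZE_PASS macro with the BEGIN/DEPENDENCY/END triple. A toy model of the pattern the triple sets up — all names and types below are hypothetical stand-ins, not the real macro expansion — is that the generated initializer runs every dependency's initializer before registering the pass itself, which is why the constructors in this diff can safely call initializeLoopUnrollPass() and friends:

```cpp
#include <string>
#include <vector>

// Hypothetical stand-in for PassRegistry: records what has been
// initialized, in order.
struct ToyRegistry { std::vector<std::string> Registered; };

static void initLoopInfo(ToyRegistry &R)     { R.Registered.push_back("loops"); }
static void initLoopSimplify(ToyRegistry &R) { R.Registered.push_back("loopsimplify"); }
static void initLCSSA(ToyRegistry &R)        { R.Registered.push_back("lcssa"); }

// Shape of what the BEGIN/_DEPENDENCY/_END triple arranges for
// loop-unroll: dependencies first, then the pass itself.
static void initLoopUnroll(ToyRegistry &R) {
  initLoopInfo(R);
  initLoopSimplify(R);
  initLCSSA(R);
  R.Registered.push_back("loop-unroll");
}
```

The real expansion also guards against repeated initialization; that is elided here.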
@@ -83,8 +95,16 @@ static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls) {
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
Metrics.analyzeBasicBlock(*I);
- NumCalls = Metrics.NumCalls;
- return Metrics.NumInsts;
+ NumCalls = Metrics.NumInlineCandidates;
+
+ unsigned LoopSize = Metrics.NumInsts;
+
+  // Don't allow an estimate of size zero. This would allow unrolling of loops
+ // with huge iteration counts, which is a compile time problem even if it's
+ // not a problem for code quality.
+ if (LoopSize == 0) LoopSize = 1;
+
+ return LoopSize;
}
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
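To see why ApproximateLoopSize now clamps the estimate to 1, consider hypothetical numbers (this helper is illustrative only):

```cpp
#include <cstdint>

// With a measured LoopSize of 0, Size = LoopSize * Count is 0 for any
// Count, so even a loop with 2^20 iterations would pass every
// threshold and be fully unrolled -- the compile-time problem the
// comment above warns about. Clamping to 1 restores a sane bound.
static bool exceedsThreshold(unsigned LoopSize, unsigned Count,
                             unsigned Threshold) {
  if (LoopSize == 0) LoopSize = 1;               // the guard from the hunk
  return (uint64_t)LoopSize * Count > Threshold; // 1 * (1<<20) > 150
}
```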
@@ -94,6 +114,15 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
<< "] Loop %" << Header->getName() << "\n");
(void)Header;
+
+ // Determine the current unrolling threshold. While this is normally set
+ // from UnrollThreshold, it is overridden to a smaller value if the current
+ // function is marked as optimize-for-size, and the unroll threshold was
+ // not user specified.
+ CurrentThreshold = UnrollThreshold;
+ if (Header->getParent()->hasFnAttr(Attribute::OptimizeForSize) &&
+ UnrollThreshold.getNumOccurrences() == 0)
+ CurrentThreshold = OptSizeUnrollThreshold;
// Find trip count
unsigned TripCount = L->getSmallConstantTripCount();
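The override added above hinges on cl::opt's getNumOccurrences(): an explicit -unroll-threshold always wins over the optsize default. A minimal restatement of the rule (helper name hypothetical):

```cpp
// Returns the threshold runOnLoop should enforce. The optsize value
// (50) applies only when the function is marked optimize-for-size and
// the user did not pass -unroll-threshold explicitly.
static unsigned pickThreshold(bool OptForSize, bool UserSetThreshold,
                              unsigned FlagValue, unsigned OptSizeValue) {
  if (OptForSize && !UserSetThreshold)
    return OptSizeValue;
  return FlagValue; // 150 by default, or whatever the user chose
}
```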
@@ -111,25 +140,25 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
}
// Enforce the threshold.
- if (UnrollThreshold != NoThreshold) {
- unsigned NumCalls;
- unsigned LoopSize = ApproximateLoopSize(L, NumCalls);
+ if (CurrentThreshold != NoThreshold) {
+ unsigned NumInlineCandidates;
+ unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates);
DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
- if (NumCalls != 0) {
- DEBUG(dbgs() << " Not unrolling loop with function calls.\n");
+ if (NumInlineCandidates != 0) {
+ DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
return false;
}
uint64_t Size = (uint64_t)LoopSize*Count;
- if (TripCount != 1 && Size > UnrollThreshold) {
+ if (TripCount != 1 && Size > CurrentThreshold) {
DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
- << " because size: " << Size << ">" << UnrollThreshold << "\n");
+ << " because size: " << Size << ">" << CurrentThreshold << "\n");
if (!UnrollAllowPartial) {
DEBUG(dbgs() << " will not try to unroll partially because "
<< "-unroll-allow-partial not given\n");
return false;
}
// Reduce unroll count to be modulo of TripCount for partial unrolling
- Count = UnrollThreshold / LoopSize;
+ Count = CurrentThreshold / LoopSize;
while (Count != 0 && TripCount%Count != 0) {
Count--;
}
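The partial-unroll reduction above searches downward for a count that divides the trip count. A worked sketch: with a budget of CurrentThreshold / LoopSize = 4 and TripCount = 10, Count steps 4 → 3 → 2 and settles on 2.

```cpp
// Largest unroll factor <= Budget that evenly divides TripCount;
// 0 means no usable partial unroll factor exists.
static unsigned partialUnrollCount(unsigned Budget, unsigned TripCount) {
  unsigned Count = Budget;
  while (Count != 0 && TripCount % Count != 0)
    --Count;
  return Count; // e.g. partialUnrollCount(4, 10) == 2
}
```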
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 9afe428..b4e3d31 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -32,12 +32,12 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
-#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -93,7 +93,9 @@ namespace {
explicit LoopUnswitch(bool Os = false) :
LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
currentLoop(NULL), DT(NULL), loopHeader(NULL),
- loopPreheader(NULL) {}
+ loopPreheader(NULL) {
+ initializeLoopUnswitchPass(*PassRegistry::getPassRegistry());
+ }
bool runOnLoop(Loop *L, LPPassManager &LPM);
bool processCurrentLoop();
@@ -109,6 +111,7 @@ namespace {
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<DominatorTree>();
+ AU.addPreserved<ScalarEvolution>();
}
private:
@@ -158,7 +161,13 @@ namespace {
};
}
char LoopUnswitch::ID = 0;
-INITIALIZE_PASS(LoopUnswitch, "loop-unswitch", "Unswitch loops", false, false);
+INITIALIZE_PASS_BEGIN(LoopUnswitch, "loop-unswitch", "Unswitch loops",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_END(LoopUnswitch, "loop-unswitch", "Unswitch loops",
+ false, false)
Pass *llvm::createLoopUnswitchPass(bool Os) {
return new LoopUnswitch(Os);
@@ -450,22 +459,9 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
return true;
}
-// RemapInstruction - Convert the instruction operands from referencing the
-// current values into those specified by VMap.
-//
-static inline void RemapInstruction(Instruction *I,
- ValueMap<const Value *, Value*> &VMap) {
- for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
- Value *Op = I->getOperand(op);
- ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
- if (It != VMap.end()) Op = It->second;
- I->setOperand(op, Op);
- }
-}
-
/// CloneLoop - Recursively clone the specified loop and all of its children,
/// mapping the blocks with the specified map.
-static Loop *CloneLoop(Loop *L, Loop *PL, ValueMap<const Value*, Value*> &VM,
+static Loop *CloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
LoopInfo *LI, LPPassManager *LPM) {
Loop *New = new Loop();
LPM->insertLoop(New, PL);
@@ -580,6 +576,9 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
<< " blocks] in Function " << F->getName()
<< " when '" << *Val << "' == " << *LIC << "\n");
+ if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
+ SE->forgetLoop(L);
+
LoopBlocks.clear();
NewBlocks.clear();
@@ -609,7 +608,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// the loop preheader and exit blocks), keeping track of the mapping between
// the instructions and blocks.
NewBlocks.reserve(LoopBlocks.size());
- ValueMap<const Value*, Value*> VMap;
+ ValueToValueMapTy VMap;
for (unsigned i = 0, e = LoopBlocks.size(); i != e; ++i) {
BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], VMap, ".us", F);
NewBlocks.push_back(NewBB);
@@ -647,7 +646,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (BasicBlock::iterator I = ExitSucc->begin(); isa<PHINode>(I); ++I) {
PN = cast<PHINode>(I);
Value *V = PN->getIncomingValueForBlock(ExitBlocks[i]);
- ValueMap<const Value *, Value*>::iterator It = VMap.find(V);
+ ValueToValueMapTy::iterator It = VMap.find(V);
if (It != VMap.end()) V = It->second;
PN->addIncoming(V, NewExit);
}
@@ -657,7 +656,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i)
for (BasicBlock::iterator I = NewBlocks[i]->begin(),
E = NewBlocks[i]->end(); I != E; ++I)
- RemapInstruction(I, VMap);
+ RemapInstruction(I, VMap,RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);

// Rewrite the original preheader to select between versions of the loop.
BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
@@ -961,13 +960,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
while (!Worklist.empty()) {
Instruction *I = Worklist.back();
Worklist.pop_back();
-
- // Simple constant folding.
- if (Constant *C = ConstantFoldInstruction(I)) {
- ReplaceUsesOfWith(I, C, Worklist, L, LPM);
- continue;
- }
-
+
// Simple DCE.
if (isInstructionTriviallyDead(I)) {
DEBUG(dbgs() << "Remove dead instruction '" << *I);
@@ -982,15 +975,16 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
++NumSimplify;
continue;
}
-
+
// See if instruction simplification can hack this up. This is common for
// things like "select false, X, Y" after unswitching made the condition be
// 'false'.
- if (Value *V = SimplifyInstruction(I)) {
- ReplaceUsesOfWith(I, V, Worklist, L, LPM);
- continue;
- }
-
+ if (Value *V = SimplifyInstruction(I, 0, DT))
+ if (LI->replacementPreservesLCSSAForm(I, V)) {
+ ReplaceUsesOfWith(I, V, Worklist, L, LPM);
+ continue;
+ }
+
// Special case hacks that appear commonly in unswitched code.
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
if (BI->isUnconditional()) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 973ffe7..9087b46 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -14,26 +14,15 @@
#define DEBUG_TYPE "loweratomic"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
-#include "llvm/Instruction.h"
-#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/IRBuilder.h"
-
using namespace llvm;
-namespace {
-
-bool LowerAtomicIntrinsic(CallInst *CI) {
- IRBuilder<> Builder(CI->getParent(), CI);
-
- Function *Callee = CI->getCalledFunction();
- if (!Callee)
- return false;
-
- unsigned IID = Callee->getIntrinsicID();
+static bool LowerAtomicIntrinsic(IntrinsicInst *II) {
+ IRBuilder<> Builder(II->getParent(), II);
+ unsigned IID = II->getIntrinsicID();
switch (IID) {
case Intrinsic::memory_barrier:
break;
@@ -48,80 +37,70 @@ bool LowerAtomicIntrinsic(CallInst *CI) {
case Intrinsic::atomic_load_min:
case Intrinsic::atomic_load_umax:
case Intrinsic::atomic_load_umin: {
- Value *Ptr = CI->getArgOperand(0);
- Value *Delta = CI->getArgOperand(1);
+ Value *Ptr = II->getArgOperand(0), *Delta = II->getArgOperand(1);
LoadInst *Orig = Builder.CreateLoad(Ptr);
Value *Res = NULL;
switch (IID) {
- default: assert(0 && "Unrecognized atomic modify operation");
- case Intrinsic::atomic_load_add:
- Res = Builder.CreateAdd(Orig, Delta);
- break;
- case Intrinsic::atomic_load_sub:
- Res = Builder.CreateSub(Orig, Delta);
- break;
- case Intrinsic::atomic_load_and:
- Res = Builder.CreateAnd(Orig, Delta);
- break;
- case Intrinsic::atomic_load_nand:
- Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
- break;
- case Intrinsic::atomic_load_or:
- Res = Builder.CreateOr(Orig, Delta);
- break;
- case Intrinsic::atomic_load_xor:
- Res = Builder.CreateXor(Orig, Delta);
- break;
- case Intrinsic::atomic_load_max:
- Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
- Delta,
- Orig);
- break;
- case Intrinsic::atomic_load_min:
- Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
- Orig,
- Delta);
- break;
- case Intrinsic::atomic_load_umax:
- Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
- Delta,
- Orig);
- break;
- case Intrinsic::atomic_load_umin:
- Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
- Orig,
- Delta);
- break;
+ default: assert(0 && "Unrecognized atomic modify operation");
+ case Intrinsic::atomic_load_add:
+ Res = Builder.CreateAdd(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_sub:
+ Res = Builder.CreateSub(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_and:
+ Res = Builder.CreateAnd(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_nand:
+ Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
+ break;
+ case Intrinsic::atomic_load_or:
+ Res = Builder.CreateOr(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_xor:
+ Res = Builder.CreateXor(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_max:
+ Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+ Delta, Orig);
+ break;
+ case Intrinsic::atomic_load_min:
+ Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+ Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_umax:
+ Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+ Delta, Orig);
+ break;
+ case Intrinsic::atomic_load_umin:
+ Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+ Orig, Delta);
+ break;
}
Builder.CreateStore(Res, Ptr);
- CI->replaceAllUsesWith(Orig);
+ II->replaceAllUsesWith(Orig);
break;
}
case Intrinsic::atomic_swap: {
- Value *Ptr = CI->getArgOperand(0);
- Value *Val = CI->getArgOperand(1);
-
+ Value *Ptr = II->getArgOperand(0), *Val = II->getArgOperand(1);
LoadInst *Orig = Builder.CreateLoad(Ptr);
Builder.CreateStore(Val, Ptr);
-
- CI->replaceAllUsesWith(Orig);
+ II->replaceAllUsesWith(Orig);
break;
}
case Intrinsic::atomic_cmp_swap: {
- Value *Ptr = CI->getArgOperand(0);
- Value *Cmp = CI->getArgOperand(1);
- Value *Val = CI->getArgOperand(2);
+ Value *Ptr = II->getArgOperand(0), *Cmp = II->getArgOperand(1);
+ Value *Val = II->getArgOperand(2);
LoadInst *Orig = Builder.CreateLoad(Ptr);
Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
-
- CI->replaceAllUsesWith(Orig);
+ II->replaceAllUsesWith(Orig);
break;
}
@@ -129,33 +108,32 @@ bool LowerAtomicIntrinsic(CallInst *CI) {
return false;
}
- assert(CI->use_empty() &&
+ assert(II->use_empty() &&
"Lowering should have eliminated any uses of the intrinsic call!");
- CI->eraseFromParent();
+ II->eraseFromParent();
return true;
}
-struct LowerAtomic : public BasicBlockPass {
- static char ID;
- LowerAtomic() : BasicBlockPass(ID) {}
- bool runOnBasicBlock(BasicBlock &BB) {
- bool Changed = false;
- for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
- Instruction *Inst = DI++;
- if (CallInst *CI = dyn_cast<CallInst>(Inst))
- Changed |= LowerAtomicIntrinsic(CI);
+namespace {
+ struct LowerAtomic : public BasicBlockPass {
+ static char ID;
+ LowerAtomic() : BasicBlockPass(ID) {
+ initializeLowerAtomicPass(*PassRegistry::getPassRegistry());
}
- return Changed;
- }
-
-};
-
+ bool runOnBasicBlock(BasicBlock &BB) {
+ bool Changed = false;
+ for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; )
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DI++))
+ Changed |= LowerAtomicIntrinsic(II);
+ return Changed;
+ }
+ };
}
char LowerAtomic::ID = 0;
INITIALIZE_PASS(LowerAtomic, "loweratomic",
"Lower atomic intrinsics to non-atomic form",
- false, false);
+ false, false)
Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
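At the source level, the lowering this pass performs amounts to replacing an atomic read-modify-write with a plain load/compute/store triple — sound only when atomicity is genuinely unneeded, which is the pass's premise. A hedged C++ analogue (not LLVM API):

```cpp
// Non-atomic analogue of lowering llvm.atomic.load.add: read the old
// value, store the updated one, and yield the old value, mirroring
// the replaceAllUsesWith(Orig) above.
static int loweredAtomicAdd(int *Ptr, int Delta) {
  int Orig = *Ptr;      // LoadInst
  *Ptr = Orig + Delta;  // CreateAdd + StoreInst
  return Orig;          // the intrinsic's result
}
```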
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 24fae42..bde0e53 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -14,16 +14,18 @@
#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
@@ -32,62 +34,10 @@ using namespace llvm;
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
-
-/// isBytewiseValue - If the specified value can be set by repeating the same
-/// byte in memory, return the i8 value that it is represented with. This is
-/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
-/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
-/// byte store (e.g. i16 0x1234), return null.
-static Value *isBytewiseValue(Value *V) {
- LLVMContext &Context = V->getContext();
-
- // All byte-wide stores are splatable, even of arbitrary variables.
- if (V->getType()->isIntegerTy(8)) return V;
-
- // Constant float and double values can be handled as integer values if the
- // corresponding integer value is "byteable". An important case is 0.0.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
- if (CFP->getType()->isFloatTy())
- V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
- if (CFP->getType()->isDoubleTy())
- V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
- // Don't handle long double formats, which have strange constraints.
- }
-
- // We can handle constant integers that are power of two in size and a
- // multiple of 8 bits.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- unsigned Width = CI->getBitWidth();
- if (isPowerOf2_32(Width) && Width > 8) {
- // We can handle this value if the recursive binary decomposition is the
- // same at all levels.
- APInt Val = CI->getValue();
- APInt Val2;
- while (Val.getBitWidth() != 8) {
- unsigned NextWidth = Val.getBitWidth()/2;
- Val2 = Val.lshr(NextWidth);
- Val2.trunc(Val.getBitWidth()/2);
- Val.trunc(Val.getBitWidth()/2);
-
- // If the top/bottom halves aren't the same, reject it.
- if (Val != Val2)
- return 0;
- }
- return ConstantInt::get(Context, Val);
- }
- }
-
- // Conceptually, we could handle things like:
- // %a = zext i8 %X to i16
- // %b = shl i16 %a, 8
- // %c = or i16 %a, %b
- // but until there is an example that actually needs this, it doesn't seem
- // worth worrying about.
- return 0;
-}
+STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
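The isBytewiseValue helper deleted above now lives in ValueTracking (note the new ValueTracking.h include). Its core test halves the value repeatedly and demands that both halves agree at every level; a standalone mirror for plain integers, assuming power-of-two widths of at least 8 bits:

```cpp
#include <cstdint>

// 0xF0F0 -> 0xF0 / 0xF0 -> byte 0xF0 succeeds; 0x1234 fails at the
// first split because 0x12 != 0x34.
static bool isSplatOfByte(uint64_t V, unsigned Bits, uint8_t &Byte) {
  while (Bits != 8) {
    unsigned Half = Bits / 2;
    uint64_t Hi = V >> Half;
    uint64_t Lo = V & ((1ULL << Half) - 1);
    if (Hi != Lo)
      return false;   // halves differ: not a repeated byte
    V = Lo;
    Bits = Half;
  }
  Byte = (uint8_t)V;
  return true;
}
```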
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
- bool &VariableIdxFound, TargetData &TD) {
+ bool &VariableIdxFound, const TargetData &TD){
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -120,14 +70,31 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
- TargetData &TD) {
+ const TargetData &TD) {
+ Ptr1 = Ptr1->stripPointerCasts();
+ Ptr2 = Ptr2->stripPointerCasts();
+ GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
+ GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
+
+ bool VariableIdxFound = false;
+
+ // If one pointer is a GEP and the other isn't, then see if the GEP is a
+ // constant offset from the base, as in "P" and "gep P, 1".
+ if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
+ Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
+ return !VariableIdxFound;
+ }
+
+ if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
+ Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
+ return !VariableIdxFound;
+ }
+
// Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
// base. After that base, they may have some number of common (and
// potentially variable) indices. After that they handle some constant
// offset, which determines their offset from each other. At this point, we
// handle no other case.
- GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
- GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
return false;
@@ -137,7 +104,6 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
break;
- bool VariableIdxFound = false;
int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
if (VariableIdxFound) return false;
@@ -171,7 +137,7 @@ struct MemsetRange {
unsigned Alignment;
/// TheStores - The actual stores that make up this range.
- SmallVector<StoreInst*, 16> TheStores;
+ SmallVector<Instruction*, 16> TheStores;
bool isProfitableToUseMemset(const TargetData &TD) const;
@@ -181,10 +147,19 @@ struct MemsetRange {
bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
// If we found more than 8 stores to merge or 64 bytes, use memset.
if (TheStores.size() >= 8 || End-Start >= 64) return true;
+
+ // If there is nothing to merge, don't do anything.
+ if (TheStores.size() < 2) return false;
+
+  // If any of the stores is a memset, then it is always good to extend the
+ // memset.
+ for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
+ if (!isa<StoreInst>(TheStores[i]))
+ return true;
// Assume that the code generator is capable of merging pairs of stores
// together if it wants to.
- if (TheStores.size() <= 2) return false;
+ if (TheStores.size() == 2) return false;
// If we have fewer than 8 stores, it can still be worthwhile to do this.
// For example, merging 4 i8 stores into an i32 store is useful almost always.
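Pulling the updated heuristic together (simplified types; the TargetData-driven legality checks that follow in the real function are reduced to a boolean here):

```cpp
// Large runs always win; singletons never do; any pre-existing memset
// in the range makes widening profitable; and a pair of plain stores
// is left for the code generator to merge on its own.
static bool profitableToUseMemset(unsigned NumStores, long Bytes,
                                  bool ContainsMemset,
                                  bool DeeperChecksSayYes) {
  if (NumStores >= 8 || Bytes >= 64) return true;
  if (NumStores < 2)                 return false;
  if (ContainsMemset)                return true;
  if (NumStores == 2)                return false;
  return DeeperChecksSayYes;
}
```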
@@ -215,31 +190,53 @@ class MemsetRanges {
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
- TargetData &TD;
+ const TargetData &TD;
public:
- MemsetRanges(TargetData &td) : TD(td) {}
+ MemsetRanges(const TargetData &td) : TD(td) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
const_iterator end() const { return Ranges.end(); }
bool empty() const { return Ranges.empty(); }
- void addStore(int64_t OffsetFromFirst, StoreInst *SI);
+ void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ addStore(OffsetFromFirst, SI);
+ else
+ addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
+ }
+
+ void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
+ int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+
+ addRange(OffsetFromFirst, StoreSize,
+ SI->getPointerOperand(), SI->getAlignment(), SI);
+ }
+
+ void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
+ int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+ addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
+ }
+
+ void addRange(int64_t Start, int64_t Size, Value *Ptr,
+ unsigned Alignment, Instruction *Inst);
+
};
} // end anon namespace
-/// addStore - Add a new store to the MemsetRanges data structure. This adds a
+/// addRange - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
-void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
- int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());
-
- // Do a linear search of the ranges to see if this can be joined and/or to
- // find the insertion point in the list. We keep the ranges sorted for
- // simplicity here. This is a linear search of a linked list, which is ugly,
- // however the number of ranges is limited, so this won't get crazy slow.
+///
+/// Do a linear search of the ranges to see if this can be joined and/or to
+/// find the insertion point in the list. We keep the ranges sorted for
+/// simplicity here. This is a linear search of a linked list, which is ugly;
+/// however, the number of ranges is limited, so this won't get crazy slow.
+void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
+ unsigned Alignment, Instruction *Inst) {
+ int64_t End = Start+Size;
range_iterator I = Ranges.begin(), E = Ranges.end();
while (I != E && Start > I->End)
@@ -252,14 +249,14 @@ void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
MemsetRange &R = *Ranges.insert(I, MemsetRange());
R.Start = Start;
R.End = End;
- R.StartPtr = SI->getPointerOperand();
- R.Alignment = SI->getAlignment();
- R.TheStores.push_back(SI);
+ R.StartPtr = Ptr;
+ R.Alignment = Alignment;
+ R.TheStores.push_back(Inst);
return;
}
-
+
// This store overlaps with I, add it.
- I->TheStores.push_back(SI);
+ I->TheStores.push_back(Inst);
// At this point, we may have an interval that completely contains our store.
// If so, just add it to the interval and return.
@@ -274,8 +271,8 @@ void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
// stopped on *it*.
if (Start < I->Start) {
I->Start = Start;
- I->StartPtr = SI->getPointerOperand();
- I->Alignment = SI->getAlignment();
+ I->StartPtr = Ptr;
+ I->Alignment = Alignment;
}
// Now we know that Start <= I->End and Start >= I->Start (so the startpoint
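addRange's merging discipline is easier to see on a toy sorted-interval list (the folding of later overlapping ranges, which the real code also performs, is elided):

```cpp
#include <algorithm>
#include <list>

struct ToyRange { long Start, End; };

// Keep Ranges sorted by Start; a new [Start, End) either becomes a
// fresh disjoint range or grows the first range it touches in place.
static void addToyRange(std::list<ToyRange> &Ranges, long Start, long End) {
  std::list<ToyRange>::iterator I = Ranges.begin(), E = Ranges.end();
  while (I != E && Start > I->End)
    ++I;                                // linear scan; the list is short
  if (I == E || End < I->Start) {
    ToyRange R = {Start, End};
    Ranges.insert(I, R);                // disjoint: insert a new range
    return;
  }
  I->Start = std::min(I->Start, Start); // overlap: widen in place
  I->End   = std::max(I->End, End);
}
```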
@@ -301,10 +298,16 @@ void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
namespace {
class MemCpyOpt : public FunctionPass {
- bool runOnFunction(Function &F);
+ MemoryDependenceAnalysis *MD;
+ const TargetData *TD;
public:
static char ID; // Pass identification, replacement for typeid
- MemCpyOpt() : FunctionPass(ID) {}
+ MemCpyOpt() : FunctionPass(ID) {
+ initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
+ MD = 0;
+ }
+
+ bool runOnFunction(Function &F);
private:
// This transformation requires dominator postdominator info
@@ -319,9 +322,17 @@ namespace {
  // Helper functions
bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
+ bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
bool processMemCpy(MemCpyInst *M);
bool processMemMove(MemMoveInst *M);
- bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
+ bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
+ uint64_t cpyLen, CallInst *C);
+ bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
+ uint64_t MSize);
+ bool processByValArgument(CallSite CS, unsigned ArgNo);
+ Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
+ Value *ByteVal);
+
bool iterateOnFunction(Function &F);
};
@@ -331,165 +342,199 @@ namespace {
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
-INITIALIZE_PASS(MemCpyOpt, "memcpyopt", "MemCpy Optimization", false, false);
-
-
+INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
+ false, false)
-/// processStore - When GVN is scanning forward over instructions, we look for
+/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away. In particular, this looks for stores to
-/// neighboring locations of memory. If it sees enough consequtive ones
-/// (currently 4) it attempts to merge them together into a memcpy/memset.
-bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
- if (SI->isVolatile()) return false;
-
- LLVMContext &Context = SI->getContext();
-
- // There are two cases that are interesting for this code to handle: memcpy
- // and memset. Right now we only handle memset.
+/// neighboring locations of memory. If it sees enough consecutive ones, it
+/// attempts to merge them together into a memcpy/memset.
+Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
+ Value *StartPtr, Value *ByteVal) {
+ if (TD == 0) return 0;
- // Ensure that the value being stored is something that can be memset'able a
- // byte at a time like "0" or "-1" or any width, as well as things like
- // 0xA0A0A0A0 and 0.0.
- Value *ByteVal = isBytewiseValue(SI->getOperand(0));
- if (!ByteVal)
- return false;
-
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
- if (!TD) return false;
- AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- Module *M = SI->getParent()->getParent()->getParent();
-
// Okay, so we now have a single store that can be splatable. Scan to find
// all subsequent stores of the same value to offset from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
MemsetRanges Ranges(*TD);
- Value *StartPtr = SI->getPointerOperand();
-
- BasicBlock::iterator BI = SI;
+ BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
- if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
- // If the call is readnone, ignore it, otherwise bail out. We don't even
- // allow readonly here because we don't want something like:
+ if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
+ // If the instruction is readnone, ignore it, otherwise bail out. We
+ // don't even allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
- if (AA.getModRefBehavior(CallSite(BI)) ==
- AliasAnalysis::DoesNotAccessMemory)
- continue;
-
- // TODO: If this is a memset, try to join it in.
-
- break;
- } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
- break;
-
- // If this is a non-store instruction it is fine, ignore it.
- StoreInst *NextStore = dyn_cast<StoreInst>(BI);
- if (NextStore == 0) continue;
+ if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
+ break;
+ continue;
+ }
- // If this is a store, see if we can merge it in.
- if (NextStore->isVolatile()) break;
+ if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
+ // If this is a store, see if we can merge it in.
+ if (NextStore->isVolatile()) break;
- // Check to see if this stored value is of the same byte-splattable value.
- if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
- break;
-
- // Check to see if this store is to a constant offset from the start ptr.
- int64_t Offset;
- if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
- break;
-
- Ranges.addStore(Offset, NextStore);
+ // Check to see if this stored value is of the same byte-splattable value.
+ if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
+ break;
+
+ // Check to see if this store is to a constant offset from the start ptr.
+ int64_t Offset;
+ if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
+ Offset, *TD))
+ break;
+
+ Ranges.addStore(Offset, NextStore);
+ } else {
+ MemSetInst *MSI = cast<MemSetInst>(BI);
+
+ if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
+ !isa<ConstantInt>(MSI->getLength()))
+ break;
+
+ // Check to see if this store is to a constant offset from the start ptr.
+ int64_t Offset;
+ if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
+ break;
+
+ Ranges.addMemSet(Offset, MSI);
+ }
}
-
+
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
- return false;
+ return 0;
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
// interesting as a small compile-time optimization.
- Ranges.addStore(0, SI);
-
-
+ Ranges.addInst(0, StartInst);
+
+  // If we create any memsets, we put them right before the first instruction
+  // that isn't part of the memset block.  This ensures that the memset is
+  // dominated by any addressing instruction needed by the start of the block.
+ IRBuilder<> Builder(BI);
+
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
- bool MadeChange = false;
+ Instruction *AMemSet = 0;
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
-
+
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
if (!Range.isProfitableToUseMemset(*TD))
continue;
- // Otherwise, we do want to transform this! Create a new memset. We put
- // the memset right before the first instruction that isn't part of this
- // memset block. This ensure that the memset is dominated by any addressing
- // instruction needed by the start of the block.
- BasicBlock::iterator InsertPt = BI;
-
+ // Otherwise, we do want to transform this! Create a new memset.
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
-
+
// Determine alignment
unsigned Alignment = Range.Alignment;
if (Alignment == 0) {
const Type *EltType =
- cast<PointerType>(StartPtr->getType())->getElementType();
+ cast<PointerType>(StartPtr->getType())->getElementType();
Alignment = TD->getABITypeAlignment(EltType);
}
-
- // Cast the start ptr to be i8* as memset requires.
- const PointerType* StartPTy = cast<PointerType>(StartPtr->getType());
- const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
- StartPTy->getAddressSpace());
- if (StartPTy!= i8Ptr)
- StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
- InsertPt);
-
- Value *Ops[] = {
- StartPtr, ByteVal, // Start, value
- // size
- ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
- // align
- ConstantInt::get(Type::getInt32Ty(Context), Alignment),
- // volatile
- ConstantInt::get(Type::getInt1Ty(Context), 0),
- };
- const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
-
- Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
-
- Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
+
+ AMemSet =
+ Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
+
DEBUG(dbgs() << "Replace stores:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
- dbgs() << *Range.TheStores[i];
- dbgs() << "With: " << *C); C=C;
-
- // Don't invalidate the iterator
- BBI = BI;
-
+ dbgs() << *Range.TheStores[i] << '\n';
+ dbgs() << "With: " << *AMemSet << '\n');
+
// Zap all the stores.
- for (SmallVector<StoreInst*, 16>::const_iterator
+ for (SmallVector<Instruction*, 16>::const_iterator
SI = Range.TheStores.begin(),
- SE = Range.TheStores.end(); SI != SE; ++SI)
+ SE = Range.TheStores.end(); SI != SE; ++SI) {
+ MD->removeInstruction(*SI);
(*SI)->eraseFromParent();
+ }
++NumMemSetInfer;
- MadeChange = true;
}
- return MadeChange;
+ return AMemSet;
+}
+
+
+bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
+ if (SI->isVolatile()) return false;
+
+ if (TD == 0) return false;
+
+ // Detect cases where we're performing call slot forwarding, but
+ // happen to be using a load-store pair to implement it, rather than
+ // a memcpy.
+ if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
+ if (!LI->isVolatile() && LI->hasOneUse()) {
+ MemDepResult dep = MD->getDependency(LI);
+ CallInst *C = 0;
+ if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
+ C = dyn_cast<CallInst>(dep.getInst());
+
+ if (C) {
+ bool changed = performCallSlotOptzn(LI,
+ SI->getPointerOperand()->stripPointerCasts(),
+ LI->getPointerOperand()->stripPointerCasts(),
+ TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
+ if (changed) {
+ MD->removeInstruction(SI);
+ SI->eraseFromParent();
+ MD->removeInstruction(LI);
+ LI->eraseFromParent();
+ ++NumMemCpyInstr;
+ return true;
+ }
+ }
+ }
+ }
+
+ // There are two cases that are interesting for this code to handle: memcpy
+ // and memset. Right now we only handle memset.
+
+  // Ensure that the value being stored is something that is memset'able a
+ // byte at a time like "0" or "-1" or any width, as well as things like
+ // 0xA0A0A0A0 and 0.0.
+ if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
+ if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
+ ByteVal)) {
+ BBI = I; // Don't invalidate iterator.
+ return true;
+ }
+
+ return false;
+}
+
+bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
+ // See if there is another memset or store neighboring this memset which
+ // allows us to widen out the memset to do a single larger store.
+ if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
+ if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
+ MSI->getValue())) {
+ BBI = I; // Don't invalidate iterator.
+ return true;
+ }
+ return false;
}
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
-bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
+bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
+ Value *cpyDest, Value *cpySrc,
+ uint64_t cpyLen, CallInst *C) {
// The general transformation to keep in mind is
//
// call @func(..., src, ...)
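A conceptual before/after for the call slot optimization, which this diff makes reachable from plain load/store pairs as well as from memcpys (Result and producer below are hypothetical):

```cpp
#include <cstring>

struct Result { char Bytes[64]; };
void producer(Result *Out); // callee that fills *Out

// Before: the callee writes into a temporary whose contents are then
// copied into the real destination.
void beforeOpt(Result *Dst) {
  Result Tmp;
  producer(&Tmp);
  std::memcpy(Dst, &Tmp, sizeof(Result)); // the copy to eliminate
}

// After: when Tmp is otherwise dead and writing Dst early is safe,
// the callee writes its result directly into place.
void afterOpt(Result *Dst) {
  producer(Dst);
}
```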
@@ -506,24 +551,15 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// Deliberately get the source and destination with bitcasts stripped away,
// because we'll need to do type comparisons based on the underlying type.
- Value *cpyDest = cpy->getDest();
- Value *cpySrc = cpy->getSource();
CallSite CS(C);
- // We need to be able to reason about the size of the memcpy, so we require
- // that it be a constant.
- ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
- if (!cpyLength)
- return false;
-
// Require that src be an alloca. This simplifies the reasoning considerably.
AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
// Check that all of src is copied to dest.
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
- if (!TD) return false;
+ if (TD == 0) return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
@@ -532,7 +568,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
- if (cpyLength->getZExtValue() < srcSize)
+ if (cpyLen < srcSize)
return false;
// Check that accessing the first srcSize bytes of dest will not cause a
@@ -601,8 +637,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// the use analysis, we also need to know that it does not sneakily
// access dest. We rely on AA to figure this out for us.
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
- AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
return false;
// All the checks have passed, so do the transformation.
@@ -625,99 +660,142 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// Drop any cached information about the call, because we may have changed
// its dependence information by changing its parameter.
- MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
- MD.removeInstruction(C);
+ MD->removeInstruction(C);
- // Remove the memcpy
- MD.removeInstruction(cpy);
- cpy->eraseFromParent();
+ // Remove the memcpy.
+ MD->removeInstruction(cpy);
++NumMemCpyInstr;
return true;
}
-/// processMemCpy - perform simplification of memcpy's. If we have memcpy A
-/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
-/// B to be a memcpy from X to Z (or potentially a memmove, depending on
-/// circumstances). This allows later passes to remove the first memcpy
-/// altogether.
-bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
- MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
-
- // The are two possible optimizations we can do for memcpy:
- // a) memcpy-memcpy xform which exposes redundance for DSE.
- // b) call-memcpy xform for return slot optimization.
- MemDepResult dep = MD.getDependency(M);
- if (!dep.isClobber())
- return false;
- if (!isa<MemCpyInst>(dep.getInst())) {
- if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
- return performCallSlotOptzn(M, C);
+/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
+/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
+/// copy from MDep's input if we can. MSize is the size of M's copy.
+///
+bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
+ uint64_t MSize) {
+  // We can only transform memcpy's where the dest of one is the source of the
+ // other.
+ if (M->getSource() != MDep->getDest() || MDep->isVolatile())
return false;
- }
-
- MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());
- // We can only transforms memcpy's where the dest of one is the source of the
- // other
- if (M->getSource() != MDep->getDest())
+  // If the dep instruction is reading from our current input, then it is a noop
+ // transfer and substituting the input won't change this instruction. Just
+ // ignore the input and let someone else zap MDep. This handles cases like:
+ // memcpy(a <- a)
+ // memcpy(b <- a)
+ if (M->getSource() == MDep->getSource())
return false;
  // Second, the length of the memcpy's must be the same, or the preceding one
// must be larger than the following one.
- ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
- ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
- if (!C1 || !C2)
- return false;
-
- uint64_t DepSize = C1->getValue().getZExtValue();
- uint64_t CpySize = C2->getValue().getZExtValue();
-
- if (DepSize < CpySize)
+ ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
+ ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
+ if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
return false;
- // Finally, we have to make sure that the dest of the second does not
- // alias the source of the first
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
- AliasAnalysis::NoAlias)
+
+ // Verify that the copied-from memory doesn't change in between the two
+ // transfers. For example, in:
+ // memcpy(a <- b)
+ // *b = 42;
+ // memcpy(c <- a)
+ // It would be invalid to transform the second memcpy into memcpy(c <- b).
+ //
+ // TODO: If the code between M and MDep is transparent to the destination "c",
+ // then we could still perform the xform by moving M up to the first memcpy.
+ //
+  // NOTE: This is conservative; it will stop on any read from the source loc,
+ // not just the defining memcpy.
+ MemDepResult SourceDep =
+ MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
+ false, M, M->getParent());
+ if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
- else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
- AliasAnalysis::NoAlias)
+
+ // If the dest of the second might alias the source of the first, then the
+ // source and dest might overlap. We still want to eliminate the intermediate
+ // value, but we have to generate a memmove instead of memcpy.
+ bool UseMemMove = false;
+ if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
+ UseMemMove = true;
+
+ // If all checks passed, then we can transform M.
+
+ // Make sure to use the lesser of the alignment of the source and the dest
+ // since we're changing where we're reading from, but don't want to increase
+ // the alignment past what can be read from or written to.
+ // TODO: Is this worth it if we're creating a less aligned memcpy? For
+ // example we could be moving from movaps -> movq on x86.
+ unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
+
+ IRBuilder<> Builder(M);
+ if (UseMemMove)
+ Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
+ Align, M->isVolatile());
+ else
+ Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
+ Align, M->isVolatile());
+
+ // Remove the instruction we're replacing.
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ ++NumMemCpyInstr;
+ return true;
+}
+
+
+/// processMemCpy - perform simplification of memcpy's. If we have memcpy A
+/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
+/// B to be a memcpy from X to Z (or potentially a memmove, depending on
+/// circumstances). This allows later passes to remove the first memcpy
+/// altogether.
+bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
+ // We can only optimize statically-sized memcpy's that are non-volatile.
+ ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
+ if (CopySize == 0 || M->isVolatile()) return false;
+
+ // If the source and destination of the memcpy are the same, then zap it.
+ if (M->getSource() == M->getDest()) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
return false;
- else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
- != AliasAnalysis::NoAlias)
+ }
+
+ // If copying from a constant, try to turn the memcpy into a memset.
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
+ if (GV->isConstant() && GV->hasDefinitiveInitializer())
+ if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
+ IRBuilder<> Builder(M);
+ Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
+ M->getAlignment(), false);
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ ++NumCpyToSet;
+ return true;
+ }
+
+  // There are two possible optimizations we can do for memcpy:
+  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
+ // b) call-memcpy xform for return slot optimization.
+ MemDepResult DepInfo = MD->getDependency(M);
+ if (!DepInfo.isClobber())
return false;
- // If all checks passed, then we can transform these memcpy's
- const Type *ArgTys[3] = { M->getRawDest()->getType(),
- MDep->getRawSource()->getType(),
- M->getLength()->getType() };
- Function *MemCpyFun = Intrinsic::getDeclaration(
- M->getParent()->getParent()->getParent(),
- M->getIntrinsicID(), ArgTys, 3);
+ if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
+ return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
- Value *Args[5] = {
- M->getRawDest(), MDep->getRawSource(), M->getLength(),
- M->getAlignmentCst(), M->getVolatileCst()
- };
-
- CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
-
-
- // If C and M don't interfere, then this is a valid transformation. If they
- // did, this would mean that the two sources overlap, which would be bad.
- if (MD.getDependency(C) == dep) {
- MD.removeInstruction(M);
- M->eraseFromParent();
- ++NumMemCpyInstr;
- return true;
+ if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
+ if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
+ CopySize->getZExtValue(), C)) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ return true;
+ }
}
- // Otherwise, there was no point in doing this, so we remove the call we
- // inserted and act like nothing happened.
- MD.removeInstruction(C);
- C->eraseFromParent();
return false;
}
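The memcpy-memcpy dependence transform above, restated on plain buffers: as long as b is not written between the two copies, the second copy can read from b directly, leaving the intermediate a dead for later DSE (a memmove is substituted instead when c may alias b):

```cpp
#include <cstring>

// Before: the second copy depends on the first.
void beforeXform(char *a, const char *b, char *c, std::size_t n) {
  std::memcpy(a, b, n);
  std::memcpy(c, a, n);
}

// After: the second copy reads the original source; the first is now
// a candidate for dead store elimination.
void afterXform(char *a, const char *b, char *c, std::size_t n) {
  std::memcpy(a, b, n);
  std::memcpy(c, b, n);
}
```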
@@ -726,15 +804,8 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- // If the memmove is a constant size, use it for the alias query, this allows
- // us to optimize things like: memmove(P, P+64, 64);
- uint64_t MemMoveSize = ~0ULL;
- if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
- MemMoveSize = Len->getZExtValue();
-
// See if the pointers alias.
- if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
- AliasAnalysis::NoAlias)
+ if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
return false;
DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");
@@ -749,33 +820,107 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
// MemDep may have over conservative information about this instruction, just
// conservatively flush it from the cache.
- getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);
+ MD->removeInstruction(M);
++NumMoveToCpy;
return true;
}
+/// processByValArgument - This is called on every byval argument in call sites.
+bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
+ if (TD == 0) return false;
+
+ // Find out what feeds this byval argument.
+ Value *ByValArg = CS.getArgument(ArgNo);
+ const Type *ByValTy =cast<PointerType>(ByValArg->getType())->getElementType();
+ uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
+ MemDepResult DepInfo =
+ MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
+ true, CS.getInstruction(),
+ CS.getInstruction()->getParent());
+ if (!DepInfo.isClobber())
+ return false;
+
+ // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
+ // a memcpy, see if we can byval from the source of the memcpy instead of the
+ // result.
+ MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
+ if (MDep == 0 || MDep->isVolatile() ||
+ ByValArg->stripPointerCasts() != MDep->getDest())
+ return false;
+
+  // The length of the memcpy must be larger than or equal to the size of the byval.
+ ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
+ if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
+ return false;
+
+ // Get the alignment of the byval. If it is greater than the memcpy, then we
+ // can't do the substitution. If the call doesn't specify the alignment, then
+ // it is some target specific value that we can't know.
+ unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
+ if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
+ return false;
+
+ // Verify that the copied-from memory doesn't change in between the memcpy and
+ // the byval call.
+ // memcpy(a <- b)
+ // *b = 42;
+ // foo(*a)
+ // It would be invalid to transform the second memcpy into foo(*b).
+ //
+  // NOTE: This is conservative; it will stop on any read from the source loc,
+ // not just the defining memcpy.
+ MemDepResult SourceDep =
+ MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
+ false, CS.getInstruction(), MDep->getParent());
+ if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
+ return false;
+
+ Value *TmpCast = MDep->getSource();
+ if (MDep->getSource()->getType() != ByValArg->getType())
+ TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
+ "tmpcast", CS.getInstruction());
+
+ DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
+ << " " << *MDep << "\n"
+ << " " << *CS.getInstruction() << "\n");
+
+ // Otherwise we're good! Update the byval argument.
+ CS.setArgument(ArgNo, TmpCast);
+ ++NumMemCpyInstr;
+ return true;
+}
-// MemCpyOpt::iterateOnFunction - Executes one iteration of GVN.
+/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
bool MadeChange = false;
// Walk all instruction in the function.
for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE;) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
// Avoid invalidating the iterator.
Instruction *I = BI++;
+ bool RepeatInstruction = false;
+
if (StoreInst *SI = dyn_cast<StoreInst>(I))
MadeChange |= processStore(SI, BI);
+ else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
+ RepeatInstruction = processMemSet(M, BI);
else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
- MadeChange |= processMemCpy(M);
- else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
- if (processMemMove(M)) {
- --BI; // Reprocess the new memcpy.
- MadeChange = true;
- }
+ RepeatInstruction = processMemCpy(M);
+ else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
+ RepeatInstruction = processMemMove(M);
+ else if (CallSite CS = (Value*)I) {
+ for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
+ if (CS.paramHasAttr(i+1, Attribute::ByVal))
+ MadeChange |= processByValArgument(CS, i);
+ }
+
+ // Reprocess the instruction if desired.
+ if (RepeatInstruction) {
+ if (BI != BB->begin()) --BI;
+ MadeChange = true;
}
}
}
@@ -788,14 +933,14 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
//
bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
+ MD = &getAnalysis<MemoryDependenceAnalysis>();
+ TD = getAnalysisIfAvailable<TargetData>();
while (1) {
if (!iterateOnFunction(F))
break;
MadeChange = true;
}
+ MD = 0;
return MadeChange;
}
-
-
-
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index b8afcc1..e093b52 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -77,7 +77,9 @@ namespace {
bool MadeChange;
public:
static char ID; // Pass identification, replacement for typeid
- Reassociate() : FunctionPass(ID) {}
+ Reassociate() : FunctionPass(ID) {
+ initializeReassociatePass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F);
@@ -104,7 +106,7 @@ namespace {
char Reassociate::ID = 0;
INITIALIZE_PASS(Reassociate, "reassociate",
- "Reassociate expressions", false, false);
+ "Reassociate expressions", false, false)
// Public interface to the Reassociate pass
FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }
@@ -238,6 +240,12 @@ void Reassociate::LinearizeExpr(BinaryOperator *I) {
RHS->setOperand(0, LHS);
I->setOperand(0, RHS);
+ // Conservatively clear all the optional flags, which may not hold
+ // after the reassociation.
+ I->clearSubclassOptionalData();
+ LHS->clearSubclassOptionalData();
+ RHS->clearSubclassOptionalData();
+
++NumLinear;
MadeChange = true;
DEBUG(dbgs() << "Linearized: " << *I << '\n');
@@ -339,6 +347,12 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
DEBUG(dbgs() << "RA: " << *I << '\n');
I->setOperand(0, Ops[i].Op);
I->setOperand(1, Ops[i+1].Op);
+
+ // Clear all the optional flags, which may not hold after the
+ // reassociation if the expression involved more than just this operation.
+ if (Ops.size() != 2)
+ I->clearSubclassOptionalData();
+
DEBUG(dbgs() << "TO: " << *I << '\n');
MadeChange = true;
++NumChanged;
@@ -354,6 +368,11 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
if (I->getOperand(1) != Ops[i].Op) {
DEBUG(dbgs() << "RA: " << *I << '\n');
I->setOperand(1, Ops[i].Op);
+
+ // Conservatively clear all the optional flags, which may not hold
+ // after the reassociation.
+ I->clearSubclassOptionalData();
+
DEBUG(dbgs() << "TO: " << *I << '\n');
MadeChange = true;
++NumChanged;
@@ -809,16 +828,23 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// RemoveFactorFromExpression on successive values to behave differently.
Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal);
SmallVector<Value*, 4> NewMulOps;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ for (unsigned i = 0; i != Ops.size(); ++i) {
// Only try to remove factors from expressions we're allowed to.
BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op);
if (BOp == 0 || BOp->getOpcode() != Instruction::Mul || !BOp->use_empty())
continue;
if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
- NewMulOps.push_back(V);
- Ops.erase(Ops.begin()+i);
- --i; --e;
+ // The factorized operand may occur several times. Convert them all in
+ // one fell swoop.
+ for (unsigned j = Ops.size(); j != i;) {
+ --j;
+ if (Ops[j].Op == Ops[i].Op) {
+ NewMulOps.push_back(V);
+ Ops.erase(Ops.begin()+j);
+ }
+ }
+ --i;
}
}
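// For illustration: scanning j downward lets erase() run while shifting only
// indices that have already been visited, so nothing is skipped; on the last
// step j == i, which removes Ops[i] itself, and the caller then backs up i
// exactly as the patch does. The same pattern on a plain vector (hypothetical
// names):
#include <vector>
static void eraseAllEqual(std::vector<int> &Ops, unsigned i) {
  int Val = Ops[i];
  for (unsigned j = Ops.size(); j != i;) {
    --j;
    if (Ops[j] == Val)
      Ops.erase(Ops.begin() + j);         // only indices > j shift down
  }
}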
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 506b72a..459bb06 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -36,7 +36,9 @@ STATISTIC(NumPhisDemoted, "Number of phi-nodes demoted");
namespace {
struct RegToMem : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- RegToMem() : FunctionPass(ID) {}
+ RegToMem() : FunctionPass(ID) {
+ initializeRegToMemPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(BreakCriticalEdgesID);
@@ -59,9 +61,11 @@ namespace {
}
char RegToMem::ID = 0;
-INITIALIZE_PASS(RegToMem, "reg2mem", "Demote all values to stack slots",
- false, false);
-
+INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges)
+INITIALIZE_PASS_END(RegToMem, "reg2mem", "Demote all values to stack slots",
+ false, false)
bool RegToMem::runOnFunction(Function &F) {
if (F.isDeclaration())
diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
index 6115c05..c82e929 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -481,6 +481,19 @@ private:
}
}
+ /// InsertInOverdefinedPHIs - Insert an entry in the UsersOfOverdefinedPHIs
+ /// map for I and PN, but if one is there already, do not create another.
+ /// (Duplicate entries do not break anything directly, but can lead to
+ /// exponential growth of the table in rare cases.)
+ void InsertInOverdefinedPHIs(Instruction *I, PHINode *PN) {
+ std::multimap<PHINode*, Instruction*>::iterator J, E;
+ tie(J, E) = UsersOfOverdefinedPHIs.equal_range(PN);
+ for (; J != E; ++J)
+ if (J->second == I)
+ return;
+ UsersOfOverdefinedPHIs.insert(std::make_pair(PN, I));
+ }
+
private:
friend class InstVisitor<SCCPSolver>;
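// For illustration: equal_range() bounds the duplicate check to the entries
// stored under one key, so the guarded insert costs time proportional to that
// key's bucket rather than to the whole table. A self-contained sketch of the
// same idiom (hypothetical names):
#include <map>
#include <utility>
typedef std::multimap<int, int> Table;
static void insertOnce(Table &M, int Key, int Val) {
  std::pair<Table::iterator, Table::iterator> R = M.equal_range(Key);
  for (Table::iterator J = R.first; J != R.second; ++J)
    if (J->second == Val)
      return;                             // already present; avoid duplicates
  M.insert(std::make_pair(Key, Val));
}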
@@ -973,9 +986,9 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
if (Result.isConstant()) {
markConstant(IV, &I, Result.getConstant());
// Remember that this instruction is virtually using the PHI node
- // operands.
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
+ // operands.
+ InsertInOverdefinedPHIs(&I, PN1);
+ InsertInOverdefinedPHIs(&I, PN2);
return;
}
@@ -1056,8 +1069,8 @@ void SCCPSolver::visitCmpInst(CmpInst &I) {
markConstant(&I, Result.getConstant());
// Remember that this instruction is virtually using the PHI node
// operands.
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN1, &I));
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN2, &I));
+ InsertInOverdefinedPHIs(&I, PN1);
+ InsertInOverdefinedPHIs(&I, PN2);
return;
}
@@ -1585,22 +1598,20 @@ namespace {
///
struct SCCP : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- SCCP() : FunctionPass(ID) {}
+ SCCP() : FunctionPass(ID) {
+ initializeSCCPPass(*PassRegistry::getPassRegistry());
+ }
// runOnFunction - Run the Sparse Conditional Constant Propagation
// algorithm, and return true if the function was modified.
//
bool runOnFunction(Function &F);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- }
};
} // end anonymous namespace
char SCCP::ID = 0;
INITIALIZE_PASS(SCCP, "sccp",
- "Sparse Conditional Constant Propagation", false, false);
+ "Sparse Conditional Constant Propagation", false, false)
// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() {
@@ -1701,7 +1712,9 @@ namespace {
///
struct IPSCCP : public ModulePass {
static char ID;
- IPSCCP() : ModulePass(ID) {}
+ IPSCCP() : ModulePass(ID) {
+ initializeIPSCCPPass(*PassRegistry::getPassRegistry());
+ }
bool runOnModule(Module &M);
};
} // end anonymous namespace
@@ -1709,7 +1722,7 @@ namespace {
char IPSCCP::ID = 0;
INITIALIZE_PASS(IPSCCP, "ipsccp",
"Interprocedural Sparse Conditional Constant Propagation",
- false, false);
+ false, false)
// createIPSCCPPass - This is the public interface to this file.
ModulePass *llvm::createIPSCCPPass() {
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
index cb03423..bf9ca6d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -7,12 +7,15 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the C bindings for libLLVMScalarOpts.a, which implements
-// several scalar transformations over the LLVM intermediate representation.
+// This file implements common infrastructure for libLLVMScalarOpts.a, which
+// implements several scalar transformations over the LLVM intermediate
+// representation, including the C bindings for that library.
//
//===----------------------------------------------------------------------===//
#include "llvm-c/Transforms/Scalar.h"
+#include "llvm-c/Initialization.h"
+#include "llvm/InitializePasses.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
@@ -20,6 +23,50 @@
using namespace llvm;
+/// initializeScalarOpts - Initialize all passes linked into the
+/// ScalarOpts library.
+void llvm::initializeScalarOpts(PassRegistry &Registry) {
+ initializeADCEPass(Registry);
+ initializeBlockPlacementPass(Registry);
+ initializeCodeGenPreparePass(Registry);
+ initializeConstantPropagationPass(Registry);
+ initializeCorrelatedValuePropagationPass(Registry);
+ initializeDCEPass(Registry);
+ initializeDeadInstEliminationPass(Registry);
+ initializeDSEPass(Registry);
+ initializeGEPSplitterPass(Registry);
+ initializeGVNPass(Registry);
+ initializeEarlyCSEPass(Registry);
+ initializeIndVarSimplifyPass(Registry);
+ initializeJumpThreadingPass(Registry);
+ initializeLICMPass(Registry);
+ initializeLoopDeletionPass(Registry);
+ initializeLoopInstSimplifyPass(Registry);
+ initializeLoopRotatePass(Registry);
+ initializeLoopStrengthReducePass(Registry);
+ initializeLoopUnrollPass(Registry);
+ initializeLoopUnswitchPass(Registry);
+ initializeLoopIdiomRecognizePass(Registry);
+ initializeLowerAtomicPass(Registry);
+ initializeMemCpyOptPass(Registry);
+ initializeReassociatePass(Registry);
+ initializeRegToMemPass(Registry);
+ initializeSCCPPass(Registry);
+ initializeIPSCCPPass(Registry);
+ initializeSROA_DTPass(Registry);
+ initializeSROA_SSAUpPass(Registry);
+ initializeCFGSimplifyPassPass(Registry);
+ initializeSimplifyHalfPowrLibCallsPass(Registry);
+ initializeSimplifyLibCallsPass(Registry);
+ initializeSinkingPass(Registry);
+ initializeTailDupPass(Registry);
+ initializeTailCallElimPass(Registry);
+}
+
+void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
+ initializeScalarOpts(*unwrap(R));
+}
+
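// For illustration: a C client is expected to register the passes once, then
// construct them through the existing wrappers. A minimal sketch of that call
// sequence, assuming LLVMGetGlobalPassRegistry is the llvm-c/Core.h accessor
// this import introduces (setUpScalarOpts is a hypothetical helper):
#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Scalar.h"
static void setUpScalarOpts(LLVMPassManagerRef PM) {
  LLVMInitializeScalarOpts(LLVMGetGlobalPassRegistry());
  LLVMAddAggressiveDCEPass(PM);           // safe: the pass is now registered
}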
void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createAggressiveDCEPass());
}
@@ -56,10 +103,6 @@ void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createLoopDeletionPass());
}
-void LLVMAddLoopIndexSplitPass(LLVMPassManagerRef PM) {
- unwrap(PM)->add(createLoopIndexSplitPass());
-}
-
void LLVMAddLoopRotatePass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createLoopRotatePass());
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index fee317d..c3ca852 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -31,28 +31,34 @@
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumReplaced, "Number of allocas broken up");
STATISTIC(NumPromoted, "Number of allocas promoted");
+STATISTIC(NumAdjusted, "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals, "Number of allocas copied from constant global");
namespace {
struct SROA : public FunctionPass {
- static char ID; // Pass identification, replacement for typeid
- explicit SROA(signed T = -1) : FunctionPass(ID) {
+ SROA(int T, bool hasDT, char &ID)
+ : FunctionPass(ID), HasDomTree(hasDT) {
if (T == -1)
SRThreshold = 128;
else
@@ -64,17 +70,10 @@ namespace {
bool performScalarRepl(Function &F);
bool performPromotion(Function &F);
- // getAnalysisUsage - This pass does not require any passes, but we know it
- // will not alter the CFG, so say so.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
- AU.addRequired<DominanceFrontier>();
- AU.setPreservesCFG();
- }
-
private:
+ bool HasDomTree;
TargetData *TD;
-
+
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
SmallVector<Value*, 32> DeadInsts;
@@ -83,39 +82,61 @@ namespace {
/// information about the uses. All these fields are initialized to false
/// and set to true when something is learned.
struct AllocaInfo {
+ /// The alloca to promote.
+ AllocaInst *AI;
+
+ /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
+ /// looping and avoid redundant work.
+ SmallPtrSet<PHINode*, 8> CheckedPHIs;
+
/// isUnsafe - This is set to true if the alloca cannot be SROA'd.
bool isUnsafe : 1;
-
+
/// isMemCpySrc - This is true if this aggregate is memcpy'd from.
bool isMemCpySrc : 1;
/// isMemCpyDst - This is true if this aggregate is memcpy'd into.
bool isMemCpyDst : 1;
- AllocaInfo()
- : isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false) {}
+ /// hasSubelementAccess - This is true if a subelement of the alloca is
+ /// ever accessed, or false if the alloca is only accessed with mem
+ /// intrinsics or loads/stores that only access the entire alloca at once.
+ bool hasSubelementAccess : 1;
+
+ /// hasALoadOrStore - This is true if there are any loads or stores to it.
+ /// The alloca may just be accessed with memcpy, for example, which would
+ /// not set this.
+ bool hasALoadOrStore : 1;
+
+ explicit AllocaInfo(AllocaInst *ai)
+ : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
+ hasSubelementAccess(false), hasALoadOrStore(false) {}
};
-
+
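// For illustration: CheckedPHIs is what makes the recursive PHI analysis
// below terminate on cyclic PHI graphs, since each node is examined at most
// once. The same guard in a self-contained form (a hypothetical walk over an
// arbitrary graph):
#include <set>
#include <vector>
struct Node { std::vector<Node*> Succs; };
static void walk(Node *N, std::set<Node*> &Checked) {
  if (!Checked.insert(N).second)
    return;                               // already visited; cycles stop here
  for (unsigned i = 0, e = N->Succs.size(); i != e; ++i)
    walk(N->Succs[i], Checked);
}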
unsigned SRThreshold;
- void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }
+ void MarkUnsafe(AllocaInfo &I, Instruction *User) {
+ I.isUnsafe = true;
+ DEBUG(dbgs() << " Transformation preventing inst: " << *User << '\n');
+ }
bool isSafeAllocaToScalarRepl(AllocaInst *AI);
- void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
- AllocaInfo &Info);
- void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset,
- AllocaInfo &Info);
- void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
- const Type *MemOpType, bool isStore, AllocaInfo &Info);
+ void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info);
+ void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset,
+ AllocaInfo &Info);
+ void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
+ void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
+ const Type *MemOpType, bool isStore, AllocaInfo &Info,
+ Instruction *TheAccess, bool AllowWholeAccess);
bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset,
const Type *&IdxTy);
-
- void DoScalarReplacement(AllocaInst *AI,
+
+ void DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList);
void DeleteDeadInstructions();
-
+
void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
@@ -129,18 +150,63 @@ namespace {
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
-
+
static MemTransferInst *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
};
+
+ // SROA_DT - SROA that uses DominatorTree.
+ struct SROA_DT : public SROA {
+ static char ID;
+ public:
+ SROA_DT(int T = -1) : SROA(T, true, ID) {
+ initializeSROA_DTPass(*PassRegistry::getPassRegistry());
+ }
+
+ // getAnalysisUsage - This pass does not require any passes, but we know it
+ // will not alter the CFG, so say so.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<DominatorTree>();
+ AU.setPreservesCFG();
+ }
+ };
+
+ // SROA_SSAUp - SROA that uses SSAUpdater.
+ struct SROA_SSAUp : public SROA {
+ static char ID;
+ public:
+ SROA_SSAUp(int T = -1) : SROA(T, false, ID) {
+ initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
+ }
+
+ // getAnalysisUsage - This pass does not require any passes, but we know it
+ // will not alter the CFG, so say so.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ }
+ };
+
}
-char SROA::ID = 0;
-INITIALIZE_PASS(SROA, "scalarrepl",
- "Scalar Replacement of Aggregates", false, false);
+char SROA_DT::ID = 0;
+char SROA_SSAUp::ID = 0;
+
+INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl",
+ "Scalar Replacement of Aggregates (DT)", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(SROA_DT, "scalarrepl",
+ "Scalar Replacement of Aggregates (DT)", false, false)
+
+INITIALIZE_PASS_BEGIN(SROA_SSAUp, "scalarrepl-ssa",
+ "Scalar Replacement of Aggregates (SSAUp)", false, false)
+INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa",
+ "Scalar Replacement of Aggregates (SSAUp)", false, false)
// Public interface to the ScalarReplAggregates pass
-FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
- return new SROA(Threshold);
+FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
+ bool UseDomTree) {
+ if (UseDomTree)
+ return new SROA_DT(Threshold);
+ return new SROA_SSAUp(Threshold);
}
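// For illustration: callers now choose the promotion strategy through the
// factory flag instead of naming a concrete pass class. A sketch of typical
// use (addSROA is a hypothetical helper; -1 requests the default threshold
// of 128, per the constructor above):
#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"
static void addSROA(llvm::PassManager &PM, bool UseDomTree) {
  PM.add(llvm::createScalarReplAggregatesPass(-1, UseDomTree));
}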
@@ -156,16 +222,16 @@ class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered.
unsigned AllocaSize;
const TargetData &TD;
-
+
/// IsNotTrivial - This is set to true if there is some access to the object
/// which means that mem2reg can't promote it.
bool IsNotTrivial;
-
+
/// VectorTy - This tracks the type that we should promote the vector to if
/// it is possible to turn it into a vector. This starts out null, and if it
/// isn't possible to turn into a vector type, it gets set to VoidTy.
const Type *VectorTy;
-
+
/// HadAVector - True if there is at least one vector access to the alloca.
/// We don't want to turn random arrays into vectors and use vector element
/// insert/extract, but if there are element accesses to something that is
@@ -179,14 +245,14 @@ public:
VectorTy = 0;
HadAVector = false;
}
-
+
AllocaInst *TryConvert(AllocaInst *AI);
-
+
private:
bool CanConvertToScalar(Value *V, uint64_t Offset);
void MergeInType(const Type *In, uint64_t Offset);
void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
-
+
Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
uint64_t Offset, IRBuilder<> &Builder);
Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
@@ -195,26 +261,6 @@ private:
} // end anonymous namespace.
-/// IsVerbotenVectorType - Return true if this is a vector type ScalarRepl isn't
-/// allowed to form. We do this to avoid MMX types, which is a complete hack,
-/// but is required until the backend is fixed.
-static bool IsVerbotenVectorType(const VectorType *VTy, const Instruction *I) {
- StringRef Triple(I->getParent()->getParent()->getParent()->getTargetTriple());
- if (!Triple.startswith("i386") &&
- !Triple.startswith("x86_64"))
- return false;
-
- // Reject all the MMX vector types.
- switch (VTy->getNumElements()) {
- default: return false;
- case 1: return VTy->getElementType()->isIntegerTy(64);
- case 2: return VTy->getElementType()->isIntegerTy(32);
- case 4: return VTy->getElementType()->isIntegerTy(16);
- case 8: return VTy->getElementType()->isIntegerTy(8);
- }
-}
-
-
/// TryConvert - Analyze the specified alloca, and if it is safe to do so,
/// rewrite it to be a new alloca which is mem2reg'able. This returns the new
/// alloca if possible or null if not.
@@ -223,7 +269,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// out.
if (!CanConvertToScalar(AI, 0) || !IsNotTrivial)
return 0;
-
+
// If we were able to find a vector type that can handle this with
// insert/extract elements, and if there was at least one use that had
// a vector type, promote this to a vector. We don't want to promote
@@ -231,8 +277,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// we just get a lot of insert/extracts. If at least one vector is
// involved, then we probably really do have a union of vector/array.
const Type *NewTy;
- if (VectorTy && VectorTy->isVectorTy() && HadAVector &&
- !IsVerbotenVectorType(cast<VectorType>(VectorTy), AI)) {
+ if (VectorTy && VectorTy->isVectorTy() && HadAVector) {
DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = "
<< *VectorTy << '\n');
NewTy = VectorTy; // Use the vector type.
@@ -263,7 +308,7 @@ void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
// nothing to be done.
if (VectorTy && VectorTy->isVoidTy())
return;
-
+
// If this could be contributing to a vector, analyze it.
// If the In type is a vector that is the same size as the alloca, see if it
@@ -271,7 +316,7 @@ void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
// Remember if we saw a vector type.
HadAVector = true;
-
+
if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
// If we're storing/loading a vector of the right size, allow it as a
// vector. If this the first vector we see, remember the type so that
@@ -290,7 +335,7 @@ void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
// compatible with it.
unsigned EltSize = In->getPrimitiveSizeInBits()/8;
if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
- (VectorTy == 0 ||
+ (VectorTy == 0 ||
cast<VectorType>(VectorTy)->getElementType()
->getPrimitiveSizeInBits()/8 == EltSize)) {
if (VectorTy == 0)
@@ -298,7 +343,7 @@ void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
return;
}
}
-
+
// Otherwise, we have a case that we can't handle with an optimized vector
// form. We can still turn this into a large integer.
VectorTy = Type::getVoidTy(In->getContext());
@@ -316,22 +361,28 @@ void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
-
+
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
// Don't break volatile loads.
if (LI->isVolatile())
return false;
+ // Don't touch MMX operations.
+ if (LI->getType()->isX86_MMXTy())
+ return false;
MergeInType(LI->getType(), Offset);
continue;
}
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Storing the pointer, not into the value?
if (SI->getOperand(0) == V || SI->isVolatile()) return false;
+ // Don't touch MMX operations.
+ if (SI->getOperand(0)->getType()->isX86_MMXTy())
+ return false;
MergeInType(SI->getOperand(0)->getType(), Offset);
continue;
}
-
+
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
IsNotTrivial = true; // Can't be mem2reg'd.
if (!CanConvertToScalar(BCI, Offset))
@@ -343,7 +394,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
// If this is a GEP with a variable indices, we can't handle it.
if (!GEP->hasAllConstantIndices())
return false;
-
+
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
@@ -372,15 +423,15 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
return false;
-
+
IsNotTrivial = true; // Can't be mem2reg'd.
continue;
}
-
+
// Otherwise, we cannot handle this!
return false;
}
-
+
return true;
}
@@ -411,9 +462,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
GEP->eraseFromParent();
continue;
}
-
- IRBuilder<> Builder(User->getParent(), User);
-
+
+ IRBuilder<> Builder(User);
+
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
// The load is a bit extract from NewAI shifted right by Offset bits.
Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
@@ -423,7 +474,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
LI->eraseFromParent();
continue;
}
-
+
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
assert(SI->getOperand(0) != Ptr && "Consistency error!");
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
@@ -431,14 +482,14 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
Builder);
Builder.CreateStore(New, NewAI);
SI->eraseFromParent();
-
+
// If the load we just inserted is now dead, then the inserted store
// overwrote the entire thing.
if (Old->use_empty())
Old->eraseFromParent();
continue;
}
-
+
// If this is a constant sized memset of a constant value (e.g. 0) we can
// transform it into a store of the expanded constant value.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
@@ -446,7 +497,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
if (NumBytes != 0) {
unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();
-
+
// Compute the value replicated the right number of times.
APInt APVal(NumBytes*8, Val);
@@ -454,17 +505,17 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
if (Val)
for (unsigned i = 1; i != NumBytes; ++i)
APVal |= APVal << 8;
-
+
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
Value *New = ConvertScalar_InsertValue(
ConstantInt::get(User->getContext(), APVal),
Old, Offset, Builder);
Builder.CreateStore(New, NewAI);
-
+
// If the load we just inserted is now dead, then the memset overwrote
// the entire thing.
if (Old->use_empty())
- Old->eraseFromParent();
+ Old->eraseFromParent();
}
MSI->eraseFromParent();
continue;
@@ -474,29 +525,42 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// can handle it like a load or store of the scalar type.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
assert(Offset == 0 && "must be store to start of alloca");
-
+
// If the source and destination both point to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
// as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));
-
- if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
+ AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));
+
+ if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
// Dest must be OrigAI, change this to be a load from the original
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
Value *SrcPtr = MTI->getSource();
- SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());
-
+ const PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
+ const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
+ if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
+ AIPTy = PointerType::get(AIPTy->getElementType(),
+ SPTy->getAddressSpace());
+ }
+ SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);
+
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
- } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
+ } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
// through the original dest pointer (bitcasted).
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
- Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
+ const PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
+ const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
+ if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
+ AIPTy = PointerType::get(AIPTy->getElementType(),
+ DPTy->getAddressSpace());
+ }
+ Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);
+
StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
NewStore->setAlignment(MTI->getAlignment());
} else {
@@ -506,7 +570,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
MTI->eraseFromParent();
continue;
}
-
+
llvm_unreachable("Unsupported operation!");
}
}
@@ -548,7 +612,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
V = Builder.CreateBitCast(V, ToType, "tmp");
return V;
}
-
+
// If ToType is a first class aggregate, extract out each of the pieces and
// use insertvalue's to form the FCA.
if (const StructType *ST = dyn_cast<StructType>(ToType)) {
@@ -562,7 +626,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
}
return Res;
}
-
+
if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
@@ -598,7 +662,7 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
ConstantInt::get(FromVal->getType(),
ShAmt), "tmp");
else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
- FromVal = Builder.CreateShl(FromVal,
+ FromVal = Builder.CreateShl(FromVal,
ConstantInt::get(FromVal->getType(),
-ShAmt), "tmp");
@@ -606,11 +670,11 @@ ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
if (LIBitWidth < NTy->getBitWidth())
FromVal =
- Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
+ Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
LIBitWidth), "tmp");
else if (LIBitWidth > NTy->getBitWidth())
FromVal =
- Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
+ Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
LIBitWidth), "tmp");
// If the result is an integer, this is a trunc or bitcast.
@@ -647,7 +711,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
-
+
// Changing the whole vector with memset or with an access of a different
// vector type?
if (ValSize == VecSize)
@@ -657,28 +721,28 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
// Must be an element insertion.
unsigned Elt = Offset/EltSize;
-
+
if (SV->getType() != VTy->getElementType())
SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
-
- SV = Builder.CreateInsertElement(Old, SV,
+
+ SV = Builder.CreateInsertElement(Old, SV,
ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
"tmp");
return SV;
}
-
+
// If SV is a first-class aggregate value, insert each value recursively.
if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
const StructLayout &Layout = *TD.getStructLayout(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
- Old = ConvertScalar_InsertValue(Elt, Old,
+ Old = ConvertScalar_InsertValue(Elt, Old,
Offset+Layout.getElementOffsetInBits(i),
Builder);
}
return Old;
}
-
+
if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
@@ -778,16 +842,298 @@ bool SROA::runOnFunction(Function &F) {
return Changed;
}
+namespace {
+class AllocaPromoter : public LoadAndStorePromoter {
+ AllocaInst *AI;
+public:
+ AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S)
+ : LoadAndStorePromoter(Insts, S), AI(0) {}
+
+ void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
+ // Remember which alloca we're promoting (for isInstInList).
+ this->AI = AI;
+ LoadAndStorePromoter::run(Insts);
+ AI->eraseFromParent();
+ }
+
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getOperand(0) == AI;
+ return cast<StoreInst>(I)->getPointerOperand() == AI;
+ }
+};
+} // end anon namespace
+
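// For illustration: the promoter is driven by collecting every direct use of
// the alloca and handing the list to run(); LoadAndStorePromoter then rewrites
// the loads and stores through SSAUpdater. A sketch of the driver, mirroring
// the performPromotion change further below (promoteOne is hypothetical):
static void promoteOne(AllocaInst *AI) {
  SSAUpdater SSA;
  SmallVector<Instruction*, 64> Insts;
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ++UI)
    Insts.push_back(cast<Instruction>(*UI));
  AllocaPromoter(Insts, SSA).run(AI, Insts);  // run() also erases AI itself
}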
+/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
+/// subsequently loaded can be rewritten to load both input pointers and then
+/// select between the result, allowing the load of the alloca to be promoted.
+/// From this:
+/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
+/// %V = load i32* %P2
+/// to:
+/// %V1 = load i32* %Alloca -> will be mem2reg'd
+/// %V2 = load i32* %Other
+/// %V = select i1 %cond, i32 %V1, i32 %V2
+///
+/// We can do this to a select if its only uses are loads and if the operands to
+/// the select can be loaded unconditionally.
+static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
+ bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
+ bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
+
+ for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || LI->isVolatile()) return false;
+
+ // Both operands to the select need to be dereferenceable, either absolutely
+ // (e.g. allocas) or at this point because we can see other accesses to it.
+ if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
+ LI->getAlignment(), TD))
+ return false;
+ if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
+ LI->getAlignment(), TD))
+ return false;
+ }
+
+ return true;
+}
+
+/// isSafePHIToSpeculate - PHI instructions that use an alloca and are
+/// subsequently loaded can be rewritten to load both input pointers in the pred
+/// blocks and then PHI the results, allowing the load of the alloca to be
+/// promoted.
+/// From this:
+/// %P2 = phi i32* [ %Alloca, ... ], [ %Other, ... ]
+/// %V = load i32* %P2
+/// to:
+/// %V1 = load i32* %Alloca -> will be mem2reg'd
+/// ...
+/// %V2 = load i32* %Other
+/// ...
+/// %V = phi i32 [ %V1, ... ], [ %V2, ... ]
+///
+/// We can do this to a phi if its only uses are loads and if the operands to
+/// the phi can be loaded unconditionally.
+static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
+ // For now, we can only do this promotion if the load is in the same block as
+ // the PHI, and if there are no stores between the phi and load.
+ // TODO: Allow recursive phi users.
+ // TODO: Allow stores.
+ BasicBlock *BB = PN->getParent();
+ unsigned MaxAlign = 0;
+ for (Value::use_iterator UI = PN->use_begin(), UE = PN->use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || LI->isVolatile()) return false;
+
+ // For now we only allow loads in the same block as the PHI. This is a
+ // common case that happens when instcombine merges two loads through a PHI.
+ if (LI->getParent() != BB) return false;
+
+ // Ensure that there are no instructions between the PHI and the load that
+ // could store.
+ for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
+ if (BBI->mayWriteToMemory())
+ return false;
+
+ MaxAlign = std::max(MaxAlign, LI->getAlignment());
+ }
+
+ // Okay, we know that we have one or more loads in the same block as the PHI.
+ // We can transform this if it is safe to push the loads into the predecessor
+ // blocks. The only thing to watch out for is that we can't put a possibly
+ // trapping load in the predecessor if it is a critical edge.
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ BasicBlock *Pred = PN->getIncomingBlock(i);
+
+ // If the predecessor has a single successor, then the edge isn't critical.
+ if (Pred->getTerminator()->getNumSuccessors() == 1)
+ continue;
+
+ Value *InVal = PN->getIncomingValue(i);
+
+ // If the InVal is an invoke in the pred, we can't put a load on the edge.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
+ if (II->getParent() == Pred)
+ return false;
+
+ // If this pointer is always safe to load, or if we can prove that there is
+ // already a load in the block, then we can move the load to the pred block.
+ if (InVal->isDereferenceablePointer() ||
+ isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
+
+/// tryToMakeAllocaBePromotable - This returns true if the alloca only has
+/// direct (non-volatile) loads and stores to it. If the alloca is close but
+/// not quite there, this will transform the code to allow promotion. As such,
+/// it is a non-pure predicate.
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
+ SetVector<Instruction*, SmallVector<Instruction*, 4>,
+ SmallPtrSet<Instruction*, 4> > InstsToRewrite;
+
+ for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
+ UI != UE; ++UI) {
+ User *U = *UI;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ if (LI->isVolatile())
+ return false;
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+ if (SI->getOperand(0) == AI || SI->isVolatile())
+ return false; // Don't allow a store OF the AI, only INTO the AI.
+ continue;
+ }
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(U)) {
+ // If the condition being selected on is a constant, fold the select; yes,
+ // this does (rarely) happen early on.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) {
+ Value *Result = SI->getOperand(1+CI->isZero());
+ SI->replaceAllUsesWith(Result);
+ SI->eraseFromParent();
+
+ // This is very rare and we just scrambled the use list of AI, start
+ // over completely.
+ return tryToMakeAllocaBePromotable(AI, TD);
+ }
+
+ // If it is safe to turn "load (select c, AI, ptr)" into a select of two
+ // loads, then we can transform this by rewriting the select.
+ if (!isSafeSelectToSpeculate(SI, TD))
+ return false;
+
+ InstsToRewrite.insert(SI);
+ continue;
+ }
+
+ if (PHINode *PN = dyn_cast<PHINode>(U)) {
+ if (PN->use_empty()) { // Dead PHIs can be stripped.
+ InstsToRewrite.insert(PN);
+ continue;
+ }
+
+ // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
+ // in the pred blocks, then we can transform this by rewriting the PHI.
+ if (!isSafePHIToSpeculate(PN, TD))
+ return false;
+
+ InstsToRewrite.insert(PN);
+ continue;
+ }
+
+ return false;
+ }
+
+ // If there are no instructions to rewrite, then all uses are load/stores and
+ // we're done!
+ if (InstsToRewrite.empty())
+ return true;
+
+ // If we have instructions that need to be rewritten for this to be promotable
+ // take care of it now.
+ for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
+ if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) {
+ // Selects in InstsToRewrite only have load uses. Rewrite each as two
+ // loads with a new select.
+ while (!SI->use_empty()) {
+ LoadInst *LI = cast<LoadInst>(SI->use_back());
+
+ IRBuilder<> Builder(LI);
+ LoadInst *TrueLoad =
+ Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
+ LoadInst *FalseLoad =
+ Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");
+
+ // Transfer alignment and TBAA info if present.
+ TrueLoad->setAlignment(LI->getAlignment());
+ FalseLoad->setAlignment(LI->getAlignment());
+ if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
+ TrueLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
+ FalseLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
+ }
+
+ Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad);
+ V->takeName(LI);
+ LI->replaceAllUsesWith(V);
+ LI->eraseFromParent();
+ }
+
+ // Now that all the loads are gone, the select is gone too.
+ SI->eraseFromParent();
+ continue;
+ }
+
+ // Otherwise, we have a PHI node which allows us to push the loads into the
+ // predecessors.
+ PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
+ if (PN->use_empty()) {
+ PN->eraseFromParent();
+ continue;
+ }
+
+ const Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
+ PHINode *NewPN = PHINode::Create(LoadTy, PN->getName()+".ld", PN);
+
+ // Get the TBAA tag and alignment to use from one of the loads. It doesn't
+ // matter which one we pick; even if they differ, any of them will do.
+ LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
+ MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
+ unsigned Align = SomeLoad->getAlignment();
+
+ // Rewrite all loads of the PN to use the new PHI.
+ while (!PN->use_empty()) {
+ LoadInst *LI = cast<LoadInst>(PN->use_back());
+ LI->replaceAllUsesWith(NewPN);
+ LI->eraseFromParent();
+ }
+
+ // Inject loads into all of the pred blocks. Keep track of which blocks we
+ // insert them into, in case we have multiple edges from the same block.
+ DenseMap<BasicBlock*, LoadInst*> InsertedLoads;
+
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ BasicBlock *Pred = PN->getIncomingBlock(i);
+ LoadInst *&Load = InsertedLoads[Pred];
+ if (Load == 0) {
+ Load = new LoadInst(PN->getIncomingValue(i),
+ PN->getName() + "." + Pred->getName(),
+ Pred->getTerminator());
+ Load->setAlignment(Align);
+ if (TBAATag) Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+ }
+
+ NewPN->addIncoming(Load, Pred);
+ }
+
+ PN->eraseFromParent();
+ }
+
+ ++NumAdjusted;
+ return true;
+}
+
bool SROA::performPromotion(Function &F) {
std::vector<AllocaInst*> Allocas;
- DominatorTree &DT = getAnalysis<DominatorTree>();
- DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
+ DominatorTree *DT = 0;
+ if (HasDomTree)
+ DT = &getAnalysis<DominatorTree>();
BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function
bool Changed = false;
-
+ SmallVector<Instruction*, 64> Insts;
while (1) {
Allocas.clear();
@@ -795,12 +1141,27 @@ bool SROA::performPromotion(Function &F) {
// the entry node
for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
- if (isAllocaPromotable(AI))
+ if (tryToMakeAllocaBePromotable(AI, TD))
Allocas.push_back(AI);
if (Allocas.empty()) break;
- PromoteMemToReg(Allocas, DT, DF);
+ if (HasDomTree)
+ PromoteMemToReg(Allocas, *DT);
+ else {
+ SSAUpdater SSA;
+ for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
+ AllocaInst *AI = Allocas[i];
+
+ // Build list of instructions to promote.
+ for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
+ UI != E; ++UI)
+ Insts.push_back(cast<Instruction>(*UI));
+
+ AllocaPromoter(Insts, SSA).run(AI, Insts);
+ Insts.clear();
+ }
+ }
NumPromoted += Allocas.size();
Changed = true;
}
@@ -842,7 +1203,7 @@ bool SROA::performScalarRepl(Function &F) {
while (!WorkList.empty()) {
AllocaInst *AI = WorkList.back();
WorkList.pop_back();
-
+
// Handle dead allocas trivially. These can be formed by SROA'ing arrays
// with unused elements.
if (AI->use_empty()) {
@@ -854,7 +1215,7 @@ bool SROA::performScalarRepl(Function &F) {
// If this alloca is impossible for us to promote, reject it early.
if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
continue;
-
+
// Check to see if this allocation is only modified by a memcpy/memmove from
// a constant global. If this is the case, we can change all users to use
// the constant global instead. This is commonly produced by the CFE by
@@ -871,7 +1232,7 @@ bool SROA::performScalarRepl(Function &F) {
Changed = true;
continue;
}
-
+
// Check to see if we can perform the core SROA transformation. We cannot
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
@@ -880,10 +1241,10 @@ bool SROA::performScalarRepl(Function &F) {
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
-
+
// Do not promote any struct whose size is too big.
if (AllocaSize > SRThreshold) continue;
-
+
// If the alloca looks like a good candidate for scalar replacement, and if
// all its users can be transformed, then split up the aggregate into its
// separate elements.
@@ -906,8 +1267,8 @@ bool SROA::performScalarRepl(Function &F) {
++NumConverted;
Changed = true;
continue;
- }
-
+ }
+
// Otherwise, couldn't process this alloca.
}
@@ -916,14 +1277,14 @@ bool SROA::performScalarRepl(Function &F) {
/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
-void SROA::DoScalarReplacement(AllocaInst *AI,
+void SROA::DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList) {
DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
SmallVector<AllocaInst*, 32> ElementAllocas;
if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
- AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+ AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
ElementAllocas.push_back(NA);
@@ -971,48 +1332,106 @@ void SROA::DeleteDeadInstructions() {
I->eraseFromParent();
}
}
-
+
/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
/// performing scalar replacement of alloca AI. The results are flagged in
/// the Info parameter. Offset indicates the position within AI that is
/// referenced by this instruction.
-void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
+void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
AllocaInfo &Info) {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
- isSafeForScalarRepl(BC, AI, Offset, Info);
+ isSafeForScalarRepl(BC, Offset, Info);
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
uint64_t GEPOffset = Offset;
- isSafeGEP(GEPI, AI, GEPOffset, Info);
+ isSafeGEP(GEPI, GEPOffset, Info);
if (!Info.isUnsafe)
- isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
+ isSafeForScalarRepl(GEPI, GEPOffset, Info);
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
- if (Length)
- isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
- UI.getOperandNo() == 0, Info);
- else
- MarkUnsafe(Info);
+ if (Length == 0)
+ return MarkUnsafe(Info, User);
+ isSafeMemAccess(Offset, Length->getZExtValue(), 0,
+ UI.getOperandNo() == 0, Info, MI,
+ true /*AllowWholeAccess*/);
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ if (LI->isVolatile())
+ return MarkUnsafe(Info, User);
+ const Type *LIType = LI->getType();
+ isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ LIType, false, Info, LI, true /*AllowWholeAccess*/);
+ Info.hasALoadOrStore = true;
+
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ // Store is ok if storing INTO the pointer, not storing the pointer
+ if (SI->isVolatile() || SI->getOperand(0) == I)
+ return MarkUnsafe(Info, User);
+
+ const Type *SIType = SI->getOperand(0)->getType();
+ isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ SIType, true, Info, SI, true /*AllowWholeAccess*/);
+ Info.hasALoadOrStore = true;
+ } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
+ isSafePHISelectUseForScalarRepl(User, Offset, Info);
+ } else {
+ return MarkUnsafe(Info, User);
+ }
+ if (Info.isUnsafe) return;
+ }
+}
+
+
+/// isSafePHISelectUseForScalarRepl - If we see a PHI node or select using a pointer
+/// derived from the alloca, we can often still split the alloca into elements.
+/// This is useful if we have a large alloca where one element is phi'd
+/// together somewhere: we can SRoA and promote all the other elements even if
+/// we end up not being able to promote this one.
+///
+/// All we require is that the uses of the PHI do not index into other parts of
+/// the alloca. The most important use case for this is single loads and stores
+/// that are PHI'd together, which can happen due to code sinking.
+void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
+ AllocaInfo &Info) {
+ // If we've already checked this PHI, don't do it again.
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ if (!Info.CheckedPHIs.insert(PN))
+ return;
+
+ for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
+ Instruction *User = cast<Instruction>(*UI);
+
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
+ isSafePHISelectUseForScalarRepl(BC, Offset, Info);
+ } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ // Only allow "bitcast" GEPs for simplicity. We could generalize this,
+ // but would have to prove that we're staying inside of an element being
+ // promoted.
+ if (!GEPI->hasAllZeroIndices())
+ return MarkUnsafe(Info, User);
+ isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
- if (!LI->isVolatile()) {
- const Type *LIType = LI->getType();
- isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(LIType),
- LIType, false, Info);
- } else
- MarkUnsafe(Info);
+ if (LI->isVolatile())
+ return MarkUnsafe(Info, User);
+ const Type *LIType = LI->getType();
+ isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ LIType, false, Info, LI, false /*AllowWholeAccess*/);
+ Info.hasALoadOrStore = true;
+
} else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Store is ok if storing INTO the pointer, not storing the pointer
- if (!SI->isVolatile() && SI->getOperand(0) != I) {
- const Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(SIType),
- SIType, true, Info);
- } else
- MarkUnsafe(Info);
+ if (SI->isVolatile() || SI->getOperand(0) == I)
+ return MarkUnsafe(Info, User);
+
+ const Type *SIType = SI->getOperand(0)->getType();
+ isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ SIType, true, Info, SI, false /*AllowWholeAccess*/);
+ Info.hasALoadOrStore = true;
+ } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
+ isSafePHISelectUseForScalarRepl(User, Offset, Info);
} else {
- DEBUG(errs() << " Transformation preventing inst: " << *User << '\n');
- MarkUnsafe(Info);
+ return MarkUnsafe(Info, User);
}
if (Info.isUnsafe) return;
}
@@ -1023,7 +1442,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
/// references, and when the resulting offset corresponds to an element within
/// the alloca type. The results are flagged in the Info parameter. Upon
/// return, Offset is adjusted as specified by the GEP indices.
-void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
+void SROA::isSafeGEP(GetElementPtrInst *GEPI,
uint64_t &Offset, AllocaInfo &Info) {
gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
if (GEPIt == E)
@@ -1038,7 +1457,7 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
if (!IdxVal)
- return MarkUnsafe(Info);
+ return MarkUnsafe(Info, GEPI);
}
// Compute the offset due to this GEP and check if the alloca has a
@@ -1046,40 +1465,92 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
&Indices[0], Indices.size());
- if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0))
- MarkUnsafe(Info);
+ if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, 0))
+ MarkUnsafe(Info, GEPI);
+}
+
+/// isHomogeneousAggregate - Check if type T is a struct or array containing
+/// elements of the same type (which is always true for arrays). If so,
+/// return true with NumElts and EltTy set to the number of elements and the
+/// element type, respectively.
+static bool isHomogeneousAggregate(const Type *T, unsigned &NumElts,
+ const Type *&EltTy) {
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
+ NumElts = AT->getNumElements();
+ EltTy = (NumElts == 0 ? 0 : AT->getElementType());
+ return true;
+ }
+ if (const StructType *ST = dyn_cast<StructType>(T)) {
+ NumElts = ST->getNumContainedTypes();
+ EltTy = (NumElts == 0 ? 0 : ST->getContainedType(0));
+ for (unsigned n = 1; n < NumElts; ++n) {
+ if (ST->getContainedType(n) != EltTy)
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+/// isCompatibleAggregate - Check if T1 and T2 are either the same type or are
+/// "homogeneous" aggregates with the same element type and number of elements.
+static bool isCompatibleAggregate(const Type *T1, const Type *T2) {
+ if (T1 == T2)
+ return true;
+
+ unsigned NumElts1, NumElts2;
+ const Type *EltTy1, *EltTy2;
+ if (isHomogeneousAggregate(T1, NumElts1, EltTy1) &&
+ isHomogeneousAggregate(T2, NumElts2, EltTy2) &&
+ NumElts1 == NumElts2 &&
+ EltTy1 == EltTy2)
+ return true;
+
+ return false;
}
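// For illustration: under these rules [4 x i32] and { i32, i32, i32, i32 }
// are compatible (four i32 elements each), while { i32, float } is not even
// homogeneous. A small sketch building such types with this release's API
// (demoCompatible is a hypothetical helper):
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>
static bool demoCompatible(llvm::LLVMContext &C) {
  const llvm::Type *I32 = llvm::Type::getInt32Ty(C);
  const llvm::Type *Arr = llvm::ArrayType::get(I32, 4);
  std::vector<const llvm::Type*> Fields(4, I32);
  const llvm::Type *St = llvm::StructType::get(C, Fields);
  return isCompatibleAggregate(Arr, St);  // true by the definition above
}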
/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
/// alloca or has an offset and size that corresponds to a component element
/// within it. The offset checked here may have been formed from a GEP with a
/// pointer bitcasted to a different type.
-void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
+///
+/// If AllowWholeAccess is true, then this allows uses of the entire alloca as a
+/// unit. If false, it only allows accesses known to be in a single element.
+void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
const Type *MemOpType, bool isStore,
- AllocaInfo &Info) {
+ AllocaInfo &Info, Instruction *TheAccess,
+ bool AllowWholeAccess) {
// Check if this is a load/store of the entire alloca.
- if (Offset == 0 && MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) {
- bool UsesAggregateType = (MemOpType == AI->getAllocatedType());
- // This is safe for MemIntrinsics (where MemOpType is 0), integer types
- // (which are essentially the same as the MemIntrinsics, especially with
- // regard to copying padding between elements), or references using the
- // aggregate type of the alloca.
- if (!MemOpType || MemOpType->isIntegerTy() || UsesAggregateType) {
- if (!UsesAggregateType) {
- if (isStore)
- Info.isMemCpyDst = true;
- else
- Info.isMemCpySrc = true;
- }
+ if (Offset == 0 && AllowWholeAccess &&
+ MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
+ // This can be safe for MemIntrinsics (where MemOpType is 0) and integer
+ // loads/stores (which are essentially the same as the MemIntrinsics with
+ // regard to copying padding between elements). But, if an alloca is
+ // flagged as both a source and destination of such operations, we'll need
+ // to check later for padding between elements.
+ if (!MemOpType || MemOpType->isIntegerTy()) {
+ if (isStore)
+ Info.isMemCpyDst = true;
+ else
+ Info.isMemCpySrc = true;
+ return;
+ }
+ // This is also safe for references using a type that is compatible with
+ // the type of the alloca, so that loads/stores can be rewritten using
+ // insertvalue/extractvalue.
+ if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) {
+ Info.hasSubelementAccess = true;
return;
}
}
// Check if the offset/size correspond to a component within the alloca type.
- const Type *T = AI->getAllocatedType();
- if (TypeHasComponent(T, Offset, MemSize))
+ const Type *T = Info.AI->getAllocatedType();
+ if (TypeHasComponent(T, Offset, MemSize)) {
+ Info.hasSubelementAccess = true;
return;
+ }
- return MarkUnsafe(Info);
+ return MarkUnsafe(Info, TheAccess);
}
/// TypeHasComponent - Return true if T has a component type with the
@@ -1116,14 +1587,21 @@ bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
+ for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
+ Use &TheUse = UI.getUse();
+ Instruction *User = cast<Instruction>(*UI++);
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
RewriteBitCast(BC, AI, Offset, NewElts);
- } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ continue;
+ }
+
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
RewriteGEP(GEPI, AI, Offset, NewElts);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
+ continue;
+ }
+
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
uint64_t MemSize = Length->getZExtValue();
if (Offset == 0 &&
@@ -1131,9 +1609,13 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
// Otherwise the intrinsic can only touch a single element and the
// address operand will be updated, so nothing else needs to be done.
- } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ continue;
+ }
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
const Type *LIType = LI->getType();
- if (LIType == AI->getAllocatedType()) {
+
+ if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
// Replace:
// %res = load { i32, i32 }* %alloc
// with:
@@ -1155,10 +1637,13 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
// If this is a load of the entire alloca to an integer, rewrite it.
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
}
- } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
Value *Val = SI->getOperand(0);
const Type *SIType = Val->getType();
- if (SIType == AI->getAllocatedType()) {
+ if (isCompatibleAggregate(SIType, AI->getAllocatedType())) {
// Replace:
// store { i32, i32 } %val, { i32, i32 }* %alloc
// with:
@@ -1178,6 +1663,26 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
// If this is a store of the entire alloca from an integer, rewrite it.
RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
}
+ continue;
+ }
+
+ if (isa<SelectInst>(User) || isa<PHINode>(User)) {
+ // If we have a PHI user of the alloca itself (as opposed to a GEP or
+ // bitcast) we have to rewrite it. GEP and bitcast uses will be RAUW'd to
+ // the new pointer.
+ if (!isa<AllocaInst>(I)) continue;
+
+ assert(Offset == 0 && NewElts[0] &&
+ "Direct alloca use should have a zero offset");
+
+ // If we have a use of the alloca, we know the derived uses will be
+ // utilizing just the first element of the scalarized result. Insert a
+ // bitcast of the first alloca before the user as required.
+ AllocaInst *NewAI = NewElts[0];
+ BitCastInst *BCI = new BitCastInst(NewAI, AI->getType(), "", NewAI);
+ NewAI->moveBefore(BCI);
+ TheUse = BCI;
+ continue;
}
}
}
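
The loop above now advances the use iterator before each user is rewritten, since rewriting can replace or delete the current use. A minimal sketch of that pattern, with rewriteUser() as a hypothetical stand-in for the Rewrite* calls:

    // Iterate a value's use list while mutating it: capture the use, bump
    // the iterator past it, then rewrite. rewriteUser() may RAUW or delete
    // the user without invalidating UI.
    for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
      Use &TheUse = UI.getUse();
      Instruction *User = cast<Instruction>(*UI++);
      rewriteUser(User, TheUse);
    }
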
@@ -1305,7 +1810,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// function is only called for mem intrinsics that access the whole
// aggregate, so non-zero GEPs are not an issue here.)
OtherPtr = OtherPtr->stripPointerCasts();
-
+
// Copying the alloca to itself is a no-op: just delete it.
if (OtherPtr == AI || OtherPtr == NewElts[0]) {
// This code will run twice for a no-op memcpy -- once for each operand.
@@ -1316,28 +1821,26 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
DeadInsts.push_back(MI);
return;
}
-
+
// If the pointer is not the right type, insert a bitcast to the right
// type.
const Type *NewTy =
PointerType::get(AI->getType()->getElementType(), AddrSpace);
-
+
if (OtherPtr->getType() != NewTy)
OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
}
-
+
// Process each element of the aggregate.
- Value *TheFn = MI->getCalledValue();
- const Type *BytePtrTy = MI->getRawDest()->getType();
bool SROADest = MI->getRawDest() == Inst;
-
+
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
Value *OtherElt = 0;
unsigned OtherEltAlign = MemAlignment;
-
+
if (OtherPtr) {
Value *Idx[2] = { Zero,
ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
@@ -1353,7 +1856,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
const Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
EltOffset = TD->getTypeAllocSize(EltTy)*i;
}
-
+
// The alignment of the other pointer is the guaranteed alignment of the
// element, which is affected by both the known alignment of the whole
// mem intrinsic and the alignment of the element. If the alignment of
@@ -1361,10 +1864,10 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// known alignment is just 4 bytes.
OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
}
-
+
Value *EltPtr = NewElts[i];
const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
-
+
// If we got down to a scalar, insert a load or store as appropriate.
if (EltTy->isSingleValueType()) {
if (isa<MemTransferInst>(MI)) {
@@ -1380,7 +1883,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
continue;
}
assert(isa<MemSetInst>(MI));
-
+
// If the stored element is zero (common case), just store a null
// constant.
Constant *StoreVal;
@@ -1400,7 +1903,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
TotalVal = TotalVal.shl(8);
TotalVal |= OneVal;
}
-
+
// Convert the integer value to the appropriate type.
StoreVal = ConstantInt::get(CI->getContext(), TotalVal);
if (ValTy->isPointerTy())
@@ -1408,12 +1911,12 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
else if (ValTy->isFloatingPointTy())
StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
assert(StoreVal->getType() == ValTy && "Type mismatch!");
-
+
// If the requested value was a vector constant, create it.
if (EltTy != ValTy) {
unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
- StoreVal = ConstantVector::get(&Elts[0], NumElts);
+ StoreVal = ConstantVector::get(Elts);
}
}
new StoreInst(StoreVal, EltPtr, MI);
@@ -1422,55 +1925,24 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// Otherwise, if we're storing a byte variable, use a memset call for
// this element.
}
-
- // Cast the element pointer to BytePtrTy.
- if (EltPtr->getType() != BytePtrTy)
- EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);
-
- // Cast the other pointer (if we have one) to BytePtrTy.
- if (OtherElt && OtherElt->getType() != BytePtrTy) {
- // Preserve address space of OtherElt
- const PointerType* OtherPTy = cast<PointerType>(OtherElt->getType());
- const PointerType* PTy = cast<PointerType>(BytePtrTy);
- if (OtherPTy->getElementType() != PTy->getElementType()) {
- Type *NewOtherPTy = PointerType::get(PTy->getElementType(),
- OtherPTy->getAddressSpace());
- OtherElt = new BitCastInst(OtherElt, NewOtherPTy,
- OtherElt->getNameStr(), MI);
- }
- }
-
+
unsigned EltSize = TD->getTypeAllocSize(EltTy);
-
+
+ IRBuilder<> Builder(MI);
+
// Finally, insert the meminst for this element.
- if (isa<MemTransferInst>(MI)) {
- Value *Ops[] = {
- SROADest ? EltPtr : OtherElt, // Dest ptr
- SROADest ? OtherElt : EltPtr, // Src ptr
- ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
- // Align
- ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
- MI->getVolatileCst()
- };
- // In case we fold the address space overloaded memcpy of A to B
- // with memcpy of B to C, change the function to be a memcpy of A to C.
- const Type *Tys[] = { Ops[0]->getType(), Ops[1]->getType(),
- Ops[2]->getType() };
- Module *M = MI->getParent()->getParent()->getParent();
- TheFn = Intrinsic::getDeclaration(M, MI->getIntrinsicID(), Tys, 3);
- CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
+ if (isa<MemSetInst>(MI)) {
+ Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize,
+ MI->isVolatile());
} else {
- assert(isa<MemSetInst>(MI));
- Value *Ops[] = {
- EltPtr, MI->getArgOperand(1), // Dest, Value,
- ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
- Zero, // Align
- ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
- };
- const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
- Module *M = MI->getParent()->getParent()->getParent();
- TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
- CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
+ assert(isa<MemTransferInst>(MI));
+ Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr
+ Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr
+
+ if (isa<MemCpyInst>(MI))
+ Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
+ else
+ Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
}
}
DeadInsts.push_back(MI);
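
The manual Intrinsic::getDeclaration/CallInst::Create sequences removed above are replaced by IRBuilder, which selects the right llvm.memset/memcpy/memmove overload from the operand types. A hedged sketch of the per-element emission, assuming EltPtr, Dst, Src, EltSize, and OtherEltAlign as in the loop above and this era's IRBuilder signatures (ptr[, src], size, align, isVolatile; exact overloads vary by LLVM version):

    IRBuilder<> Builder(MI);  // new instructions are inserted before MI
    if (isa<MemSetInst>(MI))
      Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize,
                           OtherEltAlign, MI->isVolatile());
    else if (isa<MemCpyInst>(MI))
      Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
    else
      Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
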
@@ -1486,12 +1958,13 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Value *SrcVal = SI->getOperand(0);
const Type *AllocaEltTy = AI->getAllocatedType();
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+
+ IRBuilder<> Builder(SI);
// Handle tail padding by extending the operand
if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
- SrcVal = new ZExtInst(SrcVal,
- IntegerType::get(SI->getContext(), AllocaSizeBits),
- "", SI);
+ SrcVal = Builder.CreateZExt(SrcVal,
+ IntegerType::get(SI->getContext(), AllocaSizeBits));
DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
<< '\n');
@@ -1500,47 +1973,44 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// have different ways to compute the element offset.
if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
const StructLayout *Layout = TD->getStructLayout(EltSTy);
-
+
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Get the number of bits to shift SrcVal to get the value.
const Type *FieldTy = EltSTy->getElementType(i);
uint64_t Shift = Layout->getElementOffsetInBits(i);
-
+
if (TD->isBigEndian())
Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
-
+
Value *EltVal = SrcVal;
if (Shift) {
Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
- EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
- "sroa.store.elt", SI);
+ EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
}
-
+
// Truncate down to an integer of the right size.
uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
-
+
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
-
+
if (FieldSizeBits != AllocaSizeBits)
- EltVal = new TruncInst(EltVal,
- IntegerType::get(SI->getContext(), FieldSizeBits),
- "", SI);
+ EltVal = Builder.CreateTrunc(EltVal,
+ IntegerType::get(SI->getContext(), FieldSizeBits));
Value *DestField = NewElts[i];
if (EltVal->getType() == FieldTy) {
// Storing to an integer field of this size, just do it.
} else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
// Bitcast to the right element type (for fp/vector values).
- EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
+ EltVal = Builder.CreateBitCast(EltVal, FieldTy);
} else {
// Otherwise, bitcast the dest pointer (for aggregates).
- DestField = new BitCastInst(DestField,
- PointerType::getUnqual(EltVal->getType()),
- "", SI);
+ DestField = Builder.CreateBitCast(DestField,
+ PointerType::getUnqual(EltVal->getType()));
}
new StoreInst(EltVal, DestField, SI);
}
-
+
} else {
const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
const Type *ArrayEltTy = ATy->getElementType();
@@ -1548,50 +2018,48 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
uint64_t Shift;
-
+
if (TD->isBigEndian())
Shift = AllocaSizeBits-ElementOffset;
- else
+ else
Shift = 0;
-
+
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Ignore zero sized fields like {}, they obviously contain no data.
if (ElementSizeBits == 0) continue;
-
+
Value *EltVal = SrcVal;
if (Shift) {
Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
- EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
- "sroa.store.elt", SI);
+ EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
}
-
+
// Truncate down to an integer of the right size.
if (ElementSizeBits != AllocaSizeBits)
- EltVal = new TruncInst(EltVal,
- IntegerType::get(SI->getContext(),
- ElementSizeBits),"",SI);
+ EltVal = Builder.CreateTrunc(EltVal,
+ IntegerType::get(SI->getContext(),
+ ElementSizeBits));
Value *DestField = NewElts[i];
if (EltVal->getType() == ArrayEltTy) {
// Storing to an integer field of this size, just do it.
} else if (ArrayEltTy->isFloatingPointTy() ||
ArrayEltTy->isVectorTy()) {
// Bitcast to the right element type (for fp/vector values).
- EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
+ EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy);
} else {
// Otherwise, bitcast the dest pointer (for aggregates).
- DestField = new BitCastInst(DestField,
- PointerType::getUnqual(EltVal->getType()),
- "", SI);
+ DestField = Builder.CreateBitCast(DestField,
+ PointerType::getUnqual(EltVal->getType()));
}
new StoreInst(EltVal, DestField, SI);
-
+
if (TD->isBigEndian())
Shift -= ElementOffset;
- else
+ else
Shift += ElementOffset;
}
}
-
+
DeadInsts.push_back(SI);
}
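
As a worked example of the shift-and-truncate scheme above: storing an i64 SrcVal into an alloca of type { i32, i32 } on a little-endian target decomposes into two field stores. A sketch using the same IRBuilder idioms:

    IRBuilder<> B(SI);
    // Field 0 occupies bits 0..31: truncate directly.
    Value *Lo = B.CreateTrunc(SrcVal, B.getInt32Ty());
    B.CreateStore(Lo, NewElts[0]);
    // Field 1 occupies bits 32..63: shift down, then truncate.
    Value *ShiftVal = ConstantInt::get(SrcVal->getType(), 32);
    Value *Hi = B.CreateLShr(SrcVal, ShiftVal, "sroa.store.elt");
    B.CreateStore(B.CreateTrunc(Hi, B.getInt32Ty()), NewElts[1]);
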
@@ -1603,10 +2071,10 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// and form the result value.
const Type *AllocaEltTy = AI->getAllocatedType();
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
-
+
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
<< '\n');
-
+
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
const StructLayout *Layout = 0;
@@ -1616,11 +2084,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
} else {
const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
- }
-
- Value *ResultVal =
+ }
+
+ Value *ResultVal =
Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));
-
+
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Load the value from the alloca. If the NewElt is an aggregate, cast
// the pointer to an integer of the same size before doing the load.
@@ -1628,11 +2096,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
const Type *FieldTy =
cast<PointerType>(SrcField->getType())->getElementType();
uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
-
+
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
-
- const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
+
+ const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
FieldSizeBits);
if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
!FieldTy->isVectorTy())
@@ -1650,17 +2118,17 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// we can shift and insert it.
if (SrcField->getType() != ResultVal->getType())
SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);
-
+
// Determine the number of bits to shift SrcField.
uint64_t Shift;
if (Layout) // Struct case.
Shift = Layout->getElementOffsetInBits(i);
else // Array case.
Shift = i*ArrayEltBitOffset;
-
+
if (TD->isBigEndian())
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
-
+
if (Shift) {
Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
@@ -1683,46 +2151,39 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
}
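
The load rewrite above is the mirror image: load each element, zero-extend to the full width, shift into position, and OR into an accumulator. A sketch for the same little-endian { i32, i32 } example, in the raw-instruction style of the surrounding code:

    Value *Result = Constant::getNullValue(
        IntegerType::get(LI->getContext(), 64));
    Value *Lo = new LoadInst(NewElts[0], "", LI);   // bits 0..31
    Value *Hi = new LoadInst(NewElts[1], "", LI);   // bits 32..63
    Lo = new ZExtInst(Lo, Result->getType(), "", LI);
    Hi = new ZExtInst(Hi, Result->getType(), "", LI);
    Hi = BinaryOperator::CreateShl(
        Hi, ConstantInt::get(Result->getType(), 32), "", LI);
    Result = BinaryOperator::CreateOr(Result, Lo, "", LI);
    Result = BinaryOperator::CreateOr(Result, Hi, "", LI);
    LI->replaceAllUsesWith(Result);
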
/// HasPadding - Return true if the specified type has any structure or
-/// alignment padding, false otherwise.
+/// alignment padding between the elements that would be split apart
+/// by SROA; return false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
- return HasPadding(ATy->getElementType(), TD);
-
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
- return HasPadding(VTy->getElementType(), TD);
-
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TD.getStructLayout(STy);
- unsigned PrevFieldBitOffset = 0;
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
-
- // Padding in sub-elements?
- if (HasPadding(STy->getElementType(i), TD))
- return true;
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Ty = ATy->getElementType();
+ return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
+ }
- // Check to see if there is any padding between this element and the
- // previous one.
- if (i) {
- unsigned PrevFieldEnd =
+ // SROA currently handles only Arrays and Structs.
+ const StructType *STy = cast<StructType>(Ty);
+ const StructLayout *SL = TD.getStructLayout(STy);
+ unsigned PrevFieldBitOffset = 0;
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
+
+ // Check to see if there is any padding between this element and the
+ // previous one.
+ if (i) {
+ unsigned PrevFieldEnd =
PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
- if (PrevFieldEnd < FieldBitOffset)
- return true;
- }
-
- PrevFieldBitOffset = FieldBitOffset;
- }
-
- // Check for tail padding.
- if (unsigned EltCount = STy->getNumElements()) {
- unsigned PrevFieldEnd = PrevFieldBitOffset +
- TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
- if (PrevFieldEnd < SL->getSizeInBits())
+ if (PrevFieldEnd < FieldBitOffset)
return true;
}
+ PrevFieldBitOffset = FieldBitOffset;
}
-
- return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
+ // Check for tail padding.
+ if (unsigned EltCount = STy->getNumElements()) {
+ unsigned PrevFieldEnd = PrevFieldBitOffset +
+ TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
+ if (PrevFieldEnd < SL->getSizeInBits())
+ return true;
+ }
+ return false;
}
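
A concrete instance of the walk above, assuming a target where i32 requires 4-byte alignment: in { i8, i32 } field 0 ends at bit 8 while field 1 starts at bit 32, so the interior-gap test fires; { i32, i32 } packs with no gap and no tail padding. In the struct case this reduces to:

    const StructLayout *SL = TD.getStructLayout(STy);    // STy = { i8, i32 }
    uint64_t PrevFieldEnd = SL->getElementOffsetInBits(0) +
                            TD.getTypeSizeInBits(STy->getElementType(0)); // 8
    uint64_t FieldBitOffset = SL->getElementOffsetInBits(1);              // 32
    bool HasGap = PrevFieldEnd < FieldBitOffset;         // true => padding
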
/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
@@ -1731,14 +2192,14 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// Loop over the use list of the alloca. We can only transform it if all of
// the users are safe to transform.
- AllocaInfo Info;
-
- isSafeForScalarRepl(AI, AI, 0, Info);
+ AllocaInfo Info(AI);
+
+ isSafeForScalarRepl(AI, 0, Info);
if (Info.isUnsafe) {
DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
return false;
}
-
+
// Okay, we know all the users are promotable. If the aggregate is a memcpy
// source and destination, we have to be careful. In particular, the memcpy
// could be moving around elements that live in structure padding of the LLVM
@@ -1748,6 +2209,20 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
HasPadding(AI->getAllocatedType(), *TD))
return false;
+ // If the alloca never has an access to just *part* of it, but is accessed
+ // via loads and stores, then we should use ConvertToScalarInfo to promote
+ // the alloca instead of promoting each piece at a time and inserting fission
+ // and fusion code.
+ if (!Info.hasSubelementAccess && Info.hasALoadOrStore) {
+ // If the struct/array just has one element, use basic SRoA.
+ if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
+ if (ST->getNumElements() > 1) return false;
+ } else {
+ if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1)
+ return false;
+ }
+ }
+
return true;
}
@@ -1760,7 +2235,7 @@ static bool PointsToConstantGlobal(Value *V) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return GV->isConstant();
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::BitCast ||
+ if (CE->getOpcode() == Instruction::BitCast ||
CE->getOpcode() == Instruction::GetElementPtr)
return PointsToConstantGlobal(CE->getOperand(0));
return false;
@@ -1771,18 +2246,19 @@ static bool PointsToConstantGlobal(Value *V) {
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
-/// the alloca, and if the source pointer is a pointer to a constant global, we
+/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
bool isOffset) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
User *U = cast<Instruction>(*UI);
- if (LoadInst *LI = dyn_cast<LoadInst>(U))
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
// Ignore non-volatile loads, they are always ok.
- if (!LI->isVolatile())
- continue;
-
+ if (LI->isVolatile()) return false;
+ continue;
+ }
+
if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
// If uses of the bitcast are ok, we are ok.
if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
@@ -1797,27 +2273,52 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
return false;
continue;
}
-
+
+ if (CallSite CS = U) {
+ // If this is a readonly/readnone call site, then we know it is just a
+ // load and we can ignore it.
+ if (CS.onlyReadsMemory())
+ continue;
+
+ // If this is the function being called then we treat it like a load and
+ // ignore it.
+ if (CS.isCallee(UI))
+ continue;
+
+ // If this is being passed as a byval argument, the caller is making a
+ // copy, so it is only a read of the alloca.
+ unsigned ArgNo = CS.getArgumentNo(UI);
+ if (CS.paramHasAttr(ArgNo+1, Attribute::ByVal))
+ continue;
+ }
+
// If this isn't our memcpy/memmove, reject it as something we can't
// handle.
MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
if (MI == 0)
return false;
+ // If the transfer is using the alloca as its source, then ignore it
+ // since it is only a load (unless the transfer is volatile).
+ if (UI.getOperandNo() == 1) {
+ if (MI->isVolatile()) return false;
+ continue;
+ }
+
// If we already have seen a copy, reject the second one.
if (TheCopy) return false;
-
+
// If the pointer has been offset from the start of the alloca, we can't
// safely handle this.
if (isOffset) return false;
// If the memintrinsic isn't using the alloca as the dest, reject it.
if (UI.getOperandNo() != 0) return false;
-
+
// If the source of the memcpy/move is not a constant global, reject it.
if (!PointsToConstantGlobal(MI->getSource()))
return false;
-
+
// Otherwise, the transform is safe. Remember the copy instruction.
TheCopy = MI;
}
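
This analysis drives the copy-from-constant-global optimization: when the only writer to an alloca is a single memcpy/memmove from a constant global, loads of the alloca can read the global directly. A hedged sketch of the caller's side (the real pass does additional cleanup of the intrinsic's operands):

    MemTransferInst *TheCopy = 0;
    if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, /*isOffset=*/false)) {
      // Every other use is a read, so the alloca can be replaced by the
      // (suitably casted) constant-global source of the one copy.
      Value *Src = TheCopy->getSource();
      if (Src->getType() != AI->getType())
        Src = new BitCastInst(Src, AI->getType(), "", AI);
      AI->replaceAllUsesWith(Src);
      TheCopy->eraseFromParent();
      AI->eraseFromParent();
    }
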
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 360749c..ce5dd73 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -42,7 +42,9 @@ STATISTIC(NumSimpl, "Number of blocks simplified");
namespace {
struct CFGSimplifyPass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGSimplifyPass() : FunctionPass(ID) {}
+ CFGSimplifyPass() : FunctionPass(ID) {
+ initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
};
@@ -50,7 +52,7 @@ namespace {
char CFGSimplifyPass::ID = 0;
INITIALIZE_PASS(CFGSimplifyPass, "simplifycfg",
- "Simplify the CFG", false, false);
+ "Simplify the CFG", false, false)
// Public interface to the CFGSimplification pass
FunctionPass *llvm::createCFGSimplificationPass() {
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
index 3ec70ec..70ff32e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
@@ -32,7 +32,9 @@ namespace {
const TargetData *TD;
public:
static char ID; // Pass identification
- SimplifyHalfPowrLibCalls() : FunctionPass(ID) {}
+ SimplifyHalfPowrLibCalls() : FunctionPass(ID) {
+ initializeSimplifyHalfPowrLibCallsPass(*PassRegistry::getPassRegistry());
+ }
bool runOnFunction(Function &F);
@@ -47,7 +49,7 @@ namespace {
} // end anonymous namespace.
INITIALIZE_PASS(SimplifyHalfPowrLibCalls, "simplify-libcalls-halfpowr",
- "Simplify half_powr library calls", false, false);
+ "Simplify half_powr library calls", false, false)
// Public interface to the Simplify HalfPowr LibCalls pass.
FunctionPass *llvm::createSimplifyHalfPowrLibCallsPass() {
@@ -95,7 +97,8 @@ InlineHalfPowrs(const std::vector<Instruction *> &HalfPowrs,
InlineFunctionInfo IFI(0, TD);
bool B = InlineFunction(Call, IFI);
- assert(B && "half_powr didn't inline?"); B=B;
+ assert(B && "half_powr didn't inline?");
+ (void)B;
BasicBlock *NewBody = NewBlock->getSinglePredecessor();
assert(NewBody);
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index d7ce53f..ec45b71 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -123,7 +123,7 @@ struct StrCatOpt : public LibCallOptimization {
// Verify the "strcat" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- FT->getReturnType() != Type::getInt8PtrTy(*Context) ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType())
return 0;
@@ -160,9 +160,8 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
- EmitMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len+1),
- 1, false, B, TD);
+ B.CreateMemCpy(CpyDst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
}
};
@@ -174,7 +173,7 @@ struct StrNCatOpt : public StrCatOpt {
// Verify the "strncat" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- FT->getReturnType() != Type::getInt8PtrTy(*Context) ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType() ||
!FT->getParamType(2)->isIntegerTy())
@@ -222,8 +221,9 @@ struct StrChrOpt : public LibCallOptimization {
// Verify the "strchr" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- FT->getReturnType() != Type::getInt8PtrTy(*Context) ||
- FT->getParamType(0) != FT->getReturnType())
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ !FT->getParamType(1)->isIntegerTy(32))
return 0;
Value *SrcStr = CI->getArgOperand(0);
@@ -252,22 +252,55 @@ struct StrChrOpt : public LibCallOptimization {
// strchr can find the nul character.
Str += '\0';
- char CharValue = CharC->getSExtValue();
// Compute the offset.
- uint64_t i = 0;
- while (1) {
- if (i == Str.size()) // Didn't find the char. strchr returns null.
- return Constant::getNullValue(CI->getType());
- // Did we find our match?
- if (Str[i] == CharValue)
- break;
- ++i;
- }
+ size_t I = Str.find(CharC->getSExtValue());
+ if (I == std::string::npos) // Didn't find the char. strchr returns null.
+ return Constant::getNullValue(CI->getType());
// strchr(s+n,c) -> gep(s+n+i,c)
- Value *Idx = ConstantInt::get(Type::getInt64Ty(*Context), i);
- return B.CreateGEP(SrcStr, Idx, "strchr");
+ return B.CreateGEP(SrcStr, B.getInt64(I), "strchr");
+ }
+};
+
+//===---------------------------------------===//
+// 'strrchr' Optimizations
+
+struct StrRChrOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Verify the "strrchr" function prototype.
+ const FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getReturnType() != B.getInt8PtrTy() ||
+ FT->getParamType(0) != FT->getReturnType() ||
+ !FT->getParamType(1)->isIntegerTy(32))
+ return 0;
+
+ Value *SrcStr = CI->getArgOperand(0);
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+
+ // Cannot fold anything if we're not looking for a constant.
+ if (!CharC)
+ return 0;
+
+ std::string Str;
+ if (!GetConstantStringInfo(SrcStr, Str)) {
+ // strrchr(s, 0) -> strchr(s, 0)
+ if (TD && CharC->isZero())
+ return EmitStrChr(SrcStr, '\0', B, TD);
+ return 0;
+ }
+
+ // strrchr can find the nul character.
+ Str += '\0';
+
+ // Compute the offset.
+ size_t I = Str.rfind(CharC->getSExtValue());
+ if (I == std::string::npos) // Didn't find the char. Return null.
+ return Constant::getNullValue(CI->getType());
+
+ // strrchr(s+n,c) -> gep(s+n+i,c)
+ return B.CreateGEP(SrcStr, B.getInt64(I), "strrchr");
}
};
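
The offset computation mirrors the C library's run-time behavior; note the NUL appended to Str first, which is what lets strrchr(s, 0) resolve to the terminator. The host-side arithmetic as a standalone, runnable sketch:

    #include <cassert>
    #include <string>

    int main() {
      std::string Str = "hello";
      Str += '\0';                                  // make rfind see the NUL
      assert(Str.rfind('l') == 3);                  // strrchr("hello",'l') -> s+3
      assert(Str.rfind('\0') == 5);                 // strrchr(s, 0) -> s+strlen(s)
      assert(Str.rfind('z') == std::string::npos);  // no match -> fold to null
      return 0;
    }
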
@@ -281,7 +314,7 @@ struct StrCmpOpt : public LibCallOptimization {
if (FT->getNumParams() != 2 ||
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != Type::getInt8PtrTy(*Context))
+ FT->getParamType(0) != B.getInt8PtrTy())
return 0;
Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
@@ -329,7 +362,7 @@ struct StrNCmpOpt : public LibCallOptimization {
if (FT->getNumParams() != 3 ||
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
return 0;
@@ -384,7 +417,7 @@ struct StrCpyOpt : public LibCallOptimization {
if (FT->getNumParams() != NumParams ||
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != Type::getInt8PtrTy(*Context))
+ FT->getParamType(0) != B.getInt8PtrTy())
return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
@@ -405,9 +438,8 @@ struct StrCpyOpt : public LibCallOptimization {
ConstantInt::get(TD->getIntPtrType(*Context), Len),
CI->getArgOperand(2), B, TD);
else
- EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- 1, false, B, TD);
+ B.CreateMemCpy(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
return Dst;
}
};
@@ -420,7 +452,7 @@ struct StrNCpyOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
return 0;
@@ -435,8 +467,7 @@ struct StrNCpyOpt : public LibCallOptimization {
if (SrcLen == 0) {
// strncpy(x, "", y) -> memset(x, '\0', y, 1)
- EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'),
- LenOp, false, B, TD);
+ B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1);
return Dst;
}
@@ -455,9 +486,8 @@ struct StrNCpyOpt : public LibCallOptimization {
if (Len > SrcLen+1) return 0;
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
- EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- 1, false, B, TD);
+ B.CreateMemCpy(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
return Dst;
}
@@ -470,7 +500,7 @@ struct StrLenOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 ||
- FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getReturnType()->isIntegerTy())
return 0;
@@ -488,6 +518,45 @@ struct StrLenOpt : public LibCallOptimization {
}
};
+
+//===---------------------------------------===//
+// 'strpbrk' Optimizations
+
+struct StrPBrkOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ const FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ FT->getReturnType() != FT->getParamType(0))
+ return 0;
+
+ std::string S1, S2;
+ bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strpbrk(s, "") -> NULL
+ // strpbrk("", s) -> NULL
+ if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2) {
+ size_t I = S1.find_first_of(S2);
+ if (I == std::string::npos) // No match.
+ return Constant::getNullValue(CI->getType());
+
+ return B.CreateGEP(CI->getArgOperand(0), B.getInt64(I), "strpbrk");
+ }
+
+ // strpbrk(s, "a") -> strchr(s, 'a')
+ if (TD && HasS2 && S2.size() == 1)
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD);
+
+ return 0;
+ }
+};
+
//===---------------------------------------===//
// 'strto*' Optimizations. This handles strtol, strtod, strtof, strtoul, etc.
@@ -501,7 +570,8 @@ struct StrToOpt : public LibCallOptimization {
Value *EndPtr = CI->getArgOperand(1);
if (isa<ConstantPointerNull>(EndPtr)) {
- CI->setOnlyReadsMemory();
+ // With a null EndPtr, this function won't capture the main argument.
+ // It would be readonly too, except that it still may write to errno.
CI->addAttribute(1, Attribute::NoCapture);
}
@@ -510,6 +580,67 @@ struct StrToOpt : public LibCallOptimization {
};
//===---------------------------------------===//
+// 'strspn' Optimizations
+
+struct StrSpnOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ const FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ !FT->getReturnType()->isIntegerTy())
+ return 0;
+
+ std::string S1, S2;
+ bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strspn(s, "") -> 0
+ // strspn("", s) -> 0
+ if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2)
+ return ConstantInt::get(CI->getType(), strspn(S1.c_str(), S2.c_str()));
+
+ return 0;
+ }
+};
+
+//===---------------------------------------===//
+// 'strcspn' Optimizations
+
+struct StrCSpnOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ const FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() != 2 ||
+ FT->getParamType(0) != B.getInt8PtrTy() ||
+ FT->getParamType(1) != FT->getParamType(0) ||
+ !FT->getReturnType()->isIntegerTy())
+ return 0;
+
+ std::string S1, S2;
+ bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+
+ // strcspn("", s) -> 0
+ if (HasS1 && S1.empty())
+ return Constant::getNullValue(CI->getType());
+
+ // Constant folding.
+ if (HasS1 && HasS2)
+ return ConstantInt::get(CI->getType(), strcspn(S1.c_str(), S2.c_str()));
+
+ // strcspn(s, "") -> strlen(s)
+ if (TD && HasS2 && S2.empty())
+ return EmitStrLen(CI->getArgOperand(0), B, TD);
+
+ return 0;
+ }
+};
+
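
Both folds evaluate the call on the host via the C library, which is sound because strspn/strcspn depend only on their (now constant) string contents. A runnable sketch of the constant cases handled above:

    #include <cassert>
    #include <cstring>

    int main() {
      assert(std::strspn("abcde", "abc") == 3);   // strspn(s, a)  -> const_int
      assert(std::strcspn("abcde", "dex") == 3);  // strcspn(s, a) -> const_int
      assert(std::strspn("", "abc") == 0);        // strspn("", a) -> 0
      assert(std::strcspn("abc", "") == 3);       // strcspn(s,"") -> strlen(s)
      return 0;
    }
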
+//===---------------------------------------===//
// 'strstr' Optimizations
struct StrStrOpt : public LibCallOptimization {
@@ -637,8 +768,8 @@ struct MemCpyOpt : public LibCallOptimization {
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
- EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, false, B, TD);
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
};
@@ -659,8 +790,8 @@ struct MemMoveOpt : public LibCallOptimization {
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
- EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, false, B, TD);
+ B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
};
@@ -681,9 +812,8 @@ struct MemSetOpt : public LibCallOptimization {
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getArgOperand(1),
- Type::getInt8Ty(*Context), false);
- EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
+ B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
};
@@ -765,12 +895,10 @@ struct Exp2Opt : public LibCallOptimization {
Value *LdExpArg = 0;
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
- LdExpArg = B.CreateSExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty(), "tmp");
} else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
- LdExpArg = B.CreateZExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ LdExpArg = B.CreateZExt(OpC->getOperand(0), B.getInt32Ty(), "tmp");
}
if (LdExpArg) {
@@ -789,7 +917,7 @@ struct Exp2Opt : public LibCallOptimization {
Module *M = Caller->getParent();
Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
Op->getType(),
- Type::getInt32Ty(*Context),NULL);
+ B.getInt32Ty(), NULL);
CallInst *CI = B.CreateCall2(Callee, One, LdExpArg);
if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
@@ -819,7 +947,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
Value *V = Cast->getOperand(0);
V = EmitUnaryFloatFnCall(V, Callee->getName().data(), B,
Callee->getAttributes());
- return B.CreateFPExt(V, Type::getDoubleTy(*Context));
+ return B.CreateFPExt(V, B.getDoubleTy());
}
};
@@ -846,8 +974,8 @@ struct FFSOpt : public LibCallOptimization {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->getValue() == 0) // ffs(0) -> 0.
return Constant::getNullValue(CI->getType());
- return ConstantInt::get(Type::getInt32Ty(*Context), // ffs(c) -> cttz(c)+1
- CI->getValue().countTrailingZeros()+1);
+ // ffs(c) -> cttz(c)+1
+ return B.getInt32(CI->getValue().countTrailingZeros() + 1);
}
// ffs(x) -> x != 0 ? (i32)llvm.cttz(x)+1 : 0
@@ -856,11 +984,10 @@ struct FFSOpt : public LibCallOptimization {
Intrinsic::cttz, &ArgType, 1);
Value *V = B.CreateCall(F, Op, "cttz");
V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1), "tmp");
- V = B.CreateIntCast(V, Type::getInt32Ty(*Context), false, "tmp");
+ V = B.CreateIntCast(V, B.getInt32Ty(), false, "tmp");
Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType), "tmp");
- return B.CreateSelect(Cond, V,
- ConstantInt::get(Type::getInt32Ty(*Context), 0));
+ return B.CreateSelect(Cond, V, B.getInt32(0));
}
};
@@ -877,10 +1004,8 @@ struct IsDigitOpt : public LibCallOptimization {
// isdigit(c) -> (c-'0') <u 10
Value *Op = CI->getArgOperand(0);
- Op = B.CreateSub(Op, ConstantInt::get(Type::getInt32Ty(*Context), '0'),
- "isdigittmp");
- Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 10),
- "isdigit");
+ Op = B.CreateSub(Op, B.getInt32('0'), "isdigittmp");
+ Op = B.CreateICmpULT(Op, B.getInt32(10), "isdigit");
return B.CreateZExt(Op, CI->getType());
}
};
@@ -898,8 +1023,7 @@ struct IsAsciiOpt : public LibCallOptimization {
// isascii(c) -> c <u 128
Value *Op = CI->getArgOperand(0);
- Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 128),
- "isascii");
+ Op = B.CreateICmpULT(Op, B.getInt32(128), "isascii");
return B.CreateZExt(Op, CI->getType());
}
};
@@ -917,8 +1041,7 @@ struct AbsOpt : public LibCallOptimization {
// abs(x) -> x >s -1 ? x : -x
Value *Op = CI->getArgOperand(0);
- Value *Pos = B.CreateICmpSGT(Op,
- Constant::getAllOnesValue(Op->getType()),
+ Value *Pos = B.CreateICmpSGT(Op, Constant::getAllOnesValue(Op->getType()),
"ispos");
Value *Neg = B.CreateNeg(Op, "neg");
return B.CreateSelect(Pos, Op, Neg);
@@ -969,11 +1092,15 @@ struct PrintFOpt : public LibCallOptimization {
return CI->use_empty() ? (Value*)CI :
ConstantInt::get(CI->getType(), 0);
- // printf("x") -> putchar('x'), even for '%'. Return the result of putchar
- // in case there is an error writing to stdout.
+ // Do not do any of the following transformations if the printf return
+ // value is used; in general, the printf return value is not compatible
+ // with either putchar() or puts().
+ if (!CI->use_empty())
+ return 0;
+
+ // printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
- Value *Res = EmitPutChar(ConstantInt::get(Type::getInt32Ty(*Context),
- FormatStr[0]), B, TD);
+ Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD);
if (CI->use_empty()) return CI;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1004,8 +1131,7 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%s\n", str) --> puts(str)
if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
- CI->getArgOperand(1)->getType()->isPointerTy() &&
- CI->use_empty()) {
+ CI->getArgOperand(1)->getType()->isPointerTy()) {
EmitPutS(CI->getArgOperand(1), B, TD);
return CI;
}
@@ -1042,9 +1168,9 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
- EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), // Copy the
- ConstantInt::get(TD->getIntPtrType(*Context), // nul byte.
- FormatStr.size() + 1), 1, false, B, TD);
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
+ FormatStr.size() + 1), 1); // nul byte.
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1058,13 +1184,11 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
- Value *V = B.CreateTrunc(CI->getArgOperand(2),
- Type::getInt8Ty(*Context), "char");
+ Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
Value *Ptr = CastToCStr(CI->getArgOperand(0), B);
B.CreateStore(V, Ptr);
- Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1),
- "nul");
- B.CreateStore(Constant::getNullValue(Type::getInt8Ty(*Context)), Ptr);
+ Ptr = B.CreateGEP(Ptr, B.getInt32(1), "nul");
+ B.CreateStore(B.getInt8(0), Ptr);
return ConstantInt::get(CI->getType(), 1);
}
@@ -1080,8 +1204,7 @@ struct SPrintFOpt : public LibCallOptimization {
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
- EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(2),
- IncLen, 1, false, B, TD);
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(2), IncLen, 1);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
@@ -1208,6 +1331,34 @@ struct FPrintFOpt : public LibCallOptimization {
}
};
+//===---------------------------------------===//
+// 'puts' Optimizations
+
+struct PutsOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ // Require one fixed pointer argument and an integer/void result.
+ const FunctionType *FT = Callee->getFunctionType();
+ if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
+ !(FT->getReturnType()->isIntegerTy() ||
+ FT->getReturnType()->isVoidTy()))
+ return 0;
+
+ // Check for a constant string.
+ std::string Str;
+ if (!GetConstantStringInfo(CI->getArgOperand(0), Str))
+ return 0;
+
+ if (Str.empty() && CI->use_empty()) {
+ // puts("") -> putchar('\n')
+ Value *Res = EmitPutChar(B.getInt32('\n'), B, TD);
+ if (CI->use_empty()) return CI;
+ return B.CreateIntCast(Res, CI->getType(), true);
+ }
+
+ return 0;
+ }
+};
+
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
@@ -1220,10 +1371,10 @@ namespace {
class SimplifyLibCalls : public FunctionPass {
StringMap<LibCallOptimization*> Optimizations;
// String and Memory LibCall Optimizations
- StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrCmpOpt StrCmp;
- StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
- StrNCpyOpt StrNCpy; StrLenOpt StrLen;
- StrToOpt StrTo; StrStrOpt StrStr;
+ StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrRChrOpt StrRChr;
+ StrCmpOpt StrCmp; StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
+ StrNCpyOpt StrNCpy; StrLenOpt StrLen; StrPBrkOpt StrPBrk;
+ StrToOpt StrTo; StrSpnOpt StrSpn; StrCSpnOpt StrCSpn; StrStrOpt StrStr;
MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet;
// Math Library Optimizations
PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP;
@@ -1233,11 +1384,14 @@ namespace {
// Formatting and IO Optimizations
SPrintFOpt SPrintF; PrintFOpt PrintF;
FWriteOpt FWrite; FPutsOpt FPuts; FPrintFOpt FPrintF;
+ PutsOpt Puts;
bool Modified; // This is only used by doInitialization.
public:
static char ID; // Pass identification
- SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true) {}
+ SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true) {
+ initializeSimplifyLibCallsPass(*PassRegistry::getPassRegistry());
+ }
void InitOptimizations();
bool runOnFunction(Function &F);
@@ -1255,7 +1409,7 @@ namespace {
} // end anonymous namespace.
INITIALIZE_PASS(SimplifyLibCalls, "simplify-libcalls",
- "Simplify well-known library calls", false, false);
+ "Simplify well-known library calls", false, false)
// Public interface to the Simplify LibCalls pass.
FunctionPass *llvm::createSimplifyLibCallsPass() {
@@ -1269,11 +1423,13 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["strcat"] = &StrCat;
Optimizations["strncat"] = &StrNCat;
Optimizations["strchr"] = &StrChr;
+ Optimizations["strrchr"] = &StrRChr;
Optimizations["strcmp"] = &StrCmp;
Optimizations["strncmp"] = &StrNCmp;
Optimizations["strcpy"] = &StrCpy;
Optimizations["strncpy"] = &StrNCpy;
Optimizations["strlen"] = &StrLen;
+ Optimizations["strpbrk"] = &StrPBrk;
Optimizations["strtol"] = &StrTo;
Optimizations["strtod"] = &StrTo;
Optimizations["strtof"] = &StrTo;
@@ -1281,6 +1437,8 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["strtoll"] = &StrTo;
Optimizations["strtold"] = &StrTo;
Optimizations["strtoull"] = &StrTo;
+ Optimizations["strspn"] = &StrSpn;
+ Optimizations["strcspn"] = &StrCSpn;
Optimizations["strstr"] = &StrStr;
Optimizations["memcmp"] = &MemCmp;
Optimizations["memcpy"] = &MemCpy;
@@ -1341,6 +1499,7 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["fwrite"] = &FWrite;
Optimizations["fputs"] = &FPuts;
Optimizations["fprintf"] = &FPrintF;
+ Optimizations["puts"] = &Puts;
}
@@ -2155,9 +2314,6 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// * pow(sqrt(x),y) -> pow(x,y*0.5)
// * pow(pow(x,y),z)-> pow(x,y*z)
//
-// puts:
-// * puts("") -> putchar('\n')
-//
// round, roundf, roundl:
// * round(cnst) -> cnst'
//
@@ -2173,24 +2329,6 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// stpcpy:
// * stpcpy(str, "literal") ->
// llvm.memcpy(str,"literal",strlen("literal")+1,1)
-// strrchr:
-// * strrchr(s,c) -> reverse_offset_of_in(c,s)
-// (if c is a constant integer and s is a constant string)
-// * strrchr(s1,0) -> strchr(s1,0)
-//
-// strpbrk:
-// * strpbrk(s,a) -> offset_in_for(s,a)
-// (if s and a are both constant strings)
-// * strpbrk(s,"") -> 0
-// * strpbrk(s,a) -> strchr(s,a[0]) (if a is constant string of length 1)
-//
-// strspn, strcspn:
-// * strspn(s,a) -> const_int (if both args are constant)
-// * strspn("",a) -> 0
-// * strspn(s,"") -> 0
-// * strcspn(s,a) -> const_int (if both args are constant)
-// * strcspn("",a) -> 0
-// * strcspn(s,"") -> strlen(a)
//
// tan, tanf, tanl:
// * tan(atan(x)) -> x
diff --git a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
index 95d3ded..705f442 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -35,7 +35,9 @@ namespace {
public:
static char ID; // Pass identification
- Sinking() : FunctionPass(ID) {}
+ Sinking() : FunctionPass(ID) {
+ initializeSinkingPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -56,7 +58,11 @@ namespace {
} // end anonymous namespace
char Sinking::ID = 0;
-INITIALIZE_PASS(Sinking, "sink", "Code sinking", false, false);
+INITIALIZE_PASS_BEGIN(Sinking, "sink", "Code sinking", false, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(Sinking, "sink", "Code sinking", false, false)
FunctionPass *llvm::createSinkingPass() { return new Sinking(); }
@@ -150,11 +156,10 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
if (L->isVolatile()) return false;
- Value *Ptr = L->getPointerOperand();
- unsigned Size = AA->getTypeStoreSize(L->getType());
+ AliasAnalysis::Location Loc = AA->getLocation(L);
for (SmallPtrSet<Instruction *, 8>::iterator I = Stores.begin(),
E = Stores.end(); I != E; ++I)
- if (AA->getModRefInfo(*I, Ptr, Size) & AliasAnalysis::Mod)
+ if (AA->getModRefInfo(*I, Loc) & AliasAnalysis::Mod)
return false;
}
@@ -163,7 +168,10 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
return false;
}
- return Inst->isSafeToSpeculativelyExecute();
+ if (isa<TerminatorInst>(Inst) || isa<PHINode>(Inst))
+ return false;
+
+ return true;
}
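
AliasAnalysis::Location bundles the pointer, the access size, and the TBAA tag into one value, so the mod/ref query above no longer recomputes the store size by hand. A hedged usage sketch; St stands in for each instruction in the Stores set, and the explicit-constructor form is an assumption about this interface's shape:

    // Derive the queried location straight from the load.
    AliasAnalysis::Location Loc = AA->getLocation(L);
    // Roughly equivalent explicit form (size in bytes; TBAA tag omitted).
    AliasAnalysis::Location Loc2(L->getPointerOperand(),
                                 AA->getTypeStoreSize(L->getType()));
    if (AA->getModRefInfo(St, Loc) & AliasAnalysis::Mod)
      return false;  // St may write the loaded memory; cannot sink the load
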
/// SinkInstruction - Determine whether it is safe to sink the specified machine
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp b/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
index 2e437ac..9dd83c0 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
@@ -26,14 +26,14 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Transforms/Utils/Local.h"
#include <map>
using namespace llvm;
@@ -49,7 +49,9 @@ namespace {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- TailDup() : FunctionPass(ID) {}
+ TailDup() : FunctionPass(ID) {
+ initializeTailDupPass(*PassRegistry::getPassRegistry());
+ }
private:
inline bool shouldEliminateUnconditionalBranch(TerminatorInst *, unsigned);
@@ -59,7 +61,7 @@ namespace {
}
char TailDup::ID = 0;
-INITIALIZE_PASS(TailDup, "tailduplicate", "Tail Duplication", false, false);
+INITIALIZE_PASS(TailDup, "tailduplicate", "Tail Duplication", false, false)
// Public interface to the Tail Duplication pass
FunctionPass *llvm::createTailDuplicationPass() { return new TailDup(); }
@@ -360,8 +362,8 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
Instruction *Inst = BI++;
if (isInstructionTriviallyDead(Inst))
Inst->eraseFromParent();
- else if (Constant *C = ConstantFoldInstruction(Inst)) {
- Inst->replaceAllUsesWith(C);
+ else if (Value *V = SimplifyInstruction(Inst)) {
+ Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
}
}
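
SimplifyInstruction subsumes the old ConstantFoldInstruction call: beyond folding constant operands, it catches algebraic identities (x + 0, single-value PHIs, and the like), and it only ever returns a value that already exists, so the replace-and-erase never creates anything new. The cleanup loop in isolation, with NewBlock standing for the duplicated tail:

    for (BasicBlock::iterator BI = NewBlock->begin(); BI != NewBlock->end(); ) {
      Instruction *Inst = BI++;          // advance before a possible erase
      if (isInstructionTriviallyDead(Inst))
        Inst->eraseFromParent();
      else if (Value *V = SimplifyInstruction(Inst)) {
        Inst->replaceAllUsesWith(V);     // V already exists in the function
        Inst->eraseFromParent();
      }
    }
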
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 3717254..5b6bc04 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -52,31 +52,52 @@
#define DEBUG_TYPE "tailcallelim"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumEliminated, "Number of tail calls removed");
+STATISTIC(NumRetDuped, "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");
namespace {
struct TailCallElim : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- TailCallElim() : FunctionPass(ID) {}
+ TailCallElim() : FunctionPass(ID) {
+ initializeTailCallElimPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
private:
+ CallInst *FindTRECandidate(Instruction *I,
+ bool CannotTailCallElimCallsMarkedTail);
+ bool EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
+ BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVector<PHINode*, 8> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail);
+ bool FoldReturnAndProcessPred(BasicBlock *BB,
+ ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVector<PHINode*, 8> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail);
bool ProcessReturningBlock(ReturnInst *RI, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
@@ -88,7 +109,7 @@ namespace {
char TailCallElim::ID = 0;
INITIALIZE_PASS(TailCallElim, "tailcallelim",
- "Tail Call Elimination", false, false);
+ "Tail Call Elimination", false, false)
// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
@@ -133,7 +154,6 @@ bool TailCallElim::runOnFunction(Function &F) {
bool TailCallsAreMarkedTail = false;
SmallVector<PHINode*, 8> ArgumentPHIs;
bool MadeChange = false;
-
bool FunctionContainsEscapingAllocas = false;
// CannotTCETailMarkedCall - If true, we cannot perform TCE on tail calls
@@ -160,10 +180,17 @@ bool TailCallElim::runOnFunction(Function &F) {
return false;
// Second pass, change any tail calls to loops.
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator()))
- MadeChange |= ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
+ bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
ArgumentPHIs,CannotTCETailMarkedCall);
+ if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
+ Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
+ TailCallsAreMarkedTail, ArgumentPHIs,
+ CannotTCETailMarkedCall);
+ MadeChange |= Change;
+ }
+ }
// If we eliminated any tail recursions, it's possible that we inserted some
// silly PHI nodes which just merge an initial value (the incoming operand)
@@ -175,7 +202,7 @@ bool TailCallElim::runOnFunction(Function &F) {
PHINode *PN = ArgumentPHIs[i];
// If the PHI Node is a dynamic constant, replace it with the value it is.
- if (Value *PNV = PN->hasConstantValue()) {
+ if (Value *PNV = SimplifyInstruction(PN)) {
PN->replaceAllUsesWith(PNV);
PN->eraseFromParent();
}
@@ -322,41 +349,47 @@ Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
return getCommonReturnValue(cast<ReturnInst>(I->use_back()), CI);
}
-bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
- bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
- bool CannotTailCallElimCallsMarkedTail) {
- BasicBlock *BB = Ret->getParent();
+static Instruction *FirstNonDbg(BasicBlock::iterator I) {
+ while (isa<DbgInfoIntrinsic>(I))
+ ++I;
+ return &*I;
+}
+
+CallInst*
+TailCallElim::FindTRECandidate(Instruction *TI,
+ bool CannotTailCallElimCallsMarkedTail) {
+ BasicBlock *BB = TI->getParent();
Function *F = BB->getParent();
- if (&BB->front() == Ret) // Make sure there is something before the ret...
- return false;
+ if (&BB->front() == TI) // Make sure there is something before the terminator.
+ return 0;
// Scan backwards from the return, checking to see if there is a tail call in
// this block. If so, set CI to it.
- CallInst *CI;
- BasicBlock::iterator BBI = Ret;
- while (1) {
+ CallInst *CI = 0;
+ BasicBlock::iterator BBI = TI;
+ while (true) {
CI = dyn_cast<CallInst>(BBI);
if (CI && CI->getCalledFunction() == F)
break;
if (BBI == BB->begin())
- return false; // Didn't find a potential tail call.
+ return 0; // Didn't find a potential tail call.
--BBI;
}
// If this call is marked as a tail call, and if there are dynamic allocas in
// the function, we cannot perform this optimization.
if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
- return false;
+ return 0;
// As a special case, detect code like this:
// double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
// and disable this xform in this case, because the code generator will
// lower the call to fabs into inline code.
if (BB == &F->getEntryBlock() &&
- &BB->front() == CI && &*++BB->begin() == Ret &&
+ FirstNonDbg(BB->front()) == CI &&
+ FirstNonDbg(llvm::next(BB->begin())) == TI &&
callIsSmall(F)) {
// A single-block function with just a call and a return. Check that
// the arguments match.
@@ -367,9 +400,17 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
for (; I != E && FI != FE; ++I, ++FI)
if (*I != &*FI) break;
if (I == E && FI == FE)
- return false;
+ return 0;
}
+ return CI;
+}
+
+bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
+ BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVector<PHINode*, 8> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail) {
// If we are introducing accumulator recursion to eliminate operations after
// the call instruction that are both associative and commutative, the initial
// value for the accumulator is placed in this variable. If this value is set
@@ -387,7 +428,8 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// tail call if all of the instructions between the call and the return are
// movable to above the call itself, leaving the call next to the return.
// Check that this is the case now.
- for (BBI = CI, ++BBI; &*BBI != Ret; ++BBI) {
+ BasicBlock::iterator BBI = CI;
+ for (++BBI; &*BBI != Ret; ++BBI) {
if (CanMoveAboveCall(BBI, CI)) continue;
// If we can't move the instruction above the call, it might be because it
@@ -424,6 +466,9 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
return false;
}
+ BasicBlock *BB = Ret->getParent();
+ Function *F = BB->getParent();
+
// OK! We can transform this tail call. If this is the first one found,
// create the new entry block, allowing us to branch back to the old entry.
if (OldEntry == 0) {
@@ -533,3 +578,53 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
++NumEliminated;
return true;
}
+
+bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
+ ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVector<PHINode*, 8> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail) {
+ bool Change = false;
+
+ // If the return block contains nothing but the return and PHI's,
+ // there might be an opportunity to duplicate the return in its
+  // predecessors and perform TRE there. Look for predecessors that end
+ // in unconditional branch and recursive call(s).
+ SmallVector<BranchInst*, 8> UncondBranchPreds;
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *Pred = *PI;
+ TerminatorInst *PTI = Pred->getTerminator();
+ if (BranchInst *BI = dyn_cast<BranchInst>(PTI))
+ if (BI->isUnconditional())
+ UncondBranchPreds.push_back(BI);
+ }
+
+ while (!UncondBranchPreds.empty()) {
+ BranchInst *BI = UncondBranchPreds.pop_back_val();
+ BasicBlock *Pred = BI->getParent();
+ if (CallInst *CI = FindTRECandidate(BI, CannotTailCallElimCallsMarkedTail)){
+ DEBUG(dbgs() << "FOLDING: " << *BB
+ << "INTO UNCOND BRANCH PRED: " << *Pred);
+ EliminateRecursiveTailCall(CI, FoldReturnIntoUncondBranch(Ret, BB, Pred),
+ OldEntry, TailCallsAreMarkedTail, ArgumentPHIs,
+ CannotTailCallElimCallsMarkedTail);
+ ++NumRetDuped;
+ Change = true;
+ }
+ }
+
+ return Change;
+}
+
+bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVector<PHINode*, 8> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail) {
+ CallInst *CI = FindTRECandidate(Ret, CannotTailCallElimCallsMarkedTail);
+ if (!CI)
+ return false;
+
+ return EliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
+ ArgumentPHIs,
+ CannotTailCallElimCallsMarkedTail);
+}
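
The refactor above splits ProcessReturningBlock into FindTRECandidate and EliminateRecursiveTailCall so the new FoldReturnAndProcessPred path can reuse both halves. For reference, a minimal standalone sketch (plain C++, not the pass or the LLVM API) of what the transform does: a self-recursive tail call becomes a branch back to the entry, with the parameters turned into loop-carried values (what the ArgumentPHIs vector tracks in IR form).

    // Standalone sketch of the effect of tail-recursion elimination (TRE).
    #include <cstdio>

    static long fac_recursive(long n, long acc) {
      if (n <= 1) return acc;
      return fac_recursive(n - 1, acc * n);  // tail call: a TRE candidate
    }

    // After: the pass's effect written in source form. The parameters become
    // loop-carried values feeding the "PHIs" at the old entry block.
    static long fac_loop(long n, long acc) {
      for (;;) {
        if (n <= 1) return acc;
        acc *= n;  // update the values feeding the PHIs
        n -= 1;    // then branch back to the old entry
      }
    }

    int main() {
      std::printf("%ld %ld\n", fac_recursive(10, 1), fac_loop(10, 1));
      return 0;
    }
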
diff --git a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
index 4d64c85..be7bed1 100644
--- a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -21,6 +21,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/CallSite.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -379,27 +380,10 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetLowering &TLI) {
- std::vector<InlineAsm::ConstraintInfo>
- Constraints = IA->ParseConstraints();
-
- unsigned ArgNo = 0; // The argument of the CallInst.
- for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
- TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
-
- // Compute the value type for each operand.
- switch (OpInfo.Type) {
- case InlineAsm::isOutput:
- if (OpInfo.isIndirect)
- OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
- break;
- case InlineAsm::isInput:
- OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
- break;
- case InlineAsm::isClobber:
- // Nothing to do.
- break;
- }
-
+ TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI));
+ for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
+ TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
+
// Compute the constraint code and ConstraintType to use.
TLI.ComputeConstraintToUse(OpInfo, SDValue());
@@ -584,7 +568,7 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
MemoryInst, Result);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
- Success = Success; assert(Success && "Couldn't select *anything*?");
+ (void)Success; assert(Success && "Couldn't select *anything*?");
// If the match didn't cover I, then it won't be shared by it.
if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
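
The `Success = Success` self-assignment replaced above was a workaround to silence unused-variable warnings in NDEBUG builds, where the assert compiles away; the cast to void is the idiomatic form. A minimal illustration:

    #include <cassert>

    static int compute() { return 42; }

    int main() {
      bool Success = (compute() == 42);
      (void)Success;  // silences -Wunused-variable when assert() compiles away
      assert(Success && "compute() broken");
      return 0;
    }
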
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 093083a..acaea19 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -19,8 +19,9 @@
#include "llvm/Constant.h"
#include "llvm/Type.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Scalar.h"
@@ -63,12 +64,27 @@ void llvm::DeleteDeadBlock(BasicBlock *BB) {
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
-void llvm::FoldSingleEntryPHINodes(BasicBlock *BB) {
+void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) {
+ if (!isa<PHINode>(BB->begin())) return;
+
+ AliasAnalysis *AA = 0;
+ MemoryDependenceAnalysis *MemDep = 0;
+ if (P) {
+ AA = P->getAnalysisIfAvailable<AliasAnalysis>();
+ MemDep = P->getAnalysisIfAvailable<MemoryDependenceAnalysis>();
+ }
+
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
if (PN->getIncomingValue(0) != PN)
PN->replaceAllUsesWith(PN->getIncomingValue(0));
else
PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
+
+ if (MemDep)
+ MemDep->removeInstruction(PN); // Memdep updates AA itself.
+ else if (AA && isa<PointerType>(PN->getType()))
+ AA->deleteValue(PN);
+
PN->eraseFromParent();
}
}
@@ -110,7 +126,7 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
if (isa<InvokeInst>(PredBB->getTerminator())) return false;
succ_iterator SI(succ_begin(PredBB)), SE(succ_end(PredBB));
- BasicBlock* OnlySucc = BB;
+ BasicBlock *OnlySucc = BB;
for (; SI != SE; ++SI)
if (*SI != OnlySucc) {
OnlySucc = 0; // There are multiple distinct successors!
@@ -131,10 +147,8 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
}
// Begin by getting rid of unneeded PHIs.
- while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
- PN->replaceAllUsesWith(PN->getIncomingValue(0));
- BB->getInstList().pop_front(); // Delete the phi node...
- }
+ if (isa<PHINode>(BB->front()))
+ FoldSingleEntryPHINodes(BB, P);
// Delete the unconditional branch from the predecessor...
PredBB->getInstList().pop_back();
@@ -152,24 +166,27 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
// Finally, erase the old block and update dominator info.
if (P) {
- if (DominatorTree* DT = P->getAnalysisIfAvailable<DominatorTree>()) {
- DomTreeNode* DTN = DT->getNode(BB);
- DomTreeNode* PredDTN = DT->getNode(PredBB);
-
- if (DTN) {
- SmallPtrSet<DomTreeNode*, 8> Children(DTN->begin(), DTN->end());
- for (SmallPtrSet<DomTreeNode*, 8>::iterator DI = Children.begin(),
+ if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
+ if (DomTreeNode *DTN = DT->getNode(BB)) {
+ DomTreeNode *PredDTN = DT->getNode(PredBB);
+ SmallVector<DomTreeNode*, 8> Children(DTN->begin(), DTN->end());
+ for (SmallVector<DomTreeNode*, 8>::iterator DI = Children.begin(),
DE = Children.end(); DI != DE; ++DI)
DT->changeImmediateDominator(*DI, PredDTN);
DT->eraseNode(BB);
}
+
+ if (LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>())
+ LI->removeBlock(BB);
+
+ if (MemoryDependenceAnalysis *MD =
+ P->getAnalysisIfAvailable<MemoryDependenceAnalysis>())
+ MD->invalidateCachedPredecessors();
}
}
BB->eraseFromParent();
-
-
return true;
}
@@ -218,52 +235,6 @@ void llvm::ReplaceInstWithInst(Instruction *From, Instruction *To) {
ReplaceInstWithInst(From->getParent()->getInstList(), BI, To);
}
-/// RemoveSuccessor - Change the specified terminator instruction such that its
-/// successor SuccNum no longer exists. Because this reduces the outgoing
-/// degree of the current basic block, the actual terminator instruction itself
-/// may have to be changed. In the case where the last successor of the block
-/// is deleted, a return instruction is inserted in its place which can cause a
-/// surprising change in program behavior if it is not expected.
-///
-void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {
- assert(SuccNum < TI->getNumSuccessors() &&
- "Trying to remove a nonexistant successor!");
-
- // If our old successor block contains any PHI nodes, remove the entry in the
- // PHI nodes that comes from this branch...
- //
- BasicBlock *BB = TI->getParent();
- TI->getSuccessor(SuccNum)->removePredecessor(BB);
-
- TerminatorInst *NewTI = 0;
- switch (TI->getOpcode()) {
- case Instruction::Br:
- // If this is a conditional branch... convert to unconditional branch.
- if (TI->getNumSuccessors() == 2) {
- cast<BranchInst>(TI)->setUnconditionalDest(TI->getSuccessor(1-SuccNum));
- } else { // Otherwise convert to a return instruction...
- Value *RetVal = 0;
-
- // Create a value to return... if the function doesn't return null...
- if (!BB->getParent()->getReturnType()->isVoidTy())
- RetVal = Constant::getNullValue(BB->getParent()->getReturnType());
-
- // Create the return...
- NewTI = ReturnInst::Create(TI->getContext(), RetVal);
- }
- break;
-
- case Instruction::Invoke: // Should convert to call
- case Instruction::Switch: // Should remove entry
- default:
- case Instruction::Ret: // Cannot happen, has no successors!
- llvm_unreachable("Unhandled terminator inst type in RemoveSuccessor!");
- }
-
- if (NewTI) // If it's a different instruction, replace.
- ReplaceInstWithInst(TI, NewTI);
-}
-
/// GetSuccessorNumber - Search for the specified successor of basic block BB
/// and return its position in the terminator instruction's list of
/// successors. It is an error to call this with a block that is not a
@@ -300,13 +271,13 @@ BasicBlock *llvm::SplitEdge(BasicBlock *BB, BasicBlock *Succ, Pass *P) {
assert(SP == BB && "CFG broken");
SP = NULL;
return SplitBlock(Succ, Succ->begin(), P);
- } else {
- // Otherwise, if BB has a single successor, split it at the bottom of the
- // block.
- assert(BB->getTerminator()->getNumSuccessors() == 1 &&
- "Should have a single succ!");
- return SplitBlock(BB, BB->getTerminator(), P);
}
+
+ // Otherwise, if BB has a single successor, split it at the bottom of the
+ // block.
+ assert(BB->getTerminator()->getNumSuccessors() == 1 &&
+ "Should have a single succ!");
+ return SplitBlock(BB, BB->getTerminator(), P);
}
/// SplitBlock - Split the specified block at the specified instruction - every
@@ -322,12 +293,12 @@ BasicBlock *llvm::SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P) {
// The new block lives in whichever loop the old one did. This preserves
// LCSSA as well, because we force the split point to be after any PHI nodes.
- if (LoopInfo* LI = P->getAnalysisIfAvailable<LoopInfo>())
+ if (LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>())
if (Loop *L = LI->getLoopFor(Old))
L->addBasicBlockToLoop(New, LI->getBase());
if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
- // Old dominates New. New node domiantes all other nodes dominated by Old.
+ // Old dominates New. New node dominates all other nodes dominated by Old.
DomTreeNode *OldNode = DT->getNode(Old);
std::vector<DomTreeNode *> Children;
for (DomTreeNode::iterator I = OldNode->begin(), E = OldNode->end();
@@ -340,9 +311,6 @@ BasicBlock *llvm::SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P) {
DT->changeImmediateDominator(*I, NewNode);
}
- if (DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>())
- DF->splitBlock(Old);
-
return New;
}
@@ -354,10 +322,9 @@ BasicBlock *llvm::SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P) {
/// suffix of 'Suffix'.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
-/// DominanceFrontier, LoopInfo, and LCCSA but no other analyses.
-/// In particular, it does not preserve LoopSimplify (because it's
-/// complicated to handle the case where one of the edges being split
-/// is an exit of a loop with other exits).
+/// LoopInfo, and LCSSA but no other analyses. In particular, it does not
+/// preserve LoopSimplify (because it's complicated to handle the case where one
+/// of the edges being split is an exit of a loop with other exits).
///
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
BasicBlock *const *Preds,
@@ -407,13 +374,10 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
}
}
- // Update dominator tree and dominator frontier if available.
+ // Update dominator tree if available.
DominatorTree *DT = P ? P->getAnalysisIfAvailable<DominatorTree>() : 0;
if (DT)
DT->splitBlock(NewBB);
- if (DominanceFrontier *DF =
- P ? P->getAnalysisIfAvailable<DominanceFrontier>() : 0)
- DF->splitBlock(NewBB);
// Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
// node becomes an incoming value for BB's phi node. However, if the Preds
@@ -545,7 +509,32 @@ void llvm::FindFunctionBackedges(const Function &F,
// Go up one level.
InStack.erase(VisitStack.pop_back_val().first);
}
- } while (!VisitStack.empty());
-
-
+ } while (!VisitStack.empty());
+}
+
+/// FoldReturnIntoUncondBranch - This method duplicates the specified return
+/// instruction into a predecessor which ends in an unconditional branch. If
+/// the return instruction returns a value defined by a PHI, propagate the
+/// right value into the return. It returns the new return instruction in the
+/// predecessor.
+ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+ BasicBlock *Pred) {
+ Instruction *UncondBranch = Pred->getTerminator();
+ // Clone the return and add it to the end of the predecessor.
+ Instruction *NewRet = RI->clone();
+ Pred->getInstList().push_back(NewRet);
+
+ // If the return instruction returns a value, and if the value was a
+ // PHI node in "BB", propagate the right value into the return.
+ for (User::op_iterator i = NewRet->op_begin(), e = NewRet->op_end();
+ i != e; ++i)
+ if (PHINode *PN = dyn_cast<PHINode>(*i))
+ if (PN->getParent() == BB)
+ *i = PN->getIncomingValueForBlock(Pred);
+
+ // Update any PHI nodes in the returning block to realize that we no
+ // longer branch to them.
+ BB->removePredecessor(Pred);
+ UncondBranch->eraseFromParent();
+ return cast<ReturnInst>(NewRet);
}
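
FoldReturnIntoUncondBranch exists to serve the return-duplication path added to TailCallElim above. A source-level sketch (hypothetical find_* functions, not from the tree) of why duplicating the return helps: with a shared return block, the recursive call's result flows through a PHI and is not a tail call; once each predecessor gets its own return, the call sits in tail position and EliminateRecursiveTailCall can turn it into a loop.

    #include <cstdio>

    // Shared-return shape: the call result merges with other values in a
    // common return block (a PHI node in IR), so it is not a tail call.
    static int find_shared(const int *p, int n, int key) {
      int r;
      if (n == 0)         r = -1;
      else if (*p == key) r = n;
      else                r = find_shared(p + 1, n - 1, key);
      return r;  // single return; r merges -1, n, and the call result
    }

    // After return duplication, the recursive arm returns directly, so the
    // call is in tail position and can be rewritten into a loop.
    static int find_dup(const int *p, int n, int key) {
      if (n == 0)    return -1;
      if (*p == key) return n;
      return find_dup(p + 1, n - 1, key);  // now a genuine tail call
    }

    int main() {
      int a[] = {3, 1, 4, 1, 5};
      std::printf("%d %d\n", find_shared(a, 5, 4), find_dup(a, 5, 4));
      return 0;
    }
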
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index f75ffe6..616b066 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -11,8 +11,7 @@
// inserting a dummy basic block. This pass may be "required" by passes that
// cannot deal with critical edges. For this usage, the structure type is
// forward declared. This pass obviously invalidates the CFG, but can update
-// forward dominator (set, immediate dominators, tree, and frontier)
-// information.
+// dominator trees.
//
//===----------------------------------------------------------------------===//
@@ -36,13 +35,14 @@ STATISTIC(NumBroken, "Number of blocks inserted");
namespace {
struct BreakCriticalEdges : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- BreakCriticalEdges() : FunctionPass(ID) {}
+ BreakCriticalEdges() : FunctionPass(ID) {
+ initializeBreakCriticalEdgesPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
AU.addPreserved<LoopInfo>();
AU.addPreserved<ProfileInfo>();
@@ -54,7 +54,7 @@ namespace {
char BreakCriticalEdges::ID = 0;
INITIALIZE_PASS(BreakCriticalEdges, "break-crit-edges",
- "Break critical edges in CFG", false, false);
+ "Break critical edges in CFG", false, false)
// Publicly exposed interface to pass...
char &llvm::BreakCriticalEdgesID = BreakCriticalEdges::ID;
@@ -150,10 +150,9 @@ static void CreatePHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds,
}
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
-/// split the critical edge. This will update DominatorTree and
-/// DominatorFrontier information if it is available, thus calling this pass
-/// will not invalidate either of them. This returns the new block if the edge
-/// was split, null otherwise.
+/// split the critical edge. This will update DominatorTree information if it
+/// is available, so calling this pass will not invalidate it.
+/// This returns the new block if the edge was split, null otherwise.
///
/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
/// specified successor will be merged into the same critical edge block.
@@ -255,12 +254,11 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
if (P == 0) return NewBB;
DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
- DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>();
LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
// If we have nothing to update, just return.
- if (DT == 0 && DF == 0 && LI == 0 && PI == 0)
+ if (DT == 0 && LI == 0 && PI == 0)
return NewBB;
// Now update analysis information. Since the only predecessor of NewBB is
@@ -281,7 +279,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
I != E; ++I) {
BasicBlock *P = *I;
if (P != NewBB)
- OtherPreds.push_back(P);
+ OtherPreds.push_back(P);
}
}
@@ -318,40 +316,6 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
}
}
- // Should we update DominanceFrontier information?
- if (DF) {
- // If NewBBDominatesDestBB hasn't been computed yet, do so with DF.
- if (!OtherPreds.empty()) {
- // FIXME: IMPLEMENT THIS!
- llvm_unreachable("Requiring domfrontiers but not idom/domtree/domset."
- " not implemented yet!");
- }
-
- // Since the new block is dominated by its only predecessor TIBB,
- // it cannot be in any block's dominance frontier. If NewBB dominates
- // DestBB, its dominance frontier is the same as DestBB's, otherwise it is
- // just {DestBB}.
- DominanceFrontier::DomSetType NewDFSet;
- if (NewBBDominatesDestBB) {
- DominanceFrontier::iterator I = DF->find(DestBB);
- if (I != DF->end()) {
- DF->addBasicBlock(NewBB, I->second);
-
- if (I->second.count(DestBB)) {
- // However NewBB's frontier does not include DestBB.
- DominanceFrontier::iterator NF = DF->find(NewBB);
- DF->removeFromFrontier(NF, DestBB);
- }
- }
- else
- DF->addBasicBlock(NewBB, DominanceFrontier::DomSetType());
- } else {
- DominanceFrontier::DomSetType NewDFSet;
- NewDFSet.insert(DestBB);
- DF->addBasicBlock(NewBB, NewDFSet);
- }
- }
-
// Update LoopInfo if it is around.
if (LI) {
if (Loop *TIL = LI->getLoopFor(TIBB)) {
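
For context on what SplitCriticalEdge does once the DominanceFrontier maintenance above is gone: an edge is critical when its source has several successors and its target several predecessors, and splitting inserts a fresh block carrying only that one edge. A standalone model over an integer adjacency list (indices standing in for BasicBlocks, not the LLVM API):

    #include <cstdio>
    #include <vector>

    struct CFG {
      std::vector<std::vector<int>> succs;  // block -> successor blocks
      std::vector<int> predCount;           // block -> number of predecessors
    };

    // An edge Pred->Succ is critical when Pred has several successors and
    // Succ has several predecessors.
    static bool isCriticalEdge(const CFG &G, int Pred, int Succ) {
      return G.succs[Pred].size() > 1 && G.predCount[Succ] > 1;
    }

    static int splitEdge(CFG &G, int Pred, int Succ) {
      int NewBB = (int)G.succs.size();
      G.succs.push_back({Succ});  // NewBB branches unconditionally to Succ
      G.predCount.push_back(1);   // and its only predecessor is Pred
      for (int &S : G.succs[Pred])
        if (S == Succ) { S = NewBB; break; }  // redirect exactly one edge
      return NewBB;
    }

    int main() {
      CFG G{{{1, 2}, {2}, {}}, {0, 1, 2}};  // edges 0->1, 0->2, 1->2
      std::printf("critical: %d\n", isCriticalEdge(G, 0, 2));  // 1
      std::printf("new block: %d\n", splitEdge(G, 0, 2));      // 3
      return 0;
    }
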
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index c313949..4a90751 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -131,21 +131,6 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
return CI;
}
-
-/// EmitMemCpy - Emit a call to the memcpy function to the builder. This always
-/// expects that Len has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
- bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
- Module *M = B.GetInsertBlock()->getParent()->getParent();
- Dst = CastToCStr(Dst, B);
- Src = CastToCStr(Src, B);
- const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
- Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
- return B.CreateCall5(MemCpy, Dst, Src, Len,
- ConstantInt::get(B.getInt32Ty(), Align),
- ConstantInt::get(B.getInt1Ty(), isVolatile));
-}
-
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
@@ -170,22 +155,6 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
return CI;
}
-/// EmitMemMove - Emit a call to the memmove function to the builder. This
-/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemMove(Value *Dst, Value *Src, Value *Len, unsigned Align,
- bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
- Module *M = B.GetInsertBlock()->getParent()->getParent();
- LLVMContext &Context = B.GetInsertBlock()->getContext();
- const Type *ArgTys[3] = { Dst->getType(), Src->getType(),
- TD->getIntPtrType(Context) };
- Value *MemMove = Intrinsic::getDeclaration(M, Intrinsic::memmove, ArgTys, 3);
- Dst = CastToCStr(Dst, B);
- Src = CastToCStr(Src, B);
- Value *A = ConstantInt::get(B.getInt32Ty(), Align);
- Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
- return B.CreateCall5(MemMove, Dst, Src, Len, A, Vol);
-}
-
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
@@ -233,18 +202,6 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
return CI;
}
-/// EmitMemSet - Emit a call to the memset function
-Value *llvm::EmitMemSet(Value *Dst, Value *Val, Value *Len, bool isVolatile,
- IRBuilder<> &B, const TargetData *TD) {
- Module *M = B.GetInsertBlock()->getParent()->getParent();
- Intrinsic::ID IID = Intrinsic::memset;
- const Type *Tys[2] = { Dst->getType(), Len->getType() };
- Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 2);
- Value *Align = ConstantInt::get(B.getInt32Ty(), 1);
- Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
- return B.CreateCall5(MemSet, CastToCStr(Dst, B), Val, Len, Align, Vol);
-}
-
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' (e.g.
/// 'floor'). This function is known to take a single argument of type matching
/// 'Op' and returns one value with the same type. If 'Op' is a long double, 'l' is
@@ -422,8 +379,8 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
return false;
if (isFoldable(3, 2, false)) {
- EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, false, B, TD);
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
replaceCall(CI->getArgOperand(0));
return true;
}
@@ -445,8 +402,8 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
return false;
if (isFoldable(3, 2, false)) {
- EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, false, B, TD);
+ B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1);
replaceCall(CI->getArgOperand(0));
return true;
}
@@ -465,8 +422,7 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
if (isFoldable(3, 2, false)) {
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
false);
- EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2),
- false, B, TD);
+ B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
replaceCall(CI->getArgOperand(0));
return true;
}
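
The deleted EmitMemCpy/EmitMemMove/EmitMemSet helpers were superseded by the equivalent IRBuilder CreateMemCpy/CreateMemMove/CreateMemSet methods used in the folds above. The fold itself replaces a fortified __memcpy_chk with a plain memcpy when the object-size check is known to pass. A runtime model of that rule (not the LLVM code; the check here is dynamic where isFoldable's is static):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    static void *memcpy_chk_model(void *dst, const void *src,
                                  std::size_t len, std::size_t objsize) {
      if (objsize >= len)                   // statically proven in the real fold
        return std::memcpy(dst, src, len); // the B.CreateMemCpy(...) case
      std::fprintf(stderr, "buffer overflow detected\n");
      return nullptr;  // the real __memcpy_chk aborts instead
    }

    int main() {
      char buf[16];
      memcpy_chk_model(buf, "hello", 6, sizeof buf);
      std::printf("%s\n", buf);
      return 0;
    }
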
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index f43186e..d967ceb 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -112,8 +112,7 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
const BasicBlock &BB = *BI;
// Create a new basic block and copy instructions into it!
- BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc,
- CodeInfo);
+ BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo);
VMap[&BB] = CBB; // Add basic block mapping.
if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
@@ -122,12 +121,12 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Loop over all of the instructions in the function, fixing up operand
// references as we go. This uses VMap to do all the hard work.
- //
for (Function::iterator BB = cast<BasicBlock>(VMap[OldFunc->begin()]),
BE = NewFunc->end(); BB != BE; ++BB)
// Loop over all instructions, fixing each one as we find it...
for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
- RemapInstruction(II, VMap, ModuleLevelChanges);
+ RemapInstruction(II, VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
}
/// CloneFunction - Return a copy of the specified function, but without
@@ -138,8 +137,7 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values.
///
-Function *llvm::CloneFunction(const Function *F,
- ValueToValueMapTy &VMap,
+Function *llvm::CloneFunction(const Function *F, ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo) {
std::vector<const Type*> ArgTypes;
@@ -216,7 +214,7 @@ namespace {
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
- Value *&BBEntry = VMap[BB];
+ TrackingVH<Value> &BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -262,8 +260,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If the condition was a known constant in the callee...
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
- if (Cond == 0)
- Cond = dyn_cast_or_null<ConstantInt>(VMap[BI->getCondition()]);
+ if (Cond == 0) {
+ Value *V = VMap[BI->getCondition()];
+ Cond = dyn_cast_or_null<ConstantInt>(V);
+ }
// Constant fold to uncond branch!
if (Cond) {
@@ -276,8 +276,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
} else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
- if (Cond == 0) // Or known constant after constant prop in the callee...
- Cond = dyn_cast_or_null<ConstantInt>(VMap[SI->getCondition()]);
+ if (Cond == 0) { // Or known constant after constant prop in the callee...
+ Value *V = VMap[SI->getCondition()];
+ Cond = dyn_cast_or_null<ConstantInt>(V);
+ }
if (Cond) { // Constant fold to uncond branch!
BasicBlock *Dest = SI->getSuccessor(SI->findCaseValue(Cond));
VMap[OldTI] = BranchInst::Create(Dest, NewBB);
@@ -318,7 +320,8 @@ ConstantFoldMappedInstruction(const Instruction *I) {
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- VMap, ModuleLevelChanges)))
+ VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges)))
Ops.push_back(Op);
else
return 0; // All operands not constant!
@@ -394,7 +397,8 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVector<const PHINode*, 16> PHIToResolve;
for (Function::const_iterator BI = OldFunc->begin(), BE = OldFunc->end();
BI != BE; ++BI) {
- BasicBlock *NewBB = cast_or_null<BasicBlock>(VMap[BI]);
+ Value *V = VMap[BI];
+ BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
if (NewBB == 0) continue; // Dead block.
// Add the new block to the new function.
@@ -455,7 +459,8 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
I->setDebugLoc(DebugLoc());
}
}
- RemapInstruction(I, VMap, ModuleLevelChanges);
+ RemapInstruction(I, VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
}
}
@@ -474,10 +479,11 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
OPN = PHIToResolve[phino];
PHINode *PN = cast<PHINode>(VMap[OPN]);
for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
- if (BasicBlock *MappedBlock =
- cast_or_null<BasicBlock>(VMap[PN->getIncomingBlock(pred)])) {
+ Value *V = VMap[PN->getIncomingBlock(pred)];
+ if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
Value *InVal = MapValue(PN->getIncomingValue(pred),
- VMap, ModuleLevelChanges);
+ VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
assert(InVal && "Unknown input value?");
PN->setIncomingValue(pred, InVal);
PN->setIncomingBlock(pred, MappedBlock);
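
The recurring `ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges` pattern in this file reflects RemapInstruction's bool parameter becoming a flags bitmask. A small model of that style of refactor (enum names mirror the diff; the function body is a stand-in):

    #include <cstdio>

    enum RemapFlags {
      RF_None = 0,
      RF_NoModuleLevelChanges = 1,
    };

    static void remapInstructionModel(RemapFlags Flags) {
      if (Flags & RF_NoModuleLevelChanges)
        std::printf("leaving module-level values alone\n");
      else
        std::printf("remapping everything\n");
    }

    int main() {
      bool ModuleLevelChanges = true;  // the old bool parameter
      remapInstructionModel(ModuleLevelChanges ? RF_None
                                               : RF_NoModuleLevelChanges);
      return 0;
    }
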
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp b/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
index 551b630..87dd141 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
@@ -19,15 +19,14 @@
using namespace llvm;
-/// CloneDominatorInfo - Clone basicblock's dominator tree and, if available,
-/// dominance info. It is expected that basic block is already cloned.
+/// CloneDominatorInfo - Clone a basic block's dominator tree. It is expected
+/// that the basic block is already cloned.
static void CloneDominatorInfo(BasicBlock *BB,
- ValueMap<const Value *, Value *> &VMap,
- DominatorTree *DT,
- DominanceFrontier *DF) {
+ ValueToValueMapTy &VMap,
+ DominatorTree *DT) {
assert (DT && "DominatorTree is not available");
- ValueMap<const Value *, Value*>::iterator BI = VMap.find(BB);
+ ValueToValueMapTy::iterator BI = VMap.find(BB);
assert (BI != VMap.end() && "BasicBlock clone is missing");
BasicBlock *NewBB = cast<BasicBlock>(BI->second);
@@ -42,45 +41,23 @@ static void CloneDominatorInfo(BasicBlock *BB,
// NewBB's dominator is either BB's dominator or BB's dominator's clone.
BasicBlock *NewBBDom = BBDom;
- ValueMap<const Value *, Value*>::iterator BBDomI = VMap.find(BBDom);
+ ValueToValueMapTy::iterator BBDomI = VMap.find(BBDom);
if (BBDomI != VMap.end()) {
NewBBDom = cast<BasicBlock>(BBDomI->second);
if (!DT->getNode(NewBBDom))
- CloneDominatorInfo(BBDom, VMap, DT, DF);
+ CloneDominatorInfo(BBDom, VMap, DT);
}
DT->addNewBlock(NewBB, NewBBDom);
-
- // Copy cloned dominance frontiner set
- if (DF) {
- DominanceFrontier::DomSetType NewDFSet;
- DominanceFrontier::iterator DFI = DF->find(BB);
- if ( DFI != DF->end()) {
- DominanceFrontier::DomSetType S = DFI->second;
- for (DominanceFrontier::DomSetType::iterator I = S.begin(), E = S.end();
- I != E; ++I) {
- BasicBlock *DB = *I;
- ValueMap<const Value*, Value*>::iterator IDM = VMap.find(DB);
- if (IDM != VMap.end())
- NewDFSet.insert(cast<BasicBlock>(IDM->second));
- else
- NewDFSet.insert(DB);
- }
- }
- DF->addBasicBlock(NewBB, NewDFSet);
- }
}
/// CloneLoop - Clone Loop. Clone dominator info. Populate VMap
/// using old blocks to new blocks mapping.
Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
- ValueMap<const Value *, Value *> &VMap, Pass *P) {
+ ValueToValueMapTy &VMap, Pass *P) {
DominatorTree *DT = NULL;
- DominanceFrontier *DF = NULL;
- if (P) {
+ if (P)
DT = P->getAnalysisIfAvailable<DominatorTree>();
- DF = P->getAnalysisIfAvailable<DominanceFrontier>();
- }
SmallVector<BasicBlock *, 16> NewBlocks;
@@ -116,7 +93,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- CloneDominatorInfo(BB, VMap, DT, DF);
+ CloneDominatorInfo(BB, VMap, DT);
}
// Process sub loops
@@ -134,7 +111,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (unsigned index = 0, num_ops = Insn->getNumOperands();
index != num_ops; ++index) {
Value *Op = Insn->getOperand(index);
- ValueMap<const Value *, Value *>::iterator OpItr = VMap.find(Op);
+ ValueToValueMapTy::iterator OpItr = VMap.find(Op);
if (OpItr != VMap.end())
Insn->setOperand(index, OpItr->second);
}
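
CloneDominatorInfo's recursion is easy to miss in the diff: a cloned block's immediate dominator is the clone of the original's idom, which may itself need to be placed in the tree first; an idom outside the cloned region is reused as-is. A standalone model with integers standing in for blocks (not the LLVM data structures):

    #include <cstdio>
    #include <map>

    static std::map<int, int> IDom;     // original block -> immediate dominator
    static std::map<int, int> VMap;     // original block -> its clone
    static std::map<int, int> NewIDom;  // cloned block -> immediate dominator

    static void cloneDomInfo(int BB) {
      int NewBB = VMap[BB];
      if (NewIDom.count(NewBB)) return;  // already placed in the tree
      int Dom = IDom[BB];
      int NewDom = Dom;                  // idom outside the region: reuse it
      auto It = VMap.find(Dom);
      if (It != VMap.end()) {
        NewDom = It->second;             // idom was cloned too...
        if (!NewIDom.count(NewDom))
          cloneDomInfo(Dom);             // ...so place the clone first
      }
      NewIDom[NewBB] = NewDom;
    }

    int main() {
      IDom = {{1, 0}, {2, 1}};    // 0 dominates 1, 1 dominates 2
      VMap = {{1, 11}, {2, 12}};  // blocks 1 and 2 were cloned
      cloneDomInfo(2);
      for (auto &P : NewIDom)
        std::printf("idom(%d) = %d\n", P.first, P.second);  // 11->0, 12->11
      return 0;
    }
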
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
index b347bf5..1046c38 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -89,8 +89,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalVariable *GV = cast<GlobalVariable>(VMap[I]);
if (I->hasInitializer())
GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
- VMap,
- true)));
+ VMap, RF_None)));
GV->setLinkage(I->getLinkage());
GV->setThreadLocal(I->isThreadLocal());
GV->setConstant(I->isConstant());
@@ -121,7 +120,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
GA->setLinkage(I->getLinkage());
if (const Constant* C = I->getAliasee())
- GA->setAliasee(cast<Constant>(MapValue(C, VMap, true)));
+ GA->setAliasee(cast<Constant>(MapValue(C, VMap, RF_None)));
}
// And named metadata....
@@ -130,7 +129,8 @@ Module *llvm::CloneModule(const Module *M,
const NamedMDNode &NMD = *I;
NamedMDNode *NewNMD = New->getOrInsertNamedMetadata(NMD.getName());
for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
- NewNMD->addOperand(cast<MDNode>(MapValue(NMD.getOperand(i), VMap, true)));
+ NewNMD->addOperand(cast<MDNode>(MapValue(NMD.getOperand(i), VMap,
+ RF_None)));
}
return New;
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index b51f751..e633772 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -186,8 +186,8 @@ void CodeExtractor::splitReturnBlocks() {
if (ReturnInst *RI = dyn_cast<ReturnInst>((*I)->getTerminator())) {
BasicBlock *New = (*I)->splitBasicBlock(RI, (*I)->getName()+".ret");
if (DT) {
- // Old dominates New. New node domiantes all other nodes dominated
- //by Old.
+ // Old dominates New. New node dominates all other nodes dominated
+ // by Old.
DomTreeNode *OldNode = DT->getNode(*I);
SmallVector<DomTreeNode*, 8> Children;
for (DomTreeNode::iterator DI = OldNode->begin(), DE = OldNode->end();
diff --git a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index 8e82a02..8cc2649 100644
--- a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -129,7 +129,7 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
assert(II->getParent() != P->getIncomingBlock(i) &&
- "Invoke edge not supported yet"); II=II;
+ "Invoke edge not supported yet"); (void)II;
}
new StoreInst(P->getIncomingValue(i), Slot,
P->getIncomingBlock(i)->getTerminator());
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 88979e86..c1faf24 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -22,7 +22,9 @@
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
@@ -170,7 +172,7 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
Function::iterator FirstNewBlock,
- ValueMap<const Value*, Value*> &VMap,
+ ValueToValueMapTy &VMap,
InlineFunctionInfo &IFI) {
CallGraph &CG = *IFI.CG;
const Function *Caller = CS.getInstruction()->getParent()->getParent();
@@ -193,7 +195,7 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
for (; I != E; ++I) {
const Value *OrigCall = I->first;
- ValueMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
+ ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
if (VMI == VMap.end() || VMI->second == 0)
continue;
@@ -228,6 +230,90 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
CallerNode->removeCallEdgeFor(CS);
}
+/// HandleByValArgument - When inlining a call site that has a byval argument,
+/// we have to make the implicit memcpy explicit by adding it.
+static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
+ const Function *CalledFunc,
+ InlineFunctionInfo &IFI,
+ unsigned ByValAlignment) {
+ const Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
+
+ // If the called function is readonly, then it could not mutate the caller's
+ // copy of the byval'd memory. In this case, it is safe to elide the copy and
+ // temporary.
+ if (CalledFunc->onlyReadsMemory()) {
+ // If the byval argument has a specified alignment that is greater than the
+ // passed in pointer, then we either have to round up the input pointer or
+ // give up on this transformation.
+ if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
+ return Arg;
+
+ // If the pointer is already known to be sufficiently aligned, or if we can
+ // round it up to a larger alignment, then we don't need a temporary.
+ if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
+ IFI.TD) >= ByValAlignment)
+ return Arg;
+
+ // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
+ // for code quality, but rarely happens and is required for correctness.
+ }
+
+ LLVMContext &Context = Arg->getContext();
+
+ const Type *VoidPtrTy = Type::getInt8PtrTy(Context);
+
+ // Create the alloca. If we have TargetData, use nice alignment.
+ unsigned Align = 1;
+ if (IFI.TD)
+ Align = IFI.TD->getPrefTypeAlignment(AggTy);
+
+ // If the byval had an alignment specified, we *must* use at least that
+ // alignment, as it is required by the byval argument (and uses of the
+ // pointer inside the callee).
+ Align = std::max(Align, ByValAlignment);
+
+ Function *Caller = TheCall->getParent()->getParent();
+
+ Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
+ &*Caller->begin()->begin());
+ // Emit a memcpy.
+ const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
+ Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
+ Intrinsic::memcpy,
+ Tys, 3);
+ Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
+ Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);
+
+ Value *Size;
+ if (IFI.TD == 0)
+ Size = ConstantExpr::getSizeOf(AggTy);
+ else
+ Size = ConstantInt::get(Type::getInt64Ty(Context),
+ IFI.TD->getTypeStoreSize(AggTy));
+
+ // Always generate a memcpy of alignment 1 here because we don't know
+ // the alignment of the src pointer. Other optimizations can infer
+ // better alignment.
+ Value *CallArgs[] = {
+ DestCast, SrcCast, Size,
+ ConstantInt::get(Type::getInt32Ty(Context), 1),
+ ConstantInt::getFalse(Context) // isVolatile
+ };
+ CallInst *TheMemCpy =
+ CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
+
+ // If we have a call graph, update it.
+ if (CallGraph *CG = IFI.CG) {
+ CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
+ CallGraphNode *CallerNode = (*CG)[Caller];
+ CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
+ }
+
+ // Uses of the argument in the function should use our new alloca
+ // instead.
+ return NewAlloca;
+}
+
// InlineFunction - This function inlines the called function into the basic
// block of the caller. This returns false if it is not possible to inline this
// call. The program is still in a well defined state if this occurs though.
@@ -251,7 +337,6 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
CalledFunc->isDeclaration() || // call, or call to a vararg function!
CalledFunc->getFunctionType()->isVarArg()) return false;
-
// If the call to the callee is not a tail call, we must clear the 'tail'
// flags on any calls that we inline.
bool MustClearTailCallFlags =
@@ -287,7 +372,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
Function::iterator FirstNewBlock;
{ // Scope to destroy VMap after cloning.
- ValueMap<const Value*, Value*> VMap;
+ ValueToValueMapTy VMap;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -304,58 +389,14 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// by them explicit. However, we don't do this if the callee is readonly
// or readnone, because the copy would be unneeded: the callee doesn't
// modify the struct.
- if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
- !CalledFunc->onlyReadsMemory()) {
- const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
- const Type *VoidPtrTy =
- Type::getInt8PtrTy(Context);
-
- // Create the alloca. If we have TargetData, use nice alignment.
- unsigned Align = 1;
- if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
- Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
- I->getName(),
- &*Caller->begin()->begin());
- // Emit a memcpy.
- const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
- Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
- Intrinsic::memcpy,
- Tys, 3);
- Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
- Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
-
- Value *Size;
- if (IFI.TD == 0)
- Size = ConstantExpr::getSizeOf(AggTy);
- else
- Size = ConstantInt::get(Type::getInt64Ty(Context),
- IFI.TD->getTypeStoreSize(AggTy));
-
- // Always generate a memcpy of alignment 1 here because we don't know
- // the alignment of the src pointer. Other optimizations can infer
- // better alignment.
- Value *CallArgs[] = {
- DestCast, SrcCast, Size,
- ConstantInt::get(Type::getInt32Ty(Context), 1),
- ConstantInt::get(Type::getInt1Ty(Context), 0)
- };
- CallInst *TheMemCpy =
- CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
-
- // If we have a call graph, update it.
- if (CallGraph *CG = IFI.CG) {
- CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
- CallGraphNode *CallerNode = (*CG)[Caller];
- CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
- }
-
- // Uses of the argument in the function should use our new alloca
- // instead.
- ActualArg = NewAlloca;
-
+ if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
+ ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
+ CalledFunc->getParamAlignment(ArgNo+1));
+
// Calls that we inline may use the new alloca, so we need to clear
- // their 'tail' flags.
- MustClearTailCallFlags = true;
+ // their 'tail' flags if HandleByValArgument introduced a new alloca and
+ // the callee has calls.
+ MustClearTailCallFlags |= ActualArg != *AI;
}
VMap[I] = ActualArg;
@@ -399,8 +440,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
if (!isa<Constant>(AI->getArraySize()))
continue;
- // Keep track of the static allocas that we inline into the caller if the
- // StaticAllocas pointer is non-null.
+ // Keep track of the static allocas that we inline into the caller.
IFI.StaticAllocas.push_back(AI);
// Scan for the block of allocas that we can move over, and move them
@@ -579,10 +619,10 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// any users of the original call/invoke instruction.
const Type *RTy = CalledFunc->getReturnType();
+ PHINode *PHI = 0;
if (Returns.size() > 1) {
// The PHI node should go at the front of the new basic block to merge all
// possible incoming values.
- PHINode *PHI = 0;
if (!TheCall->use_empty()) {
PHI = PHINode::Create(RTy, TheCall->getName(),
AfterCallBB->begin());
@@ -600,14 +640,6 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
"Ret value not consistent in function!");
PHI->addIncoming(RI->getReturnValue(), RI->getParent());
}
-
- // Now that we inserted the PHI, check to see if it has a single value
- // (e.g. all the entries are the same or undef). If so, remove the PHI so
- // it doesn't block other optimizations.
- if (Value *V = PHI->hasConstantValue()) {
- PHI->replaceAllUsesWith(V);
- PHI->eraseFromParent();
- }
}
@@ -664,5 +696,14 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Now we can remove the CalleeEntry block, which is now empty.
Caller->getBasicBlockList().erase(CalleeEntry);
+ // If we inserted a phi node, check to see if it has a single value (e.g. all
+ // the entries are the same or undef). If so, remove the PHI so it doesn't
+ // block other optimizations.
+ if (PHI)
+ if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
+ PHI->replaceAllUsesWith(V);
+ PHI->eraseFromParent();
+ }
+
return true;
}
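
HandleByValArgument, factored out above, decides when the implicit byval copy can be elided: a readonly callee cannot mutate the caller's memory, so the memcpy is only needed when a byval alignment requirement cannot be proven or enforced. A decision sketch with hypothetical helper names (knownAlignment stands in for getOrEnforceKnownAlignment):

    #include <cstdint>
    #include <cstdio>

    // Stand-in: the lowest set bit of the address is the alignment we can
    // prove from the pointer value alone.
    static unsigned knownAlignment(const void *P) {
      std::uintptr_t A = (std::uintptr_t)P;
      if (A == 0) return 1u << 12;      // arbitrary clamp for null
      std::uintptr_t Low = A & (0 - A); // lowest set bit
      return Low > (1u << 29) ? (1u << 29) : (unsigned)Low;
    }

    static const void *handleByVal(const void *Arg, bool CalleeReadOnly,
                                   unsigned ByValAlignment, bool *NeedsCopy) {
      if (CalleeReadOnly &&
          (ByValAlignment <= 1 || knownAlignment(Arg) >= ByValAlignment)) {
        *NeedsCopy = false;  // elide the implicit copy entirely
        return Arg;
      }
      *NeedsCopy = true;     // caller must alloca + memcpy, as in the diff
      return Arg;
    }

    int main() {
      alignas(16) char Buf[32];
      bool NeedsCopy;
      handleByVal(Buf, /*CalleeReadOnly=*/true, 16, &NeedsCopy);
      std::printf("copy needed: %d\n", NeedsCopy);  // 0: already aligned
      return 0;
    }
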
diff --git a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
index 5ca8299..45c15de 100644
--- a/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InstructionNamer.cpp
@@ -23,7 +23,9 @@ using namespace llvm;
namespace {
struct InstNamer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- InstNamer() : FunctionPass(ID) {}
+ InstNamer() : FunctionPass(ID) {
+ initializeInstNamerPass(*PassRegistry::getPassRegistry());
+ }
void getAnalysisUsage(AnalysisUsage &Info) const {
Info.setPreservesAll();
@@ -48,11 +50,10 @@ namespace {
};
char InstNamer::ID = 0;
- INITIALIZE_PASS(InstNamer, "instnamer",
- "Assign names to anonymous instructions", false, false);
}
-
+INITIALIZE_PASS(InstNamer, "instnamer",
+ "Assign names to anonymous instructions", false, false)
char &llvm::InstructionNamerID = InstNamer::ID;
//===----------------------------------------------------------------------===//
//
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index 275b265..b2e5fa6 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -47,7 +47,9 @@ STATISTIC(NumLCSSA, "Number of live out of a loop variables");
namespace {
struct LCSSA : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LCSSA() : LoopPass(ID) {}
+ LCSSA() : LoopPass(ID) {
+ initializeLCSSAPass(*PassRegistry::getPassRegistry());
+ }
// Cached analysis information for the current function.
DominatorTree *DT;
@@ -65,10 +67,7 @@ namespace {
AU.setPreservesCFG();
AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreserved<ScalarEvolution>();
}
@@ -90,7 +89,10 @@ namespace {
}
char LCSSA::ID = 0;
-INITIALIZE_PASS(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false);
+INITIALIZE_PASS_BEGIN(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
Pass *llvm::createLCSSAPass() { return new LCSSA(); }
char &llvm::LCSSAID = LCSSA::ID;
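
The INITIALIZE_PASS_BEGIN/_DEPENDENCY/_END change registers LCSSA's required analyses before LCSSA itself, and the constructor now triggers that registration explicitly. A toy model of the eager, run-once registration (strings standing in for PassInfo objects; not the PassRegistry API):

    #include <cstdio>
    #include <set>
    #include <string>

    static std::set<std::string> &registry() {
      static std::set<std::string> R;  // stands in for the PassRegistry
      return R;
    }

    static void registerPass(const char *Arg) {
      if (registry().insert(Arg).second)
        std::printf("registered %s\n", Arg);
    }

    // Mirrors the macro expansion: dependencies register before the pass.
    static void initializeLCSSAPassModel() {
      registerPass("domtree");  // INITIALIZE_PASS_DEPENDENCY(DominatorTree)
      registerPass("loops");    // INITIALIZE_PASS_DEPENDENCY(LoopInfo)
      registerPass("lcssa");    // INITIALIZE_PASS_END(LCSSA, ...)
    }

    struct LCSSAModel {
      LCSSAModel() { initializeLCSSAPassModel(); }  // as in the new ctor
    };

    int main() {
      LCSSAModel A, B;  // the second construction re-registers nothing
      (void)A; (void)B;
      return 0;
    }
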
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index 52f0499..063c76e 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -22,9 +22,11 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
@@ -66,9 +68,9 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
assert(BI->getParent() && "Terminator not inserted in block!");
OldDest->removePredecessor(BI->getParent());
- // Set the unconditional destination, and change the insn to be an
- // unconditional branch.
- BI->setUnconditionalDest(Destination);
+ // Replace the conditional branch with an unconditional one.
+ BranchInst::Create(Destination, BI);
+ BI->eraseFromParent();
return true;
}
@@ -81,8 +83,9 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
assert(BI->getParent() && "Terminator not inserted in block!");
Dest1->removePredecessor(BI->getParent());
- // Change a conditional branch to unconditional.
- BI->setUnconditionalDest(Dest1);
+ // Replace the conditional branch with an unconditional one.
+ BranchInst::Create(Dest1, BI);
+ BI->eraseFromParent();
return true;
}
return false;
@@ -209,9 +212,6 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
// We don't want debug info removed by anything this general.
if (isa<DbgInfoIntrinsic>(I)) return false;
- // Likewise for memory use markers.
- if (isa<MemoryUseIntrinsic>(I)) return false;
-
if (!I->mayHaveSideEffects()) return true;
// Special case intrinsics that "may have side effects" but can be deleted
@@ -260,29 +260,45 @@ bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
return true;
}
+/// areAllUsesEqual - Check whether the uses of a value are all the same.
+/// This is similar to Instruction::hasOneUse() except this will also return
+/// true when there are multiple uses that all refer to the same value.
+static bool areAllUsesEqual(Instruction *I) {
+ Value::use_iterator UI = I->use_begin();
+ Value::use_iterator UE = I->use_end();
+ if (UI == UE)
+ return false;
+
+ User *TheUse = *UI;
+ for (++UI; UI != UE; ++UI) {
+ if (*UI != TheUse)
+ return false;
+ }
+ return true;
+}
+
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if the PHI node is actually deleted.
-bool
-llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
+bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
// We can remove a PHI if it is on a cycle in the def-use graph
// where each node in the cycle has degree one, i.e. only one use,
// and is an instruction with no side effects.
- if (!PN->hasOneUse())
+ if (!areAllUsesEqual(PN))
return false;
bool Changed = false;
SmallPtrSet<PHINode *, 4> PHIs;
PHIs.insert(PN);
for (Instruction *J = cast<Instruction>(*PN->use_begin());
- J->hasOneUse() && !J->mayHaveSideEffects();
+ areAllUsesEqual(J) && !J->mayHaveSideEffects();
J = cast<Instruction>(*J->use_begin()))
// If we find a PHI more than once, we're on a cycle that
// won't prove fruitful.
if (PHINode *JP = dyn_cast<PHINode>(J))
- if (!PHIs.insert(cast<PHINode>(JP))) {
+ if (!PHIs.insert(JP)) {
// Break the cycle and delete the PHI and its operands.
JP->replaceAllUsesWith(UndefValue::get(JP->getType()));
(void)RecursivelyDeleteTriviallyDeadInstructions(JP);
@@ -346,13 +362,13 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
WeakVH PhiIt = &BB->front();
while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
-
- Value *PNV = PN->hasConstantValue();
+
+ Value *PNV = SimplifyInstruction(PN, TD);
if (PNV == 0) continue;
-
+
// If we're able to simplify the phi to a single value, substitute the new
// value into all of its uses.
- assert(PNV != PN && "hasConstantValue broken");
+ assert(PNV != PN && "SimplifyInstruction broken!");
Value *OldPhiIt = PhiIt;
ReplaceAndSimplifyAllUses(PN, PNV, TD);
@@ -402,6 +418,12 @@ void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
PredBB->replaceAllUsesWith(DestBB);
if (P) {
+ DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
+ if (DT) {
+ BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
+ DT->changeImmediateDominator(DestBB, PredBBIDom);
+ DT->eraseNode(PredBB);
+ }
ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
if (PI) {
PI->replaceAllUses(PredBB, DestBB);
@@ -645,3 +667,95 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
return Changed;
}
+
+/// enforceKnownAlignment - If the specified pointer points to an object that
+/// we control, modify the object's alignment to PrefAlign. This isn't
+/// often possible though. If alignment is important, a more reliable approach
+/// is to simply align all global variables and allocation instructions to
+/// their preferred alignment from the beginning.
+///
+static unsigned enforceKnownAlignment(Value *V, unsigned Align,
+ unsigned PrefAlign) {
+
+ User *U = dyn_cast<User>(V);
+ if (!U) return Align;
+
+ switch (Operator::getOpcode(U)) {
+ default: break;
+ case Instruction::BitCast:
+ return enforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
+ case Instruction::GetElementPtr: {
+ // If all indexes are zero, it is just the alignment of the base pointer.
+ bool AllZeroOperands = true;
+ for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
+ if (!isa<Constant>(*i) ||
+ !cast<Constant>(*i)->isNullValue()) {
+ AllZeroOperands = false;
+ break;
+ }
+
+ if (AllZeroOperands) {
+ // Treat this like a bitcast.
+ return enforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
+ }
+ return Align;
+ }
+ case Instruction::Alloca: {
+ AllocaInst *AI = cast<AllocaInst>(V);
+ // If there is a requested alignment and if this is an alloca, round up.
+ if (AI->getAlignment() >= PrefAlign)
+ return AI->getAlignment();
+ AI->setAlignment(PrefAlign);
+ return PrefAlign;
+ }
+ }
+
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ // If there is a large requested alignment and we can, bump up the alignment
+ // of the global.
+ if (GV->isDeclaration()) return Align;
+
+ if (GV->getAlignment() >= PrefAlign)
+ return GV->getAlignment();
+ // We can only increase the alignment of the global if it has no alignment
+ // specified or if it is not assigned a section. If it is assigned a
+ // section, the global could be densely packed with other objects in the
+ // section, and increasing the alignment could cause padding issues.
+ if (!GV->hasSection() || GV->getAlignment() == 0)
+ GV->setAlignment(PrefAlign);
+ return GV->getAlignment();
+ }
+
+ return Align;
+}
+
+/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
+/// we can determine, return it; otherwise return 0. If PrefAlign is specified,
+/// and it is more than the alignment of the ultimate object, see if we can
+/// increase the alignment of the ultimate object, making this check succeed.
+unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+ const TargetData *TD) {
+ assert(V->getType()->isPointerTy() &&
+ "getOrEnforceKnownAlignment expects a pointer!");
+ unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
+ APInt Mask = APInt::getAllOnesValue(BitWidth);
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
+ unsigned TrailZ = KnownZero.countTrailingOnes();
+
+ // Avoid trouble with ridiculously large TrailZ values, such as
+ // those computed from a null pointer.
+ TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
+
+ unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
+
+ // LLVM doesn't support alignments larger than this currently.
+ Align = std::min(Align, +Value::MaximumAlignment);
+
+ if (PrefAlign > Align)
+ Align = enforceKnownAlignment(V, Align, PrefAlign);
+
+ // We don't need to make any adjustment.
+ return Align;
+}
+
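
The getOrEnforceKnownAlignment logic added above derives a provable alignment from the trailing zero bits of a pointer's known value, clamping the degenerate null-pointer case. A standalone sketch (the known bits come from a raw address here, where the real code uses ComputeMaskedBits; __builtin_ctzll assumes GCC or Clang):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static unsigned provableAlignment(std::uint64_t Addr) {
      unsigned TrailZ = Addr ? (unsigned)__builtin_ctzll(Addr)
                             : 31u;  // null: clamp, not "infinite" zeros
      TrailZ = std::min(TrailZ, 31u);      // the sizeof/CHAR_BIT clamp
      return 1u << std::min(TrailZ, 29u);  // 1 << 29 ~ Value::MaximumAlignment
    }

    int main() {
      std::printf("%u %u %u\n",
                  provableAlignment(0x1000),  // 4096
                  provableAlignment(0x1004),  // 4
                  provableAlignment(0));      // clamped to 1 << 29
      return 0;
    }
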
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index b3c4801..2462630 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -37,7 +37,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "loopsimplify"
+#define DEBUG_TYPE "loop-simplify"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
@@ -46,9 +46,10 @@
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
@@ -65,7 +66,9 @@ STATISTIC(NumNested , "Number of nested loops split out");
namespace {
struct LoopSimplify : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LoopSimplify() : LoopPass(ID) {}
+ LoopSimplify() : LoopPass(ID) {
+ initializeLoopSimplifyPass(*PassRegistry::getPassRegistry());
+ }
// AA - If we have an alias analysis object to update, this is it, otherwise
// this is null.
@@ -87,8 +90,6 @@ namespace {
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
- AU.addPreserved<DominanceFrontier>();
- AU.addPreservedID(LCSSAID);
}
/// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
@@ -107,8 +108,12 @@ namespace {
}
char LoopSimplify::ID = 0;
-INITIALIZE_PASS(LoopSimplify, "loopsimplify",
- "Canonicalize natural loops", true, false);
+INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
+ "Canonicalize natural loops", true, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LoopSimplify, "loop-simplify",
+ "Canonicalize natural loops", true, false)
// Publicly exposed interface to pass...
char &llvm::LoopSimplifyID = LoopSimplify::ID;
@@ -157,9 +162,8 @@ ReprocessLoop:
for (SmallPtrSet<BasicBlock*, 4>::iterator I = BadPreds.begin(),
E = BadPreds.end(); I != E; ++I) {
- DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor ";
- WriteAsOperand(dbgs(), *I, false);
- dbgs() << "\n");
+ DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
+ << (*I)->getName() << "\n");
// Inform each successor of each dead pred.
for (succ_iterator SI = succ_begin(*I), SE = succ_end(*I); SI != SE; ++SI)
@@ -184,9 +188,8 @@ ReprocessLoop:
if (BI->isConditional()) {
if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {
- DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in ";
- WriteAsOperand(dbgs(), *I, false);
- dbgs() << "\n");
+ DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
+ << (*I)->getName() << "\n");
BI->setCondition(ConstantInt::get(Cond->getType(),
!L->contains(BI->getSuccessor(0))));
@@ -262,8 +265,9 @@ ReprocessLoop:
PHINode *PN;
for (BasicBlock::iterator I = L->getHeader()->begin();
(PN = dyn_cast<PHINode>(I++)); )
- if (Value *V = PN->hasConstantValue(DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, DT)) {
if (AA) AA->deleteValue(PN);
+ if (SE) SE->forgetValue(PN);
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
}
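This hunk is one of several in this patch that replace PHINode::hasConstantValue with the more general SimplifyInstruction, which takes optional TargetData and DominatorTree arguments. The recurring shape, sketched outside any particular pass (PN, TD and DT assumed in scope; TD may be null):

  // If the PHI folds to a single value (e.g. all incoming values are
  // identical or undef), forward its uses and delete it.
  if (Value *V = SimplifyInstruction(PN, TD, DT)) {
    PN->replaceAllUsesWith(V);
    PN->eraseFromParent();
  }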
@@ -317,29 +321,22 @@ ReprocessLoop:
if (!FoldBranchToCommonDest(BI)) continue;
// Success. The block is now dead, so remove it from the loop,
- // update the dominator tree and dominance frontier, and delete it.
-
- DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block ";
- WriteAsOperand(dbgs(), ExitingBlock, false);
- dbgs() << "\n");
+ // update the dominator tree and delete it.
+ DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
+ << ExitingBlock->getName() << "\n");
assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock));
Changed = true;
LI->removeBlock(ExitingBlock);
- DominanceFrontier *DF = getAnalysisIfAvailable<DominanceFrontier>();
DomTreeNode *Node = DT->getNode(ExitingBlock);
const std::vector<DomTreeNodeBase<BasicBlock> *> &Children =
Node->getChildren();
while (!Children.empty()) {
DomTreeNode *Child = Children.front();
DT->changeImmediateDominator(Child, Node->getIDom());
- if (DF) DF->changeImmediateDominator(Child->getBlock(),
- Node->getIDom()->getBlock(),
- DT);
}
DT->eraseNode(ExitingBlock);
- if (DF) DF->removeBlock(ExitingBlock);
BI->getSuccessor(0)->removePredecessor(ExitingBlock);
BI->getSuccessor(1)->removePredecessor(ExitingBlock);
@@ -378,9 +375,8 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
SplitBlockPredecessors(Header, &OutsideBlocks[0], OutsideBlocks.size(),
".preheader", this);
- DEBUG(dbgs() << "LoopSimplify: Creating pre-header ";
- WriteAsOperand(dbgs(), NewBB, false);
- dbgs() << "\n");
+ DEBUG(dbgs() << "LoopSimplify: Creating pre-header " << NewBB->getName()
+ << "\n");
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
@@ -409,10 +405,8 @@ BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
LoopBlocks.size(), ".loopexit",
this);
- DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block ";
- WriteAsOperand(dbgs(), NewBB, false);
- dbgs() << "\n");
-
+ DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
+ << NewBB->getName() << "\n");
return NewBB;
}
@@ -438,11 +432,11 @@ static void AddBlockAndPredsToSet(BasicBlock *InputBB, BasicBlock *StopBlock,
/// FindPHIToPartitionLoops - The first part of loop-nestification is to find a
/// PHI node that tells us how to partition the loops.
static PHINode *FindPHIToPartitionLoops(Loop *L, DominatorTree *DT,
- AliasAnalysis *AA) {
+ AliasAnalysis *AA, LoopInfo *LI) {
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I);
++I;
- if (Value *V = PN->hasConstantValue(DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, DT)) {
// This is a degenerate PHI already, don't modify it!
PN->replaceAllUsesWith(V);
if (AA) AA->deleteValue(PN);
@@ -516,7 +510,7 @@ void LoopSimplify::PlaceSplitBlockCarefully(BasicBlock *NewBB,
/// created.
///
Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
- PHINode *PN = FindPHIToPartitionLoops(L, DT, AA);
+ PHINode *PN = FindPHIToPartitionLoops(L, DT, AA, LI);
if (PN == 0) return 0; // No known way to partition.
// Pull out all predecessors that have varying values in the loop. This
@@ -643,9 +637,8 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
Header->getName()+".backedge", F);
BranchInst *BETerminator = BranchInst::Create(Header, BEBlock);
- DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block ";
- WriteAsOperand(dbgs(), BEBlock, false);
- dbgs() << "\n");
+ DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block "
+ << BEBlock->getName() << "\n");
// Move the new backedge block to right after the last backedge block.
Function::iterator InsertPos = BackedgeBlocks.back(); ++InsertPos;
@@ -721,8 +714,6 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Update dominator information
DT->splitBlock(BEBlock);
- if (DominanceFrontier *DF = getAnalysisIfAvailable<DominanceFrontier>())
- DF->splitBlock(BEBlock);
return BEBlock;
}
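LoopSimplify also shows the new three-part registration idiom used throughout this patch: INITIALIZE_PASS_BEGIN / INITIALIZE_PASS_DEPENDENCY / INITIALIZE_PASS_END replace the single INITIALIZE_PASS macro so a pass can name the analyses it depends on, and the constructor calls the generated initializer. A sketch for a hypothetical pass (MyPass is invented for illustration):

  INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "Example pass", false, false)
  INITIALIZE_PASS_DEPENDENCY(DominatorTree)
  INITIALIZE_PASS_DEPENDENCY(LoopInfo)
  INITIALIZE_PASS_END(MyPass, "my-pass", "Example pass", false, false)

  // Constructor, mirroring the LoopSimplify change above:
  MyPass() : FunctionPass(ID) {
    initializeMyPassPass(*PassRegistry::getPassRegistry());
  }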
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 236bbe9..7da7271 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -16,13 +16,14 @@
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches. This will be corrected in the future.
+//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-unroll"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/BasicBlock.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/Debug.h"
@@ -30,20 +31,19 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
-
using namespace llvm;
// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
-STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
+STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
- ValueMap<const Value *, Value*> &VMap) {
+ ValueToValueMapTy &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ ValueToValueMapTy::iterator It = VMap.find(Op);
if (It != VMap.end())
I->setOperand(op, It->second);
}
@@ -96,7 +96,7 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI) {
}
/// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
-/// if unrolling was succesful, or false if the loop was unmodified. Unrolling
+/// if unrolling was successful, or false if the loop was unmodified. Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
@@ -105,7 +105,8 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI) {
///
/// If a LoopPassManager is passed in, and the loop is fully removed, it will be
/// removed from the LoopPassManager as well. LPM can also be NULL.
-bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) {
+bool llvm::UnrollLoop(Loop *L, unsigned Count,
+ LoopInfo *LI, LPPassManager *LPM) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
@@ -127,6 +128,13 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
" Can't unroll; loop not terminated by a conditional branch.\n");
return false;
}
+
+ if (Header->hasAddressTaken()) {
+ // The loop-rotate pass can be helpful to avoid this in many cases.
+ DEBUG(dbgs() <<
+ " Won't unroll loop: address of header block is taken.\n");
+ return false;
+ }
// Notify ScalarEvolution that the loop will be substantially changed,
// if not outright eliminated.
@@ -189,7 +197,6 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
// For the first iteration of the loop, we should use the precloned values for
// PHI nodes. Insert associations now.
- typedef ValueMap<const Value*, Value*> ValueToValueMapTy;
ValueToValueMapTy LastValueMap;
std::vector<PHINode*> OrigPHINode;
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
@@ -274,7 +281,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
for (unsigned i = 0; i < NewBlocks.size(); ++i)
for (BasicBlock::iterator I = NewBlocks[i]->begin(),
E = NewBlocks[i]->end(); I != E; ++I)
- RemapInstruction(I, LastValueMap);
+ ::RemapInstruction(I, LastValueMap);
}
// The latch block exits the loop. If there are any PHI nodes in the
@@ -342,7 +349,9 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
// iteration.
Term->setSuccessor(!ContinueOnTrue, Dest);
} else {
- Term->setUnconditionalDest(Dest);
+ // Replace the conditional branch with an unconditional one.
+ BranchInst::Create(Dest, Term);
+ Term->eraseFromParent();
// Merge adjacent basic blocks, if possible.
if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI)) {
std::replace(Latches.begin(), Latches.end(), Dest, Fold);
@@ -362,10 +371,11 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
if (isInstructionTriviallyDead(Inst))
(*BB)->getInstList().erase(Inst);
- else if (Constant *C = ConstantFoldInstruction(Inst)) {
- Inst->replaceAllUsesWith(C);
- (*BB)->getInstList().erase(Inst);
- }
+ else if (Value *V = SimplifyInstruction(Inst))
+ if (LI->replacementPreservesLCSSAForm(Inst, V)) {
+ Inst->replaceAllUsesWith(V);
+ (*BB)->getInstList().erase(Inst);
+ }
}
NumCompletelyUnrolled += CompletelyUnroll;
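The cleanup sweep at the end of UnrollLoop now prefers SimplifyInstruction over ConstantFoldInstruction and guards each replacement with replacementPreservesLCSSAForm, since the unroller must leave the loop in LCSSA form. Condensed into a sketch (BB an iterator over the unrolled blocks and LI the LoopInfo, as in the patch):

  for (BasicBlock::iterator I = (*BB)->begin(); I != (*BB)->end(); ) {
    Instruction *Inst = I++;
    if (isInstructionTriviallyDead(Inst))
      Inst->eraseFromParent();                  // dead: just remove it
    else if (Value *V = SimplifyInstruction(Inst))
      if (LI->replacementPreservesLCSSAForm(Inst, V)) {
        Inst->replaceAllUsesWith(V);            // simplified: forward uses
        Inst->eraseFromParent();
      }
  }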
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index a46dd84..025ae0d 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -79,7 +79,9 @@ namespace {
explicit LowerInvoke(const TargetLowering *tli = NULL,
bool useExpensiveEHSupport = ExpensiveEHSupport)
: FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
- TLI(tli) { }
+ TLI(tli) {
+ initializeLowerInvokePass(*PassRegistry::getPassRegistry());
+ }
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
@@ -102,7 +104,7 @@ namespace {
char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
"Lower invoke and unwind, for unwindless code generators",
- false, false);
+ false, false)
char &llvm::LowerInvokePassID = LowerInvoke::ID;
@@ -148,19 +150,20 @@ bool LowerInvoke::doInitialization(Module &M) {
"llvm.sjljeh.jblist");
}
-// VisualStudio defines setjmp as _setjmp via #include <csetjmp> / <setjmp.h>,
-// so it looks like Intrinsic::_setjmp
-#if defined(_MSC_VER) && defined(setjmp)
-#define setjmp_undefined_for_visual_studio
-#undef setjmp
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+ !defined(setjmp_undefined_for_msvc)
+# pragma push_macro("setjmp")
+# undef setjmp
+# define setjmp_undefined_for_msvc
#endif
SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);
-#if defined(_MSC_VER) && defined(setjmp_undefined_for_visual_studio)
-// let's return it to _setjmp state in case anyone ever needs it after this
-// point under VisualStudio
-#define setjmp _setjmp
+#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
+ // let's return it to _setjmp state
+# pragma pop_macro("setjmp")
+# undef setjmp_undefined_for_msvc
#endif
LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
@@ -186,6 +189,7 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
NewCall->takeName(II);
NewCall->setCallingConv(II->getCallingConv());
NewCall->setAttributes(II->getAttributes());
+ NewCall->setDebugLoc(II->getDebugLoc());
II->replaceAllUsesWith(NewCall);
// Insert an unconditional branch to the normal destination.
@@ -266,6 +270,7 @@ void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
NewCall->takeName(II);
NewCall->setCallingConv(II->getCallingConv());
NewCall->setAttributes(II->getAttributes());
+ NewCall->setDebugLoc(II->getDebugLoc());
II->replaceAllUsesWith(NewCall);
// Replace the invoke with an uncond branch.
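The setjmp workaround rewritten above trades manual redefinition for the push_macro/pop_macro pragmas, which save and restore the macro around the code that needs the bare identifier. The general shape as a standalone sketch (SAVED_SETJMP is an invented guard playing the role of setjmp_undefined_for_msvc):

  #if defined(_MSC_VER) && defined(setjmp) && !defined(SAVED_SETJMP)
  # pragma push_macro("setjmp")   // remember the macro definition
  # undef setjmp                  // expose the plain identifier
  # define SAVED_SETJMP
  #endif

  /* ... code that refers to the identifier 'setjmp' goes here ... */

  #if defined(_MSC_VER) && defined(SAVED_SETJMP)
  # pragma pop_macro("setjmp")    // restore the saved definition
  # undef SAVED_SETJMP
  #endif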
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 5530b47..914a439 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -33,7 +33,9 @@ namespace {
class LowerSwitch : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LowerSwitch() : FunctionPass(ID) {}
+ LowerSwitch() : FunctionPass(ID) {
+ initializeLowerSwitchPass(*PassRegistry::getPassRegistry());
+ }
virtual bool runOnFunction(Function &F);
@@ -80,7 +82,7 @@ namespace {
char LowerSwitch::ID = 0;
INITIALIZE_PASS(LowerSwitch, "lowerswitch",
- "Lower SwitchInst's to branches", false, false);
+ "Lower SwitchInst's to branches", false, false)
// Publicly exposed interface to pass...
char &llvm::LowerSwitchID = LowerSwitch::ID;
@@ -107,7 +109,8 @@ bool LowerSwitch::runOnFunction(Function &F) {
// operator<< - Used for debugging purposes.
//
static raw_ostream& operator<<(raw_ostream &O,
- const LowerSwitch::CaseVector &C) ATTRIBUTE_USED;
+ const LowerSwitch::CaseVector &C)
+ LLVM_ATTRIBUTE_USED;
static raw_ostream& operator<<(raw_ostream &O,
const LowerSwitch::CaseVector &C) {
O << "[";
diff --git a/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp b/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
index 101645b..f4ca81a 100644
--- a/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Mem2Reg.cpp
@@ -27,18 +27,17 @@ STATISTIC(NumPromoted, "Number of alloca's promoted");
namespace {
struct PromotePass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- PromotePass() : FunctionPass(ID) {}
+ PromotePass() : FunctionPass(ID) {
+ initializePromotePassPass(*PassRegistry::getPassRegistry());
+ }
// runOnFunction - To run this pass, first we calculate the alloca
// instructions that are safe for promotion, then we promote each one.
//
virtual bool runOnFunction(Function &F);
- // getAnalysisUsage - We need dominance frontiers
- //
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
- AU.addRequired<DominanceFrontier>();
AU.setPreservesCFG();
// This is a cluster of orthogonal Transforms
AU.addPreserved<UnifyFunctionExitNodes>();
@@ -49,8 +48,11 @@ namespace {
} // end of anonymous namespace
char PromotePass::ID = 0;
-INITIALIZE_PASS(PromotePass, "mem2reg", "Promote Memory to Register",
- false, false);
+INITIALIZE_PASS_BEGIN(PromotePass, "mem2reg", "Promote Memory to Register",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(PromotePass, "mem2reg", "Promote Memory to Register",
+ false, false)
bool PromotePass::runOnFunction(Function &F) {
std::vector<AllocaInst*> Allocas;
@@ -60,7 +62,6 @@ bool PromotePass::runOnFunction(Function &F) {
bool Changed = false;
DominatorTree &DT = getAnalysis<DominatorTree>();
- DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
while (1) {
Allocas.clear();
@@ -74,7 +75,7 @@ bool PromotePass::runOnFunction(Function &F) {
if (Allocas.empty()) break;
- PromoteMemToReg(Allocas, DT, DF);
+ PromoteMemToReg(Allocas, DT);
NumPromoted += Allocas.size();
Changed = true;
}
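With the DominanceFrontier requirement gone, callers of PromoteMemToReg need only a DominatorTree (plus an optional AliasSetTracker). A usage sketch mirroring the runOnFunction loop above (F a Function and DT the current DominatorTree; only entry-block allocas are considered, as in the pass):

  std::vector<AllocaInst*> Allocas;
  BasicBlock &Entry = F.getEntryBlock();
  for (BasicBlock::iterator I = Entry.begin(), E = Entry.end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // is it an alloca?
      if (isAllocaPromotable(AI))                 // used only by loads/stores
        Allocas.push_back(AI);
  if (!Allocas.empty())
    PromoteMemToReg(Allocas, DT);                 // no AliasSetTracker here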
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index a4e3029..e6a4373 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -9,10 +9,19 @@
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
-// transformed by using dominator frontiers to place PHI nodes, then traversing
-// the function in depth-first order to rewrite loads and stores as appropriate.
-// This is just the standard SSA construction algorithm to construct "pruned"
-// SSA form.
+// transformed by using iterated dominator frontiers to place PHI nodes, then
+// traversing the function in depth-first order to rewrite loads and stores as
+// appropriate.
+//
+// The algorithm used here is based on:
+//
+// Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
+// In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
+// Programming Languages
+// POPL '95. ACM, New York, NY, 62-73.
+//
+// It has been modified to not explicitly use the DJ graph data structure and to
+// directly compute pruned SSA using per-variable liveness information.
//
//===----------------------------------------------------------------------===//
@@ -24,9 +33,10 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
+#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -34,6 +44,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include <algorithm>
+#include <map>
+#include <queue>
using namespace llvm;
STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
@@ -178,7 +190,6 @@ namespace {
///
std::vector<AllocaInst*> Allocas;
DominatorTree &DT;
- DominanceFrontier &DF;
DIFactory *DIF;
/// AST - An AliasSetTracker object to update. If null, don't update it.
@@ -187,7 +198,7 @@ namespace {
/// AllocaLookup - Reverse mapping of Allocas.
///
- std::map<AllocaInst*, unsigned> AllocaLookup;
+ DenseMap<AllocaInst*, unsigned> AllocaLookup;
/// NewPhiNodes - The PhiNodes we're adding.
///
@@ -216,12 +227,15 @@ namespace {
/// non-deterministic behavior.
DenseMap<BasicBlock*, unsigned> BBNumbers;
+ /// DomLevels - Maps DomTreeNodes to their level in the dominator tree.
+ DenseMap<DomTreeNode*, unsigned> DomLevels;
+
/// BBNumPreds - Lazily compute the number of predecessors a block has.
DenseMap<const BasicBlock*, unsigned> BBNumPreds;
public:
PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
- DominanceFrontier &df, AliasSetTracker *ast)
- : Allocas(A), DT(dt), DF(df), DIF(0), AST(ast) {}
+ AliasSetTracker *ast)
+ : Allocas(A), DT(dt), DIF(0), AST(ast) {}
~PromoteMem2Reg() {
delete DIF;
}
@@ -264,13 +278,12 @@ namespace {
void RenamePass(BasicBlock *BB, BasicBlock *Pred,
RenamePassData::ValVector &IncVals,
std::vector<RenamePassData> &Worklist);
- bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version,
- SmallPtrSet<PHINode*, 16> &InsertedPHINodes);
+ bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};
struct AllocaInfo {
- std::vector<BasicBlock*> DefiningBlocks;
- std::vector<BasicBlock*> UsingBlocks;
+ SmallVector<BasicBlock*, 32> DefiningBlocks;
+ SmallVector<BasicBlock*, 32> UsingBlocks;
StoreInst *OnlyStore;
BasicBlock *OnlyBlock;
@@ -325,11 +338,19 @@ namespace {
DbgDeclare = FindAllocaDbgDeclare(AI);
}
};
+
+ typedef std::pair<DomTreeNode*, unsigned> DomTreeNodePair;
+
+ struct DomTreeNodeCompare {
+ bool operator()(const DomTreeNodePair &LHS, const DomTreeNodePair &RHS) {
+ return LHS.second < RHS.second;
+ }
+ };
} // end of anonymous namespace
void PromoteMem2Reg::run() {
- Function &F = *DF.getRoot()->getParent();
+ Function &F = *DT.getRoot()->getParent();
if (AST) PointerAllocaValues.resize(Allocas.size());
AllocaDbgDeclares.resize(Allocas.size());
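The DomLevels map and the DomTreeNodeCompare comparator introduced above exist to drive a priority queue ordered by dominator-tree depth. Because std::priority_queue pops the largest element first, comparing on the level field means the deepest pending node comes out first, which is exactly the bottom-up order DetermineInsertionPoint wants below. The queue declaration, restated as a sketch (same types as in the patch):

  typedef std::pair<DomTreeNode*, unsigned> DomTreeNodePair;
  std::priority_queue<DomTreeNodePair,
                      SmallVector<DomTreeNodePair, 32>,
                      DomTreeNodeCompare> PQ;
  // PQ.top() is now the pair with the greatest level, i.e. the deepest
  // dominator-tree node still pending.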
@@ -422,7 +443,26 @@ void PromoteMem2Reg::run() {
continue;
}
}
-
+
+ // If we haven't computed dominator tree levels, do so now.
+ if (DomLevels.empty()) {
+ SmallVector<DomTreeNode*, 32> Worklist;
+
+ DomTreeNode *Root = DT.getRootNode();
+ DomLevels[Root] = 0;
+ Worklist.push_back(Root);
+
+ while (!Worklist.empty()) {
+ DomTreeNode *Node = Worklist.pop_back_val();
+ unsigned ChildLevel = DomLevels[Node] + 1;
+ for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end();
+ CI != CE; ++CI) {
+ DomLevels[*CI] = ChildLevel;
+ Worklist.push_back(*CI);
+ }
+ }
+ }
+
// If we haven't computed a numbering for the BB's in the function, do so
// now.
if (BBNumbers.empty()) {
@@ -484,9 +524,8 @@ void PromoteMem2Reg::run() {
Instruction *A = Allocas[i];
// If there are any uses of the alloca instructions left, they must be in
- // sections of dead code that were not processed on the dominance frontier.
- // Just delete the users now.
- //
+ // unreachable basic blocks that were not processed by walking the dominator
+ // tree. Just delete the users now.
if (!A->use_empty())
A->replaceAllUsesWith(UndefValue::get(A->getType()));
if (AST) AST->deleteValue(A);
@@ -509,9 +548,9 @@ void PromoteMem2Reg::run() {
for (DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*>::iterator I =
NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E;) {
PHINode *PN = I->second;
-
+
// If this PHI node merges one value and/or undefs, get the value.
- if (Value *V = PN->hasConstantValue(&DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, &DT)) {
if (AST && PN->getType()->isPointerTy())
AST->deleteValue(PN);
PN->replaceAllUsesWith(V);
@@ -663,7 +702,6 @@ ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
AllocaInfo &Info) {
-
// Unique the set of defining blocks for efficient lookup.
SmallPtrSet<BasicBlock*, 32> DefBlocks;
DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());
@@ -673,47 +711,78 @@ void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
- // Compute the locations where PhiNodes need to be inserted. Look at the
- // dominance frontier of EACH basic-block we have a write in.
- unsigned CurrentVersion = 0;
- SmallPtrSet<PHINode*, 16> InsertedPHINodes;
- std::vector<std::pair<unsigned, BasicBlock*> > DFBlocks;
- while (!Info.DefiningBlocks.empty()) {
- BasicBlock *BB = Info.DefiningBlocks.back();
- Info.DefiningBlocks.pop_back();
-
- // Look up the DF for this write, add it to defining blocks.
- DominanceFrontier::const_iterator it = DF.find(BB);
- if (it == DF.end()) continue;
-
- const DominanceFrontier::DomSetType &S = it->second;
-
- // In theory we don't need the indirection through the DFBlocks vector.
- // In practice, the order of calling QueuePhiNode would depend on the
- // (unspecified) ordering of basic blocks in the dominance frontier,
- // which would give PHI nodes non-determinstic subscripts. Fix this by
- // processing blocks in order of the occurance in the function.
- for (DominanceFrontier::DomSetType::const_iterator P = S.begin(),
- PE = S.end(); P != PE; ++P) {
- // If the frontier block is not in the live-in set for the alloca, don't
- // bother processing it.
- if (!LiveInBlocks.count(*P))
- continue;
-
- DFBlocks.push_back(std::make_pair(BBNumbers[*P], *P));
- }
-
- // Sort by which the block ordering in the function.
- if (DFBlocks.size() > 1)
- std::sort(DFBlocks.begin(), DFBlocks.end());
-
- for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i) {
- BasicBlock *BB = DFBlocks[i].second;
- if (QueuePhiNode(BB, AllocaNum, CurrentVersion, InsertedPHINodes))
- Info.DefiningBlocks.push_back(BB);
+ // Use a priority queue keyed on dominator tree level so that inserted nodes
+ // are handled from the bottom of the dominator tree upwards.
+ typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
+ DomTreeNodeCompare> IDFPriorityQueue;
+ IDFPriorityQueue PQ;
+
+ for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
+ E = DefBlocks.end(); I != E; ++I) {
+ if (DomTreeNode *Node = DT.getNode(*I))
+ PQ.push(std::make_pair(Node, DomLevels[Node]));
+ }
+
+ SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
+ SmallPtrSet<DomTreeNode*, 32> Visited;
+ SmallVector<DomTreeNode*, 32> Worklist;
+ while (!PQ.empty()) {
+ DomTreeNodePair RootPair = PQ.top();
+ PQ.pop();
+ DomTreeNode *Root = RootPair.first;
+ unsigned RootLevel = RootPair.second;
+
+ // Walk all dominator tree children of Root, inspecting their CFG edges with
+ // targets elsewhere on the dominator tree. Only targets whose level is at
+ // most Root's level are added to the iterated dominance frontier of the
+ // definition set.
+
+ Worklist.clear();
+ Worklist.push_back(Root);
+
+ while (!Worklist.empty()) {
+ DomTreeNode *Node = Worklist.pop_back_val();
+ BasicBlock *BB = Node->getBlock();
+
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
+ ++SI) {
+ DomTreeNode *SuccNode = DT.getNode(*SI);
+
+ // Quickly skip all CFG edges that are also dominator tree edges instead
+ // of catching them below.
+ if (SuccNode->getIDom() == Node)
+ continue;
+
+ unsigned SuccLevel = DomLevels[SuccNode];
+ if (SuccLevel > RootLevel)
+ continue;
+
+ if (!Visited.insert(SuccNode))
+ continue;
+
+ BasicBlock *SuccBB = SuccNode->getBlock();
+ if (!LiveInBlocks.count(SuccBB))
+ continue;
+
+ DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
+ if (!DefBlocks.count(SuccBB))
+ PQ.push(std::make_pair(SuccNode, SuccLevel));
+ }
+
+ for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
+ ++CI) {
+ if (!Visited.count(*CI))
+ Worklist.push_back(*CI);
+ }
}
- DFBlocks.clear();
}
+
+ if (DFBlocks.size() > 1)
+ std::sort(DFBlocks.begin(), DFBlocks.end());
+
+ unsigned CurrentVersion = 0;
+ for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
+ QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
/// RewriteSingleStoreAlloca - If there is only a single store to this value,
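Taken together, the loop above computes the iterated dominance frontier on the fly: seed the priority queue with the defining blocks keyed by dominator-tree level, then for each popped root walk its dominator subtree, collecting every CFG successor that is not a tree child, sits no deeper than the root, has not been visited, and is live-in for the alloca. Stripped of the PromoteMem2Reg bookkeeping, the core is roughly (a sketch, not the exact patch code; DT, DomLevels, DefBlocks, LiveInBlocks, BBNumbers and the containers as above):

  while (!PQ.empty()) {
    DomTreeNode *Root = PQ.top().first;
    unsigned RootLevel = PQ.top().second;
    PQ.pop();
    Worklist.clear();
    Worklist.push_back(Root);
    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      BasicBlock *BB = Node->getBlock();
      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
           SI != SE; ++SI) {
        DomTreeNode *Succ = DT.getNode(*SI);
        if (Succ->getIDom() == Node) continue;     // dominator tree edge
        if (DomLevels[Succ] > RootLevel) continue; // deeper than the root
        if (!Visited.insert(Succ)) continue;       // already handled
        BasicBlock *SuccBB = Succ->getBlock();
        if (!LiveInBlocks.count(SuccBB)) continue; // PHI would be dead
        // SuccBB is in the IDF: it gets a PHI, and unless it is itself a
        // defining block it becomes a new root to expand later.
        DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
        if (!DefBlocks.count(SuccBB))
          PQ.push(std::make_pair(Succ, DomLevels[Succ]));
      }
      for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end();
           CI != CE; ++CI)
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
    }
  }

This is the Sreedhar-Gao algorithm cited in the new file header, with the DJ graph replaced by plain level numbers on the dominator tree.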
@@ -900,8 +969,7 @@ void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
// Alloca returns true if there wasn't already a phi-node for that variable
//
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
- unsigned &Version,
- SmallPtrSet<PHINode*, 16> &InsertedPHINodes) {
+ unsigned &Version) {
// Look up the basic-block in question.
PHINode *&PN = NewPhiNodes[std::make_pair(BB, AllocaNo)];
@@ -916,8 +984,6 @@ bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
++NumPHIInsert;
PhiToAllocaMap[PN] = AllocaNo;
PN->reserveOperandSpace(getNumPreds(BB));
-
- InsertedPHINodes.insert(PN);
if (AST && PN->getType()->isPointerTy())
AST->copyValue(PointerAllocaValues[AllocaNo], PN);
@@ -986,7 +1052,7 @@ NextIteration:
AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
if (!Src) continue;
- std::map<AllocaInst*, unsigned>::iterator AI = AllocaLookup.find(Src);
+ DenseMap<AllocaInst*, unsigned>::iterator AI = AllocaLookup.find(Src);
if (AI == AllocaLookup.end()) continue;
Value *V = IncomingVals[AI->second];
@@ -1002,7 +1068,7 @@ NextIteration:
AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
if (!Dest) continue;
- std::map<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
+ DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
if (ai == AllocaLookup.end())
continue;
@@ -1036,18 +1102,17 @@ NextIteration:
}
/// PromoteMemToReg - Promote the specified list of alloca instructions into
-/// scalar registers, inserting PHI nodes as appropriate. This function makes
-/// use of DominanceFrontier information. This function does not modify the CFG
-/// of the function at all. All allocas must be from the same function.
+/// scalar registers, inserting PHI nodes as appropriate. This function does
+/// not modify the CFG of the function at all. All allocas must be from the
+/// same function.
///
/// If AST is specified, the specified tracker is updated to reflect changes
/// made to the IR.
///
void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
- DominatorTree &DT, DominanceFrontier &DF,
- AliasSetTracker *AST) {
+ DominatorTree &DT, AliasSetTracker *AST) {
// If there is nothing to do, bail out...
if (Allocas.empty()) return;
- PromoteMem2Reg(Allocas, DT, DF, AST).run();
+ PromoteMem2Reg(Allocas, DT, AST).run();
}
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index c855988..3896d98 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -14,6 +14,7 @@
#define DEBUG_TYPE "ssaupdater"
#include "llvm/Instructions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
@@ -178,9 +179,9 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// See if the PHI node can be merged to a single value. This can happen in
// loop cases when we get a PHI of itself and one other value.
- if (Value *ConstVal = InsertedPHI->hasConstantValue()) {
+ if (Value *V = SimplifyInstruction(InsertedPHI)) {
InsertedPHI->eraseFromParent();
- return ConstVal;
+ return V;
}
// If the client wants to know about all new instructions, tell it.
@@ -342,3 +343,169 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
SSAUpdaterImpl<SSAUpdater> Impl(this, &AvailableVals, InsertedPHIs);
return Impl.GetValue(BB);
}
+
+//===----------------------------------------------------------------------===//
+// LoadAndStorePromoter Implementation
+//===----------------------------------------------------------------------===//
+
+LoadAndStorePromoter::
+LoadAndStorePromoter(const SmallVectorImpl<Instruction*> &Insts,
+ SSAUpdater &S, StringRef BaseName) : SSA(S) {
+ if (Insts.empty()) return;
+
+ Value *SomeVal;
+ if (LoadInst *LI = dyn_cast<LoadInst>(Insts[0]))
+ SomeVal = LI;
+ else
+ SomeVal = cast<StoreInst>(Insts[0])->getOperand(0);
+
+ if (BaseName.empty())
+ BaseName = SomeVal->getName();
+ SSA.Initialize(SomeVal->getType(), BaseName);
+}
+
+
+void LoadAndStorePromoter::
+run(const SmallVectorImpl<Instruction*> &Insts) const {
+
+ // First step: bucket up uses of the alloca by the block they occur in.
+ // This is important because we have to handle multiple defs/uses in a block
+ // ourselves: SSAUpdater is purely for cross-block references.
+ // FIXME: Want a TinyVector<Instruction*> since there is often 0/1 element.
+ DenseMap<BasicBlock*, std::vector<Instruction*> > UsesByBlock;
+
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
+ Instruction *User = Insts[i];
+ UsesByBlock[User->getParent()].push_back(User);
+ }
+
+ // Okay, now we can iterate over all the blocks in the function with uses,
+ // processing them. Keep track of which loads are loading a live-in value.
+ // Walk the uses in the use-list order to be deterministic.
+ SmallVector<LoadInst*, 32> LiveInLoads;
+ DenseMap<Value*, Value*> ReplacedLoads;
+
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
+ Instruction *User = Insts[i];
+ BasicBlock *BB = User->getParent();
+ std::vector<Instruction*> &BlockUses = UsesByBlock[BB];
+
+ // If this block has already been processed, ignore this repeat use.
+ if (BlockUses.empty()) continue;
+
+ // Okay, this is the first use in the block. If this block just has a
+ // single user in it, we can rewrite it trivially.
+ if (BlockUses.size() == 1) {
+ // If it is a store, it is a trivial def of the value in the block.
+ if (StoreInst *SI = dyn_cast<StoreInst>(User))
+ SSA.AddAvailableValue(BB, SI->getOperand(0));
+ else
+ // Otherwise it is a load, queue it to rewrite as a live-in load.
+ LiveInLoads.push_back(cast<LoadInst>(User));
+ BlockUses.clear();
+ continue;
+ }
+
+ // Otherwise, check to see if this block is all loads.
+ bool HasStore = false;
+ for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
+ if (isa<StoreInst>(BlockUses[i])) {
+ HasStore = true;
+ break;
+ }
+ }
+
+ // If so, we can queue them all as live in loads. We don't have an
+ // efficient way to tell which one is first in the block and don't want to
+ // scan large blocks, so just add all loads as live ins.
+ if (!HasStore) {
+ for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
+ LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
+ BlockUses.clear();
+ continue;
+ }
+
+ // Otherwise, we have mixed loads and stores (or just a bunch of stores).
+ // Since SSAUpdater is purely for cross-block values, we need to determine
+ // the order of these instructions in the block. If the first use in the
+ // block is a load, then it uses the live in value. The last store defines
+ // the live out value. We handle this by doing a linear scan of the block.
+ Value *StoredValue = 0;
+ for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
+ if (LoadInst *L = dyn_cast<LoadInst>(II)) {
+ // If this is a load from an unrelated pointer, ignore it.
+ if (!isInstInList(L, Insts)) continue;
+
+ // If we haven't seen a store yet, this is a live in use, otherwise
+ // use the stored value.
+ if (StoredValue) {
+ replaceLoadWithValue(L, StoredValue);
+ L->replaceAllUsesWith(StoredValue);
+ ReplacedLoads[L] = StoredValue;
+ } else {
+ LiveInLoads.push_back(L);
+ }
+ continue;
+ }
+
+ if (StoreInst *S = dyn_cast<StoreInst>(II)) {
+ // If this is a store to an unrelated pointer, ignore it.
+ if (!isInstInList(S, Insts)) continue;
+
+ // Remember that this is the active value in the block.
+ StoredValue = S->getOperand(0);
+ }
+ }
+
+ // The last stored value that happened is the live-out for the block.
+ assert(StoredValue && "Already checked that there is a store in block");
+ SSA.AddAvailableValue(BB, StoredValue);
+ BlockUses.clear();
+ }
+
+ // Okay, now we rewrite all loads that use live-in values in the loop,
+ // inserting PHI nodes as necessary.
+ for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
+ LoadInst *ALoad = LiveInLoads[i];
+ Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
+ replaceLoadWithValue(ALoad, NewVal);
+
+ // Avoid assertions in unreachable code.
+ if (NewVal == ALoad) NewVal = UndefValue::get(NewVal->getType());
+ ALoad->replaceAllUsesWith(NewVal);
+ ReplacedLoads[ALoad] = NewVal;
+ }
+
+ // Allow the client to do stuff before we start nuking things.
+ doExtraRewritesBeforeFinalDeletion();
+
+ // Now that everything is rewritten, delete the old instructions from the
+ // function. They should all be dead now.
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
+ Instruction *User = Insts[i];
+
+ // If this is a load that still has uses, then the load must have been added
+ // as a live value in the SSAUpdater data structure for a block (e.g. because
+ // the loaded value was stored later). In this case, we need to recursively
+ // propagate the updates until we get to the real value.
+ if (!User->use_empty()) {
+ Value *NewVal = ReplacedLoads[User];
+ assert(NewVal && "not a replaced load?");
+
+ // Propagate down to the ultimate replacee. The intermediate loads
+ // could theoretically already have been deleted, so we don't want to
+ // dereference the Value*'s.
+ DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
+ while (RLI != ReplacedLoads.end()) {
+ NewVal = RLI->second;
+ RLI = ReplacedLoads.find(NewVal);
+ }
+
+ replaceLoadWithValue(cast<LoadInst>(User), NewVal);
+ User->replaceAllUsesWith(NewVal);
+ }
+
+ instructionDeleted(User);
+ User->eraseFromParent();
+ }
+}
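LoadAndStorePromoter leans on the small SSAUpdater API used earlier in this file: Initialize sets the type and name prefix, AddAvailableValue records the value live out of a block, and GetValueInMiddleOfBlock materializes the live-in value, inserting PHIs as needed. A minimal standalone use of that API (a sketch; DefBB, UseBB and StoredVal are hypothetical):

  SSAUpdater SSA;
  SSA.Initialize(StoredVal->getType(), "promoted");
  SSA.AddAvailableValue(DefBB, StoredVal); // value live out of DefBB
  // Inserts PHI nodes on the paths into UseBB as required; trivial PHIs
  // are folded away via SimplifyInstruction, per the hunk above.
  Value *LiveIn = SSA.GetValueInMiddleOfBlock(UseBB);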
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 28d7afb..fb660db 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -19,33 +19,34 @@
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ConstantRange.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <functional>
#include <set>
#include <map>
using namespace llvm;
+static cl::opt<bool>
+DupRet("simplifycfg-dup-ret", cl::Hidden, cl::init(false),
+ cl::desc("Duplicate return instructions into unconditional branches"));
+
STATISTIC(NumSpeculations, "Number of speculative executed instructions");
namespace {
class SimplifyCFGOpt {
const TargetData *const TD;
- ConstantInt *GetConstantInt(Value *V);
- Value *GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values);
- Value *GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values);
- bool GatherValueComparisons(Instruction *Cond, Value *&CompVal,
- std::vector<ConstantInt*> &Values);
Value *isValueEqualityComparison(TerminatorInst *TI);
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
std::vector<std::pair<ConstantInt*, BasicBlock*> > &Cases);
@@ -53,6 +54,14 @@ class SimplifyCFGOpt {
BasicBlock *Pred);
bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI);
+ bool SimplifyReturn(ReturnInst *RI);
+ bool SimplifyUnwind(UnwindInst *UI);
+ bool SimplifyUnreachable(UnreachableInst *UI);
+ bool SimplifySwitch(SwitchInst *SI);
+ bool SimplifyIndirectBr(IndirectBrInst *IBI);
+ bool SimplifyUncondBranch(BranchInst *BI);
+ bool SimplifyCondBranch(BranchInst *BI);
+
public:
explicit SimplifyCFGOpt(const TargetData *td) : TD(td) {}
bool run(BasicBlock *BB);
@@ -91,8 +100,6 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
/// ExistPred, an existing predecessor of Succ.
static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
BasicBlock *ExistPred) {
- assert(std::find(succ_begin(ExistPred), succ_end(ExistPred), Succ) !=
- succ_end(ExistPred) && "ExistPred is not a predecessor of Succ!");
if (!isa<PHINode>(Succ->begin())) return; // Quick exit if nothing to do
PHINode *PN;
@@ -102,28 +109,29 @@ static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
}
-/// GetIfCondition - Given a basic block (BB) with two predecessors (and
-/// presumably PHI nodes in it), check to see if the merge at this block is due
+/// GetIfCondition - Given a basic block (BB) with two predecessors (and at
+/// least one PHI node in it), check to see if the merge at this block is due
/// to an "if condition". If so, return the boolean condition that determines
/// which entry into BB will be taken. Also, return by references the block
/// that will be entered from if the condition is true, and the block that will
/// be entered if the condition is false.
///
-///
-static Value *GetIfCondition(BasicBlock *BB,
- BasicBlock *&IfTrue, BasicBlock *&IfFalse) {
- assert(std::distance(pred_begin(BB), pred_end(BB)) == 2 &&
+/// This does no checking to see if the true/false blocks have large or unsavory
+/// instructions in them.
+static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+ BasicBlock *&IfFalse) {
+ PHINode *SomePHI = cast<PHINode>(BB->begin());
+ assert(SomePHI->getNumIncomingValues() == 2 &&
"Function can only handle blocks with 2 predecessors!");
- BasicBlock *Pred1 = *pred_begin(BB);
- BasicBlock *Pred2 = *++pred_begin(BB);
+ BasicBlock *Pred1 = SomePHI->getIncomingBlock(0);
+ BasicBlock *Pred2 = SomePHI->getIncomingBlock(1);
// We can only handle branches. Other control flow will be lowered to
// branches if possible anyway.
- if (!isa<BranchInst>(Pred1->getTerminator()) ||
- !isa<BranchInst>(Pred2->getTerminator()))
+ BranchInst *Pred1Br = dyn_cast<BranchInst>(Pred1->getTerminator());
+ BranchInst *Pred2Br = dyn_cast<BranchInst>(Pred2->getTerminator());
+ if (Pred1Br == 0 || Pred2Br == 0)
return 0;
- BranchInst *Pred1Br = cast<BranchInst>(Pred1->getTerminator());
- BranchInst *Pred2Br = cast<BranchInst>(Pred2->getTerminator());
// Eliminate code duplication by ensuring that Pred1Br is conditional if
// either are.
@@ -140,6 +148,12 @@ static Value *GetIfCondition(BasicBlock *BB,
}
if (Pred1Br->isConditional()) {
+ // The only thing we have to watch out for here is to make sure that Pred2
+ // doesn't have incoming edges from other blocks. If it does, the condition
+ // doesn't dominate BB.
+ if (Pred2->getSinglePredecessor() == 0)
+ return 0;
+
// If we found a conditional branch predecessor, make sure that it branches
// to BB and Pred2Br. If it doesn't, this isn't an "if statement".
if (Pred1Br->getSuccessor(0) == BB &&
@@ -156,39 +170,29 @@ static Value *GetIfCondition(BasicBlock *BB,
return 0;
}
- // The only thing we have to watch out for here is to make sure that Pred2
- // doesn't have incoming edges from other blocks. If it does, the condition
- // doesn't dominate BB.
- if (++pred_begin(Pred2) != pred_end(Pred2))
- return 0;
-
return Pred1Br->getCondition();
}
// Ok, if we got here, both predecessors end with an unconditional branch to
// BB. Don't panic! If both blocks only have a single (identical)
// predecessor, and THAT is a conditional branch, then we're all ok!
- if (pred_begin(Pred1) == pred_end(Pred1) ||
- ++pred_begin(Pred1) != pred_end(Pred1) ||
- pred_begin(Pred2) == pred_end(Pred2) ||
- ++pred_begin(Pred2) != pred_end(Pred2) ||
- *pred_begin(Pred1) != *pred_begin(Pred2))
+ BasicBlock *CommonPred = Pred1->getSinglePredecessor();
+ if (CommonPred == 0 || CommonPred != Pred2->getSinglePredecessor())
return 0;
// Otherwise, if this is a conditional branch, then we can use it!
- BasicBlock *CommonPred = *pred_begin(Pred1);
- if (BranchInst *BI = dyn_cast<BranchInst>(CommonPred->getTerminator())) {
- assert(BI->isConditional() && "Two successors but not conditional?");
- if (BI->getSuccessor(0) == Pred1) {
- IfTrue = Pred1;
- IfFalse = Pred2;
- } else {
- IfTrue = Pred2;
- IfFalse = Pred1;
- }
- return BI->getCondition();
+ BranchInst *BI = dyn_cast<BranchInst>(CommonPred->getTerminator());
+ if (BI == 0) return 0;
+
+ assert(BI->isConditional() && "Two successors but not conditional?");
+ if (BI->getSuccessor(0) == Pred1) {
+ IfTrue = Pred1;
+ IfFalse = Pred2;
+ } else {
+ IfTrue = Pred2;
+ IfFalse = Pred1;
}
- return 0;
+ return BI->getCondition();
}
/// DominatesMergePoint - If we have a merge point of an "if condition" as
@@ -201,7 +205,7 @@ static Value *GetIfCondition(BasicBlock *BB,
/// non-trapping. If both are true, the instruction is inserted into the set
/// and true is returned.
static bool DominatesMergePoint(Value *V, BasicBlock *BB,
- std::set<Instruction*> *AggressiveInsts) {
+ SmallPtrSet<Instruction*, 4> *AggressiveInsts) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
// Non-instructions all dominate instructions, but not all constantexprs
@@ -219,56 +223,55 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// If this instruction is defined in a block that contains an unconditional
// branch to BB, then it must be in the 'conditional' part of the "if
- // statement".
- if (BranchInst *BI = dyn_cast<BranchInst>(PBB->getTerminator()))
- if (BI->isUnconditional() && BI->getSuccessor(0) == BB) {
- if (!AggressiveInsts) return false;
- // Okay, it looks like the instruction IS in the "condition". Check to
- // see if it's a cheap instruction to unconditionally compute, and if it
- // only uses stuff defined outside of the condition. If so, hoist it out.
- if (!I->isSafeToSpeculativelyExecute())
- return false;
+ // statement". If not, it definitely dominates the region.
+ BranchInst *BI = dyn_cast<BranchInst>(PBB->getTerminator());
+ if (BI == 0 || BI->isConditional() || BI->getSuccessor(0) != BB)
+ return true;
- switch (I->getOpcode()) {
- default: return false; // Cannot hoist this out safely.
- case Instruction::Load: {
- // We have to check to make sure there are no instructions before the
- // load in its basic block, as we are going to hoist the loop out to
- // its predecessor.
- BasicBlock::iterator IP = PBB->begin();
- while (isa<DbgInfoIntrinsic>(IP))
- IP++;
- if (IP != BasicBlock::iterator(I))
- return false;
- break;
- }
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- case Instruction::ICmp:
- break; // These are all cheap and non-trapping instructions.
- }
+ // If we aren't allowing aggressive promotion anymore, then don't consider
+ // instructions in the 'if region'.
+ if (AggressiveInsts == 0) return false;
+
+ // Okay, it looks like the instruction IS in the "condition". Check to
+ // see if it's a cheap instruction to unconditionally compute, and if it
+ // only uses stuff defined outside of the condition. If so, hoist it out.
+ if (!I->isSafeToSpeculativelyExecute())
+ return false;
- // Okay, we can only really hoist these out if their operands are not
- // defined in the conditional region.
- for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
- if (!DominatesMergePoint(*i, BB, 0))
- return false;
- // Okay, it's safe to do this! Remember this instruction.
- AggressiveInsts->insert(I);
- }
+ switch (I->getOpcode()) {
+ default: return false; // Cannot hoist this out safely.
+ case Instruction::Load:
+ // We have to check to make sure there are no instructions before the
+ // load in its basic block, as we are going to hoist the load out to its
+ // predecessor.
+ if (PBB->getFirstNonPHIOrDbg() != I)
+ return false;
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::ICmp:
+ break; // These are all cheap and non-trapping instructions.
+ }
+ // Okay, we can only really hoist these out if their operands are not
+ // defined in the conditional region.
+ for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
+ if (!DominatesMergePoint(*i, BB, 0))
+ return false;
+ // Okay, it's safe to do this! Remember this instruction.
+ AggressiveInsts->insert(I);
return true;
}
/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
/// and PointerNullValue. Return NULL if value is not a constant int.
-ConstantInt *SimplifyCFGOpt::GetConstantInt(Value *V) {
+static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
// Normal constant int.
ConstantInt *CI = dyn_cast<ConstantInt>(V);
if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
@@ -296,77 +299,94 @@ ConstantInt *SimplifyCFGOpt::GetConstantInt(Value *V) {
return 0;
}
-/// GatherConstantSetEQs - Given a potentially 'or'd together collection of
-/// icmp_eq instructions that compare a value against a constant, return the
-/// value being compared, and stick the constant into the Values vector.
-Value *SimplifyCFGOpt::
-GatherConstantSetEQs(Value *V, std::vector<ConstantInt*> &Values) {
- if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Inst->getOpcode() == Instruction::ICmp &&
- cast<ICmpInst>(Inst)->getPredicate() == ICmpInst::ICMP_EQ) {
- if (ConstantInt *C = GetConstantInt(Inst->getOperand(1))) {
- Values.push_back(C);
- return Inst->getOperand(0);
- } else if (ConstantInt *C = GetConstantInt(Inst->getOperand(0))) {
- Values.push_back(C);
- return Inst->getOperand(1);
+/// GatherConstantCompares - Given a potentially 'or'd or 'and'd together
+/// collection of icmp eq/ne instructions that compare a value against a
+/// constant, return the value being compared, and stick the constant into the
+/// Values vector.
+static Value *
+GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
+ const TargetData *TD, bool isEQ, unsigned &UsedICmps) {
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (I == 0) return 0;
+
+ // If this is an icmp against a constant, handle this as one of the cases.
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
+ if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {
+ if (ICI->getPredicate() == (isEQ ? ICmpInst::ICMP_EQ:ICmpInst::ICMP_NE)) {
+ UsedICmps++;
+ Vals.push_back(C);
+ return I->getOperand(0);
}
- } else if (Inst->getOpcode() == Instruction::Or) {
- if (Value *LHS = GatherConstantSetEQs(Inst->getOperand(0), Values))
- if (Value *RHS = GatherConstantSetEQs(Inst->getOperand(1), Values))
- if (LHS == RHS)
- return LHS;
+
+ // If we have "x ult 3" comparison, for example, then we can add 0,1,2 to
+ // the set.
+ ConstantRange Span =
+ ConstantRange::makeICmpRegion(ICI->getPredicate(), C->getValue());
+
+ // If this is an and/!= check, then we want to optimize "x ugt 2" into
+ // x != 0 && x != 1 && x != 2.
+ if (!isEQ)
+ Span = Span.inverse();
+
+ // If there are a ton of values, we don't want to make a ginormous switch.
+ if (Span.getSetSize().ugt(8) || Span.isEmptySet() ||
+ // We don't handle wrapped sets yet.
+ Span.isWrappedSet())
+ return 0;
+
+ for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
+ Vals.push_back(ConstantInt::get(V->getContext(), Tmp));
+ UsedICmps++;
+ return I->getOperand(0);
}
+ return 0;
}
- return 0;
-}
+
+ // Otherwise, we can only handle an | or &, depending on isEQ.
+ if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And))
+ return 0;
+
+ unsigned NumValsBeforeLHS = Vals.size();
+ unsigned UsedICmpsBeforeLHS = UsedICmps;
+ if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,
+ isEQ, UsedICmps)) {
+ unsigned NumVals = Vals.size();
+ unsigned UsedICmpsBeforeRHS = UsedICmps;
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ isEQ, UsedICmps)) {
+ if (LHS == RHS)
+ return LHS;
+ Vals.resize(NumVals);
+ UsedICmps = UsedICmpsBeforeRHS;
+ }
-/// GatherConstantSetNEs - Given a potentially 'and'd together collection of
-/// setne instructions that compare a value against a constant, return the value
-/// being compared, and stick the constant into the Values vector.
-Value *SimplifyCFGOpt::
-GatherConstantSetNEs(Value *V, std::vector<ConstantInt*> &Values) {
- if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Inst->getOpcode() == Instruction::ICmp &&
- cast<ICmpInst>(Inst)->getPredicate() == ICmpInst::ICMP_NE) {
- if (ConstantInt *C = GetConstantInt(Inst->getOperand(1))) {
- Values.push_back(C);
- return Inst->getOperand(0);
- } else if (ConstantInt *C = GetConstantInt(Inst->getOperand(0))) {
- Values.push_back(C);
- return Inst->getOperand(1);
- }
- } else if (Inst->getOpcode() == Instruction::And) {
- if (Value *LHS = GatherConstantSetNEs(Inst->getOperand(0), Values))
- if (Value *RHS = GatherConstantSetNEs(Inst->getOperand(1), Values))
- if (LHS == RHS)
- return LHS;
+ // The RHS of the or/and couldn't be folded in. If we haven't used "Extra"
+ // yet, set it and return success.
+ if (Extra == 0 || Extra == I->getOperand(1)) {
+ Extra = I->getOperand(1);
+ return LHS;
}
+
+ Vals.resize(NumValsBeforeLHS);
+ UsedICmps = UsedICmpsBeforeLHS;
+ return 0;
}
- return 0;
-}
-
-/// GatherValueComparisons - If the specified Cond is an 'and' or 'or' of a
-/// bunch of comparisons of one value against constants, return the value and
-/// the constants being compared.
-bool SimplifyCFGOpt::GatherValueComparisons(Instruction *Cond, Value *&CompVal,
- std::vector<ConstantInt*> &Values) {
- if (Cond->getOpcode() == Instruction::Or) {
- CompVal = GatherConstantSetEQs(Cond, Values);
-
- // Return true to indicate that the condition is true if the CompVal is
- // equal to one of the constants.
- return true;
- } else if (Cond->getOpcode() == Instruction::And) {
- CompVal = GatherConstantSetNEs(Cond, Values);
-
- // Return false to indicate that the condition is false if the CompVal is
- // equal to one of the constants.
- return false;
+
+ // If the LHS can't be folded in, but Extra is available and RHS can, try to
+ // use LHS as Extra.
+ if (Extra == 0 || Extra == I->getOperand(0)) {
+ Value *OldExtra = Extra;
+ Extra = I->getOperand(0);
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ isEQ, UsedICmps))
+ return RHS;
+ assert(Vals.size() == NumValsBeforeLHS);
+ Extra = OldExtra;
}
- return false;
+
+ return 0;
}
-
+
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
Instruction* Cond = 0;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
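GatherConstantCompares turns range comparisons into explicit case lists through ConstantRange. Worked through on the comment's own example: makeICmpRegion(ICMP_ULT, 3) yields the half-open range [0,3), so the values 0, 1 and 2 are pushed onto Vals; in the and/ne case the region is inverted first, and empty, wrapped or over-wide ranges are rejected. The expansion step, isolated as a sketch (C and Vals as in the hunk above):

  ConstantRange Span =
    ConstantRange::makeICmpRegion(ICmpInst::ICMP_ULT, C->getValue());
  // For C == 3 this is [0, 3); enumerate its members.
  if (!Span.isEmptySet() && !Span.isWrappedSet() &&
      !Span.getSetSize().ugt(8))
    for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
      Vals.push_back(ConstantInt::get(V->getContext(), Tmp));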
@@ -374,6 +394,8 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
if (BI->isConditional())
Cond = dyn_cast<Instruction>(BI->getCondition());
+ } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(TI)) {
+ Cond = dyn_cast<Instruction>(IBI->getAddress());
}
TI->eraseFromParent();
@@ -395,7 +417,7 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
if ((ICI->getPredicate() == ICmpInst::ICMP_EQ ||
ICI->getPredicate() == ICmpInst::ICMP_NE) &&
- GetConstantInt(ICI->getOperand(1)))
+ GetConstantInt(ICI->getOperand(1), TD))
CV = ICI->getOperand(0);
// Unwrap any lossless ptrtoint cast.
@@ -420,7 +442,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
BranchInst *BI = cast<BranchInst>(TI);
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
- Cases.push_back(std::make_pair(GetConstantInt(ICI->getOperand(1)),
+ Cases.push_back(std::make_pair(GetConstantInt(ICI->getOperand(1), TD),
BI->getSuccessor(ICI->getPredicate() ==
ICmpInst::ICMP_NE)));
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
@@ -459,8 +481,8 @@ ValuesOverlap(std::vector<std::pair<ConstantInt*, BasicBlock*> > &C1,
}
// Otherwise, just sort both lists and compare element by element.
- std::sort(V1->begin(), V1->end());
- std::sort(V2->begin(), V2->end());
+ array_pod_sort(V1->begin(), V1->end());
+ array_pod_sort(V2->begin(), V2->end());
unsigned i1 = 0, i2 = 0, e1 = V1->size(), e2 = V2->size();
while (i1 != e1 && i2 != e2) {
if ((*V1)[i1].first == (*V2)[i2].first)
@@ -506,90 +528,87 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// If we are here, we know that the value is none of those cases listed in
// PredCases. If there are any cases in ThisCases that are in PredCases, we
// can simplify TI.
- if (ValuesOverlap(PredCases, ThisCases)) {
- if (isa<BranchInst>(TI)) {
- // Okay, one of the successors of this condbr is dead. Convert it to a
- // uncond br.
- assert(ThisCases.size() == 1 && "Branch can only have one case!");
- // Insert the new branch.
- Instruction *NI = BranchInst::Create(ThisDef, TI);
- (void) NI;
-
- // Remove PHI node entries for the dead edge.
- ThisCases[0].second->removePredecessor(TI->getParent());
-
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n");
-
- EraseTerminatorInstAndDCECond(TI);
- return true;
-
- } else {
- SwitchInst *SI = cast<SwitchInst>(TI);
- // Okay, TI has cases that are statically dead, prune them away.
- SmallPtrSet<Constant*, 16> DeadCases;
- for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- DeadCases.insert(PredCases[i].first);
+ if (!ValuesOverlap(PredCases, ThisCases))
+ return false;
+
+ if (isa<BranchInst>(TI)) {
+    // Okay, one of the successors of this condbr is dead. Convert it to an
+    // uncond br.
+ assert(ThisCases.size() == 1 && "Branch can only have one case!");
+ // Insert the new branch.
+ Instruction *NI = BranchInst::Create(ThisDef, TI);
+ (void) NI;
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI);
+ // Remove PHI node entries for the dead edge.
+ ThisCases[0].second->removePredecessor(TI->getParent());
- for (unsigned i = SI->getNumCases()-1; i != 0; --i)
- if (DeadCases.count(SI->getCaseValue(i))) {
- SI->getSuccessor(i)->removePredecessor(TI->getParent());
- SI->removeCase(i);
- }
+ DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n");
- DEBUG(dbgs() << "Leaving: " << *TI << "\n");
- return true;
- }
+ EraseTerminatorInstAndDCECond(TI);
+ return true;
}
-
- } else {
- // Otherwise, TI's block must correspond to some matched value. Find out
- // which value (or set of values) this is.
- ConstantInt *TIV = 0;
- BasicBlock *TIBB = TI->getParent();
+
+ SwitchInst *SI = cast<SwitchInst>(TI);
+ // Okay, TI has cases that are statically dead, prune them away.
+ SmallPtrSet<Constant*, 16> DeadCases;
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
- if (PredCases[i].second == TIBB) {
- if (TIV == 0)
- TIV = PredCases[i].first;
- else
- return false; // Cannot handle multiple values coming to this block.
- }
- assert(TIV && "No edge from pred to succ?");
-
- // Okay, we found the one constant that our value can be if we get into TI's
- // BB. Find out which successor will unconditionally be branched to.
- BasicBlock *TheRealDest = 0;
- for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
- if (ThisCases[i].first == TIV) {
- TheRealDest = ThisCases[i].second;
- break;
+ DeadCases.insert(PredCases[i].first);
+
+ DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI);
+
+ for (unsigned i = SI->getNumCases()-1; i != 0; --i)
+ if (DeadCases.count(SI->getCaseValue(i))) {
+ SI->getSuccessor(i)->removePredecessor(TI->getParent());
+ SI->removeCase(i);
}
- // If not handled by any explicit cases, it is handled by the default case.
- if (TheRealDest == 0) TheRealDest = ThisDef;
+ DEBUG(dbgs() << "Leaving: " << *TI << "\n");
+ return true;
+ }
+
+ // Otherwise, TI's block must correspond to some matched value. Find out
+ // which value (or set of values) this is.
+ ConstantInt *TIV = 0;
+ BasicBlock *TIBB = TI->getParent();
+ for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
+ if (PredCases[i].second == TIBB) {
+ if (TIV != 0)
+ return false; // Cannot handle multiple values coming to this block.
+ TIV = PredCases[i].first;
+ }
+ assert(TIV && "No edge from pred to succ?");
+
+ // Okay, we found the one constant that our value can be if we get into TI's
+ // BB. Find out which successor will unconditionally be branched to.
+ BasicBlock *TheRealDest = 0;
+ for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
+ if (ThisCases[i].first == TIV) {
+ TheRealDest = ThisCases[i].second;
+ break;
+ }
- // Remove PHI node entries for dead edges.
- BasicBlock *CheckEdge = TheRealDest;
- for (succ_iterator SI = succ_begin(TIBB), e = succ_end(TIBB); SI != e; ++SI)
- if (*SI != CheckEdge)
- (*SI)->removePredecessor(TIBB);
- else
- CheckEdge = 0;
+ // If not handled by any explicit cases, it is handled by the default case.
+ if (TheRealDest == 0) TheRealDest = ThisDef;
- // Insert the new branch.
- Instruction *NI = BranchInst::Create(TheRealDest, TI);
- (void) NI;
+ // Remove PHI node entries for dead edges.
+ BasicBlock *CheckEdge = TheRealDest;
+ for (succ_iterator SI = succ_begin(TIBB), e = succ_end(TIBB); SI != e; ++SI)
+ if (*SI != CheckEdge)
+ (*SI)->removePredecessor(TIBB);
+ else
+ CheckEdge = 0;
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n");
+ // Insert the new branch.
+ Instruction *NI = BranchInst::Create(TheRealDest, TI);
+ (void) NI;
- EraseTerminatorInstAndDCECond(TI);
- return true;
- }
- return false;
+ DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI << "Leaving: " << *NI << "\n");
+
+ EraseTerminatorInstAndDCECond(TI);
+ return true;
}
namespace {
@@ -603,6 +622,16 @@ namespace {
};
}
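+// qsort-style comparator used with array_pod_sort. Note that it returns 1
+// when LHS is unsigned-less-than RHS, so the values end up sorted in
+// *descending* order; TurnSwitchRangeIntoICmp below relies on that.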
+static int ConstantIntSortPredicate(const void *P1, const void *P2) {
+ const ConstantInt *LHS = *(const ConstantInt**)P1;
+ const ConstantInt *RHS = *(const ConstantInt**)P2;
+ if (LHS->getValue().ult(RHS->getValue()))
+ return 1;
+ if (LHS->getValue() == RHS->getValue())
+ return 0;
+ return -1;
+}
+
/// FoldValueComparisonIntoPredecessors - The specified terminator is a value
/// equality comparison instruction (either a switch or a branch on "X == c").
/// See if any of the predecessors of the terminator block are value comparisons
@@ -798,7 +827,7 @@ static bool HoistThenElseCodeToIf(BranchInst *BI) {
if (!I2->use_empty())
I2->replaceAllUsesWith(I1);
I1->intersectOptionalDataWith(I2);
- BB2->getInstList().erase(I2);
+ I2->eraseFromParent();
I1 = BB1_Itr++;
while (isa<DbgInfoIntrinsic>(I1))
@@ -836,18 +865,18 @@ HoistTerminator:
(PN = dyn_cast<PHINode>(BBI)); ++BBI) {
Value *BB1V = PN->getIncomingValueForBlock(BB1);
Value *BB2V = PN->getIncomingValueForBlock(BB2);
- if (BB1V != BB2V) {
- // These values do not agree. Insert a select instruction before NT
- // that determines the right value.
- SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
- if (SI == 0)
- SI = SelectInst::Create(BI->getCondition(), BB1V, BB2V,
- BB1V->getName()+"."+BB2V->getName(), NT);
- // Make the PHI node use the select for all incoming values for BB1/BB2
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingBlock(i) == BB1 || PN->getIncomingBlock(i) == BB2)
- PN->setIncomingValue(i, SI);
- }
+ if (BB1V == BB2V) continue;
+
+ // These values do not agree. Insert a select instruction before NT
+ // that determines the right value.
+ SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
+ if (SI == 0)
+ SI = SelectInst::Create(BI->getCondition(), BB1V, BB2V,
+ BB1V->getName()+"."+BB2V->getName(), NT);
+ // Make the PHI node use the select for all incoming values for BB1/BB2
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingBlock(i) == BB1 || PN->getIncomingBlock(i) == BB2)
+ PN->setIncomingValue(i, SI);
}
}
@@ -872,21 +901,19 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
BBI != BBE; ++BBI) {
Instruction *I = BBI;
// Skip debug info.
- if (isa<DbgInfoIntrinsic>(I)) continue;
- if (I == Term) break;
+ if (isa<DbgInfoIntrinsic>(I)) continue;
+ if (I == Term) break;
- if (!HInst)
- HInst = I;
- else
+ if (HInst)
return false;
+ HInst = I;
}
if (!HInst)
return false;
  // Be conservative for now. FP select instructions can often be expensive.
Value *BrCond = BI->getCondition();
- if (isa<Instruction>(BrCond) &&
- cast<Instruction>(BrCond)->getOpcode() == Instruction::FCmp)
+ if (isa<FCmpInst>(BrCond))
return false;
// If BB1 is actually on the false edge of the conditional branch, remember
@@ -990,12 +1017,12 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
for(Value::use_iterator UI = BrCond->use_begin(), UE = BrCond->use_end();
UI != UE; ++UI) {
Instruction *Use = cast<Instruction>(*UI);
- if (BB1Insns.count(Use)) {
- // If BrCond uses the instruction that place it just before
- // branch instruction.
- InsertPos = BI;
- break;
- }
+ if (!BB1Insns.count(Use)) continue;
+
+      // If an instruction in BB1 uses BrCond, set the insertion point just
+      // before the branch instruction.
+ InsertPos = BI;
+ break;
}
} else
InsertPos = BI;
@@ -1016,8 +1043,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
for (unsigned i = 0, e = PHIUses.size(); i != e; ++i) {
PHINode *PN = PHIUses[i];
for (unsigned j = 0, ee = PN->getNumIncomingValues(); j != ee; ++j)
- if (PN->getIncomingBlock(j) == BB1 ||
- PN->getIncomingBlock(j) == BIParent)
+ if (PN->getIncomingBlock(j) == BB1 || PN->getIncomingBlock(j) == BIParent)
PN->setIncomingValue(j, SI);
}
@@ -1055,7 +1081,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
/// that is defined in the same block as the branch and if any PHI entries are
/// constants, thread edges corresponding to that entry to be branches to their
/// ultimate destination.
-static bool FoldCondBranchOnPHI(BranchInst *BI) {
+static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
BasicBlock *BB = BI->getParent();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
// NOTE: we currently cannot transform this case if the PHI node is used
@@ -1075,78 +1101,73 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
// Okay, this is a simple enough basic block. See if any phi values are
// constants.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- ConstantInt *CB;
- if ((CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i))) &&
- CB->getType()->isIntegerTy(1)) {
- // Okay, we now know that all edges from PredBB should be revectored to
- // branch to RealDest.
- BasicBlock *PredBB = PN->getIncomingBlock(i);
- BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
+ ConstantInt *CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i));
+ if (CB == 0 || !CB->getType()->isIntegerTy(1)) continue;
+
+ // Okay, we now know that all edges from PredBB should be revectored to
+ // branch to RealDest.
+ BasicBlock *PredBB = PN->getIncomingBlock(i);
+ BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
+
+ if (RealDest == BB) continue; // Skip self loops.
+
+ // The dest block might have PHI nodes, other predecessors and other
+ // difficult cases. Instead of being smart about this, just insert a new
+ // block that jumps to the destination block, effectively splitting
+ // the edge we are about to create.
+ BasicBlock *EdgeBB = BasicBlock::Create(BB->getContext(),
+ RealDest->getName()+".critedge",
+ RealDest->getParent(), RealDest);
+ BranchInst::Create(RealDest, EdgeBB);
+
+ // Update PHI nodes.
+ AddPredecessorToBlock(RealDest, EdgeBB, BB);
+
+ // BB may have instructions that are being threaded over. Clone these
+ // instructions into EdgeBB. We know that there will be no uses of the
+ // cloned instructions outside of EdgeBB.
+ BasicBlock::iterator InsertPt = EdgeBB->begin();
+ DenseMap<Value*, Value*> TranslateMap; // Track translated values.
+ for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) {
+ if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
+ TranslateMap[PN] = PN->getIncomingValueForBlock(PredBB);
+ continue;
+ }
+ // Clone the instruction.
+ Instruction *N = BBI->clone();
+ if (BBI->hasName()) N->setName(BBI->getName()+".c");
- if (RealDest == BB) continue; // Skip self loops.
+ // Update operands due to translation.
+ for (User::op_iterator i = N->op_begin(), e = N->op_end();
+ i != e; ++i) {
+ DenseMap<Value*, Value*>::iterator PI = TranslateMap.find(*i);
+ if (PI != TranslateMap.end())
+ *i = PI->second;
+ }
- // The dest block might have PHI nodes, other predecessors and other
- // difficult cases. Instead of being smart about this, just insert a new
- // block that jumps to the destination block, effectively splitting
- // the edge we are about to create.
- BasicBlock *EdgeBB = BasicBlock::Create(BB->getContext(),
- RealDest->getName()+".critedge",
- RealDest->getParent(), RealDest);
- BranchInst::Create(RealDest, EdgeBB);
- PHINode *PN;
- for (BasicBlock::iterator BBI = RealDest->begin();
- (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
- Value *V = PN->getIncomingValueForBlock(BB);
- PN->addIncoming(V, EdgeBB);
+ // Check for trivial simplification.
+ if (Value *V = SimplifyInstruction(N, TD)) {
+ TranslateMap[BBI] = V;
+ delete N; // Instruction folded away, don't need actual inst
+ } else {
+ // Insert the new instruction into its new home.
+ EdgeBB->getInstList().insert(InsertPt, N);
+ if (!BBI->use_empty())
+ TranslateMap[BBI] = N;
}
+ }
- // BB may have instructions that are being threaded over. Clone these
- // instructions into EdgeBB. We know that there will be no uses of the
- // cloned instructions outside of EdgeBB.
- BasicBlock::iterator InsertPt = EdgeBB->begin();
- std::map<Value*, Value*> TranslateMap; // Track translated values.
- for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) {
- if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
- TranslateMap[PN] = PN->getIncomingValueForBlock(PredBB);
- } else {
- // Clone the instruction.
- Instruction *N = BBI->clone();
- if (BBI->hasName()) N->setName(BBI->getName()+".c");
-
- // Update operands due to translation.
- for (User::op_iterator i = N->op_begin(), e = N->op_end();
- i != e; ++i) {
- std::map<Value*, Value*>::iterator PI =
- TranslateMap.find(*i);
- if (PI != TranslateMap.end())
- *i = PI->second;
- }
-
- // Check for trivial simplification.
- if (Constant *C = ConstantFoldInstruction(N)) {
- TranslateMap[BBI] = C;
- delete N; // Constant folded away, don't need actual inst
- } else {
- // Insert the new instruction into its new home.
- EdgeBB->getInstList().insert(InsertPt, N);
- if (!BBI->use_empty())
- TranslateMap[BBI] = N;
- }
- }
+ // Loop over all of the edges from PredBB to BB, changing them to branch
+ // to EdgeBB instead.
+ TerminatorInst *PredBBTI = PredBB->getTerminator();
+ for (unsigned i = 0, e = PredBBTI->getNumSuccessors(); i != e; ++i)
+ if (PredBBTI->getSuccessor(i) == BB) {
+ BB->removePredecessor(PredBB);
+ PredBBTI->setSuccessor(i, EdgeBB);
}
-
- // Loop over all of the edges from PredBB to BB, changing them to branch
- // to EdgeBB instead.
- TerminatorInst *PredBBTI = PredBB->getTerminator();
- for (unsigned i = 0, e = PredBBTI->getNumSuccessors(); i != e; ++i)
- if (PredBBTI->getSuccessor(i) == BB) {
- BB->removePredecessor(PredBB);
- PredBBTI->setSuccessor(i, EdgeBB);
- }
-
- // Recurse, simplifying any other constants.
- return FoldCondBranchOnPHI(BI) | true;
- }
+
+ // Recurse, simplifying any other constants.
+ return FoldCondBranchOnPHI(BI, TD) | true;
}
return false;
@@ -1154,18 +1175,20 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
/// PHI node, see if we can eliminate it.
-static bool FoldTwoEntryPHINode(PHINode *PN) {
+static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
// statement", which has a very simple dominance structure. Basically, we
// are trying to find the condition that is being branched on, which
// subsequently causes this merge to happen. We really want control
// dependence information for this check, but simplifycfg can't keep it up
// to date, and this catches most of the cases we care about anyway.
- //
BasicBlock *BB = PN->getParent();
BasicBlock *IfTrue, *IfFalse;
Value *IfCond = GetIfCondition(BB, IfTrue, IfFalse);
- if (!IfCond) return false;
+ if (!IfCond ||
+ // Don't bother if the branch will be constant folded trivially.
+ isa<ConstantInt>(IfCond))
+ return false;
// Okay, we found that we can merge this two-entry phi node into a select.
// Doing so would require us to fold *all* two entry phi nodes in this block.
@@ -1177,42 +1200,49 @@ static bool FoldTwoEntryPHINode(PHINode *PN) {
if (NumPhis > 2)
return false;
- DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: "
- << IfTrue->getName() << " F: " << IfFalse->getName() << "\n");
-
// Loop over the PHI's seeing if we can promote them all to select
// instructions. While we are at it, keep track of the instructions
// that need to be moved to the dominating block.
- std::set<Instruction*> AggressiveInsts;
-
- BasicBlock::iterator AfterPHIIt = BB->begin();
- while (isa<PHINode>(AfterPHIIt)) {
- PHINode *PN = cast<PHINode>(AfterPHIIt++);
- if (PN->getIncomingValue(0) == PN->getIncomingValue(1)) {
- if (PN->getIncomingValue(0) != PN)
- PN->replaceAllUsesWith(PN->getIncomingValue(0));
- else
- PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
- } else if (!DominatesMergePoint(PN->getIncomingValue(0), BB,
- &AggressiveInsts) ||
- !DominatesMergePoint(PN->getIncomingValue(1), BB,
- &AggressiveInsts)) {
- return false;
+ SmallPtrSet<Instruction*, 4> AggressiveInsts;
+
+ for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
+ PHINode *PN = cast<PHINode>(II++);
+ if (Value *V = SimplifyInstruction(PN, TD)) {
+ PN->replaceAllUsesWith(V);
+ PN->eraseFromParent();
+ continue;
}
+
+ if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts) ||
+ !DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts))
+ return false;
}
+  // If we folded the first phi, PN dangles at this point. Refresh it. If
+ // we ran out of PHIs then we simplified them all.
+ PN = dyn_cast<PHINode>(BB->begin());
+ if (PN == 0) return true;
+
+ // Don't fold i1 branches on PHIs which contain binary operators. These can
+ // often be turned into switches and other things.
+ if (PN->getType()->isIntegerTy(1) &&
+ (isa<BinaryOperator>(PN->getIncomingValue(0)) ||
+ isa<BinaryOperator>(PN->getIncomingValue(1)) ||
+ isa<BinaryOperator>(IfCond)))
+ return false;
+
  // If all PHI nodes are promotable, check to make sure that all
// instructions in the predecessor blocks can be promoted as well. If
// not, we won't be able to get rid of the control flow, so it's not
// worth promoting to select instructions.
- BasicBlock *DomBlock = 0, *IfBlock1 = 0, *IfBlock2 = 0;
- PN = cast<PHINode>(BB->begin());
- BasicBlock *Pred = PN->getIncomingBlock(0);
- if (cast<BranchInst>(Pred->getTerminator())->isUnconditional()) {
- IfBlock1 = Pred;
- DomBlock = *pred_begin(Pred);
- for (BasicBlock::iterator I = Pred->begin();
- !isa<TerminatorInst>(I); ++I)
+ BasicBlock *DomBlock = 0;
+ BasicBlock *IfBlock1 = PN->getIncomingBlock(0);
+ BasicBlock *IfBlock2 = PN->getIncomingBlock(1);
+ if (cast<BranchInst>(IfBlock1->getTerminator())->isConditional()) {
+ IfBlock1 = 0;
+ } else {
+ DomBlock = *pred_begin(IfBlock1);
+ for (BasicBlock::iterator I = IfBlock1->begin();!isa<TerminatorInst>(I);++I)
if (!AggressiveInsts.count(I) && !isa<DbgInfoIntrinsic>(I)) {
// This is not an aggressive instruction that we can promote.
// Because of this, we won't be able to get rid of the control
@@ -1221,12 +1251,11 @@ static bool FoldTwoEntryPHINode(PHINode *PN) {
}
}
- Pred = PN->getIncomingBlock(1);
- if (cast<BranchInst>(Pred->getTerminator())->isUnconditional()) {
- IfBlock2 = Pred;
- DomBlock = *pred_begin(Pred);
- for (BasicBlock::iterator I = Pred->begin();
- !isa<TerminatorInst>(I); ++I)
+ if (cast<BranchInst>(IfBlock2->getTerminator())->isConditional()) {
+ IfBlock2 = 0;
+ } else {
+ DomBlock = *pred_begin(IfBlock2);
+ for (BasicBlock::iterator I = IfBlock2->begin();!isa<TerminatorInst>(I);++I)
if (!AggressiveInsts.count(I) && !isa<DbgInfoIntrinsic>(I)) {
// This is not an aggressive instruction that we can promote.
// Because of this, we won't be able to get rid of the control
@@ -1234,56 +1263,45 @@ static bool FoldTwoEntryPHINode(PHINode *PN) {
return false;
}
}
+
+ DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: "
+ << IfTrue->getName() << " F: " << IfFalse->getName() << "\n");
// If we can still promote the PHI nodes after this gauntlet of tests,
// do all of the PHI's now.
-
+ Instruction *InsertPt = DomBlock->getTerminator();
+
// Move all 'aggressive' instructions, which are defined in the
// conditional parts of the if's up to the dominating block.
- if (IfBlock1) {
- DomBlock->getInstList().splice(DomBlock->getTerminator(),
- IfBlock1->getInstList(),
- IfBlock1->begin(),
+ if (IfBlock1)
+ DomBlock->getInstList().splice(InsertPt,
+ IfBlock1->getInstList(), IfBlock1->begin(),
IfBlock1->getTerminator());
- }
- if (IfBlock2) {
- DomBlock->getInstList().splice(DomBlock->getTerminator(),
- IfBlock2->getInstList(),
- IfBlock2->begin(),
+ if (IfBlock2)
+ DomBlock->getInstList().splice(InsertPt,
+ IfBlock2->getInstList(), IfBlock2->begin(),
IfBlock2->getTerminator());
- }
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
// Change the PHI node into a select instruction.
- Value *TrueVal =
- PN->getIncomingValue(PN->getIncomingBlock(0) == IfFalse);
- Value *FalseVal =
- PN->getIncomingValue(PN->getIncomingBlock(0) == IfTrue);
+ Value *TrueVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfFalse);
+ Value *FalseVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfTrue);
- Value *NV = SelectInst::Create(IfCond, TrueVal, FalseVal, "", AfterPHIIt);
+ Value *NV = SelectInst::Create(IfCond, TrueVal, FalseVal, "", InsertPt);
PN->replaceAllUsesWith(NV);
NV->takeName(PN);
-
- BB->getInstList().erase(PN);
+ PN->eraseFromParent();
}
+
+ // At this point, IfBlock1 and IfBlock2 are both empty, so our if statement
+ // has been flattened. Change DomBlock to jump directly to our new block to
+ // avoid other simplifycfg's kicking in on the diamond.
+ TerminatorInst *OldTI = DomBlock->getTerminator();
+ BranchInst::Create(BB, OldTI);
+ OldTI->eraseFromParent();
return true;
}
-/// isTerminatorFirstRelevantInsn - Return true if Term is very first
-/// instruction ignoring Phi nodes and dbg intrinsics.
-static bool isTerminatorFirstRelevantInsn(BasicBlock *BB, Instruction *Term) {
- BasicBlock::iterator BBI = Term;
- while (BBI != BB->begin()) {
- --BBI;
- if (!isa<DbgInfoIntrinsic>(BBI))
- break;
- }
-
- if (isa<PHINode>(BBI) || &*BBI == Term || isa<DbgInfoIntrinsic>(BBI))
- return true;
- return false;
-}
-
/// SimplifyCondBranchToTwoReturns - If we found a conditional branch that goes
/// to two returning blocks, try to merge them together into one return,
/// introducing a select if the return values disagree.
@@ -1297,9 +1315,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI) {
// Check to ensure both blocks are empty (just a return) or optionally empty
// with PHI nodes. If there are other instructions, merging would cause extra
// computation on one path or the other.
- if (!isTerminatorFirstRelevantInsn(TrueSucc, TrueRet))
+ if (!TrueSucc->getFirstNonPHIOrDbg()->isTerminator())
return false;
- if (!isTerminatorFirstRelevantInsn(FalseSucc, FalseRet))
+ if (!FalseSucc->getFirstNonPHIOrDbg()->isTerminator())
return false;
// Okay, we found a branch that is going to two return nodes. If
@@ -1386,7 +1404,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// must be at the front of the block.
BasicBlock::iterator FrontIt = BB->front();
// Ignore dbg intrinsics.
- while(isa<DbgInfoIntrinsic>(FrontIt))
+ while (isa<DbgInfoIntrinsic>(FrontIt))
++FrontIt;
// Allow a single instruction to be hoisted in addition to the compare
@@ -1470,7 +1488,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
UsedValues.erase(Pair.first);
if (UsedValues.empty()) break;
- if (Instruction* I = dyn_cast<Instruction>(Pair.first)) {
+ if (Instruction *I = dyn_cast<Instruction>(Pair.first)) {
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
Worklist.push_back(std::make_pair(OI->get(), Pair.second+1));
@@ -1498,9 +1516,16 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// If we need to invert the condition in the pred block to match, do so now.
if (InvertPredCond) {
- Value *NewCond =
- BinaryOperator::CreateNot(PBI->getCondition(),
+ Value *NewCond = PBI->getCondition();
+
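+    // If the condition is a compare with no other uses, invert its predicate
+    // in place instead of materializing a separate 'not' instruction.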
+ if (NewCond->hasOneUse() && isa<CmpInst>(NewCond)) {
+ CmpInst *CI = cast<CmpInst>(NewCond);
+ CI->setPredicate(CI->getInversePredicate());
+ } else {
+ NewCond = BinaryOperator::CreateNot(NewCond,
PBI->getCondition()->getName()+".not", PBI);
+ }
+
PBI->setCondition(NewCond);
BasicBlock *OldTrue = PBI->getSuccessor(0);
BasicBlock *OldFalse = PBI->getSuccessor(1);
@@ -1686,17 +1711,13 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// OtherDest may have phi nodes. If so, add an entry from PBI's
// block that are identical to the entries for BI's block.
- PHINode *PN;
- for (BasicBlock::iterator II = OtherDest->begin();
- (PN = dyn_cast<PHINode>(II)); ++II) {
- Value *V = PN->getIncomingValueForBlock(BB);
- PN->addIncoming(V, PBI->getParent());
- }
+ AddPredecessorToBlock(OtherDest, PBI->getParent(), BB);
// We know that the CommonDest already had an edge from PBI to
// it. If it has PHIs though, the PHIs may have different
// entries for BB and PBI's BB. If so, insert a select to make
// them agree.
+ PHINode *PN;
for (BasicBlock::iterator II = CommonDest->begin();
(PN = dyn_cast<PHINode>(II)); ++II) {
Value *BIV = PN->getIncomingValueForBlock(BB);
@@ -1718,481 +1739,789 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
return true;
}
-bool SimplifyCFGOpt::run(BasicBlock *BB) {
- bool Changed = false;
- Function *M = BB->getParent();
-
- assert(BB && BB->getParent() && "Block not embedded in function!");
- assert(BB->getTerminator() && "Degenerate basic block encountered!");
+// SimplifyTerminatorOnSelect - Simplifies a terminator by replacing it with a
+// branch to TrueBB if Cond is true or to FalseBB if Cond is false.
+// Takes care of updating the successors and removing the old terminator.
+// Also makes sure not to introduce new successors by assuming that edges to
+// non-successor TrueBBs and FalseBBs aren't reachable.
+static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
+ BasicBlock *TrueBB, BasicBlock *FalseBB){
+ // Remove any superfluous successor edges from the CFG.
+ // First, figure out which successors to preserve.
+ // If TrueBB and FalseBB are equal, only try to preserve one copy of that
+ // successor.
+ BasicBlock *KeepEdge1 = TrueBB;
+ BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : 0;
+
+ // Then remove the rest.
+ for (unsigned I = 0, E = OldTerm->getNumSuccessors(); I != E; ++I) {
+ BasicBlock *Succ = OldTerm->getSuccessor(I);
+ // Make sure only to keep exactly one copy of each edge.
+ if (Succ == KeepEdge1)
+ KeepEdge1 = 0;
+ else if (Succ == KeepEdge2)
+ KeepEdge2 = 0;
+ else
+ Succ->removePredecessor(OldTerm->getParent());
+ }
- // Remove basic blocks that have no predecessors (except the entry block)...
- // or that just have themself as a predecessor. These are unreachable.
- if ((pred_begin(BB) == pred_end(BB) &&
- &BB->getParent()->getEntryBlock() != BB) ||
- BB->getSinglePredecessor() == BB) {
- DEBUG(dbgs() << "Removing BB: \n" << *BB);
- DeleteDeadBlock(BB);
- return true;
+ // Insert an appropriate new terminator.
+ if ((KeepEdge1 == 0) && (KeepEdge2 == 0)) {
+ if (TrueBB == FalseBB)
+ // We were only looking for one successor, and it was present.
+ // Create an unconditional branch to it.
+ BranchInst::Create(TrueBB, OldTerm);
+ else
+ // We found both of the successors we were looking for.
+ // Create a conditional branch sharing the condition of the select.
+ BranchInst::Create(TrueBB, FalseBB, Cond, OldTerm);
+ } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) {
+ // Neither of the selected blocks were successors, so this
+ // terminator must be unreachable.
+ new UnreachableInst(OldTerm->getContext(), OldTerm);
+ } else {
+ // One of the selected values was a successor, but the other wasn't.
+ // Insert an unconditional branch to the one that was found;
+ // the edge to the one that wasn't must be unreachable.
+ if (KeepEdge1 == 0)
+ // Only TrueBB was found.
+ BranchInst::Create(TrueBB, OldTerm);
+ else
+ // Only FalseBB was found.
+ BranchInst::Create(FalseBB, OldTerm);
}
- // Check to see if we can constant propagate this terminator instruction
- // away...
- Changed |= ConstantFoldTerminator(BB);
+ EraseTerminatorInstAndDCECond(OldTerm);
+ return true;
+}
- // Check for and eliminate duplicate PHI nodes in this block.
- Changed |= EliminateDuplicatePHINodes(BB);
+// SimplifyIndirectBrOnSelect - Replaces
+// (indirectbr (select cond, blockaddress(@fn, BlockA),
+// blockaddress(@fn, BlockB)))
+// with
+// (br cond, BlockA, BlockB).
+static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
+ // Check that both operands of the select are block addresses.
+ BlockAddress *TBA = dyn_cast<BlockAddress>(SI->getTrueValue());
+ BlockAddress *FBA = dyn_cast<BlockAddress>(SI->getFalseValue());
+ if (!TBA || !FBA)
+ return false;
- // If there is a trivial two-entry PHI node in this basic block, and we can
- // eliminate it, do so now.
- if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
- if (PN->getNumIncomingValues() == 2)
- Changed |= FoldTwoEntryPHINode(PN);
+ // Extract the actual blocks.
+ BasicBlock *TrueBB = TBA->getBasicBlock();
+ BasicBlock *FalseBB = FBA->getBasicBlock();
- // If this is a returning block with only PHI nodes in it, fold the return
- // instruction into any unconditional branch predecessors.
- //
- // If any predecessor is a conditional branch that just selects among
- // different return values, fold the replace the branch/return with a select
- // and return.
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
- if (isTerminatorFirstRelevantInsn(BB, BB->getTerminator())) {
- // Find predecessors that end with branches.
- SmallVector<BasicBlock*, 8> UncondBranchPreds;
- SmallVector<BranchInst*, 8> CondBranchPreds;
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- BasicBlock *P = *PI;
- TerminatorInst *PTI = P->getTerminator();
- if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
- if (BI->isUnconditional())
- UncondBranchPreds.push_back(P);
- else
- CondBranchPreds.push_back(BI);
- }
- }
+ // Perform the actual simplification.
+ return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB);
+}
- // If we found some, do the transformation!
- if (!UncondBranchPreds.empty()) {
- while (!UncondBranchPreds.empty()) {
- BasicBlock *Pred = UncondBranchPreds.pop_back_val();
- DEBUG(dbgs() << "FOLDING: " << *BB
- << "INTO UNCOND BRANCH PRED: " << *Pred);
- Instruction *UncondBranch = Pred->getTerminator();
- // Clone the return and add it to the end of the predecessor.
- Instruction *NewRet = RI->clone();
- Pred->getInstList().push_back(NewRet);
-
- // If the return instruction returns a value, and if the value was a
- // PHI node in "BB", propagate the right value into the return.
- for (User::op_iterator i = NewRet->op_begin(), e = NewRet->op_end();
- i != e; ++i)
- if (PHINode *PN = dyn_cast<PHINode>(*i))
- if (PN->getParent() == BB)
- *i = PN->getIncomingValueForBlock(Pred);
-
- // Update any PHI nodes in the returning block to realize that we no
- // longer branch to them.
- BB->removePredecessor(Pred);
- Pred->getInstList().erase(UncondBranch);
- }
+/// TryToSimplifyUncondBranchWithICmpInIt - This is called when we find an icmp
+/// instruction (a seteq/setne with a constant) as the only instruction in a
+/// block that ends with an uncond branch. We are looking for a very specific
+/// pattern that occurs when "A == 1 || A == 2 || A == 3" gets simplified. In
+/// this case, we merge the first two "or's of icmp" into a switch, but then the
+/// default value goes to an uncond block with a seteq in it, and we get
+/// something like:
+///
+/// switch i8 %A, label %DEFAULT [ i8 1, label %end i8 2, label %end ]
+/// DEFAULT:
+/// %tmp = icmp eq i8 %A, 92
+/// br label %end
+/// end:
+/// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ]
+///
+/// We prefer to split the edge to 'end' so that there is a true/false entry to
+/// the PHI, merging the third icmp into the switch.
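+///
+/// After the transformation the IR looks roughly like this (illustrative):
+///
+///   switch i8 %A, label %DEFAULT [ i8 1, label %end  i8 2, label %end
+///                                  i8 92, label %switch.edge ]
+/// DEFAULT:
+///   br label %end
+/// switch.edge:
+///   br label %end
+/// end:
+///   ... = phi i1 [ true, %entry ], [ false, %DEFAULT ], [ true, %entry ],
+///                [ true, %switch.edge ]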
+static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
+ const TargetData *TD) {
+ BasicBlock *BB = ICI->getParent();
+ // If the block has any PHIs in it or the icmp has multiple uses, it is too
+ // complex.
+ if (isa<PHINode>(BB->begin()) || !ICI->hasOneUse()) return false;
+
+ Value *V = ICI->getOperand(0);
+ ConstantInt *Cst = cast<ConstantInt>(ICI->getOperand(1));
+
+ // The pattern we're looking for is where our only predecessor is a switch on
+ // 'V' and this block is the default case for the switch. In this case we can
+ // fold the compared value into the switch to simplify things.
+ BasicBlock *Pred = BB->getSinglePredecessor();
+ if (Pred == 0 || !isa<SwitchInst>(Pred->getTerminator())) return false;
+
+ SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
+ if (SI->getCondition() != V)
+ return false;
+
+ // If BB is reachable on a non-default case, then we simply know the value of
+ // V in this block. Substitute it and constant fold the icmp instruction
+ // away.
+ if (SI->getDefaultDest() != BB) {
+ ConstantInt *VVal = SI->findCaseDest(BB);
+ assert(VVal && "Should have a unique destination value");
+ ICI->setOperand(0, VVal);
+
+ if (Value *V = SimplifyInstruction(ICI, TD)) {
+ ICI->replaceAllUsesWith(V);
+ ICI->eraseFromParent();
+ }
+ // BB is now empty, so it is likely to simplify away.
+ return SimplifyCFG(BB) | true;
+ }
+
+ // Ok, the block is reachable from the default dest. If the constant we're
+ // comparing exists in one of the other edges, then we can constant fold ICI
+ // and zap it.
+ if (SI->findCaseValue(Cst) != 0) {
+ Value *V;
+ if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
+ V = ConstantInt::getFalse(BB->getContext());
+ else
+ V = ConstantInt::getTrue(BB->getContext());
+
+ ICI->replaceAllUsesWith(V);
+ ICI->eraseFromParent();
+ // BB is now empty, so it is likely to simplify away.
+ return SimplifyCFG(BB) | true;
+ }
+
+ // The use of the icmp has to be in the 'end' block, by the only PHI node in
+ // the block.
+ BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
+ PHINode *PHIUse = dyn_cast<PHINode>(ICI->use_back());
+ if (PHIUse == 0 || PHIUse != &SuccBlock->front() ||
+ isa<PHINode>(++BasicBlock::iterator(PHIUse)))
+ return false;
- // If we eliminated all predecessors of the block, delete the block now.
- if (pred_begin(BB) == pred_end(BB))
- // We know there are no successors, so just nuke the block.
- M->getBasicBlockList().erase(BB);
+  // If the icmp is a SETEQ, then the default dest gets false and the new
+  // edge gets true in the PHI.
+ Constant *DefaultCst = ConstantInt::getTrue(BB->getContext());
+ Constant *NewCst = ConstantInt::getFalse(BB->getContext());
- return true;
- }
+ if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
+ std::swap(DefaultCst, NewCst);
- // Check out all of the conditional branches going to this return
- // instruction. If any of them just select between returns, change the
- // branch itself into a select/return pair.
- while (!CondBranchPreds.empty()) {
- BranchInst *BI = CondBranchPreds.pop_back_val();
-
- // Check to see if the non-BB successor is also a return block.
- if (isa<ReturnInst>(BI->getSuccessor(0)->getTerminator()) &&
- isa<ReturnInst>(BI->getSuccessor(1)->getTerminator()) &&
- SimplifyCondBranchToTwoReturns(BI))
- return true;
- }
- }
- } else if (isa<UnwindInst>(BB->begin())) {
- // Check to see if the first instruction in this block is just an unwind.
- // If so, replace any invoke instructions which use this as an exception
- // destination with call instructions.
- //
- SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
- while (!Preds.empty()) {
- BasicBlock *Pred = Preds.back();
- if (InvokeInst *II = dyn_cast<InvokeInst>(Pred->getTerminator()))
- if (II->getUnwindDest() == BB) {
- // Insert a new branch instruction before the invoke, because this
- // is now a fall through.
- BranchInst *BI = BranchInst::Create(II->getNormalDest(), II);
- Pred->getInstList().remove(II); // Take out of symbol table
-
- // Insert the call now.
- SmallVector<Value*,8> Args(II->op_begin(), II->op_end()-3);
- CallInst *CI = CallInst::Create(II->getCalledValue(),
- Args.begin(), Args.end(),
- II->getName(), BI);
- CI->setCallingConv(II->getCallingConv());
- CI->setAttributes(II->getAttributes());
- // If the invoke produced a value, the Call now does instead.
- II->replaceAllUsesWith(CI);
- delete II;
- Changed = true;
- }
+ // Replace ICI (which is used by the PHI for the default value) with true or
+ // false depending on if it is EQ or NE.
+ ICI->replaceAllUsesWith(DefaultCst);
+ ICI->eraseFromParent();
- Preds.pop_back();
- }
+ // Okay, the switch goes to this block on a default value. Add an edge from
+ // the switch to the merge point on the compared value.
+ BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "switch.edge",
+ BB->getParent(), BB);
+ SI->addCase(Cst, NewBB);
+
+ // NewBB branches to the phi block, add the uncond branch and the phi entry.
+ BranchInst::Create(SuccBlock, NewBB);
+ PHIUse->addIncoming(NewCst, NewBB);
+ return true;
+}
- // If this block is now dead, remove it.
- if (pred_begin(BB) == pred_end(BB)) {
- // We know there are no successors, so just nuke the block.
- M->getBasicBlockList().erase(BB);
- return true;
- }
+/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
+/// Check to see if it is branching on an or/and chain of icmp instructions, and
+/// fold it into a switch instruction if so.
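+///
+/// For example (an illustrative sketch):
+///   br i1 (or (icmp eq i8 %A, 0), (icmp eq i8 %A, 1)), label %T, label %F
+/// becomes
+///   switch i8 %A, label %F [ i8 0, label %T  i8 1, label %T ]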
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD) {
+ Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
+ if (Cond == 0) return false;
+
+ // Change br (X == 0 | X == 1), T, F into a switch instruction.
+ // If this is a bunch of seteq's or'd together, or if it's a bunch of
+ // 'setne's and'ed together, collect them.
+ Value *CompVal = 0;
+ std::vector<ConstantInt*> Values;
+ bool TrueWhenEqual = true;
+ Value *ExtraCase = 0;
+ unsigned UsedICmps = 0;
+
+ if (Cond->getOpcode() == Instruction::Or) {
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,
+ UsedICmps);
+ } else if (Cond->getOpcode() == Instruction::And) {
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false,
+ UsedICmps);
+ TrueWhenEqual = false;
+ }
+
+  // If we didn't find a value compared against multiple constants, fail.
+ if (CompVal == 0) return false;
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
- if (isValueEqualityComparison(SI)) {
- // If we only have one predecessor, and if it is a branch on this value,
- // see if that predecessor totally determines the outcome of this switch.
- if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
- if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred))
- return SimplifyCFG(BB) || 1;
-
- // If the block only contains the switch, see if we can fold the block
- // away into any preds.
- BasicBlock::iterator BBI = BB->begin();
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(BBI))
- ++BBI;
- if (SI == &*BBI)
- if (FoldValueComparisonIntoPredecessors(SI))
- return SimplifyCFG(BB) || 1;
- }
- } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
- if (BI->isUnconditional()) {
- BasicBlock::iterator BBI = BB->getFirstNonPHI();
+ // Avoid turning single icmps into a switch.
+ if (UsedICmps <= 1)
+ return false;
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(BBI))
- ++BBI;
- if (BBI->isTerminator()) // Terminator is the only non-phi instruction!
- if (BB != &BB->getParent()->getEntryBlock())
- if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
- return true;
+ // There might be duplicate constants in the list, which the switch
+ // instruction can't handle, remove them now.
+ array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate);
+ Values.erase(std::unique(Values.begin(), Values.end()), Values.end());
+
+ // If Extra was used, we require at least two switch values to do the
+  // transformation. A switch with one value is just a cond branch.
+ if (ExtraCase && Values.size() < 2) return false;
+
+ // Figure out which block is which destination.
+ BasicBlock *DefaultBB = BI->getSuccessor(1);
+ BasicBlock *EdgeBB = BI->getSuccessor(0);
+ if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB);
+
+ BasicBlock *BB = BI->getParent();
+
+ DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
+ << " cases into SWITCH. BB is:\n" << *BB);
+
+ // If there are any extra values that couldn't be folded into the switch
+ // then we evaluate them with an explicit branch first. Split the block
+ // right before the condbr to handle it.
+ if (ExtraCase) {
+ BasicBlock *NewBB = BB->splitBasicBlock(BI, "switch.early.test");
+ // Remove the uncond branch added to the old block.
+ TerminatorInst *OldTI = BB->getTerminator();
+
+ if (TrueWhenEqual)
+ BranchInst::Create(EdgeBB, NewBB, ExtraCase, OldTI);
+ else
+ BranchInst::Create(NewBB, EdgeBB, ExtraCase, OldTI);
- } else { // Conditional branch
- if (isValueEqualityComparison(BI)) {
- // If we only have one predecessor, and if it is a branch on this value,
- // see if that predecessor totally determines the outcome of this
- // switch.
- if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
- if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred))
- return SimplifyCFG(BB) | true;
-
- // This block must be empty, except for the setcond inst, if it exists.
- // Ignore dbg intrinsics.
- BasicBlock::iterator I = BB->begin();
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(I))
- ++I;
- if (&*I == BI) {
- if (FoldValueComparisonIntoPredecessors(BI))
- return SimplifyCFG(BB) | true;
- } else if (&*I == cast<Instruction>(BI->getCondition())){
- ++I;
- // Ignore dbg intrinsics.
- while (isa<DbgInfoIntrinsic>(I))
- ++I;
- if(&*I == BI) {
- if (FoldValueComparisonIntoPredecessors(BI))
- return SimplifyCFG(BB) | true;
- }
- }
- }
+ OldTI->eraseFromParent();
+
+ // If there are PHI nodes in EdgeBB, then we need to add a new entry to them
+ // for the edge we just added.
+ AddPredecessorToBlock(EdgeBB, BB, NewBB);
+
+ DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
+ << "\nEXTRABB = " << *BB);
+ BB = NewBB;
+ }
+
+ // Convert pointer to int before we switch.
+ if (CompVal->getType()->isPointerTy()) {
+ assert(TD && "Cannot switch on pointer without TargetData");
+ CompVal = new PtrToIntInst(CompVal,
+ TD->getIntPtrType(CompVal->getContext()),
+ "magicptr", BI);
+ }
+
+ // Create the new switch instruction now.
+ SwitchInst *New = SwitchInst::Create(CompVal, DefaultBB, Values.size(), BI);
+
+ // Add all of the 'cases' to the switch instruction.
+ for (unsigned i = 0, e = Values.size(); i != e; ++i)
+ New->addCase(Values[i], EdgeBB);
+
+  // We added edges from BB to the EdgeBB. As such, if there were any
+ // PHI nodes in EdgeBB, they need entries to be added corresponding to
+ // the number of edges added.
+ for (BasicBlock::iterator BBI = EdgeBB->begin();
+ isa<PHINode>(BBI); ++BBI) {
+ PHINode *PN = cast<PHINode>(BBI);
+ Value *InVal = PN->getIncomingValueForBlock(BB);
+ for (unsigned i = 0, e = Values.size()-1; i != e; ++i)
+ PN->addIncoming(InVal, BB);
+ }
+
+ // Erase the old branch instruction.
+ EraseTerminatorInstAndDCECond(BI);
+
+ DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
+ return true;
+}
- // If this is a branch on a phi node in the current block, thread control
- // through this block if any PHI node entries are constants.
- if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
- if (PN->getParent() == BI->getParent())
- if (FoldCondBranchOnPHI(BI))
- return SimplifyCFG(BB) | true;
-
- // If this basic block is ONLY a setcc and a branch, and if a predecessor
- // branches to us and one of our successors, fold the setcc into the
- // predecessor and use logical operations to pick the right destination.
- if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB) | true;
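+/// SimplifyReturn - Fold a return that is the only real instruction in its
+/// block into its predecessors: duplicate it into unconditional-branch
+/// predecessors (when return duplication is allowed) and turn a conditional
+/// branch to two returns into a select and a single return.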
+bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI) {
+ BasicBlock *BB = RI->getParent();
+ if (!BB->getFirstNonPHIOrDbg()->isTerminator()) return false;
+
+ // Find predecessors that end with branches.
+ SmallVector<BasicBlock*, 8> UncondBranchPreds;
+ SmallVector<BranchInst*, 8> CondBranchPreds;
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ TerminatorInst *PTI = P->getTerminator();
+ if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
+ if (BI->isUnconditional())
+ UncondBranchPreds.push_back(P);
+ else
+ CondBranchPreds.push_back(BI);
+ }
+ }
+
+ // If we found some, do the transformation!
+ if (!UncondBranchPreds.empty() && DupRet) {
+ while (!UncondBranchPreds.empty()) {
+ BasicBlock *Pred = UncondBranchPreds.pop_back_val();
+ DEBUG(dbgs() << "FOLDING: " << *BB
+ << "INTO UNCOND BRANCH PRED: " << *Pred);
+ (void)FoldReturnIntoUncondBranch(RI, BB, Pred);
+ }
+
+ // If we eliminated all predecessors of the block, delete the block now.
+ if (pred_begin(BB) == pred_end(BB))
+ // We know there are no successors, so just nuke the block.
+ BB->eraseFromParent();
+
+ return true;
+ }
+
+ // Check out all of the conditional branches going to this return
+ // instruction. If any of them just select between returns, change the
+ // branch itself into a select/return pair.
+ while (!CondBranchPreds.empty()) {
+ BranchInst *BI = CondBranchPreds.pop_back_val();
+
+ // Check to see if the non-BB successor is also a return block.
+ if (isa<ReturnInst>(BI->getSuccessor(0)->getTerminator()) &&
+ isa<ReturnInst>(BI->getSuccessor(1)->getTerminator()) &&
+ SimplifyCondBranchToTwoReturns(BI))
+ return true;
+ }
+ return false;
+}
+bool SimplifyCFGOpt::SimplifyUnwind(UnwindInst *UI) {
+ // Check to see if the first instruction in this block is just an unwind.
+ // If so, replace any invoke instructions which use this as an exception
+ // destination with call instructions.
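+  // Illustrative sketch: "invoke void @f() to label %ok unwind label %bb",
+  // where %bb immediately unwinds, becomes "call void @f()" followed by
+  // "br label %ok".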
+ BasicBlock *BB = UI->getParent();
+ if (!BB->getFirstNonPHIOrDbg()->isTerminator()) return false;
- // Scan predecessor blocks for conditional branches.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
- if (PBI != BI && PBI->isConditional())
- if (SimplifyCondBranchToCondBranch(PBI, BI))
- return SimplifyCFG(BB) | true;
- }
- } else if (isa<UnreachableInst>(BB->getTerminator())) {
- // If there are any instructions immediately before the unreachable that can
- // be removed, do so.
- Instruction *Unreachable = BB->getTerminator();
- while (Unreachable != BB->begin()) {
- BasicBlock::iterator BBI = Unreachable;
- --BBI;
- // Do not delete instructions that can have side effects, like calls
- // (which may never return) and volatile loads and stores.
- if (isa<CallInst>(BBI) && !isa<DbgInfoIntrinsic>(BBI)) break;
-
- if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
- if (SI->isVolatile())
- break;
-
- if (LoadInst *LI = dyn_cast<LoadInst>(BBI))
- if (LI->isVolatile())
- break;
-
- // Delete this instruction
- BB->getInstList().erase(BBI);
+ bool Changed = false;
+ SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
+ while (!Preds.empty()) {
+ BasicBlock *Pred = Preds.back();
+ InvokeInst *II = dyn_cast<InvokeInst>(Pred->getTerminator());
+ if (II && II->getUnwindDest() == BB) {
+ // Insert a new branch instruction before the invoke, because this
+ // is now a fall through.
+ BranchInst *BI = BranchInst::Create(II->getNormalDest(), II);
+ Pred->getInstList().remove(II); // Take out of symbol table
+
+ // Insert the call now.
+ SmallVector<Value*,8> Args(II->op_begin(), II->op_end()-3);
+ CallInst *CI = CallInst::Create(II->getCalledValue(),
+ Args.begin(), Args.end(),
+ II->getName(), BI);
+ CI->setCallingConv(II->getCallingConv());
+ CI->setAttributes(II->getAttributes());
+ // If the invoke produced a value, the Call now does instead.
+ II->replaceAllUsesWith(CI);
+ delete II;
Changed = true;
}
+
+ Preds.pop_back();
+ }
+
+ // If this block is now dead (and isn't the entry block), remove it.
+ if (pred_begin(BB) == pred_end(BB) &&
+ BB != &BB->getParent()->getEntryBlock()) {
+ // We know there are no successors, so just nuke the block.
+ BB->eraseFromParent();
+ return true;
+ }
+
+ return Changed;
+}
- // If the unreachable instruction is the first in the block, take a gander
- // at all of the predecessors of this instruction, and simplify them.
- if (&BB->front() == Unreachable) {
- SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- TerminatorInst *TI = Preds[i]->getTerminator();
-
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- if (BI->isUnconditional()) {
- if (BI->getSuccessor(0) == BB) {
- new UnreachableInst(TI->getContext(), TI);
- TI->eraseFromParent();
- Changed = true;
- }
- } else {
- if (BI->getSuccessor(0) == BB) {
- BranchInst::Create(BI->getSuccessor(1), BI);
- EraseTerminatorInstAndDCECond(BI);
- } else if (BI->getSuccessor(1) == BB) {
- BranchInst::Create(BI->getSuccessor(0), BI);
- EraseTerminatorInstAndDCECond(BI);
- Changed = true;
- }
+bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
+ BasicBlock *BB = UI->getParent();
+
+ bool Changed = false;
+
+ // If there are any instructions immediately before the unreachable that can
+ // be removed, do so.
+ while (UI != BB->begin()) {
+ BasicBlock::iterator BBI = UI;
+ --BBI;
+ // Do not delete instructions that can have side effects, like calls
+ // (which may never return) and volatile loads and stores.
+ if (isa<CallInst>(BBI) && !isa<DbgInfoIntrinsic>(BBI)) break;
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
+ if (SI->isVolatile())
+ break;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(BBI))
+ if (LI->isVolatile())
+ break;
+
+ // Delete this instruction
+ BBI->eraseFromParent();
+ Changed = true;
+ }
+
+ // If the unreachable instruction is the first in the block, take a gander
+ // at all of the predecessors of this instruction, and simplify them.
+ if (&BB->front() != UI) return Changed;
+
+ SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
+ for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
+ TerminatorInst *TI = Preds[i]->getTerminator();
+
+ if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+ if (BI->isUnconditional()) {
+ if (BI->getSuccessor(0) == BB) {
+ new UnreachableInst(TI->getContext(), TI);
+ TI->eraseFromParent();
+ Changed = true;
+ }
+ } else {
+ if (BI->getSuccessor(0) == BB) {
+ BranchInst::Create(BI->getSuccessor(1), BI);
+ EraseTerminatorInstAndDCECond(BI);
+ } else if (BI->getSuccessor(1) == BB) {
+ BranchInst::Create(BI->getSuccessor(0), BI);
+ EraseTerminatorInstAndDCECond(BI);
+ Changed = true;
+ }
+ }
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+ for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
+ if (SI->getSuccessor(i) == BB) {
+ BB->removePredecessor(SI->getParent());
+ SI->removeCase(i);
+ --i; --e;
+ Changed = true;
+ }
+ // If the default value is unreachable, figure out the most popular
+ // destination and make it the default.
+ if (SI->getSuccessor(0) == BB) {
+ std::map<BasicBlock*, unsigned> Popularity;
+ for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
+ Popularity[SI->getSuccessor(i)]++;
+
+ // Find the most popular block.
+ unsigned MaxPop = 0;
+ BasicBlock *MaxBlock = 0;
+ for (std::map<BasicBlock*, unsigned>::iterator
+ I = Popularity.begin(), E = Popularity.end(); I != E; ++I) {
+ if (I->second > MaxPop) {
+ MaxPop = I->second;
+ MaxBlock = I->first;
}
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+ }
+ if (MaxBlock) {
+ // Make this the new default, allowing us to delete any explicit
+ // edges to it.
+ SI->setSuccessor(0, MaxBlock);
+ Changed = true;
+
+ // If MaxBlock has phinodes in it, remove MaxPop-1 entries from
+ // it.
+ if (isa<PHINode>(MaxBlock->begin()))
+ for (unsigned i = 0; i != MaxPop-1; ++i)
+ MaxBlock->removePredecessor(SI->getParent());
+
for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- if (SI->getSuccessor(i) == BB) {
- BB->removePredecessor(SI->getParent());
+ if (SI->getSuccessor(i) == MaxBlock) {
SI->removeCase(i);
--i; --e;
- Changed = true;
- }
- // If the default value is unreachable, figure out the most popular
- // destination and make it the default.
- if (SI->getSuccessor(0) == BB) {
- std::map<BasicBlock*, unsigned> Popularity;
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- Popularity[SI->getSuccessor(i)]++;
-
- // Find the most popular block.
- unsigned MaxPop = 0;
- BasicBlock *MaxBlock = 0;
- for (std::map<BasicBlock*, unsigned>::iterator
- I = Popularity.begin(), E = Popularity.end(); I != E; ++I) {
- if (I->second > MaxPop) {
- MaxPop = I->second;
- MaxBlock = I->first;
- }
- }
- if (MaxBlock) {
- // Make this the new default, allowing us to delete any explicit
- // edges to it.
- SI->setSuccessor(0, MaxBlock);
- Changed = true;
-
- // If MaxBlock has phinodes in it, remove MaxPop-1 entries from
- // it.
- if (isa<PHINode>(MaxBlock->begin()))
- for (unsigned i = 0; i != MaxPop-1; ++i)
- MaxBlock->removePredecessor(SI->getParent());
-
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- if (SI->getSuccessor(i) == MaxBlock) {
- SI->removeCase(i);
- --i; --e;
- }
}
- }
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(TI)) {
- if (II->getUnwindDest() == BB) {
- // Convert the invoke to a call instruction. This would be a good
- // place to note that the call does not throw though.
- BranchInst *BI = BranchInst::Create(II->getNormalDest(), II);
- II->removeFromParent(); // Take out of symbol table
-
- // Insert the call now...
- SmallVector<Value*, 8> Args(II->op_begin(), II->op_end()-3);
- CallInst *CI = CallInst::Create(II->getCalledValue(),
- Args.begin(), Args.end(),
- II->getName(), BI);
- CI->setCallingConv(II->getCallingConv());
- CI->setAttributes(II->getAttributes());
- // If the invoke produced a value, the call does now instead.
- II->replaceAllUsesWith(CI);
- delete II;
- Changed = true;
- }
}
}
-
- // If this block is now dead, remove it.
- if (pred_begin(BB) == pred_end(BB) &&
- BB != &BB->getParent()->getEntryBlock()) {
- // We know there are no successors, so just nuke the block.
- M->getBasicBlockList().erase(BB);
- return true;
- }
- }
- } else if (IndirectBrInst *IBI =
- dyn_cast<IndirectBrInst>(BB->getTerminator())) {
- // Eliminate redundant destinations.
- SmallPtrSet<Value *, 8> Succs;
- for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
- BasicBlock *Dest = IBI->getDestination(i);
- if (!Dest->hasAddressTaken() || !Succs.insert(Dest)) {
- Dest->removePredecessor(BB);
- IBI->removeDestination(i);
- --i; --e;
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(TI)) {
+ if (II->getUnwindDest() == BB) {
+ // Convert the invoke to a call instruction. This would be a good
+ // place to note that the call does not throw though.
+ BranchInst *BI = BranchInst::Create(II->getNormalDest(), II);
+ II->removeFromParent(); // Take out of symbol table
+
+ // Insert the call now...
+ SmallVector<Value*, 8> Args(II->op_begin(), II->op_end()-3);
+ CallInst *CI = CallInst::Create(II->getCalledValue(),
+ Args.begin(), Args.end(),
+ II->getName(), BI);
+ CI->setCallingConv(II->getCallingConv());
+ CI->setAttributes(II->getAttributes());
+ // If the invoke produced a value, the call does now instead.
+ II->replaceAllUsesWith(CI);
+ delete II;
Changed = true;
}
- }
+ }
+ }
+
+ // If this block is now dead, remove it.
+ if (pred_begin(BB) == pred_end(BB) &&
+ BB != &BB->getParent()->getEntryBlock()) {
+ // We know there are no successors, so just nuke the block.
+ BB->eraseFromParent();
+ return true;
+ }
- if (IBI->getNumDestinations() == 0) {
- // If the indirectbr has no successors, change it to unreachable.
- new UnreachableInst(IBI->getContext(), IBI);
- IBI->eraseFromParent();
- Changed = true;
- } else if (IBI->getNumDestinations() == 1) {
- // If the indirectbr has one successor, change it to a direct branch.
- BranchInst::Create(IBI->getDestination(0), IBI);
- IBI->eraseFromParent();
+ return Changed;
+}
+
+/// TurnSwitchRangeIntoICmp - Turns a switch whose cases all branch to the
+/// same destination and form a contiguous integer range into a sub, an icmp
+/// and a branch.
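+///
+/// For example (illustrative, with invented value names): a switch on %x
+/// whose cases 3, 4 and 5 all go to %bb becomes
+///   %x.off = add i32 %x, -3
+///   %switch = icmp ult i32 %x.off, 3
+///   br i1 %switch, label %bb, label %default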
+static bool TurnSwitchRangeIntoICmp(SwitchInst *SI) {
+ assert(SI->getNumCases() > 2 && "Degenerate switch?");
+
+ // Make sure all cases point to the same destination and gather the values.
+ SmallVector<ConstantInt *, 16> Cases;
+ Cases.push_back(SI->getCaseValue(1));
+ for (unsigned I = 2, E = SI->getNumCases(); I != E; ++I) {
+ if (SI->getSuccessor(I-1) != SI->getSuccessor(I))
+ return false;
+ Cases.push_back(SI->getCaseValue(I));
+ }
+ assert(Cases.size() == SI->getNumCases()-1 && "Not all cases gathered");
+
+ // Sort the case values, then check if they form a range we can transform.
+ array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate);
+ for (unsigned I = 1, E = Cases.size(); I != E; ++I) {
+ if (Cases[I-1]->getValue() != Cases[I]->getValue()+1)
+ return false;
+ }
+
+ Constant *Offset = ConstantExpr::getNeg(Cases.back());
+ Constant *NumCases = ConstantInt::get(Offset->getType(), SI->getNumCases()-1);
+
+ Value *Sub = SI->getCondition();
+ if (!Offset->isNullValue())
+ Sub = BinaryOperator::CreateAdd(Sub, Offset, Sub->getName()+".off", SI);
+ Value *Cmp = new ICmpInst(SI, ICmpInst::ICMP_ULT, Sub, NumCases, "switch");
+ BranchInst::Create(SI->getSuccessor(1), SI->getDefaultDest(), Cmp, SI);
+
+ // Prune obsolete incoming values off the successor's PHI nodes.
+ for (BasicBlock::iterator BBI = SI->getSuccessor(1)->begin();
+ isa<PHINode>(BBI); ++BBI) {
+ for (unsigned I = 0, E = SI->getNumCases()-2; I != E; ++I)
+ cast<PHINode>(BBI)->removeIncomingValue(SI->getParent());
+ }
+ SI->eraseFromParent();
+
+ return true;
+}
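+// Editor's sketch (not part of the original commit): for a switch on %x whose
+// cases 3, 4 and 5 all branch to %common, the transform above emits roughly:
+//   %x.off  = add i32 %x, -3           ; offset by the smallest case value
+//   %switch = icmp ult i32 %x.off, 3   ; one range test covers all cases
+//   br i1 %switch, label %common, label %default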
+
+bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI) {
+  // If this switch is too complex to be worth looking at, ignore it.
+ if (!isValueEqualityComparison(SI))
+ return false;
+
+ BasicBlock *BB = SI->getParent();
+
+ // If we only have one predecessor, and if it is a branch on this value,
+ // see if that predecessor totally determines the outcome of this switch.
+ if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
+ if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred))
+ return SimplifyCFG(BB) | true;
+
+ // If the block only contains the switch, see if we can fold the block
+ // away into any preds.
+ BasicBlock::iterator BBI = BB->begin();
+ // Ignore dbg intrinsics.
+ while (isa<DbgInfoIntrinsic>(BBI))
+ ++BBI;
+ if (SI == &*BBI)
+ if (FoldValueComparisonIntoPredecessors(SI))
+ return SimplifyCFG(BB) | true;
+
+ // Try to transform the switch into an icmp and a branch.
+ if (TurnSwitchRangeIntoICmp(SI))
+ return SimplifyCFG(BB) | true;
+
+ return false;
+}
+
+bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
+ BasicBlock *BB = IBI->getParent();
+ bool Changed = false;
+
+ // Eliminate redundant destinations.
+ SmallPtrSet<Value *, 8> Succs;
+ for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
+ BasicBlock *Dest = IBI->getDestination(i);
+ if (!Dest->hasAddressTaken() || !Succs.insert(Dest)) {
+ Dest->removePredecessor(BB);
+ IBI->removeDestination(i);
+ --i; --e;
Changed = true;
}
+ }
+
+ if (IBI->getNumDestinations() == 0) {
+ // If the indirectbr has no successors, change it to unreachable.
+ new UnreachableInst(IBI->getContext(), IBI);
+ EraseTerminatorInstAndDCECond(IBI);
+ return true;
+ }
+
+ if (IBI->getNumDestinations() == 1) {
+ // If the indirectbr has one successor, change it to a direct branch.
+ BranchInst::Create(IBI->getDestination(0), IBI);
+ EraseTerminatorInstAndDCECond(IBI);
+ return true;
}
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
+ if (SimplifyIndirectBrOnSelect(IBI, SI))
+ return SimplifyCFG(BB) | true;
+ }
+ return Changed;
+}
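+// Editor's sketch: given "indirectbr i8* %p, [label %a, label %a, label %b]"
+// where %b's address is never taken, the loop above drops both the duplicate
+// %a edge and the untargetable %b. A single surviving destination then
+// degrades to "br label %a", and an empty destination list to "unreachable".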
- // Merge basic blocks into their predecessor if there is only one distinct
- // pred, and if there is only one distinct successor of the predecessor, and
- // if there are no PHI nodes.
- //
- if (MergeBlockIntoPredecessor(BB))
+bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI) {
+ BasicBlock *BB = BI->getParent();
+
+ // If the Terminator is the only non-phi instruction, simplify the block.
+ BasicBlock::iterator I = BB->getFirstNonPHIOrDbg();
+ if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() &&
+ TryToSimplifyUncondBranchFromEmptyBlock(BB))
return true;
+
+ // If the only instruction in the block is a seteq/setne comparison
+ // against a constant, try to simplify the block.
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
+ if (ICI->isEquality() && isa<ConstantInt>(ICI->getOperand(1))) {
+ for (++I; isa<DbgInfoIntrinsic>(I); ++I)
+ ;
+ if (I->isTerminator() && TryToSimplifyUncondBranchWithICmpInIt(ICI, TD))
+ return true;
+ }
+
+ return false;
+}
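+// Editor's sketch: a block that is empty apart from its terminator, e.g.
+//   bb:                                 ; preds = %entry
+//     br label %succ
+// is deleted by TryToSimplifyUncondBranchFromEmptyBlock, with %entry rerouted
+// straight to %succ (provided %succ's PHI nodes allow the merge).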
- // Otherwise, if this block only has a single predecessor, and if that block
- // is a conditional branch, see if we can hoist any code from this block up
- // into our predecessor.
- pred_iterator PI(pred_begin(BB)), PE(pred_end(BB));
- BasicBlock *OnlyPred = 0;
- for (; PI != PE; ++PI) { // Search all predecessors, see if they are all same
- if (!OnlyPred)
- OnlyPred = *PI;
- else if (*PI != OnlyPred) {
- OnlyPred = 0; // There are multiple different predecessors...
- break;
+
+bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI) {
+ BasicBlock *BB = BI->getParent();
+
+ // Conditional branch
+ if (isValueEqualityComparison(BI)) {
+ // If we only have one predecessor, and if it is a branch on this value,
+ // see if that predecessor totally determines the outcome of this
+ // switch.
+ if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
+ if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred))
+ return SimplifyCFG(BB) | true;
+
+ // This block must be empty, except for the setcond inst, if it exists.
+ // Ignore dbg intrinsics.
+ BasicBlock::iterator I = BB->begin();
+ while (isa<DbgInfoIntrinsic>(I))
+ ++I;
+ if (&*I == BI) {
+ if (FoldValueComparisonIntoPredecessors(BI))
+ return SimplifyCFG(BB) | true;
+ } else if (&*I == cast<Instruction>(BI->getCondition())){
+ ++I;
+ // Ignore dbg intrinsics.
+ while (isa<DbgInfoIntrinsic>(I))
+ ++I;
+ if (&*I == BI && FoldValueComparisonIntoPredecessors(BI))
+ return SimplifyCFG(BB) | true;
}
}
- if (OnlyPred)
- if (BranchInst *BI = dyn_cast<BranchInst>(OnlyPred->getTerminator()))
- if (BI->isConditional()) {
- // Get the other block.
- BasicBlock *OtherBB = BI->getSuccessor(BI->getSuccessor(0) == BB);
- PI = pred_begin(OtherBB);
- ++PI;
-
- if (PI == pred_end(OtherBB)) {
- // We have a conditional branch to two blocks that are only reachable
- // from the condbr. We know that the condbr dominates the two blocks,
- // so see if there is any identical code in the "then" and "else"
- // blocks. If so, we can hoist it up to the branching block.
- Changed |= HoistThenElseCodeToIf(BI);
- } else {
- BasicBlock* OnlySucc = NULL;
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
- SI != SE; ++SI) {
- if (!OnlySucc)
- OnlySucc = *SI;
- else if (*SI != OnlySucc) {
- OnlySucc = 0; // There are multiple distinct successors!
- break;
- }
- }
+ // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
+ if (SimplifyBranchOnICmpChain(BI, TD))
+ return true;
+
+ // We have a conditional branch to two blocks that are only reachable
+ // from BI. We know that the condbr dominates the two blocks, so see if
+ // there is any identical code in the "then" and "else" blocks. If so, we
+ // can hoist it up to the branching block.
+ if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {
+ if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
+ if (HoistThenElseCodeToIf(BI))
+ return SimplifyCFG(BB) | true;
+ } else {
+ // If Successor #1 has multiple preds, we may be able to conditionally
+ // execute Successor #0 if it branches to successor #1.
+ TerminatorInst *Succ0TI = BI->getSuccessor(0)->getTerminator();
+ if (Succ0TI->getNumSuccessors() == 1 &&
+ Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
+ return SimplifyCFG(BB) | true;
+ }
+ } else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
+ // If Successor #0 has multiple preds, we may be able to conditionally
+ // execute Successor #1 if it branches to successor #0.
+ TerminatorInst *Succ1TI = BI->getSuccessor(1)->getTerminator();
+ if (Succ1TI->getNumSuccessors() == 1 &&
+ Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
+ return SimplifyCFG(BB) | true;
+ }
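+  // Editor's note on the two cases above: in a "triangle" such as
+  //   br i1 %c, label %then, label %end   ; %then ends in "br label %end"
+  // where %then has no other predecessors, SpeculativelyExecuteBB may hoist
+  // %then's cheap, side-effect-free instructions into this block and let
+  // selects feed %end's PHI nodes instead of the branch.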
+
+ // If this is a branch on a phi node in the current block, thread control
+ // through this block if any PHI node entries are constants.
+ if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
+ if (PN->getParent() == BI->getParent())
+ if (FoldCondBranchOnPHI(BI, TD))
+ return SimplifyCFG(BB) | true;
+
+ // If this basic block is ONLY a setcc and a branch, and if a predecessor
+ // branches to us and one of our successors, fold the setcc into the
+ // predecessor and use logical operations to pick the right destination.
+ if (FoldBranchToCommonDest(BI))
+ return SimplifyCFG(BB) | true;
+
+ // Scan predecessor blocks for conditional branches.
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
+ if (PBI != BI && PBI->isConditional())
+ if (SimplifyCondBranchToCondBranch(PBI, BI))
+ return SimplifyCFG(BB) | true;
- if (OnlySucc == OtherBB) {
- // If BB's only successor is the other successor of the predecessor,
- // i.e. a triangle, see if we can hoist any code from this block up
- // to the "if" block.
- Changed |= SpeculativelyExecuteBB(BI, BB);
- }
- }
- }
+ return false;
+}
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if (BranchInst *BI = dyn_cast<BranchInst>((*PI)->getTerminator()))
- // Change br (X == 0 | X == 1), T, F into a switch instruction.
- if (BI->isConditional() && isa<Instruction>(BI->getCondition())) {
- Instruction *Cond = cast<Instruction>(BI->getCondition());
- // If this is a bunch of seteq's or'd together, or if it's a bunch of
- // 'setne's and'ed together, collect them.
- Value *CompVal = 0;
- std::vector<ConstantInt*> Values;
- bool TrueWhenEqual = GatherValueComparisons(Cond, CompVal, Values);
- if (CompVal) {
- // There might be duplicate constants in the list, which the switch
- // instruction can't handle, remove them now.
- std::sort(Values.begin(), Values.end(), ConstantIntOrdering());
- Values.erase(std::unique(Values.begin(), Values.end()), Values.end());
-
- // Figure out which block is which destination.
- BasicBlock *DefaultBB = BI->getSuccessor(1);
- BasicBlock *EdgeBB = BI->getSuccessor(0);
- if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB);
-
- // Convert pointer to int before we switch.
- if (CompVal->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without TargetData");
- CompVal = new PtrToIntInst(CompVal,
- TD->getIntPtrType(CompVal->getContext()),
- "magicptr", BI);
- }
+bool SimplifyCFGOpt::run(BasicBlock *BB) {
+ bool Changed = false;
- // Create the new switch instruction now.
- SwitchInst *New = SwitchInst::Create(CompVal, DefaultBB,
- Values.size(), BI);
-
- // Add all of the 'cases' to the switch instruction.
- for (unsigned i = 0, e = Values.size(); i != e; ++i)
- New->addCase(Values[i], EdgeBB);
-
- // We added edges from PI to the EdgeBB. As such, if there were any
- // PHI nodes in EdgeBB, they need entries to be added corresponding to
- // the number of edges added.
- for (BasicBlock::iterator BBI = EdgeBB->begin();
- isa<PHINode>(BBI); ++BBI) {
- PHINode *PN = cast<PHINode>(BBI);
- Value *InVal = PN->getIncomingValueForBlock(*PI);
- for (unsigned i = 0, e = Values.size()-1; i != e; ++i)
- PN->addIncoming(InVal, *PI);
- }
+ assert(BB && BB->getParent() && "Block not embedded in function!");
+ assert(BB->getTerminator() && "Degenerate basic block encountered!");
- // Erase the old branch instruction.
- EraseTerminatorInstAndDCECond(BI);
- return true;
- }
- }
+ // Remove basic blocks that have no predecessors (except the entry block)...
+  // or that just have themselves as a predecessor. These are unreachable.
+ if ((pred_begin(BB) == pred_end(BB) &&
+ BB != &BB->getParent()->getEntryBlock()) ||
+ BB->getSinglePredecessor() == BB) {
+ DEBUG(dbgs() << "Removing BB: \n" << *BB);
+ DeleteDeadBlock(BB);
+ return true;
+ }
+
+ // Check to see if we can constant propagate this terminator instruction
+ // away...
+ Changed |= ConstantFoldTerminator(BB);
+
+ // Check for and eliminate duplicate PHI nodes in this block.
+ Changed |= EliminateDuplicatePHINodes(BB);
+
+ // Merge basic blocks into their predecessor if there is only one distinct
+ // pred, and if there is only one distinct successor of the predecessor, and
+ // if there are no PHI nodes.
+ //
+ if (MergeBlockIntoPredecessor(BB))
+ return true;
+
+ // If there is a trivial two-entry PHI node in this basic block, and we can
+ // eliminate it, do so now.
+ if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
+ if (PN->getNumIncomingValues() == 2)
+ Changed |= FoldTwoEntryPHINode(PN, TD);
+
+ if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
+ if (BI->isUnconditional()) {
+ if (SimplifyUncondBranch(BI)) return true;
+ } else {
+ if (SimplifyCondBranch(BI)) return true;
+ }
+ } else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
+ if (SimplifyReturn(RI)) return true;
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
+ if (SimplifySwitch(SI)) return true;
+ } else if (UnreachableInst *UI =
+ dyn_cast<UnreachableInst>(BB->getTerminator())) {
+ if (SimplifyUnreachable(UI)) return true;
+ } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
+ if (SimplifyUnwind(UI)) return true;
+ } else if (IndirectBrInst *IBI =
+ dyn_cast<IndirectBrInst>(BB->getTerminator())) {
+ if (SimplifyIndirectBr(IBI)) return true;
+ }
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
new file mode 100644
index 0000000..ac005f9
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -0,0 +1,94 @@
+//===------ SimplifyInstructions.cpp - Remove redundant instructions ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility pass used for testing the InstructionSimplify analysis.
+// The analysis is applied to every instruction, and if it simplifies then the
+// instruction is replaced by the simplification. If you are looking for a pass
+// that performs serious instruction folding, use the instcombine pass instead.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "instsimplify"
+#include "llvm/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace llvm;
+
+STATISTIC(NumSimplified, "Number of redundant instructions removed");
+
+namespace {
+ struct InstSimplifier : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ InstSimplifier() : FunctionPass(ID) {
+ initializeInstSimplifierPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ }
+
+ /// runOnFunction - Remove instructions that simplify.
+ bool runOnFunction(Function &F) {
+ const DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
+ bool Changed = false;
+
+ do {
+ for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+ DE = df_end(&F.getEntryBlock()); DI != DE; ++DI)
+ for (BasicBlock::iterator BI = DI->begin(), BE = DI->end(); BI != BE;) {
+ Instruction *I = BI++;
+ // The first time through the loop ToSimplify is empty and we try to
+ // simplify all instructions. On later iterations ToSimplify is not
+ // empty and we only bother simplifying instructions that are in it.
+ if (!ToSimplify->empty() && !ToSimplify->count(I))
+ continue;
+ // Don't waste time simplifying unused instructions.
+ if (!I->use_empty())
+ if (Value *V = SimplifyInstruction(I, TD, DT)) {
+ // Mark all uses for resimplification next time round the loop.
+ for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
+ UI != UE; ++UI)
+ Next->insert(cast<Instruction>(*UI));
+ I->replaceAllUsesWith(V);
+ ++NumSimplified;
+ Changed = true;
+ }
+ Changed |= RecursivelyDeleteTriviallyDeadInstructions(I);
+ }
+
+ // Place the list of instructions to simplify on the next loop iteration
+ // into ToSimplify.
+ std::swap(ToSimplify, Next);
+ Next->clear();
+ } while (!ToSimplify->empty());
+
+ return Changed;
+ }
+ };
+}
+
+char InstSimplifier::ID = 0;
+INITIALIZE_PASS(InstSimplifier, "instsimplify", "Remove redundant instructions",
+ false, false)
+char &llvm::InstructionSimplifierID = InstSimplifier::ID;
+
+// Public interface to the simplify instructions pass.
+FunctionPass *llvm::createInstructionSimplifierPass() {
+ return new InstSimplifier();
+}
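+// Usage sketch (editor's note, not part of the original file): the pass is
+// registered under the name "instsimplify", so it can be exercised with e.g.
+// "opt -instsimplify -S input.ll", or added to a PassManager built by hand
+// via createInstructionSimplifierPass().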
diff --git a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
index a51f1e1..ccb8287 100644
--- a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
char UnifyFunctionExitNodes::ID = 0;
INITIALIZE_PASS(UnifyFunctionExitNodes, "mergereturn",
- "Unify function exit nodes", false, false);
+ "Unify function exit nodes", false, false)
Pass *llvm::createUnifyFunctionExitNodesPass() {
return new UnifyFunctionExitNodes();
diff --git a/contrib/llvm/lib/Transforms/Utils/Utils.cpp b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
new file mode 100644
index 0000000..24e8c8f
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/Utils.cpp
@@ -0,0 +1,37 @@
+//===-- Utils.cpp - TransformUtils Infrastructure -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common initialization infrastructure for the
+// TransformUtils library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InitializePasses.h"
+#include "llvm-c/Initialization.h"
+
+using namespace llvm;
+
+/// initializeTransformUtils - Initialize all passes in the TransformUtils
+/// library.
+void llvm::initializeTransformUtils(PassRegistry &Registry) {
+ initializeBreakCriticalEdgesPass(Registry);
+ initializeInstNamerPass(Registry);
+ initializeLCSSAPass(Registry);
+ initializeLoopSimplifyPass(Registry);
+ initializeLowerInvokePass(Registry);
+ initializeLowerSwitchPass(Registry);
+ initializePromotePassPass(Registry);
+ initializeUnifyFunctionExitNodesPass(Registry);
+ initializeInstSimplifierPass(Registry);
+}
+
+/// LLVMInitializeTransformUtils - C binding for initializeTransformUtils.
+void LLVMInitializeTransformUtils(LLVMPassRegistryRef R) {
+ initializeTransformUtils(*unwrap(R));
+}
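+// Editor's sketch of the C-side usage, assuming the usual llvm-c entry point
+// LLVMGetGlobalPassRegistry():
+//   LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
+//   LLVMInitializeTransformUtils(R);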
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index fc4bde7..f5481d3 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -21,147 +21,111 @@
using namespace llvm;
Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM,
- bool ModuleLevelChanges) {
- Value *&VMSlot = VM[V];
- if (VMSlot) return VMSlot; // Does it exist in the map yet?
+ RemapFlags Flags) {
+ ValueToValueMapTy::iterator I = VM.find(V);
+
+ // If the value already exists in the map, use it.
+ if (I != VM.end() && I->second) return I->second;
- // NOTE: VMSlot can be invalidated by any reference to VM, which can grow the
- // DenseMap. This includes any recursive calls to MapValue.
-
// Global values do not need to be seeded into the VM if they
// are using the identity mapping.
- if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MDString>(V) ||
- (isa<MDNode>(V) && !cast<MDNode>(V)->isFunctionLocal() &&
- !ModuleLevelChanges))
- return VMSlot = const_cast<Value*>(V);
+ if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MDString>(V))
+ return VM[V] = const_cast<Value*>(V);
if (const MDNode *MD = dyn_cast<MDNode>(V)) {
- // Start by assuming that we'll use the identity mapping.
- VMSlot = const_cast<Value*>(V);
-
+    // If this is module-level metadata and we know that nothing at the
+    // module level is changing, then use an identity mapping.
+ if (!MD->isFunctionLocal() && (Flags & RF_NoModuleLevelChanges))
+ return VM[V] = const_cast<Value*>(V);
+
+ // Create a dummy node in case we have a metadata cycle.
+ MDNode *Dummy = MDNode::getTemporary(V->getContext(), 0, 0);
+ VM[V] = Dummy;
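+    // (Editor's note) The temporary node is what makes cyclic metadata safe
+    // to remap: while the operands below are mapped, any edge leading back
+    // to V resolves to Dummy rather than recursing forever; Dummy is then
+    // RAUW'd with the final node (or discarded on the identity path).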
+
// Check all operands to see if any need to be remapped.
for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
Value *OP = MD->getOperand(i);
- if (!OP || MapValue(OP, VM, ModuleLevelChanges) == OP) continue;
+ if (OP == 0 || MapValue(OP, VM, Flags) == OP) continue;
- // Ok, at least one operand needs remapping.
- MDNode *Dummy = MDNode::getTemporary(V->getContext(), 0, 0);
- VM[V] = Dummy;
+ // Ok, at least one operand needs remapping.
SmallVector<Value*, 4> Elts;
Elts.reserve(MD->getNumOperands());
- for (i = 0; i != e; ++i)
- Elts.push_back(MD->getOperand(i) ?
- MapValue(MD->getOperand(i), VM, ModuleLevelChanges) : 0);
+ for (i = 0; i != e; ++i) {
+ Value *Op = MD->getOperand(i);
+ Elts.push_back(Op ? MapValue(Op, VM, Flags) : 0);
+ }
MDNode *NewMD = MDNode::get(V->getContext(), Elts.data(), Elts.size());
Dummy->replaceAllUsesWith(NewMD);
+ VM[V] = NewMD;
MDNode::deleteTemporary(Dummy);
- return VM[V] = NewMD;
+ return NewMD;
}
- // No operands needed remapping; keep the identity map.
+ VM[V] = const_cast<Value*>(V);
+ MDNode::deleteTemporary(Dummy);
+
+ // No operands needed remapping. Use an identity mapping.
return const_cast<Value*>(V);
}
+  // Okay, this must either be a constant (which may or may not be mappable)
+  // or something that is not in the mapping table.
Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V));
if (C == 0)
return 0;
- if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
- isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
- isa<UndefValue>(C))
- return VMSlot = C; // Primitive constants map directly
-
- if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
- for (User::op_iterator b = CA->op_begin(), i = b, e = CA->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, ModuleLevelChanges);
- if (MV != *i) {
- // This array must contain a reference to a global, make a new array
- // and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CA->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM,
- ModuleLevelChanges)));
- return VM[V] = ConstantArray::get(CA->getType(), Values);
- }
- }
- return VM[V] = C;
- }
-
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
- for (User::op_iterator b = CS->op_begin(), i = b, e = CS->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, ModuleLevelChanges);
- if (MV != *i) {
- // This struct must contain a reference to a global, make a new struct
- // and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CS->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM,
- ModuleLevelChanges)));
- return VM[V] = ConstantStruct::get(CS->getType(), Values);
- }
- }
- return VM[V] = C;
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+ Function *F = cast<Function>(MapValue(BA->getFunction(), VM, Flags));
+ BasicBlock *BB = cast_or_null<BasicBlock>(MapValue(BA->getBasicBlock(), VM,
+ Flags));
+ return VM[V] = BlockAddress::get(F, BB ? BB : BA->getBasicBlock());
}
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
+ Value *Op = C->getOperand(i);
+ Value *Mapped = MapValue(Op, VM, Flags);
+    if (Mapped == Op) continue;
+
+    // Okay, the operands don't all match. We've already processed some or all
+    // of the operands; set them up now.
std::vector<Constant*> Ops;
- for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
- Ops.push_back(cast<Constant>(MapValue(*i, VM, ModuleLevelChanges)));
- return VM[V] = CE->getWithOperands(Ops);
+ Ops.reserve(C->getNumOperands());
+ for (unsigned j = 0; j != i; ++j)
+      Ops.push_back(cast<Constant>(C->getOperand(j)));
+ Ops.push_back(cast<Constant>(Mapped));
+
+ // Map the rest of the operands that aren't processed yet.
+ for (++i; i != e; ++i)
+ Ops.push_back(cast<Constant>(MapValue(C->getOperand(i), VM, Flags)));
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ return VM[V] = CE->getWithOperands(Ops);
+ if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
+ return VM[V] = ConstantArray::get(CA->getType(), Ops);
+ if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C))
+ return VM[V] = ConstantStruct::get(CS->getType(), Ops);
+ assert(isa<ConstantVector>(C) && "Unknown mapped constant type");
+ return VM[V] = ConstantVector::get(Ops);
}
-
- if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
- for (User::op_iterator b = CV->op_begin(), i = b, e = CV->op_end();
- i != e; ++i) {
- Value *MV = MapValue(*i, VM, ModuleLevelChanges);
- if (MV != *i) {
- // This vector value must contain a reference to a global, make a new
- // vector constant and return it.
- //
- std::vector<Constant*> Values;
- Values.reserve(CV->getNumOperands());
- for (User::op_iterator j = b; j != i; ++j)
- Values.push_back(cast<Constant>(*j));
- Values.push_back(cast<Constant>(MV));
- for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM,
- ModuleLevelChanges)));
- return VM[V] = ConstantVector::get(Values);
- }
- }
- return VM[V] = C;
- }
-
- BlockAddress *BA = cast<BlockAddress>(C);
- Function *F = cast<Function>(MapValue(BA->getFunction(), VM,
- ModuleLevelChanges));
- BasicBlock *BB = cast_or_null<BasicBlock>(MapValue(BA->getBasicBlock(),VM,
- ModuleLevelChanges));
- return VM[V] = BlockAddress::get(F, BB ? BB : BA->getBasicBlock());
+
+ // If we reach here, all of the operands of the constant match.
+ return VM[V] = C;
}
/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
///
void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
- bool ModuleLevelChanges) {
+ RemapFlags Flags) {
// Remap operands.
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
- Value *V = MapValue(*op, VMap, ModuleLevelChanges);
- assert(V && "Referenced value not in value map!");
- *op = V;
+ Value *V = MapValue(*op, VMap, Flags);
+ // If we aren't ignoring missing entries, assert that something happened.
+ if (V != 0)
+ *op = V;
+ else
+ assert((Flags & RF_IgnoreMissingEntries) &&
+ "Referenced value not in value map!");
}
// Remap attached metadata.
@@ -170,7 +134,7 @@ void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
for (SmallVectorImpl<std::pair<unsigned, MDNode *> >::iterator
MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI) {
Value *Old = MI->second;
- Value *New = MapValue(Old, VMap, ModuleLevelChanges);
+ Value *New = MapValue(Old, VMap, Flags);
if (New != Old)
I->setMetadata(MI->first, cast<MDNode>(New));
}
diff --git a/contrib/llvm/lib/VMCore/AsmWriter.cpp b/contrib/llvm/lib/VMCore/AsmWriter.cpp
index 831a996..cbc874a 100644
--- a/contrib/llvm/lib/VMCore/AsmWriter.cpp
+++ b/contrib/llvm/lib/VMCore/AsmWriter.cpp
@@ -198,6 +198,7 @@ void TypePrinting::CalcTypeName(const Type *Ty,
case Type::PPC_FP128TyID: OS << "ppc_fp128"; break;
case Type::LabelTyID: OS << "label"; break;
case Type::MetadataTyID: OS << "metadata"; break;
+ case Type::X86_MMXTyID: OS << "x86_mmx"; break;
case Type::IntegerTyID:
OS << 'i' << cast<IntegerType>(Ty)->getBitWidth();
break;
@@ -830,7 +831,8 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
Out << " nuw";
if (OBO->hasNoSignedWrap())
Out << " nsw";
- } else if (const SDivOperator *Div = dyn_cast<SDivOperator>(U)) {
+ } else if (const PossiblyExactOperator *Div =
+ dyn_cast<PossiblyExactOperator>(U)) {
if (Div->isExact())
Out << " exact";
} else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
@@ -1057,11 +1059,6 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- if (const MDNode *Node = dyn_cast<MDNode>(CV)) {
- Out << "!" << Machine->getMetadataSlot(Node);
- return;
- }
-
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
Out << CE->getOpcodeName();
WriteOptimizationInfo(Out, CE);
@@ -1165,7 +1162,11 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
else
Machine = new SlotTracker(Context);
}
- Out << '!' << Machine->getMetadataSlot(N);
+ int Slot = Machine->getMetadataSlot(N);
+ if (Slot == -1)
+ Out << "<badref>";
+ else
+ Out << '!' << Slot;
return;
}
@@ -1395,7 +1396,11 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) {
Out << "!" << NMD->getName() << " = !{";
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
if (i) Out << ", ";
- Out << '!' << Machine.getMetadataSlot(NMD->getOperand(i));
+ int Slot = Machine.getMetadataSlot(NMD->getOperand(i));
+ if (Slot == -1)
+ Out << "<badref>";
+ else
+ Out << '!' << Slot;
}
Out << "}\n";
}
@@ -1455,6 +1460,7 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
if (GV->isThreadLocal()) Out << "thread_local ";
if (unsigned AddressSpace = GV->getType()->getAddressSpace())
Out << "addrspace(" << AddressSpace << ") ";
+ if (GV->hasUnnamedAddr()) Out << "unnamed_addr ";
Out << (GV->isConstant() ? "constant " : "global ");
TypePrinter.print(GV->getType()->getElementType(), Out);
@@ -1575,6 +1581,8 @@ void AssemblyWriter::printFunction(const Function *F) {
case CallingConv::ARM_AAPCS: Out << "arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc "; break;
case CallingConv::MSP430_INTR: Out << "msp430_intrcc "; break;
+  case CallingConv::PTX_Kernel:   Out << "ptx_kernel "; break;
+  case CallingConv::PTX_Device:   Out << "ptx_device "; break;
default: Out << "cc" << F->getCallingConv() << " "; break;
}
@@ -1622,6 +1630,8 @@ void AssemblyWriter::printFunction(const Function *F) {
Out << "..."; // Output varargs portion of signature!
}
Out << ')';
+ if (F->hasUnnamedAddr())
+ Out << " unnamed_addr";
Attributes FnAttrs = Attrs.getFnAttributes();
if (FnAttrs != Attribute::None)
Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes());
@@ -1843,6 +1853,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break;
+    case CallingConv::PTX_Kernel: Out << " ptx_kernel "; break;
+    case CallingConv::PTX_Device: Out << " ptx_device "; break;
default: Out << " cc" << CI->getCallingConv(); break;
}
@@ -1897,6 +1909,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break;
+    case CallingConv::PTX_Kernel: Out << " ptx_kernel "; break;
+    case CallingConv::PTX_Device: Out << " ptx_device "; break;
default: Out << " cc" << II->getCallingConv(); break;
}
@@ -2033,15 +2047,7 @@ static void WriteMDNodeComment(const MDNode *Node,
return;
Out.PadToColumn(50);
- if (Tag == dwarf::DW_TAG_auto_variable)
- Out << "; [ DW_TAG_auto_variable ]";
- else if (Tag == dwarf::DW_TAG_arg_variable)
- Out << "; [ DW_TAG_arg_variable ]";
- else if (Tag == dwarf::DW_TAG_return_variable)
- Out << "; [ DW_TAG_return_variable ]";
- else if (Tag == dwarf::DW_TAG_vector_type)
- Out << "; [ DW_TAG_vector_type ]";
- else if (Tag == dwarf::DW_TAG_user_base)
+ if (Tag == dwarf::DW_TAG_user_base)
Out << "; [ DW_TAG_user_base ]";
else if (Tag.isIntN(32)) {
if (const char *TagName = dwarf::TagString(Tag.getZExtValue()))
diff --git a/contrib/llvm/lib/VMCore/Attributes.cpp b/contrib/llvm/lib/VMCore/Attributes.cpp
index a000aee..92152a3 100644
--- a/contrib/llvm/lib/VMCore/Attributes.cpp
+++ b/contrib/llvm/lib/VMCore/Attributes.cpp
@@ -15,8 +15,8 @@
#include "llvm/Type.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/System/Atomic.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Mutex.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
@@ -70,6 +70,8 @@ std::string Attribute::getAsString(Attributes Attrs) {
Result += "noimplicitfloat ";
if (Attrs & Attribute::Naked)
Result += "naked ";
+ if (Attrs & Attribute::Hotpatch)
+ Result += "hotpatch ";
if (Attrs & Attribute::StackAlignment) {
Result += "alignstack(";
Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs));
@@ -105,6 +107,14 @@ Attributes Attribute::typeIncompatible(const Type *Ty) {
//===----------------------------------------------------------------------===//
namespace llvm {
+ class AttributeListImpl;
+}
+
+static ManagedStatic<FoldingSet<AttributeListImpl> > AttributesLists;
+
+namespace llvm {
+static ManagedStatic<sys::SmartMutex<true> > ALMutex;
+
class AttributeListImpl : public FoldingSetNode {
sys::cas_flag RefCount;
@@ -120,10 +130,17 @@ public:
RefCount = 0;
}
- void AddRef() { sys::AtomicIncrement(&RefCount); }
+ void AddRef() {
+ sys::SmartScopedLock<true> Lock(*ALMutex);
+ ++RefCount;
+ }
void DropRef() {
- sys::cas_flag old = sys::AtomicDecrement(&RefCount);
- if (old == 0) delete this;
+ sys::SmartScopedLock<true> Lock(*ALMutex);
+ if (!AttributesLists.isConstructed())
+ return;
+ sys::cas_flag new_val = --RefCount;
+ if (new_val == 0)
+ delete this;
}
void Profile(FoldingSetNodeID &ID) const {
@@ -137,11 +154,8 @@ public:
};
}
-static ManagedStatic<sys::SmartMutex<true> > ALMutex;
-static ManagedStatic<FoldingSet<AttributeListImpl> > AttributesLists;
-
AttributeListImpl::~AttributeListImpl() {
- sys::SmartScopedLock<true> Lock(*ALMutex);
+ // NOTE: Lock must be acquired by caller.
AttributesLists->RemoveNode(this);
}
@@ -195,6 +209,7 @@ AttrListPtr::AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) {
}
const AttrListPtr &AttrListPtr::operator=(const AttrListPtr &RHS) {
+ sys::SmartScopedLock<true> Lock(*ALMutex);
if (AttrList == RHS.AttrList) return *this;
if (AttrList) AttrList->DropRef();
AttrList = RHS.AttrList;
diff --git a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
index 9330e14..b323540 100644
--- a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -288,37 +288,224 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
break;
case 'x':
// This fixes all MMX shift intrinsic instructions to take a
- // v1i64 instead of a v2i32 as the second parameter.
- if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
- (Name.compare(13,4,"psll", 4) == 0 ||
- Name.compare(13,4,"psra", 4) == 0 ||
- Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {
-
- const llvm::Type *VT =
- VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);
-
- // We don't have to do anything if the parameter already has
- // the correct type.
- if (FTy->getParamType(1) == VT)
+ // x86_mmx instead of a v1i64, v2i32, v4i16, or v8i8.
+ if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
+ const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
+
+ if (Name.compare(13, 4, "padd", 4) == 0 ||
+ Name.compare(13, 4, "psub", 4) == 0 ||
+ Name.compare(13, 4, "pmul", 4) == 0 ||
+ Name.compare(13, 5, "pmadd", 5) == 0 ||
+ Name.compare(13, 4, "pand", 4) == 0 ||
+ Name.compare(13, 3, "por", 3) == 0 ||
+ Name.compare(13, 4, "pxor", 4) == 0 ||
+ Name.compare(13, 4, "pavg", 4) == 0 ||
+ Name.compare(13, 4, "pmax", 4) == 0 ||
+ Name.compare(13, 4, "pmin", 4) == 0 ||
+ Name.compare(13, 4, "psad", 4) == 0 ||
+ Name.compare(13, 4, "psll", 4) == 0 ||
+ Name.compare(13, 4, "psrl", 4) == 0 ||
+ Name.compare(13, 4, "psra", 4) == 0 ||
+ Name.compare(13, 4, "pack", 4) == 0 ||
+ Name.compare(13, 6, "punpck", 6) == 0 ||
+ Name.compare(13, 4, "pcmp", 4) == 0) {
+ assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
+ const Type *SecondParamTy = X86_MMXTy;
+
+ if (Name.compare(13, 5, "pslli", 5) == 0 ||
+ Name.compare(13, 5, "psrli", 5) == 0 ||
+ Name.compare(13, 5, "psrai", 5) == 0)
+ SecondParamTy = FTy->getParamType(1);
+
+ // Don't do anything if it has the correct types.
+ if (FTy->getReturnType() == X86_MMXTy &&
+ FTy->getParamType(0) == X86_MMXTy &&
+ FTy->getParamType(1) == SecondParamTy)
+ break;
+
+ // We first need to change the name of the old (bad) intrinsic, because
+ // its type is incorrect, but we cannot overload that name. We
+ // arbitrarily unique it here allowing us to construct a correctly named
+ // and typed function below.
+ F->setName("");
+
+ // Now construct the new intrinsic with the correct name and type. We
+ // leave the old function around in order to query its type, whatever it
+ // may be, and correctly convert up to the new type.
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy, X86_MMXTy,
+ SecondParamTy, (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 8, "maskmovq", 8) == 0) {
+ // Don't do anything if it has the correct types.
+ if (FTy->getParamType(0) == X86_MMXTy &&
+ FTy->getParamType(1) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getReturnType(),
+ X86_MMXTy,
+ X86_MMXTy,
+ FTy->getParamType(2),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
+ if (FTy->getParamType(0) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getReturnType(),
+ X86_MMXTy,
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 5, "movnt", 5) == 0) {
+ if (FTy->getParamType(1) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getReturnType(),
+ FTy->getParamType(0),
+ X86_MMXTy,
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 7, "palignr", 7) == 0) {
+ if (FTy->getReturnType() == X86_MMXTy &&
+ FTy->getParamType(0) == X86_MMXTy &&
+ FTy->getParamType(1) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ X86_MMXTy,
+ X86_MMXTy,
+ FTy->getParamType(2),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 5, "pextr", 5) == 0) {
+ if (FTy->getParamType(0) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getReturnType(),
+ X86_MMXTy,
+ FTy->getParamType(1),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 5, "pinsr", 5) == 0) {
+ if (FTy->getReturnType() == X86_MMXTy &&
+ FTy->getParamType(0) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ X86_MMXTy,
+ FTy->getParamType(1),
+ FTy->getParamType(2),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
+ if (FTy->getReturnType() == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ FTy->getParamType(0),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
+ if (FTy->getParamType(0) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ FTy->getReturnType(),
+ X86_MMXTy,
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 8, "vec.init", 8) == 0) {
+ if (FTy->getReturnType() == X86_MMXTy)
+ break;
+
+ F->setName("");
+
+ if (Name.compare(21, 2, ".b", 2) == 0)
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ FTy->getParamType(0),
+ FTy->getParamType(1),
+ FTy->getParamType(2),
+ FTy->getParamType(3),
+ FTy->getParamType(4),
+ FTy->getParamType(5),
+ FTy->getParamType(6),
+ FTy->getParamType(7),
+ (Type*)0));
+ else if (Name.compare(21, 2, ".w", 2) == 0)
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ FTy->getParamType(0),
+ FTy->getParamType(1),
+ FTy->getParamType(2),
+ FTy->getParamType(3),
+ (Type*)0));
+ else if (Name.compare(21, 2, ".d", 2) == 0)
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ FTy->getParamType(0),
+ FTy->getParamType(1),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
+ if (FTy->getReturnType() == X86_MMXTy &&
+ FTy->getParamType(0) == X86_MMXTy)
+ break;
+
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(Name,
+ X86_MMXTy,
+ X86_MMXTy,
+ FTy->getParamType(1),
+ (Type*)0));
+ return true;
+ }
+
+ if (Name.compare(13, 9, "emms", 4) == 0 ||
+ Name.compare(13, 9, "femms", 5) == 0) {
+ NewFn = 0;
break;
-
- // We first need to change the name of the old (bad) intrinsic, because
- // its type is incorrect, but we cannot overload that name. We
- // arbitrarily unique it here allowing us to construct a correctly named
- // and typed function below.
- F->setName("");
+ }
- assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");
-
- // Now construct the new intrinsic with the correct name and type. We
- // leave the old function around in order to query its type, whatever it
- // may be, and correctly convert up to the new type.
- NewFn = cast<Function>(M->getOrInsertFunction(Name,
- FTy->getReturnType(),
- FTy->getParamType(0),
- VT,
- (Type *)0));
- return true;
+ // We really shouldn't get here ever.
+ assert(0 && "Invalid MMX intrinsic!");
+ break;
} else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
@@ -341,6 +528,16 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// or 0.
NewFn = 0;
return true;
+ } else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
+ // This is an SSE/MMX instruction.
+ const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
+ NewFn =
+ cast<Function>(M->getOrInsertFunction("llvm.x86.sse.pshuf.w",
+ X86_MMXTy,
+ X86_MMXTy,
+ Type::getInt8Ty(F->getContext()),
+ (Type*)0));
+ return true;
}
break;
@@ -432,6 +629,39 @@ static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
"upgraded."+CI->getName(), CI);
}
+/// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
+static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
+ Value **Operands, unsigned NumOps,
+ bool AssignName = true) {
+ // Construct a new CallInst.
+ CallInst *NewCI =
+ CallInst::Create(NewFn, Operands, Operands + NumOps,
+ AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
+
+ NewCI->setTailCall(OldCI->isTailCall());
+ NewCI->setCallingConv(OldCI->getCallingConv());
+
+ // Handle any uses of the old CallInst. If the type has changed, add a cast.
+ if (!OldCI->use_empty()) {
+ if (OldCI->getType() != NewCI->getType()) {
+ Function *OldFn = OldCI->getCalledFunction();
+ CastInst *RetCast =
+ CastInst::Create(CastInst::getCastOpcode(NewCI, true,
+ OldFn->getReturnType(), true),
+                         NewCI, OldFn->getReturnType(), NewCI->getName(), OldCI);
+
+ // Replace all uses of the old call with the new cast which has the
+ // correct type.
+ OldCI->replaceAllUsesWith(RetCast);
+ } else {
+ OldCI->replaceAllUsesWith(NewCI);
+ }
+ }
+
+ // Clean up the old call now that it has been completely upgraded.
+ OldCI->eraseFromParent();
+}
+
// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
// upgraded intrinsic. All argument and return casting must be provided in
// order to seamlessly integrate with existing context.
@@ -629,7 +859,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
- Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Value *SV = ConstantVector::get(Indices);
Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
Rep = Builder.CreateBitCast(Rep, F->getReturnType());
}
@@ -685,7 +915,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
- Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Value *SV = ConstantVector::get(Indices);
Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
Rep = Builder.CreateBitCast(Rep, F->getReturnType());
}
@@ -759,40 +989,265 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
break;
}
+ case Intrinsic::x86_mmx_padd_b:
+ case Intrinsic::x86_mmx_padd_w:
+ case Intrinsic::x86_mmx_padd_d:
+ case Intrinsic::x86_mmx_padd_q:
+ case Intrinsic::x86_mmx_padds_b:
+ case Intrinsic::x86_mmx_padds_w:
+ case Intrinsic::x86_mmx_paddus_b:
+ case Intrinsic::x86_mmx_paddus_w:
+ case Intrinsic::x86_mmx_psub_b:
+ case Intrinsic::x86_mmx_psub_w:
+ case Intrinsic::x86_mmx_psub_d:
+ case Intrinsic::x86_mmx_psub_q:
+ case Intrinsic::x86_mmx_psubs_b:
+ case Intrinsic::x86_mmx_psubs_w:
+ case Intrinsic::x86_mmx_psubus_b:
+ case Intrinsic::x86_mmx_psubus_w:
+ case Intrinsic::x86_mmx_pmulh_w:
+ case Intrinsic::x86_mmx_pmull_w:
+ case Intrinsic::x86_mmx_pmulhu_w:
+ case Intrinsic::x86_mmx_pmulu_dq:
+ case Intrinsic::x86_mmx_pmadd_wd:
+ case Intrinsic::x86_mmx_pand:
+ case Intrinsic::x86_mmx_pandn:
+ case Intrinsic::x86_mmx_por:
+ case Intrinsic::x86_mmx_pxor:
+ case Intrinsic::x86_mmx_pavg_b:
+ case Intrinsic::x86_mmx_pavg_w:
+ case Intrinsic::x86_mmx_pmaxu_b:
+ case Intrinsic::x86_mmx_pmaxs_w:
+ case Intrinsic::x86_mmx_pminu_b:
+ case Intrinsic::x86_mmx_pmins_w:
+ case Intrinsic::x86_mmx_psad_bw:
+ case Intrinsic::x86_mmx_psll_w:
case Intrinsic::x86_mmx_psll_d:
case Intrinsic::x86_mmx_psll_q:
- case Intrinsic::x86_mmx_psll_w:
- case Intrinsic::x86_mmx_psra_d:
- case Intrinsic::x86_mmx_psra_w:
+ case Intrinsic::x86_mmx_pslli_w:
+ case Intrinsic::x86_mmx_pslli_d:
+ case Intrinsic::x86_mmx_pslli_q:
+ case Intrinsic::x86_mmx_psrl_w:
case Intrinsic::x86_mmx_psrl_d:
case Intrinsic::x86_mmx_psrl_q:
- case Intrinsic::x86_mmx_psrl_w: {
+ case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psrli_d:
+ case Intrinsic::x86_mmx_psrli_q:
+ case Intrinsic::x86_mmx_psra_w:
+ case Intrinsic::x86_mmx_psra_d:
+ case Intrinsic::x86_mmx_psrai_w:
+ case Intrinsic::x86_mmx_psrai_d:
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packssdw:
+ case Intrinsic::x86_mmx_packuswb:
+ case Intrinsic::x86_mmx_punpckhbw:
+ case Intrinsic::x86_mmx_punpckhwd:
+ case Intrinsic::x86_mmx_punpckhdq:
+ case Intrinsic::x86_mmx_punpcklbw:
+ case Intrinsic::x86_mmx_punpcklwd:
+ case Intrinsic::x86_mmx_punpckldq:
+ case Intrinsic::x86_mmx_pcmpeq_b:
+ case Intrinsic::x86_mmx_pcmpeq_w:
+ case Intrinsic::x86_mmx_pcmpeq_d:
+ case Intrinsic::x86_mmx_pcmpgt_b:
+ case Intrinsic::x86_mmx_pcmpgt_w:
+ case Intrinsic::x86_mmx_pcmpgt_d: {
Value *Operands[2];
+ // Cast the operand to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+
+ switch (NewFn->getIntrinsicID()) {
+ default:
+ // Cast to the X86 MMX type.
+ Operands[1] = new BitCastInst(CI->getArgOperand(1),
+ NewFn->getFunctionType()->getParamType(1),
+ "upgraded.", CI);
+ break;
+ case Intrinsic::x86_mmx_pslli_w:
+ case Intrinsic::x86_mmx_pslli_d:
+ case Intrinsic::x86_mmx_pslli_q:
+ case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psrli_d:
+ case Intrinsic::x86_mmx_psrli_q:
+ case Intrinsic::x86_mmx_psrai_w:
+ case Intrinsic::x86_mmx_psrai_d:
+ // These take an i32 as their second parameter.
+ Operands[1] = CI->getArgOperand(1);
+ break;
+ }
+
+ ConstructNewCallInst(NewFn, CI, Operands, 2);
+ break;
+ }
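+  // Editor's sketch: after this upgrade an old-style call such as
+  //   %r = call <4 x i16> @llvm.x86.mmx.padd.w(<4 x i16> %a, <4 x i16> %b)
+  // becomes bitcasts of the operands to x86_mmx, a call to the re-created
+  // x86_mmx-typed intrinsic, and (inside ConstructNewCallInst) a bitcast of
+  // the result back to the caller's original <4 x i16> type.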
+ case Intrinsic::x86_mmx_maskmovq: {
+ Value *Operands[3];
+
+ // Cast the operands to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+ Operands[1] = new BitCastInst(CI->getArgOperand(1),
+ NewFn->getFunctionType()->getParamType(1),
+ "upgraded.", CI);
+ Operands[2] = CI->getArgOperand(2);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 3, false);
+ break;
+ }
+ case Intrinsic::x86_mmx_pmovmskb: {
+ Value *Operands[1];
+
+ // Cast the operand to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 1);
+ break;
+ }
+ case Intrinsic::x86_mmx_movnt_dq: {
+ Value *Operands[2];
+
Operands[0] = CI->getArgOperand(0);
-
- // Cast the second parameter to the correct type.
- BitCastInst *BC = new BitCastInst(CI->getArgOperand(1),
- NewFn->getFunctionType()->getParamType(1),
- "upgraded.", CI);
- Operands[1] = BC;
-
- // Construct a new CallInst
- CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2,
- "upgraded."+CI->getName(), CI);
- NewCI->setTailCall(CI->isTailCall());
- NewCI->setCallingConv(CI->getCallingConv());
-
- // Handle any uses of the old CallInst.
- if (!CI->use_empty())
- // Replace all uses of the old call with the new cast which has the
- // correct type.
- CI->replaceAllUsesWith(NewCI);
-
- // Clean up the old call now that it has been completely upgraded.
- CI->eraseFromParent();
+
+ // Cast the operand to the X86 MMX type.
+ Operands[1] = new BitCastInst(CI->getArgOperand(1),
+ NewFn->getFunctionType()->getParamType(1),
+ "upgraded.", CI);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 2, false);
break;
- }
+ }
+ case Intrinsic::x86_mmx_palignr_b: {
+ Value *Operands[3];
+
+ // Cast the operands to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+ Operands[1] = new BitCastInst(CI->getArgOperand(1),
+ NewFn->getFunctionType()->getParamType(1),
+ "upgraded.", CI);
+ Operands[2] = CI->getArgOperand(2);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 3);
+ break;
+ }
+ case Intrinsic::x86_mmx_pextr_w: {
+ Value *Operands[2];
+
+ // Cast the operands to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+ Operands[1] = CI->getArgOperand(1);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 2);
+ break;
+ }
+ case Intrinsic::x86_mmx_pinsr_w: {
+ Value *Operands[3];
+
+ // Cast the operands to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+ Operands[1] = CI->getArgOperand(1);
+ Operands[2] = CI->getArgOperand(2);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 3);
+ break;
+ }
+ case Intrinsic::x86_sse_pshuf_w: {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // Cast the operand to the X86 MMX type.
+ Value *Operands[2];
+ Operands[0] =
+ Builder.CreateBitCast(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.");
+ Operands[1] =
+ Builder.CreateTrunc(CI->getArgOperand(1),
+ Type::getInt8Ty(C),
+ "upgraded.");
+
+ ConstructNewCallInst(NewFn, CI, Operands, 2);
+ break;
+ }
+
+#if 0
+ case Intrinsic::x86_mmx_cvtsi32_si64: {
+ // The return type needs to be changed.
+ Value *Operands[1];
+ Operands[0] = CI->getArgOperand(0);
+ ConstructNewCallInst(NewFn, CI, Operands, 1);
+ break;
+ }
+ case Intrinsic::x86_mmx_cvtsi64_si32: {
+ Value *Operands[1];
+
+ // Cast the operand to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 1);
+ break;
+ }
+ case Intrinsic::x86_mmx_vec_init_b:
+ case Intrinsic::x86_mmx_vec_init_w:
+ case Intrinsic::x86_mmx_vec_init_d: {
+ // The return type needs to be changed.
+ Value *Operands[8];
+ unsigned NumOps = 0;
+
+ switch (NewFn->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::x86_mmx_vec_init_b: NumOps = 8; break;
+ case Intrinsic::x86_mmx_vec_init_w: NumOps = 4; break;
+ case Intrinsic::x86_mmx_vec_init_d: NumOps = 2; break;
+ }
+
+ switch (NewFn->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::x86_mmx_vec_init_b:
+ Operands[7] = CI->getArgOperand(7);
+ Operands[6] = CI->getArgOperand(6);
+ Operands[5] = CI->getArgOperand(5);
+ Operands[4] = CI->getArgOperand(4);
+ // FALLTHRU
+ case Intrinsic::x86_mmx_vec_init_w:
+ Operands[3] = CI->getArgOperand(3);
+ Operands[2] = CI->getArgOperand(2);
+ // FALLTHRU
+ case Intrinsic::x86_mmx_vec_init_d:
+ Operands[1] = CI->getArgOperand(1);
+ Operands[0] = CI->getArgOperand(0);
+ break;
+ }
+
+ ConstructNewCallInst(NewFn, CI, Operands, NumOps);
+ break;
+ }
+ case Intrinsic::x86_mmx_vec_ext_d: {
+ Value *Operands[2];
+
+ // Cast the operand to the X86 MMX type.
+ Operands[0] = new BitCastInst(CI->getArgOperand(0),
+ NewFn->getFunctionType()->getParamType(0),
+ "upgraded.", CI);
+ Operands[1] = CI->getArgOperand(1);
+
+ ConstructNewCallInst(NewFn, CI, Operands, 2);
+ break;
+ }
+#endif
+
case Intrinsic::ctlz:
case Intrinsic::ctpop:
case Intrinsic::cttz: {
diff --git a/contrib/llvm/lib/VMCore/BasicBlock.cpp b/contrib/llvm/lib/VMCore/BasicBlock.cpp
index 8ad5373..955a028 100644
--- a/contrib/llvm/lib/VMCore/BasicBlock.cpp
+++ b/contrib/llvm/lib/VMCore/BasicBlock.cpp
@@ -248,10 +248,11 @@ void BasicBlock::removePredecessor(BasicBlock *Pred,
// If all incoming values to the Phi are the same, we can replace the Phi
// with that value.
Value* PNV = 0;
- if (!DontDeleteUselessPHIs && (PNV = PN->hasConstantValue())) {
- PN->replaceAllUsesWith(PNV);
- PN->eraseFromParent();
- }
+ if (!DontDeleteUselessPHIs && (PNV = PN->hasConstantValue()))
+ if (PNV != PN) {
+ PN->replaceAllUsesWith(PNV);
+ PN->eraseFromParent();
+ }
}
}
}
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.cpp b/contrib/llvm/lib/VMCore/ConstantFold.cpp
index 9a91daf..573efb7 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.cpp
+++ b/contrib/llvm/lib/VMCore/ConstantFold.cpp
@@ -42,6 +42,10 @@ using namespace llvm;
/// input vector constant are all simple integer or FP values.
static Constant *BitCastConstantVector(ConstantVector *CV,
const VectorType *DstTy) {
+
+ if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
+ if (CV->isNullValue()) return Constant::getNullValue(DstTy);
+
// If this cast changes element count then we can't handle it here:
// doing so requires endianness information. This should be handled by
// Analysis/ConstantFolding.cpp
@@ -145,7 +149,7 @@ static Constant *FoldBitCast(Constant *V, const Type *DestTy) {
// This allows for other simplifications (although some of them
// can only be handled by Analysis/ConstantFolding.cpp).
if (isa<ConstantInt>(V) || isa<ConstantFP>(V))
- return ConstantExpr::getBitCast(ConstantVector::get(&V, 1), DestPTy);
+ return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy);
}
// Finally, implement bitcast folding now. The code below doesn't handle
@@ -202,7 +206,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
APInt V = CI->getValue();
if (ByteStart)
V = V.lshr(ByteStart*8);
- V.trunc(ByteSize*8);
+ V = V.trunc(ByteSize*8);
return ConstantInt::get(CI->getContext(), V);
}
@@ -511,10 +515,14 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
return Constant::getNullValue(DestTy);
return UndefValue::get(DestTy);
}
+
// No compile-time operations on this type yet.
if (V->getType()->isPPC_FP128Ty() || DestTy->isPPC_FP128Ty())
return 0;
+ if (V->isNullValue() && !DestTy->isX86_MMXTy())
+ return Constant::getNullValue(DestTy);
+
// If the cast operand is a constant expression, there's a few things we can
// do to try to simplify it.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
@@ -637,9 +645,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
case Instruction::SIToFP:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt api = CI->getValue();
- const uint64_t zero[] = {0, 0};
- APFloat apf = APFloat(APInt(DestTy->getPrimitiveSizeInBits(),
- 2, zero));
+ APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), true);
(void)apf.convertFromAPInt(api,
opc==Instruction::SIToFP,
APFloat::rmNearestTiesToEven);
@@ -649,25 +655,22 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
case Instruction::ZExt:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
- APInt Result(CI->getValue());
- Result.zext(BitWidth);
- return ConstantInt::get(V->getContext(), Result);
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().zext(BitWidth));
}
return 0;
case Instruction::SExt:
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
- APInt Result(CI->getValue());
- Result.sext(BitWidth);
- return ConstantInt::get(V->getContext(), Result);
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().sext(BitWidth));
}
return 0;
case Instruction::Trunc: {
uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- APInt Result(CI->getValue());
- Result.trunc(DestBitWidth);
- return ConstantInt::get(V->getContext(), Result);
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().trunc(DestBitWidth));
}
// The input must be a constantexpr. See if we can simplify this based on
@@ -690,10 +693,58 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
if (ConstantInt *CB = dyn_cast<ConstantInt>(Cond))
return CB->getZExtValue() ? V1 : V2;
+ // Check for zero aggregate and ConstantVector of zeros
+ if (Cond->isNullValue()) return V2;
+
+ if (ConstantVector* CondV = dyn_cast<ConstantVector>(Cond)) {
+
+ if (CondV->isAllOnesValue()) return V1;
+
+ const VectorType *VTy = cast<VectorType>(V1->getType());
+ ConstantVector *CP1 = dyn_cast<ConstantVector>(V1);
+ ConstantVector *CP2 = dyn_cast<ConstantVector>(V2);
+
+ if ((CP1 || isa<ConstantAggregateZero>(V1)) &&
+ (CP2 || isa<ConstantAggregateZero>(V2))) {
+
+ // Find the element type of the returned vector
+ const Type *EltTy = VTy->getElementType();
+ unsigned NumElem = VTy->getNumElements();
+ std::vector<Constant*> Res(NumElem);
+
+ bool Valid = true;
+ for (unsigned i = 0; i < NumElem; ++i) {
+ ConstantInt* c = dyn_cast<ConstantInt>(CondV->getOperand(i));
+ if (!c) {
+ Valid = false;
+ break;
+ }
+ Constant *C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
+ Constant *C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
+ Res[i] = c->getZExtValue() ? C1 : C2;
+ }
+ // If we were able to build the vector, return it
+ if (Valid) return ConstantVector::get(Res);
+ }
+ }
+
+
if (isa<UndefValue>(V1)) return V2;
if (isa<UndefValue>(V2)) return V1;
if (isa<UndefValue>(Cond)) return V1;
if (V1 == V2) return V1;
+
+ if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
+ if (TrueVal->getOpcode() == Instruction::Select)
+ if (TrueVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
+ }
+ if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
+ if (FalseVal->getOpcode() == Instruction::Select)
+ if (FalseVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
+ }
+
return 0;
}
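
The block added above teaches the constant folder to handle selects with a constant vector condition: a null mask yields the false operand, an all-ones mask the true operand, and a mixed i1 mask is resolved lane by lane, giving up if any lane is not a ConstantInt. A hedged sketch of exercising this through the public builder, using only 2.9-era APIs from this tree (the function name is illustrative):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // select <i1 1, i1 0>, <i32 1, i32 2>, <i32 3, i32 4>
    // should now fold to the mixed vector <i32 1, i32 4>.
    static Constant *selectFoldDemo(LLVMContext &Ctx) {
      const Type *I32 = Type::getInt32Ty(Ctx);
      Constant *MaskElts[] = { ConstantInt::getTrue(Ctx),
                               ConstantInt::getFalse(Ctx) };
      Constant *TElts[] = { ConstantInt::get(I32, 1), ConstantInt::get(I32, 2) };
      Constant *FElts[] = { ConstantInt::get(I32, 3), ConstantInt::get(I32, 4) };
      Constant *Mask = ConstantVector::get(ArrayRef<Constant*>(MaskElts, 2));
      Constant *T = ConstantVector::get(ArrayRef<Constant*>(TElts, 2));
      Constant *F = ConstantVector::get(ArrayRef<Constant*>(FElts, 2));
      return ConstantExpr::getSelect(Mask, T, F);
    }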
@@ -821,7 +872,7 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
Result.push_back(InElt);
}
- return ConstantVector::get(&Result[0], Result.size());
+ return ConstantVector::get(Result);
}
Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
@@ -982,8 +1033,8 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
return Constant::getNullValue(C1->getType()); // X lshr undef -> 0
// undef lshr X -> 0
case Instruction::AShr:
- if (!isa<UndefValue>(C2))
- return C1; // undef ashr X --> undef
+ if (!isa<UndefValue>(C2)) // undef ashr X --> all ones
+ return Constant::getAllOnesValue(C1->getType());
else if (isa<UndefValue>(C1))
return C1; // undef ashr undef -> undef
else
@@ -1343,8 +1394,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
// Given ((a + b) + c), if (b + c) folds to something interesting, return
// (a + (b + c)).
- if (Instruction::isAssociative(Opcode, C1->getType()) &&
- CE1->getOpcode() == Opcode) {
+ if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) {
Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
@@ -1413,7 +1463,7 @@ static bool isMaybeZeroSizedType(const Type *Ty) {
/// first is less than the second, return -1, if the second is less than the
/// first, return 1. If the constants are not integral, return -2.
///
-static int IdxCompare(Constant *C1, Constant *C2, const Type *ElTy) {
+static int IdxCompare(Constant *C1, Constant *C2, const Type *ElTy) {
if (C1 == C2) return 0;
// Ok, we found a different index. If they are not ConstantInt, we can't do
@@ -1896,11 +1946,11 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
// If we can constant fold the comparison of each element, constant fold
// the whole vector comparison.
SmallVector<Constant*, 4> ResElts;
- for (unsigned i = 0, e = C1Elts.size(); i != e; ++i) {
- // Compare the elements, producing an i1 result or constant expr.
+ // Compare the elements, producing an i1 result or constant expr.
+ for (unsigned i = 0, e = C1Elts.size(); i != e; ++i)
ResElts.push_back(ConstantExpr::getCompare(pred, C1Elts[i], C2Elts[i]));
- }
- return ConstantVector::get(&ResElts[0], ResElts.size());
+
+ return ConstantVector::get(ResElts);
}
if (C1->getType()->isFloatingPointTy()) {
@@ -1948,7 +1998,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
Result = 1;
break;
- case ICmpInst::ICMP_NE: // We know that C1 != C2
+ case FCmpInst::FCMP_ONE: // We know that C1 != C2
// We can only partially decide this relation.
if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ)
Result = 0;
@@ -2073,56 +2123,55 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
/// isInBoundsIndices - Test whether the given sequence of *normalized* indices
/// is "inbounds".
-static bool isInBoundsIndices(Constant *const *Idxs, size_t NumIdx) {
+template<typename IndexTy>
+static bool isInBoundsIndices(IndexTy const *Idxs, size_t NumIdx) {
// No indices means nothing that could be out of bounds.
if (NumIdx == 0) return true;
// If the first index is zero, it's in bounds.
- if (Idxs[0]->isNullValue()) return true;
+ if (cast<Constant>(Idxs[0])->isNullValue()) return true;
// If the first index is one and all the rest are zero, it's in bounds,
// by the one-past-the-end rule.
if (!cast<ConstantInt>(Idxs[0])->isOne())
return false;
for (unsigned i = 1, e = NumIdx; i != e; ++i)
- if (!Idxs[i]->isNullValue())
+ if (!cast<Constant>(Idxs[i])->isNullValue())
return false;
return true;
}
-Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
- bool inBounds,
- Constant* const *Idxs,
- unsigned NumIdx) {
+template<typename IndexTy>
+static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
+ bool inBounds,
+ IndexTy const *Idxs,
+ unsigned NumIdx) {
+ Constant *Idx0 = cast<Constant>(Idxs[0]);
if (NumIdx == 0 ||
- (NumIdx == 1 && Idxs[0]->isNullValue()))
+ (NumIdx == 1 && Idx0->isNullValue()))
return C;
if (isa<UndefValue>(C)) {
const PointerType *Ptr = cast<PointerType>(C->getType());
- const Type *Ty = GetElementPtrInst::getIndexedType(Ptr,
- (Value **)Idxs,
- (Value **)Idxs+NumIdx);
+ const Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs, Idxs+NumIdx);
assert(Ty != 0 && "Invalid indices for GEP!");
return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace()));
}
- Constant *Idx0 = Idxs[0];
if (C->isNullValue()) {
bool isNull = true;
for (unsigned i = 0, e = NumIdx; i != e; ++i)
- if (!Idxs[i]->isNullValue()) {
+ if (!cast<Constant>(Idxs[i])->isNullValue()) {
isNull = false;
break;
}
if (isNull) {
const PointerType *Ptr = cast<PointerType>(C->getType());
- const Type *Ty = GetElementPtrInst::getIndexedType(Ptr,
- (Value**)Idxs,
- (Value**)Idxs+NumIdx);
+ const Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs,
+ Idxs+NumIdx);
assert(Ty != 0 && "Invalid indices for GEP!");
- return ConstantPointerNull::get(
- PointerType::get(Ty,Ptr->getAddressSpace()));
+ return ConstantPointerNull::get(PointerType::get(Ty,
+ Ptr->getAddressSpace()));
}
}
@@ -2173,9 +2222,9 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
}
// Implement folding of:
- // int* getelementptr ([2 x int]* bitcast ([3 x int]* %X to [2 x int]*),
- // long 0, long 0)
- // To: int* getelementptr ([3 x int]* %X, long 0, long 0)
+ // i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*),
+ // i64 0, i64 0)
+ // To: i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
//
if (CE->isCast() && NumIdx > 1 && Idx0->isNullValue()) {
if (const PointerType *SPT =
@@ -2214,7 +2263,7 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
ATy->getNumElements());
NewIdxs[i] = ConstantExpr::getSRem(CI, Factor);
- Constant *PrevIdx = Idxs[i-1];
+ Constant *PrevIdx = cast<Constant>(Idxs[i-1]);
Constant *Div = ConstantExpr::getSDiv(CI, Factor);
// Before adding, extend both operands to i64 to avoid
@@ -2242,7 +2291,7 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
// If we did any factoring, start over with the adjusted indices.
if (!NewIdxs.empty()) {
for (unsigned i = 0; i != NumIdx; ++i)
- if (!NewIdxs[i]) NewIdxs[i] = Idxs[i];
+ if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
return inBounds ?
ConstantExpr::getInBoundsGetElementPtr(C, NewIdxs.data(),
NewIdxs.size()) :
@@ -2257,3 +2306,17 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
return 0;
}
+
+Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
+ bool inBounds,
+ Constant* const *Idxs,
+ unsigned NumIdx) {
+ return ConstantFoldGetElementPtrImpl(C, inBounds, Idxs, NumIdx);
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
+ bool inBounds,
+ Value* const *Idxs,
+ unsigned NumIdx) {
+ return ConstantFoldGetElementPtrImpl(C, inBounds, Idxs, NumIdx);
+}
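
The rewrite above is a template-plus-wrappers refactor: the folding logic is written once over an opaque index pointer type, and the two public entry points for `Constant* const*` and `Value* const*` indices forward to it, replacing the old `(Constant**)Idxs` and `(Value**)Idxs` casts. The shape in isolation, with standalone illustrative types:

    // One shared implementation, two thin overloads over pointer types.
    struct Base { int v; };
    struct Derived : Base {};

    template <typename T>
    static int sumImpl(T *const *Xs, unsigned N) {
      int S = 0;
      for (unsigned i = 0; i != N; ++i)
        S += Xs[i]->v;   // members reachable through either pointer type
      return S;
    }
    static int sum(Base *const *Xs, unsigned N)    { return sumImpl(Xs, N); }
    static int sum(Derived *const *Xs, unsigned N) { return sumImpl(Xs, N); }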
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.h b/contrib/llvm/lib/VMCore/ConstantFold.h
index d2dbbdd..0ecd7b4 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.h
+++ b/contrib/llvm/lib/VMCore/ConstantFold.h
@@ -49,6 +49,8 @@ namespace llvm {
Constant *C1, Constant *C2);
Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds,
Constant* const *Idxs, unsigned NumIdx);
+ Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds,
+ Value* const *Idxs, unsigned NumIdx);
} // End llvm namespace
#endif
diff --git a/contrib/llvm/lib/VMCore/Constants.cpp b/contrib/llvm/lib/VMCore/Constants.cpp
index 16eaca8..246fde1 100644
--- a/contrib/llvm/lib/VMCore/Constants.cpp
+++ b/contrib/llvm/lib/VMCore/Constants.cpp
@@ -40,22 +40,25 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
// Constructor to create a '0' constant of arbitrary type...
-static const uint64_t zero[2] = {0, 0};
Constant *Constant::getNullValue(const Type *Ty) {
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
return ConstantInt::get(Ty, 0);
case Type::FloatTyID:
- return ConstantFP::get(Ty->getContext(), APFloat(APInt(32, 0)));
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEsingle));
case Type::DoubleTyID:
- return ConstantFP::get(Ty->getContext(), APFloat(APInt(64, 0)));
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEdouble));
case Type::X86_FP80TyID:
- return ConstantFP::get(Ty->getContext(), APFloat(APInt(80, 2, zero)));
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::x87DoubleExtended));
case Type::FP128TyID:
return ConstantFP::get(Ty->getContext(),
- APFloat(APInt(128, 2, zero), true));
+ APFloat::getZero(APFloat::IEEEquad));
case Type::PPC_FP128TyID:
- return ConstantFP::get(Ty->getContext(), APFloat(APInt(128, 2, zero)));
+ return ConstantFP::get(Ty->getContext(),
+ APFloat(APInt::getNullValue(128)));
case Type::PointerTyID:
return ConstantPointerNull::get(cast<PointerType>(Ty));
case Type::StructTyID:
@@ -69,7 +72,7 @@ Constant *Constant::getNullValue(const Type *Ty) {
}
}
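
Constant::getNullValue's floating-point cases now go through APFloat::getZero with the proper fltSemantics instead of hand-assembled zero bit patterns like `APInt(80, 2, zero)`, removing the file-scope `zero` array and the chance of mismatched width/word-count arguments. The new idiom in isolation:

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    // Positive zero in each IEEE format via the semantics objects;
    // PPC double-double (above) still builds from a raw 128-bit APInt.
    static void zeroDemo() {
      APFloat ZF = APFloat::getZero(APFloat::IEEEsingle);
      APFloat ZD = APFloat::getZero(APFloat::IEEEdouble);
      APFloat ZX = APFloat::getZero(APFloat::x87DoubleExtended);
      APFloat ZQ = APFloat::getZero(APFloat::IEEEquad);
      (void)ZF; (void)ZD; (void)ZX; (void)ZQ;
    }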
-Constant* Constant::getIntegerValue(const Type *Ty, const APInt &V) {
+Constant *Constant::getIntegerValue(const Type *Ty, const APInt &V) {
const Type *ScalarTy = Ty->getScalarType();
// Create the base integer constant.
@@ -86,12 +89,18 @@ Constant* Constant::getIntegerValue(const Type *Ty, const APInt &V) {
return C;
}
-Constant* Constant::getAllOnesValue(const Type *Ty) {
+Constant *Constant::getAllOnesValue(const Type *Ty) {
if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
return ConstantInt::get(Ty->getContext(),
APInt::getAllOnesValue(ITy->getBitWidth()));
-
- std::vector<Constant*> Elts;
+
+ if (Ty->isFloatingPointTy()) {
+ APFloat FL = APFloat::getAllOnesValue(Ty->getPrimitiveSizeInBits(),
+ !Ty->isPPC_FP128Ty());
+ return ConstantFP::get(Ty->getContext(), FL);
+ }
+
+ SmallVector<Constant*, 16> Elts;
const VectorType *VTy = cast<VectorType>(Ty);
Elts.resize(VTy->getNumElements(), getAllOnesValue(VTy->getElementType()));
assert(Elts[0] && "Not a vector integer type!");
@@ -253,6 +262,59 @@ void Constant::getVectorElements(SmallVectorImpl<Constant*> &Elts) const {
}
+/// removeDeadUsersOfConstant - If the specified constantexpr is dead, remove
+/// it. This involves recursively eliminating any dead users of the
+/// constantexpr.
+static bool removeDeadUsersOfConstant(const Constant *C) {
+ if (isa<GlobalValue>(C)) return false; // Cannot remove this
+
+ while (!C->use_empty()) {
+ const Constant *User = dyn_cast<Constant>(C->use_back());
+ if (!User) return false; // Non-constant usage;
+ if (!removeDeadUsersOfConstant(User))
+ return false; // Constant wasn't dead
+ }
+
+ const_cast<Constant*>(C)->destroyConstant();
+ return true;
+}
+
+
+/// removeDeadConstantUsers - If there are any dead constant users dangling
+/// off of this constant, remove them. This method is useful for clients
+/// that want to check to see if a global is unused, but don't want to deal
+/// with potentially dead constants hanging off of the globals.
+void Constant::removeDeadConstantUsers() const {
+ Value::const_use_iterator I = use_begin(), E = use_end();
+ Value::const_use_iterator LastNonDeadUser = E;
+ while (I != E) {
+ const Constant *User = dyn_cast<Constant>(*I);
+ if (User == 0) {
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ if (!removeDeadUsersOfConstant(User)) {
+ // If the constant wasn't dead, remember that this was the last live use
+ // and move on to the next constant.
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ // If the constant was dead, then the iterator is invalidated.
+ if (LastNonDeadUser == E) {
+ I = use_begin();
+ if (I == E) break;
+ } else {
+ I = LastNonDeadUser;
+ ++I;
+ }
+ }
+}
+
+
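
removeDeadConstantUsers() exists so clients can ask whether a global is genuinely unused: dead ConstantExpr users (a stray bitcast of the global, say) would otherwise keep use_empty() returning false forever. The loop above is careful about iterator invalidation, restarting from use_begin() or resuming after the last known live user whenever a deletion occurs. A hypothetical call site, assuming a 2.9-era GlobalVariable:

    #include "llvm/GlobalVariable.h"
    using namespace llvm;

    // Is GV deletable? Strip dead constantexpr users before testing.
    static bool isRemovable(GlobalVariable *GV) {
      GV->removeDeadConstantUsers();
      return GV->use_empty();
    }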
//===----------------------------------------------------------------------===//
// ConstantInt
@@ -265,20 +327,16 @@ ConstantInt::ConstantInt(const IntegerType *Ty, const APInt& V)
ConstantInt* ConstantInt::getTrue(LLVMContext &Context) {
LLVMContextImpl *pImpl = Context.pImpl;
- if (pImpl->TheTrueVal)
- return pImpl->TheTrueVal;
- else
- return (pImpl->TheTrueVal =
- ConstantInt::get(IntegerType::get(Context, 1), 1));
+ if (!pImpl->TheTrueVal)
+ pImpl->TheTrueVal = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ return pImpl->TheTrueVal;
}
ConstantInt* ConstantInt::getFalse(LLVMContext &Context) {
LLVMContextImpl *pImpl = Context.pImpl;
- if (pImpl->TheFalseVal)
- return pImpl->TheFalseVal;
- else
- return (pImpl->TheFalseVal =
- ConstantInt::get(IntegerType::get(Context, 1), 0));
+ if (!pImpl->TheFalseVal)
+ pImpl->TheFalseVal = ConstantInt::get(Type::getInt1Ty(Context), 0);
+ return pImpl->TheFalseVal;
}
@@ -297,14 +355,14 @@ ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt& V) {
return Slot;
}
-Constant* ConstantInt::get(const Type* Ty, uint64_t V, bool isSigned) {
+Constant *ConstantInt::get(const Type* Ty, uint64_t V, bool isSigned) {
Constant *C = get(cast<IntegerType>(Ty->getScalarType()),
V, isSigned);
// For vectors, broadcast the value.
if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::get(
- std::vector<Constant *>(VTy->getNumElements(), C));
+ return ConstantVector::get(SmallVector<Constant*,
+ 16>(VTy->getNumElements(), C));
return C;
}
@@ -322,7 +380,7 @@ Constant *ConstantInt::getSigned(const Type *Ty, int64_t V) {
return get(Ty, V, true);
}
-Constant* ConstantInt::get(const Type* Ty, const APInt& V) {
+Constant *ConstantInt::get(const Type* Ty, const APInt& V) {
ConstantInt *C = get(Ty->getContext(), V);
assert(C->getType() == Ty->getScalarType() &&
"ConstantInt type doesn't match the type implied by its value!");
@@ -330,7 +388,7 @@ Constant* ConstantInt::get(const Type* Ty, const APInt& V) {
// For vectors, broadcast the value.
if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
- std::vector<Constant *>(VTy->getNumElements(), C));
+ SmallVector<Constant *, 16>(VTy->getNumElements(), C));
return C;
}
@@ -361,7 +419,7 @@ static const fltSemantics *TypeToFloatSemantics(const Type *Ty) {
/// get() - This returns a constant fp for the specified value in the
/// specified type. This should only be used for simple constant values like
/// 2.0/1.0 etc, that are known-valid both as double and as the target format.
-Constant* ConstantFP::get(const Type* Ty, double V) {
+Constant *ConstantFP::get(const Type* Ty, double V) {
LLVMContext &Context = Ty->getContext();
APFloat FV(V);
@@ -373,13 +431,13 @@ Constant* ConstantFP::get(const Type* Ty, double V) {
// For vectors, broadcast the value.
if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
- std::vector<Constant *>(VTy->getNumElements(), C));
+ SmallVector<Constant *, 16>(VTy->getNumElements(), C));
return C;
}
-Constant* ConstantFP::get(const Type* Ty, StringRef Str) {
+Constant *ConstantFP::get(const Type* Ty, StringRef Str) {
LLVMContext &Context = Ty->getContext();
APFloat FV(*TypeToFloatSemantics(Ty->getScalarType()), Str);
@@ -388,7 +446,7 @@ Constant* ConstantFP::get(const Type* Ty, StringRef Str) {
// For vectors, broadcast the value.
if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::get(
- std::vector<Constant *>(VTy->getNumElements(), C));
+ SmallVector<Constant *, 16>(VTy->getNumElements(), C));
return C;
}
@@ -402,12 +460,12 @@ ConstantFP* ConstantFP::getNegativeZero(const Type* Ty) {
}
-Constant* ConstantFP::getZeroValueForNegation(const Type* Ty) {
+Constant *ConstantFP::getZeroValueForNegation(const Type* Ty) {
if (const VectorType *PTy = dyn_cast<VectorType>(Ty))
if (PTy->getElementType()->isFloatingPointTy()) {
- std::vector<Constant*> zeros(PTy->getNumElements(),
+ SmallVector<Constant*, 16> zeros(PTy->getNumElements(),
getNegativeZero(PTy->getElementType()));
- return ConstantVector::get(PTy, zeros);
+ return ConstantVector::get(zeros);
}
if (Ty->isFloatingPointTy())
@@ -510,7 +568,7 @@ Constant *ConstantArray::get(const ArrayType *Ty,
}
-Constant* ConstantArray::get(const ArrayType* T, Constant* const* Vals,
+Constant *ConstantArray::get(const ArrayType* T, Constant *const* Vals,
unsigned NumVals) {
// FIXME: make this the primary ctor method.
return get(T, std::vector<Constant*>(Vals, Vals+NumVals));
@@ -522,7 +580,7 @@ Constant* ConstantArray::get(const ArrayType* T, Constant* const* Vals,
/// Otherwise, the length parameter specifies how much of the string to use
/// and it won't be null terminated.
///
-Constant* ConstantArray::get(LLVMContext &Context, StringRef Str,
+Constant *ConstantArray::get(LLVMContext &Context, StringRef Str,
bool AddNull) {
std::vector<Constant*> ElementVals;
ElementVals.reserve(Str.size() + size_t(AddNull));
@@ -558,7 +616,7 @@ ConstantStruct::ConstantStruct(const StructType *T,
}
// ConstantStruct accessors.
-Constant* ConstantStruct::get(const StructType* T,
+Constant *ConstantStruct::get(const StructType* T,
const std::vector<Constant*>& V) {
LLVMContextImpl* pImpl = T->getContext().pImpl;
@@ -570,7 +628,7 @@ Constant* ConstantStruct::get(const StructType* T,
return ConstantAggregateZero::get(T);
}
-Constant* ConstantStruct::get(LLVMContext &Context,
+Constant *ConstantStruct::get(LLVMContext &Context,
const std::vector<Constant*>& V, bool packed) {
std::vector<const Type*> StructEls;
StructEls.reserve(V.size());
@@ -579,8 +637,8 @@ Constant* ConstantStruct::get(LLVMContext &Context,
return get(StructType::get(Context, StructEls, packed), V);
}
-Constant* ConstantStruct::get(LLVMContext &Context,
- Constant* const *Vals, unsigned NumVals,
+Constant *ConstantStruct::get(LLVMContext &Context,
+ Constant *const *Vals, unsigned NumVals,
bool Packed) {
// FIXME: make this the primary ctor method.
return get(Context, std::vector<Constant*>(Vals, Vals+NumVals), Packed);
@@ -592,23 +650,22 @@ ConstantVector::ConstantVector(const VectorType *T,
OperandTraits<ConstantVector>::op_end(this) - V.size(),
V.size()) {
Use *OL = OperandList;
- for (std::vector<Constant*>::const_iterator I = V.begin(), E = V.end();
- I != E; ++I, ++OL) {
- Constant *C = *I;
- assert(C->getType() == T->getElementType() &&
+ for (std::vector<Constant*>::const_iterator I = V.begin(), E = V.end();
+ I != E; ++I, ++OL) {
+ Constant *C = *I;
+ assert(C->getType() == T->getElementType() &&
"Initializer for vector element doesn't match vector element type!");
*OL = C;
}
}
// ConstantVector accessors.
-Constant* ConstantVector::get(const VectorType* T,
- const std::vector<Constant*>& V) {
- assert(!V.empty() && "Vectors can't be empty");
- LLVMContext &Context = T->getContext();
- LLVMContextImpl *pImpl = Context.pImpl;
-
- // If this is an all-undef or alll-zero vector, return a
+Constant *ConstantVector::get(const VectorType *T,
+ const std::vector<Constant*> &V) {
+ assert(!V.empty() && "Vectors can't be empty");
+ LLVMContextImpl *pImpl = T->getContext().pImpl;
+
+ // If this is an all-undef or all-zero vector, return a
// ConstantAggregateZero or UndefValue.
Constant *C = V[0];
bool isZero = C->isNullValue();
@@ -630,61 +687,10 @@ Constant* ConstantVector::get(const VectorType* T,
return pImpl->VectorConstants.getOrCreate(T, V);
}
-Constant* ConstantVector::get(const std::vector<Constant*>& V) {
- assert(!V.empty() && "Cannot infer type if V is empty");
- return get(VectorType::get(V.front()->getType(),V.size()), V);
-}
-
-Constant* ConstantVector::get(Constant* const* Vals, unsigned NumVals) {
+Constant *ConstantVector::get(ArrayRef<Constant*> V) {
// FIXME: make this the primary ctor method.
- return get(std::vector<Constant*>(Vals, Vals+NumVals));
-}
-
-Constant* ConstantExpr::getNSWNeg(Constant* C) {
- assert(C->getType()->isIntOrIntVectorTy() &&
- "Cannot NEG a nonintegral value!");
- return getNSWSub(ConstantFP::getZeroValueForNegation(C->getType()), C);
-}
-
-Constant* ConstantExpr::getNUWNeg(Constant* C) {
- assert(C->getType()->isIntOrIntVectorTy() &&
- "Cannot NEG a nonintegral value!");
- return getNUWSub(ConstantFP::getZeroValueForNegation(C->getType()), C);
-}
-
-Constant* ConstantExpr::getNSWAdd(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Add, C1, C2,
- OverflowingBinaryOperator::NoSignedWrap);
-}
-
-Constant* ConstantExpr::getNUWAdd(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Add, C1, C2,
- OverflowingBinaryOperator::NoUnsignedWrap);
-}
-
-Constant* ConstantExpr::getNSWSub(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Sub, C1, C2,
- OverflowingBinaryOperator::NoSignedWrap);
-}
-
-Constant* ConstantExpr::getNUWSub(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Sub, C1, C2,
- OverflowingBinaryOperator::NoUnsignedWrap);
-}
-
-Constant* ConstantExpr::getNSWMul(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Mul, C1, C2,
- OverflowingBinaryOperator::NoSignedWrap);
-}
-
-Constant* ConstantExpr::getNUWMul(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::Mul, C1, C2,
- OverflowingBinaryOperator::NoUnsignedWrap);
-}
-
-Constant* ConstantExpr::getExactSDiv(Constant* C1, Constant* C2) {
- return getTy(C1->getType(), Instruction::SDiv, C1, C2,
- SDivOperator::IsExact);
+ assert(!V.empty() && "Vectors cannot be empty");
+ return get(VectorType::get(V.front()->getType(), V.size()), V.vec());
}
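
ConstantVector::get now takes a single ArrayRef<Constant*> and infers the vector type from the first element; the removed pointer-plus-length and type-inferring std::vector overloads fold into it, while the explicit (VectorType, vector) form above remains the primary ctor. Any contiguous container converts implicitly:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // C arrays and SmallVectors both feed the same ArrayRef overload.
    static Constant *splatDemo(LLVMContext &Ctx) {
      Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
      SmallVector<Constant*, 4> SV(4, One);
      (void)ConstantVector::get(SV);             // implicit ArrayRef
      Constant *Arr[] = { One, One };
      return ConstantVector::get(ArrayRef<Constant*>(Arr, 2));
    }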
// Utility function for determining if a ConstantExpr is a CastOp or not. This
@@ -812,7 +818,7 @@ ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
/// operands replaced with the specified values. The specified operands must
/// match count and type with the existing ones.
Constant *ConstantExpr::
-getWithOperands(Constant* const *Ops, unsigned NumOps) const {
+getWithOperands(Constant *const *Ops, unsigned NumOps) const {
assert(NumOps == getNumOperands() && "Operand count mismatch!");
bool AnyChange = false;
for (unsigned i = 0; i != NumOps; ++i) {
@@ -1034,7 +1040,7 @@ bool ConstantVector::isAllOnesValue() const {
/// getSplatValue - If this is a splat constant, where all of the
/// elements have the same value, return that value. Otherwise return null.
-Constant *ConstantVector::getSplatValue() {
+Constant *ConstantVector::getSplatValue() const {
// Check out first element.
Constant *Elt = getOperand(0);
// Then make sure all remaining elements point to the same value.
@@ -1241,7 +1247,7 @@ Constant *ConstantExpr::getFPCast(Constant *C, const Type *Ty) {
if (SrcBits == DstBits)
return C; // Avoid a useless cast
Instruction::CastOps opcode =
- (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt);
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt);
return getCast(opcode, C, Ty);
}
@@ -1482,7 +1488,7 @@ Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
return getTy(C1->getType(), Opcode, C1, C2, Flags);
}
-Constant* ConstantExpr::getSizeOf(const Type* Ty) {
+Constant *ConstantExpr::getSizeOf(const Type* Ty) {
// sizeof is implemented as: (i64) gep (Ty*)null, 1
// Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
@@ -1492,7 +1498,7 @@ Constant* ConstantExpr::getSizeOf(const Type* Ty) {
Type::getInt64Ty(Ty->getContext()));
}
-Constant* ConstantExpr::getAlignOf(const Type* Ty) {
+Constant *ConstantExpr::getAlignOf(const Type* Ty) {
// alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
// Note that a non-inbounds gep is used, as null isn't within any object.
const Type *AligningTy = StructType::get(Ty->getContext(),
@@ -1506,12 +1512,12 @@ Constant* ConstantExpr::getAlignOf(const Type* Ty) {
Type::getInt64Ty(Ty->getContext()));
}
-Constant* ConstantExpr::getOffsetOf(const StructType* STy, unsigned FieldNo) {
+Constant *ConstantExpr::getOffsetOf(const StructType* STy, unsigned FieldNo) {
return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()),
FieldNo));
}
-Constant* ConstantExpr::getOffsetOf(const Type* Ty, Constant *FieldNo) {
+Constant *ConstantExpr::getOffsetOf(const Type* Ty, Constant *FieldNo) {
// offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo
// Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx[] = {
@@ -1547,44 +1553,17 @@ Constant *ConstantExpr::getSelectTy(const Type *ReqTy, Constant *C,
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}
+template<typename IndexTy>
Constant *ConstantExpr::getGetElementPtrTy(const Type *ReqTy, Constant *C,
- Value* const *Idxs,
- unsigned NumIdx) {
- assert(GetElementPtrInst::getIndexedType(C->getType(), Idxs,
- Idxs+NumIdx) ==
- cast<PointerType>(ReqTy)->getElementType() &&
- "GEP indices invalid!");
-
- if (Constant *FC = ConstantFoldGetElementPtr(C, /*inBounds=*/false,
- (Constant**)Idxs, NumIdx))
- return FC; // Fold a few common cases...
-
- assert(C->getType()->isPointerTy() &&
- "Non-pointer type for constant GetElementPtr expression");
- // Look up the constant in the table first to ensure uniqueness
- std::vector<Constant*> ArgVec;
- ArgVec.reserve(NumIdx+1);
- ArgVec.push_back(C);
- for (unsigned i = 0; i != NumIdx; ++i)
- ArgVec.push_back(cast<Constant>(Idxs[i]));
- const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec);
-
- LLVMContextImpl *pImpl = ReqTy->getContext().pImpl;
- return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
-}
-
-Constant *ConstantExpr::getInBoundsGetElementPtrTy(const Type *ReqTy,
- Constant *C,
- Value *const *Idxs,
- unsigned NumIdx) {
+ IndexTy const *Idxs,
+ unsigned NumIdx, bool InBounds) {
assert(GetElementPtrInst::getIndexedType(C->getType(), Idxs,
Idxs+NumIdx) ==
cast<PointerType>(ReqTy)->getElementType() &&
"GEP indices invalid!");
- if (Constant *FC = ConstantFoldGetElementPtr(C, /*inBounds=*/true,
- (Constant**)Idxs, NumIdx))
- return FC; // Fold a few common cases...
+ if (Constant *FC = ConstantFoldGetElementPtr(C, InBounds, Idxs, NumIdx))
+ return FC; // Fold a few common cases.
assert(C->getType()->isPointerTy() &&
"Non-pointer type for constant GetElementPtr expression");
@@ -1595,42 +1574,31 @@ Constant *ConstantExpr::getInBoundsGetElementPtrTy(const Type *ReqTy,
for (unsigned i = 0; i != NumIdx; ++i)
ArgVec.push_back(cast<Constant>(Idxs[i]));
const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
- GEPOperator::IsInBounds);
+ InBounds ? GEPOperator::IsInBounds : 0);
LLVMContextImpl *pImpl = ReqTy->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}
-Constant *ConstantExpr::getGetElementPtr(Constant *C, Value* const *Idxs,
- unsigned NumIdx) {
+template<typename IndexTy>
+Constant *ConstantExpr::getGetElementPtrImpl(Constant *C, IndexTy const *Idxs,
+ unsigned NumIdx, bool InBounds) {
// Get the result type of the getelementptr!
const Type *Ty =
GetElementPtrInst::getIndexedType(C->getType(), Idxs, Idxs+NumIdx);
assert(Ty && "GEP indices invalid!");
unsigned As = cast<PointerType>(C->getType())->getAddressSpace();
- return getGetElementPtrTy(PointerType::get(Ty, As), C, Idxs, NumIdx);
+ return getGetElementPtrTy(PointerType::get(Ty, As), C, Idxs, NumIdx,InBounds);
}
-Constant *ConstantExpr::getInBoundsGetElementPtr(Constant *C,
- Value* const *Idxs,
- unsigned NumIdx) {
- // Get the result type of the getelementptr!
- const Type *Ty =
- GetElementPtrInst::getIndexedType(C->getType(), Idxs, Idxs+NumIdx);
- assert(Ty && "GEP indices invalid!");
- unsigned As = cast<PointerType>(C->getType())->getAddressSpace();
- return getInBoundsGetElementPtrTy(PointerType::get(Ty, As), C, Idxs, NumIdx);
-}
-
-Constant *ConstantExpr::getGetElementPtr(Constant *C, Constant* const *Idxs,
- unsigned NumIdx) {
- return getGetElementPtr(C, (Value* const *)Idxs, NumIdx);
+Constant *ConstantExpr::getGetElementPtr(Constant *C, Value* const *Idxs,
+ unsigned NumIdx, bool InBounds) {
+ return getGetElementPtrImpl(C, Idxs, NumIdx, InBounds);
}
-Constant *ConstantExpr::getInBoundsGetElementPtr(Constant *C,
- Constant* const *Idxs,
- unsigned NumIdx) {
- return getInBoundsGetElementPtr(C, (Value* const *)Idxs, NumIdx);
+Constant *ConstantExpr::getGetElementPtr(Constant *C, Constant *const *Idxs,
+ unsigned NumIdx, bool InBounds) {
+ return getGetElementPtrImpl(C, Idxs, NumIdx, InBounds);
}
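
The inbounds and plain GEP builders above now share one templated implementation keyed by an explicit InBounds flag; the header presumably defaults it to false, since existing callers such as LLVMConstGEP later in this diff still pass three arguments. A usage sketch, with C standing in for any constant pointer and the helper name purely illustrative:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // &C[0][0] as a constant inbounds GEP (2.9-era API).
    static Constant *firstElt(Constant *C) {
      Constant *Zero = ConstantInt::get(Type::getInt32Ty(C->getContext()), 0);
      Constant *Idxs[] = { Zero, Zero };
      return ConstantExpr::getGetElementPtr(C, Idxs, 2, /*InBounds=*/true);
    }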
Constant *
@@ -1804,98 +1772,111 @@ Constant *ConstantExpr::getExtractValue(Constant *Agg,
return getExtractValueTy(ReqTy, Agg, IdxList, NumIdx);
}
-Constant* ConstantExpr::getNeg(Constant* C) {
+Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) {
assert(C->getType()->isIntOrIntVectorTy() &&
"Cannot NEG a nonintegral value!");
- return get(Instruction::Sub,
- ConstantFP::getZeroValueForNegation(C->getType()),
- C);
+ return getSub(ConstantFP::getZeroValueForNegation(C->getType()),
+ C, HasNUW, HasNSW);
}
-Constant* ConstantExpr::getFNeg(Constant* C) {
+Constant *ConstantExpr::getFNeg(Constant *C) {
assert(C->getType()->isFPOrFPVectorTy() &&
"Cannot FNEG a non-floating-point value!");
- return get(Instruction::FSub,
- ConstantFP::getZeroValueForNegation(C->getType()),
- C);
+ return getFSub(ConstantFP::getZeroValueForNegation(C->getType()), C);
}
-Constant* ConstantExpr::getNot(Constant* C) {
+Constant *ConstantExpr::getNot(Constant *C) {
assert(C->getType()->isIntOrIntVectorTy() &&
"Cannot NOT a nonintegral value!");
return get(Instruction::Xor, C, Constant::getAllOnesValue(C->getType()));
}
-Constant* ConstantExpr::getAdd(Constant* C1, Constant* C2) {
- return get(Instruction::Add, C1, C2);
+Constant *ConstantExpr::getAdd(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Add, C1, C2, Flags);
}
-Constant* ConstantExpr::getFAdd(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getFAdd(Constant *C1, Constant *C2) {
return get(Instruction::FAdd, C1, C2);
}
-Constant* ConstantExpr::getSub(Constant* C1, Constant* C2) {
- return get(Instruction::Sub, C1, C2);
+Constant *ConstantExpr::getSub(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Sub, C1, C2, Flags);
}
-Constant* ConstantExpr::getFSub(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getFSub(Constant *C1, Constant *C2) {
return get(Instruction::FSub, C1, C2);
}
-Constant* ConstantExpr::getMul(Constant* C1, Constant* C2) {
- return get(Instruction::Mul, C1, C2);
+Constant *ConstantExpr::getMul(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Mul, C1, C2, Flags);
}
-Constant* ConstantExpr::getFMul(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getFMul(Constant *C1, Constant *C2) {
return get(Instruction::FMul, C1, C2);
}
-Constant* ConstantExpr::getUDiv(Constant* C1, Constant* C2) {
- return get(Instruction::UDiv, C1, C2);
+Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::UDiv, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
}
-Constant* ConstantExpr::getSDiv(Constant* C1, Constant* C2) {
- return get(Instruction::SDiv, C1, C2);
+Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::SDiv, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
}
-Constant* ConstantExpr::getFDiv(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getFDiv(Constant *C1, Constant *C2) {
return get(Instruction::FDiv, C1, C2);
}
-Constant* ConstantExpr::getURem(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getURem(Constant *C1, Constant *C2) {
return get(Instruction::URem, C1, C2);
}
-Constant* ConstantExpr::getSRem(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getSRem(Constant *C1, Constant *C2) {
return get(Instruction::SRem, C1, C2);
}
-Constant* ConstantExpr::getFRem(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getFRem(Constant *C1, Constant *C2) {
return get(Instruction::FRem, C1, C2);
}
-Constant* ConstantExpr::getAnd(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getAnd(Constant *C1, Constant *C2) {
return get(Instruction::And, C1, C2);
}
-Constant* ConstantExpr::getOr(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getOr(Constant *C1, Constant *C2) {
return get(Instruction::Or, C1, C2);
}
-Constant* ConstantExpr::getXor(Constant* C1, Constant* C2) {
+Constant *ConstantExpr::getXor(Constant *C1, Constant *C2) {
return get(Instruction::Xor, C1, C2);
}
-Constant* ConstantExpr::getShl(Constant* C1, Constant* C2) {
- return get(Instruction::Shl, C1, C2);
+Constant *ConstantExpr::getShl(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Shl, C1, C2, Flags);
}
-Constant* ConstantExpr::getLShr(Constant* C1, Constant* C2) {
- return get(Instruction::LShr, C1, C2);
+Constant *ConstantExpr::getLShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::LShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
}
-Constant* ConstantExpr::getAShr(Constant* C1, Constant* C2) {
- return get(Instruction::AShr, C1, C2);
+Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::AShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
}
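
The arithmetic and shift builders now accept the wrap and exactness bits as booleans and translate them into OverflowingBinaryOperator / PossiblyExactOperator flags internally, which is what allowed the dedicated getNSW*/getNUW*/getExactSDiv bodies to be deleted from this file earlier in the diff. In use (A and B are assumed to be i32 constants; the helper name is illustrative):

    #include "llvm/Constants.h"
    using namespace llvm;

    // nsw add, nuw shl and exact lshr via the new flag parameters.
    static void flagDemo(Constant *A, Constant *B) {
      (void)ConstantExpr::getAdd(A, B, /*HasNUW=*/false, /*HasNSW=*/true);
      (void)ConstantExpr::getShl(A, B, /*HasNUW=*/true, /*HasNSW=*/false);
      (void)ConstantExpr::getLShr(A, B, /*isExact=*/true);
    }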
// destroyConstant - Remove the constant from the constant table...
@@ -2127,7 +2108,8 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
Indices.push_back(Val);
}
Replacement = ConstantExpr::getGetElementPtr(Pointer,
- &Indices[0], Indices.size());
+ &Indices[0], Indices.size(),
+ cast<GEPOperator>(this)->isInBounds());
} else if (getOpcode() == Instruction::ExtractValue) {
Constant *Agg = getOperand(0);
if (Agg == From) Agg = To;
diff --git a/contrib/llvm/lib/VMCore/ConstantsContext.h b/contrib/llvm/lib/VMCore/ConstantsContext.h
index 1c04c3e..ffc673f 100644
--- a/contrib/llvm/lib/VMCore/ConstantsContext.h
+++ b/contrib/llvm/lib/VMCore/ConstantsContext.h
@@ -239,54 +239,64 @@ struct CompareConstantExpr : public ConstantExpr {
};
template <>
-struct OperandTraits<UnaryConstantExpr> : public FixedNumOperandTraits<1> {
+struct OperandTraits<UnaryConstantExpr> :
+ public FixedNumOperandTraits<UnaryConstantExpr, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryConstantExpr, Value)
template <>
-struct OperandTraits<BinaryConstantExpr> : public FixedNumOperandTraits<2> {
+struct OperandTraits<BinaryConstantExpr> :
+ public FixedNumOperandTraits<BinaryConstantExpr, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryConstantExpr, Value)
template <>
-struct OperandTraits<SelectConstantExpr> : public FixedNumOperandTraits<3> {
+struct OperandTraits<SelectConstantExpr> :
+ public FixedNumOperandTraits<SelectConstantExpr, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectConstantExpr, Value)
template <>
-struct OperandTraits<ExtractElementConstantExpr> : public FixedNumOperandTraits<2> {
+struct OperandTraits<ExtractElementConstantExpr> :
+ public FixedNumOperandTraits<ExtractElementConstantExpr, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementConstantExpr, Value)
template <>
-struct OperandTraits<InsertElementConstantExpr> : public FixedNumOperandTraits<3> {
+struct OperandTraits<InsertElementConstantExpr> :
+ public FixedNumOperandTraits<InsertElementConstantExpr, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementConstantExpr, Value)
template <>
-struct OperandTraits<ShuffleVectorConstantExpr> : public FixedNumOperandTraits<3> {
+struct OperandTraits<ShuffleVectorConstantExpr> :
+ public FixedNumOperandTraits<ShuffleVectorConstantExpr, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorConstantExpr, Value)
template <>
-struct OperandTraits<ExtractValueConstantExpr> : public FixedNumOperandTraits<1> {
+struct OperandTraits<ExtractValueConstantExpr> :
+ public FixedNumOperandTraits<ExtractValueConstantExpr, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractValueConstantExpr, Value)
template <>
-struct OperandTraits<InsertValueConstantExpr> : public FixedNumOperandTraits<2> {
+struct OperandTraits<InsertValueConstantExpr> :
+ public FixedNumOperandTraits<InsertValueConstantExpr, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueConstantExpr, Value)
template <>
-struct OperandTraits<GetElementPtrConstantExpr> : public VariadicOperandTraits<1> {
+struct OperandTraits<GetElementPtrConstantExpr> :
+ public VariadicOperandTraits<GetElementPtrConstantExpr, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrConstantExpr, Value)
template <>
-struct OperandTraits<CompareConstantExpr> : public FixedNumOperandTraits<2> {
+struct OperandTraits<CompareConstantExpr> :
+ public FixedNumOperandTraits<CompareConstantExpr, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CompareConstantExpr, Value)
diff --git a/contrib/llvm/lib/VMCore/Core.cpp b/contrib/llvm/lib/VMCore/Core.cpp
index 5aad19d..35c3a2e 100644
--- a/contrib/llvm/lib/VMCore/Core.cpp
+++ b/contrib/llvm/lib/VMCore/Core.cpp
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the C bindings for libLLVMCore.a, which implements
-// the LLVM intermediate representation.
+// This file implements the common infrastructure (including the C bindings)
+// for libLLVMCore.a, which implements the LLVM intermediate representation.
//
//===----------------------------------------------------------------------===//
@@ -28,12 +28,24 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
#include <cassert>
#include <cstdlib>
#include <cstring>
using namespace llvm;
+void llvm::initializeCore(PassRegistry &Registry) {
+ initializeDominatorTreePass(Registry);
+ initializePrintModulePassPass(Registry);
+ initializePrintFunctionPassPass(Registry);
+ initializeVerifierPass(Registry);
+ initializePreVerifierPass(Registry);
+}
+
+void LLVMInitializeCore(LLVMPassRegistryRef R) {
+ initializeCore(*unwrap(R));
+}
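
initializeCore registers the core passes (dominator tree, module and function printing, the verifier and pre-verifier) with a PassRegistry, and LLVMInitializeCore exposes the same through the C bindings, paired with the LLVMGetGlobalPassRegistry accessor added near the end of this file. Minimal client usage of the C API (compiles as C or C++):

    #include "llvm-c/Core.h"
    #include "llvm-c/Initialization.h"

    int main(void) {
      /* Register core passes with the global registry before creating
         pass managers through the C API. */
      LLVMInitializeCore(LLVMGetGlobalPassRegistry());
      return 0;
    }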
/*===-- Error handling ----------------------------------------------------===*/
@@ -116,6 +128,10 @@ LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) {
return wrap(unwrap(M)->getTypeByName(Name));
}
+const char *LLVMGetTypeName(LLVMModuleRef M, LLVMTypeRef Ty) {
+ return unwrap(M)->getTypeName(unwrap(Ty)).c_str();
+}
+
void LLVMDumpModule(LLVMModuleRef M) {
unwrap(M)->dump();
}
@@ -126,6 +142,12 @@ void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
}
+/*--.. Operations on module contexts ......................................--*/
+LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) {
+ return wrap(&unwrap(M)->getContext());
+}
+
+
/*===-- Operations on types -----------------------------------------------===*/
/*--.. Operations on all types (mostly) ....................................--*/
@@ -164,6 +186,8 @@ LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
return LLVMOpaqueTypeKind;
case Type::VectorTyID:
return LLVMVectorTypeKind;
+ case Type::X86_MMXTyID:
+ return LLVMX86_MMXTypeKind;
}
}
@@ -232,6 +256,9 @@ LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C) {
LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C) {
return (LLVMTypeRef) Type::getPPC_FP128Ty(*unwrap(C));
}
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C));
+}
LLVMTypeRef LLVMFloatType(void) {
return LLVMFloatTypeInContext(LLVMGetGlobalContext());
@@ -248,6 +275,9 @@ LLVMTypeRef LLVMFP128Type(void) {
LLVMTypeRef LLVMPPCFP128Type(void) {
return LLVMPPCFP128TypeInContext(LLVMGetGlobalContext());
}
+LLVMTypeRef LLVMX86MMXType(void) {
+ return LLVMX86MMXTypeInContext(LLVMGetGlobalContext());
+}
/*--.. Operations on function types ........................................--*/
@@ -527,6 +557,14 @@ LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), N, SignExtend != 0));
}
+LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
+ unsigned NumWords,
+ const uint64_t Words[]) {
+ IntegerType *Ty = unwrap<IntegerType>(IntTy);
+ return wrap(ConstantInt::get(Ty->getContext(),
+ APInt(Ty->getBitWidth(), NumWords, Words)));
+}
+
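
LLVMConstIntOfArbitraryPrecision lets C-API clients build integer constants wider than 64 bits; the words are passed least-significant first, matching the APInt constructor it wraps. For example, a 128-bit constant with high word 1 and low word 2 (the helper name is illustrative):

    #include <stdint.h>
    #include "llvm-c/Core.h"

    static LLVMValueRef make128(LLVMContextRef C) {
      uint64_t Words[2] = { 2, 1 };  /* low word first */
      return LLVMConstIntOfArbitraryPrecision(LLVMIntTypeInContext(C, 128),
                                              2, Words);
    }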
LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char Str[],
uint8_t Radix) {
return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str),
@@ -567,7 +605,7 @@ LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
LLVMBool DontNullTerminate) {
/* Inverted the sense of AddNull because ', 0)' is a
better mnemonic for null termination than ', 1)'. */
- return wrap(ConstantArray::get(*unwrap(C), std::string(Str, Length),
+ return wrap(ConstantArray::get(*unwrap(C), StringRef(Str, Length),
DontNullTerminate == 0));
}
LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
@@ -595,8 +633,8 @@ LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
Packed);
}
LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
- return wrap(ConstantVector::get(
- unwrap<Constant>(ScalarConstantVals, Size), Size));
+ return wrap(ConstantVector::get(ArrayRef<Constant*>(
+ unwrap<Constant>(ScalarConstantVals, Size), Size)));
}
/*--.. Constant expressions ................................................--*/
@@ -613,74 +651,62 @@ LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty) {
}
LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getNeg(
- unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getNeg(unwrap<Constant>(ConstantVal)));
}
LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getNSWNeg(
- unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getNSWNeg(unwrap<Constant>(ConstantVal)));
}
LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getNUWNeg(
- unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getNUWNeg(unwrap<Constant>(ConstantVal)));
}
LLVMValueRef LLVMConstFNeg(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getFNeg(
- unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getFNeg(unwrap<Constant>(ConstantVal)));
}
LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getNot(
- unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getNot(unwrap<Constant>(ConstantVal)));
}
LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getAdd(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getAdd(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNSWAdd(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNSWAdd(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNUWAdd(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNUWAdd(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNUWAdd(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstFAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getFAdd(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getFAdd(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getSub(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getSub(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNSWSub(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNSWSub(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNSWSub(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNUWSub(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNUWSub(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNUWSub(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
@@ -690,89 +716,75 @@ LLVMValueRef LLVMConstFSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
}
LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getMul(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getMul(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNSWMul(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNSWMul(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getNUWMul(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getNUWMul(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstFMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getFMul(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getFMul(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getUDiv(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getUDiv(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getSDiv(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getSDiv(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstExactSDiv(LLVMValueRef LHSConstant,
LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getExactSDiv(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getExactSDiv(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getFDiv(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getFDiv(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstURem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getURem(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getURem(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstSRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getSRem(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getSRem(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstFRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getFRem(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getFRem(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getAnd(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getAnd(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getOr(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getOr(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getXor(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getXor(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
@@ -791,27 +803,23 @@ LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate,
}
LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getShl(
- unwrap<Constant>(LHSConstant),
- unwrap<Constant>(RHSConstant)));
+ return wrap(ConstantExpr::getShl(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getLShr(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getLShr(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
- return wrap(ConstantExpr::getAShr(
- unwrap<Constant>(LHSConstant),
+ return wrap(ConstantExpr::getAShr(unwrap<Constant>(LHSConstant),
unwrap<Constant>(RHSConstant)));
}
LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices, unsigned NumIndices) {
- return wrap(ConstantExpr::getGetElementPtr(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getGetElementPtr(unwrap<Constant>(ConstantVal),
unwrap<Constant>(ConstantIndices,
NumIndices),
NumIndices));
@@ -826,38 +834,32 @@ LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
}
LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getTrunc(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getTrunc(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getSExt(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getSExt(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getZExt(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getZExt(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getFPTrunc(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getFPTrunc(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getFPExtend(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getFPExtend(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getUIToFP(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getUIToFP(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
@@ -872,92 +874,78 @@ LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
}
LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getFPToSI(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getFPToSI(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getPtrToInt(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getPtrToInt(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getIntToPtr(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getIntToPtr(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getBitCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getBitCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstZExtOrBitCast(LLVMValueRef ConstantVal,
LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getZExtOrBitCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getZExtOrBitCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstSExtOrBitCast(LLVMValueRef ConstantVal,
LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getSExtOrBitCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getSExtOrBitCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstTruncOrBitCast(LLVMValueRef ConstantVal,
LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getTruncOrBitCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getTruncOrBitCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstPointerCast(LLVMValueRef ConstantVal,
LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getPointerCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getPointerCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstIntCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType,
LLVMBool isSigned) {
- return wrap(ConstantExpr::getIntegerCast(
- unwrap<Constant>(ConstantVal),
- unwrap(ToType),
- isSigned));
+ return wrap(ConstantExpr::getIntegerCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType), isSigned));
}
LLVMValueRef LLVMConstFPCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
- return wrap(ConstantExpr::getFPCast(
- unwrap<Constant>(ConstantVal),
+ return wrap(ConstantExpr::getFPCast(unwrap<Constant>(ConstantVal),
unwrap(ToType)));
}
LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition,
LLVMValueRef ConstantIfTrue,
LLVMValueRef ConstantIfFalse) {
- return wrap(ConstantExpr::getSelect(
- unwrap<Constant>(ConstantCondition),
+ return wrap(ConstantExpr::getSelect(unwrap<Constant>(ConstantCondition),
unwrap<Constant>(ConstantIfTrue),
unwrap<Constant>(ConstantIfFalse)));
}
LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant,
LLVMValueRef IndexConstant) {
- return wrap(ConstantExpr::getExtractElement(
- unwrap<Constant>(VectorConstant),
+ return wrap(ConstantExpr::getExtractElement(unwrap<Constant>(VectorConstant),
unwrap<Constant>(IndexConstant)));
}
LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant,
LLVMValueRef ElementValueConstant,
LLVMValueRef IndexConstant) {
- return wrap(ConstantExpr::getInsertElement(
- unwrap<Constant>(VectorConstant),
+ return wrap(ConstantExpr::getInsertElement(unwrap<Constant>(VectorConstant),
unwrap<Constant>(ElementValueConstant),
unwrap<Constant>(IndexConstant)));
}
@@ -965,24 +953,21 @@ LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant,
LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant,
LLVMValueRef VectorBConstant,
LLVMValueRef MaskConstant) {
- return wrap(ConstantExpr::getShuffleVector(
- unwrap<Constant>(VectorAConstant),
+ return wrap(ConstantExpr::getShuffleVector(unwrap<Constant>(VectorAConstant),
unwrap<Constant>(VectorBConstant),
unwrap<Constant>(MaskConstant)));
}
LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList,
unsigned NumIdx) {
- return wrap(ConstantExpr::getExtractValue(
- unwrap<Constant>(AggConstant),
+ return wrap(ConstantExpr::getExtractValue(unwrap<Constant>(AggConstant),
IdxList, NumIdx));
}
LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant,
LLVMValueRef ElementValueConstant,
unsigned *IdxList, unsigned NumIdx) {
- return wrap(ConstantExpr::getInsertValue(
- unwrap<Constant>(AggConstant),
+ return wrap(ConstantExpr::getInsertValue(unwrap<Constant>(AggConstant),
unwrap<Constant>(ElementValueConstant),
IdxList, NumIdx));
}
@@ -2186,25 +2171,27 @@ LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- std::string Error;
- if (MemoryBuffer *MB = MemoryBuffer::getFile(Path, &Error)) {
- *OutMemBuf = wrap(MB);
+ OwningPtr<MemoryBuffer> MB;
+ error_code ec;
+ if (!(ec = MemoryBuffer::getFile(Path, MB))) {
+ *OutMemBuf = wrap(MB.take());
return 0;
}
-
- *OutMessage = strdup(Error.c_str());
+
+ *OutMessage = strdup(ec.message().c_str());
return 1;
}
LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- std::string Error;
- if (MemoryBuffer *MB = MemoryBuffer::getSTDIN(&Error)) {
- *OutMemBuf = wrap(MB);
+ OwningPtr<MemoryBuffer> MB;
+ error_code ec;
+ if (!(ec = MemoryBuffer::getSTDIN(MB))) {
+ *OutMemBuf = wrap(MB.take());
return 0;
}
- *OutMessage = strdup(Error.c_str());
+ *OutMessage = strdup(ec.message().c_str());
return 1;
}
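The hunks above move the C bindings from the old std::string out-parameter overloads to the error_code-returning MemoryBuffer API. A minimal sketch of the new calling convention on the C++ side, assuming the headers of this revision:

#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"

// Open a file with the error_code-based getFile(); on failure,
// ec.message() carries the reason that was previously returned through
// the std::string out-parameter.
static llvm::MemoryBuffer *openBuffer(const char *Path) {
  llvm::OwningPtr<llvm::MemoryBuffer> MB;
  if (llvm::error_code ec = llvm::MemoryBuffer::getFile(Path, MB)) {
    llvm::errs() << "error: " << ec.message() << "\n";
    return 0;
  }
  return MB.take(); // release ownership to the caller, as the binding does
}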
@@ -2212,6 +2199,11 @@ void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
delete unwrap(MemBuf);
}
+/*===-- Pass Registry -----------------------------------------------------===*/
+
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void) {
+ return wrap(PassRegistry::getPassRegistry());
+}
/*===-- Pass Manager ------------------------------------------------------===*/
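The new binding simply exposes the PassRegistry singleton through the C API. A hedged usage sketch, assuming the LLVMInitializeCore entry point declared in the new llvm-c/Initialization.h:

#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"

// Fetch the global registry and run core pass initialization against it,
// as an embedding client might do once at startup.
static void initCore(void) {
  LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
  LLVMInitializeCore(R);
}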
diff --git a/contrib/llvm/lib/VMCore/Dominators.cpp b/contrib/llvm/lib/VMCore/Dominators.cpp
index f3dad82..c374b06 100644
--- a/contrib/llvm/lib/VMCore/Dominators.cpp
+++ b/contrib/llvm/lib/VMCore/Dominators.cpp
@@ -19,10 +19,10 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DominatorInternals.h"
+#include "llvm/Assembly/Writer.h"
#include "llvm/Instructions.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CommandLine.h"
@@ -44,7 +44,7 @@ VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
//===----------------------------------------------------------------------===//
//
// Provide public access to DominatorTree information. Implementation details
-// can be found in DominatorCalculation.h.
+// can be found in DominatorInternals.h.
//
//===----------------------------------------------------------------------===//
@@ -53,7 +53,7 @@ TEMPLATE_INSTANTIATION(class llvm::DominatorTreeBase<BasicBlock>);
char DominatorTree::ID = 0;
INITIALIZE_PASS(DominatorTree, "domtree",
- "Dominator Tree Construction", true, true);
+ "Dominator Tree Construction", true, true)
bool DominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
@@ -67,7 +67,14 @@ void DominatorTree::verifyAnalysis() const {
DominatorTree OtherDT;
OtherDT.getBase().recalculate(F);
- assert(!compare(OtherDT) && "Invalid DominatorTree info!");
+ if (compare(OtherDT)) {
+ errs() << "DominatorTree is not up to date! Computed:\n";
+ print(errs());
+
+ errs() << "\nActual:\n";
+ OtherDT.print(errs());
+ abort();
+ }
}
void DominatorTree::print(raw_ostream &OS, const Module *) const {
@@ -98,263 +105,3 @@ bool DominatorTree::dominates(const Instruction *A, const Instruction *B) const{
return &*I == A;
}
-
-
-
-//===----------------------------------------------------------------------===//
-// DominanceFrontier Implementation
-//===----------------------------------------------------------------------===//
-
-char DominanceFrontier::ID = 0;
-INITIALIZE_PASS(DominanceFrontier, "domfrontier",
- "Dominance Frontier Construction", true, true);
-
-void DominanceFrontier::verifyAnalysis() const {
- if (!VerifyDomInfo) return;
-
- DominatorTree &DT = getAnalysis<DominatorTree>();
-
- DominanceFrontier OtherDF;
- const std::vector<BasicBlock*> &DTRoots = DT.getRoots();
- OtherDF.calculate(DT, DT.getNode(DTRoots[0]));
- assert(!compare(OtherDF) && "Invalid DominanceFrontier info!");
-}
-
-// NewBB is split and now it has one successor. Update dominance frontier to
-// reflect this change.
-void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
- assert(NewBB->getTerminator()->getNumSuccessors() == 1 &&
- "NewBB should have a single successor!");
- BasicBlock *NewBBSucc = NewBB->getTerminator()->getSuccessor(0);
-
- // NewBBSucc inherits original NewBB frontier.
- DominanceFrontier::iterator NewBBI = find(NewBB);
- if (NewBBI != end())
- addBasicBlock(NewBBSucc, NewBBI->second);
-
- // If NewBB dominates NewBBSucc, then DF(NewBB) is now going to be the
- // DF(NewBBSucc) without the stuff that the new block does not dominate
- // a predecessor of.
- DominatorTree &DT = getAnalysis<DominatorTree>();
- DomTreeNode *NewBBNode = DT.getNode(NewBB);
- DomTreeNode *NewBBSuccNode = DT.getNode(NewBBSucc);
- if (DT.dominates(NewBBNode, NewBBSuccNode)) {
- DominanceFrontier::iterator DFI = find(NewBBSucc);
- if (DFI != end()) {
- DominanceFrontier::DomSetType Set = DFI->second;
- // Filter out stuff in Set that we do not dominate a predecessor of.
- for (DominanceFrontier::DomSetType::iterator SetI = Set.begin(),
- E = Set.end(); SetI != E;) {
- bool DominatesPred = false;
- for (pred_iterator PI = pred_begin(*SetI), E = pred_end(*SetI);
- PI != E; ++PI)
- if (DT.dominates(NewBBNode, DT.getNode(*PI))) {
- DominatesPred = true;
- break;
- }
- if (!DominatesPred)
- Set.erase(SetI++);
- else
- ++SetI;
- }
-
- if (NewBBI != end()) {
- for (DominanceFrontier::DomSetType::iterator SetI = Set.begin(),
- E = Set.end(); SetI != E; ++SetI) {
- BasicBlock *SB = *SetI;
- addToFrontier(NewBBI, SB);
- }
- } else
- addBasicBlock(NewBB, Set);
- }
-
- } else {
- // DF(NewBB) is {NewBBSucc} because NewBB does not strictly dominate
- // NewBBSucc, but it does dominate itself (and there is an edge (NewBB ->
- // NewBBSucc)). NewBBSucc is the single successor of NewBB.
- DominanceFrontier::DomSetType NewDFSet;
- NewDFSet.insert(NewBBSucc);
- addBasicBlock(NewBB, NewDFSet);
- }
-
- // Now update dominance frontiers which either used to contain NewBBSucc
- // or which now need to include NewBB.
-
- // Collect the set of blocks which dominate a predecessor of NewBB or
- // NewSuccBB and which don't dominate both. This is an initial
- // approximation of the blocks whose dominance frontiers will need updates.
- SmallVector<DomTreeNode *, 16> AllPredDoms;
-
- // Compute the block which dominates both NewBBSucc and NewBB. This is
- // the immediate dominator of NewBBSucc unless NewBB dominates NewBBSucc.
- // The code below which climbs dominator trees will stop at this point,
- // because from this point up, dominance frontiers are unaffected.
- DomTreeNode *DominatesBoth = 0;
- if (NewBBSuccNode) {
- DominatesBoth = NewBBSuccNode->getIDom();
- if (DominatesBoth == NewBBNode)
- DominatesBoth = NewBBNode->getIDom();
- }
-
- // Collect the set of all blocks which dominate a predecessor of NewBB.
- SmallPtrSet<DomTreeNode *, 8> NewBBPredDoms;
- for (pred_iterator PI = pred_begin(NewBB), E = pred_end(NewBB); PI != E; ++PI)
- for (DomTreeNode *DTN = DT.getNode(*PI); DTN; DTN = DTN->getIDom()) {
- if (DTN == DominatesBoth)
- break;
- if (!NewBBPredDoms.insert(DTN))
- break;
- AllPredDoms.push_back(DTN);
- }
-
- // Collect the set of all blocks which dominate a predecessor of NewSuccBB.
- SmallPtrSet<DomTreeNode *, 8> NewBBSuccPredDoms;
- for (pred_iterator PI = pred_begin(NewBBSucc),
- E = pred_end(NewBBSucc); PI != E; ++PI)
- for (DomTreeNode *DTN = DT.getNode(*PI); DTN; DTN = DTN->getIDom()) {
- if (DTN == DominatesBoth)
- break;
- if (!NewBBSuccPredDoms.insert(DTN))
- break;
- if (!NewBBPredDoms.count(DTN))
- AllPredDoms.push_back(DTN);
- }
-
- // Visit all relevant dominance frontiers and make any needed updates.
- for (SmallVectorImpl<DomTreeNode *>::const_iterator I = AllPredDoms.begin(),
- E = AllPredDoms.end(); I != E; ++I) {
- DomTreeNode *DTN = *I;
- iterator DFI = find((*I)->getBlock());
-
- // Only consider nodes that have NewBBSucc in their dominator frontier.
- if (DFI == end() || !DFI->second.count(NewBBSucc)) continue;
-
- // If the block dominates a predecessor of NewBB but does not properly
- // dominate NewBB itself, add NewBB to its dominance frontier.
- if (NewBBPredDoms.count(DTN) &&
- !DT.properlyDominates(DTN, NewBBNode))
- addToFrontier(DFI, NewBB);
-
- // If the block does not dominate a predecessor of NewBBSucc or
- // properly dominates NewBBSucc itself, remove NewBBSucc from its
- // dominance frontier.
- if (!NewBBSuccPredDoms.count(DTN) ||
- DT.properlyDominates(DTN, NewBBSuccNode))
- removeFromFrontier(DFI, NewBBSucc);
- }
-}
-
-namespace {
- class DFCalculateWorkObject {
- public:
- DFCalculateWorkObject(BasicBlock *B, BasicBlock *P,
- const DomTreeNode *N,
- const DomTreeNode *PN)
- : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
- BasicBlock *currentBB;
- BasicBlock *parentBB;
- const DomTreeNode *Node;
- const DomTreeNode *parentNode;
- };
-}
-
-const DominanceFrontier::DomSetType &
-DominanceFrontier::calculate(const DominatorTree &DT,
- const DomTreeNode *Node) {
- BasicBlock *BB = Node->getBlock();
- DomSetType *Result = NULL;
-
- std::vector<DFCalculateWorkObject> workList;
- SmallPtrSet<BasicBlock *, 32> visited;
-
- workList.push_back(DFCalculateWorkObject(BB, NULL, Node, NULL));
- do {
- DFCalculateWorkObject *currentW = &workList.back();
- assert (currentW && "Missing work object.");
-
- BasicBlock *currentBB = currentW->currentBB;
- BasicBlock *parentBB = currentW->parentBB;
- const DomTreeNode *currentNode = currentW->Node;
- const DomTreeNode *parentNode = currentW->parentNode;
- assert (currentBB && "Invalid work object. Missing current Basic Block");
- assert (currentNode && "Invalid work object. Missing current Node");
- DomSetType &S = Frontiers[currentBB];
-
- // Visit each block only once.
- if (visited.count(currentBB) == 0) {
- visited.insert(currentBB);
-
- // Loop over CFG successors to calculate DFlocal[currentNode]
- for (succ_iterator SI = succ_begin(currentBB), SE = succ_end(currentBB);
- SI != SE; ++SI) {
- // Does Node immediately dominate this successor?
- if (DT[*SI]->getIDom() != currentNode)
- S.insert(*SI);
- }
- }
-
- // At this point, S is DFlocal. Now we union in DFup's of our children...
- // Loop through and visit the nodes that Node immediately dominates (Node's
- // children in the IDomTree)
- bool visitChild = false;
- for (DomTreeNode::const_iterator NI = currentNode->begin(),
- NE = currentNode->end(); NI != NE; ++NI) {
- DomTreeNode *IDominee = *NI;
- BasicBlock *childBB = IDominee->getBlock();
- if (visited.count(childBB) == 0) {
- workList.push_back(DFCalculateWorkObject(childBB, currentBB,
- IDominee, currentNode));
- visitChild = true;
- }
- }
-
- // If all children are visited or there is any child then pop this block
- // from the workList.
- if (!visitChild) {
-
- if (!parentBB) {
- Result = &S;
- break;
- }
-
- DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
- DomSetType &parentSet = Frontiers[parentBB];
- for (; CDFI != CDFE; ++CDFI) {
- if (!DT.properlyDominates(parentNode, DT[*CDFI]))
- parentSet.insert(*CDFI);
- }
- workList.pop_back();
- }
-
- } while (!workList.empty());
-
- return *Result;
-}
-
-void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
- for (const_iterator I = begin(), E = end(); I != E; ++I) {
- OS << " DomFrontier for BB ";
- if (I->first)
- WriteAsOperand(OS, I->first, false);
- else
- OS << " <<exit node>>";
- OS << " is:\t";
-
- const std::set<BasicBlock*> &BBs = I->second;
-
- for (std::set<BasicBlock*>::const_iterator I = BBs.begin(), E = BBs.end();
- I != E; ++I) {
- OS << ' ';
- if (*I)
- WriteAsOperand(OS, *I, false);
- else
- OS << "<<exit node>>";
- }
- OS << "\n";
- }
-}
-
-void DominanceFrontierBase::dump() const {
- print(dbgs());
-}
-
diff --git a/contrib/llvm/lib/VMCore/Function.cpp b/contrib/llvm/lib/VMCore/Function.cpp
index 8f94efc..00d1d78 100644
--- a/contrib/llvm/lib/VMCore/Function.cpp
+++ b/contrib/llvm/lib/VMCore/Function.cpp
@@ -20,8 +20,8 @@
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/StringPool.h"
-#include "llvm/System/RWMutex.h"
-#include "llvm/System/Threading.h"
+#include "llvm/Support/RWMutex.h"
+#include "llvm/Support/Threading.h"
#include "SymbolTableListTraitsImpl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringExtras.h"
@@ -227,19 +227,10 @@ void Function::dropAllReferences() {
for (iterator I = begin(), E = end(); I != E; ++I)
I->dropAllReferences();
- // Delete all basic blocks.
- while (!BasicBlocks.empty()) {
- // If there is still a reference to the block, it must be a 'blockaddress'
- // constant pointing to it. Just replace the BlockAddress with undef.
- BasicBlock *BB = BasicBlocks.begin();
- if (!BB->use_empty()) {
- BlockAddress *BA = cast<BlockAddress>(BB->use_back());
- BA->replaceAllUsesWith(UndefValue::get(BA->getType()));
- BA->destroyConstant();
- }
-
- BB->eraseFromParent();
- }
+ // Delete all basic blocks. They are now unused, except possibly by
+ // blockaddresses, but BasicBlock's destructor takes care of those.
+ while (!BasicBlocks.empty())
+ BasicBlocks.begin()->eraseFromParent();
}
void Function::addAttribute(unsigned i, Attributes attr) {
diff --git a/contrib/llvm/lib/VMCore/Globals.cpp b/contrib/llvm/lib/VMCore/Globals.cpp
index 96716ee..60000ad 100644
--- a/contrib/llvm/lib/VMCore/Globals.cpp
+++ b/contrib/llvm/lib/VMCore/Globals.cpp
@@ -26,23 +26,6 @@ using namespace llvm;
// GlobalValue Class
//===----------------------------------------------------------------------===//
-/// removeDeadUsersOfConstant - If the specified constantexpr is dead, remove
-/// it. This involves recursively eliminating any dead users of the
-/// constantexpr.
-static bool removeDeadUsersOfConstant(const Constant *C) {
- if (isa<GlobalValue>(C)) return false; // Cannot remove this
-
- while (!C->use_empty()) {
- const Constant *User = dyn_cast<Constant>(C->use_back());
- if (!User) return false; // Non-constant usage;
- if (!removeDeadUsersOfConstant(User))
- return false; // Constant wasn't dead
- }
-
- const_cast<Constant*>(C)->destroyConstant();
- return true;
-}
-
bool GlobalValue::isMaterializable() const {
return getParent() && getParent()->isMaterializable(this);
}
@@ -56,38 +39,6 @@ void GlobalValue::Dematerialize() {
getParent()->Dematerialize(this);
}
-/// removeDeadConstantUsers - If there are any dead constant users dangling
-/// off of this global value, remove them. This method is useful for clients
-/// that want to check to see if a global is unused, but don't want to deal
-/// with potentially dead constants hanging off of the globals.
-void GlobalValue::removeDeadConstantUsers() const {
- Value::const_use_iterator I = use_begin(), E = use_end();
- Value::const_use_iterator LastNonDeadUser = E;
- while (I != E) {
- if (const Constant *User = dyn_cast<Constant>(*I)) {
- if (!removeDeadUsersOfConstant(User)) {
- // If the constant wasn't dead, remember that this was the last live use
- // and move on to the next constant.
- LastNonDeadUser = I;
- ++I;
- } else {
- // If the constant was dead, then the iterator is invalidated.
- if (LastNonDeadUser == E) {
- I = use_begin();
- if (I == E) break;
- } else {
- I = LastNonDeadUser;
- ++I;
- }
- }
- } else {
- LastNonDeadUser = I;
- ++I;
- }
- }
-}
-
-
/// Override destroyConstant to make sure it doesn't get called on
/// GlobalValue's because they shouldn't be treated like other constants.
void GlobalValue::destroyConstant() {
diff --git a/contrib/llvm/lib/VMCore/IRBuilder.cpp b/contrib/llvm/lib/VMCore/IRBuilder.cpp
index c1b783c..595dea4 100644
--- a/contrib/llvm/lib/VMCore/IRBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/IRBuilder.cpp
@@ -15,6 +15,7 @@
#include "llvm/Support/IRBuilder.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
using namespace llvm;
@@ -36,3 +37,83 @@ const Type *IRBuilderBase::getCurrentFunctionReturnType() const {
assert(BB && BB->getParent() && "No current function!");
return BB->getParent()->getReturnType();
}
+
+Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
+ const PointerType *PT = cast<PointerType>(Ptr->getType());
+ if (PT->getElementType()->isIntegerTy(8))
+ return Ptr;
+
+ // Otherwise, we need to insert a bitcast.
+ PT = getInt8PtrTy(PT->getAddressSpace());
+ BitCastInst *BCI = new BitCastInst(Ptr, PT, "");
+ BB->getInstList().insert(InsertPt, BCI);
+ SetInstDebugLocation(BCI);
+ return BCI;
+}
+
+static CallInst *createCallHelper(Value *Callee, Value *const* Ops,
+ unsigned NumOps, IRBuilderBase *Builder) {
+ CallInst *CI = CallInst::Create(Callee, Ops, Ops + NumOps, "");
+ Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(),CI);
+ Builder->SetInstDebugLocation(CI);
+ return CI;
+}
+
+
+CallInst *IRBuilderBase::
+CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag) {
+ Ptr = getCastedInt8PtrValue(Ptr);
+ Value *Ops[] = { Ptr, Val, Size, getInt32(Align), getInt1(isVolatile) };
+ const Type *Tys[] = { Ptr->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, 5, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::
+CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
+ const Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys, 3);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, 5, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::
+CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
+ const Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys, 3);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, 5, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ return CI;
+}
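These helpers lower straight to the memory intrinsics: they bitcast the pointer operands to i8* via getCastedInt8PtrValue(), build the intrinsic call, and attach a TBAA tag when one is supplied. A minimal caller-side sketch, assuming an IRBuilder already positioned inside a function and a header default of null for the TBAA argument:

#include "llvm/Support/IRBuilder.h"

using namespace llvm;

// Emit a 16-byte, 4-aligned, non-volatile memcpy; Dst and Src may have
// any pointer type, since the helper inserts the i8* bitcasts itself.
static CallInst *emitCopy(IRBuilder<> &B, Value *Dst, Value *Src) {
  return B.CreateMemCpy(Dst, Src, B.getInt64(16), /*Align=*/4,
                        /*isVolatile=*/false);
}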
diff --git a/contrib/llvm/lib/VMCore/InlineAsm.cpp b/contrib/llvm/lib/VMCore/InlineAsm.cpp
index 69f713b..e4f99f0 100644
--- a/contrib/llvm/lib/VMCore/InlineAsm.cpp
+++ b/contrib/llvm/lib/VMCore/InlineAsm.cpp
@@ -47,26 +47,54 @@ InlineAsm::InlineAsm(const PointerType *Ty, const std::string &asmString,
}
void InlineAsm::destroyConstant() {
+ getRawType()->getContext().pImpl->InlineAsms.remove(this);
delete this;
}
const FunctionType *InlineAsm::getFunctionType() const {
return cast<FunctionType>(getType()->getElementType());
}
+
+/// Default constructor.
+InlineAsm::ConstraintInfo::ConstraintInfo() :
+ Type(isInput), isEarlyClobber(false),
+ MatchingInput(-1), isCommutative(false),
+ isIndirect(false), isMultipleAlternative(false),
+ currentAlternativeIndex(0) {
+}
+
+/// Copy constructor.
+InlineAsm::ConstraintInfo::ConstraintInfo(const ConstraintInfo &other) :
+ Type(other.Type), isEarlyClobber(other.isEarlyClobber),
+ MatchingInput(other.MatchingInput), isCommutative(other.isCommutative),
+ isIndirect(other.isIndirect), Codes(other.Codes),
+ isMultipleAlternative(other.isMultipleAlternative),
+ multipleAlternatives(other.multipleAlternatives),
+ currentAlternativeIndex(other.currentAlternativeIndex) {
+}
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
- std::vector<InlineAsm::ConstraintInfo> &ConstraintsSoFar) {
+ InlineAsm::ConstraintInfoVector &ConstraintsSoFar) {
StringRef::iterator I = Str.begin(), E = Str.end();
+ unsigned multipleAlternativeCount = Str.count('|') + 1;
+ unsigned multipleAlternativeIndex = 0;
+ ConstraintCodeVector *pCodes = &Codes;
// Initialize
+ isMultipleAlternative = (multipleAlternativeCount > 1 ? true : false);
+ if (isMultipleAlternative) {
+ multipleAlternatives.resize(multipleAlternativeCount);
+ pCodes = &multipleAlternatives[0].Codes;
+ }
Type = isInput;
isEarlyClobber = false;
MatchingInput = -1;
isCommutative = false;
isIndirect = false;
+ currentAlternativeIndex = 0;
// Parse prefixes.
if (*I == '~') {
@@ -120,15 +148,15 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
// Find the end of the register name.
StringRef::iterator ConstraintEnd = std::find(I+1, E, '}');
if (ConstraintEnd == E) return true; // "{foo"
- Codes.push_back(std::string(I, ConstraintEnd+1));
+ pCodes->push_back(std::string(I, ConstraintEnd+1));
I = ConstraintEnd+1;
} else if (isdigit(*I)) { // Matching Constraint
// Maximal munch numbers.
StringRef::iterator NumStart = I;
while (I != E && isdigit(*I))
++I;
- Codes.push_back(std::string(NumStart, I));
- unsigned N = atoi(Codes.back().c_str());
+ pCodes->push_back(std::string(NumStart, I));
+ unsigned N = atoi(pCodes->back().c_str());
// Check that this is a valid matching constraint!
if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput||
Type != isInput)
@@ -136,14 +164,26 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
// If Operand N already has a matching input, reject this. An output
// can't be constrained to the same value as multiple inputs.
- if (ConstraintsSoFar[N].hasMatchingInput())
- return true;
-
- // Note that operand #n has a matching input.
- ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+ if (isMultipleAlternative) {
+ InlineAsm::SubConstraintInfo &scInfo =
+ ConstraintsSoFar[N].multipleAlternatives[multipleAlternativeIndex];
+ if (scInfo.MatchingInput != -1)
+ return true;
+ // Note that operand #n has a matching input.
+ scInfo.MatchingInput = ConstraintsSoFar.size();
+ } else {
+ if (ConstraintsSoFar[N].hasMatchingInput())
+ return true;
+ // Note that operand #n has a matching input.
+ ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+ }
+ } else if (*I == '|') {
+ multipleAlternativeIndex++;
+ pCodes = &multipleAlternatives[multipleAlternativeIndex].Codes;
+ ++I;
} else {
// Single letter constraint.
- Codes.push_back(std::string(I, I+1));
+ pCodes->push_back(std::string(I, I+1));
++I;
}
}
@@ -151,9 +191,21 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
return false;
}
-std::vector<InlineAsm::ConstraintInfo>
+/// selectAlternative - Point this constraint to the alternative constraint
+/// indicated by the index.
+void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) {
+ if (index < multipleAlternatives.size()) {
+ currentAlternativeIndex = index;
+ InlineAsm::SubConstraintInfo &scInfo =
+ multipleAlternatives[currentAlternativeIndex];
+ MatchingInput = scInfo.MatchingInput;
+ Codes = scInfo.Codes;
+ }
+}
+
+InlineAsm::ConstraintInfoVector
InlineAsm::ParseConstraints(StringRef Constraints) {
- std::vector<ConstraintInfo> Result;
+ ConstraintInfoVector Result;
// Scan the constraints string.
for (StringRef::iterator I = Constraints.begin(),
@@ -183,13 +235,12 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
return Result;
}
-
/// Verify - Verify that the specified constraint string is reasonable for the
/// specified function type, and otherwise validate the constraint string.
bool InlineAsm::Verify(const FunctionType *Ty, StringRef ConstStr) {
if (Ty->isVarArg()) return false;
- std::vector<ConstraintInfo> Constraints = ParseConstraints(ConstStr);
+ ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
// Error parsing constraints.
if (Constraints.empty() && !ConstStr.empty()) return false;
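With ConstraintInfoVector and the '|' handling above, a constraint string such as "=r|m,r|m" parses into per-alternative code lists, and selectAlternative() later commits one column by copying its codes and matching-input index into the top-level fields. A sketch of inspecting the parse, assuming this revision's llvm/InlineAsm.h:

#include "llvm/ADT/StringRef.h"
#include "llvm/InlineAsm.h"

using namespace llvm;

// Report whether any operand of the constraint string offers multiple
// alternatives, e.g. both register ('r') and memory ('m') forms.
static bool hasAlternatives(StringRef Constraints) {
  InlineAsm::ConstraintInfoVector CV =
      InlineAsm::ParseConstraints(Constraints); // e.g. "=r|m,r|m"
  for (unsigned i = 0, e = CV.size(); i != e; ++i)
    if (CV[i].isMultipleAlternative)
      return true;
  return false;
}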
diff --git a/contrib/llvm/lib/VMCore/Instruction.cpp b/contrib/llvm/lib/VMCore/Instruction.cpp
index 05bed4c..2c8b8b2 100644
--- a/contrib/llvm/lib/VMCore/Instruction.cpp
+++ b/contrib/llvm/lib/VMCore/Instruction.cpp
@@ -200,12 +200,10 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
if (const CallInst *CI = dyn_cast<CallInst>(this))
return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<CallInst>(I)->getAttributes().getRawPointer();
+ CI->getAttributes() == cast<CallInst>(I)->getAttributes();
if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<InvokeInst>(I)->getAttributes().getRawPointer();
+ CI->getAttributes() == cast<InvokeInst>(I)->getAttributes();
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this)) {
if (IVI->getNumIndices() != cast<InsertValueInst>(I)->getNumIndices())
return false;
@@ -253,12 +251,11 @@ bool Instruction::isSameOperationAs(const Instruction *I) const {
if (const CallInst *CI = dyn_cast<CallInst>(this))
return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<CallInst>(I)->getAttributes().getRawPointer();
+ CI->getAttributes() == cast<CallInst>(I)->getAttributes();
if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
- CI->getAttributes().getRawPointer() ==
- cast<InvokeInst>(I)->getAttributes().getRawPointer();
+ CI->getAttributes() ==
+ cast<InvokeInst>(I)->getAttributes();
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this)) {
if (IVI->getNumIndices() != cast<InsertValueInst>(I)->getNumIndices())
return false;
@@ -348,7 +345,7 @@ bool Instruction::mayThrow() const {
///
/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
///
-bool Instruction::isAssociative(unsigned Opcode, const Type *Ty) {
+bool Instruction::isAssociative(unsigned Opcode) {
return Opcode == And || Opcode == Or || Opcode == Xor ||
Opcode == Add || Opcode == Mul;
}
@@ -398,25 +395,10 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
return Op && !Op->isNullValue() && !Op->isAllOnesValue();
}
case Load: {
- if (cast<LoadInst>(this)->isVolatile())
+ const LoadInst *LI = cast<LoadInst>(this);
+ if (LI->isVolatile())
return false;
- // Note that it is not safe to speculate into a malloc'd region because
- // malloc may return null.
- // It's also not safe to follow a bitcast, for example:
- // bitcast i8* (alloca i8) to i32*
- // would result in a 4-byte load from a 1-byte alloca.
- Value *Op0 = getOperand(0);
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0)) {
- // TODO: it's safe to do this for any GEP with constant indices that
- // compute inside the allocated type, but not for any inbounds gep.
- if (GEP->hasAllZeroIndices())
- Op0 = GEP->getPointerOperand();
- }
- if (isa<AllocaInst>(Op0))
- return true;
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(getOperand(0)))
- return !GV->hasExternalWeakLinkage();
- return false;
+ return LI->getPointerOperand()->isDereferenceablePointer();
}
case Call:
return false; // The called function could have undefined behavior or
diff --git a/contrib/llvm/lib/VMCore/Instructions.cpp b/contrib/llvm/lib/VMCore/Instructions.cpp
index 401802e..d129028 100644
--- a/contrib/llvm/lib/VMCore/Instructions.cpp
+++ b/contrib/llvm/lib/VMCore/Instructions.cpp
@@ -19,7 +19,6 @@
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
@@ -97,8 +96,7 @@ PHINode::PHINode(const PHINode &PN)
}
PHINode::~PHINode() {
- if (OperandList)
- dropHungoffUses(OperandList);
+ dropHungoffUses();
}
// removeIncomingValue - Remove an incoming value. This is useful if a
@@ -159,66 +157,18 @@ void PHINode::resizeOperands(unsigned NumOps) {
Use *NewOps = allocHungoffUses(NumOps);
std::copy(OldOps, OldOps + e, NewOps);
OperandList = NewOps;
- if (OldOps) Use::zap(OldOps, OldOps + e, true);
+ Use::zap(OldOps, OldOps + e, true);
}
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
-///
-/// If the PHI has undef operands, but all the rest of the operands are
-/// some unique value, return that value if it can be proved that the
-/// value dominates the PHI. If DT is null, use a conservative check,
-/// otherwise use DT to test for dominance.
-///
-Value *PHINode::hasConstantValue(DominatorTree *DT) const {
- // If the PHI node only has one incoming value, eliminate the PHI node.
- if (getNumIncomingValues() == 1) {
- if (getIncomingValue(0) != this) // not X = phi X
- return getIncomingValue(0);
- return UndefValue::get(getType()); // Self cycle is dead.
- }
-
- // Otherwise if all of the incoming values are the same for the PHI, replace
- // the PHI node with the incoming value.
- //
- Value *InVal = 0;
- bool HasUndefInput = false;
- for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i)
- if (isa<UndefValue>(getIncomingValue(i))) {
- HasUndefInput = true;
- } else if (getIncomingValue(i) != this) { // Not the PHI node itself...
- if (InVal && getIncomingValue(i) != InVal)
- return 0; // Not the same, bail out.
- InVal = getIncomingValue(i);
- }
-
- // The only case that could cause InVal to be null is if we have a PHI node
- // that only has entries for itself. In this case, there is no entry into the
- // loop, so kill the PHI.
- //
- if (InVal == 0) InVal = UndefValue::get(getType());
-
- // If we have a PHI node like phi(X, undef, X), where X is defined by some
- // instruction, we cannot always return X as the result of the PHI node. Only
- // do this if X is not an instruction (thus it must dominate the PHI block),
- // or if the client is prepared to deal with this possibility.
- if (!HasUndefInput || !isa<Instruction>(InVal))
- return InVal;
-
- Instruction *IV = cast<Instruction>(InVal);
- if (DT) {
- // We have a DominatorTree. Do a precise test.
- if (!DT->dominates(IV, this))
- return 0;
- } else {
- // If it is in the entry block, it obviously dominates everything.
- if (IV->getParent() != &IV->getParent()->getParent()->getEntryBlock() ||
- isa<InvokeInst>(IV))
- return 0; // Cannot guarantee that InVal dominates this PHINode.
- }
-
- // All of the incoming values are the same, return the value now.
- return InVal;
+Value *PHINode::hasConstantValue() const {
+ // Exploit the fact that phi nodes always have at least one entry.
+ Value *ConstantValue = getIncomingValue(0);
+ for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
+ if (getIncomingValue(i) != ConstantValue)
+ return 0; // Incoming values not all the same.
+ return ConstantValue;
}
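The rewritten hasConstantValue() only reports a PHI whose incoming values are all literally identical; the old undef- and dominance-aware folding now has to live in the callers. A conservative sketch of the dropped undef handling, under the simplifying assumption that the caller can tolerate an instruction result that may not dominate the PHI:

#include "llvm/Constants.h"
#include "llvm/Instructions.h"

using namespace llvm;

// Treat undef incoming values and self-references as wildcards and look
// for one remaining unique value, as the removed code did before its
// dominance checks.
static Value *uniqueNonUndefValue(const PHINode *PN) {
  Value *Unique = 0;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || V == PN)
      continue;                 // wildcard: undef or the PHI itself
    if (Unique && V != Unique)
      return 0;                 // two distinct real values, no fold
    Unique = V;
  }
  return Unique;                // 0 if every input was undef or a self-loop
}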
@@ -235,7 +185,7 @@ void CallInst::init(Value *Func, Value* const *Params, unsigned NumParams) {
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
- FTy = FTy; // silence warning.
+ (void)FTy; // silence warning.
assert((NumParams == FTy->getNumParams() ||
(FTy->isVarArg() && NumParams > FTy->getNumParams())) &&
@@ -256,7 +206,7 @@ void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
- FTy = FTy; // silence warning.
+ (void)FTy; // silence warning.
assert((FTy->getNumParams() == 2 ||
(FTy->isVarArg() && FTy->getNumParams() < 2)) &&
@@ -276,7 +226,7 @@ void CallInst::init(Value *Func, Value *Actual) {
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
- FTy = FTy; // silence warning.
+ (void)FTy; // silence warning.
assert((FTy->getNumParams() == 1 ||
(FTy->isVarArg() && FTy->getNumParams() == 0)) &&
@@ -292,7 +242,7 @@ void CallInst::init(Value *Func) {
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
- FTy = FTy; // silence warning.
+ (void)FTy; // silence warning.
assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
}
@@ -549,7 +499,7 @@ void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
Op<-1>() = IfException;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType());
- FTy = FTy; // silence warning.
+ (void)FTy; // silence warning.
assert(((NumArgs == FTy->getNumParams()) ||
(FTy->isVarArg() && NumArgs > FTy->getNumParams())) &&
@@ -779,31 +729,6 @@ BranchInst::BranchInst(const BranchInst &BI) :
SubclassOptionalData = BI.SubclassOptionalData;
}
-
-Use* Use::getPrefix() {
- PointerIntPair<Use**, 2, PrevPtrTag> &PotentialPrefix(this[-1].Prev);
- if (PotentialPrefix.getOpaqueValue())
- return 0;
-
- return reinterpret_cast<Use*>((char*)&PotentialPrefix + 1);
-}
-
-BranchInst::~BranchInst() {
- if (NumOperands == 1) {
- if (Use *Prefix = OperandList->getPrefix()) {
- Op<-1>() = 0;
- //
- // mark OperandList to have a special value for scrutiny
- // by baseclass destructors and operator delete
- OperandList = Prefix;
- } else {
- NumOperands = 3;
- OperandList = op_begin();
- }
- }
-}
-
-
BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
return getSuccessor(idx);
}
@@ -899,7 +824,7 @@ void AllocaInst::setAlignment(unsigned Align) {
bool AllocaInst::isArrayAllocation() const {
if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
- return CI->getZExtValue() != 1;
+ return !CI->isOne();
return true;
}
@@ -1248,6 +1173,12 @@ const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
}
const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
+ Constant* const *Idxs,
+ unsigned NumIdx) {
+ return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
+}
+
+const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
uint64_t const *Idxs,
unsigned NumIdx) {
return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
@@ -1473,6 +1404,8 @@ int ShuffleVectorInst::getMaskValue(unsigned i) const {
void InsertValueInst::init(Value *Agg, Value *Val, const unsigned *Idx,
unsigned NumIdx, const Twine &Name) {
assert(NumOperands == 2 && "NumOperands not initialized?");
+ assert(ExtractValueInst::getIndexedType(Agg->getType(), Idx, Idx + NumIdx) ==
+ Val->getType() && "Inserted value must match indexed type!");
Op<0>() = Agg;
Op<1>() = Val;
@@ -1483,6 +1416,8 @@ void InsertValueInst::init(Value *Agg, Value *Val, const unsigned *Idx,
void InsertValueInst::init(Value *Agg, Value *Val, unsigned Idx,
const Twine &Name) {
assert(NumOperands == 2 && "NumOperands not initialized?");
+ assert(ExtractValueInst::getIndexedType(Agg->getType(), Idx) == Val->getType()
+ && "Inserted value must match indexed type!");
Op<0>() = Agg;
Op<1>() = Val;
@@ -1555,13 +1490,26 @@ ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
const Type* ExtractValueInst::getIndexedType(const Type *Agg,
const unsigned *Idxs,
unsigned NumIdx) {
- unsigned CurIdx = 0;
- for (; CurIdx != NumIdx; ++CurIdx) {
- const CompositeType *CT = dyn_cast<CompositeType>(Agg);
- if (!CT || CT->isPointerTy() || CT->isVectorTy()) return 0;
+ for (unsigned CurIdx = 0; CurIdx != NumIdx; ++CurIdx) {
unsigned Index = Idxs[CurIdx];
- if (!CT->indexValid(Index)) return 0;
- Agg = CT->getTypeAtIndex(Index);
+ // We can't use CompositeType::indexValid(Index) here.
+ // indexValid() always returns true for arrays because getelementptr allows
+ // out-of-bounds indices. Since we don't allow those for extractvalue and
+ // insertvalue we need to check array indexing manually.
+ // Since the only other types we can index into are struct types it's just
+ // as easy to check those manually as well.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
+ if (Index >= AT->getNumElements())
+ return 0;
+ } else if (const StructType *ST = dyn_cast<StructType>(Agg)) {
+ if (Index >= ST->getNumElements())
+ return 0;
+ } else {
+ // Not a valid type to index into.
+ return 0;
+ }
+
+ Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
// If the new type forwards to another type, then it is in the middle
// of being refined to another type (and hence, may have dropped all
@@ -1570,7 +1518,7 @@ const Type* ExtractValueInst::getIndexedType(const Type *Agg,
if (const Type *Ty = Agg->getForwardedType())
Agg = Ty;
}
- return CurIdx == NumIdx ? Agg : 0;
+ return Agg;
}
const Type* ExtractValueInst::getIndexedType(const Type *Agg,
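The manual bounds check means an out-of-range struct or array index now makes getIndexedType() return null, where indexValid() would have accepted any array index (getelementptr tolerates out-of-bounds indices; extractvalue and insertvalue do not). A small sketch using the single-index overload:

#include "llvm/Instructions.h"

using namespace llvm;

// True iff extracting element Idx from aggregate type Agg is legal,
// i.e. Idx is in bounds for the struct or array being indexed.
static bool validExtract(const Type *Agg, unsigned Idx) {
  return ExtractValueInst::getIndexedType(Agg, Idx) != 0;
}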
@@ -1611,7 +1559,7 @@ BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
void BinaryOperator::init(BinaryOps iType) {
Value *LHS = getOperand(0), *RHS = getOperand(1);
- LHS = LHS; RHS = RHS; // Silence warnings.
+ (void)LHS; (void)RHS; // Silence warnings.
assert(LHS->getType() == RHS->getType() &&
"Binary operator operand types must match!");
#ifndef NDEBUG
@@ -1874,7 +1822,7 @@ void BinaryOperator::setHasNoSignedWrap(bool b) {
}
void BinaryOperator::setIsExact(bool b) {
- cast<SDivOperator>(this)->setIsExact(b);
+ cast<PossiblyExactOperator>(this)->setIsExact(b);
}
bool BinaryOperator::hasNoUnsignedWrap() const {
@@ -1886,7 +1834,7 @@ bool BinaryOperator::hasNoSignedWrap() const {
}
bool BinaryOperator::isExact() const {
- return cast<SDivOperator>(this)->isExact();
+ return cast<PossiblyExactOperator>(this)->isExact();
}
//===----------------------------------------------------------------------===//
@@ -2360,6 +2308,8 @@ bool CastInst::isCastable(const Type *SrcTy, const Type *DestTy) {
} else { // Casting from something else
return false;
}
+ } else if (DestTy->isX86_MMXTy()) {
+ return SrcBits == 64;
} else { // Casting to something else
return false;
}
@@ -2441,6 +2391,10 @@ CastInst::getCastOpcode(
return BitCast; // vector -> vector
} else if (DestPTy->getBitWidth() == SrcBits) {
return BitCast; // float/int -> vector
+ } else if (SrcTy->isX86_MMXTy()) {
+ assert(DestPTy->getBitWidth()==64 &&
+ "Casting X86_MMX to vector of wrong width");
+ return BitCast; // MMX to 64-bit vector
} else {
assert(!"Illegal cast to vector (wrong type or size)");
}
@@ -2452,6 +2406,14 @@ CastInst::getCastOpcode(
} else {
assert(!"Casting pointer to other than pointer or int");
}
+ } else if (DestTy->isX86_MMXTy()) {
+ if (isa<VectorType>(SrcTy)) {
+ assert(cast<VectorType>(SrcTy)->getBitWidth() == 64 &&
+ "Casting vector of wrong width to X86_MMX");
+ return BitCast; // 64-bit vector to MMX
+ } else {
+ assert(!"Illegal cast to X86_MMX");
+ }
} else {
assert(!"Casting to type that is not first-class");
}
@@ -2754,14 +2716,14 @@ void CmpInst::swapOperands() {
cast<FCmpInst>(this)->swapOperands();
}
-bool CmpInst::isCommutative() {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
+bool CmpInst::isCommutative() const {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
return IC->isCommutative();
return cast<FCmpInst>(this)->isCommutative();
}
-bool CmpInst::isEquality() {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
+bool CmpInst::isEquality() const {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
return IC->isEquality();
return cast<FCmpInst>(this)->isEquality();
}
@@ -2974,9 +2936,9 @@ bool CmpInst::isFalseWhenEqual(unsigned short predicate) {
// SwitchInst Implementation
//===----------------------------------------------------------------------===//
-void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumCases) {
- assert(Value && Default);
- ReservedSpace = 2+NumCases*2;
+void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
+ assert(Value && Default && NumReserved);
+ ReservedSpace = NumReserved;
NumOperands = 2;
OperandList = allocHungoffUses(ReservedSpace);
@@ -2992,7 +2954,7 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
0, 0, InsertBefore) {
- init(Value, Default, NumCases);
+ init(Value, Default, 2+NumCases*2);
}
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
@@ -3003,14 +2965,15 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
0, 0, InsertAtEnd) {
- init(Value, Default, NumCases);
+ init(Value, Default, 2+NumCases*2);
}
SwitchInst::SwitchInst(const SwitchInst &SI)
- : TerminatorInst(Type::getVoidTy(SI.getContext()), Instruction::Switch,
- allocHungoffUses(SI.getNumOperands()), SI.getNumOperands()) {
+ : TerminatorInst(SI.getType(), Instruction::Switch, 0, 0) {
+ init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
+ NumOperands = SI.getNumOperands();
Use *OL = OperandList, *InOL = SI.OperandList;
- for (unsigned i = 0, E = SI.getNumOperands(); i != E; i+=2) {
+ for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
OL[i] = InOL[i];
OL[i+1] = InOL[i+1];
}
@@ -3018,7 +2981,7 @@ SwitchInst::SwitchInst(const SwitchInst &SI)
}
SwitchInst::~SwitchInst() {
- dropHungoffUses(OperandList);
+ dropHungoffUses();
}
@@ -3046,14 +3009,10 @@ void SwitchInst::removeCase(unsigned idx) {
unsigned NumOps = getNumOperands();
Use *OL = OperandList;
- // Move everything after this operand down.
- //
- // FIXME: we could just swap with the end of the list, then erase. However,
- // client might not expect this to happen. The code as it is thrashes the
- // use/def lists, which is kinda lame.
- for (unsigned i = (idx+1)*2; i != NumOps; i += 2) {
- OL[i-2] = OL[i];
- OL[i-2+1] = OL[i+1];
+ // Overwrite this case with the end of the list.
+ if ((idx + 1) * 2 != NumOps) {
+ OL[idx * 2] = OL[NumOps - 2];
+ OL[idx * 2 + 1] = OL[NumOps - 1];
}
// Nuke the last value.
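The new removeCase() copies the final (value, successor) pair over the removed slot instead of sliding every later pair down, giving up case order to touch only one use/def entry. The same idiom in isolation:

#include <vector>

// Swap-with-last erase: overwrite the doomed element with the final one,
// then pop. Order is not preserved, but no other element moves.
template <typename T>
static void swapRemove(std::vector<T> &V, size_t Idx) {
  if (Idx + 1 != V.size())
    V[Idx] = V.back();
  V.pop_back();
}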
@@ -3089,7 +3048,7 @@ void SwitchInst::resizeOperands(unsigned NumOps) {
NewOps[i] = OldOps[i];
}
OperandList = NewOps;
- if (OldOps) Use::zap(OldOps, OldOps + e, true);
+ Use::zap(OldOps, OldOps + e, true);
}
@@ -3104,7 +3063,7 @@ void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
}
//===----------------------------------------------------------------------===//
-// SwitchInst Implementation
+// IndirectBrInst Implementation
//===----------------------------------------------------------------------===//
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
@@ -3144,7 +3103,7 @@ void IndirectBrInst::resizeOperands(unsigned NumOps) {
for (unsigned i = 0; i != e; ++i)
NewOps[i] = OldOps[i];
OperandList = NewOps;
- if (OldOps) Use::zap(OldOps, OldOps + e, true);
+ Use::zap(OldOps, OldOps + e, true);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
@@ -3172,7 +3131,7 @@ IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
}
IndirectBrInst::~IndirectBrInst() {
- dropHungoffUses(OperandList);
+ dropHungoffUses();
}
/// addDestination - Add a destination.
@@ -3346,8 +3305,7 @@ ReturnInst *ReturnInst::clone_impl() const {
}
BranchInst *BranchInst::clone_impl() const {
- unsigned Ops(getNumOperands());
- return new(Ops, Ops == 1) BranchInst(*this);
+ return new(getNumOperands()) BranchInst(*this);
}
SwitchInst *SwitchInst::clone_impl() const {
diff --git a/contrib/llvm/lib/VMCore/LLVMContext.cpp b/contrib/llvm/lib/VMCore/LLVMContext.cpp
index 563c651..1bd497d 100644
--- a/contrib/llvm/lib/VMCore/LLVMContext.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContext.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/SourceMgr.h"
#include "LLVMContextImpl.h"
+#include <cctype>
using namespace llvm;
static ManagedStatic<LLVMContext> GlobalContext;
@@ -28,25 +29,42 @@ LLVMContext& llvm::getGlobalContext() {
}
LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
- // Create the first metadata kind, which is always 'dbg'.
+ // Create the fixed metadata kinds. This is done in the same order as the
+ // MD_* enum values so that they correspond.
+
+ // Create the 'dbg' metadata kind.
unsigned DbgID = getMDKindID("dbg");
assert(DbgID == MD_dbg && "dbg kind id drifted"); (void)DbgID;
+
+ // Create the 'tbaa' metadata kind.
+ unsigned TBAAID = getMDKindID("tbaa");
+ assert(TBAAID == MD_tbaa && "tbaa kind id drifted"); (void)TBAAID;
}
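Because the fixed kinds are registered in MD_* enum order, clients can attach metadata through the enum without a string lookup. A minimal sketch:

#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"

using namespace llvm;

// Attach a TBAA tag through the fixed kind ID; the constructor above
// guarantees LLVMContext::MD_tbaa == getMDKindID("tbaa").
static void tagLoad(LoadInst *LI, MDNode *TBAATag) {
  LI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
}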
LLVMContext::~LLVMContext() { delete pImpl; }
+void LLVMContext::addModule(Module *M) {
+ pImpl->OwnedModules.insert(M);
+}
+
+void LLVMContext::removeModule(Module *M) {
+ pImpl->OwnedModules.erase(M);
+}
+
//===----------------------------------------------------------------------===//
// Recoverable Backend Errors
//===----------------------------------------------------------------------===//
-void LLVMContext::setInlineAsmDiagnosticHandler(void *DiagHandler,
- void *DiagContext) {
+void LLVMContext::
+setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
+ void *DiagContext) {
pImpl->InlineAsmDiagHandler = DiagHandler;
pImpl->InlineAsmDiagContext = DiagContext;
}
/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
/// setInlineAsmDiagnosticHandler.
-void *LLVMContext::getInlineAsmDiagnosticHandler() const {
+LLVMContext::InlineAsmDiagHandlerTy
+LLVMContext::getInlineAsmDiagnosticHandler() const {
return pImpl->InlineAsmDiagHandler;
}
@@ -76,13 +94,11 @@ void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
errs() << "error: " << ErrorStr << "\n";
exit(1);
}
-
+
// If we do have an error handler, we can report the error and keep going.
SMDiagnostic Diag("", "error: " + ErrorStr.str());
-
- ((SourceMgr::DiagHandlerTy)(intptr_t)pImpl->InlineAsmDiagHandler)
- (Diag, pImpl->InlineAsmDiagContext, LocCookie);
-
+
+ pImpl->InlineAsmDiagHandler(Diag, pImpl->InlineAsmDiagContext, LocCookie);
}
//===----------------------------------------------------------------------===//
@@ -94,13 +110,13 @@ void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
static bool isValidName(StringRef MDName) {
if (MDName.empty())
return false;
-
- if (!isalpha(MDName[0]))
+
+ if (!std::isalpha(MDName[0]))
return false;
-
+
for (StringRef::iterator I = MDName.begin() + 1, E = MDName.end(); I != E;
++I) {
- if (!isalnum(*I) && *I != '_' && *I != '-' && *I != '.')
+ if (!std::isalnum(*I) && *I != '_' && *I != '-' && *I != '.')
return false;
}
return true;
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
index 93a075f..ccb8dc5 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "LLVMContextImpl.h"
+#include "llvm/Module.h"
#include <algorithm>
using namespace llvm;
@@ -25,6 +26,7 @@ LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
X86_FP80Ty(C, Type::X86_FP80TyID),
FP128Ty(C, Type::FP128TyID),
PPC_FP128Ty(C, Type::PPC_FP128TyID),
+ X86_MMXTy(C, Type::X86_MMXTyID),
Int1Ty(C, 1),
Int8Ty(C, 8),
Int16Ty(C, 16),
@@ -51,6 +53,15 @@ struct DropReferences {
}
LLVMContextImpl::~LLVMContextImpl() {
+ // NOTE: We need to delete the contents of OwnedModules, but we have to
+ // duplicate it into a temporary vector, because the destructor of Module
+ // will try to remove itself from the OwnedModules set. This would cause
+ // iterator invalidation if we iterated over the set directly.
+ std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end());
+ for (std::vector<Module*>::iterator I = Modules.begin(), E = Modules.end();
+ I != E; ++I)
+ delete *I;
+
std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
DropReferences());
std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
@@ -90,7 +101,7 @@ LLVMContextImpl::~LLVMContextImpl() {
MDNodes.push_back(&*I);
}
MDNodes.append(NonUniquedMDNodes.begin(), NonUniquedMDNodes.end());
- for (SmallVector<MDNode*, 8>::iterator I = MDNodes.begin(),
+ for (SmallVectorImpl<MDNode *>::iterator I = MDNodes.begin(),
E = MDNodes.end(); I != E; ++I) {
(*I)->destroy();
}
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.h b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
index 51b2992..23971aa 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.h
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
@@ -115,7 +115,12 @@ public:
class LLVMContextImpl {
public:
- void *InlineAsmDiagHandler, *InlineAsmDiagContext;
+ /// OwnedModules - The set of modules instantiated in this context, and which
+ /// will be automatically deleted if this context is deleted.
+ SmallPtrSet<Module*, 4> OwnedModules;
+
+ LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler;
+ void *InlineAsmDiagContext;
typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt*,
DenseMapAPIntKeyInfo> IntMapTy;
@@ -170,6 +175,7 @@ public:
const Type X86_FP80Ty;
const Type FP128Ty;
const Type PPC_FP128Ty;
+ const Type X86_MMXTy;
const IntegerType Int1Ty;
const IntegerType Int8Ty;
const IntegerType Int16Ty;
diff --git a/contrib/llvm/lib/VMCore/LeakDetector.cpp b/contrib/llvm/lib/VMCore/LeakDetector.cpp
index a44f61d..f6651e9 100644
--- a/contrib/llvm/lib/VMCore/LeakDetector.cpp
+++ b/contrib/llvm/lib/VMCore/LeakDetector.cpp
@@ -16,8 +16,8 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Value.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/VMCore/Metadata.cpp b/contrib/llvm/lib/VMCore/Metadata.cpp
index da69c43..0b8e8df 100644
--- a/contrib/llvm/lib/VMCore/Metadata.cpp
+++ b/contrib/llvm/lib/VMCore/Metadata.cpp
@@ -339,17 +339,14 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
// Now that the node is out of the folding set, get ready to reinsert it.
// First, check to see if another node with the same operands already exists
- // in the set. If it doesn't exist, this returns the position to insert it.
+ // in the set. If so, then this node is redundant.
FoldingSetNodeID ID;
Profile(ID);
void *InsertPoint;
- MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
-
- if (N) {
- N->replaceAllUsesWith(this);
- N->destroy();
- N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
- assert(N == 0 && "shouldn't be in the map now!"); (void)N;
+ if (MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)) {
+ replaceAllUsesWith(N);
+ destroy();
+ return;
}
// InsertPoint will have been set by the FindNodeOrInsertPos call.
diff --git a/contrib/llvm/lib/VMCore/Module.cpp b/contrib/llvm/lib/VMCore/Module.cpp
index d7ddf96..341e527 100644
--- a/contrib/llvm/lib/VMCore/Module.cpp
+++ b/contrib/llvm/lib/VMCore/Module.cpp
@@ -62,9 +62,11 @@ Module::Module(StringRef MID, LLVMContext& C)
ValSymTab = new ValueSymbolTable();
TypeSymTab = new TypeSymbolTable();
NamedMDSymTab = new StringMap<NamedMDNode *>();
+ Context.addModule(this);
}
Module::~Module() {
+ Context.removeModule(this);
dropAllReferences();
GlobalList.clear();
FunctionList.clear();
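Paired with the OwnedModules set in LLVMContextImpl, these calls make the context the owner of every module created in it: a module deleted early unregisters itself, and anything still registered is deleted by ~LLVMContextImpl. A sketch of the resulting lifetime contract:

#include "llvm/LLVMContext.h"
#include "llvm/Module.h"

using namespace llvm;

static void lifetimeDemo() {
  LLVMContext *Ctx = new LLVMContext();
  Module *A = new Module("a", *Ctx); // registered via Context.addModule
  Module *B = new Module("b", *Ctx);
  (void)B;
  delete A;   // ~Module calls Context.removeModule(this)
  delete Ctx; // ~LLVMContextImpl still owns and deletes module "b"
}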
diff --git a/contrib/llvm/lib/VMCore/Pass.cpp b/contrib/llvm/lib/VMCore/Pass.cpp
index a7d7f61..9afc540 100644
--- a/contrib/llvm/lib/VMCore/Pass.cpp
+++ b/contrib/llvm/lib/VMCore/Pass.cpp
@@ -213,7 +213,6 @@ RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID,
*this, isDefault);
}
-
//===----------------------------------------------------------------------===//
// PassRegistrationListener implementation
//
diff --git a/contrib/llvm/lib/VMCore/PassManager.cpp b/contrib/llvm/lib/VMCore/PassManager.cpp
index ab4d4e5..8bfef98 100644
--- a/contrib/llvm/lib/VMCore/PassManager.cpp
+++ b/contrib/llvm/lib/VMCore/PassManager.cpp
@@ -24,7 +24,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cstdio>
#include <map>
@@ -497,9 +497,14 @@ PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
}
/// Set pass P as the last user of the given analysis passes.
-void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses,
- Pass *P) {
- for (SmallVector<Pass *, 12>::iterator I = AnalysisPasses.begin(),
+void
+PMTopLevelManager::setLastUser(const SmallVectorImpl<Pass *> &AnalysisPasses,
+ Pass *P) {
+ unsigned PDepth = 0;
+ if (P->getResolver())
+ PDepth = P->getResolver()->getPMDataManager().getDepth();
+
+ for (SmallVectorImpl<Pass *>::const_iterator I = AnalysisPasses.begin(),
E = AnalysisPasses.end(); I != E; ++I) {
Pass *AP = *I;
LastUser[AP] = P;
@@ -507,20 +512,47 @@ void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses,
if (P == AP)
continue;
+ // Update the last users of passes that are required transitive by AP.
+ AnalysisUsage *AnUsage = findAnalysisUsage(AP);
+ const AnalysisUsage::VectorType &IDs = AnUsage->getRequiredTransitiveSet();
+ SmallVector<Pass *, 12> LastUses;
+ SmallVector<Pass *, 12> LastPMUses;
+ for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(),
+ E = IDs.end(); I != E; ++I) {
+ Pass *AnalysisPass = findAnalysisPass(*I);
+ assert(AnalysisPass && "Expected analysis pass to exist.");
+ AnalysisResolver *AR = AnalysisPass->getResolver();
+ assert(AR && "Expected analysis resolver to exist.");
+ unsigned APDepth = AR->getPMDataManager().getDepth();
+
+ if (PDepth == APDepth)
+ LastUses.push_back(AnalysisPass);
+ else if (PDepth > APDepth)
+ LastPMUses.push_back(AnalysisPass);
+ }
+
+ setLastUser(LastUses, P);
+
+ // If this pass has a corresponding pass manager, push higher level
+ // analysis to this pass manager.
+ if (P->getResolver())
+ setLastUser(LastPMUses, P->getResolver()->getPMDataManager().getAsPass());
+
+
// If AP is the last user of other passes then make P last user of
// such passes.
for (DenseMap<Pass *, Pass *>::iterator LUI = LastUser.begin(),
LUE = LastUser.end(); LUI != LUE; ++LUI) {
if (LUI->second == AP)
// DenseMap iterator is not invalidated here because
- // this is just updating exisitng entry.
+ // this is just updating existing entries.
LastUser[LUI->first] = P;
}
}
}
/// Collect passes whose last user is P
-void PMTopLevelManager::collectLastUses(SmallVector<Pass *, 12> &LastUses,
+void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses,
Pass *P) {
DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI =
InversedLastUser.find(P);
@@ -612,41 +644,40 @@ void PMTopLevelManager::schedulePass(Pass *P) {
/// then return NULL.
Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
- Pass *P = NULL;
// Check pass managers
- for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
- E = PassManagers.end(); P == NULL && I != E; ++I) {
- PMDataManager *PMD = *I;
- P = PMD->findAnalysisPass(AID, false);
- }
+ for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
+ E = PassManagers.end(); I != E; ++I)
+ if (Pass *P = (*I)->findAnalysisPass(AID, false))
+ return P;
// Check other pass managers
- for (SmallVector<PMDataManager *, 8>::iterator
+ for (SmallVectorImpl<PMDataManager *>::iterator
I = IndirectPassManagers.begin(),
- E = IndirectPassManagers.end(); P == NULL && I != E; ++I)
- P = (*I)->findAnalysisPass(AID, false);
-
- for (SmallVector<ImmutablePass *, 8>::iterator I = ImmutablePasses.begin(),
- E = ImmutablePasses.end(); P == NULL && I != E; ++I) {
+ E = IndirectPassManagers.end(); I != E; ++I)
+ if (Pass *P = (*I)->findAnalysisPass(AID, false))
+ return P;
+
+ // Check the immutable passes. Iterate in reverse order so that we find
+ // the most recently registered passes first.
+ for (SmallVector<ImmutablePass *, 8>::reverse_iterator I =
+ ImmutablePasses.rbegin(), E = ImmutablePasses.rend(); I != E; ++I) {
AnalysisID PI = (*I)->getPassID();
if (PI == AID)
- P = *I;
+ return *I;
// If Pass not found then check the interfaces implemented by Immutable Pass
- if (!P) {
- const PassInfo *PassInf =
- PassRegistry::getPassRegistry()->getPassInfo(PI);
- const std::vector<const PassInfo*> &ImmPI =
- PassInf->getInterfacesImplemented();
- for (std::vector<const PassInfo*>::const_iterator II = ImmPI.begin(),
- EE = ImmPI.end(); II != EE; ++II) {
- if ((*II)->getTypeInfo() == AID)
- P = *I;
- }
+ const PassInfo *PassInf =
+ PassRegistry::getPassRegistry()->getPassInfo(PI);
+ const std::vector<const PassInfo*> &ImmPI =
+ PassInf->getInterfacesImplemented();
+ for (std::vector<const PassInfo*>::const_iterator II = ImmPI.begin(),
+ EE = ImmPI.end(); II != EE; ++II) {
+ if ((*II)->getTypeInfo() == AID)
+ return *I;
}
}
- return P;
+ return 0;
}
// Print passes managed by this top level manager.
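
The rewritten findAnalysisPass above returns early and scans ImmutablePasses in
reverse, so the most recently registered pass wins. A minimal generic sketch of
that lookup-order idea (plain std::vector, not LLVM's actual containers):

    #include <vector>

    // Scan a registration list newest-first; the last registered match wins,
    // mirroring the reverse_iterator walk over ImmutablePasses above.
    template <class T, class Pred>
    T *findLast(std::vector<T *> &Registered, Pred Match) {
      for (typename std::vector<T *>::reverse_iterator I = Registered.rbegin(),
           E = Registered.rend(); I != E; ++I)
        if (Match(*I))
          return *I;
      return 0;
    }
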
@@ -675,6 +706,12 @@ void PMTopLevelManager::dumpArguments() const {
return;
dbgs() << "Pass Arguments: ";
+ for (SmallVector<ImmutablePass *, 8>::const_iterator I =
+ ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
+ if (const PassInfo *PI =
+ PassRegistry::getPassRegistry()->getPassInfo((*I)->getPassID()))
+ if (!PI->isAnalysisGroup())
+ dbgs() << " -" << PI->getPassArgument();
for (SmallVector<PMDataManager *, 8>::const_iterator I = PassManagers.begin(),
E = PassManagers.end(); I != E; ++I)
(*I)->dumpPassArguments();
@@ -682,12 +719,12 @@ void PMTopLevelManager::dumpArguments() const {
}
void PMTopLevelManager::initializeAllAnalysisInfo() {
- for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
+ for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
E = PassManagers.end(); I != E; ++I)
(*I)->initializeAnalysisInfo();
// Initialize other pass managers
- for (SmallVector<PMDataManager *, 8>::iterator
+ for (SmallVectorImpl<PMDataManager *>::iterator
I = IndirectPassManagers.begin(), E = IndirectPassManagers.end();
I != E; ++I)
(*I)->initializeAnalysisInfo();
@@ -708,11 +745,11 @@ void PMTopLevelManager::initializeAllAnalysisInfo() {
/// Destructor
PMTopLevelManager::~PMTopLevelManager() {
- for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
+ for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
E = PassManagers.end(); I != E; ++I)
delete *I;
- for (SmallVector<ImmutablePass *, 8>::iterator
+ for (SmallVectorImpl<ImmutablePass *>::iterator
I = ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
delete *I;
@@ -749,7 +786,7 @@ bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) {
return true;
const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
- for (SmallVector<Pass *, 8>::iterator I = HigherLevelAnalysis.begin(),
+ for (SmallVectorImpl<Pass *>::iterator I = HigherLevelAnalysis.begin(),
E = HigherLevelAnalysis.end(); I != E; ++I) {
Pass *P1 = *I;
if (P1->getAsImmutablePass() == 0 &&
@@ -849,7 +886,7 @@ void PMDataManager::removeDeadPasses(Pass *P, StringRef Msg,
dbgs() << " Free these instances\n";
}
- for (SmallVector<Pass *, 12>::iterator I = DeadPasses.begin(),
+ for (SmallVectorImpl<Pass *>::iterator I = DeadPasses.begin(),
E = DeadPasses.end(); I != E; ++I)
freePass(*I, Msg, DBG_STR);
}
@@ -910,7 +947,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
collectRequiredAnalysis(RequiredPasses,
ReqAnalysisNotAvailable, P);
- for (SmallVector<Pass *, 8>::iterator I = RequiredPasses.begin(),
+ for (SmallVectorImpl<Pass *>::iterator I = RequiredPasses.begin(),
E = RequiredPasses.end(); I != E; ++I) {
Pass *PRequired = *I;
unsigned RDepth = 0;
@@ -944,7 +981,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
}
// Now, take care of required analyses that are not available.
- for (SmallVector<AnalysisID, 8>::iterator
+ for (SmallVectorImpl<AnalysisID>::iterator
I = ReqAnalysisNotAvailable.begin(),
E = ReqAnalysisNotAvailable.end() ;I != E; ++I) {
const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
@@ -965,8 +1002,8 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
/// Populate RP with analysis passes that are required by
/// pass P and are available. Populate RP_NotAvail with analysis
/// passes that are required by pass P but are not available.
-void PMDataManager::collectRequiredAnalysis(SmallVector<Pass *, 8>&RP,
- SmallVector<AnalysisID, 8> &RP_NotAvail,
+void PMDataManager::collectRequiredAnalysis(SmallVectorImpl<Pass *> &RP,
+ SmallVectorImpl<AnalysisID> &RP_NotAvail,
Pass *P) {
AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
@@ -1038,7 +1075,7 @@ void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
TPM->collectLastUses(LUses, P);
- for (SmallVector<Pass *, 12>::iterator I = LUses.begin(),
+ for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(),
E = LUses.end(); I != E; ++I) {
llvm::dbgs() << "--" << std::string(Offset*2, ' ');
(*I)->dumpPassStructure(0);
@@ -1046,7 +1083,7 @@ void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
}
void PMDataManager::dumpPassArguments() const {
- for (SmallVector<Pass *, 8>::const_iterator I = PassVector.begin(),
+ for (SmallVectorImpl<Pass *>::const_iterator I = PassVector.begin(),
E = PassVector.end(); I != E; ++I) {
if (PMDataManager *PMD = (*I)->getAsPMDataManager())
PMD->dumpPassArguments();
@@ -1087,6 +1124,9 @@ void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1,
case ON_MODULE_MSG:
dbgs() << "' on Module '" << Msg << "'...\n";
break;
+ case ON_REGION_MSG:
+ dbgs() << "' on Region '" << Msg << "'...\n";
+ break;
case ON_LOOP_MSG:
dbgs() << "' on Loop '" << Msg << "'...\n";
break;
@@ -1163,7 +1203,7 @@ Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
// Destructor
PMDataManager::~PMDataManager() {
- for (SmallVector<Pass *, 8>::iterator I = PassVector.begin(),
+ for (SmallVectorImpl<Pass *>::iterator I = PassVector.begin(),
E = PassVector.end(); I != E; ++I)
delete *I;
}
@@ -1563,7 +1603,7 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
FPP->add(RequiredPass);
// Register P as the last user of RequiredPass.
- SmallVector<Pass *, 12> LU;
+ SmallVector<Pass *, 1> LU;
LU.push_back(RequiredPass);
FPP->setLastUser(LU, P);
}
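
Both setLastUser hunks above preserve one invariant: once P is recorded as the
last user of AP, every pass that previously died with AP must now die with P.
A simplified self-contained analogue of that bookkeeping (std::map standing in
for LLVM's DenseMap; Pass reduced to a tag struct):

    #include <cassert>
    #include <map>
    #include <vector>

    struct Pass { const char *Name; };

    // When P becomes the last user of each AP, transfer every entry whose
    // last user was AP over to P, so transitively required analyses survive.
    static void setLastUser(std::map<Pass *, Pass *> &LastUser,
                            const std::vector<Pass *> &AnalysisPasses,
                            Pass *P) {
      for (std::vector<Pass *>::const_iterator PI = AnalysisPasses.begin(),
           PE = AnalysisPasses.end(); PI != PE; ++PI) {
        Pass *AP = *PI;
        LastUser[AP] = P;
        if (AP == P)
          continue;
        for (std::map<Pass *, Pass *>::iterator I = LastUser.begin(),
             E = LastUser.end(); I != E; ++I)
          if (I->second == AP)
            I->second = P; // updates existing entries only; no insertion
      }
    }

    int main() {
      Pass A = {"A"}, B = {"B"}, C = {"C"};
      std::map<Pass *, Pass *> LU;
      setLastUser(LU, std::vector<Pass *>(1, &A), &B); // B last user of A
      setLastUser(LU, std::vector<Pass *>(1, &B), &C); // C inherits A from B
      assert(LU[&A] == &C && LU[&B] == &C);
      return 0;
    }
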
diff --git a/contrib/llvm/lib/VMCore/PassRegistry.cpp b/contrib/llvm/lib/VMCore/PassRegistry.cpp
index 21dba56..c97a170 100644
--- a/contrib/llvm/lib/VMCore/PassRegistry.cpp
+++ b/contrib/llvm/lib/VMCore/PassRegistry.cpp
@@ -16,93 +16,125 @@
#include "llvm/PassSupport.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include <vector>
using namespace llvm;
-static PassRegistry *PassRegistryObj = 0;
-PassRegistry *PassRegistry::getPassRegistry() {
- // Use double-checked locking to safely initialize the registrar when
- // we're running in multithreaded mode.
- PassRegistry* tmp = PassRegistryObj;
- if (llvm_is_multithreaded()) {
- sys::MemoryFence();
- if (!tmp) {
- llvm_acquire_global_lock();
- tmp = PassRegistryObj;
- if (!tmp) {
- tmp = new PassRegistry();
- sys::MemoryFence();
- PassRegistryObj = tmp;
- }
- llvm_release_global_lock();
- }
- } else if (!tmp) {
- PassRegistryObj = new PassRegistry();
- }
-
- return PassRegistryObj;
-}
-
-namespace {
-
-// FIXME: We use ManagedCleanup to erase the pass registrar on shutdown.
+// FIXME: We use ManagedStatic to erase the pass registrar on shutdown.
// Unfortunately, passes are registered with static ctors, and having
// llvm_shutdown clear this map prevents successful resurrection after
// llvm_shutdown is run. Ideally we should find a solution so that we don't
// leak the map, AND can still resurrect after shutdown.
-void cleanupPassRegistry(void*) {
- if (PassRegistryObj) {
- delete PassRegistryObj;
- PassRegistryObj = 0;
- }
+static ManagedStatic<PassRegistry> PassRegistryObj;
+PassRegistry *PassRegistry::getPassRegistry() {
+ return &*PassRegistryObj;
}
-ManagedCleanup<&cleanupPassRegistry> registryCleanup ATTRIBUTE_USED;
+static ManagedStatic<sys::SmartMutex<true> > Lock;
+
+//===----------------------------------------------------------------------===//
+// PassRegistryImpl
+//
+
+namespace {
+struct PassRegistryImpl {
+ /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
+ typedef DenseMap<const void*, const PassInfo*> MapType;
+ MapType PassInfoMap;
+
+ typedef StringMap<const PassInfo*> StringMapType;
+ StringMapType PassInfoStringMap;
+
+ /// AnalysisGroupInfo - Keep track of information for each analysis group.
+ struct AnalysisGroupInfo {
+ SmallPtrSet<const PassInfo *, 8> Implementations;
+ };
+ DenseMap<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap;
+
+ std::vector<const PassInfo*> ToFree;
+ std::vector<PassRegistrationListener*> Listeners;
+};
+} // end anonymous namespace
+
+void *PassRegistry::getImpl() const {
+ if (!pImpl)
+ pImpl = new PassRegistryImpl();
+ return pImpl;
+}
+
+//===----------------------------------------------------------------------===//
+// Accessors
+//
+
+PassRegistry::~PassRegistry() {
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(pImpl);
+
+ for (std::vector<const PassInfo*>::iterator I = Impl->ToFree.begin(),
+ E = Impl->ToFree.end(); I != E; ++I)
+ delete *I;
+
+ delete Impl;
+ pImpl = 0;
}
const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
- sys::SmartScopedLock<true> Guard(Lock);
- MapType::const_iterator I = PassInfoMap.find(TI);
- return I != PassInfoMap.end() ? I->second : 0;
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.find(TI);
+ return I != Impl->PassInfoMap.end() ? I->second : 0;
}
const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
- sys::SmartScopedLock<true> Guard(Lock);
- StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
- return I != PassInfoStringMap.end() ? I->second : 0;
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ PassRegistryImpl::StringMapType::const_iterator
+ I = Impl->PassInfoStringMap.find(Arg);
+ return I != Impl->PassInfoStringMap.end() ? I->second : 0;
}
//===----------------------------------------------------------------------===//
// Pass Registration mechanism
//
-void PassRegistry::registerPass(const PassInfo &PI) {
- sys::SmartScopedLock<true> Guard(Lock);
+void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) {
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
bool Inserted =
- PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
- assert(Inserted && "Pass registered multiple times!"); Inserted=Inserted;
- PassInfoStringMap[PI.getPassArgument()] = &PI;
+ Impl->PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
+ assert(Inserted && "Pass registered multiple times!");
+ (void)Inserted;
+ Impl->PassInfoStringMap[PI.getPassArgument()] = &PI;
// Notify any listeners.
for (std::vector<PassRegistrationListener*>::iterator
- I = Listeners.begin(), E = Listeners.end(); I != E; ++I)
+ I = Impl->Listeners.begin(), E = Impl->Listeners.end(); I != E; ++I)
(*I)->passRegistered(&PI);
+
+ if (ShouldFree) Impl->ToFree.push_back(&PI);
}
void PassRegistry::unregisterPass(const PassInfo &PI) {
- sys::SmartScopedLock<true> Guard(Lock);
- MapType::iterator I = PassInfoMap.find(PI.getTypeInfo());
- assert(I != PassInfoMap.end() && "Pass registered but not in map!");
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ PassRegistryImpl::MapType::iterator I =
+ Impl->PassInfoMap.find(PI.getTypeInfo());
+ assert(I != Impl->PassInfoMap.end() && "Pass registered but not in map!");
// Remove pass from the map.
- PassInfoMap.erase(I);
- PassInfoStringMap.erase(PI.getPassArgument());
+ Impl->PassInfoMap.erase(I);
+ Impl->PassInfoStringMap.erase(PI.getPassArgument());
}
void PassRegistry::enumerateWith(PassRegistrationListener *L) {
- sys::SmartScopedLock<true> Guard(Lock);
- for (MapType::const_iterator I = PassInfoMap.begin(),
- E = PassInfoMap.end(); I != E; ++I)
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ for (PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.begin(),
+ E = Impl->PassInfoMap.end(); I != E; ++I)
L->passEnumerate(I->second);
}
@@ -111,7 +143,8 @@ void PassRegistry::enumerateWith(PassRegistrationListener *L) {
void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
const void *PassID,
PassInfo& Registeree,
- bool isDefault) {
+ bool isDefault,
+ bool ShouldFree) {
PassInfo *InterfaceInfo = const_cast<PassInfo*>(getPassInfo(InterfaceID));
if (InterfaceInfo == 0) {
// First reference to Interface, register it now.
@@ -126,12 +159,15 @@ void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
assert(ImplementationInfo &&
"Must register pass before adding to AnalysisGroup!");
+ sys::SmartScopedLock<true> Guard(*Lock);
+
// Make sure we keep track of the fact that the implementation implements
// the interface.
ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
- sys::SmartScopedLock<true> Guard(Lock);
- AnalysisGroupInfo &AGI = AnalysisGroupInfoMap[InterfaceInfo];
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ PassRegistryImpl::AnalysisGroupInfo &AGI =
+ Impl->AnalysisGroupInfoMap[InterfaceInfo];
assert(AGI.Implementations.count(ImplementationInfo) == 0 &&
"Cannot add a pass to the same analysis group more than once!");
AGI.Implementations.insert(ImplementationInfo);
@@ -143,17 +179,30 @@ void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
}
}
+
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ if (ShouldFree) Impl->ToFree.push_back(&Registeree);
}
void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
- sys::SmartScopedLock<true> Guard(Lock);
- Listeners.push_back(L);
+ sys::SmartScopedLock<true> Guard(*Lock);
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+ Impl->Listeners.push_back(L);
}
void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
- sys::SmartScopedLock<true> Guard(Lock);
+ sys::SmartScopedLock<true> Guard(*Lock);
+
+ // NOTE: This is necessary, because removeRegistrationListener() can be called
+ // as part of the llvm_shutdown sequence. Since we have no control over the
+ // order of that sequence, we need to gracefully handle the case where the
+ // PassRegistry is destructed before the object that triggers this call.
+ if (!pImpl) return;
+
+ PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
std::vector<PassRegistrationListener*>::iterator I =
- std::find(Listeners.begin(), Listeners.end(), L);
- assert(I != Listeners.end() && "PassRegistrationListener not registered!");
- Listeners.erase(I);
+ std::find(Impl->Listeners.begin(), Impl->Listeners.end(), L);
+ assert(I != Impl->Listeners.end() &&
+ "PassRegistrationListener not registered!");
+ Impl->Listeners.erase(I);
}
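
The PassRegistry rewrite above trades hand-rolled double-checked locking for a
ManagedStatic. In later (C++11) terms, the construction guarantee it provides
is roughly the sketch below; real ManagedStatic additionally registers the
object for teardown in llvm_shutdown(), which this omits:

    #include <mutex>

    class Registry { /* ... */ };

    // Lazily construct the singleton exactly once, race-free, replacing the
    // explicit memory fences and global lock that the patch deletes.
    Registry *getRegistry() {
      static std::once_flag Once;
      static Registry *Obj = 0;
      std::call_once(Once, [] { Obj = new Registry(); });
      return Obj;
    }
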
diff --git a/contrib/llvm/lib/VMCore/PrintModulePass.cpp b/contrib/llvm/lib/VMCore/PrintModulePass.cpp
index 2ee49d2..1f1fbc9 100644
--- a/contrib/llvm/lib/VMCore/PrintModulePass.cpp
+++ b/contrib/llvm/lib/VMCore/PrintModulePass.cpp
@@ -78,10 +78,10 @@ namespace {
char PrintModulePass::ID = 0;
INITIALIZE_PASS(PrintModulePass, "print-module",
- "Print module to stderr", false, false);
+ "Print module to stderr", false, false)
char PrintFunctionPass::ID = 0;
INITIALIZE_PASS(PrintFunctionPass, "print-function",
- "Print function to stderr", false, false);
+ "Print function to stderr", false, false)
/// createPrintModulePass - Create and return a pass that writes the
/// module to the specified raw_ostream.
diff --git a/contrib/llvm/lib/VMCore/Type.cpp b/contrib/llvm/lib/VMCore/Type.cpp
index c55e626..be28ad1 100644
--- a/contrib/llvm/lib/VMCore/Type.cpp
+++ b/contrib/llvm/lib/VMCore/Type.cpp
@@ -27,7 +27,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Threading.h"
+#include "llvm/Support/Threading.h"
#include <algorithm>
#include <cstdarg>
using namespace llvm;
@@ -109,6 +109,7 @@ const Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
case PPC_FP128TyID : return getPPC_FP128Ty(C);
case LabelTyID : return getLabelTy(C);
case MetadataTyID : return getMetadataTy(C);
+ case X86_MMXTyID : return getX86_MMXTy(C);
default:
return 0;
}
@@ -172,10 +173,20 @@ bool Type::canLosslesslyBitCastTo(const Type *Ty) const {
return false;
// Vector -> Vector conversions are always lossless if the two vector types
- // have the same size, otherwise not.
- if (const VectorType *thisPTy = dyn_cast<VectorType>(this))
+ // have the same size, otherwise not. Also, 64-bit vector types can be
+ // converted to x86mmx.
+ if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) {
if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
return thisPTy->getBitWidth() == thatPTy->getBitWidth();
+ if (Ty->getTypeID() == Type::X86_MMXTyID &&
+ thisPTy->getBitWidth() == 64)
+ return true;
+ }
+
+ if (this->getTypeID() == Type::X86_MMXTyID)
+ if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+ if (thatPTy->getBitWidth() == 64)
+ return true;
// At this point we have only various mismatches of the first class types
// remaining and ptr->ptr. Just select the lossless conversions. Everything
@@ -192,6 +203,7 @@ unsigned Type::getPrimitiveSizeInBits() const {
case Type::X86_FP80TyID: return 80;
case Type::FP128TyID: return 128;
case Type::PPC_FP128TyID: return 128;
+ case Type::X86_MMXTyID: return 64;
case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth();
case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth();
default: return 0;
@@ -354,6 +366,10 @@ const Type *Type::getPPC_FP128Ty(LLVMContext &C) {
return &C.pImpl->PPC_FP128Ty;
}
+const Type *Type::getX86_MMXTy(LLVMContext &C) {
+ return &C.pImpl->X86_MMXTy;
+}
+
const IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
return IntegerType::get(C, N);
}
@@ -398,6 +414,10 @@ const PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) {
return getPPC_FP128Ty(C)->getPointerTo(AS);
}
+const PointerType *Type::getX86_MMXPtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_MMXTy(C)->getPointerTo(AS);
+}
+
const PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
return getIntNTy(C, N)->getPointerTo(AS);
}
@@ -1083,7 +1103,7 @@ void DerivedType::refineAbstractTypeTo(const Type *NewType) {
while (!AbstractTypeUsers.empty() && NewTy != this) {
AbstractTypeUser *User = AbstractTypeUsers.back();
- unsigned OldSize = AbstractTypeUsers.size(); OldSize=OldSize;
+ unsigned OldSize = AbstractTypeUsers.size(); (void)OldSize;
#ifdef DEBUG_MERGE_TYPES
DEBUG(dbgs() << " REFINING user " << OldSize-1 << "[" << (void*)User
<< "] of abstract type [" << (void*)this << " "
@@ -1110,7 +1130,7 @@ void DerivedType::notifyUsesThatTypeBecameConcrete() {
DEBUG(dbgs() << "typeIsREFINED type: " << (void*)this << " " << *this <<"\n");
#endif
- unsigned OldSize = AbstractTypeUsers.size(); OldSize=OldSize;
+ unsigned OldSize = AbstractTypeUsers.size(); (void)OldSize;
while (!AbstractTypeUsers.empty()) {
AbstractTypeUser *ATU = AbstractTypeUsers.back();
ATU->typeBecameConcrete(this);
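
The canLosslesslyBitCastTo hunk above adds the x86_mmx cases. Restated as a
standalone predicate (the TypeDesc fields are illustrative, not LLVM's Type
API): vector <-> vector needs equal bit widths, and a 64-bit vector may also
round-trip through x86_mmx:

    struct TypeDesc { bool IsVector, IsMMX; unsigned Bits; };

    bool canLosslesslyBitCast(const TypeDesc &From, const TypeDesc &To) {
      if (From.IsVector && To.IsVector)
        return From.Bits == To.Bits; // same-width vectors are lossless
      if (From.IsVector && To.IsMMX)
        return From.Bits == 64;      // 64-bit vector -> x86_mmx
      if (From.IsMMX && To.IsVector)
        return To.Bits == 64;        // x86_mmx -> 64-bit vector
      return false; // pointer and other first-class cases elided here
    }
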
diff --git a/contrib/llvm/lib/VMCore/TypesContext.h b/contrib/llvm/lib/VMCore/TypesContext.h
index 5a90917..4694486 100644
--- a/contrib/llvm/lib/VMCore/TypesContext.h
+++ b/contrib/llvm/lib/VMCore/TypesContext.h
@@ -317,7 +317,7 @@ public:
// The old record is now out-of-date, because one of the children has been
// updated. Remove the obsolete entry from the map.
unsigned NumErased = Map.erase(ValType::get(Ty));
- assert(NumErased && "Element not found!"); NumErased = NumErased;
+ assert(NumErased && "Element not found!"); (void)NumErased;
// Remember the structural hash for the type before we start hacking on it,
// in case we need it later.
diff --git a/contrib/llvm/lib/VMCore/Use.cpp b/contrib/llvm/lib/VMCore/Use.cpp
index fec710b..2258b8d 100644
--- a/contrib/llvm/lib/VMCore/Use.cpp
+++ b/contrib/llvm/lib/VMCore/Use.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/User.h"
+#include "llvm/Value.h"
namespace llvm {
@@ -85,7 +85,8 @@ const Use *Use::getImpliedUser() const {
// Use initTags Implementation
//===----------------------------------------------------------------------===//
-Use *Use::initTags(Use * const Start, Use *Stop, ptrdiff_t Done) {
+Use *Use::initTags(Use * const Start, Use *Stop) {
+ ptrdiff_t Done = 0;
while (Done < 20) {
if (Start == Stop--)
return Start;
@@ -97,20 +98,18 @@ Use *Use::initTags(Use * const Start, Use *Stop, ptrdiff_t Done) {
oneDigitTag, oneDigitTag, oneDigitTag,
oneDigitTag, stopTag
};
- Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(tags[Done++]));
- Stop->Val = 0;
+ new(Stop) Use(tags[Done++]);
}
ptrdiff_t Count = Done;
while (Start != Stop) {
--Stop;
- Stop->Val = 0;
if (!Count) {
- Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(stopTag));
+ new(Stop) Use(stopTag);
++Done;
Count = Done;
} else {
- Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(Count & 1));
+ new(Stop) Use(PrevPtrTag(Count & 1));
Count >>= 1;
++Done;
}
@@ -124,123 +123,24 @@ Use *Use::initTags(Use * const Start, Use *Stop, ptrdiff_t Done) {
//===----------------------------------------------------------------------===//
void Use::zap(Use *Start, const Use *Stop, bool del) {
- if (del) {
- while (Start != Stop) {
- (--Stop)->~Use();
- }
+ while (Start != Stop)
+ (--Stop)->~Use();
+ if (del)
::operator delete(Start);
- return;
- }
-
- while (Start != Stop) {
- (Start++)->set(0);
- }
}
//===----------------------------------------------------------------------===//
-// AugmentedUse layout struct
-//===----------------------------------------------------------------------===//
-
-struct AugmentedUse : public Use {
- PointerIntPair<User*, 1, Tag> ref;
- AugmentedUse(); // not implemented
-};
-
-
-//===----------------------------------------------------------------------===//
// Use getUser Implementation
//===----------------------------------------------------------------------===//
User *Use::getUser() const {
const Use *End = getImpliedUser();
- const PointerIntPair<User*, 1, Tag>& ref(
- static_cast<const AugmentedUse*>(End - 1)->ref);
+ const PointerIntPair<User*, 1, unsigned>&
+ ref(static_cast<const AugmentedUse*>(End - 1)->ref);
User *She = ref.getPointer();
return ref.getInt()
? She
: (User*)End;
}
-//===----------------------------------------------------------------------===//
-// User allocHungoffUses Implementation
-//===----------------------------------------------------------------------===//
-
-Use *User::allocHungoffUses(unsigned N) const {
- Use *Begin = static_cast<Use*>(::operator new(sizeof(Use) * N
- + sizeof(AugmentedUse)
- - sizeof(Use)));
- Use *End = Begin + N;
- PointerIntPair<User*, 1, Tag>& ref(static_cast<AugmentedUse&>(End[-1]).ref);
- ref.setPointer(const_cast<User*>(this));
- ref.setInt(tagOne);
- return Use::initTags(Begin, End);
-}
-
-//===----------------------------------------------------------------------===//
-// User operator new Implementations
-//===----------------------------------------------------------------------===//
-
-void *User::operator new(size_t s, unsigned Us) {
- void *Storage = ::operator new(s + sizeof(Use) * Us);
- Use *Start = static_cast<Use*>(Storage);
- Use *End = Start + Us;
- User *Obj = reinterpret_cast<User*>(End);
- Obj->OperandList = Start;
- Obj->NumOperands = Us;
- Use::initTags(Start, End);
- return Obj;
-}
-
-/// Prefixed allocation - just before the first Use, allocate a NULL pointer.
-/// The destructor can detect its presence and readjust the OperandList
-/// for deletition.
-///
-void *User::operator new(size_t s, unsigned Us, bool Prefix) {
- // currently prefixed allocation only admissible for
- // unconditional branch instructions
- if (!Prefix)
- return operator new(s, Us);
-
- assert(Us == 1 && "Other than one Use allocated?");
- typedef PointerIntPair<void*, 2, Use::PrevPtrTag> TaggedPrefix;
- void *Raw = ::operator new(s + sizeof(TaggedPrefix) + sizeof(Use) * Us);
- TaggedPrefix *Pre = static_cast<TaggedPrefix*>(Raw);
- Pre->setFromOpaqueValue(0);
- void *Storage = Pre + 1; // skip over prefix
- Use *Start = static_cast<Use*>(Storage);
- Use *End = Start + Us;
- User *Obj = reinterpret_cast<User*>(End);
- Obj->OperandList = Start;
- Obj->NumOperands = Us;
- Use::initTags(Start, End);
- return Obj;
-}
-
-//===----------------------------------------------------------------------===//
-// User operator delete Implementation
-//===----------------------------------------------------------------------===//
-
-void User::operator delete(void *Usr) {
- User *Start = static_cast<User*>(Usr);
- Use *Storage = static_cast<Use*>(Usr) - Start->NumOperands;
- //
- // look for a variadic User
- if (Storage == Start->OperandList) {
- ::operator delete(Storage);
- return;
- }
- //
- // check for the flag whether the destructor has detected a prefixed
- // allocation, in which case we remove the flag and delete starting
- // at OperandList
- if (reinterpret_cast<intptr_t>(Start->OperandList) & 1) {
- ::operator delete(reinterpret_cast<char*>(Start->OperandList) - 1);
- return;
- }
- //
- // in all other cases just delete the nullary User (covers hung-off
- // uses also
- ::operator delete(Usr);
-}
-
} // End llvm namespace
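
The initTags rewrite above replaces raw field pokes with `new(Stop) Use(tag)`.
A small self-contained demonstration of the idiom: the slots come from raw
::operator new storage, so placement new runs a real constructor in place,
and teardown is an explicit destructor call, as in the simplified Use::zap:

    #include <new>

    struct Slot {
      explicit Slot(unsigned Tag) : Val(0), PrevTag(Tag) {}
      void *Val;
      unsigned PrevTag;
    };

    int main() {
      void *Raw = ::operator new(3 * sizeof(Slot)); // uninitialized memory
      Slot *Start = static_cast<Slot *>(Raw);
      for (unsigned i = 0; i != 3; ++i)
        new (Start + i) Slot(i & 1);  // construct each slot in place
      for (unsigned i = 3; i != 0; --i)
        (Start + i - 1)->~Slot();     // explicit destruction before freeing
      ::operator delete(Raw);
      return 0;
    }
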
diff --git a/contrib/llvm/lib/VMCore/User.cpp b/contrib/llvm/lib/VMCore/User.cpp
new file mode 100644
index 0000000..2f4587d
--- /dev/null
+++ b/contrib/llvm/lib/VMCore/User.cpp
@@ -0,0 +1,81 @@
+//===-- User.cpp - Implement the User class -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Constant.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/User.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// User Class
+//===----------------------------------------------------------------------===//
+
+// replaceUsesOfWith - Replaces all references to the "From" definition with
+// references to the "To" definition.
+//
+void User::replaceUsesOfWith(Value *From, Value *To) {
+ if (From == To) return; // Duh what?
+
+ assert((!isa<Constant>(this) || isa<GlobalValue>(this)) &&
+ "Cannot call User::replaceUsesOfWith on a constant!");
+
+ for (unsigned i = 0, E = getNumOperands(); i != E; ++i)
+ if (getOperand(i) == From) { // Is this operand pointing to oldval?
+ // The side effects of this setOperand call include linking to
+ // "To", adding "this" to the uses list of To, and
+ // most importantly, removing "this" from the use list of "From".
+ setOperand(i, To); // Fix it now...
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// User allocHungoffUses Implementation
+//===----------------------------------------------------------------------===//
+
+Use *User::allocHungoffUses(unsigned N) const {
+ Use *Begin = static_cast<Use*>(::operator new(sizeof(Use) * N
+ + sizeof(AugmentedUse)
+ - sizeof(Use)));
+ Use *End = Begin + N;
+ PointerIntPair<User*, 1, unsigned>&
+ ref(static_cast<AugmentedUse&>(End[-1]).ref);
+ ref.setPointer(const_cast<User*>(this));
+ ref.setInt(1);
+ return Use::initTags(Begin, End);
+}
+
+//===----------------------------------------------------------------------===//
+// User operator new Implementations
+//===----------------------------------------------------------------------===//
+
+void *User::operator new(size_t s, unsigned Us) {
+ void *Storage = ::operator new(s + sizeof(Use) * Us);
+ Use *Start = static_cast<Use*>(Storage);
+ Use *End = Start + Us;
+ User *Obj = reinterpret_cast<User*>(End);
+ Obj->OperandList = Start;
+ Obj->NumOperands = Us;
+ Use::initTags(Start, End);
+ return Obj;
+}
+
+//===----------------------------------------------------------------------===//
+// User operator delete Implementation
+//===----------------------------------------------------------------------===//
+
+void User::operator delete(void *Usr) {
+ User *Start = static_cast<User*>(Usr);
+ Use *Storage = static_cast<Use*>(Usr) - Start->NumOperands;
+ // If there were hung-off uses, they will have been freed already and
+ // NumOperands reset to 0, so here we just free the User itself.
+ ::operator delete(Storage);
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/lib/VMCore/Value.cpp b/contrib/llvm/lib/VMCore/Value.cpp
index b8c6775..29f6a80 100644
--- a/contrib/llvm/lib/VMCore/Value.cpp
+++ b/contrib/llvm/lib/VMCore/Value.cpp
@@ -22,6 +22,7 @@
#include "llvm/ValueSymbolTable.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
@@ -254,7 +255,7 @@ void Value::takeName(Value *V) {
// Get V's ST; this should always succeed, because V has a name.
ValueSymbolTable *VST;
bool Failure = getSymTab(V, VST);
- assert(!Failure && "V has a name, so it should have a ST!"); Failure=Failure;
+ assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;
// If these values are both in the same symtab, we can do this very fast.
// This works even if both values have no symtab yet.
@@ -345,25 +346,62 @@ Value *Value::stripPointerCasts() {
return V;
}
-Value *Value::getUnderlyingObject(unsigned MaxLookup) {
- if (!getType()->isPointerTy())
- return this;
- Value *V = this;
- for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
- V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (GA->mayBeOverridden())
- return V;
- V = GA->getAliasee();
- } else {
- return V;
+/// isDereferenceablePointer - Test if this value is always a pointer to
+/// allocated and suitably aligned memory for a simple load or store.
+bool Value::isDereferenceablePointer() const {
+ // Note that it is not safe to speculate into a malloc'd region because
+ // malloc may return null.
+ // It's also not always safe to follow a bitcast, for example:
+ // bitcast i8* (alloca i8) to i32*
+ // would result in a 4-byte load from a 1-byte alloca. Some cases could
+ // be handled using TargetData to check sizes and alignments though.
+
+ // These are obviously ok.
+ if (isa<AllocaInst>(this)) return true;
+
+ // Global variables which can't collapse to null are ok.
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this))
+ return !GV->hasExternalWeakLinkage();
+
+ // byval arguments are ok.
+ if (const Argument *A = dyn_cast<Argument>(this))
+ return A->hasByValAttr();
+
+ // For GEPs, determine if the indexing lands within the allocated object.
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(this)) {
+ // Conservatively require that the base pointer be fully dereferenceable.
+ if (!GEP->getOperand(0)->isDereferenceablePointer())
+ return false;
+ // Check the indices.
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (User::const_op_iterator I = GEP->op_begin()+1,
+ E = GEP->op_end(); I != E; ++I) {
+ Value *Index = *I;
+ const Type *Ty = *GTI++;
+ // Struct indices can't be out of bounds.
+ if (isa<StructType>(Ty))
+ continue;
+ ConstantInt *CI = dyn_cast<ConstantInt>(Index);
+ if (!CI)
+ return false;
+ // Zero is always ok.
+ if (CI->isZero())
+ continue;
+ // Check to see that it's within the bounds of an array.
+ const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ if (!ATy)
+ return false;
+ if (CI->getValue().getActiveBits() > 64)
+ return false;
+ if (CI->getZExtValue() >= ATy->getNumElements())
+ return false;
}
- assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ // Indices check out; this is dereferenceable.
+ return true;
}
- return V;
+
+ // If we don't know, assume the worst.
+ return false;
}
/// DoPHITranslation - If this value is a PHI node with CurBB as its parent,
@@ -600,26 +638,3 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
/// ~CallbackVH. Empty, but defined here to avoid emitting the vtable
/// more than once.
CallbackVH::~CallbackVH() {}
-
-
-//===----------------------------------------------------------------------===//
-// User Class
-//===----------------------------------------------------------------------===//
-
-// replaceUsesOfWith - Replaces all references to the "From" definition with
-// references to the "To" definition.
-//
-void User::replaceUsesOfWith(Value *From, Value *To) {
- if (From == To) return; // Duh what?
-
- assert((!isa<Constant>(this) || isa<GlobalValue>(this)) &&
- "Cannot call User::replaceUsesOfWith on a constant!");
-
- for (unsigned i = 0, E = getNumOperands(); i != E; ++i)
- if (getOperand(i) == From) { // Is This operand is pointing to oldval?
- // The side effects of this setOperand call include linking to
- // "To", adding "this" to the uses list of To, and
- // most importantly, removing "this" from the use list of "From".
- setOperand(i, To); // Fix it now...
- }
-}
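
The new Value::isDereferenceablePointer above accepts a GEP only when its base
is dereferenceable and every array index is a known constant within bounds.
That per-index test, pulled out as a standalone helper (parameter names are
illustrative):

    #include <stdint.h>

    // An array subscript keeps the pointer provably in bounds only if it is
    // a known constant, fits in 64 bits, and is below the element count;
    // struct indices are skipped upstream because they cannot be out of
    // bounds, and a variable index means "assume the worst".
    bool arrayIndexInBounds(bool IsConstant, unsigned ActiveBits,
                            uint64_t Index, uint64_t NumElements) {
      if (!IsConstant)
        return false;
      if (Index == 0)
        return true;               // zero is always ok
      if (ActiveBits > 64)
        return false;              // too wide to reason about
      return Index < NumElements;
    }
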
diff --git a/contrib/llvm/lib/VMCore/ValueTypes.cpp b/contrib/llvm/lib/VMCore/ValueTypes.cpp
index d2a8ce3..c054ae4 100644
--- a/contrib/llvm/lib/VMCore/ValueTypes.cpp
+++ b/contrib/llvm/lib/VMCore/ValueTypes.cpp
@@ -109,7 +109,8 @@ std::string EVT::getEVTString() const {
case MVT::ppcf128: return "ppcf128";
case MVT::isVoid: return "isVoid";
case MVT::Other: return "ch";
- case MVT::Flag: return "flag";
+ case MVT::Glue: return "glue";
+ case MVT::x86mmx: return "x86mmx";
case MVT::v2i8: return "v2i8";
case MVT::v4i8: return "v4i8";
case MVT::v8i8: return "v8i8";
@@ -155,6 +156,7 @@ const Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::f80: return Type::getX86_FP80Ty(Context);
case MVT::f128: return Type::getFP128Ty(Context);
case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
+ case MVT::x86mmx: return Type::getX86_MMXTy(Context);
case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2);
case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4);
case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8);
@@ -196,6 +198,7 @@ EVT EVT::getEVT(const Type *Ty, bool HandleUnknown){
case Type::FloatTyID: return MVT(MVT::f32);
case Type::DoubleTyID: return MVT(MVT::f64);
case Type::X86_FP80TyID: return MVT(MVT::f80);
+ case Type::X86_MMXTyID: return MVT(MVT::x86mmx);
case Type::FP128TyID: return MVT(MVT::f128);
case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
case Type::PointerTyID: return MVT(MVT::iPTR);
diff --git a/contrib/llvm/lib/VMCore/Verifier.cpp b/contrib/llvm/lib/VMCore/Verifier.cpp
index e3ecc97..58ec6fe 100644
--- a/contrib/llvm/lib/VMCore/Verifier.cpp
+++ b/contrib/llvm/lib/VMCore/Verifier.cpp
@@ -72,7 +72,9 @@ namespace { // Anonymous namespace for class
struct PreVerifier : public FunctionPass {
static char ID; // Pass ID, replacement for typeid
- PreVerifier() : FunctionPass(ID) { }
+ PreVerifier() : FunctionPass(ID) {
+ initializePreVerifierPass(*PassRegistry::getPassRegistry());
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -103,8 +105,8 @@ namespace { // Anonymous namespace for class
char PreVerifier::ID = 0;
INITIALIZE_PASS(PreVerifier, "preverify", "Preliminary module verification",
- false, false);
-char &PreVerifyID = PreVerifier::ID;
+ false, false)
+static char &PreVerifyID = PreVerifier::ID;
namespace {
class TypeSet : public AbstractTypeUser {
@@ -184,11 +186,15 @@ namespace {
Verifier()
: FunctionPass(ID),
Broken(false), RealPass(true), action(AbortProcessAction),
- Mod(0), Context(0), DT(0), MessagesStr(Messages) {}
+ Mod(0), Context(0), DT(0), MessagesStr(Messages) {
+ initializeVerifierPass(*PassRegistry::getPassRegistry());
+ }
explicit Verifier(VerifierFailureAction ctn)
: FunctionPass(ID),
Broken(false), RealPass(true), action(ctn), Mod(0), Context(0), DT(0),
- MessagesStr(Messages) {}
+ MessagesStr(Messages) {
+ initializeVerifierPass(*PassRegistry::getPassRegistry());
+ }
bool doInitialization(Module &M) {
Mod = &M;
@@ -393,7 +399,10 @@ namespace {
} // End anonymous namespace
char Verifier::ID = 0;
-INITIALIZE_PASS(Verifier, "verify", "Module Verifier", false, false);
+INITIALIZE_PASS_BEGIN(Verifier, "verify", "Module Verifier", false, false)
+INITIALIZE_PASS_DEPENDENCY(PreVerifier)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(Verifier, "verify", "Module Verifier", false, false)
// Assert - We know that cond should be true, if not print an error message.
#define Assert(C, M) \
@@ -475,6 +484,7 @@ void Verifier::visitGlobalAlias(GlobalAlias &GA) {
"Aliasee cannot be NULL!", &GA);
Assert1(GA.getType() == GA.getAliasee()->getType(),
"Alias and aliasee types should match!", &GA);
+ Assert1(!GA.hasUnnamedAddr(), "Alias cannot have unnamed_addr!", &GA);
if (!isa<GlobalValue>(GA.getAliasee())) {
const ConstantExpr *CE = dyn_cast<ConstantExpr>(GA.getAliasee());
@@ -685,6 +695,8 @@ void Verifier::visitFunction(Function &F) {
case CallingConv::Cold:
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
+ case CallingConv::PTX_Kernel:
+ case CallingConv::PTX_Device:
Assert1(!F.isVarArg(),
"Varargs functions must have C calling conventions!", &F);
break;
@@ -1643,10 +1655,14 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
if (ID == Intrinsic::gcroot) {
AllocaInst *AI =
dyn_cast<AllocaInst>(CI.getArgOperand(0)->stripPointerCasts());
- Assert1(AI && AI->getType()->getElementType()->isPointerTy(),
- "llvm.gcroot parameter #1 must be a pointer alloca.", &CI);
+ Assert1(AI, "llvm.gcroot parameter #1 must be an alloca.", &CI);
Assert1(isa<Constant>(CI.getArgOperand(1)),
"llvm.gcroot parameter #2 must be a constant.", &CI);
+ if (!AI->getType()->getElementType()->isPointerTy()) {
+ Assert1(!isa<ConstantPointerNull>(CI.getArgOperand(1)),
+ "llvm.gcroot parameter #1 must either be a pointer alloca, "
+ "or argument #2 must be a non-null constant.", &CI);
+ }
}
Assert1(CI.getParent()->getParent()->hasGC(),
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
index 4631c65..b3b49c7 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Index.h
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -63,7 +63,7 @@ typedef void *CXIndex;
/**
* \brief A single translation unit, which resides in an index.
*/
-typedef void *CXTranslationUnit; /* A translation unit instance. */
+typedef struct CXTranslationUnitImpl *CXTranslationUnit;
/**
* \brief Opaque pointer representing client data that will be passed through
@@ -133,10 +133,8 @@ enum CXAvailabilityKind {
* with the string data, call \c clang_disposeString() to free the string.
*/
typedef struct {
- const char *Spelling;
- /* A 1 value indicates the clang_ indexing API needed to allocate the string
- (and it must be freed by clang_disposeString()). */
- int MustFreeString;
+ void *data;
+ unsigned private_flags;
} CXString;
/**
@@ -202,18 +200,6 @@ CINDEX_LINKAGE CXIndex clang_createIndex(int excludeDeclarationsFromPCH,
CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
/**
- * \brief Request that AST's be generated externally for API calls which parse
- * source code on the fly, e.g. \see createTranslationUnitFromSourceFile.
- *
- * Note: This is for debugging purposes only, and may be removed at a later
- * date.
- *
- * \param index - The index to update.
- * \param value - The new flag value.
- */
-CINDEX_LINKAGE void clang_setUseExternalASTGeneration(CXIndex index,
- int value);
-/**
* \defgroup CINDEX_FILES File manipulation routines
*
* @{
@@ -269,8 +255,8 @@ CINDEX_LINKAGE CXFile clang_getFile(CXTranslationUnit tu,
* \brief Identifies a specific source location within a translation
* unit.
*
- * Use clang_getInstantiationLocation() to map a source location to a
- * particular file, line, and column.
+ * Use clang_getInstantiationLocation() or clang_getSpellingLocation()
+ * to map a source location to a particular file, line, and column.
*/
typedef struct {
void *ptr_data[2];
@@ -313,6 +299,13 @@ CINDEX_LINKAGE CXSourceLocation clang_getLocation(CXTranslationUnit tu,
CXFile file,
unsigned line,
unsigned column);
+/**
+ * \brief Retrieves the source location associated with a given character offset
+ * in a particular translation unit.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getLocationForOffset(CXTranslationUnit tu,
+ CXFile file,
+ unsigned offset);
/**
* \brief Retrieve a NULL (invalid) source range.
@@ -330,6 +323,9 @@ CINDEX_LINKAGE CXSourceRange clang_getRange(CXSourceLocation begin,
* \brief Retrieve the file, line, column, and offset represented by
* the given source location.
*
+ * If the location refers into a macro instantiation, retrieves the
+ * location of the macro instantiation.
+ *
* \param location the location within a source file that will be decomposed
* into its parts.
*
@@ -352,6 +348,34 @@ CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
unsigned *offset);
/**
+ * \brief Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro instantiation, return where the
+ * location was originally spelled in the source file.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
+ */
+CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
* \brief Retrieve a source location representing the first character within a
* source range.
*/
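
A hedged usage sketch for the clang_getSpellingLocation entry point added
above. It assumes a file named "test.c" exists, and uses only entry points
already declared in this header; for a location inside a macro instantiation
it reports where the token was spelled rather than the expansion point:

    #include <clang-c/Index.h>
    #include <stdio.h>

    int main(void) {
      CXIndex Idx = clang_createIndex(0, 0);
      CXTranslationUnit TU = clang_parseTranslationUnit(
          Idx, "test.c", 0, 0, 0, 0, CXTranslationUnit_None);
      if (TU) {
        CXSourceLocation Loc =
            clang_getCursorLocation(clang_getTranslationUnitCursor(TU));
        CXFile File;
        unsigned Line, Col, Off;
        clang_getSpellingLocation(Loc, &File, &Line, &Col, &Off);
        printf("%u:%u (offset %u)\n", Line, Col, Off);
        clang_disposeTranslationUnit(TU);
      }
      clang_disposeIndex(Idx);
      return 0;
    }
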
@@ -475,7 +499,34 @@ enum CXDiagnosticDisplayOptions {
* This option corresponds to the clang flag
* \c -fdiagnostics-print-source-range-info.
*/
- CXDiagnostic_DisplaySourceRanges = 0x04
+ CXDiagnostic_DisplaySourceRanges = 0x04,
+
+ /**
+ * \brief Display the option name associated with this diagnostic, if any.
+ *
+ * The option name displayed (e.g., -Wconversion) will be placed in brackets
+ * after the diagnostic text. This option corresponds to the clang flag
+ * \c -fdiagnostics-show-option.
+ */
+ CXDiagnostic_DisplayOption = 0x08,
+
+ /**
+ * \brief Display the category number associated with this diagnostic, if any.
+ *
+ * The category number is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=id.
+ */
+ CXDiagnostic_DisplayCategoryId = 0x10,
+
+ /**
+ * \brief Display the category name associated with this diagnostic, if any.
+ *
+ * The category name is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=name.
+ */
+ CXDiagnostic_DisplayCategoryName = 0x20
};
/**
@@ -506,10 +557,6 @@ CINDEX_LINKAGE CXString clang_formatDiagnostic(CXDiagnostic Diagnostic,
CINDEX_LINKAGE unsigned clang_defaultDiagnosticDisplayOptions(void);
/**
- * \brief Print a diagnostic to the given file.
- */
-
-/**
* \brief Determine the severity of the given diagnostic.
*/
CINDEX_LINKAGE enum CXDiagnosticSeverity
@@ -529,6 +576,43 @@ CINDEX_LINKAGE CXSourceLocation clang_getDiagnosticLocation(CXDiagnostic);
CINDEX_LINKAGE CXString clang_getDiagnosticSpelling(CXDiagnostic);
/**
+ * \brief Retrieve the name of the command-line option that enabled this
+ * diagnostic.
+ *
+ * \param Diag The diagnostic to be queried.
+ *
+ * \param Disable If non-NULL, will be set to the option that disables this
+ * diagnostic (if any).
+ *
+ * \returns A string that contains the command-line option used to enable this
+ * warning, such as "-Wconversion" or "-pedantic".
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticOption(CXDiagnostic Diag,
+ CXString *Disable);
+
+/**
+ * \brief Retrieve the category number for this diagnostic.
+ *
+ * Diagnostics can be categorized into groups along with other, related
+ * diagnostics (e.g., diagnostics under the same warning flag). This routine
+ * retrieves the category number for the given diagnostic.
+ *
+ * \returns The number of the category that contains this diagnostic, or zero
+ * if this diagnostic is uncategorized.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
+
+/**
+ * \brief Retrieve the name of a particular diagnostic category.
+ *
+ * \param Category A diagnostic category number, as returned by
+ * \c clang_getDiagnosticCategory().
+ *
+ * \returns The name of the given diagnostic category.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticCategoryName(unsigned Category);
+
+/**
* \brief Determine the number of source ranges associated with the given
* diagnostic.
*/
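
A short sketch combining the new display flags with the category query, where
TU is assumed to be a previously parsed CXTranslationUnit (see the parsing
sketch earlier in this header):

    #include <clang-c/Index.h>
    #include <stdio.h>

    void printDiagnostics(CXTranslationUnit TU) {
      unsigned Opts = clang_defaultDiagnosticDisplayOptions() |
                      CXDiagnostic_DisplayOption |
                      CXDiagnostic_DisplayCategoryName;
      unsigned i, n;
      for (i = 0, n = clang_getNumDiagnostics(TU); i != n; ++i) {
        CXDiagnostic D = clang_getDiagnostic(TU, i);
        CXString S = clang_formatDiagnostic(D, Opts);
        fprintf(stderr, "%s [category %u]\n", clang_getCString(S),
                clang_getDiagnosticCategory(D));
        clang_disposeString(S);
        clang_disposeDiagnostic(D);
      }
    }
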
@@ -621,9 +705,20 @@ clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
* '-fsyntax-only'
* '-o <output file>' (both '-o' and '<output file>' are ignored)
*
+ * \param CIdx The index object with which the translation unit will be
+ * associated.
*
* \param source_filename - The name of the source file to load, or NULL if the
- * source file is included in clang_command_line_args.
+ * source file is included in \p clang_command_line_args.
+ *
+ * \param num_clang_command_line_args The number of command-line arguments in
+ * \p clang_command_line_args.
+ *
+ * \param clang_command_line_args The command-line arguments that would be
+ * passed to the \c clang executable if it were being invoked out-of-process.
+ * These command-line options will be parsed and will affect how the translation
+ * unit is parsed. Note that the following options are ignored: '-c',
+ * '-emit-ast', '-fsyntax-only' (which is the default), and '-o <output file>'.
*
* \param num_unsaved_files the number of unsaved file entries in \p
* unsaved_files.
@@ -633,13 +728,6 @@ clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
* those files. The contents and name of these files (as specified by
* CXUnsavedFile) are copied when necessary, so the client only needs to
* guarantee their validity until the call to this function returns.
- *
- * \param diag_callback callback function that will receive any diagnostics
- * emitted while processing this source file. If NULL, diagnostics will be
- * suppressed.
- *
- * \param diag_client_data client data that will be passed to the diagnostic
- * callback function.
*/
CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnitFromSourceFile(
CXIndex CIdx,
@@ -718,7 +806,22 @@ enum CXTranslationUnit_Flags {
* introduces some overhead to reparsing but improves the performance of
* code-completion operations.
*/
- CXTranslationUnit_CacheCompletionResults = 0x08
+ CXTranslationUnit_CacheCompletionResults = 0x08,
+ /**
+ * \brief Enable precompiled preambles in C++.
+ *
+ * Note: this is a *temporary* option that is available only while
+ * we are testing C++ precompiled preamble support.
+ */
+ CXTranslationUnit_CXXPrecompiledPreamble = 0x10,
+
+ /**
+ * \brief Enable chained precompiled preambles in C++.
+ *
+ * Note: this is a *temporary* option that is available only while
+ * we are testing C++ precompiled preamble support.
+ */
+ CXTranslationUnit_CXXChainedPCH = 0x20
};
/**
@@ -749,7 +852,7 @@ CINDEX_LINKAGE unsigned clang_defaultEditingTranslationUnitOptions(void);
* associated.
*
* \param source_filename The name of the source file to load, or NULL if the
- * source file is included in \p clang_command_line_args.
+ * source file is included in \p command_line_args.
*
* \param command_line_args The command-line arguments that would be
* passed to the \c clang executable if it were being invoked out-of-process.
@@ -1000,7 +1103,6 @@ enum CXCursorKind {
CXCursor_UsingDirective = 34,
/** \brief A using declaration. */
CXCursor_UsingDeclaration = 35,
-
CXCursor_FirstDecl = CXCursor_UnexposedDecl,
CXCursor_LastDecl = CXCursor_UsingDeclaration,
@@ -1027,15 +1129,75 @@ enum CXCursorKind {
CXCursor_TypeRef = 43,
CXCursor_CXXBaseSpecifier = 44,
/**
- * \brief A reference to a class template, function template, or template
- * template parameter.
+ * \brief A reference to a class template, function template, template
+ * template parameter, or class template partial specialization.
*/
CXCursor_TemplateRef = 45,
/**
* \brief A reference to a namespace or namespace alias.
*/
CXCursor_NamespaceRef = 46,
- CXCursor_LastRef = CXCursor_NamespaceRef,
+ /**
+ * \brief A reference to a member of a struct, union, or class that occurs in
+ * some non-expression context, e.g., a designated initializer.
+ */
+ CXCursor_MemberRef = 47,
+ /**
+ * \brief A reference to a labeled statement.
+ *
+ * This cursor kind is used to describe the jump to "start_over" in the
+ * goto statement in the following example:
+ *
+ * \code
+ * start_over:
+ * ++counter;
+ *
+ * goto start_over;
+ * \endcode
+ *
+ * A label reference cursor refers to a label statement.
+ */
+ CXCursor_LabelRef = 48,
+
+ /**
+ * \brief A reference to a set of overloaded functions or function templates
+ * that has not yet been resolved to a specific function or function template.
+ *
+ * An overloaded declaration reference cursor occurs in C++ templates where
+ * a dependent name refers to a function. For example:
+ *
+ * \code
+ * template<typename T> void swap(T&, T&);
+ *
+ * struct X { ... };
+ * void swap(X&, X&);
+ *
+ * template<typename T>
+ * void reverse(T* first, T* last) {
+ * while (first < last - 1) {
+ * swap(*first, *--last);
+ * ++first;
+ * }
+ * }
+ *
+ * struct Y { };
+ * void swap(Y&, Y&);
+ * \endcode
+ *
+ * Here, the identifier "swap" is associated with an overloaded declaration
+ * reference. In the template definition, "swap" refers to either of the two
+ * "swap" functions declared above, so both results will be available. At
+ * instantiation time, "swap" may also refer to other functions found via
+ * argument-dependent lookup (e.g., the "swap" function at the end of the
+ * example).
+ *
+ * The functions \c clang_getNumOverloadedDecls() and
+ * \c clang_getOverloadedDecl() can be used to retrieve the definitions
+ * referenced by this cursor.
+ */
+ CXCursor_OverloadedDeclRef = 49,
+
+ CXCursor_LastRef = CXCursor_OverloadedDeclRef,
/* Error conditions */
CXCursor_FirstInvalid = 70,
@@ -1095,7 +1257,21 @@ enum CXCursorKind {
* reported.
*/
CXCursor_UnexposedStmt = 200,
- CXCursor_LastStmt = 200,
+
+ /** \brief A labelled statement in a function.
+ *
+ * This cursor kind is used to describe the "start_over:" label statement in
+ * the following example:
+ *
+ * \code
+ * start_over:
+ * ++counter;
+ * \endcode
+ *
+ */
+ CXCursor_LabelStmt = 201,
+
+ CXCursor_LastStmt = CXCursor_LabelStmt,
/**
* \brief Cursor that represents the translation unit itself.
@@ -1122,8 +1298,9 @@ enum CXCursorKind {
CXCursor_PreprocessingDirective = 500,
CXCursor_MacroDefinition = 501,
CXCursor_MacroInstantiation = 502,
+ CXCursor_InclusionDirective = 503,
CXCursor_FirstPreprocessing = CXCursor_PreprocessingDirective,
- CXCursor_LastPreprocessing = CXCursor_MacroInstantiation
+ CXCursor_LastPreprocessing = CXCursor_InclusionDirective
};
/**
@@ -1174,6 +1351,11 @@ CINDEX_LINKAGE CXCursor clang_getTranslationUnitCursor(CXTranslationUnit);
CINDEX_LINKAGE unsigned clang_equalCursors(CXCursor, CXCursor);
/**
+ * \brief Compute a hash value for the given cursor.
+ */
+CINDEX_LINKAGE unsigned clang_hashCursor(CXCursor);
+
+/**
* \brief Retrieve the kind of the given cursor.
*/
CINDEX_LINKAGE enum CXCursorKind clang_getCursorKind(CXCursor);
@@ -1278,6 +1460,167 @@ CINDEX_LINKAGE enum CXLanguageKind {
*/
CINDEX_LINKAGE enum CXLanguageKind clang_getCursorLanguage(CXCursor cursor);
+
+/**
+ * \brief A fast container representing a set of CXCursors.
+ */
+typedef struct CXCursorSetImpl *CXCursorSet;
+
+/**
+ * \brief Creates an empty CXCursorSet.
+ */
+CINDEX_LINKAGE CXCursorSet clang_createCXCursorSet();
+
+/**
+ * \brief Disposes a CXCursorSet and releases its associated memory.
+ */
+CINDEX_LINKAGE void clang_disposeCXCursorSet(CXCursorSet cset);
+
+/**
+ * \brief Queries a CXCursorSet to see if it contains a specific CXCursor.
+ *
+ * \returns non-zero if the set contains the specified cursor.
+*/
+CINDEX_LINKAGE unsigned clang_CXCursorSet_contains(CXCursorSet cset,
+ CXCursor cursor);
+
+/**
+ * \brief Inserts a CXCursor into a CXCursorSet.
+ *
+ * \returns zero if the CXCursor was already in the set, and non-zero otherwise.
+*/
+CINDEX_LINKAGE unsigned clang_CXCursorSet_insert(CXCursorSet cset,
+ CXCursor cursor);
+
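
One possible use of the CXCursorSet API above: visiting each distinct cursor
in a client-supplied array exactly once. The visit callback is a stand-in for
client code, not part of libclang:

    #include <clang-c/Index.h>

    void visitOnce(const CXCursor *Cursors, unsigned NumCursors,
                   void (*visit)(CXCursor)) {
      CXCursorSet Seen = clang_createCXCursorSet();
      unsigned i;
      for (i = 0; i != NumCursors; ++i)
        if (clang_CXCursorSet_insert(Seen, Cursors[i])) /* non-zero: new */
          visit(Cursors[i]);
      clang_disposeCXCursorSet(Seen);
    }
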
+/**
+ * \brief Determine the semantic parent of the given cursor.
+ *
+ * The semantic parent of a cursor is the cursor that semantically contains
+ * the given \p cursor. For many declarations, the lexical and semantic parents
+ * are equivalent (the lexical parent is returned by
+ * \c clang_getCursorLexicalParent()). They diverge when declarations or
+ * definitions are provided out-of-line. For example:
+ *
+ * \code
+ * class C {
+ * void f();
+ * };
+ *
+ * void C::f() { }
+ * \endcode
+ *
+ * In the out-of-line definition of \c C::f, the semantic parent is the
+ * class \c C, of which this function is a member. The lexical parent is
+ * the place where the declaration actually occurs in the source code; in this
+ * case, the definition occurs in the translation unit. In general, the
+ * lexical parent for a given entity can change without affecting the semantics
+ * of the program, and the lexical parent of different declarations of the
+ * same entity may be different. Changing the semantic parent of a declaration,
+ * on the other hand, can have a major impact on semantics, and redeclarations
+ * of a particular entity should all have the same semantic context.
+ *
+ * In the example above, both declarations of \c C::f have \c C as their
+ * semantic context, while the lexical context of the first \c C::f is \c C
+ * and the lexical context of the second \c C::f is the translation unit.
+ *
+ * For global declarations, the semantic parent is the translation unit.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorSemanticParent(CXCursor cursor);
+
+/**
+ * \brief Determine the lexical parent of the given cursor.
+ *
+ * The lexical parent of a cursor is the cursor in which the given \p cursor
+ * was actually written. For many declarations, the lexical and semantic parents
+ * are equivalent (the semantic parent is returned by
+ * \c clang_getCursorSemanticParent()). They diverge when declarations or
+ * definitions are provided out-of-line. For example:
+ *
+ * \code
+ * class C {
+ * void f();
+ * };
+ *
+ * void C::f() { }
+ * \endcode
+ *
+ * In the out-of-line definition of \c C::f, the semantic parent is the
+ * class \c C, of which this function is a member. The lexical parent is
+ * the place where the declaration actually occurs in the source code; in this
+ * case, the definition occurs in the translation unit. In general, the
+ * lexical parent for a given entity can change without affecting the semantics
+ * of the program, and the lexical parent of different declarations of the
+ * same entity may be different. Changing the semantic parent of a declaration,
+ * on the other hand, can have a major impact on semantics, and redeclarations
+ * of a particular entity should all have the same semantic context.
+ *
+ * In the example above, both declarations of \c C::f have \c C as their
+ * semantic context, while the lexical context of the first \c C::f is \c C
+ * and the lexical context of the second \c C::f is the translation unit.
+ *
+ * For declarations written in the global scope, the lexical parent is
+ * the translation unit.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorLexicalParent(CXCursor cursor);
+
+/**
+ * \brief Determine the set of methods that are overridden by the given
+ * method.
+ *
+ * In both Objective-C and C++, a method (in C++, a virtual member
+ * function) can override a virtual method in a base class. For
+ * Objective-C, a method is said to override any method in the class's
+ * interface (if we're coming from an implementation), its protocols,
+ * or its categories, that has the same selector and is of the same
+ * kind (class or instance). If no such method exists, the search
+ * continues to the class's superclass, its protocols, and its
+ * categories, and so on.
+ *
+ * For C++, a virtual member function overrides any virtual member
+ * function with the same signature that occurs in its base
+ * classes. With multiple inheritance, a virtual member function can
+ * override several virtual member functions coming from different
+ * base classes.
+ *
+ * In all cases, this function determines only the immediately overridden
+ * methods, rather than all of the overridden methods. For example, if
+ * a method is originally declared in a class A, then overridden in B
+ * (which inherits from A) and also in C (which inherits from B),
+ * then the only overridden method returned from this function when
+ * invoked on C's method will be B's method. The client may then
+ * invoke this function again, given the previously-found overridden
+ * methods, to map out the complete method-override set.
+ *
+ * \param cursor A cursor representing an Objective-C or C++
+ * method. This routine will compute the set of methods that this
+ * method overrides.
+ *
+ * \param overridden A pointer whose pointee will be replaced with a
+ * pointer to an array of cursors, representing the set of overridden
+ * methods. If there are no overridden methods, the pointee will be
+ * set to NULL. The pointee must be freed via a call to
+ * \c clang_disposeOverriddenCursors().
+ *
+ * \param num_overridden A pointer whose pointee will be set to the number
+ * of overridden methods in the array pointed to by \p overridden.
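+ *
+ * A minimal usage sketch (error handling omitted):
+ *
+ * \code
+ * CXCursor *overridden;
+ * unsigned num_overridden;
+ * clang_getOverriddenCursors(cursor, &overridden, &num_overridden);
+ * for (unsigned i = 0; i < num_overridden; ++i) {
+ *   // each overridden[i] is an immediately-overridden method
+ * }
+ * clang_disposeOverriddenCursors(overridden);
+ * \endcode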
+ */
+CINDEX_LINKAGE void clang_getOverriddenCursors(CXCursor cursor,
+ CXCursor **overridden,
+ unsigned *num_overridden);
+
+/**
+ * \brief Free the set of overridden cursors returned by \c
+ * clang_getOverriddenCursors().
+ */
+CINDEX_LINKAGE void clang_disposeOverriddenCursors(CXCursor *overridden);
+
+/**
+ * \brief Retrieve the file that is included by the given inclusion directive
+ * cursor.
+ */
+CINDEX_LINKAGE CXFile clang_getIncludedFile(CXCursor cursor);
+
/**
* @}
*/
@@ -1439,6 +1782,24 @@ CINDEX_LINKAGE unsigned clang_equalTypes(CXType A, CXType B);
CINDEX_LINKAGE CXType clang_getCanonicalType(CXType T);
/**
+ * \determine Determine whether a CXType has the "const" qualifier set,
+ * without looking through typedefs that may have added "const" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isConstQualifiedType(CXType T);
+
+/**
+ * \determine Determine whether a CXType has the "volatile" qualifier set,
+ * without looking through typedefs that may have added "volatile" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isVolatileQualifiedType(CXType T);
+
+/**
+ * \determine Determine whether a CXType has the "restrict" qualifier set,
+ * without looking through typedefs that may have added "restrict" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isRestrictQualifiedType(CXType T);
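+
+/**
+ * A sketch combining the qualifier queries above; it assumes \c cursor is
+ * any cursor whose type is of interest, and uses \c clang_getCursorType()
+ * from the existing CXType API:
+ *
+ * \code
+ * CXType T = clang_getCursorType(cursor);
+ * if (clang_isConstQualifiedType(T) && !clang_isVolatileQualifiedType(T)) {
+ *   // T is written 'const' (but not 'volatile') at its outermost level
+ * }
+ * \endcode
+ */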
+
+/**
* \brief For pointer types, returns the type of the pointee.
*
*/
@@ -1449,6 +1810,10 @@ CINDEX_LINKAGE CXType clang_getPointeeType(CXType T);
*/
CINDEX_LINKAGE CXCursor clang_getTypeDeclaration(CXType T);
+/**
+ * \brief Returns the Objective-C type encoding for the specified declaration.
+ */
+CINDEX_LINKAGE CXString clang_getDeclObjCTypeEncoding(CXCursor C);
/**
* \brief Retrieve the spelling of a given CXTypeKind.
@@ -1496,6 +1861,34 @@ enum CX_CXXAccessSpecifier {
CINDEX_LINKAGE enum CX_CXXAccessSpecifier clang_getCXXAccessSpecifier(CXCursor);
/**
+ * \brief Determine the number of overloaded declarations referenced by a
+ * \c CXCursor_OverloadedDeclRef cursor.
+ *
+ * \param cursor The cursor whose overloaded declarations are being queried.
+ *
+ * \returns The number of overloaded declarations referenced by \c cursor. If it
+ * is not a \c CXCursor_OverloadedDeclRef cursor, returns 0.
+ */
+CINDEX_LINKAGE unsigned clang_getNumOverloadedDecls(CXCursor cursor);
+
+/**
+ * \brief Retrieve a cursor for one of the overloaded declarations referenced
+ * by a \c CXCursor_OverloadedDeclRef cursor.
+ *
+ * \param cursor The cursor whose overloaded declarations are being queried.
+ *
+ * \param index The zero-based index into the set of overloaded declarations in
+ * the cursor.
+ *
+ * \returns A cursor representing the declaration referenced by the given
+ * \c cursor at the specified \c index. If the cursor does not have an
+ * associated set of overloaded declarations, or if the index is out of bounds,
+ * returns \c clang_getNullCursor().
+ */
+CINDEX_LINKAGE CXCursor clang_getOverloadedDecl(CXCursor cursor,
+ unsigned index);
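+
+/**
+ * A sketch of enumerating an overload set (it assumes \c cursor is a
+ * \c CXCursor_OverloadedDeclRef cursor):
+ *
+ * \code
+ * unsigned N = clang_getNumOverloadedDecls(cursor);
+ * for (unsigned i = 0; i < N; ++i) {
+ *   CXCursor D = clang_getOverloadedDecl(cursor, i);
+ *   // inspect each referenced declaration D
+ * }
+ * \endcode
+ */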
+
+/**
* @}
*/
@@ -1591,6 +1984,29 @@ typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor,
CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
CXCursorVisitor visitor,
CXClientData client_data);
+#ifdef __has_feature
+# if __has_feature(blocks)
+/**
+ * \brief Visitor invoked for each cursor found by a traversal.
+ *
+ * This visitor block will be invoked for each cursor found by
+ * clang_visitChildrenWithBlock(). Its first argument is the cursor being
+ * visited, its second argument is the parent cursor of that cursor.
+ *
+ * The visitor should return one of the \c CXChildVisitResult values
+ * to direct clang_visitChildrenWithBlock().
+ */
+typedef enum CXChildVisitResult
+ (^CXCursorVisitorBlock)(CXCursor cursor, CXCursor parent);
+
+/**
+ * \brief Visits the children of a cursor using the specified block. Behaves
+ * identically to clang_visitChildren() in all other respects.
+ */
+CINDEX_LINKAGE unsigned clang_visitChildrenWithBlock(CXCursor parent,
+                                              CXCursorVisitorBlock block);
+# endif
+#endif
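+
+/* A sketch of a block-based traversal (valid only where the 'blocks'
+   feature is available; 'root' is assumed to be a translation-unit or
+   declaration cursor):
+
+     clang_visitChildrenWithBlock(root,
+       ^(CXCursor c, CXCursor parent) {
+         return CXChildVisit_Recurse; // visit the whole subtree
+       });
+*/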
/**
* @}
@@ -1664,6 +2080,15 @@ CINDEX_LINKAGE CXString clang_constructUSR_ObjCProperty(const char *property,
*/
CINDEX_LINKAGE CXString clang_getCursorSpelling(CXCursor);
+/**
+ * \brief Retrieve the display name for the entity referenced by this cursor.
+ *
+ * The display name contains extra information that helps identify the cursor,
+ * such as the parameters of a function or template or the arguments of a
+ * class template specialization.
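+ *
+ * A sketch of retrieving (and releasing) the display name, using the
+ * existing \c clang_disposeString() cleanup routine:
+ *
+ * \code
+ * CXString name = clang_getCursorDisplayName(cursor);
+ * // e.g. "f(int, float)" where clang_getCursorSpelling() yields just "f"
+ * clang_disposeString(name);
+ * \endcode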
+ */
+CINDEX_LINKAGE CXString clang_getCursorDisplayName(CXCursor);
+
/** \brief For a cursor that is a reference, retrieve a cursor representing the
* entity that it references.
*
@@ -1713,6 +2138,32 @@ CINDEX_LINKAGE CXCursor clang_getCursorDefinition(CXCursor);
CINDEX_LINKAGE unsigned clang_isCursorDefinition(CXCursor);
/**
+ * \brief Retrieve the canonical cursor corresponding to the given cursor.
+ *
+ * In the C family of languages, many kinds of entities can be declared several
+ * times within a single translation unit. For example, a structure type can
+ * be forward-declared (possibly multiple times) and later defined:
+ *
+ * \code
+ * struct X;
+ * struct X;
+ * struct X {
+ * int member;
+ * };
+ * \endcode
+ *
+ * The declarations and the definition of \c X are represented by three
+ * different cursors, all of which are declarations of the same underlying
+ * entity. One of these cursors is considered the "canonical" cursor, which
+ * is effectively the representative for the underlying entity. One can
+ * determine if two cursors are declarations of the same underlying entity by
+ * comparing their canonical cursors.
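+ *
+ * For example (a sketch; \c A and \c B are assumed to be declaration
+ * cursors):
+ *
+ * \code
+ * if (clang_equalCursors(clang_getCanonicalCursor(A),
+ *                        clang_getCanonicalCursor(B))) {
+ *   // A and B declare the same underlying entity
+ * }
+ * \endcode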
+ *
+ * \returns The canonical cursor for the entity referred to by the given cursor.
+ */
+CINDEX_LINKAGE CXCursor clang_getCanonicalCursor(CXCursor);
+
+/**
* @}
*/
@@ -1939,6 +2390,9 @@ CINDEX_LINKAGE void clang_getDefinitionSpellingAndExtent(CXCursor,
unsigned *endLine,
unsigned *endColumn);
CINDEX_LINKAGE void clang_enableStackTraces(void);
+CINDEX_LINKAGE void clang_executeOnThread(void (*fn)(void*), void *user_data,
+ unsigned stack_size);
+
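+/* A sketch of running a callback on a freshly created thread. Treating a
+   stack_size of 0 as "use the default stack size" is an assumption here:
+
+     static void work(void *user_data) { (void)user_data; }
+
+     clang_executeOnThread(work, NULL, 0);
+*/
+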
/**
* @}
*/
@@ -2243,7 +2697,7 @@ clang_getCompletionAvailability(CXCompletionString completion_string);
* \brief Contains the results of code-completion.
*
* This data structure contains the results of code completion, as
- * produced by \c clang_codeComplete. Its contents must be freed by
+ * produced by \c clang_codeCompleteAt(). Its contents must be freed by
* \c clang_disposeCodeCompleteResults.
*/
typedef struct {
@@ -2260,99 +2714,6 @@ typedef struct {
} CXCodeCompleteResults;
/**
- * \brief Perform code completion at a given location in a source file.
- *
- * This function performs code completion at a particular file, line, and
- * column within source code, providing results that suggest potential
- * code snippets based on the context of the completion. The basic model
- * for code completion is that Clang will parse a complete source file,
- * performing syntax checking up to the location where code-completion has
- * been requested. At that point, a special code-completion token is passed
- * to the parser, which recognizes this token and determines, based on the
- * current location in the C/Objective-C/C++ grammar and the state of
- * semantic analysis, what completions to provide. These completions are
- * returned via a new \c CXCodeCompleteResults structure.
- *
- * Code completion itself is meant to be triggered by the client when the
- * user types punctuation characters or whitespace, at which point the
- * code-completion location will coincide with the cursor. For example, if \c p
- * is a pointer, code-completion might be triggered after the "-" and then
- * after the ">" in \c p->. When the code-completion location is afer the ">",
- * the completion results will provide, e.g., the members of the struct that
- * "p" points to. The client is responsible for placing the cursor at the
- * beginning of the token currently being typed, then filtering the results
- * based on the contents of the token. For example, when code-completing for
- * the expression \c p->get, the client should provide the location just after
- * the ">" (e.g., pointing at the "g") to this code-completion hook. Then, the
- * client can filter the results based on the current token text ("get"), only
- * showing those results that start with "get". The intent of this interface
- * is to separate the relatively high-latency acquisition of code-completion
- * results from the filtering of results on a per-character basis, which must
- * have a lower latency.
- *
- * \param CIdx the \c CXIndex instance that will be used to perform code
- * completion.
- *
- * \param source_filename the name of the source file that should be parsed to
- * perform code-completion. This source file must be the same as or include the
- * filename described by \p complete_filename, or no code-completion results
- * will be produced. NOTE: One can also specify NULL for this argument if the
- * source file is included in command_line_args.
- *
- * \param num_command_line_args the number of command-line arguments stored in
- * \p command_line_args.
- *
- * \param command_line_args the command-line arguments to pass to the Clang
- * compiler to build the given source file. This should include all of the
- * necessary include paths, language-dialect switches, precompiled header
- * includes, etc., but should not include any information specific to
- * code completion.
- *
- * \param num_unsaved_files the number of unsaved file entries in \p
- * unsaved_files.
- *
- * \param unsaved_files the files that have not yet been saved to disk
- * but may be required for code completion, including the contents of
- * those files. The contents and name of these files (as specified by
- * CXUnsavedFile) are copied when necessary, so the client only needs to
- * guarantee their validity until the call to this function returns.
- *
- * \param complete_filename the name of the source file where code completion
- * should be performed. In many cases, this name will be the same as the
- * source filename. However, the completion filename may also be a file
- * included by the source file, which is required when producing
- * code-completion results for a header.
- *
- * \param complete_line the line at which code-completion should occur.
- *
- * \param complete_column the column at which code-completion should occur.
- * Note that the column should point just after the syntactic construct that
- * initiated code completion, and not in the middle of a lexical token.
- *
- * \param diag_callback callback function that will receive any diagnostics
- * emitted while processing this source file. If NULL, diagnostics will be
- * suppressed.
- *
- * \param diag_client_data client data that will be passed to the diagnostic
- * callback function.
- *
- * \returns if successful, a new CXCodeCompleteResults structure
- * containing code-completion results, which should eventually be
- * freed with \c clang_disposeCodeCompleteResults(). If code
- * completion fails, returns NULL.
- */
-CINDEX_LINKAGE
-CXCodeCompleteResults *clang_codeComplete(CXIndex CIdx,
- const char *source_filename,
- int num_command_line_args,
- const char * const *command_line_args,
- unsigned num_unsaved_files,
- struct CXUnsavedFile *unsaved_files,
- const char *complete_filename,
- unsigned complete_line,
- unsigned complete_column);
-
-/**
* \brief Flags that can be passed to \c clang_codeCompleteAt() to
* modify its behavior.
*
@@ -2510,12 +2871,6 @@ CXDiagnostic clang_codeCompleteGetDiagnostic(CXCodeCompleteResults *Results,
*/
CINDEX_LINKAGE CXString clang_getClangVersion();
-/**
- * \brief Return a version string, suitable for showing to a user, but not
- * intended to be parsed (the format is not guaranteed to be stable).
- */
-
-
/**
* \brief Visitor invoked for each file in a translation unit
* (used with clang_getInclusions()).
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
index 84833c0..08ee4ef 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
@@ -19,6 +19,7 @@ namespace clang {
class CXXRecordDecl;
class DeclGroupRef;
class HandleTagDeclDefinition;
+ class ASTMutationListener;
class ASTDeserializationListener; // layering violation because void* is ugly
class SemaConsumer; // layering violation required for safe SemaConsumer
class TagDecl;
@@ -86,10 +87,13 @@ public:
/// it was actually used.
virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {}
+ /// \brief If the consumer is interested in entities getting modified after
+ /// their initial creation, it should return a pointer to
+ /// an ASTMutationListener here.
+ virtual ASTMutationListener *GetASTMutationListener() { return 0; }
+
/// \brief If the consumer is interested in entities being deserialized from
/// AST files, it should return a pointer to a ASTDeserializationListener here
- ///
- /// The return type is void* because ASTDS lives in Frontend.
virtual ASTDeserializationListener *GetASTDeserializationListener() { return 0; }
/// PrintStats - If desired, print any statistics.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
index ae4ee94..0e88713 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -45,6 +45,7 @@ namespace clang {
class Diagnostic;
class Expr;
class ExternalASTSource;
+ class ASTMutationListener;
class IdentifierTable;
class SelectorTable;
class SourceManager;
@@ -56,6 +57,7 @@ namespace clang {
class CXXRecordDecl;
class Decl;
class FieldDecl;
+ class MangleContext;
class ObjCIvarDecl;
class ObjCIvarRefExpr;
class ObjCPropertyDecl;
@@ -78,49 +80,61 @@ namespace clang {
class ASTContext {
ASTContext &this_() { return *this; }
- std::vector<Type*> Types;
- llvm::FoldingSet<ExtQuals> ExtQualNodes;
- llvm::FoldingSet<ComplexType> ComplexTypes;
- llvm::FoldingSet<PointerType> PointerTypes;
- llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
- llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
- llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes;
- llvm::FoldingSet<MemberPointerType> MemberPointerTypes;
- llvm::FoldingSet<ConstantArrayType> ConstantArrayTypes;
- llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
- std::vector<VariableArrayType*> VariableArrayTypes;
- llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
- llvm::FoldingSet<DependentSizedExtVectorType> DependentSizedExtVectorTypes;
- llvm::FoldingSet<VectorType> VectorTypes;
- llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
- llvm::FoldingSet<FunctionProtoType> FunctionProtoTypes;
- llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
- llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
- llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
- llvm::FoldingSet<SubstTemplateTypeParmType> SubstTemplateTypeParmTypes;
- llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
+ mutable std::vector<Type*> Types;
+ mutable llvm::FoldingSet<ExtQuals> ExtQualNodes;
+ mutable llvm::FoldingSet<ComplexType> ComplexTypes;
+ mutable llvm::FoldingSet<PointerType> PointerTypes;
+ mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
+ mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
+ mutable llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes;
+ mutable llvm::FoldingSet<MemberPointerType> MemberPointerTypes;
+ mutable llvm::FoldingSet<ConstantArrayType> ConstantArrayTypes;
+ mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
+ mutable std::vector<VariableArrayType*> VariableArrayTypes;
+ mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
+ mutable llvm::FoldingSet<DependentSizedExtVectorType>
+ DependentSizedExtVectorTypes;
+ mutable llvm::FoldingSet<VectorType> VectorTypes;
+ mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
+ mutable llvm::FoldingSet<FunctionProtoType> FunctionProtoTypes;
+ mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
+ mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
+ mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
+ mutable llvm::FoldingSet<SubstTemplateTypeParmType>
+ SubstTemplateTypeParmTypes;
+ mutable llvm::FoldingSet<SubstTemplateTypeParmPackType>
+ SubstTemplateTypeParmPackTypes;
+ mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
TemplateSpecializationTypes;
- llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
- llvm::FoldingSet<DependentNameType> DependentNameTypes;
- llvm::ContextualFoldingSet<DependentTemplateSpecializationType, ASTContext&>
+ mutable llvm::FoldingSet<ParenType> ParenTypes;
+ mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
+ mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
+ mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType,
+ ASTContext&>
DependentTemplateSpecializationTypes;
- llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
- llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
-
- llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
- llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
-
+ llvm::FoldingSet<PackExpansionType> PackExpansionTypes;
+ mutable llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
+ mutable llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
+ llvm::FoldingSet<AttributedType> AttributedTypes;
+
+ mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
+ mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
+ mutable llvm::FoldingSet<SubstTemplateTemplateParmPackStorage>
+ SubstTemplateTemplateParmPacks;
+
/// \brief The set of nested name specifiers.
///
/// This set is managed by the NestedNameSpecifier class.
- llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
- NestedNameSpecifier *GlobalNestedNameSpecifier;
+ mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
+ mutable NestedNameSpecifier *GlobalNestedNameSpecifier;
friend class NestedNameSpecifier;
/// ASTRecordLayouts - A cache mapping from RecordDecls to ASTRecordLayouts.
/// This is lazily created. This is intentionally not serialized.
- llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*> ASTRecordLayouts;
- llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*> ObjCLayouts;
+ mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>
+ ASTRecordLayouts;
+ mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
+ ObjCLayouts;
/// KeyFunctions - A cache mapping from CXXRecordDecls to key functions.
llvm::DenseMap<const CXXRecordDecl*, const CXXMethodDecl*> KeyFunctions;
@@ -128,6 +142,9 @@ class ASTContext {
/// \brief Mapping from ObjCContainers to their ObjCImplementations.
llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls;
+ /// \brief Mapping from __block VarDecls to their copy initialization expr.
+ llvm::DenseMap<const VarDecl*, Expr*> BlockVarCopyInits;
+
/// \brief Representation of a "canonical" template template parameter that
/// is used in canonical template names.
class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
@@ -144,10 +161,11 @@ class ASTContext {
static void Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *Parm);
};
- llvm::FoldingSet<CanonicalTemplateTemplateParm> CanonTemplateTemplateParms;
+ mutable llvm::FoldingSet<CanonicalTemplateTemplateParm>
+ CanonTemplateTemplateParms;
- TemplateTemplateParmDecl *getCanonicalTemplateTemplateParmDecl(
- TemplateTemplateParmDecl *TTP);
+ TemplateTemplateParmDecl *
+ getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const;
/// \brief Whether __[u]int128_t identifier is installed.
bool IsInt128Installed;
@@ -171,11 +189,11 @@ class ASTContext {
QualType ObjCClassTypedefType;
QualType ObjCConstantStringType;
- RecordDecl *CFConstantStringTypeDecl;
+ mutable RecordDecl *CFConstantStringTypeDecl;
- RecordDecl *NSConstantStringTypeDecl;
+ mutable RecordDecl *NSConstantStringTypeDecl;
- RecordDecl *ObjCFastEnumerationStateTypeDecl;
+ mutable RecordDecl *ObjCFastEnumerationStateTypeDecl;
/// \brief The type for the C FILE type.
TypeDecl *FILEDecl;
@@ -187,10 +205,13 @@ class ASTContext {
TypeDecl *sigjmp_bufDecl;
/// \brief Type for the Block descriptor for Blocks CodeGen.
- RecordDecl *BlockDescriptorType;
+ mutable RecordDecl *BlockDescriptorType;
/// \brief Type for the Block descriptor for Blocks CodeGen.
- RecordDecl *BlockDescriptorExtendedType;
+ mutable RecordDecl *BlockDescriptorExtendedType;
+
+ /// \brief Declaration for the CUDA cudaConfigureCall function.
+ FunctionDecl *cudaConfigureCallDecl;
TypeSourceInfo NullTypeSourceInfo;
@@ -279,7 +300,7 @@ class ASTContext {
///
/// AST objects are never destructed; rather, all memory associated with the
/// AST objects will be released when the ASTContext itself is destroyed.
- llvm::BumpPtrAllocator BumpAlloc;
+ mutable llvm::BumpPtrAllocator BumpAlloc;
/// \brief Allocator for partial diagnostics.
PartialDiagnostic::StorageAllocator DiagAllocator;
@@ -287,14 +308,17 @@ class ASTContext {
/// \brief The current C++ ABI.
llvm::OwningPtr<CXXABI> ABI;
CXXABI *createCXXABI(const TargetInfo &T);
-
+
+ friend class ASTDeclReader;
+
public:
const TargetInfo &Target;
IdentifierTable &Idents;
SelectorTable &Selectors;
Builtin::Context &BuiltinInfo;
- DeclarationNameTable DeclarationNames;
+ mutable DeclarationNameTable DeclarationNames;
llvm::OwningPtr<ExternalASTSource> ExternalSource;
+ ASTMutationListener *Listener;
clang::PrintingPolicy PrintingPolicy;
// Typedefs which may be provided defining the structure of Objective-C
@@ -305,10 +329,10 @@ public:
SourceManager& getSourceManager() { return SourceMgr; }
const SourceManager& getSourceManager() const { return SourceMgr; }
- void *Allocate(unsigned Size, unsigned Align = 8) {
+ void *Allocate(unsigned Size, unsigned Align = 8) const {
return BumpAlloc.Allocate(Size, Align);
}
- void Deallocate(void *Ptr) { }
+ void Deallocate(void *Ptr) const { }
PartialDiagnostic::StorageAllocator &getDiagAllocator() {
return DiagAllocator;
@@ -316,6 +340,8 @@ public:
const LangOptions& getLangOptions() const { return LangOpts; }
+ Diagnostic &getDiagnostics() const;
+
FullSourceLoc getFullLoc(SourceLocation Loc) const {
return FullSourceLoc(Loc,SourceMgr);
}
@@ -388,7 +414,6 @@ public:
CanQualType VoidPtrTy, NullPtrTy;
CanQualType OverloadTy;
CanQualType DependentTy;
- CanQualType UndeducedAutoTy;
CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy;
ASTContext(const LangOptions& LOpts, SourceManager &SM, const TargetInfo &t,
@@ -409,6 +434,19 @@ public:
/// with this AST context, if any.
ExternalASTSource *getExternalSource() const { return ExternalSource.get(); }
+ /// \brief Attach an AST mutation listener to the AST context.
+ ///
+ /// The AST mutation listener provides the ability to track modifications to
+ /// the abstract syntax tree entities committed after they were initially
+ /// created.
+ void setASTMutationListener(ASTMutationListener *Listener) {
+ this->Listener = Listener;
+ }
+
+ /// \brief Retrieve a pointer to the AST mutation listener associated
+ /// with this AST context, if any.
+ ASTMutationListener *getASTMutationListener() const { return Listener; }
+
void PrintStats() const;
const std::vector<Type*>& getTypes() const { return Types; }
@@ -418,9 +456,9 @@ public:
private:
/// getExtQualType - Return a type with extended qualifiers.
- QualType getExtQualType(const Type *Base, Qualifiers Quals);
+ QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
- QualType getTypeDeclTypeSlow(const TypeDecl *Decl);
+ QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
public:
/// getAddSpaceQualType - Return the uniqued reference to the type for an
@@ -428,24 +466,26 @@ public:
/// The resulting type has a union of the qualifiers from T and the address
/// space. If T already has an address space specifier, it is silently
/// replaced.
- QualType getAddrSpaceQualType(QualType T, unsigned AddressSpace);
+ QualType getAddrSpaceQualType(QualType T, unsigned AddressSpace) const;
/// getObjCGCQualType - Returns the uniqued reference to the type for an
/// objc gc qualified type. The resulting type has a union of the qualifiers
/// from T and the gc attribute.
- QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr);
+ QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const;
/// getRestrictType - Returns the uniqued reference to the type for a
/// 'restrict' qualified type. The resulting type has a union of the
/// qualifiers from T and 'restrict'.
- QualType getRestrictType(QualType T) {
+ QualType getRestrictType(QualType T) const {
return T.withFastQualifiers(Qualifiers::Restrict);
}
/// getVolatileType - Returns the uniqued reference to the type for a
/// 'volatile' qualified type. The resulting type has a union of the
/// qualifiers from T and 'volatile'.
- QualType getVolatileType(QualType T);
+ QualType getVolatileType(QualType T) const {
+ return T.withFastQualifiers(Qualifiers::Volatile);
+ }
/// getConstType - Returns the uniqued reference to the type for a
/// 'const' qualified type. The resulting type has a union of the
@@ -453,44 +493,33 @@ public:
///
/// It can be reasonably expected that this will always be
/// equivalent to calling T.withConst().
- QualType getConstType(QualType T) { return T.withConst(); }
-
- /// getNoReturnType - Add or remove the noreturn attribute to the given type
- /// which must be a FunctionType or a pointer to an allowable type or a
- /// BlockPointer.
- QualType getNoReturnType(QualType T, bool AddNoReturn = true);
-
- /// getCallConvType - Adds the specified calling convention attribute to
- /// the given type, which must be a FunctionType or a pointer to an
- /// allowable type.
- QualType getCallConvType(QualType T, CallingConv CallConv);
+ QualType getConstType(QualType T) const { return T.withConst(); }
- /// getRegParmType - Sets the specified regparm attribute to
- /// the given type, which must be a FunctionType or a pointer to an
- /// allowable type.
- QualType getRegParmType(QualType T, unsigned RegParm);
+ /// adjustFunctionType - Change the ExtInfo on a function type.
+ const FunctionType *adjustFunctionType(const FunctionType *Fn,
+ FunctionType::ExtInfo EInfo);
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
- QualType getComplexType(QualType T);
- CanQualType getComplexType(CanQualType T) {
+ QualType getComplexType(QualType T) const;
+ CanQualType getComplexType(CanQualType T) const {
return CanQualType::CreateUnsafe(getComplexType((QualType) T));
}
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
- QualType getPointerType(QualType T);
- CanQualType getPointerType(CanQualType T) {
+ QualType getPointerType(QualType T) const;
+ CanQualType getPointerType(CanQualType T) const {
return CanQualType::CreateUnsafe(getPointerType((QualType) T));
}
/// getBlockPointerType - Return the uniqued reference to the type for a block
/// of the specified type.
- QualType getBlockPointerType(QualType T);
+ QualType getBlockPointerType(QualType T) const;
/// This gets the struct used to keep track of the descriptor for pointer to
/// blocks.
- QualType getBlockDescriptorType();
+ QualType getBlockDescriptorType() const;
// Set the type for a Block descriptor type.
void setBlockDescriptorType(QualType T);
@@ -503,48 +532,56 @@ public:
/// This gets the struct used to keep track of the extended descriptor for
/// pointer to blocks.
- QualType getBlockDescriptorExtendedType();
+ QualType getBlockDescriptorExtendedType() const;
// Set the type for a Block descriptor extended type.
void setBlockDescriptorExtendedType(QualType T);
/// Get the BlockDescriptorExtendedType type, or NULL if it hasn't yet been
/// built.
- QualType getRawBlockdescriptorExtendedType() {
+ QualType getRawBlockdescriptorExtendedType() const {
if (BlockDescriptorExtendedType)
return getTagDeclType(BlockDescriptorExtendedType);
return QualType();
}
+ void setcudaConfigureCallDecl(FunctionDecl *FD) {
+ cudaConfigureCallDecl = FD;
+ }
+ FunctionDecl *getcudaConfigureCallDecl() {
+ return cudaConfigureCallDecl;
+ }
+
/// This gets the struct used to keep track of pointer to blocks, complete
/// with captured variables.
QualType getBlockParmType(bool BlockHasCopyDispose,
- llvm::SmallVectorImpl<const Expr *> &Layout);
+ llvm::SmallVectorImpl<const Expr *> &Layout) const;
/// This builds the struct used for __block variables.
- QualType BuildByRefType(llvm::StringRef DeclName, QualType Ty);
+ QualType BuildByRefType(llvm::StringRef DeclName, QualType Ty) const;
/// Returns true iff we need copy/dispose helpers for the given type.
- bool BlockRequiresCopying(QualType Ty);
+ bool BlockRequiresCopying(QualType Ty) const;
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
- QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true);
+ QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true)
+ const;
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
- QualType getRValueReferenceType(QualType T);
+ QualType getRValueReferenceType(QualType T) const;
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type in the specified class. The class
/// is a Type because it could be a dependent name.
- QualType getMemberPointerType(QualType T, const Type *Cls);
+ QualType getMemberPointerType(QualType T, const Type *Cls) const;
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType getVariableArrayType(QualType EltTy, Expr *NumElts,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals,
- SourceRange Brackets);
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const;
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
@@ -552,30 +589,34 @@ public:
/// comparable, at some point.
QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals,
- SourceRange Brackets);
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const;
/// getIncompleteArrayType - Returns a unique reference to the type for a
/// incomplete array of the specified element type.
QualType getIncompleteArrayType(QualType EltTy,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals);
+ unsigned IndexTypeQuals) const;
/// getConstantArrayType - Return the unique reference to the type for a
/// constant array of the specified element type.
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals);
+ unsigned IndexTypeQuals) const;
+
+ /// getVariableArrayDecayedType - Returns a VLA type where known sizes
+ /// are replaced with [*].
+ QualType getVariableArrayDecayedType(QualType Ty) const;
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType getVectorType(QualType VectorType, unsigned NumElts,
- VectorType::AltiVecSpecific AltiVecSpec);
+ VectorType::VectorKind VecKind) const;
/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. VectorType must be a built-in
/// type.
- QualType getExtVectorType(QualType VectorType, unsigned NumElts);
+ QualType getExtVectorType(QualType VectorType, unsigned NumElts) const;
/// getDependentSizedExtVectorType - Returns a non-unique reference to
/// the type for a dependently-sized vector of the specified element
@@ -583,30 +624,27 @@ public:
/// comparable, at some point.
QualType getDependentSizedExtVectorType(QualType VectorType,
Expr *SizeExpr,
- SourceLocation AttrLoc);
+ SourceLocation AttrLoc) const;
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
QualType getFunctionNoProtoType(QualType ResultTy,
- const FunctionType::ExtInfo &Info);
+ const FunctionType::ExtInfo &Info) const;
- QualType getFunctionNoProtoType(QualType ResultTy) {
+ QualType getFunctionNoProtoType(QualType ResultTy) const {
return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo());
}
- /// getFunctionType - Return a normal function type with a typed argument
- /// list. isVariadic indicates whether the argument list includes '...'.
- QualType getFunctionType(QualType ResultTy, const QualType *ArgArray,
- unsigned NumArgs, bool isVariadic,
- unsigned TypeQuals, bool hasExceptionSpec,
- bool hasAnyExceptionSpec,
- unsigned NumExs, const QualType *ExArray,
- const FunctionType::ExtInfo &Info);
+ /// getFunctionType - Return a normal function type with a typed
+ /// argument list.
+ QualType getFunctionType(QualType ResultTy,
+ const QualType *Args, unsigned NumArgs,
+ const FunctionProtoType::ExtProtoInfo &EPI) const;
/// getTypeDeclType - Return the unique reference to the type for
/// the specified type declaration.
QualType getTypeDeclType(const TypeDecl *Decl,
- const TypeDecl *PrevDecl = 0) {
+ const TypeDecl *PrevDecl = 0) const {
assert(Decl && "Passed null for Decl param");
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
@@ -621,77 +659,93 @@ public:
/// getTypedefType - Return the unique reference to the type for the
/// specified typename decl.
- QualType getTypedefType(const TypedefDecl *Decl, QualType Canon = QualType());
+ QualType getTypedefType(const TypedefDecl *Decl, QualType Canon = QualType())
+ const;
+
+ QualType getRecordType(const RecordDecl *Decl) const;
- QualType getRecordType(const RecordDecl *Decl);
+ QualType getEnumType(const EnumDecl *Decl) const;
- QualType getEnumType(const EnumDecl *Decl);
+ QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
- QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST);
+ QualType getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType);
QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced,
- QualType Replacement);
+ QualType Replacement) const;
+ QualType getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack);
QualType getTemplateTypeParmType(unsigned Depth, unsigned Index,
bool ParameterPack,
- IdentifierInfo *Name = 0);
+ IdentifierInfo *Name = 0) const;
QualType getTemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs,
- QualType Canon = QualType());
+ QualType Canon = QualType()) const;
QualType getCanonicalTemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
- unsigned NumArgs);
+ unsigned NumArgs) const;
QualType getTemplateSpecializationType(TemplateName T,
const TemplateArgumentListInfo &Args,
- QualType Canon = QualType());
+ QualType Canon = QualType()) const;
TypeSourceInfo *
getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc,
const TemplateArgumentListInfo &Args,
- QualType Canon = QualType());
+ QualType Canon = QualType()) const;
+
+ QualType getParenType(QualType NamedType) const;
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
- QualType NamedType);
+ QualType NamedType) const;
QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
- QualType Canon = QualType());
+ QualType Canon = QualType()) const;
QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
- const TemplateArgumentListInfo &Args);
+ const TemplateArgumentListInfo &Args) const;
QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
unsigned NumArgs,
- const TemplateArgument *Args);
+ const TemplateArgument *Args) const;
+
+ QualType getPackExpansionType(QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions);
- QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl);
+ QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl) const;
QualType getObjCObjectType(QualType Base,
ObjCProtocolDecl * const *Protocols,
- unsigned NumProtocols);
+ unsigned NumProtocols) const;
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type
/// for the given ObjCObjectType.
- QualType getObjCObjectPointerType(QualType OIT);
+ QualType getObjCObjectPointerType(QualType OIT) const;
/// getTypeOfType - GCC extension.
- QualType getTypeOfExprType(Expr *e);
- QualType getTypeOfType(QualType t);
+ QualType getTypeOfExprType(Expr *e) const;
+ QualType getTypeOfType(QualType t) const;
/// getDecltypeType - C++0x decltype.
- QualType getDecltypeType(Expr *e);
+ QualType getDecltypeType(Expr *e) const;
+
+ /// getAutoType - C++0x deduced auto type.
+ QualType getAutoType(QualType DeducedType) const;
/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
- QualType getTagDeclType(const TagDecl *Decl);
+ QualType getTagDeclType(const TagDecl *Decl) const;
/// getSizeType - Return the unique type for "size_t" (C99 7.17), defined
/// in <stddef.h>. The sizeof operator requires this (C99 6.5.3.4p4).
@@ -716,14 +770,14 @@ public:
// getCFConstantStringType - Return the C structure type used to represent
// constant CFStrings.
- QualType getCFConstantStringType();
+ QualType getCFConstantStringType() const;
// getNSConstantStringType - Return the C structure type used to represent
// constant NSStrings.
- QualType getNSConstantStringType();
+ QualType getNSConstantStringType() const;
/// Get the structure type used to represent NSStrings, or NULL
/// if it hasn't yet been built.
- QualType getRawNSConstantStringType() {
+ QualType getRawNSConstantStringType() const {
if (NSConstantStringTypeDecl)
return getTagDeclType(NSConstantStringTypeDecl);
return QualType();
@@ -733,7 +787,7 @@ public:
/// Get the structure type used to represent CFStrings, or NULL
/// if it hasn't yet been built.
- QualType getRawCFConstantStringType() {
+ QualType getRawCFConstantStringType() const {
if (CFConstantStringTypeDecl)
return getTagDeclType(CFConstantStringTypeDecl);
return QualType();
@@ -747,11 +801,11 @@ public:
}
/// This gets the struct used to keep track of fast enumerations.
- QualType getObjCFastEnumerationStateType();
+ QualType getObjCFastEnumerationStateType() const;
/// Get the ObjCFastEnumerationState type, or NULL if it hasn't yet
/// been built.
- QualType getRawObjCFastEnumerationStateType() {
+ QualType getRawObjCFastEnumerationStateType() const {
if (ObjCFastEnumerationStateTypeDecl)
return getTagDeclType(ObjCFastEnumerationStateTypeDecl);
return QualType();
@@ -763,7 +817,7 @@ public:
void setFILEDecl(TypeDecl *FILEDecl) { this->FILEDecl = FILEDecl; }
/// \brief Retrieve the C FILE type.
- QualType getFILEType() {
+ QualType getFILEType() const {
if (FILEDecl)
return getTypeDeclType(FILEDecl);
return QualType();
@@ -775,7 +829,7 @@ public:
}
/// \brief Retrieve the C jmp_buf type.
- QualType getjmp_bufType() {
+ QualType getjmp_bufType() const {
if (jmp_bufDecl)
return getTypeDeclType(jmp_bufDecl);
return QualType();
@@ -787,17 +841,22 @@ public:
}
/// \brief Retrieve the C sigjmp_buf type.
- QualType getsigjmp_bufType() {
+ QualType getsigjmp_bufType() const {
if (sigjmp_bufDecl)
return getTypeDeclType(sigjmp_bufDecl);
return QualType();
}
+ /// \brief The result type of logical operations, '<', '>', '!=', etc.
+ QualType getLogicalOperationType() const {
+ return getLangOptions().CPlusPlus ? BoolTy : IntTy;
+ }
+
/// getObjCEncodingForType - Emit the ObjC type encoding for the
/// given type into \arg S. If \arg NameFields is specified then
/// record field names are also encoded.
void getObjCEncodingForType(QualType t, std::string &S,
- const FieldDecl *Field=0);
+ const FieldDecl *Field=0) const;
void getLegacyIntegralTypeEncoding(QualType &t) const;
@@ -805,13 +864,18 @@ public:
void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
std::string &S) const;
+ /// getObjCEncodingForFunctionDecl - Returns the encoded type for this
+ /// function. This is in the same format as Objective-C method encodings.
+ void getObjCEncodingForFunctionDecl(const FunctionDecl *Decl, std::string& S);
+
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
- void getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, std::string &S);
+ void getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, std::string &S)
+ const;
- /// getObjCEncodingForBlockDecl - Return the encoded type for this block
+ /// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
- void getObjCEncodingForBlock(const BlockExpr *Expr, std::string& S);
+ std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const;
/// getObjCEncodingForPropertyDecl - Return the encoded type for
/// this method declaration. If non-NULL, Container must be either
@@ -819,14 +883,14 @@ public:
/// only be NULL when getting encodings for protocol properties.
void getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
const Decl *Container,
- std::string &S);
+ std::string &S) const;
bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
- ObjCProtocolDecl *rProto);
+ ObjCProtocolDecl *rProto) const;
/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose in characters.
- CharUnits getObjCEncodingTypeSize(QualType t);
+ CharUnits getObjCEncodingTypeSize(QualType t) const;
/// \brief Whether __[u]int128_t identifier is installed.
bool isInt128Installed() const { return IsInt128Installed; }
@@ -854,12 +918,12 @@ public:
/// getCVRQualifiedType - Returns a type with additional const,
/// volatile, or restrict qualifiers.
- QualType getCVRQualifiedType(QualType T, unsigned CVR) {
+ QualType getCVRQualifiedType(QualType T, unsigned CVR) const {
return getQualifiedType(T, Qualifiers::fromCVRMask(CVR));
}
/// getQualifiedType - Returns a type with additional qualifiers.
- QualType getQualifiedType(QualType T, Qualifiers Qs) {
+ QualType getQualifiedType(QualType T, Qualifiers Qs) const {
if (!Qs.hasNonFastQualifiers())
return T.withFastQualifiers(Qs.getFastQualifiers());
QualifierCollector Qc(Qs);
@@ -868,35 +932,41 @@ public:
}
/// getQualifiedType - Returns a type with additional qualifiers.
- QualType getQualifiedType(const Type *T, Qualifiers Qs) {
+ QualType getQualifiedType(const Type *T, Qualifiers Qs) const {
if (!Qs.hasNonFastQualifiers())
return QualType(T, Qs.getFastQualifiers());
return getExtQualType(T, Qs);
}
DeclarationNameInfo getNameForTemplate(TemplateName Name,
- SourceLocation NameLoc);
+ SourceLocation NameLoc) const;
TemplateName getOverloadedTemplateName(UnresolvedSetIterator Begin,
- UnresolvedSetIterator End);
+ UnresolvedSetIterator End) const;
TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
bool TemplateKeyword,
- TemplateDecl *Template);
+ TemplateDecl *Template) const;
TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
- const IdentifierInfo *Name);
+ const IdentifierInfo *Name) const;
TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
- OverloadedOperatorKind Operator);
-
+ OverloadedOperatorKind Operator) const;
+ TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const;
+
enum GetBuiltinTypeError {
GE_None, //< No error
GE_Missing_stdio, //< Missing a type from <stdio.h>
GE_Missing_setjmp //< Missing a type from <setjmp.h>
};
- /// GetBuiltinType - Return the type for the specified builtin.
- QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error);
+ /// GetBuiltinType - Return the type for the specified builtin. If
+ /// IntegerConstantArgs is non-null, it is filled in with a bitmask of
+ /// arguments to the builtin that are required to be integer constant
+ /// expressions.
+ QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs = 0) const;
private:
CanQualType getFromTargetType(unsigned Type) const;
@@ -909,11 +979,12 @@ public:
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
- Qualifiers::GC getObjCGCAttrKind(const QualType &Ty) const;
+ Qualifiers::GC getObjCGCAttrKind(QualType Ty) const;
- /// areCompatibleVectorTypes - Return true if the given vector types either
- /// are of the same unqualified type or if one is GCC and other - equivalent
- /// AltiVec vector type.
+ /// areCompatibleVectorTypes - Return true if the given vector types
+ /// are of the same unqualified type or if they are equivalent to the same
+ /// GCC vector type, ignoring whether they are target-specific (AltiVec or
+ /// Neon) types.
bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec);
/// isObjCNSObjectType - Return true if this is an NSObject object with
@@ -930,76 +1001,83 @@ public:
/// getTypeInfo - Get the size and alignment of the specified complete type in
/// bits.
- std::pair<uint64_t, unsigned> getTypeInfo(const Type *T);
- std::pair<uint64_t, unsigned> getTypeInfo(QualType T) {
+ std::pair<uint64_t, unsigned> getTypeInfo(const Type *T) const;
+ std::pair<uint64_t, unsigned> getTypeInfo(QualType T) const {
return getTypeInfo(T.getTypePtr());
}
/// getTypeSize - Return the size of the specified type, in bits. This method
/// does not work on incomplete types.
- uint64_t getTypeSize(QualType T) {
+ uint64_t getTypeSize(QualType T) const {
return getTypeInfo(T).first;
}
- uint64_t getTypeSize(const Type *T) {
+ uint64_t getTypeSize(const Type *T) const {
return getTypeInfo(T).first;
}
/// getCharWidth - Return the size of the character type, in bits
- uint64_t getCharWidth() {
+ uint64_t getCharWidth() const {
return getTypeSize(CharTy);
}
+ /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+ CharUnits toCharUnitsFromBits(int64_t BitSize) const;
+
+ /// toBits - Convert a size in characters to a size in bits.
+ int64_t toBits(CharUnits CharSize) const;
+
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
- CharUnits getTypeSizeInChars(QualType T);
- CharUnits getTypeSizeInChars(const Type *T);
+ CharUnits getTypeSizeInChars(QualType T) const;
+ CharUnits getTypeSizeInChars(const Type *T) const;
/// getTypeAlign - Return the ABI-specified alignment of a type, in bits.
/// This method does not work on incomplete types.
- unsigned getTypeAlign(QualType T) {
+ unsigned getTypeAlign(QualType T) const {
return getTypeInfo(T).second;
}
- unsigned getTypeAlign(const Type *T) {
+ unsigned getTypeAlign(const Type *T) const {
return getTypeInfo(T).second;
}
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
- CharUnits getTypeAlignInChars(QualType T);
- CharUnits getTypeAlignInChars(const Type *T);
+ CharUnits getTypeAlignInChars(QualType T) const;
+ CharUnits getTypeAlignInChars(const Type *T) const;
- std::pair<CharUnits, CharUnits> getTypeInfoInChars(const Type *T);
- std::pair<CharUnits, CharUnits> getTypeInfoInChars(QualType T);
+ std::pair<CharUnits, CharUnits> getTypeInfoInChars(const Type *T) const;
+ std::pair<CharUnits, CharUnits> getTypeInfoInChars(QualType T) const;
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
- unsigned getPreferredTypeAlign(const Type *T);
+ unsigned getPreferredTypeAlign(const Type *T) const;
/// getDeclAlign - Return a conservative estimate of the alignment of
/// the specified decl. Note that bitfields do not have a valid alignment, so
/// this method will assert on them.
/// If @p RefAsPointee, references are treated like their underlying type
/// (for alignof), else they're treated like pointers (for CodeGen).
- CharUnits getDeclAlign(const Decl *D, bool RefAsPointee = false);
+ CharUnits getDeclAlign(const Decl *D, bool RefAsPointee = false) const;
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
- const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D);
+ const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const;
/// getASTObjCInterfaceLayout - Get or compute information about the
/// layout of the specified Objective-C interface.
- const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D);
+ const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D)
+ const;
- void DumpRecordLayout(const RecordDecl *RD, llvm::raw_ostream &OS);
+ void DumpRecordLayout(const RecordDecl *RD, llvm::raw_ostream &OS) const;
/// getASTObjCImplementationLayout - Get or compute information about
/// the layout of the specified Objective-C implementation. This may
/// differ from the interface if synthesized ivars are present.
const ASTRecordLayout &
- getASTObjCImplementationLayout(const ObjCImplementationDecl *D);
+ getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const;
/// getKeyFunction - Get the key function for the given record decl, or NULL
/// if there isn't one. The key function is, according to the Itanium C++ ABI
@@ -1009,13 +1087,18 @@ public:
/// of class definition.
const CXXMethodDecl *getKeyFunction(const CXXRecordDecl *RD);
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const;
+
+ MangleContext *createMangleContext();
+
void ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
- llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars);
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars)
+ const;
void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass,
- llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars);
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) const;
- unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI);
+ unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI) const;
void CollectInheritedProtocols(const Decl *CDecl,
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols);
@@ -1029,8 +1112,11 @@ public:
/// include typedefs, 'typeof' operators, etc. The returned type is guaranteed
/// to be free of any of these, allowing two canonical types to be compared
/// for exact equality with a simple pointer comparison.
- CanQualType getCanonicalType(QualType T);
- const Type *getCanonicalType(const Type *T) {
+ CanQualType getCanonicalType(QualType T) const {
+ return CanQualType::CreateUnsafe(T.getCanonicalType());
+ }
+
+ const Type *getCanonicalType(const Type *T) const {
return T->getCanonicalTypeInternal().getTypePtr();
}
@@ -1038,7 +1124,7 @@ public:
/// corresponding to the specific potentially non-canonical one.
/// Qualifiers are stripped off, functions are turned into function
/// pointers, and arrays decay one level into pointers.
- CanQualType getCanonicalParamType(QualType T);
+ CanQualType getCanonicalParamType(QualType T) const;
/// \brief Determine whether the given types are equivalent.
bool hasSameType(QualType T1, QualType T2) {
@@ -1062,13 +1148,8 @@ public:
/// \brief Determine whether the given types are equivalent after
/// cvr-qualifiers have been removed.
bool hasSameUnqualifiedType(QualType T1, QualType T2) {
- CanQualType CT1 = getCanonicalType(T1);
- CanQualType CT2 = getCanonicalType(T2);
-
- Qualifiers Quals;
- QualType UnqualT1 = getUnqualifiedArrayType(CT1, Quals);
- QualType UnqualT2 = getUnqualifiedArrayType(CT2, Quals);
- return UnqualT1 == UnqualT2;
+ return getCanonicalType(T1).getTypePtr() ==
+ getCanonicalType(T2).getTypePtr();
}
bool UnwrapSimilarPointerTypes(QualType &T1, QualType &T2);
@@ -1097,11 +1178,15 @@ public:
/// by declarations in the type system and the canonical type for
/// the template type parameter 'T' is template-param-0-0.
NestedNameSpecifier *
- getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS);
+ getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const;
+
+ /// \brief Retrieves the default calling convention to use for
+ /// C++ instance methods.
+ CallingConv getDefaultMethodCallConv();
/// \brief Retrieves the canonical representation of the given
/// calling convention.
- CallingConv getCanonicalCallConv(CallingConv CC) {
+ CallingConv getCanonicalCallConv(CallingConv CC) const {
if (CC == CC_C)
return CC_Default;
return CC;
@@ -1131,7 +1216,7 @@ public:
/// template name uses the shortest form of the dependent
/// nested-name-specifier, which itself contains all canonical
/// types, values, and templates.
- TemplateName getCanonicalTemplateName(TemplateName Name);
+ TemplateName getCanonicalTemplateName(TemplateName Name) const;
/// \brief Determine whether the given template names refer to the same
/// template.
@@ -1142,33 +1227,35 @@ public:
/// The canonical template argument is the simplest template argument
/// (which may be a type, value, expression, or declaration) that
/// expresses the value of the argument.
- TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg);
+ TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg)
+ const;
/// Type Query functions. If the type is an instance of the specified class,
/// return the Type pointer for the underlying maximally pretty type. This
/// is a member of ASTContext because this may need to do some amount of
/// canonicalization, e.g. to move type qualifiers into the element type.
- const ArrayType *getAsArrayType(QualType T);
- const ConstantArrayType *getAsConstantArrayType(QualType T) {
+ const ArrayType *getAsArrayType(QualType T) const;
+ const ConstantArrayType *getAsConstantArrayType(QualType T) const {
return dyn_cast_or_null<ConstantArrayType>(getAsArrayType(T));
}
- const VariableArrayType *getAsVariableArrayType(QualType T) {
+ const VariableArrayType *getAsVariableArrayType(QualType T) const {
return dyn_cast_or_null<VariableArrayType>(getAsArrayType(T));
}
- const IncompleteArrayType *getAsIncompleteArrayType(QualType T) {
+ const IncompleteArrayType *getAsIncompleteArrayType(QualType T) const {
return dyn_cast_or_null<IncompleteArrayType>(getAsArrayType(T));
}
- const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T) {
+ const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T)
+ const {
return dyn_cast_or_null<DependentSizedArrayType>(getAsArrayType(T));
}
/// getBaseElementType - Returns the innermost element type of an array type.
/// For example, it will return "int" for int[m][n].
- QualType getBaseElementType(const ArrayType *VAT);
+ QualType getBaseElementType(const ArrayType *VAT) const;
/// getBaseElementType - Returns the innermost element type of a type
/// (which needn't actually be an array type).
- QualType getBaseElementType(QualType QT);
+ QualType getBaseElementType(QualType QT) const;
/// getConstantArrayElementCount - Returns the number of constant array elements.
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const;
@@ -1179,30 +1266,30 @@ public:
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
- QualType getArrayDecayedType(QualType T);
+ QualType getArrayDecayedType(QualType T) const;
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
- QualType getPromotedIntegerType(QualType PromotableType);
+ QualType getPromotedIntegerType(QualType PromotableType) const;
/// \brief Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
- QualType isPromotableBitField(Expr *E);
+ QualType isPromotableBitField(Expr *E) const;
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
- int getIntegerTypeOrder(QualType LHS, QualType RHS);
+ int getIntegerTypeOrder(QualType LHS, QualType RHS) const;
/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
- int getFloatingTypeOrder(QualType LHS, QualType RHS);
+ int getFloatingTypeOrder(QualType LHS, QualType RHS) const;
/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
/// point or a complex type (based on typeDomain/typeSize).
@@ -1213,7 +1300,7 @@ public:
private:
// Helper for integer ordering
- unsigned getIntegerRank(Type* T);
+ unsigned getIntegerRank(const Type *T) const;
public:
@@ -1260,14 +1347,15 @@ public:
bool Unqualified = false);
QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false,
bool Unqualified = false);
+ QualType mergeFunctionArgumentTypes(QualType, QualType,
+ bool OfBlockPointer=false,
+ bool Unqualified = false);
+ QualType mergeTransparentUnionType(QualType, QualType,
+ bool OfBlockPointer=false,
+ bool Unqualified = false);
QualType mergeObjCGCQualifiers(QualType, QualType);
- /// UsualArithmeticConversionsType - handles the various conversions
- /// that are common to binary operators (C99 6.3.1.8, C++ [expr]p9)
- /// and returns the result type of that conversion.
- QualType UsualArithmeticConversionsType(QualType lhs, QualType rhs);
-
void ResetObjCLayout(const ObjCContainerDecl *CD) {
ObjCLayouts[CD] = 0;
}
@@ -1278,7 +1366,7 @@ public:
// The width of an integer, as defined in C99 6.2.6.2. This is the number
// of bits in an integer type excluding any padding bits.
- unsigned getIntWidth(QualType T);
+ unsigned getIntWidth(QualType T) const;
// Per C99 6.2.5p6, for every signed integer type, there is a corresponding
// unsigned integer type. This method takes a signed type, and returns the
@@ -1303,7 +1391,7 @@ public:
/// MakeIntValue - Make an APSInt of the appropriate width and
/// signedness for the given \arg Value and integer \arg Type.
- llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) {
+ llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const {
llvm::APSInt Res(getIntWidth(Type), !Type->isSignedIntegerType());
Res = Value;
return Res;
@@ -1314,12 +1402,23 @@ public:
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D);
+ /// \brief Returns true if there is at least one @implementation in the TU.
+ bool AnyObjCImplementation() {
+ return !ObjCImpls.empty();
+ }
+
/// \brief Set the implementation of ObjCInterfaceDecl.
void setObjCImplementation(ObjCInterfaceDecl *IFaceD,
ObjCImplementationDecl *ImplD);
/// \brief Set the implementation of ObjCCategoryDecl.
void setObjCImplementation(ObjCCategoryDecl *CatD,
ObjCCategoryImplDecl *ImplD);
+
+ /// \brief Set the copy initialization expression of a block var decl.
+ void setBlockVarCopyInits(VarDecl *VD, Expr *Init);
+ /// \brief Get the copy initialization expression of a VarDecl, or NULL if
+ /// none exists.
+ Expr *getBlockVarCopyInits(const VarDecl *VD);
/// \brief Allocate an uninitialized TypeSourceInfo.
///
@@ -1332,13 +1431,14 @@ public:
///
/// \param Size the size of the type info to create, or 0 if the size
/// should be calculated based on the type.
- TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0);
+ TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0) const;
/// \brief Allocate a TypeSourceInfo where all locations have been
/// initialized to a given location, which defaults to the empty
/// location.
TypeSourceInfo *
- getTrivialTypeSourceInfo(QualType T, SourceLocation Loc = SourceLocation());
+ getTrivialTypeSourceInfo(QualType T,
+ SourceLocation Loc = SourceLocation()) const;
TypeSourceInfo *getNullTypeSourceInfo() { return &NullTypeSourceInfo; }
@@ -1407,10 +1507,11 @@ private:
bool ExpandStructures,
const FieldDecl *Field,
bool OutermostType = false,
- bool EncodingProperty = false);
+ bool EncodingProperty = false) const;
- const ASTRecordLayout &getObjCLayout(const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl);
+ const ASTRecordLayout &
+ getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) const;
private:
/// \brief A set of deallocations that should be performed when the
@@ -1423,8 +1524,8 @@ private:
llvm::PointerIntPair<StoredDeclsMap*,1> LastSDM;
/// \brief A counter used to uniquely identify "blocks".
- unsigned int UniqueBlockByRefTypeID;
- unsigned int UniqueBlockParmTypeID;
+ mutable unsigned int UniqueBlockByRefTypeID;
+ mutable unsigned int UniqueBlockParmTypeID;
friend class DeclContext;
friend class DeclarationNameTable;
@@ -1469,7 +1570,7 @@ static inline Selector GetUnarySelector(const char* name, ASTContext& Ctx) {
/// @param Alignment The alignment of the allocated memory (if the underlying
/// allocator supports it).
/// @return The allocated memory. Could be NULL.
-inline void *operator new(size_t Bytes, clang::ASTContext &C,
+inline void *operator new(size_t Bytes, const clang::ASTContext &C,
size_t Alignment) throw () {
return C.Allocate(Bytes, Alignment);
}
@@ -1479,7 +1580,7 @@ inline void *operator new(size_t Bytes, clang::ASTContext &C,
/// invoking it directly; see the new operator for more details. This operator
/// is called implicitly by the compiler if a placement new expression using
/// the ASTContext throws in the object constructor.
-inline void operator delete(void *Ptr, clang::ASTContext &C, size_t)
+inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t)
throw () {
C.Deallocate(Ptr);
}
@@ -1503,7 +1604,7 @@ inline void operator delete(void *Ptr, clang::ASTContext &C, size_t)
/// @param Alignment The alignment of the allocated memory (if the underlying
/// allocator supports it).
/// @return The allocated memory. Could be NULL.
-inline void *operator new[](size_t Bytes, clang::ASTContext& C,
+inline void *operator new[](size_t Bytes, const clang::ASTContext& C,
size_t Alignment = 8) throw () {
return C.Allocate(Bytes, Alignment);
}
@@ -1514,7 +1615,7 @@ inline void *operator new[](size_t Bytes, clang::ASTContext& C,
/// invoking it directly; see the new[] operator for more details. This operator
/// is called implicitly by the compiler if a placement new[] expression using
/// the ASTContext throws in the object constructor.
-inline void operator delete[](void *Ptr, clang::ASTContext &C, size_t)
+inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t)
throw () {
C.Deallocate(Ptr);
}
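The const-qualified placement operators let AST nodes be allocated from a const ASTContext. A minimal sketch, assuming Ctx is an available const clang::ASTContext& (size and alignment are arbitrary here):

    // Memory comes from the context's arena; it is never freed one object
    // at a time. The matching operator delete above only runs if a
    // constructor throws during placement new.
    void *Mem = ::operator new(64, Ctx, /*Alignment=*/8);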
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
index 7cbf3a5..1ab53b3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define ASTSTART
#include "clang/Basic/DiagnosticASTKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
index 9380058..b659ce7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
@@ -45,13 +45,13 @@ namespace clang {
/// \brief The file managers we're importing to and from.
FileManager &ToFileManager, &FromFileManager;
-
- /// \brief The diagnostics object that we should use to emit diagnostics.
- Diagnostic &Diags;
+
+ /// \brief Whether to perform a minimal import.
+ bool Minimal;
/// \brief Mapping from the already-imported types in the "from" context
/// to the corresponding types in the "to" context.
- llvm::DenseMap<Type *, Type *> ImportedTypes;
+ llvm::DenseMap<const Type *, const Type *> ImportedTypes;
/// \brief Mapping from the already-imported declarations in the "from"
/// context to the corresponding declarations in the "to" context.
@@ -63,7 +63,7 @@ namespace clang {
/// \brief Mapping from the already-imported FileIDs in the "from" source
/// manager to the corresponding FileIDs in the "to" source manager.
- llvm::DenseMap<unsigned, FileID> ImportedFileIDs;
+ llvm::DenseMap<FileID, FileID> ImportedFileIDs;
/// \brief Imported, anonymous tag declarations that are missing their
/// corresponding typedefs.
@@ -74,12 +74,29 @@ namespace clang {
NonEquivalentDeclSet NonEquivalentDecls;
public:
- ASTImporter(Diagnostic &Diags,
- ASTContext &ToContext, FileManager &ToFileManager,
- ASTContext &FromContext, FileManager &FromFileManager);
+ /// \brief Create a new AST importer.
+ ///
+ /// \param ToContext The context we'll be importing into.
+ ///
+ /// \param ToFileManager The file manager we'll be importing into.
+ ///
+ /// \param FromContext The context we'll be importing from.
+ ///
+ /// \param FromFileManager The file manager we'll be importing from.
+ ///
+ /// \param MinimalImport If true, the importer will attempt to import
+ /// as little as it can, e.g., by importing declarations as forward
+ /// declarations that can be completed at a later point.
+ ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport);
virtual ~ASTImporter();
+ /// \brief Whether the importer will perform a minimal import, creating
+ /// to-be-completed forward declarations when possible.
+ bool isMinimalImport() const { return Minimal; }
+
/// \brief Import the given type from the "from" context into the "to"
/// context.
///
@@ -129,6 +146,10 @@ namespace clang {
/// context, or NULL if an error occurred.
NestedNameSpecifier *Import(NestedNameSpecifier *FromNNS);
+ /// \brief Import the given template name from the "from" context into the
+ /// "to" context.
+ TemplateName Import(TemplateName From);
+
/// \brief Import the given source location from the "from" context into
/// the "to" context.
///
@@ -154,7 +175,7 @@ namespace clang {
/// into the "to" context.
///
/// \returns the equivalent identifier in the "to" context.
- IdentifierInfo *Import(IdentifierInfo *FromId);
+ IdentifierInfo *Import(const IdentifierInfo *FromId);
/// \brief Import the given Objective-C selector from the "from"
/// context into the "to" context.
@@ -169,6 +190,12 @@ namespace clang {
/// context.
FileID Import(FileID);
+ /// \brief Import the definition of the given declaration, including all of
+ /// the declarations it contains.
+ ///
+ /// This routine is intended to be used to complete a declaration that was
+ /// previously imported only as a forward declaration, e.g., by a minimal
+ /// import.
+ void ImportDefinition(Decl *From);
+
/// \brief Cope with a name conflict when importing a declaration into the
/// given context.
///
@@ -212,9 +239,6 @@ namespace clang {
/// \brief Retrieve the file manager that AST nodes are being imported from.
FileManager &getFromFileManager() const { return FromFileManager; }
-
- /// \brief Retrieve the diagnostic formatter.
- Diagnostic &getDiags() const { return Diags; }
/// \brief Report a diagnostic in the "to" context.
DiagnosticBuilder ToDiag(SourceLocation Loc, unsigned DiagID);
@@ -228,12 +252,13 @@ namespace clang {
/// \brief Note that we have imported the "from" declaration by mapping it
/// to the (potentially-newly-created) "to" declaration.
///
- /// \returns \p To
- Decl *Imported(Decl *From, Decl *To);
+ /// Subclasses can override this function to observe all of the \c From ->
+ /// \c To declaration mappings as they are imported.
+ virtual Decl *Imported(Decl *From, Decl *To);
/// \brief Determine whether the given types are structurally
/// equivalent.
- bool IsStructurallyEquivalent(QualType From, QualType To);
+ bool IsStructurallyEquivalent(QualType From, QualType To);
};
}
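With the Diagnostic parameter gone and Imported() now virtual, a client can observe every from-to declaration mapping by subclassing. A minimal sketch against the new constructor, assuming the two contexts and file managers are already set up (LoggingImporter is a hypothetical name):

    class LoggingImporter : public clang::ASTImporter {
    public:
      LoggingImporter(clang::ASTContext &ToCtx, clang::FileManager &ToFM,
                      clang::ASTContext &FromCtx, clang::FileManager &FromFM)
        : ASTImporter(ToCtx, ToFM, FromCtx, FromFM, /*MinimalImport=*/true) {}

      virtual clang::Decl *Imported(clang::Decl *From, clang::Decl *To) {
        // Record or log the mapping here, then defer to the base class.
        return clang::ASTImporter::Imported(From, To);
      }
    };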
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
new file mode 100644
index 0000000..01e6180
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
@@ -0,0 +1,48 @@
+//===--- ASTMutationListener.h - AST Mutation Interface --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTMutationListener interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
+#define LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
+
+namespace clang {
+ class Decl;
+ class DeclContext;
+ class TagDecl;
+ class CXXRecordDecl;
+ class ClassTemplateDecl;
+ class ClassTemplateSpecializationDecl;
+
+/// \brief An abstract interface that should be implemented by listeners
+/// that want to be notified when an AST entity gets modified after its
+/// initial creation.
+class ASTMutationListener {
+public:
+ virtual ~ASTMutationListener();
+
+ /// \brief A new TagDecl definition was completed.
+ virtual void CompletedTagDefinition(const TagDecl *D) { }
+
+ /// \brief A new declaration with name has been added to a DeclContext.
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D) {}
+
+ /// \brief An implicit member was added after the definition was completed.
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {}
+
+ /// \brief A template specialization (or partial one) was added to the
+ /// template declaration.
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D) {}
+};
+
+} // end namespace clang
+
+#endif
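A listener only needs to override the notifications it cares about; the defaults above are no-ops. A minimal sketch (TagDefinitionLogger is a hypothetical name):

    class TagDefinitionLogger : public clang::ASTMutationListener {
    public:
      virtual void CompletedTagDefinition(const clang::TagDecl *D) {
        // React to a struct/union/class/enum definition being completed,
        // e.g. by queueing D for re-serialization.
      }
    };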
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
index 62ca49f..67968fd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -36,19 +36,19 @@ namespace clang {
}
// Defined in ASTContext.h
-void *operator new(size_t Bytes, clang::ASTContext &C,
+void *operator new(size_t Bytes, const clang::ASTContext &C,
size_t Alignment = 16) throw ();
// FIXME: Being forced to not have a default argument here due to redeclaration
// rules on default arguments sucks
-void *operator new[](size_t Bytes, clang::ASTContext &C,
+void *operator new[](size_t Bytes, const clang::ASTContext &C,
size_t Alignment) throw ();
// It is good practice to pair new/delete operators. Also, MSVC gives many
// warnings if a matching delete overload is not declared, even though the
// throw() spec guarantees it will not be implicitly called.
-void operator delete(void *Ptr, clang::ASTContext &C, size_t)
+void operator delete(void *Ptr, const clang::ASTContext &C, size_t)
throw ();
-void operator delete[](void *Ptr, clang::ASTContext &C, size_t)
+void operator delete[](void *Ptr, const clang::ASTContext &C, size_t)
throw ();
namespace clang {
@@ -58,9 +58,10 @@ class Attr {
private:
SourceLocation Loc;
unsigned AttrKind : 16;
- bool Inherited : 1;
protected:
+ bool Inherited : 1;
+
virtual ~Attr();
void* operator new(size_t bytes) throw() {
@@ -88,10 +89,6 @@ protected:
public:
- /// \brief Whether this attribute should be merged to new
- /// declarations.
- virtual bool isMerged() const { return true; }
-
attr::Kind getKind() const {
return static_cast<attr::Kind>(AttrKind);
}
@@ -100,7 +97,6 @@ public:
void setLocation(SourceLocation L) { Loc = L; }
bool isInherited() const { return Inherited; }
- void setInherited(bool I) { Inherited = I; }
// Clone this attribute.
virtual Attr* clone(ASTContext &C) const = 0;
@@ -109,6 +105,21 @@ public:
static bool classof(const Attr *) { return true; }
};
+class InheritableAttr : public Attr {
+protected:
+ InheritableAttr(attr::Kind AK, SourceLocation L)
+ : Attr(AK, L) {}
+
+public:
+ void setInherited(bool I) { Inherited = I; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Attr *A) {
+ return A->getKind() <= attr::LAST_INHERITABLE;
+ }
+ static bool classof(const InheritableAttr *) { return true; }
+};
+
#include "clang/AST/Attrs.inc"
/// AttrVec - A vector of Attr, which is how they are stored on the AST.
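Because classof keys on the attribute kind ordering (everything up to attr::LAST_INHERITABLE is inheritable), LLVM's RTTI templates work directly on Attr pointers. A sketch of the intended use now that setInherited lives on the subclass, assuming A is some clang::Attr*:

    if (clang::InheritableAttr *IA = llvm::dyn_cast<clang::InheritableAttr>(A))
      IA->setInherited(true);  // only inheritable attributes expose the setter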
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
index 5a84e40..2d30cb3 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
@@ -20,6 +20,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include <list>
#include <map>
@@ -359,7 +360,11 @@ public:
/// subobjects of that type.
class CXXFinalOverriderMap
: public llvm::DenseMap<const CXXMethodDecl *, OverridingMethods> { };
-
+
+/// \brief A set of all the primary bases for a class.
+class CXXIndirectPrimaryBaseSet
+ : public llvm::SmallSet<const CXXRecordDecl*, 32> { };
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
index dad4dfc..4d7fcfd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
@@ -66,7 +66,16 @@ public:
/// \brief Retrieve the underlying type pointer, which refers to a
/// canonical type.
- T *getTypePtr() const { return cast_or_null<T>(Stored.getTypePtr()); }
+ ///
+ /// The underlying pointer must not be NULL.
+ const T *getTypePtr() const { return cast<T>(Stored.getTypePtr()); }
+
+ /// \brief Retrieve the underlying type pointer, which refers to a
+ /// canonical type, or NULL.
+ ///
+ const T *getTypePtrOrNull() const {
+ return cast_or_null<T>(Stored.getTypePtrOrNull());
+ }
/// \brief Implicit conversion to a qualified type.
operator QualType() const { return Stored; }
@@ -78,6 +87,8 @@ public:
return Stored.isNull();
}
+ SplitQualType split() const { return Stored.split(); }
+
/// \brief Retrieve a canonical type pointer with a different static type,
/// upcasting or downcasting as needed.
///
@@ -216,7 +227,7 @@ protected:
public:
/// \brief Retrieve the pointer to the underlying Type
- T* getTypePtr() const { return Stored.getTypePtr(); }
+ const T *getTypePtr() const { return Stored.getTypePtr(); }
/// \brief Implicit conversion to the underlying pointer.
///
@@ -225,7 +236,7 @@ public:
/// @code
/// if (CanQual<PointerType> Ptr = T->getAs<PointerType>()) { ... }
/// @endcode
- operator const T*() const { return this->Stored.getTypePtr(); }
+ operator const T*() const { return this->Stored.getTypePtrOrNull(); }
/// \brief Try to convert the given canonical type to a specific structural
/// type.
@@ -336,7 +347,7 @@ namespace llvm {
/// to return smart pointer (proxies?).
template<typename T>
struct simplify_type<const ::clang::CanQual<T> > {
- typedef T* SimpleType;
+ typedef const T *SimpleType;
static SimpleType getSimplifiedValue(const ::clang::CanQual<T> &Val) {
return Val.getTypePtr();
}
@@ -630,7 +641,6 @@ struct CanProxyAdaptor<RecordType> : public CanProxyBase<RecordType> {
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getDecl)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasConstFields)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getAddressSpace)
};
template<>
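The split between getTypePtr and getTypePtrOrNull mirrors QualType: the former now asserts (via cast<>) on a null wrapper, while the latter keeps the old tolerant behaviour. A sketch, assuming CT is a clang::CanQual<clang::PointerType> that may be null:

    if (const clang::PointerType *PT = CT.getTypePtrOrNull())
      (void)PT->getPointeeType();  // safe: CT held a canonical PointerType
    // CT.getTypePtr() would assert instead of returning NULL.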
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
index 0bb4b76..cf909e8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
@@ -14,7 +14,9 @@
#ifndef LLVM_CLANG_AST_CHARUNITS_H
#define LLVM_CLANG_AST_CHARUNITS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
namespace clang {
@@ -131,12 +133,24 @@ namespace clang {
CharUnits operator- (const CharUnits &Other) const {
return CharUnits(Quantity - Other.Quantity);
}
+ CharUnits operator- () const {
+ return CharUnits(-Quantity);
+ }
+
// Conversions.
/// getQuantity - Get the raw integer representation of this quantity.
QuantityType getQuantity() const { return Quantity; }
+ /// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
+ /// greater than or equal to this quantity and is a multiple of \arg
+ /// Align. Align must be non-zero.
+ CharUnits RoundUpToAlignment(const CharUnits &Align) {
+ return CharUnits(llvm::RoundUpToAlignment(Quantity,
+ Align.Quantity));
+ }
+
}; // class CharUnits
} // namespace clang
@@ -146,4 +160,38 @@ inline clang::CharUnits operator* (clang::CharUnits::QuantityType Scale,
return CU * Scale;
}
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CharUnits> {
+ static clang::CharUnits getEmptyKey() {
+ clang::CharUnits::QuantityType Quantity =
+ DenseMapInfo<clang::CharUnits::QuantityType>::getEmptyKey();
+
+ return clang::CharUnits::fromQuantity(Quantity);
+ }
+
+ static clang::CharUnits getTombstoneKey() {
+ clang::CharUnits::QuantityType Quantity =
+ DenseMapInfo<clang::CharUnits::QuantityType>::getTombstoneKey();
+
+ return clang::CharUnits::fromQuantity(Quantity);
+ }
+
+ static unsigned getHashValue(const clang::CharUnits &CU) {
+ clang::CharUnits::QuantityType Quantity = CU.getQuantity();
+ return DenseMapInfo<clang::CharUnits::QuantityType>::getHashValue(Quantity);
+ }
+
+ static bool isEqual(const clang::CharUnits &LHS,
+ const clang::CharUnits &RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <> struct isPodLike<clang::CharUnits> {
+ static const bool value = true;
+};
+
+} // end namespace llvm
+
#endif // LLVM_CLANG_AST_CHARUNITS_H
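With the DenseMapInfo and isPodLike specializations in place, CharUnits can serve directly as a DenseMap key, and RoundUpToAlignment makes offset arithmetic explicit. A minimal sketch (the quantities are arbitrary):

    clang::CharUnits Offset = clang::CharUnits::fromQuantity(10);
    clang::CharUnits Align  = clang::CharUnits::fromQuantity(8);
    clang::CharUnits Rounded = Offset.RoundUpToAlignment(Align); // 16 units

    llvm::DenseMap<clang::CharUnits, unsigned> SeenOffsets;
    SeenOffsets[Rounded] = 1;  // keyed on CharUnits via the traits above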
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index 6749255..ee515da 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -36,6 +36,7 @@ class FunctionTemplateSpecializationInfo;
class DependentFunctionTemplateSpecializationInfo;
class TypeLoc;
class UnresolvedSetImpl;
+class LabelStmt;
/// \brief A container of type source information.
///
@@ -196,9 +197,86 @@ public:
/// determine whether it's an instance member of its class.
bool isCXXInstanceMember() const;
+ class LinkageInfo {
+ Linkage linkage_;
+ Visibility visibility_;
+ bool explicit_;
+
+ public:
+ LinkageInfo() : linkage_(ExternalLinkage), visibility_(DefaultVisibility),
+ explicit_(false) {}
+ LinkageInfo(Linkage L, Visibility V, bool E)
+ : linkage_(L), visibility_(V), explicit_(E) {}
+
+ static LinkageInfo external() {
+ return LinkageInfo();
+ }
+ static LinkageInfo internal() {
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ }
+ static LinkageInfo uniqueExternal() {
+ return LinkageInfo(UniqueExternalLinkage, DefaultVisibility, false);
+ }
+ static LinkageInfo none() {
+ return LinkageInfo(NoLinkage, DefaultVisibility, false);
+ }
+
+ Linkage linkage() const { return linkage_; }
+ Visibility visibility() const { return visibility_; }
+ bool visibilityExplicit() const { return explicit_; }
+
+ void setLinkage(Linkage L) { linkage_ = L; }
+ void setVisibility(Visibility V) { visibility_ = V; }
+ void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
+ void setVisibility(LinkageInfo Other) {
+ setVisibility(Other.visibility(), Other.visibilityExplicit());
+ }
+
+ void mergeLinkage(Linkage L) {
+ setLinkage(minLinkage(linkage(), L));
+ }
+ void mergeLinkage(LinkageInfo Other) {
+ setLinkage(minLinkage(linkage(), Other.linkage()));
+ }
+
+ void mergeVisibility(Visibility V) {
+ setVisibility(minVisibility(visibility(), V));
+ }
+ void mergeVisibility(Visibility V, bool E) {
+ setVisibility(minVisibility(visibility(), V), visibilityExplicit() || E);
+ }
+ void mergeVisibility(LinkageInfo Other) {
+ mergeVisibility(Other.visibility(), Other.visibilityExplicit());
+ }
+
+ void merge(LinkageInfo Other) {
+ mergeLinkage(Other);
+ mergeVisibility(Other);
+ }
+ void merge(std::pair<Linkage,Visibility> LV) {
+ mergeLinkage(LV.first);
+ mergeVisibility(LV.second);
+ }
+
+ friend LinkageInfo merge(LinkageInfo L, LinkageInfo R) {
+ L.merge(R);
+ return L;
+ }
+ };
+
/// \brief Determine what kind of linkage this entity has.
Linkage getLinkage() const;
+ /// \brief Determines the visibility of this entity.
+ Visibility getVisibility() const { return getLinkageAndVisibility().visibility(); }
+
+ /// \brief Determines the linkage and visibility of this entity.
+ LinkageInfo getLinkageAndVisibility() const;
+
+ /// \brief Clear the linkage cache in response to a change
+ /// to the declaration.
+ void ClearLinkageCache();
+
/// \brief Looks through UsingDecls and ObjCCompatibleAliasDecls for
/// the underlying named decl.
NamedDecl *getUnderlyingDecl();
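The merge operations compute the weakest linkage and visibility of two entities, which is how the linkage of a compound entity (say, a template specialization and its arguments) is derived from its parts. A sketch of the algebra, using the enumerators from clang/Basic/Linkage.h and Visibility.h:

    clang::NamedDecl::LinkageInfo LV = clang::NamedDecl::LinkageInfo::external();
    LV.mergeLinkage(clang::InternalLinkage);     // min(External, Internal)
    LV.mergeVisibility(clang::HiddenVisibility); // min(Default, Hidden)
    // Now LV.linkage() == InternalLinkage, LV.visibility() == HiddenVisibility.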
@@ -217,6 +295,29 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
return OS;
}
+/// LabelDecl - Represents the declaration of a label. Labels also have a
+/// corresponding LabelStmt, which indicates the position that the label was
+/// defined at. For normal labels, the location of the decl is the same as the
+/// location of the statement. For GNU local labels (__label__), the decl
+/// location is where the __label__ is.
+class LabelDecl : public NamedDecl {
+ LabelStmt *TheStmt;
+ LabelDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *II, LabelStmt *S)
+ : NamedDecl(Label, DC, L, II), TheStmt(S) {}
+
+public:
+ static LabelDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *II);
+
+ LabelStmt *getStmt() const { return TheStmt; }
+ void setStmt(LabelStmt *T) { TheStmt = T; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const LabelDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Label; }
+};
+
/// NamespaceDecl - Represent a C++ namespace.
class NamespaceDecl : public NamedDecl, public DeclContext {
bool IsInline : 1;
@@ -232,7 +333,7 @@ class NamespaceDecl : public NamedDecl, public DeclContext {
// NextNamespace points to the next extended declaration.
// OrigNamespace points to the original namespace declaration.
// OrigNamespace of the first namespace decl points to its anonymous namespace
- NamespaceDecl *NextNamespace;
+ LazyDeclPtr NextNamespace;
/// \brief A pointer to either the original namespace definition for
/// this namespace (if the boolean value is false) or the anonymous
@@ -250,7 +351,7 @@ class NamespaceDecl : public NamedDecl, public DeclContext {
NamespaceDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id)
: NamedDecl(Namespace, DC, L, Id), DeclContext(Namespace),
- IsInline(false), NextNamespace(0), OrigOrAnonNamespace(0, true) { }
+ IsInline(false), NextNamespace(), OrigOrAnonNamespace(0, true) { }
public:
static NamespaceDecl *Create(ASTContext &C, DeclContext *DC,
@@ -281,8 +382,10 @@ public:
/// \brief Return the next extended namespace declaration or null if there
/// is none.
- NamespaceDecl *getNextNamespace() { return NextNamespace; }
- const NamespaceDecl *getNextNamespace() const { return NextNamespace; }
+ NamespaceDecl *getNextNamespace();
+ const NamespaceDecl *getNextNamespace() const {
+ return const_cast<NamespaceDecl *>(this)->getNextNamespace();
+ }
/// \brief Set the next extended namespace declaration.
void setNextNamespace(NamespaceDecl *ND) { NextNamespace = ND; }
@@ -331,9 +434,9 @@ public:
SourceLocation getLBracLoc() const { return LBracLoc; }
SourceLocation getRBracLoc() const { return RBracLoc; }
- void setLBracLoc(SourceLocation LBrace) { LBracLoc = LBrace; }
- void setRBracLoc(SourceLocation RBrace) { RBracLoc = RBrace; }
-
+ void setLBracLoc(SourceLocation L) { LBracLoc = L; }
+ void setRBracLoc(SourceLocation R) { RBracLoc = R; }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const NamespaceDecl *D) { return true; }
@@ -471,6 +574,9 @@ public:
static bool classofKind(Kind K) {
return K >= firstDeclarator && K <= lastDeclarator;
}
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
/// \brief Structure used to store a statement, the constant value to
@@ -545,15 +651,21 @@ private:
/// \brief Whether this local variable could be allocated in the return
/// slot of its function, enabling the named return value optimization (NRVO).
bool NRVOVariable : 1;
+
+ /// \brief Whether this variable has a deduced C++0x auto type for which we're
+ /// currently parsing the initializer.
+ bool ParsingAutoInit : 1;
friend class StmtIteratorBase;
+ friend class ASTDeclReader;
+
protected:
VarDecl(Kind DK, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo, StorageClass SC,
StorageClass SCAsWritten)
: DeclaratorDecl(DK, DC, L, Id, T, TInfo), Init(),
ThreadSpecified(false), HasCXXDirectInit(false),
- ExceptionVar(false), NRVOVariable(false) {
+ ExceptionVar(false), NRVOVariable(false), ParsingAutoInit(false) {
SClass = SC;
SClassAsWritten = SCAsWritten;
}
@@ -582,10 +694,7 @@ public:
StorageClass getStorageClassAsWritten() const {
return (StorageClass) SClassAsWritten;
}
- void setStorageClass(StorageClass SC) {
- assert(isLegalForVariable(SC));
- SClass = SC;
- }
+ void setStorageClass(StorageClass SC);
void setStorageClassAsWritten(StorageClass SC) {
assert(isLegalForVariable(SC));
SClassAsWritten = SC;
@@ -630,13 +739,13 @@ public:
/// external, C linkage.
bool isExternC() const;
- /// isBlockVarDecl - Returns true for local variable declarations. Note that
- /// this includes static variables inside of functions. It also includes
- /// variables inside blocks.
+ /// isLocalVarDecl - Returns true for local variable declarations
+ /// other than parameters. Note that this includes static variables
+ /// inside of functions. It also includes variables inside blocks.
///
/// void foo() { int x; static int y; extern int z; }
///
- bool isBlockVarDecl() const {
+ bool isLocalVarDecl() const {
if (getKind() != Decl::Var)
return false;
if (const DeclContext *DC = getDeclContext())
@@ -644,8 +753,8 @@ public:
return false;
}
- /// isFunctionOrMethodVarDecl - Similar to isBlockVarDecl, but excludes
- /// variables declared in blocks.
+ /// isFunctionOrMethodVarDecl - Similar to isLocalVarDecl, but
+ /// excludes variables declared in blocks.
bool isFunctionOrMethodVarDecl() const {
if (getKind() != Decl::Var)
return false;
@@ -683,6 +792,10 @@ public:
/// definition.
DefinitionKind isThisDeclarationADefinition() const;
+ /// \brief Check whether this variable is defined in this
+ /// translation unit.
+ DefinitionKind hasDefinition() const;
+
/// \brief Get the tentative definition that acts as the real definition in
/// a TU. Returns null if there is a proper definition available.
VarDecl *getActingDefinition();
@@ -733,7 +846,7 @@ public:
const Expr *getAnyInitializer(const VarDecl *&D) const;
bool hasInit() const {
- return !Init.isNull();
+ return !Init.isNull() && (Init.is<Stmt *>() || Init.is<EvaluatedStmt *>());
}
const Expr *getInit() const {
if (Init.isNull())
@@ -776,6 +889,18 @@ public:
void setInit(Expr *I);
+ /// \brief Check whether we are in the process of parsing an initializer
+ /// needed to deduce the type of this variable.
+ bool isParsingAutoInit() const {
+ return ParsingAutoInit;
+ }
+
+ /// \brief Note whether we are currently parsing an initializer needed to
+ /// deduce the type of this variable.
+ void setParsingAutoInit(bool P) {
+ ParsingAutoInit = P;
+ }
+
EvaluatedStmt *EnsureEvaluatedStmt() const {
EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
if (!Eval) {
@@ -928,7 +1053,9 @@ class ImplicitParamDecl : public VarDecl {
protected:
ImplicitParamDecl(Kind DK, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType Tw)
- : VarDecl(DK, DC, L, Id, Tw, /*TInfo=*/0, SC_None, SC_None) {}
+ : VarDecl(DK, DC, L, Id, Tw, /*TInfo=*/0, SC_None, SC_None) {
+ setImplicit();
+ }
public:
static ImplicitParamDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
@@ -1046,6 +1173,10 @@ public:
return getType();
}
+ /// \brief Determine whether this parameter is actually a function
+ /// parameter pack.
+ bool isParameterPack() const;
+
/// setOwningFunction - Sets the function declaration that owns this
/// ParmVarDecl. Since ParmVarDecls are often created before the
/// FunctionDecls that own them, this routine is required to update
@@ -1096,13 +1227,13 @@ private:
unsigned SClass : 2;
unsigned SClassAsWritten : 2;
bool IsInline : 1;
+ bool IsInlineSpecified : 1;
bool IsVirtualAsWritten : 1;
bool IsPure : 1;
bool HasInheritedPrototype : 1;
bool HasWrittenPrototype : 1;
bool IsDeleted : 1;
bool IsTrivial : 1; // sunk from CXXMethodDecl
- bool IsCopyAssignment : 1; // sunk from CXXMethodDecl
bool HasImplicitReturnZero : 1;
/// \brief End part of this FunctionDecl's source range.
@@ -1136,19 +1267,54 @@ private:
/// declaration name embedded in the DeclaratorDecl base class.
DeclarationNameLoc DNLoc;
+ /// \brief Specify that this function declaration is actually a function
+ /// template specialization.
+ ///
+ /// \param C the ASTContext.
+ ///
+ /// \param Template the function template that this function template
+ /// specialization specializes.
+ ///
+ /// \param TemplateArgs the template arguments that produced this
+ /// function template specialization from the template.
+ ///
+ /// \param InsertPos If non-NULL, the position in the function template
+ /// specialization set where the function template specialization data will
+ /// be inserted.
+ ///
+ /// \param TSK the kind of template specialization this is.
+ ///
+ /// \param TemplateArgsAsWritten location info of template arguments.
+ ///
+ /// \param PointOfInstantiation point at which the function template
+ /// specialization was first instantiated.
+ void setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation);
+
+ /// \brief Specify that this record is an instantiation of the
+ /// member function FD.
+ void setInstantiationOfMemberFunction(ASTContext &C, FunctionDecl *FD,
+ TemplateSpecializationKind TSK);
+
+ void setParams(ASTContext &C, ParmVarDecl **NewParamInfo, unsigned NumParams);
+
protected:
FunctionDecl(Kind DK, DeclContext *DC, const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
- StorageClass S, StorageClass SCAsWritten, bool isInline)
+ StorageClass S, StorageClass SCAsWritten, bool isInlineSpecified)
: DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo),
DeclContext(DK),
ParamInfo(0), Body(),
- SClass(S), SClassAsWritten(SCAsWritten), IsInline(isInline),
+ SClass(S), SClassAsWritten(SCAsWritten),
+ IsInline(isInlineSpecified), IsInlineSpecified(isInlineSpecified),
IsVirtualAsWritten(false), IsPure(false), HasInheritedPrototype(false),
HasWrittenPrototype(true), IsDeleted(false), IsTrivial(false),
- IsCopyAssignment(false),
- HasImplicitReturnZero(false),
- EndRangeLoc(NameInfo.getEndLoc()),
+ HasImplicitReturnZero(false), EndRangeLoc(NameInfo.getEndLoc()),
TemplateOrSpecialization(),
DNLoc(NameInfo.getInfo()) {}
@@ -1169,11 +1335,11 @@ public:
TypeSourceInfo *TInfo,
StorageClass S = SC_None,
StorageClass SCAsWritten = SC_None,
- bool isInline = false,
+ bool isInlineSpecified = false,
bool hasWrittenPrototype = true) {
DeclarationNameInfo NameInfo(N, L);
return FunctionDecl::Create(C, DC, NameInfo, T, TInfo, S, SCAsWritten,
- isInline, hasWrittenPrototype);
+ isInlineSpecified, hasWrittenPrototype);
}
static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
@@ -1181,7 +1347,7 @@ public:
QualType T, TypeSourceInfo *TInfo,
StorageClass S = SC_None,
StorageClass SCAsWritten = SC_None,
- bool isInline = false,
+ bool isInlineSpecified = false,
bool hasWrittenPrototype = true);
DeclarationNameInfo getNameInfo() const {
@@ -1246,7 +1412,7 @@ public:
/// Whether this virtual function is pure, i.e. makes the containing class
/// abstract.
bool isPure() const { return IsPure; }
- void setPure(bool P = true) { IsPure = P; }
+ void setPure(bool P = true);
/// Whether this function is "trivial" in some specialized C++ senses.
/// Can only be true for default constructors, copy constructors,
@@ -1255,9 +1421,6 @@ public:
bool isTrivial() const { return IsTrivial; }
void setTrivial(bool IT) { IsTrivial = IT; }
- bool isCopyAssignment() const { return IsCopyAssignment; }
- void setCopyAssignment(bool CA) { IsCopyAssignment = CA; }
-
/// Whether falling off this function implicitly returns null/zero.
/// If a more specific implicit return value is required, front-ends
/// should synthesize the appropriate return statements.
@@ -1273,7 +1436,6 @@ public:
}
bool hasWrittenPrototype() const { return HasWrittenPrototype; }
- void setHasWrittenPrototype(bool P) { HasWrittenPrototype = P; }
/// \brief Whether this function inherited its prototype from a
/// previous declaration.
@@ -1343,7 +1505,9 @@ public:
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
}
- void setParams(ParmVarDecl **NewParamInfo, unsigned NumParams);
+ void setParams(ParmVarDecl **NewParamInfo, unsigned NumParams) {
+ setParams(getASTContext(), NewParamInfo, NumParams);
+ }
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
@@ -1361,31 +1525,32 @@ public:
}
StorageClass getStorageClass() const { return StorageClass(SClass); }
- void setStorageClass(StorageClass SC) {
- assert(isLegalForFunction(SC));
- SClass = SC;
- }
+ void setStorageClass(StorageClass SC);
StorageClass getStorageClassAsWritten() const {
return StorageClass(SClassAsWritten);
}
- void setStorageClassAsWritten(StorageClass SC) {
- assert(isLegalForFunction(SC));
- SClassAsWritten = SC;
- }
/// \brief Determine whether the "inline" keyword was specified for this
/// function.
- bool isInlineSpecified() const { return IsInline; }
+ bool isInlineSpecified() const { return IsInlineSpecified; }
/// Set whether the "inline" keyword was specified for this function.
- void setInlineSpecified(bool I) { IsInline = I; }
+ void setInlineSpecified(bool I) {
+ IsInlineSpecified = I;
+ IsInline = I;
+ }
+
+ /// Flag that this function is implicitly inline.
+ void setImplicitlyInline() {
+ IsInline = true;
+ }
/// \brief Determine whether this function should be inlined, because it is
/// either marked "inline" or is a member function of a C++ class that
/// was defined in the class body.
bool isInlined() const;
-
+
bool isInlineDefinitionExternallyVisible() const;
/// isOverloadedOperator - Whether this function declaration
@@ -1432,7 +1597,9 @@ public:
/// \brief Specify that this record is an instantiation of the
/// member function FD.
void setInstantiationOfMemberFunction(FunctionDecl *FD,
- TemplateSpecializationKind TSK);
+ TemplateSpecializationKind TSK) {
+ setInstantiationOfMemberFunction(getASTContext(), FD, TSK);
+ }
/// \brief Retrieves the function template that is described by this
/// function declaration.
@@ -1526,43 +1693,11 @@ public:
void *InsertPos,
TemplateSpecializationKind TSK = TSK_ImplicitInstantiation,
const TemplateArgumentListInfo *TemplateArgsAsWritten = 0,
- SourceLocation PointOfInstantiation = SourceLocation());
-
- /// \brief Specify that this function declaration is actually a function
- /// template specialization.
- ///
- /// \param Template the function template that this function template
- /// specialization specializes.
- ///
- /// \param NumTemplateArgs number of template arguments that produced this
- /// function template specialization from the template.
- ///
- /// \param TemplateArgs array of template arguments that produced this
- /// function template specialization from the template.
- ///
- /// \param TSK the kind of template specialization this is.
- ///
- /// \param NumTemplateArgsAsWritten number of template arguments that produced
- /// this function template specialization from the template.
- ///
- /// \param TemplateArgsAsWritten array of location info for the template
- /// arguments.
- ///
- /// \param LAngleLoc location of left angle token.
- ///
- /// \param RAngleLoc location of right angle token.
- ///
- /// \param PointOfInstantiation point at which the function template
- /// specialization was first instantiated.
- void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
- unsigned NumTemplateArgs,
- const TemplateArgument *TemplateArgs,
- TemplateSpecializationKind TSK,
- unsigned NumTemplateArgsAsWritten,
- TemplateArgumentLoc *TemplateArgsAsWritten,
- SourceLocation LAngleLoc,
- SourceLocation RAngleLoc,
- SourceLocation PointOfInstantiation);
+ SourceLocation PointOfInstantiation = SourceLocation()) {
+ setFunctionTemplateSpecialization(getASTContext(), Template, TemplateArgs,
+ InsertPos, TSK, TemplateArgsAsWritten,
+ PointOfInstantiation);
+ }
/// \brief Specifies that this function declaration is actually a
/// dependent function template specialization.
@@ -1620,19 +1755,26 @@ public:
class FieldDecl : public DeclaratorDecl {
// FIXME: This can be packed into the bitfields in Decl.
bool Mutable : 1;
+ mutable unsigned CachedFieldIndex : 31;
+
Expr *BitWidth;
protected:
FieldDecl(Kind DK, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
Expr *BW, bool Mutable)
- : DeclaratorDecl(DK, DC, L, Id, T, TInfo), Mutable(Mutable), BitWidth(BW) {
+ : DeclaratorDecl(DK, DC, L, Id, T, TInfo),
+ Mutable(Mutable), CachedFieldIndex(0), BitWidth(BW) {
}
public:
- static FieldDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L,
- IdentifierInfo *Id, QualType T,
+ static FieldDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable);
+ /// getFieldIndex - Returns the index of this field within its record,
+ /// as appropriate for passing to ASTRecordLayout::getFieldOffset.
+ unsigned getFieldIndex() const;
+
/// isMutable - Determines whether this field is mutable (C++ only).
bool isMutable() const { return Mutable; }
@@ -1707,6 +1849,45 @@ public:
friend class StmtIteratorBase;
};
+/// IndirectFieldDecl - An instance of this class is created to represent a
+/// field injected from an anonymous union/struct into the parent scope.
+/// IndirectFieldDecls are always implicit.
+class IndirectFieldDecl : public ValueDecl {
+ NamedDecl **Chaining;
+ unsigned ChainingSize;
+
+ IndirectFieldDecl(DeclContext *DC, SourceLocation L,
+ DeclarationName N, QualType T,
+ NamedDecl **CH, unsigned CHS)
+ : ValueDecl(IndirectField, DC, L, N, T), Chaining(CH), ChainingSize(CHS) {}
+
+public:
+ static IndirectFieldDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, NamedDecl **CH, unsigned CHS);
+
+ typedef NamedDecl * const *chain_iterator;
+ chain_iterator chain_begin() const { return Chaining; }
+ chain_iterator chain_end() const { return Chaining+ChainingSize; }
+
+ unsigned getChainingSize() const { return ChainingSize; }
+
+ FieldDecl *getAnonField() const {
+ assert(ChainingSize >= 2);
+ return cast<FieldDecl>(Chaining[ChainingSize - 1]);
+ }
+
+ VarDecl *getVarDecl() const {
+ assert(ChainingSize >= 2);
+ return dyn_cast<VarDecl>(*chain_begin());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const IndirectFieldDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == IndirectField; }
+ friend class ASTDeclReader;
+};
/// TypeDecl - Represents a declaration of a type.
///
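The chain in an IndirectFieldDecl records the path from the injected name down to the real field, with the anonymous object first and the field itself last. A sketch of walking it, assuming IFD is a const clang::IndirectFieldDecl*:

    for (clang::IndirectFieldDecl::chain_iterator
           I = IFD->chain_begin(), E = IFD->chain_end(); I != E; ++I) {
      const clang::NamedDecl *Link = *I;  // anonymous object first, field last
      (void)Link;
    }
    clang::FieldDecl *FD = IFD->getAnonField();  // the underlying field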
@@ -1715,7 +1896,7 @@ class TypeDecl : public NamedDecl {
/// this TypeDecl. It is a cache maintained by
/// ASTContext::getTypedefType, ASTContext::getTagDeclType, and
/// ASTContext::getTemplateTypeParmType, and TemplateTypeParmDecl.
- mutable Type *TypeForDecl;
+ mutable const Type *TypeForDecl;
friend class ASTContext;
friend class DeclContext;
friend class TagDecl;
@@ -1729,8 +1910,8 @@ protected:
public:
// Low-level accessor
- Type *getTypeForDecl() const { return TypeForDecl; }
- void setTypeForDecl(Type *TD) { TypeForDecl = TD; }
+ const Type *getTypeForDecl() const { return TypeForDecl; }
+ void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -1820,6 +2001,19 @@ protected:
unsigned NumPositiveBits : 8;
unsigned NumNegativeBits : 8;
+ /// IsScoped - True if this tag declaration is a scoped enumeration. Only
+ /// possible in C++0x mode.
+ bool IsScoped : 1;
+ /// IsScopedUsingClassTag - If this tag declaration is a scoped enum,
+ /// then this is true if the scoped enum was declared using the class
+ /// tag, false if it was declared with the struct tag. No meaning is
+ /// associated if this tag declaration is not a scoped enum.
+ bool IsScopedUsingClassTag : 1;
+
+ /// IsFixed - True if this is an enumeration with fixed underlying type. Only
+ /// possible in C++0x mode.
+ bool IsFixed : 1;
+
private:
SourceLocation TagKeywordLoc;
SourceLocation RBraceLoc;
@@ -1859,6 +2053,11 @@ protected:
typedef Redeclarable<TagDecl> redeclarable_base;
virtual TagDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ /// @brief Completes the definition of this tag declaration.
+ ///
+ /// This is a helper function for derived classes.
+ void completeDefinition();
+
public:
typedef redeclarable_base::redecl_iterator redecl_iterator;
redecl_iterator redecls_begin() const {
@@ -1923,9 +2122,6 @@ public:
/// where it is in the process of being defined.
void startDefinition();
- /// @brief Completes the definition of this tag declaration.
- void completeDefinition();
-
/// getDefinition - Returns the TagDecl that actually defines this
/// struct/union/class/enum. When determining whether or not a
/// struct/union/class/enum is completely defined, one should use this method
@@ -2001,7 +2197,19 @@ class EnumDecl : public TagDecl {
/// IntegerType - This represent the integer type that the enum corresponds
/// to for code generation purposes. Note that the enumerator constants may
/// have a different type than this does.
- QualType IntegerType;
+ ///
+ /// If the underlying integer type was explicitly stated in the source
+ /// code, this is a TypeSourceInfo* for that type. Otherwise this type
+ /// was automatically deduced somehow, and this is a Type*.
+ ///
+ /// Normally if IsFixed(), this would contain a TypeSourceInfo*, but in
+ /// some cases it won't.
+ ///
+ /// The underlying type of an enumeration never has any qualifiers, so
+ /// we can get away with just storing a raw Type*, and thus save an
+ /// extra pointer when TypeSourceInfo is needed.
+
+ llvm::PointerUnion<const Type*, TypeSourceInfo*> IntegerType;
/// PromotionType - The integer type that values of this type should
/// promote to. In C, enumerators are generally of an integer type
@@ -2022,11 +2230,16 @@ class EnumDecl : public TagDecl {
};
EnumDecl(DeclContext *DC, SourceLocation L,
- IdentifierInfo *Id, EnumDecl *PrevDecl, SourceLocation TKL)
+ IdentifierInfo *Id, EnumDecl *PrevDecl, SourceLocation TKL,
+ bool Scoped, bool ScopedUsingClassTag, bool Fixed)
: TagDecl(Enum, TTK_Enum, DC, L, Id, PrevDecl, TKL), InstantiatedFrom(0) {
- IntegerType = QualType();
+ assert(Scoped || !ScopedUsingClassTag);
+ IntegerType = (const Type*)0;
NumNegativeBits = 0;
NumPositiveBits = 0;
+ IsScoped = Scoped;
+ IsScopedUsingClassTag = ScopedUsingClassTag;
+ IsFixed = Fixed;
}
public:
EnumDecl *getCanonicalDecl() {
@@ -2045,7 +2258,9 @@ public:
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
- SourceLocation TKL, EnumDecl *PrevDecl);
+ SourceLocation TKL, EnumDecl *PrevDecl,
+ bool IsScoped, bool IsScopedUsingClassTag,
+ bool IsFixed);
static EnumDecl *Create(ASTContext &C, EmptyShell Empty);
/// completeDefinition - When created, the EnumDecl corresponds to a
@@ -2085,10 +2300,25 @@ public:
/// getIntegerType - Return the integer type this enum decl corresponds to.
/// This returns a null qualtype for an enum forward definition.
- QualType getIntegerType() const { return IntegerType; }
+ QualType getIntegerType() const {
+ if (!IntegerType)
+ return QualType();
+ if (const Type* T = IntegerType.dyn_cast<const Type*>())
+ return QualType(T, 0);
+ return IntegerType.get<TypeSourceInfo*>()->getType();
+ }
/// \brief Set the underlying integer type.
- void setIntegerType(QualType T) { IntegerType = T; }
+ void setIntegerType(QualType T) { IntegerType = T.getTypePtrOrNull(); }
+
+ /// \brief Set the underlying integer type source info.
+ void setIntegerTypeSourceInfo(TypeSourceInfo* TInfo) { IntegerType = TInfo; }
+
+ /// \brief Return the type source info for the underlying integer type,
+ /// if no type source info exists, return 0.
+ TypeSourceInfo* getIntegerTypeSourceInfo() const {
+ return IntegerType.dyn_cast<TypeSourceInfo*>();
+ }
/// \brief Returns the width in bits required to store all the
/// non-negative enumerators of this enum.
@@ -2116,6 +2346,27 @@ public:
NumNegativeBits = Num;
}
+ /// \brief Returns true if this is a C++0x scoped enumeration.
+ bool isScoped() const {
+ return IsScoped;
+ }
+
+ /// \brief Returns true if this is a C++0x scoped enumeration that was
+ /// declared using the class tag.
+ bool isScopedUsingClassTag() const {
+ return IsScopedUsingClassTag;
+ }
+
+ /// \brief Returns true if this is a C++0x enumeration with fixed underlying
+ /// type.
+ bool isFixed() const {
+ return IsFixed;
+ }
+
+ /// \brief Returns true if this can be considered a complete type.
+ bool isComplete() const {
+ return isDefinition() || isFixed();
+ }
+
/// \brief Returns the enumeration (declared within the template)
/// from which this enumeration type was instantiated, or NULL if
/// this enumeration was not instantiated from any template.
@@ -2128,6 +2379,8 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const EnumDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Enum; }
+
+ friend class ASTDeclReader;
};
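Creating an enumeration now records its scoped-ness and fixed-ness up front, and the underlying type can be set with or without source information. A minimal sketch, assuming Ctx, DC, Loc and Id are already in scope:

    clang::EnumDecl *ED =
        clang::EnumDecl::Create(Ctx, DC, Loc, Id, Loc, /*PrevDecl=*/0,
                                /*IsScoped=*/true,
                                /*IsScopedUsingClassTag=*/true,
                                /*IsFixed=*/true);
    ED->setIntegerType(Ctx.IntTy);  // stores a raw const Type*
    // ED->getIntegerTypeSourceInfo() stays 0 until the TypeSourceInfo
    // form is used instead.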
@@ -2151,17 +2404,24 @@ class RecordDecl : public TagDecl {
/// containing an object.
bool HasObjectMember : 1;
+ /// \brief Whether the field declarations of this record have been loaded
+ /// from external storage. To avoid unnecessary deserialization of
+ /// methods/nested types we allow deserialization of just the fields
+ /// when needed.
+ mutable bool LoadedFieldsFromExternalStorage : 1;
+ friend class DeclContext;
+
protected:
RecordDecl(Kind DK, TagKind TK, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
RecordDecl *PrevDecl, SourceLocation TKL);
public:
- static RecordDecl *Create(ASTContext &C, TagKind TK, DeclContext *DC,
+ static RecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
SourceLocation TKL = SourceLocation(),
RecordDecl* PrevDecl = 0);
- static RecordDecl *Create(ASTContext &C, EmptyShell Empty);
+ static RecordDecl *Create(const ASTContext &C, EmptyShell Empty);
const RecordDecl *getPreviousDeclaration() const {
return cast_or_null<RecordDecl>(TagDecl::getPreviousDeclaration());
@@ -2190,11 +2450,6 @@ public:
AnonymousStructOrUnion = Anon;
}
- ValueDecl *getAnonymousStructOrUnionObject();
- const ValueDecl *getAnonymousStructOrUnionObject() const {
- return const_cast<RecordDecl*>(this)->getAnonymousStructOrUnionObject();
- }
-
bool hasObjectMember() const { return HasObjectMember; }
void setHasObjectMember (bool val) { HasObjectMember = val; }
@@ -2229,11 +2484,10 @@ public:
// data members, functions, constructors, destructors, etc.
typedef specific_decl_iterator<FieldDecl> field_iterator;
- field_iterator field_begin() const {
- return field_iterator(decls_begin());
- }
+ field_iterator field_begin() const;
+
field_iterator field_end() const {
- return field_iterator(decls_end());
+ return field_iterator(decl_iterator());
}
// field_empty - Whether there are any fields (non-static data
@@ -2244,13 +2498,17 @@ public:
/// completeDefinition - Notes that the definition of this type is
/// now complete.
- void completeDefinition();
+ virtual void completeDefinition();
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const RecordDecl *D) { return true; }
static bool classofKind(Kind K) {
return K >= firstRecord && K <= lastRecord;
}
+
+private:
+ /// \brief Deserialize just the fields.
+ void LoadFieldsFromExternalStorage() const;
};
class FileScopeAsmDecl : public Decl {
@@ -2275,8 +2533,49 @@ public:
/// ^{ statement-body } or ^(int arg1, float arg2){ statement-body }
///
class BlockDecl : public Decl, public DeclContext {
+public:
+ /// A class which contains all the information about a particular
+ /// captured value.
+ class Capture {
+ enum {
+ flag_isByRef = 0x1,
+ flag_isNested = 0x2
+ };
+
+ /// The variable being captured.
+ llvm::PointerIntPair<VarDecl*, 2> VariableAndFlags;
+
+ /// The copy expression, expressed in terms of a DeclRef (or
+ /// BlockDeclRef) to the captured variable. Only required if the
+ /// variable has a C++ class type.
+ Expr *CopyExpr;
+
+ public:
+ Capture(VarDecl *variable, bool byRef, bool nested, Expr *copy)
+ : VariableAndFlags(variable,
+ (byRef ? flag_isByRef : 0) | (nested ? flag_isNested : 0)),
+ CopyExpr(copy) {}
+
+ /// The variable being captured.
+ VarDecl *getVariable() const { return VariableAndFlags.getPointer(); }
+
+ /// Whether this is a "by ref" capture, i.e. a capture of a __block
+ /// variable.
+ bool isByRef() const { return VariableAndFlags.getInt() & flag_isByRef; }
+
+ /// Whether this is a nested capture, i.e. the variable captured
+ /// is not from outside the immediately enclosing function/block.
+ bool isNested() const { return VariableAndFlags.getInt() & flag_isNested; }
+
+ bool hasCopyExpr() const { return CopyExpr != 0; }
+ Expr *getCopyExpr() const { return CopyExpr; }
+ void setCopyExpr(Expr *e) { CopyExpr = e; }
+ };
+
+private:
// FIXME: This can be packed into the bitfields in Decl.
bool IsVariadic : 1;
+ bool CapturesCXXThis : 1;
/// ParamInfo - new[]'d array of pointers to ParmVarDecls for the formal
/// parameters of this function. This is null if a prototype or if there are
/// no formals.
@@ -2286,11 +2585,15 @@ class BlockDecl : public Decl, public DeclContext {
Stmt *Body;
TypeSourceInfo *SignatureAsWritten;
+ Capture *Captures;
+ unsigned NumCaptures;
+
protected:
BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
: Decl(Block, DC, CaretLoc), DeclContext(Block),
- IsVariadic(false), ParamInfo(0), NumParams(0), Body(0),
- SignatureAsWritten(0) {}
+ IsVariadic(false), CapturesCXXThis(false),
+ ParamInfo(0), NumParams(0), Body(0),
+ SignatureAsWritten(0), Captures(0), NumCaptures(0) {}
public:
static BlockDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L);
@@ -2319,7 +2622,7 @@ public:
param_const_iterator param_begin() const { return ParamInfo; }
param_const_iterator param_end() const { return ParamInfo+param_size(); }
- unsigned getNumParams() const;
+ unsigned getNumParams() const { return NumParams; }
const ParmVarDecl *getParamDecl(unsigned i) const {
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
@@ -2330,6 +2633,30 @@ public:
}
void setParams(ParmVarDecl **NewParamInfo, unsigned NumParams);
+ /// hasCaptures - True if this block (or its nested blocks) captures
+ /// anything of local storage from its enclosing scopes.
+ bool hasCaptures() const { return NumCaptures != 0 || CapturesCXXThis; }
+
+ /// getNumCaptures - Returns the number of captured variables.
+ /// Does not include an entry for 'this'.
+ unsigned getNumCaptures() const { return NumCaptures; }
+
+ typedef const Capture *capture_iterator;
+ typedef const Capture *capture_const_iterator;
+ capture_iterator capture_begin() { return Captures; }
+ capture_iterator capture_end() { return Captures + NumCaptures; }
+ capture_const_iterator capture_begin() const { return Captures; }
+ capture_const_iterator capture_end() const { return Captures + NumCaptures; }
+
+ bool capturesCXXThis() const { return CapturesCXXThis; }
+
+ void setCaptures(ASTContext &Context,
+ const Capture *begin,
+ const Capture *end,
+ bool capturesCXXThis);
+
+ virtual SourceRange getSourceRange() const;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const BlockDecl *D) { return true; }
@@ -2350,6 +2677,32 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
return DB;
}
+template<typename decl_type>
+void Redeclarable<decl_type>::setPreviousDeclaration(decl_type *PrevDecl) {
+ // Note: This routine is implemented here because we need both NamedDecl
+ // and Redeclarable to be defined.
+
+ decl_type *First;
+
+ if (PrevDecl) {
+ // Point to previous. Make sure that this is actually the most recent
+ // redeclaration, or we can build invalid chains. If the most recent
+ // redeclaration is invalid, it won't be PrevDecl, but we want it anyway.
+ RedeclLink = PreviousDeclLink(llvm::cast<decl_type>(
+ PrevDecl->getMostRecentDeclaration()));
+ First = PrevDecl->getFirstDeclaration();
+ assert(First->RedeclLink.NextIsLatest() && "Expected first");
+ } else {
+ // Make this first.
+ First = static_cast<decl_type*>(this);
+ }
+
+ // First one will point to this one as latest.
+ First->RedeclLink = LatestDeclLink(static_cast<decl_type*>(this));
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(static_cast<decl_type*>(this)))
+ ND->ClearLinkageCache();
+}
+
} // end namespace clang
#endif
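setPreviousDeclaration above maintains one invariant: every declaration except the first points back through the chain, while the first points forward at the most recent. A self-contained sketch of the same link discipline, using hypothetical names rather than Clang's Redeclarable API:

    #include <cassert>

    // Stand-in for the Redeclarable link: every node but the first points
    // to its previous redeclaration; the first points at the latest.
    struct Node {
      Node *Link;
      bool IsFirst;
      Node() : Link(this), IsFirst(true) {}
      Node *getFirst() {
        Node *N = this;
        while (!N->IsFirst) N = N->Link;
        return N;
      }
      Node *getMostRecent() { return getFirst()->Link; }
      void setPrevious(Node *Prev) {
        Link = Prev->getMostRecent();   // chain to the real latest, as above
        IsFirst = false;
        Prev->getFirst()->Link = this;  // first now points at the new latest
      }
    };

    int main() {
      Node A, B, C;
      B.setPrevious(&A);
      C.setPrevious(&B);
      assert(A.getMostRecent() == &C && C.getFirst() == &A);
    }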
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
index 1369c2b..bf249ce 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -43,6 +43,7 @@ class DeclarationName;
class CompoundStmt;
class StoredDeclsMap;
class DependentDiagnostic;
+class ASTMutationListener;
}
namespace llvm {
@@ -197,25 +198,25 @@ private:
return DeclCtx.get<DeclContext*>();
}
- /// Loc - The location that this decl.
+ /// Loc - The location of this decl.
SourceLocation Loc;
/// DeclKind - This indicates which class this is.
- Kind DeclKind : 8;
+ unsigned DeclKind : 8;
/// InvalidDecl - This indicates a semantic error occurred.
- unsigned int InvalidDecl : 1;
+ unsigned InvalidDecl : 1;
/// HasAttrs - This indicates whether the decl has attributes or not.
- unsigned int HasAttrs : 1;
+ unsigned HasAttrs : 1;
/// Implicit - Whether this declaration was implicitly generated by
/// the implementation rather than explicitly written by the user.
- bool Implicit : 1;
+ unsigned Implicit : 1;
/// \brief Whether this declaration was "used", meaning that a definition is
/// required.
- bool Used : 1;
+ unsigned Used : 1;
protected:
/// Access - Used by C++ decls for the access specifier.
@@ -227,17 +228,24 @@ protected:
unsigned PCHLevel : 2;
/// ChangedAfterLoad - Whether this declaration has changed since being loaded.
- bool ChangedAfterLoad : 1;
+ unsigned ChangedAfterLoad : 1;
/// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
- unsigned IdentifierNamespace : 15;
+ unsigned IdentifierNamespace : 12;
+ /// \brief Whether the \c CachedLinkage field is active.
+ ///
+ /// This field is only valid for NamedDecls subclasses.
+ mutable unsigned HasCachedLinkage : 1;
+
+ /// \brief If \c HasCachedLinkage, the linkage of this declaration.
+ ///
+ /// This field is only valid for NamedDecls subclasses.
+ mutable unsigned CachedLinkage : 2;
+
+
private:
-#ifndef NDEBUG
void CheckAccessDeclContext() const;
-#else
- void CheckAccessDeclContext() const { }
-#endif
protected:
@@ -246,7 +254,9 @@ protected:
Loc(L), DeclKind(DK), InvalidDecl(0),
HasAttrs(false), Implicit(false), Used(false),
Access(AS_none), PCHLevel(0), ChangedAfterLoad(false),
- IdentifierNamespace(getIdentifierNamespaceForKind(DK)) {
+ IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
+ HasCachedLinkage(0)
+ {
if (Decl::CollectingStats()) add(DK);
}
@@ -254,7 +264,9 @@ protected:
: NextDeclInContext(0), DeclKind(DK), InvalidDecl(0),
HasAttrs(false), Implicit(false), Used(false),
Access(AS_none), PCHLevel(0), ChangedAfterLoad(false),
- IdentifierNamespace(getIdentifierNamespaceForKind(DK)) {
+ IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
+ HasCachedLinkage(0)
+ {
if (Decl::CollectingStats()) add(DK);
}
@@ -272,7 +284,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- Kind getKind() const { return DeclKind; }
+ Kind getKind() const { return static_cast<Kind>(DeclKind); }
const char *getDeclKindName() const;
Decl *getNextDeclInContext() { return NextDeclInContext; }
@@ -298,17 +310,21 @@ public:
void setAccess(AccessSpecifier AS) {
Access = AS;
+#ifndef NDEBUG
CheckAccessDeclContext();
+#endif
}
AccessSpecifier getAccess() const {
+#ifndef NDEBUG
CheckAccessDeclContext();
+#endif
return AccessSpecifier(Access);
}
bool hasAttrs() const { return HasAttrs; }
void setAttrs(const AttrVec& Attrs);
- AttrVec& getAttrs() {
+ AttrVec &getAttrs() {
return const_cast<AttrVec&>(const_cast<const Decl*>(this)->getAttrs());
}
const AttrVec &getAttrs() const;
@@ -551,6 +567,9 @@ public:
/// template parameter pack.
bool isTemplateParameterPack() const;
+ /// \brief Whether this declaration is a parameter pack.
+ bool isParameterPack() const;
+
/// \brief Whether this declaration is a function or function template.
bool isFunctionOrFunctionTemplate() const;
@@ -621,10 +640,14 @@ public:
llvm::raw_ostream &Out, const PrintingPolicy &Policy,
unsigned Indentation = 0);
void dump() const;
+ void dumpXML() const;
+ void dumpXML(llvm::raw_ostream &OS) const;
private:
const Attr *getAttrsImpl() const;
+protected:
+ ASTMutationListener *getASTMutationListener() const;
};
/// PrettyStackTraceDecl - If a crash occurs, indicate that it happened when
@@ -681,23 +704,24 @@ public:
///
class DeclContext {
/// DeclKind - This indicates which class this is.
- Decl::Kind DeclKind : 8;
+ unsigned DeclKind : 8;
/// \brief Whether this declaration context also has some external
/// storage that contains additional declarations that are lexically
/// part of this context.
- mutable bool ExternalLexicalStorage : 1;
+ mutable unsigned ExternalLexicalStorage : 1;
/// \brief Whether this declaration context also has some external
/// storage that contains additional declarations that are visible
/// in this context.
- mutable bool ExternalVisibleStorage : 1;
+ mutable unsigned ExternalVisibleStorage : 1;
/// \brief Pointer to the data structure used to lookup declarations
/// within this context (or a DependentStoredDeclsMap if this is a
/// dependent context).
mutable StoredDeclsMap *LookupPtr;
+protected:
/// FirstDecl - The first declaration stored within this declaration
/// context.
mutable Decl *FirstDecl;
@@ -710,7 +734,12 @@ class DeclContext {
friend class ExternalASTSource;
-protected:
+ /// \brief Build up a chain of declarations.
+ ///
+ /// \returns the first/last pair of declarations.
+ static std::pair<Decl *, Decl *>
+ BuildDeclChain(const llvm::SmallVectorImpl<Decl*> &Decls);
+
DeclContext(Decl::Kind K)
: DeclKind(K), ExternalLexicalStorage(false),
ExternalVisibleStorage(false), LookupPtr(0), FirstDecl(0),
@@ -720,7 +749,7 @@ public:
~DeclContext();
Decl::Kind getDeclKind() const {
- return DeclKind;
+ return static_cast<Decl::Kind>(DeclKind);
}
const char *getDeclKindName() const;
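BuildDeclChain above turns a list of deserialized declarations into an intrusive singly linked chain and reports its endpoints. A minimal sketch of what such a helper plausibly does, with a hypothetical DeclNode standing in for Decl and its NextDeclInContext link:

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    struct DeclNode { DeclNode *Next; DeclNode() : Next(0) {} };

    // Threads the nodes together through their intrusive Next links and
    // returns the (first, last) pair, in the spirit of the declaration above.
    static std::pair<DeclNode*, DeclNode*>
    BuildDeclChain(const std::vector<DeclNode*> &Decls) {
      if (Decls.empty())
        return std::make_pair(static_cast<DeclNode*>(0),
                              static_cast<DeclNode*>(0));
      for (std::size_t I = 0; I + 1 < Decls.size(); ++I)
        Decls[I]->Next = Decls[I + 1];
      return std::make_pair(Decls.front(), Decls.back());
    }

    int main() {
      DeclNode A, B, C;
      std::vector<DeclNode*> V;
      V.push_back(&A); V.push_back(&B); V.push_back(&C);
      std::pair<DeclNode*, DeclNode*> FL = BuildDeclChain(V);
      assert(FL.first == &A && FL.second == &C && B.Next == &C);
    }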
@@ -807,6 +836,10 @@ public:
/// C++0x scoped enums), and C++ linkage specifications.
bool isTransparentContext() const;
+ /// \brief Determines whether this context is, or is nested within,
+ /// a C++ extern "C" linkage spec.
+ bool isExternCContext() const;
+
/// \brief Determine whether this declaration context is equivalent
/// to the declaration context DC.
bool Equals(const DeclContext *DC) const {
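The bit-field changes in this file (bool- and Kind-typed fields becoming unsigned, with a static_cast reintroducing the enum type in the getters) are presumably motivated by portability: the signedness of enum and bool bit-fields is implementation-defined, and some compilers (classically MSVC) also pack differently-typed adjacent bit-fields into separate allocation units. A small illustration of the signedness hazard:

    #include <cstdio>

    enum Kind { FirstKind = 0, SomeKind = 200 };   // needs all 8 value bits

    struct EnumField { Kind DeclKind : 8; };       // signedness is up to the ABI
    struct PlainField {
      unsigned DeclKind : 8;                       // always holds 0..255
      Kind getKind() const { return static_cast<Kind>(DeclKind); }
    };

    int main() {
      EnumField E; E.DeclKind = SomeKind;
      PlainField P; P.DeclKind = SomeKind;
      // On compilers that give enum bit-fields a signed representation,
      // E.DeclKind reads back as -56; P.getKind() is 200 everywhere.
      std::printf("%d %d\n", static_cast<int>(E.DeclKind),
                  static_cast<int>(P.getKind()));
    }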
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
index a9802bf..d11ee8f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -35,6 +35,7 @@ class CXXMethodDecl;
class CXXRecordDecl;
class CXXMemberLookupCriteria;
class CXXFinalOverriderMap;
+class CXXIndirectPrimaryBaseSet;
class FriendDecl;
/// \brief Represents any kind of function declaration, whether it is a
@@ -112,6 +113,8 @@ class AccessSpecDecl : public Decl {
: Decl(AccessSpec, DC, ASLoc), ColonLoc(ColonLoc) {
setAccess(AS);
}
+ AccessSpecDecl(EmptyShell Empty)
+ : Decl(AccessSpec, Empty) { }
public:
/// getAccessSpecifierLoc - The location of the access specifier.
SourceLocation getAccessSpecifierLoc() const { return getLocation(); }
@@ -132,6 +135,9 @@ public:
SourceLocation ColonLoc) {
return new (C) AccessSpecDecl(AS, DC, ASLoc, ColonLoc);
}
+ static AccessSpecDecl *Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) AccessSpecDecl(Empty);
+ }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -162,6 +168,10 @@ class CXXBaseSpecifier {
/// specifier (if present).
SourceRange Range;
+ /// \brief The source location of the ellipsis, if this is a pack
+ /// expansion.
+ SourceLocation EllipsisLoc;
+
/// Virtual - Whether this is a virtual base class or not.
bool Virtual : 1;
@@ -177,6 +187,10 @@ class CXXBaseSpecifier {
/// VC++ bug.
unsigned Access : 2;
+ /// InheritConstructors - Whether the class contains a using declaration
+ /// to inherit the named class's constructors.
+ bool InheritConstructors : 1;
+
/// BaseTypeInfo - The type of the base class. This will be a class or struct
/// (or a typedef of such). The source code range does not include the
/// "virtual" or access specifier.
@@ -186,8 +200,9 @@ public:
CXXBaseSpecifier() { }
CXXBaseSpecifier(SourceRange R, bool V, bool BC, AccessSpecifier A,
- TypeSourceInfo *TInfo)
- : Range(R), Virtual(V), BaseOfClass(BC), Access(A), BaseTypeInfo(TInfo) { }
+ TypeSourceInfo *TInfo, SourceLocation EllipsisLoc)
+ : Range(R), EllipsisLoc(EllipsisLoc), Virtual(V), BaseOfClass(BC),
+ Access(A), InheritConstructors(false), BaseTypeInfo(TInfo) { }
/// getSourceRange - Retrieves the source range that contains the
/// entire base specifier.
@@ -201,6 +216,22 @@ public:
/// with the 'class' keyword (vs. one declared with the 'struct' keyword).
bool isBaseOfClass() const { return BaseOfClass; }
+ /// \brief Determine whether this base specifier is a pack expansion.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+
+ /// \brief Determine whether this base class's constructors get inherited.
+ bool getInheritConstructors() const { return InheritConstructors; }
+
+ /// \brief Set that this base class's constructors should be inherited.
+ void setInheritConstructors(bool Inherit = true) {
+ InheritConstructors = Inherit;
+ }
+
+ /// \brief For a pack expansion, determine the location of the ellipsis.
+ SourceLocation getEllipsisLoc() const {
+ return EllipsisLoc;
+ }
+
/// getAccessSpecifier - Returns the access specifier for this base
/// specifier. This is the actual base specifier as used for
/// semantic analysis, so the result can never be AS_none. To
@@ -336,20 +367,20 @@ class CXXRecordDecl : public RecordDecl {
/// \brief Whether we have already declared a destructor within the class.
bool DeclaredDestructor : 1;
-
- /// Bases - Base classes of this class.
- /// FIXME: This is wasted space for a union.
- CXXBaseSpecifier *Bases;
/// NumBases - The number of base class specifiers in Bases.
unsigned NumBases;
-
- /// VBases - direct and indirect virtual base classes of this class.
- CXXBaseSpecifier *VBases;
-
+
/// NumVBases - The number of virtual base class specifiers in VBases.
unsigned NumVBases;
+ /// Bases - Base classes of this class.
+ /// FIXME: This is wasted space for a union.
+ LazyCXXBaseSpecifiersPtr Bases;
+
+ /// VBases - direct and indirect virtual base classes of this class.
+ LazyCXXBaseSpecifiersPtr VBases;
+
/// Conversions - Overload set containing the conversion functions
/// of this C++ class (but not its inherited conversion
/// functions). Each of the entries in this overload set is a
@@ -371,6 +402,15 @@ class CXXRecordDecl : public RecordDecl {
/// in reverse order.
FriendDecl *FirstFriend;
+ /// \brief Retrieve the set of direct base classes.
+ CXXBaseSpecifier *getBases() const {
+ return Bases.get(Definition->getASTContext().getExternalSource());
+ }
+
+ /// \brief Retrieve the set of virtual base classes.
+ CXXBaseSpecifier *getVBases() const {
+ return VBases.get(Definition->getASTContext().getExternalSource());
+ }
} *DefinitionData;
struct DefinitionData &data() {
@@ -395,9 +435,17 @@ class CXXRecordDecl : public RecordDecl {
llvm::PointerUnion<ClassTemplateDecl*, MemberSpecializationInfo*>
TemplateOrInstantiation;
-#ifndef NDEBUG
- void CheckConversionFunction(NamedDecl *D);
-#endif
+ friend class DeclContext;
+
+ /// \brief Notify the class that member has been added.
+ ///
+ /// This routine helps maintain information about the class based on which
+ /// members have been added. It will be invoked by DeclContext::addDecl()
+ /// whenever a member is added to this record.
+ void addedMember(Decl *D);
+
+ void markedVirtualFunctionPure();
+ friend void FunctionDecl::setPure(bool);
protected:
CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
@@ -445,12 +493,12 @@ public:
bool hasDefinition() const { return DefinitionData != 0; }
- static CXXRecordDecl *Create(ASTContext &C, TagKind TK, DeclContext *DC,
+ static CXXRecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
SourceLocation TKL = SourceLocation(),
CXXRecordDecl* PrevDecl=0,
bool DelayTypeCreation = false);
- static CXXRecordDecl *Create(ASTContext &C, EmptyShell Empty);
+ static CXXRecordDecl *Create(const ASTContext &C, EmptyShell Empty);
bool isDynamicClass() const {
return data().Polymorphic || data().NumVBases != 0;
@@ -463,8 +511,8 @@ public:
/// class.
unsigned getNumBases() const { return data().NumBases; }
- base_class_iterator bases_begin() { return data().Bases; }
- base_class_const_iterator bases_begin() const { return data().Bases; }
+ base_class_iterator bases_begin() { return data().getBases(); }
+ base_class_const_iterator bases_begin() const { return data().getBases(); }
base_class_iterator bases_end() { return bases_begin() + data().NumBases; }
base_class_const_iterator bases_end() const {
return bases_begin() + data().NumBases;
@@ -486,8 +534,8 @@ public:
/// class.
unsigned getNumVBases() const { return data().NumVBases; }
- base_class_iterator vbases_begin() { return data().VBases; }
- base_class_const_iterator vbases_begin() const { return data().VBases; }
+ base_class_iterator vbases_begin() { return data().getVBases(); }
+ base_class_const_iterator vbases_begin() const { return data().getVBases(); }
base_class_iterator vbases_end() { return vbases_begin() + data().NumVBases; }
base_class_const_iterator vbases_end() const {
return vbases_begin() + data().NumVBases;
@@ -553,18 +601,12 @@ public:
return data().DeclaredDefaultConstructor;
}
- /// \brief Note whether this class has already had its default constructor
- /// implicitly declared or doesn't need one.
- void setDeclaredDefaultConstructor(bool DDC) {
- data().DeclaredDefaultConstructor = DDC;
- }
-
/// hasConstCopyConstructor - Determines whether this class has a
/// copy constructor that accepts a const-qualified argument.
- bool hasConstCopyConstructor(ASTContext &Context) const;
+ bool hasConstCopyConstructor(const ASTContext &Context) const;
/// getCopyConstructor - Returns the copy constructor for this class
- CXXConstructorDecl *getCopyConstructor(ASTContext &Context,
+ CXXConstructorDecl *getCopyConstructor(const ASTContext &Context,
unsigned TypeQuals) const;
/// \brief Retrieve the copy-assignment operator for this class, if available.
@@ -579,11 +621,6 @@ public:
/// a unique copy-assignment operator could not be found.
CXXMethodDecl *getCopyAssignmentOperator(bool ArgIsConst) const;
- /// addedConstructor - Notify the class that another constructor has
- /// been added. This routine helps maintain information about the
- /// class based on which constructors have been added.
- void addedConstructor(ASTContext &Context, CXXConstructorDecl *ConDecl);
-
/// hasUserDeclaredConstructor - Whether this class has any
/// user-declared constructors. When true, a default constructor
/// will not be implicitly declared.
@@ -606,17 +643,6 @@ public:
return data().DeclaredCopyConstructor;
}
- /// \brief Note whether this class has already had its copy constructor
- /// declared.
- void setDeclaredCopyConstructor(bool DCC) {
- data().DeclaredCopyConstructor = DCC;
- }
-
- /// addedAssignmentOperator - Notify the class that another assignment
- /// operator has been added. This routine helps maintain information about the
- /// class based on which operators have been added.
- void addedAssignmentOperator(ASTContext &Context, CXXMethodDecl *OpDecl);
-
/// hasUserDeclaredCopyAssignment - Whether this class has a
/// user-declared copy assignment operator. When false, a copy
/// assigment operator will be implicitly declared.
@@ -632,12 +658,6 @@ public:
return data().DeclaredCopyAssignment;
}
- /// \brief Note whether this class has already had its copy assignment
- /// operator declared.
- void setDeclaredCopyAssignment(bool DCA) {
- data().DeclaredCopyAssignment = DCA;
- }
-
/// hasUserDeclaredDestructor - Whether this class has a
/// user-declared destructor. When false, a destructor will be
/// implicitly declared.
@@ -645,26 +665,12 @@ public:
return data().UserDeclaredDestructor;
}
- /// setUserDeclaredDestructor - Set whether this class has a
- /// user-declared destructor. If not set by the time the class is
- /// fully defined, a destructor will be implicitly declared.
- void setUserDeclaredDestructor(bool UCD) {
- data().UserDeclaredDestructor = UCD;
- if (UCD)
- data().DeclaredDestructor = true;
- }
-
/// \brief Determine whether this class has had its destructor declared,
/// either via the user or via an implicit declaration.
///
/// This value is used for lazy creation of destructors.
bool hasDeclaredDestructor() const { return data().DeclaredDestructor; }
-
- /// \brief Note whether this class has already had its destructor declared.
- void setDeclaredDestructor(bool DD) {
- data().DeclaredDestructor = DD;
- }
-
+
/// getConversions - Retrieve the overload set containing all of the
/// conversion functions in this class.
UnresolvedSetImpl *getConversionFunctions() {
@@ -682,13 +688,6 @@ public:
return getConversionFunctions()->end();
}
- /// Replaces a conversion function with a new declaration.
- ///
- /// Returns true if the old conversion was found.
- bool replaceConversion(const NamedDecl* Old, NamedDecl *New) {
- return getConversionFunctions()->replace(Old, New);
- }
-
/// Removes a conversion function from this class. The conversion
/// function must currently be a member of this class. Furthermore,
/// this class must currently be in the process of being defined.
@@ -698,105 +697,52 @@ public:
/// in current class; including conversion function templates.
const UnresolvedSetImpl *getVisibleConversionFunctions();
- /// addConversionFunction - Registers a conversion function which
- /// this class declares directly.
- void addConversionFunction(NamedDecl *Decl) {
-#ifndef NDEBUG
- CheckConversionFunction(Decl);
-#endif
-
- // We intentionally don't use the decl's access here because it
- // hasn't been set yet. That's really just a misdesign in Sema.
- data().Conversions.addDecl(Decl);
- }
-
/// isAggregate - Whether this class is an aggregate (C++
/// [dcl.init.aggr]), which is a class with no user-declared
/// constructors, no private or protected non-static data members,
/// no base classes, and no virtual functions (C++ [dcl.init.aggr]p1).
bool isAggregate() const { return data().Aggregate; }
- /// setAggregate - Set whether this class is an aggregate (C++
- /// [dcl.init.aggr]).
- void setAggregate(bool Agg) { data().Aggregate = Agg; }
-
- /// setMethodAsVirtual - Make input method virtual and set the necesssary
- /// special function bits and other bits accordingly.
- void setMethodAsVirtual(FunctionDecl *Method);
-
/// isPOD - Whether this class is a POD-type (C++ [class]p4), which is a class
/// that is an aggregate that has no non-static non-POD data members, no
/// reference data members, no user-defined copy assignment operator and no
/// user-defined destructor.
bool isPOD() const { return data().PlainOldData; }
- /// setPOD - Set whether this class is a POD-type (C++ [class]p4).
- void setPOD(bool POD) { data().PlainOldData = POD; }
-
/// isEmpty - Whether this class is empty (C++0x [meta.unary.prop]), which
/// means it has a virtual function, virtual base, data member (other than
/// 0-width bit-field) or inherits from a non-empty class. Does NOT include
/// a check for union-ness.
bool isEmpty() const { return data().Empty; }
- /// Set whether this class is empty (C++0x [meta.unary.prop])
- void setEmpty(bool Emp) { data().Empty = Emp; }
-
/// isPolymorphic - Whether this class is polymorphic (C++ [class.virtual]),
/// which means that the class contains or inherits a virtual function.
bool isPolymorphic() const { return data().Polymorphic; }
- /// setPolymorphic - Set whether this class is polymorphic (C++
- /// [class.virtual]).
- void setPolymorphic(bool Poly) { data().Polymorphic = Poly; }
-
/// isAbstract - Whether this class is abstract (C++ [class.abstract]),
/// which means that the class contains or inherits a pure virtual function.
bool isAbstract() const { return data().Abstract; }
- /// setAbstract - Set whether this class is abstract (C++ [class.abstract])
- void setAbstract(bool Abs) { data().Abstract = Abs; }
-
// hasTrivialConstructor - Whether this class has a trivial constructor
// (C++ [class.ctor]p5)
bool hasTrivialConstructor() const { return data().HasTrivialConstructor; }
- // setHasTrivialConstructor - Set whether this class has a trivial constructor
- // (C++ [class.ctor]p5)
- void setHasTrivialConstructor(bool TC) { data().HasTrivialConstructor = TC; }
-
// hasTrivialCopyConstructor - Whether this class has a trivial copy
// constructor (C++ [class.copy]p6)
bool hasTrivialCopyConstructor() const {
return data().HasTrivialCopyConstructor;
}
- // setHasTrivialCopyConstructor - Set whether this class has a trivial
- // copy constructor (C++ [class.copy]p6)
- void setHasTrivialCopyConstructor(bool TC) {
- data().HasTrivialCopyConstructor = TC;
- }
-
// hasTrivialCopyAssignment - Whether this class has a trivial copy
// assignment operator (C++ [class.copy]p11)
bool hasTrivialCopyAssignment() const {
return data().HasTrivialCopyAssignment;
}
- // setHasTrivialCopyAssignment - Set whether this class has a
- // trivial copy assignment operator (C++ [class.copy]p11)
- void setHasTrivialCopyAssignment(bool TC) {
- data().HasTrivialCopyAssignment = TC;
- }
-
// hasTrivialDestructor - Whether this class has a trivial destructor
// (C++ [class.dtor]p3)
bool hasTrivialDestructor() const { return data().HasTrivialDestructor; }
- // setHasTrivialDestructor - Set whether this class has a trivial destructor
- // (C++ [class.dtor]p3)
- void setHasTrivialDestructor(bool TC) { data().HasTrivialDestructor = TC; }
-
/// \brief If this record is an instantiation of a member class,
/// retrieves the member class from which it was instantiated.
///
@@ -854,9 +800,6 @@ public:
/// \brief Set the kind of specialization or template instantiation this is.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK);
-
- /// getDefaultConstructor - Returns the default constructor for this class
- CXXConstructorDecl *getDefaultConstructor();
/// getDestructor - Returns the destructor decl for this class.
CXXDestructorDecl *getDestructor() const;
@@ -880,7 +823,7 @@ public:
/// \param Base the base class we are searching for.
///
/// \returns true if this class is derived from Base, false otherwise.
- bool isDerivedFrom(CXXRecordDecl *Base) const;
+ bool isDerivedFrom(const CXXRecordDecl *Base) const;
/// \brief Determine whether this class is derived from the type \p Base.
///
@@ -898,7 +841,7 @@ public:
///
/// \todo add a separate parameter to configure IsDerivedFrom, rather than
/// tangling input and output in \p Paths
- bool isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) const;
+ bool isDerivedFrom(const CXXRecordDecl *Base, CXXBasePaths &Paths) const;
/// \brief Determine whether this class is virtually derived from
/// the class \p Base.
@@ -1034,6 +977,9 @@ public:
/// most-derived class in the class hierarchy.
void getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const;
+ /// \brief Get the indirect primary bases for this class.
+ void getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const;
+
/// viewInheritance - Renders and displays an inheritance diagram
/// for this C++ class and all of its base classes (transitively) using
/// GraphViz.
@@ -1048,6 +994,27 @@ public:
return (PathAccess > DeclAccess ? PathAccess : DeclAccess);
}
+ /// \brief Indicates that the definition of this class is now complete.
+ virtual void completeDefinition();
+
+ /// \brief Indicates that the definition of this class is now complete,
+ /// and provides a final overrider map to help determine whether it is abstract.
+ ///
+ /// \param FinalOverriders The final overrider map for this class, which can
+ /// be provided as an optimization for abstract-class checking. If NULL,
+ /// final overriders will be computed if they are needed to complete the
+ /// definition.
+ void completeDefinition(CXXFinalOverriderMap *FinalOverriders);
+
+ /// \brief Determine whether this class may end up being abstract, even though
+ /// it is not yet known to be abstract.
+ ///
+ /// \returns true if this class is not known to be abstract but has any
+ /// base classes that are abstract. In this case, \c completeDefinition()
+ /// will need to compute final overriders to determine whether the class is
+ /// actually abstract.
+ bool mayBeAbstract() const;
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstCXXRecord && K <= lastCXXRecord;
@@ -1059,6 +1026,8 @@ public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
+ friend class ASTReader;
+ friend class ASTWriter;
};
/// CXXMethodDecl - Represents a static or instance method of a
@@ -1139,6 +1108,20 @@ public:
return getType()->getAs<FunctionProtoType>()->getTypeQuals();
}
+ /// \brief Retrieve the ref-qualifier associated with this method.
+ ///
+ /// In the following example, \c f() has an lvalue ref-qualifier, \c g()
+ /// has an rvalue ref-qualifier, and \c h() has no ref-qualifier.
+ /// \code
+ /// struct X {
+ /// void f() &;
+ /// void g() &&;
+ /// void h();
+ /// };
+ /// \endcode
+ RefQualifierKind getRefQualifier() const {
+ return getType()->getAs<FunctionProtoType>()->getRefQualifier();
+ }
+
bool hasInlineBody() const;
// Implement isa/cast/dyncast/etc.
@@ -1149,7 +1132,7 @@ public:
}
};
-/// CXXBaseOrMemberInitializer - Represents a C++ base or member
+/// CXXCtorInitializer - Represents a C++ base or member
/// initializer, which is part of a constructor initializer that
/// initializes one non-static member variable or one base class. For
/// example, in the following, both 'A(a)' and 'f(3.14159)' are member
@@ -1163,37 +1146,20 @@ public:
/// B(A& a) : A(a), f(3.14159) { }
/// };
/// @endcode
-class CXXBaseOrMemberInitializer {
- /// \brief Either the base class name (stored as a TypeSourceInfo*) or the
- /// field being initialized.
- llvm::PointerUnion<TypeSourceInfo *, FieldDecl *> BaseOrMember;
+class CXXCtorInitializer {
+ /// \brief Either the base class name (stored as a TypeSourceInfo*), a normal
+ /// field (FieldDecl) or an anonymous field (IndirectFieldDecl*) being
+ /// initialized.
+ llvm::PointerUnion3<TypeSourceInfo *, FieldDecl *, IndirectFieldDecl *>
+ Initializee;
- /// \brief The source location for the field name.
- SourceLocation MemberLocation;
+ /// \brief The source location for the field name or, for a base initializer
+ /// pack expansion, the location of the ellipsis.
+ SourceLocation MemberOrEllipsisLocation;
/// \brief The argument used to initialize the base or member, which may
/// end up constructing an object (when multiple arguments are involved).
Stmt *Init;
-
- /// \brief Stores either the constructor to call to initialize this base or
- /// member (a CXXConstructorDecl pointer), or stores the anonymous union of
- /// which the initialized value is a member.
- ///
- /// When the value is a FieldDecl pointer, 'BaseOrMember' is class's
- /// anonymous union data member, this field holds the FieldDecl for the
- /// member of the anonymous union being initialized.
- /// @code
- /// struct X {
- /// X() : au_i1(123) {}
- /// union {
- /// int au_i1;
- /// float au_f1;
- /// };
- /// };
- /// @endcode
- /// In above example, BaseOrMember holds the field decl. for anonymous union
- /// and AnonUnionMember holds field decl for au_i1.
- FieldDecl *AnonUnionMember;
/// LParenLoc - Location of the left paren of the ctor-initializer.
SourceLocation LParenLoc;
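Initializee above is a three-way discriminated pointer union. A standalone sketch of the same interface shape, with a plain tag field where the real llvm::PointerUnion3 hides the tag in the pointer's low bits:

    #include <cassert>

    struct TypeSourceInfo {};
    struct FieldDecl {};
    struct IndirectFieldDecl {};

    // Hypothetical tagged union mirroring the predicates above.
    class InitializeeSketch {
      enum Tag { TSI, Field, Indirect } T;
      void *P;
    public:
      InitializeeSketch(TypeSourceInfo *X) : T(TSI), P(X) {}
      InitializeeSketch(FieldDecl *X) : T(Field), P(X) {}
      InitializeeSketch(IndirectFieldDecl *X) : T(Indirect), P(X) {}
      bool isBaseInitializer() const { return T == TSI; }
      bool isMemberInitializer() const { return T == Field; }
      bool isIndirectMemberInitializer() const { return T == Indirect; }
      FieldDecl *getMember() const {
        return isMemberInitializer() ? static_cast<FieldDecl*>(P) : 0;
      }
    };

    int main() {
      FieldDecl F;
      InitializeeSketch I(&F);
      assert(I.isMemberInitializer() && I.getMember() == &F);
    }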
@@ -1208,6 +1174,7 @@ class CXXBaseOrMemberInitializer {
/// IsWritten - Whether or not the initializer is explicitly written
/// in the sources.
bool IsWritten : 1;
+
/// SourceOrderOrNumArrayIndices - If IsWritten is true, then this
/// number keeps track of the textual order of this initializer in the
/// original sources, counting from 0; otherwise, if IsWritten is false,
@@ -1215,50 +1182,62 @@ class CXXBaseOrMemberInitializer {
/// object in memory.
unsigned SourceOrderOrNumArrayIndices : 14;
- CXXBaseOrMemberInitializer(ASTContext &Context,
- FieldDecl *Member, SourceLocation MemberLoc,
- SourceLocation L,
- Expr *Init,
- SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices);
+ CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R, VarDecl **Indices, unsigned NumIndices);
public:
- /// CXXBaseOrMemberInitializer - Creates a new base-class initializer.
+ /// CXXCtorInitializer - Creates a new base-class initializer.
explicit
- CXXBaseOrMemberInitializer(ASTContext &Context,
- TypeSourceInfo *TInfo, bool IsVirtual,
- SourceLocation L,
- Expr *Init,
- SourceLocation R);
+ CXXCtorInitializer(ASTContext &Context, TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init, SourceLocation R,
+ SourceLocation EllipsisLoc);
- /// CXXBaseOrMemberInitializer - Creates a new member initializer.
+ /// CXXCtorInitializer - Creates a new member initializer.
explicit
- CXXBaseOrMemberInitializer(ASTContext &Context,
- FieldDecl *Member, SourceLocation MemberLoc,
- SourceLocation L,
- Expr *Init,
- SourceLocation R);
+ CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R);
+
+ explicit
+ CXXCtorInitializer(ASTContext &Context, IndirectFieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R);
/// \brief Creates a new member initializer that optionally contains
/// array indices used to describe an elementwise initialization.
- static CXXBaseOrMemberInitializer *Create(ASTContext &Context,
- FieldDecl *Member,
- SourceLocation MemberLoc,
- SourceLocation L,
- Expr *Init,
- SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices);
+ static CXXCtorInitializer *Create(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L,
+ Expr *Init, SourceLocation R,
+ VarDecl **Indices, unsigned NumIndices);
/// isBaseInitializer - Returns true when this initializer is
/// initializing a base class.
- bool isBaseInitializer() const { return BaseOrMember.is<TypeSourceInfo*>(); }
+ bool isBaseInitializer() const { return Initializee.is<TypeSourceInfo*>(); }
/// isMemberInitializer - Returns true when this initializer is
/// initializing a non-static data member.
- bool isMemberInitializer() const { return BaseOrMember.is<FieldDecl*>(); }
+ bool isMemberInitializer() const { return Initializee.is<FieldDecl*>(); }
+
+ bool isAnyMemberInitializer() const {
+ return isMemberInitializer() || isIndirectMemberInitializer();
+ }
+ bool isIndirectMemberInitializer() const {
+ return Initializee.is<IndirectFieldDecl*>();
+ }
+
+ /// \brief Determine whether this initializer is a pack expansion.
+ bool isPackExpansion() const {
+ return isBaseInitializer() && MemberOrEllipsisLocation.isValid();
+ }
+
+ /// \brief For a pack expansion, returns the location of the ellipsis.
+ SourceLocation getEllipsisLoc() const {
+ assert(isPackExpansion() && "Initializer is not a pack expansion");
+ return MemberOrEllipsisLocation;
+ }
+
/// If this is a base class initializer, returns the type of the
/// base class with location information. Otherwise, returns a NULL
/// type location.
@@ -1267,7 +1246,6 @@ public:
/// If this is a base class initializer, returns the type of the base class.
/// Otherwise, returns NULL.
const Type *getBaseClass() const;
- Type *getBaseClass();
/// Returns whether the base is virtual or not.
bool isBaseVirtual() const {
@@ -1278,7 +1256,7 @@ public:
/// \brief Returns the declarator information for a base class initializer.
TypeSourceInfo *getBaseClassInfo() const {
- return BaseOrMember.dyn_cast<TypeSourceInfo *>();
+ return Initializee.dyn_cast<TypeSourceInfo *>();
}
/// getMember - If this is a member initializer, returns the
@@ -1286,18 +1264,28 @@ public:
/// initialized. Otherwise, returns NULL.
FieldDecl *getMember() const {
if (isMemberInitializer())
- return BaseOrMember.get<FieldDecl*>();
+ return Initializee.get<FieldDecl*>();
+ else
+ return 0;
+ }
+ FieldDecl *getAnyMember() const {
+ if (isMemberInitializer())
+ return Initializee.get<FieldDecl*>();
+ else if (isIndirectMemberInitializer())
+ return Initializee.get<IndirectFieldDecl*>()->getAnonField();
else
return 0;
}
- SourceLocation getMemberLocation() const {
- return MemberLocation;
+ IndirectFieldDecl *getIndirectMember() const {
+ if (isIndirectMemberInitializer())
+ return Initializee.get<IndirectFieldDecl*>();
+ else
+ return 0;
}
- void setMember(FieldDecl *Member) {
- assert(isMemberInitializer());
- BaseOrMember = Member;
+ SourceLocation getMemberLocation() const {
+ return MemberOrEllipsisLocation;
}
/// \brief Determine the source location of the initializer.
@@ -1329,15 +1317,7 @@ public:
IsWritten = true;
SourceOrderOrNumArrayIndices = static_cast<unsigned>(pos);
}
-
- FieldDecl *getAnonUnionMember() const {
- return AnonUnionMember;
- }
- void setAnonUnionMember(FieldDecl *anonMember) {
- AnonUnionMember = anonMember;
- }
-
SourceLocation getLParenLoc() const { return LParenLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
@@ -1388,10 +1368,10 @@ class CXXConstructorDecl : public CXXMethodDecl {
bool ImplicitlyDefined : 1;
/// Support for base and member initializers.
- /// BaseOrMemberInitializers - The arguments used to initialize the base
+ /// CtorInitializers - The arguments used to initialize the base
/// or member.
- CXXBaseOrMemberInitializer **BaseOrMemberInitializers;
- unsigned NumBaseOrMemberInitializers;
+ CXXCtorInitializer **CtorInitializers;
+ unsigned NumCtorInitializers;
CXXConstructorDecl(CXXRecordDecl *RD, const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
@@ -1400,7 +1380,7 @@ class CXXConstructorDecl : public CXXMethodDecl {
: CXXMethodDecl(CXXConstructor, RD, NameInfo, T, TInfo, false,
SC_None, isInline),
IsExplicitSpecified(isExplicitSpecified), ImplicitlyDefined(false),
- BaseOrMemberInitializers(0), NumBaseOrMemberInitializers(0) {
+ CtorInitializers(0), NumCtorInitializers(0) {
setImplicit(isImplicitlyDeclared);
}
@@ -1443,37 +1423,54 @@ public:
}
/// init_iterator - Iterates through the member/base initializer list.
- typedef CXXBaseOrMemberInitializer **init_iterator;
+ typedef CXXCtorInitializer **init_iterator;
/// init_const_iterator - Iterates through the member/base initializer list.
- typedef CXXBaseOrMemberInitializer * const * init_const_iterator;
+ typedef CXXCtorInitializer * const * init_const_iterator;
/// init_begin() - Retrieve an iterator to the first initializer.
- init_iterator init_begin() { return BaseOrMemberInitializers; }
+ init_iterator init_begin() { return CtorInitializers; }
/// begin() - Retrieve an iterator to the first initializer.
- init_const_iterator init_begin() const { return BaseOrMemberInitializers; }
+ init_const_iterator init_begin() const { return CtorInitializers; }
/// init_end() - Retrieve an iterator past the last initializer.
init_iterator init_end() {
- return BaseOrMemberInitializers + NumBaseOrMemberInitializers;
+ return CtorInitializers + NumCtorInitializers;
}
/// end() - Retrieve an iterator past the last initializer.
init_const_iterator init_end() const {
- return BaseOrMemberInitializers + NumBaseOrMemberInitializers;
+ return CtorInitializers + NumCtorInitializers;
+ }
+
+ typedef std::reverse_iterator<init_iterator> init_reverse_iterator;
+ typedef std::reverse_iterator<init_const_iterator> init_const_reverse_iterator;
+
+ init_reverse_iterator init_rbegin() {
+ return init_reverse_iterator(init_end());
+ }
+ init_const_reverse_iterator init_rbegin() const {
+ return init_const_reverse_iterator(init_end());
+ }
+
+ init_reverse_iterator init_rend() {
+ return init_reverse_iterator(init_begin());
+ }
+ init_const_reverse_iterator init_rend() const {
+ return init_const_reverse_iterator(init_begin());
}
/// getNumArgs - Determine the number of arguments used to
/// initialize the member or base.
- unsigned getNumBaseOrMemberInitializers() const {
- return NumBaseOrMemberInitializers;
+ unsigned getNumCtorInitializers() const {
+ return NumCtorInitializers;
}
- void setNumBaseOrMemberInitializers(unsigned numBaseOrMemberInitializers) {
- NumBaseOrMemberInitializers = numBaseOrMemberInitializers;
+ void setNumCtorInitializers(unsigned numCtorInitializers) {
+ NumCtorInitializers = numCtorInitializers;
}
- void setBaseOrMemberInitializers(CXXBaseOrMemberInitializer ** initializers) {
- BaseOrMemberInitializers = initializers;
+ void setCtorInitializers(CXXCtorInitializer ** initializers) {
+ CtorInitializers = initializers;
}
/// isDefaultConstructor - Whether this constructor is a default
/// constructor (C++ [class.ctor]p5), which can be used to
@@ -1502,15 +1499,44 @@ public:
return isCopyConstructor(TypeQuals);
}
+ /// \brief Determine whether this constructor is a move constructor
+ /// (C++0x [class.copy]p3), which can be used to move values of the class.
+ ///
+ /// \param TypeQuals If this constructor is a move constructor, will be set
+ /// to the type qualifiers on the referent of the first parameter's type.
+ bool isMoveConstructor(unsigned &TypeQuals) const;
+
+ /// \brief Determine whether this constructor is a move constructor
+ /// (C++0x [class.copy]p3), which can be used to move values of the class.
+ bool isMoveConstructor() const;
+
+ /// \brief Determine whether this is a copy or move constructor.
+ ///
+ /// \param TypeQuals Will be set to the type qualifiers on the reference
+ /// parameter, if in fact this is a copy or move constructor.
+ bool isCopyOrMoveConstructor(unsigned &TypeQuals) const;
+
+ /// \brief Determine whether this a copy or move constructor.
+ bool isCopyOrMoveConstructor() const {
+ unsigned Quals;
+ return isCopyOrMoveConstructor(Quals);
+ }
+
/// isConvertingConstructor - Whether this constructor is a
/// converting constructor (C++ [class.conv.ctor]), which can be
/// used for user-defined conversions.
bool isConvertingConstructor(bool AllowExplicit) const;
/// \brief Determine whether this is a member template specialization that
- /// looks like a copy constructor. Such constructors are never used to copy
+ /// would copy the object to itself. Such constructors are never used to copy
/// an object.
- bool isCopyConstructorLikeSpecialization() const;
+ bool isSpecializationCopyingObject() const;
+
+ /// \brief Get the constructor that this inheriting constructor is based on.
+ const CXXConstructorDecl *getInheritedConstructor() const;
+
+ /// \brief Set the constructor that this inheriting constructor is based on.
+ void setInheritedConstructor(const CXXConstructorDecl *BaseCtor);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -1542,8 +1568,9 @@ class CXXDestructorDecl : public CXXMethodDecl {
FunctionDecl *OperatorDelete;
CXXDestructorDecl(CXXRecordDecl *RD, const DeclarationNameInfo &NameInfo,
- QualType T, bool isInline, bool isImplicitlyDeclared)
- : CXXMethodDecl(CXXDestructor, RD, NameInfo, T, /*TInfo=*/0, false,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isImplicitlyDeclared)
+ : CXXMethodDecl(CXXDestructor, RD, NameInfo, T, TInfo, false,
SC_None, isInline),
ImplicitlyDefined(false), OperatorDelete(0) {
setImplicit(isImplicitlyDeclared);
@@ -1553,7 +1580,8 @@ public:
static CXXDestructorDecl *Create(ASTContext& C, EmptyShell Empty);
static CXXDestructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
const DeclarationNameInfo &NameInfo,
- QualType T, bool isInline,
+ QualType T, TypeSourceInfo* TInfo,
+ bool isInline,
bool isImplicitlyDeclared);
/// isImplicitlyDefined - Whether this destructor was implicitly
@@ -1918,13 +1946,16 @@ class UsingShadowDecl : public NamedDecl {
/// The referenced declaration.
NamedDecl *Underlying;
- /// The using declaration which introduced this decl.
- UsingDecl *Using;
+ /// \brief The using declaration which introduced this decl or the next using
+ /// shadow declaration contained in the aforementioned using declaration.
+ NamedDecl *UsingOrNextShadow;
+ friend class UsingDecl;
UsingShadowDecl(DeclContext *DC, SourceLocation Loc, UsingDecl *Using,
NamedDecl *Target)
: NamedDecl(UsingShadow, DC, Loc, DeclarationName()),
- Underlying(Target), Using(Using) {
+ Underlying(Target),
+ UsingOrNextShadow(reinterpret_cast<NamedDecl *>(Using)) {
if (Target) {
setDeclName(Target->getDeclName());
IdentifierNamespace = Target->getIdentifierNamespace();
@@ -1952,15 +1983,20 @@ public:
}
/// \brief Gets the using declaration to which this declaration is tied.
- UsingDecl *getUsingDecl() const { return Using; }
+ UsingDecl *getUsingDecl() const;
- /// \brief Sets the using declaration that introduces this target
- /// declaration.
- void setUsingDecl(UsingDecl* UD) { Using = UD; }
+ /// \brief The next using shadow declaration contained in the shadow decl
+ /// chain of the using declaration which introduced this decl.
+ UsingShadowDecl *getNextUsingShadowDecl() const {
+ return dyn_cast_or_null<UsingShadowDecl>(UsingOrNextShadow);
+ }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UsingShadowDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Decl::UsingShadow; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
/// UsingDecl - Represents a C++ using-declaration. For example:
@@ -1980,10 +2016,9 @@ class UsingDecl : public NamedDecl {
/// declaration name embedded in the ValueDecl base class.
DeclarationNameLoc DNLoc;
- /// \brief The collection of shadow declarations associated with
- /// this using declaration. This set can change as a class is
- /// processed.
- llvm::SmallPtrSet<UsingShadowDecl*, 8> Shadows;
+ /// \brief The first shadow declaration of the shadow decl chain associated
+ /// with this using declaration.
+ UsingShadowDecl *FirstUsingShadow;
// \brief Has 'typename' keyword.
bool IsTypeName;
@@ -1993,7 +2028,7 @@ class UsingDecl : public NamedDecl {
const DeclarationNameInfo &NameInfo, bool IsTypeNameArg)
: NamedDecl(Using, DC, NameInfo.getLoc(), NameInfo.getName()),
NestedNameRange(NNR), UsingLocation(UL), TargetNestedName(TargetNNS),
- DNLoc(NameInfo.getInfo()), IsTypeName(IsTypeNameArg) {
+ DNLoc(NameInfo.getInfo()), FirstUsingShadow(0), IsTypeName(IsTypeNameArg) {
}
public:
@@ -2031,29 +2066,58 @@ public:
/// \brief Sets whether the using declaration has 'typename'.
void setTypeName(bool TN) { IsTypeName = TN; }
- typedef llvm::SmallPtrSet<UsingShadowDecl*,8>::const_iterator shadow_iterator;
- shadow_iterator shadow_begin() const { return Shadows.begin(); }
- shadow_iterator shadow_end() const { return Shadows.end(); }
+ /// \brief Iterates through the using shadow declarations associated with
+ /// this using declaration.
+ class shadow_iterator {
+ /// \brief The current using shadow declaration.
+ UsingShadowDecl *Current;
+
+ public:
+ typedef UsingShadowDecl* value_type;
+ typedef UsingShadowDecl* reference;
+ typedef UsingShadowDecl* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ shadow_iterator() : Current(0) { }
+ explicit shadow_iterator(UsingShadowDecl *C) : Current(C) { }
- void addShadowDecl(UsingShadowDecl *S) {
- assert(S->getUsingDecl() == this);
- if (!Shadows.insert(S)) {
- assert(false && "declaration already in set");
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ shadow_iterator& operator++() {
+ Current = Current->getNextUsingShadowDecl();
+ return *this;
}
- }
- void removeShadowDecl(UsingShadowDecl *S) {
- assert(S->getUsingDecl() == this);
- if (!Shadows.erase(S)) {
- assert(false && "declaration not in set");
+
+ shadow_iterator operator++(int) {
+ shadow_iterator tmp(*this);
+ ++(*this);
+ return tmp;
}
+
+ friend bool operator==(shadow_iterator x, shadow_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(shadow_iterator x, shadow_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ shadow_iterator shadow_begin() const {
+ return shadow_iterator(FirstUsingShadow);
}
+ shadow_iterator shadow_end() const { return shadow_iterator(); }
/// \brief Return the number of shadowed declarations associated with this
/// using declaration.
- unsigned getNumShadowDecls() const {
- return Shadows.size();
+ unsigned shadow_size() const {
+ return std::distance(shadow_begin(), shadow_end());
}
+ void addShadowDecl(UsingShadowDecl *S);
+ void removeShadowDecl(UsingShadowDecl *S);
+
static UsingDecl *Create(ASTContext &C, DeclContext *DC,
SourceRange NNR, SourceLocation UsingL,
NestedNameSpecifier* TargetNNS,
@@ -2145,6 +2209,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UnresolvedUsingValueDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UnresolvedUsingValue; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
/// UnresolvedUsingTypenameDecl - Represents a dependent using
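The UsingDecl changes above trade a SmallPtrSet of shadow declarations for an intrusive chain threaded through UsingShadowDecl, walked by a hand-written forward iterator; shadow_size() is then just std::distance over that chain. A self-contained sketch of the same iterator shape, with a hypothetical Shadow node in place of the Clang types:

    #include <cassert>
    #include <cstddef>
    #include <iterator>

    // Node carrying the intrusive 'next shadow' link.
    struct Shadow {
      Shadow *Next;
      explicit Shadow(Shadow *N = 0) : Next(N) {}
    };

    // Forward iterator shaped like UsingDecl::shadow_iterator above:
    // a default-constructed iterator is the end of the chain.
    class shadow_iterator {
      Shadow *Current;
    public:
      typedef Shadow* value_type;
      typedef Shadow* reference;
      typedef Shadow* pointer;
      typedef std::forward_iterator_tag iterator_category;
      typedef std::ptrdiff_t difference_type;

      shadow_iterator() : Current(0) {}
      explicit shadow_iterator(Shadow *C) : Current(C) {}
      reference operator*() const { return Current; }
      shadow_iterator &operator++() { Current = Current->Next; return *this; }
      friend bool operator==(shadow_iterator X, shadow_iterator Y) {
        return X.Current == Y.Current;
      }
      friend bool operator!=(shadow_iterator X, shadow_iterator Y) {
        return X.Current != Y.Current;
      }
    };

    int main() {
      Shadow C, B(&C), A(&B);             // chain: A -> B -> C -> null
      // shadow_size() above is std::distance over exactly this kind of chain.
      assert(std::distance(shadow_iterator(&A), shadow_iterator()) == 3);
    }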
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
index 4b5e6fd..20d6da1 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -43,11 +43,16 @@ private:
FriendUnion Friend;
// A pointer to the next friend in the sequence.
- FriendDecl *NextFriend;
+ LazyDeclPtr NextFriend;
// Location of the 'friend' specifier.
SourceLocation FriendLoc;
+ /// True if this 'friend' declaration is unsupported. Eventually we
+ /// will support every possible friend declaration, but for now we
+ /// silently ignore some and set this flag to authorize all access.
+ bool UnsupportedFriend;
+
friend class CXXRecordDecl::friend_iterator;
friend class CXXRecordDecl;
@@ -55,13 +60,19 @@ private:
SourceLocation FriendL)
: Decl(Decl::Friend, DC, L),
Friend(Friend),
- NextFriend(0),
- FriendLoc(FriendL) {
+ NextFriend(),
+ FriendLoc(FriendL),
+ UnsupportedFriend(false) {
}
explicit FriendDecl(EmptyShell Empty)
- : Decl(Decl::Friend, Empty), NextFriend(0) { }
+ : Decl(Decl::Friend, Empty), NextFriend() { }
+ FriendDecl *getNextFriend() {
+ return cast_or_null<FriendDecl>(
+ NextFriend.get(getASTContext().getExternalSource()));
+ }
+
public:
static FriendDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, FriendUnion Friend_,
@@ -87,6 +98,14 @@ public:
return FriendLoc;
}
+ /// Determines if this friend kind is unsupported.
+ bool isUnsupportedFriend() const {
+ return UnsupportedFriend;
+ }
+ void setUnsupportedFriend(bool Unsupported) {
+ UnsupportedFriend = Unsupported;
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FriendDecl *D) { return true; }
@@ -115,7 +134,7 @@ public:
friend_iterator &operator++() {
assert(Ptr && "attempt to increment past end of friend list");
- Ptr = Ptr->NextFriend;
+ Ptr = Ptr->getNextFriend();
return *this;
}
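NextFriend above becomes a LazyDeclPtr, so the next friend in the chain can live in external storage until first use. A sketch of the lazy-pointer idea under simplifying assumptions (all names hypothetical; the real lazy pointer packs the materialized pointer and the serialized ID into one word, while this keeps them separate for clarity):

    #include <cassert>
    #include <cstdint>

    struct Decl { int N; };

    // Stand-in for an external AST source that materializes a decl by ID.
    struct ExternalSource {
      virtual ~ExternalSource() {}
      virtual Decl *resolve(std::uint64_t ID) = 0;
    };

    class LazyPtrSketch {
      mutable Decl *Ptr;
      mutable std::uint64_t ID;    // nonzero => not yet deserialized
    public:
      LazyPtrSketch() : Ptr(0), ID(0) {}
      explicit LazyPtrSketch(std::uint64_t id) : Ptr(0), ID(id) {}
      Decl *get(ExternalSource *Src) const {
        if (ID) { Ptr = Src->resolve(ID); ID = 0; }  // fault in once, cache
        return Ptr;
      }
    };

    struct OneDeclSource : ExternalSource {
      Decl D;
      Decl *resolve(std::uint64_t) { return &D; }
    };

    int main() {
      OneDeclSource Src;
      LazyPtrSketch P(42);
      assert(P.get(&Src) == &Src.D);   // later get() calls reuse the cache
    }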
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
index 030291e..cb9e168 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
@@ -14,7 +14,7 @@
#ifndef LLVM_CLANG_AST_DECLGROUP_H
#define LLVM_CLANG_AST_DECLGROUP_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace clang {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
index ad26748..b3ca474 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -28,7 +28,7 @@ class ObjCProtocolDecl;
class ObjCCategoryDecl;
class ObjCPropertyDecl;
class ObjCPropertyImplDecl;
-class CXXBaseOrMemberInitializer;
+class CXXCtorInitializer;
class ObjCListBase {
void operator=(const ObjCListBase &); // DO NOT IMPLEMENT
@@ -437,7 +437,7 @@ public:
class ObjCInterfaceDecl : public ObjCContainerDecl {
/// TypeForDecl - This indicates the Type object that represents this
/// TypeDecl. It is a cache maintained by ASTContext::getObjCInterfaceType
- mutable Type *TypeForDecl;
+ mutable const Type *TypeForDecl;
friend class ASTContext;
/// Class's super class.
@@ -449,8 +449,11 @@ class ObjCInterfaceDecl : public ObjCContainerDecl {
/// Protocols reference in both the @interface and class extensions.
ObjCList<ObjCProtocolDecl> AllReferencedProtocols;
- /// List of categories defined for this class.
- /// FIXME: Why is this a linked list??
+ /// \brief List of categories and class extensions defined for this class.
+ ///
+ /// Categories are stored as a linked list in the AST, since the categories
+ /// and class extensions come long after the initial interface declaration,
+ /// and we avoid dynamically-resized arrays in the AST wherever possible.
ObjCCategoryDecl *CategoryList;
/// IvarList - List of all ivars defined by this class; including class
@@ -459,7 +462,11 @@ class ObjCInterfaceDecl : public ObjCContainerDecl {
bool ForwardDecl:1; // declared with @class.
bool InternalInterface:1; // true - no @interface for @implementation
-
+
+ /// \brief Indicates that the contents of this Objective-C class will be
+ /// completed by the external AST source when required.
+ mutable bool ExternallyCompleted : 1;
+
SourceLocation ClassLoc; // location of the class identifier.
SourceLocation SuperClassLoc; // location of the super class identifier.
SourceLocation EndLoc; // marks the '>', '}', or identifier.
@@ -467,6 +474,7 @@ class ObjCInterfaceDecl : public ObjCContainerDecl {
ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
SourceLocation CLoc, bool FD, bool isInternal);
+ void LoadExternalDefinition() const;
public:
static ObjCInterfaceDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation atLoc,
@@ -474,7 +482,16 @@ public:
SourceLocation ClassLoc = SourceLocation(),
bool ForwardDecl = false,
bool isInternal = false);
+
+ /// \brief Indicate that this Objective-C class is complete, but that
+ /// the external AST source will be responsible for filling in its contents
+ /// when a complete class is required.
+ void setExternallyCompleted();
+
const ObjCProtocolList &getReferencedProtocols() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return ReferencedProtocols;
}
@@ -494,29 +511,47 @@ public:
typedef ObjCProtocolList::iterator protocol_iterator;
protocol_iterator protocol_begin() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return ReferencedProtocols.begin();
}
protocol_iterator protocol_end() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return ReferencedProtocols.end();
}
typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
protocol_loc_iterator protocol_loc_begin() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return ReferencedProtocols.loc_begin();
}
protocol_loc_iterator protocol_loc_end() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return ReferencedProtocols.loc_end();
}
typedef ObjCList<ObjCProtocolDecl>::iterator all_protocol_iterator;
all_protocol_iterator all_referenced_protocol_begin() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return AllReferencedProtocols.empty() ? protocol_begin()
: AllReferencedProtocols.begin();
}
all_protocol_iterator all_referenced_protocol_end() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return AllReferencedProtocols.empty() ? protocol_end()
: AllReferencedProtocols.end();
}
@@ -551,10 +586,22 @@ public:
bool isForwardDecl() const { return ForwardDecl; }
void setForwardDecl(bool val) { ForwardDecl = val; }
- ObjCInterfaceDecl *getSuperClass() const { return SuperClass; }
+ ObjCInterfaceDecl *getSuperClass() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return SuperClass;
+ }
+
void setSuperClass(ObjCInterfaceDecl * superCls) { SuperClass = superCls; }
- ObjCCategoryDecl* getCategoryList() const { return CategoryList; }
+ ObjCCategoryDecl* getCategoryList() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return CategoryList;
+ }
+
void setCategoryList(ObjCCategoryDecl *category) {
CategoryList = category;
}
@@ -595,7 +642,7 @@ public:
ObjCInterfaceDecl *lookupInheritedClass(const IdentifierInfo *ICName);
// Lookup a method in the classes implementation hierarchy.
- ObjCMethodDecl *lookupPrivateInstanceMethod(const Selector &Sel);
+ ObjCMethodDecl *lookupPrivateMethod(const Selector &Sel, bool Instance=true);
// Location information, modeled after the Stmt API.
SourceLocation getLocStart() const { return getLocation(); } // '@'interface
@@ -621,8 +668,8 @@ public:
bool RHSIsQualifiedID = false);
// Low-level accessor
- Type *getTypeForDecl() const { return TypeForDecl; }
- void setTypeForDecl(Type *TD) const { TypeForDecl = TD; }
+ const Type *getTypeForDecl() const { return TypeForDecl; }
+ void setTypeForDecl(const Type *TD) const { TypeForDecl = TD; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCInterfaceDecl *D) { return true; }
@@ -991,6 +1038,7 @@ public:
void insertNextClassCategory() {
NextClassCategory = ClassInterface->getCategoryList();
ClassInterface->setCategoryList(this);
+ ClassInterface->setChangedSinceDeserialization(true);
}
bool IsClassExtension() const { return getIdentifier() == 0; }
@@ -1168,7 +1216,7 @@ class ObjCImplementationDecl : public ObjCImplDecl {
ObjCInterfaceDecl *SuperClass;
/// Support for ivar initialization.
/// IvarInitializers - The arguments used to initialize the ivars
- CXXBaseOrMemberInitializer **IvarInitializers;
+ CXXCtorInitializer **IvarInitializers;
unsigned NumIvarInitializers;
  /// true if class extension has at least one bitfield ivar.
@@ -1187,10 +1235,10 @@ public:
ObjCInterfaceDecl *superDecl);
/// init_iterator - Iterates through the ivar initializer list.
- typedef CXXBaseOrMemberInitializer **init_iterator;
+ typedef CXXCtorInitializer **init_iterator;
/// init_const_iterator - Iterates through the ivar initializer list.
- typedef CXXBaseOrMemberInitializer * const * init_const_iterator;
+ typedef CXXCtorInitializer * const * init_const_iterator;
/// init_begin() - Retrieve an iterator to the first initializer.
init_iterator init_begin() { return IvarInitializers; }
@@ -1215,7 +1263,7 @@ public:
}
void setIvarInitializers(ASTContext &C,
- CXXBaseOrMemberInitializer ** initializers,
+ CXXCtorInitializer ** initializers,
unsigned numInitializers);
bool hasSynthBitfield() const { return HasSynthBitfield; }
@@ -1322,7 +1370,8 @@ public:
OBJC_PR_retain = 0x10,
OBJC_PR_copy = 0x20,
OBJC_PR_nonatomic = 0x40,
- OBJC_PR_setter = 0x80
+ OBJC_PR_setter = 0x80,
+ OBJC_PR_atomic = 0x100
};
enum SetterKind { Assign, Retain, Copy };
@@ -1330,8 +1379,8 @@ public:
private:
SourceLocation AtLoc; // location of @property
TypeSourceInfo *DeclType;
- unsigned PropertyAttributes : 8;
- unsigned PropertyAttributesAsWritten : 8;
+ unsigned PropertyAttributes : 9;
+ unsigned PropertyAttributesAsWritten : 9;
// @required/@optional
unsigned PropertyImplementation : 2;
@@ -1429,6 +1478,10 @@ public:
return PropertyIvarDecl;
}
+ virtual SourceRange getSourceRange() const {
+ return SourceRange(AtLoc, getLocation());
+ }
+
/// Lookup a property by name in the specified DeclContext.
static ObjCPropertyDecl *findPropertyDecl(const DeclContext *DC,
IdentifierInfo *propertyID);
@@ -1450,6 +1503,15 @@ public:
};
private:
SourceLocation AtLoc; // location of @synthesize or @dynamic
+
+ /// \brief For @synthesize, the location of the ivar, if it was written in
+ /// the source code.
+ ///
+ /// \code
+ /// @synthesize int a = b
+ /// \endcode
+ SourceLocation IvarLoc;
+
/// Property declaration being implemented
ObjCPropertyDecl *PropertyDecl;
@@ -1466,9 +1528,10 @@ private:
ObjCPropertyImplDecl(DeclContext *DC, SourceLocation atLoc, SourceLocation L,
ObjCPropertyDecl *property,
Kind PK,
- ObjCIvarDecl *ivarDecl)
+ ObjCIvarDecl *ivarDecl,
+ SourceLocation ivarLoc)
: Decl(ObjCPropertyImpl, DC, L), AtLoc(atLoc),
- PropertyDecl(property), PropertyIvarDecl(ivarDecl),
+ IvarLoc(ivarLoc), PropertyDecl(property), PropertyIvarDecl(ivarDecl),
GetterCXXConstructor(0), SetterCXXAssignment(0) {
assert (PK == Dynamic || PropertyIvarDecl);
}
@@ -1478,11 +1541,11 @@ public:
SourceLocation atLoc, SourceLocation L,
ObjCPropertyDecl *property,
Kind PK,
- ObjCIvarDecl *ivarDecl);
+ ObjCIvarDecl *ivarDecl,
+ SourceLocation ivarLoc);
- virtual SourceRange getSourceRange() const {
- return SourceRange(AtLoc, getLocation());
- }
+ virtual SourceRange getSourceRange() const;
+
SourceLocation getLocStart() const { return AtLoc; }
void setAtLoc(SourceLocation Loc) { AtLoc = Loc; }
@@ -1498,7 +1561,13 @@ public:
ObjCIvarDecl *getPropertyIvarDecl() const {
return PropertyIvarDecl;
}
- void setPropertyIvarDecl(ObjCIvarDecl *Ivar) { PropertyIvarDecl = Ivar; }
+ SourceLocation getPropertyIvarDeclLoc() const { return IvarLoc; }
+
+ void setPropertyIvarDecl(ObjCIvarDecl *Ivar,
+ SourceLocation IvarLoc) {
+ PropertyIvarDecl = Ivar;
+ this->IvarLoc = IvarLoc;
+ }
Expr *getGetterCXXConstructor() const {
return GetterCXXConstructor;
@@ -1517,6 +1586,8 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCPropertyImplDecl *D) { return true; }
static bool classofKind(Decl::Kind K) { return K == ObjCPropertyImpl; }
+
+ friend class ASTDeclReader;
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
index b532668..176c6ba 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -53,7 +53,7 @@ class TemplateParameterList {
SourceLocation RAngleLoc);
public:
- static TemplateParameterList *Create(ASTContext &C,
+ static TemplateParameterList *Create(const ASTContext &C,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
NamedDecl **Params,
@@ -85,7 +85,7 @@ public:
return begin()[Idx];
}
- /// \btief Returns the minimum number of arguments needed to form a
+ /// \brief Returns the minimum number of arguments needed to form a
/// template specialization. This may be fewer than the number of
/// template parameters, if some of the parameters have default
/// arguments or if there is a parameter pack.
@@ -107,101 +107,57 @@ public:
}
};
-/// \brief A helper class for making template argument lists.
-class TemplateArgumentListBuilder {
- TemplateArgument *StructuredArgs;
- unsigned MaxStructuredArgs;
- unsigned NumStructuredArgs;
-
- llvm::SmallVector<TemplateArgument, 4> FlatArgs;
- unsigned MaxFlatArgs;
- unsigned NumFlatArgs;
-
- bool AddingToPack;
- unsigned PackBeginIndex;
-
-public:
- TemplateArgumentListBuilder(const TemplateParameterList *Parameters,
- unsigned NumTemplateArgs)
- : StructuredArgs(0), MaxStructuredArgs(Parameters->size()),
- NumStructuredArgs(0), FlatArgs(0),
- MaxFlatArgs(std::max(MaxStructuredArgs, NumTemplateArgs)), NumFlatArgs(0),
- AddingToPack(false), PackBeginIndex(0) { }
-
- void Append(const TemplateArgument &Arg);
- void BeginPack();
- void EndPack();
-
- unsigned flatSize() const { return FlatArgs.size(); }
- const TemplateArgument *getFlatArguments() const { return FlatArgs.data(); }
-
- unsigned structuredSize() const {
- // If we don't have any structured args, just reuse the flat size.
- if (!StructuredArgs)
- return flatSize();
-
- return NumStructuredArgs;
- }
- const TemplateArgument *getStructuredArguments() const {
- // If we don't have any structured args, just reuse the flat args.
- if (!StructuredArgs)
- return getFlatArguments();
-
- return StructuredArgs;
- }
-};
-
/// \brief A template argument list.
-///
-/// FIXME: In the future, this class will be extended to support
-/// variadic templates and member templates, which will make some of
-/// the function names below make more sense.
class TemplateArgumentList {
/// \brief The template argument list.
///
/// The integer value will be non-zero to indicate that this
/// template argument list does own the pointer.
- llvm::PointerIntPair<const TemplateArgument *, 1> FlatArguments;
+ llvm::PointerIntPair<const TemplateArgument *, 1> Arguments;
/// \brief The number of template arguments in this template
/// argument list.
- unsigned NumFlatArguments;
-
- llvm::PointerIntPair<const TemplateArgument *, 1> StructuredArguments;
- unsigned NumStructuredArguments;
+ unsigned NumArguments;
TemplateArgumentList(const TemplateArgumentList &Other); // DO NOT IMPL
void operator=(const TemplateArgumentList &Other); // DO NOT IMPL
+
+ TemplateArgumentList(const TemplateArgument *Args, unsigned NumArgs,
+ bool Owned)
+ : Arguments(Args, Owned), NumArguments(NumArgs) { }
+
public:
- /// TemplateArgumentList - If this constructor is passed "true" for 'TakeArgs'
- /// it copies them into a locally new[]'d array. If passed "false", then it
- /// just references the array passed in. This is only safe if the builder
- /// outlives it, but saves a copy.
- TemplateArgumentList(ASTContext &Context,
- TemplateArgumentListBuilder &Builder,
- bool TakeArgs);
-
- /// TemplateArgumentList - It copies the template arguments into a locally
- /// new[]'d array.
- TemplateArgumentList(ASTContext &Context,
- const TemplateArgument *Args, unsigned NumArgs);
-
- /// Produces a shallow copy of the given template argument list. This
- /// assumes that the input argument list outlives it. This takes the list as
- /// a pointer to avoid looking like a copy constructor, since this really
- /// really isn't safe to use that way.
- explicit TemplateArgumentList(const TemplateArgumentList *Other);
-
- TemplateArgumentList() : NumFlatArguments(0), NumStructuredArguments(0) { }
-
- /// \brief Copies the template arguments into a locally new[]'d array.
- void init(ASTContext &Context,
- const TemplateArgument *Args, unsigned NumArgs);
+ /// \brief Type used to indicate that the template argument list itself is a
+ /// stack object. It does not own its template arguments.
+ enum OnStackType { OnStack };
+
+ /// \brief Create a new template argument list that copies the given set of
+ /// template arguments.
+ static TemplateArgumentList *CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ /// \brief Construct a new, temporary template argument list on the stack.
+ ///
+ /// The template argument list does not own the template arguments
+ /// provided.
+ explicit TemplateArgumentList(OnStackType,
+ const TemplateArgument *Args, unsigned NumArgs)
+ : Arguments(Args, false), NumArguments(NumArgs) { }
+
+ /// \brief Produces a shallow copy of the given template argument list.
+ ///
+ /// This operation assumes that the input argument list outlives it.
+ /// This takes the list as a pointer to avoid looking like a copy
+ /// constructor, since this really really isn't safe to use that
+ /// way.
+ explicit TemplateArgumentList(const TemplateArgumentList *Other)
+ : Arguments(Other->data(), false), NumArguments(Other->size()) { }
/// \brief Retrieve the template argument at a given index.
const TemplateArgument &get(unsigned Idx) const {
- assert(Idx < NumFlatArguments && "Invalid template argument index");
- return getFlatArgumentList()[Idx];
+ assert(Idx < NumArguments && "Invalid template argument index");
+ return data()[Idx];
}
/// \brief Retrieve the template argument at a given index.
@@ -209,15 +165,11 @@ public:
/// \brief Retrieve the number of template arguments in this
/// template argument list.
- unsigned size() const { return NumFlatArguments; }
-
- /// \brief Retrieve the number of template arguments in the
- /// flattened template argument list.
- unsigned flat_size() const { return NumFlatArguments; }
+ unsigned size() const { return NumArguments; }
- /// \brief Retrieve the flattened template argument list.
- const TemplateArgument *getFlatArgumentList() const {
- return FlatArguments.getPointer();
+ /// \brief Retrieve a pointer to the template argument list.
+ const TemplateArgument *data() const {
+ return Arguments.getPointer();
}
};
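
The rewrite above collapses the old flat/structured split into a single array
that the list either owns (CreateCopy) or merely borrows (OnStack). A minimal
sketch of the two construction paths, assuming an ASTContext Ctx and a
TemplateArgument array Args of length N are in scope (illustrative only, not
part of the patch):

    // Owning list: copies Args into ASTContext-allocated storage.
    TemplateArgumentList *Owned =
        TemplateArgumentList::CreateCopy(Ctx, Args, N);

    // Borrowing list: a temporary on the stack; Args must outlive it.
    TemplateArgumentList Borrowed(TemplateArgumentList::OnStack, Args, N);

    // Either way, size() and data() expose the same underlying array.
    assert(Owned->size() == Borrowed.size());
    const TemplateArgument *P = Borrowed.data();
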
@@ -292,7 +244,31 @@ public:
 /// which is a FunctionDecl that has been explicitly specialized or
/// instantiated from a function template.
class FunctionTemplateSpecializationInfo : public llvm::FoldingSetNode {
+ FunctionTemplateSpecializationInfo(FunctionDecl *FD,
+ FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI)
+ : Function(FD),
+ Template(Template, TSK - 1),
+ TemplateArguments(TemplateArgs),
+ TemplateArgumentsAsWritten(TemplateArgsAsWritten),
+ PointOfInstantiation(POI) { }
+
public:
+ static FunctionTemplateSpecializationInfo *
+ Create(ASTContext &C, FunctionDecl *FD, FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI) {
+ return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ POI);
+ }
+
/// \brief The function template specialization that this structure
/// describes.
FunctionDecl *Function;
@@ -345,8 +321,8 @@ public:
}
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, TemplateArguments->getFlatArgumentList(),
- TemplateArguments->flat_size(),
+ Profile(ID, TemplateArguments->data(),
+ TemplateArguments->size(),
Function->getASTContext());
}
@@ -441,11 +417,6 @@ class DependentFunctionTemplateSpecializationInfo {
return reinterpret_cast<FunctionTemplateDecl*const*>(this+1);
}
- const TemplateArgumentLoc *getTemplateArgs() const {
- return reinterpret_cast<const TemplateArgumentLoc*>(
- &getTemplates()[getNumTemplates()]);
- }
-
public:
DependentFunctionTemplateSpecializationInfo(
const UnresolvedSetImpl &Templates,
@@ -463,6 +434,12 @@ public:
return getTemplates()[I];
}
+ /// \brief Returns the explicit template arguments that were given.
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ return reinterpret_cast<const TemplateArgumentLoc*>(
+ &getTemplates()[getNumTemplates()]);
+ }
+
/// \brief Returns the number of explicit template arguments that were given.
unsigned getNumTemplateArgs() const {
return d.NumArgs;
@@ -584,7 +561,7 @@ protected:
/// for the common pointer.
CommonBase *getCommonPtr();
- virtual CommonBase *newCommon() = 0;
+ virtual CommonBase *newCommon(ASTContext &C) = 0;
// Construct a template decl with name, parameters, and templated element.
RedeclarableTemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
@@ -789,19 +766,13 @@ protected:
TemplateParameterList *Params, NamedDecl *Decl)
: RedeclarableTemplateDecl(FunctionTemplate, DC, L, Name, Params, Decl) { }
- CommonBase *newCommon();
+ CommonBase *newCommon(ASTContext &C);
Common *getCommonPtr() {
return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
}
- friend void FunctionDecl::setFunctionTemplateSpecialization(
- FunctionTemplateDecl *Template,
- const TemplateArgumentList *TemplateArgs,
- void *InsertPos,
- TemplateSpecializationKind TSK,
- const TemplateArgumentListInfo *TemplateArgsAsWritten,
- SourceLocation PointOfInstantiation);
+ friend class FunctionDecl;
/// \brief Retrieve the set of function template specializations of this
/// function template.
@@ -940,15 +911,15 @@ class TemplateTypeParmDecl : public TypeDecl {
bool Typename, QualType Type, bool ParameterPack)
: TypeDecl(TemplateTypeParm, DC, L, Id), Typename(Typename),
InheritedDefault(false), ParameterPack(ParameterPack), DefaultArgument() {
- TypeForDecl = Type.getTypePtr();
+ TypeForDecl = Type.getTypePtrOrNull();
}
public:
- static TemplateTypeParmDecl *Create(ASTContext &C, DeclContext *DC,
+ static TemplateTypeParmDecl *Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id, bool Typename,
bool ParameterPack);
- static TemplateTypeParmDecl *Create(ASTContext &C, EmptyShell Empty);
+ static TemplateTypeParmDecl *Create(const ASTContext &C, EmptyShell Empty);
/// \brief Whether this template type parameter was declared with
/// the 'typename' keyword. If not, it was declared with the 'class'
@@ -1014,22 +985,54 @@ public:
/// template<int Size> class array { };
/// @endcode
class NonTypeTemplateParmDecl
- : public VarDecl, protected TemplateParmPosition {
+ : public DeclaratorDecl, protected TemplateParmPosition {
/// \brief The default template argument, if any, and whether or not
/// it was inherited.
llvm::PointerIntPair<Expr*, 1, bool> DefaultArgumentAndInherited;
+ // FIXME: Collapse this into TemplateParamPosition; or, just move depth/index
+ // down here to save memory.
+
+ /// \brief Whether this non-type template parameter is a parameter pack.
+ bool ParameterPack;
+
+ /// \brief Whether this non-type template parameter is an "expanded"
+ /// parameter pack, meaning that its type is a pack expansion and we
+ /// already know the set of types that expansion expands to.
+ bool ExpandedParameterPack;
+
+ /// \brief The number of types in an expanded parameter pack.
+ unsigned NumExpandedTypes;
+
NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation L, unsigned D,
unsigned P, IdentifierInfo *Id, QualType T,
- TypeSourceInfo *TInfo)
- : VarDecl(NonTypeTemplateParm, DC, L, Id, T, TInfo, SC_None, SC_None),
- TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false)
+ bool ParameterPack, TypeSourceInfo *TInfo)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, L, Id, T, TInfo),
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false),
+ ParameterPack(ParameterPack), ExpandedParameterPack(false),
+ NumExpandedTypes(0)
{ }
+ NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation L, unsigned D,
+ unsigned P, IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos);
+
+ friend class ASTDeclReader;
+
public:
static NonTypeTemplateParmDecl *
- Create(ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D,
- unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo);
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D,
+ unsigned P, IdentifierInfo *Id, QualType T, bool ParameterPack,
+ TypeSourceInfo *TInfo);
+
+ static NonTypeTemplateParmDecl *
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D,
+ unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes, unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos);
using TemplateParmPosition::getDepth;
using TemplateParmPosition::setDepth;
@@ -1037,6 +1040,9 @@ public:
using TemplateParmPosition::setPosition;
using TemplateParmPosition::getIndex;
+ SourceLocation getInnerLocStart() const;
+ SourceRange getSourceRange() const;
+
/// \brief Determine whether this template parameter has a default
/// argument.
bool hasDefaultArgument() const {
@@ -1071,6 +1077,65 @@ public:
DefaultArgumentAndInherited.setInt(false);
}
+ /// \brief Whether this parameter is a non-type template parameter pack.
+ ///
+ /// If the parameter is a parameter pack, the type may be a
+ /// \c PackExpansionType. In the following example, the \c Dims parameter
+ /// is a parameter pack (whose type is 'unsigned').
+ ///
+ /// \code
+ /// template<typename T, unsigned ...Dims> struct multi_array;
+ /// \endcode
+ bool isParameterPack() const { return ParameterPack; }
+
+ /// \brief Whether this parameter is a non-type template parameter pack
+ /// that has different types at different positions.
+ ///
+ /// A parameter pack is an expanded parameter pack when the original
+ /// parameter pack's type was itself a pack expansion, and that expansion
+ /// has already been expanded. For example, given:
+ ///
+ /// \code
+ /// template<typename ...Types>
+ /// struct X {
+ /// template<Types ...Values>
+ /// struct Y { /* ... */ };
+ /// };
+ /// \endcode
+ ///
+ /// The parameter pack \c Values has a \c PackExpansionType as its type,
+ /// which expands \c Types. When \c Types is supplied with template arguments
+ /// by instantiating \c X, the instantiation of \c Values becomes an
+ /// expanded parameter pack. For example, instantiating
+ /// \c X<int, unsigned int> results in \c Values being an expanded parameter
+ /// pack with expansion types \c int and \c unsigned int.
+ ///
+ /// The \c getExpansionType() and \c getExpansionTypeSourceInfo() functions
+ /// return the expansion types.
+ bool isExpandedParameterPack() const { return ExpandedParameterPack; }
+
+ /// \brief Retrieves the number of expansion types in an expanded parameter pack.
+ unsigned getNumExpansionTypes() const {
+ assert(ExpandedParameterPack && "Not an expansion parameter pack");
+ return NumExpandedTypes;
+ }
+
+ /// \brief Retrieve a particular expansion type within an expanded parameter
+ /// pack.
+ QualType getExpansionType(unsigned I) const {
+ assert(I < NumExpandedTypes && "Out-of-range expansion type index");
+ void * const *TypesAndInfos = reinterpret_cast<void * const*>(this + 1);
+ return QualType::getFromOpaquePtr(TypesAndInfos[2*I]);
+ }
+
+ /// \brief Retrieve a particular expansion type source info within an
+ /// expanded parameter pack.
+ TypeSourceInfo *getExpansionTypeSourceInfo(unsigned I) const {
+ assert(I < NumExpandedTypes && "Out-of-range expansion type index");
+ void * const *TypesAndInfos = reinterpret_cast<void * const*>(this + 1);
+ return static_cast<TypeSourceInfo *>(TypesAndInfos[2*I+1]);
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const NonTypeTemplateParmDecl *D) { return true; }
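
For an expanded pack, the expansion types and their TypeSourceInfos are stored
interleaved after the declaration object; the accessors above hide that layout.
A hedged sketch of consuming them, where NTTP is assumed to be a
NonTypeTemplateParmDecl* already known to be an expanded parameter pack:

    if (NTTP->isExpandedParameterPack()) {
      for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
        QualType T = NTTP->getExpansionType(I);                    // slot 2*I
        TypeSourceInfo *TSI = NTTP->getExpansionTypeSourceInfo(I); // slot 2*I+1
        // ... use T / TSI, e.g. during substitution or for diagnostics ...
      }
    }
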
@@ -1092,24 +1157,36 @@ class TemplateTemplateParmDecl
/// Whether or not the default argument was inherited.
bool DefaultArgumentWasInherited;
+ /// \brief Whether this parameter is a parameter pack.
+ bool ParameterPack;
+
TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
- unsigned D, unsigned P,
+ unsigned D, unsigned P, bool ParameterPack,
IdentifierInfo *Id, TemplateParameterList *Params)
: TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
TemplateParmPosition(D, P), DefaultArgument(),
- DefaultArgumentWasInherited(false)
+ DefaultArgumentWasInherited(false), ParameterPack(ParameterPack)
{ }
public:
- static TemplateTemplateParmDecl *Create(ASTContext &C, DeclContext *DC,
+ static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D,
- unsigned P, IdentifierInfo *Id,
+ unsigned P, bool ParameterPack,
+ IdentifierInfo *Id,
TemplateParameterList *Params);
using TemplateParmPosition::getDepth;
using TemplateParmPosition::getPosition;
using TemplateParmPosition::getIndex;
+ /// \brief Whether this template template parameter is a template
+ /// parameter pack.
+ ///
+ /// \code
+ /// template<template <class T> ...MetaFunctions> struct Apply;
+ /// \endcode
+ bool isParameterPack() const { return ParameterPack; }
+
/// \brief Determine whether this template parameter has a default
/// argument.
bool hasDefaultArgument() const {
@@ -1211,7 +1288,7 @@ class ClassTemplateSpecializationDecl
ExplicitSpecializationInfo *ExplicitInfo;
/// \brief The template arguments used to describe this specialization.
- TemplateArgumentList TemplateArgs;
+ TemplateArgumentList *TemplateArgs;
/// \brief The point where this template was instantiated (if any)
SourceLocation PointOfInstantiation;
@@ -1224,7 +1301,8 @@ protected:
ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl);
explicit ClassTemplateSpecializationDecl(Kind DK);
@@ -1233,7 +1311,8 @@ public:
static ClassTemplateSpecializationDecl *
Create(ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl);
static ClassTemplateSpecializationDecl *
Create(ASTContext &Context, EmptyShell Empty);
@@ -1259,15 +1338,7 @@ public:
/// \brief Retrieve the template arguments of the class template
/// specialization.
const TemplateArgumentList &getTemplateArgs() const {
- return TemplateArgs;
- }
-
- /// \brief Initialize the template arguments of the class template
- /// specialization.
- void initTemplateArgs(TemplateArgument *Args, unsigned NumArgs) {
- assert(TemplateArgs.flat_size() == 0 &&
- "Template arguments already initialized!");
- TemplateArgs.init(getASTContext(), Args, NumArgs);
+ return *TemplateArgs;
}
/// \brief Determine the kind of specialization that this
@@ -1357,18 +1428,6 @@ public:
SpecializedTemplate = PS;
}
- /// \brief Note that this class template specialization is actually an
- /// instantiation of the given class template partial specialization whose
- /// template arguments have been deduced.
- void setInstantiationOf(ClassTemplatePartialSpecializationDecl *PartialSpec,
- TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- ASTContext &Ctx = getASTContext();
- setInstantiationOf(PartialSpec,
- new (Ctx) TemplateArgumentList(Ctx, TemplateArgs,
- NumTemplateArgs));
- }
-
/// \brief Note that this class template specialization is an instantiation
/// of the given class template.
void setInstantiationOf(ClassTemplateDecl *TemplDecl) {
@@ -1415,8 +1474,7 @@ public:
SourceLocation getInnerLocStart() const { return getTemplateKeywordLoc(); }
void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, TemplateArgs.getFlatArgumentList(), TemplateArgs.flat_size(),
- getASTContext());
+ Profile(ID, TemplateArgs->data(), TemplateArgs->size(), getASTContext());
}
static void
@@ -1440,6 +1498,9 @@ public:
static bool classof(const ClassTemplatePartialSpecializationDecl *) {
return true;
}
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
class ClassTemplatePartialSpecializationDecl
@@ -1469,15 +1530,16 @@ class ClassTemplatePartialSpecializationDecl
DeclContext *DC, SourceLocation L,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
TemplateArgumentLoc *ArgInfos,
unsigned NumArgInfos,
ClassTemplatePartialSpecializationDecl *PrevDecl,
unsigned SequenceNumber)
: ClassTemplateSpecializationDecl(Context,
ClassTemplatePartialSpecialization,
- TK, DC, L, SpecializedTemplate, Builder,
- PrevDecl),
+ TK, DC, L, SpecializedTemplate,
+ Args, NumArgs, PrevDecl),
TemplateParams(Params), ArgsAsWritten(ArgInfos),
NumArgsAsWritten(NumArgInfos), SequenceNumber(SequenceNumber),
InstantiatedFromMember(0, false) { }
@@ -1493,7 +1555,8 @@ public:
Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
const TemplateArgumentListInfo &ArgInfos,
QualType CanonInjectedType,
ClassTemplatePartialSpecializationDecl *PrevDecl,
@@ -1512,18 +1575,11 @@ public:
return TemplateParams;
}
- void initTemplateParameters(TemplateParameterList *Params) {
- assert(TemplateParams == 0 && "TemplateParams already set");
- TemplateParams = Params;
- }
-
/// Get the template arguments as written.
TemplateArgumentLoc *getTemplateArgsAsWritten() const {
return ArgsAsWritten;
}
- void initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos);
-
/// Get the number of template arguments as written.
unsigned getNumTemplateArgsAsWritten() const {
return NumArgsAsWritten;
@@ -1532,8 +1588,7 @@ public:
/// \brief Get the sequence number for this class template partial
/// specialization.
unsigned getSequenceNumber() const { return SequenceNumber; }
- void setSequenceNumber(unsigned N) { SequenceNumber = N; }
-
+
/// \brief Retrieve the member class template partial specialization from
/// which this particular class template partial specialization was
/// instantiated.
@@ -1617,6 +1672,9 @@ public:
static bool classof(const ClassTemplatePartialSpecializationDecl *) {
return true;
}
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
/// Declaration of a class template.
@@ -1630,6 +1688,8 @@ protected:
/// \brief Data that is common to all of the declarations of a given
/// class template.
struct Common : CommonBase {
+ Common() : LazySpecializations() { }
+
/// \brief The class template specializations for this class
/// template, including explicit specializations and instantiations.
llvm::FoldingSet<ClassTemplateSpecializationDecl> Specializations;
@@ -1641,25 +1701,31 @@ protected:
/// \brief The injected-class-name type for this class template.
QualType InjectedClassNameType;
+
+ /// \brief If non-null, points to an array of specializations (including
+ /// partial specializations) known only by their external declaration IDs.
+ ///
+ /// The first value in the array is the number of specializations/
+ /// partial specializations that follow.
+ uint32_t *LazySpecializations;
};
+ /// \brief Load any lazily-loaded specializations from the external source.
+ void LoadLazySpecializations();
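
Going by the comment on LazySpecializations, the array is length-prefixed:
element 0 holds the count and the external declaration IDs follow. A hedged
sketch of how a loader might walk it (getExternalSource()/GetExternalDecl are
assumptions about the external-AST interface, not taken from this patch):

    uint32_t *Lazy = getCommonPtr()->LazySpecializations;
    if (Lazy)
      for (uint32_t I = 1, N = Lazy[0]; I <= N; ++I)
        // Hypothetical: ask the external AST source to materialize the decl.
        Context.getExternalSource()->GetExternalDecl(Lazy[I]);
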
+
/// \brief Retrieve the set of specializations of this class template.
- llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations() {
- return getCommonPtr()->Specializations;
- }
+ llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations();
/// \brief Retrieve the set of partial specializations of this class
/// template.
llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
- getPartialSpecializations() {
- return getCommonPtr()->PartialSpecializations;
- }
+ getPartialSpecializations();
ClassTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
TemplateParameterList *Params, NamedDecl *Decl)
: RedeclarableTemplateDecl(ClassTemplate, DC, L, Name, Params, Decl) { }
- CommonBase *newCommon();
+ CommonBase *newCommon(ASTContext &C);
Common *getCommonPtr() {
return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
@@ -1693,9 +1759,7 @@ public:
/// \brief Insert the specified specialization knowing that it is not already
/// in. InsertPos must be obtained from findSpecialization.
- void AddSpecialization(ClassTemplateSpecializationDecl *D, void *InsertPos) {
- getSpecializations().InsertNode(D, InsertPos);
- }
+ void AddSpecialization(ClassTemplateSpecializationDecl *D, void *InsertPos);
ClassTemplateDecl *getCanonicalDecl() {
return redeclarable_base::getCanonicalDecl();
@@ -1729,9 +1793,7 @@ public:
/// \brief Insert the specified partial specialization knowing that it is not
/// already in. InsertPos must be obtained from findPartialSpecialization.
void AddPartialSpecialization(ClassTemplatePartialSpecializationDecl *D,
- void *InsertPos) {
- getPartialSpecializations().InsertNode(D, InsertPos);
- }
+ void *InsertPos);
/// \brief Return the next partial specialization sequence number.
unsigned getNextPartialSpecSequenceNumber() {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
index 8bb6275..e54719b 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
@@ -315,7 +315,7 @@ inline bool operator>=(DeclarationName LHS, DeclarationName RHS) {
/// retrieved using its member functions (e.g.,
/// getCXXConstructorName).
class DeclarationNameTable {
- ASTContext &Ctx;
+ const ASTContext &Ctx;
void *CXXSpecialNamesImpl; // Actually a FoldingSet<CXXSpecialName> *
CXXOperatorIdName *CXXOperatorNames; // Operator names
void *CXXLiteralOperatorNames; // Actually a CXXOperatorIdName*
@@ -324,7 +324,7 @@ class DeclarationNameTable {
DeclarationNameTable& operator=(const DeclarationNameTable&); // NONCOPYABLE
public:
- DeclarationNameTable(ASTContext &C);
+ DeclarationNameTable(const ASTContext &C);
~DeclarationNameTable();
/// getIdentifier - Create a declaration name that is a simple
@@ -402,7 +402,7 @@ struct DeclarationNameLoc {
DeclarationNameLoc(DeclarationName Name);
// FIXME: this should go away once all DNLocs are properly initialized.
- DeclarationNameLoc() { NamedType.TInfo = 0; }
+ DeclarationNameLoc() { memset((void*) this, 0, sizeof(*this)); }
}; // struct DeclarationNameLoc
@@ -492,6 +492,10 @@ public:
LocInfo.CXXLiteralOperatorName.OpNameLoc = Loc.getRawEncoding();
}
+ /// \brief Determine whether this name contains an unexpanded
+ /// parameter pack.
+ bool containsUnexpandedParameterPack() const;
+
/// getAsString - Retrieve the human-readable string for this name.
std::string getAsString() const;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
new file mode 100644
index 0000000..035f57c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
@@ -0,0 +1,82 @@
+//===--- EvaluatedExprVisitor.h - Evaluated expression visitor --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the EvaluatedExprVisitor class template, which visits
+// the potentially-evaluated subexpressions of a potentially-evaluated
+// expression.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
+#define LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+
+namespace clang {
+
+class ASTContext;
+
+/// \brief Given a potentially-evaluated expression, this visitor visits all
+/// of its potentially-evaluated subexpressions, recursively.
+template<typename ImplClass>
+class EvaluatedExprVisitor : public StmtVisitor<ImplClass> {
+ ASTContext &Context;
+
+public:
+ explicit EvaluatedExprVisitor(ASTContext &Context) : Context(Context) { }
+
+ // Expressions that have no potentially-evaluated subexpressions (but may have
+ // other sub-expressions).
+ void VisitDeclRefExpr(DeclRefExpr *E) { }
+ void VisitOffsetOfExpr(OffsetOfExpr *E) { }
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) { }
+ void VisitBlockExpr(BlockExpr *E) { }
+ void VisitCXXUuidofExpr(CXXUuidofExpr *E) { }
+ void VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ // Only the base matters.
+ return this->Visit(E->getBase());
+ }
+
+ void VisitChooseExpr(ChooseExpr *E) {
+ // Only the selected subexpression matters; the other one is not evaluated.
+ return this->Visit(E->getChosenSubExpr(Context));
+ }
+
+ void VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ // Only the actual initializer matters; the designators are all constant
+ // expressions.
+ return this->Visit(E->getInit());
+ }
+
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ // typeid(expression) is potentially evaluated when the argument is
+ // a glvalue of polymorphic type. (C++ 5.2.8p2-3)
+ if (!E->isTypeOperand() && E->Classify(Context).isGLValue())
+ if (const RecordType *Record
+ = E->getExprOperand()->getType()->template getAs<RecordType>())
+ if (cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic())
+ return this->Visit(E->getExprOperand());
+ }
+
+ /// \brief The base case walks all of the children of the statement or
+ /// expression, assuming they are all potentially evaluated.
+ void VisitStmt(Stmt *S) {
+ for (Stmt::child_range C = S->children(); C; ++C)
+ if (*C)
+ this->Visit(*C);
+ }
+};
+
+}
+
+#endif // LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
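
EvaluatedExprVisitor is a CRTP visitor: a client derives from it, overrides the
Visit* methods it cares about, and inherits VisitStmt's recursion for
everything else, with the overrides above pruning unevaluated subexpressions.
A minimal sketch of a derived visitor that records every DeclRefExpr in the
potentially-evaluated parts of an expression (the class name and Refs vector
are illustrative, not part of the header):

    class DeclRefCollector : public EvaluatedExprVisitor<DeclRefCollector> {
      llvm::SmallVectorImpl<DeclRefExpr*> &Refs;
    public:
      DeclRefCollector(ASTContext &Ctx, llvm::SmallVectorImpl<DeclRefExpr*> &Refs)
        : EvaluatedExprVisitor<DeclRefCollector>(Ctx), Refs(Refs) { }

      void VisitDeclRefExpr(DeclRefExpr *E) { Refs.push_back(E); }
    };

    // Usage: llvm::SmallVector<DeclRefExpr*, 8> Refs;
    //        DeclRefCollector(Ctx, Refs).Visit(E);
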
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
index 48130be..a17205c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include <cctype>
#include <vector>
namespace clang {
@@ -39,8 +40,10 @@ namespace clang {
class CXXBaseSpecifier;
class CXXOperatorCallExpr;
class CXXMemberCallExpr;
+ class ObjCPropertyRefExpr;
class TemplateArgumentLoc;
class TemplateArgumentListInfo;
+ class OpaqueValueExpr;
/// \brief A simple array of base specifiers.
typedef llvm::SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
@@ -52,24 +55,14 @@ typedef llvm::SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class Expr : public Stmt {
QualType TR;
- virtual void ANCHOR(); // key function.
protected:
- /// TypeDependent - Whether this expression is type-dependent
- /// (C++ [temp.dep.expr]).
- bool TypeDependent : 1;
-
- /// ValueDependent - Whether this expression is value-dependent
- /// (C++ [temp.dep.constexpr]).
- bool ValueDependent : 1;
-
- /// ValueKind - The value classification of this expression.
- /// Only actually used by certain subclasses.
- unsigned ValueKind : 2;
-
- enum { BitsRemaining = 28 };
-
- Expr(StmtClass SC, QualType T, bool TD, bool VD)
- : Stmt(SC), TypeDependent(TD), ValueDependent(VD), ValueKind(0) {
+ Expr(StmtClass SC, QualType T, ExprValueKind VK, ExprObjectKind OK,
+ bool TD, bool VD, bool ContainsUnexpandedParameterPack) : Stmt(SC) {
+ ExprBits.TypeDependent = TD;
+ ExprBits.ValueDependent = VD;
+ ExprBits.ValueKind = VK;
+ ExprBits.ObjectKind = OK;
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
setType(T);
}
@@ -77,15 +70,6 @@ protected:
explicit Expr(StmtClass SC, EmptyShell) : Stmt(SC) { }
public:
- /// \brief Increases the reference count for this expression.
- ///
- /// Invoke the Retain() operation when this expression
- /// is being shared by another owner.
- Expr *Retain() {
- Stmt::Retain();
- return this;
- }
-
QualType getType() const { return TR; }
void setType(QualType t) {
// In C++, the type of an expression is always adjusted so that it
@@ -108,10 +92,10 @@ public:
/// @code
/// template<int Size, char (&Chars)[Size]> struct meta_string;
/// @endcode
- bool isValueDependent() const { return ValueDependent; }
+ bool isValueDependent() const { return ExprBits.ValueDependent; }
/// \brief Set whether this expression is value-dependent or not.
- void setValueDependent(bool VD) { ValueDependent = VD; }
+ void setValueDependent(bool VD) { ExprBits.ValueDependent = VD; }
/// isTypeDependent - Determines whether this expression is
/// type-dependent (C++ [temp.dep.expr]), which means that its type
@@ -124,19 +108,38 @@ public:
/// x + y;
/// }
/// @endcode
- bool isTypeDependent() const { return TypeDependent; }
+ bool isTypeDependent() const { return ExprBits.TypeDependent; }
/// \brief Set whether this expression is type-dependent or not.
- void setTypeDependent(bool TD) { TypeDependent = TD; }
+ void setTypeDependent(bool TD) { ExprBits.TypeDependent = TD; }
+
+ /// \brief Whether this expression contains an unexpanded parameter
+ /// pack (for C++0x variadic templates).
+ ///
+ /// Given the following function template:
+ ///
+ /// \code
+ /// template<typename F, typename ...Types>
+ /// void forward(const F &f, Types &&...args) {
+ /// f(static_cast<Types&&>(args)...);
+ /// }
+ /// \endcode
+ ///
+ /// The expressions \c args and \c static_cast<Types&&>(args) both
+ /// contain parameter packs.
+ bool containsUnexpandedParameterPack() const {
+ return ExprBits.ContainsUnexpandedParameterPack;
+ }
- /// SourceLocation tokens are not useful in isolation - they are low level
- /// value objects created/interpreted by SourceManager. We assume AST
- /// clients will have a pointer to the respective SourceManager.
- virtual SourceRange getSourceRange() const = 0;
+ /// \brief Set the bit that describes whether this expression
+ /// contains an unexpanded parameter pack.
+ void setContainsUnexpandedParameterPack(bool PP = true) {
+ ExprBits.ContainsUnexpandedParameterPack = PP;
+ }
/// getExprLoc - Return the preferred location for the arrow when diagnosing
/// a problem with a generic expression.
- virtual SourceLocation getExprLoc() const { return getLocStart(); }
+ SourceLocation getExprLoc() const;
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
@@ -145,19 +148,25 @@ public:
bool isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
SourceRange &R2, ASTContext &Ctx) const;
- /// isLvalue - C99 6.3.2.1: an lvalue is an expression with an object type or
- /// incomplete type other than void. Nonarray expressions that can be lvalues:
- /// - name, where name must be a variable
- /// - e[i]
- /// - (e), where e must be an lvalue
- /// - e.name, where e must be an lvalue
- /// - e->name
- /// - *e, the type of e cannot be a function type
- /// - string-constant
- /// - reference type [C++ [expr]]
- /// - b ? x : y, where x and y are lvalues of suitable types [C++]
+ /// isLValue - True if this expression is an "l-value" according to
+ /// the rules of the current language. C and C++ give somewhat
+ /// different rules for this concept, but in general, the result of
+ /// an l-value expression identifies a specific object whereas the
+ /// result of an r-value expression is a value detached from any
+ /// specific storage.
///
- enum isLvalueResult {
+ /// C++0x divides the concept of "r-value" into pure r-values
+ /// ("pr-values") and so-called expiring values ("x-values"), which
+ /// identify specific objects that can be safely cannibalized for
+ /// their resources. This is an unfortunate abuse of terminology on
+ /// the part of the C++ committee. In Clang, when we say "r-value",
+ /// we generally mean a pr-value.
+ bool isLValue() const { return getValueKind() == VK_LValue; }
+ bool isRValue() const { return getValueKind() == VK_RValue; }
+ bool isXValue() const { return getValueKind() == VK_XValue; }
+ bool isGLValue() const { return getValueKind() != VK_RValue; }
+
+ enum LValueClassification {
LV_Valid,
LV_NotObjectType,
LV_IncompleteVoidType,
@@ -167,7 +176,8 @@ public:
LV_SubObjCPropertySetting,
LV_ClassTemporary
};
- isLvalueResult isLvalue(ASTContext &Ctx) const;
+ /// Reasons why an expression might not be an l-value.
+ LValueClassification ClassifyLValue(ASTContext &Ctx) const;
/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
/// does not have an incomplete type, does not have a const-qualified type,
@@ -252,8 +262,14 @@ public:
bool isPRValue() const { return Kind >= CL_Function; }
bool isRValue() const { return Kind >= CL_XValue; }
bool isModifiable() const { return getModifiable() == CM_Modifiable; }
+
+ /// \brief Create a simple, modifiable lvalue.
+ static Classification makeSimpleLValue() {
+ return Classification(CL_LValue, CM_Modifiable);
+ }
+
};
- /// \brief classify - Classify this expression according to the C++0x
+ /// \brief Classify - Classify this expression according to the C++0x
/// expression taxonomy.
///
/// C++0x defines ([basic.lval]) a new taxonomy of expressions to replace the
@@ -269,7 +285,7 @@ public:
return ClassifyImpl(Ctx, 0);
}
- /// \brief classifyModifiable - Classify this expression according to the
+ /// \brief ClassifyModifiable - Classify this expression according to the
/// C++0x expression taxonomy, and see if it is valid on the left side
/// of an assignment.
///
@@ -281,6 +297,40 @@ public:
return ClassifyImpl(Ctx, &Loc);
}
+ /// getValueKindForType - Given a formal return or parameter type,
+ /// give its value kind.
+ static ExprValueKind getValueKindForType(QualType T) {
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ return (isa<LValueReferenceType>(RT)
+ ? VK_LValue
+ : (RT->getPointeeType()->isFunctionType()
+ ? VK_LValue : VK_XValue));
+ return VK_RValue;
+ }
+
+ /// getValueKind - The value kind that this expression produces.
+ ExprValueKind getValueKind() const {
+ return static_cast<ExprValueKind>(ExprBits.ValueKind);
+ }
+
+ /// getObjectKind - The object kind that this expression produces.
+ /// Object kinds are meaningful only for expressions that yield an
+ /// l-value or x-value.
+ ExprObjectKind getObjectKind() const {
+ return static_cast<ExprObjectKind>(ExprBits.ObjectKind);
+ }
+
+ bool isOrdinaryOrBitFieldObject() const {
+ ExprObjectKind OK = getObjectKind();
+ return (OK == OK_Ordinary || OK == OK_BitField);
+ }
+
+ /// setValueKind - Set the value kind produced by this expression.
+ void setValueKind(ExprValueKind Cat) { ExprBits.ValueKind = Cat; }
+
+ /// setObjectKind - Set the object kind produced by this expression.
+ void setObjectKind(ExprObjectKind Cat) { ExprBits.ObjectKind = Cat; }
+
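
getValueKindForType encodes the C++0x rule used when classifying calls and
casts by a declared type: lvalue references yield lvalues, rvalue references
yield xvalues, except that rvalue references to function types still yield
lvalues, and non-reference types yield prvalues. A sketch of the mapping
(SomeReturnType is a hypothetical QualType):

    // int         -> VK_RValue  (a prvalue)
    // int&        -> VK_LValue
    // int&&       -> VK_XValue
    // void (&&)() -> VK_LValue  (function xvalues do not exist)
    ExprValueKind VK = Expr::getValueKindForType(SomeReturnType);
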
private:
Classification ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const;
@@ -294,6 +344,10 @@ public:
return const_cast<Expr*>(this)->getBitField();
}
+ /// \brief If this expression is an l-value for an Objective C
+ /// property, find the underlying property reference expression.
+ const ObjCPropertyRefExpr *getObjCProperty() const;
+
/// \brief Returns whether this expression refers to a vector element.
bool refersToVectorElement() const;
@@ -355,33 +409,49 @@ public:
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result.
- bool Evaluate(EvalResult &Result, ASTContext &Ctx) const;
+ bool Evaluate(EvalResult &Result, const ASTContext &Ctx) const;
/// EvaluateAsBooleanCondition - Return true if this is a constant
 /// which we can fold and convert to a boolean condition using
/// any crazy technique that we want to.
- bool EvaluateAsBooleanCondition(bool &Result, ASTContext &Ctx) const;
+ bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx) const;
/// isEvaluatable - Call Evaluate to see if this expression can be constant
/// folded, but discard the result.
- bool isEvaluatable(ASTContext &Ctx) const;
+ bool isEvaluatable(const ASTContext &Ctx) const;
/// HasSideEffects - This routine returns true for all those expressions
- /// which must be evaluated each time and must not be optimization away
+ /// which must be evaluated each time and must not be optimized away
/// or evaluated at compile time. Example is a function call, volatile
/// variable read.
- bool HasSideEffects(ASTContext &Ctx) const;
+ bool HasSideEffects(const ASTContext &Ctx) const;
/// EvaluateAsInt - Call Evaluate and return the folded integer. This
/// must be called on an expression that constant folds to an integer.
- llvm::APSInt EvaluateAsInt(ASTContext &Ctx) const;
+ llvm::APSInt EvaluateAsInt(const ASTContext &Ctx) const;
 /// EvaluateAsLValue - Evaluate an expression to see if it's an lvalue
 /// with a link-time-known address.
- bool EvaluateAsLValue(EvalResult &Result, ASTContext &Ctx) const;
+ bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const;
 /// EvaluateAsAnyLValue - Evaluate an expression to see if it's an lvalue.
- bool EvaluateAsAnyLValue(EvalResult &Result, ASTContext &Ctx) const;
+ bool EvaluateAsAnyLValue(EvalResult &Result, const ASTContext &Ctx) const;
+
+ /// \brief Enumeration used to describe the kind of Null pointer constant
+ /// returned from \c isNullPointerConstant().
+ enum NullPointerConstantKind {
+ /// \brief Expression is not a Null pointer constant.
+ NPCK_NotNull = 0,
+
+ /// \brief Expression is a Null pointer constant built from a zero integer.
+ NPCK_ZeroInteger,
+
+ /// \brief Expression is a C++0X nullptr.
+ NPCK_CXX0X_nullptr,
+
+ /// \brief Expression is a GNU-style __null constant.
+ NPCK_GNUNull
+ };
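
With the bool return replaced by NullPointerConstantKind (see the updated
isNullPointerConstant signature below), callers can branch on which flavor of
null pointer constant they saw. A sketch, assuming an Expr *E and an
ASTContext &Ctx:

    switch (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull)) {
    case Expr::NPCK_NotNull:       /* not a null pointer constant */ break;
    case Expr::NPCK_ZeroInteger:   /* 0, (void*)0, and friends    */ break;
    case Expr::NPCK_CXX0X_nullptr: /* C++0x nullptr               */ break;
    case Expr::NPCK_GNUNull:       /* GNU __null                  */ break;
    }
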
/// \brief Enumeration used to describe how \c isNullPointerConstant()
/// should cope with value-dependent expressions.
@@ -398,16 +468,30 @@ public:
NPC_ValueDependentIsNotNull
};
- /// isNullPointerConstant - C99 6.3.2.3p3 - Return true if this is either an
- /// integer constant expression with the value zero, or if this is one that is
- /// cast to void*.
- bool isNullPointerConstant(ASTContext &Ctx,
- NullPointerConstantValueDependence NPC) const;
+ /// isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to
+ /// a Null pointer constant. The return value can further distinguish the
+ /// kind of NULL pointer constant that was detected.
+ NullPointerConstantKind isNullPointerConstant(
+ ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const;
/// isOBJCGCCandidate - Return true if this expression may be used in a read/
/// write barrier.
bool isOBJCGCCandidate(ASTContext &Ctx) const;
+ /// \brief Returns true if this expression is a bound member function.
+ bool isBoundMemberFunction(ASTContext &Ctx) const;
+
+ /// \brief Result type of CanThrow().
+ enum CanThrowResult {
+ CT_Cannot,
+ CT_Dependent,
+ CT_Can
+ };
+ /// \brief Test if this expression, if evaluated, might throw, according to
+ /// the rules of C++ [expr.unary.noexcept].
+ CanThrowResult CanThrow(ASTContext &C) const;
+
/// IgnoreParens - Ignore parentheses. If this Expr is a ParenExpr, return
/// its subexpression. If that subexpression is also a ParenExpr,
/// then this method recursively returns its subexpression, and so forth.
@@ -422,6 +506,18 @@ public:
/// ParenExpr or ImplicitCastExprs, returning their operand.
Expr *IgnoreParenImpCasts();
+ const Expr *IgnoreParenImpCasts() const {
+ return const_cast<Expr*>(this)->IgnoreParenImpCasts();
+ }
+
+ /// Ignore parentheses and lvalue casts. Strip off any ParenExpr and
+ /// CastExprs that represent lvalue casts, returning their operand.
+ Expr *IgnoreParenLValueCasts();
+
+ const Expr *IgnoreParenLValueCasts() const {
+ return const_cast<Expr*>(this)->IgnoreParenLValueCasts();
+ }
+
/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
/// value (including ptr->int casts of the same size). Strip off any
/// ParenExpr or CastExprs, returning their operand.
@@ -436,14 +532,9 @@ public:
/// the expression is a default argument.
bool isDefaultArgument() const;
- /// \brief Determine whether this expression directly creates a
- /// temporary object (of class type).
- bool isTemporaryObject() const { return getTemporaryObject() != 0; }
-
- /// \brief If this expression directly creates a temporary object of
- /// class type, return the expression that actually constructs that
- /// temporary object.
- const Expr *getTemporaryObject() const;
+ /// \brief Determine whether the result of this expression is a
+ /// temporary object of the given class type.
+ bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const;
const Expr *IgnoreParens() const {
return const_cast<Expr*>(this)->IgnoreParens();
@@ -470,6 +561,63 @@ public:
// Primary Expressions.
//===----------------------------------------------------------------------===//
+/// OpaqueValueExpr - An expression referring to an opaque object of a
+/// fixed type and value class. These don't correspond to concrete
+/// syntax; instead they're used to express operations (usually copy
+/// operations) on values whose source is generally obvious from
+/// context.
+class OpaqueValueExpr : public Expr {
+ friend class ASTStmtReader;
+ Expr *SourceExpr;
+ SourceLocation Loc;
+
+public:
+ OpaqueValueExpr(SourceLocation Loc, QualType T, ExprValueKind VK,
+ ExprObjectKind OK = OK_Ordinary)
+ : Expr(OpaqueValueExprClass, T, VK, OK,
+ T->isDependentType(), T->isDependentType(), false),
+ SourceExpr(0), Loc(Loc) {
+ }
+
+ /// Given an expression which invokes a copy constructor --- i.e. a
+ /// CXXConstructExpr, possibly wrapped in an ExprWithCleanups ---
+ /// find the OpaqueValueExpr that's the source of the construction.
+ static const OpaqueValueExpr *findInCopyConstruct(const Expr *expr);
+
+ explicit OpaqueValueExpr(EmptyShell Empty)
+ : Expr(OpaqueValueExprClass, Empty) { }
+
+ /// \brief Retrieve the location of this expression.
+ SourceLocation getLocation() const { return Loc; }
+
+ SourceRange getSourceRange() const {
+ if (SourceExpr) return SourceExpr->getSourceRange();
+ return Loc;
+ }
+ SourceLocation getExprLoc() const {
+ if (SourceExpr) return SourceExpr->getExprLoc();
+ return Loc;
+ }
+
+ child_range children() { return child_range(); }
+
+ /// The source expression of an opaque value expression is the
+ /// expression which originally generated the value. This is
+ /// provided as a convenience for analyses that don't wish to
+ /// precisely model the execution behavior of the program.
+ ///
+ /// The source expression is typically set when building the
+ /// expression which binds the opaque value expression in the first
+ /// place.
+ Expr *getSourceExpr() const { return SourceExpr; }
+ void setSourceExpr(Expr *e) { SourceExpr = e; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OpaqueValueExprClass;
+ }
+ static bool classof(const OpaqueValueExpr *) { return true; }
+};
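
An OpaqueValueExpr is created by whichever code introduces the binding;
recording the original expression via setSourceExpr lets source-oriented
clients recover locations without modeling the binding itself. A hedged sketch
(real call sites live in Sema; Ctx and E are assumed to be in scope):

    OpaqueValueExpr *OV =
        new (Ctx) OpaqueValueExpr(E->getExprLoc(), E->getType(),
                                  E->getValueKind(), E->getObjectKind());
    OV->setSourceExpr(E); // getSourceRange()/getExprLoc() now forward to E.
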
+
/// \brief Represents the qualifier that may precede a C++ name, e.g., the
/// "std::" in "std::sort".
struct NameQualifier {
@@ -505,6 +653,8 @@ struct ExplicitTemplateArgumentList {
}
void initializeFrom(const TemplateArgumentListInfo &List);
+ void initializeFrom(const TemplateArgumentListInfo &List,
+ bool &Dependent, bool &ContainsUnexpandedParameterPack);
void copyInto(TemplateArgumentListInfo &List) const;
static std::size_t sizeFor(unsigned NumTemplateArgs);
static std::size_t sizeFor(const TemplateArgumentListInfo &List);
@@ -551,12 +701,12 @@ class DeclRefExpr : public Expr {
DeclRefExpr(NestedNameSpecifier *Qualifier, SourceRange QualifierRange,
ValueDecl *D, SourceLocation NameLoc,
const TemplateArgumentListInfo *TemplateArgs,
- QualType T);
+ QualType T, ExprValueKind VK);
DeclRefExpr(NestedNameSpecifier *Qualifier, SourceRange QualifierRange,
ValueDecl *D, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
- QualType T);
+ QualType T, ExprValueKind VK);
/// \brief Construct an empty declaration reference expression.
explicit DeclRefExpr(EmptyShell Empty)
@@ -567,8 +717,9 @@ class DeclRefExpr : public Expr {
void computeDependence();
public:
- DeclRefExpr(ValueDecl *d, QualType t, SourceLocation l) :
- Expr(DeclRefExprClass, t, false, false), DecoratedD(d, 0), Loc(l) {
+ DeclRefExpr(ValueDecl *d, QualType t, ExprValueKind VK, SourceLocation l) :
+ Expr(DeclRefExprClass, t, VK, OK_Ordinary, false, false, false),
+ DecoratedD(d, 0), Loc(l) {
computeDependence();
}
@@ -577,7 +728,7 @@ public:
SourceRange QualifierRange,
ValueDecl *D,
SourceLocation NameLoc,
- QualType T,
+ QualType T, ExprValueKind VK,
const TemplateArgumentListInfo *TemplateArgs = 0);
static DeclRefExpr *Create(ASTContext &Context,
@@ -585,12 +736,14 @@ public:
SourceRange QualifierRange,
ValueDecl *D,
const DeclarationNameInfo &NameInfo,
- QualType T,
+ QualType T, ExprValueKind VK,
const TemplateArgumentListInfo *TemplateArgs = 0);
/// \brief Construct an empty declaration reference expression.
static DeclRefExpr *CreateEmpty(ASTContext &Context,
- bool HasQualifier, unsigned NumTemplateArgs);
+ bool HasQualifier,
+ bool HasExplicitTemplateArgs,
+ unsigned NumTemplateArgs);
ValueDecl *getDecl() { return DecoratedD.getPointer(); }
const ValueDecl *getDecl() const { return DecoratedD.getPointer(); }
@@ -602,7 +755,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
/// \brief Determine whether this declaration reference was preceded by a
/// C++ nested-name-specifier, e.g., \c N::foo.
@@ -706,8 +859,7 @@ public:
static bool classof(const DeclRefExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -730,8 +882,10 @@ private:
IdentType Type;
public:
PredefinedExpr(SourceLocation l, QualType type, IdentType IT)
- : Expr(PredefinedExprClass, type, type->isDependentType(),
- type->isDependentType()), Loc(l), Type(IT) {}
+ : Expr(PredefinedExprClass, type, VK_LValue, OK_Ordinary,
+ type->isDependentType(), type->isDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(l), Type(IT) {}
/// \brief Construct an empty predefined expression.
explicit PredefinedExpr(EmptyShell Empty)
@@ -745,7 +899,7 @@ public:
static std::string ComputeName(IdentType IT, const Decl *CurrentDecl);
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == PredefinedExprClass;
@@ -753,8 +907,7 @@ public:
static bool classof(const PredefinedExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// \brief Used by IntegerLiteral/FloatingLiteral to store the numeric without
@@ -817,7 +970,9 @@ public:
// or UnsignedLongLongTy
IntegerLiteral(ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l)
- : Expr(IntegerLiteralClass, type, false, false), Loc(l) {
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false),
+ Loc(l) {
assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
setValue(C, V);
}
@@ -829,7 +984,7 @@ public:
static IntegerLiteral *Create(ASTContext &C, EmptyShell Empty);
llvm::APInt getValue() const { return Num.getValue(); }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
/// \brief Retrieve the location of the literal.
SourceLocation getLocation() const { return Loc; }
@@ -843,8 +998,7 @@ public:
static bool classof(const IntegerLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
class CharacterLiteral : public Expr {
@@ -854,8 +1008,9 @@ class CharacterLiteral : public Expr {
public:
// type should be IntTy
CharacterLiteral(unsigned value, bool iswide, QualType type, SourceLocation l)
- : Expr(CharacterLiteralClass, type, false, false), Value(value), Loc(l),
- IsWide(iswide) {
+ : Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false),
+ Value(value), Loc(l), IsWide(iswide) {
}
/// \brief Construct an empty character literal.
@@ -864,7 +1019,7 @@ public:
SourceLocation getLocation() const { return Loc; }
bool isWide() const { return IsWide; }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
unsigned getValue() const { return Value; }
@@ -878,8 +1033,7 @@ public:
static bool classof(const CharacterLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
class FloatingLiteral : public Expr {
@@ -889,7 +1043,8 @@ class FloatingLiteral : public Expr {
FloatingLiteral(ASTContext &C, const llvm::APFloat &V, bool isexact,
QualType Type, SourceLocation L)
- : Expr(FloatingLiteralClass, Type, false, false),
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
+ false),
IsExact(isexact), Loc(L) {
setValue(C, V);
}
@@ -919,7 +1074,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == FloatingLiteralClass;
@@ -927,8 +1082,7 @@ public:
static bool classof(const FloatingLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// ImaginaryLiteral - We support imaginary integer and floating point literals,
@@ -940,7 +1094,9 @@ class ImaginaryLiteral : public Expr {
Stmt *Val;
public:
ImaginaryLiteral(Expr *val, QualType Ty)
- : Expr(ImaginaryLiteralClass, Ty, false, false), Val(val) {}
+ : Expr(ImaginaryLiteralClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false),
+ Val(val) {}
/// \brief Build an empty imaginary literal.
explicit ImaginaryLiteral(EmptyShell Empty)
@@ -950,15 +1106,14 @@ public:
Expr *getSubExpr() { return cast<Expr>(Val); }
void setSubExpr(Expr *E) { Val = E; }
- virtual SourceRange getSourceRange() const { return Val->getSourceRange(); }
+ SourceRange getSourceRange() const { return Val->getSourceRange(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImaginaryLiteralClass;
}
static bool classof(const ImaginaryLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Val, &Val+1); }
};
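
For context, a minimal sketch of the GNU imaginary-literal extension this node models (hand-written illustration, not part of the patch; assumes GNU extensions and Clang's _Complex support in C++):

// Each literal with an 'i' suffix becomes an ImaginaryLiteral wrapping an
// IntegerLiteral or FloatingLiteral holding the same value.
double _Complex z = 3.0 + 2.0i;  // 2.0i -> ImaginaryLiteral(FloatingLiteral)
int _Complex    w = 4i;          // 4i   -> ImaginaryLiteral(IntegerLiteral)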
/// StringLiteral - This represents a string literal expression, e.g. "foo"
@@ -984,7 +1139,8 @@ class StringLiteral : public Expr {
unsigned NumConcatenated;
SourceLocation TokLocs[1];
- StringLiteral(QualType Ty) : Expr(StringLiteralClass, Ty, false, false) {}
+ StringLiteral(QualType Ty) :
+ Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false) {}
public:
/// This is the "fully general" constructor that allows representation of
@@ -1034,12 +1190,23 @@ public:
assert(TokNum < NumConcatenated && "Invalid tok number");
TokLocs[TokNum] = L;
}
+
+ /// getLocationOfByte - Return a source location that points to the specified
+ /// byte of this string literal.
+ ///
+ /// Strings are amazingly complex. They can be formed from multiple tokens
+ /// and can have escape sequences in them in addition to the usual trigraph
+ /// and escaped newline business. This routine handles this complexity.
+ ///
+ SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features,
+ const TargetInfo &Target) const;
typedef const SourceLocation *tokloc_iterator;
tokloc_iterator tokloc_begin() const { return TokLocs; }
tokloc_iterator tokloc_end() const { return TokLocs+NumConcatenated; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(TokLocs[0], TokLocs[NumConcatenated-1]);
}
static bool classof(const Stmt *T) {
@@ -1048,8 +1215,7 @@ public:
static bool classof(const StringLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
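
To see why getLocationOfByte is nontrivial, consider a concatenated literal (illustrative example only):

// One StringLiteral spanning three tokens (NumConcatenated == 3); its value
// is "ab\ncdef". Byte 3 ('c') lies inside the second token, after an escape
// sequence, so mapping a byte to a location must re-lex token spellings.
const char *s = "ab" "\ncd" "ef";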
/// ParenExpr - This represents a parenthesized expression, e.g. "(1)". This
@@ -1060,7 +1226,9 @@ class ParenExpr : public Expr {
public:
ParenExpr(SourceLocation l, SourceLocation r, Expr *val)
: Expr(ParenExprClass, val->getType(),
- val->isTypeDependent(), val->isValueDependent()),
+ val->getValueKind(), val->getObjectKind(),
+ val->isTypeDependent(), val->isValueDependent(),
+ val->containsUnexpandedParameterPack()),
L(l), R(r), Val(val) {}
/// \brief Construct an empty parenthesized expression.
@@ -1071,7 +1239,7 @@ public:
Expr *getSubExpr() { return cast<Expr>(Val); }
void setSubExpr(Expr *E) { Val = E; }
- virtual SourceRange getSourceRange() const { return SourceRange(L, R); }
+ SourceRange getSourceRange() const { return SourceRange(L, R); }
/// \brief Get the location of the left parenthesis '('.
SourceLocation getLParen() const { return L; }
@@ -1087,8 +1255,7 @@ public:
static bool classof(const ParenExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Val, &Val+1); }
};
@@ -1112,10 +1279,12 @@ private:
Stmt *Val;
public:
- UnaryOperator(Expr *input, Opcode opc, QualType type, SourceLocation l)
- : Expr(UnaryOperatorClass, type,
+ UnaryOperator(Expr *input, Opcode opc, QualType type,
+ ExprValueKind VK, ExprObjectKind OK, SourceLocation l)
+ : Expr(UnaryOperatorClass, type, VK, OK,
input->isTypeDependent() || type->isDependentType(),
- input->isValueDependent()),
+ input->isValueDependent(),
+ input->containsUnexpandedParameterPack()),
Opc(opc), Loc(l), Val(input) {}
/// \brief Build an empty unary operator.
@@ -1137,7 +1306,7 @@ public:
return Op == UO_PostInc || Op == UO_PostDec;
}
- /// isPostfix - Return true if this is a prefix operation, like --x.
+ /// isPrefix - Return true if this is a prefix operation, like --x.
static bool isPrefix(Opcode Op) {
return Op == UO_PreInc || Op == UO_PreDec;
}
@@ -1167,13 +1336,13 @@ public:
/// the given unary opcode.
static OverloadedOperatorKind getOverloadedOperator(Opcode Opc);
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
if (isPostfix())
return SourceRange(Val->getLocStart(), Loc);
else
return SourceRange(Loc, Val->getLocEnd());
}
- virtual SourceLocation getExprLoc() const { return Loc; }
+ SourceLocation getExprLoc() const { return Loc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryOperatorClass;
@@ -1181,8 +1350,7 @@ public:
static bool classof(const UnaryOperator *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Val, &Val+1); }
};
/// OffsetOfExpr - [C99 7.17] - This represents an expression of the form
@@ -1369,7 +1537,7 @@ public:
return NumExprs;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(OperatorLoc, RParenLoc);
}
@@ -1380,8 +1548,12 @@ public:
static bool classof(const OffsetOfExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ Stmt **begin =
+ reinterpret_cast<Stmt**>(reinterpret_cast<OffsetOfNode*>(this + 1)
+ + NumComps);
+ return child_range(begin, begin + NumExprs);
+ }
};
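
The children() arithmetic above relies on OffsetOfExpr being allocated with its component records and index expressions placed directly after the object. A rough sketch of that trailing-storage idiom (hypothetical types, not the actual allocation code):

// Layout: [OffsetOfExpr][OffsetOfNode x NumComps][Stmt* x NumExprs].
// 'this + 1' points just past the object; skipping NumComps component
// records lands on the array of array-index subexpressions.
struct NodeRec { unsigned Kind; };   // stands in for OffsetOfNode
struct TrailingSketch {
  unsigned NumComps, NumExprs;
  NodeRec *comps() { return reinterpret_cast<NodeRec *>(this + 1); }
  void **exprs() { return reinterpret_cast<void **>(comps() + NumComps); }
};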
/// SizeOfAlignOfExpr - [C99 6.5.3.4] - This is for sizeof/alignof, both of
@@ -1399,10 +1571,11 @@ public:
SizeOfAlignOfExpr(bool issizeof, TypeSourceInfo *TInfo,
QualType resultType, SourceLocation op,
SourceLocation rp) :
- Expr(SizeOfAlignOfExprClass, resultType,
+ Expr(SizeOfAlignOfExprClass, resultType, VK_RValue, OK_Ordinary,
false, // Never type-dependent (C++ [temp.dep.expr]p3).
// Value-dependent if the argument is type-dependent.
- TInfo->getType()->isDependentType()),
+ TInfo->getType()->isDependentType(),
+ TInfo->getType()->containsUnexpandedParameterPack()),
isSizeof(issizeof), isType(true), OpLoc(op), RParenLoc(rp) {
Argument.Ty = TInfo;
}
@@ -1410,10 +1583,11 @@ public:
SizeOfAlignOfExpr(bool issizeof, Expr *E,
QualType resultType, SourceLocation op,
SourceLocation rp) :
- Expr(SizeOfAlignOfExprClass, resultType,
+ Expr(SizeOfAlignOfExprClass, resultType, VK_RValue, OK_Ordinary,
false, // Never type-dependent (C++ [temp.dep.expr]p3).
// Value-dependent if the argument is type-dependent.
- E->isTypeDependent()),
+ E->isTypeDependent(),
+ E->containsUnexpandedParameterPack()),
isSizeof(issizeof), isType(false), OpLoc(op), RParenLoc(rp) {
Argument.Ex = E;
}
@@ -1459,7 +1633,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(OpLoc, RParenLoc);
}
@@ -1469,8 +1643,7 @@ public:
static bool classof(const SizeOfAlignOfExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children();
};
//===----------------------------------------------------------------------===//
@@ -1484,10 +1657,13 @@ class ArraySubscriptExpr : public Expr {
SourceLocation RBracketLoc;
public:
ArraySubscriptExpr(Expr *lhs, Expr *rhs, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
SourceLocation rbracketloc)
- : Expr(ArraySubscriptExprClass, t,
+ : Expr(ArraySubscriptExprClass, t, VK, OK,
lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent()),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
RBracketLoc(rbracketloc) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
@@ -1530,14 +1706,14 @@ public:
return cast<Expr>(getRHS()->getType()->isIntegerType() ? getRHS():getLHS());
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getLHS()->getLocStart(), RBracketLoc);
}
SourceLocation getRBracketLoc() const { return RBracketLoc; }
void setRBracketLoc(SourceLocation L) { RBracketLoc = L; }
- virtual SourceLocation getExprLoc() const { return getBase()->getExprLoc(); }
+ SourceLocation getExprLoc() const { return getBase()->getExprLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ArraySubscriptExprClass;
@@ -1545,8 +1721,9 @@ public:
static bool classof(const ArraySubscriptExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
};
@@ -1557,19 +1734,36 @@ public:
/// a subclass for overloaded operator calls that use operator syntax, e.g.,
/// "str1 + str2" to resolve to a function call.
class CallExpr : public Expr {
- enum { FN=0, ARGS_START=1 };
+ enum { FN=0, PREARGS_START=1 };
Stmt **SubExprs;
unsigned NumArgs;
SourceLocation RParenLoc;
protected:
- // This version of the constructor is for derived classes.
- CallExpr(ASTContext& C, StmtClass SC, Expr *fn, Expr **args, unsigned numargs,
- QualType t, SourceLocation rparenloc);
+ // These versions of the constructor are for derived classes.
+ CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
+ Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ SourceLocation rparenloc);
+ CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs, EmptyShell Empty);
+
+ Stmt *getPreArg(unsigned i) {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ return SubExprs[PREARGS_START+i];
+ }
+ const Stmt *getPreArg(unsigned i) const {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ return SubExprs[PREARGS_START+i];
+ }
+ void setPreArg(unsigned i, Stmt *PreArg) {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ SubExprs[PREARGS_START+i] = PreArg;
+ }
+
+ unsigned getNumPreArgs() const { return CallExprBits.NumPreArgs; }
public:
CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs, QualType t,
- SourceLocation rparenloc);
+ ExprValueKind VK, SourceLocation rparenloc);
/// \brief Build an empty call expression.
CallExpr(ASTContext &C, StmtClass SC, EmptyShell Empty);
@@ -1593,20 +1787,25 @@ public:
///
unsigned getNumArgs() const { return NumArgs; }
+ /// \brief Retrieve the call arguments.
+ Expr **getArgs() {
+ return reinterpret_cast<Expr **>(SubExprs+getNumPreArgs()+PREARGS_START);
+ }
+
/// getArg - Return the specified argument.
Expr *getArg(unsigned Arg) {
assert(Arg < NumArgs && "Arg access out of range!");
- return cast<Expr>(SubExprs[Arg+ARGS_START]);
+ return cast<Expr>(SubExprs[Arg+getNumPreArgs()+PREARGS_START]);
}
const Expr *getArg(unsigned Arg) const {
assert(Arg < NumArgs && "Arg access out of range!");
- return cast<Expr>(SubExprs[Arg+ARGS_START]);
+ return cast<Expr>(SubExprs[Arg+getNumPreArgs()+PREARGS_START]);
}
/// setArg - Set the specified argument.
void setArg(unsigned Arg, Expr *ArgExpr) {
assert(Arg < NumArgs && "Arg access out of range!");
- SubExprs[Arg+ARGS_START] = ArgExpr;
+ SubExprs[Arg+getNumPreArgs()+PREARGS_START] = ArgExpr;
}
/// setNumArgs - This changes the number of arguments present in this call.
@@ -1617,10 +1816,16 @@ public:
typedef ExprIterator arg_iterator;
typedef ConstExprIterator const_arg_iterator;
- arg_iterator arg_begin() { return SubExprs+ARGS_START; }
- arg_iterator arg_end() { return SubExprs+ARGS_START+getNumArgs(); }
- const_arg_iterator arg_begin() const { return SubExprs+ARGS_START; }
- const_arg_iterator arg_end() const { return SubExprs+ARGS_START+getNumArgs();}
+ arg_iterator arg_begin() { return SubExprs+PREARGS_START+getNumPreArgs(); }
+ arg_iterator arg_end() {
+ return SubExprs+PREARGS_START+getNumPreArgs()+getNumArgs();
+ }
+ const_arg_iterator arg_begin() const {
+ return SubExprs+PREARGS_START+getNumPreArgs();
+ }
+ const_arg_iterator arg_end() const {
+ return SubExprs+PREARGS_START+getNumPreArgs()+getNumArgs();
+ }
/// getNumCommas - Return the number of commas that must have been present in
/// this function call.
@@ -1628,7 +1833,7 @@ public:
/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
- unsigned isBuiltinCall(ASTContext &Context) const;
+ unsigned isBuiltinCall(const ASTContext &Context) const;
/// getCallReturnType - Get the return type of the call expr. This is not
/// always the type of the expr itself, if the return type is a reference
@@ -1638,7 +1843,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getCallee()->getLocStart(), RParenLoc);
}
@@ -1649,8 +1854,10 @@ public:
static bool classof(const CallExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0],
+ &SubExprs[0]+NumArgs+getNumPreArgs()+PREARGS_START);
+ }
};
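
The argument accessors above assume one contiguous SubExprs array: the callee in slot 0, then any derived-class "pre-arguments", then the real arguments. A small sketch of the index math (hypothetical helper, not in the patch):

// Layout for NumPreArgs == 2, NumArgs == 3:
//   [FN][prearg0][prearg1][arg0][arg1][arg2]
unsigned argSlot(unsigned NumPreArgs, unsigned Arg) {
  const unsigned PREARGS_START = 1;   // slot 0 holds the callee
  return PREARGS_START + NumPreArgs + Arg;
}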
/// MemberExpr - [C99 6.5.2.3] Structure and Union Members. X->F and X.F.
@@ -1705,9 +1912,11 @@ class MemberExpr : public Expr {
public:
MemberExpr(Expr *base, bool isarrow, ValueDecl *memberdecl,
- const DeclarationNameInfo &NameInfo, QualType ty)
- : Expr(MemberExprClass, ty,
- base->isTypeDependent(), base->isValueDependent()),
+ const DeclarationNameInfo &NameInfo, QualType ty,
+ ExprValueKind VK, ExprObjectKind OK)
+ : Expr(MemberExprClass, ty, VK, OK,
+ base->isTypeDependent(), base->isValueDependent(),
+ base->containsUnexpandedParameterPack()),
Base(base), MemberDecl(memberdecl), MemberLoc(NameInfo.getLoc()),
MemberDNLoc(NameInfo.getInfo()), IsArrow(isarrow),
HasQualifierOrFoundDecl(false), HasExplicitTemplateArgumentList(false) {
@@ -1719,9 +1928,11 @@ public:
// (i.e., source locations for C++ operator names or type source info
// for constructors, destructors and conversion operators).
MemberExpr(Expr *base, bool isarrow, ValueDecl *memberdecl,
- SourceLocation l, QualType ty)
- : Expr(MemberExprClass, ty,
- base->isTypeDependent(), base->isValueDependent()),
+ SourceLocation l, QualType ty,
+ ExprValueKind VK, ExprObjectKind OK)
+ : Expr(MemberExprClass, ty, VK, OK,
+ base->isTypeDependent(), base->isValueDependent(),
+ base->containsUnexpandedParameterPack()),
Base(base), MemberDecl(memberdecl), MemberLoc(l), MemberDNLoc(),
IsArrow(isarrow),
HasQualifierOrFoundDecl(false), HasExplicitTemplateArgumentList(false) {}
@@ -1731,7 +1942,7 @@ public:
ValueDecl *memberdecl, DeclAccessPair founddecl,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *targs,
- QualType ty);
+ QualType ty, ExprValueKind VK, ExprObjectKind OK);
void setBase(Expr *E) { Base = E; }
Expr *getBase() const { return cast<Expr>(Base); }
@@ -1866,7 +2077,7 @@ public:
SourceLocation getMemberLoc() const { return MemberLoc; }
void setMemberLoc(SourceLocation L) { MemberLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
// If we have an implicit base (like a C++ implicit this),
// make sure not to return its location
SourceLocation EndLoc = (HasExplicitTemplateArgumentList)
@@ -1878,7 +2089,7 @@ public:
return SourceRange(BaseLoc, EndLoc);
}
- virtual SourceLocation getExprLoc() const { return MemberLoc; }
+ SourceLocation getExprLoc() const { return MemberLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MemberExprClass;
@@ -1886,8 +2097,10 @@ public:
static bool classof(const MemberExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Base, &Base+1); }
+
+ friend class ASTReader;
+ friend class ASTStmtWriter;
};
/// CompoundLiteralExpr - [C99 6.5.2.5]
@@ -1904,11 +2117,12 @@ class CompoundLiteralExpr : public Expr {
Stmt *Init;
bool FileScope;
public:
- // FIXME: Can compound literals be value-dependent?
CompoundLiteralExpr(SourceLocation lparenloc, TypeSourceInfo *tinfo,
- QualType T, Expr *init, bool fileScope)
- : Expr(CompoundLiteralExprClass, T,
- tinfo->getType()->isDependentType(), false),
+ QualType T, ExprValueKind VK, Expr *init, bool fileScope)
+ : Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary,
+ tinfo->getType()->isDependentType(),
+ init->isValueDependent(),
+ init->containsUnexpandedParameterPack()),
LParenLoc(lparenloc), TInfo(tinfo), Init(init), FileScope(fileScope) {}
/// \brief Construct an empty compound literal.
@@ -1928,7 +2142,7 @@ public:
TypeSourceInfo *getTypeSourceInfo() const { return TInfo; }
void setTypeSourceInfo(TypeSourceInfo* tinfo) { TInfo = tinfo; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
// FIXME: Init should never be null.
if (!Init)
return SourceRange();
@@ -1943,8 +2157,7 @@ public:
static bool classof(const CompoundLiteralExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Init, &Init+1); }
};
/// CastExpr - Base class for type casts, including both implicit
@@ -1956,11 +2169,9 @@ public:
typedef clang::CastKind CastKind;
private:
- unsigned Kind : 5;
- unsigned BasePathSize : BitsRemaining - 5;
Stmt *Op;
- void CheckBasePath() const {
+ void CheckCastConsistency() const {
#ifndef NDEBUG
switch (getCastKind()) {
case CK_DerivedToBase:
@@ -1972,16 +2183,13 @@ private:
break;
// These should not have an inheritance path.
- case CK_Unknown:
case CK_BitCast:
- case CK_LValueBitCast:
- case CK_NoOp:
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
- case CK_UserDefinedConversion:
+ case CK_NullToPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
@@ -1991,10 +2199,32 @@ private:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
- case CK_MemberPointerToBoolean:
case CK_AnyPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ assert(!getType()->isBooleanType() && "unheralded conversion to bool");
+ // fallthrough to check for null base path
+
+ case CK_Dependent:
+ case CK_LValueToRValue:
+ case CK_GetObjCProperty:
+ case CK_NoOp:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_MemberPointerToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean:
+ case CK_LValueBitCast: // -> bool&
+ case CK_UserDefinedConversion: // operator bool()
assert(path_empty() && "Cast kind should not have a base path!");
break;
}
@@ -2007,26 +2237,33 @@ private:
CXXBaseSpecifier **path_buffer();
protected:
- CastExpr(StmtClass SC, QualType ty, const CastKind kind, Expr *op,
- unsigned BasePathSize) :
- Expr(SC, ty,
+ CastExpr(StmtClass SC, QualType ty, ExprValueKind VK,
+ const CastKind kind, Expr *op, unsigned BasePathSize) :
+ Expr(SC, ty, VK, OK_Ordinary,
// Cast expressions are type-dependent if the type is
// dependent (C++ [temp.dep.expr]p3).
ty->isDependentType(),
// Cast expressions are value-dependent if the type is
// dependent or if the subexpression is value-dependent.
- ty->isDependentType() || (op && op->isValueDependent())),
- Kind(kind), BasePathSize(BasePathSize), Op(op) {
- CheckBasePath();
+ ty->isDependentType() || (op && op->isValueDependent()),
+ (ty->containsUnexpandedParameterPack() ||
+ op->containsUnexpandedParameterPack())),
+ Op(op) {
+ assert(kind != CK_Invalid && "creating cast with invalid cast kind");
+ CastExprBits.Kind = kind;
+ CastExprBits.BasePathSize = BasePathSize;
+ CheckCastConsistency();
}
/// \brief Construct an empty cast.
CastExpr(StmtClass SC, EmptyShell Empty, unsigned BasePathSize)
- : Expr(SC, Empty), BasePathSize(BasePathSize) { }
+ : Expr(SC, Empty) {
+ CastExprBits.BasePathSize = BasePathSize;
+ }
public:
- CastKind getCastKind() const { return static_cast<CastKind>(Kind); }
- void setCastKind(CastKind K) { Kind = K; }
+ CastKind getCastKind() const { return (CastKind) CastExprBits.Kind; }
+ void setCastKind(CastKind K) { CastExprBits.Kind = K; }
const char *getCastKindName() const;
Expr *getSubExpr() { return cast<Expr>(Op); }
@@ -2043,8 +2280,8 @@ public:
typedef CXXBaseSpecifier **path_iterator;
typedef const CXXBaseSpecifier * const *path_const_iterator;
- bool path_empty() const { return BasePathSize == 0; }
- unsigned path_size() const { return BasePathSize; }
+ bool path_empty() const { return CastExprBits.BasePathSize == 0; }
+ unsigned path_size() const { return CastExprBits.BasePathSize; }
path_iterator path_begin() { return path_buffer(); }
path_iterator path_end() { return path_buffer() + path_size(); }
path_const_iterator path_begin() const { return path_buffer(); }
@@ -2059,8 +2296,7 @@ public:
static bool classof(const CastExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Op, &Op+1); }
};
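
For reference, a hand-written example of a cast that carries a non-empty base path (not from the patch):

struct A { int a; };
struct B : A { };
struct C : B { };
int readA(C *c) {
  // ImplicitCastExpr with CK_DerivedToBase; the base path records the
  // inheritance steps walked from C, through B, to A.
  A *a = c;
  return a->a;
}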
/// ImplicitCastExpr - Allows us to explicitly represent implicit type
@@ -2087,8 +2323,7 @@ class ImplicitCastExpr : public CastExpr {
private:
ImplicitCastExpr(QualType ty, CastKind kind, Expr *op,
unsigned BasePathLength, ExprValueKind VK)
- : CastExpr(ImplicitCastExprClass, ty, kind, op, BasePathLength) {
- ValueKind = VK;
+ : CastExpr(ImplicitCastExprClass, ty, VK, kind, op, BasePathLength) {
}
/// \brief Construct an empty implicit cast.
@@ -2099,8 +2334,7 @@ public:
enum OnStack_t { OnStack };
ImplicitCastExpr(OnStack_t _, QualType ty, CastKind kind, Expr *op,
ExprValueKind VK)
- : CastExpr(ImplicitCastExprClass, ty, kind, op, 0) {
- ValueKind = VK;
+ : CastExpr(ImplicitCastExprClass, ty, VK, kind, op, 0) {
}
static ImplicitCastExpr *Create(ASTContext &Context, QualType T,
@@ -2110,18 +2344,10 @@ public:
static ImplicitCastExpr *CreateEmpty(ASTContext &Context, unsigned PathSize);
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return getSubExpr()->getSourceRange();
}
- /// getValueKind - The value kind that this cast produces.
- ExprValueKind getValueKind() const {
- return static_cast<ExprValueKind>(ValueKind);
- }
-
- /// setValueKind - Set the value kind this cast produces.
- void setValueKind(ExprValueKind Cat) { ValueKind = Cat; }
-
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImplicitCastExprClass;
}
@@ -2150,9 +2376,10 @@ class ExplicitCastExpr : public CastExpr {
TypeSourceInfo *TInfo;
protected:
- ExplicitCastExpr(StmtClass SC, QualType exprTy, CastKind kind,
- Expr *op, unsigned PathSize, TypeSourceInfo *writtenTy)
- : CastExpr(SC, exprTy, kind, op, PathSize), TInfo(writtenTy) {}
+ ExplicitCastExpr(StmtClass SC, QualType exprTy, ExprValueKind VK,
+ CastKind kind, Expr *op, unsigned PathSize,
+ TypeSourceInfo *writtenTy)
+ : CastExpr(SC, exprTy, VK, kind, op, PathSize), TInfo(writtenTy) {}
/// \brief Construct an empty explicit cast.
ExplicitCastExpr(StmtClass SC, EmptyShell Shell, unsigned PathSize)
@@ -2182,10 +2409,10 @@ class CStyleCastExpr : public ExplicitCastExpr {
SourceLocation LPLoc; // the location of the left paren
SourceLocation RPLoc; // the location of the right paren
- CStyleCastExpr(QualType exprTy, CastKind kind, Expr *op,
+ CStyleCastExpr(QualType exprTy, ExprValueKind vk, CastKind kind, Expr *op,
unsigned PathSize, TypeSourceInfo *writtenTy,
SourceLocation l, SourceLocation r)
- : ExplicitCastExpr(CStyleCastExprClass, exprTy, kind, op, PathSize,
+ : ExplicitCastExpr(CStyleCastExprClass, exprTy, vk, kind, op, PathSize,
writtenTy), LPLoc(l), RPLoc(r) {}
/// \brief Construct an empty C-style explicit cast.
@@ -2193,7 +2420,8 @@ class CStyleCastExpr : public ExplicitCastExpr {
: ExplicitCastExpr(CStyleCastExprClass, Shell, PathSize) { }
public:
- static CStyleCastExpr *Create(ASTContext &Context, QualType T, CastKind K,
+ static CStyleCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, CastKind K,
Expr *Op, const CXXCastPath *BasePath,
TypeSourceInfo *WrittenTy, SourceLocation L,
SourceLocation R);
@@ -2206,7 +2434,7 @@ public:
SourceLocation getRParenLoc() const { return RPLoc; }
void setRParenLoc(SourceLocation L) { RPLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(LPLoc, getSubExpr()->getSourceRange().getEnd());
}
static bool classof(const Stmt *T) {
@@ -2246,10 +2474,13 @@ private:
public:
BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
SourceLocation opLoc)
- : Expr(BinaryOperatorClass, ResTy,
+ : Expr(BinaryOperatorClass, ResTy, VK, OK,
lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent()),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
Opc(opc), OpLoc(opLoc) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
@@ -2272,7 +2503,7 @@ public:
Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
void setRHS(Expr *E) { SubExprs[RHS] = E; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getLHS()->getLocStart(), getRHS()->getLocEnd());
}
@@ -2291,6 +2522,7 @@ public:
static OverloadedOperatorKind getOverloadedOperator(Opcode Opc);
/// predicates to categorize the respective opcodes.
+ bool isPtrMemOp() const { return Opc == BO_PtrMemD || Opc == BO_PtrMemI; }
bool isMultiplicativeOp() const { return Opc >= BO_Mul && Opc <= BO_Rem; }
static bool isAdditiveOp(Opcode Opc) { return Opc == BO_Add || Opc==BO_Sub; }
bool isAdditiveOp() const { return isAdditiveOp(getOpcode()); }
@@ -2312,13 +2544,24 @@ public:
static bool isLogicalOp(Opcode Opc) { return Opc == BO_LAnd || Opc==BO_LOr; }
bool isLogicalOp() const { return isLogicalOp(getOpcode()); }
- bool isAssignmentOp() const { return Opc >= BO_Assign && Opc <= BO_OrAssign; }
- bool isCompoundAssignmentOp() const {
+ static bool isAssignmentOp(Opcode Opc) {
+ return Opc >= BO_Assign && Opc <= BO_OrAssign;
+ }
+ bool isAssignmentOp() const { return isAssignmentOp(getOpcode()); }
+
+ static bool isCompoundAssignmentOp(Opcode Opc) {
return Opc > BO_Assign && Opc <= BO_OrAssign;
}
- bool isShiftAssignOp() const {
+ bool isCompoundAssignmentOp() const {
+ return isCompoundAssignmentOp(getOpcode());
+ }
+
+ static bool isShiftAssignOp(Opcode Opc) {
return Opc == BO_ShlAssign || Opc == BO_ShrAssign;
}
+ bool isShiftAssignOp() const {
+ return isShiftAssignOp(getOpcode());
+ }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstBinaryOperatorConstant &&
@@ -2327,15 +2570,19 @@ public:
static bool classof(const BinaryOperator *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
protected:
BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
SourceLocation opLoc, bool dead)
- : Expr(CompoundAssignOperatorClass, ResTy,
+ : Expr(CompoundAssignOperatorClass, ResTy, VK, OK,
lhs->isTypeDependent() || rhs->isTypeDependent(),
- lhs->isValueDependent() || rhs->isValueDependent()),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
Opc(opc), OpLoc(opLoc) {
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
@@ -2355,11 +2602,11 @@ class CompoundAssignOperator : public BinaryOperator {
QualType ComputationLHSType;
QualType ComputationResultType;
public:
- CompoundAssignOperator(Expr *lhs, Expr *rhs, Opcode opc,
- QualType ResType, QualType CompLHSType,
- QualType CompResultType,
+ CompoundAssignOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResType,
+ ExprValueKind VK, ExprObjectKind OK,
+ QualType CompLHSType, QualType CompResultType,
SourceLocation OpLoc)
- : BinaryOperator(lhs, rhs, opc, ResType, OpLoc, true),
+ : BinaryOperator(lhs, rhs, opc, ResType, VK, OK, OpLoc, true),
ComputationLHSType(CompLHSType),
ComputationResultType(CompResultType) {
assert(isCompoundAssignmentOp() &&
@@ -2385,75 +2632,97 @@ public:
}
};
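
An illustrative case for the two computation types (assuming the usual arithmetic conversions):

char c = 'a';
void bump() {
  // ComputationLHSType and ComputationResultType are both double here,
  // while the expression's own type remains char:
  c += 1.5;   // computed roughly as c = (char)((double)c + 1.5)
}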
-/// ConditionalOperator - The ?: operator. Note that LHS may be null when the
-/// GNU "missing LHS" extension is in use.
-///
-class ConditionalOperator : public Expr {
+/// AbstractConditionalOperator - An abstract base class for
+/// ConditionalOperator and BinaryConditionalOperator.
+class AbstractConditionalOperator : public Expr {
+ SourceLocation QuestionLoc, ColonLoc;
+ friend class ASTStmtReader;
+
+protected:
+ AbstractConditionalOperator(StmtClass SC, QualType T,
+ ExprValueKind VK, ExprObjectKind OK,
+ bool TD, bool VD,
+ bool ContainsUnexpandedParameterPack,
+ SourceLocation qloc,
+ SourceLocation cloc)
+ : Expr(SC, T, VK, OK, TD, VD, ContainsUnexpandedParameterPack),
+ QuestionLoc(qloc), ColonLoc(cloc) {}
+
+ AbstractConditionalOperator(StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty) { }
+
+public:
+ // getCond - Return the expression representing the condition for
+ // the ?: operator.
+ Expr *getCond() const;
+
+ // getTrueExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to true.
+ Expr *getTrueExpr() const;
+
+ // getFalseExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to false. This is
+ // the same as getRHS.
+ Expr *getFalseExpr() const;
+
+ SourceLocation getQuestionLoc() const { return QuestionLoc; }
+ SourceLocation getColonLoc() const { return ColonLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ConditionalOperatorClass ||
+ T->getStmtClass() == BinaryConditionalOperatorClass;
+ }
+ static bool classof(const AbstractConditionalOperator *) { return true; }
+};
+
+/// ConditionalOperator - The ?: ternary operator. The GNU "missing
+/// middle" extension is a BinaryConditionalOperator.
+class ConditionalOperator : public AbstractConditionalOperator {
enum { COND, LHS, RHS, END_EXPR };
Stmt* SubExprs[END_EXPR]; // Left/Middle/Right hand sides.
- Stmt* Save;
- SourceLocation QuestionLoc, ColonLoc;
+
+ friend class ASTStmtReader;
public:
ConditionalOperator(Expr *cond, SourceLocation QLoc, Expr *lhs,
- SourceLocation CLoc, Expr *rhs, Expr *save, QualType t)
- : Expr(ConditionalOperatorClass, t,
+ SourceLocation CLoc, Expr *rhs,
+ QualType t, ExprValueKind VK, ExprObjectKind OK)
+ : AbstractConditionalOperator(ConditionalOperatorClass, t, VK, OK,
// FIXME: the type of the conditional operator doesn't
// depend on the type of the conditional, but the standard
// seems to imply that it could. File a bug!
- ((lhs && lhs->isTypeDependent()) || (rhs && rhs->isTypeDependent())),
- (cond->isValueDependent() ||
- (lhs && lhs->isValueDependent()) ||
- (rhs && rhs->isValueDependent()))),
- QuestionLoc(QLoc),
- ColonLoc(CLoc) {
+ (lhs->isTypeDependent() || rhs->isTypeDependent()),
+ (cond->isValueDependent() || lhs->isValueDependent() ||
+ rhs->isValueDependent()),
+ (cond->containsUnexpandedParameterPack() ||
+ lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack()),
+ QLoc, CLoc) {
SubExprs[COND] = cond;
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
- Save = save;
}
/// \brief Build an empty conditional operator.
explicit ConditionalOperator(EmptyShell Empty)
- : Expr(ConditionalOperatorClass, Empty) { }
+ : AbstractConditionalOperator(ConditionalOperatorClass, Empty) { }
// getCond - Return the expression representing the condition for
- // the ?: operator.
+ // the ?: operator.
Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
- void setCond(Expr *E) { SubExprs[COND] = E; }
- // getTrueExpr - Return the subexpression representing the value of the ?:
- // expression if the condition evaluates to true.
- Expr *getTrueExpr() const {
- return cast<Expr>(!Save ? SubExprs[LHS] : SubExprs[COND]);
- }
+ // getTrueExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to true.
+ Expr *getTrueExpr() const { return cast<Expr>(SubExprs[LHS]); }
- // getFalseExpr - Return the subexpression representing the value of the ?:
- // expression if the condition evaluates to false. This is the same as getRHS.
+ // getFalseExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to false. This is
+ // the same as getRHS.
Expr *getFalseExpr() const { return cast<Expr>(SubExprs[RHS]); }
- // getSaveExpr - In most cases this value will be null. Except a GCC extension
- // allows the left subexpression to be omitted, and instead of that condition
- // be returned. e.g: x ?: y is shorthand for x ? x : y, except that the
- // expression "x" is only evaluated once. Under this senario, this function
- // returns the original, non-converted condition expression for the ?:operator
- Expr *getSaveExpr() const { return Save? cast<Expr>(Save) : (Expr*)0; }
-
- Expr *getLHS() const { return Save ? 0 : cast<Expr>(SubExprs[LHS]); }
- void setLHS(Expr *E) { SubExprs[LHS] = E; }
-
+ Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
- void setRHS(Expr *E) { SubExprs[RHS] = E; }
-
- Expr *getSAVE() const { return Save? cast<Expr>(Save) : (Expr*)0; }
- void setSAVE(Expr *E) { Save = E; }
-
- SourceLocation getQuestionLoc() const { return QuestionLoc; }
- void setQuestionLoc(SourceLocation L) { QuestionLoc = L; }
- SourceLocation getColonLoc() const { return ColonLoc; }
- void setColonLoc(SourceLocation L) { ColonLoc = L; }
-
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getCond()->getLocStart(), getRHS()->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -2462,18 +2731,118 @@ public:
static bool classof(const ConditionalOperator *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+/// BinaryConditionalOperator - The GNU extension to the conditional
+/// operator which allows the middle operand to be omitted.
+///
+/// This is a different expression kind on the assumption that almost
+/// every client ends up needing to know that these are different.
+class BinaryConditionalOperator : public AbstractConditionalOperator {
+ enum { COMMON, COND, LHS, RHS, NUM_SUBEXPRS };
+
+ /// - the common condition/left-hand-side expression, which will be
+ /// evaluated as the opaque value
+ /// - the condition, expressed in terms of the opaque value
+ /// - the left-hand-side, expressed in terms of the opaque value
+ /// - the right-hand-side
+ Stmt *SubExprs[NUM_SUBEXPRS];
+ OpaqueValueExpr *OpaqueValue;
+
+ friend class ASTStmtReader;
+public:
+ BinaryConditionalOperator(Expr *common, OpaqueValueExpr *opaqueValue,
+ Expr *cond, Expr *lhs, Expr *rhs,
+ SourceLocation qloc, SourceLocation cloc,
+ QualType t, ExprValueKind VK, ExprObjectKind OK)
+ : AbstractConditionalOperator(BinaryConditionalOperatorClass, t, VK, OK,
+ (common->isTypeDependent() || rhs->isTypeDependent()),
+ (common->isValueDependent() || rhs->isValueDependent()),
+ (common->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack()),
+ qloc, cloc),
+ OpaqueValue(opaqueValue) {
+ SubExprs[COMMON] = common;
+ SubExprs[COND] = cond;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+
+ OpaqueValue->setSourceExpr(common);
+ }
+
+ /// \brief Build an empty conditional operator.
+ explicit BinaryConditionalOperator(EmptyShell Empty)
+ : AbstractConditionalOperator(BinaryConditionalOperatorClass, Empty) { }
+
+ /// \brief getCommon - Return the common expression, written to the
+ /// left of the condition. The opaque value will be bound to the
+ /// result of this expression.
+ Expr *getCommon() const { return cast<Expr>(SubExprs[COMMON]); }
+
+ /// \brief getOpaqueValue - Return the opaque value placeholder.
+ OpaqueValueExpr *getOpaqueValue() const { return OpaqueValue; }
+
+ /// \brief getCond - Return the condition expression; this is defined
+ /// in terms of the opaque value.
+ Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
+
+ /// \brief getTrueExpr - Return the subexpression which will be
+ /// evaluated if the condition evaluates to true; this is defined
+ /// in terms of the opaque value.
+ Expr *getTrueExpr() const {
+ return cast<Expr>(SubExprs[LHS]);
+ }
+
+ /// \brief getFalseExpr - Return the subexpression which will be
+ /// evaluated if the condition evaluates to false; this is
+ /// defined in terms of the opaque value.
+ Expr *getFalseExpr() const {
+ return cast<Expr>(SubExprs[RHS]);
+ }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(getCommon()->getLocStart(), getFalseExpr()->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BinaryConditionalOperatorClass;
+ }
+ static bool classof(const BinaryConditionalOperator *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(SubExprs, SubExprs + NUM_SUBEXPRS);
+ }
};
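
The source form being modeled, for reference (GNU extension; the shared operand is evaluated only once, via the opaque value):

int fallback();
int pick(int x) {
  return x ?: fallback();  // as if 'x ? x : fallback()', but x evaluates once
}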
+inline Expr *AbstractConditionalOperator::getCond() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getCond();
+ return cast<BinaryConditionalOperator>(this)->getCond();
+}
+
+inline Expr *AbstractConditionalOperator::getTrueExpr() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getTrueExpr();
+ return cast<BinaryConditionalOperator>(this)->getTrueExpr();
+}
+
+inline Expr *AbstractConditionalOperator::getFalseExpr() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getFalseExpr();
+ return cast<BinaryConditionalOperator>(this)->getFalseExpr();
+}
+
/// AddrLabelExpr - The GNU address of label extension, representing &&label.
class AddrLabelExpr : public Expr {
SourceLocation AmpAmpLoc, LabelLoc;
- LabelStmt *Label;
+ LabelDecl *Label;
public:
- AddrLabelExpr(SourceLocation AALoc, SourceLocation LLoc, LabelStmt *L,
+ AddrLabelExpr(SourceLocation AALoc, SourceLocation LLoc, LabelDecl *L,
QualType t)
- : Expr(AddrLabelExprClass, t, false, false),
+ : Expr(AddrLabelExprClass, t, VK_RValue, OK_Ordinary, false, false, false),
AmpAmpLoc(AALoc), LabelLoc(LLoc), Label(L) {}
/// \brief Build an empty address of a label expression.
@@ -2485,12 +2854,12 @@ public:
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AmpAmpLoc, LabelLoc);
}
- LabelStmt *getLabel() const { return Label; }
- void setLabel(LabelStmt *S) { Label = S; }
+ LabelDecl *getLabel() const { return Label; }
+ void setLabel(LabelDecl *L) { Label = L; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AddrLabelExprClass;
@@ -2498,13 +2867,15 @@ public:
static bool classof(const AddrLabelExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
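
A minimal example of the extension (labels-as-values plus computed goto, GNU-only):

int route(int n) {
  void *target = n ? &&done : &&fail;  // each '&&label' is an AddrLabelExpr
  goto *target;
done: return 0;
fail: return 1;
}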
/// StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
/// The StmtExpr contains a single CompoundStmt node, which it evaluates and
/// takes the value of the last subexpression.
+///
+/// A StmtExpr is always an r-value; values "returned" out of a
+/// StmtExpr will be copied.
class StmtExpr : public Expr {
Stmt *SubStmt;
SourceLocation LParenLoc, RParenLoc;
@@ -2512,7 +2883,8 @@ public:
// FIXME: Does type-dependence need to be computed differently?
StmtExpr(CompoundStmt *substmt, QualType T,
SourceLocation lp, SourceLocation rp) :
- Expr(StmtExprClass, T, T->isDependentType(), false),
+ Expr(StmtExprClass, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), false, false),
SubStmt(substmt), LParenLoc(lp), RParenLoc(rp) { }
/// \brief Build an empty statement expression.
@@ -2522,7 +2894,7 @@ public:
const CompoundStmt *getSubStmt() const { return cast<CompoundStmt>(SubStmt); }
void setSubStmt(CompoundStmt *S) { SubStmt = S; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(LParenLoc, RParenLoc);
}
@@ -2537,55 +2909,9 @@ public:
static bool classof(const StmtExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
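
The construct in source form (GNU extension), shown as the classic once-evaluating macro:

#define SQUARE(x) ({ __typeof__(x) _t = (x); _t * _t; })
int sq(int n) { return SQUARE(n); }  // the ({...}) is a StmtExpr, an r-value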
-/// TypesCompatibleExpr - GNU builtin-in function __builtin_types_compatible_p.
-/// This AST node represents a function that returns 1 if two *types* (not
-/// expressions) are compatible. The result of this built-in function can be
-/// used in integer constant expressions.
-class TypesCompatibleExpr : public Expr {
- TypeSourceInfo *TInfo1;
- TypeSourceInfo *TInfo2;
- SourceLocation BuiltinLoc, RParenLoc;
-public:
- TypesCompatibleExpr(QualType ReturnType, SourceLocation BLoc,
- TypeSourceInfo *tinfo1, TypeSourceInfo *tinfo2,
- SourceLocation RP) :
- Expr(TypesCompatibleExprClass, ReturnType, false, false),
- TInfo1(tinfo1), TInfo2(tinfo2), BuiltinLoc(BLoc), RParenLoc(RP) {}
-
- /// \brief Build an empty __builtin_type_compatible_p expression.
- explicit TypesCompatibleExpr(EmptyShell Empty)
- : Expr(TypesCompatibleExprClass, Empty) { }
-
- TypeSourceInfo *getArgTInfo1() const { return TInfo1; }
- void setArgTInfo1(TypeSourceInfo *TInfo) { TInfo1 = TInfo; }
- TypeSourceInfo *getArgTInfo2() const { return TInfo2; }
- void setArgTInfo2(TypeSourceInfo *TInfo) { TInfo2 = TInfo; }
-
- QualType getArgType1() const { return TInfo1->getType(); }
- QualType getArgType2() const { return TInfo2->getType(); }
-
- SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
- void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
-
- SourceLocation getRParenLoc() const { return RParenLoc; }
- void setRParenLoc(SourceLocation L) { RParenLoc = L; }
-
- virtual SourceRange getSourceRange() const {
- return SourceRange(BuiltinLoc, RParenLoc);
- }
- static bool classof(const Stmt *T) {
- return T->getStmtClass() == TypesCompatibleExprClass;
- }
- static bool classof(const TypesCompatibleExpr *) { return true; }
-
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
-};
/// ShuffleVectorExpr - clang-specific built-in function
/// __builtin_shufflevector.
@@ -2604,18 +2930,9 @@ class ShuffleVectorExpr : public Expr {
unsigned NumExprs;
public:
- // FIXME: Can a shufflevector be value-dependent? Does type-dependence need
- // to be computed differently?
ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
QualType Type, SourceLocation BLoc,
- SourceLocation RP) :
- Expr(ShuffleVectorExprClass, Type, Type->isDependentType(), false),
- BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr) {
-
- SubExprs = new (C) Stmt*[nexpr];
- for (unsigned i = 0; i < nexpr; i++)
- SubExprs[i] = args[i];
- }
+ SourceLocation RP);
/// \brief Build an empty vector-shuffle expression.
explicit ShuffleVectorExpr(EmptyShell Empty)
@@ -2627,7 +2944,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -2640,6 +2957,9 @@ public:
/// pointers.
unsigned getNumSubExprs() const { return NumExprs; }
+ /// \brief Retrieve the array of expressions.
+ Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
+
/// getExpr - Return the Expr at the specified index.
Expr *getExpr(unsigned Index) {
assert((Index < NumExprs) && "Arg access out of range!");
@@ -2658,8 +2978,9 @@ public:
}
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+NumExprs);
+ }
};
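
A short usage sketch (Clang vector extensions; indices 0-3 select elements from the first operand, 4-7 from the second):

typedef float float4 __attribute__((ext_vector_type(4)));
float4 interleave_lo(float4 a, float4 b) {
  return __builtin_shufflevector(a, b, 0, 4, 1, 5);
}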
/// ChooseExpr - GNU built-in function __builtin_choose_expr.
@@ -2676,9 +2997,13 @@ class ChooseExpr : public Expr {
Stmt* SubExprs[END_EXPR]; // Left/Middle/Right hand sides.
SourceLocation BuiltinLoc, RParenLoc;
public:
- ChooseExpr(SourceLocation BLoc, Expr *cond, Expr *lhs, Expr *rhs, QualType t,
+ ChooseExpr(SourceLocation BLoc, Expr *cond, Expr *lhs, Expr *rhs,
+ QualType t, ExprValueKind VK, ExprObjectKind OK,
SourceLocation RP, bool TypeDependent, bool ValueDependent)
- : Expr(ChooseExprClass, t, TypeDependent, ValueDependent),
+ : Expr(ChooseExprClass, t, VK, OK, TypeDependent, ValueDependent,
+ (cond->containsUnexpandedParameterPack() ||
+ lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
BuiltinLoc(BLoc), RParenLoc(RP) {
SubExprs[COND] = cond;
SubExprs[LHS] = lhs;
@@ -2690,11 +3015,11 @@ public:
/// isConditionTrue - Return whether the condition is true (i.e. not
/// equal to zero).
- bool isConditionTrue(ASTContext &C) const;
+ bool isConditionTrue(const ASTContext &C) const;
/// getChosenSubExpr - Return the subexpression chosen according to the
/// condition.
- Expr *getChosenSubExpr(ASTContext &C) const {
+ Expr *getChosenSubExpr(const ASTContext &C) const {
return isConditionTrue(C) ? getLHS() : getRHS();
}
@@ -2711,7 +3036,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -2720,8 +3045,9 @@ public:
static bool classof(const ChooseExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
};
/// GNUNullExpr - Implements the GNU __null extension, which is a name
@@ -2736,7 +3062,8 @@ class GNUNullExpr : public Expr {
public:
GNUNullExpr(QualType Ty, SourceLocation Loc)
- : Expr(GNUNullExprClass, Ty, false, false), TokenLoc(Loc) { }
+ : Expr(GNUNullExprClass, Ty, VK_RValue, OK_Ordinary, false, false, false),
+ TokenLoc(Loc) { }
/// \brief Build an empty GNU __null expression.
explicit GNUNullExpr(EmptyShell Empty) : Expr(GNUNullExprClass, Empty) { }
@@ -2745,7 +3072,7 @@ public:
SourceLocation getTokenLocation() const { return TokenLoc; }
void setTokenLocation(SourceLocation L) { TokenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(TokenLoc);
}
static bool classof(const Stmt *T) {
@@ -2754,8 +3081,7 @@ public:
static bool classof(const GNUNullExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
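
In source form (g++-compatible extension):

int *p = __null;  // a GNUNullExpr: a true null pointer constant, kept
                  // distinct from the integer literal 0 for diagnostics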
/// VAArgExpr, used for the builtin function __builtin_va_arg.
@@ -2766,7 +3092,10 @@ class VAArgExpr : public Expr {
public:
VAArgExpr(SourceLocation BLoc, Expr* e, TypeSourceInfo *TInfo,
SourceLocation RPLoc, QualType t)
- : Expr(VAArgExprClass, t, t->isDependentType(), false),
+ : Expr(VAArgExprClass, t, VK_RValue, OK_Ordinary,
+ t->isDependentType(), false,
+ (TInfo->getType()->containsUnexpandedParameterPack() ||
+ e->containsUnexpandedParameterPack())),
Val(e), TInfo(TInfo),
BuiltinLoc(BLoc),
RParenLoc(RPLoc) { }
@@ -2787,7 +3116,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -2796,8 +3125,7 @@ public:
static bool classof(const VAArgExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Val, &Val+1); }
};
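
A minimal use of the builtin (this is what the standard va_arg macro expands to under Clang and GCC):

#include <cstdarg>
int first_int(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int v = __builtin_va_arg(ap, int);  // a VAArgExpr
  va_end(ap);
  return v;
}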
/// @brief Describes a C or C++ initializer list.
@@ -2866,12 +3194,15 @@ public:
unsigned getNumInits() const { return InitExprs.size(); }
- const Expr* getInit(unsigned Init) const {
+ /// \brief Retrieve the set of initializers.
+ Expr **getInits() { return reinterpret_cast<Expr **>(InitExprs.data()); }
+
+ const Expr *getInit(unsigned Init) const {
assert(Init < getNumInits() && "Initializer access out of range!");
return cast_or_null<Expr>(InitExprs[Init]);
}
- Expr* getInit(unsigned Init) {
+ Expr *getInit(unsigned Init) {
assert(Init < getNumInits() && "Initializer access out of range!");
return cast_or_null<Expr>(InitExprs[Init]);
}
@@ -2933,17 +3264,18 @@ public:
HadArrayRangeDesignator = ARD;
}
- virtual SourceRange getSourceRange() const {
- return SourceRange(LBraceLoc, RBraceLoc);
- }
+ SourceRange getSourceRange() const;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == InitListExprClass;
}
static bool classof(const InitListExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ if (InitExprs.empty()) return child_range();
+ return child_range(&InitExprs[0], &InitExprs[0] + InitExprs.size());
+ }
typedef InitExprsTy::iterator iterator;
typedef InitExprsTy::const_iterator const_iterator;
@@ -3182,6 +3514,15 @@ public:
return Designators + NumDesignators;
}
+ typedef std::reverse_iterator<designators_iterator>
+ reverse_designators_iterator;
+ reverse_designators_iterator designators_rbegin() {
+ return reverse_designators_iterator(designators_end());
+ }
+ reverse_designators_iterator designators_rend() {
+ return reverse_designators_iterator(designators_begin());
+ }
+
Designator *getDesignator(unsigned Idx) { return &designators_begin()[Idx]; }
void setDesignators(ASTContext &C, const Designator *Desigs,
@@ -3235,7 +3576,7 @@ public:
void ExpandDesignator(ASTContext &C, unsigned Idx, const Designator *First,
const Designator *Last);
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == DesignatedInitExprClass;
@@ -3243,8 +3584,10 @@ public:
static bool classof(const DesignatedInitExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ Stmt **begin = reinterpret_cast<Stmt**>(this + 1);
+ return child_range(begin, begin + NumSubExprs);
+ }
};
/// \brief Represents an implicitly-generated value initialization of
@@ -3258,7 +3601,8 @@ public:
class ImplicitValueInitExpr : public Expr {
public:
explicit ImplicitValueInitExpr(QualType ty)
- : Expr(ImplicitValueInitExprClass, ty, false, false) { }
+ : Expr(ImplicitValueInitExprClass, ty, VK_RValue, OK_Ordinary,
+ false, false, false) { }
/// \brief Construct an empty implicit value initialization.
explicit ImplicitValueInitExpr(EmptyShell Empty)
@@ -3269,13 +3613,12 @@ public:
}
static bool classof(const ImplicitValueInitExpr *) { return true; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange();
}
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
@@ -3308,7 +3651,7 @@ public:
SourceLocation getLParenLoc() const { return LParenLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(LParenLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -3317,8 +3660,9 @@ public:
static bool classof(const ParenListExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0]+NumExprs);
+ }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -3342,10 +3686,12 @@ class ExtVectorElementExpr : public Expr {
IdentifierInfo *Accessor;
SourceLocation AccessorLoc;
public:
- ExtVectorElementExpr(QualType ty, Expr *base, IdentifierInfo &accessor,
- SourceLocation loc)
- : Expr(ExtVectorElementExprClass, ty, base->isTypeDependent(),
- base->isValueDependent()),
+ ExtVectorElementExpr(QualType ty, ExprValueKind VK, Expr *base,
+ IdentifierInfo &accessor, SourceLocation loc)
+ : Expr(ExtVectorElementExprClass, ty, VK,
+ (VK == VK_RValue ? OK_Ordinary : OK_VectorComponent),
+ base->isTypeDependent(), base->isValueDependent(),
+ base->containsUnexpandedParameterPack()),
Base(base), Accessor(&accessor), AccessorLoc(loc) {}
/// \brief Build an empty vector element expression.
@@ -3373,7 +3719,7 @@ public:
/// aggregate Constant of ConstantInt(s).
void getEncodedElementAccess(llvm::SmallVectorImpl<unsigned> &Elts) const;
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getBase()->getLocStart(), AccessorLoc);
}
@@ -3387,8 +3733,7 @@ public:
static bool classof(const ExtVectorElementExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Base, &Base+1); }
};
@@ -3397,11 +3742,11 @@ public:
class BlockExpr : public Expr {
protected:
BlockDecl *TheBlock;
- bool HasBlockDeclRefExprs;
public:
- BlockExpr(BlockDecl *BD, QualType ty, bool hasBlockDeclRefExprs)
- : Expr(BlockExprClass, ty, ty->isDependentType(), false),
- TheBlock(BD), HasBlockDeclRefExprs(hasBlockDeclRefExprs) {}
+ BlockExpr(BlockDecl *BD, QualType ty)
+ : Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), false, false),
+ TheBlock(BD) {}
/// \brief Build an empty block expression.
explicit BlockExpr(EmptyShell Empty) : Expr(BlockExprClass, Empty) { }
@@ -3415,58 +3760,46 @@ public:
const Stmt *getBody() const;
Stmt *getBody();
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getCaretLocation(), getBody()->getLocEnd());
}
/// getFunctionType - Return the underlying function type for this block.
const FunctionType *getFunctionType() const;
- /// hasBlockDeclRefExprs - Return true iff the block has BlockDeclRefExpr
- /// inside of the block that reference values outside the block.
- bool hasBlockDeclRefExprs() const { return HasBlockDeclRefExprs; }
- void setHasBlockDeclRefExprs(bool BDRE) { HasBlockDeclRefExprs = BDRE; }
-
static bool classof(const Stmt *T) {
return T->getStmtClass() == BlockExprClass;
}
static bool classof(const BlockExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
-/// BlockDeclRefExpr - A reference to a declared variable, function,
-/// enum, etc.
+/// BlockDeclRefExpr - A reference to a local variable declared in an
+/// enclosing scope.
class BlockDeclRefExpr : public Expr {
- ValueDecl *D;
+ VarDecl *D;
SourceLocation Loc;
bool IsByRef : 1;
bool ConstQualAdded : 1;
- Stmt *CopyConstructorVal;
public:
- // FIXME: Fix type/value dependence!
- BlockDeclRefExpr(ValueDecl *d, QualType t, SourceLocation l, bool ByRef,
- bool constAdded = false,
- Stmt *copyConstructorVal = 0)
- : Expr(BlockDeclRefExprClass, t, (!t.isNull() && t->isDependentType()),false),
- D(d), Loc(l), IsByRef(ByRef),
- ConstQualAdded(constAdded), CopyConstructorVal(copyConstructorVal) {}
+ BlockDeclRefExpr(VarDecl *d, QualType t, ExprValueKind VK,
+ SourceLocation l, bool ByRef, bool constAdded = false);
// \brief Build an empty reference to a declared variable in a
// block.
explicit BlockDeclRefExpr(EmptyShell Empty)
: Expr(BlockDeclRefExprClass, Empty) { }
- ValueDecl *getDecl() { return D; }
- const ValueDecl *getDecl() const { return D; }
- void setDecl(ValueDecl *VD) { D = VD; }
+ VarDecl *getDecl() { return D; }
+ const VarDecl *getDecl() const { return D; }
+ void setDecl(VarDecl *VD) { D = VD; }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
bool isByRef() const { return IsByRef; }
void setByRef(bool BR) { IsByRef = BR; }
@@ -3474,20 +3807,13 @@ public:
bool isConstQualAdded() const { return ConstQualAdded; }
void setConstQualAdded(bool C) { ConstQualAdded = C; }
- const Expr *getCopyConstructorExpr() const
- { return cast_or_null<Expr>(CopyConstructorVal); }
- Expr *getCopyConstructorExpr()
- { return cast_or_null<Expr>(CopyConstructorVal); }
- void setCopyConstructorExpr(Expr *E) { CopyConstructorVal = E; }
-
static bool classof(const Stmt *T) {
return T->getStmtClass() == BlockDeclRefExprClass;
}
static bool classof(const BlockDeclRefExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
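
A small sketch of the code that produces these nodes (requires -fblocks; the names are illustrative):

int x = 10;
__block int y = 0;
void (^blk)(void) = ^{ y = x; };
// Inside the block body, 'x' and 'y' are BlockDeclRefExprs; for 'y',
// isByRef() is true because of the __block storage qualifier.
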
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
index 0a94354..85ce962 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -21,11 +21,11 @@
namespace clang {
- class CXXConstructorDecl;
- class CXXDestructorDecl;
- class CXXMethodDecl;
- class CXXTemporary;
- class TemplateArgumentListInfo;
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CXXTemporary;
+class TemplateArgumentListInfo;
//===--------------------------------------------------------------------===//
// C++ Expressions.
@@ -51,8 +51,9 @@ class CXXOperatorCallExpr : public CallExpr {
public:
CXXOperatorCallExpr(ASTContext& C, OverloadedOperatorKind Op, Expr *fn,
Expr **args, unsigned numargs, QualType t,
- SourceLocation operatorloc)
- : CallExpr(C, CXXOperatorCallExprClass, fn, args, numargs, t, operatorloc),
+ ExprValueKind VK, SourceLocation operatorloc)
+ : CallExpr(C, CXXOperatorCallExprClass, fn, 0, args, numargs, t, VK,
+ operatorloc),
Operator(Op) {}
explicit CXXOperatorCallExpr(ASTContext& C, EmptyShell Empty) :
CallExpr(C, CXXOperatorCallExprClass, Empty) { }
@@ -70,7 +71,7 @@ public:
/// bracket.
SourceLocation getOperatorLoc() const { return getRParenLoc(); }
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXOperatorCallExprClass;
@@ -89,8 +90,8 @@ public:
class CXXMemberCallExpr : public CallExpr {
public:
CXXMemberCallExpr(ASTContext &C, Expr *fn, Expr **args, unsigned numargs,
- QualType t, SourceLocation rparenloc)
- : CallExpr(C, CXXMemberCallExprClass, fn, args, numargs, t, rparenloc) {}
+ QualType t, ExprValueKind VK, SourceLocation RP)
+ : CallExpr(C, CXXMemberCallExprClass, fn, 0, args, numargs, t, VK, RP) {}
CXXMemberCallExpr(ASTContext &C, EmptyShell Empty)
: CallExpr(C, CXXMemberCallExprClass, Empty) { }
@@ -100,7 +101,14 @@ public:
/// operation would return "x".
Expr *getImplicitObjectArgument();
- virtual SourceRange getSourceRange() const;
+ /// getRecordDecl - Retrieves the CXXRecordDecl for the underlying type of
+ /// the implicit object argument. Note that this may not be the same
+ /// declaration as that of the class context of the CXXMethodDecl which this
+ /// function is calling.
+ /// FIXME: Returns 0 for member pointer call exprs.
+ CXXRecordDecl *getRecordDecl();
+
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXMemberCallExprClass;
@@ -108,6 +116,35 @@ public:
static bool classof(const CXXMemberCallExpr *) { return true; }
};
+/// CUDAKernelCallExpr - Represents a call to a CUDA kernel function.
+class CUDAKernelCallExpr : public CallExpr {
+private:
+ enum { CONFIG, END_PREARG };
+
+public:
+ CUDAKernelCallExpr(ASTContext &C, Expr *fn, CallExpr *Config,
+ Expr **args, unsigned numargs, QualType t,
+ ExprValueKind VK, SourceLocation RP)
+ : CallExpr(C, CUDAKernelCallExprClass, fn, END_PREARG, args, numargs, t, VK,
+ RP) {
+ setConfig(Config);
+ }
+
+ CUDAKernelCallExpr(ASTContext &C, EmptyShell Empty)
+ : CallExpr(C, CUDAKernelCallExprClass, END_PREARG, Empty) { }
+
+ const CallExpr *getConfig() const {
+ return cast_or_null<CallExpr>(getPreArg(CONFIG));
+ }
+ CallExpr *getConfig() { return cast_or_null<CallExpr>(getPreArg(CONFIG)); }
+ void setConfig(CallExpr *E) { setPreArg(CONFIG, E); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CUDAKernelCallExprClass;
+ }
+ static bool classof(const CUDAKernelCallExpr *) { return true; }
+};
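
For context, a sketch of the CUDA launch syntax this node represents (the kernel and argument names are hypothetical):

__global__ void kern(int *devPtr);
kern<<<blocksPerGrid, threadsPerBlock>>>(devPtr);
// The <<<...>>> execution configuration is stored as the CONFIG
// pre-argument (itself a CallExpr, retrieved via getConfig());
// 'devPtr' is an ordinary call argument.
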
+
/// CXXNamedCastExpr - Abstract class common to all of the C++ "named"
/// casts, @c static_cast, @c dynamic_cast, @c reinterpret_cast, or @c
/// const_cast.
@@ -118,26 +155,33 @@ public:
class CXXNamedCastExpr : public ExplicitCastExpr {
private:
SourceLocation Loc; // the location of the casting op
-
+ SourceLocation RParenLoc; // the location of the right parenthesis
+
protected:
- CXXNamedCastExpr(StmtClass SC, QualType ty, CastKind kind, Expr *op,
- unsigned PathSize, TypeSourceInfo *writtenTy,
- SourceLocation l)
- : ExplicitCastExpr(SC, ty, kind, op, PathSize, writtenTy), Loc(l) {}
+ CXXNamedCastExpr(StmtClass SC, QualType ty, ExprValueKind VK,
+ CastKind kind, Expr *op, unsigned PathSize,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : ExplicitCastExpr(SC, ty, VK, kind, op, PathSize, writtenTy), Loc(l),
+ RParenLoc(RParenLoc) {}
explicit CXXNamedCastExpr(StmtClass SC, EmptyShell Shell, unsigned PathSize)
: ExplicitCastExpr(SC, Shell, PathSize) { }
+ friend class ASTStmtReader;
+
public:
const char *getCastName() const;
/// \brief Retrieve the location of the cast operator keyword, e.g.,
/// "static_cast".
SourceLocation getOperatorLoc() const { return Loc; }
- void setOperatorLoc(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const {
- return SourceRange(Loc, getSubExpr()->getSourceRange().getEnd());
+ /// \brief Retrieve the location of the closing parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(Loc, RParenLoc);
}
static bool classof(const Stmt *T) {
switch (T->getStmtClass()) {
@@ -158,20 +202,21 @@ public:
/// This expression node represents a C++ static cast, e.g.,
/// @c static_cast<int>(1.0).
class CXXStaticCastExpr : public CXXNamedCastExpr {
- CXXStaticCastExpr(QualType ty, CastKind kind, Expr *op,
+ CXXStaticCastExpr(QualType ty, ExprValueKind vk, CastKind kind, Expr *op,
unsigned pathSize, TypeSourceInfo *writtenTy,
- SourceLocation l)
- : CXXNamedCastExpr(CXXStaticCastExprClass, ty, kind, op, pathSize,
- writtenTy, l) {}
+ SourceLocation l, SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXStaticCastExprClass, ty, vk, kind, op, pathSize,
+ writtenTy, l, RParenLoc) {}
explicit CXXStaticCastExpr(EmptyShell Empty, unsigned PathSize)
: CXXNamedCastExpr(CXXStaticCastExprClass, Empty, PathSize) { }
public:
static CXXStaticCastExpr *Create(ASTContext &Context, QualType T,
- CastKind K, Expr *Op,
+ ExprValueKind VK, CastKind K, Expr *Op,
const CXXCastPath *Path,
- TypeSourceInfo *Written, SourceLocation L);
+ TypeSourceInfo *Written, SourceLocation L,
+ SourceLocation RParenLoc);
static CXXStaticCastExpr *CreateEmpty(ASTContext &Context,
unsigned PathSize);
@@ -188,20 +233,21 @@ public:
/// This expression node represents a dynamic cast, e.g.,
/// @c dynamic_cast<Derived*>(BasePtr).
class CXXDynamicCastExpr : public CXXNamedCastExpr {
- CXXDynamicCastExpr(QualType ty, CastKind kind, Expr *op,
- unsigned pathSize, TypeSourceInfo *writtenTy,
- SourceLocation l)
- : CXXNamedCastExpr(CXXDynamicCastExprClass, ty, kind, op, pathSize,
- writtenTy, l) {}
+ CXXDynamicCastExpr(QualType ty, ExprValueKind VK, CastKind kind,
+ Expr *op, unsigned pathSize, TypeSourceInfo *writtenTy,
+ SourceLocation l, SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXDynamicCastExprClass, ty, VK, kind, op, pathSize,
+ writtenTy, l, RParenLoc) {}
explicit CXXDynamicCastExpr(EmptyShell Empty, unsigned pathSize)
: CXXNamedCastExpr(CXXDynamicCastExprClass, Empty, pathSize) { }
public:
static CXXDynamicCastExpr *Create(ASTContext &Context, QualType T,
- CastKind Kind, Expr *Op,
+ ExprValueKind VK, CastKind Kind, Expr *Op,
const CXXCastPath *Path,
- TypeSourceInfo *Written, SourceLocation L);
+ TypeSourceInfo *Written, SourceLocation L,
+ SourceLocation RParenLoc);
static CXXDynamicCastExpr *CreateEmpty(ASTContext &Context,
unsigned pathSize);
@@ -219,20 +265,22 @@ public:
/// This expression node represents a reinterpret cast, e.g.,
/// @c reinterpret_cast<int>(VoidPtr).
class CXXReinterpretCastExpr : public CXXNamedCastExpr {
- CXXReinterpretCastExpr(QualType ty, CastKind kind, Expr *op,
- unsigned pathSize,
- TypeSourceInfo *writtenTy, SourceLocation l)
- : CXXNamedCastExpr(CXXReinterpretCastExprClass, ty, kind, op, pathSize,
- writtenTy, l) {}
+ CXXReinterpretCastExpr(QualType ty, ExprValueKind vk, CastKind kind,
+ Expr *op, unsigned pathSize,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXReinterpretCastExprClass, ty, vk, kind, op,
+ pathSize, writtenTy, l, RParenLoc) {}
CXXReinterpretCastExpr(EmptyShell Empty, unsigned pathSize)
: CXXNamedCastExpr(CXXReinterpretCastExprClass, Empty, pathSize) { }
public:
static CXXReinterpretCastExpr *Create(ASTContext &Context, QualType T,
- CastKind Kind, Expr *Op,
- const CXXCastPath *Path,
- TypeSourceInfo *WrittenTy, SourceLocation L);
+ ExprValueKind VK, CastKind Kind,
+ Expr *Op, const CXXCastPath *Path,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc);
static CXXReinterpretCastExpr *CreateEmpty(ASTContext &Context,
unsigned pathSize);
@@ -248,17 +296,20 @@ public:
/// This expression node represents a const cast, e.g.,
/// @c const_cast<char*>(PtrToConstChar).
class CXXConstCastExpr : public CXXNamedCastExpr {
- CXXConstCastExpr(QualType ty, Expr *op, TypeSourceInfo *writtenTy,
- SourceLocation l)
- : CXXNamedCastExpr(CXXConstCastExprClass, ty, CK_NoOp, op,
- 0, writtenTy, l) {}
+ CXXConstCastExpr(QualType ty, ExprValueKind VK, Expr *op,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXConstCastExprClass, ty, VK, CK_NoOp, op,
+ 0, writtenTy, l, RParenLoc) {}
explicit CXXConstCastExpr(EmptyShell Empty)
: CXXNamedCastExpr(CXXConstCastExprClass, Empty, 0) { }
public:
- static CXXConstCastExpr *Create(ASTContext &Context, QualType T, Expr *Op,
- TypeSourceInfo *WrittenTy, SourceLocation L);
+ static CXXConstCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, Expr *Op,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc);
static CXXConstCastExpr *CreateEmpty(ASTContext &Context);
static bool classof(const Stmt *T) {
@@ -274,7 +325,9 @@ class CXXBoolLiteralExpr : public Expr {
SourceLocation Loc;
public:
CXXBoolLiteralExpr(bool val, QualType Ty, SourceLocation l) :
- Expr(CXXBoolLiteralExprClass, Ty, false, false), Value(val), Loc(l) {}
+ Expr(CXXBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false),
+ Value(val), Loc(l) {}
explicit CXXBoolLiteralExpr(EmptyShell Empty)
: Expr(CXXBoolLiteralExprClass, Empty) { }
@@ -282,7 +335,7 @@ public:
bool getValue() const { return Value; }
void setValue(bool V) { Value = V; }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
@@ -293,8 +346,7 @@ public:
static bool classof(const CXXBoolLiteralExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// CXXNullPtrLiteralExpr - [C++0x 2.14.7] C++ Pointer Literal
@@ -302,12 +354,14 @@ class CXXNullPtrLiteralExpr : public Expr {
SourceLocation Loc;
public:
CXXNullPtrLiteralExpr(QualType Ty, SourceLocation l) :
- Expr(CXXNullPtrLiteralExprClass, Ty, false, false), Loc(l) {}
+ Expr(CXXNullPtrLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false),
+ Loc(l) {}
explicit CXXNullPtrLiteralExpr(EmptyShell Empty)
: Expr(CXXNullPtrLiteralExprClass, Empty) { }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
@@ -317,8 +371,7 @@ public:
}
static bool classof(const CXXNullPtrLiteralExpr *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// CXXTypeidExpr - A C++ @c typeid expression (C++ [expr.typeid]), which gets
@@ -333,19 +386,21 @@ private:
public:
CXXTypeidExpr(QualType Ty, TypeSourceInfo *Operand, SourceRange R)
- : Expr(CXXTypeidExprClass, Ty,
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
// typeid is never type-dependent (C++ [temp.dep.expr]p4)
false,
// typeid is value-dependent if the type or expression is dependent
- Operand->getType()->isDependentType()),
+ Operand->getType()->isDependentType(),
+ Operand->getType()->containsUnexpandedParameterPack()),
Operand(Operand), Range(R) { }
CXXTypeidExpr(QualType Ty, Expr *Operand, SourceRange R)
- : Expr(CXXTypeidExprClass, Ty,
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
// typeid is never type-dependent (C++ [temp.dep.expr]p4)
- false,
+ false,
// typeid is value-dependent if the type or expression is dependent
- Operand->isTypeDependent() || Operand->isValueDependent()),
+ Operand->isTypeDependent() || Operand->isValueDependent(),
+ Operand->containsUnexpandedParameterPack()),
Operand(Operand), Range(R) { }
CXXTypeidExpr(EmptyShell Empty, bool isExpr)
@@ -383,7 +438,7 @@ public:
Operand = E;
}
- virtual SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const { return Range; }
void setSourceRange(SourceRange R) { Range = R; }
static bool classof(const Stmt *T) {
@@ -392,8 +447,84 @@ public:
static bool classof(const CXXTypeidExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ if (isTypeOperand()) return child_range();
+ Stmt **begin = reinterpret_cast<Stmt**>(&Operand);
+ return child_range(begin, begin + 1);
+ }
+};
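
Both operand forms tracked by the PointerUnion above occur in ordinary code, e.g. (a sketch; 'Base' and 'b' are hypothetical):

#include <typeinfo>
const std::type_info &t1 = typeid(int);  // type operand: isTypeOperand()
const std::type_info &t2 = typeid(*b);   // expression operand ('b' a Base*)
// Either way the result is an lvalue, matching VK_LValue above; the
// second form is value-dependent when the operand's type is dependent.
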
+
+/// CXXUuidofExpr - A Microsoft C++ @c __uuidof expression, which gets
+/// the _GUID that corresponds to the supplied type or expression.
+///
+/// This represents code like @c __uuidof(COMTYPE) or @c __uuidof(*comPtr)
+class CXXUuidofExpr : public Expr {
+private:
+ llvm::PointerUnion<Stmt *, TypeSourceInfo *> Operand;
+ SourceRange Range;
+
+public:
+ CXXUuidofExpr(QualType Ty, TypeSourceInfo *Operand, SourceRange R)
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary,
+ false, Operand->getType()->isDependentType(),
+ Operand->getType()->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXUuidofExpr(QualType Ty, Expr *Operand, SourceRange R)
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary,
+ false, Operand->isTypeDependent(),
+ Operand->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXUuidofExpr(EmptyShell Empty, bool isExpr)
+ : Expr(CXXUuidofExprClass, Empty) {
+ if (isExpr)
+ Operand = (Expr*)0;
+ else
+ Operand = (TypeSourceInfo*)0;
+ }
+
+ bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
+
+ /// \brief Retrieves the type operand of this __uuidof() expression after
+ /// various required adjustments (removing reference types, cv-qualifiers).
+ QualType getTypeOperand() const;
+
+ /// \brief Retrieve source information for the type operand.
+ TypeSourceInfo *getTypeOperandSourceInfo() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ return Operand.get<TypeSourceInfo *>();
+ }
+
+ void setTypeOperandSourceInfo(TypeSourceInfo *TSI) {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ Operand = TSI;
+ }
+
+ Expr *getExprOperand() const {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
+ return static_cast<Expr*>(Operand.get<Stmt *>());
+ }
+
+ void setExprOperand(Expr *E) {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
+ Operand = E;
+ }
+
+ SourceRange getSourceRange() const { return Range; }
+ void setSourceRange(SourceRange R) { Range = R; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXUuidofExprClass;
+ }
+ static bool classof(const CXXUuidofExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (isTypeOperand()) return child_range();
+ Stmt **begin = reinterpret_cast<Stmt**>(&Operand);
+ return child_range(begin, begin + 1);
+ }
};
/// CXXThisExpr - Represents the "this" expression in C++, which is a
@@ -413,10 +544,11 @@ class CXXThisExpr : public Expr {
public:
CXXThisExpr(SourceLocation L, QualType Type, bool isImplicit)
- : Expr(CXXThisExprClass, Type,
+ : Expr(CXXThisExprClass, Type, VK_RValue, OK_Ordinary,
// 'this' is type-dependent if the class type of the enclosing
// member function is dependent (C++ [temp.dep.expr]p2)
- Type->isDependentType(), Type->isDependentType()),
+ Type->isDependentType(), Type->isDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
Loc(L), Implicit(isImplicit) { }
CXXThisExpr(EmptyShell Empty) : Expr(CXXThisExprClass, Empty) {}
@@ -424,7 +556,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const { return SourceRange(Loc); }
bool isImplicit() const { return Implicit; }
void setImplicit(bool I) { Implicit = I; }
@@ -435,8 +567,7 @@ public:
static bool classof(const CXXThisExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// CXXThrowExpr - [C++ 15] C++ Throw Expression. This handles
@@ -451,7 +582,9 @@ public:
// expression. The l is the location of the throw keyword. expr
// can be null, if the optional expression to throw isn't present.
CXXThrowExpr(Expr *expr, QualType Ty, SourceLocation l) :
- Expr(CXXThrowExprClass, Ty, false, false), Op(expr), ThrowLoc(l) {}
+ Expr(CXXThrowExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ expr && expr->containsUnexpandedParameterPack()),
+ Op(expr), ThrowLoc(l) {}
CXXThrowExpr(EmptyShell Empty) : Expr(CXXThrowExprClass, Empty) {}
const Expr *getSubExpr() const { return cast_or_null<Expr>(Op); }
@@ -461,7 +594,7 @@ public:
SourceLocation getThrowLoc() const { return ThrowLoc; }
void setThrowLoc(SourceLocation L) { ThrowLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
if (getSubExpr() == 0)
return SourceRange(ThrowLoc, ThrowLoc);
return SourceRange(ThrowLoc, getSubExpr()->getSourceRange().getEnd());
@@ -473,8 +606,9 @@ public:
static bool classof(const CXXThrowExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&Op, Op ? &Op+1 : &Op);
+ }
};
/// CXXDefaultArgExpr - C++ [dcl.fct.default]. This wraps up a
@@ -497,12 +631,16 @@ class CXXDefaultArgExpr : public Expr {
param->hasUnparsedDefaultArg()
? param->getType().getNonReferenceType()
: param->getDefaultArg()->getType(),
- false, false),
+ param->getDefaultArg()->getValueKind(),
+ param->getDefaultArg()->getObjectKind(), false, false, false),
Param(param, false), Loc(Loc) { }
CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param,
Expr *SubExpr)
- : Expr(SC, SubExpr->getType(), false, false), Param(param, true), Loc(Loc) {
+ : Expr(SC, SubExpr->getType(),
+ SubExpr->getValueKind(), SubExpr->getObjectKind(),
+ false, false, false),
+ Param(param, true), Loc(Loc) {
*reinterpret_cast<Expr **>(this + 1) = SubExpr;
}
@@ -544,7 +682,7 @@ public:
/// used.
SourceLocation getUsedLocation() const { return Loc; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
// Default argument expressions have no representation in the
// source, so they have an empty source range.
return SourceRange();
@@ -556,8 +694,7 @@ public:
static bool classof(const CXXDefaultArgExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -597,9 +734,12 @@ class CXXBindTemporaryExpr : public Expr {
Stmt *SubExpr;
- CXXBindTemporaryExpr(CXXTemporary *temp, Expr* subexpr)
- : Expr(CXXBindTemporaryExprClass, subexpr->getType(), false, false),
- Temp(temp), SubExpr(subexpr) { }
+ CXXBindTemporaryExpr(CXXTemporary *temp, Expr* SubExpr)
+ : Expr(CXXBindTemporaryExprClass, SubExpr->getType(),
+ VK_RValue, OK_Ordinary, SubExpr->isTypeDependent(),
+ SubExpr->isValueDependent(),
+ SubExpr->containsUnexpandedParameterPack()),
+ Temp(temp), SubExpr(SubExpr) { }
public:
CXXBindTemporaryExpr(EmptyShell Empty)
@@ -616,7 +756,7 @@ public:
Expr *getSubExpr() { return cast<Expr>(SubExpr); }
void setSubExpr(Expr *E) { SubExpr = E; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SubExpr->getSourceRange();
}
@@ -627,8 +767,7 @@ public:
static bool classof(const CXXBindTemporaryExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
};
/// CXXConstructExpr - Represents a call to a C++ constructor.
@@ -644,6 +783,7 @@ private:
CXXConstructorDecl *Constructor;
SourceLocation Loc;
+ SourceRange ParenRange;
bool Elidable : 1;
bool ZeroInitialization : 1;
unsigned ConstructKind : 2;
@@ -656,7 +796,8 @@ protected:
CXXConstructorDecl *d, bool elidable,
Expr **args, unsigned numargs,
bool ZeroInitialization = false,
- ConstructionKind ConstructKind = CK_Complete);
+ ConstructionKind ConstructKind = CK_Complete,
+ SourceRange ParenRange = SourceRange());
/// \brief Construct an empty C++ construction expression.
CXXConstructExpr(StmtClass SC, EmptyShell Empty)
@@ -675,7 +816,8 @@ public:
CXXConstructorDecl *D, bool Elidable,
Expr **Args, unsigned NumArgs,
bool ZeroInitialization = false,
- ConstructionKind ConstructKind = CK_Complete);
+ ConstructionKind ConstructKind = CK_Complete,
+ SourceRange ParenRange = SourceRange());
CXXConstructorDecl* getConstructor() const { return Constructor; }
@@ -731,7 +873,8 @@ public:
Args[Arg] = ArgExpr;
}
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
+ SourceRange getParenRange() const { return ParenRange; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXConstructExprClass ||
@@ -740,8 +883,9 @@ public:
static bool classof(const CXXConstructExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&Args[0], &Args[0]+NumArgs);
+ }
friend class ASTStmtReader;
};
@@ -753,12 +897,13 @@ class CXXFunctionalCastExpr : public ExplicitCastExpr {
SourceLocation TyBeginLoc;
SourceLocation RParenLoc;
- CXXFunctionalCastExpr(QualType ty, TypeSourceInfo *writtenTy,
+ CXXFunctionalCastExpr(QualType ty, ExprValueKind VK,
+ TypeSourceInfo *writtenTy,
SourceLocation tyBeginLoc, CastKind kind,
Expr *castExpr, unsigned pathSize,
SourceLocation rParenLoc)
- : ExplicitCastExpr(CXXFunctionalCastExprClass, ty, kind, castExpr,
- pathSize, writtenTy),
+ : ExplicitCastExpr(CXXFunctionalCastExprClass, ty, VK, kind,
+ castExpr, pathSize, writtenTy),
TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {}
explicit CXXFunctionalCastExpr(EmptyShell Shell, unsigned PathSize)
@@ -766,6 +911,7 @@ class CXXFunctionalCastExpr : public ExplicitCastExpr {
public:
static CXXFunctionalCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
TypeSourceInfo *Written,
SourceLocation TyBeginLoc,
CastKind Kind, Expr *Op,
@@ -779,7 +925,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(TyBeginLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -804,24 +950,21 @@ public:
/// };
/// @endcode
class CXXTemporaryObjectExpr : public CXXConstructExpr {
- SourceLocation TyBeginLoc;
- SourceLocation RParenLoc;
+ TypeSourceInfo *Type;
public:
CXXTemporaryObjectExpr(ASTContext &C, CXXConstructorDecl *Cons,
- QualType writtenTy, SourceLocation tyBeginLoc,
+ TypeSourceInfo *Type,
Expr **Args,unsigned NumArgs,
- SourceLocation rParenLoc,
+ SourceRange parenRange,
bool ZeroInitialization = false);
explicit CXXTemporaryObjectExpr(EmptyShell Empty)
- : CXXConstructExpr(CXXTemporaryObjectExprClass, Empty) { }
+ : CXXConstructExpr(CXXTemporaryObjectExprClass, Empty), Type() { }
- SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
- SourceLocation getRParenLoc() const { return RParenLoc; }
+ TypeSourceInfo *getTypeSourceInfo() const { return Type; }
- virtual SourceRange getSourceRange() const {
- return SourceRange(TyBeginLoc, RParenLoc);
- }
+ SourceRange getSourceRange() const;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTemporaryObjectExprClass;
}
@@ -835,32 +978,31 @@ public:
/// T, which is a non-class type.
///
class CXXScalarValueInitExpr : public Expr {
- SourceLocation TyBeginLoc;
SourceLocation RParenLoc;
+ TypeSourceInfo *TypeInfo;
+ friend class ASTStmtReader;
+
public:
- CXXScalarValueInitExpr(QualType ty, SourceLocation tyBeginLoc,
- SourceLocation rParenLoc ) :
- Expr(CXXScalarValueInitExprClass, ty, false, false),
- TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {}
+ /// \brief Create an explicitly-written scalar-value initialization
+ /// expression.
+ CXXScalarValueInitExpr(QualType Type,
+ TypeSourceInfo *TypeInfo,
+ SourceLocation rParenLoc ) :
+ Expr(CXXScalarValueInitExprClass, Type, VK_RValue, OK_Ordinary,
+ false, false, false),
+ RParenLoc(rParenLoc), TypeInfo(TypeInfo) {}
+
explicit CXXScalarValueInitExpr(EmptyShell Shell)
: Expr(CXXScalarValueInitExprClass, Shell) { }
- SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
- SourceLocation getRParenLoc() const { return RParenLoc; }
-
- void setTypeBeginLoc(SourceLocation L) { TyBeginLoc = L; }
- void setRParenLoc(SourceLocation L) { RParenLoc = L; }
-
- /// @brief Whether this initialization expression was
- /// implicitly-generated.
- bool isImplicit() const {
- return TyBeginLoc.isInvalid() && RParenLoc.isInvalid();
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TypeInfo;
}
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
- virtual SourceRange getSourceRange() const {
- return SourceRange(TyBeginLoc, RParenLoc);
- }
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXScalarValueInitExprClass;
@@ -868,8 +1010,7 @@ public:
static bool classof(const CXXScalarValueInitExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
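
A minimal example of the construct (illustrative):

int n = int();        // CXXScalarValueInitExpr; 'n' becomes 0
double d = double();  // likewise; TypeInfo preserves the written type
// The node covers only T(); its range runs from the written type
// through the right parenthesis returned by getRParenLoc().
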
/// CXXNewExpr - A new expression for memory allocation and constructor calls,
@@ -882,8 +1023,11 @@ class CXXNewExpr : public Expr {
bool Initializer : 1;
// Do we allocate an array? If so, the first SubExpr is the size expression.
bool Array : 1;
+ // If this is an array allocation, does the usual deallocation
+ // function for the allocated type want to know the allocated size?
+ bool UsualArrayDeleteWantsSize : 1;
// The number of placement new arguments.
- unsigned NumPlacementArgs : 15;
+ unsigned NumPlacementArgs : 14;
// The number of constructor arguments. This may be 1 even for non-class
// types; use the pseudo copy constructor.
unsigned NumConstructorArgs : 14;
@@ -900,12 +1044,17 @@ class CXXNewExpr : public Expr {
// Must be null for all other types.
CXXConstructorDecl *Constructor;
+ /// \brief The allocated type-source information, as written in the source.
+ TypeSourceInfo *AllocatedTypeInfo;
+
/// \brief If the allocated type was expressed as a parenthesized type-id,
/// the source range covering the parenthesized type-id.
SourceRange TypeIdParens;
SourceLocation StartLoc;
SourceLocation EndLoc;
+ SourceLocation ConstructorLParen;
+ SourceLocation ConstructorRParen;
friend class ASTStmtReader;
public:
@@ -914,8 +1063,11 @@ public:
SourceRange TypeIdParens,
Expr *arraySize, CXXConstructorDecl *constructor, bool initializer,
Expr **constructorArgs, unsigned numConsArgs,
- FunctionDecl *operatorDelete, QualType ty,
- SourceLocation startLoc, SourceLocation endLoc);
+ FunctionDecl *operatorDelete, bool usualArrayDeleteWantsSize,
+ QualType ty, TypeSourceInfo *AllocatedTypeInfo,
+ SourceLocation startLoc, SourceLocation endLoc,
+ SourceLocation constructorLParen,
+ SourceLocation constructorRParen);
explicit CXXNewExpr(EmptyShell Shell)
: Expr(CXXNewExprClass, Shell), SubExprs(0) { }
@@ -927,6 +1079,10 @@ public:
return getType()->getAs<PointerType>()->getPointeeType();
}
+ TypeSourceInfo *getAllocatedTypeSourceInfo() const {
+ return AllocatedTypeInfo;
+ }
+
FunctionDecl *getOperatorNew() const { return OperatorNew; }
void setOperatorNew(FunctionDecl *D) { OperatorNew = D; }
FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
@@ -943,6 +1099,10 @@ public:
}
unsigned getNumPlacementArgs() const { return NumPlacementArgs; }
+ Expr **getPlacementArgs() {
+ return reinterpret_cast<Expr **>(SubExprs + Array);
+ }
+
Expr *getPlacementArg(unsigned i) {
assert(i < NumPlacementArgs && "Index out of range");
return cast<Expr>(SubExprs[Array + i]);
@@ -956,11 +1116,21 @@ public:
SourceRange getTypeIdParens() const { return TypeIdParens; }
bool isGlobalNew() const { return GlobalNew; }
- void setGlobalNew(bool V) { GlobalNew = V; }
bool hasInitializer() const { return Initializer; }
- void setHasInitializer(bool V) { Initializer = V; }
+
+ /// Answers whether the usual array deallocation function for the
+ /// allocated type expects the size of the allocation as a
+ /// parameter.
+ bool doesUsualArrayDeleteWantSize() const {
+ return UsualArrayDeleteWantsSize;
+ }
unsigned getNumConstructorArgs() const { return NumConstructorArgs; }
+
+ Expr **getConstructorArgs() {
+ return reinterpret_cast<Expr **>(SubExprs + Array + NumPlacementArgs);
+ }
+
Expr *getConstructorArg(unsigned i) {
assert(i < NumConstructorArgs && "Index out of range");
return cast<Expr>(SubExprs[Array + NumPlacementArgs + i]);
@@ -1007,13 +1177,13 @@ public:
const_arg_iterator raw_arg_begin() const { return SubExprs; }
const_arg_iterator raw_arg_end() const { return constructor_arg_end(); }
-
SourceLocation getStartLoc() const { return StartLoc; }
- void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
- void setEndLoc(SourceLocation L) { EndLoc = L; }
-
- virtual SourceRange getSourceRange() const {
+
+ SourceLocation getConstructorLParen() const { return ConstructorLParen; }
+ SourceLocation getConstructorRParen() const { return ConstructorRParen; }
+
+ SourceRange getSourceRange() const {
return SourceRange(StartLoc, EndLoc);
}
@@ -1023,8 +1193,11 @@ public:
static bool classof(const CXXNewExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0],
+ &SubExprs[0] + Array + getNumPlacementArgs()
+ + getNumConstructorArgs());
+ }
};
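
To make the SubExprs layout concrete, a hedged sketch ('buf' and 'n' are hypothetical; placement new needs <new>):

#include <new>
int *a = new int(42);       // one constructor argument, no placement args
int *b = new (buf) int[n];  // Array is true, so SubExprs[0] is the size
                            // 'n'; the placement arg 'buf' follows at
                            // SubExprs + Array, as getPlacementArgs() shows.
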
/// CXXDeleteExpr - A delete expression for memory deallocation and destructor
@@ -1034,6 +1207,13 @@ class CXXDeleteExpr : public Expr {
bool GlobalDelete : 1;
// Is this the array form of delete, i.e. "delete[]"?
bool ArrayForm : 1;
+ // ArrayFormAsWritten can be different from ArrayForm if 'delete' is
+ // applied to a pointer-to-array type (ArrayFormAsWritten will be false
+ // while ArrayForm will be true).
+ bool ArrayFormAsWritten : 1;
+ // Does the usual deallocation function for the element type require
+ // a size_t argument?
+ bool UsualArrayDeleteWantsSize : 1;
// Points to the operator delete overload that is used. Could be a member.
FunctionDecl *OperatorDelete;
// The pointer expression to be deleted.
@@ -1042,30 +1222,42 @@ class CXXDeleteExpr : public Expr {
SourceLocation Loc;
public:
CXXDeleteExpr(QualType ty, bool globalDelete, bool arrayForm,
+ bool arrayFormAsWritten, bool usualArrayDeleteWantsSize,
FunctionDecl *operatorDelete, Expr *arg, SourceLocation loc)
- : Expr(CXXDeleteExprClass, ty, false, false), GlobalDelete(globalDelete),
- ArrayForm(arrayForm), OperatorDelete(operatorDelete), Argument(arg),
- Loc(loc) { }
+ : Expr(CXXDeleteExprClass, ty, VK_RValue, OK_Ordinary, false, false,
+ arg->containsUnexpandedParameterPack()),
+ GlobalDelete(globalDelete),
+ ArrayForm(arrayForm), ArrayFormAsWritten(arrayFormAsWritten),
+ UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize),
+ OperatorDelete(operatorDelete), Argument(arg), Loc(loc) { }
explicit CXXDeleteExpr(EmptyShell Shell)
: Expr(CXXDeleteExprClass, Shell), OperatorDelete(0), Argument(0) { }
bool isGlobalDelete() const { return GlobalDelete; }
bool isArrayForm() const { return ArrayForm; }
-
- void setGlobalDelete(bool V) { GlobalDelete = V; }
- void setArrayForm(bool V) { ArrayForm = V; }
+ bool isArrayFormAsWritten() const { return ArrayFormAsWritten; }
+
+ /// Answers whether the usual array deallocation function for the
+ /// allocated type expects the size of the allocation as a
+ /// parameter. This can be true even if the actual deallocation
+ /// function that we're using doesn't want a size.
+ bool doesUsualArrayDeleteWantSize() const {
+ return UsualArrayDeleteWantsSize;
+ }
FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
- void setOperatorDelete(FunctionDecl *D) { OperatorDelete = D; }
Expr *getArgument() { return cast<Expr>(Argument); }
const Expr *getArgument() const { return cast<Expr>(Argument); }
- void setArgument(Expr *E) { Argument = E; }
- virtual SourceRange getSourceRange() const {
+ /// \brief Retrieve the type being destroyed. If the type being
+ /// destroyed is a dependent type which may or may not be a pointer,
+ /// return an invalid type.
+ QualType getDestroyedType() const;
+
+ SourceRange getSourceRange() const {
return SourceRange(Loc, Argument->getLocEnd());
}
- void setStartLoc(SourceLocation L) { Loc = L; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDeleteExprClass;
@@ -1073,8 +1265,9 @@ public:
static bool classof(const CXXDeleteExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Argument, &Argument+1); }
+
+ friend class ASTStmtReader;
};
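
The ArrayForm/ArrayFormAsWritten split above can be seen in a sketch like this (names illustrative):

typedef int Buf[8];
Buf *p = new Buf[2];  // array new; 'p' has pointer-to-array type
delete p;             // written without '[]', but the pointee is an
                      // array type: isArrayFormAsWritten() is false
                      // while isArrayForm() is true.
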
/// \brief Structure used to store the type being destroyed by a
@@ -1171,21 +1364,7 @@ public:
TypeSourceInfo *ScopeType,
SourceLocation ColonColonLoc,
SourceLocation TildeLoc,
- PseudoDestructorTypeStorage DestroyedType)
- : Expr(CXXPseudoDestructorExprClass,
- Context.getPointerType(Context.getFunctionType(Context.VoidTy, 0, 0,
- false, 0, false,
- false, 0, 0,
- FunctionType::ExtInfo())),
- /*isTypeDependent=*/(Base->isTypeDependent() ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
- /*isValueDependent=*/Base->isValueDependent()),
- Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
- OperatorLoc(OperatorLoc), Qualifier(Qualifier),
- QualifierRange(QualifierRange),
- ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
- DestroyedType(DestroyedType) { }
+ PseudoDestructorTypeStorage DestroyedType);
explicit CXXPseudoDestructorExpr(EmptyShell Shell)
: Expr(CXXPseudoDestructorExprClass, Shell),
@@ -1278,7 +1457,7 @@ public:
DestroyedType = PseudoDestructorTypeStorage(Info);
}
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXPseudoDestructorExprClass;
@@ -1286,8 +1465,7 @@ public:
static bool classof(const CXXPseudoDestructorExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Base, &Base + 1); }
};
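
For reference, the source form this models (a sketch; 'Int' is a hypothetical typedef):

typedef int Int;
void destroy(Int *p) {
  p->~Int();  // pseudo-destructor call: parsed like a member call on a
}             // non-class type; evaluating it has no effect.
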
/// UnaryTypeTraitExpr - A GCC or MS unary type trait, as used in the
@@ -1296,8 +1474,10 @@ public:
/// __is_pod(int) == true
/// __is_enum(std::string) == false
class UnaryTypeTraitExpr : public Expr {
- /// UTT - The trait.
- UnaryTypeTrait UTT;
+ /// UTT - The trait. A UnaryTypeTrait enum stored as an unsigned
+ /// bit-field for MSVC compatibility.
+ unsigned UTT : 31;
+ /// The value of the type trait. Unspecified if dependent.
+ bool Value : 1;
/// Loc - The location of the type trait keyword.
SourceLocation Loc;
@@ -1305,25 +1485,31 @@ class UnaryTypeTraitExpr : public Expr {
/// RParen - The location of the closing paren.
SourceLocation RParen;
- /// QueriedType - The type we're testing.
- QualType QueriedType;
+ /// The type being queried.
+ TypeSourceInfo *QueriedType;
public:
- UnaryTypeTraitExpr(SourceLocation loc, UnaryTypeTrait utt, QualType queried,
+ UnaryTypeTraitExpr(SourceLocation loc, UnaryTypeTrait utt,
+ TypeSourceInfo *queried, bool value,
SourceLocation rparen, QualType ty)
- : Expr(UnaryTypeTraitExprClass, ty, false, queried->isDependentType()),
- UTT(utt), Loc(loc), RParen(rparen), QueriedType(queried) { }
+ : Expr(UnaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary,
+ false, queried->getType()->isDependentType(),
+ queried->getType()->containsUnexpandedParameterPack()),
+ UTT(utt), Value(value), Loc(loc), RParen(rparen), QueriedType(queried) { }
explicit UnaryTypeTraitExpr(EmptyShell Empty)
- : Expr(UnaryTypeTraitExprClass, Empty), UTT((UnaryTypeTrait)0) { }
+ : Expr(UnaryTypeTraitExprClass, Empty), UTT(0), Value(false),
+ QueriedType() { }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc, RParen);}
+ SourceRange getSourceRange() const { return SourceRange(Loc, RParen);}
- UnaryTypeTrait getTrait() const { return UTT; }
+ UnaryTypeTrait getTrait() const { return static_cast<UnaryTypeTrait>(UTT); }
- QualType getQueriedType() const { return QueriedType; }
+ QualType getQueriedType() const { return QueriedType->getType(); }
- bool EvaluateTrait(ASTContext&) const;
+ TypeSourceInfo *getQueriedTypeSourceInfo() const { return QueriedType; }
+
+ bool getValue() const { return Value; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryTypeTraitExprClass;
@@ -1331,8 +1517,74 @@ public:
static bool classof(const UnaryTypeTraitExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+};
+
+/// BinaryTypeTraitExpr - A GCC or MS binary type trait, as used in the
+/// implementation of TR1/C++0x type trait templates.
+/// Example:
+/// __is_base_of(Base, Derived) == true
+class BinaryTypeTraitExpr : public Expr {
+ /// BTT - The trait. A BinaryTypeTrait enum stored as an unsigned
+ /// bit-field for MSVC compatibility.
+ unsigned BTT : 8;
+
+ /// The value of the type trait. Unspecified if dependent.
+ bool Value : 1;
+
+ /// Loc - The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// RParen - The location of the closing paren.
+ SourceLocation RParen;
+
+ /// The lhs type being queried.
+ TypeSourceInfo *LhsType;
+
+ /// The rhs type being queried.
+ TypeSourceInfo *RhsType;
+
+public:
+ BinaryTypeTraitExpr(SourceLocation loc, BinaryTypeTrait btt,
+ TypeSourceInfo *lhsType, TypeSourceInfo *rhsType,
+ bool value, SourceLocation rparen, QualType ty)
+ : Expr(BinaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary, false,
+ lhsType->getType()->isDependentType() ||
+ rhsType->getType()->isDependentType(),
+ (lhsType->getType()->containsUnexpandedParameterPack() ||
+ rhsType->getType()->containsUnexpandedParameterPack())),
+ BTT(btt), Value(value), Loc(loc), RParen(rparen),
+ LhsType(lhsType), RhsType(rhsType) { }
+
+
+ explicit BinaryTypeTraitExpr(EmptyShell Empty)
+ : Expr(BinaryTypeTraitExprClass, Empty), BTT(0), Value(false),
+ LhsType(), RhsType() { }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(Loc, RParen);
+ }
+
+ BinaryTypeTrait getTrait() const {
+ return static_cast<BinaryTypeTrait>(BTT);
+ }
+
+ QualType getLhsType() const { return LhsType->getType(); }
+ QualType getRhsType() const { return RhsType->getType(); }
+
+ TypeSourceInfo *getLhsTypeSourceInfo() const { return LhsType; }
+ TypeSourceInfo *getRhsTypeSourceInfo() const { return RhsType; }
+
+ bool getValue() const { assert(!isTypeDependent()); return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BinaryTypeTraitExprClass;
+ }
+ static bool classof(const BinaryTypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
friend class ASTStmtReader;
};
@@ -1360,23 +1612,23 @@ protected:
/// True if the name was a template-id.
bool HasExplicitTemplateArgs;
- OverloadExpr(StmtClass K, ASTContext &C, QualType T, bool Dependent,
+ OverloadExpr(StmtClass K, ASTContext &C,
NestedNameSpecifier *Qualifier, SourceRange QRange,
const DeclarationNameInfo &NameInfo,
- bool HasTemplateArgs,
- UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End,
+ bool KnownDependent = false,
+ bool KnownContainsUnexpandedParameterPack = false);
OverloadExpr(StmtClass K, EmptyShell Empty)
: Expr(K, Empty), Results(0), NumResults(0),
Qualifier(0), HasExplicitTemplateArgs(false) { }
-public:
- /// Computes whether an unresolved lookup on the given declarations
- /// and optional template arguments is type- and value-dependent.
- static bool ComputeDependence(UnresolvedSetIterator Begin,
- UnresolvedSetIterator End,
- const TemplateArgumentListInfo *Args);
+ void initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End);
+public:
struct FindResult {
OverloadExpr *Expression;
bool IsAddressOfOperand;
@@ -1420,9 +1672,6 @@ public:
return UnresolvedSetIterator(Results + NumResults);
}
- void initializeResults(ASTContext &C,
- UnresolvedSetIterator Begin,UnresolvedSetIterator End);
-
/// Gets the number of declarations in the unresolved set.
unsigned getNumDecls() const { return NumResults; }
@@ -1469,6 +1718,9 @@ public:
T->getStmtClass() == UnresolvedMemberExprClass;
}
static bool classof(const OverloadExpr *) { return true; }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
/// \brief A reference to a name which we were able to look up during
@@ -1498,14 +1750,15 @@ class UnresolvedLookupExpr : public OverloadExpr {
/// against the qualified-lookup bits.
CXXRecordDecl *NamingClass;
- UnresolvedLookupExpr(ASTContext &C, QualType T, bool Dependent,
+ UnresolvedLookupExpr(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifier *Qualifier, SourceRange QRange,
const DeclarationNameInfo &NameInfo,
- bool RequiresADL, bool Overloaded, bool HasTemplateArgs,
+ bool RequiresADL, bool Overloaded,
+ const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End)
- : OverloadExpr(UnresolvedLookupExprClass, C, T, Dependent, Qualifier,
- QRange, NameInfo, HasTemplateArgs, Begin, End),
+ : OverloadExpr(UnresolvedLookupExprClass, C, Qualifier, QRange, NameInfo,
+ TemplateArgs, Begin, End),
RequiresADL(RequiresADL), Overloaded(Overloaded), NamingClass(NamingClass)
{}
@@ -1516,7 +1769,6 @@ class UnresolvedLookupExpr : public OverloadExpr {
public:
static UnresolvedLookupExpr *Create(ASTContext &C,
- bool Dependent,
CXXRecordDecl *NamingClass,
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
@@ -1524,16 +1776,12 @@ public:
bool ADL, bool Overloaded,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End) {
- return new(C) UnresolvedLookupExpr(C,
- Dependent ? C.DependentTy : C.OverloadTy,
- Dependent, NamingClass,
- Qualifier, QualifierRange, NameInfo,
- ADL, Overloaded, false,
- Begin, End);
+ return new(C) UnresolvedLookupExpr(C, NamingClass, Qualifier,
+ QualifierRange, NameInfo, ADL,
+ Overloaded, 0, Begin, End);
}
static UnresolvedLookupExpr *Create(ASTContext &C,
- bool Dependent,
CXXRecordDecl *NamingClass,
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
@@ -1544,6 +1792,7 @@ public:
UnresolvedSetIterator End);
static UnresolvedLookupExpr *CreateEmpty(ASTContext &C,
+ bool HasExplicitTemplateArgs,
unsigned NumTemplateArgs);
/// True if this declaration should be extended by
@@ -1606,15 +1855,14 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
SourceRange Range(getNameInfo().getSourceRange());
if (getQualifier()) Range.setBegin(getQualifierRange().getBegin());
if (hasExplicitTemplateArgs()) Range.setEnd(getRAngleLoc());
return Range;
}
- virtual StmtIterator child_begin();
- virtual StmtIterator child_end();
+ child_range children() { return child_range(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnresolvedLookupExprClass;
@@ -1655,11 +1903,7 @@ class DependentScopeDeclRefExpr : public Expr {
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
const DeclarationNameInfo &NameInfo,
- bool HasExplicitTemplateArgs)
- : Expr(DependentScopeDeclRefExprClass, T, true, true),
- NameInfo(NameInfo), QualifierRange(QualifierRange), Qualifier(Qualifier),
- HasExplicitTemplateArgs(HasExplicitTemplateArgs)
- {}
+ const TemplateArgumentListInfo *Args);
public:
static DependentScopeDeclRefExpr *Create(ASTContext &C,
@@ -1669,6 +1913,7 @@ public:
const TemplateArgumentListInfo *TemplateArgs = 0);
static DependentScopeDeclRefExpr *CreateEmpty(ASTContext &C,
+ bool HasExplicitTemplateArgs,
unsigned NumTemplateArgs);
/// \brief Retrieve the name that this expression refers to.
@@ -1740,7 +1985,7 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
SourceRange Range(QualifierRange.getBegin(), getLocation());
if (hasExplicitTemplateArgs())
Range.setEnd(getRAngleLoc());
@@ -1752,25 +1997,32 @@ public:
}
static bool classof(const DependentScopeDeclRefExpr *) { return true; }
- virtual StmtIterator child_begin();
- virtual StmtIterator child_end();
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
-class CXXExprWithTemporaries : public Expr {
+/// Represents an expression --- generally a full-expression --- which
+/// introduces cleanups to be run at the end of the sub-expression's
+/// evaluation. The most common source of expression-introduced
+/// cleanups is temporary objects in C++, but several other C++
+/// expressions can create cleanups.
+class ExprWithCleanups : public Expr {
Stmt *SubExpr;
CXXTemporary **Temps;
unsigned NumTemps;
- CXXExprWithTemporaries(ASTContext &C, Expr *SubExpr, CXXTemporary **Temps,
- unsigned NumTemps);
-
+ ExprWithCleanups(ASTContext &C, Expr *SubExpr,
+ CXXTemporary **Temps, unsigned NumTemps);
+
public:
- CXXExprWithTemporaries(EmptyShell Empty)
- : Expr(CXXExprWithTemporariesClass, Empty),
+ ExprWithCleanups(EmptyShell Empty)
+ : Expr(ExprWithCleanupsClass, Empty),
SubExpr(0), Temps(0), NumTemps(0) {}
- static CXXExprWithTemporaries *Create(ASTContext &C, Expr *SubExpr,
+ static ExprWithCleanups *Create(ASTContext &C, Expr *SubExpr,
CXXTemporary **Temps,
unsigned NumTemps);
@@ -1782,7 +2034,7 @@ public:
return Temps[i];
}
const CXXTemporary *getTemporary(unsigned i) const {
- return const_cast<CXXExprWithTemporaries*>(this)->getTemporary(i);
+ return const_cast<ExprWithCleanups*>(this)->getTemporary(i);
}
void setTemporary(unsigned i, CXXTemporary *T) {
assert(i < NumTemps && "Index out of range");
@@ -1793,19 +2045,18 @@ public:
const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
void setSubExpr(Expr *E) { SubExpr = E; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SubExpr->getSourceRange();
}
// Implement isa/cast/dyncast/etc.
static bool classof(const Stmt *T) {
- return T->getStmtClass() == CXXExprWithTemporariesClass;
+ return T->getStmtClass() == ExprWithCleanupsClass;
}
- static bool classof(const CXXExprWithTemporaries *) { return true; }
+ static bool classof(const ExprWithCleanups *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
};
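
A short sketch of a full-expression that introduces cleanups (assuming <string>; purely illustrative):

#include <string>
std::string s = std::string("hello") + ", world";
// The initializer creates temporaries with non-trivial destructors; the
// whole expression is wrapped in an ExprWithCleanups whose CXXTemporary
// entries schedule those destructors to run at the end of the
// full-expression.
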
/// \brief Describes an explicit type conversion that uses functional
@@ -1830,12 +2081,9 @@ public:
/// constructor call, conversion function call, or some kind of type
/// conversion.
class CXXUnresolvedConstructExpr : public Expr {
- /// \brief The starting location of the type
- SourceLocation TyBeginLoc;
-
/// \brief The type being constructed.
- QualType Type;
-
+ TypeSourceInfo *Type;
+
/// \brief The location of the left parentheses ('(').
SourceLocation LParenLoc;
@@ -1845,20 +2093,20 @@ class CXXUnresolvedConstructExpr : public Expr {
/// \brief The number of arguments used to construct the type.
unsigned NumArgs;
- CXXUnresolvedConstructExpr(SourceLocation TyBegin,
- QualType T,
+ CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
Expr **Args,
unsigned NumArgs,
SourceLocation RParenLoc);
CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
- : Expr(CXXUnresolvedConstructExprClass, Empty), NumArgs(NumArgs) { }
+ : Expr(CXXUnresolvedConstructExprClass, Empty), Type(), NumArgs(NumArgs) { }
+ friend class ASTStmtReader;
+
public:
static CXXUnresolvedConstructExpr *Create(ASTContext &C,
- SourceLocation TyBegin,
- QualType T,
+ TypeSourceInfo *Type,
SourceLocation LParenLoc,
Expr **Args,
unsigned NumArgs,
@@ -1867,15 +2115,14 @@ public:
static CXXUnresolvedConstructExpr *CreateEmpty(ASTContext &C,
unsigned NumArgs);
- /// \brief Retrieve the source location where the type begins.
- SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
- void setTypeBeginLoc(SourceLocation L) { TyBeginLoc = L; }
-
/// \brief Retrieve the type that is being constructed, as specified
/// in the source code.
- QualType getTypeAsWritten() const { return Type; }
- void setTypeAsWritten(QualType T) { Type = T; }
+ QualType getTypeAsWritten() const { return Type->getType(); }
+ /// \brief Retrieve the type source information for the type being
+ /// constructed.
+ TypeSourceInfo *getTypeSourceInfo() const { return Type; }
+
/// \brief Retrieve the location of the left parentheses ('(') that
/// precedes the argument list.
SourceLocation getLParenLoc() const { return LParenLoc; }
@@ -1916,17 +2163,18 @@ public:
*(arg_begin() + I) = E;
}
- virtual SourceRange getSourceRange() const {
- return SourceRange(TyBeginLoc, RParenLoc);
- }
+ SourceRange getSourceRange() const;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXUnresolvedConstructExprClass;
}
static bool classof(const CXXUnresolvedConstructExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ Stmt **begin = reinterpret_cast<Stmt**>(this+1);
+ return child_range(begin, begin + NumArgs);
+ }
};
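
A sketch of the dependent construct this class models (the template parameter name is illustrative):

template<typename T> T make() {
  return T(1, 2);  // 'T' is dependent, so the functional cast or
}                  // construction cannot be resolved yet: it becomes a
                   // CXXUnresolvedConstructExpr whose two arguments are
                   // stored directly after the object, as children() shows.
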
/// \brief Represents a C++ member access expression where the actual
@@ -1987,19 +2235,13 @@ class CXXDependentScopeMemberExpr : public Expr {
public:
CXXDependentScopeMemberExpr(ASTContext &C,
- Expr *Base, QualType BaseType,
- bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifier *Qualifier,
- SourceRange QualifierRange,
- NamedDecl *FirstQualifierFoundInScope,
- DeclarationNameInfo MemberNameInfo)
- : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, true, true),
- Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasExplicitTemplateArgs(false), OperatorLoc(OperatorLoc),
- Qualifier(Qualifier), QualifierRange(QualifierRange),
- FirstQualifierFoundInScope(FirstQualifierFoundInScope),
- MemberNameInfo(MemberNameInfo) { }
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo);
static CXXDependentScopeMemberExpr *
Create(ASTContext &C,
@@ -2012,7 +2254,8 @@ public:
const TemplateArgumentListInfo *TemplateArgs);
static CXXDependentScopeMemberExpr *
- CreateEmpty(ASTContext &C, unsigned NumTemplateArgs);
+ CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ unsigned NumTemplateArgs);
/// \brief True if this is an implicit access, i.e. one in which the
/// member being accessed was not written in the source. The source
@@ -2147,7 +2390,7 @@ public:
return getExplicitTemplateArgs().RAngleLoc;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
SourceRange Range;
if (!isImplicitAccess())
Range.setBegin(Base->getSourceRange().getBegin());
@@ -2169,8 +2412,13 @@ public:
static bool classof(const CXXDependentScopeMemberExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ if (isImplicitAccess()) return child_range();
+ return child_range(&Base, &Base + 1);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
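
Illustrative source for this node (names hypothetical):

template<typename T> void touch(T &obj) {
  obj.update();  // 'obj' has dependent type, so 'update' cannot be looked
}                // up yet; the access 'obj.update' is represented by a
                 // CXXDependentScopeMemberExpr until instantiation.
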
/// \brief Represents a C++ member access expression for which lookup
@@ -2206,8 +2454,7 @@ class UnresolvedMemberExpr : public OverloadExpr {
/// \brief The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
- UnresolvedMemberExpr(ASTContext &C, QualType T, bool Dependent,
- bool HasUnresolvedUsing,
+ UnresolvedMemberExpr(ASTContext &C, bool HasUnresolvedUsing,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifier *Qualifier,
@@ -2222,7 +2469,7 @@ class UnresolvedMemberExpr : public OverloadExpr {
public:
static UnresolvedMemberExpr *
- Create(ASTContext &C, bool Dependent, bool HasUnresolvedUsing,
+ Create(ASTContext &C, bool HasUnresolvedUsing,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifier *Qualifier,
@@ -2232,7 +2479,8 @@ public:
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
static UnresolvedMemberExpr *
- CreateEmpty(ASTContext &C, unsigned NumTemplateArgs);
+ CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ unsigned NumTemplateArgs);
/// \brief True if this is an implicit access, i.e. one in which the
/// member being accessed was not written in the source. The source
@@ -2337,7 +2585,7 @@ public:
return getExplicitTemplateArgs().RAngleLoc;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
SourceRange Range = getMemberNameInfo().getSourceRange();
if (!isImplicitAccess())
Range.setBegin(Base->getSourceRange().getBegin());
@@ -2355,10 +2603,130 @@ public:
static bool classof(const UnresolvedMemberExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ if (isImplicitAccess()) return child_range();
+ return child_range(&Base, &Base + 1);
+ }
+};
+
+/// \brief Represents a C++0x noexcept expression (C++ [expr.unary.noexcept]).
+///
+/// The noexcept expression tests whether a given expression might throw. Its
+/// result is a boolean constant.
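+///
+/// For example (an illustrative sketch):
+/// \code
+///   void f() throw();
+///   bool b = noexcept(f()); // true: a call to f() cannot throw
+/// \endcode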
+class CXXNoexceptExpr : public Expr {
+ bool Value : 1;
+ Stmt *Operand;
+ SourceRange Range;
+
+ friend class ASTStmtReader;
+
+public:
+ CXXNoexceptExpr(QualType Ty, Expr *Operand, CanThrowResult Val,
+ SourceLocation Keyword, SourceLocation RParen)
+ : Expr(CXXNoexceptExprClass, Ty, VK_RValue, OK_Ordinary,
+ /*TypeDependent*/false,
+ /*ValueDependent*/Val == CT_Dependent,
+ Operand->containsUnexpandedParameterPack()),
+ Value(Val == CT_Cannot), Operand(Operand), Range(Keyword, RParen)
+ { }
+
+ CXXNoexceptExpr(EmptyShell Empty)
+ : Expr(CXXNoexceptExprClass, Empty)
+ { }
+
+ Expr *getOperand() const { return static_cast<Expr*>(Operand); }
+
+ SourceRange getSourceRange() const { return Range; }
+
+ bool getValue() const { return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXNoexceptExprClass;
+ }
+ static bool classof(const CXXNoexceptExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Operand, &Operand + 1); }
};
+/// \brief Represents a C++0x pack expansion that produces a sequence of
+/// expressions.
+///
+/// A pack expansion expression contains a pattern (which itself is an
+/// expression) followed by an ellipsis. For example:
+///
+/// \code
+/// template<typename F, typename ...Types>
+/// void forward(F f, Types &&...args) {
+/// f(static_cast<Types&&>(args)...);
+/// }
+/// \endcode
+///
+/// Here, the argument to the function object \c f is a pack expansion whose
+/// pattern is \c static_cast<Types&&>(args). When the \c forward function
+/// template is instantiated, the pack expansion will instantiate to zero or
+/// more function arguments to the function object \c f.
+class PackExpansionExpr : public Expr {
+ SourceLocation EllipsisLoc;
+
+ /// \brief The number of expansions that will be produced by this pack
+ /// expansion expression, if known.
+ ///
+ /// When zero, the number of expansions is not known. Otherwise, this value
+ /// is the number of expansions + 1.
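+  /// For example, a stored value of 4 means that 3 expansions are known,
+  /// while a stored value of 0 means the count is unknown.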
+ unsigned NumExpansions;
+
+ Stmt *Pattern;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ PackExpansionExpr(QualType T, Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions)
+ : Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
+ Pattern->getObjectKind(), /*TypeDependent=*/true,
+ /*ValueDependent=*/true, /*ContainsUnexpandedParameterPack=*/false),
+ EllipsisLoc(EllipsisLoc),
+ NumExpansions(NumExpansions? *NumExpansions + 1 : 0),
+ Pattern(Pattern) { }
+
+ PackExpansionExpr(EmptyShell Empty) : Expr(PackExpansionExprClass, Empty) { }
+
+ /// \brief Retrieve the pattern of the pack expansion.
+ Expr *getPattern() { return reinterpret_cast<Expr *>(Pattern); }
+
+ /// \brief Retrieve the pattern of the pack expansion.
+ const Expr *getPattern() const { return reinterpret_cast<Expr *>(Pattern); }
+
+ /// \brief Retrieve the location of the ellipsis that describes this pack
+ /// expansion.
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+
+ /// \brief Determine the number of expansions that will be produced when
+ /// this pack expansion is instantiated, if already known.
+ llvm::Optional<unsigned> getNumExpansions() const {
+ if (NumExpansions)
+ return NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+ }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(Pattern->getLocStart(), EllipsisLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == PackExpansionExprClass;
+ }
+ static bool classof(const PackExpansionExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Pattern, &Pattern + 1);
+ }
+};
+
inline ExplicitTemplateArgumentList &OverloadExpr::getExplicitTemplateArgs() {
if (isa<UnresolvedLookupExpr>(this))
return cast<UnresolvedLookupExpr>(this)->getExplicitTemplateArgs();
@@ -2366,6 +2734,159 @@ inline ExplicitTemplateArgumentList &OverloadExpr::getExplicitTemplateArgs() {
return cast<UnresolvedMemberExpr>(this)->getExplicitTemplateArgs();
}
+/// \brief Represents an expression that computes the length of a parameter
+/// pack.
+///
+/// \code
+/// template<typename ...Types>
+/// struct count {
+/// static const unsigned value = sizeof...(Types);
+/// };
+/// \endcode
+class SizeOfPackExpr : public Expr {
+ /// \brief The location of the 'sizeof' keyword.
+ SourceLocation OperatorLoc;
+
+ /// \brief The location of the name of the parameter pack.
+ SourceLocation PackLoc;
+
+ /// \brief The location of the closing parenthesis.
+ SourceLocation RParenLoc;
+
+ /// \brief The length of the parameter pack, if known.
+ ///
+ /// When this expression is value-dependent, the length of the parameter pack
+ /// is unknown. When this expression is not value-dependent, the length is
+ /// known.
+ unsigned Length;
+
+ /// \brief The parameter pack itself.
+ NamedDecl *Pack;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ /// \brief Creates a value-dependent expression that computes the length of
+ /// the given parameter pack.
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc, SourceLocation RParenLoc)
+ : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/true,
+ /*ContainsUnexpandedParameterPack=*/false),
+ OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
+ Length(0), Pack(Pack) { }
+
+ /// \brief Creates an expression that computes the length of
+ /// the given parameter pack, which is already known.
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc, SourceLocation RParenLoc,
+ unsigned Length)
+ : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
+ Length(Length), Pack(Pack) { }
+
+ /// \brief Create an empty expression.
+ SizeOfPackExpr(EmptyShell Empty) : Expr(SizeOfPackExprClass, Empty) { }
+
+ /// \brief Determine the location of the 'sizeof' keyword.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+
+ /// \brief Determine the location of the parameter pack.
+ SourceLocation getPackLoc() const { return PackLoc; }
+
+ /// \brief Determine the location of the right parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ /// \brief Retrieve the parameter pack.
+ NamedDecl *getPack() const { return Pack; }
+
+ /// \brief Retrieve the length of the parameter pack.
+ ///
+ /// This routine may only be invoked when the expression is not
+ /// value-dependent.
+ unsigned getPackLength() const {
+ assert(!isValueDependent() &&
+ "Cannot get the length of a value-dependent pack size expression");
+ return Length;
+ }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(OperatorLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SizeOfPackExprClass;
+ }
+ static bool classof(const SizeOfPackExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// \brief Represents a reference to a non-type template parameter pack that
+/// has been substituted with a non-template argument pack.
+///
+/// When a pack expansion in the source code contains multiple parameter packs
+/// and those parameter packs correspond to different levels of template
+/// parameter lists, this node is used to represent a non-type template
+/// parameter pack from an outer level, which has already had its argument pack
+/// substituted but that still lives within a pack expansion that itself
+/// could not be instantiated. When actually performing a substitution into
+/// that pack expansion (e.g., when all template parameters have corresponding
+/// arguments), this node will be replaced with the appropriate underlying
+/// expression at the current pack substitution index.
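+///
+/// An illustrative sketch (the function \c g is hypothetical):
+/// \code
+///   template<int ...Ns> struct Outer {
+///     template<typename ...Ts> static void f(Ts ...args) {
+///       g((args + Ns)...); // once Outer's arguments are substituted while
+///                          // 'args' is still unexpanded, the reference to
+///                          // 'Ns' is represented by this node
+///     }
+///   };
+/// \endcode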
+class SubstNonTypeTemplateParmPackExpr : public Expr {
+ /// \brief The non-type template parameter pack itself.
+ NonTypeTemplateParmDecl *Param;
+
+ /// \brief A pointer to the set of template arguments that this
+ /// parameter pack is instantiated with.
+ const TemplateArgument *Arguments;
+
+ /// \brief The number of template arguments in \c Arguments.
+ unsigned NumArguments;
+
+ /// \brief The location of the non-type template parameter pack reference.
+ SourceLocation NameLoc;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack);
+
+ SubstNonTypeTemplateParmPackExpr(EmptyShell Empty)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, Empty) { }
+
+ /// \brief Retrieve the non-type template parameter pack being substituted.
+ NonTypeTemplateParmDecl *getParameterPack() const { return Param; }
+
+ /// \brief Retrieve the location of the parameter pack name.
+ SourceLocation getParameterPackLocation() const { return NameLoc; }
+
+ /// \brief Retrieve the template argument pack containing the substituted
+ /// template arguments.
+ TemplateArgument getArgumentPack() const;
+
+ SourceRange getSourceRange() const { return NameLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SubstNonTypeTemplateParmPackExprClass;
+ }
+ static bool classof(const SubstNonTypeTemplateParmPackExpr *) {
+ return true;
+ }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
index 8a09f4e..285efb7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
@@ -14,14 +14,13 @@
#ifndef LLVM_CLANG_AST_EXPROBJC_H
#define LLVM_CLANG_AST_EXPROBJC_H
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/IdentifierTable.h"
namespace clang {
class IdentifierInfo;
class ASTContext;
- class ObjCMethodDecl;
- class ObjCPropertyDecl;
/// ObjCStringLiteral, used for Objective-C string literals
/// i.e. @"foo".
@@ -30,7 +29,9 @@ class ObjCStringLiteral : public Expr {
SourceLocation AtLoc;
public:
ObjCStringLiteral(StringLiteral *SL, QualType T, SourceLocation L)
- : Expr(ObjCStringLiteralClass, T, false, false), String(SL), AtLoc(L) {}
+ : Expr(ObjCStringLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false),
+ String(SL), AtLoc(L) {}
explicit ObjCStringLiteral(EmptyShell Empty)
: Expr(ObjCStringLiteralClass, Empty) {}
@@ -41,7 +42,7 @@ public:
SourceLocation getAtLoc() const { return AtLoc; }
void setAtLoc(SourceLocation L) { AtLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtLoc, String->getLocEnd());
}
@@ -51,8 +52,7 @@ public:
static bool classof(const ObjCStringLiteral *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&String, &String+1); }
};
/// ObjCEncodeExpr, used for @encode in Objective-C. @encode has the same type
@@ -64,8 +64,10 @@ class ObjCEncodeExpr : public Expr {
public:
ObjCEncodeExpr(QualType T, TypeSourceInfo *EncodedType,
SourceLocation at, SourceLocation rp)
- : Expr(ObjCEncodeExprClass, T, EncodedType->getType()->isDependentType(),
- EncodedType->getType()->isDependentType()),
+ : Expr(ObjCEncodeExprClass, T, VK_LValue, OK_Ordinary,
+ EncodedType->getType()->isDependentType(),
+ EncodedType->getType()->isDependentType(),
+ EncodedType->getType()->containsUnexpandedParameterPack()),
EncodedType(EncodedType), AtLoc(at), RParenLoc(rp) {}
explicit ObjCEncodeExpr(EmptyShell Empty) : Expr(ObjCEncodeExprClass, Empty){}
@@ -83,7 +85,7 @@ public:
EncodedType = EncType;
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtLoc, RParenLoc);
}
@@ -93,8 +95,7 @@ public:
static bool classof(const ObjCEncodeExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// ObjCSelectorExpr used for @selector in Objective-C.
@@ -104,8 +105,9 @@ class ObjCSelectorExpr : public Expr {
public:
ObjCSelectorExpr(QualType T, Selector selInfo,
SourceLocation at, SourceLocation rp)
- : Expr(ObjCSelectorExprClass, T, false, false), SelName(selInfo), AtLoc(at),
- RParenLoc(rp){}
+ : Expr(ObjCSelectorExprClass, T, VK_RValue, OK_Ordinary, false, false,
+ false),
+ SelName(selInfo), AtLoc(at), RParenLoc(rp){}
explicit ObjCSelectorExpr(EmptyShell Empty)
: Expr(ObjCSelectorExprClass, Empty) {}
@@ -117,7 +119,7 @@ public:
void setAtLoc(SourceLocation L) { AtLoc = L; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtLoc, RParenLoc);
}
@@ -130,8 +132,7 @@ public:
static bool classof(const ObjCSelectorExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// ObjCProtocolExpr used for protocol expression in Objective-C. This is used
@@ -144,8 +145,9 @@ class ObjCProtocolExpr : public Expr {
public:
ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol,
SourceLocation at, SourceLocation rp)
- : Expr(ObjCProtocolExprClass, T, false, false), TheProtocol(protocol),
- AtLoc(at), RParenLoc(rp) {}
+ : Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary, false, false,
+ false),
+ TheProtocol(protocol), AtLoc(at), RParenLoc(rp) {}
explicit ObjCProtocolExpr(EmptyShell Empty)
: Expr(ObjCProtocolExprClass, Empty) {}
@@ -157,7 +159,7 @@ public:
void setAtLoc(SourceLocation L) { AtLoc = L; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtLoc, RParenLoc);
}
@@ -167,8 +169,7 @@ public:
static bool classof(const ObjCProtocolExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// ObjCIvarRefExpr - A reference to an ObjC instance variable.
@@ -180,13 +181,13 @@ class ObjCIvarRefExpr : public Expr {
bool IsFreeIvar:1; // True if ivar reference has no base (self assumed).
public:
- ObjCIvarRefExpr(ObjCIvarDecl *d,
- QualType t, SourceLocation l, Expr *base,
+ ObjCIvarRefExpr(ObjCIvarDecl *d, QualType t,
+ SourceLocation l, Expr *base,
bool arrow = false, bool freeIvar = false) :
- Expr(ObjCIvarRefExprClass, t, /*TypeDependent=*/false,
- base->isValueDependent()), D(d),
- Loc(l), Base(base), IsArrow(arrow),
- IsFreeIvar(freeIvar) {}
+ Expr(ObjCIvarRefExprClass, t, VK_LValue, OK_Ordinary,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ base->containsUnexpandedParameterPack()),
+ D(d), Loc(l), Base(base), IsArrow(arrow), IsFreeIvar(freeIvar) {}
explicit ObjCIvarRefExpr(EmptyShell Empty)
: Expr(ObjCIvarRefExprClass, Empty) {}
@@ -207,7 +208,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return isFreeIvar() ? SourceRange(Loc)
: SourceRange(getBase()->getLocStart(), Loc);
}
@@ -218,8 +219,7 @@ public:
static bool classof(const ObjCIvarRefExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Base, &Base+1); }
};
/// ObjCPropertyRefExpr - A dot-syntax expression to access an ObjC
@@ -227,32 +227,127 @@ public:
///
class ObjCPropertyRefExpr : public Expr {
private:
- ObjCPropertyDecl *AsProperty;
+ /// If the bool is true, this is an implicit property reference; the
+ /// pointer is an (optional) ObjCMethodDecl and Setter may be set.
+  /// If the bool is false, this is an explicit property reference;
+ /// the pointer is an ObjCPropertyDecl and Setter is always null.
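+  ///
+  /// An illustrative Objective-C sketch:
+  /// \code
+  ///   @interface Widget
+  ///   @property int size;       // explicit property
+  ///   - (int)count;             // implicit property, via this getter...
+  ///   - (void)setCount:(int)c;  // ...and this setter
+  ///   @end
+  ///   w.size;   // explicit property reference
+  ///   w.count;  // implicit property reference
+  /// \endcode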
+ llvm::PointerIntPair<NamedDecl*, 1, bool> PropertyOrGetter;
+ ObjCMethodDecl *Setter;
+
SourceLocation IdLoc;
- Stmt *Base;
+
+  /// \brief When the receiver in the property access is 'super', this is
+  /// the location of the 'super' keyword; when the receiver is an
+  /// interface, \c Receiver holds that interface.
+ SourceLocation ReceiverLoc;
+ llvm::PointerUnion3<Stmt*, const Type*, ObjCInterfaceDecl*> Receiver;
+
public:
ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
SourceLocation l, Expr *base)
- : Expr(ObjCPropertyRefExprClass, t, /*TypeDependent=*/false,
- base->isValueDependent()),
- AsProperty(PD), IdLoc(l), Base(base) {
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ base->containsUnexpandedParameterPack()),
+ PropertyOrGetter(PD, false), Setter(0),
+ IdLoc(l), ReceiverLoc(), Receiver(base) {
+ }
+
+ ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, SourceLocation sl, QualType st)
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK,
+ /*TypeDependent=*/false, false,
+ st->containsUnexpandedParameterPack()),
+ PropertyOrGetter(PD, false), Setter(0),
+ IdLoc(l), ReceiverLoc(sl), Receiver(st.getTypePtr()) {
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc, Expr *Base)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false,
+ Base->isValueDependent(),
+ Base->containsUnexpandedParameterPack()),
+ PropertyOrGetter(Getter, true), Setter(Setter),
+ IdLoc(IdLoc), ReceiverLoc(), Receiver(Base) {
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc,
+ SourceLocation SuperLoc, QualType SuperTy)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false),
+ PropertyOrGetter(Getter, true), Setter(Setter),
+ IdLoc(IdLoc), ReceiverLoc(SuperLoc), Receiver(SuperTy.getTypePtr()) {
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc,
+ SourceLocation ReceiverLoc, ObjCInterfaceDecl *Receiver)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false),
+ PropertyOrGetter(Getter, true), Setter(Setter),
+ IdLoc(IdLoc), ReceiverLoc(ReceiverLoc), Receiver(Receiver) {
}
explicit ObjCPropertyRefExpr(EmptyShell Empty)
: Expr(ObjCPropertyRefExprClass, Empty) {}
- ObjCPropertyDecl *getProperty() const { return AsProperty; }
- void setProperty(ObjCPropertyDecl *D) { AsProperty = D; }
+ bool isImplicitProperty() const { return PropertyOrGetter.getInt(); }
+ bool isExplicitProperty() const { return !PropertyOrGetter.getInt(); }
- const Expr *getBase() const { return cast<Expr>(Base); }
- Expr *getBase() { return cast<Expr>(Base); }
- void setBase(Expr *base) { Base = base; }
+ ObjCPropertyDecl *getExplicitProperty() const {
+ assert(!isImplicitProperty());
+ return cast<ObjCPropertyDecl>(PropertyOrGetter.getPointer());
+ }
+
+ ObjCMethodDecl *getImplicitPropertyGetter() const {
+ assert(isImplicitProperty());
+ return cast_or_null<ObjCMethodDecl>(PropertyOrGetter.getPointer());
+ }
+
+ ObjCMethodDecl *getImplicitPropertySetter() const {
+ assert(isImplicitProperty());
+ return Setter;
+ }
+
+ Selector getGetterSelector() const {
+ if (isImplicitProperty())
+ return getImplicitPropertyGetter()->getSelector();
+ return getExplicitProperty()->getGetterName();
+ }
+
+ Selector getSetterSelector() const {
+ if (isImplicitProperty())
+ return getImplicitPropertySetter()->getSelector();
+ return getExplicitProperty()->getSetterName();
+ }
+
+ const Expr *getBase() const {
+ return cast<Expr>(Receiver.get<Stmt*>());
+ }
+ Expr *getBase() {
+ return cast<Expr>(Receiver.get<Stmt*>());
+ }
SourceLocation getLocation() const { return IdLoc; }
- void setLocation(SourceLocation L) { IdLoc = L; }
+
+ SourceLocation getReceiverLocation() const { return ReceiverLoc; }
+ QualType getSuperReceiverType() const {
+ return QualType(Receiver.get<const Type*>(), 0);
+ }
+ ObjCInterfaceDecl *getClassReceiver() const {
+ return Receiver.get<ObjCInterfaceDecl*>();
+ }
+ bool isObjectReceiver() const { return Receiver.is<Stmt*>(); }
+ bool isSuperReceiver() const { return Receiver.is<const Type*>(); }
+ bool isClassReceiver() const { return Receiver.is<ObjCInterfaceDecl*>(); }
- virtual SourceRange getSourceRange() const {
- return SourceRange(getBase()->getLocStart(), IdLoc);
+ SourceRange getSourceRange() const {
+ return SourceRange((isObjectReceiver() ? getBase()->getLocStart()
+ : getReceiverLocation()),
+ IdLoc);
}
static bool classof(const Stmt *T) {
@@ -261,89 +356,32 @@ public:
static bool classof(const ObjCPropertyRefExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
-};
-
-/// ObjCImplicitSetterGetterRefExpr - A dot-syntax expression to access two
-/// methods; one to set a value to an 'ivar' (Setter) and the other to access
-/// an 'ivar' (Setter).
-/// An example for use of this AST is:
-/// @code
-/// @interface Test { }
-/// - (Test *)crash;
-/// - (void)setCrash: (Test*)value;
-/// @end
-/// void foo(Test *p1, Test *p2)
-/// {
-/// p2.crash = p1.crash; // Uses ObjCImplicitSetterGetterRefExpr AST
-/// }
-/// @endcode
-class ObjCImplicitSetterGetterRefExpr : public Expr {
- /// Setter - Setter method user declared for setting its 'ivar' to a value
- ObjCMethodDecl *Setter;
- /// Getter - Getter method user declared for accessing 'ivar' it controls.
- ObjCMethodDecl *Getter;
- /// Location of the member in the dot syntax notation. This is location
- /// of the getter method.
- SourceLocation MemberLoc;
- // FIXME: Swizzle these into a single pointer.
- Stmt *Base;
- ObjCInterfaceDecl *InterfaceDecl;
- /// Location of the receiver class in the dot syntax notation
- /// used to call a class method setter/getter.
- SourceLocation ClassLoc;
-
-public:
- ObjCImplicitSetterGetterRefExpr(ObjCMethodDecl *getter,
- QualType t,
- ObjCMethodDecl *setter,
- SourceLocation l, Expr *base)
- : Expr(ObjCImplicitSetterGetterRefExprClass, t, /*TypeDependent=*/false,
- base->isValueDependent()),
- Setter(setter), Getter(getter), MemberLoc(l), Base(base),
- InterfaceDecl(0), ClassLoc(SourceLocation()) {
- }
- ObjCImplicitSetterGetterRefExpr(ObjCMethodDecl *getter,
- QualType t,
- ObjCMethodDecl *setter,
- SourceLocation l, ObjCInterfaceDecl *C, SourceLocation CL)
- : Expr(ObjCImplicitSetterGetterRefExprClass, t, false, false),
- Setter(setter), Getter(getter), MemberLoc(l), Base(0), InterfaceDecl(C),
- ClassLoc(CL) {
+ child_range children() {
+ if (Receiver.is<Stmt*>()) {
+      // Hack: when a PointerUnion holds its first pointer type, its tag
+      // bits are zero, so &Receiver can be reinterpreted as a Stmt**.
+      Stmt **begin = reinterpret_cast<Stmt**>(&Receiver);
+ return child_range(begin, begin+1);
}
- explicit ObjCImplicitSetterGetterRefExpr(EmptyShell Empty)
- : Expr(ObjCImplicitSetterGetterRefExprClass, Empty){}
-
- ObjCMethodDecl *getGetterMethod() const { return Getter; }
- ObjCMethodDecl *getSetterMethod() const { return Setter; }
- ObjCInterfaceDecl *getInterfaceDecl() const { return InterfaceDecl; }
- void setGetterMethod(ObjCMethodDecl *D) { Getter = D; }
- void setSetterMethod(ObjCMethodDecl *D) { Setter = D; }
- void setInterfaceDecl(ObjCInterfaceDecl *D) { InterfaceDecl = D; }
-
- virtual SourceRange getSourceRange() const {
- if (Base)
- return SourceRange(getBase()->getLocStart(), MemberLoc);
- return SourceRange(ClassLoc, MemberLoc);
- }
- const Expr *getBase() const { return cast_or_null<Expr>(Base); }
- Expr *getBase() { return cast_or_null<Expr>(Base); }
- void setBase(Expr *base) { Base = base; }
-
- SourceLocation getLocation() const { return MemberLoc; }
- void setLocation(SourceLocation L) { MemberLoc = L; }
- SourceLocation getClassLoc() const { return ClassLoc; }
- void setClassLoc(SourceLocation L) { ClassLoc = L; }
+ return child_range();
+ }
- static bool classof(const Stmt *T) {
- return T->getStmtClass() == ObjCImplicitSetterGetterRefExprClass;
+private:
+ friend class ASTStmtReader;
+ void setExplicitProperty(ObjCPropertyDecl *D) {
+ PropertyOrGetter.setPointer(D);
+ PropertyOrGetter.setInt(false);
+ Setter = 0;
+ }
+ void setImplicitProperty(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter) {
+ PropertyOrGetter.setPointer(Getter);
+ PropertyOrGetter.setInt(true);
+ this->Setter = Setter;
}
- static bool classof(const ObjCImplicitSetterGetterRefExpr *) { return true; }
+ void setBase(Expr *Base) { Receiver = Base; }
+ void setSuperReceiver(QualType T) { Receiver = T.getTypePtr(); }
+ void setClassReceiver(ObjCInterfaceDecl *D) { Receiver = D; }
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ void setLocation(SourceLocation L) { IdLoc = L; }
+ void setReceiverLocation(SourceLocation Loc) { ReceiverLoc = Loc; }
};
/// \brief An expression that sends a message to the given Objective-C
@@ -396,6 +434,9 @@ class ObjCMessageExpr : public Expr {
/// referring to the method that we type-checked against.
uintptr_t SelectorOrMethod;
+ /// \brief Location of the selector.
+ SourceLocation SelectorLoc;
+
/// \brief The source locations of the open and close square
/// brackets ('[' and ']', respectively).
SourceLocation LBracLoc, RBracLoc;
@@ -404,26 +445,29 @@ class ObjCMessageExpr : public Expr {
: Expr(ObjCMessageExprClass, Empty), NumArgs(NumArgs), Kind(0),
HasMethod(0), SelectorOrMethod(0) { }
- ObjCMessageExpr(QualType T,
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
SourceLocation SuperLoc,
bool IsInstanceSuper,
QualType SuperType,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
- ObjCMessageExpr(QualType T,
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
TypeSourceInfo *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
- ObjCMessageExpr(QualType T,
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
Expr *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
@@ -458,6 +502,10 @@ public:
///
/// \param T The result type of this message.
///
+ /// \param VK The value kind of this message. A message returning
+  /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
/// \param LBrac The location of the open square bracket '['.
///
/// \param SuperLoc The location of the "super" keyword.
@@ -475,12 +523,14 @@ public:
/// \param NumArgs The number of arguments.
///
/// \param RBracLoc The location of the closing square bracket ']'.
- static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
SourceLocation SuperLoc,
bool IsInstanceSuper,
QualType SuperType,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
@@ -491,6 +541,10 @@ public:
///
/// \param T The result type of this message.
///
+ /// \param VK The value kind of this message. A message returning
+  /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
/// \param LBrac The location of the open square bracket '['.
///
/// \param Receiver The type of the receiver, including
@@ -507,9 +561,11 @@ public:
///
/// \param RBracLoc The location of the closing square bracket ']'.
static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
TypeSourceInfo *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
@@ -520,6 +576,10 @@ public:
///
/// \param T The result type of this message.
///
+ /// \param VK The value kind of this message. A message returning
+  /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
/// \param LBrac The location of the open square bracket '['.
///
/// \param Receiver The expression used to produce the object that
@@ -536,9 +596,11 @@ public:
///
/// \param RBracLoc The location of the closing square bracket ']'.
static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
Expr *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc);
@@ -556,6 +618,9 @@ public:
/// sent to.
ReceiverKind getReceiverKind() const { return (ReceiverKind)Kind; }
+ /// \brief Source range of the receiver.
+ SourceRange getReceiverRange() const;
+
/// \brief Determine whether this is an instance message to either a
/// computed object or to super.
bool isInstanceMessage() const {
@@ -682,11 +747,11 @@ public:
/// \brief Retrieve the arguments to this message, not including the
/// receiver.
- Stmt **getArgs() {
- return reinterpret_cast<Stmt **>(this + 1) + 1;
+ Expr **getArgs() {
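+    // Arguments live in the trailing storage after this object; the first
+    // trailing slot holds the receiver, so skip one slot past 'this + 1'.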
+ return reinterpret_cast<Expr **>(this + 1) + 1;
}
- const Stmt * const *getArgs() const {
- return reinterpret_cast<const Stmt * const *>(this + 1) + 1;
+ const Expr * const *getArgs() const {
+ return reinterpret_cast<const Expr * const *>(this + 1) + 1;
}
/// getArg - Return the specified argument.
@@ -706,15 +771,13 @@ public:
SourceLocation getLeftLoc() const { return LBracLoc; }
SourceLocation getRightLoc() const { return RBracLoc; }
-
- void setLeftLoc(SourceLocation L) { LBracLoc = L; }
- void setRightLoc(SourceLocation L) { RBracLoc = L; }
+ SourceLocation getSelectorLoc() const { return SelectorLoc; }
void setSourceRange(SourceRange R) {
LBracLoc = R.getBegin();
RBracLoc = R.getEnd();
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(LBracLoc, RBracLoc);
}
@@ -724,43 +787,24 @@ public:
static bool classof(const ObjCMessageExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children();
typedef ExprIterator arg_iterator;
typedef ConstExprIterator const_arg_iterator;
- arg_iterator arg_begin() { return getArgs(); }
- arg_iterator arg_end() { return getArgs() + NumArgs; }
- const_arg_iterator arg_begin() const { return getArgs(); }
- const_arg_iterator arg_end() const { return getArgs() + NumArgs; }
-};
-
-/// ObjCSuperExpr - Represents the "super" expression in Objective-C,
-/// which refers to the object on which the current method is executing.
-///
-/// FIXME: This class is intended for removal, once its remaining
-/// clients have been altered to represent "super" internally.
-class ObjCSuperExpr : public Expr {
- SourceLocation Loc;
-public:
- ObjCSuperExpr(SourceLocation L, QualType Type)
- : Expr(ObjCSuperExprClass, Type, false, false), Loc(L) { }
- explicit ObjCSuperExpr(EmptyShell Empty) : Expr(ObjCSuperExprClass, Empty) {}
-
- SourceLocation getLoc() const { return Loc; }
- void setLoc(SourceLocation L) { Loc = L; }
-
- virtual SourceRange getSourceRange() const { return SourceRange(Loc); }
-
- static bool classof(const Stmt *T) {
- return T->getStmtClass() == ObjCSuperExprClass;
+ arg_iterator arg_begin() { return reinterpret_cast<Stmt **>(getArgs()); }
+ arg_iterator arg_end() {
+ return reinterpret_cast<Stmt **>(getArgs() + NumArgs);
+ }
+ const_arg_iterator arg_begin() const {
+ return reinterpret_cast<Stmt const * const*>(getArgs());
+ }
+ const_arg_iterator arg_end() const {
+ return reinterpret_cast<Stmt const * const*>(getArgs() + NumArgs);
}
- static bool classof(const ObjCSuperExpr *) { return true; }
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
/// ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
@@ -776,8 +820,9 @@ class ObjCIsaExpr : public Expr {
bool IsArrow;
public:
ObjCIsaExpr(Expr *base, bool isarrow, SourceLocation l, QualType ty)
- : Expr(ObjCIsaExprClass, ty, /*TypeDependent=*/false,
- base->isValueDependent()),
+ : Expr(ObjCIsaExprClass, ty, VK_LValue, OK_Ordinary,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ /*ContainsUnexpandedParameterPack=*/false),
Base(base), IsaMemberLoc(l), IsArrow(isarrow) {}
/// \brief Build an empty expression.
@@ -794,11 +839,11 @@ public:
SourceLocation getIsaMemberLoc() const { return IsaMemberLoc; }
void setIsaMemberLoc(SourceLocation L) { IsaMemberLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getBase()->getLocStart(), IsaMemberLoc);
}
- virtual SourceLocation getExprLoc() const { return IsaMemberLoc; }
+ SourceLocation getExprLoc() const { return IsaMemberLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCIsaExprClass;
@@ -806,8 +851,7 @@ public:
static bool classof(const ObjCIsaExpr *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Base, &Base+1); }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
index a8ef005..7b23766 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
#define LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
+#include "clang/AST/DeclBase.h"
#include <cassert>
#include <vector>
@@ -24,6 +25,7 @@ template <class T> class SmallVectorImpl;
namespace clang {
class ASTConsumer;
+class CXXBaseSpecifier;
class Decl;
class DeclContext;
class DeclContextLookupResult;
@@ -32,6 +34,7 @@ class ExternalSemaSource; // layering violation required for downcasting
class NamedDecl;
class Selector;
class Stmt;
+class TagDecl;
/// \brief Abstract interface for external sources of AST nodes.
///
@@ -91,6 +94,10 @@ public:
/// FunctionDecl::setLazyBody when building decls.
virtual Stmt *GetExternalDeclStmt(uint64_t Offset) = 0;
+ /// \brief Resolve the offset of a set of C++ base specifiers in the decl
+ /// stream into an array of specifiers.
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset) = 0;
+
/// \brief Finds all declarations with the given name in the
/// given context.
///
@@ -110,12 +117,44 @@ public:
virtual void MaterializeVisibleDecls(const DeclContext *DC) = 0;
/// \brief Finds all declarations lexically contained within the given
- /// DeclContext.
+ /// DeclContext, after applying an optional filter predicate.
+ ///
+ /// \param isKindWeWant a predicate function that returns true if the passed
+ /// declaration kind is one we are looking for. If NULL, all declarations
+ /// are returned.
///
/// \return true if an error occurred
virtual bool FindExternalLexicalDecls(const DeclContext *DC,
- llvm::SmallVectorImpl<Decl*> &Result) = 0;
+ bool (*isKindWeWant)(Decl::Kind),
+ llvm::SmallVectorImpl<Decl*> &Result) = 0;
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext.
+ ///
+ /// \return true if an error occurred
+ bool FindExternalLexicalDecls(const DeclContext *DC,
+ llvm::SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, 0, Result);
+ }
+
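+  /// \brief Finds all declarations of kind \c DeclTy lexically contained
+  /// within the given DeclContext; an illustrative use:
+  /// \code
+  ///   llvm::SmallVector<Decl*, 8> Decls;
+  ///   Source.FindExternalLexicalDeclsBy<FunctionDecl>(DC, Decls);
+  /// \endcode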
+ template <typename DeclTy>
+ bool FindExternalLexicalDeclsBy(const DeclContext *DC,
+ llvm::SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, DeclTy::classofKind, Result);
+ }
+
+ /// \brief Gives the external AST source an opportunity to complete
+ /// an incomplete type.
+ virtual void CompleteType(TagDecl *Tag) {}
+ /// \brief Gives the external AST source an opportunity to complete an
+ /// incomplete Objective-C class.
+ ///
+ /// This routine will only be invoked if the "externally completed" bit is
+ /// set on the ObjCInterfaceDecl via the function
+ /// \c ObjCInterfaceDecl::setExternallyCompleted().
+ virtual void CompleteType(ObjCInterfaceDecl *Class) { }
+
/// \brief Notify ExternalASTSource that we started deserialization of
/// a decl or type so until FinishedDeserializing is called there may be
/// decls that are initializing. Must be paired with FinishedDeserializing.
@@ -227,6 +266,11 @@ typedef LazyOffsetPtr<Stmt, uint64_t, &ExternalASTSource::GetExternalDeclStmt>
typedef LazyOffsetPtr<Decl, uint32_t, &ExternalASTSource::GetExternalDecl>
LazyDeclPtr;
+/// \brief A lazy pointer to a set of CXXBaseSpecifiers.
+typedef LazyOffsetPtr<CXXBaseSpecifier, uint64_t,
+ &ExternalASTSource::GetExternalCXXBaseSpecifiers>
+ LazyCXXBaseSpecifiersPtr;
+
} // end namespace clang
#endif // LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/FullExpr.h b/contrib/llvm/tools/clang/include/clang/AST/FullExpr.h
deleted file mode 100644
index 6ceefed..0000000
--- a/contrib/llvm/tools/clang/include/clang/AST/FullExpr.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===--- FullExpr.h - C++ full expression class -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the FullExpr interface, to be used for type safe handling
-// of full expressions.
-//
-// Full expressions are described in C++ [intro.execution]p12.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_FULLEXPR_H
-#define LLVM_CLANG_AST_FULLEXPR_H
-
-#include "llvm/ADT/PointerUnion.h"
-
-namespace clang {
- class ASTContext;
- class CXXTemporary;
- class Expr;
-
-class FullExpr {
- struct ExprAndTemporaries {
- Expr *SubExpr;
-
- unsigned NumTemps;
-
- typedef CXXTemporary** temps_iterator;
-
- temps_iterator temps_begin() {
- return reinterpret_cast<CXXTemporary **>(this + 1);
- }
- temps_iterator temps_end() {
- return temps_begin() + NumTemps;
- }
- };
-
- typedef llvm::PointerUnion<Expr *, ExprAndTemporaries *> SubExprTy;
- SubExprTy SubExpr;
-
- FullExpr() { }
-
-public:
- static FullExpr Create(ASTContext &Context, Expr *SubExpr,
- CXXTemporary **Temps, unsigned NumTemps);
-
- Expr *getExpr() {
- if (Expr *E = SubExpr.dyn_cast<Expr *>())
- return E;
-
- return SubExpr.get<ExprAndTemporaries *>()->SubExpr;
- }
-
- const Expr *getExpr() const {
- return const_cast<FullExpr*>(this)->getExpr();
- }
-
- typedef CXXTemporary** temps_iterator;
-
- temps_iterator temps_begin() {
- if (ExprAndTemporaries *ET = SubExpr.dyn_cast<ExprAndTemporaries *>())
- return ET->temps_begin();
-
- return 0;
- }
- temps_iterator temps_end() {
- if (ExprAndTemporaries *ET = SubExpr.dyn_cast<ExprAndTemporaries *>())
- return ET->temps_end();
-
- return 0;
- }
-
- void *getAsOpaquePtr() const { return SubExpr.getOpaqueValue(); }
-
- static FullExpr getFromOpaquePtr(void *Ptr) {
- FullExpr E;
- E.SubExpr = SubExprTy::getFromOpaqueValue(Ptr);
- return E;
- }
-};
-
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
index 139f6c0..7af7702 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
@@ -7,20 +7,15 @@
//
//===----------------------------------------------------------------------===//
//
-// Implements C++ name mangling according to the Itanium C++ ABI,
-// which is used in GCC 3.2 and newer (and many compilers that are
-// ABI-compatible with GCC):
-//
-// http://www.codesourcery.com/public/cxx-abi/abi.html
+// Defines the C++ name mangling interface.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
-#define LLVM_CLANG_CODEGEN_MANGLE_H
+#ifndef LLVM_CLANG_AST_MANGLE_H
+#define LLVM_CLANG_AST_MANGLE_H
-#include "CGCXX.h"
-#include "GlobalDecl.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/ABI.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallString.h"
@@ -36,8 +31,6 @@ namespace clang {
class NamedDecl;
class ObjCMethodDecl;
class VarDecl;
-
-namespace CodeGen {
struct ThisAdjustment;
struct ThunkInfo;
@@ -74,9 +67,6 @@ class MangleContext {
ASTContext &Context;
Diagnostic &Diags;
- llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
- unsigned Discriminator;
- llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
@@ -91,15 +81,8 @@ public:
Diagnostic &getDiags() const { return Diags; }
- void startNewFunction() { LocalBlockIds.clear(); }
+ virtual void startNewFunction() { LocalBlockIds.clear(); }
- uint64_t getAnonymousStructId(const TagDecl *TD) {
- std::pair<llvm::DenseMap<const TagDecl *,
- uint64_t>::iterator, bool> Result =
- AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
- return Result.first->second;
- }
-
unsigned getBlockId(const BlockDecl *BD, bool Local) {
llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
= Local? LocalBlockIds : GlobalBlockIds;
@@ -111,67 +94,57 @@ public:
/// @name Mangler Entry Points
/// @{
- virtual bool shouldMangleDeclName(const NamedDecl *D);
- virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual bool shouldMangleDeclName(const NamedDecl *D) = 0;
+ virtual void mangleName(const NamedDecl *D, llvm::raw_ostream &)=0;
virtual void mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleReferenceTemporary(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
+ virtual void mangleCXXRTTI(QualType T, llvm::raw_ostream &) = 0;
+ virtual void mangleCXXRTTIName(QualType T, llvm::raw_ostream &) = 0;
virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &);
+ llvm::raw_ostream &) = 0;
virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &);
- void mangleBlock(GlobalDecl GD,
- const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
-
- void mangleInitDiscriminator() {
- Discriminator = 0;
- }
-
- bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
- unsigned &discriminator = Uniquifier[ND];
- if (!discriminator)
- discriminator = ++Discriminator;
- if (discriminator == 1)
- return false;
- disc = discriminator-2;
- return true;
+ llvm::raw_ostream &) = 0;
+
+ void mangleGlobalBlock(const BlockDecl *BD,
+ llvm::raw_ostream &Out);
+ void mangleCtorBlock(const CXXConstructorDecl *CD, CXXCtorType CT,
+ const BlockDecl *BD, llvm::raw_ostream &Out);
+ void mangleDtorBlock(const CXXDestructorDecl *CD, CXXDtorType DT,
+ const BlockDecl *BD, llvm::raw_ostream &Out);
+ void mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ llvm::raw_ostream &Out);
+  // Dispatches to the appropriate mangleBlock overload for BD's context.
+ void mangleBlock(const BlockDecl *BD, llvm::raw_ostream &Out);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD,
+ llvm::raw_ostream &);
+
+  // FIXME: Guard-variable mangling is Itanium-specific and does not belong
+  // in this target-independent interface.
+ virtual void mangleItaniumGuardVariable(const VarDecl *D,
+ llvm::raw_ostream &) {
+ assert(0 && "Target does not support mangling guard variables");
}
/// @}
};
-/// MiscNameMangler - Mangles Objective-C method names and blocks.
-class MiscNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
-
- ASTContext &getASTContext() const { return Context.getASTContext(); }
-
-public:
- MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res);
+MangleContext *createItaniumMangleContext(ASTContext &Context,
+ Diagnostic &Diags);
+MangleContext *createMicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags);
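+
+// An illustrative sketch of the stream-based interface (the surrounding
+// names are assumed):
+//   llvm::SmallString<256> Buffer;
+//   llvm::raw_svector_ostream Out(Buffer);
+//   if (Mangler->shouldMangleDeclName(ND))
+//     Mangler->mangleName(ND, Out);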
- llvm::raw_svector_ostream &getStream() { return Out; }
-
- void mangleBlock(GlobalDecl GD, const BlockDecl *BD);
- void mangleObjCMethodName(const ObjCMethodDecl *MD);
-};
-
-}
}
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
index 3b25f3b..99cc1f2 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
@@ -90,7 +90,7 @@ private:
/// \brief Either find or insert the given nested name specifier
/// mockup in the given context.
- static NestedNameSpecifier *FindOrInsert(ASTContext &Context,
+ static NestedNameSpecifier *FindOrInsert(const ASTContext &Context,
const NestedNameSpecifier &Mockup);
public:
@@ -99,19 +99,19 @@ public:
/// The prefix must be dependent, since nested name specifiers
/// referencing an identifier are only permitted when the identifier
/// cannot be resolved.
- static NestedNameSpecifier *Create(ASTContext &Context,
+ static NestedNameSpecifier *Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
IdentifierInfo *II);
/// \brief Builds a nested name specifier that names a namespace.
- static NestedNameSpecifier *Create(ASTContext &Context,
+ static NestedNameSpecifier *Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
NamespaceDecl *NS);
/// \brief Builds a nested name specifier that names a type.
- static NestedNameSpecifier *Create(ASTContext &Context,
+ static NestedNameSpecifier *Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
- bool Template, Type *T);
+ bool Template, const Type *T);
/// \brief Builds a specifier that consists of just an identifier.
///
@@ -119,11 +119,12 @@ public:
/// prefix because the prefix is implied by something outside of the
/// nested name specifier, e.g., in "x->Base::f", the "x" has a dependent
/// type.
- static NestedNameSpecifier *Create(ASTContext &Context, IdentifierInfo *II);
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ IdentifierInfo *II);
/// \brief Returns the nested name specifier representing the global
/// scope.
- static NestedNameSpecifier *GlobalSpecifier(ASTContext &Context);
+ static NestedNameSpecifier *GlobalSpecifier(const ASTContext &Context);
/// \brief Return the prefix of this nested name specifier.
///
@@ -160,10 +161,10 @@ public:
}
/// \brief Retrieve the type stored in this nested name specifier.
- Type *getAsType() const {
+ const Type *getAsType() const {
if (Prefix.getInt() == TypeSpec ||
Prefix.getInt() == TypeSpecWithTemplate)
- return (Type *)Specifier;
+ return (const Type *)Specifier;
return 0;
}
@@ -172,6 +173,10 @@ public:
/// type or not.
bool isDependent() const;
+ /// \brief Whether this nested-name-specifier contains an unexpanded
+ /// parameter pack (for C++0x variadic templates).
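+  /// For example, the nested-name-specifier \c Ts:: in
+  /// \c typename Ts::type contains the unexpanded parameter pack \c Ts.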
+ bool containsUnexpandedParameterPack() const;
+
/// \brief Print this nested name specifier to the given output
/// stream.
void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
index 8045311..35c72c4 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
@@ -17,107 +17,238 @@
namespace clang {
-/// CastKind - the kind of cast this represents.
+/// CastKind - The kind of operation required for a conversion.
enum CastKind {
- /// CK_Unknown - Unknown cast kind.
- /// FIXME: The goal is to get rid of this and make all casts have a
- /// kind so that the AST client doesn't have to try to figure out what's
- /// going on.
- CK_Unknown,
+ /// CK_Dependent - A conversion which cannot yet be analyzed because
+ /// either the expression or target type is dependent. These are
+ /// created only for explicit casts; dependent ASTs aren't required
+ /// to even approximately type-check.
+ /// (T*) malloc(sizeof(T))
+ /// reinterpret_cast<intptr_t>(A<T>::alloc());
+ CK_Dependent,
- /// CK_BitCast - Used for reinterpret_cast.
+ /// CK_BitCast - A conversion which causes a bit pattern of one type
+ /// to be reinterpreted as a bit pattern of another type. Generally
+ /// the operands must have equivalent size and unrelated types.
+ ///
+ /// The pointer conversion char* -> int* is a bitcast. Many other
+ /// pointer conversions which are "physically" bitcasts are given
+ /// special cast kinds.
+ ///
+ /// Vector coercions are bitcasts.
CK_BitCast,
- /// CK_LValueBitCast - Used for reinterpret_cast of expressions to
- /// a reference type.
+ /// CK_LValueBitCast - A conversion which reinterprets the address of
+ /// an l-value as an l-value of a different kind. Used for
+ /// reinterpret_casts of l-value expressions to reference types.
+ /// bool b; reinterpret_cast<char&>(b) = 'a';
CK_LValueBitCast,
+
+ /// CK_LValueToRValue - A conversion which causes the extraction of
+ /// an r-value from the operand gl-value. The result of an r-value
+ /// conversion is always unqualified.
+ CK_LValueToRValue,
+
+ /// CK_GetObjCProperty - A conversion which calls an Objective-C
+ /// property getter. The operand is an OK_ObjCProperty l-value; the
+ /// result will generally be an r-value, but could be an ordinary
+ /// gl-value if the property reference is to an implicit property
+ /// for a method that returns a reference type.
+ CK_GetObjCProperty,
- /// CK_NoOp - Used for const_cast.
+ /// CK_NoOp - A conversion which does not affect the type other than
+ /// (possibly) adding qualifiers.
+ /// int -> int
+ /// char** -> const char * const *
CK_NoOp,
- /// CK_BaseToDerived - Base to derived class casts.
+ /// CK_BaseToDerived - A conversion from a C++ class pointer/reference
+ /// to a derived class pointer/reference.
+ /// B *b = static_cast<B*>(a);
CK_BaseToDerived,
- /// CK_DerivedToBase - Derived to base class casts.
+ /// CK_DerivedToBase - A conversion from a C++ class pointer
+ /// to a base class pointer.
+ /// A *a = new B();
CK_DerivedToBase,
- /// CK_UncheckedDerivedToBase - Derived to base class casts that
- /// assume that the derived pointer is not null.
+ /// CK_UncheckedDerivedToBase - A conversion from a C++ class
+ /// pointer/reference to a base class that can assume that the
+ /// derived pointer is not null.
+ /// const A &a = B();
+ /// b->method_from_a();
CK_UncheckedDerivedToBase,
- /// CK_Dynamic - Dynamic cast.
+ /// CK_Dynamic - A C++ dynamic_cast.
CK_Dynamic,
- /// CK_ToUnion - Cast to union (GCC extension).
+ /// CK_ToUnion - The GCC cast-to-union extension.
+ /// int -> union { int x; float y; }
+ /// float -> union { int x; float y; }
CK_ToUnion,
/// CK_ArrayToPointerDecay - Array to pointer decay.
+ /// int[10] -> int*
+ /// char[5][6] -> char(*)[6]
CK_ArrayToPointerDecay,
- // CK_FunctionToPointerDecay - Function to pointer decay.
+ /// CK_FunctionToPointerDecay - Function to pointer decay.
+ /// void(int) -> void(*)(int)
CK_FunctionToPointerDecay,
- /// CK_NullToMemberPointer - Null pointer to member pointer.
+ /// CK_NullToPointer - Null pointer constant to pointer, ObjC
+ /// pointer, or block pointer.
+ /// (void*) 0
+ /// void (^block)() = 0;
+ CK_NullToPointer,
+
+ /// CK_NullToMemberPointer - Null pointer constant to member pointer.
+ /// int A::*mptr = 0;
+ /// int (A::*fptr)(int) = nullptr;
CK_NullToMemberPointer,
/// CK_BaseToDerivedMemberPointer - Member pointer in base class to
/// member pointer in derived class.
+ /// int B::*mptr = &A::member;
CK_BaseToDerivedMemberPointer,
/// CK_DerivedToBaseMemberPointer - Member pointer in derived class to
/// member pointer in base class.
+ /// int A::*mptr = static_cast<int A::*>(&B::member);
CK_DerivedToBaseMemberPointer,
+ /// CK_MemberPointerToBoolean - Member pointer to boolean. A check
+ /// against the null member pointer.
+ CK_MemberPointerToBoolean,
+
/// CK_UserDefinedConversion - Conversion using a user defined type
/// conversion function.
+ /// struct A { operator int(); }; int i = int(A());
CK_UserDefinedConversion,
- /// CK_ConstructorConversion - Conversion by constructor
+ /// CK_ConstructorConversion - Conversion by constructor.
+ /// struct A { A(int); }; A a = A(10);
CK_ConstructorConversion,
- /// CK_IntegralToPointer - Integral to pointer
+ /// CK_IntegralToPointer - Integral to pointer. A special kind of
+ /// reinterpreting conversion. Applies to normal, ObjC, and block
+ /// pointers.
+ /// (char*) 0x1001aab0
+ /// reinterpret_cast<int*>(0)
CK_IntegralToPointer,
- /// CK_PointerToIntegral - Pointer to integral
+ /// CK_PointerToIntegral - Pointer to integral. A special kind of
+ /// reinterpreting conversion. Applies to normal, ObjC, and block
+ /// pointers.
+ /// (intptr_t) "help!"
CK_PointerToIntegral,
+
+ /// CK_PointerToBoolean - Pointer to boolean conversion. A check
+ /// against null. Applies to normal, ObjC, and block pointers.
+ CK_PointerToBoolean,
- /// CK_ToVoid - Cast to void.
+ /// CK_ToVoid - Cast to void, discarding the computed value.
+ /// (void) malloc(2048)
CK_ToVoid,
- /// CK_VectorSplat - Casting from an integer/floating type to an extended
- /// vector type with the same element type as the src type. Splats the
- /// src expression into the destination expression.
+ /// CK_VectorSplat - A conversion from an arithmetic type to a
+ /// vector of that element type. Fills all elements ("splats") with
+ /// the source value.
+ /// __attribute__((ext_vector_type(4))) int v = 5;
CK_VectorSplat,
- /// CK_IntegralCast - Casting between integral types of different size.
+ /// CK_IntegralCast - A cast between integral types (other than to
+ /// boolean). Variously a bitcast, a truncation, a sign-extension,
+ /// or a zero-extension.
+ /// long l = 5;
+ /// (unsigned) i
CK_IntegralCast,
+ /// CK_IntegralToBoolean - Integral to boolean. A check against zero.
+ /// (bool) i
+ CK_IntegralToBoolean,
+
/// CK_IntegralToFloating - Integral to floating point.
+ /// float f = i;
CK_IntegralToFloating,
- /// CK_FloatingToIntegral - Floating point to integral.
+ /// CK_FloatingToIntegral - Floating point to integral. Rounds
+ /// towards zero, discarding any fractional component.
+ /// (int) f
CK_FloatingToIntegral,
+
+ /// CK_FloatingToBoolean - Floating point to boolean.
+ /// (bool) f
+ CK_FloatingToBoolean,
/// CK_FloatingCast - Casting between floating types of different size.
+ /// (double) f
+ /// (float) ld
CK_FloatingCast,
- /// CK_MemberPointerToBoolean - Member pointer to boolean
- CK_MemberPointerToBoolean,
-
- /// CK_AnyPointerToObjCPointerCast - Casting any pointer to objective-c
- /// pointer
+ /// CK_AnyPointerToObjCPointerCast - Casting any other pointer kind
+ /// to an Objective-C pointer.
CK_AnyPointerToObjCPointerCast,
- /// CK_AnyPointerToBlockPointerCast - Casting any pointer to block
- /// pointer
+ /// CK_AnyPointerToBlockPointerCast - Casting any other pointer kind
+ /// to a block pointer.
CK_AnyPointerToBlockPointerCast,
/// \brief Converting between two Objective-C object types, which
/// can occur when performing reference binding to an Objective-C
/// object.
- CK_ObjCObjectLValueCast
+ CK_ObjCObjectLValueCast,
+
+ /// \brief A conversion of a floating point real to a floating point
+ /// complex of the original type. Injects the value as the real
+ /// component with a zero imaginary component.
+ /// float -> _Complex float
+ CK_FloatingRealToComplex,
+
+ /// \brief Converts a floating point complex to floating point real
+ /// of the source's element type. Just discards the imaginary
+ /// component.
+ /// _Complex long double -> long double
+ CK_FloatingComplexToReal,
+
+ /// \brief Converts a floating point complex to bool by comparing
+ /// against 0+0i.
+ CK_FloatingComplexToBoolean,
+
+ /// \brief Converts between different floating point complex types.
+ /// _Complex float -> _Complex double
+ CK_FloatingComplexCast,
+
+ /// \brief Converts from a floating complex to an integral complex.
+ /// _Complex float -> _Complex int
+ CK_FloatingComplexToIntegralComplex,
+
+ /// \brief Converts from an integral real to an integral complex
+ /// whose element type matches the source. Injects the value as
+ /// the real component with a zero imaginary component.
+ /// long -> _Complex long
+ CK_IntegralRealToComplex,
+
+ /// \brief Converts an integral complex to an integral real of the
+ /// source's element type by discarding the imaginary component.
+ /// _Complex short -> short
+ CK_IntegralComplexToReal,
+
+ /// \brief Converts an integral complex to bool by comparing against
+ /// 0+0i.
+ CK_IntegralComplexToBoolean,
+
+ /// \brief Converts between different integral complex types.
+ /// _Complex char -> _Complex long long
+ /// _Complex unsigned int -> _Complex signed int
+ CK_IntegralComplexCast,
+
+ /// \brief Converts from an integral complex to a floating complex.
+ /// _Complex unsigned -> _Complex float
+ CK_IntegralComplexToFloatingComplex
};
+#define CK_Invalid ((CastKind) -1)
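For orientation, a short sketch of source forms that receive the kinds documented above. The mapping is illustrative only: the exact kind is assigned by Clang's semantic analysis and can differ in edge cases, and the _Complex forms are C (or a C++ extension).

    // Illustrative mapping from source forms to the cast kinds above.
    float f = 3;               // CK_IntegralToFloating
    int i = (int) f;           // CK_FloatingToIntegral, rounds toward zero
    bool b = (bool) &i;        // CK_PointerToBoolean, a check against null
    _Complex float cf = f;     // CK_FloatingRealToComplex
    (void) i;                  // CK_ToVoid, discards the value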
enum BinaryOperatorKind {
// Operators listed in order of precedence.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h b/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h
index f826e11..9ea5a09 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h
@@ -24,8 +24,14 @@ public:
ParentMap(Stmt* ASTRoot);
~ParentMap();
+ /// \brief Adds and/or updates the parent/child relationships for the
+ /// complete statement tree rooted at S. All children of S, including
+ /// indirect descendants, are visited and updated or inserted; the
+ /// parents of S themselves are not.
+ void addStmt(Stmt* S);
+
Stmt *getParent(Stmt*) const;
Stmt *getParentIgnoreParens(Stmt *) const;
+ Stmt *getParentIgnoreParenCasts(Stmt *) const;
const Stmt *getParent(const Stmt* S) const {
return getParent(const_cast<Stmt*>(S));
@@ -35,6 +41,10 @@ public:
return getParentIgnoreParens(const_cast<Stmt*>(S));
}
+ const Stmt *getParentIgnoreParenCasts(const Stmt *S) const {
+ return getParentIgnoreParenCasts(const_cast<Stmt*>(S));
+ }
+
bool hasParent(Stmt* S) const {
return getParent(S) != 0;
}
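A minimal sketch of how the new methods compose, using only what this header declares; the helper name is hypothetical and error handling is omitted.

    #include "clang/AST/ParentMap.h"
    using namespace clang;

    // Map a whole function body once, then look up a statement's parent
    // while skipping parentheses and casts. May return null at the root.
    static Stmt *parentThroughCasts(Stmt *Body, Stmt *S) {
      ParentMap PM(Body);                      // indexes the complete subtree
      return PM.getParentIgnoreParenCasts(S);
    }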
diff --git a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
index 70d65d3..a59c302 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
@@ -14,6 +14,8 @@
#ifndef LLVM_CLANG_AST_PRETTY_PRINTER_H
#define LLVM_CLANG_AST_PRETTY_PRINTER_H
+#include "clang/Basic/LangOptions.h"
+
namespace llvm {
class raw_ostream;
}
@@ -44,7 +46,7 @@ struct PrintingPolicy {
unsigned Indentation : 8;
/// \brief What language we're printing.
- const LangOptions &LangOpts;
+ const LangOptions LangOpts;
/// \brief Whether we should suppress printing of the actual specifiers for
/// the given type or declaration.
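The switch from a reference member to a by-value LangOptions means a PrintingPolicy can now outlive the options it was built from. A minimal sketch, assuming the PrintingPolicy(const LangOptions&) constructor and the CPlusPlus flag of this era's headers:

    #include "clang/AST/PrettyPrinter.h"
    #include "clang/Basic/LangOptions.h"
    using namespace clang;

    // Under the old reference member, returning this policy would have
    // left it referring to a destroyed local; by value, LO is copied.
    PrintingPolicy makeCXXPolicy() {
      LangOptions LO;
      LO.CPlusPlus = 1;
      return PrintingPolicy(LO);
    }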
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
index 2b3229e..d7bab80 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
@@ -14,8 +14,9 @@
#ifndef LLVM_CLANG_AST_LAYOUTINFO_H
#define LLVM_CLANG_AST_LAYOUTINFO_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
namespace clang {
@@ -32,93 +33,41 @@ namespace clang {
/// ObjCInterfaceDecl. FIXME - Find appropriate name.
/// These objects are managed by ASTContext.
class ASTRecordLayout {
- /// Size - Size of record in bits.
- uint64_t Size;
+ /// Size - Size of record in characters.
+ CharUnits Size;
- /// DataSize - Size of record in bits without tail padding.
- uint64_t DataSize;
+ /// DataSize - Size of record in characters without tail padding.
+ CharUnits DataSize;
/// FieldOffsets - Array of field offsets in bits.
uint64_t *FieldOffsets;
- // Alignment - Alignment of record in bits.
- unsigned Alignment;
+ // Alignment - Alignment of record in characters.
+ CharUnits Alignment;
// FieldCount - Number of fields.
unsigned FieldCount;
-public:
- /// PrimaryBaseInfo - Contains info about a primary base.
- struct PrimaryBaseInfo {
- PrimaryBaseInfo() {}
-
- PrimaryBaseInfo(const CXXRecordDecl *Base, bool IsVirtual)
- : Value(Base, Base && IsVirtual) {}
-
- /// Value - Points to the primary base. The single-bit value
- /// will be non-zero when the primary base is virtual.
- llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> Value;
-
- /// getBase - Returns the primary base.
- const CXXRecordDecl *getBase() const { return Value.getPointer(); }
-
- /// isVirtual - Returns whether the primary base is virtual or not.
- bool isVirtual() const { return Value.getInt(); }
-
- friend bool operator==(const PrimaryBaseInfo &X, const PrimaryBaseInfo &Y) {
- return X.Value == Y.Value;
- }
- };
-
- /// primary_base_info_iterator - An iterator for iterating the primary base
- /// class chain.
- class primary_base_info_iterator {
- /// Current - The current base class info.
- PrimaryBaseInfo Current;
-
- public:
- primary_base_info_iterator() {}
- primary_base_info_iterator(PrimaryBaseInfo Info) : Current(Info) {}
-
- const PrimaryBaseInfo &operator*() const { return Current; }
-
- primary_base_info_iterator& operator++() {
- const CXXRecordDecl *RD = Current.getBase();
- Current = RD->getASTContext().getASTRecordLayout(RD).getPrimaryBaseInfo();
- return *this;
- }
-
- friend bool operator==(const primary_base_info_iterator &X,
- const primary_base_info_iterator &Y) {
- return X.Current == Y.Current;
- }
- friend bool operator!=(const primary_base_info_iterator &X,
- const primary_base_info_iterator &Y) {
- return !(X == Y);
- }
- };
-
-private:
/// CXXRecordLayoutInfo - Contains C++ specific layout information.
struct CXXRecordLayoutInfo {
- /// NonVirtualSize - The non-virtual size (in bits) of an object, which is
+ /// NonVirtualSize - The non-virtual size (in chars) of an object, which is
/// the size of the object without virtual bases.
- uint64_t NonVirtualSize;
+ CharUnits NonVirtualSize;
- /// NonVirtualAlign - The non-virtual alignment (in bits) of an object,
+ /// NonVirtualAlign - The non-virtual alignment (in chars) of an object,
/// which is the alignment of the object without virtual bases.
- uint64_t NonVirtualAlign;
+ CharUnits NonVirtualAlign;
/// SizeOfLargestEmptySubobject - The size of the largest empty subobject
/// (either a base or a member). Will be zero if the class doesn't contain
/// any empty subobjects.
- uint64_t SizeOfLargestEmptySubobject;
+ CharUnits SizeOfLargestEmptySubobject;
/// PrimaryBase - The primary base info for this record.
- PrimaryBaseInfo PrimaryBase;
+ llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> PrimaryBase;
/// FIXME: This should really use a SmallPtrMap, once we have one in LLVM :)
- typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t> BaseOffsetsMapTy;
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
/// BaseOffsets - Contains a map from base classes to their offset.
BaseOffsetsMapTy BaseOffsets;
@@ -133,35 +82,35 @@ private:
friend class ASTContext;
- ASTRecordLayout(ASTContext &Ctx, uint64_t size, unsigned alignment,
- unsigned datasize, const uint64_t *fieldoffsets,
+ ASTRecordLayout(const ASTContext &Ctx, CharUnits size, CharUnits alignment,
+ CharUnits datasize, const uint64_t *fieldoffsets,
unsigned fieldcount);
// Constructor for C++ records.
typedef CXXRecordLayoutInfo::BaseOffsetsMapTy BaseOffsetsMapTy;
- ASTRecordLayout(ASTContext &Ctx,
- uint64_t size, unsigned alignment, uint64_t datasize,
+ ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment, CharUnits datasize,
const uint64_t *fieldoffsets, unsigned fieldcount,
- uint64_t nonvirtualsize, unsigned nonvirtualalign,
- uint64_t SizeOfLargestEmptySubobject,
+ CharUnits nonvirtualsize, CharUnits nonvirtualalign,
+ CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
- bool PrimaryBaseIsVirtual,
+ bool IsPrimaryBaseVirtual,
const BaseOffsetsMapTy& BaseOffsets,
const BaseOffsetsMapTy& VBaseOffsets);
~ASTRecordLayout() {}
void Destroy(ASTContext &Ctx);
-
+
ASTRecordLayout(const ASTRecordLayout&); // DO NOT IMPLEMENT
void operator=(const ASTRecordLayout&); // DO NOT IMPLEMENT
public:
- /// getAlignment - Get the record alignment in bits.
- unsigned getAlignment() const { return Alignment; }
+ /// getAlignment - Get the record alignment in characters.
+ CharUnits getAlignment() const { return Alignment; }
- /// getSize - Get the record size in bits.
- uint64_t getSize() const { return Size; }
+ /// getSize - Get the record size in characters.
+ CharUnits getSize() const { return Size; }
/// getFieldCount - Get the number of fields in the layout.
unsigned getFieldCount() const { return FieldCount; }
@@ -174,75 +123,81 @@ public:
}
/// getDataSize() - Get the record data size, which is the record size
- /// without tail padding, in bits.
- uint64_t getDataSize() const {
+ /// without tail padding, in characters.
+ CharUnits getDataSize() const {
return DataSize;
}
- /// getNonVirtualSize - Get the non-virtual size (in bits) of an object,
+ /// getNonVirtualSize - Get the non-virtual size (in chars) of an object,
/// which is the size of the object without virtual bases.
- uint64_t getNonVirtualSize() const {
+ CharUnits getNonVirtualSize() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->NonVirtualSize;
}
- /// getNonVirtualSize - Get the non-virtual alignment (in bits) of an object,
+ /// getNonVirtualAlign - Get the non-virtual alignment (in chars) of an object,
/// which is the alignment of the object without virtual bases.
- unsigned getNonVirtualAlign() const {
+ CharUnits getNonVirtualAlign() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->NonVirtualAlign;
}
- /// getPrimaryBaseInfo - Get the primary base info.
- const PrimaryBaseInfo &getPrimaryBaseInfo() const {
+ /// getPrimaryBase - Get the primary base for this record.
+ const CXXRecordDecl *getPrimaryBase() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
- return CXXInfo->PrimaryBase;
+ return CXXInfo->PrimaryBase.getPointer();
}
- // FIXME: Migrate off of this function and use getPrimaryBaseInfo directly.
- const CXXRecordDecl *getPrimaryBase() const {
- return getPrimaryBaseInfo().getBase();
- }
+ /// isPrimaryBaseVirtual - Get whether the primary base for this record
+ /// is virtual or not.
+ bool isPrimaryBaseVirtual() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
- // FIXME: Migrate off of this function and use getPrimaryBaseInfo directly.
- bool getPrimaryBaseWasVirtual() const {
- return getPrimaryBaseInfo().isVirtual();
+ return CXXInfo->PrimaryBase.getInt();
}
- /// getBaseClassOffset - Get the offset, in bits, for the given base class.
- uint64_t getBaseClassOffset(const CXXRecordDecl *Base) const {
+ /// getBaseClassOffset - Get the offset, in chars, for the given base class.
+ CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
assert(CXXInfo->BaseOffsets.count(Base) && "Did not find base!");
return CXXInfo->BaseOffsets[Base];
}
- /// getVBaseClassOffset - Get the offset, in bits, for the given base class.
- uint64_t getVBaseClassOffset(const CXXRecordDecl *VBase) const {
+ /// getVBaseClassOffset - Get the offset, in chars, for the given base class.
+ CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
return CXXInfo->VBaseOffsets[VBase];
}
-
- uint64_t getSizeOfLargestEmptySubobject() const {
+
+ /// getBaseClassOffsetInBits - Get the offset, in bits, for the given
+ /// base class.
+ uint64_t getBaseClassOffsetInBits(const CXXRecordDecl *Base) const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
- return CXXInfo->SizeOfLargestEmptySubobject;
+ assert(CXXInfo->BaseOffsets.count(Base) && "Did not find base!");
+
+ return getBaseClassOffset(Base).getQuantity() *
+ Base->getASTContext().getCharWidth();
}
- primary_base_info_iterator primary_base_begin() const {
+ /// getVBaseClassOffsetInBits - Get the offset, in bits, for the given
+ /// base class.
+ uint64_t getVBaseClassOffsetInBits(const CXXRecordDecl *VBase) const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
-
- return primary_base_info_iterator(getPrimaryBaseInfo());
+ assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
+
+ return getVBaseClassOffset(VBase).getQuantity() *
+ VBase->getASTContext().getCharWidth();
}
- primary_base_info_iterator primary_base_end() const {
+ CharUnits getSizeOfLargestEmptySubobject() const {
assert(CXXInfo && "Record layout does not have C++ specific info!");
-
- return primary_base_info_iterator();
+ return CXXInfo->SizeOfLargestEmptySubobject;
}
};
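A sketch of a layout client after the bits-to-CharUnits migration; Layout and Base would come from ASTContext::getASTRecordLayout, and the function itself is illustrative only.

    #include "clang/AST/RecordLayout.h"
    using namespace clang;

    // The primary accessors now answer in characters; the *InBits
    // helpers added above remain for clients that still work in bits.
    void inspect(const ASTRecordLayout &Layout, const CXXRecordDecl *Base) {
      CharUnits Size = Layout.getSize();              // characters, not bits
      int64_t Chars = Size.getQuantity();
      uint64_t BaseBits = Layout.getBaseClassOffsetInBits(Base);
      (void) Chars; (void) BaseBits;
    }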
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
index 232e47b..921b799 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -143,6 +143,10 @@ public:
/// \brief Return whether this visitor should recurse into
/// template instantiations.
bool shouldVisitTemplateInstantiations() const { return false; }
+
+ /// \brief Return whether this visitor should recurse into the types of
+ /// TypeLocs.
+ bool shouldWalkTypesOfTypeLocs() const { return true; }
/// \brief Recursively visit a statement or expression, by
/// dispatching to Traverse*() based on the argument's dynamic type.
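A minimal sketch of a visitor using the new shouldWalkTypesOfTypeLocs hook above; the class name is hypothetical.

    #include "clang/AST/RecursiveASTVisitor.h"
    using namespace clang;

    // Returning false from the hook skips the WalkUpFrom*Type calls for
    // the Type inside each TypeLoc, so only TypeLoc-side visits fire.
    class TypeLocOnlyVisitor : public RecursiveASTVisitor<TypeLocOnlyVisitor> {
    public:
      bool shouldWalkTypesOfTypeLocs() const { return false; }
      bool VisitTypeLoc(TypeLoc TL) {
        // ... inspect TL ...
        return true;            // false would abort the traversal
      }
    };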
@@ -211,7 +215,7 @@ public:
/// be overridden for clients that need access to the name.
///
/// \returns false if the visitation was terminated early, true otherwise.
- bool TraverseConstructorInitializer(CXXBaseOrMemberInitializer *Init);
+ bool TraverseConstructorInitializer(CXXCtorInitializer *Init);
// ---- Methods on Stmts ----
@@ -368,7 +372,7 @@ private:
bool TraverseTemplateParameterListHelper(TemplateParameterList *TPL);
bool TraverseClassInstantiations(ClassTemplateDecl* D, Decl *Pattern);
bool TraverseFunctionInstantiations(FunctionTemplateDecl* D) ;
- bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
+ bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
unsigned Count);
bool TraverseArrayTypeLocHelper(ArrayTypeLoc TL);
bool TraverseRecordHelper(RecordDecl *D);
@@ -393,7 +397,7 @@ bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S) {
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
switch (BinOp->getOpcode()) {
#define OPERATOR(NAME) \
- case BO_##NAME: DISPATCH(Bin##PtrMemD, BinaryOperator, S);
+ case BO_##NAME: DISPATCH(Bin##NAME, BinaryOperator, S);
BINOP_LIST()
#undef OPERATOR
@@ -438,7 +442,8 @@ bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, BASE)
#define TYPE(CLASS, BASE) \
- case Type::CLASS: DISPATCH(CLASS##Type, CLASS##Type, T.getTypePtr());
+ case Type::CLASS: DISPATCH(CLASS##Type, CLASS##Type, \
+ const_cast<Type*>(T.getTypePtr()));
#include "clang/AST/TypeNodes.def"
}
@@ -531,7 +536,9 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
return getDerived().TraverseType(Arg.getAsType());
case TemplateArgument::Template:
- return getDerived().TraverseTemplateName(Arg.getAsTemplate());
+ case TemplateArgument::TemplateExpansion:
+ return getDerived().TraverseTemplateName(
+ Arg.getAsTemplateOrTemplatePattern());
case TemplateArgument::Expression:
return getDerived().TraverseStmt(Arg.getAsExpr());
@@ -566,7 +573,9 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
}
case TemplateArgument::Template:
- return getDerived().TraverseTemplateName(Arg.getAsTemplate());
+ case TemplateArgument::TemplateExpansion:
+ return getDerived().TraverseTemplateName(
+ Arg.getAsTemplateOrTemplatePattern());
case TemplateArgument::Expression:
return getDerived().TraverseStmt(ArgLoc.getSourceExpression());
@@ -592,7 +601,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArguments(
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseConstructorInitializer(
- CXXBaseOrMemberInitializer *Init) {
+ CXXCtorInitializer *Init) {
// FIXME: recurse on TypeLoc of the base initializer if isBaseInitializer()?
if (Init->isWritten())
TRY_TO(TraverseStmt(Init->getInit()));
@@ -706,10 +715,15 @@ DEF_TRAVERSE_TYPE(DecltypeType, {
TRY_TO(TraverseStmt(T->getUnderlyingExpr()));
})
+DEF_TRAVERSE_TYPE(AutoType, {
+ TRY_TO(TraverseType(T->getDeducedType()));
+ })
+
DEF_TRAVERSE_TYPE(RecordType, { })
DEF_TRAVERSE_TYPE(EnumType, { })
DEF_TRAVERSE_TYPE(TemplateTypeParmType, { })
DEF_TRAVERSE_TYPE(SubstTemplateTypeParmType, { })
+DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType, { })
DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
TRY_TO(TraverseTemplateName(T->getTemplateName()));
@@ -718,6 +732,14 @@ DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
DEF_TRAVERSE_TYPE(InjectedClassNameType, { })
+DEF_TRAVERSE_TYPE(AttributedType, {
+ TRY_TO(TraverseType(T->getModifiedType()));
+ })
+
+DEF_TRAVERSE_TYPE(ParenType, {
+ TRY_TO(TraverseType(T->getInnerType()));
+ })
+
DEF_TRAVERSE_TYPE(ElaboratedType, {
if (T->getQualifier()) {
TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
@@ -734,6 +756,10 @@ DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
})
+DEF_TRAVERSE_TYPE(PackExpansionType, {
+ TRY_TO(TraverseType(T->getPattern()));
+ })
+
DEF_TRAVERSE_TYPE(ObjCInterfaceType, { })
DEF_TRAVERSE_TYPE(ObjCObjectType, {
@@ -752,14 +778,15 @@ DEF_TRAVERSE_TYPE(ObjCObjectPointerType, {
// ----------------- TypeLoc traversal -----------------
// This macro makes available a variable TL, the passed-in TypeLoc.
-// It calls WalkUpFrom* for the Type in the given TypeLoc, in addition
-// to WalkUpFrom* for the TypeLoc itself, such that existing clients
-// that override the WalkUpFrom*Type() and/or Visit*Type() methods
+// If requested, it calls WalkUpFrom* for the Type in the given TypeLoc,
+// in addition to WalkUpFrom* for the TypeLoc itself, such that existing
+// clients that override the WalkUpFrom*Type() and/or Visit*Type() methods
// continue to work.
#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
template<typename Derived> \
bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
- TRY_TO(WalkUpFrom##TYPE(TL.getTypePtr())); \
+ if (getDerived().shouldWalkTypesOfTypeLocs()) \
+ TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE*>(TL.getTypePtr()))); \
TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
{ CODE; } \
return true; \
@@ -867,22 +894,16 @@ DEF_TRAVERSE_TYPELOC(FunctionNoProtoType, {
TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
})
-// FIXME: location of arguments, exception specifications (attributes?)
-// Note that we have the ParmVarDecl's here. Do we want to use them?
+// FIXME: location of exception specifications (attributes?)
DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
- FunctionProtoType *T = TL.getTypePtr();
-/*
+ const FunctionProtoType *T = TL.getTypePtr();
+
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
TRY_TO(TraverseDecl(TL.getArg(I)));
}
-*/
- for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
- AEnd = T->arg_type_end();
- A != AEnd; ++A) {
- TRY_TO(TraverseType(*A));
- }
+
for (FunctionProtoType::exception_iterator E = T->exception_begin(),
EEnd = T->exception_end();
E != EEnd; ++E) {
@@ -906,10 +927,15 @@ DEF_TRAVERSE_TYPELOC(DecltypeType, {
TRY_TO(TraverseStmt(TL.getTypePtr()->getUnderlyingExpr()));
})
+DEF_TRAVERSE_TYPELOC(AutoType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getDeducedType()));
+ })
+
DEF_TRAVERSE_TYPELOC(RecordType, { })
DEF_TRAVERSE_TYPELOC(EnumType, { })
DEF_TRAVERSE_TYPELOC(TemplateTypeParmType, { })
DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmType, { })
+DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmPackType, { })
// FIXME: use the loc for the template name?
DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
@@ -921,6 +947,14 @@ DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
DEF_TRAVERSE_TYPELOC(InjectedClassNameType, { })
+DEF_TRAVERSE_TYPELOC(ParenType, {
+ TRY_TO(TraverseTypeLoc(TL.getInnerLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(AttributedType, {
+ TRY_TO(TraverseTypeLoc(TL.getModifiedLoc()));
+ })
+
// FIXME: use the sourceloc on qualifier?
DEF_TRAVERSE_TYPELOC(ElaboratedType, {
if (TL.getTypePtr()->getQualifier()) {
@@ -941,6 +975,10 @@ DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
}
})
+DEF_TRAVERSE_TYPELOC(PackExpansionType, {
+ TRY_TO(TraverseTypeLoc(TL.getPatternLoc()));
+ })
+
DEF_TRAVERSE_TYPELOC(ObjCInterfaceType, { })
DEF_TRAVERSE_TYPELOC(ObjCObjectType, {
@@ -1001,11 +1039,18 @@ DEF_TRAVERSE_DECL(FileScopeAsmDecl, {
})
DEF_TRAVERSE_DECL(FriendDecl, {
- TRY_TO(TraverseDecl(D->getFriendDecl()));
+ // A friend is either a decl or a type.
+ if (D->getFriendType())
+ TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
+ else
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
})
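As plain C++ source, the two friend shapes this traversal now distinguishes:

    class B;
    struct A {
      friend class B;       // a friend *type*: getFriendType() is non-null
      friend void f(int);   // a friend *declaration*: getFriendDecl() is used
    };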
DEF_TRAVERSE_DECL(FriendTemplateDecl, {
- TRY_TO(TraverseDecl(D->getFriendDecl()));
+ if (D->getFriendType())
+ TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
+ else
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
for (unsigned I = 0, E = D->getNumTemplateParameters(); I < E; ++I) {
TemplateParameterList *TPL = D->getTemplateParameterList(I);
for (TemplateParameterList::iterator ITPL = TPL->begin(),
@@ -1051,6 +1096,11 @@ DEF_TRAVERSE_DECL(NamespaceAliasDecl, {
return true;
})
+DEF_TRAVERSE_DECL(LabelDecl, {
+ // There is no code in a LabelDecl.
+})
+
+
DEF_TRAVERSE_DECL(NamespaceDecl, {
// Code in an unnamed namespace shows up automatically in
// decls_begin()/decls_end(). Thus we don't need to recurse on
@@ -1082,7 +1132,9 @@ DEF_TRAVERSE_DECL(ObjCProtocolDecl, {
})
DEF_TRAVERSE_DECL(ObjCMethodDecl, {
- // FIXME: implement
+ // We don't traverse nodes in param_begin()/param_end(), as they
+ // appear in decls_begin()/decls_end() and thus are handled.
+ TRY_TO(TraverseStmt(D->getBody()));
})
DEF_TRAVERSE_DECL(ObjCPropertyDecl, {
@@ -1175,7 +1227,7 @@ DEF_TRAVERSE_DECL(ClassTemplateDecl, {
if (D->isThisDeclarationADefinition())
TRY_TO(TraverseClassInstantiations(D, D));
}
-
+
// Note that getInstantiatedFromMemberTemplate() is just a link
// from a template instantiation back to the template from which
// it was instantiated, and thus should not be traversed.
@@ -1208,10 +1260,10 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionInstantiations(
assert(false && "Unknown specialization kind.");
}
}
-
+
return true;
}
-
+
DEF_TRAVERSE_DECL(FunctionTemplateDecl, {
TRY_TO(TraverseDecl(D->getTemplatedDecl()));
TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
@@ -1251,7 +1303,7 @@ DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
})
DEF_TRAVERSE_DECL(TypedefDecl, {
- TRY_TO(TraverseType(D->getUnderlyingType()));
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
// We shouldn't traverse D->getTypeForDecl(); it's a result of
// declaring the typedef, not something that was written in the
// source.
@@ -1282,11 +1334,7 @@ bool RecursiveASTVisitor<Derived>::TraverseRecordHelper(
RecordDecl *D) {
// We shouldn't traverse D->getTypeForDecl(); it's a result of
// declaring the type, not something that was written in the source.
- //
- // The anonymous struct or union object is the variable or field
- // whose type is the anonymous struct or union. We shouldn't
- // traverse D->getAnonymousStructOrUnionObject(), as it's not
- // something that is explicitly written in the source.
+
TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
return true;
}
@@ -1380,6 +1428,8 @@ DEF_TRAVERSE_DECL(UnresolvedUsingValueDecl, {
TRY_TO(TraverseNestedNameSpecifier(D->getTargetNestedNameSpecifier()));
})
+DEF_TRAVERSE_DECL(IndirectFieldDecl, {})
+
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseDeclaratorHelper(DeclaratorDecl *D) {
TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
@@ -1412,47 +1462,11 @@ template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
- // Visit the function type itself, which can be either
- // FunctionNoProtoType or FunctionProtoType, or a typedef. If it's
- // not a Function*ProtoType, then it can't have a body or arguments,
- // so we have to do less work.
- Type *FuncType = D->getType().getTypePtr();
- if (FunctionProtoType *FuncProto = dyn_cast<FunctionProtoType>(FuncType)) {
- if (D->isThisDeclarationADefinition()) {
- // Don't call Traverse*, or the result type and parameter types
- // will be double counted.
- TRY_TO(WalkUpFromFunctionProtoType(FuncProto));
- } else {
- // This works around a bug in Clang that does not add the parameters
- // to decls_begin/end for function declarations (as opposed to
- // definitions):
- // http://llvm.org/PR7442
- // We work around this here by traversing the function type.
- // This isn't perfect because we don't traverse the default
- // values, if any. It also may not interact great with
- // templates. But it's the best we can do until the bug is
- // fixed.
- // FIXME: replace the entire 'if' statement with
- // TRY_TO(WalkUpFromFunctionProtoType(FuncProto));
- // when the bug is fixed.
- TRY_TO(TraverseFunctionProtoType(FuncProto));
- return true;
- }
- } else if (FunctionNoProtoType *FuncNoProto =
- dyn_cast<FunctionNoProtoType>(FuncType)) {
- // Don't call Traverse*, or the result type will be double
- // counted.
- TRY_TO(WalkUpFromFunctionNoProtoType(FuncNoProto));
- } else { // a typedef type, or who knows what
- assert(!D->isThisDeclarationADefinition() && "Unexpected function type");
- TRY_TO(TraverseType(D->getType()));
- return true;
- }
-
- TRY_TO(TraverseType(D->getResultType()));
-
// If we're an explicit template specialization, iterate over the
- // template args that were explicitly specified.
+ // template args that were explicitly specified. If we were doing
+ // this in typing order, we'd do it between the return type and the
+ // function args, but both of those are handled by the FunctionTypeLoc
+ // below, so we have to choose one side; I've chosen to do it beforehand.
if (const FunctionTemplateSpecializationInfo *FTSI =
D->getTemplateSpecializationInfo()) {
if (FTSI->getTemplateSpecializationKind() != TSK_Undeclared &&
@@ -1467,28 +1481,11 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
}
}
- for (FunctionDecl::param_iterator I = D->param_begin(), E = D->param_end();
- I != E; ++I) {
- TRY_TO(TraverseDecl(*I));
- }
-
- if (FunctionProtoType *FuncProto = dyn_cast<FunctionProtoType>(FuncType)) {
- if (D->isThisDeclarationADefinition()) {
- // This would be visited if we called TraverseType(D->getType())
- // above, but we don't (at least, not in the
- // declaration-is-a-definition case), in order to avoid duplicate
- // visiting for parameters. (We need to check parameters here,
- // rather than letting D->getType() do it, so we visit default
- // parameter values). So we need to re-do some of the work the
- // type would do.
- for (FunctionProtoType::exception_iterator
- E = FuncProto->exception_begin(),
- EEnd = FuncProto->exception_end();
- E != EEnd; ++E) {
- TRY_TO(TraverseType(*E));
- }
- }
- }
+ // Visit the function type itself, which can be either
+ // FunctionNoProtoType or FunctionProtoType, or a typedef. This
+ // also covers the return type and the function parameters,
+ // including exception specifications.
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
// Constructor initializers.
@@ -1554,7 +1551,7 @@ DEF_TRAVERSE_DECL(ImplicitParamDecl, {
DEF_TRAVERSE_DECL(NonTypeTemplateParmDecl, {
// A non-type template parameter, e.g. "S" in template<int S> class Foo ...
- TRY_TO(TraverseVarHelper(D));
+ TRY_TO(TraverseDeclaratorHelper(D));
TRY_TO(TraverseStmt(D->getDefaultArgument()));
})
@@ -1577,12 +1574,11 @@ DEF_TRAVERSE_DECL(ParmVarDecl, {
// ----------------- Stmt traversal -----------------
//
// For stmts, we automate (in the DEF_TRAVERSE_STMT macro) iterating
-// over the children defined in child_begin/child_end (every stmt
-// defines these, though sometimes the range is empty). Each
-// individual Traverse* method only needs to worry about children
-// other than those. To see what child_begin()/end() does for a given
-// class, see, e.g.,
-// http://clang.llvm.org/doxygen/Stmt_8cpp_source.html
+// over the children defined in children() (every stmt defines these,
+// though sometimes the range is empty). Each individual Traverse*
+// method only needs to worry about children other than those. To see
+// what children() does for a given class, see, e.g.,
+// http://clang.llvm.org/doxygen/Stmt_8cpp_source.html
// This macro makes available a variable S, the passed-in stmt.
#define DEF_TRAVERSE_STMT(STMT, CODE) \
@@ -1590,9 +1586,8 @@ template<typename Derived> \
bool RecursiveASTVisitor<Derived>::Traverse##STMT (STMT *S) { \
TRY_TO(WalkUpFrom##STMT(S)); \
{ CODE; } \
- for (Stmt::child_iterator C = S->child_begin(), CEnd = S->child_end(); \
- C != CEnd; ++C) { \
- TRY_TO(TraverseStmt(*C)); \
+ for (Stmt::child_range range = S->children(); range; ++range) { \
+ TRY_TO(TraverseStmt(*range)); \
} \
return true; \
}
@@ -1608,12 +1603,12 @@ DEF_TRAVERSE_STMT(AsmStmt, {
for (unsigned I = 0, E = S->getNumClobbers(); I < E; ++I) {
TRY_TO(TraverseStmt(S->getClobber(I)));
}
- // child_begin()/end() iterates over inputExpr and outputExpr.
+ // children() iterates over inputExpr and outputExpr.
})
DEF_TRAVERSE_STMT(CXXCatchStmt, {
TRY_TO(TraverseDecl(S->getExceptionDecl()));
- // child_begin()/end() iterates over the handler block.
+ // children() iterates over the handler block.
})
DEF_TRAVERSE_STMT(DeclStmt, {
@@ -1621,11 +1616,11 @@ DEF_TRAVERSE_STMT(DeclStmt, {
I != E; ++I) {
TRY_TO(TraverseDecl(*I));
}
- // Suppress the default iteration over child_begin/end by
+ // Suppress the default iteration over children() by
// returning. Here's why: A DeclStmt looks like 'type var [=
// initializer]'. The decls above already traverse over the
// initializers, so we don't have to do it again (which
- // child_begin/end would do).
+ // children() would do).
return true;
})
@@ -1652,17 +1647,16 @@ DEF_TRAVERSE_STMT(ObjCAtThrowStmt, { })
DEF_TRAVERSE_STMT(ObjCAtTryStmt, { })
DEF_TRAVERSE_STMT(ObjCForCollectionStmt, { })
DEF_TRAVERSE_STMT(ReturnStmt, { })
-DEF_TRAVERSE_STMT(SwitchCase, { })
DEF_TRAVERSE_STMT(SwitchStmt, { })
DEF_TRAVERSE_STMT(WhileStmt, { })
DEF_TRAVERSE_STMT(CXXDependentScopeMemberExpr, {
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
if (S->hasExplicitTemplateArgs()) {
TRY_TO(TraverseTemplateArgumentLocsHelper(
S->getTemplateArgs(), S->getNumTemplateArgs()));
}
- TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
})
DEF_TRAVERSE_STMT(DeclRefExpr, {
@@ -1695,27 +1689,27 @@ DEF_TRAVERSE_STMT(ImplicitCastExpr, {
})
DEF_TRAVERSE_STMT(CStyleCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXFunctionalCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXConstCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXDynamicCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXReinterpretCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXStaticCastExpr, {
- TRY_TO(TraverseType(S->getTypeAsWritten()));
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
})
// InitListExpr is a tricky one, because we want to do all our work on
@@ -1729,9 +1723,8 @@ bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(InitListExpr *S) {
S = Syn;
TRY_TO(WalkUpFromInitListExpr(S));
// All we need are the default actions. FIXME: use a helper function.
- for (Stmt::child_iterator C = S->child_begin(), CEnd = S->child_end();
- C != CEnd; ++C) {
- TRY_TO(TraverseStmt(*C));
+ for (Stmt::child_range range = S->children(); range; ++range) {
+ TRY_TO(TraverseStmt(*range));
}
return true;
}
@@ -1739,12 +1732,12 @@ bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(InitListExpr *S) {
DEF_TRAVERSE_STMT(CXXScalarValueInitExpr, {
// This is called for code like 'return T()' where T is a built-in
// (i.e. non-class) type.
- if (!S->isImplicit())
- TRY_TO(TraverseType(S->getType()));
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(CXXNewExpr, {
- TRY_TO(TraverseType(S->getAllocatedType()));
+ // The child-iterator will pick up the other arguments.
+ TRY_TO(TraverseTypeLoc(S->getAllocatedTypeSourceInfo()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(OffsetOfExpr, {
@@ -1769,15 +1762,43 @@ DEF_TRAVERSE_STMT(CXXTypeidExpr, {
TRY_TO(TraverseTypeLoc(S->getTypeOperandSourceInfo()->getTypeLoc()));
})
-DEF_TRAVERSE_STMT(TypesCompatibleExpr, {
- TRY_TO(TraverseTypeLoc(S->getArgTInfo1()->getTypeLoc()));
- TRY_TO(TraverseTypeLoc(S->getArgTInfo2()->getTypeLoc()));
+DEF_TRAVERSE_STMT(CXXUuidofExpr, {
+ // The child-iterator will pick up the arg if it's an expression,
+ // but not if it's a type.
+ if (S->isTypeOperand())
+ TRY_TO(TraverseTypeLoc(S->getTypeOperandSourceInfo()->getTypeLoc()));
})
DEF_TRAVERSE_STMT(UnaryTypeTraitExpr, {
- TRY_TO(TraverseType(S->getQueriedType()));
+ TRY_TO(TraverseTypeLoc(S->getQueriedTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(BinaryTypeTraitExpr, {
+ TRY_TO(TraverseTypeLoc(S->getLhsTypeSourceInfo()->getTypeLoc()));
+ TRY_TO(TraverseTypeLoc(S->getRhsTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(VAArgExpr, {
+ // The child-iterator will pick up the expression argument.
+ TRY_TO(TraverseTypeLoc(S->getWrittenTypeInfo()->getTypeLoc()));
})
+DEF_TRAVERSE_STMT(CXXTemporaryObjectExpr, {
+ // This is called for code like 'return T()' where T is a class type.
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXUnresolvedConstructExpr, {
+ // This is called for code like 'T()', where T is a template argument.
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+// These expressions all might take explicit template arguments.
+// We traverse those if so. FIXME: implement these.
+DEF_TRAVERSE_STMT(CXXConstructExpr, { })
+DEF_TRAVERSE_STMT(CallExpr, { })
+DEF_TRAVERSE_STMT(CXXMemberCallExpr, { })
+
// These exprs (most of them), do not need any action except iterating
// over the children.
DEF_TRAVERSE_STMT(AddrLabelExpr, { })
@@ -1790,53 +1811,64 @@ DEF_TRAVERSE_STMT(CXXBindTemporaryExpr, { })
DEF_TRAVERSE_STMT(CXXBoolLiteralExpr, { })
DEF_TRAVERSE_STMT(CXXDefaultArgExpr, { })
DEF_TRAVERSE_STMT(CXXDeleteExpr, { })
-DEF_TRAVERSE_STMT(CXXExprWithTemporaries, { })
+DEF_TRAVERSE_STMT(ExprWithCleanups, { })
DEF_TRAVERSE_STMT(CXXNullPtrLiteralExpr, { })
-DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, { })
+DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, {
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
+ if (TypeSourceInfo *ScopeInfo = S->getScopeTypeInfo())
+ TRY_TO(TraverseTypeLoc(ScopeInfo->getTypeLoc()));
+ if (TypeSourceInfo *DestroyedTypeInfo = S->getDestroyedTypeInfo())
+ TRY_TO(TraverseTypeLoc(DestroyedTypeInfo->getTypeLoc()));
+})
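For reference, the source form a CXXPseudoDestructorExpr models, which is why the qualifier and the two type source infos above need visiting:

    typedef int I;
    void destroy(I *p) {
      p->I::~I();   // pseudo-destructor call on a non-class type
    }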
DEF_TRAVERSE_STMT(CXXThisExpr, { })
DEF_TRAVERSE_STMT(CXXThrowExpr, { })
-DEF_TRAVERSE_STMT(CXXUnresolvedConstructExpr, { })
DEF_TRAVERSE_STMT(DesignatedInitExpr, { })
DEF_TRAVERSE_STMT(ExtVectorElementExpr, { })
DEF_TRAVERSE_STMT(GNUNullExpr, { })
DEF_TRAVERSE_STMT(ImplicitValueInitExpr, { })
DEF_TRAVERSE_STMT(ObjCEncodeExpr, { })
-DEF_TRAVERSE_STMT(ObjCImplicitSetterGetterRefExpr, { })
DEF_TRAVERSE_STMT(ObjCIsaExpr, { })
DEF_TRAVERSE_STMT(ObjCIvarRefExpr, { })
DEF_TRAVERSE_STMT(ObjCMessageExpr, { })
DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, { })
DEF_TRAVERSE_STMT(ObjCProtocolExpr, { })
DEF_TRAVERSE_STMT(ObjCSelectorExpr, { })
-DEF_TRAVERSE_STMT(ObjCSuperExpr, { })
DEF_TRAVERSE_STMT(ParenExpr, { })
DEF_TRAVERSE_STMT(ParenListExpr, { })
DEF_TRAVERSE_STMT(PredefinedExpr, { })
DEF_TRAVERSE_STMT(ShuffleVectorExpr, { })
DEF_TRAVERSE_STMT(StmtExpr, { })
-DEF_TRAVERSE_STMT(UnresolvedLookupExpr, { })
-DEF_TRAVERSE_STMT(UnresolvedMemberExpr, { })
-DEF_TRAVERSE_STMT(VAArgExpr, {
- // The child-iterator will pick up the expression argument.
- TRY_TO(TraverseTypeLoc(S->getWrittenTypeInfo()->getTypeLoc()));
- })
-DEF_TRAVERSE_STMT(CXXConstructExpr, { })
-
-DEF_TRAVERSE_STMT(CXXTemporaryObjectExpr, {
- // This is called for code like 'return T()' where T is a class type.
- TRY_TO(TraverseType(S->getType()));
- })
+DEF_TRAVERSE_STMT(UnresolvedLookupExpr, {
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ S->getNumTemplateArgs()));
+ }
+})
+
+DEF_TRAVERSE_STMT(UnresolvedMemberExpr, {
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ S->getNumTemplateArgs()));
+ }
+})
-DEF_TRAVERSE_STMT(CallExpr, { })
-DEF_TRAVERSE_STMT(CXXMemberCallExpr, { })
DEF_TRAVERSE_STMT(CXXOperatorCallExpr, { })
+DEF_TRAVERSE_STMT(OpaqueValueExpr, { })
+DEF_TRAVERSE_STMT(CUDAKernelCallExpr, { })
// These operators (all of them) do not need any action except
// iterating over the children.
+DEF_TRAVERSE_STMT(BinaryConditionalOperator, { })
DEF_TRAVERSE_STMT(ConditionalOperator, { })
DEF_TRAVERSE_STMT(UnaryOperator, { })
DEF_TRAVERSE_STMT(BinaryOperator, { })
DEF_TRAVERSE_STMT(CompoundAssignOperator, { })
+DEF_TRAVERSE_STMT(CXXNoexceptExpr, { })
+DEF_TRAVERSE_STMT(PackExpansionExpr, { })
+DEF_TRAVERSE_STMT(SizeOfPackExpr, { })
+DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmPackExpr, { })
// These literals (all of them) do not need any action.
DEF_TRAVERSE_STMT(IntegerLiteral, { })
@@ -1861,7 +1893,6 @@ DEF_TRAVERSE_STMT(ObjCStringLiteral, { })
// http://clang.llvm.org/doxygen/classclang_1_1CXXTypeidExpr.html
// http://clang.llvm.org/doxygen/classclang_1_1SizeOfAlignOfExpr.html
// http://clang.llvm.org/doxygen/classclang_1_1TypesCompatibleExpr.html
-// http://clang.llvm.org/doxygen/classclang_1_1CXXUnresolvedConstructExpr.html
// Every class that has getQualifier.
#undef DEF_TRAVERSE_STMT
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
index ba77829..e87ca78 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
@@ -109,25 +109,7 @@ public:
/// \brief Set the previous declaration. If PrevDecl is NULL, set this as the
/// first and only declaration.
- void setPreviousDeclaration(decl_type *PrevDecl) {
- decl_type *First;
-
- if (PrevDecl) {
- // Point to previous. Make sure that this is actually the most recent
- // redeclaration, or we can build invalid chains. If the most recent
- // redeclaration is invalid, it won't be PrevDecl, but we want it anyway.
- RedeclLink = PreviousDeclLink(llvm::cast<decl_type>(
- PrevDecl->getMostRecentDeclaration()));
- First = PrevDecl->getFirstDeclaration();
- assert(First->RedeclLink.NextIsLatest() && "Expected first");
- } else {
- // Make this first.
- First = static_cast<decl_type*>(this);
- }
-
- // First one will point to this one as latest.
- First->RedeclLink = LatestDeclLink(static_cast<decl_type*>(this));
- }
+ void setPreviousDeclaration(decl_type *PrevDecl);
/// \brief Iterates through all the redeclarations of the same decl.
class redecl_iterator {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
index 62a6b64..7ede9ce 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -20,7 +20,6 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtIterator.h"
#include "clang/AST/DeclGroup.h"
-#include "clang/AST/FullExpr.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/AST/ASTContext.h"
#include <string>
@@ -104,13 +103,7 @@ public:
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
-};
-private:
- /// \brief The statement class.
- const unsigned sClass : 8;
-
- /// \brief The reference count for this statement.
- unsigned RefCount : 24;
+ };
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
@@ -122,6 +115,77 @@ protected:
assert(0 && "Stmts cannot be released with regular 'delete'.");
}
+ class StmtBitfields {
+ friend class Stmt;
+
+ /// \brief The statement class.
+ unsigned sClass : 8;
+ };
+ enum { NumStmtBits = 8 };
+
+ class CompoundStmtBitfields {
+ friend class CompoundStmt;
+ unsigned : NumStmtBits;
+
+ unsigned NumStmts : 32 - NumStmtBits;
+ };
+
+ class ExprBitfields {
+ friend class Expr;
+ friend class DeclRefExpr; // computeDependence
+ friend class InitListExpr; // ctor
+ friend class DesignatedInitExpr; // ctor
+ friend class BlockDeclRefExpr; // ctor
+ friend class ASTStmtReader; // deserialization
+ friend class CXXNewExpr; // ctor
+ friend class DependentScopeDeclRefExpr; // ctor
+ friend class CXXConstructExpr; // ctor
+ friend class CallExpr; // ctor
+ friend class OffsetOfExpr; // ctor
+ friend class ObjCMessageExpr; // ctor
+ friend class ShuffleVectorExpr; // ctor
+ friend class ParenListExpr; // ctor
+ friend class CXXUnresolvedConstructExpr; // ctor
+ friend class CXXDependentScopeMemberExpr; // ctor
+ friend class OverloadExpr; // ctor
+ unsigned : NumStmtBits;
+
+ unsigned ValueKind : 2;
+ unsigned ObjectKind : 2;
+ unsigned TypeDependent : 1;
+ unsigned ValueDependent : 1;
+ unsigned ContainsUnexpandedParameterPack : 1;
+ };
+ enum { NumExprBits = 15 };
+
+ class CastExprBitfields {
+ friend class CastExpr;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 6;
+ unsigned BasePathSize : 32 - 6 - NumExprBits;
+ };
+
+ class CallExprBitfields {
+ friend class CallExpr;
+ unsigned : NumExprBits;
+
+ unsigned NumPreArgs : 1;
+ };
+
+ union {
+ // FIXME: this is wasteful on 64-bit platforms.
+ void *Aligner;
+
+ StmtBitfields StmtBits;
+ CompoundStmtBitfields CompoundStmtBits;
+ ExprBitfields ExprBits;
+ CastExprBitfields CastExprBits;
+ CallExprBitfields CallExprBits;
+ };
+
+ friend class ASTStmtReader;
+
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
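A toy reproduction of the overlay pattern, with hypothetical names: each per-class struct reserves the bits of the classes beneath it with an unnamed bitfield, so every member of the union can share the same word. This mirrors the header's scheme; it is not code from the commit.

    class Node {
    protected:
      struct NodeBitfields {
        unsigned Kind : 8;
      };
      enum { NumNodeBits = 8 };
      struct CallBitfields {
        unsigned : NumNodeBits;    // reserve the base class's bits
        unsigned NumPreArgs : 1;
      };
      union {
        NodeBitfields NodeBits;
        CallBitfields CallBits;
      };
    public:
      Node() { NodeBits.Kind = 0; }
    };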
@@ -152,44 +216,27 @@ public:
protected:
/// \brief Construct an empty statement.
- explicit Stmt(StmtClass SC, EmptyShell) : sClass(SC), RefCount(1) {
+ explicit Stmt(StmtClass SC, EmptyShell) {
+ StmtBits.sClass = SC;
if (Stmt::CollectingStats()) Stmt::addStmtClass(SC);
}
public:
- Stmt(StmtClass SC) : sClass(SC), RefCount(1) {
+ Stmt(StmtClass SC) {
+ StmtBits.sClass = SC;
if (Stmt::CollectingStats()) Stmt::addStmtClass(SC);
}
- virtual ~Stmt() {}
-
-#ifndef NDEBUG
- /// \brief True if this statement's refcount is in a valid state.
- /// Should be used only in assertions.
- bool isRetained() const {
- return (RefCount >= 1);
- }
-#endif
-
- /// \brief Increases the reference count for this statement.
- ///
- /// Invoke the Retain() operation when this statement or expression
- /// is being shared by another owner.
- Stmt *Retain() {
- assert(RefCount >= 1);
- ++RefCount;
- return this;
- }
StmtClass getStmtClass() const {
- assert(RefCount >= 1 && "Referencing already-destroyed statement!");
- return (StmtClass)sClass;
+ return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
- virtual SourceRange getSourceRange() const = 0;
+ SourceRange getSourceRange() const;
+
SourceLocation getLocStart() const { return getSourceRange().getBegin(); }
SourceLocation getLocEnd() const { return getSourceRange().getEnd(); }
@@ -236,22 +283,25 @@ public:
/// within CFGs.
bool hasImplicitControlFlow() const;
- /// Child Iterators: All subclasses must implement child_begin and child_end
- /// to permit easy iteration over the substatements/subexpessions of an
- /// AST node. This permits easy iteration over all nodes in the AST.
+ /// Child Iterators: All subclasses must implement 'children'
+ /// to permit easy iteration over the substatements/subexpressions of an
+ /// AST node. This permits easy iteration over all nodes in the AST.
typedef StmtIterator child_iterator;
typedef ConstStmtIterator const_child_iterator;
- virtual child_iterator child_begin() = 0;
- virtual child_iterator child_end() = 0;
+ typedef StmtRange child_range;
+ typedef ConstStmtRange const_child_range;
- const_child_iterator child_begin() const {
- return const_child_iterator(const_cast<Stmt*>(this)->child_begin());
+ child_range children();
+ const_child_range children() const {
+ return const_cast<Stmt*>(this)->children();
}
- const_child_iterator child_end() const {
- return const_child_iterator(const_cast<Stmt*>(this)->child_end());
- }
+ child_iterator child_begin() { return children().first; }
+ child_iterator child_end() { return children().second; }
+
+ const_child_iterator child_begin() const { return children().first; }
+ const_child_iterator child_end() const { return children().second; }
/// \brief Produce a unique representation of the given statement.
///
@@ -265,7 +315,7 @@ public:
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
- void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical);
};
@@ -314,8 +364,10 @@ public:
static bool classof(const DeclStmt *) { return true; }
// Iterators over subexpressions.
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(child_iterator(DG.begin(), DG.end()),
+ child_iterator(DG.end(), DG.end()));
+ }
typedef DeclGroupRef::iterator decl_iterator;
typedef DeclGroupRef::const_iterator const_decl_iterator;
@@ -330,8 +382,16 @@ public:
///
class NullStmt : public Stmt {
SourceLocation SemiLoc;
+
+ /// \brief Whether the null statement was preceded by an empty macro, e.g:
+ /// @code
+ /// #define CALL(x)
+ /// CALL(0);
+ /// @endcode
+ bool LeadingEmptyMacro;
public:
- NullStmt(SourceLocation L) : Stmt(NullStmtClass), SemiLoc(L) {}
+ NullStmt(SourceLocation L, bool LeadingEmptyMacro = false)
+ : Stmt(NullStmtClass), SemiLoc(L), LeadingEmptyMacro(LeadingEmptyMacro) {}
/// \brief Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) { }
@@ -339,55 +399,66 @@ public:
SourceLocation getSemiLoc() const { return SemiLoc; }
void setSemiLoc(SourceLocation L) { SemiLoc = L; }
- virtual SourceRange getSourceRange() const { return SourceRange(SemiLoc); }
+ bool hasLeadingEmptyMacro() const { return LeadingEmptyMacro; }
+
+ SourceRange getSourceRange() const { return SourceRange(SemiLoc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
static bool classof(const NullStmt *) { return true; }
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
Stmt** Body;
- unsigned NumStmts;
SourceLocation LBracLoc, RBracLoc;
public:
- CompoundStmt(ASTContext& C, Stmt **StmtStart, unsigned numStmts,
- SourceLocation LB, SourceLocation RB)
- : Stmt(CompoundStmtClass), NumStmts(numStmts), LBracLoc(LB), RBracLoc(RB) {
+ CompoundStmt(ASTContext& C, Stmt **StmtStart, unsigned NumStmts,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) {
+ CompoundStmtBits.NumStmts = NumStmts;
+
if (NumStmts == 0) {
Body = 0;
return;
}
Body = new (C) Stmt*[NumStmts];
- memcpy(Body, StmtStart, numStmts * sizeof(*Body));
+ memcpy(Body, StmtStart, NumStmts * sizeof(*Body));
}
// \brief Build an empty compound statement.
explicit CompoundStmt(EmptyShell Empty)
- : Stmt(CompoundStmtClass, Empty), Body(0), NumStmts(0) { }
+ : Stmt(CompoundStmtClass, Empty), Body(0) {
+ CompoundStmtBits.NumStmts = 0;
+ }
void setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts);
- bool body_empty() const { return NumStmts == 0; }
- unsigned size() const { return NumStmts; }
+ bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
+ unsigned size() const { return CompoundStmtBits.NumStmts; }
typedef Stmt** body_iterator;
body_iterator body_begin() { return Body; }
- body_iterator body_end() { return Body + NumStmts; }
- Stmt *body_back() { return NumStmts ? Body[NumStmts-1] : 0; }
+ body_iterator body_end() { return Body + size(); }
+ Stmt *body_back() { return !body_empty() ? Body[size()-1] : 0; }
+
+ void setLastStmt(Stmt *S) {
+ assert(!body_empty() && "setLastStmt");
+ Body[size()-1] = S;
+ }
typedef Stmt* const * const_body_iterator;
const_body_iterator body_begin() const { return Body; }
- const_body_iterator body_end() const { return Body + NumStmts; }
- const Stmt *body_back() const { return NumStmts ? Body[NumStmts-1] : 0; }
+ const_body_iterator body_end() const { return Body + size(); }
+ const Stmt *body_back() const { return !body_empty() ? Body[size()-1] : 0; }
typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
reverse_body_iterator body_rbegin() {
@@ -408,7 +479,7 @@ public:
return const_reverse_body_iterator(body_begin());
}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(LBracLoc, RBracLoc);
}
@@ -423,8 +494,9 @@ public:
static bool classof(const CompoundStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
+ }
};
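The replacement iteration idiom, lifted from the DEF_TRAVERSE_STMT loop earlier in this commit; the helper is illustrative, and the null check matters because some child slots (e.g. a ForStmt's optional parts) may be null.

    #include "clang/AST/Stmt.h"
    using namespace clang;

    void forEachChild(Stmt *S) {
      for (Stmt::child_range range = S->children(); range; ++range)
        if (Stmt *Child = *range)
          (void) Child;   // ... visit Child ...
    }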
// SwitchCase is the base class for CaseStmt and DefaultStmt,
@@ -443,17 +515,15 @@ public:
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
- Stmt *getSubStmt() { return v_getSubStmt(); }
+ Stmt *getSubStmt();
- virtual SourceRange getSourceRange() const { return SourceRange(); }
+ SourceRange getSourceRange() const { return SourceRange(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
- T->getStmtClass() == DefaultStmtClass;
+ T->getStmtClass() == DefaultStmtClass;
}
static bool classof(const SwitchCase *) { return true; }
-protected:
- virtual Stmt* v_getSubStmt() = 0;
};
class CaseStmt : public SwitchCase {
@@ -463,8 +533,6 @@ class CaseStmt : public SwitchCase {
SourceLocation CaseLoc;
SourceLocation EllipsisLoc;
SourceLocation ColonLoc;
-
- virtual Stmt* v_getSubStmt() { return getSubStmt(); }
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
@@ -504,7 +572,7 @@ public:
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
@@ -518,15 +586,15 @@ public:
static bool classof(const CaseStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
+ }
};
class DefaultStmt : public SwitchCase {
Stmt* SubStmt;
SourceLocation DefaultLoc;
SourceLocation ColonLoc;
- virtual Stmt* v_getSubStmt() { return getSubStmt(); }
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
SwitchCase(DefaultStmtClass), SubStmt(substmt), DefaultLoc(DL),
@@ -544,7 +612,7 @@ public:
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(DefaultLoc, SubStmt->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -553,42 +621,43 @@ public:
static bool classof(const DefaultStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
+
+/// LabelStmt - Represents a label, which has a substatement. For example:
+/// foo: return;
+///
class LabelStmt : public Stmt {
- IdentifierInfo *Label;
+ LabelDecl *TheDecl;
Stmt *SubStmt;
SourceLocation IdentLoc;
public:
- LabelStmt(SourceLocation IL, IdentifierInfo *label, Stmt *substmt)
- : Stmt(LabelStmtClass), Label(label),
- SubStmt(substmt), IdentLoc(IL) {}
+ LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
+ : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) {
+ }
// \brief Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }
SourceLocation getIdentLoc() const { return IdentLoc; }
- IdentifierInfo *getID() const { return Label; }
- void setID(IdentifierInfo *II) { Label = II; }
+ LabelDecl *getDecl() const { return TheDecl; }
+ void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setIdentLoc(SourceLocation L) { IdentLoc = L; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(IdentLoc, SubStmt->getLocEnd());
}
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
static bool classof(const LabelStmt *) { return true; }
-
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
};
@@ -635,22 +704,23 @@ public:
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
if (SubExprs[ELSE])
return SourceRange(IfLoc, SubExprs[ELSE]->getLocEnd());
else
return SourceRange(IfLoc, SubExprs[THEN]->getLocEnd());
}
+ // Iterators over subexpressions. The iterators will include iterating
+ // over the initialization expression referenced by the condition variable.
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
static bool classof(const IfStmt *) { return true; }
-
- // Iterators over subexpressions. The iterators will include iterating
- // over the initialization expression referenced by the condition variable.
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
};
/// SwitchStmt - This represents a 'switch' stmt.
@@ -662,6 +732,11 @@ class SwitchStmt : public Stmt {
SwitchCase *FirstCase;
SourceLocation SwitchLoc;
+ /// If the SwitchStmt is a switch on an enum value, this records whether
+ /// all the enum values were covered by CaseStmts. It is only a hint,
+ /// intended for clients such as diagnostics.
+ unsigned AllEnumCasesCovered : 1;
+
public:
SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond);
@@ -705,21 +780,34 @@ public:
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() && "case/default already added to a switch");
- SC->Retain();
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
- virtual SourceRange getSourceRange() const {
+
+ /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
+ /// switch over an enum value then all cases have been explicitly covered.
+ void setAllEnumCasesCovered() {
+ AllEnumCasesCovered = 1;
+ }
+
+ /// Returns true if the SwitchStmt is a switch of an enum value and all cases
+ /// have been explicitly covered.
+ bool isAllEnumCasesCovered() const {
+ return (bool) AllEnumCasesCovered;
+ }
+
+ SourceRange getSourceRange() const {
return SourceRange(SwitchLoc, SubExprs[BODY]->getLocEnd());
}
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
static bool classof(const SwitchStmt *) { return true; }
-
- // Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
};
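
The AllEnumCasesCovered bit above is advisory only. A minimal sketch of a client, where warnOnMissingDefault and the suggested diagnostic are purely hypothetical and not part of this patch:

    // Hypothetical consumer of the coverage hint added above.
    void warnOnMissingDefault(const SwitchStmt *SS) {
      // If every enumerator is matched by a CaseStmt, a default label
      // would be unreachable, so there is nothing to warn about.
      if (SS->isAllEnumCasesCovered())
        return;
      // ... otherwise, suggest adding a default: label ...
    }
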
@@ -757,7 +845,7 @@ public:
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(WhileLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -766,14 +854,15 @@ public:
static bool classof(const WhileStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
};
/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
- enum { COND, BODY, END_EXPR };
+ enum { BODY, COND, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation DoLoc;
SourceLocation WhileLoc;
@@ -805,7 +894,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(DoLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -814,8 +903,9 @@ public:
static bool classof(const DoStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
};
@@ -870,7 +960,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -879,32 +969,33 @@ public:
static bool classof(const ForStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
};
/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
- LabelStmt *Label;
+ LabelDecl *Label;
SourceLocation GotoLoc;
SourceLocation LabelLoc;
public:
- GotoStmt(LabelStmt *label, SourceLocation GL, SourceLocation LL)
+ GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}
/// \brief Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }
- LabelStmt *getLabel() const { return Label; }
- void setLabel(LabelStmt *S) { Label = S; }
+ LabelDecl *getLabel() const { return Label; }
+ void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(GotoLoc, LabelLoc);
}
static bool classof(const Stmt *T) {
@@ -913,8 +1004,7 @@ public:
static bool classof(const GotoStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
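
GotoStmt now records a LabelDecl rather than pointing directly at the LabelStmt. A sketch of resolving a goto back to the labeled statement, assuming LabelDecl exposes its defining LabelStmt through a getStmt() accessor (introduced elsewhere in this change, not shown in this hunk):

    // Illustrative only: map a goto back to the labeled statement.
    Stmt *resolveGotoTarget(GotoStmt *GS) {
      LabelDecl *LD = GS->getLabel();
      return LD ? LD->getStmt() : 0; // getStmt() assumed on LabelDecl
    }
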
/// IndirectGotoStmt - This represents an indirect goto.
@@ -938,11 +1028,18 @@ public:
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
- Expr *getTarget();
- const Expr *getTarget() const;
+ Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
+ const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
- virtual SourceRange getSourceRange() const {
+ /// getConstantTarget - Returns the fixed target of this indirect
+ /// goto, if one exists.
+ LabelDecl *getConstantTarget();
+ const LabelDecl *getConstantTarget() const {
+ return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
+ }
+
+ SourceRange getSourceRange() const {
return SourceRange(GotoLoc, Target->getLocEnd());
}
@@ -952,8 +1049,7 @@ public:
static bool classof(const IndirectGotoStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Target, &Target+1); }
};
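
getConstantTarget() lets clients treat an indirect goto whose address expression folds to a single &&label like a direct branch. A hypothetical query built on it:

    // Sketch: an indirect goto with a known constant target can be
    // rewired as an ordinary CFG edge by interested clients.
    bool hasFixedTarget(const IndirectGotoStmt *IGS) {
      return IGS->getConstantTarget() != 0;
    }
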
@@ -970,7 +1066,7 @@ public:
SourceLocation getContinueLoc() const { return ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(ContinueLoc);
}
@@ -980,8 +1076,7 @@ public:
static bool classof(const ContinueStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
/// BreakStmt - This represents a break.
@@ -997,7 +1092,7 @@ public:
SourceLocation getBreakLoc() const { return BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakLoc = L; }
- virtual SourceRange getSourceRange() const { return SourceRange(BreakLoc); }
+ SourceRange getSourceRange() const { return SourceRange(BreakLoc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
@@ -1005,8 +1100,7 @@ public:
static bool classof(const BreakStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(); }
};
@@ -1050,7 +1144,7 @@ public:
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
@@ -1058,8 +1152,10 @@ public:
static bool classof(const ReturnStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
+ return child_range();
+ }
};
/// AsmStmt - This represents a GNU inline-assembly statement extension.
@@ -1257,7 +1353,7 @@ public:
StringLiteral *getClobber(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobber(unsigned i) const { return Clobbers[i]; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AsmLoc, RParenLoc);
}
@@ -1304,10 +1400,9 @@ public:
return &Exprs[0] + NumOutputs;
}
- // Child iterators
-
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
+ }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
index 0508f35..f08815f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
@@ -37,7 +37,7 @@ public:
CXXCatchStmt(EmptyShell Empty)
: Stmt(CXXCatchStmtClass), ExceptionDecl(0), HandlerBlock(0) {}
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(CatchLoc, HandlerBlock->getLocEnd());
}
@@ -51,8 +51,7 @@ public:
}
static bool classof(const CXXCatchStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&HandlerBlock, &HandlerBlock+1); }
friend class ASTStmtReader;
};
@@ -84,7 +83,7 @@ public:
static CXXTryStmt *Create(ASTContext &C, EmptyShell Empty,
unsigned numHandlers);
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(getTryLoc(), getEndLoc());
}
@@ -113,8 +112,9 @@ public:
}
static bool classof(const CXXTryStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(getStmts(), getStmts() + getNumHandlers() + 1);
+ }
friend class ASTStmtReader;
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
index 4da2e34..851c001 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
@@ -1,4 +1,4 @@
-//===--- StmtIterator.h - Iterators for Statements ------------------------===//
+//===--- StmtIterator.h - Iterators for Statements --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
#ifndef LLVM_CLANG_AST_STMT_ITR_H
#define LLVM_CLANG_AST_STMT_ITR_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstddef>
#include <iterator>
@@ -51,11 +51,11 @@ protected:
return (RawVAPtr & Flags) == 0;
}
- VariableArrayType* getVAPtr() const {
- return reinterpret_cast<VariableArrayType*>(RawVAPtr & ~Flags);
+ const VariableArrayType *getVAPtr() const {
+ return reinterpret_cast<const VariableArrayType*>(RawVAPtr & ~Flags);
}
- void setVAPtr(VariableArrayType* P) {
+ void setVAPtr(const VariableArrayType *P) {
assert (inDecl() || inDeclGroup() || inSizeOfTypeVA());
RawVAPtr = reinterpret_cast<uintptr_t>(P) | (RawVAPtr & Flags);
}
@@ -68,7 +68,7 @@ protected:
StmtIteratorBase(Stmt **s) : stmt(s), decl(0), RawVAPtr(0) {}
StmtIteratorBase(Decl *d, Stmt **s);
- StmtIteratorBase(VariableArrayType *t);
+ StmtIteratorBase(const VariableArrayType *t);
StmtIteratorBase(Decl **dgi, Decl **dge);
StmtIteratorBase() : stmt(0), decl(0), RawVAPtr(0) {}
};
@@ -86,7 +86,7 @@ public:
StmtIteratorImpl(Stmt **s) : StmtIteratorBase(s) {}
StmtIteratorImpl(Decl **dgi, Decl **dge) : StmtIteratorBase(dgi, dge) {}
StmtIteratorImpl(Decl *d, Stmt **s) : StmtIteratorBase(d, s) {}
- StmtIteratorImpl(VariableArrayType* t) : StmtIteratorBase(t) {}
+ StmtIteratorImpl(const VariableArrayType *t) : StmtIteratorBase(t) {}
DERIVED& operator++() {
if (inDecl() || inDeclGroup()) {
@@ -130,7 +130,7 @@ struct StmtIterator : public StmtIteratorImpl<StmtIterator,Stmt*&> {
StmtIterator(Decl** dgi, Decl** dge)
: StmtIteratorImpl<StmtIterator,Stmt*&>(dgi, dge) {}
- StmtIterator(VariableArrayType* t)
+ StmtIterator(const VariableArrayType *t)
: StmtIteratorImpl<StmtIterator,Stmt*&>(t) {}
StmtIterator(Decl* D, Stmt **s = 0)
@@ -146,6 +146,86 @@ struct ConstStmtIterator : public StmtIteratorImpl<ConstStmtIterator,
StmtIteratorImpl<ConstStmtIterator,const Stmt*>(RHS) {}
};
+/// A range of statement iterators.
+///
+/// This class provides some extra functionality beyond std::pair
+/// in order to allow the following idiom:
+/// for (StmtRange range = stmt->children(); range; ++range)
+struct StmtRange : std::pair<StmtIterator,StmtIterator> {
+ StmtRange() {}
+ StmtRange(const StmtIterator &begin, const StmtIterator &end)
+ : std::pair<StmtIterator,StmtIterator>(begin, end) {}
+
+ bool empty() const { return first == second; }
+ operator bool() const { return !empty(); }
+
+ Stmt *operator->() const { return first.operator->(); }
+ Stmt *&operator*() const { return first.operator*(); }
+
+ StmtRange &operator++() {
+ assert(!empty() && "incrementing on empty range");
+ ++first;
+ return *this;
+ }
+
+ StmtRange operator++(int) {
+ assert(!empty() && "incrementing on empty range");
+ StmtRange copy = *this;
+ ++first;
+ return copy;
+ }
+
+ friend const StmtIterator &begin(const StmtRange &range) {
+ return range.first;
+ }
+ friend const StmtIterator &end(const StmtRange &range) {
+ return range.second;
+ }
+};
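
The idiom from the comment above, written out. Here visit() is a placeholder for the caller's per-child work, and children() is assumed to hand back this StmtRange (the Stmt class in this change defines its child_range in terms of it):

    void visitChildren(Stmt *S) {
      for (StmtRange R = S->children(); R; ++R)
        visit(*R); // *R is a Stmt*& into the parent's own storage
    }

Because operator*() returns a reference to the stored pointer, a visitor can also rewrite a child in place through *R.
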
+
+/// A range of const statement iterators.
+///
+/// This class provides some extra functionality beyond std::pair
+/// in order to allow the following idiom:
+/// for (ConstStmtRange range = stmt->children(); range; ++range)
+struct ConstStmtRange : std::pair<ConstStmtIterator,ConstStmtIterator> {
+ ConstStmtRange() {}
+ ConstStmtRange(const ConstStmtIterator &begin,
+ const ConstStmtIterator &end)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(begin, end) {}
+ ConstStmtRange(const StmtRange &range)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(range.first, range.second)
+ {}
+ ConstStmtRange(const StmtIterator &begin, const StmtIterator &end)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(begin, end) {}
+
+ bool empty() const { return first == second; }
+ operator bool() const { return !empty(); }
+
+ const Stmt *operator->() const { return first.operator->(); }
+ const Stmt *operator*() const { return first.operator*(); }
+
+ ConstStmtRange &operator++() {
+ assert(!empty() && "incrementing on empty range");
+ ++first;
+ return *this;
+ }
+
+ ConstStmtRange operator++(int) {
+ assert(!empty() && "incrementing on empty range");
+ ConstStmtRange copy = *this;
+ ++first;
+ return copy;
+ }
+
+ friend const ConstStmtIterator &begin(const ConstStmtRange &range) {
+ return range.first;
+ }
+ friend const ConstStmtIterator &end(const ConstStmtRange &range) {
+ return range.second;
+ }
+};
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
index 269aa4c..1800a71 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
@@ -55,7 +55,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -64,8 +64,9 @@ public:
static bool classof(const ObjCForCollectionStmt *) { return true; }
// Iterators
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
+ }
};
/// ObjCAtCatchStmt - This represents objective-c's @catch statement.
@@ -102,7 +103,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtCatchLoc, Body->getLocEnd());
}
@@ -113,8 +114,7 @@ public:
}
static bool classof(const ObjCAtCatchStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Body, &Body + 1); }
};
/// ObjCAtFinallyStmt - This represent objective-c's @finally Statement
@@ -133,7 +133,7 @@ public:
Stmt *getFinallyBody() { return AtFinallyStmt; }
void setFinallyBody(Stmt *S) { AtFinallyStmt = S; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtFinallyLoc, AtFinallyStmt->getLocEnd());
}
@@ -145,8 +145,9 @@ public:
}
static bool classof(const ObjCAtFinallyStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&AtFinallyStmt, &AtFinallyStmt+1);
+ }
};
/// ObjCAtTryStmt - This represent objective-c's over-all
@@ -239,15 +240,17 @@ public:
getStmts()[1 + NumCatchStmts] = S;
}
- virtual SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtTryStmtClass;
}
static bool classof(const ObjCAtTryStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(getStmts(),
+ getStmts() + 1 + NumCatchStmts + HasFinally);
+ }
};
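
The children() range above assumes the trailing-storage layout [@try body][catch 1..N][@finally, if present]. The element count it spans, spelled out:

    // HasFinally contributes one trailing slot only when set.
    unsigned numAtTryChildren(unsigned NumCatchStmts, bool HasFinally) {
      return 1 + NumCatchStmts + (HasFinally ? 1 : 0);
    }
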
/// ObjCAtSynchronizedStmt - This is for objective-c's @synchronized statement.
@@ -291,7 +294,7 @@ public:
}
void setSynchExpr(Stmt *S) { SubStmts[SYNC_EXPR] = S; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
return SourceRange(AtSynchronizedLoc, getSynchBody()->getLocEnd());
}
@@ -300,8 +303,9 @@ public:
}
static bool classof(const ObjCAtSynchronizedStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() {
+ return child_range(&SubStmts[0], &SubStmts[0]+END_EXPR);
+ }
};
/// ObjCAtThrowStmt - This represents objective-c's @throw statement.
@@ -323,7 +327,7 @@ public:
SourceLocation getThrowLoc() { return AtThrowLoc; }
void setThrowLoc(SourceLocation Loc) { AtThrowLoc = Loc; }
- virtual SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const {
if (Throw)
return SourceRange(AtThrowLoc, Throw->getLocEnd());
else
@@ -335,8 +339,7 @@ public:
}
static bool classof(const ObjCAtThrowStmt *) { return true; }
- virtual child_iterator child_begin();
- virtual child_iterator child_end();
+ child_range children() { return child_range(&Throw, &Throw+1); }
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
index 7d5123f..a4e074e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -23,6 +23,7 @@
namespace llvm {
class FoldingSetNodeID;
+ class raw_ostream;
}
namespace clang {
@@ -30,26 +31,14 @@ namespace clang {
class Decl;
class DiagnosticBuilder;
class Expr;
+struct PrintingPolicy;
class TypeSourceInfo;
/// \brief Represents a template argument within a class template
/// specialization.
class TemplateArgument {
- union {
- uintptr_t TypeOrValue;
- struct {
- char Value[sizeof(llvm::APSInt)];
- void *Type;
- } Integer;
- struct {
- TemplateArgument *Args;
- unsigned NumArgs;
- bool CopyArgs;
- } Args;
- };
-
public:
- /// \brief The type of template argument we're storing.
+ /// \brief The kind of template argument we're storing.
enum ArgKind {
/// \brief Represents an empty template argument, e.g., one that has not
/// been deduced.
@@ -66,16 +55,42 @@ public:
/// The template argument is a template name that was provided for a
/// template template parameter.
Template,
+ /// The template argument is a pack expansion of a template name that was
+ /// provided for a template template parameter.
+ TemplateExpansion,
/// The template argument is a value- or type-dependent expression
/// stored in an Expr*.
Expression,
/// The template argument is actually a parameter pack. Arguments are stored
/// in the Args struct.
Pack
- } Kind;
+ };
+
+private:
+ /// \brief The kind of template argument we're storing.
+ unsigned Kind;
+ union {
+ uintptr_t TypeOrValue;
+ struct {
+ char Value[sizeof(llvm::APSInt)];
+ void *Type;
+ } Integer;
+ struct {
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ } Args;
+ struct {
+ void *Name;
+ unsigned NumExpansions;
+ } TemplateArg;
+ };
+
+ TemplateArgument(TemplateName, bool); // DO NOT USE
+
+public:
/// \brief Construct an empty, invalid template argument.
- TemplateArgument() : TypeOrValue(0), Kind(Null) { }
+ TemplateArgument() : Kind(Null), TypeOrValue(0) { }
/// \brief Construct a template type argument.
TemplateArgument(QualType T) : Kind(Type) {
@@ -92,6 +107,8 @@ public:
/// \brief Construct an integral constant template argument.
TemplateArgument(const llvm::APSInt &Value, QualType Type) : Kind(Integral) {
+ // FIXME: Large integral values will get leaked. Do something
+ // similar to what we did with IntegerLiteral.
new (Integer.Value) llvm::APSInt(Value);
Integer.Type = Type.getAsOpaquePtr();
}
@@ -102,10 +119,35 @@ public:
/// parameters. However, the template name could be a dependent template
/// name that ends up being instantiated to a function template whose address
/// is taken.
- TemplateArgument(TemplateName Name) : Kind(Template) {
- TypeOrValue = reinterpret_cast<uintptr_t>(Name.getAsVoidPointer());
+ ///
+ /// \param Name The template name.
+ TemplateArgument(TemplateName Name) : Kind(Template)
+ {
+ TemplateArg.Name = Name.getAsVoidPointer();
+ TemplateArg.NumExpansions = 0;
}
-
+
+ /// \brief Construct a template argument that is a template pack expansion.
+ ///
+ /// This form of template argument is generally used for template template
+ /// parameters. However, the template name could be a dependent template
+ /// name that ends up being instantiated to a function template whose address
+ /// is taken.
+ ///
+ /// \param Name The template name.
+ ///
+ /// \param NumExpansions The number of expansions that will be generated by
+ /// instantiating this pack expansion, if known.
+ TemplateArgument(TemplateName Name, llvm::Optional<unsigned> NumExpansions)
+ : Kind(TemplateExpansion)
+ {
+ TemplateArg.Name = Name.getAsVoidPointer();
+ if (NumExpansions)
+ TemplateArg.NumExpansions = *NumExpansions + 1;
+ else
+ TemplateArg.NumExpansions = 0;
+ }
+
/// \brief Construct a template argument that is an expression.
///
/// This form of template argument only occurs in template argument
@@ -115,46 +157,59 @@ public:
TypeOrValue = reinterpret_cast<uintptr_t>(E);
}
+ /// \brief Construct a template argument that is a template argument pack.
+ ///
+ /// We assume that storage for the template arguments provided
+ /// outlives the TemplateArgument itself.
+ TemplateArgument(const TemplateArgument *Args, unsigned NumArgs) : Kind(Pack){
+ this->Args.Args = Args;
+ this->Args.NumArgs = NumArgs;
+ }
+
/// \brief Copy constructor for a template argument.
TemplateArgument(const TemplateArgument &Other) : Kind(Other.Kind) {
+ // FIXME: Large integral values will get leaked. Do something
+ // similar to what we did with IntegerLiteral.
if (Kind == Integral) {
new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
Integer.Type = Other.Integer.Type;
} else if (Kind == Pack) {
Args.NumArgs = Other.Args.NumArgs;
- Args.Args = new TemplateArgument[Args.NumArgs];
- for (unsigned I = 0; I != Args.NumArgs; ++I)
- Args.Args[I] = Other.Args.Args[I];
- }
- else
+ Args.Args = Other.Args.Args;
+ } else if (Kind == Template || Kind == TemplateExpansion) {
+ TemplateArg.Name = Other.TemplateArg.Name;
+ TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
+ } else
TypeOrValue = Other.TypeOrValue;
}
TemplateArgument& operator=(const TemplateArgument& Other) {
- // FIXME: Does not provide the strong guarantee for exception
- // safety.
using llvm::APSInt;
- // FIXME: Handle Packs
- assert(Kind != Pack && "FIXME: Handle packs");
- assert(Other.Kind != Pack && "FIXME: Handle packs");
-
if (Kind == Other.Kind && Kind == Integral) {
// Copy integral values.
*this->getAsIntegral() = *Other.getAsIntegral();
Integer.Type = Other.Integer.Type;
- } else {
- // Destroy the current integral value, if that's what we're holding.
- if (Kind == Integral)
- getAsIntegral()->~APSInt();
+ return *this;
+ }
+
+ // Destroy the current integral value, if that's what we're holding.
+ if (Kind == Integral)
+ getAsIntegral()->~APSInt();
- Kind = Other.Kind;
+ Kind = Other.Kind;
- if (Other.Kind == Integral) {
- new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
- Integer.Type = Other.Integer.Type;
- } else
- TypeOrValue = Other.TypeOrValue;
+ if (Other.Kind == Integral) {
+ new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
+ Integer.Type = Other.Integer.Type;
+ } else if (Other.Kind == Pack) {
+ Args.NumArgs = Other.Args.NumArgs;
+ Args.Args = Other.Args.Args;
+ } else if (Other.Kind == Template || Other.Kind == TemplateExpansion) {
+ TemplateArg.Name = Other.TemplateArg.Name;
+ TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
+ } else {
+ TypeOrValue = Other.TypeOrValue;
}
return *this;
@@ -165,16 +220,31 @@ public:
if (Kind == Integral)
getAsIntegral()->~APSInt();
- else if (Kind == Pack && Args.CopyArgs)
- delete[] Args.Args;
}
+ /// \brief Create a new template argument pack by copying the given set of
+ /// template arguments.
+ static TemplateArgument CreatePackCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
+
/// \brief Return the kind of stored template argument.
- ArgKind getKind() const { return Kind; }
+ ArgKind getKind() const { return (ArgKind)Kind; }
/// \brief Determine whether this template argument has no value.
bool isNull() const { return Kind == Null; }
+ /// \brief Whether this template argument is dependent on a template
+ /// parameter.
+ bool isDependent() const;
+
+ /// \brief Whether this template argument contains an unexpanded
+ /// parameter pack.
+ bool containsUnexpandedParameterPack() const;
+
+ /// \brief Determine whether this template argument is a pack expansion.
+ bool isPackExpansion() const;
+
/// \brief Retrieve the template argument as a type.
QualType getAsType() const {
if (Kind != Type)
@@ -195,9 +265,21 @@ public:
if (Kind != Template)
return TemplateName();
- return TemplateName::getFromVoidPointer(
- reinterpret_cast<void *> (TypeOrValue));
+ return TemplateName::getFromVoidPointer(TemplateArg.Name);
+ }
+
+ /// \brief Retrieve the template argument as a template name; if the argument
+ /// is a pack expansion, return the pattern as a template name.
+ TemplateName getAsTemplateOrTemplatePattern() const {
+ if (Kind != Template && Kind != TemplateExpansion)
+ return TemplateName();
+
+ return TemplateName::getFromVoidPointer(TemplateArg.Name);
}
+
+ /// \brief Retrieve the number of expansions that a template template argument
+ /// expansion will produce, if known.
+ llvm::Optional<unsigned> getNumTemplateExpansions() const;
/// \brief Retrieve the template argument as an integral value.
llvm::APSInt *getAsIntegral() {
@@ -260,11 +342,17 @@ public:
/// same.
bool structurallyEquals(const TemplateArgument &Other) const;
- /// \brief Construct a template argument pack.
- void setArgumentPack(TemplateArgument *Args, unsigned NumArgs, bool CopyArgs);
+ /// \brief When the template argument is a pack expansion, returns
+ /// the pattern of the pack expansion.
+ TemplateArgument getPackExpansionPattern() const;
+ /// \brief Print this template argument to the given output stream.
+ void print(const PrintingPolicy &Policy, llvm::raw_ostream &Out) const;
+
/// \brief Used to insert TemplateArguments into FoldingSets.
- void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context) const;
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) const;
};
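
The TemplateExpansion constructor above biases the stored count by one so that zero can mean "unknown". The out-of-line getNumTemplateExpansions() presumably reverses that bias; a sketch of the decode implied by the encoding (the real definition is not in this header):

    llvm::Optional<unsigned> decodeNumExpansions(unsigned Stored) {
      if (Stored == 0)                             // 0 encodes "unknown"
        return llvm::Optional<unsigned>();
      return llvm::Optional<unsigned>(Stored - 1); // undo the +1 bias
    }
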
/// Location information for a TemplateArgument.
@@ -276,95 +364,48 @@ private:
struct {
unsigned QualifierRange[2];
unsigned TemplateNameLoc;
+ unsigned EllipsisLoc;
} Template;
};
-#ifndef NDEBUG
- enum Kind {
- K_None,
- K_TypeSourceInfo,
- K_Expression,
- K_Template
- } Kind;
-#endif
-
public:
- TemplateArgumentLocInfo()
- : Expression(0)
-#ifndef NDEBUG
- , Kind(K_None)
-#endif
- {}
+ TemplateArgumentLocInfo();
- TemplateArgumentLocInfo(TypeSourceInfo *TInfo)
- : Declarator(TInfo)
-#ifndef NDEBUG
- , Kind(K_TypeSourceInfo)
-#endif
- {}
+ TemplateArgumentLocInfo(TypeSourceInfo *TInfo) : Declarator(TInfo) {}
- TemplateArgumentLocInfo(Expr *E)
- : Expression(E)
-#ifndef NDEBUG
- , Kind(K_Expression)
-#endif
- {}
+ TemplateArgumentLocInfo(Expr *E) : Expression(E) {}
TemplateArgumentLocInfo(SourceRange QualifierRange,
- SourceLocation TemplateNameLoc)
-#ifndef NDEBUG
- : Kind(K_Template)
-#endif
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc)
{
Template.QualifierRange[0] = QualifierRange.getBegin().getRawEncoding();
Template.QualifierRange[1] = QualifierRange.getEnd().getRawEncoding();
Template.TemplateNameLoc = TemplateNameLoc.getRawEncoding();
+ Template.EllipsisLoc = EllipsisLoc.getRawEncoding();
}
TypeSourceInfo *getAsTypeSourceInfo() const {
- assert(Kind == K_TypeSourceInfo);
return Declarator;
}
Expr *getAsExpr() const {
- assert(Kind == K_Expression);
return Expression;
}
SourceRange getTemplateQualifierRange() const {
- assert(Kind == K_Template);
return SourceRange(
SourceLocation::getFromRawEncoding(Template.QualifierRange[0]),
SourceLocation::getFromRawEncoding(Template.QualifierRange[1]));
}
SourceLocation getTemplateNameLoc() const {
- assert(Kind == K_Template);
return SourceLocation::getFromRawEncoding(Template.TemplateNameLoc);
}
-#ifndef NDEBUG
- void validateForArgument(const TemplateArgument &Arg) {
- switch (Arg.getKind()) {
- case TemplateArgument::Type:
- assert(Kind == K_TypeSourceInfo);
- break;
- case TemplateArgument::Expression:
- case TemplateArgument::Declaration:
- assert(Kind == K_Expression);
- break;
- case TemplateArgument::Template:
- assert(Kind == K_Template);
- break;
- case TemplateArgument::Integral:
- case TemplateArgument::Pack:
- assert(Kind == K_None);
- break;
- case TemplateArgument::Null:
- llvm_unreachable("source info for null template argument?");
- }
+ SourceLocation getTemplateEllipsisLoc() const {
+ return SourceLocation::getFromRawEncoding(Template.EllipsisLoc);
}
-#endif
};
/// Location wrapper for a TemplateArgument. TemplateArgument is to
@@ -393,14 +434,18 @@ public:
TemplateArgumentLoc(const TemplateArgument &Argument,
SourceRange QualifierRange,
- SourceLocation TemplateNameLoc)
- : Argument(Argument), LocInfo(QualifierRange, TemplateNameLoc) {
- assert(Argument.getKind() == TemplateArgument::Template);
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc = SourceLocation())
+ : Argument(Argument),
+ LocInfo(QualifierRange, TemplateNameLoc, EllipsisLoc) {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
}
/// \brief Fetches the primary location of the argument.
SourceLocation getLocation() const {
- if (Argument.getKind() == TemplateArgument::Template)
+ if (Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion)
return getTemplateNameLoc();
return getSourceRange().getBegin();
@@ -433,14 +478,32 @@ public:
}
SourceRange getTemplateQualifierRange() const {
- assert(Argument.getKind() == TemplateArgument::Template);
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
return LocInfo.getTemplateQualifierRange();
}
SourceLocation getTemplateNameLoc() const {
- assert(Argument.getKind() == TemplateArgument::Template);
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
return LocInfo.getTemplateNameLoc();
}
+
+ SourceLocation getTemplateEllipsisLoc() const {
+ assert(Argument.getKind() == TemplateArgument::TemplateExpansion);
+ return LocInfo.getTemplateEllipsisLoc();
+ }
+
+ /// \brief When the template argument is a pack expansion, returns
+ /// the pattern of the pack expansion.
+ ///
+ /// \param Ellipsis Will be set to the location of the ellipsis.
+ ///
+ /// \param NumExpansions Will be set to the number of expansions that will
+ /// be generated from this pack expansion, if known a priori.
+ TemplateArgumentLoc getPackExpansionPattern(SourceLocation &Ellipsis,
+ llvm::Optional<unsigned> &NumExpansions,
+ ASTContext &Context) const;
};
/// A convenient class for passing around template argument
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
index ddfac71..1721973 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
@@ -23,43 +23,119 @@ namespace llvm {
}
namespace clang {
-
+
+class ASTContext;
class DependentTemplateName;
class DiagnosticBuilder;
class IdentifierInfo;
class NestedNameSpecifier;
+class OverloadedTemplateStorage;
struct PrintingPolicy;
class QualifiedTemplateName;
class NamedDecl;
+class SubstTemplateTemplateParmPackStorage;
+class TemplateArgument;
class TemplateDecl;
-
-/// \brief A structure for storing the information associated with an
-/// overloaded template name.
-class OverloadedTemplateStorage {
+class TemplateTemplateParmDecl;
+
+/// \brief Implementation class used to describe either a set of overloaded
+/// template names or an already-substituted template template parameter pack.
+class UncommonTemplateNameStorage {
+protected:
union {
- unsigned Size;
- NamedDecl *Storage[1];
+ struct {
+ /// \brief If true, this is an OverloadedTemplateStorage instance;
+ /// otherwise, it's a SubstTemplateTemplateParmPackStorage instance.
+ unsigned IsOverloadedStorage : 1;
+
+ /// \brief The number of stored templates or template arguments,
+ /// depending on which subclass we have.
+ unsigned Size : 31;
+ } Bits;
+
+ void *PointerAlignment;
};
-
+
+ UncommonTemplateNameStorage(unsigned Size, bool OverloadedStorage) {
+ Bits.IsOverloadedStorage = OverloadedStorage;
+ Bits.Size = Size;
+ }
+
+public:
+ unsigned size() const { return Bits.Size; }
+
+ OverloadedTemplateStorage *getAsOverloadedStorage() {
+ return Bits.IsOverloadedStorage
+ ? reinterpret_cast<OverloadedTemplateStorage *>(this)
+ : 0;
+ }
+
+ SubstTemplateTemplateParmPackStorage *getAsSubstTemplateTemplateParmPack() {
+ return Bits.IsOverloadedStorage
+ ? 0
+ : reinterpret_cast<SubstTemplateTemplateParmPackStorage *>(this);
+ }
+};
+
+/// \brief A structure for storing the information associated with an
+/// overloaded template name.
+class OverloadedTemplateStorage : public UncommonTemplateNameStorage {
friend class ASTContext;
- OverloadedTemplateStorage(unsigned Size) : Size(Size) {}
+ OverloadedTemplateStorage(unsigned Size)
+ : UncommonTemplateNameStorage(Size, true) { }
NamedDecl **getStorage() {
- return &Storage[1];
+ return reinterpret_cast<NamedDecl **>(this + 1);
}
NamedDecl * const *getStorage() const {
- return &Storage[1];
+ return reinterpret_cast<NamedDecl *const *>(this + 1);
}
public:
typedef NamedDecl *const *iterator;
- unsigned size() const { return Size; }
-
iterator begin() const { return getStorage(); }
iterator end() const { return getStorage() + size(); }
};
+
+
+/// \brief A structure for storing an already-substituted template template
+/// parameter pack.
+///
+/// This kind of template name occurs when the parameter pack has been
+/// provided with a template template argument pack in a context where its
+/// enclosing pack expansion could not be fully expanded.
+class SubstTemplateTemplateParmPackStorage
+ : public UncommonTemplateNameStorage, public llvm::FoldingSetNode
+{
+ ASTContext &Context;
+ TemplateTemplateParmDecl *Parameter;
+ const TemplateArgument *Arguments;
+
+public:
+ SubstTemplateTemplateParmPackStorage(ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ unsigned Size,
+ const TemplateArgument *Arguments)
+ : UncommonTemplateNameStorage(Size, false), Context(Context),
+ Parameter(Parameter), Arguments(Arguments) { }
+
+ /// \brief Retrieve the template template parameter pack being substituted.
+ TemplateTemplateParmDecl *getParameterPack() const {
+ return Parameter;
+ }
+
+ /// \brief Retrieve the template template argument pack with which this
+ /// parameter was substituted.
+ TemplateArgument getArgumentPack() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack);
+};
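
UncommonTemplateNameStorage discriminates its two subclasses with the single IsOverloadedStorage bit, so downcasts are by query rather than RTTI. An illustrative dispatch (storedCount is hypothetical):

    unsigned storedCount(UncommonTemplateNameStorage *U) {
      if (OverloadedTemplateStorage *O = U->getAsOverloadedStorage())
        return O->size(); // number of overloaded templates
      return U->size();   // number of substituted template arguments
    }
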
/// \brief Represents a C++ template name within the type system.
///
@@ -90,7 +166,7 @@ public:
/// only be understood in the context of
class TemplateName {
typedef llvm::PointerUnion4<TemplateDecl *,
- OverloadedTemplateStorage *,
+ UncommonTemplateNameStorage *,
QualifiedTemplateName *,
DependentTemplateName *> StorageType;
@@ -103,16 +179,28 @@ class TemplateName {
public:
/// \brief Kind of name that is actually stored.
enum NameKind {
+ /// \brief A single template declaration.
Template,
+ /// \brief A set of overloaded template declarations.
OverloadedTemplate,
+ /// \brief A qualified template name, where the qualification is kept
+ /// to describe the source code as written.
QualifiedTemplate,
- DependentTemplate
+ /// \brief A dependent template name that has not been resolved to a
+ /// template (or set of templates).
+ DependentTemplate,
+ /// \brief A template template parameter pack that has been substituted for
+ /// a template template argument pack, but has not yet been expanded into
+ /// individual arguments.
+ SubstTemplateTemplateParmPack
};
TemplateName() : Storage() { }
explicit TemplateName(TemplateDecl *Template) : Storage(Template) { }
explicit TemplateName(OverloadedTemplateStorage *Storage)
: Storage(Storage) { }
+ explicit TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
+ : Storage(Storage) { }
explicit TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) { }
explicit TemplateName(DependentTemplateName *Dep) : Storage(Dep) { }
@@ -122,7 +210,7 @@ public:
/// \brief Get the kind of name that is actually stored.
NameKind getKind() const;
- /// \brief Retrieve the the underlying template declaration that
+ /// \brief Retrieve the underlying template declaration that
/// this template name refers to, if known.
///
/// \returns The template declaration that this template name refers
@@ -131,7 +219,7 @@ public:
/// set of function templates, returns NULL.
TemplateDecl *getAsTemplateDecl() const;
- /// \brief Retrieve the the underlying, overloaded function template
+ /// \brief Retrieve the underlying, overloaded function template
/// declarations that this template name refers to, if known.
///
/// \returns The set of overloaded function templates that this template
@@ -139,7 +227,25 @@ public:
/// specific set of function templates because it is a dependent name or
/// refers to a single template, returns NULL.
OverloadedTemplateStorage *getAsOverloadedTemplate() const {
- return Storage.dyn_cast<OverloadedTemplateStorage *>();
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsOverloadedStorage();
+
+ return 0;
+ }
+
+ /// \brief Retrieve the substituted template template parameter pack, if
+ /// known.
+ ///
+ /// \returns The storage for the substituted template template parameter pack,
+ /// if known. Otherwise, returns NULL.
+ SubstTemplateTemplateParmPackStorage *
+ getAsSubstTemplateTemplateParmPack() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsSubstTemplateTemplateParmPack();
+
+ return 0;
}
/// \brief Retrieve the underlying qualified template name
@@ -157,6 +263,10 @@ public:
/// \brief Determines whether this is a dependent template name.
bool isDependent() const;
+ /// \brief Determines whether this template name contains an
+ /// unexpanded parameter pack (for C++0x variadic templates).
+ bool containsUnexpandedParameterPack() const;
+
/// \brief Print the template name.
///
/// \param OS the output stream to which the template name will be
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
index 92e62a5..9b177cc 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Type.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -18,12 +18,14 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/Visibility.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateName.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/type_traits.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
@@ -34,7 +36,7 @@ using llvm::dyn_cast;
using llvm::dyn_cast_or_null;
namespace clang {
enum {
- TypeAlignmentInBits = 3,
+ TypeAlignmentInBits = 4,
TypeAlignment = 1 << TypeAlignmentInBits
};
class Type;
@@ -91,8 +93,9 @@ namespace clang {
class TemplateArgument;
class TemplateArgumentLoc;
class TemplateArgumentListInfo;
- class Type;
class ElaboratedType;
+ class ExtQuals;
+ class ExtQualsTypeCommonBase;
struct PrintingPolicy;
template <typename> class CanQual;
@@ -128,7 +131,7 @@ public:
MaxAddressSpace = 0xffffffu,
/// The width of the "fast" qualifier mask.
- FastWidth = 2,
+ FastWidth = 3,
/// The fast qualifier mask.
FastMask = (1 << FastWidth) - 1
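
With FastWidth grown to 3, the const, restrict, and volatile bits all travel in the low bits of the QualType pointer, leaving the fourth alignment bit (TypeAlignmentInBits is bumped to 4 above) to mark ExtQuals nodes. A sketch of the unpacking, mirroring what QualType's accessors do internally:

    unsigned fastBits(uintptr_t OpaqueQualTypeValue) {
      // FastMask == 0b111 once FastWidth is 3.
      return (unsigned)(OpaqueQualTypeValue & ((1u << 3) - 1));
    }
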
@@ -271,6 +274,25 @@ public:
}
}
+ /// \brief Add the qualifiers from the given set to this set, given that
+ /// they don't conflict.
+ void addConsistentQualifiers(Qualifiers qs) {
+ assert(getAddressSpace() == qs.getAddressSpace() ||
+ !hasAddressSpace() || !qs.hasAddressSpace());
+ assert(getObjCGCAttr() == qs.getObjCGCAttr() ||
+ !hasObjCGCAttr() || !qs.hasObjCGCAttr());
+ Mask |= qs.Mask;
+ }
+
+ /// \brief Determines if these qualifiers compatibly include another set.
+ /// Generally this answers the question of whether an object with the other
+ /// qualifiers can be safely used as an object with these qualifiers.
+ bool compatiblyIncludes(Qualifiers other) const {
+ // Non-CVR qualifiers must match exactly. CVR qualifiers may subset.
+ return ((Mask & ~CVRMask) == (other.Mask & ~CVRMask)) &&
+ (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask));
+ }
+
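
compatiblyIncludes() is equality on the non-CVR bits plus a subset test on the CVR bits, so qualification may be added but never dropped. A worked example (the demo function is hypothetical; addConst() is the existing Qualifiers mutator):

    bool demoCompatiblyIncludes() {
      Qualifiers ConstQuals, NoQuals;
      ConstQuals.addConst();
      return ConstQuals.compatiblyIncludes(NoQuals) && // true: adds const
            !NoQuals.compatiblyIncludes(ConstQuals);   // false: drops const
    }
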
bool isSupersetOf(Qualifiers Other) const;
bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
@@ -325,85 +347,6 @@ private:
static const uint32_t AddressSpaceShift = 5;
};
-
-/// ExtQuals - We can encode up to three bits in the low bits of a
-/// type pointer, but there are many more type qualifiers that we want
-/// to be able to apply to an arbitrary type. Therefore we have this
-/// struct, intended to be heap-allocated and used by QualType to
-/// store qualifiers.
-///
-/// The current design tags the 'const' and 'restrict' qualifiers in
-/// two low bits on the QualType pointer; a third bit records whether
-/// the pointer is an ExtQuals node. 'const' was chosen because it is
-/// orders of magnitude more common than the other two qualifiers, in
-/// both library and user code. It's relatively rare to see
-/// 'restrict' in user code, but many standard C headers are saturated
-/// with 'restrict' declarations, so that representing them efficiently
-/// is a critical goal of this representation.
-class ExtQuals : public llvm::FoldingSetNode {
- // NOTE: changing the fast qualifiers should be straightforward as
- // long as you don't make 'const' non-fast.
- // 1. Qualifiers:
- // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
- // Fast qualifiers must occupy the low-order bits.
- // b) Update Qualifiers::FastWidth and FastMask.
- // 2. QualType:
- // a) Update is{Volatile,Restrict}Qualified(), defined inline.
- // b) Update remove{Volatile,Restrict}, defined near the end of
- // this header.
- // 3. ASTContext:
- // a) Update get{Volatile,Restrict}Type.
-
- /// Context - the context to which this set belongs. We save this
- /// here so that QualifierCollector can use it to reapply extended
- /// qualifiers to an arbitrary type without requiring a context to
- /// be pushed through every single API dealing with qualifiers.
- ASTContext& Context;
-
- /// BaseType - the underlying type that this qualifies
- const Type *BaseType;
-
- /// Quals - the immutable set of qualifiers applied by this
- /// node; always contains extended qualifiers.
- Qualifiers Quals;
-
-public:
- ExtQuals(ASTContext& Context, const Type *Base, Qualifiers Quals)
- : Context(Context), BaseType(Base), Quals(Quals)
- {
- assert(Quals.hasNonFastQualifiers()
- && "ExtQuals created with no fast qualifiers");
- assert(!Quals.hasFastQualifiers()
- && "ExtQuals created with fast qualifiers");
- }
-
- Qualifiers getQualifiers() const { return Quals; }
-
- bool hasVolatile() const { return Quals.hasVolatile(); }
-
- bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
- Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
-
- bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
- unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
-
- const Type *getBaseType() const { return BaseType; }
-
- ASTContext &getContext() const { return Context; }
-
-public:
- void Profile(llvm::FoldingSetNodeID &ID) const {
- Profile(ID, getBaseType(), Quals);
- }
- static void Profile(llvm::FoldingSetNodeID &ID,
- const Type *BaseType,
- Qualifiers Quals) {
- assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
- ID.AddPointer(BaseType);
- Quals.Profile(ID);
- }
-};
-
/// CallingConv - Specifies the calling convention that a function uses.
enum CallingConv {
CC_Default,
@@ -414,6 +357,7 @@ enum CallingConv {
CC_X86Pascal // __attribute__((pascal))
};
+typedef std::pair<const Type*, Qualifiers> SplitQualType;
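
SplitQualType pairs the bare type node with the local qualifiers peeled off of it; QualType::split() (declared below) produces one and the static QualType::getAsString(SplitQualType) overload consumes it. A minimal sketch of that flow:

    std::string printSplit(QualType QT) {
      SplitQualType ST = QT.split(); // (unqualified Type*, Qualifiers)
      return QualType::getAsString(ST);
    }
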
/// QualType - For efficiency, we don't store CV-qualified types as nodes on
/// their own: instead each reference to a type stores the qualifiers. This
@@ -440,8 +384,14 @@ class QualType {
return Value.getPointer().get<const Type*>();
}
- QualType getUnqualifiedTypeSlow() const;
-
+ const ExtQualsTypeCommonBase *getCommonPtr() const {
+ assert(!isNull() && "Cannot retrieve a NULL type pointer");
+ uintptr_t CommonPtrVal
+ = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
+ CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
+ return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
+ }
+
friend class QualifierCollector;
public:
QualType() {}
@@ -457,24 +407,29 @@ public:
/// Retrieves a pointer to the underlying (unqualified) type.
/// This should really return a const Type, but it's not worth
/// changing all the users right now.
- Type *getTypePtr() const {
- if (hasLocalNonFastQualifiers())
- return const_cast<Type*>(getExtQualsUnsafe()->getBaseType());
- return const_cast<Type*>(getTypePtrUnsafe());
- }
+ ///
+ /// This function requires that the type not be NULL. If the type might be
+ /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
+ const Type *getTypePtr() const;
+
+ const Type *getTypePtrOrNull() const;
+
+ /// Divides a QualType into its unqualified type and a set of local
+ /// qualifiers.
+ SplitQualType split() const;
void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
- static QualType getFromOpaquePtr(void *Ptr) {
+ static QualType getFromOpaquePtr(const void *Ptr) {
QualType T;
- T.Value.setFromOpaqueValue(Ptr);
+ T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
return T;
}
- Type &operator*() const {
+ const Type &operator*() const {
return *getTypePtr();
}
- Type *operator->() const {
+ const Type *operator->() const {
return getTypePtr();
}
@@ -510,7 +465,7 @@ public:
/// "volatile" qualifier set, without looking through typedefs that may have
/// added "volatile" at a different level.
bool isLocalVolatileQualified() const {
- return (hasLocalNonFastQualifiers() && getExtQualsUnsafe()->hasVolatile());
+ return (getLocalFastQualifiers() & Qualifiers::Volatile);
}
/// \brief Determine whether this type is volatile-qualified.
@@ -536,13 +491,7 @@ public:
/// \brief Retrieve the set of qualifiers local to this particular QualType
/// instance, not including any qualifiers acquired through typedefs or
/// other sugar.
- Qualifiers getLocalQualifiers() const {
- Qualifiers Quals;
- if (hasLocalNonFastQualifiers())
- Quals = getExtQualsUnsafe()->getQualifiers();
- Quals.addFastQualifiers(getLocalFastQualifiers());
- return Quals;
- }
+ Qualifiers getLocalQualifiers() const;
/// \brief Retrieve the set of qualifiers applied to this type.
Qualifiers getQualifiers() const;
@@ -551,21 +500,13 @@ public:
/// local to this particular QualType instance, not including any qualifiers
/// acquired through typedefs or other sugar.
unsigned getLocalCVRQualifiers() const {
- unsigned CVR = getLocalFastQualifiers();
- if (isLocalVolatileQualified())
- CVR |= Qualifiers::Volatile;
- return CVR;
+ return getLocalFastQualifiers();
}
/// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
/// applied to this type.
unsigned getCVRQualifiers() const;
- /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
- /// applied to this type, looking through any number of unqualified array
- /// types to their element types' qualifiers.
- unsigned getCVRQualifiersThroughArrayTypes() const;
-
bool isConstant(ASTContext& Ctx) const {
return QualType::isConstant(*this, Ctx);
}
@@ -587,16 +528,13 @@ public:
Value.setInt(Value.getInt() | TQs);
}
- // FIXME: The remove* functions are semantically broken, because they might
- // not remove a qualifier stored on a typedef. Most of the with* functions
- // have the same problem.
- void removeConst();
- void removeVolatile();
- void removeRestrict();
- void removeCVRQualifiers(unsigned Mask);
+ void removeLocalConst();
+ void removeLocalVolatile();
+ void removeLocalRestrict();
+ void removeLocalCVRQualifiers(unsigned Mask);
- void removeFastQualifiers() { Value.setInt(0); }
- void removeFastQualifiers(unsigned Mask) {
+ void removeLocalFastQualifiers() { Value.setInt(0); }
+ void removeLocalFastQualifiers(unsigned Mask) {
assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers");
Value.setInt(Value.getInt() & ~Mask);
}
@@ -611,31 +549,54 @@ public:
// Creates a type with exactly the given fast qualifiers, removing
// any existing fast qualifiers.
- QualType withExactFastQualifiers(unsigned TQs) const {
- return withoutFastQualifiers().withFastQualifiers(TQs);
+ QualType withExactLocalFastQualifiers(unsigned TQs) const {
+ return withoutLocalFastQualifiers().withFastQualifiers(TQs);
}
// Removes fast qualifiers, but leaves any extended qualifiers in place.
- QualType withoutFastQualifiers() const {
+ QualType withoutLocalFastQualifiers() const {
QualType T = *this;
- T.removeFastQualifiers();
+ T.removeLocalFastQualifiers();
return T;
}
+ QualType getCanonicalType() const;
+
/// \brief Return this type with all of the instance-specific qualifiers
/// removed, but without removing any qualifiers that may have been applied
/// through typedefs.
QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
- /// \brief Return the unqualified form of the given type, which might be
- /// desugared to eliminate qualifiers introduced via typedefs.
- QualType getUnqualifiedType() const {
- QualType T = getLocalUnqualifiedType();
- if (!T.hasQualifiers())
- return T;
-
- return getUnqualifiedTypeSlow();
- }
+ /// \brief Retrieve the unqualified variant of the given type,
+ /// removing as little sugar as possible.
+ ///
+ /// This routine looks through various kinds of sugar to find the
+ /// least-desugared type that is unqualified. For example, given:
+ ///
+ /// \code
+ /// typedef int Integer;
+ /// typedef const Integer CInteger;
+ /// typedef CInteger DifferenceType;
+ /// \endcode
+ ///
+ /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
+ /// desugar until we hit the type \c Integer, which has no qualifiers on it.
+ ///
+ /// The resulting type might still be qualified if it's an array
+ /// type. To strip qualifiers even from within an array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ inline QualType getUnqualifiedType() const;
+
+ /// getSplitUnqualifiedType - Retrieve the unqualified variant of the
+ /// given type, removing as little sugar as possible.
+ ///
+ /// Like getUnqualifiedType(), but also returns the set of
+ /// qualifiers that were built up.
+ ///
+ /// The resulting type might still be qualified if it's an array
+ /// type. To strip qualifiers even from within an array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ inline SplitQualType getSplitUnqualifiedType() const;
bool isMoreQualifiedThan(QualType Other) const;
bool isAtLeastAsQualifiedAs(QualType Other) const;
@@ -659,8 +620,20 @@ public:
/// concrete.
///
/// Qualifiers are left in place.
- QualType getDesugaredType() const {
- return QualType::getDesugaredType(*this);
+ QualType getDesugaredType(const ASTContext &Context) const {
+ return getDesugaredType(*this, Context);
+ }
+
+ SplitQualType getSplitDesugaredType() const {
+ return getSplitDesugaredType(*this);
+ }
+
+ /// IgnoreParens - Returns the specified type after dropping any
+ /// outer-level parentheses.
+ QualType IgnoreParens() const {
+ if (isa<ParenType>(*this))
+ return QualType::IgnoreParens(*this);
+ return *this;
}
/// operator==/!= - Indicate whether the specified types and qualifiers are
@@ -671,7 +644,13 @@ public:
friend bool operator!=(const QualType &LHS, const QualType &RHS) {
return LHS.Value != RHS.Value;
}
- std::string getAsString() const;
+ std::string getAsString() const {
+ return getAsString(split());
+ }
+ static std::string getAsString(SplitQualType split) {
+ return getAsString(split.first, split.second);
+ }
+ static std::string getAsString(const Type *ty, Qualifiers qs);
std::string getAsString(const PrintingPolicy &Policy) const {
std::string S;
@@ -679,7 +658,16 @@ public:
return S;
}
void getAsStringInternal(std::string &Str,
- const PrintingPolicy &Policy) const;
+ const PrintingPolicy &Policy) const {
+ return getAsStringInternal(split(), Str, Policy);
+ }
+ static void getAsStringInternal(SplitQualType split, std::string &out,
+ const PrintingPolicy &policy) {
+ return getAsStringInternal(split.first, split.second, out, policy);
+ }
+ static void getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &out,
+ const PrintingPolicy &policy);
void dump(const char *s) const;
void dump() const;
@@ -704,12 +692,29 @@ public:
return getObjCGCAttr() == Qualifiers::Strong;
}
+ enum DestructionKind {
+ DK_none,
+ DK_cxx_destructor
+ };
+
+ /// isDestructedType - Returns a non-zero DestructionKind if objects of
+ /// this type require non-trivial work to clean up after. Qualifiers are
+ /// taken into account because it's conceivable that a qualifier
+ /// (objc_gc(weak)?) could make something require destruction.
+ DestructionKind isDestructedType() const {
+ return isDestructedTypeImpl(*this);
+ }
+
private:
// These methods are implemented in a separate translation unit;
// "static"-ize them to avoid creating temporary QualTypes in the
// caller.
static bool isConstant(QualType T, ASTContext& Ctx);
- static QualType getDesugaredType(QualType T);
+ static QualType getDesugaredType(QualType T, const ASTContext &Context);
+ static SplitQualType getSplitDesugaredType(QualType T);
+ static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
+ static QualType IgnoreParens(QualType T);
+ static DestructionKind isDestructedTypeImpl(QualType type);
};
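
Following the typedef chain from the getUnqualifiedType() comment above (DifferenceType -> CInteger -> const Integer), a sketch of the documented behavior, assuming DifferenceTy was built from that DifferenceType typedef:

    void demoUnqualified(QualType DifferenceTy) {
      QualType U = DifferenceTy.getUnqualifiedType();
      assert(!U.hasQualifiers() && "const stripped in this non-array case");
      // U still prints as "Integer", not "int": sugar beneath the
      // qualifier is preserved.
    }
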
} // end clang.
@@ -718,7 +723,7 @@ namespace llvm {
/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
/// to a specific Type class.
template<> struct simplify_type<const ::clang::QualType> {
- typedef ::clang::Type* SimpleType;
+ typedef const ::clang::Type *SimpleType;
static SimpleType getSimplifiedValue(const ::clang::QualType &Val) {
return Val.getTypePtr();
}
@@ -744,6 +749,106 @@ public:
namespace clang {
+/// \brief Base class that is common to both the \c ExtQuals and \c Type
+/// classes, which allows \c QualType to access the common fields between the
+/// two.
+///
+class ExtQualsTypeCommonBase {
+ ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
+ : BaseType(baseType), CanonicalType(canon) {}
+
+ /// \brief The "base" type of an extended qualifiers type (\c ExtQuals) or
+ /// a self-referential pointer (for \c Type).
+ ///
+ /// This pointer allows an efficient mapping from a QualType to its
+ /// underlying type pointer.
+ const Type *const BaseType;
+
+ /// \brief The canonical type of this type. A QualType.
+ QualType CanonicalType;
+
+ friend class QualType;
+ friend class Type;
+ friend class ExtQuals;
+};
+
+/// ExtQuals - We can encode up to four bits in the low bits of a
+/// type pointer, but there are many more type qualifiers that we want
+/// to be able to apply to an arbitrary type. Therefore we have this
+/// struct, intended to be heap-allocated and used by QualType to
+/// store qualifiers.
+///
+/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
+/// in three low bits on the QualType pointer; a fourth bit records whether
+/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
+/// Objective-C GC attributes) are much more rare.
+class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
+ // NOTE: changing the fast qualifiers should be straightforward as
+ // long as you don't make 'const' non-fast.
+ // 1. Qualifiers:
+ // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
+ // Fast qualifiers must occupy the low-order bits.
+ // b) Update Qualifiers::FastWidth and FastMask.
+ // 2. QualType:
+ // a) Update is{Volatile,Restrict}Qualified(), defined inline.
+ // b) Update remove{Volatile,Restrict}, defined near the end of
+ // this header.
+ // 3. ASTContext:
+ // a) Update get{Volatile,Restrict}Type.
+
+ /// Quals - the immutable set of qualifiers applied by this
+ /// node; always contains extended qualifiers.
+ Qualifiers Quals;
+
+ ExtQuals *this_() { return this; }
+
+public:
+ ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
+ : ExtQualsTypeCommonBase(baseType,
+ canon.isNull() ? QualType(this_(), 0) : canon),
+ Quals(quals)
+ {
+    assert(Quals.hasNonFastQualifiers()
+           && "ExtQuals created with no non-fast qualifiers");
+ assert(!Quals.hasFastQualifiers()
+ && "ExtQuals created with fast qualifiers");
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
+ Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
+
+ bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ const Type *getBaseType() const { return BaseType; }
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getBaseType(), Quals);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const Type *BaseType,
+ Qualifiers Quals) {
+ assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
+ ID.AddPointer(BaseType);
+ Quals.Profile(ID);
+ }
+};
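+
+/// For illustration (these declarations are not from this header), the
+/// split between fast and extended qualifiers plays out as:
+/// \code
+///   const int x;                              // fast: low bits of QualType
+///   __attribute__((address_space(1))) int y;  // extended: ExtQuals node
+/// \endcode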
+
+/// \brief The kind of C++0x ref-qualifier associated with a function type,
+/// which determines whether a member function's "this" object can be an
+/// lvalue, rvalue, or neither.
+enum RefQualifierKind {
+ /// \brief No ref-qualifier was provided.
+ RQ_None = 0,
+ /// \brief An lvalue ref-qualifier was provided (\c &).
+ RQ_LValue,
+ /// \brief An rvalue ref-qualifier was provided (\c &&).
+ RQ_RValue
+};
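+
+/// For illustration (not part of this header), the three values correspond
+/// to member declarations such as:
+/// \code
+///   struct S {
+///     void a();     // RQ_None
+///     void b() &;   // RQ_LValue: callable only on lvalues
+///     void c() &&;  // RQ_RValue: callable only on rvalues
+///   };
+/// \endcode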
+
/// Type - This is the base class of the type hierarchy. A central concept
/// with types is that each type always has a canonical type. A canonical type
/// is the type with any typedef names stripped out of it or the types it
@@ -769,7 +874,7 @@ namespace clang {
///
/// Types, once created, are immutable.
///
-class Type {
+class Type : public ExtQualsTypeCommonBase {
public:
enum TypeClass {
#define TYPE(Class, Base) Class,
@@ -783,53 +888,249 @@ private:
Type(const Type&); // DO NOT IMPLEMENT.
void operator=(const Type&); // DO NOT IMPLEMENT.
- QualType CanonicalType;
+ /// Bitfields required by the Type class.
+ class TypeBitfields {
+ friend class Type;
+ template <class T> friend class TypePropertyCache;
- /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
- unsigned TC : 8;
+ /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
+ unsigned TC : 8;
- /// Dependent - Whether this type is a dependent type (C++ [temp.dep.type]).
- /// Note that this should stay at the end of the ivars for Type so that
- /// subclasses can pack their bitfields into the same word.
- bool Dependent : 1;
+ /// Dependent - Whether this type is a dependent type (C++ [temp.dep.type]).
+ /// Note that this should stay at the end of the ivars for Type so that
+ /// subclasses can pack their bitfields into the same word.
+ unsigned Dependent : 1;
- /// \brief Whether the linkage of this type is already known.
- mutable bool LinkageKnown : 1;
+ /// \brief Whether this type is a variably-modified type (C99 6.7.5).
+ unsigned VariablyModified : 1;
+
+ /// \brief Whether this type contains an unexpanded parameter pack
+ /// (for C++0x variadic templates).
+ unsigned ContainsUnexpandedParameterPack : 1;
+
+ /// \brief Nonzero if the cache (i.e. the bitfields here starting
+ /// with 'Cache') is valid. If so, then this is a
+ /// LangOptions::VisibilityMode+1.
+ mutable unsigned CacheValidAndVisibility : 2;
+
+ /// \brief Linkage of this type.
+ mutable unsigned CachedLinkage : 2;
+
+    /// \brief Whether this type involves any local or unnamed types.
+ mutable unsigned CachedLocalOrUnnamed : 1;
- /// \brief Linkage of this type.
- mutable unsigned CachedLinkage : 2;
+ /// \brief FromAST - Whether this type comes from an AST file.
+ mutable unsigned FromAST : 1;
+
+ bool isCacheValid() const {
+ return (CacheValidAndVisibility != 0);
+ }
+ Visibility getVisibility() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return static_cast<Visibility>(CacheValidAndVisibility-1);
+ }
+ Linkage getLinkage() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return static_cast<Linkage>(CachedLinkage);
+ }
+ bool hasLocalOrUnnamedType() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return CachedLocalOrUnnamed;
+ }
+ };
+ enum { NumTypeBits = 17 };
+
+protected:
+ // These classes allow subclasses to somewhat cleanly pack bitfields
+ // into Type.
+
+ class ArrayTypeBitfields {
+ friend class ArrayType;
+
+ unsigned : NumTypeBits;
+
+ /// IndexTypeQuals - CVR qualifiers from declarations like
+ /// 'int X[static restrict 4]'. For function parameters only.
+ unsigned IndexTypeQuals : 3;
+
+    /// SizeModifier - The array size modifier ('static' or '*') from
+    /// declarations like 'int X[static restrict 4]'. For function
+    /// parameters only. Stored as an ArrayType::ArraySizeModifier.
+ unsigned SizeModifier : 3;
+ };
+
+ class BuiltinTypeBitfields {
+ friend class BuiltinType;
+
+ unsigned : NumTypeBits;
+
+ /// The kind (BuiltinType::Kind) of builtin type this is.
+ unsigned Kind : 8;
+ };
+
+ class FunctionTypeBitfields {
+ friend class FunctionType;
+
+ unsigned : NumTypeBits;
+
+ /// Extra information which affects how the function is called, like
+ /// regparm and the calling convention.
+ unsigned ExtInfo : 8;
+
+ /// Whether the function is variadic. Only used by FunctionProtoType.
+ unsigned Variadic : 1;
+
+ /// TypeQuals - Used only by FunctionProtoType, put here to pack with the
+ /// other bitfields.
+ /// The qualifiers are part of FunctionProtoType because...
+ ///
+ /// C++ 8.3.5p4: The return type, the parameter type list and the
+ /// cv-qualifier-seq, [...], are part of the function type.
+ unsigned TypeQuals : 3;
+
+ /// \brief The ref-qualifier associated with a \c FunctionProtoType.
+ ///
+ /// This is a value of type \c RefQualifierKind.
+ unsigned RefQualifier : 2;
+ };
+
+ class ObjCObjectTypeBitfields {
+ friend class ObjCObjectType;
+
+ unsigned : NumTypeBits;
+
+ /// NumProtocols - The number of protocols stored directly on this
+ /// object type.
+ unsigned NumProtocols : 32 - NumTypeBits;
+ };
+
+ class ReferenceTypeBitfields {
+ friend class ReferenceType;
+
+ unsigned : NumTypeBits;
+
+ /// True if the type was originally spelled with an lvalue sigil.
+ /// This is never true of rvalue references but can also be false
+ /// on lvalue references because of C++0x [dcl.typedef]p9,
+ /// as follows:
+ ///
+ /// typedef int &ref; // lvalue, spelled lvalue
+ /// typedef int &&rvref; // rvalue
+ /// ref &a; // lvalue, inner ref, spelled lvalue
+ /// ref &&a; // lvalue, inner ref
+ /// rvref &a; // lvalue, inner ref, spelled lvalue
+ /// rvref &&a; // rvalue, inner ref
+ unsigned SpelledAsLValue : 1;
+
+ /// True if the inner type is a reference type. This only happens
+ /// in non-canonical forms.
+ unsigned InnerRef : 1;
+ };
+
+ class TypeWithKeywordBitfields {
+ friend class TypeWithKeyword;
+
+ unsigned : NumTypeBits;
+
+ /// An ElaboratedTypeKeyword. 8 bits for efficient access.
+ unsigned Keyword : 8;
+ };
+
+ class VectorTypeBitfields {
+ friend class VectorType;
+
+ unsigned : NumTypeBits;
+
+ /// VecKind - The kind of vector, either a generic vector type or some
+ /// target-specific vector type such as for AltiVec or Neon.
+ unsigned VecKind : 3;
+
+ /// NumElements - The number of elements in the vector.
+ unsigned NumElements : 29 - NumTypeBits;
+ };
+
+ class AttributedTypeBitfields {
+ friend class AttributedType;
- /// \brief FromAST - Whether this type comes from an AST file.
- mutable bool FromAST : 1;
+ unsigned : NumTypeBits;
+ /// AttrKind - an AttributedType::Kind
+ unsigned AttrKind : 32 - NumTypeBits;
+ };
+
+ union {
+ TypeBitfields TypeBits;
+ ArrayTypeBitfields ArrayTypeBits;
+ AttributedTypeBitfields AttributedTypeBits;
+ BuiltinTypeBitfields BuiltinTypeBits;
+ FunctionTypeBitfields FunctionTypeBits;
+ ObjCObjectTypeBitfields ObjCObjectTypeBits;
+ ReferenceTypeBitfields ReferenceTypeBits;
+ TypeWithKeywordBitfields TypeWithKeywordBits;
+ VectorTypeBitfields VectorTypeBits;
+ };
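+
+  // Each per-subclass struct above starts with 'unsigned : NumTypeBits;'
+  // padding, so its bits sit above TypeBits' bits within the same word of
+  // this union. An illustrative (hypothetical) consequence:
+  //   BuiltinTypeBits.Kind = K;  // occupies bits [NumTypeBits, NumTypeBits+8)
+  //   TypeBits.TC = Builtin;     // occupies bits [0, 8) of the same word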
+
+private:
/// \brief Set whether this type comes from an AST file.
void setFromAST(bool V = true) const {
- FromAST = V;
+ TypeBits.FromAST = V;
}
-protected:
- /// \brief Compute the linkage of this type.
- virtual Linkage getLinkageImpl() const;
-
- enum { BitsRemainingInType = 19 };
+ template <class T> friend class TypePropertyCache;
+protected:
// silence VC++ warning C4355: 'this' : used in base member initializer list
Type *this_() { return this; }
- Type(TypeClass tc, QualType Canonical, bool dependent)
- : CanonicalType(Canonical.isNull() ? QualType(this_(), 0) : Canonical),
- TC(tc), Dependent(dependent), LinkageKnown(false),
- CachedLinkage(NoLinkage), FromAST(false) {}
- virtual ~Type();
+ Type(TypeClass tc, QualType canon, bool Dependent, bool VariablyModified,
+ bool ContainsUnexpandedParameterPack)
+ : ExtQualsTypeCommonBase(this,
+ canon.isNull() ? QualType(this_(), 0) : canon) {
+ TypeBits.TC = tc;
+ TypeBits.Dependent = Dependent;
+ TypeBits.VariablyModified = VariablyModified;
+ TypeBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ TypeBits.CacheValidAndVisibility = 0;
+ TypeBits.CachedLocalOrUnnamed = false;
+ TypeBits.CachedLinkage = NoLinkage;
+ TypeBits.FromAST = false;
+ }
friend class ASTContext;
+ void setDependent(bool D = true) { TypeBits.Dependent = D; }
+ void setVariablyModified(bool VM = true) { TypeBits.VariablyModified = VM; }
+ void setContainsUnexpandedParameterPack(bool PP = true) {
+ TypeBits.ContainsUnexpandedParameterPack = PP;
+ }
+
public:
- TypeClass getTypeClass() const { return static_cast<TypeClass>(TC); }
+ TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
/// \brief Whether this type comes from an AST file.
- bool isFromAST() const { return FromAST; }
+ bool isFromAST() const { return TypeBits.FromAST; }
+
+ /// \brief Whether this type is or contains an unexpanded parameter
+ /// pack, used to support C++0x variadic templates.
+ ///
+ /// A type that contains a parameter pack shall be expanded by the
+ /// ellipsis operator at some point. For example, the typedef in the
+ /// following example contains an unexpanded parameter pack 'T':
+ ///
+ /// \code
+ /// template<typename ...T>
+ /// struct X {
+ /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
+ /// };
+ /// \endcode
+ ///
+  /// Note that this routine does not specify which parameter packs
+  /// within the type are unexpanded; it only indicates whether any
+  /// remain.
+ bool containsUnexpandedParameterPack() const {
+ return TypeBits.ContainsUnexpandedParameterPack;
+ }
+ /// Determines if this type would be canonical if it had no further
+ /// qualification.
bool isCanonicalUnqualified() const {
- return CanonicalType.getTypePtr() == this;
+ return CanonicalType == QualType(this, 0);
}
/// Types are partitioned into 3 broad categories (C99 6.2.5p1):
@@ -846,6 +1147,14 @@ public:
bool isIncompleteOrObjectType() const {
return !isFunctionType();
}
+
+ /// \brief Determine whether this type is an object type.
+ bool isObjectType() const {
+ // C++ [basic.types]p8:
+ // An object type is a (possibly cv-qualified) type that is not a
+ // function type, not a reference type, and not a void type.
+ return !isReferenceType() && !isFunctionType() && !isVoidType();
+ }
/// isPODType - Return true if this is a plain-old-data type (C++ 3.9p10).
bool isPODType() const;
@@ -854,10 +1163,6 @@ public:
/// (C++0x [basic.types]p10)
bool isLiteralType() const;
- /// isVariablyModifiedType (C99 6.7.5.2p2) - Return true for variable array
- /// types that have a non-constant expression. This does not include "[]".
- bool isVariablyModifiedType() const;
-
/// Helper methods to distinguish type categories. All type predicates
/// operate on the canonical type, ignoring typedefs and qualifiers.
@@ -867,6 +1172,12 @@ public:
/// isSpecificBuiltinType - Test for a particular builtin type.
bool isSpecificBuiltinType(unsigned K) const;
+ /// isPlaceholderType - Test for a type which does not represent an
+ /// actual type-system type but is instead used as a placeholder for
+ /// various convenient purposes within Clang. All such types are
+ /// BuiltinTypes.
+ bool isPlaceholderType() const;
+
/// isIntegerType() does *not* include complex integers (a GCC extension).
/// isComplexIntegerType() can be used to test for complex integers.
bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
@@ -879,6 +1190,9 @@ public:
/// \brief Determine whether this type is an integral or enumeration type.
bool isIntegralOrEnumerationType() const;
+ /// \brief Determine whether this type is an integral or unscoped enumeration
+ /// type.
+ bool isIntegralOrUnscopedEnumerationType() const;
/// Floating point categories.
bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
@@ -938,10 +1252,33 @@ public:
bool isTemplateTypeParmType() const; // C++ template type parameter
bool isNullPtrType() const; // C++0x nullptr_t
+ enum ScalarTypeKind {
+ STK_Pointer,
+ STK_MemberPointer,
+ STK_Bool,
+ STK_Integral,
+ STK_Floating,
+ STK_IntegralComplex,
+ STK_FloatingComplex
+ };
+ /// getScalarTypeKind - Given that this is a scalar type, classify it.
+ ScalarTypeKind getScalarTypeKind() const;
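+
+  /// An illustrative sketch (not from this header), assuming the operand
+  /// is already known to be scalar:
+  /// \code
+  ///   bool isFloatLike(const Type *T) {
+  ///     switch (T->getScalarTypeKind()) {
+  ///     case Type::STK_Floating:
+  ///     case Type::STK_FloatingComplex: return true;
+  ///     default:                        return false;
+  ///     }
+  ///   }
+  /// \endcode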
+
/// isDependentType - Whether this type is a dependent type, meaning
/// that its definition somehow depends on a template parameter
/// (C++ [temp.dep.type]).
- bool isDependentType() const { return Dependent; }
+ bool isDependentType() const { return TypeBits.Dependent; }
+
+ /// \brief Whether this type is a variably-modified type (C99 6.7.5).
+ bool isVariablyModifiedType() const { return TypeBits.VariablyModified; }
+
+ /// \brief Whether this type involves a variable-length array type
+ /// with a definite size.
+ bool hasSizedVLAType() const;
+
+ /// \brief Whether this type is or contains a local or unnamed type.
+ bool hasUnnamedOrLocalType() const;
+
bool isOverloadableType() const;
/// \brief Determine wither this type is a C++ elaborated-type-specifier.
@@ -991,15 +1328,41 @@ public:
/// because the type is a RecordType or because it is the injected-class-name
/// type of a class template or class template partial specialization.
CXXRecordDecl *getAsCXXRecordDecl() const;
+
+ /// \brief Get the AutoType whose type will be deduced for a variable with
+ /// an initializer of this type. This looks through declarators like pointer
+ /// types, but not through decltype or typedefs.
+ AutoType *getContainedAutoType() const;
- // Member-template getAs<specific type>'. Look through sugar for
- // an instance of <specific type>. This scheme will eventually
- // replace the specific getAsXXXX methods above.
- //
- // There are some specializations of this member template listed
- // immediately following this class.
+  /// Member-template getAs<specific type>. Look through sugar for
+ /// an instance of <specific type>. This scheme will eventually
+ /// replace the specific getAsXXXX methods above.
+ ///
+ /// There are some specializations of this member template listed
+ /// immediately following this class.
template <typename T> const T *getAs() const;
+ /// A variant of getAs<> for array types which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *getAsArrayTypeUnsafe() const;
+
+ /// Member-template castAs<specific type>. Look through sugar for
+ /// the underlying instance of <specific type>.
+ ///
+ /// This method has the same relationship to getAs<T> as cast<T> has
+ /// to dyn_cast<T>; which is to say, the underlying type *must*
+ /// have the intended type, and this method will never return null.
+ template <typename T> const T *castAs() const;
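+
+  /// A usage sketch (illustrative only): getAs<> may return null, while
+  /// castAs<> asserts that the desugared type really has the requested
+  /// class.
+  /// \code
+  ///   if (const FunctionProtoType *FPT = T->getAs<FunctionProtoType>())
+  ///     (void) FPT->getNumArgs();       // T might not be a prototype
+  ///   const PointerType *PT = T->castAs<PointerType>();  // must be one
+  /// \endcode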
+
+  /// A variant of castAs<> for array types which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *castAsArrayTypeUnsafe() const;
+
+ /// getBaseElementTypeUnsafe - Get the base element type of this
+ /// type, potentially discarding type qualifiers. This method
+ /// should never be used when type qualifiers are meaningful.
+ const Type *getBaseElementTypeUnsafe() const;
+
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
@@ -1040,6 +1403,12 @@ public:
/// \brief Determine the linkage of this type.
Linkage getLinkage() const;
+
+ /// \brief Determine the visibility of this type.
+ Visibility getVisibility() const;
+
+ /// \brief Determine the linkage and visibility of this type.
+ std::pair<Linkage,Visibility> getLinkageAndVisibility() const;
/// \brief Note that the linkage is no longer known.
void ClearLinkageCache();
@@ -1067,6 +1436,9 @@ template <> inline const TypedefType *Type::getAs() const {
#define LEAF_TYPE(Class) \
template <> inline const Class##Type *Type::getAs() const { \
return dyn_cast<Class##Type>(CanonicalType); \
+} \
+template <> inline const Class##Type *Type::castAs() const { \
+ return cast<Class##Type>(CanonicalType); \
}
#include "clang/AST/TypeNodes.def"
@@ -1081,6 +1453,7 @@ public:
Bool, // This is bool and/or _Bool.
Char_U, // This is 'char' for targets where char is unsigned.
UChar, // This is explicitly qualified unsigned char.
+ WChar_U, // This is 'wchar_t' for C++, when unsigned.
Char16, // This is 'char16_t' for C++.
Char32, // This is 'char32_t' for C++.
UShort,
@@ -1091,7 +1464,7 @@ public:
Char_S, // This is 'char' for targets where char is signed.
SChar, // This is explicitly qualified signed char.
- WChar, // This is 'wchar_t' for C++.
+ WChar_S, // This is 'wchar_t' for C++, when signed.
Short,
Int,
Long,
@@ -1102,11 +1475,13 @@ public:
NullPtr, // This is the type of C++0x 'nullptr'.
- Overload, // This represents the type of an overloaded function declaration.
- Dependent, // This represents the type of a type-dependent expression.
+ /// This represents the type of an expression whose type is
+ /// totally unknown, e.g. 'T::foo'. It is permitted for this to
+ /// appear in situations where the structure of the type is
+ /// theoretically deducible.
+ Dependent,
- UndeducedAuto, // In C++0x, this represents the type of an auto variable
- // that has not been deduced yet.
+ Overload, // This represents the type of an overloaded function declaration.
/// The primitive Objective C 'id' type. The type pointed to by the
/// user-visible 'id' type. Only ever shows up in an AST as the base
@@ -1120,37 +1495,42 @@ public:
ObjCSel // This represents the ObjC 'SEL' type.
};
-private:
- Kind TypeKind;
-
-protected:
- virtual Linkage getLinkageImpl() const;
-
+
public:
BuiltinType(Kind K)
- : Type(Builtin, QualType(), /*Dependent=*/(K == Dependent)),
- TypeKind(K) {}
+ : Type(Builtin, QualType(), /*Dependent=*/(K == Dependent),
+ /*VariablyModified=*/false,
+           /*Unexpanded parameter pack=*/false) {
+ BuiltinTypeBits.Kind = K;
+ }
- Kind getKind() const { return TypeKind; }
+ Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
const char *getName(const LangOptions &LO) const;
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
bool isInteger() const {
- return TypeKind >= Bool && TypeKind <= Int128;
+ return getKind() >= Bool && getKind() <= Int128;
}
bool isSignedInteger() const {
- return TypeKind >= Char_S && TypeKind <= Int128;
+ return getKind() >= Char_S && getKind() <= Int128;
}
bool isUnsignedInteger() const {
- return TypeKind >= Bool && TypeKind <= UInt128;
+ return getKind() >= Bool && getKind() <= UInt128;
}
bool isFloatingPoint() const {
- return TypeKind >= Float && TypeKind <= LongDouble;
+ return getKind() >= Float && getKind() <= LongDouble;
+ }
+
+ /// Determines whether this type is a "forbidden" placeholder type,
+ /// i.e. a type which cannot appear in arbitrary positions in a
+ /// fully-formed expression.
+ bool isPlaceholderType() const {
+ return getKind() == Overload;
}
static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
@@ -1163,14 +1543,13 @@ public:
class ComplexType : public Type, public llvm::FoldingSetNode {
QualType ElementType;
ComplexType(QualType Element, QualType CanonicalPtr) :
- Type(Complex, CanonicalPtr, Element->isDependentType()),
+ Type(Complex, CanonicalPtr, Element->isDependentType(),
+ Element->isVariablyModifiedType(),
+ Element->containsUnexpandedParameterPack()),
ElementType(Element) {
}
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
QualType getElementType() const { return ElementType; }
@@ -1188,19 +1567,50 @@ public:
static bool classof(const ComplexType *) { return true; }
};
+/// ParenType - Sugar for parentheses used when specifying types.
+///
+class ParenType : public Type, public llvm::FoldingSetNode {
+ QualType Inner;
+
+ ParenType(QualType InnerType, QualType CanonType) :
+ Type(Paren, CanonType, InnerType->isDependentType(),
+ InnerType->isVariablyModifiedType(),
+ InnerType->containsUnexpandedParameterPack()),
+ Inner(InnerType) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+
+ QualType getInnerType() const { return Inner; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getInnerType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getInnerType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
+ Inner.Profile(ID);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
+ static bool classof(const ParenType *) { return true; }
+};
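+
+/// For illustration (not part of this header), a ParenType arises from a
+/// redundantly parenthesized declarator:
+/// \code
+///   typedef int (INT);  // INT's underlying type is ParenType -> int
+/// \endcode
+/// desugar() strips exactly one such level, returning the inner type.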
+
/// PointerType - C99 6.7.5.1 - Pointer Declarators.
///
class PointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
PointerType(QualType Pointee, QualType CanonicalPtr) :
- Type(Pointer, CanonicalPtr, Pointee->isDependentType()), PointeeType(Pointee) {
+ Type(Pointer, CanonicalPtr, Pointee->isDependentType(),
+ Pointee->isVariablyModifiedType(),
+ Pointee->containsUnexpandedParameterPack()),
+ PointeeType(Pointee) {
}
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
QualType getPointeeType() const { return PointeeType; }
@@ -1226,14 +1636,13 @@ public:
class BlockPointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType; // Block is some kind of pointer type
BlockPointerType(QualType Pointee, QualType CanonicalCls) :
- Type(BlockPointer, CanonicalCls, Pointee->isDependentType()),
+ Type(BlockPointer, CanonicalCls, Pointee->isDependentType(),
+ Pointee->isVariablyModifiedType(),
+ Pointee->containsUnexpandedParameterPack()),
PointeeType(Pointee) {
}
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
// Get the pointee type. Pointee is required to always be a function type.
@@ -1260,48 +1669,33 @@ public:
class ReferenceType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
- /// True if the type was originally spelled with an lvalue sigil.
- /// This is never true of rvalue references but can also be false
- /// on lvalue references because of C++0x [dcl.typedef]p9,
- /// as follows:
- ///
- /// typedef int &ref; // lvalue, spelled lvalue
- /// typedef int &&rvref; // rvalue
- /// ref &a; // lvalue, inner ref, spelled lvalue
- /// ref &&a; // lvalue, inner ref
- /// rvref &a; // lvalue, inner ref, spelled lvalue
- /// rvref &&a; // rvalue, inner ref
- bool SpelledAsLValue;
-
- /// True if the inner type is a reference type. This only happens
- /// in non-canonical forms.
- bool InnerRef;
-
protected:
ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
bool SpelledAsLValue) :
- Type(tc, CanonicalRef, Referencee->isDependentType()),
- PointeeType(Referencee), SpelledAsLValue(SpelledAsLValue),
- InnerRef(Referencee->isReferenceType()) {
+ Type(tc, CanonicalRef, Referencee->isDependentType(),
+ Referencee->isVariablyModifiedType(),
+ Referencee->containsUnexpandedParameterPack()),
+ PointeeType(Referencee)
+ {
+ ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
+ ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
}
- virtual Linkage getLinkageImpl() const;
-
public:
- bool isSpelledAsLValue() const { return SpelledAsLValue; }
- bool isInnerRef() const { return InnerRef; }
+ bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
+ bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
QualType getPointeeTypeAsWritten() const { return PointeeType; }
QualType getPointeeType() const {
// FIXME: this might strip inner qualifiers; okay?
const ReferenceType *T = this;
- while (T->InnerRef)
- T = T->PointeeType->getAs<ReferenceType>();
+ while (T->isInnerRef())
+ T = T->PointeeType->castAs<ReferenceType>();
return T->PointeeType;
}
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, PointeeType, SpelledAsLValue);
+ Profile(ID, PointeeType, isSpelledAsLValue());
}
static void Profile(llvm::FoldingSetNodeID &ID,
QualType Referencee,
@@ -1362,14 +1756,14 @@ class MemberPointerType : public Type, public llvm::FoldingSetNode {
MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr) :
Type(MemberPointer, CanonicalPtr,
- Cls->isDependentType() || Pointee->isDependentType()),
+ Cls->isDependentType() || Pointee->isDependentType(),
+ Pointee->isVariablyModifiedType(),
+ (Cls->containsUnexpandedParameterPack() ||
+ Pointee->containsUnexpandedParameterPack())),
PointeeType(Pointee), Class(Cls) {
}
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
QualType getPointeeType() const { return PointeeType; }
@@ -1420,14 +1814,6 @@ private:
/// ElementType - The element type of the array.
QualType ElementType;
- // NOTE: VC++ treats enums as signed, avoid using the ArraySizeModifier enum
- /// NOTE: These fields are packed into the bitfields space in the Type class.
- unsigned SizeModifier : 2;
-
- /// IndexTypeQuals - Capture qualifiers in declarations like:
- /// 'int X[static restrict 4]'. For function parameters only.
- unsigned IndexTypeQuals : 3;
-
protected:
// C++ [temp.dep.type]p1:
// A type is dependent if it is...
@@ -1435,23 +1821,29 @@ protected:
// size is specified by a constant expression that is
// value-dependent,
ArrayType(TypeClass tc, QualType et, QualType can,
- ArraySizeModifier sm, unsigned tq)
- : Type(tc, can, et->isDependentType() || tc == DependentSizedArray),
- ElementType(et), SizeModifier(sm), IndexTypeQuals(tq) {}
+ ArraySizeModifier sm, unsigned tq,
+ bool ContainsUnexpandedParameterPack)
+ : Type(tc, can, et->isDependentType() || tc == DependentSizedArray,
+ (tc == VariableArray || et->isVariablyModifiedType()),
+ ContainsUnexpandedParameterPack),
+ ElementType(et) {
+ ArrayTypeBits.IndexTypeQuals = tq;
+ ArrayTypeBits.SizeModifier = sm;
+ }
friend class ASTContext; // ASTContext creates these.
- virtual Linkage getLinkageImpl() const;
-
public:
QualType getElementType() const { return ElementType; }
ArraySizeModifier getSizeModifier() const {
- return ArraySizeModifier(SizeModifier);
+ return ArraySizeModifier(ArrayTypeBits.SizeModifier);
}
Qualifiers getIndexTypeQualifiers() const {
- return Qualifiers::fromCVRMask(IndexTypeQuals);
+ return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
+ }
+ unsigned getIndexTypeCVRQualifiers() const {
+ return ArrayTypeBits.IndexTypeQuals;
}
- unsigned getIndexTypeCVRQualifiers() const { return IndexTypeQuals; }
static bool classof(const Type *T) {
return T->getTypeClass() == ConstantArray ||
@@ -1471,12 +1863,14 @@ class ConstantArrayType : public ArrayType {
ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
ArraySizeModifier sm, unsigned tq)
- : ArrayType(ConstantArray, et, can, sm, tq),
+ : ArrayType(ConstantArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()),
Size(size) {}
protected:
ConstantArrayType(TypeClass tc, QualType et, QualType can,
const llvm::APInt &size, ArraySizeModifier sm, unsigned tq)
- : ArrayType(tc, et, can, sm, tq), Size(size) {}
+ : ArrayType(tc, et, can, sm, tq, et->containsUnexpandedParameterPack()),
+ Size(size) {}
friend class ASTContext; // ASTContext creates these.
public:
const llvm::APInt &getSize() const { return Size; }
@@ -1519,7 +1913,8 @@ class IncompleteArrayType : public ArrayType {
IncompleteArrayType(QualType et, QualType can,
ArraySizeModifier sm, unsigned tq)
- : ArrayType(IncompleteArray, et, can, sm, tq) {}
+ : ArrayType(IncompleteArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()) {}
friend class ASTContext; // ASTContext creates these.
public:
bool isSugared() const { return false; }
@@ -1570,7 +1965,8 @@ class VariableArrayType : public ArrayType {
VariableArrayType(QualType et, QualType can, Expr *e,
ArraySizeModifier sm, unsigned tq,
SourceRange brackets)
- : ArrayType(VariableArray, et, can, sm, tq),
+ : ArrayType(VariableArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()),
SizeExpr((Stmt*) e), Brackets(brackets) {}
friend class ASTContext; // ASTContext creates these.
@@ -1613,7 +2009,7 @@ public:
/// until template instantiation occurs, at which point this will
/// become either a ConstantArrayType or a VariableArrayType.
class DependentSizedArrayType : public ArrayType {
- ASTContext &Context;
+ const ASTContext &Context;
/// \brief An assignment expression that will instantiate to the
/// size of the array.
@@ -1625,11 +2021,10 @@ class DependentSizedArrayType : public ArrayType {
/// Brackets - The left and right array brackets.
SourceRange Brackets;
- DependentSizedArrayType(ASTContext &Context, QualType et, QualType can,
+ DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
Expr *e, ArraySizeModifier sm, unsigned tq,
- SourceRange brackets)
- : ArrayType(DependentSizedArray, et, can, sm, tq),
- Context(Context), SizeExpr((Stmt*) e), Brackets(brackets) {}
+ SourceRange brackets);
+
friend class ASTContext; // ASTContext creates these.
public:
@@ -1658,7 +2053,7 @@ public:
getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
}
- static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
QualType ET, ArraySizeModifier SizeMod,
unsigned TypeQuals, Expr *E);
};
@@ -1672,17 +2067,15 @@ public:
/// }
/// @endcode
class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
- ASTContext &Context;
+ const ASTContext &Context;
Expr *SizeExpr;
/// ElementType - The element type of the array.
QualType ElementType;
SourceLocation loc;
- DependentSizedExtVectorType(ASTContext &Context, QualType ElementType,
- QualType can, Expr *SizeExpr, SourceLocation loc)
- : Type (DependentSizedExtVector, can, true),
- Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
- loc(loc) {}
+ DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
+ QualType can, Expr *SizeExpr, SourceLocation loc);
+
friend class ASTContext;
public:
@@ -1702,7 +2095,7 @@ public:
Profile(ID, Context, getElementType(), getSizeExpr());
}
- static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
QualType ElementType, Expr *SizeExpr);
};
@@ -1714,53 +2107,49 @@ public:
/// client is responsible for converting the size into the number of elements.
class VectorType : public Type, public llvm::FoldingSetNode {
public:
- enum AltiVecSpecific {
- NotAltiVec, // is not AltiVec vector
- AltiVec, // is AltiVec vector
- Pixel, // is AltiVec 'vector Pixel'
- Bool // is AltiVec 'vector bool ...'
+ enum VectorKind {
+ GenericVector, // not a target-specific vector type
+ AltiVecVector, // is AltiVec vector
+ AltiVecPixel, // is AltiVec 'vector Pixel'
+ AltiVecBool, // is AltiVec 'vector bool ...'
+ NeonVector, // is ARM Neon vector
+ NeonPolyVector // is ARM Neon polynomial vector
};
protected:
/// ElementType - The element type of the vector.
QualType ElementType;
- /// NumElements - The number of elements in the vector.
- unsigned NumElements;
-
- AltiVecSpecific AltiVecSpec;
-
VectorType(QualType vecType, unsigned nElements, QualType canonType,
- AltiVecSpecific altiVecSpec) :
- Type(Vector, canonType, vecType->isDependentType()),
- ElementType(vecType), NumElements(nElements), AltiVecSpec(altiVecSpec) {}
+ VectorKind vecKind);
+
VectorType(TypeClass tc, QualType vecType, unsigned nElements,
- QualType canonType, AltiVecSpecific altiVecSpec)
- : Type(tc, canonType, vecType->isDependentType()), ElementType(vecType),
- NumElements(nElements), AltiVecSpec(altiVecSpec) {}
+ QualType canonType, VectorKind vecKind);
+
friend class ASTContext; // ASTContext creates these.
- virtual Linkage getLinkageImpl() const;
-
public:
QualType getElementType() const { return ElementType; }
- unsigned getNumElements() const { return NumElements; }
+ unsigned getNumElements() const { return VectorTypeBits.NumElements; }
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- AltiVecSpecific getAltiVecSpecific() const { return AltiVecSpec; }
+ VectorKind getVectorKind() const {
+ return VectorKind(VectorTypeBits.VecKind);
+ }
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), getNumElements(), getTypeClass(), AltiVecSpec);
+ Profile(ID, getElementType(), getNumElements(),
+ getTypeClass(), getVectorKind());
}
static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
unsigned NumElements, TypeClass TypeClass,
- unsigned AltiVecSpec) {
+ VectorKind VecKind) {
ID.AddPointer(ElementType.getAsOpaquePtr());
ID.AddInteger(NumElements);
ID.AddInteger(TypeClass);
- ID.AddInteger(AltiVecSpec);
+ ID.AddInteger(VecKind);
}
static bool classof(const Type *T) {
@@ -1776,7 +2165,7 @@ public:
/// points, colors, and textures (modeled after OpenGL Shading Language).
class ExtVectorType : public VectorType {
ExtVectorType(QualType vecType, unsigned nElements, QualType canonType) :
- VectorType(ExtVector, vecType, nElements, canonType, NotAltiVec) {}
+ VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
friend class ASTContext; // ASTContext creates these.
public:
static int getPointAccessorIdx(char c) {
@@ -1823,7 +2212,7 @@ public:
bool isAccessorWithinNumElements(char c) const {
if (int idx = getAccessorIdx(c)+1)
- return unsigned(idx-1) < NumElements;
+ return unsigned(idx-1) < getNumElements();
return false;
}
bool isSugared() const { return false; }
@@ -1839,40 +2228,20 @@ public:
/// class of FunctionNoProtoType and FunctionProtoType.
///
class FunctionType : public Type {
- virtual void ANCHOR(); // Key function for FunctionType.
-
- /// SubClassData - This field is owned by the subclass, put here to pack
- /// tightly with the ivars in Type.
- bool SubClassData : 1;
-
- /// TypeQuals - Used only by FunctionProtoType, put here to pack with the
- /// other bitfields.
- /// The qualifiers are part of FunctionProtoType because...
- ///
- /// C++ 8.3.5p4: The return type, the parameter type list and the
- /// cv-qualifier-seq, [...], are part of the function type.
- ///
- unsigned TypeQuals : 3;
-
- /// NoReturn - Indicates if the function type is attribute noreturn.
- unsigned NoReturn : 1;
-
- /// RegParm - How many arguments to pass inreg.
- unsigned RegParm : 3;
-
- /// CallConv - The calling convention used by the function.
- unsigned CallConv : 3;
-
// The type returned by the function.
QualType ResultType;
public:
- // This class is used for passing arround the information needed to
- // construct a call. It is not actually used for storage, just for
- // factoring together common arguments.
- // If you add a field (say Foo), other than the obvious places (both, constructors,
- // compile failures), what you need to update is
- // * Operetor==
+ /// ExtInfo - A class which abstracts out some details necessary for
+ /// making a call.
+ ///
+ /// It is not actually used directly for storing this information in
+ /// a FunctionType, although FunctionType does currently use the
+ /// same bit-pattern.
+ ///
+  // If you add a field (say Foo), other than the obvious places (both
+  // constructors, compile failures), what you need to update is
+ // * Operator==
// * getFoo
// * withFoo
// * functionType. Add Foo, getFoo.
@@ -1883,76 +2252,100 @@ class FunctionType : public Type {
// * TypePrinter::PrintFunctionProto
// * AST read and write
// * Codegen
-
class ExtInfo {
+ // Feel free to rearrange or add bits, but if you go over 8,
+ // you'll need to adjust both the Bits field below and
+ // Type::FunctionTypeBitfields.
+
+    //   |  CC  |noreturn|regparm
+    //   |0 .. 2|   3    |4 .. 6
+ enum { CallConvMask = 0x7 };
+ enum { NoReturnMask = 0x8 };
+ enum { RegParmMask = ~(CallConvMask | NoReturnMask),
+ RegParmOffset = 4 };
+
+ unsigned char Bits;
+
+ ExtInfo(unsigned Bits) : Bits(static_cast<unsigned char>(Bits)) {}
+
+ friend class FunctionType;
+
public:
// Constructor with no defaults. Use this when you know that you
// have all the elements (when reading an AST file for example).
- ExtInfo(bool noReturn, unsigned regParm, CallingConv cc) :
- NoReturn(noReturn), RegParm(regParm), CC(cc) {}
+ ExtInfo(bool noReturn, unsigned regParm, CallingConv cc) {
+ Bits = ((unsigned) cc) |
+ (noReturn ? NoReturnMask : 0) |
+ (regParm << RegParmOffset);
+ }
    // Constructor with all defaults. Use this when, for example, creating
    // a function known to use the defaults.
- ExtInfo() : NoReturn(false), RegParm(0), CC(CC_Default) {}
+ ExtInfo() : Bits(0) {}
- bool getNoReturn() const { return NoReturn; }
- unsigned getRegParm() const { return RegParm; }
- CallingConv getCC() const { return CC; }
+ bool getNoReturn() const { return Bits & NoReturnMask; }
+ unsigned getRegParm() const { return Bits >> RegParmOffset; }
+ CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
- bool operator==(const ExtInfo &Other) const {
- return getNoReturn() == Other.getNoReturn() &&
- getRegParm() == Other.getRegParm() &&
- getCC() == Other.getCC();
+ bool operator==(ExtInfo Other) const {
+ return Bits == Other.Bits;
}
- bool operator!=(const ExtInfo &Other) const {
- return !(*this == Other);
+ bool operator!=(ExtInfo Other) const {
+ return Bits != Other.Bits;
}
    // Note that we don't have setters. That is by design; use the
    // following 'with*' methods instead of mutating these objects.
ExtInfo withNoReturn(bool noReturn) const {
- return ExtInfo(noReturn, getRegParm(), getCC());
+ if (noReturn)
+ return ExtInfo(Bits | NoReturnMask);
+ else
+ return ExtInfo(Bits & ~NoReturnMask);
}
ExtInfo withRegParm(unsigned RegParm) const {
- return ExtInfo(getNoReturn(), RegParm, getCC());
+ return ExtInfo((Bits & ~RegParmMask) | (RegParm << RegParmOffset));
}
ExtInfo withCallingConv(CallingConv cc) const {
- return ExtInfo(getNoReturn(), getRegParm(), cc);
+ return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
}
- private:
- // True if we have __attribute__((noreturn))
- bool NoReturn;
- // The value passed to __attribute__((regparm(x)))
- unsigned RegParm;
- // The calling convention as specified via
- // __attribute__((cdecl|stdcall|fastcall|thiscall|pascal))
- CallingConv CC;
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Bits);
+ }
};
protected:
- FunctionType(TypeClass tc, QualType res, bool SubclassInfo,
- unsigned typeQuals, QualType Canonical, bool Dependent,
- const ExtInfo &Info)
- : Type(tc, Canonical, Dependent),
- SubClassData(SubclassInfo), TypeQuals(typeQuals),
- NoReturn(Info.getNoReturn()),
- RegParm(Info.getRegParm()), CallConv(Info.getCC()), ResultType(res) {}
- bool getSubClassData() const { return SubClassData; }
- unsigned getTypeQuals() const { return TypeQuals; }
+ FunctionType(TypeClass tc, QualType res, bool variadic,
+ unsigned typeQuals, RefQualifierKind RefQualifier,
+ QualType Canonical, bool Dependent,
+ bool VariablyModified, bool ContainsUnexpandedParameterPack,
+ ExtInfo Info)
+ : Type(tc, Canonical, Dependent, VariablyModified,
+ ContainsUnexpandedParameterPack),
+ ResultType(res) {
+ FunctionTypeBits.ExtInfo = Info.Bits;
+ FunctionTypeBits.Variadic = variadic;
+ FunctionTypeBits.TypeQuals = typeQuals;
+ FunctionTypeBits.RefQualifier = static_cast<unsigned>(RefQualifier);
+ }
+ bool isVariadic() const { return FunctionTypeBits.Variadic; }
+ unsigned getTypeQuals() const { return FunctionTypeBits.TypeQuals; }
+
+ RefQualifierKind getRefQualifier() const {
+ return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
+ }
+
public:
QualType getResultType() const { return ResultType; }
- unsigned getRegParmType() const { return RegParm; }
- bool getNoReturnAttr() const { return NoReturn; }
- CallingConv getCallConv() const { return (CallingConv)CallConv; }
- ExtInfo getExtInfo() const {
- return ExtInfo(NoReturn, RegParm, (CallingConv)CallConv);
- }
+ unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
+ bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
+ CallingConv getCallConv() const { return getExtInfo().getCC(); }
+ ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
/// \brief Determine the type of an expression that calls a function of
/// this type.
@@ -1972,15 +2365,13 @@ public:
/// FunctionNoProtoType - Represents a K&R-style 'int foo()' function, which has
/// no information available about its arguments.
class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
- FunctionNoProtoType(QualType Result, QualType Canonical,
- const ExtInfo &Info)
- : FunctionType(FunctionNoProto, Result, false, 0, Canonical,
- /*Dependent=*/false, Info) {}
+ FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
+ : FunctionType(FunctionNoProto, Result, false, 0, RQ_None, Canonical,
+ /*Dependent=*/false, Result->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false, Info) {}
+
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
// No additional state past what FunctionType provides.
@@ -1991,10 +2382,8 @@ public:
Profile(ID, getResultType(), getExtInfo());
}
static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
- const ExtInfo &Info) {
- ID.AddInteger(Info.getCC());
- ID.AddInteger(Info.getRegParm());
- ID.AddInteger(Info.getNoReturn());
+ ExtInfo Info) {
+ Info.Profile(ID);
ID.AddPointer(ResultType.getAsOpaquePtr());
}
@@ -2010,36 +2399,37 @@ public:
/// exception specification, but this specification is not part of the canonical
/// type.
class FunctionProtoType : public FunctionType, public llvm::FoldingSetNode {
- /// hasAnyDependentType - Determine whether there are any dependent
- /// types within the arguments passed in.
- static bool hasAnyDependentType(const QualType *ArgArray, unsigned numArgs) {
+public:
+ /// ExtProtoInfo - Extra information about a function prototype.
+ struct ExtProtoInfo {
+ ExtProtoInfo() :
+ Variadic(false), HasExceptionSpec(false), HasAnyExceptionSpec(false),
+ TypeQuals(0), RefQualifier(RQ_None), NumExceptions(0), Exceptions(0) {}
+
+ FunctionType::ExtInfo ExtInfo;
+ bool Variadic;
+ bool HasExceptionSpec;
+ bool HasAnyExceptionSpec;
+ unsigned char TypeQuals;
+ RefQualifierKind RefQualifier;
+ unsigned NumExceptions;
+ const QualType *Exceptions;
+ };
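+
+  /// A construction sketch (illustrative only; the surrounding context and
+  /// result names below are assumed, not taken from this header):
+  /// \code
+  ///   FunctionProtoType::ExtProtoInfo EPI;
+  ///   EPI.Variadic = true;
+  ///   EPI.TypeQuals = Qualifiers::Const;  // e.g. a const member function
+  ///   EPI.RefQualifier = RQ_LValue;
+  ///   // e.g.: QualType FT = Context.getFunctionType(Result, Args, N, EPI);
+  /// \endcode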
+
+private:
+ /// \brief Determine whether there are any argument types that
+ /// contain an unexpanded parameter pack.
+ static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
+ unsigned numArgs) {
for (unsigned Idx = 0; Idx < numArgs; ++Idx)
- if (ArgArray[Idx]->isDependentType())
- return true;
+ if (ArgArray[Idx]->containsUnexpandedParameterPack())
+ return true;
return false;
}
- FunctionProtoType(QualType Result, const QualType *ArgArray, unsigned numArgs,
- bool isVariadic, unsigned typeQuals, bool hasExs,
- bool hasAnyExs, const QualType *ExArray,
- unsigned numExs, QualType Canonical,
- const ExtInfo &Info)
- : FunctionType(FunctionProto, Result, isVariadic, typeQuals, Canonical,
- (Result->isDependentType() ||
- hasAnyDependentType(ArgArray, numArgs)),
- Info),
- NumArgs(numArgs), NumExceptions(numExs), HasExceptionSpec(hasExs),
- AnyExceptionSpec(hasAnyExs) {
- // Fill in the trailing argument array.
- QualType *ArgInfo = reinterpret_cast<QualType*>(this+1);
- for (unsigned i = 0; i != numArgs; ++i)
- ArgInfo[i] = ArgArray[i];
- // Fill in the exception array.
- QualType *Ex = ArgInfo + numArgs;
- for (unsigned i = 0; i != numExs; ++i)
- Ex[i] = ExArray[i];
- }
+ FunctionProtoType(QualType result, const QualType *args, unsigned numArgs,
+ QualType canonical, const ExtProtoInfo &epi);
/// NumArgs - The number of arguments this function has, not counting '...'.
unsigned NumArgs : 20;
@@ -2048,10 +2438,10 @@ class FunctionProtoType : public FunctionType, public llvm::FoldingSetNode {
unsigned NumExceptions : 10;
/// HasExceptionSpec - Whether this function has an exception spec at all.
- bool HasExceptionSpec : 1;
+ unsigned HasExceptionSpec : 1;
- /// AnyExceptionSpec - Whether this function has a throw(...) spec.
- bool AnyExceptionSpec : 1;
+ /// HasAnyExceptionSpec - Whether this function has a throw(...) spec.
+ unsigned HasAnyExceptionSpec : 1;
  /// ArgInfo - There is a variable-size array after the class in memory that
/// holds the argument types.
@@ -2061,9 +2451,6 @@ class FunctionProtoType : public FunctionType, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
unsigned getNumArgs() const { return NumArgs; }
QualType getArgType(unsigned i) const {
@@ -2071,8 +2458,21 @@ public:
return arg_type_begin()[i];
}
+ ExtProtoInfo getExtProtoInfo() const {
+ ExtProtoInfo EPI;
+ EPI.ExtInfo = getExtInfo();
+ EPI.Variadic = isVariadic();
+ EPI.HasExceptionSpec = hasExceptionSpec();
+ EPI.HasAnyExceptionSpec = hasAnyExceptionSpec();
+ EPI.TypeQuals = static_cast<unsigned char>(getTypeQuals());
+ EPI.RefQualifier = getRefQualifier();
+ EPI.NumExceptions = NumExceptions;
+ EPI.Exceptions = exception_begin();
+ return EPI;
+ }
+
bool hasExceptionSpec() const { return HasExceptionSpec; }
- bool hasAnyExceptionSpec() const { return AnyExceptionSpec; }
+ bool hasAnyExceptionSpec() const { return HasAnyExceptionSpec; }
unsigned getNumExceptions() const { return NumExceptions; }
QualType getExceptionType(unsigned i) const {
assert(i < NumExceptions && "Invalid exception number!");
@@ -2083,9 +2483,24 @@ public:
getNumExceptions() == 0;
}
- bool isVariadic() const { return getSubClassData(); }
+ using FunctionType::isVariadic;
+
+ /// \brief Determines whether this function prototype contains a
+ /// parameter pack at the end.
+ ///
+ /// A function template whose last parameter is a parameter pack can be
+ /// called with an arbitrary number of arguments, much like a variadic
+  /// function.
+ bool isTemplateVariadic() const;
+
unsigned getTypeQuals() const { return FunctionType::getTypeQuals(); }
+
+ /// \brief Retrieve the ref-qualifier associated with this function type.
+ RefQualifierKind getRefQualifier() const {
+ return FunctionType::getRefQualifier();
+ }
+
typedef const QualType *arg_type_iterator;
arg_type_iterator arg_type_begin() const {
return reinterpret_cast<const QualType *>(this+1);
@@ -2112,10 +2527,7 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
arg_type_iterator ArgTys, unsigned NumArgs,
- bool isVariadic, unsigned TypeQuals,
- bool hasExceptionSpec, bool anyExceptionSpec,
- unsigned NumExceptions, exception_iterator Exs,
- const ExtInfo &ExtInfo);
+ const ExtProtoInfo &EPI);
};
@@ -2127,7 +2539,8 @@ class UnresolvedUsingType : public Type {
UnresolvedUsingTypenameDecl *Decl;
UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
- : Type(UnresolvedUsing, QualType(), true),
+ : Type(UnresolvedUsing, QualType(), true, false,
+ /*ContainsUnexpandedParameterPack=*/false),
Decl(const_cast<UnresolvedUsingTypenameDecl*>(D)) {}
friend class ASTContext; // ASTContext creates these.
public:
@@ -2156,7 +2569,8 @@ class TypedefType : public Type {
TypedefDecl *Decl;
protected:
TypedefType(TypeClass tc, const TypedefDecl *D, QualType can)
- : Type(tc, can, can->isDependentType()),
+ : Type(tc, can, can->isDependentType(), can->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false),
Decl(const_cast<TypedefDecl*>(D)) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
}
@@ -2165,14 +2579,6 @@ public:
TypedefDecl *getDecl() const { return Decl; }
- /// LookThroughTypedefs - Return the ultimate type this typedef corresponds to
- /// potentially looking through *all* consecutive typedefs. This returns the
- /// sum of the type qualifiers, so if you have:
- /// typedef const int A;
- /// typedef volatile A B;
- /// looking through the typedefs for B will give you "const volatile A".
- QualType LookThroughTypedefs() const;
-
bool isSugared() const { return true; }
QualType desugar() const;
@@ -2208,10 +2614,10 @@ public:
/// of this class via TypeOfExprType nodes.
class DependentTypeOfExprType
: public TypeOfExprType, public llvm::FoldingSetNode {
- ASTContext &Context;
+ const ASTContext &Context;
public:
- DependentTypeOfExprType(ASTContext &Context, Expr *E)
+ DependentTypeOfExprType(const ASTContext &Context, Expr *E)
: TypeOfExprType(E), Context(Context) { }
bool isSugared() const { return false; }
@@ -2221,7 +2627,7 @@ public:
Profile(ID, Context, getUnderlyingExpr());
}
- static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
Expr *E);
};
@@ -2229,7 +2635,9 @@ public:
class TypeOfType : public Type {
QualType TOType;
TypeOfType(QualType T, QualType can)
- : Type(TypeOf, can, T->isDependentType()), TOType(T) {
+ : Type(TypeOf, can, T->isDependentType(), T->isVariablyModifiedType(),
+ T->containsUnexpandedParameterPack()),
+ TOType(T) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
}
friend class ASTContext; // ASTContext creates these.
@@ -2279,10 +2687,10 @@ public:
/// canonical, dependent types, only. Clients will only see instances
/// of this class via DecltypeType nodes.
class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
- ASTContext &Context;
+ const ASTContext &Context;
public:
- DependentDecltypeType(ASTContext &Context, Expr *E);
+ DependentDecltypeType(const ASTContext &Context, Expr *E);
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -2291,7 +2699,7 @@ public:
Profile(ID, Context, getUnderlyingExpr());
}
- static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
Expr *E);
};
@@ -2303,8 +2711,6 @@ class TagType : public Type {
protected:
TagType(TypeClass TC, const TagDecl *D, QualType can);
- virtual Linkage getLinkageImpl() const;
-
public:
TagDecl *getDecl() const;
@@ -2340,10 +2746,6 @@ public:
// const, it needs to return false.
bool hasConstFields() const { return false; }
- // FIXME: RecordType needs to check when it is created that all fields are in
- // the same address space, and return that.
- unsigned getAddressSpace() const { return 0; }
-
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -2376,20 +2778,107 @@ public:
static bool classof(const EnumType *) { return true; }
};
+/// AttributedType - An attributed type is a type to which a type
+/// attribute has been applied. The "modified type" is the
+/// fully-sugared type to which the attribute was applied;
+/// generally it is not canonically equivalent to the attributed type.
+/// The "equivalent type" is the minimally-desugared type which the
+/// type is canonically equivalent to.
+///
+/// For example, in the following attributed type:
+/// int32_t __attribute__((vector_size(16)))
+/// - the modified type is the TypedefType for int32_t
+/// - the equivalent type is VectorType(16, int32_t)
+/// - the canonical type is VectorType(16, int)
+class AttributedType : public Type, public llvm::FoldingSetNode {
+public:
+ // It is really silly to have yet another attribute-kind enum, but
+ // clang::attr::Kind doesn't currently cover the pure type attrs.
+ enum Kind {
+ // Expression operand.
+ attr_address_space,
+ attr_regparm,
+ attr_vector_size,
+ attr_neon_vector_type,
+ attr_neon_polyvector_type,
+
+ FirstExprOperandKind = attr_address_space,
+ LastExprOperandKind = attr_neon_polyvector_type,
+
+ // Enumerated operand (string or keyword).
+ attr_objc_gc,
+
+ FirstEnumOperandKind = attr_objc_gc,
+ LastEnumOperandKind = attr_objc_gc,
+
+ // No operand.
+ attr_noreturn,
+ attr_cdecl,
+ attr_fastcall,
+ attr_stdcall,
+ attr_thiscall,
+ attr_pascal
+ };
+
+private:
+ QualType ModifiedType;
+ QualType EquivalentType;
+
+ friend class ASTContext; // creates these
+
+ AttributedType(QualType canon, Kind attrKind,
+ QualType modified, QualType equivalent)
+ : Type(Attributed, canon, canon->isDependentType(),
+ canon->isVariablyModifiedType(),
+ canon->containsUnexpandedParameterPack()),
+ ModifiedType(modified), EquivalentType(equivalent) {
+ AttributedTypeBits.AttrKind = attrKind;
+ }
+
+public:
+ Kind getAttrKind() const {
+ return static_cast<Kind>(AttributedTypeBits.AttrKind);
+ }
+
+ QualType getModifiedType() const { return ModifiedType; }
+ QualType getEquivalentType() const { return EquivalentType; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getEquivalentType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
+ QualType modified, QualType equivalent) {
+ ID.AddInteger(attrKind);
+ ID.AddPointer(modified.getAsOpaquePtr());
+ ID.AddPointer(equivalent.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Attributed;
+ }
+ static bool classof(const AttributedType *T) { return true; }
+};
+
class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
unsigned Depth : 15;
- unsigned Index : 16;
unsigned ParameterPack : 1;
+ unsigned Index : 16;
IdentifierInfo *Name;
TemplateTypeParmType(unsigned D, unsigned I, bool PP, IdentifierInfo *N,
QualType Canon)
- : Type(TemplateTypeParm, Canon, /*Dependent=*/true),
- Depth(D), Index(I), ParameterPack(PP), Name(N) { }
+ : Type(TemplateTypeParm, Canon, /*Dependent=*/true,
+ /*VariablyModified=*/false, PP),
+ Depth(D), ParameterPack(PP), Index(I), Name(N) { }
TemplateTypeParmType(unsigned D, unsigned I, bool PP)
- : Type(TemplateTypeParm, QualType(this, 0), /*Dependent=*/true),
- Depth(D), Index(I), ParameterPack(PP), Name(0) { }
+ : Type(TemplateTypeParm, QualType(this, 0), /*Dependent=*/true,
+ /*VariablyModified=*/false, PP),
+ Depth(D), ParameterPack(PP), Index(I), Name(0) { }
friend class ASTContext; // ASTContext creates these
@@ -2433,7 +2922,9 @@ class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
const TemplateTypeParmType *Replaced;
SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
- : Type(SubstTemplateTypeParm, Canon, Canon->isDependentType()),
+ : Type(SubstTemplateTypeParm, Canon, Canon->isDependentType(),
+ Canon->isVariablyModifiedType(),
+ Canon->containsUnexpandedParameterPack()),
Replaced(Param) { }
friend class ASTContext;
@@ -2471,6 +2962,101 @@ public:
static bool classof(const SubstTemplateTypeParmType *T) { return true; }
};
+/// \brief Represents the result of substituting a set of types for a template
+/// type parameter pack.
+///
+/// When a pack expansion in the source code contains multiple parameter packs
+/// and those parameter packs correspond to different levels of template
+/// parameter lists, this type node is used to represent a template type
+/// parameter pack from an outer level, which has already had its argument pack
+/// substituted but that still lives within a pack expansion that itself
+/// could not be instantiated. When actually performing a substitution into
+/// that pack expansion (e.g., when all template parameters have corresponding
+/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
+/// at the current pack substitution index.
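+///
+/// A sketch of the situation described above (identifiers invented):
+/// \code
+/// template<typename ...Outer> struct X {
+///   template<typename ...Inner> static void f(pair<Outer, Inner> ...ps);
+/// };
+/// \endcode
+/// Substituting arguments for \c Outer while \c Inner remains unexpanded
+/// leaves \c Outer represented by a SubstTemplateTypeParmPackType inside
+/// the still-uninstantiated pack expansion.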
+class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
+ /// \brief The original type parameter.
+ const TemplateTypeParmType *Replaced;
+
+ /// \brief A pointer to the set of template arguments that this
+ /// parameter pack is instantiated with.
+ const TemplateArgument *Arguments;
+
+ /// \brief The number of template arguments in \c Arguments.
+ unsigned NumArguments;
+
+ SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack);
+
+ friend class ASTContext;
+
+public:
+ IdentifierInfo *getName() const { return Replaced->getName(); }
+
+ /// Gets the template parameter that was substituted for.
+ const TemplateTypeParmType *getReplacedParameter() const {
+ return Replaced;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ TemplateArgument getArgumentPack() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParmPack;
+ }
+ static bool classof(const SubstTemplateTypeParmPackType *T) { return true; }
+};
+
+/// \brief Represents a C++0x auto type.
+///
+/// These types are usually a placeholder for a deduced type. However, within
+/// templates and before the initializer is attached, there is no deduced type
+/// and an auto type is type-dependent and canonical.
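+///
+/// For illustration:
+/// \code
+/// auto x = 17;                   // deduced as int; AutoType is sugar
+/// template<typename T> void f(T t) {
+///   auto y = t;                  // not yet deduced: dependent and canonical
+/// }
+/// \endcode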
+class AutoType : public Type, public llvm::FoldingSetNode {
+ AutoType(QualType DeducedType)
+ : Type(Auto, DeducedType.isNull() ? QualType(this, 0) : DeducedType,
+ /*Dependent=*/DeducedType.isNull(),
+ /*VariablyModified=*/false, /*ContainsParameterPack=*/false) {
+ assert((DeducedType.isNull() || !DeducedType->isDependentType()) &&
+ "deduced a dependent type for auto");
+ }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ bool isSugared() const { return isDeduced(); }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ QualType getDeducedType() const {
+ return isDeduced() ? getCanonicalTypeInternal() : QualType();
+ }
+ bool isDeduced() const {
+ return !isDependentType();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDeducedType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Deduced) {
+ ID.AddPointer(Deduced.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Auto;
+ }
+ static bool classof(const AutoType *T) { return true; }
+};
+
/// \brief Represents the type of a template specialization as written
/// in the source code.
///
@@ -2516,7 +3102,8 @@ public:
/// enclosing the template arguments.
static std::string PrintTemplateArgumentList(const TemplateArgument *Args,
unsigned NumArgs,
- const PrintingPolicy &Policy);
+ const PrintingPolicy &Policy,
+ bool SkipBrackets = false);
static std::string PrintTemplateArgumentList(const TemplateArgumentLoc *Args,
unsigned NumArgs,
@@ -2556,14 +3143,14 @@ public:
}
QualType desugar() const { return getCanonicalTypeInternal(); }
- void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Ctx) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
Profile(ID, Template, getArgs(), NumArgs, Ctx);
}
static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs,
- ASTContext &Context);
+ const ASTContext &Context);
static bool classof(const Type *T) {
return T->getTypeClass() == TemplateSpecialization;
@@ -2607,7 +3194,9 @@ class InjectedClassNameType : public Type {
// currently suitable for AST reading, too much
// interdependencies.
InjectedClassNameType(CXXRecordDecl *D, QualType TST)
- : Type(InjectedClassName, QualType(), true),
+ : Type(InjectedClassName, QualType(), /*Dependent=*/true,
+ /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
Decl(D), InjectedType(TST) {
assert(isa<TemplateSpecializationType>(TST));
assert(!TST.hasQualifiers());
@@ -2666,19 +3255,18 @@ enum ElaboratedTypeKeyword {
/// Also provides a few static helpers for converting and printing
/// elaborated type keyword and tag type kind enumerations.
class TypeWithKeyword : public Type {
- /// Keyword - Encodes an ElaboratedTypeKeyword enumeration constant.
- unsigned Keyword : 3;
-
protected:
TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
- QualType Canonical, bool dependent)
- : Type(tc, Canonical, dependent), Keyword(Keyword) {}
+ QualType Canonical, bool Dependent, bool VariablyModified,
+ bool ContainsUnexpandedParameterPack)
+ : Type(tc, Canonical, Dependent, VariablyModified,
+ ContainsUnexpandedParameterPack) {
+ TypeWithKeywordBits.Keyword = Keyword;
+ }
public:
- virtual ~TypeWithKeyword(); // pin vtable to Type.cpp
-
ElaboratedTypeKeyword getKeyword() const {
- return static_cast<ElaboratedTypeKeyword>(Keyword);
+ return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
}
/// getKeywordForTypeSpec - Converts a type specifier (DeclSpec::TST)
@@ -2730,7 +3318,9 @@ class ElaboratedType : public TypeWithKeyword, public llvm::FoldingSetNode {
ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
QualType NamedType, QualType CanonType)
: TypeWithKeyword(Keyword, Elaborated, CanonType,
- NamedType->isDependentType()),
+ NamedType->isDependentType(),
+ NamedType->isVariablyModifiedType(),
+ NamedType->containsUnexpandedParameterPack()),
NNS(NNS), NamedType(NamedType) {
assert(!(Keyword == ETK_None && NNS == 0) &&
"ElaboratedType cannot have elaborated type keyword "
@@ -2790,7 +3380,9 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
const IdentifierInfo *Name, QualType CanonType)
- : TypeWithKeyword(Keyword, DependentName, CanonType, true),
+ : TypeWithKeyword(Keyword, DependentName, CanonType, /*Dependent=*/true,
+ /*VariablyModified=*/false,
+ NNS->containsUnexpandedParameterPack()),
NNS(NNS), Name(Name) {
assert(NNS->isDependent() &&
"DependentNameType requires a dependent nested-name-specifier");
@@ -2799,8 +3391,6 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
public:
- virtual ~DependentNameType();
-
/// \brief Retrieve the qualification on this type.
NestedNameSpecifier *getQualifier() const { return NNS; }
@@ -2867,8 +3457,6 @@ class DependentTemplateSpecializationType :
friend class ASTContext; // ASTContext creates these
public:
- virtual ~DependentTemplateSpecializationType();
-
NestedNameSpecifier *getQualifier() const { return NNS; }
const IdentifierInfo *getIdentifier() const { return Name; }
@@ -2889,12 +3477,12 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getKeyword(), NNS, Name, NumArgs, getArgs());
}
static void Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
+ const ASTContext &Context,
ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *Qualifier,
const IdentifierInfo *Name,
@@ -2909,6 +3497,88 @@ public:
}
};
+/// \brief Represents a pack expansion of types.
+///
+/// Pack expansions are part of C++0x variadic templates. A pack
+/// expansion contains a pattern, which itself contains one or more
+/// "unexpanded" parameter packs. When instantiated, a pack expansion
+/// produces a series of types, each instantiated from the pattern of
+/// the expansion, where the Ith instantiation of the pattern uses the
+/// Ith arguments bound to each of the unexpanded parameter packs. The
+/// pack expansion is considered to "expand" these unexpanded
+/// parameter packs.
+///
+/// \code
+/// template<typename ...Types> struct tuple;
+///
+/// template<typename ...Types>
+/// struct tuple_of_references {
+/// typedef tuple<Types&...> type;
+/// };
+/// \endcode
+///
+/// Here, the pack expansion \c Types&... is represented via a
+/// PackExpansionType whose pattern is Types&.
+class PackExpansionType : public Type, public llvm::FoldingSetNode {
+ /// \brief The pattern of the pack expansion.
+ QualType Pattern;
+
+ /// \brief The number of expansions that this pack expansion will
+ /// generate when substituted (+1), or 0 to indicate that the number of
+ /// expansions is not yet known.
+ ///
+ /// This field will only have a non-zero value when some of the parameter
+ /// packs that occur within the pattern have been substituted but others have
+ /// not.
+ unsigned NumExpansions;
+
+ PackExpansionType(QualType Pattern, QualType Canon,
+ llvm::Optional<unsigned> NumExpansions)
+ : Type(PackExpansion, Canon, /*Dependent=*/true,
+ /*VariablyModified=*/Pattern->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Pattern(Pattern),
+ NumExpansions(NumExpansions? *NumExpansions + 1: 0) { }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ /// \brief Retrieve the pattern of this pack expansion, which is the
+ /// type that will be repeatedly instantiated when instantiating the
+ /// pack expansion itself.
+ QualType getPattern() const { return Pattern; }
+
+ /// \brief Retrieve the number of expansions that this pack expansion will
+ /// generate, if known.
+ llvm::Optional<unsigned> getNumExpansions() const {
+ if (NumExpansions)
+ return NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPattern(), getNumExpansions());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions) {
+ ID.AddPointer(Pattern.getAsOpaquePtr());
+ ID.AddBoolean(NumExpansions);
+ if (NumExpansions)
+ ID.AddInteger(*NumExpansions);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == PackExpansion;
+ }
+ static bool classof(const PackExpansionType *T) {
+ return true;
+ }
+};
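+
+// Encoding sketch for NumExpansions (stored as known-count + 1):
+//   NumExpansions == 0  ->  getNumExpansions() returns an empty Optional
+//   NumExpansions == 4  ->  getNumExpansions() returns 3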
+
/// ObjCObjectType - Represents a class type in Objective C.
/// Every Objective C type is a combination of a base type and a
/// list of protocols.
@@ -2930,19 +3600,15 @@ public:
/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
/// this should get its own sugar class to better represent the source.
class ObjCObjectType : public Type {
- // Pad the bit count up so that NumProtocols is 2-byte aligned
- unsigned : BitsRemainingInType - 16;
-
- /// \brief The number of protocols stored after the
- /// ObjCObjectPointerType node.
- ///
- /// These protocols are those written directly on the type. If
- /// protocol qualifiers ever become additive, the iterators will
- /// get kindof complicated.
- ///
- /// In the canonical object type, these are sorted alphabetically
- /// and uniqued.
- unsigned NumProtocols : 16;
+ // ObjCObjectType.NumProtocols - the number of protocols stored
+ // after the ObjCObjectPointerType node.
+ //
+ // These protocols are those written directly on the type. If
+ // protocol qualifiers ever become additive, the iterators will need
+ // to get kind of complicated.
+ //
+ // In the canonical object type, these are sorted alphabetically
+ // and uniqued.
/// Either a BuiltinType or an InterfaceType or sugar for either.
QualType BaseType;
@@ -2959,13 +3625,11 @@ protected:
enum Nonce_ObjCInterface { Nonce_ObjCInterface };
ObjCObjectType(enum Nonce_ObjCInterface)
- : Type(ObjCInterface, QualType(), false),
- NumProtocols(0),
- BaseType(QualType(this_(), 0)) {}
+ : Type(ObjCInterface, QualType(), false, false, false),
+ BaseType(QualType(this_(), 0)) {
+ ObjCObjectTypeBits.NumProtocols = 0;
+ }
-protected:
- Linkage getLinkageImpl() const; // key function
-
public:
/// getBaseType - Gets the base type of this object type. This is
/// always (possibly sugar for) one of:
@@ -3006,7 +3670,7 @@ public:
/// getNumProtocols - Return the number of qualifying protocols in this
/// interface type, or 0 if there are none.
- unsigned getNumProtocols() const { return NumProtocols; }
+ unsigned getNumProtocols() const { return ObjCObjectTypeBits.NumProtocols; }
/// \brief Fetch a protocol by index.
ObjCProtocolDecl *getProtocol(unsigned I) const {
@@ -3072,6 +3736,7 @@ class ObjCInterfaceType : public ObjCObjectType {
: ObjCObjectType(Nonce_ObjCInterface),
Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
friend class ASTContext; // ASTContext creates these.
+
public:
/// getDecl - Get the declaration of this interface.
ObjCInterfaceDecl *getDecl() const { return Decl; }
@@ -3117,13 +3782,10 @@ class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
QualType PointeeType;
ObjCObjectPointerType(QualType Canonical, QualType Pointee)
- : Type(ObjCObjectPointer, Canonical, false),
+ : Type(ObjCObjectPointer, Canonical, false, false, false),
PointeeType(Pointee) {}
friend class ASTContext; // ASTContext creates these.
-protected:
- virtual Linkage getLinkageImpl() const;
-
public:
/// getPointeeType - Gets the type pointed to by this ObjC pointer.
/// The result will always be an ObjCObjectType or sugar thereof.
@@ -3152,7 +3814,7 @@ public:
/// would return 'A1P<Q>' (and we'd have to make iterating over
/// qualifiers more complicated).
const ObjCObjectType *getObjectType() const {
- return PointeeType->getAs<ObjCObjectType>();
+ return PointeeType->castAs<ObjCObjectType>();
}
/// getInterfaceType - If this pointer points to an Objective C
@@ -3238,177 +3900,154 @@ public:
/// A qualifier set is used to build a set of qualifiers.
class QualifierCollector : public Qualifiers {
- ASTContext *Context;
-
public:
- QualifierCollector(Qualifiers Qs = Qualifiers())
- : Qualifiers(Qs), Context(0) {}
- QualifierCollector(ASTContext &Context, Qualifiers Qs = Qualifiers())
- : Qualifiers(Qs), Context(&Context) {}
-
- void setContext(ASTContext &C) { Context = &C; }
+ QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
/// Collect any qualifiers on the given type and return an
- /// unqualified type.
- const Type *strip(QualType QT) {
- addFastQualifiers(QT.getLocalFastQualifiers());
- if (QT.hasLocalNonFastQualifiers()) {
- const ExtQuals *EQ = QT.getExtQualsUnsafe();
- Context = &EQ->getContext();
- addQualifiers(EQ->getQualifiers());
- return EQ->getBaseType();
- }
- return QT.getTypePtrUnsafe();
+ /// unqualified type. The qualifiers are assumed to be consistent
+ /// with those already in the type.
+ const Type *strip(QualType type) {
+ addFastQualifiers(type.getLocalFastQualifiers());
+ if (!type.hasLocalNonFastQualifiers())
+ return type.getTypePtrUnsafe();
+
+ const ExtQuals *extQuals = type.getExtQualsUnsafe();
+ addConsistentQualifiers(extQuals->getQualifiers());
+ return extQuals->getBaseType();
}
/// Apply the collected qualifiers to the given type.
- QualType apply(QualType QT) const;
+ QualType apply(const ASTContext &Context, QualType QT) const;
/// Apply the collected qualifiers to the given type.
- QualType apply(const Type* T) const;
-
+ QualType apply(const ASTContext &Context, const Type* T) const;
};
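+
+// Illustrative usage (sketch; assumes an ASTContext &Ctx and a QualType QT
+// in scope):
+//   QualifierCollector Qc;
+//   const Type *T = Qc.strip(QT);        // accumulate QT's local qualifiers
+//   QualType Rebuilt = Qc.apply(Ctx, T); // reapply them onto T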
// Inline function definitions.
+inline const Type *QualType::getTypePtr() const {
+ return getCommonPtr()->BaseType;
+}
+
+inline const Type *QualType::getTypePtrOrNull() const {
+ return (isNull() ? 0 : getCommonPtr()->BaseType);
+}
+
+inline SplitQualType QualType::split() const {
+ if (!hasLocalNonFastQualifiers())
+ return SplitQualType(getTypePtrUnsafe(),
+ Qualifiers::fromFastMask(getLocalFastQualifiers()));
+
+ const ExtQuals *eq = getExtQualsUnsafe();
+ Qualifiers qs = eq->getQualifiers();
+ qs.addFastQualifiers(getLocalFastQualifiers());
+ return SplitQualType(eq->getBaseType(), qs);
+}
+
+inline Qualifiers QualType::getLocalQualifiers() const {
+ Qualifiers Quals;
+ if (hasLocalNonFastQualifiers())
+ Quals = getExtQualsUnsafe()->getQualifiers();
+ Quals.addFastQualifiers(getLocalFastQualifiers());
+ return Quals;
+}
+
+inline Qualifiers QualType::getQualifiers() const {
+ Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
+ quals.addFastQualifiers(getLocalFastQualifiers());
+ return quals;
+}
+
+inline unsigned QualType::getCVRQualifiers() const {
+ unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
+ cvr |= getLocalCVRQualifiers();
+ return cvr;
+}
+
+inline QualType QualType::getCanonicalType() const {
+ QualType canon = getCommonPtr()->CanonicalType;
+ return canon.withFastQualifiers(getLocalFastQualifiers());
+}
+
inline bool QualType::isCanonical() const {
- const Type *T = getTypePtr();
- if (hasLocalQualifiers())
- return T->isCanonicalUnqualified() && !isa<ArrayType>(T);
- return T->isCanonicalUnqualified();
+ return getTypePtr()->isCanonicalUnqualified();
}
inline bool QualType::isCanonicalAsParam() const {
+ if (!isCanonical()) return false;
if (hasLocalQualifiers()) return false;
+
const Type *T = getTypePtr();
- return T->isCanonicalUnqualified() &&
- !isa<FunctionType>(T) && !isa<ArrayType>(T);
+ if (T->isVariablyModifiedType() && T->hasSizedVLAType())
+ return false;
+
+ return !isa<FunctionType>(T) && !isa<ArrayType>(T);
}
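+
+// Rationale sketch: in parameter position "int[4]" decays to "int*" and
+// "int(int)" decays to "int(*)(int)", so array and function types are never
+// canonical as parameters.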
inline bool QualType::isConstQualified() const {
return isLocalConstQualified() ||
- getTypePtr()->getCanonicalTypeInternal().isLocalConstQualified();
+ getCommonPtr()->CanonicalType.isLocalConstQualified();
}
inline bool QualType::isRestrictQualified() const {
return isLocalRestrictQualified() ||
- getTypePtr()->getCanonicalTypeInternal().isLocalRestrictQualified();
+ getCommonPtr()->CanonicalType.isLocalRestrictQualified();
}
inline bool QualType::isVolatileQualified() const {
return isLocalVolatileQualified() ||
- getTypePtr()->getCanonicalTypeInternal().isLocalVolatileQualified();
+ getCommonPtr()->CanonicalType.isLocalVolatileQualified();
}
inline bool QualType::hasQualifiers() const {
return hasLocalQualifiers() ||
- getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers();
-}
-
-inline Qualifiers QualType::getQualifiers() const {
- Qualifiers Quals = getLocalQualifiers();
- Quals.addQualifiers(
- getTypePtr()->getCanonicalTypeInternal().getLocalQualifiers());
- return Quals;
-}
-
-inline unsigned QualType::getCVRQualifiers() const {
- return getLocalCVRQualifiers() |
- getTypePtr()->getCanonicalTypeInternal().getLocalCVRQualifiers();
+ getCommonPtr()->CanonicalType.hasLocalQualifiers();
}
-/// getCVRQualifiersThroughArrayTypes - If there are CVR qualifiers for this
-/// type, returns them. Otherwise, if this is an array type, recurses
-/// on the element type until some qualifiers have been found or a non-array
-/// type reached.
-inline unsigned QualType::getCVRQualifiersThroughArrayTypes() const {
- if (unsigned Quals = getCVRQualifiers())
- return Quals;
- QualType CT = getTypePtr()->getCanonicalTypeInternal();
- if (const ArrayType *AT = dyn_cast<ArrayType>(CT))
- return AT->getElementType().getCVRQualifiersThroughArrayTypes();
- return 0;
+inline QualType QualType::getUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return QualType(getTypePtr(), 0);
+
+ return QualType(getSplitUnqualifiedTypeImpl(*this).first, 0);
}
-inline void QualType::removeConst() {
- removeFastQualifiers(Qualifiers::Const);
+inline SplitQualType QualType::getSplitUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return split();
+
+ return getSplitUnqualifiedTypeImpl(*this);
+}
+
+inline void QualType::removeLocalConst() {
+ removeLocalFastQualifiers(Qualifiers::Const);
}
-inline void QualType::removeRestrict() {
- removeFastQualifiers(Qualifiers::Restrict);
+inline void QualType::removeLocalRestrict() {
+ removeLocalFastQualifiers(Qualifiers::Restrict);
}
-inline void QualType::removeVolatile() {
- QualifierCollector Qc;
- const Type *Ty = Qc.strip(*this);
- if (Qc.hasVolatile()) {
- Qc.removeVolatile();
- *this = Qc.apply(Ty);
- }
+inline void QualType::removeLocalVolatile() {
+ removeLocalFastQualifiers(Qualifiers::Volatile);
}
-inline void QualType::removeCVRQualifiers(unsigned Mask) {
+inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
+ assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask);
// Fast path: we don't need to touch the slow qualifiers.
- if (!(Mask & ~Qualifiers::FastMask)) {
- removeFastQualifiers(Mask);
- return;
- }
-
- QualifierCollector Qc;
- const Type *Ty = Qc.strip(*this);
- Qc.removeCVRQualifiers(Mask);
- *this = Qc.apply(Ty);
+ removeLocalFastQualifiers(Mask);
}
/// getAddressSpace - Return the address space of this type.
inline unsigned QualType::getAddressSpace() const {
- if (hasLocalNonFastQualifiers()) {
- const ExtQuals *EQ = getExtQualsUnsafe();
- if (EQ->hasAddressSpace())
- return EQ->getAddressSpace();
- }
-
- QualType CT = getTypePtr()->getCanonicalTypeInternal();
- if (CT.hasLocalNonFastQualifiers()) {
- const ExtQuals *EQ = CT.getExtQualsUnsafe();
- if (EQ->hasAddressSpace())
- return EQ->getAddressSpace();
- }
-
- if (const ArrayType *AT = dyn_cast<ArrayType>(CT))
- return AT->getElementType().getAddressSpace();
- if (const RecordType *RT = dyn_cast<RecordType>(CT))
- return RT->getAddressSpace();
- return 0;
+ return getQualifiers().getAddressSpace();
}
/// getObjCGCAttr - Return the gc attribute of this type.
inline Qualifiers::GC QualType::getObjCGCAttr() const {
- if (hasLocalNonFastQualifiers()) {
- const ExtQuals *EQ = getExtQualsUnsafe();
- if (EQ->hasObjCGCAttr())
- return EQ->getObjCGCAttr();
- }
-
- QualType CT = getTypePtr()->getCanonicalTypeInternal();
- if (CT.hasLocalNonFastQualifiers()) {
- const ExtQuals *EQ = CT.getExtQualsUnsafe();
- if (EQ->hasObjCGCAttr())
- return EQ->getObjCGCAttr();
- }
-
- if (const ArrayType *AT = dyn_cast<ArrayType>(CT))
- return AT->getElementType().getObjCGCAttr();
- if (const ObjCObjectPointerType *PT = CT->getAs<ObjCObjectPointerType>())
- return PT->getPointeeType().getObjCGCAttr();
- // We most look at all pointer types, not just pointer to interface types.
- if (const PointerType *PT = CT->getAs<PointerType>())
- return PT->getPointeeType().getObjCGCAttr();
- return Qualifiers::GCNone;
+ return getQualifiers().getObjCGCAttr();
}
inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
@@ -3436,26 +4075,18 @@ inline bool Qualifiers::isSupersetOf(Qualifiers Other) const {
/// is more qualified than "const int", "volatile int", and
/// "int". However, it is not more qualified than "const volatile
/// int".
-inline bool QualType::isMoreQualifiedThan(QualType Other) const {
- // FIXME: work on arbitrary qualifiers
- unsigned MyQuals = this->getCVRQualifiersThroughArrayTypes();
- unsigned OtherQuals = Other.getCVRQualifiersThroughArrayTypes();
- if (getAddressSpace() != Other.getAddressSpace())
- return false;
- return MyQuals != OtherQuals && (MyQuals | OtherQuals) == MyQuals;
+inline bool QualType::isMoreQualifiedThan(QualType other) const {
+ Qualifiers myQuals = getQualifiers();
+ Qualifiers otherQuals = other.getQualifiers();
+ return (myQuals != otherQuals && myQuals.compatiblyIncludes(otherQuals));
}
/// isAtLeastAsQualifiedAs - Determine whether this type is at last
/// as qualified as the Other type. For example, "const volatile
/// int" is at least as qualified as "const int", "volatile int",
/// "int", and "const volatile int".
-inline bool QualType::isAtLeastAsQualifiedAs(QualType Other) const {
- // FIXME: work on arbitrary qualifiers
- unsigned MyQuals = this->getCVRQualifiersThroughArrayTypes();
- unsigned OtherQuals = Other.getCVRQualifiersThroughArrayTypes();
- if (getAddressSpace() != Other.getAddressSpace())
- return false;
- return (MyQuals | OtherQuals) == MyQuals;
+inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const {
+ return getQualifiers().compatiblyIncludes(other.getQualifiers());
}
/// getNonReferenceType - If Type is a reference type (e.g., const
@@ -3496,7 +4127,7 @@ inline bool Type::isRValueReferenceType() const {
return isa<RValueReferenceType>(CanonicalType);
}
inline bool Type::isFunctionPointerType() const {
- if (const PointerType* T = getAs<PointerType>())
+ if (const PointerType *T = getAs<PointerType>())
return T->getPointeeType()->isFunctionType();
else
return false;
@@ -3531,9 +4162,15 @@ inline bool Type::isVariableArrayType() const {
inline bool Type::isDependentSizedArrayType() const {
return isa<DependentSizedArrayType>(CanonicalType);
}
+inline bool Type::isBuiltinType() const {
+ return isa<BuiltinType>(CanonicalType);
+}
inline bool Type::isRecordType() const {
return isa<RecordType>(CanonicalType);
}
+inline bool Type::isEnumeralType() const {
+ return isa<EnumType>(CanonicalType);
+}
inline bool Type::isAnyComplexType() const {
return isa<ComplexType>(CanonicalType);
}
@@ -3586,10 +4223,6 @@ inline bool Type::isTemplateTypeParmType() const {
return isa<TemplateTypeParmType>(CanonicalType);
}
-inline bool Type::isBuiltinType() const {
- return getAs<BuiltinType>();
-}
-
inline bool Type::isSpecificBuiltinType(unsigned K) const {
if (const BuiltinType *BT = getAs<BuiltinType>())
if (BT->getKind() == (BuiltinType::Kind) K)
@@ -3597,6 +4230,12 @@ inline bool Type::isSpecificBuiltinType(unsigned K) const {
return false;
}
+inline bool Type::isPlaceholderType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ return BT->isPlaceholderType();
+ return false;
+}
+
/// \brief Determines whether this is a type for which one can define
/// an overloaded operator.
inline bool Type::isOverloadableType() const {
@@ -3612,6 +4251,13 @@ inline bool Type::hasObjCPointerRepresentation() const {
return isObjCObjectPointerType();
}
+inline const Type *Type::getBaseElementTypeUnsafe() const {
+ const Type *type = this;
+ while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
+ type = arrayType->getElementType().getTypePtr();
+ return type;
+}
+
/// Insertion operator for diagnostics. This allows sending QualType's into a
/// diagnostic with <<.
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
@@ -3658,6 +4304,35 @@ template <typename T> const T *Type::getAs() const {
return cast<T>(getUnqualifiedDesugaredType());
}
+inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *arr = dyn_cast<ArrayType>(this))
+ return arr;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return 0;
+
+ // If this is a typedef for the type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
+
+template <typename T> const T *Type::castAs() const {
+ ArrayType_cannot_be_used_with_getAs<T> at;
+ (void) at;
+
+ assert(isa<T>(CanonicalType));
+ if (const T *ty = dyn_cast<T>(this)) return ty;
+ return cast<T>(getUnqualifiedDesugaredType());
+}
+
+inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
+ assert(isa<ArrayType>(CanonicalType));
+ if (const ArrayType *arr = dyn_cast<ArrayType>(this)) return arr;
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
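+
+// Sketch of intended use ('T' is a hypothetical const Type*):
+//   if (const ArrayType *AT = T->getAsArrayTypeUnsafe())
+//     use(AT->getElementType());           // strips sugar down to the array
+//   const PointerType *PT = T->castAs<PointerType>(); // asserts on mismatch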
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index f1c64bd..c7f5ee7 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -20,6 +20,7 @@
#include "clang/Basic/Specifiers.h"
namespace clang {
+ class ASTContext;
class ParmVarDecl;
class TypeSourceInfo;
class UnqualTypeLoc;
@@ -38,7 +39,7 @@ class TypeLoc {
protected:
// The correctness of this relies on the property that, for Type *Ty,
// QualType(Ty, 0).getAsOpaquePtr() == (void*) Ty
- void *Ty;
+ const void *Ty;
void *Data;
public:
@@ -56,7 +57,7 @@ public:
TypeLoc() : Ty(0), Data(0) { }
TypeLoc(QualType ty, void *opaqueData)
: Ty(ty.getAsOpaquePtr()), Data(opaqueData) { }
- TypeLoc(Type *ty, void *opaqueData)
+ TypeLoc(const Type *ty, void *opaqueData)
: Ty(ty), Data(opaqueData) { }
TypeLocClass getTypeLocClass() const {
@@ -76,7 +77,7 @@ public:
return QualType::getFromOpaquePtr(Ty);
}
- Type *getTypePtr() const {
+ const Type *getTypePtr() const {
return QualType::getFromOpaquePtr(Ty).getTypePtr();
}
@@ -115,13 +116,36 @@ public:
/// \brief Skips past any qualifiers, if this is qualified.
UnqualTypeLoc getUnqualifiedLoc() const; // implemented in this header
+ TypeLoc IgnoreParens() const {
+ if (isa<ParenTypeLoc>(this))
+ return IgnoreParensImpl(*this);
+ return *this;
+ }
+
/// \brief Initializes this to state that every location in this
/// type is the given location.
///
/// This method exists to provide a simple transition for code that
/// relies on location-less types.
- void initialize(SourceLocation Loc) const {
- initializeImpl(*this, Loc);
+ void initialize(ASTContext &Context, SourceLocation Loc) const {
+ initializeImpl(Context, *this, Loc);
+ }
+
+ /// \brief Initializes this by copying its information from another
+ /// TypeLoc of the same type.
+ void initializeFullCopy(TypeLoc Other) const {
+ assert(getType() == Other.getType());
+ size_t Size = getFullDataSize();
+ memcpy(getOpaqueData(), Other.getOpaqueData(), Size);
+ }
+
+ /// \brief Initializes this by copying its information from another
+ /// TypeLoc of the same type. The given size must be the full data
+ /// size.
+ void initializeFullCopy(TypeLoc Other, unsigned Size) const {
+ assert(getType() == Other.getType());
+ assert(getFullDataSize() == Size);
+ memcpy(getOpaqueData(), Other.getOpaqueData(), Size);
}
friend bool operator==(const TypeLoc &LHS, const TypeLoc &RHS) {
@@ -135,8 +159,9 @@ public:
static bool classof(const TypeLoc *TL) { return true; }
private:
- static void initializeImpl(TypeLoc TL, SourceLocation Loc);
+ static void initializeImpl(ASTContext &Context, TypeLoc TL, SourceLocation Loc);
static TypeLoc getNextTypeLocImpl(TypeLoc TL);
+ static TypeLoc IgnoreParensImpl(TypeLoc TL);
static SourceRange getLocalSourceRangeImpl(TypeLoc TL);
};
@@ -146,14 +171,14 @@ inline TypeLoc TypeSourceInfo::getTypeLoc() const {
}
/// \brief Wrapper of type source information for a type with
-/// no direct quqlaifiers.
+/// no direct qualifiers.
class UnqualTypeLoc : public TypeLoc {
public:
UnqualTypeLoc() {}
- UnqualTypeLoc(Type *Ty, void *Data) : TypeLoc(Ty, Data) {}
+ UnqualTypeLoc(const Type *Ty, void *Data) : TypeLoc(Ty, Data) {}
- Type *getTypePtr() const {
- return reinterpret_cast<Type*>(Ty);
+ const Type *getTypePtr() const {
+ return reinterpret_cast<const Type*>(Ty);
}
TypeLocClass getTypeLocClass() const {
@@ -183,7 +208,7 @@ public:
/// Initializes the local data of this type source info block to
/// provide no information.
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
// do nothing
}
@@ -282,7 +307,7 @@ public:
return getNextTypeLoc(asDerived()->getInnerType());
}
- TypeClass *getTypePtr() const {
+ const TypeClass *getTypePtr() const {
return cast<TypeClass>(Base::getTypePtr());
}
@@ -355,7 +380,7 @@ public:
return true;
}
- TypeClass *getTypePtr() const {
+ const TypeClass *getTypePtr() const {
return cast<TypeClass>(Base::getTypePtr());
}
};
@@ -383,7 +408,7 @@ public:
SourceRange getLocalSourceRange() const {
return SourceRange(getNameLoc(), getNameLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setNameLoc(Loc);
}
@@ -484,7 +509,7 @@ public:
getWrittenBuiltinSpecs().ModeAttr = written;
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setBuiltinLoc(Loc);
if (needsExtraLocalData()) {
WrittenBuiltinSpecs &wbs = getWrittenBuiltinSpecs();
@@ -568,6 +593,137 @@ class SubstTemplateTypeParmTypeLoc :
SubstTemplateTypeParmType> {
};
+/// \brief Wrapper for substituted template type parameter packs.
+class SubstTemplateTypeParmPackTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ SubstTemplateTypeParmPackTypeLoc,
+ SubstTemplateTypeParmPackType> {
+};
+
+struct AttributedLocInfo {
+ union {
+ Expr *ExprOperand;
+
+ /// A raw SourceLocation.
+ unsigned EnumOperandLoc;
+ };
+
+ SourceRange OperandParens;
+
+ SourceLocation AttrLoc;
+};
+
+/// \brief Type source information for an attributed type.
+class AttributedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ AttributedTypeLoc,
+ AttributedType,
+ AttributedLocInfo> {
+public:
+ AttributedType::Kind getAttrKind() const {
+ return getTypePtr()->getAttrKind();
+ }
+
+ bool hasAttrExprOperand() const {
+ return (getAttrKind() >= AttributedType::FirstExprOperandKind &&
+ getAttrKind() <= AttributedType::LastExprOperandKind);
+ }
+
+ bool hasAttrEnumOperand() const {
+ return (getAttrKind() >= AttributedType::FirstEnumOperandKind &&
+ getAttrKind() <= AttributedType::LastEnumOperandKind);
+ }
+
+ bool hasAttrOperand() const {
+ return hasAttrExprOperand() || hasAttrEnumOperand();
+ }
+
+ /// The modified type, which is generally canonically different from
+ /// the attributed type.
+ /// int main(int, char**) __attribute__((noreturn))
+ /// ~~~ ~~~~~~~~~~~~~
+ TypeLoc getModifiedLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ /// The location of the attribute name, i.e.
+ /// __attribute__((regparm(1000)))
+ /// ^~~~~~~
+ SourceLocation getAttrNameLoc() const {
+ return getLocalData()->AttrLoc;
+ }
+ void setAttrNameLoc(SourceLocation loc) {
+ getLocalData()->AttrLoc = loc;
+ }
+
+ /// The attribute's expression operand, if it has one.
+ /// void *cur_thread __attribute__((address_space(21)))
+ /// ^~
+ Expr *getAttrExprOperand() const {
+ assert(hasAttrExprOperand());
+ return getLocalData()->ExprOperand;
+ }
+ void setAttrExprOperand(Expr *e) {
+ assert(hasAttrExprOperand());
+ getLocalData()->ExprOperand = e;
+ }
+
+ /// The location of the attribute's enumerated operand, if it has one.
+ /// void * __attribute__((objc_gc(weak)))
+ /// ^~~~
+ SourceLocation getAttrEnumOperandLoc() const {
+ assert(hasAttrEnumOperand());
+ return SourceLocation::getFromRawEncoding(getLocalData()->EnumOperandLoc);
+ }
+ void setAttrEnumOperandLoc(SourceLocation loc) {
+ assert(hasAttrEnumOperand());
+ getLocalData()->EnumOperandLoc = loc.getRawEncoding();
+ }
+
+ /// The location of the parentheses around the operand, if there is
+ /// an operand.
+ /// void * __attribute__((objc_gc(weak)))
+ /// ^ ^
+ SourceRange getAttrOperandParensRange() const {
+ assert(hasAttrOperand());
+ return getLocalData()->OperandParens;
+ }
+ void setAttrOperandParensRange(SourceRange range) {
+ assert(hasAttrOperand());
+ getLocalData()->OperandParens = range;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ // Note that this does *not* include the range of the attribute
+ // enclosure, e.g.:
+ // __attribute__((foo(bar)))
+ // ^~~~~~~~~~~~~~~ ~~
+ // or
+ // [[foo(bar)]]
+ // ^~ ~~
+ // That enclosure doesn't necessarily belong to a single attribute
+ // anyway.
+ SourceRange range(getAttrNameLoc());
+ if (hasAttrOperand())
+ range.setEnd(getAttrOperandParensRange().getEnd());
+ return range;
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation loc) {
+ setAttrNameLoc(loc);
+ if (hasAttrExprOperand()) {
+ setAttrOperandParensRange(SourceRange(loc));
+ setAttrExprOperand(0);
+ } else if (hasAttrEnumOperand()) {
+ setAttrOperandParensRange(SourceRange(loc));
+ setAttrEnumOperandLoc(loc);
+ }
+ }
+
+ QualType getInnerType() const {
+ return getTypePtr()->getModifiedType();
+ }
+};
+
struct ObjCProtocolListLocInfo {
SourceLocation LAngleLoc;
@@ -638,7 +794,7 @@ public:
return SourceRange(getLAngleLoc(), getRAngleLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setHasBaseTypeAsWritten(true);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
@@ -682,11 +838,51 @@ public:
return SourceRange(getNameLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setNameLoc(Loc);
}
};
+struct ParenLocInfo {
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+
+class ParenTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, ParenTypeLoc, ParenType,
+ ParenLocInfo> {
+public:
+ SourceLocation getLParenLoc() const {
+ return this->getLocalData()->LParenLoc;
+ }
+ SourceLocation getRParenLoc() const {
+ return this->getLocalData()->RParenLoc;
+ }
+ void setLParenLoc(SourceLocation Loc) {
+ this->getLocalData()->LParenLoc = Loc;
+ }
+ void setRParenLoc(SourceLocation Loc) {
+ this->getLocalData()->RParenLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setLParenLoc(Loc);
+ setRParenLoc(Loc);
+ }
+
+ TypeLoc getInnerLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getInnerType();
+ }
+};
+
struct PointerLikeLocInfo {
SourceLocation StarLoc;
@@ -712,7 +908,7 @@ public:
return SourceRange(getSigilLoc(), getSigilLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setSigilLoc(Loc);
}
@@ -812,6 +1008,7 @@ public:
struct FunctionLocInfo {
SourceLocation LParenLoc, RParenLoc;
+ bool TrailingReturn;
};
/// \brief Wrapper for source info for functions.
@@ -819,11 +1016,6 @@ class FunctionTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
FunctionTypeLoc,
FunctionType,
FunctionLocInfo> {
- // ParmVarDecls* are stored after Info, one for each argument.
- ParmVarDecl **getParmArray() const {
- return (ParmVarDecl**) getExtraLocalData();
- }
-
public:
SourceLocation getLParenLoc() const {
return getLocalData()->LParenLoc;
@@ -839,6 +1031,18 @@ public:
getLocalData()->RParenLoc = Loc;
}
+ bool getTrailingReturn() const {
+ return getLocalData()->TrailingReturn;
+ }
+ void setTrailingReturn(bool Trailing) {
+ getLocalData()->TrailingReturn = Trailing;
+ }
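+
+  // Illustration: TrailingReturn would be true for a C++0x declarator such
+  // as "auto f() -> int".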
+
+ // ParmVarDecls* are stored after Info, one for each argument.
+ ParmVarDecl **getParmArray() const {
+ return (ParmVarDecl**) getExtraLocalData();
+ }
+
unsigned getNumArgs() const {
if (isa<FunctionNoProtoType>(getTypePtr()))
return 0;
@@ -855,9 +1059,10 @@ public:
return SourceRange(getLParenLoc(), getRParenLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setLParenLoc(Loc);
setRParenLoc(Loc);
+ setTrailingReturn(false);
for (unsigned i = 0, e = getNumArgs(); i != e; ++i)
setArg(i, NULL);
}
@@ -928,7 +1133,7 @@ public:
return SourceRange(getLBracketLoc(), getRBracketLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setLBracketLoc(Loc);
setRBracketLoc(Loc);
setSizeExpr(NULL);
@@ -997,9 +1202,6 @@ public:
return getTypePtr()->getNumArgs();
}
void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
-#ifndef NDEBUG
- AI.validateForArgument(getTypePtr()->getArg(i));
-#endif
getArgInfos()[i] = AI;
}
TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
@@ -1033,47 +1235,18 @@ public:
return SourceRange(getTemplateNameLoc(), getRAngleLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setLAngleLoc(Loc);
setRAngleLoc(Loc);
setTemplateNameLoc(Loc);
- initializeArgLocs(getNumArgs(), getTypePtr()->getArgs(),
+ initializeArgLocs(Context, getNumArgs(), getTypePtr()->getArgs(),
getArgInfos(), Loc);
}
- static void initializeArgLocs(unsigned NumArgs,
+ static void initializeArgLocs(ASTContext &Context, unsigned NumArgs,
const TemplateArgument *Args,
TemplateArgumentLocInfo *ArgInfos,
- SourceLocation Loc) {
- for (unsigned i = 0, e = NumArgs; i != e; ++i) {
- TemplateArgumentLocInfo Info;
-#ifndef NDEBUG
- // If asserts are enabled, be sure to initialize the argument
- // loc with the right kind of pointer.
- switch (Args[i].getKind()) {
- case TemplateArgument::Expression:
- case TemplateArgument::Declaration:
- Info = TemplateArgumentLocInfo((Expr*) 0);
- break;
-
- case TemplateArgument::Type:
- Info = TemplateArgumentLocInfo((TypeSourceInfo*) 0);
- break;
-
- case TemplateArgument::Template:
- Info = TemplateArgumentLocInfo(SourceRange(Loc), Loc);
- break;
-
- case TemplateArgument::Integral:
- case TemplateArgument::Pack:
- case TemplateArgument::Null:
- // K_None is fine.
- break;
- }
-#endif
- ArgInfos[i] = Info;
- }
- }
+ SourceLocation Loc);
unsigned getExtraLocalDataSize() const {
return getNumArgs() * sizeof(TemplateArgumentLocInfo);
@@ -1168,7 +1341,7 @@ public:
return SourceRange(getTypeofLoc(), getRParenLoc());
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setTypeofLoc(Loc);
setLParenLoc(Loc);
setRParenLoc(Loc);
@@ -1208,6 +1381,11 @@ class DecltypeTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
DecltypeType> {
};
+class AutoTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ AutoTypeLoc,
+ AutoType> {
+};
+
struct ElaboratedLocInfo {
SourceLocation KeywordLoc;
SourceRange QualifierRange;
@@ -1242,7 +1420,7 @@ public:
return getQualifierRange();
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setKeywordLoc(Loc);
setQualifierRange(SourceRange(Loc));
}
@@ -1307,7 +1485,7 @@ public:
memcpy(Data, Loc.Data, size);
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setKeywordLoc(Loc);
setQualifierRange(SourceRange(Loc));
setNameLoc(Loc);
@@ -1368,9 +1546,6 @@ public:
}
void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
-#ifndef NDEBUG
- AI.validateForArgument(getTypePtr()->getArg(i));
-#endif
getArgInfos()[i] = AI;
}
TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
@@ -1394,13 +1569,13 @@ public:
memcpy(Data, Loc.Data, size);
}
- void initializeLocal(SourceLocation Loc) {
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
setKeywordLoc(Loc);
setQualifierRange(SourceRange(Loc));
setNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
- TemplateSpecializationTypeLoc::initializeArgLocs(getNumArgs(),
+ TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
getTypePtr()->getArgs(),
getArgInfos(), Loc);
}
@@ -1415,6 +1590,40 @@ private:
}
};
+
+struct PackExpansionTypeLocInfo {
+ SourceLocation EllipsisLoc;
+};
+
+class PackExpansionTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, PackExpansionTypeLoc,
+ PackExpansionType, PackExpansionTypeLocInfo> {
+public:
+ SourceLocation getEllipsisLoc() const {
+ return this->getLocalData()->EllipsisLoc;
+ }
+
+ void setEllipsisLoc(SourceLocation Loc) {
+ this->getLocalData()->EllipsisLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getEllipsisLoc(), getEllipsisLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setEllipsisLoc(Loc);
+ }
+
+ TypeLoc getPatternLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getPattern();
+ }
+};
+
}
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
index 9cb5686..b2591cc 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
@@ -79,6 +79,7 @@ ABSTRACT_TYPE(Function, Type)
TYPE(FunctionProto, FunctionType)
TYPE(FunctionNoProto, FunctionType)
DEPENDENT_TYPE(UnresolvedUsing, Type)
+NON_CANONICAL_TYPE(Paren, Type)
NON_CANONICAL_TYPE(Typedef, Type)
NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TypeOfExpr, Type)
NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TypeOf, Type)
@@ -87,12 +88,16 @@ ABSTRACT_TYPE(Tag, Type)
TYPE(Record, TagType)
TYPE(Enum, TagType)
NON_CANONICAL_TYPE(Elaborated, Type)
+NON_CANONICAL_TYPE(Attributed, Type)
DEPENDENT_TYPE(TemplateTypeParm, Type)
NON_CANONICAL_TYPE(SubstTemplateTypeParm, Type)
+DEPENDENT_TYPE(SubstTemplateTypeParmPack, Type)
NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TemplateSpecialization, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Auto, Type)
DEPENDENT_TYPE(InjectedClassName, Type)
DEPENDENT_TYPE(DependentName, Type)
DEPENDENT_TYPE(DependentTemplateSpecialization, Type)
+DEPENDENT_TYPE(PackExpansion, Type)
TYPE(ObjCObject, Type)
TYPE(ObjCInterface, ObjCObjectType)
TYPE(ObjCObjectPointer, Type)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
index 5c9c5285..c52926b 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
@@ -19,12 +19,13 @@
namespace clang {
#define DISPATCH(CLASS) \
- return static_cast<ImplClass*>(this)->Visit ## CLASS(static_cast<CLASS*>(T))
+ return static_cast<ImplClass*>(this)-> \
+ Visit##CLASS(static_cast<const CLASS*>(T))
template<typename ImplClass, typename RetTy=void>
class TypeVisitor {
public:
- RetTy Visit(Type *T) {
+ RetTy Visit(const Type *T) {
// Top switch stmt: dispatch to VisitFooType for each FooType.
switch (T->getTypeClass()) {
default: assert(0 && "Unknown type class!");
@@ -36,13 +37,13 @@ public:
// If the implementation chooses not to implement a certain visit method, fall
// back on superclass.
-#define TYPE(CLASS, PARENT) RetTy Visit##CLASS##Type(CLASS##Type *T) { \
+#define TYPE(CLASS, PARENT) RetTy Visit##CLASS##Type(const CLASS##Type *T) { \
DISPATCH(PARENT); \
}
#include "clang/AST/TypeNodes.def"
// Base case, ignore it. :)
- RetTy VisitType(Type*) { return RetTy(); }
+ RetTy VisitType(const Type*) { return RetTy(); }
};
#undef DISPATCH
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
index 08200d3..069ead3 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
@@ -384,6 +384,7 @@ using analyze_format_string::OptionalAmount;
using analyze_format_string::OptionalFlag;
class PrintfSpecifier : public analyze_format_string::FormatSpecifier {
+ OptionalFlag HasThousandsGrouping; // ''', POSIX extension.
OptionalFlag IsLeftJustified; // '-'
OptionalFlag HasPlusPrefix; // '+'
OptionalFlag HasSpacePrefix; // ' '
@@ -393,8 +394,8 @@ class PrintfSpecifier : public analyze_format_string::FormatSpecifier {
public:
PrintfSpecifier() :
FormatSpecifier(/* isPrintf = */ true),
- IsLeftJustified("-"), HasPlusPrefix("+"), HasSpacePrefix(" "),
- HasAlternativeForm("#"), HasLeadingZeroes("0") {}
+ HasThousandsGrouping("'"), IsLeftJustified("-"), HasPlusPrefix("+"),
+ HasSpacePrefix(" "), HasAlternativeForm("#"), HasLeadingZeroes("0") {}
static PrintfSpecifier Parse(const char *beg, const char *end);
@@ -402,6 +403,10 @@ public:
void setConversionSpecifier(const PrintfConversionSpecifier &cs) {
CS = cs;
}
+ void setHasThousandsGrouping(const char *position) {
+ HasThousandsGrouping = true;
+ HasThousandsGrouping.setPosition(position);
+ }
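+  // For example, the ''' flag in printf("%'d", 1000000), a POSIX
+  // locale-grouping extension, would be recorded here.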
void setIsLeftJustified(const char *position) {
IsLeftJustified = true;
IsLeftJustified.setPosition(position);
@@ -450,6 +455,9 @@ public:
/// more than one type.
ArgTypeResult getArgType(ASTContext &Ctx) const;
+ const OptionalFlag &hasThousandsGrouping() const {
+ return HasThousandsGrouping;
+ }
const OptionalFlag &isLeftJustified() const { return IsLeftJustified; }
const OptionalFlag &hasPlusPrefix() const { return HasPlusPrefix; }
const OptionalFlag &hasAlternativeForm() const { return HasAlternativeForm; }
@@ -470,6 +478,7 @@ public:
bool hasValidLeadingZeros() const;
bool hasValidSpacePrefix() const;
bool hasValidLeftJustified() const;
+ bool hasValidThousandsGroupingPrefix() const;
bool hasValidPrecision() const;
bool hasValidFieldWidth() const;
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
index 237fe14..fbbd261 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
@@ -55,7 +55,8 @@ struct LiveVariables_ValueTypes {
/// ObserveStmt - A callback invoked right before invoking the
/// liveness transfer function on the given statement.
- virtual void ObserveStmt(Stmt* S, const AnalysisDataTy& AD,
+ virtual void ObserveStmt(Stmt* S, const CFGBlock *currentBlock,
+ const AnalysisDataTy& AD,
const ValTy& V) {}
virtual void ObserverKill(DeclRefExpr* DR) {}
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValuesV2.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValuesV2.h
new file mode 100644
index 0000000..c1fe040
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValuesV2.h
@@ -0,0 +1,40 @@
+//= UninitializedValuesV2.h - Finding uses of uninitialized values --*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs for running the uninitialized-values analysis
+// and reporting its warnings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_UNINIT_VALS_H
+#define LLVM_CLANG_UNINIT_VALS_H
+
+namespace clang {
+
+class AnalysisContext;
+class CFG;
+class DeclContext;
+class Expr;
+class VarDecl;
+
+class UninitVariablesHandler {
+public:
+ UninitVariablesHandler() {}
+ virtual ~UninitVariablesHandler();
+
+ virtual void handleUseOfUninitVariable(const Expr *ex,
+ const VarDecl *vd) {}
+};
+
+void runUninitializedVariablesAnalysis(const DeclContext &dc, const CFG &cfg,
+ AnalysisContext &ac,
+ UninitVariablesHandler &handler);
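+
+// Hypothetical client sketch ('Reporter' is invented):
+//   struct Reporter : public UninitVariablesHandler {
+//     void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd) {
+//       /* diagnose the use of 'vd' at 'ex' */
+//     }
+//   };
+//   Reporter r;
+//   runUninitializedVariablesAnalysis(dc, cfg, ac, r);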
+
+}
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
index 7d4d25f..2ecbfdc 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_ANALYSIS_ANALYSISCONTEXT_H
#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
@@ -56,15 +57,20 @@ class AnalysisContext {
llvm::BumpPtrAllocator A;
bool UseUnoptimizedCFG;
bool AddEHEdges;
+ bool AddImplicitDtors;
+ bool AddInitializers;
public:
AnalysisContext(const Decl *d, idx::TranslationUnit *tu,
bool useUnoptimizedCFG = false,
- bool addehedges = false)
+ bool addehedges = false,
+ bool addImplicitDtors = false,
+ bool addInitializers = false)
: D(d), TU(tu), cfg(0), completeCFG(0),
builtCFG(false), builtCompleteCFG(false),
liveness(0), relaxedLiveness(0), PM(0), PCA(0),
ReferencedBlockVars(0), UseUnoptimizedCFG(useUnoptimizedCFG),
- AddEHEdges(addehedges) {}
+ AddEHEdges(addehedges), AddImplicitDtors(addImplicitDtors),
+ AddInitializers(addInitializers) {}
~AnalysisContext();
@@ -80,13 +86,17 @@ public:
bool getAddEHEdges() const { return AddEHEdges; }
bool getUseUnoptimizedCFG() const { return UseUnoptimizedCFG; }
+ bool getAddImplicitDtors() const { return AddImplicitDtors; }
+ bool getAddInitializers() const { return AddInitializers; }
Stmt *getBody();
CFG *getCFG();
-
+
/// Return a version of the CFG without any edges pruned.
CFG *getUnoptimizedCFG();
+ void dumpCFG();
+
ParentMap &getParentMap();
PseudoConstantAnalysis *getPseudoConstantAnalysis();
LiveVariables *getLiveVariables();
@@ -106,15 +116,21 @@ class AnalysisContextManager {
typedef llvm::DenseMap<const Decl*, AnalysisContext*> ContextMap;
ContextMap Contexts;
bool UseUnoptimizedCFG;
+ bool AddImplicitDtors;
+ bool AddInitializers;
public:
- AnalysisContextManager(bool useUnoptimizedCFG = false)
- : UseUnoptimizedCFG(useUnoptimizedCFG) {}
+ AnalysisContextManager(bool useUnoptimizedCFG = false,
+ bool addImplicitDtors = false, bool addInitializers = false)
+ : UseUnoptimizedCFG(useUnoptimizedCFG), AddImplicitDtors(addImplicitDtors),
+ AddInitializers(addInitializers) {}
~AnalysisContextManager();
AnalysisContext *getContext(const Decl *D, idx::TranslationUnit *TU = 0);
bool getUseUnoptimizedCFG() const { return UseUnoptimizedCFG; }
+ bool getAddImplicitDtors() const { return AddImplicitDtors; }
+ bool getAddInitializers() const { return AddInitializers; }
// Discard all previously created AnalysisContexts.
void clear();
@@ -196,9 +212,10 @@ class StackFrameContext : public LocationContext {
friend class LocationContextManager;
StackFrameContext(AnalysisContext *ctx, const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk, unsigned idx)
- : LocationContext(StackFrame, ctx, parent), CallSite(s), Block(blk),
- Index(idx) {}
+ const Stmt *s, const CFGBlock *blk,
+ unsigned idx)
+ : LocationContext(StackFrame, ctx, parent), CallSite(s),
+ Block(blk), Index(idx) {}
public:
~StackFrameContext() {}
@@ -282,8 +299,8 @@ public:
const StackFrameContext *getStackFrame(AnalysisContext *ctx,
const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk,
- unsigned idx);
+ const Stmt *s,
+ const CFGBlock *blk, unsigned idx);
const ScopeContext *getScope(AnalysisContext *ctx,
const LocationContext *parent,
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
index e98a3df..295d0a2 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define ANALYSISSTART
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
index b7a8e11..b337d74 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
@@ -22,14 +22,21 @@
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/Basic/SourceLocation.h"
#include <cassert>
+#include <iterator>
namespace llvm {
class raw_ostream;
}
+
namespace clang {
class Decl;
class Stmt;
class Expr;
+ class FieldDecl;
+ class VarDecl;
+ class CXXCtorInitializer;
+ class CXXBaseSpecifier;
+ class CXXBindTemporaryExpr;
class CFG;
class PrinterHelper;
class LangOptions;
@@ -37,19 +44,203 @@ namespace clang {
/// CFGElement - Represents a top-level expression in a basic block.
class CFGElement {
- llvm::PointerIntPair<Stmt *, 2> Data;
public:
- enum Type { StartScope, EndScope };
- explicit CFGElement() {}
- CFGElement(Stmt *S, bool lvalue) : Data(S, lvalue ? 1 : 0) {}
- CFGElement(Stmt *S, Type t) : Data(S, t == StartScope ? 2 : 3) {}
- Stmt *getStmt() const { return Data.getPointer(); }
- bool asLValue() const { return Data.getInt() == 1; }
- bool asStartScope() const { return Data.getInt() == 2; }
- bool asEndScope() const { return Data.getInt() == 3; }
- bool asDtor() const { return Data.getInt() == 4; }
+ enum Kind {
+ // main kind
+ Statement,
+ Initializer,
+ ImplicitDtor,
+ // dtor kind
+ AutomaticObjectDtor,
+ BaseDtor,
+ MemberDtor,
+ TemporaryDtor,
+ DTOR_BEGIN = AutomaticObjectDtor
+ };
+
+protected:
+ // The int bits are used to mark the main kind.
+ llvm::PointerIntPair<void *, 2> Data1;
+ // The int bits are used to mark the dtor kind.
+ llvm::PointerIntPair<void *, 2> Data2;
+
+ CFGElement(void *Ptr, unsigned Int) : Data1(Ptr, Int) {}
+ CFGElement(void *Ptr1, unsigned Int1, void *Ptr2, unsigned Int2)
+ : Data1(Ptr1, Int1), Data2(Ptr2, Int2) {}
+
+public:
+ CFGElement() {}
+
+ Kind getKind() const { return static_cast<Kind>(Data1.getInt()); }
+
+ Kind getDtorKind() const {
+ assert(getKind() == ImplicitDtor);
+ return static_cast<Kind>(Data2.getInt() + DTOR_BEGIN);
+ }
+
+ bool isValid() const { return Data1.getPointer(); }
+
+ operator bool() const { return isValid(); }
+
+ template<class ElemTy> ElemTy getAs() const {
+ if (llvm::isa<ElemTy>(this))
+ return *static_cast<const ElemTy*>(this);
+ return ElemTy();
+ }
+
+ static bool classof(const CFGElement *E) { return true; }
+};
+
+class CFGStmt : public CFGElement {
+public:
+ CFGStmt() {}
+ CFGStmt(Stmt *S) : CFGElement(S, 0) {}
+
+ Stmt *getStmt() const { return static_cast<Stmt *>(Data1.getPointer()); }
+
operator Stmt*() const { return getStmt(); }
- operator bool() const { return getStmt() != 0; }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == Statement;
+ }
+};
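
CFGElement is now a tagged value type rather than a Stmt* with flag bits, so clients dispatch on the element kind via getAs<>. A minimal sketch of the pattern, assuming a CFGBlock *B is in scope (this mirrors the VisitBlockStmts and DataflowSolver changes later in this patch):

    for (CFGBlock::const_iterator I = B->begin(), E = B->end(); I != E; ++I) {
      CFGElement El = *I;
      if (CFGStmt S = El.getAs<CFGStmt>()) {
        // Only Statement elements reach here; CFGStmt converts to Stmt*.
        Stmt *Raw = S;
        (void)Raw;
      }
    }
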
+
+/// CFGInitializer - Represents a C++ base or member initializer from a
+/// constructor's initialization list.
+class CFGInitializer : public CFGElement {
+public:
+ CFGInitializer() {}
+ CFGInitializer(CXXCtorInitializer* I)
+ : CFGElement(I, Initializer) {}
+
+ CXXCtorInitializer* getInitializer() const {
+ return static_cast<CXXCtorInitializer*>(Data1.getPointer());
+ }
+ operator CXXCtorInitializer*() const { return getInitializer(); }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == Initializer;
+ }
+};
+
+/// CFGImplicitDtor - Represents a C++ object destructor implicitly generated
+/// by the compiler on various occasions.
+class CFGImplicitDtor : public CFGElement {
+protected:
+ CFGImplicitDtor(unsigned K, void* P, void* S)
+ : CFGElement(P, ImplicitDtor, S, K - DTOR_BEGIN) {}
+
+public:
+ CFGImplicitDtor() {}
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == ImplicitDtor;
+ }
+};
+
+/// CFGAutomaticObjDtor - Represents a C++ object destructor implicitly
+/// generated for an automatic object, or for a temporary bound to a const
+/// reference, at the point of leaving its local scope.
+class CFGAutomaticObjDtor: public CFGImplicitDtor {
+public:
+ CFGAutomaticObjDtor() {}
+ CFGAutomaticObjDtor(VarDecl* VD, Stmt* S)
+ : CFGImplicitDtor(AutomaticObjectDtor, VD, S) {}
+
+ VarDecl* getVarDecl() const {
+ return static_cast<VarDecl*>(Data1.getPointer());
+ }
+
+ // Get the statement whose end triggered the destructor call.
+ Stmt* getTriggerStmt() const {
+ return static_cast<Stmt*>(Data2.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == ImplicitDtor &&
+ E->getDtorKind() == AutomaticObjectDtor;
+ }
+};
+
+/// CFGBaseDtor - Represents a C++ object destructor implicitly generated for
+/// a base object in a destructor.
+class CFGBaseDtor : public CFGImplicitDtor {
+public:
+ CFGBaseDtor() {}
+ CFGBaseDtor(const CXXBaseSpecifier *BS)
+ : CFGImplicitDtor(BaseDtor, const_cast<CXXBaseSpecifier*>(BS), NULL) {}
+
+ const CXXBaseSpecifier *getBaseSpecifier() const {
+ return static_cast<const CXXBaseSpecifier*>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == ImplicitDtor && E->getDtorKind() == BaseDtor;
+ }
+};
+
+/// CFGMemberDtor - Represents a C++ object destructor implicitly generated
+/// for a member object in a destructor.
+class CFGMemberDtor : public CFGImplicitDtor {
+public:
+ CFGMemberDtor() {}
+ CFGMemberDtor(FieldDecl *FD)
+ : CFGImplicitDtor(MemberDtor, FD, NULL) {}
+
+ FieldDecl *getFieldDecl() const {
+ return static_cast<FieldDecl*>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == ImplicitDtor && E->getDtorKind() == MemberDtor;
+ }
+};
+
+/// CFGTemporaryDtor - Represents a C++ object destructor implicitly generated
+/// at the end of a full expression for a temporary object.
+class CFGTemporaryDtor : public CFGImplicitDtor {
+public:
+ CFGTemporaryDtor() {}
+ CFGTemporaryDtor(CXXBindTemporaryExpr *E)
+ : CFGImplicitDtor(TemporaryDtor, E, NULL) {}
+
+ CXXBindTemporaryExpr *getBindTemporaryExpr() const {
+ return static_cast<CXXBindTemporaryExpr *>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == ImplicitDtor && E->getDtorKind() == TemporaryDtor;
+ }
+};
+
+/// CFGTerminator - Represents CFGBlock terminator statement.
+///
+/// The TemporaryDtorsBranch bit is set to true if the terminator marks a
+/// branch in the control flow of temporary destructors. In that case the
+/// terminator statement is the same statement that branches the control flow
+/// in the evaluation of the matching full expression.
+class CFGTerminator {
+ llvm::PointerIntPair<Stmt *, 1> Data;
+public:
+ CFGTerminator() {}
+ CFGTerminator(Stmt *S, bool TemporaryDtorsBranch = false)
+ : Data(S, TemporaryDtorsBranch) {}
+
+ Stmt *getStmt() { return Data.getPointer(); }
+ const Stmt *getStmt() const { return Data.getPointer(); }
+
+ bool isTemporaryDtorsBranch() const { return Data.getInt(); }
+
+ operator Stmt *() { return getStmt(); }
+ operator const Stmt *() const { return getStmt(); }
+
+ Stmt *operator->() { return getStmt(); }
+ const Stmt *operator->() const { return getStmt(); }
+
+ Stmt &operator*() { return *getStmt(); }
+ const Stmt &operator*() const { return *getStmt(); }
+
+ operator bool() const { return getStmt(); }
};
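
Because the terminator now carries this extra bit, clients that care only about real source-level branches should skip temporary-dtor branches explicitly. A small sketch (hypothetical helper name):

    static const Stmt *realBranch(const CFGTerminator &T) {
      if (T && !T.isTemporaryDtorsBranch())
        return T.getStmt();  // the statement that actually branches control flow
      return 0;
    }
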
/// CFGBlock - Represents a single basic block in a source-level CFG.
@@ -77,11 +268,11 @@ public:
/// &&, || expression that uses result of && or ||, RHS
///
class CFGBlock {
- class StatementList {
+ class ElementList {
typedef BumpVector<CFGElement> ImplTy;
ImplTy Impl;
public:
- StatementList(BumpVectorContext &C) : Impl(C, 4) {}
+ ElementList(BumpVectorContext &C) : Impl(C, 4) {}
typedef std::reverse_iterator<ImplTy::iterator> iterator;
typedef std::reverse_iterator<ImplTy::const_iterator> const_iterator;
@@ -89,6 +280,11 @@ class CFGBlock {
typedef ImplTy::const_iterator const_reverse_iterator;
void push_back(CFGElement e, BumpVectorContext &C) { Impl.push_back(e, C); }
+ reverse_iterator insert(reverse_iterator I, size_t Cnt, CFGElement E,
+ BumpVectorContext& C) {
+ return Impl.insert(I, Cnt, E, C);
+ }
+
CFGElement front() const { return Impl.back(); }
CFGElement back() const { return Impl.front(); }
@@ -111,7 +307,7 @@ class CFGBlock {
};
/// Stmts - The set of statements in the basic block.
- StatementList Stmts;
+ ElementList Elements;
/// Label - An (optional) label that prefixes the executable
/// statements in the block. When this variable is non-NULL, it is
@@ -121,7 +317,7 @@ class CFGBlock {
/// Terminator - The terminator for a basic block that
/// indicates the type of control-flow that occurs between a block
/// and its successors.
- Stmt *Terminator;
+ CFGTerminator Terminator;
/// LoopTarget - Some blocks are used to represent the "loop edge" to
/// the start of a loop from within the loop body. This Stmt* will be
@@ -140,33 +336,33 @@ class CFGBlock {
public:
explicit CFGBlock(unsigned blockid, BumpVectorContext &C)
- : Stmts(C), Label(NULL), Terminator(NULL), LoopTarget(NULL),
+ : Elements(C), Label(NULL), Terminator(NULL), LoopTarget(NULL),
BlockID(blockid), Preds(C, 1), Succs(C, 1) {}
~CFGBlock() {}
// Statement iterators
- typedef StatementList::iterator iterator;
- typedef StatementList::const_iterator const_iterator;
- typedef StatementList::reverse_iterator reverse_iterator;
- typedef StatementList::const_reverse_iterator const_reverse_iterator;
+ typedef ElementList::iterator iterator;
+ typedef ElementList::const_iterator const_iterator;
+ typedef ElementList::reverse_iterator reverse_iterator;
+ typedef ElementList::const_reverse_iterator const_reverse_iterator;
- CFGElement front() const { return Stmts.front(); }
- CFGElement back() const { return Stmts.back(); }
+ CFGElement front() const { return Elements.front(); }
+ CFGElement back() const { return Elements.back(); }
- iterator begin() { return Stmts.begin(); }
- iterator end() { return Stmts.end(); }
- const_iterator begin() const { return Stmts.begin(); }
- const_iterator end() const { return Stmts.end(); }
+ iterator begin() { return Elements.begin(); }
+ iterator end() { return Elements.end(); }
+ const_iterator begin() const { return Elements.begin(); }
+ const_iterator end() const { return Elements.end(); }
- reverse_iterator rbegin() { return Stmts.rbegin(); }
- reverse_iterator rend() { return Stmts.rend(); }
- const_reverse_iterator rbegin() const { return Stmts.rbegin(); }
- const_reverse_iterator rend() const { return Stmts.rend(); }
+ reverse_iterator rbegin() { return Elements.rbegin(); }
+ reverse_iterator rend() { return Elements.rend(); }
+ const_reverse_iterator rbegin() const { return Elements.rbegin(); }
+ const_reverse_iterator rend() const { return Elements.rend(); }
- unsigned size() const { return Stmts.size(); }
- bool empty() const { return Stmts.empty(); }
+ unsigned size() const { return Elements.size(); }
+ bool empty() const { return Elements.empty(); }
- CFGElement operator[](size_t i) const { return Stmts[i]; }
+ CFGElement operator[](size_t i) const { return Elements[i]; }
// CFG iterators
typedef AdjacentBlocks::iterator pred_iterator;
@@ -205,14 +401,67 @@ public:
unsigned pred_size() const { return Preds.size(); }
bool pred_empty() const { return Preds.empty(); }
+
+ class FilterOptions {
+ public:
+ FilterOptions() {
+ IgnoreDefaultsWithCoveredEnums = 0;
+ }
+
+ unsigned IgnoreDefaultsWithCoveredEnums : 1;
+ };
+
+ static bool FilterEdge(const FilterOptions &F, const CFGBlock *Src,
+ const CFGBlock *Dst);
+
+ template <typename IMPL, bool IsPred>
+ class FilteredCFGBlockIterator {
+ private:
+ IMPL I, E;
+ const FilterOptions F;
+ const CFGBlock *From;
+ public:
+ explicit FilteredCFGBlockIterator(const IMPL &i, const IMPL &e,
+ const CFGBlock *from,
+ const FilterOptions &f)
+ : I(i), E(e), F(f), From(from) {}
+
+ bool hasMore() const { return I != E; }
+
+ FilteredCFGBlockIterator &operator++() {
+ do { ++I; } while (hasMore() && Filter(*I));
+ return *this;
+ }
+
+ const CFGBlock *operator*() const { return *I; }
+ private:
+ bool Filter(const CFGBlock *To) {
+ return IsPred ? FilterEdge(F, To, From) : FilterEdge(F, From, To);
+ }
+ };
+
+ typedef FilteredCFGBlockIterator<const_pred_iterator, true>
+ filtered_pred_iterator;
+
+ typedef FilteredCFGBlockIterator<const_succ_iterator, false>
+ filtered_succ_iterator;
+
+ filtered_pred_iterator filtered_pred_start_end(const FilterOptions &f) const {
+ return filtered_pred_iterator(pred_begin(), pred_end(), this, f);
+ }
+
+ filtered_succ_iterator filtered_succ_start_end(const FilterOptions &f) const {
+ return filtered_succ_iterator(succ_begin(), succ_end(), this, f);
+ }
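
The filtered iterators are driven by hasMore() rather than an end-iterator comparison. A usage sketch (hypothetical count helper):

    static unsigned numInterestingSuccs(const CFGBlock &B) {
      CFGBlock::FilterOptions FO;
      FO.IgnoreDefaultsWithCoveredEnums = 1;
      unsigned N = 0;
      for (CFGBlock::filtered_succ_iterator I = B.filtered_succ_start_end(FO);
           I.hasMore(); ++I)
        ++N;
      return N;
    }

Note that, as written above, only operator++ applies the filter; the constructor does not pre-filter the first element.
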
+
// Manipulation of block contents
void setTerminator(Stmt* Statement) { Terminator = Statement; }
void setLabel(Stmt* Statement) { Label = Statement; }
void setLoopTarget(const Stmt *loopTarget) { LoopTarget = loopTarget; }
- Stmt* getTerminator() { return Terminator; }
- const Stmt* getTerminator() const { return Terminator; }
+ CFGTerminator getTerminator() { return Terminator; }
+ const CFGTerminator getTerminator() const { return Terminator; }
Stmt* getTerminatorCondition();
@@ -239,17 +488,39 @@ public:
Succs.push_back(Block, C);
}
- void appendStmt(Stmt* Statement, BumpVectorContext &C, bool asLValue) {
- Stmts.push_back(CFGElement(Statement, asLValue), C);
- }
- void StartScope(Stmt* S, BumpVectorContext &C) {
- Stmts.push_back(CFGElement(S, CFGElement::StartScope), C);
+ void appendStmt(Stmt* statement, BumpVectorContext &C) {
+ Elements.push_back(CFGStmt(statement), C);
+ }
+
+ void appendInitializer(CXXCtorInitializer *initializer,
+ BumpVectorContext& C) {
+ Elements.push_back(CFGInitializer(initializer), C);
}
- void EndScope(Stmt* S, BumpVectorContext &C) {
- Stmts.push_back(CFGElement(S, CFGElement::EndScope), C);
+
+ void appendBaseDtor(const CXXBaseSpecifier *BS, BumpVectorContext &C) {
+ Elements.push_back(CFGBaseDtor(BS), C);
}
-};
+ void appendMemberDtor(FieldDecl *FD, BumpVectorContext &C) {
+ Elements.push_back(CFGMemberDtor(FD), C);
+ }
+
+ void appendTemporaryDtor(CXXBindTemporaryExpr *E, BumpVectorContext &C) {
+ Elements.push_back(CFGTemporaryDtor(E), C);
+ }
+
+ // Destructors must be inserted in reverse order, so insertion happens in
+ // two steps: first we prepare space for some number of elements, then we
+ // insert the elements beginning at the last position in the prepared space.
+ iterator beginAutomaticObjDtorsInsert(iterator I, size_t Cnt,
+ BumpVectorContext& C) {
+ return iterator(Elements.insert(I.base(), Cnt, CFGElement(), C));
+ }
+ iterator insertAutomaticObjDtor(iterator I, VarDecl* VD, Stmt* S) {
+ *I = CFGAutomaticObjDtor(VD, S);
+ return ++I;
+ }
+};
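
A sketch of that two-step protocol, assuming Blk is a CFGBlock*, C a BumpVectorContext, and VD1/VD2/S hypothetical decls and trigger statement:

    static void addTwoDtors(CFGBlock *Blk, VarDecl *VD1, VarDecl *VD2,
                            Stmt *S, BumpVectorContext &C) {
      // Step 1: reserve two empty slots at the insertion point.
      CFGBlock::iterator I =
          Blk->beginAutomaticObjDtorsInsert(Blk->begin(), 2, C);
      // Step 2: fill the prepared slots one after the other.
      I = Blk->insertAutomaticObjDtor(I, VD1, S);
      I = Blk->insertAutomaticObjDtor(I, VD2, S);
    }
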
/// CFG - Represents a source-level, intra-procedural CFG that represents the
/// control-flow of a Stmt. The Stmt can represent an entire function body,
@@ -264,13 +535,24 @@ public:
// CFG Construction & Manipulation.
//===--------------------------------------------------------------------===//
+ class BuildOptions {
+ public:
+ bool PruneTriviallyFalseEdges:1;
+ bool AddEHEdges:1;
+ bool AddInitializers:1;
+ bool AddImplicitDtors:1;
+
+ BuildOptions()
+ : PruneTriviallyFalseEdges(true)
+ , AddEHEdges(false)
+ , AddInitializers(false)
+ , AddImplicitDtors(false) {}
+ };
+
/// buildCFG - Builds a CFG from an AST. The responsibility to free the
/// constructed CFG belongs to the caller.
static CFG* buildCFG(const Decl *D, Stmt* AST, ASTContext *C,
- bool pruneTriviallyFalseEdges = true,
- bool AddEHEdges = false,
- bool AddScopes = false /* NOT FULLY IMPLEMENTED.
- NOT READY FOR GENERAL USE. */);
+ BuildOptions BO = BuildOptions());
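
With the flag arguments folded into BuildOptions, call sites now read like this (a sketch; D, Body, and Ctx are assumed to be in scope):

    static CFG *buildWithDtors(const Decl *D, Stmt *Body, ASTContext &Ctx) {
      CFG::BuildOptions BO;
      BO.AddImplicitDtors = true;
      BO.AddInitializers = true;
      return CFG::buildCFG(D, Body, &Ctx, BO);  // caller owns the CFG
    }
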
/// createBlock - Create a new block in the CFG. The CFG owns the block;
/// the caller should not directly free it.
@@ -324,8 +606,10 @@ public:
void VisitBlockStmts(CALLBACK& O) const {
for (const_iterator I=begin(), E=end(); I != E; ++I)
for (CFGBlock::const_iterator BI=(*I)->begin(), BE=(*I)->end();
- BI != BE; ++BI)
- O(*BI);
+ BI != BE; ++BI) {
+ if (CFGStmt S = BI->getAs<CFGStmt>())
+ O(S);
+ }
}
//===--------------------------------------------------------------------===//
@@ -340,7 +624,10 @@ public:
operator unsigned() const { assert(Idx >=0); return (unsigned) Idx; }
};
- bool isBlkExpr(const Stmt* S) { return getBlkExprNum(S); }
+ bool isBlkExpr(const Stmt* S) { return getBlkExprNum(S); }
+ bool isBlkExpr(const Stmt *S) const {
+ return const_cast<CFG*>(this)->isBlkExpr(S);
+ }
BlkExprNumTy getBlkExprNum(const Stmt* S);
unsigned getNumBlkExprs();
@@ -398,18 +685,22 @@ private:
namespace llvm {
-/// Implement simplify_type for CFGElement, so that we can dyn_cast from
-/// CFGElement to a specific Stmt class.
-template <> struct simplify_type<const ::clang::CFGElement> {
- typedef ::clang::Stmt* SimpleType;
- static SimpleType getSimplifiedValue(const ::clang::CFGElement &Val) {
+/// Implement simplify_type for CFGTerminator, so that we can dyn_cast from
+/// CFGTerminator to a specific Stmt class.
+template <> struct simplify_type<const ::clang::CFGTerminator> {
+ typedef const ::clang::Stmt *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::CFGTerminator &Val) {
return Val.getStmt();
}
};
-
-template <> struct simplify_type< ::clang::CFGElement>
- : public simplify_type<const ::clang::CFGElement> {};
-
+
+template <> struct simplify_type< ::clang::CFGTerminator> {
+ typedef ::clang::Stmt *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::CFGTerminator &Val) {
+ return const_cast<SimpleType>(Val.getStmt());
+ }
+};
+
// Traits for: CFGBlock
template <> struct GraphTraits< ::clang::CFGBlock* > {
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/DomainSpecific/CocoaConventions.h b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
index 4bbdab0..7e6e381 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/DomainSpecific/CocoaConventions.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
@@ -11,17 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_DS_COCOA
-#define LLVM_CLANG_CHECKER_DS_COCOA
+#ifndef LLVM_CLANG_ANALYSIS_DS_COCOA
+#define LLVM_CLANG_ANALYSIS_DS_COCOA
#include "clang/AST/Type.h"
namespace clang {
+namespace ento {
namespace cocoa {
enum NamingConvention { NoConvention, CreateRule, InitRule };
- NamingConvention deriveNamingConvention(Selector S);
+ NamingConvention deriveNamingConvention(Selector S, bool ignorePrefix = true);
static inline bool followsFundamentalRule(Selector S) {
return deriveNamingConvention(S) == CreateRule;
@@ -34,6 +35,6 @@ namespace cocoa {
bool isCocoaObjectRef(QualType T);
-}}
+}}}
#endif
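
After the move into the clang::ento namespace, call sites qualify accordingly. A sketch, assuming MD is an ObjCMethodDecl* (helper name is illustrative):

    static bool returnsOwnedObject(const ObjCMethodDecl *MD) {
      // true when the selector follows the Create rule
      return clang::ento::cocoa::followsFundamentalRule(MD->getSelector());
    }
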
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h
index 9375db0..d75d333 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h
@@ -273,8 +273,13 @@ private:
void ProcessBlock(const CFGBlock* B, bool recordStmtValues,
dataflow::forward_analysis_tag) {
- for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I)
- ProcessStmt(*I, recordStmtValues, AnalysisDirTag());
+ TF.setCurrentBlock(B);
+
+ for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I) {
+ CFGElement El = *I;
+ if (CFGStmt S = El.getAs<CFGStmt>())
+ ProcessStmt(S, recordStmtValues, AnalysisDirTag());
+ }
TF.VisitTerminator(const_cast<CFGBlock*>(B));
}
@@ -282,10 +287,15 @@ private:
void ProcessBlock(const CFGBlock* B, bool recordStmtValues,
dataflow::backward_analysis_tag) {
+ TF.setCurrentBlock(B);
+
TF.VisitTerminator(const_cast<CFGBlock*>(B));
- for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I)
- ProcessStmt(*I, recordStmtValues, AnalysisDirTag());
+ for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I) {
+ CFGElement El = *I;
+ if (CFGStmt S = El.getAs<CFGStmt>())
+ ProcessStmt(S, recordStmtValues, AnalysisDirTag());
+ }
}
void ProcessStmt(const Stmt* S, bool record, dataflow::forward_analysis_tag) {
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
index ba303de..54cfc3d 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
@@ -17,7 +17,7 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Casting.h"
@@ -44,6 +44,7 @@ public:
PostPurgeDeadSymbolsKind,
PostStmtCustomKind,
PostLValueKind,
+ PostInitializerKind,
CallEnterKind,
CallExitKind,
MinPostStmtKind = PostStmtKind,
@@ -70,11 +71,12 @@ protected:
protected:
const void* getData1() const { return Data.first; }
const void* getData2() const { return Data.second; }
- const void *getTag() const { return Tag; }
public:
Kind getKind() const { return K; }
+ const void *getTag() const { return Tag; }
+
const LocationContext *getLocationContext() const { return L; }
// For use with DenseMap. This hash is probably slow.
@@ -118,10 +120,12 @@ public:
return B->empty() ? CFGElement() : B->front();
}
- const Stmt *getFirstStmt() const {
- return getFirstElement().getStmt();
+ /// Create a new BlockEntrance object that is the same as the original
+ /// except for using the specified tag value.
+ BlockEntrance withTag(const void *tag) {
+ return BlockEntrance(getBlock(), getLocationContext(), tag);
}
-
+
static bool classof(const ProgramPoint* Location) {
return Location->getKind() == BlockEntranceKind;
}
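
withTag lets a client revisit the same block entrance under a distinct identity; any stable address serves as the opaque tag. A sketch (hypothetical tag variable):

    static BlockEntrance retag(BlockEntrance BE) {
      static int CheckerTag;            // only the address matters, not the value
      return BE.withTag(&CheckerTag);
    }
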
@@ -136,11 +140,6 @@ public:
return reinterpret_cast<const CFGBlock*>(getData1());
}
- const Stmt* getLastStmt() const {
- const CFGBlock* B = getBlock();
- return B->empty() ? CFGElement() : B->back();
- }
-
const Stmt* getTerminator() const {
return getBlock()->getTerminator();
}
@@ -183,14 +182,15 @@ public:
class PostStmt : public StmtPoint {
protected:
- PostStmt(const Stmt* S, Kind k, const LocationContext *L, const void *tag = 0)
- : StmtPoint(S, NULL, k, L, tag) {}
-
PostStmt(const Stmt* S, const void* data, Kind k, const LocationContext *L,
const void *tag =0)
: StmtPoint(S, data, k, L, tag) {}
public:
+ explicit PostStmt(const Stmt* S, Kind k,
+ const LocationContext *L, const void *tag = 0)
+ : StmtPoint(S, NULL, k, L, tag) {}
+
explicit PostStmt(const Stmt* S, const LocationContext *L,const void *tag = 0)
: StmtPoint(S, NULL, PostStmtKind, L, tag) {}
@@ -313,19 +313,29 @@ public:
}
};
+class PostInitializer : public ProgramPoint {
+public:
+ PostInitializer(const CXXCtorInitializer *I,
+ const LocationContext *L)
+ : ProgramPoint(I, PostInitializerKind, L) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == PostInitializerKind;
+ }
+};
+
class CallEnter : public StmtPoint {
public:
- // L is caller's location context. AC is callee's AnalysisContext.
- CallEnter(const Stmt *S, const AnalysisContext *AC, const LocationContext *L)
- : StmtPoint(S, AC, CallEnterKind, L, 0) {}
+ CallEnter(const Stmt *stmt, const StackFrameContext *calleeCtx,
+ const LocationContext *callerCtx)
+ : StmtPoint(stmt, calleeCtx, CallEnterKind, callerCtx, 0) {}
const Stmt *getCallExpr() const {
return static_cast<const Stmt *>(getData1());
}
- AnalysisContext *getCalleeContext() const {
- return const_cast<AnalysisContext *>(
- static_cast<const AnalysisContext *>(getData2()));
+ const StackFrameContext *getCalleeContext() const {
+ return static_cast<const StackFrameContext *>(getData2());
}
static bool classof(const ProgramPoint *Location) {
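
The callee is now identified by its StackFrameContext, which carries the call site, block, and index, rather than a bare AnalysisContext. A construction sketch with hypothetical variables (CallSite a const Stmt*, CalleeFrame a const StackFrameContext*, CallerCtx a const LocationContext*):

    CallEnter Loc(CallSite, CalleeFrame, CallerCtx);
    const StackFrameContext *SFC = Loc.getCalleeContext();
    const Stmt *CE = Loc.getCallExpr();
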
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
index 7cd4812..83532e6 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include <algorithm>
#include <cstring>
+#include <iterator>
#include <memory>
namespace clang {
@@ -155,7 +156,25 @@ public:
grow(C);
goto Retry;
}
-
+
+ /// insert - Insert some number of copies of an element at a position.
+ /// Returns an iterator to the position after the last inserted copy.
+ iterator insert(iterator I, size_t Cnt, const_reference E,
+ BumpVectorContext &C) {
+ assert (I >= Begin && I <= End && "Iterator out of bounds.");
+ if (End + Cnt <= Capacity) {
+ Retry:
+ move_range_right(I, End, Cnt);
+ construct_range(I, I + Cnt, E);
+ End += Cnt;
+ return I + Cnt;
+ }
+ ptrdiff_t D = I - Begin;
+ grow(C, size() + Cnt);
+ I = Begin + D;
+ goto Retry;
+ }
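
A usage sketch of the new insert(); the tail of the vector is shifted right via move_range_right below before the copies are constructed in place. BumpVectorContext here owns its own allocator (default constructor), and values are illustrative:

    BumpVectorContext C;
    BumpVector<int> V(C, /*initial capacity=*/4);
    V.push_back(1, C);
    V.push_back(4, C);
    // insert two copies of 2 before the element 4
    BumpVector<int>::iterator After = V.insert(V.begin() + 1, 2, 2, C);
    // V is now {1, 2, 2, 4}; After points just past the second inserted 2
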
+
void reserve(BumpVectorContext &C, unsigned N) {
if (unsigned(Capacity-Begin) < N)
grow(C, N);
@@ -181,6 +200,14 @@ private:
E->~T();
}
}
+
+ void move_range_right(T *S, T *E, size_t D) {
+ for (T *I = E + D - 1, *IL = S + D - 1; I != IL; --I) {
+ --E;
+ new (I) T(*E);
+ E->~T();
+ }
+ }
};
// Define this out-of-line to dissuade the C++ compiler from inlining it.
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
index f20a49a..95f4ace 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
@@ -66,6 +66,8 @@ public:
DISPATCH_CASE(Record) // FIXME: Refine. VisitStructDecl?
DISPATCH_CASE(CXXRecord)
DISPATCH_CASE(Enum)
+ DISPATCH_CASE(UsingDirective)
+ DISPATCH_CASE(Using)
default:
assert(false && "Subtype of ScopedDecl not handled.");
}
@@ -85,6 +87,8 @@ public:
DEFAULT_DISPATCH(ObjCMethod)
DEFAULT_DISPATCH(ObjCProtocol)
DEFAULT_DISPATCH(ObjCCategory)
+ DEFAULT_DISPATCH(UsingDirective)
+ DEFAULT_DISPATCH(Using)
void VisitCXXRecordDecl(CXXRecordDecl *D) {
static_cast<ImplClass*>(this)->VisitRecordDecl(D);
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h
index 75a4ac6..bb7cf0b 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h
@@ -26,6 +26,11 @@ public:
static_cast< ImplClass* >(this)->VisitChildren(S);
}
+ void VisitCompoundStmt(CompoundStmt *S) {
+ // Do nothing. Everything in a CompoundStmt is inlined
+ // into the CFG.
+ }
+
void VisitConditionVariableInit(Stmt *S) {
assert(S == this->getCurrentBlkStmt());
VarDecl *CondVar = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h
index 6421f18..d197e69 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h
@@ -80,6 +80,7 @@ public:
DISPATCH_CASE(StmtExpr)
DISPATCH_CASE(ConditionalOperator)
+ DISPATCH_CASE(BinaryConditionalOperator)
DISPATCH_CASE(ObjCForCollectionStmt)
case Stmt::BinaryOperatorClass: {
@@ -102,6 +103,7 @@ public:
DEFAULT_BLOCKSTMT_VISIT(StmtExpr)
DEFAULT_BLOCKSTMT_VISIT(ConditionalOperator)
+ DEFAULT_BLOCKSTMT_VISIT(BinaryConditionalOperator)
RetTy BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
return static_cast<ImplClass*>(this)->BlockStmt_VisitStmt(S);
@@ -155,7 +157,7 @@ public:
}
}
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I != E;++I)
+ for (Stmt::child_range I = S->children(); I; ++I)
if (*I) static_cast<ImplClass*>(this)->Visit(*I);
}
};
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ABI.h b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
new file mode 100644
index 0000000..018f500
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
@@ -0,0 +1,126 @@
+//===----- ABI.h - ABI related declarations ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These enums/classes describe ABI related information about constructors,
+// destructors and thunks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_ABI_H
+#define CLANG_BASIC_ABI_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
+
+/// ReturnAdjustment - A return adjustment.
+struct ReturnAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VBaseOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+
+ ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
+
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
+ }
+
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
+ }
+};
+
+/// ThisAdjustment - A 'this' pointer adjustment.
+struct ThisAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
+
+ friend bool operator==(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
+ }
+};
+
+/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// adjustment for a thunk.
+struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
+
+ ThunkInfo() { }
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return;
+ }
+
+ friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ if (LHS.This < RHS.This)
+ return true;
+
+ return LHS.This == RHS.This && LHS.Return < RHS.Return;
+ }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+};
+
+} // end namespace clang
+
+#endif // CLANG_BASIC_ABI_H
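
Because ThunkInfo defines operator< (a lexicographic strict weak ordering over This, then Return), it can key an ordered container. A sketch of deduplicating thunks that way (hypothetical usage, not part of this header):

    #include <map>
    #include <utility>
    #include "clang/Basic/ABI.h"

    static std::map<clang::ThunkInfo, unsigned> ThunkIndex;

    static unsigned indexOf(const clang::ThunkInfo &TI) {
      std::map<clang::ThunkInfo, unsigned>::iterator It = ThunkIndex.find(TI);
      if (It != ThunkIndex.end())
        return It->second;                  // already seen this adjustment pair
      unsigned Next = ThunkIndex.size();
      ThunkIndex.insert(std::make_pair(TI, Next));
      return Next;
    }
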
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index 2f2267f..3e62d41 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -89,171 +89,224 @@ class Attr {
code AdditionalMembers = [{}];
}
+class InheritableAttr : Attr;
+
//
// Attributes begin here
//
-def Alias : Attr {
+def Alias : InheritableAttr {
let Spellings = ["alias"];
let Args = [StringArgument<"Aliasee">];
}
-def Aligned : Attr {
+def Aligned : InheritableAttr {
let Spellings = ["align", "aligned"];
let Subjects = [NonBitField, NormalVar, Tag];
let Args = [AlignedArgument<"Alignment">];
let Namespaces = ["", "std"];
}
-def AlignMac68k : Attr {
+def AlignMac68k : InheritableAttr {
let Spellings = [];
}
-def AlwaysInline : Attr {
+def AlwaysInline : InheritableAttr {
let Spellings = ["always_inline"];
}
-def AnalyzerNoReturn : Attr {
+def AnalyzerNoReturn : InheritableAttr {
let Spellings = ["analyzer_noreturn"];
}
-def Annotate : Attr {
+def Annotate : InheritableAttr {
let Spellings = ["annotate"];
let Args = [StringArgument<"Annotation">];
}
-def AsmLabel : Attr {
+def AsmLabel : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Label">];
}
-def BaseCheck : Attr {
- let Spellings = ["base_check"];
- let Subjects = [CXXRecord];
- let Namespaces = ["", "std"];
-}
-
-def Blocks : Attr {
+def Blocks : InheritableAttr {
let Spellings = ["blocks"];
let Args = [EnumArgument<"Type", "BlockType", ["byref"], ["ByRef"]>];
}
-def CarriesDependency : Attr {
+def CarriesDependency : InheritableAttr {
let Spellings = ["carries_dependency"];
let Subjects = [ParmVar, Function];
let Namespaces = ["", "std"];
}
-def CDecl : Attr {
+def CDecl : InheritableAttr {
let Spellings = ["cdecl", "__cdecl"];
}
-def CFReturnsRetained : Attr {
+def CFReturnsRetained : InheritableAttr {
let Spellings = ["cf_returns_retained"];
+ let Subjects = [ObjCMethod, Function];
}
-def CFReturnsNotRetained : Attr {
+def CFReturnsNotRetained : InheritableAttr {
let Spellings = ["cf_returns_not_retained"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def CFConsumed : InheritableAttr {
+ let Spellings = ["cf_consumed"];
+ let Subjects = [ParmVar];
}
-def Cleanup : Attr {
+def Cleanup : InheritableAttr {
let Spellings = ["cleanup"];
let Args = [FunctionArgument<"FunctionDecl">];
}
-def Const : Attr {
+def Common : InheritableAttr {
+ let Spellings = ["common"];
+}
+
+def Const : InheritableAttr {
let Spellings = ["const"];
}
-def Constructor : Attr {
+def Constructor : InheritableAttr {
let Spellings = ["constructor"];
let Args = [IntArgument<"Priority">];
}
-def Deprecated : Attr {
+def CUDAConstant : InheritableAttr {
+ let Spellings = ["constant"];
+}
+
+def CUDADevice : Attr {
+ let Spellings = ["device"];
+}
+
+def CUDAGlobal : InheritableAttr {
+ let Spellings = ["global"];
+}
+
+def CUDAHost : Attr {
+ let Spellings = ["host"];
+}
+
+def CUDALaunchBounds : InheritableAttr {
+ let Spellings = ["launch_bounds"];
+ let Args = [IntArgument<"MaxThreads">, DefaultIntArgument<"MinBlocks", 0>];
+}
+
+def CUDAShared : InheritableAttr {
+ let Spellings = ["shared"];
+}
+
+def OpenCLKernel : Attr {
+ let Spellings = ["opencl_kernel_function"];
+}
+
+def Deprecated : InheritableAttr {
let Spellings = ["deprecated"];
+ let Args = [StringArgument<"Message">];
}
-def Destructor : Attr {
+def Destructor : InheritableAttr {
let Spellings = ["destructor"];
let Args = [IntArgument<"Priority">];
}
-def DLLExport : Attr {
+def DLLExport : InheritableAttr {
let Spellings = ["dllexport"];
}
-def DLLImport : Attr {
+def DLLImport : InheritableAttr {
let Spellings = ["dllimport"];
}
-def FastCall : Attr {
+def Explicit : InheritableAttr {
+ let Spellings = [];
+}
+
+def FastCall : InheritableAttr {
let Spellings = ["fastcall", "__fastcall"];
}
-def Final : Attr {
- let Spellings = ["final"];
- let Subjects = [CXXRecord, CXXVirtualMethod];
- let Namespaces = ["", "std"];
+def Final : InheritableAttr {
+ let Spellings = [];
}
-def Format : Attr {
+def Format : InheritableAttr {
let Spellings = ["format"];
let Args = [StringArgument<"Type">, IntArgument<"FormatIdx">,
IntArgument<"FirstArg">];
}
-def FormatArg : Attr {
+def FormatArg : InheritableAttr {
let Spellings = ["format_arg"];
let Args = [IntArgument<"FormatIdx">];
}
-def GNUInline : Attr {
+def GNUInline : InheritableAttr {
let Spellings = ["gnu_inline"];
}
-def Hiding : Attr {
- let Spellings = ["hiding"];
- let Subjects = [Field, CXXMethod];
- let Namespaces = ["", "std"];
-}
-
-def IBAction : Attr {
+def IBAction : InheritableAttr {
let Spellings = ["ibaction"];
}
-def IBOutlet : Attr {
+def IBOutlet : InheritableAttr {
let Spellings = ["iboutlet"];
}
-def IBOutletCollection : Attr {
+def IBOutletCollection : InheritableAttr {
let Spellings = ["iboutletcollection"];
let Args = [TypeArgument<"Interface">];
}
-def Malloc : Attr {
+def Malloc : InheritableAttr {
let Spellings = ["malloc"];
}
-def MaxFieldAlignment : Attr {
+def MaxFieldAlignment : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"Alignment">];
}
-def MSP430Interrupt : Attr {
+def MayAlias : InheritableAttr {
+ let Spellings = ["may_alias"];
+}
+
+def MSP430Interrupt : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"Number">];
}
-def NoDebug : Attr {
+def MBlazeInterruptHandler : InheritableAttr {
+ let Spellings = [];
+}
+
+def MBlazeSaveVolatiles : InheritableAttr {
+ let Spellings = [];
+}
+
+def Naked : InheritableAttr {
+ let Spellings = ["naked"];
+}
+
+def NoCommon : InheritableAttr {
+ let Spellings = ["nocommon"];
+}
+
+def NoDebug : InheritableAttr {
let Spellings = ["nodebug"];
}
-def NoInline : Attr {
+def NoInline : InheritableAttr {
let Spellings = ["noinline"];
}
-def NonNull : Attr {
+def NonNull : InheritableAttr {
let Spellings = ["nonnull"];
let Args = [VariadicUnsignedArgument<"Args">];
let AdditionalMembers =
@@ -266,49 +319,64 @@ def NonNull : Attr {
} }];
}
-def NoReturn : Attr {
+def NoReturn : InheritableAttr {
let Spellings = ["noreturn"];
// FIXME: Does GCC allow this on the function instead?
let Subjects = [Function];
let Namespaces = ["", "std"];
}
-def NoInstrumentFunction : Attr {
+def NoInstrumentFunction : InheritableAttr {
let Spellings = ["no_instrument_function"];
let Subjects = [Function];
}
-def NoThrow : Attr {
+def NoThrow : InheritableAttr {
let Spellings = ["nothrow"];
}
-def NSReturnsRetained : Attr {
+def NSReturnsRetained : InheritableAttr {
let Spellings = ["ns_returns_retained"];
+ let Subjects = [ObjCMethod, Function];
}
-def NSReturnsNotRetained : Attr {
+def NSReturnsNotRetained : InheritableAttr {
let Spellings = ["ns_returns_not_retained"];
+ let Subjects = [ObjCMethod, Function];
}
-def ObjCException : Attr {
- let Spellings = ["objc_exception"];
+def NSReturnsAutoreleased : InheritableAttr {
+ let Spellings = ["ns_returns_autoreleased"];
+ let Subjects = [ObjCMethod, Function];
}
-def ObjCNSObject : Attr {
- let Spellings = ["NSOjbect"];
+def NSConsumesSelf : InheritableAttr {
+ let Spellings = ["ns_consumes_self"];
+ let Subjects = [ObjCMethod];
}
-def Override : Attr {
- let Spellings = ["override"];
- let Subjects = [CXXVirtualMethod];
- let Namespaces = ["", "std"];
+def NSConsumed : InheritableAttr {
+ let Spellings = ["ns_consumed"];
+ let Subjects = [ParmVar];
+}
+
+def ObjCException : InheritableAttr {
+ let Spellings = ["objc_exception"];
+}
+
+def ObjCNSObject : InheritableAttr {
+ let Spellings = ["NSObject"];
}
def Overloadable : Attr {
let Spellings = ["overloadable"];
}
-def Ownership : Attr {
+def Override : InheritableAttr {
+ let Spellings = [];
+}
+
+def Ownership : InheritableAttr {
let Spellings = ["ownership_holds", "ownership_returns", "ownership_takes"];
let Args = [EnumArgument<"OwnKind", "OwnershipKind",
["ownership_holds", "ownership_returns", "ownership_takes"],
@@ -316,97 +384,104 @@ def Ownership : Attr {
StringArgument<"Module">, VariadicUnsignedArgument<"Args">];
}
-def Packed : Attr {
+def Packed : InheritableAttr {
let Spellings = ["packed"];
}
-def Pure : Attr {
+def Pure : InheritableAttr {
let Spellings = ["pure"];
}
-def Regparm : Attr {
+def Regparm : InheritableAttr {
let Spellings = ["regparm"];
let Args = [UnsignedArgument<"NumParams">];
}
-def ReqdWorkGroupSize : Attr {
+def ReqdWorkGroupSize : InheritableAttr {
let Spellings = ["reqd_work_group_size"];
let Args = [UnsignedArgument<"XDim">, UnsignedArgument<"YDim">,
UnsignedArgument<"ZDim">];
}
-def InitPriority : Attr {
+def InitPriority : InheritableAttr {
let Spellings = ["init_priority"];
let Args = [UnsignedArgument<"Priority">];
}
-def Section : Attr {
+def Section : InheritableAttr {
let Spellings = ["section"];
let Args = [StringArgument<"Name">];
}
-def Sentinel : Attr {
+def Sentinel : InheritableAttr {
let Spellings = ["sentinel"];
let Args = [DefaultIntArgument<"Sentinel", 0>,
DefaultIntArgument<"NullPos", 0>];
}
-def StdCall : Attr {
+def StdCall : InheritableAttr {
let Spellings = ["stdcall", "__stdcall"];
}
-def ThisCall : Attr {
+def ThisCall : InheritableAttr {
let Spellings = ["thiscall", "__thiscall"];
}
-def Pascal : Attr {
+def Pascal : InheritableAttr {
let Spellings = ["pascal", "__pascal"];
}
-def TransparentUnion : Attr {
+def TransparentUnion : InheritableAttr {
let Spellings = ["transparent_union"];
}
-def Unavailable : Attr {
+def Unavailable : InheritableAttr {
let Spellings = ["unavailable"];
+ let Args = [StringArgument<"Message">];
}
-def Unused : Attr {
+def Unused : InheritableAttr {
let Spellings = ["unused"];
}
-def Used : Attr {
+def Used : InheritableAttr {
let Spellings = ["used"];
}
-def Visibility : Attr {
+def Uuid : InheritableAttr {
+ let Spellings = ["uuid"];
+ let Args = [StringArgument<"Guid">];
+ let Subjects = [CXXRecord];
+}
+
+def Visibility : InheritableAttr {
let Spellings = ["visibility"];
let Args = [EnumArgument<"Visibility", "VisibilityType",
["default", "hidden", "internal", "protected"],
["Default", "Hidden", "Hidden", "Protected"]>];
}
-def VecReturn : Attr {
+def VecReturn : InheritableAttr {
let Spellings = ["vecreturn"];
let Subjects = [CXXRecord];
}
-def WarnUnusedResult : Attr {
+def WarnUnusedResult : InheritableAttr {
let Spellings = ["warn_unused_result"];
}
-def Weak : Attr {
+def Weak : InheritableAttr {
let Spellings = ["weak"];
}
-def WeakImport : Attr {
+def WeakImport : InheritableAttr {
let Spellings = ["weak_import"];
}
-def WeakRef : Attr {
+def WeakRef : InheritableAttr {
let Spellings = ["weakref"];
}
-def X86ForceAlignArgPointer : Attr {
+def X86ForceAlignArgPointer : InheritableAttr {
let Spellings = [];
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
index 822573b..65c4f98 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
@@ -21,6 +21,7 @@ namespace attr {
// Kind - This is a list of all the recognized kinds of attributes.
enum Kind {
#define ATTR(X) X,
+#define LAST_INHERITABLE_ATTR(X) X, LAST_INHERITABLE = X,
#include "clang/Basic/AttrList.inc"
NUM_ATTRS
};
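
LAST_INHERITABLE is emitted for the last attribute kind generated from an InheritableAttr record, so inheritability reduces to a single comparison. A minimal sketch of the intended consumption (an assumption; this mirrors how a classof-style check would use the marker):

    static bool isInheritable(clang::attr::Kind K) {
      return K <= clang::attr::LAST_INHERITABLE;
    }
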
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
index 0da8938..7d270ad 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -29,6 +29,8 @@
// d -> double
// z -> size_t
// F -> constant CFString
+// G -> id
+// H -> SEL
// a -> __builtin_va_list
// A -> "reference" to __builtin_va_list
// V -> Vector, following num elements and a base type.
@@ -38,12 +40,13 @@
// SJ -> sigjmp_buf
// . -> "...". This may only occur at the end of the function list.
//
-// Types maybe prefixed with the following modifiers:
+// Types may be prefixed with the following modifiers:
// L -> long (e.g. Li for 'long int')
// LL -> long long
// LLL -> __int128_t (e.g. LLLi)
// S -> signed
// U -> unsigned
+// I -> Required to constant fold to an integer constant expression.
//
// Types may be postfixed with the following modifiers:
// * -> pointer (optionally followed by an address space number)
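
Putting the letters and modifiers together, two signatures that appear later in this file decode as follows (a worked reading, not new definitions):

    // "v*IUi"  (__builtin_return_address): returns void*, takes an unsigned int
    //          that must constant fold to an integer constant expression (I).
    // "GGH."   (objc_msgSend): returns id (G), takes id (G) and SEL (H),
    //          then "..." for the variadic tail.
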
@@ -75,7 +78,7 @@
// FIXME: gcc has nonnull
#if defined(BUILTIN) && !defined(LIBBUILTIN)
-# define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) BUILTIN(ID, TYPE, ATTRS)
+# define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) BUILTIN(ID, TYPE, ATTRS)
#endif
// Standard libc/libm functions:
@@ -124,12 +127,24 @@ BUILTIN(__builtin_powl, "LdLdLd", "Fnc")
BUILTIN(__builtin_acos , "dd" , "Fnc")
BUILTIN(__builtin_acosf, "ff" , "Fnc")
BUILTIN(__builtin_acosl, "LdLd", "Fnc")
+BUILTIN(__builtin_acosh , "dd" , "Fnc")
+BUILTIN(__builtin_acoshf, "ff" , "Fnc")
+BUILTIN(__builtin_acoshl, "LdLd", "Fnc")
BUILTIN(__builtin_asin , "dd" , "Fnc")
BUILTIN(__builtin_asinf, "ff" , "Fnc")
BUILTIN(__builtin_asinl, "LdLd", "Fnc")
+BUILTIN(__builtin_asinh , "dd" , "Fnc")
+BUILTIN(__builtin_asinhf, "ff" , "Fnc")
+BUILTIN(__builtin_asinhl, "LdLd", "Fnc")
BUILTIN(__builtin_atan , "dd" , "Fnc")
BUILTIN(__builtin_atanf, "ff" , "Fnc")
BUILTIN(__builtin_atanl, "LdLd", "Fnc")
+BUILTIN(__builtin_atanh , "dd", "Fnc")
+BUILTIN(__builtin_atanhf, "ff", "Fnc")
+BUILTIN(__builtin_atanhl, "LdLd", "Fnc")
+BUILTIN(__builtin_cbrt , "dd", "Fnc")
+BUILTIN(__builtin_cbrtf, "ff", "Fnc")
+BUILTIN(__builtin_cbrtl, "LdLd", "Fnc")
BUILTIN(__builtin_ceil , "dd" , "Fnc")
BUILTIN(__builtin_ceilf, "ff" , "Fnc")
BUILTIN(__builtin_ceill, "LdLd", "Fnc")
@@ -139,21 +154,99 @@ BUILTIN(__builtin_cosh , "dd" , "Fnc")
BUILTIN(__builtin_coshf, "ff" , "Fnc")
BUILTIN(__builtin_coshl, "LdLd", "Fnc")
BUILTIN(__builtin_cosl, "LdLd", "Fnc")
+BUILTIN(__builtin_erf , "dd", "Fnc")
+BUILTIN(__builtin_erff, "ff", "Fnc")
+BUILTIN(__builtin_erfl, "LdLd", "Fnc")
+BUILTIN(__builtin_erfc , "dd", "Fnc")
+BUILTIN(__builtin_erfcf, "ff", "Fnc")
+BUILTIN(__builtin_erfcl, "LdLd", "Fnc")
BUILTIN(__builtin_exp , "dd" , "Fnc")
BUILTIN(__builtin_expf, "ff" , "Fnc")
BUILTIN(__builtin_expl, "LdLd", "Fnc")
+BUILTIN(__builtin_exp2 , "dd" , "Fnc")
+BUILTIN(__builtin_exp2f, "ff" , "Fnc")
+BUILTIN(__builtin_exp2l, "LdLd", "Fnc")
+BUILTIN(__builtin_expm1 , "dd", "Fnc")
+BUILTIN(__builtin_expm1f, "ff", "Fnc")
+BUILTIN(__builtin_expm1l, "LdLd", "Fnc")
+BUILTIN(__builtin_fdim, "ddd", "Fnc")
+BUILTIN(__builtin_fdimf, "fff", "Fnc")
+BUILTIN(__builtin_fdiml, "LdLdLd", "Fnc")
BUILTIN(__builtin_floor , "dd" , "Fnc")
BUILTIN(__builtin_floorf, "ff" , "Fnc")
BUILTIN(__builtin_floorl, "LdLd", "Fnc")
+BUILTIN(__builtin_fma, "dddd", "Fnc")
+BUILTIN(__builtin_fmaf, "ffff", "Fnc")
+BUILTIN(__builtin_fmal, "LdLdLdLd", "Fnc")
+BUILTIN(__builtin_fmax, "ddd", "Fnc")
+BUILTIN(__builtin_fmaxf, "fff", "Fnc")
+BUILTIN(__builtin_fmaxl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_fmin, "ddd", "Fnc")
+BUILTIN(__builtin_fminf, "fff", "Fnc")
+BUILTIN(__builtin_fminl, "LdLdLd", "Fnc")
BUILTIN(__builtin_hypot , "ddd" , "Fnc")
BUILTIN(__builtin_hypotf, "fff" , "Fnc")
BUILTIN(__builtin_hypotl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_ilogb , "id", "Fnc")
+BUILTIN(__builtin_ilogbf, "if", "Fnc")
+BUILTIN(__builtin_ilogbl, "iLd", "Fnc")
+BUILTIN(__builtin_lgamma , "dd", "Fnc")
+BUILTIN(__builtin_lgammaf, "ff", "Fnc")
+BUILTIN(__builtin_lgammal, "LdLd", "Fnc")
+BUILTIN(__builtin_llrint, "LLid", "Fnc")
+BUILTIN(__builtin_llrintf, "LLif", "Fnc")
+BUILTIN(__builtin_llrintl, "LLiLd", "Fnc")
+BUILTIN(__builtin_llround , "LLid", "Fnc")
+BUILTIN(__builtin_llroundf, "LLif", "Fnc")
+BUILTIN(__builtin_llroundl, "LLiLd", "Fnc")
BUILTIN(__builtin_log , "dd" , "Fnc")
BUILTIN(__builtin_log10 , "dd" , "Fnc")
BUILTIN(__builtin_log10f, "ff" , "Fnc")
BUILTIN(__builtin_log10l, "LdLd", "Fnc")
+BUILTIN(__builtin_log1p , "dd" , "Fnc")
+BUILTIN(__builtin_log1pf, "ff" , "Fnc")
+BUILTIN(__builtin_log1pl, "LdLd", "Fnc")
+BUILTIN(__builtin_log2, "dd" , "Fnc")
+BUILTIN(__builtin_log2f, "ff" , "Fnc")
+BUILTIN(__builtin_log2l, "LdLd" , "Fnc")
+BUILTIN(__builtin_logb , "dd", "Fnc")
+BUILTIN(__builtin_logbf, "ff", "Fnc")
+BUILTIN(__builtin_logbl, "LdLd", "Fnc")
BUILTIN(__builtin_logf, "ff" , "Fnc")
BUILTIN(__builtin_logl, "LdLd", "Fnc")
+BUILTIN(__builtin_lrint , "Lid", "Fnc")
+BUILTIN(__builtin_lrintf, "Lif", "Fnc")
+BUILTIN(__builtin_lrintl, "LiLd", "Fnc")
+BUILTIN(__builtin_lround , "Lid", "Fnc")
+BUILTIN(__builtin_lroundf, "Lif", "Fnc")
+BUILTIN(__builtin_lroundl, "LiLd", "Fnc")
+BUILTIN(__builtin_nearbyint , "dd", "Fnc")
+BUILTIN(__builtin_nearbyintf, "ff", "Fnc")
+BUILTIN(__builtin_nearbyintl, "LdLd", "Fnc")
+BUILTIN(__builtin_nextafter , "ddd", "Fnc")
+BUILTIN(__builtin_nextafterf, "fff", "Fnc")
+BUILTIN(__builtin_nextafterl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_nexttoward , "ddd", "Fnc")
+BUILTIN(__builtin_nexttowardf, "fff", "Fnc")
+BUILTIN(__builtin_nexttowardl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_remainder , "ddd", "Fnc")
+BUILTIN(__builtin_remainderf, "fff", "Fnc")
+BUILTIN(__builtin_remainderl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_remquo , "dddi*", "Fnc")
+BUILTIN(__builtin_remquof, "fffi*", "Fnc")
+BUILTIN(__builtin_remquol, "LdLdLdi*", "Fnc")
+BUILTIN(__builtin_rint , "dd", "Fnc")
+BUILTIN(__builtin_rintf, "ff", "Fnc")
+BUILTIN(__builtin_rintl, "LdLd", "Fnc")
+BUILTIN(__builtin_round, "dd" , "Fnc")
+BUILTIN(__builtin_roundf, "ff" , "Fnc")
+BUILTIN(__builtin_roundl, "LdLd" , "Fnc")
+BUILTIN(__builtin_scalbln , "ddLi", "Fnc")
+BUILTIN(__builtin_scalblnf, "ffLi", "Fnc")
+BUILTIN(__builtin_scalblnl, "LdLdLi", "Fnc")
+BUILTIN(__builtin_scalbn , "ddi", "Fnc")
+BUILTIN(__builtin_scalbnf, "ffi", "Fnc")
+BUILTIN(__builtin_scalbnl, "LdLdi", "Fnc")
BUILTIN(__builtin_sin , "dd" , "Fnc")
BUILTIN(__builtin_sinf, "ff" , "Fnc")
BUILTIN(__builtin_sinh , "dd" , "Fnc")
@@ -169,6 +262,12 @@ BUILTIN(__builtin_tanh , "dd" , "Fnc")
BUILTIN(__builtin_tanhf, "ff" , "Fnc")
BUILTIN(__builtin_tanhl, "LdLd", "Fnc")
BUILTIN(__builtin_tanl, "LdLd", "Fnc")
+BUILTIN(__builtin_tgamma , "dd", "Fnc")
+BUILTIN(__builtin_tgammaf, "ff", "Fnc")
+BUILTIN(__builtin_tgammal, "LdLd", "Fnc")
+BUILTIN(__builtin_trunc , "dd", "Fnc")
+BUILTIN(__builtin_truncf, "ff", "Fnc")
+BUILTIN(__builtin_truncl, "LdLd", "Fnc")
// C99 complex builtins
BUILTIN(__builtin_cabs, "dXd", "Fnc")
@@ -176,15 +275,24 @@ BUILTIN(__builtin_cabsf, "fXf", "Fnc")
BUILTIN(__builtin_cabsl, "LdXLd", "Fnc")
BUILTIN(__builtin_cacos, "XdXd", "Fnc")
BUILTIN(__builtin_cacosf, "XfXf", "Fnc")
+BUILTIN(__builtin_cacosh, "XdXd", "Fnc")
+BUILTIN(__builtin_cacoshf, "XfXf", "Fnc")
+BUILTIN(__builtin_cacoshl, "XLdXLd", "Fnc")
BUILTIN(__builtin_cacosl, "XLdXLd", "Fnc")
BUILTIN(__builtin_carg, "dXd", "Fnc")
BUILTIN(__builtin_cargf, "fXf", "Fnc")
BUILTIN(__builtin_cargl, "LdXLd", "Fnc")
BUILTIN(__builtin_casin, "XdXd", "Fnc")
BUILTIN(__builtin_casinf, "XfXf", "Fnc")
+BUILTIN(__builtin_casinh, "XdXd", "Fnc")
+BUILTIN(__builtin_casinhf, "XfXf", "Fnc")
+BUILTIN(__builtin_casinhl, "XLdXLd", "Fnc")
BUILTIN(__builtin_casinl, "XLdXLd", "Fnc")
BUILTIN(__builtin_catan, "XdXd", "Fnc")
BUILTIN(__builtin_catanf, "XfXf", "Fnc")
+BUILTIN(__builtin_catanh, "XdXd", "Fnc")
+BUILTIN(__builtin_catanhf, "XfXf", "Fnc")
+BUILTIN(__builtin_catanhl, "XLdXLd", "Fnc")
BUILTIN(__builtin_catanl, "XLdXLd", "Fnc")
BUILTIN(__builtin_ccos, "XdXd", "Fnc")
BUILTIN(__builtin_ccosf, "XfXf", "Fnc")
@@ -275,7 +383,7 @@ BUILTIN(__builtin_bswap32, "UiUi", "nc")
BUILTIN(__builtin_bswap64, "ULLiULLi", "nc")
// Random GCC builtins
-BUILTIN(__builtin_constant_p, "Us.", "nc")
+BUILTIN(__builtin_constant_p, "i.", "nc")
BUILTIN(__builtin_classify_type, "i.", "nc")
BUILTIN(__builtin___CFStringMakeConstantString, "FC*cC*", "nc")
BUILTIN(__builtin___NSStringMakeConstantString, "FC*cC*", "nc")
@@ -313,14 +421,14 @@ BUILTIN(__builtin_strpbrk, "c*cC*cC*", "nF")
BUILTIN(__builtin_strrchr, "c*cC*i", "nF")
BUILTIN(__builtin_strspn, "zcC*cC*", "nF")
BUILTIN(__builtin_strstr, "c*cC*cC*", "nF")
-BUILTIN(__builtin_return_address, "v*Ui", "n")
+BUILTIN(__builtin_return_address, "v*IUi", "n")
BUILTIN(__builtin_extract_return_addr, "v*v*", "n")
-BUILTIN(__builtin_frame_address, "v*Ui", "n")
+BUILTIN(__builtin_frame_address, "v*IUi", "n")
BUILTIN(__builtin_flt_rounds, "i", "nc")
BUILTIN(__builtin_setjmp, "iv**", "")
BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
-BUILTIN(__builtin_eh_return_data_regno, "ii", "nc")
+BUILTIN(__builtin_eh_return_data_regno, "iIi", "nc")
BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
@@ -485,92 +593,135 @@ BUILTIN(__builtin_abort, "v", "Fnr")
BUILTIN(__builtin_index, "c*cC*i", "Fn")
BUILTIN(__builtin_rindex, "c*cC*i", "Fn")
-
+// Microsoft builtins.
+BUILTIN(__assume, "vb", "n")
+BUILTIN(__noop, "v.", "n")
// C99 library functions
// C99 stdlib.h
-LIBBUILTIN(abort, "v", "fr", "stdlib.h")
-LIBBUILTIN(calloc, "v*zz", "f", "stdlib.h")
-LIBBUILTIN(exit, "vi", "fr", "stdlib.h")
-LIBBUILTIN(_Exit, "vi", "fr", "stdlib.h")
-LIBBUILTIN(malloc, "v*z", "f", "stdlib.h")
-LIBBUILTIN(realloc, "v*v*z", "f", "stdlib.h")
+LIBBUILTIN(abort, "v", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(calloc, "v*zz", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(_Exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(malloc, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(realloc, "v*v*z", "f", "stdlib.h", ALL_LANGUAGES)
// C99 string.h
-LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h")
-LIBBUILTIN(memmove, "v*v*vC*z", "f", "string.h")
-LIBBUILTIN(strcpy, "c*c*cC*", "f", "string.h")
-LIBBUILTIN(strncpy, "c*c*cC*z", "f", "string.h")
-LIBBUILTIN(strcat, "c*c*cC*", "f", "string.h")
-LIBBUILTIN(strncat, "c*c*cC*z", "f", "string.h")
-LIBBUILTIN(strxfrm, "zc*cC*z", "f", "string.h")
-LIBBUILTIN(memchr, "v*vC*iz", "f", "string.h")
-LIBBUILTIN(strchr, "c*cC*i", "f", "string.h")
-LIBBUILTIN(strcspn, "zcC*cC*", "f", "string.h")
-LIBBUILTIN(strpbrk, "c*cC*cC*", "f", "string.h")
-LIBBUILTIN(strrchr, "c*cC*i", "f", "string.h")
-LIBBUILTIN(strspn, "zcC*cC*", "f", "string.h")
-LIBBUILTIN(strstr, "c*cC*cC*", "f", "string.h")
-LIBBUILTIN(strtok, "c*c*cC*", "f", "string.h")
-LIBBUILTIN(memset, "v*v*iz", "f", "string.h")
-LIBBUILTIN(strerror, "c*i", "f", "string.h")
-LIBBUILTIN(strlen, "zcC*", "f", "string.h")
+LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memmove, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcat, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncat, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strxfrm, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memchr, "v*vC*iz", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strpbrk, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strrchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strstr, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strtok, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memset, "v*v*iz", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strerror, "c*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strlen, "zcC*", "f", "string.h", ALL_LANGUAGES)
// C99 stdio.h
-LIBBUILTIN(printf, "icC*.", "fp:0:", "stdio.h")
-LIBBUILTIN(fprintf, "iP*cC*.", "fp:1:", "stdio.h")
-LIBBUILTIN(snprintf, "ic*zcC*.", "fp:2:", "stdio.h")
-LIBBUILTIN(sprintf, "ic*cC*.", "fp:1:", "stdio.h")
-LIBBUILTIN(vprintf, "icC*a", "fP:0:", "stdio.h")
-LIBBUILTIN(vfprintf, "i.", "fP:1:", "stdio.h")
-LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", "stdio.h")
-LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", "stdio.h")
-LIBBUILTIN(scanf, "icC*.", "fs:0:", "stdio.h")
+LIBBUILTIN(printf, "icC*.", "fp:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(fprintf, "iP*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(snprintf, "ic*zcC*.", "fp:2:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(sprintf, "ic*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vprintf, "icC*a", "fP:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vfprintf, "i.", "fP:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(scanf, "icC*.", "fs:0:", "stdio.h", ALL_LANGUAGES)
// C99
-LIBBUILTIN(longjmp, "vJi", "fr", "setjmp.h")
+LIBBUILTIN(longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
// Non-C library functions
// FIXME: Non-C-standard stuff shouldn't be builtins in non-GNU mode!
-LIBBUILTIN(alloca, "v*z", "f", "stdlib.h")
+LIBBUILTIN(alloca, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
// POSIX string.h
-LIBBUILTIN(stpcpy, "c*c*cC*", "f", "string.h")
-LIBBUILTIN(stpncpy, "c*c*cC*z", "f", "string.h")
-LIBBUILTIN(strdup, "c*cC*", "f", "string.h")
-LIBBUILTIN(strndup, "c*cC*z", "f", "string.h")
+LIBBUILTIN(stpcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(stpncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strdup, "c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strndup, "c*cC*z", "f", "string.h", ALL_LANGUAGES)
// POSIX strings.h
-LIBBUILTIN(index, "c*cC*i", "f", "strings.h")
-LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h")
-LIBBUILTIN(bzero, "vv*z", "f", "strings.h")
+LIBBUILTIN(index, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(bzero, "vv*z", "f", "strings.h", ALL_LANGUAGES)
// POSIX unistd.h
-LIBBUILTIN(_exit, "vi", "fr", "unistd.h")
+LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_LANGUAGES)
// POSIX setjmp.h
-LIBBUILTIN(_longjmp, "vJi", "fr", "setjmp.h")
-LIBBUILTIN(siglongjmp, "vSJi", "fr", "setjmp.h")
+LIBBUILTIN(_longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(siglongjmp, "vSJi", "fr", "setjmp.h", ALL_LANGUAGES)
+// id objc_msgSend(id, SEL, ...)
+LIBBUILTIN(objc_msgSend, "GGH.", "f", "objc/message.h", OBJC_LANG)
+
+// long double objc_msgSend_fpret(id self, SEL op, ...)
+LIBBUILTIN(objc_msgSend_fpret, "LdGH.", "f", "objc/message.h", OBJC_LANG)
+// id objc_msgSend_stret (id, SEL, ...)
+LIBBUILTIN(objc_msgSend_stret, "GGH.", "f", "objc/message.h", OBJC_LANG)
+// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+LIBBUILTIN(objc_msgSendSuper, "Gv*H.", "f", "objc/message.h", OBJC_LANG)
+// void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...)
+LIBBUILTIN(objc_msgSendSuper_stret, "vv*H.", "f", "objc/message.h", OBJC_LANG)
+// id objc_getClass(const char *name)
+LIBBUILTIN(objc_getClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+// id objc_getMetaClass(const char *name)
+LIBBUILTIN(objc_getMetaClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+// void objc_enumerationMutation(id)
+LIBBUILTIN(objc_enumerationMutation, "vG", "f", "objc/runtime.h", OBJC_LANG)
+
+// id objc_read_weak(id *location)
+LIBBUILTIN(objc_read_weak, "GG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_weak(id value, id *location)
+LIBBUILTIN(objc_assign_weak, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
+// FIXME. Darwin has ptrdiff_t typedef'ed to int.
+LIBBUILTIN(objc_assign_ivar, "GGGi", "f", "/objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_global(id val, id *dest)
+LIBBUILTIN(objc_assign_global, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_strongCast(id val, id *dest)
+LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+
+// id objc_exception_extract(void *localExceptionData)
+LIBBUILTIN(objc_exception_extract, "Gv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_try_enter(void *localExceptionData)
+LIBBUILTIN(objc_exception_try_enter, "vv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_try_exit(void *localExceptionData)
+LIBBUILTIN(objc_exception_try_exit, "vv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+// int objc_exception_match(Class exceptionClass, id exception)
+LIBBUILTIN(objc_exception_match, "iGG", "f", "/objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_throw(id exception)
+LIBBUILTIN(objc_exception_throw, "vG", "f", "/objc/objc-exception.h", OBJC_LANG)
+
+// int objc_sync_enter(id obj)
+LIBBUILTIN(objc_sync_enter, "iG", "f", "/objc/objc-sync.h", OBJC_LANG)
+// int objc_sync_exit(id obj)
+LIBBUILTIN(objc_sync_exit, "iG", "f", "/objc/objc-sync.h", OBJC_LANG)
-// FIXME: This type isn't very correct, it should be
-// id objc_msgSend(id, SEL)
-// but we need new type letters for that.
-LIBBUILTIN(objc_msgSend, "v*.", "f", "objc/message.h")
BUILTIN(__builtin_objc_memmove_collectable, "v*v*vC*z", "nF")
// Builtin math library functions
-LIBBUILTIN(pow, "ddd", "fe", "math.h")
-LIBBUILTIN(powl, "LdLdLd", "fe", "math.h")
-LIBBUILTIN(powf, "fff", "fe", "math.h")
+LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powf, "fff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrt, "dd", "fe", "math.h")
-LIBBUILTIN(sqrtl, "LdLd", "fe", "math.h")
-LIBBUILTIN(sqrtf, "ff", "fe", "math.h")
+LIBBUILTIN(sqrt, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtf, "ff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sin, "dd", "fe", "math.h")
-LIBBUILTIN(sinl, "LdLd", "fe", "math.h")
-LIBBUILTIN(sinf, "ff", "fe", "math.h")
+LIBBUILTIN(sin, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinf, "ff", "fe", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(cos, "dd", "fe", "math.h")
-LIBBUILTIN(cosl, "LdLd", "fe", "math.h")
-LIBBUILTIN(cosf, "ff", "fe", "math.h")
+LIBBUILTIN(cos, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cosl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cosf, "ff", "fe", "math.h", ALL_LANGUAGES)
// Blocks runtime Builtin math library functions
-LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h")
-LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h")
+LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h", ALL_LANGUAGES)
+LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h", ALL_LANGUAGES)
// FIXME: Also declare NSConcreteGlobalBlock and NSConcreteStackBlock.
#undef BUILTIN
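
A rough key to the LIBBUILTIN signature strings above, read off the entries
themselves (a partial sketch of the Builtins.def encoding, not the full
grammar): 'v' = void, 'i' = int, 'z' = size_t, 'c' = char, 'd' = double,
'f' = float, 'Ld' = long double, '*' = pointer, 'C' = const on the preceding
type, 'G' = id, 'H' = SEL, 'J' = jmp_buf, 'a' = __builtin_va_list, and
'.' = C-style varargs. Two worked examples:

    #include <stddef.h>
    // LIBBUILTIN(strlen, "zcC*", ...) corresponds to:
    size_t strlen(const char *s);
    // LIBBUILTIN(strtok, "c*c*cC*", ...) corresponds to:
    char *strtok(char *s, const char *delim);
    // and LIBBUILTIN(objc_msgSend, "GGH.", ...) matches its comment above:
    // id objc_msgSend(id self, SEL op, ...);
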
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
index 94d5e69..4df4c8f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
@@ -30,7 +30,15 @@ namespace clang {
class IdentifierTable;
class ASTContext;
class QualType;
-
+ class LangOptions;
+
+ enum LanguageID {
+ C_LANG = 0x1, // builtin for C only.
+ CXX_LANG = 0x2, // builtin for C++ only.
+ OBJC_LANG = 0x4, // builtin for Objective-C and Objective-C++ only.
+ ALL_LANGUAGES = (C_LANG|CXX_LANG|OBJC_LANG) // builtin is for all languages.
+ };
+
namespace Builtin {
enum ID {
NotBuiltin = 0, // This is not a builtin function.
@@ -41,6 +49,7 @@ enum ID {
struct Info {
const char *Name, *Type, *Attributes, *HeaderName;
+ LanguageID builtin_lang;
bool Suppressed;
bool operator==(const Info &RHS) const {
@@ -62,7 +71,7 @@ public:
/// InitializeBuiltins - Mark the identifiers for all the builtins with their
/// appropriate builtin ID # and mark any non-portable builtin identifiers as
/// such.
- void InitializeBuiltins(IdentifierTable &Table, bool NoBuiltins = false);
+ void InitializeBuiltins(IdentifierTable &Table, const LangOptions& LangOpts);
/// \brief Populate the vector with the names of all of the builtins.
void GetBuiltinNames(llvm::SmallVectorImpl<const char *> &Names,
@@ -108,6 +117,10 @@ public:
return strchr(GetRecord(ID).Attributes, 'f') != 0;
}
+ /// \brief Completely forget that the given ID was ever considered a builtin,
+ /// e.g., because the user provided a conflicting signature.
+ void ForgetBuiltin(unsigned ID, IdentifierTable &Table);
+
/// \brief If this is a library function that comes from a specific
/// header, retrieve that header name.
const char *getHeaderName(unsigned ID) const {
@@ -124,12 +137,6 @@ public:
/// argument and whether this function has a va_list argument.
bool isScanfLike(unsigned ID, unsigned &FormatIdx, bool &HasVAListArg);
- /// hasVAListUse - Return true of the specified builtin uses __builtin_va_list
- /// as an operand or return type.
- bool hasVAListUse(unsigned ID) const {
- return strpbrk(GetRecord(ID).Type, "Aa") != 0;
- }
-
/// isConstWithoutErrno - Return true if this function has no side
/// effects and doesn't read memory, except for possibly errno. Such
/// functions can be const when the MathErrno lang option is
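
The new fifth LIBBUILTIN argument feeds the LanguageID mask declared above.
A minimal sketch of the membership test this enables (hypothetical helper,
not clang's actual implementation):

    enum LanguageID {
      C_LANG = 0x1, CXX_LANG = 0x2, OBJC_LANG = 0x4,
      ALL_LANGUAGES = C_LANG | CXX_LANG | OBJC_LANG
    };

    // A builtin is exposed only when its mask intersects the current
    // language: OBJC_LANG entries (objc_msgSend and friends) vanish outside
    // Objective-C(++), while ALL_LANGUAGES entries are always visible.
    static bool builtinIsSupported(LanguageID BuiltinLang, unsigned CurLang) {
      return (BuiltinLang & CurLang) != 0;
    }
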
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
index e0518dc..8a751e4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
@@ -50,25 +50,25 @@ BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fi", "")
BUILTIN(__builtin_altivec_dss, "vUi", "")
BUILTIN(__builtin_altivec_dssall, "v", "")
-BUILTIN(__builtin_altivec_dst, "vv*iUi", "")
-BUILTIN(__builtin_altivec_dstt, "vv*iUi", "")
-BUILTIN(__builtin_altivec_dstst, "vv*iUi", "")
-BUILTIN(__builtin_altivec_dststt, "vv*iUi", "")
+BUILTIN(__builtin_altivec_dst, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dstt, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dstst, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dststt, "vvC*iUi", "")
BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
-BUILTIN(__builtin_altivec_lvx, "V4iiv*", "")
-BUILTIN(__builtin_altivec_lvxl, "V4iiv*", "")
-BUILTIN(__builtin_altivec_lvebx, "V16civ*", "")
-BUILTIN(__builtin_altivec_lvehx, "V8siv*", "")
-BUILTIN(__builtin_altivec_lvewx, "V4iiv*", "")
+BUILTIN(__builtin_altivec_lvx, "V4iivC*", "")
+BUILTIN(__builtin_altivec_lvxl, "V4iivC*", "")
+BUILTIN(__builtin_altivec_lvebx, "V16civC*", "")
+BUILTIN(__builtin_altivec_lvehx, "V8sivC*", "")
+BUILTIN(__builtin_altivec_lvewx, "V4iivC*", "")
BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
-BUILTIN(__builtin_altivec_lvsl, "V16cUcv*", "")
-BUILTIN(__builtin_altivec_lvsr, "V16cUcv*", "")
+BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "")
+BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "")
BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "")
BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "")
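
The AltiVec changes above only add const to the pointer operands of the
load and data-stream builtins. A sketch of what that buys (assuming a
PowerPC target with AltiVec enabled):

    // With the new vC* signatures, loading through a pointer-to-const no
    // longer requires casting the const away:
    __vector int load_first(const int *p) {
      return __builtin_altivec_lvx(0, p);
    }
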
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
index 5ad64b9..da106da 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -25,11 +25,16 @@
// FIXME: Are these nothrow/const?
// MMX
+//
+// FIXME: All MMX instructions will be generated via builtins. Any MMX vector
+// types (<1 x i64>, <2 x i32>, etc.) that aren't used by these builtins will be
+// expanded by the back-end.
BUILTIN(__builtin_ia32_emms, "v", "")
BUILTIN(__builtin_ia32_femms, "v", "")
BUILTIN(__builtin_ia32_paddb, "V8cV8cV8c", "")
BUILTIN(__builtin_ia32_paddw, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_paddd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_paddq, "V1LLiV1LLiV1LLi", "")
BUILTIN(__builtin_ia32_paddsb, "V8cV8cV8c", "")
BUILTIN(__builtin_ia32_paddsw, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_paddusb, "V8cV8cV8c", "")
@@ -37,6 +42,7 @@ BUILTIN(__builtin_ia32_paddusw, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_psubb, "V8cV8cV8c", "")
BUILTIN(__builtin_ia32_psubw, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_psubd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_psubq, "V1LLiV1LLiV1LLi", "")
BUILTIN(__builtin_ia32_psubsb, "V8cV8cV8c", "")
BUILTIN(__builtin_ia32_psubsw, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_psubusb, "V8cV8cV8c", "")
@@ -76,6 +82,7 @@ BUILTIN(__builtin_ia32_psradi, "V2iV2ii", "")
BUILTIN(__builtin_ia32_packsswb, "V8cV4sV4s", "")
BUILTIN(__builtin_ia32_packssdw, "V4sV2iV2i", "")
BUILTIN(__builtin_ia32_packuswb, "V8cV4sV4s", "")
+BUILTIN(__builtin_ia32_pshufw, "V4sV4sIc", "")
BUILTIN(__builtin_ia32_punpckhbw, "V8cV8cV8c", "")
BUILTIN(__builtin_ia32_punpckhwd, "V4sV4sV4s", "")
BUILTIN(__builtin_ia32_punpckhdq, "V2iV2iV2i", "")
@@ -91,7 +98,7 @@ BUILTIN(__builtin_ia32_pcmpgtd, "V2iV2iV2i", "")
BUILTIN(__builtin_ia32_maskmovq, "vV8cV8cc*", "")
BUILTIN(__builtin_ia32_pmovmskb, "iV8c", "")
BUILTIN(__builtin_ia32_movntq, "vV1LLi*V1LLi", "")
-BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "") // FIXME: Correct type?
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cIc", "")
BUILTIN(__builtin_ia32_vec_init_v2si, "V2iii", "")
BUILTIN(__builtin_ia32_vec_init_v4hi, "V4sssss", "")
BUILTIN(__builtin_ia32_vec_init_v8qi, "V8ccccccccc", "")
@@ -249,8 +256,8 @@ BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "")
BUILTIN(__builtin_ia32_psrlw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_psrld128, "V4iV4iV4i", "")
-BUILTIN(__builtin_ia32_pslldqi128, "V2LLiV2LLii", "")
-BUILTIN(__builtin_ia32_psrldqi128, "V2LLiV2LLii", "")
+BUILTIN(__builtin_ia32_pslldqi128, "V2LLiV2LLiIi", "")
+BUILTIN(__builtin_ia32_psrldqi128, "V2LLiV2LLiIi", "")
BUILTIN(__builtin_ia32_psrlq128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_psllw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_pslld128, "V4iV4iV4i", "")
@@ -267,7 +274,7 @@ BUILTIN(__builtin_ia32_pmaddwd128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
-BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "") // FIXME: Correct type?
+BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cIc", "") // FIXME: Correct type?
BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
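
Several of the x86 changes above replace a plain integer operand with an
'I'-prefixed one, i.e. the operand must now be an integer constant
expression (an instruction immediate). A sketch of the consequence, with a
vector typedef standing in for the one mmintrin.h provides:

    typedef short __v4hi __attribute__((__vector_size__(8)));

    __v4hi reverse16(__v4hi v) {
      return __builtin_ia32_pshufw(v, 0x1B);  // OK: literal immediate
    }
    // __builtin_ia32_pshufw(v, n) with a runtime 'n' is now rejected.
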
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
index e2f93e0..2ec7427 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
@@ -15,6 +15,7 @@ def Named : Decl<1>;
def Namespace : DDecl<Named>, DeclContext;
def UsingDirective : DDecl<Named>;
def NamespaceAlias : DDecl<Named>;
+ def Label : DDecl<Named>;
def Type : DDecl<Named, 1>;
def Typedef : DDecl<Type>;
def UnresolvedUsingTypename : DDecl<Type>;
@@ -29,6 +30,7 @@ def Named : Decl<1>;
def Value : DDecl<Named, 1>;
def EnumConstant : DDecl<Value>;
def UnresolvedUsingValue : DDecl<Value>;
+ def IndirectField : DDecl<Value>;
def Declarator : DDecl<Value, 1>;
def Function : DDecl<Declarator>, DeclContext;
def CXXMethod : DDecl<Function>;
@@ -41,7 +43,7 @@ def Named : Decl<1>;
def Var : DDecl<Declarator>;
def ImplicitParam : DDecl<Var>;
def ParmVar : DDecl<Var>;
- def NonTypeTemplateParm : DDecl<Var>;
+ def NonTypeTemplateParm : DDecl<Declarator>;
def Template : DDecl<Named, 1>;
def RedeclarableTemplate : DDecl<Template, 1>;
def FunctionTemplate : DDecl<RedeclarableTemplate>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
index 37d2694..19e7c91 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -14,76 +14,24 @@
#ifndef LLVM_CLANG_DIAGNOSTIC_H
#define LLVM_CLANG_DIAGNOSTIC_H
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Support/type_traits.h"
-#include <string>
-#include <vector>
-#include <cassert>
-namespace llvm {
- template <typename T> class SmallVectorImpl;
-}
+#include <vector>
+#include <list>
namespace clang {
- class DeclContext;
- class DiagnosticBuilder;
class DiagnosticClient;
- class FileManager;
+ class DiagnosticBuilder;
class IdentifierInfo;
+ class DeclContext;
class LangOptions;
- class PartialDiagnostic;
class Preprocessor;
-
- // Import the diagnostic enums themselves.
- namespace diag {
- // Start position for diagnostics.
- enum {
- DIAG_START_DRIVER = 300,
- DIAG_START_FRONTEND = DIAG_START_DRIVER + 100,
- DIAG_START_LEX = DIAG_START_FRONTEND + 100,
- DIAG_START_PARSE = DIAG_START_LEX + 300,
- DIAG_START_AST = DIAG_START_PARSE + 300,
- DIAG_START_SEMA = DIAG_START_AST + 100,
- DIAG_START_ANALYSIS = DIAG_START_SEMA + 1500,
- DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
- };
-
- class CustomDiagInfo;
-
- /// diag::kind - All of the diagnostics that can be emitted by the frontend.
- typedef unsigned kind;
-
- // Get typedefs for common diagnostics.
- enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
-#include "clang/Basic/DiagnosticCommonKinds.inc"
- NUM_BUILTIN_COMMON_DIAGNOSTICS
-#undef DIAG
- };
-
- /// Enum values that allow the client to map NOTEs, WARNINGs, and EXTENSIONs
- /// to either MAP_IGNORE (nothing), MAP_WARNING (emit a warning), MAP_ERROR
- /// (emit as an error). It allows clients to map errors to
- /// MAP_ERROR/MAP_DEFAULT or MAP_FATAL (stop emitting diagnostics after this
- /// one).
- enum Mapping {
- // NOTE: 0 means "uncomputed".
- MAP_IGNORE = 1, //< Map this diagnostic to nothing, ignore it.
- MAP_WARNING = 2, //< Map this diagnostic to a warning.
- MAP_ERROR = 3, //< Map this diagnostic to an error.
- MAP_FATAL = 4, //< Map this diagnostic to a fatal error.
-
- /// Map this diagnostic to "warning", but make it immune to -Werror. This
- /// happens when you specify -Wno-error=foo.
- MAP_WARNING_NO_WERROR = 5,
- /// Map this diagnostic to "error", but make it immune to -Wfatal-errors.
- /// This happens for -Wno-fatal-errors=foo.
- MAP_ERROR_NO_WFATAL = 6
- };
- }
+ class DiagnosticErrorTrap;
/// \brief Annotates a diagnostic with some code that should be
/// inserted, removed, or replaced to fix the problem.
@@ -153,12 +101,17 @@ public:
/// Diagnostic - This concrete class is used by the front-end to report
/// problems and issues. It massages the diagnostics (e.g. handling things like
/// "report warnings as errors" and passes them off to the DiagnosticClient for
-/// reporting to the user.
+/// reporting to the user. Diagnostic is tied to one translation unit and
+/// one SourceManager.
class Diagnostic : public llvm::RefCountedBase<Diagnostic> {
public:
/// Level - The level of the diagnostic, after it has been through mapping.
enum Level {
- Ignored, Note, Warning, Error, Fatal
+ Ignored = DiagnosticIDs::Ignored,
+ Note = DiagnosticIDs::Note,
+ Warning = DiagnosticIDs::Warning,
+ Error = DiagnosticIDs::Error,
+ Fatal = DiagnosticIDs::Fatal
};
/// ExtensionHandling - How do we handle otherwise-unmapped extensions? This
@@ -203,33 +156,94 @@ private:
unsigned TemplateBacktraceLimit; // Cap on depth of template backtrace stack,
// 0 -> no limit.
ExtensionHandling ExtBehavior; // Map extensions onto warnings or errors?
- llvm::OwningPtr<DiagnosticClient> Client;
-
- /// DiagMappings - Mapping information for diagnostics. Mapping info is
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> Diags;
+ DiagnosticClient *Client;
+ bool OwnsDiagClient;
+ SourceManager *SourceMgr;
+
+ /// \brief Mapping information for diagnostics. Mapping info is
/// packed into four bits per diagnostic. The low three bits are the mapping
/// (an instance of diag::Mapping), or zero if unset. The high bit is set
/// when the mapping was established as a user mapping. If the high bit is
/// clear, then the low bits are set to the default value, and should be
/// mapped with -pedantic, -Werror, etc.
- class DiagMappings {
- unsigned char Values[diag::DIAG_UPPER_LIMIT/2];
+ ///
+ /// A new DiagState is created and kept around when diagnostic pragmas modify
+ /// the state so that we know what is the diagnostic state at any given
+ /// source location.
+ class DiagState {
+ llvm::DenseMap<unsigned, unsigned> DiagMap;
public:
- DiagMappings() {
- memset(Values, 0, diag::DIAG_UPPER_LIMIT/2);
- }
+ typedef llvm::DenseMap<unsigned, unsigned>::const_iterator iterator;
- void setMapping(diag::kind Diag, unsigned Map) {
- size_t Shift = (Diag & 1)*4;
- Values[Diag/2] = (Values[Diag/2] & ~(15 << Shift)) | (Map << Shift);
- }
+ void setMapping(diag::kind Diag, unsigned Map) { DiagMap[Diag] = Map; }
diag::Mapping getMapping(diag::kind Diag) const {
- return (diag::Mapping)((Values[Diag/2] >> (Diag & 1)*4) & 15);
+ iterator I = DiagMap.find(Diag);
+ if (I != DiagMap.end())
+ return (diag::Mapping)I->second;
+ return diag::Mapping();
+ }
+
+ iterator begin() const { return DiagMap.begin(); }
+ iterator end() const { return DiagMap.end(); }
+ };
+
+ /// \brief Keeps and automatically disposes all DiagStates that we create.
+ std::list<DiagState> DiagStates;
+
+ /// \brief Represents a point in source where the diagnostic state was
+ /// modified because of a pragma. 'Loc' can be null if the point represents
+ /// the diagnostic state modifications done through the command-line.
+ struct DiagStatePoint {
+ DiagState *State;
+ FullSourceLoc Loc;
+ DiagStatePoint(DiagState *State, FullSourceLoc Loc)
+ : State(State), Loc(Loc) { }
+
+ bool operator<(const DiagStatePoint &RHS) const {
+ // If Loc is invalid it means it came from <command-line>, in which case
+ // we regard it as coming before any valid source location.
+ if (RHS.Loc.isInvalid())
+ return false;
+ if (Loc.isInvalid())
+ return true;
+ return Loc.isBeforeInTranslationUnitThan(RHS.Loc);
}
};
- mutable std::vector<DiagMappings> DiagMappingsStack;
+ /// \brief A vector of all DiagStatePoints representing changes in diagnostic
+ /// state due to diagnostic pragmas. The vector is always sorted according to
+ /// the SourceLocation of the DiagStatePoint.
+ typedef std::vector<DiagStatePoint> DiagStatePointsTy;
+ mutable DiagStatePointsTy DiagStatePoints;
+
+ /// \brief Keeps the DiagState that was active during each diagnostic 'push'
+ /// so we can get back at it when we 'pop'.
+ std::vector<DiagState *> DiagStateOnPushStack;
+
+ DiagState *GetCurDiagState() const {
+ assert(!DiagStatePoints.empty());
+ return DiagStatePoints.back().State;
+ }
+
+ void PushDiagStatePoint(DiagState *State, SourceLocation L) {
+ FullSourceLoc Loc(L, *SourceMgr);
+ // Make sure that DiagStatePoints is always sorted according to Loc.
+ assert((Loc.isValid() || DiagStatePoints.empty()) &&
+ "Adding invalid loc point after another point");
+ assert((Loc.isInvalid() || DiagStatePoints.empty() ||
+ DiagStatePoints.back().Loc.isInvalid() ||
+ DiagStatePoints.back().Loc.isBeforeInTranslationUnitThan(Loc)) &&
+ "Previous point loc comes after or is the same as new one");
+ DiagStatePoints.push_back(DiagStatePoint(State,
+ FullSourceLoc(Loc, *SourceMgr)));
+ }
+
+ /// \brief Finds the DiagStatePoint that contains the diagnostic state of
+ /// the given source location.
+ DiagStatePointsTy::iterator GetDiagStatePointForLoc(SourceLocation Loc) const;
/// ErrorOccurred / FatalErrorOccurred - This is set to true when an error or
/// fatal error is emitted, and is sticky.
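
The DiagState/DiagStatePoint machinery above is what lets diagnostic
pragmas be resolved per source location instead of globally. A sketch of
the source-level behaviour it models:

    #pragma clang diagnostic push                        // pushMappings(Loc)
    #pragma clang diagnostic ignored "-Wunused-variable"
    static void f() { int x; }  // queries here find the 'ignored' DiagState
    #pragma clang diagnostic pop                         // popMappings(Loc)
    static void g() { int y; }  // back to the state saved by 'push'
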
@@ -239,14 +253,11 @@ private:
/// LastDiagLevel - This is the level of the last diagnostic emitted. This is
/// used to emit continuation diagnostics with the same level as the
/// diagnostic that they follow.
- Diagnostic::Level LastDiagLevel;
+ DiagnosticIDs::Level LastDiagLevel;
unsigned NumWarnings; // Number of warnings reported
unsigned NumErrors; // Number of errors reported
unsigned NumErrorsSuppressed; // Number of errors suppressed
-
- /// CustomDiagInfo - Information for uniquing and looking up custom diags.
- diag::CustomDiagInfo *CustomDiagInfo;
/// ArgToStringFn - A function pointer that converts an opaque diagnostic
/// argument to a strings. This takes the modifiers and argument that was
@@ -279,34 +290,52 @@ private:
std::string DelayedDiagArg2;
public:
- explicit Diagnostic(DiagnosticClient *client = 0);
+ explicit Diagnostic(const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &Diags,
+ DiagnosticClient *client = 0,
+ bool ShouldOwnClient = true);
~Diagnostic();
- //===--------------------------------------------------------------------===//
- // Diagnostic characterization methods, used by a client to customize how
- //
+ const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &getDiagnosticIDs() const {
+ return Diags;
+ }
+
+ DiagnosticClient *getClient() { return Client; }
+ const DiagnosticClient *getClient() const { return Client; }
- DiagnosticClient *getClient() { return Client.get(); }
- const DiagnosticClient *getClient() const { return Client.get(); }
-
/// \brief Return the current diagnostic client along with ownership of that
/// client.
- DiagnosticClient *takeClient() { return Client.take(); }
+ DiagnosticClient *takeClient() {
+ OwnsDiagClient = false;
+ return Client;
+ }
+
+ bool hasSourceManager() const { return SourceMgr != 0; }
+ SourceManager &getSourceManager() const {
+ assert(SourceMgr && "SourceManager not set!");
+ return *SourceMgr;
+ }
+ void setSourceManager(SourceManager *SrcMgr) { SourceMgr = SrcMgr; }
+
+ //===--------------------------------------------------------------------===//
+ // Diagnostic characterization methods, used by a client to customize how
+ // diagnostics are emitted.
+ //
/// pushMappings - Copies the current DiagMappings and pushes the new copy
/// onto the top of the stack.
- void pushMappings();
+ void pushMappings(SourceLocation Loc);
/// popMappings - Pops the current DiagMappings off the top of the stack
/// causing the new top of the stack to be the active mappings. Returns
/// true if the pop happens, false if there is only one DiagMapping on the
/// stack.
- bool popMappings();
+ bool popMappings(SourceLocation Loc);
/// \brief Set the diagnostic client associated with this diagnostic object.
///
- /// The diagnostic object takes ownership of \c client.
- void setClient(DiagnosticClient* client) { Client.reset(client); }
+ /// \param ShouldOwnClient true if the diagnostic object should take
+ /// ownership of \c client.
+ void setClient(DiagnosticClient *client, bool ShouldOwnClient = true);
/// setErrorLimit - Specify a limit for the number of errors we should
/// emit before giving up. Zero disables the limit.
@@ -362,7 +391,7 @@ public:
/// \brief Pretend that the last diagnostic issued was ignored. This can
/// be used by clients who suppress diagnostics themselves.
void setLastDiagnosticIgnored() {
- LastDiagLevel = Ignored;
+ LastDiagLevel = DiagnosticIDs::Ignored;
}
/// setExtensionHandlingBehavior - This controls whether otherwise-unmapped
@@ -379,28 +408,29 @@ public:
void DecrementAllExtensionsSilenced() { --AllExtensionsSilenced; }
bool hasAllExtensionsSilenced() { return AllExtensionsSilenced != 0; }
- /// setDiagnosticMapping - This allows the client to specify that certain
+ /// \brief This allows the client to specify that certain
/// warnings are ignored. Notes can never be mapped, errors can only be
/// mapped to fatal, and WARNINGs and EXTENSIONs can be mapped arbitrarily.
- void setDiagnosticMapping(diag::kind Diag, diag::Mapping Map) {
- assert(Diag < diag::DIAG_UPPER_LIMIT &&
- "Can only map builtin diagnostics");
- assert((isBuiltinWarningOrExtension(Diag) ||
- (Map == diag::MAP_FATAL || Map == diag::MAP_ERROR)) &&
- "Cannot map errors into warnings!");
- setDiagnosticMappingInternal(Diag, Map, true);
- }
+ ///
+ /// \param Loc The source location that this change of diagnostic state should
+ /// take effect. It can be null if we are setting the latest state.
+ void setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
+ SourceLocation Loc);
/// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
/// "unknown-pragmas" to have the specified mapping. This returns true and
/// ignores the request if "Group" was unknown, false otherwise.
- bool setDiagnosticGroupMapping(const char *Group, diag::Mapping Map);
+ ///
+ /// 'Loc' is the source location that this change of diagnostic state should
+ /// take effect. It can be null if we are setting the state from the command line.
+ bool setDiagnosticGroupMapping(const char *Group, diag::Mapping Map,
+ SourceLocation Loc = SourceLocation()) {
+ return Diags->setDiagnosticGroupMapping(Group, Map, Loc, *this);
+ }
bool hasErrorOccurred() const { return ErrorOccurred; }
bool hasFatalErrorOccurred() const { return FatalErrorOccurred; }
- unsigned getNumErrors() const { return NumErrors; }
- unsigned getNumErrorsSuppressed() const { return NumErrorsSuppressed; }
unsigned getNumWarnings() const { return NumWarnings; }
void setNumWarnings(unsigned NumWarnings) {
@@ -410,8 +440,9 @@ public:
/// getCustomDiagID - Return an ID for a diagnostic with the specified message
/// and level. If this is the first request for this diagnostic, it is
/// registered and created, otherwise the existing ID is returned.
- unsigned getCustomDiagID(Level L, llvm::StringRef Message);
-
+ unsigned getCustomDiagID(Level L, llvm::StringRef Message) {
+ return Diags->getCustomDiagID((DiagnosticIDs::Level)L, Message);
+ }
/// ConvertArgToString - This method converts a diagnostic argument (as an
/// intptr_t) into the string that represents it.
@@ -437,92 +468,22 @@ public:
// Diagnostic classification and reporting interfaces.
//
- /// getDescription - Given a diagnostic ID, return a description of the
- /// issue.
- const char *getDescription(unsigned DiagID) const;
-
- /// isNoteWarningOrExtension - Return true if the unmapped diagnostic
- /// level of the specified diagnostic ID is a Warning or Extension.
- /// This only works on builtin diagnostics, not custom ones, and is not legal to
- /// call on NOTEs.
- static bool isBuiltinWarningOrExtension(unsigned DiagID);
-
- /// \brief Determine whether the given built-in diagnostic ID is a
- /// Note.
- static bool isBuiltinNote(unsigned DiagID);
-
- /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
- /// ID is for an extension of some sort.
- ///
- static bool isBuiltinExtensionDiag(unsigned DiagID) {
- bool ignored;
- return isBuiltinExtensionDiag(DiagID, ignored);
- }
-
- /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
- /// ID is for an extension of some sort. This also returns EnabledByDefault,
- /// which is set to indicate whether the diagnostic is ignored by default (in
- /// which case -pedantic enables it) or treated as a warning/error by default.
- ///
- static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault);
-
-
- /// getWarningOptionForDiag - Return the lowest-level warning option that
- /// enables the specified diagnostic. If there is no -Wfoo flag that controls
- /// the diagnostic, this returns null.
- static const char *getWarningOptionForDiag(unsigned DiagID);
-
- /// getWarningOptionForDiag - Return the category number that a specified
- /// DiagID belongs to, or 0 if no category.
- static unsigned getCategoryNumberForDiag(unsigned DiagID);
-
- /// getCategoryNameFromID - Given a category ID, return the name of the
- /// category.
- static const char *getCategoryNameFromID(unsigned CategoryID);
-
- /// \brief Enumeration describing how the the emission of a diagnostic should
- /// be treated when it occurs during C++ template argument deduction.
- enum SFINAEResponse {
- /// \brief The diagnostic should not be reported, but it should cause
- /// template argument deduction to fail.
- ///
- /// The vast majority of errors that occur during template argument
- /// deduction fall into this category.
- SFINAE_SubstitutionFailure,
-
- /// \brief The diagnostic should be suppressed entirely.
- ///
- /// Warnings generally fall into this category.
- SFINAE_Suppress,
-
- /// \brief The diagnostic should be reported.
- ///
- /// The diagnostic should be reported. Various fatal errors (e.g.,
- /// template instantiation depth exceeded) fall into this category.
- SFINAE_Report
- };
-
- /// \brief Determines whether the given built-in diagnostic ID is
- /// for an error that is suppressed if it occurs during C++ template
- /// argument deduction.
- ///
- /// When an error is suppressed due to SFINAE, the template argument
- /// deduction fails but no diagnostic is emitted. Certain classes of
- /// errors, such as those errors that involve C++ access control,
- /// are not SFINAE errors.
- static SFINAEResponse getDiagnosticSFINAEResponse(unsigned DiagID);
-
- /// getDiagnosticLevel - Based on the way the client configured the Diagnostic
+ /// \brief Based on the way the client configured the Diagnostic
/// object, classify the specified diagnostic ID into a Level, consumable by
/// the DiagnosticClient.
- Level getDiagnosticLevel(unsigned DiagID) const;
+ ///
+ /// \param Loc The source location whose diagnostic state we are querying.
+ /// Can be null in order to query the latest state.
+ Level getDiagnosticLevel(unsigned DiagID, SourceLocation Loc) const {
+ return (Level)Diags->getDiagnosticLevel(DiagID, Loc, *this);
+ }
/// Report - Issue the message to the client. @c DiagID is a member of the
/// @c diag::kind enum. This actually returns an instance of DiagnosticBuilder
/// which emits the diagnostics (through @c ProcessDiag) when it is destroyed.
/// @c Pos represents the source location associated with the diagnostic,
/// which can be an invalid location if no position information is available.
- inline DiagnosticBuilder Report(FullSourceLoc Pos, unsigned DiagID);
+ inline DiagnosticBuilder Report(SourceLocation Pos, unsigned DiagID);
inline DiagnosticBuilder Report(unsigned DiagID);
/// \brief Determine whether there is already a diagnostic in flight.
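
Taken together, the reworked entry points compose as follows (a usage
sketch, not code from the tree):

    void reportAt(clang::Diagnostic &Diags, clang::SourceLocation Loc) {
      // Custom IDs now come from the shared DiagnosticIDs object:
      unsigned ID = Diags.getCustomDiagID(clang::Diagnostic::Warning,
                                          "suspicious construct");
      // Level queries are per-location, honoring any pragma state:
      if (Diags.getDiagnosticLevel(ID, Loc) != clang::Diagnostic::Ignored)
        Diags.Report(Loc, ID);
    }
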
@@ -563,32 +524,34 @@ private:
/// getDiagnosticMappingInfo - Return the mapping info currently set for the
/// specified builtin diagnostic. This returns the high bit encoding, or zero
/// if the field is completely uninitialized.
- diag::Mapping getDiagnosticMappingInfo(diag::kind Diag) const {
- return DiagMappingsStack.back().getMapping(Diag);
+ diag::Mapping getDiagnosticMappingInfo(diag::kind Diag,
+ DiagState *State) const {
+ return State->getMapping(Diag);
}
void setDiagnosticMappingInternal(unsigned DiagId, unsigned Map,
- bool isUser) const {
+ DiagState *State,
+ bool isUser, bool isPragma) const {
if (isUser) Map |= 8; // Set the high bit for user mappings.
- DiagMappingsStack.back().setMapping((diag::kind)DiagId, Map);
+ if (isPragma) Map |= 0x10; // Set the bit for diagnostic pragma mappings.
+ State->setMapping((diag::kind)DiagId, Map);
}
- /// getDiagnosticLevel - This is an internal implementation helper used when
- /// DiagClass is already known.
- Level getDiagnosticLevel(unsigned DiagID, unsigned DiagClass) const;
-
// This is private state used by DiagnosticBuilder. We put it here instead of
// in DiagnosticBuilder in order to keep DiagnosticBuilder a small lightweight
// object. This implementation choice means that we can only have one
// diagnostic "in flight" at a time, but this seems to be a reasonable
// tradeoff to keep these objects small. Assertions verify that only one
// diagnostic is in flight at a time.
+ friend class DiagnosticIDs;
friend class DiagnosticBuilder;
friend class DiagnosticInfo;
-
+ friend class PartialDiagnostic;
+ friend class DiagnosticErrorTrap;
+
/// CurDiagLoc - This is the location of the current diagnostic that is in
/// flight.
- FullSourceLoc CurDiagLoc;
+ SourceLocation CurDiagLoc;
/// CurDiagID - This is the ID of the current diagnostic that is in flight.
/// This is set to ~0U when there is no diagnostic in flight.
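
Reading setDiagnosticMappingInternal above, each per-diagnostic entry is a
small bitfield; one way to spell out the packing (a derived illustration,
not an exported API):

    unsigned packMapping(unsigned Map, bool isUser, bool isPragma) {
      if (isUser)   Map |= 0x8;   // established explicitly by the user
      if (isPragma) Map |= 0x10;  // changed by a diagnostic pragma
      return Map;                 // low three bits hold the diag::Mapping
    }
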
@@ -640,7 +603,33 @@ private:
///
/// \returns true if the diagnostic was emitted, false if it was
/// suppressed.
- bool ProcessDiag();
+ bool ProcessDiag() {
+ return Diags->ProcessDiag(*this);
+ }
+
+ friend class ASTReader;
+ friend class ASTWriter;
+};
+
+/// \brief RAII class that determines when any errors have occurred
+/// between the time the instance was created and the time it was
+/// queried.
+class DiagnosticErrorTrap {
+ Diagnostic &Diag;
+ unsigned PrevErrors;
+
+public:
+ explicit DiagnosticErrorTrap(Diagnostic &Diag)
+ : Diag(Diag), PrevErrors(Diag.NumErrors) {}
+
+ /// \brief Determine whether any errors have occurred since this
+ /// object instance was created.
+ bool hasErrorOccurred() const {
+ return Diag.NumErrors > PrevErrors;
+ }
+
+ // Set to initial state of "no errors occurred".
+ void reset() { PrevErrors = Diag.NumErrors; }
};
//===----------------------------------------------------------------------===//
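
Typical use of the new DiagnosticErrorTrap (a sketch):

    bool analyzeQuietly(clang::Diagnostic &Diags) {
      clang::DiagnosticErrorTrap Trap(Diags);
      // ... run checks that may emit diagnostics ...
      return !Trap.hasErrorOccurred();  // true if the region was error-free
    }
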
@@ -667,6 +656,11 @@ class DiagnosticBuilder {
explicit DiagnosticBuilder(Diagnostic *diagObj)
: DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixItHints(0) {}
+ friend class PartialDiagnostic;
+
+protected:
+ void FlushCounts();
+
public:
/// Copy constructor. When copied, this "takes" the diagnostic info from the
/// input and neuters it.
@@ -703,6 +697,17 @@ public:
/// isActive - Determine whether this diagnostic is still active.
bool isActive() const { return DiagObj != 0; }
+ /// \brief Retrieve the active diagnostic ID.
+ ///
+ /// \pre \c isActive()
+ unsigned getDiagID() const {
+ assert(isActive() && "Diagnostic is inactive");
+ return DiagObj->CurDiagID;
+ }
+
+ /// \brief Clear out the current diagnostic.
+ void Clear() { DiagObj = 0; }
+
/// Operator bool: conversion of DiagnosticBuilder to bool always returns
/// true. This allows it to be used in boolean error contexts like:
/// return Diag(...);
@@ -816,14 +821,15 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
/// Report - Issue the message to the client. DiagID is a member of the
/// diag::kind enum. This actually returns a new instance of DiagnosticBuilder
/// which emits the diagnostics (through ProcessDiag) when it is destroyed.
-inline DiagnosticBuilder Diagnostic::Report(FullSourceLoc Loc, unsigned DiagID){
+inline DiagnosticBuilder Diagnostic::Report(SourceLocation Loc,
+ unsigned DiagID){
assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
CurDiagLoc = Loc;
CurDiagID = DiagID;
return DiagnosticBuilder(this);
}
inline DiagnosticBuilder Diagnostic::Report(unsigned DiagID) {
- return Report(FullSourceLoc(), DiagID);
+ return Report(SourceLocation(), DiagID);
}
//===----------------------------------------------------------------------===//
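
Call sites are unchanged apart from the cheaper location type; the builder
still streams its arguments and fires in its destructor (a sketch):

    void warnAbout(clang::Diagnostic &Diags, clang::SourceLocation Loc,
                   unsigned DiagID, llvm::StringRef Name) {
      Diags.Report(Loc, DiagID) << Name;  // emitted when the builder dies
    }
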
@@ -840,7 +846,9 @@ public:
const Diagnostic *getDiags() const { return DiagObj; }
unsigned getID() const { return DiagObj->CurDiagID; }
- const FullSourceLoc &getLocation() const { return DiagObj->CurDiagLoc; }
+ const SourceLocation &getLocation() const { return DiagObj->CurDiagLoc; }
+ bool hasSourceManager() const { return DiagObj->hasSourceManager(); }
+ SourceManager &getSourceManager() const { return DiagObj->getSourceManager();}
unsigned getNumArgs() const { return DiagObj->NumDiagArgs; }
@@ -930,10 +938,11 @@ public:
};
/**
- * \brief Represents a diagnostic in a form that can be serialized and
- * deserialized.
+ * \brief Represents a diagnostic in a form that can be retained until its
+ * corresponding source manager is destroyed.
*/
class StoredDiagnostic {
+ unsigned ID;
Diagnostic::Level Level;
FullSourceLoc Loc;
std::string Message;
@@ -943,12 +952,14 @@ class StoredDiagnostic {
public:
StoredDiagnostic();
StoredDiagnostic(Diagnostic::Level Level, const DiagnosticInfo &Info);
- StoredDiagnostic(Diagnostic::Level Level, llvm::StringRef Message);
+ StoredDiagnostic(Diagnostic::Level Level, unsigned ID,
+ llvm::StringRef Message);
~StoredDiagnostic();
/// \brief Evaluates true when this object stores a diagnostic.
operator bool() const { return Message.size() > 0; }
+ unsigned getID() const { return ID; }
Diagnostic::Level getLevel() const { return Level; }
const FullSourceLoc &getLocation() const { return Loc; }
llvm::StringRef getMessage() const { return Message; }
@@ -964,25 +975,21 @@ public:
fixit_iterator fixit_begin() const { return FixIts.begin(); }
fixit_iterator fixit_end() const { return FixIts.end(); }
unsigned fixit_size() const { return FixIts.size(); }
-
- /// Serialize - Serialize the given diagnostic (with its diagnostic
- /// level) to the given stream. Serialization is a lossy operation,
- /// since the specific diagnostic ID and any macro-instantiation
- /// information is lost.
- void Serialize(llvm::raw_ostream &OS) const;
-
- /// Deserialize - Deserialize the first diagnostic within the memory
- /// [Memory, MemoryEnd), producing a new diagnostic builder describing the
- /// deserialized diagnostic. If the memory does not contain a
- /// diagnostic, returns a diagnostic builder with no diagnostic ID.
- static StoredDiagnostic Deserialize(FileManager &FM, SourceManager &SM,
- const char *&Memory, const char *MemoryEnd);
};
/// DiagnosticClient - This is an abstract interface implemented by clients of
/// the front-end, which formats and prints fully processed diagnostics.
class DiagnosticClient {
+protected:
+ unsigned NumWarnings; // Number of warnings reported
+ unsigned NumErrors; // Number of errors reported
+
public:
+ DiagnosticClient() : NumWarnings(0), NumErrors(0) { }
+
+ unsigned getNumErrors() const { return NumErrors; }
+ unsigned getNumWarnings() const { return NumWarnings; }
+
virtual ~DiagnosticClient();
/// BeginSourceFile - Callback to inform the diagnostic client that processing
@@ -1012,8 +1019,11 @@ public:
/// HandleDiagnostic - Handle this diagnostic, reporting it to the user or
/// capturing it to a log as needed.
+ ///
+ /// Default implementation just keeps track of the total number of warnings
+ /// and errors.
virtual void HandleDiagnostic(Diagnostic::Level DiagLevel,
- const DiagnosticInfo &Info) = 0;
+ const DiagnosticInfo &Info);
};
} // end namespace clang
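
Because HandleDiagnostic is no longer pure virtual and the base class now
owns the warning/error tallies, subclasses should chain to it (a sketch):

    class LoggingClient : public clang::DiagnosticClient {
    public:
      virtual void HandleDiagnostic(clang::Diagnostic::Level Level,
                                    const clang::DiagnosticInfo &Info) {
        clang::DiagnosticClient::HandleDiagnostic(Level, Info); // keep counts
        // ... custom rendering or logging here ...
      }
    };
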
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
index fabf9eb..be510ed 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
@@ -56,6 +56,7 @@ class Diagnostic<string text, DiagClass DC, DiagMapping defaultmapping> {
string Text = text;
DiagClass Class = DC;
bit SFINAE = 1;
+ bit AccessControl = 0;
DiagMapping DefaultMapping = defaultmapping;
DiagGroup Group;
string CategoryName = "";
@@ -74,6 +75,7 @@ class DefaultError { DiagMapping DefaultMapping = MAP_ERROR; }
class DefaultFatal { DiagMapping DefaultMapping = MAP_FATAL; }
class NoSFINAE { bit SFINAE = 0; }
+class AccessControl { bit AccessControl = 1; }
// Definitions for Diagnostics.
include "DiagnosticASTKinds.td"
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
index d755d99..7d45bc5 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -56,6 +56,8 @@ def note_odr_number_of_bases : Note<
"class has %0 base %plural{1:class|:classes}0">;
def note_odr_enumerator : Note<"enumerator %0 with value %1 here">;
def note_odr_missing_enumerator : Note<"no corresponding enumerator here">;
+
+// Importing Objective-C ASTs
def err_odr_ivar_type_inconsistent : Error<
"instance variable %0 declared with incompatible types in different "
"translation units (%1 vs. %2)">;
@@ -80,5 +82,32 @@ def note_odr_objc_method_here : Note<
def err_odr_objc_property_type_inconsistent : Error<
"property %0 declared with incompatible types in different "
"translation units (%1 vs. %2)">;
+def err_odr_objc_property_impl_kind_inconsistent : Error<
+ "property %0 is implemented with %select{@synthesize|@dynamic}1 in one "
+ "translation but %select{@dynamic|@synthesize}1 in another translation unit">;
+def note_odr_objc_property_impl_kind : Note<
+ "property %0 is implemented with %select{@synthesize|@dynamic}1 here">;
+def err_odr_objc_synthesize_ivar_inconsistent : Error<
+ "property %0 is synthesized to different ivars in different translation "
+ "units (%1 vs. %2)">;
+def note_odr_objc_synthesize_ivar_here : Note<
+ "property is synthesized to ivar %0 here">;
+
+// Importing C++ ASTs
+def err_odr_different_num_template_parameters : Error<
+ "template parameter lists have a different number of parameters (%0 vs %1)">;
+def note_odr_template_parameter_list : Note<
+ "template parameter list also declared here">;
+def err_odr_different_template_parameter_kind : Error<
+ "template parameter has different kinds in different translation units">;
+def note_odr_template_parameter_here : Note<
+ "template parameter declared here">;
+def err_odr_parameter_pack_non_pack : Error<
+ "parameter kind mismatch; parameter is %select{not a|a}0 parameter pack">;
+def note_odr_parameter_pack_non_pack : Note<
+ "%select{parameter|parameter pack}0 declared here">;
+def err_odr_non_type_parameter_type_inconsistent : Error<
+ "non-type template parameter declared with incompatible types in different "
+ "translation units (%0 vs. %1)">;
def err_unsupported_ast_node: Error<"cannot import unsupported AST node %0">;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 98ea9d4..85c64c5 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -44,9 +44,12 @@ def err_expected_colon_after_setter_name : Error<
def ext_no_declarators : ExtWarn<"declaration does not declare anything">,
InGroup<MissingDeclarations>;
def err_param_redefinition : Error<"redefinition of parameter %0">;
+def warn_method_param_redefinition : Warning<"redefinition of method parameter %0">;
def err_invalid_storage_class_in_func_decl : Error<
"invalid storage class specifier in function declarator">;
def err_expected_namespace_name : Error<"expected namespace name">;
+def ext_variadic_templates : ExtWarn<
+ "variadic templates are a C++0x extension">, InGroup<CXX0x>;
// Sema && Lex
def ext_longlong : Extension<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 34cd600..ef1c9e7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -11,11 +11,15 @@ let Component = "Driver" in {
def err_drv_no_such_file : Error<"no such file or directory: '%0'">;
def err_drv_unsupported_opt : Error<"unsupported option '%0'">;
+def err_drv_unsupported_option_argument : Error<
+ "unsupported argument '%1' to option '%0'">;
def err_drv_unknown_stdin_type : Error<
"-E or -x required when input is from standard input">;
def err_drv_unknown_language : Error<"language not recognized: '%0'">;
def err_drv_invalid_arch_name : Error<
"invalid arch name '%0'">;
+def err_drv_invalid_stdlib_name : Error<
+ "invalid library name in argument '%0'">;
def err_drv_invalid_opt_with_multiple_archs : Error<
"option '%0' cannot be used with multiple -arch options">;
def err_drv_invalid_output_with_multiple_archs : Error<
@@ -49,6 +53,8 @@ def err_drv_no_ast_support : Error<
"'%0': unable to use AST files with this tool">;
def err_drv_clang_unsupported : Error<
"the clang compiler does not support '%0'">;
+def err_drv_clang_unsupported_per_platform : Error<
+ "the clang compiler does not support '%0' on this platform">;
def err_drv_clang_unsupported_opt_cxx_darwin_i386 : Error<
"the clang compiler does not support '%0' for C++ on Darwin/i386">;
def err_drv_command_failed : Error<
@@ -71,6 +77,10 @@ def err_drv_cc_print_options_failure : Error<
def err_drv_preamble_format : Error<
"incorrect format for -preamble-bytes=N,END">;
+def warn_c_kext : Warning<
+ "ignoring -fapple-kext which is valid for c++ and objective-c++ only">;
+def warn_drv_unsupported_option_argument : Warning<
+ "ignoring unsupported argument '%1' to option '%0'">;
def warn_drv_input_file_unused : Warning<
"%0: '%1' input unused when '%2' is present">;
def warn_drv_preprocessed_input_file_unused : Warning<
@@ -91,8 +101,6 @@ def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
def warn_ignoring_ftabstop_value : Warning<
"ignoring invalid -ftabstop value '%0', using default value %1">;
-def warn_drv_missing_resource_library : Warning<
- "missing resource library '%0', link may fail">;
def warn_drv_conflicting_deployment_targets : Warning<
"conflicting deployment targets, both MACOSX_DEPLOYMENT_TARGET '%0' and IPHONEOS_DEPLOYMENT_TARGET '%1' are present in environment">;
def warn_drv_treating_input_as_cxx : Warning<
@@ -100,5 +108,7 @@ def warn_drv_treating_input_as_cxx : Warning<
InGroup<Deprecated>;
def warn_drv_objc_gc_unsupported : Warning<
"Objective-C garbage collection is not supported on this platform, ignoring '%0'">;
+def warn_drv_pch_not_first_include : Warning<
+ "precompiled header '%0' was ignored because '%1' is not first '-include'">;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 7c74bf4..5f9f4a7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -18,7 +18,7 @@ def err_fe_invalid_ast_action : Error<"invalid action for AST input">,
DefaultFatal;
// Error generated by the backend.
def err_fe_inline_asm : Error<"%0">, CatInlineAsm;
-def note_fe_inline_asm_here : Note<"instantated into assembly here">;
+def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
@@ -69,12 +69,16 @@ def err_fe_pch_file_modified : Error<
DefaultFatal;
def err_fe_unable_to_open_output : Error<
"unable to open output file '%0': '%1'">;
+def err_fe_unable_to_rename_temp : Error<
+ "unable to rename temporary '%0' to output file '%1': '%2'">;
def err_fe_unable_to_open_logfile : Error<
"unable to open logfile file '%0': '%1'">;
def err_fe_pth_file_has_no_source_header : Error<
"PTH file '%0' does not designate an original source header file for -include-pth">;
def warn_fe_macro_contains_embedded_newline : Warning<
"macro '%0' contains embedded newline, text after the newline is ignored.">;
+def warn_fe_cc_print_header_failure : Warning<
+ "unable to open CC_PRINT_HEADERS file: %0 (using stderr)">;
def err_verify_missing_start : Error<
"cannot find start ('{{') of expected %0">;
@@ -83,7 +87,8 @@ def err_verify_missing_end : Error<
def err_verify_invalid_content : Error<
"invalid expected %0: %1">;
def err_verify_inconsistent_diags : Error<
- "'%0' diagnostics %select{expected|seen}1 but not %select{seen|expected}1: %2">;
+ "'%0' diagnostics %select{expected|seen}1 but not %select{seen|expected}1: "
+ "%2">;
def note_fixit_applied : Note<"FIX-IT applied suggested code changes">;
def note_fixit_in_macro : Note<
@@ -127,6 +132,12 @@ def warn_pch_nonfragile_abi2 : Error<
"PCH file was compiled with the %select{32-bit|enhanced non-fragile}0 "
"Objective-C ABI but the %select{32-bit|enhanced non-fragile}1 "
"Objective-C ABI is selected">;
+def warn_pch_apple_kext : Error<
+ "PCH file was compiled %select{with|without}0 support for Apple's kernel "
+ "extensions ABI but it is currently %select{disabled|enabled}1">;
+def warn_pch_objc_auto_properties : Error<
+ "PCH file was compiled %select{with|without}0 support for auto-synthesized "
+ "@properties but it is currently %select{disabled|enabled}1">;
def warn_pch_no_constant_cfstrings : Error<
"Objctive-C NSstring generation support was %select{disabled|enabled}0 "
"in PCH file but currently %select{disabled|enabled}1">;
@@ -142,6 +153,9 @@ def warn_pch_gnu_keywords : Error<
def warn_pch_microsoft_extensions : Error<
"Microsoft extensions were %select{disabled|enabled}0 in PCH file but are "
"currently %select{disabled|enabled}1">;
+def warn_pch_ms_bitfields : Error<
+ "Microsoft-compatible structure layout was %select{disabled|enabled}0 in "
+ "PCH file but is currently %select{disabled|enabled}1">;
def warn_pch_heinous_extensions : Error<
"heinous extensions were %select{disabled|enabled}0 in PCH file but are "
"currently %select{disabled|enabled}1">;
@@ -154,6 +168,9 @@ def warn_pch_altivec : Error<
def warn_pch_opencl : Error<
"OpenCL language extensions were %select{disabled|enabled}0 in PCH file "
"but are currently %select{disabled|enabled}1">;
+def warn_pch_cuda : Error<
+ "CUDA language extensions were %select{disabled|enabled}0 in PCH file "
+ "but are currently %select{disabled|enabled}1">;
def warn_pch_elide_constructors : Error<
"Elidable copy constructors were %select{disabled|enabled}0 in PCH file "
"but are currently %select{disabled|enabled}1">;
@@ -163,6 +180,9 @@ def warn_pch_exceptions : Error<
def warn_pch_sjlj_exceptions : Error<
"sjlj-exceptions were %select{disabled|enabled}0 in PCH file but "
"are currently %select{disabled|enabled}1">;
+def warn_pch_objc_exceptions : Error<
+ "Objective-C exceptions were %select{disabled|enabled}0 in PCH file but "
+ "are currently %select{disabled|enabled}1">;
def warn_pch_objc_runtime : Error<
"PCH file was compiled with the %select{NeXT|GNU}0 runtime but the "
"%select{NeXT|GNU}1 runtime is selected">;
@@ -241,6 +261,9 @@ def warn_pch_char_signed : Error<
def warn_pch_short_wchar : Error<
"-fshort-wchar was %select{disabled|enabled}0 in the PCH file but "
"is currently %select{disabled|enabled}1">;
+def warn_pch_short_enums : Error<
+ "-fshort-enums was %select{disabled|enabled}0 in the PCH file but "
+ "is currently %select{disabled|enabled}1">;
def err_not_a_pch_file : Error<
"'%0' does not appear to be a precompiled header file">, DefaultFatal;
@@ -250,4 +273,7 @@ def warn_unknown_warning_option : Warning<
def warn_unknown_warning_specifier : Warning<
"unknown %0 warning specifier: '%1'">,
InGroup<DiagGroup<"unknown-warning-option"> >;
+
+def warn_unkwown_analyzer_checker : Warning<
+ "no analyzer checkers are associated with '%0'">;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index d4b7f1f..d4377c9 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -35,9 +35,12 @@ def : DiagGroup<"declaration-after-statement">;
def GNUDesignator : DiagGroup<"gnu-designator">;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
+def DeprecatedWritableStr : DiagGroup<"deprecated-writable-strings">;
def Deprecated : DiagGroup<"deprecated", [ DeprecatedDeclarations] >,
DiagCategory<"Deprecations">;
+def DeprecatedImplementations : DiagGroup<"deprecated-implementations">;
+
def : DiagGroup<"disabled-optimization">;
def : DiagGroup<"discard-qual">;
def : DiagGroup<"div-by-zero">;
@@ -54,6 +57,7 @@ def : DiagGroup<"effc++">;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
def GlobalConstructors : DiagGroup<"global-constructors">;
def : DiagGroup<"idiomatic-parentheses">;
+def LogicalOpParentheses : DiagGroup<"logical-op-parentheses">;
def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
def : DiagGroup<"import">;
def : DiagGroup<"init-self">;
@@ -74,17 +78,19 @@ def : DiagGroup<"newline-eof">;
def LongLong : DiagGroup<"long-long">;
def MismatchedTags : DiagGroup<"mismatched-tags">;
def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
+def NullDereference : DiagGroup<"null-dereference">;
def InitializerOverrides : DiagGroup<"initializer-overrides">;
def NonNull : DiagGroup<"nonnull">;
def : DiagGroup<"nonportable-cfstrings">;
-def : DiagGroup<"non-virtual-dtor">;
+def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
def : DiagGroup<"old-style-cast">;
def : DiagGroup<"old-style-definition">;
def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;
def : DiagGroup<"overflow">;
def OverlengthStrings : DiagGroup<"overlength-strings">;
-def : DiagGroup<"overloaded-virtual">;
-def : DiagGroup<"packed">;
+def OverloadedVirtual : DiagGroup<"overloaded-virtual">;
+def Packed : DiagGroup<"packed">;
+def Padded : DiagGroup<"padded">;
def PointerArith : DiagGroup<"pointer-arith">;
def PoundWarning : DiagGroup<"#warnings">,
DiagCategory<"#warning Directive">;
@@ -92,6 +98,7 @@ def : DiagGroup<"pointer-to-int-cast">;
def : DiagGroup<"redundant-decls">;
def ReturnType : DiagGroup<"return-type">;
def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy">;
+def SelfAssignment : DiagGroup<"self-assign">;
def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
def : DiagGroup<"sequence-point">;
def Shadow : DiagGroup<"shadow">;
@@ -101,6 +108,7 @@ def SignCompare : DiagGroup<"sign-compare">;
def : DiagGroup<"stack-protector">;
def : DiagGroup<"switch-default">;
def : DiagGroup<"synth">;
+def TautologicalCompare : DiagGroup<"tautological-compare">;
// Preprocessor warnings.
def : DiagGroup<"builtin-macro-redefined">;
@@ -140,9 +148,14 @@ def UnusedLabel : DiagGroup<"unused-label">;
def UnusedParameter : DiagGroup<"unused-parameter">;
def UnusedValue : DiagGroup<"unused-value">;
def UnusedVariable : DiagGroup<"unused-variable">;
+def UsedButMarkedUnused : DiagGroup<"used-but-marked-unused">;
def ReadOnlySetterAttrs : DiagGroup<"readonly-setter-attrs">;
def Reorder : DiagGroup<"reorder">;
def UndeclaredSelector : DiagGroup<"undeclared-selector">;
+def ImplicitAtomic : DiagGroup<"implicit-atomic-properties">;
+def CustomAtomic : DiagGroup<"custom-atomic-properties">;
+def AtomicProperties : DiagGroup<"atomic-properties",
+ [ImplicitAtomic, CustomAtomic]>;
def Selector : DiagGroup<"selector">;
def NonfragileAbi2 : DiagGroup<"nonfragile-abi2">;
def Protocol : DiagGroup<"protocol">;
@@ -154,17 +167,27 @@ def VLA : DiagGroup<"vla">;
def VolatileRegisterVar : DiagGroup<"volatile-register-var">;
def : DiagGroup<"write-strings">;
def CharSubscript : DiagGroup<"char-subscripts">;
+def LargeByValueCopy : DiagGroup<"large-by-value-copy">;
// Aggregation warning settings.
// -Widiomatic-parentheses contains warnings about 'idiomatic'
// missing parentheses; it is off by default.
-def Parentheses : DiagGroup<"parentheses", [DiagGroup<"idiomatic-parentheses">]>;
+def Parentheses : DiagGroup<"parentheses",
+ [LogicalOpParentheses,
+ DiagGroup<"idiomatic-parentheses">]>;
-// -Wconversion has its own warnings, but we split this one out for
-// legacy reasons.
+// -Wconversion has its own warnings, but we split a few out for
+// legacy reasons:
+// - some people want just 64-to-32 warnings
+// - conversion warnings with constant sources are on by default
+// - conversion warnings for literals are on by default
+// - bool-to-pointer conversion warnings are on by default
def Conversion : DiagGroup<"conversion",
- [DiagGroup<"shorten-64-to-32">, BoolConversions]>,
+ [DiagGroup<"shorten-64-to-32">,
+ DiagGroup<"constant-conversion">,
+ DiagGroup<"literal-conversion">,
+ BoolConversions]>,
DiagCategory<"Value Conversion Issue">;
def Unused : DiagGroup<"unused",
@@ -202,13 +225,15 @@ def Most : DiagGroup<"most", [
MultiChar,
Reorder,
ReturnType,
+ SelfAssignment,
Switch,
Trigraphs,
Uninitialized,
UnknownPragmas,
Unused,
VectorConversions,
- VolatileRegisterVar
+ VolatileRegisterVar,
+ OverloadedVirtual
]>;
// -Wall is -Wmost -Wparentheses
@@ -224,5 +249,12 @@ def : DiagGroup<"comments", [Comment]>; // -Wcomments = -Wcomment
def NonGCC : DiagGroup<"non-gcc",
[SignCompare, Conversion, LiteralRange]>;
+// A warning group for warnings about using C++0x features as extensions in
+// earlier C++ versions.
+def CXX0x : DiagGroup<"c++0x-extensions">;
+
// A warning group for warnings about GCC extensions.
def GNU : DiagGroup<"gnu", [GNUDesignator, VLA]>;
+
+// A warning group for warnings about Microsoft extensions.
+def Microsoft : DiagGroup<"microsoft">;
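
The new groups above all follow the same TableGen pattern: a DiagGroup def
names the -W flag, and an optional second argument lists existing groups to
pull in as subgroups, so the parent flag implies every child while each child
stays independently controllable (AtomicProperties does this with
ImplicitAtomic and CustomAtomic, and Conversion with its new constant and
literal subgroups). A minimal sketch of the pattern, with invented names, not
part of this patch:

    def ChildWarning  : DiagGroup<"child-warning">;
    def ParentWarning : DiagGroup<"parent-warning", [ChildWarning]>;
    // -Wparent-warning now implies -Wchild-warning, while
    // -Wno-child-warning can still disable the child on its own.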
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
new file mode 100644
index 0000000..b463805
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
@@ -0,0 +1,212 @@
+//===--- DiagnosticIDs.h - Diagnostic IDs Handling --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Diagnostic IDs-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICIDS_H
+#define LLVM_CLANG_DIAGNOSTICIDS_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+ class Diagnostic;
+ class SourceLocation;
+
+ // Import the diagnostic enums themselves.
+ namespace diag {
+ // Start position for diagnostics.
+ enum {
+ DIAG_START_DRIVER = 300,
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + 100,
+ DIAG_START_LEX = DIAG_START_FRONTEND + 120,
+ DIAG_START_PARSE = DIAG_START_LEX + 300,
+ DIAG_START_AST = DIAG_START_PARSE + 300,
+ DIAG_START_SEMA = DIAG_START_AST + 100,
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + 3000,
+ DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
+ };
+
+ class CustomDiagInfo;
+
+ /// diag::kind - All of the diagnostics that can be emitted by the frontend.
+ typedef unsigned kind;
+
+ // Get typedefs for common diagnostics.
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+ NUM_BUILTIN_COMMON_DIAGNOSTICS
+#undef DIAG
+ };
+
+ /// Enum values that allow the client to map NOTEs, WARNINGs, and EXTENSIONs
+ /// to MAP_IGNORE (drop the diagnostic), MAP_WARNING (emit a warning), or
+ /// MAP_ERROR (emit an error). Clients can also map errors to MAP_ERROR or
+ /// MAP_FATAL (stop emitting diagnostics after this one).
+ enum Mapping {
+ // NOTE: 0 means "uncomputed".
+ MAP_IGNORE = 1, ///< Map this diagnostic to nothing, ignore it.
+ MAP_WARNING = 2, ///< Map this diagnostic to a warning.
+ MAP_ERROR = 3, ///< Map this diagnostic to an error.
+ MAP_FATAL = 4, ///< Map this diagnostic to a fatal error.
+
+ /// Map this diagnostic to "warning", but make it immune to -Werror. This
+ /// happens when you specify -Wno-error=foo.
+ MAP_WARNING_NO_WERROR = 5,
+ /// Map this diagnostic to "error", but make it immune to -Wfatal-errors.
+ /// This happens for -Wno-fatal-errors=foo.
+ MAP_ERROR_NO_WFATAL = 6
+ };
+ }
+
+/// \brief Handles and queries diagnostic IDs. Can be shared by multiple
+/// Diagnostics for multiple translation units.
+class DiagnosticIDs : public llvm::RefCountedBase<DiagnosticIDs> {
+public:
+ /// Level - The level of the diagnostic, after it has been through mapping.
+ enum Level {
+ Ignored, Note, Warning, Error, Fatal
+ };
+
+private:
+ /// CustomDiagInfo - Information for uniquing and looking up custom diags.
+ diag::CustomDiagInfo *CustomDiagInfo;
+
+public:
+ DiagnosticIDs();
+ ~DiagnosticIDs();
+
+ /// getCustomDiagID - Return an ID for a diagnostic with the specified message
+ /// and level. If this is the first request for this diagnostic, it is
+ /// registered and created, otherwise the existing ID is returned.
+ unsigned getCustomDiagID(Level L, llvm::StringRef Message);
+
+ //===--------------------------------------------------------------------===//
+ // Diagnostic classification and reporting interfaces.
+ //
+
+ /// getDescription - Given a diagnostic ID, return a description of the
+ /// issue.
+ const char *getDescription(unsigned DiagID) const;
+
+ /// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
+ /// level of the specified diagnostic ID is a Warning or Extension.
+ /// This only works on builtin diagnostics, not custom ones, and is not
+ /// legal to call on NOTEs.
+ static bool isBuiltinWarningOrExtension(unsigned DiagID);
+
+ /// \brief Determine whether the given built-in diagnostic ID is a
+ /// Note.
+ static bool isBuiltinNote(unsigned DiagID);
+
+ /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+ /// ID is for an extension of some sort.
+ ///
+ static bool isBuiltinExtensionDiag(unsigned DiagID) {
+ bool ignored;
+ return isBuiltinExtensionDiag(DiagID, ignored);
+ }
+
+ /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+ /// ID is for an extension of some sort. This also sets EnabledByDefault,
+ /// which indicates whether the diagnostic is ignored by default (in which
+ /// case -pedantic enables it) or treated as a warning/error by default.
+ ///
+ static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault);
+
+
+ /// getWarningOptionForDiag - Return the lowest-level warning option that
+ /// enables the specified diagnostic. If there is no -Wfoo flag that controls
+ /// the diagnostic, this returns null.
+ static const char *getWarningOptionForDiag(unsigned DiagID);
+
+ /// getCategoryNumberForDiag - Return the category number that the specified
+ /// DiagID belongs to, or 0 if it has no category.
+ static unsigned getCategoryNumberForDiag(unsigned DiagID);
+
+ /// getCategoryNameFromID - Given a category ID, return the name of the
+ /// category.
+ static const char *getCategoryNameFromID(unsigned CategoryID);
+
+ /// \brief Enumeration describing how the emission of a diagnostic should
+ /// be treated when it occurs during C++ template argument deduction.
+ enum SFINAEResponse {
+ /// \brief The diagnostic should not be reported, but it should cause
+ /// template argument deduction to fail.
+ ///
+ /// The vast majority of errors that occur during template argument
+ /// deduction fall into this category.
+ SFINAE_SubstitutionFailure,
+
+ /// \brief The diagnostic should be suppressed entirely.
+ ///
+ /// Warnings generally fall into this category.
+ SFINAE_Suppress,
+
+ /// \brief The diagnostic should be reported.
+ ///
+ /// Various fatal errors (e.g., template instantiation depth exceeded)
+ /// fall into this category.
+ SFINAE_Report,
+
+ /// \brief The diagnostic is an access-control diagnostic, which is treated
+ /// as a substitution failure in some contexts and reported in others.
+ SFINAE_AccessControl
+ };
+
+ /// \brief Determines whether the given built-in diagnostic ID is
+ /// for an error that is suppressed if it occurs during C++ template
+ /// argument deduction.
+ ///
+ /// When an error is suppressed due to SFINAE, the template argument
+ /// deduction fails but no diagnostic is emitted. Certain classes of
+ /// errors, such as those that involve C++ access control,
+ /// are not SFINAE errors.
+ static SFINAEResponse getDiagnosticSFINAEResponse(unsigned DiagID);
+
+private:
+ /// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
+ /// "unknown-pragmas") to have the specified mapping. Returns true (and
+ /// ignores the request) if "Group" was unknown, false otherwise.
+ bool setDiagnosticGroupMapping(const char *Group, diag::Mapping Map,
+ SourceLocation Loc, Diagnostic &Diag) const;
+
+ /// \brief Based on the way the client configured the Diagnostic
+ /// object, classify the specified diagnostic ID into a Level, consumable by
+ /// the DiagnosticClient.
+ ///
+ /// \param Loc The source location for which we want to know the diagnostic
+ /// state. Can be null in order to query the latest state.
+ DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
+ const Diagnostic &Diag) const;
+
+ /// getDiagnosticLevel - This is an internal implementation helper used when
+ /// DiagClass is already known.
+ DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID,
+ unsigned DiagClass,
+ SourceLocation Loc,
+ const Diagnostic &Diag) const;
+
+ /// ProcessDiag - This is the method used to report a diagnostic that is
+ /// finally fully formed.
+ ///
+ /// \returns true if the diagnostic was emitted, false if it was
+ /// suppressed.
+ bool ProcessDiag(Diagnostic &Diag) const;
+
+ friend class Diagnostic;
+};
+
+} // end namespace clang
+
+#endif
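
For orientation: the anonymous enum near the top of this new header is built
by expanding the DIAG macro over a tblgen-generated .inc file, so each def in
a Diagnostic*Kinds.td file becomes one enumerator in the corresponding enum.
A hypothetical round trip, with an invented name, not part of this patch:

    // In one of the Diagnostic*Kinds.td files:
    def note_example : Note<"this is an example note">;
    // tblgen emits a matching DIAG(note_example, ...) row into the
    // generated .inc file; the DIAG macro above keeps only the ENUM
    // field, yielding the enumerator 'note_example'.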
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index dcb05c8..6d1d9b6 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -45,8 +45,8 @@ def charize_microsoft_ext : Extension<"@# is a microsoft extension">;
def ext_token_used : Extension<"extension used">;
-def err_unterminated_string : Error<"missing terminating '\"' character">;
-def err_unterminated_char : Error<"missing terminating ' character">;
+def warn_unterminated_string : ExtWarn<"missing terminating '\"' character">;
+def warn_unterminated_char : ExtWarn<"missing terminating ' character">;
def err_empty_character : Error<"empty character constant">;
def err_unterminated_block_comment : Error<"unterminated /* comment">;
def err_invalid_character_to_charify : Error<
@@ -98,6 +98,10 @@ def warn_hex_escape_too_large : ExtWarn<"hex escape sequence out of range">;
def ext_string_too_long : Extension<"string literal of length %0 exceeds "
"maximum length %1 that %select{C90|ISO C99|C++}2 compilers are required to "
"support">, InGroup<OverlengthStrings>;
+def warn_ucn_escape_too_large : ExtWarn<
+ "character unicode escape sequence too long for its type">;
+def warn_ucn_not_valid_in_c89 : ExtWarn<
+ "unicode escape sequences are only valid in C99 or C++">;
//===----------------------------------------------------------------------===//
// PTH Diagnostics
@@ -240,11 +244,11 @@ def warn_pragma_ignored : Warning<"unknown pragma ignored">,
InGroup<UnknownPragmas>, DefaultIgnore;
def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
InGroup<UnknownPragmas>;
-def ext_stdc_pragma_syntax :
+def ext_on_off_switch_syntax :
ExtWarn<"expected 'ON' or 'OFF' or 'DEFAULT' in pragma">,
InGroup<UnknownPragmas>;
-def ext_stdc_pragma_syntax_eom :
- ExtWarn<"expected end of macro in STDC pragma">,
+def ext_pragma_syntax_eom :
+ ExtWarn<"expected end of macro in pragma">,
InGroup<UnknownPragmas>;
def warn_stdc_fenv_access_not_supported :
Warning<"pragma STDC FENV_ACCESS ON is not supported, ignoring pragma">,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index 646fd0d..9d7ec9d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -55,11 +55,15 @@ def ext_c99_compound_literal : Extension<
def ext_enumerator_list_comma : Extension<
"commas at the end of enumerator lists are a %select{C99|C++0x}0-specific "
"feature">;
+def err_enumerator_list_missing_comma : Error<
+ "missing ',' between enumerators">;
def ext_gnu_indirect_goto : Extension<
"use of GNU indirect-goto extension">, InGroup<GNU>;
def ext_gnu_address_of_label : Extension<
"use of GNU address-of-label extension">, InGroup<GNU>;
+def ext_gnu_local_label : Extension<
+ "use of GNU locally declared label extension">, InGroup<GNU>;
def ext_gnu_statement_expr : Extension<
"use of GNU statement expression extension">, InGroup<GNU>;
def ext_gnu_conditional_expr : Extension<
@@ -93,6 +97,7 @@ def err_expected_lsquare : Error<"expected '['">;
def err_expected_rsquare : Error<"expected ']'">;
def err_expected_rbrace : Error<"expected '}'">;
def err_expected_greater : Error<"expected '>'">;
+def err_expected_ggg : Error<"expected '>>>'">;
def err_expected_semi_declaration : Error<
"expected ';' at end of declaration">;
def err_expected_semi_decl_list : Error<
@@ -110,6 +115,8 @@ def err_expected_fn_body : Error<
def err_expected_method_body : Error<"expected method body">;
def err_invalid_token_after_toplevel_declarator : Error<
"expected ';' after top level declarator">;
+def err_invalid_equalequal_after_declarator : Error<
+ "invalid '==' at end of declaration; did you mean '='?">;
def err_expected_statement : Error<"expected statement">;
def err_expected_lparen_after : Error<"expected '(' after '%0'">;
def err_expected_lparen_after_id : Error<"expected '(' after %0">;
@@ -122,6 +129,8 @@ def err_expected_while : Error<"expected 'while' in do/while loop">;
def err_expected_semi_after : Error<"expected ';' after %0">;
def err_expected_semi_after_stmt : Error<"expected ';' after %0 statement">;
def err_expected_semi_after_expr : Error<"expected ';' after expression">;
+def err_extraneous_token_before_semi : Error<"extraneous '%0' before ';'">;
+
def err_expected_semi_after_method_proto : Error<
"expected ';' after method prototype">;
def err_expected_semi_after_namespace_name : Error<
@@ -139,6 +148,8 @@ def err_expected_semi_for : Error<"expected ';' in 'for' statement specifier">;
def err_expected_colon_after : Error<"expected ':' after %0">;
def err_label_end_of_compound_statement : Error<
"label at end of compound statement: expected statement">;
+def err_address_of_label_outside_fn : Error<
+ "use of address-of-label extension outside of a function body">;
def err_expected_string_literal : Error<"expected string literal">;
def err_expected_asm_operand : Error<
"expected string literal or '[' for asm operand">, CatInlineAsm;
@@ -152,10 +163,12 @@ def err_invalid_reference_qualifier_application : Error<
"'%0' qualifier may not be applied to a reference">;
def err_illegal_decl_reference_to_reference : Error<
"%0 declared as a reference to a reference">;
-def err_rvalue_reference : Error<
- "rvalue references are only allowed in C++0x">;
-def ext_inline_namespace : Extension<
- "inline namespaces are a C++0x feature">;
+def ext_rvalue_reference : ExtWarn<
+ "rvalue references are a C++0x extension">, InGroup<CXX0x>;
+def ext_ref_qualifier : ExtWarn<
+ "reference qualifiers on functions are a C++0x extension">, InGroup<CXX0x>;
+def ext_inline_namespace : ExtWarn<
+ "inline namespaces are a C++0x feature">, InGroup<CXX0x>;
def err_argument_required_after_attribute : Error<
"argument required after attribute">;
def err_missing_param : Error<"expected parameter declarator">;
@@ -194,10 +207,16 @@ def err_unknown_typename : Error<
"unknown type name %0">;
def err_use_of_tag_name_without_tag : Error<
"must use '%1' tag to refer to type %0%select{| in this scope}2">;
+def err_templated_using_directive : Error<
+ "cannot template a using directive">;
+def err_templated_using_declaration : Error<
+ "cannot template a using declaration">;
def err_expected_ident_in_using : Error<
"expected an identifier in using directive">;
def err_unexected_colon_in_nested_name_spec : Error<
"unexpected ':' in nested name specifier">;
+def err_bool_redeclaration : Error<
+ "redeclaration of C++ built-in type 'bool'">;
/// Objective-C parser diagnostics
def err_expected_minus_or_plus : Error<
@@ -214,13 +233,17 @@ def err_illegal_super_cast : Error<
def err_objc_illegal_visibility_spec : Error<
"illegal visibility specification">;
def err_objc_illegal_interface_qual : Error<"illegal interface qualifier">;
-def err_objc_expected_equal : Error<
- "setter/getter expects '=' followed by name">;
+def err_objc_expected_equal_for_getter : Error<
+ "expected '=' for Objective-C getter">;
+def err_objc_expected_equal_for_setter : Error<
+ "expected '=' for Objective-C setter">;
+def err_objc_expected_selector_for_getter_setter : Error<
+ "expected selector for Objective-C %select{setter|getter}0">;
def err_objc_property_requires_field_name : Error<
"property requires fields to be named">;
def err_objc_property_bitfield : Error<"property name cannot be a bitfield">;
def err_objc_expected_property_attr : Error<"unknown property attribute %0">;
-def err_objc_propertoes_require_objc2 : Error<
+def err_objc_properties_require_objc2 : Error<
"properties are an Objective-C 2 feature">;
def err_objc_unexpected_attr : Error<
"prefix attribute must be followed by an interface or protocol">;
@@ -283,8 +306,6 @@ def err_default_arg_unparsed : Error<
def err_dup_virtual : Error<"duplicate 'virtual' in base specifier">;
// C++ operator overloading
-def err_operator_missing_type_specifier : Error<
- "missing type specifier after 'operator'">;
def err_operator_string_not_empty : Error<
"string literal after 'operator' must be '\"\"'">;
@@ -334,6 +355,10 @@ def err_enum_template : Error<"enumeration cannot be a template">;
def err_missing_dependent_template_keyword : Error<
"use 'template' keyword to treat '%0' as a dependent template name">;
+def warn_static_inline_explicit_inst_ignored : Warning<
+ "ignoring '%select{static|inline}0' keyword on explicit template "
+ "instantiation">;
+
// Constructor template diagnostics.
def err_out_of_line_constructor_template_id : Error<
"out-of-line constructor for %0 cannot have template arguments">;
@@ -356,18 +381,42 @@ def err_expected_type_name_after_typename : Error<
def err_explicit_spec_non_template : Error<
"explicit %select{specialization|instantiation}0 of non-template "
"%select{class|struct|union}1 %2">;
-
-def err_variadic_templates : Error<
- "variadic templates are only allowed in C++0x">;
def err_default_template_template_parameter_not_template : Error<
"default template argument for a template template parameter must be a class "
"template">;
+def err_ctor_init_missing_comma : Error<
+ "missing ',' between base or member initializers">;
+
// C++ declarations
def err_friend_decl_defines_class : Error<
"cannot define a type in a friend declaration">;
-
+
+def warn_deleted_function_accepted_as_extension: ExtWarn<
+ "deleted function definition accepted as a C++0x extension">, InGroup<CXX0x>;
+
+// C++0x override control
+def ext_override_control_keyword : Extension<
+ "'%0' keyword accepted as a C++0x extension">, InGroup<CXX0x>;
+def ext_override_inline: Extension<
+ "'%0' keyword only allowed in declarations, allowed as an extension">;
+
+def err_duplicate_virt_specifier : Error<
+ "class member already marked '%0'">;
+def err_duplicate_class_virt_specifier : Error<
+ "class already marked '%0'">;
+
+def err_scoped_enum_missing_identifier : Error<
+ "scoped enumeration requires a name">;
+
+def err_expected_parameter_pack : Error<
+ "expected the name of a parameter pack">;
+def err_paren_sizeof_parameter_pack : Error<
+ "missing parentheses around the size of parameter pack %0">;
+def err_sizeof_parameter_pack : Error<
+ "expected parenthesized parameter pack name in 'sizeof...' expression">;
+
// Language specific pragmas
// - Generic warnings
def warn_pragma_expected_lparen : Warning<
@@ -398,5 +447,17 @@ def warn_pragma_unused_expected_var : Warning<
def warn_pragma_unused_expected_punc : Warning<
"expected ')' or ',' in '#pragma unused'">;
+// OpenCL Section 6.8.g
+def err_not_opencl_storage_class_specifier : Error<
+ "OpenCL does not support the '%0' storage class specifier">;
+
+// OpenCL EXTENSION pragma (OpenCL 1.1 [9.1])
+def warn_pragma_expected_colon : Warning<
+ "missing ':' after %0 - ignoring">;
+def warn_pragma_expected_enable_disable : Warning<
+ "expected 'enable' or 'disable' - ignoring">;
+def warn_pragma_unknown_extension : Warning<
+ "unknown OpenCL extension %0 - ignoring">;
+
} // end of Parse Issue category.
} // end of Parser diagnostics
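
Many of the messages in this file and the Sema file below rely on the
diagnostic format modifiers: %select picks one alternative by an integer
argument, %plural chooses a form by numeric condition, and the trailing digit
binds the modifier to argument %N. A sketch, with invented names, not part of
this patch:

    def err_select_demo : Error<
      "expected %select{an identifier|a type name|an expression}0 here">;
    def err_plural_demo : Error<
      "call takes %plural{0:no arguments|1:one argument|:%0 arguments}0">;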
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index a25b2a3..2e7f274 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -46,14 +46,18 @@ def err_vla_decl_has_static_storage : Error<
"variable length array declaration can not have 'static' storage duration">;
def err_vla_decl_has_extern_linkage : Error<
"variable length array declaration can not have 'extern' linkage">;
-
+
// C99 variably modified types
def err_variably_modified_template_arg : Error<
"variably modified type %0 cannot be used as a template argument">;
def err_variably_modified_nontype_template_param : Error<
"non-type template parameter of variably modified type %0">;
+def err_variably_modified_new_type : Error<
+ "'new' cannot allocate object of variably modified type %0">;
// C99 Designated Initializers
+def ext_designated_init : Extension<
+ "designated initializers are a C99 feature, accepted in C++ as an extension">;
def err_array_designator_negative : Error<
"array designator value '%0' is negative">;
def err_array_designator_empty_range : Error<
@@ -93,6 +97,8 @@ def ext_anon_param_requires_type_specifier : Extension<
"type specifier required for unnamed parameter, defaults to int">;
def err_bad_variable_name : Error<
"'%0' cannot be the name of a variable or data member">;
+def err_bad_parameter_name : Error<
+ "'%0' cannot be the name of a parameter">;
def err_parameter_name_omitted : Error<"parameter name omitted">;
def warn_unused_parameter : Warning<"unused parameter %0">,
InGroup<UnusedParameter>, DefaultIgnore;
@@ -106,7 +112,16 @@ def warn_unused_function : Warning<"unused function %0">,
InGroup<UnusedFunction>, DefaultIgnore;
def warn_unused_member_function : Warning<"unused member function %0">,
InGroup<UnusedMemberFunction>, DefaultIgnore;
-
+def warn_used_but_marked_unused: Warning<"%0 was marked unused but was used">,
+ InGroup<UsedButMarkedUnused>, DefaultIgnore;
+
+def warn_parameter_size: Warning<
+ "%0 is a large (%1 bytes) pass-by-value argument; "
+ "pass it by reference instead ?">, InGroup<LargeByValueCopy>;
+def warn_return_value_size: Warning<
+ "return value of %0 is a large (%1 bytes) pass-by-value object; "
+ "pass it by reference instead ?">, InGroup<LargeByValueCopy>;
+
def warn_implicit_function_decl : Warning<
"implicit declaration of function %0">,
InGroup<ImplicitFunctionDeclare>, DefaultIgnore;
@@ -147,6 +162,17 @@ def err_using_decl_nested_name_specifier_is_current_class : Error<
"using declaration refers to its own class">;
def err_using_decl_nested_name_specifier_is_not_base_class : Error<
"using declaration refers into '%0', which is not a base class of %1">;
+def err_using_decl_constructor_not_in_direct_base : Error<
+ "%0 is not a direct base of %1, can not inherit constructors">;
+def err_using_decl_constructor_conflict : Error<
+ "can not inherit constructor, already inherited constructor with "
+ "the same signature">;
+def note_using_decl_constructor_conflict_current_ctor : Note<
+ "conflicting constructor">;
+def note_using_decl_constructor_conflict_previous_ctor : Note<
+ "previous constructor">;
+def note_using_decl_constructor_conflict_previous_using : Note<
+ "previously inherited here">;
def err_using_decl_can_not_refer_to_class_member : Error<
"using declaration can not refer to class member">;
def err_using_decl_can_not_refer_to_namespace : Error<
@@ -210,10 +236,12 @@ def note_please_include_header : Note<
"please include the header <%0> or explicitly provide a "
"declaration for '%1'">;
def note_previous_builtin_declaration : Note<"%0 is a builtin with type %1">;
-def err_implicit_decl_requires_stdio : Error<
- "implicit declaration of '%0' requires inclusion of the header <stdio.h>">;
-def err_implicit_decl_requires_setjmp : Error<
- "implicit declaration of '%0' requires inclusion of the header <setjmp.h>">;
+def warn_implicit_decl_requires_stdio : Warning<
+ "declaration of built-in function '%0' requires inclusion of the header "
+ "<stdio.h>">;
+def warn_implicit_decl_requires_setjmp : Warning<
+ "declaration of built-in function '%0' requires inclusion of the header "
+ "<setjmp.h>">;
def warn_redecl_library_builtin : Warning<
"incompatible redeclaration of library function %0">;
def err_builtin_definition : Error<"definition of builtin function %0">;
@@ -227,6 +255,7 @@ def warn_unusual_main_decl : Warning<"'main' should not be declared "
"%select{static|inline|static or inline}0">;
def err_unusual_main_decl : Error<"'main' is not allowed to be declared "
"%select{static|inline|static or inline}0">;
+def err_main_template_decl : Error<"'main' cannot be a template">;
def err_main_returns_nonint : Error<"'main' must return 'int'">;
def err_main_surplus_args : Error<"too many parameters (%0) for 'main': "
"must be 0, 2, or 3">;
@@ -261,8 +290,8 @@ def warn_pragma_pack_pop_failed : Warning<"#pragma pack(pop, ...) failed: %0">;
def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">;
-def warn_pragma_unused_expected_localvar : Warning<
- "only local variables can be arguments to '#pragma unused'">;
+def warn_pragma_unused_expected_var_arg : Warning<
+ "only variables can be arguments to '#pragma unused'">;
def err_unsupported_pragma_weak : Error<
"using '#pragma weak' to refer to an undeclared identifier is not yet supported">;
@@ -314,16 +343,23 @@ def err_conflicting_ivar_name : Error<
"conflicting instance variable names: %0 vs %1">;
def err_inconsistant_ivar_count : Error<
"inconsistent number of instance variables specified">;
-def warn_incomplete_impl : Warning<"incomplete implementation">;
+def warn_incomplete_impl : Warning<"incomplete implementation">,
+ InGroup<DiagGroup<"incomplete-implementation">>;
def note_undef_method_impl : Note<"method definition for %0 not found">;
def note_required_for_protocol_at :
Note<"required for direct or indirect protocol %0">;
def warn_conflicting_ret_types : Warning<
"conflicting return type in implementation of %0: %1 vs %2">;
+def warn_non_covariant_ret_types : Warning<
+ "conflicting return type in implementation of %0: %1 vs %2">,
+ InGroup<DiagGroup<"method-signatures">>, DefaultIgnore;
def warn_conflicting_param_types : Warning<
"conflicting parameter types in implementation of %0: %1 vs %2">;
+def warn_non_contravariant_param_types : Warning<
+ "conflicting parameter types in implementation of %0: %1 vs %2">,
+ InGroup<DiagGroup<"method-signatures">>, DefaultIgnore;
def warn_conflicting_variadic :Warning<
"conflicting variadic declaration of method and its implementation">;
@@ -337,6 +373,7 @@ def warn_strict_multiple_method_decl : Warning<
def warn_accessor_property_type_mismatch : Warning<
"type of property %0 does not match type of accessor %1">;
def note_declared_at : Note<"declared here">;
+def note_method_declared_at : Note<"method declared here">;
def err_setter_type_void : Error<"type of setter must be void">;
def err_duplicate_method_decl : Error<"duplicate declaration of method %0">;
def warn_missing_atend : Warning<"'@end' is missing in implementation context">;
@@ -361,9 +398,17 @@ def warn_objc_property_copy_missing_on_block : Warning<
def warn_atomic_property_rule : Warning<
"writable atomic property %0 cannot pair a synthesized setter/getter "
"with a user defined setter/getter">;
+def warn_default_atomic_custom_getter_setter : Warning<
+ "atomic by default property %0 has a user defined %select{getter|setter}1 "
+ "(property should be marked 'atomic' if this is intended)">,
+ InGroup<CustomAtomic>, DefaultIgnore;
def err_use_continuation_class : Error<
- "illegal declaration of property in continuation class %0"
- ": attribute must be readwrite, while its primary must be readonly">;
+ "illegal redeclaration of property in continuation class %0"
+ " (attribute must be 'readwrite', while its primary must be 'readonly')">;
+def err_use_continuation_class_redeclaration_readwrite : Error<
+ "illegal redeclaration of 'readwrite' property in continuation class %0"
+ " (perhaps you intended this to be a 'readwrite' redeclaration of a "
+ "'readonly' public property?)">;
def err_continuation_class : Error<"continuation class has no primary class">;
def err_property_type : Error<"property cannot have array or function type %0">;
def error_missing_property_context : Error<
@@ -400,7 +445,7 @@ def error_ivar_in_superclass_use : Error<
def error_weak_property : Error<
"existing ivar %1 for __weak property %0 must be __weak">;
def error_strong_property : Error<
- "existing ivar %1 for a __strong property %0 must be garbage collectable">;
+ "property %0 must be declared __weak to match existing ivar %1 with __weak attribute">;
def error_dynamic_property_ivar_decl : Error<
"dynamic property can not have ivar specification">;
def error_duplicate_ivar_use : Error<
@@ -411,6 +456,11 @@ def warn_objc_property_attr_mutually_exclusive : Warning<
InGroup<ReadOnlySetterAttrs>, DefaultIgnore;
def warn_undeclared_selector : Warning<
"undeclared selector %0">, InGroup<UndeclaredSelector>, DefaultIgnore;
+def warn_implicit_atomic_property : Warning<
+ "property is assumed atomic by default">, InGroup<ImplicitAtomic>, DefaultIgnore;
+def warn_auto_implicit_atomic_property : Warning<
+ "property is assumed atomic when auto-synthesizing the property">,
+ InGroup<ImplicitAtomic>, DefaultIgnore;
def warn_unimplemented_selector: Warning<
"unimplemented selector %0">, InGroup<Selector>, DefaultIgnore;
def warn_unimplemented_protocol_method : Warning<
@@ -442,11 +492,15 @@ def err_introducing_special_friend : Error<
"destructor|conversion operator}0 as a friend">;
def err_tagless_friend_type_template : Error<
"friend type templates must use an elaborated type">;
+def err_no_matching_local_friend : Error<
+ "no matching function found in local scope">;
+def err_partial_specialization_friend : Error<
+ "partial specialization cannot be declared as a friend">;
def err_abstract_type_in_decl : Error<
"%select{return|parameter|variable|field}0 type %1 is an abstract class">;
def err_allocation_of_abstract_type : Error<
- "allocation of an object of abstract type %0">;
+ "allocating an object of abstract class type %0">;
def err_throw_abstract_type : Error<
"cannot throw an object of abstract type %0">;
def err_array_of_abstract_type : Error<"array of abstract class type %0">;
@@ -463,7 +517,7 @@ def err_type_defined_in_param_type : Error<
"%0 can not be defined in a parameter type">;
def note_pure_virtual_function : Note<
- "pure virtual function %0">;
+ "unimplemented pure virtual method %0 in %1">;
def err_deleted_non_function : Error<
"only functions can have deleted definitions">;
@@ -503,65 +557,70 @@ def warn_missing_exception_specification : Warning<
def err_class_redeclared_with_different_access : Error<
"%0 redeclared with '%1' access">;
def err_access : Error<
- "%1 is a %select{private|protected}0 member of %3">, NoSFINAE;
+ "%1 is a %select{private|protected}0 member of %3">, AccessControl;
def err_access_ctor : Error<
- "calling a %select{private|protected}0 constructor of class %2">, NoSFINAE;
+ "calling a %select{private|protected}0 constructor of class %2">,
+ AccessControl;
def ext_rvalue_to_reference_access_ctor : ExtWarn<
"C++98 requires an accessible copy constructor for class %2 when binding "
"a reference to a temporary; was %select{private|protected}0">,
- NoSFINAE, InGroup<BindToTemporaryCopy>;
+ AccessControl, InGroup<BindToTemporaryCopy>;
def err_access_base : Error<
"%select{base class|inherited virtual base class}0 %1 has %select{private|"
"protected}3 %select{constructor|copy constructor|copy assignment operator|"
- "destructor}2">, NoSFINAE;
+ "destructor}2">, AccessControl;
def err_access_field: Error<
"field of type %0 has %select{private|protected}2 %select{constructor|copy "
- "constructor|copy assignment operator|destructor}1">, NoSFINAE;
+ "constructor|copy assignment operator|destructor}1">, AccessControl;
def err_access_ctor_field :
Error<"field of type %1 has %select{private|protected}2 constructor">,
- NoSFINAE;
+ AccessControl;
+def err_access_dtor : Error<
+ "calling a %select{private|protected}1 destructor of class %0">,
+ AccessControl;
def err_access_dtor_base :
Error<"base class %0 has %select{private|protected}1 destructor">,
- NoSFINAE;
+ AccessControl;
def err_access_dtor_vbase :
Error<"inherited virtual base class %0 has "
"%select{private|protected}1 destructor">,
- NoSFINAE;
+ AccessControl;
def err_access_dtor_temp :
Error<"temporary of type %0 has %select{private|protected}1 destructor">,
- NoSFINAE;
+ AccessControl;
def err_access_dtor_exception :
Error<"exception object of type %0 has %select{private|protected}1 "
- "destructor">, NoSFINAE;
+ "destructor">, AccessControl;
def err_access_dtor_field :
Error<"field of type %1 has %select{private|protected}2 destructor">,
- NoSFINAE;
+ AccessControl;
def err_access_dtor_var :
Error<"variable of type %1 has %select{private|protected}2 destructor">,
- NoSFINAE;
+ AccessControl;
def err_access_assign_field :
Error<"field of type %1 has %select{private|protected}2 copy assignment"
" operator">,
- NoSFINAE;
+ AccessControl;
def err_access_assign_base :
Error<"base class %0 has %select{private|protected}1 copy assignment"
" operator">,
- NoSFINAE;
+ AccessControl;
def err_access_copy_field :
Error<"field of type %1 has %select{private|protected}2 copy constructor">,
- NoSFINAE;
+ AccessControl;
def err_access_copy_base :
Error<"base class %0 has %select{private|protected}1 copy constructor">,
- NoSFINAE;
+ AccessControl;
def err_access_dtor_ivar :
Error<"instance variable of type %0 has %select{private|protected}1 "
"destructor">,
- NoSFINAE;
+ AccessControl;
def note_previous_access_declaration : Note<
"previously declared '%1' here">;
def err_access_outside_class : Error<
- "access to %select{private|protected}0 member outside any class context">;
+ "access to %select{private|protected}0 member outside any class context">,
+ AccessControl;
def note_access_natural : Note<
"%select{|implicitly }1declared %select{private|protected}0 here">;
def note_access_constrained_by_path : Note<
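
The hunk above retags the C++ access diagnostics from NoSFINAE to
AccessControl, matching the SFINAE_AccessControl response added in
DiagnosticIDs.h: such diagnostics act as substitution failures in some
template-deduction contexts and are reported in others. A hypothetical def
showing the marker, not part of this patch:

    def err_access_demo : Error<
      "%0 is an inaccessible member of %1">, AccessControl;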
@@ -604,6 +663,10 @@ def err_virtual_non_function : Error<
"'virtual' can only appear on non-static member functions">;
def err_virtual_out_of_class : Error<
"'virtual' can only be specified inside the class definition">;
+def err_virtual_member_function_template : Error<
+ "'virtual' can not be specified on member function templates">;
+def err_static_overrides_virtual : Error<
+ "'static' member function %0 overrides a virtual function in a base class">;
def err_explicit_non_function : Error<
"'explicit' can only appear on non-static member functions">;
def err_explicit_out_of_class : Error<
@@ -619,7 +682,7 @@ def err_not_integral_type_bitfield : Error<
def err_not_integral_type_anon_bitfield : Error<
"anonymous bit-field has non-integral type %0">;
def err_member_initialization : Error<
- "%0 can only be initialized if it is a static const integral data member">;
+ "fields can only be initialized in constructors">;
def err_member_function_initialization : Error<
"initializer on function does not look like a pure-specifier">;
def err_non_virtual_pure : Error<
@@ -652,6 +715,10 @@ def note_nontrivial_has_nontrivial : Note<
def note_nontrivial_user_defined : Note<
"because type %0 has a user-declared %select{constructor|copy constructor|"
"copy assignment operator|destructor}1">;
+def err_static_data_member_not_allowed_in_union_or_anon_struct : Error<
+ "static data member %0 not allowed in %select{anonymous struct|union}1">;
+def err_union_member_of_reference_type : Error<
+ "union member %0 has reference type %1">;
def err_different_return_type_for_overriding_virtual_function : Error<
"virtual function %0 has a different return type (%1) than the "
@@ -661,7 +728,7 @@ def note_overridden_virtual_function : Note<
def err_covariant_return_inaccessible_base : Error<
"invalid covariant return for virtual function: %1 is a "
- "%select{private|protected}2 base class of %0">, NoSFINAE;
+ "%select{private|protected}2 base class of %0">, AccessControl;
def err_covariant_return_ambiguous_derived_to_base_conv : Error<
"return type of virtual function %3 is not covariant with the return type of "
"the function it overrides (ambiguous conversion from derived class "
@@ -684,6 +751,9 @@ def err_covariant_return_type_class_type_more_qualified : Error<
def err_constructor_cannot_be : Error<"constructor cannot be declared '%0'">;
def err_invalid_qualified_constructor : Error<
"'%0' qualifier is not allowed on a constructor">;
+def err_ref_qualifier_constructor : Error<
+ "ref-qualifier '%select{&&|&}0' is not allowed on a constructor">;
+
def err_constructor_return_type : Error<
"constructor cannot have a return type">;
def err_constructor_redeclared : Error<"constructor cannot be redeclared">;
@@ -694,6 +764,8 @@ def warn_no_constructor_for_refconst : Warning<
"initialize its non-modifiable members">;
def note_refconst_member_not_initialized : Note<
"%select{const|reference}0 member %1 will never be initialized">;
+def ext_ms_explicit_constructor_call : ExtWarn<
+ "explicit constructor calls are a Microsoft extension">, InGroup<Microsoft>;
// C++ destructors
def err_destructor_not_member : Error<
@@ -701,6 +773,8 @@ def err_destructor_not_member : Error<
def err_destructor_cannot_be : Error<"destructor cannot be declared '%0'">;
def err_invalid_qualified_destructor : Error<
"'%0' qualifier is not allowed on a destructor">;
+def err_ref_qualifier_destructor : Error<
+ "ref-qualifier '%select{&&|&}0' is not allowed on a destructor">;
def err_destructor_return_type : Error<"destructor cannot have a return type">;
def err_destructor_redeclared : Error<"destructor cannot be redeclared">;
def err_destructor_with_params : Error<"destructor cannot have any parameters">;
@@ -721,7 +795,8 @@ def err_init_conversion_failed : Error<
"base class|a vector element}0 of type %1 with an %select{rvalue|lvalue}2 of "
"type %3">;
-def err_lvalue_to_rvalue_ref : Error<"rvalue reference cannot bind to lvalue">;
+def err_lvalue_to_rvalue_ref : Error<"rvalue reference to type %0 cannot bind "
+ "to lvalue of type %1">;
def err_invalid_initialization : Error<
"invalid initialization of reference of type %0 from expression of type %1">;
def err_lvalue_to_rvalue_ambig_ref : Error<"rvalue reference cannot bind to lvalue "
@@ -745,6 +820,10 @@ def err_reference_bind_init_list : Error<
def err_init_list_bad_dest_type : Error<
"%select{|non-aggregate }0type %1 cannot be initialized with an initializer "
"list">;
+def err_member_function_call_bad_cvr : Error<"member function %0 not viable: "
+ "'this' argument has type %1, but function is not marked "
+ "%select{const|restrict|const or restrict|volatile|const or volatile|"
+ "volatile or restrict|const, volatile, or restrict}2">;
def err_reference_init_drops_quals : Error<
"initialization of reference to type %0 with a %select{value|temporary}1 of type %2 drops "
@@ -768,7 +847,16 @@ def err_init_reference_member_uninitialized : Error<
def note_uninit_reference_member : Note<
"uninitialized reference member is here">;
def warn_field_is_uninit : Warning<"field is uninitialized when used here">,
- InGroup<DiagGroup<"uninitialized">>;
+ InGroup<Uninitialized>;
+def warn_uninit_var : Warning<"variable %0 is possibly uninitialized when used here">,
+ InGroup<DiagGroup<"uninitialized-experimental">>, DefaultIgnore;
+def note_uninit_var_def : Note<
+ "variable %0 is declared here">;
+def warn_uninit_var_captured_by_block : Warning<
+ "variable %0 is possibly uninitialized when captured by block">,
+ InGroup<DiagGroup<"uninitialized-experimental">>, DefaultIgnore;
+def note_var_fixit_add_initialization : Note<
+ "add initialization to silence this warning">;
def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
def err_temp_copy_no_viable : Error<
@@ -798,20 +886,46 @@ def err_temp_copy_incomplete : Error<
// C++0x decltype
def err_cannot_determine_declared_type_of_overloaded_function : Error<
- "cannot determine the %select{type|declared type}0 of an overloaded "
- "function">;
+ "cannot determine the type of an overloaded function">;
// C++0x auto
def err_auto_variable_cannot_appear_in_own_initializer : Error<
"variable %0 declared with 'auto' type cannot appear in its own initializer">;
def err_illegal_decl_array_of_auto : Error<
- "'%0' declared as array of 'auto'">;
+ "'%0' declared as array of %1">;
+def err_new_array_of_auto : Error<
+ "cannot allocate array of 'auto'">;
def err_auto_not_allowed : Error<
- "'auto' not allowed in %select{function prototype|struct member|union member"
- "|class member|exception declaration|template parameter|block literal}0">;
+ "'auto' not allowed %select{in function prototype|in struct member"
+ "|in union member|in class member|in exception declaration"
+ "|in template parameter|in block literal|in template argument|here}0">;
def err_auto_var_requires_init : Error<
"declaration of variable %0 with type %1 requires an initializer">;
-
+def err_auto_new_requires_ctor_arg : Error<
+ "new expression for type %0 requires a constructor argument">;
+def err_auto_var_init_multiple_expressions : Error<
+ "initializer for variable %0 with type %1 contains multiple expressions">;
+def err_auto_new_ctor_multiple_expressions : Error<
+ "new expression for type %0 contains multiple constructor arguments">;
+def err_auto_missing_trailing_return : Error<
+ "'auto' return without trailing return type">;
+def err_trailing_return_without_auto : Error<
+ "function with trailing return type must specify return type 'auto', not %0">;
+def err_auto_var_deduction_failure : Error<
+ "variable %0 with type %1 has incompatible initializer of type %2">;
+def err_auto_new_deduction_failure : Error<
+ "new expression for type %0 has incompatible constructor argument of type %1">;
+def err_auto_different_deductions : Error<
+ "'auto' deduced as %0 in declaration of %1 and deduced as %2 in declaration of %3">;
+
+// C++0x override control
+def override_keyword_only_allowed_on_virtual_member_functions : Error<
+ "only virtual member functions can be marked '%0'">;
+def err_function_marked_override_not_overriding : Error<
+ "%0 marked 'override' but does not override any member functions">;
+def err_class_marked_final_used_as_base : Error<
+ "base %0 is marked 'final'">;
+
// C++0x attributes
def err_repeat_attribute : Error<"'%0' attribute cannot be repeated">;
@@ -820,6 +934,32 @@ def err_final_function_overridden : Error<
"declaration of %0 overrides a 'final' function">;
def err_final_base : Error<
"derivation from 'final' %0">;
+
+def err_function_overriding_without_override : Error<
+ "%0 overrides function%s1 without being marked 'override'">;
+
+// C++0x scoped enumerations
+def err_enum_invalid_underlying : Error<
+ "non-integral type %0 is an invalid underlying type">;
+def err_enumerator_too_large : Error<
+ "enumerator value is not representable in the underlying type %0">;
+def ext_enumerator_too_large : ExtWarn<
+ "enumerator value is not representable in the underlying type %0">,
+ InGroup<Microsoft>;
+def err_enumerator_wrapped : Error<
+ "enumerator value %0 is not representable in the underlying type %1">;
+def err_enum_redeclare_type_mismatch : Error<
+ "enumeration redeclared with different underlying type %0 (was %1)">;
+def err_enum_redeclare_fixed_mismatch : Error<
+ "enumeration previously declared with %select{non|}0fixed underlying type">;
+def err_enum_redeclare_scoped_mismatch : Error<
+ "enumeration previously declared as %select{un|}0scoped">;
+
+// C++0x delegating constructors
+def err_delegation_0x_only : Error<
+ "delegating constructors are permitted only in C++0x">;
+def err_delegation_unimplemented : Error<
+ "delegating constructors are not fully implemented">;
// Objective-C++
def err_objc_decls_may_only_appear_in_global_scope : Error<
@@ -842,13 +982,15 @@ def err_iboutletcollection_object_type : Error<
def err_attribute_missing_parameter_name : Error<
"attribute requires unquoted parameter">;
def err_attribute_invalid_vector_type : Error<"invalid vector element type %0">;
+def err_attribute_bad_neon_vector_size : Error<
+ "Neon vector size must be 64 or 128 bits">;
def err_attribute_argument_not_int : Error<
"'%0' attribute requires integer constant">;
def err_attribute_argument_outof_range : Error<
"init_priority attribute requires integer constant between "
"101 and 65535 inclusive">;
def err_init_priority_object_attr : Error<
- "can only use ‘init_priority’ attribute on file-scope definitions "
+ "can only use 'init_priority' attribute on file-scope definitions "
"of objects of class type">;
def err_attribute_argument_n_not_int : Error<
"'%0' attribute requires parameter %1 to be an integer constant">;
@@ -858,8 +1000,12 @@ def err_attribute_argument_out_of_bounds : Error<
"'%0' attribute parameter %1 is out of bounds">;
def err_attribute_requires_objc_interface : Error<
"attribute may only be applied to an Objective-C interface">;
+def err_attribute_uuid_malformed_guid : Error<
+ "uuid attribute contains a malformed GUID">;
def warn_nonnull_pointers_only : Warning<
"nonnull attribute only applies to pointer arguments">;
+def err_attribute_invalid_implicit_this_argument : Error<
+ "'%0' attribute is invalid for the implicit this argument">;
def err_ownership_type : Error<
"%0 attribute only applies to %1 arguments">;
def err_format_strftime_third_parameter : Error<
@@ -868,6 +1014,9 @@ def err_format_attribute_requires_variadic : Error<
"format attribute requires variadic function">;
def err_format_attribute_not : Error<"format argument not %0">;
def err_format_attribute_result_not : Error<"function does not return %0">;
+def err_format_attribute_implicit_this_format_string : Error<
+ "format attribute cannot specify the implicit this argument as the format "
+ "string">;
def err_attribute_invalid_size : Error<
"vector size not an integral multiple of component size">;
def err_attribute_zero_size : Error<"zero vector size">;
@@ -889,9 +1038,6 @@ def err_attribute_address_space_too_high : Error<
"address space is larger than the maximum supported (%0)">;
def err_attribute_address_multiple_qualifiers : Error<
"multiple address spaces specified for type">;
-def err_implicit_pointer_address_space_cast : Error<
- "illegal implicit conversion between two pointers with different address "
- "spaces">;
def err_as_qualified_auto_decl : Error<
"automatic variable qualified with an address space">;
def err_arg_with_address_space : Error<
@@ -921,32 +1067,39 @@ def warn_attribute_weak_on_local : Warning<
def warn_weak_identifier_undeclared : Warning<
"weak identifier %0 never declared">;
def err_attribute_weak_static : Error<
- "weak declaration of '%0' must be public">;
+ "weak declaration cannot have internal linkage">;
def warn_attribute_weak_import_invalid_on_definition : Warning<
"'weak_import' attribute cannot be specified on a definition">;
def err_attribute_weakref_not_static : Error<
- "weakref declaration of '%0' must be static">;
+ "weakref declaration must have internal linkage">;
def err_attribute_weakref_not_global_context : Error<
"weakref declaration of '%0' must be in a global context">;
def err_attribute_weakref_without_alias : Error<
"weakref declaration of '%0' must also have an alias attribute">;
+def err_alias_not_supported_on_darwin : Error<
+ "only weak aliases are supported on Darwin">;
def warn_attribute_wrong_decl_type : Warning<
- "%0 attribute only applies to %select{function|union|"
- "variable and function|function or method|parameter|"
- "parameter or Objective-C method |function, method or block|"
- "virtual method or class|function, method, or parameter|class|virtual method"
- "|member}1 types">;
+ "%0 attribute only applies to %select{functions|unions|"
+ "variables and functions|functions and methods|parameters|"
+ "parameters and methods|functions, methods and blocks|"
+ "classes and virtual methods|functions, methods, and parameters|"
+ "classes|virtual methods|class members|variables|methods|"
+ "variables, functions and labels}1">;
def err_attribute_wrong_decl_type : Error<
- "%0 attribute only applies to %select{function|union|"
- "variable and function|function or method|parameter|"
- "parameter or Objective-C method |function, method or block|"
- "virtual method or class|function, method, or parameter|class|virtual method"
- "|member}1 types">;
+ "%0 attribute only applies to %select{functions|unions|"
+ "variables and functions|functions and methods|parameters|"
+ "parameters and methods|functions, methods and blocks|"
+ "classes and virtual methods|functions, methods, and parameters|"
+ "classes|virtual methods|class members|variables|methods}1">;
def warn_function_attribute_wrong_type : Warning<
"%0 only applies to function types; type here is %1">;
def warn_gnu_inline_attribute_requires_inline : Warning<
"'gnu_inline' attribute requires function to be marked 'inline',"
" attribute ignored">;
+def err_attribute_vecreturn_only_vector_member : Error<
+ "the vecreturn attribute can only be used on a class or structure with one member, which must be a vector">;
+def err_attribute_vecreturn_only_pod_record : Error<
+ "the vecreturn attribute can only be used on a POD (plain old data) class or structure (i.e. no virtual functions)">;
def err_cconv_change : Error<
"function declared '%0' here was previously declared "
"%select{'%2'|without calling convention}1">;
@@ -955,9 +1108,8 @@ def err_cconv_knr : Error<
def err_cconv_varargs : Error<
"variadic function cannot use %0 calling convention">;
def err_regparm_mismatch : Error<"function declared with with regparm(%0) "
- "attribute was previously declared %plural{0:without the regparm|1:"
- "with the regparm(1)|2:with the regparm(2)|3:with the regparm(3)|:with the"
- "regparm}1 attribute">;
+ "attribute was previously declared "
+ "%plural{0:without the regparm|:with the regparm(%1)}1 attribute">;
def warn_impcast_vector_scalar : Warning<
"implicit conversion turns vector to scalar: %0 to %1">,
@@ -983,6 +1135,16 @@ def warn_impcast_integer_precision : Warning<
def warn_impcast_integer_64_32 : Warning<
"implicit conversion loses integer precision: %0 to %1">,
InGroup<DiagGroup<"shorten-64-to-32">>, DefaultIgnore;
+def warn_impcast_integer_precision_constant : Warning<
+ "implicit conversion from %2 to %3 changes value from %0 to %1">,
+ InGroup<DiagGroup<"constant-conversion">>;
+def warn_impcast_bitfield_precision_constant : Warning<
+ "implicit truncation from %2 to bitfield changes value from %0 to %1">,
+ InGroup<DiagGroup<"constant-conversion">>;
+def warn_impcast_literal_float_to_integer : Warning<
+ "implicit conversion turns literal floating-point number into integer: "
+ "%0 to %1">,
+ InGroup<DiagGroup<"literal-conversion">>, DefaultIgnore;
def warn_cast_align : Warning<
"cast from %0 to %1 increases required alignment from %2 to %3">,
@@ -1023,7 +1185,6 @@ def warn_attribute_malloc_pointer_only : Warning<
def warn_transparent_union_nonpointer : Warning<
"'transparent_union' attribute support incomplete; only supported for "
"pointer unions">;
-
def warn_attribute_sentinel_named_arguments : Warning<
"'sentinel' attribute requires named arguments">;
def warn_attribute_sentinel_not_variadic : Warning<
@@ -1048,9 +1209,9 @@ def err_attribute_regparm_invalid_number : Error<
// Clang-Specific Attributes
-def err_attribute_iboutlet : Error<
+def warn_attribute_iboutlet : Warning<
"%0 attribute can only be applied to instance variables or properties">;
-def err_attribute_ibaction: Error<
+def warn_attribute_ibaction: Warning<
"ibaction attribute can only be applied to Objective-C instance methods">;
def err_attribute_overloadable_not_function : Error<
"'overloadable' attribute can only be applied to a function">;
@@ -1062,8 +1223,11 @@ def note_attribute_overloadable_prev_overload : Note<
def err_attribute_overloadable_no_prototype : Error<
"'overloadable' function %0 must have a prototype">;
def warn_ns_attribute_wrong_return_type : Warning<
- "%0 attribute only applies to functions or methods that "
- "return a pointer or Objective-C object">;
+ "%0 attribute only applies to %select{functions|methods}1 that "
+ "return %select{an Objective-C object|a pointer}2">;
+def warn_ns_attribute_wrong_parameter_type : Warning<
+ "%0 attribute only applies to %select{Objective-C object|pointer}1 "
+ "parameters">;
// Function Parameter Semantic Analysis.
def err_param_with_void_type : Error<"argument may not have 'void' type">;
@@ -1148,12 +1312,14 @@ def note_ovl_candidate : Note<"candidate "
"function |function |constructor |"
"is the implicit default constructor|"
"is the implicit copy constructor|"
- "is the implicit copy assignment operator}0%1">;
+ "is the implicit copy assignment operator|"
+ "is an inherited constructor}0%1">;
def warn_init_pointer_from_false : Warning<
"initialization of pointer of type %0 from literal 'false'">,
InGroup<BoolConversions>;
+def note_ovl_candidate_inherited_constructor : Note<"inherited from here">;
def note_ovl_candidate_bad_deduction : Note<
"candidate template ignored: failed template argument deduction">;
def note_ovl_candidate_incomplete_deduction : Note<"candidate template ignored: "
@@ -1181,14 +1347,15 @@ def note_ovl_candidate_arity : Note<"candidate "
"%select{function|function|constructor|function|function|constructor|"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0 %select{|template }1"
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0 %select{|template }1"
"not viable: requires%select{ at least| at most|}2 %3 argument%s3, but %4 "
"%plural{1:was|:were}4 provided">;
def note_ovl_candidate_deleted : Note<
- "candidate %select{function|function|constructor|"
- "function |function |constructor |||}0%1 "
- "has been explicitly %select{made unavailable|deleted}2">;
+ "candidate %select{function|function|constructor|"
+ "function |function |constructor ||||constructor (inherited)}0%1 "
+ "has been explicitly %select{made unavailable|deleted}2">;
// Giving the index of the bad argument really clutters this message, and
// it's relatively unimportant because 1) it's generally obvious which
@@ -1200,21 +1367,24 @@ def note_ovl_candidate_bad_conv_incomplete : Note<"candidate "
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1 "
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1 "
"not viable: cannot convert argument of incomplete type %2 to %3">;
def note_ovl_candidate_bad_overload : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1"
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1"
" not viable: no overload of %3 matching %2 for %ordinal4 argument">;
def note_ovl_candidate_bad_conv : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1"
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1"
" not viable: no known conversion from %2 to %3 for "
"%select{%ordinal5 argument|object argument}4">;
def note_ovl_candidate_bad_addrspace : Note<"candidate "
@@ -1222,12 +1392,13 @@ def note_ovl_candidate_bad_addrspace : Note<"candidate "
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1 not viable: "
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
"%select{%ordinal6|'this'}5 argument (%2) is in "
"address space %3, but parameter must be in address space %4">;
def note_ovl_candidate_bad_cvr_this : Note<"candidate "
"%select{|function|||function||||"
- "function (the implicit copy assignment operator)}0 not viable: "
+ "function (the implicit copy assignment operator)|}0 not viable: "
"'this' argument has type %2, but method is not marked "
"%select{const|restrict|const or restrict|volatile|const or volatile|"
"volatile or restrict|const, volatile, or restrict}3">;
@@ -1236,7 +1407,8 @@ def note_ovl_candidate_bad_cvr : Note<"candidate "
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1 not viable: "
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
"%ordinal4 argument (%2) would lose "
"%select{const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}3 qualifier"
@@ -1246,7 +1418,8 @@ def note_ovl_candidate_bad_base_to_derived_conv : Note<"candidate "
"function |function |constructor |"
"constructor (the implicit default constructor)|"
"constructor (the implicit copy constructor)|"
- "function (the implicit copy assignment operator)}0%1"
+ "function (the implicit copy assignment operator)|"
+ "constructor (inherited)}0%1"
" not viable: cannot %select{convert from|convert from|bind}2 "
"%select{base class pointer|superclass|base class object of type}2 %3 to "
"%select{derived class pointer|subclass|derived class reference}2 %4 for "
@@ -1260,13 +1433,26 @@ def note_ovl_builtin_unary_candidate : Note<
"built-in candidate %0">;
def err_ovl_no_viable_function_in_init : Error<
"no matching constructor for initialization of %0">;
+def err_ovl_no_conversion_in_cast : Error<
+ "cannot convert %1 to %2 without a conversion operator">;
+def err_ovl_no_viable_conversion_in_cast : Error<
+ "no matching conversion for %select{|static_cast|reinterpret_cast|"
+ "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+def err_ovl_ambiguous_conversion_in_cast : Error<
+ "ambiguous conversion for %select{|static_cast|reinterpret_cast|"
+ "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+def err_ovl_deleted_conversion_in_cast : Error<
+ "%select{|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
+ "functional-style cast}0 from %1 to %2 uses deleted function">;
def err_ovl_ambiguous_init : Error<"call to constructor of %0 is ambiguous">;
def err_ref_init_ambiguous : Error<
"reference initialization of type %0 with initializer of type %1 is ambiguous">;
def err_ovl_deleted_init : Error<
"call to %select{unavailable|deleted}0 constructor of %1">;
-def err_ovl_ambiguous_oper : Error<
- "use of overloaded operator '%0' is ambiguous">;
+def err_ovl_ambiguous_oper_unary : Error<
+ "use of overloaded operator '%0' is ambiguous (operand type %1)">;
+def err_ovl_ambiguous_oper_binary : Error<
+ "use of overloaded operator '%0' is ambiguous (with operand types %1 and %2)">;
def err_ovl_no_viable_oper : Error<"no viable overloaded '%0'">;
def err_ovl_deleted_oper : Error<
"overload resolution selected %select{unavailable|deleted}0 operator '%1'">;
@@ -1274,6 +1460,9 @@ def err_ovl_no_viable_subscript :
Error<"no viable overloaded operator[] for type %0">;
def err_ovl_no_oper :
Error<"type %0 does not provide a %select{subscript|call}1 operator">;
+def err_ovl_unresolvable :
+ Error<"cannot resolve overloaded function from context">;
+
def err_ovl_no_viable_object_call : Error<
"no matching function for call to object of type %0">;
@@ -1337,14 +1526,16 @@ def note_template_param_prev_default_arg : Note<
"previous default template argument defined here">;
def err_template_param_default_arg_missing : Error<
"template parameter missing a default argument">;
-def err_template_parameter_default_in_function_template : Error<
- "a template parameter of a function template cannot have a default argument "
- "in C++98">;
+def ext_template_parameter_default_in_function_template : ExtWarn<
+ "default template arguments for a function template are a C++0x extension">,
+ InGroup<CXX0x>;
def err_template_parameter_default_template_member : Error<
"cannot add a default template argument to the definition of a member of a "
"class template">;
def err_template_parameter_default_friend_template : Error<
"default template argument not permitted on a friend template">;
+def err_template_template_parm_no_parms : Error<
+ "template template parameter must have its own template parameters">;
def err_template_variable : Error<"variable %0 declared as a template">;
def err_template_variable_noparams : Error<
@@ -1401,10 +1592,10 @@ def err_template_arg_not_convertible : Error<
"of type %1">;
def warn_template_arg_negative : Warning<
"non-type template argument with value '%0' converted to '%1' for unsigned "
- "template parameter of type %2">;
+ "template parameter of type %2">, InGroup<Conversion>, DefaultIgnore;
def warn_template_arg_too_large : Warning<
"non-type template argument value '%0' truncated to '%1' for "
- "template parameter of type %2">;
+ "template parameter of type %2">, InGroup<Conversion>, DefaultIgnore;
def err_template_arg_no_ref_bind : Error<
"non-type template parameter of reference type %0 cannot bind to template "
"argument of type %1">;
@@ -1440,10 +1631,15 @@ def err_template_arg_not_object_or_func : Error<
"non-type template argument does not refer to an object or function">;
def err_template_arg_not_pointer_to_member_form : Error<
"non-type template argument is not a pointer to member constant">;
-def err_template_arg_extra_parens : Error<
- "non-type template argument cannot be surrounded by parentheses">;
+def ext_template_arg_extra_parens : ExtWarn<
+ "address non-type template argument cannot be surrounded by parentheses">;
def err_pointer_to_member_type : Error<
"invalid use of pointer to member type after %select{.*|->*}0">;
+def err_pointer_to_member_call_drops_quals : Error<
+ "call to pointer to member function of type %0 drops '%1' qualifier%s2">;
+def err_pointer_to_member_oper_value_classify: Error<
+ "pointer-to-member function type %0 can only be called on an "
+ "%select{rvalue|lvalue}1">;
// C++ template specialization
def err_template_spec_unknown_kind : Error<
@@ -1462,10 +1658,20 @@ def err_template_spec_decl_out_of_scope_global : Error<
"%select{class template|class template partial|function template|member "
"function|static data member|member class}0 specialization of %1 must "
"originally be declared in the global scope">;
+def ext_template_spec_decl_out_of_scope_global : ExtWarn<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class}0 specialization of %1 must "
+ "originally be declared in the global scope; accepted as a C++0x extension">,
+ InGroup<CXX0x>;
def err_template_spec_decl_out_of_scope : Error<
"%select{class template|class template partial|function template|member "
"function|static data member|member class}0 specialization of %1 must "
"originally be declared in namespace %2">;
+def ext_template_spec_decl_out_of_scope : ExtWarn<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class}0 specialization of %1 must "
+ "originally be declared in namespace %2; accepted as a C++0x extension">,
+ InGroup<CXX0x>;
def err_template_spec_redecl_out_of_scope : Error<
"%select{class template|class template partial|function template|member "
"function|static data member|member class}0 specialization of %1 not in a "
@@ -1549,6 +1755,8 @@ def err_function_template_spec_ambiguous : Error<
"arguments to identify a particular function template">;
def note_function_template_spec_matched : Note<
"function template matches specialization %0">;
+def err_function_template_partial_spec : Error<
+ "function template partial specialization is not allowed">;
// C++ Template Instantiation
def err_template_recursion_depth_exceeded : Error<
@@ -1597,6 +1805,8 @@ def note_instantiation_contexts_suppressed : Note<
def err_field_instantiates_to_function : Error<
"data member instantiated with function type %0">;
+def err_variable_instantiates_to_function : Error<
+ "%select{variable|static data member}0 instantiated with function type %1">;
def err_nested_name_spec_non_tag : Error<
"type %0 cannot be used prior to '::' because it has no members">;
@@ -1607,7 +1817,8 @@ def note_previous_explicit_instantiation : Note<
"previous explicit instantiation is here">;
def ext_explicit_instantiation_after_specialization : Extension<
"explicit instantiation of %0 that occurs after an explicit "
- "specialization will be ignored (C++0x extension)">;
+ "specialization will be ignored (C++0x extension)">,
+ InGroup<CXX0x>;
def note_previous_template_specialization : Note<
"previous template specialization is here">;
def err_explicit_instantiation_enum : Error<
@@ -1678,7 +1889,12 @@ def note_typename_refers_here : Note<
def err_typename_missing : Error<
"missing 'typename' prior to dependent type name '%0%1'">;
def ext_typename_outside_of_template : ExtWarn<
- "'typename' occurs outside of a template">;
+ "'typename' occurs outside of a template">, InGroup<CXX0x>;
+def err_typename_refers_to_using_value_decl : Error<
+ "typename specifier refers to a dependent using declaration for a value "
+ "%0 in %1">;
+def note_using_value_decl_missing_typename : Note<
+ "add 'typename' to treat this using declaration as a type">;
def err_template_kw_refers_to_non_template : Error<
"%0 following the 'template' keyword does not refer to a template">;
@@ -1691,7 +1907,7 @@ def note_referenced_class_template : Error<
def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
def ext_template_outside_of_template : ExtWarn<
- "'template' keyword outside of a template">;
+ "'template' keyword outside of a template">, InGroup<CXX0x>;
// C++0x Variadic Templates
def err_template_param_pack_default_arg : Error<
@@ -1700,17 +1916,62 @@ def err_template_param_pack_must_be_last_template_parameter : Error<
"template parameter pack must be the last template parameter">;
def err_template_parameter_pack_non_pack : Error<
- "template %select{type|non-type|template}0 parameter%select{| pack}1 "
- "conflicts with previous template %select{type|non-type|template}0 "
- "parameter%select{ pack|}1">;
+ "%select{template type|non-type template|template template}0 parameter"
+ "%select{| pack}1 conflicts with previous %select{template type|"
+ "non-type template|template template}0 parameter%select{ pack|}1">;
def note_template_parameter_pack_non_pack : Note<
- "template %select{type|non-type|template}0 parameter%select{| pack}1 "
- "does not match template %select{type|non-type|template}0 "
- "parameter%select{ pack|}1 in template argument">;
+ "%select{template type|non-type template|template template}0 parameter"
+ "%select{| pack}1 does not match %select{template type|non-type template"
+ "|template template}0 parameter%select{ pack|}1 in template argument">;
def note_template_parameter_pack_here : Note<
- "previous template %select{type|non-type|template}0 "
+ "previous %select{template type|non-type template|template template}0 "
"parameter%select{| pack}1 declared here">;
+def err_unexpanded_parameter_pack_0 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization}0 "
+ "contains an unexpanded parameter pack">;
+def err_unexpanded_parameter_pack_1 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization}0 "
+ "contains unexpanded parameter pack %1">;
+def err_unexpanded_parameter_pack_2 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization}0 "
+ "contains unexpanded parameter packs %1 and %2">;
+def err_unexpanded_parameter_pack_3_or_more : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization}0 "
+ "contains unexpanded parameter packs %1, %2, ...">;
+
+def err_pack_expansion_without_parameter_packs : Error<
+ "pack expansion does not contain any unexpanded parameter packs">;
+def err_pack_expansion_length_conflict : Error<
+ "pack expansion contains parameter packs %0 and %1 that have different "
+ "lengths (%2 vs. %3)">;
+def err_pack_expansion_length_conflict_multilevel : Error<
+ "pack expansion contains parameter pack %0 that has a different "
+ "length (%1 vs. %2) from outer parameter packs">;
+def err_pack_expansion_member_init : Error<
+ "pack expansion for initialization of member %0">;
+
+def err_function_parameter_pack_without_parameter_packs : Error<
+ "type %0 of function parameter pack does not contain any unexpanded "
+ "parameter packs">;
+def err_ellipsis_in_declarator_not_parameter : Error<
+ "only function and template parameters can be parameter packs">;
+
+def err_sizeof_pack_no_pack_name : Error<
+ "%0 does not refer to the name of a parameter pack">;
+
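A short, assumed illustration of the sizeof... diagnostic:

    template<typename... Ts>
    unsigned count(Ts...) { return sizeof...(Ts); } // well-formed
    // Naming an ordinary variable instead of a pack, e.g. sizeof...(n) for
    // a parameter 'int n', produces "'n' does not refer to the name of a
    // parameter pack".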
def err_unexpected_typedef : Error<
"unexpected type name %0: expected expression">;
def err_unexpected_namespace : Error<
@@ -1721,9 +1982,20 @@ def note_dependent_var_use : Note<"must qualify identifier to find this "
def err_undeclared_use : Error<"use of undeclared %0">;
def warn_deprecated : Warning<"%0 is deprecated">,
InGroup<DeprecatedDeclarations>;
+def warn_deprecated_message : Warning<"%0 is deprecated: %1">,
+ InGroup<DeprecatedDeclarations>;
+def warn_deprecated_fwdclass_message : Warning<
+ "%0 maybe deprecated because receiver type is unknown">,
+ InGroup<DeprecatedDeclarations>;
+def warn_deprecated_def : Warning<
+ "Implementing deprecated %select{method|class|category}0">,
+ InGroup<DeprecatedImplementations>, DefaultIgnore;
def err_unavailable : Error<"%0 is unavailable">;
+def err_unavailable_message : Error<"%0 is unavailable: %1">;
+def warn_unavailable_fwdclass_message : Warning<
+ "%0 maybe unavailable because receiver type is unknown">;
def note_unavailable_here : Note<
- "function has been explicitly marked %select{unavailable|deleted}0 here">;
+ "function has been explicitly marked %select{unavailable|deleted|deprecated}0 here">;
def warn_not_enough_argument : Warning<
"not enough variable arguments in %0 declaration to fit a sentinel">;
def warn_missing_sentinel : Warning <
@@ -1737,6 +2009,14 @@ def err_redefinition : Error<"redefinition of %0">;
def err_definition_of_implicitly_declared_member : Error<
"definition of implicitly declared %select{constructor|copy constructor|"
"copy assignment operator|destructor}1">;
+def err_redefinition_extern_inline : Error<
+ "redefinition of a 'extern inline' function %0 is not supported in "
+ "%select{C99 mode|C++}1">;
+
+// This should eventually be an error.
+def warn_undefined_internal : Warning<
+ "%select{function|variable}0 %q1 has internal linkage but is not defined">;
+def note_used_here : Note<"used here">;
def warn_redefinition_of_typedef : Warning<
"redefinition of typedef %0 is invalid in C">,
@@ -1748,6 +2028,10 @@ def err_static_non_static : Error<
"static declaration of %0 follows non-static declaration">;
def err_non_static_static : Error<
"non-static declaration of %0 follows static declaration">;
+def err_extern_non_extern : Error<
+ "extern declaration of %0 follows non-extern declaration">;
+def err_non_extern_extern : Error<
+ "non-extern declaration of %0 follows extern declaration">;
def err_non_thread_thread : Error<
"non-thread-local declaration of %0 follows thread-local declaration">;
def err_thread_non_thread : Error<
@@ -1780,6 +2064,8 @@ def ext_forward_ref_enum : Extension<
"ISO C forbids forward references to 'enum' types">;
def err_forward_ref_enum : Error<
"ISO C++ forbids forward references to 'enum' types">;
+def ext_ms_forward_ref_enum : Extension<
+ "forward references to 'enum' types are a Microsoft extension">, InGroup<Microsoft>;
def ext_forward_ref_enum_def : Extension<
"redeclaration of already-defined enum %0 is a GNU extension">, InGroup<GNU>;
@@ -1809,6 +2095,19 @@ def err_vm_func_decl : Error<
def err_array_too_large : Error<
"array is too large (%0 elements)">;
+// -Wpadded, -Wpacked
+def warn_padded_struct_field : Warning<
+ "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
+ "to align %5">, InGroup<Padded>, DefaultIgnore;
+def warn_padded_struct_anon_field : Warning<
+ "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
+ "to align anonymous bit-field">, InGroup<Padded>, DefaultIgnore;
+def warn_padded_struct_size : Warning<
+ "padding size of %0 with %1 %select{byte|bit}2%select{|s}3 "
+ "to alignment boundary">, InGroup<Padded>, DefaultIgnore;
+def warn_unnecessary_packed : Warning<
+ "packed attribute is unnecessary for %0">, InGroup<Packed>, DefaultIgnore;
+
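A sketch of a layout that trips -Wpadded on typical targets where int is 4-byte aligned (struct name hypothetical):

    struct Example { // with -Wpadded enabled:
      char c;        // "padding struct 'Example' with 3 bytes to align 'i'"
      int  i;
    };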
def err_typecheck_negative_array_size : Error<"array size is negative">;
def warn_typecheck_function_qualifiers : Warning<
"qualifier on function type %0 has unspecified behavior">;
@@ -1881,17 +2180,18 @@ def warn_missing_braces : Warning<
"suggest braces around initialization of subobject">,
InGroup<DiagGroup<"missing-braces">>, DefaultIgnore;
-def err_redefinition_of_label : Error<"redefinition of label '%0'">;
-def err_undeclared_label_use : Error<"use of undeclared label '%0'">;
+def err_redefinition_of_label : Error<"redefinition of label %0">;
+def err_undeclared_label_use : Error<"use of undeclared label %0">;
+def warn_unused_label : Warning<"unused label %0">,
+ InGroup<UnusedLabel>, DefaultIgnore;
def err_goto_into_protected_scope : Error<"goto into protected scope">;
def err_switch_into_protected_scope : Error<
"switch case is in protected scope">;
def err_indirect_goto_without_addrlabel : Error<
"indirect goto in function with no address-of-label expressions">;
-def warn_indirect_goto_in_protected_scope : Warning<
- "indirect goto might cross protected scopes">,
- InGroup<DiagGroup<"label-address-scope">>;
+def err_indirect_goto_in_protected_scope : Error<
+ "indirect goto might cross protected scopes">;
def note_indirect_goto_target : Note<"possible target of indirect goto">;
def note_protected_by_variable_init : Note<
"jump bypasses variable initialization">;
@@ -1953,6 +2253,13 @@ def ext_flexible_array_in_array : Extension<
"%0 may not be used as an array element due to flexible array member">;
def err_flexible_array_init_nonempty : Error<
"non-empty initialization of flexible array member inside subobject">;
+def ext_flexible_array_empty_aggregate : Extension<
+ "flexible array member %0 in otherwise empty %select{struct|class}1 "
+ "is a Microsoft extension">, InGroup<Microsoft>;
+def ext_flexible_array_union : Extension<
+ "flexible array member %0 in a union is a Microsoft extension">,
+ InGroup<Microsoft>;
+
def err_flexible_array_init_needs_braces : Error<
"flexible array requires brace-enclosed initializer">;
def err_illegal_decl_array_of_functions : Error<
@@ -1963,6 +2270,8 @@ def err_illegal_message_expr_incomplete_type : Error<
"objective-c message has incomplete result type %0">;
def err_illegal_decl_array_of_references : Error<
"'%0' declared as array of references of type %1">;
+def err_decl_negative_array_size : Error<
+ "'%0' declared as an array with a negative size">;
def err_array_star_outside_prototype : Error<
"star modifier used outside of function prototype">;
def err_illegal_decl_pointer_to_reference : Error<
@@ -2031,7 +2340,16 @@ def note_precedence_bitwise_silence : Note<
def warn_logical_instead_of_bitwise : Warning<
"use of logical %0 with constant operand; switch to bitwise %1 or "
"remove constant">, InGroup<DiagGroup<"constant-logical-operand">>;
-
+
+def warn_logical_and_in_logical_or : Warning<
+ "'&&' within '||'">, InGroup<LogicalOpParentheses>;
+def note_logical_and_in_logical_or_silence : Note<
+ "place parentheses around the '&&' expression to silence this warning">;
+
+def warn_self_assignment : Warning<
+ "explicitly assigning a variable of type %0 to itself">,
+ InGroup<SelfAssignment>, DefaultIgnore;
+
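A minimal trigger for the (default-off) -Wself-assign warning:

    void touch(int v) {
      v = v; // warning: explicitly assigning a variable of type 'int'
             // to itself
    }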
def err_sizeof_nonfragile_interface : Error<
"invalid application of '%select{alignof|sizeof}1' to interface %0 in "
"non-fragile ABI">;
@@ -2069,6 +2387,11 @@ def err_typecheck_member_reference_type : Error<
def err_typecheck_member_reference_unknown : Error<
"cannot refer to member %0 in %1 with '%select{.|->}2'">;
def err_member_reference_needs_call : Error<
+ "base of member reference is an overloaded function; perhaps you meant "
+ "to call %select{it|the 0-argument overload}0?">;
+def note_member_ref_possible_intended_overload : Note<
+ "possibly valid overload here">;
+def err_member_reference_needs_call_zero_arg : Error<
"base of member reference has function type %0; perhaps you meant to call "
"this function with '()'?">;
def warn_subscript_is_char : Warning<"array subscript is of type 'char'">,
@@ -2078,12 +2401,17 @@ def err_typecheck_incomplete_tag : Error<"incomplete definition of type %0">;
def err_no_member : Error<"no member named %0 in %1">;
def err_member_redeclared : Error<"class member cannot be redeclared">;
+def err_member_name_of_class : Error<"member %0 has the same name as its class">;
def err_member_def_undefined_record : Error<
"out-of-line definition of %0 from class %1 without definition">;
def err_member_def_does_not_match : Error<
"out-of-line definition of %0 does not match any declaration in %1">;
+def err_member_def_does_not_match_ret_type : Error<
+ "out-of-line definition of %q0 differ from the declaration in the return type">;
def err_nonstatic_member_out_of_line : Error<
"non-static data member defined out-of-line">;
+def err_nonstatic_flexible_variable : Error<
+ "non-static initialization of a variable with flexible array member">;
def err_qualified_typedef_declarator : Error<
"typedef declarator cannot be qualified">;
def err_qualified_param_declarator : Error<
@@ -2091,6 +2419,10 @@ def err_qualified_param_declarator : Error<
def ext_out_of_line_declaration : ExtWarn<
"out-of-line declaration of a member must be a definition">,
InGroup<OutOfLineDeclaration>, DefaultError;
+def warn_member_extra_qualification : Warning<
+ "extra qualification on member %0">;
+def err_member_qualification : Error<
+ "non-friend class member %0 cannot have a qualified name">;
def note_member_def_close_match : Note<"member declaration nearly matches">;
def err_typecheck_ivar_variable_size : Error<
"instance variables must have a constant size">;
@@ -2121,7 +2453,7 @@ def err_array_init_not_init_list : Error<
"array initializer must be an initializer "
"list%select{| or string literal}0">;
def warn_deprecated_string_literal_conversion : Warning<
- "conversion from string literal to %0 is deprecated">, InGroup<Deprecated>;
+ "conversion from string literal to %0 is deprecated">, InGroup<DeprecatedWritableStr>;
def err_realimag_invalid_type : Error<"invalid type %0 to %1 operator">;
def err_typecheck_sclass_fscope : Error<
"illegal storage class on file-scoped variable">;
@@ -2151,7 +2483,7 @@ def err_typecheck_unary_expr : Error<
def err_typecheck_indirection_requires_pointer : Error<
"indirection requires pointer operand (%0 invalid)">;
def warn_indirection_through_null : Warning<
- "indirection of non-volatile null pointer will be deleted, not trap">;
+ "indirection of non-volatile null pointer will be deleted, not trap">, InGroup<NullDereference>;
def note_indirection_through_null : Note<
"consider using __builtin_trap() or qualifying pointer with 'volatile'">;
@@ -2200,11 +2532,14 @@ def warn_mixed_sign_conditional : Warning<
"operands of ? are integers of different signs: %0 and %1">,
InGroup<SignCompare>, DefaultIgnore;
def warn_lunsigned_always_true_comparison : Warning<
- "comparison of unsigned expression %0 is always %1">,
- InGroup<SignCompare>, DefaultIgnore;
+ "comparison of unsigned%select{| enum}2 expression %0 is always %1">,
+ InGroup<TautologicalCompare>;
def warn_runsigned_always_true_comparison : Warning<
- "comparison of %0 unsigned expression is always %1">,
- InGroup<SignCompare>, DefaultIgnore;
+ "comparison of %0 unsigned%select{| enum}2 expression is always %1">,
+ InGroup<TautologicalCompare>;
+def warn_comparison_of_mixed_enum_types : Warning<
+ "comparison of two values with different enumeration types (%0 and %1)">,
+ InGroup<DiagGroup<"enum-compare">>;
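An assumed example for the new -Wenum-compare diagnostic:

    enum Fruit { Apple };
    enum Color { Red };
    bool mixed(Fruit f, Color c) {
      return f == c; // warning: comparison of two values with different
                     // enumeration types ('Fruit' and 'Color')
    }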
def err_invalid_this_use : Error<
"invalid use of 'this' outside of a nonstatic member function">;
@@ -2212,11 +2547,26 @@ def err_invalid_member_use_in_static_method : Error<
"invalid use of member %0 in static member function">;
def err_invalid_qualified_function_type : Error<
"type qualifier is not allowed on this function">;
+def err_invalid_ref_qualifier_function_type : Error<
+ "ref-qualifier '%select{&&|&}0' is only allowed on non-static member functions,"
+ " member function pointers, and typedefs of function types">;
+def ext_qualified_function_type_template_arg : ExtWarn<
+ "template argument of '%0' qualified function type is a GNU extension">,
+ InGroup<GNU>;
+
def err_invalid_qualified_function_pointer : Error<
"type qualifier is not allowed on this function %select{pointer|reference}0">;
def err_invalid_qualified_typedef_function_type_use : Error<
"a qualified function type cannot be used to declare a "
"%select{static member|nonmember}0 function">;
+def err_invalid_ref_qualifier_typedef_function_type_use : Error<
+ "%select{static member|nonmember}0 function cannot have a ref-qualifier "
+ "'%select{&&|&}1'">;
+
+def err_ref_qualifier_overload : Error<
+ "cannot overload a member function %select{without a ref-qualifier|with "
+ "ref-qualifier '&'|with ref-qualifier '&&'}0 with a member function %select{"
+ "without a ref-qualifier|with ref-qualifier '&'|with ref-qualifier '&&'}1">;
def err_invalid_non_static_member_use : Error<
"invalid use of nonstatic data member %0">;
@@ -2244,6 +2594,15 @@ def err_ref_array_type : Error<
"cannot refer to declaration with an array type inside block">;
def err_property_not_found : Error<
"property %0 not found on object of type %1">;
+def err_getter_not_found : Error<
+ "expected getter method not found on object of type %0">;
+def err_property_not_found_forward_class : Error<
+ "property %0 cannot be found in forward class object %1">;
+def err_property_not_as_forward_class : Error<
+ "property %0 refers to an incomplete Objective-C class %1 "
+ "(with no @interface available)">;
+def note_forward_class : Note<
+ "forward class is declared here">;
def err_duplicate_property : Error<
"property has a previous declaration">;
def ext_gnu_void_ptr : Extension<
@@ -2265,6 +2624,8 @@ def error_no_subobject_property_setting : Error<
def ext_freestanding_complex : Extension<
"complex numbers are an extension in a freestanding C99 implementation">;
+// FIXME: Remove when we support imaginary.
+def err_imaginary_not_supported : Error<"imaginary types are not supported">;
// Obj-c expressions
def warn_root_inst_method_not_found : Warning<
@@ -2275,14 +2636,16 @@ def warn_inst_method_not_found : Warning<
"method %objcinstance0 not found (return type defaults to 'id')">;
def error_no_super_class_message : Error<
"no @interface declaration found in class messaging of %0">;
-def error_no_super_class : Error<
- "no super class declared in @interface for %0">;
+def error_root_class_cannot_use_super : Error<
+ "%0 cannot use 'super' because it is a root class">;
def err_invalid_receiver_to_message : Error<
"invalid receiver to message expression">;
def err_invalid_receiver_to_message_super : Error<
"'super' is only valid in a method body">;
def err_invalid_receiver_class_message : Error<
"receiver type %0 is not an Objective-C class">;
+def err_missing_open_square_message_send : Error<
+ "missing '[' at start of message send expression">;
def warn_bad_receiver_type : Warning<
"receiver type %0 is not 'id' or interface pointer, consider "
"casting it to 'id'">;
@@ -2332,6 +2695,16 @@ def note_parameter_here : Note<
// C++ casts
// These messages adhere to the TryCast pattern: %0 is an int specifying the
// cast type, %1 is the source type, %2 is the destination type.
+def err_bad_reinterpret_cast_overload : Error<
+ "reinterpret_cast cannot resolve overloaded function %0 to type %1">;
+
+def err_bad_static_cast_overload : Error<
+ "address of overloaded function %0 cannot be static_cast to type %1">;
+
+def err_bad_cstyle_cast_overload : Error<
+ "address of overloaded function %0 cannot be cast to type %1">;
+
+
def err_bad_cxx_cast_generic : Error<
"%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
"functional-style cast}0 from %1 to %2 is not allowed">;
@@ -2389,6 +2762,10 @@ def err_bad_dynamic_cast_not_polymorphic : Error<"%0 is not polymorphic">;
// Other C++ expressions
def err_need_header_before_typeid : Error<
"you need to include <typeinfo> before using the 'typeid' operator">;
+def err_need_header_before_ms_uuidof : Error<
+ "you need to include <guiddef.h> before using the '__uuidof' operator">;
+def err_uuidof_without_guid : Error<
+ "cannot call operator __uuidof on a type with no GUID">;
def err_incomplete_typeid : Error<"'typeid' of incomplete type %0">;
def err_static_illegal_in_new : Error<
"the 'static' modifier for the array size is not legal in new expressions">;
@@ -2420,7 +2797,8 @@ def err_array_size_ambiguous_conversion : Error<
"enumeration type">;
def ext_array_size_conversion : Extension<
"implicit conversion from array size expression of type %0 to "
- "%select{integral|enumeration}1 type %2 is a C++0x extension">;
+ "%select{integral|enumeration}1 type %2 is a C++0x extension">,
+ InGroup<CXX0x>;
def err_default_init_const : Error<
"default initialization of an object of const type %0"
@@ -2434,6 +2812,8 @@ def warn_delete_incomplete : Warning<
"deleting pointer to incomplete type %0 may cause undefined behaviour">;
def err_delete_incomplete_class_type : Warning<
"deleting incomplete class type %0; no conversions to pointer type">;
+def warn_delete_array_type : Warning<
+ "'delete' applied to a pointer-to-array type %0 treated as delete[]">;
def err_no_suitable_delete_member_function_found : Error<
"no suitable member %0 in %1">;
def err_ambiguous_suitable_delete_member_function_found : Error<
@@ -2462,6 +2842,18 @@ def err_bad_memptr_lhs : Error<
def warn_exception_caught_by_earlier_handler : Warning<
"exception of type %0 will be caught by earlier handler">;
def note_previous_exception_handler : Note<"for type %0">;
+def err_exceptions_disabled : Error<
+ "cannot use '%0' with exceptions disabled">;
+def err_objc_exceptions_disabled : Error<
+ "cannot use '%0' with Objective-C exceptions disabled">;
+def warn_non_virtual_dtor : Warning<
+ "%0 has virtual functions but non-virtual destructor">,
+ InGroup<NonVirtualDtor>, DefaultIgnore;
+def warn_overloaded_virtual : Warning<
+ "%q0 hides overloaded virtual %select{function|functions}1">,
+ InGroup<OverloadedVirtual>, DefaultIgnore;
+def note_hidden_overloaded_virtual_declared_here : Note<
+ "hidden overloaded virtual function %q0 declared here">;
def err_conditional_void_nonvoid : Error<
"%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "
@@ -2527,6 +2919,8 @@ def err_not_tag_in_scope : Error<
def err_cannot_form_pointer_to_member_of_reference_type : Error<
"cannot form a pointer-to-member to member %0 of reference type %1">;
+def err_invalid_use_of_bound_member_func : Error<
+ "a bound member function may only be called">;
def err_incomplete_object_call : Error<
"incomplete type in call to object of type %0">;
def err_incomplete_pointer_to_member_return : Error<
@@ -2541,9 +2935,21 @@ def warn_condition_is_idiomatic_assignment : Warning<"using the result "
InGroup<DiagGroup<"idiomatic-parentheses">>, DefaultIgnore;
def note_condition_assign_to_comparison : Note<
"use '==' to turn this assignment into an equality comparison">;
+def note_condition_or_assign_to_comparison : Note<
+ "use '!=' to turn this compound assignment into an inequality comparison">;
def note_condition_assign_silence : Note<
"place parentheses around the assignment to silence this warning">;
+def warn_equality_with_extra_parens : Warning<"equality comparison with "
+ "extraneous parentheses">, InGroup<Parentheses>;
+def note_equality_comparison_to_assign : Note<
+ "use '=' to turn this equality comparison into an assignment">;
+def note_equality_comparison_silence : Note<
+ "remove extraneous parentheses around the comparison to silence this warning">;
+
+def warn_synthesized_ivar_access : Warning<
+ "direct access of synthesized ivar by using property access %0">,
+ InGroup<NonfragileAbi2>, DefaultIgnore;
def warn_ivar_variable_conflict : Warning<
"when default property synthesis is on, "
"%0 lookup will access property ivar instead of global variable">,
@@ -2629,7 +3035,15 @@ def err_typecheck_convert_incompatible_block_pointer : Error<
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
"with an expression of type|to parameter of type|to type}2 %1">;
-
+def err_typecheck_incompatible_address_space : Error<
+ "%select{assigning %1 to %0"
+ "|passing %0 to parameter of type %1"
+ "|returning %0 from a function with result type %1"
+ "|converting %0 to type %1"
+ "|initializing %0 with an expression of type %1"
+ "|sending %0 to parameter of type %1"
+ "|casting %0 to type %1}2"
+ " changes address space of pointer">;
def err_typecheck_convert_ambiguous : Error<
"ambiguity in initializing value of type %0 with initializer of type %1">;
def err_cannot_initialize_decl_noname : Error<
@@ -2688,9 +3102,14 @@ def err_atomic_builtin_pointer_size : Error<
"first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
"type (%0 invalid)">;
-
def err_deleted_function_use : Error<"attempt to use a deleted function">;
+def err_kern_type_not_void_return : Error<
+ "kernel function type %0 must have void return type">;
+def err_config_scalar_return : Error<
+ "CUDA special function 'cudaConfigureCall' must have scalar return type">;
+
+
def err_cannot_pass_objc_interface_to_vararg : Error<
"cannot pass object with interface type %0 by-value through variadic "
"%select{function|block|method}1">;
@@ -2709,6 +3128,8 @@ def err_typecheck_cond_expect_scalar : Error<
"used type %0 where arithmetic or pointer type is required">;
def ext_typecheck_cond_one_void : Extension<
"C99 forbids conditional expressions with only one void side">;
+def err_typecheck_cond_expect_scalar_or_vector : Error<
+ "used type %0 where arithmetic, pointer, or vector type is required">;
def err_typecheck_cast_to_incomplete : Error<
"cast to incomplete type %0">;
def ext_typecheck_cast_nonscalar : Extension<
@@ -2754,6 +3175,8 @@ def err_incomplete_type_used_in_type_trait_expr : Error<
"incomplete type %0 used in type trait expression">;
def err_expected_ident_or_lparen : Error<"expected identifier or '('">;
+def err_typecheck_cond_incompatible_operands_null : Error<
+ "non-pointer operand type %0 incompatible with %select{NULL|nullptr}1">;
} // End of general sema category.
// inline asm.
@@ -2771,6 +3194,8 @@ let CategoryName = "Inline Assembly Issue" in {
def err_asm_tying_incompatible_types : Error<
"unsupported inline asm: input with type %0 matching output with type %1">;
def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">;
+ def warn_asm_label_on_auto_decl : Warning<
+ "ignored asm label '%0' on automatic variable">;
def err_invalid_asm_cast_lvalue : Error<
"invalid use of a cast in a inline asm context requiring an l-value: "
"remove the cast or build with -fheinous-gnu-extensions">;
@@ -2824,10 +3249,15 @@ def err_base_init_direct_and_virtual : Error<
def err_not_direct_base_or_virtual : Error<
"type %0 is not a direct or virtual base of %1">;
-def err_in_class_initializer_non_integral_type : Error<
- "in-class initializer has non-integral, non-enumeration type %0">;
+def err_in_class_initializer_non_const : Error<
+ "non-const static data member must be initialized out of line">;
+def err_in_class_initializer_bad_type : Error<
+ "static data member of type %0 must be initialized out of line">;
+def ext_in_class_initializer_float_type : ExtWarn<
+ "in-class initializer for static data member of type %0 "
+ "is a C++0x extension">, InGroup<CXX0x>;
def err_in_class_initializer_non_constant : Error<
- "in-class initializer is not an integral constant expression">;
+ "in-class initializer is not a constant expression">;
// C++ anonymous unions and GNU anonymous structs/unions
def ext_anonymous_union : Extension<
@@ -2847,6 +3277,9 @@ def err_anonymous_struct_member_redecl : Error<
"member of anonymous struct redeclares %0">;
def err_anonymous_record_with_type : Error<
"types cannot be declared in an anonymous %select{struct|union}0">;
+def ext_anonymous_record_with_type : Extension<
+ "types declared in an anonymous %select{struct|union}0 are a Microsoft "
+ "extension">, InGroup<Microsoft>;
def err_anonymous_record_with_function : Error<
"functions cannot be declared in an anonymous %select{struct|union}0">;
def err_anonymous_record_with_static : Error<
@@ -2856,6 +3289,8 @@ def err_anonymous_record_bad_member : Error<
def err_anonymous_record_nonpublic_member : Error<
"anonymous %select{struct|union}0 cannot contain a "
"%select{private|protected}1 data member">;
+def ext_ms_anonymous_struct : ExtWarn<
+ "anonymous structs are a Microsoft extension">, InGroup<Microsoft>;
// C++ local classes
def err_reference_to_local_var_in_enclosing_function : Error<
@@ -2885,7 +3320,7 @@ def err_memptr_conv_via_virtual : Error<
// C++ access control
def err_conv_to_inaccessible_base : Error<
- "conversion from %0 to inaccessible base class %1">, NoSFINAE;
+ "conversion from %0 to inaccessible base class %1">, AccessControl;
def note_inheritance_specifier_here : Note<
"'%0' inheritance specifier here">;
def note_inheritance_implicitly_private_here : Note<
@@ -2986,7 +3421,16 @@ def warn_not_compound_assign : Warning<
// C++0x explicit conversion operators
def warn_explicit_conversion_functions : Warning<
- "explicit conversion functions are a C++0x extension">;
+ "explicit conversion functions are a C++0x extension">, InGroup<CXX0x>;
+
+def warn_array_index_precedes_bounds : Warning<
+ "array index of '%0' indexes before the beginning of the array">,
+ InGroup<DiagGroup<"array-bounds">>;
+def warn_array_index_exceeds_bounds : Warning<
+ "array index of '%0' indexes past the end of an array (that contains %1 elements)">,
+ InGroup<DiagGroup<"array-bounds">>;
+def note_array_index_out_of_bounds : Note<
+ "array %0 declared here">;
def warn_printf_write_back : Warning<
"use of '%%n' in format string discouraged (potentially insecure)">,
@@ -3054,10 +3498,16 @@ def warn_ret_stack_addr : Warning<
"address of stack memory associated with local variable %0 returned">;
def warn_ret_stack_ref : Warning<
"reference to stack memory associated with local variable %0 returned">;
+def warn_ret_local_temp_addr : Warning<
+ "returning address of local temporary object">;
+def warn_ret_local_temp_ref : Warning<
+ "returning reference to local temporary object">;
def warn_ret_addr_label : Warning<
"returning address of label, which is local">;
def err_ret_local_block : Error<
"returning block that lives on the local stack">;
+def note_ref_var_local_bind : Note<
+ "binding reference variable %0 here">;
// For non-floating point, expressions of the form x == x or x != x
@@ -3065,7 +3515,7 @@ def err_ret_local_block : Error<
// Array comparisons have similar warnings
def warn_comparison_always : Warning<
"%select{self-|array }0comparison always evaluates to %select{false|true|a constant}1">,
- InGroup<DiagGroup<"tautological-compare">>;
+ InGroup<TautologicalCompare>;
def warn_stringcompare : Warning<
"result of comparison against %select{a string literal|@encode}0 is "
@@ -3079,8 +3529,8 @@ def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
def err_expected_block_lbrace : Error<"expected '{' in block literal">;
def err_return_in_block_expression : Error<
"return not allowed in block expression literal">;
-def err_block_returns_array : Error<
- "block declared as returning an array">;
+def err_block_returning_array_function : Error<
+ "block cannot return %select{array|function}0 type %1">;
// CFString checking
@@ -3088,6 +3538,9 @@ def err_cfstring_literal_not_string_constant : Error<
"CFString literal is not a string constant">;
def warn_cfstring_literal_contains_nul_character : Warning<
"CFString literal contains NUL character">;
+def warn_cfstring_truncated : Warning<
+ "input conversion stopped due to an input byte that does not "
+ "belong to the input codeset UTF-8">;
// Statements.
def err_continue_not_in_loop : Error<
@@ -3106,8 +3559,18 @@ def err_duplicate_case : Error<"duplicate case value '%0'">;
def warn_case_empty_range : Warning<"empty case range specified">;
def warn_missing_case_for_condition :
Warning<"no case matching constant switch condition '%0'">;
-def warn_missing_cases : Warning<"enumeration value %0 not handled in switch">,
+def warn_missing_case1 : Warning<"enumeration value %0 not handled in switch">,
+ InGroup<DiagGroup<"switch-enum"> >;
+def warn_missing_case2 : Warning<
+ "enumeration values %0 and %1 not handled in switch">,
+ InGroup<DiagGroup<"switch-enum"> >;
+def warn_missing_case3 : Warning<
+ "enumeration values %0, %1, and %2 not handled in switch">,
InGroup<DiagGroup<"switch-enum"> >;
+def warn_missing_cases : Warning<
+ "%0 enumeration values not handled in switch: %1, %2, %3...">,
+ InGroup<DiagGroup<"switch-enum"> >;
+
def warn_not_in_enum : Warning<"case value not in enumerated type %0">,
InGroup<DiagGroup<"switch-enum"> >;
def err_typecheck_statement_requires_scalar : Error<
@@ -3225,6 +3688,8 @@ def ext_c99_array_usage : Extension<
"use of C99-specific array features, accepted as an extension">;
def err_c99_array_usage_cxx : Error<
"C99-specific array features are not permitted in C++">;
+def err_double_requires_fp64 : Error<
+ "use of type 'double' requires cl_khr_fp64 extension to be enabled">;
def note_getter_unavailable : Note<
"or because setter is declared here, but no getter method %0 is found">;
@@ -3235,9 +3700,9 @@ def warn_ivar_use_hidden : Warning<
def error_ivar_use_in_class_method : Error<
"instance variable %0 accessed in class method">;
def error_private_ivar_access : Error<"instance variable %0 is private">,
- NoSFINAE;
+ AccessControl;
def error_protected_ivar_access : Error<"instance variable %0 is protected">,
- NoSFINAE;
+ AccessControl;
def warn_maynot_respond : Warning<"%0 may not respond to %1">;
def warn_attribute_method_def : Warning<
"method attribute can only be specified on method declarations">;
@@ -3284,6 +3749,9 @@ def err_using_directive_suggest : Error<
def err_using_directive_member_suggest : Error<
"no namespace named %0 in %1; did you mean %2?">;
def note_namespace_defined_here : Note<"namespace %0 defined here">;
+def err_sizeof_pack_no_pack_name_suggest : Error<
+ "%0 does not refer to the name of a parameter pack; did you mean %1?">;
+def note_parameter_pack_here : Note<"parameter pack %0 declared here">;
} // end of sema category
} // end of sema component.
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
index e71f51a..563157f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_FILEMANAGER_H
#define LLVM_CLANG_FILEMANAGER_H
+#include "clang/Basic/FileSystemOptions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -22,12 +23,20 @@
#include "llvm/Config/config.h" // for mode_t
// FIXME: Enhance libsystem to support inode and other fields in stat.
#include <sys/types.h>
-#include <sys/stat.h>
+
+struct stat;
+
+namespace llvm {
+class MemoryBuffer;
+namespace sys { class Path; }
+}
namespace clang {
class FileManager;
-
-/// DirectoryEntry - Cached information about one directory on the disk.
+class FileSystemStatCache;
+
+/// DirectoryEntry - Cached information about one directory (either on
+/// the disk or in the virtual file system).
///
class DirectoryEntry {
const char *Name; // Name of the directory.
@@ -37,7 +46,9 @@ public:
const char *getName() const { return Name; }
};
-/// FileEntry - Cached information about one file on the disk.
+/// FileEntry - Cached information about one file (either on the disk
+/// or in the virtual file system). If the 'FD' member is valid, then
+/// this FileEntry has an open file descriptor for the file.
///
class FileEntry {
const char *Name; // Name of the file.
@@ -48,12 +59,29 @@ class FileEntry {
dev_t Device; // ID for the device containing the file.
ino_t Inode; // Inode number for the file.
mode_t FileMode; // The file mode as returned by 'stat'.
+
+ /// FD - The file descriptor for the file entry if it is opened and owned
+ /// by the FileEntry. If not, this is set to -1.
+ mutable int FD;
friend class FileManager;
+
public:
FileEntry(dev_t device, ino_t inode, mode_t m)
- : Name(0), Device(device), Inode(inode), FileMode(m) {}
+ : Name(0), Device(device), Inode(inode), FileMode(m), FD(-1) {}
// Add a default constructor for use with llvm::StringMap
- FileEntry() : Name(0), Device(0), Inode(0), FileMode(0) {}
+ FileEntry() : Name(0), Device(0), Inode(0), FileMode(0), FD(-1) {}
+
+ FileEntry(const FileEntry &FE) {
+ memcpy(this, &FE, sizeof(FE));
+ assert(FD == -1 && "Cannot copy a file-owning FileEntry");
+ }
+
+ void operator=(const FileEntry &FE) {
+ memcpy(this, &FE, sizeof(FE));
+ assert(FD == -1 && "Cannot assign a file-owning FileEntry");
+ }
+
+ ~FileEntry();
const char *getName() const { return Name; }
off_t getSize() const { return Size; }
@@ -67,111 +95,68 @@ public:
///
const DirectoryEntry *getDir() const { return Dir; }
- bool operator<(const FileEntry& RHS) const {
+ bool operator<(const FileEntry &RHS) const {
return Device < RHS.Device || (Device == RHS.Device && Inode < RHS.Inode);
}
};
-/// \brief Abstract interface for introducing a FileManager cache for 'stat'
-/// system calls, which is used by precompiled and pretokenized headers to
-/// improve performance.
-class StatSysCallCache {
-protected:
- llvm::OwningPtr<StatSysCallCache> NextStatCache;
-
-public:
- virtual ~StatSysCallCache() {}
- virtual int stat(const char *path, struct stat *buf) {
- if (getNextStatCache())
- return getNextStatCache()->stat(path, buf);
-
- return ::stat(path, buf);
- }
-
- /// \brief Sets the next stat call cache in the chain of stat caches.
- /// Takes ownership of the given stat cache.
- void setNextStatCache(StatSysCallCache *Cache) {
- NextStatCache.reset(Cache);
- }
-
- /// \brief Retrieve the next stat call cache in the chain.
- StatSysCallCache *getNextStatCache() { return NextStatCache.get(); }
-
- /// \brief Retrieve the next stat call cache in the chain, transferring
- /// ownership of this cache (and, transitively, all of the remaining caches)
- /// to the caller.
- StatSysCallCache *takeNextStatCache() { return NextStatCache.take(); }
-};
-
-/// \brief A stat "cache" that can be used by FileManager to keep
-/// track of the results of stat() calls that occur throughout the
-/// execution of the front end.
-class MemorizeStatCalls : public StatSysCallCache {
-public:
- /// \brief The result of a stat() call.
- ///
- /// The first member is the result of calling stat(). If stat()
- /// found something, the second member is a copy of the stat
- /// structure.
- typedef std::pair<int, struct stat> StatResult;
-
- /// \brief The set of stat() calls that have been
- llvm::StringMap<StatResult, llvm::BumpPtrAllocator> StatCalls;
-
- typedef llvm::StringMap<StatResult, llvm::BumpPtrAllocator>::const_iterator
- iterator;
-
- iterator begin() const { return StatCalls.begin(); }
- iterator end() const { return StatCalls.end(); }
-
- virtual int stat(const char *path, struct stat *buf);
-};
-
/// FileManager - Implements support for file system lookup, file system
/// caching, and directory search management. This also handles more advanced
/// properties, such as uniquing files based on "inode", so that a file with two
/// names (e.g. symlinked) will be treated as a single file.
///
class FileManager {
+ FileSystemOptions FileSystemOpts;
class UniqueDirContainer;
class UniqueFileContainer;
- /// UniqueDirs/UniqueFiles - Cache for existing directories/files.
+ /// UniqueRealDirs/UniqueRealFiles - Cache for existing real directories/files.
///
- UniqueDirContainer &UniqueDirs;
- UniqueFileContainer &UniqueFiles;
+ UniqueDirContainer &UniqueRealDirs;
+ UniqueFileContainer &UniqueRealFiles;
- /// DirEntries/FileEntries - This is a cache of directory/file entries we have
- /// looked up. The actual Entry is owned by UniqueFiles/UniqueDirs above.
+ /// \brief The virtual directories that we have allocated. For each
+ /// virtual file (e.g. foo/bar/baz.cpp), we add all of its parent
+ /// directories (foo/ and foo/bar/) here.
+ llvm::SmallVector<DirectoryEntry*, 4> VirtualDirectoryEntries;
+ /// \brief The virtual files that we have allocated.
+ llvm::SmallVector<FileEntry*, 4> VirtualFileEntries;
+
+ /// SeenDirEntries/SeenFileEntries - This is a cache that maps paths
+ /// to directory/file entries (either real or virtual) we have
+ /// looked up. The actual Entries for real directories/files are
+ /// owned by UniqueRealDirs/UniqueRealFiles above, while the Entries
+ /// for virtual directories/files are owned by
+ /// VirtualDirectoryEntries/VirtualFileEntries above.
///
- llvm::StringMap<DirectoryEntry*, llvm::BumpPtrAllocator> DirEntries;
- llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator> FileEntries;
+ llvm::StringMap<DirectoryEntry*, llvm::BumpPtrAllocator> SeenDirEntries;
+ llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator> SeenFileEntries;
/// NextFileUID - Each FileEntry we create is assigned a unique ID #.
///
unsigned NextFileUID;
- /// \brief The virtual files that we have allocated.
- llvm::SmallVector<FileEntry *, 4> VirtualFileEntries;
-
// Statistics.
unsigned NumDirLookups, NumFileLookups;
unsigned NumDirCacheMisses, NumFileCacheMisses;
// Caching.
- llvm::OwningPtr<StatSysCallCache> StatCache;
+ llvm::OwningPtr<FileSystemStatCache> StatCache;
- int stat_cached(const char* path, struct stat* buf) {
- return StatCache.get() ? StatCache->stat(path, buf) : stat(path, buf);
- }
+ bool getStatValue(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor);
+
+ /// Add all ancestors of the given path (pointing to either a file
+ /// or a directory) as virtual directories.
+ void addAncestorsAsVirtualDirs(llvm::StringRef Path);
public:
- FileManager();
+ FileManager(const FileSystemOptions &FileSystemOpts);
~FileManager();
- /// \brief Installs the provided StatSysCallCache object within
- /// the FileManager.
+ /// \brief Installs the provided FileSystemStatCache object within
+ /// the FileManager.
///
/// Ownership of this object is transferred to the FileManager.
///
@@ -181,33 +166,46 @@ public:
/// \param AtBeginning whether this new stat cache must be installed at the
/// beginning of the chain of stat caches. Otherwise, it will be added to
/// the end of the chain.
- void addStatCache(StatSysCallCache *statCache, bool AtBeginning = false);
+ void addStatCache(FileSystemStatCache *statCache, bool AtBeginning = false);
- /// \brief Removes the provided StatSysCallCache object from the file manager.
- void removeStatCache(StatSysCallCache *statCache);
-
- /// getDirectory - Lookup, cache, and verify the specified directory. This
- /// returns null if the directory doesn't exist.
+ /// \brief Removes the specified FileSystemStatCache object from the manager.
+ void removeStatCache(FileSystemStatCache *statCache);
+
+ /// getDirectory - Lookup, cache, and verify the specified directory
+ /// (real or virtual). This returns NULL if the directory doesn't exist.
///
- const DirectoryEntry *getDirectory(llvm::StringRef Filename) {
- return getDirectory(Filename.begin(), Filename.end());
- }
- const DirectoryEntry *getDirectory(const char *FileStart,const char *FileEnd);
+ const DirectoryEntry *getDirectory(llvm::StringRef DirName);
- /// getFile - Lookup, cache, and verify the specified file. This returns null
- /// if the file doesn't exist.
+ /// getFile - Lookup, cache, and verify the specified file (real or
+ /// virtual). This returns NULL if the file doesn't exist.
///
- const FileEntry *getFile(llvm::StringRef Filename) {
- return getFile(Filename.begin(), Filename.end());
- }
- const FileEntry *getFile(const char *FilenameStart,
- const char *FilenameEnd);
+ const FileEntry *getFile(llvm::StringRef Filename);
/// \brief Retrieve a file entry for a "virtual" file that acts as
/// if there were a file with the given name on disk. The file
/// itself is not accessed.
const FileEntry *getVirtualFile(llvm::StringRef Filename, off_t Size,
time_t ModificationTime);
+
+ /// \brief Open the specified file as a MemoryBuffer, returning a new
+ /// MemoryBuffer if successful, otherwise returning null.
+ llvm::MemoryBuffer *getBufferForFile(const FileEntry *Entry,
+ std::string *ErrorStr = 0);
+ llvm::MemoryBuffer *getBufferForFile(llvm::StringRef Filename,
+ std::string *ErrorStr = 0);
+
+ /// \brief If path is not absolute and FileSystemOptions set the working
+ /// directory, the path is modified to be relative to the given
+ /// working directory.
+ static void FixupRelativePath(llvm::sys::Path &path,
+ const FileSystemOptions &FSOpts);
+
+
+ /// \brief Produce an array mapping from the unique IDs assigned to each
+ /// file to the corresponding FileEntry pointer.
+ void GetUniqueIDMapping(
+ llvm::SmallVectorImpl<const FileEntry *> &UIDToFiles) const;
+
void PrintStats() const;
};
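A rough usage sketch of the reworked interface (driver code hypothetical): FileManager now takes FileSystemOptions at construction and can hand back file contents directly via getBufferForFile:

    #include "clang/Basic/FileManager.h"
    #include "llvm/Support/MemoryBuffer.h"

    void readOne() {
      clang::FileSystemOptions FSOpts;
      FSOpts.WorkingDir = "/tmp";        // relative lookups resolve here
      clang::FileManager FM(FSOpts);
      if (const clang::FileEntry *FE = FM.getFile("input.c")) {
        std::string Err;
        if (llvm::MemoryBuffer *Buf = FM.getBufferForFile(FE, &Err)) {
          // ... use Buf->getBuffer() ...
          delete Buf;                    // caller owns the buffer here
        }
      }
    }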
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
new file mode 100644
index 0000000..81e928d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
@@ -0,0 +1,31 @@
+//===--- FileSystemOptions.h - File System Options --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemOptions interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_FILESYSTEMOPTIONS_H
+#define LLVM_CLANG_BASIC_FILESYSTEMOPTIONS_H
+
+#include <string>
+
+namespace clang {
+
+/// \brief Keeps track of options that affect how file operations are performed.
+class FileSystemOptions {
+public:
+ /// \brief If set, paths are resolved as if the working directory was
+ /// set to the value of WorkingDir.
+ std::string WorkingDir;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
new file mode 100644
index 0000000..77828b3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
@@ -0,0 +1,101 @@
+//===--- FileSystemStatCache.h - Caching for 'stat' calls -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemStatCache interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FILESYSTEMSTATCACHE_H
+#define LLVM_CLANG_FILESYSTEMSTATCACHE_H
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+
+namespace clang {
+
+/// \brief Abstract interface for introducing a FileManager cache for 'stat'
+/// system calls, which is used by precompiled and pretokenized headers to
+/// improve performance.
+class FileSystemStatCache {
+protected:
+ llvm::OwningPtr<FileSystemStatCache> NextStatCache;
+
+public:
+ virtual ~FileSystemStatCache() {}
+
+ enum LookupResult {
+ CacheExists, ///< We know the file exists and have its cached stat data.
+ CacheMissing ///< We know that the file doesn't exist.
+ };
+
+ /// FileSystemStatCache::get - Get the 'stat' information for the specified
+ /// path, using the cache to accelerate it if possible. This returns true if
+ /// the path does not exist or false if it exists.
+ ///
+ /// If FileDescriptor is non-null, then this lookup should only return success
+ /// for files (not directories). If it is null, this lookup should only return
+ /// success for directories (not files). On a successful file lookup, the
+ /// implementation can optionally fill in FileDescriptor with a valid
+ /// descriptor and the client guarantees that it will close it.
+ static bool get(const char *Path, struct stat &StatBuf, int *FileDescriptor,
+ FileSystemStatCache *Cache);
+
+
+ /// \brief Sets the next stat call cache in the chain of stat caches.
+ /// Takes ownership of the given stat cache.
+ void setNextStatCache(FileSystemStatCache *Cache) {
+ NextStatCache.reset(Cache);
+ }
+
+ /// \brief Retrieve the next stat call cache in the chain.
+ FileSystemStatCache *getNextStatCache() { return NextStatCache.get(); }
+
+ /// \brief Retrieve the next stat call cache in the chain, transferring
+ /// ownership of this cache (and, transitively, all of the remaining caches)
+ /// to the caller.
+ FileSystemStatCache *takeNextStatCache() { return NextStatCache.take(); }
+
+protected:
+ virtual LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) = 0;
+
+ LookupResult statChained(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ if (FileSystemStatCache *Next = getNextStatCache())
+ return Next->getStat(Path, StatBuf, FileDescriptor);
+
+ // If we hit the end of the list of stat caches to try, just compute and
+ // return it without a cache.
+ return get(Path, StatBuf, FileDescriptor, 0) ? CacheMissing : CacheExists;
+ }
+};
+
+/// \brief A stat "cache" that can be used by FileManager to keep
+/// track of the results of stat() calls that occur throughout the
+/// execution of the front end.
+class MemorizeStatCalls : public FileSystemStatCache {
+public:
+ /// \brief The set of stat() calls that have been seen.
+ llvm::StringMap<struct stat, llvm::BumpPtrAllocator> StatCalls;
+
+ typedef llvm::StringMap<struct stat, llvm::BumpPtrAllocator>::const_iterator
+ iterator;
+
+ iterator begin() const { return StatCalls.begin(); }
+ iterator end() const { return StatCalls.end(); }
+
+ virtual LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor);
+};
+
+} // end namespace clang
+
+#endif
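For illustration, a hedged sketch of recording stat() traffic with MemorizeStatCalls; FileManager::addStatCache() is assumed from elsewhere in this tree and is assumed to take ownership of the cache, and the helper name is a placeholder:

#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
#include "llvm/Support/raw_ostream.h"

void recordStatCalls(clang::FileManager &FM) {  // placeholder helper
  clang::MemorizeStatCalls *Cache = new clang::MemorizeStatCalls();
  FM.addStatCache(Cache);  // assumed: FileManager now owns Cache
  FM.getFile("unistd.h");  // any lookup funnels through the cache chain
  for (clang::MemorizeStatCalls::iterator I = Cache->begin(),
                                          E = Cache->end();
       I != E; ++I)
    llvm::errs() << I->getKey() << "\n";  // each stat'ed path
}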
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index 24fe086..d576643 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -22,8 +22,9 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
-#include <string>
#include <cassert>
+#include <cctype>
+#include <string>
namespace llvm {
template <typename T> struct DenseMapInfo;
@@ -53,7 +54,7 @@ class IdentifierInfo {
// Objective-C keyword ('protocol' in '@protocol') or builtin (__builtin_inf).
// First NUM_OBJC_KEYWORDS values are for Objective-C, the remaining values
// are for builtins.
- unsigned ObjCOrBuiltinID :10;
+ unsigned ObjCOrBuiltinID :11;
bool HasMacro : 1; // True if there is a #define for this.
bool IsExtension : 1; // True if identifier is a lang extension.
bool IsPoisoned : 1; // True if identifier is poisoned.
@@ -63,7 +64,7 @@ class IdentifierInfo {
// file and wasn't modified since.
bool RevertedTokenID : 1; // True if RevertTokenIDToIdentifier was
// called.
- // 7 bits left in 32-bit word.
+ // 6 bits left in 32-bit word.
void *FETokenInfo; // Managed by the language front-end.
llvm::StringMapEntry<IdentifierInfo*> *Entry;
@@ -71,7 +72,7 @@ class IdentifierInfo {
void operator=(const IdentifierInfo&); // NONASSIGNABLE.
friend class IdentifierTable;
-
+
public:
IdentifierInfo();
@@ -254,6 +255,35 @@ private:
}
};
+/// \brief An iterator that walks over all of the known identifiers
+/// in the lookup table.
+///
+/// Since this iterator works through an abstract interface via virtual
+/// functions, it follows an object-oriented style rather than the
+/// more standard C++ STL iterator interface. In this OO-style
+/// iteration, the single function \c Next() provides dereference,
+/// advance, and end-of-sequence checking in a single
+/// operation. Subclasses of this iterator type will provide the
+/// actual functionality.
+class IdentifierIterator {
+private:
+ IdentifierIterator(const IdentifierIterator&); // Do not implement
+ IdentifierIterator &operator=(const IdentifierIterator&); // Do not implement
+
+protected:
+ IdentifierIterator() { }
+
+public:
+ virtual ~IdentifierIterator();
+
+ /// \brief Retrieve the next string in the identifier table and
+ /// advance the iterator to the following string.
+ ///
+ /// \returns The next string in the identifier table. If there is
+ /// no such string, returns an empty \c llvm::StringRef.
+ virtual llvm::StringRef Next() = 0;
+};
+
/// IdentifierInfoLookup - An abstract class used by IdentifierTable that
/// provides an interface for performing lookups from strings
/// (const char *) to IdentifierInfo objects.
@@ -266,6 +296,18 @@ public:
/// of a reference. If the pointer is NULL then the IdentifierInfo cannot
/// be found.
virtual IdentifierInfo* get(llvm::StringRef Name) = 0;
+
+ /// \brief Retrieve an iterator into the set of all identifiers
+ /// known to this identifier lookup source.
+ ///
+ /// This routine provides access to all of the identifiers known to
+ /// the identifier lookup, allowing access to the contents of the
+ /// identifiers without introducing the overhead of constructing
+ /// IdentifierInfo objects for each.
+ ///
+ /// \returns A new iterator into the set of known identifiers. The
+ /// caller is responsible for deleting this iterator.
+ virtual IdentifierIterator *getIdentifiers() const;
};
/// \brief An abstract class used to resolve numerical identifier
@@ -304,6 +346,11 @@ public:
ExternalLookup = IILookup;
}
+ /// \brief Retrieve the external identifier lookup object, if any.
+ IdentifierInfoLookup *getExternalIdentifierLookup() const {
+ return ExternalLookup;
+ }
+
llvm::BumpPtrAllocator& getAllocator() {
return HashTable.getAllocator();
}
@@ -463,8 +510,33 @@ public:
return getIdentifierInfoFlag() == ZeroArg;
}
unsigned getNumArgs() const;
+
+
+ /// \brief Retrieve the identifier at a given position in the selector.
+ ///
+ /// Note that the identifier pointer returned may be NULL. Clients that only
+ /// care about the text of the identifier string, and not the specific,
+ /// uniqued identifier pointer, should use \c getNameForSlot(), which returns
+ /// an empty string when the identifier pointer would be NULL.
+ ///
+ /// \param argIndex The index for which we want to retrieve the identifier.
+ /// This index shall be less than \c getNumArgs() unless this is a keyword
+ /// selector, in which case 0 is the only permissible value.
+ ///
+ /// \returns the uniqued identifier for this slot, or NULL if this slot has
+ /// no corresponding identifier.
IdentifierInfo *getIdentifierInfoForSlot(unsigned argIndex) const;
-
+
+ /// \brief Retrieve the name at a given position in the selector.
+ ///
+ /// \param argIndex The index for which we want to retrieve the name.
+ /// This index shall be less than \c getNumArgs() unless this is a keyword
+ /// selector, in which case 0 is the only permissible value.
+ ///
+ /// \returns the name for this slot, which may be the empty string if no
+ /// name was supplied.
+ llvm::StringRef getNameForSlot(unsigned argIndex) const;
+
/// getAsString - Derive the full selector name (e.g. "foo:bar:") and return
/// it as an std::string.
std::string getAsString() const;
@@ -570,6 +642,17 @@ struct DenseMapInfo<clang::Selector> {
template <>
struct isPodLike<clang::Selector> { static const bool value = true; };
+template<>
+class PointerLikeTypeTraits<clang::Selector> {
+public:
+ static inline const void *getAsVoidPointer(clang::Selector P) {
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::Selector getFromVoidPointer(const void *P) {
+ return clang::Selector(reinterpret_cast<uintptr_t>(P));
+ }
+ enum { NumLowBitsAvailable = 0 };
+};
// Provide PointerLikeTypeTraits for IdentifierInfo pointers, which
// are not guaranteed to be 8-byte aligned.
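A short sketch of the OO-style iteration the new IdentifierIterator enables, built only from the API shown above (the helper name is a placeholder):

#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"

void dumpKnownIdentifiers(clang::IdentifierTable &Table) {  // placeholder
  clang::IdentifierInfoLookup *Ext = Table.getExternalIdentifierLookup();
  if (!Ext)
    return;
  // getIdentifiers() transfers ownership of the iterator to the caller.
  llvm::OwningPtr<clang::IdentifierIterator> It(Ext->getIdentifiers());
  // Next() dereferences, advances, and signals end-of-sequence (an empty
  // StringRef) in a single call.
  for (llvm::StringRef Name = It->Next(); !Name.empty(); Name = It->Next())
    llvm::errs() << Name << "\n";
}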
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
index 5d1ec67..b148943 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LANGOPTIONS_H
#include <string>
+#include "clang/Basic/Visibility.h"
namespace clang {
@@ -43,6 +44,8 @@ public:
unsigned ObjC2 : 1; // Objective-C 2 support enabled.
unsigned ObjCNonFragileABI : 1; // Objective-C modern abi enabled
unsigned ObjCNonFragileABI2 : 1; // Objective-C enhanced modern abi enabled
+ unsigned ObjCDefaultSynthProperties : 1; // Objective-C auto-synthesized properties.
+ unsigned AppleKext : 1; // Allow Apple kext features.
unsigned PascalStrings : 1; // Allow Pascal strings
unsigned WritableStrings : 1; // Allow writable strings
@@ -51,8 +54,10 @@ public:
unsigned AltiVec : 1; // Support AltiVec-style vector initializers.
unsigned Exceptions : 1; // Support exception handling.
unsigned SjLjExceptions : 1; // Use setjmp-longjmp exception handling.
+ unsigned ObjCExceptions : 1; // Support Objective-C exceptions.
unsigned RTTI : 1; // Support RTTI information.
+ unsigned MSBitfields : 1; // MS-compatible structure layout
unsigned NeXTRuntime : 1; // Use NeXT runtime.
unsigned Freestanding : 1; // Freestanding implementation
unsigned FormatExtensions : 1; // FreeBSD format extensions (-fformat-extensions)
@@ -90,8 +95,12 @@ public:
unsigned CharIsSigned : 1; // Whether char is a signed or unsigned type
unsigned ShortWChar : 1; // Force wchar_t to be unsigned short int.
+ unsigned ShortEnums : 1; // The enum type will be equivalent to the
+ // smallest integer type with enough room.
+
unsigned OpenCL : 1; // OpenCL C99 language extensions.
-
+ unsigned CUDA : 1; // CUDA C++ language extensions.
+
unsigned AssumeSaneOperatorNew : 1; // Whether to add __attribute__((malloc))
// to the declaration of C++'s new
// operators
@@ -102,10 +111,16 @@ public:
unsigned DumpVTableLayouts : 1; /// Dump the layouts of emitted vtables.
unsigned NoConstantCFStrings : 1; // Do not do CF strings
unsigned InlineVisibilityHidden : 1; // Whether inline C++ methods have
- // hidden visibility by default.
+ // hidden visibility by default.
unsigned SpellChecking : 1; // Whether to perform spell-checking for error
// recovery.
+ unsigned SinglePrecisionConstants : 1; // Whether to treat double-precision
+ // floating point constants as
+ // single precision constants.
+ unsigned FastRelaxedMath : 1; // OpenCL fast relaxed math (on its own,
+ // defines __FAST_RELAXED_MATH__).
+ unsigned DefaultFPContract : 1; // Default setting for FP_CONTRACT
// FIXME: This is just a temporary option, for testing purposes.
unsigned NoBitFieldTypeAlign : 1;
@@ -119,47 +134,56 @@ private:
public:
unsigned InstantiationDepth; // Maximum template instantiation depth.
+ unsigned NumLargeByValueCopy; // Warn if parameter/return value is larger
+ // in bytes than this setting. 0 is no check.
+
+ // Version of Microsoft Visual C/C++ we are pretending to be. This is
+ // temporary until we support all MS extensions used in Windows SDK and stdlib
+ // headers. Sets _MSC_VER.
+ unsigned MSCVersion;
std::string ObjCConstantStringClass;
enum GCMode { NonGC, GCOnly, HybridGC };
enum StackProtectorMode { SSPOff, SSPOn, SSPReq };
- enum VisibilityMode {
- Default,
- Protected,
- Hidden
- };
-
+
enum SignedOverflowBehaviorTy {
SOB_Undefined, // Default C standard behavior.
SOB_Defined, // -fwrapv
SOB_Trapping // -ftrapv
};
+ /// The name of the handler function to be called when -ftrapv is specified.
+ /// If none is specified, abort (GCC-compatible behavior).
+ std::string OverflowHandler;
LangOptions() {
Trigraphs = BCPLComment = Bool = DollarIdents = AsmPreprocessor = 0;
GNUMode = GNUKeywords = ImplicitInt = Digraphs = 0;
HexFloats = 0;
GC = ObjC1 = ObjC2 = ObjCNonFragileABI = ObjCNonFragileABI2 = 0;
+ AppleKext = 0;
+ ObjCDefaultSynthProperties = 0;
NoConstantCFStrings = 0; InlineVisibilityHidden = 0;
C99 = Microsoft = Borland = CPlusPlus = CPlusPlus0x = 0;
CXXOperatorNames = PascalStrings = WritableStrings = ConstStrings = 0;
Exceptions = SjLjExceptions = Freestanding = NoBuiltin = 0;
+ ObjCExceptions = 1;
+ MSBitfields = 0;
NeXTRuntime = 1;
RTTI = 1;
LaxVectorConversions = 1;
HeinousExtensions = 0;
- AltiVec = OpenCL = StackProtector = 0;
+ AltiVec = OpenCL = CUDA = StackProtector = 0;
+
+ SymbolVisibility = (unsigned) DefaultVisibility;
- SymbolVisibility = (unsigned) Default;
-
ThreadsafeStatics = 1;
POSIXThreads = 0;
Blocks = 0;
EmitAllDecls = 0;
MathErrno = 1;
SignedOverflowBehavior = SOB_Undefined;
-
+
AssumeSaneOperatorNew = 1;
AccessControl = 1;
ElideConstructors = 1;
@@ -169,6 +193,9 @@ public:
InstantiationDepth = 1024;
+ NumLargeByValueCopy = 0;
+ MSCVersion = 0;
+
Optimize = 0;
OptimizeSize = 0;
@@ -180,10 +207,14 @@ public:
CharIsSigned = 1;
ShortWChar = 0;
+ ShortEnums = 0;
CatchUndefined = 0;
DumpRecordLayouts = 0;
DumpVTableLayouts = 0;
SpellChecking = 1;
+ SinglePrecisionConstants = 0;
+ FastRelaxedMath = 0;
+ DefaultFPContract = 0;
NoBitFieldTypeAlign = 0;
}
@@ -197,17 +228,44 @@ public:
StackProtector = static_cast<unsigned>(m);
}
- VisibilityMode getVisibilityMode() const {
- return (VisibilityMode) SymbolVisibility;
+ Visibility getVisibilityMode() const {
+ return (Visibility) SymbolVisibility;
}
- void setVisibilityMode(VisibilityMode v) { SymbolVisibility = (unsigned) v; }
-
+ void setVisibilityMode(Visibility v) { SymbolVisibility = (unsigned) v; }
+
SignedOverflowBehaviorTy getSignedOverflowBehavior() const {
return (SignedOverflowBehaviorTy)SignedOverflowBehavior;
}
void setSignedOverflowBehavior(SignedOverflowBehaviorTy V) {
SignedOverflowBehavior = (unsigned)V;
}
+
+ bool areExceptionsEnabled() const {
+ return Exceptions;
+ }
+};
+
+/// Floating point control options
+class FPOptions {
+public:
+ unsigned fp_contract : 1;
+
+ FPOptions() : fp_contract(0) {}
+
+ FPOptions(const LangOptions &LangOpts) :
+ fp_contract(LangOpts.DefaultFPContract) {}
+};
+
+/// OpenCL extension options
+class OpenCLOptions {
+public:
+#define OPENCLEXT(nm) unsigned nm : 1;
+#include "clang/Basic/OpenCLExtensions.def"
+
+ OpenCLOptions() {
+#define OPENCLEXT(nm) nm = 0;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
};
} // end namespace clang
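The OpenCLOptions class above relies on the X-macro idiom: each inclusion of OpenCLExtensions.def re-expands OPENCLEXT(nm) under a different definition. Roughly, the preprocessor output looks like this (two entries shown, the rest elided):

class OpenCLOptions {
public:
  // First inclusion: OPENCLEXT(nm) -> "unsigned nm : 1;"
  unsigned cl_khr_fp64 : 1;
  unsigned cl_khr_fp16 : 1;
  // ...one bit-field per OPENCLEXT entry...

  OpenCLOptions() {
    // Second inclusion: OPENCLEXT(nm) -> "nm = 0;"
    cl_khr_fp64 = 0;
    cl_khr_fp16 = 0;
    // ...
  }
};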
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
index 8909e47..267ecbc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
@@ -15,10 +15,10 @@
#define LLVM_CLANG_BASIC_ON_DISK_HASH_TABLE_H
#include "llvm/Support/Allocator.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
+#include "llvm/Support/Host.h"
#include <cassert>
#include <cstdlib>
@@ -320,7 +320,7 @@ public:
InfoPtr->ReadKey((const unsigned char* const) Items, L.first);
// If the key doesn't match, just skip reading the value.
- if (!Info::EqualKey(X, iKey)) {
+ if (!InfoPtr->EqualKey(X, iKey)) {
Items += item_len;
continue;
}
@@ -334,6 +334,70 @@ public:
iterator end() const { return iterator(); }
+ /// \brief Iterates over all of the keys in the table.
+ class key_iterator {
+ const unsigned char* Ptr;
+ unsigned NumItemsInBucketLeft;
+ unsigned NumEntriesLeft;
+ Info *InfoObj;
+ public:
+ typedef external_key_type value_type;
+
+ key_iterator(const unsigned char* const Ptr, unsigned NumEntries,
+ Info *InfoObj)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries),
+ InfoObj(InfoObj) { }
+ key_iterator()
+ : Ptr(0), NumItemsInBucketLeft(0), NumEntriesLeft(0), InfoObj(0) { }
+
+ friend bool operator==(const key_iterator &X, const key_iterator &Y) {
+ return X.NumEntriesLeft == Y.NumEntriesLeft;
+ }
+ friend bool operator!=(const key_iterator& X, const key_iterator &Y) {
+ return X.NumEntriesLeft != Y.NumEntriesLeft;
+ }
+
+ key_iterator& operator++() { // Preincrement
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft = io::ReadUnalignedLE16(Ptr);
+ }
+ Ptr += 4; // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ return *this;
+ }
+ key_iterator operator++(int) { // Postincrement
+ key_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ value_type operator*() const {
+ const unsigned char* LocalPtr = Ptr;
+ if (!NumItemsInBucketLeft)
+ LocalPtr += 2; // number of items in bucket
+ LocalPtr += 4; // Skip the hash.
+
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L
+ = Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type& Key = InfoObj->ReadKey(LocalPtr, L.first);
+ return InfoObj->GetExternalKey(Key);
+ }
+ };
+
+ key_iterator key_begin() {
+ return key_iterator(Base + 4, getNumEntries(), &InfoObj);
+ }
+ key_iterator key_end() { return key_iterator(); }
+
/// \brief Iterates over all the entries in the table, returning
/// a key/data pair.
class item_iterator {
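For illustration, a sketch of walking only the keys of an already-loaded table via the new key_iterator; Info, Table, and process() are hypothetical stand-ins:

// Assumes: an OnDiskChainedHashTable<Info> loaded from this header's
// format; process() is a hypothetical callback supplied by the caller.
template <typename Info>
void visitKeys(clang::OnDiskChainedHashTable<Info> &Table) {
  typedef typename clang::OnDiskChainedHashTable<Info>::key_iterator
      key_iterator;
  for (key_iterator K = Table.key_begin(), KEnd = Table.key_end();
       K != KEnd; ++K)
    process(*K);  // *K yields Info's external_key_type; values untouched
}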
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def b/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def
new file mode 100644
index 0000000..95cc1a9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def
@@ -0,0 +1,28 @@
+//===--- OpenCLExtensions.def - OpenCL extension list -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the list of supported OpenCL extensions.
+//
+//===----------------------------------------------------------------------===//
+
+OPENCLEXT(cl_khr_fp64)
+OPENCLEXT(cl_khr_int64_base_atomics)
+OPENCLEXT(cl_khr_int64_extended_atomics)
+OPENCLEXT(cl_khr_fp16)
+OPENCLEXT(cl_khr_gl_sharing)
+OPENCLEXT(cl_khr_gl_event)
+OPENCLEXT(cl_khr_d3d10_sharing)
+OPENCLEXT(cl_khr_global_int32_base_atomics)
+OPENCLEXT(cl_khr_global_int32_extended_atomics)
+OPENCLEXT(cl_khr_local_int32_base_atomics)
+OPENCLEXT(cl_khr_local_int32_extended_atomics)
+OPENCLEXT(cl_khr_byte_addressable_store)
+OPENCLEXT(cl_khr_3d_image_writes)
+
+#undef OPENCLEXT
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
index cd0da97..d00195b 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
@@ -18,7 +18,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace clang {
@@ -57,6 +57,10 @@ public:
/// what sort of argument kind it is.
intptr_t DiagArgumentsVal[MaxArguments];
+ /// \brief The values for the various substitution positions that have
+ /// string arguments.
+ std::string DiagArgumentsStr[MaxArguments];
+
/// DiagRanges - The list of ranges added to this diagnostic. It currently
/// only supports 10 ranges, but could easily be extended if needed.
CharSourceRange DiagRanges[10];
@@ -186,6 +190,26 @@ public:
*this->DiagStorage = *Other.DiagStorage;
}
+ PartialDiagnostic(const DiagnosticInfo &Other, StorageAllocator &Allocator)
+ : DiagID(Other.getID()), DiagStorage(0), Allocator(&Allocator)
+ {
+ // Copy arguments.
+ for (unsigned I = 0, N = Other.getNumArgs(); I != N; ++I) {
+ if (Other.getArgKind(I) == Diagnostic::ak_std_string)
+ AddString(Other.getArgStdStr(I));
+ else
+ AddTaggedVal(Other.getRawArg(I), Other.getArgKind(I));
+ }
+
+ // Copy source ranges.
+ for (unsigned I = 0, N = Other.getNumRanges(); I != N; ++I)
+ AddSourceRange(Other.getRange(I));
+
+ // Copy fix-its.
+ for (unsigned I = 0, N = Other.getNumFixItHints(); I != N; ++I)
+ AddFixItHint(Other.getFixItHint(I));
+ }
+
PartialDiagnostic &operator=(const PartialDiagnostic &Other) {
DiagID = Other.DiagID;
if (Other.DiagStorage) {
@@ -216,13 +240,28 @@ public:
DiagStorage->DiagArgumentsVal[DiagStorage->NumDiagArgs++] = V;
}
+ void AddString(llvm::StringRef V) const {
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ assert(DiagStorage->NumDiagArgs < Storage::MaxArguments &&
+ "Too many arguments to diagnostic!");
+ DiagStorage->DiagArgumentsKind[DiagStorage->NumDiagArgs]
+ = Diagnostic::ak_std_string;
+ DiagStorage->DiagArgumentsStr[DiagStorage->NumDiagArgs++] = V;
+ }
+
void Emit(const DiagnosticBuilder &DB) const {
if (!DiagStorage)
return;
// Add all arguments.
for (unsigned i = 0, e = DiagStorage->NumDiagArgs; i != e; ++i) {
- DB.AddTaggedVal(DiagStorage->DiagArgumentsVal[i],
+ if ((Diagnostic::ArgumentKind)DiagStorage->DiagArgumentsKind[i]
+ == Diagnostic::ak_std_string)
+ DB.AddString(DiagStorage->DiagArgumentsStr[i]);
+ else
+ DB.AddTaggedVal(DiagStorage->DiagArgumentsVal[i],
(Diagnostic::ArgumentKind)DiagStorage->DiagArgumentsKind[i]);
}
@@ -230,7 +269,7 @@ public:
for (unsigned i = 0, e = DiagStorage->NumDiagRanges; i != e; ++i)
DB.AddSourceRange(DiagStorage->DiagRanges[i]);
- // Add all code modification hints
+ // Add all fix-its.
for (unsigned i = 0, e = DiagStorage->NumFixItHints; i != e; ++i)
DB.AddFixItHint(DiagStorage->FixItHints[i]);
}
@@ -263,6 +302,13 @@ public:
}
friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ llvm::StringRef S) {
+
+ PD.AddString(S);
+ return PD;
+ }
+
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const SourceRange &R) {
PD.AddSourceRange(CharSourceRange::getTokenRange(R));
return PD;
@@ -288,6 +334,9 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
return DB;
}
+/// \brief A partial diagnostic along with the source location where this
+/// diagnostic occurs.
+typedef std::pair<SourceLocation, PartialDiagnostic> PartialDiagnosticAt;
} // end namespace clang
#endif
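A hedged sketch of the two additions at work, the StringRef stream operator and the PartialDiagnosticAt pair; the caller's setup (PD, Loc, Notes) is assumed and the helper name is a placeholder:

#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/ADT/SmallVector.h"

void stashNote(clang::SourceLocation Loc, clang::PartialDiagnostic PD,
               llvm::SmallVectorImpl<clang::PartialDiagnosticAt> &Notes) {
  PD << llvm::StringRef("candidate");  // stored via the new AddString()
  // Bundle the location with the diagnostic so both travel together.
  Notes.push_back(clang::PartialDiagnosticAt(Loc, PD));
}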
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
index 35f27fb..605c4bb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_SOURCELOCATION_H
#define LLVM_CLANG_SOURCELOCATION_H
+#include "llvm/Support/PointerLikeTypeTraits.h"
#include <utility>
#include <cassert>
@@ -121,7 +122,6 @@ public:
/// directly.
unsigned getRawEncoding() const { return ID; }
-
/// getFromRawEncoding - Turn a raw encoding of a SourceLocation object into
/// a real SourceLocation.
static SourceLocation getFromRawEncoding(unsigned Encoding) {
@@ -130,6 +130,22 @@ public:
return X;
}
+ /// getPtrEncoding - When a SourceLocation itself cannot be used, this returns
+ /// an (opaque) pointer encoding for it. This should only be passed
+ /// to SourceLocation::getFromPtrEncoding; it should not be inspected
+ /// directly.
+ void* getPtrEncoding() const {
+ // Double cast to avoid a warning "cast to pointer from integer of different
+ // size".
+ return (void*)(uintptr_t)getRawEncoding();
+ }
+
+ /// getFromPtrEncoding - Turn a pointer encoding of a SourceLocation object
+ /// into a real SourceLocation.
+ static SourceLocation getFromPtrEncoding(void *Encoding) {
+ return getFromRawEncoding((unsigned)(uintptr_t)Encoding);
+ }
+
void print(llvm::raw_ostream &OS, const SourceManager &SM) const;
void dump(const SourceManager &SM) const;
};
@@ -265,6 +281,20 @@ public:
bool isInSystemHeader() const;
+ /// \brief Determines the order of 2 source locations in the translation unit.
+ ///
+ /// \returns true if this source location comes before 'Loc', false otherwise.
+ bool isBeforeInTranslationUnitThan(SourceLocation Loc) const;
+
+ /// \brief Determines the order of 2 source locations in the translation unit.
+ ///
+ /// \returns true if this source location comes before 'Loc', false otherwise.
+ bool isBeforeInTranslationUnitThan(FullSourceLoc Loc) const {
+ assert(Loc.isValid());
+ assert(SrcMgr == Loc.SrcMgr && "Loc comes from another SourceManager!");
+ return isBeforeInTranslationUnitThan((SourceLocation)Loc);
+ }
+
/// Prints information about this FullSourceLoc to stderr. Useful for
/// debugging.
void dump() const { SourceLocation::dump(*SrcMgr); }
@@ -350,6 +380,19 @@ namespace llvm {
template <>
struct isPodLike<clang::FileID> { static const bool value = true; };
+ // Teach SmallPtrSet how to handle SourceLocation.
+ template<>
+ class PointerLikeTypeTraits<clang::SourceLocation> {
+ public:
+ static inline void *getAsVoidPointer(clang::SourceLocation L) {
+ return L.getPtrEncoding();
+ }
+ static inline clang::SourceLocation getFromVoidPointer(void *P) {
+ return clang::SourceLocation::getFromRawEncoding((unsigned)(uintptr_t)P);
+ }
+ enum { NumLowBitsAvailable = 0 };
+ };
+
} // end namespace llvm
#endif
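A minimal sketch of the new pointer encoding: it round-trips losslessly, which is exactly what the PointerLikeTypeTraits specialization above needs to let SourceLocation live in pointer-keyed containers such as llvm::SmallPtrSet (the function name is a placeholder):

#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>

void roundTrip(clang::SourceLocation Loc) {  // placeholder helper
  void *Raw = Loc.getPtrEncoding();
  assert(clang::SourceLocation::getFromPtrEncoding(Raw) == Loc);
  llvm::SmallPtrSet<clang::SourceLocation, 8> Seen;  // per the traits above
  Seen.insert(Loc);
}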
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
index 7a66117..c0fbd08 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
@@ -16,7 +16,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/DenseMap.h"
@@ -369,7 +369,9 @@ public:
class SourceManager {
/// \brief Diagnostic object.
Diagnostic &Diag;
-
+
+ FileManager &FileMgr;
+
mutable llvm::BumpPtrAllocator ContentCacheAlloc;
/// FileInfos - Memoized information about all of the files tracked by this
@@ -427,15 +429,15 @@ class SourceManager {
explicit SourceManager(const SourceManager&);
void operator=(const SourceManager&);
public:
- SourceManager(Diagnostic &Diag)
- : Diag(Diag), ExternalSLocEntries(0), LineTable(0), NumLinearScans(0),
- NumBinaryProbes(0) {
- clearIDTables();
- }
+ SourceManager(Diagnostic &Diag, FileManager &FileMgr);
~SourceManager();
void clearIDTables();
+ Diagnostic &getDiagnostics() const { return Diag; }
+
+ FileManager &getFileManager() const { return FileMgr; }
+
//===--------------------------------------------------------------------===//
// MainFileID creation and querying methods.
//===--------------------------------------------------------------------===//
@@ -450,6 +452,13 @@ public:
return MainFileID;
}
+ /// \brief Set the file ID for the precompiled preamble, which is also the
+ /// main file.
+ void SetPreambleFileID(FileID Preamble) {
+ assert(MainFileID.isInvalid() && "MainFileID already set!");
+ MainFileID = Preamble;
+ }
+
//===--------------------------------------------------------------------===//
// Methods to create new FileID's and instantiations.
//===--------------------------------------------------------------------===//
@@ -464,7 +473,7 @@ public:
unsigned PreallocatedID = 0,
unsigned Offset = 0) {
const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
- if (IR == 0) return FileID(); // Error opening file?
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
return createFileID(IR, IncludePos, FileCharacter, PreallocatedID, Offset);
}
@@ -514,9 +523,7 @@ public:
///
/// \param DoNotFree If true, then the buffer will not be freed when the
/// source manager is destroyed.
- ///
- /// \returns true if an error occurred, false otherwise.
- bool overrideFileContents(const FileEntry *SourceFile,
+ void overrideFileContents(const FileEntry *SourceFile,
const llvm::MemoryBuffer *Buffer,
bool DoNotFree = false);
@@ -715,6 +722,11 @@ public:
///
/// Note that a presumed location is always given as the instantiation point
/// of an instantiation location, not at the spelling location.
+ ///
+ /// \returns The presumed location of the specified SourceLocation. If the
+ /// presumed location cannot be computed (e.g., because \p Loc is invalid
+ /// or the file containing \p Loc has changed on disk), returns an invalid
+ /// presumed location.
PresumedLoc getPresumedLoc(SourceLocation Loc) const;
/// isFromSameFile - Returns true if both SourceLocations correspond to
@@ -771,7 +783,7 @@ public:
/// If the source file is included multiple times, the source location will
/// be based upon the first inclusion.
SourceLocation getLocation(const FileEntry *SourceFile,
- unsigned Line, unsigned Col) const;
+ unsigned Line, unsigned Col);
/// \brief Determines the order of 2 source locations in the translation unit.
///
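A sketch of the revised construction contract, using only the signatures shown above (Diagnostic and FileManager wiring elided; the helper name is a placeholder):

#include "clang/Basic/SourceManager.h"
#include <cassert>

void buildSourceManager(clang::Diagnostic &Diags, clang::FileManager &FM) {
  // The FileManager is now a constructor argument rather than being
  // looked up per call, and both collaborators are exposed as accessors.
  clang::SourceManager SM(Diags, FM);
  assert(&SM.getFileManager() == &FM);
  assert(&SM.getDiagnostics() == &Diags);
}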
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
index e757a2f..e6b6218 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
@@ -95,6 +95,23 @@ namespace clang {
VK_XValue
};
+ /// A further classification of the kind of object referenced by an
+ /// l-value or x-value.
+ enum ExprObjectKind {
+ /// An ordinary object is located at an address in memory.
+ OK_Ordinary,
+
+ /// A bitfield object is a bitfield on a C or C++ record.
+ OK_BitField,
+
+ /// A vector component is an element or range of elements of a vector.
+ OK_VectorComponent,
+
+ /// An Objective-C property is a logical field of an Objective-C
+ /// object that is read and written via Objective-C method calls.
+ OK_ObjCProperty
+ };
+
// \brief Describes the kind of template specialization that a
// particular template specialization declaration represents.
enum TemplateSpecializationKind {
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
index 4aa055e..be0d8ff 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -23,7 +23,7 @@ def ContinueStmt : Stmt;
def BreakStmt : Stmt;
def ReturnStmt : Stmt;
def DeclStmt : Stmt;
-def SwitchCase : Stmt;
+def SwitchCase : Stmt<1>;
def CaseStmt : DStmt<SwitchCase>;
def DefaultStmt : DStmt<SwitchCase>;
@@ -61,7 +61,9 @@ def MemberExpr : DStmt<Expr>;
def CastExpr : DStmt<Expr, 1>;
def BinaryOperator : DStmt<Expr>;
def CompoundAssignOperator : DStmt<BinaryOperator>;
-def ConditionalOperator : DStmt<Expr>;
+def AbstractConditionalOperator : DStmt<Expr, 1>;
+def ConditionalOperator : DStmt<AbstractConditionalOperator>;
+def BinaryConditionalOperator : DStmt<AbstractConditionalOperator>;
def ImplicitCastExpr : DStmt<CastExpr>;
def ExplicitCastExpr : DStmt<CastExpr, 1>;
def CStyleCastExpr : DStmt<ExplicitCastExpr>;
@@ -76,7 +78,6 @@ def VAArgExpr : DStmt<Expr>;
// GNU Extensions.
def AddrLabelExpr : DStmt<Expr>;
def StmtExpr : DStmt<Expr>;
-def TypesCompatibleExpr : DStmt<Expr>;
def ChooseExpr : DStmt<Expr>;
def GNUNullExpr : DStmt<Expr>;
@@ -100,16 +101,21 @@ def CXXNewExpr : DStmt<Expr>;
def CXXDeleteExpr : DStmt<Expr>;
def CXXPseudoDestructorExpr : DStmt<Expr>;
def UnaryTypeTraitExpr : DStmt<Expr>;
+def BinaryTypeTraitExpr : DStmt<Expr>;
def DependentScopeDeclRefExpr : DStmt<Expr>;
def CXXConstructExpr : DStmt<Expr>;
def CXXBindTemporaryExpr : DStmt<Expr>;
-def CXXExprWithTemporaries : DStmt<Expr>;
+def ExprWithCleanups : DStmt<Expr>;
def CXXTemporaryObjectExpr : DStmt<CXXConstructExpr>;
def CXXUnresolvedConstructExpr : DStmt<Expr>;
def CXXDependentScopeMemberExpr : DStmt<Expr>;
def OverloadExpr : DStmt<Expr, 1>;
def UnresolvedLookupExpr : DStmt<OverloadExpr>;
def UnresolvedMemberExpr : DStmt<OverloadExpr>;
+def CXXNoexceptExpr : DStmt<Expr>;
+def PackExpansionExpr : DStmt<Expr>;
+def SizeOfPackExpr : DStmt<Expr>;
+def SubstNonTypeTemplateParmPackExpr : DStmt<Expr>;
// Obj-C Expressions.
def ObjCStringLiteral : DStmt<Expr>;
@@ -119,11 +125,17 @@ def ObjCSelectorExpr : DStmt<Expr>;
def ObjCProtocolExpr : DStmt<Expr>;
def ObjCIvarRefExpr : DStmt<Expr>;
def ObjCPropertyRefExpr : DStmt<Expr>;
-def ObjCImplicitSetterGetterRefExpr : DStmt<Expr>;
-def ObjCSuperExpr : DStmt<Expr>;
def ObjCIsaExpr : DStmt<Expr>;
+// CUDA Expressions.
+def CUDAKernelCallExpr : DStmt<CallExpr>;
+
// Clang Extensions.
def ShuffleVectorExpr : DStmt<Expr>;
def BlockExpr : DStmt<Expr>;
def BlockDeclRefExpr : DStmt<Expr>;
+def OpaqueValueExpr : DStmt<Expr>;
+
+// Microsoft Extensions.
+def CXXUuidofExpr : DStmt<Expr>;
+
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
index 40df9ba..b9087f2 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -18,7 +18,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <vector>
#include <string>
@@ -64,6 +64,7 @@ protected:
bool TLSSupported;
bool NoAsmVariants; // True if {|} are normal characters.
unsigned char PointerWidth, PointerAlign;
+ unsigned char BoolWidth, BoolAlign;
unsigned char IntWidth, IntAlign;
unsigned char FloatWidth, FloatAlign;
unsigned char DoubleWidth, DoubleAlign;
@@ -73,6 +74,7 @@ protected:
unsigned char LongLongWidth, LongLongAlign;
const char *DescriptionString;
const char *UserLabelPrefix;
+ const char *MCountName;
const llvm::fltSemantics *FloatFormat, *DoubleFormat, *LongDoubleFormat;
unsigned char RegParmMax, SSERegParmMax;
TargetCXXABI CXXABI;
@@ -139,7 +141,7 @@ public:
IntType getSigAtomicType() const { return SigAtomicType; }
- /// getTypeWidth - Return the width (in bits) of the specified integer type
+ /// getTypeWidth - Return the width (in bits) of the specified integer type
/// enum. For example, SignedInt -> getIntWidth().
unsigned getTypeWidth(IntType T) const;
@@ -149,7 +151,7 @@ public:
/// isTypeSigned - Return whether an integer type is signed. Returns true if
/// the type is signed; false otherwise.
- bool isTypeSigned(IntType T) const;
+ static bool isTypeSigned(IntType T);
/// getPointerWidth - Return the width of pointers on this target, for the
/// specified address space.
@@ -162,8 +164,8 @@ public:
/// getBoolWidth/Align - Return the size of '_Bool' and C++ 'bool' for this
/// target, in bits.
- unsigned getBoolWidth(bool isWide = false) const { return 8; } // FIXME
- unsigned getBoolAlign(bool isWide = false) const { return 8; } // FIXME
+ unsigned getBoolWidth() const { return BoolWidth; }
+ unsigned getBoolAlign() const { return BoolAlign; }
unsigned getCharWidth() const { return 8; } // FIXME
unsigned getCharAlign() const { return 8; } // FIXME
@@ -240,6 +242,11 @@ public:
return UserLabelPrefix;
}
+ /// MCountName - This returns the name of the mcount instrumentation function.
+ const char *getMCountName() const {
+ return MCountName;
+ }
+
bool useBitFieldTypeAlignment() const {
return UseBitFieldTypeAlignment;
}
@@ -306,7 +313,7 @@ public:
std::string Name; // Operand name: [foo] with no []'s.
public:
ConstraintInfo(llvm::StringRef ConstraintStr, llvm::StringRef Name)
- : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
+ : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
Name(Name.str()) {}
const std::string &getConstraintStr() const { return ConstraintStr; }
@@ -356,6 +363,9 @@ public:
unsigned NumOutputs, unsigned &Index) const;
virtual std::string convertConstraint(const char Constraint) const {
+ // 'p' defaults to 'r', but can be overridden by targets.
+ if (Constraint == 'p')
+ return std::string("r");
return std::string(1, Constraint);
}
@@ -391,7 +401,7 @@ public:
return "__OBJC,__cstring_object,regular,no_dead_strip";
}
- /// getNSStringNonFragileABISection - Return the section to use for
+ /// getNSStringNonFragileABISection - Return the section to use for
/// NSString literals, or 0 if no special section is used (NonFragile ABI).
virtual const char *getNSStringNonFragileABISection() const {
return "__DATA, __objc_stringobj, regular, no_dead_strip";
@@ -499,7 +509,7 @@ public:
bool isTLSSupported() const {
return TLSSupported;
}
-
+
/// hasNoAsmVariants - Return true if {|} are normal characters in the
/// asm string. If this returns false (the default), then {abc|xyz} is syntax
/// that says that when compiling for asm variant #0, "abc" should be
@@ -508,19 +518,18 @@ public:
bool hasNoAsmVariants() const {
return NoAsmVariants;
}
-
+
/// getEHDataRegisterNumber - Return the register number that
/// __builtin_eh_return_regno would return with the specified argument.
virtual int getEHDataRegisterNumber(unsigned RegNo) const {
- return -1;
+ return -1;
}
-
- /// getStaticInitSectionSpecifier - Return the section to use for C++ static
+
+ /// getStaticInitSectionSpecifier - Return the section to use for C++ static
/// initialization functions.
virtual const char *getStaticInitSectionSpecifier() const {
return 0;
}
-
protected:
virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
return PointerWidth;
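For illustration, a short sketch of querying the two new hooks shown above; TI is any concrete target and the function name is a placeholder:

#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

void queryTarget(const clang::TargetInfo &TI) {  // placeholder helper
  // New accessor for the mcount instrumentation entry point.
  llvm::errs() << "mcount symbol: " << TI.getMCountName() << "\n";
  // The address constraint 'p' now defaults to the general-register
  // constraint 'r' unless a target overrides convertConstraint().
  assert(TI.convertConstraint('p') == "r");
}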
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
index dc360ad..b84b04d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -103,6 +103,7 @@ TOK(comment) // Comment (only in -E -C[C] mode)
// C99 6.4.2: Identifiers.
TOK(identifier) // abcde123
+TOK(raw_identifier) // Used only in raw lexing mode.
// C99 6.4.4.1: Integer Constants
// C99 6.4.4.2: Floating Constants
@@ -175,6 +176,10 @@ PUNCTUATOR(coloncolon, "::")
// Objective C support.
PUNCTUATOR(at, "@")
+// CUDA support.
+PUNCTUATOR(lesslessless, "<<<")
+PUNCTUATOR(greatergreatergreater, ">>>")
+
// C99 6.4.1: Keywords. These turn into kw_* tokens.
// Flags allowed:
// KEYALL - This is a keyword in all variants of C and C++, or it
@@ -183,9 +188,12 @@ PUNCTUATOR(at, "@")
// KEYC99 - This is a keyword introduced to C in C99
// KEYCXX - This is a C++ keyword, or a C++-specific keyword in the
// implementation namespace
+// KEYNOCXX - This is a keyword in every non-C++ dialect.
// KEYCXX0X - This is a C++ keyword introduced to C++ in C++0x
// KEYGNU - This is a keyword if GNU extensions are enabled
// KEYMS - This is a keyword if Microsoft extensions are enabled
+// KEYOPENCL - This is a keyword in OpenCL
+// KEYALTIVEC - This is a keyword in AltiVec
// KEYBORLAND - This is a keyword if Borland extensions are enabled
//
KEYWORD(auto , KEYALL)
@@ -222,7 +230,7 @@ KEYWORD(unsigned , KEYALL)
KEYWORD(void , KEYALL)
KEYWORD(volatile , KEYALL)
KEYWORD(while , KEYALL)
-KEYWORD(_Bool , KEYNOMS)
+KEYWORD(_Bool , KEYNOCXX)
KEYWORD(_Complex , KEYALL)
KEYWORD(_Imaginary , KEYALL)
KEYWORD(__func__ , KEYALL)
@@ -278,6 +286,7 @@ KEYWORD(char16_t , KEYCXX0X)
KEYWORD(char32_t , KEYCXX0X)
KEYWORD(constexpr , KEYCXX0X)
KEYWORD(decltype , KEYCXX0X)
+KEYWORD(noexcept , KEYCXX0X)
KEYWORD(nullptr , KEYCXX0X)
KEYWORD(static_assert , KEYCXX0X)
KEYWORD(thread_local , KEYCXX0X)
@@ -316,6 +325,7 @@ KEYWORD(__has_virtual_destructor , KEYCXX)
KEYWORD(__is_abstract , KEYCXX)
KEYWORD(__is_base_of , KEYCXX)
KEYWORD(__is_class , KEYCXX)
+KEYWORD(__is_convertible_to , KEYCXX)
KEYWORD(__is_empty , KEYCXX)
KEYWORD(__is_enum , KEYCXX)
KEYWORD(__is_pod , KEYCXX)
@@ -323,7 +333,6 @@ KEYWORD(__is_polymorphic , KEYCXX)
KEYWORD(__is_union , KEYCXX)
// Tentative name - there's no implementation of std::is_literal_type yet.
KEYWORD(__is_literal , KEYCXX)
-// FIXME: Add MS's traits, too.
// Apple Extension.
KEYWORD(__private_extern__ , KEYALL)
@@ -336,7 +345,11 @@ KEYWORD(__fastcall , KEYALL)
KEYWORD(__thiscall , KEYALL)
KEYWORD(__forceinline , KEYALL)
-// Borland Extension.
+// OpenCL-specific keywords (see OpenCL 1.1 [6.1.9])
+KEYWORD(__kernel , KEYOPENCL)
+ALIAS("kernel", __kernel , KEYOPENCL)
+
+// Borland Extensions.
KEYWORD(__pascal , KEYALL)
// Altivec Extension.
@@ -356,6 +369,7 @@ ALIAS("__complex__" , _Complex , KEYALL)
ALIAS("__imag__" , __imag , KEYALL)
ALIAS("__inline" , inline , KEYALL)
ALIAS("__inline__" , inline , KEYALL)
+ALIAS("__nullptr" , nullptr , KEYCXX)
ALIAS("__real__" , __real , KEYALL)
ALIAS("__restrict" , restrict , KEYALL)
ALIAS("__restrict__" , restrict , KEYALL)
@@ -369,15 +383,23 @@ ALIAS("__volatile__" , volatile , KEYALL)
// Microsoft extensions which should be disabled in strict conformance mode
KEYWORD(__ptr64 , KEYMS)
KEYWORD(__w64 , KEYMS)
+KEYWORD(__uuidof , KEYMS | KEYBORLAND)
ALIAS("_asm" , asm , KEYMS)
-ALIAS("_cdecl" , __cdecl , KEYMS)
-ALIAS("_fastcall" , __fastcall , KEYMS)
-ALIAS("_stdcall" , __stdcall , KEYMS)
+ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
+ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
+ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
ALIAS("_thiscall" , __thiscall , KEYMS)
+ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
+ALIAS("_inline" , inline , KEYMS)
// Borland Extensions which should be disabled in strict conformance mode.
ALIAS("_pascal" , __pascal , KEYBORLAND)
+// Clang Extensions.
+ALIAS("__char16_t" , char16_t , KEYCXX)
+ALIAS("__char32_t" , char32_t , KEYCXX)
+
+
//===----------------------------------------------------------------------===//
// Objective-C @-preceded keywords.
//===----------------------------------------------------------------------===//
@@ -421,6 +443,12 @@ ANNOTATION(typename) // annotation for a C typedef name, a C++ (possibly
ANNOTATION(template_id) // annotation for a C++ template-id that names a
// function template specialization (not a type),
// e.g., "std::swap<int>"
+
+// Annotation for #pragma unused(...)
+// For each argument inside the parentheses the pragma handler will produce
+// one 'pragma_unused' annotation token followed by the argument token.
+ANNOTATION(pragma_unused)
+
#undef ANNOTATION
#undef OBJC2_AT_KEYWORD
#undef OBJC1_AT_KEYWORD
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
index 85dc067..515390a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
@@ -43,6 +43,12 @@ enum ObjCKeywordKind {
NUM_OBJC_KEYWORDS
};
+/// OnOffSwitch - This defines the possible values of an on-off-switch
+/// (C99 6.10.6p2).
+enum OnOffSwitch {
+ OOS_ON, OOS_OFF, OOS_DEFAULT
+};
+
/// \brief Determines the name of a token as used within the front end.
///
/// The name of a token will be an internal name (such as "l_square")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
index 36b8300..00c6e9e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
@@ -36,6 +36,12 @@ namespace clang {
UTT_IsLiteral
};
+ /// BinaryTypeTrait - Names for the binary type traits.
+ enum BinaryTypeTrait {
+ BTT_IsBaseOf,
+ BTT_TypeCompatible,
+ BTT_IsConvertibleTo
+ };
}
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.h b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
index 9948677..ede68ed 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Version.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
@@ -44,7 +44,7 @@ namespace clang {
/// \brief Retrieves the repository path (e.g., Subversion path) that
/// identifies the particular Clang branch, tag, or trunk from which this
/// Clang was built.
- llvm::StringRef getClangRepositoryPath();
+ std::string getClangRepositoryPath();
/// \brief Retrieves the repository revision number (or identifier) from which
/// this Clang was built.
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
new file mode 100644
index 0000000..90e288a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
@@ -0,0 +1,48 @@
+//===--- Visibility.h - Visibility enumeration and utilities ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Visibility enumeration and various utility
+// functions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_VISIBILITY_H
+#define LLVM_CLANG_BASIC_VISIBILITY_H
+
+namespace clang {
+
+/// \brief Describes the different kinds of visibility that a
+/// declaration may have. Visibility determines how a declaration
+/// interacts with the dynamic linker. It may also affect whether the
+/// symbol can be found by runtime symbol lookup APIs.
+///
+/// Visibility is not described in any language standard and
+/// (nonetheless) sometimes has odd behavior. Not all platforms
+/// support all visibility kinds.
+enum Visibility {
+ /// Objects with "hidden" visibility are not seen by the dynamic
+ /// linker.
+ HiddenVisibility,
+
+ /// Objects with "protected" visibility are seen by the dynamic
+ /// linker but always dynamically resolve to an object within this
+ /// shared object.
+ ProtectedVisibility,
+
+ /// Objects with "default" visibility are seen by the dynamic linker
+ /// and act like normal objects.
+ DefaultVisibility
+};
+
+inline Visibility minVisibility(Visibility L, Visibility R) {
+ return L < R ? L : R;
+}
+
+}
+
+#endif // LLVM_CLANG_BASIC_VISIBILITY_H
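A tiny sketch of why the enumerator order matters: Hidden < Protected < Default, so minVisibility() yields the more restrictive of two visibilities, e.g. when combining a declaration's visibility with its context's (the function name is a placeholder):

#include "clang/Basic/Visibility.h"
#include <cassert>

void demoMinVisibility() {  // placeholder helper
  clang::Visibility V =
      clang::minVisibility(clang::DefaultVisibility, clang::HiddenVisibility);
  assert(V == clang::HiddenVisibility);  // the more restrictive wins
}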
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
index fa6ebb7..880a0da 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
@@ -16,13 +16,34 @@ class Op;
def OP_NONE : Op;
def OP_ADD : Op;
+def OP_ADDL : Op;
+def OP_ADDW : Op;
def OP_SUB : Op;
+def OP_SUBL : Op;
+def OP_SUBW : Op;
def OP_MUL : Op;
+def OP_MULL : Op;
def OP_MLA : Op;
+def OP_MLAL : Op;
def OP_MLS : Op;
+def OP_MLSL : Op;
def OP_MUL_N : Op;
+def OP_MULL_N: Op;
def OP_MLA_N : Op;
def OP_MLS_N : Op;
+def OP_MLAL_N : Op;
+def OP_MLSL_N : Op;
+def OP_MUL_LN: Op;
+def OP_MULL_LN : Op;
+def OP_MLA_LN: Op;
+def OP_MLS_LN: Op;
+def OP_MLAL_LN : Op;
+def OP_MLSL_LN : Op;
+def OP_QDMULL_LN : Op;
+def OP_QDMLAL_LN : Op;
+def OP_QDMLSL_LN : Op;
+def OP_QDMULH_LN : Op;
+def OP_QRDMULH_LN : Op;
def OP_EQ : Op;
def OP_GE : Op;
def OP_LE : Op;
@@ -40,22 +61,31 @@ def OP_HI : Op;
def OP_LO : Op;
def OP_CONC : Op;
def OP_DUP : Op;
+def OP_DUP_LN: Op;
def OP_SEL : Op;
def OP_REV64 : Op;
def OP_REV32 : Op;
def OP_REV16 : Op;
+def OP_REINT : Op;
+def OP_ABDL : Op;
+def OP_ABA : Op;
+def OP_ABAL : Op;
-class Inst <string p, string t, Op o> {
+class Inst <string n, string p, string t, Op o> {
+ string Name = n;
string Prototype = p;
string Types = t;
Op Operand = o;
bit isShift = 0;
}
-// Used to generate Builtins.def
-class SInst<string p, string t> : Inst<p, t, OP_NONE> {}
-class IInst<string p, string t> : Inst<p, t, OP_NONE> {}
-class WInst<string p, string t> : Inst<p, t, OP_NONE> {}
+// Used to generate Builtins.def:
+// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8", "p8")
+// IInst: Instruction with generic integer suffix (e.g., "i8")
+// WInst: Instruction with only bit size suffix (e.g., "8")
+class SInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
+class IInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
+class WInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
// prototype: return (arg, arg, ...)
// v: void
@@ -93,251 +123,273 @@ class WInst<string p, string t> : Inst<p, t, OP_NONE> {}
////////////////////////////////////////////////////////////////////////////////
// E.3.1 Addition
-def VADD : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
-def VADDL : SInst<"wdd", "csiUcUsUi">;
-def VADDW : SInst<"wwd", "csiUcUsUi">;
-def VHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VRHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VQADD : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VADDHN : IInst<"dww", "csiUcUsUi">;
-def VRADDHN : IInst<"dww", "csiUcUsUi">;
+def VADD : Inst<"vadd", "ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
+def VADDL : Inst<"vaddl", "wdd", "csiUcUsUi", OP_ADDL>;
+def VADDW : Inst<"vaddw", "wwd", "csiUcUsUi", OP_ADDW>;
+def VHADD : SInst<"vhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VRHADD : SInst<"vrhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VQADD : SInst<"vqadd", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VADDHN : IInst<"vaddhn", "hkk", "silUsUiUl">;
+def VRADDHN : IInst<"vraddhn", "hkk", "silUsUiUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.2 Multiplication
-def VMUL : Inst<"ddd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_MUL>;
-def VMLA : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
-def VMLAL : SInst<"wwdd", "csiUcUsUi">;
-def VMLS : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
-def VMLSL : SInst<"wwdd", "csiUcUsUi">;
-def VQDMULH : SInst<"ddd", "siQsQi">;
-def VQRDMULH : SInst<"ddd", "siQsQi">;
-def VQDMLAL : SInst<"wwdd", "si">;
-def VQDMLSL : SInst<"wwdd", "si">;
-def VMULL : SInst<"wdd", "csiUcUsUiPc">;
-def VQDMULL : SInst<"wdd", "si">;
+def VMUL : Inst<"vmul", "ddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
+def VMULP : SInst<"vmul", "ddd", "PcQPc">;
+def VMLA : Inst<"vmla", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
+def VMLAL : Inst<"vmlal", "wwdd", "csiUcUsUi", OP_MLAL>;
+def VMLS : Inst<"vmls", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
+def VMLSL : Inst<"vmlsl", "wwdd", "csiUcUsUi", OP_MLSL>;
+def VQDMULH : SInst<"vqdmulh", "ddd", "siQsQi">;
+def VQRDMULH : SInst<"vqrdmulh", "ddd", "siQsQi">;
+def VQDMLAL : SInst<"vqdmlal", "wwdd", "si">;
+def VQDMLSL : SInst<"vqdmlsl", "wwdd", "si">;
+def VMULL : Inst<"vmull", "wdd", "csiUcUsUi", OP_MULL>;
+def VMULLP : SInst<"vmull", "wdd", "Pc">;
+def VQDMULL : SInst<"vqdmull", "wdd", "si">;
////////////////////////////////////////////////////////////////////////////////
// E.3.3 Subtraction
-def VSUB : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
-def VSUBL : SInst<"wdd", "csiUcUsUi">;
-def VSUBW : SInst<"wwd", "csiUcUsUi">;
-def VQSUB : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VHSUB : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VSUBHN : IInst<"dww", "csiUcUsUi">;
-def VRSUBHN : IInst<"dww", "csiUcUsUi">;
+def VSUB : Inst<"vsub", "ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
+def VSUBL : Inst<"vsubl", "wdd", "csiUcUsUi", OP_SUBL>;
+def VSUBW : Inst<"vsubw", "wwd", "csiUcUsUi", OP_SUBW>;
+def VQSUB : SInst<"vqsub", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VHSUB : SInst<"vhsub", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VSUBHN : IInst<"vsubhn", "hkk", "silUsUiUl">;
+def VRSUBHN : IInst<"vrsubhn", "hkk", "silUsUiUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.4 Comparison
-def VCEQ : Inst<"udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
-def VCGE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
-def VCLE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
-def VCGT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
-def VCLT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
-def VCAGE : IInst<"udd", "fQf">;
-def VCALE : IInst<"udd", "fQf">;
-def VCAGT : IInst<"udd", "fQf">;
-def VCALT : IInst<"udd", "fQf">;
-def VTST : WInst<"udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">;
+def VCEQ : Inst<"vceq", "udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
+def VCGE : Inst<"vcge", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
+def VCLE : Inst<"vcle", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
+def VCGT : Inst<"vcgt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
+def VCLT : Inst<"vclt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
+def VCAGE : IInst<"vcage", "udd", "fQf">;
+def VCALE : IInst<"vcale", "udd", "fQf">;
+def VCAGT : IInst<"vcagt", "udd", "fQf">;
+def VCALT : IInst<"vcalt", "udd", "fQf">;
+def VTST : WInst<"vtst", "udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">;
////////////////////////////////////////////////////////////////////////////////
// E.3.5 Absolute Difference
-def VABD : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
-def VABDL : SInst<"wdd", "csiUcUsUi">;
-def VABA : SInst<"dddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VABAL : SInst<"wwdd", "csiUcUsUi">;
+def VABD : SInst<"vabd", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VABDL : Inst<"vabdl", "wdd", "csiUcUsUi", OP_ABDL>;
+def VABA : Inst<"vaba", "dddd", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
+def VABAL : Inst<"vabal", "wwdd", "csiUcUsUi", OP_ABAL>;
////////////////////////////////////////////////////////////////////////////////
// E.3.6 Max/Min
-def VMAX : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
-def VMIN : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VMAX : SInst<"vmax", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VMIN : SInst<"vmin", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
////////////////////////////////////////////////////////////////////////////////
// E.3.7 Pairwise Addition
-def VPADD : IInst<"ddd", "csiUcUsUif">;
-def VPADDL : SInst<"nd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VPADAL : SInst<"nnd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VPADD : IInst<"vpadd", "ddd", "csiUcUsUif">;
+def VPADDL : SInst<"vpaddl", "nd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VPADAL : SInst<"vpadal", "nnd", "csiUcUsUiQcQsQiQUcQUsQUi">;
////////////////////////////////////////////////////////////////////////////////
// E.3.8-9 Folding Max/Min
-def VPMAX : SInst<"ddd", "csiUcUsUif">;
-def VPMIN : SInst<"ddd", "csiUcUsUif">;
+def VPMAX : SInst<"vpmax", "ddd", "csiUcUsUif">;
+def VPMIN : SInst<"vpmin", "ddd", "csiUcUsUif">;
////////////////////////////////////////////////////////////////////////////////
// E.3.10 Reciprocal/Sqrt
-def VRECPS : IInst<"ddd", "fQf">;
-def VRSQRTS : IInst<"ddd", "fQf">;
+def VRECPS : IInst<"vrecps", "ddd", "fQf">;
+def VRSQRTS : IInst<"vrsqrts", "ddd", "fQf">;
////////////////////////////////////////////////////////////////////////////////
// E.3.11 Shifts by signed variable
-def VSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VQSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VQRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSHL : SInst<"vshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL : SInst<"vqshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHL : SInst<"vrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQRSHL : SInst<"vqrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.12 Shifts by constant
let isShift = 1 in {
-def VSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VSHL_N : IInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VRSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VRSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VQSHL_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
-def VQSHLU_N : SInst<"udi", "csilQcQsQiQl">;
-def VSHRN_N : IInst<"hki", "silUsUiUl">;
-def VQSHRUN_N : SInst<"eki", "sil">;
-def VQRSHRUN_N : SInst<"eki", "sil">;
-def VQSHRN_N : SInst<"hki", "silUsUiUl">;
-def VRSHRN_N : IInst<"hki", "silUsUiUl">;
-def VQRSHRN_N : SInst<"hki", "silUsUiUl">;
-def VSHLL_N : SInst<"wdi", "csiUcUsUi">;
+def VSHR_N : SInst<"vshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSHL_N : IInst<"vshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHR_N : SInst<"vrshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSRA_N : SInst<"vsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSRA_N : SInst<"vrsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL_N : SInst<"vqshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHLU_N : SInst<"vqshlu_n", "udi", "csilQcQsQiQl">;
+def VSHRN_N : IInst<"vshrn_n", "hki", "silUsUiUl">;
+def VQSHRUN_N : SInst<"vqshrun_n", "eki", "sil">;
+def VQRSHRUN_N : SInst<"vqrshrun_n", "eki", "sil">;
+def VQSHRN_N : SInst<"vqshrn_n", "hki", "silUsUiUl">;
+def VRSHRN_N : IInst<"vrshrn_n", "hki", "silUsUiUl">;
+def VQRSHRN_N : SInst<"vqrshrn_n", "hki", "silUsUiUl">;
+def VSHLL_N : SInst<"vshll_n", "wdi", "csiUcUsUi">;
////////////////////////////////////////////////////////////////////////////////
// E.3.13 Shifts with insert
-def VSRI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
-def VSLI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+def VSRI_N : WInst<"vsri_n", "dddi",
+ "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+def VSLI_N : WInst<"vsli_n", "dddi",
+ "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
}
////////////////////////////////////////////////////////////////////////////////
// E.3.14 Loads and stores of a single vector
-def VLD1 : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD1_LANE : WInst<"dcdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD1_DUP : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VST1 : WInst<"vpd", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VST1_LANE : WInst<"vpdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1 : WInst<"vld1", "dc",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_LANE : WInst<"vld1_lane", "dcdi",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_DUP : WInst<"vld1_dup", "dc",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1 : WInst<"vst1", "vpd",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1_LANE : WInst<"vst1_lane", "vpdi",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.15 Loads and stores of an N-element structure
-def VLD2 : WInst<"2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD3 : WInst<"3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD4 : WInst<"4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VLD2_DUP : WInst<"2c", "UcUsUiUlcsilhfPcPs">;
-def VLD3_DUP : WInst<"3c", "UcUsUiUlcsilhfPcPs">;
-def VLD4_DUP : WInst<"4c", "UcUsUiUlcsilhfPcPs">;
-def VLD2_LANE : WInst<"2c2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
-def VLD3_LANE : WInst<"3c3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
-def VLD4_LANE : WInst<"4c4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
-def VST2 : WInst<"vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VST3 : WInst<"vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VST4 : WInst<"vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
-def VST2_LANE : WInst<"vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
-def VST3_LANE : WInst<"vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
-def VST4_LANE : WInst<"vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD2 : WInst<"vld2", "2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD3 : WInst<"vld3", "3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD4 : WInst<"vld4", "4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD2_DUP : WInst<"vld2_dup", "2c", "UcUsUiUlcsilhfPcPs">;
+def VLD3_DUP : WInst<"vld3_dup", "3c", "UcUsUiUlcsilhfPcPs">;
+def VLD4_DUP : WInst<"vld4_dup", "4c", "UcUsUiUlcsilhfPcPs">;
+def VLD2_LANE : WInst<"vld2_lane", "2c2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD3_LANE : WInst<"vld3_lane", "3c3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD4_LANE : WInst<"vld4_lane", "4c4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST2 : WInst<"vst2", "vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST3 : WInst<"vst3", "vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST4 : WInst<"vst4", "vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST2_LANE : WInst<"vst2_lane", "vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST3_LANE : WInst<"vst3_lane", "vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST4_LANE : WInst<"vst4_lane", "vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.16 Extract lanes from a vector
-def VGET_LANE : IInst<"sdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+def VGET_LANE : IInst<"vget_lane", "sdi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.17 Set lanes within a vector
-def VSET_LANE : IInst<"dsdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+def VSET_LANE : IInst<"vset_lane", "dsdi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.18 Initialize a vector from bit pattern
-def VCREATE: Inst<"dl", "csihfUcUsUiUlPcPsl", OP_CAST>;
+def VCREATE: Inst<"vcreate", "dl", "csihfUcUsUiUlPcPsl", OP_CAST>;
////////////////////////////////////////////////////////////////////////////////
// E.3.19 Set all lanes to same value
-def VDUP_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
-def VMOV_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
-def VDUP_LANE : WInst<"dgi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+def VDUP_N : Inst<"vdup_n", "ds",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VMOV_N : Inst<"vmov_n", "ds",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VDUP_LANE : Inst<"vdup_lane", "dgi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",OP_DUP_LN>;
////////////////////////////////////////////////////////////////////////////////
// E.3.20 Combining vectors
-def VCOMBINE : Inst<"kdd", "csilhfUcUsUiUlPcPs", OP_CONC>;
+def VCOMBINE : Inst<"vcombine", "kdd", "csilhfUcUsUiUlPcPs", OP_CONC>;
////////////////////////////////////////////////////////////////////////////////
// E.3.21 Splitting vectors
-def VGET_HIGH : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_HI>;
-def VGET_LOW : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_LO>;
+def VGET_HIGH : Inst<"vget_high", "dk", "csilhfUcUsUiUlPcPs", OP_HI>;
+def VGET_LOW : Inst<"vget_low", "dk", "csilhfUcUsUiUlPcPs", OP_LO>;
////////////////////////////////////////////////////////////////////////////////
// E.3.22 Converting vectors
-def VCVT_S32 : SInst<"xd", "fQf">;
-def VCVT_U32 : SInst<"ud", "fQf">;
-def VCVT_F16 : SInst<"hk", "f">;
-def VCVT_N_S32 : SInst<"xdi", "fQf">;
-def VCVT_N_U32 : SInst<"udi", "fQf">;
-def VCVT_F32 : SInst<"fd", "iUiQiQUi">;
-def VCVT_F32_F16 : SInst<"kh", "f">;
-def VCVT_N_F32 : SInst<"fdi", "iUiQiQUi">;
-def VMOVN : IInst<"hk", "silUsUiUl">;
-def VMOVL : SInst<"wd", "csiUcUsUi">;
-def VQMOVN : SInst<"hk", "silUsUiUl">;
-def VQMOVUN : SInst<"ek", "sil">;
+def VCVT_S32 : SInst<"vcvt_s32", "xd", "fQf">;
+def VCVT_U32 : SInst<"vcvt_u32", "ud", "fQf">;
+def VCVT_F16 : SInst<"vcvt_f16", "hk", "f">;
+def VCVT_N_S32 : SInst<"vcvt_n_s32", "xdi", "fQf">;
+def VCVT_N_U32 : SInst<"vcvt_n_u32", "udi", "fQf">;
+def VCVT_F32 : SInst<"vcvt_f32", "fd", "iUiQiQUi">;
+def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "fd", "h">;
+def VCVT_N_F32 : SInst<"vcvt_n_f32", "fdi", "iUiQiQUi">;
+def VMOVN : IInst<"vmovn", "hk", "silUsUiUl">;
+def VMOVL : SInst<"vmovl", "wd", "csiUcUsUi">;
+def VQMOVN : SInst<"vqmovn", "hk", "silUsUiUl">;
+def VQMOVUN : SInst<"vqmovun", "ek", "sil">;
////////////////////////////////////////////////////////////////////////////////
// E.3.23-24 Table lookup, Extended table lookup
-def VTBL1 : WInst<"ddt", "UccPc">;
-def VTBL2 : WInst<"d2t", "UccPc">;
-def VTBL3 : WInst<"d3t", "UccPc">;
-def VTBL4 : WInst<"d4t", "UccPc">;
-def VTBX1 : WInst<"dddt", "UccPc">;
-def VTBX2 : WInst<"dd2t", "UccPc">;
-def VTBX3 : WInst<"dd3t", "UccPc">;
-def VTBX4 : WInst<"dd4t", "UccPc">;
+def VTBL1 : WInst<"vtbl1", "ddt", "UccPc">;
+def VTBL2 : WInst<"vtbl2", "d2t", "UccPc">;
+def VTBL3 : WInst<"vtbl3", "d3t", "UccPc">;
+def VTBL4 : WInst<"vtbl4", "d4t", "UccPc">;
+def VTBX1 : WInst<"vtbx1", "dddt", "UccPc">;
+def VTBX2 : WInst<"vtbx2", "dd2t", "UccPc">;
+def VTBX3 : WInst<"vtbx3", "dd3t", "UccPc">;
+def VTBX4 : WInst<"vtbx4", "dd4t", "UccPc">;
////////////////////////////////////////////////////////////////////////////////
// E.3.25 Operations with a scalar value
-def VMLA_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
-def VMLAL_LANE : SInst<"wwddi", "siUsUi">;
-def VQDMLAL_LANE : SInst<"wwddi", "si">;
-def VMLS_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
-def VMLSL_LANE : SInst<"wwddi", "siUsUi">;
-def VQDMLSL_LANE : SInst<"wwddi", "si">;
-def VMUL_N : Inst<"dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
-def VMULL_N : SInst<"wda", "siUsUi">;
-def VMULL_LANE : SInst<"wddi", "siUsUi">;
-def VQDMULL_N : SInst<"wda", "si">;
-def VQDMULL_LANE : SInst<"wddi", "si">;
-def VQDMULH_N : SInst<"dda", "siQsQi">;
-def VQDMULH_LANE : SInst<"dddi", "siQsQi">;
-def VQRDMULH_N : SInst<"dda", "siQsQi">;
-def VQRDMULH_LANE : SInst<"dddi", "siQsQi">;
-def VMLA_N : Inst<"ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
-def VMLAL_N : SInst<"wwda", "siUsUi">;
-def VQDMLAL_N : SInst<"wwda", "si">;
-def VMLS_N : Inst<"ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
-def VMLSL_N : SInst<"wwda", "siUsUi">;
-def VQDMLSL_N : SInst<"wwda", "si">;
+def VMLA_LANE : Inst<"vmla_lane", "dddgi", "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
+def VMLAL_LANE : Inst<"vmlal_lane", "wwddi", "siUsUi", OP_MLAL_LN>;
+def VQDMLAL_LANE : Inst<"vqdmlal_lane", "wwddi", "si", OP_QDMLAL_LN>;
+def VMLS_LANE : Inst<"vmls_lane", "dddgi", "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
+def VMLSL_LANE : Inst<"vmlsl_lane", "wwddi", "siUsUi", OP_MLSL_LN>;
+def VQDMLSL_LANE : Inst<"vqdmlsl_lane", "wwddi", "si", OP_QDMLSL_LN>;
+def VMUL_N : Inst<"vmul_n", "dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
+def VMUL_LANE : Inst<"vmul_lane", "ddgi", "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
+def VMULL_N : Inst<"vmull_n", "wda", "siUsUi", OP_MULL_N>;
+def VMULL_LANE : Inst<"vmull_lane", "wddi", "siUsUi", OP_MULL_LN>;
+def VQDMULL_N : SInst<"vqdmull_n", "wda", "si">;
+def VQDMULL_LANE : Inst<"vqdmull_lane", "wddi", "si", OP_QDMULL_LN>;
+def VQDMULH_N : SInst<"vqdmulh_n", "dda", "siQsQi">;
+def VQDMULH_LANE : Inst<"vqdmulh_lane", "ddgi", "siQsQi", OP_QDMULH_LN>;
+def VQRDMULH_N : SInst<"vqrdmulh_n", "dda", "siQsQi">;
+def VQRDMULH_LANE : Inst<"vqrdmulh_lane", "ddgi", "siQsQi", OP_QRDMULH_LN>;
+def VMLA_N : Inst<"vmla_n", "ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
+def VMLAL_N : Inst<"vmlal_n", "wwda", "siUsUi", OP_MLAL_N>;
+def VQDMLAL_N : SInst<"vqdmlal_n", "wwda", "si">;
+def VMLS_N : Inst<"vmls_n", "ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
+def VMLSL_N : Inst<"vmlsl_n", "wwda", "siUsUi", OP_MLSL_N>;
+def VQDMLSL_N : SInst<"vqdmlsl_n", "wwda", "si">;
////////////////////////////////////////////////////////////////////////////////
// E.3.26 Vector Extract
-def VEXT : WInst<"dddi", "cUcPcsUsPsiUilUlQcQUcQPcQsQUsQPsQiQUiQlQUl">;
+def VEXT : WInst<"vext", "dddi",
+ "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">;
////////////////////////////////////////////////////////////////////////////////
// E.3.27 Reverse vector elements (swap endianness)
-def VREV64 : Inst<"dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf", OP_REV64>;
-def VREV32 : Inst<"dd", "csUcUsPcQcQsQUcQUsQPc", OP_REV32>;
-def VREV16 : Inst<"dd", "cUcPcQcQUcQPc", OP_REV16>;
+def VREV64 : Inst<"vrev64", "dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
+ OP_REV64>;
+def VREV32 : Inst<"vrev32", "dd", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
+def VREV16 : Inst<"vrev16", "dd", "cUcPcQcQUcQPc", OP_REV16>;
////////////////////////////////////////////////////////////////////////////////
// E.3.28 Other single operand arithmetic
-def VABS : SInst<"dd", "csifQcQsQiQf">;
-def VQABS : SInst<"dd", "csiQcQsQi">;
-def VNEG : Inst<"dd", "csifQcQsQiQf", OP_NEG>;
-def VQNEG : SInst<"dd", "csiQcQsQi">;
-def VCLS : SInst<"dd", "csiQcQsQi">;
-def VCLZ : IInst<"dd", "csiUcUsUiQcQsQiQUcQUsQUi">;
-def VCNT : WInst<"dd", "UccPcQUcQcQPc">;
-def VRECPE : SInst<"dd", "fUiQfQUi">;
-def VRSQRTE : SInst<"dd", "fUiQfQUi">;
+def VABS : SInst<"vabs", "dd", "csifQcQsQiQf">;
+def VQABS : SInst<"vqabs", "dd", "csiQcQsQi">;
+def VNEG : Inst<"vneg", "dd", "csifQcQsQiQf", OP_NEG>;
+def VQNEG : SInst<"vqneg", "dd", "csiQcQsQi">;
+def VCLS : SInst<"vcls", "dd", "csiQcQsQi">;
+def VCLZ : IInst<"vclz", "dd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VCNT : WInst<"vcnt", "dd", "UccPcQUcQcQPc">;
+def VRECPE : SInst<"vrecpe", "dd", "fUiQfQUi">;
+def VRSQRTE : SInst<"vrsqrte", "dd", "fUiQfQUi">;
////////////////////////////////////////////////////////////////////////////////
// E.3.29 Logical operations
-def VMVN : Inst<"dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
-def VAND : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
-def VORR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
-def VEOR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
-def VBIC : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
-def VORN : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
-def VBSL : Inst<"dudd", "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
+def VMVN : Inst<"vmvn", "dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
+def VAND : Inst<"vand", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
+def VORR : Inst<"vorr", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
+def VEOR : Inst<"veor", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
+def VBIC : Inst<"vbic", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
+def VORN : Inst<"vorn", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
+def VBSL : Inst<"vbsl", "dudd",
+ "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
////////////////////////////////////////////////////////////////////////////////
// E.3.30 Transposition operations
-def VTRN: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
-def VZIP: WInst<"2dd", "csUcUsfPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
-def VUZP: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VTRN : WInst<"vtrn", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VZIP : WInst<"vzip", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VUZP : WInst<"vuzp", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.31 Vector reinterpret cast operations
+def VREINTERPRET
+ : Inst<"vreinterpret", "dd",
+ "csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT>;
+
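The arm_neon.td changes above thread the public intrinsic name through each record as a new first argument (previously the generator derived it) and correct a few prototypes along the way (VSHL's second operand becomes a signed shift vector, VDUP_LANE gains OP_DUP_LN, VEXT and VREV32 pick up missing types). At the user level these records become the <arm_neon.h> intrinsics. A minimal sketch, assuming a NEON-capable ARM target, using intrinsics generated from records visible above:

    #include <arm_neon.h>

    // VMAX + VDUP_N: clamp every lane of a q-register to a lower bound.
    int16x8_t clamp_low(int16x8_t v, int16_t lo) {
      return vmaxq_s16(v, vdupq_n_s16(lo));
    }

    // VQSHRUN_N: shift right by 8, saturate to unsigned, narrow to 8-bit lanes.
    uint8x8_t high_bytes_saturated(int16x8_t v) {
      return vqshrun_n_s16(v, 8);
    }
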
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/Checkers/LocalCheckers.h b/contrib/llvm/tools/clang/include/clang/Checker/Checkers/LocalCheckers.h
deleted file mode 100644
index 4a9e381..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/Checkers/LocalCheckers.h
+++ /dev/null
@@ -1,61 +0,0 @@
-//==- LocalCheckers.h - Intra-Procedural+Flow-Sensitive Checkers -*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interface to call a set of intra-procedural (local)
-// checkers that use flow/path-sensitive analyses to find bugs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_LOCALCHECKERS_H
-#define LLVM_CLANG_ANALYSIS_LOCALCHECKERS_H
-
-namespace clang {
-
-class CFG;
-class Decl;
-class Diagnostic;
-class ASTContext;
-class PathDiagnosticClient;
-class GRTransferFuncs;
-class BugType;
-class LangOptions;
-class ParentMap;
-class LiveVariables;
-class BugReporter;
-class ObjCImplementationDecl;
-class LangOptions;
-class GRExprEngine;
-class TranslationUnitDecl;
-
-void CheckDeadStores(CFG &cfg, LiveVariables &L, ParentMap &map,
- BugReporter& BR);
-
-GRTransferFuncs* MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
- const LangOptions& lopts);
-
-void CheckObjCDealloc(const ObjCImplementationDecl* D, const LangOptions& L,
- BugReporter& BR);
-
-void CheckObjCInstMethSignature(const ObjCImplementationDecl *ID,
- BugReporter& BR);
-
-void CheckObjCUnusedIvar(const ObjCImplementationDecl *D, BugReporter& BR);
-
-void RegisterAppleChecks(GRExprEngine& Eng, const Decl &D);
-void RegisterExperimentalChecks(GRExprEngine &Eng);
-void RegisterExperimentalInternalChecks(GRExprEngine &Eng);
-
-void CheckLLVMConventions(TranslationUnitDecl &TU, BugReporter &BR);
-void CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR);
-void CheckSizeofPointer(const Decl *D, BugReporter &BR);
-
-void RegisterCallInliner(GRExprEngine &Eng);
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/ManagerRegistry.h b/contrib/llvm/tools/clang/include/clang/Checker/ManagerRegistry.h
deleted file mode 100644
index ebfd28e..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/ManagerRegistry.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===-- ManagerRegistry.h - Pluggable analyzer module registry --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ManagerRegistry and Register* classes.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_MANAGER_REGISTRY_H
-#define LLVM_CLANG_ANALYSIS_MANAGER_REGISTRY_H
-
-#include "clang/Checker/PathSensitive/GRState.h"
-
-namespace clang {
-
-/// ManagerRegistry - This class records manager creators registered at
-/// runtime. The information is communicated to AnalysisManager through static
-/// members. Better design is expected.
-
-class ManagerRegistry {
-public:
- static StoreManagerCreator StoreMgrCreator;
- static ConstraintManagerCreator ConstraintMgrCreator;
-};
-
-/// RegisterConstraintManager - This class is used to setup the constraint
-/// manager of the static analyzer. The constructor takes a creator function
-/// pointer for creating the constraint manager.
-///
-/// It is used like this:
-///
-/// class MyConstraintManager {};
-/// ConstraintManager* CreateMyConstraintManager(GRStateManager& statemgr) {
-/// return new MyConstraintManager(statemgr);
-/// }
-/// RegisterConstraintManager X(CreateMyConstraintManager);
-
-class RegisterConstraintManager {
-public:
- RegisterConstraintManager(ConstraintManagerCreator CMC) {
- assert(ManagerRegistry::ConstraintMgrCreator == 0
- && "ConstraintMgrCreator already set!");
- ManagerRegistry::ConstraintMgrCreator = CMC;
- }
-};
-
-}
-#endif
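ManagerRegistry.h is removed along with the pluggable-manager scheme it described. For reference, the pattern it supported, expanded from the usage example in the header's own comment, was a file-scope object whose constructor installs a creator callback; roughly (MyConstraintManager is the header's placeholder name, with its required overrides elided):

    #include "clang/Checker/ManagerRegistry.h"
    using namespace clang;

    class MyConstraintManager : public ConstraintManager {
    public:
      explicit MyConstraintManager(GRStateManager &) {}
      // ... pure-virtual ConstraintManager methods implemented here ...
    };

    ConstraintManager *CreateMyConstraintManager(GRStateManager &statemgr) {
      return new MyConstraintManager(statemgr);
    }

    // Runs during static initialization and installs the hook, asserting
    // that no other constraint manager was registered first.
    static RegisterConstraintManager X(CreateMyConstraintManager);
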
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRAuditor.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRAuditor.h
deleted file mode 100644
index 015c82e..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRAuditor.h
+++ /dev/null
@@ -1,35 +0,0 @@
-//==- GRAuditor.h - Observers of the creation of ExplodedNodes------*- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines GRAuditor and its primary subclasses, an interface
-// to audit the creation of ExplodedNodes. This interface can be used
-// to implement simple checkers that do not mutate analysis state but
-// instead operate by performing simple logical checks at key monitoring
-// locations (e.g., function calls).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_GRAUDITOR
-#define LLVM_CLANG_ANALYSIS_GRAUDITOR
-
-namespace clang {
-
-class ExplodedNode;
-class GRStateManager;
-
-class GRAuditor {
-public:
- virtual ~GRAuditor() {}
- virtual bool Audit(ExplodedNode* N, GRStateManager& M) = 0;
-};
-
-
-} // end clang namespace
-
-#endif
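GRAuditor.h goes with it. The contract was a single hook, invoked as the engine creates ExplodedNodes; a minimal conforming subclass would have looked roughly like this (the class name is hypothetical):

    #include "clang/Checker/PathSensitive/GRAuditor.h"

    class LoggingAuditor : public clang::GRAuditor {
    public:
      // Invoked for each newly created node; the state manager gives access
      // to the node's GRState for simple, non-mutating checks.
      virtual bool Audit(clang::ExplodedNode *N, clang::GRStateManager &M) {
        // ... inspect N and report; never mutate analysis state ...
        return false;
      }
    };
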
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSimpleAPICheck.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSimpleAPICheck.h
deleted file mode 100644
index 6d85e5f..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSimpleAPICheck.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// GRCheckAPI.h - Simple API checks based on GRAuditor ------------*- C++ -*--//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interface for building simple, path-sensitive checks
-// that are stateless and only emit warnings at errors that occur at
-// CallExpr or ObjCMessageExpr.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_GRAPICHECKS
-#define LLVM_CLANG_ANALYSIS_GRAPICHECKS
-
-#include "clang/Checker/PathSensitive/GRAuditor.h"
-
-namespace clang {
-
-class GRSimpleAPICheck : public GRAuditor {
-public:
- GRSimpleAPICheck() {}
- virtual ~GRSimpleAPICheck() {}
-};
-
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h
deleted file mode 100644
index 1904835..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h
+++ /dev/null
@@ -1,107 +0,0 @@
-//== GRSubEngine.h - Interface of the subengine of GRCoreEngine ----*- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interface of a subengine of the GRCoreEngine.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GRSUBENGINE_H
-#define LLVM_CLANG_ANALYSIS_GRSUBENGINE_H
-
-#include "clang/Checker/PathSensitive/SVals.h"
-
-namespace clang {
-
-class AnalysisManager;
-class CFGBlock;
-class CFGElement;
-class ExplodedNode;
-class GRState;
-class GRStateManager;
-class GRBlockCounter;
-class GRStmtNodeBuilder;
-class GRBranchNodeBuilder;
-class GRIndirectGotoNodeBuilder;
-class GRSwitchNodeBuilder;
-class GREndPathNodeBuilder;
-class GRCallEnterNodeBuilder;
-class GRCallExitNodeBuilder;
-class LocationContext;
-class MemRegion;
-class Stmt;
-
-class GRSubEngine {
-public:
- virtual ~GRSubEngine() {}
-
- virtual const GRState* getInitialState(const LocationContext *InitLoc) = 0;
-
- virtual AnalysisManager &getAnalysisManager() = 0;
-
- virtual GRStateManager &getStateManager() = 0;
-
- /// Called by GRCoreEngine. Used to generate new successor
- /// nodes by processing the 'effects' of a block-level statement.
- virtual void ProcessStmt(const CFGElement E, GRStmtNodeBuilder& builder) = 0;
-
- /// Called by GRCoreEngine when start processing
- /// a CFGBlock. This method returns true if the analysis should continue
- /// exploring the given path, and false otherwise.
- virtual bool ProcessBlockEntrance(const CFGBlock* B, const ExplodedNode *Pred,
- GRBlockCounter BC) = 0;
-
- /// Called by GRCoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a branch condition.
- virtual void ProcessBranch(const Stmt* Condition, const Stmt* Term,
- GRBranchNodeBuilder& builder) = 0;
-
- /// Called by GRCoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a computed goto jump.
- virtual void ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) = 0;
-
- /// Called by GRCoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a switch statement.
- virtual void ProcessSwitch(GRSwitchNodeBuilder& builder) = 0;
-
- /// ProcessEndPath - Called by GRCoreEngine. Used to generate end-of-path
- /// nodes when the control reaches the end of a function.
- virtual void ProcessEndPath(GREndPathNodeBuilder& builder) = 0;
-
- // Generate the entry node of the callee.
- virtual void ProcessCallEnter(GRCallEnterNodeBuilder &builder) = 0;
-
- // Generate the first post callsite node.
- virtual void ProcessCallExit(GRCallExitNodeBuilder &builder) = 0;
-
- /// Called by ConstraintManager. Used to call checker-specific
- /// logic for handling assumptions on symbolic values.
- virtual const GRState* ProcessAssume(const GRState *state,
- SVal cond, bool assumption) = 0;
-
- /// WantsRegionChangeUpdate - Called by GRStateManager to determine if a
- /// region change should trigger a ProcessRegionChanges update.
- virtual bool WantsRegionChangeUpdate(const GRState* state) = 0;
-
- /// ProcessRegionChanges - Called by GRStateManager whenever a change is made
- /// to the store. Used to update checkers that track region values.
- virtual const GRState* ProcessRegionChanges(const GRState* state,
- const MemRegion* const *Begin,
- const MemRegion* const *End) = 0;
-
- inline const GRState* ProcessRegionChange(const GRState* state,
- const MemRegion* MR) {
- return ProcessRegionChanges(state, &MR, &MR+1);
- }
-
- /// Called by GRCoreEngine when the analysis worklist is either empty or the
- // maximum number of analysis steps have been reached.
- virtual void ProcessEndWorklist(bool hasWorkRemaining) = 0;
-};
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h
deleted file mode 100644
index 320b7f7..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//== GRTransferFuncs.h - Path-Sens. Transfer Functions Interface -*- C++ -*--=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines GRTransferFuncs, which provides a base-class that
-// defines an interface for transfer functions used by GRExprEngine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_GRTF
-#define LLVM_CLANG_ANALYSIS_GRTF
-
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/SVals.h"
-#include <vector>
-
-namespace clang {
-class ExplodedNode;
-class ExplodedNodeSet;
-class GREndPathNodeBuilder;
-class GRExprEngine;
-class GRStmtNodeBuilder;
-class GRStmtNodeBuilderRef;
-class ObjCMessageExpr;
-
-class GRTransferFuncs {
-public:
- GRTransferFuncs() {}
- virtual ~GRTransferFuncs() {}
-
- virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {}
- virtual void RegisterChecks(GRExprEngine& Eng) {}
-
-
- // Calls.
-
- virtual void EvalCall(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
- const CallExpr* CE, SVal L,
- ExplodedNode* Pred) {}
-
- virtual void EvalObjCMessageExpr(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
- const ObjCMessageExpr* ME,
- ExplodedNode* Pred,
- const GRState *state) {}
-
- // Stores.
-
- virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {}
-
- // End-of-path and dead symbol notification.
-
- virtual void EvalEndPath(GRExprEngine& Engine,
- GREndPathNodeBuilder& Builder) {}
-
-
- virtual void EvalDeadSymbols(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
- ExplodedNode* Pred,
- const GRState* state,
- SymbolReaper& SymReaper) {}
-
- // Return statements.
- virtual void EvalReturn(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
- const ReturnStmt* S,
- ExplodedNode* Pred) {}
-
- // Assumptions.
- virtual const GRState* EvalAssume(const GRState *state,
- SVal Cond, bool Assumption) {
- return state;
- }
-};
-} // end clang namespace
-
-#endif
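GRTransferFuncs follows. Every hook has an empty inline default, so a concrete transfer-function object overrode only what it modeled; for example, intercepting calls (a sketch, class name hypothetical, signature as declared above):

    #include "clang/Checker/PathSensitive/GRTransferFuncs.h"

    class MyTransferFuncs : public clang::GRTransferFuncs {
    public:
      // Bind the call's effects into new states and add the resulting
      // nodes to Dst.
      virtual void EvalCall(clang::ExplodedNodeSet &Dst,
                            clang::GRExprEngine &Engine,
                            clang::GRStmtNodeBuilder &Builder,
                            const clang::CallExpr *CE, clang::SVal L,
                            clang::ExplodedNode *Pred) {
        // ... model the callee here ...
      }
    };
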
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRWorkList.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRWorkList.h
deleted file mode 100644
index 315b614..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRWorkList.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//==- GRWorkList.h - Worklist class used by GRCoreEngine -----------*- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines GRWorkList, a pure virtual class that represents an opaque
-// worklist used by GRCoreEngine to explore the reachability state space.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_GRWORKLIST
-#define LLVM_CLANG_ANALYSIS_GRWORKLIST
-
-#include "clang/Checker/PathSensitive/GRBlockCounter.h"
-#include <cstddef>
-
-namespace clang {
-
-class CFGBlock;
-class ExplodedNode;
-class ExplodedNodeImpl;
-
-class GRWorkListUnit {
- ExplodedNode* Node;
- GRBlockCounter Counter;
- const CFGBlock* Block;
- unsigned BlockIdx; // This is the index of the next statement.
-
-public:
- GRWorkListUnit(ExplodedNode* N, GRBlockCounter C,
- const CFGBlock* B, unsigned idx)
- : Node(N),
- Counter(C),
- Block(B),
- BlockIdx(idx) {}
-
- explicit GRWorkListUnit(ExplodedNode* N, GRBlockCounter C)
- : Node(N),
- Counter(C),
- Block(NULL),
- BlockIdx(0) {}
-
- ExplodedNode* getNode() const { return Node; }
- GRBlockCounter getBlockCounter() const { return Counter; }
- const CFGBlock* getBlock() const { return Block; }
- unsigned getIndex() const { return BlockIdx; }
-};
-
-class GRWorkList {
- GRBlockCounter CurrentCounter;
-public:
- virtual ~GRWorkList();
- virtual bool hasWork() const = 0;
-
- virtual void Enqueue(const GRWorkListUnit& U) = 0;
-
- void Enqueue(ExplodedNode* N, const CFGBlock* B, unsigned idx) {
- Enqueue(GRWorkListUnit(N, CurrentCounter, B, idx));
- }
-
- void Enqueue(ExplodedNode* N) {
- Enqueue(GRWorkListUnit(N, CurrentCounter));
- }
-
- virtual GRWorkListUnit Dequeue() = 0;
-
- void setBlockCounter(GRBlockCounter C) { CurrentCounter = C; }
- GRBlockCounter getBlockCounter() const { return CurrentCounter; }
-
- static GRWorkList *MakeDFS();
- static GRWorkList *MakeBFS();
- static GRWorkList *MakeBFSBlockDFSContents();
-};
-} // end clang namespace
-#endif
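The factories at the bottom (MakeDFS, MakeBFS, MakeBFSBlockDFSContents) point at the intended exploration strategies. For the DFS flavor a LIFO container suffices; a sketch under that assumption:

    #include "clang/Checker/PathSensitive/GRWorkList.h"
    #include "llvm/ADT/SmallVector.h"
    #include <cassert>

    namespace {
    class DFSWorkList : public clang::GRWorkList {
      llvm::SmallVector<clang::GRWorkListUnit, 20> Stack;
    public:
      virtual bool hasWork() const { return !Stack.empty(); }
      virtual void Enqueue(const clang::GRWorkListUnit &U) {
        Stack.push_back(U);
      }
      virtual clang::GRWorkListUnit Dequeue() {
        assert(!Stack.empty() && "dequeue from empty worklist");
        clang::GRWorkListUnit U = Stack.back();  // LIFO: newest node first
        Stack.pop_back();
        return U;
      }
    };
    } // end anonymous namespace
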
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h
deleted file mode 100644
index 9192ca7..0000000
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// SValuator.h - Construction of SVals from evaluating expressions -*- C++ -*---
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines SValuator, a class that defines the interface for
-// "symbolical evaluators" which construct an SVal from an expression.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_ANALYSIS_SVALUATOR
-#define LLVM_CLANG_ANALYSIS_SVALUATOR
-
-#include "clang/AST/Expr.h"
-#include "clang/Checker/PathSensitive/SVals.h"
-
-namespace clang {
-
-class GRState;
-class ValueManager;
-
-class SValuator {
- friend class ValueManager;
-protected:
- ValueManager &ValMgr;
-
-public:
-  // FIXME: Make these protected again once RegionStoreManager correctly
-  // handles loads from differing bound value types.
- virtual SVal EvalCastNL(NonLoc val, QualType castTy) = 0;
- virtual SVal EvalCastL(Loc val, QualType castTy) = 0;
-
-public:
- SValuator(ValueManager &valMgr) : ValMgr(valMgr) {}
- virtual ~SValuator() {}
-
- SVal EvalCast(SVal V, QualType castTy, QualType originalType);
-
- virtual SVal EvalMinus(NonLoc val) = 0;
-
- virtual SVal EvalComplement(NonLoc val) = 0;
-
- virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode Op,
- NonLoc lhs, NonLoc rhs, QualType resultTy) = 0;
-
- virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode Op,
- Loc lhs, Loc rhs, QualType resultTy) = 0;
-
- virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode Op,
- Loc lhs, NonLoc rhs, QualType resultTy) = 0;
-
- /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
- /// (integer) value, that value is returned. Otherwise, returns NULL.
- virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V) = 0;
-
- SVal EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
- SVal L, SVal R, QualType T);
-
- DefinedOrUnknownSVal EvalEQ(const GRState *ST, DefinedOrUnknownSVal L,
- DefinedOrUnknownSVal R);
-};
-
-SValuator* CreateSimpleSValuator(ValueManager &valMgr);
-
-} // end clang namespace
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
index cecfcda..052c660 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
@@ -14,18 +14,25 @@
#include "llvm/ADT/OwningPtr.h"
namespace llvm {
+ class LLVMContext;
class Module;
}
namespace clang {
+class BackendConsumer;
class CodeGenAction : public ASTFrontendAction {
private:
unsigned Act;
llvm::OwningPtr<llvm::Module> TheModule;
+ llvm::LLVMContext *VMContext;
+ bool OwnsVMContext;
protected:
- CodeGenAction(unsigned _Act);
+ /// Create a new code generation action. If the optional \arg _VMContext
+ /// parameter is supplied, the action uses it without taking ownership,
+ /// otherwise it creates a fresh LLVM context and takes ownership.
+ CodeGenAction(unsigned _Act, llvm::LLVMContext *_VMContext = 0);
virtual bool hasIRSupport() const;
@@ -42,36 +49,41 @@ public:
/// takeModule - Take the generated LLVM module, for use after the action has
/// been run. The result may be null on failure.
llvm::Module *takeModule();
+
+ /// Take the LLVM context used by this action.
+ llvm::LLVMContext *takeLLVMContext();
+
+ BackendConsumer *BEConsumer;
};
class EmitAssemblyAction : public CodeGenAction {
public:
- EmitAssemblyAction();
+ EmitAssemblyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitBCAction : public CodeGenAction {
public:
- EmitBCAction();
+ EmitBCAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitLLVMAction : public CodeGenAction {
public:
- EmitLLVMAction();
+ EmitLLVMAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitLLVMOnlyAction : public CodeGenAction {
public:
- EmitLLVMOnlyAction();
+ EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitCodeGenOnlyAction : public CodeGenAction {
public:
- EmitCodeGenOnlyAction();
+ EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitObjAction : public CodeGenAction {
public:
- EmitObjAction();
+ EmitObjAction(llvm::LLVMContext *_VMContext = 0);
};
}
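The upshot of the CodeGenAction change: a caller can now supply an LLVMContext, which the action borrows without taking ownership, or let the action create one and claim it afterwards through takeLLVMContext(). A sketch of the borrowed-context mode, assuming a fully configured CompilerInstance:

    #include "clang/CodeGen/CodeGenAction.h"
    #include "clang/Frontend/CompilerInstance.h"
    #include "llvm/LLVMContext.h"

    // Emit IR into a caller-owned context so the module can outlive the action.
    llvm::Module *emitIR(clang::CompilerInstance &CI, llvm::LLVMContext &Ctx) {
      clang::EmitLLVMOnlyAction Act(&Ctx);  // borrowed: action won't delete Ctx
      if (!CI.ExecuteAction(Act))
        return 0;
      return Act.takeModule();              // caller owns; lifetime tied to Ctx
    }
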
diff --git a/contrib/llvm/tools/clang/include/clang/Config/config.h.cmake b/contrib/llvm/tools/clang/include/clang/Config/config.h.cmake
new file mode 100644
index 0000000..5f13d2f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Config/config.h.cmake
@@ -0,0 +1,17 @@
+/* Relative directory for resource files */
+#define CLANG_RESOURCE_DIR "${CLANG_RESOURCE_DIR}"
+
+/* 32 bit multilib directory. */
+#define CXX_INCLUDE_32BIT_DIR "${CXX_INCLUDE_32BIT_DIR}"
+
+/* 64 bit multilib directory. */
+#define CXX_INCLUDE_64BIT_DIR "${CXX_INCLUDE_64BIT_DIR}"
+
+/* Arch the libstdc++ headers. */
+#define CXX_INCLUDE_ARCH "${CXX_INCLUDE_ARCH}"
+
+/* Directory with the libstdc++ headers. */
+#define CXX_INCLUDE_ROOT "${CXX_INCLUDE_ROOT}"
+
+/* Directories clang will search for headers */
+#define C_INCLUDE_DIRS "${C_INCLUDE_DIRS}"
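This is the CMake twin of the autoconf-generated config.h: configure_file() substitutes the ${...} placeholders at configuration time, and the macros are then consumed as ordinary string constants. A hedged sketch of typical use (the helper is illustrative, not from the tree):

    #include "clang/Config/config.h"
    #include <string>

    // CLANG_RESOURCE_DIR is defined above as a path *relative* to the
    // install prefix, so resolve it against the prefix at runtime.
    std::string resourceDir(const std::string &Prefix) {
      return Prefix + "/" + CLANG_RESOURCE_DIR;
    }
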
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
index 257b653..0fcf821 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -288,7 +288,7 @@ namespace driver {
unsigned NumInputArgStrings;
public:
- InputArgList(const char **ArgBegin, const char **ArgEnd);
+ InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);
~InputArgList();
virtual const char *getArgString(unsigned Index) const {
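The InputArgList change is pure const-correctness: const char* const* accepts both mutable argv-style arrays and arrays of string literals, while the old const char** form forced a cast for the latter. For instance:

    #include "clang/Driver/ArgList.h"

    void demo() {
      // An array of string literals now binds to the constructor directly.
      static const char *const Argv[] = { "-triple", "i386-unknown-freebsd9.0" };
      clang::driver::InputArgList Args(Argv, Argv + 2);
      // ... query Args ...
    }
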
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
index 5c08dc6..50472ff 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
@@ -46,6 +46,7 @@ def _help : Flag<"--help">, Alias<help>;
def version : Flag<"-version">,
HelpText<"Print the assembler version">;
def _version : Flag<"--version">, Alias<version>;
+def v : Flag<"-v">, Alias<version>;
// Generic forwarding to LLVM options. This should only be used for debugging
// and experimental features.
@@ -69,3 +70,6 @@ def show_inst : Flag<"-show-inst">,
def relax_all : Flag<"-relax-all">,
HelpText<"Relax all fixups (for performance testing)">;
+
+def no_exec_stack : Flag<"--noexecstack">,
+  HelpText<"Mark the file as not needing an executable stack">;
\ No newline at end of file
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index a4a04fe..57107ba 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -36,34 +36,20 @@ def triple_EQ : Joined<"-triple=">, Alias<triple>;
// Analyzer Options
//===----------------------------------------------------------------------===//
-def analysis_CFGDump : Flag<"-cfg-dump">,
- HelpText<"Display Control-Flow Graphs">;
-def analysis_CFGView : Flag<"-cfg-view">,
- HelpText<"View Control-Flow Graphs using GraphViz">;
def analysis_UnoptimizedCFG : Flag<"-unoptimized-cfg">,
HelpText<"Generate unoptimized CFGs for all analyses">;
-def analysis_DisplayLiveVariables : Flag<"-dump-live-variables">,
- HelpText<"Print results of live variable analysis">;
-def analysis_LLVMConventionChecker : Flag<"-analyzer-check-llvm-conventions">,
- HelpText<"Check code for LLVM codebase conventions (domain-specific)">;
-def analysis_SecuritySyntacticChecks : Flag<"-analyzer-check-security-syntactic">,
- HelpText<"Perform quick security checks that require no data flow">;
-def analysis_WarnDeadStores : Flag<"-analyzer-check-dead-stores">,
- HelpText<"Warn about stores to dead variables">;
+def analysis_CFGAddImplicitDtors : Flag<"-cfg-add-implicit-dtors">,
+ HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
+def analysis_CFGAddInitializers : Flag<"-cfg-add-initializers">,
+ HelpText<"Add C++ initializers to CFGs for all analyses">;
def analysis_WarnUninitVals : Flag<"-warn-uninit-values">,
HelpText<"Warn about uses of uninitialized variables">;
-def analysis_WarnObjCMethSigs : Flag<"-analyzer-check-objc-methodsigs">,
- HelpText<"Warn about Objective-C method signatures with type incompatibilities">;
-def analysis_WarnObjCDealloc : Flag<"-analyzer-check-objc-missing-dealloc">,
- HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">;
-def analysis_WarnObjCUnusedIvars : Flag<"-analyzer-check-objc-unused-ivars">,
- HelpText<"Warn about private ivars that are never used">;
def analysis_ObjCMemChecker : Flag<"-analyzer-check-objc-mem">,
HelpText<"Run the [Core] Foundation reference count checker">;
-def analysis_WarnSizeofPointer : Flag<"-warn-sizeof-pointer">,
- HelpText<"Warn about unintended use of sizeof() on pointer expressions">;
-def analysis_WarnIdempotentOps : Flag<"-analyzer-check-idempotent-operations">,
- HelpText<"Warn about idempotent operations">;
+def analysis_AnalyzerStats : Flag<"-analyzer-stats">,
+ HelpText<"Emit warnings with analyzer statistics">;
+def analysis_WarnBufferOverflows : Flag<"-analyzer-check-buffer-overflows">,
+ HelpText<"Warn about buffer overflows">;
def analyzer_store : Separate<"-analyzer-store">,
HelpText<"Source Code Analysis - Abstract Memory Store Models">;
@@ -82,7 +68,7 @@ def analyzer_output_EQ : Joined<"-analyzer-output=">,
def analyzer_opt_analyze_headers : Flag<"-analyzer-opt-analyze-headers">,
HelpText<"Force the static analyzer to analyze functions defined in header files">;
def analyzer_opt_analyze_nested_blocks : Flag<"-analyzer-opt-analyze-nested-blocks">,
- HelpText<"Analyze the definitions of blocks in addition to functions">;
+ HelpText<"Analyze the definitions of blocks in addition to functions">;
def analyzer_display_progress : Flag<"-analyzer-display-progress">,
HelpText<"Emit verbose output about the analyzer's progress">;
def analyzer_experimental_checks : Flag<"-analyzer-experimental-checks">,
@@ -97,6 +83,8 @@ def analyzer_eagerly_assume : Flag<"-analyzer-eagerly-assume">,
HelpText<"Eagerly assume the truth/falseness of some symbolic constraints">;
def analyzer_no_purge_dead : Flag<"-analyzer-no-purge-dead">,
HelpText<"Don't remove dead symbols, bindings, and constraints before processing a statement">;
+def analyzer_no_eagerly_trim_egraph : Flag<"-analyzer-no-eagerly-trim-egraph">,
+ HelpText<"Don't eagerly remove uninteresting ExplodedNodes from the ExplodedGraph">;
def trim_egraph : Flag<"-trim-egraph">,
HelpText<"Only show error-related paths in the analysis graph">;
def analyzer_viz_egraph_graphviz : Flag<"-analyzer-viz-egraph-graphviz">,
@@ -106,10 +94,20 @@ def analyzer_viz_egraph_ubigraph : Flag<"-analyzer-viz-egraph-ubigraph">,
def analyzer_inline_call : Flag<"-analyzer-inline-call">,
HelpText<"Experimental transfer function inlining callees when its definition is available.">;
def analyzer_max_nodes : Separate<"-analyzer-max-nodes">,
- HelpText<"The maximum number of nodes the analyzer can generate">;
+ HelpText<"The maximum number of nodes the analyzer can generate (150000 default, 0 = no limit)">;
def analyzer_max_loop : Separate<"-analyzer-max-loop">,
HelpText<"The maximum number of times the analyzer will go through a loop">;
+def analyzer_checker : Separate<"-analyzer-checker">,
+ HelpText<"Choose analyzer checkers to enable">;
+def analyzer_checker_EQ : Joined<"-analyzer-checker=">,
+ Alias<analyzer_checker>;
+
+def analyzer_disable_checker : Separate<"-analyzer-disable-checker">,
+ HelpText<"Choose analyzer checkers to disable">;
+def analyzer_disable_checker_EQ : Joined<"-analyzer-disable-checker=">,
+ Alias<analyzer_disable_checker>;
+
//===----------------------------------------------------------------------===//
// CodeGen Options
//===----------------------------------------------------------------------===//
@@ -125,6 +123,8 @@ def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
def g : Flag<"-g">, HelpText<"Generate source level debug information">;
def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
HelpText<"Generate runtime checks for undefined behavior.">;
+def flimit_debug_info : Flag<"-flimit-debug-info">,
+ HelpText<"Limit debug information produced to reduce size of debug binary">;
def fno_common : Flag<"-fno-common">,
HelpText<"Compile common globals like normal definitions">;
def no_implicit_float : Flag<"-no-implicit-float">,
@@ -143,6 +143,8 @@ def fdata_sections : Flag<"-fdata-sections">,
HelpText<"Place each data in its own section (ELF Only)">;
def funroll_loops : Flag<"-funroll-loops">,
HelpText<"Turn on loop unroller">;
+def relaxed_aliasing : Flag<"-relaxed-aliasing">,
+ HelpText<"Turn off Type Based Alias Analysis">;
def masm_verbose : Flag<"-masm-verbose">,
HelpText<"Generate verbose assembly output">;
def mcode_model : Separate<"-mcode-model">,
@@ -161,6 +163,8 @@ def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">,
HelpText<"Omit frame pointer setup for leaf functions.">;
def msoft_float : Flag<"-msoft-float">,
HelpText<"Use software floating point">;
+def mregparm : Separate<"-mregparm">,
+ HelpText<"Limit the number of registers available for integer arguments">;
def mrelax_all : Flag<"-mrelax-all">,
HelpText<"Relax all machine instructions">;
def mrelocation_model : Separate<"-mrelocation-model">,
@@ -169,8 +173,11 @@ def munwind_tables : Flag<"-munwind-tables">,
HelpText<"Generate unwinding tables for all functions">;
def mconstructor_aliases : Flag<"-mconstructor-aliases">,
HelpText<"Emit complete constructors and destructors as aliases when possible">;
+def mms_bitfields : Flag<"-mms-bitfields">,
+ HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
def O : Joined<"-O">, HelpText<"Optimization level">;
def Os : Flag<"-Os">, HelpText<"Optimize for size">;
+def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">;
//===----------------------------------------------------------------------===//
// Dependency Output Options
@@ -180,6 +187,10 @@ def dependency_file : Separate<"-dependency-file">,
HelpText<"Filename (or -) to write dependency output to">;
def sys_header_deps : Flag<"-sys-header-deps">,
HelpText<"Include system headers in dependency output">;
+def header_include_file : Separate<"-header-include-file">,
+ HelpText<"Filename (or -) to write header include output to">;
+def H : Flag<"-H">,
+ HelpText<"Show header includes and nesting depth">;
def MQ : Separate<"-MQ">, HelpText<"Specify target to quote for dependency">;
def MT : Separate<"-MT">, HelpText<"Specify target for dependency">;
def MP : Flag<"-MP">,
@@ -203,7 +214,6 @@ def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">,
HelpText<"Do not include source line and caret with diagnostics">;
def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">,
HelpText<"Do not include fixit information in diagnostics">;
-def fdiagnostics_binary : Flag<"-fdiagnostics-binary">;
def w : Flag<"-w">, HelpText<"Suppress all warnings">;
def pedantic : Flag<"-pedantic">;
def pedantic_errors : Flag<"-pedantic-errors">;
@@ -221,7 +231,7 @@ def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">,
HelpText<"Print diagnostic name with mappable diagnostics">;
def fdiagnostics_show_category : Separate<"-fdiagnostics-show-category">,
HelpText<"Print diagnostic category">;
-
+
def ftabstop : Separate<"-ftabstop">, MetaVarName<"<N>">,
HelpText<"Set the tab stop distance.">;
def ferror_limit : Separate<"-ferror-limit">, MetaVarName<"<N>">,
@@ -260,8 +270,6 @@ def remap_file : Separate<"-remap-file">,
HelpText<"Replace the contents of the <from> file with the contents of the <to> file">;
def code_completion_at_EQ : Joined<"-code-completion-at=">,
Alias<code_completion_at>;
-def no_code_completion_debug_printer : Flag<"-no-code-completion-debug-printer">,
- HelpText<"Don't use the \"debug\" code-completion print">;
def code_completion_macros : Flag<"-code-completion-macros">,
HelpText<"Include macros in code-completion results">;
def code_completion_patterns : Flag<"-code-completion-patterns">,
@@ -274,17 +282,16 @@ def help : Flag<"-help">,
HelpText<"Print this help text">;
def _help : Flag<"--help">, Alias<help>;
def x : Separate<"-x">, HelpText<"Input language type">;
-def cxx_inheritance_view : Separate<"-cxx-inheritance-view">,
- MetaVarName<"<class name>">,
- HelpText<"View C++ inheritance for a specified class">;
def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
def load : Separate<"-load">, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
def plugin : Separate<"-plugin">, MetaVarName<"<name>">,
- HelpText<"Use the named plugin action (use \"help\" to list available options)">;
-def plugin_arg : JoinedAndSeparate<"-plugin-arg-">,
+ HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
+def plugin_arg : JoinedAndSeparate<"-plugin-arg-">,
MetaVarName<"<name> <arg>">,
HelpText<"Pass <arg> to plugin <name>">;
+def add_plugin : Separate<"-add-plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action in addition to the default action">;
def resource_dir : Separate<"-resource-dir">,
HelpText<"The directory which holds the compiler resource files">;
def version : Flag<"-version">,
@@ -323,6 +330,8 @@ def ast_print_xml : Flag<"-ast-print-xml">,
HelpText<"Build ASTs and then print them in XML format">;
def ast_dump : Flag<"-ast-dump">,
HelpText<"Build ASTs and then debug dump them">;
+def ast_dump_xml : Flag<"-ast-dump-xml">,
+ HelpText<"Build ASTs and then debug dump them in a verbose XML format">;
def ast_view : Flag<"-ast-view">,
HelpText<"Build ASTs and view them with GraphViz">;
def boostcon : Flag<"-boostcon">,
@@ -359,6 +368,11 @@ def create_module : Flag<"-create-module">,
def import_module : Separate<"-import-module">,
HelpText<"Import a module definition file">;
+def working_directory : JoinedOrSeparate<"-working-directory">,
+ HelpText<"Resolve file paths relative to the specified directory">;
+def working_directory_EQ : Joined<"-working-directory=">,
+ Alias<working_directory>;
+
def relocatable_pch : Flag<"-relocatable-pch">,
HelpText<"Whether to build a relocatable precompiled header">;
def chained_pch : Flag<"-chained-pch">,
@@ -401,11 +415,13 @@ def femit_all_decls : Flag<"-femit-all-decls">,
HelpText<"Emit all declarations, even if unused">;
def fblocks : Flag<"-fblocks">,
HelpText<"enable the 'blocks' language feature">;
-def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
+def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
def fexceptions : Flag<"-fexceptions">,
HelpText<"Enable support for exception handling">;
def fsjlj_exceptions : Flag<"-fsjlj-exceptions">,
HelpText<"Use SjLj style exceptions">;
+def fno_objc_exceptions : Flag<"-fno-objc-exceptions">,
+ HelpText<"Disable Objective-C exceptions">;
def ffreestanding : Flag<"-ffreestanding">,
HelpText<"Assert that the compilation takes place in a freestanding environment">;
def fformat_extensions : Flag<"-fformat-extensions">,
@@ -420,6 +436,8 @@ def fmath_errno : Flag<"-fmath-errno">,
HelpText<"Require math functions to indicate errors by setting errno">;
def fms_extensions : Flag<"-fms-extensions">,
HelpText<"Accept some non-standard constructs used in Microsoft header files ">;
+def fmsc_version : Joined<"-fmsc-version=">,
+ HelpText<"Version of the Microsoft C/C++ compiler to report in _MSC_VER (0 = don't define it (default))">;
def fborland_extensions : Flag<"-fborland-extensions">,
HelpText<"Accept non-standard constructs supported by the Borland compiler">;
def main_file_name : Separate<"-main-file-name">,
@@ -433,7 +451,7 @@ def fno_operator_names : Flag<"-fno-operator-names">,
def fno_signed_char : Flag<"-fno-signed-char">,
HelpText<"Char is unsigned">;
def fno_spell_checking : Flag<"-fno-spell-checking">,
- HelpText<"Disable spell-checking">;
+ HelpText<"Disable spell-checking">;
def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">,
HelpText<"Don't use __cxa_atexit for calling destructors">;
def fconstant_string_class : Separate<"-fconstant-string-class">,
@@ -445,16 +463,21 @@ def fobjc_gc : Flag<"-fobjc-gc">,
HelpText<"Enable Objective-C garbage collection">;
def fobjc_gc_only : Flag<"-fobjc-gc-only">,
HelpText<"Use GC exclusively for Objective-C related memory management">;
+def fapple_kext : Flag<"-fapple-kext">,
+ HelpText<"Use Apple's kernel extensions ABI">;
def fobjc_dispatch_method_EQ : Joined<"-fobjc-dispatch-method=">,
HelpText<"Objective-C dispatch method to use">;
+def fobjc_default_synthesize_properties : Flag<"-fobjc-default-synthesize-properties">,
+ HelpText<"enable the default synthesis of Objective-C properties">;
def print_ivar_layout : Flag<"-print-ivar-layout">,
HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
def fobjc_nonfragile_abi : Flag<"-fobjc-nonfragile-abi">,
HelpText<"enable objective-c's nonfragile abi">;
-def fobjc_nonfragile_abi2 : Flag<"-fobjc-nonfragile-abi2">,
- HelpText<"enable objective-c's enhanced nonfragile abi">;
def ftrapv : Flag<"-ftrapv">,
HelpText<"Trap on integer overflow">;
+def ftrapv_handler : Separate<"-ftrapv-handler">,
+ MetaVarName<"<function name>">,
+ HelpText<"Specify the function to be called on overflow.">;
def fwrapv : Flag<"-fwrapv">,
HelpText<"Treat signed integer overflow as two's complement">;
def pic_level : Separate<"-pic-level">,
@@ -467,8 +490,14 @@ def fno_rtti : Flag<"-fno-rtti">,
HelpText<"Disable generation of rtti information">;
def fno_validate_pch : Flag<"-fno-validate-pch">,
HelpText<"Disable validation of precompiled headers">;
+def dump_deserialized_pch_decls : Flag<"-dump-deserialized-decls">,
+ HelpText<"Dump declarations that are deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl : Separate<"-error-on-deserialized-decl">,
+ HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
def fshort_wchar : Flag<"-fshort-wchar">,
HelpText<"Force wchar_t to be a short unsigned int">;
+def fshort_enums : Flag<"-fshort-enums">,
+ HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
def static_define : Flag<"-static-define">,
HelpText<"Should __STATIC__ be defined">;
def stack_protector : Separate<"-stack-protector">,
@@ -479,6 +508,11 @@ def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">,
HelpText<"Give inline C++ member functions default visibility by default">;
def ftemplate_depth : Separate<"-ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">;
+def Wlarge_by_value_copy : Separate<"-Wlarge-by-value-copy">,
+ HelpText<"Warn if a function definition returns or accepts an object larger "
+           "in bytes than a given value">;
+def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">,
+ Alias<Wlarge_by_value_copy>;
def trigraphs : Flag<"-trigraphs">,
HelpText<"Process trigraph sequences">;
def fwritable_strings : Flag<"-fwritable-strings">,
@@ -518,6 +552,8 @@ def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">,
HelpText<"Set directory to include search path with prefix">;
def isysroot : JoinedOrSeparate<"-isysroot">, MetaVarName<"<dir>">,
HelpText<"Set the system root directory (usually /)">;
+def cxx_system_include : Separate<"-cxx-system-include">,
+ HelpText<"Add a system #include directory for the C++ standard library">;
def v : Flag<"-v">, HelpText<"Enable verbose output">;
//===----------------------------------------------------------------------===//
@@ -545,7 +581,7 @@ def undef : Flag<"-undef">, MetaVarName<"<macro>">,
HelpText<"undef all system defines">;
def detailed_preprocessing_record : Flag<"-detailed-preprocessing-record">,
HelpText<"include a detailed record of preprocessing actions">;
-
+
//===----------------------------------------------------------------------===//
// Preprocessed Output Options
//===----------------------------------------------------------------------===//
@@ -560,5 +596,22 @@ def dM : Flag<"-dM">,
HelpText<"Print macro definitions in -E mode instead of normal output">;
def dD : Flag<"-dD">,
HelpText<"Print macro definitions in -E mode in addition to normal output">;
-def H : Flag<"-H">,
- HelpText<"Show header includes and nesting depth">;
+
+//===----------------------------------------------------------------------===//
+// OpenCL Options
+//===----------------------------------------------------------------------===//
+
+def cl_opt_disable : Flag<"-cl-opt-disable">,
+ HelpText<"OpenCL only. This option disables all optimizations. The default is optimizations are enabled.">;
+def cl_single_precision_constant : Flag<"-cl-single-precision-constant">,
+ HelpText<"OpenCL only. Treat double precision floating-point constant as single precision constant.">;
+def cl_finite_math_only : Flag<"-cl-finite-math-only">,
+ HelpText<"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf.">;
+def cl_unsafe_math_optimizations : Flag<"-cl-unsafe-math-optimizations">,
+ HelpText<"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable">;
+def cl_fast_relaxed_math : Flag<"-cl-fast-relaxed-math">,
+ HelpText<"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__">;
+def cl_mad_enable : Flag<"-cl-mad-enable">,
+ HelpText<"OpenCL only. Enable less precise MAD instructions to be generated.">;
+def cl_std_EQ : Joined<"-cl-std=">,
+ HelpText<"OpenCL language standard to compile for">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
index 28eff4f..03fa0ef 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/System/Path.h" // FIXME: Kill when CompilationInfo
+#include "llvm/Support/Path.h" // FIXME: Kill when CompilationInfo
// lands.
#include <list>
#include <set>
@@ -74,7 +74,8 @@ public:
/// functionality.
/// FIXME: This type of customization should be removed in favor of the
/// universal driver when it is ready.
- std::string PrefixDir;
+ typedef llvm::SmallVector<std::string, 4> prefix_list;
+ prefix_list PrefixDirs;
/// Default host triple.
std::string DefaultHostTriple;
@@ -92,12 +93,12 @@ public:
  /// Information about the host which can be overridden by the user.
std::string HostBits, HostMachine, HostSystem, HostRelease;
- /// Name to use when calling the generic gcc.
- std::string CCCGenericGCCName;
-
/// The file to log CC_PRINT_OPTIONS output to, if enabled.
const char *CCPrintOptionsFilename;
+ /// The file to log CC_PRINT_HEADERS output to, if enabled.
+ const char *CCPrintHeadersFilename;
+
/// Whether the driver should follow g++ like behavior.
unsigned CCCIsCXX : 1;
@@ -111,7 +112,14 @@ public:
/// CCPrintOptionsFilename or to stderr.
unsigned CCPrintOptions : 1;
+ /// Set CC_PRINT_HEADERS mode, which causes the frontend to log header include
+ /// information to CCPrintHeadersFilename or to stderr.
+ unsigned CCPrintHeaders : 1;
+
private:
+ /// Name to use when calling the generic gcc.
+ std::string CCCGenericGCCName;
+
/// Whether to check that input files exist when constructing compilation
/// jobs.
unsigned CheckInputsExist : 1;
@@ -157,6 +165,10 @@ public:
/// @name Accessors
/// @{
+ /// Name to use when calling the generic gcc.
+ const std::string &getCCCGenericGCCName() const { return CCCGenericGCCName; }
+
+
const OptTable &getOpts() const { return *Opts; }
const Diagnostic &getDiags() const { return Diags; }
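PrefixDirs generalizes the old single PrefixDir, so every -B prefix can be searched in order. A sketch of how such a list is typically consulted when locating a tool; the function name and the first-match lookup policy are assumptions, not taken from this commit:

    #include "clang/Driver/Driver.h"
    #include "llvm/Support/Path.h"

    // Probe each -B prefix in order; fall back to the bare name (PATH lookup).
    std::string FindProgram(const clang::driver::Driver &D,
                            llvm::StringRef Name) {
      typedef clang::driver::Driver::prefix_list::const_iterator iterator;
      for (iterator I = D.PrefixDirs.begin(), E = D.PrefixDirs.end();
           I != E; ++I) {
        llvm::sys::Path P(*I);
        P.appendComponent(Name);
        if (P.exists())
          return P.str();
      }
      return Name.str();
    }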
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
index c20d807b..0733c51 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define DRIVERSTART
#include "clang/Basic/DiagnosticDriverKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h b/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
index 04e7299..7285a48 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
@@ -71,6 +71,8 @@ const HostInfo *createOpenBSDHostInfo(const Driver &D,
const llvm::Triple& Triple);
const HostInfo *createFreeBSDHostInfo(const Driver &D,
const llvm::Triple& Triple);
+const HostInfo *createNetBSDHostInfo(const Driver &D,
+ const llvm::Triple& Triple);
const HostInfo *createMinixHostInfo(const Driver &D,
const llvm::Triple& Triple);
const HostInfo *createDragonFlyHostInfo(const Driver &D,
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
index 08b483c..3befe1d 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -170,8 +170,8 @@ namespace options {
/// \param MissingArgCount - On error, the number of missing options.
/// \return - An InputArgList; on error this will contain all the options
/// which could be parsed.
- InputArgList *ParseArgs(const char **ArgBegin,
- const char **ArgEnd,
+ InputArgList *ParseArgs(const char* const *ArgBegin,
+ const char* const *ArgEnd,
unsigned &MissingArgIndex,
unsigned &MissingArgCount) const;
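The ParseArgs change is a pure const-correctness fix: T** converts implicitly to const T* const* (though not to const T**), so fully-const argument tables can now be parsed without a const_cast while existing char** callers keep working. An assumed caller sketch, with Opts standing in for an OptTable instance:

    #include "clang/Driver/OptTable.h"
    #include "llvm/ADT/OwningPtr.h"

    static const char *const Args[] = { "-fsyntax-only", "t.c" };
    unsigned MissingArgIndex, MissingArgCount;
    llvm::OwningPtr<clang::driver::InputArgList> AL(
        Opts.ParseArgs(Args, Args + 2, MissingArgIndex, MissingArgCount));
    // Under the old 'const char **' parameters this array, of type
    // 'const char *const [2]', could not be passed without a cast.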
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index a629c9b..1a8a150 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -38,6 +38,7 @@ def u_Group : OptionGroup<"<u group>">;
def pedantic_Group : OptionGroup<"<pedantic group>">,
Group<CompileOnly_Group>;
+def reserved_lib_Group : OptionGroup<"<reserved libs group>">;
// Temporary groups for clang options which we know we don't support,
// but don't want to verbosely warn the user about.
@@ -195,6 +196,7 @@ def allowable__client : Separate<"-allowable_client">;
def ansi : Flag<"-ansi">, Group<a_Group>;
def arch__errors__fatal : Flag<"-arch_errors_fatal">;
def arch : Separate<"-arch">, Flags<[DriverOption]>;
+def arch__only : Separate<"-arch_only">;
def a : Joined<"-a">, Group<a_Group>;
def bind__at__load : Flag<"-bind_at_load">;
def bundle__loader : Separate<"-bundle_loader">;
@@ -213,7 +215,7 @@ def dD : Flag<"-dD">, Group<d_Group>;
def dM : Flag<"-dM">, Group<d_Group>;
def dead__strip : Flag<"-dead_strip">;
def dependency_file : Separate<"-dependency-file">;
-def dumpmachine : Flag<"-dumpmachine">, Flags<[Unsupported]>;
+def dumpmachine : Flag<"-dumpmachine">;
def dumpspecs : Flag<"-dumpspecs">, Flags<[Unsupported]>;
def dumpversion : Flag<"-dumpversion">;
def dylib__file : Separate<"-dylib_file">;
@@ -233,9 +235,13 @@ def fPIC : Flag<"-fPIC">, Group<f_Group>;
def fPIE : Flag<"-fPIE">, Group<f_Group>;
def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
+def fallow_unsupported : Flag<"-fallow-unsupported">, Group<f_Group>;
def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>;
def fasm : Flag<"-fasm">, Group<f_Group>;
-def fasm_blocks : Flag<"-fasm-blocks">, Group<clang_ignored_f_Group>;
+
+def fasm_blocks : Flag<"-fasm-blocks">, Group<f_Group>;
+def fno_asm_blocks : Flag<"-fno-asm-blocks">, Group<f_Group>;
+
def fassume_sane_operator_new : Flag<"-fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<"-fastcp">, Group<f_Group>;
def fastf : Flag<"-fastf">, Group<f_Group>;
@@ -259,13 +265,13 @@ def fconstant_string_class_EQ : Joined<"-fconstant-string-class=">, Group<f_Grou
def fcreate_profile : Flag<"-fcreate-profile">, Group<f_Group>;
def fdebug_pass_arguments : Flag<"-fdebug-pass-arguments">, Group<f_Group>;
def fdebug_pass_structure : Flag<"-fdebug-pass-structure">, Group<f_Group>;
-def fdiagnostics_binary : Flag<"-fdiagnostics-binary">, Group<f_Group>, Flags<[HelpHidden]>;
def fdiagnostics_fixit_info : Flag<"-fdiagnostics-fixit-info">, Group<f_Group>;
def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">, Group<f_Group>;
def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_Group>;
def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>;
def fdiagnostics_show_category_EQ : Joined<"-fdiagnostics-show-category=">, Group<f_Group>;
def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>;
+def felide_constructors : Flag<"-felide-constructors">, Group<f_Group>;
def feliminate_unused_debug_symbols : Flag<"-feliminate-unused-debug-symbols">, Group<f_Group>;
def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>;
def fencoding_EQ : Joined<"-fencoding=">, Group<f_Group>;
@@ -274,6 +280,10 @@ def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
def fhosted : Flag<"-fhosted">, Group<f_Group>;
def ffast_math : Flag<"-ffast-math">, Group<clang_ignored_f_Group>;
def ffinite_math_only : Flag<"-ffinite-math-only">, Group<clang_ignored_f_Group>;
+
+def ffor_scope : Flag<"-ffor-scope">, Group<f_Group>;
+def fno_for_scope : Flag<"-fno-for-scope">, Group<f_Group>;
+
def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>;
def fformat_extensions: Flag<"-fformat-extensions">;
def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>;
@@ -287,14 +297,17 @@ def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>;
def fkeep_inline_functions : Flag<"-fkeep-inline-functions">, Group<clang_ignored_f_Group>;
def flat__namespace : Flag<"-flat_namespace">;
def flax_vector_conversions : Flag<"-flax-vector-conversions">, Group<f_Group>;
+def flimit_debug_info : Flag<"-flimit-debug-info">, Group<f_Group>,
+ HelpText<"Limit debug information produced to reduce size of debug binary">;
def flimited_precision_EQ : Joined<"-flimited-precision=">, Group<f_Group>;
def flto : Flag<"-flto">, Group<f_Group>;
-def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
+def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
Group<f_Group>;
def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>;
def fmerge_all_constants : Flag<"-fmerge-all-constants">, Group<f_Group>;
def fmessage_length_EQ : Joined<"-fmessage-length=">, Group<f_Group>;
def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>;
+def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>;
def fmudflapth : Flag<"-fmudflapth">, Group<f_Group>;
def fmudflap : Flag<"-fmudflap">, Group<f_Group>;
def fnested_functions : Flag<"-fnested-functions">, Group<f_Group>;
@@ -315,6 +328,7 @@ def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>;
def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>;
def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_Group>;
def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>;
+def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>;
def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<clang_ignored_f_Group>;
@@ -326,15 +340,19 @@ def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Gr
def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>;
def fno_ms_extensions : Flag<"-fno-ms-extensions">, Group<f_Group>;
+def fno_objc_default_synthesize_properties
+ : Flag<"-fno-objc-default-synthesize-properties">, Group<f_Group>;
+def fno_objc_exceptions: Flag<"-fno-objc-exceptions">, Group<f_Group>;
def fno_objc_legacy_dispatch : Flag<"-fno-objc-legacy-dispatch">, Group<f_Group>;
def fno_omit_frame_pointer : Flag<"-fno-omit-frame-pointer">, Group<f_Group>;
def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>;
+def fno_short_enums : Flag<"-fno-short-enums">, Group<f_Group>;
def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>;
def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>;
def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>;
def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
-def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<clang_ignored_f_Group>;
+def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<f_Group>;
def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>;
def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>;
def fno_unit_at_a_time : Flag<"-fno-unit-at-a-time">, Group<f_Group>;
@@ -342,15 +360,23 @@ def fno_unwind_tables : Flag<"-fno-unwind-tables">, Group<f_Group>;
def fno_verbose_asm : Flag<"-fno-verbose-asm">, Group<f_Group>;
def fno_working_directory : Flag<"-fno-working-directory">, Group<f_Group>;
def fno_zero_initialized_in_bss : Flag<"-fno-zero-initialized-in-bss">, Group<f_Group>;
-def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
def fobjc_atdefs : Flag<"-fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<"-fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
+def fobjc_default_synthesize_properties :
+ Flag<"-fobjc-default-synthesize-properties">, Group<f_Group>;
+def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>;
+
def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>;
def fobjc_gc : Flag<"-fobjc-gc">, Group<f_Group>;
def fobjc_legacy_dispatch : Flag<"-fobjc-legacy-dispatch">, Group<f_Group>;
def fobjc_new_property : Flag<"-fobjc-new-property">, Group<clang_ignored_f_Group>;
+
+// Objective-C ABI options.
+def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
+def fobjc_nonfragile_abi_version_EQ : Joined<"-fobjc-nonfragile-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi : Flag<"-fobjc-nonfragile-abi">, Group<f_Group>;
-def fobjc_nonfragile_abi2 : Flag<"-fobjc-nonfragile-abi2">, Group<f_Group>;
+def fno_objc_nonfragile_abi : Flag<"-fno-objc-nonfragile-abi">, Group<f_Group>;
+
def fobjc_sender_dependent_dispatch : Flag<"-fobjc-sender-dependent-dispatch">, Group<f_Group>;
def fobjc : Flag<"-fobjc">, Group<f_Group>;
def fomit_frame_pointer : Flag<"-fomit-frame-pointer">, Group<f_Group>;
@@ -369,7 +395,7 @@ def framework : Separate<"-framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<"-frandom-seed=">, Group<clang_ignored_f_Group>;
def frtti : Flag<"-frtti">, Group<f_Group>;
def fsched_interblock : Flag<"-fsched-interblock">, Group<clang_ignored_f_Group>;
-def fshort_enums : Flag<"-fshort-enums">, Group<clang_ignored_f_Group>;
+def fshort_enums : Flag<"-fshort-enums">, Group<f_Group>;
def freorder_blocks : Flag<"-freorder-blocks">, Group<clang_ignored_f_Group>;
def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>;
def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>;
@@ -379,17 +405,20 @@ def fsigned_bitfields : Flag<"-fsigned-bitfields">, Group<f_Group>;
def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
def fstack_protector : Flag<"-fstack-protector">, Group<f_Group>;
-def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<clang_ignored_f_Group>;
+def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<f_Group>;
def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption]>;
def ftabstop_EQ : Joined<"-ftabstop=">, Group<f_Group>;
def ferror_limit_EQ : Joined<"-ferror-limit=">, Group<f_Group>;
def ftemplate_depth_ : Joined<"-ftemplate-depth-">, Group<f_Group>;
-def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
+def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
Group<f_Group>;
+def Wlarge_by_value_copy_def : Flag<"-Wlarge-by-value-copy">;
+def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">;
def fterminated_vtables : Flag<"-fterminated-vtables">, Group<f_Group>;
def fthreadsafe_statics : Flag<"-fthreadsafe-statics">, Group<f_Group>;
def ftime_report : Flag<"-ftime-report">, Group<f_Group>;
def ftrapv : Flag<"-ftrapv">, Group<f_Group>;
+def ftrapv_handler_EQ : Joined<"-ftrapv-handler=">, Group<f_Group>;
def funit_at_a_time : Flag<"-funit-at-a-time">, Group<f_Group>;
def funroll_loops : Flag<"-funroll-loops">, Group<f_Group>;
def funsigned_bitfields : Flag<"-funsigned-bitfields">, Group<f_Group>;
@@ -418,6 +447,7 @@ def iframework : JoinedOrSeparate<"-iframework">, Group<clang_i_Group>;
def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>;
def image__base : Separate<"-image_base">;
def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">;
+def include_pch : Separate<"-include-pch">, Group<clang_i_Group>;
def init : Separate<"-init">;
def install__name : Separate<"-install_name">;
def integrated_as : Flag<"-integrated-as">, Flags<[DriverOption]>;
@@ -451,6 +481,7 @@ def mkernel : Flag<"-mkernel">, Group<m_Group>;
def mlinker_version_EQ : Joined<"-mlinker-version=">, Flags<[NoForward]>;
def mllvm : Separate<"-mllvm">;
def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
+def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>;
def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
@@ -479,6 +510,7 @@ def mno_omit_leaf_frame_pointer : Flag<"-mno-omit-leaf-frame-pointer">, Group<f_
def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>;
def mpascal_strings : Flag<"-mpascal-strings">, Group<m_Group>;
def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
+def mregparm_EQ : Joined<"-mregparm=">, Group<m_Group>;
def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>;
def msoft_float : Flag<"-msoft-float">, Group<m_Group>;
def msse2 : Flag<"-msse2">, Group<m_x86_Features_Group>;
@@ -545,10 +577,12 @@ def private__bundle : Flag<"-private_bundle">;
def pthreads : Flag<"-pthreads">;
def pthread : Flag<"-pthread">;
def p : Flag<"-p">;
+def pie : Flag<"-pie">;
def read__only__relocs : Separate<"-read_only_relocs">;
def remap : Flag<"-remap">;
def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption]>,
HelpText<"Rewrite Objective-C source to C++">;
+def rdynamic : Flag<"-rdynamic">;
def rpath : Separate<"-rpath">, Flags<[LinkerInput]>;
def r : Flag<"-r">;
def save_temps : Flag<"-save-temps">, Flags<[DriverOption]>,
@@ -576,6 +610,7 @@ def static_libgcc : Flag<"-static-libgcc">;
def static : Flag<"-static">, Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<"-std-default=">;
def std_EQ : Joined<"-std=">;
+def stdlib_EQ : Joined<"-stdlib=">;
def sub__library : JoinedOrSeparate<"-sub_library">;
def sub__umbrella : JoinedOrSeparate<"-sub_umbrella">;
def s : Flag<"-s">;
@@ -592,6 +627,7 @@ def undefined : JoinedOrSeparate<"-undefined">, Group<u_Group>;
def undef : Flag<"-undef">, Group<u_Group>;
def unexported__symbols__list : Separate<"-unexported_symbols_list">;
def u : JoinedOrSeparate<"-u">, Group<u_Group>;
+def use_gold_plugin : Flag<"-use-gold-plugin">;
def v : Flag<"-v">,
HelpText<"Show commands to run and use verbose output">;
def weak_l : Joined<"-weak-l">, Flags<[LinkerInput]>;
@@ -606,6 +642,11 @@ def x : JoinedOrSeparate<"-x">, Flags<[DriverOption]>,
MetaVarName<"<language>">;
def y : Joined<"-y">;
+def working_directory : Separate<"-working-directory">,
+ HelpText<"Resolve file paths relative to the specified directory">;
+def working_directory_EQ : Joined<"-working-directory=">,
+ Alias<working_directory>;
+
// Double dash options, which are usually an alias for one of the previous
// options.
@@ -720,6 +761,8 @@ def _specs : Separate<"--specs">, Alias<specs_EQ>;
def _static : Flag<"--static">, Alias<static>;
def _std_EQ : Joined<"--std=">, Alias<std_EQ>;
def _std : Separate<"--std">, Alias<std_EQ>;
+def _stdlib_EQ : Joined<"--stdlib=">, Alias<stdlib_EQ>;
+def _stdlib : Separate<"--stdlib">, Alias<stdlib_EQ>;
def _sysroot_EQ : Joined<"--sysroot=">;
def _sysroot : Separate<"--sysroot">, Alias<_sysroot_EQ>;
def _target_help : Flag<"--target-help">;
@@ -740,4 +783,15 @@ def _write_user_dependencies : Flag<"--write-user-dependencies">, Alias<MMD>;
def _ : Joined<"--">, Flags<[Unsupported]>;
// Special internal option to handle -Xlinker --no-demangle.
-def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">, Flags<[Unsupported]>;
+def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">,
+ Flags<[Unsupported, NoArgumentUnused]>;
+
+// Special internal option to allow forwarding arbitrary arguments to linker.
+def Zlinker_input : Separate<"-Zlinker-input">,
+ Flags<[Unsupported, NoArgumentUnused]>;
+
+// Reserved library options.
+def Z_reserved_lib_stdcxx : Flag<"-Z-reserved-lib-stdc++">,
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
+def Z_reserved_lib_cckext : Flag<"-Z-reserved-lib-cckext">,
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
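Aliases such as _stdlib_EQ collapse to their target option during parsing, so driver code only ever queries the canonical spelling. A sketch of the usual query pattern, assuming an ArgList named Args and this era's Arg::getValue(Args) signature:

    #include "clang/Driver/ArgList.h"
    #include "clang/Driver/Options.h"
    using namespace clang::driver;

    if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
      // Matches "-stdlib=libc++" as well as "--stdlib=libc++"
      // and the separate form "--stdlib libc++".
      llvm::StringRef Value = A->getValue(Args);
      (void)Value;
    }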
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
index 55be4ee..f0012bd 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -10,10 +10,11 @@
#ifndef CLANG_DRIVER_TOOLCHAIN_H_
#define CLANG_DRIVER_TOOLCHAIN_H_
+#include "clang/Driver/Util.h"
#include "clang/Driver/Types.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <string>
namespace clang {
@@ -32,6 +33,11 @@ class ToolChain {
public:
typedef llvm::SmallVector<std::string, 4> path_list;
+ enum CXXStdlibType {
+ CST_Libcxx,
+ CST_Libstdcxx
+ };
+
private:
const HostInfo &Host;
const llvm::Triple Triple;
@@ -92,6 +98,10 @@ public:
// Platform defaults information
+ /// HasNativeLLVMSupport - Check whether the linker and related tools have
+ /// native LLVM support.
+ virtual bool HasNativeLLVMSupport() const;
+
/// LookupTypeForExtension - Return the default language type to use for the
/// given extension.
virtual types::ID LookupTypeForExtension(const char *Ext) const;
@@ -103,6 +113,14 @@ public:
/// by default.
virtual bool IsIntegratedAssemblerDefault() const { return false; }
+ /// IsStrictAliasingDefault - Does this tool chain use -fstrict-aliasing by
+ /// default.
+ virtual bool IsStrictAliasingDefault() const { return true; }
+
+ /// IsObjCDefaultSynthPropertiesDefault - Does this tool chain enable
+ /// -fobjc-default-synthesize-properties by default.
+ virtual bool IsObjCDefaultSynthPropertiesDefault() const { return false; }
+
/// IsObjCNonFragileABIDefault - Does this tool chain set
/// -fobjc-nonfragile-abi by default.
virtual bool IsObjCNonFragileABIDefault() const { return false; }
@@ -153,6 +171,25 @@ public:
/// sets the deployment target) determines the version in the triple passed to
/// Clang.
virtual std::string ComputeEffectiveClangTriple(const ArgList &Args) const;
+
+ /// GetCXXStdlibType - Determine the C++ standard library type to use with the
+ /// given compilation arguments.
+ virtual CXXStdlibType GetCXXStdlibType(const ArgList &Args) const;
+
+ /// AddClangCXXStdlibIncludeArgs - Add the clang -cc1 level arguments to set
+ /// the include paths to use for the given C++ standard library type.
+ virtual void AddClangCXXStdlibIncludeArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ /// AddCXXStdlibLibArgs - Add the system specific linker arguments to use
+ /// for the given C++ standard library type.
+ virtual void AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ /// AddCCKextLibArgs - Add the system specific linker arguments to use
+ /// for kernel extensions (Darwin-specific).
+ virtual void AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
};
} // end namespace driver
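A plausible shape for the default GetCXXStdlibType, under the assumption that it parses -stdlib= and keeps libstdc++ as the historical default; the diagnostic for an unrecognized value is elided:

    #include "clang/Driver/ArgList.h"
    #include "clang/Driver/Options.h"
    #include "clang/Driver/ToolChain.h"
    using namespace clang::driver;

    ToolChain::CXXStdlibType
    ToolChain::GetCXXStdlibType(const ArgList &Args) const {
      if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
        llvm::StringRef Value = A->getValue(Args);
        if (Value == "libc++")
          return CST_Libcxx;
        if (Value == "libstdc++")
          return CST_Libstdcxx;
        // An unrecognized value would be diagnosed via getDriver() here.
      }
      return CST_Libstdcxx; // assumed historical default
    }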
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.def b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
index 06a8690..f09a1dc 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
@@ -42,6 +42,7 @@
TYPE("cpp-output", PP_C, INVALID, "i", "u")
TYPE("c", C, PP_C, 0, "u")
TYPE("cl", CL, PP_C, 0, "u")
+TYPE("cuda", CUDA, PP_CXX, 0, "u")
TYPE("objective-c-cpp-output", PP_ObjC, INVALID, "mi", "u")
TYPE("objective-c", ObjC, PP_ObjC, 0, "u")
TYPE("c++-cpp-output", PP_CXX, INVALID, "ii", "u")
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
index cca243d..c45bd40 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
@@ -18,8 +18,6 @@
namespace llvm {
class raw_ostream;
- class Module;
- class LLVMContext;
namespace sys { class Path; }
}
namespace clang {
@@ -48,6 +46,10 @@ ASTConsumer *CreateASTPrinterXML(llvm::raw_ostream *OS);
// intended for debugging.
ASTConsumer *CreateASTDumper();
+// AST XML-dumper: dumps out the AST to stderr in a very detailed XML
+// format; this is intended for particularly intense debugging.
+ASTConsumer *CreateASTDumperXML(llvm::raw_ostream &OS);
+
// Graphical AST viewer: for each function definition, creates a graph of
// the AST and displays it with the graph viewer "dotty". Also outputs
// function declarations to stderr.
@@ -57,10 +59,6 @@ ASTConsumer *CreateASTViewer();
// to stderr; this is intended for debugging.
ASTConsumer *CreateDeclContextPrinter();
-// Inheritance viewer: for C++ code, creates a graph of the inheritance
-// tree for the given class and displays it with "dotty".
-ASTConsumer *CreateInheritanceViewer(const std::string& clsname);
-
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
index e3fd4b3..e935633 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
@@ -21,13 +21,13 @@
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
#include "clang-c/Index.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/System/Path.h"
-#include "llvm/Support/Timer.h"
+#include "llvm/Support/Path.h"
#include <map>
#include <string>
#include <vector>
@@ -54,6 +54,14 @@ class TargetInfo;
using namespace idx;
+/// \brief Allocator for a cached set of global code completions.
+class GlobalCodeCompletionAllocator
+ : public CodeCompletionAllocator,
+ public llvm::RefCountedBase<GlobalCodeCompletionAllocator>
+{
+
+};
+
/// \brief Utility class for loading a ASTContext from an AST file.
///
class ASTUnit {
@@ -69,7 +77,9 @@ private:
llvm::OwningPtr<TargetInfo> Target;
llvm::OwningPtr<Preprocessor> PP;
llvm::OwningPtr<ASTContext> Ctx;
-
+
+ FileSystemOptions FileSystemOpts;
+
/// \brief The AST consumer that received information about the translation
/// unit as it was parsed or loaded.
llvm::OwningPtr<ASTConsumer> Consumer;
@@ -82,6 +92,13 @@ private:
/// LoadFromCommandLine available.
llvm::OwningPtr<CompilerInvocation> Invocation;
+ /// \brief The set of target features.
+ ///
+ /// FIXME: each time we reparse, we need to restore the set of target
+ /// features from this vector, because TargetInfo::CreateTargetInfo()
+ /// mangles the target options in place. Yuck!
+ std::vector<std::string> TargetFeatures;
+
// OnlyLocalDecls - when true, walking this AST should only visit declarations
// that come from the AST itself, not from included precompiled headers.
// FIXME: This is temporary; eventually, CIndex will always do this.
@@ -89,13 +106,16 @@ private:
/// \brief Whether to capture any diagnostics produced.
bool CaptureDiagnostics;
-
+
/// \brief Track whether the main file was loaded from an AST or not.
bool MainFileIsAST;
/// \brief Whether this AST represents a complete translation unit.
bool CompleteTranslationUnit;
+ /// \brief Whether we should time each operation.
+ bool WantTiming;
+
/// Track the top-level decls which appeared in an ASTUnit which was loaded
/// from a source file.
//
@@ -105,6 +125,14 @@ private:
// more scalable search mechanisms.
std::vector<Decl*> TopLevelDecls;
+ /// \brief The list of preprocessed entities which appeared when the ASTUnit
+ /// was loaded.
+ ///
+ /// FIXME: This is just an optimization hack to avoid deserializing large
+ /// parts of a PCH file while performing a walk or search. In the long term,
+ /// we should provide more scalable search mechanisms.
+ std::vector<PreprocessedEntity *> PreprocessedEntities;
+
/// The name of the original source file used to generate this ASTUnit.
std::string OriginalSourceFile;
@@ -115,6 +143,13 @@ private:
/// translation unit.
llvm::SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+ /// \brief The number of stored diagnostics that come from the driver
+ /// itself.
+ ///
+ /// Diagnostics that come from the driver are retained from one parse to
+ /// the next.
+ unsigned NumStoredDiagnosticsFromDriver;
+
/// \brief Temporary files that should be removed when the ASTUnit is
/// destroyed.
llvm::SmallVector<llvm::sys::Path, 4> TemporaryFiles;
@@ -199,22 +234,21 @@ private:
/// a precompiled preamble.
unsigned NumStoredDiagnosticsInPreamble;
- /// \brief The group of timers associated with this translation unit.
- llvm::OwningPtr<llvm::TimerGroup> TimerGroup;
-
/// \brief A list of the serialization ID numbers for each of the top-level
/// declarations parsed within the precompiled preamble.
std::vector<serialization::DeclID> TopLevelDeclsInPreamble;
- ///
- /// \defgroup CodeCompleteCaching Code-completion caching
- ///
- /// \{
- ///
-
+ /// \brief A list of the offsets into the precompiled preamble which
+ /// correspond to preprocessed entities.
+ std::vector<uint64_t> PreprocessedEntitiesInPreamble;
+
/// \brief Whether we should be caching code-completion results.
bool ShouldCacheCodeCompletionResults;
+ static void ConfigureDiags(llvm::IntrusiveRefCntPtr<Diagnostic> &Diags,
+ const char **ArgBegin, const char **ArgEnd,
+ ASTUnit &AST, bool CaptureDiagnostics);
+
public:
/// \brief A cached code-completion result, which may be introduced in one of
/// many different contexts.
@@ -261,7 +295,17 @@ public:
return CachedCompletionTypes;
}
+ /// \brief Retrieve the allocator used to cache global code completions.
+ llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ getCachedCompletionAllocator() {
+ return CachedCompletionAllocator;
+ }
+
private:
+ /// \brief Allocator used to store cached code completions.
+ llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ CachedCompletionAllocator;
+
/// \brief The set of cached code-completion results.
std::vector<CachedCodeCompletionResult> CachedCompletionResults;
@@ -269,20 +313,24 @@ private:
/// type, which is used for type equality comparisons.
llvm::StringMap<unsigned> CachedCompletionTypes;
- /// \brief The number of top-level declarations present the last time we
- /// cached code-completion results.
+ /// \brief A string hash of the top-level declaration and macro definition
+ /// names processed the last time that we reparsed the file.
///
- /// The value is used to help detect when we should repopulate the global
- /// completion cache.
- unsigned NumTopLevelDeclsAtLastCompletionCache;
+ /// This hash value is used to determine when we need to refresh the
+ /// global code-completion cache.
+ unsigned CompletionCacheTopLevelHashValue;
- /// \brief The number of reparses left until we'll consider updating the
- /// code-completion cache.
+ /// \brief A string hash of the top-level declaration and macro definition
+ /// names processed the last time that we reparsed the precompiled preamble.
///
- /// This is meant to avoid thrashing during reparsing, by not allowing the
- /// code-completion cache to be updated on every reparse.
- unsigned CacheCodeCompletionCoolDown;
+ /// This hash value is used to determine when we need to refresh the
+ /// global code-completion cache after a rebuild of the precompiled preamble.
+ unsigned PreambleTopLevelHashValue;
+ /// \brief The current hash value for the top-level declaration and macro
+ /// definition names.
+ unsigned CurrentTopLevelHashValue;
+
/// \brief Bit used by CIndex to mark when a translation unit may be in an
/// inconsistent state, and is not safe to free.
unsigned UnsafeToFree : 1;
@@ -294,14 +342,6 @@ private:
/// \brief Clear out and deallocate
void ClearCachedCompletionResults();
- ///
- /// \}
- ///
-
- /// \brief The timers we've created from the various parses, reparses, etc.
- /// involved in this translation unit.
- std::vector<llvm::Timer *> Timers;
-
ASTUnit(const ASTUnit&); // DO NOT IMPLEMENT
ASTUnit &operator=(const ASTUnit &); // DO NOT IMPLEMENT
@@ -319,7 +359,8 @@ private:
bool AllowRebuild = true,
unsigned MaxLines = 0);
void RealizeTopLevelDeclsFromPreamble();
-
+ void RealizePreprocessedEntitiesFromPreamble();
+
public:
class ConcurrencyCheck {
volatile ASTUnit &Self;
@@ -367,6 +408,8 @@ public:
const FileManager &getFileManager() const { return *FileMgr; }
FileManager &getFileManager() { return *FileMgr; }
+ const FileSystemOptions &getFileSystemOpts() const { return FileSystemOpts; }
+
const std::string &getOriginalSourceFileName();
const std::string &getASTFileName();
@@ -386,6 +429,9 @@ public:
void setLastASTLocation(ASTLocation ALoc) { LastLoc = ALoc; }
ASTLocation getLastASTLocation() const { return LastLoc; }
+
+ llvm::StringRef getMainFileName() const;
+
typedef std::vector<Decl *>::iterator top_level_iterator;
top_level_iterator top_level_begin() {
@@ -423,6 +469,22 @@ public:
TopLevelDeclsInPreamble.push_back(D);
}
+ /// \brief Retrieve a reference to the current top-level name hash value.
+ ///
+ /// Note: This is used internally by the top-level tracking action.
+ unsigned &getCurrentTopLevelHashValue() { return CurrentTopLevelHashValue; }
+
+ typedef std::vector<PreprocessedEntity *>::iterator pp_entity_iterator;
+
+ pp_entity_iterator pp_entity_begin();
+ pp_entity_iterator pp_entity_end();
+
+ /// \brief Add a new preprocessed entity that's stored at the given offset
+ /// in the precompiled preamble.
+ void addPreprocessedEntityFromPreamble(uint64_t Offset) {
+ PreprocessedEntitiesInPreamble.push_back(Offset);
+ }
+
/// \brief Retrieve the mapping from File IDs to the preprocessed entities
/// within that file.
PreprocessedEntitiesByFileMap &getPreprocessedEntitiesByFile() {
@@ -457,7 +519,10 @@ public:
unsigned cached_completion_size() const {
return CachedCompletionResults.size();
}
-
+
+ llvm::MemoryBuffer *getBufferForFile(llvm::StringRef Filename,
+ std::string *ErrorStr = 0);
+
/// \brief Whether this AST represents a complete translation unit.
///
/// If false, this AST is only a partial translation unit, e.g., one
@@ -478,11 +543,25 @@ public:
/// \returns - The initialized ASTUnit or null if the AST failed to load.
static ASTUnit *LoadFromASTFile(const std::string &Filename,
llvm::IntrusiveRefCntPtr<Diagnostic> Diags,
+ const FileSystemOptions &FileSystemOpts,
bool OnlyLocalDecls = false,
RemappedFile *RemappedFiles = 0,
unsigned NumRemappedFiles = 0,
bool CaptureDiagnostics = false);
+private:
+ /// \brief Helper function for \c LoadFromCompilerInvocation() and
+ /// \c LoadFromCommandLine(), which loads an AST from a compiler invocation.
+ ///
+ /// \param PrecompilePreamble Whether to precompile the preamble of this
+ /// translation unit, to improve the performance of reparsing.
+ ///
+ /// \returns \c true if a catastrophic failure occurred (which means that the
+ /// \c ASTUnit itself is invalid), or \c false otherwise.
+ bool LoadFromCompilerInvocation(bool PrecompilePreamble);
+
+public:
+
/// LoadFromCompilerInvocation - Create an ASTUnit from a source file, via a
/// CompilerInvocation object.
///
@@ -521,12 +600,14 @@ public:
llvm::IntrusiveRefCntPtr<Diagnostic> Diags,
llvm::StringRef ResourceFilesPath,
bool OnlyLocalDecls = false,
+ bool CaptureDiagnostics = false,
RemappedFile *RemappedFiles = 0,
unsigned NumRemappedFiles = 0,
- bool CaptureDiagnostics = false,
bool PrecompilePreamble = false,
bool CompleteTranslationUnit = true,
- bool CacheCodeCompletionResults = false);
+ bool CacheCodeCompletionResults = false,
+ bool CXXPrecompilePreamble = false,
+ bool CXXChainedPCH = false);
/// \brief Reparse the source files using the same command-line options that
/// were originally used to produce this translation unit.
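Note the parameter reorder in LoadFromCommandLine: CaptureDiagnostics now precedes the remapped-file arguments, so callers passing positional booleans must be updated. A caller sketch, where the input path, resource path, and the Diags object (an llvm::IntrusiveRefCntPtr<Diagnostic>) are assumed to exist:

    #include "clang/Frontend/ASTUnit.h"
    #include "llvm/ADT/OwningPtr.h"

    const char *Args[] = { "-fsyntax-only", "input.c" };
    llvm::OwningPtr<clang::ASTUnit> AST(
        clang::ASTUnit::LoadFromCommandLine(
            Args, Args + 2, Diags, /*ResourceFilesPath=*/"/usr/lib/clang",
            /*OnlyLocalDecls=*/false,
            /*CaptureDiagnostics=*/true));  // now before RemappedFiles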
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
index aaa3920..75b52a8 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
@@ -15,45 +15,12 @@
#define ANALYSIS(NAME, CMDFLAG, DESC, SCOPE)
#endif
-ANALYSIS(CFGDump, "cfg-dump",
- "Display Control-Flow Graphs", Code)
-
-ANALYSIS(CFGView, "cfg-view",
- "View Control-Flow Graphs using GraphViz", Code)
-
-ANALYSIS(DisplayLiveVariables, "dump-live-variables",
- "Print results of live variable analysis", Code)
-
-ANALYSIS(SecuritySyntacticChecks, "analyzer-check-security-syntactic",
- "Perform quick security checks that require no data flow", Code)
-
-ANALYSIS(LLVMConventionChecker, "analyzer-check-llvm-conventions",
- "Check code for LLVM codebase conventions (domain-specific)",
- TranslationUnit)
-
-ANALYSIS(WarnDeadStores, "analyzer-check-dead-stores",
- "Warn about stores to dead variables", Code)
-
ANALYSIS(WarnUninitVals, "warn-uninit-values",
"Warn about uses of uninitialized variables", Code)
-
-ANALYSIS(WarnObjCMethSigs, "analyzer-check-objc-methodsigs",
- "Warn about Objective-C method signatures with type incompatibilities",
- ObjCImplementation)
-
-ANALYSIS(WarnObjCDealloc, "analyzer-check-objc-missing-dealloc",
-"Warn about Objective-C classes that lack a correct implementation of -dealloc",
- ObjCImplementation)
-ANALYSIS(WarnObjCUnusedIvars, "analyzer-check-objc-unused-ivars",
- "Warn about private ivars that are never used", ObjCImplementation)
-
ANALYSIS(ObjCMemChecker, "analyzer-check-objc-mem",
"Run the [Core] Foundation reference count checker", Code)
-ANALYSIS(WarnSizeofPointer, "warn-sizeof-pointer",
- "Warn about unintended use of sizeof() on pointer expressions", Code)
-
#ifndef ANALYSIS_STORE
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN)
#endif
@@ -73,9 +40,10 @@ ANALYSIS_CONSTRAINTS(RangeConstraints, "range", "Use constraint tracking of conc
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE)
#endif
-ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML", CreateHTMLDiagnosticClient, false)
-ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists", CreatePlistDiagnosticClient, true)
-ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html", "Output analysis results using HTML wrapped with Plists", CreatePlistHTMLDiagnosticClient, true)
+ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML", createHTMLDiagnosticClient, false)
+ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists", createPlistDiagnosticClient, true)
+ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html", "Output analysis results using HTML wrapped with Plists", createPlistHTMLDiagnosticClient, true)
+ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results", createTextPathDiagnosticClient, true)
#undef ANALYSIS
#undef ANALYSIS_STORE
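Like Types.def, Analyses.def is consumed via X-macros, so the new "text" diagnostics row lands in the AnalysisDiagClients enum (visible as context in the next hunk) without further plumbing. A sketch of that standard expansion, assuming the usual PD_ prefix convention:

    enum AnalysisDiagClients {
    #define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \
      PD_##NAME,
    #include "clang/Frontend/Analyses.def"
    NUM_ANALYSIS_DIAG_CLIENTS
    };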
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
index 9ed15ba..5806928 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
@@ -56,6 +56,8 @@ NUM_ANALYSIS_DIAG_CLIENTS
class AnalyzerOptions {
public:
std::vector<Analyses> AnalysisList;
+ /// \brief Pairs of a checker name and an enable/disable flag.
+ std::vector<std::pair<std::string, bool> > CheckersControlList;
AnalysisStores AnalysisStoreOpt;
AnalysisConstraints AnalysisConstraintsOpt;
AnalysisDiagClients AnalysisDiagOpt;
@@ -65,17 +67,20 @@ public:
unsigned AnalyzeAll : 1;
unsigned AnalyzerDisplayProgress : 1;
unsigned AnalyzeNestedBlocks : 1;
+ unsigned AnalyzerStats : 1;
unsigned EagerlyAssume : 1;
- unsigned IdempotentOps : 1;
+ unsigned BufferOverflows : 1;
unsigned PurgeDead : 1;
unsigned TrimGraph : 1;
unsigned VisualizeEGDot : 1;
unsigned VisualizeEGUbi : 1;
unsigned EnableExperimentalChecks : 1;
unsigned EnableExperimentalInternalChecks : 1;
- unsigned EnableIdempotentOperationChecker : 1;
unsigned InlineCall : 1;
unsigned UnoptimizedCFG : 1;
+ unsigned CFGAddImplicitDtors : 1;
+ unsigned CFGAddInitializers : 1;
+ unsigned EagerlyTrimEGraph : 1;
public:
AnalyzerOptions() {
@@ -85,14 +90,20 @@ public:
AnalyzeAll = 0;
AnalyzerDisplayProgress = 0;
AnalyzeNestedBlocks = 0;
+ AnalyzerStats = 0;
EagerlyAssume = 0;
+ BufferOverflows = 0;
PurgeDead = 1;
TrimGraph = 0;
VisualizeEGDot = 0;
VisualizeEGUbi = 0;
EnableExperimentalChecks = 0;
EnableExperimentalInternalChecks = 0;
+ InlineCall = 0;
UnoptimizedCFG = 0;
+ CFGAddImplicitDtors = 0;
+ CFGAddInitializers = 0;
+ EagerlyTrimEGraph = 0;
}
};
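CheckersControlList records (checker name, enabled) pairs in command-line order, presumably so a later entry for the same checker can override an earlier one. A usage sketch; the checker names and the flag spellings in the comments are assumptions:

    #include "clang/Frontend/AnalyzerOptions.h"

    clang::AnalyzerOptions Opts;
    Opts.CheckersControlList.push_back(
        std::make_pair(std::string("core"), true));           // e.g. -analyzer-checker=core
    Opts.CheckersControlList.push_back(
        std::make_pair(std::string("core.DivByZero"), false)); // disable one again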
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
index b3f5709..ee85b655 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -40,24 +40,32 @@ public:
/// aliases to base ctors when possible.
unsigned DataSections : 1; /// Set when -fdata-sections is enabled
unsigned DebugInfo : 1; /// Should generate debug info (-g).
+ unsigned LimitDebugInfo : 1; /// Limit generated debug info to reduce size.
unsigned DisableFPElim : 1; /// Set when -fomit-frame-pointer is enabled.
unsigned DisableLLVMOpts : 1; /// Don't run any optimizations, for use in
/// getting .bc files that correspond to the
/// internal state before optimizations are
/// done.
unsigned DisableRedZone : 1; /// Set when -mno-red-zone is enabled.
- unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what Decl*
- /// various IR entities came from. Only useful
- /// when running CodeGen as a subroutine.
+ unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what
+ /// Decl* various IR entities came from. Only
+ /// useful when running CodeGen as a
+ /// subroutine.
unsigned FunctionSections : 1; /// Set when -ffunction-sections is enabled
unsigned HiddenWeakTemplateVTables : 1; /// Emit weak vtables and RTTI for
/// template classes with hidden visibility
unsigned HiddenWeakVTables : 1; /// Emit weak vtables, RTTI, and thunks with
- /// hidden visibility
- unsigned InstrumentFunctions : 1; /// Set when -finstrument-functions is enabled
+ /// hidden visibility.
+ unsigned InstrumentFunctions : 1; /// Set when -finstrument-functions is
+ /// enabled.
+ unsigned InstrumentForProfiling : 1; /// Set when -pg is enabled
+ unsigned LessPreciseFPMAD : 1; /// Enable less precise MAD instructions to be
+ /// generated.
unsigned MergeAllConstants : 1; /// Merge identical constants.
unsigned NoCommon : 1; /// Set when -fno-common or C++ is enabled.
unsigned NoImplicitFloat : 1; /// Set when -mno-implicit-float is enabled.
+ unsigned NoInfsFPMath : 1; /// Assume FP arguments, results not +-Inf.
+ unsigned NoNaNsFPMath : 1; /// Assume FP arguments, results not NaN.
unsigned NoZeroInitializedInBSS : 1; /// -fno-zero-initialized-in-bss
unsigned ObjCDispatchMethod : 2; /// Method of Objective-C dispatch to use.
unsigned OmitLeafFramePointer : 1; /// Set when -momit-leaf-frame-pointer is
@@ -65,12 +73,14 @@ public:
unsigned OptimizationLevel : 3; /// The -O[0-4] option specified.
unsigned OptimizeSize : 1; /// If -Os is specified.
unsigned RelaxAll : 1; /// Relax all machine code instructions.
+ unsigned RelaxedAliasing : 1; /// Set when -fno-strict-aliasing is enabled.
unsigned SimplifyLibCalls : 1; /// Set when -fbuiltin is enabled.
unsigned SoftFloat : 1; /// -soft-float.
unsigned TimePasses : 1; /// Set when -ftime-report is enabled.
unsigned UnitAtATime : 1; /// Unused. For mirroring GCC optimization
/// selection.
unsigned UnrollLoops : 1; /// Control whether loops are unrolled.
+ unsigned UnsafeFPMath : 1; /// Allow unsafe floating point optzns.
unsigned UnwindTables : 1; /// Emit unwind tables.
unsigned VerifyModule : 1; /// Control whether the module should be run
/// through the LLVM Verifier.
@@ -102,6 +112,10 @@ public:
/// The name of the relocation model to use.
std::string RelocationModel;
+ /// The user specified number of registers to be used for integral arguments,
+ /// or 0 if unspecified.
+ unsigned NumRegisterParameters;
+
public:
CodeGenOptions() {
AsmVerbose = 0;
@@ -109,6 +123,7 @@ public:
CXXCtorDtorAliases = 0;
DataSections = 0;
DebugInfo = 0;
+ LimitDebugInfo = 0;
DisableFPElim = 0;
DisableLLVMOpts = 0;
DisableRedZone = 0;
@@ -117,20 +132,27 @@ public:
HiddenWeakTemplateVTables = 0;
HiddenWeakVTables = 0;
InstrumentFunctions = 0;
+ InstrumentForProfiling = 0;
+ LessPreciseFPMAD = 0;
MergeAllConstants = 1;
NoCommon = 0;
NoImplicitFloat = 0;
+ NoInfsFPMath = 0;
+ NoNaNsFPMath = 0;
NoZeroInitializedInBSS = 0;
+ NumRegisterParameters = 0;
ObjCDispatchMethod = Legacy;
OmitLeafFramePointer = 0;
OptimizationLevel = 0;
OptimizeSize = 0;
RelaxAll = 0;
+ RelaxedAliasing = 0;
SimplifyLibCalls = 1;
SoftFloat = 0;
TimePasses = 0;
UnitAtATime = 1;
UnrollLoops = 0;
+ UnsafeFPMath = 0;
UnwindTables = 0;
VerifyModule = 1;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h b/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h
index bea468b..8911cfa 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h
@@ -37,9 +37,15 @@ public:
// If both tail splits were valid integers, return success.
if (!ColSplit.second.getAsInteger(10, PSL.Column) &&
- !LineSplit.second.getAsInteger(10, PSL.Line))
+ !LineSplit.second.getAsInteger(10, PSL.Line)) {
PSL.FileName = LineSplit.first;
+ // On the command-line, stdin may be specified via "-". Inside the
+ // compiler, stdin is called "<stdin>".
+ if (PSL.FileName == "-")
+ PSL.FileName = "<stdin>";
+ }
+
return PSL;
}
};
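FromString parses "<file>:<line>:<column>", splitting from the right so file names containing ':' still work, and "-" now normalizes to the compiler's "<stdin>" spelling. A usage sketch:

    #include "clang/Frontend/CommandLineSourceLoc.h"

    clang::ParsedSourceLocation PSL =
        clang::ParsedSourceLocation::FromString("-:10:1");
    // PSL.FileName == "<stdin>", PSL.Line == 10, PSL.Column == 1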
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
index 1b3c336..7ea79e5 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -19,7 +19,6 @@
#include <string>
namespace llvm {
-class LLVMContext;
class raw_ostream;
class raw_fd_ostream;
class Timer;
@@ -59,9 +58,6 @@ class TargetInfo;
/// come in two forms; a short form that reuses the CompilerInstance objects,
/// and a long form that takes explicit instances of any required objects.
class CompilerInstance {
- /// The LLVM context used for this instance.
- llvm::OwningPtr<llvm::LLVMContext> LLVMContext;
-
/// The options used in this compiler instance.
llvm::OwningPtr<CompilerInvocation> Invocation;
@@ -95,8 +91,23 @@ class CompilerInstance {
/// The frontend timer
llvm::OwningPtr<llvm::Timer> FrontendTimer;
+ /// \brief Holds information about the output file.
+ ///
+ /// If TempFilename is not empty, we must rename it to Filename at the end.
+ /// TempFilename may be empty and Filename non-empty if creating the temporary
+ /// failed.
+ struct OutputFile {
+ std::string Filename;
+ std::string TempFilename;
+ llvm::raw_ostream *OS;
+
+ OutputFile(const std::string &filename, const std::string &tempFilename,
+ llvm::raw_ostream *os)
+ : Filename(filename), TempFilename(tempFilename), OS(os) { }
+ };
+
/// The list of active output files.
- std::list< std::pair<std::string, llvm::raw_ostream*> > OutputFiles;
+ std::list<OutputFile> OutputFiles;
void operator=(const CompilerInstance &); // DO NOT IMPLEMENT
CompilerInstance(const CompilerInstance&); // DO NOT IMPLEMENT
@@ -140,23 +151,6 @@ public:
bool ExecuteAction(FrontendAction &Act);
/// }
- /// @name LLVM Context
- /// {
-
- bool hasLLVMContext() const { return LLVMContext != 0; }
-
- llvm::LLVMContext &getLLVMContext() const {
- assert(LLVMContext && "Compiler instance has no LLVM context!");
- return *LLVMContext;
- }
-
- llvm::LLVMContext *takeLLVMContext() { return LLVMContext.take(); }
-
- /// setLLVMContext - Replace the current LLVM context and take ownership of
- /// \arg Value.
- void setLLVMContext(llvm::LLVMContext *Value);
-
- /// }
/// @name Compiler Invocation and Options
/// {
@@ -205,6 +199,10 @@ public:
return Invocation->getDiagnosticOpts();
}
+ const FileSystemOptions &getFileSystemOpts() const {
+ return Invocation->getFileSystemOpts();
+ }
+
FrontendOptions &getFrontendOpts() {
return Invocation->getFrontendOpts();
}
@@ -435,16 +433,10 @@ public:
/// @name Output Files
/// {
- /// getOutputFileList - Get the list of (path, output stream) pairs of output
- /// files; the path may be empty but the stream will always be non-null.
- const std::list< std::pair<std::string,
- llvm::raw_ostream*> > &getOutputFileList() const;
-
/// addOutputFile - Add an output file onto the list of tracked output files.
///
- /// \param Path - The path to the output file, or empty.
- /// \param OS - The output stream, which should be non-null.
- void addOutputFile(llvm::StringRef Path, llvm::raw_ostream *OS);
+ /// \param OutFile - The output file info.
+ void addOutputFile(const OutputFile &OutFile);
/// clearOutputFiles - Clear the output file list, destroying the contained
/// output streams.
@@ -459,8 +451,14 @@ public:
/// Create the diagnostics engine using the invocation's diagnostic options
/// and replace any existing one with it.
///
- /// Note that this routine also replaces the diagnostic client.
- void createDiagnostics(int Argc, char **Argv);
+ /// Note that this routine also replaces the diagnostic client,
+ /// allocating one if one is not provided.
+ ///
+ /// \param Client If non-NULL, a diagnostic client that will be
+ /// attached to (and, then, owned by) the Diagnostic inside this AST
+ /// unit.
+ void createDiagnostics(int Argc, const char* const *Argv,
+ DiagnosticClient *Client = 0);
/// Create a Diagnostic object with a the TextDiagnosticPrinter.
///
@@ -468,23 +466,30 @@ public:
/// when the diagnostic options indicate that the compiler should output
/// logging information.
///
- /// Note that this creates an unowned DiagnosticClient, if using directly the
- /// caller is responsible for releasing the returned Diagnostic's client
- /// eventually.
+ /// If no diagnostic client is provided, this creates a
+ /// DiagnosticClient that is owned by the returned Diagnostic
+ /// object; if this routine is used directly, the caller is
+ /// responsible for eventually releasing the returned
+ /// Diagnostic's client.
///
/// \param Opts - The diagnostic options; note that the created text
/// diagnostic object contains a reference to these options and its lifetime
/// must extend past that of the diagnostic engine.
///
+ /// \param Client If non-NULL, a diagnostic client that will be
+ /// attached to (and, then, owned by) the returned Diagnostic
+ /// object.
+ ///
/// \return The new object on success, or null on failure.
static llvm::IntrusiveRefCntPtr<Diagnostic>
- createDiagnostics(const DiagnosticOptions &Opts, int Argc, char **Argv);
+ createDiagnostics(const DiagnosticOptions &Opts, int Argc,
+ const char* const *Argv,
+ DiagnosticClient *Client = 0);
/// Create the file manager and replace any existing one with it.
void createFileManager();
/// Create the source manager and replace any existing one with it.
- void createSourceManager();
+ void createSourceManager(FileManager &FileMgr);
/// Create the preprocessor, using the invocation, file, and source managers,
/// and replace any existing one with it.
@@ -511,6 +516,7 @@ public:
/// context.
void createPCHExternalASTSource(llvm::StringRef Path,
bool DisablePCHValidation,
+ bool DisableStatCache,
void *DeserializationListener);
/// Create an external AST source to read a PCH file.
@@ -519,8 +525,9 @@ public:
static ExternalASTSource *
createPCHExternalASTSource(llvm::StringRef Path, const std::string &Sysroot,
bool DisablePCHValidation,
+ bool DisableStatCache,
Preprocessor &PP, ASTContext &Context,
- void *DeserializationListener);
+ void *DeserializationListener, bool Preamble);
/// Create a code completion consumer using the invocation; note that this
/// will cause the source manager to truncate the input source file at the
@@ -533,7 +540,7 @@ public:
static CodeCompleteConsumer *
createCodeCompletionConsumer(Preprocessor &PP, const std::string &Filename,
unsigned Line, unsigned Column,
- bool UseDebugPrinter, bool ShowMacros,
+ bool ShowMacros,
bool ShowCodePatterns, bool ShowGlobals,
llvm::raw_ostream &OS);
@@ -557,7 +564,8 @@ public:
///
/// \return - Null on error.
llvm::raw_fd_ostream *
- createOutputFile(llvm::StringRef OutputPath, bool Binary = true,
+ createOutputFile(llvm::StringRef OutputPath,
+ bool Binary = true, bool RemoveFileOnSignal = true,
llvm::StringRef BaseInput = "",
llvm::StringRef Extension = "");
@@ -565,7 +573,8 @@ public:
///
/// If \arg OutputPath is empty, then createOutputFile will derive an output
/// path location as \arg BaseInput, with any suffix removed, and \arg
- /// Extension appended.
+ /// Extension appended. If OutputPath is not stdout, createOutputFile will
+ /// create a new temporary file that must be renamed to OutputPath in the end.
///
/// \param OutputPath - If given, the path to the output file.
/// \param Error [out] - On failure, the error message.
@@ -573,13 +582,20 @@ public:
/// for deriving the output path.
/// \param Extension - The extension to use for derived output names.
/// \param Binary - The mode to open the file in.
+ /// \param RemoveFileOnSignal - Whether the file should be registered with
+ /// llvm::sys::RemoveFileOnSignal. Note that this is not safe for
+ /// multithreaded use, as the underlying signal mechanism is not reentrant.
/// \param ResultPathName [out] - If given, the result path name will be
/// stored here on success.
+ /// \param TempPathName [out] - If given, the temporary file path name
+ /// will be stored here on success.
static llvm::raw_fd_ostream *
createOutputFile(llvm::StringRef OutputPath, std::string &Error,
- bool Binary = true, llvm::StringRef BaseInput = "",
+ bool Binary = true, bool RemoveFileOnSignal = true,
+ llvm::StringRef BaseInput = "",
llvm::StringRef Extension = "",
- std::string *ResultPathName = 0);
+ std::string *ResultPathName = 0,
+ std::string *TempPathName = 0);
/// }
/// @name Initialization Utility Methods
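Together with the OutputFile struct above, the RemoveFileOnSignal/TempPathName machinery implements write-to-temporary-then-rename: output reaches its final path only on success, so an interrupted compile cannot leave a truncated artifact behind. A member-call sketch with assumed file names, where CI is a CompilerInstance:

    // Derives "in.o" from BaseInput + Extension but writes to a temporary;
    // clearOutputFiles(/*EraseFiles=*/false) later renames it into place.
    llvm::raw_fd_ostream *OS =
        CI.createOutputFile(/*OutputPath=*/"", /*Binary=*/true,
                            /*RemoveFileOnSignal=*/true,
                            /*BaseInput=*/"in.c", /*Extension=*/"o");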
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index d558ad3..e0329db 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -12,12 +12,14 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/FileSystemOptions.h"
#include "clang/Frontend/AnalyzerOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/HeaderSearchOptions.h"
+#include "clang/Frontend/LangStandard.h"
#include "clang/Frontend/PreprocessorOptions.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
#include "llvm/ADT/StringRef.h"
@@ -52,6 +54,9 @@ class CompilerInvocation {
/// Options controlling the diagnostic engine.
DiagnosticOptions DiagnosticOpts;
+ /// Options controlling file system operations.
+ FileSystemOptions FileSystemOpts;
+
/// Options controlling the frontend itself.
FrontendOptions FrontendOpts;
@@ -83,8 +88,10 @@ public:
/// \param ArgBegin - The first element in the argument vector.
/// \param ArgEnd - The last element in the argument vector.
/// \param Diags - The diagnostic engine to use for errors.
- static void CreateFromArgs(CompilerInvocation &Res, const char **ArgBegin,
- const char **ArgEnd, Diagnostic &Diags);
+ static void CreateFromArgs(CompilerInvocation &Res,
+ const char* const *ArgBegin,
+ const char* const *ArgEnd,
+ Diagnostic &Diags);
/// GetBuiltinIncludePath - Get the directory where the compiler headers
/// reside, relative to the compiler binary (found by the passed in
@@ -100,6 +107,25 @@ public:
/// passing to CreateFromArgs.
void toArgs(std::vector<std::string> &Res);
+ /// setLangDefaults - Set language defaults for the given input language and
+ /// language standard in this CompilerInvocation.
+ ///
+ /// \param IK - The input language.
+ /// \param LangStd - The input language standard.
+ void setLangDefaults(InputKind IK,
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified) {
+ setLangDefaults(LangOpts, IK, LangStd);
+ }
+
+ /// setLangDefaults - Set language defaults for the given input language and
+ /// language standard in the given LangOptions object.
+ ///
+ /// \param LangOpts - The LangOptions object to set up.
+ /// \param IK - The input language.
+ /// \param LangStd - The input language standard.
+ static void setLangDefaults(LangOptions &Opts, InputKind IK,
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified);
+
/// @}
/// @name Option Subgroups
/// @{
@@ -124,6 +150,11 @@ public:
DiagnosticOptions &getDiagnosticOpts() { return DiagnosticOpts; }
const DiagnosticOptions &getDiagnosticOpts() const { return DiagnosticOpts; }
+ FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
+ const FileSystemOptions &getFileSystemOpts() const {
+ return FileSystemOpts;
+ }
+
HeaderSearchOptions &getHeaderSearchOpts() { return HeaderSearchOpts; }
const HeaderSearchOptions &getHeaderSearchOpts() const {
return HeaderSearchOpts;
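A sketch of driving the revised CompilerInvocation interface; the argument strings are illustrative, Diags is assumed to be an already-constructed Diagnostic, and IK_CUDA is the input kind added in FrontendAction.h below:

    // Sketch: the const char* const* signature now accepts argv slices
    // that were never owned as mutable strings.
    const char *const Args[] = { "-fsyntax-only", "input.cu" };
    CompilerInvocation Invocation;
    CompilerInvocation::CreateFromArgs(Invocation, Args, Args + 2, Diags);
    // Apply the default LangOptions for the input language (here CUDA),
    // with the language standard left unspecified.
    Invocation.setLangDefaults(IK_CUDA);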
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DeclXML.def b/contrib/llvm/tools/clang/include/clang/Frontend/DeclXML.def
index 1845118..1b158fd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DeclXML.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DeclXML.def
@@ -103,6 +103,9 @@ NODE_XML(FunctionDecl, "Function")
ATTRIBUTE_OPT_XML(isInlineSpecified(), "inline")
//ATTRIBUTE_OPT_XML(isVariadic(), "variadic") // in the type reference
ATTRIBUTE_XML(getNumParams(), "num_args")
+ ATTRIBUTE_OPT_XML(isMain(), "main")
+ ATTRIBUTE_OPT_XML(isExternC(), "externc")
+ ATTRIBUTE_OPT_XML(isGlobal(), "global")
SUB_NODE_SEQUENCE_XML(ParmVarDecl)
SUB_NODE_FN_BODY_XML
END_NODE_XML
@@ -117,6 +120,7 @@ NODE_XML(CXXMethodDecl, "CXXMethod")
ATTRIBUTE_OPT_XML(isInlineSpecified(), "inline")
ATTRIBUTE_OPT_XML(isStatic(), "static")
ATTRIBUTE_OPT_XML(isVirtual(), "virtual")
+ ATTRIBUTE_OPT_XML(isPure(), "pure")
ATTRIBUTE_ENUM_OPT_XML(getAccess(), "access")
ENUM_XML(AS_none, "")
ENUM_XML(AS_public, "public")
@@ -316,6 +320,7 @@ NODE_XML(LinkageSpecDecl, "LinkageSpec")
ENUM_XML(LinkageSpecDecl::lang_c, "C")
ENUM_XML(LinkageSpecDecl::lang_cxx, "CXX")
END_ENUM_XML
+ SUB_NODE_XML(DeclContext)
END_NODE_XML
NODE_XML(TemplateDecl, "Template")
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
index ab8e49d..35aa6c6 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -20,13 +20,20 @@ namespace clang {
class DependencyOutputOptions {
public:
unsigned IncludeSystemHeaders : 1; ///< Include system header dependencies.
+ unsigned ShowHeaderIncludes : 1; ///< Show header inclusions (-H).
unsigned UsePhonyTargets : 1; ///< Include phony targets for each
/// dependency, which can avoid some 'make'
/// problems.
- /// The file to write depencency output to.
+ /// The file to write dependency output to.
std::string OutputFile;
+ /// The file to write header include output to. This is orthogonal to
+ /// ShowHeaderIncludes (-H) and will include headers mentioned in the
+ /// predefines buffer. If the output file is "-", output will be sent to
+ /// stderr.
+ std::string HeaderIncludeOutputFile;
+
/// A list of names to use as the targets in the dependency file; this list
/// must contain at least one entry.
std::vector<std::string> Targets;
@@ -34,6 +41,7 @@ public:
public:
DependencyOutputOptions() {
IncludeSystemHeaders = 0;
+ ShowHeaderIncludes = 0;
UsePhonyTargets = 0;
}
};
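A sketch of the new header-include fields above; the values are illustrative:

    DependencyOutputOptions DepOpts;
    DepOpts.ShowHeaderIncludes = 1;        // mirror -H behavior
    DepOpts.HeaderIncludeOutputFile = "-"; // "-" sends the listing to stderr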
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
index c80bc03..f7f498b 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
@@ -41,9 +41,6 @@ public:
unsigned VerifyDiagnostics: 1; /// Check that diagnostics match the expected
/// diagnostics, indicated by markers in the
/// input source file.
- unsigned BinaryOutput : 1; /// Emit diagnostics via the diagnostic
- /// binary serialization mechanism, to be
- /// deserialized by, e.g., the CIndex library.
unsigned ErrorLimit; /// Limit # errors emitted.
unsigned MacroBacktraceLimit; /// Limit depth of macro instantiation
@@ -86,7 +83,6 @@ public:
ShowSourceRanges = 0;
ShowParseableFixits = 0;
VerifyDiagnostics = 0;
- BinaryOutput = 0;
ErrorLimit = 0;
TemplateBacktraceLimit = DefaultTemplateBacktraceLimit;
MacroBacktraceLimit = DefaultMacroBacktraceLimit;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index 773543a..ee0863a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -37,6 +37,7 @@ enum InputKind {
IK_PreprocessedObjC,
IK_PreprocessedObjCXX,
IK_OpenCL,
+ IK_CUDA,
IK_AST,
IK_LLVM_IR
};
@@ -51,6 +52,10 @@ class FrontendAction {
CompilerInstance *Instance;
friend class ASTMergeAction;
+private:
+ ASTConsumer* CreateWrappedASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+
protected:
/// @name Implementation Action Interface
/// @{
@@ -130,7 +135,7 @@ public:
}
ASTUnit &getCurrentASTUnit() const {
- assert(!CurrentASTUnit && "No current AST unit!");
+ assert(CurrentASTUnit && "No current AST unit!");
return *CurrentASTUnit;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
index 7b8063c..4df2e71 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
@@ -54,6 +54,12 @@ protected:
llvm::StringRef InFile);
};
+class ASTDumpXMLAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+};
+
class ASTViewAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
@@ -83,16 +89,11 @@ public:
static bool ComputeASTConsumerArguments(CompilerInstance &CI,
llvm::StringRef InFile,
std::string &Sysroot,
+ std::string &OutputFile,
llvm::raw_ostream *&OS,
bool &Chaining);
};
-class InheritanceViewAction : public ASTFrontendAction {
-protected:
- virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile);
-};
-
class SyntaxOnlyAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
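The new ASTDumpXMLAction follows the usual ASTFrontendAction pattern, sketched here with a hypothetical consumer (MyConsumer is not part of this change):

    class MyAction : public ASTFrontendAction {
    protected:
      // The only hook a simple action must provide.
      virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
                                             llvm::StringRef InFile) {
        return new MyConsumer(); // hypothetical ASTConsumer subclass
      }
    };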
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
index 61ad22c..2efbc81 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define FRONTENDSTART
#include "clang/Basic/DiagnosticFrontendKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
index 4c16d08..19d39c3 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -21,6 +21,7 @@ namespace clang {
namespace frontend {
enum ActionKind {
ASTDump, ///< Parse ASTs and dump them.
+ ASTDumpXML, ///< Parse ASTs and dump them in XML.
ASTPrint, ///< Parse ASTs and print them.
ASTPrintXML, ///< Parse ASTs and print them in XML.
ASTView, ///< Parse ASTs and view them in Graphviz.
@@ -38,7 +39,6 @@ namespace frontend {
FixIt, ///< Parse and apply any fixits to the source.
GeneratePCH, ///< Generate pre-compiled header.
GeneratePTH, ///< Generate pre-tokenized header.
- InheritanceView, ///< View C++ inheritance for a specified class.
InitOnly, ///< Only execute frontend initialization.
ParseSyntaxOnly, ///< Parse and perform semantic analysis.
PluginAction, ///< Run a plugin action, \see ActionName.
@@ -56,8 +56,6 @@ namespace frontend {
/// FrontendOptions - Options for controlling the behavior of the frontend.
class FrontendOptions {
public:
- unsigned DebugCodeCompletionPrinter : 1; ///< Use the debug printer for code
- /// completion results.
unsigned DisableFree : 1; ///< Disable memory freeing on exit.
unsigned RelocatablePCH : 1; ///< When generating PCH files,
/// instruct the AST writer to create
@@ -86,9 +84,6 @@ public:
/// The output file, if any.
std::string OutputFile;
- /// If given, the name for a C++ class to view the inheritance of.
- std::string ViewClassInheritance;
-
/// If given, the new suffix for fix-it rewritten files.
std::string FixItSuffix;
@@ -101,9 +96,15 @@ public:
/// The name of the action to run when using a plugin action.
std::string ActionName;
- /// Arg to pass to the plugin
+ /// Args to pass to the plugin
std::vector<std::string> PluginArgs;
+ /// The list of plugin actions to run in addition to the normal action.
+ std::vector<std::string> AddPluginActions;
+
+ /// Args to pass to the additional plugins
+ std::vector<std::vector<std::string> > AddPluginArgs;
+
/// The list of plugins to load.
std::vector<std::string> Plugins;
@@ -119,7 +120,6 @@ public:
public:
FrontendOptions() {
- DebugCodeCompletionPrinter = 1;
DisableFree = 0;
ProgramAction = frontend::ParseSyntaxOnly;
ActionName = "";
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
index 588d32b..cbb4a57 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
@@ -54,6 +54,9 @@ public:
/// User specified include entries.
std::vector<Entry> UserEntries;
+ /// If non-empty, the list of C++ standard include paths to use.
+ std::vector<std::string> CXXSystemIncludes;
+
/// A (system-path) delimited list of include paths to be added from the
/// environment following the user specified includes (but prior to builtin
/// and standard includes). This is parsed in the same manner as the CPATH
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
index 52aa463..d4046b3 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
@@ -80,4 +80,9 @@ LANGSTANDARD(opencl, "cl",
"OpenCL 1.0",
BCPLComment | C99 | Digraphs | HexFloat)
+// CUDA
+LANGSTANDARD(cuda, "cuda",
+ "NVIDIA CUDA(tm)",
+ BCPLComment | CPlusPlus | Digraphs)
+
#undef LANGSTANDARD
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
new file mode 100644
index 0000000..560178b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -0,0 +1,54 @@
+//===-- MultiplexConsumer.h - AST Consumer for PCH Generation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MultiplexConsumer class, which can be used to
+// multiplex ASTConsumer and SemaConsumer messages to many consumers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaConsumer.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <vector>
+
+namespace clang {
+
+class MultiplexASTMutationListener;
+class MultiplexASTDeserializationListener;
+
+// Has a list of ASTConsumers and calls each of them. Owns its children.
+class MultiplexConsumer : public SemaConsumer {
+public:
+ // Takes ownership of the pointers in C.
+ MultiplexConsumer(const std::vector<ASTConsumer*>& C);
+ ~MultiplexConsumer();
+
+ // ASTConsumer
+ virtual void Initialize(ASTContext &Context);
+ virtual void HandleTopLevelDecl(DeclGroupRef D);
+ virtual void HandleInterestingDecl(DeclGroupRef D);
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+ virtual void HandleTagDeclDefinition(TagDecl *D);
+ virtual void CompleteTentativeDefinition(VarDecl *D);
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired);
+ virtual ASTMutationListener *GetASTMutationListener();
+ virtual ASTDeserializationListener *GetASTDeserializationListener();
+ virtual void PrintStats();
+
+ // SemaConsumer
+ virtual void InitializeSema(Sema &S);
+ virtual void ForgetSema();
+
+ static bool classof(const MultiplexConsumer *) { return true; }
+private:
+ std::vector<ASTConsumer*> Consumers; // Owns these.
+ llvm::OwningPtr<MultiplexASTMutationListener> MutationListener;
+ llvm::OwningPtr<MultiplexASTDeserializationListener> DeserializationListener;
+};
+
+} // end namespace clang
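A sketch of fanning AST callbacks out through the new class; A and B stand for arbitrary ASTConsumer subclasses and are not part of this change:

    std::vector<ASTConsumer*> Consumers;
    Consumers.push_back(new A()); // ownership passes to the multiplexer
    Consumers.push_back(new B());
    ASTConsumer *Mux = new MultiplexConsumer(Consumers);
    // Calls such as Mux->HandleTopLevelDecl(...) now forward to A and B
    // in order.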
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
index 851c1f0..0d52e53 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
@@ -15,6 +15,7 @@
#include <string>
#include <utility>
#include <vector>
+#include <set>
namespace llvm {
class MemoryBuffer;
@@ -46,7 +47,18 @@ public:
/// \brief When true, disables most of the normal validation performed on
/// precompiled headers.
bool DisablePCHValidation;
-
+
+ /// \brief When true, disables the use of the stat cache within a
+ /// precompiled header or AST file.
+ bool DisableStatCache;
+
+ /// \brief Dump declarations that are deserialized from PCH, for testing.
+ bool DumpDeserializedPCHDecls;
+
+ /// \brief This is a set of names for decls that we do not want to be
+ /// deserialized, and we emit an error if they are; for testing purposes.
+ std::set<std::string> DeserializedPCHDeclsToErrorOn;
+
/// \brief If non-zero, the implicit PCH include is actually a precompiled
/// preamble that covers this number of bytes in the main source file.
///
@@ -117,7 +129,8 @@ public:
public:
PreprocessorOptions() : UsePredefines(true), DetailedRecord(false),
- DisablePCHValidation(false),
+ DisablePCHValidation(false), DisableStatCache(false),
+ DumpDeserializedPCHDecls(false),
PrecompiledPreambleBytes(0, true),
RetainRemappedFileBuffers(false) { }
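A sketch of the new PCH-testing knobs above; the decl name is purely illustrative:

    PreprocessorOptions PPOpts;
    PPOpts.DisableStatCache = true;         // skip the PCH stat cache
    PPOpts.DumpDeserializedPCHDecls = true; // log decls as they stream in
    // Error out if this decl is ever deserialized (testing aid):
    PPOpts.DeserializedPCHDeclsToErrorOn.insert("ShouldStayLazy");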
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
index 82517c5..1eda0d4 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
@@ -18,7 +18,6 @@ class PreprocessorOutputOptions {
public:
unsigned ShowCPP : 1; ///< Print normal preprocessed output.
unsigned ShowComments : 1; ///< Show comments.
- unsigned ShowHeaderIncludes : 1; ///< Show header inclusions (-H).
unsigned ShowLineMarkers : 1; ///< Show #line markers.
unsigned ShowMacroComments : 1; ///< Show comments, even in macros.
unsigned ShowMacros : 1; ///< Print macro definitions.
@@ -27,7 +26,6 @@ public:
PreprocessorOutputOptions() {
ShowCPP = 1;
ShowComments = 0;
- ShowHeaderIncludes = 0;
ShowLineMarkers = 1;
ShowMacroComments = 0;
ShowMacros = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/StmtXML.def b/contrib/llvm/tools/clang/include/clang/Frontend/StmtXML.def
index c03a5a8..8a859e6 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/StmtXML.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/StmtXML.def
@@ -415,13 +415,6 @@ NODE_XML(StmtExpr, "StmtExpr") // StmtExpr contains a s
SUB_NODE_XML(CompoundStmt)
END_NODE_XML
-NODE_XML(TypesCompatibleExpr, "TypesCompatibleExpr") // GNU builtin-in function __builtin_types_compatible_p
- ATTRIBUTE_FILE_LOCATION_XML
- TYPE_ATTRIBUTE_XML(getType())
- ATTRIBUTE_XML(getArgType1(), "type1_ref") // id of type1
- ATTRIBUTE_XML(getArgType2(), "type2_ref") // id of type2
-END_NODE_XML
-
NODE_XML(ChooseExpr, "ChooseExpr") // GNU built-in function __builtin_choose_expr(expr1, expr2, expr3)
ATTRIBUTE_FILE_LOCATION_XML
TYPE_ATTRIBUTE_XML(getType())
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def b/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
index 1536c92..b78e70f 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
@@ -98,7 +98,8 @@ NODE_XML(BuiltinType, "FundamentalType")
ENUM_XML(BuiltinType::Float, "float");
ENUM_XML(BuiltinType::Double, "double");
ENUM_XML(BuiltinType::LongDouble, "long double");
- ENUM_XML(BuiltinType::WChar, "wchar_t");
+ ENUM_XML(BuiltinType::WChar_U, "wchar_t");
+ ENUM_XML(BuiltinType::WChar_S, "wchar_t");
ENUM_XML(BuiltinType::Char16, "char16_t");
ENUM_XML(BuiltinType::Char32, "char32_t");
ENUM_XML(BuiltinType::NullPtr, "nullptr_t"); // This is the type of C++0x 'nullptr'.
@@ -130,6 +131,13 @@ NODE_XML(FunctionProtoType, "FunctionType")
ID_ATTRIBUTE_XML
ATTRIBUTE_XML(getResultType(), "result_type")
ATTRIBUTE_OPT_XML(isVariadic(), "variadic")
+ ATTRIBUTE_ENUM_XML(getCallConv(), "call_conv")
+ ENUM_XML(CC_Default, "")
+ ENUM_XML(CC_C, "C")
+ ENUM_XML(CC_X86StdCall, "X86StdCall")
+ ENUM_XML(CC_X86FastCall, "X86FastCall")
+ ENUM_XML(CC_X86ThisCall, "X86ThisCall")
+ END_ENUM_XML
END_NODE_XML
NODE_XML(TypedefType, "Typedef")
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
index fe722db..485161b 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
@@ -73,6 +73,18 @@ bool CheckDiagnostics(Preprocessor &PP);
void AttachDependencyFileGen(Preprocessor &PP,
const DependencyOutputOptions &Opts);
+/// AttachHeaderIncludeGen - Create a header include list generator, and attach
+/// it to the given preprocessor.
+///
+/// \param ShowAllHeaders - If true, show all header information instead of just
+/// headers following the predefines buffer. This is useful for making sure
+/// includes mentioned on the command line are also reported, but differs from
+/// the default behavior used by -H.
+/// \param OutputPath - If non-empty, a path to write the header include
+/// information to, instead of writing to stderr.
+void AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders = false,
+ llvm::StringRef OutputPath = "");
+
/// CacheTokens - Cache tokens for use with PCH. Note that this requires
/// a seekable stream.
void CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS);
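A sketch of attaching the new generator; PP is assumed to be a fully initialized Preprocessor and the output path is illustrative:

    // Report every header, including ones named on the command line, and
    // write the listing to a file rather than stderr.
    AttachHeaderIncludeGen(PP, /*ShowAllHeaders=*/true,
                           /*OutputPath=*/"includes.txt");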
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticsClient.h b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticsClient.h
index 6f45e49..793cedd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticsClient.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticsClient.h
@@ -68,7 +68,6 @@ public:
llvm::OwningPtr<DiagnosticClient> PrimaryClient;
llvm::OwningPtr<TextDiagnosticBuffer> Buffer;
Preprocessor *CurrentPreprocessor;
- unsigned NumErrors;
private:
void CheckDiagnostics();
@@ -88,9 +87,6 @@ public:
virtual void HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info);
-
- /// HadErrors - Check if there were any mismatches in expected diagnostics.
- bool HadErrors();
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
index 791d3fe..dbf7389 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
@@ -27,6 +27,9 @@ public:
/// \brief Read the set of macros defined by this external macro source.
virtual void ReadDefinedMacros() = 0;
+
+ /// \brief Read the definition for the given macro.
+ virtual void LoadMacroDefinition(IdentifierInfo *II) = 0;
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
index 9837e29..8a5c83e 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
@@ -43,7 +43,7 @@ public:
/// HeaderMap::Create - This attempts to load the specified file as a header
/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
- static const HeaderMap *Create(const FileEntry *FE);
+ static const HeaderMap *Create(const FileEntry *FE, FileManager &FM);
/// LookupFile - Check to see if the specified relative filename is located in
/// this HeaderMap. If so, open it and return its FileEntry.
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
index 80b38de..30bd4f5 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -29,7 +29,7 @@ class IdentifierInfo;
/// file that is #included.
struct HeaderFileInfo {
/// isImport - True if this is a #import'd or #pragma once file.
- bool isImport : 1;
+ unsigned isImport : 1;
/// DirInfo - Keep track of whether this is a system header, and if so,
/// whether it is C++ clean or not. This can be set by the include paths or
@@ -37,10 +37,24 @@ struct HeaderFileInfo {
/// SrcMgr::CharacteristicKind.
unsigned DirInfo : 2;
+ /// \brief Whether this header file info was supplied by an external source.
+ unsigned External : 1;
+
+ /// \brief Whether this structure is considered to already have been
+ /// "resolved", meaning that it was loaded from the external source.
+ unsigned Resolved : 1;
+
/// NumIncludes - This is the number of times the file has been included
/// already.
unsigned short NumIncludes;
+ /// \brief The ID number of the controlling macro.
+ ///
+ /// This ID number will be non-zero when there is a controlling
+ /// macro whose IdentifierInfo may not yet have been loaded from
+ /// external storage.
+ unsigned ControllingMacroID;
+
/// ControllingMacro - If this file has a #ifndef XXX (or equivalent) guard
/// that protects the entire contents of the file, this is the identifier
/// for the macro that controls whether or not it has any effect.
@@ -51,27 +65,40 @@ struct HeaderFileInfo {
/// external storage.
const IdentifierInfo *ControllingMacro;
- /// \brief The ID number of the controlling macro.
- ///
- /// This ID number will be non-zero when there is a controlling
- /// macro whose IdentifierInfo may not yet have been loaded from
- /// external storage.
- unsigned ControllingMacroID;
-
HeaderFileInfo()
- : isImport(false), DirInfo(SrcMgr::C_User),
- NumIncludes(0), ControllingMacro(0), ControllingMacroID(0) {}
+ : isImport(false), DirInfo(SrcMgr::C_User), External(false),
+ Resolved(false), NumIncludes(0), ControllingMacroID(0),
+ ControllingMacro(0) {}
/// \brief Retrieve the controlling macro for this header file, if
/// any.
const IdentifierInfo *getControllingMacro(ExternalIdentifierLookup *External);
+
+ /// \brief Determine whether this is a non-default header file info, e.g.,
+ /// it corresponds to an actual header we've included or tried to include.
+ bool isNonDefault() const {
+ return isImport || NumIncludes || ControllingMacro || ControllingMacroID;
+ }
};
+/// \brief An external source of header file information, which may supply
+/// information about header files already included.
+class ExternalHeaderFileInfoSource {
+public:
+ virtual ~ExternalHeaderFileInfoSource();
+
+ /// \brief Retrieve the header file information for the given file entry.
+ ///
+ /// \returns Header file information for the given file entry, with the
+ /// \c External bit set. If the file entry is not known, return a
+ /// default-constructed \c HeaderFileInfo.
+ virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) = 0;
+};
+
/// HeaderSearch - This class encapsulates the information needed to find the
/// file referenced by a #include or #include_next, (sub-)framework lookup, etc.
class HeaderSearch {
FileManager &FileMgr;
-
/// #include search path information. Requests for #include "x" search the
/// directory of the #including file first, then each directory in SearchDirs
/// consecutively. Requests for <x> search the current dir first, then each
@@ -108,6 +135,9 @@ class HeaderSearch {
/// macros into IdentifierInfo pointers, as needed.
ExternalIdentifierLookup *ExternalLookup;
+ /// \brief Entity used to look up stored header file information.
+ ExternalHeaderFileInfoSource *ExternalSource;
+
// Various statistics we track for performance analysis.
unsigned NumIncluded;
unsigned NumMultiIncludeFileOptzn;
@@ -142,6 +172,15 @@ public:
ExternalLookup = EIL;
}
+ ExternalIdentifierLookup *getExternalLookup() const {
+ return ExternalLookup;
+ }
+
+ /// \brief Set the external source of header information.
+ void SetExternalSource(ExternalHeaderFileInfoSource *ES) {
+ ExternalSource = ES;
+ }
+
/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
/// return null on failure. isAngled indicates whether the file reference is
/// a <> reference. If successful, this returns 'UsedDir', the
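A sketch of a minimal external source against the interface above; the lookup itself is a placeholder:

    class StubHeaderInfoSource : public ExternalHeaderFileInfoSource {
    public:
      virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) {
        HeaderFileInfo HFI;  // default-constructed: "file not known"
        HFI.External = true; // contract: mark externally supplied info
        return HFI;
      }
    };
    // HS is assumed to be a HeaderSearch instance:
    //   HS.SetExternalSource(new StubHeaderInfoSource());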
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
index 2d941e4..5fcb8eb 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define LEXSTART
#include "clang/Basic/DiagnosticLexKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index 9e0fb7e..fc9a8de 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -211,6 +211,32 @@ public:
/// and " characters. This does not add surrounding ""'s to the string.
static void Stringify(llvm::SmallVectorImpl<char> &Str);
+
+ /// getSpelling - This method is used to get the spelling of a token into a
+ /// preallocated buffer, instead of as an std::string. The caller is required
+ /// to allocate enough space for the token, which is guaranteed to be at least
+ /// Tok.getLength() bytes long. The length of the actual result is returned.
+ ///
+ /// Note that this method may do two possible things: it may either fill in
+ /// the buffer specified with characters, or it may *change the input pointer*
+ /// to point to a constant buffer with the data already in it (avoiding a
+ /// copy). The caller is not allowed to modify the returned buffer pointer
+ /// if an internal buffer is returned.
+ static unsigned getSpelling(const Token &Tok, const char *&Buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &Features,
+ bool *Invalid = 0);
+
+ /// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
+ /// token is the characters used to represent the token in the source file
+ /// after trigraph expansion and escaped-newline folding. In particular, this
+ /// wants to get the true, uncanonicalized, spelling of things like digraphs,
+ /// UCNs, etc.
+ static std::string getSpelling(const Token &Tok,
+ const SourceManager &SourceMgr,
+ const LangOptions &Features,
+ bool *Invalid = 0);
+
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
@@ -228,6 +254,33 @@ public:
const SourceManager &SM,
const LangOptions &LangOpts);
+ /// AdvanceToTokenCharacter - If the current SourceLocation specifies a
+ /// location at the start of a token, return a new location that specifies a
+ /// character within the token. This handles trigraphs and escaped newlines.
+ static SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned Character,
+ const SourceManager &SM,
+ const LangOptions &Features);
+
+ /// \brief Computes the source location just past the end of the
+ /// token at this source location.
+ ///
+ /// This routine can be used to produce a source location that
+ /// points just past the end of the token referenced by \p Loc, and
+ /// is generally used when a diagnostic needs to point just after a
+ /// token where it expected something different from what it received. If
+ /// the returned source location would not be meaningful (e.g., if
+ /// it points into a macro), this routine returns an invalid
+ /// source location.
+ ///
+ /// \param Offset an offset from the end of the token, where the source
+ /// location should refer to. The default offset (0) produces a source
+ /// location pointing just past the end of the token; an offset of 1 produces
+ /// a source location pointing to the last character in the token, etc.
+ static SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
+ const SourceManager &SM,
+ const LangOptions &Features);
+
/// \brief Compute the preamble of the given file.
///
/// The preamble of a file contains the initial comments, include directives,
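A sketch of the relocated end-of-token helper; SM, LangOpts, and Loc are assumed to be in scope:

    // Find the location just past a token, e.g. to anchor a diagnostic or
    // fix-it after it.
    SourceLocation After =
        Lexer::getLocForEndOfToken(Loc, /*Offset=*/0, SM, LangOpts);
    if (After.isInvalid()) {
      // Loc pointed into a macro; no meaningful end location exists.
    }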
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
index ba46fb1..bf2c06b 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
@@ -15,10 +15,11 @@
#ifndef CLANG_LITERALSUPPORT_H
#define CLANG_LITERALSUPPORT_H
-#include <string>
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
+#include <cctype>
+#include <string>
namespace clang {
@@ -27,6 +28,8 @@ class Preprocessor;
class Token;
class SourceLocation;
class TargetInfo;
+class SourceManager;
+class LangOptions;
/// NumericLiteralParser - This performs strict semantic analysis of the content
/// of a ppnumber, classifying it as either integer, floating, or erroneous,
@@ -138,8 +141,11 @@ public:
/// wide string analysis and Translation Phase #6 (concatenation of string
/// literals) (C99 5.1.1.2p1).
class StringLiteralParser {
- Preprocessor &PP;
-
+ const SourceManager &SM;
+ const LangOptions &Features;
+ const TargetInfo &Target;
+ Diagnostic *Diags;
+
unsigned MaxTokenLength;
unsigned SizeBound;
unsigned wchar_tByteWidth;
@@ -148,6 +154,14 @@ class StringLiteralParser {
public:
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &PP, bool Complain = true);
+ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
+ const SourceManager &sm, const LangOptions &features,
+ const TargetInfo &target, Diagnostic *diags = 0)
+ : SM(sm), Features(features), Target(target), Diags(diags) {
+ init(StringToks, NumStringToks);
+ }
+
+
bool hadError;
bool AnyWide;
bool Pascal;
@@ -163,8 +177,13 @@ public:
/// getOffsetOfStringByte - This function returns the offset of the
/// specified byte of the string data represented by Token. This handles
/// advancing over escape sequences in the string.
- static unsigned getOffsetOfStringByte(const Token &TheTok, unsigned ByteNo,
- Preprocessor &PP, bool Complain = true);
+ ///
+ /// If the Diagnostics pointer is non-null, then this will do semantic
+ /// checking of the string literal and emit errors and warnings.
+ unsigned getOffsetOfStringByte(const Token &TheTok, unsigned ByteNo) const;
+
+private:
+ void init(const Token *StringToks, unsigned NumStringToks);
};
} // end namespace clang
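A sketch of the new Preprocessor-free constructor; Toks/NumToks, SM, LangOpts, and Target are assumed to be in scope, and omitting the Diagnostic pointer suppresses semantic checking:

    StringLiteralParser Literal(Toks, NumToks, SM, LangOpts, Target);
    if (!Literal.hadError) {
      // e.g. map a byte index back through escape sequences:
      unsigned Off = Literal.getOffsetOfStringByte(Toks[0], /*ByteNo=*/0);
      (void)Off;
    }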
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
index 90f95b7..717c300 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
@@ -82,6 +82,9 @@ private:
/// AllowRedefinitionsWithoutWarning - True if this macro can be redefined
/// without emitting a warning.
bool IsAllowRedefinitionsWithoutWarning : 1;
+
+ /// \brief Must warn if the macro is unused at the end of translation unit.
+ bool IsWarnIfUnused : 1;
~MacroInfo() {
assert(ArgumentList == 0 && "Didn't call destroy before dtor!");
@@ -138,6 +141,11 @@ public:
IsAllowRedefinitionsWithoutWarning = Val;
}
+ /// \brief Set the value of the IsWarnIfUnused flag.
+ void setIsWarnIfUnused(bool val) {
+ IsWarnIfUnused = val;
+ }
+
/// setArgumentList - Set the specified list of identifiers as the argument
/// list for this macro.
void setArgumentList(IdentifierInfo* const *List, unsigned NumArgs,
@@ -201,6 +209,11 @@ public:
return IsAllowRedefinitionsWithoutWarning;
}
+ /// \brief Return true if we should emit a warning if the macro is unused.
+ bool isWarnIfUnused() const {
+ return IsWarnIfUnused;
+ }
+
/// getNumTokens - Return the number of tokens that this macro expands to.
///
unsigned getNumTokens() const {
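A sketch of the unused-macro plumbing from the definition side; MI is assumed to be a freshly created MacroInfo*:

    // Macros defined in the main file can opt in to unused-macro checking
    // at the end of the translation unit (see the Preprocessor changes
    // below for the consumer side).
    MI->setIsWarnIfUnused(true);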
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
index 782f2d5..b2a80a6 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -54,12 +54,43 @@ public:
SrcMgr::CharacteristicKind FileType) {
}
+ /// \brief This callback is invoked whenever an inclusion directive of
+ /// any kind (\c #include, \c #import, etc.) has been processed, regardless
+ /// of whether the inclusion will actually result in an inclusion.
+ ///
+ /// \param HashLoc The location of the '#' that starts the inclusion
+ /// directive.
+ ///
+ /// \param IncludeTok The token that indicates the kind of inclusion
+ /// directive, e.g., 'include' or 'import'.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param IsAngled Whether the file name was enclosed in angle brackets;
+ /// otherwise, it was enclosed in quotes.
+ ///
+ /// \param File The actual file that may be included by this inclusion
+ /// directive.
+ ///
+ /// \param EndLoc The location of the last token within the inclusion
+ /// directive.
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc) {
+ }
+
/// EndOfMainFile - This callback is invoked when the end of the main file is
/// reached; no subsequent callbacks will be made.
virtual void EndOfMainFile() {
}
/// Ident - This callback is invoked when a #ident or #sccs directive is read.
+ /// \param Loc The location of the directive.
+ /// \param str The text of the directive.
///
virtual void Ident(SourceLocation Loc, const std::string &str) {
}
@@ -73,6 +104,8 @@ public:
/// PragmaMessage - This callback is invoked when a #pragma message directive
/// is read.
+ /// \param Loc The location of the message directive.
+ /// \param Str The text of the message directive.
///
virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str) {
}
@@ -80,17 +113,48 @@ public:
/// MacroExpands - This is called by
/// Preprocessor::HandleMacroExpandedIdentifier when a macro invocation is
/// found.
- virtual void MacroExpands(const Token &Id, const MacroInfo* MI) {
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo* MI) {
}
/// MacroDefined - This hook is called whenever a macro definition is seen.
- virtual void MacroDefined(const IdentifierInfo *II, const MacroInfo *MI) {
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
}
/// MacroUndefined - This hook is called whenever a macro #undef is seen.
/// MI is released immediately following this callback.
- virtual void MacroUndefined(SourceLocation Loc, const IdentifierInfo *II,
- const MacroInfo *MI) {
+ virtual void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ }
+
+ /// If -- This hook is called whenever an #if is seen.
+ /// \param Range The SourceRange of the expression being tested.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void If(SourceRange Range) {
+ }
+
+ /// Elif -- This hook is called whenever an #elif is seen.
+ /// \param Range The SourceRange of the expression being tested.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void Elif(SourceRange Range) {
+ }
+
+ /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ /// \param MacroNameTok The token naming the macro whose definition state
+ /// is being tested.
+ virtual void Ifdef(const Token &MacroNameTok) {
+ }
+
+ /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ /// \param MacroNameTok The token naming the macro whose definition state
+ /// is being tested.
+ virtual void Ifndef(const Token &MacroNameTok) {
+ }
+
+ /// Else -- This hook is called whenever an #else is seen.
+ virtual void Else() {
+ }
+
+ /// Endif -- This hook is called whenever an #endif is seen.
+ virtual void Endif() {
}
};
@@ -119,6 +183,18 @@ public:
Second->FileSkipped(ParentFile, FilenameTok, FileType);
}
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc) {
+ First->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
+ EndLoc);
+ Second->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
+ EndLoc);
+ }
+
virtual void EndOfMainFile() {
First->EndOfMainFile();
Second->EndOfMainFile();
@@ -140,20 +216,55 @@ public:
Second->PragmaMessage(Loc, Str);
}
- virtual void MacroExpands(const Token &Id, const MacroInfo* MI) {
- First->MacroExpands(Id, MI);
- Second->MacroExpands(Id, MI);
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo* MI) {
+ First->MacroExpands(MacroNameTok, MI);
+ Second->MacroExpands(MacroNameTok, MI);
+ }
+
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ First->MacroDefined(MacroNameTok, MI);
+ Second->MacroDefined(MacroNameTok, MI);
+ }
+
+ virtual void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ First->MacroUndefined(MacroNameTok, MI);
+ Second->MacroUndefined(MacroNameTok, MI);
+ }
+
+ /// If -- This hook is called whenever an #if is seen.
+ virtual void If(SourceRange Range) {
+ First->If(Range);
+ Second->If(Range);
+ }
+
+ /// Elif -- This hook is called whenever an #elif is seen.
+ virtual void Elif(SourceRange Range) {
+ First->Elif(Range);
+ Second->Elif(Range);
+ }
+
+ /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ virtual void Ifdef(const Token &MacroNameTok) {
+ First->Ifdef(MacroNameTok);
+ Second->Ifdef(MacroNameTok);
+ }
+
+ /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ virtual void Ifndef(const Token &MacroNameTok) {
+ First->Ifndef(MacroNameTok);
+ Second->Ifndef(MacroNameTok);
}
- virtual void MacroDefined(const IdentifierInfo *II, const MacroInfo *MI) {
- First->MacroDefined(II, MI);
- Second->MacroDefined(II, MI);
+ /// Else -- This hook is called whenever an #else is seen.
+ virtual void Else() {
+ First->Else();
+ Second->Else();
}
- virtual void MacroUndefined(SourceLocation Loc, const IdentifierInfo *II,
- const MacroInfo *MI) {
- First->MacroUndefined(Loc, II, MI);
- Second->MacroUndefined(Loc, II, MI);
+ /// Endif -- This hook is called whenever an #endif is seen.
+ virtual void Endif() {
+ First->Endif();
+ Second->Endif();
}
};
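A sketch of observing the new inclusion callback; only the overridden hook matters, every other PPCallbacks method keeps its empty default (llvm::errs() assumes raw_ostream.h is available):

    class IncludeLogger : public PPCallbacks {
    public:
      virtual void InclusionDirective(SourceLocation HashLoc,
                                      const Token &IncludeTok,
                                      llvm::StringRef FileName,
                                      bool IsAngled,
                                      const FileEntry *File,
                                      SourceLocation EndLoc) {
        // File may be null if the named file could not be found.
        llvm::errs() << "included: " << FileName << "\n";
      }
    };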
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
index 5e8a4f1..094b7ef 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
@@ -31,7 +31,7 @@ namespace clang {
class FileEntry;
class PTHLexer;
class Diagnostic;
-class StatSysCallCache;
+class FileSystemStatCache;
class PTHManager : public IdentifierInfoLookup {
friend class PTHLexer;
@@ -128,11 +128,11 @@ public:
/// It is the responsibility of the caller to 'delete' the returned object.
PTHLexer *CreateLexer(FileID FID);
- /// createStatCache - Returns a StatSysCallCache object for use with
+ /// createStatCache - Returns a FileSystemStatCache object for use with
/// FileManager objects. These objects use the PTH data to speed up
/// calls to stat by memoizing their results from when the PTH file
/// was generated.
- StatSysCallCache *createStatCache();
+ FileSystemStatCache *createStatCache();
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
index c68555b..8bd2236 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
@@ -25,6 +25,28 @@ namespace clang {
class IdentifierInfo;
class PragmaNamespace;
+ /**
+ * \brief Describes how the pragma was introduced, e.g., with #pragma,
+ * _Pragma, or __pragma.
+ */
+ enum PragmaIntroducerKind {
+ /**
+ * \brief The pragma was introduced via #pragma.
+ */
+ PIK_HashPragma,
+
+ /**
+ * \brief The pragma was introduced via the C99 _Pragma(string-literal).
+ */
+ PIK__Pragma,
+
+ /**
+ * \brief The pragma was introduced via the Microsoft
+ * __pragma(token-string).
+ */
+ PIK___pragma
+ };
+
/// PragmaHandler - Instances of this interface defined to handle the various
/// pragmas that the language front-end uses. Each handler optionally has a
/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
@@ -42,7 +64,8 @@ public:
virtual ~PragmaHandler();
llvm::StringRef getName() const { return Name; }
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken) = 0;
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) = 0;
/// getIfNamespace - If this is a namespace, return it. This is equivalent to
/// using a dynamic_cast, but doesn't require RTTI.
@@ -55,7 +78,8 @@ class EmptyPragmaHandler : public PragmaHandler {
public:
EmptyPragmaHandler();
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
@@ -90,7 +114,8 @@ public:
return Handlers.empty();
}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
virtual PragmaNamespace *getIfNamespace() { return this; }
};
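A sketch of a handler against the revised signature; registration and the handler's name are omitted for brevity:

    class WidgetPragmaHandler : public PragmaHandler {
    public:
      virtual void HandlePragma(Preprocessor &PP,
                                PragmaIntroducerKind Introducer,
                                Token &FirstToken) {
        if (Introducer == PIK__Pragma) {
          // Reached via the C99 _Pragma("...") operator, not #pragma.
        }
      }
    };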
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
index 730f04f..afd7ae1 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
@@ -35,6 +35,7 @@ void operator delete(void* ptr, clang::PreprocessingRecord& PR,
namespace clang {
class MacroDefinition;
+ class FileEntry;
/// \brief Base class that describes a preprocessed entity, which may be a
/// preprocessor directive or macro instantiation.
@@ -54,8 +55,12 @@ namespace clang {
/// \brief A macro definition.
MacroDefinitionKind,
+ /// \brief An inclusion directive, such as \c #include, \c
+ /// #import, or \c #include_next.
+ InclusionDirectiveKind,
+
FirstPreprocessingDirective = PreprocessingDirectiveKind,
- LastPreprocessingDirective = MacroDefinitionKind
+ LastPreprocessingDirective = InclusionDirectiveKind
};
private:
@@ -173,6 +178,66 @@ namespace clang {
}
static bool classof(const MacroDefinition *) { return true; }
};
+
+ /// \brief Record the location of an inclusion directive, such as an
+ /// \c #include or \c #import statement.
+ class InclusionDirective : public PreprocessingDirective {
+ public:
+ /// \brief The kind of inclusion directives known to the
+ /// preprocessor.
+ enum InclusionKind {
+ /// \brief An \c #include directive.
+ Include,
+ /// \brief An Objective-C \c #import directive.
+ Import,
+ /// \brief A GNU \c #include_next directive.
+ IncludeNext,
+ /// \brief A Clang \c #__include_macros directive.
+ IncludeMacros
+ };
+
+ private:
+ /// \brief The name of the file that was included, as written in
+ /// the source.
+ llvm::StringRef FileName;
+
+ /// \brief Whether the file name was in quotation marks; otherwise, it was
+ /// in angle brackets.
+ unsigned InQuotes : 1;
+
+ /// \brief The kind of inclusion directive we have.
+ ///
+ /// This is a value of type InclusionKind.
+ unsigned Kind : 2;
+
+ /// \brief The file that was included.
+ const FileEntry *File;
+
+ public:
+ InclusionDirective(PreprocessingRecord &PPRec,
+ InclusionKind Kind, llvm::StringRef FileName,
+ bool InQuotes, const FileEntry *File, SourceRange Range);
+
+ /// \brief Determine what kind of inclusion directive this is.
+ InclusionKind getKind() const { return static_cast<InclusionKind>(Kind); }
+
+ /// \brief Retrieve the included file name as it was written in the source.
+ llvm::StringRef getFileName() const { return FileName; }
+
+ /// \brief Determine whether the included file name was written in quotes;
+ /// otherwise, it was written in angle brackets.
+ bool wasInQuotes() const { return InQuotes; }
+
+ /// \brief Retrieve the file entry for the actual file that was included
+ /// by this directive.
+ const FileEntry *getFile() const { return File; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *PE) {
+ return PE->getKind() == InclusionDirectiveKind;
+ }
+ static bool classof(const InclusionDirective *) { return true; }
+ };
/// \brief An abstract class that should be subclassed by any external source
/// of preprocessing record entries.
@@ -183,6 +248,10 @@ namespace clang {
/// \brief Read any preallocated preprocessed entities from the external
/// source.
virtual void ReadPreprocessedEntities() = 0;
+
+ /// \brief Read the preprocessed entity at the given offset.
+ virtual PreprocessedEntity *
+ ReadPreprocessedEntityAtOffset(uint64_t Offset) = 0;
};
/// \brief A record of the steps taken while preprocessing a source file,
@@ -229,13 +298,22 @@ namespace clang {
iterator end(bool OnlyLocalEntities = false);
const_iterator begin(bool OnlyLocalEntities = false) const;
const_iterator end(bool OnlyLocalEntities = false) const;
-
+
/// \brief Add a new preprocessed entity to this record.
void addPreprocessedEntity(PreprocessedEntity *Entity);
/// \brief Set the external source for preprocessed entities.
void SetExternalSource(ExternalPreprocessingRecordSource &Source,
unsigned NumPreallocatedEntities);
+
+ /// \brief Retrieve the external source for preprocessed entities.
+ ExternalPreprocessingRecordSource *getExternalSource() const {
+ return ExternalSource;
+ }
+
+ unsigned getNumPreallocatedEntities() const {
+ return NumPreallocatedEntities;
+ }
/// \brief Set the preallocated entry at the given index to the given
/// preprocessed entity.
@@ -256,9 +334,14 @@ namespace clang {
MacroDefinition *findMacroDefinition(const MacroInfo *MI);
virtual void MacroExpands(const Token &Id, const MacroInfo* MI);
- virtual void MacroDefined(const IdentifierInfo *II, const MacroInfo *MI);
- virtual void MacroUndefined(SourceLocation Loc, const IdentifierInfo *II,
- const MacroInfo *MI);
+ virtual void MacroDefined(const Token &Id, const MacroInfo *MI);
+ virtual void MacroUndefined(const Token &Id, const MacroInfo *MI);
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc);
};
} // end namespace clang
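A sketch of consuming the new entity kind; PPRec is assumed to be a populated PreprocessingRecord whose iterators yield PreprocessedEntity pointers:

    for (PreprocessingRecord::iterator I = PPRec.begin(), E = PPRec.end();
         I != E; ++I) {
      if (InclusionDirective *ID = llvm::dyn_cast<InclusionDirective>(*I))
        llvm::errs() << ID->getFileName()
                     << (ID->wasInQuotes() ? " (quoted)\n" : " (angled)\n");
    }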
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index 6b9b89e..018f7e9 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_LEX_PREPROCESSOR_H
#define LLVM_CLANG_LEX_PREPROCESSOR_H
+#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/PPCallbacks.h"
@@ -24,6 +25,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
@@ -82,6 +84,7 @@ class Preprocessor {
IdentifierInfo *Ident__VA_ARGS__; // __VA_ARGS__
IdentifierInfo *Ident__has_feature; // __has_feature
IdentifierInfo *Ident__has_builtin; // __has_builtin
+ IdentifierInfo *Ident__has_attribute; // __has_attribute
IdentifierInfo *Ident__has_include; // __has_include
IdentifierInfo *Ident__has_include_next; // __has_include_next
@@ -194,10 +197,15 @@ class Preprocessor {
/// to the actual definition of the macro.
llvm::DenseMap<IdentifierInfo*, MacroInfo*> Macros;
- /// MICache - A "freelist" of MacroInfo objects that can be reused for quick
- /// allocation.
- /// FIXME: why not use a singly linked list?
- std::vector<MacroInfo*> MICache;
+ /// \brief Macros that we want to warn about because they are not used by
+ /// the end of the translation unit; we store just their SourceLocations
+ /// instead of something like MacroInfo*. The benefit of this is that when
+ /// we are deserializing from PCH, we don't need to deserialize identifiers
+ /// & macros just to report that they are unused; we just warn using the
+ /// SourceLocations of this set (which will be filled in by the ASTReader).
+ /// We are using SmallPtrSet instead of a vector for faster removal.
+ typedef llvm::SmallPtrSet<SourceLocation, 32> WarnUnusedMacroLocsTy;
+ WarnUnusedMacroLocsTy WarnUnusedMacroLocs;
/// MacroArgCache - This is a "freelist" of MacroArg objects that can be
/// reused for quick allocation.
@@ -251,6 +259,22 @@ private: // Cached tokens state.
/// invoked (at which point the last position is popped).
std::vector<CachedTokensTy::size_type> BacktrackPositions;
+ struct MacroInfoChain {
+ MacroInfo MI;
+ MacroInfoChain *Next;
+ MacroInfoChain *Prev;
+ };
+
+ /// MacroInfos are managed as a chain for easy disposal. This is the head
+ /// of that list.
+ MacroInfoChain *MIChainHead;
+
+ /// MICache - A "freelist" of MacroInfo objects that can be reused for quick
+ /// allocation.
+ MacroInfoChain *MICache;
+
+ MacroInfo *getInfoForMacro(IdentifierInfo *II) const;
+
public:
Preprocessor(Diagnostic &diags, const LangOptions &opts,
const TargetInfo &target,
@@ -324,7 +348,10 @@ public:
/// getMacroInfo - Given an identifier, return the MacroInfo it is #defined to
/// or null if it isn't #define'd.
MacroInfo *getMacroInfo(IdentifierInfo *II) const {
- return II->hasMacroDefinition() ? Macros.find(II)->second : 0;
+ if (!II->hasMacroDefinition())
+ return 0;
+
+ return getInfoForMacro(II);
}
/// setMacroInfo - Specify a macro for this identifier.
@@ -591,6 +618,9 @@ public:
/// for which we are performing code completion.
bool isCodeCompletionFile(SourceLocation FileLoc) const;
+ /// \brief Determine if we are performing code completion.
+ bool isCodeCompletionEnabled() const { return CodeCompletionFile != 0; }
+
/// \brief Instruct the preprocessor to skip part of the main source file.
///
@@ -607,12 +637,11 @@ public:
/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
- return Diags->Report(FullSourceLoc(Loc, getSourceManager()), DiagID);
+ return Diags->Report(Loc, DiagID);
}
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID) {
- return Diags->Report(FullSourceLoc(Tok.getLocation(), getSourceManager()),
- DiagID);
+ return Diags->Report(Tok.getLocation(), DiagID);
}
/// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
@@ -622,17 +651,9 @@ public:
/// UCNs, etc.
///
/// \param Invalid If non-NULL, will be set \c true if an error occurs.
- std::string getSpelling(const Token &Tok, bool *Invalid = 0) const;
-
- /// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
- /// token is the characters used to represent the token in the source file
- /// after trigraph expansion and escaped-newline folding. In particular, this
- /// wants to get the true, uncanonicalized, spelling of things like digraphs
- /// UCNs, etc.
- static std::string getSpelling(const Token &Tok,
- const SourceManager &SourceMgr,
- const LangOptions &Features,
- bool *Invalid = 0);
+ std::string getSpelling(const Token &Tok, bool *Invalid = 0) const {
+ return Lexer::getSpelling(Tok, SourceMgr, Features, Invalid);
+ }
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
@@ -645,7 +666,9 @@ public:
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned getSpelling(const Token &Tok, const char *&Buffer,
- bool *Invalid = 0) const;
+ bool *Invalid = 0) const {
+ return Lexer::getSpelling(Tok, Buffer, SourceMgr, Features, Invalid);
+ }
/// getSpelling - This method is used to get the spelling of a token into a
/// SmallVector. Note that the returned StringRef may not point to the
@@ -692,7 +715,9 @@ public:
/// location should refer to. The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
- SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
+ SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0) {
+ return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, Features);
+ }
/// DumpToken - Print the token to stderr, used for debugging.
///
@@ -702,7 +727,10 @@ public:
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// token, return a new location that specifies a character within the token.
- SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,unsigned Char);
+ SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned Char) const {
+ return Lexer::AdvanceToTokenCharacter(TokStart, Char, SourceMgr, Features);
+ }
/// IncrementPasteCounter - Increment the counters for the number of token
/// paste operations performed. If fast was specified, this is a 'fast paste'
@@ -726,10 +754,10 @@ public:
// Preprocessor callback methods. These are invoked by a lexer as various
// directives and events are found.
- /// LookUpIdentifierInfo - Given a tok::identifier token, look up the
- /// identifier information for the token and install it into the token.
- IdentifierInfo *LookUpIdentifierInfo(Token &Identifier,
- const char *BufPtr = 0) const;
+ /// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
+ /// identifier information for the token and install it into the token,
+ /// updating the token kind accordingly.
+ IdentifierInfo *LookUpIdentifierInfo(Token &Identifier) const;
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier and has filled in the tokens IdentifierInfo member. This
@@ -812,7 +840,12 @@ public:
/// This code concatenates and consumes tokens up to the '>' token. It
/// returns false if the > was found, otherwise it returns true if it finds
/// and consumes the EOM marker.
- bool ConcatenateIncludeName(llvm::SmallString<128> &FilenameBuffer);
+ bool ConcatenateIncludeName(llvm::SmallString<128> &FilenameBuffer,
+ SourceLocation &End);
+
+ /// LexOnOffSwitch - Lex an on-off-switch (C99 6.10.6p2) and verify that it is
+ /// followed by EOM. Return true if the token is not a valid on-off-switch.
+ bool LexOnOffSwitch(tok::OnOffSwitch &OOS);
private:
@@ -909,8 +942,8 @@ private:
/// is not enclosed within a string literal.
void HandleMicrosoft__pragma(Token &Tok);
- void Handle_Pragma(const std::string &StrVal, SourceLocation PragmaLoc,
- SourceLocation RParenLoc);
+ void Handle_Pragma(unsigned Introducer, const std::string &StrVal,
+ SourceLocation PragmaLoc, SourceLocation RParenLoc);
/// EnterSourceFileWithLexer - Add a lexer to the top of the include stack and
/// start lexing tokens from it instead of the current buffer.
@@ -961,12 +994,13 @@ private:
void HandleIdentSCCSDirective(Token &Tok);
// File inclusion.
- void HandleIncludeDirective(Token &Tok,
+ void HandleIncludeDirective(SourceLocation HashLoc,
+ Token &Tok,
const DirectoryLookup *LookupFrom = 0,
bool isImport = false);
- void HandleIncludeNextDirective(Token &Tok);
- void HandleIncludeMacrosDirective(Token &Tok);
- void HandleImportDirective(Token &Tok);
+ void HandleIncludeNextDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleIncludeMacrosDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleImportDirective(SourceLocation HashLoc, Token &Tok);
// Macro handling.
void HandleDefineDirective(Token &Tok);
@@ -981,7 +1015,7 @@ private:
void HandleElifDirective(Token &Tok);
// Pragmas.
- void HandlePragmaDirective();
+ void HandlePragmaDirective(unsigned Introducer);
public:
void HandlePragmaOnce(Token &OnceTok);
void HandlePragmaMark();
@@ -997,6 +1031,10 @@ public:
// Return true and store the first token only if any CommentHandler
// has inserted some tokens and getCommentRetentionState() is false.
bool HandleComment(Token &Token, SourceRange Comment);
+
+ /// \brief A macro is used, update information about macros that need unused
+ /// warnings.
+ void markMacroAsUsed(MacroInfo *MI);
};
/// \brief Abstract base class that describes a handler that will receive
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
index 477a213..d833293 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
@@ -155,6 +155,18 @@ public:
/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
/// getFileID(), this only works for lexers with attached preprocessors.
const FileEntry *getFileEntry() const;
+
+ /// \brief Iterator that traverses the current stack of preprocessor
+ /// conditional directives (#if/#ifdef/#ifndef).
+ typedef llvm::SmallVectorImpl<PPConditionalInfo>::const_iterator
+ conditional_iterator;
+
+ conditional_iterator conditional_begin() const {
+ return ConditionalStack.begin();
+ }
+ conditional_iterator conditional_end() const {
+ return ConditionalStack.end();
+ }
};
} // end namespace clang
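As a usage sketch of the new iterators (assuming `L` is a PreprocessorLexer with an attached preprocessor):

    // Sketch: walk the stack of open conditional regions.
    for (PreprocessorLexer::conditional_iterator
           I = L.conditional_begin(), E = L.conditional_end(); I != E; ++I) {
      const PPConditionalInfo &CI = *I;
      // CI.IfLoc is where the enclosing #if/#ifdef/#ifndef begins.
    }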
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index 954b36e..edcfcc10d 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -76,7 +76,8 @@ public:
StartOfLine = 0x01, // At start of line or only after whitespace.
LeadingSpace = 0x02, // Whitespace exists before this token.
DisableExpand = 0x04, // This identifier may never be macro expanded.
- NeedsCleaning = 0x08 // Contained an escaped newline or trigraph.
+ NeedsCleaning = 0x08, // Contained an escaped newline or trigraph.
+ LeadingEmptyMacro = 0x10 // Empty macro exists before this token.
};
tok::TokenKind getKind() const { return (tok::TokenKind)Kind; }
@@ -87,6 +88,12 @@ public:
bool is(tok::TokenKind K) const { return Kind == (unsigned) K; }
bool isNot(tok::TokenKind K) const { return Kind != (unsigned) K; }
+ /// isAnyIdentifier - Return true if this is a raw identifier (when lexing
+ /// in raw mode) or a non-keyword identifier (when lexing in non-raw mode).
+ bool isAnyIdentifier() const {
+ return is(tok::identifier) || is(tok::raw_identifier);
+ }
+
/// isLiteral - Return true if this is a "literal", like a numeric
/// constant, string, etc.
bool isLiteral() const {
@@ -96,9 +103,11 @@ public:
}
bool isAnnotation() const {
- return is(tok::annot_typename) ||
- is(tok::annot_cxxscope) ||
- is(tok::annot_template_id);
+#define ANNOTATION(NAME) \
+ if (is(tok::annot_##NAME)) \
+ return true;
+#include "clang/Basic/TokenKinds.def"
+ return false;
}
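The new body uses the X-macro idiom: TokenKinds.def expands ANNOTATION(NAME) once per annotation token kind, so this test tracks newly added kinds automatically. A generic illustration of the pattern, with a hypothetical kinds.def and enumerators:

    // kinds.def (hypothetical):  ANNOTATION(typename)  ANNOTATION(cxxscope)
    bool isAnnotationKind(unsigned Kind) {
    #define ANNOTATION(NAME) if (Kind == annot_##NAME) return true;
    #include "kinds.def"
    #undef ANNOTATION
      return false;
    }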
/// getLocation - Return a source location identifier for the specified
@@ -153,7 +162,10 @@ public:
}
IdentifierInfo *getIdentifierInfo() const {
- assert(!isAnnotation() && "Used IdentInfo on annotation token!");
+ assert(isNot(tok::raw_identifier) &&
+ "getIdentifierInfo() on a tok::raw_identifier token!");
+ assert(!isAnnotation() &&
+ "getIdentifierInfo() on an annotation token!");
if (isLiteral()) return 0;
return (IdentifierInfo*) PtrData;
}
@@ -161,6 +173,18 @@ public:
PtrData = (void*) II;
}
+ /// getRawIdentifierData - For a raw identifier token (i.e., an identifier
+ /// lexed in raw mode), returns a pointer to the start of it in the text
+ /// buffer if known, null otherwise.
+ const char *getRawIdentifierData() const {
+ assert(is(tok::raw_identifier));
+ return reinterpret_cast<const char*>(PtrData);
+ }
+ void setRawIdentifierData(const char *Ptr) {
+ assert(is(tok::raw_identifier));
+ PtrData = const_cast<char*>(Ptr);
+ }
+
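These accessors pair with the LookUpIdentifierInfo() change earlier in this diff; a sketch of the intended round trip (`PP` and `Tok` are assumptions):

    if (Tok.is(tok::raw_identifier)) {
      const char *Raw = Tok.getRawIdentifierData(); // spelling in the source buffer
      PP.LookUpIdentifierInfo(Tok); // installs the IdentifierInfo, updates the kind
      // Tok is now tok::identifier (or a keyword); getIdentifierInfo() is valid.
    }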
/// getLiteralData - For a literal token (numeric constant, string, etc), this
/// returns a pointer to the start of it in the text buffer if known, null
/// otherwise.
@@ -231,7 +255,13 @@ public:
/// newlines in it.
///
bool needsCleaning() const { return (Flags & NeedsCleaning) ? true : false; }
-
+
+ /// \brief Return true if this token has an empty macro before it.
+ ///
+ bool hasLeadingEmptyMacro() const {
+ return (Flags & LeadingEmptyMacro) ? true : false;
+ }
+
};
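The flag lets clients recognize constructs like `EMPTY_MACRO;`, e.g. to avoid an empty-statement warning. Sketch (`SemiTok` assumed):

    if (SemiTok.is(tok::semi) && SemiTok.hasLeadingEmptyMacro()) {
      // A macro that expanded to nothing precedes this ';' --
      // probably not a stray empty statement, so don't warn.
    }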
/// PPConditionalInfo - Information about the conditional stack (#if directives)
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
index d7c5eee..f640b37 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define PARSESTART
#include "clang/Basic/DiagnosticParseKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 41a2fb6..7587920 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -25,8 +25,6 @@
#include <list>
namespace clang {
- class AttributeList;
- struct CXX0XAttributeList;
class PragmaHandler;
class Scope;
class DeclGroupRef;
@@ -34,7 +32,8 @@ namespace clang {
class Parser;
class PragmaUnusedHandler;
class ColonProtectionRAIIObject;
-
+ class InMessageExpressionRAIIObject;
+
/// PrettyStackTraceParserEntry - If a crash happens while the parser is active,
/// an entry is printed for it.
class PrettyStackTraceParserEntry : public llvm::PrettyStackTraceEntry {
@@ -75,6 +74,7 @@ namespace prec {
class Parser : public CodeCompletionHandler {
friend class PragmaUnusedHandler;
friend class ColonProtectionRAIIObject;
+ friend class InMessageExpressionRAIIObject;
friend class ParenBraceBracketBalancer;
PrettyStackTraceParserEntry CrashInfo;
@@ -112,12 +112,18 @@ class Parser : public CodeCompletionHandler {
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_pixel;
+ /// C++0x contextual keywords.
+ mutable IdentifierInfo *Ident_final;
+ mutable IdentifierInfo *Ident_override;
+
llvm::OwningPtr<PragmaHandler> AlignHandler;
llvm::OwningPtr<PragmaHandler> GCCVisibilityHandler;
llvm::OwningPtr<PragmaHandler> OptionsHandler;
llvm::OwningPtr<PragmaHandler> PackHandler;
llvm::OwningPtr<PragmaHandler> UnusedHandler;
llvm::OwningPtr<PragmaHandler> WeakHandler;
+ llvm::OwningPtr<PragmaHandler> FPContractHandler;
+ llvm::OwningPtr<PragmaHandler> OpenCLExtensionHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
@@ -131,8 +137,18 @@ class Parser : public CodeCompletionHandler {
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
+ /// \brief When true, we are directly inside an Objective-C message
+ /// send expression.
+ ///
+ /// This is managed by the \c InMessageExpressionRAIIObject class, and
+ /// should not be set directly.
+ bool InMessageExpression;
+
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
+
+ /// Factory object for creating AttributeList objects.
+ AttributeList::Factory AttrFactory;
public:
Parser(Preprocessor &PP, Sema &Actions);
@@ -152,7 +168,7 @@ public:
typedef Stmt StmtTy;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef CXXBaseSpecifier BaseTy;
- typedef CXXBaseOrMemberInitializer MemInitTy;
+ typedef CXXCtorInitializer MemInitTy;
typedef NestedNameSpecifier CXXScopeTy;
typedef TemplateParameterList TemplateParamsTy;
typedef OpaquePtr<TemplateName> TemplateTy;
@@ -227,6 +243,11 @@ private:
Tok.getKind() == tok::wide_string_literal;
}
+ /// \brief Returns true if the current token is a '=' or '==' and
+ /// false otherwise. If it's '==', we assume that it's a typo and we emit
+ /// DiagID and a fixit hint to turn '==' -> '='.
+ bool isTokenEqualOrMistypedEqualEqual(unsigned DiagID);
+
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with all kinds of tokens: strings and specific other
/// tokens must be consumed with custom methods below. This returns the
@@ -330,6 +351,9 @@ private:
/// based on context.
void CodeCompletionRecovery();
+ /// \brief Handle the annotation token produced for #pragma unused(...)
+ void HandlePragmaUnused();
+
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
@@ -464,6 +488,13 @@ private:
const char *DiagMsg = "",
tok::TokenKind SkipToTok = tok::unknown);
+ /// \brief The parser expects a semicolon and, if present, will consume it.
+ ///
+ /// If the next token is not a semicolon, this emits the specified diagnostic,
+ /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
+ /// to the semicolon, consumes that extra token.
+ bool ExpectAndConsumeSemi(unsigned DiagID);
+
//===--------------------------------------------------------------------===//
// Scope manipulation
@@ -531,21 +562,59 @@ private:
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T, bool StopAtSemi = true,
- bool DontConsume = false) {
- return SkipUntil(&T, 1, StopAtSemi, DontConsume);
+ bool DontConsume = false, bool StopAtCodeCompletion = false) {
+ return SkipUntil(&T, 1, StopAtSemi, DontConsume, StopAtCodeCompletion);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, bool StopAtSemi = true,
- bool DontConsume = false) {
+ bool DontConsume = false, bool StopAtCodeCompletion = false) {
tok::TokenKind TokArray[] = {T1, T2};
- return SkipUntil(TokArray, 2, StopAtSemi, DontConsume);
+ return SkipUntil(TokArray, 2, StopAtSemi, DontConsume,StopAtCodeCompletion);
}
bool SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
- bool StopAtSemi = true, bool DontConsume = false);
+ bool StopAtSemi = true, bool DontConsume = false,
+ bool StopAtCodeCompletion = false);
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
- struct LexedMethod {
+ struct ParsingClass;
+
+ /// [class.mem]p1: "... the class is regarded as complete within
+ /// - function bodies
+ /// - default arguments
+ /// - exception-specifications (TODO: C++0x)
+ /// - and brace-or-equal-initializers (TODO: C++0x)
+ /// for non-static data members (including such things in nested classes)."
+ /// LateParsedDeclarations build the tree of those elements so they can
+ /// be parsed after parsing the top-level class.
+ class LateParsedDeclaration {
+ public:
+ virtual ~LateParsedDeclaration();
+
+ virtual void ParseLexedMethodDeclarations();
+ virtual void ParseLexedMethodDefs();
+ };
+
+ /// Inner node of the LateParsedDeclaration tree that parses
+ /// all its members recursively.
+ class LateParsedClass : public LateParsedDeclaration {
+ public:
+ LateParsedClass(Parser *P, ParsingClass *C);
+ virtual ~LateParsedClass();
+
+ virtual void ParseLexedMethodDeclarations();
+ virtual void ParseLexedMethodDefs();
+
+ private:
+ Parser *Self;
+ ParsingClass *Class;
+ };
+
+ /// Contains the lexed tokens of a member function definition
+ /// which needs to be parsed at the end of the class declaration
+ /// after parsing all other member declarations.
+ struct LexedMethod : public LateParsedDeclaration {
+ Parser *Self;
Decl *D;
CachedTokens Toks;
@@ -554,7 +623,10 @@ private:
/// otherwise, it is a member function declaration.
bool TemplateScope;
- explicit LexedMethod(Decl *MD) : D(MD), TemplateScope(false) {}
+ explicit LexedMethod(Parser* P, Decl *MD)
+ : Self(P), D(MD), TemplateScope(false) {}
+
+ virtual void ParseLexedMethodDefs();
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
@@ -580,9 +652,13 @@ private:
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
- struct LateParsedMethodDeclaration {
- explicit LateParsedMethodDeclaration(Decl *M)
- : Method(M), TemplateScope(false) { }
+ struct LateParsedMethodDeclaration : public LateParsedDeclaration {
+ explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
+ : Self(P), Method(M), TemplateScope(false) { }
+
+ virtual void ParseLexedMethodDeclarations();
+
+ Parser* Self;
/// Method - The method declaration.
Decl *Method;
@@ -600,17 +676,12 @@ private:
llvm::SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
};
- /// LateParsedMethodDecls - During parsing of a top (non-nested) C++
- /// class, its method declarations that contain parts that won't be
+ /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
+ /// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
- /// the method declarations will be stored here with the tokens that
- /// will be parsed to create those entities.
- typedef std::list<LateParsedMethodDeclaration> LateParsedMethodDecls;
-
- /// LexedMethodsForTopClass - During parsing of a top (non-nested) C++ class,
- /// its inline method definitions and the inline method definitions of its
- /// nested classes are lexed and stored here.
- typedef std::list<LexedMethod> LexedMethodsForTopClass;
+ /// the method declarations and possibly attached inline definitions
+ /// will be stored here with the tokens that will be parsed to create
+ /// those entities.
+ typedef llvm::SmallVector<LateParsedDeclaration*, 2> LateParsedDeclarationsContainer;
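A sketch of the traversal this container enables (simplified; the real Parser methods also manage scopes):

    void parseLateParsed(LateParsedDeclarationsContainer &LPDs) {
      for (unsigned i = 0, e = LPDs.size(); i != e; ++i)
        LPDs[i]->ParseLexedMethodDeclarations(); // default arguments first
      for (unsigned i = 0, e = LPDs.size(); i != e; ++i)
        LPDs[i]->ParseLexedMethodDefs();         // then method bodies
    }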
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
@@ -632,16 +703,10 @@ private:
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
- /// MethodDecls - Method declarations that contain pieces whose
- /// parsing will be delayed until the class is fully defined.
- LateParsedMethodDecls MethodDecls;
-
- /// MethodDefs - Methods whose definitions will be parsed once the
- /// class has been fully defined.
- LexedMethodsForTopClass MethodDefs;
-
- /// \brief Nested classes inside this class.
- llvm::SmallVector<ParsingClass*, 4> NestedClasses;
+ /// LateParsedDeclarations - Method declarations, inline definitions and
+ /// nested classes that contain pieces whose parsing will be delayed until
+ /// the top-level class is fully defined.
+ LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
@@ -660,7 +725,7 @@ private:
/// class or function definition.
class ParsingDeclRAIIObject {
Sema &Actions;
- Sema::ParsingDeclStackState State;
+ Sema::ParsingDeclState State;
bool Popped;
public:
@@ -772,23 +837,24 @@ private:
class ParsingClassDefinition {
Parser &P;
bool Popped;
+ Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass)
- : P(P), Popped(false) {
- P.PushParsingClass(TagOrTemplate, TopLevelClass);
+ : P(P), Popped(false),
+ State(P.PushParsingClass(TagOrTemplate, TopLevelClass)) {
}
/// \brief Pop this class of the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
- P.PopParsingClass();
+ P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
- P.PopParsingClass();
+ P.PopParsingClass(State);
}
};
@@ -838,16 +904,22 @@ private:
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
+
+ SourceRange getSourceRange() const;
};
- void PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass);
+ Sema::ParsingClassState
+ PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass);
void DeallocateParsedClasses(ParsingClass *Class);
- void PopParsingClass();
+ void PopParsingClass(Sema::ParsingClassState);
- Decl *ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
- const ParsedTemplateInfo &TemplateInfo);
+ Decl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS);
void ParseLexedMethodDeclarations(ParsingClass &Class);
+ void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
+ void ParseLexedMethodDef(LexedMethod &LM);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
@@ -861,14 +933,17 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
- DeclGroupPtrTy ParseExternalDeclaration(CXX0XAttributeList Attr,
+ struct ParsedAttributesWithRange : ParsedAttributes {
+ SourceRange Range;
+ };
+
+ DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = 0);
bool isDeclarationAfterDeclarator() const;
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
- DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(AttributeList *Attr,
- AccessSpecifier AS = AS_none);
+ DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
+ AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
- AttributeList *Attr,
AccessSpecifier AS = AS_none);
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
@@ -883,7 +958,7 @@ private:
Decl *ParseObjCAtDirectives();
Decl *ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
- AttributeList *prefixAttrs = 0);
+ ParsedAttributes &prefixAttrs);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
@@ -892,10 +967,11 @@ private:
bool WarnOnDeclarations,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc);
+ bool ParseObjCProtocolQualifiers(DeclSpec &DS);
void ParseObjCInterfaceDeclList(Decl *interfaceDecl,
tok::ObjCKeywordKind contextKey);
Decl *ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
- AttributeList *prefixAttrs = 0);
+ ParsedAttributes &prefixAttrs);
Decl *ObjCImpDecl;
llvm::SmallVector<Decl *, 4> PendingObjCImpDecl;
@@ -923,8 +999,7 @@ private:
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
Decl *classDecl,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword);
- void ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl,
- Decl **Methods, unsigned NumMethods);
+ void ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl);
Decl *ParseObjCMethodDefinition();
@@ -1022,6 +1097,10 @@ private:
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
+ // C++ : Microsoft __uuidof Expression
+ ExprResult ParseCXXUuidof();
+
+ //===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
@@ -1042,6 +1121,10 @@ private:
bool &hasAnyExceptionSpec);
//===--------------------------------------------------------------------===//
+ // C++0x 8: Function declaration trailing-return-type
+ TypeResult ParseTrailingReturnType();
+
+ //===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
@@ -1117,30 +1200,32 @@ private:
// C99 6.8: Statements and Blocks.
StmtResult ParseStatement() {
- return ParseStatementOrDeclaration(true);
+ StmtVector Stmts(Actions);
+ return ParseStatementOrDeclaration(Stmts, true);
}
- StmtResult ParseStatementOrDeclaration(bool OnlyStatement = false);
- StmtResult ParseLabeledStatement(AttributeList *Attr);
- StmtResult ParseCaseStatement(AttributeList *Attr);
- StmtResult ParseDefaultStatement(AttributeList *Attr);
- StmtResult ParseCompoundStatement(AttributeList *Attr,
+ StmtResult ParseStatementOrDeclaration(StmtVector& Stmts,
+ bool OnlyStatement = false);
+ StmtResult ParseLabeledStatement(ParsedAttributes &Attr);
+ StmtResult ParseCaseStatement(ParsedAttributes &Attr);
+ StmtResult ParseDefaultStatement(ParsedAttributes &Attr);
+ StmtResult ParseCompoundStatement(ParsedAttributes &Attr,
bool isStmtExpr = false);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(ExprResult &ExprResult,
Decl *&DeclResult,
SourceLocation Loc,
bool ConvertToBoolean);
- StmtResult ParseIfStatement(AttributeList *Attr);
- StmtResult ParseSwitchStatement(AttributeList *Attr);
- StmtResult ParseWhileStatement(AttributeList *Attr);
- StmtResult ParseDoStatement(AttributeList *Attr);
- StmtResult ParseForStatement(AttributeList *Attr);
- StmtResult ParseGotoStatement(AttributeList *Attr);
- StmtResult ParseContinueStatement(AttributeList *Attr);
- StmtResult ParseBreakStatement(AttributeList *Attr);
- StmtResult ParseReturnStatement(AttributeList *Attr);
+ StmtResult ParseIfStatement(ParsedAttributes &Attr);
+ StmtResult ParseSwitchStatement(ParsedAttributes &Attr);
+ StmtResult ParseWhileStatement(ParsedAttributes &Attr);
+ StmtResult ParseDoStatement(ParsedAttributes &Attr);
+ StmtResult ParseForStatement(ParsedAttributes &Attr);
+ StmtResult ParseGotoStatement(ParsedAttributes &Attr);
+ StmtResult ParseContinueStatement(ParsedAttributes &Attr);
+ StmtResult ParseBreakStatement(ParsedAttributes &Attr);
+ StmtResult ParseReturnStatement(ParsedAttributes &Attr);
StmtResult ParseAsmStatement(bool &msAsm);
- StmtResult FuzzyParseMicrosoftAsmStatement();
+ StmtResult FuzzyParseMicrosoftAsmStatement(SourceLocation AsmLoc);
bool ParseAsmOperandsOpt(llvm::SmallVectorImpl<IdentifierInfo *> &Names,
llvm::SmallVectorImpl<ExprTy *> &Constraints,
llvm::SmallVectorImpl<ExprTy *> &Exprs);
@@ -1148,7 +1233,7 @@ private:
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
- StmtResult ParseCXXTryBlock(AttributeList *Attr);
+ StmtResult ParseCXXTryBlock(ParsedAttributes &Attr);
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc);
StmtResult ParseCXXCatchBlock();
@@ -1173,11 +1258,13 @@ private:
DSC_top_level // top-level/namespace declaration context
};
- DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
- CXX0XAttributeList Attr);
- DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
+ DeclGroupPtrTy ParseDeclaration(StmtVector &Stmts,
+ unsigned Context, SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs);
+ DeclGroupPtrTy ParseSimpleDeclaration(StmtVector &Stmts,
+ unsigned Context,
SourceLocation &DeclEnd,
- AttributeList *Attr,
+ ParsedAttributes &attrs,
bool RequireSemi);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
bool AllowFunctionDefinitions,
@@ -1187,6 +1274,12 @@ private:
Decl *ParseFunctionStatementBody(Decl *Decl);
Decl *ParseFunctionTryBlock(Decl *Decl);
+ /// \brief When in code-completion, skip parsing of the function/method body
+ /// unless the body contains the code-completion point.
+ ///
+ /// \returns true if the function body was skipped.
+ bool trySkippingFunctionBodyForCodeCompletion();
+
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS);
@@ -1206,7 +1299,8 @@ private:
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, bool IsParameter);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
- const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none);
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
+ AccessSpecifier AS = AS_none);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
@@ -1222,7 +1316,7 @@ private:
void ParseStructDeclaration(DeclSpec &DS, FieldCallback &Callback);
- bool isDeclarationSpecifier();
+ bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
bool isTypeQualifier() const;
@@ -1237,7 +1331,7 @@ private:
bool isDeclarationStatement() {
if (getLang().CPlusPlus)
return isCXXDeclarationStatement();
- return isDeclarationSpecifier();
+ return isDeclarationSpecifier(true);
}
/// isSimpleDeclaration - Disambiguates between a declaration or an
@@ -1247,9 +1341,13 @@ private:
bool isSimpleDeclaration() {
if (getLang().CPlusPlus)
return isCXXSimpleDeclaration();
- return isDeclarationSpecifier();
+ return isDeclarationSpecifier(true);
}
+ /// \brief Determine whether we are currently at the start of an Objective-C
+ /// class message that appears to be missing the open bracket '['.
+ bool isStartOfObjCClassMessageMissingOpenBracket();
+
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
@@ -1333,6 +1431,18 @@ private:
bool operator!=(const TPResult &RHS) const { return Res != RHS.Res; }
};
+ /// \brief Based only on the given token kind, determine whether we know that
+ /// we're at the start of an expression or a type-specifier-seq (which may
+ /// be an expression, in C++).
+ ///
+ /// This routine does not attempt to resolve any of the tricky cases, e.g.,
+ /// those involving lookup of identifiers.
+ ///
+ /// \returns \c TPR_true if this token starts an expression, \c TPR_false if
+ /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
+ /// tell.
+ TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
+
/// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a
/// declaration specifier, TPResult::False() if it is not,
/// TPResult::Ambiguous() if it could be either a decl-specifier or a
@@ -1340,7 +1450,7 @@ private:
/// encountered.
/// Doesn't consume tokens.
TPResult isCXXDeclarationSpecifier();
-
+
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error().
// Returning TPResult::True()/False() indicates that the ambiguity was
@@ -1351,26 +1461,87 @@ private:
TPResult TryParseDeclarationSpecifier();
TPResult TryParseSimpleDeclaration();
TPResult TryParseTypeofSpecifier();
+ TPResult TryParseProtocolQualifiers();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult TryParseParameterDeclarationClause();
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
- TypeResult ParseTypeName(SourceRange *Range = 0);
+ TypeResult ParseTypeName(SourceRange *Range = 0,
+ Declarator::TheContext Context
+ = Declarator::TypeNameContext);
void ParseBlockId();
- // EndLoc, if non-NULL, is filled with the location of the last token of
- // the attribute list.
- CXX0XAttributeList ParseCXX0XAttributes(SourceLocation *EndLoc = 0);
- AttributeList *ParseGNUAttributes(SourceLocation *EndLoc = 0);
- AttributeList *ParseMicrosoftDeclSpec(AttributeList* CurrAttr = 0);
- AttributeList *ParseMicrosoftTypeAttributes(AttributeList* CurrAttr = 0);
- AttributeList *ParseBorlandTypeAttributes(AttributeList* CurrAttr = 0);
+
+ void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
+ if (!attrs.Range.isValid()) return;
+ DiagnoseProhibitedAttributes(attrs);
+ }
+ void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
+
+ void MaybeParseGNUAttributes(Declarator &D) {
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributes attrs;
+ SourceLocation endLoc;
+ ParseGNUAttributes(attrs, &endLoc);
+ D.addAttributes(attrs.getList(), endLoc);
+ }
+ }
+ void MaybeParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0) {
+ if (Tok.is(tok::kw___attribute))
+ ParseGNUAttributes(attrs, endLoc);
+ }
+ void ParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0);
+
+ void MaybeParseCXX0XAttributes(Declarator &D) {
+ if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
+ ParsedAttributesWithRange attrs;
+ SourceLocation endLoc;
+ ParseCXX0XAttributes(attrs, &endLoc);
+ D.addAttributes(attrs.getList(), endLoc);
+ }
+ }
+ void MaybeParseCXX0XAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0) {
+ if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
+ ParsedAttributesWithRange attrsWithRange;
+ ParseCXX0XAttributes(attrsWithRange, endLoc);
+ attrs.append(attrsWithRange.getList());
+ }
+ }
+ void MaybeParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *endLoc = 0) {
+ if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
+ ParseCXX0XAttributes(attrs, endLoc);
+ }
+ void ParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *EndLoc = 0);
+
+ void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0) {
+ if (getLang().Microsoft && Tok.is(tok::l_square))
+ ParseMicrosoftAttributes(attrs, endLoc);
+ }
+ void ParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0);
+ void ParseMicrosoftDeclSpec(ParsedAttributes &attrs);
+ void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
+ void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
+ void ParseOpenCLAttributes(ParsedAttributes &attrs);
+
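Call sites can then accept optional attributes without testing token kinds themselves; a sketch of typical use inside a declaration parser:

    ParsedAttributesWithRange attrs;
    MaybeParseCXX0XAttributes(attrs); // no-op unless C++0x and '[[' ahead
    MaybeParseGNUAttributes(attrs);   // no-op unless '__attribute__' ahead
    // ... parse the declaration, then hand attrs.getList() to Sema ...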
void ParseTypeofSpecifier(DeclSpec &DS);
void ParseDecltypeSpecifier(DeclSpec &DS);
ExprResult ParseCXX0XAlignArgument(SourceLocation Start);
+ VirtSpecifiers::Specifier isCXX0XVirtSpecifier() const;
+ void ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS);
+
+ ClassVirtSpecifiers::Specifier isCXX0XClassVirtSpecifier() const;
+ void ParseOptionalCXX0XClassVirtSpecifierSeq(ClassVirtSpecifiers &CVS);
+
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
@@ -1410,12 +1581,13 @@ private:
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
+
void ParseTypeQualifierListOpt(DeclSpec &DS, bool GNUAttributesAllowed = true,
bool CXX0XAttributesAllowed = true);
void ParseDirectDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
- AttributeList *AttrList = 0,
+ ParsedAttributes &attrs,
bool RequiresArg = false);
void ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc,
IdentifierInfo *FirstIdent,
@@ -1433,11 +1605,16 @@ private:
SourceLocation InlineLoc = SourceLocation());
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseUsingDirectiveOrDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd,
- CXX0XAttributeList Attrs);
- Decl *ParseUsingDirective(unsigned Context, SourceLocation UsingLoc,
- SourceLocation &DeclEnd, AttributeList *Attr);
- Decl *ParseUsingDeclaration(unsigned Context, SourceLocation UsingLoc,
+ ParsedAttributesWithRange &attrs);
+ Decl *ParseUsingDirective(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs);
+ Decl *ParseUsingDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
@@ -1542,6 +1719,7 @@ private:
//===--------------------------------------------------------------------===//
// GNU G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseUnaryTypeTrait();
+ ExprResult ParseBinaryTypeTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
index 5fb107c..b7f6427 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
@@ -26,7 +26,7 @@ class Diagnostic;
class LangOptions;
class Preprocessor;
-// ObjC rewriter: attempts tp rewrite ObjC constructs into pure C code.
+// ObjC rewriter: attempts to rewrite ObjC constructs into pure C code.
// This is considered experimental, and only works with Apple's ObjC runtime.
ASTConsumer *CreateObjCRewriter(const std::string &InFile,
llvm::raw_ostream *OS,
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
index 9b2e016..26a0d72 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
@@ -99,7 +99,7 @@ public:
const DiagnosticInfo &Info);
/// \brief Emit a diagnostic via the adapted diagnostic client.
- void Diag(FullSourceLoc Loc, unsigned DiagID);
+ void Diag(SourceLocation Loc, unsigned DiagID);
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
index 5331647..45ee579 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CLANG_SEMA_ATTRLIST_H
#define LLVM_CLANG_SEMA_ATTRLIST_H
+#include "llvm/Support/Allocator.h"
#include "clang/Sema/Ownership.h"
#include "clang/Basic/SourceLocation.h"
#include <cassert>
@@ -32,6 +33,9 @@ namespace clang {
/// 4: __attribute__(( aligned(16) )). ParmName is unused, Args/Num used.
///
class AttributeList {
+public:
+ class Factory;
+private:
IdentifierInfo *AttrName;
SourceLocation AttrLoc;
IdentifierInfo *ScopeName;
@@ -42,17 +46,38 @@ class AttributeList {
unsigned NumArgs;
AttributeList *Next;
bool DeclspecAttribute, CXX0XAttribute;
- mutable bool Invalid; /// True if already diagnosed as invalid.
+
+ /// True if already diagnosed as invalid.
+ mutable bool Invalid;
+
AttributeList(const AttributeList &); // DO NOT IMPLEMENT
void operator=(const AttributeList &); // DO NOT IMPLEMENT
-public:
- AttributeList(IdentifierInfo *AttrName, SourceLocation AttrLoc,
+ void operator delete(void *); // DO NOT IMPLEMENT
+ ~AttributeList(); // DO NOT IMPLEMENT
+ AttributeList(llvm::BumpPtrAllocator &Alloc,
+ IdentifierInfo *AttrName, SourceLocation AttrLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
IdentifierInfo *ParmName, SourceLocation ParmLoc,
Expr **args, unsigned numargs,
- AttributeList *Next, bool declspec = false, bool cxx0x = false);
- ~AttributeList();
-
+ bool declspec, bool cxx0x);
+public:
+ class Factory {
+ llvm::BumpPtrAllocator Alloc;
+ public:
+ Factory() {}
+ ~Factory() {}
+ AttributeList *Create(IdentifierInfo *AttrName, SourceLocation AttrLoc,
+ IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
+ IdentifierInfo *ParmName, SourceLocation ParmLoc,
+ Expr **args, unsigned numargs, bool declspec = false, bool cxx0x = false) {
+ AttributeList *Mem = Alloc.Allocate<AttributeList>();
+ new (Mem) AttributeList(Alloc, AttrName, AttrLoc, ScopeName, ScopeLoc,
+ ParmName, ParmLoc, args, numargs,
+ declspec, cxx0x);
+ return Mem;
+ }
+ };
+
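Because the destructor and operator delete are now private, every AttributeList must come from a Factory, whose BumpPtrAllocator reclaims all nodes at once. Usage sketch (the argument values are placeholders):

    AttributeList::Factory AttrFactory;
    AttributeList *Attr =
        AttrFactory.Create(AttrName, AttrLoc, /*ScopeName=*/0, SourceLocation(),
                           /*ParmName=*/0, SourceLocation(),
                           /*args=*/0, /*numargs=*/0);
    // Never deleted individually; storage dies with the factory.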
enum Kind { // Please keep this list alphabetized.
AT_IBAction, // Clang-specific.
AT_IBOutlet, // Clang-specific.
@@ -68,35 +93,48 @@ public:
AT_carries_dependency,
AT_cdecl,
AT_cleanup,
+ AT_common,
AT_const,
+ AT_constant,
AT_constructor,
AT_deprecated,
AT_destructor,
+ AT_device,
AT_dllexport,
AT_dllimport,
AT_ext_vector_type,
AT_fastcall,
- AT_final,
AT_format,
AT_format_arg,
+ AT_global,
AT_gnu_inline,
- AT_hiding,
+ AT_host,
+ AT_launch_bounds,
AT_malloc,
+ AT_may_alias,
AT_mode,
+ AT_neon_polyvector_type, // Clang-specific.
+ AT_neon_vector_type, // Clang-specific.
+ AT_naked,
AT_nodebug,
AT_noinline,
AT_no_instrument_function,
+ AT_nocommon,
AT_nonnull,
AT_noreturn,
AT_nothrow,
AT_nsobject,
AT_objc_exception,
- AT_override,
AT_cf_returns_not_retained, // Clang-specific.
AT_cf_returns_retained, // Clang-specific.
AT_ns_returns_not_retained, // Clang-specific.
AT_ns_returns_retained, // Clang-specific.
+ AT_ns_returns_autoreleased, // Clang-specific.
+ AT_cf_consumed, // Clang-specific.
+ AT_ns_consumed, // Clang-specific.
+ AT_ns_consumes_self, // Clang-specific.
AT_objc_gc,
+ AT_opencl_kernel_function, // OpenCL-specific.
AT_overloadable, // Clang-specific.
AT_ownership_holds, // Clang-specific.
AT_ownership_returns, // Clang-specific.
@@ -107,12 +145,14 @@ public:
AT_regparm,
AT_section,
AT_sentinel,
+ AT_shared,
AT_stdcall,
AT_thiscall,
AT_transparent_union,
AT_unavailable,
AT_unused,
AT_used,
+ AT_uuid,
AT_vecreturn, // PS3 PPU-specific.
AT_vector_size,
AT_visibility,
@@ -134,6 +174,7 @@ public:
SourceLocation getScopeLoc() const { return ScopeLoc; }
IdentifierInfo *getParameterName() const { return ParmName; }
+ SourceLocation getParameterLoc() const { return ParmLoc; }
bool isDeclspecAttribute() const { return DeclspecAttribute; }
bool isCXX0XAttribute() const { return CXX0XAttribute; }
@@ -199,8 +240,8 @@ public:
/// The right-hand list is appended to the left-hand list, if any
/// A pointer to the joined list is returned.
/// Note: the lists are not left unmodified.
-inline AttributeList* addAttributeLists (AttributeList *Left,
- AttributeList *Right) {
+inline AttributeList *addAttributeLists(AttributeList *Left,
+ AttributeList *Right) {
if (!Left)
return Right;
@@ -229,6 +270,51 @@ struct CXX0XAttributeList {
}
};
+/// ParsedAttributes - A collection of parsed attributes. Currently
+/// we don't differentiate between the various attribute syntaxes,
+/// which is basically silly.
+///
+/// Right now this is a very lightweight container, but the expectation
+/// is that this will become significantly more serious.
+class ParsedAttributes {
+public:
+ ParsedAttributes() : list(0) {}
+
+ bool empty() const { return list == 0; }
+
+ void add(AttributeList *newAttr) {
+ assert(newAttr);
+ assert(newAttr->getNext() == 0);
+ newAttr->setNext(list);
+ list = newAttr;
+ }
+
+ void append(AttributeList *newList) {
+ if (!newList) return;
+
+ AttributeList *lastInNewList = newList;
+ while (AttributeList *next = lastInNewList->getNext())
+ lastInNewList = next;
+
+ lastInNewList->setNext(list);
+ list = newList;
+ }
+
+ void set(AttributeList *newList) {
+ list = newList;
+ }
+
+ void clear() { list = 0; }
+ AttributeList *getList() const { return list; }
+
+ /// Returns a reference to the attribute list. Try not to introduce
+ /// dependencies on this method; it may not be long-lived.
+ AttributeList *&getListRef() { return list; }
+
+private:
+ AttributeList *list;
+};
+
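A sketch of the list semantics: add() prepends a single node, append() splices a whole chain in front of the existing one (`A`, `B`, `C` are AttributeList nodes):

    ParsedAttributes attrs;
    attrs.add(A);      // list: A
    attrs.add(B);      // list: B -> A           (newest first)
    attrs.append(C);   // if C -> D, list is now C -> D -> B -> A
    AttributeList *head = attrs.getList();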
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
index 6c1ecbf..882440b 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -17,12 +17,13 @@
#include "clang/AST/CanonicalType.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
#include "clang-c/Index.h"
-#include <memory>
#include <string>
namespace llvm {
class raw_ostream;
+ class Twine;
}
namespace clang {
@@ -35,31 +36,37 @@ enum {
/// \brief Priority for the next initialization in a constructor initializer
/// list.
CCP_NextInitializer = 7,
+ /// \brief Priority for an enumeration constant inside a switch whose
+ /// condition is of the enumeration type.
+ CCP_EnumInCase = 7,
/// \brief Priority for a send-to-super completion.
- CCP_SuperCompletion = 8,
+ CCP_SuperCompletion = 20,
/// \brief Priority for a declaration that is in the local scope.
- CCP_LocalDeclaration = 8,
+ CCP_LocalDeclaration = 34,
/// \brief Priority for a member declaration found from the current
/// method or member function.
- CCP_MemberDeclaration = 20,
+ CCP_MemberDeclaration = 35,
/// \brief Priority for a language keyword (that isn't any of the other
/// categories).
- CCP_Keyword = 30,
+ CCP_Keyword = 40,
/// \brief Priority for a code pattern.
- CCP_CodePattern = 30,
+ CCP_CodePattern = 40,
/// \brief Priority for a non-type declaration.
CCP_Declaration = 50,
- /// \brief Priority for a constant value (e.g., enumerator).
- CCP_Constant = 60,
/// \brief Priority for a type.
- CCP_Type = 65,
+ CCP_Type = CCP_Declaration,
+ /// \brief Priority for a constant value (e.g., enumerator).
+ CCP_Constant = 65,
/// \brief Priority for a preprocessor macro.
CCP_Macro = 70,
/// \brief Priority for a nested-name-specifier.
CCP_NestedNameSpecifier = 75,
/// \brief Priority for a result that isn't likely to be what the user wants,
/// but is included for completeness.
- CCP_Unlikely = 80
+ CCP_Unlikely = 80,
+
+ /// \brief Priority for the Objective-C "_cmd" implicit parameter.
+ CCP_ObjC_cmd = CCP_Unlikely
};
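Lower values rank higher, and the CCD_* deltas defined just below are added to these bases; for example, with the new values:

    unsigned Priority = CCP_MemberDeclaration; // 35
    Priority += CCD_InBaseClass;               // inherited member: 35 + 2 = 37
    // A declaration in the local scope (34) still sorts ahead of it.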
/// \brief Priority value deltas that are added to code-completion results
@@ -67,18 +74,21 @@ enum {
enum {
/// \brief The result is in a base class.
CCD_InBaseClass = 2,
- /// \brief The result is a type match against void.
- ///
- /// Since everything converts to "void", we don't give as drastic an
- /// adjustment for matching void.
- CCD_VoidMatch = -5,
/// \brief The result is a C++ non-static member function whose qualifiers
/// exactly match the object type on which the member function can be called.
CCD_ObjectQualifierMatch = -1,
/// \brief The selector of the given message exactly matches the selector
/// of the current method, which might imply that some kind of delegation
/// is occurring.
- CCD_SelectorMatch = -3
+ CCD_SelectorMatch = -3,
+
+ /// \brief Adjustment to the "bool" type in Objective-C, where the typedef
+ /// "BOOL" is preferred.
+ CCD_bool_in_ObjC = 1,
+
+ /// \brief Adjustment for KVC code pattern priorities when it doesn't look
+ /// like the expression is an Objective-C collection.
+ CCD_ProbablyNotObjCCollection = 15
};
/// \brief Priority value factors by which we will divide or multiply the
@@ -119,9 +129,12 @@ QualType getDeclUsageType(ASTContext &C, NamedDecl *ND);
///
/// \param MacroName The name of the macro.
///
+/// \param LangOpts Options describing the current language dialect.
+///
/// \param PreferredTypeIsPointer Whether the preferred type for the context
/// of this macro is a pointer type.
unsigned getMacroUsagePriority(llvm::StringRef MacroName,
+ const LangOptions &LangOpts,
bool PreferredTypeIsPointer = false);
/// \brief Determine the libclang cursor kind associated with the given
@@ -143,6 +156,9 @@ public:
enum Kind {
/// \brief An unspecified code-completion context.
CCC_Other,
+ /// \brief An unspecified code-completion context where we should also add
+ /// macro completions.
+ CCC_OtherWithMacros,
/// \brief Code completion occurred within a "top-level" completion context,
/// e.g., at namespace or global scope.
CCC_TopLevel,
@@ -212,7 +228,13 @@ public:
/// \brief Code completion for a selector, as in an @selector expression.
CCC_SelectorName,
/// \brief Code completion within a type-qualifier list.
- CCC_TypeQualifiers
+ CCC_TypeQualifiers,
+ /// \brief Code completion in a parenthesized expression, which means that
+ /// we may also have types here in C and Objective-C (as well as in C++).
+ CCC_ParenthesizedExpression,
+ /// \brief An unknown context, in which we are recovering from a parsing
+ /// error and don't know which completions we should give.
+ CCC_Recovery
};
private:
@@ -248,6 +270,10 @@ public:
/// \brief Retrieve the type of the base object in a member-access
/// expression.
QualType getBaseType() const { return BaseType; }
+
+ /// \brief Determines whether we want C++ constructors as results within this
+ /// context.
+ bool wantConstructorResults() const;
};
@@ -339,127 +365,163 @@ public:
Chunk() : Kind(CK_Text), Text(0) { }
- Chunk(ChunkKind Kind, llvm::StringRef Text = "");
+ Chunk(ChunkKind Kind, const char *Text = "");
/// \brief Create a new text chunk.
- static Chunk CreateText(llvm::StringRef Text);
+ static Chunk CreateText(const char *Text);
/// \brief Create a new optional chunk.
- static Chunk CreateOptional(std::auto_ptr<CodeCompletionString> Optional);
+ static Chunk CreateOptional(CodeCompletionString *Optional);
/// \brief Create a new placeholder chunk.
- static Chunk CreatePlaceholder(llvm::StringRef Placeholder);
+ static Chunk CreatePlaceholder(const char *Placeholder);
/// \brief Create a new informative chunk.
- static Chunk CreateInformative(llvm::StringRef Informative);
+ static Chunk CreateInformative(const char *Informative);
/// \brief Create a new result type chunk.
- static Chunk CreateResultType(llvm::StringRef ResultType);
+ static Chunk CreateResultType(const char *ResultType);
/// \brief Create a new current-parameter chunk.
- static Chunk CreateCurrentParameter(llvm::StringRef CurrentParameter);
-
- /// \brief Clone the given chunk.
- Chunk Clone() const;
-
- /// \brief Destroy this chunk, deallocating any memory it owns.
- void Destroy();
+ static Chunk CreateCurrentParameter(const char *CurrentParameter);
};
private:
- /// \brief The chunks stored in this string.
- llvm::SmallVector<Chunk, 4> Chunks;
+ /// \brief The number of chunks stored in this string.
+ unsigned NumChunks;
+
+ /// \brief The priority of this code-completion string.
+ unsigned Priority : 30;
+
+ /// \brief The availability of this code-completion result.
+ unsigned Availability : 2;
CodeCompletionString(const CodeCompletionString &); // DO NOT IMPLEMENT
CodeCompletionString &operator=(const CodeCompletionString &); // DITTO
-public:
- CodeCompletionString() { }
- ~CodeCompletionString() { clear(); }
+ CodeCompletionString(const Chunk *Chunks, unsigned NumChunks,
+ unsigned Priority, CXAvailabilityKind Availability);
+ ~CodeCompletionString() { }
- typedef llvm::SmallVector<Chunk, 4>::const_iterator iterator;
- iterator begin() const { return Chunks.begin(); }
- iterator end() const { return Chunks.end(); }
- bool empty() const { return Chunks.empty(); }
- unsigned size() const { return Chunks.size(); }
- void clear();
+ friend class CodeCompletionBuilder;
+ friend class CodeCompletionResult;
- Chunk &operator[](unsigned I) {
+public:
+ typedef const Chunk *iterator;
+ iterator begin() const { return reinterpret_cast<const Chunk *>(this + 1); }
+ iterator end() const { return begin() + NumChunks; }
+ bool empty() const { return NumChunks == 0; }
+ unsigned size() const { return NumChunks; }
+
+ const Chunk &operator[](unsigned I) const {
assert(I < size() && "Chunk index out-of-range");
- return Chunks[I];
+ return begin()[I];
}
+
+ /// \brief Returns the text in the TypedText chunk.
+ const char *getTypedText() const;
- const Chunk &operator[](unsigned I) const {
- assert(I < size() && "Chunk index out-of-range");
- return Chunks[I];
+ /// \brief Retrieve the priority of this code completion result.
+ unsigned getPriority() const { return Priority; }
+
+ /// \brief Retrieve the availability of this code completion result.
+ unsigned getAvailability() const { return Availability; }
+
+ /// \brief Retrieve a string representation of the code completion string,
+ /// which is mainly useful for debugging.
+ std::string getAsString() const;
+};
+
+/// \brief An allocator used specifically for the purpose of code completion.
+class CodeCompletionAllocator : public llvm::BumpPtrAllocator {
+public:
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(llvm::StringRef String);
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(llvm::Twine String);
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(const char *String) {
+ return CopyString(llvm::StringRef(String));
+ }
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(const std::string &String) {
+ return CopyString(llvm::StringRef(String));
+ }
+};
+
+/// \brief A builder class used to construct new code-completion strings.
+class CodeCompletionBuilder {
+public:
+ typedef CodeCompletionString::Chunk Chunk;
+
+private:
+ CodeCompletionAllocator &Allocator;
+ unsigned Priority;
+ CXAvailabilityKind Availability;
+
+ /// \brief The chunks stored in this string.
+ llvm::SmallVector<Chunk, 4> Chunks;
+
+public:
+ CodeCompletionBuilder(CodeCompletionAllocator &Allocator)
+ : Allocator(Allocator), Priority(0), Availability(CXAvailability_Available){
}
+ CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
+ unsigned Priority, CXAvailabilityKind Availability)
+ : Allocator(Allocator), Priority(Priority), Availability(Availability) { }
+
+ /// \brief Retrieve the allocator into which the code completion
+ /// strings should be allocated.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ /// \brief Take the resulting completion string.
+ ///
+ /// This operation can only be performed once.
+ CodeCompletionString *TakeString();
+
/// \brief Add a new typed-text chunk.
- /// The text string will be copied.
- void AddTypedTextChunk(llvm::StringRef Text) {
- Chunks.push_back(Chunk(CK_TypedText, Text));
+ void AddTypedTextChunk(const char *Text) {
+ Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text));
}
/// \brief Add a new text chunk.
- /// The text string will be copied.
- void AddTextChunk(llvm::StringRef Text) {
+ void AddTextChunk(const char *Text) {
Chunks.push_back(Chunk::CreateText(Text));
}
-
+
/// \brief Add a new optional chunk.
- void AddOptionalChunk(std::auto_ptr<CodeCompletionString> Optional) {
+ void AddOptionalChunk(CodeCompletionString *Optional) {
Chunks.push_back(Chunk::CreateOptional(Optional));
}
/// \brief Add a new placeholder chunk.
- /// The placeholder text will be copied.
- void AddPlaceholderChunk(llvm::StringRef Placeholder) {
+ void AddPlaceholderChunk(const char *Placeholder) {
Chunks.push_back(Chunk::CreatePlaceholder(Placeholder));
}
-
+
/// \brief Add a new informative chunk.
- /// The text will be copied.
- void AddInformativeChunk(llvm::StringRef Text) {
+ void AddInformativeChunk(const char *Text) {
Chunks.push_back(Chunk::CreateInformative(Text));
}
-
+
/// \brief Add a new result-type chunk.
- /// The text will be copied.
- void AddResultTypeChunk(llvm::StringRef ResultType) {
+ void AddResultTypeChunk(const char *ResultType) {
Chunks.push_back(Chunk::CreateResultType(ResultType));
}
/// \brief Add a new current-parameter chunk.
- /// The text will be copied.
- void AddCurrentParameterChunk(llvm::StringRef CurrentParameter) {
+ void AddCurrentParameterChunk(const char *CurrentParameter) {
Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
}
/// \brief Add a new chunk.
void AddChunk(Chunk C) { Chunks.push_back(C); }
-
- /// \brief Returns the text in the TypedText chunk.
- const char *getTypedText() const;
-
- /// \brief Retrieve a string representation of the code completion string,
- /// which is mainly useful for debugging.
- std::string getAsString() const;
-
- /// \brief Clone this code-completion string.
- ///
- /// \param Result If non-NULL, points to an empty code-completion
- /// result that will be given a cloned copy of
- CodeCompletionString *Clone(CodeCompletionString *Result = 0) const;
-
- /// \brief Serialize this code-completion string to the given stream.
- void Serialize(llvm::raw_ostream &OS) const;
-
- /// \brief Deserialize a code-completion string from the given string.
- ///
- /// \returns true if successful, false otherwise.
- bool Deserialize(const char *&Str, const char *StrEnd);
};
-
+
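Taken together, completion strings are now built once and then immutable: chunks accumulate in a CodeCompletionBuilder, and TakeString() lays the result out in the allocator. A usage sketch:

    CodeCompletionAllocator Allocator;
    CodeCompletionBuilder Builder(Allocator, CCP_Declaration,
                                  CXAvailability_Available);
    Builder.AddResultTypeChunk("int");
    Builder.AddTypedTextChunk(Allocator.CopyString("size")); // copied into the allocator
    CodeCompletionString *CCS = Builder.TakeString();        // may be taken only once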
/// \brief Captures a result of code completion.
class CodeCompletionResult {
public:
@@ -590,15 +652,12 @@ public:
///
/// \param S The semantic analysis that created the result.
///
- /// \param Result If non-NULL, the already-allocated, empty
- /// code-completion string that will be populated with the
- /// appropriate code completion string for this result.
+ /// \param Allocator The allocator that will be used to allocate the
+ /// string itself.
CodeCompletionString *CreateCodeCompletionString(Sema &S,
- CodeCompletionString *Result = 0);
-
- void Destroy();
+ CodeCompletionAllocator &Allocator);
- /// brief Determine a base priority for the given declaration.
+ /// \brief Determine a base priority for the given declaration.
static unsigned getPriorityFromDecl(NamedDecl *ND);
private:
@@ -682,7 +741,7 @@ public:
: Kind(CK_Function), Function(Function) { }
OverloadCandidate(FunctionTemplateDecl *FunctionTemplateDecl)
- : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplate) { }
+ : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) { }
OverloadCandidate(const FunctionType *Type)
: Kind(CK_FunctionType), Type(Type) { }
@@ -707,7 +766,8 @@ public:
/// \brief Create a new code-completion string that describes the function
/// signature of this overload candidate.
CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
- Sema &S) const;
+ Sema &S,
+ CodeCompletionAllocator &Allocator) const;
};
CodeCompleteConsumer() : IncludeMacros(false), IncludeCodePatterns(false),
@@ -753,6 +813,10 @@ public:
OverloadCandidate *Candidates,
unsigned NumCandidates) { }
//@}
+
+ /// \brief Retrieve the allocator that will be used to allocate
+ /// code completion strings.
+ virtual CodeCompletionAllocator &getAllocator() = 0;
};
/// \brief A simple code-completion consumer that prints the results it
@@ -761,6 +825,8 @@ class PrintingCodeCompleteConsumer : public CodeCompleteConsumer {
/// \brief The raw output stream.
llvm::raw_ostream &OS;
+ CodeCompletionAllocator Allocator;
+
public:
/// \brief Create a new printing code-completion consumer that prints its
/// results to the given raw output stream.
@@ -779,34 +845,10 @@ public:
virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates);
-};
-
-/// \brief A code-completion consumer that prints the results it receives
-/// in a format that is parsable by the CIndex library.
-class CIndexCodeCompleteConsumer : public CodeCompleteConsumer {
- /// \brief The raw output stream.
- llvm::raw_ostream &OS;
-
-public:
- /// \brief Create a new CIndex code-completion consumer that prints its
- /// results to the given raw output stream in a format readable to the CIndex
- /// library.
- CIndexCodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
- bool IncludeGlobals, llvm::raw_ostream &OS)
- : CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
- true), OS(OS) {}
- /// \brief Prints the finalized code-completion results.
- virtual void ProcessCodeCompleteResults(Sema &S,
- CodeCompletionContext Context,
- CodeCompletionResult *Results,
- unsigned NumResults);
-
- virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
- OverloadCandidate *Candidates,
- unsigned NumCandidates);
+ virtual CodeCompletionAllocator &getAllocator() { return Allocator; }
};
-
+
} // end namespace clang
#endif // LLVM_CLANG_SEMA_CODECOMPLETECONSUMER_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
index 0893ae7..9c4bb64 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
@@ -26,6 +26,7 @@
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ErrorHandling.h"
namespace clang {
class LangOptions;
@@ -169,32 +170,32 @@ private:
// storage-class-specifier
/*SCS*/unsigned StorageClassSpec : 3;
- bool SCS_thread_specified : 1;
- bool SCS_extern_in_linkage_spec : 1;
+ unsigned SCS_thread_specified : 1;
+ unsigned SCS_extern_in_linkage_spec : 1;
// type-specifier
/*TSW*/unsigned TypeSpecWidth : 2;
/*TSC*/unsigned TypeSpecComplex : 2;
/*TSS*/unsigned TypeSpecSign : 2;
/*TST*/unsigned TypeSpecType : 5;
- bool TypeAltiVecVector : 1;
- bool TypeAltiVecPixel : 1;
- bool TypeAltiVecBool : 1;
- bool TypeSpecOwned : 1;
+ unsigned TypeAltiVecVector : 1;
+ unsigned TypeAltiVecPixel : 1;
+ unsigned TypeAltiVecBool : 1;
+ unsigned TypeSpecOwned : 1;
// type-qualifiers
unsigned TypeQualifiers : 3; // Bitwise OR of TQ.
// function-specifier
- bool FS_inline_specified : 1;
- bool FS_virtual_specified : 1;
- bool FS_explicit_specified : 1;
+ unsigned FS_inline_specified : 1;
+ unsigned FS_virtual_specified : 1;
+ unsigned FS_explicit_specified : 1;
// friend-specifier
- bool Friend_specified : 1;
+ unsigned Friend_specified : 1;
// constexpr-specifier
- bool Constexpr_specified : 1;
+ unsigned Constexpr_specified : 1;
/*SCS*/unsigned StorageClassSpecAsWritten : 3;
@@ -205,7 +206,7 @@ private:
};
// attributes.
- AttributeList *AttrList;
+ ParsedAttributes Attrs;
// Scope specifier for the type spec, if applicable.
CXXScopeSpec TypeScope;
@@ -267,14 +268,12 @@ public:
Friend_specified(false),
Constexpr_specified(false),
StorageClassSpecAsWritten(SCS_unspecified),
- AttrList(0),
ProtocolQualifiers(0),
NumProtocolQualifiers(0),
ProtocolLocs(0),
writtenBS() {
}
~DeclSpec() {
- delete AttrList;
delete [] ProtocolQualifiers;
delete [] ProtocolLocs;
}
@@ -404,7 +403,7 @@ public:
/// TODO: use a more general approach that still allows these
/// diagnostics to be ignored when desired.
bool SetStorageClassSpec(SCS S, SourceLocation Loc, const char *&PrevSpec,
- unsigned &DiagID);
+ unsigned &DiagID, const LangOptions &Lang);
bool SetStorageClassSpecThread(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID);
bool SetTypeSpecWidth(TSW W, SourceLocation Loc, const char *&PrevSpec,
@@ -473,19 +472,29 @@ public:
/// short __attribute__((unused)) __attribute__((deprecated))
/// int __attribute__((may_alias)) __attribute__((aligned(16))) var;
///
- void AddAttributes(AttributeList *alist) {
- AttrList = addAttributeLists(AttrList, alist);
+ void addAttributes(AttributeList *AL) {
+ Attrs.append(AL);
+ }
+ void setAttributes(AttributeList *AL) {
+ Attrs.set(AL);
}
- void SetAttributes(AttributeList *AL) { AttrList = AL; }
- const AttributeList *getAttributes() const { return AttrList; }
- AttributeList *getAttributes() { return AttrList; }
+
+ bool hasAttributes() const { return !Attrs.empty(); }
+
+ ParsedAttributes &getAttributes() { return Attrs; }
+ const ParsedAttributes &getAttributes() const { return Attrs; }
/// TakeAttributes - Return the current attribute list and remove them from
/// the DeclSpec so that it doesn't own them.
- AttributeList *TakeAttributes() {
- AttributeList *AL = AttrList;
- AttrList = 0;
- return AL;
+ ParsedAttributes takeAttributes() {
+ ParsedAttributes saved = Attrs;
+ Attrs.clear();
+ return saved;
+ }
+
+ void takeAttributesFrom(ParsedAttributes &attrs) {
+ Attrs.append(attrs.getList());
+ attrs.clear();
}
typedef Decl * const *ProtocolQualifierListTy;
@@ -539,7 +548,8 @@ public:
DQ_PR_retain = 0x10,
DQ_PR_copy = 0x20,
DQ_PR_nonatomic = 0x40,
- DQ_PR_setter = 0x80
+ DQ_PR_setter = 0x80,
+ DQ_PR_atomic = 0x100
};
@@ -573,7 +583,7 @@ private:
ObjCDeclQualifier objcDeclQualifier : 6;
// NOTE: VC++ treats enums as signed, avoid using ObjCPropertyAttributeKind
- unsigned PropertyAttributes : 8;
+ unsigned PropertyAttributes : 9;
IdentifierInfo *GetterName; // getter name or NULL if no getter
IdentifierInfo *SetterName; // setter name or NULL if no setter
};
@@ -800,7 +810,7 @@ typedef llvm::SmallVector<Token, 4> CachedTokens;
/// This is intended to be a small value object.
struct DeclaratorChunk {
enum {
- Pointer, Reference, Array, Function, BlockPointer, MemberPointer
+ Pointer, Reference, Array, Function, BlockPointer, MemberPointer, Paren
} Kind;
/// Loc - The place where this type was defined.
@@ -808,27 +818,27 @@ struct DeclaratorChunk {
/// EndLoc - If valid, the place where this chunk ends.
SourceLocation EndLoc;
- struct PointerTypeInfo {
+ struct TypeInfoCommon {
+ AttributeList *AttrList;
+ };
+
+ struct PointerTypeInfo : TypeInfoCommon {
/// The type qualifiers: const/volatile/restrict.
unsigned TypeQuals : 3;
- AttributeList *AttrList;
void destroy() {
- delete AttrList;
}
};
- struct ReferenceTypeInfo {
+ struct ReferenceTypeInfo : TypeInfoCommon {
/// The type qualifier: restrict. [GNU] C++ extension
bool HasRestrict : 1;
/// True if this is an lvalue reference, false if it's an rvalue reference.
bool LValueRef : 1;
- AttributeList *AttrList;
void destroy() {
- delete AttrList;
}
};
- struct ArrayTypeInfo {
+ struct ArrayTypeInfo : TypeInfoCommon {
/// The type qualifiers for the array: const/volatile/restrict.
unsigned TypeQuals : 3;
@@ -842,6 +852,7 @@ struct DeclaratorChunk {
/// Since the parser is multi-purpose, and we don't want to impose a root
/// expression class on all clients, NumElts is untyped.
Expr *NumElts;
+
void destroy() {}
};
@@ -876,29 +887,33 @@ struct DeclaratorChunk {
SourceRange Range;
};
- struct FunctionTypeInfo {
+ struct FunctionTypeInfo : TypeInfoCommon {
/// hasPrototype - This is true if the function had at least one typed
/// argument. If the function is () or (a,b,c), then it has no prototype,
/// and is treated as a K&R-style function.
- bool hasPrototype : 1;
+ unsigned hasPrototype : 1;
/// isVariadic - If this function has a prototype, and if that
/// proto ends with ',...)', this is true. When true, EllipsisLoc
/// contains the location of the ellipsis.
- bool isVariadic : 1;
+ unsigned isVariadic : 1;
+ /// \brief Whether the ref-qualifier (if any) is an lvalue reference.
+ /// Otherwise, it's an rvalue reference.
+ unsigned RefQualifierIsLValueRef : 1;
+
/// The type qualifiers: const/volatile/restrict.
/// The qualifier bitmask values are the same as in QualType.
unsigned TypeQuals : 3;
/// hasExceptionSpec - True if the function has an exception specification.
- bool hasExceptionSpec : 1;
+ unsigned hasExceptionSpec : 1;
/// hasAnyExceptionSpec - True if the function has a throw(...) specifier.
- bool hasAnyExceptionSpec : 1;
+ unsigned hasAnyExceptionSpec : 1;
/// DeleteArgInfo - If this is true, we need to delete[] ArgInfo.
- bool DeleteArgInfo : 1;
+ unsigned DeleteArgInfo : 1;
/// When isVariadic is true, the location of the ellipsis in the source.
unsigned EllipsisLoc;
@@ -911,6 +926,11 @@ struct DeclaratorChunk {
/// the function has one.
unsigned NumExceptions;
+ /// \brief The location of the ref-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no ref-qualifier.
+ unsigned RefQualifierLoc;
+
/// ThrowLoc - When hasExceptionSpec is true, the location of the throw
/// keyword introducing the spec.
unsigned ThrowLoc;
@@ -925,6 +945,11 @@ struct DeclaratorChunk {
/// specification and their locations.
TypeAndRange *Exceptions;
+ /// TrailingReturnType - If this isn't null, it's the trailing return type
+ /// specified. This is actually a ParsedType, but stored as void* to
+ /// allow union storage.
+ void *TrailingReturnType;
+
/// freeArgs - reset the argument list to having zero arguments. This is
/// used in various places for error recovery.
void freeArgs() {
@@ -954,22 +979,29 @@ struct DeclaratorChunk {
SourceLocation getThrowLoc() const {
return SourceLocation::getFromRawEncoding(ThrowLoc);
}
+
+ /// \brief Retrieve the location of the ref-qualifier, if any.
+ SourceLocation getRefQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(RefQualifierLoc);
+ }
+
+ /// \brief Determine whether this function declaration contains a
+ /// ref-qualifier.
+ bool hasRefQualifier() const { return getRefQualifierLoc().isValid(); }
};
- struct BlockPointerTypeInfo {
+ struct BlockPointerTypeInfo : TypeInfoCommon {
/// For now, sema will catch these as invalid.
/// The type qualifiers: const/volatile/restrict.
unsigned TypeQuals : 3;
- AttributeList *AttrList;
+
void destroy() {
- delete AttrList;
}
};
- struct MemberPointerTypeInfo {
+ struct MemberPointerTypeInfo : TypeInfoCommon {
/// The type qualifiers: const/volatile/restrict.
unsigned TypeQuals : 3;
- AttributeList *AttrList;
// CXXScopeSpec has a constructor, so it can't be a direct member.
// So we need some pointer-aligned storage and a bit of trickery.
union {
@@ -983,12 +1015,12 @@ struct DeclaratorChunk {
return *reinterpret_cast<const CXXScopeSpec*>(ScopeMem.Mem);
}
void destroy() {
- delete AttrList;
Scope().~CXXScopeSpec();
}
};
union {
+ TypeInfoCommon Common;
PointerTypeInfo Ptr;
ReferenceTypeInfo Ref;
ArrayTypeInfo Arr;
@@ -1006,58 +1038,57 @@ struct DeclaratorChunk {
case DeclaratorChunk::Reference: return Ref.destroy();
case DeclaratorChunk::Array: return Arr.destroy();
case DeclaratorChunk::MemberPointer: return Mem.destroy();
+ case DeclaratorChunk::Paren: return;
}
}
/// getAttrs - If there are attributes applied to this declarator chunk,
/// return them.
const AttributeList *getAttrs() const {
- switch (Kind) {
- default: assert(0 && "Unknown declarator kind!");
- case Pointer: return Ptr.AttrList;
- case Reference: return Ref.AttrList;
- case MemberPointer: return Mem.AttrList;
- case Array: return 0;
- case Function: return 0;
- case BlockPointer: return Cls.AttrList;
- }
+ return Common.AttrList;
}
+ AttributeList *&getAttrListRef() {
+ return Common.AttrList;
+ }
/// getPointer - Return a DeclaratorChunk for a pointer.
///
static DeclaratorChunk getPointer(unsigned TypeQuals, SourceLocation Loc,
- AttributeList *AL) {
+ const ParsedAttributes &attrs) {
DeclaratorChunk I;
I.Kind = Pointer;
I.Loc = Loc;
I.Ptr.TypeQuals = TypeQuals;
- I.Ptr.AttrList = AL;
+ I.Ptr.AttrList = attrs.getList();
return I;
}
/// getReference - Return a DeclaratorChunk for a reference.
///
static DeclaratorChunk getReference(unsigned TypeQuals, SourceLocation Loc,
- AttributeList *AL, bool lvalue) {
+ const ParsedAttributes &attrs,
+ bool lvalue) {
DeclaratorChunk I;
I.Kind = Reference;
I.Loc = Loc;
I.Ref.HasRestrict = (TypeQuals & DeclSpec::TQ_restrict) != 0;
I.Ref.LValueRef = lvalue;
- I.Ref.AttrList = AL;
+ I.Ref.AttrList = attrs.getList();
return I;
}
/// getArray - Return a DeclaratorChunk for an array.
///
- static DeclaratorChunk getArray(unsigned TypeQuals, bool isStatic,
- bool isStar, Expr *NumElts,
+ static DeclaratorChunk getArray(unsigned TypeQuals,
+ const ParsedAttributes &attrs,
+ bool isStatic, bool isStar, Expr *NumElts,
SourceLocation LBLoc, SourceLocation RBLoc) {
DeclaratorChunk I;
I.Kind = Array;
I.Loc = LBLoc;
I.EndLoc = RBLoc;
+ I.Arr.AttrList = attrs.getList();
I.Arr.TypeQuals = TypeQuals;
I.Arr.hasStatic = isStatic;
I.Arr.isStar = isStar;
@@ -1067,42 +1098,60 @@ struct DeclaratorChunk {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
- static DeclaratorChunk getFunction(bool hasProto, bool isVariadic,
+ static DeclaratorChunk getFunction(const ParsedAttributes &attrs,
+ bool hasProto, bool isVariadic,
SourceLocation EllipsisLoc,
ParamInfo *ArgInfo, unsigned NumArgs,
- unsigned TypeQuals, bool hasExceptionSpec,
+ unsigned TypeQuals,
+ bool RefQualifierIsLvalueRef,
+ SourceLocation RefQualifierLoc,
+ bool hasExceptionSpec,
SourceLocation ThrowLoc,
bool hasAnyExceptionSpec,
ParsedType *Exceptions,
SourceRange *ExceptionRanges,
unsigned NumExceptions,
SourceLocation LPLoc, SourceLocation RPLoc,
- Declarator &TheDeclarator);
+ Declarator &TheDeclarator,
+ ParsedType TrailingReturnType = ParsedType());
/// getBlockPointer - Return a DeclaratorChunk for a block.
///
static DeclaratorChunk getBlockPointer(unsigned TypeQuals, SourceLocation Loc,
- AttributeList *AL) {
+ const ParsedAttributes &attrs) {
DeclaratorChunk I;
I.Kind = BlockPointer;
I.Loc = Loc;
I.Cls.TypeQuals = TypeQuals;
- I.Cls.AttrList = AL;
+ I.Cls.AttrList = attrs.getList();
return I;
}
static DeclaratorChunk getMemberPointer(const CXXScopeSpec &SS,
unsigned TypeQuals,
SourceLocation Loc,
- AttributeList *AL) {
+ const ParsedAttributes &attrs) {
DeclaratorChunk I;
I.Kind = MemberPointer;
I.Loc = Loc;
I.Mem.TypeQuals = TypeQuals;
- I.Mem.AttrList = AL;
+ I.Mem.AttrList = attrs.getList();
new (I.Mem.ScopeMem.Mem) CXXScopeSpec(SS);
return I;
}
+
+ /// getParen - Return a DeclaratorChunk for a paren.
+ ///
+ static DeclaratorChunk getParen(SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ DeclaratorChunk I;
+ I.Kind = Paren;
+ I.Loc = LParenLoc;
+ I.EndLoc = RParenLoc;
+ I.Common.AttrList = 0;
+ return I;
+ }
+
};
/// Declarator - Information about one declarator, including the parsed type
@@ -1128,7 +1177,8 @@ public:
ConditionContext, // Condition declaration in a C++ if/switch/while/for.
TemplateParamContext,// Within a template parameter list.
CXXCatchContext, // C++ catch exception-declaration
- BlockLiteralContext // Block literal declarator.
+ BlockLiteralContext, // Block literal declarator.
+ TemplateTypeArgContext // Template type argument.
};
private:
@@ -1168,6 +1218,10 @@ private:
/// Extension - true if the declaration is preceded by __extension__.
bool Extension : 1;
+ /// \brief If provided, the source location of the ellipsis used to describe
+ /// this declarator as a parameter pack.
+ SourceLocation EllipsisLoc;
+
friend struct DeclaratorChunk;
public:
@@ -1238,7 +1292,6 @@ public:
for (unsigned i = 0, e = DeclTypeInfo.size(); i != e; ++i)
DeclTypeInfo[i].destroy();
DeclTypeInfo.clear();
- delete AttrList;
AttrList = 0;
AsmLabel = 0;
InlineParamsUsed = false;
@@ -1250,14 +1303,15 @@ public:
bool mayOmitIdentifier() const {
return Context == TypeNameContext || Context == PrototypeContext ||
Context == TemplateParamContext || Context == CXXCatchContext ||
- Context == BlockLiteralContext;
+ Context == BlockLiteralContext || Context == TemplateTypeArgContext;
}
/// mayHaveIdentifier - Return true if the identifier is either optional or
/// required. This is true for normal declarators and prototypes, but not
/// typenames.
bool mayHaveIdentifier() const {
- return Context != TypeNameContext && Context != BlockLiteralContext;
+ return Context != TypeNameContext && Context != BlockLiteralContext &&
+ Context != TemplateTypeArgContext;
}
/// mayBeFollowedByCXXDirectInit - Return true if the declarator can be
@@ -1301,6 +1355,11 @@ public:
SetRangeEnd(EndLoc);
}
+ /// AddInnermostTypeInfo - Add a new innermost chunk to this declarator.
+ void AddInnermostTypeInfo(const DeclaratorChunk &TI) {
+ DeclTypeInfo.insert(DeclTypeInfo.begin(), TI);
+ }
+
/// getNumTypeObjects() - Return the number of types applied to this
/// declarator.
unsigned getNumTypeObjects() const { return DeclTypeInfo.size(); }
@@ -1323,11 +1382,52 @@ public:
DeclTypeInfo.erase(DeclTypeInfo.begin());
}
+ /// isFunctionDeclarator - This method returns true if the declarator
+ /// is a function declarator (looking through parentheses).
+ /// If true is returned, the out-parameter idx is set to the index of
+ /// the function chunk.
+ bool isFunctionDeclarator(unsigned& idx) const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Function:
+ idx = i;
+ return true;
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ }
+ llvm_unreachable("Invalid type chunk");
+ return false;
+ }
+ return false;
+ }
+
/// isFunctionDeclarator - Once this declarator is fully parsed and formed,
- /// this method returns true if the identifier is a function declarator.
+ /// this method returns true if the identifier is a function declarator
+ /// (looking through parentheses).
bool isFunctionDeclarator() const {
- return !DeclTypeInfo.empty() &&
- DeclTypeInfo[0].Kind == DeclaratorChunk::Function;
+ unsigned index;
+ return isFunctionDeclarator(index);
+ }
+
+ /// getFunctionTypeInfo - Retrieves the function type info object
+ /// (looking through parentheses).
+ DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() {
+ assert(isFunctionDeclarator() && "Not a function declarator!");
+ unsigned index = 0;
+ isFunctionDeclarator(index);
+ return DeclTypeInfo[index].Fun;
+ }
+
+ /// getFunctionTypeInfo - Retrieves the function type info object
+ /// (looking through parentheses).
+ const DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() const {
+ return const_cast<Declarator*>(this)->getFunctionTypeInfo();
}
/// AddAttributes - simply adds the attribute list to the Declarator.
@@ -1337,19 +1437,25 @@ public:
/// __attribute__((common,deprecated));
///
/// Also extends the range of the declarator.
- void AddAttributes(AttributeList *alist, SourceLocation LastLoc) {
+ void addAttributes(AttributeList *alist, SourceLocation LastLoc) {
AttrList = addAttributeLists(AttrList, alist);
if (!LastLoc.isInvalid())
SetRangeEnd(LastLoc);
}
+ void addAttributes(const ParsedAttributes &attrs) {
+ addAttributes(attrs.getList(), SourceLocation());
+ }
+
const AttributeList *getAttributes() const { return AttrList; }
AttributeList *getAttributes() { return AttrList; }
+ AttributeList *&getAttrListRef() { return AttrList; }
+
/// hasAttributes - do we contain any attributes?
bool hasAttributes() const {
- if (getAttributes() || getDeclSpec().getAttributes()) return true;
+ if (getAttributes() || getDeclSpec().hasAttributes()) return true;
for (unsigned i = 0, e = getNumTypeObjects(); i != e; ++i)
if (getTypeObject(i).getAttrs())
return true;
@@ -1369,6 +1475,10 @@ public:
void setGroupingParens(bool flag) { GroupingParens = flag; }
bool hasGroupingParens() const { return GroupingParens; }
+
+ bool hasEllipsis() const { return EllipsisLoc.isValid(); }
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+ void setEllipsisLoc(SourceLocation EL) { EllipsisLoc = EL; }
};
/// FieldDeclarator - This little struct is used to capture information about
@@ -1380,7 +1490,69 @@ struct FieldDeclarator {
BitfieldSize = 0;
}
};
-
+
+/// VirtSpecifiers - Represents a C++0x virt-specifier-seq.
+class VirtSpecifiers {
+public:
+ enum Specifier {
+ VS_None = 0,
+ VS_Override = 1,
+ VS_Final = 2,
+ VS_New = 4
+ };
+
+ VirtSpecifiers() : Specifiers(0) { }
+
+ bool SetSpecifier(Specifier VS, SourceLocation Loc,
+ const char *&PrevSpec);
+
+ bool isOverrideSpecified() const { return Specifiers & VS_Override; }
+ SourceLocation getOverrideLoc() const { return VS_overrideLoc; }
+
+ bool isFinalSpecified() const { return Specifiers & VS_Final; }
+ SourceLocation getFinalLoc() const { return VS_finalLoc; }
+
+ bool isNewSpecified() const { return Specifiers & VS_New; }
+ SourceLocation getNewLoc() const { return VS_newLoc; }
+
+ void clear() { Specifiers = 0; }
+
+ static const char *getSpecifierName(Specifier VS);
+
+private:
+ unsigned Specifiers;
+
+ SourceLocation VS_overrideLoc, VS_finalLoc, VS_newLoc;
+};
+
+/// ClassVirtSpecifiers - Represents a C++0x class-virt-specifier-seq.
+class ClassVirtSpecifiers {
+public:
+ enum Specifier {
+ CVS_None = 0,
+ CVS_Final = 1,
+ CVS_Explicit = 2
+ };
+
+ ClassVirtSpecifiers() : Specifiers(0) { }
+
+ bool SetSpecifier(Specifier CVS, SourceLocation Loc,
+ const char *&PrevSpec);
+
+ bool isFinalSpecified() const { return Specifiers & CVS_Final; }
+ SourceLocation getFinalLoc() const { return CVS_finalLoc; }
+
+ bool isExplicitSpecified() const { return Specifiers & CVS_Explicit; }
+ SourceLocation getExplicitLoc() const { return CVS_explicitLoc; }
+
+ static const char *getSpecifierName(Specifier CVS);
+
+private:
+ unsigned Specifiers;
+
+ SourceLocation CVS_finalLoc, CVS_explicitLoc;
+};
+
} // end namespace clang
#endif
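
The new Paren chunk kind records grouping parentheses such as those in "int (f)(void)", and isFunctionDeclarator() now looks through them when searching for the Function chunk. A rough sketch of the parser-side idiom, assuming AddTypeInfo keeps its usual (chunk, end-location) signature:

    // Record the grouping parens around the declarator-id.
    D.AddTypeInfo(clang::DeclaratorChunk::getParen(LParenLoc, RParenLoc),
                  RParenLoc);
    unsigned FnIdx;
    if (D.isFunctionDeclarator(FnIdx)) {
      // FnIdx is the Function chunk's index; Paren chunks were skipped.
      clang::DeclaratorChunk::FunctionTypeInfo &FTI =
          D.getTypeObject(FnIdx).Fun;
      (void)FTI;
    }
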
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
index 6a9a1bf..6e808de 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -101,7 +101,7 @@ public:
private:
unsigned Access : 2;
- bool IsMember;
+ unsigned IsMember : 1;
NamedDecl *Target;
CXXRecordDecl *NamingClass;
QualType BaseObjectType;
@@ -119,14 +119,6 @@ public:
SourceLocation Loc;
- union {
- /// Deprecation.
- struct { NamedDecl *Decl; } DeprecationData;
-
- /// Access control.
- char AccessData[sizeof(AccessedEntity)];
- };
-
void destroy() {
switch (Kind) {
case Access: getAccessData().~AccessedEntity(); break;
@@ -135,12 +127,15 @@ public:
}
static DelayedDiagnostic makeDeprecation(SourceLocation Loc,
- NamedDecl *D) {
+ const NamedDecl *D,
+ llvm::StringRef Msg) {
DelayedDiagnostic DD;
DD.Kind = Deprecation;
DD.Triggered = false;
DD.Loc = Loc;
DD.DeprecationData.Decl = D;
+ DD.DeprecationData.Message = Msg.data();
+ DD.DeprecationData.MessageLen = Msg.size();
return DD;
}
@@ -155,11 +150,37 @@ public:
}
AccessedEntity &getAccessData() {
+ assert(Kind == Access && "Not an access diagnostic.");
return *reinterpret_cast<AccessedEntity*>(AccessData);
}
const AccessedEntity &getAccessData() const {
+ assert(Kind == Access && "Not an access diagnostic.");
return *reinterpret_cast<const AccessedEntity*>(AccessData);
}
+
+ const NamedDecl *getDeprecationDecl() const {
+ assert(Kind == Deprecation && "Not a deprecation diagnostic.");
+ return DeprecationData.Decl;
+ }
+
+ llvm::StringRef getDeprecationMessage() const {
+ assert(Kind == Deprecation && "Not a deprecation diagnostic.");
+ return llvm::StringRef(DeprecationData.Message,
+ DeprecationData.MessageLen);
+ }
+
+private:
+ union {
+ /// Deprecation.
+ struct {
+ const NamedDecl *Decl;
+ const char *Message;
+ size_t MessageLen;
+ } DeprecationData;
+
+ /// Access control.
+ char AccessData[sizeof(AccessedEntity)];
+ };
};
}
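
Deprecation diagnostics now carry the optional message written in __attribute__((deprecated("..."))), and the payload union moved behind asserted accessors. A sketch of building and later reading one, where Loc, D, and Msg are assumed to come from the attribute being processed:

    using clang::sema::DelayedDiagnostic;
    DelayedDiagnostic DD = DelayedDiagnostic::makeDeprecation(Loc, D, Msg);
    // When the diagnostic is finally emitted:
    llvm::StringRef Text =
        DD.getDeprecationMessage(); // asserts Kind == Deprecation
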
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
index 7be0033..6d7bc63 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
@@ -14,10 +14,11 @@
#define LLVM_CLANG_SEMA_EXTERNAL_SEMA_SOURCE_H
#include "clang/AST/ExternalASTSource.h"
-#include "clang/Sema/ObjCMethodList.h"
+#include <utility>
namespace clang {
+struct ObjCMethodList;
class Sema;
/// \brief An abstract interface that should be implemented by
@@ -44,10 +45,7 @@ public:
///
/// \returns a pair of Objective-C methods lists containing the
/// instance and factory methods, respectively, with this selector.
- virtual std::pair<ObjCMethodList, ObjCMethodList>
- ReadMethodPool(Selector Sel) {
- return std::pair<ObjCMethodList, ObjCMethodList>();
- }
+ virtual std::pair<ObjCMethodList,ObjCMethodList> ReadMethodPool(Selector Sel);
// isa/cast/dyn_cast support
static bool classof(const ExternalASTSource *Source) {
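
Because ObjCMethodList is now only forward-declared here and ReadMethodPool is defined out of line, an overrider has to include the full definition itself. A sketch of a subclass under those assumptions (MySource is hypothetical):

    #include "clang/Sema/ObjCMethodList.h" // full type needed to override

    class MySource : public clang::ExternalSemaSource {
      virtual std::pair<clang::ObjCMethodList, clang::ObjCMethodList>
      ReadMethodPool(clang::Selector Sel) {
        // No external data: return empty instance/factory lists.
        return std::make_pair(clang::ObjCMethodList(),
                              clang::ObjCMethodList());
      }
    };
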
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
index 0062b3a..c191565 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
@@ -88,6 +88,10 @@ private:
/// the VarDecl, ParmVarDecl, or FieldDecl, respectively.
DeclaratorDecl *VariableOrMember;
+ /// \brief When Kind == EK_Temporary, the type source information for
+ /// the temporary.
+ TypeSourceInfo *TypeInfo;
+
struct {
/// \brief When Kind == EK_Result, EK_Exception, or EK_New, the
/// location of the 'return', 'throw', or 'new' keyword,
@@ -114,12 +118,12 @@ private:
/// \brief Create the initialization entity for a variable.
InitializedEntity(VarDecl *Var)
: Kind(EK_Variable), Parent(0), Type(Var->getType()),
- VariableOrMember(reinterpret_cast<DeclaratorDecl*>(Var)) { }
+ VariableOrMember(Var) { }
/// \brief Create the initialization entity for a parameter.
InitializedEntity(ParmVarDecl *Parm)
: Kind(EK_Parameter), Parent(0), Type(Parm->getType().getUnqualifiedType()),
- VariableOrMember(reinterpret_cast<DeclaratorDecl*>(Parm)) { }
+ VariableOrMember(Parm) { }
/// \brief Create the initialization entity for the result of a
/// function, throwing an object, performing an explicit cast, or
@@ -135,7 +139,7 @@ private:
/// \brief Create the initialization entity for a member subobject.
InitializedEntity(FieldDecl *Member, const InitializedEntity *Parent)
: Kind(EK_Member), Parent(Parent), Type(Member->getType()),
- VariableOrMember(reinterpret_cast<DeclaratorDecl*>(Member)) { }
+ VariableOrMember(Member) { }
/// \brief Create the initialization entity for an array element.
InitializedEntity(ASTContext &Context, unsigned Index,
@@ -148,16 +152,20 @@ public:
}
/// \brief Create the initialization entity for a parameter.
- static InitializedEntity InitializeParameter(ParmVarDecl *Parm) {
- return InitializedEntity(Parm);
+ static InitializedEntity InitializeParameter(ASTContext &Context,
+ ParmVarDecl *Parm) {
+ InitializedEntity Res(Parm);
+ Res.Type = Context.getVariableArrayDecayedType(Res.Type);
+ return Res;
}
/// \brief Create the initialization entity for a parameter that is
/// only known by its type.
- static InitializedEntity InitializeParameter(QualType Type) {
+ static InitializedEntity InitializeParameter(ASTContext &Context,
+ QualType Type) {
InitializedEntity Entity;
Entity.Kind = EK_Parameter;
- Entity.Type = Type;
+ Entity.Type = Context.getVariableArrayDecayedType(Type);
Entity.Parent = 0;
Entity.VariableOrMember = 0;
return Entity;
@@ -189,7 +197,15 @@ public:
static InitializedEntity InitializeTemporary(QualType Type) {
return InitializedEntity(EK_Temporary, SourceLocation(), Type);
}
-
+
+ /// \brief Create the initialization entity for a temporary.
+ static InitializedEntity InitializeTemporary(TypeSourceInfo *TypeInfo) {
+ InitializedEntity Result(EK_Temporary, SourceLocation(),
+ TypeInfo->getType());
+ Result.TypeInfo = TypeInfo;
+ return Result;
+ }
+
/// \brief Create the initialization entity for a base class subobject.
static InitializedEntity InitializeBase(ASTContext &Context,
CXXBaseSpecifier *Base,
@@ -201,6 +217,12 @@ public:
return InitializedEntity(Member, Parent);
}
+ /// \brief Create the initialization entity for a member subobject.
+ static InitializedEntity InitializeMember(IndirectFieldDecl *Member,
+ const InitializedEntity *Parent = 0) {
+ return InitializedEntity(Member->getAnonField(), Parent);
+ }
+
/// \brief Create the initialization entity for an array element.
static InitializedEntity InitializeElement(ASTContext &Context,
unsigned Index,
@@ -219,6 +241,15 @@ public:
/// \brief Retrieve type being initialized.
QualType getType() const { return Type; }
+ /// \brief Retrieve complete type-source information for the object being
+ /// constructed, if known.
+ TypeSourceInfo *getTypeSourceInfo() const {
+ if (Kind == EK_Temporary)
+ return TypeInfo;
+
+ return 0;
+ }
+
/// \brief Retrieve the name of the entity being initialized.
DeclarationName getName() const;
@@ -760,12 +791,18 @@ public:
return FailedCandidateSet;
}
+ /// \brief Get the overloading result, for when the initialization
+ /// sequence failed due to a bad overload.
+ OverloadingResult getFailedOverloadResult() const {
+ return FailedOverloadResult;
+ }
+
/// \brief Determine why initialization failed.
FailureKind getFailureKind() const {
assert(getKind() == FailedSequence && "Not an initialization failure!");
return Failure;
}
-
+
/// \brief Dump a representation of this initialization sequence to
/// the given stream, for debugging purposes.
void dump(llvm::raw_ostream &OS) const;
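
Both InitializeParameter overloads now take the ASTContext so that variably-modified parameter types decay before initialization analysis, and temporaries can carry type-source information. A sketch of the adjusted call sites, with S an assumed Sema reference:

    // Parameter initialization: a type like 'int[n]' decays to 'int *'.
    clang::InitializedEntity Ent =
        clang::InitializedEntity::InitializeParameter(S.Context, Param);

    // Temporary initialization preserving the written type's location info.
    clang::InitializedEntity Tmp =
        clang::InitializedEntity::InitializeTemporary(TInfo);
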
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
index 1c7720a..aa58d14 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
@@ -365,6 +365,11 @@ public:
if (Decls.empty()) {
if (ResultKind != NotFoundInCurrentInstantiation)
ResultKind = NotFound;
+
+ if (Paths) {
+ deletePaths(Paths);
+ Paths = 0;
+ }
} else {
AmbiguityKind SavedAK = Ambiguity;
ResultKind = Found;
@@ -492,25 +497,18 @@ public:
LookupResult &Results;
LookupResult::iterator I;
bool Changed;
-#ifndef NDEBUG
bool CalledDone;
-#endif
friend class LookupResult;
Filter(LookupResult &Results)
- : Results(Results), I(Results.begin()), Changed(false)
-#ifndef NDEBUG
- , CalledDone(false)
-#endif
+ : Results(Results), I(Results.begin()), Changed(false), CalledDone(false)
{}
public:
-#ifndef NDEBUG
~Filter() {
assert(CalledDone &&
"LookupResult::Filter destroyed without done() call");
}
-#endif
bool hasNext() const {
return I != Results.end();
@@ -541,10 +539,8 @@ public:
}
void done() {
-#ifndef NDEBUG
assert(!CalledDone && "done() called twice");
CalledDone = true;
-#endif
if (Changed)
Results.resolveKindAfterFilter();
@@ -573,11 +569,7 @@ private:
void configure();
// Sanity checks.
-#ifndef NDEBUG
void sanity() const;
-#else
- void sanity() const {}
-#endif
bool sanityCheckUnresolved() const {
for (iterator I = begin(), E = end(); I != E; ++I)
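
With the NDEBUG guards gone, Filter's done() bookkeeping exists in all build modes, so every filtering loop must end with done(). The usual pattern, where isUseful is a hypothetical predicate and R a LookupResult:

    clang::LookupResult::Filter F = R.makeFilter();
    while (F.hasNext()) {
      clang::NamedDecl *ND = F.next();
      if (!isUseful(ND))
        F.erase(); // marks the result set as changed
    }
    F.done();      // the destructor asserts if this call is missing
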
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
index 851d68a..3ce3513 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
@@ -75,6 +75,7 @@ namespace clang {
ICK_Vector_Conversion, ///< Vector conversions
ICK_Vector_Splat, ///< A vector splat from an arithmetic type
ICK_Complex_Real, ///< Complex-real conversions (C99 6.3.1.7)
+ ICK_Block_Pointer_Conversion, ///< Block Pointer conversions
ICK_Num_Conversion_Kinds ///< The number of conversion kinds
};
@@ -130,27 +131,37 @@ namespace clang {
/// Third - The third conversion can be a qualification conversion.
ImplicitConversionKind Third : 8;
- /// Deprecated - Whether this the deprecated conversion of a
+ /// \brief Whether this is the deprecated conversion of a
/// string literal to a pointer to non-const character data
/// (C++ 4.2p2).
- bool DeprecatedStringLiteralToCharPtr : 1;
+ unsigned DeprecatedStringLiteralToCharPtr : 1;
/// IncompatibleObjC - Whether this is an Objective-C conversion
/// that we should warn about (if we actually use it).
- bool IncompatibleObjC : 1;
+ unsigned IncompatibleObjC : 1;
/// ReferenceBinding - True when this is a reference binding
/// (C++ [over.ics.ref]).
- bool ReferenceBinding : 1;
+ unsigned ReferenceBinding : 1;
/// DirectBinding - True when this is a reference binding that is a
/// direct binding (C++ [dcl.init.ref]).
- bool DirectBinding : 1;
-
- /// RRefBinding - True when this is a reference binding of an rvalue
- /// reference to an rvalue (C++0x [over.ics.rank]p3b4).
- bool RRefBinding : 1;
+ unsigned DirectBinding : 1;
+ /// \brief Whether this is an lvalue reference binding (otherwise, it's
+ /// an rvalue reference binding).
+ unsigned IsLvalueReference : 1;
+
+ /// \brief Whether we're binding to a function lvalue.
+ unsigned BindsToFunctionLvalue : 1;
+
+ /// \brief Whether we're binding to an rvalue.
+ unsigned BindsToRvalue : 1;
+
+ /// \brief Whether this binds an implicit object argument to a
+ /// non-static member function without a ref-qualifier.
+ unsigned BindsImplicitObjectArgumentWithoutRefQualifier : 1;
+
/// FromType - The type that this conversion is converting
/// from. This is an opaque pointer that can be translated into a
/// QualType.
@@ -231,6 +242,11 @@ namespace clang {
/// user-defined conversion.
FunctionDecl* ConversionFunction;
+ /// \brief The declaration that we found via name lookup, which might be
+ /// the same as \c ConversionFunction or it might be a using declaration
+ /// that refers to \c ConversionFunction.
+ NamedDecl *FoundConversionFunction;
+
void DebugPrint() const;
};
@@ -283,7 +299,9 @@ namespace clang {
no_conversion,
unrelated_class,
suppressed_user,
- bad_qualifiers
+ bad_qualifiers,
+ lvalue_ref_to_rvalue,
+ rvalue_ref_to_lvalue
};
// This can be null, e.g. for implicit object arguments.
@@ -549,6 +567,10 @@ namespace clang {
/// Actually an OverloadFailureKind.
unsigned char FailureKind;
+ /// \brief The number of call arguments that were explicitly provided,
+ /// to be used while performing partial ordering of function templates.
+ unsigned ExplicitCallArguments;
+
/// A structure used to record information about a failed
/// template argument deduction.
struct DeductionFailureInfo {
@@ -630,7 +652,8 @@ namespace clang {
/// Find the best viable function on this overload set, if it exists.
OverloadingResult BestViableFunction(Sema &S, SourceLocation Loc,
- OverloadCandidateSet::iterator& Best);
+ OverloadCandidateSet::iterator& Best,
+ bool UserDefinedConversion = false);
void NoteCandidates(Sema &S,
OverloadCandidateDisplayKind OCD,
@@ -642,7 +665,8 @@ namespace clang {
bool isBetterOverloadCandidate(Sema &S,
const OverloadCandidate& Cand1,
const OverloadCandidate& Cand2,
- SourceLocation Loc);
+ SourceLocation Loc,
+ bool UserDefinedConversion = false);
} // end namespace clang
#endif // LLVM_CLANG_SEMA_OVERLOAD_H
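
BestViableFunction and isBetterOverloadCandidate gain a UserDefinedConversion flag so candidate ranking can be adjusted when selecting a conversion function. A sketch of a call site using the new parameter:

    clang::OverloadCandidateSet::iterator Best;
    clang::OverloadingResult Res =
        CandidateSet.BestViableFunction(S, Loc, Best,
                                        /*UserDefinedConversion=*/true);
    if (Res == clang::OR_Success)
      UseConversion(Best->Function); // hypothetical follow-up
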
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
index 7739f3a..5f2f0eb 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
@@ -23,7 +23,7 @@
namespace clang {
class Attr;
- class CXXBaseOrMemberInitializer;
+ class CXXCtorInitializer;
class CXXBaseSpecifier;
class Decl;
class DeclGroupRef;
@@ -106,6 +106,9 @@ namespace llvm {
}
enum { NumLowBitsAvailable = 0 };
};
+
+ template <class T>
+ struct isPodLike<clang::OpaquePtr<T> > { static const bool value = true; };
}
@@ -414,7 +417,7 @@ namespace clang {
template<> struct IsResultPtrLowBitFree<CXXBaseSpecifier*> {
static const bool value = true;
};
- template<> struct IsResultPtrLowBitFree<CXXBaseOrMemberInitializer*> {
+ template<> struct IsResultPtrLowBitFree<CXXCtorInitializer*> {
static const bool value = true;
};
@@ -427,7 +430,7 @@ namespace clang {
typedef ActionResult<Stmt*> StmtResult;
typedef ActionResult<ParsedType> TypeResult;
typedef ActionResult<CXXBaseSpecifier*> BaseResult;
- typedef ActionResult<CXXBaseOrMemberInitializer*> MemInitResult;
+ typedef ActionResult<CXXCtorInitializer*> MemInitResult;
typedef ActionResult<Decl*> DeclResult;
typedef OpaquePtr<TemplateName> ParsedTemplateTy;
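
The new isPodLike specialization tells LLVM's containers that OpaquePtr<T> is trivially copyable, so SmallVector and DenseMap may relocate elements with memcpy instead of element-wise copies. Illustrative only:

    // Growth of this vector now relocates elements with a single memcpy.
    llvm::SmallVector<clang::ParsedTemplateTy, 8> Templates;
    Templates.push_back(clang::ParsedTemplateTy()); // default-constructed: null
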
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
index da68a49..5aa6f47 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
@@ -32,7 +32,9 @@ namespace clang {
Template
};
- /// \brief Build an empty template argument. This template argument
+ /// \brief Build an empty template argument.
+ ///
+ /// This template argument is invalid.
ParsedTemplateArgument() : Kind(Type), Arg(0) { }
/// \brief Create a template type argument or non-type template argument.
@@ -56,7 +58,7 @@ namespace clang {
SourceLocation TemplateLoc)
: Kind(ParsedTemplateArgument::Template),
Arg(Template.getAsOpaquePtr()),
- Loc(TemplateLoc), SS(SS) { }
+ Loc(TemplateLoc), SS(SS), EllipsisLoc() { }
/// \brief Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == 0; }
@@ -93,6 +95,21 @@ namespace clang {
return SS;
}
+ /// \brief Retrieve the location of the ellipsis that makes a template
+ /// template argument into a pack expansion.
+ SourceLocation getEllipsisLoc() const {
+ assert(Kind == Template &&
+ "Only template template arguments can have an ellipsis");
+ return EllipsisLoc;
+ }
+
+ /// \brief Retrieve a pack expansion of the given template template
+ /// argument.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ParsedTemplateArgument getTemplatePackExpansion(
+ SourceLocation EllipsisLoc) const;
+
private:
KindType Kind;
@@ -107,6 +124,10 @@ namespace clang {
/// \brief The nested-name-specifier that can accompany a template template
/// argument.
CXXScopeSpec SS;
+
+ /// \brief The ellipsis location that can accompany a template template
+ /// argument (turning it into a template template argument expansion).
+ SourceLocation EllipsisLoc;
};
/// \brief Information about a template-id annotation
@@ -161,7 +182,10 @@ namespace clang {
void Destroy() { free(this); }
};
-
+
+ /// Retrieves the range of the given template parameter lists.
+ SourceRange getTemplateParamsRange(TemplateParameterList const *const *Params,
+ unsigned NumParams);
inline const ParsedTemplateArgument &
ASTTemplateArgsPtr::operator[](unsigned Arg) const {
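
A template-template argument can now record a trailing ellipsis, turning it into a pack expansion. A sketch of the intended use when the parser sees '...' after such an argument, with Arg and EllipsisLoc assumed from the parsing context:

    if (Arg.getKind() == clang::ParsedTemplateArgument::Template) {
      clang::ParsedTemplateArgument Expansion =
          Arg.getTemplatePackExpansion(EllipsisLoc);
      // Expansion.getEllipsisLoc() now yields EllipsisLoc.
    }
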
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
index 4229c6c..d7fda35 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_SEMA_SCOPE_H
#define LLVM_CLANG_SEMA_SCOPE_H
+#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallPtrSet.h"
namespace clang {
@@ -52,7 +53,7 @@ public:
/// ClassScope - The scope of a struct/union/class definition.
ClassScope = 0x20,
- /// BlockScope - This is a scope that corresponds to a block object.
+ /// BlockScope - This is a scope that corresponds to a block/closure object.
/// Blocks serve as top-level scopes for some objects like labels, they
/// also prevent things like break and continue. BlockScopes always have
/// the FnScope, BreakScope, ContinueScope, and DeclScope flags set as well.
@@ -131,11 +132,12 @@ private:
typedef llvm::SmallVector<UsingDirectiveDecl *, 2> UsingDirectivesTy;
UsingDirectivesTy UsingDirectives;
- /// \brief The number of errors at the start of the given scope.
- unsigned NumErrorsAtStart;
+ /// \brief Used to determine if errors occurred in this scope.
+ DiagnosticErrorTrap ErrorTrap;
public:
- Scope(Scope *Parent, unsigned ScopeFlags) {
+ Scope(Scope *Parent, unsigned ScopeFlags, Diagnostic &Diag)
+ : ErrorTrap(Diag) {
Init(Parent, ScopeFlags);
}
@@ -144,8 +146,7 @@ public:
unsigned getFlags() const { return Flags; }
void setFlags(unsigned F) { Flags = F; }
- /// isBlockScope - Return true if this scope does not correspond to a
- /// closure.
+ /// isBlockScope - Return true if this scope corresponds to a closure.
bool isBlockScope() const { return Flags & BlockScope; }
/// getParent - Return the scope that this is nested in.
@@ -214,13 +215,7 @@ public:
void* getEntity() const { return Entity; }
void setEntity(void *E) { Entity = E; }
- /// \brief Retrieve the number of errors that had been emitted when we
- /// entered this scope.
- unsigned getNumErrorsAtStart() const { return NumErrorsAtStart; }
-
- void setNumErrorsAtStart(unsigned NumErrors) {
- NumErrorsAtStart = NumErrors;
- }
+ bool hasErrorOccurred() const { return ErrorTrap.hasErrorOccurred(); }
/// isClassScope - Return true if this scope is a class/struct/union scope.
bool isClassScope() const {
@@ -318,7 +313,7 @@ public:
DeclsInScope.clear();
UsingDirectives.clear();
Entity = 0;
- NumErrorsAtStart = 0;
+ ErrorTrap.reset();
}
};
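
Scopes now track errors with a DiagnosticErrorTrap instead of snapshotting an error count, so construction needs the diagnostics engine and the query becomes a single call. A sketch under that assumption:

    clang::Scope *S =
        new clang::Scope(Parent, clang::Scope::DeclScope,
                         PP.getDiagnostics());
    // ... parse the scope's contents ...
    if (S->hasErrorOccurred()) {
      // e.g. suppress follow-on diagnostics for this scope
    }
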
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
index 50cfa9b..b0bb955 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
@@ -17,12 +17,13 @@
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SetVector.h"
namespace clang {
class BlockDecl;
class IdentifierInfo;
-class LabelStmt;
+class LabelDecl;
class ReturnStmt;
class Scope;
class SwitchStmt;
@@ -48,14 +49,8 @@ public:
/// \brief Whether this function contains any indirect gotos.
bool HasIndirectGoto;
- /// \brief The number of errors that had occurred before starting this
- /// function or block.
- unsigned NumErrorsAtStartOfFunction;
-
- /// LabelMap - This is a mapping from label identifiers to the LabelStmt for
- /// it (which acts like the label decl in some ways). Forward referenced
- /// labels have a LabelStmt created for them with a null location & SubStmt.
- llvm::DenseMap<IdentifierInfo*, LabelStmt*> LabelMap;
+ /// \brief Used to determine if errors occurred in this function or block.
+ DiagnosticErrorTrap ErrorTrap;
/// SwitchStack - This is the current set of active switch statements in the
/// block.
@@ -64,7 +59,7 @@ public:
/// \brief The list of return statements that occur within the function or
/// block, if there is any chance of applying the named return value
/// optimization.
- llvm::SmallVector<ReturnStmt *, 4> Returns;
+ llvm::SmallVector<ReturnStmt*, 4> Returns;
void setHasBranchIntoScope() {
HasBranchIntoScope = true;
@@ -83,18 +78,18 @@ public:
(HasBranchProtectedScope && HasBranchIntoScope);
}
- FunctionScopeInfo(unsigned NumErrors)
+ FunctionScopeInfo(Diagnostic &Diag)
: IsBlockInfo(false),
HasBranchProtectedScope(false),
HasBranchIntoScope(false),
HasIndirectGoto(false),
- NumErrorsAtStartOfFunction(NumErrors) { }
+ ErrorTrap(Diag) { }
virtual ~FunctionScopeInfo();
/// \brief Clear out the information in this function scope, making it
/// suitable for reuse.
- void Clear(unsigned NumErrors);
+ void Clear();
static bool classof(const FunctionScopeInfo *FSI) { return true; }
};
@@ -102,8 +97,6 @@ public:
/// \brief Retains information about a block that is currently being parsed.
class BlockScopeInfo : public FunctionScopeInfo {
public:
- bool hasBlockDeclRefExprs;
-
BlockDecl *TheDecl;
/// TheScope - This is the scope for the block itself, which contains
@@ -118,9 +111,18 @@ public:
/// Its return type may be BuiltinType::Dependent.
QualType FunctionType;
- BlockScopeInfo(unsigned NumErrors, Scope *BlockScope, BlockDecl *Block)
- : FunctionScopeInfo(NumErrors), hasBlockDeclRefExprs(false),
- TheDecl(Block), TheScope(BlockScope)
+ /// CaptureMap - A map of captured variables to (index+1) into Captures.
+ llvm::DenseMap<VarDecl*, unsigned> CaptureMap;
+
+ /// Captures - The captured variables.
+ llvm::SmallVector<BlockDecl::Capture, 4> Captures;
+
+ /// CapturesCXXThis - Whether this block captures 'this'.
+ bool CapturesCXXThis;
+
+ BlockScopeInfo(Diagnostic &Diag, Scope *BlockScope, BlockDecl *Block)
+ : FunctionScopeInfo(Diag), TheDecl(Block), TheScope(BlockScope),
+ CapturesCXXThis(false)
{
IsBlockInfo = true;
}
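
Block captures are now recorded on BlockScopeInfo as parsing proceeds: CaptureMap stores index+1 into Captures, with 0 meaning "not captured yet". A sketch of the recording idiom this enables; the Capture constructor arguments shown are assumptions:

    unsigned &Slot = BSI->CaptureMap[Var]; // 0 if Var not seen before
    if (Slot == 0) {
      BSI->Captures.push_back(
          clang::BlockDecl::Capture(Var, /*byRef=*/false,
                                    /*nested=*/false, /*copy=*/0));
      Slot = BSI->Captures.size();         // stored as index + 1
    }
    const clang::BlockDecl::Capture &Cap = BSI->Captures[Slot - 1];
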
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index 4741028..91d6914 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -20,9 +20,10 @@
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/DeclSpec.h"
-#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
@@ -62,6 +63,7 @@ namespace clang {
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class CodeCompleteConsumer;
+ class CodeCompletionAllocator;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
@@ -78,7 +80,6 @@ namespace clang {
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
- class FullExpr;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
@@ -121,7 +122,6 @@ namespace clang {
class TargetAttributesSema;
class TemplateArgument;
class TemplateArgumentList;
- class TemplateArgumentListBuilder;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
@@ -129,6 +129,7 @@ namespace clang {
class TemplateTemplateParmDecl;
class Token;
class TypedefDecl;
+ class TypeLoc;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
@@ -140,7 +141,8 @@ namespace clang {
class VarDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
-
+ class IndirectFieldDecl;
+
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
@@ -165,7 +167,10 @@ class LocInfoType : public Type {
TypeSourceInfo *DeclInfo;
LocInfoType(QualType ty, TypeSourceInfo *TInfo)
- : Type((TypeClass)LocInfo, ty, ty->isDependentType()), DeclInfo(TInfo) {
+ : Type((TypeClass)LocInfo, ty, ty->isDependentType(),
+ ty->isVariablyModifiedType(),
+ ty->containsUnexpandedParameterPack()),
+ DeclInfo(TInfo) {
assert(getTypeClass() == (TypeClass)LocInfo && "LocInfo didn't fit in TC?");
}
friend class Sema;
@@ -183,6 +188,11 @@ public:
static bool classof(const LocInfoType *) { return true; }
};
+// FIXME: No way to easily map from TemplateTypeParmTypes to
+// TemplateTypeParmDecls, so we have this horrible PointerUnion.
+typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
+ SourceLocation> UnexpandedParameterPack;
+
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema&); // DO NOT IMPLEMENT
@@ -194,12 +204,15 @@ public:
typedef OpaquePtr<QualType> TypeTy;
typedef Attr AttrTy;
typedef CXXBaseSpecifier BaseTy;
- typedef CXXBaseOrMemberInitializer MemInitTy;
+ typedef CXXCtorInitializer MemInitTy;
typedef Expr ExprTy;
typedef Stmt StmtTy;
typedef TemplateParameterList TemplateParamsTy;
typedef NestedNameSpecifier CXXScopeTy;
+ OpenCLOptions OpenCLFeatures;
+ FPOptions FPFeatures;
+
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
@@ -220,30 +233,6 @@ public:
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
- /// A RAII object to temporarily push a declaration context.
- class ContextRAII {
- private:
- Sema &S;
- DeclContext *SavedContext;
-
- public:
- ContextRAII(Sema &S, DeclContext *ContextToPush)
- : S(S), SavedContext(S.CurContext) {
- assert(ContextToPush && "pushing null context");
- S.CurContext = ContextToPush;
- }
-
- void pop() {
- if (!SavedContext) return;
- S.CurContext = SavedContext;
- SavedContext = 0;
- }
-
- ~ContextRAII() {
- pop();
- }
- };
-
/// PackContext - Manages the stack for #pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
@@ -312,14 +301,125 @@ public:
/// and must warn if not used. Only contains the first declaration.
llvm::SmallVector<const DeclaratorDecl*, 4> UnusedFileScopedDecls;
- /// \brief The stack of diagnostics that were delayed due to being
- /// produced during the parsing of a declaration.
- llvm::SmallVector<sema::DelayedDiagnostic, 0> DelayedDiagnostics;
+ class DelayedDiagnostics;
+
+ class ParsingDeclState {
+ unsigned SavedStackSize;
+ friend class Sema::DelayedDiagnostics;
+ };
+
+ class ProcessingContextState {
+ unsigned SavedParsingDepth;
+ unsigned SavedActiveStackBase;
+ friend class Sema::DelayedDiagnostics;
+ };
+
+ /// A class which encapsulates the logic for delaying diagnostics
+ /// during parsing and other processing.
+ class DelayedDiagnostics {
+ /// \brief The stack of diagnostics that were delayed due to being
+ /// produced during the parsing of a declaration.
+ sema::DelayedDiagnostic *Stack;
+
+ /// \brief The number of objects on the delayed-diagnostics stack.
+ unsigned StackSize;
+
+ /// \brief The current capacity of the delayed-diagnostics stack.
+ unsigned StackCapacity;
+
+ /// \brief The index of the first "active" delayed diagnostic in
+ /// the stack. When parsing class definitions, we ignore active
+ /// delayed diagnostics from the surrounding context.
+ unsigned ActiveStackBase;
+
+ /// \brief The depth of the declarations we're currently parsing.
+ /// This gets saved and reset whenever we enter a class definition.
+ unsigned ParsingDepth;
+
+ public:
+ DelayedDiagnostics() : Stack(0), StackSize(0), StackCapacity(0),
+ ActiveStackBase(0), ParsingDepth(0) {}
+
+ ~DelayedDiagnostics() {
+ delete[] reinterpret_cast<char*>(Stack);
+ }
- /// \brief The depth of the current ParsingDeclaration stack.
- /// If nonzero, we are currently parsing a declaration (and
- /// hence should delay deprecation warnings).
- unsigned ParsingDeclDepth;
+ /// Adds a delayed diagnostic.
+ void add(const sema::DelayedDiagnostic &diag);
+
+ /// Determines whether diagnostics should be delayed.
+ bool shouldDelayDiagnostics() { return ParsingDepth > 0; }
+
+ /// Observe that we've started parsing a declaration. Access and
+ /// deprecation diagnostics will be delayed; when the declaration
+ /// is completed, all active delayed diagnostics will be evaluated
+ /// in its context, and then the active diagnostic stack will be
+ /// popped down to the saved depth.
+ ParsingDeclState pushParsingDecl() {
+ ParsingDepth++;
+
+ ParsingDeclState state;
+ state.SavedStackSize = StackSize;
+ return state;
+ }
+
+ /// Observe that we've completed parsing a declaration.
+ static void popParsingDecl(Sema &S, ParsingDeclState state, Decl *decl);
+
+ /// Observe that we've started processing a different context, the
+ /// contents of which are semantically separate from the
+ /// declarations it may lexically appear in. This sets aside the
+ /// current stack of active diagnostics and starts afresh.
+ ProcessingContextState pushContext() {
+ assert(StackSize >= ActiveStackBase);
+
+ ProcessingContextState state;
+ state.SavedParsingDepth = ParsingDepth;
+ state.SavedActiveStackBase = ActiveStackBase;
+
+ ActiveStackBase = StackSize;
+ ParsingDepth = 0;
+
+ return state;
+ }
+
+ /// Observe that we've stopped processing a context. This
+ /// restores the previous stack of active diagnostics.
+ void popContext(ProcessingContextState state) {
+ assert(ActiveStackBase == StackSize);
+ assert(ParsingDepth == 0);
+ ActiveStackBase = state.SavedActiveStackBase;
+ ParsingDepth = state.SavedParsingDepth;
+ }
+ } DelayedDiagnostics;
+
+ /// A RAII object to temporarily push a declaration context.
+ class ContextRAII {
+ private:
+ Sema &S;
+ DeclContext *SavedContext;
+ ProcessingContextState SavedContextState;
+
+ public:
+ ContextRAII(Sema &S, DeclContext *ContextToPush)
+ : S(S), SavedContext(S.CurContext),
+ SavedContextState(S.DelayedDiagnostics.pushContext())
+ {
+ assert(ContextToPush && "pushing null context");
+ S.CurContext = ContextToPush;
+ }
+
+ void pop() {
+ if (!SavedContext) return;
+ S.CurContext = SavedContext;
+ S.DelayedDiagnostics.popContext(SavedContextState);
+ SavedContext = 0;
+ }
+
+ ~ContextRAII() {
+ pop();
+ }
+ };
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// #pragma weak before declared. rare. may alias another
@@ -365,6 +465,12 @@ public:
/// standard library.
LazyDeclPtr StdBadAlloc;
+ /// \brief The C++ "type_info" declaration, which is defined in <typeinfo>.
+ RecordDecl *CXXTypeInfoDecl;
+
+ /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
+ RecordDecl *MSVCGuidDecl;
+
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
@@ -398,7 +504,17 @@ public:
/// This evaluation context is used primary for the operand of the C++
/// \c typeid expression, whose argument is potentially evaluated only when
/// it is an lvalue of polymorphic class type (C++ [basic.def.odr]p2).
- PotentiallyPotentiallyEvaluated
+ PotentiallyPotentiallyEvaluated,
+
+ /// \brief The current expression is potentially evaluated, but any
+ /// declarations referenced inside that expression are only used if
+ /// in fact the current expression is used.
+ ///
+ /// This value is used when parsing default function arguments, for which
+ /// we would like to provide diagnostics (e.g., passing non-POD arguments
+ /// through varargs) but do not want to mark declarations as "referenced"
+ /// until the default argument is used.
+ PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
@@ -468,6 +584,26 @@ public:
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
+ typedef llvm::DenseMap<ParmVarDecl *, llvm::SmallVector<ParmVarDecl *, 1> >
+ UnparsedDefaultArgInstantiationsMap;
+
+ /// \brief A mapping from parameters with unparsed default arguments to the
+ /// set of instantiations of each parameter.
+ ///
+ /// This mapping is a temporary data structure used when parsing
+ /// nested class templates or nested classes of class templates,
+ /// where we might end up instantiating an inner class before the
+ /// default arguments of its methods have been parsed.
+ UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
+
+ // Contains the beginning locations of unparsed default
+ // arguments.
+ llvm::DenseMap<ParmVarDecl *,SourceLocation> UnparsedDefaultArgLocs;
+
+ /// UndefinedInternals - all the used, undefined objects with
+ /// internal linkage in this translation unit.
+ llvm::DenseMap<NamedDecl*, SourceLocation> UndefinedInternals;
+
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
@@ -481,7 +617,6 @@ public:
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
-
GlobalMethodPool::iterator ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
@@ -497,6 +632,9 @@ public:
void Initialize();
const LangOptions &getLangOptions() const { return LangOpts; }
+ OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
+ FPOptions &getFPOptions() { return FPFeatures; }
+
Diagnostic &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
const TargetAttributesSema &getTargetAttributesSema() const;
@@ -580,16 +718,20 @@ public:
SourceLocation AttrLoc);
QualType BuildFunctionType(QualType T,
QualType *ParamTypes, unsigned NumParamTypes,
- bool Variadic, unsigned Quals,
+ bool Variadic, unsigned Quals,
+ RefQualifierKind RefQualifier,
SourceLocation Loc, DeclarationName Entity,
- const FunctionType::ExtInfo &Info);
+ FunctionType::ExtInfo Info);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
+ QualType BuildParenType(QualType T);
+
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S,
- TagDecl **OwnedDecl = 0);
+ TagDecl **OwnedDecl = 0,
+ bool AllowAutoInTypeName = false);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
@@ -630,8 +772,8 @@ public:
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
- QualType BuildTypeofExprType(Expr *E);
- QualType BuildDecltypeType(Expr *E);
+ QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
+ QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
@@ -644,6 +786,7 @@ public:
ParsedType getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = 0,
bool isClassName = false,
+ bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType());
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool DiagnoseUnknownTypeName(const IdentifierInfo &II,
@@ -674,18 +817,19 @@ public:
bool &Redeclaration);
void CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous,
bool &Redeclaration);
+ void CheckCompleteVariableDeclaration(VarDecl *var);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
QualType R, TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool IsFunctionDefinition,
bool &Redeclaration);
- void AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
+ bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
+ void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
void CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization,
- bool &Redeclaration,
- bool &OverloadableAttrRequired);
+ bool &Redeclaration);
void CheckMain(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
@@ -707,14 +851,9 @@ public:
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
-
- // Contains the locations of the beginning of unparsed default
- // argument locations.
- llvm::DenseMap<ParmVarDecl *,SourceLocation> UnparsedDefaultArgLocs;
-
- void AddInitializerToDecl(Decl *dcl, Expr *init);
- void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
- void ActOnUninitializedDecl(Decl *dcl, bool TypeContainsUndeducedAuto);
+ void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
+ bool TypeMayContainAuto);
+ void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
@@ -734,6 +873,14 @@ public:
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
+ /// \brief Diagnose when a function or Objective-C method definition has
+ /// pass-by-value parameters or a return value whose size exceeds a
+ /// specified threshold.
+ void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
+ ParmVarDecl * const *End,
+ QualType ReturnTy,
+ NamedDecl *D);
+
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(SourceLocation Loc, Expr *expr);
@@ -745,11 +892,16 @@ public:
/// no declarator (e.g. "struct foo;") is parsed.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
+
+ StmtResult ActOnVlaStmt(const DeclSpec &DS);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record);
+ Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ RecordDecl *Record);
+
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag,
SourceLocation NewTagLoc,
@@ -767,7 +919,15 @@ public:
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParameterLists,
- bool &OwnedDecl, bool &IsDependent);
+ bool &OwnedDecl, bool &IsDependent, bool ScopedEnum,
+ bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);
+
+ Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
@@ -826,6 +986,7 @@ public:
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
+ ClassVirtSpecifiers &CVS,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
@@ -845,6 +1006,7 @@ public:
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
+ AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
@@ -897,6 +1059,7 @@ public:
void MergeTypeDefDecl(TypedefDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, Decl *Old);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
+ void MergeVarDeclTypes(VarDecl *New, VarDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &OldDecls);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old);
@@ -927,7 +1090,7 @@ public:
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
- FunctionDecl *New,
+ FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
@@ -938,7 +1101,8 @@ public:
Expr *From,
bool SuppressUserConversions,
bool AllowExplicit,
- bool InOverloadResolution);
+ bool InOverloadResolution,
+ bool CStyle);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
@@ -948,8 +1112,10 @@ public:
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
- bool FunctionArgTypesAreEqual (FunctionProtoType* OldType,
- FunctionProtoType* NewType);
+ bool IsBlockPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType);
+ bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
@@ -962,10 +1128,16 @@ public:
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
- bool IsQualificationConversion(QualType FromType, QualType ToType);
+ bool IsQualificationConversion(QualType FromType, QualType ToType,
+ bool CStyle);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
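+ /// \brief Perform the initialization of a return value or thrown object
+ /// from the given expression, treating the expression as an rvalue first
+ /// when it names the NRVO candidate, so that a move constructor is
+ /// preferred over a copy constructor when one is available (C++0x).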
+ ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *Value);
+
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init);
@@ -1014,12 +1186,14 @@ public:
bool SuppressUserConversions = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
@@ -1028,6 +1202,7 @@ public:
CXXRecordDecl *ActingContext,
const TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
@@ -1051,7 +1226,7 @@ public:
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
- QualType ObjectTy, Expr **Args, unsigned NumArgs,
+ Expr *Object, Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
@@ -1074,12 +1249,28 @@ public:
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
+ // Emit a 'note' diagnostic for the specified overload candidate.
void NoteOverloadCandidate(FunctionDecl *Fn);
-
- FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *From, QualType ToType,
+
+ // Emit a series of 'note' diagnostics for all of the template and
+ // non-template overload candidates identified by the expression E.
+ void NoteAllOverloadCandidates(Expr* E);
+
+ // [PossiblyAFunctionType] --> [Return]
+ // NonFunctionType --> NonFunctionType
+ // R (A) --> R(A)
+ // R (*)(A) --> R (A)
+ // R (&)(A) --> R (A)
+ // R (S::*)(A) --> R (A)
+ QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
+
+ FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
bool Complain,
DeclAccessPair &Found);
- FunctionDecl *ResolveSingleFunctionTemplateSpecialization(Expr *From);
+
+ FunctionDecl *ResolveSingleFunctionTemplateSpecialization(Expr *From,
+ bool Complain = false,
+ DeclAccessPair* Found = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
@@ -1097,8 +1288,8 @@ public:
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ Expr *ExecConfig);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
@@ -1117,12 +1308,10 @@ public:
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc, Expr **Args,
- unsigned NumArgs, SourceLocation *CommaLocs,
- SourceLocation RParenLoc);
+ unsigned NumArgs, SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
@@ -1135,7 +1324,8 @@ public:
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
- bool CheckParmsForFunctionDef(FunctionDecl *FD);
+ bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd,
+ bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
@@ -1176,13 +1366,14 @@ public:
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
+ /// Label name lookup.
+ LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
- // Look up of an operator name (e.g., operator+) for use with
- // operator overloading. This lookup is similar to ordinary name
- // lookup, but will ignore any declarations that are class
- // members.
+ /// Look up of an operator name (e.g., operator+) for use with
+ /// operator overloading. This lookup is similar to ordinary name
+ /// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
@@ -1245,6 +1436,9 @@ public:
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
+ LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
+ bool isLocalLabel = false);
+
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
@@ -1277,6 +1471,10 @@ public:
CTC_CXXCasts,
/// \brief A member lookup context.
CTC_MemberLookup,
+ /// \brief An Objective-C ivar lookup context (e.g., self->ivar).
+ CTC_ObjCIvarLookup,
+ /// \brief An Objective-C property lookup context (e.g., self.prop).
+ CTC_ObjCPropertyLookup,
/// \brief The receiver of an Objective-C message send within an
/// Objective-C method where 'super' is a valid keyword.
CTC_ObjCMessageReceiver
@@ -1308,8 +1506,14 @@ public:
// More parsing and symbol table subroutines.
// Decl attributes - this routine is the top level dispatcher.
- void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
- void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL);
+ void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
+ bool NonInheritable = true, bool Inheritable = true);
+ void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
+ bool NonInheritable = true, bool Inheritable = true);
+
+ bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
+ bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC);
+ bool CheckNoReturnAttr(const AttributeList &attr);
void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
bool &IncompleteImpl, unsigned DiagID);
@@ -1338,6 +1542,14 @@ public:
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
+ /// \brief Determine whether we can synthesize a provisional ivar for the
+ /// given name.
+ ObjCPropertyDecl *canSynthesizeProvisionalIvar(IdentifierInfo *II);
+
+ /// \brief Determine whether we can synthesize a provisional ivar for the
+ /// given property.
+ bool canSynthesizeProvisionalIvar(ObjCPropertyDecl *Property);
+
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category @implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
@@ -1484,7 +1696,7 @@ public:
// FIXME: The const_cast here is ugly. RValue references would make this
// much nicer (or we could duplicate a bunch of the move semantics
// emulation code from Ownership.h).
- FullExprArg(const FullExprArg& Other): E(Other.E) {}
+ FullExprArg(const FullExprArg& Other) : E(Other.E) {}
ExprResult release() {
return move(E);
@@ -1498,7 +1710,7 @@ public:
private:
// FIXME: No need to make the entire Sema class a friend when it's just
- // Sema::FullExpr that needs access to the constructor below.
+ // Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
@@ -1512,7 +1724,8 @@ public:
StmtResult ActOnExprStmt(FullExprArg Expr);
- StmtResult ActOnNullStmt(SourceLocation SemiLoc);
+ StmtResult ActOnNullStmt(SourceLocation SemiLoc,
+ bool LeadingEmptyMacro = false);
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg Elts,
bool isStmtExpr);
@@ -1520,6 +1733,7 @@ public:
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
+ StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
@@ -1528,22 +1742,21 @@ public:
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
- StmtResult ActOnLabelStmt(SourceLocation IdentLoc,
- IdentifierInfo *II,
- SourceLocation ColonLoc,
- Stmt *SubStmt);
+ StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
+ SourceLocation ColonLoc, Stmt *SubStmt);
+
StmtResult ActOnIfStmt(SourceLocation IfLoc,
- FullExprArg CondVal, Decl *CondVar,
- Stmt *ThenVal,
- SourceLocation ElseLoc, Stmt *ElseVal);
+ FullExprArg CondVal, Decl *CondVar,
+ Stmt *ThenVal,
+ SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
- FullExprArg Cond,
- Decl *CondVar, Stmt *Body);
+ FullExprArg Cond,
+ Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
@@ -1563,13 +1776,16 @@ public:
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
- IdentifierInfo *LabelII);
+ LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation GotoLoc, Scope *CurScope);
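+ /// \brief Determine whether the given expression names a candidate for
+ /// copy elision, i.e., a local variable of the function's return type,
+ /// e.g.:
+ /// \code
+ /// std::string f() { std::string s; return s; } // 's' is the candidate
+ /// \endcode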
+ const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
+ bool AllowFunctionParameters);
+
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
@@ -1606,11 +1822,10 @@ public:
Expr *SynchExpr,
Stmt *SynchBody);
- VarDecl *BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
+ VarDecl *BuildExceptionDeclaration(Scope *S,
TypeSourceInfo *TInfo,
IdentifierInfo *Name,
- SourceLocation Loc,
- SourceRange Range);
+ SourceLocation Loc);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
@@ -1630,18 +1845,31 @@ public:
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedDecl(const NamedDecl *ND);
- typedef uintptr_t ParsingDeclStackState;
+ ParsingDeclState PushParsingDeclaration() {
+ return DelayedDiagnostics.pushParsingDecl();
+ }
+ void PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
+ DelayedDiagnostics::popParsingDecl(*this, state, decl);
+ }
+
+ typedef ProcessingContextState ParsingClassState;
+ ParsingClassState PushParsingClass() {
+ return DelayedDiagnostics.pushContext();
+ }
+ void PopParsingClass(ParsingClassState state) {
+ DelayedDiagnostics.popContext(state);
+ }
- ParsingDeclStackState PushParsingDeclaration();
- void PopParsingDeclaration(ParsingDeclStackState S, Decl *D);
- void EmitDeprecationWarning(NamedDecl *D, SourceLocation Loc);
+ void EmitDeprecationWarning(NamedDecl *D, llvm::StringRef Message,
+ SourceLocation Loc, bool UnknownObjCClass=false);
void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
- bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc);
+ bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+ bool UnknownObjCClass=false);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
@@ -1654,6 +1882,7 @@ public:
void MarkDeclarationReferenced(SourceLocation Loc, Decl *D);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
+ void MarkDeclarationsReferencedInExpr(Expr *E);
bool DiagRuntimeBehavior(SourceLocation Loc, const PartialDiagnostic &PD);
// Primary Expressions.
@@ -1674,18 +1903,19 @@ public:
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
+ ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = 0);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
+ ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = 0);
- VarDecl *BuildAnonymousStructUnionMemberPath(FieldDecl *Field,
- llvm::SmallVectorImpl<FieldDecl *> &Path);
ExprResult
- BuildAnonymousStructUnionMemberReference(SourceLocation Loc,
- FieldDecl *Field,
- Expr *BaseObjectExpr = 0,
- SourceLocation OpLoc = SourceLocation());
+ BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation nameLoc,
+ IndirectFieldDecl *indirectField,
+ Expr *baseObjectExpr = 0,
+ SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
@@ -1724,7 +1954,7 @@ public:
ExprResult ActOnStringLiteral(const Token *Toks, unsigned NumToks);
// Binary/Unary Operators. 'Tok' is the token for the operator.
- ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
+ ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputArg);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *input);
@@ -1740,10 +1970,15 @@ public:
ActOnSizeOfAlignOfExpr(SourceLocation OpLoc, bool isSizeof, bool isType,
void *TyOrEx, const SourceRange &ArgRange);
- bool CheckAlignOfExpr(Expr *E, SourceLocation OpLoc, const SourceRange &R);
- bool CheckSizeOfAlignOfOperand(QualType type, SourceLocation OpLoc,
- const SourceRange &R, bool isSizeof);
+ ExprResult CheckPlaceholderExpr(Expr *E, SourceLocation Loc);
+ bool CheckSizeOfAlignOfOperand(QualType type, SourceLocation OpLoc,
+ SourceRange R, bool isSizeof);
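+ /// \brief Called when 'sizeof...(Name)' is parsed, where 'Name' names a
+ /// parameter pack, e.g.:
+ /// \code
+ /// template<typename ...Ts>
+ /// struct count { static const unsigned value = sizeof...(Ts); };
+ /// \endcode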
+ ExprResult ActOnSizeofParameterPackExpr(Scope *S,
+ SourceLocation OpLoc,
+ IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
@@ -1803,12 +2038,16 @@ public:
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
- MultiExprArg Args, SourceLocation *CommaLocs,
- SourceLocation RParenLoc);
+ MultiExprArg Args, SourceLocation RParenLoc,
+ Expr *ExecConfig = 0);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = 0);
+
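+ /// \brief Build the execution configuration of a CUDA kernel call, e.g.
+ /// the '<<<grid, block>>>' part of 'kernel<<<grid, block>>>(args)';
+ /// LLLLoc and GGGLoc are the locations of the '<<<' and '>>>' tokens.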
+ ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg ExecConfig, SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
ParsedType Ty, SourceLocation RParenLoc,
@@ -1851,7 +2090,7 @@ public:
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *lhs, Expr *rhs);
ExprResult CreateBuiltinBinOp(SourceLocation TokLoc,
- unsigned Opc, Expr *lhs, Expr *rhs);
+ BinaryOperatorKind Opc, Expr *lhs, Expr *rhs);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
@@ -1860,10 +2099,9 @@ public:
Expr *Cond, Expr *LHS, Expr *RHS);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
- ExprResult ActOnAddrLabel(SourceLocation OpLoc,
- SourceLocation LabLoc,
- IdentifierInfo *LabelII);
-
+ ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
+ LabelDecl *LD);
+
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
@@ -1891,16 +2129,6 @@ public:
unsigned NumComponents,
SourceLocation RParenLoc);
- // __builtin_types_compatible_p(type1, type2)
- ExprResult ActOnTypesCompatibleExpr(SourceLocation BuiltinLoc,
- ParsedType arg1,
- ParsedType arg2,
- SourceLocation RPLoc);
- ExprResult BuildTypesCompatibleExpr(SourceLocation BuiltinLoc,
- TypeSourceInfo *argTInfo1,
- TypeSourceInfo *argTInfo2,
- SourceLocation RPLoc);
-
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *cond, Expr *expr1,
@@ -1993,6 +2221,8 @@ public:
bool IsTypeName,
SourceLocation TypenameLoc);
+ bool CheckInheritedConstructorUsingDecl(UsingDecl *UD);
+
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
@@ -2009,8 +2239,8 @@ public:
void AddCXXDirectInitializerToDecl(Decl *Dcl,
SourceLocation LParenLoc,
MultiExprArg Exprs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ bool TypeMayContainAuto);
/// InitializeVarWithConstructor - Creates a CXXConstructExpr
/// and sets it as the initializer for the passed-in VarDecl.
@@ -2025,7 +2255,8 @@ public:
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
- bool RequiresZeroInit, unsigned ConstructKind);
+ bool RequiresZeroInit, unsigned ConstructKind,
+ SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
@@ -2033,7 +2264,8 @@ public:
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool RequiresZeroInit,
- unsigned ConstructKind);
+ unsigned ConstructKind,
+ SourceRange ParenRange);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
@@ -2072,6 +2304,12 @@ public:
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
+ /// \brief Declare all inherited constructors for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the inherited
+ /// constructors will be added.
+ void DeclareInheritedConstructors(CXXRecordDecl *ClassDecl);
+
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param S The scope of the class, which may be NULL if this is a
@@ -2156,8 +2394,29 @@ public:
void *TyOrExpr,
SourceLocation RParenLoc);
+ ExprResult BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc);
+ ExprResult BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXUuidof - Parse __uuidof( something ).
+ ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
+ SourceLocation LParenLoc, bool isType,
+ void *TyOrExpr,
+ SourceLocation RParenLoc);
+
/// ActOnCXXThis - Parse 'this' pointer.
- ExprResult ActOnCXXThis(SourceLocation ThisLoc);
+ ExprResult ActOnCXXThis(SourceLocation loc);
+
+ /// tryCaptureCXXThis - Try to capture a 'this' pointer. Returns a
+ /// pointer to an instance method whose 'this' pointer is
+ /// capturable, or null if this is not possible.
+ CXXMethodDecl *tryCaptureCXXThis();
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
@@ -2173,11 +2432,14 @@ public:
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
- ExprResult ActOnCXXTypeConstructExpr(SourceRange TypeRange,
- ParsedType TypeRep,
+ ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
- SourceLocation *CommaLocs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
@@ -2195,12 +2457,12 @@ public:
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
- SourceLocation TypeLoc,
- SourceRange TypeRange,
+ TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen);
+ SourceLocation ConstructorRParen,
+ bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
@@ -2231,14 +2493,37 @@ public:
SourceLocation StmtLoc,
bool ConvertToBoolean);
+ ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
+ Expr *Operand, SourceLocation RParen);
+ ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
+ SourceLocation RParen);
+
/// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
SourceLocation KWLoc,
- SourceLocation LParen,
ParsedType Ty,
SourceLocation RParen);
+ ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *T,
+ SourceLocation RParen);
+
+ /// ActOnBinaryTypeTrait - Parsed one of the binary type trait support
+ /// pseudo-functions.
+ ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ ParsedType LhsTy,
+ ParsedType RhsTy,
+ SourceLocation RParen);
+
+ ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *LhsT,
+ TypeSourceInfo *RhsT,
+ SourceLocation RParen);
+
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
@@ -2268,14 +2553,15 @@ public:
UnqualifiedId &SecondTypeName,
bool HasTrailingLParen);
- /// MaybeCreateCXXExprWithTemporaries - If the list of temporaries is
- /// non-empty, will create a new CXXExprWithTemporaries expression.
- /// Otherwise, just returs the passed in expression.
- Expr *MaybeCreateCXXExprWithTemporaries(Expr *SubExpr);
- ExprResult MaybeCreateCXXExprWithTemporaries(ExprResult SubExpr);
- FullExpr CreateFullExpr(Expr *SubExpr);
+ /// MaybeCreateExprWithCleanups - If the current full-expression
+ /// requires any cleanups, surround it with an ExprWithCleanups node.
+ /// Otherwise, just returns the passed-in expression.
+ Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
+ Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
+ ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr);
+ StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
@@ -2373,9 +2659,8 @@ public:
Expr *BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
- CXXMemberCallExpr *BuildCXXMemberCallExpr(Expr *Exp,
- NamedDecl *FoundDecl,
- CXXMethodDecl *Method);
+ ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
+ CXXMethodDecl *Method);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
@@ -2423,7 +2708,7 @@ public:
Decl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
- Expr *BitfieldWidth,
+ Expr *BitfieldWidth, const VirtSpecifiers &VS,
Expr *Init, bool IsDefinition,
bool Deleted = false);
@@ -2435,10 +2720,10 @@ public:
SourceLocation IdLoc,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc,
+ SourceLocation EllipsisLoc);
- MemInitResult BuildMemberInitializer(FieldDecl *Member, Expr **Args,
+ MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr **Args,
unsigned NumArgs, SourceLocation IdLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
@@ -2448,11 +2733,19 @@ public:
Expr **Args, unsigned NumArgs,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
- CXXRecordDecl *ClassDecl);
-
- bool SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
- CXXBaseOrMemberInitializer **Initializers,
- unsigned NumInitializers, bool AnyErrors);
+ CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc);
+
+ MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ SourceLocation LParenLoc,
+ CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc);
+
+ bool SetCtorInitializers(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Initializers,
+ unsigned NumInitializers, bool AnyErrors);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
@@ -2466,8 +2759,11 @@ public:
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
- llvm::SmallVector<std::pair<CXXRecordDecl *, SourceLocation>, 16>
- VTableUses;
+ typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
+
+ /// \brief The list of vtables that are required but have not yet been
+ /// materialized.
+ llvm::SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
@@ -2546,20 +2842,15 @@ public:
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
- TypeSourceInfo *TInfo);
-
- /// SetClassDeclAttributesFromBase - Copies class decl traits
- /// (such as whether the class has a trivial constructor,
- /// trivial destructor etc) from the given base class.
- void SetClassDeclAttributesFromBase(CXXRecordDecl *Class,
- const CXXRecordDecl *BaseClass,
- bool BaseIsVirtual);
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
- ParsedType basetype, SourceLocation
- BaseLoc);
+ ParsedType basetype,
+ SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
@@ -2596,13 +2887,18 @@ public:
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
- /// CheckOverridingFunctionAttributes - Checks whether attributes are
- /// incompatible or prevent overriding.
- bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
- const CXXMethodDecl *Old);
-
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
+ /// CheckOverrideControl - Check C++0x override control semantics.
+ void CheckOverrideControl(const Decl *D);
+
+ /// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+ /// function overrides a virtual member function marked 'final', according to
+ /// C++0x [class.virtual]p3.
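+ ///
+ /// For example:
+ /// \code
+ /// struct A { virtual void f() final; };
+ /// struct B : A { virtual void f(); }; // error: overrides a 'final' function
+ /// \endcode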
+ bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
//===--------------------------------------------------------------------===//
// C++ Access Control
//
@@ -2661,6 +2957,10 @@ public:
/// A flag to suppress access checking.
bool SuppressAccessChecking;
+ /// \brief When true, access checking violations are treated as SFINAE
+ /// failures rather than hard errors.
+ bool AccessCheckingSFINAE;
+
void ActOnStartSuppressingAccessChecks();
void ActOnStopSuppressingAccessChecks();
@@ -2732,12 +3032,13 @@ public:
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParamsTy *Params,
+ SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
- const ParsedTemplateArgument &DefaultArg);
+ ParsedTemplateArgument DefaultArg);
TemplateParamsTy *
ActOnTemplateParameterList(unsigned Depth,
@@ -2753,7 +3054,8 @@ public:
TPC_ClassTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
- TPC_FriendFunctionTemplate
+ TPC_FriendFunctionTemplate,
+ TPC_FriendFunctionTemplateDefinition
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
@@ -2788,10 +3090,11 @@ public:
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
- TypeResult ActOnTagTemplateIdType(TypeResult Type,
- TagUseKind TUK,
- TypeSpecifierType TagSpec,
- SourceLocation TagLoc);
+ TypeResult ActOnTagTemplateIdType(CXXScopeSpec &SS,
+ TypeResult Type,
+ TagUseKind TUK,
+ TypeSpecifierType TagSpec,
+ SourceLocation TagLoc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
LookupResult &R,
@@ -2809,11 +3112,6 @@ public:
bool EnteringContext,
TemplateTy &Template);
- bool CheckClassTemplatePartialSpecializationArgs(
- TemplateParameterList *TemplateParams,
- const TemplateArgumentListBuilder &TemplateArgs,
- bool &MirrorsPrimaryTemplate);
-
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
@@ -2886,7 +3184,7 @@ public:
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
- TemplateArgumentListBuilder &Converted);
+ llvm::SmallVectorImpl<TemplateArgument> &Converted);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
@@ -2906,21 +3204,22 @@ public:
bool CheckTemplateArgument(NamedDecl *Param,
const TemplateArgumentLoc &Arg,
- TemplateDecl *Template,
+ NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
- TemplateArgumentListBuilder &Converted,
+ unsigned ArgumentPackIndex,
+ llvm::SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
- TemplateArgumentListBuilder &Converted);
+ llvm::SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
const TemplateArgumentLoc &Arg,
- TemplateArgumentListBuilder &Converted);
+ llvm::SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
@@ -3034,6 +3333,289 @@ public:
const TemplateArgument *Args,
unsigned NumArgs);
+ //===--------------------------------------------------------------------===//
+ // C++ Variadic Templates (C++0x [temp.variadic])
+ //===--------------------------------------------------------------------===//
+
+ /// \brief The context in which an unexpanded parameter pack is
+ /// being diagnosed.
+ ///
+ /// Note that the values of this enumeration line up with the first
+ /// argument to the \c err_unexpanded_parameter_pack diagnostic.
+ enum UnexpandedParameterPackContext {
+ /// \brief An arbitrary expression.
+ UPPC_Expression = 0,
+
+ /// \brief The base type of a class type.
+ UPPC_BaseType,
+
+ /// \brief The type of an arbitrary declaration.
+ UPPC_DeclarationType,
+
+ /// \brief The type of a data member.
+ UPPC_DataMemberType,
+
+ /// \brief The size of a bit-field.
+ UPPC_BitFieldWidth,
+
+ /// \brief The expression in a static assertion.
+ UPPC_StaticAssertExpression,
+
+ /// \brief The fixed underlying type of an enumeration.
+ UPPC_FixedUnderlyingType,
+
+ /// \brief The enumerator value.
+ UPPC_EnumeratorValue,
+
+ /// \brief A using declaration.
+ UPPC_UsingDeclaration,
+
+ /// \brief A friend declaration.
+ UPPC_FriendDeclaration,
+
+ /// \brief A declaration qualifier.
+ UPPC_DeclarationQualifier,
+
+ /// \brief An initializer.
+ UPPC_Initializer,
+
+ /// \brief A default argument.
+ UPPC_DefaultArgument,
+
+ /// \brief The type of a non-type template parameter.
+ UPPC_NonTypeTemplateParameterType,
+
+ /// \brief The type of an exception.
+ UPPC_ExceptionType,
+
+ /// \brief Partial specialization.
+ UPPC_PartialSpecialization
+ };
+
+ /// \brief If the given type contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param Loc The source location where a diagnostic should be emitted.
+ ///
+ /// \param T The type that is being checked for unexpanded parameter
+ /// packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
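+ ///
+ /// For example:
+ /// \code
+ /// template<typename ...Ts>
+ /// struct X {
+ /// Ts member; // error: unexpanded parameter pack 'Ts'
+ /// };
+ /// \endcode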
+ bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given expression contains an unexpanded parameter
+ /// pack, diagnose the error.
+ ///
+ /// \param E The expression that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(Expr *E,
+ UnexpandedParameterPackContext UPPC = UPPC_Expression);
+
+ /// \brief If the given nested-name-specifier contains an unexpanded
+ /// parameter pack, diagnose the error.
+ ///
+ /// \param SS The nested-name-specifier that is being checked for
+ /// unexpanded parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given name contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param NameInfo The name (with source location information) that
+ /// is being checked for unexpanded parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given template name contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param Loc The location of the template name.
+ ///
+ /// \param Template The template name that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TemplateName Template,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given template argument contains an unexpanded parameter
+ /// pack, diagnose the error.
+ ///
+ /// \param Arg The template argument that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// template argument.
+ ///
+ /// \param Arg The template argument that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TemplateArgument Arg,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// template argument.
+ ///
+ /// \param Arg The template argument that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// type.
+ ///
+ /// \param T The type that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(QualType T,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// type.
+ ///
+ /// \param TL The type that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TypeLoc TL,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Invoked when parsing a template argument followed by an
+ /// ellipsis, which creates a pack expansion.
+ ///
+ /// \param Arg The template argument preceding the ellipsis, which
+ /// may already be invalid.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
+ SourceLocation EllipsisLoc);
+
+ /// \brief Invoked when parsing a type followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Type The type preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
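+ ///
+ /// For example, the ellipsis following 'Ts' in:
+ /// \code
+ /// template<typename ...Ts> void f(Ts ...args);
+ /// \endcode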
+ TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
+
+ /// \brief Construct a pack expansion type from the pattern of the pack
+ /// expansion.
+ TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Construct a pack expansion type from the pattern of the pack
+ /// expansion.
+ QualType CheckPackExpansion(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Invoked when parsing an expression followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Pattern The expression preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
+
+ /// \brief Invoked when parsing an expression followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Pattern The expression preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Determine whether we could expand a pack expansion with the
+ /// given set of parameter packs into separate arguments by repeatedly
+ /// transforming the pattern.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis that identifies the
+ /// pack expansion.
+ ///
+ /// \param PatternRange The source range that covers the entire pattern of
+ /// the pack expansion.
+ ///
+ /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// pattern.
+ ///
+ /// \param NumUnexpanded The number of unexpanded parameter packs in
+ /// \p Unexpanded.
+ ///
+ /// \param ShouldExpand Will be set to \c true if the transformer should
+ /// expand the corresponding pack expansions into separate arguments. When
+ /// set, \c NumExpansions must also be set.
+ ///
+ /// \param RetainExpansion Whether the caller should add an unexpanded
+ /// pack expansion after all of the expanded arguments. This is used
+ /// when extending explicitly-specified template argument packs per
+ /// C++0x [temp.arg.explicit]p9.
+ ///
+ /// \param NumExpansions The number of separate arguments that will be in
+ /// the expanded form of the corresponding pack expansion. This is both an
+ /// input and an output parameter, which can be set by the caller if the
+ /// number of expansions is known a priori (e.g., due to a prior substitution)
+ /// and will be set by the callee when the number of expansions is known.
+ /// The callee must set this value when \c ShouldExpand is \c true; it may
+ /// set this value in other cases.
+ ///
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// must be set.
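+ ///
+ /// For example, a pattern that expands two parameter packs in lockstep
+ /// requires them to have the same length:
+ /// \code
+ /// template<typename ...Ts, typename ...Us>
+ /// void f(std::pair<Ts, Us> ...pairs); // len(Ts) must equal len(Us)
+ /// \endcode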
+ bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ const UnexpandedParameterPack *Unexpanded,
+ unsigned NumUnexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions);
+
+ /// \brief Determine the number of arguments in the given pack expansion
+ /// type.
+ ///
+ /// This routine already assumes that the pack expansion type can be
+ /// expanded and that the number of arguments in the expansion is
+ /// consistent across all of the unexpanded parameter packs in its pattern.
+ unsigned getNumArgumentsInExpansion(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// \brief Determine whether the given declarator contains any unexpanded
+ /// parameter packs.
+ ///
+ /// This routine is used by the parser to disambiguate function declarators
+ /// with an ellipsis prior to the ')', e.g.,
+ ///
+ /// \code
+ /// void f(T...);
+ /// \endcode
+ ///
+ /// The goal is to determine whether we have an (unnamed) function
+ /// parameter pack or a variadic function.
+ ///
+ /// \returns true if the declarator contains any unexpanded parameter packs,
+ /// false otherwise.
+ bool containsUnexpandedParameterPacks(Declarator &D);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Template Argument Deduction (C++ [temp.deduct])
+ //===--------------------------------------------------------------------===//
+
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
@@ -3128,17 +3710,22 @@ public:
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
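+ /// \brief Deduce the type of a variable declared with the C++0x 'auto'
+ /// specifier from its initializer, e.g., deducing 'int' for
+ /// 'auto x = 42;'.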
+ bool DeduceAutoType(QualType AutoType, Expr *Initializer, QualType &Result);
+
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
- TemplatePartialOrderingContext TPOC);
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments);
UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin,
UnresolvedSetIterator SEnd,
TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
- const PartialDiagnostic &CandidateDiag);
+ const PartialDiagnostic &CandidateDiag,
+ bool Complain = true);
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
@@ -3206,9 +3793,10 @@ public:
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
- /// \brief The template in which we are performing the instantiation,
- /// for substitutions of prior template arguments.
- TemplateDecl *Template;
+ /// \brief The template (or partial specialization) in which we are
+ /// performing the instantiation, for substitutions of prior template
+ /// arguments.
+ NamedDecl *Template;
/// \brief The entity that is being instantiated.
uintptr_t Entity;
@@ -3220,6 +3808,10 @@ public:
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
+ /// \brief The template deduction info object associated with the
+ /// substitution or checking of explicit or deduced template arguments.
+ sema::TemplateDeductionInfo *DeductionInfo;
+
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
@@ -3227,7 +3819,7 @@ public:
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0),
- NumTemplateArgs(0) {}
+ NumTemplateArgs(0), DeductionInfo(0) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
@@ -3278,6 +3870,13 @@ public:
llvm::SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
+ /// \brief Whether we are in a SFINAE context that is not associated with
+ /// template instantiation.
+ ///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
+ /// of a template instantiation or template argument deduction.
+ bool InNonInstantiationSFINAEContext;
+
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
@@ -3292,12 +3891,49 @@ public:
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
+ /// \brief The current index into pack expansion arguments that will be
+ /// used for substitution of parameter packs.
+ ///
+ /// The pack expansion index will be -1 to indicate that parameter packs
+ /// should be instantiated as themselves. Otherwise, the index specifies
+ /// which argument within the parameter pack will be used for substitution.
+ int ArgumentPackSubstitutionIndex;
+
+ /// \brief RAII object used to change the argument pack substitution index
+ /// within a \c Sema object.
+ ///
+ /// See \c ArgumentPackSubstitutionIndex for more information.
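+ ///
+ /// Typical usage:
+ /// \code
+ /// ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ /// // substitutions in this scope use the I-th element of each pack;
+ /// // the previous index is restored when SubstIndex goes out of scope
+ /// \endcode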
+ class ArgumentPackSubstitutionIndexRAII {
+ Sema &Self;
+ int OldSubstitutionIndex;
+
+ public:
+ ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
+ : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
+ Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
+ }
+
+ ~ArgumentPackSubstitutionIndexRAII() {
+ Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
+ }
+ };
+
+ friend class ArgumentPackSubstitutionIndexRAII;
+
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
llvm::SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
-
+
+ /// \brief For each declaration that involved template argument deduction, the
+ /// set of diagnostics that were suppressed during that template argument
+ /// deduction.
+ ///
+ /// FIXME: Serialize this structure to the AST file.
+ llvm::DenseMap<Decl *, llvm::SmallVector<PartialDiagnosticAt, 1> >
+ SuppressedDiagnostics;
+
/// \brief A stack object to be created when performing template
/// instantiation.
///
@@ -3331,6 +3967,7 @@ public:
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
@@ -3340,6 +3977,7 @@ public:
ClassTemplatePartialSpecializationDecl *PartialSpec,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
@@ -3351,14 +3989,14 @@ public:
/// \brief Note that we are substituting prior template arguments into a
/// non-type or template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
+ NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
SourceRange InstantiationRange);
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
+ NamedDecl *Template,
TemplateTemplateParmDecl *Param,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
@@ -3386,7 +4024,7 @@ public:
private:
Sema &SemaRef;
bool Invalid;
-
+ bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
@@ -3402,21 +4040,39 @@ public:
/// template argument substitution failures are not considered
/// errors.
///
- /// When this routine returns true, the emission of most diagnostics
- /// will be suppressed and there will be no local error recovery.
- bool isSFINAEContext() const;
+ /// \returns An empty \c llvm::Optional if we're not in a SFINAE context.
+ /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
+ /// template-deduction context object, which can be used to capture
+ /// diagnostics that will be suppressed.
+ llvm::Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
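+ ///
+ /// Typical usage:
+ /// \code
+ /// SFINAETrap Trap(*this);
+ /// // ...substitute template arguments...
+ /// if (Trap.hasErrorOccurred())
+ /// return TDK_SubstitutionFailure;
+ /// \endcode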
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
+ bool PrevInNonInstantiationSFINAEContext;
+ bool PrevAccessCheckingSFINAE;
+
public:
- explicit SFINAETrap(Sema &SemaRef)
- : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors) { }
+ explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
+ : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
+ PrevInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext),
+ PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
+ {
+ if (!SemaRef.isSFINAEContext())
+ SemaRef.InNonInstantiationSFINAEContext = true;
+ SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
+ }
- ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; }
+ ~SFINAETrap() {
+ SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
+ SemaRef.InNonInstantiationSFINAEContext
+ = PrevInNonInstantiationSFINAEContext;
+ SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
+ }
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
@@ -3424,24 +4080,6 @@ public:
}
};
- /// \brief RAII class that determines when any errors have occurred
- /// between the time the instance was created and the time it was
- /// queried.
- class ErrorTrap {
- Sema &SemaRef;
- unsigned PrevErrors;
-
- public:
- explicit ErrorTrap(Sema &SemaRef)
- : SemaRef(SemaRef), PrevErrors(SemaRef.getDiagnostics().getNumErrors()) {}
-
- /// \brief Determine whether any errors have occurred since this
- /// object instance was created.
- bool hasErrorOccurred() const {
- return SemaRef.getDiagnostics().getNumErrors() > PrevErrors;
- }
- };
-
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
@@ -3449,6 +4087,17 @@ public:
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
+ typedef llvm::DenseMap<IdentifierInfo *, std::pair<llvm::StringRef, bool> >
+ UnqualifiedTyposCorrectedMap;
+
+ /// \brief A cache containing the results of typo correction for unqualified
+ /// name lookup.
+ ///
+ /// The string is the string that we corrected to (which may be empty, if
+ /// there was no correction), while the boolean will be true when the
+ /// string represents a keyword.
+ UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
+
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
@@ -3485,14 +4134,43 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
+ TypeSourceInfo *SubstType(TypeLoc TL,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity);
+
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
- const MultiLevelTemplateArgumentList &TemplateArgs);
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::Optional<unsigned> NumExpansions);
+ bool SubstParmTypes(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::SmallVectorImpl<QualType> &ParamTypes,
+ llvm::SmallVectorImpl<ParmVarDecl *> *OutParams = 0);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// \brief Substitute the given template arguments into a list of
+ /// expressions, expanding pack expansions if required.
+ ///
+ /// \param Exprs The list of expressions to substitute into.
+ ///
+ /// \param NumExprs The number of expressions in \p Exprs.
+ ///
+ /// \param IsCall Whether this is some form of call, in which case
+ /// default arguments will be dropped.
+ ///
+ /// \param TemplateArgs The set of template arguments to substitute.
+ ///
+ /// \param Outputs Will receive all of the substituted arguments.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
@@ -3541,7 +4219,8 @@ public:
TemplateName
SubstTemplateName(TemplateName Name, SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
- bool Subst(const TemplateArgumentLoc &Arg, TemplateArgumentLoc &Result,
+ bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
@@ -3636,7 +4315,19 @@ public:
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes);
- void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *DC);
+
+ /// Process the specified property declaration and create decls for the
+ /// setters and getters as needed.
+ /// \param property The property declaration being processed
+ /// \param DC The semantic container for the property
+ /// \param redeclaredProperty Declaration for property if redeclared
+ /// in class extension.
+ /// \param lexicalDC Container for redeclaredProperty.
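+ /// For example, a read-only property redeclared as read-write in a class
+ /// extension:
+ /// \code
+ /// @interface Foo
+ /// @property (readonly) int value;
+ /// @end
+ /// @interface Foo ()
+ /// @property (readwrite) int value; // redeclaredProperty
+ /// @end
+ /// \endcode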
+ void ProcessPropertyDecl(ObjCPropertyDecl *property,
+ ObjCContainerDecl *DC,
+ ObjCPropertyDecl *redeclaredProperty = 0,
+ ObjCContainerDecl *lexicalDC = 0);
+
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name);
@@ -3664,14 +4355,16 @@ public:
Selector GetterSel, Selector SetterSel,
Decl *ClassCategory,
bool *OverridingProperty,
- tok::ObjCKeywordKind MethodImplKind);
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC = 0);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,Decl *ClassImplDecl,
IdentifierInfo *PropertyId,
- IdentifierInfo *PropertyIvar);
+ IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc);
struct ObjCArgInfo {
IdentifierInfo *Name;
@@ -3686,6 +4379,7 @@ public:
};
Decl *ActOnMethodDeclaration(
+ Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
@@ -3711,7 +4405,9 @@ public:
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
DeclarationName MemberName,
- SourceLocation MemberLoc);
+ SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType,
+ bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
@@ -3719,6 +4415,8 @@ public:
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
+ ObjCMethodDecl *tryCaptureObjCSelf();
+
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
@@ -3751,6 +4449,7 @@ public:
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
SourceLocation RBracLoc,
MultiExprArg Args);
@@ -3768,6 +4467,7 @@ public:
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
SourceLocation RBracLoc,
MultiExprArg Args);
@@ -3810,11 +4510,9 @@ public:
SourceLocation RParenLoc);
/// ActOnPragmaUnused - Called on well-formed '#pragma unused'.
- void ActOnPragmaUnused(const Token *Identifiers,
- unsigned NumIdentifiers, Scope *curScope,
- SourceLocation PragmaLoc,
- SourceLocation LParenLoc,
- SourceLocation RParenLoc);
+ void ActOnPragmaUnused(const Token &Identifier,
+ Scope *curScope,
+ SourceLocation PragmaLoc);
   /// ActOnPragmaVisibility - Called on well-formed '#pragma GCC visibility ...'.
void ActOnPragmaVisibility(bool IsPush, const IdentifierInfo* VisType,
@@ -3835,6 +4533,10 @@ public:
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
+ /// ActOnPragmaFPContract - Called on well formed
+ /// #pragma {STDC,OPENCL} FP_CONTRACT
+ void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
+
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
   /// the record decl, to handle '#pragma pack' and '#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
@@ -3842,9 +4544,9 @@ public:
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
- /// PushVisibilityAttr - Note that we've entered a context with a
- /// visibility attribute.
- void PushVisibilityAttr(const VisibilityAttr *Attr);
+ /// PushNamespaceVisibilityAttr - Note that we've entered a
+ /// namespace with a visibility attribute.
+ void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr);
/// AddPushedVisibilityAttribute - If '#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
@@ -3872,6 +4574,11 @@ public:
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = 0);
+ /// IgnoredValueConversions - Given that an expression's result is
+ /// syntactically ignored, perform any conversions that are
+ /// required.
+ void IgnoredValueConversions(Expr *&expr);
+
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
Expr *UsualUnaryConversions(Expr *&expr);
@@ -3885,6 +4592,12 @@ public:
// lvalue-to-rvalue conversion.
void DefaultFunctionArrayLvalueConversion(Expr *&expr);
+ // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
+ // the operand. This is DefaultFunctionArrayLvalueConversion,
+ // except that it assumes the operand isn't of function or array
+ // type.
+ void DefaultLvalueConversion(Expr *&expr);
+
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
@@ -3957,6 +4670,11 @@ public:
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
+ /// IncompatiblePointerDiscardsQualifiers - The assignment
+ /// discards qualifiers that we don't permit to be discarded,
+ /// like address spaces.
+ IncompatiblePointerDiscardsQualifiers,
+
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
@@ -3996,8 +4714,14 @@ public:
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
- /// This routine is only used by the following two methods. C99 6.5.16.
- AssignConvertType CheckAssignmentConstraints(QualType lhs, QualType rhs);
+ /// C99 6.5.16.
+ AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
+ QualType lhs, QualType rhs);
+
+ /// Check assignment constraints and prepare for a conversion of the
+ /// RHS to the LHS type.
+ AssignConvertType CheckAssignmentConstraints(QualType lhs, Expr *&rhs,
+ CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
@@ -4010,18 +4734,6 @@ public:
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType lhs,
Expr *&rExpr);
- // Helper function for CheckAssignmentConstraints (C99 6.5.16.1p1)
- AssignConvertType CheckPointerTypesForAssignment(QualType lhsType,
- QualType rhsType);
-
- AssignConvertType CheckObjCPointerTypesForAssignment(QualType lhsType,
- QualType rhsType);
-
- // Helper function for CheckAssignmentConstraints involving two
- // block pointer types.
- AssignConvertType CheckBlockPointerTypesForAssignment(QualType lhsType,
- QualType rhsType);
-
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
@@ -4036,10 +4748,11 @@ public:
bool PerformImplicitConversion(Expr *&From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
- bool IgnoreBaseAccess = false);
+ bool CStyle = false);
bool PerformImplicitConversion(Expr *&From, QualType ToType,
const StandardConversionSequence& SCS,
- AssignmentAction Action,bool IgnoreBaseAccess);
+ AssignmentAction Action,
+ bool CStyle);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
@@ -4047,7 +4760,8 @@ public:
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation l, Expr *&lex, Expr *&rex);
QualType CheckPointerToMemberOperands( // C++ 5.5
- Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isIndirect);
+ Expr *&lex, Expr *&rex, ExprValueKind &VK,
+ SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign,
bool isDivide);
@@ -4071,36 +4785,30 @@ public:
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *lex, Expr *&rex, SourceLocation OpLoc, QualType convertedType);
- QualType CheckCommaOperands( // C99 6.5.17
- Expr *lex, Expr *&rex, SourceLocation OpLoc);
+
+ void ConvertPropertyForRValue(Expr *&E);
+ void ConvertPropertyForLValue(Expr *&LHS, Expr *&RHS, QualType& LHSTy);
+
QualType CheckConditionalOperands( // C99 6.5.15
- Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
+ Expr *&cond, Expr *&lhs, Expr *&rhs,
+ ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
- Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
+ Expr *&cond, Expr *&lhs, Expr *&rhs,
+ ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = 0);
QualType FindCompositeObjCPointerType(Expr *&LHS, Expr *&RHS,
SourceLocation questionLoc);
+ bool DiagnoseConditionalForNull(Expr *LHS, Expr *RHS,
+ SourceLocation QuestionLoc);
+
/// type checking for vector binary operators.
QualType CheckVectorOperands(SourceLocation l, Expr *&lex, Expr *&rex);
QualType CheckVectorCompareOperands(Expr *&lex, Expr *&rx,
SourceLocation l, bool isRel);
- /// type checking unary operators (subroutines of ActOnUnaryOp).
- /// C99 6.5.3.1, 6.5.3.2, 6.5.3.4
- QualType CheckIncrementDecrementOperand(Expr *op, SourceLocation OpLoc,
- bool isInc, bool isPrefix);
- QualType CheckAddressOfOperand(Expr *op, SourceLocation OpLoc);
- QualType CheckIndirectionOperand(Expr *op, SourceLocation OpLoc);
- QualType CheckRealImagOperand(Expr *&Op, SourceLocation OpLoc, bool isReal);
-
- /// type checking primary expressions.
- QualType CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
- const IdentifierInfo *Comp,
- SourceLocation CmpLoc);
-
/// type checking declaration initializers (C99 6.7.8)
bool CheckInitList(const InitializedEntity &Entity,
InitListExpr *&InitList, QualType &DeclType);
@@ -4137,7 +4845,7 @@ public:
/// CheckCastTypes - Check type constraints for casting between types under
/// C semantics, or forward to CXXCheckCStyleCast in C++.
bool CheckCastTypes(SourceRange TyRange, QualType CastTy, Expr *&CastExpr,
- CastKind &Kind, CXXCastPath &BasePath,
+ CastKind &Kind, ExprValueKind &VK, CXXCastPath &BasePath,
bool FunctionalStyle = false);
// CheckVectorCast - check type constraints for vectors.
@@ -4157,9 +4865,9 @@ public:
/// CXXCheckCStyleCast - Check constraints of a C-style or function-style
/// cast under C++ semantics.
- bool CXXCheckCStyleCast(SourceRange R, QualType CastTy, Expr *&CastExpr,
- CastKind &Kind, CXXCastPath &BasePath,
- bool FunctionalStyle);
+ bool CXXCheckCStyleCast(SourceRange R, QualType CastTy, ExprValueKind &VK,
+ Expr *&CastExpr, CastKind &Kind,
+ CXXCastPath &BasePath, bool FunctionalStyle);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
@@ -4168,7 +4876,7 @@ public:
bool CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs, Selector Sel,
ObjCMethodDecl *Method, bool isClassMessage,
SourceLocation lbrac, SourceLocation rbrac,
- QualType &ReturnType);
+ QualType &ReturnType, ExprValueKind &VK);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
@@ -4187,6 +4895,10 @@ public:
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
+ /// \brief Redundant parentheses over an equality comparison can indicate
+ /// that the user intended an assignment used as condition.
+ void DiagnoseEqualityWithExtraParens(ParenExpr *parenE);
+
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
bool CheckCXXBooleanCondition(Expr *&CondExpr);
@@ -4202,8 +4914,6 @@ public:
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
- void InitBuiltinVaListType();
-
/// VerifyIntegerConstantExpression - verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
@@ -4256,14 +4966,20 @@ public:
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
- PCC_Type
+ PCC_Type,
+ /// \brief Code completion occurs in a parenthesized expression, which
+ /// might also be a type cast.
+ PCC_ParenthesizedExpression,
+ /// \brief Code completion occurs within a sequence of declaration
+ /// specifiers within a function, method, or block.
+ PCC_LocalDeclarationSpecifiers
};
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
- void CodeCompleteDeclarator(Scope *S,
- bool AllowNonIdentifiers,
- bool AllowNestedNameSpecifiers);
+ void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
@@ -4271,6 +4987,7 @@ public:
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
+ void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
@@ -4287,7 +5004,7 @@ public:
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(Decl *Constructor,
- CXXBaseOrMemberInitializer** Initializers,
+ CXXCtorInitializer** Initializers,
unsigned NumInitializers);
void CodeCompleteObjCAtDirective(Scope *S, Decl *ObjCImpDecl,
@@ -4296,31 +5013,25 @@ public:
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
- void CodeCompleteObjCPropertyGetter(Scope *S, Decl *ClassDecl,
- Decl **Methods,
- unsigned NumMethods);
- void CodeCompleteObjCPropertySetter(Scope *S, Decl *ClassDecl,
- Decl **Methods,
- unsigned NumMethods);
- void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS);
+ void CodeCompleteObjCPropertyGetter(Scope *S, Decl *ClassDecl);
+ void CodeCompleteObjCPropertySetter(Scope *S, Decl *ClassDecl);
+ void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents);
- void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
- IdentifierInfo **SelIdents,
- unsigned NumSelIdents);
+ unsigned NumSelIdents,
+ bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
IdentifierInfo **SelIdents,
unsigned NumSelIdents,
- bool IsSuper);
+ bool AtArgumentExpression,
+ bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents);
- void CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
- IdentifierInfo **SelIdents,
- unsigned NumSelIdents,
- bool IsSuper);
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ ObjCInterfaceDecl *Super = 0);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
@@ -4363,7 +5074,7 @@ public:
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
- void GatherGlobalCodeCompletions(
+ void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
llvm::SmallVectorImpl<CodeCompletionResult> &Results);
//@}
@@ -4376,7 +5087,8 @@ public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
-private:
+private:
+ void CheckArrayAccess(const ArraySubscriptExpr *E);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall);
bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall);
@@ -4385,7 +5097,6 @@ private:
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
@@ -4422,7 +5133,10 @@ private:
void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc);
void CheckFloatComparison(SourceLocation loc, Expr* lex, Expr* rex);
- void CheckImplicitConversions(Expr *E);
+ void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
+
+ void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
+ Expr *Init);
/// \brief The parser's current scope.
///
@@ -4431,6 +5145,7 @@ private:
protected:
friend class Parser;
+ friend class InitializationSequence;
/// \brief Retrieve the parser's current scope.
Scope *getCurScope() const { return CurScope; }
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
index a5a1364..ae5aa33 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
@@ -15,7 +15,7 @@
namespace clang {
namespace diag {
enum {
-#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,CATEGORY) ENUM,
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) ENUM,
#define SEMASTART
#include "clang/Basic/DiagnosticSemaKinds.inc"
#undef DIAG
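The extra ACCESS field threads through the DIAG() X-macro: the .inc file is a generated table of DIAG(...) rows, and each consumer defines DIAG to extract the column it wants before including the table. A minimal self-contained sketch of the idiom, with a hypothetical two-row table inlined in place of the generated .inc:

#include <cstdio>

// Normally this table lives in a generated .inc file; two hypothetical
// rows are inlined here for illustration. Columns mirror the macro:
// DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, ACCESS, CATEGORY)
#define DIAG_TABLE \
  DIAG(warn_unused_value, 0, 1, "expression result unused", 0, 0, 0, 0) \
  DIAG(err_undeclared_ident, 0, 2, "use of undeclared identifier", 0, 0, 0, 0)

// First pass: keep only the ENUM column to build the ID enumeration.
enum DiagID {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, ACCESS, CATEGORY) ENUM,
  DIAG_TABLE
#undef DIAG
  NUM_DIAGS
};

// Second pass: keep only the DESC column, in the same order.
static const char *DiagDescription[] = {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, ACCESS, CATEGORY) DESC,
  DIAG_TABLE
#undef DIAG
};

int main() {
  std::printf("%d: %s\n", warn_unused_value, DiagDescription[warn_unused_value]);
  return 0;
}

Replaying the same table with a second definition of DIAG builds the parallel description array, which is why every column must be threaded through even by consumers that ignore it.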
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Template.h b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
index a7b3b84..53f4a9d 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
@@ -13,8 +13,10 @@
#define LLVM_CLANG_SEMA_TEMPLATE_H
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
+#include <utility>
namespace clang {
/// \brief Data structure that captures multiple levels of template argument
@@ -79,12 +81,21 @@ namespace clang {
return !(*this)(Depth, Index).isNull();
}
+ /// \brief Clear out a specific template argument.
+ void setArgument(unsigned Depth, unsigned Index,
+ TemplateArgument Arg) {
+ assert(Depth < TemplateArgumentLists.size());
+ assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].second);
+ const_cast<TemplateArgument&>(
+ TemplateArgumentLists[getNumLevels() - Depth - 1].first[Index])
+ = Arg;
+ }
+
/// \brief Add a new outermost level to the multi-level template argument
/// list.
void addOuterTemplateArguments(const TemplateArgumentList *TemplateArgs) {
- TemplateArgumentLists.push_back(
- ArgList(TemplateArgs->getFlatArgumentList(),
- TemplateArgs->flat_size()));
+ TemplateArgumentLists.push_back(ArgList(TemplateArgs->data(),
+ TemplateArgs->size()));
}
   /// \brief Add a new outermost level to the multi-level template argument
@@ -140,7 +151,7 @@ namespace clang {
: TemplateArgument(Arg), DeducedFromArrayBound(DeducedFromArrayBound) { }
/// \brief Construct an integral non-type template argument that
- /// has been deduced, possible from an array bound.
+ /// has been deduced, possibly from an array bound.
DeducedTemplateArgument(const llvm::APSInt &Value,
QualType ValueType,
bool DeducedFromArrayBound)
@@ -165,10 +176,19 @@ namespace clang {
/// instantiate a new function declaration, which will have its own
/// set of parameter declarations.
class LocalInstantiationScope {
+ public:
+ /// \brief A set of declarations.
+ typedef llvm::SmallVector<Decl *, 4> DeclArgumentPack;
+
+ private:
/// \brief Reference to the semantic analysis that is performing
/// this template instantiation.
Sema &SemaRef;
+ typedef llvm::DenseMap<const Decl *,
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> >
+ LocalDeclsMap;
+
/// \brief A mapping from local declarations that occur
/// within a template to their instantiations.
///
@@ -183,8 +203,15 @@ namespace clang {
/// when we instantiate add<int>, we will introduce a mapping from
/// the ParmVarDecl for 'x' that occurs in the template to the
/// instantiated ParmVarDecl for 'x'.
- llvm::DenseMap<const Decl *, Decl *> LocalDecls;
+ ///
+ /// For a parameter pack, the local instantiation scope may contain a
+ /// set of instantiated parameters. This is stored as a DeclArgumentPack
+ /// pointer.
+ LocalDeclsMap LocalDecls;
+ /// \brief The set of argument packs we've allocated.
+ llvm::SmallVector<DeclArgumentPack *, 1> ArgumentPacks;
+
/// \brief The outer scope, which contains local variable
/// definitions from some other instantiation (that may not be
/// relevant to this particular scope).
@@ -197,6 +224,19 @@ namespace clang {
/// lookup will search our outer scope.
bool CombineWithOuterScope;
+ /// \brief If non-NULL, the template parameter pack that has been
+ /// partially substituted per C++0x [temp.arg.explicit]p9.
+ NamedDecl *PartiallySubstitutedPack;
+
+ /// \brief If \c PartiallySubstitutedPack is non-null, the set of
+ /// explicitly-specified template arguments in that pack.
+ const TemplateArgument *ArgsInPartiallySubstitutedPack;
+
+ /// \brief If \c PartiallySubstitutedPack is non-null, the number of
+ /// explicitly-specified template arguments in
+ /// ArgsInPartiallySubstitutedPack.
+ unsigned NumArgsInPartiallySubstitutedPack;
+
// This class is non-copyable
LocalInstantiationScope(const LocalInstantiationScope &);
LocalInstantiationScope &operator=(const LocalInstantiationScope &);
@@ -204,7 +244,8 @@ namespace clang {
public:
LocalInstantiationScope(Sema &SemaRef, bool CombineWithOuterScope = false)
: SemaRef(SemaRef), Outer(SemaRef.CurrentInstantiationScope),
- Exited(false), CombineWithOuterScope(CombineWithOuterScope)
+ Exited(false), CombineWithOuterScope(CombineWithOuterScope),
+ PartiallySubstitutedPack(0)
{
SemaRef.CurrentInstantiationScope = this;
}
@@ -212,33 +253,171 @@ namespace clang {
~LocalInstantiationScope() {
Exit();
}
+
+ const Sema &getSema() const { return SemaRef; }
/// \brief Exit this local instantiation scope early.
void Exit() {
if (Exited)
return;
+ for (unsigned I = 0, N = ArgumentPacks.size(); I != N; ++I)
+ delete ArgumentPacks[I];
+
SemaRef.CurrentInstantiationScope = Outer;
Exited = true;
}
- Decl *getInstantiationOf(const Decl *D);
+ /// \brief Find the instantiation of the declaration D within the current
+ /// instantiation scope.
+ ///
+ /// \param D The declaration whose instantiation we are searching for.
+ ///
+ /// \returns A pointer to the declaration or argument pack of declarations
+ /// to which the declaration \c D is instantiated, if found. Otherwise,
+ /// returns NULL.
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *
+ findInstantiationOf(const Decl *D);
+
+ void InstantiatedLocal(const Decl *D, Decl *Inst);
+ void InstantiatedLocalPackArg(const Decl *D, Decl *Inst);
+ void MakeInstantiatedLocalArgPack(const Decl *D);
+
+ /// \brief Note that the given parameter pack has been partially substituted
+ /// via explicit specification of template arguments
+ /// (C++0x [temp.arg.explicit]p9).
+ ///
+ /// \param Pack The parameter pack, which will always be a template
+ /// parameter pack.
+ ///
+ /// \param ExplicitArgs The explicitly-specified template arguments provided
+ /// for this parameter pack.
+ ///
+ /// \param NumExplicitArgs The number of explicitly-specified template
+ /// arguments provided for this parameter pack.
+ void SetPartiallySubstitutedPack(NamedDecl *Pack,
+ const TemplateArgument *ExplicitArgs,
+ unsigned NumExplicitArgs);
+
+ /// \brief Retrieve the partially-substituted template parameter pack.
+ ///
+ /// If there is no partially-substituted parameter pack, returns NULL.
+ NamedDecl *getPartiallySubstitutedPack(
+ const TemplateArgument **ExplicitArgs = 0,
+ unsigned *NumExplicitArgs = 0) const;
+ };
+
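LocalDecls above maps each declaration in the template pattern either to a single instantiated declaration or, for a function parameter pack, to a DeclArgumentPack holding one declaration per expanded element; llvm::PointerUnion tags the two cases in the pointer's low bits. A portable sketch of the same shape, using std::variant as a stand-in (illustrative names, not clang's):

#include <cassert>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

struct Decl { std::string name; };
using DeclPack = std::vector<Decl *>;

// Each pattern decl maps to a single instantiation or to a pack of them.
using InstantiationMap =
    std::unordered_map<const Decl *, std::variant<Decl *, DeclPack>>;

int main() {
  Decl pattern{"x"}, inst{"x<int>"};
  Decl packPattern{"args"}, a0{"args#0"}, a1{"args#1"};

  InstantiationMap map;
  map[&pattern] = &inst;                    // ordinary parameter
  map[&packPattern] = DeclPack{&a0, &a1};   // expanded parameter pack

  assert(std::holds_alternative<Decl *>(map[&pattern]));
  assert(std::get<DeclPack>(map[&packPattern]).size() == 2);
  return 0;
}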
+ class TemplateDeclInstantiator
+ : public DeclVisitor<TemplateDeclInstantiator, Decl *>
+ {
+ Sema &SemaRef;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex;
+ DeclContext *Owner;
+ const MultiLevelTemplateArgumentList &TemplateArgs;
- VarDecl *getInstantiationOf(const VarDecl *Var) {
- return cast<VarDecl>(getInstantiationOf(cast<Decl>(Var)));
+ /// \brief A list of out-of-line class template partial
+ /// specializations that will need to be instantiated after the
+ /// enclosing class's instantiation is complete.
+ llvm::SmallVector<std::pair<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>, 4>
+ OutOfLinePartialSpecs;
+
+ public:
+ TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs)
+ : SemaRef(SemaRef), SubstIndex(SemaRef, -1), Owner(Owner),
+ TemplateArgs(TemplateArgs) { }
+
+ // FIXME: Once we get closer to completion, replace these manually-written
+ // declarations with automatically-generated ones from
+ // clang/AST/DeclNodes.inc.
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitLabelDecl(LabelDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitFriendDecl(FriendDecl *D);
+ Decl *VisitFunctionDecl(FunctionDecl *D,
+ TemplateParameterList *TemplateParams = 0);
+ Decl *VisitCXXRecordDecl(CXXRecordDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D,
+ TemplateParameterList *TemplateParams = 0);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ ParmVarDecl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ Decl *VisitUsingDecl(UsingDecl *D);
+ Decl *VisitUsingShadowDecl(UsingShadowDecl *D);
+ Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+
+ // Base case. FIXME: Remove once we can instantiate everything.
+ Decl *VisitDecl(Decl *D) {
+ unsigned DiagID = SemaRef.getDiagnostics().getCustomDiagID(
+ Diagnostic::Error,
+ "cannot instantiate %0 yet");
+ SemaRef.Diag(D->getLocation(), DiagID)
+ << D->getDeclKindName();
+
+ return 0;
}
+
+ typedef
+ llvm::SmallVectorImpl<std::pair<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> >
+ ::iterator
+ delayed_partial_spec_iterator;
- ParmVarDecl *getInstantiationOf(const ParmVarDecl *Var) {
- return cast<ParmVarDecl>(getInstantiationOf(cast<Decl>(Var)));
+ /// \brief Return an iterator to the beginning of the set of
+ /// "delayed" partial specializations, which must be passed to
+ /// InstantiateClassTemplatePartialSpecialization once the class
+ /// definition has been completed.
+ delayed_partial_spec_iterator delayed_partial_spec_begin() {
+ return OutOfLinePartialSpecs.begin();
}
- NonTypeTemplateParmDecl *getInstantiationOf(
- const NonTypeTemplateParmDecl *Var) {
- return cast<NonTypeTemplateParmDecl>(getInstantiationOf(cast<Decl>(Var)));
+ /// \brief Return an iterator to the end of the set of
+ /// "delayed" partial specializations, which must be passed to
+ /// InstantiateClassTemplatePartialSpecialization once the class
+ /// definition has been completed.
+ delayed_partial_spec_iterator delayed_partial_spec_end() {
+ return OutOfLinePartialSpecs.end();
}
- void InstantiatedLocal(const Decl *D, Decl *Inst);
- };
+ // Helper functions for instantiating methods.
+ TypeSourceInfo *SubstFunctionType(FunctionDecl *D,
+ llvm::SmallVectorImpl<ParmVarDecl *> &Params);
+ bool InitFunctionInstantiation(FunctionDecl *New, FunctionDecl *Tmpl);
+ bool InitMethodInstantiation(CXXMethodDecl *New, CXXMethodDecl *Tmpl);
+
+ TemplateParameterList *
+ SubstTemplateParams(TemplateParameterList *List);
+
+ bool SubstQualifier(const DeclaratorDecl *OldDecl,
+ DeclaratorDecl *NewDecl);
+ bool SubstQualifier(const TagDecl *OldDecl,
+ TagDecl *NewDecl);
+
+ ClassTemplatePartialSpecializationDecl *
+ InstantiateClassTemplatePartialSpecialization(
+ ClassTemplateDecl *ClassTemplate,
+ ClassTemplatePartialSpecializationDecl *PartialSpec);
+ };
}
#endif // LLVM_CLANG_SEMA_TEMPLATE_H
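TemplateDeclInstantiator plugs into DeclVisitor, a CRTP dispatcher: Visit() switches on the node's kind and calls the matching VisitXDecl on the derived class, with unhandled kinds funneling into VisitDecl — the "cannot instantiate %0 yet" fallback above. A minimal self-contained sketch of that dispatch pattern (hypothetical two-kind Decl, not clang's):

#include <cstdio>

struct Decl { enum Kind { Var, Function } kind; };

// CRTP base: dispatch on the node's kind, defaulting to VisitDecl.
template <typename Derived, typename RetTy>
struct DeclVisitorBase {
  RetTy Visit(Decl *D) {
    switch (D->kind) {
    case Decl::Var:      return static_cast<Derived *>(this)->VisitVarDecl(D);
    case Decl::Function: return static_cast<Derived *>(this)->VisitFunctionDecl(D);
    }
    return static_cast<Derived *>(this)->VisitDecl(D);
  }
  // Fallbacks; a derived class shadows the cases it handles.
  RetTy VisitVarDecl(Decl *D)      { return static_cast<Derived *>(this)->VisitDecl(D); }
  RetTy VisitFunctionDecl(Decl *D) { return static_cast<Derived *>(this)->VisitDecl(D); }
  RetTy VisitDecl(Decl *)          { return RetTy(); }
};

struct Instantiator : DeclVisitorBase<Instantiator, Decl *> {
  Decl *VisitVarDecl(Decl *D) { std::puts("instantiating a VarDecl"); return D; }
  Decl *VisitDecl(Decl *D)    { std::puts("cannot instantiate this yet"); return nullptr; }
};

int main() {
  Decl v{Decl::Var}, f{Decl::Function};
  Instantiator I;
  I.Visit(&v);  // hits VisitVarDecl
  I.Visit(&f);  // no override, falls back to VisitDecl
  return 0;
}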
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
index ac32e9c..7cc3571 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
@@ -13,7 +13,9 @@
#ifndef LLVM_CLANG_SEMA_TEMPLATE_DEDUCTION_H
#define LLVM_CLANG_SEMA_TEMPLATE_DEDUCTION_H
+#include "clang/Basic/PartialDiagnostic.h"
#include "clang/AST/DeclTemplate.h"
+#include "llvm/ADT/SmallVector.h"
namespace clang {
@@ -21,7 +23,7 @@ class ASTContext;
class TemplateArgumentList;
namespace sema {
-
+
/// \brief Provides information about an attempted template argument
/// deduction, whose success or failure was described by a
/// TemplateDeductionResult value.
@@ -37,6 +39,10 @@ class TemplateDeductionInfo {
/// deduction is occurring.
SourceLocation Loc;
+ /// \brief Warnings (and follow-on notes) that were suppressed due to
+ /// SFINAE while performing template argument deduction.
+ llvm::SmallVector<PartialDiagnosticAt, 4> SuppressedDiagnostics;
+
// do not implement these
TemplateDeductionInfo(const TemplateDeductionInfo&);
TemplateDeductionInfo &operator=(const TemplateDeductionInfo&);
@@ -69,6 +75,23 @@ public:
Deduced = NewDeduced;
}
+ /// \brief Add a new diagnostic to the set of diagnostics
+ void addSuppressedDiagnostic(SourceLocation Loc, const PartialDiagnostic &PD) {
+ SuppressedDiagnostics.push_back(std::make_pair(Loc, PD));
+ }
+
+ /// \brief Iterator over the set of suppressed diagnostics.
+ typedef llvm::SmallVectorImpl<PartialDiagnosticAt>::const_iterator
+ diag_iterator;
+
+ /// \brief Returns an iterator at the beginning of the sequence of suppressed
+ /// diagnostics.
+ diag_iterator diag_begin() const { return SuppressedDiagnostics.begin(); }
+
+ /// \brief Returns an iterator at the end of the sequence of suppressed
+ /// diagnostics.
+ diag_iterator diag_end() const { return SuppressedDiagnostics.end(); }
+
/// \brief The template parameter to which a template argument
/// deduction failure refers.
///
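The SuppressedDiagnostics buffer added above supports SFINAE: diagnostics produced while deduction is merely tentative must not reach the user, but they are recorded with their locations so a caller can, presumably via diag_begin()/diag_end(), replay them when a failed deduction turns out to be a hard error. A self-contained sketch of the record-then-replay shape (illustrative types, not clang's):

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

using SourceLocation = unsigned;
using DiagnosticAt = std::pair<SourceLocation, std::string>;

class DeductionInfo {
  std::vector<DiagnosticAt> Suppressed;
public:
  // Record instead of emitting while deduction is tentative.
  void addSuppressedDiagnostic(SourceLocation Loc, std::string Msg) {
    Suppressed.emplace_back(Loc, std::move(Msg));
  }
  // Replay only when the caller decides the failure is a real error.
  void replay() const {
    for (const auto &D : Suppressed)
      std::fprintf(stderr, "loc %u: %s\n", D.first, D.second.c_str());
  }
};

int main() {
  DeductionInfo Info;
  Info.addSuppressedDiagnostic(42, "no viable conversion during deduction");
  // ...deduction failed in a non-SFINAE context, so surface the notes:
  Info.replay();
  return 0;
}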
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
index 0fa446d..68fd91d 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
@@ -19,7 +19,7 @@
#include "clang/AST/Type.h"
#include "llvm/Bitcode/BitCodes.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/DenseMap.h"
namespace clang {
@@ -54,6 +54,9 @@ namespace clang {
/// reserved for the translation unit declaration.
typedef uint32_t DeclID;
+ /// \brief A Decl::Kind/DeclID pair.
+ typedef std::pair<uint32_t, DeclID> KindDeclIDPair;
+
/// \brief An ID number that refers to a type in an AST file.
///
/// The ID of a type is partitioned into two parts: the lower
@@ -112,12 +115,19 @@ namespace clang {
typedef llvm::DenseMap<QualType, TypeIdx, UnsafeQualTypeDenseMapInfo>
TypeIdxMap;
- /// \brief An ID number that refers to an identifier in an AST
- /// file.
+ /// \brief An ID number that refers to an identifier in an AST file.
typedef uint32_t IdentID;
+ /// \brief An ID number that refers to a macro in an AST file.
+ typedef uint32_t MacroID;
+
+ /// \brief An ID number that refers to an ObjC selector in an AST file.
typedef uint32_t SelectorID;
+ /// \brief An ID number that refers to a set of CXXBaseSpecifiers in an
+ /// AST file.
+ typedef uint32_t CXXBaseSpecifiersID;
+
/// \brief Describes the various kinds of blocks that occur within
/// an AST file.
enum BlockIDs {
@@ -135,7 +145,13 @@ namespace clang {
/// \brief The block containing the definitions of all of the
/// types and decls used within the AST file.
- DECLTYPES_BLOCK_ID
+ DECLTYPES_BLOCK_ID,
+
+ /// \brief The block containing DECL_UPDATES records.
+ DECL_UPDATES_BLOCK_ID,
+
+ /// \brief The block containing the detailed preprocessing record.
+ PREPROCESSOR_DETAIL_BLOCK_ID
};
/// \brief Record types that occur within the AST block itself.
@@ -318,9 +334,35 @@ namespace clang {
/// In practice, this should only be used for the TU and namespaces.
UPDATE_VISIBLE = 34,
- /// \brief Record code for template specializations introduced after
- /// serializations of the original template decl.
- ADDITIONAL_TEMPLATE_SPECIALIZATIONS = 35
+ /// \brief Record for offsets of DECL_UPDATES records for declarations
+ /// that were modified after being deserialized and need updates.
+ DECL_UPDATE_OFFSETS = 35,
+
+ /// \brief Record of updates for a declaration that was modified after
+ /// being deserialized.
+ DECL_UPDATES = 36,
+
+ /// \brief Record code for the table of offsets to CXXBaseSpecifier
+ /// sets.
+ CXX_BASE_SPECIFIER_OFFSETS = 37,
+
+ /// \brief Record code for #pragma diagnostic mappings.
+ DIAG_PRAGMA_MAPPINGS = 38,
+
+ /// \brief Record code for special CUDA declarations.
+ CUDA_SPECIAL_DECL_REFS = 39,
+
+ /// \brief Record code for header search information.
+ HEADER_SEARCH_TABLE = 40,
+
+ /// \brief The directory that the PCH was originally created in.
+ ORIGINAL_PCH_DIR = 41,
+
+ /// \brief Record code for floating point #pragma options.
+ FP_PRAGMA_OPTIONS = 42,
+
+ /// \brief Record code for enabled OpenCL extensions.
+ OPENCL_EXTENSIONS = 43
};
/// \brief Record types used within a source manager block.
@@ -359,16 +401,23 @@ namespace clang {
/// \brief Describes one token.
/// [PP_TOKEN, SLoc, Length, IdentInfoID, Kind, Flags]
- PP_TOKEN = 3,
+ PP_TOKEN = 3
+ };
+ /// \brief Record types used within a preprocessor detail block.
+ enum PreprocessorDetailRecordTypes {
/// \brief Describes a macro instantiation within the preprocessing
/// record.
- PP_MACRO_INSTANTIATION = 4,
+ PPD_MACRO_INSTANTIATION = 0,
/// \brief Describes a macro definition within the preprocessing record.
- PP_MACRO_DEFINITION = 5
+ PPD_MACRO_DEFINITION = 1,
+
+ /// \brief Describes an inclusion directive within the preprocessing
+ /// record.
+ PPD_INCLUSION_DIRECTIVE = 2
};
-
+
/// \defgroup ASTAST AST file AST constants
///
/// The constants in this group describe various components of the
@@ -521,7 +570,17 @@ namespace clang {
/// \brief A DependentTemplateSpecializationType record.
TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION = 32,
/// \brief A DependentSizedArrayType record.
- TYPE_DEPENDENT_SIZED_ARRAY = 33
+ TYPE_DEPENDENT_SIZED_ARRAY = 33,
+ /// \brief A ParenType record.
+ TYPE_PAREN = 34,
+ /// \brief A PackExpansionType record.
+ TYPE_PACK_EXPANSION = 35,
+ /// \brief An AttributedType record.
+ TYPE_ATTRIBUTED = 36,
+ /// \brief A SubstTemplateTypeParmPackType record.
+ TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK = 37,
+ /// \brief An AutoType record.
+ TYPE_AUTO = 38
};
/// \brief The type IDs for special types constructed by semantic
@@ -573,10 +632,8 @@ namespace clang {
/// constant describes a record for a specific declaration class
/// in the AST.
enum DeclCode {
- /// \brief Attributes attached to a declaration.
- DECL_ATTR = 50,
/// \brief A TranslationUnitDecl record.
- DECL_TRANSLATION_UNIT,
+ DECL_TRANSLATION_UNIT = 50,
/// \brief A TypedefDecl record.
DECL_TYPEDEF,
/// \brief An EnumDecl record.
@@ -642,7 +699,9 @@ namespace clang {
/// IDs. This data is used when performing qualified name lookup
/// into a DeclContext via DeclContext::lookup.
DECL_CONTEXT_VISIBLE,
- /// \brief A NamespaceDecl rcord.
+ /// \brief A LabelDecl record.
+ DECL_LABEL,
+ /// \brief A NamespaceDecl record.
DECL_NAMESPACE,
/// \brief A NamespaceAliasDecl record.
DECL_NAMESPACE_ALIAS,
@@ -690,7 +749,14 @@ namespace clang {
/// \brief A TemplateTemplateParmDecl record.
DECL_TEMPLATE_TEMPLATE_PARM,
/// \brief A StaticAssertDecl record.
- DECL_STATIC_ASSERT
+ DECL_STATIC_ASSERT,
+ /// \brief A record containing CXXBaseSpecifiers.
+ DECL_CXX_BASE_SPECIFIERS,
+ /// \brief An IndirectFieldDecl record.
+ DECL_INDIRECTFIELD,
+ /// \brief A NonTypeTemplateParmDecl record that stores an expanded
+ /// non-type template parameter pack.
+ DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK
};
/// \brief Record codes for each kind of statement or expression.
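These record codes are written into AST files, so their numeric values are part of the on-disk format: new codes are appended at the end (DECL_LABEL, DECL_CXX_BASE_SPECIFIERS, ...), and a retired slot such as DECL_ATTR's 50 is reassigned only because readers reject AST files produced under a different format version anyway. A small sketch of the anchored-enum idiom (values hypothetical):

#include <cstdint>

// On-disk record codes: the first value is anchored explicitly and the
// rest increment implicitly, so appending at the end never disturbs
// existing values. Reassigning a value (as the patch does with 50)
// is safe only alongside a format-version change.
enum DeclCode : uint32_t {
  DECL_TRANSLATION_UNIT = 50,   // explicit anchor
  DECL_TYPEDEF,                 // 51
  DECL_ENUM,                    // 52
  // ...later additions are appended; hypothetical values:
  DECL_STATIC_ASSERT = 80,
  DECL_CXX_BASE_SPECIFIERS      // 81
};

static_assert(DECL_TYPEDEF == 51, "implicit increment from the anchor");

int main() { return 0; }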
@@ -796,8 +862,6 @@ namespace clang {
EXPR_ADDR_LABEL,
/// \brief A StmtExpr record.
EXPR_STMT,
- /// \brief A TypesCompatibleExpr record.
- EXPR_TYPES_COMPATIBLE,
/// \brief A ChooseExpr record.
EXPR_CHOOSE,
/// \brief A GNUNullExpr record.
@@ -823,12 +887,10 @@ namespace clang {
EXPR_OBJC_IVAR_REF_EXPR,
/// \brief An ObjCPropertyRefExpr record.
EXPR_OBJC_PROPERTY_REF_EXPR,
- /// \brief An ObjCImplicitSetterGetterRefExpr record.
+ /// \brief UNUSED
EXPR_OBJC_KVC_REF_EXPR,
/// \brief An ObjCMessageExpr record.
EXPR_OBJC_MESSAGE_EXPR,
- /// \brief An ObjCSuperExpr record.
- EXPR_OBJC_SUPER_EXPR,
/// \brief An ObjCIsa Expr record.
EXPR_OBJC_ISA,
@@ -875,6 +937,8 @@ namespace clang {
EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
EXPR_CXX_TYPEID_EXPR, // CXXTypeidExpr (of expr).
EXPR_CXX_TYPEID_TYPE, // CXXTypeidExpr (of type).
+ EXPR_CXX_UUIDOF_EXPR, // CXXUuidofExpr (of expr).
+ EXPR_CXX_UUIDOF_TYPE, // CXXUuidofExpr (of type).
EXPR_CXX_THIS, // CXXThisExpr
EXPR_CXX_THROW, // CXXThrowExpr
EXPR_CXX_DEFAULT_ARG, // CXXDefaultArgExpr
@@ -885,15 +949,28 @@ namespace clang {
EXPR_CXX_DELETE, // CXXDeleteExpr
EXPR_CXX_PSEUDO_DESTRUCTOR, // CXXPseudoDestructorExpr
- EXPR_CXX_EXPR_WITH_TEMPORARIES, // CXXExprWithTemporaries
+ EXPR_EXPR_WITH_CLEANUPS, // ExprWithCleanups
- EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
- EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
- EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
- EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
- EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
+ EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
+ EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
+ EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
+ EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
+ EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
+
+ EXPR_CXX_UNARY_TYPE_TRAIT, // UnaryTypeTraitExpr
+ EXPR_CXX_NOEXCEPT, // CXXNoexceptExpr
+
+ EXPR_OPAQUE_VALUE, // OpaqueValueExpr
+ EXPR_BINARY_CONDITIONAL_OPERATOR, // BinaryConditionalOperator
+ EXPR_BINARY_TYPE_TRAIT, // BinaryTypeTraitExpr
+
+ EXPR_PACK_EXPANSION, // PackExpansionExpr
+ EXPR_SIZEOF_PACK, // SizeOfPackExpr
+ EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK,// SubstNonTypeTemplateParmPackExpr
+
+ // CUDA
- EXPR_CXX_UNARY_TYPE_TRAIT // UnaryTypeTraitExpr
+ EXPR_CUDA_KERNEL_CALL // CUDAKernelCallExpr
};
/// \brief The kinds of designators that can occur in a
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
index f8114de..f8cdebe 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
@@ -22,26 +22,31 @@ namespace clang {
class Decl;
class ASTReader;
class QualType;
-
+class MacroDefinition;
+
class ASTDeserializationListener {
protected:
- virtual ~ASTDeserializationListener() {}
+ virtual ~ASTDeserializationListener();
public:
- /// \brief Tell the listener about the reader.
- virtual void SetReader(ASTReader *Reader) = 0;
+
+ /// \brief The ASTReader was initialized.
+ virtual void ReaderInitialized(ASTReader *Reader) { }
/// \brief An identifier was deserialized from the AST file.
virtual void IdentifierRead(serialization::IdentID ID,
- IdentifierInfo *II) = 0;
+ IdentifierInfo *II) { }
/// \brief A type was deserialized from the AST file. The ID here has the
/// qualifier bits already removed, and T is guaranteed to be locally
/// unqualified.
- virtual void TypeRead(serialization::TypeIdx Idx, QualType T) = 0;
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T) { }
/// \brief A decl was deserialized from the AST file.
- virtual void DeclRead(serialization::DeclID ID, const Decl *D) = 0;
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) { }
/// \brief A selector was read from the AST file.
- virtual void SelectorRead(serialization::SelectorID iD, Selector Sel) = 0;
+ virtual void SelectorRead(serialization::SelectorID iD, Selector Sel) { }
+ /// \brief A macro definition was read from the AST file.
+ virtual void MacroDefinitionRead(serialization::MacroID,
+ MacroDefinition *MD) { }
};
}
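The listener's callbacks change here from pure virtual to empty inline defaults: subscribers now override only the events they care about, and new callbacks such as MacroDefinitionRead can be added without touching existing implementations. The pattern in isolation (hypothetical listener, not clang's):

#include <cstdio>

struct Listener {
  virtual ~Listener() {}
  // Empty defaults: overriding is opt-in, and new callbacks can be
  // added later without breaking existing subclasses.
  virtual void IdentifierRead(unsigned ID) { }
  virtual void DeclRead(unsigned ID) { }
};

struct DeclCounter : Listener {
  unsigned NumDecls = 0;
  void DeclRead(unsigned) override { ++NumDecls; }  // only event we need
};

int main() {
  DeclCounter C;
  Listener &L = C;
  L.IdentifierRead(1);  // absorbed by the default no-op
  L.DeclRead(2);
  std::printf("decls read: %u\n", C.NumDecls);
  return 0;
}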
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
index d31be88..9799b8d 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
@@ -20,6 +20,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/TemplateBase.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
@@ -31,7 +32,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitstreamReader.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <deque>
#include <map>
#include <string>
@@ -47,22 +48,26 @@ namespace clang {
class AddrLabelExpr;
class ASTConsumer;
class ASTContext;
+class ASTIdentifierIterator;
class Attr;
class Decl;
class DeclContext;
class NestedNameSpecifier;
class CXXBaseSpecifier;
-class CXXBaseOrMemberInitializer;
+class CXXCtorInitializer;
class GotoStmt;
-class LabelStmt;
class MacroDefinition;
class NamedDecl;
-class ASTDeserializationListener;
+class OpaqueValueExpr;
class Preprocessor;
class Sema;
class SwitchCase;
+class ASTDeserializationListener;
class ASTReader;
class ASTDeclReader;
+class ASTStmtReader;
+class ASTIdentifierLookupTrait;
+class TypeLocReader;
struct HeaderFileInfo;
struct PCHPredefinesBlock {
@@ -161,14 +166,27 @@ private:
class ASTReader
: public ExternalPreprocessorSource,
public ExternalPreprocessingRecordSource,
+ public ExternalHeaderFileInfoSource,
public ExternalSemaSource,
public IdentifierInfoLookup,
public ExternalIdentifierLookup,
- public ExternalSLocEntrySource {
+ public ExternalSLocEntrySource
+{
public:
enum ASTReadResult { Success, Failure, IgnorePCH };
+ /// \brief Types of AST files.
+ enum ASTFileType {
+ Module, ///< File is a module proper.
+ PCH, ///< File is a PCH file treated as such.
+ Preamble, ///< File is a PCH file treated as the preamble.
+ MainFile ///< File is a PCH file treated as the actual main file.
+ };
friend class PCHValidator;
friend class ASTDeclReader;
+ friend class ASTStmtReader;
+ friend class ASTIdentifierIterator;
+ friend class ASTIdentifierLookupTrait;
+ friend class TypeLocReader;
private:
/// \brief The receiver of some callbacks invoked by ASTReader.
llvm::OwningPtr<ASTReaderListener> Listener;
@@ -193,31 +211,15 @@ private:
/// \brief The AST consumer.
ASTConsumer *Consumer;
- /// \brief Information that is needed for every file in the chain.
+ /// \brief Information that is needed for every module.
struct PerFileData {
- PerFileData();
+ PerFileData(ASTFileType Ty);
~PerFileData();
- /// \brief The AST stat cache installed for this file, if any.
- ///
- /// The dynamic type of this stat cache is always ASTStatCache
- void *StatCache;
-
- /// \brief The bitstream reader from which we'll read the AST file.
- llvm::BitstreamReader StreamFile;
- llvm::BitstreamCursor Stream;
-
- /// \brief The size of this file, in bits.
- uint64_t SizeInBits;
+ // === General information ===
- /// \brief The cursor to the start of the preprocessor block, which stores
- /// all of the macro definitions.
- llvm::BitstreamCursor MacroCursor;
-
- /// DeclsCursor - This is a cursor to the start of the DECLS_BLOCK block. It
- /// has read all the abbreviations at the start of the block and is ready to
- /// jump around with these in context.
- llvm::BitstreamCursor DeclsCursor;
+ /// \brief The type of this AST file.
+ ASTFileType Type;
/// \brief The file name of the AST file.
std::string FileName;
@@ -226,6 +228,17 @@ private:
/// this AST file.
llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
+ /// \brief The size of this file, in bits.
+ uint64_t SizeInBits;
+
+ /// \brief The bitstream reader from which we'll read the AST file.
+ llvm::BitstreamReader StreamFile;
+
+ /// \brief The main bitstream cursor for the main block.
+ llvm::BitstreamCursor Stream;
+
+ // === Source Locations ===
+
/// \brief Cursor used to read source location entries.
llvm::BitstreamCursor SLocEntryCursor;
@@ -236,19 +249,10 @@ private:
/// AST file.
const uint32_t *SLocOffsets;
- /// \brief The number of types in this AST file.
- unsigned LocalNumTypes;
-
- /// \brief Offset of each type within the bitstream, indexed by the
- /// type ID, or the representation of a Type*.
- const uint32_t *TypeOffsets;
-
- /// \brief The number of declarations in this AST file.
- unsigned LocalNumDecls;
+ /// \brief The entire size of this module's source location offset range.
+ unsigned LocalSLocSize;
- /// \brief Offset of each declaration within the bitstream, indexed
- /// by the declaration ID (-1).
- const uint32_t *DeclOffsets;
+ // === Identifiers ===
/// \brief The number of identifiers in this AST file.
unsigned LocalNumIdentifiers;
@@ -260,26 +264,72 @@ private:
/// stored.
const uint32_t *IdentifierOffsets;
- /// \brief Actual data for the on-disk hash table.
+ /// \brief Actual data for the on-disk hash table of identifiers.
///
- // This pointer points into a memory buffer, where the on-disk hash
- // table for identifiers actually lives.
+ /// This pointer points into a memory buffer, where the on-disk hash
+ /// table for identifiers actually lives.
const char *IdentifierTableData;
/// \brief A pointer to an on-disk hash table of opaque type
/// IdentifierHashTable.
void *IdentifierLookupTable;
+ // === Macros ===
+
+ /// \brief The cursor to the start of the preprocessor block, which stores
+ /// all of the macro definitions.
+ llvm::BitstreamCursor MacroCursor;
+
+ /// \brief The offset of the start of the set of defined macros.
+ uint64_t MacroStartOffset;
+
+ // === Detailed PreprocessingRecord ===
+
+ /// \brief The cursor to the start of the (optional) detailed preprocessing
+ /// record block.
+ llvm::BitstreamCursor PreprocessorDetailCursor;
+
+ /// \brief The offset of the start of the preprocessor detail cursor.
+ uint64_t PreprocessorDetailStartOffset;
+
/// \brief The number of macro definitions in this file.
unsigned LocalNumMacroDefinitions;
-
+
/// \brief Offsets of all of the macro definitions in the preprocessing
/// record in the AST file.
const uint32_t *MacroDefinitionOffsets;
-
- /// \brief The number of preallocated preprocessing entities in the
- /// preprocessing record.
- unsigned NumPreallocatedPreprocessingEntities;
+
+ // === Header search information ===
+
+ /// \brief The number of local HeaderFileInfo structures.
+ unsigned LocalNumHeaderFileInfos;
+
+ /// \brief Actual data for the on-disk hash table of header file
+ /// information.
+ ///
+ /// This pointer points into a memory buffer, where the on-disk hash
+ /// table for header file information actually lives.
+ const char *HeaderFileInfoTableData;
+
+ /// \brief The on-disk hash table that contains information about each of
+ /// the header files.
+ void *HeaderFileInfoTable;
+
+ // === Selectors ===
+
+ /// \brief The number of selectors new to this file.
+ ///
+ /// This is the number of entries in SelectorOffsets.
+ unsigned LocalNumSelectors;
+
+ /// \brief Offsets into the selector lookup table's data array
+ /// where each selector resides.
+ const uint32_t *SelectorOffsets;
+
+ /// \brief A pointer to the character data that comprises the selector table
+ ///
+ /// The SelectorOffsets table refers into this memory.
+ const unsigned char *SelectorLookupTableData;
/// \brief A pointer to an on-disk hash table of opaque type
/// ASTSelectorLookupTable.
@@ -288,26 +338,81 @@ private:
/// instance and factory methods.
void *SelectorLookupTable;
- /// \brief A pointer to the character data that comprises the selector table
+ /// \brief Method selectors used in a @selector expression. Used for
+ /// implementation of -Wselector.
+ llvm::SmallVector<uint64_t, 64> ReferencedSelectorsData;
+
+ // === Declarations ===
+
+ /// DeclsCursor - This is a cursor to the start of the DECLS_BLOCK block. It
+ /// has read all the abbreviations at the start of the block and is ready to
+ /// jump around with these in context.
+ llvm::BitstreamCursor DeclsCursor;
+
+ /// \brief The number of declarations in this AST file.
+ unsigned LocalNumDecls;
+
+ /// \brief Offset of each declaration within the bitstream, indexed
+ /// by the declaration ID (-1).
+ const uint32_t *DeclOffsets;
+
+ /// \brief A snapshot of the pending instantiations in the chain.
///
- /// The SelectorOffsets table refers into this memory.
- const unsigned char *SelectorLookupTableData;
+ /// This record tracks the instantiations that Sema has to perform at the
+ /// end of the TU. It consists of a pair of values for every pending
+ /// instantiation where the first value is the ID of the decl and the second
+ /// is the instantiation location.
+ llvm::SmallVector<uint64_t, 64> PendingInstantiations;
+
+ /// \brief The number of C++ base specifier sets in this AST file.
+ unsigned LocalNumCXXBaseSpecifiers;
+
+ /// \brief Offset of each C++ base specifier set within the bitstream,
+ /// indexed by the C++ base specifier set ID (-1).
+ const uint32_t *CXXBaseSpecifiersOffsets;
+
+ // === Types ===
- /// \brief Offsets into the method pool lookup table's data array
- /// where each selector resides.
- const uint32_t *SelectorOffsets;
+ /// \brief The number of types in this AST file.
+ unsigned LocalNumTypes;
- /// \brief The number of selectors new to this file.
+ /// \brief Offset of each type within the bitstream, indexed by the
+ /// type ID, or the representation of a Type*.
+ const uint32_t *TypeOffsets;
+
+ // === Miscellaneous ===
+
+ /// \brief The AST stat cache installed for this file, if any.
///
- /// This is the number of entries in SelectorOffsets.
- unsigned LocalNumSelectors;
+ /// The dynamic type of this stat cache is always ASTStatCache.
+ void *StatCache;
+
+ /// \brief The number of preallocated preprocessing entities in the
+ /// preprocessing record.
+ unsigned NumPreallocatedPreprocessingEntities;
+
+ /// \brief The next module in source order.
+ PerFileData *NextInSource;
+
+ /// \brief All the modules that loaded this one. Can contain NULL for
+ /// directly loaded modules.
+ llvm::SmallVector<PerFileData *, 1> Loaders;
};
+ /// \brief All loaded modules, indexed by name.
+ llvm::StringMap<PerFileData*> Modules;
+
+ /// \brief The first module in source order.
+ PerFileData *FirstInSource;
+
/// \brief The chain of AST files. The first entry is the one named by the
/// user, the last one is the one that doesn't depend on anything further.
/// That is, the entry I was created with -include-pch I+1.
llvm::SmallVector<PerFileData*, 2> Chain;
+ /// \brief SLocEntries that we're going to preload.
+ llvm::SmallVector<uint64_t, 64> PreloadSLocEntries;
+
/// \brief Types that have already been loaded from the chain.
///
/// When the pointer at index I is non-NULL, the type with
@@ -331,6 +436,14 @@ private:
/// = I + 1 has already been loaded.
std::vector<Decl *> DeclsLoaded;
+ typedef std::pair<PerFileData *, uint64_t> FileOffset;
+ typedef llvm::SmallVector<FileOffset, 2> FileOffsetsTy;
+ typedef llvm::DenseMap<serialization::DeclID, FileOffsetsTy>
+ DeclUpdateOffsetsMap;
+ /// \brief Declarations that have modifications residing in a later file
+ /// in the chain.
+ DeclUpdateOffsetsMap DeclUpdateOffsets;
+
typedef llvm::DenseMap<serialization::DeclID,
std::pair<PerFileData *, uint64_t> >
DeclReplacementMap;
@@ -340,7 +453,7 @@ private:
/// \brief Information about the contents of a DeclContext.
struct DeclContextInfo {
     void *NameLookupTableData; // an ASTDeclContextNameLookupTable.
- const serialization::DeclID *LexicalDecls;
+ const serialization::KindDeclIDPair *LexicalDecls;
unsigned NumLexicalDecls;
};
// In a full chain, there could be multiple updates to every decl context,
@@ -366,22 +479,20 @@ private:
/// haven't been loaded yet.
DeclContextVisibleUpdatesPending PendingVisibleUpdates;
+ typedef llvm::SmallVector<CXXRecordDecl *, 4> ForwardRefs;
+ typedef llvm::DenseMap<const CXXRecordDecl *, ForwardRefs>
+ PendingForwardRefsMap;
+ /// \brief Forward references that have a definition but the definition decl
+ /// is still initializing. When the definition gets read it will update
+ /// the DefinitionData pointer of all pending references.
+ PendingForwardRefsMap PendingForwardRefs;
+
typedef llvm::DenseMap<serialization::DeclID, serialization::DeclID>
FirstLatestDeclIDMap;
/// \brief Map of first declarations from a chained PCH that point to the
/// most recent declarations in another AST file.
FirstLatestDeclIDMap FirstLatestDeclIDs;
- typedef llvm::SmallVector<serialization::DeclID, 4>
- AdditionalTemplateSpecializations;
- typedef llvm::DenseMap<serialization::DeclID,
- AdditionalTemplateSpecializations>
- AdditionalTemplateSpecializationsMap;
-
- /// \brief Additional specializations (including partial) of templates that
- /// were introduced after the template was serialized.
- AdditionalTemplateSpecializationsMap AdditionalTemplateSpecializationsPending;
-
/// \brief Read the records that describe the contents of declcontexts.
bool ReadDeclContextStorage(llvm::BitstreamCursor &Cursor,
const std::pair<uint64_t, uint64_t> &Offsets,
@@ -405,6 +516,11 @@ private:
/// \brief The macro definitions we have already loaded.
llvm::SmallVector<MacroDefinition *, 16> MacroDefinitionsLoaded;
+ /// \brief Mapping from identifiers that represent macros whose definitions
+ /// have not yet been deserialized to the global offset where the macro
+ /// record resides.
+ llvm::DenseMap<IdentifierInfo *, uint64_t> UnreadMacroRecordOffsets;
+
/// \name CodeGen-relevant special data
/// \brief Fields containing data that is relevant to CodeGen.
//@{
@@ -437,10 +553,6 @@ private:
/// \brief Fields containing data that is used for generating diagnostics
//@{
- /// \brief Method selectors used in a @selector expression. Used for
- /// implementation of -Wselector.
- llvm::SmallVector<uint64_t, 64> ReferencedSelectorsData;
-
/// \brief A snapshot of Sema's unused file-scoped variable tracking, for
/// generating warnings.
llvm::SmallVector<uint64_t, 16> UnusedFileScopedDecls;
@@ -466,14 +578,6 @@ private:
/// local external declarations.
llvm::SmallVector<uint64_t, 16> LocallyScopedExternalDecls;
-  /// \brief A snapshot of the pending instantiations in the chain.
- ///
- /// This record tracks the instantiations that Sema has to perform at the end
- /// of the TU. It consists of a pair of values for every pending instantiation
- /// where the first value is the ID of the decl and the second is the
- /// instantiation location.
- llvm::SmallVector<uint64_t, 64> PendingInstantiations;
-
/// \brief The IDs of all dynamic class declarations in the chain.
///
/// Sema tracks these because it checks for the key functions being defined
@@ -490,8 +594,23 @@ private:
/// The AST context tracks a few important types, such as va_list, directly.
llvm::SmallVector<uint64_t, 16> SpecialTypes;
+ /// \brief The IDs of CUDA-specific declarations ASTContext stores directly.
+ ///
+ /// The AST context tracks a few important decls, currently cudaConfigureCall,
+ /// directly.
+ llvm::SmallVector<uint64_t, 2> CUDASpecialDeclRefs;
+
+ /// \brief The floating point pragma option settings.
+ llvm::SmallVector<uint64_t, 1> FPPragmaOptions;
+
+ /// \brief The OpenCL extension settings.
+ llvm::SmallVector<uint64_t, 1> OpenCLExtensions;
+
//@}
+ /// \brief Diagnostic IDs and their mappings that the user changed.
+ llvm::SmallVector<uint64_t, 8> PragmaDiagMappings;
+
/// \brief The original file name that was used to build the primary AST file,
/// which may have been modified for relocatable-pch support.
std::string OriginalFileName;
@@ -500,6 +619,13 @@ private:
/// AST file.
std::string ActualOriginalFileName;
+ /// \brief The directory that the PCH was originally created in. Used to
+ /// allow resolving headers even after headers+PCH was moved to a new path.
+ std::string OriginalDir;
+
+ /// \brief The directory that the PCH we are reading is stored in.
+ std::string CurrentDir;
+
/// \brief Whether this precompiled header is a relocatable PCH file.
bool RelocatablePCH;
@@ -511,27 +637,17 @@ private:
/// headers when they are loaded.
bool DisableValidation;
+ /// \brief Whether to disable the use of stat caches in AST files.
+ bool DisableStatCache;
+
/// \brief Mapping from switch-case IDs in the chain to switch-case statements
///
/// Statements usually don't have IDs, but switch cases need them, so that the
/// switch statement can refer to them.
std::map<unsigned, SwitchCase *> SwitchCaseStmts;
- /// \brief Mapping from label statement IDs in the chain to label statements.
- ///
- /// Statements usually don't have IDs, but labeled statements need them, so
- /// that goto statements and address-of-label expressions can refer to them.
- std::map<unsigned, LabelStmt *> LabelStmts;
-
- /// \brief Mapping from label IDs to the set of "goto" statements
- /// that point to that label before the label itself has been
- /// de-serialized.
- std::multimap<unsigned, GotoStmt *> UnresolvedGotoStmts;
-
- /// \brief Mapping from label IDs to the set of address label
- /// expressions that point to that label before the label itself has
- /// been de-serialized.
- std::multimap<unsigned, AddrLabelExpr *> UnresolvedAddrLabelExprs;
+ /// \brief Mapping from opaque value IDs to OpaqueValueExprs.
+ std::map<unsigned, OpaqueValueExpr*> OpaqueValueExprs;
/// \brief The number of stat() calls that hit/missed the stat
/// cache.
@@ -544,6 +660,9 @@ private:
/// \brief The number of source location entries in the chain.
unsigned TotalNumSLocEntries;
+ /// \brief The next offset for a SLocEntry after everything in this reader.
+ unsigned NextSLocOffset;
+
/// \brief The number of statements (and expressions) de-serialized
/// from the chain.
unsigned NumStatementsRead;
@@ -602,6 +721,13 @@ private:
/// Objective-C protocols.
std::deque<Decl *> InterestingDecls;
+ /// \brief We delay loading of the previous declaration chain to avoid
+ /// deeply nested calls when there are many redeclarations.
+ std::deque<std::pair<Decl *, serialization::DeclID> > PendingPreviousDecls;
+
+ /// \brief Ready to load the previous declaration of the given Decl.
+ void loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID);
+
/// \brief When reading a Stmt tree, Stmt operands are placed in this stack.
llvm::SmallVector<Stmt *, 16> StmtStack;
@@ -645,20 +771,26 @@ private:
std::string SuggestedPredefines;
/// \brief Reads a statement from the specified cursor.
- Stmt *ReadStmtFromStream(llvm::BitstreamCursor &Cursor);
+ Stmt *ReadStmtFromStream(PerFileData &F);
void MaybeAddSystemRootToFilename(std::string &Filename);
- ASTReadResult ReadASTCore(llvm::StringRef FileName);
+ ASTReadResult ReadASTCore(llvm::StringRef FileName, ASTFileType Type);
ASTReadResult ReadASTBlock(PerFileData &F);
bool CheckPredefinesBuffers();
- bool ParseLineTable(llvm::SmallVectorImpl<uint64_t> &Record);
+ bool ParseLineTable(PerFileData &F, llvm::SmallVectorImpl<uint64_t> &Record);
ASTReadResult ReadSourceManagerBlock(PerFileData &F);
ASTReadResult ReadSLocEntryRecord(unsigned ID);
- llvm::BitstreamCursor &SLocCursorForID(unsigned ID);
+ PerFileData *SLocCursorForID(unsigned ID);
+ SourceLocation getImportLocation(PerFileData *F);
bool ParseLanguageOptions(const llvm::SmallVectorImpl<uint64_t> &Record);
- typedef std::pair<llvm::BitstreamCursor *, uint64_t> RecordLocation;
+ struct RecordLocation {
+ RecordLocation(PerFileData *M, uint64_t O)
+ : F(M), Offset(O) {}
+ PerFileData *F;
+ uint64_t Offset;
+ };
QualType ReadTypeRecord(unsigned Index);
RecordLocation TypeCursorForIndex(unsigned Index);
@@ -695,8 +827,14 @@ public:
/// \param DisableValidation If true, the AST reader will suppress most
/// of its regular consistency checking, allowing the use of precompiled
/// headers that cannot be determined to be compatible.
+ ///
+ /// \param DisableStatCache If true, the AST reader will ignore the
+ /// stat cache in the AST files. This performance pessimization can
+ /// help when an AST file is being used in cases where the
+ /// underlying files in the file system may have changed, but
+ /// parsing should still continue.
ASTReader(Preprocessor &PP, ASTContext *Context, const char *isysroot = 0,
- bool DisableValidation = false);
+ bool DisableValidation = false, bool DisableStatCache = false);
/// \brief Load the AST file without using any pre-initialized Preprocessor.
///
@@ -717,14 +855,20 @@ public:
/// \param DisableValidation If true, the AST reader will suppress most
/// of its regular consistency checking, allowing the use of precompiled
/// headers that cannot be determined to be compatible.
- ASTReader(SourceManager &SourceMgr, FileManager &FileMgr,
+ ///
+ /// \param DisableStatCache If true, the AST reader will ignore the
+ /// stat cache in the AST files. This performance pessimization can
+ /// help when an AST file is being used in cases where the
+ /// underlying files in the file system may have changed, but
+ /// parsing should still continue.
+ ASTReader(SourceManager &SourceMgr, FileManager &FileMgr,
Diagnostic &Diags, const char *isysroot = 0,
- bool DisableValidation = false);
+ bool DisableValidation = false, bool DisableStatCache = false);
~ASTReader();
/// \brief Load the precompiled header designated by the given file
/// name.
- ASTReadResult ReadAST(const std::string &FileName);
+ ASTReadResult ReadAST(const std::string &FileName, ASTFileType Type);
/// \brief Set the AST callbacks listener.
void setListener(ASTReaderListener *listener) {
@@ -749,6 +893,7 @@ public:
/// \brief Retrieve the name of the original source file name directly from
/// the AST file, without actually loading the AST file.
static std::string getOriginalSourceFile(const std::string &ASTFileName,
+ FileManager &FileMgr,
Diagnostic &Diags);
/// \brief Returns the suggested contents of the predefines buffer,
@@ -756,14 +901,27 @@ public:
/// build prior to including the precompiled header.
const std::string &getSuggestedPredefines() { return SuggestedPredefines; }
- /// \brief Read preprocessed entities into the
+ /// \brief Read preprocessed entities into the preprocessing record.
virtual void ReadPreprocessedEntities();
+ /// \brief Read the preprocessed entity at the given offset.
+ virtual PreprocessedEntity *ReadPreprocessedEntityAtOffset(uint64_t Offset);
+
+ /// \brief Read the header file information for the given file entry.
+ virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE);
+
+ void ReadPragmaDiagnosticMappings(Diagnostic &Diag);
+
/// \brief Returns the number of source locations found in the chain.
unsigned getTotalNumSLocs() const {
return TotalNumSLocEntries;
}
+ /// \brief Returns the next SLocEntry offset after the chain.
+ unsigned getNextSLocOffset() const {
+ return NextSLocOffset;
+ }
+
/// \brief Returns the number of identifiers found in the chain.
unsigned getTotalNumIdentifiers() const {
return static_cast<unsigned>(IdentifiersLoaded.size());
@@ -784,20 +942,27 @@ public:
return static_cast<unsigned>(SelectorsLoaded.size());
}
+ /// \brief Returns the number of macro definitions found in the chain.
+ unsigned getTotalNumMacroDefinitions() const {
+ return static_cast<unsigned>(MacroDefinitionsLoaded.size());
+ }
+
+ /// \brief Returns the number of C++ base specifiers found in the chain.
+ unsigned getTotalNumCXXBaseSpecifiers() const;
+
/// \brief Reads a TemplateArgumentLocInfo appropriate for the
/// given TemplateArgument kind.
TemplateArgumentLocInfo
- GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
- llvm::BitstreamCursor &DeclsCursor,
+ GetTemplateArgumentLocInfo(PerFileData &F, TemplateArgument::ArgKind Kind,
const RecordData &Record, unsigned &Idx);
/// \brief Reads a TemplateArgumentLoc.
TemplateArgumentLoc
- ReadTemplateArgumentLoc(llvm::BitstreamCursor &DeclsCursor,
+ ReadTemplateArgumentLoc(PerFileData &F,
const RecordData &Record, unsigned &Idx);
/// \brief Reads a declarator info from the given record.
- TypeSourceInfo *GetTypeSourceInfo(llvm::BitstreamCursor &DeclsCursor,
+ TypeSourceInfo *GetTypeSourceInfo(PerFileData &F,
const RecordData &Record, unsigned &Idx);
/// \brief Resolve and return the translation unit declaration.
@@ -822,6 +987,12 @@ public:
Decl *GetDecl(serialization::DeclID ID);
virtual Decl *GetExternalDecl(uint32_t ID);
+ /// \brief Resolve a CXXBaseSpecifiers ID into an offset into the chain
+ /// of loaded AST files.
+ uint64_t GetCXXBaseSpecifiersOffset(serialization::CXXBaseSpecifiersID ID);
+
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
+
/// \brief Resolve the offset of a statement into a statement.
///
/// This operation will read a new statement from the external
@@ -857,6 +1028,7 @@ public:
/// \returns true if there was an error while reading the
/// declarations for this declaration context.
virtual bool FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
llvm::SmallVectorImpl<Decl*> &Decls);
/// \brief Notify ASTReader that we started deserialization of
@@ -897,6 +1069,10 @@ public:
return get(Name.begin(), Name.end());
}
+ /// \brief Retrieve an iterator into the set of all identifiers
+ /// in all loaded AST files.
+ virtual IdentifierIterator *getIdentifiers() const;
+
/// \brief Load the contents of the global method pool for a given
/// selector.
///
@@ -943,47 +1119,65 @@ public:
/// \brief Read a declaration name.
DeclarationName ReadDeclarationName(const RecordData &Record, unsigned &Idx);
+ void ReadDeclarationNameLoc(PerFileData &F,
+ DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const RecordData &Record, unsigned &Idx);
+ void ReadDeclarationNameInfo(PerFileData &F, DeclarationNameInfo &NameInfo,
+ const RecordData &Record, unsigned &Idx);
+
+ void ReadQualifierInfo(PerFileData &F, QualifierInfo &Info,
+ const RecordData &Record, unsigned &Idx);
NestedNameSpecifier *ReadNestedNameSpecifier(const RecordData &Record,
unsigned &Idx);
/// \brief Read a template name.
- TemplateName ReadTemplateName(const RecordData &Record, unsigned &Idx);
+ TemplateName ReadTemplateName(PerFileData &F, const RecordData &Record,
+ unsigned &Idx);
/// \brief Read a template argument.
- TemplateArgument ReadTemplateArgument(llvm::BitstreamCursor &DeclsCursor,
+ TemplateArgument ReadTemplateArgument(PerFileData &F,
const RecordData &Record,unsigned &Idx);
/// \brief Read a template parameter list.
- TemplateParameterList *ReadTemplateParameterList(const RecordData &Record,
+ TemplateParameterList *ReadTemplateParameterList(PerFileData &F,
+ const RecordData &Record,
unsigned &Idx);
/// \brief Read a template argument array.
void
ReadTemplateArgumentList(llvm::SmallVector<TemplateArgument, 8> &TemplArgs,
- llvm::BitstreamCursor &DeclsCursor,
- const RecordData &Record, unsigned &Idx);
+ PerFileData &F, const RecordData &Record,
+ unsigned &Idx);
/// \brief Read a UnresolvedSet structure.
void ReadUnresolvedSet(UnresolvedSetImpl &Set,
const RecordData &Record, unsigned &Idx);
/// \brief Read a C++ base specifier.
- CXXBaseSpecifier ReadCXXBaseSpecifier(llvm::BitstreamCursor &DeclsCursor,
+ CXXBaseSpecifier ReadCXXBaseSpecifier(PerFileData &F,
const RecordData &Record,unsigned &Idx);
- /// \brief Read a CXXBaseOrMemberInitializer array.
- std::pair<CXXBaseOrMemberInitializer **, unsigned>
- ReadCXXBaseOrMemberInitializers(llvm::BitstreamCursor &DeclsCursor,
- const RecordData &Record, unsigned &Idx);
+ /// \brief Read a CXXCtorInitializer array.
+ std::pair<CXXCtorInitializer **, unsigned>
+ ReadCXXCtorInitializers(PerFileData &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a source location from raw form.
+ SourceLocation ReadSourceLocation(PerFileData &Module, unsigned Raw) {
+ (void)Module; // No remapping yet
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
/// \brief Read a source location.
- SourceLocation ReadSourceLocation(const RecordData &Record, unsigned& Idx) {
- return SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation ReadSourceLocation(PerFileData &Module,
+ const RecordData &Record, unsigned& Idx) {
+ return ReadSourceLocation(Module, Record[Idx++]);
}
/// \brief Read a source range.
- SourceRange ReadSourceRange(const RecordData &Record, unsigned& Idx);
+ SourceRange ReadSourceRange(PerFileData &F,
+ const RecordData &Record, unsigned& Idx);
/// \brief Read an integral value
llvm::APInt ReadAPInt(const RecordData &Record, unsigned &Idx);
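
The two-step ReadSourceLocation above routes every raw location through a per-file hook, and the "(void)Module" cast marks remapping as planned but not yet implemented. A minimal standalone sketch of what such a remap could look like once each loaded AST file carries its own base offset; the SLocOffsetBase field and the stand-in types below are hypothetical, not part of this patch:

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-ins for PerFileData and SourceLocation.
    struct PerFileData {
      uint32_t SLocOffsetBase; // where this file's SLocEntries start globally
    };

    struct SourceLocation {
      uint32_t Raw;
      static SourceLocation getFromRawEncoding(uint32_t R) {
        SourceLocation L = {R};
        return L;
      }
    };

    // Sketch: remap a file-local offset into the global SLoc address space.
    SourceLocation ReadSourceLocation(const PerFileData &F, uint32_t Raw) {
      return SourceLocation::getFromRawEncoding(Raw + F.SLocOffsetBase);
    }

    int main() {
      PerFileData PCH1 = {0}, PCH2 = {5000};
      // The same file-local offset lands in different global ranges.
      std::cout << ReadSourceLocation(PCH1, 42).Raw << '\n'; // 42
      std::cout << ReadSourceLocation(PCH2, 42).Raw << '\n'; // 5042
      return 0;
    }
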
@@ -1000,13 +1194,14 @@ public:
CXXTemporary *ReadCXXTemporary(const RecordData &Record, unsigned &Idx);
/// \brief Reads attributes from the current stream position.
- void ReadAttributes(llvm::BitstreamCursor &DeclsCursor, AttrVec &Attrs);
+ void ReadAttributes(PerFileData &F, AttrVec &Attrs,
+ const RecordData &Record, unsigned &Idx);
/// \brief Reads a statement.
- Stmt *ReadStmt(llvm::BitstreamCursor &Cursor);
+ Stmt *ReadStmt(PerFileData &F);
/// \brief Reads an expression.
- Expr *ReadExpr(llvm::BitstreamCursor &Cursor);
+ Expr *ReadExpr(PerFileData &F);
/// \brief Reads a sub-statement operand during statement reading.
Stmt *ReadSubStmt() {
@@ -1022,13 +1217,30 @@ public:
Expr *ReadSubExpr();
/// \brief Reads the macro record located at the given offset.
- void ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset);
+ PreprocessedEntity *ReadMacroRecord(PerFileData &F, uint64_t Offset);
+ /// \brief Reads the preprocessed entity located at the current stream
+ /// position.
+ PreprocessedEntity *LoadPreprocessedEntity(PerFileData &F);
+
+ /// \brief Note that the identifier is a macro whose record will be loaded
+ /// from the given AST file at the given (file-local) offset.
+ void SetIdentifierIsMacro(IdentifierInfo *II, PerFileData &F,
+ uint64_t Offset);
+
/// \brief Read the set of macros defined by this external macro source.
virtual void ReadDefinedMacros();
+ /// \brief Read the macro definition for this identifier.
+ virtual void LoadMacroDefinition(IdentifierInfo *II);
+
+ /// \brief Read the macro definition corresponding to this iterator
+ /// into the unread macro record offsets table.
+ void LoadMacroDefinition(
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos);
+
/// \brief Retrieve the macro definition with the given ID.
- MacroDefinition *getMacroDefinition(serialization::IdentID ID);
+ MacroDefinition *getMacroDefinition(serialization::MacroID ID);
/// \brief Retrieve the AST context that this AST reader supplements.
ASTContext *getContext() { return Context; }
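
SetIdentifierIsMacro and the two LoadMacroDefinition overloads above implement lazy deserialization: noting a macro only records its offset in UnreadMacroRecordOffsets, and the record is read on first use and then erased so the work happens at most once. A standalone sketch of that pattern, with illustrative names and std types in place of the reader's API:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    struct MacroSource {
      // Offsets of macro records that have not been deserialized yet.
      std::unordered_map<std::string, uint64_t> UnreadOffsets;

      void noteMacro(const std::string &Name, uint64_t Offset) {
        UnreadOffsets[Name] = Offset; // cheap: no record is read here
      }

      void loadMacro(const std::string &Name) {
        std::unordered_map<std::string, uint64_t>::iterator Pos =
            UnreadOffsets.find(Name);
        if (Pos == UnreadOffsets.end())
          return; // already loaded (or never a macro)
        std::cout << "deserializing " << Name << " at offset "
                  << Pos->second << '\n';
        UnreadOffsets.erase(Pos); // ensure the record is read at most once
      }
    };

    int main() {
      MacroSource S;
      S.noteMacro("ASSERT", 128);
      S.loadMacro("ASSERT"); // deserializes
      S.loadMacro("ASSERT"); // no-op the second time
      return 0;
    }
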
@@ -1053,27 +1265,7 @@ public:
/// \brief Retrieve the switch-case statement with the given ID.
SwitchCase *getSwitchCaseWithID(unsigned ID);
- /// \brief Record that the given label statement has been
- /// deserialized and has the given ID.
- void RecordLabelStmt(LabelStmt *S, unsigned ID);
-
- /// \brief Set the label of the given statement to the label
- /// identified by ID.
- ///
- /// Depending on the order in which the label and other statements
- /// referencing that label occur, this operation may complete
- /// immediately (updating the statement) or it may queue the
- /// statement to be back-patched later.
- void SetLabelOf(GotoStmt *S, unsigned ID);
-
- /// \brief Set the label of the given expression to the label
- /// identified by ID.
- ///
- /// Depending on the order in which the label and other statements
- /// referencing that label occur, this operation may complete
- /// immediately (updating the statement) or it may queue the
- /// statement to be back-patched later.
- void SetLabelOf(AddrLabelExpr *S, unsigned ID);
+ void ClearSwitchCaseIDs();
};
/// \brief Helper class that saves the current stream position and
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTSerializationListener.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTSerializationListener.h
new file mode 100644
index 0000000..0c62e0b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTSerializationListener.h
@@ -0,0 +1,44 @@
+//===- ASTSerializationListener.h - Decl/Type PCH Write Events -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTSerializationListener class, which is notified
+// by the ASTWriter when an entity is serialized.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_FRONTEND_AST_SERIALIZATION_LISTENER_H
+#define LLVM_CLANG_FRONTEND_AST_SERIALIZATION_LISTENER_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace clang {
+
+class PreprocessedEntity;
+
+/// \brief Listener object that receives callbacks when certain kinds of
+/// entities are serialized.
+class ASTSerializationListener {
+public:
+ virtual ~ASTSerializationListener();
+
+ /// \brief Callback invoked whenever a preprocessed entity is serialized.
+ ///
+ /// This callback will only occur when the translation unit was created with
+ /// a detailed preprocessing record.
+ ///
+ /// \param Entity The entity that has been serialized.
+ ///
+ /// \param Offset The offset (in bits) of this entity in the resulting
+ /// AST file.
+ virtual void SerializedPreprocessedEntity(PreprocessedEntity *Entity,
+ uint64_t Offset) = 0;
+};
+
+}
+
+#endif
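
Since the new header appears in full above, a client has exactly one pure-virtual callback to override. A sketch of a listener that indexes entity offsets, assuming the clang headers from this patch are on the include path; the index map is illustrative:

    #include "clang/Serialization/ASTSerializationListener.h"
    #include <map>

    namespace {
    // Sketch: remember where each preprocessed entity landed in the AST
    // file, e.g. to build an external index alongside the PCH.
    class OffsetIndexListener : public clang::ASTSerializationListener {
      std::map<clang::PreprocessedEntity *, uint64_t> Offsets;
    public:
      virtual void SerializedPreprocessedEntity(
          clang::PreprocessedEntity *Entity, uint64_t Offset) {
        Offsets[Entity] = Offset;
      }
    };
    } // end anonymous namespace
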
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
index 426fc47..beb4936 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
@@ -17,11 +17,13 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Sema/SemaConsumer.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include <map>
#include <queue>
@@ -36,13 +38,19 @@ namespace llvm {
namespace clang {
class ASTContext;
+class ASTSerializationListener;
class NestedNameSpecifier;
class CXXBaseSpecifier;
-class CXXBaseOrMemberInitializer;
-class LabelStmt;
+class CXXCtorInitializer;
+class FPOptions;
+class HeaderSearch;
class MacroDefinition;
class MemorizeStatCalls;
+class OpaqueValueExpr;
+class OpenCLOptions;
class ASTReader;
+class PreprocessedEntity;
+class PreprocessingRecord;
class Preprocessor;
class Sema;
class SourceManager;
@@ -55,9 +63,11 @@ class TargetInfo;
/// representation of a given abstract syntax tree and its supporting
/// data structures. This bitstream can be de-serialized via an
/// instance of the ASTReader class.
-class ASTWriter : public ASTDeserializationListener {
+class ASTWriter : public ASTDeserializationListener,
+ public ASTMutationListener {
public:
typedef llvm::SmallVector<uint64_t, 64> RecordData;
+ typedef llvm::SmallVectorImpl<uint64_t> RecordDataImpl;
friend class ASTDeclWriter;
private:
@@ -67,6 +77,10 @@ private:
/// \brief The reader of existing AST files, if we're chaining.
ASTReader *Chain;
+ /// \brief A listener object that receives notifications when certain
+ /// entities are serialized.
+ ASTSerializationListener *SerializationListener;
+
/// \brief Stores a declaration or a type to be written to the AST file.
class DeclOrType {
public:
@@ -163,7 +177,7 @@ private:
/// \brief Offset of each selector within the method pool/selector
/// table, indexed by the Selector ID (-1).
std::vector<uint32_t> SelectorOffsets;
-
+
/// \brief Offsets of each of the macro identifiers into the
/// bitstream.
///
@@ -172,15 +186,30 @@ private:
/// defined.
llvm::DenseMap<const IdentifierInfo *, uint64_t> MacroOffsets;
+ /// \brief The set of identifiers that had macro definitions at some point.
+ std::vector<const IdentifierInfo *> DeserializedMacroNames;
+
+ /// \brief The first ID number we can use for our own macro definitions.
+ serialization::MacroID FirstMacroID;
+
+ /// \brief The decl ID that will be assigned to the next new macro definition.
+ serialization::MacroID NextMacroID;
+
/// \brief Mapping from macro definitions (as they occur in the preprocessing
- /// record) to the index into the macro definitions table.
- llvm::DenseMap<const MacroDefinition *, serialization::IdentID>
+ /// record) to the macro IDs.
+ llvm::DenseMap<const MacroDefinition *, serialization::MacroID>
MacroDefinitions;
/// \brief Mapping from the macro definition indices in \c MacroDefinitions
/// to the corresponding offsets within the preprocessor block.
std::vector<uint32_t> MacroDefinitionOffsets;
+ typedef llvm::SmallVector<uint64_t, 2> UpdateRecord;
+ typedef llvm::DenseMap<const Decl *, UpdateRecord> DeclUpdateMap;
+ /// \brief Mapping from declarations that came from a chained PCH to the
+ /// record containing modifications to them.
+ DeclUpdateMap DeclUpdates;
+
typedef llvm::DenseMap<Decl *, Decl *> FirstLatestDeclMap;
/// \brief Map of first declarations from a chained PCH that point to the
/// most recent declarations in another PCH.
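
DeclUpdates above buffers modifications to declarations that live in an earlier file of the PCH chain: each mutation appends raw values to the declaration's UpdateRecord, and WriteDeclUpdatesBlocks emits the accumulated records later. A standalone sketch of the buffering pattern, with simplified key and payload types:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Sketch: queue per-declaration update records, keyed by a stand-in
    // declaration pointer, for emission in a later write pass.
    typedef std::vector<uint64_t> UpdateRecord;
    typedef std::map<const void *, UpdateRecord> DeclUpdateMap;

    void noteUpdate(DeclUpdateMap &Updates, const void *D, uint64_t Kind,
                    uint64_t Payload) {
      UpdateRecord &Rec = Updates[D]; // created on the first update to D
      Rec.push_back(Kind);
      Rec.push_back(Payload);
    }
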
@@ -200,13 +229,17 @@ private:
/// record.
llvm::SmallVector<uint64_t, 16> ExternalDefinitions;
- /// \brief Namespaces that have received extensions since their serialized
+ /// \brief DeclContexts that have received extensions since their serialized
/// form.
///
- /// Basically, when we're chaining and encountering a namespace, we check if
+ /// For namespaces, when we're chaining and encountering a namespace, we check if
/// its primary namespace comes from the chain. If it does, we add the primary
/// to this set, so that we can write out lexical content updates for it.
- llvm::SmallPtrSet<const NamespaceDecl *, 16> UpdatedNamespaces;
+ llvm::SmallPtrSet<const DeclContext *, 16> UpdatedDeclContexts;
+
+ typedef llvm::SmallPtrSet<const Decl *, 16> DeclsToRewriteTy;
+ /// \brief Decls that will be replaced in the current dependent AST file.
+ DeclsToRewriteTy DeclsToRewrite;
/// \brief Decls that have been replaced in the current dependent AST file.
///
@@ -217,16 +250,6 @@ private:
llvm::SmallVector<std::pair<serialization::DeclID, uint64_t>, 16>
ReplacedDecls;
- typedef llvm::SmallVector<serialization::DeclID, 4>
- AdditionalTemplateSpecializationsList;
- typedef llvm::DenseMap<serialization::DeclID,
- AdditionalTemplateSpecializationsList>
- AdditionalTemplateSpecializationsMap;
-
- /// \brief Additional specializations (including partial) of templates that
- /// were introduced after the template was serialized.
- AdditionalTemplateSpecializationsMap AdditionalTemplateSpecializations;
-
/// \brief Statements that we've encountered while serializing a
/// declaration or type.
llvm::SmallVector<Stmt *, 16> StmtsToEmit;
@@ -238,8 +261,8 @@ private:
/// \brief Mapping from SwitchCase statements to IDs.
std::map<SwitchCase *, unsigned> SwitchCaseIDs;
- /// \brief Mapping from LabelStmt statements to IDs.
- std::map<LabelStmt *, unsigned> LabelIDs;
+ /// \brief Mapping from OpaqueValueExpr expressions to IDs.
+ llvm::DenseMap<OpaqueValueExpr *, unsigned> OpaqueValues;
/// \brief The number of statements written to the AST file.
unsigned NumStatements;
@@ -255,17 +278,50 @@ private:
/// file.
unsigned NumVisibleDeclContexts;
+ /// \brief The offset of each CXXBaseSpecifier set within the AST.
+ llvm::SmallVector<uint32_t, 4> CXXBaseSpecifiersOffsets;
+
+ /// \brief The first ID number we can use for our own base specifiers.
+ serialization::CXXBaseSpecifiersID FirstCXXBaseSpecifiersID;
+
+ /// \brief The base specifiers ID that will be assigned to the next new
+ /// set of C++ base specifiers.
+ serialization::CXXBaseSpecifiersID NextCXXBaseSpecifiersID;
+
+ /// \brief A set of C++ base specifiers that is queued to be written into the
+ /// AST file.
+ struct QueuedCXXBaseSpecifiers {
+ QueuedCXXBaseSpecifiers() : ID(), Bases(), BasesEnd() { }
+
+ QueuedCXXBaseSpecifiers(serialization::CXXBaseSpecifiersID ID,
+ CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd)
+ : ID(ID), Bases(Bases), BasesEnd(BasesEnd) { }
+
+ serialization::CXXBaseSpecifiersID ID;
+ CXXBaseSpecifier const * Bases;
+ CXXBaseSpecifier const * BasesEnd;
+ };
+
+ /// \brief Queue of C++ base specifiers to be written to the AST file,
+ /// in the order they should be written.
+ llvm::SmallVector<QueuedCXXBaseSpecifiers, 2> CXXBaseSpecifiersToWrite;
+
/// \brief Write the given subexpression to the bitstream.
void WriteSubStmt(Stmt *S);
void WriteBlockInfoBlock();
- void WriteMetadata(ASTContext &Context, const char *isysroot);
+ void WriteMetadata(ASTContext &Context, const char *isysroot,
+ const std::string &OutputFile);
void WriteLanguageOptions(const LangOptions &LangOpts);
void WriteStatCache(MemorizeStatCalls &StatCalls);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP,
const char* isysroot);
void WritePreprocessor(const Preprocessor &PP);
+ void WriteHeaderSearch(HeaderSearch &HS, const char* isysroot);
+ void WritePreprocessorDetail(PreprocessingRecord &PPRec);
+ void WritePragmaDiagnosticMappings(const Diagnostic &Diag);
void WriteType(QualType T);
uint64_t WriteDeclContextLexicalBlock(ASTContext &Context, DeclContext *DC);
uint64_t WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC);
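
The QueuedCXXBaseSpecifiers machinery above separates ID assignment from emission: AddCXXBaseSpecifiersRef can hand out NextCXXBaseSpecifiersID immediately while the payload waits in CXXBaseSpecifiersToWrite until FlushCXXBaseSpecifiers drains the queue and fills in CXXBaseSpecifiersOffsets. A standalone sketch of that assign-now, write-later discipline (simplified types, not the patch's API):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Writer {
      struct Queued { unsigned ID; std::string Payload; };
      unsigned NextID;
      std::vector<Queued> ToWrite;
      std::vector<uint32_t> Offsets; // offset of each flushed set, by ID - 1

      Writer() : NextID(1) {}

      // Hand out an ID immediately; defer the actual emission.
      unsigned addRef(const std::string &Payload) {
        Queued Q = {NextID, Payload};
        ToWrite.push_back(Q);
        return NextID++;
      }

      // Later, write everything in the order it was queued.
      void flush(uint32_t &StreamPos) {
        for (size_t i = 0; i != ToWrite.size(); ++i) {
          Offsets.push_back(StreamPos);
          StreamPos += ToWrite[i].Payload.size(); // stand-in for emission
        }
        ToWrite.clear();
      }
    };

    int main() {
      Writer W;
      unsigned A = W.addRef("bases-of-A"), B = W.addRef("bases-of-B");
      uint32_t Pos = 0;
      W.flush(Pos);
      std::cout << "A=" << A << "@" << W.Offsets[A - 1]
                << " B=" << B << "@" << W.Offsets[B - 1] << '\n';
      return 0;
    }
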
@@ -273,10 +329,12 @@ private:
void WriteSelectors(Sema &SemaRef);
void WriteReferencedSelectorsPool(Sema &SemaRef);
void WriteIdentifierTable(Preprocessor &PP);
- void WriteAttributeRecord(const AttrVec &Attrs);
- void WriteDeclUpdateBlock();
+ void WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record);
+ void WriteDeclUpdatesBlocks();
+ void WriteDeclReplacementsBlock();
void WriteDeclContextVisibleUpdate(const DeclContext *DC);
- void WriteAdditionalTemplateSpecializations();
+ void WriteFPPragmaOptions(const FPOptions &Opts);
+ void WriteOpenCLExtensions(Sema &SemaRef);
unsigned ParmVarDeclAbbrev;
unsigned DeclContextLexicalAbbrev;
@@ -286,7 +344,7 @@ private:
void WriteDecl(ASTContext &Context, Decl *D);
void WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
- const char* isysroot);
+ const char* isysroot, const std::string &OutputFile);
void WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
const char* isysroot);
@@ -295,6 +353,12 @@ public:
/// the given bitstream.
ASTWriter(llvm::BitstreamWriter &Stream);
+ /// \brief Set the listener that will receive notification of serialization
+ /// events.
+ void SetSerializationListener(ASTSerializationListener *Listener) {
+ SerializationListener = Listener;
+ }
+
/// \brief Write a precompiled header for the given semantic analysis.
///
/// \param SemaRef a reference to the semantic analysis object that processed
@@ -309,32 +373,38 @@ public:
/// \param PPRec Record of the preprocessing actions that occurred while
/// preprocessing this file, e.g., macro instantiations
void WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const std::string &OutputFile,
const char* isysroot);
/// \brief Emit a source location.
- void AddSourceLocation(SourceLocation Loc, RecordData &Record);
+ void AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record);
/// \brief Emit a source range.
- void AddSourceRange(SourceRange Range, RecordData &Record);
+ void AddSourceRange(SourceRange Range, RecordDataImpl &Record);
/// \brief Emit an integral value.
- void AddAPInt(const llvm::APInt &Value, RecordData &Record);
+ void AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record);
/// \brief Emit a signed integral value.
- void AddAPSInt(const llvm::APSInt &Value, RecordData &Record);
+ void AddAPSInt(const llvm::APSInt &Value, RecordDataImpl &Record);
/// \brief Emit a floating-point value.
- void AddAPFloat(const llvm::APFloat &Value, RecordData &Record);
+ void AddAPFloat(const llvm::APFloat &Value, RecordDataImpl &Record);
/// \brief Emit a reference to an identifier.
- void AddIdentifierRef(const IdentifierInfo *II, RecordData &Record);
+ void AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record);
/// \brief Emit a Selector (which is a smart pointer reference).
- void AddSelectorRef(Selector, RecordData &Record);
+ void AddSelectorRef(Selector, RecordDataImpl &Record);
/// \brief Emit a CXXTemporary.
- void AddCXXTemporary(const CXXTemporary *Temp, RecordData &Record);
+ void AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record);
+ /// \brief Emit a set of C++ base specifiers to the record.
+ void AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd,
+ RecordDataImpl &Record);
+
/// \brief Get the unique number used to refer to the given selector.
serialization::SelectorID getSelectorRef(Selector Sel);
@@ -353,10 +423,10 @@ public:
/// \brief Retrieve the ID number corresponding to the given macro
/// definition.
- serialization::IdentID getMacroDefinitionID(MacroDefinition *MD);
+ serialization::MacroID getMacroDefinitionID(MacroDefinition *MD);
/// \brief Emit a reference to a type.
- void AddTypeRef(QualType T, RecordData &Record);
+ void AddTypeRef(QualType T, RecordDataImpl &Record);
/// \brief Force a type to be emitted and get its ID.
serialization::TypeID GetOrCreateTypeID(QualType T);
@@ -371,20 +441,21 @@ public:
serialization::TypeIdx getTypeIdx(QualType T) const;
/// \brief Emits a reference to a declarator info.
- void AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record);
+ void AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordDataImpl &Record);
/// \brief Emits a template argument location info.
void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
const TemplateArgumentLocInfo &Arg,
- RecordData &Record);
+ RecordDataImpl &Record);
/// \brief Emits a template argument location.
void AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
- RecordData &Record);
+ RecordDataImpl &Record);
/// \brief Emit a reference to a declaration.
- void AddDeclRef(const Decl *D, RecordData &Record);
+ void AddDeclRef(const Decl *D, RecordDataImpl &Record);
+
/// \brief Force a declaration to be emitted and get its ID.
serialization::DeclID GetDeclRef(const Decl *D);
@@ -393,49 +464,57 @@ public:
serialization::DeclID getDeclID(const Decl *D);
/// \brief Emit a declaration name.
- void AddDeclarationName(DeclarationName Name, RecordData &Record);
+ void AddDeclarationName(DeclarationName Name, RecordDataImpl &Record);
+ void AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
+ DeclarationName Name, RecordDataImpl &Record);
+ void AddDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ RecordDataImpl &Record);
+
+ void AddQualifierInfo(const QualifierInfo &Info, RecordDataImpl &Record);
/// \brief Emit a nested name specifier.
- void AddNestedNameSpecifier(NestedNameSpecifier *NNS, RecordData &Record);
+ void AddNestedNameSpecifier(NestedNameSpecifier *NNS, RecordDataImpl &Record);
/// \brief Emit a template name.
- void AddTemplateName(TemplateName Name, RecordData &Record);
+ void AddTemplateName(TemplateName Name, RecordDataImpl &Record);
/// \brief Emit a template argument.
- void AddTemplateArgument(const TemplateArgument &Arg, RecordData &Record);
+ void AddTemplateArgument(const TemplateArgument &Arg, RecordDataImpl &Record);
/// \brief Emit a template parameter list.
void AddTemplateParameterList(const TemplateParameterList *TemplateParams,
- RecordData &Record);
+ RecordDataImpl &Record);
/// \brief Emit a template argument list.
void AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
- RecordData &Record);
+ RecordDataImpl &Record);
/// \brief Emit a UnresolvedSet structure.
- void AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record);
+ void AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordDataImpl &Record);
/// \brief Emit a C++ base specifier.
- void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, RecordData &Record);
+ void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, RecordDataImpl &Record);
+
+ /// \brief Emit a CXXCtorInitializer array.
+ void AddCXXCtorInitializers(
+ const CXXCtorInitializer * const *CtorInitializers,
+ unsigned NumCtorInitializers,
+ RecordDataImpl &Record);
- /// \brief Emit a CXXBaseOrMemberInitializer array.
- void AddCXXBaseOrMemberInitializers(
- const CXXBaseOrMemberInitializer * const *BaseOrMembers,
- unsigned NumBaseOrMembers, RecordData &Record);
+ void AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record);
/// \brief Add a string to the given record.
- void AddString(llvm::StringRef Str, RecordData &Record);
+ void AddString(llvm::StringRef Str, RecordDataImpl &Record);
- /// \brief Mark a namespace as needing an update.
- void AddUpdatedNamespace(const NamespaceDecl *NS) {
- UpdatedNamespaces.insert(NS);
+ /// \brief Mark a declaration context as needing an update.
+ void AddUpdatedDeclContext(const DeclContext *DC) {
+ UpdatedDeclContexts.insert(DC);
}
- /// \brief Record a template specialization or partial specialization of
- /// a template from a previous PCH file.
- void AddAdditionalTemplateSpecialization(serialization::DeclID Templ,
- serialization::DeclID Spec) {
- AdditionalTemplateSpecializations[Templ].push_back(Spec);
+ void RewriteDecl(const Decl *D) {
+ DeclsToRewrite.insert(D);
+ // Reset the flag, so that we don't add this decl multiple times.
+ const_cast<Decl *>(D)->setChangedSinceDeserialization(false);
}
/// \brief Note that the identifier II occurs at the given offset
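
The wholesale RecordData-to-RecordDataImpl migration above is the standard SmallVector type-erasure idiom: by taking llvm::SmallVectorImpl<uint64_t>&, each Add* helper accepts a SmallVector of any inline size rather than only the 64-element RecordData. A trivial standalone sketch of the idiom:

    #include "llvm/ADT/SmallVector.h"
    #include <cstdint>

    // Sketch: accept any inline capacity by taking the Impl base class.
    static void addPair(uint64_t A, uint64_t B,
                        llvm::SmallVectorImpl<uint64_t> &Record) {
      Record.push_back(A);
      Record.push_back(B);
    }

    void demo() {
      llvm::SmallVector<uint64_t, 2> Tiny;
      llvm::SmallVector<uint64_t, 64> Big;
      addPair(1, 2, Tiny); // both work with the same helper
      addPair(3, 4, Big);
    }
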
@@ -462,32 +541,46 @@ public:
/// been added to the queue via AddStmt().
void FlushStmts();
+ /// \brief Flush all of the C++ base specifier sets that have been added
+ /// via \c AddCXXBaseSpecifiersRef().
+ void FlushCXXBaseSpecifiers();
+
/// \brief Record an ID for the given switch-case statement.
unsigned RecordSwitchCaseID(SwitchCase *S);
/// \brief Retrieve the ID for the given switch-case statement.
unsigned getSwitchCaseID(SwitchCase *S);
- /// \brief Retrieve the ID for the given label statement, which may
- /// or may not have been emitted yet.
- unsigned GetLabelID(LabelStmt *S);
+ void ClearSwitchCaseIDs();
+
+ /// \brief Retrieve the ID for the given opaque value expression.
+ unsigned getOpaqueValueID(OpaqueValueExpr *e);
unsigned getParmVarDeclAbbrev() const { return ParmVarDeclAbbrev; }
bool hasChain() const { return Chain; }
// ASTDeserializationListener implementation
- void SetReader(ASTReader *Reader);
+ void ReaderInitialized(ASTReader *Reader);
void IdentifierRead(serialization::IdentID ID, IdentifierInfo *II);
void TypeRead(serialization::TypeIdx Idx, QualType T);
void DeclRead(serialization::DeclID ID, const Decl *D);
- void SelectorRead(serialization::SelectorID iD, Selector Sel);
+ void SelectorRead(serialization::SelectorID ID, Selector Sel);
+ void MacroDefinitionRead(serialization::MacroID ID, MacroDefinition *MD);
+
+ // ASTMutationListener implementation.
+ virtual void CompletedTagDefinition(const TagDecl *D);
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D);
};
/// \brief AST and semantic-analysis consumer that generates a
/// precompiled header from the parsed source code.
class PCHGenerator : public SemaConsumer {
const Preprocessor &PP;
+ std::string OutputFile;
const char *isysroot;
llvm::raw_ostream *Out;
Sema *SemaPtr;
@@ -495,16 +588,19 @@ class PCHGenerator : public SemaConsumer {
std::vector<unsigned char> Buffer;
llvm::BitstreamWriter Stream;
ASTWriter Writer;
+ bool Chaining;
protected:
ASTWriter &getWriter() { return Writer; }
const ASTWriter &getWriter() const { return Writer; }
public:
- PCHGenerator(const Preprocessor &PP, bool Chaining,
+ PCHGenerator(const Preprocessor &PP, const std::string &OutputFile, bool Chaining,
const char *isysroot, llvm::raw_ostream *Out);
virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
virtual void HandleTranslationUnit(ASTContext &Ctx);
+ virtual ASTMutationListener *GetASTMutationListener();
+ virtual ASTSerializationListener *GetASTSerializationListener();
virtual ASTDeserializationListener *GetASTDeserializationListener();
};
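
With the constructor change above, callers of PCHGenerator now supply the output path, which the writer threads through to WriteMetadata. A hedged usage sketch, assuming a configured Preprocessor and output stream already exist; the file name is illustrative:

    #include "clang/Serialization/ASTWriter.h"
    #include "llvm/Support/raw_ostream.h"

    // Sketch: build a (non-chained) PCH generator under the new signature.
    clang::ASTConsumer *makePCHGenerator(const clang::Preprocessor &PP,
                                         llvm::raw_ostream *OS) {
      return new clang::PCHGenerator(PP, "prefix.pch", /*Chaining=*/false,
                                     /*isysroot=*/0, OS);
    }
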
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
new file mode 100644
index 0000000..e452ccf
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
@@ -0,0 +1,38 @@
+//===--- CheckerBase.td - Checker TableGen classes ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen core definitions for checkers
+//
+//===----------------------------------------------------------------------===//
+
+class Package<string name> {
+ string PackageName = name;
+ bit Hidden = 0;
+ Package ParentPackage;
+}
+class InPackage<Package P> { Package ParentPackage = P; }
+
+class CheckerGroup<string name> {
+ string GroupName = name;
+}
+class InGroup<CheckerGroup G> { CheckerGroup Group = G; }
+
+// All checkers are an indirect subclass of this.
+class Checker<string name = ""> {
+ string CheckerName = name;
+ string DescFile;
+ string HelpText;
+ bit Hidden = 0;
+ Package ParentPackage;
+ CheckerGroup Group;
+}
+
+class DescFile<string filename> { string DescFile = filename; }
+class HelpText<string text> { string HelpText = text; }
+class Hidden { bit Hidden = 1; }
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/Checkers/DereferenceChecker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
index a84183e..f9cce9c 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/Checkers/DereferenceChecker.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
@@ -8,23 +8,27 @@
//===----------------------------------------------------------------------===//
//
// This defines NullDerefChecker and UndefDerefChecker, two builtin checks
-// in GRExprEngine that check for null and undefined pointers at loads
+// in ExprEngine that check for null and undefined pointers at loads
// and stores.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_DEREFCHECKER
-#define LLVM_CLANG_DEREFCHECKER
+#ifndef LLVM_CLANG_GR_DEREFCHECKER
+#define LLVM_CLANG_GR_DEREFCHECKER
#include <utility>
namespace clang {
-class GRExprEngine;
+namespace ento {
+
+class ExprEngine;
class ExplodedNode;
std::pair<ExplodedNode * const *, ExplodedNode * const *>
-GetImplicitNullDereferences(GRExprEngine &Eng);
+GetImplicitNullDereferences(ExprEngine &Eng);
+
+} // end GR namespace
} // end clang namespace
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
new file mode 100644
index 0000000..42feb78
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
@@ -0,0 +1,51 @@
+//==- LocalCheckers.h - Intra-Procedural+Flow-Sensitive Checkers -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to call a set of intra-procedural (local)
+// checkers that use flow/path-sensitive analyses to find bugs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_LOCALCHECKERS_H
+#define LLVM_CLANG_GR_LOCALCHECKERS_H
+
+namespace clang {
+
+class CFG;
+class Decl;
+class Diagnostic;
+class ASTContext;
+class LangOptions;
+class ParentMap;
+class LiveVariables;
+class ObjCImplementationDecl;
+class LangOptions;
+class TranslationUnitDecl;
+
+namespace ento {
+
+class PathDiagnosticClient;
+class TransferFuncs;
+class BugType;
+class BugReporter;
+class ExprEngine;
+
+TransferFuncs* MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+ const LangOptions& lopts);
+
+void RegisterExperimentalChecks(ExprEngine &Eng);
+void RegisterExperimentalInternalChecks(ExprEngine &Eng);
+
+void RegisterCallInliner(ExprEngine &Eng);
+
+} // end GR namespace
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 370d965..1786fe6 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -12,11 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_BUGREPORTER
-#define LLVM_CLANG_ANALYSIS_BUGREPORTER
+#ifndef LLVM_CLANG_GR_BUGREPORTER
+#define LLVM_CLANG_GR_BUGREPORTER
#include "clang/Basic/SourceLocation.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableSet.h"
@@ -25,20 +25,23 @@
namespace clang {
+class ASTContext;
+class Diagnostic;
+class Stmt;
+class ParentMap;
+
+namespace ento {
+
class PathDiagnostic;
class PathDiagnosticPiece;
class PathDiagnosticClient;
-class ASTContext;
-class Diagnostic;
class ExplodedNode;
class ExplodedGraph;
class BugReporter;
class BugReporterContext;
-class GRExprEngine;
+class ExprEngine;
class GRState;
-class Stmt;
class BugType;
-class ParentMap;
//===----------------------------------------------------------------------===//
// Interface for individual bug reports.
@@ -61,8 +64,8 @@ protected:
BugType& BT;
std::string ShortDescription;
std::string Description;
- const ExplodedNode *EndNode;
- SourceRange R;
+ const ExplodedNode *ErrorNode;
+ mutable SourceRange R;
protected:
friend class BugReporter;
@@ -81,12 +84,13 @@ public:
getOriginalNode(const ExplodedNode* N) = 0;
};
- BugReport(BugType& bt, llvm::StringRef desc, const ExplodedNode *n)
- : BT(bt), Description(desc), EndNode(n) {}
+ BugReport(BugType& bt, llvm::StringRef desc, const ExplodedNode *errornode)
+ : BT(bt), Description(desc), ErrorNode(errornode) {}
BugReport(BugType& bt, llvm::StringRef shortDesc, llvm::StringRef desc,
- const ExplodedNode *n)
- : BT(bt), ShortDescription(shortDesc), Description(desc), EndNode(n) {}
+ const ExplodedNode *errornode)
+ : BT(bt), ShortDescription(shortDesc), Description(desc),
+ ErrorNode(errornode) {}
virtual ~BugReport();
@@ -96,7 +100,7 @@ public:
BugType& getBugType() { return BT; }
// FIXME: Perhaps this should be moved into a subclass?
- const ExplodedNode* getEndNode() const { return EndNode; }
+ const ExplodedNode* getErrorNode() const { return ErrorNode; }
// FIXME: Do we need this? Maybe getLocation() should return a ProgramPoint
// object.
@@ -125,8 +129,10 @@ public:
/// This location is used by clients rendering diagnostics.
virtual SourceLocation getLocation() const;
+ typedef const SourceRange *ranges_iterator;
+
/// getRanges - Returns the source ranges associated with this bug.
- virtual void getRanges(const SourceRange*& beg, const SourceRange*& end);
+ virtual std::pair<ranges_iterator, ranges_iterator> getRanges() const;
virtual PathDiagnosticPiece* VisitNode(const ExplodedNode* N,
const ExplodedNode* PrevN,
@@ -191,14 +197,15 @@ public:
// FIXME: Collapse this with the default BugReport class.
class RangedBugReport : public BugReport {
- std::vector<SourceRange> Ranges;
+ llvm::SmallVector<SourceRange, 4> Ranges;
public:
- RangedBugReport(BugType& D, llvm::StringRef description, ExplodedNode *n)
- : BugReport(D, description, n) {}
+ RangedBugReport(BugType& D, llvm::StringRef description,
+ ExplodedNode *errornode)
+ : BugReport(D, description, errornode) {}
RangedBugReport(BugType& D, llvm::StringRef shortDescription,
- llvm::StringRef description, ExplodedNode *n)
- : BugReport(D, shortDescription, description, n) {}
+ llvm::StringRef description, ExplodedNode *errornode)
+ : BugReport(D, shortDescription, description, errornode) {}
~RangedBugReport();
@@ -208,17 +215,8 @@ public:
Ranges.push_back(R);
}
- // FIXME: Move this out of line.
- void getRanges(const SourceRange*& beg, const SourceRange*& end) {
-
- if (Ranges.empty()) {
- beg = NULL;
- end = NULL;
- }
- else {
- beg = &Ranges[0];
- end = beg + Ranges.size();
- }
+ virtual std::pair<ranges_iterator, ranges_iterator> getRanges() const {
+ return std::make_pair(Ranges.begin(), Ranges.end());
}
};
@@ -232,12 +230,13 @@ private:
Creators creators;
public:
- EnhancedBugReport(BugType& D, llvm::StringRef description, ExplodedNode *n)
- : RangedBugReport(D, description, n) {}
+ EnhancedBugReport(BugType& D, llvm::StringRef description,
+ ExplodedNode *errornode)
+ : RangedBugReport(D, description, errornode) {}
EnhancedBugReport(BugType& D, llvm::StringRef shortDescription,
- llvm::StringRef description, ExplodedNode *n)
- : RangedBugReport(D, shortDescription, description, n) {}
+ llvm::StringRef description, ExplodedNode *errornode)
+ : RangedBugReport(D, shortDescription, description, errornode) {}
~EnhancedBugReport() {}
@@ -279,10 +278,12 @@ private:
void FlushReport(BugReportEquivClass& EQ);
protected:
- BugReporter(BugReporterData& d, Kind k) : BugTypes(F.GetEmptySet()), kind(k), D(d) {}
+ BugReporter(BugReporterData& d, Kind k) : BugTypes(F.getEmptySet()), kind(k),
+ D(d) {}
public:
- BugReporter(BugReporterData& d) : BugTypes(F.GetEmptySet()), kind(BaseBRKind), D(d) {}
+ BugReporter(BugReporterData& d) : BugTypes(F.getEmptySet()), kind(BaseBRKind),
+ D(d) {}
virtual ~BugReporter();
void FlushReports();
@@ -305,8 +306,8 @@ public:
SourceManager& getSourceManager() { return D.getSourceManager(); }
- virtual void GeneratePathDiagnostic(PathDiagnostic& PD,
- BugReportEquivClass& EQ) {}
+ virtual void GeneratePathDiagnostic(PathDiagnostic& pathDiagnostic,
+ llvm::SmallVectorImpl<BugReport *> &bugReports) {}
void Register(BugType *BT);
@@ -347,17 +348,17 @@ public:
// FIXME: Get rid of GRBugReporter. It's the wrong abstraction.
class GRBugReporter : public BugReporter {
- GRExprEngine& Eng;
+ ExprEngine& Eng;
llvm::SmallSet<SymbolRef, 10> NotableSymbols;
public:
- GRBugReporter(BugReporterData& d, GRExprEngine& eng)
+ GRBugReporter(BugReporterData& d, ExprEngine& eng)
: BugReporter(d, GRBugReporterKind), Eng(eng) {}
virtual ~GRBugReporter();
/// getEngine - Return the analysis engine used to analyze a given
/// function or method.
- GRExprEngine &getEngine() { return Eng; }
+ ExprEngine &getEngine() { return Eng; }
/// getGraph - Get the exploded graph created by the analysis engine
/// for the analyzed method or function.
@@ -367,8 +368,8 @@ public:
/// engine.
GRStateManager &getStateManager();
- virtual void GeneratePathDiagnostic(PathDiagnostic& PD,
- BugReportEquivClass& R);
+ virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic,
+ llvm::SmallVectorImpl<BugReport*> &bugReports);
void addNotableSymbol(SymbolRef Sym) {
NotableSymbols.insert(Sym);
@@ -392,7 +393,7 @@ class BugReporterContext {
llvm::ImmutableList<BugReporterVisitor*> Callbacks;
llvm::FoldingSet<BugReporterVisitor> CallbacksSet;
public:
- BugReporterContext(GRBugReporter& br) : BR(br), Callbacks(F.GetEmptyList()) {}
+ BugReporterContext(GRBugReporter& br) : BR(br), Callbacks(F.getEmptyList()) {}
virtual ~BugReporterContext();
void addVisitor(BugReporterVisitor* visitor);
@@ -419,8 +420,8 @@ public:
return BR.getStateManager();
}
- ValueManager& getValueManager() {
- return getStateManager().getValueManager();
+ SValBuilder& getSValBuilder() {
+ return getStateManager().getSValBuilder();
}
ASTContext& getASTContext() {
@@ -478,6 +479,8 @@ void registerVarDeclsLastStore(BugReporterContext &BRC, const void *stmt,
//===----------------------------------------------------------------------===//
+} // end GR namespace
+
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugType.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
index afc07c8..2793284 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugType.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -14,14 +14,16 @@
#ifndef LLVM_CLANG_ANALYSIS_BUGTYPE
#define LLVM_CLANG_ANALYSIS_BUGTYPE
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "llvm/ADT/FoldingSet.h"
#include <string>
namespace clang {
+namespace ento {
+
class ExplodedNode;
-class GRExprEngine;
+class ExprEngine;
class BugType {
private:
@@ -68,5 +70,7 @@ public:
llvm::StringRef getDescription() const { return desc; }
};
+} // end GR namespace
+
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/PathDiagnostic.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
index 24c75ce..6d53c09 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/PathDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
@@ -27,6 +27,8 @@ class Decl;
class SourceManager;
class Stmt;
+namespace ento {
+
//===----------------------------------------------------------------------===//
// High-level interface for handlers of path-sensitive diagnostics.
//===----------------------------------------------------------------------===//
@@ -490,5 +492,9 @@ public:
void Profile(llvm::FoldingSetNodeID &ID) const;
};
+
+} // end GR namespace
+
} //end clang namespace
+
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
new file mode 100644
index 0000000..65c8b80
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -0,0 +1,109 @@
+//===--- CheckerManager.h - Static Analyzer Checker Manager -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_CHECKERMANAGER_H
+#define LLVM_CLANG_SA_CORE_CHECKERMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace clang {
+ class Decl;
+
+namespace ento {
+ class ExprEngine;
+ class AnalysisManager;
+ class BugReporter;
+
+class CheckerManager {
+public:
+ ~CheckerManager();
+
+ typedef void *CheckerRef;
+
+//===----------------------------------------------------------------------===//
+// registerChecker
+//===----------------------------------------------------------------------===//
+
+ /// \brief Used to register checkers.
+ template <typename CHECKER>
+ void registerChecker() {
+ CHECKER *checker = new CHECKER();
+ Checkers.push_back(std::pair<CheckerRef, Dtor>(checker, destruct<CHECKER>));
+ CHECKER::_register(checker, *this);
+ }
+
+ typedef void (*RegisterToEngFunc)(ExprEngine &Eng);
+ void addCheckerRegisterFunction(RegisterToEngFunc fn) {
+ Funcs.push_back(fn);
+ }
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers.
+//===----------------------------------------------------------------------===//
+
+ /// \brief Run checkers handling Decls.
+ void runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR);
+
+ /// \brief Run checkers handling Decls containing a Stmt body.
+ void runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR);
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions.
+//===----------------------------------------------------------------------===//
+
+ // Functions used by the registration mechanism, checkers should not touch
+ // these directly.
+
+ typedef void (*CheckDeclFunc)(CheckerRef checker, const Decl *D,
+ AnalysisManager& mgr, BugReporter &BR);
+ typedef bool (*HandlesDeclFunc)(const Decl *D);
+ void _registerForDecl(CheckerRef checker, CheckDeclFunc checkfn,
+ HandlesDeclFunc isForDeclFn);
+
+ void _registerForBody(CheckerRef checker, CheckDeclFunc checkfn);
+
+ void registerCheckersToEngine(ExprEngine &eng);
+
+private:
+ template <typename CHECKER>
+ static void destruct(void *obj) { delete static_cast<CHECKER *>(obj); }
+
+ std::vector<RegisterToEngFunc> Funcs;
+
+ struct DeclCheckerInfo {
+ CheckerRef Checker;
+ CheckDeclFunc CheckFn;
+ HandlesDeclFunc IsForDeclFn;
+ };
+ std::vector<DeclCheckerInfo> DeclCheckers;
+
+ std::vector<std::pair<CheckerRef, CheckDeclFunc> > BodyCheckers;
+
+ typedef void (*Dtor)(void *);
+ std::vector<std::pair<CheckerRef, Dtor> > Checkers;
+
+ typedef llvm::SmallVector<std::pair<CheckerRef, CheckDeclFunc>, 4>
+ CachedDeclCheckers;
+ typedef llvm::DenseMap<unsigned, CachedDeclCheckers> CachedDeclCheckersMapTy;
+ CachedDeclCheckersMapTy CachedDeclCheckersMap;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
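
registerChecker above imposes a small structural contract: CHECKER must be default-constructible and expose a static _register hook, and teardown is handled by the stored destruct<CHECKER> pointer. A minimal sketch of a type meeting that contract; the checker itself is an illustrative no-op:

    #include "clang/StaticAnalyzer/Core/CheckerManager.h"

    namespace {
    // Sketch: the smallest shape registerChecker<> will accept.
    class NoopChecker {
    public:
      // Called back by CheckerManager::registerChecker with the new instance.
      static void _register(NoopChecker *checker,
                            clang::ento::CheckerManager &mgr) {
        // A real checker would call mgr._registerForDecl or
        // mgr._registerForBody here.
        (void)checker;
        (void)mgr;
      }
    };
    } // end anonymous namespace

    void addNoop(clang::ento::CheckerManager &Mgr) {
      Mgr.registerChecker<NoopChecker>();
    }
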
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerProvider.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerProvider.h
new file mode 100644
index 0000000..414ad92
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerProvider.h
@@ -0,0 +1,54 @@
+//===--- CheckerProvider.h - Static Analyzer Checkers Provider --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Provider.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_CHECKERPROVIDER_H
+#define LLVM_CLANG_SA_CORE_CHECKERPROVIDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace clang {
+
+namespace ento {
+ class CheckerManager;
+
+class CheckerOptInfo {
+ const char *Name;
+ bool Enable;
+ bool Claimed;
+
+public:
+ CheckerOptInfo(const char *name, bool enable)
+ : Name(name), Enable(enable), Claimed(false) { }
+
+ const char *getName() const { return Name; }
+ bool isEnabled() const { return Enable; }
+ bool isDisabled() const { return !isEnabled(); }
+
+ bool isClaimed() const { return Claimed; }
+ bool isUnclaimed() const { return !isClaimed(); }
+ void claim() { Claimed = true; }
+};
+
+class CheckerProvider {
+public:
+ virtual ~CheckerProvider();
+ virtual void registerCheckers(CheckerManager &checkerMgr,
+ CheckerOptInfo *checkOpts, unsigned numCheckOpts) = 0;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
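
The claim() flag on CheckerOptInfo lets a provider mark which checker options it understood, so unclaimed ones can be diagnosed elsewhere. A sketch of a provider that recognizes a single made-up checker name:

    #include "clang/StaticAnalyzer/Core/CheckerProvider.h"
    #include <cstring>

    namespace {
    class DemoProvider : public clang::ento::CheckerProvider {
    public:
      virtual void registerCheckers(clang::ento::CheckerManager &checkerMgr,
                                    clang::ento::CheckerOptInfo *checkOpts,
                                    unsigned numCheckOpts) {
        (void)checkerMgr;
        for (unsigned i = 0; i != numCheckOpts; ++i) {
          clang::ento::CheckerOptInfo &Opt = checkOpts[i];
          // Claim only the option this provider understands.
          if (std::strcmp(Opt.getName(), "demo.Noop") == 0) {
            Opt.claim();
            if (Opt.isEnabled()) {
              // A real provider would call
              // checkerMgr.registerChecker<...>() here.
            }
          }
        }
      }
    };
    } // end anonymous namespace
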
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerV2.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerV2.h
new file mode 100644
index 0000000..8c96866
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerV2.h
@@ -0,0 +1,93 @@
+//== CheckerV2.h - Registration mechanism for checkers -----------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckerV2, used to create and register checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_CHECKERV2
+#define LLVM_CLANG_SA_CORE_CHECKERV2
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "llvm/Support/Casting.h"
+
+namespace clang {
+namespace ento {
+ class BugReporter;
+
+namespace check {
+
+struct _VoidCheck {
+ static void _register(void *checker, CheckerManager &mgr) { }
+};
+
+template <typename DECL>
+class ASTDecl {
+ template <typename CHECKER>
+ static void _checkDecl(void *checker, const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ ((const CHECKER *)checker)->checkASTDecl(llvm::cast<DECL>(D), mgr, BR);
+ }
+
+ static bool _handlesDecl(const Decl *D) {
+ return llvm::isa<DECL>(D);
+ }
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForDecl(checker, _checkDecl<CHECKER>, _handlesDecl);
+ }
+};
+
+class ASTCodeBody {
+ template <typename CHECKER>
+ static void _checkBody(void *checker, const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ ((const CHECKER *)checker)->checkASTCodeBody(D, mgr, BR);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForBody(checker, _checkBody<CHECKER>);
+ }
+};
+
+} // end check namespace
+
+template <typename CHECK1, typename CHECK2=check::_VoidCheck,
+ typename CHECK3=check::_VoidCheck, typename CHECK4=check::_VoidCheck,
+ typename CHECK5=check::_VoidCheck, typename CHECK6=check::_VoidCheck,
+ typename CHECK7=check::_VoidCheck, typename CHECK8=check::_VoidCheck,
+ typename CHECK9=check::_VoidCheck, typename CHECK10=check::_VoidCheck,
+ typename CHECK11=check::_VoidCheck,typename CHECK12=check::_VoidCheck>
+class CheckerV2 {
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ CHECK1::_register(checker, mgr);
+ CHECK2::_register(checker, mgr);
+ CHECK3::_register(checker, mgr);
+ CHECK4::_register(checker, mgr);
+ CHECK5::_register(checker, mgr);
+ CHECK6::_register(checker, mgr);
+ CHECK7::_register(checker, mgr);
+ CHECK8::_register(checker, mgr);
+ CHECK9::_register(checker, mgr);
+ CHECK10::_register(checker, mgr);
+ CHECK11::_register(checker, mgr);
+ CHECK12::_register(checker, mgr);
+ }
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
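
[Editorial sketch, not part of this patch: a checker built on this mechanism lists the check:: policy classes it wants as template arguments; each policy's _register hooks the correspondingly named member function into the CheckerManager. The callbacks are invoked through a const pointer, so they must be const member functions. ExampleChecker below is hypothetical.]

  #include "clang/StaticAnalyzer/Core/CheckerV2.h"

  using namespace clang;
  using namespace ento;

  class ExampleChecker
    : public CheckerV2<check::ASTDecl<FunctionDecl>, check::ASTCodeBody> {
  public:
    // Called for every FunctionDecl (via check::ASTDecl<FunctionDecl>).
    void checkASTDecl(const FunctionDecl *D, AnalysisManager &mgr,
                      BugReporter &BR) const { /* inspect D */ }
    // Called for declarations with a body (via check::ASTCodeBody).
    void checkASTCodeBody(const Decl *D, AnalysisManager &mgr,
                          BugReporter &BR) const { /* inspect the body */ }
  };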
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticClients.h
index d3aa3b2..2713e31 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticClients.h
@@ -11,22 +11,32 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_PATH_DIAGNOSTIC_CLIENTS_H
-#define LLVM_CLANG_CHECKER_PATH_DIAGNOSTIC_CLiENTS_H
+#ifndef LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H
+#define LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H
#include <string>
namespace clang {
-class PathDiagnosticClient;
class Preprocessor;
+namespace ento {
+
+class PathDiagnosticClient;
+
PathDiagnosticClient*
-CreateHTMLDiagnosticClient(const std::string& prefix, const Preprocessor &PP);
+createHTMLDiagnosticClient(const std::string& prefix, const Preprocessor &PP);
PathDiagnosticClient*
-CreatePlistDiagnosticClient(const std::string& prefix, const Preprocessor &PP,
+createPlistDiagnosticClient(const std::string& prefix, const Preprocessor &PP,
PathDiagnosticClient *SubPD = 0);
+PathDiagnosticClient*
+createTextPathDiagnosticClient(const std::string& prefix,
+ const Preprocessor &PP);
+
+} // end ento namespace
+
} // end clang namespace
+
#endif
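
[Editorial sketch, not part of this patch: a usage example of the renamed factory functions, assuming an existing Preprocessor 'PP'; the caller takes ownership of the returned client.]

  clang::ento::PathDiagnosticClient *PD =
      clang::ento::createHTMLDiagnosticClient("/tmp/report-dir", PP);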
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/AnalysisManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
index 3855079..1ba038e 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/AnalysisManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -12,12 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSISMANAGER_H
-#define LLVM_CLANG_ANALYSIS_ANALYSISMANAGER_H
+#ifndef LLVM_CLANG_GR_ANALYSISMANAGER_H
+#define LLVM_CLANG_GR_ANALYSISMANAGER_H
#include "clang/Analysis/AnalysisContext.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
namespace clang {
@@ -26,6 +26,9 @@ namespace idx {
class TranslationUnit;
}
+namespace ento {
+ class CheckerManager;
+
class AnalysisManager : public BugReporterData {
AnalysisContextManager AnaCtxMgr;
LocationContextManager LocCtxMgr;
@@ -40,6 +43,8 @@ class AnalysisManager : public BugReporterData {
StoreManagerCreator CreateStoreMgr;
ConstraintManagerCreator CreateConstraintMgr;
+ CheckerManager *CheckerMgr;
+
/// \brief Provide function definitions in other translation units. This is
/// NULL if we don't have multiple translation units. AnalysisManager does
/// not own the Indexer.
@@ -50,8 +55,8 @@ class AnalysisManager : public BugReporterData {
// The maximum number of exploded nodes the analyzer will generate.
unsigned MaxNodes;
- // The maximum number of times the analyzer will go through a loop.
- unsigned MaxLoop;
+  // The maximum number of times the analyzer will visit a block.
+ unsigned MaxVisit;
bool VisualizeEGDot;
bool VisualizeEGUbi;
@@ -67,23 +72,29 @@ class AnalysisManager : public BugReporterData {
bool EagerlyAssume;
bool TrimGraph;
bool InlineCall;
+ bool EagerlyTrimEGraph;
public:
AnalysisManager(ASTContext &ctx, Diagnostic &diags,
const LangOptions &lang, PathDiagnosticClient *pd,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
+ CheckerManager *checkerMgr,
idx::Indexer *idxer,
- unsigned maxnodes, unsigned maxloop,
+ unsigned maxnodes, unsigned maxvisit,
bool vizdot, bool vizubi, bool purge, bool eager, bool trim,
- bool inlinecall, bool useUnoptimizedCFG)
-
- : AnaCtxMgr(useUnoptimizedCFG), Ctx(ctx), Diags(diags), LangInfo(lang),
- PD(pd),
- CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),Idxer(idxer),
- AScope(ScopeDecl), MaxNodes(maxnodes), MaxLoop(maxloop),
+ bool inlinecall, bool useUnoptimizedCFG,
+ bool addImplicitDtors, bool addInitializers,
+ bool eagerlyTrimEGraph)
+
+ : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, addInitializers),
+ Ctx(ctx), Diags(diags), LangInfo(lang), PD(pd),
+ CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
+ CheckerMgr(checkerMgr), Idxer(idxer),
+ AScope(ScopeDecl), MaxNodes(maxnodes), MaxVisit(maxvisit),
VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge),
- EagerlyAssume(eager), TrimGraph(trim), InlineCall(inlinecall) {}
+ EagerlyAssume(eager), TrimGraph(trim), InlineCall(inlinecall),
+ EagerlyTrimEGraph(eagerlyTrimEGraph) {}
~AnalysisManager() { FlushDiagnostics(); }
@@ -104,6 +115,8 @@ public:
return CreateConstraintMgr;
}
+ CheckerManager *getCheckerManager() const { return CheckerMgr; }
+
idx::Indexer *getIndexer() const { return Idxer; }
virtual ASTContext &getASTContext() {
@@ -133,7 +146,7 @@ public:
unsigned getMaxNodes() const { return MaxNodes; }
- unsigned getMaxLoop() const { return MaxLoop; }
+ unsigned getMaxVisit() const { return MaxVisit; }
bool shouldVisualizeGraphviz() const { return VisualizeEGDot; }
@@ -143,6 +156,8 @@ public:
return VisualizeEGDot || VisualizeEGUbi;
}
+ bool shouldEagerlyTrimExplodedGraph() const { return EagerlyTrimEGraph; }
+
bool shouldTrimGraph() const { return TrimGraph; }
bool shouldPurgeDead() const { return PurgeDead; }
@@ -153,7 +168,7 @@ public:
bool hasIndexer() const { return Idxer != 0; }
- const AnalysisContext *getAnalysisContextInAnotherTU(const Decl *D);
+ AnalysisContext *getAnalysisContextInAnotherTU(const Decl *D);
CFG *getCFG(Decl const *D) {
return AnaCtxMgr.getContext(D)->getCFG();
@@ -177,8 +192,8 @@ public:
const StackFrameContext *getStackFrame(AnalysisContext *Ctx,
LocationContext const *Parent,
- Stmt const *S, const CFGBlock *Blk,
- unsigned Idx) {
+ const Stmt *S,
+ const CFGBlock *Blk, unsigned Idx) {
return LocCtxMgr.getStackFrame(Ctx, Parent, S, Blk, Idx);
}
@@ -189,14 +204,17 @@ public:
}
// Get a stack frame with parent.
- StackFrameContext const *getStackFrame(Decl const *D,
+ StackFrameContext const *getStackFrame(const Decl *D,
LocationContext const *Parent,
- Stmt const *S, const CFGBlock *Blk,
- unsigned Idx) {
- return LocCtxMgr.getStackFrame(AnaCtxMgr.getContext(D), Parent, S, Blk,Idx);
+ const Stmt *S,
+ const CFGBlock *Blk, unsigned Idx) {
+ return LocCtxMgr.getStackFrame(AnaCtxMgr.getContext(D), Parent, S,
+ Blk,Idx);
}
};
-}
+} // end ento namespace
+
+} // end clang namespace
#endif
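
[Editorial sketch, not part of this patch: illustrative use of the accessors above, assuming an already-constructed manager 'Mgr' and a declaration 'D'.]

  clang::CFG *cfg = Mgr.getCFG(D);           // per-declaration CFG
  unsigned budget = Mgr.getMaxVisit();       // block-visit limit (was MaxLoop)
  clang::ento::CheckerManager *CM = Mgr.getCheckerManager(); // may be null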
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/BasicValueFactory.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index 59dd919..a4327e1 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -8,15 +8,15 @@
//===----------------------------------------------------------------------===//
//
// This file defines BasicValueFactory, a class that manages the lifetime
-// of APSInt objects and symbolic constraints used by GRExprEngine
+// of APSInt objects and symbolic constraints used by ExprEngine
// and related classes.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_BASICVALUEFACTORY_H
-#define LLVM_CLANG_ANALYSIS_BASICVALUEFACTORY_H
+#ifndef LLVM_CLANG_GR_BASICVALUEFACTORY_H
+#define LLVM_CLANG_GR_BASICVALUEFACTORY_H
-#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/APSInt.h"
@@ -24,6 +24,8 @@
namespace clang {
+namespace ento {
+
class GRState;
class CompoundValData : public llvm::FoldingSetNode {
@@ -102,9 +104,9 @@ public:
}
const llvm::APSInt &Convert(QualType T, const llvm::APSInt &From) {
- assert(T->isIntegerType() || Loc::IsLocType(T));
+ assert(T->isIntegerType() || Loc::isLocType(T));
unsigned bitwidth = Ctx.getTypeSize(T);
- bool isUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ bool isUnsigned = T->isUnsignedIntegerType() || Loc::isLocType(T);
if (isUnsigned == From.isUnsigned() && bitwidth == From.getBitWidth())
return From;
@@ -126,14 +128,14 @@ public:
}
inline const llvm::APSInt& getMaxValue(QualType T) {
- assert(T->isIntegerType() || Loc::IsLocType(T));
- bool isUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ bool isUnsigned = T->isUnsignedIntegerType() || Loc::isLocType(T);
return getValue(llvm::APSInt::getMaxValue(Ctx.getTypeSize(T), isUnsigned));
}
inline const llvm::APSInt& getMinValue(QualType T) {
- assert(T->isIntegerType() || Loc::IsLocType(T));
- bool isUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ bool isUnsigned = T->isUnsignedIntegerType() || Loc::isLocType(T);
return getValue(llvm::APSInt::getMinValue(Ctx.getTypeSize(T), isUnsigned));
}
@@ -162,7 +164,7 @@ public:
}
inline const llvm::APSInt& getTruthValue(bool b) {
- return getTruthValue(b, Ctx.IntTy);
+ return getTruthValue(b, Ctx.getLogicalOperationType());
}
const CompoundValData *getCompoundValData(QualType T,
@@ -172,14 +174,14 @@ public:
const TypedRegion *region);
llvm::ImmutableList<SVal> getEmptySValList() {
- return SValListFactory.GetEmptyList();
+ return SValListFactory.getEmptyList();
}
llvm::ImmutableList<SVal> consVals(SVal X, llvm::ImmutableList<SVal> L) {
- return SValListFactory.Add(X, L);
+ return SValListFactory.add(X, L);
}
- const llvm::APSInt* EvaluateAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt* evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1,
const llvm::APSInt& V2);
@@ -192,6 +194,8 @@ public:
const SVal* getPersistentSVal(SVal X);
};
+} // end ento namespace
+
} // end clang namespace
#endif
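
[Editorial sketch, not part of this patch, assuming an existing factory 'BVF', a QualType 'T', and an llvm::APSInt 'V': Convert normalizes 'V' to the bit width and signedness of 'T' (pointer-like Loc types count as unsigned), and getTruthValue(bool) now yields a value of the logical-operation type rather than plain 'int'.]

  const llvm::APSInt &Norm = BVF.Convert(T, V);
  const llvm::APSInt &True = BVF.getTruthValue(true);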
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRBlockCounter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h
index b7d0e8a..7d0fdfb 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRBlockCounter.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h
@@ -1,4 +1,4 @@
-//==- GRBlockCounter.h - ADT for counting block visits -------------*- C++ -*-//
+//==- BlockCounter.h - ADT for counting block visits ---------------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -7,14 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines GRBlockCounter, an abstract data type used to count
+// This file defines BlockCounter, an abstract data type used to count
// the number of times a given block has been visited along a path
-// analyzed by GRCoreEngine.
+// analyzed by CoreEngine.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GRBLOCKCOUNTER
-#define LLVM_CLANG_ANALYSIS_GRBLOCKCOUNTER
+#ifndef LLVM_CLANG_GR_BLOCKCOUNTER
+#define LLVM_CLANG_GR_BLOCKCOUNTER
namespace llvm {
class BumpPtrAllocator;
@@ -24,13 +24,15 @@ namespace clang {
class StackFrameContext;
-class GRBlockCounter {
+namespace ento {
+
+class BlockCounter {
void* Data;
- GRBlockCounter(void* D) : Data(D) {}
+ BlockCounter(void* D) : Data(D) {}
public:
- GRBlockCounter() : Data(0) {}
+ BlockCounter() : Data(0) {}
unsigned getNumVisited(const StackFrameContext *CallSite,
unsigned BlockID) const;
@@ -41,8 +43,8 @@ public:
Factory(llvm::BumpPtrAllocator& Alloc);
~Factory();
- GRBlockCounter GetEmptyCounter();
- GRBlockCounter IncrementCount(GRBlockCounter BC,
+ BlockCounter GetEmptyCounter();
+ BlockCounter IncrementCount(BlockCounter BC,
const StackFrameContext *CallSite,
unsigned BlockID);
};
@@ -50,6 +52,8 @@ public:
friend class Factory;
};
+} // end ento namespace
+
} // end clang namespace
#endif
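
[Editorial sketch, not part of this patch: BlockCounter values are immutable, so IncrementCount returns a new counter rather than mutating in place. 'CallSite' (a StackFrameContext*) and 'BlockID' are assumed to come from the surrounding analysis.]

  llvm::BumpPtrAllocator Alloc;
  clang::ento::BlockCounter::Factory F(Alloc);
  clang::ento::BlockCounter BC = F.GetEmptyCounter();
  BC = F.IncrementCount(BC, CallSite, BlockID);
  unsigned n = BC.getNumVisited(CallSite, BlockID); // == 1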
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Checker.h
index 136a29d..22c2027 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Checker.h
@@ -12,11 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_CHECKER
-#define LLVM_CLANG_ANALYSIS_CHECKER
+#ifndef LLVM_CLANG_GR_CHECKER
+#define LLVM_CLANG_GR_CHECKER
#include "clang/Analysis/Support/SaveAndRestore.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
//===----------------------------------------------------------------------===//
// Checker interface.
@@ -24,13 +24,15 @@
namespace clang {
+namespace ento {
+
class CheckerContext {
ExplodedNodeSet &Dst;
- GRStmtNodeBuilder &B;
- GRExprEngine &Eng;
+ StmtNodeBuilder &B;
+ ExprEngine &Eng;
ExplodedNode *Pred;
SaveAndRestore<bool> OldSink;
- SaveAndRestore<const void*> OldTag;
+ const void *checkerTag;
SaveAndRestore<ProgramPoint::Kind> OldPointKind;
SaveOr OldHasGen;
const GRState *ST;
@@ -39,22 +41,22 @@ class CheckerContext {
public:
bool *respondsToCallback;
public:
- CheckerContext(ExplodedNodeSet &dst, GRStmtNodeBuilder &builder,
- GRExprEngine &eng, ExplodedNode *pred,
+ CheckerContext(ExplodedNodeSet &dst, StmtNodeBuilder &builder,
+ ExprEngine &eng, ExplodedNode *pred,
const void *tag, ProgramPoint::Kind K,
bool *respondsToCB = 0,
const Stmt *stmt = 0, const GRState *st = 0)
: Dst(dst), B(builder), Eng(eng), Pred(pred),
OldSink(B.BuildSinks),
- OldTag(B.Tag, tag),
+ checkerTag(tag),
OldPointKind(B.PointKind, K),
- OldHasGen(B.HasGeneratedNode),
+ OldHasGen(B.hasGeneratedNode),
ST(st), statement(stmt), size(Dst.size()),
respondsToCallback(respondsToCB) {}
~CheckerContext();
- GRExprEngine &getEngine() {
+ ExprEngine &getEngine() {
return Eng;
}
@@ -71,7 +73,7 @@ public:
}
ExplodedNodeSet &getNodeSet() { return Dst; }
- GRStmtNodeBuilder &getNodeBuilder() { return B; }
+ StmtNodeBuilder &getNodeBuilder() { return B; }
ExplodedNode *&getPredecessor() { return Pred; }
const GRState *getState() { return ST ? ST : B.GetState(Pred); }
@@ -87,80 +89,74 @@ public:
return getBugReporter().getSourceManager();
}
- ValueManager &getValueManager() {
- return Eng.getValueManager();
- }
-
- SValuator &getSValuator() {
- return Eng.getSValuator();
+ SValBuilder &getSValBuilder() {
+ return Eng.getSValBuilder();
}
- ExplodedNode *GenerateNode(bool autoTransition = true) {
+ ExplodedNode *generateNode(bool autoTransition = true) {
assert(statement && "Only transitions with statements currently supported");
- ExplodedNode *N = GenerateNodeImpl(statement, getState(), false);
+ ExplodedNode *N = generateNodeImpl(statement, getState(), false,
+ checkerTag);
if (N && autoTransition)
Dst.Add(N);
return N;
}
- ExplodedNode *GenerateNode(const Stmt *stmt, const GRState *state,
- bool autoTransition = true) {
+ ExplodedNode *generateNode(const Stmt *stmt, const GRState *state,
+ bool autoTransition = true, const void *tag = 0) {
assert(state);
- ExplodedNode *N = GenerateNodeImpl(stmt, state, false);
+ ExplodedNode *N = generateNodeImpl(stmt, state, false,
+ tag ? tag : checkerTag);
if (N && autoTransition)
addTransition(N);
return N;
}
- ExplodedNode *GenerateNode(const GRState *state, ExplodedNode *pred,
+ ExplodedNode *generateNode(const GRState *state, ExplodedNode *pred,
bool autoTransition = true) {
assert(statement && "Only transitions with statements currently supported");
- ExplodedNode *N = GenerateNodeImpl(statement, state, pred, false);
+ ExplodedNode *N = generateNodeImpl(statement, state, pred, false);
if (N && autoTransition)
addTransition(N);
return N;
}
- ExplodedNode *GenerateNode(const GRState *state, bool autoTransition = true) {
+ ExplodedNode *generateNode(const GRState *state, bool autoTransition = true,
+ const void *tag = 0) {
assert(statement && "Only transitions with statements currently supported");
- ExplodedNode *N = GenerateNodeImpl(statement, state, false);
+ ExplodedNode *N = generateNodeImpl(statement, state, false,
+ tag ? tag : checkerTag);
if (N && autoTransition)
addTransition(N);
return N;
}
- ExplodedNode *GenerateSink(const Stmt *stmt, const GRState *state = 0) {
- return GenerateNodeImpl(stmt, state ? state : getState(), true);
+ ExplodedNode *generateSink(const Stmt *stmt, const GRState *state = 0) {
+ return generateNodeImpl(stmt, state ? state : getState(), true,
+ checkerTag);
}
- ExplodedNode *GenerateSink(const GRState *state = 0) {
+ ExplodedNode *generateSink(const GRState *state = 0) {
assert(statement && "Only transitions with statements currently supported");
- return GenerateNodeImpl(statement, state ? state : getState(), true);
+ return generateNodeImpl(statement, state ? state : getState(), true,
+ checkerTag);
}
void addTransition(ExplodedNode *node) {
Dst.Add(node);
}
- void addTransition(const GRState *state) {
+ void addTransition(const GRState *state, const void *tag = 0) {
assert(state);
// If the 'state' is not new, we need to check if the cached state 'ST'
// is new.
if (state != getState() || (ST && ST != B.GetState(Pred)))
      // state is new or equal to ST.
- GenerateNode(state, true);
+ generateNode(state, true, tag);
else
Dst.Add(Pred);
}
- // Generate a node with a new program point different from the one that will
- // be created by the GRStmtNodeBuilder.
- void addTransition(const GRState *state, ProgramPoint Loc) {
- ExplodedNode *N = B.generateNode(Loc, state, Pred);
- if (N)
- addTransition(N);
- }
-
void EmitReport(BugReport *R) {
Eng.getBugReporter().EmitReport(R);
}
@@ -170,17 +166,17 @@ public:
}
private:
- ExplodedNode *GenerateNodeImpl(const Stmt* stmt, const GRState *state,
- bool markAsSink) {
- ExplodedNode *node = B.generateNode(stmt, state, Pred);
+ ExplodedNode *generateNodeImpl(const Stmt* stmt, const GRState *state,
+ bool markAsSink, const void *tag) {
+ ExplodedNode *node = B.generateNode(stmt, state, Pred, tag);
if (markAsSink && node)
node->markAsSink();
return node;
}
- ExplodedNode *GenerateNodeImpl(const Stmt* stmt, const GRState *state,
+ ExplodedNode *generateNodeImpl(const Stmt* stmt, const GRState *state,
ExplodedNode *pred, bool markAsSink) {
- ExplodedNode *node = B.generateNode(stmt, state, pred);
+ ExplodedNode *node = B.generateNode(stmt, state, pred, checkerTag);
if (markAsSink && node)
node->markAsSink();
return node;
@@ -189,12 +185,12 @@ private:
class Checker {
private:
- friend class GRExprEngine;
+ friend class ExprEngine;
// FIXME: Remove the 'tag' option.
void GR_Visit(ExplodedNodeSet &Dst,
- GRStmtNodeBuilder &Builder,
- GRExprEngine &Eng,
+ StmtNodeBuilder &Builder,
+ ExprEngine &Eng,
const Stmt *S,
ExplodedNode *Pred, void *tag, bool isPrevisit,
bool& respondsToCallback) {
@@ -207,25 +203,39 @@ private:
_PostVisit(C, S);
}
- bool GR_EvalNilReceiver(ExplodedNodeSet &Dst, GRStmtNodeBuilder &Builder,
- GRExprEngine &Eng, const ObjCMessageExpr *ME,
+ void GR_visitObjCMessage(ExplodedNodeSet &Dst,
+ StmtNodeBuilder &Builder,
+ ExprEngine &Eng,
+ const ObjCMessage &msg,
+ ExplodedNode *Pred, void *tag, bool isPrevisit) {
+ CheckerContext C(Dst, Builder, Eng, Pred, tag,
+ isPrevisit ? ProgramPoint::PreStmtKind :
+ ProgramPoint::PostStmtKind, 0, msg.getOriginExpr());
+ if (isPrevisit)
+ preVisitObjCMessage(C, msg);
+ else
+ postVisitObjCMessage(C, msg);
+ }
+
+ bool GR_evalNilReceiver(ExplodedNodeSet &Dst, StmtNodeBuilder &Builder,
+ ExprEngine &Eng, const ObjCMessage &msg,
ExplodedNode *Pred, const GRState *state, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag, ProgramPoint::PostStmtKind,
- 0, ME, state);
- return EvalNilReceiver(C, ME);
+ 0, msg.getOriginExpr(), state);
+ return evalNilReceiver(C, msg);
}
- bool GR_EvalCallExpr(ExplodedNodeSet &Dst, GRStmtNodeBuilder &Builder,
- GRExprEngine &Eng, const CallExpr *CE,
+ bool GR_evalCallExpr(ExplodedNodeSet &Dst, StmtNodeBuilder &Builder,
+ ExprEngine &Eng, const CallExpr *CE,
ExplodedNode *Pred, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag, ProgramPoint::PostStmtKind,
0, CE);
- return EvalCallExpr(C, CE);
+ return evalCallExpr(C, CE);
}
// FIXME: Remove the 'tag' option.
void GR_VisitBind(ExplodedNodeSet &Dst,
- GRStmtNodeBuilder &Builder, GRExprEngine &Eng,
+ StmtNodeBuilder &Builder, ExprEngine &Eng,
const Stmt *StoreE, ExplodedNode *Pred, void *tag,
SVal location, SVal val,
bool isPrevisit) {
@@ -237,9 +247,9 @@ private:
}
// FIXME: Remove the 'tag' option.
- void GR_VisitLocation(ExplodedNodeSet &Dst,
- GRStmtNodeBuilder &Builder,
- GRExprEngine &Eng,
+ void GR_visitLocation(ExplodedNodeSet &Dst,
+ StmtNodeBuilder &Builder,
+ ExprEngine &Eng,
const Stmt *S,
ExplodedNode *Pred, const GRState *state,
SVal location,
@@ -247,49 +257,52 @@ private:
CheckerContext C(Dst, Builder, Eng, Pred, tag,
isLoad ? ProgramPoint::PreLoadKind :
ProgramPoint::PreStoreKind, 0, S, state);
- VisitLocation(C, S, location);
+ visitLocation(C, S, location, isLoad);
}
- void GR_EvalDeadSymbols(ExplodedNodeSet &Dst, GRStmtNodeBuilder &Builder,
- GRExprEngine &Eng, const Stmt *S, ExplodedNode *Pred,
+ void GR_evalDeadSymbols(ExplodedNodeSet &Dst, StmtNodeBuilder &Builder,
+ ExprEngine &Eng, const Stmt *S, ExplodedNode *Pred,
SymbolReaper &SymReaper, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag,
ProgramPoint::PostPurgeDeadSymbolsKind, 0, S);
- EvalDeadSymbols(C, SymReaper);
+ evalDeadSymbols(C, SymReaper);
}
public:
virtual ~Checker();
virtual void _PreVisit(CheckerContext &C, const Stmt *S) {}
virtual void _PostVisit(CheckerContext &C, const Stmt *S) {}
- virtual void VisitLocation(CheckerContext &C, const Stmt *S, SVal location) {}
+ virtual void preVisitObjCMessage(CheckerContext &C, ObjCMessage msg) {}
+ virtual void postVisitObjCMessage(CheckerContext &C, ObjCMessage msg) {}
+ virtual void visitLocation(CheckerContext &C, const Stmt *S, SVal location,
+ bool isLoad) {}
virtual void PreVisitBind(CheckerContext &C, const Stmt *StoreE,
SVal location, SVal val) {}
- virtual void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper) {}
- virtual void EvalEndPath(GREndPathNodeBuilder &B, void *tag,
- GRExprEngine &Eng) {}
+ virtual void evalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper) {}
+ virtual void evalEndPath(EndOfFunctionNodeBuilder &B, void *tag,
+ ExprEngine &Eng) {}
virtual void MarkLiveSymbols(const GRState *state, SymbolReaper &SymReaper) {}
- virtual void VisitBranchCondition(GRBranchNodeBuilder &Builder,
- GRExprEngine &Eng,
+ virtual void VisitBranchCondition(BranchNodeBuilder &Builder,
+ ExprEngine &Eng,
const Stmt *Condition, void *tag) {}
- virtual bool EvalNilReceiver(CheckerContext &C, const ObjCMessageExpr *ME) {
+ virtual bool evalNilReceiver(CheckerContext &C, ObjCMessage msg) {
return false;
}
- virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ virtual bool evalCallExpr(CheckerContext &C, const CallExpr *CE) {
return false;
}
- virtual const GRState *EvalAssume(const GRState *state, SVal Cond,
+ virtual const GRState *evalAssume(const GRState *state, SVal Cond,
bool Assumption, bool *respondsToCallback) {
*respondsToCallback = false;
return state;
}
- virtual bool WantsRegionChangeUpdate(const GRState *state) { return false; }
+ virtual bool wantsRegionChangeUpdate(const GRState *state) { return false; }
virtual const GRState *EvalRegionChanges(const GRState *state,
const MemRegion * const *Begin,
@@ -300,8 +313,11 @@ public:
}
virtual void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B,
- GRExprEngine &Eng) {}
+ ExprEngine &Eng) {}
};
+
+} // end ento namespace
+
} // end clang namespace
#endif
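
[Editorial sketch, not part of this patch: an old-style checker overrides only the virtual hooks it needs; returning true from evalCallExpr tells the engine that this checker has evaluated the call itself. ExampleChecker is hypothetical.]

  class ExampleChecker : public clang::ento::Checker {
  public:
    virtual bool evalCallExpr(clang::ento::CheckerContext &C,
                              const clang::CallExpr *CE) {
      const clang::ento::GRState *state = C.getState();
      C.addTransition(state);  // model the call as a no-op
      return true;             // we evaluated this call
    }
  };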
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerHelpers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index ea3c842..12547e0 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerHelpers.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -11,13 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_PATHSENSITIVE_CHECKERHELPERS
-#define LLVM_CLANG_CHECKER_PATHSENSITIVE_CHECKERHELPERS
+#ifndef LLVM_CLANG_GR_PATHSENSITIVE_CHECKERHELPERS
+#define LLVM_CLANG_GR_PATHSENSITIVE_CHECKERHELPERS
#include "clang/AST/Stmt.h"
namespace clang {
+namespace ento {
+
bool containsMacro(const Stmt *S);
bool containsEnum(const Stmt *S);
bool containsStaticLocal(const Stmt *S);
@@ -26,8 +28,7 @@ template <class T> bool containsStmt(const Stmt *S) {
if (isa<T>(S))
return true;
- for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
- ++I)
+ for (Stmt::const_child_range I = S->children(); I; ++I)
if (const Stmt *child = *I)
if (containsStmt<T>(child))
return true;
@@ -35,6 +36,8 @@ template <class T> bool containsStmt(const Stmt *S) {
return false;
}
-}
+} // end ento namespace
+
+} // end clang namespace
#endif
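
[Editorial note, not part of this patch: containsStmt<T> recursively scans the children of a statement, so a single call answers containment questions, e.g.]

  bool hasGoto = clang::ento::containsStmt<clang::GotoStmt>(S);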
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.def b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def
index 2edc4a3..9b3c263 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.def
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def
@@ -21,17 +21,28 @@
PREVISIT(ArraySubscriptExpr, Stmt)
PREVISIT(BinaryOperator, Stmt)
-PREVISIT(CallExpr, Stmt)
-PREVISIT(CXXOperatorCallExpr, CallExpr)
+PREVISIT(CallExpr, GenericCall)
+PREVISIT(CompoundAssignOperator, BinaryOperator)
+PREVISIT(CStyleCastExpr, CastExpr)
+PREVISIT(CXXConstCastExpr, CastExpr)
+PREVISIT(CXXDynamicCastExpr, CastExpr)
+PREVISIT(CXXFunctionalCastExpr, CastExpr)
+PREVISIT(CXXOperatorCallExpr, GenericCall)
+PREVISIT(CXXMemberCallExpr, GenericCall)
+PREVISIT(CXXReinterpretCastExpr, CastExpr)
+PREVISIT(CXXStaticCastExpr, CastExpr)
PREVISIT(DeclStmt, Stmt)
-PREVISIT(ObjCMessageExpr, Stmt)
+PREVISIT(ImplicitCastExpr, CastExpr)
+PREVISIT(ObjCAtSynchronizedStmt, Stmt)
PREVISIT(ReturnStmt, Stmt)
POSTVISIT(BlockExpr, Stmt)
POSTVISIT(BinaryOperator, Stmt)
-POSTVISIT(CallExpr, Stmt)
-POSTVISIT(CXXOperatorCallExpr, CallExpr)
-POSTVISIT(ObjCMessageExpr, Stmt)
+POSTVISIT(CallExpr, GenericCall)
+POSTVISIT(CompoundAssignOperator, BinaryOperator)
+POSTVISIT(CXXOperatorCallExpr, GenericCall)
+POSTVISIT(CXXMemberCallExpr, GenericCall)
+POSTVISIT(ObjCIvarRefExpr, Stmt)
#undef PREVISIT
#undef POSTVISIT
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h
index e2ba89b..dc76c96 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h
@@ -11,12 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_CHECKERVISITOR
-#define LLVM_CLANG_ANALYSIS_CHECKERVISITOR
-#include "clang/Checker/PathSensitive/Checker.h"
+#ifndef LLVM_CLANG_GR_CHECKERVISITOR
+#define LLVM_CLANG_GR_CHECKERVISITOR
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
namespace clang {
+namespace ento {
+
//===----------------------------------------------------------------------===//
// Checker visitor interface. Used by subclasses of Checker to specify their
// own checker visitor logic.
@@ -41,22 +43,11 @@ public:
assert(false && "Unsupport statement.");
return;
- case Stmt::ImplicitCastExprClass:
- case Stmt::CStyleCastExprClass:
- static_cast<ImplClass*>(this)->PreVisitCastExpr(C,
- static_cast<const CastExpr*>(S));
- break;
-
- case Stmt::CompoundAssignOperatorClass:
- static_cast<ImplClass*>(this)->PreVisitBinaryOperator(C,
- static_cast<const BinaryOperator*>(S));
- break;
-
#define PREVISIT(NAME, FALLBACK) \
case Stmt::NAME ## Class:\
static_cast<ImplClass*>(this)->PreVisit ## NAME(C,static_cast<const NAME*>(S));\
break;
-#include "clang/Checker/PathSensitive/CheckerVisitor.def"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def"
}
}
@@ -65,20 +56,23 @@ break;
default:
assert(false && "Unsupport statement.");
return;
- case Stmt::CompoundAssignOperatorClass:
- static_cast<ImplClass*>(this)->PostVisitBinaryOperator(C,
- static_cast<const BinaryOperator*>(S));
- break;
#define POSTVISIT(NAME, FALLBACK) \
case Stmt::NAME ## Class:\
static_cast<ImplClass*>(this)->\
PostVisit ## NAME(C,static_cast<const NAME*>(S));\
break;
-#include "clang/Checker/PathSensitive/CheckerVisitor.def"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def"
}
}
+ void PreVisitGenericCall(CheckerContext &C, const CallExpr *CE) {
+ static_cast<ImplClass*>(this)->PreVisitStmt(C, CE);
+ }
+ void PostVisitGenericCall(CheckerContext &C, const CallExpr *CE) {
+ static_cast<ImplClass*>(this)->PostVisitStmt(C, CE);
+ }
+
void PreVisitStmt(CheckerContext &C, const Stmt *S) {
*C.respondsToCallback = false;
}
@@ -99,9 +93,11 @@ void PreVisit ## NAME(CheckerContext &C, const NAME* S) {\
void PostVisit ## NAME(CheckerContext &C, const NAME* S) {\
static_cast<ImplClass*>(this)->PostVisit ## FALLBACK(C, S);\
}
-#include "clang/Checker/PathSensitive/CheckerVisitor.def"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.def"
};
+} // end ento namespace
+
} // end clang namespace
#endif
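
[Editorial sketch, not part of this patch: CheckerVisitor is a CRTP dispatcher, and the .def file above expands into PreVisit##NAME / PostVisit##NAME hooks that default to their FALLBACK. A subclass overrides only what it needs; ExampleChecker is hypothetical.]

  class ExampleChecker
    : public clang::ento::CheckerVisitor<ExampleChecker> {
  public:
    // Only CallExpr pre-visits are handled; everything else falls
    // through to the generated defaults.
    void PreVisitCallExpr(clang::ento::CheckerContext &C,
                          const clang::CallExpr *CE) { /* inspect CE */ }
  };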
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ConstraintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
index 97535f5..199b41a 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ConstraintManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_CONSTRAINT_MANAGER_H
-#define LLVM_CLANG_ANALYSIS_CONSTRAINT_MANAGER_H
+#ifndef LLVM_CLANG_GR_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_CONSTRAINT_MANAGER_H
// FIXME: Typedef LiveSymbolsTy/DeadSymbolsTy at a more appropriate place.
-#include "clang/Checker/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
namespace llvm {
class APSInt;
@@ -23,21 +23,23 @@ class APSInt;
namespace clang {
+namespace ento {
+
class GRState;
class GRStateManager;
-class GRSubEngine;
+class SubEngine;
class SVal;
class ConstraintManager {
public:
virtual ~ConstraintManager();
- virtual const GRState *Assume(const GRState *state, DefinedSVal Cond,
+ virtual const GRState *assume(const GRState *state, DefinedSVal Cond,
bool Assumption) = 0;
- std::pair<const GRState*, const GRState*> AssumeDual(const GRState *state,
+ std::pair<const GRState*, const GRState*> assumeDual(const GRState *state,
DefinedSVal Cond) {
- return std::make_pair(Assume(state, Cond, true),
- Assume(state, Cond, false));
+ return std::make_pair(assume(state, Cond, true),
+ assume(state, Cond, false));
}
virtual const llvm::APSInt* getSymVal(const GRState *state,
@@ -46,7 +48,7 @@ public:
virtual bool isEqual(const GRState *state, SymbolRef sym,
const llvm::APSInt& V) const = 0;
- virtual const GRState *RemoveDeadBindings(const GRState *state,
+ virtual const GRState *removeDeadBindings(const GRState *state,
SymbolReaper& SymReaper) = 0;
virtual void print(const GRState *state, llvm::raw_ostream& Out,
@@ -57,15 +59,17 @@ public:
/// canReasonAbout - Not all ConstraintManagers can accurately reason about
/// all SVal values. This method returns true if the ConstraintManager can
/// reasonably handle a given SVal value. This is typically queried by
- /// GRExprEngine to determine if the value should be replaced with a
+ /// ExprEngine to determine if the value should be replaced with a
/// conjured symbolic value in order to recover some precision.
virtual bool canReasonAbout(SVal X) const = 0;
};
ConstraintManager* CreateBasicConstraintManager(GRStateManager& statemgr,
- GRSubEngine &subengine);
+ SubEngine &subengine);
ConstraintManager* CreateRangeConstraintManager(GRStateManager& statemgr,
- GRSubEngine &subengine);
+ SubEngine &subengine);
+
+} // end ento namespace
} // end clang namespace
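
[Editorial sketch, not part of this patch, assuming a ConstraintManager 'CM', a GRState 'state', and a DefinedSVal 'Cond': assumeDual returns the states with the condition assumed true and false; a null entry means that branch is infeasible.]

  std::pair<const clang::ento::GRState*, const clang::ento::GRState*>
      branches = CM.assumeDual(state, Cond);
  if (branches.first && !branches.second) {
    // only the true branch survives: Cond is provably true here
  }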
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 216ecac..800e63a 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -1,4 +1,4 @@
-//==- GRCoreEngine.h - Path-Sensitive Dataflow Engine --------------*- C++ -*-//
+//==- CoreEngine.h - Path-Sensitive Dataflow Engine ----------------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -12,43 +12,45 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GRENGINE
-#define LLVM_CLANG_ANALYSIS_GRENGINE
+#ifndef LLVM_CLANG_GR_COREENGINE
+#define LLVM_CLANG_GR_COREENGINE
#include "clang/AST/Expr.h"
-#include "clang/Checker/PathSensitive/ExplodedGraph.h"
-#include "clang/Checker/PathSensitive/GRWorkList.h"
-#include "clang/Checker/PathSensitive/GRBlockCounter.h"
-#include "clang/Checker/PathSensitive/GRAuditor.h"
-#include "clang/Checker/PathSensitive/GRSubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/OwningPtr.h"
namespace clang {
+namespace ento {
+
//===----------------------------------------------------------------------===//
-/// GRCoreEngine - Implements the core logic of the graph-reachability
+/// CoreEngine - Implements the core logic of the graph-reachability
/// analysis. It traverses the CFG and generates the ExplodedGraph.
/// Program "states" are treated as opaque void pointers.
-/// The template class GRCoreEngine (which subclasses GRCoreEngine)
+/// A separate subengine (see SubEngine)
/// provides the matching component to the engine that knows the actual types
/// for states. Note that this engine only dispatches to transfer functions
/// at the statement and block-level. The analyses themselves must implement
 /// any transfer function logic at the sub-expression level (if any).
-class GRCoreEngine {
- friend class GRStmtNodeBuilder;
- friend class GRBranchNodeBuilder;
- friend class GRIndirectGotoNodeBuilder;
- friend class GRSwitchNodeBuilder;
- friend class GREndPathNodeBuilder;
- friend class GRCallEnterNodeBuilder;
- friend class GRCallExitNodeBuilder;
+class CoreEngine {
+ friend class StmtNodeBuilder;
+ friend class GenericNodeBuilderImpl;
+ friend class BranchNodeBuilder;
+ friend class IndirectGotoNodeBuilder;
+ friend class SwitchNodeBuilder;
+ friend class EndOfFunctionNodeBuilder;
+ friend class CallEnterNodeBuilder;
+ friend class CallExitNodeBuilder;
public:
typedef std::vector<std::pair<BlockEdge, const ExplodedNode*> >
BlocksAborted;
private:
- GRSubEngine& SubEngine;
+ SubEngine& SubEng;
/// G - The simulation graph. Each node is a (location,state) pair.
llvm::OwningPtr<ExplodedGraph> G;
@@ -56,25 +58,24 @@ private:
/// WList - A set of queued nodes that need to be processed by the
/// worklist algorithm. It is up to the implementation of WList to decide
/// the order that nodes are processed.
- GRWorkList* WList;
+ WorkList* WList;
- /// BCounterFactory - A factory object for created GRBlockCounter objects.
+  /// BCounterFactory - A factory object for creating BlockCounter objects.
/// These are used to record for key nodes in the ExplodedGraph the
/// number of times different CFGBlocks have been visited along a path.
- GRBlockCounter::Factory BCounterFactory;
+ BlockCounter::Factory BCounterFactory;
/// The locations where we stopped doing work because we visited a location
/// too many times.
BlocksAborted blocksAborted;
- void GenerateNode(const ProgramPoint& Loc, const GRState* State,
+ void generateNode(const ProgramPoint& Loc, const GRState* State,
ExplodedNode* Pred);
void HandleBlockEdge(const BlockEdge& E, ExplodedNode* Pred);
void HandleBlockEntrance(const BlockEntrance& E, ExplodedNode* Pred);
void HandleBlockExit(const CFGBlock* B, ExplodedNode* Pred);
- void HandlePostStmt(const PostStmt& S, const CFGBlock* B,
- unsigned StmtIdx, ExplodedNode *Pred);
+ void HandlePostStmt(const CFGBlock* B, unsigned StmtIdx, ExplodedNode *Pred);
void HandleBranch(const Stmt* Cond, const Stmt* Term, const CFGBlock* B,
ExplodedNode* Pred);
@@ -82,68 +83,26 @@ private:
unsigned Index, ExplodedNode *Pred);
void HandleCallExit(const CallExit &L, ExplodedNode *Pred);
- /// Get the initial state from the subengine.
- const GRState* getInitialState(const LocationContext *InitLoc) {
- return SubEngine.getInitialState(InitLoc);
- }
-
- void ProcessEndPath(GREndPathNodeBuilder& Builder) {
- SubEngine.ProcessEndPath(Builder);
- }
-
- void ProcessStmt(const CFGElement E, GRStmtNodeBuilder& Builder) {
- SubEngine.ProcessStmt(E, Builder);
- }
-
- bool ProcessBlockEntrance(const CFGBlock* Blk, const ExplodedNode *Pred,
- GRBlockCounter BC) {
- return SubEngine.ProcessBlockEntrance(Blk, Pred, BC);
- }
-
-
- void ProcessBranch(const Stmt* Condition, const Stmt* Terminator,
- GRBranchNodeBuilder& Builder) {
- SubEngine.ProcessBranch(Condition, Terminator, Builder);
- }
-
-
- void ProcessIndirectGoto(GRIndirectGotoNodeBuilder& Builder) {
- SubEngine.ProcessIndirectGoto(Builder);
- }
-
-
- void ProcessSwitch(GRSwitchNodeBuilder& Builder) {
- SubEngine.ProcessSwitch(Builder);
- }
-
- void ProcessCallEnter(GRCallEnterNodeBuilder &Builder) {
- SubEngine.ProcessCallEnter(Builder);
- }
-
- void ProcessCallExit(GRCallExitNodeBuilder &Builder) {
- SubEngine.ProcessCallExit(Builder);
- }
-
private:
- GRCoreEngine(const GRCoreEngine&); // Do not implement.
- GRCoreEngine& operator=(const GRCoreEngine&);
+ CoreEngine(const CoreEngine&); // Do not implement.
+ CoreEngine& operator=(const CoreEngine&);
public:
- /// Construct a GRCoreEngine object to analyze the provided CFG using
+ /// Construct a CoreEngine object to analyze the provided CFG using
 /// a BFS exploration of the exploded graph.
- GRCoreEngine(GRSubEngine& subengine)
- : SubEngine(subengine), G(new ExplodedGraph()),
- WList(GRWorkList::MakeBFS()),
+ CoreEngine(SubEngine& subengine)
+ : SubEng(subengine), G(new ExplodedGraph()),
+ WList(WorkList::makeBFS()),
BCounterFactory(G->getAllocator()) {}
- /// Construct a GRCoreEngine object to analyze the provided CFG and to
+ /// Construct a CoreEngine object to analyze the provided CFG and to
/// use the provided worklist object to execute the worklist algorithm.
- /// The GRCoreEngine object assumes ownership of 'wlist'.
- GRCoreEngine(GRWorkList* wlist, GRSubEngine& subengine)
- : SubEngine(subengine), G(new ExplodedGraph()), WList(wlist),
+ /// The CoreEngine object assumes ownership of 'wlist'.
+ CoreEngine(WorkList* wlist, SubEngine& subengine)
+ : SubEng(subengine), G(new ExplodedGraph()), WList(wlist),
BCounterFactory(G->getAllocator()) {}
- ~GRCoreEngine() {
+ ~CoreEngine() {
delete WList;
}
@@ -166,7 +125,7 @@ public:
bool wasBlockAborted() const { return !blocksAborted.empty(); }
bool hasWorkRemaining() const { return wasBlockAborted() || WList->hasWork(); }
- GRWorkList *getWorkList() const { return WList; }
+ WorkList *getWorkList() const { return WList; }
BlocksAborted::const_iterator blocks_aborted_begin() const {
return blocksAborted.begin();
@@ -176,18 +135,17 @@ public:
}
};
-class GRStmtNodeBuilder {
- GRCoreEngine& Eng;
+class StmtNodeBuilder {
+ CoreEngine& Eng;
const CFGBlock& B;
const unsigned Idx;
ExplodedNode* Pred;
GRStateManager& Mgr;
- GRAuditor* Auditor;
public:
bool PurgingDeadSymbols;
bool BuildSinks;
- bool HasGeneratedNode;
+ bool hasGeneratedNode;
ProgramPoint::Kind PointKind;
const void *Tag;
@@ -200,21 +158,21 @@ public:
void GenerateAutoTransition(ExplodedNode* N);
public:
- GRStmtNodeBuilder(const CFGBlock* b, unsigned idx, ExplodedNode* N,
- GRCoreEngine* e, GRStateManager &mgr);
+ StmtNodeBuilder(const CFGBlock* b, unsigned idx, ExplodedNode* N,
+ CoreEngine* e, GRStateManager &mgr);
- ~GRStmtNodeBuilder();
+ ~StmtNodeBuilder();
- ExplodedNode* getBasePredecessor() const { return Pred; }
+ ExplodedNode* getPredecessor() const { return Pred; }
// FIXME: This should not be exposed.
- GRWorkList *getWorkList() { return Eng.WList; }
+ WorkList *getWorkList() { return Eng.WList; }
void SetCleanedState(const GRState* St) {
CleanedState = St;
}
- GRBlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
+ BlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
unsigned getCurrentBlockCount() const {
return getBlockCounter().getNumVisited(
@@ -223,28 +181,29 @@ public:
}
ExplodedNode* generateNode(PostStmt PP,const GRState* St,ExplodedNode* Pred) {
- HasGeneratedNode = true;
+ hasGeneratedNode = true;
return generateNodeInternal(PP, St, Pred);
}
ExplodedNode* generateNode(const Stmt *S, const GRState *St,
- ExplodedNode *Pred, ProgramPoint::Kind K) {
- HasGeneratedNode = true;
+ ExplodedNode *Pred, ProgramPoint::Kind K,
+ const void *tag = 0) {
+ hasGeneratedNode = true;
if (PurgingDeadSymbols)
K = ProgramPoint::PostPurgeDeadSymbolsKind;
- return generateNodeInternal(S, St, Pred, K, Tag);
+ return generateNodeInternal(S, St, Pred, K, tag ? tag : Tag);
}
ExplodedNode* generateNode(const Stmt *S, const GRState *St,
- ExplodedNode *Pred) {
- return generateNode(S, St, Pred, PointKind);
+ ExplodedNode *Pred, const void *tag = 0) {
+ return generateNode(S, St, Pred, PointKind, tag);
}
ExplodedNode *generateNode(const ProgramPoint &PP, const GRState* State,
ExplodedNode* Pred) {
- HasGeneratedNode = true;
+ hasGeneratedNode = true;
return generateNodeInternal(PP, State, Pred);
}
@@ -259,7 +218,13 @@ public:
/// getStmt - Return the current block-level expression associated with
/// this builder.
- const Stmt* getStmt() const { return B[Idx]; }
+ const Stmt* getStmt() const {
+ CFGStmt CS = B[Idx].getAs<CFGStmt>();
+ if (CS)
+ return CS.getStmt();
+ else
+ return 0;
+ }
/// getBlock - Return the CFGBlock associated with the block-level expression
/// of this builder.
@@ -267,10 +232,8 @@ public:
unsigned getIndex() const { return Idx; }
- void setAuditor(GRAuditor* A) { Auditor = A; }
-
const GRState* GetState(ExplodedNode* Pred) const {
- if (Pred == getBasePredecessor())
+ if (Pred == getPredecessor())
return CleanedState;
else
return Pred->getState();
@@ -294,8 +257,8 @@ public:
}
};
-class GRBranchNodeBuilder {
- GRCoreEngine& Eng;
+class BranchNodeBuilder {
+ CoreEngine& Eng;
const CFGBlock* Src;
const CFGBlock* DstT;
const CFGBlock* DstF;
@@ -310,19 +273,19 @@ class GRBranchNodeBuilder {
bool InFeasibleFalse;
public:
- GRBranchNodeBuilder(const CFGBlock* src, const CFGBlock* dstT,
- const CFGBlock* dstF, ExplodedNode* pred, GRCoreEngine* e)
+ BranchNodeBuilder(const CFGBlock* src, const CFGBlock* dstT,
+ const CFGBlock* dstF, ExplodedNode* pred, CoreEngine* e)
: Eng(*e), Src(src), DstT(dstT), DstF(dstF), Pred(pred),
GeneratedTrue(false), GeneratedFalse(false),
InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {}
- ~GRBranchNodeBuilder();
+ ~BranchNodeBuilder();
ExplodedNode* getPredecessor() const { return Pred; }
const ExplodedGraph& getGraph() const { return *Eng.G; }
- GRBlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
+ BlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
ExplodedNode* generateNode(const GRState* State, bool branch);
@@ -346,33 +309,33 @@ public:
}
};
-class GRIndirectGotoNodeBuilder {
- GRCoreEngine& Eng;
+class IndirectGotoNodeBuilder {
+ CoreEngine& Eng;
const CFGBlock* Src;
const CFGBlock& DispatchBlock;
const Expr* E;
ExplodedNode* Pred;
public:
- GRIndirectGotoNodeBuilder(ExplodedNode* pred, const CFGBlock* src,
- const Expr* e, const CFGBlock* dispatch, GRCoreEngine* eng)
+ IndirectGotoNodeBuilder(ExplodedNode* pred, const CFGBlock* src,
+ const Expr* e, const CFGBlock* dispatch, CoreEngine* eng)
: Eng(*eng), Src(src), DispatchBlock(*dispatch), E(e), Pred(pred) {}
class iterator {
CFGBlock::const_succ_iterator I;
- friend class GRIndirectGotoNodeBuilder;
+ friend class IndirectGotoNodeBuilder;
iterator(CFGBlock::const_succ_iterator i) : I(i) {}
public:
iterator& operator++() { ++I; return *this; }
bool operator!=(const iterator& X) const { return I != X.I; }
- const LabelStmt* getLabel() const {
- return llvm::cast<LabelStmt>((*I)->getLabel());
+ const LabelDecl *getLabel() const {
+ return llvm::cast<LabelStmt>((*I)->getLabel())->getDecl();
}
- const CFGBlock* getBlock() const {
+ const CFGBlock *getBlock() const {
return *I;
}
};
@@ -388,21 +351,21 @@ public:
const GRState* getState() const { return Pred->State; }
};
-class GRSwitchNodeBuilder {
- GRCoreEngine& Eng;
+class SwitchNodeBuilder {
+ CoreEngine& Eng;
const CFGBlock* Src;
const Expr* Condition;
ExplodedNode* Pred;
public:
- GRSwitchNodeBuilder(ExplodedNode* pred, const CFGBlock* src,
- const Expr* condition, GRCoreEngine* eng)
+ SwitchNodeBuilder(ExplodedNode* pred, const CFGBlock* src,
+ const Expr* condition, CoreEngine* eng)
: Eng(*eng), Src(src), Condition(condition), Pred(pred) {}
class iterator {
CFGBlock::const_succ_reverse_iterator I;
- friend class GRSwitchNodeBuilder;
+ friend class SwitchNodeBuilder;
iterator(CFGBlock::const_succ_reverse_iterator i) : I(i) {}
public:
@@ -422,6 +385,10 @@ public:
iterator begin() { return iterator(Src->succ_rbegin()+1); }
iterator end() { return iterator(Src->succ_rend()); }
+ const SwitchStmt *getSwitch() const {
+ return llvm::cast<SwitchStmt>(Src->getTerminator());
+ }
+
ExplodedNode* generateCaseStmtNode(const iterator& I, const GRState* State);
ExplodedNode* generateDefaultCaseNode(const GRState* State,
@@ -432,25 +399,69 @@ public:
const GRState* getState() const { return Pred->State; }
};
-class GREndPathNodeBuilder {
- GRCoreEngine &Eng;
+class GenericNodeBuilderImpl {
+protected:
+ CoreEngine &engine;
+ ExplodedNode *pred;
+ ProgramPoint pp;
+ llvm::SmallVector<ExplodedNode*, 2> sinksGenerated;
+
+ ExplodedNode *generateNodeImpl(const GRState *state, ExplodedNode *pred,
+ ProgramPoint programPoint, bool asSink);
+
+ GenericNodeBuilderImpl(CoreEngine &eng, ExplodedNode *pr, ProgramPoint p)
+ : engine(eng), pred(pr), pp(p), hasGeneratedNode(false) {}
+
+public:
+ bool hasGeneratedNode;
+
+ WorkList &getWorkList() { return *engine.WList; }
+
+ ExplodedNode* getPredecessor() const { return pred; }
+
+ BlockCounter getBlockCounter() const {
+ return engine.WList->getBlockCounter();
+ }
+
+ const llvm::SmallVectorImpl<ExplodedNode*> &sinks() const {
+ return sinksGenerated;
+ }
+};
+
+template <typename PP_T>
+class GenericNodeBuilder : public GenericNodeBuilderImpl {
+public:
+ GenericNodeBuilder(CoreEngine &eng, ExplodedNode *pr, const PP_T &p)
+ : GenericNodeBuilderImpl(eng, pr, p) {}
+
+ ExplodedNode *generateNode(const GRState *state, ExplodedNode *pred,
+ const void *tag, bool asSink) {
+ return generateNodeImpl(state, pred, cast<PP_T>(pp).withTag(tag),
+ asSink);
+ }
+
+ const PP_T &getProgramPoint() const { return cast<PP_T>(pp); }
+};
+
+class EndOfFunctionNodeBuilder {
+ CoreEngine &Eng;
const CFGBlock& B;
ExplodedNode* Pred;
public:
- bool HasGeneratedNode;
+ bool hasGeneratedNode;
public:
- GREndPathNodeBuilder(const CFGBlock* b, ExplodedNode* N, GRCoreEngine* e)
- : Eng(*e), B(*b), Pred(N), HasGeneratedNode(false) {}
+ EndOfFunctionNodeBuilder(const CFGBlock* b, ExplodedNode* N, CoreEngine* e)
+ : Eng(*e), B(*b), Pred(N), hasGeneratedNode(false) {}
- ~GREndPathNodeBuilder();
+ ~EndOfFunctionNodeBuilder();
- GRWorkList &getWorkList() { return *Eng.WList; }
+ WorkList &getWorkList() { return *Eng.WList; }
ExplodedNode* getPredecessor() const { return Pred; }
- GRBlockCounter getBlockCounter() const {
+ BlockCounter getBlockCounter() const {
return Eng.WList->getBlockCounter();
}
@@ -472,16 +483,17 @@ public:
}
};
-class GRCallEnterNodeBuilder {
- GRCoreEngine &Eng;
+class CallEnterNodeBuilder {
+ CoreEngine &Eng;
const ExplodedNode *Pred;
- // The call site.
+  // The call site. For an implicit automatic-object destructor, this is the
+  // triggering statement.
const Stmt *CE;
- // The AnalysisContext of the callee.
- AnalysisContext *CalleeCtx;
+ // The context of the callee.
+ const StackFrameContext *CalleeCtx;
// The parent block of the CallExpr.
const CFGBlock *Block;
@@ -490,8 +502,8 @@ class GRCallEnterNodeBuilder {
unsigned Index;
public:
- GRCallEnterNodeBuilder(GRCoreEngine &eng, const ExplodedNode *pred,
- const Stmt *s, AnalysisContext *callee,
+ CallEnterNodeBuilder(CoreEngine &eng, const ExplodedNode *pred,
+ const Stmt *s, const StackFrameContext *callee,
const CFGBlock *blk, unsigned idx)
: Eng(eng), Pred(pred), CE(s), CalleeCtx(callee), Block(blk), Index(idx) {}
@@ -503,29 +515,32 @@ public:
const Stmt *getCallExpr() const { return CE; }
- AnalysisContext *getCalleeContext() const { return CalleeCtx; }
+ const StackFrameContext *getCalleeContext() const { return CalleeCtx; }
const CFGBlock *getBlock() const { return Block; }
unsigned getIndex() const { return Index; }
- void GenerateNode(const GRState *state, const LocationContext *LocCtx);
+ void generateNode(const GRState *state);
};
-class GRCallExitNodeBuilder {
- GRCoreEngine &Eng;
+class CallExitNodeBuilder {
+ CoreEngine &Eng;
const ExplodedNode *Pred;
public:
- GRCallExitNodeBuilder(GRCoreEngine &eng, const ExplodedNode *pred)
+ CallExitNodeBuilder(CoreEngine &eng, const ExplodedNode *pred)
: Eng(eng), Pred(pred) {}
const ExplodedNode *getPredecessor() const { return Pred; }
const GRState *getState() const { return Pred->getState(); }
- void GenerateNode(const GRState *state);
+ void generateNode(const GRState *state);
};
+
+} // end ento namespace
+
} // end clang namespace
#endif
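
[Editorial sketch, not part of this patch, assuming an existing SubEngine implementation 'subEng': both constructors install a worklist, and the engine takes ownership of it.]

  clang::ento::CoreEngine Eng(clang::ento::WorkList::makeBFS(), subEng);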
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
index 611f507..732a40cb 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
@@ -11,23 +11,25 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ENVIRONMENT_H
-#define LLVM_CLANG_ANALYSIS_ENVIRONMENT_H
+#ifndef LLVM_CLANG_GR_ENVIRONMENT_H
+#define LLVM_CLANG_GR_ENVIRONMENT_H
-// For using typedefs in StoreManager. Should find a better place for these
-// typedefs.
-#include "clang/Checker/PathSensitive/Store.h"
-
-#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/ImmutableMap.h"
namespace clang {
-class EnvironmentManager;
-class ValueManager;
class LiveVariables;
+namespace ento {
+
+class EnvironmentManager;
+class SValBuilder;
+/// Environment - An immutable map from Stmts to their current
+/// symbolic values (SVals).
+///
class Environment {
private:
friend class EnvironmentManager;
@@ -41,22 +43,22 @@ private:
Environment(BindingsTy eb)
: ExprBindings(eb) {}
+ SVal lookupExpr(const Stmt* E) const;
+
public:
typedef BindingsTy::iterator iterator;
iterator begin() const { return ExprBindings.begin(); }
iterator end() const { return ExprBindings.end(); }
- SVal LookupExpr(const Stmt* E) const {
- const SVal* X = ExprBindings.lookup(E);
- return X ? *X : UnknownVal();
- }
- SVal GetSVal(const Stmt* Ex, ValueManager& ValMgr) const;
+ /// GetSVal - Fetches the current binding of the expression in the
+ /// Environment.
+ SVal getSVal(const Stmt* Ex, SValBuilder& svalBuilder) const;
/// Profile - Profile the contents of an Environment object for use
/// in a FoldingSet.
- static void Profile(llvm::FoldingSetNodeID& ID, const Environment* E) {
- E->ExprBindings.Profile(ID);
+ static void Profile(llvm::FoldingSetNodeID& ID, const Environment* env) {
+ env->ExprBindings.Profile(ID);
}
/// Profile - Used to profile the contents of this object for inclusion
@@ -80,7 +82,7 @@ public:
~EnvironmentManager() {}
Environment getInitialEnvironment() {
- return Environment(F.GetEmptyMap());
+ return Environment(F.getEmptyMap());
}
/// Bind the value 'V' to the statement 'S'.
@@ -92,11 +94,13 @@ public:
Environment bindExprAndLocation(Environment Env, const Stmt *S, SVal location,
SVal V);
- Environment RemoveDeadBindings(Environment Env,
+ Environment removeDeadBindings(Environment Env,
SymbolReaper &SymReaper, const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
};
+} // end ento namespace
+
} // end clang namespace
#endif
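
[Editorial sketch, not part of this patch: bindings are functional updates, so each bind returns a fresh Environment and the old one is unchanged. Assumes an EnvironmentManager 'EnvMgr', a statement 'S', SVals 'loc' and 'V', and an SValBuilder 'SVB'.]

  clang::ento::Environment Env = EnvMgr.getInitialEnvironment();
  Env = EnvMgr.bindExprAndLocation(Env, S, loc, V);
  clang::ento::SVal X = Env.getSVal(S, SVB);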
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index c875a23..e5d6876 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -16,8 +16,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_EXPLODEDGRAPH
-#define LLVM_CLANG_ANALYSIS_EXPLODEDGRAPH
+#ifndef LLVM_CLANG_GR_EXPLODEDGRAPH
+#define LLVM_CLANG_GR_EXPLODEDGRAPH
#include "clang/Analysis/ProgramPoint.h"
#include "clang/Analysis/AnalysisContext.h"
@@ -31,11 +31,14 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Support/Casting.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
namespace clang {
-class GRState;
class CFG;
+
+namespace ento {
+
class ExplodedGraph;
//===----------------------------------------------------------------------===//
@@ -44,14 +47,17 @@ class ExplodedGraph;
// on top of these classes.
//===----------------------------------------------------------------------===//
+// ExplodedNode is not const throughout the engine because successors may be
+// added to it at any time after it is created.
+
class ExplodedNode : public llvm::FoldingSetNode {
friend class ExplodedGraph;
- friend class GRCoreEngine;
- friend class GRStmtNodeBuilder;
- friend class GRBranchNodeBuilder;
- friend class GRIndirectGotoNodeBuilder;
- friend class GRSwitchNodeBuilder;
- friend class GREndPathNodeBuilder;
+ friend class CoreEngine;
+ friend class StmtNodeBuilder;
+ friend class BranchNodeBuilder;
+ friend class IndirectGotoNodeBuilder;
+ friend class SwitchNodeBuilder;
+ friend class EndOfFunctionNodeBuilder;
class NodeGroup {
enum { Size1 = 0x0, SizeOther = 0x1, AuxFlag = 0x2, Mask = 0x3 };
@@ -69,7 +75,7 @@ class ExplodedNode : public llvm::FoldingSetNode {
ExplodedNode *getNode() const {
return reinterpret_cast<ExplodedNode*>(getPtr());
}
-
+
public:
NodeGroup() : P(0) {}
@@ -83,6 +89,8 @@ class ExplodedNode : public llvm::FoldingSetNode {
void addNode(ExplodedNode* N, ExplodedGraph &G);
+ void replaceNode(ExplodedNode *node);
+
void setFlag() {
assert(P == 0);
P = AuxFlag;
@@ -109,7 +117,13 @@ class ExplodedNode : public llvm::FoldingSetNode {
public:
explicit ExplodedNode(const ProgramPoint& loc, const GRState* state)
- : Location(loc), State(state) {}
+ : Location(loc), State(state) {
+ const_cast<GRState*>(State)->incrementReferenceCount();
+ }
+
+ ~ExplodedNode() {
+ const_cast<GRState*>(State)->decrementReferenceCount();
+ }
/// getLocation - Returns the edge associated with the given node.
ProgramPoint getLocation() const { return Location; }
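
The constructor/destructor pair added above pins a node's GRState for the node's entire lifetime through an intrusive reference count; this is what later lets the state manager recycle states that no surviving node references. A minimal, self-contained sketch of the same pattern (illustrative only, not the clang sources):

    #include <cassert>

    // A state pinned by however many nodes currently point at it.
    struct State {
      unsigned refCount;
      State() : refCount(0) {}
      void incrementReferenceCount() { ++refCount; }
      void decrementReferenceCount() { assert(refCount > 0); --refCount; }
      bool referencedByNode() const { return refCount > 0; }
    };

    // A node keeps its state alive for as long as the node itself exists.
    class Node {
      State *state;
    public:
      explicit Node(State *s) : state(s) { state->incrementReferenceCount(); }
      ~Node() { state->decrementReferenceCount(); }
    };
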
@@ -200,6 +214,10 @@ public:
};
static void SetAuditor(Auditor* A);
+
+private:
+ void replaceSuccessor(ExplodedNode *node) { Succs.replaceNode(node); }
+ void replacePredecessor(ExplodedNode *node) { Preds.replaceNode(node); }
};
// FIXME: Is this class necessary?
@@ -216,7 +234,7 @@ public:
class ExplodedGraph {
protected:
- friend class GRCoreEngine;
+ friend class CoreEngine;
// Type definitions.
typedef llvm::SmallVector<ExplodedNode*,2> RootsTy;
@@ -241,6 +259,15 @@ protected:
/// NumNodes - The number of nodes in the graph.
unsigned NumNodes;
+
+ /// A list of recently allocated nodes that can potentially be recycled.
+ void *recentlyAllocatedNodes;
+
+ /// A list of nodes that can be reused.
+ void *freeNodes;
+
+ /// A flag that indicates whether nodes should be recycled.
+ bool reclaimNodes;
public:
/// getNode - Retrieve the node associated with a (Location,State) pair,
@@ -267,10 +294,12 @@ public:
return V;
}
- ExplodedGraph() : NumNodes(0) {}
-
- ~ExplodedGraph() {}
+ ExplodedGraph()
+ : NumNodes(0), recentlyAllocatedNodes(0),
+ freeNodes(0), reclaimNodes(false) {}
+ ~ExplodedGraph();
+
unsigned num_roots() const { return Roots.size(); }
unsigned num_eops() const { return EndNodes.size(); }
@@ -324,6 +353,14 @@ public:
const ExplodedNode* const * NEnd,
InterExplodedGraphMap *M,
llvm::DenseMap<const void*, const void*> *InverseMap) const;
+
+ /// Enable tracking of recently allocated nodes for potential reclamation
+ /// when calling reclaimRecentlyAllocatedNodes().
+ void enableNodeReclamation() { reclaimNodes = true; }
+
+ /// Reclaim "uninteresting" nodes created since the last time this method
+ /// was called.
+ void reclaimRecentlyAllocatedNodes();
};
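
enableNodeReclamation() and reclaimRecentlyAllocatedNodes() together form an opt-in garbage pass over "uninteresting" nodes. A hypothetical driver (the real call sites live in the engine, which is not shown here):

    ExplodedGraph G;
    G.enableNodeReclamation();          // start tracking fresh allocations
    // ... run a batch of worklist steps that add nodes to G ...
    G.reclaimRecentlyAllocatedNodes();  // recycle nodes deemed uninteresting
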
class ExplodedNodeSet {
@@ -368,13 +405,15 @@ public:
inline const_iterator end() const { return Impl.end(); }
};
+} // end GR namespace
+
} // end clang namespace
// GraphTraits
namespace llvm {
- template<> struct GraphTraits<clang::ExplodedNode*> {
- typedef clang::ExplodedNode NodeType;
+ template<> struct GraphTraits<clang::ento::ExplodedNode*> {
+ typedef clang::ento::ExplodedNode NodeType;
typedef NodeType::succ_iterator ChildIteratorType;
typedef llvm::df_iterator<NodeType*> nodes_iterator;
@@ -399,8 +438,8 @@ namespace llvm {
}
};
- template<> struct GraphTraits<const clang::ExplodedNode*> {
- typedef const clang::ExplodedNode NodeType;
+ template<> struct GraphTraits<const clang::ento::ExplodedNode*> {
+ typedef const clang::ento::ExplodedNode NodeType;
typedef NodeType::const_succ_iterator ChildIteratorType;
typedef llvm::df_iterator<NodeType*> nodes_iterator;
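
These GraphTraits specializations are what let LLVM's generic graph algorithms operate directly on analyzer nodes. For example, a depth-first walk over everything reachable from a node (a sketch; it assumes only the headers this file already includes):

    void visitReachable(const clang::ento::ExplodedNode *Root) {
      typedef llvm::df_iterator<const clang::ento::ExplodedNode*> dfs_iterator;
      for (dfs_iterator I = llvm::df_begin(Root), E = llvm::df_end(Root);
           I != E; ++I) {
        // *I is each node reachable through successor edges.
      }
    }
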
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 5ba0b36..767644a 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -1,4 +1,4 @@
-//===-- GRExprEngine.h - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//===-- ExprEngine.h - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -8,41 +8,45 @@
//===----------------------------------------------------------------------===//
//
// This file defines a meta-engine for path-sensitive dataflow analysis that
-// is built on GRCoreEngine, but provides the boilerplate to execute transfer
+// is built on CoreEngine, but provides the boilerplate to execute transfer
// functions and build the ExplodedGraph at the expression level.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GREXPRENGINE
-#define LLVM_CLANG_ANALYSIS_GREXPRENGINE
+#ifndef LLVM_CLANG_GR_EXPRENGINE
+#define LLVM_CLANG_GR_EXPRENGINE
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
-#include "clang/Checker/PathSensitive/GRSubEngine.h"
-#include "clang/Checker/PathSensitive/GRCoreEngine.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRSimpleAPICheck.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/Type.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtObjC.h"
namespace clang {
+
+class ObjCForCollectionStmt;
+
+namespace ento {
+
class AnalysisManager;
class Checker;
-class ObjCForCollectionStmt;
-class GRExprEngine : public GRSubEngine {
+class ExprEngine : public SubEngine {
AnalysisManager &AMgr;
- GRCoreEngine CoreEngine;
+ CoreEngine Engine;
/// G - the simulation graph.
ExplodedGraph& G;
- /// Builder - The current GRStmtNodeBuilder which is used when building the
+ /// Builder - The current StmtNodeBuilder which is used when building the
/// nodes for a given statement.
- GRStmtNodeBuilder* Builder;
+ StmtNodeBuilder* Builder;
/// StateMgr - Object that manages the data for all created states.
GRStateManager StateMgr;
@@ -50,11 +54,8 @@ class GRExprEngine : public GRSubEngine {
/// SymMgr - Object that manages the symbol information.
SymbolManager& SymMgr;
- /// ValMgr - Object that manages/creates SVals.
- ValueManager &ValMgr;
-
- /// SVator - SValuator object that creates SVals from expressions.
- SValuator &SVator;
+ /// svalBuilder - SValBuilder object that creates SVals from expressions.
+ SValBuilder &svalBuilder;
/// EntryNode - The immediate predecessor node.
ExplodedNode* EntryNode;
@@ -63,8 +64,8 @@ class GRExprEngine : public GRSubEngine {
/// variables and symbols (as determined by a liveness analysis).
const GRState* CleanedState;
- /// CurrentStmt - The current block-level statement.
- const Stmt* CurrentStmt;
+ /// currentStmt - The current block-level statement.
+ const Stmt* currentStmt;
// Obj-C Class Identifiers.
IdentifierInfo* NSExceptionII;
@@ -73,12 +74,10 @@ class GRExprEngine : public GRSubEngine {
Selector* NSExceptionInstanceRaiseSelectors;
Selector RaiseSel;
- llvm::OwningPtr<GRSimpleAPICheck> BatchAuditor;
-
enum CallbackKind {
PreVisitStmtCallback,
PostVisitStmtCallback,
- ProcessAssumeCallback,
+ processAssumeCallback,
EvalRegionChangesCallback
};
@@ -110,27 +109,18 @@ class GRExprEngine : public GRSubEngine {
/// The BugReporter associated with this engine. It is important that
/// this object be placed at the very end of member variables so that its
- /// destructor is called before the rest of the GRExprEngine is destroyed.
+ /// destructor is called before the rest of the ExprEngine is destroyed.
GRBugReporter BR;
- llvm::OwningPtr<GRTransferFuncs> TF;
-
- class CallExprWLItem {
- public:
- CallExpr::const_arg_iterator I;
- ExplodedNode *N;
-
- CallExprWLItem(const CallExpr::const_arg_iterator &i, ExplodedNode *n)
- : I(i), N(n) {}
- };
+ llvm::OwningPtr<TransferFuncs> TF;
public:
- GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf);
+ ExprEngine(AnalysisManager &mgr, TransferFuncs *tf);
- ~GRExprEngine();
+ ~ExprEngine();
void ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
- CoreEngine.ExecuteWorkList(L, Steps, 0);
+ Engine.ExecuteWorkList(L, Steps, 0);
}
  /// Execute the work list with an initial state. Nodes that reach the exit
@@ -139,7 +129,7 @@ public:
void ExecuteWorkListWithInitialState(const LocationContext *L, unsigned Steps,
const GRState *InitState,
ExplodedNodeSet &Dst) {
- CoreEngine.ExecuteWorkListWithInitialState(L, Steps, InitState, Dst);
+ Engine.ExecuteWorkListWithInitialState(L, Steps, InitState, Dst);
}
/// getContext - Return the ASTContext associated with this analysis.
@@ -147,16 +137,16 @@ public:
virtual AnalysisManager &getAnalysisManager() { return AMgr; }
- SValuator &getSValuator() { return SVator; }
+ SValBuilder &getSValBuilder() { return svalBuilder; }
- GRTransferFuncs& getTF() { return *TF; }
+ TransferFuncs& getTF() { return *TF; }
BugReporter& getBugReporter() { return BR; }
- GRStmtNodeBuilder &getBuilder() { assert(Builder); return *Builder; }
+ StmtNodeBuilder &getBuilder() { assert(Builder); return *Builder; }
- // FIXME: Remove once GRTransferFuncs is no longer referenced.
- void setTransferFunction(GRTransferFuncs* tf);
+ // FIXME: Remove once TransferFuncs is no longer referenced.
+ void setTransferFunction(TransferFuncs* tf);
/// ViewGraph - Visualize the ExplodedGraph created by executing the
/// simulation.
@@ -186,56 +176,64 @@ public:
return static_cast<CHECKER*>(lookupChecker(CHECKER::getTag()));
}
- void AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C);
- void AddCheck(GRSimpleAPICheck* A);
+ /// processCFGElement - Called by CoreEngine. Used to generate new successor
+ /// nodes by processing the 'effects' of a CFG element.
+ void processCFGElement(const CFGElement E, StmtNodeBuilder& builder);
+
+ void ProcessStmt(const CFGStmt S, StmtNodeBuilder &builder);
+
+ void ProcessInitializer(const CFGInitializer I, StmtNodeBuilder &builder);
- /// ProcessStmt - Called by GRCoreEngine. Used to generate new successor
- /// nodes by processing the 'effects' of a block-level statement.
- void ProcessStmt(const CFGElement E, GRStmtNodeBuilder& builder);
+ void ProcessImplicitDtor(const CFGImplicitDtor D, StmtNodeBuilder &builder);
- /// ProcessBlockEntrance - Called by GRCoreEngine when start processing
- /// a CFGBlock. This method returns true if the analysis should continue
- /// exploring the given path, and false otherwise.
- bool ProcessBlockEntrance(const CFGBlock* B, const ExplodedNode *Pred,
- GRBlockCounter BC);
+ void ProcessAutomaticObjDtor(const CFGAutomaticObjDtor D,
+ StmtNodeBuilder &builder);
+ void ProcessBaseDtor(const CFGBaseDtor D, StmtNodeBuilder &builder);
+ void ProcessMemberDtor(const CFGMemberDtor D, StmtNodeBuilder &builder);
+ void ProcessTemporaryDtor(const CFGTemporaryDtor D,
+ StmtNodeBuilder &builder);
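
These declarations imply a kind-based dispatch in processCFGElement; its definition lives in ExprEngine.cpp, so the shape sketched below is an assumption, written out as comments:

    // void ExprEngine::processCFGElement(const CFGElement E,
    //                                    StmtNodeBuilder &builder) {
    //   if (const CFGStmt *CS = E.getAs<CFGStmt>())
    //     ProcessStmt(*CS, builder);
    //   else if (const CFGInitializer *CI = E.getAs<CFGInitializer>())
    //     ProcessInitializer(*CI, builder);
    //   else if (const CFGImplicitDtor *CD = E.getAs<CFGImplicitDtor>())
    //     ProcessImplicitDtor(*CD, builder);
    // }
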
- /// ProcessBranch - Called by GRCoreEngine. Used to generate successor
+ /// Called by CoreEngine when processing the entrance of a CFGBlock.
+ virtual void processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
+ GenericNodeBuilder<BlockEntrance> &nodeBuilder);
+
+ /// ProcessBranch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
- void ProcessBranch(const Stmt* Condition, const Stmt* Term,
- GRBranchNodeBuilder& builder);
+ void processBranch(const Stmt* Condition, const Stmt* Term,
+ BranchNodeBuilder& builder);
- /// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
+ /// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
- void ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder);
+ void processIndirectGoto(IndirectGotoNodeBuilder& builder);
- /// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
+ /// ProcessSwitch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
- void ProcessSwitch(GRSwitchNodeBuilder& builder);
+ void processSwitch(SwitchNodeBuilder& builder);
- /// ProcessEndPath - Called by GRCoreEngine. Used to generate end-of-path
+ /// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
- void ProcessEndPath(GREndPathNodeBuilder& builder);
+ void processEndOfFunction(EndOfFunctionNodeBuilder& builder);
/// Generate the entry node of the callee.
- void ProcessCallEnter(GRCallEnterNodeBuilder &builder);
+ void processCallEnter(CallEnterNodeBuilder &builder);
/// Generate the first post callsite node.
- void ProcessCallExit(GRCallExitNodeBuilder &builder);
+ void processCallExit(CallExitNodeBuilder &builder);
- /// Called by GRCoreEngine when the analysis worklist has terminated.
- void ProcessEndWorklist(bool hasWorkRemaining);
+ /// Called by CoreEngine when the analysis worklist has terminated.
+ void processEndWorklist(bool hasWorkRemaining);
- /// EvalAssume - Callback function invoked by the ConstraintManager when
+ /// evalAssume - Callback function invoked by the ConstraintManager when
/// making assumptions about state values.
- const GRState *ProcessAssume(const GRState *state, SVal cond,bool assumption);
+ const GRState *processAssume(const GRState *state, SVal cond,bool assumption);
- /// WantsRegionChangeUpdate - Called by GRStateManager to determine if a
- /// region change should trigger a ProcessRegionChanges update.
- bool WantsRegionChangeUpdate(const GRState* state);
+ /// wantsRegionChangeUpdate - Called by GRStateManager to determine if a
+ /// region change should trigger a processRegionChanges update.
+ bool wantsRegionChangeUpdate(const GRState* state);
- /// ProcessRegionChanges - Called by GRStateManager whenever a change is made
+ /// processRegionChanges - Called by GRStateManager whenever a change is made
/// to the store. Used to update checkers that track region values.
- const GRState* ProcessRegionChanges(const GRState *state,
+ const GRState* processRegionChanges(const GRState *state,
const MemRegion * const *Begin,
const MemRegion * const *End);
@@ -247,7 +245,7 @@ public:
return StateMgr.getConstraintManager();
}
- // FIXME: Remove when we migrate over to just using ValueManager.
+ // FIXME: Remove when we migrate over to just using SValBuilder.
BasicValueFactory& getBasicVals() {
return StateMgr.getBasicVals();
}
@@ -255,20 +253,18 @@ public:
return StateMgr.getBasicVals();
}
- ValueManager &getValueManager() { return ValMgr; }
- const ValueManager &getValueManager() const { return ValMgr; }
-
// FIXME: Remove when we migrate over to just using ValueManager.
SymbolManager& getSymbolManager() { return SymMgr; }
const SymbolManager& getSymbolManager() const { return SymMgr; }
// Functions for external checking of whether we have unfinished work
- bool wasBlockAborted() const { return CoreEngine.wasBlockAborted(); }
+ bool wasBlockAborted() const { return Engine.wasBlockAborted(); }
+ bool hasEmptyWorkList() const { return !Engine.getWorkList()->hasWork(); }
bool hasWorkRemaining() const {
- return wasBlockAborted() || CoreEngine.getWorkList()->hasWork();
+ return wasBlockAborted() || Engine.getWorkList()->hasWork();
}
- const GRCoreEngine &getCoreEngine() const { return CoreEngine; }
+ const CoreEngine &getCoreEngine() const { return Engine; }
protected:
const GRState* GetState(ExplodedNode* N) {
@@ -286,11 +282,14 @@ public:
void CheckerVisit(const Stmt *S, ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
CallbackKind Kind);
+ void CheckerVisitObjCMessage(const ObjCMessage &msg, ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src, bool isPrevisit);
+
bool CheckerEvalCall(const CallExpr *CE,
ExplodedNodeSet &Dst,
ExplodedNode *Pred);
- void CheckerEvalNilReceiver(const ObjCMessageExpr *ME,
+ void CheckerEvalNilReceiver(const ObjCMessage &msg,
ExplodedNodeSet &Dst,
const GRState *state,
ExplodedNode *Pred);
@@ -303,14 +302,10 @@ public:
/// other functions that handle specific kinds of statements.
void Visit(const Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst);
- /// VisitLValue - Evaluate the lvalue of the expression. For example, if Ex is
- /// a DeclRefExpr, it evaluates to the MemRegionVal which represents its
- /// storage location. Note that not all kinds of expressions has lvalue.
- void VisitLValue(const Expr* Ex, ExplodedNode* Pred, ExplodedNodeSet& Dst);
-
/// VisitArraySubscriptExpr - Transfer function for array accesses.
- void VisitArraySubscriptExpr(const ArraySubscriptExpr* Ex, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ void VisitLvalArraySubscriptExpr(const ArraySubscriptExpr* Ex,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst);
/// VisitAsmStmt - Transfer function logic for inline asm.
void VisitAsmStmt(const AsmStmt* A, ExplodedNode* Pred, ExplodedNodeSet& Dst);
@@ -331,35 +326,26 @@ public:
/// VisitBinaryOperator - Transfer function logic for binary operators.
void VisitBinaryOperator(const BinaryOperator* B, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNodeSet& Dst);
/// VisitCall - Transfer function for function calls.
void VisitCall(const CallExpr* CE, ExplodedNode* Pred,
CallExpr::const_arg_iterator AI,
CallExpr::const_arg_iterator AE,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNodeSet& Dst);
/// VisitCast - Transfer function logic for all casts (implicit and explicit).
void VisitCast(const CastExpr *CastE, const Expr *Ex, ExplodedNode *Pred,
- ExplodedNodeSet &Dst, bool asLValue);
+ ExplodedNodeSet &Dst);
/// VisitCompoundLiteralExpr - Transfer function logic for compound literals.
void VisitCompoundLiteralExpr(const CompoundLiteralExpr* CL,
- ExplodedNode* Pred, ExplodedNodeSet& Dst,
- bool asLValue);
-
- /// VisitDeclRefExpr - Transfer function logic for DeclRefExprs.
- void VisitDeclRefExpr(const DeclRefExpr* DR, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNode* Pred, ExplodedNodeSet& Dst);
- /// VisitBlockDeclRefExpr - Transfer function logic for BlockDeclRefExprs.
- void VisitBlockDeclRefExpr(const BlockDeclRefExpr* DR, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
-
+ /// Transfer function logic for DeclRefExprs and BlockDeclRefExprs.
void VisitCommonDeclRefExpr(const Expr* DR, const NamedDecl *D,
- ExplodedNode* Pred, ExplodedNodeSet& Dst,
- bool asLValue);
+ ExplodedNode* Pred, ExplodedNodeSet& Dst);
/// VisitDeclStmt - Transfer function logic for DeclStmts.
void VisitDeclStmt(const DeclStmt* DS, ExplodedNode* Pred,
@@ -383,11 +369,18 @@ public:
/// VisitMemberExpr - Transfer function for member expressions.
void VisitMemberExpr(const MemberExpr* M, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNodeSet& Dst);
- /// VisitObjCIvarRefExpr - Transfer function logic for ObjCIvarRefExprs.
- void VisitObjCIvarRefExpr(const ObjCIvarRefExpr* DR, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ /// Transfer function logic for ObjCAtSynchronizedStmts.
+ void VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *E,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// Transfer function logic for computing the lvalue of an Objective-C ivar.
+ void VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr* DR, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst);
/// VisitObjCForCollectionStmt - Transfer function logic for
/// ObjCForCollectionStmt.
@@ -400,7 +393,9 @@ public:
/// VisitObjCMessageExpr - Transfer function for ObjC message expressions.
void VisitObjCMessageExpr(const ObjCMessageExpr* ME, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNodeSet& Dst);
+ void VisitObjCMessage(const ObjCMessage &msg, ExplodedNodeSet &Src,
+ ExplodedNodeSet& Dst);
/// VisitReturnStmt - Transfer function logic for return statements.
void VisitReturnStmt(const ReturnStmt* R, ExplodedNode* Pred,
@@ -416,25 +411,36 @@ public:
/// VisitUnaryOperator - Transfer function logic for unary operators.
void VisitUnaryOperator(const UnaryOperator* B, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue);
+ ExplodedNodeSet& Dst);
void VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
ExplodedNodeSet & Dst);
-
- void VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst);
+
+ void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ VisitCXXConstructExpr(expr, 0, Pred, Dst);
+ }
+
+ void VisitCXXConstructExpr(const CXXConstructExpr *E, const MemRegion *Dest,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitCXXDestructor(const CXXDestructorDecl *DD,
+ const MemRegion *Dest, const Stmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
void VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *C,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
void VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
void VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
- void VisitAggExpr(const Expr *E, SVal Dest, ExplodedNode *Pred,
+ void VisitAggExpr(const Expr *E, const MemRegion *Dest, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
/// Create a C++ temporary object for an rvalue.
@@ -442,87 +448,103 @@ public:
ExplodedNodeSet &Dst);
/// Synthesize CXXThisRegion.
- const CXXThisRegion *getCXXThisRegion(const CXXMethodDecl *MD,
+ const CXXThisRegion *getCXXThisRegion(const CXXRecordDecl *RD,
const StackFrameContext *SFC);
+ const CXXThisRegion *getCXXThisRegion(const CXXMethodDecl *decl,
+ const StackFrameContext *frameCtx);
+
/// Evaluate arguments with a work list algorithm.
- void EvalArguments(ConstExprIterator AI, ConstExprIterator AE,
+ void evalArguments(ConstExprIterator AI, ConstExprIterator AE,
const FunctionProtoType *FnType,
- ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ ExplodedNode *Pred, ExplodedNodeSet &Dst,
+ bool FstArgAsLValue = false);
- /// EvalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
+ /// Evaluate method call itself. Used for CXXMethodCallExpr and
+ /// CXXOperatorCallExpr.
+ void evalMethodCall(const CallExpr *MCE, const CXXMethodDecl *MD,
+ const Expr *ThisExpr, ExplodedNode *Pred,
+ ExplodedNodeSet &Src, ExplodedNodeSet &Dst);
+
+ /// evalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
/// expressions of the form 'x != 0' and generate new nodes (stored in Dst)
/// with those assumptions.
- void EvalEagerlyAssume(ExplodedNodeSet& Dst, ExplodedNodeSet& Src,
+ void evalEagerlyAssume(ExplodedNodeSet& Dst, ExplodedNodeSet& Src,
const Expr *Ex);
- SVal EvalMinus(SVal X) {
- return X.isValid() ? SVator.EvalMinus(cast<NonLoc>(X)) : X;
+ SVal evalMinus(SVal X) {
+ return X.isValid() ? svalBuilder.evalMinus(cast<NonLoc>(X)) : X;
}
- SVal EvalComplement(SVal X) {
- return X.isValid() ? SVator.EvalComplement(cast<NonLoc>(X)) : X;
+ SVal evalComplement(SVal X) {
+ return X.isValid() ? svalBuilder.evalComplement(cast<NonLoc>(X)) : X;
}
public:
- SVal EvalBinOp(const GRState *state, BinaryOperator::Opcode op,
+ SVal evalBinOp(const GRState *state, BinaryOperator::Opcode op,
NonLoc L, NonLoc R, QualType T) {
- return SVator.EvalBinOpNN(state, op, L, R, T);
+ return svalBuilder.evalBinOpNN(state, op, L, R, T);
}
- SVal EvalBinOp(const GRState *state, BinaryOperator::Opcode op,
+ SVal evalBinOp(const GRState *state, BinaryOperator::Opcode op,
NonLoc L, SVal R, QualType T) {
- return R.isValid() ? SVator.EvalBinOpNN(state,op,L, cast<NonLoc>(R), T) : R;
+    return R.isValid() ? svalBuilder.evalBinOpNN(state, op, L, cast<NonLoc>(R), T) : R;
}
- SVal EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
+ SVal evalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
SVal LHS, SVal RHS, QualType T) {
- return SVator.EvalBinOp(ST, Op, LHS, RHS, T);
+ return svalBuilder.evalBinOp(ST, Op, LHS, RHS, T);
}
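
All three overloads funnel into SValBuilder, so callers get one uniform entry point regardless of operand kinds. A hedged usage fragment ('state', 'lhs', 'rhs' are stand-ins for values obtained from the engine):

    // SVal sum = evalBinOp(state, BO_Add, lhs, rhs, getContext().IntTy);
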
protected:
- void EvalObjCMessageExpr(ExplodedNodeSet& Dst, const ObjCMessageExpr* ME,
- ExplodedNode* Pred, const GRState *state) {
- assert (Builder && "GRStmtNodeBuilder must be defined.");
- getTF().EvalObjCMessageExpr(Dst, *this, *Builder, ME, Pred, state);
+ void evalObjCMessage(ExplodedNodeSet& Dst, const ObjCMessage &msg,
+ ExplodedNode* Pred, const GRState *state) {
+ assert (Builder && "StmtNodeBuilder must be defined.");
+ getTF().evalObjCMessage(Dst, *this, *Builder, msg, Pred, state);
}
const GRState* MarkBranch(const GRState* St, const Stmt* Terminator,
bool branchTaken);
- /// EvalBind - Handle the semantics of binding a value to a specific location.
- /// This method is used by EvalStore, VisitDeclStmt, and others.
- void EvalBind(ExplodedNodeSet& Dst, const Stmt* StoreE, ExplodedNode* Pred,
+ /// evalBind - Handle the semantics of binding a value to a specific location.
+ /// This method is used by evalStore, VisitDeclStmt, and others.
+ void evalBind(ExplodedNodeSet& Dst, const Stmt* StoreE, ExplodedNode* Pred,
const GRState* St, SVal location, SVal Val,
bool atDeclInit = false);
public:
// FIXME: 'tag' should be removed, and a LocationContext should be used
// instead.
- void EvalLoad(ExplodedNodeSet& Dst, const Expr* Ex, ExplodedNode* Pred,
+ // FIXME: Comment on the meaning of the arguments, when 'St' may not
+ // be the same as Pred->state, and when 'location' may not be the
+ // same as state->getLValue(Ex).
+ /// Simulate a read of the result of Ex.
+ void evalLoad(ExplodedNodeSet& Dst, const Expr* Ex, ExplodedNode* Pred,
const GRState* St, SVal location, const void *tag = 0,
QualType LoadTy = QualType());
// FIXME: 'tag' should be removed, and a LocationContext should be used
// instead.
- void EvalStore(ExplodedNodeSet& Dst, const Expr* AssignE, const Expr* StoreE,
+ void evalStore(ExplodedNodeSet& Dst, const Expr* AssignE, const Expr* StoreE,
ExplodedNode* Pred, const GRState* St, SVal TargetLV, SVal Val,
const void *tag = 0);
-private:
- void EvalLoadCommon(ExplodedNodeSet& Dst, const Expr* Ex, ExplodedNode* Pred,
+private:
+ void evalLoadCommon(ExplodedNodeSet& Dst, const Expr* Ex, ExplodedNode* Pred,
const GRState* St, SVal location, const void *tag,
QualType LoadTy);
// FIXME: 'tag' should be removed, and a LocationContext should be used
// instead.
- void EvalLocation(ExplodedNodeSet &Dst, const Stmt *S, ExplodedNode* Pred,
+ void evalLocation(ExplodedNodeSet &Dst, const Stmt *S, ExplodedNode* Pred,
const GRState* St, SVal location,
const void *tag, bool isLoad);
bool InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE, ExplodedNode *Pred);
};
+} // end ento namespace
+
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngineBuilders.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h
index 5503412..18e39d9 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngineBuilders.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h
@@ -1,4 +1,4 @@
-//===-- GRExprEngineBuilders.h - "Builder" classes for GRExprEngine -*- C++ -*-=
+//===-- ExprEngineBuilders.h - "Builder" classes for ExprEngine ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -8,52 +8,53 @@
//===----------------------------------------------------------------------===//
//
// This file defines smart builder "references" which are used to marshal
-// builders between GRExprEngine objects and their related components.
+// builders between ExprEngine objects and their related components.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GREXPRENGINE_BUILDERS
-#define LLVM_CLANG_ANALYSIS_GREXPRENGINE_BUILDERS
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#ifndef LLVM_CLANG_GR_EXPRENGINE_BUILDERS
+#define LLVM_CLANG_GR_EXPRENGINE_BUILDERS
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/Analysis/Support/SaveAndRestore.h"
namespace clang {
-class GRStmtNodeBuilderRef {
+namespace ento {
+
+class StmtNodeBuilderRef {
ExplodedNodeSet &Dst;
- GRStmtNodeBuilder &B;
- GRExprEngine& Eng;
+ StmtNodeBuilder &B;
+ ExprEngine& Eng;
ExplodedNode* Pred;
const GRState* state;
const Stmt* stmt;
const unsigned OldSize;
const bool AutoCreateNode;
SaveAndRestore<bool> OldSink;
- SaveAndRestore<const void*> OldTag;
SaveOr OldHasGen;
private:
- friend class GRExprEngine;
+ friend class ExprEngine;
- GRStmtNodeBuilderRef(); // do not implement
- void operator=(const GRStmtNodeBuilderRef&); // do not implement
+ StmtNodeBuilderRef(); // do not implement
+ void operator=(const StmtNodeBuilderRef&); // do not implement
- GRStmtNodeBuilderRef(ExplodedNodeSet &dst,
- GRStmtNodeBuilder &builder,
- GRExprEngine& eng,
+ StmtNodeBuilderRef(ExplodedNodeSet &dst,
+ StmtNodeBuilder &builder,
+ ExprEngine& eng,
ExplodedNode* pred,
const GRState *st,
const Stmt* s, bool auto_create_node)
: Dst(dst), B(builder), Eng(eng), Pred(pred),
state(st), stmt(s), OldSize(Dst.size()), AutoCreateNode(auto_create_node),
- OldSink(B.BuildSinks), OldTag(B.Tag), OldHasGen(B.HasGeneratedNode) {}
+ OldSink(B.BuildSinks), OldHasGen(B.hasGeneratedNode) {}
public:
- ~GRStmtNodeBuilderRef() {
+ ~StmtNodeBuilderRef() {
    // Handle the case where no nodes were generated. Auto-generate a node
    // that contains the updated state if we aren't generating sinks.
- if (!B.BuildSinks && Dst.size() == OldSize && !B.HasGeneratedNode) {
+ if (!B.BuildSinks && Dst.size() == OldSize && !B.hasGeneratedNode) {
if (AutoCreateNode)
B.MakeNode(Dst, const_cast<Stmt*>(stmt), Pred, state);
else
@@ -72,5 +73,8 @@ public:
}
};
+} // end GR namespace
+
} // end clang namespace
+
#endif
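
StmtNodeBuilderRef is an RAII guard: it snapshots builder flags on entry (via SaveAndRestore and SaveOr) and, on destruction, fabricates the "default" node when the visit produced none. SaveAndRestore itself is a tiny idiom; a self-contained equivalent, with the sketch's own names:

    template <typename T>
    class ScopedRestore {
      T &ref;    // variable to restore
      T saved;   // value captured at construction
    public:
      explicit ScopedRestore(T &r) : ref(r), saved(r) {}
      ~ScopedRestore() { ref = saved; }
    };
    // The builder-ref's members restore in reverse declaration order when it
    // goes out of scope, leaving the shared builder as the caller had it.
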
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRState.h
index d72d63a..37694da 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRState.h
@@ -1,4 +1,4 @@
-//== GRState*h - Path-Sens. "State" for tracking valuues -----*- C++ -*--==//
+//== GRState.h - Path-sensitive "State" for tracking values -----*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,17 +7,18 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines SymbolRef, ExprBindKey, and GRState*
+// This file defines SymbolRef, ExprBindKey, and GRState*.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_VALUESTATE_H
-#define LLVM_CLANG_ANALYSIS_VALUESTATE_H
+#ifndef LLVM_CLANG_GR_VALUESTATE_H
+#define LLVM_CLANG_GR_VALUESTATE_H
-#include "clang/Checker/PathSensitive/ConstraintManager.h"
-#include "clang/Checker/PathSensitive/Environment.h"
-#include "clang/Checker/PathSensitive/Store.h"
-#include "clang/Checker/PathSensitive/ValueManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/Support/Casting.h"
@@ -30,11 +31,14 @@ class raw_ostream;
namespace clang {
class ASTContext;
+
+namespace ento {
+
class GRStateManager;
class Checker;
typedef ConstraintManager* (*ConstraintManagerCreator)(GRStateManager&,
- GRSubEngine&);
+ SubEngine&);
typedef StoreManager* (*StoreManagerCreator)(GRStateManager&);
//===----------------------------------------------------------------------===//
@@ -52,16 +56,19 @@ template <typename T> struct GRStateTrait {
}
};
-//===----------------------------------------------------------------------===//
-// GRState- An ImmutableMap type Stmt*/Decl*/Symbols to SVals.
-//===----------------------------------------------------------------------===//
-
class GRStateManager;
-/// GRState - This class encapsulates the actual data values for
-/// for a "state" in our symbolic value tracking. It is intended to be
-/// used as a functional object; that is once it is created and made
-/// "persistent" in a FoldingSet its values will never change.
+/// GRState - This class encapsulates:
+///
+/// 1. A mapping from expressions to values (Environment)
+/// 2. A mapping from locations to values (Store)
+/// 3. Constraints on symbolic values (GenericDataMap)
+///
+/// Together these represent the "abstract state" of a program.
+///
+/// GRState is intended to be used as a functional object; that is,
+/// once it is created and made "persistent" in a FoldingSet, its
+/// values will never change.
class GRState : public llvm::FoldingSetNode {
public:
typedef llvm::ImmutableSet<llvm::APSInt*> IntSetTy;
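
The doc comment above states the engine's central invariant: once a GRState is uniqued in the FoldingSet it never changes, so every "mutating" operation returns a different state. A sketch of the resulting usage style, using unbindLoc() declared further down in this header ('state' and 'L' are placeholders):

    const GRState *cleared = state->unbindLoc(L);  // a new uniqued state
    // 'state' is untouched; both pointers may now flow down separate paths
    // and share all unmodified substructure.
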
@@ -71,60 +78,59 @@ private:
void operator=(const GRState& R) const; // Do not implement.
friend class GRStateManager;
+ friend class ExplodedGraph;
+ friend class ExplodedNode;
- GRStateManager *StateMgr;
- Environment Env;
- Store St;
- GenericDataMap GDM;
+ GRStateManager *stateMgr;
+ Environment Env; // Maps a Stmt to its current SVal.
+ Store store; // Maps a location to its current value.
+ GenericDataMap GDM; // Custom data stored by a client of this class.
+ unsigned refCount;
/// makeWithStore - Return a GRState with the same values as the current
/// state with the exception of using the specified Store.
- const GRState *makeWithStore(Store store) const;
+ const GRState *makeWithStore(const StoreRef &store) const;
+
+ void setStore(const StoreRef &storeRef);
public:
/// This ctor is used when creating the first GRState object.
GRState(GRStateManager *mgr, const Environment& env,
- Store st, GenericDataMap gdm)
- : StateMgr(mgr),
- Env(env),
- St(st),
- GDM(gdm) {}
-
+ StoreRef st, GenericDataMap gdm);
+
/// Copy ctor - We must explicitly define this or else the "Next" ptr
/// in FoldingSetNode will also get copied.
- GRState(const GRState& RHS)
- : llvm::FoldingSetNode(),
- StateMgr(RHS.StateMgr),
- Env(RHS.Env),
- St(RHS.St),
- GDM(RHS.GDM) {}
-
- /// getStateManager - Return the GRStateManager associated with this state.
- GRStateManager &getStateManager() const {
- return *StateMgr;
- }
+ GRState(const GRState& RHS);
+
+ ~GRState();
+
+ /// Return the GRStateManager associated with this state.
+ GRStateManager &getStateManager() const { return *stateMgr; }
+
+ /// Return true if this state is referenced by a persistent ExplodedNode.
+ bool referencedByExplodedNode() const { return refCount > 0; }
/// getEnvironment - Return the environment associated with this state.
/// The environment is the mapping from expressions to values.
const Environment& getEnvironment() const { return Env; }
- /// getStore - Return the store associated with this state. The store
+ /// Return the store associated with this state. The store
/// is a mapping from locations to values.
- Store getStore() const { return St; }
-
- void setStore(Store s) { St = s; }
+ Store getStore() const { return store; }
+
/// getGDM - Return the generic data map associated with this state.
GenericDataMap getGDM() const { return GDM; }
void setGDM(GenericDataMap gdm) { GDM = gdm; }
- /// Profile - Profile the contents of a GRState object for use
- /// in a FoldingSet.
+ /// Profile - Profile the contents of a GRState object for use in a
+ /// FoldingSet. Two GRState objects are considered equal if they
+ /// have the same Environment, Store, and GenericDataMap.
static void Profile(llvm::FoldingSetNodeID& ID, const GRState* V) {
V->Env.Profile(ID);
- ID.AddPointer(V->St);
+ ID.AddPointer(V->store);
V->GDM.Profile(ID);
}
@@ -134,10 +140,6 @@ public:
Profile(ID, this);
}
- SVal LookupExpr(Expr* E) const {
- return Env.LookupExpr(E);
- }
-
BasicValueFactory &getBasicVals() const;
SymbolManager &getSymbolManager() const;
@@ -150,36 +152,32 @@ public:
// As constraints gradually accrue on symbolic values, added constraints
// may conflict and indicate that a state is infeasible (as no real values
// could satisfy all the constraints). This is the principal mechanism
- // for modeling path-sensitivity in GRExprEngine/GRState.
+ // for modeling path-sensitivity in ExprEngine/GRState.
//
- // Various "Assume" methods form the interface for adding constraints to
- // symbolic values. A call to "Assume" indicates an assumption being placed
- // on one or symbolic values. Assume methods take the following inputs:
+ // Various "assume" methods form the interface for adding constraints to
+ // symbolic values. A call to 'assume' indicates an assumption being placed
+  // on one or more symbolic values. 'assume' methods take the following inputs:
//
// (1) A GRState object representing the current state.
//
- // (2) The assumed constraint (which is specific to a given "Assume" method).
+ // (2) The assumed constraint (which is specific to a given "assume" method).
//
// (3) A binary value "Assumption" that indicates whether the constraint is
// assumed to be true or false.
//
- // The output of "Assume" are two values:
- //
- // (a) "isFeasible" is set to true or false to indicate whether or not
- // the assumption is feasible.
- //
- // (b) A new GRState object with the added constraints.
- //
- // FIXME: (a) should probably disappear since it is redundant with (b).
- // (i.e., (b) could just be set to NULL).
+ // The output of "assume*" is a new GRState object with the added constraints.
+ // If no new state is feasible, NULL is returned.
//
- const GRState *Assume(DefinedOrUnknownSVal cond, bool assumption) const;
-
+ const GRState *assume(DefinedOrUnknownSVal cond, bool assumption) const;
+
+ /// This method assumes both "true" and "false" for 'cond', and
+ /// returns both corresponding states. It's shorthand for doing
+ /// 'assume' twice.
std::pair<const GRState*, const GRState*>
- Assume(DefinedOrUnknownSVal cond) const;
+ assume(DefinedOrUnknownSVal cond) const;
- const GRState *AssumeInBound(DefinedOrUnknownSVal idx,
+ const GRState *assumeInBound(DefinedOrUnknownSVal idx,
DefinedOrUnknownSVal upperBound,
bool assumption) const;
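
The dual-assume form is the common way to split a path on a branch condition. A hedged fragment ('state' and 'cond' stand for values obtained from the engine):

    std::pair<const GRState*, const GRState*> branches = state->assume(cond);
    const GRState *stTrue = branches.first, *stFalse = branches.second;
    if (stTrue && !stFalse) { /* condition provably true on this path  */ }
    if (!stTrue && stFalse) { /* condition provably false on this path */ }
    // If both are non-null, the condition is unconstrained and the path forks.
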
@@ -194,9 +192,7 @@ public:
//==---------------------------------------------------------------------==//
/// BindCompoundLiteral - Return the state that has the bindings currently
- /// in 'state' plus the bindings for the CompoundLiteral. 'R' is the region
- /// for the compound literal and 'BegInit' and 'EndInit' represent an
- /// array of initializer values.
+ /// in this state plus the bindings for the CompoundLiteral.
const GRState *bindCompoundLiteral(const CompoundLiteralExpr* CL,
const LocationContext *LC,
SVal V) const;
@@ -222,27 +218,27 @@ public:
const GRState *unbindLoc(Loc LV) const;
- /// InvalidateRegion - Returns the state with bindings for the given region
- /// cleared from the store. See InvalidateRegions.
- const GRState *InvalidateRegion(const MemRegion *R,
+ /// invalidateRegion - Returns the state with bindings for the given region
+ /// cleared from the store. See invalidateRegions.
+ const GRState *invalidateRegion(const MemRegion *R,
const Expr *E, unsigned BlockCount,
StoreManager::InvalidatedSymbols *IS = NULL)
const {
- return InvalidateRegions(&R, &R+1, E, BlockCount, IS, false);
+ return invalidateRegions(&R, &R+1, E, BlockCount, IS, false);
}
- /// InvalidateRegions - Returns the state with bindings for the given regions
+ /// invalidateRegions - Returns the state with bindings for the given regions
/// cleared from the store. The regions are provided as a continuous array
/// from Begin to End. Optionally invalidates global regions as well.
- const GRState *InvalidateRegions(const MemRegion * const *Begin,
+ const GRState *invalidateRegions(const MemRegion * const *Begin,
const MemRegion * const *End,
const Expr *E, unsigned BlockCount,
StoreManager::InvalidatedSymbols *IS,
bool invalidateGlobals) const;
- /// EnterStackFrame - Returns the state for entry to the given stack frame,
+ /// enterStackFrame - Returns the state for entry to the given stack frame,
/// preserving the current state.
- const GRState *EnterStackFrame(const StackFrameContext *frame) const;
+ const GRState *enterStackFrame(const StackFrameContext *frame) const;
/// Get the lvalue for a variable reference.
Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
@@ -270,12 +266,9 @@ public:
SVal getSValAsScalarOrLoc(const Stmt *Ex) const;
SVal getSVal(Loc LV, QualType T = QualType()) const;
-
- /// Returns a "simplified" SVal bound to the location 'LV' in the state's
- /// store. A simplified SVal will include optimizations such as
- /// if the SVal is a symbol whose value is perfectly constrained then that
- /// constant value is returned instead.
- SVal getSimplifiedSVal(Loc LV, QualType T= QualType()) const;
+
+ /// Returns the "raw" SVal bound to LV before any value simplfication.
+ SVal getRawSVal(Loc LV, QualType T= QualType()) const;
SVal getSVal(const MemRegion* R) const;
@@ -368,6 +361,16 @@ public:
void printStdErr(CFG &C) const;
void printDOT(llvm::raw_ostream& Out, CFG &C) const;
+
+private:
+  /// Increments the number of times this state is referenced by ExplodedNodes.
+  void incrementReferenceCount() { ++refCount; }
+
+  /// Decrements the number of times this state is referenced by ExplodedNodes.
+ void decrementReferenceCount() {
+ assert(refCount > 0);
+ --refCount;
+ }
};
class GRStateSet {
@@ -409,10 +412,10 @@ public:
class GRStateManager {
friend class GRState;
- friend class GRExprEngine; // FIXME: Remove.
+ friend class ExprEngine; // FIXME: Remove.
private:
- /// Eng - The GRSubEngine that owns this state manager.
- GRSubEngine &Eng;
+ /// Eng - The SubEngine that owns this state manager.
+ SubEngine *Eng; /* Can be null. */
EnvironmentManager EnvMgr;
llvm::OwningPtr<StoreManager> StoreMgr;
@@ -431,65 +434,86 @@ private:
/// a particular function. This is used to unique states.
llvm::FoldingSet<GRState> StateSet;
- /// ValueMgr - Object that manages the data for all created SVals.
- ValueManager ValueMgr;
+ /// Object that manages the data for all created SVals.
+ llvm::OwningPtr<SValBuilder> svalBuilder;
- /// Alloc - A BumpPtrAllocator to allocate states.
+ /// A BumpPtrAllocator to allocate states.
llvm::BumpPtrAllocator &Alloc;
+ /// A vector of recently allocated GRStates that can potentially be
+ /// reused.
+ std::vector<GRState *> recentlyAllocatedStates;
+
+ /// A vector of GRStates that we can reuse.
+ std::vector<GRState *> freeStates;
+
public:
GRStateManager(ASTContext& Ctx,
StoreManagerCreator CreateStoreManager,
ConstraintManagerCreator CreateConstraintManager,
llvm::BumpPtrAllocator& alloc,
- GRSubEngine &subeng)
- : Eng(subeng),
+ SubEngine &subeng)
+ : Eng(&subeng),
EnvMgr(alloc),
GDMFactory(alloc),
- ValueMgr(alloc, Ctx, *this),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
Alloc(alloc) {
StoreMgr.reset((*CreateStoreManager)(*this));
ConstraintMgr.reset((*CreateConstraintManager)(*this, subeng));
}
+ GRStateManager(ASTContext& Ctx,
+ StoreManagerCreator CreateStoreManager,
+ ConstraintManager* ConstraintManagerPtr,
+ llvm::BumpPtrAllocator& alloc)
+ : Eng(0),
+ EnvMgr(alloc),
+ GDMFactory(alloc),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+ Alloc(alloc) {
+ StoreMgr.reset((*CreateStoreManager)(*this));
+ ConstraintMgr.reset(ConstraintManagerPtr);
+ }
+
~GRStateManager();
const GRState *getInitialState(const LocationContext *InitLoc);
- ASTContext &getContext() { return ValueMgr.getContext(); }
- const ASTContext &getContext() const { return ValueMgr.getContext(); }
+ ASTContext &getContext() { return svalBuilder->getContext(); }
+ const ASTContext &getContext() const { return svalBuilder->getContext(); }
BasicValueFactory &getBasicVals() {
- return ValueMgr.getBasicValueFactory();
+ return svalBuilder->getBasicValueFactory();
}
const BasicValueFactory& getBasicVals() const {
- return ValueMgr.getBasicValueFactory();
+ return svalBuilder->getBasicValueFactory();
+ }
+
+ SValBuilder &getSValBuilder() {
+ return *svalBuilder;
}
SymbolManager &getSymbolManager() {
- return ValueMgr.getSymbolManager();
+ return svalBuilder->getSymbolManager();
}
const SymbolManager &getSymbolManager() const {
- return ValueMgr.getSymbolManager();
+ return svalBuilder->getSymbolManager();
}
- ValueManager &getValueManager() { return ValueMgr; }
- const ValueManager &getValueManager() const { return ValueMgr; }
-
llvm::BumpPtrAllocator& getAllocator() { return Alloc; }
MemRegionManager& getRegionManager() {
- return ValueMgr.getRegionManager();
+ return svalBuilder->getRegionManager();
}
const MemRegionManager& getRegionManager() const {
- return ValueMgr.getRegionManager();
+ return svalBuilder->getRegionManager();
}
StoreManager& getStoreManager() { return *StoreMgr; }
ConstraintManager& getConstraintManager() { return *ConstraintMgr; }
- GRSubEngine& getOwningEngine() { return Eng; }
+ SubEngine* getOwningEngine() { return Eng; }
- const GRState* RemoveDeadBindings(const GRState* St,
+ const GRState* removeDeadBindings(const GRState* St,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
@@ -514,6 +538,10 @@ public:
}
const GRState* getPersistentState(GRState& Impl);
+
+ /// Periodically called by ExprEngine to recycle GRStates that were
+ /// created but never used for creating an ExplodedNode.
+ void recycleUnusedStates();
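
recycleUnusedStates() closes the loop on the reference counting added to GRState: states whose refCount never rose above zero were never captured by an ExplodedNode and can go back on the free list. A plausible cadence, assuming ExprEngine invokes it between node-building steps (the actual call site is not in this header):

    // after a statement's transfer functions complete:
    //   StateMgr.recycleUnusedStates();  // refCount == 0 states move to
    //                                    // freeStates for later reuse
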
//==---------------------------------------------------------------------==//
// Generic Data Map methods.
@@ -608,21 +636,21 @@ inline const VarRegion* GRState::getRegion(const VarDecl *D,
return getStateManager().getRegionManager().getVarRegion(D, LC);
}
-inline const GRState *GRState::Assume(DefinedOrUnknownSVal Cond,
+inline const GRState *GRState::assume(DefinedOrUnknownSVal Cond,
bool Assumption) const {
if (Cond.isUnknown())
return this;
- return getStateManager().ConstraintMgr->Assume(this, cast<DefinedSVal>(Cond),
+ return getStateManager().ConstraintMgr->assume(this, cast<DefinedSVal>(Cond),
Assumption);
}
inline std::pair<const GRState*, const GRState*>
-GRState::Assume(DefinedOrUnknownSVal Cond) const {
+GRState::assume(DefinedOrUnknownSVal Cond) const {
if (Cond.isUnknown())
return std::make_pair(this, this);
- return getStateManager().ConstraintMgr->AssumeDual(this,
+ return getStateManager().ConstraintMgr->assumeDual(this,
cast<DefinedSVal>(Cond));
}
@@ -653,7 +681,9 @@ inline SVal GRState::getLValue(const FieldDecl* D, SVal Base) const {
}
inline SVal GRState::getLValue(QualType ElementType, SVal Idx, SVal Base) const{
- return getStateManager().StoreMgr->getLValueElement(ElementType, Idx, Base);
+ if (NonLoc *N = dyn_cast<NonLoc>(&Idx))
+ return getStateManager().StoreMgr->getLValueElement(ElementType, *N, Base);
+ return UnknownVal();
}
inline const llvm::APSInt *GRState::getSymVal(SymbolRef sym) const {
@@ -661,25 +691,25 @@ inline const llvm::APSInt *GRState::getSymVal(SymbolRef sym) const {
}
inline SVal GRState::getSVal(const Stmt* Ex) const {
- return Env.GetSVal(Ex, getStateManager().ValueMgr);
+ return Env.getSVal(Ex, *getStateManager().svalBuilder);
}
inline SVal GRState::getSValAsScalarOrLoc(const Stmt *S) const {
if (const Expr *Ex = dyn_cast<Expr>(S)) {
QualType T = Ex->getType();
- if (Loc::IsLocType(T) || T->isIntegerType())
+ if (Loc::isLocType(T) || T->isIntegerType())
return getSVal(S);
}
return UnknownVal();
}
-inline SVal GRState::getSVal(Loc LV, QualType T) const {
- return getStateManager().StoreMgr->Retrieve(St, LV, T);
+inline SVal GRState::getRawSVal(Loc LV, QualType T) const {
+ return getStateManager().StoreMgr->Retrieve(getStore(), LV, T);
}
inline SVal GRState::getSVal(const MemRegion* R) const {
- return getStateManager().StoreMgr->Retrieve(St, loc::MemRegionVal(R));
+ return getStateManager().StoreMgr->Retrieve(getStore(), loc::MemRegionVal(R));
}
inline BasicValueFactory &GRState::getBasicVals() const {
@@ -755,6 +785,9 @@ CB GRState::scanReachableSymbols(const MemRegion * const *beg,
scanReachableSymbols(beg, end, cb);
return cb;
}
+
+} // end GR namespace
+
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRStateTrait.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h
index 5189a1f..411441f 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRStateTrait.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h
@@ -14,8 +14,8 @@
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_GRSTATETRAIT_H
-#define LLVM_CLANG_ANALYSIS_GRSTATETRAIT_H
+#ifndef LLVM_CLANG_GR_GRSTATETRAIT_H
+#define LLVM_CLANG_GR_GRSTATETRAIT_H
namespace llvm {
class BumpPtrAllocator;
@@ -26,6 +26,8 @@ namespace llvm {
}
namespace clang {
+
+namespace ento {
template <typename T> struct GRStatePartialTrait;
// Partial-specialization for ImmutableMap.
@@ -48,11 +50,11 @@ namespace clang {
return B.lookup(K);
}
static data_type Set(data_type B, key_type K, value_type E,context_type F){
- return F.Add(B, K, E);
+ return F.add(B, K, E);
}
static data_type Remove(data_type B, key_type K, context_type F) {
- return F.Remove(B, K);
+ return F.remove(B, K);
}
static inline context_type MakeContext(void* p) {
@@ -86,11 +88,11 @@ namespace clang {
}
static data_type Add(data_type B, key_type K, context_type F) {
- return F.Add(B, K);
+ return F.add(B, K);
}
static data_type Remove(data_type B, key_type K, context_type F) {
- return F.Remove(B, K);
+ return F.remove(B, K);
}
static bool Contains(data_type B, key_type K) {
@@ -119,7 +121,7 @@ namespace clang {
typedef typename data_type::Factory& context_type;
static data_type Add(data_type L, key_type K, context_type F) {
- return F.Add(K, L);
+ return F.add(K, L);
}
static inline data_type MakeData(void* const* p) {
@@ -143,6 +145,21 @@ namespace clang {
delete (typename data_type::Factory*) Ctx;
}
};
+
+ // Partial specialization for bool.
+ template <> struct GRStatePartialTrait<bool> {
+ typedef bool data_type;
+
+ static inline data_type MakeData(void* const* p) {
+ return (bool) (uintptr_t) p;
+ }
+ static inline void *MakeVoidPtr(data_type d) {
+ return (void*) (uintptr_t) d;
+ }
+ };
+
+} // end GR namespace
+
} // end clang namespace
#endif
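
The new bool specialization lets a checker keep a single flag directly in the generic data map. Registration follows the usual GRStateTrait pattern; the trait tag and GDMIndex boilerplate below are hypothetical illustration, and the set<>/get<> accessors on GRState are assumed from elsewhere in these headers:

    namespace { struct SawInit {}; }   // hypothetical checker-private tag
    namespace clang {
    namespace ento {
      template <> struct GRStateTrait<SawInit>
        : public GRStatePartialTrait<bool> {
        static void *GDMIndex() { static int index; return &index; }
      };
    }
    }
    // In a checker callback (assumed API):
    //   const GRState *newState = state->set<SawInit>(true);
    //   bool seen = state->get<SawInit>();
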
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index 96f906a..8d19b51 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -13,12 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_MEMREGION_H
-#define LLVM_CLANG_ANALYSIS_MEMREGION_H
+#ifndef LLVM_CLANG_GR_MEMREGION_H
+#define LLVM_CLANG_GR_MEMREGION_H
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/FoldingSet.h"
@@ -31,11 +32,14 @@ class raw_ostream;
namespace clang {
-class MemRegionManager;
-class MemSpaceRegion;
class LocationContext;
class StackFrameContext;
-class ValueManager;
+
+namespace ento {
+
+class MemRegionManager;
+class MemSpaceRegion;
+class SValBuilder;
class VarRegion;
class CodeTextRegion;
@@ -93,9 +97,10 @@ public:
VarRegionKind = BEG_DECL_REGIONS,
FieldRegionKind,
ObjCIvarRegionKind,
- CXXObjectRegionKind,
- END_DECL_REGIONS = CXXObjectRegionKind,
- END_TYPED_REGIONS = END_DECL_REGIONS
+ END_DECL_REGIONS = ObjCIvarRegionKind,
+ CXXTempObjectRegionKind,
+ CXXBaseObjectRegionKind,
+ END_TYPED_REGIONS = CXXBaseObjectRegionKind
};
private:
@@ -294,7 +299,7 @@ public:
}
/// getExtent - Returns the size of the region in bytes.
- virtual DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const {
+ virtual DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const {
return UnknownVal();
}
@@ -329,7 +334,7 @@ public:
bool isBoundable() const { return true; }
- DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
void Profile(llvm::FoldingSetNodeID& ID) const;
@@ -353,16 +358,20 @@ public:
virtual QualType getLocationType() const {
// FIXME: We can possibly optimize this later to cache this value.
- return getContext().getPointerType(getValueType());
+ QualType T = getValueType();
+ ASTContext &ctx = getContext();
+ if (T->getAs<ObjCObjectType>())
+ return ctx.getObjCObjectPointerType(T);
+ return ctx.getPointerType(getValueType());
}
- QualType getDesugaredValueType() const {
+ QualType getDesugaredValueType(ASTContext &Context) const {
QualType T = getValueType();
- return T.getTypePtr() ? T.getDesugaredType() : T;
+ return T.getTypePtrOrNull() ? T.getDesugaredType(Context) : T;
}
- QualType getDesugaredLocationType() const {
- return getLocationType().getDesugaredType();
+ QualType getDesugaredLocationType(ASTContext &Context) const {
+ return getLocationType().getDesugaredType(Context);
}
bool isBoundable() const { return true; }
@@ -542,7 +551,7 @@ public:
bool isBoundable() const { return true; }
- DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
void Profile(llvm::FoldingSetNodeID& ID) const;
@@ -578,7 +587,7 @@ public:
return Str->getType();
}
- DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
bool isBoundable() const { return false; }
@@ -639,7 +648,7 @@ public:
const Decl* getDecl() const { return D; }
void Profile(llvm::FoldingSetNodeID& ID) const;
- DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
@@ -725,7 +734,7 @@ public:
return getDecl()->getType();
}
- DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FieldDecl* FD,
const MemRegion* superRegion) {
@@ -770,14 +779,14 @@ private:
friend class ElementRegion;
const MemRegion *Region;
- int64_t Offset;
+ CharUnits Offset;
- RegionRawOffset(const MemRegion* reg, int64_t offset = 0)
+ RegionRawOffset(const MemRegion* reg, CharUnits offset = CharUnits::Zero())
: Region(reg), Offset(offset) {}
public:
// FIXME: Eventually support symbolic offsets.
- int64_t getByteOffset() const { return Offset; }
+ CharUnits getOffset() const { return Offset; }
const MemRegion *getRegion() const { return Region; }
void dumpToStream(llvm::raw_ostream& os) const;
@@ -788,9 +797,9 @@ class ElementRegion : public TypedRegion {
friend class MemRegionManager;
QualType ElementType;
- SVal Index;
+ NonLoc Index;
- ElementRegion(QualType elementType, SVal Idx, const MemRegion* sReg)
+ ElementRegion(QualType elementType, NonLoc Idx, const MemRegion* sReg)
: TypedRegion(sReg, ElementRegionKind),
ElementType(elementType), Index(Idx) {
assert((!isa<nonloc::ConcreteInt>(&Idx) ||
@@ -803,7 +812,7 @@ class ElementRegion : public TypedRegion {
public:
- SVal getIndex() const { return Index; }
+ NonLoc getIndex() const { return Index; }
QualType getValueType() const {
return ElementType;
@@ -825,13 +834,13 @@ public:
};
// C++ temporary object associated with an expression.
-class CXXObjectRegion : public TypedRegion {
+class CXXTempObjectRegion : public TypedRegion {
friend class MemRegionManager;
Expr const *Ex;
- CXXObjectRegion(Expr const *E, MemRegion const *sReg)
- : TypedRegion(sReg, CXXObjectRegionKind), Ex(E) {}
+ CXXTempObjectRegion(Expr const *E, MemRegion const *sReg)
+ : TypedRegion(sReg, CXXTempObjectRegionKind), Ex(E) {}
static void ProfileRegion(llvm::FoldingSetNodeID &ID,
Expr const *E, const MemRegion *sReg);
@@ -841,10 +850,39 @@ public:
return Ex->getType();
}
+ void dumpToStream(llvm::raw_ostream& os) const;
+
void Profile(llvm::FoldingSetNodeID &ID) const;
static bool classof(const MemRegion* R) {
- return R->getKind() == CXXObjectRegionKind;
+ return R->getKind() == CXXTempObjectRegionKind;
+ }
+};
+
+// CXXBaseObjectRegion represents a base object within a C++ object. It is
+// identified by the base class declaration and the region of its parent object.
+class CXXBaseObjectRegion : public TypedRegion {
+ friend class MemRegionManager;
+
+ const CXXRecordDecl *decl;
+
+ CXXBaseObjectRegion(const CXXRecordDecl *d, const MemRegion *sReg)
+ : TypedRegion(sReg, CXXBaseObjectRegionKind), decl(d) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *decl, const MemRegion *sReg);
+
+public:
+ const CXXRecordDecl *getDecl() const { return decl; }
+
+ QualType getValueType() const;
+
+ void dumpToStream(llvm::raw_ostream& os) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const MemRegion *region) {
+ return region->getKind() == CXXBaseObjectRegionKind;
}
};
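
A minimal standalone sketch of the identity scheme described above (not the
analyzer's code; names are illustrative): a base-object region is determined
entirely by the base-class declaration plus its parent region, so a chain of
derived-to-base casts nests naturally.

#include <iostream>
#include <string>

struct BaseView {
  std::string baseClass;   // stands in for the CXXRecordDecl
  const BaseView *super;   // parent region, or null for the object itself

  std::string dump() const {
    return super ? "base{" + baseClass + "," + super->dump() + "}" : baseClass;
  }
};

int main() {
  BaseView obj = { "d", 0 };          // the full Derived object
  BaseView mid = { "Mid", &obj };     // 'd' viewed as its Mid subobject
  BaseView base = { "Base", &mid };   // that view cast again, up to Base
  std::cout << base.dump() << "\n";   // prints: base{Base,base{Mid,d}}
}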
@@ -942,7 +980,7 @@ public:
  /// getElementRegion - Retrieve the memory region associated with the
  /// given element type, index, and super region.
- const ElementRegion *getElementRegion(QualType elementType, SVal Idx,
+ const ElementRegion *getElementRegion(QualType elementType, NonLoc Idx,
const MemRegion *superRegion,
ASTContext &Ctx);
@@ -971,8 +1009,19 @@ public:
const ObjCIvarRegion *getObjCIvarRegion(const ObjCIvarDecl* ivd,
const MemRegion* superRegion);
- const CXXObjectRegion *getCXXObjectRegion(Expr const *Ex,
- LocationContext const *LC);
+ const CXXTempObjectRegion *getCXXTempObjectRegion(Expr const *Ex,
+ LocationContext const *LC);
+
+ const CXXBaseObjectRegion *getCXXBaseObjectRegion(const CXXRecordDecl *decl,
+ const MemRegion *superRegion);
+
+ /// Create a CXXBaseObjectRegion with the same CXXRecordDecl but a different
+ /// super region.
+ const CXXBaseObjectRegion *
+ getCXXBaseObjectRegionWithSuper(const CXXBaseObjectRegion *baseReg,
+ const MemRegion *superRegion) {
+ return getCXXBaseObjectRegion(baseReg->getDecl(), superRegion);
+ }
const FunctionTextRegion *getFunctionTextRegion(const FunctionDecl *FD);
const BlockTextRegion *getBlockTextRegion(const BlockDecl *BD,
@@ -1024,6 +1073,8 @@ inline ASTContext& MemRegion::getContext() const {
return getMemRegionManager()->getContext();
}
+} // end ento namespace
+
} // end clang namespace
//===----------------------------------------------------------------------===//
@@ -1032,7 +1083,7 @@ inline ASTContext& MemRegion::getContext() const {
namespace llvm {
static inline raw_ostream& operator<<(raw_ostream& os,
- const clang::MemRegion* R) {
+ const clang::ento::MemRegion* R) {
R->dumpToStream(os);
return os;
}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
new file mode 100644
index 0000000..710fc6b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
@@ -0,0 +1,210 @@
+//===- ObjCMessage.h - Wrapper for ObjC messages and dot syntax ---*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMessage which serves as a common wrapper for ObjC
+// message expressions or implicit messages for loading/storing ObjC properties.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
+#define LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/AST/ExprObjC.h"
+
+namespace clang {
+namespace ento {
+
+/// \brief Represents both explicit ObjC message expressions and implicit
+/// messages that are sent for handling properties in dot syntax.
+class ObjCMessage {
+ const Expr *MsgOrPropE;
+ const Expr *OriginE;
+ bool IsPropSetter;
+ SVal SetterArgV;
+
+protected:
+ ObjCMessage(const Expr *E, const Expr *origE, bool isSetter, SVal setArgV)
+ : MsgOrPropE(E), OriginE(origE),
+ IsPropSetter(isSetter), SetterArgV(setArgV) { }
+
+public:
+  ObjCMessage() : MsgOrPropE(0), OriginE(0), IsPropSetter(false) { }
+
+ ObjCMessage(const ObjCMessageExpr *E)
+ : MsgOrPropE(E), OriginE(E) {
+ assert(E && "should not be initialized with null expression");
+ }
+
+ bool isValid() const { return MsgOrPropE != 0; }
+ bool isInvalid() const { return !isValid(); }
+
+ bool isMessageExpr() const {
+ return isValid() && isa<ObjCMessageExpr>(MsgOrPropE);
+ }
+
+ bool isPropertyGetter() const {
+ return isValid() &&
+ isa<ObjCPropertyRefExpr>(MsgOrPropE) && !IsPropSetter;
+ }
+
+ bool isPropertySetter() const {
+ return isValid() &&
+ isa<ObjCPropertyRefExpr>(MsgOrPropE) && IsPropSetter;
+ }
+
+ const Expr *getOriginExpr() const { return OriginE; }
+
+ QualType getType(ASTContext &ctx) const;
+
+ QualType getResultType(ASTContext &ctx) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ if (const ObjCMethodDecl *MD = msgE->getMethodDecl())
+ return MD->getResultType();
+ return getType(ctx);
+ }
+
+ Selector getSelector() const;
+
+ const Expr *getInstanceReceiver() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getInstanceReceiver();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+ if (propE->isObjectReceiver())
+ return propE->getBase();
+ return 0;
+ }
+
+ bool isInstanceMessage() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->isInstanceMessage();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+    // FIXME: 'super' may be the superclass.
+ return propE->isObjectReceiver() || propE->isSuperReceiver();
+ }
+
+ const ObjCMethodDecl *getMethodDecl() const;
+
+ const ObjCInterfaceDecl *getReceiverInterface() const;
+
+ SourceLocation getSuperLoc() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getSuperLoc();
+ return cast<ObjCPropertyRefExpr>(MsgOrPropE)->getReceiverLocation();
+ }
+
+ SourceRange getSourceRange() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ return MsgOrPropE->getSourceRange();
+ }
+
+ unsigned getNumArgs() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getNumArgs();
+ return isPropertySetter() ? 1 : 0;
+ }
+
+ SVal getArgSVal(unsigned i, const GRState *state) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ assert(i < getNumArgs() && "Invalid index for argument");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return state->getSVal(msgE->getArg(i));
+ assert(isPropertySetter());
+ return SetterArgV;
+ }
+
+ QualType getArgType(unsigned i) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ assert(i < getNumArgs() && "Invalid index for argument");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getArg(i)->getType();
+ assert(isPropertySetter());
+ return cast<ObjCPropertyRefExpr>(MsgOrPropE)->getType();
+ }
+
+ const Expr *getArgExpr(unsigned i) const;
+
+ SourceRange getArgSourceRange(unsigned i) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ assert(i < getNumArgs() && "Invalid index for argument");
+ if (const Expr *argE = getArgExpr(i))
+ return argE->getSourceRange();
+ return OriginE->getSourceRange();
+ }
+};
+
+class ObjCPropertyGetter : public ObjCMessage {
+public:
+ ObjCPropertyGetter(const ObjCPropertyRefExpr *propE, const Expr *originE)
+ : ObjCMessage(propE, originE, false, SVal()) {
+ assert(propE && originE &&
+ "should not be initialized with null expressions");
+ }
+};
+
+class ObjCPropertySetter : public ObjCMessage {
+public:
+ ObjCPropertySetter(const ObjCPropertyRefExpr *propE, const Expr *storeE,
+ SVal argV)
+ : ObjCMessage(propE, storeE, true, argV) {
+    assert(propE && storeE && "should not be initialized with null expressions");
+ }
+};
+
+/// \brief Common wrapper for a call expression or an ObjC message, mainly to
+/// provide a common interface for handling their arguments.
+class CallOrObjCMessage {
+ const CallExpr *CallE;
+ ObjCMessage Msg;
+ const GRState *State;
+
+public:
+ CallOrObjCMessage(const CallExpr *callE, const GRState *state)
+ : CallE(callE), State(state) { }
+ CallOrObjCMessage(const ObjCMessage &msg, const GRState *state)
+ : CallE(0), Msg(msg), State(state) { }
+
+ QualType getResultType(ASTContext &ctx) const;
+
+ unsigned getNumArgs() const {
+ if (CallE) return CallE->getNumArgs();
+ return Msg.getNumArgs();
+ }
+
+ SVal getArgSVal(unsigned i) const {
+ assert(i < getNumArgs());
+ if (CallE) return State->getSVal(CallE->getArg(i));
+ return Msg.getArgSVal(i, State);
+ }
+
+ SVal getArgSValAsScalarOrLoc(unsigned i) const;
+
+ const Expr *getArg(unsigned i) const {
+ assert(i < getNumArgs());
+ if (CallE) return CallE->getArg(i);
+ return Msg.getArgExpr(i);
+ }
+
+ SourceRange getArgSourceRange(unsigned i) const {
+ assert(i < getNumArgs());
+ if (CallE) return CallE->getArg(i)->getSourceRange();
+ return Msg.getArgSourceRange(i);
+ }
+};
+
+} // end ento namespace
+} // end clang namespace
+
+#endif
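
To see why one wrapper pays off, here is a tiny self-contained model (purely
illustrative, not the real ObjCMessage API) of the argument-count logic above:
an explicit message reports its real argument list, while an implicit property
access reports one argument for a setter and none for a getter.

#include <cassert>
#include <vector>

struct Message {
  bool isMessageExpr;          // explicit [receiver selector:...] form?
  bool isSetter;               // otherwise: property access, get or set
  std::vector<int> args;       // stands in for the message's arguments

  unsigned getNumArgs() const {
    if (isMessageExpr)
      return args.size();      // explicit message: real argument list
    return isSetter ? 1 : 0;   // setter carries one value, getter none
  }
};

int main() {
  Message explicitMsg = { true, false, std::vector<int>(2, 0) };
  Message setter = { false, true, std::vector<int>() };
  assert(explicitMsg.getNumArgs() == 2);
  assert(setter.getNumArgs() == 1);   // the implicit "stored value" argument
}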
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ValueManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index b81e9c1..fc2b76e 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ValueManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -1,4 +1,4 @@
-//== ValueManager.h - Aggregate manager of symbols and SVals ----*- C++ -*--==//
+// SValBuilder.h - Construction of SVals from evaluating expressions -*- C++ -*-
//
// The LLVM Compiler Infrastructure
//
@@ -7,65 +7,102 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines ValueManager, a class that manages symbolic values
-// and SVals created for use by GRExprEngine and related classes. It
-// wraps and owns SymbolManager, MemRegionManager, and BasicValueFactory.
+// This file defines SValBuilder, a class that specifies the interface for
+// "symbolic evaluators" which construct an SVal from an expression.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_AGGREGATE_VALUE_MANAGER_H
-#define LLVM_CLANG_ANALYSIS_AGGREGATE_VALUE_MANAGER_H
+#ifndef LLVM_CLANG_GR_SVALBUILDER
+#define LLVM_CLANG_GR_SVALBUILDER
-#include "llvm/ADT/OwningPtr.h"
-#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Checker/PathSensitive/SVals.h"
-#include "clang/Checker/PathSensitive/BasicValueFactory.h"
-#include "clang/Checker/PathSensitive/SymbolManager.h"
-#include "clang/Checker/PathSensitive/SValuator.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-
-namespace llvm { class BumpPtrAllocator; }
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
namespace clang {
-class GRStateManager;
+namespace ento {
-class ValueManager {
+class GRState;
+class SValBuilder {
+protected:
ASTContext &Context;
+
+ /// Manager of APSInt values.
BasicValueFactory BasicVals;
- /// SymMgr - Object that manages the symbol information.
+ /// Manages the creation of symbols.
SymbolManager SymMgr;
- /// SVator - SValuator object that creates SVals from expressions.
- llvm::OwningPtr<SValuator> SVator;
-
+ /// Manages the creation of memory regions.
MemRegionManager MemMgr;
GRStateManager &StateMgr;
+ /// The scalar type to use for array indices.
const QualType ArrayIndexTy;
+
+ /// The width of the scalar type used for array indices.
const unsigned ArrayIndexWidth;
public:
- ValueManager(llvm::BumpPtrAllocator &alloc, ASTContext &context,
- GRStateManager &stateMgr)
- : Context(context), BasicVals(context, alloc),
- SymMgr(context, BasicVals, alloc),
- MemMgr(context, alloc), StateMgr(stateMgr),
- ArrayIndexTy(context.IntTy),
- ArrayIndexWidth(context.getTypeSize(ArrayIndexTy)) {
- // FIXME: Generalize later.
- SVator.reset(clang::CreateSimpleSValuator(*this));
- }
+  // FIXME: Make these protected again once RegionStoreManager correctly
+  // handles loads from differing bound value types.
+ virtual SVal evalCastNL(NonLoc val, QualType castTy) = 0;
+ virtual SVal evalCastL(Loc val, QualType castTy) = 0;
- // Accessors to submanagers.
+public:
+ SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ GRStateManager &stateMgr)
+ : Context(context), BasicVals(context, alloc),
+ SymMgr(context, BasicVals, alloc),
+ MemMgr(context, alloc),
+ StateMgr(stateMgr),
+ ArrayIndexTy(context.IntTy),
+ ArrayIndexWidth(context.getTypeSize(ArrayIndexTy)) {}
+
+ virtual ~SValBuilder() {}
+
+ SVal evalCast(SVal V, QualType castTy, QualType originalType);
+
+ virtual SVal evalMinus(NonLoc val) = 0;
+
+ virtual SVal evalComplement(NonLoc val) = 0;
+
+ virtual SVal evalBinOpNN(const GRState *state, BinaryOperator::Opcode Op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy) = 0;
+
+ virtual SVal evalBinOpLL(const GRState *state, BinaryOperator::Opcode Op,
+ Loc lhs, Loc rhs, QualType resultTy) = 0;
+
+ virtual SVal evalBinOpLN(const GRState *state, BinaryOperator::Opcode Op,
+ Loc lhs, NonLoc rhs, QualType resultTy) = 0;
+
+ /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V) = 0;
+
+ SVal evalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
+ SVal L, SVal R, QualType T);
+
+ DefinedOrUnknownSVal evalEQ(const GRState *ST, DefinedOrUnknownSVal L,
+ DefinedOrUnknownSVal R);
ASTContext &getContext() { return Context; }
const ASTContext &getContext() const { return Context; }
GRStateManager &getStateManager() { return StateMgr; }
+
+ QualType getConditionType() const {
+ return getContext().IntTy;
+ }
+
+ QualType getArrayIndexType() const {
+ return ArrayIndexTy;
+ }
BasicValueFactory &getBasicValueFactory() { return BasicVals; }
const BasicValueFactory &getBasicValueFactory() const { return BasicVals; }
@@ -73,8 +110,6 @@ public:
SymbolManager &getSymbolManager() { return SymMgr; }
const SymbolManager &getSymbolManager() const { return SymMgr; }
- SValuator &getSValuator() { return *SVator.get(); }
-
MemRegionManager &getRegionManager() { return MemMgr; }
const MemRegionManager &getRegionManager() const { return MemMgr; }
@@ -137,9 +172,8 @@ public:
I->getType()->isUnsignedIntegerType()));
}
- nonloc::ConcreteInt makeIntVal(const CXXBoolLiteralExpr *E) {
- return E->getValue() ? nonloc::ConcreteInt(BasicVals.getValue(1, 1, true))
- : nonloc::ConcreteInt(BasicVals.getValue(0, 1, true));
+ nonloc::ConcreteInt makeBoolVal(const CXXBoolLiteralExpr *E) {
+ return makeTruthVal(E->getValue());
}
nonloc::ConcreteInt makeIntVal(const llvm::APSInt& V) {
@@ -155,7 +189,7 @@ public:
}
DefinedSVal makeIntVal(uint64_t X, QualType T) {
- if (Loc::IsLocType(T))
+ if (Loc::isLocType(T))
return loc::ConcreteInt(BasicVals.getValue(X, T));
return nonloc::ConcreteInt(BasicVals.getValue(X, T));
@@ -183,11 +217,11 @@ public:
NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType T);
- NonLoc makeTruthVal(bool b, QualType T) {
+ nonloc::ConcreteInt makeTruthVal(bool b, QualType T) {
return nonloc::ConcreteInt(BasicVals.getTruthValue(b, T));
}
- NonLoc makeTruthVal(bool b) {
+ nonloc::ConcreteInt makeTruthVal(bool b) {
return nonloc::ConcreteInt(BasicVals.getTruthValue(b));
}
@@ -203,14 +237,22 @@ public:
return loc::MemRegionVal(R);
}
- Loc makeLoc(const AddrLabelExpr* E) {
+ Loc makeLoc(const AddrLabelExpr *E) {
return loc::GotoLabel(E->getLabel());
}
Loc makeLoc(const llvm::APSInt& V) {
return loc::ConcreteInt(BasicVals.getValue(V));
}
+
};
+
+SValBuilder* createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+ ASTContext &context,
+ GRStateManager &stateMgr);
+
+} // end ento namespace
+
} // end clang namespace
-#endif
+#endif
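
The split into evalBinOpNN/evalBinOpLL/evalBinOpLN suggests a simple dispatch
on the operands' value categories. A standalone sketch of that routing (a
simplified model, not the analyzer's implementation):

#include <iostream>

enum Category { LocVal, NonLocVal };
struct V { Category cat; long n; };

static V binOpNN(V l, V r) { V v = { NonLocVal, l.n + r.n };  return v; } // int op int
static V binOpLL(V l, V r) { V v = { NonLocVal, l.n == r.n }; return v; } // ptr op ptr
static V binOpLN(V l, V r) { V v = { LocVal, l.n + r.n };     return v; } // ptr op int

static V evalBinOp(V l, V r) {
  if (l.cat == LocVal)
    return r.cat == LocVal ? binOpLL(l, r) : binOpLN(l, r);
  // In practice a NonLoc-op-Loc pair would be commuted first when legal.
  return binOpNN(l, r);
}

int main() {
  V p = { LocVal, 100 };
  V i = { NonLocVal, 4 };
  std::cout << evalBinOp(p, i).n << "\n"; // 104: the pointer-arithmetic path
}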
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index cdb338a..0d43079 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_RVALUE_H
-#define LLVM_CLANG_ANALYSIS_RVALUE_H
+#ifndef LLVM_CLANG_GR_RVALUE_H
+#define LLVM_CLANG_GR_RVALUE_H
-#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/Support/Casting.h"
#include "llvm/ADT/ImmutableList.h"
@@ -29,6 +29,8 @@ namespace llvm {
namespace clang {
+namespace ento {
+
class CompoundValData;
class LazyCompoundValData;
class GRState;
@@ -37,26 +39,38 @@ class MemRegion;
class TypedRegion;
class MemRegionManager;
class GRStateManager;
-class ValueManager;
+class SValBuilder;
+/// SVal - This represents a symbolic expression, which can be either
+/// an L-value or an R-value.
+///
class SVal {
public:
- enum BaseKind { UndefinedKind, UnknownKind, LocKind, NonLocKind };
+ enum BaseKind {
+ // The enumerators must be representable using 2 bits.
+ UndefinedKind = 0, // for subclass UndefinedVal (an uninitialized value)
+ UnknownKind = 1, // for subclass UnknownVal (a void value)
+ LocKind = 2, // for subclass Loc (an L-value)
+ NonLocKind = 3 // for subclass NonLoc (an R-value that's not
+ // an L-value)
+ };
enum { BaseBits = 2, BaseMask = 0x3 };
protected:
const void* Data;
+
+ /// The lowest 2 bits are a BaseKind (0 -- 3).
+ /// The higher bits are an unsigned "kind" value.
unsigned Kind;
-protected:
- SVal(const void* d, bool isLoc, unsigned ValKind)
+ explicit SVal(const void* d, bool isLoc, unsigned ValKind)
: Data(d), Kind((isLoc ? LocKind : NonLocKind) | (ValKind << BaseBits)) {}
explicit SVal(BaseKind k, const void* D = NULL)
: Data(D), Kind(k) {}
public:
- SVal() : Data(0), Kind(0) {}
+ explicit SVal() : Data(0), Kind(0) {}
~SVal() {}
/// BufferTy - A temporary buffer to hold a set of SVals.
@@ -66,6 +80,8 @@ public:
inline BaseKind getBaseKind() const { return (BaseKind) (Kind & BaseMask); }
inline unsigned getSubKind() const { return (Kind & ~BaseMask) >> BaseBits; }
+ // This method is required for using SVal in a FoldingSetNode. It
+ // extracts a unique signature for this SVal object.
inline void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) getRawKind());
ID.AddPointer(Data);
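
The packing described in those comments is easy to check in isolation. A
minimal sketch of the same encoding (illustrative only): the base kind lives
in the low two bits and the subkind in the remaining high bits.

#include <cassert>

enum BaseKind { UndefinedKind, UnknownKind, LocKind, NonLocKind };
enum { BaseBits = 2, BaseMask = 0x3 };

struct PackedKind {
  unsigned Kind;
  PackedKind(BaseKind base, unsigned sub) : Kind(base | (sub << BaseBits)) {}
  BaseKind getBaseKind() const { return BaseKind(Kind & BaseMask); }
  unsigned getSubKind() const { return (Kind & ~unsigned(BaseMask)) >> BaseBits; }
};

int main() {
  PackedKind k(NonLocKind, 5);
  assert(k.getBaseKind() == NonLocKind);  // low two bits: 3
  assert(k.getSubKind() == 5);            // high bits: the subclass kind
}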
@@ -194,13 +210,13 @@ public:
class UnknownVal : public DefinedOrUnknownSVal {
public:
- UnknownVal() : DefinedOrUnknownSVal(UnknownKind) {}
+ explicit UnknownVal() : DefinedOrUnknownSVal(UnknownKind) {}
static inline bool classof(const SVal *V) {
return V->getBaseKind() == UnknownKind;
}
};
-
+
class DefinedSVal : public DefinedOrUnknownSVal {
private:
// Do not implement. We want calling these methods to be a compiler
@@ -209,7 +225,7 @@ private:
bool isUnknownOrUndef() const;
bool isValid() const;
protected:
- DefinedSVal(const void* d, bool isLoc, unsigned ValKind)
+ explicit DefinedSVal(const void* d, bool isLoc, unsigned ValKind)
: DefinedOrUnknownSVal(d, isLoc, ValKind) {}
public:
// Implement isa<T> support.
@@ -220,7 +236,8 @@ public:
class NonLoc : public DefinedSVal {
protected:
- NonLoc(unsigned SubKind, const void* d) : DefinedSVal(d, false, SubKind) {}
+ explicit NonLoc(unsigned SubKind, const void* d)
+ : DefinedSVal(d, false, SubKind) {}
public:
void dumpToStream(llvm::raw_ostream& Out) const;
@@ -233,21 +250,20 @@ public:
class Loc : public DefinedSVal {
protected:
- Loc(unsigned SubKind, const void* D)
+ explicit Loc(unsigned SubKind, const void* D)
: DefinedSVal(const_cast<void*>(D), true, SubKind) {}
public:
void dumpToStream(llvm::raw_ostream& Out) const;
Loc(const Loc& X) : DefinedSVal(X.Data, true, X.getSubKind()) {}
- Loc& operator=(const Loc& X) { memcpy(this, &X, sizeof(Loc)); return *this; }
// Implement isa<T> support.
static inline bool classof(const SVal* V) {
return V->getBaseKind() == LocKind;
}
- static inline bool IsLocType(QualType T) {
+ static inline bool isLocType(QualType T) {
return T->isAnyPointerType() || T->isBlockPointerType() ||
T->isReferenceType();
}
@@ -282,7 +298,7 @@ public:
class SymExprVal : public NonLoc {
public:
- SymExprVal(const SymExpr *SE)
+ explicit SymExprVal(const SymExpr *SE)
: NonLoc(SymExprValKind, reinterpret_cast<const void*>(SE)) {}
const SymExpr *getSymbolicExpression() const {
@@ -301,19 +317,19 @@ public:
class ConcreteInt : public NonLoc {
public:
- ConcreteInt(const llvm::APSInt& V) : NonLoc(ConcreteIntKind, &V) {}
+ explicit ConcreteInt(const llvm::APSInt& V) : NonLoc(ConcreteIntKind, &V) {}
const llvm::APSInt& getValue() const {
return *static_cast<const llvm::APSInt*>(Data);
}
// Transfer functions for binary/unary operations on ConcreteInts.
- SVal evalBinOp(ValueManager &ValMgr, BinaryOperator::Opcode Op,
+ SVal evalBinOp(SValBuilder &svalBuilder, BinaryOperator::Opcode Op,
const ConcreteInt& R) const;
- ConcreteInt evalComplement(ValueManager &ValMgr) const;
+ ConcreteInt evalComplement(SValBuilder &svalBuilder) const;
- ConcreteInt evalMinus(ValueManager &ValMgr) const;
+ ConcreteInt evalMinus(SValBuilder &svalBuilder) const;
// Implement isa<T> support.
static inline bool classof(const SVal* V) {
@@ -327,9 +343,9 @@ public:
};
class LocAsInteger : public NonLoc {
- friend class clang::ValueManager;
+ friend class ento::SValBuilder;
- LocAsInteger(const std::pair<SVal, uintptr_t>& data) :
+ explicit LocAsInteger(const std::pair<SVal, uintptr_t>& data) :
NonLoc(LocAsIntegerKind, &data) {
assert (isa<Loc>(data.first));
}
@@ -361,9 +377,9 @@ public:
};
class CompoundVal : public NonLoc {
- friend class clang::ValueManager;
+ friend class ento::SValBuilder;
- CompoundVal(const CompoundValData* D) : NonLoc(CompoundValKind, D) {}
+ explicit CompoundVal(const CompoundValData* D) : NonLoc(CompoundValKind, D) {}
public:
const CompoundValData* getValue() const {
@@ -384,9 +400,9 @@ public:
};
class LazyCompoundVal : public NonLoc {
- friend class clang::ValueManager;
+ friend class ento::SValBuilder;
- LazyCompoundVal(const LazyCompoundValData *D)
+ explicit LazyCompoundVal(const LazyCompoundValData *D)
: NonLoc(LazyCompoundValKind, D) {}
public:
const LazyCompoundValData *getCVData() const {
@@ -404,7 +420,7 @@ public:
}
};
-} // end namespace clang::nonloc
+} // end namespace ento::nonloc
//==------------------------------------------------------------------------==//
// Subclasses of Loc.
@@ -412,19 +428,18 @@ public:
namespace loc {
-enum Kind { GotoLabelKind, MemRegionKind, ConcreteIntKind };
+enum Kind { GotoLabelKind, MemRegionKind, ConcreteIntKind, ObjCPropRefKind };
class GotoLabel : public Loc {
public:
- GotoLabel(LabelStmt* Label) : Loc(GotoLabelKind, Label) {}
+ explicit GotoLabel(LabelDecl *Label) : Loc(GotoLabelKind, Label) {}
- const LabelStmt* getLabel() const {
- return static_cast<const LabelStmt*>(Data);
+ const LabelDecl *getLabel() const {
+ return static_cast<const LabelDecl*>(Data);
}
static inline bool classof(const SVal* V) {
- return V->getBaseKind() == LocKind &&
- V->getSubKind() == GotoLabelKind;
+ return V->getBaseKind() == LocKind && V->getSubKind() == GotoLabelKind;
}
static inline bool classof(const Loc* V) {
@@ -435,13 +450,13 @@ public:
class MemRegionVal : public Loc {
public:
- MemRegionVal(const MemRegion* r) : Loc(MemRegionKind, r) {}
+ explicit MemRegionVal(const MemRegion* r) : Loc(MemRegionKind, r) {}
const MemRegion* getRegion() const {
return static_cast<const MemRegion*>(Data);
}
- const MemRegion* StripCasts() const;
+ const MemRegion* stripCasts() const;
template <typename REGION>
const REGION* getRegionAs() const {
@@ -469,14 +484,14 @@ public:
class ConcreteInt : public Loc {
public:
- ConcreteInt(const llvm::APSInt& V) : Loc(ConcreteIntKind, &V) {}
+ explicit ConcreteInt(const llvm::APSInt& V) : Loc(ConcreteIntKind, &V) {}
const llvm::APSInt& getValue() const {
return *static_cast<const llvm::APSInt*>(Data);
}
// Transfer functions for binary/unary operations on ConcreteInts.
- SVal EvalBinOp(BasicValueFactory& BasicVals, BinaryOperator::Opcode Op,
+ SVal evalBinOp(BasicValueFactory& BasicVals, BinaryOperator::Opcode Op,
const ConcreteInt& R) const;
// Implement isa<T> support.
@@ -490,14 +505,40 @@ public:
}
};
-} // end clang::loc namespace
+/// \brief Pseudo-location SVal used by the ExprEngine to simulate a "load" or
+/// "store" of an ObjC property for the dot syntax.
+class ObjCPropRef : public Loc {
+public:
+ explicit ObjCPropRef(const ObjCPropertyRefExpr *E)
+ : Loc(ObjCPropRefKind, E) {}
+
+ const ObjCPropertyRefExpr *getPropRefExpr() const {
+ return static_cast<const ObjCPropertyRefExpr *>(Data);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind &&
+ V->getSubKind() == ObjCPropRefKind;
+ }
+
+ static inline bool classof(const Loc* V) {
+ return V->getSubKind() == ObjCPropRefKind;
+ }
+};
+
+} // end ento::loc namespace
+} // end ento namespace
+
} // end clang namespace
namespace llvm {
static inline llvm::raw_ostream& operator<<(llvm::raw_ostream& os,
- clang::SVal V) {
+ clang::ento::SVal V) {
V.dumpToStream(os);
return os;
}
+
} // end llvm namespace
+
#endif
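
The classof predicates above are what llvm::isa<> and llvm::cast<> consult.
A self-contained model of that dispatch (not the real SVal classes; the real
code relies on the LLVM casting machinery):

#include <cassert>

struct Val {
  enum BaseKind { UndefinedKind, UnknownKind, LocKind, NonLocKind };
  unsigned Kind;
  explicit Val(unsigned k) : Kind(k) {}
  BaseKind getBaseKind() const { return BaseKind(Kind & 0x3); }
  unsigned getSubKind() const { return Kind >> 2; }
};

enum LocSubKind { GotoLabelKind, MemRegionKind, ConcreteIntKind, ObjCPropRefKind };

struct MemRegionLikeVal : Val {
  MemRegionLikeVal() : Val(Val::LocKind | (MemRegionKind << 2)) {}
  // isa<MemRegionLikeVal>(V) reduces to exactly this check.
  static bool classof(const Val *V) {
    return V->getBaseKind() == Val::LocKind &&
           V->getSubKind() == MemRegionKind;
  }
};

int main() {
  MemRegionLikeVal mrv;
  const Val *v = &mrv;
  assert(MemRegionLikeVal::classof(v));   // what isa<> would test
}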
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index a1a4184..0251311 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -11,30 +11,56 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_STORE_H
-#define LLVM_CLANG_ANALYSIS_STORE_H
+#ifndef LLVM_CLANG_GR_STORE_H
+#define LLVM_CLANG_GR_STORE_H
-#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Checker/PathSensitive/SVals.h"
-#include "clang/Checker/PathSensitive/ValueManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
namespace clang {
+class Stmt;
+class Expr;
+class ObjCIvarDecl;
+class StackFrameContext;
+
+namespace ento {
+
+/// Store - This opaque type encapsulates an immutable mapping from
+/// locations to values. At a high-level, it represents the symbolic
+/// memory model. Different subclasses of StoreManager may choose
+/// different types to represent the locations and values.
typedef const void* Store;
class GRState;
class GRStateManager;
-class Stmt;
-class Expr;
-class ObjCIvarDecl;
class SubRegionMap;
-class StackFrameContext;
+class StoreManager;
+
+class StoreRef {
+ Store store;
+ StoreManager &mgr;
+public:
+ StoreRef(Store, StoreManager &);
+ StoreRef(const StoreRef &);
+ StoreRef &operator=(StoreRef const &);
+
+ bool operator==(const StoreRef &x) const {
+ assert(&mgr == &x.mgr);
+ return x.store == store;
+ }
+ bool operator!=(const StoreRef &x) const { return !operator==(x); }
+ ~StoreRef();
+
+ Store getStore() const { return store; }
+};
+
class StoreManager {
protected:
- ValueManager &ValMgr;
+ SValBuilder &svalBuilder;
GRStateManager &StateMgr;
/// MRMgr - Manages region objects associated with this StoreManager.
@@ -62,25 +88,22 @@ public:
/// \return A pointer to a GRState object that contains the same bindings as
/// \c state with the addition of having the value specified by \c val bound
/// to the location given for \c loc.
- virtual Store Bind(Store store, Loc loc, SVal val) = 0;
-
- virtual Store BindDefault(Store store, const MemRegion *R, SVal V) {
- return store;
- }
+ virtual StoreRef Bind(Store store, Loc loc, SVal val) = 0;
- virtual Store Remove(Store St, Loc L) = 0;
+ virtual StoreRef BindDefault(Store store, const MemRegion *R, SVal V);
+ virtual StoreRef Remove(Store St, Loc L) = 0;
  /// BindCompoundLiteral - Return the store that has the bindings currently
  /// in 'store' plus the bindings for the compound literal. 'cl' is the
  /// compound literal expression, 'LC' its location context, and 'v' the
  /// value to bind to the compound literal's region.
- virtual Store BindCompoundLiteral(Store store,
- const CompoundLiteralExpr* cl,
- const LocationContext *LC, SVal v) = 0;
+ virtual StoreRef BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr* cl,
+ const LocationContext *LC, SVal v) = 0;
/// getInitialStore - Returns the initial "empty" store representing the
/// value bindings upon entry to an analyzed function.
- virtual Store getInitialStore(const LocationContext *InitLoc) = 0;
+ virtual StoreRef getInitialStore(const LocationContext *InitLoc) = 0;
/// getRegionManager - Returns the internal RegionManager object that is
/// used to query and manipulate MemRegion objects.
@@ -92,11 +115,11 @@ public:
virtual SubRegionMap *getSubRegionMap(Store store) = 0;
virtual Loc getLValueVar(const VarDecl *VD, const LocationContext *LC) {
- return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
+ return svalBuilder.makeLoc(MRMgr.getVarRegion(VD, LC));
}
virtual Loc getLValueString(const StringLiteral* S) {
- return ValMgr.makeLoc(MRMgr.getStringRegion(S));
+ return svalBuilder.makeLoc(MRMgr.getStringRegion(S));
}
Loc getLValueCompoundLiteral(const CompoundLiteralExpr* CL,
@@ -112,7 +135,7 @@ public:
return getLValueFieldOrIvar(D, Base);
}
- virtual SVal getLValueElement(QualType elementType, SVal offset, SVal Base);
+ virtual SVal getLValueElement(QualType elementType, NonLoc offset, SVal Base);
// FIXME: This should soon be eliminated altogether; clients should deal with
// region extents directly.
@@ -122,10 +145,15 @@ public:
return UnknownVal();
}
- /// ArrayToPointer - Used by GRExprEngine::VistCast to handle implicit
+  /// ArrayToPointer - Used by ExprEngine::VisitCast to handle implicit
/// conversions between arrays and pointers.
virtual SVal ArrayToPointer(Loc Array) = 0;
+ /// Evaluates DerivedToBase casts.
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType) {
+ return UnknownVal();
+ }
+
class CastResult {
const GRState *state;
const MemRegion *region;
@@ -137,30 +165,32 @@ public:
const ElementRegion *GetElementZeroRegion(const MemRegion *R, QualType T);
- /// CastRegion - Used by GRExprEngine::VisitCast to handle casts from
+ /// castRegion - Used by ExprEngine::VisitCast to handle casts from
/// a MemRegion* to a specific location type. 'R' is the region being
/// casted and 'CastToTy' the result type of the cast.
- const MemRegion *CastRegion(const MemRegion *region, QualType CastToTy);
-
-
- /// EvalBinOp - Perform pointer arithmetic.
- virtual SVal EvalBinOp(BinaryOperator::Opcode Op,
- Loc lhs, NonLoc rhs, QualType resultTy) {
- return UnknownVal();
- }
+ const MemRegion *castRegion(const MemRegion *region, QualType CastToTy);
- virtual Store RemoveDeadBindings(Store store, const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+ virtual StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots) = 0;
- virtual Store BindDecl(Store store, const VarRegion *VR, SVal initVal) = 0;
+ virtual StoreRef BindDecl(Store store, const VarRegion *VR, SVal initVal) = 0;
+
+ virtual StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR) = 0;
+
+ /// If the StoreManager supports it, increment the reference count of
+ /// the specified Store object.
+ virtual void incrementReferenceCount(Store store) {}
- virtual Store BindDeclWithNoInit(Store store, const VarRegion *VR) = 0;
+ /// If the StoreManager supports it, decrement the reference count of
+ /// the specified Store object. If the reference count hits 0, the memory
+ /// associated with the object is recycled.
+ virtual void decrementReferenceCount(Store store) {}
typedef llvm::DenseSet<SymbolRef> InvalidatedSymbols;
typedef llvm::SmallVector<const MemRegion *, 8> InvalidatedRegions;
- /// InvalidateRegions - Clears out the specified regions from the store,
+ /// invalidateRegions - Clears out the specified regions from the store,
/// marking their values as unknown. Depending on the store, this may also
/// invalidate additional regions that may have changed based on accessing
/// the given regions. Optionally, invalidates non-static globals as well.
@@ -179,18 +209,18 @@ public:
/// invalidated. This should include any regions explicitly invalidated
/// even if they do not currently have bindings. Pass \c NULL if this
/// information will not be used.
- virtual Store InvalidateRegions(Store store,
- const MemRegion * const *Begin,
- const MemRegion * const *End,
- const Expr *E, unsigned Count,
- InvalidatedSymbols *IS,
- bool invalidateGlobals,
- InvalidatedRegions *Regions) = 0;
-
- /// EnterStackFrame - Let the StoreManager to do something when execution
+ virtual StoreRef invalidateRegions(Store store,
+ const MemRegion * const *Begin,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions) = 0;
+
+  /// enterStackFrame - Lets the StoreManager do something when the execution
  /// engine is about to enter a callee.
- virtual Store EnterStackFrame(const GRState *state,
- const StackFrameContext *frame);
+ virtual StoreRef enterStackFrame(const GRState *state,
+ const StackFrameContext *frame);
virtual void print(Store store, llvm::raw_ostream& Out,
const char* nl, const char *sep) = 0;
@@ -206,19 +236,48 @@ public:
virtual void iterBindings(Store store, BindingsHandler& f) = 0;
protected:
- const MemRegion *MakeElementRegion(const MemRegion *Base,
+ const MemRegion *MakeElementRegion(const MemRegion *baseRegion,
QualType pointeeTy, uint64_t index = 0);
/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
- SVal CastRetrievedVal(SVal val, const TypedRegion *R, QualType castTy,
+ SVal CastRetrievedVal(SVal val, const TypedRegion *region, QualType castTy,
bool performTestOnly = true);
private:
- SVal getLValueFieldOrIvar(const Decl* D, SVal Base);
+ SVal getLValueFieldOrIvar(const Decl* decl, SVal base);
};
+
+inline StoreRef::StoreRef(Store store, StoreManager & smgr)
+ : store(store), mgr(smgr) {
+ if (store)
+ mgr.incrementReferenceCount(store);
+}
+
+inline StoreRef::StoreRef(const StoreRef &sr)
+  : store(sr.store), mgr(sr.mgr) {
+ if (store)
+ mgr.incrementReferenceCount(store);
+}
+
+inline StoreRef::~StoreRef() {
+ if (store)
+ mgr.decrementReferenceCount(store);
+}
+
+inline StoreRef &StoreRef::operator=(StoreRef const &newStore) {
+ assert(&newStore.mgr == &mgr);
+ if (store != newStore.store) {
+ mgr.incrementReferenceCount(newStore.store);
+ mgr.decrementReferenceCount(store);
+ store = newStore.getStore();
+ }
+ return *this;
+}
+
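
The copy and assignment definitions above implement manager-mediated
reference counting. A standalone analogue (hypothetical manager type, for
illustration only) showing the take-new-then-drop-old ordering:

#include <cassert>
#include <map>

struct Mgr {                              // stand-in reference-count manager
  std::map<const void*, int> counts;
  void inc(const void *s) { if (s) ++counts[s]; }
  void dec(const void *s) { if (s) --counts[s]; }
};

class Ref {
  const void *store;
  Mgr &mgr;
public:
  Ref(const void *s, Mgr &m) : store(s), mgr(m) { mgr.inc(store); }
  Ref(const Ref &r) : store(r.store), mgr(r.mgr) { mgr.inc(store); }
  Ref &operator=(const Ref &r) {
    if (store != r.store) {
      mgr.inc(r.store);                   // take the new reference first,
      mgr.dec(store);                     // then release the old one
      store = r.store;
    }
    return *this;
  }
  ~Ref() { mgr.dec(store); }
};

int main() {
  Mgr m;
  int a, b;
  Ref r1(&a, m);
  {
    Ref r2(r1);                           // copy bumps the count on &a
    Ref r3(&b, m);
    r3 = r1;                              // &b released, &a acquired
    assert(m.counts[&b] == 0 && m.counts[&a] == 3);
  }
  assert(m.counts[&a] == 1);              // r2 and r3 dropped their refs
}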
// FIXME: Do we still need this?
/// SubRegionMap - An abstract interface that represents a queryable map
/// between MemRegion objects and their subregions.
@@ -240,6 +299,9 @@ StoreManager *CreateBasicStoreManager(GRStateManager& StMgr);
StoreManager *CreateRegionStoreManager(GRStateManager& StMgr);
StoreManager *CreateFieldsOnlyRegionStoreManager(GRStateManager& StMgr);
StoreManager *CreateFlatStoreManager(GRStateManager &StMgr);
+
+} // end ento namespace
+
} // end clang namespace
#endif
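
Since a Store is an immutable mapping, "clearing out" a region in
invalidateRegions amounts to producing a new store in which that region's
binding has been replaced by a fresh unknown value. A simplified standalone
model of that copy-and-update step (illustrative types only):

#include <cassert>
#include <map>
#include <string>

typedef std::map<std::string, int> StoreMap;  // region name -> bound value
const int UNKNOWN = -1;                       // stands in for UnknownVal

static StoreMap invalidate(StoreMap store, const std::string &region) {
  store[region] = UNKNOWN;    // mutate the copy; the caller's map is intact
  return store;
}

int main() {
  StoreMap s0;
  s0["x"] = 42;
  StoreMap s1 = invalidate(s0, "x");
  assert(s0["x"] == 42);      // old store still has the binding
  assert(s1["x"] == UNKNOWN); // new store lost it
}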
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
new file mode 100644
index 0000000..3d6f9fa
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
@@ -0,0 +1,116 @@
+//== SubEngine.h - Interface of the subengine of CoreEngine --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface of a subengine of the CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_GR_SUBENGINE_H
+#define LLVM_CLANG_GR_SUBENGINE_H
+
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+namespace clang {
+
+class CFGBlock;
+class CFGElement;
+class LocationContext;
+class Stmt;
+
+namespace ento {
+
+template <typename PP> class GenericNodeBuilder;
+class AnalysisManager;
+class ExplodedNodeSet;
+class ExplodedNode;
+class GRState;
+class GRStateManager;
+class BlockCounter;
+class StmtNodeBuilder;
+class BranchNodeBuilder;
+class IndirectGotoNodeBuilder;
+class SwitchNodeBuilder;
+class EndOfFunctionNodeBuilder;
+class CallEnterNodeBuilder;
+class CallExitNodeBuilder;
+class MemRegion;
+
+class SubEngine {
+public:
+ virtual ~SubEngine() {}
+
+ virtual const GRState* getInitialState(const LocationContext *InitLoc) = 0;
+
+ virtual AnalysisManager &getAnalysisManager() = 0;
+
+ virtual GRStateManager &getStateManager() = 0;
+
+ /// Called by CoreEngine. Used to generate new successor
+ /// nodes by processing the 'effects' of a block-level statement.
+ virtual void processCFGElement(const CFGElement E, StmtNodeBuilder& builder)=0;
+
+ /// Called by CoreEngine when it starts processing a CFGBlock. The
+ /// SubEngine is expected to populate dstNodes with new nodes representing
+  /// updated analysis state, or to generate no nodes at all if it has none.
+ virtual void processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
+ GenericNodeBuilder<BlockEntrance> &nodeBuilder) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a branch condition.
+ virtual void processBranch(const Stmt* Condition, const Stmt* Term,
+ BranchNodeBuilder& builder) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a computed goto jump.
+ virtual void processIndirectGoto(IndirectGotoNodeBuilder& builder) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a switch statement.
+ virtual void processSwitch(SwitchNodeBuilder& builder) = 0;
+
+ /// Called by CoreEngine. Used to generate end-of-path
+ /// nodes when the control reaches the end of a function.
+ virtual void processEndOfFunction(EndOfFunctionNodeBuilder& builder) = 0;
+
+  /// Called by CoreEngine. Used to generate the entry node of a callee.
+  virtual void processCallEnter(CallEnterNodeBuilder &builder) = 0;
+
+  /// Called by CoreEngine. Used to generate the first node after a call site.
+  virtual void processCallExit(CallExitNodeBuilder &builder) = 0;
+
+ /// Called by ConstraintManager. Used to call checker-specific
+ /// logic for handling assumptions on symbolic values.
+ virtual const GRState* processAssume(const GRState *state,
+ SVal cond, bool assumption) = 0;
+
+ /// wantsRegionChangeUpdate - Called by GRStateManager to determine if a
+ /// region change should trigger a processRegionChanges update.
+ virtual bool wantsRegionChangeUpdate(const GRState* state) = 0;
+
+ /// processRegionChanges - Called by GRStateManager whenever a change is made
+ /// to the store. Used to update checkers that track region values.
+ virtual const GRState* processRegionChanges(const GRState* state,
+ const MemRegion* const *Begin,
+ const MemRegion* const *End) = 0;
+
+ inline const GRState* processRegionChange(const GRState* state,
+ const MemRegion* MR) {
+ return processRegionChanges(state, &MR, &MR+1);
+ }
+
+ /// Called by CoreEngine when the analysis worklist is either empty or the
+ // maximum number of analysis steps have been reached.
+ virtual void processEndWorklist(bool hasWorkRemaining) = 0;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
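
The inline processRegionChange above reuses the range-based interface for a
single region via the half-open range [&MR, &MR + 1). That pointer trick in
isolation (a trivial standalone check):

#include <cassert>
#include <cstddef>

static std::size_t rangeLength(const int *const *begin, const int *const *end) {
  return end - begin;   // number of elements between the two pointers
}

int main() {
  int x = 0;
  const int *MR = &x;
  // A lone object passed as a one-element range, no container required.
  assert(rangeLength(&MR, &MR + 1) == 1);
}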
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SummaryManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h
index fd23189..ed87851 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SummaryManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h
@@ -12,14 +12,16 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_SUMMARY
-#define LLVM_CLANG_CHECKER_SUMMARY
+#ifndef LLVM_CLANG_GR_SUMMARY
+#define LLVM_CLANG_GR_SUMMARY
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
namespace clang {
+namespace ento {
+
namespace summMgr {
@@ -52,6 +54,8 @@ class SummaryManager : SummaryManagerImpl {
};
+} // end ento namespace
+
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index 26ed0c1..ad173bb 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -8,17 +8,17 @@
//===----------------------------------------------------------------------===//
//
// This file defines SymbolManager, a class that manages symbolic values
-// created for use by GRExprEngine and related classes.
+// created for use by ExprEngine and related classes.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_SYMMGR_H
-#define LLVM_CLANG_ANALYSIS_SYMMGR_H
+#ifndef LLVM_CLANG_GR_SYMMGR_H
+#define LLVM_CLANG_GR_SYMMGR_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/Analysis/AnalysisContext.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/DenseSet.h"
@@ -29,12 +29,14 @@ class raw_ostream;
namespace clang {
class ASTContext;
+ class StackFrameContext;
+
+namespace ento {
class BasicValueFactory;
class MemRegion;
class SubRegion;
class TypedRegion;
class VarRegion;
- class StackFrameContext;
class SymExpr : public llvm::FoldingSetNode {
public:
@@ -458,7 +460,7 @@ public:
/// isDead - Returns whether or not a symbol has been confirmed dead. This
/// should only be called once all marking of dead symbols has completed.
- /// (For checkers, this means only in the EvalDeadSymbols callback.)
+ /// (For checkers, this means only in the evalDeadSymbols callback.)
bool isDead(SymbolRef sym) const {
return TheDead.count(sym);
}
@@ -473,11 +475,13 @@ public:
virtual ~SymbolVisitor();
};
+} // end ento namespace
+
} // end clang namespace
namespace llvm {
static inline llvm::raw_ostream& operator<<(llvm::raw_ostream& os,
- const clang::SymExpr *SE) {
+ const clang::ento::SymExpr *SE) {
SE->dumpToStream(os);
return os;
}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h
new file mode 100644
index 0000000..23ed2be
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h
@@ -0,0 +1,93 @@
+//== TransferFuncs.h - Path-Sens. Transfer Functions Interface ---*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines TransferFuncs, a base class that defines the interface
+// for the transfer functions used by ExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_TRANSFERFUNCS
+#define LLVM_CLANG_GR_TRANSFERFUNCS
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include <vector>
+
+namespace clang {
+class ObjCMessageExpr;
+
+namespace ento {
+class ExplodedNode;
+class ExplodedNodeSet;
+class EndOfFunctionNodeBuilder;
+class ExprEngine;
+class StmtNodeBuilder;
+class StmtNodeBuilderRef;
+
+class TransferFuncs {
+public:
+ TransferFuncs() {}
+ virtual ~TransferFuncs() {}
+
+ virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {}
+ virtual void RegisterChecks(ExprEngine& Eng) {}
+
+
+ // Calls.
+
+ virtual void evalCall(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
+ const CallExpr* CE, SVal L,
+ ExplodedNode* Pred) {}
+
+ virtual void evalObjCMessage(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
+ ObjCMessage msg,
+ ExplodedNode* Pred,
+ const GRState *state) {}
+
+ // Stores.
+
+ virtual void evalBind(StmtNodeBuilderRef& B, SVal location, SVal val) {}
+
+ // End-of-path and dead symbol notification.
+
+ virtual void evalEndPath(ExprEngine& Engine,
+ EndOfFunctionNodeBuilder& Builder) {}
+
+
+ virtual void evalDeadSymbols(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
+ ExplodedNode* Pred,
+ const GRState* state,
+ SymbolReaper& SymReaper) {}
+
+ // Return statements.
+ virtual void evalReturn(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
+ const ReturnStmt* S,
+ ExplodedNode* Pred) {}
+
+ // Assumptions.
+ virtual const GRState* evalAssume(const GRState *state,
+ SVal Cond, bool Assumption) {
+ return state;
+ }
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
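
Every hook in TransferFuncs has an empty (or pass-through) default body, so a
concrete transfer-function object only overrides the events it cares about.
A minimal sketch of that pattern (illustrative names, not the real hooks):

#include <iostream>

struct Hooks {
  virtual ~Hooks() {}
  virtual void onCall() {}                          // default: do nothing
  virtual void onBind() {}                          // default: do nothing
  virtual bool onAssume(bool cond) { return cond; } // default: pass through
};

struct CallTracker : Hooks {
  void onCall() { std::cout << "saw a call\n"; }    // the one hook we need
};

int main() {
  CallTracker t;
  Hooks &h = t;
  h.onCall();        // overridden
  h.onBind();        // inherited no-op
}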
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
new file mode 100644
index 0000000..6bc9fe5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
@@ -0,0 +1,101 @@
+//==- WorkList.h - Worklist class used by CoreEngine ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines WorkList, an abstract class that represents an opaque
+// worklist used by CoreEngine to explore the reachability state space.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_WORKLIST
+#define LLVM_CLANG_GR_WORKLIST
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include <cstddef>
+
+namespace clang {
+
+class CFGBlock;
+
+namespace ento {
+
+class ExplodedNode;
+class ExplodedNodeImpl;
+
+class WorkListUnit {
+ ExplodedNode* node;
+ BlockCounter counter;
+ const CFGBlock* block;
+ unsigned blockIdx; // This is the index of the next statement.
+
+public:
+ WorkListUnit(ExplodedNode* N, BlockCounter C,
+ const CFGBlock* B, unsigned idx)
+ : node(N),
+ counter(C),
+ block(B),
+ blockIdx(idx) {}
+
+ explicit WorkListUnit(ExplodedNode* N, BlockCounter C)
+ : node(N),
+ counter(C),
+ block(NULL),
+ blockIdx(0) {}
+
+ /// Returns the node associated with the worklist unit.
+ ExplodedNode *getNode() const { return node; }
+
+ /// Returns the block counter map associated with the worklist unit.
+ BlockCounter getBlockCounter() const { return counter; }
+
+  /// Returns the CFGBlock associated with the worklist unit.
+  const CFGBlock *getBlock() const { return block; }
+
+  /// Returns the index within the CFGBlock for the worklist unit.
+  unsigned getIndex() const { return blockIdx; }
+};
+
+class WorkList {
+ BlockCounter CurrentCounter;
+public:
+ virtual ~WorkList();
+ virtual bool hasWork() const = 0;
+
+ virtual void enqueue(const WorkListUnit& U) = 0;
+
+ void enqueue(ExplodedNode *N, const CFGBlock *B, unsigned idx) {
+ enqueue(WorkListUnit(N, CurrentCounter, B, idx));
+ }
+
+ void enqueue(ExplodedNode *N) {
+ enqueue(WorkListUnit(N, CurrentCounter));
+ }
+
+ virtual WorkListUnit dequeue() = 0;
+
+ void setBlockCounter(BlockCounter C) { CurrentCounter = C; }
+ BlockCounter getBlockCounter() const { return CurrentCounter; }
+
+ class Visitor {
+ public:
+ Visitor() {}
+ virtual ~Visitor();
+ virtual bool visit(const WorkListUnit &U) = 0;
+ };
+ virtual bool visitItemsInWorkList(Visitor &V) = 0;
+
+ static WorkList *makeDFS();
+ static WorkList *makeBFS();
+ static WorkList *makeBFSBlockDFSContents();
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
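
makeDFS and makeBFS differ only in where dequeue takes its element; the
exploration order falls out of the queue discipline. A standalone sketch of
the two disciplines (simplified; the real worklists carry WorkListUnits):

#include <deque>
#include <iostream>

struct DFSList {                  // LIFO: explore the newest node first
  std::deque<int> q;
  void enqueue(int u) { q.push_back(u); }
  int dequeue() { int u = q.back(); q.pop_back(); return u; }
  bool hasWork() const { return !q.empty(); }
};

struct BFSList {                  // FIFO: explore the oldest node first
  std::deque<int> q;
  void enqueue(int u) { q.push_back(u); }
  int dequeue() { int u = q.front(); q.pop_front(); return u; }
  bool hasWork() const { return !q.empty(); }
};

int main() {
  DFSList d; BFSList b;
  for (int i = 0; i < 3; ++i) { d.enqueue(i); b.enqueue(i); }
  while (d.hasWork()) std::cout << d.dequeue() << ' ';  // 2 1 0
  std::cout << '\n';
  while (b.hasWork()) std::cout << b.dequeue() << ' ';  // 0 1 2
  std::cout << '\n';
}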
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
new file mode 100644
index 0000000..4c3e379
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
@@ -0,0 +1,26 @@
+//===-- CheckerRegistration.h - Checker Registration Function ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_FRONTEND_CHECKERREGISTRATION_H
+#define LLVM_CLANG_SA_FRONTEND_CHECKERREGISTRATION_H
+
+namespace clang {
+ class AnalyzerOptions;
+ class Diagnostic;
+
+namespace ento {
+ class CheckerManager;
+
+CheckerManager *registerCheckers(const AnalyzerOptions &opts, Diagnostic &diags);
+
+} // end ento namespace
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
index 1c0bbb7..e3867a2 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
@@ -7,13 +7,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_FRONTENDACTIONS_H
-#define LLVM_CLANG_CHECKER_FRONTENDACTIONS_H
+#ifndef LLVM_CLANG_GR_FRONTENDACTIONS_H
+#define LLVM_CLANG_GR_FRONTENDACTIONS_H
#include "clang/Frontend/FrontendAction.h"
namespace clang {
+namespace ento {
+
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
@@ -24,6 +26,8 @@ protected:
llvm::StringRef InFile);
};
-} // end namespace clang
+} // end ento namespace
+
+} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index 4591a0f..945dfb8 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -20,7 +20,9 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Mangle.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -50,7 +52,7 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *Parm) {
ID.AddInteger(Parm->getDepth());
ID.AddInteger(Parm->getPosition());
- // FIXME: Parameter pack
+ ID.AddBoolean(Parm->isParameterPack());
TemplateParameterList *Params = Parm->getTemplateParameters();
ID.AddInteger(Params->size());
@@ -65,8 +67,15 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
- // FIXME: Parameter pack
+ ID.AddBoolean(NTTP->isParameterPack());
ID.AddPointer(NTTP->getType().getAsOpaquePtr());
+ if (NTTP->isExpandedParameterPack()) {
+ ID.AddBoolean(true);
+ ID.AddInteger(NTTP->getNumExpansionTypes());
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I)
+ ID.AddPointer(NTTP->getExpansionType(I).getAsOpaquePtr());
+ } else
+ ID.AddBoolean(false);
continue;
}
@@ -78,7 +87,7 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
- TemplateTemplateParmDecl *TTP) {
+ TemplateTemplateParmDecl *TTP) const {
// Check if we already have a canonical template template parameter.
llvm::FoldingSetNodeID ID;
CanonicalTemplateTemplateParm::Profile(ID, TTP);
@@ -102,14 +111,40 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TTP->getIndex(), 0, false,
TTP->isParameterPack()));
else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*P))
- CanonParams.push_back(
- NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
- SourceLocation(), NTTP->getDepth(),
- NTTP->getPosition(), 0,
- getCanonicalType(NTTP->getType()),
- 0));
- else
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ QualType T = getCanonicalType(NTTP->getType());
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ NonTypeTemplateParmDecl *Param;
+ if (NTTP->isExpandedParameterPack()) {
+ llvm::SmallVector<QualType, 2> ExpandedTypes;
+ llvm::SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
+ ExpandedTInfos.push_back(
+ getTrivialTypeSourceInfo(ExpandedTypes.back()));
+ }
+
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ TInfo,
+ ExpandedTypes.data(),
+ ExpandedTypes.size(),
+ ExpandedTInfos.data());
+ } else {
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ NTTP->isParameterPack(),
+ TInfo);
+ }
+ CanonParams.push_back(Param);
+
+ } else
CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
cast<TemplateTemplateParmDecl>(*P)));
}
@@ -117,7 +152,9 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateTemplateParmDecl *CanonTTP
= TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(), TTP->getDepth(),
- TTP->getPosition(), 0,
+ TTP->getPosition(),
+ TTP->isParameterPack(),
+ 0,
TemplateParameterList::Create(*this, SourceLocation(),
SourceLocation(),
CanonParams.data(),
@@ -160,12 +197,13 @@ ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
CFConstantStringTypeDecl(0), NSConstantStringTypeDecl(0),
ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0), jmp_bufDecl(0),
sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
+ cudaConfigureCallDecl(0),
NullTypeSourceInfo(QualType()),
SourceMgr(SM), LangOpts(LOpts), ABI(createCXXABI(t)), Target(t),
Idents(idents), Selectors(sels),
BuiltinInfo(builtins),
DeclarationNames(*this),
- ExternalSource(0), PrintingPolicy(LOpts),
+ ExternalSource(0), Listener(0), PrintingPolicy(LOpts),
LastSDM(0, 0),
UniqueBlockByRefTypeID(0), UniqueBlockParmTypeID(0) {
ObjCIdRedefinitionType = QualType();
@@ -314,9 +352,12 @@ void ASTContext::InitBuiltinTypes() {
InitBuiltinType(Int128Ty, BuiltinType::Int128);
InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
- if (LangOpts.CPlusPlus) // C++ 3.9.1p5
- InitBuiltinType(WCharTy, BuiltinType::WChar);
- else // C99
+ if (LangOpts.CPlusPlus) { // C++ 3.9.1p5
+ if (!LangOpts.ShortWChar)
+ InitBuiltinType(WCharTy, BuiltinType::WChar_S);
+      else // -fshort-wchar makes wchar_t unsigned.
+ InitBuiltinType(WCharTy, BuiltinType::WChar_U);
+ } else // C99
WCharTy = getFromTargetType(Target.getWCharType());
if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
@@ -329,9 +370,6 @@ void ASTContext::InitBuiltinTypes() {
else // C99
Char32Ty = getFromTargetType(Target.getChar32Type());
- // Placeholder type for functions.
- InitBuiltinType(OverloadTy, BuiltinType::Overload);
-
// Placeholder type for type-dependent expressions whose type is
// completely unknown. No code should ever check a type against
// DependentTy and users should never see it; however, it is here to
@@ -339,9 +377,8 @@ void ASTContext::InitBuiltinTypes() {
// expressions.
InitBuiltinType(DependentTy, BuiltinType::Dependent);
- // Placeholder type for C++0x auto declarations whose real type has
- // not yet been deduced.
- InitBuiltinType(UndeducedAutoTy, BuiltinType::UndeducedAuto);
+ // Placeholder type for functions.
+ InitBuiltinType(OverloadTy, BuiltinType::Overload);
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
@@ -369,6 +406,10 @@ void ASTContext::InitBuiltinTypes() {
InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
}
+Diagnostic &ASTContext::getDiagnostics() const {
+ return SourceMgr.getDiagnostics();
+}
+
AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
AttrVec *&Result = DeclAttrs[D];
if (!Result) {
@@ -525,12 +566,33 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
/// this method will assert on them.
/// If @p RefAsPointee, references are treated like their underlying type
/// (for alignof), else they're treated like pointers (for CodeGen).
-CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
+CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
unsigned Align = Target.getCharWidth();
- Align = std::max(Align, D->getMaxAlignment());
+ bool UseAlignAttrOnly = false;
+ if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+ Align = AlignFromAttr;
+
+ // __attribute__((aligned)) can increase or decrease alignment
+ // *except* on a struct or struct member, where it only increases
+ // alignment unless 'packed' is also specified.
+ //
+ // It is an error for [[align]] to decrease alignment, so we can
+ // ignore that possibility; Sema should diagnose it.
+ if (isa<FieldDecl>(D)) {
+ UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+ } else {
+ UseAlignAttrOnly = true;
+ }
+ }
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ // If we're using the align attribute only, just ignore everything
+ // else about the declaration and its type.
+ if (UseAlignAttrOnly) {
+ // do nothing
+
+ } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
QualType T = VD->getType();
if (const ReferenceType* RT = T->getAs<ReferenceType>()) {
if (RefAsPointee)
@@ -539,41 +601,61 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
T = getPointerType(RT->getPointeeType());
}
if (!T->isIncompleteType() && !T->isFunctionType()) {
+ // Adjust alignments of declarations with array type by the
+ // large-array alignment on the target.
unsigned MinWidth = Target.getLargeArrayMinWidth();
- unsigned ArrayAlign = Target.getLargeArrayAlign();
- if (isa<VariableArrayType>(T) && MinWidth != 0)
- Align = std::max(Align, ArrayAlign);
- if (ConstantArrayType *CT = dyn_cast<ConstantArrayType>(T)) {
- unsigned Size = getTypeSize(CT);
- if (MinWidth != 0 && MinWidth <= Size)
- Align = std::max(Align, ArrayAlign);
+ const ArrayType *arrayType;
+ if (MinWidth && (arrayType = getAsArrayType(T))) {
+ if (isa<VariableArrayType>(arrayType))
+ Align = std::max(Align, Target.getLargeArrayAlign());
+ else if (isa<ConstantArrayType>(arrayType) &&
+ MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
+ Align = std::max(Align, Target.getLargeArrayAlign());
+
+ // Walk through any array types while we're at it.
+ T = getBaseElementType(arrayType);
}
- // Incomplete or function types default to 1.
- while (isa<VariableArrayType>(T) || isa<IncompleteArrayType>(T))
- T = cast<ArrayType>(T)->getElementType();
-
Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
}
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
- // In the case of a field in a packed struct, we want the minimum
- // of the alignment of the field and the alignment of the struct.
- Align = std::min(Align,
- getPreferredTypeAlign(FD->getParent()->getTypeForDecl()));
+
+ // Fields can be subject to extra alignment constraints, for example if
+ // the field is packed, the struct is packed, or the struct has a
+ // max-field-alignment constraint (#pragma pack). So calculate
+ // the actual alignment of the field within the struct, and then
+ // (as we're expected to) constrain that by the alignment of the type.
+ if (const FieldDecl *field = dyn_cast<FieldDecl>(VD)) {
+ // Compute the field's alignment from the record layout.
+ const ASTRecordLayout &layout = getASTRecordLayout(field->getParent());
+
+ // Start with the record's overall alignment.
+ unsigned fieldAlign = toBits(layout.getAlignment());
+
+ // Use the GCD of that and the offset within the record.
+ uint64_t offset = layout.getFieldOffset(field->getFieldIndex());
+ if (offset > 0) {
+ // Alignment is always a power of 2, so the GCD will be a power of 2,
+ // which means we can isolate the lowest set bit of the offset instead
+ // of running Euclid's algorithm.
+ uint64_t lowBitOfOffset = offset & (~offset + 1);
+ if (lowBitOfOffset < fieldAlign)
+ fieldAlign = static_cast<unsigned>(lowBitOfOffset);
+ }
+
+ Align = std::min(Align, fieldAlign);
}
}
- return CharUnits::fromQuantity(Align / Target.getCharWidth());
+ return toCharUnitsFromBits(Align);
}
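
The lowest-set-bit shortcut used for the field alignment above is easiest to verify in isolation. A minimal standalone sketch (hypothetical names, not clang API):

    #include <cassert>
    #include <cstdint>

    // For a field at `offset` (in bits) inside a record aligned to
    // `recordAlign` (a power of 2), the guaranteed alignment is
    // min(recordAlign, lowest set bit of offset).
    static uint64_t fieldAlignFromOffset(uint64_t recordAlign, uint64_t offset) {
      if (offset == 0) return recordAlign;       // offset 0 keeps full alignment
      uint64_t lowBit = offset & (~offset + 1);  // isolate the lowest set bit
      return lowBit < recordAlign ? lowBit : recordAlign;
    }

    int main() {
      assert(fieldAlignFromOffset(16, 0) == 16);  // first field: full alignment
      assert(fieldAlignFromOffset(16, 4) == 4);   // offset 4 -> at most 4-aligned
      assert(fieldAlignFromOffset(16, 24) == 8);  // 24 = 0b11000 -> low bit is 8
      return 0;
    }
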
std::pair<CharUnits, CharUnits>
-ASTContext::getTypeInfoInChars(const Type *T) {
+ASTContext::getTypeInfoInChars(const Type *T) const {
std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
- return std::make_pair(CharUnits::fromQuantity(Info.first / getCharWidth()),
- CharUnits::fromQuantity(Info.second / getCharWidth()));
+ return std::make_pair(toCharUnitsFromBits(Info.first),
+ toCharUnitsFromBits(Info.second));
}
std::pair<CharUnits, CharUnits>
-ASTContext::getTypeInfoInChars(QualType T) {
+ASTContext::getTypeInfoInChars(QualType T) const {
return getTypeInfoInChars(T.getTypePtr());
}
@@ -584,7 +666,7 @@ ASTContext::getTypeInfoInChars(QualType T) {
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
std::pair<uint64_t, unsigned>
-ASTContext::getTypeInfo(const Type *T) {
+ASTContext::getTypeInfo(const Type *T) const {
uint64_t Width=0;
unsigned Align=8;
switch (T->getTypeClass()) {
@@ -652,7 +734,8 @@ ASTContext::getTypeInfo(const Type *T) {
Width = Target.getCharWidth();
Align = Target.getCharAlign();
break;
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
Width = Target.getWCharWidth();
Align = Target.getWCharAlign();
break;
@@ -760,8 +843,8 @@ ASTContext::getTypeInfo(const Type *T) {
case Type::ObjCInterface: {
const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
- Width = Layout.getSize();
- Align = Layout.getAlignment();
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
break;
}
case Type::Record:
@@ -779,8 +862,8 @@ ASTContext::getTypeInfo(const Type *T) {
const RecordType *RT = cast<RecordType>(TT);
const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
- Width = Layout.getSize();
- Align = Layout.getAlignment();
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
break;
}
@@ -788,11 +871,26 @@ ASTContext::getTypeInfo(const Type *T) {
return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
getReplacementType().getTypePtr());
+ case Type::Auto: {
+ const AutoType *A = cast<AutoType>(T);
+ assert(A->isDeduced() && "cannot request the size of an undeduced type");
+ return getTypeInfo(A->getDeducedType().getTypePtr());
+ }
+
+ case Type::Paren:
+ return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+
case Type::Typedef: {
const TypedefDecl *Typedef = cast<TypedefType>(T)->getDecl();
std::pair<uint64_t, unsigned> Info
= getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
- Align = std::max(Typedef->getMaxAlignment(), Info.second);
+ // If the typedef has an aligned attribute on it, it overrides any computed
+ // alignment we have. This violates the GCC documentation (which says that
+ // attribute(aligned) can only round up) but matches its implementation.
+ if (unsigned AttrAlign = Typedef->getMaxAlignment())
+ Align = AttrAlign;
+ else
+ Align = Info.second;
Width = Info.first;
break;
}
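
The override (rather than round-up) behavior described in the comment is easy to observe with any GCC-compatible compiler. A small sketch (the natural alignment of double is target-dependent, so this prints instead of asserting):

    #include <cstdio>

    // aligned on a typedef replaces the computed alignment rather than
    // rounding it up, so it can go below the natural alignment.
    typedef double UnderAlignedDouble __attribute__((aligned(2)));

    int main() {
      std::printf("alignof(double)             = %zu\n", alignof(double));
      std::printf("alignof(UnderAlignedDouble) = %zu\n",
                  alignof(UnderAlignedDouble));  // expected to print 2
      return 0;
    }
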
@@ -811,6 +909,10 @@ ASTContext::getTypeInfo(const Type *T) {
case Type::Elaborated:
return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+ case Type::Attributed:
+ return getTypeInfo(
+ cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
case Type::TemplateSpecialization:
assert(getCanonicalType(T) != T &&
"Cannot request the size of a dependent type");
@@ -824,29 +926,39 @@ ASTContext::getTypeInfo(const Type *T) {
return std::make_pair(Width, Align);
}
+/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+ return CharUnits::fromQuantity(BitSize / getCharWidth());
+}
+
+/// toBits - Convert a size in characters to a size in bits.
+int64_t ASTContext::toBits(CharUnits CharSize) const {
+ return CharSize.getQuantity() * getCharWidth();
+}
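
Both helpers are inverse scalings by the target's char width. A standalone sketch assuming an 8-bit char (free functions mirroring the methods above; the names are reused for clarity only):

    #include <cassert>
    #include <cstdint>

    static const unsigned CharWidth = 8;  // assumed 8-bit char target

    static int64_t toCharUnitsFromBits(int64_t bits) { return bits / CharWidth; }
    static int64_t toBits(int64_t chars) { return chars * CharWidth; }

    int main() {
      assert(toCharUnitsFromBits(32) == 4);  // a 32-bit int spans 4 chars
      assert(toBits(4) == 32);               // and back again
      return 0;
    }
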
+
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
-CharUnits ASTContext::getTypeSizeInChars(QualType T) {
- return CharUnits::fromQuantity(getTypeSize(T) / getCharWidth());
+CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
}
-CharUnits ASTContext::getTypeSizeInChars(const Type *T) {
- return CharUnits::fromQuantity(getTypeSize(T) / getCharWidth());
+CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
}
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
-CharUnits ASTContext::getTypeAlignInChars(QualType T) {
- return CharUnits::fromQuantity(getTypeAlign(T) / getCharWidth());
+CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
}
-CharUnits ASTContext::getTypeAlignInChars(const Type *T) {
- return CharUnits::fromQuantity(getTypeAlign(T) / getCharWidth());
+CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
}
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
-unsigned ASTContext::getPreferredTypeAlign(const Type *T) {
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
unsigned ABIAlign = getTypeAlign(T);
// Double and long long should be naturally aligned if possible.
@@ -863,7 +975,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) {
/// Collect all ivars, including those synthesized, in the current class.
///
void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
- llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) const {
  // FIXME: This needs to be removed, but there are too many places which
// assume const-ness of ObjCInterfaceDecl
ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
@@ -880,7 +992,7 @@ void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
///
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
bool leafClass,
- llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) const {
if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
DeepCollectObjCIvars(SuperClass, false, Ivars);
if (!leafClass) {
@@ -940,7 +1052,7 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
}
}
-unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) {
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
unsigned count = 0;
// Count ivars declared in class extension.
for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
@@ -985,6 +1097,25 @@ void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
ObjCImpls[CatD] = ImplD;
}
+/// \brief Get the copy initialization expression of a VarDecl, or NULL if
+/// none exists.
+Expr *ASTContext::getBlockVarCopyInits(const VarDecl *VD) {
+ assert(VD && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "getBlockVarCopyInits - not __block var");
+ llvm::DenseMap<const VarDecl*, Expr*>::iterator
+ I = BlockVarCopyInits.find(VD);
+ return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0;
+}
+
+/// \brief Set the copy initialization expression of a block var decl.
+void ASTContext::setBlockVarCopyInits(VarDecl *VD, Expr *Init) {
+ assert(VD && Init && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "setBlockVarCopyInits - not __block var");
+ BlockVarCopyInits[VD] = Init;
+}
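
Both accessors follow the usual get-or-null / overwrite discipline on a side table. A simplified sketch with std::map standing in for llvm::DenseMap (keys and values hypothetical):

    #include <cassert>
    #include <map>

    static std::map<int, const char *> copyInits;  // decl-id -> init expression

    static const char *lookup(int declId) {
      std::map<int, const char *>::const_iterator it = copyInits.find(declId);
      return it != copyInits.end() ? it->second : 0;  // null when absent
    }

    int main() {
      assert(lookup(1) == 0);      // nothing recorded yet
      copyInits[1] = "init-expr";  // setBlockVarCopyInits analogue
      assert(lookup(1) != 0);      // getBlockVarCopyInits analogue
      return 0;
    }
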
+
/// \brief Allocate an uninitialized TypeSourceInfo.
///
/// The caller should initialize the memory held by TypeSourceInfo using
@@ -994,7 +1125,7 @@ void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
/// should refer to how the declarator was written in source code, not to
/// what type semantic analysis resolved the declarator to.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
- unsigned DataSize) {
+ unsigned DataSize) const {
if (!DataSize)
DataSize = TypeLoc::getFullDataSizeForType(T);
else
@@ -1008,19 +1139,20 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
}
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
- SourceLocation L) {
+ SourceLocation L) const {
TypeSourceInfo *DI = CreateTypeSourceInfo(T);
- DI->getTypeLoc().initialize(L);
+ DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
return DI;
}
const ASTRecordLayout &
-ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) {
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
return getObjCLayout(D, 0);
}
const ASTRecordLayout &
-ASTContext::getASTObjCImplementationLayout(const ObjCImplementationDecl *D) {
+ASTContext::getASTObjCImplementationLayout(
+ const ObjCImplementationDecl *D) const {
return getObjCLayout(D->getClassInterface(), D);
}
@@ -1028,38 +1160,38 @@ ASTContext::getASTObjCImplementationLayout(const ObjCImplementationDecl *D) {
// Type creation/memoization methods
//===----------------------------------------------------------------------===//
-QualType ASTContext::getExtQualType(const Type *TypeNode, Qualifiers Quals) {
- unsigned Fast = Quals.getFastQualifiers();
- Quals.removeFastQualifiers();
+QualType
+ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
+ unsigned fastQuals = quals.getFastQualifiers();
+ quals.removeFastQualifiers();
// Check if we've already instantiated this type.
llvm::FoldingSetNodeID ID;
- ExtQuals::Profile(ID, TypeNode, Quals);
- void *InsertPos = 0;
- if (ExtQuals *EQ = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos)) {
- assert(EQ->getQualifiers() == Quals);
- QualType T = QualType(EQ, Fast);
- return T;
+ ExtQuals::Profile(ID, baseType, quals);
+ void *insertPos = 0;
+ if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
+ assert(eq->getQualifiers() == quals);
+ return QualType(eq, fastQuals);
}
- ExtQuals *New = new (*this, TypeAlignment) ExtQuals(*this, TypeNode, Quals);
- ExtQualNodes.InsertNode(New, InsertPos);
- QualType T = QualType(New, Fast);
- return T;
-}
-
-QualType ASTContext::getVolatileType(QualType T) {
- QualType CanT = getCanonicalType(T);
- if (CanT.isVolatileQualified()) return T;
+ // If the base type is not canonical, make the appropriate canonical type.
+ QualType canon;
+ if (!baseType->isCanonicalUnqualified()) {
+ SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
+ canonSplit.second.addConsistentQualifiers(quals);
+ canon = getExtQualType(canonSplit.first, canonSplit.second);
- QualifierCollector Quals;
- const Type *TypeNode = Quals.strip(T);
- Quals.addVolatile();
+ // Re-find the insert position.
+ (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
+ }
- return getExtQualType(TypeNode, Quals);
+ ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ ExtQualNodes.InsertNode(eq, insertPos);
+ return QualType(eq, fastQuals);
}
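
getExtQualType is the template for nearly every uniquing getter that follows: profile a structural key, look it up, build the canonical node first (which can itself insert nodes), re-find the insert position, then insert. A much-simplified analogy with a plain map in place of llvm::FoldingSet (a toy model, not clang's data structures):

    #include <cassert>
    #include <map>
    #include <string>

    static std::map<std::string, int> interned;  // structural key -> unique id
    static int nextId = 0;

    static int intern(const std::string &key, const std::string &canonKey) {
      std::map<std::string, int>::iterator it = interned.find(key);
      if (it != interned.end())
        return it->second;           // already uniqued
      if (canonKey != key)
        intern(canonKey, canonKey);  // recursion may grow the table, hence
                                     // the "re-find the insert position" step
      interned[key] = nextId;
      return nextId++;
    }

    int main() {
      assert(intern("sugared", "canonical") == intern("sugared", "canonical"));
      assert(intern("canonical", "canonical") != intern("sugared", "canonical"));
      return 0;
    }
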
-QualType ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) {
+QualType
+ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
QualType CanT = getCanonicalType(T);
if (CanT.getAddressSpace() == AddressSpace)
return T;
@@ -1079,13 +1211,13 @@ QualType ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) {
}
QualType ASTContext::getObjCGCQualType(QualType T,
- Qualifiers::GC GCAttr) {
+ Qualifiers::GC GCAttr) const {
QualType CanT = getCanonicalType(T);
if (CanT.getObjCGCAttr() == GCAttr)
return T;
- if (T->isPointerType()) {
- QualType Pointee = T->getAs<PointerType>()->getPointeeType();
+ if (const PointerType *ptr = T->getAs<PointerType>()) {
+ QualType Pointee = ptr->getPointeeType();
if (Pointee->isAnyPointerType()) {
QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
return getPointerType(ResultType);
@@ -1106,79 +1238,28 @@ QualType ASTContext::getObjCGCQualType(QualType T,
return getExtQualType(TypeNode, Quals);
}
-static QualType getExtFunctionType(ASTContext& Context, QualType T,
- const FunctionType::ExtInfo &Info) {
- QualType ResultType;
- if (const PointerType *Pointer = T->getAs<PointerType>()) {
- QualType Pointee = Pointer->getPointeeType();
- ResultType = getExtFunctionType(Context, Pointee, Info);
- if (ResultType == Pointee)
- return T;
-
- ResultType = Context.getPointerType(ResultType);
- } else if (const BlockPointerType *BlockPointer
- = T->getAs<BlockPointerType>()) {
- QualType Pointee = BlockPointer->getPointeeType();
- ResultType = getExtFunctionType(Context, Pointee, Info);
- if (ResultType == Pointee)
- return T;
-
- ResultType = Context.getBlockPointerType(ResultType);
- } else if (const MemberPointerType *MemberPointer
- = T->getAs<MemberPointerType>()) {
- QualType Pointee = MemberPointer->getPointeeType();
- ResultType = getExtFunctionType(Context, Pointee, Info);
- if (ResultType == Pointee)
- return T;
-
- ResultType = Context.getMemberPointerType(ResultType,
- MemberPointer->getClass());
- } else if (const FunctionType *F = T->getAs<FunctionType>()) {
- if (F->getExtInfo() == Info)
- return T;
-
- if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(F)) {
- ResultType = Context.getFunctionNoProtoType(FNPT->getResultType(),
- Info);
- } else {
- const FunctionProtoType *FPT = cast<FunctionProtoType>(F);
- ResultType
- = Context.getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
- FPT->getNumArgs(), FPT->isVariadic(),
- FPT->getTypeQuals(),
- FPT->hasExceptionSpec(),
- FPT->hasAnyExceptionSpec(),
- FPT->getNumExceptions(),
- FPT->exception_begin(),
- Info);
- }
- } else
+const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
+ FunctionType::ExtInfo Info) {
+ if (T->getExtInfo() == Info)
return T;
- return Context.getQualifiedType(ResultType, T.getLocalQualifiers());
-}
-
-QualType ASTContext::getNoReturnType(QualType T, bool AddNoReturn) {
- FunctionType::ExtInfo Info = getFunctionExtInfo(T);
- return getExtFunctionType(*this, T,
- Info.withNoReturn(AddNoReturn));
-}
-
-QualType ASTContext::getCallConvType(QualType T, CallingConv CallConv) {
- FunctionType::ExtInfo Info = getFunctionExtInfo(T);
- return getExtFunctionType(*this, T,
- Info.withCallingConv(CallConv));
-}
+ QualType Result;
+ if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+ Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = Info;
+ Result = getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ }
-QualType ASTContext::getRegParmType(QualType T, unsigned RegParm) {
- FunctionType::ExtInfo Info = getFunctionExtInfo(T);
- return getExtFunctionType(*this, T,
- Info.withRegParm(RegParm));
+ return cast<FunctionType>(Result.getTypePtr());
}
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
-QualType ASTContext::getComplexType(QualType T) {
+QualType ASTContext::getComplexType(QualType T) const {
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -1196,7 +1277,7 @@ QualType ASTContext::getComplexType(QualType T) {
// Get the new insert position for the node we care about.
ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
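
The mechanical change from "NewIP = NewIP" to "(void)NewIP", repeated throughout this file, swaps a self-assignment hack for the idiomatic way to mark an assert-only variable as intentionally used; clang itself now warns on the old form under -Wself-assign. A minimal illustration:

    int main() {
      int checkedOnlyInAsserts = 42;
      // checkedOnlyInAsserts = checkedOnlyInAsserts;  // trips -Wself-assign
      (void)checkedOnlyInAsserts;  // portable "intentionally unused" marker
      return 0;
    }
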
ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
Types.push_back(New);
@@ -1206,7 +1287,7 @@ QualType ASTContext::getComplexType(QualType T) {
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
-QualType ASTContext::getPointerType(QualType T) {
+QualType ASTContext::getPointerType(QualType T) const {
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -1224,7 +1305,7 @@ QualType ASTContext::getPointerType(QualType T) {
// Get the new insert position for the node we care about.
PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
Types.push_back(New);
@@ -1234,7 +1315,7 @@ QualType ASTContext::getPointerType(QualType T) {
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
-QualType ASTContext::getBlockPointerType(QualType T) {
+QualType ASTContext::getBlockPointerType(QualType T) const {
assert(T->isFunctionType() && "block of function types only");
// Unique pointers, to guarantee there is only one block of a particular
// structure.
@@ -1255,7 +1336,7 @@ QualType ASTContext::getBlockPointerType(QualType T) {
// Get the new insert position for the node we care about.
BlockPointerType *NewIP =
BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
BlockPointerType *New
= new (*this, TypeAlignment) BlockPointerType(T, Canonical);
@@ -1266,7 +1347,8 @@ QualType ASTContext::getBlockPointerType(QualType T) {
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
-QualType ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) {
+QualType
+ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -1289,7 +1371,7 @@ QualType ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) {
// Get the new insert position for the node we care about.
LValueReferenceType *NewIP =
LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
LValueReferenceType *New
@@ -1303,7 +1385,7 @@ QualType ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) {
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
-QualType ASTContext::getRValueReferenceType(QualType T) {
+QualType ASTContext::getRValueReferenceType(QualType T) const {
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -1326,7 +1408,7 @@ QualType ASTContext::getRValueReferenceType(QualType T) {
// Get the new insert position for the node we care about.
RValueReferenceType *NewIP =
RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
RValueReferenceType *New
@@ -1338,7 +1420,7 @@ QualType ASTContext::getRValueReferenceType(QualType T) {
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
-QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) {
+QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -1358,7 +1440,7 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) {
// Get the new insert position for the node we care about.
MemberPointerType *NewIP =
MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
MemberPointerType *New
= new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
@@ -1372,7 +1454,7 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) {
QualType ASTContext::getConstantArrayType(QualType EltTy,
const llvm::APInt &ArySizeIn,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals) {
+ unsigned IndexTypeQuals) const {
assert((EltTy->isDependentType() ||
EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
"Constant array of VLAs is illegal!");
@@ -1380,55 +1462,185 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
// Convert the array size into a canonical width matching the pointer size for
// the target.
llvm::APInt ArySize(ArySizeIn);
- ArySize.zextOrTrunc(Target.getPointerWidth(EltTy.getAddressSpace()));
+ ArySize =
+ ArySize.zextOrTrunc(Target.getPointerWidth(EltTy.getAddressSpace()));
llvm::FoldingSetNodeID ID;
- ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, EltTypeQuals);
+ ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
void *InsertPos = 0;
if (ConstantArrayType *ATP =
ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(ATP, 0);
- // If the element type isn't canonical, this won't be a canonical type either,
- // so fill in the canonical type field.
- QualType Canonical;
- if (!EltTy.isCanonical()) {
- Canonical = getConstantArrayType(getCanonicalType(EltTy), ArySize,
- ASM, EltTypeQuals);
+ // If the element type isn't canonical or has qualifiers, this won't
+ // be a canonical type either, so fill in the canonical type field.
+ QualType Canon;
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getConstantArrayType(QualType(canonSplit.first, 0), ArySize,
+ ASM, IndexTypeQuals);
+ Canon = getQualifiedType(Canon, canonSplit.second);
+
// Get the new insert position for the node we care about.
ConstantArrayType *NewIP =
ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
ConstantArrayType *New = new(*this,TypeAlignment)
- ConstantArrayType(EltTy, Canonical, ArySize, ASM, EltTypeQuals);
+ ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
ConstantArrayTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
}
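
The qualifier splitting here mirrors the language rule that cv-qualifiers on an array's element type qualify the array as a whole, which is what makes it safe to hoist them off the canonical element type and re-apply them at the array level. A standalone check using the standard type traits:

    #include <type_traits>

    typedef int A[3];

    // Qualifying an array qualifies its elements: "const A" *is* const int[3].
    static_assert(std::is_same<const A, const int[3]>::value,
                  "const applies through the array typedef");
    // And the qualifier strips back off through the array the same way.
    static_assert(std::is_same<std::remove_const<const A>::type, int[3]>::value,
                  "remove_const sees the element qualifier as top-level");

    int main() { return 0; }
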
+/// getVariableArrayDecayedType - Turns the given type, which may be
+/// variably-modified, into the corresponding type with all the known
+/// sizes replaced with [*].
+QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
+ // By far the most common case.
+ if (!type->isVariablyModifiedType()) return type;
+
+ QualType result;
+
+ SplitQualType split = type.getSplitDesugaredType();
+ const Type *ty = split.first;
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't desugar past all non-canonical types?");
+
+ // These types should never be variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::DependentSizedExtVector:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Record:
+ case Type::Enum:
+ case Type::UnresolvedUsing:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::DependentName:
+ case Type::InjectedClassName:
+ case Type::TemplateSpecialization:
+ case Type::DependentTemplateSpecialization:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::Auto:
+ case Type::PackExpansion:
+ llvm_unreachable("type should never be variably-modified");
+
+ // These types can be variably-modified but should never need to
+ // further decay.
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ case Type::BlockPointer:
+ case Type::MemberPointer:
+ return type;
+
+ // These types can be variably-modified. All these modifications
+ // preserve structure except as noted by comments.
+ // TODO: if we ever care about optimizing VLAs, there are no-op
+ // optimizations available here.
+ case Type::Pointer:
+ result = getPointerType(getVariableArrayDecayedType(
+ cast<PointerType>(ty)->getPointeeType()));
+ break;
+
+ case Type::LValueReference: {
+ const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
+ result = getLValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()),
+ lv->isSpelledAsLValue());
+ break;
+ }
+
+ case Type::RValueReference: {
+ const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
+ result = getRValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()));
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
+ result = getConstantArrayType(
+ getVariableArrayDecayedType(cat->getElementType()),
+ cat->getSize(),
+ cat->getSizeModifier(),
+ cat->getIndexTypeCVRQualifiers());
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
+ result = getDependentSizedArrayType(
+ getVariableArrayDecayedType(dat->getElementType()),
+ dat->getSizeExpr(),
+ dat->getSizeModifier(),
+ dat->getIndexTypeCVRQualifiers(),
+ dat->getBracketsRange());
+ break;
+ }
+
+ // Turn incomplete types into [*] types.
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(iat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Normal,
+ iat->getIndexTypeCVRQualifiers(),
+ SourceRange());
+ break;
+ }
+
+ // Turn VLA types into [*] types.
+ case Type::VariableArray: {
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(vat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Star,
+ vat->getIndexTypeCVRQualifiers(),
+ vat->getBracketsRange());
+ break;
+ }
+ }
+
+ // Apply the top-level qualifiers from the original.
+ return getQualifiedType(result, split.second);
+}
+
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy,
Expr *NumElts,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals,
- SourceRange Brackets) {
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const {
// Since we don't unique expressions, it isn't possible to unique VLA's
// that have an expression provided for their size.
- QualType CanonType;
+ QualType Canon;
- if (!EltTy.isCanonical()) {
- if (NumElts)
- NumElts->Retain();
- CanonType = getVariableArrayType(getCanonicalType(EltTy), NumElts, ASM,
- EltTypeQuals, Brackets);
+ // Be sure to pull qualifiers off the element type.
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getVariableArrayType(QualType(canonSplit.first, 0), NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ Canon = getQualifiedType(Canon, canonSplit.second);
}
VariableArrayType *New = new(*this, TypeAlignment)
- VariableArrayType(EltTy, CanonType, NumElts, ASM, EltTypeQuals, Brackets);
+ VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
VariableArrayTypes.push_back(New);
Types.push_back(New);
@@ -1438,109 +1650,118 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
-QualType ASTContext::getDependentSizedArrayType(QualType EltTy,
- Expr *NumElts,
+QualType ASTContext::getDependentSizedArrayType(QualType elementType,
+ Expr *numElements,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals,
- SourceRange Brackets) {
- assert((!NumElts || NumElts->isTypeDependent() ||
- NumElts->isValueDependent()) &&
+ unsigned elementTypeQuals,
+ SourceRange brackets) const {
+ assert((!numElements || numElements->isTypeDependent() ||
+ numElements->isValueDependent()) &&
"Size must be type- or value-dependent!");
- void *InsertPos = 0;
- DependentSizedArrayType *Canon = 0;
- llvm::FoldingSetNodeID ID;
-
- if (NumElts) {
- // Dependently-sized array types that do not have a specified
- // number of elements will have their sizes deduced from an
- // initializer.
- DependentSizedArrayType::Profile(ID, *this, getCanonicalType(EltTy), ASM,
- EltTypeQuals, NumElts);
-
- Canon = DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ // Dependently-sized array types that do not have a specified number
+ // of elements will have their sizes deduced from a dependent
+ // initializer. We do no canonicalization here at all, which is okay
+ // because they can't be used in most locations.
+ if (!numElements) {
+ DependentSizedArrayType *newType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, QualType(),
+ numElements, ASM, elementTypeQuals,
+ brackets);
+ Types.push_back(newType);
+ return QualType(newType, 0);
}
- DependentSizedArrayType *New;
- if (Canon) {
- // We already have a canonical version of this array type; use it as
- // the canonical type for a newly-built type.
- New = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, EltTy, QualType(Canon, 0),
- NumElts, ASM, EltTypeQuals, Brackets);
- } else {
- QualType CanonEltTy = getCanonicalType(EltTy);
- if (CanonEltTy == EltTy) {
- New = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, EltTy, QualType(),
- NumElts, ASM, EltTypeQuals, Brackets);
-
- if (NumElts) {
- DependentSizedArrayType *CanonCheck
- = DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CanonCheck && "Dependent-sized canonical array type broken");
- (void)CanonCheck;
- DependentSizedArrayTypes.InsertNode(New, InsertPos);
- }
- } else {
- QualType Canon = getDependentSizedArrayType(CanonEltTy, NumElts,
- ASM, EltTypeQuals,
- SourceRange());
- New = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, EltTy, Canon,
- NumElts, ASM, EltTypeQuals, Brackets);
- }
- }
+ // Otherwise, we actually build a new type every time, but we
+ // also build a canonical type.
- Types.push_back(New);
- return QualType(New, 0);
-}
+ SplitQualType canonElementType = getCanonicalType(elementType).split();
-QualType ASTContext::getIncompleteArrayType(QualType EltTy,
+ void *insertPos = 0;
+ llvm::FoldingSetNodeID ID;
+ DependentSizedArrayType::Profile(ID, *this,
+ QualType(canonElementType.first, 0),
+ ASM, elementTypeQuals, numElements);
+
+ // Look for an existing type with these properties.
+ DependentSizedArrayType *canonTy =
+ DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+
+ // If we don't have one, build one.
+ if (!canonTy) {
+ canonTy = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, QualType(canonElementType.first, 0),
+ QualType(), numElements, ASM, elementTypeQuals,
+ brackets);
+ DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
+ Types.push_back(canonTy);
+ }
+
+ // Apply qualifiers from the element type to the array.
+ QualType canon = getQualifiedType(QualType(canonTy, 0),
+ canonElementType.second);
+
+ // If we didn't need extra canonicalization for the element type,
+ // then just use that as our result.
+ if (QualType(canonElementType.first, 0) == elementType)
+ return canon;
+
+ // Otherwise, we need to build a type which follows the spelling
+ // of the element type.
+ DependentSizedArrayType *sugaredType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, canon, numElements,
+ ASM, elementTypeQuals, brackets);
+ Types.push_back(sugaredType);
+ return QualType(sugaredType, 0);
+}
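
The sugared/canonical pairing built here is the general arrangement for type sugar: the sugared node preserves the source spelling but delegates identity to the canonical node. Schematically (a toy model, not clang's representation):

    #include <cassert>
    #include <string>

    struct Node {
      std::string spelling;
      const Node *canonical;  // points at itself for canonical nodes
    };

    int main() {
      Node canon = { "int [n]", 0 };
      canon.canonical = &canon;                // canonical: its own identity
      Node sugared = { "MyInt [n]", &canon };  // new spelling, same identity
      assert(sugared.canonical == canon.canonical);
      return 0;
    }
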
+
+QualType ASTContext::getIncompleteArrayType(QualType elementType,
ArrayType::ArraySizeModifier ASM,
- unsigned EltTypeQuals) {
+ unsigned elementTypeQuals) const {
llvm::FoldingSetNodeID ID;
- IncompleteArrayType::Profile(ID, EltTy, ASM, EltTypeQuals);
+ IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
- void *InsertPos = 0;
- if (IncompleteArrayType *ATP =
- IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
- return QualType(ATP, 0);
+ void *insertPos = 0;
+ if (IncompleteArrayType *iat =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
+ return QualType(iat, 0);
// If the element type isn't canonical, this won't be a canonical type
- // either, so fill in the canonical type field.
- QualType Canonical;
+ // either, so fill in the canonical type field. We also have to pull
+ // qualifiers off the element type.
+ QualType canon;
- if (!EltTy.isCanonical()) {
- Canonical = getIncompleteArrayType(getCanonicalType(EltTy),
- ASM, EltTypeQuals);
+ if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(elementType).split();
+ canon = getIncompleteArrayType(QualType(canonSplit.first, 0),
+ ASM, elementTypeQuals);
+ canon = getQualifiedType(canon, canonSplit.second);
// Get the new insert position for the node we care about.
- IncompleteArrayType *NewIP =
- IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ IncompleteArrayType *existing =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+ assert(!existing && "Shouldn't be in the map!"); (void) existing;
}
- IncompleteArrayType *New = new (*this, TypeAlignment)
- IncompleteArrayType(EltTy, Canonical, ASM, EltTypeQuals);
+ IncompleteArrayType *newType = new (*this, TypeAlignment)
+ IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
- IncompleteArrayTypes.InsertNode(New, InsertPos);
- Types.push_back(New);
- return QualType(New, 0);
+ IncompleteArrayTypes.InsertNode(newType, insertPos);
+ Types.push_back(newType);
+ return QualType(newType, 0);
}
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
- VectorType::AltiVecSpecific AltiVecSpec) {
- BuiltinType *baseType;
-
- baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
- assert(baseType != 0 && "getVectorType(): Expecting a built-in type");
+ VectorType::VectorKind VecKind) const {
+ assert(vecType->isBuiltinType());
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
- VectorType::Profile(ID, vecType, NumElts, Type::Vector, AltiVecSpec);
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
@@ -1550,15 +1771,14 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
// so fill in the canonical type field.
QualType Canonical;
if (!vecType.isCanonical()) {
- Canonical = getVectorType(getCanonicalType(vecType), NumElts,
- VectorType::NotAltiVec);
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
// Get the new insert position for the node we care about.
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
VectorType *New = new (*this, TypeAlignment)
- VectorType(vecType, NumElts, Canonical, AltiVecSpec);
+ VectorType(vecType, NumElts, Canonical, VecKind);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -1566,16 +1786,14 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
-QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
- BuiltinType *baseType;
-
- baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
- assert(baseType != 0 && "getExtVectorType(): Expecting a built-in type");
+QualType
+ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
+ assert(vecType->isBuiltinType());
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
- VectorType::NotAltiVec);
+ VectorType::GenericVector);
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -1588,7 +1806,7 @@ QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
// Get the new insert position for the node we care about.
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
ExtVectorType *New = new (*this, TypeAlignment)
ExtVectorType(vecType, NumElts, Canonical);
@@ -1597,9 +1815,10 @@ QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
return QualType(New, 0);
}
-QualType ASTContext::getDependentSizedExtVectorType(QualType vecType,
- Expr *SizeExpr,
- SourceLocation AttrLoc) {
+QualType
+ASTContext::getDependentSizedExtVectorType(QualType vecType,
+ Expr *SizeExpr,
+ SourceLocation AttrLoc) const {
llvm::FoldingSetNodeID ID;
DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
SizeExpr);
@@ -1640,8 +1859,9 @@ QualType ASTContext::getDependentSizedExtVectorType(QualType vecType,
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
-QualType ASTContext::getFunctionNoProtoType(QualType ResultTy,
- const FunctionType::ExtInfo &Info) {
+QualType
+ASTContext::getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) const {
const CallingConv CallConv = Info.getCC();
// Unique functions, to guarantee there is only one function of a particular
// structure.
@@ -1663,7 +1883,7 @@ QualType ASTContext::getFunctionNoProtoType(QualType ResultTy,
// Get the new insert position for the node we care about.
FunctionNoProtoType *NewIP =
FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
FunctionNoProtoType *New = new (*this, TypeAlignment)
@@ -1675,19 +1895,14 @@ QualType ASTContext::getFunctionNoProtoType(QualType ResultTy,
/// getFunctionType - Return a normal function type with a typed argument
/// list. isVariadic indicates whether the argument list includes '...'.
-QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
- unsigned NumArgs, bool isVariadic,
- unsigned TypeQuals, bool hasExceptionSpec,
- bool hasAnyExceptionSpec, unsigned NumExs,
- const QualType *ExArray,
- const FunctionType::ExtInfo &Info) {
- const CallingConv CallConv= Info.getCC();
+QualType
+ASTContext::getFunctionType(QualType ResultTy,
+ const QualType *ArgArray, unsigned NumArgs,
+ const FunctionProtoType::ExtProtoInfo &EPI) const {
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
- FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, isVariadic,
- TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
- NumExs, ExArray, Info);
+ FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, EPI);
void *InsertPos = 0;
if (FunctionProtoType *FTP =
@@ -1695,11 +1910,13 @@ QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
return QualType(FTP, 0);
// Determine whether the type being created is already canonical or not.
- bool isCanonical = !hasExceptionSpec && ResultTy.isCanonical();
+ bool isCanonical = !EPI.HasExceptionSpec && ResultTy.isCanonical();
for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
if (!ArgArray[i].isCanonicalAsParam())
isCanonical = false;
+ const CallingConv CallConv = EPI.ExtInfo.getCC();
+
// If this type isn't canonical, get the canonical version of it.
// The exception spec is not part of the canonical type.
QualType Canonical;
@@ -1709,28 +1926,33 @@ QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
for (unsigned i = 0; i != NumArgs; ++i)
CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
+ FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
+ if (CanonicalEPI.HasExceptionSpec) {
+ CanonicalEPI.HasExceptionSpec = false;
+ CanonicalEPI.HasAnyExceptionSpec = false;
+ CanonicalEPI.NumExceptions = 0;
+ }
+ CanonicalEPI.ExtInfo
+ = CanonicalEPI.ExtInfo.withCallingConv(getCanonicalCallConv(CallConv));
+
Canonical = getFunctionType(getCanonicalType(ResultTy),
CanonicalArgs.data(), NumArgs,
- isVariadic, TypeQuals, false,
- false, 0, 0,
- Info.withCallingConv(getCanonicalCallConv(CallConv)));
+ CanonicalEPI);
// Get the new insert position for the node we care about.
FunctionProtoType *NewIP =
FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
// FunctionProtoType objects are allocated with extra bytes after them
// for two variable size arrays (for parameter and exception types) at the
// end of them.
- FunctionProtoType *FTP =
- (FunctionProtoType*)Allocate(sizeof(FunctionProtoType) +
- NumArgs*sizeof(QualType) +
- NumExs*sizeof(QualType), TypeAlignment);
- new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, isVariadic,
- TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
- ExArray, NumExs, Canonical, Info);
+ size_t Size = sizeof(FunctionProtoType) +
+ NumArgs * sizeof(QualType) +
+ EPI.NumExceptions * sizeof(QualType);
+ FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
+ new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, Canonical, EPI);
Types.push_back(FTP);
FunctionProtoTypes.InsertNode(FTP, InsertPos);
return QualType(FTP, 0);
@@ -1752,7 +1974,7 @@ static bool NeedsInjectedClassNameType(const RecordDecl *D) {
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
- QualType TST) {
+ QualType TST) const {
assert(NeedsInjectedClassNameType(Decl));
if (Decl->TypeForDecl) {
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
@@ -1761,16 +1983,17 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
Decl->TypeForDecl = PrevDecl->TypeForDecl;
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
} else {
- Decl->TypeForDecl =
+ Type *newType =
new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
- Types.push_back(Decl->TypeForDecl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
}
return QualType(Decl->TypeForDecl, 0);
}
/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration.
-QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
+QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
assert(Decl && "Passed null for Decl param");
assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
@@ -1791,56 +2014,81 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
return getEnumType(Enum);
} else if (const UnresolvedUsingTypenameDecl *Using =
dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
- Decl->TypeForDecl = new (*this, TypeAlignment) UnresolvedUsingType(Using);
+ Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
} else
llvm_unreachable("TypeDecl without a type?");
- Types.push_back(Decl->TypeForDecl);
return QualType(Decl->TypeForDecl, 0);
}
/// getTypedefType - Return the unique reference to the type for the
/// specified typename decl.
QualType
-ASTContext::getTypedefType(const TypedefDecl *Decl, QualType Canonical) {
+ASTContext::getTypedefType(const TypedefDecl *Decl, QualType Canonical) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
if (Canonical.isNull())
Canonical = getCanonicalType(Decl->getUnderlyingType());
- Decl->TypeForDecl = new(*this, TypeAlignment)
+ TypedefType *newType = new(*this, TypeAlignment)
TypedefType(Type::Typedef, Decl, Canonical);
- Types.push_back(Decl->TypeForDecl);
- return QualType(Decl->TypeForDecl, 0);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
}
-QualType ASTContext::getRecordType(const RecordDecl *Decl) {
+QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration())
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Decl);
- Types.push_back(Decl->TypeForDecl);
- return QualType(Decl->TypeForDecl, 0);
+ RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
}
-QualType ASTContext::getEnumType(const EnumDecl *Decl) {
+QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration())
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Decl);
- Types.push_back(Decl->TypeForDecl);
- return QualType(Decl->TypeForDecl, 0);
+ EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
}
+QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType) {
+ llvm::FoldingSetNodeID id;
+ AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
+
+ void *insertPos = 0;
+ AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
+ if (type) return QualType(type, 0);
+
+ QualType canon = getCanonicalType(equivalentType);
+ type = new (*this, TypeAlignment)
+ AttributedType(canon, attrKind, modifiedType, equivalentType);
+
+ Types.push_back(type);
+ AttributedTypes.InsertNode(type, insertPos);
+
+ return QualType(type, 0);
+}
+
+
/// \brief Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
- QualType Replacement) {
+ QualType Replacement) const {
assert(Replacement.isCanonical()
&& "replacement types must always be canonical");
@@ -1860,12 +2108,48 @@ ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
return QualType(SubstParm, 0);
}
+/// \brief Retrieve a substitution-result type for the substitution of a
+/// whole template argument pack for a template type parameter pack.
+QualType ASTContext::getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Parm,
+ const TemplateArgument &ArgPack) {
+#ifndef NDEBUG
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P) {
+ assert(P->getKind() == TemplateArgument::Type && "Pack contains a non-type");
+ assert(P->getAsType().isCanonical() && "Pack contains non-canonical type");
+ }
+#endif
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
+ void *InsertPos = 0;
+ if (SubstTemplateTypeParmPackType *SubstParm
+ = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(SubstParm, 0);
+
+ QualType Canon;
+ if (!Parm->isCanonicalUnqualified()) {
+ Canon = getCanonicalType(QualType(Parm, 0));
+ Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
+ ArgPack);
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ SubstTemplateTypeParmPackType *SubstParm
+ = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
+ ArgPack);
+ Types.push_back(SubstParm);
+ SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
+ return QualType(SubstParm, 0);
+}
+
/// \brief Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
bool ParameterPack,
- IdentifierInfo *Name) {
+ IdentifierInfo *Name) const {
llvm::FoldingSetNodeID ID;
TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, Name);
void *InsertPos = 0;
@@ -1898,7 +2182,7 @@ TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
SourceLocation NameLoc,
const TemplateArgumentListInfo &Args,
- QualType CanonType) {
+ QualType CanonType) const {
QualType TST = getTemplateSpecializationType(Name, Args, CanonType);
TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
@@ -1915,7 +2199,7 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgumentListInfo &Args,
- QualType Canon) {
+ QualType Canon) const {
unsigned NumArgs = Args.size();
llvm::SmallVector<TemplateArgument, 4> ArgVec;
@@ -1931,7 +2215,7 @@ QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgument *Args,
unsigned NumArgs,
- QualType Canon) {
+ QualType Canon) const {
if (!Canon.isNull())
Canon = getCanonicalType(Canon);
else
@@ -1955,7 +2239,7 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
QualType
ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
const TemplateArgument *Args,
- unsigned NumArgs) {
+ unsigned NumArgs) const {
// Build the canonical template specialization type.
TemplateName CanonTemplate = getCanonicalTemplateName(Template);
llvm::SmallVector<TemplateArgument, 4> CanonArgs;
@@ -1993,7 +2277,7 @@ ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
- QualType NamedType) {
+ QualType NamedType) const {
llvm::FoldingSetNodeID ID;
ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
@@ -2016,10 +2300,34 @@ ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
return QualType(T, 0);
}
+QualType
+ASTContext::getParenType(QualType InnerType) const {
+ llvm::FoldingSetNodeID ID;
+ ParenType::Profile(ID, InnerType);
+
+ void *InsertPos = 0;
+ ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = InnerType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(InnerType);
+ ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Paren canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this) ParenType(InnerType, Canon);
+ Types.push_back(T);
+ ParenTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
- QualType Canon) {
+ QualType Canon) const {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
if (Canon.isNull()) {
@@ -2052,7 +2360,7 @@ ASTContext::getDependentTemplateSpecializationType(
ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
- const TemplateArgumentListInfo &Args) {
+ const TemplateArgumentListInfo &Args) const {
// TODO: avoid this copy
llvm::SmallVector<TemplateArgument, 16> ArgCopy;
for (unsigned I = 0, E = Args.size(); I != E; ++I)
@@ -2068,7 +2376,7 @@ ASTContext::getDependentTemplateSpecializationType(
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
unsigned NumArgs,
- const TemplateArgument *Args) {
+ const TemplateArgument *Args) const {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
llvm::FoldingSetNodeID ID;
@@ -2114,6 +2422,33 @@ ASTContext::getDependentTemplateSpecializationType(
return QualType(T, 0);
}
+QualType ASTContext::getPackExpansionType(QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions) {
+ llvm::FoldingSetNodeID ID;
+ PackExpansionType::Profile(ID, Pattern, NumExpansions);
+
+ assert(Pattern->containsUnexpandedParameterPack() &&
+ "Pack expansions must expand one or more parameter packs");
+ void *InsertPos = 0;
+ PackExpansionType *T
+ = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon;
+ if (!Pattern.isCanonical()) {
+ Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);
+
+ // Find the insert position again.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
+ Types.push_back(T);
+ PackExpansionTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
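For context, a hedged example of the C++0x source this node models: in the declarations below, the pattern 'Ts' contains an unexpanded parameter pack, so each 'Ts...' is represented as a PackExpansionType wrapping it (the names are illustrative only).

template <typename ...Ts> struct tuple {};
template <typename ...Ts> tuple<Ts...> make_tuple(Ts ...args);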
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
@@ -2145,7 +2480,7 @@ static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
QualType ASTContext::getObjCObjectType(QualType BaseType,
ObjCProtocolDecl * const *Protocols,
- unsigned NumProtocols) {
+ unsigned NumProtocols) const {
// If the base type is an interface and there aren't any protocols
// to add, then the interface type will do just fine.
if (!NumProtocols && isa<ObjCInterfaceType>(BaseType))
@@ -2193,7 +2528,7 @@ QualType ASTContext::getObjCObjectType(QualType BaseType,
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
-QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) {
+QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
llvm::FoldingSetNodeID ID;
ObjCObjectPointerType::Profile(ID, ObjectT);
@@ -2223,7 +2558,7 @@ QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) {
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
-QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) {
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) const {
if (Decl->TypeForDecl)
return QualType(Decl->TypeForDecl, 0);
@@ -2240,7 +2575,7 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) {
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
-QualType ASTContext::getTypeOfExprType(Expr *tofExpr) {
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
TypeOfExprType *toe;
if (tofExpr->isTypeDependent()) {
llvm::FoldingSetNodeID ID;
@@ -2275,7 +2610,7 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr) {
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
-QualType ASTContext::getTypeOfType(QualType tofType) {
+QualType ASTContext::getTypeOfType(QualType tofType) const {
QualType Canonical = getCanonicalType(tofType);
TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
Types.push_back(tot);
@@ -2284,7 +2619,7 @@ QualType ASTContext::getTypeOfType(QualType tofType) {
/// getDecltypeForExpr - Given an expr, will return the decltype for that
/// expression, according to the rules in C++0x [dcl.type.simple]p4
-static QualType getDecltypeForExpr(const Expr *e, ASTContext &Context) {
+static QualType getDecltypeForExpr(const Expr *e, const ASTContext &Context) {
if (e->isTypeDependent())
return Context.DependentTy;
@@ -2308,7 +2643,7 @@ static QualType getDecltypeForExpr(const Expr *e, ASTContext &Context) {
// Otherwise, where T is the type of e, if e is an lvalue, decltype(e) is
// defined as T&, otherwise decltype(e) is defined as T.
- if (e->isLvalue(Context) == Expr::LV_Valid)
+ if (e->isLValue())
T = Context.getLValueReferenceType(T);
return T;
@@ -2319,7 +2654,7 @@ static QualType getDecltypeForExpr(const Expr *e, ASTContext &Context) {
/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
-QualType ASTContext::getDecltypeType(Expr *e) {
+QualType ASTContext::getDecltypeType(Expr *e) const {
DecltypeType *dt;
if (e->isTypeDependent()) {
llvm::FoldingSetNodeID ID;
@@ -2348,9 +2683,17 @@ QualType ASTContext::getDecltypeType(Expr *e) {
return QualType(dt, 0);
}
+/// getAutoType - Unlike many "get<Type>" functions, we don't unique
+/// AutoType ASTs.
+QualType ASTContext::getAutoType(QualType DeducedType) const {
+ AutoType *at = new (*this, TypeAlignment) AutoType(DeducedType);
+ Types.push_back(at);
+ return QualType(at, 0);
+}
+
/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
-QualType ASTContext::getTagDeclType(const TagDecl *Decl) {
+QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
assert (Decl);
// FIXME: What is the design on getTagDeclType when it requires casting
// away const? mutable?
@@ -2388,12 +2731,12 @@ QualType ASTContext::getPointerDiffType() const {
// Type Operators
//===----------------------------------------------------------------------===//
-CanQualType ASTContext::getCanonicalParamType(QualType T) {
+CanQualType ASTContext::getCanonicalParamType(QualType T) const {
// Push qualifiers into arrays, and then discard any remaining
// qualifiers.
T = getCanonicalType(T);
+ T = getVariableArrayDecayedType(T);
const Type *Ty = T.getTypePtr();
-
QualType Result;
if (isa<ArrayType>(Ty)) {
Result = getArrayDecayedType(QualType(Ty,0));
@@ -2406,97 +2749,59 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) {
return CanQualType::CreateUnsafe(Result);
}
-/// getCanonicalType - Return the canonical (structural) type corresponding to
-/// the specified potentially non-canonical type. The non-canonical version
-/// of a type may have many "decorated" versions of types. Decorators can
-/// include typedefs, 'typeof' operators, etc. The returned type is guaranteed
-/// to be free of any of these, allowing two canonical types to be compared
-/// for exact equality with a simple pointer comparison.
-CanQualType ASTContext::getCanonicalType(QualType T) {
- QualifierCollector Quals;
- const Type *Ptr = Quals.strip(T);
- QualType CanType = Ptr->getCanonicalTypeInternal();
-
- // The canonical internal type will be the canonical type *except*
- // that we push type qualifiers down through array types.
-
- // If there are no new qualifiers to push down, stop here.
- if (!Quals.hasQualifiers())
- return CanQualType::CreateUnsafe(CanType);
-
- // If the type qualifiers are on an array type, get the canonical
- // type of the array with the qualifiers applied to the element
- // type.
- ArrayType *AT = dyn_cast<ArrayType>(CanType);
- if (!AT)
- return CanQualType::CreateUnsafe(getQualifiedType(CanType, Quals));
-
- // Get the canonical version of the element with the extra qualifiers on it.
- // This can recursively sink qualifiers through multiple levels of arrays.
- QualType NewEltTy = getQualifiedType(AT->getElementType(), Quals);
- NewEltTy = getCanonicalType(NewEltTy);
-
- if (ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
- return CanQualType::CreateUnsafe(
- getConstantArrayType(NewEltTy, CAT->getSize(),
- CAT->getSizeModifier(),
- CAT->getIndexTypeCVRQualifiers()));
- if (IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT))
- return CanQualType::CreateUnsafe(
- getIncompleteArrayType(NewEltTy, IAT->getSizeModifier(),
- IAT->getIndexTypeCVRQualifiers()));
-
- if (DependentSizedArrayType *DSAT = dyn_cast<DependentSizedArrayType>(AT))
- return CanQualType::CreateUnsafe(
- getDependentSizedArrayType(NewEltTy,
- DSAT->getSizeExpr() ?
- DSAT->getSizeExpr()->Retain() : 0,
- DSAT->getSizeModifier(),
- DSAT->getIndexTypeCVRQualifiers(),
- DSAT->getBracketsRange())->getCanonicalTypeInternal());
-
- VariableArrayType *VAT = cast<VariableArrayType>(AT);
- return CanQualType::CreateUnsafe(getVariableArrayType(NewEltTy,
- VAT->getSizeExpr() ?
- VAT->getSizeExpr()->Retain() : 0,
- VAT->getSizeModifier(),
- VAT->getIndexTypeCVRQualifiers(),
- VAT->getBracketsRange()));
-}
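The behavior the removed code implemented is easiest to see with a small example: during canonicalization, qualifiers sink through array types into the element type, so both declarations below share the canonical type 'const int [5]' and compare equal with a simple pointer comparison.

typedef int A5[5];
extern const A5 a;       // spelled 'const A5'
extern const int b[5];   // spelled 'const int [5]'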
-QualType ASTContext::getUnqualifiedArrayType(QualType T,
- Qualifiers &Quals) {
- Quals = T.getQualifiers();
- const ArrayType *AT = getAsArrayType(T);
+QualType ASTContext::getUnqualifiedArrayType(QualType type,
+ Qualifiers &quals) {
+ SplitQualType splitType = type.getSplitUnqualifiedType();
+
+ // FIXME: getSplitUnqualifiedType() actually walks all the way to
+ // the unqualified desugared type and then drops it on the floor.
+ // We then have to strip that sugar back off with
+ // getUnqualifiedDesugaredType(), which is silly.
+ const ArrayType *AT =
+ dyn_cast<ArrayType>(splitType.first->getUnqualifiedDesugaredType());
+
+ // If we don't have an array, just use the results in splitType.
if (!AT) {
- return T.getUnqualifiedType();
+ quals = splitType.second;
+ return QualType(splitType.first, 0);
}
- QualType Elt = AT->getElementType();
- QualType UnqualElt = getUnqualifiedArrayType(Elt, Quals);
- if (Elt == UnqualElt)
- return T;
+ // Otherwise, recurse on the array's element type.
+ QualType elementType = AT->getElementType();
+ QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
+
+ // If that didn't change the element type, AT has no qualifiers, so we
+ // can just use the results in splitType.
+ if (elementType == unqualElementType) {
+ assert(quals.empty()); // from the recursive call
+ quals = splitType.second;
+ return QualType(splitType.first, 0);
+ }
+
+ // Otherwise, add in the qualifiers from the outermost type, then
+ // build the type back up.
+ quals.addConsistentQualifiers(splitType.second);
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
- return getConstantArrayType(UnqualElt, CAT->getSize(),
+ return getConstantArrayType(unqualElementType, CAT->getSize(),
CAT->getSizeModifier(), 0);
}
if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
- return getIncompleteArrayType(UnqualElt, IAT->getSizeModifier(), 0);
+ return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
}
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
- return getVariableArrayType(UnqualElt,
- VAT->getSizeExpr() ?
- VAT->getSizeExpr()->Retain() : 0,
+ return getVariableArrayType(unqualElementType,
+ VAT->getSizeExpr(),
VAT->getSizeModifier(),
VAT->getIndexTypeCVRQualifiers(),
VAT->getBracketsRange());
}
const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
- return getDependentSizedArrayType(UnqualElt, DSAT->getSizeExpr()->Retain(),
+ return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
DSAT->getSizeModifier(), 0,
SourceRange());
}
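A worked example for the rewritten routine (illustrative values):

// input:  type = 'const volatile int [3][4]'
// output: returns 'int [3][4]', quals = {const, volatile}
// Qualifiers discovered at each array level are merged with
// addConsistentQualifiers and left out of the rebuilt array type.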
@@ -2543,8 +2848,9 @@ bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
return false;
}
-DeclarationNameInfo ASTContext::getNameForTemplate(TemplateName Name,
- SourceLocation NameLoc) {
+DeclarationNameInfo
+ASTContext::getNameForTemplate(TemplateName Name,
+ SourceLocation NameLoc) const {
if (TemplateDecl *TD = Name.getAsTemplateDecl())
// DNInfo work in progress: CHECKME: what about DNLoc?
return DeclarationNameInfo(TD->getDeclName(), NameLoc);
@@ -2570,7 +2876,7 @@ DeclarationNameInfo ASTContext::getNameForTemplate(TemplateName Name,
return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
}
-TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) {
+TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Template))
@@ -2580,6 +2886,15 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) {
return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
}
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ TemplateTemplateParmDecl *CanonParam
+ = getCanonicalTemplateTemplateParmDecl(SubstPack->getParameterPack());
+ TemplateArgument CanonArgPack
+ = getCanonicalTemplateArgument(SubstPack->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(CanonParam, CanonArgPack);
+ }
+
assert(!Name.getAsOverloadedTemplate());
DependentTemplateName *DTN = Name.getAsDependentTemplateName();
@@ -2594,7 +2909,7 @@ bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
}
TemplateArgument
-ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) {
+ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
switch (Arg.getKind()) {
case TemplateArgument::Null:
return Arg;
@@ -2607,7 +2922,12 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) {
case TemplateArgument::Template:
return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
-
+
+ case TemplateArgument::TemplateExpansion:
+ return TemplateArgument(getCanonicalTemplateName(
+ Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions());
+
case TemplateArgument::Integral:
return TemplateArgument(*Arg.getAsIntegral(),
getCanonicalType(Arg.getIntegralType()));
@@ -2616,17 +2936,18 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) {
return TemplateArgument(getCanonicalType(Arg.getAsType()));
case TemplateArgument::Pack: {
- // FIXME: Allocate in ASTContext
- TemplateArgument *CanonArgs = new TemplateArgument[Arg.pack_size()];
+ if (Arg.pack_size() == 0)
+ return Arg;
+
+ TemplateArgument *CanonArgs
+ = new (*this) TemplateArgument[Arg.pack_size()];
unsigned Idx = 0;
for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
AEnd = Arg.pack_end();
A != AEnd; (void)++A, ++Idx)
CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
- TemplateArgument Result;
- Result.setArgumentPack(CanonArgs, Arg.pack_size(), false);
- return Result;
+ return TemplateArgument(CanonArgs, Arg.pack_size());
}
}
@@ -2636,7 +2957,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) {
}
NestedNameSpecifier *
-ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
if (!NNS)
return 0;
@@ -2655,9 +2976,35 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) {
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
- return NestedNameSpecifier::Create(*this, 0,
- NNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate,
- T.getTypePtr());
+
+ // If we have some kind of dependent-named type (e.g., "typename T::type"),
+ // break it apart into its prefix and identifier, then reconstitute those
+ // as the canonical nested-name-specifier. This is required to canonicalize
+ // a dependent nested-name-specifier involving typedefs of dependent-name
+ // types, e.g.,
+ // typedef typename T::type T1;
+ // typedef typename T1::type T2;
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
+ NestedNameSpecifier *Prefix
+ = getCanonicalNestedNameSpecifier(DNT->getQualifier());
+ return NestedNameSpecifier::Create(*this, Prefix,
+ const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+ }
+
+ // Do the same thing as above, but with dependent-named specializations.
+ if (const DependentTemplateSpecializationType *DTST
+ = T->getAs<DependentTemplateSpecializationType>()) {
+ NestedNameSpecifier *Prefix
+ = getCanonicalNestedNameSpecifier(DTST->getQualifier());
+ TemplateName Name
+ = getDependentTemplateName(Prefix, DTST->getIdentifier());
+ T = getTemplateSpecializationType(Name,
+ DTST->getArgs(), DTST->getNumArgs());
+ T = getCanonicalType(T);
+ }
+
+ return NestedNameSpecifier::Create(*this, 0, false,
+ const_cast<Type*>(T.getTypePtr()));
}
case NestedNameSpecifier::Global:
@@ -2670,7 +3017,7 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) {
}
-const ArrayType *ASTContext::getAsArrayType(QualType T) {
+const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// Handle the non-qualified case efficiently.
if (!T.hasLocalQualifiers()) {
// Handle the common positive case fast.
@@ -2679,8 +3026,7 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) {
}
// Handle the common negative case fast.
- QualType CType = T->getCanonicalTypeInternal();
- if (!isa<ArrayType>(CType))
+ if (!isa<ArrayType>(T.getCanonicalType()))
return 0;
// Apply any qualifiers from the array type to the element type. This
@@ -2691,19 +3037,17 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) {
// sugar such as a typedef in the way. If we have type qualifiers on the type
// we must propagate them down into the element type.
- QualifierCollector Qs;
- const Type *Ty = Qs.strip(T.getDesugaredType());
+ SplitQualType split = T.getSplitDesugaredType();
+ Qualifiers qs = split.second;
// If we have a simple case, just return now.
- const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
- if (ATy == 0 || Qs.empty())
+ const ArrayType *ATy = dyn_cast<ArrayType>(split.first);
+ if (ATy == 0 || qs.empty())
return ATy;
// Otherwise, we have an array and we have qualifiers on it. Push the
// qualifiers into the array element type and return a new array type.
- // Get the canonical version of the element with the extra qualifiers on it.
- // This can recursively sink qualifiers through multiple levels of arrays.
- QualType NewEltTy = getQualifiedType(ATy->getElementType(), Qs);
+ QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
@@ -2718,29 +3062,26 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) {
= dyn_cast<DependentSizedArrayType>(ATy))
return cast<ArrayType>(
getDependentSizedArrayType(NewEltTy,
- DSAT->getSizeExpr() ?
- DSAT->getSizeExpr()->Retain() : 0,
+ DSAT->getSizeExpr(),
DSAT->getSizeModifier(),
DSAT->getIndexTypeCVRQualifiers(),
DSAT->getBracketsRange()));
const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
return cast<ArrayType>(getVariableArrayType(NewEltTy,
- VAT->getSizeExpr() ?
- VAT->getSizeExpr()->Retain() : 0,
+ VAT->getSizeExpr(),
VAT->getSizeModifier(),
VAT->getIndexTypeCVRQualifiers(),
VAT->getBracketsRange()));
}
-
/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type;
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
-QualType ASTContext::getArrayDecayedType(QualType Ty) {
+QualType ASTContext::getArrayDecayedType(QualType Ty) const {
// Get the element type with 'getAsArrayType' so that we don't lose any
// typedefs in the element type of the array. This also handles propagation
// of type qualifiers from the array type into the element type if present
@@ -2754,20 +3095,22 @@ QualType ASTContext::getArrayDecayedType(QualType Ty) {
return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
}
-QualType ASTContext::getBaseElementType(QualType QT) {
- QualifierCollector Qs;
- while (const ArrayType *AT = getAsArrayType(QualType(Qs.strip(QT), 0)))
- QT = AT->getElementType();
- return Qs.apply(QT);
+QualType ASTContext::getBaseElementType(const ArrayType *array) const {
+ return getBaseElementType(array->getElementType());
}
-QualType ASTContext::getBaseElementType(const ArrayType *AT) {
- QualType ElemTy = AT->getElementType();
+QualType ASTContext::getBaseElementType(QualType type) const {
+ Qualifiers qs;
+ while (true) {
+ SplitQualType split = type.getSplitDesugaredType();
+ const ArrayType *array = split.first->getAsArrayTypeUnsafe();
+ if (!array) break;
- if (const ArrayType *AT = getAsArrayType(ElemTy))
- return getBaseElementType(AT);
+ type = array->getElementType();
+ qs.addConsistentQualifiers(split.second);
+ }
- return ElemTy;
+ return getQualifiedType(type, qs);
}
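A worked example for getBaseElementType (illustrative):

// input:  'const int [2][3]'
// The loop peels both array levels, accumulating 'const' from the
// split qualifiers, and returns 'const int' as the base element type.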
/// getConstantArrayElementCount - Returns number of constant array elements.
@@ -2825,7 +3168,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
-int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) {
+int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
FloatingRank LHSR = getFloatingRank(LHS);
FloatingRank RHSR = getFloatingRank(RHS);
@@ -2839,12 +3182,13 @@ int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) {
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
-unsigned ASTContext::getIntegerRank(Type *T) {
+unsigned ASTContext::getIntegerRank(const Type *T) const {
assert(T->isCanonicalUnqualified() && "T should be canonicalized");
- if (EnumType* ET = dyn_cast<EnumType>(T))
+ if (const EnumType* ET = dyn_cast<EnumType>(T))
T = ET->getDecl()->getPromotionType().getTypePtr();
- if (T->isSpecificBuiltinType(BuiltinType::WChar))
+ if (T->isSpecificBuiltinType(BuiltinType::WChar_S) ||
+ T->isSpecificBuiltinType(BuiltinType::WChar_U))
T = getFromTargetType(Target.getWCharType()).getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Char16))
@@ -2885,7 +3229,7 @@ unsigned ASTContext::getIntegerRank(Type *T) {
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
-QualType ASTContext::isPromotableBitField(Expr *E) {
+QualType ASTContext::isPromotableBitField(Expr *E) const {
if (E->isTypeDependent() || E->isValueDependent())
return QualType();
@@ -2917,7 +3261,7 @@ QualType ASTContext::isPromotableBitField(Expr *E) {
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
-QualType ASTContext::getPromotedIntegerType(QualType Promotable) {
+QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(!Promotable.isNull());
assert(Promotable->isPromotableIntegerType());
if (const EnumType *ET = Promotable->getAs<EnumType>())
@@ -2933,9 +3277,9 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) {
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
-int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) {
- Type *LHSC = getCanonicalType(LHS).getTypePtr();
- Type *RHSC = getCanonicalType(RHS).getTypePtr();
+int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
+ const Type *LHSC = getCanonicalType(LHS).getTypePtr();
+ const Type *RHSC = getCanonicalType(RHS).getTypePtr();
if (LHSC == RHSC) return 0;
bool LHSUnsigned = LHSC->isUnsignedIntegerType();
@@ -2972,7 +3316,7 @@ int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) {
}
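Illustrative results for getIntegerTypeOrder (ranks follow C99 6.3.1.1p1 and do not depend on target sizes):

// getIntegerTypeOrder(int,  long) == -1   // long has greater rank
// getIntegerTypeOrder(long, int)  ==  1
// getIntegerTypeOrder(int,  int)  ==  0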
static RecordDecl *
-CreateRecordDecl(ASTContext &Ctx, RecordDecl::TagKind TK, DeclContext *DC,
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id) {
if (Ctx.getLangOptions().CPlusPlus)
return CXXRecordDecl::Create(Ctx, TK, DC, L, Id);
@@ -2981,7 +3325,7 @@ CreateRecordDecl(ASTContext &Ctx, RecordDecl::TagKind TK, DeclContext *DC,
}
// getCFConstantStringType - Return the type used for constant CFStrings.
-QualType ASTContext::getCFConstantStringType() {
+QualType ASTContext::getCFConstantStringType() const {
if (!CFConstantStringTypeDecl) {
CFConstantStringTypeDecl =
CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
@@ -3023,7 +3367,7 @@ void ASTContext::setCFConstantStringType(QualType T) {
}
// getNSConstantStringType - Return the type used for constant NSStrings.
-QualType ASTContext::getNSConstantStringType() {
+QualType ASTContext::getNSConstantStringType() const {
if (!NSConstantStringTypeDecl) {
NSConstantStringTypeDecl =
CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
@@ -3062,7 +3406,7 @@ void ASTContext::setNSConstantStringType(QualType T) {
NSConstantStringTypeDecl = Rec->getDecl();
}
-QualType ASTContext::getObjCFastEnumerationStateType() {
+QualType ASTContext::getObjCFastEnumerationStateType() const {
if (!ObjCFastEnumerationStateTypeDecl) {
ObjCFastEnumerationStateTypeDecl =
CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
@@ -3087,10 +3431,6 @@ QualType ASTContext::getObjCFastEnumerationStateType() {
Field->setAccess(AS_public);
ObjCFastEnumerationStateTypeDecl->addDecl(Field);
}
- if (getLangOptions().CPlusPlus)
- if (CXXRecordDecl *CXXRD =
- dyn_cast<CXXRecordDecl>(ObjCFastEnumerationStateTypeDecl))
- CXXRD->setEmpty(false);
ObjCFastEnumerationStateTypeDecl->completeDefinition();
}
@@ -3098,7 +3438,7 @@ QualType ASTContext::getObjCFastEnumerationStateType() {
return getTagDeclType(ObjCFastEnumerationStateTypeDecl);
}
-QualType ASTContext::getBlockDescriptorType() {
+QualType ASTContext::getBlockDescriptorType() const {
if (BlockDescriptorType)
return getTagDeclType(BlockDescriptorType);
@@ -3143,7 +3483,7 @@ void ASTContext::setBlockDescriptorType(QualType T) {
BlockDescriptorType = Rec->getDecl();
}
-QualType ASTContext::getBlockDescriptorExtendedType() {
+QualType ASTContext::getBlockDescriptorExtendedType() const {
if (BlockDescriptorExtendedType)
return getTagDeclType(BlockDescriptorExtendedType);
@@ -3192,17 +3532,25 @@ void ASTContext::setBlockDescriptorExtendedType(QualType T) {
BlockDescriptorExtendedType = Rec->getDecl();
}
-bool ASTContext::BlockRequiresCopying(QualType Ty) {
+bool ASTContext::BlockRequiresCopying(QualType Ty) const {
if (Ty->isBlockPointerType())
return true;
if (isObjCNSObjectType(Ty))
return true;
if (Ty->isObjCObjectPointerType())
return true;
+ if (getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return RD->hasConstCopyConstructor(*this);
+
+ }
+ }
return false;
}
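Hedged examples of captured types and what BlockRequiresCopying now returns for them:

// void (^b)(void);   // block pointer                 -> true
// id obj;            // ObjC object pointer           -> true
// struct S s;        // C++ class: whatever the new
//                    // RD->hasConstCopyConstructor(*this) check reports
// int i;             // scalar                        -> false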
-QualType ASTContext::BuildByRefType(llvm::StringRef DeclName, QualType Ty) {
+QualType
+ASTContext::BuildByRefType(llvm::StringRef DeclName, QualType Ty) const {
// type = struct __Block_byref_1_X {
// void *__isa;
// struct __Block_byref_1_X *__forwarding;
@@ -3264,7 +3612,7 @@ QualType ASTContext::BuildByRefType(llvm::StringRef DeclName, QualType Ty) {
QualType ASTContext::getBlockParmType(
bool BlockHasCopyDispose,
- llvm::SmallVectorImpl<const Expr *> &Layout) {
+ llvm::SmallVectorImpl<const Expr *> &Layout) const {
// FIXME: Move up
llvm::SmallString<36> Name;
@@ -3351,7 +3699,7 @@ static bool isTypeTypedefedAsBOOL(QualType T) {
/// getObjCEncodingTypeSize returns the size of a type for Objective-C
/// encoding purposes.
-CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) {
+CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
CharUnits sz = getTypeSizeInChars(type);
// Make all integer and enum types at least as large as an int
@@ -3368,10 +3716,11 @@ std::string charUnitsToString(const CharUnits &CU) {
return llvm::itostr(CU.getQuantity());
}
-/// getObjCEncodingForBlockDecl - Return the encoded type for this block
+/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
-void ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr,
- std::string& S) {
+std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
+ std::string S;
+
const BlockDecl *Decl = Expr->getBlockDecl();
QualType BlockTy =
Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
@@ -3414,12 +3763,50 @@ void ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr,
S += charUnitsToString(ParmOffset);
ParmOffset += getObjCEncodingTypeSize(PType);
}
+
+ return S;
+}
+
+void ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
+ std::string& S) {
+ // Encode result type.
+ getObjCEncodingForType(Decl->getResultType(), S);
+ CharUnits ParmOffset;
+ // Compute size of all parameters.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ assert(sz.isPositive() &&
+ "getObjCEncodingForFunctionDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ ParmOffset = CharUnits::Zero();
+
+ // Argument types.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use array's original type only if it has known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
}
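A hedged example of the string this new routine builds, assuming a hypothetical target with 4-byte int and 8-byte pointers:

// void f(int, char *)  ->  "v12i0*4"
//   'v'  : void result
//   '12' : total parameter size in bytes
//   'i0' : int at offset 0;  '*4' : char* at offset 4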
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
void ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
- std::string& S) {
+ std::string& S) const {
// FIXME: This is not very efficient.
// Encode type qualifer, 'in', 'inout', etc. for the return type.
getObjCEncodingForTypeQualifier(Decl->getObjCDeclQualifier(), S);
@@ -3495,7 +3882,7 @@ void ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
/// @endcode
void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
const Decl *Container,
- std::string& S) {
+ std::string& S) const {
// Collect information from the property implementation decl(s).
bool Dynamic = false;
ObjCPropertyImplDecl *SynthesizePID = 0;
@@ -3588,19 +3975,17 @@ void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
if (isa<TypedefType>(PointeeTy.getTypePtr())) {
if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::ULong &&
- ((const_cast<ASTContext *>(this))->getIntWidth(PointeeTy) == 32))
+ if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
PointeeTy = UnsignedIntTy;
else
- if (BT->getKind() == BuiltinType::Long &&
- ((const_cast<ASTContext *>(this))->getIntWidth(PointeeTy) == 32))
+ if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
PointeeTy = IntTy;
}
}
}
void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
- const FieldDecl *Field) {
+ const FieldDecl *Field) const {
// We follow the behavior of gcc, expanding structures which are
// directly pointed to, and expanding embedded structures. Note that
// these rules are sufficient to prevent recursive encoding of the
@@ -3619,31 +4004,29 @@ static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) {
case BuiltinType::UShort: return 'S';
case BuiltinType::UInt: return 'I';
case BuiltinType::ULong:
- return
- (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'L' : 'Q';
+ return C->getIntWidth(T) == 32 ? 'L' : 'Q';
case BuiltinType::UInt128: return 'T';
case BuiltinType::ULongLong: return 'Q';
case BuiltinType::Char_S:
case BuiltinType::SChar: return 'c';
case BuiltinType::Short: return 's';
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Int: return 'i';
case BuiltinType::Long:
- return
- (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'l' : 'q';
+ return C->getIntWidth(T) == 32 ? 'l' : 'q';
case BuiltinType::LongLong: return 'q';
case BuiltinType::Int128: return 't';
case BuiltinType::Float: return 'f';
case BuiltinType::Double: return 'd';
- case BuiltinType::LongDouble: return 'd';
+ case BuiltinType::LongDouble: return 'D';
}
}
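A few @encode results implied by the table above (illustrative):

// @encode(unsigned short) == "S"
// @encode(float)          == "f"
// @encode(long double)    == "D"   // was "d" before this change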
-static void EncodeBitField(const ASTContext *Context, std::string& S,
+static void EncodeBitField(const ASTContext *Ctx, std::string& S,
QualType T, const FieldDecl *FD) {
const Expr *E = FD->getBitWidth();
assert(E && "bitfield width not there - getObjCEncodingForTypeImpl");
- ASTContext *Ctx = const_cast<ASTContext*>(Context);
S += 'b';
// The NeXT runtime encodes bit fields as b followed by the number of bits.
// The GNU runtime requires more information; bitfields are encoded as b,
@@ -3674,7 +4057,10 @@ static void EncodeBitField(const ASTContext *Context, std::string& S,
break;
}
S += llvm::utostr(RL.getFieldOffset(i));
- S += ObjCEncodingForPrimitiveKind(Context, T);
+ if (T->isEnumeralType())
+ S += 'i';
+ else
+ S += ObjCEncodingForPrimitiveKind(Ctx, T);
}
unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
S += llvm::utostr(N);
@@ -3686,7 +4072,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
bool ExpandStructures,
const FieldDecl *FD,
bool OutermostType,
- bool EncodingProperty) {
+ bool EncodingProperty) const {
if (T->getAs<BuiltinType>()) {
if (FD && FD->isBitField())
return EncodeBitField(this, S, T, FD);
@@ -3811,8 +4197,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
std::string TemplateArgsStr
= TemplateSpecializationType::PrintTemplateArgumentList(
- TemplateArgs.getFlatArgumentList(),
- TemplateArgs.flat_size(),
+ TemplateArgs.data(),
+ TemplateArgs.size(),
(*this).PrintingPolicy);
S += TemplateArgsStr;
@@ -3846,7 +4232,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
S += RDecl->isUnion() ? ')' : '}';
return;
}
-
+
if (T->isEnumeralType()) {
if (FD && FD->isBitField())
EncodeBitField(this, S, T, FD);
@@ -3949,7 +4335,14 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// TODO: maybe there should be a mangling for these
if (T->getAs<MemberPointerType>())
return;
-
+
+ if (T->isVectorType()) {
+ // This matches gcc's encoding, even though technically it is
+ // insufficient.
+ // FIXME. We should do a better job than gcc.
+ return;
+ }
+
assert(0 && "@encode for type not implemented!");
}
@@ -4000,8 +4393,9 @@ void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
/// \brief Retrieve the template name that corresponds to a non-empty
/// lookup.
-TemplateName ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
+TemplateName
+ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) const {
unsigned size = End - Begin;
assert(size > 1 && "set is not overloaded!");
@@ -4023,9 +4417,10 @@ TemplateName ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
/// \brief Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
-TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
- bool TemplateKeyword,
- TemplateDecl *Template) {
+TemplateName
+ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) const {
// FIXME: Canonicalization?
llvm::FoldingSetNodeID ID;
QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
@@ -4043,8 +4438,9 @@ TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
/// \brief Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template apply.
-TemplateName ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
- const IdentifierInfo *Name) {
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) const {
assert((!NNS || NNS->isDependent()) &&
"Nested name specifier must be dependent");
@@ -4078,7 +4474,7 @@ TemplateName ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
- OverloadedOperatorKind Operator) {
+ OverloadedOperatorKind Operator) const {
assert((!NNS || NNS->isDependent()) &&
"Nested name specifier must be dependent");
@@ -4109,6 +4505,27 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
return TemplateName(QTN);
}
+TemplateName
+ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const {
+ ASTContext &Self = const_cast<ASTContext &>(*this);
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
+
+ void *InsertPos = 0;
+ SubstTemplateTemplateParmPackStorage *Subst
+ = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Subst) {
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(Self, Param,
+ ArgPack.pack_size(),
+ ArgPack.pack_begin());
+ SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
+ }
+
+ return TemplateName(Subst);
+}
+
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
@@ -4139,7 +4556,7 @@ CanQualType ASTContext::getFromTargetType(unsigned Type) const {
/// FIXME: Move to Type.
///
bool ASTContext::isObjCNSObjectType(QualType Ty) const {
- if (TypedefType *TDT = dyn_cast<TypedefType>(Ty)) {
+ if (const TypedefType *TDT = dyn_cast<TypedefType>(Ty)) {
if (TypedefDecl *TD = TDT->getDecl())
if (TD->getAttr<ObjCNSObjectAttr>())
return true;
@@ -4150,24 +4567,30 @@ bool ASTContext::isObjCNSObjectType(QualType Ty) const {
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
-Qualifiers::GC ASTContext::getObjCGCAttrKind(const QualType &Ty) const {
- Qualifiers::GC GCAttrs = Qualifiers::GCNone;
- if (getLangOptions().ObjC1 &&
- getLangOptions().getGCMode() != LangOptions::NonGC) {
- GCAttrs = Ty.getObjCGCAttr();
- // Default behavious under objective-c's gc is for objective-c pointers
- // (or pointers to them) be treated as though they were declared
- // as __strong.
- if (GCAttrs == Qualifiers::GCNone) {
- if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
- GCAttrs = Qualifiers::Strong;
- else if (Ty->isPointerType())
- return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
- }
- // Non-pointers have none gc'able attribute regardless of the attribute
- // set on them.
- else if (!Ty->isAnyPointerType() && !Ty->isBlockPointerType())
- return Qualifiers::GCNone;
+Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
+ if (getLangOptions().getGCMode() == LangOptions::NonGC)
+ return Qualifiers::GCNone;
+
+ assert(getLangOptions().ObjC1);
+ Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
+
+ // Default behaviour under objective-C's gc is for ObjC pointers
+ // (or pointers to them) to be treated as though they were declared
+ // as __strong.
+ if (GCAttrs == Qualifiers::GCNone) {
+ if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
+ return Qualifiers::Strong;
+ else if (Ty->isPointerType())
+ return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
+ } else {
+ // It's not valid to set GC attributes on anything that isn't a
+ // pointer.
+#ifndef NDEBUG
+ QualType CT = Ty->getCanonicalTypeInternal();
+ while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
+ CT = AT->getElementType();
+ assert(CT->isAnyPointerType() || CT->isBlockPointerType());
+#endif
}
return GCAttrs;
}
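Illustrative results for the rewritten getObjCGCAttrKind under -fobjc-gc:

// id x;          -> Qualifiers::Strong  (ObjC pointers default to __strong)
// __weak id y;   -> Qualifiers::Weak    (an explicit attribute is returned as-is)
// int *p;        -> Qualifiers::GCNone  (recurses to the pointee, an int)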
@@ -4193,15 +4616,16 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
if (hasSameUnqualifiedType(FirstVec, SecondVec))
return true;
- // AltiVec vectors types are identical to equivalent GCC vector types
+ // Treat Neon vector types and most AltiVec vector types as if they are the
+ // equivalent GCC vector types.
const VectorType *First = FirstVec->getAs<VectorType>();
const VectorType *Second = SecondVec->getAs<VectorType>();
- if ((((First->getAltiVecSpecific() == VectorType::AltiVec) &&
- (Second->getAltiVecSpecific() == VectorType::NotAltiVec)) ||
- ((First->getAltiVecSpecific() == VectorType::NotAltiVec) &&
- (Second->getAltiVecSpecific() == VectorType::AltiVec))) &&
+ if (First->getNumElements() == Second->getNumElements() &&
hasSameType(First->getElementType(), Second->getElementType()) &&
- (First->getNumElements() == Second->getNumElements()))
+ First->getVectorKind() != VectorType::AltiVecPixel &&
+ First->getVectorKind() != VectorType::AltiVecBool &&
+ Second->getVectorKind() != VectorType::AltiVecPixel &&
+ Second->getVectorKind() != VectorType::AltiVecBool)
return true;
return false;
@@ -4213,8 +4637,9 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
/// inheritance hierarchy of 'rProto'.
-bool ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
- ObjCProtocolDecl *rProto) {
+bool
+ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) const {
if (lProto == rProto)
return true;
for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
@@ -4336,33 +4761,17 @@ bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
if (const ObjCObjectPointerType *lhsOPT =
lhs->getAsObjCInterfacePointerType()) {
- if (lhsOPT->qual_empty()) {
- bool match = false;
- if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
- for (ObjCObjectPointerType::qual_iterator I = rhsQID->qual_begin(),
- E = rhsQID->qual_end(); I != E; ++I) {
- // when comparing an id<P> on rhs with a static type on lhs,
- // static class must implement all of id's protocols directly or
- // indirectly through its super class.
- if (lhsID->ClassImplementsProtocol(*I, true)) {
- match = true;
- break;
- }
- }
- if (!match)
- return false;
- }
- return true;
- }
- // Both the right and left sides have qualifiers.
+ // If both the right and left sides have qualifiers.
for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(),
E = lhsOPT->qual_end(); I != E; ++I) {
ObjCProtocolDecl *lhsProto = *I;
bool match = false;
- // when comparing an id<P> on lhs with a static type on rhs,
+ // when comparing an id<P> on rhs with a static type on lhs,
// see if static class implements all of id's protocols, directly or
// through its super class and categories.
+ // First, lhs protocols in the qualifier list must be found, directly
+ // or indirectly, in rhs's qualifier list, or it is a mismatch.
for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
E = rhsQID->qual_end(); J != E; ++J) {
ObjCProtocolDecl *rhsProto = *J;
@@ -4375,6 +4784,35 @@ bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
if (!match)
return false;
}
+
+ // The static class's protocols, or its super class or category
+ // protocols, must be found, directly or indirectly, in rhs's
+ // qualifier list, or it is a mismatch.
+ if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
+ // This is rather dubious but matches gcc's behavior. If lhs has
+ // no type qualifiers and its class has no static protocol(s),
+ // assume that it is a mismatch.
+ if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
+ return false;
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ LHSInheritedProtocols.begin(),
+ E = LHSInheritedProtocols.end(); I != E; ++I) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = (*I);
+ for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ }
return true;
}
return false;
@@ -4600,6 +5038,49 @@ bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
return !mergeTypes(LHS, RHS, true).isNull();
}
+/// mergeTransparentUnionType - if T is a transparent union type and a member
+/// of T is compatible with SubType, return the merged type, else return
+/// QualType()
+QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ if (const RecordType *UT = T->getAsUnionType()) {
+ RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ QualType ET = it->getType().getUnqualifiedType();
+ QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
+ if (!MT.isNull())
+ return MT;
+ }
+ }
+ }
+
+ return QualType();
+}
+
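A hedged GNU C example exercising the new merge path; either member pointer type is treated as compatible with the transparent union parameter:

/* The classic use case: redeclaring with a member type. */
typedef union {
  int *ip;
  float *fp;
} __attribute__((transparent_union)) arg_t;

void take(arg_t);   /* one declaration ...              */
void take(int *);   /* ... merges with the union member */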
+/// mergeFunctionArgumentTypes - merge two types which appear as function
+/// argument types
+QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ // GNU extension: two types are compatible if they appear as a function
+ // argument, one of the types is a transparent union type and the other
+ // type is compatible with a union member
+ QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
+ Unqualified);
+ if (!lmerge.isNull())
+ return lmerge;
+
+ QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
+ Unqualified);
+ if (!rmerge.isNull())
+ return rmerge;
+
+ return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
+}
+
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
bool OfBlockPointer,
bool Unqualified) {
@@ -4612,12 +5093,17 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
// Check return type
QualType retType;
- if (OfBlockPointer)
- retType = mergeTypes(rbase->getResultType(), lbase->getResultType(), true,
- Unqualified);
+ if (OfBlockPointer) {
+ QualType RHS = rbase->getResultType();
+ QualType LHS = lbase->getResultType();
+ bool UnqualifiedResult = Unqualified;
+ if (!UnqualifiedResult)
+ UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
+ retType = mergeTypes(RHS, LHS, true, UnqualifiedResult);
+ }
else
- retType = mergeTypes(lbase->getResultType(), rbase->getResultType(),
- false, Unqualified);
+ retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false,
+ Unqualified);
if (retType.isNull()) return QualType();
if (Unqualified)
@@ -4634,26 +5120,33 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
allLTypes = false;
if (getCanonicalType(retType) != RRetType)
allRTypes = false;
+
// FIXME: double check this
// FIXME: should we error if lbase->getRegParmAttr() != 0 &&
// rbase->getRegParmAttr() != 0 &&
// lbase->getRegParmAttr() != rbase->getRegParmAttr()?
FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
- unsigned RegParm = lbaseInfo.getRegParm() == 0 ? rbaseInfo.getRegParm() :
- lbaseInfo.getRegParm();
+
+ // Compatible functions must have compatible calling conventions
+ if (!isSameCallConv(lbaseInfo.getCC(), rbaseInfo.getCC()))
+ return QualType();
+
+ // Regparm is part of the calling convention.
+ if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
+ return QualType();
+
+ // It's noreturn if either type is.
+ // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
- if (NoReturn != lbaseInfo.getNoReturn() ||
- RegParm != lbaseInfo.getRegParm())
+ if (NoReturn != lbaseInfo.getNoReturn())
allLTypes = false;
- if (NoReturn != rbaseInfo.getNoReturn() ||
- RegParm != rbaseInfo.getRegParm())
+ if (NoReturn != rbaseInfo.getNoReturn())
allRTypes = false;
- CallingConv lcc = lbaseInfo.getCC();
- CallingConv rcc = rbaseInfo.getCC();
- // Compatible functions must have compatible calling conventions
- if (!isSameCallConv(lcc, rcc))
- return QualType();
+
+ FunctionType::ExtInfo einfo(NoReturn,
+ lbaseInfo.getRegParm(),
+ lbaseInfo.getCC());
if (lproto && rproto) { // two C99 style function prototypes
assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
@@ -4677,8 +5170,9 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
for (unsigned i = 0; i < lproto_nargs; i++) {
QualType largtype = lproto->getArgType(i).getUnqualifiedType();
QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
- QualType argtype = mergeTypes(largtype, rargtype, OfBlockPointer,
- Unqualified);
+ QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype,
+ OfBlockPointer,
+ Unqualified);
if (argtype.isNull()) return QualType();
if (Unqualified)
@@ -4697,10 +5191,10 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
}
if (allLTypes) return lhs;
if (allRTypes) return rhs;
- return getFunctionType(retType, types.begin(), types.size(),
- lproto->isVariadic(), lproto->getTypeQuals(),
- false, false, 0, 0,
- FunctionType::ExtInfo(NoReturn, RegParm, lcc));
+
+ FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, types.begin(), types.size(), EPI);
}
if (lproto) allRTypes = false;
@@ -4731,17 +5225,16 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (allLTypes) return lhs;
if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
return getFunctionType(retType, proto->arg_type_begin(),
- proto->getNumArgs(), proto->isVariadic(),
- proto->getTypeQuals(),
- false, false, 0, 0,
- FunctionType::ExtInfo(NoReturn, RegParm, lcc));
+ proto->getNumArgs(), EPI);
}
if (allLTypes) return lhs;
if (allRTypes) return rhs;
- FunctionType::ExtInfo Info(NoReturn, RegParm, lcc);
- return getFunctionNoProtoType(retType, Info);
+ return getFunctionNoProtoType(retType, einfo);
}
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
@@ -5020,16 +5513,11 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
// In either case, use OldReturnType to build the new function type.
const FunctionType *F = LHS->getAs<FunctionType>();
if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) {
- FunctionType::ExtInfo Info = getFunctionExtInfo(LHS);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = getFunctionExtInfo(LHS);
QualType ResultType
= getFunctionType(OldReturnType, FPT->arg_type_begin(),
- FPT->getNumArgs(), FPT->isVariadic(),
- FPT->getTypeQuals(),
- FPT->hasExceptionSpec(),
- FPT->hasAnyExceptionSpec(),
- FPT->getNumExceptions(),
- FPT->exception_begin(),
- Info);
+ FPT->getNumArgs(), EPI);
return ResultType;
}
}
@@ -5080,11 +5568,11 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
// Integer Predicates
//===----------------------------------------------------------------------===//
-unsigned ASTContext::getIntWidth(QualType T) {
+unsigned ASTContext::getIntWidth(QualType T) const {
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
- if (EnumType *ET = dyn_cast<EnumType>(T))
- T = ET->getDecl()->getIntegerType();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
}
@@ -5095,7 +5583,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
// Turn <4 x signed int> -> <4 x unsigned int>
if (const VectorType *VTy = T->getAs<VectorType>())
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
- VTy->getNumElements(), VTy->getAltiVecSpecific());
+ VTy->getNumElements(), VTy->getVectorKind());
// For enums, we return the unsigned version of the base type.
if (const EnumType *ETy = T->getAs<EnumType>())
@@ -5127,25 +5615,38 @@ ExternalASTSource::~ExternalASTSource() { }
void ExternalASTSource::PrintStats() { }
+ASTMutationListener::~ASTMutationListener() { }
+
//===----------------------------------------------------------------------===//
// Builtin Type Computation
//===----------------------------------------------------------------------===//
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
-/// pointer over the consumed characters. This returns the resultant type.
-static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
+/// pointer over the consumed characters. This returns the resultant type. If
+/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
+/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
+/// a vector of "i*".
+///
+/// RequiresICE is filled in on return to indicate whether the value is required
+/// to be an Integer Constant Expression.
+static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
ASTContext::GetBuiltinTypeError &Error,
- bool AllowTypeModifiers = true) {
+ bool &RequiresICE,
+ bool AllowTypeModifiers) {
// Modifiers.
int HowLong = 0;
bool Signed = false, Unsigned = false;
-
- // Read the modifiers first.
+ RequiresICE = false;
+
+ // Read the prefixed modifiers first.
bool Done = false;
while (!Done) {
switch (*Str++) {
default: Done = true; --Str; break;
+ case 'I':
+ RequiresICE = true;
+ break;
case 'S':
assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
assert(!Signed && "Can't use 'S' modifier multiple times!");
@@ -5223,6 +5724,12 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
case 'F':
Type = Context.getCFConstantStringType();
break;
+ case 'G':
+ Type = Context.getObjCIdType();
+ break;
+ case 'H':
+ Type = Context.getObjCSelType();
+ break;
case 'a':
Type = Context.getBuiltinVaListType();
assert(!Type.isNull() && "builtin va list type not initialized!");
@@ -5238,27 +5745,30 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
// it to be a __va_list_tag*.
Type = Context.getBuiltinVaListType();
assert(!Type.isNull() && "builtin va list type not initialized!");
- if (Type->isArrayType()) {
+ if (Type->isArrayType())
Type = Context.getArrayDecayedType(Type);
- } else {
+ else
Type = Context.getLValueReferenceType(Type);
- }
break;
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
assert(End != Str && "Missing vector size");
-
Str = End;
- QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
- // FIXME: Don't know what to do about AltiVec.
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ // TODO: No way to make AltiVec vectors in builtins yet.
Type = Context.getVectorType(ElementType, NumElements,
- VectorType::NotAltiVec);
+ VectorType::GenericVector);
break;
}
case 'X': {
- QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ assert(!RequiresICE && "Can't require complex ICE");
Type = Context.getComplexType(ElementType);
break;
}
@@ -5282,59 +5792,70 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
break;
}
- if (!AllowTypeModifiers)
- return Type;
-
- Done = false;
+ // If there are modifiers and if we're allowed to parse them, go for it.
+ Done = !AllowTypeModifiers;
while (!Done) {
switch (char c = *Str++) {
- default: Done = true; --Str; break;
- case '*':
- case '&':
- {
- // Both pointers and references can have their pointee types
- // qualified with an address space.
- char *End;
- unsigned AddrSpace = strtoul(Str, &End, 10);
- if (End != Str && AddrSpace != 0) {
- Type = Context.getAddrSpaceQualType(Type, AddrSpace);
- Str = End;
- }
- }
- if (c == '*')
- Type = Context.getPointerType(Type);
- else
- Type = Context.getLValueReferenceType(Type);
- break;
- // FIXME: There's no way to have a built-in with an rvalue ref arg.
- case 'C':
- Type = Type.withConst();
- break;
- case 'D':
- Type = Context.getVolatileType(Type);
- break;
+ default: Done = true; --Str; break;
+ case '*':
+ case '&': {
+ // Both pointers and references can have their pointee types
+ // qualified with an address space.
+ char *End;
+ unsigned AddrSpace = strtoul(Str, &End, 10);
+ if (End != Str && AddrSpace != 0) {
+ Type = Context.getAddrSpaceQualType(Type, AddrSpace);
+ Str = End;
+ }
+ if (c == '*')
+ Type = Context.getPointerType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ }
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.withConst();
+ break;
+ case 'D':
+ Type = Context.getVolatileType(Type);
+ break;
}
}
+
+ assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
+ "Integer constant 'I' type must be an integer");
return Type;
}
/// GetBuiltinType - Return the type for the specified builtin.
-QualType ASTContext::GetBuiltinType(unsigned id,
- GetBuiltinTypeError &Error) {
- const char *TypeStr = BuiltinInfo.GetTypeString(id);
+QualType ASTContext::GetBuiltinType(unsigned Id,
+ GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs) const {
+ const char *TypeStr = BuiltinInfo.GetTypeString(Id);
llvm::SmallVector<QualType, 8> ArgTypes;
+ bool RequiresICE = false;
Error = GE_None;
- QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error);
+ QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
+ RequiresICE, true);
if (Error != GE_None)
return QualType();
+
+ assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
+
while (TypeStr[0] && TypeStr[0] != '.') {
- QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error);
+ QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
if (Error != GE_None)
return QualType();
+ // If this argument is required to be an IntegerConstantExpression and the
+ // caller cares, fill in the bitmask we return.
+ if (RequiresICE && IntegerConstantArgs)
+ *IntegerConstantArgs |= 1 << ArgTypes.size();
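+ // e.g. an 'I' on the second parameter sets bit 1, so callers such as
+ // Sema can insist the corresponding call argument is a constant
+ // expression.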
+
// Do array -> pointer decay. The builtin should use the decayed type.
if (Ty->isArrayType())
Ty = getArrayDecayedType(Ty);
@@ -5345,164 +5866,26 @@ QualType ASTContext::GetBuiltinType(unsigned id,
assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
"'.' should only occur at end of builtin type list!");
- // handle untyped/variadic arguments "T c99Style();" or "T cppStyle(...);".
- if (ArgTypes.size() == 0 && TypeStr[0] == '.')
- return getFunctionNoProtoType(ResType);
-
- // FIXME: Should we create noreturn types?
- return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(),
- TypeStr[0] == '.', 0, false, false, 0, 0,
- FunctionType::ExtInfo());
-}
-
-QualType
-ASTContext::UsualArithmeticConversionsType(QualType lhs, QualType rhs) {
- // Perform the usual unary conversions. We do this early so that
- // integral promotions to "int" can allow us to exit early, in the
- // lhs == rhs check. Also, for conversion purposes, we ignore any
- // qualifiers. For example, "const float" and "float" are
- // equivalent.
- if (lhs->isPromotableIntegerType())
- lhs = getPromotedIntegerType(lhs);
- else
- lhs = lhs.getUnqualifiedType();
- if (rhs->isPromotableIntegerType())
- rhs = getPromotedIntegerType(rhs);
- else
- rhs = rhs.getUnqualifiedType();
+ FunctionType::ExtInfo EI;
+ if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
- // If both types are identical, no conversion is needed.
- if (lhs == rhs)
- return lhs;
+ bool Variadic = (TypeStr[0] == '.');
- // If either side is a non-arithmetic type (e.g. a pointer), we are done.
- // The caller can deal with this (e.g. pointer + int).
- if (!lhs->isArithmeticType() || !rhs->isArithmeticType())
- return lhs;
+ // We really shouldn't be making a no-proto type here, especially in C++.
+ if (ArgTypes.empty() && Variadic)
+ return getFunctionNoProtoType(ResType, EI);
- // At this point, we have two different arithmetic types.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = Variadic;
- // Handle complex types first (C99 6.3.1.8p1).
- if (lhs->isComplexType() || rhs->isComplexType()) {
- // if we have an integer operand, the result is the complex type.
- if (rhs->isIntegerType() || rhs->isComplexIntegerType()) {
- // convert the rhs to the lhs complex type.
- return lhs;
- }
- if (lhs->isIntegerType() || lhs->isComplexIntegerType()) {
- // convert the lhs to the rhs complex type.
- return rhs;
- }
- // This handles complex/complex, complex/float, or float/complex.
- // When both operands are complex, the shorter operand is converted to the
- // type of the longer, and that is the type of the result. This corresponds
- // to what is done when combining two real floating-point operands.
- // The fun begins when size promotion occur across type domains.
- // From H&S 6.3.4: When one operand is complex and the other is a real
- // floating-point type, the less precise type is converted, within it's
- // real or complex domain, to the precision of the other type. For example,
- // when combining a "long double" with a "double _Complex", the
- // "double _Complex" is promoted to "long double _Complex".
- int result = getFloatingTypeOrder(lhs, rhs);
-
- if (result > 0) { // The left side is bigger, convert rhs.
- rhs = getFloatingTypeOfSizeWithinDomain(lhs, rhs);
- } else if (result < 0) { // The right side is bigger, convert lhs.
- lhs = getFloatingTypeOfSizeWithinDomain(rhs, lhs);
- }
- // At this point, lhs and rhs have the same rank/size. Now, make sure the
- // domains match. This is a requirement for our implementation, C99
- // does not require this promotion.
- if (lhs != rhs) { // Domains don't match, we have complex/float mix.
- if (lhs->isRealFloatingType()) { // handle "double, _Complex double".
- return rhs;
- } else { // handle "_Complex double, double".
- return lhs;
- }
- }
- return lhs; // The domain/size match exactly.
- }
- // Now handle "real" floating types (i.e. float, double, long double).
- if (lhs->isRealFloatingType() || rhs->isRealFloatingType()) {
- // if we have an integer operand, the result is the real floating type.
- if (rhs->isIntegerType()) {
- // convert rhs to the lhs floating point type.
- return lhs;
- }
- if (rhs->isComplexIntegerType()) {
- // convert rhs to the complex floating point type.
- return getComplexType(lhs);
- }
- if (lhs->isIntegerType()) {
- // convert lhs to the rhs floating point type.
- return rhs;
- }
- if (lhs->isComplexIntegerType()) {
- // convert lhs to the complex floating point type.
- return getComplexType(rhs);
- }
- // We have two real floating types, float/complex combos were handled above.
- // Convert the smaller operand to the bigger result.
- int result = getFloatingTypeOrder(lhs, rhs);
- if (result > 0) // convert the rhs
- return lhs;
- assert(result < 0 && "illegal float comparison");
- return rhs; // convert the lhs
- }
- if (lhs->isComplexIntegerType() || rhs->isComplexIntegerType()) {
- // Handle GCC complex int extension.
- const ComplexType *lhsComplexInt = lhs->getAsComplexIntegerType();
- const ComplexType *rhsComplexInt = rhs->getAsComplexIntegerType();
-
- if (lhsComplexInt && rhsComplexInt) {
- if (getIntegerTypeOrder(lhsComplexInt->getElementType(),
- rhsComplexInt->getElementType()) >= 0)
- return lhs; // convert the rhs
- return rhs;
- } else if (lhsComplexInt && rhs->isIntegerType()) {
- // convert the rhs to the lhs complex type.
- return lhs;
- } else if (rhsComplexInt && lhs->isIntegerType()) {
- // convert the lhs to the rhs complex type.
- return rhs;
- }
- }
- // Finally, we have two differing integer types.
- // The rules for this case are in C99 6.3.1.8
- int compare = getIntegerTypeOrder(lhs, rhs);
- bool lhsSigned = lhs->hasSignedIntegerRepresentation(),
- rhsSigned = rhs->hasSignedIntegerRepresentation();
- QualType destType;
- if (lhsSigned == rhsSigned) {
- // Same signedness; use the higher-ranked type
- destType = compare >= 0 ? lhs : rhs;
- } else if (compare != (lhsSigned ? 1 : -1)) {
- // The unsigned type has greater than or equal rank to the
- // signed type, so use the unsigned type
- destType = lhsSigned ? rhs : lhs;
- } else if (getIntWidth(lhs) != getIntWidth(rhs)) {
- // The two types are different widths; if we are here, that
- // means the signed type is larger than the unsigned type, so
- // use the signed type.
- destType = lhsSigned ? lhs : rhs;
- } else {
- // The signed type is higher-ranked than the unsigned type,
- // but isn't actually any bigger (like unsigned int and long
- // on most 32-bit systems). Use the unsigned type corresponding
- // to the signed type.
- destType = getCorrespondingUnsignedType(lhsSigned ? lhs : rhs);
- }
- return destType;
+ return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(), EPI);
}
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
GVALinkage External = GVA_StrongExternal;
Linkage L = FD->getLinkage();
- if (L == ExternalLinkage && getLangOptions().CPlusPlus &&
- FD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
switch (L) {
case NoLinkage:
case InternalLinkage:
@@ -5663,4 +6046,26 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
}
+CallingConv ASTContext::getDefaultMethodCallConv() {
+ // Pass through to the C++ ABI object
+ return ABI->getDefaultMethodCallConv();
+}
+
+bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // Pass through to the C++ ABI object
+ return ABI->isNearlyEmpty(RD);
+}
+
+MangleContext *ASTContext::createMangleContext() {
+ switch (Target.getCXXABI()) {
+ case CXXABI_ARM:
+ case CXXABI_Itanium:
+ return createItaniumMangleContext(*this, getDiagnostics());
+ case CXXABI_Microsoft:
+ return createMicrosoftMangleContext(*this, getDiagnostics());
+ }
+ assert(0 && "Unsupported ABI");
+ return 0;
+}
+
CXXABI::~CXXABI() {}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
index 23f323d..5bf8a38 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -28,14 +28,26 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
const Type *Ty = QC.strip(QT);
// Don't aka just because we saw an elaborated type...
- if (isa<ElaboratedType>(Ty)) {
- QT = cast<ElaboratedType>(Ty)->desugar();
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
+ QT = ET->desugar();
continue;
}
-
- // ...or a substituted template type parameter.
- if (isa<SubstTemplateTypeParmType>(Ty)) {
- QT = cast<SubstTemplateTypeParmType>(Ty)->desugar();
+ // ... or a paren type ...
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ QT = PT->desugar();
+ continue;
+ }
+ // ...or a substituted template type parameter ...
+ if (const SubstTemplateTypeParmType *ST =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ QT = ST->desugar();
+ continue;
+ }
+ // ... or an auto type.
+ if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
+ if (!AT->isSugared())
+ break;
+ QT = AT->desugar();
continue;
}
@@ -81,10 +93,10 @@ break; \
break;
// Don't desugar through the primary typedef of an anonymous type.
- if (isa<TagType>(Underlying) && isa<TypedefType>(QT))
- if (cast<TagType>(Underlying)->getDecl()->getTypedefForAnonDecl() ==
- cast<TypedefType>(QT)->getDecl())
- break;
+ if (const TagType *UTT = Underlying->getAs<TagType>())
+ if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
+ if (UTT->getDecl()->getTypedefForAnonDecl() == QTT->getDecl())
+ break;
// Record that we actually looked through an opaque type here.
ShouldAKA = true;
@@ -94,14 +106,17 @@ break; \
// If we have a pointer-like type, desugar the pointee as well.
// FIXME: Handle other pointer-like types.
if (const PointerType *Ty = QT->getAs<PointerType>()) {
- QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
} else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
- QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
+ QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
}
- return QC.apply(QT);
+ return QC.apply(Context, QT);
}
/// \brief Convert the given type to a string suitable for printing as part of
@@ -151,13 +166,10 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
bool ShouldAKA = false;
QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
if (ShouldAKA) {
- std::string D = DesugaredTy.getAsString(Context.PrintingPolicy);
- if (D != S) {
- S = "'" + S + "' (aka '";
- S += D;
- S += "')";
- return S;
- }
+ S = "'" + S + "' (aka '";
+ S += DesugaredTy.getAsString(Context.PrintingPolicy);
+ S += "')";
+ return S;
}
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index 2edd09c..65c0a3b 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -41,41 +41,42 @@ namespace {
using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
// Importing types
- QualType VisitType(Type *T);
- QualType VisitBuiltinType(BuiltinType *T);
- QualType VisitComplexType(ComplexType *T);
- QualType VisitPointerType(PointerType *T);
- QualType VisitBlockPointerType(BlockPointerType *T);
- QualType VisitLValueReferenceType(LValueReferenceType *T);
- QualType VisitRValueReferenceType(RValueReferenceType *T);
- QualType VisitMemberPointerType(MemberPointerType *T);
- QualType VisitConstantArrayType(ConstantArrayType *T);
- QualType VisitIncompleteArrayType(IncompleteArrayType *T);
- QualType VisitVariableArrayType(VariableArrayType *T);
+ QualType VisitType(const Type *T);
+ QualType VisitBuiltinType(const BuiltinType *T);
+ QualType VisitComplexType(const ComplexType *T);
+ QualType VisitPointerType(const PointerType *T);
+ QualType VisitBlockPointerType(const BlockPointerType *T);
+ QualType VisitLValueReferenceType(const LValueReferenceType *T);
+ QualType VisitRValueReferenceType(const RValueReferenceType *T);
+ QualType VisitMemberPointerType(const MemberPointerType *T);
+ QualType VisitConstantArrayType(const ConstantArrayType *T);
+ QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ QualType VisitVariableArrayType(const VariableArrayType *T);
// FIXME: DependentSizedArrayType
// FIXME: DependentSizedExtVectorType
- QualType VisitVectorType(VectorType *T);
- QualType VisitExtVectorType(ExtVectorType *T);
- QualType VisitFunctionNoProtoType(FunctionNoProtoType *T);
- QualType VisitFunctionProtoType(FunctionProtoType *T);
+ QualType VisitVectorType(const VectorType *T);
+ QualType VisitExtVectorType(const ExtVectorType *T);
+ QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ QualType VisitFunctionProtoType(const FunctionProtoType *T);
// FIXME: UnresolvedUsingType
- QualType VisitTypedefType(TypedefType *T);
- QualType VisitTypeOfExprType(TypeOfExprType *T);
+ QualType VisitTypedefType(const TypedefType *T);
+ QualType VisitTypeOfExprType(const TypeOfExprType *T);
// FIXME: DependentTypeOfExprType
- QualType VisitTypeOfType(TypeOfType *T);
- QualType VisitDecltypeType(DecltypeType *T);
+ QualType VisitTypeOfType(const TypeOfType *T);
+ QualType VisitDecltypeType(const DecltypeType *T);
+ QualType VisitAutoType(const AutoType *T);
// FIXME: DependentDecltypeType
- QualType VisitRecordType(RecordType *T);
- QualType VisitEnumType(EnumType *T);
+ QualType VisitRecordType(const RecordType *T);
+ QualType VisitEnumType(const EnumType *T);
// FIXME: TemplateTypeParmType
// FIXME: SubstTemplateTypeParmType
- // FIXME: TemplateSpecializationType
- QualType VisitElaboratedType(ElaboratedType *T);
+ QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
+ QualType VisitElaboratedType(const ElaboratedType *T);
// FIXME: DependentNameType
// FIXME: DependentTemplateSpecializationType
- QualType VisitObjCInterfaceType(ObjCInterfaceType *T);
- QualType VisitObjCObjectType(ObjCObjectType *T);
- QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T);
+ QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ QualType VisitObjCObjectType(const ObjCObjectType *T);
+ QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
// Importing declarations
bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
@@ -83,9 +84,17 @@ namespace {
SourceLocation &Loc);
void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
DeclarationNameInfo& To);
- void ImportDeclContext(DeclContext *FromDC);
+ void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+ bool ImportDefinition(RecordDecl *From, RecordDecl *To);
+ TemplateParameterList *ImportTemplateParameterList(
+ TemplateParameterList *Params);
+ TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
+ bool ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ llvm::SmallVectorImpl<TemplateArgument> &ToArgs);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord);
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
+ bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
Decl *VisitDecl(Decl *D);
Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitTypedefDecl(TypedefDecl *D);
@@ -98,6 +107,7 @@ namespace {
Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
Decl *VisitVarDecl(VarDecl *D);
Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
@@ -106,9 +116,18 @@ namespace {
Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
Decl *VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
Decl *VisitObjCClassDecl(ObjCClassDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
// Importing statements
Stmt *VisitStmt(Stmt *S);
@@ -137,9 +156,6 @@ namespace {
/// \brief AST contexts for which we are checking structural equivalence.
ASTContext &C1, &C2;
- /// \brief Diagnostic object used to emit diagnostics.
- Diagnostic &Diags;
-
/// \brief The set of "tentative" equivalences between two canonical
/// declarations, mapping from a declaration in the first context to the
/// declaration in the second context that we believe to be equivalent.
@@ -158,10 +174,9 @@ namespace {
bool StrictTypeSpelling;
StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
- Diagnostic &Diags,
llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
bool StrictTypeSpelling = false)
- : C1(C1), C2(C2), Diags(Diags), NonEquivalentDecls(NonEquivalentDecls),
+ : C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
StrictTypeSpelling(StrictTypeSpelling) { }
/// \brief Determine whether the two declarations are structurally
@@ -179,11 +194,11 @@ namespace {
public:
DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, C1.getSourceManager()), DiagID);
+ return C1.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, C2.getSourceManager()), DiagID);
+ return C2.getDiagnostics().Report(Loc, DiagID);
}
};
}
@@ -200,9 +215,9 @@ static bool IsSameValue(const llvm::APInt &I1, const llvm::APInt &I2) {
return I1 == I2;
if (I1.getBitWidth() > I2.getBitWidth())
- return I1 == llvm::APInt(I2).zext(I1.getBitWidth());
+ return I1 == I2.zext(I1.getBitWidth());
- return llvm::APInt(I1).zext(I2.getBitWidth()) == I2;
+ return I1.zext(I2.getBitWidth()) == I2;
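+ // e.g. an 8-bit 200 compares equal to a 16-bit 200: the narrower value
+ // is zero-extended to the wider width before comparing.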
}
/// \brief Determine if two APSInts have the same value, zero- or sign-extending
@@ -213,9 +228,9 @@ static bool IsSameValue(const llvm::APSInt &I1, const llvm::APSInt &I2) {
// Check for a bit-width mismatch.
if (I1.getBitWidth() > I2.getBitWidth())
- return IsSameValue(I1, llvm::APSInt(I2).extend(I1.getBitWidth()));
+ return IsSameValue(I1, I2.extend(I1.getBitWidth()));
else if (I2.getBitWidth() > I1.getBitWidth())
- return IsSameValue(llvm::APSInt(I1).extend(I2.getBitWidth()), I2);
+ return IsSameValue(I1.extend(I2.getBitWidth()), I2);
// We have a signedness mismatch. Turn the signed value into an unsigned
// value.
@@ -263,7 +278,54 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgument &Arg1,
const TemplateArgument &Arg2) {
- // FIXME: Implement!
+ if (Arg1.getKind() != Arg2.getKind())
+ return false;
+
+ switch (Arg1.getKind()) {
+ case TemplateArgument::Null:
+ return true;
+
+ case TemplateArgument::Type:
+ return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
+
+ case TemplateArgument::Integral:
+ if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
+ Arg2.getIntegralType()))
+ return false;
+
+ return IsSameValue(*Arg1.getAsIntegral(), *Arg2.getAsIntegral());
+
+ case TemplateArgument::Declaration:
+ return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
+
+ case TemplateArgument::Template:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplate(),
+ Arg2.getAsTemplate());
+
+ case TemplateArgument::TemplateExpansion:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplateOrTemplatePattern(),
+ Arg2.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsExpr(), Arg2.getAsExpr());
+
+ case TemplateArgument::Pack:
+ if (Arg1.pack_size() != Arg2.pack_size())
+ return false;
+
+ for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Arg1.pack_begin()[I],
+ Arg2.pack_begin()[I]))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid template argument kind");
return true;
}
@@ -441,7 +503,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
if (Vec1->getNumElements() != Vec2->getNumElements())
return false;
- if (Vec1->getAltiVecSpecific() != Vec2->getAltiVecSpecific())
+ if (Vec1->getVectorKind() != Vec2->getVectorKind())
return false;
break;
}
@@ -496,7 +558,25 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+
+ case Type::Attributed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getModifiedType(),
+ cast<AttributedType>(T2)->getModifiedType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getEquivalentType(),
+ cast<AttributedType>(T2)->getEquivalentType()))
+ return false;
+ break;
+ case Type::Paren:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ParenType>(T1)->getInnerType(),
+ cast<ParenType>(T2)->getInnerType()))
+ return false;
+ break;
+
case Type::Typedef:
if (!IsStructurallyEquivalent(Context,
cast<TypedefType>(T1)->getDecl(),
@@ -525,6 +605,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::Auto:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AutoType>(T1)->getDeducedType(),
+ cast<AutoType>(T2)->getDeducedType()))
+ return false;
+ break;
+
case Type::Record:
case Type::Enum:
if (!IsStructurallyEquivalent(Context,
@@ -563,6 +650,21 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst1
+ = cast<SubstTemplateTypeParmPackType>(T1);
+ const SubstTemplateTypeParmPackType *Subst2
+ = cast<SubstTemplateTypeParmPackType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getArgumentPack(),
+ Subst2->getArgumentPack()))
+ return false;
+ break;
+ }
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec1
= cast<TemplateSpecializationType>(T1);
@@ -644,7 +746,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
break;
}
-
+
+ case Type::PackExpansion:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackExpansionType>(T1)->getPattern(),
+ cast<PackExpansionType>(T2)->getPattern()))
+ return false;
+ break;
+
case Type::ObjCInterface: {
const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
@@ -698,6 +807,33 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
+ // If both declarations are class template specializations, we know
+ // the ODR applies, so check the template and template arguments.
+ ClassTemplateSpecializationDecl *Spec1
+ = dyn_cast<ClassTemplateSpecializationDecl>(D1);
+ ClassTemplateSpecializationDecl *Spec2
+ = dyn_cast<ClassTemplateSpecializationDecl>(D2);
+ if (Spec1 && Spec2) {
+ // Check that the specialized templates are the same.
+ if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
+ Spec2->getSpecializedTemplate()))
+ return false;
+
+ // Check that the template arguments are the same.
+ if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
+ return false;
+
+ for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateArgs().get(I),
+ Spec2->getTemplateArgs().get(I)))
+ return false;
+ }
+ // If one is a class template specialization and the other is not, these
+ // structures are different.
+ else if (Spec1 || Spec2)
+ return false;
+
// Compare the definitions of these two records. If either or both are
// incomplete, we assume that they are equivalent.
D1 = D1->getDefinition();
@@ -709,11 +845,11 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
- << Context.C2.getTypeDeclType(D2);
+ << Context.C2.getTypeDeclType(D2);
Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
- << D2CXX->getNumBases();
+ << D2CXX->getNumBases();
Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
- << D1CXX->getNumBases();
+ << D1CXX->getNumBases();
return false;
}
@@ -892,7 +1028,112 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateParameterList *Params1,
+ TemplateParameterList *Params2) {
+ if (Params1->size() != Params2->size()) {
+ Context.Diag2(Params2->getTemplateLoc(),
+ diag::err_odr_different_num_template_parameters)
+ << Params1->size() << Params2->size();
+ Context.Diag1(Params1->getTemplateLoc(),
+ diag::note_odr_template_parameter_list);
+ return false;
+ }
+ for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
+ if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
+ Context.Diag2(Params2->getParam(I)->getLocation(),
+ diag::err_odr_different_template_parameter_kind);
+ Context.Diag1(Params1->getParam(I)->getLocation(),
+ diag::note_odr_template_parameter_here);
+ return false;
+ }
+
+ if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
+ Params2->getParam(I))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTypeParmDecl *D1,
+ TemplateTypeParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NonTypeTemplateParmDecl *D1,
+ NonTypeTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check types.
+ if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
+ Context.Diag2(D2->getLocation(),
+ diag::err_odr_non_type_parameter_type_inconsistent)
+ << D2->getType() << D1->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
+ << D1->getType();
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTemplateParmDecl *D1,
+ TemplateTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check template parameter lists.
+ return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
+ D2->getTemplateParameters());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ClassTemplateDecl *D1,
+ ClassTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsStructurallyEquivalent(Context,
+ D1->getTemplateParameters(),
+ D2->getTemplateParameters()))
+ return false;
+
+ // Check the templated declaration.
+ return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
/// \brief Determine structural equivalence of two declarations.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2) {
@@ -988,8 +1229,47 @@ bool StructuralEquivalenceContext::Finish() {
// Typedef/non-typedef mismatch.
Equivalent = false;
}
- }
-
+ } else if (ClassTemplateDecl *ClassTemplate1
+ = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
+ ClassTemplate2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
+ Equivalent = false;
+ } else {
+ // Class template/non-class-template mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTypeParmDecl *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP1
+ = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (NonTypeTemplateParmDecl *NTTP2
+ = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTemplateParmDecl *TTP1
+ = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (TemplateTemplateParmDecl *TTP2
+ = dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ }
+
if (!Equivalent) {
// Note that these two declarations are not equivalent (and we already
// know about it).
@@ -1007,13 +1287,13 @@ bool StructuralEquivalenceContext::Finish() {
// Import Types
//----------------------------------------------------------------------------
-QualType ASTNodeImporter::VisitType(Type *T) {
+QualType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
return QualType();
}
-QualType ASTNodeImporter::VisitBuiltinType(BuiltinType *T) {
+QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
case BuiltinType::Void: return Importer.getToContext().VoidTy;
case BuiltinType::Bool: return Importer.getToContext().BoolTy;
@@ -1054,7 +1334,8 @@ QualType ASTNodeImporter::VisitBuiltinType(BuiltinType *T) {
return Importer.getToContext().CharTy;
case BuiltinType::SChar: return Importer.getToContext().SignedCharTy;
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
// FIXME: If not in C++, shall we translate to the C equivalent of
// wchar_t?
return Importer.getToContext().WCharTy;
@@ -1074,9 +1355,6 @@ QualType ASTNodeImporter::VisitBuiltinType(BuiltinType *T) {
case BuiltinType::Overload: return Importer.getToContext().OverloadTy;
case BuiltinType::Dependent: return Importer.getToContext().DependentTy;
- case BuiltinType::UndeducedAuto:
- // FIXME: Make sure that the "to" context supports C++0x!
- return Importer.getToContext().UndeducedAutoTy;
case BuiltinType::ObjCId:
// FIXME: Make sure that the "to" context supports Objective-C!
@@ -1092,7 +1370,7 @@ QualType ASTNodeImporter::VisitBuiltinType(BuiltinType *T) {
return QualType();
}
-QualType ASTNodeImporter::VisitComplexType(ComplexType *T) {
+QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
@@ -1100,7 +1378,7 @@ QualType ASTNodeImporter::VisitComplexType(ComplexType *T) {
return Importer.getToContext().getComplexType(ToElementType);
}
-QualType ASTNodeImporter::VisitPointerType(PointerType *T) {
+QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
@@ -1108,7 +1386,7 @@ QualType ASTNodeImporter::VisitPointerType(PointerType *T) {
return Importer.getToContext().getPointerType(ToPointeeType);
}
-QualType ASTNodeImporter::VisitBlockPointerType(BlockPointerType *T) {
+QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
// FIXME: Check for blocks support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
@@ -1117,7 +1395,8 @@ QualType ASTNodeImporter::VisitBlockPointerType(BlockPointerType *T) {
return Importer.getToContext().getBlockPointerType(ToPointeeType);
}
-QualType ASTNodeImporter::VisitLValueReferenceType(LValueReferenceType *T) {
+QualType
+ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
@@ -1126,7 +1405,8 @@ QualType ASTNodeImporter::VisitLValueReferenceType(LValueReferenceType *T) {
return Importer.getToContext().getLValueReferenceType(ToPointeeType);
}
-QualType ASTNodeImporter::VisitRValueReferenceType(RValueReferenceType *T) {
+QualType
+ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
// FIXME: Check for C++0x support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
@@ -1135,7 +1415,7 @@ QualType ASTNodeImporter::VisitRValueReferenceType(RValueReferenceType *T) {
return Importer.getToContext().getRValueReferenceType(ToPointeeType);
}
-QualType ASTNodeImporter::VisitMemberPointerType(MemberPointerType *T) {
+QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
@@ -1146,7 +1426,7 @@ QualType ASTNodeImporter::VisitMemberPointerType(MemberPointerType *T) {
ClassType.getTypePtr());
}
-QualType ASTNodeImporter::VisitConstantArrayType(ConstantArrayType *T) {
+QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
@@ -1157,7 +1437,8 @@ QualType ASTNodeImporter::VisitConstantArrayType(ConstantArrayType *T) {
T->getIndexTypeCVRQualifiers());
}
-QualType ASTNodeImporter::VisitIncompleteArrayType(IncompleteArrayType *T) {
+QualType
+ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
@@ -1167,7 +1448,7 @@ QualType ASTNodeImporter::VisitIncompleteArrayType(IncompleteArrayType *T) {
T->getIndexTypeCVRQualifiers());
}
-QualType ASTNodeImporter::VisitVariableArrayType(VariableArrayType *T) {
+QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
@@ -1183,17 +1464,17 @@ QualType ASTNodeImporter::VisitVariableArrayType(VariableArrayType *T) {
Brackets);
}
-QualType ASTNodeImporter::VisitVectorType(VectorType *T) {
+QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getVectorType(ToElementType,
T->getNumElements(),
- T->getAltiVecSpecific());
+ T->getVectorKind());
}
-QualType ASTNodeImporter::VisitExtVectorType(ExtVectorType *T) {
+QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
@@ -1202,7 +1483,8 @@ QualType ASTNodeImporter::VisitExtVectorType(ExtVectorType *T) {
T->getNumElements());
}
-QualType ASTNodeImporter::VisitFunctionNoProtoType(FunctionNoProtoType *T) {
+QualType
+ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// FIXME: What happens if we're importing a function without a prototype
// into C++? Should we make it variadic?
QualType ToResultType = Importer.Import(T->getResultType());
@@ -1213,7 +1495,7 @@ QualType ASTNodeImporter::VisitFunctionNoProtoType(FunctionNoProtoType *T) {
T->getExtInfo());
}
-QualType ASTNodeImporter::VisitFunctionProtoType(FunctionProtoType *T) {
+QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
QualType ToResultType = Importer.Import(T->getResultType());
if (ToResultType.isNull())
return QualType();
@@ -1239,19 +1521,15 @@ QualType ASTNodeImporter::VisitFunctionProtoType(FunctionProtoType *T) {
return QualType();
ExceptionTypes.push_back(ExceptionType);
}
+
+ FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
+ EPI.Exceptions = ExceptionTypes.data();
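+ // Reuse the source type's ExtProtoInfo wholesale; only the exception
+ // array pointer needs retargeting at the freshly imported types.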
return Importer.getToContext().getFunctionType(ToResultType, ArgTypes.data(),
- ArgTypes.size(),
- T->isVariadic(),
- T->getTypeQuals(),
- T->hasExceptionSpec(),
- T->hasAnyExceptionSpec(),
- ExceptionTypes.size(),
- ExceptionTypes.data(),
- T->getExtInfo());
+ ArgTypes.size(), EPI);
}
-QualType ASTNodeImporter::VisitTypedefType(TypedefType *T) {
+QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
TypedefDecl *ToDecl
= dyn_cast_or_null<TypedefDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
@@ -1260,7 +1538,7 @@ QualType ASTNodeImporter::VisitTypedefType(TypedefType *T) {
return Importer.getToContext().getTypeDeclType(ToDecl);
}
-QualType ASTNodeImporter::VisitTypeOfExprType(TypeOfExprType *T) {
+QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
return QualType();
@@ -1268,7 +1546,7 @@ QualType ASTNodeImporter::VisitTypeOfExprType(TypeOfExprType *T) {
return Importer.getToContext().getTypeOfExprType(ToExpr);
}
-QualType ASTNodeImporter::VisitTypeOfType(TypeOfType *T) {
+QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
if (ToUnderlyingType.isNull())
return QualType();
@@ -1276,7 +1554,8 @@ QualType ASTNodeImporter::VisitTypeOfType(TypeOfType *T) {
return Importer.getToContext().getTypeOfType(ToUnderlyingType);
}
-QualType ASTNodeImporter::VisitDecltypeType(DecltypeType *T) {
+QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
return QualType();
@@ -1284,7 +1563,20 @@ QualType ASTNodeImporter::VisitDecltypeType(DecltypeType *T) {
return Importer.getToContext().getDecltypeType(ToExpr);
}
-QualType ASTNodeImporter::VisitRecordType(RecordType *T) {
+QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ QualType FromDeduced = T->getDeducedType();
+ QualType ToDeduced;
+ if (!FromDeduced.isNull()) {
+ ToDeduced = Importer.Import(FromDeduced);
+ if (ToDeduced.isNull())
+ return QualType();
+ }
+
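+ // A null deduced type is legitimate here: it models a still-undeduced
+ // 'auto'.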
+ return Importer.getToContext().getAutoType(ToDeduced);
+}
+
+QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
RecordDecl *ToDecl
= dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
@@ -1293,7 +1585,7 @@ QualType ASTNodeImporter::VisitRecordType(RecordType *T) {
return Importer.getToContext().getTagDeclType(ToDecl);
}
-QualType ASTNodeImporter::VisitEnumType(EnumType *T) {
+QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
EnumDecl *ToDecl
= dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
@@ -1302,7 +1594,31 @@ QualType ASTNodeImporter::VisitEnumType(EnumType *T) {
return Importer.getToContext().getTagDeclType(ToDecl);
}
-QualType ASTNodeImporter::VisitElaboratedType(ElaboratedType *T) {
+QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ TemplateName ToTemplate = Importer.Import(T->getTemplateName());
+ if (ToTemplate.isNull())
+ return QualType();
+
+ llvm::SmallVector<TemplateArgument, 2> ToTemplateArgs;
+ if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return QualType();
+
+ QualType ToCanonType;
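+ // For sugared specializations, import the canonical form explicitly so
+ // the "to" context can reuse it as the canonical node.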
+ if (!QualType(T, 0).isCanonical()) {
+ QualType FromCanonType
+ = Importer.getFromContext().getCanonicalType(QualType(T, 0));
+ ToCanonType = Importer.Import(FromCanonType);
+ if (ToCanonType.isNull())
+ return QualType();
+ }
+ return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ ToTemplateArgs.data(),
+ ToTemplateArgs.size(),
+ ToCanonType);
+}
+
+QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
NestedNameSpecifier *ToQualifier = 0;
// Note: the qualifier in an ElaboratedType is optional.
if (T->getQualifier()) {
@@ -1319,7 +1635,7 @@ QualType ASTNodeImporter::VisitElaboratedType(ElaboratedType *T) {
ToQualifier, ToNamedType);
}
-QualType ASTNodeImporter::VisitObjCInterfaceType(ObjCInterfaceType *T) {
+QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
ObjCInterfaceDecl *Class
= dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
if (!Class)
@@ -1328,7 +1644,7 @@ QualType ASTNodeImporter::VisitObjCInterfaceType(ObjCInterfaceType *T) {
return Importer.getToContext().getObjCInterfaceType(Class);
}
-QualType ASTNodeImporter::VisitObjCObjectType(ObjCObjectType *T) {
+QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
QualType ToBaseType = Importer.Import(T->getBaseType());
if (ToBaseType.isNull())
return QualType();
@@ -1349,7 +1665,8 @@ QualType ASTNodeImporter::VisitObjCObjectType(ObjCObjectType *T) {
Protocols.size());
}
-QualType ASTNodeImporter::VisitObjCObjectPointerType(ObjCObjectPointerType *T) {
+QualType
+ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
@@ -1420,7 +1737,15 @@ ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
}
}
-void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC) {
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
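+ // Minimal import: instead of copying every member eagerly, mark the
+ // "to" context as having external storage so later lookups call back
+ // into the importer on demand.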
+ if (Importer.isMinimalImport() && !ForceImport) {
+ if (DeclContext *ToDC = Importer.ImportContext(FromDC)) {
+ ToDC->setHasExternalLexicalStorage();
+ ToDC->setHasExternalVisibleStorage();
+ }
+ return;
+ }
+
for (DeclContext::decl_iterator From = FromDC->decls_begin(),
FromEnd = FromDC->decls_end();
From != FromEnd;
@@ -1428,11 +1753,151 @@ void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC) {
Importer.Import(*From);
}
+bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To) {
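+ // Note that this returns true on *failure*; callers use
+ // 'if (ImportDefinition(From, To)) return 0;' as the error check.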
+ if (To->getDefinition())
+ return false;
+
+ To->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
+ CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
+
+ llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (CXXRecordDecl::base_class_iterator
+ Base1 = FromCXX->bases_begin(),
+ FromBaseEnd = FromCXX->bases_end();
+ Base1 != FromBaseEnd;
+ ++Base1) {
+ QualType T = Importer.Import(Base1->getType());
+ if (T.isNull())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ if (Base1->isPackExpansion())
+ EllipsisLoc = Importer.Import(Base1->getEllipsisLoc());
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
+ Base1->isVirtual(),
+ Base1->isBaseOfClass(),
+ Base1->getAccessSpecifierAsWritten(),
+ Importer.Import(Base1->getTypeSourceInfo()),
+ EllipsisLoc));
+ }
+ if (!Bases.empty())
+ ToCXX->setBases(Bases.data(), Bases.size());
+ }
+
+ ImportDeclContext(From);
+ To->completeDefinition();
+ return false;
+}
+
+TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
+ llvm::SmallVector<NamedDecl *, 4> ToParams;
+ ToParams.reserve(Params->size());
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ Decl *To = Importer.Import(*P);
+ if (!To)
+ return 0;
+
+ ToParams.push_back(cast<NamedDecl>(To));
+ }
+
+ return TemplateParameterList::Create(Importer.getToContext(),
+ Importer.Import(Params->getTemplateLoc()),
+ Importer.Import(Params->getLAngleLoc()),
+ ToParams.data(), ToParams.size(),
+ Importer.Import(Params->getRAngleLoc()));
+}
+
+TemplateArgument
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+
+ case TemplateArgument::Type: {
+ QualType ToType = Importer.Import(From.getAsType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType);
+ }
+
+ case TemplateArgument::Integral: {
+ QualType ToType = Importer.Import(From.getIntegralType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(*From.getAsIntegral(), ToType);
+ }
+
+ case TemplateArgument::Declaration:
+ if (Decl *To = Importer.Import(From.getAsDecl()))
+ return TemplateArgument(To);
+ return TemplateArgument();
+
+ case TemplateArgument::Template: {
+ TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName ToTemplate
+ = Importer.Import(From.getAsTemplateOrTemplatePattern());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
+ return TemplateArgument(ToExpr);
+ return TemplateArgument();
+
+ case TemplateArgument::Pack: {
+ llvm::SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
+ return TemplateArgument();
+
+ TemplateArgument *ToArgs
+ = new (Importer.getToContext()) TemplateArgument[ToPack.size()];
+ std::copy(ToPack.begin(), ToPack.end(), ToArgs);
+ return TemplateArgument(ToArgs, ToPack.size());
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+ return TemplateArgument();
+}
+
+bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ llvm::SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
+ if (To.isNull() && !FromArgs[I].isNull())
+ return true;
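+ // A null result only signals an error when the source argument was
+ // non-null; a genuinely null argument imports as null successfully.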
+
+ ToArgs.push_back(To);
+ }
+
+ return false;
+}
+
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
RecordDecl *ToRecord) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
- Importer.getDiags(),
Importer.getNonEquivalentDecls());
return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
}
@@ -1440,11 +1905,18 @@ bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
- Importer.getDiags(),
Importer.getNonEquivalentDecls());
return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
}
+bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
+ ClassTemplateDecl *To) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
+
Decl *ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
@@ -1620,9 +2092,10 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// Create the enum declaration.
EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getTagKeywordLoc()),
- 0);
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getTagKeywordLoc()), 0,
+ D->isScoped(), D->isScopedUsingClassTag(),
+ D->isFixed());
// Import the qualifier, if any.
if (D->getQualifier()) {
NestedNameSpecifier *NNS = Importer.Import(D->getQualifier());
@@ -1764,38 +2237,8 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Importer.Imported(D, D2);
- if (D->isDefinition()) {
- D2->startDefinition();
-
- // Add base classes.
- if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
- CXXRecordDecl *D1CXX = cast<CXXRecordDecl>(D);
-
- llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
- for (CXXRecordDecl::base_class_iterator
- Base1 = D1CXX->bases_begin(),
- FromBaseEnd = D1CXX->bases_end();
- Base1 != FromBaseEnd;
- ++Base1) {
- QualType T = Importer.Import(Base1->getType());
- if (T.isNull())
- return 0;
-
- Bases.push_back(
- new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
- Base1->isVirtual(),
- Base1->isBaseOfClass(),
- Base1->getAccessSpecifierAsWritten(),
- Importer.Import(Base1->getTypeSourceInfo())));
- }
- if (!Bases.empty())
- D2CXX->setBases(Bases.data(), Bases.size());
- }
-
- ImportDeclContext(D);
- D2->completeDefinition();
- }
+ if (D->isDefinition() && ImportDefinition(D, D2))
+ return 0;
return D2;
}
@@ -1939,7 +2382,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
} else if (isa<CXXDestructorDecl>(D)) {
ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
cast<CXXRecordDecl>(DC),
- NameInfo, T,
+ NameInfo, T, TInfo,
D->isInlineSpecified(),
D->isImplicit());
} else if (CXXConversionDecl *FromConversion
@@ -1949,6 +2392,13 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
NameInfo, T, TInfo,
D->isInlineSpecified(),
FromConversion->isExplicit());
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ NameInfo, T, TInfo,
+ Method->isStatic(),
+ Method->getStorageClassAsWritten(),
+ Method->isInlineSpecified());
} else {
ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
NameInfo, T, TInfo, D->getStorageClass(),
@@ -1965,8 +2415,10 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
+ ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
+ ToFunction->setTrivial(D->isTrivial());
+ ToFunction->setPure(D->isPure());
Importer.Imported(D, ToFunction);
- LexicalDC->addDecl(ToFunction);
// Set the parameters.
for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
@@ -1976,7 +2428,10 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setParams(Parameters.data(), Parameters.size());
// FIXME: Other bits to merge?
-
+
+ // Add this function to the lexical context.
+ LexicalDC->addDecl(ToFunction);
+
return ToFunction;
}
@@ -2024,6 +2479,42 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
return ToField;
}
+Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ NamedDecl **NamedChain =
+ new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
+
+ unsigned i = 0;
+ for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
+ PE = D->chain_end(); PI != PE; ++PI) {
+ Decl *Chained = Importer.Import(*PI);
+ if (!Chained)
+ return 0;
+ NamedChain[i++] = cast<NamedDecl>(Chained);
+ }
+
+ IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
+ Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(), T,
+ NamedChain, D->getChainingSize());
+ ToIndirectField->setAccess(D->getAccess());
+ ToIndirectField->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIndirectField);
+ LexicalDC->addDecl(ToIndirectField);
+ return ToIndirectField;
+}
+
Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Import the major distinguishing characteristics of an ivar.
DeclContext *DC, *LexicalDC;
@@ -2434,7 +2925,8 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// If we have an implementation, import it as well.
if (D->getImplementation()) {
ObjCCategoryImplDecl *Impl
- = cast<ObjCCategoryImplDecl>(Importer.Import(D->getImplementation()));
+ = cast_or_null<ObjCCategoryImplDecl>(
+ Importer.Import(D->getImplementation()));
if (!Impl)
return 0;
@@ -2615,8 +3107,8 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// If we have an @implementation, import it as well.
if (D->getImplementation()) {
- ObjCImplementationDecl *Impl
- = cast<ObjCImplementationDecl>(Importer.Import(D->getImplementation()));
+ ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(D->getImplementation()));
if (!Impl)
return 0;
@@ -2626,6 +3118,114 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
return ToIface;
}
+Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
+ Importer.Import(D->getCategoryDecl()));
+ if (!Category)
+ return 0;
+
+ ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
+ if (!ToImpl) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getIdentifier()),
+ Category->getClassInterface());
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ }
+
+ LexicalDC->addDecl(ToImpl);
+ Category->setImplementation(ToImpl);
+ }
+
+ Importer.Imported(D, ToImpl);
+ ImportDeclContext(D);
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ // Find the corresponding interface.
+ ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getClassInterface()));
+ if (!Iface)
+ return 0;
+
+ // Import the superclass, if any.
+ ObjCInterfaceDecl *Super = 0;
+ if (D->getSuperClass()) {
+ Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getSuperClass()));
+ if (!Super)
+ return 0;
+ }
+
+ ObjCImplementationDecl *Impl = Iface->getImplementation();
+ if (!Impl) {
+ // We haven't imported an implementation yet. Create a new @implementation
+ // now.
+ Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
+ Importer.ImportContext(D->getDeclContext()),
+ Importer.Import(D->getLocation()),
+ Iface, Super);
+
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ DeclContext *LexicalDC
+ = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ Impl->setLexicalDeclContext(LexicalDC);
+ }
+
+ // Associate the implementation with the class it implements.
+ Iface->setImplementation(Impl);
+ Importer.Imported(D, Iface->getImplementation());
+ } else {
+ Importer.Imported(D, Iface->getImplementation());
+
+ // Verify that the existing @implementation has the same superclass.
+ if ((Super && !Impl->getSuperClass()) ||
+ (!Super && Impl->getSuperClass()) ||
+ (Super && Impl->getSuperClass() &&
+ Super->getCanonicalDecl() != Impl->getSuperClass())) {
+ Importer.ToDiag(Impl->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << Iface->getDeclName();
+ // FIXME: It would be nice to have the location of the superclass
+ // below.
+ if (Impl->getSuperClass())
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_superclass)
+ << Impl->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (D->getSuperClass())
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_superclass)
+ << D->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ return 0;
+ }
+ }
+
+ // Import all of the members of this @implementation.
+ ImportDeclContext(D);
+
+ return Impl;
+}
+
Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Import the major distinguishing characteristics of an @property.
DeclContext *DC, *LexicalDC;
@@ -2688,6 +3288,87 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
return ToProperty;
}
+Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
+ Importer.Import(D->getPropertyDecl()));
+ if (!Property)
+ return 0;
+
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ // Import the lexical declaration context.
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+ if (!InImpl)
+ return 0;
+
+ // Import the ivar (for an @synthesize).
+ ObjCIvarDecl *Ivar = 0;
+ if (D->getPropertyIvarDecl()) {
+ Ivar = cast_or_null<ObjCIvarDecl>(
+ Importer.Import(D->getPropertyIvarDecl()));
+ if (!Ivar)
+ return 0;
+ }
+
+ ObjCPropertyImplDecl *ToImpl
+ = InImpl->FindPropertyImplDecl(Property->getIdentifier());
+ if (!ToImpl) {
+ ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ Property,
+ D->getPropertyImplementation(),
+ Ivar,
+ Importer.Import(D->getPropertyIvarDeclLoc()));
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToImpl);
+ LexicalDC->addDecl(ToImpl);
+ } else {
+ // Check that we have the same kind of property implementation (@synthesize
+ // vs. @dynamic).
+ if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
+ Importer.ToDiag(ToImpl->getLocation(),
+ diag::err_odr_objc_property_impl_kind_inconsistent)
+ << Property->getDeclName()
+ << (ToImpl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic);
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_property_impl_kind)
+ << D->getPropertyDecl()->getDeclName()
+ << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+ return 0;
+ }
+
+ // For @synthesize, check that we have the same ivar.
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
+ Ivar != ToImpl->getPropertyIvarDecl()) {
+ Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
+ diag::err_odr_objc_synthesize_ivar_inconsistent)
+ << Property->getDeclName()
+ << ToImpl->getPropertyIvarDecl()->getDeclName()
+ << Ivar->getDeclName();
+ Importer.FromDiag(D->getPropertyIvarDeclLoc(),
+ diag::note_odr_objc_synthesize_ivar_here)
+ << D->getPropertyIvarDecl()->getDeclName();
+ return 0;
+ }
+
+ // Merge the existing implementation with the new implementation.
+ Importer.Imported(D, ToImpl);
+ }
+
+ return ToImpl;
+}
+
Decl *
ASTNodeImporter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
// Import the context of this declaration.
@@ -2772,6 +3453,275 @@ Decl *ASTNodeImporter::VisitObjCClassDecl(ObjCClassDecl *D) {
return ToClass;
}
+Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ // For template arguments, we adopt the translation unit as our declaration
+ // context. This context will be fixed when the actual template declaration
+ // is created.
+
+ // FIXME: Import default argument.
+ return TemplateTypeParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getLocation()),
+ D->getDepth(),
+ D->getIndex(),
+ Importer.Import(D->getIdentifier()),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+}
+
+Decl *
+ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the type of this declaration.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Import type-source information.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Loc, D->getDepth(), D->getPosition(),
+ Name.getAsIdentifierInfo(),
+ T, D->isParameterPack(), TInfo);
+}
+
+Decl *
+ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import template parameters.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return TemplateTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Loc, D->getDepth(), D->getPosition(),
+ D->isParameterPack(),
+ Name.getAsIdentifierInfo(),
+ TemplateParams);
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ CXXRecordDecl *Definition
+ = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+ if (Definition && Definition != D->getTemplatedDecl()) {
+ Decl *ImportedDef
+ = Importer.Import(Definition->getDescribedClassTemplate());
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this class template.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // We may already have a template of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod()) {
+ llvm::SmallVector<NamedDecl *, 4> ConflictingDecls;
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (!(*Lookup.first)->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ Decl *Found = *Lookup.first;
+ if (ClassTemplateDecl *FoundTemplate
+ = dyn_cast<ClassTemplateDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ // The class templates structurally match; call it the same template.
+ // FIXME: We may be filling in a forward declaration here. Handle
+ // this case!
+ Importer.Imported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.Imported(D, FoundTemplate);
+ }
+ }
+
+ ConflictingDecls.push_back(*Lookup.first);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+
+ if (!Name)
+ return 0;
+ }
+
+ CXXRecordDecl *DTemplated = D->getTemplatedDecl();
+
+ // Create the declaration that is being templated.
+ CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
+ DTemplated->getTagKind(),
+ DC,
+ Importer.Import(DTemplated->getLocation()),
+ Name.getAsIdentifierInfo(),
+ Importer.Import(DTemplated->getTagKeywordLoc()));
+ D2Templated->setAccess(DTemplated->getAccess());
+
+ // Import the qualifier, if any.
+ if (DTemplated->getQualifier()) {
+ NestedNameSpecifier *NNS = Importer.Import(DTemplated->getQualifier());
+ SourceRange NNSRange = Importer.Import(DTemplated->getQualifierRange());
+ D2Templated->setQualifierInfo(NNS, NNSRange);
+ }
+ D2Templated->setLexicalDeclContext(LexicalDC);
+
+ // Create the class template declaration itself.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
+ Loc, Name, TemplateParams,
+ D2Templated,
+ /*PrevDecl=*/0);
+ D2Templated->setDescribedClassTemplate(D2);
+
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(D2);
+
+ // Note the relationship between the class templates.
+ Importer.Imported(D, D2);
+ Importer.Imported(DTemplated, D2Templated);
+
+ if (DTemplated->isDefinition() && !D2Templated->isDefinition()) {
+ // FIXME: Import definition!
+ }
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ ClassTemplateDecl *ClassTemplate
+ = cast_or_null<ClassTemplateDecl>(Importer.Import(
+ D->getSpecializedTemplate()));
+ if (!ClassTemplate)
+ return 0;
+
+ // Import the context of this declaration.
+ DeclContext *DC = ClassTemplate->getDeclContext();
+ if (!DC)
+ return 0;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import template arguments.
+ llvm::SmallVector<TemplateArgument, 2> TemplateArgs;
+ if (ImportTemplateArguments(D->getTemplateArgs().data(),
+ D->getTemplateArgs().size(),
+ TemplateArgs))
+ return 0;
+
+ // Try to find an existing specialization with these template arguments.
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *D2
+ = ClassTemplate->findSpecialization(TemplateArgs.data(),
+ TemplateArgs.size(), InsertPos);
+ if (D2) {
+ // We already have a class template specialization with these template
+ // arguments.
+
+ // FIXME: Check for specialization vs. instantiation errors.
+
+ if (RecordDecl *FoundDef = D2->getDefinition()) {
+ if (!D->isDefinition() || IsStructuralMatch(D, FoundDef)) {
+ // The record types structurally match, or the "from" translation
+ // unit only had a forward declaration anyway; call it the same
+ // declaration.
+ return Importer.Imported(D, FoundDef);
+ }
+ }
+ } else {
+ // Create a new specialization.
+ D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+ D->getTagKind(), DC,
+ Loc, ClassTemplate,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ /*PrevDecl=*/0);
+ D2->setSpecializationKind(D->getSpecializationKind());
+
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
+
+ // Import the qualifier, if any.
+ if (D->getQualifier()) {
+ NestedNameSpecifier *NNS = Importer.Import(D->getQualifier());
+ SourceRange NNSRange = Importer.Import(D->getQualifierRange());
+ D2->setQualifierInfo(NNS, NNSRange);
+ }
+
+ // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(D2);
+ }
+ Importer.Imported(D, D2);
+
+ if (D->isDefinition() && ImportDefinition(D, D2))
+ return 0;
+
+ return D2;
+}
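A hypothetical input exercising the path above: importing the explicit specialization first imports the primary ClassTemplateDecl, then findSpecialization() is consulted so an already-imported specialization with the same template arguments is reused rather than recreated.

    template<typename T> struct Wrap { T value; };
    template<> struct Wrap<int> { int value; long extra; };  // imported once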
+
//----------------------------------------------------------------------------
// Import Statements
//----------------------------------------------------------------------------
@@ -2811,7 +3761,7 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
Importer.Import(E->getQualifierRange()),
ToD,
Importer.Import(E->getLocation()),
- T,
+ T, E->getValueKind(),
/*FIXME:TemplateArgs=*/0);
}
@@ -2856,7 +3806,8 @@ Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
return 0;
return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
- T,
+ T, E->getValueKind(),
+ E->getObjectKind(),
Importer.Import(E->getOperatorLoc()));
}
@@ -2898,7 +3849,8 @@ Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
return 0;
return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
- T,
+ T, E->getValueKind(),
+ E->getObjectKind(),
Importer.Import(E->getOperatorLoc()));
}
@@ -2925,7 +3877,9 @@ Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
return new (Importer.getToContext())
CompoundAssignOperator(LHS, RHS, E->getOpcode(),
- T, CompLHSType, CompResultType,
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ CompLHSType, CompResultType,
Importer.Import(E->getOperatorLoc()));
}
@@ -2970,18 +3924,20 @@ Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
if (ImportCastPath(E, BasePath))
return 0;
- return CStyleCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
+ return CStyleCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
SubExpr, &BasePath, TInfo,
Importer.Import(E->getLParenLoc()),
Importer.Import(E->getRParenLoc()));
}
-ASTImporter::ASTImporter(Diagnostic &Diags,
- ASTContext &ToContext, FileManager &ToFileManager,
- ASTContext &FromContext, FileManager &FromFileManager)
+ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport)
: ToContext(ToContext), FromContext(FromContext),
ToFileManager(ToFileManager), FromFileManager(FromFileManager),
- Diags(Diags) {
+ Minimal(MinimalImport)
+{
ImportedDecls[FromContext.getTranslationUnitDecl()]
= ToContext.getTranslationUnitDecl();
}
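A minimal usage sketch of the revised constructor, assuming the caller already owns both context/file-manager pairs (the Diagnostic engine now comes from the contexts themselves rather than being passed in):

    void mergeInto(ASTContext &ToCtx, FileManager &ToFM,
                   ASTContext &FromCtx, FileManager &FromFM, Decl *FromD) {
      ASTImporter Importer(ToCtx, ToFM, FromCtx, FromFM,
                           /*MinimalImport=*/false);
      Importer.Import(FromD);  // maps FromD into ToCtx, caching the result
    }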
@@ -2991,23 +3947,25 @@ ASTImporter::~ASTImporter() { }
QualType ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return QualType();
+
+ const Type *fromTy = FromT.getTypePtr();
// Check whether we've already imported this type.
- llvm::DenseMap<Type *, Type *>::iterator Pos
- = ImportedTypes.find(FromT.getTypePtr());
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(fromTy);
if (Pos != ImportedTypes.end())
- return ToContext.getQualifiedType(Pos->second, FromT.getQualifiers());
+ return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
// Import the type
ASTNodeImporter Importer(*this);
- QualType ToT = Importer.Visit(FromT.getTypePtr());
+ QualType ToT = Importer.Visit(fromTy);
if (ToT.isNull())
return ToT;
// Record the imported type.
- ImportedTypes[FromT.getTypePtr()] = ToT.getTypePtr();
+ ImportedTypes[fromTy] = ToT.getTypePtr();
- return ToContext.getQualifiedType(ToT, FromT.getQualifiers());
+ return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
}
TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
@@ -3109,6 +4067,82 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
return 0;
}
+TemplateName ASTImporter::Import(TemplateName From) {
+ switch (From.getKind()) {
+ case TemplateName::Template:
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return TemplateName(ToTemplate);
+
+ return TemplateName();
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
+ UnresolvedSet<2> ToTemplates;
+ for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
+ E = FromStorage->end();
+ I != E; ++I) {
+ if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
+ ToTemplates.addDecl(To);
+ else
+ return TemplateName();
+ }
+ return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
+ ToTemplates.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
+ NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return ToContext.getQualifiedTemplateName(Qualifier,
+ QTN->hasTemplateKeyword(),
+ ToTemplate);
+
+ return TemplateName();
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = From.getAsDependentTemplateName();
+ NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (DTN->isIdentifier()) {
+ return ToContext.getDependentTemplateName(Qualifier,
+ Import(DTN->getIdentifier()));
+ }
+
+ return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = From.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *Param
+ = cast_or_null<TemplateTemplateParmDecl>(
+ Import(SubstPack->getParameterPack()));
+ if (!Param)
+ return TemplateName();
+
+ ASTNodeImporter Importer(*this);
+ TemplateArgument ArgPack
+ = Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
+ if (ArgPack.isNull())
+ return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Invalid template name kind");
+ return TemplateName();
+}
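Hypothetical C++ forms corresponding to the main TemplateName kinds handled above (illustrative only):

    #include <vector>
    template<typename T> struct Holder {
      std::vector<T> v;                   // QualifiedTemplate: std::vector
      typename T::template apply<int> a;  // DependentTemplate: T::apply
    };
    // a bare template-id found by unqualified lookup is the plain Template kind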
+
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
return SourceLocation();
@@ -3130,8 +4164,8 @@ SourceRange ASTImporter::Import(SourceRange FromRange) {
}
FileID ASTImporter::Import(FileID FromID) {
- llvm::DenseMap<unsigned, FileID>::iterator Pos
- = ImportedFileIDs.find(FromID.getHashValue());
+ llvm::DenseMap<FileID, FileID>::iterator Pos
+ = ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
return Pos->second;
@@ -3156,7 +4190,8 @@ FileID ASTImporter::Import(FileID FromID) {
FromSLoc.getFile().getFileCharacteristic());
} else {
// FIXME: We want to re-use the existing MemoryBuffer!
- const llvm::MemoryBuffer *FromBuf = Cache->getBuffer(getDiags(), FromSM);
+ const llvm::MemoryBuffer *FromBuf
+ = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
llvm::MemoryBuffer *ToBuf
= llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
FromBuf->getBufferIdentifier());
@@ -3164,10 +4199,21 @@ FileID ASTImporter::Import(FileID FromID) {
}
- ImportedFileIDs[FromID.getHashValue()] = ToID;
+ ImportedFileIDs[FromID] = ToID;
return ToID;
}
+void ASTImporter::ImportDefinition(Decl *From) {
+ Decl *To = Import(From);
+ if (!To)
+ return;
+
+ if (DeclContext *FromDC = dyn_cast<DeclContext>(From)) {
+ ASTNodeImporter Importer(*this);
+ Importer.ImportDeclContext(FromDC, true);
+ }
+}
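A sketch of when a client would call this, assuming an importer built with MinimalImport=true: it forces the members of one declaration across immediately instead of leaving them to be imported lazily.

    void importWholeClass(ASTImporter &Importer, CXXRecordDecl *FromClass) {
      Importer.ImportDefinition(FromClass);  // non-minimal import of members
    }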
+
DeclarationName ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
return DeclarationName();
@@ -3225,7 +4271,7 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
return DeclarationName();
}
-IdentifierInfo *ASTImporter::Import(IdentifierInfo *FromId) {
+IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
if (!FromId)
return 0;
@@ -3252,13 +4298,11 @@ DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
}
DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, ToContext.getSourceManager()),
- DiagID);
+ return ToContext.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, FromContext.getSourceManager()),
- DiagID);
+ return FromContext.getDiagnostics().Report(Loc, DiagID);
}
Decl *ASTImporter::Imported(Decl *From, Decl *To) {
@@ -3267,12 +4311,11 @@ Decl *ASTImporter::Imported(Decl *From, Decl *To) {
}
bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To) {
- llvm::DenseMap<Type *, Type *>::iterator Pos
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
= ImportedTypes.find(From.getTypePtr());
if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
return true;
- StructuralEquivalenceContext Ctx(FromContext, ToContext, Diags,
- NonEquivalentDecls);
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls);
return Ctx.IsStructurallyEquivalent(From, To);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
index 4b38d7a..943c43e 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXABI.h
+++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
@@ -15,6 +15,8 @@
#ifndef LLVM_CLANG_AST_CXXABI_H
#define LLVM_CLANG_AST_CXXABI_H
+#include "clang/AST/Type.h"
+
namespace clang {
class ASTContext;
@@ -28,6 +30,13 @@ public:
/// Returns the size of a member pointer in multiples of the target
/// pointer size.
virtual unsigned getMemberPointerSize(const MemberPointerType *MPT) const = 0;
+
+ /// Returns the default calling convention for C++ methods.
+ virtual CallingConv getDefaultMethodCallConv() const = 0;
+
+ /// Returns whether the given class is nearly empty, with just virtual
+ /// pointers and no data except possibly virtual bases.
+ virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
};
/// Creates an instance of a C++ ABI class.
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index c563c37..ca9ec18 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/DeclCXX.h"
#include <algorithm>
#include <set>
@@ -75,18 +76,21 @@ void CXXBasePaths::swap(CXXBasePaths &Other) {
std::swap(DetectedVirtual, Other.DetectedVirtual);
}
-bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base) const {
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const {
CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
/*DetectVirtual=*/false);
return isDerivedFrom(Base, Paths);
}
-bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) const {
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
+ CXXBasePaths &Paths) const {
if (getCanonicalDecl() == Base->getCanonicalDecl())
return false;
Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
- return lookupInBases(&FindBaseClass, Base->getCanonicalDecl(), Paths);
+ return lookupInBases(&FindBaseClass,
+ const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()),
+ Paths);
}
bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
@@ -662,3 +666,50 @@ CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
}
}
}
+
+static void
+AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ CXXIndirectPrimaryBaseSet& Bases) {
+ // If the record has a virtual primary base class, add it to our set.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual())
+ Bases.insert(Layout.getPrimaryBase());
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+}
+
+void
+CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
+ ASTContext &Context = getASTContext();
+
+ if (!getNumVBases())
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = bases_begin(),
+ E = bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index b7be02d..56db8c7 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Specifiers.h"
@@ -32,35 +33,140 @@ using namespace clang;
// NamedDecl Implementation
//===----------------------------------------------------------------------===//
+static const VisibilityAttr *GetExplicitVisibility(const Decl *d) {
+ // Use the most recent declaration of a variable.
+ if (const VarDecl *var = dyn_cast<VarDecl>(d))
+ return var->getMostRecentDeclaration()->getAttr<VisibilityAttr>();
+
+ // Use the most recent declaration of a function, and also handle
+ // function template specializations.
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(d)) {
+ if (const VisibilityAttr *attr
+ = fn->getMostRecentDeclaration()->getAttr<VisibilityAttr>())
+ return attr;
+
+ // If the function is a specialization of a template with an
+ // explicit visibility attribute, use that.
+ if (FunctionTemplateSpecializationInfo *templateInfo
+ = fn->getTemplateSpecializationInfo())
+ return templateInfo->getTemplate()->getTemplatedDecl()
+ ->getAttr<VisibilityAttr>();
+
+ return 0;
+ }
+
+ // Otherwise, just check the declaration itself first.
+ if (const VisibilityAttr *attr = d->getAttr<VisibilityAttr>())
+ return attr;
+
+ // If there wasn't explicit visibility there, and this is a
+ // specialization of a class template, check for visibility
+ // on the pattern.
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(d))
+ return spec->getSpecializedTemplate()->getTemplatedDecl()
+ ->getAttr<VisibilityAttr>();
+
+ return 0;
+}
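A hypothetical input showing why the most recent redeclaration is consulted: attributes are propagated onto later redeclarations, so the second declaration of g below is the one on which getAttr<VisibilityAttr>() fires.

    int g() __attribute__((visibility("hidden")));
    int g();  // inherits the attribute; this is the most recent declaration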
+
+static Visibility GetVisibilityFromAttr(const VisibilityAttr *A) {
+ switch (A->getVisibility()) {
+ case VisibilityAttr::Default:
+ return DefaultVisibility;
+ case VisibilityAttr::Hidden:
+ return HiddenVisibility;
+ case VisibilityAttr::Protected:
+ return ProtectedVisibility;
+ }
+ return DefaultVisibility;
+}
+
+typedef NamedDecl::LinkageInfo LinkageInfo;
+typedef std::pair<Linkage,Visibility> LVPair;
+
+static LVPair merge(LVPair L, LVPair R) {
+ return LVPair(minLinkage(L.first, R.first),
+ minVisibility(L.second, R.second));
+}
+
+static LVPair merge(LVPair L, LinkageInfo R) {
+ return LVPair(minLinkage(L.first, R.linkage()),
+ minVisibility(L.second, R.visibility()));
+}
+
+namespace {
+/// Flags controlling the computation of linkage and visibility.
+struct LVFlags {
+ bool ConsiderGlobalVisibility;
+ bool ConsiderVisibilityAttributes;
+
+ LVFlags() : ConsiderGlobalVisibility(true),
+ ConsiderVisibilityAttributes(true) {
+ }
+
+ /// \brief Returns a set of flags that is only useful for computing the
+ /// linkage, not the visibility, of a declaration.
+ static LVFlags CreateOnlyDeclLinkage() {
+ LVFlags F;
+ F.ConsiderGlobalVisibility = false;
+ F.ConsiderVisibilityAttributes = false;
+ return F;
+ }
+
+ /// Returns a set of flags, otherwise based on these, that ignores
+ /// all sources of visibility except template arguments.
+ LVFlags onlyTemplateVisibility() const {
+ LVFlags F = *this;
+ F.ConsiderGlobalVisibility = false;
+ F.ConsiderVisibilityAttributes = false;
+ return F;
+ }
+};
+} // end anonymous namespace
+
/// \brief Get the most restrictive linkage for the types in the given
/// template parameter list.
-static Linkage
-getLinkageForTemplateParameterList(const TemplateParameterList *Params) {
- Linkage L = ExternalLinkage;
+static LVPair
+getLVForTemplateParameterList(const TemplateParameterList *Params) {
+ LVPair LV(ExternalLinkage, DefaultVisibility);
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ if (!T->isDependentType())
+ LV = merge(LV, T->getLinkageAndVisibility());
+ }
+ continue;
+ }
+
if (!NTTP->getType()->isDependentType()) {
- L = minLinkage(L, NTTP->getType()->getLinkage());
+ LV = merge(LV, NTTP->getType()->getLinkageAndVisibility());
continue;
}
+ }
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(*P)) {
- L = minLinkage(L,
- getLinkageForTemplateParameterList(TTP->getTemplateParameters()));
+ LV = merge(LV, getLVForTemplateParameterList(TTP->getTemplateParameters()));
}
}
- return L;
+ return LV;
}
+/// getLVForDecl - Get the linkage and visibility for the given declaration.
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags F);
+
/// \brief Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
-static Linkage getLinkageForTemplateArgumentList(const TemplateArgument *Args,
- unsigned NumArgs) {
- Linkage L = ExternalLinkage;
+static LVPair getLVForTemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs,
+ LVFlags &F) {
+ LVPair LV(ExternalLinkage, DefaultVisibility);
for (unsigned I = 0; I != NumArgs; ++I) {
switch (Args[I].getKind()) {
@@ -70,40 +176,43 @@ static Linkage getLinkageForTemplateArgumentList(const TemplateArgument *Args,
break;
case TemplateArgument::Type:
- L = minLinkage(L, Args[I].getAsType()->getLinkage());
+ LV = merge(LV, Args[I].getAsType()->getLinkageAndVisibility());
break;
case TemplateArgument::Declaration:
- if (NamedDecl *ND = dyn_cast<NamedDecl>(Args[I].getAsDecl()))
- L = minLinkage(L, ND->getLinkage());
- if (ValueDecl *VD = dyn_cast<ValueDecl>(Args[I].getAsDecl()))
- L = minLinkage(L, VD->getType()->getLinkage());
+ // The decl can validly be null as the representation of nullptr
+ // arguments, valid only in C++0x.
+ if (Decl *D = Args[I].getAsDecl()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ LV = merge(LV, getLVForDecl(ND, F));
+ }
break;
case TemplateArgument::Template:
- if (TemplateDecl *Template
- = Args[I].getAsTemplate().getAsTemplateDecl())
- L = minLinkage(L, Template->getLinkage());
+ case TemplateArgument::TemplateExpansion:
+ if (TemplateDecl *Template
+ = Args[I].getAsTemplateOrTemplatePattern().getAsTemplateDecl())
+ LV = merge(LV, getLVForDecl(Template, F));
break;
case TemplateArgument::Pack:
- L = minLinkage(L,
- getLinkageForTemplateArgumentList(Args[I].pack_begin(),
- Args[I].pack_size()));
+ LV = merge(LV, getLVForTemplateArgumentList(Args[I].pack_begin(),
+ Args[I].pack_size(),
+ F));
break;
}
}
- return L;
+ return LV;
}
-static Linkage
-getLinkageForTemplateArgumentList(const TemplateArgumentList &TArgs) {
- return getLinkageForTemplateArgumentList(TArgs.getFlatArgumentList(),
- TArgs.flat_size());
+static LVPair
+getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
+ LVFlags &F) {
+ return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
}
-static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
+static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
"Not a name having namespace scope");
ASTContext &Context = D->getASTContext();
@@ -117,7 +226,7 @@ static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
// Explicitly declared static.
if (Var->getStorageClass() == SC_Static)
- return InternalLinkage;
+ return LinkageInfo::internal();
// - an object or reference that is explicitly declared const
// and neither explicitly declared extern nor previously
@@ -135,7 +244,7 @@ static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
FoundExtern = true;
if (!FoundExtern)
- return InternalLinkage;
+ return LinkageInfo::internal();
}
} else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
// C++ [temp]p4:
@@ -150,23 +259,88 @@ static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
// Explicitly declared static.
if (Function->getStorageClass() == SC_Static)
- return InternalLinkage;
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
} else if (const FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
// - a data member of an anonymous union.
if (cast<RecordDecl>(Field->getDeclContext())->isAnonymousStructOrUnion())
- return InternalLinkage;
+ return LinkageInfo::internal();
+ }
+
+ if (D->isInAnonymousNamespace())
+ return LinkageInfo::uniqueExternal();
+
+ // Set up the defaults.
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for an object has file
+ // scope and no storage-class specifier, its linkage is
+ // external.
+ LinkageInfo LV;
+
+ if (F.ConsiderVisibilityAttributes) {
+ if (const VisibilityAttr *VA = GetExplicitVisibility(D)) {
+ LV.setVisibility(GetVisibilityFromAttr(VA), true);
+ F.ConsiderGlobalVisibility = false;
+ } else {
+ // If we're declared in a namespace with a visibility attribute,
+ // use that namespace's visibility, but don't call it explicit.
+ for (const DeclContext *DC = D->getDeclContext();
+ !isa<TranslationUnitDecl>(DC);
+ DC = DC->getParent()) {
+ if (!isa<NamespaceDecl>(DC)) continue;
+ if (const VisibilityAttr *VA =
+ cast<NamespaceDecl>(DC)->getAttr<VisibilityAttr>()) {
+ LV.setVisibility(GetVisibilityFromAttr(VA), false);
+ F.ConsiderGlobalVisibility = false;
+ break;
+ }
+ }
+ }
}
// C++ [basic.link]p4:
-
+
// A name having namespace scope has external linkage if it is the
// name of
//
// - an object or reference, unless it has internal linkage; or
if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ // GCC applies the following optimization to variables and static
+ // data members, but not to functions:
+ //
+ // Modify the variable's LV by the LV of its type unless this is
+ // C or extern "C". This follows from [basic.link]p9:
+ // A type without linkage shall not be used as the type of a
+ // variable or function with external linkage unless
+ // - the entity has C language linkage, or
+ // - the entity is declared within an unnamed namespace, or
+ // - the entity is not used or is defined in the same
+ // translation unit.
+ // and [basic.link]p10:
+ // ...the types specified by all declarations referring to a
+ // given variable or function shall be identical...
+ // C does not have an equivalent rule.
+ //
+ // Ignore this if we've got an explicit attribute; the user
+ // probably knows what they're doing.
+ //
+ // Note that we don't want to make the variable non-external
+ // because of this, but unique-external linkage suits us.
+ if (Context.getLangOptions().CPlusPlus && !Var->isExternC()) {
+ LVPair TypeLV = Var->getType()->getLinkageAndVisibility();
+ if (TypeLV.first != ExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ if (!LV.visibilityExplicit())
+ LV.mergeVisibility(TypeLV.second);
+ }
+
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+
if (!Context.getLangOptions().CPlusPlus &&
(Var->getStorageClass() == SC_Extern ||
Var->getStorageClass() == SC_PrivateExtern)) {
+
// C99 6.2.2p4:
// For an identifier declared with the storage-class specifier
// extern in a scope in which a prior declaration of that
@@ -177,23 +351,22 @@ static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
if (const VarDecl *PrevVar = Var->getPreviousDeclaration()) {
- if (Linkage L = PrevVar->getLinkage())
- return L;
+ LinkageInfo PrevLV = getLVForDecl(PrevVar, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
}
}
- // C99 6.2.2p5:
- // If the declaration of an identifier for an object has file
- // scope and no storage-class specifier, its linkage is
- // external.
- if (Var->isInAnonymousNamespace())
- return UniqueExternalLinkage;
+ // - a function, unless it has internal linkage; or
+ } else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // In theory, we can modify the function's LV by the LV of its
+ // type unless it has C linkage (see comment above about variables
+ // for justification). In practice, GCC doesn't do this, so it's
+ // just too painful to make work.
- return ExternalLinkage;
- }
+ if (Function->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
- // - a function, unless it has internal linkage; or
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
// C99 6.2.2p5:
// If the declaration of an identifier for a function has no
// storage-class specifier, its linkage is determined exactly
@@ -213,141 +386,300 @@ static Linkage getLinkageForNamespaceScopeDecl(const NamedDecl *D) {
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
if (const FunctionDecl *PrevFunc = Function->getPreviousDeclaration()) {
- if (Linkage L = PrevFunc->getLinkage())
- return L;
+ LinkageInfo PrevLV = getLVForDecl(PrevFunc, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
}
}
- if (Function->isInAnonymousNamespace())
- return UniqueExternalLinkage;
+ // In C++, then if the type of the function uses a type with
+ // unique-external linkage, it's not legally usable from outside
+ // this translation unit. However, we should use the C linkage
+ // rules instead for extern "C" declarations.
+ if (Context.getLangOptions().CPlusPlus && !Function->isExternC() &&
+ Function->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
if (FunctionTemplateSpecializationInfo *SpecInfo
= Function->getTemplateSpecializationInfo()) {
- Linkage L = SpecInfo->getTemplate()->getLinkage();
+ LV.merge(getLVForDecl(SpecInfo->getTemplate(),
+ F.onlyTemplateVisibility()));
const TemplateArgumentList &TemplateArgs = *SpecInfo->TemplateArguments;
- L = minLinkage(L, getLinkageForTemplateArgumentList(TemplateArgs));
- return L;
+ LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
}
- return ExternalLinkage;
- }
-
// - a named class (Clause 9), or an unnamed class defined in a
// typedef declaration in which the class has the typedef name
// for linkage purposes (7.1.3); or
// - a named enumeration (7.2), or an unnamed enumeration
// defined in a typedef declaration in which the enumeration
// has the typedef name for linkage purposes (7.1.3); or
- if (const TagDecl *Tag = dyn_cast<TagDecl>(D))
- if (Tag->getDeclName() || Tag->getTypedefForAnonDecl()) {
- if (Tag->isInAnonymousNamespace())
- return UniqueExternalLinkage;
-
- // If this is a class template specialization, consider the
- // linkage of the template and template arguments.
- if (const ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
- const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- Linkage L = getLinkageForTemplateArgumentList(TemplateArgs);
- return minLinkage(L, Spec->getSpecializedTemplate()->getLinkage());
- }
+ } else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
+ // Unnamed tags have no linkage.
+ if (!Tag->getDeclName() && !Tag->getTypedefForAnonDecl())
+ return LinkageInfo::none();
+
+ // If this is a class template specialization, consider the
+ // linkage of the template and template arguments.
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
+ // From the template.
+ LV.merge(getLVForDecl(Spec->getSpecializedTemplate(),
+ F.onlyTemplateVisibility()));
- return ExternalLinkage;
+ // The arguments at which the template was instantiated.
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
}
+ // Consider -fvisibility unless the type has C linkage.
+ if (F.ConsiderGlobalVisibility)
+ F.ConsiderGlobalVisibility =
+ (Context.getLangOptions().CPlusPlus &&
+ !Tag->getDeclContext()->isExternCContext());
+
// - an enumerator belonging to an enumeration with external linkage;
- if (isa<EnumConstantDecl>(D)) {
- Linkage L = cast<NamedDecl>(D->getDeclContext())->getLinkage();
- if (isExternalLinkage(L))
- return L;
- }
+ } else if (isa<EnumConstantDecl>(D)) {
+ LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()), F);
+ if (!isExternalLinkage(EnumLV.linkage()))
+ return LinkageInfo::none();
+ LV.merge(EnumLV);
// - a template, unless it is a function template that has
// internal linkage (Clause 14);
- if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
- if (D->isInAnonymousNamespace())
- return UniqueExternalLinkage;
-
- return getLinkageForTemplateParameterList(
- Template->getTemplateParameters());
- }
+ } else if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
+ LV.merge(getLVForTemplateParameterList(Template->getTemplateParameters()));
// - a namespace (7.3), unless it is declared within an unnamed
// namespace.
- if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace())
- return ExternalLinkage;
+ } else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
+ return LV;
+
+ // By extension, we assign external linkage to Objective-C
+ // interfaces.
+ } else if (isa<ObjCInterfaceDecl>(D)) {
+ // falls out of the chain, keeping the default external linkage
- return NoLinkage;
+ // Everything not covered here has no linkage.
+ } else {
+ return LinkageInfo::none();
+ }
+
+ // If we ended up with non-external linkage, visibility should
+ // always be default.
+ if (LV.linkage() != ExternalLinkage)
+ return LinkageInfo(LV.linkage(), DefaultVisibility, false);
+
+ // If we didn't end up with hidden visibility, consider attributes
+ // and -fvisibility.
+ if (F.ConsiderGlobalVisibility)
+ LV.mergeVisibility(Context.getLangOptions().getVisibilityMode());
+
+ return LV;
}
-static Linkage getLinkageForClassMember(const NamedDecl *D) {
+static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
+ // Only certain class members have linkage. Note that fields don't
+ // really have linkage, but it's convenient to say they do for the
+ // purposes of calculating linkage of pointer-to-data-member
+ // template arguments.
if (!(isa<CXXMethodDecl>(D) ||
isa<VarDecl>(D) ||
+ isa<FieldDecl>(D) ||
(isa<TagDecl>(D) &&
(D->getDeclName() || cast<TagDecl>(D)->getTypedefForAnonDecl()))))
- return NoLinkage;
+ return LinkageInfo::none();
+
+ LinkageInfo LV;
- // Class members only have linkage if their class has external linkage.
- Linkage L = cast<RecordDecl>(D->getDeclContext())->getLinkage();
- if (!isExternalLinkage(L)) return NoLinkage;
+ // The flags we're going to use to compute the class's visibility.
+ LVFlags ClassF = F;
+
+ // If we have an explicit visibility attribute, merge that in.
+ if (F.ConsiderVisibilityAttributes) {
+ if (const VisibilityAttr *VA = GetExplicitVisibility(D)) {
+ LV.mergeVisibility(GetVisibilityFromAttr(VA), true);
+
+ // Ignore global visibility later, but not this attribute.
+ F.ConsiderGlobalVisibility = false;
+
+ // Ignore both global visibility and attributes when computing our
+ // parent's visibility.
+ ClassF = F.onlyTemplateVisibility();
+ }
+ }
+
+ // Class members only have linkage if their class has external
+ // linkage.
+ LV.merge(getLVForDecl(cast<RecordDecl>(D->getDeclContext()), ClassF));
+ if (!isExternalLinkage(LV.linkage()))
+ return LinkageInfo::none();
// If the class already has unique-external linkage, we can't improve.
- if (L == UniqueExternalLinkage) return UniqueExternalLinkage;
+ if (LV.linkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
- // If this is a method template specialization, use the linkage for
- // the template parameters and arguments.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (FunctionTemplateSpecializationInfo *SpecInfo
+ // If the type of the function uses a type with unique-external
+ // linkage, it's not legally usable from outside this translation unit.
+ if (MD->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+
+ // If this is a method template specialization, use the linkage for
+ // the template parameters and arguments.
+ if (FunctionTemplateSpecializationInfo *Spec
= MD->getTemplateSpecializationInfo()) {
- Linkage ArgLinkage =
- getLinkageForTemplateArgumentList(*SpecInfo->TemplateArguments);
- Linkage ParamLinkage =
- getLinkageForTemplateParameterList(
- SpecInfo->getTemplate()->getTemplateParameters());
- return minLinkage(ArgLinkage, ParamLinkage);
+ LV.merge(getLVForTemplateArgumentList(*Spec->TemplateArguments, F));
+ LV.merge(getLVForTemplateParameterList(
+ Spec->getTemplate()->getTemplateParameters()));
+
+ TSK = Spec->getTemplateSpecializationKind();
+ } else if (MemberSpecializationInfo *MSI =
+ MD->getMemberSpecializationInfo()) {
+ TSK = MSI->getTemplateSpecializationKind();
+ }
+
+ // If we're paying attention to global visibility, apply
+ // -finline-visibility-hidden if this is an inline method.
+ //
+ // Note that ConsiderGlobalVisibility doesn't yet have information
+ // about whether containing classes have visibility attributes,
+ // and that's intentional.
+ if (TSK != TSK_ExplicitInstantiationDeclaration &&
+ F.ConsiderGlobalVisibility &&
+ MD->getASTContext().getLangOptions().InlineVisibilityHidden) {
+ // InlineVisibilityHidden only applies to definitions, and
+ // isInlined() only gives meaningful answers on definitions
+ // anyway.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isInlined())
+ LV.setVisibility(HiddenVisibility);
+ }
+
+ // Note that in contrast to basically every other situation, we
+ // *do* apply -fvisibility to method declarations.
+
+ } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // Merge template argument/parameter information for member
+ // class template specializations.
+ LV.merge(getLVForTemplateArgumentList(Spec->getTemplateArgs(), F));
+ LV.merge(getLVForTemplateParameterList(
+ Spec->getSpecializedTemplate()->getTemplateParameters()));
}
- // Similarly for member class template specializations.
- } else if (const ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- Linkage ArgLinkage =
- getLinkageForTemplateArgumentList(Spec->getTemplateArgs());
- Linkage ParamLinkage =
- getLinkageForTemplateParameterList(
- Spec->getSpecializedTemplate()->getTemplateParameters());
- return minLinkage(ArgLinkage, ParamLinkage);
+ // Static data members.
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Modify the variable's linkage by its type, but ignore the
+ // type's visibility unless it's a definition.
+ LVPair TypeLV = VD->getType()->getLinkageAndVisibility();
+ if (TypeLV.first != ExternalLinkage)
+ LV.mergeLinkage(UniqueExternalLinkage);
+ if (!LV.visibilityExplicit())
+ LV.mergeVisibility(TypeLV.second);
}
- return ExternalLinkage;
+ F.ConsiderGlobalVisibility &= !LV.visibilityExplicit();
+
+ // Apply -fvisibility if desired.
+ if (F.ConsiderGlobalVisibility && LV.visibility() != HiddenVisibility) {
+ LV.mergeVisibility(D->getASTContext().getLangOptions().getVisibilityMode());
+ }
+
+ return LV;
+}
+
+static void clearLinkageForClass(const CXXRecordDecl *record) {
+ for (CXXRecordDecl::decl_iterator
+ i = record->decls_begin(), e = record->decls_end(); i != e; ++i) {
+ Decl *child = *i;
+ if (isa<NamedDecl>(child))
+ cast<NamedDecl>(child)->ClearLinkageCache();
+ }
+}
+
+void NamedDecl::ClearLinkageCache() {
+ // Note that we can't skip clearing the linkage of children just
+ // because the parent doesn't have cached linkage: we don't cache
+ // when computing linkage for parent contexts.
+
+ HasCachedLinkage = 0;
+
+ // If we're changing the linkage of a class, we need to reset the
+ // linkage of child declarations, too.
+ if (const CXXRecordDecl *record = dyn_cast<CXXRecordDecl>(this))
+ clearLinkageForClass(record);
+
+ if (ClassTemplateDecl *temp =
+ dyn_cast<ClassTemplateDecl>(const_cast<NamedDecl*>(this))) {
+ // Clear linkage for the template pattern.
+ CXXRecordDecl *record = temp->getTemplatedDecl();
+ record->HasCachedLinkage = 0;
+ clearLinkageForClass(record);
+
+ // We need to clear linkage for specializations, too.
+ for (ClassTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+ }
+
+ // Clear cached linkage for function template decls, too.
+ if (FunctionTemplateDecl *temp =
+ dyn_cast<FunctionTemplateDecl>(const_cast<NamedDecl*>(this)))
+ for (FunctionTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+
}
Linkage NamedDecl::getLinkage() const {
+ if (HasCachedLinkage) {
+ assert(Linkage(CachedLinkage) ==
+ getLVForDecl(this, LVFlags::CreateOnlyDeclLinkage()).linkage());
+ return Linkage(CachedLinkage);
+ }
+
+ CachedLinkage = getLVForDecl(this,
+ LVFlags::CreateOnlyDeclLinkage()).linkage();
+ HasCachedLinkage = 1;
+ return Linkage(CachedLinkage);
+}
+LinkageInfo NamedDecl::getLinkageAndVisibility() const {
+ LinkageInfo LI = getLVForDecl(this, LVFlags());
+ assert(!HasCachedLinkage || Linkage(CachedLinkage) == LI.linkage());
+ HasCachedLinkage = 1;
+ CachedLinkage = LI.linkage();
+ return LI;
+}
+
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
// Objective-C: treat all Objective-C declarations as having external
// linkage.
- switch (getKind()) {
+ switch (D->getKind()) {
default:
break;
+ case Decl::TemplateTemplateParm: // count these as external
+ case Decl::NonTypeTemplateParm:
case Decl::ObjCAtDefsField:
case Decl::ObjCCategory:
case Decl::ObjCCategoryImpl:
- case Decl::ObjCClass:
case Decl::ObjCCompatibleAlias:
case Decl::ObjCForwardProtocol:
case Decl::ObjCImplementation:
- case Decl::ObjCInterface:
- case Decl::ObjCIvar:
case Decl::ObjCMethod:
case Decl::ObjCProperty:
case Decl::ObjCPropertyImpl:
case Decl::ObjCProtocol:
- return ExternalLinkage;
+ return LinkageInfo::external();
}
// Handle linkage for namespace-scope names.
- if (getDeclContext()->getRedeclContext()->isFileContext())
- if (Linkage L = getLinkageForNamespaceScopeDecl(this))
- return L;
+ if (D->getDeclContext()->getRedeclContext()->isFileContext())
+ return getLVForNamespaceScopeDecl(D, Flags);
// C++ [basic.link]p5:
// In addition, a member function, static data member, a named
@@ -356,8 +688,8 @@ Linkage NamedDecl::getLinkage() const {
// that the class or enumeration has the typedef name for linkage
// purposes (7.1.3), has external linkage if the name of the class
// has external linkage.
- if (getDeclContext()->isRecord())
- return getLinkageForClassMember(this);
+ if (D->getDeclContext()->isRecord())
+ return getLVForClassMember(D, Flags);
// C++ [basic.link]p6:
// The name of a function declared in block scope and the name of
@@ -370,36 +702,54 @@ Linkage NamedDecl::getLinkage() const {
// one such matching entity, the program is ill-formed. Otherwise,
// if no matching entity is found, the block scope entity receives
// external linkage.
- if (getLexicalDeclContext()->isFunctionOrMethod()) {
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
- if (Function->getPreviousDeclaration())
- if (Linkage L = Function->getPreviousDeclaration()->getLinkage())
- return L;
-
+ if (D->getLexicalDeclContext()->isFunctionOrMethod()) {
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace())
- return UniqueExternalLinkage;
+ return LinkageInfo::uniqueExternal();
- return ExternalLinkage;
+ LinkageInfo LV;
+ if (Flags.ConsiderVisibilityAttributes) {
+ if (const VisibilityAttr *VA = GetExplicitVisibility(Function))
+ LV.setVisibility(GetVisibilityFromAttr(VA));
+ }
+
+ if (const FunctionDecl *Prev = Function->getPreviousDeclaration()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
}
- if (const VarDecl *Var = dyn_cast<VarDecl>(this))
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D))
if (Var->getStorageClass() == SC_Extern ||
Var->getStorageClass() == SC_PrivateExtern) {
- if (Var->getPreviousDeclaration())
- if (Linkage L = Var->getPreviousDeclaration()->getLinkage())
- return L;
-
if (Var->isInAnonymousNamespace())
- return UniqueExternalLinkage;
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility);
+ else if (Flags.ConsiderVisibilityAttributes) {
+ if (const VisibilityAttr *VA = GetExplicitVisibility(Var))
+ LV.setVisibility(GetVisibilityFromAttr(VA));
+ }
+
+ if (const VarDecl *Prev = Var->getPreviousDeclaration()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
- return ExternalLinkage;
+ return LV;
}
}
// C++ [basic.link]p6:
// Names not covered by these rules have no linkage.
- return NoLinkage;
- }
+ return LinkageInfo::none();
+}
std::string NamedDecl::getQualifiedNameAsString() const {
return getQualifiedNameAsString(getASTContext().getLangOptions());
@@ -430,8 +780,8 @@ std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
std::string TemplateArgsStr
= TemplateSpecializationType::PrintTemplateArgumentList(
- TemplateArgs.getFlatArgumentList(),
- TemplateArgs.flat_size(),
+ TemplateArgs.data(),
+ TemplateArgs.size(),
P);
OS << Spec->getName() << TemplateArgsStr;
} else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
@@ -514,6 +864,10 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
return cast<UsingShadowDecl>(this)->getTargetDecl() ==
cast<UsingShadowDecl>(OldD)->getTargetDecl();
+ if (isa<UsingDecl>(this) && isa<UsingDecl>(OldD))
+ return cast<UsingDecl>(this)->getTargetNestedNameDecl() ==
+ cast<UsingDecl>(OldD)->getTargetNestedNameDecl();
+
// For non-function declarations, if the declarations are of the
// same kind then this must be a redeclaration, or semantic analysis
// would not have given us the new declaration.
@@ -545,7 +899,7 @@ bool NamedDecl::isCXXInstanceMember() const {
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
- if (isa<FieldDecl>(D))
+ if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D))
return true;
if (isa<CXXMethodDecl>(D))
return cast<CXXMethodDecl>(D)->isInstance();
@@ -655,6 +1009,14 @@ VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return new (C) VarDecl(Var, DC, L, Id, T, TInfo, S, SCAsWritten);
}
+void VarDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForVariable(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ SClass = SC;
+}
+
SourceLocation VarDecl::getInnerLocStart() const {
SourceLocation Start = getTypeSpecStartLoc();
if (Start.isInvalid())
@@ -785,6 +1147,17 @@ VarDecl *VarDecl::getDefinition() {
return 0;
}
+VarDecl::DefinitionKind VarDecl::hasDefinition() const {
+ DefinitionKind Kind = DeclarationOnly;
+
+ const VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I)
+ Kind = std::max(Kind, (*I)->isThisDeclarationADefinition());
+
+ return Kind;
+}
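
The new hasDefinition() folds the whole redeclaration chain down to the strongest DefinitionKind via std::max, which works because the enumerators are ordered from weakest to strongest. A self-contained model of that fold (the enumerator names mirror Clang's ordering, but the types are stand-ins):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // The trick relies on enumerator order: a later enumerator always
    // "wins" under std::max.
    enum DefinitionKind { DeclarationOnly, TentativeDefinition, Definition };

    DefinitionKind hasDefinition(const std::vector<DefinitionKind> &Redecls) {
      DefinitionKind Kind = DeclarationOnly;
      for (DefinitionKind K : Redecls)
        Kind = std::max(Kind, K);
      return Kind;
    }

    int main() {
      // extern int x; int x;  -> the chain contains a real definition.
      assert(hasDefinition({DeclarationOnly, Definition}) == Definition);
      // Two tentative definitions never upgrade to a full definition.
      assert(hasDefinition({TentativeDefinition, TentativeDefinition})
             == TentativeDefinition);
      return 0;
    }
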
+
const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
redecl_iterator I = redecls_begin(), E = redecls_end();
while (I != E && !I->getInit())
@@ -883,15 +1256,14 @@ Expr *ParmVarDecl::getDefaultArg() {
"Default argument is not yet instantiated!");
Expr *Arg = getInit();
- if (CXXExprWithTemporaries *E = dyn_cast_or_null<CXXExprWithTemporaries>(Arg))
+ if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
return E->getSubExpr();
return Arg;
}
unsigned ParmVarDecl::getNumDefaultArgTemporaries() const {
- if (const CXXExprWithTemporaries *E =
- dyn_cast<CXXExprWithTemporaries>(getInit()))
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(getInit()))
return E->getNumTemporaries();
return 0;
@@ -901,7 +1273,7 @@ CXXTemporary *ParmVarDecl::getDefaultArgTemporary(unsigned i) {
assert(getNumDefaultArgTemporaries() &&
"Default arguments does not have any temporaries!");
- CXXExprWithTemporaries *E = cast<CXXExprWithTemporaries>(getInit());
+ ExprWithCleanups *E = cast<ExprWithCleanups>(getInit());
return E->getTemporary(i);
}
@@ -915,6 +1287,10 @@ SourceRange ParmVarDecl::getDefaultArgRange() const {
return SourceRange();
}
+bool ParmVarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
//===----------------------------------------------------------------------===//
// FunctionDecl Implementation
//===----------------------------------------------------------------------===//
@@ -926,8 +1302,8 @@ void FunctionDecl::getNameForDiagnostic(std::string &S,
const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
if (TemplateArgs)
S += TemplateSpecializationType::PrintTemplateArgumentList(
- TemplateArgs->getFlatArgumentList(),
- TemplateArgs->flat_size(),
+ TemplateArgs->data(),
+ TemplateArgs->size(),
Policy);
}
@@ -966,6 +1342,13 @@ void FunctionDecl::setBody(Stmt *B) {
EndRangeLoc = B->getLocEnd();
}
+void FunctionDecl::setPure(bool P) {
+ IsPure = P;
+ if (P)
+ if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ Parent->markedVirtualFunctionPure();
+}
+
bool FunctionDecl::isMain() const {
ASTContext &Context = getASTContext();
return !Context.getLangOptions().Freestanding &&
@@ -994,7 +1377,7 @@ bool FunctionDecl::isExternC() const {
break;
}
- return false;
+ return isMain();
}
bool FunctionDecl::isGlobal() const {
@@ -1027,6 +1410,9 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
FunTmpl->setPreviousDeclaration(PrevFunTmpl);
}
+
+ if (PrevDecl->IsInline)
+ IsInline = true;
}
const FunctionDecl *FunctionDecl::getCanonicalDecl() const {
@@ -1037,6 +1423,14 @@ FunctionDecl *FunctionDecl::getCanonicalDecl() {
return getFirstDeclaration();
}
+void FunctionDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForFunction(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ SClass = SC;
+}
+
/// \brief Returns a value indicating whether this function
/// corresponds to a builtin function.
///
@@ -1083,7 +1477,7 @@ unsigned FunctionDecl::getBuiltinID() const {
/// getNumParams - Return the number of parameters this function must have
-/// based on its FunctionType. This is the length of the PararmInfo array
+/// based on its FunctionType. This is the length of the ParamInfo array
/// after it has been created.
unsigned FunctionDecl::getNumParams() const {
const FunctionType *FT = getType()->getAs<FunctionType>();
@@ -1093,13 +1487,14 @@ unsigned FunctionDecl::getNumParams() const {
}
-void FunctionDecl::setParams(ParmVarDecl **NewParamInfo, unsigned NumParams) {
+void FunctionDecl::setParams(ASTContext &C,
+ ParmVarDecl **NewParamInfo, unsigned NumParams) {
assert(ParamInfo == 0 && "Already has param info!");
assert(NumParams == getNumParams() && "Parameter count mismatch!");
// Zero params -> null pointer.
if (NumParams) {
- void *Mem = getASTContext().Allocate(sizeof(ParmVarDecl*)*NumParams);
+ void *Mem = C.Allocate(sizeof(ParmVarDecl*)*NumParams);
ParamInfo = new (Mem) ParmVarDecl*[NumParams];
memcpy(ParamInfo, NewParamInfo, sizeof(ParmVarDecl*)*NumParams);
@@ -1113,25 +1508,40 @@ void FunctionDecl::setParams(ParmVarDecl **NewParamInfo, unsigned NumParams) {
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
-/// arguments (in C++).
+/// arguments (in C++) or the last parameter is a parameter pack.
unsigned FunctionDecl::getMinRequiredArguments() const {
- unsigned NumRequiredArgs = getNumParams();
- while (NumRequiredArgs > 0
- && getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return getNumParams();
+
+ unsigned NumRequiredArgs = getNumParams();
+
+ // If the last parameter is a parameter pack, we don't need an argument for
+ // it.
+ if (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs - 1)->isParameterPack())
+ --NumRequiredArgs;
+
+ // If this parameter has a default argument, we don't need an argument for
+ // it.
+ while (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
--NumRequiredArgs;
+ // We might have parameter packs before the end. These can't be deduced,
+ // but they can still handle multiple arguments.
+ unsigned ArgIdx = NumRequiredArgs;
+ while (ArgIdx > 0) {
+ if (getParamDecl(ArgIdx - 1)->isParameterPack())
+ NumRequiredArgs = ArgIdx;
+
+ --ArgIdx;
+ }
+
return NumRequiredArgs;
}
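
The reworked getMinRequiredArguments() first drops a trailing parameter pack, then trailing defaulted parameters, and finally rescans forward so that a pack that is not last still raises the required-argument floor. The same control flow over a plain flag array (hypothetical Param struct, illustration only):

    #include <cassert>
    #include <vector>

    struct Param { bool HasDefaultArg; bool IsParameterPack; };

    unsigned minRequiredArguments(const std::vector<Param> &Params,
                                  bool CPlusPlus) {
      if (!CPlusPlus)
        return static_cast<unsigned>(Params.size());

      unsigned NumRequiredArgs = static_cast<unsigned>(Params.size());

      // A trailing parameter pack needs no argument.
      if (NumRequiredArgs > 0 && Params[NumRequiredArgs - 1].IsParameterPack)
        --NumRequiredArgs;

      // Neither do trailing parameters with default arguments.
      while (NumRequiredArgs > 0 &&
             Params[NumRequiredArgs - 1].HasDefaultArg)
        --NumRequiredArgs;

      // A pack before the end can't be dropped: arguments are still
      // required up to and including its position.
      unsigned ArgIdx = NumRequiredArgs;
      while (ArgIdx > 0) {
        if (Params[ArgIdx - 1].IsParameterPack)
          NumRequiredArgs = ArgIdx;
        --ArgIdx;
      }
      return NumRequiredArgs;
    }

    int main() {
      // void f(int, int = 0, Ts...): only the first argument is required.
      assert(minRequiredArguments({{false, false}, {true, false}, {false, true}},
                                  true) == 1);
      return 0;
    }
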
bool FunctionDecl::isInlined() const {
- // FIXME: This is not enough. Consider:
- //
- // inline void f();
- // void f() { }
- //
- // f is inlined, but does not have inline specified.
- // To fix this we should add an 'inline' flag to FunctionDecl.
- if (isInlineSpecified())
+ if (IsInline)
return true;
if (isa<CXXMethodDecl>(this)) {
@@ -1185,20 +1595,22 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
ASTContext &Context = getASTContext();
if (!Context.getLangOptions().C99 || hasAttr<GNUInlineAttr>()) {
- // GNU inline semantics. Based on a number of examples, we came up with the
- // following heuristic: if the "inline" keyword is present on a
- // declaration of the function but "extern" is not present on that
- // declaration, then the symbol is externally visible. Otherwise, the GNU
- // "extern inline" semantics applies and the symbol is not externally
- // visible.
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then this inline definition is
+ // externally visible.
+ if (!(isInlineSpecified() && getStorageClassAsWritten() == SC_Extern))
+ return true;
+
+ // If any declaration is 'inline' but not 'extern', then this definition
+ // is externally visible.
for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
Redecl != RedeclEnd;
++Redecl) {
- if (Redecl->isInlineSpecified() && Redecl->getStorageClass() != SC_Extern)
+ if (Redecl->isInlineSpecified() &&
+ Redecl->getStorageClassAsWritten() != SC_Extern)
return true;
- }
+ }
- // GNU "extern inline" semantics; no externally visible symbol.
return false;
}
@@ -1271,12 +1683,13 @@ MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
}
void
-FunctionDecl::setInstantiationOfMemberFunction(FunctionDecl *FD,
+FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
+ FunctionDecl *FD,
TemplateSpecializationKind TSK) {
assert(TemplateOrSpecialization.isNull() &&
"Member function is already a specialization");
MemberSpecializationInfo *Info
- = new (getASTContext()) MemberSpecializationInfo(FD, TSK);
+ = new (C) MemberSpecializationInfo(FD, TSK);
TemplateOrSpecialization = Info;
}
@@ -1362,7 +1775,8 @@ FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
}
void
-FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
+FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK,
@@ -1373,14 +1787,10 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
if (!Info)
- Info = new (getASTContext()) FunctionTemplateSpecializationInfo;
-
- Info->Function = this;
- Info->Template.setPointer(Template);
- Info->Template.setInt(TSK - 1);
- Info->TemplateArguments = TemplateArgs;
- Info->TemplateArgumentsAsWritten = TemplateArgsAsWritten;
- Info->PointOfInstantiation = PointOfInstantiation;
+ Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ PointOfInstantiation);
TemplateOrSpecialization = Info;
// Insert this function template specialization into the set of known
@@ -1401,28 +1811,6 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
}
void
-FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
- unsigned NumTemplateArgs,
- const TemplateArgument *TemplateArgs,
- TemplateSpecializationKind TSK,
- unsigned NumTemplateArgsAsWritten,
- TemplateArgumentLoc *TemplateArgsAsWritten,
- SourceLocation LAngleLoc,
- SourceLocation RAngleLoc,
- SourceLocation PointOfInstantiation) {
- ASTContext &Ctx = getASTContext();
- TemplateArgumentList *TemplArgs
- = new (Ctx) TemplateArgumentList(Ctx, TemplateArgs, NumTemplateArgs);
- TemplateArgumentListInfo *TemplArgsInfo
- = new (Ctx) TemplateArgumentListInfo(LAngleLoc, RAngleLoc);
- for (unsigned i=0; i != NumTemplateArgsAsWritten; ++i)
- TemplArgsInfo->addArgument(TemplateArgsAsWritten[i]);
-
- setFunctionTemplateSpecialization(Template, TemplArgs, /*InsertPos=*/0, TSK,
- TemplArgsInfo, PointOfInstantiation);
-}
-
-void
FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs) {
@@ -1533,8 +1921,8 @@ bool FunctionDecl::isOutOfLine() const {
// FieldDecl Implementation
//===----------------------------------------------------------------------===//
-FieldDecl *FieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
- IdentifierInfo *Id, QualType T,
+FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable) {
return new (C) FieldDecl(Decl::Field, DC, L, Id, T, TInfo, BW, Mutable);
}
@@ -1549,6 +1937,25 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
return false;
}
+unsigned FieldDecl::getFieldIndex() const {
+ if (CachedFieldIndex) return CachedFieldIndex - 1;
+
+ unsigned index = 0;
+ RecordDecl::field_iterator
+ i = getParent()->field_begin(), e = getParent()->field_end();
+ while (true) {
+ assert(i != e && "failed to find field in parent!");
+ if (*i == this)
+ break;
+
+ ++i;
+ ++index;
+ }
+
+ CachedFieldIndex = index + 1;
+ return index;
+}
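
getFieldIndex() stores the computed index plus one so that the zero-initialized CachedFieldIndex doubles as a "not yet computed" sentinel, avoiding a separate flag bit. The same caching trick in isolation (stand-in Record/Field types, not Clang's):

    #include <cassert>
    #include <vector>

    struct Field;

    struct Record {
      std::vector<const Field *> Fields; // stands in for the field chain
    };

    struct Field {
      const Record *Parent = nullptr;
      // 0 means "not cached"; otherwise the stored value is index + 1.
      mutable unsigned CachedFieldIndex = 0;

      unsigned getFieldIndex() const {
        if (CachedFieldIndex)
          return CachedFieldIndex - 1;

        unsigned Index = 0;
        for (const Field *F : Parent->Fields) {
          if (F == this)
            break;
          ++Index;
        }
        assert(Index < Parent->Fields.size() && "failed to find field!");

        CachedFieldIndex = Index + 1;
        return Index;
      }
    };

    int main() {
      Record R;
      Field A, B;
      A.Parent = B.Parent = &R;
      R.Fields = {&A, &B};
      assert(B.getFieldIndex() == 1); // linear scan on the first call
      assert(B.getFieldIndex() == 1); // served from the cache afterwards
      return 0;
    }
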
+
//===----------------------------------------------------------------------===//
// TagDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1569,7 +1976,8 @@ TagDecl* TagDecl::getCanonicalDecl() {
void TagDecl::setTypedefForAnonDecl(TypedefDecl *TDD) {
TypedefDeclOrQualifier = TDD;
if (TypeForDecl)
- TypeForDecl->ClearLinkageCache();
+ const_cast<Type*>(TypeForDecl)->ClearLinkageCache();
+ ClearLinkageCache();
}
void TagDecl::startDefinition() {
@@ -1591,11 +1999,16 @@ void TagDecl::completeDefinition() {
IsDefinition = true;
IsBeingDefined = false;
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->CompletedTagDefinition(this);
}
TagDecl* TagDecl::getDefinition() const {
if (isDefinition())
return const_cast<TagDecl *>(this);
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
+ return CXXRD->getDefinition();
for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
R != REnd; ++R)
@@ -1631,14 +2044,17 @@ void TagDecl::setQualifierInfo(NestedNameSpecifier *Qualifier,
EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, SourceLocation TKL,
- EnumDecl *PrevDecl) {
- EnumDecl *Enum = new (C) EnumDecl(DC, L, Id, PrevDecl, TKL);
+ EnumDecl *PrevDecl, bool IsScoped,
+ bool IsScopedUsingClassTag, bool IsFixed) {
+ EnumDecl *Enum = new (C) EnumDecl(DC, L, Id, PrevDecl, TKL,
+ IsScoped, IsScopedUsingClassTag, IsFixed);
C.getTypeDeclType(Enum, PrevDecl);
return Enum;
}
EnumDecl *EnumDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) EnumDecl(0, SourceLocation(), 0, 0, SourceLocation());
+ return new (C) EnumDecl(0, SourceLocation(), 0, 0, SourceLocation(),
+ false, false, false);
}
void EnumDecl::completeDefinition(QualType NewType,
@@ -1646,7 +2062,8 @@ void EnumDecl::completeDefinition(QualType NewType,
unsigned NumPositiveBits,
unsigned NumNegativeBits) {
assert(!isDefinition() && "Cannot redefine enums!");
- IntegerType = NewType;
+ if (!IntegerType)
+ IntegerType = NewType.getTypePtr();
PromotionType = NewPromotionType;
setNumPositiveBits(NumPositiveBits);
setNumNegativeBits(NumNegativeBits);
@@ -1664,10 +2081,11 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, DeclContext *DC, SourceLocation L,
HasFlexibleArrayMember = false;
AnonymousStructOrUnion = false;
HasObjectMember = false;
+ LoadedFieldsFromExternalStorage = false;
assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
}
-RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
+RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
SourceLocation TKL, RecordDecl* PrevDecl) {
@@ -1676,7 +2094,7 @@ RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
-RecordDecl *RecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+RecordDecl *RecordDecl::Create(const ASTContext &C, EmptyShell Empty) {
return new (C) RecordDecl(Record, TTK_Struct, 0, SourceLocation(), 0, 0,
SourceLocation());
}
@@ -1686,6 +2104,13 @@ bool RecordDecl::isInjectedClassName() const {
cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
}
+RecordDecl::field_iterator RecordDecl::field_begin() const {
+ if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+ LoadFieldsFromExternalStorage();
+
+ return field_iterator(decl_iterator(FirstDecl));
+}
+
/// completeDefinition - Notes that the definition of this type is now
/// complete.
void RecordDecl::completeDefinition() {
@@ -1693,15 +2118,29 @@ void RecordDecl::completeDefinition() {
TagDecl::completeDefinition();
}
-ValueDecl *RecordDecl::getAnonymousStructOrUnionObject() {
- // Force the decl chain to come into existence properly.
- if (!getNextDeclInContext()) getParent()->decls_begin();
+void RecordDecl::LoadFieldsFromExternalStorage() const {
+ ExternalASTSource *Source = getASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a RecordDecl doing some initialization.
+ ExternalASTSource::Deserializing TheFields(Source);
- assert(isAnonymousStructOrUnion());
- ValueDecl *D = cast<ValueDecl>(getNextDeclInContext());
- assert(D->getType()->isRecordType());
- assert(D->getType()->getAs<RecordType>()->getDecl() == this);
- return D;
+ llvm::SmallVector<Decl*, 64> Decls;
+ if (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls))
+ return;
+
+#ifndef NDEBUG
+ // Check that all decls we got were FieldDecls.
+ for (unsigned i=0, e=Decls.size(); i != e; ++i)
+ assert(isa<FieldDecl>(Decls[i]));
+#endif
+
+ LoadedFieldsFromExternalStorage = true;
+
+ if (Decls.empty())
+ return;
+
+ llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls);
}
//===----------------------------------------------------------------------===//
@@ -1721,10 +2160,31 @@ void BlockDecl::setParams(ParmVarDecl **NewParamInfo,
}
}
-unsigned BlockDecl::getNumParams() const {
- return NumParams;
+void BlockDecl::setCaptures(ASTContext &Context,
+ const Capture *begin,
+ const Capture *end,
+ bool capturesCXXThis) {
+ CapturesCXXThis = capturesCXXThis;
+
+ if (begin == end) {
+ NumCaptures = 0;
+ Captures = 0;
+ return;
+ }
+
+ NumCaptures = end - begin;
+
+ // Avoid new Capture[] because we don't want to provide a default
+ // constructor.
+ size_t allocationSize = NumCaptures * sizeof(Capture);
+ void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
+ memcpy(buffer, begin, allocationSize);
+ Captures = static_cast<Capture*>(buffer);
}
+SourceRange BlockDecl::getSourceRange() const {
+ return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+}
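
setCaptures() above avoids `new Capture[]`, which would require a default constructor, by grabbing raw storage and memcpy-ing the caller's array; that is only sound for a trivially copyable element type. A standalone sketch of the pattern, with malloc standing in for Context.Allocate:

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <type_traits>

    struct Capture {            // no default constructor, trivially copyable
      explicit Capture(int V) : Var(V) {}
      int Var;
    };

    // Copy [begin, end) into freshly allocated raw storage.
    Capture *copyCaptures(const Capture *begin, const Capture *end,
                          unsigned &NumCaptures) {
      static_assert(std::is_trivially_copyable<Capture>::value,
                    "memcpy requires a trivially copyable type");
      NumCaptures = static_cast<unsigned>(end - begin);
      if (NumCaptures == 0)
        return nullptr;

      std::size_t Size = NumCaptures * sizeof(Capture);
      void *Buffer = std::malloc(Size); // stands in for Context.Allocate
      std::memcpy(Buffer, begin, Size);
      return static_cast<Capture *>(Buffer);
    }

    int main() {
      Capture Local[] = {Capture(1), Capture(2)};
      unsigned N = 0;
      Capture *Copy = copyCaptures(Local, Local + 2, N);
      assert(N == 2 && Copy[1].Var == 2);
      std::free(Copy);
      return 0;
    }
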
//===----------------------------------------------------------------------===//
// Other Decl Allocation/Deallocation Method Implementations
@@ -1734,11 +2194,22 @@ TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
return new (C) TranslationUnitDecl(C);
}
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *II) {
+ return new (C) LabelDecl(DC, L, II, 0);
+}
+
+
NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id) {
return new (C) NamespaceDecl(DC, L, Id);
}
+NamespaceDecl *NamespaceDecl::getNextNamespace() {
+ return dyn_cast_or_null<NamespaceDecl>(
+ NextNamespace.get(getASTContext().getExternalSource()));
+}
+
ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id, QualType T) {
return new (C) ImplicitParamDecl(ImplicitParam, DC, L, Id, T);
@@ -1748,9 +2219,10 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
StorageClass S, StorageClass SCAsWritten,
- bool isInline, bool hasWrittenPrototype) {
+ bool isInlineSpecified,
+ bool hasWrittenPrototype) {
FunctionDecl *New = new (C) FunctionDecl(Function, DC, NameInfo, T, TInfo,
- S, SCAsWritten, isInline);
+ S, SCAsWritten, isInlineSpecified);
New->HasWrittenPrototype = hasWrittenPrototype;
return New;
}
@@ -1766,6 +2238,13 @@ EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
}
+IndirectFieldDecl *
+IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, NamedDecl **CH,
+ unsigned CHS) {
+ return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+}
+
SourceRange EnumConstantDecl::getSourceRange() const {
SourceLocation End = getLocation();
if (Init)
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index 0b958fe..be379d5 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/AST/ASTMutationListener.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -109,10 +110,22 @@ void Decl::add(Kind k) {
bool Decl::isTemplateParameterPack() const {
if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
return TTP->isParameterPack();
-
+ if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(this))
+ return NTTP->isParameterPack();
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(this))
+ return TTP->isParameterPack();
return false;
}
+bool Decl::isParameterPack() const {
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+ return Parm->isParameterPack();
+
+ return isTemplateParameterPack();
+}
+
bool Decl::isFunctionOrFunctionTemplate() const {
if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
@@ -210,6 +223,10 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
+ASTMutationListener *Decl::getASTMutationListener() const {
+ return getASTContext().getASTMutationListener();
+}
+
bool Decl::isUsed(bool CheckUsedAttr) const {
if (Used)
return true;
@@ -243,6 +260,10 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCMethod:
case ObjCProperty:
return IDNS_Ordinary;
+ case Label:
+ return IDNS_Label;
+ case IndirectField:
+ return IDNS_Ordinary | IDNS_Member;
case ObjCCompatibleAlias:
case ObjCInterface:
@@ -416,27 +437,34 @@ SourceLocation Decl::getBodyRBrace() const {
return SourceLocation();
}
-#ifndef NDEBUG
void Decl::CheckAccessDeclContext() const {
- // FIXME: Disable this until rdar://8146294 "access specifier for inner class
- // templates is not set or checked" is fixed.
- return;
+#ifndef NDEBUG
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
// 2. this is a template parameter (and thus doesn't belong to its context)
- // 3. the context is not a record
- // 4. it's invalid
+ // 3. this is a non-type template parameter
+ // 4. the context is not a record
+ // 5. it's invalid
+ // 6. it's a C++0x static_assert.
if (isa<TranslationUnitDecl>(this) ||
isa<TemplateTypeParmDecl>(this) ||
+ isa<NonTypeTemplateParmDecl>(this) ||
!isa<CXXRecordDecl>(getDeclContext()) ||
- isInvalidDecl())
+ isInvalidDecl() ||
+ isa<StaticAssertDecl>(this) ||
+ // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
+ // as DeclContext (?).
+ isa<ParmVarDecl>(this) ||
+ // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
+ // AS_none as access specifier.
+ isa<CXXRecordDecl>(this))
return;
assert(Access != AS_none &&
"Access specifier is AS_none inside a record decl");
+#endif
}
-#endif
//===----------------------------------------------------------------------===//
// DeclContext Implementation
@@ -509,15 +537,24 @@ bool DeclContext::isDependentContext() const {
bool DeclContext::isTransparentContext() const {
if (DeclKind == Decl::Enum)
- return true; // FIXME: Check for C++0x scoped enums
+ return !cast<EnumDecl>(this)->isScoped();
else if (DeclKind == Decl::LinkageSpec)
return true;
- else if (DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord)
- return cast<RecordDecl>(this)->isAnonymousStructOrUnion();
return false;
}
+bool DeclContext::isExternCContext() const {
+ const DeclContext *DC = this;
+ while (DC->DeclKind != Decl::TranslationUnit) {
+ if (DC->DeclKind == Decl::LinkageSpec)
+ return cast<LinkageSpecDecl>(DC)->getLanguage()
+ == LinkageSpecDecl::lang_c;
+ DC = DC->getParent();
+ }
+ return false;
+}
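
The new isExternCContext() walks the DeclContext chain outward until it reaches either a linkage specification, whose language decides the answer, or the translation unit. The traversal on its own (stand-in Context struct; the field names are assumptions):

    #include <cassert>

    enum Kind { TranslationUnit, LinkageSpec, Namespace, Function };
    enum Language { lang_c, lang_cxx };

    struct Context {
      Kind K;
      Language Lang;          // only meaningful when K == LinkageSpec
      const Context *Parent;
    };

    // Walk outward; the innermost linkage spec decides.
    bool isExternCContext(const Context *DC) {
      while (DC->K != TranslationUnit) {
        if (DC->K == LinkageSpec)
          return DC->Lang == lang_c;
        DC = DC->Parent;
      }
      return false;
    }

    int main() {
      Context TU = {TranslationUnit, lang_cxx, nullptr};
      Context C  = {LinkageSpec, lang_c, &TU};   // extern "C" { ... }
      Context F  = {Function, lang_cxx, &C};     // a function inside it
      assert(isExternCContext(&F));
      assert(!isExternCContext(&TU));
      return 0;
    }
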
+
bool DeclContext::Encloses(const DeclContext *DC) const {
if (getPrimaryContext() != this)
return getPrimaryContext()->Encloses(DC);
@@ -592,6 +629,24 @@ DeclContext *DeclContext::getNextContext() {
}
}
+std::pair<Decl *, Decl *>
+DeclContext::BuildDeclChain(const llvm::SmallVectorImpl<Decl*> &Decls) {
+ // Build up a chain of declarations via the Decl::NextDeclInContext field.
+ Decl *FirstNewDecl = 0;
+ Decl *PrevDecl = 0;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ Decl *D = Decls[I];
+ if (PrevDecl)
+ PrevDecl->NextDeclInContext = D;
+ else
+ FirstNewDecl = D;
+
+ PrevDecl = D;
+ }
+
+ return std::make_pair(FirstNewDecl, PrevDecl);
+}
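
The factored-out BuildDeclChain() threads a vector of declarations into the intrusive NextDeclInContext list and returns the (first, last) pair so callers can splice the chain anywhere, as LoadLexicalDeclsFromExternalStorage now does. The linking and splice in miniature (stand-in Decl type):

    #include <cassert>
    #include <utility>
    #include <vector>

    struct Decl {
      Decl *NextDeclInContext = nullptr;
    };

    // Link the vector into a chain; return its first and last nodes
    // (both null if the input is empty).
    std::pair<Decl *, Decl *> buildDeclChain(const std::vector<Decl *> &Decls) {
      Decl *First = nullptr;
      Decl *Prev = nullptr;
      for (Decl *D : Decls) {
        if (Prev)
          Prev->NextDeclInContext = D;
        else
          First = D;
        Prev = D;
      }
      return std::make_pair(First, Prev);
    }

    int main() {
      Decl A, B, C;
      std::vector<Decl *> V = {&A, &B, &C};
      std::pair<Decl *, Decl *> Chain = buildDeclChain(V);

      // Splice the new chain in front of an existing list head, as the
      // external-storage loader does with FirstDecl/LastDecl.
      Decl Existing;
      Chain.second->NextDeclInContext = &Existing;
      assert(Chain.first == &A && A.NextDeclInContext == &B);
      return 0;
    }
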
+
/// \brief Load the declarations within this lexical storage from an
/// external source.
void
@@ -612,26 +667,22 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
if (Decls.empty())
return;
- // Resolve all of the declaration IDs into declarations, building up
- // a chain of declarations via the Decl::NextDeclInContext field.
- Decl *FirstNewDecl = 0;
- Decl *PrevDecl = 0;
- for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- Decl *D = Decls[I];
- if (PrevDecl)
- PrevDecl->NextDeclInContext = D;
- else
- FirstNewDecl = D;
-
- PrevDecl = D;
- }
+ // We may have already loaded just the fields of this record, in which case
+ // don't add the decls, just replace the FirstDecl/LastDecl chain.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
+ if (RD->LoadedFieldsFromExternalStorage) {
+ llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls);
+ return;
+ }
// Splice the newly-read declarations into the beginning of the list
// of declarations.
- PrevDecl->NextDeclInContext = FirstDecl;
- FirstDecl = FirstNewDecl;
+ Decl *ExternalFirst, *ExternalLast;
+ llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls);
+ ExternalLast->NextDeclInContext = FirstDecl;
+ FirstDecl = ExternalFirst;
if (!LastDecl)
- LastDecl = PrevDecl;
+ LastDecl = ExternalLast;
}
DeclContext::lookup_result
@@ -771,6 +822,11 @@ void DeclContext::addHiddenDecl(Decl *D) {
} else {
FirstDecl = LastDecl = D;
}
+
+ // Notify a C++ record declaration that we've added a member, so it can
+  // update its class-specific state.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ Record->addedMember(D);
}
void DeclContext::addDecl(Decl *D) {
@@ -911,6 +967,12 @@ void DeclContext::makeDeclVisibleInContext(NamedDecl *D, bool Recoverable) {
// parent context, too. This operation is recursive.
if (isTransparentContext() || isInlineNamespace())
getParent()->makeDeclVisibleInContext(D, Recoverable);
+
+ Decl *DCAsDecl = cast<Decl>(this);
+ // Notify that a decl was made visible unless it's a Tag being defined.
+ if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
+ if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
+ L->AddedVisibleDecl(this, D);
}
void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index f2f0694..fba73f5 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -14,6 +14,8 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Expr.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
@@ -34,8 +36,8 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasTrivialDestructor(true), ComputedVisibleConversions(false),
DeclaredDefaultConstructor(false), DeclaredCopyConstructor(false),
DeclaredCopyAssignment(false), DeclaredDestructor(false),
- Bases(0), NumBases(0), VBases(0), NumVBases(0),
- Definition(D), FirstFriend(0) {
+ NumBases(0), NumVBases(0), Bases(), VBases(),
+ Definition(D), FirstFriend(0) {
}
CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
@@ -46,9 +48,9 @@ CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
DefinitionData(PrevDecl ? PrevDecl->DefinitionData : 0),
TemplateOrInstantiation() { }
-CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
- SourceLocation L, IdentifierInfo *Id,
- SourceLocation TKL,
+CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
+ DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, SourceLocation TKL,
CXXRecordDecl* PrevDecl,
bool DelayTypeCreation) {
CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, L, Id,
@@ -60,7 +62,7 @@ CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
-CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, EmptyShell Empty) {
return new (C) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(), 0, 0,
SourceLocation());
}
@@ -75,8 +77,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// no base classes [...].
data().Aggregate = false;
- if (data().Bases)
- C.Deallocate(data().Bases);
+ if (!data().Bases.isOffset() && data().NumBases > 0)
+ C.Deallocate(data().getBases());
// The set of seen virtual base types.
llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
@@ -87,7 +89,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
data().Bases = new(C) CXXBaseSpecifier [NumBases];
data().NumBases = NumBases;
for (unsigned i = 0; i < NumBases; ++i) {
- data().Bases[i] = *Bases[i];
+ data().getBases()[i] = *Bases[i];
// Keep track of inherited vbases for this base class.
const CXXBaseSpecifier *Base = Bases[i];
QualType BaseType = Base->getType();
@@ -97,6 +99,25 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+
+ // A class with a non-empty base class is not empty.
+ // FIXME: Standard ref?
+ if (!BaseClassDecl->isEmpty())
+ data().Empty = false;
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ if (BaseClassDecl->isPolymorphic())
+ data().Polymorphic = true;
+
// Now go through all virtual bases of this base and add them.
for (CXXRecordDecl::base_class_iterator VBase =
BaseClassDecl->vbases_begin(),
@@ -110,8 +131,50 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Add this base if it's not already in the list.
if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
VBases.push_back(Base);
+
+ // C++0x [meta.unary.prop] is_empty:
+ // T is a class type, but not a union type, with ... no virtual base
+ // classes
+ data().Empty = false;
+
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if its class has no virtual base classes.
+ data().HasTrivialConstructor = false;
+
+ // C++ [class.copy]p6:
+ // A copy constructor is trivial if its class has no virtual base
+ // classes.
+ data().HasTrivialCopyConstructor = false;
+
+ // C++ [class.copy]p11:
+ // A copy assignment operator is trivial if its class has no virtual
+ // base classes.
+ data().HasTrivialCopyAssignment = false;
+ } else {
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if all the direct base classes of its
+ // class have trivial constructors.
+ if (!BaseClassDecl->hasTrivialConstructor())
+ data().HasTrivialConstructor = false;
+
+ // C++ [class.copy]p6:
+ // A copy constructor is trivial if all the direct base classes of its
+ // class have trivial copy constructors.
+ if (!BaseClassDecl->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+
+ // C++ [class.copy]p11:
+ // A copy assignment operator is trivial if all the direct base classes
+ // of its class have trivial copy assignment operators.
+ if (!BaseClassDecl->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
}
-
+
+ // C++ [class.ctor]p3:
+ // A destructor is trivial if all the direct base classes of its class
+ // have trivial destructors.
+ if (!BaseClassDecl->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
}
if (VBases.empty())
@@ -130,10 +193,11 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
CXXRecordDecl *VBaseClassDecl = cast<CXXRecordDecl>(
VBaseTypeInfo->getType()->getAs<RecordType>()->getDecl());
- data().VBases[I] =
+ data().getVBases()[I] =
CXXBaseSpecifier(VBaseClassDecl->getSourceRange(), true,
VBaseClassDecl->getTagKind() == TTK_Class,
- VBases[I]->getAccessSpecifier(), VBaseTypeInfo);
+ VBases[I]->getAccessSpecifier(), VBaseTypeInfo,
+ SourceLocation());
}
}
@@ -150,7 +214,7 @@ bool CXXRecordDecl::hasAnyDependentBases() const {
return !forallBases(SawBase, 0);
}
-bool CXXRecordDecl::hasConstCopyConstructor(ASTContext &Context) const {
+bool CXXRecordDecl::hasConstCopyConstructor(const ASTContext &Context) const {
return getCopyConstructor(Context, Qualifiers::Const) != 0;
}
@@ -177,7 +241,7 @@ GetBestOverloadCandidateSimple(
return Cands[Best].first;
}
-CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
+CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(const ASTContext &Context,
unsigned TypeQuals) const{
QualType ClassType
= Context.getTypeDeclType(const_cast<CXXRecordDecl*>(this));
@@ -258,84 +322,268 @@ CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
return GetBestOverloadCandidateSimple(Found);
}
-void
-CXXRecordDecl::addedConstructor(ASTContext &Context,
- CXXConstructorDecl *ConDecl) {
- assert(!ConDecl->isImplicit() && "addedConstructor - not for implicit decl");
- // Note that we have a user-declared constructor.
- data().UserDeclaredConstructor = true;
-
- // Note that we have no need of an implicitly-declared default constructor.
- data().DeclaredDefaultConstructor = true;
+void CXXRecordDecl::markedVirtualFunctionPure() {
+ // C++ [class.abstract]p2:
+ // A class is abstract if it has at least one pure virtual function.
+ data().Abstract = true;
+}
+
+void CXXRecordDecl::addedMember(Decl *D) {
+ // Ignore friends and invalid declarations.
+ if (D->getFriendObjectKind() || D->isInvalidDecl())
+ return;
- // C++ [dcl.init.aggr]p1:
- // An aggregate is an array or a class (clause 9) with no
- // user-declared constructors (12.1) [...].
- data().Aggregate = false;
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (FunTmpl)
+ D = FunTmpl->getTemplatedDecl();
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual()) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with [...] no virtual functions.
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+
+ // Virtual functions make the class non-empty.
+ // FIXME: Standard ref?
+ data().Empty = false;
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ data().Polymorphic = true;
+
+ // None of the special member functions are trivial.
+ data().HasTrivialConstructor = false;
+ data().HasTrivialCopyConstructor = false;
+ data().HasTrivialCopyAssignment = false;
+ // FIXME: Destructor?
+ }
+ }
+
+ if (D->isImplicit()) {
+ // Notify that an implicit member was added after the definition
+ // was completed.
+ if (!isBeingDefined())
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXImplicitMember(data().Definition, D);
+
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ // If this is the implicit default constructor, note that we have now
+ // declared it.
+ if (Constructor->isDefaultConstructor())
+ data().DeclaredDefaultConstructor = true;
+ // If this is the implicit copy constructor, note that we have now
+ // declared it.
+ else if (Constructor->isCopyConstructor())
+ data().DeclaredCopyConstructor = true;
+ return;
+ }
- // C++ [class]p4:
- // A POD-struct is an aggregate class [...]
- data().PlainOldData = false;
+ if (isa<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ return;
+ }
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ // If this is the implicit copy constructor, note that we have now
+ // declared it.
+ // FIXME: Move constructors
+ if (Method->getOverloadedOperator() == OO_Equal)
+ data().DeclaredCopyAssignment = true;
+ return;
+ }
- // C++ [class.ctor]p5:
- // A constructor is trivial if it is an implicitly-declared default
- // constructor.
- // FIXME: C++0x: don't do this for "= default" default constructors.
- data().HasTrivialConstructor = false;
-
- // Note when we have a user-declared copy constructor, which will
- // suppress the implicit declaration of a copy constructor.
- if (ConDecl->isCopyConstructor()) {
- data().UserDeclaredCopyConstructor = true;
- data().DeclaredCopyConstructor = true;
+ // Any other implicit declarations are handled like normal declarations.
+ }
+
+ // Handle (user-declared) constructors.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ // Note that we have a user-declared constructor.
+ data().UserDeclaredConstructor = true;
+
+ // Note that we have no need of an implicitly-declared default constructor.
+ data().DeclaredDefaultConstructor = true;
- // C++ [class.copy]p6:
- // A copy constructor is trivial if it is implicitly declared.
- // FIXME: C++0x: don't do this for "= default" copy constructors.
- data().HasTrivialCopyConstructor = false;
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with no
+ // user-declared constructors (12.1) [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ data().PlainOldData = false;
+
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if it is an implicitly-declared default
+ // constructor.
+ // FIXME: C++0x: don't do this for "= default" default constructors.
+ data().HasTrivialConstructor = false;
+
+ // Note when we have a user-declared copy constructor, which will
+ // suppress the implicit declaration of a copy constructor.
+ if (!FunTmpl && Constructor->isCopyConstructor()) {
+ data().UserDeclaredCopyConstructor = true;
+ data().DeclaredCopyConstructor = true;
+
+ // C++ [class.copy]p6:
+ // A copy constructor is trivial if it is implicitly declared.
+ // FIXME: C++0x: don't do this for "= default" copy constructors.
+ data().HasTrivialCopyConstructor = false;
+ }
+ return;
}
-}
-void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context,
- CXXMethodDecl *OpDecl) {
- // We're interested specifically in copy assignment operators.
- const FunctionProtoType *FnType = OpDecl->getType()->getAs<FunctionProtoType>();
- assert(FnType && "Overloaded operator has no proto function type.");
- assert(FnType->getNumArgs() == 1 && !FnType->isVariadic());
-
- // Copy assignment operators must be non-templates.
- if (OpDecl->getPrimaryTemplate() || OpDecl->getDescribedFunctionTemplate())
+ // Handle (user-declared) destructors.
+ if (isa<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ data().UserDeclaredDestructor = true;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that has [...] no user-defined
+ // destructor.
+ data().PlainOldData = false;
+
+ // C++ [class.dtor]p3:
+ // A destructor is trivial if it is an implicitly-declared destructor and
+ // [...].
+ //
+ // FIXME: C++0x: don't do this for "= default" destructors
+ data().HasTrivialDestructor = false;
+
return;
+ }
- QualType ArgType = FnType->getArgType(0);
- if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>())
- ArgType = Ref->getPointeeType();
-
- ArgType = ArgType.getUnqualifiedType();
- QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
- const_cast<CXXRecordDecl*>(this)));
+ // Handle (user-declared) member functions.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->getOverloadedOperator() == OO_Equal) {
+ // We're interested specifically in copy assignment operators.
+ const FunctionProtoType *FnType
+ = Method->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Overloaded operator has no proto function type.");
+ assert(FnType->getNumArgs() == 1 && !FnType->isVariadic());
+
+ // Copy assignment operators must be non-templates.
+ if (Method->getPrimaryTemplate() || FunTmpl)
+ return;
+
+ ASTContext &Context = getASTContext();
+ QualType ArgType = FnType->getArgType(0);
+ if (const LValueReferenceType *Ref =ArgType->getAs<LValueReferenceType>())
+ ArgType = Ref->getPointeeType();
+
+ ArgType = ArgType.getUnqualifiedType();
+ QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
+ const_cast<CXXRecordDecl*>(this)));
+
+ if (!Context.hasSameUnqualifiedType(ClassType, ArgType))
+ return;
+
+ // This is a copy assignment operator.
+ // FIXME: Move assignment operators.
+
+ // Suppress the implicit declaration of a copy constructor.
+ data().UserDeclaredCopyAssignment = true;
+ data().DeclaredCopyAssignment = true;
+
+ // C++ [class.copy]p11:
+ // A copy assignment operator is trivial if it is implicitly declared.
+ // FIXME: C++0x: don't do this for "= default" copy operators.
+ data().HasTrivialCopyAssignment = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that [...] has no user-defined copy
+ // assignment operator [...].
+ data().PlainOldData = false;
+ }
+
+ // Keep the list of conversion functions up-to-date.
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ // We don't record specializations.
+ if (Conversion->getPrimaryTemplate())
+ return;
+
+ // FIXME: We intentionally don't use the decl's access here because it
+ // hasn't been set yet. That's really just a misdesign in Sema.
- if (!Context.hasSameUnqualifiedType(ClassType, ArgType))
+ if (FunTmpl) {
+ if (FunTmpl->getPreviousDeclaration())
+ data().Conversions.replace(FunTmpl->getPreviousDeclaration(),
+ FunTmpl);
+ else
+ data().Conversions.addDecl(FunTmpl);
+ } else {
+ if (Conversion->getPreviousDeclaration())
+ data().Conversions.replace(Conversion->getPreviousDeclaration(),
+ Conversion);
+ else
+ data().Conversions.addDecl(Conversion);
+ }
+ }
+
return;
-
- // This is a copy assignment operator.
- // Note on the decl that it is a copy assignment operator.
- OpDecl->setCopyAssignment(true);
-
- // Suppress the implicit declaration of a copy constructor.
- data().UserDeclaredCopyAssignment = true;
- data().DeclaredCopyAssignment = true;
+ }
- // C++ [class.copy]p11:
- // A copy assignment operator is trivial if it is implicitly declared.
- // FIXME: C++0x: don't do this for "= default" copy operators.
- data().HasTrivialCopyAssignment = false;
-
- // C++ [class]p4:
- // A POD-struct is an aggregate class that [...] has no user-defined copy
- // assignment operator [...].
- data().PlainOldData = false;
+ // Handle non-static data members.
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...] no
+ // private or protected non-static data members (clause 11).
+ //
+ // A POD must be an aggregate.
+ if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
+ data().Aggregate = false;
+ data().PlainOldData = false;
+ }
+
+ // C++ [class]p9:
+ // A POD struct is a class that is both a trivial class and a
+ // standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types).
+ ASTContext &Context = getASTContext();
+ QualType T = Context.getBaseElementType(Field->getType());
+ if (!T->isPODType())
+ data().PlainOldData = false;
+ if (T->isReferenceType())
+ data().HasTrivialConstructor = false;
+
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (FieldRec->getDefinition()) {
+ if (!FieldRec->hasTrivialConstructor())
+ data().HasTrivialConstructor = false;
+ if (!FieldRec->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+ if (!FieldRec->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
+ if (!FieldRec->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
+ }
+ }
+
+ // If this is not a zero-length bit-field, then the class is not empty.
+ if (data().Empty) {
+ if (!Field->getBitWidth())
+ data().Empty = false;
+ else if (!Field->getBitWidth()->isTypeDependent() &&
+ !Field->getBitWidth()->isValueDependent()) {
+ llvm::APSInt Bits;
+ if (Field->getBitWidth()->isIntegerConstantExpr(Bits, Context))
+ if (!!Bits)
+ data().Empty = false;
+ }
+ }
+ }
+
+ // Handle using declarations of conversion functions.
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D))
+ if (Shadow->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName)
+ data().Conversions.addDecl(Shadow, Shadow->getAccess());
}
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
@@ -479,20 +727,6 @@ const UnresolvedSetImpl *CXXRecordDecl::getVisibleConversionFunctions() {
return &data().VisibleConversions;
}
-#ifndef NDEBUG
-void CXXRecordDecl::CheckConversionFunction(NamedDecl *ConvDecl) {
- assert(ConvDecl->getDeclContext() == this &&
- "conversion function does not belong to this record");
-
- ConvDecl = ConvDecl->getUnderlyingDecl();
- if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(ConvDecl)) {
- assert(isa<CXXConversionDecl>(Temp->getTemplatedDecl()));
- } else {
- assert(isa<CXXConversionDecl>(ConvDecl));
- }
-}
-#endif
-
void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
// This operation is O(N) but extremely rare. Sema only uses it to
// remove UsingShadowDecls in a class that were followed by a direct
@@ -518,17 +752,6 @@ void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
llvm_unreachable("conversion not found in set!");
}
-void CXXRecordDecl::setMethodAsVirtual(FunctionDecl *Method) {
- Method->setVirtualAsWritten(true);
- setAggregate(false);
- setPOD(false);
- setEmpty(false);
- setPolymorphic(true);
- setHasTrivialConstructor(false);
- setHasTrivialCopyConstructor(false);
- setHasTrivialCopyAssignment(false);
-}
-
CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom());
@@ -577,28 +800,6 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
assert(false && "Not a class template or member class specialization");
}
-CXXConstructorDecl *
-CXXRecordDecl::getDefaultConstructor() {
- ASTContext &Context = getASTContext();
- QualType ClassType = Context.getTypeDeclType(this);
- DeclarationName ConstructorName
- = Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(ClassType.getUnqualifiedType()));
-
- DeclContext::lookup_const_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = lookup(ConstructorName);
- Con != ConEnd; ++Con) {
- // FIXME: In C++0x, a constructor template can be a default constructor.
- if (isa<FunctionTemplateDecl>(*Con))
- continue;
-
- CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
- if (Constructor->isDefaultConstructor())
- return Constructor;
- }
- return 0;
-}
-
CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
@@ -618,6 +819,69 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
return Dtor;
}
+void CXXRecordDecl::completeDefinition() {
+ completeDefinition(0);
+}
+
+void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
+ RecordDecl::completeDefinition();
+
+ // If the class may be abstract (but hasn't been marked as such), check for
+ // any pure final overriders.
+ if (mayBeAbstract()) {
+ CXXFinalOverriderMap MyFinalOverriders;
+ if (!FinalOverriders) {
+ getFinalOverriders(MyFinalOverriders);
+ FinalOverriders = &MyFinalOverriders;
+ }
+
+ bool Done = false;
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
+ MEnd = FinalOverriders->end();
+ M != MEnd && !Done; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd && !Done; ++SO) {
+ assert(SO->second.size() > 0 &&
+ "All virtual functions have overridding virtual functions");
+
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
+ if (SO->second.front().Method->isPure()) {
+ data().Abstract = true;
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Set access bits correctly on the directly-declared conversions.
+ for (UnresolvedSetIterator I = data().Conversions.begin(),
+ E = data().Conversions.end();
+ I != E; ++I)
+ data().Conversions.setAccess(I, (*I)->getAccess());
+}
+
+bool CXXRecordDecl::mayBeAbstract() const {
+ if (data().Abstract || isInvalidDecl() || !data().Polymorphic ||
+ isDependentContext())
+ return false;
+
+ for (CXXRecordDecl::base_class_const_iterator B = bases_begin(),
+ BEnd = bases_end();
+ B != BEnd; ++B) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl->isAbstract())
+ return true;
+ }
+
+ return false;
+}
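
The new completeDefinition() only performs the final-overrider walk when mayBeAbstract() allows it, and bails out at the first pure final overrider it finds. The early-exit shape, reduced to a flat container (the real CXXFinalOverriderMap is a nested map; this is a deliberate simplification):

    #include <cassert>
    #include <vector>

    struct Method { bool IsPure; };

    // A class is abstract if at least one inherited virtual function's
    // final overrider is still pure (C++ [class.abstract]p4).
    bool computeAbstract(const std::vector<Method> &FinalOverriders) {
      for (const Method &M : FinalOverriders)
        if (M.IsPure)
          return true; // the first pure overrider settles it; stop early
      return false;
    }

    int main() {
      assert(computeAbstract({{false}, {true}}));
      assert(!computeAbstract({{false}, {false}}));
      return 0;
    }
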
+
CXXMethodDecl *
CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
const DeclarationNameInfo &NameInfo,
@@ -735,85 +999,90 @@ bool CXXMethodDecl::hasInlineBody() const {
return CheckFn->hasBody(fn) && !fn->isOutOfLine();
}
-CXXBaseOrMemberInitializer::
-CXXBaseOrMemberInitializer(ASTContext &Context,
- TypeSourceInfo *TInfo, bool IsVirtual,
- SourceLocation L, Expr *Init, SourceLocation R)
- : BaseOrMember(TInfo), Init(Init), AnonUnionMember(0),
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ SourceLocation EllipsisLoc)
+ : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
LParenLoc(L), RParenLoc(R), IsVirtual(IsVirtual), IsWritten(false),
SourceOrderOrNumArrayIndices(0)
{
}
-CXXBaseOrMemberInitializer::
-CXXBaseOrMemberInitializer(ASTContext &Context,
- FieldDecl *Member, SourceLocation MemberLoc,
- SourceLocation L, Expr *Init, SourceLocation R)
- : BaseOrMember(Member), MemberLocation(MemberLoc), Init(Init),
- AnonUnionMember(0), LParenLoc(L), RParenLoc(R), IsVirtual(false),
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ IndirectFieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
-CXXBaseOrMemberInitializer::
-CXXBaseOrMemberInitializer(ASTContext &Context,
- FieldDecl *Member, SourceLocation MemberLoc,
- SourceLocation L, Expr *Init, SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices)
- : BaseOrMember(Member), MemberLocation(MemberLoc), Init(Init),
- AnonUnionMember(0), LParenLoc(L), RParenLoc(R), IsVirtual(false),
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
{
VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
}
-CXXBaseOrMemberInitializer *
-CXXBaseOrMemberInitializer::Create(ASTContext &Context,
- FieldDecl *Member,
- SourceLocation MemberLoc,
- SourceLocation L,
- Expr *Init,
- SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices) {
- void *Mem = Context.Allocate(sizeof(CXXBaseOrMemberInitializer) +
+CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices) {
+ void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) +
sizeof(VarDecl *) * NumIndices,
- llvm::alignof<CXXBaseOrMemberInitializer>());
- return new (Mem) CXXBaseOrMemberInitializer(Context, Member, MemberLoc,
- L, Init, R, Indices, NumIndices);
+ llvm::alignOf<CXXCtorInitializer>());
+ return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
+ Indices, NumIndices);
}
-TypeLoc CXXBaseOrMemberInitializer::getBaseClassLoc() const {
+TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
if (isBaseInitializer())
- return BaseOrMember.get<TypeSourceInfo*>()->getTypeLoc();
+ return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
else
return TypeLoc();
}
-Type *CXXBaseOrMemberInitializer::getBaseClass() {
- if (isBaseInitializer())
- return BaseOrMember.get<TypeSourceInfo*>()->getType().getTypePtr();
- else
- return 0;
-}
-
-const Type *CXXBaseOrMemberInitializer::getBaseClass() const {
+const Type *CXXCtorInitializer::getBaseClass() const {
if (isBaseInitializer())
- return BaseOrMember.get<TypeSourceInfo*>()->getType().getTypePtr();
+ return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr();
else
return 0;
}
-SourceLocation CXXBaseOrMemberInitializer::getSourceLocation() const {
- if (isMemberInitializer())
+SourceLocation CXXCtorInitializer::getSourceLocation() const {
+ if (isAnyMemberInitializer())
return getMemberLocation();
return getBaseClassLoc().getLocalSourceRange().getBegin();
}
-SourceRange CXXBaseOrMemberInitializer::getSourceRange() const {
+SourceRange CXXCtorInitializer::getSourceRange() const {
return SourceRange(getSourceLocation(), getRParenLoc());
}
@@ -847,25 +1116,40 @@ bool CXXConstructorDecl::isDefaultConstructor() const {
bool
CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isLValueReferenceType();
+}
+
+bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isRValueReferenceType();
+}
+
+/// \brief Determine whether this is a copy or move constructor.
+bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// C++ [class.copy]p2:
// A non-template constructor for class X is a copy constructor
// if its first parameter is of type X&, const X&, volatile X& or
// const volatile X&, and either there are no other parameters
// or else all other parameters have default arguments (8.3.6).
+ // C++0x [class.copy]p3:
+ // A non-template constructor for class X is a move constructor if its
+ // first parameter is of type X&&, const X&&, volatile X&&, or
+ // const volatile X&&, and either there are no other parameters or else
+ // all other parameters have default arguments.
if ((getNumParams() < 1) ||
(getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
(getPrimaryTemplate() != 0) ||
(getDescribedFunctionTemplate() != 0))
return false;
-
+
const ParmVarDecl *Param = getParamDecl(0);
-
- // Do we have a reference type? Rvalue references don't count.
- const LValueReferenceType *ParamRefType =
- Param->getType()->getAs<LValueReferenceType>();
+
+ // Do we have a reference type?
+ const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
if (!ParamRefType)
return false;
-
+
// Is it a reference to our class type?
ASTContext &Context = getASTContext();
@@ -875,12 +1159,12 @@ CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
= Context.getCanonicalType(Context.getTagDeclType(getParent()));
if (PointeeType.getUnqualifiedType() != ClassTy)
return false;
-
+
// FIXME: other qualifiers?
-
- // We have a copy constructor.
+
+ // We have a copy or move constructor.
TypeQuals = PointeeType.getCVRQualifiers();
- return true;
+ return true;
}
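
With rvalue references in the type system, isCopyConstructor and isMoveConstructor differ only in the reference kind of the first parameter, so both now delegate to isCopyOrMoveConstructor and test lvalue vs. rvalue afterwards. A type-level illustration of that split (standalone code, not Clang's API):

    #include <type_traits>

    struct X {
      X();
      X(const X &);  // copy constructor: first parameter is X& / const X&
      X(X &&);       // move constructor: first parameter is X&&
    };

    // Classify a constructor by its first parameter type, mirroring the
    // shared check plus the lvalue/rvalue split.
    template <typename Param>
    constexpr bool isCopyLike() {
      return std::is_lvalue_reference<Param>::value &&
             std::is_same<typename std::decay<Param>::type, X>::value;
    }

    template <typename Param>
    constexpr bool isMoveLike() {
      return std::is_rvalue_reference<Param>::value &&
             std::is_same<typename std::decay<Param>::type, X>::value;
    }

    int main() {
      static_assert(isCopyLike<const X &>(), "copy ctor shape");
      static_assert(isMoveLike<X &&>(), "move ctor shape");
      static_assert(!isCopyLike<X &&>(), "an rvalue ref is not a copy ctor");
      return 0;
    }
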
bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
@@ -899,7 +1183,7 @@ bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
(getNumParams() > 1 && getParamDecl(1)->hasDefaultArg());
}
-bool CXXConstructorDecl::isCopyConstructorLikeSpecialization() const {
+bool CXXConstructorDecl::isSpecializationCopyingObject() const {
if ((getNumParams() < 1) ||
(getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
(getPrimaryTemplate() == 0) ||
@@ -911,12 +1195,6 @@ bool CXXConstructorDecl::isCopyConstructorLikeSpecialization() const {
ASTContext &Context = getASTContext();
CanQualType ParamType = Context.getCanonicalType(Param->getType());
- // Strip off the lvalue reference, if any.
- if (CanQual<LValueReferenceType> ParamRefType
- = ParamType->getAs<LValueReferenceType>())
- ParamType = ParamRefType->getPointeeType();
-
-
   // Is it the same as our class type?
CanQualType ClassTy
= Context.getCanonicalType(Context.getTagDeclType(getParent()));
@@ -926,21 +1204,38 @@ bool CXXConstructorDecl::isCopyConstructorLikeSpecialization() const {
return true;
}
+const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
+ // Hack: we store the inherited constructor in the overridden method table
+ method_iterator It = begin_overridden_methods();
+ if (It == end_overridden_methods())
+ return 0;
+
+ return cast<CXXConstructorDecl>(*It);
+}
+
+void
+CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
+ // Hack: we store the inherited constructor in the overridden method table
+ assert(size_overridden_methods() == 0 && "Base ctor already set.");
+ addOverriddenMethod(BaseCtor);
+}
+
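A short example of the C++0x inheriting-constructors feature this hack records (hypothetical types): when a base constructor is inherited via a using-declaration, the implicitly declared derived constructor remembers which base constructor it came from, which is what getInheritedConstructor() reports.

    struct Base {
      Base(int);
    };
    struct Derived : Base {
      using Base::Base; // the implicit Derived(int) records Base(int)
                        // as its inherited constructor
    };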
CXXDestructorDecl *
CXXDestructorDecl::Create(ASTContext &C, EmptyShell Empty) {
return new (C) CXXDestructorDecl(0, DeclarationNameInfo(),
- QualType(), false, false);
+ QualType(), 0, false, false);
}
CXXDestructorDecl *
CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
const DeclarationNameInfo &NameInfo,
- QualType T, bool isInline,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline,
bool isImplicitlyDeclared) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXDestructorName &&
"Name must refer to a destructor");
- return new (C) CXXDestructorDecl(RD, NameInfo, T, isInline,
+ return new (C) CXXDestructorDecl(RD, NameInfo, T, TInfo, isInline,
isImplicitlyDeclared);
}
@@ -1004,6 +1299,44 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
Qualifier, IdentLoc, Namespace);
}
+UsingDecl *UsingShadowDecl::getUsingDecl() const {
+ const UsingShadowDecl *Shadow = this;
+ while (const UsingShadowDecl *NextShadow =
+ dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
+ Shadow = NextShadow;
+ return cast<UsingDecl>(Shadow->UsingOrNextShadow);
+}
+
+void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
+ "declaration already in set");
+ assert(S->getUsingDecl() == this);
+
+ if (FirstUsingShadow)
+ S->UsingOrNextShadow = FirstUsingShadow;
+ FirstUsingShadow = S;
+}
+
+void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
+ "declaration not in set");
+ assert(S->getUsingDecl() == this);
+
+ // Remove S from the shadow decl chain. This is O(n) but hopefully rare.
+
+ if (FirstUsingShadow == S) {
+ FirstUsingShadow = dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow);
+ S->UsingOrNextShadow = this;
+ return;
+ }
+
+ UsingShadowDecl *Prev = FirstUsingShadow;
+ while (Prev->UsingOrNextShadow != S)
+ Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
+ Prev->UsingOrNextShadow = S->UsingOrNextShadow;
+ S->UsingOrNextShadow = this;
+}
+
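The chain manipulated above is a plain singly linked list threaded through UsingOrNextShadow, terminated by a pointer back to the owning using-declaration. A standalone sketch of the same unlink logic (hypothetical Shadow type, with null as the terminator):

    struct Shadow { Shadow *Next; };

    // Unlink S from the chain headed by First; returns the new head.
    Shadow *unlink(Shadow *First, Shadow *S) {
      if (First == S)
        return S->Next;
      Shadow *Prev = First;
      while (Prev->Next != S) // O(n) walk, as in removeShadowDecl()
        Prev = Prev->Next;
      Prev->Next = S->Next;
      return First;
    }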
UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC,
SourceRange NNR, SourceLocation UL,
NestedNameSpecifier* TargetNNS,
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index d952cc3..45f5188 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -153,6 +153,9 @@ ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
ObjCPropertyDecl *
ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
IdentifierInfo *PropertyId) const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
if (ObjCPropertyDecl *PD =
ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
return PD;
@@ -171,6 +174,9 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
ASTContext &C)
{
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
if (AllReferencedProtocols.empty() && ReferencedProtocols.empty()) {
AllReferencedProtocols.set(ExtList, ExtNum, C);
return;
@@ -270,6 +276,9 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
const ObjCInterfaceDecl* ClassDecl = this;
ObjCMethodDecl *MethodDecl = 0;
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
while (ClassDecl != NULL) {
if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
return MethodDecl;
@@ -302,14 +311,16 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
return NULL;
}
-ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateInstanceMethod(
- const Selector &Sel) {
+ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
+ const Selector &Sel,
+ bool Instance) {
ObjCMethodDecl *Method = 0;
if (ObjCImplementationDecl *ImpDecl = getImplementation())
- Method = ImpDecl->getInstanceMethod(Sel);
+ Method = Instance ? ImpDecl->getInstanceMethod(Sel)
+ : ImpDecl->getClassMethod(Sel);
if (!Method && getSuperClass())
- return getSuperClass()->lookupPrivateInstanceMethod(Sel);
+ return getSuperClass()->lookupPrivateMethod(Sel, Instance);
return Method;
}
@@ -443,11 +454,29 @@ ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
: ObjCContainerDecl(ObjCInterface, DC, atLoc, Id),
TypeForDecl(0), SuperClass(0),
CategoryList(0), IvarList(0),
- ForwardDecl(FD), InternalInterface(isInternal),
+ ForwardDecl(FD), InternalInterface(isInternal), ExternallyCompleted(false),
ClassLoc(CLoc) {
}
+void ObjCInterfaceDecl::LoadExternalDefinition() const {
+ assert(ExternallyCompleted && "Class is not externally completed");
+ ExternallyCompleted = false;
+ getASTContext().getExternalSource()->CompleteType(
+ const_cast<ObjCInterfaceDecl *>(this));
+}
+
+void ObjCInterfaceDecl::setExternallyCompleted() {
+ assert(getASTContext().getExternalSource() &&
+ "Class can't be externally completed without an external source");
+ assert(!ForwardDecl &&
+ "Forward declarations can't be externally completed");
+ ExternallyCompleted = true;
+}
+
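A minimal sketch of the lazy-completion pattern these hooks introduce (hypothetical types): a flag marks the interface as externally completed, and the first query clears the flag before calling out to the external source, so queries made while the definition is being filled in do not recurse.

    struct LazyDecl;
    struct ExternalSource {
      void CompleteType(LazyDecl *) { /* fill in the definition */ }
    };

    struct LazyDecl {
      ExternalSource *Source;
      mutable bool ExternallyCompleted;

      void LoadExternalDefinition() const {
        ExternallyCompleted = false; // clear first to avoid re-entry
        Source->CompleteType(const_cast<LazyDecl *>(this));
      }
    };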
ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
return getASTContext().getObjCImplementation(
const_cast<ObjCInterfaceDecl*>(this));
}
@@ -506,6 +535,9 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
///
ObjCCategoryDecl *
ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ if (ExternallyCompleted)
+ LoadExternalDefinition();
+
for (ObjCCategoryDecl *Category = getCategoryList();
Category; Category = Category->getNextClassCategory())
if (Category->getIdentifier() == CategoryId)
@@ -711,7 +743,7 @@ ObjCClassDecl::ObjCClassDecl(DeclContext *DC, SourceLocation L,
void ObjCClassDecl::setClassList(ASTContext &C, ObjCInterfaceDecl*const*List,
const SourceLocation *Locs, unsigned Num) {
ForwardDecls = (ObjCClassRef*) C.Allocate(sizeof(ObjCClassRef)*Num,
- llvm::alignof<ObjCClassRef>());
+ llvm::alignOf<ObjCClassRef>());
for (unsigned i = 0; i < Num; ++i)
new (&ForwardDecls[i]) ObjCClassRef(List[i], Locs[i]);
@@ -896,7 +928,6 @@ ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, T);
}
-
//===----------------------------------------------------------------------===//
// ObjCPropertyImplDecl
//===----------------------------------------------------------------------===//
@@ -907,8 +938,16 @@ ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
SourceLocation L,
ObjCPropertyDecl *property,
Kind PK,
- ObjCIvarDecl *ivar) {
- return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar);
+ ObjCIvarDecl *ivar,
+ SourceLocation ivarLoc) {
+ return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+ ivarLoc);
}
+SourceRange ObjCPropertyImplDecl::getSourceRange() const {
+ SourceLocation EndLoc = getLocation();
+ if (IvarLoc.isValid())
+ EndLoc = IvarLoc;
+ return SourceRange(AtLoc, EndLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index f18d2f0..77b4257 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -51,6 +51,7 @@ namespace {
void VisitFunctionDecl(FunctionDecl *D);
void VisitFieldDecl(FieldDecl *D);
void VisitVarDecl(VarDecl *D);
+ void VisitLabelDecl(LabelDecl *D);
void VisitParmVarDecl(ParmVarDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
void VisitNamespaceDecl(NamespaceDecl *D);
@@ -307,9 +308,26 @@ void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
}
void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
- Out << "enum " << D << " {\n";
- VisitDeclContext(D);
- Indent() << "}";
+ Out << "enum ";
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ Out << "class ";
+ else
+ Out << "struct ";
+ }
+ Out << D;
+
+ if (D->isFixed()) {
+ std::string Underlying;
+ D->getIntegerType().getAsStringInternal(Underlying, Policy);
+ Out << " : " << Underlying;
+ }
+
+ if (D->isDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
}
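Declarations exercising each new branch of the printer (illustrative):

    enum class Color : unsigned { Red, Green }; // scoped and fixed: "enum class Color : unsigned {...}"
    enum struct Mode;                           // scoped via 'struct', no definition: no braces printed
    enum Legacy { A, B };                       // unscoped: printed as before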
void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
@@ -349,9 +367,15 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
std::string Proto = D->getNameInfo().getAsString();
- if (isa<FunctionType>(D->getType().getTypePtr())) {
- const FunctionType *AFT = D->getType()->getAs<FunctionType>();
+ QualType Ty = D->getType();
+ while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ Proto = '(' + Proto + ')';
+ Ty = PT->getInnerType();
+ }
+
+ if (isa<FunctionType>(Ty)) {
+ const FunctionType *AFT = Ty->getAs<FunctionType>();
const FunctionProtoType *FT = 0;
if (D->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
@@ -379,6 +403,16 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += ")";
+ if (FT && FT->getTypeQuals()) {
+ unsigned TypeQuals = FT->getTypeQuals();
+ if (TypeQuals & Qualifiers::Const)
+ Proto += " const";
+ if (TypeQuals & Qualifiers::Volatile)
+ Proto += " volatile";
+ if (TypeQuals & Qualifiers::Restrict)
+ Proto += " restrict";
+ }
+
if (FT && FT->hasExceptionSpec()) {
Proto += " throw(";
if (FT->hasAnyExceptionSpec())
@@ -399,18 +433,18 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->hasAttr<NoReturnAttr>())
Proto += " __attribute((noreturn))";
if (CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D)) {
- if (CDecl->getNumBaseOrMemberInitializers() > 0) {
+ if (CDecl->getNumCtorInitializers() > 0) {
Proto += " : ";
Out << Proto;
Proto.clear();
for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
E = CDecl->init_end();
B != E; ++B) {
- CXXBaseOrMemberInitializer * BMInitializer = (*B);
+ CXXCtorInitializer * BMInitializer = (*B);
if (B != CDecl->init_begin())
Out << ", ";
- if (BMInitializer->isMemberInitializer()) {
- FieldDecl *FD = BMInitializer->getMember();
+ if (BMInitializer->isAnyMemberInitializer()) {
+ FieldDecl *FD = BMInitializer->getAnyMember();
Out << FD;
} else {
Out << QualType(BMInitializer->getBaseClass(),
@@ -422,8 +456,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
// Nothing to print
} else {
Expr *Init = BMInitializer->getInit();
- if (CXXExprWithTemporaries *Tmp
- = dyn_cast<CXXExprWithTemporaries>(Init))
+ if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
Init = Tmp->getSubExpr();
Init = Init->IgnoreParens();
@@ -461,7 +494,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
else
AFT->getResultType().getAsStringInternal(Proto, Policy);
} else {
- D->getType().getAsStringInternal(Proto, Policy);
+ Ty.getAsStringInternal(Proto, Policy);
}
Out << Proto;
@@ -505,6 +538,11 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
}
}
+void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
+ Out << D->getNameAsString() << ":";
+}
+
+
void DeclPrinter::VisitVarDecl(VarDecl *D) {
if (!Policy.SuppressSpecifiers && D->getStorageClass() != SC_None)
Out << VarDecl::getStorageClassSpecifierString(D->getStorageClass()) << " ";
@@ -518,12 +556,15 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
T = Parm->getOriginalType();
T.getAsStringInternal(Name, Policy);
Out << Name;
- if (D->getInit()) {
+ if (Expr *Init = D->getInit()) {
if (D->hasCXXDirectInitializer())
Out << "(";
- else
- Out << " = ";
- D->getInit()->printPretty(Out, Context, 0, Policy, Indentation);
+ else {
+ CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init);
+ if (!CCE || CCE->getConstructor()->isCopyConstructor())
+ Out << " = ";
+ }
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
if (D->hasCXXDirectInitializer())
Out << ")";
}
@@ -646,6 +687,9 @@ void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Out << NTTP->getType().getAsString(Policy);
+ if (NTTP->isParameterPack() && !isa<PackExpansionType>(NTTP->getType()))
+ Out << "...";
+
if (IdentifierInfo *Name = NTTP->getIdentifier()) {
Out << ' ';
Out << Name->getName();
@@ -661,8 +705,11 @@ void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
Out << "> ";
- if (isa<TemplateTemplateParmDecl>(D)) {
- Out << "class " << D->getName();
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ Out << "class ";
+ if (TTP->isParameterPack())
+ Out << "...";
+ Out << D->getName();
} else {
Visit(D->getTemplatedDecl());
}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
index e69338a..a73deea 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -14,10 +14,13 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
+#include <memory>
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -35,7 +38,7 @@ TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
}
TemplateParameterList *
-TemplateParameterList::Create(ASTContext &C, SourceLocation TemplateLoc,
+TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
SourceLocation LAngleLoc, NamedDecl **Params,
unsigned NumParams, SourceLocation RAngleLoc) {
unsigned Size = sizeof(TemplateParameterList)
@@ -47,24 +50,33 @@ TemplateParameterList::Create(ASTContext &C, SourceLocation TemplateLoc,
}
unsigned TemplateParameterList::getMinRequiredArguments() const {
- unsigned NumRequiredArgs = size();
- iterator Param = const_cast<TemplateParameterList *>(this)->end(),
- ParamBegin = const_cast<TemplateParameterList *>(this)->begin();
- while (Param != ParamBegin) {
- --Param;
-
- if (!(*Param)->isTemplateParameterPack() &&
- !(isa<TemplateTypeParmDecl>(*Param) &&
- cast<TemplateTypeParmDecl>(*Param)->hasDefaultArgument()) &&
- !(isa<NonTypeTemplateParmDecl>(*Param) &&
- cast<NonTypeTemplateParmDecl>(*Param)->hasDefaultArgument()) &&
- !(isa<TemplateTemplateParmDecl>(*Param) &&
- cast<TemplateTemplateParmDecl>(*Param)->hasDefaultArgument()))
+ unsigned NumRequiredArgs = 0;
+ for (iterator P = const_cast<TemplateParameterList *>(this)->begin(),
+ PEnd = const_cast<TemplateParameterList *>(this)->end();
+ P != PEnd; ++P) {
+ if ((*P)->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ if (NTTP->isExpandedParameterPack()) {
+ NumRequiredArgs += NTTP->getNumExpansionTypes();
+ continue;
+ }
+
break;
-
- --NumRequiredArgs;
+ }
+
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->hasDefaultArgument())
+ break;
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->hasDefaultArgument())
+ break;
+ } else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument())
+ break;
+
+ ++NumRequiredArgs;
}
-
+
return NumRequiredArgs;
}
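In other words, the rewritten loop counts parameters from the front and stops at the first one that either has a default argument or is an unexpanded parameter pack, while a pack expanded to a known, fixed length contributes its expansion count. Illustrative declarations (hypothetical templates):

    template <class T, class U = int> struct A; // 1 required argument: U is defaulted
    template <class T, class... Ts>   struct B; // 1 required argument: the pack may be empty
    template <int N, int M>           struct C; // 2 required arguments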
@@ -92,7 +104,7 @@ RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() {
RedeclarableTemplateDecl *First = getCanonicalDecl();
if (First->CommonOrPrev.isNull()) {
- CommonBase *CommonPtr = First->newCommon();
+ CommonBase *CommonPtr = First->newCommon(getASTContext());
First->CommonOrPrev = CommonPtr;
CommonPtr->Latest = First;
}
@@ -156,9 +168,10 @@ FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
}
-RedeclarableTemplateDecl::CommonBase *FunctionTemplateDecl::newCommon() {
- Common *CommonPtr = new (getASTContext()) Common;
- getASTContext().AddDeallocation(DeallocateCommon, CommonPtr);
+RedeclarableTemplateDecl::CommonBase *
+FunctionTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
@@ -188,9 +201,33 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
return New;
}
-RedeclarableTemplateDecl::CommonBase *ClassTemplateDecl::newCommon() {
- Common *CommonPtr = new (getASTContext()) Common;
- getASTContext().AddDeallocation(DeallocateCommon, CommonPtr);
+void ClassTemplateDecl::LoadLazySpecializations() {
+ Common *CommonPtr = getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
+ CommonPtr->LazySpecializations = 0;
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
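The LazySpecializations blob decoded above is a length-prefixed array of external declaration IDs: the first element is the count, followed by that many IDs. A standalone sketch of the same decoding (hypothetical Load callback standing in for GetExternalDecl):

    #include <cstdint>

    void loadLazy(uint32_t *&Specs, void (*Load)(uint32_t)) {
      if (!Specs)
        return;
      uint32_t *P = Specs;
      Specs = 0; // clear first so loading cannot re-enter this path
      for (uint32_t I = 0, N = *P++; I != N; ++I)
        Load(P[I]);
    }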
+
+llvm::FoldingSet<ClassTemplateSpecializationDecl> &
+ClassTemplateDecl::getSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+ClassTemplateDecl::getPartialSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->PartialSpecializations;
+}
+
+RedeclarableTemplateDecl::CommonBase *
+ClassTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
@@ -200,6 +237,13 @@ ClassTemplateDecl::findSpecialization(const TemplateArgument *Args,
return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos);
}
+void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
+ void *InsertPos) {
+ getSpecializations().InsertNode(D, InsertPos);
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args,
unsigned NumArgs,
@@ -208,6 +252,14 @@ ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args,
InsertPos);
}
+void ClassTemplateDecl::AddPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *D,
+ void *InsertPos) {
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
void ClassTemplateDecl::getPartialSpecializations(
llvm::SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
@@ -258,10 +310,13 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
if (!CommonPtr->InjectedClassNameType.isNull())
return CommonPtr->InjectedClassNameType;
- // FIXME: n2800 14.6.1p1 should say how the template arguments
- // corresponding to template parameter packs should be pack
- // expansions. We already say that in 14.6.2.1p2, so it would be
- // better to fix that redundancy.
+ // C++0x [temp.dep.type]p2:
+ // The template argument list of a primary template is a template argument
+ // list in which the nth template argument has the value of the nth template
+ // parameter of the class template. If the nth template parameter is a
+ // template parameter pack (14.5.3), the nth template argument is a pack
+ // expansion (14.5.3) whose pattern is the name of the template parameter
+ // pack.
ASTContext &Context = getASTContext();
TemplateParameterList *Params = getTemplateParameters();
llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
@@ -269,19 +324,38 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
for (TemplateParameterList::iterator Param = Params->begin(),
ParamEnd = Params->end();
Param != ParamEnd; ++Param) {
- if (isa<TemplateTypeParmDecl>(*Param)) {
- QualType ParamType = Context.getTypeDeclType(cast<TypeDecl>(*Param));
- TemplateArgs.push_back(TemplateArgument(ParamType));
+ TemplateArgument Arg;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ QualType ArgType = Context.getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = Context.getPackExpansionType(ArgType,
+ llvm::Optional<unsigned>());
+
+ Arg = TemplateArgument(ArgType);
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
Expr *E = new (Context) DeclRefExpr(NTTP,
NTTP->getType().getNonLValueExprType(Context),
+ Expr::getValueKindForType(NTTP->getType()),
NTTP->getLocation());
- TemplateArgs.push_back(TemplateArgument(E));
+
+ if (NTTP->isParameterPack())
+ E = new (Context) PackExpansionExpr(Context.DependentTy, E,
+ NTTP->getLocation(),
+ llvm::Optional<unsigned>());
+ Arg = TemplateArgument(E);
} else {
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
- TemplateArgs.push_back(TemplateArgument(TemplateName(TTP)));
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), llvm::Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
}
+
+ if ((*Param)->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1);
+
+ TemplateArgs.push_back(Arg);
}
CommonPtr->InjectedClassNameType
@@ -296,7 +370,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
//===----------------------------------------------------------------------===//
TemplateTypeParmDecl *
-TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
+TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id, bool Typename,
bool ParameterPack) {
@@ -305,7 +379,7 @@ TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
}
TemplateTypeParmDecl *
-TemplateTypeParmDecl::Create(ASTContext &C, EmptyShell Empty) {
+TemplateTypeParmDecl::Create(const ASTContext &C, EmptyShell Empty) {
return new (C) TemplateTypeParmDecl(0, SourceLocation(), 0, false,
QualType(), false);
}
@@ -326,12 +400,62 @@ unsigned TemplateTypeParmDecl::getIndex() const {
// NonTypeTemplateParmDecl Method Implementations
//===----------------------------------------------------------------------===//
+NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
+ SourceLocation L, unsigned D,
+ unsigned P, IdentifierInfo *Id,
+ QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, L, Id, T, TInfo),
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false),
+ ParameterPack(true), ExpandedParameterPack(true),
+ NumExpandedTypes(NumExpandedTypes)
+{
+ if (ExpandedTypes && ExpandedTInfos) {
+ void **TypesAndInfos = reinterpret_cast<void **>(this + 1);
+ for (unsigned I = 0; I != NumExpandedTypes; ++I) {
+ TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr();
+ TypesAndInfos[2*I + 1] = ExpandedTInfos[I];
+ }
+ }
+}
+
NonTypeTemplateParmDecl *
-NonTypeTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id, QualType T,
- TypeSourceInfo *TInfo) {
- return new (C) NonTypeTemplateParmDecl(DC, L, D, P, Id, T, TInfo);
+ bool ParameterPack, TypeSourceInfo *TInfo) {
+ return new (C) NonTypeTemplateParmDecl(DC, L, D, P, Id, T, ParameterPack,
+ TInfo);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) NonTypeTemplateParmDecl(DC, L, D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes,
+ ExpandedTInfos);
+}
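Both the constructor and the Create overload above rely on the common LLVM trailing-allocation idiom: a single allocation sized for the object plus a trailing array, with the array addressed as `this + 1` (here holding interleaved type/TypeSourceInfo pairs, hence the factor of two). A generic sketch of the layout (hypothetical Header type):

    #include <cstdlib>
    #include <new>

    struct Header {
      unsigned NumElts;
      Header(unsigned N) : NumElts(N) {}
      void **elements() { return reinterpret_cast<void **>(this + 1); }
    };

    Header *createWithTrailingArray(unsigned N) {
      void *Mem = std::malloc(sizeof(Header) + N * sizeof(void *));
      return new (Mem) Header(N); // trailing array starts right after the object
    }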
+
+SourceLocation NonTypeTemplateParmDecl::getInnerLocStart() const {
+ SourceLocation Start = getTypeSpecStartLoc();
+ if (Start.isInvalid())
+ Start = getLocation();
+ return Start;
+}
+
+SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
+ return SourceRange(getOuterLocStart(), getLocation());
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
@@ -345,128 +469,29 @@ SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
//===----------------------------------------------------------------------===//
TemplateTemplateParmDecl *
-TemplateTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
- IdentifierInfo *Id,
+ bool ParameterPack, IdentifierInfo *Id,
TemplateParameterList *Params) {
- return new (C) TemplateTemplateParmDecl(DC, L, D, P, Id, Params);
-}
-
-//===----------------------------------------------------------------------===//
-// TemplateArgumentListBuilder Implementation
-//===----------------------------------------------------------------------===//
-
-void TemplateArgumentListBuilder::Append(const TemplateArgument &Arg) {
- assert((Arg.getKind() != TemplateArgument::Type ||
- Arg.getAsType().isCanonical()) && "Type must be canonical!");
- assert(FlatArgs.size() < MaxFlatArgs && "Argument list builder is full!");
- assert(!StructuredArgs &&
- "Can't append arguments when an argument pack has been added!");
-
- FlatArgs.push_back(Arg);
-}
-
-void TemplateArgumentListBuilder::BeginPack() {
- assert(!AddingToPack && "Already adding to pack!");
- assert(!StructuredArgs && "Argument list already contains a pack!");
-
- AddingToPack = true;
- PackBeginIndex = FlatArgs.size();
-}
-
-void TemplateArgumentListBuilder::EndPack() {
- assert(AddingToPack && "Not adding to pack!");
- assert(!StructuredArgs && "Argument list already contains a pack!");
-
- AddingToPack = false;
-
- // FIXME: This is a memory leak!
- StructuredArgs = new TemplateArgument[MaxStructuredArgs];
-
- // First copy the flat entries over to the list (if any)
- for (unsigned I = 0; I != PackBeginIndex; ++I) {
- NumStructuredArgs++;
- StructuredArgs[I] = FlatArgs[I];
- }
-
- // Next, set the pack.
- TemplateArgument *PackArgs = 0;
- unsigned NumPackArgs = NumFlatArgs - PackBeginIndex;
- // FIXME: NumPackArgs shouldn't be negative here???
- if (NumPackArgs)
- PackArgs = FlatArgs.data()+PackBeginIndex;
-
- StructuredArgs[NumStructuredArgs++].setArgumentPack(PackArgs, NumPackArgs,
- /*CopyArgs=*/false);
+ return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+ Params);
}
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
-TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
- TemplateArgumentListBuilder &Builder,
- bool TakeArgs)
- : FlatArguments(Builder.getFlatArguments(), TakeArgs),
- NumFlatArguments(Builder.flatSize()),
- StructuredArguments(Builder.getStructuredArguments(), TakeArgs),
- NumStructuredArguments(Builder.structuredSize()) {
-
- if (!TakeArgs)
- return;
-
- // If this does take ownership of the arguments, then we have to new them
- // and copy over.
- TemplateArgument *NewArgs =
- new (Context) TemplateArgument[Builder.flatSize()];
- std::copy(Builder.getFlatArguments(),
- Builder.getFlatArguments()+Builder.flatSize(), NewArgs);
- FlatArguments.setPointer(NewArgs);
-
- // Just reuse the structured and flat arguments array if possible.
- if (Builder.getStructuredArguments() == Builder.getFlatArguments()) {
- StructuredArguments.setPointer(NewArgs);
- StructuredArguments.setInt(0);
- } else {
- TemplateArgument *NewSArgs =
- new (Context) TemplateArgument[Builder.flatSize()];
- std::copy(Builder.getFlatArguments(),
- Builder.getFlatArguments()+Builder.flatSize(), NewSArgs);
- StructuredArguments.setPointer(NewSArgs);
- }
-}
-
-TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
- const TemplateArgument *Args,
- unsigned NumArgs)
- : NumFlatArguments(0), NumStructuredArguments(0) {
- init(Context, Args, NumArgs);
-}
-
-/// Produces a shallow copy of the given template argument list. This
-/// assumes that the input argument list outlives it. This takes the list as
-/// a pointer to avoid looking like a copy constructor, since this really
-/// really isn't safe to use that way.
-TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList *Other)
- : FlatArguments(Other->FlatArguments.getPointer(), false),
- NumFlatArguments(Other->flat_size()),
- StructuredArguments(Other->StructuredArguments.getPointer(), false),
- NumStructuredArguments(Other->NumStructuredArguments) { }
-
-void TemplateArgumentList::init(ASTContext &Context,
- const TemplateArgument *Args,
- unsigned NumArgs) {
-assert(NumFlatArguments == 0 && NumStructuredArguments == 0 &&
- "Already initialized!");
-
-NumFlatArguments = NumStructuredArguments = NumArgs;
-TemplateArgument *NewArgs = new (Context) TemplateArgument[NumArgs];
-std::copy(Args, Args+NumArgs, NewArgs);
-FlatArguments.setPointer(NewArgs);
-FlatArguments.setInt(1); // Owns the pointer.
-
-// Just reuse the flat arguments array.
-StructuredArguments.setPointer(NewArgs);
-StructuredArguments.setInt(0); // Doesn't own the pointer.
+TemplateArgumentList *
+TemplateArgumentList::CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ std::size_t Size = sizeof(TemplateArgumentList)
+ + NumArgs * sizeof(TemplateArgument);
+ void *Mem = Context.Allocate(Size);
+ TemplateArgument *StoredArgs
+ = reinterpret_cast<TemplateArgument *>(
+ static_cast<TemplateArgumentList *>(Mem) + 1);
+ std::uninitialized_copy(Args, Args + NumArgs, StoredArgs);
+ return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true);
}
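CreateCopy uses the same trailing-allocation layout, but copy-constructs the arguments into the raw trailing storage with std::uninitialized_copy rather than assigning into already-constructed slots. A reduced sketch with a plain element type (hypothetical List type):

    #include <cstdlib>
    #include <memory>
    #include <new>

    struct List {
      unsigned Size;
      List(unsigned N) : Size(N) {}
    };

    List *createCopy(const int *Args, unsigned N) {
      void *Mem = std::malloc(sizeof(List) + N * sizeof(int));
      int *Stored = reinterpret_cast<int *>(static_cast<List *>(Mem) + 1);
      std::uninitialized_copy(Args, Args + N, Stored); // construct in place
      return new (Mem) List(N);
    }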
//===----------------------------------------------------------------------===//
@@ -476,14 +501,15 @@ ClassTemplateSpecializationDecl::
ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl)
: CXXRecordDecl(DK, TK, DC, L,
SpecializedTemplate->getIdentifier(),
PrevDecl),
SpecializedTemplate(SpecializedTemplate),
ExplicitInfo(0),
- TemplateArgs(Context, Builder, /*TakeArgs=*/true),
+ TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
SpecializationKind(TSK_Undeclared) {
}
@@ -497,14 +523,15 @@ ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl) {
ClassTemplateSpecializationDecl *Result
= new (Context)ClassTemplateSpecializationDecl(Context,
ClassTemplateSpecialization,
TK, DC, L,
SpecializedTemplate,
- Builder,
+ Args, NumArgs,
PrevDecl);
Context.getTypeDeclType(Result, PrevDecl);
return Result;
@@ -524,8 +551,8 @@ ClassTemplateSpecializationDecl::getNameForDiagnostic(std::string &S,
const TemplateArgumentList &TemplateArgs = getTemplateArgs();
S += TemplateSpecializationType::PrintTemplateArgumentList(
- TemplateArgs.getFlatArgumentList(),
- TemplateArgs.flat_size(),
+ TemplateArgs.data(),
+ TemplateArgs.size(),
Policy);
}
@@ -545,7 +572,8 @@ ClassTemplatePartialSpecializationDecl::
Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgumentListBuilder &Builder,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
const TemplateArgumentListInfo &ArgInfos,
QualType CanonInjectedType,
ClassTemplatePartialSpecializationDecl *PrevDecl,
@@ -559,7 +587,7 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L,
= new (Context)ClassTemplatePartialSpecializationDecl(Context, TK,
DC, L, Params,
SpecializedTemplate,
- Builder,
+ Args, NumArgs,
ClonedArgs, N,
PrevDecl,
SequenceNumber);
@@ -575,19 +603,6 @@ ClassTemplatePartialSpecializationDecl::Create(ASTContext &Context,
return new (Context)ClassTemplatePartialSpecializationDecl();
}
-void ClassTemplatePartialSpecializationDecl::
-initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos) {
- assert(ArgsAsWritten == 0 && "ArgsAsWritten already set");
- unsigned N = ArgInfos.size();
- TemplateArgumentLoc *ClonedArgs
- = new (getASTContext()) TemplateArgumentLoc[N];
- for (unsigned I = 0; I != N; ++I)
- ClonedArgs[I] = ArgInfos[I];
-
- ArgsAsWritten = ClonedArgs;
- NumArgsAsWritten = N;
-}
-
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
index 860a0b2..cef54e9 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -93,10 +94,8 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
Selector RHSSelector = RHS.getObjCSelector();
unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
- IdentifierInfo *LHSId = LHSSelector.getIdentifierInfoForSlot(I);
- IdentifierInfo *RHSId = RHSSelector.getIdentifierInfoForSlot(I);
-
- switch (LHSId->getName().compare(RHSId->getName())) {
+ switch (LHSSelector.getNameForSlot(I).compare(
+ RHSSelector.getNameForSlot(I))) {
case -1: return true;
case 1: return false;
default: break;
@@ -385,7 +384,7 @@ void DeclarationName::dump() const {
llvm::errs() << '\n';
}
-DeclarationNameTable::DeclarationNameTable(ASTContext &C) : Ctx(C) {
+DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
@@ -512,6 +511,28 @@ DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
}
}
+bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->containsUnexpandedParameterPack();
+
+ return Name.getCXXNameType()->containsUnexpandedParameterPack();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
std::string DeclarationNameInfo::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
diff --git a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
new file mode 100644
index 0000000..9d828fc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
@@ -0,0 +1,1028 @@
+//===--- DumpXML.cpp - Detailed XML dumping ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Decl::dumpXML() method, a debugging tool to
+// print a detailed graph of an AST in an unspecified XML format.
+//
+// There is no guarantee of stability for this format.
+//
+//===----------------------------------------------------------------------===//
+
+// Only pay the code-size cost for this in assertions-enabled builds.
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace clang;
+
+#ifndef NDEBUG
+
+namespace {
+
+enum NodeState {
+ NS_Attrs, NS_LazyChildren, NS_Children
+};
+
+struct Node {
+ llvm::StringRef Name;
+ NodeState State;
+ Node(llvm::StringRef name) : Name(name), State(NS_Attrs) {}
+
+ bool isDoneWithAttrs() const { return State != NS_Attrs; }
+};
+
+template <class Impl> struct XMLDeclVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(D))
+
+ void dispatch(Decl *D) {
+ switch (D->getKind()) {
+ default: llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ DISPATCH(dispatch##DERIVED##DeclAttrs, DERIVED##Decl); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##DeclChildren, DERIVED##Decl); \
+ DISPATCH(dispatch##DERIVED##DeclAsContext, DERIVED##Decl); \
+ break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ }
+
+#define DECL(DERIVED, BASE) \
+ void dispatch##DERIVED##DeclAttrs(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##DeclAttrs, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAttrs(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclChildren(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##DeclChildren, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclChildren(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclAsContext(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##AsContext, BASE); \
+ DISPATCH(visit##DERIVED##DeclAsContext, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAsContext(DERIVED##Decl *D) {}
+#include "clang/AST/DeclNodes.inc"
+
+ void dispatchDeclAttrs(Decl *D) {
+ DISPATCH(visitDeclAttrs, Decl);
+ }
+ void visitDeclAttrs(Decl *D) {}
+
+ void dispatchDeclChildren(Decl *D) {
+ DISPATCH(visitDeclChildren, Decl);
+ }
+ void visitDeclChildren(Decl *D) {}
+
+ void dispatchDeclAsContext(Decl *D) {
+ DISPATCH(visitDeclAsContext, Decl);
+ }
+ void visitDeclAsContext(Decl *D) {}
+
+#undef DISPATCH
+};
+
+template <class Impl> struct XMLTypeVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(T))
+
+ void dispatch(Type *T) {
+ switch (T->getTypeClass()) {
+ default: llvm_unreachable("Type that isn't part of TypeNodes.inc!");
+#define TYPE(DERIVED, BASE) \
+ case Type::DERIVED: \
+ DISPATCH(dispatch##DERIVED##TypeAttrs, DERIVED##Type); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##TypeChildren, DERIVED##Type); \
+ break;
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+#define TYPE(DERIVED, BASE) \
+ void dispatch##DERIVED##TypeAttrs(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##TypeAttrs, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeAttrs(DERIVED##Type *T) {} \
+ void dispatch##DERIVED##TypeChildren(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##TypeChildren, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeChildren(DERIVED##Type *T) {}
+#include "clang/AST/TypeNodes.def"
+
+ void dispatchTypeAttrs(Type *T) {
+ DISPATCH(visitTypeAttrs, Type);
+ }
+ void visitTypeAttrs(Type *T) {}
+
+ void dispatchTypeChildren(Type *T) {
+ DISPATCH(visitTypeChildren, Type);
+ }
+ void visitTypeChildren(Type *T) {}
+
+#undef DISPATCH
+};
+
+static llvm::StringRef getTypeKindName(Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(DERIVED, BASE) case Type::DERIVED: return #DERIVED "Type";
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("unknown type kind!");
+ return "unknown_type";
+}
+
+struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
+ public XMLTypeVisitor<XMLDumper> {
+ llvm::raw_ostream &out;
+ ASTContext &Context;
+ llvm::SmallVector<Node, 16> Stack;
+ unsigned Indent;
+ explicit XMLDumper(llvm::raw_ostream &OS, ASTContext &context)
+ : out(OS), Context(context), Indent(0) {}
+
+ void indent() {
+ for (unsigned I = Indent; I; --I)
+ out << ' ';
+ }
+
+ /// Push a new node on the stack.
+ void push(llvm::StringRef name) {
+ if (!Stack.empty()) {
+ assert(Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ Stack.back().State = NS_Children;
+ out << ">\n";
+ }
+ Indent++;
+ indent();
+ }
+ Stack.push_back(Node(name));
+ out << '<' << name;
+ }
+
+ /// Set the given attribute to the given value.
+ void set(llvm::StringRef attr, llvm::StringRef value) {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ out << ' ' << attr << '=' << '"' << value << '"'; // TODO: quotation
+ }
+
+ /// Finish attributes.
+ void completeAttrs() {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ Stack.back().State = NS_LazyChildren;
+ }
+
+ /// Pop a node.
+ void pop() {
+ assert(!Stack.empty() && Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ out << "/>\n";
+ } else {
+ indent();
+ out << "</" << Stack.back().Name << ">\n";
+ }
+ if (Stack.size() > 1) Indent--;
+ Stack.pop_back();
+ }
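push() and pop() above cooperate to defer emitting the '>' of an opening tag until a first child actually arrives, so childless nodes collapse to a self-closing tag. A reduced, standalone sketch of the same protocol:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct MiniDumper {
      // (tag name, whether its opening '>' has been emitted yet)
      std::vector<std::pair<std::string, bool> > Stack;

      void push(const std::string &Name) {
        if (!Stack.empty() && !Stack.back().second) {
          std::cout << ">\n"; // first child: finish the parent's open tag
          Stack.back().second = true;
        }
        Stack.push_back(std::make_pair(Name, false));
        std::cout << '<' << Name;
      }

      void pop() {
        if (!Stack.back().second)
          std::cout << "/>\n"; // never had children: self-close
        else
          std::cout << "</" << Stack.back().first << ">\n";
        Stack.pop_back();
      }
    };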
+
+ //---- General utilities -------------------------------------------//
+
+ void setPointer(llvm::StringRef prop, const void *p) {
+ llvm::SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << p;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setPointer(void *p) {
+ setPointer("ptr", p);
+ }
+
+ void setInteger(llvm::StringRef prop, const llvm::APSInt &v) {
+ set(prop, v.toString(10));
+ }
+
+ void setInteger(llvm::StringRef prop, unsigned n) {
+ llvm::SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << n;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setFlag(llvm::StringRef prop, bool flag) {
+ if (flag) set(prop, "true");
+ }
+
+ void setName(DeclarationName Name) {
+ if (!Name)
+ return set("name", "");
+
+ // Common case.
+ if (Name.isIdentifier())
+ return set("name", Name.getAsIdentifierInfo()->getName());
+
+ set("name", Name.getAsString());
+ }
+
+ class TemporaryContainer {
+ XMLDumper &Dumper;
+ public:
+ TemporaryContainer(XMLDumper &dumper, llvm::StringRef name)
+ : Dumper(dumper) {
+ Dumper.push(name);
+ Dumper.completeAttrs();
+ }
+
+ ~TemporaryContainer() {
+ Dumper.pop();
+ }
+ };
+
+ void visitTemplateParameters(TemplateParameterList *L) {
+ push("template_parameters");
+ completeAttrs();
+ for (TemplateParameterList::iterator
+ I = L->begin(), E = L->end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+
+ void visitTemplateArguments(const TemplateArgumentList &L) {
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = L.size(); I != E; ++I)
+ dispatch(L[I]);
+ pop();
+ }
+
+ /// Visits a reference to the given declaration.
+ void visitDeclRef(Decl *D) {
+ push(D->getDeclKindName());
+ setPointer("ref", D);
+ completeAttrs();
+ pop();
+ }
+ void visitDeclRef(llvm::StringRef Name, Decl *D) {
+ TemporaryContainer C(*this, Name);
+ if (D) visitDeclRef(D);
+ }
+
+ void dispatch(const TemplateArgument &A) {
+ switch (A.getKind()) {
+ case TemplateArgument::Null: {
+ TemporaryContainer C(*this, "null");
+ break;
+ }
+ case TemplateArgument::Type: {
+ dispatch(A.getAsType());
+ break;
+ }
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ // FIXME: Implement!
+ break;
+
+ case TemplateArgument::Declaration: {
+ visitDeclRef(A.getAsDecl());
+ break;
+ }
+ case TemplateArgument::Integral: {
+ push("integer");
+ setInteger("value", *A.getAsIntegral());
+ completeAttrs();
+ pop();
+ break;
+ }
+ case TemplateArgument::Expression: {
+ dispatch(A.getAsExpr());
+ break;
+ }
+ case TemplateArgument::Pack: {
+ for (TemplateArgument::pack_iterator P = A.pack_begin(),
+ PEnd = A.pack_end();
+ P != PEnd; ++P)
+ dispatch(*P);
+ break;
+ }
+ }
+ }
+
+ void dispatch(const TemplateArgumentLoc &A) {
+ dispatch(A.getArgument());
+ }
+
+ //---- Declarations ------------------------------------------------//
+ // Calls are made in this order:
+ // # Enter a new node.
+ // push("FieldDecl")
+ //
+ // # In this phase, attributes are set on the node.
+ // visitDeclAttrs(D)
+ // visitNamedDeclAttrs(D)
+ // ...
+ // visitFieldDeclAttrs(D)
+ //
+ // # No more attributes after this point.
+ // completeAttrs()
+ //
+ // # Create "header" child nodes, i.e. those which logically
+ // # belong to the declaration itself.
+ // visitDeclChildren(D)
+ // visitNamedDeclChildren(D)
+ // ...
+ // visitFieldDeclChildren(D)
+ //
+ // # Create nodes for the lexical children.
+ // visitDeclAsContext(D)
+ // visitNamedDeclAsContext(D)
+ // ...
+ // visitFieldDeclAsContext(D)
+ //
+ // # Finish the node.
+ // pop();
+ void dispatch(Decl *D) {
+ push(D->getDeclKindName());
+ XMLDeclVisitor<XMLDumper>::dispatch(D);
+ pop();
+ }
+ void visitDeclAttrs(Decl *D) {
+ setPointer(D);
+ }
+
+ /// Visit all the lexical decls in the given context.
+ void visitDeclContext(DeclContext *DC) {
+ for (DeclContext::decl_iterator
+ I = DC->decls_begin(), E = DC->decls_end(); I != E; ++I)
+ dispatch(*I);
+
+ // FIXME: point out visible declarations not in lexical context?
+ }
+
+ /// Set the "access" attribute on the current node according to the
+ /// given specifier.
+ void setAccess(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_public: return set("access", "public");
+ case AS_protected: return set("access", "protected");
+ case AS_private: return set("access", "private");
+ case AS_none: llvm_unreachable("explicit forbidden access");
+ }
+ }
+
+ template <class T> void visitRedeclarableAttrs(T *D) {
+ if (T *Prev = D->getPreviousDeclaration())
+ setPointer("previous", Prev);
+ }
+
+
+ // TranslationUnitDecl
+ void visitTranslationUnitDeclAsContext(TranslationUnitDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // LinkageSpecDecl
+ void visitLinkageSpecDeclAttrs(LinkageSpecDecl *D) {
+ llvm::StringRef lang = "";
+ switch (D->getLanguage()) {
+ case LinkageSpecDecl::lang_c: lang = "C"; break;
+ case LinkageSpecDecl::lang_cxx: lang = "C++"; break;
+ }
+ set("lang", lang);
+ }
+ void visitLinkageSpecDeclAsContext(LinkageSpecDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamespaceDecl
+ void visitNamespaceDeclAttrs(NamespaceDecl *D) {
+ setFlag("inline", D->isInline());
+ if (!D->isOriginalNamespace())
+ setPointer("original", D->getOriginalNamespace());
+ }
+ void visitNamespaceDeclAsContext(NamespaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamedDecl
+ void visitNamedDeclAttrs(NamedDecl *D) {
+ setName(D->getDeclName());
+ }
+
+ // ValueDecl
+ void visitValueDeclChildren(ValueDecl *D) {
+ dispatch(D->getType());
+ }
+
+ // DeclaratorDecl
+ void visitDeclaratorDeclChildren(DeclaratorDecl *D) {
+ //dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // VarDecl
+ void visitVarDeclAttrs(VarDecl *D) {
+ visitRedeclarableAttrs(D);
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ setFlag("directinit", D->hasCXXDirectInitializer());
+ setFlag("nrvo", D->isNRVOVariable());
+ // TODO: instantiation, etc.
+ }
+ void visitVarDeclChildren(VarDecl *D) {
+ if (D->hasInit()) dispatch(D->getInit());
+ }
+
+ // ParmVarDecl?
+
+ // FunctionDecl
+ void visitFunctionDeclAttrs(FunctionDecl *D) {
+ visitRedeclarableAttrs(D);
+ setFlag("pure", D->isPure());
+ setFlag("trivial", D->isTrivial());
+ setFlag("returnzero", D->hasImplicitReturnZero());
+ setFlag("prototype", D->hasWrittenPrototype());
+ setFlag("deleted", D->isDeleted());
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ setFlag("inline", D->isInlineSpecified());
+ // TODO: instantiation, etc.
+ }
+ void visitFunctionDeclChildren(FunctionDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ if (D->isThisDeclarationADefinition())
+ dispatch(D->getBody());
+ }
+
+ // CXXMethodDecl ?
+ // CXXConstructorDecl ?
+ // CXXDestructorDecl ?
+ // CXXConversionDecl ?
+
+ void dispatch(CXXCtorInitializer *Init) {
+ // TODO
+ }
+
+ // FieldDecl
+ void visitFieldDeclAttrs(FieldDecl *D) {
+ setFlag("mutable", D->isMutable());
+ }
+ void visitFieldDeclChildren(FieldDecl *D) {
+ if (D->isBitField()) {
+ TemporaryContainer C(*this, "bitwidth");
+ dispatch(D->getBitWidth());
+ }
+ // TODO: C++0x member initializer
+ }
+
+ // EnumConstantDecl
+ void visitEnumConstantDeclChildren(EnumConstantDecl *D) {
+ // value in any case?
+ if (D->getInitExpr()) dispatch(D->getInitExpr());
+ }
+
+ // IndirectFieldDecl
+ void visitIndirectFieldDeclChildren(IndirectFieldDecl *D) {
+ for (IndirectFieldDecl::chain_iterator
+ I = D->chain_begin(), E = D->chain_end(); I != E; ++I) {
+ NamedDecl *VD = const_cast<NamedDecl*>(*I);
+ push(isa<VarDecl>(VD) ? "variable" : "field");
+ setPointer("ptr", VD);
+ completeAttrs();
+ pop();
+ }
+ }
+
+ // TypeDecl
+ void visitTypeDeclAttrs(TypeDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ }
+
+ // TypedefDecl
+ void visitTypedefDeclAttrs(TypedefDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitTypedefDeclChildren(TypedefDecl *D) {
+ dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // TagDecl
+ void visitTagDeclAttrs(TagDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitTagDeclAsContext(TagDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // EnumDecl
+ void visitEnumDeclAttrs(EnumDecl *D) {
+ setFlag("scoped", D->isScoped());
+ setFlag("fixed", D->isFixed());
+ }
+ void visitEnumDeclChildren(EnumDecl *D) {
+ {
+ TemporaryContainer C(*this, "promotion_type");
+ dispatch(D->getPromotionType());
+ }
+ {
+ TemporaryContainer C(*this, "integer_type");
+ dispatch(D->getIntegerType());
+ }
+ }
+
+ // RecordDecl ?
+
+ void visitCXXRecordDeclChildren(CXXRecordDecl *D) {
+ if (!D->isThisDeclarationADefinition()) return;
+
+ for (CXXRecordDecl::base_class_iterator
+ I = D->bases_begin(), E = D->bases_end(); I != E; ++I) {
+ push("base");
+ setAccess(I->getAccessSpecifier());
+ completeAttrs();
+ dispatch(I->getTypeSourceInfo()->getTypeLoc());
+ pop();
+ }
+ }
+
+ // ClassTemplateSpecializationDecl ?
+
+ // FileScopeAsmDecl ?
+
+ // BlockDecl
+ void visitBlockDeclAttrs(BlockDecl *D) {
+ setFlag("variadic", D->isVariadic());
+ }
+ void visitBlockDeclChildren(BlockDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ dispatch(D->getBody());
+ }
+
+ // AccessSpecDecl
+ void visitAccessSpecDeclAttrs(AccessSpecDecl *D) {
+ setAccess(D->getAccess());
+ }
+
+ // TemplateDecl
+ void visitTemplateDeclChildren(TemplateDecl *D) {
+ visitTemplateParameters(D->getTemplateParameters());
+ dispatch(D->getTemplatedDecl());
+ }
+
+ // FunctionTemplateDecl
+ void visitFunctionTemplateDeclAttrs(FunctionTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitFunctionTemplateDeclChildren(FunctionTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (FunctionTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+ FunctionTemplateSpecializationInfo *Info
+ = I->getTemplateSpecializationInfo();
+
+ bool Unknown = false;
+ switch (Info->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(*Info->TemplateArguments);
+ dispatch(Info->Function);
+ }
+ }
+
+ // ClassTemplateDecl
+ void visitClassTemplateDeclAttrs(ClassTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitClassTemplateDeclChildren(ClassTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (ClassTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+
+ bool Unknown = false;
+ switch (I->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(I->getTemplateArgs());
+ dispatch(*I);
+ }
+ }
+
+ // TemplateTypeParmDecl
+ void visitTemplateTypeParmDeclAttrs(TemplateTypeParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTypeParmDeclChildren(TemplateTypeParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgumentInfo()->getTypeLoc());
+ // parameter pack?
+ }
+
+ // NonTypeTemplateParmDecl
+ void visitNonTypeTemplateParmDeclAttrs(NonTypeTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitNonTypeTemplateParmDeclChildren(NonTypeTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // TemplateTemplateParmDecl
+ void visitTemplateTemplateParmDeclAttrs(TemplateTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTemplateParmDeclChildren(TemplateTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // FriendDecl
+ void visitFriendDeclChildren(FriendDecl *D) {
+ if (TypeSourceInfo *T = D->getFriendType())
+ dispatch(T->getTypeLoc());
+ else
+ dispatch(D->getFriendDecl());
+ }
+
+ // UsingDirectiveDecl ?
+ // UsingDecl ?
+ // UsingShadowDecl ?
+ // NamespaceAliasDecl ?
+ // UnresolvedUsingValueDecl ?
+ // UnresolvedUsingTypenameDecl ?
+ // StaticAssertDecl ?
+
+ // ObjCImplDecl
+ void visitObjCImplDeclChildren(ObjCImplDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+ void visitObjCImplDeclAsContext(ObjCImplDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCClassDecl
+ void visitObjCClassDeclChildren(ObjCClassDecl *D) {
+ for (ObjCClassDecl::iterator I = D->begin(), E = D->end(); I != E; ++I)
+ visitDeclRef(I->getInterface());
+ }
+
+ // ObjCInterfaceDecl
+ void visitCategoryList(ObjCCategoryDecl *D) {
+ if (!D) return;
+
+ TemporaryContainer C(*this, "categories");
+ for (; D; D = D->getNextClassCategory())
+ visitDeclRef(D);
+ }
+ void visitObjCInterfaceDeclAttrs(ObjCInterfaceDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ setFlag("forward_decl", D->isForwardDecl());
+ setFlag("implicit_interface", D->isImplicitInterfaceDecl());
+ }
+ void visitObjCInterfaceDeclChildren(ObjCInterfaceDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ visitCategoryList(D->getCategoryList());
+ }
+ void visitObjCInterfaceDeclAsContext(ObjCInterfaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryDecl
+ void visitObjCCategoryDeclAttrs(ObjCCategoryDecl *D) {
+ setFlag("extension", D->IsClassExtension());
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ }
+ void visitObjCCategoryDeclChildren(ObjCCategoryDecl *D) {
+ visitDeclRef("interface", D->getClassInterface());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCCategoryDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCCategoryDeclAsContext(ObjCCategoryDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryImplDecl
+ void visitObjCCategoryImplDeclAttrs(ObjCCategoryImplDecl *D) {
+ set("identifier", D->getName());
+ }
+ void visitObjCCategoryImplDeclChildren(ObjCCategoryImplDecl *D) {
+ visitDeclRef(D->getCategoryDecl());
+ }
+
+ // ObjCImplementationDecl
+ void visitObjCImplementationDeclAttrs(ObjCImplementationDecl *D) {
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ set("identifier", D->getName());
+ }
+ void visitObjCImplementationDeclChildren(ObjCImplementationDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ if (D->init_begin() != D->init_end()) {
+ TemporaryContainer C(*this, "initializers");
+ for (ObjCImplementationDecl::init_iterator
+ I = D->init_begin(), E = D->init_end(); I != E; ++I)
+ dispatch(*I);
+ }
+ }
+
+ // ObjCForwardProtocolDecl
+ void visitObjCForwardProtocolDeclChildren(ObjCForwardProtocolDecl *D) {
+ for (ObjCForwardProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+
+ // ObjCProtocolDecl
+ void visitObjCProtocolDeclAttrs(ObjCProtocolDecl *D) {
+ setFlag("forward_decl", D->isForwardDecl());
+ }
+ void visitObjCProtocolDeclChildren(ObjCProtocolDecl *D) {
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCProtocolDeclAsContext(ObjCProtocolDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCMethodDecl
+ void visitObjCMethodDeclAttrs(ObjCMethodDecl *D) {
+ // decl qualifier?
+ // implementation control?
+
+ setFlag("instance", D->isInstanceMethod());
+ setFlag("variadic", D->isVariadic());
+ setFlag("synthesized", D->isSynthesized());
+ setFlag("defined", D->isDefined());
+ }
+ void visitObjCMethodDeclChildren(ObjCMethodDecl *D) {
+ dispatch(D->getResultType());
+ for (ObjCMethodDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ if (D->isThisDeclarationADefinition())
+ dispatch(D->getBody());
+ }
+
+ // ObjCIvarDecl
+ void setAccessControl(llvm::StringRef prop, ObjCIvarDecl::AccessControl AC) {
+ switch (AC) {
+ case ObjCIvarDecl::None: return set(prop, "none");
+ case ObjCIvarDecl::Private: return set(prop, "private");
+ case ObjCIvarDecl::Protected: return set(prop, "protected");
+ case ObjCIvarDecl::Public: return set(prop, "public");
+ case ObjCIvarDecl::Package: return set(prop, "package");
+ }
+ }
+ void visitObjCIvarDeclAttrs(ObjCIvarDecl *D) {
+ setFlag("synthesize", D->getSynthesize());
+ setAccessControl("access", D->getAccessControl());
+ }
+
+ // ObjCCompatibleAliasDecl
+ void visitObjCCompatibleAliasDeclChildren(ObjCCompatibleAliasDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+
+ // FIXME: ObjCPropertyDecl
+ // FIXME: ObjCPropertyImplDecl
+
+ //---- Types -----------------------------------------------------//
+ void dispatch(TypeLoc TL) {
+ dispatch(TL.getType()); // for now
+ }
+
+ void dispatch(QualType T) {
+ if (T.hasLocalQualifiers()) {
+ push("QualType");
+ Qualifiers Qs = T.getLocalQualifiers();
+ setFlag("const", Qs.hasConst());
+ setFlag("volatile", Qs.hasVolatile());
+ setFlag("restrict", Qs.hasRestrict());
+ if (Qs.hasAddressSpace()) setInteger("addrspace", Qs.getAddressSpace());
+ if (Qs.hasObjCGCAttr()) {
+ switch (Qs.getObjCGCAttr()) {
+ case Qualifiers::Weak: set("gc", "weak"); break;
+ case Qualifiers::Strong: set("gc", "strong"); break;
+ case Qualifiers::GCNone: llvm_unreachable("explicit none");
+ }
+ }
+
+ completeAttrs();
+ dispatch(QualType(T.getTypePtr(), 0));
+ pop();
+ return;
+ }
+
+ Type *Ty = const_cast<Type*>(T.getTypePtr());
+ push(getTypeKindName(Ty));
+ XMLTypeVisitor<XMLDumper>::dispatch(Ty);
+ pop();
+ }
+
+ void setCallingConv(CallingConv CC) {
+ switch (CC) {
+ case CC_Default: return;
+ case CC_C: return set("cc", "cdecl");
+ case CC_X86FastCall: return set("cc", "x86_fastcall");
+ case CC_X86StdCall: return set("cc", "x86_stdcall");
+ case CC_X86ThisCall: return set("cc", "x86_thiscall");
+ case CC_X86Pascal: return set("cc", "x86_pascal");
+ }
+ }
+
+ void visitTypeAttrs(Type *D) {
+ setPointer(D);
+ setFlag("dependent", D->isDependentType());
+ setFlag("variably_modified", D->isVariablyModifiedType());
+
+ setPointer("canonical", D->getCanonicalTypeInternal().getAsOpaquePtr());
+ }
+
+ void visitPointerTypeChildren(PointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitReferenceTypeChildren(ReferenceType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitObjCObjectPointerTypeChildren(ObjCObjectPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitBlockPointerTypeChildren(BlockPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+
+ // Types that just wrap declarations.
+ void visitTagTypeChildren(TagType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitTypedefTypeChildren(TypedefType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitObjCInterfaceTypeChildren(ObjCInterfaceType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitUnresolvedUsingTypeChildren(UnresolvedUsingType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitInjectedClassNameTypeChildren(InjectedClassNameType *T) {
+ visitDeclRef(T->getDecl());
+ }
+
+ void visitFunctionTypeAttrs(FunctionType *T) {
+ setFlag("noreturn", T->getNoReturnAttr());
+ setCallingConv(T->getCallConv());
+ if (T->getRegParmType()) setInteger("regparm", T->getRegParmType());
+ }
+ void visitFunctionTypeChildren(FunctionType *T) {
+ dispatch(T->getResultType());
+ }
+
+ void visitFunctionProtoTypeAttrs(FunctionProtoType *T) {
+ setFlag("const", T->getTypeQuals() & Qualifiers::Const);
+ setFlag("volatile", T->getTypeQuals() & Qualifiers::Volatile);
+ setFlag("restrict", T->getTypeQuals() & Qualifiers::Restrict);
+ }
+ void visitFunctionProtoTypeChildren(FunctionProtoType *T) {
+ push("parameters");
+ setFlag("variadic", T->isVariadic());
+ completeAttrs();
+ for (FunctionProtoType::arg_type_iterator
+ I = T->arg_type_begin(), E = T->arg_type_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+
+ if (T->hasExceptionSpec()) {
+ push("exception_specifiers");
+ setFlag("any", T->hasAnyExceptionSpec());
+ completeAttrs();
+ for (FunctionProtoType::exception_iterator
+ I = T->exception_begin(), E = T->exception_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+ }
+
+ void visitTemplateSpecializationTypeChildren(TemplateSpecializationType *T) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ visitDeclRef(RT->getDecl());
+
+ // TODO: TemplateName
+
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I)
+ dispatch(T->getArg(I));
+ pop();
+ }
+
+ //---- Statements ------------------------------------------------//
+ void dispatch(Stmt *S) {
+ // FIXME: this is not really XML at all
+ push("Stmt");
+ out << ">\n";
+ Stack.back().State = NS_Children; // explicitly become non-lazy
+ S->dump(out, Context.getSourceManager());
+ out << '\n';
+ pop();
+ }
+};
+}
+
+void Decl::dumpXML() const {
+ dumpXML(llvm::errs());
+}
+
+void Decl::dumpXML(llvm::raw_ostream &out) const {
+ XMLDumper(out, getASTContext()).dispatch(const_cast<Decl*>(this));
+}
+
+#else /* ifndef NDEBUG */
+
+void Decl::dumpXML() const {}
+void Decl::dumpXML(llvm::raw_ostream &out) const {}
+
+#endif
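
The dumper above keeps each element's start tag open so that attributes can keep accruing until the first child forces it closed; push(), completeAttrs(), and pop() drive that state machine. A minimal standalone sketch of the idiom — class and member names here are hypothetical, not the clang API:

#include <iostream>
#include <string>

// One XML element whose '>' is written lazily, so attributes can still be
// added at any point before the first child appears.
class TagWriter {
  std::string Tag;
  bool Open; // start tag still accepting attributes
public:
  explicit TagWriter(const std::string &tag) : Tag(tag), Open(true) {
    std::cout << '<' << Tag;
  }
  void attr(const std::string &key, const std::string &value) {
    if (Open) std::cout << ' ' << key << "=\"" << value << '"';
  }
  void completeAttrs() {
    if (Open) { std::cout << '>'; Open = false; }
  }
  ~TagWriter() { completeAttrs(); std::cout << "</" << Tag << '>'; }
};

int main() {
  TagWriter decl("decl");
  decl.attr("name", "f");
  decl.completeAttrs(); // children would be emitted here
  return 0;             // prints: <decl name="f"></decl>
}
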
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
index 5feef1c..391b26a 100644
--- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -20,15 +20,16 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Lexer.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
-void Expr::ANCHOR() {} // key function for Expr class.
-
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
@@ -90,6 +91,42 @@ bool Expr::isKnownToHaveBooleanValue() const {
return false;
}
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getExprLoc().
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getExprLoc.
+ template <class E, class T>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const E*>(expr)->getExprLoc();
+ }
+
+ /// This implementation is used when a class doesn't provide
+ /// a custom implementation of getExprLoc. Overload resolution
+ /// should pick it over the implementation above because it's
+ /// more specialized according to function template partial ordering.
+ template <class E>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (Expr::*v)() const) {
+ return static_cast<const E*>(expr)->getSourceRange().getBegin();
+ }
+}
+
+SourceLocation Expr::getExprLoc() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: llvm_unreachable(#type " is not an Expr"); break;
+#define EXPR(type, base) \
+ case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+ return SourceLocation();
+}
+
//===----------------------------------------------------------------------===//
// Primary Expressions.
//===----------------------------------------------------------------------===//
@@ -105,6 +142,25 @@ void ExplicitTemplateArgumentList::initializeFrom(
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
+void ExplicitTemplateArgumentList::initializeFrom(
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &ContainsUnexpandedParameterPack) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i) {
+ Dependent = Dependent || Info[i].getArgument().isDependent();
+ ContainsUnexpandedParameterPack
+ = ContainsUnexpandedParameterPack ||
+ Info[i].getArgument().containsUnexpandedParameterPack();
+
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+ }
+}
+
void ExplicitTemplateArgumentList::copyInto(
TemplateArgumentListInfo &Info) const {
Info.setLAngleLoc(LAngleLoc);
@@ -123,11 +179,14 @@ std::size_t ExplicitTemplateArgumentList::sizeFor(
return sizeFor(Info.size());
}
-void DeclRefExpr::computeDependence() {
+/// \brief Compute the type- and value-dependence of a declaration reference
+/// based on the declaration being referenced.
+static void computeDeclRefDependence(NamedDecl *D, QualType T,
+ bool &TypeDependent,
+ bool &ValueDependent) {
TypeDependent = false;
ValueDependent = false;
- NamedDecl *D = getDecl();
// (TD) C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
@@ -136,63 +195,93 @@ void DeclRefExpr::computeDependence() {
//
// (VD) C++ [temp.dep.constexpr]p2:
// An identifier is value-dependent if it is:
-
+
// (TD) - an identifier that was declared with dependent type
// (VD) - a name declared with a dependent type,
- if (getType()->isDependentType()) {
+ if (T->isDependentType()) {
TypeDependent = true;
ValueDependent = true;
+ return;
}
+
// (TD) - a conversion-function-id that specifies a dependent type
- else if (D->getDeclName().getNameKind()
- == DeclarationName::CXXConversionFunctionName &&
+ if (D->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName &&
D->getDeclName().getCXXNameType()->isDependentType()) {
TypeDependent = true;
ValueDependent = true;
- }
- // (TD) - a template-id that is dependent,
- else if (hasExplicitTemplateArgs() &&
- TemplateSpecializationType::anyDependentTemplateArguments(
- getTemplateArgs(),
- getNumTemplateArgs())) {
- TypeDependent = true;
- ValueDependent = true;
+ return;
}
// (VD) - the name of a non-type template parameter,
- else if (isa<NonTypeTemplateParmDecl>(D))
+ if (isa<NonTypeTemplateParmDecl>(D)) {
ValueDependent = true;
+ return;
+ }
+
// (VD) - a constant with integral or enumeration type and is
// initialized with an expression that is value-dependent.
- else if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
if (Var->getType()->isIntegralOrEnumerationType() &&
Var->getType().getCVRQualifiers() == Qualifiers::Const) {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent())
ValueDependent = true;
}
+
// (VD) - FIXME: Missing from the standard:
// - a member function or a static data member of the current
// instantiation
else if (Var->isStaticDataMember() &&
Var->getDeclContext()->isDependentContext())
ValueDependent = true;
- }
+
+ return;
+ }
+
// (VD) - FIXME: Missing from the standard:
// - a member function or a static data member of the current
// instantiation
- else if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext())
+ if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
ValueDependent = true;
- // (TD) - a nested-name-specifier or a qualified-id that names a
- // member of an unknown specialization.
- // (handled by DependentScopeDeclRefExpr)
+ return;
+ }
+}
+
+void DeclRefExpr::computeDependence() {
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ computeDeclRefDependence(getDecl(), getType(), TypeDependent, ValueDependent);
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+ if (!TypeDependent && !ValueDependent &&
+ hasExplicitTemplateArgs() &&
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ getTemplateArgs(),
+ getNumTemplateArgs())) {
+ TypeDependent = true;
+ ValueDependent = true;
+ }
+
+ ExprBits.TypeDependent = TypeDependent;
+ ExprBits.ValueDependent = ValueDependent;
+
+ // Is the declaration a parameter pack?
+ if (getDecl()->isParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
}
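
computeDependence above follows a pattern that recurs throughout this file: a node starts from its own dependence bits, and each child can only turn bits on, never off. A hypothetical reduction of that accumulation (names are illustrative only):

// A single dependent child makes the whole parent dependent.
struct DepBits {
  bool TypeDependent;
  bool ValueDependent;
  bool ContainsUnexpandedPack;
};

static void mergeChild(DepBits &Parent, const DepBits &Child) {
  Parent.TypeDependent          |= Child.TypeDependent;
  Parent.ValueDependent         |= Child.ValueDependent;
  Parent.ContainsUnexpandedPack |= Child.ContainsUnexpandedPack;
}
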
DeclRefExpr::DeclRefExpr(NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
ValueDecl *D, SourceLocation NameLoc,
const TemplateArgumentListInfo *TemplateArgs,
- QualType T)
- : Expr(DeclRefExprClass, T, false, false),
+ QualType T, ExprValueKind VK)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false),
DecoratedD(D,
(Qualifier? HasQualifierFlag : 0) |
(TemplateArgs ? HasExplicitTemplateArgumentListFlag : 0)),
@@ -213,8 +302,8 @@ DeclRefExpr::DeclRefExpr(NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
ValueDecl *D, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
- QualType T)
- : Expr(DeclRefExprClass, T, false, false),
+ QualType T, ExprValueKind VK)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false),
DecoratedD(D,
(Qualifier? HasQualifierFlag : 0) |
(TemplateArgs ? HasExplicitTemplateArgumentListFlag : 0)),
@@ -237,10 +326,11 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
ValueDecl *D,
SourceLocation NameLoc,
QualType T,
+ ExprValueKind VK,
const TemplateArgumentListInfo *TemplateArgs) {
return Create(Context, Qualifier, QualifierRange, D,
DeclarationNameInfo(D->getDeclName(), NameLoc),
- T, TemplateArgs);
+ T, VK, TemplateArgs);
}
DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
@@ -249,6 +339,7 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
ValueDecl *D,
const DeclarationNameInfo &NameInfo,
QualType T,
+ ExprValueKind VK,
const TemplateArgumentListInfo *TemplateArgs) {
std::size_t Size = sizeof(DeclRefExpr);
if (Qualifier != 0)
@@ -257,21 +348,23 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
if (TemplateArgs)
Size += ExplicitTemplateArgumentList::sizeFor(*TemplateArgs);
- void *Mem = Context.Allocate(Size, llvm::alignof<DeclRefExpr>());
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(Qualifier, QualifierRange, D, NameInfo,
- TemplateArgs, T);
+ TemplateArgs, T, VK);
}
-DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context, bool HasQualifier,
+DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context,
+ bool HasQualifier,
+ bool HasExplicitTemplateArgs,
unsigned NumTemplateArgs) {
std::size_t Size = sizeof(DeclRefExpr);
if (HasQualifier)
Size += sizeof(NameQualifier);
- if (NumTemplateArgs)
+ if (HasExplicitTemplateArgs)
Size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
- void *Mem = Context.Allocate(Size, llvm::alignof<DeclRefExpr>());
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(EmptyShell());
}
@@ -432,7 +525,7 @@ StringLiteral *StringLiteral::Create(ASTContext &C, const char *StrData,
// any concatenated string tokens.
void *Mem = C.Allocate(sizeof(StringLiteral)+
sizeof(SourceLocation)*(NumStrs-1),
- llvm::alignof<StringLiteral>());
+ llvm::alignOf<StringLiteral>());
StringLiteral *SL = new (Mem) StringLiteral(Ty);
// OPTIMIZE: could allocate this appended to the StringLiteral.
@@ -452,7 +545,7 @@ StringLiteral *StringLiteral::Create(ASTContext &C, const char *StrData,
StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
void *Mem = C.Allocate(sizeof(StringLiteral)+
sizeof(SourceLocation)*(NumStrs-1),
- llvm::alignof<StringLiteral>());
+ llvm::alignOf<StringLiteral>());
StringLiteral *SL = new (Mem) StringLiteral(QualType());
SL->StrData = 0;
SL->ByteLength = 0;
@@ -467,6 +560,72 @@ void StringLiteral::setString(ASTContext &C, llvm::StringRef Str) {
ByteLength = Str.size();
}
+/// getLocationOfByte - Return a source location that points to the specified
+/// byte of this string literal.
+///
+/// Strings are amazingly complex. They can be formed from multiple tokens and
+/// can have escape sequences in them in addition to the usual trigraph and
+/// escaped newline business. This routine handles this complexity.
+///
+SourceLocation StringLiteral::
+getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features, const TargetInfo &Target) const {
+ assert(!isWide() && "This doesn't work for wide strings yet");
+
+ // Loop over all of the tokens in this string until we find the one that
+ // contains the byte we're looking for.
+ unsigned TokNo = 0;
+ while (true) {
+ assert(TokNo < getNumConcatenated() && "Invalid byte number!");
+ SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
+
+ // Get the spelling of the string so that we can get the data that makes up
+ // the string literal, not the identifier for the macro it is potentially
+ // expanded through.
+ SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);
+
+ // Re-lex the token to get its length and original spelling.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(StrTokSpellingLoc);
+ bool Invalid = false;
+ llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return StrTokSpellingLoc;
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ // The language options in Features (trigraphs, escaped newlines, etc.)
+ // are sufficient for re-lexing this token.
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(StrTokSpellingLoc, Features, Buffer.begin(), StrData,
+ Buffer.end());
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+
+ // Use the StringLiteralParser to compute the length of the string in bytes.
+ StringLiteralParser SLP(&TheTok, 1, SM, Features, Target);
+ unsigned TokNumBytes = SLP.GetStringLength();
+
+ // If the byte is in this token, return the location of the byte.
+ if (ByteNo < TokNumBytes ||
+ (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
+ unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
+
+ // Now that we know the offset of the byte within the token's spelling,
+ // walk the lexer forward to the matching location in the original source.
+ return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
+ }
+
+ // Move to the next string token.
+ ++TokNo;
+ ByteNo -= TokNumBytes;
+ }
+}
+
+
+
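The token walk in getLocationOfByte reduces to simple arithmetic: subtract each concatenated token's decoded byte count until the requested byte falls inside the current token. A standalone sketch of just that arithmetic (hypothetical helper, not the clang API):

#include <cstddef>
#include <utility>
#include <vector>

// Given the decoded byte length of each concatenated string token, return
// (token index, byte offset within that token) for global byte ByteNo.
static std::pair<size_t, size_t>
locateByte(const std::vector<size_t> &TokLens, size_t ByteNo) {
  for (size_t Tok = 0; Tok < TokLens.size(); ++Tok) {
    if (ByteNo < TokLens[Tok])
      return std::make_pair(Tok, ByteNo);
    ByteNo -= TokLens[Tok]; // step past this token and keep walking
  }
  return std::make_pair(TokLens.size(), 0); // ByteNo was out of range
}
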
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
const char *UnaryOperator::getOpcodeStr(Opcode Op) {
@@ -522,43 +681,82 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
// Postfix Operators.
//===----------------------------------------------------------------------===//
-CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, Expr **args,
- unsigned numargs, QualType t, SourceLocation rparenloc)
- : Expr(SC, t,
- fn->isTypeDependent() || hasAnyTypeDependentArguments(args, numargs),
- fn->isValueDependent() || hasAnyValueDependentArguments(args,numargs)),
+CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
+ Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ SourceLocation rparenloc)
+ : Expr(SC, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->containsUnexpandedParameterPack()),
NumArgs(numargs) {
- SubExprs = new (C) Stmt*[numargs+1];
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START+NumPreArgs];
SubExprs[FN] = fn;
- for (unsigned i = 0; i != numargs; ++i)
- SubExprs[i+ARGS_START] = args[i];
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
+ }
+ CallExprBits.NumPreArgs = NumPreArgs;
RParenLoc = rparenloc;
}
CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
- QualType t, SourceLocation rparenloc)
- : Expr(CallExprClass, t,
- fn->isTypeDependent() || hasAnyTypeDependentArguments(args, numargs),
- fn->isValueDependent() || hasAnyValueDependentArguments(args,numargs)),
+ QualType t, ExprValueKind VK, SourceLocation rparenloc)
+ : Expr(CallExprClass, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->containsUnexpandedParameterPack()),
NumArgs(numargs) {
- SubExprs = new (C) Stmt*[numargs+1];
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START];
SubExprs[FN] = fn;
- for (unsigned i = 0; i != numargs; ++i)
- SubExprs[i+ARGS_START] = args[i];
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START] = args[i];
+ }
+ CallExprBits.NumPreArgs = 0;
RParenLoc = rparenloc;
}
CallExpr::CallExpr(ASTContext &C, StmtClass SC, EmptyShell Empty)
: Expr(SC, Empty), SubExprs(0), NumArgs(0) {
- SubExprs = new (C) Stmt*[1];
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START];
+ CallExprBits.NumPreArgs = 0;
+}
+
+CallExpr::CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs,
+ EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(0), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs];
+ CallExprBits.NumPreArgs = NumPreArgs;
}
Decl *CallExpr::getCalleeDecl() {
Expr *CEE = getCallee()->IgnoreParenCasts();
+ // If we're calling a dereference, look at the pointer instead.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp())
+ CEE = BO->getRHS()->IgnoreParenCasts();
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref)
+ CEE = UO->getSubExpr()->IgnoreParenCasts();
+ }
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
return DRE->getDecl();
if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
@@ -585,12 +783,14 @@ void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
}
  // Otherwise, we are growing the # arguments. Allocate a new, bigger argument array.
- Stmt **NewSubExprs = new (C) Stmt*[NumArgs+1];
+ unsigned NumPreArgs = getNumPreArgs();
+ Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
// Copy over args.
- for (unsigned i = 0; i != getNumArgs()+ARGS_START; ++i)
+ for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
NewSubExprs[i] = SubExprs[i];
// Null out new args.
- for (unsigned i = getNumArgs()+ARGS_START; i != NumArgs+ARGS_START; ++i)
+ for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
+ i != NumArgs+PREARGS_START+NumPreArgs; ++i)
NewSubExprs[i] = 0;
if (SubExprs) C.Deallocate(SubExprs);
@@ -600,7 +800,7 @@ void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
-unsigned CallExpr::isBuiltinCall(ASTContext &Context) const {
+unsigned CallExpr::isBuiltinCall(const ASTContext &Context) const {
// All simple function calls (e.g. func()) are implicitly cast to pointer to
  // function. As a result, we try to obtain the DeclRefExpr from the
// ImplicitCastExpr.
@@ -663,10 +863,10 @@ OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
OffsetOfNode* compsPtr, unsigned numComps,
Expr** exprsPtr, unsigned numExprs,
SourceLocation RParenLoc)
- : Expr(OffsetOfExprClass, type, /*TypeDependent=*/false,
- /*ValueDependent=*/tsi->getType()->isDependentType() ||
- hasAnyTypeDependentArguments(exprsPtr, numExprs) ||
- hasAnyValueDependentArguments(exprsPtr, numExprs)),
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/tsi->getType()->isDependentType(),
+ tsi->getType()->containsUnexpandedParameterPack()),
OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
NumComps(numComps), NumExprs(numExprs)
{
@@ -675,6 +875,11 @@ OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
}
for(unsigned i = 0; i < numExprs; ++i) {
+ if (exprsPtr[i]->isTypeDependent() || exprsPtr[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprsPtr[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
setIndexExpr(i, exprsPtr[i]);
}
}
@@ -694,7 +899,9 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
DeclAccessPair founddecl,
DeclarationNameInfo nameinfo,
const TemplateArgumentListInfo *targs,
- QualType ty) {
+ QualType ty,
+ ExprValueKind vk,
+ ExprObjectKind ok) {
std::size_t Size = sizeof(MemberExpr);
bool hasQualOrFound = (qual != 0 ||
@@ -706,8 +913,9 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
if (targs)
Size += ExplicitTemplateArgumentList::sizeFor(*targs);
- void *Mem = C.Allocate(Size, llvm::alignof<MemberExpr>());
- MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo, ty);
+ void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
+ MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo,
+ ty, vk, ok);
if (hasQualOrFound) {
if (qual && qual->isDependent()) {
@@ -732,12 +940,16 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
const char *CastExpr::getCastKindName() const {
switch (getCastKind()) {
- case CK_Unknown:
- return "Unknown";
+ case CK_Dependent:
+ return "Dependent";
case CK_BitCast:
return "BitCast";
case CK_LValueBitCast:
return "LValueBitCast";
+ case CK_LValueToRValue:
+ return "LValueToRValue";
+ case CK_GetObjCProperty:
+ return "GetObjCProperty";
case CK_NoOp:
return "NoOp";
case CK_BaseToDerived:
@@ -756,6 +968,8 @@ const char *CastExpr::getCastKindName() const {
return "FunctionToPointerDecay";
case CK_NullToMemberPointer:
return "NullToMemberPointer";
+ case CK_NullToPointer:
+ return "NullToPointer";
case CK_BaseToDerivedMemberPointer:
return "BaseToDerivedMemberPointer";
case CK_DerivedToBaseMemberPointer:
@@ -768,18 +982,24 @@ const char *CastExpr::getCastKindName() const {
return "IntegralToPointer";
case CK_PointerToIntegral:
return "PointerToIntegral";
+ case CK_PointerToBoolean:
+ return "PointerToBoolean";
case CK_ToVoid:
return "ToVoid";
case CK_VectorSplat:
return "VectorSplat";
case CK_IntegralCast:
return "IntegralCast";
+ case CK_IntegralToBoolean:
+ return "IntegralToBoolean";
case CK_IntegralToFloating:
return "IntegralToFloating";
case CK_FloatingToIntegral:
return "FloatingToIntegral";
case CK_FloatingCast:
return "FloatingCast";
+ case CK_FloatingToBoolean:
+ return "FloatingToBoolean";
case CK_MemberPointerToBoolean:
return "MemberPointerToBoolean";
case CK_AnyPointerToObjCPointerCast:
@@ -788,9 +1008,29 @@ const char *CastExpr::getCastKindName() const {
return "AnyPointerToBlockPointerCast";
case CK_ObjCObjectLValueCast:
return "ObjCObjectLValueCast";
+ case CK_FloatingRealToComplex:
+ return "FloatingRealToComplex";
+ case CK_FloatingComplexToReal:
+ return "FloatingComplexToReal";
+ case CK_FloatingComplexToBoolean:
+ return "FloatingComplexToBoolean";
+ case CK_FloatingComplexCast:
+ return "FloatingComplexCast";
+ case CK_FloatingComplexToIntegralComplex:
+ return "FloatingComplexToIntegralComplex";
+ case CK_IntegralRealToComplex:
+ return "IntegralRealToComplex";
+ case CK_IntegralComplexToReal:
+ return "IntegralComplexToReal";
+ case CK_IntegralComplexToBoolean:
+ return "IntegralComplexToBoolean";
+ case CK_IntegralComplexCast:
+ return "IntegralComplexCast";
+ case CK_IntegralComplexToFloatingComplex:
+ return "IntegralComplexToFloatingComplex";
}
- assert(0 && "Unhandled cast kind!");
+ llvm_unreachable("Unhandled cast kind!");
return 0;
}
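
getCastKindName relies on the exhaustive-switch idiom: with no default case, adding a CastKind without a name draws a -Wswitch warning at compile time, and the unreachable after the switch catches corrupted values at run time. A minimal sketch with a hypothetical enum:

#include <cassert>

enum Kind { K_BitCast, K_NoOp };

static const char *getKindName(Kind K) {
  switch (K) { // no default: new enumerators trigger -Wswitch
  case K_BitCast: return "BitCast";
  case K_NoOp:    return "NoOp";
  }
  assert(0 && "unhandled kind");
  return 0; // keeps -Wreturn-type quiet on older compilers
}
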
@@ -859,7 +1099,7 @@ ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(ASTContext &C,
CStyleCastExpr *CStyleCastExpr::Create(ASTContext &C, QualType T,
- CastKind K, Expr *Op,
+ ExprValueKind VK, CastKind K, Expr *Op,
const CXXCastPath *BasePath,
TypeSourceInfo *WrittenTy,
SourceLocation L, SourceLocation R) {
@@ -867,7 +1107,7 @@ CStyleCastExpr *CStyleCastExpr::Create(ASTContext &C, QualType T,
void *Buffer =
C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
CStyleCastExpr *E =
- new (Buffer) CStyleCastExpr(T, K, Op, PathSize, WrittenTy, L, R);
+ new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
if (PathSize) E->setCastPath(*BasePath);
return E;
}
@@ -984,16 +1224,19 @@ OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
Expr **initExprs, unsigned numInits,
SourceLocation rbraceloc)
- : Expr(InitListExprClass, QualType(), false, false),
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false),
InitExprs(C, numInits),
LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0),
UnionFieldInit(0), HadArrayRangeDesignator(false)
{
for (unsigned I = 0; I != numInits; ++I) {
if (initExprs[I]->isTypeDependent())
- TypeDependent = true;
+ ExprBits.TypeDependent = true;
if (initExprs[I]->isValueDependent())
- ValueDependent = true;
+ ExprBits.ValueDependent = true;
+ if (initExprs[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
}
InitExprs.insert(C, InitExprs.end(), initExprs, initExprs+numInits);
@@ -1020,6 +1263,35 @@ Expr *InitListExpr::updateInit(ASTContext &C, unsigned Init, Expr *expr) {
return Result;
}
+SourceRange InitListExpr::getSourceRange() const {
+ if (SyntacticForm)
+ return SyntacticForm->getSourceRange();
+ SourceLocation Beg = LBraceLoc, End = RBraceLoc;
+ if (Beg.isInvalid()) {
+ // Find the first non-null initializer.
+ for (InitExprsTy::const_iterator I = InitExprs.begin(),
+ E = InitExprs.end();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ Beg = S->getLocStart();
+ break;
+ }
+ }
+ }
+ if (End.isInvalid()) {
+ // Find the first non-null initializer from the end.
+ for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
+ E = InitExprs.rend();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ End = S->getSourceRange().getEnd();
+ break;
+ }
+ }
+ }
+ return SourceRange(Beg, End);
+}
+
/// getFunctionType - Return the underlying function type for this block.
///
const FunctionType *BlockExpr::getFunctionType() const {
@@ -1195,21 +1467,11 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
return false;
}
- case ObjCImplicitSetterGetterRefExprClass: { // Dot syntax for message send.
-#if 0
- const ObjCImplicitSetterGetterRefExpr *Ref =
- cast<ObjCImplicitSetterGetterRefExpr>(this);
- // FIXME: We really want the location of the '.' here.
- Loc = Ref->getLocation();
- R1 = SourceRange(Ref->getLocation(), Ref->getLocation());
- if (Ref->getBase())
- R2 = Ref->getBase()->getSourceRange();
-#else
+ case ObjCPropertyRefExprClass:
Loc = getExprLoc();
R1 = getSourceRange();
-#endif
return true;
- }
+
case StmtExprClass: {
// Statement exprs don't logically have side effects themselves, but are
// sometimes used in macros in ways that give them a type that is unused.
@@ -1217,9 +1479,13 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
// however, if the result of the stmt expr is dead, we don't want to emit a
// warning.
const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
- if (!CS->body_empty())
+ if (!CS->body_empty()) {
if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
+ if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
+ return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
if (getType()->isVoidType())
return false;
@@ -1268,8 +1534,8 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
case CXXBindTemporaryExprClass:
return (cast<CXXBindTemporaryExpr>(this)
->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
- case CXXExprWithTemporariesClass:
- return (cast<CXXExprWithTemporaries>(this)
+ case ExprWithCleanupsClass:
+ return (cast<ExprWithCleanups>(this)
->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
}
}
@@ -1311,12 +1577,254 @@ bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
return cast<ArraySubscriptExpr>(this)->getBase()->isOBJCGCCandidate(Ctx);
}
}
+
+bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
+ if (isTypeDependent())
+ return false;
+ return ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
+}
+
+static Expr::CanThrowResult MergeCanThrow(Expr::CanThrowResult CT1,
+ Expr::CanThrowResult CT2) {
+ // CanThrowResult constants are ordered so that the maximum is the correct
+ // merge result.
+ return CT1 > CT2 ? CT1 : CT2;
+}
+
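MergeCanThrow works because the CanThrowResult enumerators are declared in increasing strength, so taking the maximum is the correct join. The same idiom in isolation, with assumed values mirroring the comment above:

// Assumed ordering: Cannot < Dependent < Can.
enum Result { R_Cannot = 0, R_Dependent = 1, R_Can = 2 };

static Result merge(Result A, Result B) {
  return A > B ? A : B; // "can throw" dominates, then "dependent"
}
// merge(R_Cannot, R_Dependent) == R_Dependent
// merge(R_Dependent, R_Can)    == R_Can
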
+static Expr::CanThrowResult CanSubExprsThrow(ASTContext &C, const Expr *CE) {
+ Expr *E = const_cast<Expr*>(CE);
+ Expr::CanThrowResult R = Expr::CT_Cannot;
+ for (Expr::child_range I = E->children(); I && R != Expr::CT_Can; ++I) {
+ R = MergeCanThrow(R, cast<Expr>(*I)->CanThrow(C));
+ }
+ return R;
+}
+
+static Expr::CanThrowResult CanCalleeThrow(const Decl *D,
+ bool NullThrows = true) {
+ if (!D)
+ return NullThrows ? Expr::CT_Can : Expr::CT_Cannot;
+
+ // See if we can get a function type from the decl somehow.
+ const ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD) // If we have no clue what we're calling, assume the worst.
+ return Expr::CT_Can;
+
+ // As an extension, we assume that __attribute__((nothrow)) functions don't
+ // throw.
+ if (isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
+ return Expr::CT_Cannot;
+
+ QualType T = VD->getType();
+ const FunctionProtoType *FT;
+ if ((FT = T->getAs<FunctionProtoType>())) {
+ } else if (const PointerType *PT = T->getAs<PointerType>())
+ FT = PT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ FT = RT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const MemberPointerType *MT = T->getAs<MemberPointerType>())
+ FT = MT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const BlockPointerType *BT = T->getAs<BlockPointerType>())
+ FT = BT->getPointeeType()->getAs<FunctionProtoType>();
+
+ if (!FT)
+ return Expr::CT_Can;
+
+ return FT->hasEmptyExceptionSpec() ? Expr::CT_Cannot : Expr::CT_Can;
+}
+
+static Expr::CanThrowResult CanDynamicCastThrow(const CXXDynamicCastExpr *DC) {
+ if (DC->isTypeDependent())
+ return Expr::CT_Dependent;
+
+ if (!DC->getTypeAsWritten()->isReferenceType())
+ return Expr::CT_Cannot;
+
+ return DC->getCastKind() == clang::CK_Dynamic? Expr::CT_Can : Expr::CT_Cannot;
+}
+
+static Expr::CanThrowResult CanTypeidThrow(ASTContext &C,
+ const CXXTypeidExpr *DC) {
+ if (DC->isTypeOperand())
+ return Expr::CT_Cannot;
+
+ Expr *Op = DC->getExprOperand();
+ if (Op->isTypeDependent())
+ return Expr::CT_Dependent;
+
+ const RecordType *RT = Op->getType()->getAs<RecordType>();
+ if (!RT)
+ return Expr::CT_Cannot;
+
+ if (!cast<CXXRecordDecl>(RT->getDecl())->isPolymorphic())
+ return Expr::CT_Cannot;
+
+ if (Op->Classify(C).isPRValue())
+ return Expr::CT_Cannot;
+
+ return Expr::CT_Can;
+}
+
+Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
+ // C++ [expr.unary.noexcept]p3:
+ // [Can throw] if in a potentially-evaluated context the expression would
+ // contain:
+ switch (getStmtClass()) {
+ case CXXThrowExprClass:
+ // - a potentially evaluated throw-expression
+ return CT_Can;
+
+ case CXXDynamicCastExprClass: {
+ // - a potentially evaluated dynamic_cast expression dynamic_cast<T>(v),
+ // where T is a reference type, that requires a run-time check
+ CanThrowResult CT = CanDynamicCastThrow(cast<CXXDynamicCastExpr>(this));
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXTypeidExprClass:
+ // - a potentially evaluated typeid expression applied to a glvalue
+ // expression whose type is a polymorphic class type
+ return CanTypeidThrow(C, cast<CXXTypeidExpr>(this));
+
+ // - a potentially evaluated call to a function, member function, function
+ // pointer, or member function pointer that does not have a non-throwing
+ // exception-specification
+ case CallExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass: {
+ CanThrowResult CT = CanCalleeThrow(cast<CallExpr>(this)->getCalleeDecl());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXConstructExprClass:
+ case CXXTemporaryObjectExprClass: {
+ CanThrowResult CT = CanCalleeThrow(
+ cast<CXXConstructExpr>(this)->getConstructor());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXNewExprClass: {
+ CanThrowResult CT = MergeCanThrow(
+ CanCalleeThrow(cast<CXXNewExpr>(this)->getOperatorNew()),
+ CanCalleeThrow(cast<CXXNewExpr>(this)->getConstructor(),
+ /*NullThrows*/false));
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXDeleteExprClass: {
+ CanThrowResult CT = CanCalleeThrow(
+ cast<CXXDeleteExpr>(this)->getOperatorDelete());
+ if (CT == CT_Can)
+ return CT;
+ const Expr *Arg = cast<CXXDeleteExpr>(this)->getArgument();
+ // Unwrap exactly one implicit cast, which converts all pointers to void*.
+ if (const ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = Cast->getSubExpr();
+ if (const PointerType *PT = Arg->getType()->getAs<PointerType>()) {
+ if (const RecordType *RT = PT->getPointeeType()->getAs<RecordType>()) {
+ CanThrowResult CT2 = CanCalleeThrow(
+ cast<CXXRecordDecl>(RT->getDecl())->getDestructor());
+ if (CT2 == CT_Can)
+ return CT2;
+ CT = MergeCanThrow(CT, CT2);
+ }
+ }
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXBindTemporaryExprClass: {
+ // The bound temporary has to be destroyed again, which might throw.
+ CanThrowResult CT = CanCalleeThrow(
+ cast<CXXBindTemporaryExpr>(this)->getTemporary()->getDestructor());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ // ObjC message sends are like function calls, but never have exception
+ // specs.
+ case ObjCMessageExprClass:
+ case ObjCPropertyRefExprClass:
+ return CT_Can;
+
+ // Many other things have subexpressions, so we have to test those.
+ // Some are simple:
+ case ParenExprClass:
+ case MemberExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass:
+ case ConditionalOperatorClass:
+ case CompoundLiteralExprClass:
+ case ExtVectorElementExprClass:
+ case InitListExprClass:
+ case DesignatedInitExprClass:
+ case ParenListExprClass:
+ case VAArgExprClass:
+ case CXXDefaultArgExprClass:
+ case ExprWithCleanupsClass:
+ case ObjCIvarRefExprClass:
+ case ObjCIsaExprClass:
+ case ShuffleVectorExprClass:
+ return CanSubExprsThrow(C, this);
+
+ // Some might be dependent for other reasons.
+ case UnaryOperatorClass:
+ case ArraySubscriptExprClass:
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass:
+ case CXXStaticCastExprClass:
+ case CXXFunctionalCastExprClass:
+ case BinaryOperatorClass:
+ case CompoundAssignOperatorClass: {
+ CanThrowResult CT = isTypeDependent() ? CT_Dependent : CT_Cannot;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ // FIXME: We should handle StmtExpr, but that opens a MASSIVE can of worms.
+ case StmtExprClass:
+ return CT_Can;
+
+ case ChooseExprClass:
+ if (isTypeDependent() || isValueDependent())
+ return CT_Dependent;
+ return cast<ChooseExpr>(this)->getChosenSubExpr(C)->CanThrow(C);
+
+ // Some expressions are always dependent.
+ case DependentScopeDeclRefExprClass:
+ case CXXUnresolvedConstructExprClass:
+ case CXXDependentScopeMemberExprClass:
+ return CT_Dependent;
+
+ default:
+ // All other expressions don't have subexpressions, or else they are
+ // unevaluated.
+ return CT_Cannot;
+ }
+}
+
Expr* Expr::IgnoreParens() {
Expr* E = this;
- while (ParenExpr* P = dyn_cast<ParenExpr>(E))
- E = P->getSubExpr();
-
- return E;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
}
/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
@@ -1324,24 +1832,68 @@ Expr* Expr::IgnoreParens() {
Expr *Expr::IgnoreParenCasts() {
Expr *E = this;
while (true) {
- if (ParenExpr *P = dyn_cast<ParenExpr>(E))
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
E = P->getSubExpr();
- else if (CastExpr *P = dyn_cast<CastExpr>(E))
+ continue;
+ }
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
E = P->getSubExpr();
- else
- return E;
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ return E;
}
}
+/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
+/// casts. This is intended purely as a temporary workaround for code
+/// that hasn't yet been rewritten to do the right thing about those
+/// casts, and may disappear along with the last internal use.
+Expr *Expr::IgnoreParenLValueCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ } else if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ if (P->getCastKind() == CK_LValueToRValue) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ break;
+ }
+ return E;
+}
+
Expr *Expr::IgnoreParenImpCasts() {
Expr *E = this;
while (true) {
- if (ParenExpr *P = dyn_cast<ParenExpr>(E))
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
E = P->getSubExpr();
- else if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E))
+ continue;
+ }
+ if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
E = P->getSubExpr();
- else
- return E;
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ return E;
}
}
@@ -1366,9 +1918,9 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
continue;
}
- if ((E->getType()->isPointerType() ||
+ if ((E->getType()->isPointerType() ||
E->getType()->isIntegralType(Ctx)) &&
- (SE->getType()->isPointerType() ||
+ (SE->getType()->isPointerType() ||
SE->getType()->isIntegralType(Ctx)) &&
Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
E = SE;
@@ -1376,6 +1928,13 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
}
}
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+
return E;
}
}
@@ -1390,7 +1949,7 @@ bool Expr::isDefaultArgument() const {
/// \brief Skip over any no-op casts and any temporary-binding
/// expressions.
-static const Expr *skipTemporaryBindingsAndNoOpCasts(const Expr *E) {
+static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getCastKind() == CK_NoOp)
E = ICE->getSubExpr();
@@ -1407,50 +1966,48 @@ static const Expr *skipTemporaryBindingsAndNoOpCasts(const Expr *E) {
else
break;
}
-
- return E;
-}
-const Expr *Expr::getTemporaryObject() const {
- const Expr *E = skipTemporaryBindingsAndNoOpCasts(this);
+ return E->IgnoreParens();
+}
- // A cast can produce a temporary object. The object's construction
- // is represented as a CXXConstructExpr.
- if (const CastExpr *Cast = dyn_cast<CastExpr>(E)) {
- // Only user-defined and constructor conversions can produce
- // temporary objects.
- if (Cast->getCastKind() != CK_ConstructorConversion &&
- Cast->getCastKind() != CK_UserDefinedConversion)
- return 0;
+/// isTemporaryObject - Determines if this expression produces a
+/// temporary of the given class type.
+bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
+ if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ return false;
- // Strip off temporary bindings and no-op casts.
- const Expr *Sub = skipTemporaryBindingsAndNoOpCasts(Cast->getSubExpr());
+ const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
- // If this is a constructor conversion, see if we have an object
- // construction.
- if (Cast->getCastKind() == CK_ConstructorConversion)
- return dyn_cast<CXXConstructExpr>(Sub);
+ // Temporaries are by definition pr-values of class type.
+ if (!E->Classify(C).isPRValue()) {
+ // In this context, a property reference is a message send and is a pr-value.
+ if (!isa<ObjCPropertyRefExpr>(E))
+ return false;
+ }
- // If this is a user-defined conversion, see if we have a call to
- // a function that itself returns a temporary object.
- if (Cast->getCastKind() == CK_UserDefinedConversion)
- if (const CallExpr *CE = dyn_cast<CallExpr>(Sub))
- if (CE->getCallReturnType()->isRecordType())
- return CE;
+ // Black-list a few cases which yield pr-values of class type that don't
+ // refer to temporaries of that type:
- return 0;
+ // - implicit derived-to-base conversions
+ if (isa<ImplicitCastExpr>(E)) {
+ switch (cast<ImplicitCastExpr>(E)->getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ return false;
+ default:
+ break;
+ }
}
- // A call returning a class type returns a temporary.
- if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
- if (CE->getCallReturnType()->isRecordType())
- return CE;
+ // - member expressions (all)
+ if (isa<MemberExpr>(E))
+ return false;
- return 0;
- }
+ // - opaque values (all)
+ if (isa<OpaqueValueExpr>(E))
+ return false;
- // Explicit temporary object constructors create temporaries.
- return dyn_cast<CXXTemporaryObjectExpr>(E);
+ return true;
}
/// hasAnyTypeDependentArguments - Determines if any of the expressions
@@ -1533,6 +2090,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
case ParenExprClass:
return cast<ParenExpr>(this)->getSubExpr()
->isConstantInitializer(Ctx, IsForRef);
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)
+ ->isConstantInitializer(Ctx, IsForRef);
case UnaryOperatorClass: {
const UnaryOperator* Exp = cast<UnaryOperator>(this);
if (Exp->getOpcode() == UO_Extension)
@@ -1572,11 +2132,14 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
return isEvaluatable(Ctx);
}
-/// isNullPointerConstant - C99 6.3.2.3p3 - Return true if this is either an
-/// integer constant expression with the value zero, or if this is one that is
-/// cast to void*.
-bool Expr::isNullPointerConstant(ASTContext &Ctx,
- NullPointerConstantValueDependence NPC) const {
+/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
+/// pointer constant or not, as well as the specific kind of constant detected.
+/// Null pointer constants can be integer constant expressions with the
+/// value zero, casts of zero to void*, nullptr (C++0X), or __null
+/// (a GNU extension).
+Expr::NullPointerConstantKind
+Expr::isNullPointerConstant(ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const {
if (isValueDependent()) {
switch (NPC) {
case NPC_NeverValueDependent:
@@ -1584,10 +2147,13 @@ bool Expr::isNullPointerConstant(ASTContext &Ctx,
// If the unthinkable happens, fall through to the safest alternative.
case NPC_ValueDependentIsNull:
- return isTypeDependent() || getType()->isIntegralType(Ctx);
+ if (isTypeDependent() || getType()->isIntegralType(Ctx))
+ return NPCK_ZeroInteger;
+ else
+ return NPCK_NotNull;
case NPC_ValueDependentIsNotNull:
- return false;
+ return NPCK_NotNull;
}
}
@@ -1616,30 +2182,61 @@ bool Expr::isNullPointerConstant(ASTContext &Ctx,
return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
} else if (isa<GNUNullExpr>(this)) {
// The GNU __null extension is always a null pointer constant.
- return true;
+ return NPCK_GNUNull;
}
// C++0x nullptr_t is always a null pointer constant.
if (getType()->isNullPtrType())
- return true;
-
+ return NPCK_CXX0X_nullptr;
+
+ if (const RecordType *UT = getType()->getAsUnionType())
+ if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
+ const Expr *InitExpr = CLE->getInitializer();
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
+ return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
+ }
// This expression must be an integer type.
if (!getType()->isIntegerType() ||
(Ctx.getLangOptions().CPlusPlus && getType()->isEnumeralType()))
- return false;
+ return NPCK_NotNull;
// If we have an integer constant expression, we need to *evaluate* it and
// test for the value 0.
llvm::APSInt Result;
- return isIntegerConstantExpr(Result, Ctx) && Result == 0;
+ bool IsNull = isIntegerConstantExpr(Result, Ctx) && Result == 0;
+
+ return (IsNull ? NPCK_ZeroInteger : NPCK_NotNull);
+}
+
+/// \brief If this expression is an l-value for an Objective C
+/// property, find the underlying property reference expression.
+const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
+ const Expr *E = this;
+ while (true) {
+ assert((E->getValueKind() == VK_LValue &&
+ E->getObjectKind() == OK_ObjCProperty) &&
+ "expression is not a property reference");
+ E = E->IgnoreParenCasts();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return cast<ObjCPropertyRefExpr>(E);
}
FieldDecl *Expr::getBitField() {
Expr *E = this->IgnoreParens();
while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- if (ICE->getValueKind() != VK_RValue &&
- ICE->getCastKind() == CK_NoOp)
+ if (ICE->getCastKind() == CK_LValueToRValue ||
+ (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
E = ICE->getSubExpr()->IgnoreParens();
else
break;
@@ -1650,6 +2247,11 @@ FieldDecl *Expr::getBitField() {
if (Field->isBitField())
return Field;
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
+ if (Field->isBitField())
+ return Field;
+
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E))
if (BinOp->isAssignmentOp() && BinOp->getLHS())
return BinOp->getLHS()->getBitField();
@@ -1741,21 +2343,24 @@ void ExtVectorElementExpr::getEncodedElementAccess(
}
ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
SourceLocation SuperLoc,
bool IsInstanceSuper,
QualType SuperType,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc)
- : Expr(ObjCMessageExprClass, T, /*TypeDependent=*/false,
- /*ValueDependent=*/false),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
NumArgs(NumArgs), Kind(IsInstanceSuper? SuperInstance : SuperClass),
HasMethod(Method != 0), SuperLoc(SuperLoc),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
- LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+ SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
setReceiverPointer(SuperType.getAsOpaquePtr());
if (NumArgs)
@@ -1763,88 +2368,115 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
}
ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
TypeSourceInfo *Receiver,
- Selector Sel,
+ Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc)
- : Expr(ObjCMessageExprClass, T, T->isDependentType(),
- (T->isDependentType() ||
- hasAnyValueDependentArguments(Args, NumArgs))),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->containsUnexpandedParameterPack()),
NumArgs(NumArgs), Kind(Class), HasMethod(Method != 0),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
- LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+ SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
setReceiverPointer(Receiver);
- if (NumArgs)
- memcpy(getArgs(), Args, NumArgs * sizeof(Expr *));
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
}
ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
Expr *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc)
- : Expr(ObjCMessageExprClass, T, Receiver->isTypeDependent(),
- (Receiver->isTypeDependent() ||
- hasAnyValueDependentArguments(Args, NumArgs))),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
+ Receiver->isTypeDependent(),
+ Receiver->containsUnexpandedParameterPack()),
NumArgs(NumArgs), Kind(Instance), HasMethod(Method != 0),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
- LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+ SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
setReceiverPointer(Receiver);
- if (NumArgs)
- memcpy(getArgs(), Args, NumArgs * sizeof(Expr *));
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
SourceLocation SuperLoc,
bool IsInstanceSuper,
QualType SuperType,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc) {
unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
NumArgs * sizeof(Expr *);
void *Mem = Context.Allocate(Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
- return new (Mem) ObjCMessageExpr(T, LBracLoc, SuperLoc, IsInstanceSuper,
- SuperType, Sel, Method, Args, NumArgs,
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
+                                   SuperType, Sel, SelLoc, Method, Args, NumArgs,
RBracLoc);
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
TypeSourceInfo *Receiver,
Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc) {
unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
NumArgs * sizeof(Expr *);
void *Mem = Context.Allocate(Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
- return new (Mem) ObjCMessageExpr(T, LBracLoc, Receiver, Sel, Method, Args,
- NumArgs, RBracLoc);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLoc,
+ Method, Args, NumArgs, RBracLoc);
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
SourceLocation LBracLoc,
Expr *Receiver,
- Selector Sel,
+ Selector Sel,
+ SourceLocation SelLoc,
ObjCMethodDecl *Method,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc) {
unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
NumArgs * sizeof(Expr *);
void *Mem = Context.Allocate(Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
- return new (Mem) ObjCMessageExpr(T, LBracLoc, Receiver, Sel, Method, Args,
- NumArgs, RBracLoc);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLoc,
+ Method, Args, NumArgs, RBracLoc);
}
ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(ASTContext &Context,
@@ -1854,7 +2486,23 @@ ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(ASTContext &Context,
void *Mem = Context.Allocate(Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
}
-
+
+SourceRange ObjCMessageExpr::getReceiverRange() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getSourceRange();
+
+ case Class:
+ return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
+
+ case SuperInstance:
+ case SuperClass:
+ return getSuperLoc();
+ }
+
+ return SourceLocation();
+}
+
Selector ObjCMessageExpr::getSelector() const {
if (HasMethod)
return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
@@ -1883,19 +2531,40 @@ ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
break;
case SuperClass:
- if (const ObjCObjectPointerType *Iface
- = getSuperType()->getAs<ObjCObjectPointerType>())
- return Iface->getInterfaceDecl();
+ if (const ObjCObjectType *Iface
+ = getSuperType()->getAs<ObjCObjectType>())
+ return Iface->getInterface();
break;
}
return 0;
}
-bool ChooseExpr::isConditionTrue(ASTContext &C) const {
+bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
return getCond()->EvaluateAsInt(C) != 0;
}
+ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
+ QualType Type, SourceLocation BLoc,
+ SourceLocation RP)
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
+ Type->isDependentType(), Type->isDependentType(),
+ Type->containsUnexpandedParameterPack()),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr)
+{
+ SubExprs = new (C) Stmt*[nexpr];
+ for (unsigned i = 0; i < nexpr; i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
void ShuffleVectorExpr::setExprs(ASTContext &C, Expr ** Exprs,
unsigned NumExprs) {
if (SubExprs) C.Deallocate(SubExprs);
@@ -1926,13 +2595,15 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
unsigned NumIndexExprs,
Expr *Init)
: Expr(DesignatedInitExprClass, Ty,
- Init->isTypeDependent(), Init->isValueDependent()),
+ Init->getValueKind(), Init->getObjectKind(),
+ Init->isTypeDependent(), Init->isValueDependent(),
+ Init->containsUnexpandedParameterPack()),
EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
this->Designators = new (C) Designator[NumDesignators];
// Record the initializer itself.
- child_iterator Child = child_begin();
+ child_range Child = children();
*Child++ = Init;
// Copy the designators and their subexpressions, computing
@@ -1944,8 +2615,12 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
if (this->Designators[I].isArrayDesignator()) {
// Compute type- and value-dependence.
Expr *Index = IndexExprs[IndexIdx];
- ValueDependent = ValueDependent ||
- Index->isTypeDependent() || Index->isValueDependent();
+ if (Index->isTypeDependent() || Index->isValueDependent())
+ ExprBits.ValueDependent = true;
+
+ // Propagate unexpanded parameter packs.
+ if (Index->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
// Copy the index expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
@@ -1953,9 +2628,14 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
// Compute type- and value-dependence.
Expr *Start = IndexExprs[IndexIdx];
Expr *End = IndexExprs[IndexIdx + 1];
- ValueDependent = ValueDependent ||
- Start->isTypeDependent() || Start->isValueDependent() ||
- End->isTypeDependent() || End->isValueDependent();
+ if (Start->isTypeDependent() || Start->isValueDependent() ||
+ End->isTypeDependent() || End->isValueDependent())
+ ExprBits.ValueDependent = true;
+
+ // Propagate unexpanded parameter packs.
+ if (Start->containsUnexpandedParameterPack() ||
+ End->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
// Copy the start/end expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
@@ -2066,14 +2746,30 @@ void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
Expr **exprs, unsigned nexprs,
SourceLocation rparenloc)
-: Expr(ParenListExprClass, QualType(),
- hasAnyTypeDependentArguments(exprs, nexprs),
- hasAnyValueDependentArguments(exprs, nexprs)),
- NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
+ false, false, false),
+ NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
Exprs = new (C) Stmt*[nexprs];
- for (unsigned i = 0; i != nexprs; ++i)
+ for (unsigned i = 0; i != nexprs; ++i) {
+ if (exprs[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (exprs[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprs[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
Exprs[i] = exprs[i];
+ }
+}
+
+const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
+ e = ewc->getSubExpr();
+ e = cast<CXXConstructExpr>(e)->getArg(0);
+ while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ e = ice->getSubExpr();
+ return cast<OpaqueValueExpr>(e);
}
//===----------------------------------------------------------------------===//
@@ -2093,257 +2789,43 @@ const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
-// DeclRefExpr
-Stmt::child_iterator DeclRefExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator DeclRefExpr::child_end() { return child_iterator(); }
-
-// ObjCIvarRefExpr
-Stmt::child_iterator ObjCIvarRefExpr::child_begin() { return &Base; }
-Stmt::child_iterator ObjCIvarRefExpr::child_end() { return &Base+1; }
-
-// ObjCPropertyRefExpr
-Stmt::child_iterator ObjCPropertyRefExpr::child_begin() { return &Base; }
-Stmt::child_iterator ObjCPropertyRefExpr::child_end() { return &Base+1; }
-
-// ObjCImplicitSetterGetterRefExpr
-Stmt::child_iterator ObjCImplicitSetterGetterRefExpr::child_begin() {
- // If this is accessing a class member, skip that entry.
- if (Base) return &Base;
- return &Base+1;
-}
-Stmt::child_iterator ObjCImplicitSetterGetterRefExpr::child_end() {
- return &Base+1;
-}
-
-// ObjCSuperExpr
-Stmt::child_iterator ObjCSuperExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator ObjCSuperExpr::child_end() { return child_iterator(); }
-
-// ObjCIsaExpr
-Stmt::child_iterator ObjCIsaExpr::child_begin() { return &Base; }
-Stmt::child_iterator ObjCIsaExpr::child_end() { return &Base+1; }
-
-// PredefinedExpr
-Stmt::child_iterator PredefinedExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator PredefinedExpr::child_end() { return child_iterator(); }
-
-// IntegerLiteral
-Stmt::child_iterator IntegerLiteral::child_begin() { return child_iterator(); }
-Stmt::child_iterator IntegerLiteral::child_end() { return child_iterator(); }
-
-// CharacterLiteral
-Stmt::child_iterator CharacterLiteral::child_begin() { return child_iterator();}
-Stmt::child_iterator CharacterLiteral::child_end() { return child_iterator(); }
-
-// FloatingLiteral
-Stmt::child_iterator FloatingLiteral::child_begin() { return child_iterator(); }
-Stmt::child_iterator FloatingLiteral::child_end() { return child_iterator(); }
-
-// ImaginaryLiteral
-Stmt::child_iterator ImaginaryLiteral::child_begin() { return &Val; }
-Stmt::child_iterator ImaginaryLiteral::child_end() { return &Val+1; }
-
-// StringLiteral
-Stmt::child_iterator StringLiteral::child_begin() { return child_iterator(); }
-Stmt::child_iterator StringLiteral::child_end() { return child_iterator(); }
-
-// ParenExpr
-Stmt::child_iterator ParenExpr::child_begin() { return &Val; }
-Stmt::child_iterator ParenExpr::child_end() { return &Val+1; }
-
-// UnaryOperator
-Stmt::child_iterator UnaryOperator::child_begin() { return &Val; }
-Stmt::child_iterator UnaryOperator::child_end() { return &Val+1; }
-
-// OffsetOfExpr
-Stmt::child_iterator OffsetOfExpr::child_begin() {
- return reinterpret_cast<Stmt **> (reinterpret_cast<OffsetOfNode *> (this + 1)
- + NumComps);
-}
-Stmt::child_iterator OffsetOfExpr::child_end() {
- return child_iterator(&*child_begin() + NumExprs);
-}
-
// SizeOfAlignOfExpr
-Stmt::child_iterator SizeOfAlignOfExpr::child_begin() {
+Stmt::child_range SizeOfAlignOfExpr::children() {
// If this is of a type and the type is a VLA type (and not a typedef), the
// size expression of the VLA needs to be treated as an executable expression.
// Why isn't this weirdness documented better in StmtIterator?
if (isArgumentType()) {
- if (VariableArrayType* T = dyn_cast<VariableArrayType>(
+ if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
getArgumentType().getTypePtr()))
- return child_iterator(T);
- return child_iterator();
+ return child_range(child_iterator(T), child_iterator());
+ return child_range();
}
- return child_iterator(&Argument.Ex);
-}
-Stmt::child_iterator SizeOfAlignOfExpr::child_end() {
- if (isArgumentType())
- return child_iterator();
- return child_iterator(&Argument.Ex + 1);
-}
-
-// ArraySubscriptExpr
-Stmt::child_iterator ArraySubscriptExpr::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator ArraySubscriptExpr::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// CallExpr
-Stmt::child_iterator CallExpr::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator CallExpr::child_end() {
- return &SubExprs[0]+NumArgs+ARGS_START;
-}
-
-// MemberExpr
-Stmt::child_iterator MemberExpr::child_begin() { return &Base; }
-Stmt::child_iterator MemberExpr::child_end() { return &Base+1; }
-
-// ExtVectorElementExpr
-Stmt::child_iterator ExtVectorElementExpr::child_begin() { return &Base; }
-Stmt::child_iterator ExtVectorElementExpr::child_end() { return &Base+1; }
-
-// CompoundLiteralExpr
-Stmt::child_iterator CompoundLiteralExpr::child_begin() { return &Init; }
-Stmt::child_iterator CompoundLiteralExpr::child_end() { return &Init+1; }
-
-// CastExpr
-Stmt::child_iterator CastExpr::child_begin() { return &Op; }
-Stmt::child_iterator CastExpr::child_end() { return &Op+1; }
-
-// BinaryOperator
-Stmt::child_iterator BinaryOperator::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator BinaryOperator::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// ConditionalOperator
-Stmt::child_iterator ConditionalOperator::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator ConditionalOperator::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// AddrLabelExpr
-Stmt::child_iterator AddrLabelExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator AddrLabelExpr::child_end() { return child_iterator(); }
-
-// StmtExpr
-Stmt::child_iterator StmtExpr::child_begin() { return &SubStmt; }
-Stmt::child_iterator StmtExpr::child_end() { return &SubStmt+1; }
-
-// TypesCompatibleExpr
-Stmt::child_iterator TypesCompatibleExpr::child_begin() {
- return child_iterator();
-}
-
-Stmt::child_iterator TypesCompatibleExpr::child_end() {
- return child_iterator();
-}
-
-// ChooseExpr
-Stmt::child_iterator ChooseExpr::child_begin() { return &SubExprs[0]; }
-Stmt::child_iterator ChooseExpr::child_end() { return &SubExprs[0]+END_EXPR; }
-
-// GNUNullExpr
-Stmt::child_iterator GNUNullExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator GNUNullExpr::child_end() { return child_iterator(); }
-
-// ShuffleVectorExpr
-Stmt::child_iterator ShuffleVectorExpr::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator ShuffleVectorExpr::child_end() {
- return &SubExprs[0]+NumExprs;
-}
-
-// VAArgExpr
-Stmt::child_iterator VAArgExpr::child_begin() { return &Val; }
-Stmt::child_iterator VAArgExpr::child_end() { return &Val+1; }
-
-// InitListExpr
-Stmt::child_iterator InitListExpr::child_begin() {
- return InitExprs.size() ? &InitExprs[0] : 0;
-}
-Stmt::child_iterator InitListExpr::child_end() {
- return InitExprs.size() ? &InitExprs[0] + InitExprs.size() : 0;
-}
-
-// DesignatedInitExpr
-Stmt::child_iterator DesignatedInitExpr::child_begin() {
- char* Ptr = static_cast<char*>(static_cast<void *>(this));
- Ptr += sizeof(DesignatedInitExpr);
- return reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
-}
-Stmt::child_iterator DesignatedInitExpr::child_end() {
- return child_iterator(&*child_begin() + NumSubExprs);
-}
-
-// ImplicitValueInitExpr
-Stmt::child_iterator ImplicitValueInitExpr::child_begin() {
- return child_iterator();
-}
-
-Stmt::child_iterator ImplicitValueInitExpr::child_end() {
- return child_iterator();
-}
-
-// ParenListExpr
-Stmt::child_iterator ParenListExpr::child_begin() {
- return &Exprs[0];
-}
-Stmt::child_iterator ParenListExpr::child_end() {
- return &Exprs[0]+NumExprs;
-}
-
-// ObjCStringLiteral
-Stmt::child_iterator ObjCStringLiteral::child_begin() {
- return &String;
-}
-Stmt::child_iterator ObjCStringLiteral::child_end() {
- return &String+1;
-}
-
-// ObjCEncodeExpr
-Stmt::child_iterator ObjCEncodeExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator ObjCEncodeExpr::child_end() { return child_iterator(); }
-
-// ObjCSelectorExpr
-Stmt::child_iterator ObjCSelectorExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator ObjCSelectorExpr::child_end() {
- return child_iterator();
-}
-
-// ObjCProtocolExpr
-Stmt::child_iterator ObjCProtocolExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator ObjCProtocolExpr::child_end() {
- return child_iterator();
+ return child_range(&Argument.Ex, &Argument.Ex + 1);
}
// ObjCMessageExpr
-Stmt::child_iterator ObjCMessageExpr::child_begin() {
+Stmt::child_range ObjCMessageExpr::children() {
+ Stmt **begin;
if (getReceiverKind() == Instance)
- return reinterpret_cast<Stmt **>(this + 1);
- return getArgs();
-}
-Stmt::child_iterator ObjCMessageExpr::child_end() {
- return getArgs() + getNumArgs();
+ begin = reinterpret_cast<Stmt **>(this + 1);
+ else
+ begin = reinterpret_cast<Stmt **>(getArgs());
+ return child_range(begin,
+ reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
}
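
Here the separate child_begin()/child_end() accessors give way to a single children() method returning a begin/end range that callers test and advance like a cursor. A self-contained analogue of that idiom is sketched below; this Range type is purely illustrative, while clang's actual StmtRange carries more machinery:

    #include <iostream>
    #include <utility>
    #include <vector>

    // Minimal stand-in for a child_range: a begin/end pair that can be
    // tested for emptiness, dereferenced, and advanced in one loop header.
    struct Range : std::pair<int *, int *> {
      Range(int *B, int *E) : std::pair<int *, int *>(B, E) {}
      operator bool() const { return first != second; }
      int &operator*() const { return *first; }
      Range &operator++() { ++first; return *this; }
    };

    int main() {
      std::vector<int> Kids = {1, 2, 3};
      for (Range R(Kids.data(), Kids.data() + Kids.size()); R; ++R)
        std::cout << *R << '\n'; // visits each "child" once
    }
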
// Blocks
-Stmt::child_iterator BlockExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator BlockExpr::child_end() { return child_iterator(); }
+BlockDeclRefExpr::BlockDeclRefExpr(VarDecl *d, QualType t, ExprValueKind VK,
+ SourceLocation l, bool ByRef,
+ bool constAdded)
+ : Expr(BlockDeclRefExprClass, t, VK, OK_Ordinary, false, false,
+ d->isParameterPack()),
+ D(d), Loc(l), IsByRef(ByRef), ConstQualAdded(constAdded)
+{
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ computeDeclRefDependence(D, getType(), TypeDependent, ValueDependent);
+ ExprBits.TypeDependent = TypeDependent;
+ ExprBits.ValueDependent = ValueDependent;
+}
-Stmt::child_iterator BlockDeclRefExpr::child_begin() { return child_iterator();}
-Stmt::child_iterator BlockDeclRefExpr::child_end() { return child_iterator(); }
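
Taken together, the Expr.cpp hunks above thread two new pieces of state through every expression constructor, a value kind and an object kind, plus a third dependence flag for unexpanded parameter packs: the base-class constructor visibly grows from four arguments to seven. A shape-only outline, simplified and not the full clang::Expr signature:

    // Before: Expr(StmtClass SC, QualType T, bool TypeDep, bool ValueDep)
    // After (shape only; the real constructor carries more context):
    enum ExprValueKind  { VK_RValue, VK_LValue, VK_XValue };
    enum ExprObjectKind { OK_Ordinary /* bitfields, properties, ... */ };

    struct ExprShape {
      ExprShape(int StmtClass, int Type,
                ExprValueKind VK, ExprObjectKind OK,
                bool TypeDependent, bool ValueDependent,
                bool ContainsUnexpandedParameterPack)
      { /* store the two kinds and the three dependence bits */ }
    };
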
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
index 0a10130..28ff9fb 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -29,57 +29,18 @@ QualType CXXTypeidExpr::getTypeOperand() const {
.getUnqualifiedType();
}
-// CXXTypeidExpr - has child iterators if the operand is an expression
-Stmt::child_iterator CXXTypeidExpr::child_begin() {
- return isTypeOperand() ? child_iterator()
- : reinterpret_cast<Stmt **>(&Operand);
-}
-Stmt::child_iterator CXXTypeidExpr::child_end() {
- return isTypeOperand() ? child_iterator()
- : reinterpret_cast<Stmt **>(&Operand) + 1;
-}
-
-// CXXBoolLiteralExpr
-Stmt::child_iterator CXXBoolLiteralExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator CXXBoolLiteralExpr::child_end() {
- return child_iterator();
-}
-
-// CXXNullPtrLiteralExpr
-Stmt::child_iterator CXXNullPtrLiteralExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator CXXNullPtrLiteralExpr::child_end() {
- return child_iterator();
-}
-
-// CXXThisExpr
-Stmt::child_iterator CXXThisExpr::child_begin() { return child_iterator(); }
-Stmt::child_iterator CXXThisExpr::child_end() { return child_iterator(); }
-
-// CXXThrowExpr
-Stmt::child_iterator CXXThrowExpr::child_begin() { return &Op; }
-Stmt::child_iterator CXXThrowExpr::child_end() {
- // If Op is 0, we are processing throw; which has no children.
- return Op ? &Op+1 : &Op;
-}
-
-// CXXDefaultArgExpr
-Stmt::child_iterator CXXDefaultArgExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator CXXDefaultArgExpr::child_end() {
- return child_iterator();
+QualType CXXUuidofExpr::getTypeOperand() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
+ .getUnqualifiedType();
}
// CXXScalarValueInitExpr
-Stmt::child_iterator CXXScalarValueInitExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator CXXScalarValueInitExpr::child_end() {
- return child_iterator();
+SourceRange CXXScalarValueInitExpr::getSourceRange() const {
+ SourceLocation Start = RParenLoc;
+ if (TypeInfo)
+ Start = TypeInfo->getTypeLoc().getBeginLoc();
+ return SourceRange(Start, RParenLoc);
}
// CXXNewExpr
@@ -88,22 +49,44 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SourceRange TypeIdParens, Expr *arraySize,
CXXConstructorDecl *constructor, bool initializer,
Expr **constructorArgs, unsigned numConsArgs,
- FunctionDecl *operatorDelete, QualType ty,
- SourceLocation startLoc, SourceLocation endLoc)
- : Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()),
- GlobalNew(globalNew),
- Initializer(initializer), SubExprs(0), OperatorNew(operatorNew),
+ FunctionDecl *operatorDelete,
+ bool usualArrayDeleteWantsSize, QualType ty,
+ TypeSourceInfo *AllocatedTypeInfo,
+ SourceLocation startLoc, SourceLocation endLoc,
+ SourceLocation constructorLParen,
+ SourceLocation constructorRParen)
+ : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), ty->isDependentType(),
+ ty->containsUnexpandedParameterPack()),
+ GlobalNew(globalNew), Initializer(initializer),
+ UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize),
+ SubExprs(0), OperatorNew(operatorNew),
OperatorDelete(operatorDelete), Constructor(constructor),
- TypeIdParens(TypeIdParens), StartLoc(startLoc), EndLoc(endLoc) {
-
+ AllocatedTypeInfo(AllocatedTypeInfo), TypeIdParens(TypeIdParens),
+ StartLoc(startLoc), EndLoc(endLoc), ConstructorLParen(constructorLParen),
+ ConstructorRParen(constructorRParen) {
AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
unsigned i = 0;
- if (Array)
+ if (Array) {
+ if (arraySize->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
SubExprs[i++] = arraySize;
- for (unsigned j = 0; j < NumPlacementArgs; ++j)
+ }
+
+ for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ if (placementArgs[j]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
SubExprs[i++] = placementArgs[j];
- for (unsigned j = 0; j < NumConstructorArgs; ++j)
+ }
+
+ for (unsigned j = 0; j < NumConstructorArgs; ++j) {
+ if (constructorArgs[j]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
SubExprs[i++] = constructorArgs[j];
+ }
}
void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
@@ -118,27 +101,59 @@ void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
}
-Stmt::child_iterator CXXNewExpr::child_begin() { return &SubExprs[0]; }
-Stmt::child_iterator CXXNewExpr::child_end() {
- return &SubExprs[0] + Array + getNumPlacementArgs() + getNumConstructorArgs();
-}
-
// CXXDeleteExpr
-Stmt::child_iterator CXXDeleteExpr::child_begin() { return &Argument; }
-Stmt::child_iterator CXXDeleteExpr::child_end() { return &Argument+1; }
+QualType CXXDeleteExpr::getDestroyedType() const {
+ const Expr *Arg = getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+ // The type-to-delete may not be a pointer if it's a dependent type.
+ const QualType ArgType = Arg->getType();
-// CXXPseudoDestructorExpr
-Stmt::child_iterator CXXPseudoDestructorExpr::child_begin() { return &Base; }
-Stmt::child_iterator CXXPseudoDestructorExpr::child_end() {
- return &Base + 1;
+ if (ArgType->isDependentType() && !ArgType->isPointerType())
+ return QualType();
+
+ return ArgType->getAs<PointerType>()->getPointeeType();
}
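
The loop above peels off implicit conversions to void* that may have been wrapped around the deleted pointer, then checks for dependent operands before committing to getPointeeType(). The peeling step restated in a self-contained form; Node and its fields are illustrative, not clang types:

    // Walk inward through wrapper nodes that merely convert to void*,
    // stopping at the first node that is not such a conversion.
    struct Node {
      Node *Inner;        // non-null if this node wraps another expression
      bool ToVoidPointer; // true if the wrapper is a void* conversion
    };

    const Node *peelVoidPointerCasts(const Node *N) {
      while (N->Inner && N->ToVoidPointer)
        N = N->Inner;
      return N;
    }
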
+// CXXPseudoDestructorExpr
PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
: Type(Info)
{
Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
}
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(ASTContext &Context,
+ Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifier *Qualifier, SourceRange QualifierRange,
+ TypeSourceInfo *ScopeType, SourceLocation ColonColonLoc,
+ SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass,
+ Context.getPointerType(Context.getFunctionType(Context.VoidTy, 0, 0,
+ FunctionProtoType::ExtProtoInfo())),
+ VK_RValue, OK_Ordinary,
+ /*isTypeDependent=*/(Base->isTypeDependent() ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
+ /*isValueDependent=*/Base->isValueDependent(),
+ // ContainsUnexpandedParameterPack
+ (Base->containsUnexpandedParameterPack() ||
+ (Qualifier && Qualifier->containsUnexpandedParameterPack()) ||
+ (ScopeType &&
+ ScopeType->getType()->containsUnexpandedParameterPack()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->containsUnexpandedParameterPack()))),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), Qualifier(Qualifier),
+ QualifierRange(QualifierRange),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) { }
+
QualType CXXPseudoDestructorExpr::getDestroyedType() const {
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
return TInfo->getType();
@@ -156,7 +171,7 @@ SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
// UnresolvedLookupExpr
UnresolvedLookupExpr *
-UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
+UnresolvedLookupExpr::Create(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
@@ -168,73 +183,94 @@ UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
{
void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
ExplicitTemplateArgumentList::sizeFor(Args));
- UnresolvedLookupExpr *ULE
- = new (Mem) UnresolvedLookupExpr(C,
- Dependent ? C.DependentTy : C.OverloadTy,
- Dependent, NamingClass,
- Qualifier, QualifierRange, NameInfo,
- ADL,
- /*Overload*/ true,
- /*ExplicitTemplateArgs*/ true,
- Begin, End);
-
- reinterpret_cast<ExplicitTemplateArgumentList*>(ULE+1)->initializeFrom(Args);
-
- return ULE;
+ return new (Mem) UnresolvedLookupExpr(C, NamingClass,
+ Qualifier, QualifierRange, NameInfo,
+ ADL, /*Overload*/ true, &Args,
+ Begin, End);
}
UnresolvedLookupExpr *
-UnresolvedLookupExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ unsigned NumTemplateArgs) {
std::size_t size = sizeof(UnresolvedLookupExpr);
- if (NumTemplateArgs != 0)
+ if (HasExplicitTemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignof<UnresolvedLookupExpr>());
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>());
UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
- E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
return E;
}
-OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
- bool Dependent, NestedNameSpecifier *Qualifier,
- SourceRange QRange,
+OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
+ NestedNameSpecifier *Qualifier, SourceRange QRange,
const DeclarationNameInfo &NameInfo,
- bool HasTemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
- UnresolvedSetIterator End)
- : Expr(K, T, Dependent, Dependent),
- Results(0), NumResults(0), NameInfo(NameInfo), Qualifier(Qualifier),
- QualifierRange(QRange), HasExplicitTemplateArgs(HasTemplateArgs)
+ UnresolvedSetIterator End,
+ bool KnownDependent,
+ bool KnownContainsUnexpandedParameterPack)
+ : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
+ KnownDependent,
+ (KnownContainsUnexpandedParameterPack ||
+ NameInfo.containsUnexpandedParameterPack() ||
+ (Qualifier && Qualifier->containsUnexpandedParameterPack()))),
+ Results(0), NumResults(End - Begin), NameInfo(NameInfo),
+ Qualifier(Qualifier), QualifierRange(QRange),
+ HasExplicitTemplateArgs(TemplateArgs != 0)
{
- initializeResults(C, Begin, End);
-}
-
-void OverloadExpr::initializeResults(ASTContext &C,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
- assert(Results == 0 && "Results already initialized!");
NumResults = End - Begin;
if (NumResults) {
+ // Determine whether this expression is type-dependent.
+ for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
+ if ((*I)->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(*I)) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ }
+
Results = static_cast<DeclAccessPair *>(
C.Allocate(sizeof(DeclAccessPair) * NumResults,
- llvm::alignof<DeclAccessPair>()));
+ llvm::alignOf<DeclAccessPair>()));
memcpy(Results, &*Begin.getIterator(),
NumResults * sizeof(DeclAccessPair));
}
-}
-
-bool OverloadExpr::ComputeDependence(UnresolvedSetIterator Begin,
- UnresolvedSetIterator End,
- const TemplateArgumentListInfo *Args) {
- for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I)
- if ((*I)->getDeclContext()->isDependentContext())
- return true;
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
+ ContainsUnexpandedParameterPack);
+
+ if (Dependent) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
- if (Args && TemplateSpecializationType::anyDependentTemplateArguments(*Args))
- return true;
+ if (isTypeDependent())
+ setType(C.DependentTy);
+}
- return false;
+void OverloadExpr::initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Results == 0 && "Results already initialized!");
+ NumResults = End - Begin;
+ if (NumResults) {
+    Results = static_cast<DeclAccessPair *>(
+               C.Allocate(sizeof(DeclAccessPair) * NumResults,
+                          llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ NumResults * sizeof(DeclAccessPair));
+ }
}
CXXRecordDecl *OverloadExpr::getNamingClass() const {
@@ -244,21 +280,30 @@ CXXRecordDecl *OverloadExpr::getNamingClass() const {
return cast<UnresolvedMemberExpr>(this)->getNamingClass();
}
-Stmt::child_iterator UnresolvedLookupExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator UnresolvedLookupExpr::child_end() {
- return child_iterator();
-}
-// UnaryTypeTraitExpr
-Stmt::child_iterator UnaryTypeTraitExpr::child_begin() {
- return child_iterator();
-}
-Stmt::child_iterator UnaryTypeTraitExpr::child_end() {
- return child_iterator();
+// DependentScopeDeclRefExpr
+DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
+ NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args)
+ : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
+ true, true,
+ (NameInfo.containsUnexpandedParameterPack() ||
+ (Qualifier && Qualifier->containsUnexpandedParameterPack()))),
+ NameInfo(NameInfo), QualifierRange(QualifierRange), Qualifier(Qualifier),
+ HasExplicitTemplateArgs(Args != 0)
+{
+ if (Args) {
+ bool Dependent = true;
+ bool ContainsUnexpandedParameterPack
+ = ExprBits.ContainsUnexpandedParameterPack;
+
+ reinterpret_cast<ExplicitTemplateArgumentList*>(this+1)
+ ->initializeFrom(*Args, Dependent, ContainsUnexpandedParameterPack);
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ }
}
-// DependentScopeDeclRefExpr
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::Create(ASTContext &C,
NestedNameSpecifier *Qualifier,
@@ -266,251 +311,46 @@ DependentScopeDeclRefExpr::Create(ASTContext &C,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args) {
std::size_t size = sizeof(DependentScopeDeclRefExpr);
- if (Args) size += ExplicitTemplateArgumentList::sizeFor(*Args);
- void *Mem = C.Allocate(size);
-
- DependentScopeDeclRefExpr *DRE
- = new (Mem) DependentScopeDeclRefExpr(C.DependentTy,
- Qualifier, QualifierRange,
- NameInfo, Args != 0);
-
if (Args)
- reinterpret_cast<ExplicitTemplateArgumentList*>(DRE+1)
- ->initializeFrom(*Args);
-
- return DRE;
+ size += ExplicitTemplateArgumentList::sizeFor(*Args);
+ void *Mem = C.Allocate(size);
+ return new (Mem) DependentScopeDeclRefExpr(C.DependentTy,
+ Qualifier, QualifierRange,
+ NameInfo, Args);
}
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
+ bool HasExplicitTemplateArgs,
unsigned NumTemplateArgs) {
std::size_t size = sizeof(DependentScopeDeclRefExpr);
- if (NumTemplateArgs)
+ if (HasExplicitTemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size);
-
- return new (Mem) DependentScopeDeclRefExpr(QualType(), 0, SourceRange(),
- DeclarationNameInfo(),
- NumTemplateArgs != 0);
-}
-
-StmtIterator DependentScopeDeclRefExpr::child_begin() {
- return child_iterator();
+ DependentScopeDeclRefExpr *E
+ = new (Mem) DependentScopeDeclRefExpr(QualType(), 0, SourceRange(),
+ DeclarationNameInfo(), 0);
+ E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
+ return E;
}
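
The Create/CreateEmpty pairs in this file share one allocation pattern: compute the size of the node plus an optional trailing ExplicitTemplateArgumentList, allocate once, and placement-new the node at the front of the block. A sketch under simplified assumptions, with std::malloc standing in for ASTContext::Allocate and Node for the expression class:

    #include <cstdlib>
    #include <new>

    struct Node { bool HasArgs; };

    // One block holds the node plus, optionally, room for NumArgs trailing
    // template-argument slots; the node is constructed at the front.
    Node *createWithTrailingArgs(bool HasArgs, unsigned NumArgs,
                                 std::size_t ArgSlotSize) {
      std::size_t Size = sizeof(Node);
      if (HasArgs)
        Size += NumArgs * ArgSlotSize; // sizeFor(NumTemplateArgs) analogue
      void *Mem = std::malloc(Size);   // ASTContext::Allocate analogue
      return new (Mem) Node{HasArgs};
    }
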
-StmtIterator DependentScopeDeclRefExpr::child_end() {
- return child_iterator();
-}
+SourceRange CXXConstructExpr::getSourceRange() const {
+ if (ParenRange.isValid())
+ return SourceRange(Loc, ParenRange.getEnd());
-bool UnaryTypeTraitExpr::EvaluateTrait(ASTContext& C) const {
- switch(UTT) {
- default: assert(false && "Unknown type trait or not implemented");
- case UTT_IsPOD: return QueriedType->isPODType();
- case UTT_IsLiteral: return QueriedType->isLiteralType();
- case UTT_IsClass: // Fallthrough
- case UTT_IsUnion:
- if (const RecordType *Record = QueriedType->getAs<RecordType>()) {
- bool Union = Record->getDecl()->isUnion();
- return UTT == UTT_IsUnion ? Union : !Union;
- }
- return false;
- case UTT_IsEnum: return QueriedType->isEnumeralType();
- case UTT_IsPolymorphic:
- if (const RecordType *Record = QueriedType->getAs<RecordType>()) {
- // Type traits are only parsed in C++, so we've got CXXRecords.
- return cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic();
- }
- return false;
- case UTT_IsAbstract:
- if (const RecordType *RT = QueriedType->getAs<RecordType>())
- return cast<CXXRecordDecl>(RT->getDecl())->isAbstract();
- return false;
- case UTT_IsEmpty:
- if (const RecordType *Record = QueriedType->getAs<RecordType>()) {
- return !Record->getDecl()->isUnion()
- && cast<CXXRecordDecl>(Record->getDecl())->isEmpty();
- }
- return false;
- case UTT_HasTrivialConstructor:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If __is_pod (type) is true then the trait is true, else if type is
- // a cv class or union type (or array thereof) with a trivial default
- // constructor ([class.ctor]) then the trait is true, else it is false.
- if (QueriedType->isPODType())
- return true;
- if (const RecordType *RT =
- C.getBaseElementType(QueriedType)->getAs<RecordType>())
- return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialConstructor();
- return false;
- case UTT_HasTrivialCopy:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If __is_pod (type) is true or type is a reference type then
- // the trait is true, else if type is a cv class or union type
- // with a trivial copy constructor ([class.copy]) then the trait
- // is true, else it is false.
- if (QueriedType->isPODType() || QueriedType->isReferenceType())
- return true;
- if (const RecordType *RT = QueriedType->getAs<RecordType>())
- return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyConstructor();
- return false;
- case UTT_HasTrivialAssign:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If type is const qualified or is a reference type then the
- // trait is false. Otherwise if __is_pod (type) is true then the
- // trait is true, else if type is a cv class or union type with
- // a trivial copy assignment ([class.copy]) then the trait is
- // true, else it is false.
- // Note: the const and reference restrictions are interesting,
- // given that const and reference members don't prevent a class
- // from having a trivial copy assignment operator (but do cause
- // errors if the copy assignment operator is actually used, q.v.
- // [class.copy]p12).
-
- if (C.getBaseElementType(QueriedType).isConstQualified())
- return false;
- if (QueriedType->isPODType())
- return true;
- if (const RecordType *RT = QueriedType->getAs<RecordType>())
- return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyAssignment();
- return false;
- case UTT_HasTrivialDestructor:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If __is_pod (type) is true or type is a reference type
- // then the trait is true, else if type is a cv class or union
- // type (or array thereof) with a trivial destructor
- // ([class.dtor]) then the trait is true, else it is
- // false.
- if (QueriedType->isPODType() || QueriedType->isReferenceType())
- return true;
- if (const RecordType *RT =
- C.getBaseElementType(QueriedType)->getAs<RecordType>())
- return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor();
- return false;
- // TODO: Propagate nothrowness for implicitly declared special members.
- case UTT_HasNothrowAssign:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If type is const qualified or is a reference type then the
- // trait is false. Otherwise if __has_trivial_assign (type)
- // is true then the trait is true, else if type is a cv class
- // or union type with copy assignment operators that are known
- // not to throw an exception then the trait is true, else it is
- // false.
- if (C.getBaseElementType(QueriedType).isConstQualified())
- return false;
- if (QueriedType->isReferenceType())
- return false;
- if (QueriedType->isPODType())
- return true;
- if (const RecordType *RT = QueriedType->getAs<RecordType>()) {
- CXXRecordDecl* RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialCopyAssignment())
- return true;
-
- bool FoundAssign = false;
- bool AllNoThrow = true;
- DeclarationName Name = C.DeclarationNames.getCXXOperatorName(OO_Equal);
- DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = RD->lookup(Name);
- Op != OpEnd; ++Op) {
- CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
- if (Operator->isCopyAssignmentOperator()) {
- FoundAssign = true;
- const FunctionProtoType *CPT
- = Operator->getType()->getAs<FunctionProtoType>();
- if (!CPT->hasEmptyExceptionSpec()) {
- AllNoThrow = false;
- break;
- }
- }
+ SourceLocation End = Loc;
+ for (unsigned I = getNumArgs(); I > 0; --I) {
+ const Expr *Arg = getArg(I-1);
+ if (!Arg->isDefaultArgument()) {
+ SourceLocation NewEnd = Arg->getLocEnd();
+ if (NewEnd.isValid()) {
+ End = NewEnd;
+ break;
}
-
- return FoundAssign && AllNoThrow;
}
- return false;
- case UTT_HasNothrowCopy:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If __has_trivial_copy (type) is true then the trait is true, else
- // if type is a cv class or union type with copy constructors that are
- // known not to throw an exception then the trait is true, else it is
- // false.
- if (QueriedType->isPODType() || QueriedType->isReferenceType())
- return true;
- if (const RecordType *RT = QueriedType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialCopyConstructor())
- return true;
-
- bool FoundConstructor = false;
- bool AllNoThrow = true;
- unsigned FoundTQs;
- DeclarationName ConstructorName
- = C.DeclarationNames.getCXXConstructorName(
- C.getCanonicalType(QueriedType));
- DeclContext::lookup_const_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = RD->lookup(ConstructorName);
- Con != ConEnd; ++Con) {
- CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
- if (Constructor->isCopyConstructor(FoundTQs)) {
- FoundConstructor = true;
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
- if (!CPT->hasEmptyExceptionSpec()) {
- AllNoThrow = false;
- break;
- }
- }
- }
-
- return FoundConstructor && AllNoThrow;
- }
- return false;
- case UTT_HasNothrowConstructor:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If __has_trivial_constructor (type) is true then the trait is
- // true, else if type is a cv class or union type (or array
- // thereof) with a default constructor that is known not to
- // throw an exception then the trait is true, else it is false.
- if (QueriedType->isPODType())
- return true;
- if (const RecordType *RT =
- C.getBaseElementType(QueriedType)->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialConstructor())
- return true;
-
- if (CXXConstructorDecl *Constructor = RD->getDefaultConstructor()) {
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
- // TODO: check whether evaluating default arguments can throw.
- // For now, we'll be conservative and assume that they can throw.
- if (CPT->hasEmptyExceptionSpec() && CPT->getNumArgs() == 0)
- return true;
- }
- }
- return false;
- case UTT_HasVirtualDestructor:
- // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
- // If type is a class type with a virtual destructor ([class.dtor])
- // then the trait is true, else it is false.
- if (const RecordType *Record = QueriedType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
- if (CXXDestructorDecl *Destructor = RD->getDestructor())
- return Destructor->isVirtual();
- }
- return false;
}
-}
-SourceRange CXXConstructExpr::getSourceRange() const {
- // FIXME: Should we know where the parentheses are, if there are any?
- for (std::reverse_iterator<Stmt**> I(&Args[NumArgs]), E(&Args[0]); I!=E;++I) {
- // Ignore CXXDefaultExprs when computing the range, as they don't
- // have a range.
- if (!isa<CXXDefaultArgExpr>(*I))
- return SourceRange(Loc, (*I)->getLocEnd());
- }
-
- return SourceRange(Loc);
+ return SourceRange(Loc, End);
}
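
The rewritten getSourceRange scans the arguments from the back, skipping default arguments, which carry no source locations of their own, and stops at the first valid end location. The same scan in a self-contained form; Arg and the integer locations are illustrative:

    #include <cstddef>
    #include <vector>

    struct Arg { bool IsDefault; int EndLoc; }; // 0 models an invalid location

    // Return the end location of the last argument the user actually wrote,
    // or FallbackLoc when every argument was defaulted or location-less.
    int lastWrittenEnd(const std::vector<Arg> &Args, int FallbackLoc) {
      for (std::size_t I = Args.size(); I > 0; --I) {
        const Arg &A = Args[I - 1];
        if (!A.IsDefault && A.EndLoc != 0)
          return A.EndLoc;
      }
      return FallbackLoc;
    }
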
SourceRange CXXOperatorCallExpr::getSourceRange() const {
@@ -546,6 +386,17 @@ Expr *CXXMemberCallExpr::getImplicitObjectArgument() {
return 0;
}
+CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() {
+ Expr* ThisArg = getImplicitObjectArgument();
+ if (!ThisArg)
+ return 0;
+
+ if (ThisArg->getType()->isAnyPointerType())
+ return ThisArg->getType()->getPointeeType()->getAsCXXRecordDecl();
+
+ return ThisArg->getType()->getAsCXXRecordDecl();
+}
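
getRecordDecl answers which class a member call is invoked on: for ptr->f() it looks through one level of pointer to the pointee's class, while for obj.f() it asks the object type directly. An illustrative restatement with stand-in types (TypeLike and RecordLike are not clang names):

    struct RecordLike;

    struct TypeLike {
      RecordLike *AsRecord; // non-null if this is a class type
      TypeLike   *Pointee;  // non-null if this is a pointer type
    };

    RecordLike *recordBehindCall(const TypeLike *ThisArgType) {
      if (ThisArgType->Pointee)               // ptr->f(): class of the pointee
        return ThisArgType->Pointee->AsRecord;
      return ThisArgType->AsRecord;           // obj.f(): the object's own class
    }
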
+
SourceRange CXXMemberCallExpr::getSourceRange() const {
SourceLocation LocStart = getCallee()->getLocStart();
if (LocStart.isInvalid() && getNumArgs() > 0)
@@ -572,15 +423,18 @@ const char *CXXNamedCastExpr::getCastName() const {
}
CXXStaticCastExpr *CXXStaticCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
CastKind K, Expr *Op,
const CXXCastPath *BasePath,
TypeSourceInfo *WrittenTy,
- SourceLocation L) {
+ SourceLocation L,
+ SourceLocation RParenLoc) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(sizeof(CXXStaticCastExpr)
+ PathSize * sizeof(CXXBaseSpecifier*));
CXXStaticCastExpr *E =
- new (Buffer) CXXStaticCastExpr(T, K, Op, PathSize, WrittenTy, L);
+ new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
if (PathSize) E->setCastPath(*BasePath);
return E;
}
@@ -593,15 +447,18 @@ CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(ASTContext &C,
}
CXXDynamicCastExpr *CXXDynamicCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
CastKind K, Expr *Op,
const CXXCastPath *BasePath,
TypeSourceInfo *WrittenTy,
- SourceLocation L) {
+ SourceLocation L,
+ SourceLocation RParenLoc) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr)
+ PathSize * sizeof(CXXBaseSpecifier*));
CXXDynamicCastExpr *E =
- new (Buffer) CXXDynamicCastExpr(T, K, Op, PathSize, WrittenTy, L);
+ new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
if (PathSize) E->setCastPath(*BasePath);
return E;
}
@@ -614,14 +471,17 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(ASTContext &C,
}
CXXReinterpretCastExpr *
-CXXReinterpretCastExpr::Create(ASTContext &C, QualType T, CastKind K, Expr *Op,
+CXXReinterpretCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op,
const CXXCastPath *BasePath,
- TypeSourceInfo *WrittenTy, SourceLocation L) {
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer =
C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
CXXReinterpretCastExpr *E =
- new (Buffer) CXXReinterpretCastExpr(T, K, Op, PathSize, WrittenTy, L);
+ new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
if (PathSize) E->setCastPath(*BasePath);
return E;
}
@@ -633,10 +493,12 @@ CXXReinterpretCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
}
-CXXConstCastExpr *CXXConstCastExpr::Create(ASTContext &C, QualType T, Expr *Op,
+CXXConstCastExpr *CXXConstCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK, Expr *Op,
TypeSourceInfo *WrittenTy,
- SourceLocation L) {
- return new (C) CXXConstCastExpr(T, Op, WrittenTy, L);
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc);
}
CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(ASTContext &C) {
@@ -644,7 +506,7 @@ CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(ASTContext &C) {
}
CXXFunctionalCastExpr *
-CXXFunctionalCastExpr::Create(ASTContext &C, QualType T,
+CXXFunctionalCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
TypeSourceInfo *Written, SourceLocation L,
CastKind K, Expr *Op, const CXXCastPath *BasePath,
SourceLocation R) {
@@ -652,7 +514,7 @@ CXXFunctionalCastExpr::Create(ASTContext &C, QualType T,
void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
+ PathSize * sizeof(CXXBaseSpecifier*));
CXXFunctionalCastExpr *E =
- new (Buffer) CXXFunctionalCastExpr(T, Written, L, K, Op, PathSize, R);
+ new (Buffer) CXXFunctionalCastExpr(T, VK, Written, L, K, Op, PathSize, R);
if (PathSize) E->setCastPath(*BasePath);
return E;
}
@@ -689,15 +551,22 @@ CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
CXXConstructorDecl *Cons,
- QualType writtenTy,
- SourceLocation tyBeginLoc,
+ TypeSourceInfo *Type,
Expr **Args,
unsigned NumArgs,
- SourceLocation rParenLoc,
+ SourceRange parenRange,
bool ZeroInitialization)
- : CXXConstructExpr(C, CXXTemporaryObjectExprClass, writtenTy, tyBeginLoc,
- Cons, false, Args, NumArgs, ZeroInitialization),
- TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
+ Type->getType().getNonReferenceType(),
+ Type->getTypeLoc().getBeginLoc(),
+ Cons, false, Args, NumArgs, ZeroInitialization,
+ CXXConstructExpr::CK_Complete, parenRange),
+ Type(Type) {
+}
+
+SourceRange CXXTemporaryObjectExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(),
+ getParenRange().getEnd());
}
CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
@@ -705,10 +574,11 @@ CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
CXXConstructorDecl *D, bool Elidable,
Expr **Args, unsigned NumArgs,
bool ZeroInitialization,
- ConstructionKind ConstructKind) {
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
Elidable, Args, NumArgs, ZeroInitialization,
- ConstructKind);
+ ConstructKind, ParenRange);
}
CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
@@ -716,31 +586,39 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
CXXConstructorDecl *D, bool elidable,
Expr **args, unsigned numargs,
bool ZeroInitialization,
- ConstructionKind ConstructKind)
-: Expr(SC, T,
- T->isDependentType(),
- (T->isDependentType() ||
- CallExpr::hasAnyValueDependentArguments(args, numargs))),
- Constructor(D), Loc(Loc), Elidable(elidable),
- ZeroInitialization(ZeroInitialization), ConstructKind(ConstructKind),
- Args(0), NumArgs(numargs)
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange)
+ : Expr(SC, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(),
+ T->containsUnexpandedParameterPack()),
+ Constructor(D), Loc(Loc), ParenRange(ParenRange), Elidable(elidable),
+ ZeroInitialization(ZeroInitialization), ConstructKind(ConstructKind),
+ Args(0), NumArgs(numargs)
{
if (NumArgs) {
Args = new (C) Stmt*[NumArgs];
for (unsigned i = 0; i != NumArgs; ++i) {
assert(args[i] && "NULL argument in CXXConstructExpr");
+
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
Args[i] = args[i];
}
}
}
-CXXExprWithTemporaries::CXXExprWithTemporaries(ASTContext &C,
- Expr *subexpr,
- CXXTemporary **temps,
- unsigned numtemps)
- : Expr(CXXExprWithTemporariesClass, subexpr->getType(),
- subexpr->isTypeDependent(), subexpr->isValueDependent()),
+ExprWithCleanups::ExprWithCleanups(ASTContext &C,
+ Expr *subexpr,
+ CXXTemporary **temps,
+ unsigned numtemps)
+ : Expr(ExprWithCleanupsClass, subexpr->getType(),
+ subexpr->getValueKind(), subexpr->getObjectKind(),
+ subexpr->isTypeDependent(), subexpr->isValueDependent(),
+ subexpr->containsUnexpandedParameterPack()),
SubExpr(subexpr), Temps(0), NumTemps(0) {
if (numtemps) {
setNumTemporaries(C, numtemps);
@@ -749,75 +627,53 @@ CXXExprWithTemporaries::CXXExprWithTemporaries(ASTContext &C,
}
}
-void CXXExprWithTemporaries::setNumTemporaries(ASTContext &C, unsigned N) {
+void ExprWithCleanups::setNumTemporaries(ASTContext &C, unsigned N) {
assert(Temps == 0 && "Cannot resize with this");
NumTemps = N;
Temps = new (C) CXXTemporary*[NumTemps];
}
-CXXExprWithTemporaries *CXXExprWithTemporaries::Create(ASTContext &C,
- Expr *SubExpr,
- CXXTemporary **Temps,
- unsigned NumTemps) {
- return new (C) CXXExprWithTemporaries(C, SubExpr, Temps, NumTemps);
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C,
+ Expr *SubExpr,
+ CXXTemporary **Temps,
+ unsigned NumTemps) {
+ return new (C) ExprWithCleanups(C, SubExpr, Temps, NumTemps);
}
-// CXXBindTemporaryExpr
-Stmt::child_iterator CXXBindTemporaryExpr::child_begin() {
- return &SubExpr;
-}
-
-Stmt::child_iterator CXXBindTemporaryExpr::child_end() {
- return &SubExpr + 1;
-}
-
-// CXXConstructExpr
-Stmt::child_iterator CXXConstructExpr::child_begin() {
- return &Args[0];
-}
-Stmt::child_iterator CXXConstructExpr::child_end() {
- return &Args[0]+NumArgs;
-}
-
-// CXXExprWithTemporaries
-Stmt::child_iterator CXXExprWithTemporaries::child_begin() {
- return &SubExpr;
-}
-
-Stmt::child_iterator CXXExprWithTemporaries::child_end() {
- return &SubExpr + 1;
-}
-
-CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(
- SourceLocation TyBeginLoc,
- QualType T,
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
Expr **Args,
unsigned NumArgs,
SourceLocation RParenLoc)
- : Expr(CXXUnresolvedConstructExprClass, T.getNonReferenceType(),
- T->isDependentType(), true),
- TyBeginLoc(TyBeginLoc),
- Type(T),
+ : Expr(CXXUnresolvedConstructExprClass,
+ Type->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary,
+ Type->getType()->isDependentType(), true,
+ Type->getType()->containsUnexpandedParameterPack()),
+ Type(Type),
LParenLoc(LParenLoc),
RParenLoc(RParenLoc),
NumArgs(NumArgs) {
Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
- memcpy(StoredArgs, Args, sizeof(Expr *) * NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ StoredArgs[I] = Args[I];
+ }
}
CXXUnresolvedConstructExpr *
CXXUnresolvedConstructExpr::Create(ASTContext &C,
- SourceLocation TyBegin,
- QualType T,
+ TypeSourceInfo *Type,
SourceLocation LParenLoc,
Expr **Args,
unsigned NumArgs,
SourceLocation RParenLoc) {
void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
sizeof(Expr *) * NumArgs);
- return new (Mem) CXXUnresolvedConstructExpr(TyBegin, T, LParenLoc,
+ return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc,
Args, NumArgs, RParenLoc);
}
@@ -829,12 +685,8 @@ CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) {
return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
}
-Stmt::child_iterator CXXUnresolvedConstructExpr::child_begin() {
- return child_iterator(reinterpret_cast<Stmt **>(this + 1));
-}
-
-Stmt::child_iterator CXXUnresolvedConstructExpr::child_end() {
- return child_iterator(reinterpret_cast<Stmt **>(this + 1) + NumArgs);
+SourceRange CXXUnresolvedConstructExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(), RParenLoc);
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
@@ -846,17 +698,46 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
- : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, true, true),
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (Qualifier && Qualifier->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
Base(Base), BaseType(BaseType), IsArrow(IsArrow),
HasExplicitTemplateArgs(TemplateArgs != 0),
OperatorLoc(OperatorLoc),
Qualifier(Qualifier), QualifierRange(QualifierRange),
FirstQualifierFoundInScope(FirstQualifierFoundInScope),
MemberNameInfo(MemberNameInfo) {
- if (TemplateArgs)
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs);
+ if (TemplateArgs) {
+ bool Dependent = true;
+ bool ContainsUnexpandedParameterPack = false;
+ getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
+ ContainsUnexpandedParameterPack);
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
}
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (Qualifier && Qualifier->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasExplicitTemplateArgs(false), OperatorLoc(OperatorLoc),
+ Qualifier(Qualifier), QualifierRange(QualifierRange),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) { }
+
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::Create(ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
@@ -877,7 +758,7 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C,
if (TemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(*TemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignof<CXXDependentScopeMemberExpr>());
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
IsArrow, OperatorLoc,
Qualifier, QualifierRange,
@@ -887,8 +768,9 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C,
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
+ bool HasExplicitTemplateArgs,
unsigned NumTemplateArgs) {
- if (NumTemplateArgs == 0)
+ if (!HasExplicitTemplateArgs)
return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
0, SourceLocation(), 0,
SourceRange(), 0,
@@ -896,7 +778,7 @@ CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignof<CXXDependentScopeMemberExpr>());
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
CXXDependentScopeMemberExpr *E
= new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
0, SourceLocation(), 0,
@@ -906,18 +788,7 @@ CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
return E;
}
-Stmt::child_iterator CXXDependentScopeMemberExpr::child_begin() {
- return child_iterator(&Base);
-}
-
-Stmt::child_iterator CXXDependentScopeMemberExpr::child_end() {
- if (isImplicitAccess())
- return child_iterator(&Base);
- return child_iterator(&Base + 1);
-}
-
-UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C, QualType T,
- bool Dependent,
+UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C,
bool HasUnresolvedUsing,
Expr *Base, QualType BaseType,
bool IsArrow,
@@ -928,17 +799,21 @@ UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C, QualType T,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End)
- : OverloadExpr(UnresolvedMemberExprClass, C, T, Dependent,
+ : OverloadExpr(UnresolvedMemberExprClass, C,
Qualifier, QualifierRange, MemberNameInfo,
- TemplateArgs != 0, Begin, End),
+ TemplateArgs, Begin, End,
+ // Dependent
+ ((Base && Base->isTypeDependent()) ||
+ BaseType->isDependentType()),
+ // Contains unexpanded parameter pack
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ BaseType->containsUnexpandedParameterPack())),
IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
- if (TemplateArgs)
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs);
}
UnresolvedMemberExpr *
-UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent,
+UnresolvedMemberExpr::Create(ASTContext &C,
bool HasUnresolvedUsing,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
@@ -952,23 +827,23 @@ UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent,
if (TemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(*TemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>());
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
return new (Mem) UnresolvedMemberExpr(C,
- Dependent ? C.DependentTy : C.OverloadTy,
- Dependent, HasUnresolvedUsing, Base, BaseType,
+ HasUnresolvedUsing, Base, BaseType,
IsArrow, OperatorLoc, Qualifier, QualifierRange,
MemberNameInfo, TemplateArgs, Begin, End);
}
UnresolvedMemberExpr *
-UnresolvedMemberExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ unsigned NumTemplateArgs) {
std::size_t size = sizeof(UnresolvedMemberExpr);
- if (NumTemplateArgs != 0)
+ if (HasExplicitTemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>());
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
- E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
return E;
}
@@ -980,7 +855,7 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
// lookup.
CXXRecordDecl *Record = 0;
if (getQualifier()) {
- Type *T = getQualifier()->getAsType();
+ const Type *T = getQualifier()->getAsType();
assert(T && "qualifier in member expression does not name type");
Record = T->getAsCXXRecordDecl();
assert(Record && "qualifier in member expression does not name record");
@@ -1001,12 +876,18 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
return Record;
}
-Stmt::child_iterator UnresolvedMemberExpr::child_begin() {
- return child_iterator(&Base);
-}
+SubstNonTypeTemplateParmPackExpr::
+SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
+ true, false, true),
+ Param(Param), Arguments(ArgPack.pack_begin()),
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
-Stmt::child_iterator UnresolvedMemberExpr::child_end() {
- if (isImplicitAccess())
- return child_iterator(&Base);
- return child_iterator(&Base + 1);
+TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
}
+
+
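The CreateEmpty() changes above stop treating NumTemplateArgs == 0 as meaning "no explicit template argument list": an explicit list that happens to be empty still needs its ExplicitTemplateArgumentList trailing storage when deserialized. A hypothetical illustration of the three cases the new flag distinguishes:

    template<typename ...Ts> int f();  // declaration only, for illustration

    int a = f<int>();  // HasExplicitTemplateArgs, NumTemplateArgs == 1
    int b = f<>();     // HasExplicitTemplateArgs, NumTemplateArgs == 0
    int c = f();       // no explicit template argument list at all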
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
index d7e38eb..890898a 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -29,10 +29,27 @@ static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
- const ConditionalOperator *E);
+ const Expr *trueExpr,
+ const Expr *falseExpr);
static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
Cl::Kinds Kind, SourceLocation &Loc);
+static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
+ const Expr *E,
+ ExprValueKind Kind) {
+ switch (Kind) {
+ case VK_RValue:
+ return Lang.CPlusPlus && E->getType()->isRecordType() ?
+ Cl::CL_ClassTemporary : Cl::CL_PRValue;
+ case VK_LValue:
+ return Cl::CL_LValue;
+ case VK_XValue:
+ return Cl::CL_XValue;
+ }
+ llvm_unreachable("Invalid value category of implicit cast.");
+ return Cl::CL_PRValue;
+}
+
Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
assert(!TR->isReferenceType() && "Expressions can't have reference type.");
@@ -48,6 +65,19 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
kind = Cl::CL_Void;
}
+ // Enable this assertion for testing.
+ switch (kind) {
+ case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
+ case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
+ case Cl::CL_Function:
+ case Cl::CL_Void:
+ case Cl::CL_DuplicateVectorComponents:
+ case Cl::CL_MemberFunction:
+ case Cl::CL_SubObjCPropertySetting:
+ case Cl::CL_ClassTemporary:
+ case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
+ }
+
Cl::ModifiableType modifiable = Cl::CM_Untested;
if (Loc)
modifiable = IsModifiable(Ctx, this, kind, *Loc);
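The switch added above cross-checks the recursively computed classification against the value kind already stored on the expression; the invariant in table form:

    CL_LValue                                        <-> VK_LValue
    CL_XValue                                        <-> VK_XValue
    CL_Function, CL_Void, CL_DuplicateVectorComponents,
    CL_MemberFunction, CL_SubObjCPropertySetting,
    CL_ClassTemporary, CL_PRValue                    <-> VK_RValue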
@@ -60,7 +90,13 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
switch (E->getStmtClass()) {
// First come the expressions that are always lvalues, unconditionally.
-
+ case Stmt::NoStmtClass:
+#define ABSTRACT_STMT(Kind)
+#define STMT(Kind, Base) case Expr::Kind##Class:
+#define EXPR(Kind, Base)
+#include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("cannot classify a statement");
+ break;
case Expr::ObjCIsaExprClass:
// C++ [expr.prim.general]p1: A string literal is an lvalue.
case Expr::StringLiteralClass:
@@ -70,20 +106,56 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::PredefinedExprClass:
// Property references are lvalues
case Expr::ObjCPropertyRefExprClass:
- case Expr::ObjCImplicitSetterGetterRefExprClass:
// C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
case Expr::CXXTypeidExprClass:
// Unresolved lookups get classified as lvalues.
// FIXME: Is this wise? Should they get their own kind?
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
// ObjC instance variables are lvalues
// FIXME: ObjC++0x might have different rules
case Expr::ObjCIvarRefExprClass:
+ return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
- // FIXME: C++ might have a different opinion.
+ // In C++, they're class temporaries.
case Expr::CompoundLiteralExprClass:
- return Cl::CL_LValue;
+ return Ctx.getLangOptions().CPlusPlus? Cl::CL_ClassTemporary
+ : Cl::CL_LValue;
+
+ // Expressions that are prvalues.
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::SizeOfAlignOfExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::GNUNullExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::BlockExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ParenListExprClass:
+ case Expr::InitListExprClass:
+ case Expr::SizeOfPackExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ return Cl::CL_PRValue;
// Next come the complicated cases.
@@ -115,11 +187,22 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_LValue;
// GNU extensions, simply look through them.
- case UO_Real:
- case UO_Imag:
case UO_Extension:
return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+ // Treat _Real and _Imag basically as if they were member
+ // expressions: l-value only if the operand is a true l-value.
+ case UO_Real:
+ case UO_Imag: {
+ const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ Cl::Kinds K = ClassifyInternal(Ctx, Op);
+ if (K != Cl::CL_LValue) return K;
+
+ if (isa<ObjCPropertyRefExpr>(Op))
+ return Cl::CL_SubObjCPropertySetting;
+ return Cl::CL_LValue;
+ }
+
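Under the new rule __real and __imag are lvalues exactly when their operand is, mirroring member access. A small sketch of the intended behavior (GNU extension, compiled as C++):

    _Complex double z;
    __real z = 1.0;          // OK: z is an lvalue, so __real z is too
    // __real (z + z) = 1.0; // ill-formed: the operand is a prvalue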
// C++ [expr.pre.incr]p1: The result is the updated operand; it is an
// lvalue, [...]
// Not so in C.
@@ -131,19 +214,15 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_PRValue;
}
+ case Expr::OpaqueValueExprClass:
+ return ClassifyExprValueKind(Lang, E,
+ cast<OpaqueValueExpr>(E)->getValueKind());
+
// Implicit casts are lvalues if they're lvalue casts. Other than that, we
// only specifically record class temporaries.
case Expr::ImplicitCastExprClass:
- switch (cast<ImplicitCastExpr>(E)->getValueKind()) {
- case VK_RValue:
- return Lang.CPlusPlus && E->getType()->isRecordType() ?
- Cl::CL_ClassTemporary : Cl::CL_PRValue;
- case VK_LValue:
- return Cl::CL_LValue;
- case VK_XValue:
- return Cl::CL_XValue;
- }
- llvm_unreachable("Invalid value category of implicit cast.");
+ return ClassifyExprValueKind(Lang, E,
+ cast<ImplicitCastExpr>(E)->getValueKind());
// C++ [expr.prim.general]p4: The presence of parentheses does not affect
// whether the expression is an lvalue.
@@ -160,6 +239,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass:
case Expr::CXXMemberCallExprClass:
+ case Expr::CUDAKernelCallExprClass:
return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
// __builtin_choose_expr is equivalent to the chosen expression.
@@ -180,9 +260,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXBindTemporaryExprClass:
return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
- // And the temporary lifetime guard.
- case Expr::CXXExprWithTemporariesClass:
- return ClassifyInternal(Ctx, cast<CXXExprWithTemporaries>(E)->getSubExpr());
+ // And the cleanups guard.
+ case Expr::ExprWithCleanupsClass:
+ return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr());
// Casts depend completely on the target type. All casts work the same.
case Expr::CStyleCastExprClass:
@@ -195,10 +275,18 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
- case Expr::ConditionalOperatorClass:
+ case Expr::BinaryConditionalOperatorClass: {
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ case Expr::ConditionalOperatorClass: {
// Once again, only C++ is interesting.
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
- return ClassifyConditional(Ctx, cast<ConditionalOperator>(E));
+ const ConditionalOperator *co = cast<ConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
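Both conditional forms now funnel through ClassifyConditional on their true and false arms, so the GNU binary form x ?: y classifies exactly like x ? x : y with x evaluated once. A sketch of what that preserves in C++:

    extern int a, b;
    (a ?: b) = 42;  // lvalue: both arms are lvalues of the same type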
// ObjC message sends are effectively function calls, if the target function
// is known.
@@ -207,17 +295,35 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
cast<ObjCMessageExpr>(E)->getMethodDecl()) {
return ClassifyUnnamed(Ctx, Method->getResultType());
}
-
+ return Cl::CL_PRValue;
+
// Some C++ expressions are always class temporaries.
case Expr::CXXConstructExprClass:
case Expr::CXXTemporaryObjectExprClass:
- case Expr::CXXScalarValueInitExprClass:
return Cl::CL_ClassTemporary;
- // Everything we haven't handled is a prvalue.
- default:
+ case Expr::VAArgExprClass:
+ return ClassifyUnnamed(Ctx, E->getType());
+
+ case Expr::DesignatedInitExprClass:
+ return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
+
+ case Expr::StmtExprClass: {
+ const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
+ if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
+ return ClassifyUnnamed(Ctx, LastExpr->getType());
return Cl::CL_PRValue;
}
+
+ case Expr::CXXUuidofExprClass:
+ return Cl::CL_LValue;
+
+ case Expr::PackExpansionExprClass:
+ return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
+ }
+
+ llvm_unreachable("unhandled expression kind in classification");
+ return Cl::CL_LValue;
}
/// ClassifyDecl - Return the classification of an expression referencing the
@@ -239,6 +345,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
islvalue = NTTParm->getType()->isReferenceType();
else
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ isa<IndirectFieldDecl>(D) ||
(Ctx.getLangOptions().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
@@ -274,9 +381,8 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
if (E->isArrow())
return Cl::CL_LValue;
// ObjC property accesses are not lvalues, but get special treatment.
- Expr *Base = E->getBase();
- if (isa<ObjCPropertyRefExpr>(Base) ||
- isa<ObjCImplicitSetterGetterRefExpr>(Base))
+ Expr *Base = E->getBase()->IgnoreParens();
+ if (isa<ObjCPropertyRefExpr>(Base))
return Cl::CL_SubObjCPropertySetting;
return ClassifyInternal(Ctx, Base);
}
@@ -301,6 +407,9 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
// *E1 is an lvalue
if (E->isArrow())
return Cl::CL_LValue;
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
return ClassifyInternal(Ctx, E->getBase());
}
@@ -320,8 +429,10 @@ static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
assert(Ctx.getLangOptions().CPlusPlus &&
"This is only relevant for C++.");
// C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ // Except we override this for writes to ObjC properties.
if (E->isAssignmentOp())
- return Cl::CL_LValue;
+ return (E->getLHS()->getObjectKind() == OK_ObjCProperty
+ ? Cl::CL_PRValue : Cl::CL_LValue);
// C++ [expr.comma]p1: the result is of the same value category as its right
// operand, [...].
@@ -345,13 +456,11 @@ static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
return Cl::CL_PRValue;
}
-static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
- const ConditionalOperator *E) {
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
+ const Expr *False) {
assert(Ctx.getLangOptions().CPlusPlus &&
"This is only relevant for C++.");
- Expr *True = E->getTrueExpr();
- Expr *False = E->getFalseExpr();
// C++ [expr.cond]p2
// If either the second or the third operand has type (cv) void, [...]
// the result [...] is a prvalue.
@@ -375,9 +484,10 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
if (Kind == Cl::CL_PRValue) {
// For the sake of better diagnostics, we want to specifically recognize
// use of the GCC cast-as-lvalue extension.
- if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E->IgnoreParens())){
- if (CE->getSubExpr()->Classify(Ctx).isLValue()) {
- Loc = CE->getLParenLoc();
+ if (const ExplicitCastExpr *CE =
+ dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
+ if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
+ Loc = CE->getExprLoc();
return Cl::CM_LValueCast;
}
}
@@ -401,9 +511,8 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Assignment to a property in ObjC is an implicit setter access. But a
// setter might not exist.
- if (const ObjCImplicitSetterGetterRefExpr *Expr =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(E)) {
- if (Expr->getSetterMethod() == 0)
+ if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (Expr->isImplicitProperty() && Expr->getImplicitPropertySetter() == 0)
return Cl::CM_NoSetterProperty;
}
@@ -420,7 +529,8 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Records with any const fields (recursively) are not modifiable.
if (const RecordType *R = CT->getAs<RecordType>()) {
- assert(!Ctx.getLangOptions().CPlusPlus &&
+ assert((E->getObjectKind() == OK_ObjCProperty ||
+ !Ctx.getLangOptions().CPlusPlus) &&
"C++ struct assignment should be resolved by the "
"copy assignment operator.");
if (R->hasConstFields())
@@ -430,7 +540,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
return Cl::CM_Modifiable;
}
-Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
+Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
Classification VC = Classify(Ctx);
switch (VC.getKind()) {
case Cl::CL_LValue: return LV_Valid;
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
index 7347f5a..656bb99 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -43,12 +43,20 @@ using llvm::APFloat;
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
struct EvalInfo {
- ASTContext &Ctx;
+ const ASTContext &Ctx;
/// EvalResult - Contains information about the evaluation.
Expr::EvalResult &EvalResult;
- EvalInfo(ASTContext &ctx, Expr::EvalResult& evalresult)
+ llvm::DenseMap<const OpaqueValueExpr*, APValue> OpaqueValues;
+ const APValue *getOpaqueValue(const OpaqueValueExpr *e) {
+ llvm::DenseMap<const OpaqueValueExpr*, APValue>::iterator
+ i = OpaqueValues.find(e);
+ if (i == OpaqueValues.end()) return 0;
+ return &i->second;
+ }
+
+ EvalInfo(const ASTContext &ctx, Expr::EvalResult& evalresult)
: Ctx(ctx), EvalResult(evalresult) {}
};
@@ -73,12 +81,24 @@ namespace {
APSInt &getComplexIntReal() { return IntReal; }
APSInt &getComplexIntImag() { return IntImag; }
- void moveInto(APValue &v) {
+ void moveInto(APValue &v) const {
if (isComplexFloat())
v = APValue(FloatReal, FloatImag);
else
v = APValue(IntReal, IntImag);
}
+ void setFrom(const APValue &v) {
+ assert(v.isComplexFloat() || v.isComplexInt());
+ if (v.isComplexFloat()) {
+ makeComplexFloat();
+ FloatReal = v.getComplexFloatReal();
+ FloatImag = v.getComplexFloatImag();
+ } else {
+ makeComplexInt();
+ IntReal = v.getComplexIntReal();
+ IntImag = v.getComplexIntImag();
+ }
+ }
};
struct LValue {
@@ -88,12 +108,18 @@ namespace {
Expr *getLValueBase() { return Base; }
CharUnits getLValueOffset() { return Offset; }
- void moveInto(APValue &v) {
+ void moveInto(APValue &v) const {
v = APValue(Base, Offset);
}
+ void setFrom(const APValue &v) {
+ assert(v.isLValue());
+ Base = v.getLValueBase();
+ Offset = v.getLValueOffset();
+ }
};
}
+static bool Evaluate(EvalInfo &info, const Expr *E);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
@@ -192,7 +218,7 @@ static bool HandleConversionToBool(const Expr* E, bool& Result,
}
static APSInt HandleFloatToIntCast(QualType DestType, QualType SrcType,
- APFloat &Value, ASTContext &Ctx) {
+ APFloat &Value, const ASTContext &Ctx) {
unsigned DestWidth = Ctx.getIntWidth(DestType);
// Determine whether we are converting to unsigned or signed.
bool DestSigned = DestType->isSignedIntegerType();
@@ -206,7 +232,7 @@ static APSInt HandleFloatToIntCast(QualType DestType, QualType SrcType,
}
static APFloat HandleFloatToFloatCast(QualType DestType, QualType SrcType,
- APFloat &Value, ASTContext &Ctx) {
+ APFloat &Value, const ASTContext &Ctx) {
bool ignored;
APFloat Result = Value;
Result.convert(Ctx.getFloatTypeSemantics(DestType),
@@ -215,18 +241,18 @@ static APFloat HandleFloatToFloatCast(QualType DestType, QualType SrcType,
}
static APSInt HandleIntToIntCast(QualType DestType, QualType SrcType,
- APSInt &Value, ASTContext &Ctx) {
+ APSInt &Value, const ASTContext &Ctx) {
unsigned DestWidth = Ctx.getIntWidth(DestType);
APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
- Result.extOrTrunc(DestWidth);
+ Result = Result.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerType());
return Result;
}
static APFloat HandleIntToFloatCast(QualType DestType, QualType SrcType,
- APSInt &Value, ASTContext &Ctx) {
+ APSInt &Value, const ASTContext &Ctx) {
APFloat Result(Ctx.getFloatTypeSemantics(DestType), 1);
Result.convertFromAPInt(Value, Value.isSigned(),
@@ -291,8 +317,34 @@ public:
if (Visit(E->getInit(i))) return true;
return false;
}
+
+ bool VisitSizeOfPackExpr(SizeOfPackExpr *) { return false; }
};
+class OpaqueValueEvaluation {
+ EvalInfo &info;
+ OpaqueValueExpr *opaqueValue;
+
+public:
+ OpaqueValueEvaluation(EvalInfo &info, OpaqueValueExpr *opaqueValue,
+ Expr *value)
+ : info(info), opaqueValue(opaqueValue) {
+
+ // If evaluation fails, fail immediately.
+ if (!Evaluate(info, value)) {
+ this->opaqueValue = 0;
+ return;
+ }
+ info.OpaqueValues[opaqueValue] = info.EvalResult.Val;
+ }
+
+ bool hasError() const { return opaqueValue == 0; }
+
+ ~OpaqueValueEvaluation() {
+ if (opaqueValue) info.OpaqueValues.erase(opaqueValue);
+ }
+};
+
} // end anonymous namespace
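OpaqueValueEvaluation is an RAII binding for the shared operand of the GNU x ?: y form: the constructor evaluates it once and publishes the result in EvalInfo::OpaqueValues, the arms then resolve their OpaqueValueExpr references through getOpaqueValue(), and the destructor erases the entry so a stale value never leaks into sibling evaluations. The VisitOpaqueValueExpr overloads added throughout this file all follow one pattern, sketched here:

    const APValue *value = Info.getOpaqueValue(e);
    if (!value)  // not currently bound: fall back to the source expression
      return e->getSourceExpr() ? Visit(e->getSourceExpr()) : false;
    Result.setFrom(*value);
    return true;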
//===----------------------------------------------------------------------===//
@@ -402,7 +454,7 @@ bool LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
break;
}
- Result.Offset += CharUnits::fromQuantity(RL.getFieldOffset(i) / 8);
+ Result.Offset += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
return true;
}
@@ -460,17 +512,20 @@ public:
{ return Success(E); }
bool VisitCallExpr(CallExpr *E);
bool VisitBlockExpr(BlockExpr *E) {
- if (!E->hasBlockDeclRefExprs())
+ if (!E->getBlockDecl()->hasCaptures())
return Success(E);
return false;
}
bool VisitImplicitValueInitExpr(ImplicitValueInitExpr *E)
{ return Success((Expr*)0); }
+ bool VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
bool VisitConditionalOperator(ConditionalOperator *E);
bool VisitChooseExpr(ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
bool VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E)
{ return Success((Expr*)0); }
+
+ bool VisitOpaqueValueExpr(OpaqueValueExpr *E);
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
@@ -532,35 +587,6 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
default:
break;
- case CK_Unknown: {
- // FIXME: The handling for CK_Unknown is ugly/shouldn't be necessary!
-
- // Check for pointer->pointer cast
- if (SubExpr->getType()->isPointerType() ||
- SubExpr->getType()->isObjCObjectPointerType() ||
- SubExpr->getType()->isNullPtrType() ||
- SubExpr->getType()->isBlockPointerType())
- return Visit(SubExpr);
-
- if (SubExpr->getType()->isIntegralOrEnumerationType()) {
- APValue Value;
- if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
- break;
-
- if (Value.isInt()) {
- Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
- Result.Base = 0;
- Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
- return true;
- } else {
- Result.Base = Value.getLValueBase();
- Result.Offset = Value.getLValueOffset();
- return true;
- }
- }
- break;
- }
-
case CK_NoOp:
case CK_BitCast:
case CK_LValueBitCast:
@@ -568,13 +594,54 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
case CK_AnyPointerToBlockPointerCast:
return Visit(SubExpr);
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ LValue BaseLV;
+ if (!EvaluatePointer(E->getSubExpr(), BaseLV, Info))
+ return false;
+
+ // Now figure out the necessary offset to add to the baseLV to get from
+ // the derived class to the base class.
+ CharUnits Offset = CharUnits::Zero();
+
+ QualType Ty = E->getSubExpr()->getType();
+ const CXXRecordDecl *DerivedDecl =
+ Ty->getAs<PointerType>()->getPointeeType()->getAsCXXRecordDecl();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ const CXXBaseSpecifier *Base = *PathI;
+
+ // FIXME: If the base is virtual, we'd need to determine the type of the
+ // most derived class and we don't support that right now.
+ if (Base->isVirtual())
+ return false;
+
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
+
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+ DerivedDecl = BaseDecl;
+ }
+
+ Result.Base = BaseLV.getLValueBase();
+ Result.Offset = BaseLV.getLValueOffset() + Offset;
+ return true;
+ }
+
+ case CK_NullToPointer: {
+ Result.Base = 0;
+ Result.Offset = CharUnits::Zero();
+ return true;
+ }
+
case CK_IntegralToPointer: {
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
if (Value.isInt()) {
- Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
+ Value.getInt() = Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
Result.Base = 0;
Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
return true;
@@ -602,6 +669,26 @@ bool PointerExprEvaluator::VisitCallExpr(CallExpr *E) {
return false;
}
+bool PointerExprEvaluator::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ const APValue *value = Info.getOpaqueValue(e);
+ if (!value)
+ return (e->getSourceExpr() ? Visit(e->getSourceExpr()) : false);
+ Result.setFrom(*value);
+ return true;
+}
+
+bool PointerExprEvaluator::
+VisitBinaryConditionalOperator(BinaryConditionalOperator *e) {
+ OpaqueValueEvaluation opaque(Info, e->getOpaqueValue(), e->getCommon());
+ if (opaque.hasError()) return false;
+
+ bool cond;
+ if (!HandleConversionToBool(e->getCond(), cond, Info))
+ return false;
+
+ return Visit(cond ? e->getTrueExpr() : e->getFalseExpr());
+}
+
bool PointerExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
bool BoolResult;
if (!HandleConversionToBool(E->getCond(), BoolResult, Info))
@@ -718,8 +805,7 @@ APValue VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
llvm::SmallVector<APValue, 4> Elts;
for (unsigned i = 0; i != NElts; ++i) {
- APSInt Tmp = Init;
- Tmp.extOrTrunc(EltWidth);
+ APSInt Tmp = Init.extOrTrunc(EltWidth);
if (EltTy->isIntegerType())
Elts.push_back(APValue(Tmp));
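This hunk and the HandleIntToIntCast change above track an LLVM API change: APInt/APSInt resizing operations such as extOrTrunc() now return the resized value instead of mutating in place. A before/after sketch:

    llvm::APSInt V(64);
    // old: V.extOrTrunc(32);  // mutated V in place
    V = V.extOrTrunc(32);      // new: assign the returned value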
@@ -898,15 +984,14 @@ public:
bool VisitCharacterLiteral(const CharacterLiteral *E) {
return Success(E->getValue(), E);
}
- bool VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
- // Per gcc docs "this built-in function ignores top level
- // qualifiers". We need to use the canonical version to properly
- // be able to strip CRV qualifiers from the type.
- QualType T0 = Info.Ctx.getCanonicalType(E->getArgType1());
- QualType T1 = Info.Ctx.getCanonicalType(E->getArgType2());
- return Success(Info.Ctx.typesAreCompatible(T0.getUnqualifiedType(),
- T1.getUnqualifiedType()),
- E);
+
+ bool VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ const APValue *value = Info.getOpaqueValue(e);
+ if (!value) {
+ if (e->getSourceExpr()) return Visit(e->getSourceExpr());
+ return Error(e->getExprLoc(), diag::note_invalid_subexpr_in_ice, e);
+ }
+ return Success(value->getInt(), e);
}
bool CheckReferencedDecl(const Expr *E, const Decl *D);
@@ -927,6 +1012,7 @@ public:
bool VisitOffsetOfExpr(const OffsetOfExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
bool VisitConditionalOperator(const ConditionalOperator *E);
+ bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E);
bool VisitCastExpr(CastExpr* E);
bool VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
@@ -948,7 +1034,11 @@ public:
}
bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
- return Success(E->EvaluateTrait(Info.Ctx), E);
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
}
bool VisitChooseExpr(const ChooseExpr *E) {
@@ -958,6 +1048,9 @@ public:
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
+ bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
+
private:
CharUnits GetAlignOfExpr(const Expr *E);
CharUnits GetAlignOfType(QualType T);
@@ -1018,7 +1111,6 @@ bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
}
VD->setEvaluatedValue(APValue());
- return false;
}
}
}
@@ -1160,6 +1252,24 @@ bool IntExprEvaluator::VisitCallExpr(CallExpr *E) {
case Builtin::BI__builtin_expect:
return Visit(E->getArg(0));
+
+ case Builtin::BIstrlen:
+ case Builtin::BI__builtin_strlen:
+ // As an extension, we support strlen() and __builtin_strlen() as constant
+ // expressions when the argument is a string literal.
+ if (StringLiteral *S
+ = dyn_cast<StringLiteral>(E->getArg(0)->IgnoreParenImpCasts())) {
+ // The string literal may have embedded null characters. Find the first
+ // one and truncate there.
+ llvm::StringRef Str = S->getString();
+ llvm::StringRef::size_type Pos = Str.find(0);
+ if (Pos != llvm::StringRef::npos)
+ Str = Str.substr(0, Pos);
+
+ return Success(Str.size(), E);
+ }
+
+ return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
}
}
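With this extension the evaluator folds strlen of a string literal, truncating at the first embedded null before measuring. A usage sketch, assuming clang's folding makes the bound a constant:

    char two[__builtin_strlen("ab\0cd") + 1];  // folds to char two[3]
    char six[__builtin_strlen("hello") + 1];   // folds to char six[6]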
@@ -1403,12 +1513,25 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return Error(E->getOperatorLoc(), diag::note_expr_divide_by_zero, E);
return Success(Result.getInt() % RHS, E);
case BO_Shl: {
- // FIXME: Warn about out of range shift amounts!
- unsigned SA =
- (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
+ // During constant-folding, a negative shift is an opposite shift.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ RHS = -RHS;
+ goto shift_right;
+ }
+
+ shift_left:
+ unsigned SA
+ = (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
return Success(Result.getInt() << SA, E);
}
case BO_Shr: {
+ // During constant-folding, a negative shift is an opposite shift.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ RHS = -RHS;
+ goto shift_left;
+ }
+
+ shift_right:
unsigned SA =
(unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
return Success(Result.getInt() >> SA, E);
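The goto pairing above makes a negative constant shift amount fold as the opposite shift instead of producing an arbitrary value. Folding behavior only (at runtime such shifts are undefined):

    8 << -2   // folded as 8 >> 2 == 2
    1 >> -4   // folded as 1 << 4 == 16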
@@ -1423,6 +1546,18 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
}
}
+bool IntExprEvaluator::
+VisitBinaryConditionalOperator(const BinaryConditionalOperator *e) {
+ OpaqueValueEvaluation opaque(Info, e->getOpaqueValue(), e->getCommon());
+ if (opaque.hasError()) return false;
+
+ bool cond;
+ if (!HandleConversionToBool(e->getCond(), cond, Info))
+ return false;
+
+ return Visit(cond ? e->getTrueExpr() : e->getFalseExpr());
+}
+
bool IntExprEvaluator::VisitConditionalOperator(const ConditionalOperator *E) {
bool Cond;
if (!HandleConversionToBool(E->getCond(), Cond, Info))
@@ -1439,12 +1574,9 @@ CharUnits IntExprEvaluator::GetAlignOfType(QualType T) {
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
T = Ref->getPointeeType();
- // Get information about the alignment.
- unsigned CharSize = Info.Ctx.Target.getCharWidth();
-
// __alignof is defined to return the preferred alignment.
- return CharUnits::fromQuantity(
- Info.Ctx.getPreferredTypeAlign(T.getTypePtr()) / CharSize);
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
}
CharUnits IntExprEvaluator::GetAlignOfExpr(const Expr *E) {
@@ -1527,17 +1659,9 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *E) {
return false;
RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
- unsigned i = 0;
- // FIXME: It would be nice if we didn't have to loop here!
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- Field != FieldEnd; (void)++Field, ++i) {
- if (*Field == MemberDecl)
- break;
- }
+ unsigned i = MemberDecl->getFieldIndex();
assert(i < RL.getFieldCount() && "offsetof field in wrong type");
- Result += CharUnits::fromQuantity(
- RL.getFieldOffset(i) / Info.Ctx.getCharWidth());
+ Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
CurrentType = MemberDecl->getType().getNonReferenceType();
break;
}
@@ -1565,9 +1689,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *E) {
return false;
// Add the offset to the base.
- Result += CharUnits::fromQuantity(
- RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()))
- / Info.Ctx.getCharWidth());
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
break;
}
}
@@ -1723,6 +1845,14 @@ bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
return Success(0, E);
}
+bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+ return Success(E->getPackLength(), E);
+}
+
+bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Success(E->getValue(), E);
+}
+
//===----------------------------------------------------------------------===//
// Float Evaluation
//===----------------------------------------------------------------------===//
@@ -1749,6 +1879,7 @@ public:
bool VisitCastExpr(CastExpr *E);
bool VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
bool VisitConditionalOperator(ConditionalOperator *E);
+ bool VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
bool VisitChooseExpr(const ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
@@ -1757,6 +1888,16 @@ public:
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+
+ bool VisitOpaqueValueExpr(const OpaqueValueExpr *e) {
+ const APValue *value = Info.getOpaqueValue(e);
+ if (!value)
+ return (e->getSourceExpr() ? Visit(e->getSourceExpr()) : false);
+ Result = value->getFloat();
+ return true;
+ }
+
// FIXME: Missing: array subscript of vector, member of vector,
// ImplicitValueInitExpr
};
@@ -1767,7 +1908,7 @@ static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
return FloatExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
-static bool TryEvaluateBuiltinNaN(ASTContext &Context,
+static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
QualType ResultTy,
const Expr *Arg,
bool SNaN,
@@ -1844,6 +1985,45 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
}
+bool FloatExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
+ const Decl *D = E->getDecl();
+ if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D)) return false;
+ const VarDecl *VD = cast<VarDecl>(D);
+
+ // Require the qualifiers to be const and not volatile.
+ CanQualType T = Info.Ctx.getCanonicalType(E->getType());
+ if (!T.isConstQualified() || T.isVolatileQualified())
+ return false;
+
+ const Expr *Init = VD->getAnyInitializer();
+ if (!Init) return false;
+
+ if (APValue *V = VD->getEvaluatedValue()) {
+ if (V->isFloat()) {
+ Result = V->getFloat();
+ return true;
+ }
+ return false;
+ }
+
+ if (VD->isEvaluatingValue())
+ return false;
+
+ VD->setEvaluatingValue();
+
+ Expr::EvalResult InitResult;
+ if (Init->Evaluate(InitResult, Info.Ctx) && !InitResult.HasSideEffects &&
+ InitResult.Val.isFloat()) {
+ // Cache the evaluated value in the variable declaration.
+ Result = InitResult.Val.getFloat();
+ VD->setEvaluatedValue(InitResult.Val);
+ return true;
+ }
+
+ VD->setEvaluatedValue(APValue());
+ return false;
+}
+
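VisitDeclRefExpr gives the float evaluator the const-variable folding the integer path already had, including the APValue cache on the VarDecl and the isEvaluatingValue() guard against self-referential initializers. A small case it unlocks, sketched in C++:

    const float pi  = 3.14159f;
    const float tau = pi * 2.0f;  // the initializer now folds via the cache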
bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue CV;
@@ -1902,6 +2082,10 @@ bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return true;
}
+ // We can't evaluate pointer-to-member operations.
+ if (E->isPtrMemOp())
+ return false;
+
// FIXME: Diagnostics? I really don't understand how the warnings
// and errors are supposed to work.
APFloat RHS(0.0);
@@ -1950,7 +2134,14 @@ bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
Result, Info.Ctx);
return true;
}
- // FIXME: Handle complex types
+
+ if (E->getCastKind() == CK_FloatingComplexToReal) {
+ ComplexValue V;
+ if (!EvaluateComplex(SubExpr, V, Info))
+ return false;
+ Result = V.getComplexFloatReal();
+ return true;
+ }
return false;
}
@@ -1960,6 +2151,18 @@ bool FloatExprEvaluator::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E)
return true;
}
+bool FloatExprEvaluator::
+VisitBinaryConditionalOperator(BinaryConditionalOperator *e) {
+ OpaqueValueEvaluation opaque(Info, e->getOpaqueValue(), e->getCommon());
+ if (opaque.hasError()) return false;
+
+ bool cond;
+ if (!HandleConversionToBool(e->getCond(), cond, Info))
+ return false;
+
+ return Visit(cond ? e->getTrueExpr() : e->getFalseExpr());
+}
+
bool FloatExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
bool Cond;
if (!HandleConversionToBool(E->getCond(), Cond, Info))
@@ -1997,12 +2200,21 @@ public:
bool VisitCastExpr(CastExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitConditionalOperator(const ConditionalOperator *E);
+ bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E);
bool VisitChooseExpr(const ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
bool VisitUnaryExtension(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
- // FIXME Missing: unary +/-/~, binary div, ImplicitValueInitExpr,
- // conditional ?:, comma
+ bool VisitOpaqueValueExpr(const OpaqueValueExpr *e) {
+ const APValue *value = Info.getOpaqueValue(e);
+ if (!value)
+ return (e->getSourceExpr() ? Visit(e->getSourceExpr()) : false);
+ Result.setFrom(*value);
+ return true;
+ }
+ // FIXME Missing: ImplicitValueInitExpr
};
} // end anonymous namespace
@@ -2038,99 +2250,143 @@ bool ComplexExprEvaluator::VisitImaginaryLiteral(ImaginaryLiteral *E) {
}
bool ComplexExprEvaluator::VisitCastExpr(CastExpr *E) {
- Expr* SubExpr = E->getSubExpr();
- QualType EltType = E->getType()->getAs<ComplexType>()->getElementType();
- QualType SubType = SubExpr->getType();
- if (SubType->isRealFloatingType()) {
+ switch (E->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ return Visit(E->getSubExpr());
+
+ case CK_Dependent:
+ case CK_GetObjCProperty:
+ case CK_UserDefinedConversion:
+ return false;
+
+ case CK_FloatingRealToComplex: {
APFloat &Real = Result.FloatReal;
- if (!EvaluateFloat(SubExpr, Real, Info))
+ if (!EvaluateFloat(E->getSubExpr(), Real, Info))
return false;
- if (EltType->isRealFloatingType()) {
- Result.makeComplexFloat();
- Real = HandleFloatToFloatCast(EltType, SubType, Real, Info.Ctx);
- Result.FloatImag = APFloat(Real.getSemantics());
- return true;
- } else {
- Result.makeComplexInt();
- Result.IntReal = HandleFloatToIntCast(EltType, SubType, Real, Info.Ctx);
- Result.IntImag = APSInt(Result.IntReal.getBitWidth(),
- !Result.IntReal.isSigned());
- return true;
- }
- } else if (SubType->isIntegerType()) {
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ return true;
+ }
+
+ case CK_FloatingComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ Result.FloatReal
+ = HandleFloatToFloatCast(To, From, Result.FloatReal, Info.Ctx);
+ Result.FloatImag
+ = HandleFloatToFloatCast(To, From, Result.FloatImag, Info.Ctx);
+ return true;
+ }
+
+ case CK_FloatingComplexToIntegralComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexInt();
+ Result.IntReal = HandleFloatToIntCast(To, From, Result.FloatReal, Info.Ctx);
+ Result.IntImag = HandleFloatToIntCast(To, From, Result.FloatImag, Info.Ctx);
+ return true;
+ }
+
+ case CK_IntegralRealToComplex: {
APSInt &Real = Result.IntReal;
- if (!EvaluateInteger(SubExpr, Real, Info))
+ if (!EvaluateInteger(E->getSubExpr(), Real, Info))
return false;
- if (EltType->isRealFloatingType()) {
- Result.makeComplexFloat();
- Result.FloatReal
- = HandleIntToFloatCast(EltType, SubType, Real, Info.Ctx);
- Result.FloatImag = APFloat(Result.FloatReal.getSemantics());
- return true;
- } else {
- Result.makeComplexInt();
- Real = HandleIntToIntCast(EltType, SubType, Real, Info.Ctx);
- Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
- return true;
- }
- } else if (const ComplexType *CT = SubType->getAs<ComplexType>()) {
- if (!Visit(SubExpr))
+ Result.makeComplexInt();
+ Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
+ return true;
+ }
+
+ case CK_IntegralComplexCast: {
+ if (!Visit(E->getSubExpr()))
return false;
- QualType SrcType = CT->getElementType();
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
- if (Result.isComplexFloat()) {
- if (EltType->isRealFloatingType()) {
- Result.makeComplexFloat();
- Result.FloatReal = HandleFloatToFloatCast(EltType, SrcType,
- Result.FloatReal,
- Info.Ctx);
- Result.FloatImag = HandleFloatToFloatCast(EltType, SrcType,
- Result.FloatImag,
- Info.Ctx);
- return true;
- } else {
- Result.makeComplexInt();
- Result.IntReal = HandleFloatToIntCast(EltType, SrcType,
- Result.FloatReal,
- Info.Ctx);
- Result.IntImag = HandleFloatToIntCast(EltType, SrcType,
- Result.FloatImag,
- Info.Ctx);
- return true;
- }
- } else {
- assert(Result.isComplexInt() && "Invalid evaluate result.");
- if (EltType->isRealFloatingType()) {
- Result.makeComplexFloat();
- Result.FloatReal = HandleIntToFloatCast(EltType, SrcType,
- Result.IntReal,
- Info.Ctx);
- Result.FloatImag = HandleIntToFloatCast(EltType, SrcType,
- Result.IntImag,
- Info.Ctx);
- return true;
- } else {
- Result.makeComplexInt();
- Result.IntReal = HandleIntToIntCast(EltType, SrcType,
- Result.IntReal,
- Info.Ctx);
- Result.IntImag = HandleIntToIntCast(EltType, SrcType,
- Result.IntImag,
- Info.Ctx);
- return true;
- }
- }
+ Result.IntReal = HandleIntToIntCast(To, From, Result.IntReal, Info.Ctx);
+ Result.IntImag = HandleIntToIntCast(To, From, Result.IntImag, Info.Ctx);
+ return true;
+ }
+
+ case CK_IntegralComplexToFloatingComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexFloat();
+ Result.FloatReal = HandleIntToFloatCast(To, From, Result.IntReal, Info.Ctx);
+ Result.FloatImag = HandleIntToFloatCast(To, From, Result.IntImag, Info.Ctx);
+ return true;
+ }
}
- // FIXME: Handle more casts.
+ llvm_unreachable("unknown cast resulting in complex value");
return false;
}
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BO_Comma) {
+ if (!Visit(E->getRHS()))
+ return false;
+
+ // If we can't evaluate the LHS, it might have side effects;
+ // conservatively mark it.
+ if (!E->getLHS()->isEvaluatable(Info.Ctx))
+ Info.EvalResult.HasSideEffects = true;
+
+ return true;
+ }
if (!Visit(E->getLHS()))
return false;
@@ -2195,29 +2451,122 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
LHS.getComplexIntImag() * RHS.getComplexIntReal());
}
break;
+ case BO_Div:
+ if (Result.isComplexFloat()) {
+ ComplexValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+ APFloat &Res_r = Result.getComplexFloatReal();
+ APFloat &Res_i = Result.getComplexFloatImag();
+
+ APFloat Den = RHS_r;
+ Den.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ APFloat Tmp = RHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Den.add(Tmp, APFloat::rmNearestTiesToEven);
+
+ Res_r = LHS_r;
+ Res_r.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_r.add(Tmp, APFloat::rmNearestTiesToEven);
+ Res_r.divide(Den, APFloat::rmNearestTiesToEven);
+
+ Res_i = LHS_i;
+ Res_i.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven);
+ Res_i.divide(Den, APFloat::rmNearestTiesToEven);
+ } else {
+ if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0) {
+ // FIXME: what about diagnostics?
+ return false;
+ }
+ ComplexValue LHS = Result;
+ APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ RHS.getComplexIntImag() * RHS.getComplexIntImag();
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
+ LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
+ }
+ break;
}
return true;
}
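The new BO_Div case computes the textbook complex quotient with no overflow rescaling: for LHS = a+bi and RHS = c+di it forms Den = c*c + d*d and then

    Res_r = (a*c + b*d) / Den
    Res_i = (b*c - a*d) / Den

with the integer branch using the same formula after bailing out when Den would be zero.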
+bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ // Get the operand value into 'Result'.
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ switch (E->getOpcode()) {
+ default:
+ // FIXME: what about diagnostics?
+ return false;
+ case UO_Extension:
+ return true;
+ case UO_Plus:
+ // The result is always just the subexpr.
+ return true;
+ case UO_Minus:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().changeSign();
+ Result.getComplexFloatImag().changeSign();
+ }
+ else {
+ Result.getComplexIntReal() = -Result.getComplexIntReal();
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ }
+ return true;
+ case UO_Not:
+ if (Result.isComplexFloat())
+ Result.getComplexFloatImag().changeSign();
+ else
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::
+VisitBinaryConditionalOperator(const BinaryConditionalOperator *e) {
+ OpaqueValueEvaluation opaque(Info, e->getOpaqueValue(), e->getCommon());
+ if (opaque.hasError()) return false;
+
+ bool cond;
+ if (!HandleConversionToBool(e->getCond(), cond, Info))
+ return false;
+
+ return Visit(cond ? e->getTrueExpr() : e->getFalseExpr());
+}
+
+bool ComplexExprEvaluator::VisitConditionalOperator(const ConditionalOperator *E) {
+ bool Cond;
+ if (!HandleConversionToBool(E->getCond(), Cond, Info))
+ return false;
+
+ return Visit(Cond ? E->getTrueExpr() : E->getFalseExpr());
+}
+
//===----------------------------------------------------------------------===//
// Top level Expr::Evaluate method.
//===----------------------------------------------------------------------===//
-/// Evaluate - Return true if this is a constant which we can fold using
-/// any crazy technique (that has nothing to do with language standards) that
-/// we want to. If this function returns true, it returns the folded constant
-/// in Result.
-bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
- const Expr *E = this;
- EvalInfo Info(Ctx, Result);
+static bool Evaluate(EvalInfo &Info, const Expr *E) {
if (E->getType()->isVectorType()) {
if (!EvaluateVector(E, Info.EvalResult.Val, Info))
return false;
} else if (E->getType()->isIntegerType()) {
if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(const_cast<Expr*>(E)))
return false;
- if (Result.Val.isLValue() && !IsGlobalLValue(Result.Val.getLValueBase()))
+ if (Info.EvalResult.Val.isLValue() &&
+ !IsGlobalLValue(Info.EvalResult.Val.getLValueBase()))
return false;
} else if (E->getType()->hasPointerRepresentation()) {
LValue LV;
@@ -2243,14 +2592,24 @@ bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
return true;
}
-bool Expr::EvaluateAsBooleanCondition(bool &Result, ASTContext &Ctx) const {
+/// Evaluate - Return true if this is a constant which we can fold using
+/// any crazy technique (that has nothing to do with language standards) that
+/// we want to. If this function returns true, it returns the folded constant
+/// in Result.
+bool Expr::Evaluate(EvalResult &Result, const ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result);
+ return ::Evaluate(Info, this);
+}
+
+bool Expr::EvaluateAsBooleanCondition(bool &Result,
+ const ASTContext &Ctx) const {
EvalResult Scratch;
EvalInfo Info(Ctx, Scratch);
return HandleConversionToBool(this, Result, Info);
}
-bool Expr::EvaluateAsLValue(EvalResult &Result, ASTContext &Ctx) const {
+bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
EvalInfo Info(Ctx, Result);
LValue LV;
@@ -2263,7 +2622,8 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, ASTContext &Ctx) const {
return false;
}
-bool Expr::EvaluateAsAnyLValue(EvalResult &Result, ASTContext &Ctx) const {
+bool Expr::EvaluateAsAnyLValue(EvalResult &Result,
+ const ASTContext &Ctx) const {
EvalInfo Info(Ctx, Result);
LValue LV;
@@ -2276,21 +2636,21 @@ bool Expr::EvaluateAsAnyLValue(EvalResult &Result, ASTContext &Ctx) const {
/// isEvaluatable - Call Evaluate to see if this expression can be constant
/// folded, but discard the result.
-bool Expr::isEvaluatable(ASTContext &Ctx) const {
+bool Expr::isEvaluatable(const ASTContext &Ctx) const {
EvalResult Result;
return Evaluate(Result, Ctx) && !Result.HasSideEffects;
}
-bool Expr::HasSideEffects(ASTContext &Ctx) const {
+bool Expr::HasSideEffects(const ASTContext &Ctx) const {
Expr::EvalResult Result;
EvalInfo Info(Ctx, Result);
return HasSideEffect(Info).Visit(const_cast<Expr*>(this));
}
-APSInt Expr::EvaluateAsInt(ASTContext &Ctx) const {
+APSInt Expr::EvaluateAsInt(const ASTContext &Ctx) const {
EvalResult EvalResult;
bool Result = Evaluate(EvalResult, Ctx);
- Result = Result;
+ (void)Result;
assert(Result && "Could not evaluate expression");
assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
@@ -2358,6 +2718,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
}
switch (E->getStmtClass()) {
+#define ABSTRACT_STMT(Node)
#define STMT(Node, Base) case Expr::Node##Class:
#define EXPR(Node, Base)
#include "clang/AST/StmtNodes.inc"
@@ -2378,8 +2739,10 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::AddrLabelExprClass:
case Expr::StmtExprClass:
case Expr::CXXMemberCallExprClass:
+ case Expr::CUDAKernelCallExprClass:
case Expr::CXXDynamicCastExprClass:
case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
case Expr::CXXNullPtrLiteralExprClass:
case Expr::CXXThisExprClass:
case Expr::CXXThrowExprClass:
@@ -2390,7 +2753,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXConstructExprClass:
case Expr::CXXBindTemporaryExprClass:
- case Expr::CXXExprWithTemporariesClass:
+ case Expr::ExprWithCleanupsClass:
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXUnresolvedConstructExprClass:
case Expr::CXXDependentScopeMemberExprClass:
@@ -2402,15 +2765,17 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::ObjCProtocolExprClass:
case Expr::ObjCIvarRefExprClass:
case Expr::ObjCPropertyRefExprClass:
- case Expr::ObjCImplicitSetterGetterRefExprClass:
- case Expr::ObjCSuperExprClass:
case Expr::ObjCIsaExprClass:
case Expr::ShuffleVectorExprClass:
case Expr::BlockExprClass:
case Expr::BlockDeclRefExprClass:
case Expr::NoStmtClass:
+ case Expr::OpaqueValueExprClass:
+ case Expr::PackExpansionExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
return ICEDiag(2, E->getLocStart());
+ case Expr::SizeOfPackExprClass:
case Expr::GNUNullExprClass:
// GCC considers the GNU __null value to be an integral constant expression.
return NoDiag();
@@ -2421,8 +2786,9 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CharacterLiteralClass:
case Expr::CXXBoolLiteralExprClass:
case Expr::CXXScalarValueInitExprClass:
- case Expr::TypesCompatibleExprClass:
case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::CXXNoexceptExprClass:
return NoDiag();
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass: {
@@ -2619,6 +2985,17 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
return NoDiag();
return ICEDiag(2, E->getLocStart());
}
+ case Expr::BinaryConditionalOperatorClass: {
+ const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
+ ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
+ if (CommonResult.Val == 2) return CommonResult;
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (FalseResult.Val == 2) return FalseResult;
+ if (CommonResult.Val == 1) return CommonResult;
+ if (FalseResult.Val == 1 &&
+ Exp->getCommon()->EvaluateAsInt(Ctx) == 0) return NoDiag();
+ return FalseResult;
+ }
case Expr::ConditionalOperatorClass: {
const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
// If the condition (ignoring parens) is a __builtin_constant_p call,
diff --git a/contrib/llvm/tools/clang/lib/AST/FullExpr.cpp b/contrib/llvm/tools/clang/lib/AST/FullExpr.cpp
deleted file mode 100644
index 93ee8d1..0000000
--- a/contrib/llvm/tools/clang/lib/AST/FullExpr.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//===--- FullExpr.cpp - C++ full expression class ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the FullExpr interface, to be used for type safe handling
-// of full expressions.
-//
-// Full expressions are described in C++ [intro.execution]p12.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/FullExpr.h"
-#include "clang/AST/Expr.h"
-#include "clang/AST/ExprCXX.h"
-#include "llvm/Support/AlignOf.h"
-using namespace clang;
-
-FullExpr FullExpr::Create(ASTContext &Context, Expr *SubExpr,
- CXXTemporary **Temporaries, unsigned NumTemporaries) {
- FullExpr E;
-
- if (!NumTemporaries) {
- E.SubExpr = SubExpr;
- return E;
- }
-
- unsigned Size = sizeof(FullExpr)
- + sizeof(CXXTemporary *) * NumTemporaries;
-
- unsigned Align = llvm::AlignOf<ExprAndTemporaries>::Alignment;
- ExprAndTemporaries *ET =
- static_cast<ExprAndTemporaries *>(Context.Allocate(Size, Align));
-
- ET->SubExpr = SubExpr;
- std::copy(Temporaries, Temporaries + NumTemporaries, ET->temps_begin());
-
- return E;
-}
-
diff --git a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
index c47a9da..533a232 100644
--- a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/TypeOrdering.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -135,34 +136,28 @@ InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
/// class using GraphViz.
void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
QualType Self = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
- std::string ErrMsg;
- sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg);
- if (Filename.isEmpty()) {
- llvm::errs() << "Error: " << ErrMsg << "\n";
- return;
- }
- Filename.appendComponent(Self.getAsString() + ".dot");
- if (Filename.makeUnique(true,&ErrMsg)) {
- llvm::errs() << "Error: " << ErrMsg << "\n";
+ // Create temp directory
+ SmallString<128> Filename;
+ int FileFD = 0;
+ if (error_code ec = sys::fs::unique_file(
+ "clang-class-inheritance-hierarchy-%%-%%-%%-%%-" +
+ Self.getAsString() + ".dot",
+ FileFD, Filename)) {
+ errs() << "Error creating temporary output file: " << ec.message() << '\n';
return;
}
- llvm::errs() << "Writing '" << Filename.c_str() << "'... ";
+ llvm::errs() << "Writing '" << Filename << "'... ";
- llvm::raw_fd_ostream O(Filename.c_str(), ErrMsg);
+ llvm::raw_fd_ostream O(FileFD, true);
+ InheritanceHierarchyWriter Writer(Context, O);
+ Writer.WriteGraph(Self);
- if (ErrMsg.empty()) {
- InheritanceHierarchyWriter Writer(Context, O);
- Writer.WriteGraph(Self);
- llvm::errs() << " done. \n";
+ llvm::errs() << " done. \n";
+ O.close();
- O.close();
-
- // Display the graph
- DisplayGraph(Filename);
- } else {
- llvm::errs() << "error opening file for writing!\n";
- }
+ // Display the graph
+ DisplayGraph(sys::Path(Filename));
}
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
index c3fa466..bed02b4 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
@@ -19,7 +19,10 @@
#include "CXXABI.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
using namespace clang;
@@ -35,6 +38,24 @@ public:
if (Pointee->isFunctionType()) return 2;
return 1;
}
+
+ CallingConv getDefaultMethodCallConv() const {
+ return CC_C;
+ }
+
+ // We cheat and just check that the class has a vtable pointer, and that it's
+ // only big enough to have a vtable pointer and nothing more (or less).
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+
+ // Check that the class has a vtable pointer.
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize;
+ }
};
class ARMCXXABI : public ItaniumCXXABI {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
index e198874..d66c374 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -1,4 +1,4 @@
-//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,18 +14,19 @@
// http://www.codesourcery.com/public/cxx-abi/abi.html
//
//===----------------------------------------------------------------------===//
-#include "Mangle.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
-#include "CGVTables.h"
#define MANGLE_CHECKER 0
@@ -34,79 +35,17 @@
#endif
using namespace clang;
-using namespace CodeGen;
-
-MiscNameMangler::MiscNameMangler(MangleContext &C,
- llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res) { }
-
-void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) {
- // Mangle the context of the block.
- // FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
- // desired. Come up with a better mangling scheme.
- const DeclContext *DC = BD->getDeclContext();
- while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
- DC = DC->getParent();
- if (DC->isFunctionOrMethod()) {
- Out << "__";
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else {
- const NamedDecl *ND = cast<NamedDecl>(DC);
- if (IdentifierInfo *II = ND->getIdentifier())
- Out << II->getName();
- else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) {
- llvm::SmallString<64> Buffer;
- Context.mangleCXXDtor(D, GD.getDtorType(), Buffer);
- Out << Buffer;
- }
- else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) {
- llvm::SmallString<64> Buffer;
- Context.mangleCXXCtor(D, GD.getCtorType(), Buffer);
- Out << Buffer;
- }
- else {
- // FIXME: We were doing a mangleUnqualifiedName() before, but that's
- // a private member of a class that will soon itself be private to the
- // Itanium C++ ABI object. What should we do now? Right now, I'm just
- // calling the mangleName() method on the MangleContext; is there a
- // better way?
- llvm::SmallString<64> Buffer;
- Context.mangleName(ND, Buffer);
- Out << Buffer;
- }
- }
- Out << "_block_invoke_" << Context.getBlockId(BD, true);
- } else {
- Out << "__block_global_" << Context.getBlockId(BD, false);
- }
-}
-
-void MiscNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Name;
- llvm::raw_svector_ostream OS(Name);
-
- const ObjCContainerDecl *CD =
- dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
- assert (CD && "Missing container decl in GetNameForMethod");
- OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
- if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
- OS << '(' << CID << ')';
- OS << ' ' << MD->getSelector().getAsString() << ']';
-
- Out << OS.str().size() << OS.str();
-}
namespace {
-static const DeclContext *GetLocalClassFunctionDeclContext(
- const DeclContext *DC) {
- if (isa<CXXRecordDecl>(DC)) {
- while (!DC->isNamespace() && !DC->isTranslationUnit() &&
- !isa<FunctionDecl>(DC))
- DC = DC->getParent();
- if (isa<FunctionDecl>(DC))
- return DC;
+static const CXXRecordDecl *GetLocalClassDecl(const NamedDecl *ND) {
+ const DeclContext *DC = dyn_cast<DeclContext>(ND);
+ if (!DC)
+ DC = ND->getDeclContext();
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ if (isa<FunctionDecl>(DC->getParent()))
+ return dyn_cast<CXXRecordDecl>(DC);
+ DC = DC->getParent();
}
return 0;
}
@@ -127,10 +66,77 @@ static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
static const unsigned UnknownArity = ~0U;
+class ItaniumMangleContext : public MangleContext {
+ llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+ unsigned Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+
+public:
+ explicit ItaniumMangleContext(ASTContext &Context,
+ Diagnostic &Diags)
+ : MangleContext(Context, Diags) { }
+
+ uint64_t getAnonymousStructId(const TagDecl *TD) {
+ std::pair<llvm::DenseMap<const TagDecl *,
+ uint64_t>::iterator, bool> Result =
+ AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+ return Result.first->second;
+ }
+
+ void startNewFunction() {
+ MangleContext::startNewFunction();
+ mangleInitDiscriminator();
+ }
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ bool shouldMangleDeclName(const NamedDecl *D);
+ void mangleName(const NamedDecl *D, llvm::raw_ostream &);
+ void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::raw_ostream &);
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::raw_ostream &);
+ void mangleReferenceTemporary(const VarDecl *D,
+ llvm::raw_ostream &);
+ void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::raw_ostream &);
+ void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::raw_ostream &);
+ void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::raw_ostream &);
+ void mangleCXXRTTI(QualType T, llvm::raw_ostream &);
+ void mangleCXXRTTIName(QualType T, llvm::raw_ostream &);
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::raw_ostream &);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::raw_ostream &);
+
+ void mangleItaniumGuardVariable(const VarDecl *D, llvm::raw_ostream &);
+
+ void mangleInitDiscriminator() {
+ Discriminator = 0;
+ }
+
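+ // Discriminator numbering per the Itanium ABI: the first entity with a
+ // given name in a function gets no discriminator, the second gets _0,
+ // the third _1, and so on. E.g. (sketch): two local statics both named
+ // 's' in 'void f()' come out as _ZZ1fvE1s and _ZZ1fvE1s_0.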
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator;
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
/// CXXNameMangler - Manage the mangling of a single name.
class CXXNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
+ ItaniumMangleContext &Context;
+ llvm::raw_ostream &Out;
const CXXMethodDecl *Structor;
unsigned StructorType;
@@ -143,15 +149,15 @@ class CXXNameMangler {
ASTContext &getASTContext() const { return Context.getASTContext(); }
public:
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { }
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ CXXNameMangler(ItaniumMangleContext &C, llvm::raw_ostream &Out_)
+ : Context(C), Out(Out_), Structor(0), StructorType(0), SeqID(0) { }
+ CXXNameMangler(ItaniumMangleContext &C, llvm::raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
SeqID(0) { }
- CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ CXXNameMangler(ItaniumMangleContext &C, llvm::raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
SeqID(0) { }
#if MANGLE_CHECKER
@@ -165,7 +171,7 @@ public:
free(result);
}
#endif
- llvm::raw_svector_ostream &getStream() { return Out; }
+ llvm::raw_ostream &getStream() { return Out; }
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
@@ -222,6 +228,7 @@ private:
void mangleTemplatePrefix(TemplateName Template);
void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
void mangleQualifiers(Qualifiers Quals);
+ void mangleRefQualifier(RefQualifierKind RefQualifier);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
@@ -235,6 +242,7 @@ private:
void mangleType(TemplateName);
void mangleBareFunctionType(const FunctionType *T,
bool MangleReturnType);
+ void mangleNeonVectorType(const VectorType *T);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
void mangleMemberExpr(const Expr *Base, bool IsArrow,
@@ -258,6 +266,7 @@ private:
void mangleTemplateParameter(unsigned Index);
};
+
}
static bool isInCLinkageSpecification(const Decl *D) {
@@ -271,7 +280,7 @@ static bool isInCLinkageSpecification(const Decl *D) {
return false;
}
-bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
+bool ItaniumMangleContext::shouldMangleDeclName(const NamedDecl *D) {
// In C, functions with no attributes never need to be mangled. Fastpath them.
if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
return false;
@@ -320,7 +329,17 @@ void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
// over all other naming in the .o file.
if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
// If we have an asm name, then we use it as the mangling.
- Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Adding the prefix can cause problems when one file has a "foo" and
+ // another has a "\01foo". That is known to happen on ELF with the
+ // tricks normally used for producing aliases (PR9177). Fortunately the
+ // llvm mangler on ELF is a nop, so we can just avoid adding the \01
+ // marker.
+ llvm::StringRef UserLabelPrefix =
+ getASTContext().Target.getUserLabelPrefix();
+ if (!UserLabelPrefix.empty())
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
Out << ALA->getLabel();
return;
}
@@ -373,7 +392,8 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
// Do the canonicalization out here because parameter types can
// undergo additional canonicalization (e.g. array decay).
- FunctionType *FT = cast<FunctionType>(Context.getASTContext()
+ const FunctionType *FT
+ = cast<FunctionType>(Context.getASTContext()
.getCanonicalType(FD->getType()));
mangleBareFunctionType(FT, MangleReturnType);
@@ -433,16 +453,15 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
//
const DeclContext *DC = ND->getDeclContext();
- if (GetLocalClassFunctionDeclContext(DC)) {
- mangleLocalName(ND);
- return;
- }
-
// If this is an extern variable declared locally, the relevant DeclContext
// is that of the containing namespace, or the translation unit.
if (isa<FunctionDecl>(DC) && ND->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = DC->getParent();
+ else if (GetLocalClassDecl(ND)) {
+ mangleLocalName(ND);
+ return;
+ }
while (isa<LinkageSpecDecl>(DC))
DC = DC->getParent();
@@ -525,7 +544,7 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
Diagnostic &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
"cannot mangle dependent operator name");
- Diags.Report(FullSourceLoc(), DiagID);
+ Diags.Report(DiagID);
return;
}
@@ -809,13 +828,17 @@ void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
const DeclContext *DC,
bool NoFunction) {
- // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
- // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+ // <nested-name>
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+ // <template-args> E
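+ //   E.g. (sketch): 'void N::C::f() const &' encodes as _ZNKR1N1C1fEv.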
Out << 'N';
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
-
+ mangleRefQualifier(Method->getRefQualifier());
+ }
+
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
@@ -853,15 +876,18 @@ void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(MD);
- }
- else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
- mangleFunctionEncoding(cast<FunctionDecl>(CDC));
+ } else if (const CXXRecordDecl *RD = GetLocalClassDecl(ND)) {
+ mangleFunctionEncoding(cast<FunctionDecl>(RD->getDeclContext()));
Out << 'E';
- mangleNestedName(ND, DC, true /*NoFunction*/);
- // FIXME. This still does not cover all cases.
+ // Mangle the name relative to the closest enclosing function.
+ if (ND == RD) // equality ok because RD derived from ND above
+ mangleUnqualifiedName(ND);
+ else
+ mangleNestedName(ND, DC, true /*NoFunction*/);
+
unsigned disc;
- if (Context.getNextDiscriminator(ND, disc)) {
+ if (Context.getNextDiscriminator(RD, disc)) {
if (disc < 10)
Out << '_' << disc;
else
@@ -893,7 +919,9 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
manglePrefix(DC->getParent(), NoFunction);
llvm::SmallString<64> Name;
- Context.mangleBlock(GlobalDecl(), Block, Name);
+ llvm::raw_svector_ostream NameStream(Name);
+ Context.mangleBlock(Block, NameStream);
+ NameStream.flush();
Out << Name.size() << Name;
return;
}
@@ -1007,6 +1035,12 @@ void CXXNameMangler::mangleType(TemplateName TN) {
break;
}
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = TN.getAsSubstTemplateTemplateParmPack();
+ mangleTemplateParameter(SubstPack->getParameterPack()->getIndex());
+ break;
+ }
}
addSubstitution(TN);
@@ -1145,27 +1179,57 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
// FIXME: For now, just drop all extension qualifiers on the floor.
}
+void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
+ // <ref-qualifier> ::= R # lvalue reference
+ // ::= O # rvalue reference
+ // Proposal to Itanium C++ ABI list on 1/26/11
+ switch (RefQualifier) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ Out << 'R';
+ break;
+
+ case RQ_RValue:
+ Out << 'O';
+ break;
+ }
+}
+
void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Buffer;
- MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
- Out << Buffer;
+ Context.mangleObjCMethodName(MD, Out);
}
-void CXXNameMangler::mangleType(QualType T) {
+void CXXNameMangler::mangleType(QualType nonCanon) {
// Only operate on the canonical type!
- T = Context.getASTContext().getCanonicalType(T);
+ QualType canon = nonCanon.getCanonicalType();
+
+ SplitQualType split = canon.split();
+ Qualifiers quals = split.second;
+ const Type *ty = split.first;
- bool IsSubstitutable = T.hasLocalQualifiers() || !isa<BuiltinType>(T);
- if (IsSubstitutable && mangleSubstitution(T))
+ bool isSubstitutable = quals || !isa<BuiltinType>(ty);
+ if (isSubstitutable && mangleSubstitution(canon))
return;
- if (Qualifiers Quals = T.getLocalQualifiers()) {
- mangleQualifiers(Quals);
+ // If we're mangling a qualified array type, push the qualifiers to
+ // the element type.
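+ // E.g. (sketch): 'const int[3]' comes out as "A3_Ki" (array of const
+ // int), not "KA3_i".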
+ if (quals && isa<ArrayType>(ty)) {
+ ty = Context.getASTContext().getAsArrayType(canon);
+ quals = Qualifiers();
+
+ // Note that we don't update canon: we want to add the
+ // substitution at the canonical type.
+ }
+
+ if (quals) {
+ mangleQualifiers(quals);
// Recurse: even if the qualified type isn't yet substitutable,
// the unqualified type might be.
- mangleType(T.getLocalUnqualifiedType());
+ mangleType(QualType(ty, 0));
} else {
- switch (T->getTypeClass()) {
+ switch (ty->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
case Type::CLASS: \
@@ -1173,15 +1237,15 @@ void CXXNameMangler::mangleType(QualType T) {
return;
#define TYPE(CLASS, PARENT) \
case Type::CLASS: \
- mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+ mangleType(static_cast<const CLASS##Type*>(ty)); \
break;
#include "clang/AST/TypeNodes.def"
}
}
// Add the substitution.
- if (IsSubstitutable)
- addSubstitution(T);
+ if (isSubstitutable)
+ addSubstitution(canon);
}
void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
@@ -1217,9 +1281,8 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
// ::= Di # char32_t
// ::= Ds # char16_t
+ // ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
// ::= u <source-name> # vendor extended type
- // From our point of view, std::nullptr_t is a builtin, but as far as mangling
- // is concerned, it's a type called std::nullptr_t.
switch (T->getKind()) {
case BuiltinType::Void: Out << 'v'; break;
case BuiltinType::Bool: Out << 'b'; break;
@@ -1231,7 +1294,8 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::ULongLong: Out << 'y'; break;
case BuiltinType::UInt128: Out << 'o'; break;
case BuiltinType::SChar: Out << 'a'; break;
- case BuiltinType::WChar: Out << 'w'; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << 'w'; break;
case BuiltinType::Char16: Out << "Ds"; break;
case BuiltinType::Char32: Out << "Di"; break;
case BuiltinType::Short: Out << 's'; break;
@@ -1242,16 +1306,13 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::Float: Out << 'f'; break;
case BuiltinType::Double: Out << 'd'; break;
case BuiltinType::LongDouble: Out << 'e'; break;
- case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+ case BuiltinType::NullPtr: Out << "Dn"; break;
case BuiltinType::Overload:
case BuiltinType::Dependent:
assert(false &&
"Overloaded and dependent types shouldn't get to name mangling");
break;
- case BuiltinType::UndeducedAuto:
- assert(0 && "Should not see undeduced auto here");
- break;
case BuiltinType::ObjCId: Out << "11objc_object"; break;
case BuiltinType::ObjCClass: Out << "10objc_class"; break;
case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
@@ -1322,7 +1383,9 @@ void CXXNameMangler::mangleType(const ConstantArrayType *T) {
}
void CXXNameMangler::mangleType(const VariableArrayType *T) {
Out << 'A';
- mangleExpression(T->getSizeExpr());
+ // Decayed VLA types (null size expression) are simply skipped.
+ if (T->getSizeExpr())
+ mangleExpression(T->getSizeExpr());
Out << '_';
mangleType(T->getElementType());
}
@@ -1333,7 +1396,7 @@ void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
- Out << 'A' << '_';
+ Out << "A_";
mangleType(T->getElementType());
}
@@ -1345,6 +1408,7 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) {
QualType PointeeType = T->getPointeeType();
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+ mangleRefQualifier(FPT->getRefQualifier());
mangleType(FPT);
// Itanium C++ ABI 5.1.8:
@@ -1371,6 +1435,11 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
mangleTemplateParameter(T->getIndex());
}
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
+ mangleTemplateParameter(T->getReplacedParameter()->getIndex());
+}
+
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
@@ -1399,6 +1468,46 @@ void CXXNameMangler::mangleType(const ComplexType *T) {
mangleType(T->getElementType());
}
+// ARM's ABI for Neon vector types specifies that they should be mangled as
+// if they are structs (to match ARM's initial implementation). The
+// vector type must be one of the special types predefined by ARM.
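+// E.g. (sketch): a 64-bit Neon vector of int32_t elements mangles as
+// "16__simd64_int32_t" (decimal length prefix, then the struct-like name).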
+void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
+ const char *EltName = 0;
+ if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "poly8_t"; break;
+ case BuiltinType::Short: EltName = "poly16_t"; break;
+ default: llvm_unreachable("unexpected Neon polynomial vector element type");
+ }
+ } else {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "int8_t"; break;
+ case BuiltinType::UChar: EltName = "uint8_t"; break;
+ case BuiltinType::Short: EltName = "int16_t"; break;
+ case BuiltinType::UShort: EltName = "uint16_t"; break;
+ case BuiltinType::Int: EltName = "int32_t"; break;
+ case BuiltinType::UInt: EltName = "uint32_t"; break;
+ case BuiltinType::LongLong: EltName = "int64_t"; break;
+ case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+ case BuiltinType::Float: EltName = "float32_t"; break;
+ default: llvm_unreachable("unexpected Neon vector element type");
+ }
+ }
+ const char *BaseName = 0;
+ unsigned BitSize = (T->getNumElements() *
+ getASTContext().getTypeSize(EltType));
+ if (BitSize == 64)
+ BaseName = "__simd64_";
+ else {
+ assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
+ BaseName = "__simd128_";
+ }
+ Out << strlen(BaseName) + strlen(EltName);
+ Out << BaseName << EltName;
+}
+
// GNU extension: vector types
// <type> ::= <vector-type>
// <vector-type> ::= Dv <positive dimension number> _
@@ -1407,10 +1516,15 @@ void CXXNameMangler::mangleType(const ComplexType *T) {
// <extended element type> ::= <element type>
// ::= p # AltiVec vector pixel
void CXXNameMangler::mangleType(const VectorType *T) {
+ if ((T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector)) {
+ mangleNeonVectorType(T);
+ return;
+ }
Out << "Dv" << T->getNumElements() << '_';
- if (T->getAltiVecSpecific() == VectorType::Pixel)
+ if (T->getVectorKind() == VectorType::AltiVecPixel)
Out << 'p';
- else if (T->getAltiVecSpecific() == VectorType::Bool)
+ else if (T->getVectorKind() == VectorType::AltiVecBool)
Out << 'b';
else
mangleType(T->getElementType());
@@ -1425,6 +1539,12 @@ void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
mangleType(T->getElementType());
}
+void CXXNameMangler::mangleType(const PackExpansionType *T) {
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(T->getPattern());
+}
+
void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
mangleSourceName(T->getDecl()->getIdentifier());
}
@@ -1525,6 +1645,12 @@ void CXXNameMangler::mangleType(const DecltypeType *T) {
Out << 'E';
}
+void CXXNameMangler::mangleType(const AutoType *T) {
+ QualType D = T->getDeducedType();
+ assert(!D.isNull() && "can't mangle undeduced auto type");
+ mangleType(D);
+}
+
void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
@@ -1570,12 +1696,14 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// ::= sr <type> <unqualified-name> # dependent name
// ::= sr <type> <unqualified-name> <template-args> # dependent template-id
// ::= sZ <template-param> # size of a parameter pack
+ // ::= sZ <function-param> # size of a function parameter pack
// ::= <expr-primary>
// <expr-primary> ::= L <type> <value number> E # integer literal
// ::= L <type <value float> E # floating literal
// ::= L <mangled-name> E # external name
switch (E->getStmtClass()) {
case Expr::NoStmtClass:
+#define ABSTRACT_STMT(Type)
#define EXPR(Type, Base)
#define STMT(Type, Base) \
case Expr::Type##Class:
@@ -1602,7 +1730,6 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::CompoundLiteralExprClass:
case Expr::ExtVectorElementExprClass:
case Expr::ObjCEncodeExprClass:
- case Expr::ObjCImplicitSetterGetterRefExprClass:
case Expr::ObjCIsaExprClass:
case Expr::ObjCIvarRefExprClass:
case Expr::ObjCMessageExprClass:
@@ -1610,25 +1737,40 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::ObjCProtocolExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCStringLiteralClass:
- case Expr::ObjCSuperExprClass:
case Expr::OffsetOfExprClass:
case Expr::PredefinedExprClass:
case Expr::ShuffleVectorExprClass:
case Expr::StmtExprClass:
- case Expr::TypesCompatibleExprClass:
case Expr::UnaryTypeTraitExprClass:
- case Expr::VAArgExprClass: {
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CUDAKernelCallExprClass: {
// As bad as this diagnostic is, it's better than crashing.
Diagnostic &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
"cannot yet mangle expression type %0");
- Diags.Report(FullSourceLoc(E->getExprLoc(),
- getASTContext().getSourceManager()),
- DiagID)
+ Diags.Report(E->getExprLoc(), DiagID)
<< E->getStmtClassName() << E->getSourceRange();
break;
}
+ // Even gcc-4.5 doesn't mangle this.
+ case Expr::BinaryConditionalOperatorClass: {
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(Diagnostic::Error,
+ "?: operator with omitted middle operand cannot be mangled");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // These are used for internal purposes and cannot be meaningfully mangled.
+ case Expr::OpaqueValueExprClass:
+ llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
+
case Expr::CXXDefaultArgExprClass:
mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
break;
@@ -1878,6 +2020,11 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
break;
}
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ mangleTemplateParameter(
+ cast<SubstNonTypeTemplateParmPackExpr>(E)->getParameterPack()->getIndex());
+ break;
+
case Expr::DependentScopeDeclRefExprClass: {
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
NestedNameSpecifier *NNS = DRE->getQualifier();
@@ -1910,8 +2057,8 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
break;
- case Expr::CXXExprWithTemporariesClass:
- mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr(), Arity);
+ case Expr::ExprWithCleanupsClass:
+ mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
break;
case Expr::FloatingLiteralClass: {
@@ -1947,7 +2094,7 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::ImaginaryLiteralClass: {
const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
// Mangle as if a complex literal.
- // Proposal from David Vandervoorde, 2010.06.30.
+ // Proposal from David Vandevoorde, 2010.06.30.
Out << 'L';
mangleType(E->getType());
if (const FloatingLiteral *Imag =
@@ -1957,7 +2104,7 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
Out << '_';
mangleFloat(Imag->getValue());
} else {
- Out << '0' << '_';
+ Out << "0_";
llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
if (IE->getSubExpr()->getType()->isSignedIntegerType())
Value.setIsSigned(true);
@@ -1986,7 +2133,33 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
Out << "LDnE";
break;
}
-
+
+ case Expr::PackExpansionExprClass:
+ Out << "sp";
+ mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
+ break;
+
+ case Expr::SizeOfPackExprClass: {
+ Out << "sZ";
+ const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack();
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
+ mangleTemplateParameter(TTP->getIndex());
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Pack))
+ mangleTemplateParameter(NTTP->getIndex());
+ else if (const TemplateTemplateParmDecl *TempTP
+ = dyn_cast<TemplateTemplateParmDecl>(Pack))
+ mangleTemplateParameter(TempTP->getIndex());
+ else {
+ // Note: proposed by Mike Herrick on 11/30/10
+ // <expression> ::= sZ <function-param> # size of function parameter pack
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot mangle sizeof...(function parameter pack)");
+ Diags.Report(DiagID);
+ return;
+ }
+ }
}
}
@@ -2073,11 +2246,12 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
- // ::= I <template-arg>* E # argument pack
+ // ::= J <template-arg>* E # argument pack
// ::= sp <expression> # pack expansion of (C++0x)
switch (A.getKind()) {
- default:
- assert(0 && "Unknown template argument kind!");
+ case TemplateArgument::Null:
+ llvm_unreachable("Cannot mangle NULL template argument");
+
case TemplateArgument::Type:
mangleType(A.getAsType());
break;
@@ -2085,6 +2259,11 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
// This is mangled as <type>.
mangleType(A.getAsTemplate());
break;
+ case TemplateArgument::TemplateExpansion:
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(A.getAsTemplateOrTemplatePattern());
+ break;
case TemplateArgument::Expression:
Out << 'X';
mangleExpression(A.getAsExpr());
@@ -2126,6 +2305,16 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
break;
}
+
+ case TemplateArgument::Pack: {
+ // Note: proposal by Mike Herrick on 12/20/10
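+ // E.g. (sketch): the argument pack <int, long> encodes as "JilE".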
+ Out << 'J';
+ for (TemplateArgument::pack_iterator PA = A.pack_begin(),
+ PAEnd = A.pack_end();
+ PA != PAEnd; ++PA)
+ mangleTemplateArg(P, *PA);
+ Out << 'E';
+ }
}
}
@@ -2240,8 +2429,8 @@ static bool isCharSpecialization(QualType T, const char *Name) {
}
template <std::size_t StrLen>
-bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
- const char (&Str)[StrLen]) {
+static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
+ const char (&Str)[StrLen]) {
if (!SD->getIdentifier()->isStr(Str))
return false;
@@ -2370,8 +2559,8 @@ void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
/// and this routine will return false. In this case, the caller should just
/// emit the identifier of the declaration (\c D->getIdentifier()) as its
/// name.
-void MangleContext::mangleName(const NamedDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleName(const NamedDecl *D,
+ llvm::raw_ostream &Out) {
assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
"Invalid mangleName() call, argument is not a variable or function!");
assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
@@ -2381,31 +2570,27 @@ void MangleContext::mangleName(const NamedDecl *D,
getASTContext().getSourceManager(),
"Mangling declaration");
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
return Mangler.mangle(D);
}
-void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &Res) {
- CXXNameMangler Mangler(*this, Res, D, Type);
+void ItaniumMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
Mangler.mangle(D);
}
-void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &Res) {
- CXXNameMangler Mangler(*this, Res, D, Type);
+void ItaniumMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
Mangler.mangle(D);
}
-void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD,
- llvm::SmallVectorImpl<char> &Res) {
- MiscNameMangler Mangler(*this, Res);
- Mangler.mangleBlock(GD, BD);
-}
-
-void MangleContext::mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::raw_ostream &Out) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
// <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
@@ -2415,8 +2600,7 @@ void MangleContext::mangleThunk(const CXXMethodDecl *MD,
assert(!isa<CXXDestructorDecl>(MD) &&
"Use mangleCXXDtor for destructor decls!");
-
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZT";
if (!Thunk.Return.isEmpty())
Mangler.getStream() << 'c';
@@ -2433,13 +2617,13 @@ void MangleContext::mangleThunk(const CXXMethodDecl *MD,
}
void
-MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &Res) {
+ItaniumMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::raw_ostream &Out) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
-
- CXXNameMangler Mangler(*this, Res, DD, Type);
+ CXXNameMangler Mangler(*this, Out, DD, Type);
Mangler.getStream() << "_ZT";
// Mangle the 'this' pointer adjustment.
@@ -2451,45 +2635,46 @@ MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
/// mangleGuardVariable - Returns the mangled name for a guard variable
/// for the passed in VarDecl.
-void MangleContext::mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleItaniumGuardVariable(const VarDecl *D,
+ llvm::raw_ostream &Out) {
// <special-name> ::= GV <object name> # Guard variable for one-time
// # initialization
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZGV";
Mangler.mangleName(D);
}
-void MangleContext::mangleReferenceTemporary(const VarDecl *D,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleReferenceTemporary(const VarDecl *D,
+ llvm::raw_ostream &Out) {
// We match the GCC mangling here.
// <special-name> ::= GR <object name>
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZGR";
Mangler.mangleName(D);
}
-void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::raw_ostream &Out) {
// <special-name> ::= TV <type> # virtual table
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTV";
Mangler.mangleNameOrStandardSubstitution(RD);
}
-void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::raw_ostream &Out) {
// <special-name> ::= TT <type> # VTT structure
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTT";
Mangler.mangleNameOrStandardSubstitution(RD);
}
-void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::raw_ostream &Out) {
// <special-name> ::= TC <type> <offset number> _ <base type>
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTC";
Mangler.mangleNameOrStandardSubstitution(RD);
Mangler.getStream() << Offset;
@@ -2497,19 +2682,24 @@ void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
Mangler.mangleNameOrStandardSubstitution(Type);
}
-void MangleContext::mangleCXXRTTI(QualType Ty,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleCXXRTTI(QualType Ty,
+ llvm::raw_ostream &Out) {
// <special-name> ::= TI <type> # typeinfo structure
assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTI";
Mangler.mangleType(Ty);
}
-void MangleContext::mangleCXXRTTIName(QualType Ty,
- llvm::SmallVectorImpl<char> &Res) {
+void ItaniumMangleContext::mangleCXXRTTIName(QualType Ty,
+ llvm::raw_ostream &Out) {
// <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
- CXXNameMangler Mangler(*this, Res);
+ CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTS";
Mangler.mangleType(Ty);
}
+
+MangleContext *clang::createItaniumMangleContext(ASTContext &Context,
+ Diagnostic &Diags) {
+ return new ItaniumMangleContext(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
new file mode 100644
index 0000000..3a0b909
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
@@ -0,0 +1,135 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements generic name mangling support for blocks and Objective-C.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
+// much to be desired. Come up with a better mangling scheme.
+
+namespace {
+
+static void mangleFunctionBlock(MangleContext &Context,
+ llvm::StringRef Outer,
+ const BlockDecl *BD,
+ llvm::raw_ostream &Out) {
+ Out << "__" << Outer << "_block_invoke_" << Context.getBlockId(BD, true);
+}
+
+static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
+#ifndef NDEBUG
+ const DeclContext *ExpectedDC = BD->getDeclContext();
+ while (isa<BlockDecl>(ExpectedDC) || isa<EnumDecl>(ExpectedDC))
+ ExpectedDC = ExpectedDC->getParent();
+ assert(DC == ExpectedDC && "Given decl context did not match expected!");
+#endif
+}
+
+}
+
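+// Blocks at global scope are simply numbered; e.g. (sketch, assuming block
+// ids start at 0) the first global block in a translation unit becomes
+// "__block_global_0", while a block inside function 'foo' gets
+// "__foo_block_invoke_<id>" via mangleFunctionBlock above.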
+void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
+ llvm::raw_ostream &Out) {
+ Out << "__block_global_" << getBlockId(BD, false);
+}
+
+void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
+ CXXCtorType CT, const BlockDecl *BD,
+ llvm::raw_ostream &ResStream) {
+ checkMangleDC(CD, BD);
+ llvm::SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXCtor(CD, CT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
+ CXXDtorType DT, const BlockDecl *BD,
+ llvm::raw_ostream &ResStream) {
+ checkMangleDC(DD, BD);
+ llvm::SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXDtor(DD, DT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ llvm::raw_ostream &Out) {
+ assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));
+ checkMangleDC(DC, BD);
+
+ llvm::SmallString<64> Buffer;
+ llvm::raw_svector_ostream Stream(Buffer);
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method, Stream);
+ } else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Stream << II->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ mangleName(ND, Stream);
+ }
+ }
+ Stream.flush();
+ mangleFunctionBlock(*this, Buffer, BD, Out);
+}
+
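+// Objective-C method names are emitted as <length><name>; e.g. (sketch)
+// '-[Foo bar:]' (11 characters) becomes "11-[Foo bar:]".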
+void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
+ llvm::raw_ostream &Out) {
+ llvm::SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << CID->getName() << ')';
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
+ Out << OS.str().size() << OS.str();
+}
+
+void MangleContext::mangleBlock(const BlockDecl *BD,
+ llvm::raw_ostream &Out) {
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod())
+ mangleBlock(DC, BD, Out);
+ else
+ mangleGlobalBlock(BD, Out);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
index 87b7767..4de93bb 100644
--- a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -14,8 +14,10 @@
#include "CXXABI.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/Type.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
using namespace clang;
@@ -26,6 +28,27 @@ public:
MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
unsigned getMemberPointerSize(const MemberPointerType *MPT) const;
+
+ CallingConv getDefaultMethodCallConv() const {
+ if (Context.Target.getTriple().getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
+ else
+ return CC_C;
+ }
+
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // FIXME: Audit the corners
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // In the Microsoft ABI, classes can have one or two vtable pointers.
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize ||
+ Layout.getNonVirtualSize() == PointerSize * 2;
+ }
};
}
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
new file mode 100644
index 0000000..4bf7f23
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -0,0 +1,1188 @@
+//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ name mangling targeting the Microsoft Visual C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+
+using namespace clang;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_ostream &Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, llvm::raw_ostream &Out_)
+ : Context(C), Out(Out_) { }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T, bool IsInstMethod = false);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::raw_ostream &Out);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::raw_ostream &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::raw_ostream &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::raw_ostream &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::raw_ostream &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::raw_ostream &);
+ virtual void mangleCXXRTTI(QualType T, llvm::raw_ostream &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::raw_ostream &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::raw_ostream &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::raw_ostream &);
+ virtual void mangleReferenceTemporary(const clang::VarDecl *,
+ llvm::raw_ostream &);
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+ // (always), as does a C++ member function or a function whose name is not
+ // a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ llvm::StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+ // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
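+ // E.g. (sketch): a global 'void f(int)' becomes "?f@@YAXH@Z".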
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
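+ // E.g. (sketch): a namespace-scope 'int x' encodes as "?x@@3HA"
+ // ('3' = global, then the type and qualifier codes 'H' and 'A').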
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(cast<ArrayType>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
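+ // E.g. (sketch): 3 => "2", 10 => "9", 20 => "BE@", -4 => "?3".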
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
+
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ assert(false && "Can't mangle constructors yet!");
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ assert(false && "Can't mangle destructors yet!");
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ assert(false && "Don't know how to mangle literal operators yet!");
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ Context.mangleBlock(BD, Out);
+ Out << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ assert(false && "Don't know how to mangle ?:");
+ break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
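A couple of spot checks against the operator table above (a condensed
sketch; the switch itself is the authoritative mapping):

    #include <cassert>
    #include <string>

    std::string operatorCode(const std::string &Spelling) {
      if (Spelling == "=")     return "?4";
      if (Spelling == "==")    return "?8";
      if (Spelling == "[]")    return "?A";
      if (Spelling == "()")    return "?R";
      if (Spelling == "+=")    return "?Y";
      if (Spelling == "new[]") return "?_U";
      return "";
    }

    int main() {
      assert(operatorCode("==") == "?8");      // operator== -> ?8
      assert(operatorCode("new[]") == "?_U");  // operator new[] -> ?_U
      return 0;
    }
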
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
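The nested ifs above reduce to a small truth table; a sketch of the same
selection, covering only the near, non-based cases:

    char cvrLetter(bool Const, bool Volatile, bool Member) {
      if (!Member)
        return Volatile ? (Const ? 'D' : 'C') : (Const ? 'B' : 'A');
      return   Volatile ? (Const ? 'T' : 'S') : (Const ? 'R' : 'Q');
    }
    // cvrLetter(true,  false, false) == 'B'  // const
    // cvrLetter(false, true,  true)  == 'S'  // volatile member
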
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+ // Just emit qualifiers like normal.
+ // NB: When we mangle a pointer/reference type, and the pointee
+ // type has no qualifiers, the lack of qualifier gets mangled
+ // in there.
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+return;
+#define TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _D # __int8 (yup, it's a distinct type in MSVC)
+ // ::= _E # unsigned __int8
+ // ::= _F # __int16
+ // ::= _G # unsigned __int16
+ // ::= _H # __int32
+ // ::= _I # unsigned __int32
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ // TODO: __int8 and friends
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << "_W"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::NullPtr:
+ assert(false && "Don't know how to mangle this type");
+ break;
+ }
+}
+
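A few spot checks against the <builtin-type> table above (sketch only):

    #include <cassert>
    #include <string>

    std::string builtinCode(const std::string &T) {
      if (T == "void")      return "X";
      if (T == "char")      return "D";
      if (T == "int")       return "H";
      if (T == "double")    return "N";
      if (T == "long long") return "_J";
      if (T == "bool")      return "_N";
      if (T == "wchar_t")   return "_W";
      return "";
    }

    int main() {
      assert(builtinCode("int") == "H");
      assert(builtinCode("bool") == "_N");
      return 0;
    }
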
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // I'll probably have mangleType(MemberPointerType) call the mangleType()
+ // method directly.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T, IsInstMethod);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
+
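Putting the pieces together for the classic example int __cdecl f(int,
double), whose MSVC mangling is ?f@@YAHHN@Z; the string is assembled below
in the same emission order this file uses (name, function class, calling
convention, return type, arguments, throw spec). An editorial sketch, not
part of the patch:

    #include <cassert>
    #include <string>

    int main() {
      std::string M = "?";
      M += "f@";  // <source-name> "f"
      M += "@";   // name terminator (translation-unit scope)
      M += "Y";   // <function-class>: global near (mangleFunctionClass)
      M += "A";   // <calling-convention>: __cdecl
      M += "H";   // <return-type>: int
      M += "HN";  // <argument-list>: int, double
      M += "@";   // end of argument list
      M += "Z";   // <throw-spec>: always Z (mangleThrowSpecification)
      assert(M == "?f@@YAHHN@Z");
      return 0;
    }
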
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
+ bool IsInstMethod) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ CallingConv CC = T->getCallConv();
+ if (CC == CC_Default)
+ CC = IsInstMethod ? getASTContext().getDefaultMethodCallConv() : CC_C;
+ switch (CC) {
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
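Sketch of the default resolution above (names hypothetical): an
unannotated instance method picks up the target's default method
convention (__thiscall, 'E', on x86), everything else falls back to
__cdecl ('A'):

    enum CC { CC_C, CC_X86ThisCall, CC_X86StdCall };

    char ccCode(CC C) {
      return C == CC_X86ThisCall ? 'E' : C == CC_X86StdCall ? 'G' : 'A';
    }

    char mangleCC(bool HasExplicitCC, CC Explicit, bool IsInstMethod) {
      return ccCode(HasExplicitCC ? Explicit
                                  : (IsInstMethod ? CC_X86ThisCall : CC_C));
    }
    // mangleCC(false, CC_C, true)          == 'E'  // plain member function
    // mangleCC(true, CC_X86StdCall, false) == 'G'  // __stdcall free function
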
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
+
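Spot checks of the tag codes above for simple namespace-scope tags; the
trailing "Name@" plus "@" comes from mangleName (a sketch, not the real
API):

    #include <cassert>
    #include <string>

    std::string tagType(char Code, const std::string &Name, int EnumSize = 0) {
      std::string M(1, Code);
      if (Code == 'W')                    // enums also carry their
        M += std::to_string(EnumSize);    // underlying size in chars
      return M + Name + "@" + "@";
    }

    int main() {
      assert(tagType('U', "S") == "US@@");      // struct S
      assert(tagType('W', "E", 4) == "W4E@@");  // enum E backed by int
      return 0;
    }
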
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ llvm::SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ assert(false && "Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ assert(false && "Don't know how to mangle dependent-sized arrays!");
+ } else if (ElementTy->isIncompleteArrayType()) {
+ // Incomplete arrays contribute no dimension; advance to the element
+ // type so the loop cannot spin on the same type forever.
+ ElementTy = getASTContext().getAsIncompleteArrayType(ElementTy)
+ ->getElementType();
+ } else break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
+
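The dimension-collection loop above, modeled in isolation (the struct is a
stand-in for Clang's ConstantArrayType, not the real AST):

    #include <cstdint>
    #include <vector>

    struct Ty { bool IsArray; uint64_t Size; const Ty *Element; };

    std::vector<uint64_t> collectDims(const Ty *T) {
      std::vector<uint64_t> Dims;
      while (T->IsArray) {        // peel nested constant arrays
        Dims.push_back(T->Size);  // e.g. int[2][3] yields {2, 3}
        T = T->Element;
      }
      return Dims;                // mangled as Y <count> <dim>+ above
    }
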
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const SubstTemplateTypeParmPackType *T) {
+ assert(false &&
+ "Don't know how to mangle SubstTemplateTypeParmPackTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
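Hand-assembled instances of the <pointer-type> production above; the
"?name@@3 ... A" wrapper around these comes from mangleName and
mangleVariableEncoding earlier in this file:

    #include <cassert>
    #include <string>

    std::string pointerTo(bool ConstPointee, char PointeeCode) {
      std::string M = "P";             // unqualified pointer
      M += ConstPointee ? "B" : "A";   // pointee <cvr-qualifiers>
      M += PointeeCode;                // pointee <type>
      return M;
    }

    int main() {
      assert(pointerTo(false, 'H') == "PAH");  // int *p;        -> ?p@@3PAHA
      assert(pointerTo(true,  'D') == "PBD");  // const char *s; -> ?s@@3PBDA
      return 0;
    }
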
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ assert(false && "Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ assert(false && "Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ assert(false && "Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ assert(false && "Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ assert(false &&
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T) {
+ assert(false && "Don't know how to mangle PackExpansionTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ assert(false && "Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ assert(false && "Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AutoType *T) {
+ assert(false && "Don't know how to mangle AutoTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ llvm::raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle constructors!");
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle destructors!");
+}
+void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *,
+ llvm::raw_ostream &) {
+ assert(false && "Can't yet mangle reference temporaries!");
+}
+
+MangleContext *clang::createMicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags) {
+ return new MicrosoftMangleContext(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
index 212def8..650321d 100644
--- a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
@@ -22,7 +22,7 @@
using namespace clang;
NestedNameSpecifier *
-NestedNameSpecifier::FindOrInsert(ASTContext &Context,
+NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
const NestedNameSpecifier &Mockup) {
llvm::FoldingSetNodeID ID;
Mockup.Profile(ID);
@@ -39,8 +39,8 @@ NestedNameSpecifier::FindOrInsert(ASTContext &Context,
}
NestedNameSpecifier *
-NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
- IdentifierInfo *II) {
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
@@ -52,8 +52,8 @@ NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
}
NestedNameSpecifier *
-NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
- NamespaceDecl *NS) {
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, NamespaceDecl *NS) {
assert(NS && "Namespace cannot be NULL");
assert((!Prefix ||
(Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
@@ -66,18 +66,19 @@ NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
}
NestedNameSpecifier *
-NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
- bool Template, Type *T) {
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ bool Template, const Type *T) {
assert(T && "Type cannot be NULL");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(Template? TypeSpecWithTemplate : TypeSpec);
- Mockup.Specifier = T;
+ Mockup.Specifier = const_cast<Type*>(T);
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
-NestedNameSpecifier::Create(ASTContext &Context, IdentifierInfo *II) {
+NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(0);
@@ -86,7 +87,8 @@ NestedNameSpecifier::Create(ASTContext &Context, IdentifierInfo *II) {
return FindOrInsert(Context, Mockup);
}
-NestedNameSpecifier *NestedNameSpecifier::GlobalSpecifier(ASTContext &Context) {
+NestedNameSpecifier *
+NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
if (!Context.GlobalNestedNameSpecifier)
Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier();
return Context.GlobalNestedNameSpecifier;
@@ -113,6 +115,24 @@ bool NestedNameSpecifier::isDependent() const {
return false;
}
+bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Identifier:
+ return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
+
+ case Namespace:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->containsUnexpandedParameterPack();
+ }
+
+ // Necessary to suppress a GCC warning.
+ return false;
+}
+
/// \brief Print this nested name specifier to the given output
/// stream.
void
@@ -139,7 +159,7 @@ NestedNameSpecifier::print(llvm::raw_ostream &OS,
case TypeSpec: {
std::string TypeStr;
- Type *T = getAsType();
+ const Type *T = getAsType();
PrintingPolicy InnerPolicy(Policy);
InnerPolicy.SuppressScope = true;
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
index 5fe873a..eca351a 100644
--- a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -21,7 +21,7 @@ using namespace clang;
typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
static void BuildParentMap(MapTy& M, Stmt* S) {
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
+ for (Stmt::child_range I = S->children(); I; ++I)
if (*I) {
M[*I] = S;
BuildParentMap(M, *I);
@@ -40,6 +40,12 @@ ParentMap::~ParentMap() {
delete (MapTy*) Impl;
}
+void ParentMap::addStmt(Stmt* S) {
+ if (S) {
+ BuildParentMap(*(MapTy*) Impl, S);
+ }
+}
+
Stmt* ParentMap::getParent(Stmt* S) const {
MapTy* M = (MapTy*) Impl;
MapTy::iterator I = M->find(S);
@@ -51,6 +57,15 @@ Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
return S;
}
+Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ }
+ while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S)));
+
+ return S;
+}
+
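Hypothetical use of the two helpers added above (assumes a Stmt *Body
containing an Expr *E; the identifiers are illustrative, not from the
patch):

    #include "clang/AST/Expr.h"
    #include "clang/AST/ParentMap.h"
    using namespace clang;

    // Climb from E past any parens and casts to its semantic consumer.
    static Stmt *findConsumer(Stmt *Body, Expr *E) {
      ParentMap PM(Body);                      // builds child -> parent map
      return PM.getParentIgnoreParenCasts(E);  // new helper above
    }
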
bool ParentMap::isConsumedExpr(Expr* E) const {
Stmt *P = getParent(E);
Stmt *DirectChild = E;
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
index 4d9c516..035c48f 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
@@ -27,9 +27,10 @@ void ASTRecordLayout::Destroy(ASTContext &Ctx) {
Ctx.Deallocate(this);
}
-ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx, uint64_t size, unsigned alignment,
- unsigned datasize, const uint64_t *fieldoffsets,
- unsigned fieldcount)
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
+ CharUnits alignment, CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount)
: Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
FieldCount(fieldcount), CXXInfo(0) {
if (FieldCount > 0) {
@@ -39,16 +40,16 @@ ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx, uint64_t size, unsigned alignm
}
// Constructor for C++ records.
-ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx,
- uint64_t size, unsigned alignment,
- uint64_t datasize,
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment,
+ CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
- uint64_t nonvirtualsize,
- unsigned nonvirtualalign,
- uint64_t SizeOfLargestEmptySubobject,
+ CharUnits nonvirtualsize,
+ CharUnits nonvirtualalign,
+ CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
- bool PrimaryBaseIsVirtual,
+ bool IsPrimaryBaseVirtual,
const BaseOffsetsMapTy& BaseOffsets,
const BaseOffsetsMapTy& VBaseOffsets)
: Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
@@ -59,7 +60,8 @@ ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx,
memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
}
- CXXInfo->PrimaryBase = PrimaryBaseInfo(PrimaryBase, PrimaryBaseIsVirtual);
+ CXXInfo->PrimaryBase.setPointer(PrimaryBase);
+ CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
CXXInfo->NonVirtualSize = nonvirtualsize;
CXXInfo->NonVirtualAlign = nonvirtualalign;
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
@@ -68,11 +70,11 @@ ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx,
#ifndef NDEBUG
if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {
- if (getPrimaryBaseWasVirtual())
- assert(getVBaseClassOffset(PrimaryBase) == 0 &&
+ if (isPrimaryBaseVirtual())
+ assert(getVBaseClassOffset(PrimaryBase).isZero() &&
"Primary virtual base must be at offset 0!");
else
- assert(getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base must be at offset 0!");
}
#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
index 13fae29..cf14eba 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -8,12 +8,14 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/Format.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/MathExtras.h"
@@ -55,62 +57,71 @@ struct BaseSubobjectInfo {
/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
/// offsets while laying out a C++ class.
class EmptySubobjectMap {
- ASTContext &Context;
-
+ const ASTContext &Context;
+ uint64_t CharWidth;
+
/// Class - The class whose empty entries we're keeping track of.
const CXXRecordDecl *Class;
/// EmptyClassOffsets - A map from offsets to empty record decls.
typedef llvm::SmallVector<const CXXRecordDecl *, 1> ClassVectorTy;
- typedef llvm::DenseMap<uint64_t, ClassVectorTy> EmptyClassOffsetsMapTy;
+ typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy;
EmptyClassOffsetsMapTy EmptyClassOffsets;
/// MaxEmptyClassOffset - The highest offset known to contain an empty
/// base subobject.
- uint64_t MaxEmptyClassOffset;
+ CharUnits MaxEmptyClassOffset;
/// ComputeEmptySubobjectSizes - Compute the size of the largest base or
/// member subobject that is empty.
void ComputeEmptySubobjectSizes();
- void AddSubobjectAtOffset(const CXXRecordDecl *RD, uint64_t Offset);
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset);
void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
- uint64_t Offset, bool PlacingEmptyBase);
+ CharUnits Offset, bool PlacingEmptyBase);
void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *Class,
- uint64_t Offset);
- void UpdateEmptyFieldSubobjects(const FieldDecl *FD, uint64_t Offset);
+ CharUnits Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
/// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
/// subobjects beyond the given offset.
- bool AnyEmptySubobjectsBeyondOffset(uint64_t Offset) const {
+ bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const {
return Offset <= MaxEmptyClassOffset;
}
+ CharUnits
+ getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const {
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
+ assert(FieldOffset % CharWidth == 0 &&
+ "Field offset not at char boundary!");
+
+ return Context.toCharUnitsFromBits(FieldOffset);
+ }
+
protected:
- bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset) const;
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const;
bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
- uint64_t Offset);
+ CharUnits Offset);
bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
const CXXRecordDecl *Class,
- uint64_t Offset) const;
+ CharUnits Offset) const;
bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
- uint64_t Offset) const;
+ CharUnits Offset) const;
public:
/// This holds the size of the largest empty subobject (either a base
/// or a member). Will be zero if the record being built doesn't contain
/// any empty classes.
- uint64_t SizeOfLargestEmptySubobject;
+ CharUnits SizeOfLargestEmptySubobject;
- EmptySubobjectMap(ASTContext &Context, const CXXRecordDecl *Class)
- : Context(Context), Class(Class), MaxEmptyClassOffset(0),
- SizeOfLargestEmptySubobject(0) {
+ EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class)
+ : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) {
ComputeEmptySubobjectSizes();
}
@@ -119,11 +130,11 @@ public:
/// Returns false if placing the record will result in two components
/// (direct or indirect) of the same type having the same offset.
bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
- uint64_t Offset);
+ CharUnits Offset);
/// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
/// offset.
- bool CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset);
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset);
};
void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
@@ -133,7 +144,7 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t EmptySize = 0;
+ CharUnits EmptySize;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
if (BaseDecl->isEmpty()) {
// If the class decl is empty, get its size.
@@ -143,8 +154,8 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
EmptySize = Layout.getSizeOfLargestEmptySubobject();
}
- SizeOfLargestEmptySubobject = std::max(SizeOfLargestEmptySubobject,
- EmptySize);
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
}
// Check the fields.
@@ -159,7 +170,7 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
if (!RT)
continue;
- uint64_t EmptySize = 0;
+ CharUnits EmptySize;
const CXXRecordDecl *MemberDecl = cast<CXXRecordDecl>(RT->getDecl());
const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
if (MemberDecl->isEmpty()) {
@@ -170,14 +181,14 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
EmptySize = Layout.getSizeOfLargestEmptySubobject();
}
- SizeOfLargestEmptySubobject = std::max(SizeOfLargestEmptySubobject,
- EmptySize);
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
}
}
bool
EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset) const {
+ CharUnits Offset) const {
// We only need to check empty bases.
if (!RD->isEmpty())
return true;
@@ -195,24 +206,27 @@ EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
}
void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset) {
+ CharUnits Offset) {
// We only care about empty bases.
if (!RD->isEmpty())
return;
+ // If we have empty structures inside a union, we can assign both
+ // the same offset. Just avoid pushing them twice in the list.
ClassVectorTy& Classes = EmptyClassOffsets[Offset];
- assert(std::find(Classes.begin(), Classes.end(), RD) == Classes.end() &&
- "Duplicate empty class detected!");
-
+ if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
+ return;
+
Classes.push_back(RD);
// Update the empty class offset.
- MaxEmptyClassOffset = std::max(MaxEmptyClassOffset, Offset);
+ if (Offset > MaxEmptyClassOffset)
+ MaxEmptyClassOffset = Offset;
}
bool
-EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
- uint64_t Offset) {
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
// We don't have to keep looking past the maximum offset that's known to
// contain an empty class.
if (!AnyEmptySubobjectsBeyondOffset(Offset))
@@ -228,7 +242,7 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
if (Base->IsVirtual)
continue;
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
return false;
@@ -248,8 +262,10 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
return false;
}
@@ -258,7 +274,7 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
}
void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
- uint64_t Offset,
+ CharUnits Offset,
bool PlacingEmptyBase) {
if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
// We know that the only empty subobjects that can conflict with empty
@@ -278,7 +294,7 @@ void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
if (Base->IsVirtual)
continue;
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
}
@@ -295,17 +311,19 @@ void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
- uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
UpdateEmptyFieldSubobjects(FD, FieldOffset);
}
}
bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
- uint64_t Offset) {
+ CharUnits Offset) {
// If we know this class doesn't have any empty subobjects we don't need to
// bother checking.
- if (!SizeOfLargestEmptySubobject)
+ if (SizeOfLargestEmptySubobject.isZero())
return true;
if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
@@ -320,7 +338,7 @@ bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
bool
EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
const CXXRecordDecl *Class,
- uint64_t Offset) const {
+ CharUnits Offset) const {
// We don't have to keep looking past the maximum offset that's known to
// contain an empty class.
if (!AnyEmptySubobjectsBeyondOffset(Offset))
@@ -340,7 +358,7 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
return false;
}
@@ -352,7 +370,7 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
const CXXRecordDecl *VBaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
return false;
}
@@ -363,8 +381,10 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
return false;
@@ -373,8 +393,9 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
return true;
}
-bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
- uint64_t Offset) const {
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const {
// We don't have to keep looking past the maximum offset that's known to
// contain an empty class.
if (!AnyEmptySubobjectsBeyondOffset(Offset))
@@ -397,7 +418,7 @@ bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
+ CharUnits ElementOffset = Offset;
for (uint64_t I = 0; I != NumElements; ++I) {
// We don't have to keep looking past the maximum offset that's known to
// contain an empty class.
@@ -415,7 +436,8 @@ bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
}
bool
-EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) {
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
+ CharUnits Offset) {
if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
return false;
@@ -427,7 +449,7 @@ EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) {
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *Class,
- uint64_t Offset) {
+ CharUnits Offset) {
// We know that the only empty subobjects that can conflict with empty
// field subobjects are subobjects of empty bases that can be placed at offset
// zero. Because of this, we only need to keep track of empty field
@@ -449,7 +471,7 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
}
@@ -460,7 +482,7 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
const CXXRecordDecl *VBaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
}
}
@@ -470,15 +492,17 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
UpdateEmptyFieldSubobjects(FD, FieldOffset);
}
}
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
- uint64_t Offset) {
+ CharUnits Offset) {
QualType T = FD->getType();
if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -497,7 +521,7 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
+ CharUnits ElementOffset = Offset;
for (uint64_t I = 0; I != NumElements; ++I) {
// We know that the only empty subobjects that can conflict with empty
@@ -519,7 +543,7 @@ protected:
// FIXME: Remove this and make the appropriate fields public.
friend class clang::ASTContext;
- ASTContext &Context;
+ const ASTContext &Context;
EmptySubobjectMap *EmptySubobjects;
@@ -527,7 +551,10 @@ protected:
uint64_t Size;
/// Alignment - The current alignment of the record layout.
- unsigned Alignment;
+ CharUnits Alignment;
+
+ /// \brief The alignment the record would have if the packed attribute
+ /// were not in effect.
+ CharUnits UnpackedAlignment;
llvm::SmallVector<uint64_t, 16> FieldOffsets;
@@ -545,13 +572,13 @@ protected:
/// MaxFieldAlignment - The maximum allowed field alignment. This is set by
/// #pragma pack.
- unsigned MaxFieldAlignment;
+ CharUnits MaxFieldAlignment;
/// DataSize - The data size of the record being laid out.
uint64_t DataSize;
- uint64_t NonVirtualSize;
- unsigned NonVirtualAlignment;
+ CharUnits NonVirtualSize;
+ CharUnits NonVirtualAlignment;
/// PrimaryBase - the primary base class (if one exists) of the class
/// we're laying out.
@@ -561,7 +588,7 @@ protected:
/// out is virtual.
bool PrimaryBaseIsVirtual;
- typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t> BaseOffsetsMapTy;
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
/// Bases - base classes and their offsets in the record.
BaseOffsetsMapTy Bases;
@@ -571,7 +598,7 @@ protected:
/// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
/// primary base classes for some other direct or indirect base class.
- llvm::SmallSet<const CXXRecordDecl*, 32> IndirectPrimaryBases;
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
/// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
/// inheritance graph order. Used for determining the primary base class.
@@ -581,11 +608,14 @@ protected:
/// avoid visiting virtual bases more than once.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
- RecordLayoutBuilder(ASTContext &Context, EmptySubobjectMap *EmptySubobjects)
- : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(8),
- Packed(false), IsUnion(false), IsMac68kAlign(false),
- UnfilledBitsInLastByte(0), MaxFieldAlignment(0), DataSize(0),
- NonVirtualSize(0), NonVirtualAlignment(8), PrimaryBase(0),
+ RecordLayoutBuilder(const ASTContext &Context, EmptySubobjectMap
+ *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
+ Alignment(CharUnits::One()), UnpackedAlignment(Alignment),
+ Packed(false), IsUnion(false), IsMac68kAlign(false),
+ UnfilledBitsInLastByte(0), MaxFieldAlignment(CharUnits::Zero()),
+ DataSize(0), NonVirtualSize(CharUnits::Zero()),
+ NonVirtualAlignment(CharUnits::One()), PrimaryBase(0),
PrimaryBaseIsVirtual(false), FirstNearlyEmptyVBase(0) { }
void Layout(const RecordDecl *D);
@@ -594,7 +624,8 @@ protected:
void LayoutFields(const RecordDecl *D);
void LayoutField(const FieldDecl *D);
- void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize);
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
+ bool FieldPacked, const FieldDecl *D);
void LayoutBitField(const FieldDecl *D);
/// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
@@ -628,13 +659,6 @@ protected:
virtual uint64_t GetVirtualPointersSize(const CXXRecordDecl *RD) const;
- /// IdentifyPrimaryBases - Identify all virtual base classes, direct or
- /// indirect, that are primary base classes for some other direct or indirect
- /// base class.
- void IdentifyPrimaryBases(const CXXRecordDecl *RD);
-
- virtual bool IsNearlyEmpty(const CXXRecordDecl *RD) const;
-
/// LayoutNonVirtualBases - Determines the primary base class (if any) and
/// lays it out. Will then proceed to lay out all non-virtual base classes.
void LayoutNonVirtualBases(const CXXRecordDecl *RD);
@@ -642,8 +666,8 @@ protected:
/// LayoutNonVirtualBase - Lays out a single non-virtual base.
void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
- void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
- uint64_t Offset);
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
/// LayoutVirtualBases - Lays out all the virtual bases.
void LayoutVirtualBases(const CXXRecordDecl *RD,
@@ -653,17 +677,26 @@ protected:
void LayoutVirtualBase(const BaseSubobjectInfo *Base);
/// LayoutBase - Will lay out a base and return the offset where it was
- /// placed, in bits.
- uint64_t LayoutBase(const BaseSubobjectInfo *Base);
+ /// placed, in chars.
+ CharUnits LayoutBase(const BaseSubobjectInfo *Base);
/// InitializeLayout - Initialize record layout for the given record decl.
void InitializeLayout(const Decl *D);
/// FinishLayout - Finalize record layout. Adjust record size based on the
/// alignment.
- void FinishLayout();
+ void FinishLayout(const NamedDecl *D);
+
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
+ void UpdateAlignment(CharUnits NewAlignment) {
+ UpdateAlignment(NewAlignment, NewAlignment);
+ }
+
+ void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset, unsigned UnpackedAlign,
+ bool isPacked, const FieldDecl *D);
- void UpdateAlignment(unsigned NewAlignment);
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
@@ -674,42 +707,6 @@ public:
};
} // end anonymous namespace
-/// IsNearlyEmpty - Indicates when a class has a vtable pointer, but
-/// no other data.
-bool RecordLayoutBuilder::IsNearlyEmpty(const CXXRecordDecl *RD) const {
- // FIXME: Audit the corners
- if (!RD->isDynamicClass())
- return false;
- const ASTRecordLayout &BaseInfo = Context.getASTRecordLayout(RD);
- if (BaseInfo.getNonVirtualSize() == Context.Target.getPointerWidth(0))
- return true;
- return false;
-}
-
-void RecordLayoutBuilder::IdentifyPrimaryBases(const CXXRecordDecl *RD) {
- const ASTRecordLayout::PrimaryBaseInfo &BaseInfo =
- Context.getASTRecordLayout(RD).getPrimaryBaseInfo();
-
- // If the record has a primary base class that is virtual, add it to the set
- // of primary bases.
- if (BaseInfo.isVirtual())
- IndirectPrimaryBases.insert(BaseInfo.getBase());
-
- // Now traverse all bases and find primary bases for them.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- assert(!i->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
-
- // Only bases with virtual bases participate in computing the
- // indirect primary virtual base classes.
- if (Base->getNumVBases())
- IdentifyPrimaryBases(Base);
- }
-}
-
void
RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -721,7 +718,7 @@ RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// Check if this is a nearly empty virtual base.
- if (I->isVirtual() && IsNearlyEmpty(Base)) {
+ if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
// If it's not an indirect primary base, then we've found our primary
// base.
if (!IndirectPrimaryBases.count(Base)) {
@@ -754,14 +751,7 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
// Compute all the primary virtual bases for all of our direct and
// indirect bases, and record all their primary virtual base classes.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- assert(!i->getType()->isDependentType() &&
- "Cannot lay out class with dependent bases.");
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- IdentifyPrimaryBases(Base);
- }
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
// If the record has a dynamic base class, attempt to choose a primary base
// class. It is the first (in direct base class order) non-virtual dynamic
@@ -809,8 +799,18 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
Size += GetVirtualPointersSize(RD);
DataSize = Size;
+ CharUnits UnpackedBaseAlign =
+ Context.toCharUnitsFromBits(Context.Target.getPointerAlign(0));
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides the base alignment.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
// Update the alignment.
- UpdateAlignment(Context.Target.getPointerAlign(0));
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
}
BaseSubobjectInfo *
@@ -845,7 +845,7 @@ RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
// Check if this base has a primary virtual base.
if (RD->getNumVBases()) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- if (Layout.getPrimaryBaseWasVirtual()) {
+ if (Layout.isPrimaryBaseVirtual()) {
// This base does have a primary virtual base.
PrimaryVirtualBase = Layout.getPrimaryBase();
assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
@@ -977,7 +977,7 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
// Layout the base.
- uint64_t Offset = LayoutBase(Base);
+ CharUnits Offset = LayoutBase(Base);
// Add its base class offset.
assert(!Bases.count(Base->Class) && "base offset already exists!");
@@ -988,7 +988,7 @@ void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
void
RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
- uint64_t Offset) {
+ CharUnits Offset) {
// This base isn't interesting, it has no virtual bases.
if (!Info->Class->getNumVBases())
return;
@@ -1016,7 +1016,7 @@ RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
if (Base->IsVirtual)
continue;
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
}
}
@@ -1033,7 +1033,7 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
} else {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
PrimaryBase = Layout.getPrimaryBase();
- PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+ PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
}
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -1074,7 +1074,7 @@ void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
assert(!Base->Derived && "Trying to lay out a primary virtual base!");
// Layout the base.
- uint64_t Offset = LayoutBase(Base);
+ CharUnits Offset = LayoutBase(Base);
// Add its base class offset.
assert(!VBases.count(Base->Class) && "vbase offset already exists!");
@@ -1083,38 +1083,48 @@ void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
AddPrimaryVirtualBaseOffsets(Base, Offset);
}
-uint64_t RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
// If we have an empty base class, try to place it at offset 0.
if (Base->Class->isEmpty() &&
- EmptySubobjects->CanPlaceBaseAtOffset(Base, 0)) {
- Size = std::max(Size, Layout.getSize());
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
+ uint64_t RecordSizeInBits = Context.toBits(Layout.getSize());
+ Size = std::max(Size, RecordSizeInBits);
- return 0;
+ return CharUnits::Zero();
}
- unsigned BaseAlign = Layout.getNonVirtualAlign();
+ CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides the base alignment.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
// Round up the current record size to the base's alignment boundary.
- uint64_t Offset = llvm::RoundUpToAlignment(DataSize, BaseAlign);
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(DataSize, Context.toBits(BaseAlign));
// Try to place the base.
- while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
- Offset += BaseAlign;
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base,
+ Context.toCharUnitsFromBits(Offset)))
+ Offset += Context.toBits(BaseAlign);
if (!Base->Class->isEmpty()) {
// Update the data size.
- DataSize = Offset + Layout.getNonVirtualSize();
+ DataSize = Offset + Context.toBits(Layout.getNonVirtualSize());
Size = std::max(Size, DataSize);
} else
- Size = std::max(Size, Offset + Layout.getSize());
+ Size = std::max(Size, Offset + Context.toBits(Layout.getSize()));
// Remember max struct/class alignment.
- UpdateAlignment(BaseAlign);
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
- return Offset;
+ return Context.toCharUnitsFromBits(Offset);
}
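The loop above bumps the candidate offset by the base's alignment until the empty-subobject map accepts it; the starting candidate comes from llvm::RoundUpToAlignment. A minimal sketch of that arithmetic (equivalent to the LLVM helper; illustrative, not the library source):

// Round Value up to the next multiple of Align.
static uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
  return ((Value + Align - 1) / Align) * Align;
}
// RoundUpToAlignment(13, 8) == 16, RoundUpToAlignment(16, 8) == 16.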
void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
@@ -1129,14 +1139,14 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
// to bit-fields, but gcc appears not to follow that.
if (D->hasAttr<AlignMac68kAttr>()) {
IsMac68kAlign = true;
- MaxFieldAlignment = 2 * 8;
- Alignment = 2 * 8;
+ MaxFieldAlignment = CharUnits::fromQuantity(2);
+ Alignment = CharUnits::fromQuantity(2);
} else {
if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
- MaxFieldAlignment = MFAA->getAlignment();
+ MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
if (unsigned MaxAlign = D->getMaxAlignment())
- UpdateAlignment(MaxAlign);
+ UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
}
}
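MaxFieldAlignment now carries the #pragma pack cap in CharUnits; the attribute itself stores bits, hence the toCharUnitsFromBits conversion. A hypothetical translation unit showing the cap at work; the concrete offsets assume a typical Itanium-ABI 64-bit target:

#pragma pack(push, 2)   // caps every field's alignment at 2 bytes
struct S {
  char c;
  double d;             // naturally 8-byte aligned on most targets
};
#pragma pack(pop)
// With the cap, d lands at offset 2 instead of 8, and sizeof(S) is 10
// rather than 16 (illustrative figures, not a guarantee).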
@@ -1146,7 +1156,7 @@ void RecordLayoutBuilder::Layout(const RecordDecl *D) {
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
- FinishLayout();
+ FinishLayout(D);
}
void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
@@ -1157,7 +1167,7 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
LayoutFields(RD);
- NonVirtualSize = Size;
+ NonVirtualSize = Context.toCharUnitsFromBits(Size);
NonVirtualAlignment = Alignment;
// Lay out the virtual bases and add the primary virtual base offsets.
@@ -1167,7 +1177,7 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
- FinishLayout();
+ FinishLayout(RD);
#ifndef NDEBUG
// Check that we have base offsets for all bases.
@@ -1201,7 +1211,7 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
// We start laying out ivars not at the end of the superclass
// structure, but at the next byte following the last field.
- Size = llvm::RoundUpToAlignment(SL.getDataSize(), 8);
+ Size = Context.toBits(SL.getDataSize());
DataSize = Size;
}
@@ -1215,7 +1225,7 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
- FinishLayout();
+ FinishLayout(D);
}
void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
@@ -1227,7 +1237,9 @@ void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
}
void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
- uint64_t TypeSize) {
+ uint64_t TypeSize,
+ bool FieldPacked,
+ const FieldDecl *D) {
assert(Context.getLangOptions().CPlusPlus &&
"Can only have wide bit-fields in C++!");
@@ -1258,6 +1270,7 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
UnfilledBitsInLastByte = 0;
uint64_t FieldOffset;
+ uint64_t UnpaddedFieldOffset = DataSize - UnfilledBitsInLastByte;
if (IsUnion) {
DataSize = std::max(DataSize, FieldSize);
@@ -1276,16 +1289,20 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
// Place this field at the current location.
FieldOffsets.push_back(FieldOffset);
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset,
+ TypeAlign, FieldPacked, D);
+
// Update the size.
Size = std::max(Size, DataSize);
// Remember max struct/class alignment.
- UpdateAlignment(TypeAlign);
+ UpdateAlignment(Context.toCharUnitsFromBits(TypeAlign));
}
void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
- uint64_t FieldOffset = IsUnion ? 0 : (DataSize - UnfilledBitsInLastByte);
+ uint64_t UnpaddedFieldOffset = DataSize - UnfilledBitsInLastByte;
+ uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset;
uint64_t FieldSize = D->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType());
@@ -1293,29 +1310,48 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
unsigned FieldAlign = FieldInfo.second;
if (FieldSize > TypeSize) {
- LayoutWideBitField(FieldSize, TypeSize);
+ LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
return;
}
+ // The alignment the field would have if it were not packed. This is used
+ // to check whether the packed attribute was unnecessary (-Wpacked).
+ unsigned UnpackedFieldAlign = FieldAlign;
+ uint64_t UnpackedFieldOffset = FieldOffset;
+ if (!Context.Target.useBitFieldTypeAlignment())
+ UnpackedFieldAlign = 1;
+
if (FieldPacked || !Context.Target.useBitFieldTypeAlignment())
FieldAlign = 1;
FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
// The maximum field alignment overrides the aligned attribute.
- if (MaxFieldAlignment)
- FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ if (!MaxFieldAlignment.isZero()) {
+ unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
+ }
// Check if we need to add padding to give the field the correct alignment.
if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)
FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ if (FieldSize == 0 ||
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize)
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+
// Padding members don't affect overall alignment.
if (!D->getIdentifier())
- FieldAlign = 1;
+ FieldAlign = UnpackedFieldAlign = 1;
// Place this field at the current location.
FieldOffsets.push_back(FieldOffset);
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
+ UnpackedFieldAlign, FieldPacked, D);
+
// Update DataSize to include the last byte containing (part of) the bitfield.
if (IsUnion) {
// FIXME: I think FieldSize should be TypeSize here.
@@ -1331,7 +1367,8 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
Size = std::max(Size, DataSize);
// Remember max struct/class alignment.
- UpdateAlignment(FieldAlign);
+ UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign),
+ Context.toCharUnitsFromBits(UnpackedFieldAlign));
}
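LayoutBitField now computes a packed and an unpacked offset side by side so CheckFieldPadding can tell whether packing actually moved the field. A hedged illustration of when the two diverge, assuming a 32-bit int and bit-field type alignment honored:

struct BF {
  char a;
  int  b : 28;   // 28 bits cannot fit in the 24 bits left of a's word
} __attribute__((packed));
// Packed: b starts at bit 8, right after a. Unpacked: b would straddle
// an int boundary, so it is pushed to bit 32; the differing offsets are
// exactly what the Unpacked* bookkeeping above records.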
void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
@@ -1340,42 +1377,74 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
return;
}
+ uint64_t UnpaddedFieldOffset = DataSize - UnfilledBitsInLastByte;
+
// Reset the unfilled bits.
UnfilledBitsInLastByte = 0;
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
- uint64_t FieldOffset = IsUnion ? 0 : DataSize;
- uint64_t FieldSize;
- unsigned FieldAlign;
+ CharUnits FieldOffset =
+ IsUnion ? CharUnits::Zero() : Context.toCharUnitsFromBits(DataSize);
+ CharUnits FieldSize;
+ CharUnits FieldAlign;
if (D->getType()->isIncompleteArrayType()) {
// This is a flexible array member; we can't directly
// query getTypeInfo about these, so we figure it out here.
// Flexible array members don't have any size, but they
// have to be aligned appropriately for their element type.
- FieldSize = 0;
+ FieldSize = CharUnits::Zero();
const ArrayType* ATy = Context.getAsArrayType(D->getType());
- FieldAlign = Context.getTypeAlign(ATy->getElementType());
+ FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
} else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
unsigned AS = RT->getPointeeType().getAddressSpace();
- FieldSize = Context.Target.getPointerWidth(AS);
- FieldAlign = Context.Target.getPointerAlign(AS);
+ FieldSize =
+ Context.toCharUnitsFromBits(Context.Target.getPointerWidth(AS));
+ FieldAlign =
+ Context.toCharUnitsFromBits(Context.Target.getPointerAlign(AS));
} else {
- std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType());
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
FieldSize = FieldInfo.first;
FieldAlign = FieldInfo.second;
+
+ if (Context.getLangOptions().MSBitfields) {
+ // If MS bitfield layout is required, figure out what type is being
+ // laid out and align the field to the width of that type.
+
+ // Resolve all typedefs down to their base type and round up the field
+ // alignment if necessary.
+ QualType T = Context.getBaseElementType(D->getType());
+ if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
+ CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
+ if (TypeSize > FieldAlign)
+ FieldAlign = TypeSize;
+ }
+ }
}
+ // The alignment the field would have if it were not packed. This is used
+ // to check whether the packed attribute was unnecessary (-Wpacked).
+ CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits UnpackedFieldOffset = FieldOffset;
+
if (FieldPacked)
- FieldAlign = 8;
- FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
+ FieldAlign = CharUnits::One();
+ CharUnits MaxAlignmentInChars =
+ Context.toCharUnitsFromBits(D->getMaxAlignment());
+ FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
// The maximum field alignment overrides the aligned attribute.
- if (MaxFieldAlignment)
+ if (!MaxFieldAlignment.isZero()) {
FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
+ }
// Round up the current record size to the field's alignment boundary.
- FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign);
+ UnpackedFieldOffset =
+ UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
if (!IsUnion && EmptySubobjects) {
// Check if we can place the field at this offset.
@@ -1386,41 +1455,130 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
}
// Place this field at the current location.
- FieldOffsets.push_back(FieldOffset);
+ FieldOffsets.push_back(Context.toBits(FieldOffset));
+
+ CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
+ Context.toBits(UnpackedFieldOffset),
+ Context.toBits(UnpackedFieldAlign), FieldPacked, D);
// Reserve space for this field.
+ uint64_t FieldSizeInBits = Context.toBits(FieldSize);
if (IsUnion)
- Size = std::max(Size, FieldSize);
+ Size = std::max(Size, FieldSizeInBits);
else
- Size = FieldOffset + FieldSize;
+ Size = Context.toBits(FieldOffset) + FieldSizeInBits;
// Update the data size.
DataSize = Size;
// Remember max struct/class alignment.
- UpdateAlignment(FieldAlign);
+ UpdateAlignment(FieldAlign, UnpackedFieldAlign);
}
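The new MSBitfields branch widens an ordinary field's alignment to the full size of its builtin type, matching Microsoft layout. A hypothetical example, assuming an ix86-like target where long long is naturally only 4-byte aligned:

struct M {
  char c;
  long long ll;
};
// Natural layout: ll at offset 4. With -mms-bitfields the code above
// raises ll's alignment to sizeof(long long), so ll lands at offset 8
// and sizeof(M) becomes 16 (illustrative).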
-void RecordLayoutBuilder::FinishLayout() {
+void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
// In C++, records cannot be of size 0.
- if (Context.getLangOptions().CPlusPlus && Size == 0)
- Size = 8;
+ if (Context.getLangOptions().CPlusPlus && Size == 0) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // Compatibility with gcc requires a class (POD or non-POD) which is
+ // not empty but of size 0 (e.g. one whose only fields are zero-length
+ // arrays) to remain of size 0.
+ if (RD->isEmpty())
+ Size = 8;
+ }
+ else
+ Size = 8;
+ }
// Finally, round the size of the record up to the alignment of the
// record itself.
- Size = llvm::RoundUpToAlignment(Size, Alignment);
+ uint64_t UnpaddedSize = Size - UnfilledBitsInLastByte;
+ uint64_t UnpackedSize =
+ llvm::RoundUpToAlignment(Size, Context.toBits(UnpackedAlignment));
+ Size = llvm::RoundUpToAlignment(Size, Context.toBits(Alignment));
+
+ unsigned CharBitNum = Context.Target.getCharWidth();
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ // Warn if padding was introduced to the struct/class/union.
+ if (Size > UnpaddedSize) {
+ unsigned PadSize = Size - UnpaddedSize;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ Diag(RD->getLocation(), diag::warn_padded_struct_size)
+ << Context.getTypeDeclType(RD)
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (Packed && UnpackedAlignment > CharUnits::One() && Size == UnpackedSize)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << Context.getTypeDeclType(RD);
+ }
}
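FinishLayout now diagnoses tail padding under -Wpadded by comparing the unpadded bit size against the rounded-up size. A hypothetical trigger, with the effect described from the diag name rather than exact wording:

// Compile with -Wpadded (illustrative):
struct S {
  int  i;
  char c;   // 3 bytes of tail padding follow to reach 4-byte alignment
};
// UnpaddedSize is 40 bits while Size rounds up to 64, so PadSize
// becomes 3 bytes and warn_padded_struct_size fires.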
-void RecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
+void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
+ CharUnits UnpackedNewAlignment) {
// The alignment is not modified when using 'mac68k' alignment.
if (IsMac68kAlign)
return;
- if (NewAlignment <= Alignment)
+ if (NewAlignment > Alignment) {
+ assert(llvm::isPowerOf2_32(NewAlignment.getQuantity()) &&
+ "Alignment not a power of 2");
+ Alignment = NewAlignment;
+ }
+
+ if (UnpackedNewAlignment > UnpackedAlignment) {
+ assert(llvm::isPowerOf2_32(UnpackedNewAlignment.getQuantity()) &&
+ "Alignment not a power of 2");
+ UnpackedAlignment = UnpackedNewAlignment;
+ }
+}
+
+void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
+ uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset,
+ unsigned UnpackedAlign,
+ bool isPacked,
+ const FieldDecl *D) {
+ // We let ObjC ivars pass without warning; ObjC interfaces are generally
+ // not used for padding tricks.
+ if (isa<ObjCIvarDecl>(D))
return;
- assert(llvm::isPowerOf2_32(NewAlignment && "Alignment not a power of 2"));
+ unsigned CharBitNum = Context.Target.getCharWidth();
- Alignment = NewAlignment;
+ // Warn if padding was introduced to the struct/class.
+ if (!IsUnion && Offset > UnpaddedOffset) {
+ unsigned PadSize = Offset - UnpaddedOffset;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ if (D->getIdentifier())
+ Diag(D->getLocation(), diag::warn_padded_struct_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
+ << D->getIdentifier();
+ else
+ Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << D->getIdentifier();
}
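CheckFieldPadding also drives -Wpacked: if the field ended up at the same offset it would have had unpacked, the attribute bought nothing. A hypothetical trigger:

// Compile with -Wpacked (illustrative):
struct P {
  int a;
  int b;
} __attribute__((packed));
// a and b sit at offsets 0 and 4 with or without the attribute, so
// Offset == UnpackedOffset and warn_unnecessary_packed is emitted.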
const CXXMethodDecl *
@@ -1435,6 +1593,13 @@ RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
if (RD->isInAnonymousNamespace())
return 0;
+ // Template instantiations don't have key functions; see Itanium C++ ABI 5.2.6.
+ // Same behavior as GCC.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return 0;
+
for (CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
const CXXMethodDecl *MD = *I;
@@ -1463,26 +1628,21 @@ RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
return 0;
}
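For context, a sketch of the Itanium key-function rule this function implements, together with the new template carve-out (an illustration, not authoritative ABI text):

struct A {
  virtual void f();    // first non-inline, non-pure virtual: key function
  virtual void g() {}  // defined inline, so not a candidate
};
// A's vtable is emitted in the TU that defines A::f(). Per the change
// above, an implicit or explicit instantiation of a class template has
// no key function, so its vtable is emitted weakly wherever needed.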
-// This class implements layout specific to the Microsoft ABI.
-class MSRecordLayoutBuilder: public RecordLayoutBuilder {
-public:
- MSRecordLayoutBuilder(ASTContext& Ctx, EmptySubobjectMap *EmptySubobjects):
- RecordLayoutBuilder(Ctx, EmptySubobjects) {}
-
- virtual bool IsNearlyEmpty(const CXXRecordDecl *RD) const;
- virtual uint64_t GetVirtualPointersSize(const CXXRecordDecl *RD) const;
-};
+DiagnosticBuilder
+RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Context.getDiagnostics().Report(Loc, DiagID);
+}
-bool MSRecordLayoutBuilder::IsNearlyEmpty(const CXXRecordDecl *RD) const {
- // FIXME: Audit the corners
- if (!RD->isDynamicClass())
- return false;
- const ASTRecordLayout &BaseInfo = Context.getASTRecordLayout(RD);
- // In the Microsoft ABI, classes can have one or two vtable pointers.
- if (BaseInfo.getNonVirtualSize() == Context.Target.getPointerWidth(0) ||
- BaseInfo.getNonVirtualSize() == Context.Target.getPointerWidth(0) * 2)
- return true;
- return false;
+namespace {
+ // This class implements layout specific to the Microsoft ABI.
+ class MSRecordLayoutBuilder : public RecordLayoutBuilder {
+ public:
+ MSRecordLayoutBuilder(const ASTContext& Ctx,
+ EmptySubobjectMap *EmptySubobjects) :
+ RecordLayoutBuilder(Ctx, EmptySubobjects) {}
+
+ virtual uint64_t GetVirtualPointersSize(const CXXRecordDecl *RD) const;
+ };
}
uint64_t
@@ -1497,7 +1657,8 @@ MSRecordLayoutBuilder::GetVirtualPointersSize(const CXXRecordDecl *RD) const {
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
-const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
+const ASTRecordLayout &
+ASTContext::getASTRecordLayout(const RecordDecl *D) const {
D = D->getDefinition();
assert(D && "Cannot get layout of forward declarations!");
@@ -1531,12 +1692,16 @@ const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
// FIXME: This should be done in FinalizeLayout.
uint64_t DataSize =
IsPODForThePurposeOfLayout ? Builder->Size : Builder->DataSize;
- uint64_t NonVirtualSize =
- IsPODForThePurposeOfLayout ? DataSize : Builder->NonVirtualSize;
+ CharUnits NonVirtualSize =
+ IsPODForThePurposeOfLayout ?
+ toCharUnitsFromBits(DataSize) : Builder->NonVirtualSize;
+ CharUnits RecordSize = toCharUnitsFromBits(Builder->Size);
NewEntry =
- new (*this) ASTRecordLayout(*this, Builder->Size, Builder->Alignment,
- DataSize, Builder->FieldOffsets.data(),
+ new (*this) ASTRecordLayout(*this, RecordSize,
+ Builder->Alignment,
+ toCharUnitsFromBits(DataSize),
+ Builder->FieldOffsets.data(),
Builder->FieldOffsets.size(),
NonVirtualSize,
Builder->NonVirtualAlignment,
@@ -1548,9 +1713,12 @@ const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
Builder.Layout(D);
+ CharUnits RecordSize = toCharUnitsFromBits(Builder.Size);
+
NewEntry =
- new (*this) ASTRecordLayout(*this, Builder.Size, Builder.Alignment,
- Builder.Size,
+ new (*this) ASTRecordLayout(*this, RecordSize,
+ Builder.Alignment,
+ toCharUnitsFromBits(Builder.Size),
Builder.FieldOffsets.data(),
Builder.FieldOffsets.size());
}
@@ -1572,9 +1740,6 @@ const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
const CXXMethodDecl *&Entry = KeyFunctions[RD];
if (!Entry)
Entry = RecordLayoutBuilder::ComputeKeyFunction(RD);
- else
- assert(Entry == RecordLayoutBuilder::ComputeKeyFunction(RD) &&
- "Key function changed!");
return Entry;
}
@@ -1586,7 +1751,7 @@ const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
/// implementation. This may differ by including synthesized ivars.
const ASTRecordLayout &
ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl) {
+ const ObjCImplementationDecl *Impl) const {
assert(!D->isForwardDecl() && "Invalid interface decl!");
// Look up this layout, if already laid out, return what we have.
@@ -1609,9 +1774,12 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
Builder.Layout(D);
+ CharUnits RecordSize = toCharUnitsFromBits(Builder.Size);
+
const ASTRecordLayout *NewEntry =
- new (*this) ASTRecordLayout(*this, Builder.Size, Builder.Alignment,
- Builder.DataSize,
+ new (*this) ASTRecordLayout(*this, RecordSize,
+ Builder.Alignment,
+ toCharUnitsFromBits(Builder.DataSize),
Builder.FieldOffsets.data(),
Builder.FieldOffsets.size());
@@ -1621,18 +1789,18 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
}
static void PrintOffset(llvm::raw_ostream &OS,
- uint64_t Offset, unsigned IndentLevel) {
- OS << llvm::format("%4d | ", Offset);
+ CharUnits Offset, unsigned IndentLevel) {
+ OS << llvm::format("%4d | ", Offset.getQuantity());
OS.indent(IndentLevel * 2);
}
static void DumpCXXRecordLayout(llvm::raw_ostream &OS,
- const CXXRecordDecl *RD, ASTContext &C,
- uint64_t Offset,
+ const CXXRecordDecl *RD, const ASTContext &C,
+ CharUnits Offset,
unsigned IndentLevel,
const char* Description,
bool IncludeVirtualBases) {
- const ASTRecordLayout &Info = C.getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
PrintOffset(OS, Offset, IndentLevel);
OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString();
@@ -1644,7 +1812,7 @@ static void DumpCXXRecordLayout(llvm::raw_ostream &OS,
IndentLevel++;
- const CXXRecordDecl *PrimaryBase = Info.getPrimaryBase();
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
// Vtable pointer.
if (RD->isDynamicClass() && !PrimaryBase) {
@@ -1662,7 +1830,7 @@ static void DumpCXXRecordLayout(llvm::raw_ostream &OS,
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseOffset = Offset + Info.getBaseClassOffset(Base) / 8;
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
Base == PrimaryBase ? "(primary base)" : "(base)",
@@ -1674,7 +1842,8 @@ static void DumpCXXRecordLayout(llvm::raw_ostream &OS,
for (CXXRecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I, ++FieldNo) {
const FieldDecl *Field = *I;
- uint64_t FieldOffset = Offset + Info.getFieldOffset(FieldNo) / 8;
+ CharUnits FieldOffset = Offset +
+ C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
if (const CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
@@ -1699,27 +1868,27 @@ static void DumpCXXRecordLayout(llvm::raw_ostream &OS,
const CXXRecordDecl *VBase =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t VBaseOffset = Offset + Info.getVBaseClassOffset(VBase) / 8;
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
VBase == PrimaryBase ?
"(primary virtual base)" : "(virtual base)",
/*IncludeVirtualBases=*/false);
}
- OS << " sizeof=" << Info.getSize() / 8;
- OS << ", dsize=" << Info.getDataSize() / 8;
- OS << ", align=" << Info.getAlignment() / 8 << '\n';
- OS << " nvsize=" << Info.getNonVirtualSize() / 8;
- OS << ", nvalign=" << Info.getNonVirtualAlign() / 8 << '\n';
+ OS << " sizeof=" << Layout.getSize().getQuantity();
+ OS << ", dsize=" << Layout.getDataSize().getQuantity();
+ OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
+ OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
+ OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << '\n';
OS << '\n';
}
void ASTContext::DumpRecordLayout(const RecordDecl *RD,
- llvm::raw_ostream &OS) {
+ llvm::raw_ostream &OS) const {
const ASTRecordLayout &Info = getASTRecordLayout(RD);
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- return DumpCXXRecordLayout(OS, CXXRD, *this, 0, 0, 0,
+ return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0,
/*IncludeVirtualBases=*/true);
OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
@@ -1727,9 +1896,9 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD,
RD->dump();
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
- OS << " Size:" << Info.getSize() << "\n";
- OS << " DataSize:" << Info.getDataSize() << "\n";
- OS << " Alignment:" << Info.getAlignment() << "\n";
+ OS << " Size:" << toBits(Info.getSize()) << "\n";
+ OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
+ OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
OS << " FieldOffsets: [";
for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
if (i) OS << ", ";
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
index fc88981..7e73f02 100644
--- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -46,7 +46,7 @@ static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
}
const char *Stmt::getStmtClassName() const {
- return getStmtInfoTableEntry((StmtClass)sClass).Name;
+ return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
}
void Stmt::PrintStats() {
@@ -84,17 +84,84 @@ bool Stmt::CollectingStats(bool Enable) {
return StatSwitch;
}
+namespace {
+ struct good {};
+ struct bad {};
+
+ // These silly little functions have to be static inline to suppress
+ // unused warnings, and they have to be defined to suppress other
+ // warnings.
+ static inline good is_good(good) { return good(); }
+
+ typedef Stmt::child_range children_t();
+ template <class T> good implements_children(children_t T::*) {
+ return good();
+ }
+ static inline bad implements_children(children_t Stmt::*) {
+ return bad();
+ }
+
+ typedef SourceRange getSourceRange_t() const;
+ template <class T> good implements_getSourceRange(getSourceRange_t T::*) {
+ return good();
+ }
+ static inline bad implements_getSourceRange(getSourceRange_t Stmt::*) {
+ return bad();
+ }
+
+#define ASSERT_IMPLEMENTS_children(type) \
+ (void) sizeof(is_good(implements_children(&type::children)))
+#define ASSERT_IMPLEMENTS_getSourceRange(type) \
+ (void) sizeof(is_good(implements_getSourceRange(&type::getSourceRange)))
+}
+
+/// Check whether the various Stmt classes implement their member
+/// functions.
+static inline void check_implementations() {
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ ASSERT_IMPLEMENTS_children(type); \
+ ASSERT_IMPLEMENTS_getSourceRange(type);
+#include "clang/AST/StmtNodes.inc"
+}
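The good/bad overload pair is a compile-time check that every Stmt subclass shadows children() and getSourceRange() instead of inheriting Stmt's versions: &type::member yields a pointer-to-member of the class that actually declares the member, so the non-template Stmt::* overload (returning bad) wins only when the subclass failed to redeclare it, and is_good(bad) then refuses to compile. A minimal standalone sketch of the same pattern, with hypothetical names:

struct good {}; struct bad {};
static inline good is_good(good) { return good(); }

struct Base { int size() const; };
struct Okay : Base { int size() const; };  // shadows Base::size
struct Oops : Base {};                     // inherits it

typedef int size_fn_t() const;
template <class T> good implements_size(size_fn_t T::*) { return good(); }
static inline bad implements_size(size_fn_t Base::*) { return bad(); }

// (void) sizeof(is_good(implements_size(&Okay::size)));  // compiles
// (void) sizeof(is_good(implements_size(&Oops::size)));  // error: is_good(bad)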
+
+Stmt::child_range Stmt::children() {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<type*>(this)->children();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+ return child_range();
+}
+
+SourceRange Stmt::getSourceRange() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type*>(this)->getSourceRange();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+ return SourceRange();
+}
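Both dispatchers give every Stmt a uniform traversal interface. A sketch of a caller, relying only on the child_range behavior this same commit uses in StmtDumper (bool conversion while elements remain, dereference, increment):

static unsigned CountNodes(Stmt *S) {
  unsigned N = 1;
  for (Stmt::child_range I = S->children(); I; ++I)
    if (*I)                 // child slots may legitimately be null
      N += CountNodes(*I);
  return N;
}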
+
void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
if (this->Body)
C.Deallocate(Body);
- this->NumStmts = NumStmts;
+ this->CompoundStmtBits.NumStmts = NumStmts;
Body = new (C) Stmt*[NumStmts];
memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
}
const char *LabelStmt::getName() const {
- return getID()->getNameStart();
+ return getDecl()->getIdentifier()->getNameStart();
}
// This is defined here to avoid polluting Stmt.h with importing Expr.h
@@ -106,7 +173,7 @@ SourceRange ReturnStmt::getSourceRange() const {
}
bool Stmt::hasImplicitControlFlow() const {
- switch (sClass) {
+ switch (StmtBits.sClass) {
default:
return false;
@@ -416,7 +483,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(ASTContext &Context,
Stmt *atFinallyStmt) {
unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + (atFinallyStmt != 0)) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignof<ObjCAtTryStmt>());
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
atFinallyStmt);
}
@@ -426,7 +493,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(ASTContext &Context,
bool HasFinally) {
unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignof<ObjCAtTryStmt>());
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
@@ -448,7 +515,7 @@ CXXTryStmt *CXXTryStmt::Create(ASTContext &C, SourceLocation tryLoc,
std::size_t Size = sizeof(CXXTryStmt);
Size += ((numHandlers + 1) * sizeof(Stmt));
- void *Mem = C.Allocate(Size, llvm::alignof<CXXTryStmt>());
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers, numHandlers);
}
@@ -457,7 +524,7 @@ CXXTryStmt *CXXTryStmt::Create(ASTContext &C, EmptyShell Empty,
std::size_t Size = sizeof(CXXTryStmt);
Size += ((numHandlers + 1) * sizeof(Stmt));
- void *Mem = C.Allocate(Size, llvm::alignof<CXXTryStmt>());
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
@@ -530,7 +597,7 @@ void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
}
SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
- : Stmt(SwitchStmtClass), FirstCase(0)
+ : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0)
{
setConditionVariable(C, Var);
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
@@ -556,6 +623,11 @@ void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
V->getSourceRange().getEnd());
}
+Stmt *SwitchCase::getSubStmt() {
+ if (isa<CaseStmt>(this)) return cast<CaseStmt>(this)->getSubStmt();
+ return cast<DefaultStmt>(this)->getSubStmt();
+}
+
WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL)
: Stmt(WhileStmtClass)
@@ -585,101 +657,13 @@ void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
V->getSourceRange().getEnd());
}
-//===----------------------------------------------------------------------===//
-// Child Iterators for iterating over subexpressions/substatements
-//===----------------------------------------------------------------------===//
-
-// DeclStmt
-Stmt::child_iterator DeclStmt::child_begin() {
- return StmtIterator(DG.begin(), DG.end());
-}
-
-Stmt::child_iterator DeclStmt::child_end() {
- return StmtIterator(DG.end(), DG.end());
-}
-
-// NullStmt
-Stmt::child_iterator NullStmt::child_begin() { return child_iterator(); }
-Stmt::child_iterator NullStmt::child_end() { return child_iterator(); }
-
-// CompoundStmt
-Stmt::child_iterator CompoundStmt::child_begin() { return &Body[0]; }
-Stmt::child_iterator CompoundStmt::child_end() { return &Body[0]+NumStmts; }
-
-// CaseStmt
-Stmt::child_iterator CaseStmt::child_begin() { return &SubExprs[0]; }
-Stmt::child_iterator CaseStmt::child_end() { return &SubExprs[END_EXPR]; }
-
-// DefaultStmt
-Stmt::child_iterator DefaultStmt::child_begin() { return &SubStmt; }
-Stmt::child_iterator DefaultStmt::child_end() { return &SubStmt+1; }
-
-// LabelStmt
-Stmt::child_iterator LabelStmt::child_begin() { return &SubStmt; }
-Stmt::child_iterator LabelStmt::child_end() { return &SubStmt+1; }
-
-// IfStmt
-Stmt::child_iterator IfStmt::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator IfStmt::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// SwitchStmt
-Stmt::child_iterator SwitchStmt::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator SwitchStmt::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// WhileStmt
-Stmt::child_iterator WhileStmt::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator WhileStmt::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// DoStmt
-Stmt::child_iterator DoStmt::child_begin() { return &SubExprs[0]; }
-Stmt::child_iterator DoStmt::child_end() { return &SubExprs[0]+END_EXPR; }
-
-// ForStmt
-Stmt::child_iterator ForStmt::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator ForStmt::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// ObjCForCollectionStmt
-Stmt::child_iterator ObjCForCollectionStmt::child_begin() {
- return &SubExprs[0];
-}
-Stmt::child_iterator ObjCForCollectionStmt::child_end() {
- return &SubExprs[0]+END_EXPR;
-}
-
-// GotoStmt
-Stmt::child_iterator GotoStmt::child_begin() { return child_iterator(); }
-Stmt::child_iterator GotoStmt::child_end() { return child_iterator(); }
-
// IndirectGotoStmt
-Expr* IndirectGotoStmt::getTarget() { return cast<Expr>(Target); }
-const Expr* IndirectGotoStmt::getTarget() const { return cast<Expr>(Target); }
-
-Stmt::child_iterator IndirectGotoStmt::child_begin() { return &Target; }
-Stmt::child_iterator IndirectGotoStmt::child_end() { return &Target+1; }
-
-// ContinueStmt
-Stmt::child_iterator ContinueStmt::child_begin() { return child_iterator(); }
-Stmt::child_iterator ContinueStmt::child_end() { return child_iterator(); }
-
-// BreakStmt
-Stmt::child_iterator BreakStmt::child_begin() { return child_iterator(); }
-Stmt::child_iterator BreakStmt::child_end() { return child_iterator(); }
+LabelDecl *IndirectGotoStmt::getConstantTarget() {
+ if (AddrLabelExpr *E =
+ dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
+ return E->getLabel();
+ return 0;
+}
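An illustration of the case getConstantTarget() recognizes, using the GNU computed-goto extension:

void f(void) {
  void *p = &&out;   // AddrLabelExpr stored in a variable
  goto *p;           // target is a DeclRefExpr: getConstantTarget() -> 0
out:
  return;
}
// By contrast, 'goto *&&out;' has an AddrLabelExpr directly under the
// paren/implicit-cast-stripped target, so getConstantTarget() returns
// the label 'out'.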
// ReturnStmt
const Expr* ReturnStmt::getRetValue() const {
@@ -688,69 +672,3 @@ const Expr* ReturnStmt::getRetValue() const {
Expr* ReturnStmt::getRetValue() {
return cast_or_null<Expr>(RetExpr);
}
-
-Stmt::child_iterator ReturnStmt::child_begin() {
- return &RetExpr;
-}
-Stmt::child_iterator ReturnStmt::child_end() {
- return RetExpr ? &RetExpr+1 : &RetExpr;
-}
-
-// AsmStmt
-Stmt::child_iterator AsmStmt::child_begin() {
- return NumOutputs + NumInputs == 0 ? 0 : &Exprs[0];
-}
-Stmt::child_iterator AsmStmt::child_end() {
- return NumOutputs + NumInputs == 0 ? 0 : &Exprs[0] + NumOutputs + NumInputs;
-}
-
-// ObjCAtCatchStmt
-Stmt::child_iterator ObjCAtCatchStmt::child_begin() { return &Body; }
-Stmt::child_iterator ObjCAtCatchStmt::child_end() { return &Body + 1; }
-
-// ObjCAtFinallyStmt
-Stmt::child_iterator ObjCAtFinallyStmt::child_begin() { return &AtFinallyStmt; }
-Stmt::child_iterator ObjCAtFinallyStmt::child_end() { return &AtFinallyStmt+1; }
-
-// ObjCAtTryStmt
-Stmt::child_iterator ObjCAtTryStmt::child_begin() { return getStmts(); }
-
-Stmt::child_iterator ObjCAtTryStmt::child_end() {
- return getStmts() + 1 + NumCatchStmts + HasFinally;
-}
-
-// ObjCAtThrowStmt
-Stmt::child_iterator ObjCAtThrowStmt::child_begin() {
- return &Throw;
-}
-
-Stmt::child_iterator ObjCAtThrowStmt::child_end() {
- return &Throw+1;
-}
-
-// ObjCAtSynchronizedStmt
-Stmt::child_iterator ObjCAtSynchronizedStmt::child_begin() {
- return &SubStmts[0];
-}
-
-Stmt::child_iterator ObjCAtSynchronizedStmt::child_end() {
- return &SubStmts[0]+END_EXPR;
-}
-
-// CXXCatchStmt
-Stmt::child_iterator CXXCatchStmt::child_begin() {
- return &HandlerBlock;
-}
-
-Stmt::child_iterator CXXCatchStmt::child_end() {
- return &HandlerBlock + 1;
-}
-
-// CXXTryStmt
-Stmt::child_iterator CXXTryStmt::child_begin() {
- return reinterpret_cast<Stmt **>(this + 1);
-}
-
-Stmt::child_iterator CXXTryStmt::child_end() {
- return reinterpret_cast<Stmt **>(this + 1) + NumHandlers + 1;
-}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
index 5c236a4..846bd4c 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
@@ -59,19 +59,12 @@ namespace {
Visit(S);
// Print out children.
- Stmt::child_iterator CI = S->child_begin(), CE = S->child_end();
- if (CI != CE) {
- while (CI != CE) {
+ Stmt::child_range CI = S->children();
+ if (CI) {
+ while (CI) {
OS << '\n';
DumpSubTree(*CI++);
}
- if (const ConditionalOperator *CO =
- dyn_cast<ConditionalOperator>(S)) {
- if (CO->getSAVE()) {
- OS << '\n';
- DumpSubTree(CO->getSAVE());
- }
- }
}
}
OS << ')';
@@ -90,25 +83,44 @@ namespace {
}
void DumpType(QualType T) {
- OS << "'" << T.getAsString() << "'";
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split) << "'";
if (!T.isNull()) {
// If the type is sugared, also dump a (shallow) desugared type.
- QualType Simplified = T.getDesugaredType();
- if (Simplified != T)
- OS << ":'" << Simplified.getAsString() << "'";
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split) << "'";
}
}
+ void DumpDeclRef(Decl *node);
void DumpStmt(const Stmt *Node) {
Indent();
OS << "(" << Node->getStmtClassName()
<< " " << (void*)Node;
DumpSourceRange(Node);
}
+ void DumpValueKind(ExprValueKind K) {
+ switch (K) {
+ case VK_RValue: break;
+ case VK_LValue: OS << " lvalue"; break;
+ case VK_XValue: OS << " xvalue"; break;
+ }
+ }
+ void DumpObjectKind(ExprObjectKind K) {
+ switch (K) {
+ case OK_Ordinary: break;
+ case OK_BitField: OS << " bitfield"; break;
+ case OK_ObjCProperty: OS << " objcproperty"; break;
+ case OK_VectorComponent: OS << " vectorcomponent"; break;
+ }
+ }
void DumpExpr(const Expr *Node) {
DumpStmt(Node);
OS << ' ';
DumpType(Node->getType());
+ DumpValueKind(Node->getValueKind());
+ DumpObjectKind(Node->getObjectKind());
}
void DumpSourceRange(const Stmt *Node);
void DumpLocation(SourceLocation Loc);
@@ -122,7 +134,6 @@ namespace {
// Exprs
void VisitExpr(Expr *Node);
void VisitCastExpr(CastExpr *Node);
- void VisitImplicitCastExpr(ImplicitCastExpr *Node);
void VisitDeclRefExpr(DeclRefExpr *Node);
void VisitPredefinedExpr(PredefinedExpr *Node);
void VisitCharacterLiteral(CharacterLiteral *Node);
@@ -136,7 +147,7 @@ namespace {
void VisitBinaryOperator(BinaryOperator *Node);
void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
void VisitAddrLabelExpr(AddrLabelExpr *Node);
- void VisitTypesCompatibleExpr(TypesCompatibleExpr *Node);
+ void VisitBlockExpr(BlockExpr *Node);
// C++
void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
@@ -145,7 +156,7 @@ namespace {
void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node);
void VisitCXXConstructExpr(CXXConstructExpr *Node);
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node);
- void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *Node);
+ void VisitExprWithCleanups(ExprWithCleanups *Node);
void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node);
void DumpCXXTemporary(CXXTemporary *Temporary);
@@ -156,10 +167,7 @@ namespace {
void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
- void VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *Node);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
- void VisitObjCSuperExpr(ObjCSuperExpr *Node);
};
}
@@ -170,15 +178,15 @@ namespace {
void StmtDumper::DumpLocation(SourceLocation Loc) {
SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
- if (SpellingLoc.isInvalid()) {
- OS << "<invalid sloc>";
- return;
- }
-
// The general format we print out is filename:line:col, but we drop pieces
// that haven't changed since the last loc printed.
PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
OS << PLoc.getFilename() << ':' << PLoc.getLine()
<< ':' << PLoc.getColumn();
@@ -274,6 +282,8 @@ void StmtDumper::DumpDeclarator(Decl *D) {
UD->getTargetNestedNameDecl()->print(OS,
PrintingPolicy(UD->getASTContext().getLangOptions()));
OS << ";\"";
+ } else if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) {
+ OS << "label " << LD->getNameAsString();
} else {
assert(0 && "Unexpected decl");
}
@@ -345,39 +355,25 @@ void StmtDumper::VisitCastExpr(CastExpr *Node) {
OS << ">";
}
-void StmtDumper::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
- VisitCastExpr(Node);
- switch (Node->getValueKind()) {
- case VK_LValue:
- OS << " lvalue";
- break;
- case VK_XValue:
- OS << " xvalue";
- break;
- case VK_RValue:
- break;
- }
-}
-
void StmtDumper::VisitDeclRefExpr(DeclRefExpr *Node) {
DumpExpr(Node);
OS << " ";
- switch (Node->getDecl()->getKind()) {
- default: OS << "Decl"; break;
- case Decl::Function: OS << "FunctionDecl"; break;
- case Decl::Var: OS << "Var"; break;
- case Decl::ParmVar: OS << "ParmVar"; break;
- case Decl::EnumConstant: OS << "EnumConstant"; break;
- case Decl::Typedef: OS << "Typedef"; break;
- case Decl::Record: OS << "Record"; break;
- case Decl::Enum: OS << "Enum"; break;
- case Decl::CXXRecord: OS << "CXXRecord"; break;
- case Decl::ObjCInterface: OS << "ObjCInterface"; break;
- case Decl::ObjCClass: OS << "ObjCClass"; break;
+ DumpDeclRef(Node->getDecl());
+}
+
+void StmtDumper::DumpDeclRef(Decl *d) {
+ OS << d->getDeclKindName() << ' ' << (void*) d;
+
+ if (NamedDecl *nd = dyn_cast<NamedDecl>(d)) {
+ OS << " '";
+ nd->getDeclName().printName(OS);
+ OS << "'";
}
- OS << "='" << Node->getDecl() << "' " << (void*)Node->getDecl();
+ if (ValueDecl *vd = dyn_cast<ValueDecl>(d)) {
+ OS << ' '; DumpType(vd->getType());
+ }
}
void StmtDumper::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
@@ -475,6 +471,30 @@ void StmtDumper::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
DumpType(Node->getComputationResultType());
}
+void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
+ DumpExpr(Node);
+
+ IndentLevel++;
+ BlockDecl *block = Node->getBlockDecl();
+ if (block->capturesCXXThis()) {
+ OS << '\n'; Indent(); OS << "(capture this)";
+ }
+ for (BlockDecl::capture_iterator
+ i = block->capture_begin(), e = block->capture_end(); i != e; ++i) {
+ OS << '\n';
+ Indent();
+ OS << "(capture ";
+ if (i->isByRef()) OS << "byref ";
+ if (i->isNested()) OS << "nested ";
+ DumpDeclRef(i->getVariable());
+ if (i->hasCopyExpr()) DumpSubTree(i->getCopyExpr());
+ OS << ")";
+ }
+ IndentLevel--;
+
+ DumpSubTree(block->getBody());
+}
+
// GNU extensions.
void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
@@ -483,14 +503,6 @@ void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
<< " " << (void*)Node->getLabel();
}
-void StmtDumper::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
- DumpExpr(Node);
- OS << " ";
- DumpType(Node->getArgType1());
- OS << " ";
- DumpType(Node->getArgType2());
-}
-
//===----------------------------------------------------------------------===//
// C++ Expressions
//===----------------------------------------------------------------------===//
@@ -535,7 +547,7 @@ void StmtDumper::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
DumpCXXTemporary(Node->getTemporary());
}
-void StmtDumper::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *Node) {
+void StmtDumper::VisitExprWithCleanups(ExprWithCleanups *Node) {
DumpExpr(Node);
++IndentLevel;
for (unsigned i = 0, e = Node->getNumTemporaries(); i != e; ++i) {
@@ -606,29 +618,25 @@ void StmtDumper::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
void StmtDumper::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
DumpExpr(Node);
+ if (Node->isImplicitProperty()) {
+ OS << " Kind=MethodRef Getter=\"";
+ if (Node->getImplicitPropertyGetter())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << "(null)";
- OS << " Kind=PropertyRef Property=\"" << Node->getProperty() << '"';
-}
-
-void StmtDumper::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *Node) {
- DumpExpr(Node);
-
- ObjCMethodDecl *Getter = Node->getGetterMethod();
- ObjCMethodDecl *Setter = Node->getSetterMethod();
- OS << " Kind=MethodRef Getter=\""
- << Getter->getSelector().getAsString()
- << "\" Setter=\"";
- if (Setter)
- OS << Setter->getSelector().getAsString();
- else
- OS << "(null)";
- OS << "\"";
-}
+ OS << "\" Setter=\"";
+ if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
+ OS << Setter->getSelector().getAsString();
+ else
+ OS << "(null)";
+ OS << "\"";
+ } else {
+ OS << " Kind=PropertyRef Property=\"" << Node->getExplicitProperty() << '"';
+ }
-void StmtDumper::VisitObjCSuperExpr(ObjCSuperExpr *Node) {
- DumpExpr(Node);
- OS << " super";
+ if (Node->isSuperReceiver())
+ OS << " super";
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
index 7fc7c96..9a7265a 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
@@ -18,9 +18,9 @@ using namespace clang;
// FIXME: Add support for dependent-sized array types in C++?
// Does it even make sense to build a CFG for an uninstantiated template?
-static inline VariableArrayType* FindVA(Type* t) {
- while (ArrayType* vt = dyn_cast<ArrayType>(t)) {
- if (VariableArrayType* vat = dyn_cast<VariableArrayType>(vt))
+static inline const VariableArrayType *FindVA(const Type* t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
if (vat->getSizeExpr())
return vat;
@@ -33,7 +33,7 @@ static inline VariableArrayType* FindVA(Type* t) {
void StmtIteratorBase::NextVA() {
assert (getVAPtr());
- VariableArrayType* p = getVAPtr();
+ const VariableArrayType *p = getVAPtr();
p = FindVA(p->getElementType().getTypePtr());
setVAPtr(p);
@@ -90,7 +90,7 @@ void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
bool StmtIteratorBase::HandleDecl(Decl* D) {
if (VarDecl* VD = dyn_cast<VarDecl>(D)) {
- if (VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
+ if (const VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
setVAPtr(VAPtr);
return true;
}
@@ -99,7 +99,7 @@ bool StmtIteratorBase::HandleDecl(Decl* D) {
return true;
}
else if (TypedefDecl* TD = dyn_cast<TypedefDecl>(D)) {
- if (VariableArrayType* VAPtr =
+ if (const VariableArrayType* VAPtr =
FindVA(TD->getUnderlyingType().getTypePtr())) {
setVAPtr(VAPtr);
return true;
@@ -124,16 +124,16 @@ StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
NextDecl(false);
}
-StmtIteratorBase::StmtIteratorBase(VariableArrayType* t)
+StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t)
: stmt(0), decl(0), RawVAPtr(SizeOfTypeVAMode) {
RawVAPtr |= reinterpret_cast<uintptr_t>(t);
}
Stmt*& StmtIteratorBase::GetDeclExpr() const {
- if (VariableArrayType* VAPtr = getVAPtr()) {
+ if (const VariableArrayType* VAPtr = getVAPtr()) {
assert (VAPtr->SizeExpr);
- return VAPtr->SizeExpr;
+ return const_cast<Stmt*&>(VAPtr->SizeExpr);
}
assert (inDecl() || inDeclGroup());
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
index 5236a66..1cdd220 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -15,9 +15,11 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/Support/Format.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -63,6 +65,7 @@ namespace {
void PrintRawDeclStmt(DeclStmt *S);
void PrintRawIfStmt(IfStmt *If);
void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
+ void PrintCallArgs(CallExpr *E);
void PrintExpr(Expr *E) {
if (E)
@@ -83,10 +86,10 @@ namespace {
else StmtVisitor<StmtPrinter>::Visit(S);
}
- void VisitStmt(Stmt *Node) ATTRIBUTE_UNUSED {
+ void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
Indent() << "<<unknown stmt type>>\n";
}
- void VisitExpr(Expr *Node) ATTRIBUTE_UNUSED {
+ void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
OS << "<<unknown expr type>>";
}
void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
@@ -217,10 +220,6 @@ void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
}
}
-void StmtPrinter::VisitSwitchCase(SwitchCase*) {
- assert(0 && "SwitchCase is an abstract class");
-}
-
void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
Indent() << "while (";
PrintExpr(Node->getCond());
@@ -479,7 +478,8 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
void StmtPrinter::VisitDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *Node) {
- Node->getQualifier()->print(OS, Policy);
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
OS << Node->getNameInfo();
if (Node->hasExplicitTemplateArgs())
OS << TemplateSpecializationType::PrintTemplateArgumentList(
@@ -508,22 +508,17 @@ void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
}
void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
- if (Node->getBase()) {
+ if (Node->isSuperReceiver())
+ OS << "super.";
+ else if (Node->getBase()) {
PrintExpr(Node->getBase());
OS << ".";
}
- OS << Node->getProperty()->getName();
-}
-
-void StmtPrinter::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *Node) {
- if (Node->getBase()) {
- PrintExpr(Node->getBase());
- OS << ".";
- }
- if (Node->getGetterMethod())
- OS << Node->getGetterMethod();
+ if (Node->isImplicitProperty())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << Node->getExplicitProperty()->getName();
}
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
@@ -727,9 +722,7 @@ void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
OS << "]";
}
-void StmtPrinter::VisitCallExpr(CallExpr *Call) {
- PrintExpr(Call->getCallee());
- OS << "(";
+void StmtPrinter::PrintCallArgs(CallExpr *Call) {
for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
// Don't print any defaulted arguments
@@ -739,6 +732,12 @@ void StmtPrinter::VisitCallExpr(CallExpr *Call) {
if (i) OS << ", ";
PrintExpr(Call->getArg(i));
}
+}
+
+void StmtPrinter::VisitCallExpr(CallExpr *Call) {
+ PrintExpr(Call->getCallee());
+ OS << "(";
+ PrintCallArgs(Call);
OS << ")";
}
void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
@@ -793,21 +792,20 @@ void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
}
void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
PrintExpr(Node->getCond());
-
- if (Node->getLHS()) {
- OS << " ? ";
- PrintExpr(Node->getLHS());
- OS << " : ";
- }
- else { // Handle GCC extension where LHS can be NULL.
- OS << " ?: ";
- }
-
+ OS << " ? ";
+ PrintExpr(Node->getLHS());
+ OS << " : ";
PrintExpr(Node->getRHS());
}
// GNU extensions.
+void
+StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
+ PrintExpr(Node->getCommon());
+ OS << " ?: ";
+ PrintExpr(Node->getFalseExpr());
+}
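The GNU binary conditional the printer above handles: 'x ?: y' evaluates x once, yielding it if it is true and y otherwise, i.e. like 'x ? x : y' without re-evaluating x. A one-line example:

int deflt(int x, int y) { return x ?: y; }  // y only when x is zero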
void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
OS << "&&" << Node->getLabel()->getName();
}
@@ -818,12 +816,6 @@ void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
OS << ")";
}
-void StmtPrinter::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
- OS << "__builtin_types_compatible_p(";
- OS << Node->getArgType1().getAsString(Policy) << ",";
- OS << Node->getArgType2().getAsString(Policy) << ")";
-}
-
void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
OS << "__builtin_choose_expr(";
PrintExpr(Node->getCond());
@@ -968,6 +960,15 @@ void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
VisitCallExpr(cast<CallExpr>(Node));
}
+void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) {
+ PrintExpr(Node->getCallee());
+ OS << "<<<";
+ PrintCallArgs(Node->getConfig());
+ OS << ">>>(";
+ PrintCallArgs(Node);
+ OS << ")";
+}
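
The triple angle brackets emitted here are CUDA's kernel-launch syntax: the launch-config arguments go between <<< and >>>, ordinary call arguments follow. A sketch of the source form being reproduced (hypothetical kernel name, assumes CUDA mode):

    __global__ void kern(int *out);
    void launch(int *out) {
      kern<<<4, 256>>>(out);   // 4 blocks of 256 threads; printed back in the same shape
    }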
+
void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
OS << Node->getCastName() << '<';
OS << Node->getTypeAsWritten().getAsString(Policy) << ">(";
@@ -1001,6 +1002,16 @@ void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
+ OS << "__uuidof(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString(Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
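
__uuidof is the Microsoft extension with both a type and an expression operand, matching the two branches above. A sketch (assumes -fms-extensions; the GUID shown is the well-known IUnknown IID):

    struct __declspec(uuid("00000000-0000-0000-C000-000000000046")) IUnknown;
    // __uuidof(IUnknown) takes the type-operand branch;
    // __uuidof(*(IUnknown*)0) would take the expression-operand branch.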
void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
OS << (Node->getValue() ? "true" : "false");
}
@@ -1051,7 +1062,10 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
}
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
- OS << Node->getType().getAsString(Policy) << "()";
+ if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
+ OS << TSInfo->getType().getAsString(Policy) << "()";
+ else
+ OS << Node->getType().getAsString(Policy) << "()";
}
void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
@@ -1123,17 +1137,18 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
}
void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
- // FIXME. For now we just print a trivial constructor call expression,
- // constructing its first argument object.
- if (E->getNumArgs() == 1) {
- CXXConstructorDecl *CD = E->getConstructor();
- if (CD->isTrivial())
- PrintExpr(E->getArg(0));
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(E->getArg(i));
}
- // Nothing to print.
}
-void StmtPrinter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
// Just forward to the sub expression.
PrintExpr(E->getSubExpr());
}
@@ -1197,7 +1212,7 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
static const char *getTypeTraitName(UnaryTypeTrait UTT) {
switch (UTT) {
- default: assert(false && "Unknown type trait");
+ default: llvm_unreachable("Unknown unary type trait");
case UTT_HasNothrowAssign: return "__has_nothrow_assign";
case UTT_HasNothrowCopy: return "__has_nothrow_copy";
case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
@@ -1214,6 +1229,16 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
case UTT_IsPolymorphic: return "__is_polymorphic";
case UTT_IsUnion: return "__is_union";
}
+ return "";
+}
+
+static const char *getTypeTraitName(BinaryTypeTrait BTT) {
+ switch (BTT) {
+ case BTT_IsBaseOf: return "__is_base_of";
+ case BTT_TypeCompatible: return "__builtin_types_compatible_p";
+ case BTT_IsConvertibleTo: return "__is_convertible_to";
+ }
+ return "";
}
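
A quick illustration of the spellings mapped above; note that __builtin_types_compatible_p is the C-only trait that replaced the dedicated TypesCompatibleExpr node removed earlier in this diff:

    struct B {};
    struct D : B {};
    static_assert(__is_base_of(B, D), "printed via BTT_IsBaseOf");   // C++0x static_assert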
void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
@@ -1221,6 +1246,32 @@ void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
<< E->getQueriedType().getAsString(Policy) << ")";
}
+void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getLhsType().getAsString(Policy) << ","
+ << E->getRhsType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ OS << "noexcept(";
+ PrintExpr(E->getOperand());
+ OS << ")";
+}
+
+void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ PrintExpr(E->getPattern());
+ OS << "...";
+}
+
+void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ OS << "sizeof...(" << E->getPack()->getNameAsString() << ")";
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *Node) {
+ OS << Node->getParameterPack()->getNameAsString();
+}
+
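
The three printers above cover C++0x variadic-template expressions. A minimal sketch:

    template<typename ...Ts>
    unsigned count(Ts ...args) {
      return sizeof...(Ts);   // SizeOfPackExpr: prints as "sizeof...(Ts)"
    }
    template<typename ...Ts>
    void fwd(Ts ...args) {
      count(args...);         // PackExpansionExpr: pattern followed by "..."
    }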
// Obj-C
void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
@@ -1260,7 +1311,7 @@ void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
OS << ' ';
Selector selector = Mess->getSelector();
if (selector.isUnarySelector()) {
- OS << selector.getIdentifierInfoForSlot(0)->getName();
+ OS << selector.getNameForSlot(0);
} else {
for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
if (i < selector.getNumArgs()) {
@@ -1278,9 +1329,6 @@ void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
OS << "]";
}
-void StmtPrinter::VisitObjCSuperExpr(ObjCSuperExpr *) {
- OS << "super";
-}
void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
BlockDecl *BD = Node->getBlockDecl();
@@ -1313,6 +1361,9 @@ void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
void StmtPrinter::VisitBlockDeclRefExpr(BlockDeclRefExpr *Node) {
OS << Node->getDecl();
}
+
+void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {}
+
//===----------------------------------------------------------------------===//
// Stmt method implementations
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
index 78a336d..b540011 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -25,11 +25,11 @@ using namespace clang;
namespace {
class StmtProfiler : public StmtVisitor<StmtProfiler> {
llvm::FoldingSetNodeID &ID;
- ASTContext &Context;
+ const ASTContext &Context;
bool Canonical;
public:
- StmtProfiler(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical)
: ID(ID), Context(Context), Canonical(Canonical) { }
@@ -68,8 +68,7 @@ namespace {
void StmtProfiler::VisitStmt(Stmt *S) {
ID.AddInteger(S->getStmtClass());
- for (Stmt::child_iterator C = S->child_begin(), CEnd = S->child_end();
- C != CEnd; ++C)
+ for (Stmt::child_range C = S->children(); C; ++C)
Visit(*C);
}
@@ -102,7 +101,7 @@ void StmtProfiler::VisitDefaultStmt(DefaultStmt *S) {
void StmtProfiler::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
- VisitName(S->getID());
+ VisitDecl(S->getDecl());
}
void StmtProfiler::VisitIfStmt(IfStmt *S) {
@@ -130,7 +129,7 @@ void StmtProfiler::VisitForStmt(ForStmt *S) {
void StmtProfiler::VisitGotoStmt(GotoStmt *S) {
VisitStmt(S);
- VisitName(S->getLabel()->getID());
+ VisitDecl(S->getLabel());
}
void StmtProfiler::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
@@ -350,19 +349,17 @@ void StmtProfiler::VisitConditionalOperator(ConditionalOperator *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitAddrLabelExpr(AddrLabelExpr *S) {
+void StmtProfiler::VisitBinaryConditionalOperator(BinaryConditionalOperator *S){
VisitExpr(S);
- VisitName(S->getLabel()->getID());
}
-void StmtProfiler::VisitStmtExpr(StmtExpr *S) {
+void StmtProfiler::VisitAddrLabelExpr(AddrLabelExpr *S) {
VisitExpr(S);
+ VisitDecl(S->getLabel());
}
-void StmtProfiler::VisitTypesCompatibleExpr(TypesCompatibleExpr *S) {
+void StmtProfiler::VisitStmtExpr(StmtExpr *S) {
VisitExpr(S);
- VisitType(S->getArgType1());
- VisitType(S->getArgType2());
}
void StmtProfiler::VisitShuffleVectorExpr(ShuffleVectorExpr *S) {
@@ -431,8 +428,6 @@ void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) {
VisitDecl(S->getDecl());
ID.AddBoolean(S->isByRef());
ID.AddBoolean(S->isConstQualAdded());
- if (S->getCopyConstructorExpr())
- Visit(S->getCopyConstructorExpr());
}
static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
@@ -652,6 +647,10 @@ void StmtProfiler::VisitCXXMemberCallExpr(CXXMemberCallExpr *S) {
VisitCallExpr(S);
}
+void StmtProfiler::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *S) {
+ VisitCallExpr(S);
+}
+
void StmtProfiler::VisitCXXNamedCastExpr(CXXNamedCastExpr *S) {
VisitExplicitCastExpr(S);
}
@@ -687,6 +686,12 @@ void StmtProfiler::VisitCXXTypeidExpr(CXXTypeidExpr *S) {
VisitType(S->getTypeOperand());
}
+void StmtProfiler::VisitCXXUuidofExpr(CXXUuidofExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperand());
+}
+
void StmtProfiler::VisitCXXThisExpr(CXXThisExpr *S) {
VisitExpr(S);
}
@@ -774,6 +779,13 @@ void StmtProfiler::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *S) {
VisitType(S->getQueriedType());
}
+void StmtProfiler::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getLhsType());
+ VisitType(S->getRhsType());
+}
+
void
StmtProfiler::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *S) {
VisitExpr(S);
@@ -784,11 +796,8 @@ StmtProfiler::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
-void StmtProfiler::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *S) {
+void StmtProfiler::VisitExprWithCleanups(ExprWithCleanups *S) {
VisitExpr(S);
- for (unsigned I = 0, N = S->getNumTemporaries(); I != N; ++I)
- VisitDecl(
- const_cast<CXXDestructorDecl *>(S->getTemporary(I)->getDestructor()));
}
void
@@ -824,6 +833,30 @@ void StmtProfiler::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
+void StmtProfiler::VisitCXXNoexceptExpr(CXXNoexceptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitPackExpansionExpr(PackExpansionExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitSizeOfPackExpr(SizeOfPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ VisitTemplateArgument(S->getArgumentPack());
+}
+
+void StmtProfiler::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+}
+
void StmtProfiler::VisitObjCStringLiteral(ObjCStringLiteral *S) {
VisitExpr(S);
}
@@ -852,15 +885,16 @@ void StmtProfiler::VisitObjCIvarRefExpr(ObjCIvarRefExpr *S) {
void StmtProfiler::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *S) {
VisitExpr(S);
- VisitDecl(S->getProperty());
-}
-
-void StmtProfiler::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *S) {
- VisitExpr(S);
- VisitDecl(S->getGetterMethod());
- VisitDecl(S->getSetterMethod());
- VisitDecl(S->getInterfaceDecl());
+ if (S->isImplicitProperty()) {
+ VisitDecl(S->getImplicitPropertyGetter());
+ VisitDecl(S->getImplicitPropertySetter());
+ } else {
+ VisitDecl(S->getExplicitProperty());
+ }
+ if (S->isSuperReceiver()) {
+ ID.AddBoolean(S->isSuperReceiver());
+ VisitType(S->getSuperReceiverType());
+ }
}
void StmtProfiler::VisitObjCMessageExpr(ObjCMessageExpr *S) {
@@ -869,10 +903,6 @@ void StmtProfiler::VisitObjCMessageExpr(ObjCMessageExpr *S) {
VisitDecl(S->getMethodDecl());
}
-void StmtProfiler::VisitObjCSuperExpr(ObjCSuperExpr *S) {
- VisitExpr(S);
-}
-
void StmtProfiler::VisitObjCIsaExpr(ObjCIsaExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isArrow());
@@ -885,6 +915,7 @@ void StmtProfiler::VisitDecl(Decl *D) {
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
ID.AddInteger(NTTP->getDepth());
ID.AddInteger(NTTP->getIndex());
+ ID.AddBoolean(NTTP->isParameterPack());
VisitType(NTTP->getType());
return;
}
@@ -902,6 +933,7 @@ void StmtProfiler::VisitDecl(Decl *D) {
if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
ID.AddInteger(TTP->getDepth());
ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
return;
}
}
@@ -952,7 +984,8 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
break;
case TemplateArgument::Template:
- VisitTemplateName(Arg.getAsTemplate());
+ case TemplateArgument::TemplateExpansion:
+ VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
break;
case TemplateArgument::Declaration:
@@ -976,7 +1009,7 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
}
-void Stmt::Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) {
StmtProfiler Profiler(ID, Context, Canonical);
Profiler.Visit(this);
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
index a3bf145..5ab5f46 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -12,41 +12,172 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/FoldingSet.h"
#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/FoldingSet.h"
+#include <algorithm>
+#include <cctype>
+#include <iomanip>
+#include <sstream>
using namespace clang;
+/// \brief Print a template integral argument value.
+///
+/// \param TemplArg the TemplateArgument instance to print.
+///
+/// \param Out the raw_ostream instance to use for printing.
+static void printIntegral(const TemplateArgument &TemplArg,
+ llvm::raw_ostream &Out) {
+ const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
+ const llvm::APSInt *Val = TemplArg.getAsIntegral();
+
+ if (T->isBooleanType()) {
+ Out << (Val->getBoolValue() ? "true" : "false");
+ } else if (T->isCharType()) {
+ char Ch = Val->getSExtValue();
+ if (std::isprint(Ch)) {
+ Out << "'";
+ if (Ch == '\'' || Ch == '\\')
+ Out << '\\';
+ Out << Ch << "'";
+ } else {
+ std::ostringstream Str;
+ Str << std::setw(2) << std::setfill('0') << std::hex << (int)Ch;
+ Out << "'\\x" << Str.str() << "'";
+ }
+ } else {
+ Out << Val->toString(10);
+ }
+}
+
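
Given the hex branch above, a non-printable char-typed template argument renders as a two-digit escape. For instance (hypothetical Tag template):

    template<char C> struct Tag {};
    Tag<'a'>  ta;   // argument prints as 'a'
    Tag<'\n'> tn;   // non-printable: prints as '\x0a'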
//===----------------------------------------------------------------------===//
// TemplateArgument Implementation
//===----------------------------------------------------------------------===//
-/// \brief Construct a template argument pack.
-void TemplateArgument::setArgumentPack(TemplateArgument *args, unsigned NumArgs,
- bool CopyArgs) {
- assert(isNull() && "Must call setArgumentPack on a null argument");
+TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ if (NumArgs == 0)
+ return TemplateArgument(0, 0);
+
+ TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
+ std::copy(Args, Args + NumArgs, Storage);
+ return TemplateArgument(Storage, NumArgs);
+}
+
+bool TemplateArgument::isDependent() const {
+ switch (getKind()) {
+ case Null:
+ assert(false && "Should not have a NULL template argument");
+ return false;
+
+ case Type:
+ return getAsType()->isDependentType();
+
+ case Template:
+ return getAsTemplate().isDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent());
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
+bool TemplateArgument::isPackExpansion() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Template:
+ return false;
+
+ case TemplateExpansion:
+ return true;
+
+ case Type:
+ return isa<PackExpansionType>(getAsType());
+
+ case Expression:
+ return isa<PackExpansionExpr>(getAsExpr());
+ }
+
+ return false;
+}
+
+bool TemplateArgument::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case TemplateExpansion:
+ break;
+
+ case Type:
+ if (getAsType()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Template:
+ if (getAsTemplate().containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Expression:
+ if (getAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
- Kind = Pack;
- Args.NumArgs = NumArgs;
- Args.CopyArgs = CopyArgs;
- if (!Args.CopyArgs) {
- Args.Args = args;
- return;
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P)
+ if (P->containsUnexpandedParameterPack())
+ return true;
+
+ break;
}
- // FIXME: Allocate in ASTContext
- Args.Args = new TemplateArgument[NumArgs];
- for (unsigned I = 0; I != Args.NumArgs; ++I)
- Args.Args[I] = args[I];
+ return false;
+}
+
+llvm::Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
+ assert(Kind == TemplateExpansion);
+ if (TemplateArg.NumExpansions)
+ return TemplateArg.NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
}
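
The subtraction above undoes a sentinel encoding: a stored 0 means the expansion count is unknown, K+1 means exactly K expansions. A hypothetical helper spelling out that assumption (not part of the commit):

    // 0 = count unknown, K+1 = exactly K expansions.
    unsigned encodeNumExpansions(bool Known, unsigned K) {
      return Known ? K + 1 : 0;
    }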
void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context) const {
+ const ASTContext &Context) const {
ID.AddInteger(Kind);
switch (Kind) {
case Null:
@@ -61,18 +192,22 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
break;
case Template:
+ case TemplateExpansion: {
+ TemplateName Template = getAsTemplateOrTemplatePattern();
if (TemplateTemplateParmDecl *TTP
= dyn_cast_or_null<TemplateTemplateParmDecl>(
- getAsTemplate().getAsTemplateDecl())) {
+ Template.getAsTemplateDecl())) {
ID.AddBoolean(true);
ID.AddInteger(TTP->getDepth());
ID.AddInteger(TTP->getPosition());
+ ID.AddBoolean(TTP->isParameterPack());
} else {
ID.AddBoolean(false);
- ID.AddPointer(Context.getCanonicalTemplateName(getAsTemplate())
- .getAsVoidPointer());
+ ID.AddPointer(Context.getCanonicalTemplateName(Template)
+ .getAsVoidPointer());
}
break;
+ }
case Integral:
getAsIntegral()->Profile(ID);
@@ -97,8 +232,9 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
case Null:
case Type:
case Declaration:
+ case Expression:
case Template:
- case Expression:
+ case TemplateExpansion:
return TypeOrValue == Other.TypeOrValue;
case Integral:
@@ -117,10 +253,102 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
return false;
}
+TemplateArgument TemplateArgument::getPackExpansionPattern() const {
+ assert(isPackExpansion());
+
+ switch (getKind()) {
+ case Type:
+ return getAsType()->getAs<PackExpansionType>()->getPattern();
+
+ case Expression:
+ return cast<PackExpansionExpr>(getAsExpr())->getPattern();
+
+ case TemplateExpansion:
+ return TemplateArgument(getAsTemplateOrTemplatePattern());
+
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Null:
+ case Template:
+ return TemplateArgument();
+ }
+
+ return TemplateArgument();
+}
+
+void TemplateArgument::print(const PrintingPolicy &Policy,
+ llvm::raw_ostream &Out) const {
+ switch (getKind()) {
+ case Null:
+ Out << "<no value>";
+ break;
+
+ case Type: {
+ std::string TypeStr;
+ getAsType().getAsStringInternal(TypeStr, Policy);
+ Out << TypeStr;
+ break;
+ }
+
+ case Declaration: {
+ bool Unnamed = true;
+ if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getAsDecl())) {
+ if (ND->getDeclName()) {
+ Unnamed = false;
+ Out << ND->getNameAsString();
+ }
+ }
+
+ if (Unnamed) {
+ Out << "<anonymous>";
+ }
+ break;
+ }
+
+ case Template:
+ getAsTemplate().print(Out, Policy);
+ break;
+
+ case TemplateExpansion:
+ getAsTemplateOrTemplatePattern().print(Out, Policy);
+ Out << "...";
+ break;
+
+ case Integral: {
+ printIntegral(*this, Out);
+ break;
+ }
+
+ case Expression:
+ getAsExpr()->printPretty(Out, 0, Policy);
+ break;
+
+ case Pack:
+ Out << "<";
+ bool First = true;
+ for (TemplateArgument::pack_iterator P = pack_begin(), PEnd = pack_end();
+ P != PEnd; ++P) {
+ if (First)
+ First = false;
+ else
+ Out << ", ";
+
+ P->print(Policy, Out);
+ }
+ Out << ">";
+ break;
+ }
+}
+
//===----------------------------------------------------------------------===//
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
+TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
+ memset(this, 0, sizeof(TemplateArgumentLocInfo));
+}
+
SourceRange TemplateArgumentLoc::getSourceRange() const {
switch (Argument.getKind()) {
case TemplateArgument::Expression:
@@ -137,10 +365,16 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
case TemplateArgument::Template:
if (getTemplateQualifierRange().isValid())
- return SourceRange(getTemplateQualifierRange().getBegin(),
+ return SourceRange(getTemplateQualifierRange().getBegin(),
getTemplateNameLoc());
return SourceRange(getTemplateNameLoc());
+ case TemplateArgument::TemplateExpansion:
+ if (getTemplateQualifierRange().isValid())
+ return SourceRange(getTemplateQualifierRange().getBegin(),
+ getTemplateEllipsisLoc());
+ return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
+
case TemplateArgument::Integral:
case TemplateArgument::Pack:
case TemplateArgument::Null:
@@ -151,6 +385,68 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
return SourceRange();
}
+TemplateArgumentLoc
+TemplateArgumentLoc::getPackExpansionPattern(SourceLocation &Ellipsis,
+ llvm::Optional<unsigned> &NumExpansions,
+ ASTContext &Context) const {
+ assert(Argument.isPackExpansion());
+
+ switch (Argument.getKind()) {
+ case TemplateArgument::Type: {
+ // FIXME: We shouldn't ever have to worry about missing
+ // type-source info!
+ TypeSourceInfo *ExpansionTSInfo = getTypeSourceInfo();
+ if (!ExpansionTSInfo)
+ ExpansionTSInfo = Context.getTrivialTypeSourceInfo(
+ getArgument().getAsType(),
+ Ellipsis);
+ PackExpansionTypeLoc Expansion
+ = cast<PackExpansionTypeLoc>(ExpansionTSInfo->getTypeLoc());
+ Ellipsis = Expansion.getEllipsisLoc();
+
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ NumExpansions = Expansion.getTypePtr()->getNumExpansions();
+
+ // FIXME: This is horrible. We know where the source location data is for
+ // the pattern, and we have the pattern's type, but we are forced to copy
+ // them into an ASTContext because TypeSourceInfo bundles them together
+ // and TemplateArgumentLoc traffics in TypeSourceInfo pointers.
+ TypeSourceInfo *PatternTSInfo
+ = Context.CreateTypeSourceInfo(Pattern.getType(),
+ Pattern.getFullDataSize());
+ memcpy(PatternTSInfo->getTypeLoc().getOpaqueData(),
+ Pattern.getOpaqueData(), Pattern.getFullDataSize());
+ return TemplateArgumentLoc(TemplateArgument(Pattern.getType()),
+ PatternTSInfo);
+ }
+
+ case TemplateArgument::Expression: {
+ PackExpansionExpr *Expansion
+ = cast<PackExpansionExpr>(Argument.getAsExpr());
+ Expr *Pattern = Expansion->getPattern();
+ Ellipsis = Expansion->getEllipsisLoc();
+ NumExpansions = Expansion->getNumExpansions();
+ return TemplateArgumentLoc(Pattern, Pattern);
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ Ellipsis = getTemplateEllipsisLoc();
+ NumExpansions = Argument.getNumTemplateExpansions();
+ return TemplateArgumentLoc(Argument.getPackExpansionPattern(),
+ getTemplateQualifierRange(),
+ getTemplateNameLoc());
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Template:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return TemplateArgumentLoc();
+ }
+
+ return TemplateArgumentLoc();
+}
+
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
const TemplateArgument &Arg) {
switch (Arg.getKind()) {
@@ -170,7 +466,10 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
case TemplateArgument::Template:
return DB << Arg.getAsTemplate();
-
+
+ case TemplateArgument::TemplateExpansion:
+ return DB << Arg.getAsTemplateOrTemplatePattern() << "...";
+
case TemplateArgument::Expression: {
// This shouldn't actually ever happen, so it's okay that we're
// regurgitating an expression here.
@@ -184,9 +483,16 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << OS.str();
}
- case TemplateArgument::Pack:
- // FIXME: Format arguments in a list!
- return DB << "<parameter pack>";
+ case TemplateArgument::Pack: {
+ // FIXME: We're guessing at LangOptions!
+ llvm::SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.print(Policy, OS);
+ return DB << OS.str();
+ }
}
return DB;
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
index ef7b315..6b378a0 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TemplateName.h"
+#include "clang/AST/TemplateBase.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
@@ -21,15 +22,33 @@
using namespace clang;
using namespace llvm;
+TemplateArgument
+SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
+ return TemplateArgument(Arguments, size());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Parameter);
+ ArgPack.Profile(ID, Context);
+}
+
TemplateName::NameKind TemplateName::getKind() const {
if (Storage.is<TemplateDecl *>())
return Template;
- if (Storage.is<OverloadedTemplateStorage *>())
- return OverloadedTemplate;
+ if (Storage.is<DependentTemplateName *>())
+ return DependentTemplate;
if (Storage.is<QualifiedTemplateName *>())
return QualifiedTemplate;
- assert(Storage.is<DependentTemplateName *>() && "There's a case unhandled!");
- return DependentTemplate;
+
+ return getAsOverloadedTemplate()? OverloadedTemplate
+ : SubstTemplateTemplateParmPack;
}
TemplateDecl *TemplateName::getAsTemplateDecl() const {
@@ -44,8 +63,14 @@ TemplateDecl *TemplateName::getAsTemplateDecl() const {
bool TemplateName::isDependent() const {
if (TemplateDecl *Template = getAsTemplateDecl()) {
- return isa<TemplateTemplateParmDecl>(Template) ||
- Template->getDeclContext()->isDependentContext();
+ if (isa<TemplateTemplateParmDecl>(Template))
+ return true;
+ // FIXME: Hack, getDeclContext() can be null if Template is still
+ // initializing due to PCH reading, so we check it before using it.
+ // Should probably modify TemplateSpecializationType to allow constructing
+ // it without the isDependent() checking.
+ return Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext();
}
assert(!getAsOverloadedTemplate() &&
@@ -54,6 +79,22 @@ bool TemplateName::isDependent() const {
return true;
}
+bool TemplateName::containsUnexpandedParameterPack() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ return TTP->isParameterPack();
+
+ return false;
+ }
+
+ if (DependentTemplateName *DTN = getAsDependentTemplateName())
+ return DTN->getQualifier() &&
+ DTN->getQualifier()->containsUnexpandedParameterPack();
+
+ return getAsSubstTemplateTemplateParmPack() != 0;
+}
+
void
TemplateName::print(llvm::raw_ostream &OS, const PrintingPolicy &Policy,
bool SuppressNNS) const {
@@ -74,7 +115,9 @@ TemplateName::print(llvm::raw_ostream &OS, const PrintingPolicy &Policy,
OS << DTN->getIdentifier()->getName();
else
OS << "operator " << getOperatorSpelling(DTN->getOperator());
- }
+ } else if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = getAsSubstTemplateTemplateParmPack())
+ OS << SubstPack->getParameterPack()->getNameAsString();
}
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
index ca10532..b03314e 100644
--- a/contrib/llvm/tools/clang/lib/AST/Type.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -35,14 +36,13 @@ bool QualType::isConstant(QualType T, ASTContext &Ctx) {
return false;
}
-Type::~Type() { }
-
unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
QualType ElementType,
const llvm::APInt &NumElements) {
llvm::APSInt SizeExtended(NumElements, true);
unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
- SizeExtended.extend(std::max(SizeTypeBits, SizeExtended.getBitWidth()) * 2);
+ SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
+ SizeExtended.getBitWidth()) * 2);
uint64_t ElementSize
= Context.getTypeSizeInChars(ElementType).getQuantity();
@@ -63,8 +63,20 @@ unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
return Bits;
}
+DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
+ QualType et, QualType can,
+ Expr *e, ArraySizeModifier sm,
+ unsigned tq,
+ SourceRange brackets)
+ : ArrayType(DependentSizedArray, et, can, sm, tq,
+ (et->containsUnexpandedParameterPack() ||
+ (e && e->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
+{
+}
+
void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
+ const ASTContext &Context,
QualType ET,
ArraySizeModifier SizeMod,
unsigned TypeQuals,
@@ -75,14 +87,51 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
E->Profile(ID, Context, true);
}
+DependentSizedExtVectorType::DependentSizedExtVectorType(const
+ ASTContext &Context,
+ QualType ElementType,
+ QualType can,
+ Expr *SizeExpr,
+ SourceLocation loc)
+ : Type(DependentSizedExtVector, can, /*Dependent=*/true,
+ ElementType->isVariablyModifiedType(),
+ (ElementType->containsUnexpandedParameterPack() ||
+ (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
+ loc(loc)
+{
+}
+
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
+ const ASTContext &Context,
QualType ElementType, Expr *SizeExpr) {
ID.AddPointer(ElementType.getAsOpaquePtr());
SizeExpr->Profile(ID, Context, true);
}
+VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind)
+ : Type(Vector, canonType, vecType->isDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind)
+ : Type(tc, canonType, vecType->isDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
@@ -101,51 +150,18 @@ const Type *Type::getArrayElementTypeNoTypeQual() const {
->getElementType().getTypePtr();
}
-/// \brief Retrieve the unqualified variant of the given type, removing as
-/// little sugar as possible.
-///
-/// This routine looks through various kinds of sugar to find the
-/// least-desugared type that is unqualified. For example, given:
-///
-/// \code
-/// typedef int Integer;
-/// typedef const Integer CInteger;
-/// typedef CInteger DifferenceType;
-/// \endcode
-///
-/// Executing \c getUnqualifiedTypeSlow() on the type \c DifferenceType will
-/// desugar until we hit the type \c Integer, which has no qualifiers on it.
-QualType QualType::getUnqualifiedTypeSlow() const {
- QualType Cur = *this;
- while (true) {
- if (!Cur.hasQualifiers())
- return Cur;
-
- const Type *CurTy = Cur.getTypePtr();
- switch (CurTy->getTypeClass()) {
-#define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
- case Type::Class: { \
- const Class##Type *Ty = cast<Class##Type>(CurTy); \
- if (!Ty->isSugared()) \
- return Cur.getLocalUnqualifiedType(); \
- Cur = Ty->desugar(); \
- break; \
- }
-#include "clang/AST/TypeNodes.def"
- }
- }
-
- return Cur.getUnqualifiedType();
-}
-
/// getDesugaredType - Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
/// to getting the canonical type, but it doesn't remove *all* typedefs. For
/// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
/// concrete.
-QualType QualType::getDesugaredType(QualType T) {
+QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
+ SplitQualType split = getSplitDesugaredType(T);
+ return Context.getQualifiedType(split.first, split.second);
+}
+
+SplitQualType QualType::getSplitDesugaredType(QualType T) {
QualifierCollector Qs;
QualType Cur = T;
@@ -157,7 +173,7 @@ QualType QualType::getDesugaredType(QualType T) {
case Type::Class: { \
const Class##Type *Ty = cast<Class##Type>(CurTy); \
if (!Ty->isSugared()) \
- return Qs.apply(Cur); \
+ return SplitQualType(Ty, Qs); \
Cur = Ty->desugar(); \
break; \
}
@@ -166,6 +182,52 @@ QualType QualType::getDesugaredType(QualType T) {
}
}
+SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
+ SplitQualType split = type.split();
+
+ // All the qualifiers we've seen so far.
+ Qualifiers quals = split.second;
+
+ // The last type node we saw with any nodes inside it.
+ const Type *lastTypeWithQuals = split.first;
+
+ while (true) {
+ QualType next;
+
+ // Do a single-step desugar, aborting the loop if the type isn't
+ // sugared.
+ switch (split.first->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(split.first); \
+ if (!ty->isSugared()) goto done; \
+ next = ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // Otherwise, split the underlying type. If that yields qualifiers,
+ // update the information.
+ split = next.split();
+ if (!split.second.empty()) {
+ lastTypeWithQuals = split.first;
+ quals.addConsistentQualifiers(split.second);
+ }
+ }
+
+ done:
+ return SplitQualType(lastTypeWithQuals, quals);
+}
+
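
The qualifier-accumulating loop above is easiest to see with the example from the doc comment deleted earlier in this diff:

    typedef int Integer;
    typedef const Integer CInteger;
    typedef CInteger DifferenceType;
    // Unqualifying DifferenceType desugars only until the qualifiers are gone,
    // yielding Integer (still sugared) rather than the canonical int.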
+QualType QualType::IgnoreParens(QualType T) {
+ // FIXME: this seems inherently un-qualifiers-safe.
+ while (const ParenType *PT = T->getAs<ParenType>())
+ T = PT->getInnerType();
+ return T;
+}
+
/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
@@ -268,42 +330,6 @@ QualType Type::getPointeeType() const {
return QualType();
}
-/// isVariablyModifiedType (C99 6.7.5p3) - Return true for variable length
-/// array types and types that contain variable array types in their
-/// declarator
-bool Type::isVariablyModifiedType() const {
- // FIXME: We should really keep a "variably modified" bit in Type, rather
- // than walking the type hierarchy to recompute it.
-
- // A VLA is a variably modified type.
- if (isVariableArrayType())
- return true;
-
- // An array can contain a variably modified type
- if (const Type *T = getArrayElementTypeNoTypeQual())
- return T->isVariablyModifiedType();
-
- // A pointer can point to a variably modified type.
- // Also, C++ references and member pointers can point to a variably modified
- // type, where VLAs appear as an extension to C++, and should be treated
- // correctly.
- if (const PointerType *PT = getAs<PointerType>())
- return PT->getPointeeType()->isVariablyModifiedType();
- if (const ReferenceType *RT = getAs<ReferenceType>())
- return RT->getPointeeType()->isVariablyModifiedType();
- if (const MemberPointerType *PT = getAs<MemberPointerType>())
- return PT->getPointeeType()->isVariablyModifiedType();
-
- // A function can return a variably modified type
- // This one isn't completely obvious, but it follows from the
- // definition in C99 6.7.5p3. Because of this rule, it's
- // illegal to declare a function returning a variably modified type.
- if (const FunctionType *FT = getAs<FunctionType>())
- return FT->getResultType()->isVariablyModifiedType();
-
- return false;
-}
-
const RecordType *Type::getAsStructureType() const {
// If this is directly a structure type, return it.
if (const RecordType *RT = dyn_cast<RecordType>(this)) {
@@ -346,10 +372,11 @@ const RecordType *Type::getAsUnionType() const {
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols)
- : Type(ObjCObject, Canonical, false),
- NumProtocols(NumProtocols),
- BaseType(Base) {
- assert(this->NumProtocols == NumProtocols &&
+ : Type(ObjCObject, Canonical, false, false, false),
+ BaseType(Base)
+{
+ ObjCObjectTypeBits.NumProtocols = NumProtocols;
+ assert(getNumProtocols() == NumProtocols &&
"bitfield overflow in protocol count");
if (NumProtocols)
memcpy(getProtocolStorage(), Protocols,
@@ -405,15 +432,69 @@ CXXRecordDecl *Type::getAsCXXRecordDecl() const {
return 0;
}
+namespace {
+ class GetContainedAutoVisitor :
+ public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
+ public:
+ using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
+ AutoType *Visit(QualType T) {
+ if (T.isNull())
+ return 0;
+ return Visit(T.getTypePtr());
+ }
+
+ // The 'auto' type itself.
+ AutoType *VisitAutoType(const AutoType *AT) {
+ return const_cast<AutoType*>(AT);
+ }
+
+ // Only these types can contain the desired 'auto' type.
+ AutoType *VisitPointerType(const PointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitBlockPointerType(const BlockPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitReferenceType(const ReferenceType *T) {
+ return Visit(T->getPointeeTypeAsWritten());
+ }
+ AutoType *VisitMemberPointerType(const MemberPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitVectorType(const VectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitFunctionType(const FunctionType *T) {
+ return Visit(T->getResultType());
+ }
+ AutoType *VisitParenType(const ParenType *T) {
+ return Visit(T->getInnerType());
+ }
+ AutoType *VisitAttributedType(const AttributedType *T) {
+ return Visit(T->getModifiedType());
+ }
+ };
+}
+
+AutoType *Type::getContainedAutoType() const {
+ return GetContainedAutoVisitor().Visit(this);
+}
+
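
A sketch of declarations the visitor above would resolve to the contained AutoType (C++0x auto):

    auto i = 42;         // AutoType found directly
    auto *p = &i;        // found beneath PointerType
    const auto &r = i;   // found beneath ReferenceType (pointee as written)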
bool Type::isIntegerType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true;
+ return ET->getDecl()->isComplete();
return false;
}
@@ -449,9 +530,8 @@ bool Type::isIntegralType(ASTContext &Ctx) const {
BT->getKind() <= BuiltinType::Int128;
if (!Ctx.getLangOptions().CPlusPlus)
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true; // Complete enum types are integral in C.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete(); // Complete enum types are integral in C.
return false;
}
@@ -463,19 +543,28 @@ bool Type::isIntegralOrEnumerationType() const {
// Check for a complete enum type; incomplete enum types are not properly an
// enumeration type in the sense required here.
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete();
return false;
}
-bool Type::isEnumeralType() const {
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- return TT->getDecl()->isEnum();
+bool Type::isIntegralOrUnscopedEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ // C++0x: However, if the underlying type of the enum is fixed, it is
+ // considered complete.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+
return false;
}
+
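
The isScoped() checks threaded through this file implement the C++0x rule that scoped enumerations neither promote nor convert implicitly:

    enum Old { A };
    enum class New { B };
    int x = A;                          // OK: unscoped enums still convert
    int y = static_cast<int>(New::B);   // scoped enumerators require a cast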
bool Type::isBooleanType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Bool;
@@ -493,20 +582,28 @@ bool Type::isCharType() const {
bool Type::isWideCharType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::WChar;
+ return BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U;
return false;
}
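
The WChar_S/WChar_U split reflects that the underlying signedness of wchar_t is target-dependent; the written type is the same everywhere:

    wchar_t wc = L'x';   // builtin kind is WChar_S or WChar_U depending on the target ABI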
/// \brief Determine whether this type is any of the built-in character
/// types.
bool Type::isAnyCharacterType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return (BT->getKind() >= BuiltinType::Char_U &&
- BT->getKind() <= BuiltinType::Char32) ||
- (BT->getKind() >= BuiltinType::Char_S &&
- BT->getKind() <= BuiltinType::WChar);
-
- return false;
+ const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
+ if (BT == 0) return false;
+ switch (BT->getKind()) {
+ default: return false;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::WChar_S:
+ return true;
+ }
}
/// isSignedIntegerType - Return true if this is an integer type that is
@@ -518,8 +615,12 @@ bool Type::isSignedIntegerType() const {
BT->getKind() <= BuiltinType::Int128;
}
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
return false;
}
@@ -540,8 +641,12 @@ bool Type::isUnsignedIntegerType() const {
BT->getKind() <= BuiltinType::UInt128;
}
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
return false;
}
@@ -579,8 +684,8 @@ bool Type::isRealType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::LongDouble;
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- return TT->getDecl()->isEnum() && TT->getDecl()->isDefinition();
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
return false;
}
@@ -591,20 +696,22 @@ bool Type::isArithmeticType() const {
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
- return ET->getDecl()->isDefinition();
+ //
+ // C++0x: Enumerations are not arithmetic types. For now, just return
+ // false for scoped enumerations since that will disable any
+ // unwanted implicit conversions.
+ return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
return isa<ComplexType>(CanonicalType);
}
bool Type::isScalarType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() != BuiltinType::Void;
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) {
+ return BT->getKind() > BuiltinType::Void &&
+ BT->getKind() <= BuiltinType::NullPtr;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
// Enums are scalar types, but only if they are defined. Incomplete enums
// are not treated as scalar types.
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true;
- return false;
- }
+ return ET->getDecl()->isComplete();
return isa<PointerType>(CanonicalType) ||
isa<BlockPointerType>(CanonicalType) ||
isa<MemberPointerType>(CanonicalType) ||
@@ -612,6 +719,35 @@ bool Type::isScalarType() const {
isa<ObjCObjectPointerType>(CanonicalType);
}
+Type::ScalarTypeKind Type::getScalarTypeKind() const {
+ assert(isScalarType());
+
+ const Type *T = CanonicalType.getTypePtr();
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
+ if (BT->getKind() == BuiltinType::NullPtr) return STK_Pointer;
+ if (BT->isInteger()) return STK_Integral;
+ if (BT->isFloatingPoint()) return STK_Floating;
+ llvm_unreachable("unknown scalar builtin type");
+ } else if (isa<PointerType>(T) ||
+ isa<BlockPointerType>(T) ||
+ isa<ObjCObjectPointerType>(T)) {
+ return STK_Pointer;
+ } else if (isa<MemberPointerType>(T)) {
+ return STK_MemberPointer;
+ } else if (isa<EnumType>(T)) {
+ assert(cast<EnumType>(T)->getDecl()->isComplete());
+ return STK_Integral;
+ } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
+ if (CT->getElementType()->isRealFloatingType())
+ return STK_FloatingComplex;
+ return STK_IntegralComplex;
+ }
+
+ llvm_unreachable("unknown scalar type");
+ return STK_Pointer;
+}
+
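
One declaration per scalar kind classified above, as a quick reference (member pointer and _Complex are the less common cases):

    int i;                 // STK_Integral
    bool b;                // STK_Bool
    float f;               // STK_Floating
    int *p;                // STK_Pointer
    struct S; int S::*m;   // STK_MemberPointer
    _Complex float cf;     // STK_FloatingComplex (GNU/C99 complex extension)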
/// \brief Determines whether the type is a C++ aggregate type or C
/// aggregate or union type.
///
@@ -652,8 +788,12 @@ bool Type::isIncompleteType() const {
// Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
// be completed.
return isVoidType();
- case Record:
case Enum:
+ // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
+ if (cast<EnumType>(CanonicalType)->getDecl()->isFixed())
+ return false;
+ // Fall through.
+ case Record:
// A tagged type (struct/union/enum/class) is incomplete if the decl is a
// forward declaration, but not a full definition (C99 6.2.5p22).
return !cast<TagType>(CanonicalType)->getDecl()->isDefinition();
@@ -678,7 +818,11 @@ bool Type::isIncompleteType() const {
/// isPODType - Return true if this is a plain-old-data type (C++ 3.9p10)
bool Type::isPODType() const {
// The compiler shouldn't query this for incomplete types, but the user might.
- // We return false for that case.
+ // We return false for that case. Except for incomplete arrays of PODs, which
+ // are PODs according to the standard.
+ if (isIncompleteArrayType() &&
+ cast<ArrayType>(CanonicalType)->getElementType()->isPODType())
+ return true;
if (isIncompleteType())
return false;
@@ -687,7 +831,7 @@ bool Type::isPODType() const {
default: return false;
case VariableArray:
case ConstantArray:
- // IncompleteArray is caught by isIncompleteType() above.
+ // IncompleteArray is handled above.
return cast<ArrayType>(CanonicalType)->getElementType()->isPODType();
case Builtin:
@@ -765,7 +909,8 @@ bool Type::isPromotableIntegerType() const {
// Enumerated types are promotable to their compatible integer types
// (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
if (const EnumType *ET = getAs<EnumType>()){
- if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull())
+ if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
+ || ET->getDecl()->isScoped())
return false;
const BuiltinType *BT
@@ -808,9 +953,6 @@ bool Type::isSpecifierType() const {
}
}
-TypeWithKeyword::~TypeWithKeyword() {
-}
-
ElaboratedTypeKeyword
TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
@@ -830,8 +972,10 @@ TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
case TST_struct: return TTK_Struct;
case TST_union: return TTK_Union;
case TST_enum: return TTK_Enum;
- default: llvm_unreachable("Type specifier is not a tag type kind.");
}
+
+ llvm_unreachable("Type specifier is not a tag type kind.");
+ return TTK_Union;
}
ElaboratedTypeKeyword
@@ -877,7 +1021,6 @@ TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
const char*
TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
- default: llvm_unreachable("Unknown elaborated type keyword.");
case ETK_None: return "";
case ETK_Typename: return "typename";
case ETK_Class: return "class";
@@ -885,28 +1028,33 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
case ETK_Union: return "union";
case ETK_Enum: return "enum";
}
-}
-ElaboratedType::~ElaboratedType() {}
-DependentNameType::~DependentNameType() {}
-DependentTemplateSpecializationType::~DependentTemplateSpecializationType() {}
+ llvm_unreachable("Unknown elaborated type keyword.");
+ return "";
+}
DependentTemplateSpecializationType::DependentTemplateSpecializationType(
ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS, const IdentifierInfo *Name,
unsigned NumArgs, const TemplateArgument *Args,
QualType Canon)
- : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true),
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true,
+ /*VariablyModified=*/false,
+ NNS->containsUnexpandedParameterPack()),
NNS(NNS), Name(Name), NumArgs(NumArgs) {
assert(NNS && NNS->isDependent() &&
"DependentTemplateSpecializatonType requires dependent qualifier");
- for (unsigned I = 0; I != NumArgs; ++I)
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+ }
}
void
DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
+ const ASTContext &Context,
ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *Qualifier,
const IdentifierInfo *Name,
@@ -935,17 +1083,18 @@ bool Type::isElaboratedTypeSpecifier() const {
}
const char *Type::getTypeClassName() const {
- switch (TC) {
- default: assert(0 && "Type class not in TypeNodes.def!");
+ switch (TypeBits.TC) {
#define ABSTRACT_TYPE(Derived, Base)
#define TYPE(Derived, Base) case Derived: return #Derived;
#include "clang/AST/TypeNodes.def"
}
+
+ llvm_unreachable("Invalid type class.");
+ return 0;
}
const char *BuiltinType::getName(const LangOptions &LO) const {
switch (getKind()) {
- default: assert(0 && "Unknown builtin type!");
case Void: return "void";
case Bool: return LO.Bool ? "bool" : "_Bool";
case Char_S: return "char";
@@ -965,21 +1114,22 @@ const char *BuiltinType::getName(const LangOptions &LO) const {
case Float: return "float";
case Double: return "double";
case LongDouble: return "long double";
- case WChar: return "wchar_t";
+ case WChar_S:
+ case WChar_U: return "wchar_t";
case Char16: return "char16_t";
case Char32: return "char32_t";
case NullPtr: return "nullptr_t";
case Overload: return "<overloaded function type>";
case Dependent: return "<dependent type>";
- case UndeducedAuto: return "auto";
case ObjCId: return "id";
case ObjCClass: return "Class";
case ObjCSel: return "SEL";
}
+
+ llvm_unreachable("Invalid builtin type.");
+ return 0;
}
-void FunctionType::ANCHOR() {} // Key function for FunctionType.
-
QualType QualType::getNonLValueExprType(ASTContext &Context) const {
if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
return RefType->getPointeeType();
@@ -998,8 +1148,9 @@ QualType QualType::getNonLValueExprType(ASTContext &Context) const {
llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
switch (CC) {
- case CC_Default: llvm_unreachable("no name for default cc");
- default: return "";
+ case CC_Default:
+ llvm_unreachable("no name for default cc");
+ return "";
case CC_C: return "cdecl";
case CC_X86StdCall: return "stdcall";
@@ -1007,62 +1158,76 @@ llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86ThisCall: return "thiscall";
case CC_X86Pascal: return "pascal";
}
+
+ llvm_unreachable("Invalid calling convention.");
+ return "";
+}
+
+FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
+ unsigned numArgs, QualType canonical,
+ const ExtProtoInfo &epi)
+ : FunctionType(FunctionProto, result, epi.Variadic, epi.TypeQuals,
+ epi.RefQualifier, canonical,
+ result->isDependentType(),
+ result->isVariablyModifiedType(),
+ result->containsUnexpandedParameterPack(),
+ epi.ExtInfo),
+ NumArgs(numArgs), NumExceptions(epi.NumExceptions),
+ HasExceptionSpec(epi.HasExceptionSpec),
+ HasAnyExceptionSpec(epi.HasAnyExceptionSpec)
+{
+ // Fill in the trailing argument array.
+ QualType *argSlot = reinterpret_cast<QualType*>(this+1);
+ for (unsigned i = 0; i != numArgs; ++i) {
+ if (args[i]->isDependentType())
+ setDependent();
+
+ if (args[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ argSlot[i] = args[i];
+ }
+
+ // Fill in the exception array.
+ QualType *exnSlot = argSlot + numArgs;
+ for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
+ if (epi.Exceptions[i]->isDependentType())
+ setDependent();
+
+ if (epi.Exceptions[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ exnSlot[i] = epi.Exceptions[i];
+ }
+}
+
+bool FunctionProtoType::isTemplateVariadic() const {
+ for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx)
+ if (isa<PackExpansionType>(getArgType(ArgIdx - 1)))
+ return true;
+
+ return false;
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
- arg_type_iterator ArgTys,
- unsigned NumArgs, bool isVariadic,
- unsigned TypeQuals, bool hasExceptionSpec,
- bool anyExceptionSpec, unsigned NumExceptions,
- exception_iterator Exs,
- const FunctionType::ExtInfo &Info) {
+ const QualType *ArgTys, unsigned NumArgs,
+ const ExtProtoInfo &epi) {
ID.AddPointer(Result.getAsOpaquePtr());
for (unsigned i = 0; i != NumArgs; ++i)
ID.AddPointer(ArgTys[i].getAsOpaquePtr());
- ID.AddInteger(isVariadic);
- ID.AddInteger(TypeQuals);
- ID.AddInteger(hasExceptionSpec);
- if (hasExceptionSpec) {
- ID.AddInteger(anyExceptionSpec);
- for (unsigned i = 0; i != NumExceptions; ++i)
- ID.AddPointer(Exs[i].getAsOpaquePtr());
+ ID.AddBoolean(epi.Variadic);
+ ID.AddInteger(epi.TypeQuals);
+ ID.AddInteger(epi.RefQualifier);
+ if (epi.HasExceptionSpec) {
+ ID.AddBoolean(epi.HasAnyExceptionSpec);
+ for (unsigned i = 0; i != epi.NumExceptions; ++i)
+ ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
}
- ID.AddInteger(Info.getNoReturn());
- ID.AddInteger(Info.getRegParm());
- ID.AddInteger(Info.getCC());
+ epi.ExtInfo.Profile(ID);
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getResultType(), arg_type_begin(), NumArgs, isVariadic(),
- getTypeQuals(), hasExceptionSpec(), hasAnyExceptionSpec(),
- getNumExceptions(), exception_begin(),
- getExtInfo());
-}
-
-/// LookThroughTypedefs - Return the ultimate type this typedef corresponds to
-/// potentially looking through *all* consecutive typedefs. This returns the
-/// sum of the type qualifiers, so if you have:
-/// typedef const int A;
-/// typedef volatile A B;
-/// looking through the typedefs for B will give you "const volatile A".
-///
-QualType TypedefType::LookThroughTypedefs() const {
- // Usually, there is only a single level of typedefs, be fast in that case.
- QualType FirstType = getDecl()->getUnderlyingType();
- if (!isa<TypedefType>(FirstType))
- return FirstType;
-
- // Otherwise, do the fully general loop.
- QualifierCollector Qs;
-
- QualType CurType;
- const TypedefType *TDT = this;
- do {
- CurType = TDT->getDecl()->getUnderlyingType();
- TDT = dyn_cast<TypedefType>(Qs.strip(CurType));
- } while (TDT);
-
- return Qs.apply(CurType);
+ Profile(ID, getResultType(), arg_type_begin(), NumArgs, getExtProtoInfo());
}
QualType TypedefType::desugar() const {
@@ -1070,7 +1235,10 @@ QualType TypedefType::desugar() const {
}
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
- : Type(TypeOfExpr, can, E->isTypeDependent()), TOExpr(E) {
+ : Type(TypeOfExpr, can, E->isTypeDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ TOExpr(E) {
}
QualType TypeOfExprType::desugar() const {
@@ -1078,25 +1246,29 @@ QualType TypeOfExprType::desugar() const {
}
void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context, Expr *E) {
+ const ASTContext &Context, Expr *E) {
E->Profile(ID, Context, true);
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
- : Type(Decltype, can, E->isTypeDependent()), E(E),
+ : Type(Decltype, can, E->isTypeDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ E(E),
UnderlyingType(underlyingType) {
}
-DependentDecltypeType::DependentDecltypeType(ASTContext &Context, Expr *E)
+DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
: DecltypeType(E, Context.DependentTy), Context(Context) { }
void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context, Expr *E) {
+ const ASTContext &Context, Expr *E) {
E->Profile(ID, Context, true);
}
TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can, D->isDependentType()),
+ : Type(TC, can, D->isDependentType(), /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
decl(const_cast<TagDecl*>(D)) {}
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
@@ -1130,43 +1302,32 @@ bool EnumType::classof(const TagType *TT) {
return isa<EnumDecl>(TT->getDecl());
}
-static bool isDependent(const TemplateArgument &Arg) {
- switch (Arg.getKind()) {
- case TemplateArgument::Null:
- assert(false && "Should not have a NULL template argument");
- return false;
-
- case TemplateArgument::Type:
- return Arg.getAsType()->isDependentType();
-
- case TemplateArgument::Template:
- return Arg.getAsTemplate().isDependent();
-
- case TemplateArgument::Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(Arg.getAsDecl()))
- return DC->isDependentContext();
- return Arg.getAsDecl()->getDeclContext()->isDependentContext();
-
- case TemplateArgument::Integral:
- // Never dependent
- return false;
-
- case TemplateArgument::Expression:
- return (Arg.getAsExpr()->isTypeDependent() ||
- Arg.getAsExpr()->isValueDependent());
+SubstTemplateTypeParmPackType::
+SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(SubstTemplateTypeParmPack, Canon, true, false, true), Replaced(Param),
+ Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
+{
+}
- case TemplateArgument::Pack:
- for (TemplateArgument::pack_iterator P = Arg.pack_begin(),
- PEnd = Arg.pack_end();
- P != PEnd; ++P) {
- if (isDependent(*P))
- return true;
- }
+TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
+}
- return false;
- }
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacedParameter(), getArgumentPack());
+}
- return false;
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Replaced);
+ ID.AddInteger(ArgPack.pack_size());
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P)
+ ID.AddPointer(P->getAsType().getAsOpaquePtr());
}
bool TemplateSpecializationType::
@@ -1177,7 +1338,7 @@ anyDependentTemplateArguments(const TemplateArgumentListInfo &Args) {
bool TemplateSpecializationType::
anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N) {
for (unsigned i = 0; i != N; ++i)
- if (isDependent(Args[i].getArgument()))
+ if (Args[i].getArgument().isDependent())
return true;
return false;
}
@@ -1185,7 +1346,7 @@ anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N) {
bool TemplateSpecializationType::
anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N) {
for (unsigned i = 0; i != N; ++i)
- if (isDependent(Args[i]))
+ if (Args[i].isDependent())
return true;
return false;
}
@@ -1196,16 +1357,28 @@ TemplateSpecializationType(TemplateName T,
unsigned NumArgs, QualType Canon)
: Type(TemplateSpecialization,
Canon.isNull()? QualType(this, 0) : Canon,
- T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)),
- Template(T), NumArgs(NumArgs) {
+ T.isDependent(), false,
+ T.containsUnexpandedParameterPack()),
+ Template(T), NumArgs(NumArgs)
+{
assert((!Canon.isNull() ||
T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) &&
"No canonical type for non-dependent class template specialization");
TemplateArgument *TemplateArgs
= reinterpret_cast<TemplateArgument *>(this + 1);
- for (unsigned Arg = 0; Arg < NumArgs; ++Arg)
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // Update dependent and variably-modified bits.
+ if (Args[Arg].isDependent())
+ setDependent();
+ if (Args[Arg].getKind() == TemplateArgument::Type &&
+ Args[Arg].getAsType()->isVariablyModifiedType())
+ setVariablyModified();
+ if (Args[Arg].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
+ }
}
void
@@ -1213,26 +1386,26 @@ TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
T.Profile(ID);
for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
Args[Idx].Profile(ID, Context);
}
-QualType QualifierCollector::apply(QualType QT) const {
+QualType
+QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
if (!hasNonFastQualifiers())
return QT.withFastQualifiers(getFastQualifiers());
- assert(Context && "extended qualifiers but no context!");
- return Context->getQualifiedType(QT, *this);
+ return Context.getQualifiedType(QT, *this);
}
-QualType QualifierCollector::apply(const Type *T) const {
+QualType
+QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
if (!hasNonFastQualifiers())
return QualType(T, getFastQualifiers());
- assert(Context && "extended qualifiers but no context!");
- return Context->getQualifiedType(T, *this);
+ return Context.getQualifiedType(T, *this);
}
void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
@@ -1248,95 +1421,223 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getBaseType(), qual_begin(), getNumProtocols());
}
-/// \brief Determine the linkage of this type.
-Linkage Type::getLinkage() const {
- if (this != CanonicalType.getTypePtr())
- return CanonicalType->getLinkage();
+namespace {
+
+/// \brief The cached properties of a type.
+class CachedProperties {
+ char linkage;
+ char visibility;
+ bool local;
- if (!LinkageKnown) {
- CachedLinkage = getLinkageImpl();
- LinkageKnown = true;
- }
+public:
+ CachedProperties(Linkage linkage, Visibility visibility, bool local)
+ : linkage(linkage), visibility(visibility), local(local) {}
- return static_cast<clang::Linkage>(CachedLinkage);
+ Linkage getLinkage() const { return (Linkage) linkage; }
+ Visibility getVisibility() const { return (Visibility) visibility; }
+ bool hasLocalOrUnnamedType() const { return local; }
+
+ friend CachedProperties merge(CachedProperties L, CachedProperties R) {
+ return CachedProperties(minLinkage(L.getLinkage(), R.getLinkage()),
+ minVisibility(L.getVisibility(), R.getVisibility()),
+ L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
+ }
+};
}
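
A worked example of merge()'s semantics (the enumerators are the real
clang::Linkage and clang::Visibility values; the operands are hypothetical):

  CachedProperties cls(InternalLinkage, DefaultVisibility, /*local=*/true);
  CachedProperties ptee(ExternalLinkage, HiddenVisibility, /*local=*/false);
  CachedProperties both = merge(cls, ptee);
  // both.getLinkage()            == InternalLinkage   (minimum linkage)
  // both.getVisibility()         == HiddenVisibility  (minimum visibility)
  // both.hasLocalOrUnnamedType() == true              (flag is OR'ed)

This is exactly how computeCachedProperties below combines the class and
pointee components of a member-pointer type.
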
-Linkage Type::getLinkageImpl() const {
- // C++ [basic.link]p8:
- // Names not covered by these rules have no linkage.
- return NoLinkage;
-}
+static CachedProperties computeCachedProperties(const Type *T);
-void Type::ClearLinkageCache() {
- if (this != CanonicalType.getTypePtr())
- CanonicalType->ClearLinkageCache();
- else
- LinkageKnown = false;
-}
+namespace clang {
+/// The type-property cache. This is templated so as to be
+/// instantiated at an internal type to prevent unnecessary symbol
+/// leakage.
+template <class Private> class TypePropertyCache {
+public:
+ static CachedProperties get(QualType T) {
+ return get(T.getTypePtr());
+ }
-Linkage BuiltinType::getLinkageImpl() const {
- // C++ [basic.link]p8:
- // A type is said to have linkage if and only if:
- // - it is a fundamental type (3.9.1); or
- return ExternalLinkage;
-}
+ static CachedProperties get(const Type *T) {
+ ensure(T);
+ return CachedProperties(T->TypeBits.getLinkage(),
+ T->TypeBits.getVisibility(),
+ T->TypeBits.hasLocalOrUnnamedType());
+ }
-Linkage TagType::getLinkageImpl() const {
- // C++ [basic.link]p8:
- // - it is a class or enumeration type that is named (or has a name for
- // linkage purposes (7.1.3)) and the name has linkage; or
- // - it is a specialization of a class template (14); or
- return getDecl()->getLinkage();
-}
+ static void ensure(const Type *T) {
+ // If the cache is valid, we're okay.
+ if (T->TypeBits.isCacheValid()) return;
+
+ // If this type is non-canonical, ask its canonical type for the
+ // relevant information.
+ if (!T->isCanonicalUnqualified()) {
+ const Type *CT = T->getCanonicalTypeInternal().getTypePtr();
+ ensure(CT);
+ T->TypeBits.CacheValidAndVisibility =
+ CT->TypeBits.CacheValidAndVisibility;
+ T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
+ T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
+ return;
+ }
-// C++ [basic.link]p8:
-// - it is a compound type (3.9.2) other than a class or enumeration,
-// compounded exclusively from types that have linkage; or
-Linkage ComplexType::getLinkageImpl() const {
- return ElementType->getLinkage();
+ // Compute the cached properties and then set the cache.
+ CachedProperties Result = computeCachedProperties(T);
+ T->TypeBits.CacheValidAndVisibility = Result.getVisibility() + 1U;
+ assert(T->TypeBits.isCacheValid() &&
+ T->TypeBits.getVisibility() == Result.getVisibility());
+ T->TypeBits.CachedLinkage = Result.getLinkage();
+ T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
+ }
+};
}
-Linkage PointerType::getLinkageImpl() const {
- return PointeeType->getLinkage();
-}
+// Instantiate the friend template at a private class. In a
+// reasonable implementation, these symbols will be internal.
+// It is terrible that this is the best way to accomplish this.
+namespace { class Private {}; }
+typedef TypePropertyCache<Private> Cache;
+
+static CachedProperties computeCachedProperties(const Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ // Treat dependent types as external.
+ assert(T->isDependentType());
+ return CachedProperties(ExternalLinkage, DefaultVisibility, false);
+
+ case Type::Builtin:
+ // C++ [basic.link]p8:
+ // A type is said to have linkage if and only if:
+ // - it is a fundamental type (3.9.1); or
+ return CachedProperties(ExternalLinkage, DefaultVisibility, false);
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *Tag = cast<TagType>(T)->getDecl();
+
+ // C++ [basic.link]p8:
+ // - it is a class or enumeration type that is named (or has a name
+ // for linkage purposes (7.1.3)) and the name has linkage; or
+ // - it is a specialization of a class template (14); or
+ NamedDecl::LinkageInfo LV = Tag->getLinkageAndVisibility();
+ bool IsLocalOrUnnamed =
+ Tag->getDeclContext()->isFunctionOrMethod() ||
+ (!Tag->getIdentifier() && !Tag->getTypedefForAnonDecl());
+ return CachedProperties(LV.linkage(), LV.visibility(), IsLocalOrUnnamed);
+ }
+
+ // C++ [basic.link]p8:
+ // - it is a compound type (3.9.2) other than a class or enumeration,
+ // compounded exclusively from types that have linkage; or
+ case Type::Complex:
+ return Cache::get(cast<ComplexType>(T)->getElementType());
+ case Type::Pointer:
+ return Cache::get(cast<PointerType>(T)->getPointeeType());
+ case Type::BlockPointer:
+ return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return Cache::get(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ return merge(Cache::get(MPT->getClass()),
+ Cache::get(MPT->getPointeeType()));
+ }
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ return Cache::get(cast<ArrayType>(T)->getElementType());
+ case Type::Vector:
+ case Type::ExtVector:
+ return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::FunctionNoProto:
+ return Cache::get(cast<FunctionType>(T)->getResultType());
+ case Type::FunctionProto: {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ CachedProperties result = Cache::get(FPT->getResultType());
+ for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
+ ae = FPT->arg_type_end(); ai != ae; ++ai)
+ result = merge(result, Cache::get(*ai));
+ return result;
+ }
+ case Type::ObjCInterface: {
+ NamedDecl::LinkageInfo LV =
+ cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
+ return CachedProperties(LV.linkage(), LV.visibility(), false);
+ }
+ case Type::ObjCObject:
+ return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
+ case Type::ObjCObjectPointer:
+ return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
+ }
+
+ llvm_unreachable("unhandled type class");
-Linkage BlockPointerType::getLinkageImpl() const {
- return PointeeType->getLinkage();
+ // C++ [basic.link]p8:
+ // Names not covered by these rules have no linkage.
+ return CachedProperties(NoLinkage, DefaultVisibility, false);
}
-Linkage ReferenceType::getLinkageImpl() const {
- return PointeeType->getLinkage();
+/// \brief Determine the linkage of this type.
+Linkage Type::getLinkage() const {
+ Cache::ensure(this);
+ return TypeBits.getLinkage();
}
-Linkage MemberPointerType::getLinkageImpl() const {
- return minLinkage(Class->getLinkage(), PointeeType->getLinkage());
+/// \brief Determine the visibility of this type.
+Visibility Type::getVisibility() const {
+ Cache::ensure(this);
+ return TypeBits.getVisibility();
}
-Linkage ArrayType::getLinkageImpl() const {
- return ElementType->getLinkage();
+bool Type::hasUnnamedOrLocalType() const {
+ Cache::ensure(this);
+ return TypeBits.hasLocalOrUnnamedType();
}
-Linkage VectorType::getLinkageImpl() const {
- return ElementType->getLinkage();
+std::pair<Linkage,Visibility> Type::getLinkageAndVisibility() const {
+ Cache::ensure(this);
+ return std::make_pair(TypeBits.getLinkage(), TypeBits.getVisibility());
}
-Linkage FunctionNoProtoType::getLinkageImpl() const {
- return getResultType()->getLinkage();
+void Type::ClearLinkageCache() {
+ TypeBits.CacheValidAndVisibility = 0;
+ if (QualType(this, 0) != CanonicalType)
+ CanonicalType->TypeBits.CacheValidAndVisibility = 0;
}
-Linkage FunctionProtoType::getLinkageImpl() const {
- Linkage L = getResultType()->getLinkage();
- for (arg_type_iterator A = arg_type_begin(), AEnd = arg_type_end();
- A != AEnd; ++A)
- L = minLinkage(L, (*A)->getLinkage());
+bool Type::hasSizedVLAType() const {
+ if (!isVariablyModifiedType()) return false;
- return L;
-}
+ if (const PointerType *ptr = getAs<PointerType>())
+ return ptr->getPointeeType()->hasSizedVLAType();
+ if (const ReferenceType *ref = getAs<ReferenceType>())
+ return ref->getPointeeType()->hasSizedVLAType();
+ if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
+ if (isa<VariableArrayType>(arr) &&
+ cast<VariableArrayType>(arr)->getSizeExpr())
+ return true;
+
+ return arr->getElementType()->hasSizedVLAType();
+ }
-Linkage ObjCObjectType::getLinkageImpl() const {
- return ExternalLinkage;
+ return false;
}
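
Source-level examples of what this predicate walks through (VLAs are C99;
clang also accepts them in C++ as a GNU extension):

  void f(unsigned n) {
    int vla[n];     // VariableArrayType with a size expression: sized VLA
    int (*p)[n];    // pointer to a VLA: variably modified and sized, too
    int fixed[10];  // ConstantArrayType: not variably modified at all
  }
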
-Linkage ObjCObjectPointerType::getLinkageImpl() const {
- return ExternalLinkage;
+QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+  // Currently, the only destruction kind we recognize is C++ objects
+  // with non-trivial destructors.
+ const CXXRecordDecl *record =
+ type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (record && !record->hasTrivialDestructor())
+ return DK_cxx_destructor;
+
+ return DK_none;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
index 66578fb..14db7f8 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -77,14 +77,15 @@ TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
/// \brief Initializes a type location, and all of its children
/// recursively, as if the entire tree had been written in the
/// given location.
-void TypeLoc::initializeImpl(TypeLoc TL, SourceLocation Loc) {
+void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc) {
while (true) {
switch (TL.getTypeLocClass()) {
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
case CLASS: { \
CLASS##TypeLoc TLCasted = cast<CLASS##TypeLoc>(TL); \
- TLCasted.initializeLocal(Loc); \
+ TLCasted.initializeLocal(Context, Loc); \
TL = TLCasted.getNextTypeLoc(); \
if (!TL) return; \
continue; \
@@ -187,11 +188,10 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
return TST_char16;
case BuiltinType::Char32:
return TST_char32;
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
return TST_wchar;
- case BuiltinType::UndeducedAuto:
- return TST_auto;
-
+
case BuiltinType::UChar:
case BuiltinType::UShort:
case BuiltinType::UInt:
@@ -222,3 +222,44 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
return TST_unspecified;
}
+
+TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
+ while (ParenTypeLoc* PTL = dyn_cast<ParenTypeLoc>(&TL))
+ TL = PTL->getInnerLoc();
+ return TL;
+}
+
+void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc) {
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ switch (Args[i].getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Expression:
+ // FIXME: Can we do better for declarations and integral values?
+ ArgInfos[i] = TemplateArgumentLocInfo();
+ break;
+
+ case TemplateArgument::Type:
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
+ Loc));
+ break;
+
+ case TemplateArgument::Template:
+ ArgInfos[i] = TemplateArgumentLocInfo(SourceRange(Loc), Loc,
+ SourceLocation());
+ break;
+
+ case TemplateArgument::TemplateExpansion:
+ ArgInfos[i] = TemplateArgumentLocInfo(SourceRange(Loc), Loc, Loc);
+ break;
+ }
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
index d3a6b64..1390739 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -30,12 +30,13 @@ namespace {
public:
explicit TypePrinter(const PrintingPolicy &Policy) : Policy(Policy) { }
- void Print(QualType T, std::string &S);
+ void print(const Type *ty, Qualifiers qs, std::string &buffer);
+ void print(QualType T, std::string &S);
void AppendScope(DeclContext *DC, std::string &S);
- void PrintTag(TagDecl *T, std::string &S);
+ void printTag(TagDecl *T, std::string &S);
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) \
- void Print##CLASS(const CLASS##Type *T, std::string &S);
+ void print##CLASS(const CLASS##Type *T, std::string &S);
#include "clang/AST/TypeNodes.def"
};
}
@@ -55,9 +56,14 @@ static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
}
}
-void TypePrinter::Print(QualType T, std::string &S) {
- if (T.isNull()) {
- S += "NULL TYPE";
+void TypePrinter::print(QualType t, std::string &buffer) {
+ SplitQualType split = t.split();
+ print(split.first, split.second, buffer);
+}
+
+void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
+ if (!T) {
+ buffer += "NULL TYPE";
return;
}
@@ -65,28 +71,104 @@ void TypePrinter::Print(QualType T, std::string &S) {
return;
// Print qualifiers as appropriate.
- Qualifiers Quals = T.getLocalQualifiers();
- if (!Quals.empty()) {
- std::string TQS;
- Quals.getAsStringInternal(TQS, Policy);
+
+  // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
+  // so that we get "const int" instead of "int const", but we can't do this if
+  // the type is structurally more complex. For example, if the type is "int*",
+  // we *must* print "int * const"; printing "const int *" names a different
+  // type. Only do this when the type expands to a simple string.
+ bool CanPrefixQualifiers = false;
+
+ Type::TypeClass TC = T->getTypeClass();
+ if (const AutoType *AT = dyn_cast<AutoType>(T))
+ TC = AT->desugar()->getTypeClass();
+ if (const SubstTemplateTypeParmType *Subst
+ = dyn_cast<SubstTemplateTypeParmType>(T))
+ TC = Subst->getReplacementType()->getTypeClass();
+
+ switch (TC) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::UnresolvedUsing:
+ case Type::Typedef:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::TemplateSpecialization:
+ case Type::InjectedClassName:
+ case Type::DependentName:
+ case Type::DependentTemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ CanPrefixQualifiers = true;
+ break;
+
+ case Type::ObjCObjectPointer:
+ CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
+ T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
+ break;
+
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Paren:
+ case Type::Attributed:
+ case Type::PackExpansion:
+ case Type::SubstTemplateTypeParm:
+ case Type::Auto:
+ CanPrefixQualifiers = false;
+ break;
+ }
+
+ if (!CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ Quals.getAsStringInternal(qualsBuffer, Policy);
- if (!S.empty()) {
- TQS += ' ';
- TQS += S;
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
}
- std::swap(S, TQS);
+ std::swap(buffer, qualsBuffer);
}
switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) case Type::CLASS: \
- Print##CLASS(cast<CLASS##Type>(T.getTypePtr()), S); \
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS(cast<CLASS##Type>(T), buffer); \
break;
#include "clang/AST/TypeNodes.def"
}
+
+ // If we're adding the qualifiers as a prefix, do it now.
+ if (CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
+ }
}
-void TypePrinter::PrintBuiltin(const BuiltinType *T, std::string &S) {
+void TypePrinter::printBuiltin(const BuiltinType *T, std::string &S) {
if (S.empty()) {
S = T->getName(Policy.LangOpts);
} else {
@@ -96,12 +178,12 @@ void TypePrinter::PrintBuiltin(const BuiltinType *T, std::string &S) {
}
}
-void TypePrinter::PrintComplex(const ComplexType *T, std::string &S) {
- Print(T->getElementType(), S);
+void TypePrinter::printComplex(const ComplexType *T, std::string &S) {
+ print(T->getElementType(), S);
S = "_Complex " + S;
}
-void TypePrinter::PrintPointer(const PointerType *T, std::string &S) {
+void TypePrinter::printPointer(const PointerType *T, std::string &S) {
S = '*' + S;
// Handle things like 'int (*A)[4];' correctly.
@@ -109,15 +191,15 @@ void TypePrinter::PrintPointer(const PointerType *T, std::string &S) {
if (isa<ArrayType>(T->getPointeeType()))
S = '(' + S + ')';
- Print(T->getPointeeType(), S);
+ print(T->getPointeeType(), S);
}
-void TypePrinter::PrintBlockPointer(const BlockPointerType *T, std::string &S) {
+void TypePrinter::printBlockPointer(const BlockPointerType *T, std::string &S) {
S = '^' + S;
- Print(T->getPointeeType(), S);
+ print(T->getPointeeType(), S);
}
-void TypePrinter::PrintLValueReference(const LValueReferenceType *T,
+void TypePrinter::printLValueReference(const LValueReferenceType *T,
std::string &S) {
S = '&' + S;
@@ -126,10 +208,10 @@ void TypePrinter::PrintLValueReference(const LValueReferenceType *T,
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
S = '(' + S + ')';
- Print(T->getPointeeTypeAsWritten(), S);
+ print(T->getPointeeTypeAsWritten(), S);
}
-void TypePrinter::PrintRValueReference(const RValueReferenceType *T,
+void TypePrinter::printRValueReference(const RValueReferenceType *T,
std::string &S) {
S = "&&" + S;
@@ -138,13 +220,13 @@ void TypePrinter::PrintRValueReference(const RValueReferenceType *T,
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
S = '(' + S + ')';
- Print(T->getPointeeTypeAsWritten(), S);
+ print(T->getPointeeTypeAsWritten(), S);
}
-void TypePrinter::PrintMemberPointer(const MemberPointerType *T,
+void TypePrinter::printMemberPointer(const MemberPointerType *T,
std::string &S) {
std::string C;
- Print(QualType(T->getClass(), 0), C);
+ print(QualType(T->getClass(), 0), C);
C += "::*";
S = C + S;
@@ -153,25 +235,25 @@ void TypePrinter::PrintMemberPointer(const MemberPointerType *T,
if (isa<ArrayType>(T->getPointeeType()))
S = '(' + S + ')';
- Print(T->getPointeeType(), S);
+ print(T->getPointeeType(), S);
}
-void TypePrinter::PrintConstantArray(const ConstantArrayType *T,
+void TypePrinter::printConstantArray(const ConstantArrayType *T,
std::string &S) {
S += '[';
S += llvm::utostr(T->getSize().getZExtValue());
S += ']';
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
}
-void TypePrinter::PrintIncompleteArray(const IncompleteArrayType *T,
+void TypePrinter::printIncompleteArray(const IncompleteArrayType *T,
std::string &S) {
S += "[]";
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
}
-void TypePrinter::PrintVariableArray(const VariableArrayType *T,
+void TypePrinter::printVariableArray(const VariableArrayType *T,
std::string &S) {
S += '[';
@@ -193,10 +275,10 @@ void TypePrinter::PrintVariableArray(const VariableArrayType *T,
}
S += ']';
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
}
-void TypePrinter::PrintDependentSizedArray(const DependentSizedArrayType *T,
+void TypePrinter::printDependentSizedArray(const DependentSizedArrayType *T,
std::string &S) {
S += '[';
@@ -208,13 +290,13 @@ void TypePrinter::PrintDependentSizedArray(const DependentSizedArrayType *T,
}
S += ']';
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
}
-void TypePrinter::PrintDependentSizedExtVector(
+void TypePrinter::printDependentSizedExtVector(
const DependentSizedExtVectorType *T,
std::string &S) {
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
S += " __attribute__((ext_vector_type(";
if (T->getSizeExpr()) {
@@ -226,36 +308,52 @@ void TypePrinter::PrintDependentSizedExtVector(
S += ")))";
}
-void TypePrinter::PrintVector(const VectorType *T, std::string &S) {
- if (T->getAltiVecSpecific() != VectorType::NotAltiVec) {
- if (T->getAltiVecSpecific() == VectorType::Pixel)
- S = "__vector __pixel " + S;
- else {
- Print(T->getElementType(), S);
- S = ((T->getAltiVecSpecific() == VectorType::Bool)
- ? "__vector __bool " : "__vector ") + S;
- }
- } else {
+void TypePrinter::printVector(const VectorType *T, std::string &S) {
+ switch (T->getVectorKind()) {
+ case VectorType::AltiVecPixel:
+ S = "__vector __pixel " + S;
+ break;
+ case VectorType::AltiVecBool:
+ print(T->getElementType(), S);
+ S = "__vector __bool " + S;
+ break;
+ case VectorType::AltiVecVector:
+ print(T->getElementType(), S);
+ S = "__vector " + S;
+ break;
+ case VectorType::NeonVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_vector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::NeonPolyVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_polyvector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::GenericVector: {
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
std::string V = "__attribute__((__vector_size__(";
V += llvm::utostr_32(T->getNumElements()); // convert back to bytes.
std::string ET;
- Print(T->getElementType(), ET);
+ print(T->getElementType(), ET);
V += " * sizeof(" + ET + ")))) ";
S = V + S;
+ break;
+ }
}
}
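
Illustrative source forms for the cases above (AltiVec forms need an
AltiVec-enabled target, NEON forms an ARM target; these are example
declarations, not printer output):

  __vector __pixel vp;                                       // AltiVecPixel
  __vector __bool int vb;                                    // AltiVecBool
  __vector int vi;                                           // AltiVecVector
  int __attribute__((neon_vector_type(2))) n2;               // NeonVector
  int __attribute__((__vector_size__(4 * sizeof(int)))) gv;  // GenericVector
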
-void TypePrinter::PrintExtVector(const ExtVectorType *T, std::string &S) {
+void TypePrinter::printExtVector(const ExtVectorType *T, std::string &S) {
S += " __attribute__((ext_vector_type(";
S += llvm::utostr_32(T->getNumElements());
S += ")))";
- Print(T->getElementType(), S);
+ print(T->getElementType(), S);
}
-void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
+void TypePrinter::printFunctionProto(const FunctionProtoType *T,
std::string &S) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
if (!S.empty())
@@ -267,7 +365,7 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
ParamPolicy.SuppressSpecifiers = false;
for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
if (i) S += ", ";
- Print(T->getArgType(i), Tmp);
+ print(T->getArgType(i), Tmp);
S += Tmp;
Tmp.clear();
}
@@ -309,6 +407,21 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
S += " __attribute__((regparm (" +
llvm::utostr_32(Info.getRegParm()) + ")))";
+ AppendTypeQualList(S, T->getTypeQuals());
+
+ switch (T->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ S += " &";
+ break;
+
+ case RQ_RValue:
+ S += " &&";
+ break;
+ }
+
if (T->hasExceptionSpec()) {
S += " throw(";
if (T->hasAnyExceptionSpec())
@@ -319,18 +432,16 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
S += ", ";
std::string ExceptionType;
- Print(T->getExceptionType(I), ExceptionType);
+ print(T->getExceptionType(I), ExceptionType);
S += ExceptionType;
}
S += ")";
}
- AppendTypeQualList(S, T->getTypeQuals());
-
- Print(T->getResultType(), S);
+ print(T->getResultType(), S);
}
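
Member-function declarations whose printed types exercise the new
ref-qualifier cases (C++0x; illustrative):

  struct S {
    void f() const &;   // type prints with a trailing "const &"
    void g() &&;        // type prints with a trailing "&&"
  };

Note that the cv-qualifier list and ref-qualifier are now emitted before the
exception specification, matching the order in which they appear in the
declaration grammar.
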
-void TypePrinter::PrintFunctionNoProto(const FunctionNoProtoType *T,
+void TypePrinter::printFunctionNoProto(const FunctionNoProtoType *T,
std::string &S) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
if (!S.empty())
@@ -339,10 +450,10 @@ void TypePrinter::PrintFunctionNoProto(const FunctionNoProtoType *T,
S += "()";
if (T->getNoReturnAttr())
S += " __attribute__((noreturn))";
- Print(T->getResultType(), S);
+ print(T->getResultType(), S);
}
-static void PrintTypeSpec(const NamedDecl *D, std::string &S) {
+static void printTypeSpec(const NamedDecl *D, std::string &S) {
IdentifierInfo *II = D->getIdentifier();
if (S.empty())
S = II->getName().str();
@@ -350,16 +461,16 @@ static void PrintTypeSpec(const NamedDecl *D, std::string &S) {
S = II->getName().str() + ' ' + S;
}
-void TypePrinter::PrintUnresolvedUsing(const UnresolvedUsingType *T,
+void TypePrinter::printUnresolvedUsing(const UnresolvedUsingType *T,
std::string &S) {
- PrintTypeSpec(T->getDecl(), S);
+ printTypeSpec(T->getDecl(), S);
}
-void TypePrinter::PrintTypedef(const TypedefType *T, std::string &S) {
- PrintTypeSpec(T->getDecl(), S);
+void TypePrinter::printTypedef(const TypedefType *T, std::string &S) {
+ printTypeSpec(T->getDecl(), S);
}
-void TypePrinter::PrintTypeOfExpr(const TypeOfExprType *T, std::string &S) {
+void TypePrinter::printTypeOfExpr(const TypeOfExprType *T, std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'typeof(e) X'.
S = ' ' + S;
std::string Str;
@@ -368,15 +479,15 @@ void TypePrinter::PrintTypeOfExpr(const TypeOfExprType *T, std::string &S) {
S = "typeof " + s.str() + S;
}
-void TypePrinter::PrintTypeOf(const TypeOfType *T, std::string &S) {
+void TypePrinter::printTypeOf(const TypeOfType *T, std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'typeof(t) X'.
S = ' ' + S;
std::string Tmp;
- Print(T->getUnderlyingType(), Tmp);
+ print(T->getUnderlyingType(), Tmp);
S = "typeof(" + Tmp + ")" + S;
}
-void TypePrinter::PrintDecltype(const DecltypeType *T, std::string &S) {
+void TypePrinter::printDecltype(const DecltypeType *T, std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'decltype(t) X'.
S = ' ' + S;
std::string Str;
@@ -385,6 +496,17 @@ void TypePrinter::PrintDecltype(const DecltypeType *T, std::string &S) {
S = "decltype(" + s.str() + ")" + S;
}
+void TypePrinter::printAuto(const AutoType *T, std::string &S) {
+ // If the type has been deduced, do not print 'auto'.
+ if (T->isDeduced()) {
+ print(T->getDeducedType(), S);
+ } else {
+ if (!S.empty()) // Prefix the basic type, e.g. 'auto X'.
+ S = ' ' + S;
+ S = "auto" + S;
+ }
+}
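
Behavior sketch (illustrative):

  auto x = 42;   // deduced AutoType: prints as "int", the deduced type
  // An AutoType that has not been deduced yet (for example, one appearing
  // in a dependent context) prints as "auto".
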
+
/// Appends the given scope to the end of a string.
void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
if (DC->isTranslationUnit()) return;
@@ -402,8 +524,8 @@ void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
std::string TemplateArgsStr
= TemplateSpecializationType::PrintTemplateArgumentList(
- TemplateArgs.getFlatArgumentList(),
- TemplateArgs.flat_size(),
+ TemplateArgs.data(),
+ TemplateArgs.size(),
Policy);
Buffer += Spec->getIdentifier()->getName();
Buffer += TemplateArgsStr;
@@ -418,7 +540,7 @@ void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
Buffer += "::";
}
-void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
+void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
if (Policy.SuppressTag)
return;
@@ -457,9 +579,9 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
if (!HasKindDecoration)
OS << " " << D->getKindName();
- if (D->getLocation().isValid()) {
- PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
D->getLocation());
+ if (PLoc.isValid()) {
OS << " at " << PLoc.getFilename()
<< ':' << PLoc.getLine()
<< ':' << PLoc.getColumn();
@@ -482,8 +604,8 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
NumArgs = TST->getNumArgs();
} else {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- Args = TemplateArgs.getFlatArgumentList();
- NumArgs = TemplateArgs.flat_size();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
}
Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
NumArgs,
@@ -498,15 +620,15 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
std::swap(Buffer, InnerString);
}
-void TypePrinter::PrintRecord(const RecordType *T, std::string &S) {
- PrintTag(T->getDecl(), S);
+void TypePrinter::printRecord(const RecordType *T, std::string &S) {
+ printTag(T->getDecl(), S);
}
-void TypePrinter::PrintEnum(const EnumType *T, std::string &S) {
- PrintTag(T->getDecl(), S);
+void TypePrinter::printEnum(const EnumType *T, std::string &S) {
+ printTag(T->getDecl(), S);
}
-void TypePrinter::PrintTemplateTypeParm(const TemplateTypeParmType *T,
+void TypePrinter::printTemplateTypeParm(const TemplateTypeParmType *T,
std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'parmname X'.
S = ' ' + S;
@@ -518,12 +640,18 @@ void TypePrinter::PrintTemplateTypeParm(const TemplateTypeParmType *T,
S = T->getName()->getName().str() + S;
}
-void TypePrinter::PrintSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
+void TypePrinter::printSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
std::string &S) {
- Print(T->getReplacementType(), S);
+ print(T->getReplacementType(), S);
}
-void TypePrinter::PrintTemplateSpecialization(
+void TypePrinter::printSubstTemplateTypeParmPack(
+ const SubstTemplateTypeParmPackType *T,
+ std::string &S) {
+ printTemplateTypeParm(T->getReplacedParameter(), S);
+}
+
+void TypePrinter::printTemplateSpecialization(
const TemplateSpecializationType *T,
std::string &S) {
std::string SpecString;
@@ -543,12 +671,12 @@ void TypePrinter::PrintTemplateSpecialization(
S = SpecString + ' ' + S;
}
-void TypePrinter::PrintInjectedClassName(const InjectedClassNameType *T,
+void TypePrinter::printInjectedClassName(const InjectedClassNameType *T,
std::string &S) {
- PrintTemplateSpecialization(T->getInjectedTST(), S);
+ printTemplateSpecialization(T->getInjectedTST(), S);
}
-void TypePrinter::PrintElaborated(const ElaboratedType *T, std::string &S) {
+void TypePrinter::printElaborated(const ElaboratedType *T, std::string &S) {
std::string MyString;
{
@@ -564,7 +692,7 @@ void TypePrinter::PrintElaborated(const ElaboratedType *T, std::string &S) {
std::string TypeStr;
PrintingPolicy InnerPolicy(Policy);
InnerPolicy.SuppressScope = true;
- TypePrinter(InnerPolicy).Print(T->getNamedType(), TypeStr);
+ TypePrinter(InnerPolicy).print(T->getNamedType(), TypeStr);
MyString += TypeStr;
if (S.empty())
@@ -573,7 +701,13 @@ void TypePrinter::PrintElaborated(const ElaboratedType *T, std::string &S) {
S = MyString + ' ' + S;
}
-void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S) {
+void TypePrinter::printParen(const ParenType *T, std::string &S) {
+ if (!S.empty() && !isa<FunctionType>(T->getInnerType()))
+ S = '(' + S + ')';
+ print(T->getInnerType(), S);
+}
+
+void TypePrinter::printDependentName(const DependentNameType *T, std::string &S) {
std::string MyString;
{
@@ -593,7 +727,7 @@ void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S)
S = MyString + ' ' + S;
}
-void TypePrinter::PrintDependentTemplateSpecialization(
+void TypePrinter::printDependentTemplateSpecialization(
const DependentTemplateSpecializationType *T, std::string &S) {
std::string MyString;
{
@@ -617,7 +751,91 @@ void TypePrinter::PrintDependentTemplateSpecialization(
S = MyString + ' ' + S;
}
-void TypePrinter::PrintObjCInterface(const ObjCInterfaceType *T,
+void TypePrinter::printPackExpansion(const PackExpansionType *T,
+ std::string &S) {
+ print(T->getPattern(), S);
+ S += "...";
+}
+
+void TypePrinter::printAttributed(const AttributedType *T,
+ std::string &S) {
+ print(T->getModifiedType(), S);
+
+ // TODO: not all attributes are GCC-style attributes.
+ S += "__attribute__((";
+ switch (T->getAttrKind()) {
+ case AttributedType::attr_address_space:
+ S += "address_space(";
+    S += llvm::utostr_32(T->getEquivalentType().getAddressSpace());
+ S += ")";
+ break;
+
+ case AttributedType::attr_vector_size: {
+ S += "__vector_size__(";
+    if (const VectorType *vector = T->getEquivalentType()->getAs<VectorType>()) {
+      S += llvm::utostr_32(vector->getNumElements());
+ S += " * sizeof(";
+
+ std::string tmp;
+ print(vector->getElementType(), tmp);
+ S += tmp;
+ S += ")";
+ }
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type: {
+ if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
+ S += "neon_vector_type(";
+ else
+ S += "neon_polyvector_type(";
+ const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
+ S += llvm::utostr_32(vector->getNumElements());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_regparm: {
+ S += "regparm(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+    S += llvm::utostr_32(t->getAs<FunctionType>()->getRegParmType());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_objc_gc: {
+ S += "objc_gc(";
+
+ QualType tmp = T->getEquivalentType();
+ while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
+ QualType next = tmp->getPointeeType();
+ if (next == tmp) break;
+ tmp = next;
+ }
+
+ if (tmp.isObjCGCWeak())
+ S += "weak";
+ else
+ S += "strong";
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_noreturn: S += "noreturn"; break;
+ case AttributedType::attr_cdecl: S += "cdecl"; break;
+ case AttributedType::attr_fastcall: S += "fastcall"; break;
+ case AttributedType::attr_stdcall: S += "stdcall"; break;
+ case AttributedType::attr_thiscall: S += "thiscall"; break;
+ case AttributedType::attr_pascal: S += "pascal"; break;
+ }
+ S += "))";
+}
+
+void TypePrinter::printObjCInterface(const ObjCInterfaceType *T,
std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
S = ' ' + S;
@@ -626,13 +844,13 @@ void TypePrinter::PrintObjCInterface(const ObjCInterfaceType *T,
S = ObjCQIString + S;
}
-void TypePrinter::PrintObjCObject(const ObjCObjectType *T,
+void TypePrinter::printObjCObject(const ObjCObjectType *T,
std::string &S) {
if (T->qual_empty())
- return Print(T->getBaseType(), S);
+ return print(T->getBaseType(), S);
std::string tmp;
- Print(T->getBaseType(), tmp);
+ print(T->getBaseType(), tmp);
tmp += '<';
bool isFirst = true;
for (ObjCObjectType::qual_iterator
@@ -652,18 +870,23 @@ void TypePrinter::PrintObjCObject(const ObjCObjectType *T,
std::swap(tmp, S);
}
-void TypePrinter::PrintObjCObjectPointer(const ObjCObjectPointerType *T,
+void TypePrinter::printObjCObjectPointer(const ObjCObjectPointerType *T,
std::string &S) {
std::string ObjCQIString;
+ T->getPointeeType().getLocalQualifiers().getAsStringInternal(ObjCQIString,
+ Policy);
+ if (!ObjCQIString.empty())
+ ObjCQIString += ' ';
+
if (T->isObjCIdType() || T->isObjCQualifiedIdType())
- ObjCQIString = "id";
+ ObjCQIString += "id";
else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
- ObjCQIString = "Class";
+ ObjCQIString += "Class";
else if (T->isObjCSelType())
- ObjCQIString = "SEL";
+ ObjCQIString += "SEL";
else
- ObjCQIString = T->getInterfaceDecl()->getNameAsString();
+ ObjCQIString += T->getInterfaceDecl()->getNameAsString();
if (!T->qual_empty()) {
ObjCQIString += '<';
@@ -677,9 +900,6 @@ void TypePrinter::PrintObjCObjectPointer(const ObjCObjectPointerType *T,
ObjCQIString += '>';
}
- T->getPointeeType().getLocalQualifiers().getAsStringInternal(ObjCQIString,
- Policy);
-
if (!T->isObjCIdType() && !T->isObjCQualifiedIdType())
ObjCQIString += " *"; // Don't forget the implicit pointer.
else if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
@@ -688,44 +908,6 @@ void TypePrinter::PrintObjCObjectPointer(const ObjCObjectPointerType *T,
S = ObjCQIString + S;
}
-static void PrintTemplateArgument(std::string &Buffer,
- const TemplateArgument &Arg,
- const PrintingPolicy &Policy) {
- switch (Arg.getKind()) {
- case TemplateArgument::Null:
- assert(false && "Null template argument");
- break;
-
- case TemplateArgument::Type:
- Arg.getAsType().getAsStringInternal(Buffer, Policy);
- break;
-
- case TemplateArgument::Declaration:
- Buffer = cast<NamedDecl>(Arg.getAsDecl())->getNameAsString();
- break;
-
- case TemplateArgument::Template: {
- llvm::raw_string_ostream s(Buffer);
- Arg.getAsTemplate().print(s, Policy);
- break;
- }
-
- case TemplateArgument::Integral:
- Buffer = Arg.getAsIntegral()->toString(10, true);
- break;
-
- case TemplateArgument::Expression: {
- llvm::raw_string_ostream s(Buffer);
- Arg.getAsExpr()->printPretty(s, 0, Policy);
- break;
- }
-
- case TemplateArgument::Pack:
- assert(0 && "FIXME: Implement!");
- break;
- }
-}
-
std::string TemplateSpecializationType::
PrintTemplateArgumentList(const TemplateArgumentListInfo &Args,
const PrintingPolicy &Policy) {
@@ -738,17 +920,27 @@ std::string
TemplateSpecializationType::PrintTemplateArgumentList(
const TemplateArgument *Args,
unsigned NumArgs,
- const PrintingPolicy &Policy) {
+ const PrintingPolicy &Policy,
+ bool SkipBrackets) {
std::string SpecString;
- SpecString += '<';
+ if (!SkipBrackets)
+ SpecString += '<';
+
for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
- if (Arg)
+    // With brackets, '<' already occupies the buffer, so "size() > 1" means a
+    // previous argument was printed; without brackets the threshold is 0.
+    if (SpecString.size() > !SkipBrackets)
SpecString += ", ";
// Print the argument into a string.
std::string ArgString;
- PrintTemplateArgument(ArgString, Args[Arg], Policy);
-
+ if (Args[Arg].getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(Args[Arg].pack_begin(),
+ Args[Arg].pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].print(Policy, ArgOut);
+ }
+
// If this is the first argument and its string representation
// begins with the global scope specifier ('::foo'), add a space
    // to avoid printing the digraph '<:'.
@@ -761,10 +953,11 @@ TemplateSpecializationType::PrintTemplateArgumentList(
// If the last character of our string is '>', add another space to
// keep the two '>''s separate tokens. We don't *have* to do this in
// C++0x, but it's still good hygiene.
- if (SpecString[SpecString.size() - 1] == '>')
+ if (!SpecString.empty() && SpecString[SpecString.size() - 1] == '>')
SpecString += ' ';
- SpecString += '>';
+ if (!SkipBrackets)
+ SpecString += '>';
return SpecString;
}
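
The two lexical hazards this spacing logic avoids, at the source level
(illustrative):

  A< ::B> a;                          // the space after '<' avoids the '<:'
                                      // digraph, which would lex as '['
  std::vector<std::vector<int> > v;   // the space keeps '> >' two tokens;
                                      // '>>' is a single shift token in C++98
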
@@ -776,12 +969,20 @@ PrintTemplateArgumentList(const TemplateArgumentLoc *Args, unsigned NumArgs,
std::string SpecString;
SpecString += '<';
for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
- if (Arg)
+ if (SpecString.size() > 1)
SpecString += ", ";
// Print the argument into a string.
std::string ArgString;
- PrintTemplateArgument(ArgString, Args[Arg].getArgument(), Policy);
+ if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(
+ Args[Arg].getArgument().pack_begin(),
+ Args[Arg].getArgument().pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].getArgument().print(Policy, ArgOut);
+ }
// If this is the first argument and its string representation
// begins with the global scope specifier ('::foo'), add a space
@@ -847,15 +1048,15 @@ void Qualifiers::getAsStringInternal(std::string &S,
}
}
-std::string QualType::getAsString() const {
- std::string S;
- LangOptions LO;
- getAsStringInternal(S, PrintingPolicy(LO));
- return S;
+std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+ std::string buffer;
+ LangOptions options;
+ getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+ return buffer;
}
-void QualType::getAsStringInternal(std::string &S,
- const PrintingPolicy &Policy) const {
- TypePrinter Printer(Policy);
- Printer.Print(*this, S);
+void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &buffer,
+ const PrintingPolicy &policy) {
+ TypePrinter(policy).print(ty, qs, buffer);
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp
index bf9f967..5233d3b 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp
@@ -59,7 +59,11 @@ CFG *AnalysisContext::getCFG() {
return getUnoptimizedCFG();
if (!builtCFG) {
- cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), true, AddEHEdges);
+ CFG::BuildOptions B;
+ B.AddEHEdges = AddEHEdges;
+ B.AddImplicitDtors = AddImplicitDtors;
+ B.AddInitializers = AddInitializers;
+ cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), B);
// Even when the cfg is not successfully built, we don't
// want to try building it again.
builtCFG = true;
@@ -69,8 +73,12 @@ CFG *AnalysisContext::getCFG() {
CFG *AnalysisContext::getUnoptimizedCFG() {
if (!builtCompleteCFG) {
- completeCFG = CFG::buildCFG(D, getBody(), &D->getASTContext(),
- false, AddEHEdges);
+ CFG::BuildOptions B;
+ B.PruneTriviallyFalseEdges = false;
+ B.AddEHEdges = AddEHEdges;
+ B.AddImplicitDtors = AddImplicitDtors;
+ B.AddInitializers = AddInitializers;
+ completeCFG = CFG::buildCFG(D, getBody(), &D->getASTContext(), B);
// Even when the cfg is not successfully built, we don't
// want to try building it again.
builtCompleteCFG = true;
@@ -78,6 +86,10 @@ CFG *AnalysisContext::getUnoptimizedCFG() {
return completeCFG;
}
+void AnalysisContext::dumpCFG() {
+ getCFG()->dump(getASTContext().getLangOptions());
+}
+
ParentMap &AnalysisContext::getParentMap() {
if (!PM)
PM = new ParentMap(getBody());
@@ -122,7 +134,8 @@ AnalysisContext *AnalysisContextManager::getContext(const Decl *D,
idx::TranslationUnit *TU) {
AnalysisContext *&AC = Contexts[D];
if (!AC)
- AC = new AnalysisContext(D, TU, UseUnoptimizedCFG);
+ AC = new AnalysisContext(D, TU, UseUnoptimizedCFG, false,
+ AddImplicitDtors, AddInitializers);
return AC;
}
@@ -179,8 +192,8 @@ LocationContextManager::getLocationContext(AnalysisContext *ctx,
const StackFrameContext*
LocationContextManager::getStackFrame(AnalysisContext *ctx,
const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk,
- unsigned idx) {
+ const Stmt *s,
+ const CFGBlock *blk, unsigned idx) {
llvm::FoldingSetNodeID ID;
StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
void *InsertPos;
@@ -260,7 +273,7 @@ public:
}
void VisitStmt(Stmt *S) {
- for (Stmt::child_iterator I = S->child_begin(), E = S->child_end();I!=E;++I)
+ for (Stmt::child_range I = S->children(); I; ++I)
if (Stmt *child = *I)
Visit(child);
}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index c97b916..a0ec5fe 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -32,24 +32,181 @@ static SourceLocation GetEndLoc(Decl* D) {
if (VarDecl* VD = dyn_cast<VarDecl>(D))
if (Expr* Ex = VD->getInit())
return Ex->getSourceRange().getEnd();
-
return D->getLocation();
}
+/// The CFG builder uses a recursive algorithm to build the CFG. When
+/// we process an expression, sometimes we know that we must add the
+/// subexpressions as block-level expressions. For example:
+///
+/// exp1 || exp2
+///
+/// When processing the '||' expression, we know that exp1 and exp2
+/// need to be added as block-level expressions, even though they
+/// might not normally need to be. AddStmtChoice records this
+/// contextual information. If AddStmtChoice is 'NotAlwaysAdd', then
+/// the builder has an option not to add a subexpression as a
+/// block-level expression.
+///
class AddStmtChoice {
public:
- enum Kind { NotAlwaysAdd = 0,
- AlwaysAdd = 1,
- AsLValueNotAlwaysAdd = 2,
- AlwaysAddAsLValue = 3 };
+ enum Kind { NotAlwaysAdd = 0, AlwaysAdd = 1 };
- AddStmtChoice(Kind kind) : k(kind) {}
+ AddStmtChoice(Kind a_kind = NotAlwaysAdd) : kind(a_kind) {}
- bool alwaysAdd() const { return (unsigned)k & 0x1; }
- bool asLValue() const { return k >= AsLValueNotAlwaysAdd; }
+ bool alwaysAdd() const { return kind & AlwaysAdd; }
+
+ /// Return a copy of this object, except with the 'always-add' bit
+ /// set as specified.
+ AddStmtChoice withAlwaysAdd(bool alwaysAdd) const {
+ return AddStmtChoice(alwaysAdd ? Kind(kind | AlwaysAdd) :
+ Kind(kind & ~AlwaysAdd));
+ }
private:
- Kind k;
+ Kind kind;
+};
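
A minimal usage sketch (hypothetical call site inside the builder):

  AddStmtChoice asc;                               // defaults to NotAlwaysAdd
  AddStmtChoice forced = asc.withAlwaysAdd(true);  // kind | AlwaysAdd
  // asc.alwaysAdd() == false, forced.alwaysAdd() == true.
  // When visiting 'exp1 || exp2', both operands are visited with
  // withAlwaysAdd(true) so that they become block-level expressions.
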
+
+/// LocalScope - Node in tree of local scopes created for C++ implicit
+/// destructor calls generation. It contains list of automatic variables
+/// declared in the scope and link to position in previous scope this scope
+/// began in.
+///
+/// The process of creating local scopes is as follows:
+/// - Init CFGBuilder::ScopePos with an invalid position (the equivalent of
+///   null),
+/// - Before processing statements in a scope (e.g. CompoundStmt), create a
+///   LocalScope object using CFGBuilder::ScopePos as the link to the previous
+///   scope, and set CFGBuilder::ScopePos to the end of the new scope,
+/// - On every occurrence of a VarDecl, advance CFGBuilder::ScopePos if it
+///   points at this VarDecl,
+/// - For every normal (non-jump) exit from a scope, add to the CFGBlock the
+///   destructors for objects in the current scope,
+/// - For every jump, add to the CFGBlock the destructors for objects between
+///   CFGBuilder::ScopePos and the local scope position saved for the jump
+///   target. Thanks to C++ restrictions on goto jumps, we can be sure that the
+///   jump target position will be on the path to the root from
+///   CFGBuilder::ScopePos (adding to LocalScope any variable that doesn't need
+///   its constructor called can break this assumption),
+///
+class LocalScope {
+public:
+ typedef BumpVector<VarDecl*> AutomaticVarsTy;
+
+  /// const_iterator - Iterates a local scope backwards and jumps to the
+  /// previous scope on reaching the beginning of the currently iterated scope.
+ class const_iterator {
+ const LocalScope* Scope;
+
+    /// VarIter is guaranteed to be greater than 0 for every valid iterator.
+ /// Invalid iterator (with null Scope) has VarIter equal to 0.
+ unsigned VarIter;
+
+ public:
+    /// Create an invalid iterator. Dereferencing an invalid iterator is not
+    /// allowed. Incrementing an invalid iterator is allowed and results in an
+    /// invalid iterator.
+ const_iterator()
+ : Scope(NULL), VarIter(0) {}
+
+    /// Create a valid iterator. If S.Prev is an invalid iterator and I is
+    /// equal to 0, this creates an invalid iterator.
+ const_iterator(const LocalScope& S, unsigned I)
+ : Scope(&S), VarIter(I) {
+      // An iterator to the "end" of a scope is not allowed. Handle it by
+      // going up the scope tree, possibly up to the invalid iterator at the
+      // root.
+ if (VarIter == 0 && Scope)
+ *this = Scope->Prev;
+ }
+
+ VarDecl* const* operator->() const {
+ assert (Scope && "Dereferencing invalid iterator is not allowed");
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ return &Scope->Vars[VarIter - 1];
+ }
+ VarDecl* operator*() const {
+ return *this->operator->();
+ }
+
+ const_iterator& operator++() {
+ if (!Scope)
+ return *this;
+
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ --VarIter;
+ if (VarIter == 0)
+ *this = Scope->Prev;
+ return *this;
+ }
+ const_iterator operator++(int) {
+ const_iterator P = *this;
+ ++*this;
+ return P;
+ }
+
+ bool operator==(const const_iterator& rhs) const {
+ return Scope == rhs.Scope && VarIter == rhs.VarIter;
+ }
+ bool operator!=(const const_iterator& rhs) const {
+ return !(*this == rhs);
+ }
+
+ operator bool() const {
+ return *this != const_iterator();
+ }
+
+ int distance(const_iterator L);
+ };
+
+ friend class const_iterator;
+
+private:
+ BumpVectorContext ctx;
+
+ /// Automatic variables in order of declaration.
+ AutomaticVarsTy Vars;
+  /// Iterator to the variable in the previous scope that was declared just
+  /// before the beginning of this scope.
+ const_iterator Prev;
+
+public:
+  /// Constructs an empty scope linked to the previous scope at the specified
+  /// place.
+ LocalScope(BumpVectorContext &ctx, const_iterator P)
+ : ctx(ctx), Vars(ctx, 4), Prev(P) {}
+
+  /// Beginning of the scope, in the direction of CFG building (backwards).
+ const_iterator begin() const { return const_iterator(*this, Vars.size()); }
+
+ void addVar(VarDecl* VD) {
+ Vars.push_back(VD, ctx);
+ }
+};
+
+/// distance - Calculates the distance from this to L. L must be reachable from
+/// this (by repeated use of operator++). The cost of computing the distance is
+/// linear in the number of scopes between this and L.
+int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
+ int D = 0;
+ const_iterator F = *this;
+ while (F.Scope != L.Scope) {
+ assert (F != const_iterator()
+ && "L iterator is not reachable from F iterator.");
+ D += F.VarIter;
+ F = F.Scope->Prev;
+ }
+ D += F.VarIter - L.VarIter;
+ return D;
+}
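
What the scope tree and distance() model, at the source level (illustrative;
X is any type with a non-trivial destructor):

  void f(bool b) {
    X x1;               // scope A holds [x1]
    {
      X x2;             // scope B holds [x2]; B.Prev points just after x1
      if (b) goto out;  // jump: the distance from here to 'out' covers x2
                        // only, so just ~X for x2 is emitted on that edge
    }                   // a normal scope exit also destroys x2
  out: ;                // x1 is still live here; it dies at the end of f
  }
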
+
+/// BlockScopePosPair - Structure for specifying a position in the CFG during
+/// its build process. It consists of a CFGBlock that specifies the position in
+/// the CFG graph and a LocalScope::const_iterator that specifies the position
+/// in the LocalScope graph.
+struct BlockScopePosPair {
+ BlockScopePosPair() : block(0) {}
+ BlockScopePosPair(CFGBlock* b, LocalScope::const_iterator scopePos)
+ : block(b), scopePosition(scopePos) {}
+
+ CFGBlock *block;
+ LocalScope::const_iterator scopePosition;
};
/// CFGBuilder - This class implements CFG construction from an AST.
@@ -67,41 +224,48 @@ private:
/// implicit fall-throughs without extra basic blocks.
///
class CFGBuilder {
+ typedef BlockScopePosPair JumpTarget;
+ typedef BlockScopePosPair JumpSource;
+
ASTContext *Context;
llvm::OwningPtr<CFG> cfg;
CFGBlock* Block;
CFGBlock* Succ;
- CFGBlock* ContinueTargetBlock;
- CFGBlock* BreakTargetBlock;
+ JumpTarget ContinueJumpTarget;
+ JumpTarget BreakJumpTarget;
CFGBlock* SwitchTerminatedBlock;
CFGBlock* DefaultCaseBlock;
CFGBlock* TryTerminatedBlock;
- // LabelMap records the mapping from Label expressions to their blocks.
- typedef llvm::DenseMap<LabelStmt*,CFGBlock*> LabelMapTy;
+ // Current position in local scope.
+ LocalScope::const_iterator ScopePos;
+
+ // LabelMap records the mapping from Label expressions to their jump targets.
+ typedef llvm::DenseMap<LabelDecl*, JumpTarget> LabelMapTy;
LabelMapTy LabelMap;
// A list of blocks that end with a "goto" that must be backpatched to their
// resolved targets upon completion of CFG construction.
- typedef std::vector<CFGBlock*> BackpatchBlocksTy;
+ typedef std::vector<JumpSource> BackpatchBlocksTy;
BackpatchBlocksTy BackpatchBlocks;
// A list of labels whose address has been taken (for indirect gotos).
- typedef llvm::SmallPtrSet<LabelStmt*,5> LabelSetTy;
+ typedef llvm::SmallPtrSet<LabelDecl*, 5> LabelSetTy;
LabelSetTy AddressTakenLabels;
+ bool badCFG;
+ CFG::BuildOptions BuildOpts;
+
public:
   explicit CFGBuilder() : cfg(new CFG()), // create a new CFG
Block(NULL), Succ(NULL),
- ContinueTargetBlock(NULL), BreakTargetBlock(NULL),
SwitchTerminatedBlock(NULL), DefaultCaseBlock(NULL),
- TryTerminatedBlock(NULL) {}
+ TryTerminatedBlock(NULL), badCFG(false) {}
// buildCFG - Used by external clients to construct the CFG.
CFG* buildCFG(const Decl *D, Stmt *Statement, ASTContext *C,
- bool pruneTriviallyFalseEdges, bool AddEHEdges,
- bool AddScopes);
+ CFG::BuildOptions BO);
private:
// Visitors to walk an AST and construct the CFG.
@@ -110,22 +274,33 @@ private:
CFGBlock *VisitBlockExpr(BlockExpr* E, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
+ CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E,
+ AddStmtChoice asc);
CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
+ CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc);
CFGBlock *VisitCXXMemberCallExpr(CXXMemberCallExpr *C, AddStmtChoice asc);
CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
CFGBlock *VisitCaseStmt(CaseStmt *C);
CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
CFGBlock *VisitCompoundStmt(CompoundStmt *C);
- CFGBlock *VisitConditionalOperator(ConditionalOperator *C, AddStmtChoice asc);
+ CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
+ AddStmtChoice asc);
CFGBlock *VisitContinueStmt(ContinueStmt *C);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
- CFGBlock *VisitDeclSubExpr(Decl* D);
+ CFGBlock *VisitDeclSubExpr(DeclStmt* DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
CFGBlock *VisitDoStmt(DoStmt *D);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt* G);
CFGBlock *VisitIfStmt(IfStmt *I);
+ CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
@@ -138,55 +313,81 @@ private:
CFGBlock *VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E, AddStmtChoice asc);
CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd);
CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
CFGBlock *VisitChildren(Stmt* S);
+ // Visitors to walk an AST and generate destructors of temporaries in
+ // full expression.
+ CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary = false);
+ CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E);
+ CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E);
+ CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(CXXBindTemporaryExpr *E,
+ bool BindToTemporary);
+ CFGBlock *
+ VisitConditionalOperatorForTemporaryDtors(AbstractConditionalOperator *E,
+ bool BindToTemporary);
+
// NYS == Not Yet Supported
CFGBlock* NYS() {
badCFG = true;
return Block;
}
- CFGBlock *StartScope(Stmt *S, CFGBlock *B) {
- if (!AddScopes)
- return B;
-
- if (B == 0)
- B = createBlock();
- B->StartScope(S, cfg->getBumpVectorContext());
- return B;
- }
-
- void EndScope(Stmt *S) {
- if (!AddScopes)
- return;
-
- if (Block == 0)
- Block = createBlock();
- Block->EndScope(S, cfg->getBumpVectorContext());
- }
-
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
- bool FinishBlock(CFGBlock* B);
+
CFGBlock *addStmt(Stmt *S) {
return Visit(S, AddStmtChoice::AlwaysAdd);
}
+ CFGBlock *addInitializer(CXXCtorInitializer *I);
+ void addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt* S);
+ void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
+
+  // Local scope creation.
+ LocalScope* createOrReuseLocalScope(LocalScope* Scope);
+
+ void addLocalScopeForStmt(Stmt* S);
+ LocalScope* addLocalScopeForDeclStmt(DeclStmt* DS, LocalScope* Scope = NULL);
+ LocalScope* addLocalScopeForVarDecl(VarDecl* VD, LocalScope* Scope = NULL);
- void AppendStmt(CFGBlock *B, Stmt *S,
+ void addLocalScopeAndDtors(Stmt* S);
+
+ // Interface to CFGBlock - adding CFGElements.
+ void appendStmt(CFGBlock *B, Stmt *S,
AddStmtChoice asc = AddStmtChoice::AlwaysAdd) {
- B->appendStmt(S, cfg->getBumpVectorContext(), asc.asLValue());
+ B->appendStmt(S, cfg->getBumpVectorContext());
+ }
+ void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
+ B->appendInitializer(I, cfg->getBumpVectorContext());
+ }
+ void appendBaseDtor(CFGBlock *B, const CXXBaseSpecifier *BS) {
+ B->appendBaseDtor(BS, cfg->getBumpVectorContext());
+ }
+ void appendMemberDtor(CFGBlock *B, FieldDecl *FD) {
+ B->appendMemberDtor(FD, cfg->getBumpVectorContext());
}
+ void appendTemporaryDtor(CFGBlock *B, CXXBindTemporaryExpr *E) {
+ B->appendTemporaryDtor(E, cfg->getBumpVectorContext());
+ }
+
+ void insertAutomaticObjDtors(CFGBlock* Blk, CFGBlock::iterator I,
+ LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S);
+ void appendAutomaticObjDtors(CFGBlock* Blk, LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt* S);
+ void prependAutomaticObjDtorsWithTerminator(CFGBlock* Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E);
- void AddSuccessor(CFGBlock *B, CFGBlock *S) {
+ void addSuccessor(CFGBlock *B, CFGBlock *S) {
B->addSuccessor(S, cfg->getBumpVectorContext());
}
/// TryResult - a class representing a variant over the values
- /// 'true', 'false', or 'unknown'. This is returned by TryEvaluateBool,
+ /// 'true', 'false', or 'unknown'. This is returned by tryEvaluateBool,
/// and is used by the CFGBuilder to decide if a branch condition
/// can be decided up front during CFG construction.
class TryResult {
@@ -204,10 +405,10 @@ private:
}
};
- /// TryEvaluateBool - Try and evaluate the Stmt and return 0 or 1
+ /// tryEvaluateBool - Try and evaluate the Stmt and return 0 or 1
/// if we can evaluate to a known value, otherwise return -1.
- TryResult TryEvaluateBool(Expr *S) {
- if (!PruneTriviallyFalseEdges)
+ TryResult tryEvaluateBool(Expr *S) {
+ if (!BuildOpts.PruneTriviallyFalseEdges)
return TryResult();
Expr::EvalResult Result;
@@ -217,24 +418,13 @@ private:
return TryResult();
}
-
- bool badCFG;
-
- // True iff trivially false edges should be pruned from the CFG.
- bool PruneTriviallyFalseEdges;
-
- // True iff EH edges on CallExprs should be added to the CFG.
- bool AddEHEdges;
-
- // True iff scope start and scope end notes should be added to the CFG.
- bool AddScopes;
};
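
The TryResult class used above is a three-valued boolean: 'true', 'false', or
'unknown'. A compilable sketch of the idea, inferred from the isKnown/isTrue/
isFalse/negate uses visible in this diff (the real clang definition may
differ):

// TriBool: a minimal tri-state in the spirit of TryResult. Illustrative only.
#include <cstdio>

class TriBool {
  int X; // 1 = true, 0 = false, -1 = unknown
public:
  TriBool() : X(-1) {}                       // unknown
  explicit TriBool(bool B) : X(B ? 1 : 0) {}
  bool isKnown() const { return X >= 0; }
  bool isTrue() const  { return X == 1; }
  bool isFalse() const { return X == 0; }
  void negate() { if (isKnown()) X = !X; }   // used when '||' flips a known LHS
};

int main() {
  TriBool Unknown, T(true);
  T.negate(); // now known-false
  std::printf("known=%d false=%d\n", Unknown.isKnown(), T.isFalse()); // 0 1
  return 0;
}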
// FIXME: Add support for dependent-sized array types in C++?
// Does it even make sense to build a CFG for an uninstantiated template?
-static VariableArrayType* FindVA(Type* t) {
- while (ArrayType* vt = dyn_cast<ArrayType>(t)) {
- if (VariableArrayType* vat = dyn_cast<VariableArrayType>(vt))
+static const VariableArrayType *FindVA(const Type *t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
if (vat->getSizeExpr())
return vat;
@@ -250,19 +440,14 @@ static VariableArrayType* FindVA(Type* t) {
/// transferred to the caller. If CFG construction fails, this method returns
/// NULL.
CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement, ASTContext* C,
- bool pruneTriviallyFalseEdges,
- bool addehedges, bool AddScopes) {
-
- AddEHEdges = addehedges;
- PruneTriviallyFalseEdges = pruneTriviallyFalseEdges;
+ CFG::BuildOptions BO) {
Context = C;
assert(cfg.get());
if (!Statement)
return NULL;
- this->AddScopes = AddScopes;
- badCFG = false;
+ BuildOpts = BO;
// Create an empty block that will serve as the exit block for the CFG. Since
// this is the first block added to the CFG, it will be implicitly registered
@@ -271,59 +456,67 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement, ASTContext* C,
assert(Succ == &cfg->getExit());
Block = NULL; // the EXIT block is empty. Create all other blocks lazily.
+ if (BuildOpts.AddImplicitDtors)
+ if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
+ addImplicitDtorsForDestructor(DD);
+
// Visit the statements and create the CFG.
- CFGBlock* B = addStmt(Statement);
+ CFGBlock *B = addStmt(Statement);
+ if (badCFG)
+ return NULL;
+
+  // For a C++ constructor, add the initializers to the CFG.
if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
- // FIXME: Add code for base initializers and member initializers.
- (void)CD;
+ for (CXXConstructorDecl::init_const_reverse_iterator I = CD->init_rbegin(),
+ E = CD->init_rend(); I != E; ++I) {
+ B = addInitializer(*I);
+ if (badCFG)
+ return NULL;
+ }
}
- if (!B)
- B = Succ;
-
- if (B) {
- // Finalize the last constructed block. This usually involves reversing the
- // order of the statements in the block.
- if (Block) FinishBlock(B);
-
- // Backpatch the gotos whose label -> block mappings we didn't know when we
- // encountered them.
- for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
- E = BackpatchBlocks.end(); I != E; ++I ) {
- CFGBlock* B = *I;
- GotoStmt* G = cast<GotoStmt>(B->getTerminator());
- LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
-
- // If there is no target for the goto, then we are looking at an
- // incomplete AST. Handle this by not registering a successor.
- if (LI == LabelMap.end()) continue;
+ if (B)
+ Succ = B;
- AddSuccessor(B, LI->second);
- }
+ // Backpatch the gotos whose label -> block mappings we didn't know when we
+ // encountered them.
+ for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
+ E = BackpatchBlocks.end(); I != E; ++I ) {
- // Add successors to the Indirect Goto Dispatch block (if we have one).
- if (CFGBlock* B = cfg->getIndirectGotoBlock())
- for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
- E = AddressTakenLabels.end(); I != E; ++I ) {
+ CFGBlock* B = I->block;
+ GotoStmt* G = cast<GotoStmt>(B->getTerminator());
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
- // Lookup the target block.
- LabelMapTy::iterator LI = LabelMap.find(*I);
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
- // If there is no target block that contains label, then we are looking
- // at an incomplete AST. Handle this by not registering a successor.
- if (LI == LabelMap.end()) continue;
+ JumpTarget JT = LI->second;
+ prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
+ JT.scopePosition);
+ addSuccessor(B, JT.block);
+ }
- AddSuccessor(B, LI->second);
- }
+ // Add successors to the Indirect Goto Dispatch block (if we have one).
+ if (CFGBlock* B = cfg->getIndirectGotoBlock())
+ for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
+ E = AddressTakenLabels.end(); I != E; ++I ) {
+
+ // Lookup the target block.
+ LabelMapTy::iterator LI = LabelMap.find(*I);
- Succ = B;
- }
+ // If there is no target block that contains label, then we are looking
+ // at an incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ addSuccessor(B, LI->second.block);
+ }
// Create an empty entry block that has no predecessors.
cfg->setEntry(createBlock());
- return badCFG ? NULL : cfg.take();
+ return cfg.take();
}
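
The switch from three positional bools to CFG::BuildOptions is the classic
options-struct refactor. A hypothetical sketch of the call-site ergonomics;
the field defaults below are assumptions, not clang's:

#include <cstdio>

// Sketch of the options-struct pattern that replaced buildCFG's bool
// parameters. Field names mirror CFG::BuildOptions; defaults are illustrative.
struct BuildOptions {
  bool PruneTriviallyFalseEdges = true;
  bool AddEHEdges = false;
  bool AddInitializers = false;
  bool AddImplicitDtors = false;
};

static void buildCFG(const BuildOptions &BO) {
  std::printf("prune=%d eh=%d init=%d dtors=%d\n",
              BO.PruneTriviallyFalseEdges, BO.AddEHEdges,
              BO.AddInitializers, BO.AddImplicitDtors);
}

int main() {
  BuildOptions BO;
  BO.AddImplicitDtors = true; // self-documenting, unlike three raw bools
  buildCFG(BO);
  return 0;
}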
/// createBlock - Used to lazily create blocks that are connected
@@ -331,17 +524,251 @@ CFG* CFGBuilder::buildCFG(const Decl *D, Stmt* Statement, ASTContext* C,
CFGBlock* CFGBuilder::createBlock(bool add_successor) {
CFGBlock* B = cfg->createBlock();
if (add_successor && Succ)
- AddSuccessor(B, Succ);
+ addSuccessor(B, Succ);
return B;
}
-/// FinishBlock - "Finalize" the block by checking if we have a bad CFG.
-bool CFGBuilder::FinishBlock(CFGBlock* B) {
- if (badCFG)
- return false;
+/// addInitializer - Add C++ base or member initializer element to CFG.
+CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
+ if (!BuildOpts.AddInitializers)
+ return Block;
- assert(B);
- return true;
+ bool IsReference = false;
+ bool HasTemporaries = false;
+
+  // Destructors of temporaries in the initialization expression should be
+  // called after the initialization finishes.
+ Expr *Init = I->getInit();
+ if (Init) {
+ if (FieldDecl *FD = I->getAnyMember())
+ IsReference = FD->getType()->isReferenceType();
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ IsReference);
+ }
+ }
+
+ autoCreateBlock();
+ appendInitializer(Block, I);
+
+ if (Init) {
+ if (HasTemporaries) {
+      // For an expression with temporaries, go directly to the subexpression
+      // to avoid generating the destructors a second time.
+ return Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
+ }
+ return Visit(Init);
+ }
+
+ return Block;
+}
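
For context on why addInitializer unwraps ExprWithCleanups: temporaries created
in a ctor-initializer are destroyed at the end of the full-expression, i.e.
only after the member has been initialized, and the CFG has to reflect that
ordering. A small self-contained demonstration:

#include <cstdio>

struct Noisy {
  ~Noisy() { std::puts("~Noisy (temporary)"); }
  int value() const { return 42; }
};

struct Member {
  explicit Member(int) { std::puts("Member initialized"); }
};

struct Holder {
  Member m;
  // The Noisy temporary outlives the initialization of m: the output is
  // "Member initialized" followed by "~Noisy (temporary)".
  Holder() : m(Noisy().value()) {}
};

int main() { Holder h; return 0; }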
+
+/// addAutomaticObjDtors - Add to the current block the destructors for
+/// automatic objects in the given range of local scope positions. Use S as the
+/// triggering statement for the destructors.
+void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt* S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ if (B == E)
+ return;
+
+ autoCreateBlock();
+ appendAutomaticObjDtors(Block, B, E, S);
+}
+
+/// addImplicitDtorsForDestructor - Add the implicit destructor calls generated
+/// for base and member objects inside a destructor. Since the builder works
+/// backwards, the destructors are appended in the reverse of their run-time
+/// order: virtual bases first, then direct bases, then members.
+void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
+ assert (BuildOpts.AddImplicitDtors
+ && "Can be called only when dtors should be added");
+ const CXXRecordDecl *RD = DD->getParent();
+
+  // At the end, destroy the virtual base objects.
+ for (CXXRecordDecl::base_class_const_iterator VI = RD->vbases_begin(),
+ VE = RD->vbases_end(); VI != VE; ++VI) {
+ const CXXRecordDecl *CD = VI->getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, VI);
+ }
+ }
+
+  // Before the virtual bases, destroy the direct base objects.
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ if (!BI->isVirtual()) {
+ const CXXRecordDecl *CD = BI->getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, BI);
+ }
+ }
+ }
+
+  // First, destroy the member objects.
+ for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
+ FE = RD->field_end(); FI != FE; ++FI) {
+ // Check for constant size array. Set type to array element type.
+ QualType QT = FI->getType();
+ if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ continue;
+ QT = AT->getElementType();
+ }
+
+ if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendMemberDtor(Block, *FI);
+ }
+ }
+}
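
The run-time order these loops model is fixed by the language: destructor body
first, then members in reverse declaration order, then direct non-virtual
bases, then virtual bases. For example:

#include <cstdio>

struct VBase { ~VBase() { std::puts("~VBase"); } };
struct Base  { ~Base()  { std::puts("~Base");  } };
struct Field { ~Field() { std::puts("~Field"); } };

struct Derived : Base, virtual VBase {
  Field f;
  ~Derived() { std::puts("~Derived body"); }
};

// Prints: ~Derived body, ~Field, ~Base, ~VBase -- the sequence whose
// reverse the builder appends as implicit destructor elements.
int main() { Derived d; return 0; }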
+
+/// createOrReuseLocalScope - If Scope is NULL, create a new LocalScope. Either
+/// way, return a valid LocalScope object.
+LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
+ if (!Scope) {
+ llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
+ Scope = alloc.Allocate<LocalScope>();
+ BumpVectorContext ctx(alloc);
+ new (Scope) LocalScope(ctx, ScopePos);
+ }
+ return Scope;
+}
+
+/// addLocalScopeForStmt - Add a LocalScope to the local scope tree for a
+/// statement that should create an implicit scope (e.g. if/else substatements).
+void CFGBuilder::addLocalScopeForStmt(Stmt* S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope *Scope = 0;
+
+  // For a compound statement we will be creating an explicit scope.
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ for (CompoundStmt::body_iterator BI = CS->body_begin(), BE = CS->body_end()
+ ; BI != BE; ++BI) {
+ Stmt *SI = *BI;
+ if (LabelStmt *LS = dyn_cast<LabelStmt>(SI))
+ SI = LS->getSubStmt();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SI))
+ Scope = addLocalScopeForDeclStmt(DS, Scope);
+ }
+ return;
+ }
+
+  // For any other statement the scope is implicit, and as such is interesting
+  // only for a DeclStmt.
+ if (LabelStmt *LS = dyn_cast<LabelStmt>(S))
+ S = LS->getSubStmt();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S))
+ addLocalScopeForDeclStmt(DS);
+}
+
+/// addLocalScopeForDeclStmt - Add a LocalScope for a declaration statement.
+/// Reuses Scope if it is not NULL.
+LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt* DS,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end()
+ ; DI != DE; ++DI) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DI))
+ Scope = addLocalScopeForVarDecl(VD, Scope);
+ }
+ return Scope;
+}
+
+/// addLocalScopeForVarDecl - Add a LocalScope for a variable declaration. It
+/// adds a scope for automatic objects and for temporary objects bound to a
+/// const reference. Reuses Scope if it is not NULL.
+LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl* VD,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ // Check if variable is local.
+ switch (VD->getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ break;
+ default: return Scope;
+ }
+
+ // Check for const references bound to temporary. Set type to pointee.
+ QualType QT = VD->getType();
+ if (const ReferenceType* RT = QT.getTypePtr()->getAs<ReferenceType>()) {
+ QT = RT->getPointeeType();
+ if (!QT.isConstQualified())
+ return Scope;
+ if (!VD->getInit() || !VD->getInit()->Classify(*Context).isRValue())
+ return Scope;
+ }
+
+ // Check for constant size array. Set type to array element type.
+ if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ return Scope;
+ QT = AT->getElementType();
+ }
+
+ // Check if type is a C++ class with non-trivial destructor.
+ if (const CXXRecordDecl* CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+      // Add the variable to the scope.
+ Scope = createOrReuseLocalScope(Scope);
+ Scope->addVar(VD);
+ ScopePos = Scope->begin();
+ }
+ return Scope;
+}
+
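
The const-reference check in addLocalScopeForVarDecl exists because binding a
temporary to a const reference extends the temporary's lifetime to the
reference's scope, so such a variable also needs an implicit destructor at
scope exit. For example:

#include <cstdio>

struct Guard {
  ~Guard() { std::puts("~Guard"); }
};

Guard make() { return Guard(); }

int main() {
  {
    const Guard &g = make(); // temporary's lifetime extended to g's scope
    std::puts("scope body");
  }                          // ~Guard runs here, at the end of the block
  std::puts("after block");
  return 0;
}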
+/// addLocalScopeAndDtors - For the given statement, add a local scope for it
+/// and add the destructors that will clean up the scope.
+void CFGBuilder::addLocalScopeAndDtors(Stmt* S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope::const_iterator scopeBeginPos = ScopePos;
+ addLocalScopeForStmt(S);
+ addAutomaticObjDtors(ScopePos, scopeBeginPos, S);
+}
+
+/// insertAutomaticObjDtors - Insert destructor CFGElements for variables with
+/// automatic storage duration into the CFGBlock's elements vector. Insertion
+/// is performed at the position specified by the iterator.
+void CFGBuilder::insertAutomaticObjDtors(CFGBlock* Blk, CFGBlock::iterator I,
+ LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S) {
+ BumpVectorContext& C = cfg->getBumpVectorContext();
+ I = Blk->beginAutomaticObjDtorsInsert(I, B.distance(E), C);
+ while (B != E)
+ I = Blk->insertAutomaticObjDtor(I, *B++, S);
+}
+
+/// appendAutomaticObjDtors - Append destructor CFGElements for variables with
+/// automatic storage duration to the CFGBlock's elements vector. Elements are
+/// appended to the physical end of the vector, which happens to be its logical
+/// beginning (the builder constructs blocks backwards).
+void CFGBuilder::appendAutomaticObjDtors(CFGBlock* Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt* S) {
+ insertAutomaticObjDtors(Blk, Blk->begin(), B, E, S);
+}
+
+/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
+/// variables with automatic storage duration to the CFGBlock's elements vector.
+/// Elements are prepended to the physical beginning of the vector, which
+/// happens to be its logical end. The block's terminator is used as the
+/// statement that specifies the destructors' call site.
+void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock* Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E) {
+ insertAutomaticObjDtors(Blk, Blk->end(), B, E, Blk->getTerminator());
}
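
The physical-end/logical-beginning wording above follows from the builder
visiting statements backwards and storing block elements in visitation order;
readers recover source order through reverse iteration. A sketch of the
convention:

#include <cstdio>
#include <vector>

// The builder walks statements backwards and push_back()s, so the vector's
// physical order is the reverse of source order. Illustrative only.
int main() {
  std::vector<int> elems;                   // a CFGBlock's element vector
  for (int stmt = 3; stmt >= 1; --stmt)     // visit stmts 1..3 backwards
    elems.push_back(stmt);                  // "append" == logical prepend
  // Logical (source) order is recovered by iterating in reverse:
  for (std::vector<int>::reverse_iterator I = elems.rbegin();
       I != elems.rend(); ++I)
    std::printf("%d ", *I);                 // prints: 1 2 3
  std::puts("");
  return 0;
}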
/// Visit - Walk the subtree of a statement and add extra
@@ -360,6 +787,9 @@ tryAgain:
case Stmt::AddrLabelExprClass:
return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), asc);
+ case Stmt::BinaryConditionalOperatorClass:
+ return VisitConditionalOperator(cast<BinaryConditionalOperator>(S), asc);
+
case Stmt::BinaryOperatorClass:
return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
@@ -391,11 +821,20 @@ tryAgain:
case Stmt::CXXCatchStmtClass:
return VisitCXXCatchStmt(cast<CXXCatchStmt>(S));
- case Stmt::CXXExprWithTemporariesClass: {
- // FIXME: Handle temporaries. For now, just visit the subexpression
- // so we don't artificially create extra blocks.
- return Visit(cast<CXXExprWithTemporaries>(S)->getSubExpr(), asc);
- }
+ case Stmt::ExprWithCleanupsClass:
+ return VisitExprWithCleanups(cast<ExprWithCleanups>(S), asc);
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
+
+ case Stmt::CXXConstructExprClass:
+ return VisitCXXConstructExpr(cast<CXXConstructExpr>(S), asc);
+
+ case Stmt::CXXFunctionalCastExprClass:
+ return VisitCXXFunctionalCastExpr(cast<CXXFunctionalCastExpr>(S), asc);
+
+ case Stmt::CXXTemporaryObjectExprClass:
+ return VisitCXXTemporaryObjectExpr(cast<CXXTemporaryObjectExpr>(S), asc);
case Stmt::CXXMemberCallExprClass:
return VisitCXXMemberCallExpr(cast<CXXMemberCallExpr>(S), asc);
@@ -424,6 +863,9 @@ tryAgain:
case Stmt::IfStmtClass:
return VisitIfStmt(cast<IfStmt>(S));
+ case Stmt::ImplicitCastExprClass:
+ return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
+
case Stmt::IndirectGotoStmtClass:
return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
@@ -467,6 +909,9 @@ tryAgain:
case Stmt::SwitchStmtClass:
return VisitSwitchStmt(cast<SwitchStmt>(S));
+ case Stmt::UnaryOperatorClass:
+ return VisitUnaryOperator(cast<UnaryOperator>(S), asc);
+
case Stmt::WhileStmtClass:
return VisitWhileStmt(cast<WhileStmt>(S));
}
@@ -475,7 +920,7 @@ tryAgain:
CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) {
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, S, asc);
+ appendStmt(Block, S, asc);
}
return VisitChildren(S);
@@ -484,8 +929,7 @@ CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) {
/// VisitChildren - Visit the children of a Stmt.
CFGBlock *CFGBuilder::VisitChildren(Stmt* Terminator) {
CFGBlock *B = Block;
- for (Stmt::child_iterator I = Terminator->child_begin(),
- E = Terminator->child_end(); I != E; ++I) {
+ for (Stmt::child_range I = Terminator->children(); I; ++I) {
if (*I) B = Visit(*I);
}
return B;
@@ -497,19 +941,29 @@ CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A,
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, A, asc);
+ appendStmt(Block, A, asc);
}
return Block;
}
+CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ appendStmt(Block, U, asc);
+ }
+
+ return Visit(U->getSubExpr(), AddStmtChoice());
+}
+
CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
AddStmtChoice asc) {
if (B->isLogicalOp()) { // && or ||
CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
- AppendStmt(ConfluenceBlock, B, asc);
+ appendStmt(ConfluenceBlock, B, asc);
- if (!FinishBlock(ConfluenceBlock))
+ if (badCFG)
return 0;
// create the block evaluating the LHS
@@ -522,57 +976,67 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
CFGBlock* RHSBlock = addStmt(B->getRHS());
if (RHSBlock) {
- if (!FinishBlock(RHSBlock))
+ if (badCFG)
return 0;
- }
- else {
+ } else {
// Create an empty block for cases where the RHS doesn't require
// any explicit statements in the CFG.
RHSBlock = createBlock();
}
// See if this is a known constant.
- TryResult KnownVal = TryEvaluateBool(B->getLHS());
+ TryResult KnownVal = tryEvaluateBool(B->getLHS());
if (KnownVal.isKnown() && (B->getOpcode() == BO_LOr))
KnownVal.negate();
// Now link the LHSBlock with RHSBlock.
if (B->getOpcode() == BO_LOr) {
- AddSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
- AddSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
} else {
assert(B->getOpcode() == BO_LAnd);
- AddSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
- AddSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
}
// Generate the blocks for evaluating the LHS.
Block = LHSBlock;
return addStmt(B->getLHS());
}
- else if (B->getOpcode() == BO_Comma) { // ,
+
+ if (B->getOpcode() == BO_Comma) { // ,
autoCreateBlock();
- AppendStmt(Block, B, asc);
+ appendStmt(Block, B, asc);
addStmt(B->getRHS());
return addStmt(B->getLHS());
}
- else if (B->isAssignmentOp()) {
+
+ if (B->isAssignmentOp()) {
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, B, asc);
+ appendStmt(Block, B, asc);
}
+ Visit(B->getLHS());
+ return Visit(B->getRHS());
+ }
- Visit(B->getRHS());
- return Visit(B->getLHS(), AddStmtChoice::AsLValueNotAlwaysAdd);
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ appendStmt(Block, B, asc);
}
- return VisitStmt(B, asc);
+ CFGBlock *RBlock = Visit(B->getRHS());
+ CFGBlock *LBlock = Visit(B->getLHS());
+ // If visiting RHS causes us to finish 'Block', e.g. the RHS is a StmtExpr
+ // containing a DoStmt, and the LHS doesn't create a new block, then we should
+ // return RBlock. Otherwise we'll incorrectly return NULL.
+ return (LBlock ? LBlock : RBlock);
}
CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, E, asc);
+ appendStmt(Block, E, asc);
}
return Block;
}
@@ -580,8 +1044,8 @@ CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
// "break" is a control-flow statement. Thus we stop processing the current
// block.
- if (Block && !FinishBlock(Block))
- return 0;
+ if (badCFG)
+ return 0;
// Now create a new block that ends with the break statement.
Block = createBlock(false);
@@ -589,9 +1053,10 @@ CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
// If there is no target for the break, then we are looking at an incomplete
// AST. This means that the CFG cannot be constructed.
- if (BreakTargetBlock)
- AddSuccessor(Block, BreakTargetBlock);
- else
+ if (BreakJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, BreakJumpTarget.scopePosition, B);
+ addSuccessor(Block, BreakJumpTarget.block);
+ } else
badCFG = true;
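
The addAutomaticObjDtors call on the break edge models the language rule that
jumping out of a scope destroys its automatic variables before control reaches
the jump target. The same holds for continue:

#include <cstdio>

struct Local {
  ~Local() { std::puts("~Local"); }
};

int main() {
  for (int i = 0; i < 2; ++i) {
    Local l;
    if (i == 0) continue; // both paths destroy l before jumping
    break;
  }
  std::puts("done"); // output: ~Local, ~Local, done
  return 0;
}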
@@ -624,8 +1089,8 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
bool AddEHEdge = false;
// Languages without exceptions are assumed to not throw.
- if (Context->getLangOptions().Exceptions) {
- if (AddEHEdges)
+ if (Context->getLangOptions().areExceptionsEnabled()) {
+ if (BuildOpts.AddEHEdges)
AddEHEdge = true;
}
@@ -639,32 +1104,28 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!CanThrow(C->getCallee()))
AddEHEdge = false;
- if (!NoReturn && !AddEHEdge) {
- if (asc.asLValue())
- return VisitStmt(C, AddStmtChoice::AlwaysAddAsLValue);
- else
- return VisitStmt(C, AddStmtChoice::AlwaysAdd);
- }
+ if (!NoReturn && !AddEHEdge)
+ return VisitStmt(C, asc.withAlwaysAdd(true));
if (Block) {
Succ = Block;
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
}
Block = createBlock(!NoReturn);
- AppendStmt(Block, C, asc);
+ appendStmt(Block, C, asc);
if (NoReturn) {
// Wire this to the exit block directly.
- AddSuccessor(Block, &cfg->getExit());
+ addSuccessor(Block, &cfg->getExit());
}
if (AddEHEdge) {
// Add exceptional edges.
if (TryTerminatedBlock)
- AddSuccessor(Block, TryTerminatedBlock);
+ addSuccessor(Block, TryTerminatedBlock);
else
- AddSuccessor(Block, &cfg->getExit());
+ addSuccessor(Block, &cfg->getExit());
}
return VisitChildren(C);
@@ -673,38 +1134,35 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
AddStmtChoice asc) {
CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
- AppendStmt(ConfluenceBlock, C, asc);
- if (!FinishBlock(ConfluenceBlock))
+ appendStmt(ConfluenceBlock, C, asc);
+ if (badCFG)
return 0;
- asc = asc.asLValue() ? AddStmtChoice::AlwaysAddAsLValue
- : AddStmtChoice::AlwaysAdd;
-
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* LHSBlock = Visit(C->getLHS(), asc);
- if (!FinishBlock(LHSBlock))
+ CFGBlock* LHSBlock = Visit(C->getLHS(), alwaysAdd);
+ if (badCFG)
return 0;
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
- if (!FinishBlock(RHSBlock))
+ CFGBlock* RHSBlock = Visit(C->getRHS(), alwaysAdd);
+ if (badCFG)
return 0;
Block = createBlock(false);
// See if this is a known constant.
- const TryResult& KnownVal = TryEvaluateBool(C->getCond());
- AddSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
- AddSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
Block->setTerminator(C);
return addStmt(C->getCond());
}
CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
- EndScope(C);
-
+ addLocalScopeAndDtors(C);
CFGBlock* LastBlock = Block;
for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
@@ -718,22 +1176,22 @@ CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
return NULL;
}
- LastBlock = StartScope(C, LastBlock);
-
return LastBlock;
}
-CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
+CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
AddStmtChoice asc) {
+ const BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(C);
+ const OpaqueValueExpr *opaqueValue = (BCO ? BCO->getOpaqueValue() : NULL);
+
// Create the confluence block that will "merge" the results of the ternary
// expression.
CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
- AppendStmt(ConfluenceBlock, C, asc);
- if (!FinishBlock(ConfluenceBlock))
+ appendStmt(ConfluenceBlock, C, asc);
+ if (badCFG)
return 0;
- asc = asc.asLValue() ? AddStmtChoice::AlwaysAddAsLValue
- : AddStmtChoice::AlwaysAdd;
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
// Create a block for the LHS expression if there is an LHS expression. A
// GCC extension allows LHS to be NULL, causing the condition to be the
@@ -741,60 +1199,48 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
// e.g: x ?: y is shorthand for: x ? x : y;
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* LHSBlock = NULL;
- if (C->getLHS()) {
- LHSBlock = Visit(C->getLHS(), asc);
- if (!FinishBlock(LHSBlock))
+ CFGBlock* LHSBlock = 0;
+ const Expr *trueExpr = C->getTrueExpr();
+ if (trueExpr != opaqueValue) {
+ LHSBlock = Visit(C->getTrueExpr(), alwaysAdd);
+ if (badCFG)
return 0;
Block = NULL;
}
// Create the block for the RHS expression.
Succ = ConfluenceBlock;
- CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
- if (!FinishBlock(RHSBlock))
+ CFGBlock* RHSBlock = Visit(C->getFalseExpr(), alwaysAdd);
+ if (badCFG)
return 0;
// Create the block that will contain the condition.
Block = createBlock(false);
// See if this is a known constant.
- const TryResult& KnownVal = TryEvaluateBool(C->getCond());
- if (LHSBlock) {
- AddSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
- } else {
- if (KnownVal.isFalse()) {
- // If we know the condition is false, add NULL as the successor for
- // the block containing the condition. In this case, the confluence
- // block will have just one predecessor.
- AddSuccessor(Block, 0);
- assert(ConfluenceBlock->pred_size() == 1);
- } else {
- // If we have no LHS expression, add the ConfluenceBlock as a direct
- // successor for the block containing the condition. Moreover, we need to
- // reverse the order of the predecessors in the ConfluenceBlock because
- // the RHSBlock will have been added to the succcessors already, and we
- // want the first predecessor to the the block containing the expression
- // for the case when the ternary expression evaluates to true.
- AddSuccessor(Block, ConfluenceBlock);
- assert(ConfluenceBlock->pred_size() == 2);
- std::reverse(ConfluenceBlock->pred_begin(),
- ConfluenceBlock->pred_end());
- }
- }
-
- AddSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ if (LHSBlock)
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
Block->setTerminator(C);
- return addStmt(C->getCond());
+ Expr *condExpr = C->getCond();
+
+ CFGBlock *result = 0;
+
+ // Run the condition expression if it's not trivially expressed in
+ // terms of the opaque value (or if there is no opaque value).
+ if (condExpr != opaqueValue) result = addStmt(condExpr);
+
+ // Before that, run the common subexpression if there was one.
+ // At least one of this or the above will be run.
+ if (opaqueValue) result = addStmt(BCO->getCommon());
+
+ return result;
}
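
The opaqueValue handling above supports the GNU "?:" extension
(BinaryConditionalOperator), where the condition doubles as the true-branch
result and must be evaluated exactly once, as the shared "common"
subexpression. A small demonstration (GNU extension, accepted by GCC and
Clang):

#include <cstdio>

int calls = 0;
int f() { ++calls; return 0; }

int main() {
  // "x ?: y" evaluates x once, using it both as the condition and as the
  // true-branch result.
  int r = f() ?: 7;
  std::printf("r=%d calls=%d\n", r, calls); // prints: r=7 calls=1
  return 0;
}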
CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
- autoCreateBlock();
-
- if (DS->isSingleDecl()) {
- AppendStmt(Block, DS);
- return VisitDeclSubExpr(DS->getSingleDecl());
- }
+ if (DS->isSingleDecl())
+ return VisitDeclSubExpr(DS);
CFGBlock *B = 0;
@@ -815,37 +1261,63 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
// Append the fake DeclStmt to block.
- AppendStmt(Block, DSNew);
- B = VisitDeclSubExpr(D);
+ B = VisitDeclSubExpr(DSNew);
}
return B;
}
/// VisitDeclSubExpr - Utility method to add block-level expressions for
-/// initializers in Decls.
-CFGBlock *CFGBuilder::VisitDeclSubExpr(Decl* D) {
- assert(Block);
+/// DeclStmts and initializers in them.
+CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt* DS) {
+ assert(DS->isSingleDecl() && "Can handle single declarations only.");
- VarDecl *VD = dyn_cast<VarDecl>(D);
+ VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (!VD)
+ if (!VD) {
+ autoCreateBlock();
+ appendStmt(Block, DS);
return Block;
+ }
+ bool IsReference = false;
+ bool HasTemporaries = false;
+
+  // Destructors of temporaries in the initialization expression should be
+  // called after the initialization finishes.
Expr *Init = VD->getInit();
+ if (Init) {
+ IsReference = VD->getType()->isReferenceType();
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ IsReference);
+ }
+ }
+
+ autoCreateBlock();
+ appendStmt(Block, DS);
if (Init) {
- AddStmtChoice::Kind k =
- VD->getType()->isReferenceType() ? AddStmtChoice::AsLValueNotAlwaysAdd
- : AddStmtChoice::NotAlwaysAdd;
- Visit(Init, AddStmtChoice(k));
+ if (HasTemporaries)
+      // For an expression with temporaries, go directly to the subexpression
+      // to avoid generating the destructors a second time.
+ Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
+ else
+ Visit(Init);
}
// If the type of VD is a VLA, then we must process its size expressions.
- for (VariableArrayType* VA = FindVA(VD->getType().getTypePtr()); VA != 0;
- VA = FindVA(VA->getElementType().getTypePtr()))
+ for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
Block = addStmt(VA->getSizeExpr());
+ // Remove variable from local scope.
+ if (ScopePos && VD == *ScopePos)
+ ++ScopePos;
+
return Block;
}
@@ -857,11 +1329,23 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
// middle of a block, we stop processing that block. That block is then the
// implicit successor for the "then" and "else" clauses.
+  // Save the local scope position because, in the case of a condition
+  // variable, ScopePos won't be restored when traversing the AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl* VD = I->getConditionVariable()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, I);
+ }
+
  // The block we were processing is now finished. Make it the successor
// block.
if (Block) {
Succ = Block;
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
}
@@ -874,12 +1358,18 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
// NULL out Block so that the recursive call to Visit will
// create a new basic block.
Block = NULL;
+
+ // If branch is not a compound statement create implicit scope
+    // If the branch is not a compound statement, create an implicit scope
+    // and add destructors.
+ addLocalScopeAndDtors(Else);
+
ElseBlock = addStmt(Else);
if (!ElseBlock) // Can occur when the Else body has all NullStmts.
ElseBlock = sv.get();
else if (Block) {
- if (!FinishBlock(ElseBlock))
+ if (badCFG)
return 0;
}
}
@@ -891,6 +1381,12 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
assert(Then);
SaveAndRestore<CFGBlock*> sv(Succ);
Block = NULL;
+
+ // If branch is not a compound statement create implicit scope
+    // If the branch is not a compound statement, create an implicit scope
+    // and add destructors.
+ addLocalScopeAndDtors(Then);
+
ThenBlock = addStmt(Then);
if (!ThenBlock) {
@@ -898,9 +1394,9 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
// Create an empty block so we can distinguish between true and false
// branches in path-sensitive analyses.
ThenBlock = createBlock(false);
- AddSuccessor(ThenBlock, sv.get());
+ addSuccessor(ThenBlock, sv.get());
} else if (Block) {
- if (!FinishBlock(ThenBlock))
+ if (badCFG)
return 0;
}
}
@@ -912,11 +1408,11 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
Block->setTerminator(I);
// See if this is a known constant.
- const TryResult &KnownVal = TryEvaluateBool(I->getCond());
+ const TryResult &KnownVal = tryEvaluateBool(I->getCond());
// Now add the successors.
- AddSuccessor(Block, KnownVal.isFalse() ? NULL : ThenBlock);
- AddSuccessor(Block, KnownVal.isTrue()? NULL : ElseBlock);
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : ThenBlock);
+ addSuccessor(Block, KnownVal.isTrue()? NULL : ElseBlock);
// Add the condition as the last statement in the new block. This may create
// new blocks as the condition may contain control-flow. Any newly created
@@ -928,7 +1424,7 @@ CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
if (VarDecl *VD = I->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- AppendStmt(Block, I, AddStmtChoice::AlwaysAdd);
+ appendStmt(Block, I, AddStmtChoice::AlwaysAdd);
addStmt(Init);
}
}
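
The condition-variable scope added at the top of VisitIfStmt models C++'s
"if (T x = ...)" form: the variable is in scope for both branches and is
destroyed once the chosen branch finishes, which is where the implicit
destructor element lands. For example (C++11):

#include <cstdio>

struct Flag {
  bool b;
  ~Flag() { std::puts("~Flag"); }
  explicit operator bool() const { return b; }
};

int main() {
  // Prints "then" followed by "~Flag": the condition variable outlives
  // the selected branch.
  if (Flag f{true})
    std::puts("then");
  else
    std::puts("else");
  return 0;
}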
@@ -944,37 +1440,37 @@ CFGBlock* CFGBuilder::VisitReturnStmt(ReturnStmt* R) {
// code afterwards is DEAD (unreachable). We still keep a basic block
// for that code; a simple "mark-and-sweep" from the entry block will be
// able to report such dead blocks.
- if (Block)
- FinishBlock(Block);
// Create the new block.
Block = createBlock(false);
// The Exit block is the only successor.
- AddSuccessor(Block, &cfg->getExit());
+ addAutomaticObjDtors(ScopePos, LocalScope::const_iterator(), R);
+ addSuccessor(Block, &cfg->getExit());
// Add the return statement to the block. This may create new blocks if R
// contains control-flow (short-circuit operations).
return VisitStmt(R, AddStmtChoice::AlwaysAdd);
}
-CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt* L) {
+CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt *L) {
// Get the block of the labeled statement. Add it to our map.
addStmt(L->getSubStmt());
- CFGBlock* LabelBlock = Block;
+ CFGBlock *LabelBlock = Block;
if (!LabelBlock) // This can happen when the body is empty, i.e.
     LabelBlock = createBlock(); // scopes that only contain NullStmts.
- assert(LabelMap.find(L) == LabelMap.end() && "label already in map");
- LabelMap[ L ] = LabelBlock;
+ assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
+ "label already in map");
+ LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);
// Labels partition blocks, so this is the end of the basic block we were
  // processing (L is the block's label). Because this is a label (and we have
// already processed the substatement) there is no extra control-flow to worry
// about.
LabelBlock->setLabel(L);
- if (!FinishBlock(LabelBlock))
+ if (badCFG)
return 0;
// We set Block to NULL to allow lazy creation of a new block (if necessary);
@@ -989,8 +1485,6 @@ CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt* L) {
CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) {
// Goto is a control-flow statement. Thus we stop processing the current
// block and create a new one.
- if (Block)
- FinishBlock(Block);
Block = createBlock(false);
Block->setTerminator(G);
@@ -1000,9 +1494,12 @@ CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) {
if (I == LabelMap.end())
// We will need to backpatch this block later.
- BackpatchBlocks.push_back(Block);
- else
- AddSuccessor(Block, I->second);
+ BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
+ else {
+ JumpTarget JT = I->second;
+ addAutomaticObjDtors(ScopePos, JT.scopePosition, G);
+ addSuccessor(Block, JT.block);
+ }
return Block;
}
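
Backpatched gotos get their destructors prepended later (via
prependAutomaticObjDtorsWithTerminator), but the semantics being modeled are
the same as in the resolved case above: a goto that leaves a scope runs the
destructors of that scope's automatics before reaching the label. For example:

#include <cstdio>

struct Local {
  ~Local() { std::puts("~Local"); }
};

int main() {
  {
    Local l;
    goto out; // jumping out of the scope still runs ~Local
  }
out:
  std::puts("after label"); // output: ~Local, after label
  return 0;
}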
@@ -1010,10 +1507,27 @@ CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) {
CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
CFGBlock* LoopSuccessor = NULL;
+  // Save the local scope position because, in the case of a condition
+  // variable, ScopePos won't be restored when traversing the AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for init statement and possible condition variable.
+ // Add destructor for init statement and condition variable.
+ // Store scope position for continue statement.
+ if (Stmt* Init = F->getInit())
+ addLocalScopeForStmt(Init);
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+
+ if (VarDecl* VD = F->getConditionVariable())
+ addLocalScopeForVarDecl(VD);
+ LocalScope::const_iterator ContinueScopePos = ScopePos;
+
+ addAutomaticObjDtors(ScopePos, save_scope_pos.get(), F);
+
// "for" is a control-flow statement. Thus we stop processing the current
// block.
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
LoopSuccessor = Block;
} else
@@ -1021,8 +1535,8 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
// Save the current value for the break targets.
// All breaks should go to the code following the loop.
- SaveAndRestore<CFGBlock*> save_break(BreakTargetBlock);
- BreakTargetBlock = LoopSuccessor;
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
// Because of short-circuit evaluation, the condition of the loop can span
// multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
@@ -1038,21 +1552,24 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
if (Stmt* C = F->getCond()) {
Block = ExitConditionBlock;
EntryConditionBlock = addStmt(C);
- assert(Block == EntryConditionBlock);
+ if (badCFG)
+ return 0;
+ assert(Block == EntryConditionBlock ||
+ (Block == 0 && EntryConditionBlock == Succ));
// If this block contains a condition variable, add both the condition
// variable and initializer to the CFG.
if (VarDecl *VD = F->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- AppendStmt(Block, F, AddStmtChoice::AlwaysAdd);
+ appendStmt(Block, F, AddStmtChoice::AlwaysAdd);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
}
}
if (Block) {
- if (!FinishBlock(EntryConditionBlock))
+ if (badCFG)
return 0;
}
}
@@ -1065,18 +1582,21 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
TryResult KnownVal(true);
if (F->getCond())
- KnownVal = TryEvaluateBool(F->getCond());
+ KnownVal = tryEvaluateBool(F->getCond());
// Now create the loop body.
{
assert(F->getBody());
// Save the current values for Block, Succ, and continue targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
- save_continue(ContinueTargetBlock);
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
// Create a new block to contain the (bottom) of the loop body.
Block = NULL;
+
+    // The loop body should end with the destructor of the condition variable
+    // (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
if (Stmt* I = F->getInc()) {
// Generate increment code in its own basic block. This is the target of
@@ -1086,62 +1606,65 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
// No increment code. Create a special, empty, block that is used as the
// target block for "looping back" to the start of the loop.
assert(Succ == EntryConditionBlock);
- Succ = createBlock();
+ Succ = Block ? Block : createBlock();
}
// Finish up the increment (or empty) block if it hasn't been already.
if (Block) {
assert(Block == Succ);
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
Block = 0;
}
- ContinueTargetBlock = Succ;
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
// The starting block for the loop increment is the block that should
// represent the 'loop target' for looping back to the start of the loop.
- ContinueTargetBlock->setLoopTarget(F);
+ ContinueJumpTarget.block->setLoopTarget(F);
+
+ // If body is not a compound statement create implicit scope
+    // If the body is not a compound statement, create an implicit scope
+    // and add destructors.
+ addLocalScopeAndDtors(F->getBody());
// Now populate the body block, and in the process create new blocks as we
// walk the body of the loop.
CFGBlock* BodyBlock = addStmt(F->getBody());
if (!BodyBlock)
- BodyBlock = ContinueTargetBlock; // can happen for "for (...;...;...) ;"
- else if (Block && !FinishBlock(BodyBlock))
+      BodyBlock = ContinueJumpTarget.block; // happens for "for(...;...;...);"
+ else if (badCFG)
return 0;
// This new body block is a successor to our "exit" condition block.
- AddSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
}
// Link up the condition block with the code that follows the loop. (the
// false branch).
- AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
if (Stmt* I = F->getInit()) {
Block = createBlock();
return addStmt(I);
- } else {
- // There is no loop initialization. We are thus basically a while loop.
- // NULL out Block to force lazy block construction.
- Block = NULL;
- Succ = EntryConditionBlock;
- return EntryConditionBlock;
}
+
+ // There is no loop initialization. We are thus basically a while loop.
+ // NULL out Block to force lazy block construction.
+ Block = NULL;
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
}
CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, M, asc);
+ appendStmt(Block, M, asc);
}
- return Visit(M->getBase(),
- M->isArrow() ? AddStmtChoice::NotAlwaysAdd
- : AddStmtChoice::AsLValueNotAlwaysAdd);
+ return Visit(M->getBase());
}
CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
@@ -1180,7 +1703,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
CFGBlock* LoopSuccessor = 0;
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
LoopSuccessor = Block;
Block = 0;
@@ -1197,7 +1720,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
// The last statement in the block should be the ObjCForCollectionStmt, which
// performs the actual binding to 'element' and determines if there are any
// more items in the collection.
- AppendStmt(ExitConditionBlock, S);
+ appendStmt(ExitConditionBlock, S);
Block = ExitConditionBlock;
// Walk the 'element' expression to see if there are any side-effects. We
@@ -1205,7 +1728,7 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
// the CFG unless it contains control-flow.
EntryConditionBlock = Visit(S->getElement(), AddStmtChoice::NotAlwaysAdd);
if (Block) {
- if (!FinishBlock(EntryConditionBlock))
+ if (badCFG)
return 0;
Block = 0;
}
@@ -1217,28 +1740,29 @@ CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
// Now create the true branch.
{
// Save the current values for Succ, continue and break targets.
- SaveAndRestore<CFGBlock*> save_Succ(Succ),
- save_continue(ContinueTargetBlock), save_break(BreakTargetBlock);
+ SaveAndRestore<CFGBlock*> save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
- BreakTargetBlock = LoopSuccessor;
- ContinueTargetBlock = EntryConditionBlock;
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+ ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
CFGBlock* BodyBlock = addStmt(S->getBody());
if (!BodyBlock)
BodyBlock = EntryConditionBlock; // can happen for "for (X in Y) ;"
else if (Block) {
- if (!FinishBlock(BodyBlock))
+ if (badCFG)
return 0;
}
// This new body block is a successor to our "exit" condition block.
- AddSuccessor(ExitConditionBlock, BodyBlock);
+ addSuccessor(ExitConditionBlock, BodyBlock);
}
// Link up the condition block with the code that follows the loop.
// (the false branch).
- AddSuccessor(ExitConditionBlock, LoopSuccessor);
+ addSuccessor(ExitConditionBlock, LoopSuccessor);
// Now create a prologue block to contain the collection expression.
Block = createBlock();
@@ -1254,13 +1778,17 @@ CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) {
// The sync body starts its own basic block. This makes it a little easier
// for diagnostic clients.
if (SyncBlock) {
- if (!FinishBlock(SyncBlock))
+ if (badCFG)
return 0;
Block = 0;
Succ = SyncBlock;
}
+ // Add the @synchronized to the CFG.
+ autoCreateBlock();
+ appendStmt(Block, S, AddStmtChoice::AlwaysAdd);
+
// Inline the sync expression.
return addStmt(S->getSynchExpr());
}
@@ -1273,10 +1801,22 @@ CFGBlock* CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt* S) {
CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
CFGBlock* LoopSuccessor = NULL;
+  // Save the local scope position because, in the case of a condition
+  // variable, ScopePos won't be restored when traversing the AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position for continue statement.
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+ if (VarDecl* VD = W->getConditionVariable()) {
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+ }
+
// "while" is a control-flow statement. Thus we stop processing the current
// block.
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
LoopSuccessor = Block;
} else
@@ -1297,21 +1837,22 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
if (Stmt* C = W->getCond()) {
Block = ExitConditionBlock;
EntryConditionBlock = addStmt(C);
- assert(Block == EntryConditionBlock);
+ // The condition might finish the current 'Block'.
+ Block = EntryConditionBlock;
// If this block contains a condition variable, add both the condition
// variable and initializer to the CFG.
if (VarDecl *VD = W->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- AppendStmt(Block, W, AddStmtChoice::AlwaysAdd);
+ appendStmt(Block, W, AddStmtChoice::AlwaysAdd);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
}
}
if (Block) {
- if (!FinishBlock(EntryConditionBlock))
+ if (badCFG)
return 0;
}
}
@@ -1321,16 +1862,16 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
Succ = EntryConditionBlock;
// See if this is a known constant.
- const TryResult& KnownVal = TryEvaluateBool(W->getCond());
+ const TryResult& KnownVal = tryEvaluateBool(W->getCond());
// Process the loop body.
{
assert(W->getBody());
// Save the current values for Block, Succ, and continue and break targets
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
- save_continue(ContinueTargetBlock),
- save_break(BreakTargetBlock);
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
// Create an empty block to represent the transition block for looping back
// to the head of the loop.
@@ -1338,31 +1879,39 @@ CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
assert(Succ == EntryConditionBlock);
Succ = createBlock();
Succ->setLoopTarget(W);
- ContinueTargetBlock = Succ;
+ ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);
// All breaks should go to the code following the loop.
- BreakTargetBlock = LoopSuccessor;
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
// NULL out Block to force lazy instantiation of blocks for the body.
Block = NULL;
+    // The loop body should end with the destructor of the condition variable
+    // (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+
+ // If body is not a compound statement create implicit scope
+    // If the body is not a compound statement, create an implicit scope
+    // and add destructors.
+ addLocalScopeAndDtors(W->getBody());
+
// Create the body. The returned block is the entry to the loop body.
CFGBlock* BodyBlock = addStmt(W->getBody());
if (!BodyBlock)
- BodyBlock = ContinueTargetBlock; // can happen for "while(...) ;"
+ BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
else if (Block) {
- if (!FinishBlock(BodyBlock))
+ if (badCFG)
return 0;
}
// Add the loop body entry as a successor to the condition.
- AddSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
}
// Link up the condition block with the code that follows the loop. (the
// false branch).
- AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
// There can be no more statements in the condition block since we loop back
// to this block. NULL out Block to force lazy creation of another block.
@@ -1385,14 +1934,14 @@ CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) {
// statement.
// If we were in the middle of a block we stop processing that block.
- if (Block && !FinishBlock(Block))
+ if (badCFG)
return 0;
// Create the new block.
Block = createBlock(false);
// The Exit block is the only successor.
- AddSuccessor(Block, &cfg->getExit());
+ addSuccessor(Block, &cfg->getExit());
// Add the statement to the block. This may create new blocks if S contains
// control-flow (short-circuit operations).
@@ -1401,7 +1950,7 @@ CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) {
CFGBlock* CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr* T) {
// If we were in the middle of a block we stop processing that block.
- if (Block && !FinishBlock(Block))
+ if (badCFG)
return 0;
// Create the new block.
@@ -1409,10 +1958,10 @@ CFGBlock* CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr* T) {
if (TryTerminatedBlock)
// The current try statement is the only successor.
- AddSuccessor(Block, TryTerminatedBlock);
+ addSuccessor(Block, TryTerminatedBlock);
else
// otherwise the Exit block is the only successor.
- AddSuccessor(Block, &cfg->getExit());
+ addSuccessor(Block, &cfg->getExit());
// Add the statement to the block. This may create new blocks if S contains
// control-flow (short-circuit operations).
@@ -1425,7 +1974,7 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
// "do...while" is a control-flow statement. Thus we stop processing the
// current block.
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
LoopSuccessor = Block;
} else
@@ -1446,7 +1995,7 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
Block = ExitConditionBlock;
EntryConditionBlock = addStmt(C);
if (Block) {
- if (!FinishBlock(EntryConditionBlock))
+ if (badCFG)
return 0;
}
}
@@ -1455,7 +2004,7 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
Succ = EntryConditionBlock;
// See if this is a known constant.
- const TryResult &KnownVal = TryEvaluateBool(D->getCond());
+ const TryResult &KnownVal = tryEvaluateBool(D->getCond());
// Process the loop body.
CFGBlock* BodyBlock = NULL;
@@ -1463,26 +2012,31 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
assert(D->getBody());
// Save the current values for Block, Succ, and continue and break targets
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
- save_continue(ContinueTargetBlock),
- save_break(BreakTargetBlock);
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
// All continues within this loop should go to the condition block
- ContinueTargetBlock = EntryConditionBlock;
+ ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
// All breaks should go to the code following the loop.
- BreakTargetBlock = LoopSuccessor;
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
// NULL out Block to force lazy instantiation of blocks for the body.
Block = NULL;
+  // If the body is not a compound statement, create an implicit scope
+  // and add destructors.
+ if (!isa<CompoundStmt>(D->getBody()))
+ addLocalScopeAndDtors(D->getBody());
+
// Create the body. The returned block is the entry to the loop body.
BodyBlock = addStmt(D->getBody());
if (!BodyBlock)
BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
else if (Block) {
- if (!FinishBlock(BodyBlock))
+ if (badCFG)
return 0;
}
@@ -1498,15 +2052,15 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
LoopBackBlock->setLoopTarget(D);
// Add the loop body entry as a successor to the condition.
- AddSuccessor(ExitConditionBlock, LoopBackBlock);
+ addSuccessor(ExitConditionBlock, LoopBackBlock);
}
else
- AddSuccessor(ExitConditionBlock, NULL);
+ addSuccessor(ExitConditionBlock, NULL);
}
// Link up the condition block with the code that follows the loop.
// (the false branch).
- AddSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
// There can be no more statements in the body block(s) since we loop back to
// the body. NULL out Block to force lazy creation of another block.
@@ -1520,8 +2074,8 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt* D) {
CFGBlock* CFGBuilder::VisitContinueStmt(ContinueStmt* C) {
// "continue" is a control-flow statement. Thus we stop processing the
// current block.
- if (Block && !FinishBlock(Block))
- return 0;
+ if (badCFG)
+ return 0;
// Now create a new block that ends with the continue statement.
Block = createBlock(false);
@@ -1529,9 +2083,10 @@ CFGBlock* CFGBuilder::VisitContinueStmt(ContinueStmt* C) {
// If there is no target for the continue, then we are looking at an
// incomplete AST. This means the CFG cannot be constructed.
- if (ContinueTargetBlock)
- AddSuccessor(Block, ContinueTargetBlock);
- else
+ if (ContinueJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, ContinueJumpTarget.scopePosition, C);
+ addSuccessor(Block, ContinueJumpTarget.block);
+ } else
badCFG = true;
return Block;
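An illustrative input (not from this diff) showing why the continue path needs addAutomaticObjDtors: any local in scope at the continue but not at the loop head must be destroyed before the back edge.

struct Obj { ~Obj(); };
void f(bool skip) {
  while (skip) {
    Obj o;
    if (skip)
      continue;  // the CFG gets an implicit 'o.~Obj()' before this jump
  }
}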
@@ -1542,12 +2097,12 @@ CFGBlock *CFGBuilder::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E,
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, E);
+ appendStmt(Block, E);
}
// VLA types have expressions that must be evaluated.
if (E->isArgumentType()) {
- for (VariableArrayType* VA = FindVA(E->getArgumentType().getTypePtr());
+ for (const VariableArrayType *VA =FindVA(E->getArgumentType().getTypePtr());
VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
addStmt(VA->getSizeExpr());
}
@@ -1560,7 +2115,7 @@ CFGBlock *CFGBuilder::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E,
CFGBlock* CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) {
if (asc.alwaysAdd()) {
autoCreateBlock();
- AppendStmt(Block, SE);
+ appendStmt(Block, SE);
}
return VisitCompoundStmt(SE->getSubStmt());
}
@@ -1570,16 +2125,28 @@ CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) {
// block.
CFGBlock* SwitchSuccessor = NULL;
+  // Save the local scope position, because in the presence of a condition
+  // variable ScopePos won't be restored when traversing the AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+  // Create a local scope for a possible condition variable.
+  // Store the scope position and add an implicit destructor.
+ if (VarDecl* VD = Terminator->getConditionVariable()) {
+ LocalScope::const_iterator SwitchBeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, SwitchBeginScopePos, Terminator);
+ }
+
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
SwitchSuccessor = Block;
} else SwitchSuccessor = Succ;
// Save the current "switch" context.
SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
- save_break(BreakTargetBlock),
save_default(DefaultCaseBlock);
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
// Set the "default" case to be the block after the switch statement. If the
// switch statement contains a "default:", this value will be overwritten with
@@ -1592,22 +2159,28 @@ CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) {
// Now process the switch body. The code after the switch is the implicit
// successor.
Succ = SwitchSuccessor;
- BreakTargetBlock = SwitchSuccessor;
+ BreakJumpTarget = JumpTarget(SwitchSuccessor, ScopePos);
// When visiting the body, the case statements should automatically get linked
// up to the switch. We also don't keep a pointer to the body, since all
// control-flow from the switch goes to case/default statements.
assert(Terminator->getBody() && "switch must contain a non-NULL body");
Block = NULL;
- CFGBlock *BodyBlock = addStmt(Terminator->getBody());
+
+  // If the body is not a compound statement, create an implicit scope
+  // and add destructors.
+ if (!isa<CompoundStmt>(Terminator->getBody()))
+ addLocalScopeAndDtors(Terminator->getBody());
+
+ addStmt(Terminator->getBody());
if (Block) {
- if (!FinishBlock(BodyBlock))
+ if (badCFG)
return 0;
}
// If we have no "default:" case, the default transition is to the code
// following the switch body.
- AddSuccessor(SwitchTerminatedBlock, DefaultCaseBlock);
+ addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock);
// Add the terminator and condition in the switch block.
SwitchTerminatedBlock->setTerminator(Terminator);
@@ -1620,7 +2193,7 @@ CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) {
if (VarDecl *VD = Terminator->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- AppendStmt(Block, Terminator, AddStmtChoice::AlwaysAdd);
+ appendStmt(Block, Terminator, AddStmtChoice::AlwaysAdd);
addStmt(Init);
}
}
@@ -1638,16 +2211,16 @@ CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) {
// (which can blow out the stack), manually unroll and create blocks
// along the way.
while (isa<CaseStmt>(Sub)) {
- CFGBlock *CurrentBlock = createBlock(false);
- CurrentBlock->setLabel(CS);
+ CFGBlock *currentBlock = createBlock(false);
+ currentBlock->setLabel(CS);
if (TopBlock)
- AddSuccessor(LastBlock, CurrentBlock);
+ addSuccessor(LastBlock, currentBlock);
else
- TopBlock = CurrentBlock;
+ TopBlock = currentBlock;
- AddSuccessor(SwitchTerminatedBlock, CurrentBlock);
- LastBlock = CurrentBlock;
+ addSuccessor(SwitchTerminatedBlock, currentBlock);
+ LastBlock = currentBlock;
CS = cast<CaseStmt>(Sub);
Sub = CS->getSubStmt();
@@ -1664,22 +2237,21 @@ CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* CS) {
// were processing (the "case XXX:" is the label).
CaseBlock->setLabel(CS);
- if (!FinishBlock(CaseBlock))
+ if (badCFG)
return 0;
// Add this block to the list of successors for the block with the switch
// statement.
assert(SwitchTerminatedBlock);
- AddSuccessor(SwitchTerminatedBlock, CaseBlock);
+ addSuccessor(SwitchTerminatedBlock, CaseBlock);
// We set Block to NULL to allow lazy creation of a new block (if necessary)
Block = NULL;
if (TopBlock) {
- AddSuccessor(LastBlock, CaseBlock);
+ addSuccessor(LastBlock, CaseBlock);
Succ = TopBlock;
- }
- else {
+ } else {
// This block is now the implicit successor of other blocks.
Succ = CaseBlock;
}
@@ -1700,7 +2272,7 @@ CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) {
// we were processing (the "default:" is the label).
DefaultCaseBlock->setLabel(Terminator);
- if (!FinishBlock(DefaultCaseBlock))
+ if (badCFG)
return 0;
// Unlike case statements, we don't add the default block to the successors
@@ -1724,7 +2296,7 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
CFGBlock* TrySuccessor = NULL;
if (Block) {
- if (!FinishBlock(Block))
+ if (badCFG)
return 0;
TrySuccessor = Block;
} else TrySuccessor = Succ;
@@ -1750,13 +2322,13 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
return 0;
// Add this block to the list of successors for the block with the try
// statement.
- AddSuccessor(NewTryTerminatedBlock, CatchBlock);
+ addSuccessor(NewTryTerminatedBlock, CatchBlock);
}
if (!HasCatchAll) {
if (PrevTryTerminatedBlock)
- AddSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
+ addSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
else
- AddSuccessor(NewTryTerminatedBlock, &cfg->getExit());
+ addSuccessor(NewTryTerminatedBlock, &cfg->getExit());
}
// The code after the try is the implicit successor.
@@ -1776,6 +2348,18 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) {
  // CXXCatchStmts are treated like labels, so they are the first statement in
  // a block.
+  // Save the local scope position, because in the presence of an exception
+  // variable ScopePos won't be restored when traversing the AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+  // Create a local scope for a possible exception variable.
+  // Store the scope position and add an implicit destructor.
+ if (VarDecl* VD = CS->getExceptionDecl()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, CS);
+ }
+
if (CS->getHandlerBlock())
addStmt(CS->getHandlerBlock());
@@ -1785,7 +2369,7 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) {
CatchBlock->setLabel(CS);
- if (!FinishBlock(CatchBlock))
+ if (badCFG)
return 0;
// We set Block to NULL to allow lazy creation of a new block (if necessary)
@@ -1794,15 +2378,75 @@ CFGBlock* CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt* CS) {
return CatchBlock;
}
+CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
+ AddStmtChoice asc) {
+ if (BuildOpts.AddImplicitDtors) {
+ // If adding implicit destructors visit the full expression for adding
+ // destructors of temporaries.
+ VisitForTemporaryDtors(E->getSubExpr());
+
+    // The full expression has to be added as a CFGStmt so it will be
+    // sequenced before the destructors of its temporaries.
+ asc = asc.withAlwaysAdd(true);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ appendStmt(Block, E, asc);
+
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ if (!C->isElidable())
+ appendStmt(Block, C, asc.withAlwaysAdd(true));
+
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ appendStmt(Block, E, asc);
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, C, asc.withAlwaysAdd(true));
+ return VisitChildren(C);
+}
+
CFGBlock *CFGBuilder::VisitCXXMemberCallExpr(CXXMemberCallExpr *C,
AddStmtChoice asc) {
- AddStmtChoice::Kind K = asc.asLValue() ? AddStmtChoice::AlwaysAddAsLValue
- : AddStmtChoice::AlwaysAdd;
autoCreateBlock();
- AppendStmt(Block, C, AddStmtChoice(K));
+ appendStmt(Block, C, asc.withAlwaysAdd(true));
return VisitChildren(C);
}
+CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ appendStmt(Block, E, asc);
+ }
+ return Visit(E->getSubExpr(), AddStmtChoice());
+}
+
CFGBlock* CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt* I) {
// Lazily create the indirect-goto dispatch block if there isn't one already.
CFGBlock* IBlock = cfg->getIndirectGotoBlock();
@@ -1814,15 +2458,210 @@ CFGBlock* CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt* I) {
// IndirectGoto is a control-flow statement. Thus we stop processing the
// current block and create a new one.
- if (Block && !FinishBlock(Block))
+ if (badCFG)
return 0;
Block = createBlock(false);
Block->setTerminator(I);
- AddSuccessor(Block, IBlock);
+ addSuccessor(Block, IBlock);
return addStmt(I->getTarget());
}
+CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary) {
+tryAgain:
+ if (!E) {
+ badCFG = true;
+ return NULL;
+ }
+ switch (E->getStmtClass()) {
+ default:
+ return VisitChildrenForTemporaryDtors(E);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E));
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExprForTemporaryDtors(
+ cast<CXXBindTemporaryExpr>(E), BindToTemporary);
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperatorForTemporaryDtors(
+ cast<AbstractConditionalOperator>(E), BindToTemporary);
+
+ case Stmt::ImplicitCastExprClass:
+    // For an implicit cast we want BindToTemporary to be passed through.
+ E = cast<CastExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ goto tryAgain;
+ }
+}
+
+CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) {
+  // When visiting children for destructors we want to visit them in reverse
+  // order. Because there is no reverse iterator for children, collect them
+  // in a helper vector and walk it backwards.
+ typedef llvm::SmallVector<Stmt *, 4> ChildrenVect;
+ ChildrenVect ChildrenRev;
+ for (Stmt::child_range I = E->children(); I; ++I) {
+ if (*I) ChildrenRev.push_back(*I);
+ }
+
+ CFGBlock *B = Block;
+ for (ChildrenVect::reverse_iterator I = ChildrenRev.rbegin(),
+ L = ChildrenRev.rend(); I != L; ++I) {
+ if (CFGBlock *R = VisitForTemporaryDtors(*I))
+ B = R;
+ }
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E) {
+ if (E->isLogicalOp()) {
+    // Destructors for temporaries in the LHS expression should be called
+    // after those for the RHS expression. Even if this unnecessarily creates
+    // a block, that block will be used at least by the full expression.
+ autoCreateBlock();
+ CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getLHS());
+ if (badCFG)
+ return NULL;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+
+ if (RHSBlock) {
+ if (badCFG)
+ return NULL;
+
+      // If the RHS expression did produce destructors, we need to connect
+      // the created blocks to the CFG in the same manner as for the binary
+      // operator itself.
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(CFGTerminator(E, true));
+
+      // For a binary operator the LHS block comes before the RHS block in
+      // ConfluenceBlock's list of predecessors.
+ std::reverse(ConfluenceBlock->pred_begin(),
+ ConfluenceBlock->pred_end());
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(E->getLHS());
+ if (KnownVal.isKnown() && (E->getOpcode() == BO_LOr))
+ KnownVal.negate();
+
+      // Link LHSBlock with RHSBlock exactly the same way as for the binary
+      // operator itself.
+ if (E->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ } else {
+ assert (E->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ }
+
+ Block = LHSBlock;
+ return LHSBlock;
+ }
+
+ Block = ConfluenceBlock;
+ return ConfluenceBlock;
+ }
+
+ if (E->isAssignmentOp()) {
+    // For an assignment operator (=) the LHS expression is visited before
+    // the RHS expression, so visit their destructors in reverse order.
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+ return LHSBlock ? LHSBlock : RHSBlock;
+ }
+
+  // For any other binary operator the RHS expression is visited before the
+  // LHS expression (the order of the children), so visit their destructors
+  // in reverse order.
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+ return RHSBlock ? RHSBlock : LHSBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
+ CXXBindTemporaryExpr *E, bool BindToTemporary) {
+ // First add destructors for temporaries in subexpression.
+ CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr());
+ if (!BindToTemporary) {
+    // If the temporary's lifetime is not prolonged (by binding it to a
+    // const reference), add a destructor for it.
+ autoCreateBlock();
+ appendTemporaryDtor(Block, E);
+ B = Block;
+ }
+ return B;
+}
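The BindToTemporary flag separates the two cases in this hypothetical example:

struct S { ~S(); };
S make();
void g() {
  make();              // temporary dies with the full expression:
                       // appendTemporaryDtor adds its destructor here
  const S &r = make(); // lifetime extended by binding to a const reference:
                       // BindToTemporary suppresses the immediate destructor
}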
+
+CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary) {
+  // First add destructors for the condition expression. Even if this
+  // unnecessarily creates a block, that block will be used at least by the
+  // full expression.
+ autoCreateBlock();
+ CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getCond());
+ if (badCFG)
+ return NULL;
+ if (BinaryConditionalOperator *BCO
+ = dyn_cast<BinaryConditionalOperator>(E)) {
+ ConfluenceBlock = VisitForTemporaryDtors(BCO->getCommon());
+ if (badCFG)
+ return NULL;
+ }
+
+  // Try to add a block with destructors for the LHS expression.
+ CFGBlock *LHSBlock = NULL;
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ LHSBlock = VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary);
+ if (badCFG)
+ return NULL;
+
+  // Try to add a block with destructors for the RHS expression.
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getFalseExpr(),
+ BindToTemporary);
+ if (badCFG)
+ return NULL;
+
+ if (!RHSBlock && !LHSBlock) {
+    // If neither the LHS nor the RHS expression had temporaries to destroy,
+    // don't create more blocks.
+ Block = ConfluenceBlock;
+ return Block;
+ }
+
+ Block = createBlock(false);
+ Block->setTerminator(CFGTerminator(E, true));
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(E->getCond());
+
+ if (LHSBlock) {
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ } else if (KnownVal.isFalse()) {
+ addSuccessor(Block, NULL);
+ } else {
+ addSuccessor(Block, ConfluenceBlock);
+ std::reverse(ConfluenceBlock->pred_begin(), ConfluenceBlock->pred_end());
+ }
+
+ if (!RHSBlock)
+ RHSBlock = ConfluenceBlock;
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+
+ return Block;
+}
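An illustrative input for the branch structure built above: only the chosen arm's temporary is constructed, so its destructor has to sit behind a branch on the condition.

struct S { S(); ~S(); int v; };
int pick(bool c) {
  // Only one of the two S temporaries exists at runtime, so the CFG
  // branches on 'c' before running the matching temporary destructor.
  return c ? S().v : S().v;
}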
+
} // end anonymous namespace
/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
@@ -1847,11 +2686,9 @@ CFGBlock* CFG::createBlock() {
/// buildCFG - Constructs a CFG from an AST. Ownership of the returned
/// CFG is returned to the caller.
CFG* CFG::buildCFG(const Decl *D, Stmt* Statement, ASTContext *C,
- bool PruneTriviallyFalse,
- bool AddEHEdges, bool AddScopes) {
+ BuildOptions BO) {
CFGBuilder Builder;
- return Builder.buildCFG(D, Statement, C, PruneTriviallyFalse,
- AddEHEdges, AddScopes);
+ return Builder.buildCFG(D, Statement, C, BO);
}
//===----------------------------------------------------------------------===//
@@ -1867,7 +2704,7 @@ static void FindSubExprAssignments(Stmt *S,
if (!S)
return;
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I) {
+ for (Stmt::child_range I = S->children(); I; ++I) {
Stmt *child = *I;
if (!child)
continue;
@@ -1890,15 +2727,19 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) {
for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I)
for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI)
- FindSubExprAssignments(*BI, SubExprAssignments);
+ if (CFGStmt S = BI->getAs<CFGStmt>())
+ FindSubExprAssignments(S, SubExprAssignments);
for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I) {
  // Iterate over the statements again to identify the Expr* and Stmt* at the
// block-level that are block-level expressions.
- for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI)
- if (Expr* Exp = dyn_cast<Expr>(*BI)) {
+ for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI) {
+ CFGStmt CS = BI->getAs<CFGStmt>();
+ if (!CS.isValid())
+ continue;
+ if (Expr* Exp = dyn_cast<Expr>(CS.getStmt())) {
if (BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) {
// Assignment expressions that are not nested within another
@@ -1919,6 +2760,7 @@ static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) {
unsigned x = M->size();
(*M)[Exp] = x;
}
+ }
// Look at terminators. The condition is a block-level expression.
@@ -1945,12 +2787,34 @@ CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt* S) {
unsigned CFG::getNumBlkExprs() {
if (const BlkExprMapTy* M = reinterpret_cast<const BlkExprMapTy*>(BlkExprMap))
return M->size();
- else {
- // We assume callers interested in the number of BlkExprs will want
- // the map constructed if it doesn't already exist.
- BlkExprMap = (void*) PopulateBlkExprMap(*this);
- return reinterpret_cast<BlkExprMapTy*>(BlkExprMap)->size();
+
+ // We assume callers interested in the number of BlkExprs will want
+ // the map constructed if it doesn't already exist.
+ BlkExprMap = (void*) PopulateBlkExprMap(*this);
+ return reinterpret_cast<BlkExprMapTy*>(BlkExprMap)->size();
+}
+
+//===----------------------------------------------------------------------===//
+// Filtered walking of the CFG.
+//===----------------------------------------------------------------------===//
+
+bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
+ const CFGBlock *From, const CFGBlock *To) {
+
+ if (F.IgnoreDefaultsWithCoveredEnums) {
+ // If the 'To' has no label or is labeled but the label isn't a
+ // CaseStmt then filter this edge.
+ if (const SwitchStmt *S =
+ dyn_cast_or_null<SwitchStmt>(From->getTerminator().getStmt())) {
+ if (S->isAllEnumCasesCovered()) {
+ const Stmt *L = To->getLabel();
+ if (!L || !isa<CaseStmt>(L))
+ return true;
+ }
+ }
}
+
+ return false;
}
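The ReachableCode.cpp hunks later in this diff consume this filter through the filtered successor iterator; the idiom there is:

CFGBlock::FilterOptions FO;
FO.IgnoreDefaultsWithCoveredEnums = 1;
for (CFGBlock::filtered_succ_iterator I = block->filtered_succ_start_end(FO);
     I.hasMore(); ++I) {
  if (const CFGBlock *B = *I) {
    // visit B; edges into the 'default:' of a switch that covers all enum
    // cases have been filtered out
  }
}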
//===----------------------------------------------------------------------===//
@@ -1969,37 +2833,81 @@ namespace {
class StmtPrinterHelper : public PrinterHelper {
typedef llvm::DenseMap<Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
+ typedef llvm::DenseMap<Decl*,std::pair<unsigned,unsigned> > DeclMapTy;
StmtMapTy StmtMap;
- signed CurrentBlock;
- unsigned CurrentStmt;
+ DeclMapTy DeclMap;
+ signed currentBlock;
+ unsigned currentStmt;
const LangOptions &LangOpts;
public:
StmtPrinterHelper(const CFG* cfg, const LangOptions &LO)
- : CurrentBlock(0), CurrentStmt(0), LangOpts(LO) {
+ : currentBlock(0), currentStmt(0), LangOpts(LO) {
for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
unsigned j = 1;
for (CFGBlock::const_iterator BI = (*I)->begin(), BEnd = (*I)->end() ;
- BI != BEnd; ++BI, ++j )
- StmtMap[*BI] = std::make_pair((*I)->getBlockID(),j);
+ BI != BEnd; ++BI, ++j ) {
+ if (CFGStmt SE = BI->getAs<CFGStmt>()) {
+ std::pair<unsigned, unsigned> P((*I)->getBlockID(), j);
+ StmtMap[SE] = P;
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(SE.getStmt())) {
+ DeclMap[DS->getSingleDecl()] = P;
+
+ } else if (IfStmt* IS = dyn_cast<IfStmt>(SE.getStmt())) {
+ if (VarDecl* VD = IS->getConditionVariable())
+ DeclMap[VD] = P;
+
+ } else if (ForStmt* FS = dyn_cast<ForStmt>(SE.getStmt())) {
+ if (VarDecl* VD = FS->getConditionVariable())
+ DeclMap[VD] = P;
+
+ } else if (WhileStmt* WS = dyn_cast<WhileStmt>(SE.getStmt())) {
+ if (VarDecl* VD = WS->getConditionVariable())
+ DeclMap[VD] = P;
+
+ } else if (SwitchStmt* SS = dyn_cast<SwitchStmt>(SE.getStmt())) {
+ if (VarDecl* VD = SS->getConditionVariable())
+ DeclMap[VD] = P;
+
+ } else if (CXXCatchStmt* CS = dyn_cast<CXXCatchStmt>(SE.getStmt())) {
+ if (VarDecl* VD = CS->getExceptionDecl())
+ DeclMap[VD] = P;
+ }
+ }
}
+ }
}
virtual ~StmtPrinterHelper() {}
const LangOptions &getLangOpts() const { return LangOpts; }
- void setBlockID(signed i) { CurrentBlock = i; }
- void setStmtID(unsigned i) { CurrentStmt = i; }
-
- virtual bool handledStmt(Stmt* Terminator, llvm::raw_ostream& OS) {
+ void setBlockID(signed i) { currentBlock = i; }
+ void setStmtID(unsigned i) { currentStmt = i; }
- StmtMapTy::iterator I = StmtMap.find(Terminator);
+ virtual bool handledStmt(Stmt* S, llvm::raw_ostream& OS) {
+ StmtMapTy::iterator I = StmtMap.find(S);
if (I == StmtMap.end())
return false;
- if (CurrentBlock >= 0 && I->second.first == (unsigned) CurrentBlock
- && I->second.second == CurrentStmt) {
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currentStmt) {
+ return false;
+ }
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+
+ bool handleDecl(Decl* D, llvm::raw_ostream& OS) {
+ DeclMapTy::iterator I = DeclMap.find(D);
+
+ if (I == DeclMap.end())
+ return false;
+
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currentStmt) {
return false;
}
@@ -2066,7 +2974,7 @@ public:
OS << "try ...";
}
- void VisitConditionalOperator(ConditionalOperator* C) {
+ void VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
C->getCond()->printPretty(OS, Helper, Policy);
OS << " ? ... : ...";
}
@@ -2108,58 +3016,95 @@ public:
};
} // end anonymous namespace
-
-static void print_stmt(llvm::raw_ostream &OS, StmtPrinterHelper* Helper,
+static void print_elem(llvm::raw_ostream &OS, StmtPrinterHelper* Helper,
const CFGElement &E) {
-
- if (E.asStartScope()) {
- OS << "start scope\n";
- return;
- }
- if (E.asEndScope()) {
- OS << "end scope\n";
- return;
- }
-
- Stmt *S = E;
-
- if (Helper) {
- // special printing for statement-expressions.
- if (StmtExpr* SE = dyn_cast<StmtExpr>(S)) {
- CompoundStmt* Sub = SE->getSubStmt();
-
- if (Sub->child_begin() != Sub->child_end()) {
- OS << "({ ... ; ";
- Helper->handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
- OS << " })\n";
- return;
+ if (CFGStmt CS = E.getAs<CFGStmt>()) {
+ Stmt *S = CS;
+
+ if (Helper) {
+
+ // special printing for statement-expressions.
+ if (StmtExpr* SE = dyn_cast<StmtExpr>(S)) {
+ CompoundStmt* Sub = SE->getSubStmt();
+
+ if (Sub->children()) {
+ OS << "({ ... ; ";
+ Helper->handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
+ OS << " })\n";
+ return;
+ }
+ }
+ // special printing for comma expressions.
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (B->getOpcode() == BO_Comma) {
+ OS << "... , ";
+ Helper->handledStmt(B->getRHS(),OS);
+ OS << '\n';
+ return;
+ }
}
}
+ S->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
- // special printing for comma expressions.
- if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
- if (B->getOpcode() == BO_Comma) {
- OS << "... , ";
- Helper->handledStmt(B->getRHS(),OS);
- OS << '\n';
- return;
- }
+ if (isa<CXXOperatorCallExpr>(S)) {
+ OS << " (OperatorCall)";
+ } else if (isa<CXXBindTemporaryExpr>(S)) {
+ OS << " (BindTemporary)";
}
- }
- S->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
-
- if (isa<CXXOperatorCallExpr>(S)) {
- OS << " (OperatorCall)";
- }
- else if (isa<CXXBindTemporaryExpr>(S)) {
- OS << " (BindTemporary)";
- }
+ // Expressions need a newline.
+ if (isa<Expr>(S))
+ OS << '\n';
+ } else if (CFGInitializer IE = E.getAs<CFGInitializer>()) {
+ CXXCtorInitializer* I = IE;
+ if (I->isBaseInitializer())
+ OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
+ else OS << I->getAnyMember()->getName();
- // Expressions need a newline.
- if (isa<Expr>(S))
- OS << '\n';
+ OS << "(";
+ if (Expr* IE = I->getInit())
+ IE->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
+ OS << ")";
+
+ if (I->isBaseInitializer())
+ OS << " (Base initializer)\n";
+ else OS << " (Member initializer)\n";
+
+ } else if (CFGAutomaticObjDtor DE = E.getAs<CFGAutomaticObjDtor>()){
+ VarDecl* VD = DE.getVarDecl();
+ Helper->handleDecl(VD, OS);
+
+ const Type* T = VD->getType().getTypePtr();
+ if (const ReferenceType* RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType().getTypePtr();
+ else if (const Type *ET = T->getArrayElementTypeNoTypeQual())
+ T = ET;
+
+ OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
+ OS << " (Implicit destructor)\n";
+
+ } else if (CFGBaseDtor BE = E.getAs<CFGBaseDtor>()) {
+ const CXXBaseSpecifier *BS = BE.getBaseSpecifier();
+ OS << "~" << BS->getType()->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Base object destructor)\n";
+
+ } else if (CFGMemberDtor ME = E.getAs<CFGMemberDtor>()) {
+ FieldDecl *FD = ME.getFieldDecl();
+
+ const Type *T = FD->getType().getTypePtr();
+ if (const Type *ET = T->getArrayElementTypeNoTypeQual())
+ T = ET;
+
+ OS << "this->" << FD->getName();
+ OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Member object destructor)\n";
+
+ } else if (CFGTemporaryDtor TE = E.getAs<CFGTemporaryDtor>()) {
+ CXXBindTemporaryExpr *BT = TE.getBindTemporaryExpr();
+ OS << "~" << BT->getType()->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Temporary object destructor)\n";
+ }
}
static void print_block(llvm::raw_ostream& OS, const CFG* cfg,
@@ -2229,7 +3174,7 @@ static void print_block(llvm::raw_ostream& OS, const CFG* cfg,
if (Helper)
Helper->setStmtID(j);
- print_stmt(OS,Helper,*I);
+ print_elem(OS,Helper,*I);
}
// Print the terminator of this block.
@@ -2243,7 +3188,7 @@ static void print_block(llvm::raw_ostream& OS, const CFG* cfg,
CFGBlockTerminatorPrint TPrinter(OS, Helper,
PrintingPolicy(Helper->getLangOpts()));
- TPrinter.Visit(const_cast<Stmt*>(B.getTerminator()));
+ TPrinter.Visit(const_cast<Stmt*>(B.getTerminator().getStmt()));
OS << '\n';
}
@@ -2325,11 +3270,11 @@ void CFGBlock::print(llvm::raw_ostream& OS, const CFG* cfg,
void CFGBlock::printTerminator(llvm::raw_ostream &OS,
const LangOptions &LO) const {
CFGBlockTerminatorPrint TPrinter(OS, NULL, PrintingPolicy(LO));
- TPrinter.Visit(const_cast<Stmt*>(getTerminator()));
+ TPrinter.Visit(const_cast<Stmt*>(getTerminator().getStmt()));
}
Stmt* CFGBlock::getTerminatorCondition() {
-
+ Stmt *Terminator = this->Terminator;
if (!Terminator)
return NULL;
@@ -2367,6 +3312,10 @@ Stmt* CFGBlock::getTerminatorCondition() {
E = cast<SwitchStmt>(Terminator)->getCond();
break;
+ case Stmt::BinaryConditionalOperatorClass:
+ E = cast<BinaryConditionalOperator>(Terminator)->getCond();
+ break;
+
case Stmt::ConditionalOperatorClass:
E = cast<ConditionalOperator>(Terminator)->getCond();
break;
@@ -2383,7 +3332,7 @@ Stmt* CFGBlock::getTerminatorCondition() {
}
bool CFGBlock::hasBinaryBranchTerminator() const {
-
+ const Stmt *Terminator = this->Terminator;
if (!Terminator)
return false;
@@ -2398,6 +3347,7 @@ bool CFGBlock::hasBinaryBranchTerminator() const {
case Stmt::DoStmtClass:
case Stmt::IfStmtClass:
case Stmt::ChooseExprClass:
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
case Stmt::BinaryOperatorClass:
return true;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
index 965eca1..3a030f9 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
@@ -50,15 +50,18 @@ static void Accumulate(SMap &SM, CFGBlock *B) {
// First walk the block-level expressions.
for (CFGBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
const CFGElement &CE = *I;
- if (Stmt *S = CE.getStmt()) {
- CFGBlock *&Entry = SM[S];
- // If 'Entry' is already initialized (e.g., a terminator was already),
- // skip.
- if (Entry)
- continue;
+ CFGStmt CS = CE.getAs<CFGStmt>();
+ if (!CS.isValid())
+ continue;
+
+ CFGBlock *&Entry = SM[CS];
+ // If 'Entry' is already initialized (e.g., a terminator was already),
+ // skip.
+ if (Entry)
+ continue;
- Entry = B;
- }
+ Entry = B;
+
}
// Look at the label of the block.
diff --git a/contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
index b446a04..22b6c1a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CocoaConventions.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/DomainSpecific/CocoaConventions.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/AST/Type.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
+using namespace ento;
using llvm::StringRef;
@@ -53,7 +54,8 @@ static const char* parseWord(const char* s) {
return s;
}
-cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S) {
+cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S,
+ bool ignorePrefix) {
IdentifierInfo *II = S.getIdentifierInfoForSlot(0);
if (!II)
@@ -61,80 +63,62 @@ cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S) {
const char *s = II->getNameStart();
+ const char *orig = s;
  // A method/function name may contain a prefix. We don't know whether it is
  // there, however, until we encounter the first '_'.
- bool InPossiblePrefix = true;
- bool AtBeginning = true;
- NamingConvention C = NoConvention;
-
while (*s != '\0') {
- // Skip '_'.
- if (*s == '_') {
- if (InPossiblePrefix) {
- // If we already have a convention, return it. Otherwise, skip
- // the prefix as if it wasn't there.
- if (C != NoConvention)
- break;
-
- InPossiblePrefix = false;
- AtBeginning = true;
- assert(C == NoConvention);
- }
+ // Skip '_', numbers, ':', etc.
+ if (*s == '_' || !isalpha(*s)) {
++s;
continue;
}
+ break;
+ }
- // Skip numbers, ':', etc.
- if (!isalpha(*s)) {
- ++s;
- continue;
- }
+ if (!ignorePrefix && s != orig)
+ return NoConvention;
- const char *wordEnd = parseWord(s);
- assert(wordEnd > s);
- unsigned len = wordEnd - s;
+ // Parse the first word, and look for specific keywords.
+ const char *wordEnd = parseWord(s);
+ assert(wordEnd > s);
+ unsigned len = wordEnd - s;
- switch (len) {
+ switch (len) {
default:
- break;
+ return NoConvention;
case 3:
// Methods starting with 'new' follow the create rule.
- if (AtBeginning && StringRef(s, len).equals_lower("new"))
- C = CreateRule;
- break;
+ return (memcmp(s, "new", 3) == 0) ? CreateRule : NoConvention;
case 4:
- // Methods starting with 'alloc' or contain 'copy' follow the
- // create rule
- if (C == NoConvention && StringRef(s, len).equals_lower("copy"))
- C = CreateRule;
- else // Methods starting with 'init' follow the init rule.
- if (AtBeginning && StringRef(s, len).equals_lower("init"))
- C = InitRule;
- break;
+ // Methods starting with 'copy' follow the create rule.
+ if (memcmp(s, "copy", 4) == 0)
+ return CreateRule;
+ // Methods starting with 'init' follow the init rule.
+ if (memcmp(s, "init", 4) == 0)
+ return InitRule;
+ return NoConvention;
case 5:
- if (AtBeginning && StringRef(s, len).equals_lower("alloc"))
- C = CreateRule;
- break;
- }
-
- // If we aren't in the prefix and have a derived convention then just
- // return it now.
- if (!InPossiblePrefix && C != NoConvention)
- return C;
-
- AtBeginning = false;
- s = wordEnd;
+ return (memcmp(s, "alloc", 5) == 0) ? CreateRule : NoConvention;
+ case 7:
+ // Methods starting with 'mutableCopy' follow the create rule.
+ if (memcmp(s, "mutable", 7) == 0) {
+ // Look at the next word to see if it is "Copy".
+ s = wordEnd;
+ if (*s != '\0') {
+ wordEnd = parseWord(s);
+ len = wordEnd - s;
+ if (len == 4 && memcmp(s, "Copy", 4) == 0)
+ return CreateRule;
+ }
+ }
+ return NoConvention;
}
-
- // We will get here if there wasn't more than one word
- // after the prefix.
- return C;
}
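Expected classifications under the rewritten matcher (a sketch; the selector spellings are illustrative, not from this diff):

// deriveNamingConvention(S, ignorePrefix) on sample selectors:
//   "newObject"       -> CreateRule   (first word "new")
//   "copyItems:"      -> CreateRule   (first word "copy")
//   "initWithFrame:"  -> InitRule     (first word "init")
//   "allocWithZone:"  -> CreateRule   (first word "alloc")
//   "mutableCopy"     -> CreateRule   ("mutable" followed by "Copy")
//   "_copy:"          -> NoConvention unless ignorePrefix is true, since the
//                        leading '_' is skipped and s != orig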
bool cocoa::isRefType(QualType RetTy, llvm::StringRef Prefix,
llvm::StringRef Name) {
// Recursively walk the typedef stack, allowing typedefs of reference types.
- while (TypedefType* TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
+ while (const TypedefType *TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
llvm::StringRef TDName = TD->getDecl()->getIdentifier()->getName();
if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
return true;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
index e57258e..a6d6108 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -296,8 +296,8 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
}
case CPointerTy:
- return argTy->getAs<PointerType>() != NULL ||
- argTy->getAs<ObjCObjectPointerType>() != NULL;
+ return argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
+ argTy->isNullPtrType();
case ObjCPointerTy:
return argTy->getAs<ObjCObjectPointerType>() != NULL;
@@ -423,7 +423,7 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
case ConversionSpecifier::nArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::rArg:
return true;
default:
return false;
@@ -449,7 +449,7 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::nArg:
case ConversionSpecifier::cArg:
case ConversionSpecifier::sArg:
- case ConversionSpecifier::rArg:
+ case ConversionSpecifier::rArg:
return true;
default:
return false;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
index 47b2e3d..303dc0f 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -104,8 +104,9 @@ namespace {
class TransferFuncs : public CFGRecStmtVisitor<TransferFuncs>{
LiveVariables::AnalysisDataTy& AD;
LiveVariables::ValTy LiveState;
+ const CFGBlock *currentBlock;
public:
- TransferFuncs(LiveVariables::AnalysisDataTy& ad) : AD(ad) {}
+ TransferFuncs(LiveVariables::AnalysisDataTy& ad) : AD(ad), currentBlock(0) {}
LiveVariables::ValTy& getVal() { return LiveState; }
CFG& getCFG() { return AD.getCFG(); }
@@ -128,7 +129,10 @@ public:
void SetTopValue(LiveVariables::ValTy& V) {
V = AD.AlwaysLive;
}
-
+
+ void setCurrentBlock(const CFGBlock *block) {
+ currentBlock = block;
+ }
};
void TransferFuncs::Visit(Stmt *S) {
@@ -136,7 +140,7 @@ void TransferFuncs::Visit(Stmt *S) {
if (S == getCurrentBlkStmt()) {
if (AD.Observer)
- AD.Observer->ObserveStmt(S,AD,LiveState);
+ AD.Observer->ObserveStmt(S, currentBlock, AD, LiveState);
if (getCFG().isBlkExpr(S))
LiveState(S, AD) = Dead;
@@ -146,7 +150,7 @@ void TransferFuncs::Visit(Stmt *S) {
else if (!getCFG().isBlkExpr(S)) {
if (AD.Observer)
- AD.Observer->ObserveStmt(S,AD,LiveState);
+ AD.Observer->ObserveStmt(S, currentBlock, AD, LiveState);
StmtVisitor<TransferFuncs,void>::Visit(S);
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 1e9601a..ef5c0fb 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -101,6 +101,10 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
for ( ; I != E; ++I) {
switch (*I) {
default: hasMore = false; break;
+ case '\'':
+ // FIXME: POSIX specific. Always accept?
+ FS.setHasThousandsGrouping(I);
+ break;
case '-': FS.setIsLeftJustified(I); break;
case '+': FS.setHasPlusPrefix(I); break;
case ' ': FS.setHasSpacePrefix(I); break;
@@ -186,7 +190,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 's': k = ConversionSpecifier::sArg; break;
case 'u': k = ConversionSpecifier::uArg; break;
case 'x': k = ConversionSpecifier::xArg; break;
- // Mac OS X (unicode) specific
+ // POSIX specific.
case 'C': k = ConversionSpecifier::CArg; break;
case 'S': k = ConversionSpecifier::SArg; break;
// Objective-C.
@@ -209,7 +213,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (k == ConversionSpecifier::InvalidSpecifier) {
// Assume the conversion takes one argument.
- return !H.HandleInvalidPrintfConversionSpecifier(FS, Beg, I - Beg);
+ return !H.HandleInvalidPrintfConversionSpecifier(FS, Start, I - Start);
}
return PrintfSpecifierResult(Start, FS);
}
@@ -293,7 +297,7 @@ const char *ConversionSpecifier::toString() const {
ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
const PrintfConversionSpecifier &CS = getConversionSpecifier();
-
+
if (!CS.consumesDataArgument())
return ArgTypeResult::Invalid();
@@ -304,7 +308,7 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
default:
return ArgTypeResult::Invalid();
}
-
+
if (CS.isIntArg())
switch (LM.getKind()) {
case LengthModifier::AsLongDouble:
@@ -398,7 +402,20 @@ bool PrintfSpecifier::fixType(QualType QT) {
LM.setKind(LengthModifier::None);
break;
- case BuiltinType::WChar:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Long:
case BuiltinType::ULong:
LM.setKind(LengthModifier::AsLong);
@@ -415,8 +432,10 @@ bool PrintfSpecifier::fixType(QualType QT) {
}
// Set conversion specifier and disable any flags which do not apply to it.
- if (QT->isAnyCharacterType()) {
+ // Let typedefs to char fall through to int, as %c is silly for uint8_t.
+ if (isa<TypedefType>(QT) && QT->isAnyCharacterType()) {
CS.setKind(ConversionSpecifier::cArg);
+ LM.setKind(LengthModifier::None);
Precision.setHowSpecified(OptionalAmount::NotSpecified);
HasAlternativeForm = 0;
HasLeadingZeroes = 0;
@@ -451,7 +470,7 @@ bool PrintfSpecifier::fixType(QualType QT) {
void PrintfSpecifier::toString(llvm::raw_ostream &os) const {
// Whilst some features have no defined order, we are using the order
- // appearing in the C99 standard (ISO/IEC 9899:1999 (E) ¤7.19.6.1)
+ // appearing in the C99 standard (ISO/IEC 9899:1999 (E) 7.19.6.1)
os << "%";
// Positional args
@@ -504,10 +523,11 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
if (!HasAlternativeForm)
return true;
- // Alternate form flag only valid with the oxaAeEfFgG conversions
+ // Alternate form flag only valid with the oxXaAeEfFgG conversions
switch (CS.getKind()) {
case ConversionSpecifier::oArg:
case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
case ConversionSpecifier::aArg:
case ConversionSpecifier::AArg:
case ConversionSpecifier::eArg:
@@ -588,6 +608,24 @@ bool PrintfSpecifier::hasValidLeftJustified() const {
}
}
+bool PrintfSpecifier::hasValidThousandsGroupingPrefix() const {
+ if (!HasThousandsGrouping)
+ return true;
+
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ default:
+ return false;
+ }
+}
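For reference, the POSIX apostrophe flag being validated, with illustrative calls (grouping only shows up under a locale installed via setlocale):

#include <stdio.h>
int main(void) {
  printf("%'d\n", 1000000);  // OK: may print 1,000,000 under an en_US locale
  printf("%'s\n", "text");   // flagged: hasValidThousandsGroupingPrefix()
                             // returns false for sArg
  return 0;
}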
+
bool PrintfSpecifier::hasValidPrecision() const {
if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
return true;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
index ff43fc2..ff96eb4 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -86,6 +86,9 @@ void PseudoConstantAnalysis::RunAnalysis() {
const Stmt* Head = WorkList.front();
WorkList.pop_front();
+ if (const Expr *Ex = dyn_cast<Expr>(Head))
+ Head = Ex->IgnoreParenCasts();
+
switch (Head->getStmtClass()) {
// Case 1: Assignment operators modifying VarDecls
case Stmt::BinaryOperatorClass: {
@@ -225,13 +228,12 @@ void PseudoConstantAnalysis::RunAnalysis() {
continue;
}
- default:
- break;
+ default:
+ break;
} // switch (head->getStmtClass())
// Add all substatements to the worklist
- for (Stmt::const_child_iterator I = Head->child_begin(),
- E = Head->child_end(); I != E; ++I)
+ for (Stmt::const_child_range I = Head->children(); I; ++I)
if (*I)
WorkList.push_back(*I);
} // while (!WorkList.empty())
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
index 0543939..7afa586 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
@@ -30,20 +30,26 @@ static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1,
unsigned sn = 0;
R1 = R2 = SourceRange();
-top:
- if (sn < b.size())
- S = b[sn].getStmt();
- else if (b.getTerminator())
+ if (sn < b.size()) {
+ CFGStmt CS = b[sn].getAs<CFGStmt>();
+ if (!CS)
+ return SourceLocation();
+
+ S = CS.getStmt();
+ } else if (b.getTerminator())
S = b.getTerminator();
else
return SourceLocation();
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParenImpCasts();
+
switch (S->getStmtClass()) {
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(S);
if (BO->getOpcode() == BO_Comma) {
if (sn+1 < b.size())
- return b[sn+1].getStmt()->getLocStart();
+ return b[sn+1].getAs<CFGStmt>().getStmt()->getLocStart();
const CFGBlock *n = &b;
while (1) {
if (n->getTerminator())
@@ -54,7 +60,7 @@ top:
if (n->pred_size() != 1)
return SourceLocation();
if (!n->empty())
- return n[0][0].getStmt()->getLocStart();
+ return n[0][0].getAs<CFGStmt>().getStmt()->getLocStart();
}
}
R1 = BO->getLHS()->getSourceRange();
@@ -72,8 +78,10 @@ top:
R2 = CAO->getRHS()->getSourceRange();
return CAO->getOperatorLoc();
}
+ case Expr::BinaryConditionalOperatorClass:
case Expr::ConditionalOperatorClass: {
- const ConditionalOperator *CO = cast<ConditionalOperator>(S);
+ const AbstractConditionalOperator *CO =
+ cast<AbstractConditionalOperator>(S);
return CO->getQuestionLoc();
}
case Expr::MemberExprClass: {
@@ -97,9 +105,6 @@ top:
R1 = CE->getSubExpr()->getSourceRange();
return CE->getTypeBeginLoc();
}
- case Expr::ImplicitCastExprClass:
- ++sn;
- goto top;
case Stmt::CXXTryStmtClass: {
return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
}
@@ -131,6 +136,9 @@ static SourceLocation MarkLiveTop(const CFGBlock *Start,
}
// Solve
+ CFGBlock::FilterOptions FO;
+ FO.IgnoreDefaultsWithCoveredEnums = 1;
+
while (!WL.empty()) {
const CFGBlock *item = WL.back();
WL.pop_back();
@@ -147,8 +155,8 @@ static SourceLocation MarkLiveTop(const CFGBlock *Start,
}
reachable.set(item->getBlockID());
- for (CFGBlock::const_succ_iterator I=item->succ_begin(), E=item->succ_end();
- I != E; ++I)
+ for (CFGBlock::filtered_succ_iterator I =
+ item->filtered_succ_start_end(FO); I.hasMore(); ++I)
if (const CFGBlock *B = *I) {
unsigned blockID = B->getBlockID();
if (!reachable[blockID]) {
@@ -190,14 +198,17 @@ unsigned ScanReachableFromBlock(const CFGBlock &Start,
++count;
WL.push_back(&Start);
- // Find the reachable blocks from 'Start'.
+ // Find the reachable blocks from 'Start'.
+ CFGBlock::FilterOptions FO;
+ FO.IgnoreDefaultsWithCoveredEnums = 1;
+
while (!WL.empty()) {
const CFGBlock *item = WL.back();
WL.pop_back();
// Look at the successors and mark then reachable.
- for (CFGBlock::const_succ_iterator I=item->succ_begin(), E=item->succ_end();
- I != E; ++I)
+ for (CFGBlock::filtered_succ_iterator I= item->filtered_succ_start_end(FO);
+ I.hasMore(); ++I)
if (const CFGBlock *B = *I) {
unsigned blockID = B->getBlockID();
if (!Reachable[blockID]) {
@@ -234,7 +245,8 @@ void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
CFGBlock &b = **I;
if (!reachable[b.getBlockID()]) {
if (b.pred_empty()) {
- if (!AddEHEdges && dyn_cast_or_null<CXXTryStmt>(b.getTerminator())) {
+ if (!AddEHEdges
+ && dyn_cast_or_null<CXXTryStmt>(b.getTerminator().getStmt())) {
// When not adding EH edges from calls, catch clauses
// can otherwise seem dead. Avoid noting them as dead.
numReachable += ScanReachableFromBlock(b, reachable);
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
index 0f43efa..c08cbed 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -72,13 +72,15 @@ public:
bool VisitStmt(Stmt* S);
bool VisitCallExpr(CallExpr* C);
bool VisitDeclStmt(DeclStmt* D);
- bool VisitConditionalOperator(ConditionalOperator* C);
+ bool VisitAbstractConditionalOperator(AbstractConditionalOperator* C);
bool BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
bool Visit(Stmt *S);
bool BlockStmt_VisitExpr(Expr* E);
void VisitTerminator(CFGBlock* B) { }
+
+ void setCurrentBlock(const CFGBlock *block) {}
};
static const bool Initialized = false;
@@ -87,7 +89,7 @@ static const bool Uninitialized = true;
bool TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
- if (VD->isBlockVarDecl()) {
+ if (VD->isLocalVarDecl()) {
if (AD.Observer)
AD.Observer->ObserveDeclRefExpr(V, AD, DR, VD);
@@ -112,7 +114,7 @@ static VarDecl* FindBlockVarDecl(Expr* E) {
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
- if (VD->isBlockVarDecl()) return VD;
+ if (VD->isLocalVarDecl()) return VD;
return NULL;
}
@@ -133,7 +135,7 @@ bool TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
bool TransferFuncs::VisitDeclStmt(DeclStmt* S) {
for (DeclStmt::decl_iterator I=S->decl_begin(), E=S->decl_end(); I!=E; ++I) {
VarDecl *VD = dyn_cast<VarDecl>(*I);
- if (VD && VD->isBlockVarDecl()) {
+ if (VD && VD->isLocalVarDecl()) {
if (Stmt* I = VD->getInit()) {
// Visit the subexpression to check for uses of uninitialized values,
// even if we don't propagate that value.
@@ -170,7 +172,7 @@ bool TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
switch (U->getOpcode()) {
case UO_AddrOf: {
VarDecl* VD = FindBlockVarDecl(U->getSubExpr());
- if (VD && VD->isBlockVarDecl())
+ if (VD && VD->isLocalVarDecl())
return V(VD,AD) = Initialized;
break;
}
@@ -211,13 +213,14 @@ TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
}
-bool TransferFuncs::VisitConditionalOperator(ConditionalOperator* C) {
+bool TransferFuncs::
+VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
Visit(C->getCond());
- bool rhsResult = Visit(C->getRHS());
+ bool rhsResult = Visit(C->getFalseExpr());
// Handle the GNU extension for missing LHS.
- if (Expr *lhs = C->getLHS())
- return Visit(lhs) & rhsResult; // Yes: we want &, not &&.
+ if (isa<ConditionalOperator>(C))
+ return Visit(C->getTrueExpr()) & rhsResult; // Yes: we want &, not &&.
else
return rhsResult;
}
@@ -228,7 +231,7 @@ bool TransferFuncs::VisitStmt(Stmt* S) {
// We don't stop at the first subexpression that is Uninitialized because
  // evaluating some subexpressions may result in propagating "Uninitialized"
// or "Initialized" to variables referenced in the other subexpressions.
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
+ for (Stmt::child_range I = S->children(); I; ++I)
if (*I && Visit(*I) == Uninitialized) x = Uninitialized;
return x;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValuesV2.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValuesV2.cpp
new file mode 100644
index 0000000..75eccbf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValuesV2.cpp
@@ -0,0 +1,610 @@
+//==- UninitializedValuesV2.cpp - Find Uninitialized Values -----*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements uninitialized values analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include <utility>
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Analysis/Analyses/UninitializedValuesV2.h"
+#include "clang/Analysis/Support/SaveAndRestore.h"
+
+using namespace clang;
+
+static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
+ return vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
+ vd->getType()->isScalarType() &&
+ vd->getDeclContext() == dc;
+}
+
+//------------------------------------------------------------------------====//
+// DeclToBit: a mapping from Decls we track to bitvector indices.
+//====------------------------------------------------------------------------//
+
+namespace {
+class DeclToBit {
+ llvm::DenseMap<const VarDecl *, unsigned> map;
+public:
+ DeclToBit() {}
+
+ /// Compute the actual mapping from declarations to bits.
+ void computeMap(const DeclContext &dc);
+
+ /// Return the number of declarations in the map.
+ unsigned size() const { return map.size(); }
+
+ /// Returns the bit vector index for a given declaration.
+ llvm::Optional<unsigned> getBitVectorIndex(const VarDecl *d);
+};
+}
+
+void DeclToBit::computeMap(const DeclContext &dc) {
+ unsigned count = 0;
+ DeclContext::specific_decl_iterator<VarDecl> I(dc.decls_begin()),
+ E(dc.decls_end());
+ for ( ; I != E; ++I) {
+ const VarDecl *vd = *I;
+ if (isTrackedVar(vd, &dc))
+ map[vd] = count++;
+ }
+}
+
+llvm::Optional<unsigned> DeclToBit::getBitVectorIndex(const VarDecl *d) {
+ llvm::DenseMap<const VarDecl *, unsigned>::iterator I = map.find(d);
+ if (I == map.end())
+ return llvm::Optional<unsigned>();
+ return I->second;
+}
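How the mapping is consumed, mirroring CFGBlockValues::operator[] further down (declToBit and vd assumed in scope):

llvm::BitVector bv(declToBit.size());
llvm::Optional<unsigned> idx = declToBit.getBitVectorIndex(vd);
if (idx.hasValue())
  bv[idx.getValue()] = true;  // record a fact about 'vd' in this block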
+
+//------------------------------------------------------------------------====//
+// CFGBlockValues: dataflow values for CFG blocks.
+//====------------------------------------------------------------------------//
+
+typedef std::pair<llvm::BitVector *, llvm::BitVector *> BVPair;
+
+namespace {
+class CFGBlockValues {
+ const CFG &cfg;
+ BVPair *vals;
+ llvm::BitVector scratch;
+ DeclToBit declToBit;
+
+ llvm::BitVector &lazyCreate(llvm::BitVector *&bv);
+public:
+ CFGBlockValues(const CFG &cfg);
+ ~CFGBlockValues();
+
+ void computeSetOfDeclarations(const DeclContext &dc);
+ llvm::BitVector &getBitVector(const CFGBlock *block,
+ const CFGBlock *dstBlock);
+
+ BVPair &getBitVectors(const CFGBlock *block, bool shouldLazyCreate);
+
+ void mergeIntoScratch(llvm::BitVector const &source, bool isFirst);
+ bool updateBitVectorWithScratch(const CFGBlock *block);
+ bool updateBitVectors(const CFGBlock *block, const BVPair &newVals);
+
+ bool hasNoDeclarations() const {
+ return declToBit.size() == 0;
+ }
+
+ void resetScratch();
+ llvm::BitVector &getScratch() { return scratch; }
+
+ llvm::BitVector::reference operator[](const VarDecl *vd);
+};
+}
+
+CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {
+ unsigned n = cfg.getNumBlockIDs();
+ if (!n)
+ return;
+ vals = new std::pair<llvm::BitVector*, llvm::BitVector*>[n];
+ memset(vals, 0, sizeof(*vals) * n);
+}
+
+CFGBlockValues::~CFGBlockValues() {
+ unsigned n = cfg.getNumBlockIDs();
+ if (n == 0)
+ return;
+ for (unsigned i = 0; i < n; ++i) {
+ delete vals[i].first;
+ delete vals[i].second;
+ }
+ delete [] vals;
+}
+
+void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
+ declToBit.computeMap(dc);
+ scratch.resize(declToBit.size());
+}
+
+llvm::BitVector &CFGBlockValues::lazyCreate(llvm::BitVector *&bv) {
+ if (!bv)
+ bv = new llvm::BitVector(declToBit.size());
+ return *bv;
+}
+
+/// Pattern-matches for a '&&' or '||' appearing at the beginning of a
+/// CFGBlock with two predecessors, where the operator is either the block's
+/// terminator condition or its only element. If such an expression is found,
+/// it is returned.
+static BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) {
+ if (block->empty())
+ return 0;
+
+ CFGStmt cstmt = block->front().getAs<CFGStmt>();
+ BinaryOperator *b = llvm::dyn_cast_or_null<BinaryOperator>(cstmt.getStmt());
+
+ if (!b || !b->isLogicalOp())
+ return 0;
+
+ if (block->pred_size() == 2 &&
+ ((block->succ_size() == 2 && block->getTerminatorCondition() == b) ||
+ block->size() == 1))
+ return b;
+
+ return 0;
+}
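+
+// Editorial example (an assumed CFG shape, not from the original commit): in
+// a short-circuit chain such as 'x && y && z', an intermediate block whose
+// only element is the '&&' BinaryOperator ends up with two predecessors and
+// is matched above; runOnBlock (later in this file) uses this to keep
+// separate "true branch" and "false branch" bitvectors for such blocks.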
+
+llvm::BitVector &CFGBlockValues::getBitVector(const CFGBlock *block,
+ const CFGBlock *dstBlock) {
+ unsigned idx = block->getBlockID();
+ if (dstBlock && getLogicalOperatorInChain(block)) {
+ if (*block->succ_begin() == dstBlock)
+ return lazyCreate(vals[idx].first);
+ assert(*(block->succ_begin()+1) == dstBlock);
+ return lazyCreate(vals[idx].second);
+ }
+
+ assert(vals[idx].second == 0);
+ return lazyCreate(vals[idx].first);
+}
+
+BVPair &CFGBlockValues::getBitVectors(const clang::CFGBlock *block,
+ bool shouldLazyCreate) {
+ unsigned idx = block->getBlockID();
+ lazyCreate(vals[idx].first);
+ if (shouldLazyCreate)
+ lazyCreate(vals[idx].second);
+ return vals[idx];
+}
+
+void CFGBlockValues::mergeIntoScratch(llvm::BitVector const &source,
+ bool isFirst) {
+ if (isFirst)
+ scratch = source;
+ else
+ scratch |= source;
+}
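+
+// Editorial note: because Uninitialized is encoded as 'true' (see the
+// constants later in this file), OR-merging predecessor vectors yields
+// may-be-uninitialized semantics -- a variable is flagged if any incoming
+// path leaves it unset:
+//   int x;
+//   if (cond) x = 1;
+//   use(x); // flagged: x is still Uninitialized on the fall-through path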
+#if 0
+static void printVector(const CFGBlock *block, llvm::BitVector &bv,
+ unsigned num) {
+
+ llvm::errs() << block->getBlockID() << " :";
+ for (unsigned i = 0; i < bv.size(); ++i) {
+ llvm::errs() << ' ' << bv[i];
+ }
+ llvm::errs() << " : " << num << '\n';
+}
+#endif
+
+bool CFGBlockValues::updateBitVectorWithScratch(const CFGBlock *block) {
+ llvm::BitVector &dst = getBitVector(block, 0);
+ bool changed = (dst != scratch);
+ if (changed)
+ dst = scratch;
+#if 0
+ printVector(block, scratch, 0);
+#endif
+ return changed;
+}
+
+bool CFGBlockValues::updateBitVectors(const CFGBlock *block,
+ const BVPair &newVals) {
+ BVPair &vals = getBitVectors(block, true);
+ bool changed = *newVals.first != *vals.first ||
+ *newVals.second != *vals.second;
+ *vals.first = *newVals.first;
+ *vals.second = *newVals.second;
+#if 0
+ printVector(block, *vals.first, 1);
+ printVector(block, *vals.second, 2);
+#endif
+ return changed;
+}
+
+void CFGBlockValues::resetScratch() {
+ scratch.reset();
+}
+
+llvm::BitVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
+ const llvm::Optional<unsigned> &idx = declToBit.getBitVectorIndex(vd);
+ assert(idx.hasValue());
+ return scratch[idx.getValue()];
+}
+
+//===----------------------------------------------------------------------===//
+// Worklist: worklist for dataflow analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DataflowWorklist {
+ llvm::SmallVector<const CFGBlock *, 20> worklist;
+ llvm::BitVector enqueuedBlocks;
+public:
+ DataflowWorklist(const CFG &cfg) : enqueuedBlocks(cfg.getNumBlockIDs()) {}
+
+ void enqueue(const CFGBlock *block);
+ void enqueueSuccessors(const CFGBlock *block);
+ const CFGBlock *dequeue();
+
+};
+}
+
+void DataflowWorklist::enqueue(const CFGBlock *block) {
+ if (!block)
+ return;
+ unsigned idx = block->getBlockID();
+ if (enqueuedBlocks[idx])
+ return;
+ worklist.push_back(block);
+ enqueuedBlocks[idx] = true;
+}
+
+void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+ for (CFGBlock::const_succ_iterator I = block->succ_begin(),
+ E = block->succ_end(); I != E; ++I) {
+ enqueue(*I);
+ }
+}
+
+const CFGBlock *DataflowWorklist::dequeue() {
+ if (worklist.empty())
+ return 0;
+ const CFGBlock *b = worklist.back();
+ worklist.pop_back();
+ enqueuedBlocks[b->getBlockID()] = false;
+ return b;
+}
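+
+// Editorial sketch (an assumption; the real driver is runOnBlock plus
+// runUninitializedVariablesAnalysis later in this file) of the classic
+// worklist loop these helpers support:
+//   worklist.enqueueSuccessors(&cfg.getEntry());
+//   while (const CFGBlock *block = worklist.dequeue())
+//     if (processBlock(block)) // hypothetical transfer-and-merge step
+//       worklist.enqueueSuccessors(block);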
+
+//===----------------------------------------------------------------------===//
+// Transfer function for uninitialized values analysis.
+//===----------------------------------------------------------------------===//
+
+static const bool Initialized = false;
+static const bool Uninitialized = true;
+
+namespace {
+class FindVarResult {
+ const VarDecl *vd;
+ const DeclRefExpr *dr;
+public:
+ FindVarResult(VarDecl *vd, DeclRefExpr *dr) : vd(vd), dr(dr) {}
+
+ const DeclRefExpr *getDeclRefExpr() const { return dr; }
+ const VarDecl *getDecl() const { return vd; }
+};
+
+class TransferFunctions : public CFGRecStmtVisitor<TransferFunctions> {
+ CFGBlockValues &vals;
+ const CFG &cfg;
+ AnalysisContext &ac;
+ UninitVariablesHandler *handler;
+ const DeclRefExpr *currentDR;
+ const Expr *currentVoidCast;
+ const bool flagBlockUses;
+public:
+ TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
+ AnalysisContext &ac,
+ UninitVariablesHandler *handler,
+ bool flagBlockUses)
+ : vals(vals), cfg(cfg), ac(ac), handler(handler), currentDR(0),
+ currentVoidCast(0), flagBlockUses(flagBlockUses) {}
+
+ const CFG &getCFG() { return cfg; }
+ void reportUninit(const DeclRefExpr *ex, const VarDecl *vd);
+
+ void VisitBlockExpr(BlockExpr *be);
+ void VisitDeclStmt(DeclStmt *ds);
+ void VisitDeclRefExpr(DeclRefExpr *dr);
+ void VisitUnaryOperator(UnaryOperator *uo);
+ void VisitBinaryOperator(BinaryOperator *bo);
+ void VisitCastExpr(CastExpr *ce);
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *se);
+ void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs);
+
+ bool isTrackedVar(const VarDecl *vd) {
+ return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
+ }
+
+ FindVarResult findBlockVarDecl(Expr *ex);
+};
+}
+
+void TransferFunctions::reportUninit(const DeclRefExpr *ex,
+ const VarDecl *vd) {
+ if (handler) handler->handleUseOfUninitVariable(ex, vd);
+}
+
+FindVarResult TransferFunctions::findBlockVarDecl(Expr* ex) {
+ if (DeclRefExpr* dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts()))
+ if (VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
+ if (isTrackedVar(vd))
+ return FindVarResult(vd, dr);
+ return FindVarResult(0, 0);
+}
+
+void TransferFunctions::BlockStmt_VisitObjCForCollectionStmt(
+ ObjCForCollectionStmt *fs) {
+
+ Visit(fs->getCollection());
+
+ // This represents an initialization of the 'element' value.
+ Stmt *element = fs->getElement();
+ const VarDecl* vd = 0;
+
+ if (DeclStmt* ds = dyn_cast<DeclStmt>(element)) {
+ vd = cast<VarDecl>(ds->getSingleDecl());
+ if (!isTrackedVar(vd))
+ vd = 0;
+ }
+ else {
+ // Initialize the value of the reference variable.
+ const FindVarResult &res = findBlockVarDecl(cast<Expr>(element));
+ vd = res.getDecl();
+ if (!vd) {
+ Visit(element);
+ return;
+ }
+ }
+
+ if (vd)
+ vals[vd] = Initialized;
+}
+
+void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
+ if (!flagBlockUses || !handler)
+ return;
+ AnalysisContext::referenced_decls_iterator i, e;
+ llvm::tie(i, e) = ac.getReferencedBlockVars(be->getBlockDecl());
+ for ( ; i != e; ++i) {
+ const VarDecl *vd = *i;
+ if (vd->getAttr<BlocksAttr>() || !vd->hasLocalStorage() ||
+ !isTrackedVar(vd))
+ continue;
+ if (vals[vd] == Uninitialized)
+ handler->handleUseOfUninitVariable(be, vd);
+ }
+}
+
+void TransferFunctions::VisitDeclStmt(DeclStmt *ds) {
+ for (DeclStmt::decl_iterator DI = ds->decl_begin(), DE = ds->decl_end();
+ DI != DE; ++DI) {
+ if (VarDecl *vd = dyn_cast<VarDecl>(*DI)) {
+ if (isTrackedVar(vd)) {
+ vals[vd] = Uninitialized;
+ if (Stmt *init = vd->getInit()) {
+ Visit(init);
+ vals[vd] = Initialized;
+ }
+ }
+ else if (Stmt *init = vd->getInit()) {
+ Visit(init);
+ }
+ }
+ }
+}
+
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
+ // We assume that DeclRefExprs wrapped in an lvalue-to-rvalue cast
+ // cannot be block-level expressions. Therefore, we determine if
+ // a DeclRefExpr is involved in a "load" by comparing it to the current
+ // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr.
+ // If a DeclRefExpr is not involved in a load, we are essentially computing
+ // its address, either for assignment to a reference or via the '&' operator.
+ // In such cases, treat the variable as being initialized, since this
+ // analysis isn't powerful enough to do alias tracking.
+ if (dr != currentDR)
+ if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
+ if (isTrackedVar(vd))
+ vals[vd] = Initialized;
+}
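+
+// Editorial example of the address-of case described above:
+//   int x;
+//   int *p = &x; // 'x' is conservatively marked Initialized here
+//   use(*p);     // not flagged: the analysis does no alias tracking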
+
+void TransferFunctions::VisitBinaryOperator(clang::BinaryOperator *bo) {
+ if (bo->isAssignmentOp()) {
+ const FindVarResult &res = findBlockVarDecl(bo->getLHS());
+ if (const VarDecl* vd = res.getDecl()) {
+ // We assume that DeclRefExprs wrapped in a BinaryOperator "assignment"
+ // cannot be block-level expressions. Therefore, we determine if
+ // a DeclRefExpr is involved in a "load" by comparing it to the current
+ // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr.
+ SaveAndRestore<const DeclRefExpr*> lastDR(currentDR,
+ res.getDeclRefExpr());
+ Visit(bo->getRHS());
+ Visit(bo->getLHS());
+
+ llvm::BitVector::reference bit = vals[vd];
+ if (bit == Uninitialized) {
+ if (bo->getOpcode() != BO_Assign)
+ reportUninit(res.getDeclRefExpr(), vd);
+ bit = Initialized;
+ }
+ return;
+ }
+ }
+ Visit(bo->getRHS());
+ Visit(bo->getLHS());
+}
+
+void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) {
+ switch (uo->getOpcode()) {
+ case clang::UO_PostDec:
+ case clang::UO_PostInc:
+ case clang::UO_PreDec:
+ case clang::UO_PreInc: {
+ const FindVarResult &res = findBlockVarDecl(uo->getSubExpr());
+ if (const VarDecl *vd = res.getDecl()) {
+ // We assume that DeclRefExprs wrapped in a unary operator ++/--
+ // cannot be block-level expressions. Therefore, we determine if
+ // a DeclRefExpr is involved in a "load" by comparing it to the current
+ // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr.
+ SaveAndRestore<const DeclRefExpr*> lastDR(currentDR,
+ res.getDeclRefExpr());
+ Visit(uo->getSubExpr());
+
+ llvm::BitVector::reference bit = vals[vd];
+ if (bit == Uninitialized) {
+ reportUninit(res.getDeclRefExpr(), vd);
+ bit = Initialized;
+ }
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ Visit(uo->getSubExpr());
+}
+
+void TransferFunctions::VisitCastExpr(clang::CastExpr *ce) {
+ if (ce->getCastKind() == CK_LValueToRValue) {
+ const FindVarResult &res = findBlockVarDecl(ce->getSubExpr());
+ if (const VarDecl *vd = res.getDecl()) {
+ // We assume that DeclRefExprs wrapped in an lvalue-to-rvalue cast
+ // cannot be block-level expressions. Therefore, we determine if
+ // a DeclRefExpr is involved in a "load" by comparing it to the current
+ // DeclRefExpr found when analyzing the last lvalue-to-rvalue CastExpr.
+ // Here we update 'currentDR' to be the one associated with this
+ // lvalue-to-rvalue cast. Then, when we analyze the DeclRefExpr, we
+ // will know that we are not computing its lvalue for other purposes
+ // than to perform a load.
+ SaveAndRestore<const DeclRefExpr*> lastDR(currentDR,
+ res.getDeclRefExpr());
+ Visit(ce->getSubExpr());
+ if (currentVoidCast != ce && vals[vd] == Uninitialized) {
+ reportUninit(res.getDeclRefExpr(), vd);
+ // Don't cascade warnings.
+ vals[vd] = Initialized;
+ }
+ return;
+ }
+ }
+ else if (CStyleCastExpr *cse = dyn_cast<CStyleCastExpr>(ce)) {
+ if (cse->getType()->isVoidType()) {
+ // e.g. (void) x;
+ SaveAndRestore<const Expr *>
+ lastVoidCast(currentVoidCast, cse->getSubExpr()->IgnoreParens());
+ Visit(cse->getSubExpr());
+ return;
+ }
+ }
+ Visit(ce->getSubExpr());
+}
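+
+// Editorial example: the void-cast handling above implements the common
+// "deliberately unused" idiom --
+//   int x;
+//   (void) x; // not flagged: the load matches currentVoidCast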
+
+void TransferFunctions::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *se) {
+ if (se->isSizeOf()) {
+ if (se->getType()->isConstantSizeType())
+ return;
+ // Handle VLAs.
+ Visit(se->getArgumentExpr());
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// High-level "driver" logic for uninitialized values analysis.
+//===----------------------------------------------------------------------===//
+
+static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
+ AnalysisContext &ac, CFGBlockValues &vals,
+ UninitVariablesHandler *handler = 0,
+ bool flagBlockUses = false) {
+
+ if (const BinaryOperator *b = getLogicalOperatorInChain(block)) {
+ CFGBlock::const_pred_iterator itr = block->pred_begin();
+ BVPair vA = vals.getBitVectors(*itr, false);
+ ++itr;
+ BVPair vB = vals.getBitVectors(*itr, false);
+
+ BVPair valsAB;
+
+ if (b->getOpcode() == BO_LAnd) {
+ // Merge the 'F' bits from the first and second.
+ vals.mergeIntoScratch(*(vA.second ? vA.second : vA.first), true);
+ vals.mergeIntoScratch(*(vB.second ? vB.second : vB.first), false);
+ valsAB.first = vA.first;
+ valsAB.second = &vals.getScratch();
+ }
+ else {
+ // Merge the 'T' bits from the first and second.
+ assert(b->getOpcode() == BO_LOr);
+ vals.mergeIntoScratch(*vA.first, true);
+ vals.mergeIntoScratch(*vB.first, false);
+ valsAB.first = &vals.getScratch();
+ valsAB.second = vA.second ? vA.second : vA.first;
+ }
+ return vals.updateBitVectors(block, valsAB);
+ }
+
+ // Default behavior: merge in values of predecessor blocks.
+ vals.resetScratch();
+ bool isFirst = true;
+ for (CFGBlock::const_pred_iterator I = block->pred_begin(),
+ E = block->pred_end(); I != E; ++I) {
+ vals.mergeIntoScratch(vals.getBitVector(*I, block), isFirst);
+ isFirst = false;
+ }
+ // Apply the transfer function.
+ TransferFunctions tf(vals, cfg, ac, handler, flagBlockUses);
+ for (CFGBlock::const_iterator I = block->begin(), E = block->end();
+ I != E; ++I) {
+ if (const CFGStmt *cs = dyn_cast<CFGStmt>(&*I)) {
+ tf.BlockStmt_Visit(cs->getStmt());
+ }
+ }
+ return vals.updateBitVectorWithScratch(block);
+}
+
+void clang::runUninitializedVariablesAnalysis(const DeclContext &dc,
+ const CFG &cfg,
+ AnalysisContext &ac,
+ UninitVariablesHandler &handler) {
+ CFGBlockValues vals(cfg);
+ vals.computeSetOfDeclarations(dc);
+ if (vals.hasNoDeclarations())
+ return;
+ DataflowWorklist worklist(cfg);
+ llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
+
+ worklist.enqueueSuccessors(&cfg.getEntry());
+
+ while (const CFGBlock *block = worklist.dequeue()) {
+ // Did the block change?
+ bool changed = runOnBlock(block, cfg, ac, vals);
+ if (changed || !previouslyVisited[block->getBlockID()])
+ worklist.enqueueSuccessors(block);
+ previouslyVisited[block->getBlockID()] = true;
+ }
+
+ // Run through the blocks one more time, and report uninitialized variables.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ runOnBlock(*BI, cfg, ac, vals, &handler, /* flagBlockUses */ true);
+ }
+}
+
+UninitVariablesHandler::~UninitVariablesHandler() {}
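+
+// Minimal usage sketch (editorial; the handler signature is assumed from the
+// calls above, and 'PrintingHandler' is hypothetical):
+//   class PrintingHandler : public UninitVariablesHandler {
+//   public:
+//     void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd) {
+//       llvm::errs() << vd->getNameAsString()
+//                    << " may be used uninitialized\n";
+//     }
+//   };
+//   PrintingHandler handler;
+//   runUninitializedVariablesAnalysis(dc, cfg, ac, handler);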
+
diff --git a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
index 040cdb5..845ae81 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
@@ -14,12 +14,14 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/LangOptions.h"
using namespace clang;
static const Builtin::Info BuiltinInfo[] = {
- { "not a builtin function", 0, 0, 0, false },
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+ { "not a builtin function", 0, 0, 0, ALL_LANGUAGES, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) { #ID, TYPE, ATTRS, HEADER,\
+ BUILTIN_LANG, false },
#include "clang/Basic/Builtins.def"
};
@@ -41,17 +43,20 @@ Builtin::Context::Context(const TargetInfo &Target) {
/// appropriate builtin ID # and mark any non-portable builtin identifiers as
/// such.
void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
- bool NoBuiltins) {
+ const LangOptions& LangOpts) {
// Step #1: mark all target-independent builtins with their ID's.
for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
if (!BuiltinInfo[i].Suppressed &&
- (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f')))
- Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
+ (!LangOpts.NoBuiltin || !strchr(BuiltinInfo[i].Attributes, 'f'))) {
+ if (LangOpts.ObjC1 ||
+ BuiltinInfo[i].builtin_lang != clang::OBJC_LANG)
+ Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
+ }
// Step #2: Register target-specific builtins.
for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
if (!TSRecords[i].Suppressed &&
- (!NoBuiltins ||
+ (!LangOpts.NoBuiltin ||
(TSRecords[i].Attributes &&
!strchr(TSRecords[i].Attributes, 'f'))))
Table.get(TSRecords[i].Name).setBuiltinID(i+Builtin::FirstTSBuiltin);
@@ -75,6 +80,10 @@ Builtin::Context::GetBuiltinNames(llvm::SmallVectorImpl<const char *> &Names,
Names.push_back(TSRecords[i].Name);
}
+void Builtin::Context::ForgetBuiltin(unsigned ID, IdentifierTable &Table) {
+ Table.get(GetRecord(ID).Name).setBuiltinID(0);
+}
+
bool
Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
bool &HasVAListArg) {
@@ -112,4 +121,3 @@ Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
return true;
}
-
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index d8095f4..31e3331 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -11,227 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTDiagnostic.h"
-#include "clang/Analysis/AnalysisDiagnostic.h"
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/PartialDiagnostic.h"
-#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Parse/ParseDiagnostic.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-
-#include <vector>
-#include <map>
-#include <cstring>
using namespace clang;
-//===----------------------------------------------------------------------===//
-// Builtin Diagnostic information
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-// Diagnostic classes.
-enum {
- CLASS_NOTE = 0x01,
- CLASS_WARNING = 0x02,
- CLASS_EXTENSION = 0x03,
- CLASS_ERROR = 0x04
-};
-
-struct StaticDiagInfoRec {
- unsigned short DiagID;
- unsigned Mapping : 3;
- unsigned Class : 3;
- bool SFINAE : 1;
- unsigned Category : 5;
-
- const char *Description;
- const char *OptionGroup;
-
- bool operator<(const StaticDiagInfoRec &RHS) const {
- return DiagID < RHS.DiagID;
- }
-};
-
-}
-
-static const StaticDiagInfoRec StaticDiagInfo[] = {
-#define DIAG(ENUM,CLASS,DEFAULT_MAPPING,DESC,GROUP,SFINAE, CATEGORY) \
- { diag::ENUM, DEFAULT_MAPPING, CLASS, SFINAE, CATEGORY, DESC, GROUP },
-#include "clang/Basic/DiagnosticCommonKinds.inc"
-#include "clang/Basic/DiagnosticDriverKinds.inc"
-#include "clang/Basic/DiagnosticFrontendKinds.inc"
-#include "clang/Basic/DiagnosticLexKinds.inc"
-#include "clang/Basic/DiagnosticParseKinds.inc"
-#include "clang/Basic/DiagnosticASTKinds.inc"
-#include "clang/Basic/DiagnosticSemaKinds.inc"
-#include "clang/Basic/DiagnosticAnalysisKinds.inc"
- { 0, 0, 0, 0, 0, 0, 0}
-};
-#undef DIAG
-
-/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
-/// or null if the ID is invalid.
-static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
- unsigned NumDiagEntries = sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
-
- // If assertions are enabled, verify that the StaticDiagInfo array is sorted.
-#ifndef NDEBUG
- static bool IsFirst = true;
- if (IsFirst) {
- for (unsigned i = 1; i != NumDiagEntries; ++i) {
- assert(StaticDiagInfo[i-1].DiagID != StaticDiagInfo[i].DiagID &&
- "Diag ID conflict, the enums at the start of clang::diag (in "
- "Diagnostic.h) probably need to be increased");
-
- assert(StaticDiagInfo[i-1] < StaticDiagInfo[i] &&
- "Improperly sorted diag info");
- }
- IsFirst = false;
- }
-#endif
-
- // Search the diagnostic table with a binary search.
- StaticDiagInfoRec Find = { DiagID, 0, 0, 0, 0, 0, 0 };
-
- const StaticDiagInfoRec *Found =
- std::lower_bound(StaticDiagInfo, StaticDiagInfo + NumDiagEntries, Find);
- if (Found == StaticDiagInfo + NumDiagEntries ||
- Found->DiagID != DiagID)
- return 0;
-
- return Found;
-}
-
-static unsigned GetDefaultDiagMapping(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->Mapping;
- return diag::MAP_FATAL;
-}
-
-/// getWarningOptionForDiag - Return the lowest-level warning option that
-/// enables the specified diagnostic. If there is no -Wfoo flag that controls
-/// the diagnostic, this returns null.
-const char *Diagnostic::getWarningOptionForDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->OptionGroup;
- return 0;
-}
-
-/// getWarningOptionForDiag - Return the category number that a specified
-/// DiagID belongs to, or 0 if no category.
-unsigned Diagnostic::getCategoryNumberForDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->Category;
- return 0;
-}
-
-/// getCategoryNameFromID - Given a category ID, return the name of the
-/// category, an empty string if CategoryID is zero, or null if CategoryID is
-/// invalid.
-const char *Diagnostic::getCategoryNameFromID(unsigned CategoryID) {
- // Second the table of options, sorted by name for fast binary lookup.
- static const char *CategoryNameTable[] = {
-#define GET_CATEGORY_TABLE
-#define CATEGORY(X) X,
-#include "clang/Basic/DiagnosticGroups.inc"
-#undef GET_CATEGORY_TABLE
- "<<END>>"
- };
- static const size_t CategoryNameTableSize =
- sizeof(CategoryNameTable) / sizeof(CategoryNameTable[0])-1;
-
- if (CategoryID >= CategoryNameTableSize) return 0;
- return CategoryNameTable[CategoryID];
-}
-
-
-
-Diagnostic::SFINAEResponse
-Diagnostic::getDiagnosticSFINAEResponse(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) {
- if (!Info->SFINAE)
- return SFINAE_Report;
-
- if (Info->Class == CLASS_ERROR)
- return SFINAE_SubstitutionFailure;
-
- // Suppress notes, warnings, and extensions;
- return SFINAE_Suppress;
- }
-
- return SFINAE_Report;
-}
-
-/// getDiagClass - Return the class field of the diagnostic.
-///
-static unsigned getBuiltinDiagClass(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->Class;
- return ~0U;
-}
-
-//===----------------------------------------------------------------------===//
-// Custom Diagnostic information
-//===----------------------------------------------------------------------===//
-
-namespace clang {
- namespace diag {
- class CustomDiagInfo {
- typedef std::pair<Diagnostic::Level, std::string> DiagDesc;
- std::vector<DiagDesc> DiagInfo;
- std::map<DiagDesc, unsigned> DiagIDs;
- public:
-
- /// getDescription - Return the description of the specified custom
- /// diagnostic.
- const char *getDescription(unsigned DiagID) const {
- assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
- "Invalid diagnosic ID");
- return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second.c_str();
- }
-
- /// getLevel - Return the level of the specified custom diagnostic.
- Diagnostic::Level getLevel(unsigned DiagID) const {
- assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
- "Invalid diagnosic ID");
- return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first;
- }
-
- unsigned getOrCreateDiagID(Diagnostic::Level L, llvm::StringRef Message,
- Diagnostic &Diags) {
- DiagDesc D(L, Message);
- // Check to see if it already exists.
- std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
- if (I != DiagIDs.end() && I->first == D)
- return I->second;
-
- // If not, assign a new ID.
- unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT;
- DiagIDs.insert(std::make_pair(D, ID));
- DiagInfo.push_back(D);
- return ID;
- }
- };
-
- } // end diag namespace
-} // end clang namespace
-
-
-//===----------------------------------------------------------------------===//
-// Common Diagnostic implementation
-//===----------------------------------------------------------------------===//
-
static void DummyArgToStringFn(Diagnostic::ArgumentKind AK, intptr_t QT,
const char *Modifier, unsigned ML,
const char *Argument, unsigned ArgLen,
@@ -244,7 +30,10 @@ static void DummyArgToStringFn(Diagnostic::ArgumentKind AK, intptr_t QT,
}
-Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
+Diagnostic::Diagnostic(const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &diags,
+ DiagnosticClient *client, bool ShouldOwnClient)
+ : Diags(diags), Client(client), OwnsDiagClient(ShouldOwnClient),
+ SourceMgr(0) {
ArgToStringFn = DummyArgToStringFn;
ArgToStringCookie = 0;
@@ -259,72 +48,41 @@ Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
ErrorLimit = 0;
TemplateBacktraceLimit = 0;
- CustomDiagInfo = 0;
- // Set all mappings to 'unset'.
- DiagMappingsStack.clear();
- DiagMappingsStack.push_back(DiagMappings());
+ // Create a DiagState and DiagStatePoint representing diagnostic changes
+ // through command-line.
+ DiagStates.push_back(DiagState());
+ PushDiagStatePoint(&DiagStates.back(), SourceLocation());
Reset();
}
Diagnostic::~Diagnostic() {
- delete CustomDiagInfo;
-}
-
-
-void Diagnostic::pushMappings() {
- // Avoids undefined behavior when the stack has to resize.
- DiagMappingsStack.reserve(DiagMappingsStack.size() + 1);
- DiagMappingsStack.push_back(DiagMappingsStack.back());
-}
-
-bool Diagnostic::popMappings() {
- if (DiagMappingsStack.size() == 1)
- return false;
-
- DiagMappingsStack.pop_back();
- return true;
-}
-
-/// getCustomDiagID - Return an ID for a diagnostic with the specified message
-/// and level. If this is the first request for this diagnosic, it is
-/// registered and created, otherwise the existing ID is returned.
-unsigned Diagnostic::getCustomDiagID(Level L, llvm::StringRef Message) {
- if (CustomDiagInfo == 0)
- CustomDiagInfo = new diag::CustomDiagInfo();
- return CustomDiagInfo->getOrCreateDiagID(L, Message, *this);
+ if (OwnsDiagClient)
+ delete Client;
}
-
-/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
-/// level of the specified diagnostic ID is a Warning or Extension.
-/// This only works on builtin diagnostics, not custom ones, and is not legal to
-/// call on NOTEs.
-bool Diagnostic::isBuiltinWarningOrExtension(unsigned DiagID) {
- return DiagID < diag::DIAG_UPPER_LIMIT &&
- getBuiltinDiagClass(DiagID) != CLASS_ERROR;
+void Diagnostic::setClient(DiagnosticClient *client, bool ShouldOwnClient) {
+ if (OwnsDiagClient && Client)
+ delete Client;
+
+ Client = client;
+ OwnsDiagClient = ShouldOwnClient;
}
-/// \brief Determine whether the given built-in diagnostic ID is a
-/// Note.
-bool Diagnostic::isBuiltinNote(unsigned DiagID) {
- return DiagID < diag::DIAG_UPPER_LIMIT &&
- getBuiltinDiagClass(DiagID) == CLASS_NOTE;
+void Diagnostic::pushMappings(SourceLocation Loc) {
+ DiagStateOnPushStack.push_back(GetCurDiagState());
}
-/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
-/// ID is for an extension of some sort. This also returns EnabledByDefault,
-/// which is set to indicate whether the diagnostic is ignored by default (in
-/// which case -pedantic enables it) or treated as a warning/error by default.
-///
-bool Diagnostic::isBuiltinExtensionDiag(unsigned DiagID,
- bool &EnabledByDefault) {
- if (DiagID >= diag::DIAG_UPPER_LIMIT ||
- getBuiltinDiagClass(DiagID) != CLASS_EXTENSION)
+bool Diagnostic::popMappings(SourceLocation Loc) {
+ if (DiagStateOnPushStack.empty())
return false;
-
- EnabledByDefault = GetDefaultDiagMapping(DiagID) != diag::MAP_IGNORE;
+
+ if (DiagStateOnPushStack.back() != GetCurDiagState()) {
+ // State changed at some point between push/pop.
+ PushDiagStatePoint(DiagStateOnPushStack.back(), Loc);
+ }
+ DiagStateOnPushStack.pop_back();
return true;
}
@@ -336,18 +94,14 @@ void Diagnostic::Reset() {
NumErrors = 0;
NumErrorsSuppressed = 0;
CurDiagID = ~0U;
- LastDiagLevel = Ignored;
+ // Set LastDiagLevel to an "unset" state. If we set it to 'Ignored', notes
+ // from a Diagnostic associated with one translation unit that follow
+ // diagnostics from a Diagnostic associated with another translation unit
+ // would not be displayed.
+ LastDiagLevel = (DiagnosticIDs::Level)-1;
DelayedDiagID = 0;
}
-/// getDescription - Given a diagnostic ID, return a description of the
-/// issue.
-const char *Diagnostic::getDescription(unsigned DiagID) const {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->Description;
- return CustomDiagInfo->getDescription(DiagID);
-}
-
void Diagnostic::SetDelayedDiagnostic(unsigned DiagID, llvm::StringRef Arg1,
llvm::StringRef Arg2) {
if (DelayedDiagID)
@@ -365,264 +119,96 @@ void Diagnostic::ReportDelayed() {
DelayedDiagArg2.clear();
}
-/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
-/// object, classify the specified diagnostic ID into a Level, consumable by
-/// the DiagnosticClient.
-Diagnostic::Level Diagnostic::getDiagnosticLevel(unsigned DiagID) const {
- // Handle custom diagnostics, which cannot be mapped.
- if (DiagID >= diag::DIAG_UPPER_LIMIT)
- return CustomDiagInfo->getLevel(DiagID);
-
- unsigned DiagClass = getBuiltinDiagClass(DiagID);
- assert(DiagClass != CLASS_NOTE && "Cannot get diagnostic level of a note!");
- return getDiagnosticLevel(DiagID, DiagClass);
-}
-
-/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
-/// object, classify the specified diagnostic ID into a Level, consumable by
-/// the DiagnosticClient.
-Diagnostic::Level
-Diagnostic::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass) const {
- // Specific non-error diagnostics may be mapped to various levels from ignored
- // to error. Errors can only be mapped to fatal.
- Diagnostic::Level Result = Diagnostic::Fatal;
-
- // Get the mapping information, if unset, compute it lazily.
- unsigned MappingInfo = getDiagnosticMappingInfo((diag::kind)DiagID);
- if (MappingInfo == 0) {
- MappingInfo = GetDefaultDiagMapping(DiagID);
- setDiagnosticMappingInternal(DiagID, MappingInfo, false);
- }
-
- switch (MappingInfo & 7) {
- default: assert(0 && "Unknown mapping!");
- case diag::MAP_IGNORE:
- // Ignore this, unless this is an extension diagnostic and we're mapping
- // them onto warnings or errors.
- if (!isBuiltinExtensionDiag(DiagID) || // Not an extension
- ExtBehavior == Ext_Ignore || // Extensions ignored anyway
- (MappingInfo & 8) != 0) // User explicitly mapped it.
- return Diagnostic::Ignored;
- Result = Diagnostic::Warning;
- if (ExtBehavior == Ext_Error) Result = Diagnostic::Error;
- if (Result == Diagnostic::Error && ErrorsAsFatal)
- Result = Diagnostic::Fatal;
- break;
- case diag::MAP_ERROR:
- Result = Diagnostic::Error;
- if (ErrorsAsFatal)
- Result = Diagnostic::Fatal;
- break;
- case diag::MAP_FATAL:
- Result = Diagnostic::Fatal;
- break;
- case diag::MAP_WARNING:
- // If warnings are globally mapped to ignore or error, do it.
- if (IgnoreAllWarnings)
- return Diagnostic::Ignored;
-
- Result = Diagnostic::Warning;
-
- // If this is an extension diagnostic and we're in -pedantic-error mode, and
- // if the user didn't explicitly map it, upgrade to an error.
- if (ExtBehavior == Ext_Error &&
- (MappingInfo & 8) == 0 &&
- isBuiltinExtensionDiag(DiagID))
- Result = Diagnostic::Error;
-
- if (WarningsAsErrors)
- Result = Diagnostic::Error;
- if (Result == Diagnostic::Error && ErrorsAsFatal)
- Result = Diagnostic::Fatal;
- break;
-
- case diag::MAP_WARNING_NO_WERROR:
- // Diagnostics specified with -Wno-error=foo should be set to warnings, but
- // not be adjusted by -Werror or -pedantic-errors.
- Result = Diagnostic::Warning;
-
- // If warnings are globally mapped to ignore or error, do it.
- if (IgnoreAllWarnings)
- return Diagnostic::Ignored;
-
- break;
-
- case diag::MAP_ERROR_NO_WFATAL:
- // Diagnostics specified as -Wno-fatal-error=foo should be errors, but
- // unaffected by -Wfatal-errors.
- Result = Diagnostic::Error;
- break;
- }
-
- // Okay, we're about to return this as a "diagnostic to emit" one last check:
- // if this is any sort of extension warning, and if we're in an __extension__
- // block, silence it.
- if (AllExtensionsSilenced && isBuiltinExtensionDiag(DiagID))
- return Diagnostic::Ignored;
-
- return Result;
-}
-
-struct WarningOption {
- const char *Name;
- const short *Members;
- const short *SubGroups;
-};
-
-#define GET_DIAG_ARRAYS
-#include "clang/Basic/DiagnosticGroups.inc"
-#undef GET_DIAG_ARRAYS
-
-// Second the table of options, sorted by name for fast binary lookup.
-static const WarningOption OptionTable[] = {
-#define GET_DIAG_TABLE
-#include "clang/Basic/DiagnosticGroups.inc"
-#undef GET_DIAG_TABLE
-};
-static const size_t OptionTableSize =
-sizeof(OptionTable) / sizeof(OptionTable[0]);
-
-static bool WarningOptionCompare(const WarningOption &LHS,
- const WarningOption &RHS) {
- return strcmp(LHS.Name, RHS.Name) < 0;
-}
-
-static void MapGroupMembers(const WarningOption *Group, diag::Mapping Mapping,
- Diagnostic &Diags) {
- // Option exists, poke all the members of its diagnostic set.
- if (const short *Member = Group->Members) {
- for (; *Member != -1; ++Member)
- Diags.setDiagnosticMapping(*Member, Mapping);
- }
+Diagnostic::DiagStatePointsTy::iterator
+Diagnostic::GetDiagStatePointForLoc(SourceLocation L) const {
+ assert(!DiagStatePoints.empty());
+ assert(DiagStatePoints.front().Loc.isInvalid() &&
+ "Should have created a DiagStatePoint for command-line");
- // Enable/disable all subgroups along with this one.
- if (const short *SubGroups = Group->SubGroups) {
- for (; *SubGroups != (short)-1; ++SubGroups)
- MapGroupMembers(&OptionTable[(short)*SubGroups], Mapping, Diags);
- }
-}
+ FullSourceLoc Loc(L, *SourceMgr);
+ if (Loc.isInvalid())
+ return DiagStatePoints.end() - 1;
-/// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
-/// "unknown-pragmas" to have the specified mapping. This returns true and
-/// ignores the request if "Group" was unknown, false otherwise.
-bool Diagnostic::setDiagnosticGroupMapping(const char *Group,
- diag::Mapping Map) {
-
- WarningOption Key = { Group, 0, 0 };
- const WarningOption *Found =
- std::lower_bound(OptionTable, OptionTable + OptionTableSize, Key,
- WarningOptionCompare);
- if (Found == OptionTable + OptionTableSize ||
- strcmp(Found->Name, Group) != 0)
- return true; // Option not found.
-
- MapGroupMembers(Found, Map, *this);
- return false;
+ DiagStatePointsTy::iterator Pos = DiagStatePoints.end();
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+ if (LastStateChangePos.isValid() &&
+ Loc.isBeforeInTranslationUnitThan(LastStateChangePos))
+ Pos = std::upper_bound(DiagStatePoints.begin(), DiagStatePoints.end(),
+ DiagStatePoint(0, Loc));
+ --Pos;
+ return Pos;
}
-
-/// ProcessDiag - This is the method used to report a diagnostic that is
-/// finally fully formed.
-bool Diagnostic::ProcessDiag() {
- DiagnosticInfo Info(this);
-
- if (SuppressAllDiagnostics)
- return false;
-
- // Figure out the diagnostic level of this message.
- Diagnostic::Level DiagLevel;
- unsigned DiagID = Info.getID();
-
- // ShouldEmitInSystemHeader - True if this diagnostic should be produced even
- // in a system header.
- bool ShouldEmitInSystemHeader;
-
- if (DiagID >= diag::DIAG_UPPER_LIMIT) {
- // Handle custom diagnostics, which cannot be mapped.
- DiagLevel = CustomDiagInfo->getLevel(DiagID);
-
- // Custom diagnostics always are emitted in system headers.
- ShouldEmitInSystemHeader = true;
- } else {
- // Get the class of the diagnostic. If this is a NOTE, map it onto whatever
- // the diagnostic level was for the previous diagnostic so that it is
- // filtered the same as the previous diagnostic.
- unsigned DiagClass = getBuiltinDiagClass(DiagID);
- if (DiagClass == CLASS_NOTE) {
- DiagLevel = Diagnostic::Note;
- ShouldEmitInSystemHeader = false; // extra consideration is needed
- } else {
- // If this is not an error and we are in a system header, we ignore it.
- // Check the original Diag ID here, because we also want to ignore
- // extensions and warnings in -Werror and -pedantic-errors modes, which
- // *map* warnings/extensions to errors.
- ShouldEmitInSystemHeader = DiagClass == CLASS_ERROR;
-
- DiagLevel = getDiagnosticLevel(DiagID, DiagClass);
- }
- }
-
- if (DiagLevel != Diagnostic::Note) {
- // Record that a fatal error occurred only when we see a second
- // non-note diagnostic. This allows notes to be attached to the
- // fatal error, but suppresses any diagnostics that follow those
- // notes.
- if (LastDiagLevel == Diagnostic::Fatal)
- FatalErrorOccurred = true;
-
- LastDiagLevel = DiagLevel;
+/// \brief This allows the client to specify that certain warnings are
+/// ignored. Notes can never be mapped, errors can only be mapped to fatal,
+/// and WARNINGs and EXTENSIONs can be mapped arbitrarily.
+///
+/// \param L The source location at which this change of diagnostic state
+/// should take effect. It can be invalid if we are setting the latest state.
+void Diagnostic::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
+ SourceLocation L) {
+ assert(Diag < diag::DIAG_UPPER_LIMIT &&
+ "Can only map builtin diagnostics");
+ assert((Diags->isBuiltinWarningOrExtension(Diag) ||
+ (Map == diag::MAP_FATAL || Map == diag::MAP_ERROR)) &&
+ "Cannot map errors into warnings!");
+ assert(!DiagStatePoints.empty());
+
+ bool isPragma = L.isValid();
+ FullSourceLoc Loc(L, *SourceMgr);
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+
+ // Common case; setting all the diagnostics of a group in one place.
+ if (Loc.isInvalid() || Loc == LastStateChangePos) {
+ setDiagnosticMappingInternal(Diag, Map, GetCurDiagState(), true, isPragma);
+ return;
}
- // If a fatal error has already been emitted, silence all subsequent
- // diagnostics.
- if (FatalErrorOccurred) {
- if (DiagLevel >= Diagnostic::Error && Client->IncludeInDiagnosticCounts()) {
- ++NumErrors;
- ++NumErrorsSuppressed;
- }
-
- return false;
+ // Another common case; modifying diagnostic state in a source location
+ // after the previous one.
+ if ((Loc.isValid() && LastStateChangePos.isInvalid()) ||
+ LastStateChangePos.isBeforeInTranslationUnitThan(Loc)) {
+ // A diagnostic pragma occurred; create a new DiagState initialized with
+ // the current one and a new DiagStatePoint to record the location at
+ // which the new state became active.
+ DiagStates.push_back(*GetCurDiagState());
+ PushDiagStatePoint(&DiagStates.back(), Loc);
+ setDiagnosticMappingInternal(Diag, Map, GetCurDiagState(), true, isPragma);
+ return;
}
- // If the client doesn't care about this message, don't issue it. If this is
- // a note and the last real diagnostic was ignored, ignore it too.
- if (DiagLevel == Diagnostic::Ignored ||
- (DiagLevel == Diagnostic::Note && LastDiagLevel == Diagnostic::Ignored))
- return false;
-
- // If this diagnostic is in a system header and is not a clang error, suppress
- // it.
- if (SuppressSystemWarnings && !ShouldEmitInSystemHeader &&
- Info.getLocation().isValid() &&
- Info.getLocation().getInstantiationLoc().isInSystemHeader() &&
- (DiagLevel != Diagnostic::Note || LastDiagLevel == Diagnostic::Ignored)) {
- LastDiagLevel = Diagnostic::Ignored;
- return false;
- }
+ // We allow setting the diagnostic state in arbitrary source order for
+ // completeness, but this should not actually happen in normal practice.
- if (DiagLevel >= Diagnostic::Error) {
- if (Client->IncludeInDiagnosticCounts()) {
- ErrorOccurred = true;
- ++NumErrors;
- }
+ DiagStatePointsTy::iterator Pos = GetDiagStatePointForLoc(Loc);
+ assert(Pos != DiagStatePoints.end());
- // If we've emitted a lot of errors, emit a fatal error after it to stop a
- // flood of bogus errors.
- if (ErrorLimit && NumErrors >= ErrorLimit &&
- DiagLevel == Diagnostic::Error)
- SetDelayedDiagnostic(diag::fatal_too_many_errors);
+ // Update all diagnostic states that are active after the given location.
+ for (DiagStatePointsTy::iterator
+ I = Pos+1, E = DiagStatePoints.end(); I != E; ++I) {
+ setDiagnosticMappingInternal(Diag, Map, I->State, true, isPragma);
}
- // Finally, report it.
- Client->HandleDiagnostic(DiagLevel, Info);
- if (Client->IncludeInDiagnosticCounts()) {
- if (DiagLevel == Diagnostic::Warning)
- ++NumWarnings;
+ // If the location corresponds to an existing point, just update its state.
+ if (Pos->Loc == Loc) {
+ setDiagnosticMappingInternal(Diag, Map, Pos->State, true, isPragma);
+ return;
}
- CurDiagID = ~0U;
+ // Create a new state/point and fit it into the vector of DiagStatePoints
+ // so that the vector is always ordered according to location.
+ assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc));
+ DiagStates.push_back(*Pos->State);
+ DiagState *NewState = &DiagStates.back();
+ setDiagnosticMappingInternal(Diag, Map, NewState, true, isPragma);
+ DiagStatePoints.insert(Pos+1, DiagStatePoint(NewState,
+ FullSourceLoc(Loc, *SourceMgr)));
+}
- return true;
+void DiagnosticBuilder::FlushCounts() {
+ DiagObj->NumDiagArgs = NumArgs;
+ DiagObj->NumDiagRanges = NumRanges;
+ DiagObj->NumFixItHints = NumFixItHints;
}
bool DiagnosticBuilder::Emit() {
@@ -632,9 +218,7 @@ bool DiagnosticBuilder::Emit() {
// When emitting diagnostics, we set the final argument count into
// the Diagnostic object.
- DiagObj->NumDiagArgs = NumArgs;
- DiagObj->NumDiagRanges = NumRanges;
- DiagObj->NumFixItHints = NumFixItHints;
+ FlushCounts();
// Process the diagnostic, sending the accumulated information to the
// DiagnosticClient.
@@ -657,6 +241,16 @@ bool DiagnosticBuilder::Emit() {
DiagnosticClient::~DiagnosticClient() {}
+void DiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
+ const DiagnosticInfo &Info) {
+ if (!IncludeInDiagnosticCounts())
+ return;
+
+ if (DiagLevel == Diagnostic::Warning)
+ ++NumWarnings;
+ else if (DiagLevel >= Diagnostic::Error)
+ ++NumErrors;
+}
/// ModifierIs - Return true if the specified modifier matches specified string.
template <std::size_t StrLen>
@@ -855,7 +449,7 @@ static bool EvalPluralExpr(unsigned ValNo, const char *Start, const char *End) {
/// {1:form0|[2,4]:form1|:form2}
/// Polish (requires repeated form):
/// {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}
-static void HandlePluralModifier(unsigned ValNo,
+static void HandlePluralModifier(const DiagnosticInfo &DInfo, unsigned ValNo,
const char *Argument, unsigned ArgumentLen,
llvm::SmallVectorImpl<char> &OutStr) {
const char *ArgumentEnd = Argument + ArgumentLen;
@@ -869,7 +463,10 @@ static void HandlePluralModifier(unsigned ValNo,
if (EvalPluralExpr(ValNo, Argument, ExprEnd)) {
Argument = ExprEnd + 1;
ExprEnd = ScanFormat(Argument, ArgumentEnd, '|');
- OutStr.append(Argument, ExprEnd);
+
+ // Recursively format the result of the plural clause into the
+ // output string.
+ DInfo.FormatDiagnostic(Argument, ExprEnd, OutStr);
return;
}
Argument = ScanFormat(Argument, ArgumentEnd - 1, '|') + 1;
@@ -882,7 +479,7 @@ static void HandlePluralModifier(unsigned ValNo,
/// array.
void DiagnosticInfo::
FormatDiagnostic(llvm::SmallVectorImpl<char> &OutStr) const {
- const char *DiagStr = getDiags()->getDescription(getID());
+ const char *DiagStr = getDiags()->getDiagnosticIDs()->getDescription(getID());
const char *DiagEnd = DiagStr+strlen(DiagStr);
FormatDiagnostic(DiagStr, DiagEnd, OutStr);
@@ -971,11 +568,13 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
int Val = getArgSInt(ArgNo);
if (ModifierIs(Modifier, ModifierLen, "select")) {
- HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen, OutStr);
+ HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
} else if (ModifierIs(Modifier, ModifierLen, "s")) {
HandleIntegerSModifier(Val, OutStr);
} else if (ModifierIs(Modifier, ModifierLen, "plural")) {
- HandlePluralModifier((unsigned)Val, Argument, ArgumentLen, OutStr);
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
} else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
HandleOrdinalModifier((unsigned)Val, OutStr);
} else {
@@ -992,7 +591,8 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
} else if (ModifierIs(Modifier, ModifierLen, "s")) {
HandleIntegerSModifier(Val, OutStr);
} else if (ModifierIs(Modifier, ModifierLen, "plural")) {
- HandlePluralModifier((unsigned)Val, Argument, ArgumentLen, OutStr);
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
} else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
HandleOrdinalModifier(Val, OutStr);
} else {
@@ -1043,13 +643,18 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
StoredDiagnostic::StoredDiagnostic() { }
-StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
+StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level, unsigned ID,
llvm::StringRef Message)
- : Level(Level), Loc(), Message(Message) { }
+ : ID(ID), Level(Level), Loc(), Message(Message) { }
StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info)
- : Level(Level), Loc(Info.getLocation()) {
+ : ID(Info.getID()), Level(Level)
+{
+ assert((Info.getLocation().isInvalid() || Info.hasSourceManager()) &&
+ "Valid source location without setting a source manager for diagnostic");
+ if (Info.getLocation().isValid())
+ Loc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
llvm::SmallString<64> Message;
Info.FormatDiagnostic(Message);
this->Message.assign(Message.begin(), Message.end());
@@ -1065,251 +670,6 @@ StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
StoredDiagnostic::~StoredDiagnostic() { }
-static void WriteUnsigned(llvm::raw_ostream &OS, unsigned Value) {
- OS.write((const char *)&Value, sizeof(unsigned));
-}
-
-static void WriteString(llvm::raw_ostream &OS, llvm::StringRef String) {
- WriteUnsigned(OS, String.size());
- OS.write(String.data(), String.size());
-}
-
-static void WriteSourceLocation(llvm::raw_ostream &OS,
- SourceManager *SM,
- SourceLocation Location) {
- if (!SM || Location.isInvalid()) {
- // If we don't have a source manager or this location is invalid,
- // just write an invalid location.
- WriteUnsigned(OS, 0);
- WriteUnsigned(OS, 0);
- WriteUnsigned(OS, 0);
- return;
- }
-
- Location = SM->getInstantiationLoc(Location);
- std::pair<FileID, unsigned> Decomposed = SM->getDecomposedLoc(Location);
-
- const FileEntry *FE = SM->getFileEntryForID(Decomposed.first);
- if (FE)
- WriteString(OS, FE->getName());
- else {
- // Fallback to using the buffer name when there is no entry.
- WriteString(OS, SM->getBuffer(Decomposed.first)->getBufferIdentifier());
- }
-
- WriteUnsigned(OS, SM->getLineNumber(Decomposed.first, Decomposed.second));
- WriteUnsigned(OS, SM->getColumnNumber(Decomposed.first, Decomposed.second));
-}
-
-void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
- SourceManager *SM = 0;
- if (getLocation().isValid())
- SM = &const_cast<SourceManager &>(getLocation().getManager());
-
- // Write a short header to help identify diagnostics.
- OS << (char)0x06 << (char)0x07;
-
- // Write the diagnostic level and location.
- WriteUnsigned(OS, (unsigned)Level);
- WriteSourceLocation(OS, SM, getLocation());
-
- // Write the diagnostic message.
- llvm::SmallString<64> Message;
- WriteString(OS, getMessage());
-
- // Count the number of ranges that don't point into macros, since
- // only simple file ranges serialize well.
- unsigned NumNonMacroRanges = 0;
- for (range_iterator R = range_begin(), REnd = range_end(); R != REnd; ++R) {
- if (R->getBegin().isMacroID() || R->getEnd().isMacroID())
- continue;
-
- ++NumNonMacroRanges;
- }
-
- // Write the ranges.
- WriteUnsigned(OS, NumNonMacroRanges);
- if (NumNonMacroRanges) {
- for (range_iterator R = range_begin(), REnd = range_end(); R != REnd; ++R) {
- if (R->getBegin().isMacroID() || R->getEnd().isMacroID())
- continue;
-
- WriteSourceLocation(OS, SM, R->getBegin());
- WriteSourceLocation(OS, SM, R->getEnd());
- WriteUnsigned(OS, R->isTokenRange());
- }
- }
-
- // Determine if all of the fix-its involve rewrites with simple file
- // locations (not in macro instantiations). If so, we can write
- // fix-it information.
- unsigned NumFixIts = 0;
- for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
- if (F->RemoveRange.isValid() &&
- (F->RemoveRange.getBegin().isMacroID() ||
- F->RemoveRange.getEnd().isMacroID())) {
- NumFixIts = 0;
- break;
- }
-
- ++NumFixIts;
- }
-
- // Write the fix-its.
- WriteUnsigned(OS, NumFixIts);
- for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
- WriteSourceLocation(OS, SM, F->RemoveRange.getBegin());
- WriteSourceLocation(OS, SM, F->RemoveRange.getEnd());
- WriteUnsigned(OS, F->RemoveRange.isTokenRange());
- WriteString(OS, F->CodeToInsert);
- }
-}
-
-static bool ReadUnsigned(const char *&Memory, const char *MemoryEnd,
- unsigned &Value) {
- if (Memory + sizeof(unsigned) > MemoryEnd)
- return true;
-
- memmove(&Value, Memory, sizeof(unsigned));
- Memory += sizeof(unsigned);
- return false;
-}
-
-static bool ReadSourceLocation(FileManager &FM, SourceManager &SM,
- const char *&Memory, const char *MemoryEnd,
- SourceLocation &Location) {
- // Read the filename.
- unsigned FileNameLen = 0;
- if (ReadUnsigned(Memory, MemoryEnd, FileNameLen) ||
- Memory + FileNameLen > MemoryEnd)
- return true;
-
- llvm::StringRef FileName(Memory, FileNameLen);
- Memory += FileNameLen;
-
- // Read the line, column.
- unsigned Line = 0, Column = 0;
- if (ReadUnsigned(Memory, MemoryEnd, Line) ||
- ReadUnsigned(Memory, MemoryEnd, Column))
- return true;
-
- if (FileName.empty()) {
- Location = SourceLocation();
- return false;
- }
-
- const FileEntry *File = FM.getFile(FileName);
- if (!File)
- return true;
-
- // Make sure that this file has an entry in the source manager.
- if (!SM.hasFileInfo(File))
- SM.createFileID(File, SourceLocation(), SrcMgr::C_User);
-
- Location = SM.getLocation(File, Line, Column);
- return false;
-}
-
-StoredDiagnostic
-StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
- const char *&Memory, const char *MemoryEnd) {
- while (true) {
- if (Memory == MemoryEnd)
- return StoredDiagnostic();
-
- if (*Memory != 0x06) {
- ++Memory;
- continue;
- }
-
- ++Memory;
- if (Memory == MemoryEnd)
- return StoredDiagnostic();
-
- if (*Memory != 0x07) {
- ++Memory;
- continue;
- }
-
- // We found the header. We're done.
- ++Memory;
- break;
- }
-
- // Read the severity level.
- unsigned Level = 0;
- if (ReadUnsigned(Memory, MemoryEnd, Level) || Level > Diagnostic::Fatal)
- return StoredDiagnostic();
-
- // Read the source location.
- SourceLocation Location;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Location))
- return StoredDiagnostic();
-
- // Read the diagnostic text.
- if (Memory == MemoryEnd)
- return StoredDiagnostic();
-
- unsigned MessageLen = 0;
- if (ReadUnsigned(Memory, MemoryEnd, MessageLen) ||
- Memory + MessageLen > MemoryEnd)
- return StoredDiagnostic();
-
- llvm::StringRef Message(Memory, MessageLen);
- Memory += MessageLen;
-
-
- // At this point, we have enough information to form a diagnostic. Do so.
- StoredDiagnostic Diag;
- Diag.Level = (Diagnostic::Level)Level;
- Diag.Loc = FullSourceLoc(Location, SM);
- Diag.Message = Message;
- if (Memory == MemoryEnd)
- return Diag;
-
- // Read the source ranges.
- unsigned NumSourceRanges = 0;
- if (ReadUnsigned(Memory, MemoryEnd, NumSourceRanges))
- return Diag;
- for (unsigned I = 0; I != NumSourceRanges; ++I) {
- SourceLocation Begin, End;
- unsigned IsTokenRange;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, End) ||
- ReadUnsigned(Memory, MemoryEnd, IsTokenRange))
- return Diag;
-
- Diag.Ranges.push_back(CharSourceRange(SourceRange(Begin, End),
- IsTokenRange));
- }
-
- // Read the fix-it hints.
- unsigned NumFixIts = 0;
- if (ReadUnsigned(Memory, MemoryEnd, NumFixIts))
- return Diag;
- for (unsigned I = 0; I != NumFixIts; ++I) {
- SourceLocation RemoveBegin, RemoveEnd;
- unsigned InsertLen = 0, RemoveIsTokenRange;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) ||
- ReadUnsigned(Memory, MemoryEnd, RemoveIsTokenRange) ||
- ReadUnsigned(Memory, MemoryEnd, InsertLen) ||
- Memory + InsertLen > MemoryEnd) {
- Diag.FixIts.clear();
- return Diag;
- }
-
- FixItHint Hint;
- Hint.RemoveRange = CharSourceRange(SourceRange(RemoveBegin, RemoveEnd),
- RemoveIsTokenRange);
- Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
- Memory += InsertLen;
- Diag.FixIts.push_back(Hint);
- }
-
- return Diag;
-}
-
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
/// DiagnosticClient should be included in the number of diagnostics
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
new file mode 100644
index 0000000..8725e7f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -0,0 +1,586 @@
+//===--- DiagnosticIDs.cpp - Diagnostic IDs Handling ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic IDs-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Analysis/AnalysisDiagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Builtin Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+// Diagnostic classes.
+enum {
+ CLASS_NOTE = 0x01,
+ CLASS_WARNING = 0x02,
+ CLASS_EXTENSION = 0x03,
+ CLASS_ERROR = 0x04
+};
+
+struct StaticDiagInfoRec {
+ unsigned short DiagID;
+ unsigned Mapping : 3;
+ unsigned Class : 3;
+ unsigned SFINAE : 1;
+ unsigned AccessControl : 1;
+ unsigned Category : 5;
+
+ const char *Description;
+ const char *OptionGroup;
+
+ bool operator<(const StaticDiagInfoRec &RHS) const {
+ return DiagID < RHS.DiagID;
+ }
+};
+
+}
+
+static const StaticDiagInfoRec StaticDiagInfo[] = {
+#define DIAG(ENUM,CLASS,DEFAULT_MAPPING,DESC,GROUP,SFINAE,ACCESS,CATEGORY) \
+ { diag::ENUM, DEFAULT_MAPPING, CLASS, SFINAE, ACCESS, CATEGORY, DESC, GROUP },
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+ { 0, 0, 0, 0, 0, 0, 0, 0}
+};
+#undef DIAG
+
+/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
+/// or null if the ID is invalid.
+static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
+ unsigned NumDiagEntries = sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
+
+ // If assertions are enabled, verify that the StaticDiagInfo array is sorted.
+#ifndef NDEBUG
+ static bool IsFirst = true;
+ if (IsFirst) {
+ for (unsigned i = 1; i != NumDiagEntries; ++i) {
+ assert(StaticDiagInfo[i-1].DiagID != StaticDiagInfo[i].DiagID &&
+ "Diag ID conflict, the enums at the start of clang::diag (in "
+ "DiagnosticIDs.h) probably need to be increased");
+
+ assert(StaticDiagInfo[i-1] < StaticDiagInfo[i] &&
+ "Improperly sorted diag info");
+ }
+ IsFirst = false;
+ }
+#endif
+
+ // Search the diagnostic table with a binary search.
+ StaticDiagInfoRec Find = { DiagID, 0, 0, 0, 0, 0, 0, 0 };
+
+ const StaticDiagInfoRec *Found =
+ std::lower_bound(StaticDiagInfo, StaticDiagInfo + NumDiagEntries, Find);
+ if (Found == StaticDiagInfo + NumDiagEntries ||
+ Found->DiagID != DiagID)
+ return 0;
+
+ return Found;
+}
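+
+// Editorial note: the binary search above depends on the DIAG() records in
+// the .inc files being emitted in increasing DiagID order; the NDEBUG-guarded
+// loop checks that invariant once per process. Typical use mirrors the
+// callers below:
+//   if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+//     return Info->Description;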
+
+static unsigned GetDefaultDiagMapping(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Mapping;
+ return diag::MAP_FATAL;
+}
+
+/// getWarningOptionForDiag - Return the lowest-level warning option that
+/// enables the specified diagnostic. If there is no -Wfoo flag that controls
+/// the diagnostic, this returns null.
+const char *DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->OptionGroup;
+ return 0;
+}
+
+/// getCategoryNumberForDiag - Return the category number that a specified
+/// DiagID belongs to, or 0 if no category.
+unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Category;
+ return 0;
+}
+
+/// getCategoryNameFromID - Given a category ID, return the name of the
+/// category, an empty string if CategoryID is zero, or null if CategoryID is
+/// invalid.
+const char *DiagnosticIDs::getCategoryNameFromID(unsigned CategoryID) {
+ // The table of category names, indexed by category ID.
+ static const char *CategoryNameTable[] = {
+#define GET_CATEGORY_TABLE
+#define CATEGORY(X) X,
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_CATEGORY_TABLE
+ "<<END>>"
+ };
+ static const size_t CategoryNameTableSize =
+ sizeof(CategoryNameTable) / sizeof(CategoryNameTable[0])-1;
+
+ if (CategoryID >= CategoryNameTableSize) return 0;
+ return CategoryNameTable[CategoryID];
+}
+
+
+
+DiagnosticIDs::SFINAEResponse
+DiagnosticIDs::getDiagnosticSFINAEResponse(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) {
+ if (Info->AccessControl)
+ return SFINAE_AccessControl;
+
+ if (!Info->SFINAE)
+ return SFINAE_Report;
+
+ if (Info->Class == CLASS_ERROR)
+ return SFINAE_SubstitutionFailure;
+
+ // Suppress notes, warnings, and extensions.
+ return SFINAE_Suppress;
+ }
+
+ return SFINAE_Report;
+}
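+
+// The checks above encode a precedence: access-control diagnostics are
+// classified first, non-SFINAE diagnostics are always reported, SFINAE
+// errors become substitution failures, and any remaining SFINAE
+// note/warning/extension is suppressed. Sketch (hypothetical IDs):
+//   getDiagnosticSFINAEResponse(SomeAccessDiagID) == SFINAE_AccessControl
+//   getDiagnosticSFINAEResponse(SomeSFINAEErrorID) == SFINAE_SubstitutionFailure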
+
+/// getBuiltinDiagClass - Return the class field of the diagnostic.
+///
+static unsigned getBuiltinDiagClass(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Class;
+ return ~0U;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ namespace diag {
+ class CustomDiagInfo {
+ typedef std::pair<DiagnosticIDs::Level, std::string> DiagDesc;
+ std::vector<DiagDesc> DiagInfo;
+ std::map<DiagDesc, unsigned> DiagIDs;
+ public:
+
+ /// getDescription - Return the description of the specified custom
+ /// diagnostic.
+ const char *getDescription(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnosic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second.c_str();
+ }
+
+ /// getLevel - Return the level of the specified custom diagnostic.
+ DiagnosticIDs::Level getLevel(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnosic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first;
+ }
+
+ unsigned getOrCreateDiagID(DiagnosticIDs::Level L, llvm::StringRef Message,
+ DiagnosticIDs &Diags) {
+ DiagDesc D(L, Message);
+ // Check to see if it already exists.
+ std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
+ if (I != DiagIDs.end() && I->first == D)
+ return I->second;
+
+ // If not, assign a new ID.
+ unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT;
+ DiagIDs.insert(std::make_pair(D, ID));
+ DiagInfo.push_back(D);
+ return ID;
+ }
+ };
+
+ } // end diag namespace
+} // end clang namespace
+
+
+//===----------------------------------------------------------------------===//
+// Common Diagnostic implementation
+//===----------------------------------------------------------------------===//
+
+DiagnosticIDs::DiagnosticIDs() {
+ CustomDiagInfo = 0;
+}
+
+DiagnosticIDs::~DiagnosticIDs() {
+ delete CustomDiagInfo;
+}
+
+/// getCustomDiagID - Return an ID for a diagnostic with the specified message
+/// and level. If this is the first request for this diagnostic, it is
+/// registered and created; otherwise the existing ID is returned.
+unsigned DiagnosticIDs::getCustomDiagID(Level L, llvm::StringRef Message) {
+ if (CustomDiagInfo == 0)
+ CustomDiagInfo = new diag::CustomDiagInfo();
+ return CustomDiagInfo->getOrCreateDiagID(L, Message, *this);
+}
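+
+// Usage sketch (hypothetical caller, for illustration): identical requests
+// are deduplicated, so the same (level, message) pair always yields one ID:
+//   unsigned ID1 = Diags.getCustomDiagID(DiagnosticIDs::Warning, "oops");
+//   unsigned ID2 = Diags.getCustomDiagID(DiagnosticIDs::Warning, "oops");
+//   assert(ID1 == ID2 && ID1 >= diag::DIAG_UPPER_LIMIT);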
+
+
+/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
+/// level of the specified diagnostic ID is a Warning or Extension.
+/// This only works on builtin diagnostics, not custom ones, and is not legal to
+/// call on NOTEs.
+bool DiagnosticIDs::isBuiltinWarningOrExtension(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) != CLASS_ERROR;
+}
+
+/// \brief Determine whether the given built-in diagnostic ID is a
+/// Note.
+bool DiagnosticIDs::isBuiltinNote(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) == CLASS_NOTE;
+}
+
+/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+/// ID is for an extension of some sort. This also returns EnabledByDefault,
+/// which is set to indicate whether the diagnostic is ignored by default (in
+/// which case -pedantic enables it) or treated as a warning/error by default.
+///
+bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID,
+ bool &EnabledByDefault) {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT ||
+ getBuiltinDiagClass(DiagID) != CLASS_EXTENSION)
+ return false;
+
+ EnabledByDefault = GetDefaultDiagMapping(DiagID) != diag::MAP_IGNORE;
+ return true;
+}
+
+/// getDescription - Given a diagnostic ID, return a description of the
+/// issue.
+const char *DiagnosticIDs::getDescription(unsigned DiagID) const {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Description;
+ return CustomDiagInfo->getDescription(DiagID);
+}
+
+/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+DiagnosticIDs::Level
+DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
+ const Diagnostic &Diag) const {
+ // Handle custom diagnostics, which cannot be mapped.
+ if (DiagID >= diag::DIAG_UPPER_LIMIT)
+ return CustomDiagInfo->getLevel(DiagID);
+
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ assert(DiagClass != CLASS_NOTE && "Cannot get diagnostic level of a note!");
+ return getDiagnosticLevel(DiagID, DiagClass, Loc, Diag);
+}
+
+/// \brief Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+///
+/// \param Loc The source location we are interested in finding out the
+/// diagnostic state. Can be null in order to query the latest state.
+DiagnosticIDs::Level
+DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass,
+ SourceLocation Loc,
+ const Diagnostic &Diag) const {
+ // Specific non-error diagnostics may be mapped to various levels from ignored
+ // to error. Errors can only be mapped to fatal.
+ DiagnosticIDs::Level Result = DiagnosticIDs::Fatal;
+
+ Diagnostic::DiagStatePointsTy::iterator
+ Pos = Diag.GetDiagStatePointForLoc(Loc);
+ Diagnostic::DiagState *State = Pos->State;
+
+ // Get the mapping information, if unset, compute it lazily.
+ unsigned MappingInfo = Diag.getDiagnosticMappingInfo((diag::kind)DiagID,
+ State);
+ if (MappingInfo == 0) {
+ MappingInfo = GetDefaultDiagMapping(DiagID);
+ Diag.setDiagnosticMappingInternal(DiagID, MappingInfo, State, false, false);
+ }
+
+ switch (MappingInfo & 7) {
+ default: assert(0 && "Unknown mapping!");
+ case diag::MAP_IGNORE:
+ // Ignore this, unless this is an extension diagnostic and we're mapping
+ // them onto warnings or errors.
+ if (!isBuiltinExtensionDiag(DiagID) || // Not an extension
+ Diag.ExtBehavior == Diagnostic::Ext_Ignore || // Ext ignored
+ (MappingInfo & 8) != 0) // User explicitly mapped it.
+ return DiagnosticIDs::Ignored;
+ Result = DiagnosticIDs::Warning;
+ if (Diag.ExtBehavior == Diagnostic::Ext_Error) Result = DiagnosticIDs::Error;
+ if (Result == DiagnosticIDs::Error && Diag.ErrorsAsFatal)
+ Result = DiagnosticIDs::Fatal;
+ break;
+ case diag::MAP_ERROR:
+ Result = DiagnosticIDs::Error;
+ if (Diag.ErrorsAsFatal)
+ Result = DiagnosticIDs::Fatal;
+ break;
+ case diag::MAP_FATAL:
+ Result = DiagnosticIDs::Fatal;
+ break;
+ case diag::MAP_WARNING:
+ // If warnings are globally mapped to ignore or error, do it.
+ if (Diag.IgnoreAllWarnings)
+ return DiagnosticIDs::Ignored;
+
+ Result = DiagnosticIDs::Warning;
+
+ // If this is an extension diagnostic, we're in -pedantic-error mode, and
+ // the user didn't explicitly map it, upgrade it to an error.
+ if (Diag.ExtBehavior == Diagnostic::Ext_Error &&
+ (MappingInfo & 8) == 0 &&
+ isBuiltinExtensionDiag(DiagID))
+ Result = DiagnosticIDs::Error;
+
+ if (Diag.WarningsAsErrors)
+ Result = DiagnosticIDs::Error;
+ if (Result == DiagnosticIDs::Error && Diag.ErrorsAsFatal)
+ Result = DiagnosticIDs::Fatal;
+ break;
+
+ case diag::MAP_WARNING_NO_WERROR:
+ // Diagnostics specified with -Wno-error=foo should be set to warnings, but
+ // not be adjusted by -Werror or -pedantic-errors.
+ Result = DiagnosticIDs::Warning;
+
+ // If warnings are globally mapped to ignore or error, do it.
+ if (Diag.IgnoreAllWarnings)
+ return DiagnosticIDs::Ignored;
+
+ break;
+
+ case diag::MAP_ERROR_NO_WFATAL:
+ // Diagnostics specified as -Wno-fatal-error=foo should be errors, but
+ // unaffected by -Wfatal-errors.
+ Result = DiagnosticIDs::Error;
+ break;
+ }
+
+ // Okay, we're about to return this as a "diagnostic to emit"; one last check:
+ // if this is any sort of extension warning, and if we're in an __extension__
+ // block, silence it.
+ if (Diag.AllExtensionsSilenced && isBuiltinExtensionDiag(DiagID))
+ return DiagnosticIDs::Ignored;
+
+ return Result;
+}
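+
+// Illustrative trace of the mapping logic above (not exhaustive): a
+// MAP_WARNING diagnostic with IgnoreAllWarnings unset and WarningsAsErrors
+// set comes out as DiagnosticIDs::Error, and additionally becomes Fatal when
+// ErrorsAsFatal is set; inside an __extension__ block the same diagnostic is
+// silenced only if it is an extension diagnostic.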
+
+struct WarningOption {
+ const char *Name;
+ const short *Members;
+ const short *SubGroups;
+};
+
+#define GET_DIAG_ARRAYS
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_ARRAYS
+
+// Second, the table of options, sorted by name for fast binary lookup.
+static const WarningOption OptionTable[] = {
+#define GET_DIAG_TABLE
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_TABLE
+};
+static const size_t OptionTableSize =
+  sizeof(OptionTable) / sizeof(OptionTable[0]);
+
+static bool WarningOptionCompare(const WarningOption &LHS,
+ const WarningOption &RHS) {
+ return strcmp(LHS.Name, RHS.Name) < 0;
+}
+
+static void MapGroupMembers(const WarningOption *Group, diag::Mapping Mapping,
+ SourceLocation Loc, Diagnostic &Diag) {
+ // Option exists, poke all the members of its diagnostic set.
+ if (const short *Member = Group->Members) {
+ for (; *Member != -1; ++Member)
+ Diag.setDiagnosticMapping(*Member, Mapping, Loc);
+ }
+
+ // Enable/disable all subgroups along with this one.
+ if (const short *SubGroups = Group->SubGroups) {
+ for (; *SubGroups != (short)-1; ++SubGroups)
+ MapGroupMembers(&OptionTable[(short)*SubGroups], Mapping, Loc, Diag);
+ }
+}
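+
+// Illustrative shape of the tables this walks (hypothetical values; the real
+// arrays come from DiagnosticGroups.inc). Members and SubGroups are both
+// -1-terminated:
+//   static const short ExampleMembers[]   = { 1200, 1534, -1 }; // diag IDs
+//   static const short ExampleSubGroups[] = { 7, -1 }; // OptionTable indices
+// MapGroupMembers remaps both diag IDs, then recurses into OptionTable[7].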
+
+/// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
+/// "unknown-pragmas") to have the specified mapping. If "Group" is unknown,
+/// the request is ignored and true is returned; otherwise this returns false.
+bool DiagnosticIDs::setDiagnosticGroupMapping(const char *Group,
+ diag::Mapping Map,
+ SourceLocation Loc,
+ Diagnostic &Diag) const {
+ assert((Loc.isValid() ||
+ Diag.DiagStatePoints.empty() ||
+ Diag.DiagStatePoints.back().Loc.isInvalid()) &&
+ "Loc should be invalid only when the mapping comes from command-line");
+ assert((Loc.isInvalid() || Diag.DiagStatePoints.empty() ||
+ Diag.DiagStatePoints.back().Loc.isInvalid() ||
+ !Diag.SourceMgr->isBeforeInTranslationUnit(Loc,
+ Diag.DiagStatePoints.back().Loc)) &&
+ "Source location of new mapping is before the previous one!");
+
+ WarningOption Key = { Group, 0, 0 };
+ const WarningOption *Found =
+ std::lower_bound(OptionTable, OptionTable + OptionTableSize, Key,
+ WarningOptionCompare);
+ if (Found == OptionTable + OptionTableSize ||
+ strcmp(Found->Name, Group) != 0)
+ return true; // Option not found.
+
+ MapGroupMembers(Found, Map, Loc, Diag);
+ return false;
+}
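+
+// Usage sketch (hypothetical caller): mapping a whole group and detecting an
+// unknown -W option would look roughly like
+//   bool Unknown = DiagIDs.setDiagnosticGroupMapping("unknown-pragmas",
+//                                                    diag::MAP_ERROR,
+//                                                    SourceLocation(), Diag);
+//   // Unknown == true means no such warning group exists.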
+
+/// ProcessDiag - This is the method used to report a diagnostic that is
+/// finally fully formed.
+bool DiagnosticIDs::ProcessDiag(Diagnostic &Diag) const {
+ DiagnosticInfo Info(&Diag);
+
+ if (Diag.SuppressAllDiagnostics)
+ return false;
+
+ assert(Diag.getClient() && "DiagnosticClient not set!");
+
+ // Figure out the diagnostic level of this message.
+ DiagnosticIDs::Level DiagLevel;
+ unsigned DiagID = Info.getID();
+
+ // ShouldEmitInSystemHeader - True if this diagnostic should be produced even
+ // in a system header.
+ bool ShouldEmitInSystemHeader;
+
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ // Handle custom diagnostics, which cannot be mapped.
+ DiagLevel = CustomDiagInfo->getLevel(DiagID);
+
+ // Custom diagnostics are always emitted in system headers.
+ ShouldEmitInSystemHeader = true;
+ } else {
+ // Get the class of the diagnostic. If this is a NOTE, map it onto whatever
+ // the diagnostic level was for the previous diagnostic so that it is
+ // filtered the same as the previous diagnostic.
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ if (DiagClass == CLASS_NOTE) {
+ DiagLevel = DiagnosticIDs::Note;
+ ShouldEmitInSystemHeader = false; // extra consideration is needed
+ } else {
+ // If this is not an error and we are in a system header, we ignore it.
+ // Check the original Diag ID here, because we also want to ignore
+ // extensions and warnings in -Werror and -pedantic-errors modes, which
+ // *map* warnings/extensions to errors.
+ ShouldEmitInSystemHeader = DiagClass == CLASS_ERROR;
+
+ DiagLevel = getDiagnosticLevel(DiagID, DiagClass, Info.getLocation(),
+ Diag);
+ }
+ }
+
+ if (DiagLevel != DiagnosticIDs::Note) {
+ // Record that a fatal error occurred only when we see a second
+ // non-note diagnostic. This allows notes to be attached to the
+ // fatal error, but suppresses any diagnostics that follow those
+ // notes.
+ if (Diag.LastDiagLevel == DiagnosticIDs::Fatal)
+ Diag.FatalErrorOccurred = true;
+
+ Diag.LastDiagLevel = DiagLevel;
+ }
+
+ // If a fatal error has already been emitted, silence all subsequent
+ // diagnostics.
+ if (Diag.FatalErrorOccurred) {
+ if (DiagLevel >= DiagnosticIDs::Error &&
+ Diag.Client->IncludeInDiagnosticCounts()) {
+ ++Diag.NumErrors;
+ ++Diag.NumErrorsSuppressed;
+ }
+
+ return false;
+ }
+
+ // If the client doesn't care about this message, don't issue it. If this is
+ // a note and the last real diagnostic was ignored, ignore it too.
+ if (DiagLevel == DiagnosticIDs::Ignored ||
+ (DiagLevel == DiagnosticIDs::Note &&
+ Diag.LastDiagLevel == DiagnosticIDs::Ignored))
+ return false;
+
+ // If this diagnostic is in a system header and is not a clang error, suppress
+ // it.
+ if (Diag.SuppressSystemWarnings && !ShouldEmitInSystemHeader &&
+ Info.getLocation().isValid() &&
+ Diag.getSourceManager().isInSystemHeader(
+ Diag.getSourceManager().getInstantiationLoc(Info.getLocation())) &&
+ (DiagLevel != DiagnosticIDs::Note ||
+ Diag.LastDiagLevel == DiagnosticIDs::Ignored)) {
+ Diag.LastDiagLevel = DiagnosticIDs::Ignored;
+ return false;
+ }
+
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ Diag.ErrorOccurred = true;
+ ++Diag.NumErrors;
+ }
+
+ // If we've emitted a lot of errors, emit a fatal error after it to stop a
+ // flood of bogus errors.
+ if (Diag.ErrorLimit && Diag.NumErrors >= Diag.ErrorLimit &&
+ DiagLevel == DiagnosticIDs::Error)
+ Diag.SetDelayedDiagnostic(diag::fatal_too_many_errors);
+ }
+
+ // If we have any Fix-Its, make sure that all of the Fix-Its point into
+ // source locations that aren't macro instantiations. If any point into
+ // macro instantiations, remove all of the Fix-Its.
+ for (unsigned I = 0, N = Diag.NumFixItHints; I != N; ++I) {
+ const FixItHint &FixIt = Diag.FixItHints[I];
+ if (FixIt.RemoveRange.isInvalid() ||
+ FixIt.RemoveRange.getBegin().isMacroID() ||
+ FixIt.RemoveRange.getEnd().isMacroID()) {
+ Diag.NumFixItHints = 0;
+ break;
+ }
+ }
+
+ // Finally, report it.
+ Diag.Client->HandleDiagnostic((Diagnostic::Level)DiagLevel, Info);
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ if (DiagLevel == DiagnosticIDs::Warning)
+ ++Diag.NumWarnings;
+ }
+
+ Diag.CurDiagID = ~0U;
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
index 565f8a6..342413d 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -1,4 +1,4 @@
-///===--- FileManager.cpp - File System Probing and Caching ----------------===//
+//===--- FileManager.cpp - File System Probing and Caching ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,35 +18,52 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
#include "llvm/Config/config.h"
#include <map>
#include <set>
#include <string>
+
+// FIXME: This is terrible; we need this for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#endif
using namespace clang;
// FIXME: Enhance libsystem to support inode and other fields.
#include <sys/stat.h>
-#if defined(_MSC_VER)
-#define S_ISDIR(s) (_S_IFDIR & s)
-#endif
-
/// NON_EXISTENT_DIR - A special value distinct from null that is used to
/// represent a dir name that doesn't exist on the disk.
#define NON_EXISTENT_DIR reinterpret_cast<DirectoryEntry*>((intptr_t)-1)
+/// NON_EXISTENT_FILE - A special value distinct from null that is used to
+/// represent a filename that doesn't exist on the disk.
+#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
+
+
+FileEntry::~FileEntry() {
+ // If this FileEntry owns an open file descriptor that never got used, close
+ // it.
+ if (FD != -1) ::close(FD);
+}
+
//===----------------------------------------------------------------------===//
// Windows.
//===----------------------------------------------------------------------===//
#ifdef LLVM_ON_WIN32
-#define IS_DIR_SEPARATOR_CHAR(x) ((x) == '/' || (x) == '\\')
-
namespace {
static std::string GetFullPath(const char *relPath) {
char *absPathStrPtr = _fullpath(NULL, relPath, 0);
@@ -65,15 +82,16 @@ class FileManager::UniqueDirContainer {
llvm::StringMap<DirectoryEntry> UniqueDirs;
public:
- DirectoryEntry &getDirectory(const char *Name, struct stat &StatBuf) {
+ /// getDirectory - Return an existing DirectoryEntry with the given
+ /// name if there is already one; otherwise create and return a
+ /// default-constructed DirectoryEntry.
+ DirectoryEntry &getDirectory(const char *Name,
+ const struct stat & /*StatBuf*/) {
std::string FullPath(GetFullPath(Name));
- return UniqueDirs.GetOrCreateValue(
- FullPath.c_str(),
- FullPath.c_str() + FullPath.size()
- ).getValue();
+ return UniqueDirs.GetOrCreateValue(FullPath).getValue();
}
- size_t size() { return UniqueDirs.size(); }
+ size_t size() const { return UniqueDirs.size(); }
};
class FileManager::UniqueFileContainer {
@@ -82,18 +100,18 @@ class FileManager::UniqueFileContainer {
llvm::StringMap<FileEntry, llvm::BumpPtrAllocator> UniqueFiles;
public:
- FileEntry &getFile(const char *Name, struct stat &StatBuf) {
+ /// getFile - Return an existing FileEntry with the given name if
+ /// there is already one; otherwise create and return a
+ /// default-constructed FileEntry.
+ FileEntry &getFile(const char *Name, const struct stat & /*StatBuf*/) {
std::string FullPath(GetFullPath(Name));
-
+
// LowercaseString because Windows filesystem is case insensitive.
FullPath = llvm::LowercaseString(FullPath);
- return UniqueFiles.GetOrCreateValue(
- FullPath.c_str(),
- FullPath.c_str() + FullPath.size()
- ).getValue();
+ return UniqueFiles.GetOrCreateValue(FullPath).getValue();
}
- size_t size() { return UniqueFiles.size(); }
+ size_t size() const { return UniqueFiles.size(); }
};
//===----------------------------------------------------------------------===//
@@ -102,28 +120,31 @@ public:
#else
-#define IS_DIR_SEPARATOR_CHAR(x) ((x) == '/')
-
class FileManager::UniqueDirContainer {
/// UniqueDirs - Cache from ID's to existing directories/files.
- ///
std::map<std::pair<dev_t, ino_t>, DirectoryEntry> UniqueDirs;
public:
- DirectoryEntry &getDirectory(const char *Name, struct stat &StatBuf) {
+ /// getDirectory - Return an existing DirectoryEntry with the given
+ /// IDs if there is already one; otherwise create and return a
+ /// default-constructed DirectoryEntry.
+ DirectoryEntry &getDirectory(const char * /*Name*/,
+ const struct stat &StatBuf) {
return UniqueDirs[std::make_pair(StatBuf.st_dev, StatBuf.st_ino)];
}
- size_t size() { return UniqueDirs.size(); }
+ size_t size() const { return UniqueDirs.size(); }
};
class FileManager::UniqueFileContainer {
/// UniqueFiles - Cache from ID's to existing directories/files.
- ///
std::set<FileEntry> UniqueFiles;
public:
- FileEntry &getFile(const char *Name, struct stat &StatBuf) {
+ /// getFile - Return an existing FileEntry with the given IDs if
+ /// there is already one; otherwise create and return a
+ /// default-constructed FileEntry.
+ FileEntry &getFile(const char * /*Name*/, const struct stat &StatBuf) {
return
const_cast<FileEntry&>(
*UniqueFiles.insert(FileEntry(StatBuf.st_dev,
@@ -131,7 +152,7 @@ public:
StatBuf.st_mode)).first);
}
- size_t size() { return UniqueFiles.size(); }
+ size_t size() const { return UniqueFiles.size(); }
};
#endif
@@ -140,26 +161,26 @@ public:
// Common logic.
//===----------------------------------------------------------------------===//
-FileManager::FileManager()
- : UniqueDirs(*new UniqueDirContainer),
- UniqueFiles(*new UniqueFileContainer),
- DirEntries(64), FileEntries(64), NextFileUID(0) {
+FileManager::FileManager(const FileSystemOptions &FSO)
+ : FileSystemOpts(FSO),
+ UniqueRealDirs(*new UniqueDirContainer()),
+ UniqueRealFiles(*new UniqueFileContainer()),
+ SeenDirEntries(64), SeenFileEntries(64), NextFileUID(0) {
NumDirLookups = NumFileLookups = 0;
NumDirCacheMisses = NumFileCacheMisses = 0;
}
FileManager::~FileManager() {
- delete &UniqueDirs;
- delete &UniqueFiles;
- for (llvm::SmallVectorImpl<FileEntry *>::iterator
- V = VirtualFileEntries.begin(),
- VEnd = VirtualFileEntries.end();
- V != VEnd;
- ++V)
- delete *V;
+ delete &UniqueRealDirs;
+ delete &UniqueRealFiles;
+ for (unsigned i = 0, e = VirtualFileEntries.size(); i != e; ++i)
+ delete VirtualFileEntries[i];
+ for (unsigned i = 0, e = VirtualDirectoryEntries.size(); i != e; ++i)
+ delete VirtualDirectoryEntries[i];
}
-void FileManager::addStatCache(StatSysCallCache *statCache, bool AtBeginning) {
+void FileManager::addStatCache(FileSystemStatCache *statCache,
+ bool AtBeginning) {
assert(statCache && "No stat cache provided?");
if (AtBeginning || StatCache.get() == 0) {
statCache->setNextStatCache(StatCache.take());
@@ -167,14 +188,14 @@ void FileManager::addStatCache(StatSysCallCache *statCache, bool AtBeginning) {
return;
}
- StatSysCallCache *LastCache = StatCache.get();
+ FileSystemStatCache *LastCache = StatCache.get();
while (LastCache->getNextStatCache())
LastCache = LastCache->getNextStatCache();
LastCache->setNextStatCache(statCache);
}
-void FileManager::removeStatCache(StatSysCallCache *statCache) {
+void FileManager::removeStatCache(FileSystemStatCache *statCache) {
if (!statCache)
return;
@@ -185,54 +206,74 @@ void FileManager::removeStatCache(StatSysCallCache *statCache) {
}
// Find the stat cache in the list.
- StatSysCallCache *PrevCache = StatCache.get();
+ FileSystemStatCache *PrevCache = StatCache.get();
while (PrevCache && PrevCache->getNextStatCache() != statCache)
PrevCache = PrevCache->getNextStatCache();
- if (PrevCache)
- PrevCache->setNextStatCache(statCache->getNextStatCache());
- else
- assert(false && "Stat cache not found for removal");
+
+ assert(PrevCache && "Stat cache not found for removal");
+ PrevCache->setNextStatCache(statCache->getNextStatCache());
}
/// \brief Retrieve the directory that the given file name resides in.
+/// Filename can point to either a real file or a virtual file.
static const DirectoryEntry *getDirectoryFromFile(FileManager &FileMgr,
- const char *NameStart,
- const char *NameEnd) {
- // Figure out what directory it is in. If the string contains a / in it,
- // strip off everything after it.
- // FIXME: this logic should be in sys::Path.
- const char *SlashPos = NameEnd-1;
- while (SlashPos >= NameStart && !IS_DIR_SEPARATOR_CHAR(SlashPos[0]))
- --SlashPos;
- // Ignore duplicate //'s.
- while (SlashPos > NameStart && IS_DIR_SEPARATOR_CHAR(SlashPos[-1]))
- --SlashPos;
-
- if (SlashPos < NameStart) {
- // Use the current directory if file has no path component.
- const char *Name = ".";
- return FileMgr.getDirectory(Name, Name+1);
- } else if (SlashPos == NameEnd-1)
- return 0; // If filename ends with a /, it's a directory.
- else
- return FileMgr.getDirectory(NameStart, SlashPos);
+ llvm::StringRef Filename) {
+ if (Filename.empty())
+ return NULL;
+
+ if (llvm::sys::path::is_separator(Filename[Filename.size() - 1]))
+ return NULL; // Filename ends in a separator; it names a directory.
+
+ llvm::StringRef DirName = llvm::sys::path::parent_path(Filename);
+ // Use the current directory if file has no path component.
+ if (DirName.empty())
+ DirName = ".";
+
+ return FileMgr.getDirectory(DirName);
}
-/// getDirectory - Lookup, cache, and verify the specified directory. This
-/// returns null if the directory doesn't exist.
+/// Add all ancestors of the given path (pointing to either a file or
+/// a directory) as virtual directories.
+void FileManager::addAncestorsAsVirtualDirs(llvm::StringRef Path) {
+ llvm::StringRef DirName = llvm::sys::path::parent_path(Path);
+ if (DirName.empty())
+ return;
+
+ llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
+ SeenDirEntries.GetOrCreateValue(DirName);
+
+ // When caching a virtual directory, we always cache its ancestors
+ // at the same time. Therefore, if DirName is already in the cache,
+ // we don't need to recurse as its ancestors must also already be in
+ // the cache.
+ if (NamedDirEnt.getValue())
+ return;
+
+ // Add the virtual directory to the cache.
+ DirectoryEntry *UDE = new DirectoryEntry;
+ UDE->Name = NamedDirEnt.getKeyData();
+ NamedDirEnt.setValue(UDE);
+ VirtualDirectoryEntries.push_back(UDE);
+
+ // Recursively add the other ancestors.
+ addAncestorsAsVirtualDirs(DirName);
+}
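+
+// Example (illustrative): for Path == "include/sys/queue.h" the first call
+// caches a virtual DirectoryEntry for "include/sys", then recurses to cache
+// "include"; a later call for "include/sys/types.h" returns immediately
+// because "include/sys" is already in SeenDirEntries.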
+
+/// getDirectory - Lookup, cache, and verify the specified directory
+/// (real or virtual). This returns NULL if the directory doesn't
+/// exist.
///
-const DirectoryEntry *FileManager::getDirectory(const char *NameStart,
- const char *NameEnd) {
+const DirectoryEntry *FileManager::getDirectory(llvm::StringRef DirName) {
// stat doesn't like trailing separators (at least on Windows).
- if (((NameEnd - NameStart) > 1) &&
- ((*(NameEnd - 1) == '/') || (*(NameEnd - 1) == '\\')))
- NameEnd--;
+ if (DirName.size() > 1 && llvm::sys::path::is_separator(DirName.back()))
+ DirName = DirName.substr(0, DirName.size()-1);
++NumDirLookups;
llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
- DirEntries.GetOrCreateValue(NameStart, NameEnd);
+ SeenDirEntries.GetOrCreateValue(DirName);
- // See if there is already an entry in the map.
+ // See if there was already an entry in the map. Note that the map
+ // contains both virtual and real directories.
if (NamedDirEnt.getValue())
return NamedDirEnt.getValue() == NON_EXISTENT_DIR
? 0 : NamedDirEnt.getValue();
@@ -243,43 +284,41 @@ const DirectoryEntry *FileManager::getDirectory(const char *NameStart,
NamedDirEnt.setValue(NON_EXISTENT_DIR);
// Get the null-terminated directory name as stored as the key of the
- // DirEntries map.
+ // SeenDirEntries map.
const char *InterndDirName = NamedDirEnt.getKeyData();
// Check to see if the directory exists.
struct stat StatBuf;
- if (stat_cached(InterndDirName, &StatBuf) || // Error stat'ing.
- !S_ISDIR(StatBuf.st_mode)) // Not a directory?
+ if (getStatValue(InterndDirName, StatBuf, 0/*directory lookup*/)) {
+ // There's no real directory at the given path.
return 0;
+ }
- // It exists. See if we have already opened a directory with the same inode.
- // This occurs when one dir is symlinked to another, for example.
- DirectoryEntry &UDE = UniqueDirs.getDirectory(InterndDirName, StatBuf);
+ // It exists. See if we have already opened a directory with the
+ // same inode (this occurs on Unix-like systems when one dir is
+ // symlinked to another, for example) or the same path (on
+ // Windows).
+ DirectoryEntry &UDE = UniqueRealDirs.getDirectory(InterndDirName, StatBuf);
NamedDirEnt.setValue(&UDE);
- if (UDE.getName()) // Already have an entry with this inode, return it.
- return &UDE;
+ if (!UDE.getName()) {
+ // We don't have this directory yet, add it. We use the string
+ // key from the SeenDirEntries map as the string.
+ UDE.Name = InterndDirName;
+ }
- // Otherwise, we don't have this directory yet, add it. We use the string
- // key from the DirEntries map as the string.
- UDE.Name = InterndDirName;
return &UDE;
}
-/// NON_EXISTENT_FILE - A special value distinct from null that is used to
-/// represent a filename that doesn't exist on the disk.
-#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
-
-/// getFile - Lookup, cache, and verify the specified file. This returns null
-/// if the file doesn't exist.
+/// getFile - Lookup, cache, and verify the specified file (real or
+/// virtual). This returns NULL if the file doesn't exist.
///
-const FileEntry *FileManager::getFile(const char *NameStart,
- const char *NameEnd) {
+const FileEntry *FileManager::getFile(llvm::StringRef Filename) {
++NumFileLookups;
// See if there is already an entry in the map.
llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
- FileEntries.GetOrCreateValue(NameStart, NameEnd);
+ SeenFileEntries.GetOrCreateValue(Filename);
// See if there is already an entry in the map.
if (NamedFileEnt.getValue())
@@ -291,13 +330,16 @@ const FileEntry *FileManager::getFile(const char *NameStart,
// By default, initialize it to invalid.
NamedFileEnt.setValue(NON_EXISTENT_FILE);
-
// Get the null-terminated file name as stored as the key of the
- // FileEntries map.
+ // SeenFileEntries map.
const char *InterndFileName = NamedFileEnt.getKeyData();
- const DirectoryEntry *DirInfo
- = getDirectoryFromFile(*this, NameStart, NameEnd);
+ // Look up the directory for the file. When looking up something like
+ // sys/foo.h we'll discover all of the search directories that have a 'sys'
+ // subdirectory. This will let us avoid having to waste time on known-to-fail
+ // searches when we go to find sys/bar.h, because all the search directories
+ // without a 'sys' subdir will get a cached failure result.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename);
if (DirInfo == 0) // Directory doesn't exist, file can't exist.
return 0;
@@ -305,89 +347,218 @@ const FileEntry *FileManager::getFile(const char *NameStart,
// FIXME: This will reduce the # syscalls.
// Nope, there isn't. Check to see if the file exists.
+ int FileDescriptor = -1;
struct stat StatBuf;
- //llvm::errs() << "STATING: " << Filename;
- if (stat_cached(InterndFileName, &StatBuf) || // Error stat'ing.
- S_ISDIR(StatBuf.st_mode)) { // A directory?
- // If this file doesn't exist, we leave a null in FileEntries for this path.
- //llvm::errs() << ": Not existing\n";
+ if (getStatValue(InterndFileName, StatBuf, &FileDescriptor)) {
+ // There's no real file at the given path.
return 0;
}
- //llvm::errs() << ": exists\n";
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
- FileEntry &UFE = UniqueFiles.getFile(InterndFileName, StatBuf);
+ FileEntry &UFE = UniqueRealFiles.getFile(InterndFileName, StatBuf);
NamedFileEnt.setValue(&UFE);
- if (UFE.getName()) // Already have an entry with this inode, return it.
+ if (UFE.getName()) { // Already have an entry with this inode, return it.
+ // If the stat process opened the file, close it to avoid a FD leak.
+ if (FileDescriptor != -1)
+ close(FileDescriptor);
+
return &UFE;
+ }
// Otherwise, we don't have this directory yet, add it.
- // FIXME: Change the name to be a char* that points back to the 'FileEntries'
- // key.
+ // FIXME: Change the name to be a char* that points back to the
+ // 'SeenFileEntries' key.
UFE.Name = InterndFileName;
UFE.Size = StatBuf.st_size;
UFE.ModTime = StatBuf.st_mtime;
UFE.Dir = DirInfo;
UFE.UID = NextFileUID++;
+ UFE.FD = FileDescriptor;
return &UFE;
}
const FileEntry *
FileManager::getVirtualFile(llvm::StringRef Filename, off_t Size,
time_t ModificationTime) {
- const char *NameStart = Filename.begin(), *NameEnd = Filename.end();
-
++NumFileLookups;
// See if there is already an entry in the map.
llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
- FileEntries.GetOrCreateValue(NameStart, NameEnd);
+ SeenFileEntries.GetOrCreateValue(Filename);
// See if there is already an entry in the map.
- if (NamedFileEnt.getValue())
- return NamedFileEnt.getValue() == NON_EXISTENT_FILE
- ? 0 : NamedFileEnt.getValue();
+ if (NamedFileEnt.getValue() && NamedFileEnt.getValue() != NON_EXISTENT_FILE)
+ return NamedFileEnt.getValue();
++NumFileCacheMisses;
// By default, initialize it to invalid.
NamedFileEnt.setValue(NON_EXISTENT_FILE);
- const DirectoryEntry *DirInfo
- = getDirectoryFromFile(*this, NameStart, NameEnd);
- if (DirInfo == 0) // Directory doesn't exist, file can't exist.
- return 0;
+ addAncestorsAsVirtualDirs(Filename);
+ FileEntry *UFE = 0;
- FileEntry *UFE = new FileEntry();
- VirtualFileEntries.push_back(UFE);
- NamedFileEnt.setValue(UFE);
+ // Now that all ancestors of Filename are in the cache, the
+ // following call is guaranteed to find the DirectoryEntry from the
+ // cache.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename);
+ assert(DirInfo &&
+ "The directory of a virtual file should already be in the cache.");
- UFE->Name = NamedFileEnt.getKeyData();
+ // Check to see if the file exists. If so, drop the virtual file and map
+ // the name to the unique real FileEntry instead.
+ int FileDescriptor = -1;
+ struct stat StatBuf;
+ const char *InterndFileName = NamedFileEnt.getKeyData();
+ if (getStatValue(InterndFileName, StatBuf, &FileDescriptor) == 0) {
+ // If the stat process opened the file, close it to avoid a FD leak.
+ if (FileDescriptor != -1)
+ close(FileDescriptor);
+
+ StatBuf.st_size = Size;
+ StatBuf.st_mtime = ModificationTime;
+ UFE = &UniqueRealFiles.getFile(InterndFileName, StatBuf);
+
+ NamedFileEnt.setValue(UFE);
+
+ // If we had already opened this file, close it now so we don't
+ // leak the descriptor. We're not going to use the file
+ // descriptor anyway, since this is a virtual file.
+ if (UFE->FD != -1) {
+ close(UFE->FD);
+ UFE->FD = -1;
+ }
+
+ // If we already have an entry with this inode, return it.
+ if (UFE->getName())
+ return UFE;
+ }
+
+ if (!UFE) {
+ UFE = new FileEntry();
+ VirtualFileEntries.push_back(UFE);
+ NamedFileEnt.setValue(UFE);
+ }
+
+ UFE->Name = InterndFileName;
UFE->Size = Size;
UFE->ModTime = ModificationTime;
UFE->Dir = DirInfo;
UFE->UID = NextFileUID++;
+ UFE->FD = -1;
+ return UFE;
+}
+
+void FileManager::FixupRelativePath(llvm::sys::Path &path,
+ const FileSystemOptions &FSOpts) {
+ if (FSOpts.WorkingDir.empty() || llvm::sys::path::is_absolute(path.str()))
+ return;
+
+ llvm::SmallString<128> NewPath(FSOpts.WorkingDir);
+ llvm::sys::path::append(NewPath, path.str());
+ path = NewPath;
+}
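+
+// Example (illustrative): with FSOpts.WorkingDir == "/build/obj", a relative
+// path "foo/bar.h" is rewritten to "/build/obj/foo/bar.h", while an absolute
+// path such as "/usr/include/stdio.h" is left untouched.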
+
+llvm::MemoryBuffer *FileManager::
+getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
+ llvm::OwningPtr<llvm::MemoryBuffer> Result;
+ llvm::error_code ec;
+ if (FileSystemOpts.WorkingDir.empty()) {
+ const char *Filename = Entry->getName();
+ // If the file is already open, use the open file descriptor.
+ if (Entry->FD != -1) {
+ ec = llvm::MemoryBuffer::getOpenFile(Entry->FD, Filename, Result,
+ Entry->getSize());
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+
+ close(Entry->FD);
+ Entry->FD = -1;
+ return Result.take();
+ }
+
+ // Otherwise, open the file.
+ ec = llvm::MemoryBuffer::getFile(Filename, Result, Entry->getSize());
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+ }
- // If this virtual file resolves to a file, also map that file to the
- // newly-created file entry.
- const char *InterndFileName = NamedFileEnt.getKeyData();
- struct stat StatBuf;
- if (!stat_cached(InterndFileName, &StatBuf) &&
- !S_ISDIR(StatBuf.st_mode)) {
- llvm::sys::Path FilePath(InterndFileName);
- FilePath.makeAbsolute();
- FileEntries[FilePath.str()] = UFE;
+ llvm::sys::Path FilePath(Entry->getName());
+ FixupRelativePath(FilePath, FileSystemOpts);
+ ec = llvm::MemoryBuffer::getFile(FilePath.c_str(), Result, Entry->getSize());
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+}
+
+llvm::MemoryBuffer *FileManager::
+getBufferForFile(llvm::StringRef Filename, std::string *ErrorStr) {
+ llvm::OwningPtr<llvm::MemoryBuffer> Result;
+ llvm::error_code ec;
+ if (FileSystemOpts.WorkingDir.empty()) {
+ ec = llvm::MemoryBuffer::getFile(Filename, Result);
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
}
+
+ llvm::sys::Path FilePath(Filename);
+ FixupRelativePath(FilePath, FileSystemOpts);
+ ec = llvm::MemoryBuffer::getFile(FilePath.c_str(), Result);
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+}
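+
+// Usage sketch (hypothetical caller): both getBufferForFile overloads report
+// failure through the optional error string, so a typical client reads
+//   std::string Error;
+//   llvm::MemoryBuffer *Buf = FileMgr.getBufferForFile("input.c", &Error);
+//   if (!Buf) { /* report Error */ }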
+
+/// getStatValue - Get the 'stat' information for the specified path,
+/// using the cache to accelerate it if possible. This returns true
+/// if the path points to a virtual file or does not exist, or returns
+/// false if it's an existing real file. If FileDescriptor is NULL,
+/// do directory look-up instead of file look-up.
+bool FileManager::getStatValue(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ // FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
+ // absolute!
+ if (FileSystemOpts.WorkingDir.empty())
+ return FileSystemStatCache::get(Path, StatBuf, FileDescriptor,
+ StatCache.get());
+
+ llvm::sys::Path FilePath(Path);
+ FixupRelativePath(FilePath, FileSystemOpts);
+
+ return FileSystemStatCache::get(FilePath.c_str(), StatBuf, FileDescriptor,
+ StatCache.get());
+}
+
+void FileManager::GetUniqueIDMapping(
+ llvm::SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
+ UIDToFiles.clear();
+ UIDToFiles.resize(NextFileUID);
- return UFE;
+ // Map file entries
+ for (llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator>::const_iterator
+ FE = SeenFileEntries.begin(), FEEnd = SeenFileEntries.end();
+ FE != FEEnd; ++FE)
+ if (FE->getValue() && FE->getValue() != NON_EXISTENT_FILE)
+ UIDToFiles[FE->getValue()->getUID()] = FE->getValue();
+
+ // Map virtual file entries
+ for (llvm::SmallVector<FileEntry*, 4>::const_iterator
+ VFE = VirtualFileEntries.begin(), VFEEnd = VirtualFileEntries.end();
+ VFE != VFEEnd; ++VFE)
+ if (*VFE && *VFE != NON_EXISTENT_FILE)
+ UIDToFiles[(*VFE)->getUID()] = *VFE;
}
+
void FileManager::PrintStats() const {
llvm::errs() << "\n*** File Manager Stats:\n";
- llvm::errs() << UniqueFiles.size() << " files found, "
- << UniqueDirs.size() << " dirs found.\n";
+ llvm::errs() << UniqueRealFiles.size() << " real files found, "
+ << UniqueRealDirs.size() << " real dirs found.\n";
+ llvm::errs() << VirtualFileEntries.size() << " virtual files found, "
+ << VirtualDirectoryEntries.size() << " virtual dirs found.\n";
llvm::errs() << NumDirLookups << " dir lookups, "
<< NumDirCacheMisses << " dir cache misses.\n";
llvm::errs() << NumFileLookups << " file lookups, "
@@ -395,20 +566,3 @@ void FileManager::PrintStats() const {
//llvm::errs() << PagesMapped << BytesOfPagesMapped << FSLookups;
}
-
-int MemorizeStatCalls::stat(const char *path, struct stat *buf) {
- int result = StatSysCallCache::stat(path, buf);
-
- // Do not cache failed stats, it is easy to construct common inconsistent
- // situations if we do, and they are not important for PCH performance (which
- // currently only needs the stats to construct the initial FileManager
- // entries).
- if (result != 0)
- return result;
-
- // Cache file 'stat' results and directories with absolutely paths.
- if (!S_ISDIR(buf->st_mode) || llvm::sys::Path(path).isAbsolute())
- StatCalls[path] = StatResult(result, *buf);
-
- return result;
-}
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
new file mode 100644
index 0000000..c8b07af
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
@@ -0,0 +1,120 @@
+//===--- FileSystemStatCache.cpp - Caching for 'stat' calls ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemStatCache interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileSystemStatCache.h"
+#include "llvm/Support/Path.h"
+#include <fcntl.h>
+
+// FIXME: This is terrible; we need this for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#endif
+using namespace clang;
+
+#if defined(_MSC_VER)
+#define S_ISDIR(s) ((_S_IFDIR & s) != 0)
+#endif
+
+/// FileSystemStatCache::get - Get the 'stat' information for the specified
+/// path, using the cache to accelerate it if possible. This returns true if
+/// the path does not exist or false if it exists.
+///
+/// If FileDescriptor is non-null, then this lookup should only return success
+/// for files (not directories). If it is null this lookup should only return
+/// success for directories (not files). On a successful file lookup, the
+/// implementation can optionally fill in FileDescriptor with a valid
+/// descriptor and the client guarantees that it will close it.
+bool FileSystemStatCache::get(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor, FileSystemStatCache *Cache) {
+ LookupResult R;
+ bool isForDir = FileDescriptor == 0;
+
+ // If we have a cache, use it to resolve the stat query.
+ if (Cache)
+ R = Cache->getStat(Path, StatBuf, FileDescriptor);
+ else if (isForDir) {
+ // If this is a directory and we have no cache, just go to the file system.
+ R = ::stat(Path, &StatBuf) != 0 ? CacheMissing : CacheExists;
+ } else {
+ // Otherwise, we have to go to the filesystem. We can always just use
+ // 'stat' here, but (for files) the client is asking whether the file exists
+ // because it wants to turn around and *open* it. It is more efficient to
+ // do "open+fstat" on success than it is to do "stat+open".
+ //
+ // Because of this, check to see if the file exists with 'open'. If the
+ // open succeeds, use fstat to get the stat info.
+ int OpenFlags = O_RDONLY;
+#ifdef O_BINARY
+ OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
+#endif
+ *FileDescriptor = ::open(Path, OpenFlags);
+
+ if (*FileDescriptor == -1) {
+ // If the open fails, our "stat" fails.
+ R = CacheMissing;
+ } else {
+ // Otherwise, the open succeeded. Do an fstat to get the information
+ // about the file. We'll end up returning the open file descriptor to the
+ // client to do what they please with it.
+ if (::fstat(*FileDescriptor, &StatBuf) == 0)
+ R = CacheExists;
+ else {
+ // fstat rarely fails. If it does, claim the initial open didn't
+ // succeed.
+ R = CacheMissing;
+ ::close(*FileDescriptor);
+ *FileDescriptor = -1;
+ }
+ }
+ }
+
+ // If the path doesn't exist, return failure.
+ if (R == CacheMissing) return true;
+
+ // If the path exists, make sure that its "directoryness" matches the
+ // client's demands.
+ if (S_ISDIR(StatBuf.st_mode) != isForDir) {
+ // If not, close the file if opened.
+ if (FileDescriptor && *FileDescriptor != -1) {
+ ::close(*FileDescriptor);
+ *FileDescriptor = -1;
+ }
+
+ return true;
+ }
+
+ return false;
+}
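+
+// Illustrative calls (hypothetical; StatCachePtr is assumed to be the head of
+// a cache chain, or null): a file lookup passes a descriptor slot, while a
+// directory lookup passes a null FileDescriptor:
+//   int FD = -1; struct stat SB;
+//   bool Missing = FileSystemStatCache::get("foo.h", SB, &FD, StatCachePtr);
+//   if (!Missing && FD != -1) { /* caller owns FD and must close it */ }
+//   bool DirMissing = FileSystemStatCache::get("sys", SB, 0, StatCachePtr);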
+
+
+MemorizeStatCalls::LookupResult
+MemorizeStatCalls::getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ LookupResult Result = statChained(Path, StatBuf, FileDescriptor);
+
+ // Do not cache failed stats; it is easy to construct common inconsistent
+ // situations if we do, and they are not important for PCH performance (which
+ // currently only needs the stats to construct the initial FileManager
+ // entries).
+ if (Result == CacheMissing)
+ return Result;
+
+ // Cache file 'stat' results and directories with absolute paths.
+ if (!S_ISDIR(StatBuf.st_mode) || llvm::sys::path::is_absolute(Path))
+ StatCalls[Path] = StatBuf;
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
index 6b673e3..ef11d65 100644
--- a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -44,8 +44,24 @@ IdentifierInfo::IdentifierInfo() {
// IdentifierTable Implementation
//===----------------------------------------------------------------------===//
+IdentifierIterator::~IdentifierIterator() { }
+
IdentifierInfoLookup::~IdentifierInfoLookup() {}
+namespace {
+ /// \brief A simple identifier lookup iterator that represents an
+ /// empty sequence of identifiers.
+ class EmptyLookupIterator : public IdentifierIterator
+ {
+ public:
+ virtual llvm::StringRef Next() { return llvm::StringRef(); }
+ };
+}
+
+IdentifierIterator *IdentifierInfoLookup::getIdentifiers() const {
+ return new EmptyLookupIterator();
+}
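+
+// Sketch of the iteration protocol (illustrative): Next() yields successive
+// identifier names and an empty StringRef when exhausted, so the default
+// EmptyLookupIterator above terminates a loop such as
+//   for (llvm::StringRef S = Iter->Next(); !S.empty(); S = Iter->Next())
+//     consume(S);
+// on its first step.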
+
ExternalIdentifierLookup::~ExternalIdentifierLookup() {}
IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
@@ -73,8 +89,9 @@ namespace {
KEYMS = 32,
BOOLSUPPORT = 64,
KEYALTIVEC = 128,
- KEYNOMS = 256,
- KEYBORLAND = 512
+ KEYNOCXX = 256,
+ KEYBORLAND = 512,
+ KEYOPENCL = 1024
};
}
@@ -99,7 +116,8 @@ static void AddKeyword(llvm::StringRef Keyword,
else if (LangOpts.Borland && (Flags & KEYBORLAND)) AddResult = 1;
else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
- else if (!LangOpts.Microsoft && (Flags & KEYNOMS)) AddResult = 2;
+ else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
+ else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
// Don't add this keyword if disabled in this language.
if (AddResult == 0) return;
@@ -308,6 +326,11 @@ IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
return SI->getIdentifierInfoForSlot(argIndex);
}
+llvm::StringRef Selector::getNameForSlot(unsigned int argIndex) const {
+ IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
+ return II ? II->getName() : llvm::StringRef();
+}
+
std::string MultiKeywordSelector::getName() const {
llvm::SmallString<256> Str;
llvm::raw_svector_ostream OS(Str);
@@ -374,7 +397,7 @@ Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
unsigned Size = sizeof(MultiKeywordSelector) + nKeys*sizeof(IdentifierInfo *);
MultiKeywordSelector *SI =
(MultiKeywordSelector*)SelTabImpl.Allocator.Allocate(Size,
- llvm::alignof<MultiKeywordSelector>());
+ llvm::alignOf<MultiKeywordSelector>());
new (SI) MultiKeywordSelector(nKeys, IIV);
SelTabImpl.Table.InsertNode(SI, InsertPos);
return Selector(SI);
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
index 7412b95..5062d43 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
@@ -43,6 +43,11 @@ void SourceLocation::print(llvm::raw_ostream &OS, const SourceManager &SM)const{
if (isFileID()) {
PresumedLoc PLoc = SM.getPresumedLoc(*this);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid>";
+ return;
+ }
// The instantiation and spelling pos is identical for file locs.
OS << PLoc.getFilename() << ':' << PLoc.getLine()
<< ':' << PLoc.getColumn();
@@ -105,6 +110,11 @@ bool FullSourceLoc::isInSystemHeader() const {
return SrcMgr->isInSystemHeader(*this);
}
+bool FullSourceLoc::isBeforeInTranslationUnitThan(SourceLocation Loc) const {
+ assert(isValid());
+ return SrcMgr->isBeforeInTranslationUnit(*this, Loc);
+}
+
const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
assert(isValid());
return SrcMgr->getCharacterData(*this, Invalid);
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
index 633d86c..044c88d 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -15,13 +15,16 @@
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <algorithm>
#include <string>
#include <cstring>
+#include <sys/stat.h>
using namespace clang;
using namespace SrcMgr;
@@ -66,100 +69,86 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(Diagnostic &Diag,
const SourceManager &SM,
SourceLocation Loc,
bool *Invalid) const {
- if (Invalid)
- *Invalid = false;
-
- // Lazily create the Buffer for ContentCaches that wrap files.
- if (!Buffer.getPointer() && Entry) {
- std::string ErrorStr;
- struct stat FileInfo;
- Buffer.setPointer(MemoryBuffer::getFile(Entry->getName(), &ErrorStr,
- Entry->getSize(), &FileInfo));
+ // Lazily create the Buffer for ContentCaches that wrap files. If we already
+ // computed it, just return what we have.
+ if (Buffer.getPointer() || Entry == 0) {
+ if (Invalid)
+ *Invalid = isBufferInvalid();
- // If we were unable to open the file, then we are in an inconsistent
- // situation where the content cache referenced a file which no longer
- // exists. Most likely, we were using a stat cache with an invalid entry but
- // the file could also have been removed during processing. Since we can't
- // really deal with this situation, just create an empty buffer.
- //
- // FIXME: This is definitely not ideal, but our immediate clients can't
- // currently handle returning a null entry here. Ideally we should detect
- // that we are in an inconsistent situation and error out as quickly as
- // possible.
- if (!Buffer.getPointer()) {
- const llvm::StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
- Buffer.setPointer(MemoryBuffer::getNewMemBuffer(Entry->getSize(),
- "<invalid>"));
- char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
- for (unsigned i = 0, e = Entry->getSize(); i != e; ++i)
- Ptr[i] = FillStr[i % FillStr.size()];
-
- if (Diag.isDiagnosticInFlight())
- Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
- Entry->getName(), ErrorStr);
- else
- Diag.Report(FullSourceLoc(Loc, SM), diag::err_cannot_open_file)
- << Entry->getName() << ErrorStr;
-
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
-
- // FIXME: This conditionalization is horrible, but we see spurious failures
- // in the test suite due to this warning and no one has had time to hunt it
- // down. So for now, we just don't emit this diagnostic on Win32, and hope
- // nothing bad happens.
- //
- // PR6812.
-#if !defined(LLVM_ON_WIN32)
- } else if (FileInfo.st_size != Entry->getSize() ||
- FileInfo.st_mtime != Entry->getModificationTime()) {
- // Check that the file's size and modification time are the same
- // as in the file entry (which may have come from a stat cache).
- if (Diag.isDiagnosticInFlight())
- Diag.SetDelayedDiagnostic(diag::err_file_modified,
- Entry->getName());
- else
- Diag.Report(FullSourceLoc(Loc, SM), diag::err_file_modified)
- << Entry->getName();
-
- Buffer.setInt(Buffer.getInt() | InvalidFlag);
-#endif
- }
+ return Buffer.getPointer();
+ }
+
+ std::string ErrorStr;
+ Buffer.setPointer(SM.getFileManager().getBufferForFile(Entry, &ErrorStr));
+
+ // If we were unable to open the file, then we are in an inconsistent
+ // situation where the content cache referenced a file which no longer
+ // exists. Most likely, we were using a stat cache with an invalid entry but
+ // the file could also have been removed during processing. Since we can't
+ // really deal with this situation, just create an empty buffer.
+ //
+ // FIXME: This is definitely not ideal, but our immediate clients can't
+ // currently handle returning a null entry here. Ideally we should detect
+ // that we are in an inconsistent situation and error out as quickly as
+ // possible.
+ if (!Buffer.getPointer()) {
+ const llvm::StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
+ Buffer.setPointer(MemoryBuffer::getNewMemBuffer(Entry->getSize(),
+ "<invalid>"));
+ char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
+ for (unsigned i = 0, e = Entry->getSize(); i != e; ++i)
+ Ptr[i] = FillStr[i % FillStr.size()];
+
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
+ Entry->getName(), ErrorStr);
+ else
+ Diag.Report(Loc, diag::err_cannot_open_file)
+ << Entry->getName() << ErrorStr;
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
- // If the buffer is valid, check to see if it has a UTF Byte Order Mark
- // (BOM). We only support UTF-8 without a BOM right now. See
- // http://en.wikipedia.org/wiki/Byte_order_mark for more information.
- if (!isBufferInvalid()) {
- llvm::StringRef BufStr = Buffer.getPointer()->getBuffer();
- const char *BOM = 0;
- if (BufStr.startswith("\xFE\xBB\xBF"))
- BOM = "UTF-8";
- else if (BufStr.startswith("\xFE\xFF"))
- BOM = "UTF-16 (BE)";
- else if (BufStr.startswith("\xFF\xFE"))
- BOM = "UTF-16 (LE)";
- else if (BufStr.startswith(llvm::StringRef("\x00\x00\xFE\xFF", 4)))
- BOM = "UTF-32 (BE)";
- else if (BufStr.startswith(llvm::StringRef("\xFF\xFE\x00\x00", 4)))
- BOM = "UTF-32 (LE)";
- else if (BufStr.startswith("\x2B\x2F\x76"))
- BOM = "UTF-7";
- else if (BufStr.startswith("\xF7\x64\x4C"))
- BOM = "UTF-1";
- else if (BufStr.startswith("\xDD\x73\x66\x73"))
- BOM = "UTF-EBCDIC";
- else if (BufStr.startswith("\x0E\xFE\xFF"))
- BOM = "SDSU";
- else if (BufStr.startswith("\xFB\xEE\x28"))
- BOM = "BOCU-1";
- else if (BufStr.startswith("\x84\x31\x95\x33"))
- BOM = "BOCU-1";
-
- if (BOM) {
- Diag.Report(FullSourceLoc(Loc, SM), diag::err_unsupported_bom)
- << BOM << Entry->getName();
- Buffer.setInt(1);
- }
- }
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ // Check that the file's size is the same as in the file entry (which may
+ // have come from a stat cache).
+ if (getRawBuffer()->getBufferSize() != (size_t)Entry->getSize()) {
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_modified,
+ Entry->getName());
+ else
+ Diag.Report(Loc, diag::err_file_modified)
+ << Entry->getName();
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ // If the buffer is valid, check to see if it has a UTF Byte Order Mark
+ // (BOM). We only support UTF-8 without a BOM right now. See
+ // http://en.wikipedia.org/wiki/Byte_order_mark for more information.
+ llvm::StringRef BufStr = Buffer.getPointer()->getBuffer();
+ const char *BOM = llvm::StringSwitch<const char *>(BufStr)
+ .StartsWith("\xEF\xBB\xBF", "UTF-8")
+ .StartsWith("\xFE\xFF", "UTF-16 (BE)")
+ .StartsWith("\xFF\xFE", "UTF-16 (LE)")
+ .StartsWith("\x00\x00\xFE\xFF", "UTF-32 (BE)")
+ .StartsWith("\xFF\xFE\x00\x00", "UTF-32 (LE)")
+ .StartsWith("\x2B\x2F\x76", "UTF-7")
+ .StartsWith("\xF7\x64\x4C", "UTF-1")
+ .StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
+ .StartsWith("\x0E\xFE\xFF", "SDSU")
+ .StartsWith("\xFB\xEE\x28", "BOCU-1")
+ .StartsWith("\x84\x31\x95\x33", "GB-18030")
+ .Default(0);
+
+ if (BOM) {
+ Diag.Report(Loc, diag::err_unsupported_bom)
+ << BOM << Entry->getName();
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
}
if (Invalid)
@@ -350,6 +339,14 @@ LineTableInfo &SourceManager::getLineTable() {
// Private 'Create' methods.
//===----------------------------------------------------------------------===//
+SourceManager::SourceManager(Diagnostic &Diag, FileManager &FileMgr)
+ : Diag(Diag), FileMgr(FileMgr),
+ ExternalSLocEntries(0), LineTable(0), NumLinearScans(0),
+ NumBinaryProbes(0) {
+ clearIDTables();
+ Diag.setSourceManager(this);
+}
+
SourceManager::~SourceManager() {
delete LineTable;
@@ -525,25 +522,32 @@ SourceManager::getMemoryBufferForFile(const FileEntry *File,
return IR->getBuffer(Diag, *this, SourceLocation(), Invalid);
}
-bool SourceManager::overrideFileContents(const FileEntry *SourceFile,
+void SourceManager::overrideFileContents(const FileEntry *SourceFile,
const llvm::MemoryBuffer *Buffer,
bool DoNotFree) {
const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
- if (IR == 0)
- return true;
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
- return false;
}
llvm::StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
bool MyInvalid = false;
- const llvm::MemoryBuffer *Buf = getBuffer(FID, &MyInvalid);
+ const SLocEntry &SLoc = getSLocEntry(FID.ID);
+ if (!SLoc.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
+ }
+
+ const llvm::MemoryBuffer *Buf
+ = SLoc.getFile().getContentCache()->getBuffer(Diag, *this, SourceLocation(),
+ &MyInvalid);
if (Invalid)
*Invalid = MyInvalid;
if (MyInvalid)
- return "";
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
return Buf->getBuffer();
}
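getBufferData now degrades to a recognizable sentinel string instead of an empty one, but callers are still expected to consult the out-parameter rather than inspect the returned text. The intended caller-side pattern, sketched (SM and FID stand for whatever SourceManager and FileID are in scope):

    bool Invalid = false;
    llvm::StringRef Data = SM.getBufferData(FID, &Invalid);
    if (Invalid)
      return;  // Data only holds the "<<<<<INVALID SOURCE LOCATION>>>>>" sentinel
    // ... Data is the real file contents here ...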
@@ -796,21 +800,30 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
return FilePos-LineStart+1;
}
+// isInvalid - Return the result of calling loc.isInvalid(), and
+// if Invalid is not null, set its value to same.
+static bool isInvalid(SourceLocation Loc, bool *Invalid) {
+ bool MyInvalid = Loc.isInvalid();
+ if (Invalid)
+ *Invalid = MyInvalid;
+ return MyInvalid;
+}
+
unsigned SourceManager::getSpellingColumnNumber(SourceLocation Loc,
bool *Invalid) const {
- if (Loc.isInvalid()) return 0;
+ if (isInvalid(Loc, Invalid)) return 0;
std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
}
unsigned SourceManager::getInstantiationColumnNumber(SourceLocation Loc,
bool *Invalid) const {
- if (Loc.isInvalid()) return 0;
+ if (isInvalid(Loc, Invalid)) return 0;
std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
}
-static DISABLE_INLINE void
+static LLVM_ATTRIBUTE_NOINLINE void
ComputeLineNumbers(Diagnostic &Diag, ContentCache *FI,
llvm::BumpPtrAllocator &Alloc,
const SourceManager &SM, bool &Invalid);
@@ -974,13 +987,13 @@ unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
unsigned SourceManager::getInstantiationLineNumber(SourceLocation Loc,
bool *Invalid) const {
- if (Loc.isInvalid()) return 0;
+ if (isInvalid(Loc, Invalid)) return 0;
std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
return getLineNumber(LocInfo.first, LocInfo.second);
}
unsigned SourceManager::getSpellingLineNumber(SourceLocation Loc,
bool *Invalid) const {
- if (Loc.isInvalid()) return 0;
+ if (isInvalid(Loc, Invalid)) return 0;
std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
return getLineNumber(LocInfo.first, LocInfo.second);
}
@@ -1021,7 +1034,7 @@ SourceManager::getFileCharacteristic(SourceLocation Loc) const {
/// for normal clients.
const char *SourceManager::getBufferName(SourceLocation Loc,
bool *Invalid) const {
- if (Loc.isInvalid()) return "<invalid loc>";
+ if (isInvalid(Loc, Invalid)) return "<invalid loc>";
return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
}
@@ -1051,8 +1064,14 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
Filename = C->Entry->getName();
else
Filename = C->getBuffer(Diag, *this)->getBufferIdentifier();
- unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second);
- unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second);
+ bool Invalid = false;
+ unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+ unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+
SourceLocation IncludeLoc = FI.getIncludeLoc();
// If we have #line directives in this file, update and overwrite the physical
@@ -1090,38 +1109,65 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
// Other miscellaneous methods.
//===----------------------------------------------------------------------===//
+/// \brief Retrieve the inode for the given file entry, if possible.
+///
+/// This routine involves a system call, and therefore should only be used
+/// in non-performance-critical code.
+static llvm::Optional<ino_t> getActualFileInode(const FileEntry *File) {
+ if (!File)
+ return llvm::Optional<ino_t>();
+
+ struct stat StatBuf;
+ if (::stat(File->getName(), &StatBuf))
+ return llvm::Optional<ino_t>();
+
+ return StatBuf.st_ino;
+}
+
/// \brief Get the source location for the given file:line:col triplet.
///
/// If the source file is included multiple times, the source location will
/// be based upon the first inclusion.
SourceLocation SourceManager::getLocation(const FileEntry *SourceFile,
- unsigned Line, unsigned Col) const {
+ unsigned Line, unsigned Col) {
assert(SourceFile && "Null source file!");
assert(Line && Col && "Line and column should start from 1!");
- fileinfo_iterator FI = FileInfos.find(SourceFile);
- if (FI == FileInfos.end())
- return SourceLocation();
- ContentCache *Content = FI->second;
-
- // If this is the first use of line information for this buffer, compute the
- /// SourceLineCache for it on demand.
- if (Content->SourceLineCache == 0) {
- bool MyInvalid = false;
- ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
- if (MyInvalid)
- return SourceLocation();
- }
-
// Find the first file ID that corresponds to the given file.
FileID FirstFID;
// First, check the main file ID, since it is common to look for a
// location in the main file.
+ llvm::Optional<ino_t> SourceFileInode;
+ llvm::Optional<llvm::StringRef> SourceFileName;
if (!MainFileID.isInvalid()) {
const SLocEntry &MainSLoc = getSLocEntry(MainFileID);
- if (MainSLoc.isFile() && MainSLoc.getFile().getContentCache() == Content)
- FirstFID = MainFileID;
+ if (MainSLoc.isFile()) {
+ const ContentCache *MainContentCache
+ = MainSLoc.getFile().getContentCache();
+ if (!MainContentCache) {
+ // Can't do anything without a content cache.
+ } else if (MainContentCache->Entry == SourceFile) {
+ FirstFID = MainFileID;
+ } else {
+ // Fall back: check whether we have the same base name and inode
+ // as the main file.
+ const FileEntry *MainFile = MainContentCache->Entry;
+ SourceFileName = llvm::sys::path::filename(SourceFile->getName());
+ if (*SourceFileName == llvm::sys::path::filename(MainFile->getName())) {
+ SourceFileInode = getActualFileInode(SourceFile);
+ if (SourceFileInode) {
+ if (llvm::Optional<ino_t> MainFileInode
+ = getActualFileInode(MainFile)) {
+ if (*SourceFileInode == *MainFileInode) {
+ FirstFID = MainFileID;
+ SourceFile = MainFile;
+ }
+ }
+ }
+ }
+ }
+ }
}
if (FirstFID.isInvalid()) {
@@ -1129,16 +1175,63 @@ SourceLocation SourceManager::getLocation(const FileEntry *SourceFile,
// through all of the source locations.
for (unsigned I = 0, N = sloc_entry_size(); I != N; ++I) {
const SLocEntry &SLoc = getSLocEntry(I);
- if (SLoc.isFile() && SLoc.getFile().getContentCache() == Content) {
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache() &&
+ SLoc.getFile().getContentCache()->Entry == SourceFile) {
FirstFID = FileID::get(I);
break;
}
}
}
+
+ // If we haven't found what we want yet, try again, but this time stat()
+ // each of the files in case the files have changed since we originally
+ // parsed them.
+ if (FirstFID.isInvalid() &&
+ (SourceFileName ||
+ (SourceFileName = llvm::sys::path::filename(SourceFile->getName()))) &&
+ (SourceFileInode ||
+ (SourceFileInode = getActualFileInode(SourceFile)))) {
+ for (unsigned I = 0, N = sloc_entry_size(); I != N; ++I) {
+ const SLocEntry &SLoc = getSLocEntry(I);
+ if (SLoc.isFile()) {
+ const ContentCache *FileContentCache
+ = SLoc.getFile().getContentCache();
+ const FileEntry *Entry = FileContentCache ? FileContentCache->Entry : 0;
+ if (Entry &&
+ *SourceFileName == llvm::sys::path::filename(Entry->getName())) {
+ if (llvm::Optional<ino_t> EntryInode = getActualFileInode(Entry)) {
+ if (*SourceFileInode == *EntryInode) {
+ FirstFID = FileID::get(I);
+ SourceFile = Entry;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
if (FirstFID.isInvalid())
return SourceLocation();
+ if (Line == 1 && Col == 1)
+ return getLocForStartOfFile(FirstFID);
+
+ ContentCache *Content
+ = const_cast<ContentCache *>(getOrCreateContentCache(SourceFile));
+ if (!Content)
+ return SourceLocation();
+
+ // If this is the first use of line information for this buffer, compute the
+ // SourceLineCache for it on demand.
+ if (Content->SourceLineCache == 0) {
+ bool MyInvalid = false;
+ ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ if (MyInvalid)
+ return SourceLocation();
+ }
+
if (Line > Content->NumLines) {
unsigned Size = Content->getBuffer(Diag, *this)->getBufferSize();
if (Size > 0)
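The fallback above equates two paths when their base names match and stat() reports the same inode. A self-contained sketch of that identity test (POSIX assumed; a stricter check would also compare st_dev, which the code above does not):

    #include <sys/stat.h>

    // True only if both paths can be stat()ed and name the same inode.
    static bool sameInode(const char *A, const char *B) {
      struct stat SA, SB;
      if (::stat(A, &SA) != 0 || ::stat(B, &SB) != 0)
        return false;                 // treat unreadable files as distinct
      return SA.st_ino == SB.st_ino;  // clang also pre-filters by base name
    }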
@@ -1190,6 +1283,13 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
if (LHS == RHS)
return false;
+ // If both locations are macro instantiations, the order of their offsets
+ // reflects the order in which the tokens they point to were instantiated
+ // (while parsing, each token that is instantiated by a macro expands the
+ // SLocEntries).
+ if (LHS.isMacroID() && RHS.isMacroID())
+ return LHS.getOffset() < RHS.getOffset();
+
std::pair<FileID, unsigned> LOffs = getDecomposedLoc(LHS);
std::pair<FileID, unsigned> ROffs = getDecomposedLoc(RHS);
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
index 6d42883..a9eeb8b 100644
--- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
+#include <cctype>
#include <cstdlib>
using namespace clang;
@@ -25,6 +26,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
TLSSupported = true;
NoAsmVariants = false;
PointerWidth = PointerAlign = 32;
+ BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
LongWidth = LongAlign = 32;
LongLongWidth = LongLongAlign = 64;
@@ -54,6 +56,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-n32";
UserLabelPrefix = "_";
+ MCountName = "mcount";
HasAlignMac68kSupport = false;
// Default to no types using fpret.
@@ -98,7 +101,7 @@ const char *TargetInfo::getTypeConstantSuffix(IntType T) {
}
}
-/// getTypeWidth - Return the width (in bits) of the specified integer type
+/// getTypeWidth - Return the width (in bits) of the specified integer type
/// enum. For example, SignedInt -> getIntWidth().
unsigned TargetInfo::getTypeWidth(IntType T) const {
switch (T) {
@@ -114,7 +117,7 @@ unsigned TargetInfo::getTypeWidth(IntType T) const {
};
}
-/// getTypeAlign - Return the alignment (in bits) of the specified integer type
+/// getTypeAlign - Return the alignment (in bits) of the specified integer type
/// enum. For example, SignedInt -> getIntAlign().
unsigned TargetInfo::getTypeAlign(IntType T) const {
switch (T) {
@@ -132,18 +135,18 @@ unsigned TargetInfo::getTypeAlign(IntType T) const {
/// isTypeSigned - Return whether an integer types is signed. Returns true if
/// the type is signed; false otherwise.
-bool TargetInfo::isTypeSigned(IntType T) const {
+bool TargetInfo::isTypeSigned(IntType T) {
switch (T) {
default: assert(0 && "not an integer!");
case SignedShort:
case SignedInt:
case SignedLong:
- case SignedLongLong:
+ case SignedLongLong:
return true;
case UnsignedShort:
case UnsignedInt:
case UnsignedLong:
- case UnsignedLongLong:
+ case UnsignedLongLong:
return false;
};
}
@@ -164,7 +167,7 @@ void TargetInfo::setForcedLangOptions(LangOptions &Opts) {
static llvm::StringRef removeGCCRegisterPrefix(llvm::StringRef Name) {
if (Name[0] == '%' || Name[0] == '#')
Name = Name.substr(1);
-
+
return Name;
}
@@ -174,7 +177,7 @@ static llvm::StringRef removeGCCRegisterPrefix(llvm::StringRef Name) {
bool TargetInfo::isValidGCCRegisterName(llvm::StringRef Name) const {
if (Name.empty())
return false;
-
+
const char * const *Names;
unsigned NumNames;
@@ -216,7 +219,7 @@ bool TargetInfo::isValidGCCRegisterName(llvm::StringRef Name) const {
return false;
}
-llvm::StringRef
+llvm::StringRef
TargetInfo::getNormalizedGCCRegisterName(llvm::StringRef Name) const {
assert(isValidGCCRegisterName(Name) && "Invalid register passed in");
@@ -283,6 +286,10 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
Info.setAllowsRegister();
break;
case 'm': // memory operand.
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
Info.setAllowsMemory();
break;
case 'g': // general register, memory operand or immediate integer.
@@ -291,13 +298,12 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
Info.setAllowsMemory();
break;
case ',': // multiple alternative constraint. Pass it.
- Name++;
// Handle additional optional '=' or '+' modifiers.
- if (*Name == '=' || *Name == '+')
+ if (Name[1] == '=' || Name[1] == '+')
Name++;
break;
case '?': // Disparage code slightly.
- case '!': // Disparage severly.
+ case '!': // Disparage severely.
break; // Pass them.
}
@@ -347,6 +353,15 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
if (i >= NumOutputs)
return false;
+ // A number must refer to an output-only operand.
+ if (OutputConstraints[i].isReadWrite())
+ return false;
+
+ // If the constraint is already tied, it must be tied to the
+ // same operand referenced by the number.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
+ return false;
+
// The constraint should have the same info as the respective
// output constraint.
Info.setTiedOperand(i, OutputConstraints[i]);
@@ -362,6 +377,11 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
if (!resolveSymbolicName(Name, OutputConstraints, NumOutputs, Index))
return false;
+ // If the constraint is already tied, it must be tied to the
+ // same operand referenced by the number.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
+ return false;
+
Info.setTiedOperand(Index, OutputConstraints[Index]);
break;
}
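The two rules added above mirror how matching constraints behave in GNU inline asm: a digit ties an input to a write-only output, and retying to a different operand is rejected. An illustrative use of a matching constraint (x86 assumed):

    int twice(int x) {
      int r;
      // "0" ties the first input to output operand 0, so %0 and that input
      // share a register; a "+r" output here would be rejected by the
      // isReadWrite() check above, since a read-write output already
      // carries its own implicit tied input.
      __asm__("addl %2, %0" : "=r"(r) : "0"(x), "r"(x));
      return r;  // r == x + x
    }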
@@ -384,8 +404,10 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
Info.setAllowsRegister();
break;
case 'm': // memory operand.
- case 'o': // offsettable memory operand
- case 'V': // non-offsettable memory operand
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
Info.setAllowsMemory();
break;
case 'g': // general register, memory operand or immediate integer.
@@ -393,6 +415,10 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
Info.setAllowsRegister();
Info.setAllowsMemory();
break;
+ case 'E': // immediate floating point.
+ case 'F': // immediate floating point.
+ case 'p': // address operand.
+ break;
case ',': // multiple alternative constraint. Ignore comma.
break;
case '?': // Disparage code slightly.
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index df20def..a8198e4 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Type.h"
#include <algorithm>
using namespace clang;
@@ -160,12 +161,12 @@ public:
return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
TAA, StubSize);
}
-
+
virtual const char *getStaticInitSectionSpecifier() const {
// FIXME: We should return 0 when building kexts.
return "__TEXT,__StaticInit,regular,pure_instructions";
}
-
+
};
@@ -209,6 +210,25 @@ public:
FreeBSDTargetInfo(const std::string &triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ this->MCountName = "__mcount";
+ break;
+ }
+
}
};
@@ -256,6 +276,7 @@ public:
LinuxTargetInfo(const std::string& triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
+ this->WIntType = TargetInfo::UnsignedInt;
}
};
@@ -405,6 +426,54 @@ public:
// FIXME: WIntType should be SignedLong
}
};
+
+// Windows target
+template<typename Target>
+class WindowsTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("_WIN32");
+ }
+ void getVisualStudioDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ if (Opts.CPlusPlus) {
+ if (Opts.RTTI)
+ Builder.defineMacro("_CPPRTTI");
+
+ if (Opts.Exceptions)
+ Builder.defineMacro("_CPPUNWIND");
+ }
+
+ if (!Opts.CharIsSigned)
+ Builder.defineMacro("_CHAR_UNSIGNED");
+
+ // FIXME: POSIXThreads isn't exactly the option this should be defined for,
+ // but it works for now.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_MT");
+
+ if (Opts.MSCVersion != 0)
+ Builder.defineMacro("_MSC_VER", llvm::Twine(Opts.MSCVersion));
+
+ if (Opts.Microsoft) {
+ Builder.defineMacro("_MSC_EXTENSIONS");
+
+ if (Opts.CPlusPlus0x) {
+ Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED");
+ Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED");
+ Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED");
+ }
+ }
+
+ Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+ }
+
+public:
+ WindowsTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {}
+};
+
} // end anonymous namespace.
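The macros emitted here are the documented Visual C++ predefines, so headers written for MSVC feature-test identically under clang. A small consumer sketch (plain C; the values shown in comments are examples):

    #include <stdio.h>

    void report_mode(void) {
    #if defined(_MSC_VER)
      printf("_MSC_VER=%d\n", _MSC_VER);  /* e.g. 1600 for VS2010 */
    #endif
    #if defined(_CPPRTTI)
      puts("RTTI enabled");               /* typeid/dynamic_cast usable */
    #endif
    #if defined(_CPPUNWIND)
      puts("C++ EH enabled");             /* exceptions are available */
    #endif
    #if defined(_CHAR_UNSIGNED)
      puts("plain char is unsigned");     /* -funsigned-char equivalent */
    #endif
    }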
//===----------------------------------------------------------------------===//
@@ -430,17 +499,6 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const;
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
- // This is the right definition for ABI/V4: System V.4/eabi.
- /*return "typedef struct __va_list_tag {"
- " unsigned char gpr;"
- " unsigned char fpr;"
- " unsigned short reserved;"
- " void* overflow_arg_area;"
- " void* reg_save_area;"
- "} __builtin_va_list[1];";*/
- }
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -458,16 +516,16 @@ public:
// FIXME: The following are added to allow parsing.
// I just took a guess at what the actions should be.
// Also, is more specific checking needed? I.e. specific registers?
- case 'd': // Floating point register (containing 64-bit value)
+ case 'd': // Floating point register (containing 64-bit value)
case 'v': // Altivec vector register
Info.setAllowsRegister();
break;
case 'w':
switch (Name[1]) {
- case 'd':// VSX vector register to hold vector double data
- case 'f':// VSX vector register to hold vector float data
- case 's':// VSX vector register to hold scalar float data
- case 'a':// Any VSX register
+ case 'd':// VSX vector register to hold vector double data
+ case 'f':// VSX vector register to hold vector float data
+ case 's':// VSX vector register to hold scalar float data
+ case 'a':// Any VSX register
break;
default:
return false;
@@ -475,27 +533,27 @@ public:
Info.setAllowsRegister();
Name++; // Skip over 'w'.
break;
- case 'h': // `MQ', `CTR', or `LINK' register
- case 'q': // `MQ' register
- case 'c': // `CTR' register
- case 'l': // `LINK' register
- case 'x': // `CR' register (condition register) number 0
- case 'y': // `CR' register (condition register)
- case 'z': // `XER[CA]' carry bit (part of the XER register)
+ case 'h': // `MQ', `CTR', or `LINK' register
+ case 'q': // `MQ' register
+ case 'c': // `CTR' register
+ case 'l': // `LINK' register
+ case 'x': // `CR' register (condition register) number 0
+ case 'y': // `CR' register (condition register)
+ case 'z': // `XER[CA]' carry bit (part of the XER register)
Info.setAllowsRegister();
break;
- case 'I': // Signed 16-bit constant
+ case 'I': // Signed 16-bit constant
case 'J': // Unsigned 16-bit constant shifted left 16 bits
- // (use `L' instead for SImode constants)
- case 'K': // Unsigned 16-bit constant
- case 'L': // Signed 16-bit constant shifted left 16 bits
- case 'M': // Constant larger than 31
- case 'N': // Exact power of 2
- case 'P': // Constant whose negation is a signed 16-bit constant
+ // (use `L' instead for SImode constants)
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 16-bit constant shifted left 16 bits
+ case 'M': // Constant larger than 31
+ case 'N': // Exact power of 2
+ case 'P': // Constant whose negation is a signed 16-bit constant
case 'G': // Floating point constant that can be loaded into a
- // register with one instruction per word
+ // register with one instruction per word
case 'H': // Integer/Floating point constant that can be loaded
- // into a register using three instructions
+ // into a register using three instructions
break;
case 'm': // Memory operand. Note that on PowerPC targets, m can
// include addresses that update the base register. It
@@ -503,13 +561,13 @@ public:
// if that asm statement accesses the operand exactly once.
// The asm statement must also use `%U<opno>' as a
// placeholder for the "update" flag in the corresponding
- // load or store instruction. For example:
+ // load or store instruction. For example:
// asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val));
- // is correct but:
+ // is correct but:
// asm ("st %1,%0" : "=m" (mem) : "r" (val));
// is not. Use es rather than m if you don't want the base
- // register to be updated.
- case 'e':
+ // register to be updated.
+ case 'e':
if (Name[1] != 's')
return false;
// es: A "stable" memory operand; that is, one which does not
@@ -521,23 +579,23 @@ public:
Name++; // Skip over 'e'.
break;
case 'Q': // Memory operand that is an offset from a register (it is
- // usually better to use `m' or `es' in asm statements)
+ // usually better to use `m' or `es' in asm statements)
case 'Z': // Memory operand that is an indexed or indirect from a
// register (it is usually better to use `m' or `es' in
- // asm statements)
+ // asm statements)
Info.setAllowsMemory();
Info.setAllowsRegister();
break;
- case 'R': // AIX TOC entry
+ case 'R': // AIX TOC entry
case 'a': // Address operand that is an indexed or indirect from a
- // register (`p' is preferable for asm statements)
- case 'S': // Constant suitable as a 64-bit mask operand
- case 'T': // Constant suitable as a 32-bit mask operand
- case 'U': // System V Release 4 small data area reference
+ // register (`p' is preferable for asm statements)
+ case 'S': // Constant suitable as a 64-bit mask operand
+ case 'T': // Constant suitable as a 32-bit mask operand
+ case 'U': // System V Release 4 small data area reference
case 't': // AND masks that can be performed by two rldic{l, r}
- // instructions
- case 'W': // Vector constant that does not require memory
- case 'j': // Vector constant that is all zeros.
+ // instructions
+ case 'W': // Vector constant that does not require memory
+ case 'j': // Vector constant that is all zeros.
break;
// End FIXME.
}
@@ -549,8 +607,9 @@ public:
};
const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES, false },
#include "clang/Basic/BuiltinsPPC.def"
};
@@ -584,7 +643,7 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// FIXME: Should be controlled by command line option.
Builder.defineMacro("__LONG_DOUBLE_128__");
-
+
if (Opts.AltiVec) {
Builder.defineMacro("__VEC__", "10206");
Builder.defineMacro("__ALTIVEC__");
@@ -704,7 +763,18 @@ public:
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
if (getTriple().getOS() == llvm::Triple::FreeBSD)
- this->SizeType = TargetInfo::UnsignedInt;
+ SizeType = UnsignedInt;
+ }
+
+ virtual const char *getVAListDeclaration() const {
+ // This is the ELF definition, and is overridden by the Darwin sub-target
+ return "typedef struct __va_list_tag {"
+ " unsigned char gpr;"
+ " unsigned char fpr;"
+ " unsigned short reserved;"
+ " void* overflow_arg_area;"
+ " void* reg_save_area;"
+ "} __builtin_va_list[1];";
}
};
} // end anonymous namespace.
@@ -720,17 +790,24 @@ public:
DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
}
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
};
} // end anonymous namespace.
namespace {
-class DarwinPPCTargetInfo :
- public DarwinTargetInfo<PPCTargetInfo> {
+class DarwinPPC32TargetInfo :
+ public DarwinTargetInfo<PPC32TargetInfo> {
public:
- DarwinPPCTargetInfo(const std::string& triple)
- : DarwinTargetInfo<PPCTargetInfo>(triple) {
+ DarwinPPC32TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPC32TargetInfo>(triple) {
HasAlignMac68kSupport = true;
+ BoolWidth = BoolAlign = 32; // XXX: support -mone-byte-bool?
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
}
};
@@ -752,8 +829,7 @@ class MBlazeTargetInfo : public TargetInfo {
public:
MBlazeTargetInfo(const std::string& triple) : TargetInfo(triple) {
- DescriptionString = "E-p:32:32-i8:8:8-i16:16:16-i64:32:32-f64:32:32-"
- "v64:32:32-v128:32:32-n32";
+ DescriptionString = "E-p:32:32:32-i8:8:8-i16:16:16";
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
@@ -875,8 +951,9 @@ void MBlazeTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
namespace {
// Namespace for x86 abstract base class
const Builtin::Info BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES, false },
#include "clang/Basic/BuiltinsX86.def"
};
@@ -1010,6 +1087,9 @@ void X86TargetInfo::getDefaultFeatures(const std::string &CPU,
CPU == "athlon-fx") {
setFeatureEnabled(Features, "sse2", true);
setFeatureEnabled(Features, "3dnowa", true);
+ } else if (CPU == "k8-sse3") {
+ setFeatureEnabled(Features, "sse3", true);
+ setFeatureEnabled(Features, "3dnowa", true);
} else if (CPU == "c3-2")
setFeatureEnabled(Features, "sse", true);
}
@@ -1116,13 +1196,13 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
.Case("mmx", MMX)
.Default(NoMMXSSE);
SSELevel = std::max(SSELevel, Level);
-
- AMD3DNowEnum ThreeDNowLevel =
+
+ AMD3DNowEnum ThreeDNowLevel =
llvm::StringSwitch<AMD3DNowEnum>(Features[i].substr(1))
.Case("3dnowa", AMD3DNowAthlon)
.Case("3dnow", AMD3DNow)
.Default(NoAMD3DNow);
-
+
AMD3DNowLevel = std::max(AMD3DNowLevel, ThreeDNowLevel);
}
}
@@ -1184,7 +1264,24 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case NoMMXSSE:
break;
}
-
+
+ if (Opts.Microsoft && PointerWidth == 32) {
+ switch (SSELevel) {
+ case SSE42:
+ case SSE41:
+ case SSSE3:
+ case SSE3:
+ case SSE2:
+ Builder.defineMacro("_M_IX86_FP", llvm::Twine(2));
+ break;
+ case SSE1:
+ Builder.defineMacro("_M_IX86_FP", llvm::Twine(1));
+ break;
+ default:
+ Builder.defineMacro("_M_IX86_FP", llvm::Twine(0));
+ }
+ }
+
// Each case falls through to the previous one here.
switch (AMD3DNowLevel) {
case AMD3DNowAthlon:
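The switch above reproduces MSVC's /arch reporting macro for 32-bit x86: _M_IX86_FP is 2 for SSE2 and above, 1 for plain SSE, and 0 otherwise. Consumer code typically tests it like this (preprocessor-only sketch):

    #if defined(_M_IX86_FP) && _M_IX86_FP >= 2
      /* SSE2 is part of the baseline; SSE2 intrinsics are always safe */
    #elif defined(_M_IX86_FP) && _M_IX86_FP == 1
      /* SSE1 only */
    #else
      /* x87 floating point, or not 32-bit x86 */
    #endif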
@@ -1241,6 +1338,7 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
return false;
}
+
std::string
X86TargetInfo::convertConstraint(const char Constraint) const {
switch (Constraint) {
@@ -1250,6 +1348,8 @@ X86TargetInfo::convertConstraint(const char Constraint) const {
case 'd': return std::string("{dx}");
case 'S': return std::string("{si}");
case 'D': return std::string("{di}");
+ case 'p': // address
+ return std::string("im");
case 't': // top of floating point stack.
return std::string("{st}");
case 'u': // second from top of floating point stack.
@@ -1284,7 +1384,7 @@ public:
virtual const char *getVAListDeclaration() const {
return "typedef char* __builtin_va_list;";
}
-
+
int getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0) return 0;
if (RegNo == 1) return 2;
@@ -1325,10 +1425,10 @@ public:
namespace {
// x86-32 Windows target
-class WindowsX86_32TargetInfo : public X86_32TargetInfo {
+class WindowsX86_32TargetInfo : public WindowsTargetInfo<X86_32TargetInfo> {
public:
WindowsX86_32TargetInfo(const std::string& triple)
- : X86_32TargetInfo(triple) {
+ : WindowsTargetInfo<X86_32TargetInfo>(triple) {
TLSSupported = false;
WCharType = UnsignedShort;
DoubleAlign = LongLongAlign = 64;
@@ -1338,12 +1438,7 @@ public:
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- X86_32TargetInfo::getTargetDefines(Opts, Builder);
- // This list is based off of the the list of things MingW defines
- Builder.defineMacro("_WIN32");
- DefineStd(Builder, "WIN32", Opts);
- DefineStd(Builder, "WINNT", Opts);
- Builder.defineMacro("_X86_");
+ WindowsTargetInfo<X86_32TargetInfo>::getTargetDefines(Opts, Builder);
}
};
} // end anonymous namespace
@@ -1355,16 +1450,17 @@ class VisualStudioWindowsX86_32TargetInfo : public WindowsX86_32TargetInfo {
public:
VisualStudioWindowsX86_32TargetInfo(const std::string& triple)
: WindowsX86_32TargetInfo(triple) {
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder);
// The value of the following reflects processor type.
// 300=386, 400=486, 500=Pentium, 600=Blend (default)
// We lost the original triple, so we use the default.
Builder.defineMacro("_M_IX86", "600");
- Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
- Builder.defineMacro("_STDCALL_SUPPORTED");
}
};
} // end anonymous namespace
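Dropping LongDoubleWidth to 64 with the IEEEdouble format matches MSVC, where long double has the same representation as double. A one-line check that holds under this target (C++11 static_assert used purely for the sketch):

    static_assert(sizeof(long double) == sizeof(double),
                  "MSVC-compatible x86: long double is 64-bit IEEE double");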
@@ -1379,6 +1475,9 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ Builder.defineMacro("_X86_");
Builder.defineMacro("__MSVCRT__");
Builder.defineMacro("__MINGW32__");
Builder.defineMacro("__declspec", "__declspec");
@@ -1420,6 +1519,7 @@ public:
SizeType = UnsignedLong;
IntPtrType = SignedLong;
PtrDiffType = SignedLong;
+ this->UserLabelPrefix = "";
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -1461,7 +1561,7 @@ public:
"} __va_list_tag;"
"typedef __va_list_tag __builtin_va_list[1];";
}
-
+
int getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0) return 0;
if (RegNo == 1) return 1;
@@ -1472,23 +1572,29 @@ public:
namespace {
// x86-64 Windows target
-class WindowsX86_64TargetInfo : public X86_64TargetInfo {
+class WindowsX86_64TargetInfo : public WindowsTargetInfo<X86_64TargetInfo> {
public:
WindowsX86_64TargetInfo(const std::string& triple)
- : X86_64TargetInfo(triple) {
+ : WindowsTargetInfo<X86_64TargetInfo>(triple) {
TLSSupported = false;
WCharType = UnsignedShort;
LongWidth = LongAlign = 32;
- DoubleAlign = LongLongAlign = 64;
+ DoubleAlign = LongLongAlign = 64;
IntMaxType = SignedLongLong;
UIntMaxType = UnsignedLongLong;
Int64Type = SignedLongLong;
+ SizeType = UnsignedLongLong;
+ PtrDiffType = SignedLongLong;
+ IntPtrType = SignedLongLong;
+ this->UserLabelPrefix = "";
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- X86_64TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder);
Builder.defineMacro("_WIN64");
- DefineStd(Builder, "WIN64", Opts);
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
}
};
} // end anonymous namespace
@@ -1503,11 +1609,9 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
Builder.defineMacro("_M_X64");
- Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
- }
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
+ Builder.defineMacro("_M_AMD64");
}
};
} // end anonymous namespace
@@ -1522,9 +1626,10 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN64", Opts);
Builder.defineMacro("__MSVCRT__");
Builder.defineMacro("__MINGW64__");
- Builder.defineMacro("__declspec");
+ Builder.defineMacro("__declspec", "__declspec");
}
};
} // end anonymous namespace
@@ -1590,7 +1695,7 @@ public:
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
-
+
// FIXME: Should we just treat this as a feature?
IsThumb = getTriple().getArchName().startswith("thumb");
if (IsThumb) {
@@ -1655,7 +1760,7 @@ public:
else if (CPU == "cortex-a8" || CPU == "cortex-a9")
Features["neon"] = true;
}
-
+
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
const std::string &Name,
bool Enabled) const {
@@ -1715,6 +1820,7 @@ public:
.Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
.Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
.Cases("cortex-a8", "cortex-a9", "7A")
+ .Case("cortex-m3", "7M")
.Default(0);
}
virtual bool setCPU(const std::string &Name) {
@@ -1816,10 +1922,17 @@ const char * const ARMTargetInfo::GCCRegNames[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
- "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
- // FIXME: Need double and NEON registers, but we need support for aliasing
- // multiple registers for that.
+ // Double registers
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
+
+ // Quad registers
+ "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
};
void ARMTargetInfo::getGCCRegNames(const char * const *&Names,
@@ -1845,6 +1958,8 @@ const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
{ { "r13" }, "sp" },
{ { "r14" }, "lr" },
{ { "r15" }, "pc" },
+ // The S, D and Q registers overlap, but aren't really aliases; we
+ // don't want to substitute one of these for a different-sized one.
};
void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -1854,8 +1969,9 @@ void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
}
const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES, false },
#include "clang/Basic/BuiltinsARM.def"
};
} // end anonymous namespace.
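With the d and q registers now listed, and deliberately left out of the alias table because they overlap the s registers at different widths, NEON registers become legal in clobber lists. A small example that this change makes accepted (ARM with NEON assumed):

    void zero_q0(void) {
      /* q0 is now a recognized register name, so it may be clobbered */
      __asm__ volatile("vmov.i32 q0, #0" : : : "q0");
    }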
@@ -1882,17 +1998,37 @@ namespace {
class SparcV8TargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char * const GCCRegNames[];
+ bool SoftFloat;
public:
SparcV8TargetInfo(const std::string& triple) : TargetInfo(triple) {
// FIXME: Support Sparc quad-precision long double?
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
}
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ const std::string &Name,
+ bool Enabled) const {
+ if (Name == "soft-float")
+ Features[Name] = Enabled;
+ else
+ return false;
+
+ return true;
+ }
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ SoftFloat = false;
+ for (unsigned i = 0, e = Features.size(); i != e; ++i)
+ if (Features[i] == "+soft-float")
+ SoftFloat = true;
+ }
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
DefineStd(Builder, "sparc", Opts);
Builder.defineMacro("__sparcv8");
Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ if (SoftFloat)
+ Builder.defineMacro("SOFT_FLOAT", "1");
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
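The three additions follow clang's usual target-feature pattern: setFeatureEnabled admits the feature name, HandleTargetFeatures latches the "+soft-float" spelling, and getTargetDefines publishes the result. Code built for such a target can then test the macro defined above (sketch; USE_FIXED_POINT is a hypothetical project-local switch):

    #ifdef SOFT_FLOAT
    /* Floating point lowers to library calls on this build; prefer
       integer or fixed-point math in hot paths. */
    #define USE_FIXED_POINT 1
    #endif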
@@ -1991,75 +2127,6 @@ public:
} // end anonymous namespace.
namespace {
- class PIC16TargetInfo : public TargetInfo{
- public:
- PIC16TargetInfo(const std::string& triple) : TargetInfo(triple) {
- TLSSupported = false;
- IntWidth = 16;
- LongWidth = LongLongWidth = 32;
- PointerWidth = 16;
- IntAlign = 8;
- LongAlign = LongLongAlign = 8;
- PointerAlign = 8;
- SizeType = UnsignedInt;
- IntMaxType = SignedLong;
- UIntMaxType = UnsignedLong;
- IntPtrType = SignedShort;
- PtrDiffType = SignedInt;
- SigAtomicType = SignedLong;
- FloatWidth = 32;
- FloatAlign = 32;
- DoubleWidth = 32;
- DoubleAlign = 32;
- LongDoubleWidth = 32;
- LongDoubleAlign = 32;
- FloatFormat = &llvm::APFloat::IEEEsingle;
- DoubleFormat = &llvm::APFloat::IEEEsingle;
- LongDoubleFormat = &llvm::APFloat::IEEEsingle;
- DescriptionString = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-f32:32:32-n8";
-
- }
- virtual uint64_t getPointerWidthV(unsigned AddrSpace) const { return 16; }
- virtual uint64_t getPointerAlignV(unsigned AddrSpace) const { return 8; }
- virtual void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- Builder.defineMacro("__pic16");
- Builder.defineMacro("__PIC16");
- Builder.defineMacro("rom", "__attribute__((address_space(1)))");
- Builder.defineMacro("ram", "__attribute__((address_space(0)))");
- Builder.defineMacro("__section(SectName)",
- "__attribute__((section(SectName)))");
- Builder.defineMacro("near",
- "__attribute__((section(\"Address=NEAR\")))");
- Builder.defineMacro("__address(Addr)",
- "__attribute__((section(\"Address=\"#Addr)))");
- Builder.defineMacro("__config(conf)", "asm(\"CONFIG \"#conf)");
- Builder.defineMacro("__idlocs(value)", "asm(\"__IDLOCS \"#value)");
- Builder.defineMacro("interrupt",
- "__attribute__((section(\"interrupt=0x4\"))) \
- __attribute__((used))");
- }
- virtual void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const {}
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
- }
- virtual const char *getClobbers() const {
- return "";
- }
- virtual void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {}
- virtual bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const {
- return true;
- }
- virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {}
- virtual bool useGlobalsForAutomaticVariables() const {return true;}
- };
-}
-
-namespace {
class MSP430TargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
public:
@@ -2294,8 +2361,8 @@ namespace {
LongDoubleFormat = &llvm::APFloat::IEEEsingle;
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-"
"i16:16:32-i32:32:32-i64:32:32-"
- "f32:32:32-f64:64:64-v64:64:64-"
- "v128:128:128-a0:0:64-n32";
+ "f32:32:32-f64:32:32-v64:32:32-"
+ "v128:32:32-a0:0:32-n32";
}
virtual void getTargetDefines(const LangOptions &Opts,
@@ -2399,7 +2466,7 @@ public:
};
const char * const MipsTargetInfo::GCCRegNames[] = {
- "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
"$24", "$25", "$26", "$27", "$28", "$sp", "$fp", "$31",
@@ -2526,12 +2593,9 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new LinuxTargetInfo<MipselTargetInfo>(T);
return new MipselTargetInfo(T);
- case llvm::Triple::pic16:
- return new PIC16TargetInfo(T);
-
case llvm::Triple::ppc:
if (os == llvm::Triple::Darwin)
- return new DarwinPPCTargetInfo(T);
+ return new DarwinPPC32TargetInfo(T);
else if (os == llvm::Triple::FreeBSD)
return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
return new PPC32TargetInfo(T);
@@ -2615,10 +2679,13 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new FreeBSDTargetInfo<X86_64TargetInfo>(T);
case llvm::Triple::Solaris:
return new SolarisTargetInfo<X86_64TargetInfo>(T);
- case llvm::Triple::MinGW64:
+ case llvm::Triple::MinGW32:
return new MinGWX86_64TargetInfo(T);
case llvm::Triple::Win32: // This is what Triple.h supports now.
- return new VisualStudioWindowsX86_64TargetInfo(T);
+ if (Triple.getEnvironment() == llvm::Triple::MachO)
+ return new DarwinX86_64TargetInfo(T);
+ else
+ return new VisualStudioWindowsX86_64TargetInfo(T);
default:
return new X86_64TargetInfo(T);
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
index 986f4d5..3a82e5d 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -13,6 +13,7 @@
#include "clang/Basic/Version.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Config/config.h"
#include <cstring>
#include <cstdlib>
@@ -20,45 +21,52 @@ using namespace std;
namespace clang {
-llvm::StringRef getClangRepositoryPath() {
- static const char URL[] = "$URL: http://llvm.org/svn/llvm-project/cfe/tags/RELEASE_28/lib/Basic/Version.cpp $";
- const char *URLEnd = URL + strlen(URL);
+std::string getClangRepositoryPath() {
+#ifdef SVN_REPOSITORY
+ llvm::StringRef URL(SVN_REPOSITORY);
+#else
+ llvm::StringRef URL("");
+#endif
- const char *End = strstr(URL, "/lib/Basic");
- if (End)
- URLEnd = End;
+ // If SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
+ // pick up a tag in an SVN export, for example.
+ static llvm::StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ if (URL.empty()) {
+ URL = SVNRepository.slice(SVNRepository.find(':'),
+ SVNRepository.find("/lib/Basic"));
+ }
// Strip off version from a build from an integration branch.
- End = strstr(URL, "/src/tools/clang");
- if (End)
- URLEnd = End;
+ URL = URL.slice(0, URL.find("/src/tools/clang"));
- const char *Begin = strstr(URL, "cfe/");
- if (Begin)
- return llvm::StringRef(Begin + 4, URLEnd - Begin - 4);
+ // Trim path prefix off, assuming path came from standard cfe path.
+ size_t Start = URL.find("cfe/");
+ if (Start != llvm::StringRef::npos)
+ URL = URL.substr(Start + 4);
- return llvm::StringRef(URL, URLEnd - URL);
+ return URL;
}
std::string getClangRevision() {
#ifdef SVN_REVISION
- if (SVN_REVISION[0] != '\0') {
- std::string revision;
- llvm::raw_string_ostream OS(revision);
- OS << strtol(SVN_REVISION, 0, 10);
- return OS.str();
- }
-#endif
+ return SVN_REVISION;
+#else
return "";
+#endif
}
std::string getClangFullRepositoryVersion() {
std::string buf;
llvm::raw_string_ostream OS(buf);
- OS << getClangRepositoryPath();
- const std::string &Revision = getClangRevision();
- if (!Revision.empty())
- OS << ' ' << Revision;
+ std::string Path = getClangRepositoryPath();
+ std::string Revision = getClangRevision();
+ if (!Path.empty())
+ OS << Path;
+ if (!Revision.empty()) {
+ if (!Path.empty())
+ OS << ' ';
+ OS << Revision;
+ }
return OS.str();
}
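The rewrite leans on llvm::StringRef::slice(Start, End), which returns the half-open range [Start, End) with both indices clamped to the string size; since find() returns npos when a marker is absent, each trimming step is a no-op on strings that lack the marker. A minimal sketch of the same idiom:

    #include "llvm/ADT/StringRef.h"

    // Drop everything from the first occurrence of Marker onward; if Marker
    // is absent, find() returns npos, slice() clamps, and S is unchanged.
    static llvm::StringRef dropFrom(llvm::StringRef S, llvm::StringRef Marker) {
      return S.slice(0, S.find(Marker));
    }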
@@ -70,9 +78,14 @@ std::string getClangFullVersion() {
#endif
OS << "clang version " CLANG_VERSION_STRING " ("
<< getClangFullRepositoryVersion() << ')';
+
#ifdef CLANG_VENDOR_SUFFIX
OS << CLANG_VENDOR_SUFFIX;
+#elif defined(CLANG_VENDOR)
+ // If a vendor is supplied, include the base LLVM version as well.
+ OS << " (based on LLVM " << PACKAGE_VERSION << ")";
#endif
+
return OS.str();
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
deleted file mode 100644
index 3c1a6d1..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
+++ /dev/null
@@ -1,585 +0,0 @@
-//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines BasicObjCFoundationChecks, a class that encapsulates
-// a set of simple checks to run on Objective-C code using Apple's Foundation
-// classes.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BasicObjCFoundationChecks.h"
-
-#include "clang/Checker/PathSensitive/ExplodedGraph.h"
-#include "clang/Checker/PathSensitive/GRSimpleAPICheck.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/Expr.h"
-#include "clang/AST/ExprObjC.h"
-#include "clang/AST/ASTContext.h"
-
-using namespace clang;
-
-static const ObjCInterfaceType* GetReceiverType(const ObjCMessageExpr* ME) {
- QualType T;
- switch (ME->getReceiverKind()) {
- case ObjCMessageExpr::Instance:
- T = ME->getInstanceReceiver()->getType();
- break;
-
- case ObjCMessageExpr::SuperInstance:
- T = ME->getSuperType();
- break;
-
- case ObjCMessageExpr::Class:
- case ObjCMessageExpr::SuperClass:
- return 0;
- }
-
- if (const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>())
- return PT->getInterfaceType();
-
- return NULL;
-}
-
-static const char* GetReceiverNameType(const ObjCMessageExpr* ME) {
- if (const ObjCInterfaceType *ReceiverType = GetReceiverType(ME))
- return ReceiverType->getDecl()->getIdentifier()->getNameStart();
- return NULL;
-}
-
-namespace {
-
-class APIMisuse : public BugType {
-public:
- APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
-};
-
-class BasicObjCFoundationChecks : public GRSimpleAPICheck {
- APIMisuse *BT;
- BugReporter& BR;
- ASTContext &Ctx;
-
- bool isNSString(const ObjCInterfaceType *T, llvm::StringRef suffix);
- bool AuditNSString(ExplodedNode* N, const ObjCMessageExpr* ME);
-
- bool CheckNilArg(ExplodedNode* N, unsigned Arg);
-
-public:
- BasicObjCFoundationChecks(ASTContext& ctx, BugReporter& br)
- : BT(0), BR(br), Ctx(ctx) {}
-
- bool Audit(ExplodedNode* N, GRStateManager&);
-
-private:
- void WarnNilArg(ExplodedNode* N, const ObjCMessageExpr* ME, unsigned Arg) {
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
- os << "Argument to '" << GetReceiverNameType(ME) << "' method '"
- << ME->getSelector().getAsString() << "' cannot be nil.";
-
- // Lazily create the BugType object for NilArg. This will be owned
- // by the BugReporter object 'BR' once we call BR.EmitWarning.
- if (!BT) BT = new APIMisuse("nil argument");
-
- RangedBugReport *R = new RangedBugReport(*BT, os.str(), N);
- R->addRange(ME->getArg(Arg)->getSourceRange());
- BR.EmitReport(R);
- }
-};
-
-} // end anonymous namespace
-
-
-GRSimpleAPICheck*
-clang::CreateBasicObjCFoundationChecks(ASTContext& Ctx, BugReporter& BR) {
- return new BasicObjCFoundationChecks(Ctx, BR);
-}
-
-
-
-bool BasicObjCFoundationChecks::Audit(ExplodedNode* N,
- GRStateManager&) {
-
- const ObjCMessageExpr* ME =
- cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
- const ObjCInterfaceType *ReceiverType = GetReceiverType(ME);
-
- if (!ReceiverType)
- return false;
-
- if (isNSString(ReceiverType,
- ReceiverType->getDecl()->getIdentifier()->getName()))
- return AuditNSString(N, ME);
-
- return false;
-}
-
-static inline bool isNil(SVal X) {
- return isa<loc::ConcreteInt>(X);
-}
-
-//===----------------------------------------------------------------------===//
-// Error reporting.
-//===----------------------------------------------------------------------===//
-
-bool BasicObjCFoundationChecks::CheckNilArg(ExplodedNode* N, unsigned Arg) {
- const ObjCMessageExpr* ME =
- cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
- const Expr * E = ME->getArg(Arg);
-
- if (isNil(N->getState()->getSVal(E))) {
- WarnNilArg(N, ME, Arg);
- return true;
- }
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// NSString checking.
-//===----------------------------------------------------------------------===//
-
-bool BasicObjCFoundationChecks::isNSString(const ObjCInterfaceType *T,
- llvm::StringRef ClassName) {
- return ClassName == "NSString" || ClassName == "NSMutableString";
-}
-
-bool BasicObjCFoundationChecks::AuditNSString(ExplodedNode* N,
- const ObjCMessageExpr* ME) {
-
- Selector S = ME->getSelector();
-
- if (S.isUnarySelector())
- return false;
-
- // FIXME: This is going to be really slow doing these checks with
- // lexical comparisons.
-
- std::string NameStr = S.getAsString();
- llvm::StringRef Name(NameStr);
- assert(!Name.empty());
-
- // FIXME: Checking for initWithFormat: will not work in most cases
- // yet because [NSString alloc] returns id, not NSString*. We will
- // need support for tracking expected-type information in the analyzer
- // to find these errors.
- if (Name == "caseInsensitiveCompare:" ||
- Name == "compare:" ||
- Name == "compare:options:" ||
- Name == "compare:options:range:" ||
- Name == "compare:options:range:locale:" ||
- Name == "componentsSeparatedByCharactersInSet:" ||
- Name == "initWithFormat:")
- return CheckNilArg(N, 0);
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Error reporting.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AuditCFNumberCreate : public GRSimpleAPICheck {
- APIMisuse* BT;
-
- // FIXME: Either this should be refactored into GRSimpleAPICheck, or
- // it should always be passed with a call to Audit. The latter
- // approach makes this class more stateless.
- ASTContext& Ctx;
- IdentifierInfo* II;
- BugReporter& BR;
-
-public:
- AuditCFNumberCreate(ASTContext& ctx, BugReporter& br)
- : BT(0), Ctx(ctx), II(&Ctx.Idents.get("CFNumberCreate")), BR(br){}
-
- ~AuditCFNumberCreate() {}
-
- bool Audit(ExplodedNode* N, GRStateManager&);
-
-private:
- void AddError(const TypedRegion* R, const Expr* Ex, ExplodedNode *N,
- uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
-};
-} // end anonymous namespace
-
-enum CFNumberType {
- kCFNumberSInt8Type = 1,
- kCFNumberSInt16Type = 2,
- kCFNumberSInt32Type = 3,
- kCFNumberSInt64Type = 4,
- kCFNumberFloat32Type = 5,
- kCFNumberFloat64Type = 6,
- kCFNumberCharType = 7,
- kCFNumberShortType = 8,
- kCFNumberIntType = 9,
- kCFNumberLongType = 10,
- kCFNumberLongLongType = 11,
- kCFNumberFloatType = 12,
- kCFNumberDoubleType = 13,
- kCFNumberCFIndexType = 14,
- kCFNumberNSIntegerType = 15,
- kCFNumberCGFloatType = 16
-};
-
-namespace {
- template<typename T>
- class Optional {
- bool IsKnown;
- T Val;
- public:
- Optional() : IsKnown(false), Val(0) {}
- Optional(const T& val) : IsKnown(true), Val(val) {}
-
- bool isKnown() const { return IsKnown; }
-
- const T& getValue() const {
- assert (isKnown());
- return Val;
- }
-
- operator const T&() const {
- return getValue();
- }
- };
-}
-
-static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
- static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
-
- if (i < kCFNumberCharType)
- return FixedSize[i-1];
-
- QualType T;
-
- switch (i) {
- case kCFNumberCharType: T = Ctx.CharTy; break;
- case kCFNumberShortType: T = Ctx.ShortTy; break;
- case kCFNumberIntType: T = Ctx.IntTy; break;
- case kCFNumberLongType: T = Ctx.LongTy; break;
- case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
- case kCFNumberFloatType: T = Ctx.FloatTy; break;
- case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
- case kCFNumberCFIndexType:
- case kCFNumberNSIntegerType:
- case kCFNumberCGFloatType:
- // FIXME: We need a way to map from names to Type*.
- default:
- return Optional<uint64_t>();
- }
-
- return Ctx.getTypeSize(T);
-}
-
-#if 0
-static const char* GetCFNumberTypeStr(uint64_t i) {
- static const char* Names[] = {
- "kCFNumberSInt8Type",
- "kCFNumberSInt16Type",
- "kCFNumberSInt32Type",
- "kCFNumberSInt64Type",
- "kCFNumberFloat32Type",
- "kCFNumberFloat64Type",
- "kCFNumberCharType",
- "kCFNumberShortType",
- "kCFNumberIntType",
- "kCFNumberLongType",
- "kCFNumberLongLongType",
- "kCFNumberFloatType",
- "kCFNumberDoubleType",
- "kCFNumberCFIndexType",
- "kCFNumberNSIntegerType",
- "kCFNumberCGFloatType"
- };
-
- return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
-}
-#endif
-
-bool AuditCFNumberCreate::Audit(ExplodedNode* N,GRStateManager&){
- const CallExpr* CE =
- cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
- const Expr* Callee = CE->getCallee();
- SVal CallV = N->getState()->getSVal(Callee);
- const FunctionDecl* FD = CallV.getAsFunctionDecl();
-
- if (!FD || FD->getIdentifier() != II || CE->getNumArgs()!=3)
- return false;
-
- // Get the value of the "theType" argument.
- SVal TheTypeVal = N->getState()->getSVal(CE->getArg(1));
-
- // FIXME: We really should allow ranges of valid theType values, and
- // bifurcate the state appropriately.
- nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
-
- if (!V)
- return false;
-
- uint64_t NumberKind = V->getValue().getLimitedValue();
- Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
-
- // FIXME: In some cases we can emit an error.
- if (!TargetSize.isKnown())
- return false;
-
- // Look at the value of the integer being passed by reference. Essentially
- // we want to catch cases where the value passed in is not equal to the
- // size of the type being created.
- SVal TheValueExpr = N->getState()->getSVal(CE->getArg(2));
-
- // FIXME: Eventually we should handle arbitrary locations. We can do this
- // by having an enhanced memory model that does low-level typing.
- loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
-
- if (!LV)
- return false;
-
- const TypedRegion* R = dyn_cast<TypedRegion>(LV->StripCasts());
-
- if (!R)
- return false;
-
- QualType T = Ctx.getCanonicalType(R->getValueType());
-
- // FIXME: If the pointee isn't an integer type, should we flag a warning?
- // People can do weird stuff with pointers.
-
- if (!T->isIntegerType())
- return false;
-
- uint64_t SourceSize = Ctx.getTypeSize(T);
-
- // CHECK: is SourceSize == TargetSize
-
- if (SourceSize == TargetSize)
- return false;
-
- AddError(R, CE->getArg(2), N, SourceSize, TargetSize, NumberKind);
-
- // FIXME: We can actually create an abstract "CFNumber" object that has
- // the bits initialized to the provided values.
- return SourceSize < TargetSize;
-}
-
-void AuditCFNumberCreate::AddError(const TypedRegion* R, const Expr* Ex,
- ExplodedNode *N,
- uint64_t SourceSize, uint64_t TargetSize,
- uint64_t NumberKind) {
-
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- os << (SourceSize == 8 ? "An " : "A ")
- << SourceSize << " bit integer is used to initialize a CFNumber "
- "object that represents "
- << (TargetSize == 8 ? "an " : "a ")
- << TargetSize << " bit integer. ";
-
- if (SourceSize < TargetSize)
- os << (TargetSize - SourceSize)
- << " bits of the CFNumber value will be garbage." ;
- else
- os << (SourceSize - TargetSize)
- << " bits of the input integer will be lost.";
-
- // Lazily create the BugType object. This will be owned
- // by the BugReporter object 'BR' once we call BR.EmitWarning.
- if (!BT) BT = new APIMisuse("Bad use of CFNumberCreate");
- RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
- report->addRange(Ex->getSourceRange());
- BR.EmitReport(report);
-}
-
-GRSimpleAPICheck*
-clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
- return new AuditCFNumberCreate(Ctx, BR);
-}
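
For context, a minimal sketch (not part of this change; CoreFoundation API,
compiled as C++) of the size mismatch this audit reports:

    #include <CoreFoundation/CoreFoundation.h>

    CFNumberRef makeBadNumber() {
      short x = 5;  // 16-bit source value
      // theType promises a 64-bit payload, but &x points at only 16 bits,
      // so 48 bits of the resulting CFNumber would be garbage.
      return CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &x);
    }
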
-
-//===----------------------------------------------------------------------===//
-// CFRetain/CFRelease checking for null arguments.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class CFRetainReleaseChecker : public CheckerVisitor<CFRetainReleaseChecker> {
- APIMisuse *BT;
- IdentifierInfo *Retain, *Release;
-
-public:
- CFRetainReleaseChecker(ASTContext& Ctx): BT(NULL),
- Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease"))
- {}
-
- static void *getTag() { static int x = 0; return &x; }
-
- void PreVisitCallExpr(CheckerContext& C, const CallExpr* CE);
-};
-} // end anonymous namespace
-
-
-void CFRetainReleaseChecker::PreVisitCallExpr(CheckerContext& C,
- const CallExpr* CE) {
- // If the CallExpr doesn't have exactly 1 argument, just give up checking.
- if (CE->getNumArgs() != 1)
- return;
-
- // Get the function declaration of the callee.
- const GRState* state = C.getState();
- SVal X = state->getSVal(CE->getCallee());
- const FunctionDecl* FD = X.getAsFunctionDecl();
-
- if (!FD)
- return;
-
- // Check if we called CFRetain/CFRelease.
- const IdentifierInfo *FuncII = FD->getIdentifier();
- if (!(FuncII == Retain || FuncII == Release))
- return;
-
- // FIXME: The rest of this just checks that the argument is non-null.
- // It should probably be refactored and combined with AttrNonNullChecker.
-
- // Get the argument's value.
- const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg);
- DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
- if (!DefArgVal)
- return;
-
- // Get a NULL value.
- ValueManager &ValMgr = C.getValueManager();
- DefinedSVal Zero = cast<DefinedSVal>(ValMgr.makeZeroVal(Arg->getType()));
-
- // Make an expression asserting that they're equal.
- SValuator &SVator = ValMgr.getSValuator();
- DefinedOrUnknownSVal ArgIsNull = SVator.EvalEQ(state, Zero, *DefArgVal);
-
- // Are they equal?
- const GRState *stateTrue, *stateFalse;
- llvm::tie(stateTrue, stateFalse) = state->Assume(ArgIsNull);
-
- if (stateTrue && !stateFalse) {
- ExplodedNode *N = C.GenerateSink(stateTrue);
- if (!N)
- return;
-
- if (!BT)
- BT = new APIMisuse("null passed to CFRetain/CFRelease");
-
- const char *description = (FuncII == Retain)
- ? "Null pointer argument in call to CFRetain"
- : "Null pointer argument in call to CFRelease";
-
- EnhancedBugReport *report = new EnhancedBugReport(*BT, description, N);
- report->addRange(Arg->getSourceRange());
- report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Arg);
-
- C.EmitReport(report);
- return;
- }
-
- // From here on, we know the argument is non-null.
- C.addTransition(stateFalse);
-}
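
A minimal sketch of the case that reaches the sink above, assuming the
analyzer can prove the argument is null on every path:

    #include <CoreFoundation/CoreFoundation.h>

    void retainNothing() {
      CFTypeRef obj = 0;  // provably null on all paths
      CFRetain(obj);      // warning: Null pointer argument in call to CFRetain
    }
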
-
-//===----------------------------------------------------------------------===//
-// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class ClassReleaseChecker :
- public CheckerVisitor<ClassReleaseChecker> {
- Selector releaseS;
- Selector retainS;
- Selector autoreleaseS;
- Selector drainS;
- BugType *BT;
-public:
- ClassReleaseChecker(ASTContext &Ctx)
- : releaseS(GetNullarySelector("release", Ctx)),
- retainS(GetNullarySelector("retain", Ctx)),
- autoreleaseS(GetNullarySelector("autorelease", Ctx)),
- drainS(GetNullarySelector("drain", Ctx)),
- BT(0) {}
-
- static void *getTag() { static int x = 0; return &x; }
-
- void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
-};
-}
-
-void ClassReleaseChecker::PreVisitObjCMessageExpr(CheckerContext &C,
- const ObjCMessageExpr *ME) {
- ObjCInterfaceDecl *Class = 0;
- switch (ME->getReceiverKind()) {
- case ObjCMessageExpr::Class:
- Class = ME->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
- break;
-
- case ObjCMessageExpr::SuperClass:
- Class = ME->getSuperType()->getAs<ObjCObjectType>()->getInterface();
- break;
-
- case ObjCMessageExpr::Instance:
- case ObjCMessageExpr::SuperInstance:
- return;
- }
-
- Selector S = ME->getSelector();
- if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
- return;
-
- if (!BT)
- BT = new APIMisuse("message incorrectly sent to class instead of class "
- "instance");
-
- ExplodedNode *N = C.GenerateNode();
-
- if (!N)
- return;
-
- llvm::SmallString<200> buf;
- llvm::raw_svector_ostream os(buf);
-
- os << "The '" << S.getAsString() << "' message should be sent to instances "
- "of class '" << Class->getName()
- << "' and not the class directly";
-
- RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
- report->addRange(ME->getSourceRange());
- C.EmitReport(report);
-}
-
-//===----------------------------------------------------------------------===//
-// Check registration.
-//===----------------------------------------------------------------------===//
-
-void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) {
- ASTContext& Ctx = Eng.getContext();
- BugReporter &BR = Eng.getBugReporter();
-
- Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
- Stmt::ObjCMessageExprClass);
- Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
-
- RegisterNSErrorChecks(BR, Eng, D);
- RegisterNSAutoreleasePoolChecks(Eng);
-
- Eng.registerCheck(new CFRetainReleaseChecker(Ctx));
- Eng.registerCheck(new ClassReleaseChecker(Ctx));
-}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp
deleted file mode 100644
index a49989b..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/GRCXXExprEngine.cpp
+++ /dev/null
@@ -1,240 +0,0 @@
-//===- GRCXXExprEngine.cpp - C++ expr evaluation engine ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the C++ expression evaluation engine.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/AST/DeclCXX.h"
-
-using namespace clang;
-
-void GRExprEngine::EvalArguments(ConstExprIterator AI, ConstExprIterator AE,
- const FunctionProtoType *FnType,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- llvm::SmallVector<CallExprWLItem, 20> WorkList;
- WorkList.reserve(AE - AI);
- WorkList.push_back(CallExprWLItem(AI, Pred));
-
- while (!WorkList.empty()) {
- CallExprWLItem Item = WorkList.back();
- WorkList.pop_back();
-
- if (Item.I == AE) {
- Dst.insert(Item.N);
- continue;
- }
-
- ExplodedNodeSet Tmp;
- const unsigned ParamIdx = Item.I - AI;
- bool VisitAsLvalue = FnType? FnType->getArgType(ParamIdx)->isReferenceType()
- : false;
- if (VisitAsLvalue)
- VisitLValue(*Item.I, Item.N, Tmp);
- else
- Visit(*Item.I, Item.N, Tmp);
-
- ++(Item.I);
- for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI)
- WorkList.push_back(CallExprWLItem(Item.I, *NI));
- }
-}
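
The worklist does a depth-first walk over (next-argument, node) pairs:
evaluating one argument may fork the state into several successor nodes, and
every combination of forks must reach Dst. A simplified standalone model of
the same strategy (all names hypothetical):

    #include <cstddef>
    #include <string>
    #include <vector>

    using State = std::string;  // stand-in for ExplodedNode
    using EvalFn = std::vector<State> (*)(const State&);

    std::vector<State> evalArguments(const std::vector<EvalFn>& args,
                                     const State& pred) {
      struct WLItem { std::size_t next; State s; };
      std::vector<State> dst;
      std::vector<WLItem> wl;
      wl.push_back(WLItem{0, pred});
      while (!wl.empty()) {
        WLItem item = wl.back();
        wl.pop_back();
        if (item.next == args.size()) {  // all arguments evaluated
          dst.push_back(item.s);
          continue;
        }
        // Fork: one worklist entry per successor state of this argument.
        std::vector<State> succs = args[item.next](item.s);
        for (std::size_t i = 0; i < succs.size(); ++i)
          wl.push_back(WLItem{item.next + 1, succs[i]});
      }
      return dst;
    }
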
-
-const CXXThisRegion *GRExprEngine::getCXXThisRegion(const CXXMethodDecl *D,
- const StackFrameContext *SFC) {
- Type *T = D->getParent()->getTypeForDecl();
- QualType PT = getContext().getPointerType(QualType(T,0));
- return ValMgr.getRegionManager().getCXXThisRegion(PT, SFC);
-}
-
-void GRExprEngine::CreateCXXTemporaryObject(const Expr *Ex, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
- const GRState *state = GetState(*I);
-
- // Bind the temporary object to the value of the expression. Then bind
- // the expression to the location of the object.
- SVal V = state->getSVal(Ex);
-
- const MemRegion *R =
- ValMgr.getRegionManager().getCXXObjectRegion(Ex,
- Pred->getLocationContext());
-
- state = state->bindLoc(loc::MemRegionVal(R), V);
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, loc::MemRegionVal(R)));
- }
-}
-
-void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- if (E->isElidable()) {
- VisitAggExpr(E->getArg(0), Dest, Pred, Dst);
- return;
- }
-
- const CXXConstructorDecl *CD = E->getConstructor();
- assert(CD);
-
- if (!(CD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
- // FIXME: invalidate the object.
- return;
-
-
- // Evaluate other arguments.
- ExplodedNodeSet ArgsEvaluated;
- const FunctionProtoType *FnType = CD->getType()->getAs<FunctionProtoType>();
- EvalArguments(E->arg_begin(), E->arg_end(), FnType, Pred, ArgsEvaluated);
- // The callee stack frame context used to create the 'this' parameter region.
- const StackFrameContext *SFC = AMgr.getStackFrame(CD,
- Pred->getLocationContext(),
- E, Builder->getBlock(), Builder->getIndex());
-
- const CXXThisRegion *ThisR = getCXXThisRegion(E->getConstructor(), SFC);
-
- CallEnter Loc(E, SFC->getAnalysisContext(), Pred->getLocationContext());
- for (ExplodedNodeSet::iterator NI = ArgsEvaluated.begin(),
- NE = ArgsEvaluated.end(); NI != NE; ++NI) {
- const GRState *state = GetState(*NI);
- // Set up the 'this' region so that the ctor is evaluated on the object
- // pointed to by 'Dest'.
- state = state->bindLoc(loc::MemRegionVal(ThisR), Dest);
- ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
- if (N)
- Dst.Add(N);
- }
-}
-
-void GRExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- // Get the method type.
- const FunctionProtoType *FnType =
- MCE->getCallee()->getType()->getAs<FunctionProtoType>();
- assert(FnType && "Method type not available");
-
- // Evaluate explicit arguments with a worklist.
- ExplodedNodeSet ArgsEvaluated;
- EvalArguments(MCE->arg_begin(), MCE->arg_end(), FnType, Pred, ArgsEvaluated);
-
- // Evaluate the implicit object argument.
- ExplodedNodeSet AllArgsEvaluated;
- const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee()->IgnoreParens());
- if (!ME)
- return;
- Expr *ObjArgExpr = ME->getBase();
- for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(),
- E = ArgsEvaluated.end(); I != E; ++I) {
- if (ME->isArrow())
- Visit(ObjArgExpr, *I, AllArgsEvaluated);
- else
- VisitLValue(ObjArgExpr, *I, AllArgsEvaluated);
- }
-
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
- assert(MD && "not a CXXMethodDecl?");
-
- if (!(MD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
- // FIXME: conservative method call evaluation.
- return;
-
- const StackFrameContext *SFC = AMgr.getStackFrame(MD,
- Pred->getLocationContext(),
- MCE,
- Builder->getBlock(),
- Builder->getIndex());
- const CXXThisRegion *ThisR = getCXXThisRegion(MD, SFC);
- CallEnter Loc(MCE, SFC->getAnalysisContext(), Pred->getLocationContext());
- for (ExplodedNodeSet::iterator I = AllArgsEvaluated.begin(),
- E = AllArgsEvaluated.end(); I != E; ++I) {
- // Set up 'this' region.
- const GRState *state = GetState(*I);
- state = state->bindLoc(loc::MemRegionVal(ThisR),state->getSVal(ObjArgExpr));
- ExplodedNode *N = Builder->generateNode(Loc, state, *I);
- if (N)
- Dst.Add(N);
- }
-}
-
-void GRExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- if (CNE->isArray()) {
- // FIXME: allocating an array is not handled yet.
- return;
- }
-
- unsigned Count = Builder->getCurrentBlockCount();
- DefinedOrUnknownSVal SymVal = getValueManager().getConjuredSymbolVal(NULL,CNE,
- CNE->getType(), Count);
- const MemRegion *NewReg = cast<loc::MemRegionVal>(SymVal).getRegion();
-
- QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
-
- const ElementRegion *EleReg =
- getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
-
- // Evaluate constructor arguments.
- const FunctionProtoType *FnType = NULL;
- const CXXConstructorDecl *CD = CNE->getConstructor();
- if (CD)
- FnType = CD->getType()->getAs<FunctionProtoType>();
- ExplodedNodeSet ArgsEvaluated;
- EvalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
- FnType, Pred, ArgsEvaluated);
-
- // Initialize the object region and bind the 'new' expression.
- for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(),
- E = ArgsEvaluated.end(); I != E; ++I) {
- const GRState *state = GetState(*I);
-
- if (ObjTy->isRecordType()) {
- state = state->InvalidateRegion(EleReg, CNE, Count);
- } else {
- if (CNE->hasInitializer()) {
- SVal V = state->getSVal(*CNE->constructor_arg_begin());
- state = state->bindLoc(loc::MemRegionVal(EleReg), V);
- } else {
- // Explicitly set to undefined, because currently we retrieve symbolic
- // value from symbolic region.
- state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
- }
- }
- state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
- MakeNode(Dst, CNE, *I, state);
- }
-}
-
-void GRExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
- ExplodedNode *Pred,ExplodedNodeSet &Dst) {
- // Should do more checking.
- ExplodedNodeSet ArgEvaluated;
- Visit(CDE->getArgument(), Pred, ArgEvaluated);
- for (ExplodedNodeSet::iterator I = ArgEvaluated.begin(),
- E = ArgEvaluated.end(); I != E; ++I) {
- const GRState *state = GetState(*I);
- MakeNode(Dst, CDE, *I, state);
- }
-}
-
-void GRExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- // Get the 'this' object region from the StoreManager.
- const MemRegion *R =
- ValMgr.getRegionManager().getCXXThisRegion(
- getContext().getCanonicalType(TE->getType()),
- Pred->getLocationContext());
-
- const GRState *state = GetState(Pred);
- SVal V = state->getSVal(loc::MemRegionVal(R));
- MakeNode(Dst, TE, Pred, state->BindExpr(TE, V));
-}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
deleted file mode 100644
index 7b5b0ed..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//=-- GRExprEngineExperimentalChecks.h ------------------------------*- C++ -*-=
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines functions to instantiate and register experimental
-// checks in GRExprEngine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_GREXPRENGINE_EXPERIMENTAL_CHECKS
-#define LLVM_CLANG_GREXPRENGINE_EXPERIMENTAL_CHECKS
-
-namespace clang {
-
-class GRExprEngine;
-
-void RegisterCStringChecker(GRExprEngine &Eng);
-void RegisterIdempotentOperationChecker(GRExprEngine &Eng);
-void RegisterMallocChecker(GRExprEngine &Eng);
-void RegisterPthreadLockChecker(GRExprEngine &Eng);
-void RegisterStreamChecker(GRExprEngine &Eng);
-void RegisterUnreachableCodeChecker(GRExprEngine &Eng);
-
-} // end clang namespace
-#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
deleted file mode 100644
index f91a759..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//=-- GRExprEngineInternalChecks.h- Builtin GRExprEngine Checks -----*- C++ -*-=
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines functions to instantiate and register the "built-in"
-// checks in GRExprEngine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_GREXPRENGINE_INTERNAL_CHECKS
-#define LLVM_CLANG_GREXPRENGINE_INTERNAL_CHECKS
-
-namespace clang {
-
-class GRExprEngine;
-
-// Foundational checks that handle basic semantics.
-void RegisterAdjustedReturnValueChecker(GRExprEngine &Eng);
-void RegisterArrayBoundChecker(GRExprEngine &Eng);
-void RegisterAttrNonNullChecker(GRExprEngine &Eng);
-void RegisterBuiltinFunctionChecker(GRExprEngine &Eng);
-void RegisterCallAndMessageChecker(GRExprEngine &Eng);
-void RegisterCastToStructChecker(GRExprEngine &Eng);
-void RegisterCastSizeChecker(GRExprEngine &Eng);
-void RegisterDereferenceChecker(GRExprEngine &Eng);
-void RegisterDivZeroChecker(GRExprEngine &Eng);
-void RegisterFixedAddressChecker(GRExprEngine &Eng);
-void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
-void RegisterPointerArithChecker(GRExprEngine &Eng);
-void RegisterPointerSubChecker(GRExprEngine &Eng);
-void RegisterReturnPointerRangeChecker(GRExprEngine &Eng);
-void RegisterReturnUndefChecker(GRExprEngine &Eng);
-void RegisterStackAddrLeakChecker(GRExprEngine &Eng);
-void RegisterUndefBranchChecker(GRExprEngine &Eng);
-void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng);
-void RegisterUndefResultChecker(GRExprEngine &Eng);
-void RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng);
-void RegisterUndefinedAssignmentChecker(GRExprEngine &Eng);
-void RegisterVLASizeChecker(GRExprEngine &Eng);
-
-// API checks.
-void RegisterMacOSXAPIChecker(GRExprEngine &Eng);
-void RegisterOSAtomicChecker(GRExprEngine &Eng);
-void RegisterUnixAPIChecker(GRExprEngine &Eng);
-
-} // end clang namespace
-#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
deleted file mode 100644
index 273e574..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-// SValuator.cpp - Basic class for all SValuator implementations --*- C++ -*--//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines SValuator, the base class for all (complete) SValuator
-// implementations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Checker/PathSensitive/SValuator.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-
-using namespace clang;
-
-
-SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
- SVal L, SVal R, QualType T) {
-
- if (L.isUndef() || R.isUndef())
- return UndefinedVal();
-
- if (L.isUnknown() || R.isUnknown())
- return UnknownVal();
-
- if (isa<Loc>(L)) {
- if (isa<Loc>(R))
- return EvalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T);
-
- return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
- }
-
- if (isa<Loc>(R)) {
- // Support pointer arithmetic where the addend is on the left
- // and the pointer on the right.
- assert(Op == BO_Add);
-
- // Commute the operands.
- return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
- }
-
- return EvalBinOpNN(ST, Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
-}
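
The commuted branch corresponds to source where the integer addend is written
to the left of the pointer, as in this sketch:

    void f() {
      int arr[4];
      int *p = 2 + arr;  // NonLoc + Loc: commuted and evaluated as arr + 2
      (void)p;
    }
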
-
-DefinedOrUnknownSVal SValuator::EvalEQ(const GRState *ST,
- DefinedOrUnknownSVal L,
- DefinedOrUnknownSVal R) {
- return cast<DefinedOrUnknownSVal>(EvalBinOp(ST, BO_EQ, L, R,
- ValMgr.getContext().IntTy));
-}
-
-SVal SValuator::EvalCast(SVal val, QualType castTy, QualType originalTy) {
- if (val.isUnknownOrUndef() || castTy == originalTy)
- return val;
-
- ASTContext &C = ValMgr.getContext();
-
- // For const casts, just propagate the value.
- if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
- if (C.hasSameUnqualifiedType(castTy, originalTy))
- return val;
-
- // Check for casts to real or complex numbers. We don't handle these at all
- // right now.
- if (castTy->isFloatingType() || castTy->isAnyComplexType())
- return UnknownVal();
-
- // Check for casts from integers to integers.
- if (castTy->isIntegerType() && originalTy->isIntegerType())
- return EvalCastNL(cast<NonLoc>(val), castTy);
-
- // Check for casts from pointers to integers.
- if (castTy->isIntegerType() && Loc::IsLocType(originalTy))
- return EvalCastL(cast<Loc>(val), castTy);
-
- // Check for casts from integers to pointers.
- if (Loc::IsLocType(castTy) && originalTy->isIntegerType()) {
- if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
- if (const MemRegion *R = LV->getLoc().getAsRegion()) {
- StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
- R = storeMgr.CastRegion(R, castTy);
- return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
- }
- return LV->getLoc();
- }
- goto DispatchCast;
- }
-
- // Just pass through function and block pointers.
- if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
- assert(Loc::IsLocType(castTy));
- return val;
- }
-
- // Check for casts from array type to another type.
- if (originalTy->isArrayType()) {
- // We will always decay to a pointer.
- val = ValMgr.getStateManager().ArrayToPointer(cast<Loc>(val));
-
- // Are we casting from an array to a pointer? If so just pass on
- // the decayed value.
- if (castTy->isPointerType())
- return val;
-
- // Are we casting from an array to an integer? If so, cast the decayed
- // pointer value to an integer.
- assert(castTy->isIntegerType());
-
- // FIXME: Keep these here for now in case we decide soon that we
- // need the original decayed type.
- // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
- // QualType pointerTy = C.getPointerType(elemTy);
- return EvalCastL(cast<Loc>(val), castTy);
- }
-
- // Check for casts from a region to a specific type.
- if (const MemRegion *R = val.getAsRegion()) {
- // FIXME: We should handle the case where we strip off view layers to get
- // to a desugared type.
-
- assert(Loc::IsLocType(castTy));
- // We get a symbolic function pointer for a dereference of a function
- // pointer, but it is of function type. Example:
-
- // struct FPRec {
- // void (*my_func)(int * x);
- // };
- //
- // int bar(int x);
- //
- // int f1_a(struct FPRec* foo) {
- // int x;
- // (*foo->my_func)(&x);
- // return bar(x)+1; // no-warning
- // }
-
- assert(Loc::IsLocType(originalTy) || originalTy->isFunctionType() ||
- originalTy->isBlockPointerType());
-
- StoreManager &storeMgr = ValMgr.getStateManager().getStoreManager();
-
- // Delegate to store manager to get the result of casting a region to a
- // different type. If the MemRegion* returned is NULL, this expression
- // evaluates to UnknownVal.
- R = storeMgr.CastRegion(R, castTy);
- return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
- }
-
-DispatchCast:
- // All other cases.
- return isa<Loc>(val) ? EvalCastL(cast<Loc>(val), castTy)
- : EvalCastNL(cast<NonLoc>(val), castTy);
-}
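
The integer-to-pointer branch covers round trips like the following sketch,
where a LocAsInteger value still remembers the region it came from:

    void g() {
      int x = 0;
      long i = (long)&x;  // pointer value carried as an integer (LocAsInteger)
      int *p = (int *)i;  // casting back recovers the region for 'x'
      (void)p;
    }
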
diff --git a/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp b/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp
deleted file mode 100644
index 8b7cd7b..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/ValueManager.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//== ValueManager.cpp - Aggregate manager of symbols and SVals --*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines ValueManager, a class that manages symbolic values
-// and SVals created for use by GRExprEngine and related classes. It
-// wraps and owns SymbolManager, MemRegionManager, and BasicValueFactory.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Checker/PathSensitive/ValueManager.h"
-#include "clang/Analysis/AnalysisContext.h"
-
-using namespace clang;
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Utility methods for constructing SVals.
-//===----------------------------------------------------------------------===//
-
-DefinedOrUnknownSVal ValueManager::makeZeroVal(QualType T) {
- if (Loc::IsLocType(T))
- return makeNull();
-
- if (T->isIntegerType())
- return makeIntVal(0, T);
-
- // FIXME: Handle floats.
- // FIXME: Handle structs.
- return UnknownVal();
-}
-
-//===----------------------------------------------------------------------===//
-// Utility methods for constructing Non-Locs.
-//===----------------------------------------------------------------------===//
-
-NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const APSInt& v, QualType T) {
- // The Environment ensures we always get a persistent APSInt in
- // BasicValueFactory, so we don't need to get the APSInt from
- // BasicValueFactory again.
- assert(!Loc::IsLocType(T));
- return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, v, T));
-}
-
-NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const SymExpr *rhs, QualType T) {
- assert(SymMgr.getType(lhs) == SymMgr.getType(rhs));
- assert(!Loc::IsLocType(T));
- return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, T));
-}
-
-
-SVal ValueManager::convertToArrayIndex(SVal V) {
- if (V.isUnknownOrUndef())
- return V;
-
- // Common case: we have an appropriately sized integer.
- if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) {
- const llvm::APSInt& I = CI->getValue();
- if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
- return V;
- }
-
- return SVator->EvalCastNL(cast<NonLoc>(V), ArrayIndexTy);
-}
-
-DefinedOrUnknownSVal
-ValueManager::getRegionValueSymbolVal(const TypedRegion* R) {
- QualType T = R->getValueType();
-
- if (!SymbolManager::canSymbolicate(T))
- return UnknownVal();
-
- SymbolRef sym = SymMgr.getRegionValueSymbol(R);
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- return nonloc::SymbolVal(sym);
-}
-
-DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
- const Expr *E,
- unsigned Count) {
- QualType T = E->getType();
-
- if (!SymbolManager::canSymbolicate(T))
- return UnknownVal();
-
- SymbolRef sym = SymMgr.getConjuredSymbol(E, Count, SymbolTag);
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- return nonloc::SymbolVal(sym);
-}
-
-DefinedOrUnknownSVal ValueManager::getConjuredSymbolVal(const void *SymbolTag,
- const Expr *E,
- QualType T,
- unsigned Count) {
-
- if (!SymbolManager::canSymbolicate(T))
- return UnknownVal();
-
- SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count, SymbolTag);
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- return nonloc::SymbolVal(sym);
-}
-
-DefinedSVal ValueManager::getMetadataSymbolVal(const void *SymbolTag,
- const MemRegion *MR,
- const Expr *E, QualType T,
- unsigned Count) {
- assert(SymbolManager::canSymbolicate(T) && "Invalid metadata symbol type");
-
- SymbolRef sym = SymMgr.getMetadataSymbol(MR, E, T, Count, SymbolTag);
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- return nonloc::SymbolVal(sym);
-}
-
-DefinedOrUnknownSVal
-ValueManager::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
- const TypedRegion *R) {
- QualType T = R->getValueType();
-
- if (!SymbolManager::canSymbolicate(T))
- return UnknownVal();
-
- SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R);
-
- if (Loc::IsLocType(T))
- return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
- return nonloc::SymbolVal(sym);
-}
-
-DefinedSVal ValueManager::getFunctionPointer(const FunctionDecl* FD) {
- return loc::MemRegionVal(MemMgr.getFunctionTextRegion(FD));
-}
-
-DefinedSVal ValueManager::getBlockPointer(const BlockDecl *D,
- CanQualType locTy,
- const LocationContext *LC) {
- const BlockTextRegion *BC =
- MemMgr.getBlockTextRegion(D, locTy, LC->getAnalysisContext());
- const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, LC);
- return loc::MemRegionVal(BD);
-}
-
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 91b77425..ce10398 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -70,11 +70,12 @@ namespace clang {
Kind TheKind;
llvm::PATypeHolder TypeData;
unsigned UIntData;
- bool BoolData;
+ bool BoolData0;
+ bool BoolData1;
ABIArgInfo(Kind K, const llvm::Type *TD=0,
- unsigned UI=0, bool B = false)
- : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+ unsigned UI=0, bool B0 = false, bool B1 = false)
+ : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
@@ -88,8 +89,9 @@ namespace clang {
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
- static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal);
+  static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true,
+                                bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
}
static ABIArgInfo getExpand() {
return ABIArgInfo(Expand);
@@ -105,7 +107,7 @@ namespace clang {
bool canHaveCoerceToType() const {
return TheKind == Direct || TheKind == Extend;
}
-
+
// Direct/Extend accessors
unsigned getDirectOffset() const {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
@@ -115,12 +117,12 @@ namespace clang {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
-
+
void setCoerceToType(const llvm::Type *T) {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
-
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
@@ -129,9 +131,14 @@ namespace clang {
bool getIndirectByVal() const {
assert(TheKind == Indirect && "Invalid kind!");
- return BoolData;
+ return BoolData0;
}
-
+
+ bool getIndirectRealign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData1;
+ }
+
void dump() const;
};
@@ -140,10 +147,10 @@ namespace clang {
class ABIInfo {
public:
CodeGen::CodeGenTypes &CGT;
-
+
ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
virtual ~ABIInfo();
-
+
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::TargetData &getTargetData() const;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index 69efe43..9897b1b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -26,6 +26,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
@@ -42,14 +43,14 @@ class EmitAssemblyHelper {
Timer CodeGenerationTime;
- mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *CodeGenPasses;
mutable PassManager *PerModulePasses;
mutable FunctionPassManager *PerFunctionPasses;
private:
- FunctionPassManager *getCodeGenPasses() const {
+ PassManager *getCodeGenPasses() const {
if (!CodeGenPasses) {
- CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses = new PassManager();
CodeGenPasses->add(new TargetData(TheModule));
}
return CodeGenPasses;
@@ -107,14 +108,22 @@ void EmitAssemblyHelper::CreatePasses() {
OptLevel = 0;
Inlining = CodeGenOpts.NoInlining;
}
+
+ FunctionPassManager *FPM = getPerFunctionPasses();
+
+ TargetLibraryInfo *TLI =
+ new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ FPM->add(TLI);
// In -O0 if checking is disabled, we don't even have per-function passes.
if (CodeGenOpts.VerifyModule)
- getPerFunctionPasses()->add(createVerifierPass());
+ FPM->add(createVerifierPass());
// Assume that standard function passes aren't run for -O0.
if (OptLevel > 0)
- llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+ llvm::createStandardFunctionPasses(FPM, OptLevel);
llvm::Pass *InliningPass = 0;
switch (Inlining) {
@@ -136,8 +145,15 @@ void EmitAssemblyHelper::CreatePasses() {
break;
}
+ PassManager *MPM = getPerModulePasses();
+
+ TLI = new TargetLibraryInfo(Triple(TheModule->getTargetTriple()));
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ MPM->add(TLI);
+
// For now we always create per module passes.
- llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ llvm::createStandardModulePasses(MPM, OptLevel,
CodeGenOpts.OptimizeSize,
CodeGenOpts.UnitAtATime,
CodeGenOpts.UnrollLoops,
@@ -183,7 +199,11 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
llvm::FloatABIType = llvm::FloatABI::Default;
}
+ llvm::LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ llvm::NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ llvm::NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
UnwindTablesMandatory = CodeGenOpts.UnwindTables;
@@ -248,7 +268,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCRelaxAll(true);
// Create the code generator passes.
- FunctionPassManager *PM = getCodeGenPasses();
+ PassManager *PM = getCodeGenPasses();
CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
switch (CodeGenOpts.OptimizationLevel) {
@@ -320,13 +340,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
if (CodeGenPasses) {
PrettyStackTraceString CrashInfo("Code generation");
-
- CodeGenPasses->doInitialization();
- for (Module::iterator I = TheModule->begin(),
- E = TheModule->end(); I != E; ++I)
- if (!I->isDeclaration())
- CodeGenPasses->run(*I);
- CodeGenPasses->doFinalization();
+ CodeGenPasses->run(*TheModule);
}
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index 04f1ef2..a35648d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
@@ -24,397 +25,631 @@
using namespace clang;
using namespace CodeGen;
-CGBlockInfo::CGBlockInfo(const char *N)
- : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
+CGBlockInfo::CGBlockInfo(const BlockExpr *blockExpr, const char *N)
+ : Name(N), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), StructureType(0), Block(blockExpr) {
// Skip asm prefix, if any.
if (Name && Name[0] == '\01')
++Name;
}
+/// Build the given block as a global block.
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn);
-llvm::Constant *CodeGenFunction::
-BuildDescriptorBlockDecl(const BlockExpr *BE, const CGBlockInfo &Info,
- const llvm::StructType* Ty,
- llvm::Constant *BlockVarLayout,
- std::vector<HelperInfo> *NoteForHelper) {
- bool BlockHasCopyDispose = Info.BlockHasCopyDispose;
- CharUnits Size = Info.BlockSize;
- const llvm::Type *UnsignedLongTy
- = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
- llvm::Constant *C;
- std::vector<llvm::Constant*> Elts;
+/// Build the helper function to copy a block.
+static llvm::Constant *buildCopyHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
+}
+
+/// Build the helper function to dispose of a block.
+static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
+}
+
+/// Build the block descriptor constant for a block.
+static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ ASTContext &C = CGM.getContext();
+
+ const llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+
+ llvm::SmallVector<llvm::Constant*, 6> elements;
// reserved
- C = llvm::ConstantInt::get(UnsignedLongTy, 0);
- Elts.push_back(C);
+ elements.push_back(llvm::ConstantInt::get(ulong, 0));
// Size
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. Better fix would be to change the
// API to size_t.
- C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
- Elts.push_back(C);
+ elements.push_back(llvm::ConstantInt::get(ulong,
+ blockInfo.BlockSize.getQuantity()));
- // optional copy/dispose helpers
- if (BlockHasCopyDispose) {
+ // Optional copy/dispose helpers.
+ if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
- Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+ elements.push_back(buildCopyHelper(CGM, blockInfo));
// destroy_func_decl
- Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ elements.push_back(buildDisposeHelper(CGM, blockInfo));
}
- // Signature. non-optional ObjC-style method descriptor @encode sequence
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- Elts.push_back(llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
+ // Signature. Mandatory ObjC-style method descriptor @encode sequence.
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
- // Layout.
- C = BlockVarLayout;
-
- Elts.push_back(C);
+ // GC layout.
+ if (C.getLangOptions().ObjC1)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(llvm::Constant::getNullValue(i8p));
+
+ llvm::Constant *init =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(), elements.data(),
+ elements.size(), false);
- C = llvm::ConstantStruct::get(VMContext, Elts, false);
+ llvm::GlobalVariable *global =
+ new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ init, "__block_descriptor_tmp");
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- C, "__block_descriptor_tmp");
- return C;
+ return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
-static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
- for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
- I != E; ++I)
- if (*I)
- CollectBlockDeclRefInfo(*I, Info);
-
- // We want to ensure we walk down into block literals so we can find
- // all nested BlockDeclRefExprs.
- if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
- Info.InnerBlocks.insert(BE->getBlockDecl());
- CollectBlockDeclRefInfo(BE->getBody(), Info);
- }
+static BlockFlags computeBlockFlag(CodeGenModule &CGM,
+ const BlockExpr *BE,
+ BlockFlags flags) {
+ const FunctionType *ftype = BE->getFunctionType();
+
+ // This is a bit overboard.
+ CallArgList args;
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().getFunctionInfo(ftype->getResultType(), args,
+ ftype->getExtInfo());
+
+ if (CGM.ReturnTypeUsesSRet(fnInfo))
+ flags |= BLOCK_USE_STRET;
- else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
- const ValueDecl *D = BDRE->getDecl();
- // FIXME: Handle enums.
- if (isa<FunctionDecl>(D))
- return;
-
- if (isa<ImplicitParamDecl>(D) &&
- isa<ObjCMethodDecl>(D->getDeclContext()) &&
- cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
- Info.NeedsObjCSelf = true;
- return;
+ return flags;
+}
+
+/*
+ Purely notional variadic template describing the layout of a block.
+
+ template <class _ResultType, class... _ParamTypes, class... _CaptureTypes>
+ struct Block_literal {
+ /// Initialized to one of:
+ /// extern void *_NSConcreteStackBlock[];
+ /// extern void *_NSConcreteGlobalBlock[];
+ ///
+ /// In theory, we could start one off malloc'ed by setting
+ /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using
+ /// this isa:
+ /// extern void *_NSConcreteMallocBlock[];
+ struct objc_class *isa;
+
+ /// These are the flags (with corresponding bit number) that the
+ /// compiler is actually supposed to know about.
+ /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
+ /// descriptor provides copy and dispose helper functions
+ /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
+ /// object with a nontrivial destructor or copy constructor
+ /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated
+ /// as global memory
+ /// 29. BLOCK_USE_STRET - indicates that the block function
+ /// uses stret, which objc_msgSend needs to know about
+ /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an
+ /// @encoded signature string
+ /// And we're not supposed to manipulate these:
+ /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved
+ /// to malloc'ed memory
+ /// 27. BLOCK_IS_GC - indicates that the block has been moved to
+  ///        GC-allocated memory
+ /// Additionally, the bottom 16 bits are a reference count which
+ /// should be zero on the stack.
+ int flags;
+
+ /// Reserved; should be zero-initialized.
+ int reserved;
+
+ /// Function pointer generated from block literal.
+ _ResultType (*invoke)(Block_literal *, _ParamTypes...);
+
+ /// Block description metadata generated from block literal.
+ struct Block_descriptor *block_descriptor;
+
+ /// Captured values follow.
+    _CaptureTypes captures...;
+ };
+ */
+
+/// The number of fields in a block header.
+const unsigned BlockHeaderSize = 5;
+
+namespace {
+ /// A chunk of data that we actually have to capture in the block.
+ struct BlockLayoutChunk {
+ CharUnits Alignment;
+ CharUnits Size;
+ const BlockDecl::Capture *Capture; // null for 'this'
+ const llvm::Type *Type;
+
+ BlockLayoutChunk(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture,
+ const llvm::Type *type)
+ : Alignment(align), Size(size), Capture(capture), Type(type) {}
+
+ /// Tell the block info that this chunk has the given field index.
+ void setIndex(CGBlockInfo &info, unsigned index) {
+ if (!Capture)
+ info.CXXThisIndex = index;
+ else
+ info.Captures[Capture->getVariable()]
+ = CGBlockInfo::Capture::makeIndex(index);
}
+ };
- // Only Decls that escape are added.
- if (!Info.InnerBlocks.count(D->getDeclContext()))
- Info.DeclRefs.push_back(BDRE);
+ /// Order by descending alignment.
+ bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
+ return left.Alignment > right.Alignment;
}
+}
- // Make sure to capture implicit 'self' references due to super calls.
- else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
- if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
- E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
- Info.NeedsObjCSelf = true;
+/// Determines if the given record type has a mutable field.
+static bool hasMutableField(const CXXRecordDecl *record) {
+ for (CXXRecordDecl::field_iterator
+ i = record->field_begin(), e = record->field_end(); i != e; ++i)
+ if ((*i)->isMutable())
+ return true;
+
+ for (CXXRecordDecl::base_class_const_iterator
+ i = record->bases_begin(), e = record->bases_end(); i != e; ++i) {
+ const RecordType *record = i->getType()->castAs<RecordType>();
+ if (hasMutableField(cast<CXXRecordDecl>(record->getDecl())))
+ return true;
}
- // Getter/setter uses may also cause implicit super references,
- // which we can check for with:
- else if (isa<ObjCSuperExpr>(S))
- Info.NeedsObjCSelf = true;
+ return false;
+}
+
+/// Determines if the given type is safe for constant capture in C++.
+static bool isSafeForCXXConstantCapture(QualType type) {
+ const RecordType *recordType =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>();
- else if (isa<CXXThisExpr>(S))
- Info.CXXThisRef = cast<CXXThisExpr>(S);
+ // Only records can be unsafe.
+ if (!recordType) return true;
+
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(recordType->getDecl());
+
+ // Maintain semantics for classes with non-trivial dtors or copy ctors.
+ if (!record->hasTrivialDestructor()) return false;
+ if (!record->hasTrivialCopyConstructor()) return false;
+
+ // Otherwise, we just have to make sure there aren't any mutable
+ // fields that might have changed since initialization.
+ return !hasMutableField(record);
}
-/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
-/// declared as a global variable instead of on the stack.
-static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
- return Info.DeclRefs.empty();
+/// It is illegal to modify a const object after initialization.
+/// Therefore, if a const object has a constant initializer, we don't
+/// actually need to keep storage for it in the block; we'll just
+/// rematerialize it at the start of the block function. This is
+/// acceptable because we make no promises about address stability of
+/// captured variables.
+static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ const VarDecl *var) {
+ QualType type = var->getType();
+
+ // We can only do this if the variable is const.
+ if (!type.isConstQualified()) return 0;
+
+ // Furthermore, in C++ we have to worry about mutable fields:
+ // C++ [dcl.type.cv]p4:
+ // Except that any class member declared mutable can be
+ // modified, any attempt to modify a const object during its
+ // lifetime results in undefined behavior.
+ if (CGM.getLangOptions().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ return 0;
+
+ // If the variable doesn't have any initializer (shouldn't this be
+ // invalid?), it's not clear what we should do. Maybe capture as
+ // zero?
+ const Expr *init = var->getInit();
+ if (!init) return 0;
+
+ return CGM.EmitConstantExpr(init, var->getType());
}
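
A sketch of a capture this optimization elides (clang's blocks extension,
compiled with -fblocks; use() is a hypothetical helper):

    extern void use(int);

    void caller() {
      const int limit = 42;                // const, constant initializer
      void (^b)(void) = ^{ use(limit); };  // no block storage for 'limit';
      b();                                 // it is rematerialized in the body
    }
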
-/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
-/// ensure we can generate the debug information for the parameter for the block
-/// invoke function.
-static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
- if (Info.CXXThisRef)
- CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);
-
- for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
- CGF.AllocateBlockDecl(Info.DeclRefs[i]);
-
- if (Info.NeedsObjCSelf) {
- ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
- BlockDeclRefExpr *BDRE =
- new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
- SourceLocation(), false);
- Info.DeclRefs.push_back(BDRE);
- CGF.AllocateBlockDecl(BDRE);
- }
+/// Get the low bit of a nonzero character count. This is the
+/// alignment of the nth byte if the 0th byte is universally aligned.
+static CharUnits getLowBit(CharUnits v) {
+ return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1));
}
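
The return expression is the usual two's-complement lowest-set-bit trick,
v & -v spelled without unary minus; a quick standalone check of the values it
produces:

    #include <cassert>

    int lowBit(int v) { return v & (~v + 1); }  // same computation as getLowBit

    int main() {
      assert(lowBit(12) == 4);  // offset 12 is 4-byte aligned
      assert(lowBit(8) == 8);
      assert(lowBit(5) == 1);   // odd offsets are only 1-byte aligned
      return 0;
    }
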
-static unsigned computeBlockFlag(CodeGenModule &CGM,
- const BlockExpr *BE, unsigned flags) {
- QualType BPT = BE->getType();
- const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
- QualType ResultType = ftype->getResultType();
-
- CallArgList Args;
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
- FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSRet(FnInfo))
- flags |= CodeGenFunction::BLOCK_USE_STRET;
- return flags;
+static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
+ std::vector<const llvm::Type*> &elementTypes) {
+ ASTContext &C = CGM.getContext();
+
+ // The header is basically a 'struct { void *; int; int; void *; void *; }'.
+ CharUnits ptrSize, ptrAlign, intSize, intAlign;
+ llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
+ llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
+
+ // Are there crazy embedded platforms where this isn't true?
+ assert(intSize <= ptrSize && "layout assumptions horribly violated");
+
+ CharUnits headerSize = ptrSize;
+ if (2 * intSize < ptrAlign) headerSize += ptrSize;
+ else headerSize += 2 * intSize;
+ headerSize += 2 * ptrSize;
+
+ info.BlockAlign = ptrAlign;
+ info.BlockSize = headerSize;
+
+ assert(elementTypes.empty());
+ const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ const llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.getBlockDescriptorType());
+
+ assert(elementTypes.size() == BlockHeaderSize);
}
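
On a typical LP64 target (8-byte pointers, 4-byte ints, assumed here purely
for illustration) the arithmetic above yields the familiar 32-byte header:

    #include <cstdio>

    int main() {
      const unsigned ptrSize = 8, ptrAlign = 8, intSize = 4;  // assumed LP64
      unsigned headerSize = ptrSize;                          // isa
      headerSize += (2 * intSize < ptrAlign) ? ptrSize : 2 * intSize;
      headerSize += 2 * ptrSize;                              // invoke, descriptor
      std::printf("%u\n", headerSize);                        // prints 32
      return 0;
    }
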
-// FIXME: Push most into CGM, passing down a few bits, like current function
-// name.
-llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
- std::string Name = CurFn->getName();
- CGBlockInfo Info(Name.c_str());
- Info.InnerBlocks.insert(BE->getBlockDecl());
- CollectBlockDeclRefInfo(BE->getBody(), Info);
+/// Compute the layout of the given block. Attempts to lay the block
+/// out with minimal space requirements.
+static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *block = info.getBlockDecl();
- // Check if the block can be global.
- // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
- // to just have one code path. We should move this function into CGM and pass
- // CGF, then we can just check to see if CGF is 0.
- if (0 && CanBlockBeGlobal(Info))
- return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+ std::vector<const llvm::Type*> elementTypes;
+ initializeForBlockHeader(CGM, info, elementTypes);
- size_t BlockFields = 5;
+ if (!block->hasCaptures()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
- std::vector<llvm::Constant*> Elts(BlockFields);
+ // Collect the layout chunks.
+ llvm::SmallVector<BlockLayoutChunk, 16> layout;
+ layout.reserve(block->capturesCXXThis() +
+ (block->capture_end() - block->capture_begin()));
- llvm::Constant *C;
- llvm::Value *V;
+ CharUnits maxFieldAlign;
- {
- llvm::Constant *BlockVarLayout;
- // C = BuildBlockStructInitlist();
- unsigned int flags = BLOCK_HAS_SIGNATURE;
+ // First, 'this'.
+ if (block->capturesCXXThis()) {
+ const DeclContext *DC = block->getDeclContext();
+ for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
+ ;
+ QualType thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
- // We run this first so that we set BlockHasCopyDispose from the entire
- // block literal.
- // __invoke
- llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
- BlockVarLayout,
- LocalDeclMap);
- BlockHasCopyDispose |= Info.BlockHasCopyDispose;
- Elts[3] = Fn;
-
- // FIXME: Don't use BlockHasCopyDispose, it is set more often then
- // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
- if (Info.BlockHasCopyDispose)
- flags |= BLOCK_HAS_COPY_DISPOSE;
-
- // __isa
- C = CGM.getNSConcreteStackBlock();
- C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
- Elts[0] = C;
-
- // __flags
- flags = computeBlockFlag(CGM, BE, flags);
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().IntTy));
- C = llvm::ConstantInt::get(IntTy, flags);
- Elts[1] = C;
-
- // __reserved
- C = llvm::ConstantInt::get(IntTy, 0);
- Elts[2] = C;
-
- if (Info.BlockLayout.empty()) {
- // __descriptor
- C = llvm::Constant::getNullValue(PtrToInt8Ty);
- Elts[4] = BuildDescriptorBlockDecl(BE, Info, 0, C, 0);
-
- // Optimize to being a global block.
- Elts[0] = CGM.getNSConcreteGlobalBlock();
-
- Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
-
- C = llvm::ConstantStruct::get(VMContext, Elts, false);
-
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
- llvm::GlobalValue::InternalLinkage, C,
- "__block_holder_tmp_" +
- llvm::Twine(CGM.getGlobalUniqueCount()));
- QualType BPT = BE->getType();
- C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
- return C;
+ const llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, 0, llvmType));
+ }
+
+ // Next, all the block captures.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+
+ if (ci->isByRef()) {
+ // We have to copy/dispose of the __block reference.
+ info.NeedsCopyDispose = true;
+
+ // Just use void* instead of a pointer to the byref type.
+ QualType byRefPtrTy = C.VoidPtrTy;
+
+ const llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ &*ci, llvmType));
+ continue;
+ }
+
+ // Otherwise, build a layout chunk with the size and alignment of
+ // the declaration.
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, variable)) {
+ info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ continue;
}
- std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
- for (int i=0; i<4; ++i)
- Types[i] = Elts[i]->getType();
- Types[4] = PtrToInt8Ty;
-
- for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
- const Expr *E = Info.BlockLayout[i];
- const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
- QualType Ty = E->getType();
- if (BDRE && BDRE->isByRef()) {
- Types[i+BlockFields] =
- llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
- } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
- Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
- } else
- Types[i+BlockFields] = ConvertType(Ty);
+ // Block pointers require copy/dispose.
+ if (variable->getType()->isBlockPointerType()) {
+ info.NeedsCopyDispose = true;
+
+ // So do Objective-C pointers.
+ } else if (variable->getType()->isObjCObjectPointerType() ||
+ C.isObjCNSObjectType(variable->getType())) {
+ info.NeedsCopyDispose = true;
+
+ // So do types that require non-trivial copy construction.
+ } else if (ci->hasCopyExpr()) {
+ info.NeedsCopyDispose = true;
+ info.HasCXXObject = true;
+
+ // And so do types with destructors.
+ } else if (CGM.getLangOptions().CPlusPlus) {
+ if (const CXXRecordDecl *record =
+ variable->getType()->getAsCXXRecordDecl()) {
+ if (!record->hasTrivialDestructor()) {
+ info.HasCXXObject = true;
+ info.NeedsCopyDispose = true;
+ }
+ }
}
- llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
+ CharUnits size = C.getTypeSizeInChars(variable->getType());
+ CharUnits align = C.getDeclAlign(variable);
+ maxFieldAlign = std::max(maxFieldAlign, align);
- llvm::AllocaInst *A = CreateTempAlloca(Ty);
- A->setAlignment(Info.BlockAlign.getQuantity());
- V = A;
+ const llvm::Type *llvmType =
+ CGM.getTypes().ConvertTypeForMem(variable->getType());
+
+ layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
+ }
+
+ // If that was everything, we're done here.
+ if (layout.empty()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
- // Build layout / cleanup information for all the data entries in the
- // layout, and write the enclosing fields into the type.
- std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
- unsigned NumHelpers = 0;
+ // Sort the layout by alignment. We have to use a stable sort here
+ // to get reproducible results. There should probably be an
+ // llvm::array_pod_stable_sort.
+ std::stable_sort(layout.begin(), layout.end());
+
+ CharUnits &blockSize = info.BlockSize;
+ info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);
+
+ // Assuming that the first byte in the header is maximally aligned,
+ // get the alignment of the first byte following the header.
+ CharUnits endAlign = getLowBit(blockSize);
+
+ // If the end of the header isn't satisfactorily aligned for the
+ // maximum thing, look for things that are okay with the header-end
+ // alignment, and keep appending them until we get something that's
+ // aligned right. This algorithm is only guaranteed optimal if
+ // that condition is satisfied at some point; otherwise we can get
+ // things like:
+ // header // next byte has alignment 4
+ // something_with_size_5; // next byte has alignment 1
+ // something_with_alignment_8;
+  // which has 7 bytes of padding, as opposed to the naive in-order
+  // layout, which might need less.
+ if (endAlign < maxFieldAlign) {
+ llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin() + 1, le = layout.end();
+
+ // Look for something that the header end is already
+ // satisfactorily aligned for.
+ for (; li != le && endAlign < li->Alignment; ++li)
+ ;
+
+ // If we found something that's naturally aligned for the end of
+ // the header, keep adding things...
+ if (li != le) {
+ llvm::SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ for (; li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+
+ // ...until we get to the alignment of the maximum field.
+ if (endAlign >= maxFieldAlign)
+ break;
+ }
- for (unsigned i=0; i<4; ++i)
- Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+ // Don't re-append everything we just appended.
+ layout.erase(first, li);
+ }
+ }
- for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
- const Expr *E = Info.BlockLayout[i];
+ // At this point, we just have to add padding if the end align still
+ // isn't aligned right.
+ if (endAlign < maxFieldAlign) {
+ CharUnits padding = maxFieldAlign - endAlign;
- // Skip padding.
- if (isa<DeclRefExpr>(E)) continue;
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
- llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
- HelperInfo &Note = NoteForHelper[NumHelpers++];
+ endAlign = getLowBit(blockSize);
+ assert(endAlign >= maxFieldAlign);
+ }
- Note.index = i+5;
+ // Slam everything else on now. This works because they have
+ // strictly decreasing alignment and we expect that size is always a
+ // multiple of alignment.
+ for (llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin(), le = layout.end(); li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+ }
- if (isa<CXXThisExpr>(E)) {
- Note.RequiresCopying = false;
- Note.flag = BLOCK_FIELD_IS_OBJECT;
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+}
- Builder.CreateStore(LoadCXXThis(), Addr);
- continue;
- }
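To make the layout rule concrete, here is a minimal standalone model of what computeBlockInfo does above (a hypothetical helper, not the Clang API; alignments are assumed to be powers of two). It pads eagerly before each chunk instead of doing the reordering trick, so on the awkward cases described in the comments it can waste bytes the real code saves:

#include <algorithm>
#include <vector>

struct Chunk { unsigned Align, Size; };   // both in bytes

static bool moreAligned(const Chunk &a, const Chunk &b) {
  return a.Align > b.Align;
}

static unsigned layoutSize(unsigned headerSize, std::vector<Chunk> chunks) {
  // Stable sort by decreasing alignment, as above.
  std::stable_sort(chunks.begin(), chunks.end(), moreAligned);
  unsigned size = headerSize;
  for (unsigned i = 0, e = chunks.size(); i != e; ++i) {
    if (size % chunks[i].Align)             // pad up to the field's alignment
      size += chunks[i].Align - size % chunks[i].Align;
    size += chunks[i].Size;                 // then append the field
  }
  return size;  // e.g. header 20, chunks (8,8),(1,5): pad to 24, +8, +5 == 37
}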
+/// Emit a block literal expression in the current function.
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
+ std::string Name = CurFn->getName();
+ CGBlockInfo blockInfo(blockExpr, Name.c_str());
+
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(CGM, blockInfo);
+
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ CurFuncDecl, LocalDeclMap);
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ // If there is nothing to capture, we can emit this as a global block.
+ if (blockInfo.CanBeGlobal)
+ return buildGlobalBlock(CGM, blockInfo, blockFn);
+
+ // Otherwise, we have to emit this as a local block.
+
+ llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
+
+ // Build the block descriptor.
+ llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
+
+ const llvm::Type *intTy = ConvertType(getContext().IntTy);
+
+ llvm::AllocaInst *blockAddr =
+ CreateTempAlloca(blockInfo.StructureType, "block");
+ blockAddr->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Compute the initial on-stack block flags.
+ BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
+ flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(), flags);
+
+ // Initialize the block literal.
+ Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
+ Builder.CreateStore(llvm::ConstantInt::get(intTy, flags.getBitMask()),
+ Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
+ Builder.CreateStore(llvm::ConstantInt::get(intTy, 0),
+ Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
+ Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
+ "block.invoke"));
+ Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockAddr, 4,
+ "block.descriptor"));
+
+ // Finally, capture all the values into the block.
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // First, 'this'.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(blockAddr,
+ blockInfo.CXXThisIndex,
+ "block.captured-this.addr");
+ Builder.CreateStore(LoadCXXThis(), addr);
+ }
- const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
- const ValueDecl *VD = BDRE->getDecl();
- QualType T = VD->getType();
+ // Next, captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- Note.RequiresCopying = BlockRequiresCopying(T);
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
- if (BDRE->isByRef()) {
- Note.flag = BLOCK_FIELD_IS_BYREF;
- if (T.isObjCGCWeak())
- Note.flag |= BLOCK_FIELD_IS_WEAK;
- } else if (T->isBlockPointerType()) {
- Note.flag = BLOCK_FIELD_IS_BLOCK;
- } else {
- Note.flag = BLOCK_FIELD_IS_OBJECT;
- }
+ QualType type = variable->getType();
- if (LocalDeclMap[VD]) {
- if (BDRE->isByRef()) {
- llvm::Value *Loc = LocalDeclMap[VD];
- Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Builder.CreateStore(Loc, Addr);
- continue;
- } else {
- if (BDRE->getCopyConstructorExpr()) {
- E = BDRE->getCopyConstructorExpr();
- PushDestructorCleanup(E->getType(), Addr);
- }
- else {
- E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
- VD->getType().getNonReferenceType(),
- SourceLocation());
- if (VD->getType()->isReferenceType()) {
- E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
- getContext().getPointerType(E->getType()),
- SourceLocation());
- }
- }
- }
- }
+ // This will be a [[type]]*, except that a byref entry will just be
+ // an i8**.
+ llvm::Value *blockField =
+ Builder.CreateStructGEP(blockAddr, capture.getIndex(),
+ "block.captured");
- if (BDRE->isByRef()) {
- E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
- getContext().getPointerType(E->getType()),
- SourceLocation());
- }
+ // Compute the address of the thing we're going to move into the
+ // block literal.
+ llvm::Value *src;
+ if (ci->isNested()) {
+ // We need to use the capture from the enclosing block.
+ const CGBlockInfo::Capture &enclosingCapture =
+ BlockInfo->getCapture(variable);
+
+      // This is a [[type]]*, except that a byref entry will just be an i8**.
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
+ enclosingCapture.getIndex(),
+ "block.capture.addr");
+ } else {
+ // This is a [[type]]*.
+ src = LocalDeclMap[variable];
+ }
- RValue r = EmitAnyExpr(E, Addr, false);
- if (r.isScalar()) {
- llvm::Value *Loc = r.getScalarVal();
- const llvm::Type *Ty = Types[i+BlockFields];
- if (BDRE->isByRef()) {
- // E is now the address of the value field, instead, we want the
- // address of the actual ByRef struct. We optimize this slightly
- // compared to gcc by not grabbing the forwarding slot as this must
- // be done during Block_copy for us, and we can postpone the work
- // until then.
- CharUnits offset = BlockDecls[BDRE->getDecl()];
-
- llvm::Value *BlockLiteral = LoadBlockStruct();
-
- Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
- "block.literal");
- Ty = llvm::PointerType::get(Ty, 0);
- Loc = Builder.CreateBitCast(Loc, Ty);
- Loc = Builder.CreateLoad(Loc);
- // Loc = Builder.CreateBitCast(Loc, Ty);
- }
- Builder.CreateStore(Loc, Addr);
- } else if (r.isComplex())
- // FIXME: implement
- ErrorUnsupported(BE, "complex in block literal");
- else if (r.isAggregate())
- ; // Already created into the destination
+ // For byrefs, we just write the pointer to the byref struct into
+ // the block field. There's no need to chase the forwarding
+ // pointer at this point, since we're building something that will
+ // live a shorter life than the stack byref anyway.
+ if (ci->isByRef()) {
+ // Get a void* that points to the byref struct.
+ if (ci->isNested())
+ src = Builder.CreateLoad(src, "byref.capture");
else
- assert (0 && "bad block variable");
- // FIXME: Ensure that the offset created by the backend for
- // the struct matches the previously computed offset in BlockDecls.
+ src = Builder.CreateBitCast(src, VoidPtrTy);
+
+ // Write that void* into the capture field.
+ Builder.CreateStore(src, blockField);
+
+ // If we have a copy constructor, evaluate that into the block field.
+ } else if (const Expr *copyExpr = ci->getCopyExpr()) {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+
+ // If it's a reference variable, copy the reference into the block field.
+ } else if (type->isReferenceType()) {
+ Builder.CreateStore(Builder.CreateLoad(src, "ref.val"), blockField);
+
+ // Otherwise, fake up a POD copy into the block field.
+ } else {
+      // We use one or the other of these, depending on whether the
+      // capture is nested.
+ DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
+ SourceLocation());
+ BlockDeclRefExpr nested(const_cast<VarDecl*>(variable), type,
+ VK_LValue, SourceLocation(), /*byref*/ false);
+
+ Expr *declRef =
+ (ci->isNested() ? static_cast<Expr*>(&nested) : &notNested);
+
+ ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
+ declRef, VK_RValue);
+ EmitAnyExprToMem(&l2r, blockField, /*volatile*/ false, /*init*/ true);
}
- NoteForHelper.resize(NumHelpers);
-
- // __descriptor
- llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE, Info, Ty,
- BlockVarLayout,
- &NoteForHelper);
- Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
- Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
- }
- QualType BPT = BE->getType();
- V = Builder.CreateBitCast(V, ConvertType(BPT));
- // See if this is a __weak block variable and the must call objc_read_weak
- // on it.
- const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
- QualType RES = ftype->getResultType();
- if (RES.isObjCGCWeak()) {
- // Must cast argument to id*
- const llvm::Type *ObjectPtrTy =
- ConvertType(CGM.getContext().getObjCIdType());
- const llvm::Type *PtrObjectPtrTy =
- llvm::PointerType::getUnqual(ObjectPtrTy);
- V = Builder.CreateBitCast(V, PtrObjectPtrTy);
- V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
+ // Push a destructor if necessary. The semantics for when this
+ // actually gets run are really obscure.
+ if (!ci->isByRef() && CGM.getLangOptions().CPlusPlus)
+ PushDestructorCleanup(type, blockField);
}
- return V;
+
+  // Cast to the converted block-pointer type, which happens (somewhat
+  // unfortunately) to be a pointer-to-function type.
+ llvm::Value *result =
+ Builder.CreateBitCast(blockAddr,
+ ConvertType(blockInfo.getBlockExpr()->getType()));
+
+ return result;
}
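The five stores above populate the standard Blocks ABI header; the field names below are illustrative, but the indexes are exactly the ones passed to CreateStructGEP. A global block (buildGlobalBlock, below) reuses the same header with &_NSConcreteGlobalBlock, BLOCK_IS_GLOBAL in the flags, and no capture fields:

struct BlockDescriptor;   // size, @encode signature, copy/dispose helpers

// Sketch of the on-stack literal initialized above; captured variables
// (plus any alignment padding) follow this header at the indexes
// recorded in blockInfo.Captures.
struct BlockLiteralHeader {
  void *isa;                            // 0: &_NSConcreteStackBlock
  int flags;                            // 1: BLOCK_HAS_SIGNATURE | ...
  int reserved;                         // 2: zero
  void (*invoke)(void *);               // 3: the generated block function
  struct BlockDescriptor *descriptor;   // 4: buildBlockDescriptor result
};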
-const llvm::Type *BlockModule::getBlockDescriptorType() {
+const llvm::Type *CodeGenModule::getBlockDescriptorType() {
if (BlockDescriptorType)
return BlockDescriptorType;
@@ -443,18 +678,16 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
getModule().addTypeName("struct.__block_descriptor",
BlockDescriptorType);
+ // Now form a pointer to that.
+ BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
return BlockDescriptorType;
}
-const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
- const llvm::Type *BlockDescPtrTy =
- llvm::PointerType::getUnqual(getBlockDescriptorType());
-
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- getTypes().ConvertType(getContext().IntTy));
+ const llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
// struct __block_literal_generic {
// void *__isa;
@@ -463,11 +696,11 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
- GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
- PtrToInt8Ty,
+ GenericBlockLiteralType = llvm::StructType::get(getLLVMContext(),
+ VoidPtrTy,
IntTy,
IntTy,
- PtrToInt8Ty,
+ VoidPtrTy,
BlockDescPtrTy,
NULL);
@@ -496,10 +729,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
// Get the function pointer from the literal.
llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
- BlockLiteral =
- Builder.CreateBitCast(BlockLiteral,
- llvm::Type::getInt8PtrTy(VMContext),
- "tmp");
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy, "tmp");
// Add the block literal.
QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
@@ -533,318 +763,222 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
-void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
- assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");
-
- // Figure out what the offset is.
- QualType T = E->getType();
- std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
- CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
+ assert(BlockInfo && "evaluating block ref without block information?");
+ const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
- BlockCXXThisOffset = Offset;
- BlockLayout.push_back(E);
-}
-
-void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
- const ValueDecl *VD = E->getDecl();
- CharUnits &Offset = BlockDecls[VD];
+ // Handle constant captures.
+ if (capture.isConstant()) return LocalDeclMap[variable];
- // See if we have already allocated an offset for this variable.
- if (!Offset.isZero())
- return;
+ llvm::Value *addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ "block.capture.addr");
- // Don't run the expensive check, unless we have to.
- if (!BlockHasCopyDispose)
- if (E->isByRef()
- || BlockRequiresCopying(E->getType()))
- BlockHasCopyDispose = true;
+ if (isByRef) {
+ // addr should be a void** right now. Load, then cast the result
+ // to byref*.
- const ValueDecl *D = cast<ValueDecl>(E->getDecl());
+ addr = Builder.CreateLoad(addr);
+ const llvm::PointerType *byrefPointerType
+ = llvm::PointerType::get(BuildByRefType(variable), 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType,
+ "byref.addr");
- CharUnits Size;
- CharUnits Align;
+ // Follow the forwarding pointer.
+ addr = Builder.CreateStructGEP(addr, 1, "byref.forwarding");
+ addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
- if (E->isByRef()) {
- llvm::tie(Size,Align) =
- getContext().getTypeInfoInChars(getContext().VoidPtrTy);
- } else {
- Size = getContext().getTypeSizeInChars(D->getType());
- Align = getContext().getDeclAlign(D);
+ // Cast back to byref* and GEP over to the actual object.
+ addr = Builder.CreateBitCast(addr, byrefPointerType);
+ addr = Builder.CreateStructGEP(addr, getByRefValueLLVMField(variable),
+ variable->getNameAsString());
}
- Offset = getBlockOffset(Size, Align);
- BlockLayout.push_back(E);
-}
+ if (variable->getType()->isReferenceType())
+ addr = Builder.CreateLoad(addr, "ref.tmp");
-llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
- bool IsByRef) {
-
- CharUnits offset = BlockDecls[VD];
- assert(!offset.isZero() && "getting address of unallocated decl");
-
- llvm::Value *BlockLiteral = LoadBlockStruct();
- llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
- "block.literal");
- if (IsByRef) {
- const llvm::Type *PtrStructTy
- = llvm::PointerType::get(BuildByRefType(VD), 0);
- // The block literal will need a copy/destroy helper.
- BlockHasCopyDispose = true;
-
- const llvm::Type *Ty = PtrStructTy;
- Ty = llvm::PointerType::get(Ty, 0);
- V = Builder.CreateBitCast(V, Ty);
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, 1, "forwarding");
- V = Builder.CreateLoad(V);
- V = Builder.CreateBitCast(V, PtrStructTy);
- V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
- VD->getNameAsString());
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V);
- } else {
- const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
- Ty = llvm::PointerType::get(Ty, 0);
- V = Builder.CreateBitCast(V, Ty);
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V, "ref.tmp");
- }
- return V;
+ return addr;
}
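The loads above are the usual __block access pattern: always chase 'forwarding' first, so that reads and writes find the heap copy once the enclosing block has been copied. A sketch of the byref structure being walked (per the common Blocks ABI; names illustrative). The two helper fields exist only when the variable needs copy/dispose support, which is why getByRefValueLLVMField() supplies the index of x:

struct ByRef {
  void *isa;
  struct ByRef *forwarding;    // index 1: points at the live copy
  int flags;
  int size;
  void (*byref_keep)(void *dst, void *src);   // optional copy helper
  void (*byref_dispose)(void *);              // optional dispose helper
  /* T x; */                   // the __block variable itself
};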
llvm::Constant *
-BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
- // Generate the block descriptor.
- const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- getTypes().ConvertType(getContext().IntTy));
-
- llvm::Constant *DescriptorFields[4];
-
- // Reserved
- DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
-
- // Block literal size. For global blocks we just use the size of the generic
- // block literal struct.
- CharUnits BlockLiteralSize =
- CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
- DescriptorFields[1] =
- llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
-
- // signature. non-optional ObjC-style method descriptor @encode sequence
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
-
- // layout
- DescriptorFields[3] =
- llvm::ConstantInt::get(UnsignedLongTy,0);
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
+ const char *name) {
+ CGBlockInfo blockInfo(blockExpr, name);
- // build the structure from the 4 elements
- llvm::Constant *DescriptorStruct =
- llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(*this, blockInfo);
- llvm::GlobalVariable *Descriptor =
- new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
- llvm::GlobalVariable::InternalLinkage,
- DescriptorStruct, "__block_descriptor_global");
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn;
+ {
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
+ blockInfo,
+ 0, LocalDeclMap);
+ }
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
- int FieldCount = 5;
- // Generate the constants for the block literal.
+ return buildGlobalBlock(*this, blockInfo, blockFn);
+}
- std::vector<llvm::Constant*> LiteralFields(FieldCount);
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn) {
+ assert(blockInfo.CanBeGlobal);
- CGBlockInfo Info(n);
- llvm::Constant *BlockVarLayout;
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
- llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE,
- Info, 0, BlockVarLayout,
- LocalDeclMap);
- assert(Info.BlockSize == BlockLiteralSize
- && "no imports allowed for global block");
+ // Generate the constants for the block literal initializer.
+ llvm::Constant *fields[BlockHeaderSize];
// isa
- LiteralFields[0] = CGM.getNSConcreteGlobalBlock();
+ fields[0] = CGM.getNSConcreteGlobalBlock();
// __flags
- unsigned flags = computeBlockFlag(CGM, BE,
- (BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE));
- LiteralFields[1] =
- llvm::ConstantInt::get(IntTy, flags);
+ BlockFlags flags = computeBlockFlag(CGM, blockInfo.getBlockExpr(),
+ BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);
+ fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
// Reserved
- LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+ fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
// Function
- LiteralFields[3] = Fn;
+ fields[3] = blockFn;
// Descriptor
- LiteralFields[4] = Descriptor;
-
- llvm::Constant *BlockLiteralStruct =
- llvm::ConstantStruct::get(VMContext, LiteralFields, false);
-
- llvm::GlobalVariable *BlockLiteral =
- new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
- llvm::GlobalVariable::InternalLinkage,
- BlockLiteralStruct, "__block_literal_global");
+ fields[4] = buildBlockDescriptor(CGM, blockInfo);
- return BlockLiteral;
-}
+ llvm::Constant *init =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(), fields, BlockHeaderSize,
+ /*packed*/ false);
-llvm::Value *CodeGenFunction::LoadBlockStruct() {
- llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
- "self");
- // For now, we codegen based upon byte offsets.
- return Builder.CreateBitCast(V, PtrToInt8Ty);
+ llvm::GlobalVariable *literal =
+ new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ true,
+ llvm::GlobalVariable::InternalLinkage,
+ init,
+ "__block_literal_global");
+ literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Return a constant of the appropriately-casted type.
+ const llvm::Type *requiredType =
+ CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
+ return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
llvm::Function *
-CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
- CGBlockInfo &Info,
- const Decl *OuterFuncDecl,
- llvm::Constant *& BlockVarLayout,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &blockInfo,
+ const Decl *outerFnDecl,
+ const DeclMapTy &ldm) {
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
- // Check if we should generate debug info for this block.
- if (CGM.getDebugInfo())
- DebugInfo = CGM.getDebugInfo();
+ DebugInfo = CGM.getDebugInfo();
+ BlockInfo = &blockInfo;
// Arrange for local static and local extern declarations to appear
- // to be local to this function as well, as they are directly referenced
- // in a block.
- for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
- i != ldm.end();
- ++i) {
- const VarDecl *VD = dyn_cast<VarDecl>(i->first);
-
- if (VD->getStorageClass() == SC_Static || VD->hasExternalStorage())
- LocalDeclMap[VD] = i->second;
- }
-
- BlockOffset =
- CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
- BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
-
- const FunctionType *BlockFunctionType = BExpr->getFunctionType();
- QualType ResultType;
- FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
- bool IsVariadic;
- if (const FunctionProtoType *FTy =
- dyn_cast<FunctionProtoType>(BlockFunctionType)) {
- ResultType = FTy->getResultType();
- IsVariadic = FTy->isVariadic();
- } else {
- // K&R style block.
- ResultType = BlockFunctionType->getResultType();
- IsVariadic = false;
+ // to be local to this function as well, in case they're directly
+ // referenced in a block.
+ for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
+ const VarDecl *var = dyn_cast<VarDecl>(i->first);
+ if (var && !var->hasLocalStorage())
+ LocalDeclMap[var] = i->second;
}
- FunctionArgList Args;
-
- CurFuncDecl = OuterFuncDecl;
+ // Begin building the function declaration.
- const BlockDecl *BD = BExpr->getBlockDecl();
+ // Build the argument list.
+ FunctionArgList args;
+ // The first argument is the block pointer. Just take it as a void*
+ // and cast it later.
+ QualType selfTy = getContext().VoidPtrTy;
IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
- // Build the block struct now.
- AllocateAllBlockDeclRefs(*this, Info);
+ // FIXME: this leaks, and we only need it very temporarily.
+ ImplicitParamDecl *selfDecl =
+ ImplicitParamDecl::Create(getContext(),
+ const_cast<BlockDecl*>(blockDecl),
+ SourceLocation(), II, selfTy);
+ args.push_back(std::make_pair(selfDecl, selfTy));
+
+ // Now add the rest of the parameters.
+ for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
+ e = blockDecl->param_end(); i != e; ++i)
+ args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ // Create the function declaration.
+ const FunctionProtoType *fnType =
+ cast<FunctionProtoType>(blockInfo.getBlockExpr()->getFunctionType());
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().getFunctionInfo(fnType->getResultType(), args,
+ fnType->getExtInfo());
+ const llvm::FunctionType *fnLLVMType =
+ CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
+
+ MangleBuffer name;
+ CGM.getBlockMangledName(GD, name, blockDecl);
+ llvm::Function *fn =
+ llvm::Function::Create(fnLLVMType, llvm::GlobalValue::InternalLinkage,
+ name.getString(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
+
+ // Begin generating the function.
+ StartFunction(blockDecl, fnType->getResultType(), fn, args,
+ blockInfo.getBlockExpr()->getBody()->getLocEnd());
+ CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
+
+ // Okay. Undo some of what StartFunction did. We really don't need
+ // an alloca for the block address; in theory we could remove it,
+ // but that might do unpleasant things to debug info.
+ llvm::AllocaInst *blockAddrAlloca
+ = cast<llvm::AllocaInst>(LocalDeclMap[selfDecl]);
+ llvm::Value *blockAddr = Builder.CreateLoad(blockAddrAlloca);
+ BlockPointer = Builder.CreateBitCast(blockAddr,
+ blockInfo.StructureType->getPointerTo(),
+ "block");
- // Capture block layout info. here.
- if (CGM.getContext().getLangOptions().ObjC1)
- BlockVarLayout = CGM.getObjCRuntime().GCBlockLayout(*this, Info.DeclRefs);
- else
- BlockVarLayout = llvm::Constant::getNullValue(PtrToInt8Ty);
-
- QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
- BlockLayout);
-
- // FIXME: This leaks
- ImplicitParamDecl *SelfDecl =
- ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
- SourceLocation(), II,
- ParmTy);
-
- Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
- BlockStructDecl = SelfDecl;
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(BlockPointer,
+ blockInfo.CXXThisIndex,
+ "block.captured-this");
+ CXXThisValue = Builder.CreateLoad(addr, "this");
+ }
- for (BlockDecl::param_const_iterator i = BD->param_begin(),
- e = BD->param_end(); i != e; ++i)
- Args.push_back(std::make_pair(*i, (*i)->getType()));
+ // LoadObjCSelf() expects there to be an entry for 'self' in LocalDeclMap;
+ // appease it.
+ if (const ObjCMethodDecl *method
+ = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl)) {
+ const VarDecl *self = method->getSelfDecl();
+
+ // There might not be a capture for 'self', but if there is...
+ if (blockInfo.Captures.count(self)) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(self);
+ llvm::Value *selfAddr = Builder.CreateStructGEP(BlockPointer,
+ capture.getIndex(),
+ "block.captured-self");
+ LocalDeclMap[self] = selfAddr;
+ }
+ }
- const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);
+ // Also force all the constant captures.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (!capture.isConstant()) continue;
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+ unsigned align = getContext().getDeclAlign(variable).getQuantity();
- MangleBuffer Name;
- CGM.getMangledName(GD, Name, BD);
- llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name.getString(), &CGM.getModule());
+ llvm::AllocaInst *alloca =
+ CreateMemTemp(variable->getType(), "block.captured-const");
+ alloca->setAlignment(align);
- CGM.SetInternalFunctionAttributes(BD, Fn, FI);
- StartFunction(BD, ResultType, Fn, Args,
- BExpr->getBody()->getLocEnd());
-
- CurFuncDecl = OuterFuncDecl;
-
- QualType FnType(BlockFunctionType, 0);
- bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
-
- IdentifierInfo *ID = &getContext().Idents.get(Name.getString());
- CurCodeDecl = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), ID, FnType,
- 0,
- SC_Static,
- SC_None,
- false, HasPrototype);
- if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FnType)) {
- const FunctionDecl *CFD = dyn_cast<FunctionDecl>(CurCodeDecl);
- FunctionDecl *FD = const_cast<FunctionDecl *>(CFD);
- llvm::SmallVector<ParmVarDecl*, 16> Params;
- for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
- Params.push_back(ParmVarDecl::Create(getContext(), FD,
- SourceLocation(), 0,
- FT->getArgType(i), /*TInfo=*/0,
- SC_None, SC_None, 0));
- FD->setParams(Params.data(), Params.size());
- }
-
-
- // If we have a C++ 'this' reference, go ahead and force it into
- // existence now.
- if (Info.CXXThisRef) {
- assert(!BlockCXXThisOffset.isZero() &&
- "haven't yet allocated 'this' reference");
-
- // TODO: I have a dream that one day this will be typed.
- llvm::Value *BlockLiteral = LoadBlockStruct();
- llvm::Value *ThisPtrRaw =
- Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
- BlockCXXThisOffset.getQuantity(),
- "this.ptr.raw");
-
- const llvm::Type *Ty =
- CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
- Ty = llvm::PointerType::get(Ty, 0);
- llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");
-
- CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
- }
+    // Note: the alloca's alignment was already set above, and the third
+    // parameter of CreateStore is isVolatile, not an alignment.
+    Builder.CreateStore(capture.getConstant(), alloca);
- // If we have an Objective C 'self' reference, go ahead and force it
- // into existence now.
- if (Info.NeedsObjCSelf) {
- ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
- LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
+ LocalDeclMap[variable] = alloca;
}
// Save a spot to insert the debug information for all the BlockDeclRefDecls.
@@ -852,7 +986,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
--entry_ptr;
- EmitStmt(BExpr->getBody());
+ EmitStmt(blockDecl->getBody());
// Remember where we were...
llvm::BasicBlock *resume = Builder.GetInsertBlock();
@@ -861,96 +995,78 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
++entry_ptr;
Builder.SetInsertPoint(entry, entry_ptr);
+ // Emit debug information for all the BlockDeclRefDecls.
+ // FIXME: also for 'this'
if (CGDebugInfo *DI = getDebugInfo()) {
- // Emit debug information for all the BlockDeclRefDecls.
- // FIXME: also for 'this'
- for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
- if (const BlockDeclRefExpr *BDRE =
- dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
- const ValueDecl *D = BDRE->getDecl();
- DI->setLocation(D->getLocation());
- DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
- LocalDeclMap[getBlockStructDecl()],
- Builder, this);
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ DI->setLocation(variable->getLocation());
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
}
+
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, blockAddrAlloca,
+ Builder, blockInfo);
}
}
+
// And resume where we left off.
if (resume == 0)
Builder.ClearInsertionPoint();
else
Builder.SetInsertPoint(resume);
- FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+ FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
- // The runtime needs a minimum alignment of a void *.
- CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
- BlockOffset = CharUnits::fromQuantity(
- llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
- MinAlign.getQuantity()));
-
- Info.BlockSize = BlockOffset;
- Info.BlockAlign = BlockAlign;
- Info.BlockLayout = BlockLayout;
- Info.BlockHasCopyDispose = BlockHasCopyDispose;
- return Fn;
+ return fn;
}
-CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
- assert((Align.isPositive()) && "alignment must be 1 byte or more");
-
- CharUnits OldOffset = BlockOffset;
-
- // Ensure proper alignment, even if it means we have to have a gap
- BlockOffset = CharUnits::fromQuantity(
- llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
- BlockAlign = std::max(Align, BlockAlign);
-
- CharUnits Pad = BlockOffset - OldOffset;
- if (Pad.isPositive()) {
- QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
- llvm::APInt(32,
- Pad.getQuantity()),
- ArrayType::Normal, 0);
- ValueDecl *PadDecl = VarDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(),
- 0, QualType(PadTy), 0,
- SC_None, SC_None);
- Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
- SourceLocation());
- BlockLayout.push_back(E);
- }
+/*
+ notes.push_back(HelperInfo());
+ HelperInfo &note = notes.back();
+ note.index = capture.getIndex();
+ note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type));
+ note.cxxbar_import = ci->getCopyExpr();
+
+ if (ci->isByRef()) {
+ note.flag = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+ */
- BlockOffset += Size;
- return BlockOffset - Size;
-}
-llvm::Constant *BlockFunction::
-GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelperp) {
- QualType R = getContext().VoidTy;
- FunctionArgList Args;
+
+
+llvm::Constant *
+CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
// FIXME: This leaks
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Src, Src->getType()));
+ ImplicitParamDecl *dstDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(dstDecl, dstDecl->getType()));
+ ImplicitParamDecl *srcDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
- // FIXME: We'd like to put these into a mergable by content, with
- // internal linkage.
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: it would be nice if these were mergeable with things with
+ // identical semantics.
+ const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -959,80 +1075,89 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_block_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false,
true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
-
- llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
- llvm::Type *PtrPtrT;
-
- if (NoteForHelperp) {
- std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
-
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
- SrcObj = Builder.CreateLoad(SrcObj);
-
- llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
- llvm::Type *PtrPtrT;
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
- DstObj = Builder.CreateLoad(DstObj);
-
- for (unsigned i=0; i < NoteForHelper.size(); ++i) {
- int flag = NoteForHelper[i].flag;
- int index = NoteForHelper[i].index;
-
- if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
- || NoteForHelper[i].RequiresCopying) {
- llvm::Value *Srcv = SrcObj;
- Srcv = Builder.CreateStructGEP(Srcv, index);
- Srcv = Builder.CreateBitCast(Srcv,
- llvm::PointerType::get(PtrToInt8Ty, 0));
- Srcv = Builder.CreateLoad(Srcv);
-
- llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
- Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
-
- llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = CGM.getBlockObjectAssign();
- Builder.CreateCall3(F, Dstv, Srcv, N);
- }
+ StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+
+ const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+
+ llvm::Value *dst = GetAddrOfLocalVar(dstDecl);
+ dst = Builder.CreateLoad(dst);
+ dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ const Expr *copyExpr = ci->getCopyExpr();
+ unsigned flags = 0;
+
+ if (copyExpr) {
+ assert(!ci->isByRef());
+ // don't bother computing flags
+ } else if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ flags = BLOCK_FIELD_IS_BLOCK;
+ } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ }
+
+ if (!copyExpr && !flags) continue;
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+ llvm::Value *dstField = Builder.CreateStructGEP(dst, index);
+
+ // If there's an explicit copy expression, we do that.
+ if (copyExpr) {
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags));
}
}
- CGF.FinishFunction();
+ FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
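To make the loop concrete: for a block capturing one Objective-C object pointer and one __block variable, with no C++ copy expressions, the helper emitted above is equivalent to this hand-written sketch (capture layout and names hypothetical):

// Declared by the blocks runtime; the exact header varies by version.
extern "C" void _Block_object_assign(void *destAddr, const void *object,
                                     const int flags);

struct Literal {
  void *header[5];   // isa, flags, reserved, invoke, descriptor
  void *obj;         // ObjC object pointer -> BLOCK_FIELD_IS_OBJECT (3)
  void *byref;       // __block variable   -> BLOCK_FIELD_IS_BYREF  (8)
};

static void copy_helper(void *dstp, void *srcp) {
  Literal *dst = static_cast<Literal *>(dstp);
  Literal *src = static_cast<Literal *>(srcp);
  _Block_object_assign(&dst->obj,   src->obj,   3);
  _Block_object_assign(&dst->byref, src->byref, 8);
}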
-llvm::Constant *BlockFunction::
-GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
- const llvm::StructType* T,
- std::vector<HelperInfo> *NoteForHelperp) {
- QualType R = getContext().VoidTy;
+llvm::Constant *
+CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
- FunctionArgList Args;
+ FunctionArgList args;
// FIXME: This leaks
- ImplicitParamDecl *Src =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
-
- Args.push_back(std::make_pair(Src, Src->getType()));
+ ImplicitParamDecl *srcDecl =
+ ImplicitParamDecl::Create(C, 0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(std::make_pair(srcDecl, srcDecl->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
  // FIXME: We'd like to put these into a mergeable-by-content section,
  // with internal linkage.
- CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1041,59 +1166,76 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
+ FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
-
- if (NoteForHelperp) {
- std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
-
- llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
- llvm::Type *PtrPtrT;
- PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
- SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
- SrcObj = Builder.CreateLoad(SrcObj);
-
- for (unsigned i=0; i < NoteForHelper.size(); ++i) {
- int flag = NoteForHelper[i].flag;
- int index = NoteForHelper[i].index;
-
- if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
- || NoteForHelper[i].RequiresCopying) {
- llvm::Value *Srcv = SrcObj;
- Srcv = Builder.CreateStructGEP(Srcv, index);
- Srcv = Builder.CreateBitCast(Srcv,
- llvm::PointerType::get(PtrToInt8Ty, 0));
- Srcv = Builder.CreateLoad(Srcv);
-
- BuildBlockRelease(Srcv, flag);
- }
+ StartFunction(FD, C.VoidTy, Fn, args, SourceLocation());
+
+ const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ BlockFieldFlags flags;
+ const CXXDestructorDecl *dtor = 0;
+
+ if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ flags = BLOCK_FIELD_IS_BLOCK;
+ } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ } else if (C.getLangOptions().CPlusPlus) {
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
+ if (!record->hasTrivialDestructor())
+ dtor = record->getDestructor();
}
- }
- CGF.FinishFunction();
+ if (!dtor && flags.empty()) continue;
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
-}
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
-llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelper) {
- return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
- T, NoteForHelper);
-}
+    // If there's a destructor, push a destructor cleanup for the field.
+ if (dtor) {
+ PushDestructorCleanup(dtor, srcField);
+
+ // Otherwise we call _Block_object_dispose. It wouldn't be too
+ // hard to just emit this as a cleanup if we wanted to make sure
+ // that things were done in reverse.
+ } else {
+ llvm::Value *value = Builder.CreateLoad(srcField);
+ value = Builder.CreateBitCast(value, VoidPtrTy);
+ BuildBlockRelease(value, flags);
+ }
+ }
+
+ cleanups.ForceCleanup();
-llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
- std::vector<HelperInfo> *NoteForHelperp) {
- return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
- T, NoteForHelperp);
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
-llvm::Constant *BlockFunction::
-GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+llvm::Constant *CodeGenFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, BlockFieldFlags flags,
+ const VarDecl *variable) {
QualType R = getContext().VoidTy;
FunctionArgList Args;
@@ -1121,10 +1263,10 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__Block_byref_id_object_copy_", &CGM.getModule());
+ "__Block_byref_object_copy_", &CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+ = &CGM.getContext().Idents.get("__Block_byref_object_copy_");
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
@@ -1132,37 +1274,43 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+ StartFunction(FD, R, Fn, Args, SourceLocation());
// dst->x
- llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ llvm::Value *V = GetAddrOfLocalVar(Dst);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
- llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+ llvm::Value *DstObj = V;
// src->x
- V = CGF.GetAddrOfLocalVar(Src);
+ V = GetAddrOfLocalVar(Src);
V = Builder.CreateLoad(V);
V = Builder.CreateBitCast(V, T);
V = Builder.CreateStructGEP(V, 6, "x");
- V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
- llvm::Value *SrcObj = Builder.CreateLoad(V);
-
- flag |= BLOCK_BYREF_CALLER;
-
- llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = CGM.getBlockObjectAssign();
- Builder.CreateCall3(F, DstObj, SrcObj, N);
-
- CGF.FinishFunction();
+
+ if (Expr *copyExpr = getContext().getBlockVarCopyInits(variable)) {
+ llvm::Value *SrcObj = V;
+ EmitSynthesizedCXXCopyCtor(DstObj, SrcObj, copyExpr);
+ } else {
+ DstObj = Builder.CreateBitCast(DstObj, VoidPtrTy);
+ V = Builder.CreateBitCast(V, VoidPtrPtrTy);
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+ flags |= BLOCK_BYREF_CALLER;
+ llvm::Value *N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
+ llvm::Value *F = CGM.getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+ }
+
+ FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
llvm::Constant *
-BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
- int flag) {
+CodeGenFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ BlockFieldFlags flags,
+ const VarDecl *variable) {
QualType R = getContext().VoidTy;
FunctionArgList Args;
@@ -1184,11 +1332,11 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__Block_byref_id_object_dispose_",
+ "__Block_byref_object_dispose_",
&CGM.getModule());
IdentifierInfo *II
- = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+ = &CGM.getContext().Idents.get("__Block_byref_object_dispose_");
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
@@ -1196,72 +1344,78 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
SC_Static,
SC_None,
false, true);
- CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+ StartFunction(FD, R, Fn, Args, SourceLocation());
- llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ llvm::Value *V = GetAddrOfLocalVar(Src);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
- V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
- V = Builder.CreateLoad(V);
- flag |= BLOCK_BYREF_CALLER;
- BuildBlockRelease(V, flag);
- CGF.FinishFunction();
+  // If it's not any kind of special object, the only remaining cleanup
+  // is a C++ destructor, if the type has one.
+ if (!flags.isSpecialPointer()) {
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ PushDestructorCleanup(variable->getType(), V);
+ PopCleanupBlocks(CleanupDepth);
- return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+ // Otherwise, call _Block_object_dispose.
+ } else {
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(Int8PtrTy, 0));
+ V = Builder.CreateLoad(V);
+
+ flags |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flags);
+ }
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
-llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
- int Flag, unsigned Align) {
+llvm::Constant *CodeGenModule::BuildbyrefCopyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned align,
+ const VarDecl *var) {
// All alignments below that of pointer alignment collapse down to just
// pointer alignment, as we always have at least that much alignment to begin
// with.
- Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ align /= unsigned(getTarget().getPointerAlign(0) / 8);
// As an optimization, we only generate a single function of each kind we
// might need. We need a different one for each alignment and for each
// setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
- llvm::Constant *&Entry = CGM.AssignCache[Kind];
- if (Entry)
- return Entry;
- return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
+ uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
+ llvm::Constant *&Entry = AssignCache[Kind];
+ if (!Entry)
+ Entry = CodeGenFunction(*this).
+ GeneratebyrefCopyHelperFunction(T, flags, var);
+ return Entry;
}
-llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
- int Flag,
- unsigned Align) {
+llvm::Constant *CodeGenModule::BuildbyrefDestroyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned align,
+ const VarDecl *var) {
  // All alignments below that of pointer alignment collapse down to just
// pointer alignment, as we always have at least that much alignment to begin
// with.
- Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ align /= unsigned(getTarget().getPointerAlign(0) / 8);
// As an optimization, we only generate a single function of each kind we
// might need. We need a different one for each alignment and for each
// setting of flags. We mix Align and flag to get the kind.
- uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
- llvm::Constant *&Entry = CGM.DestroyCache[Kind];
- if (Entry)
- return Entry;
- return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
+ uint64_t Kind = (uint64_t)align*BLOCK_BYREF_CURRENT_MAX + flags.getBitMask();
+ llvm::Constant *&Entry = DestroyCache[Kind];
+ if (!Entry)
+ Entry = CodeGenFunction(*this).
+ GeneratebyrefDestroyHelperFunction(T, flags, var);
+ return Entry;
}
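The cache key used by BuildbyrefCopyHelper and BuildbyrefDestroyHelper is a simple mixed-radix encoding; the flag bits in play stay well below BLOCK_BYREF_CURRENT_MAX (256), so alignment and flags can never collide. A sketch with a worked value (helper name hypothetical):

#include <stdint.h>

uint64_t byrefHelperKind(unsigned alignInPointers, uint32_t flags) {
  return uint64_t(alignInPointers) * 256 + flags;  // 256 == BLOCK_BYREF_CURRENT_MAX
}
// e.g. with 8-byte pointers, a 16-byte-aligned __block variable whose
// flags are BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK (0x08 | 0x10):
//   byrefHelperKind(16 / 8, 0x18) == 2 * 256 + 24 == 536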
-void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
llvm::Value *F = CGM.getBlockObjectDispose();
llvm::Value *N;
- V = Builder.CreateBitCast(V, PtrToInt8Ty);
- N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
Builder.CreateCall2(F, V, N);
}
-
-ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
-
-BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
- CGBuilderTy &B)
- : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
- PtrToInt8Ty = llvm::PointerType::getUnqual(
- llvm::Type::getInt8Ty(VMContext));
-
- BlockHasCopyDispose = false;
-}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
index 743e3c8..bee729d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -24,9 +24,6 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
-#include <vector>
-#include <map>
-
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
@@ -46,157 +43,153 @@ namespace llvm {
namespace clang {
namespace CodeGen {
+
class CodeGenModule;
+class CGBlockInfo;
+
+enum BlockFlag_t {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+};
+class BlockFlags {
+ uint32_t flags;
-class BlockBase {
+ BlockFlags(uint32_t flags) : flags(flags) {}
public:
- enum {
- BLOCK_HAS_COPY_DISPOSE = (1 << 25),
- BLOCK_HAS_CXX_OBJ = (1 << 26),
- BLOCK_IS_GLOBAL = (1 << 28),
- BLOCK_USE_STRET = (1 << 29),
- BLOCK_HAS_SIGNATURE = (1 << 30)
- };
-};
+ BlockFlags() : flags(0) {}
+ BlockFlags(BlockFlag_t flag) : flags(flag) {}
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
-class BlockModule : public BlockBase {
- ASTContext &Context;
- llvm::Module &TheModule;
- const llvm::TargetData &TheTargetData;
- CodeGenTypes &Types;
- CodeGenModule &CGM;
- llvm::LLVMContext &VMContext;
+ friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
+ return BlockFlags(l.flags | r.flags);
+ }
+ friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFlags l, BlockFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+ return BlockFlags(l) | BlockFlags(r);
+}
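A usage note on the free operator| just above: enum | enum undergoes integral promotion and yields a plain int, and the int constructor of BlockFlags is deliberately private, so this typed overload is what keeps flag arithmetic strongly typed. A small sketch using the classes defined here:

#include <cassert>

static void blockFlagsExample() {
  BlockFlags flags = BLOCK_HAS_SIGNATURE | BLOCK_HAS_COPY_DISPOSE;
  assert(flags & BLOCK_HAS_SIGNATURE);   // friend operator& yields bool
  assert(flags.getBitMask() == ((1u << 30) | (1u << 25)));
}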
- ASTContext &getContext() const { return Context; }
- llvm::Module &getModule() const { return TheModule; }
- CodeGenTypes &getTypes() { return Types; }
- const llvm::TargetData &getTargetData() const { return TheTargetData; }
-public:
- int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
- const llvm::Type *getBlockDescriptorType();
+enum BlockFieldFlag_t {
+ BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
- const llvm::Type *getGenericBlockLiteralType();
+ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
+ helpers */
- llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+};
- const llvm::Type *BlockDescriptorType;
- const llvm::Type *GenericBlockLiteralType;
+class BlockFieldFlags {
+ uint32_t flags;
- struct {
- int GlobalUniqueCount;
- } Block;
+ BlockFieldFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFieldFlags() : flags(0) {}
+ BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
- const llvm::PointerType *PtrToInt8Ty;
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
- std::map<uint64_t, llvm::Constant *> AssignCache;
- std::map<uint64_t, llvm::Constant *> DestroyCache;
+ /// Answers whether the flags indicate that this field is an object
+ /// or block pointer that requires _Block_object_assign/dispose.
+ bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
- BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
- CodeGenTypes &T, CodeGenModule &CodeGen)
- : Context(C), TheModule(M), TheTargetData(TD), Types(T),
- CGM(CodeGen), VMContext(M.getContext()),
- BlockDescriptorType(0), GenericBlockLiteralType(0) {
- Block.GlobalUniqueCount = 0;
- PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
+ friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
+ return BlockFieldFlags(l.flags | r.flags);
+ }
+ friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
+ return (l.flags & r.flags);
}
-
- bool BlockRequiresCopying(QualType Ty)
- { return getContext().BlockRequiresCopying(Ty); }
};
+inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
+ return BlockFieldFlags(l) | BlockFieldFlags(r);
+}
-class BlockFunction : public BlockBase {
- CodeGenModule &CGM;
- ASTContext &getContext() const;
-
-protected:
- llvm::LLVMContext &VMContext;
-
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
public:
- CodeGenFunction &CGF;
-
- const llvm::PointerType *PtrToInt8Ty;
- struct HelperInfo {
- int index;
- int flag;
- bool RequiresCopying;
+ /// Name - The name of the block, more or less.
+ const char *Name;
+
+ /// The field index of 'this' within the block, if there is one.
+ unsigned CXXThisIndex;
+
+ class Capture {
+ uintptr_t Data;
+
+ public:
+ bool isIndex() const { return (Data & 1) != 0; }
+ bool isConstant() const { return !isIndex(); }
+ unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+
+ static Capture makeIndex(unsigned index) {
+ Capture v;
+ v.Data = (index << 1) | 1;
+ return v;
+ }
+
+ static Capture makeConstant(llvm::Value *value) {
+ Capture v;
+ v.Data = reinterpret_cast<uintptr_t>(value);
+ return v;
+ }
};
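// Capture packs either a field index or a constant llvm::Value* into one
// uintptr_t, using bit 0 as the discriminator; this is sound because
// llvm::Value objects are more than 1-byte aligned, so a real pointer never
// has its low bit set. A small sketch of the invariant:
//
//   CGBlockInfo::Capture c = CGBlockInfo::Capture::makeIndex(5);
//   assert(c.isIndex() && c.getIndex() == 5);  // Data == (5 << 1) | 1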
- enum {
- BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
- block, ... */
- BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
- BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
- variable */
- BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
- helpers */
- BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
- support routines */
- BLOCK_BYREF_CURRENT_MAX = 256
- };
+ /// The mapping from captured variables to how each one is captured.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ /// CanBeGlobal - True if the block can be global, i.e. it has
+ /// no non-constant captures.
+ bool CanBeGlobal : 1;
- CGBuilderTy &Builder;
+ /// True if the block needs a custom copy or dispose function.
+ bool NeedsCopyDispose : 1;
- BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+ /// HasCXXObject - True if the block's custom copy/dispose functions
+ /// need to be run even in GC mode.
+ bool HasCXXObject : 1;
- /// BlockOffset - The offset in bytes for the next allocation of an
- /// imported block variable.
- CharUnits BlockOffset;
- /// BlockAlign - Maximal alignment needed for the Block expressed in
- /// characters.
+ const llvm::StructType *StructureType;
+ const BlockExpr *Block;
+ CharUnits BlockSize;
CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ const Capture &getCapture(const VarDecl *var) const {
+ llvm::DenseMap<const VarDecl*, Capture>::const_iterator
+ it = Captures.find(var);
+ assert(it != Captures.end() && "no entry for variable!");
+ return it->second;
+ }
+
+ const BlockDecl *getBlockDecl() const { return Block->getBlockDecl(); }
+ const BlockExpr *getBlockExpr() const { return Block; }
- /// getBlockOffset - Allocate a location within the block's storage
- /// for a value with the given size and alignment requirements.
- CharUnits getBlockOffset(CharUnits Size, CharUnits Align);
-
- /// BlockHasCopyDispose - True iff the block uses copy/dispose.
- bool BlockHasCopyDispose;
-
- /// BlockLayout - The layout of the block's storage, represented as
- /// a sequence of expressions which require such storage. The
- /// expressions can be:
- /// - a BlockDeclRefExpr, indicating that the given declaration
- /// from an enclosing scope is needed by the block;
- /// - a DeclRefExpr, which always wraps an anonymous VarDecl with
- /// array type, used to insert padding into the block; or
- /// - a CXXThisExpr, indicating that the C++ 'this' value should
- /// propagate from the parent to the block.
- /// This is a really silly representation.
- llvm::SmallVector<const Expr *, 8> BlockLayout;
-
- /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
- llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
-
- /// BlockCXXThisOffset - The offset of the C++ 'this' value within
- /// the block structure.
- CharUnits BlockCXXThisOffset;
-
- ImplicitParamDecl *BlockStructDecl;
- ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
-
- llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
- std::vector<HelperInfo> *);
- llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
- std::vector<HelperInfo> *);
-
- llvm::Constant *BuildCopyHelper(const llvm::StructType *,
- std::vector<HelperInfo> *);
- llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
- std::vector<HelperInfo> *);
-
- llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
- llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
-
- llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
- unsigned Align);
- llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
- unsigned Align);
-
- void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
-
- bool BlockRequiresCopying(QualType Ty)
- { return getContext().BlockRequiresCopying(Ty); }
+ CGBlockInfo(const BlockExpr *blockExpr, const char *Name);
};
} // end namespace CodeGen
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index 986f621..5eeb605 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -30,40 +30,39 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
bool LoadLoad, bool LoadStore,
bool StoreLoad, bool StoreStore,
bool Device) {
- Value *True = llvm::ConstantInt::getTrue(CGF.getLLVMContext());
- Value *False = llvm::ConstantInt::getFalse(CGF.getLLVMContext());
+ Value *True = CGF.Builder.getTrue();
+ Value *False = CGF.Builder.getFalse();
Value *C[5] = { LoadLoad ? True : False,
LoadStore ? True : False,
StoreLoad ? True : False,
- StoreStore ? True : False,
+ StoreStore ? True : False,
Device ? True : False };
CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
C, C + 5);
}
-static Value *EmitCastToInt(CodeGenFunction &CGF,
- const llvm::Type *ToType, Value *Val) {
- if (Val->getType()->isPointerTy()) {
- return CGF.Builder.CreatePtrToInt(Val, ToType);
- }
- assert(Val->getType()->isIntegerTy() &&
- "Used a non-integer and non-pointer type with atomic builtin");
- assert(Val->getType()->getScalarSizeInBits() <=
- ToType->getScalarSizeInBits() && "Integer type too small");
- return CGF.Builder.CreateSExtOrBitCast(Val, ToType);
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, const llvm::IntegerType *IntType) {
+ V = CGF.EmitToMemory(V, T);
+
+ if (V->getType()->isPointerTy())
+ return CGF.Builder.CreatePtrToInt(V, IntType);
+
+ assert(V->getType() == IntType);
+ return V;
}
-static Value *EmitCastFromInt(CodeGenFunction &CGF, QualType ToQualType,
- Value *Val) {
- const llvm::Type *ToType = CGF.ConvertType(ToQualType);
- if (ToType->isPointerTy()) {
- return CGF.Builder.CreateIntToPtr(Val, ToType);
- }
- assert(Val->getType()->isIntegerTy() &&
- "Used a non-integer and non-pointer type with atomic builtin");
- assert(Val->getType()->getScalarSizeInBits() >=
- ToType->getScalarSizeInBits() && "Integer type too small");
- return CGF.Builder.CreateTruncOrBitCast(Val, ToType);
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, const llvm::Type *ResultType) {
+ V = CGF.EmitFromMemory(V, T);
+
+ if (ResultType->isPointerTy())
+ return CGF.Builder.CreateIntToPtr(V, ResultType);
+
+ assert(V->getType() == ResultType);
+ return V;
}
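// Together these helpers give the atomic paths a uniform iN view of each
// operand: pointers go through ptrtoint/inttoptr, types with a distinct
// memory representation (e.g. bool) go through EmitToMemory/EmitFromMemory,
// and plain integers pass through unchanged (enforced by the asserts).
// Sketch of the round trip for a pointer-typed operand, assuming IntNTy is
// the iN type matching T's size:
//
//   llvm::Value *Raw  = EmitToInt(CGF, V, T, IntNTy);           // ptrtoint
//   llvm::Value *Back = EmitFromInt(CGF, Raw, T, V->getType()); // inttoptr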
// The atomic builtins are also full memory barriers. This is a utility for
@@ -85,43 +84,69 @@ static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E) {
- const llvm::Type *ValueType =
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
- Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
-
- Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))) };
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- EmitCallWithBarrier(CGF, AtomF, Args,
- Args + 2)));
+ CGF.getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ llvm::Value *Args[2];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
}
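// Net effect of EmitBinaryAtomic (a sketch, not the verbatim IR): for
// T *p and T v,
//
//   %iptr = bitcast T* %p to iN*            ; N = bit-size of T
//   %ival = EmitToInt(v)                    ; ptrtoint if T is a pointer
//   %old  = call iN @llvm.atomic.<op>(...)  ; wrapped in memory barriers
//   ret     EmitFromInt(%old)               ; back to T
//
// The address-space-aware getPointerTo(AddrSpace) is what the old code
// lacked: the bitcast now preserves the destination's address space.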
/// Utility to insert an atomic instruction based on the Intrinsic::ID and
-// the expression node, where the return value is the result of the
-// operation.
+/// the expression node, where the return value is the result of the
+/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
- const llvm::Type *ValueType =
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
- Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
-
- Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))) };
- Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- CGF.Builder.CreateBinOp(Op, Result,
- Args[1])));
+ CGF.getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ llvm::Value *Args[2];
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
}
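// The "post" variant differs only in its result: the intrinsic returns the
// old value, so the updated value is recomputed by re-applying Op. For
// example (assumed source form):
//
//   __sync_add_and_fetch(p, n)  =>  old = atomic fetch-add(p, n);
//                                   result = old + n;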
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
@@ -153,10 +178,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Expr::EvalResult Result;
if (E->Evaluate(Result, CGM.getContext())) {
if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(VMContext,
+ return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
- else if (Result.Val.isFloat())
- return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
+ if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
+ Result.Val.getFloat()));
}
switch (BuiltinID) {
@@ -168,7 +194,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
@@ -181,7 +207,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -310,7 +336,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
assert(CI);
uint64_t val = CI->getZExtValue();
- CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
return RValue::get(Builder.CreateCall2(F,
@@ -332,11 +358,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
- if (CatchUndefined && HaveInsertPoint())
+ if (CatchUndefined)
EmitBranch(getTrapBB());
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ else
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("unreachable.cont"));
+
+ return RValue::get(0);
}
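// This replaces the old CreateUnreachable()/ClearInsertionPoint() dance:
// after the terminator, a fresh "unreachable.cont" block keeps a valid
// insertion point for whatever the caller emits next, and the trivially
// dead block can be cleaned up later. The same shape recurs below for
// __builtin_eh_return and __builtin_longjmp.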
case Builtin::BI__builtin_powi:
@@ -495,18 +525,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
- Address,
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
- SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
return RValue::get(Address);
}
case Builtin::BImemcpy:
@@ -514,11 +539,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
- SizeVal->getType()),
- Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
return RValue::get(Address);
}
@@ -536,24 +557,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
- SizeVal->getType()),
- Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
return RValue::get(Address);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
- Address,
- Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- llvm::Type::getInt8Ty(VMContext)),
- SizeVal,
- llvm::ConstantInt::get(Int32Ty, 1),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
return RValue::get(Address);
}
case Builtin::BI__builtin_dwarf_cfa: {
@@ -621,9 +634,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
: Intrinsic::eh_return_i64,
0, 0);
Builder.CreateCall2(F, Int, Ptr);
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("builtin_eh_return.cont"));
+
+ return RValue::get(0);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
@@ -640,11 +656,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
//
// See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
- LLVMContext &C = CGM.getLLVMContext();
-
// Cast the pointer to intptr_t.
Value *Ptr = EmitScalarExpr(E->getArg(0));
- const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
// If that's 64 bits, we're done.
@@ -676,20 +689,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
- Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
- // longjmp doesn't return; mark this as unreachable
- Value *V = Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
- return RValue::get(V);
+ // longjmp doesn't return; mark this as unreachable.
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("longjmp.cont"));
+
+ return RValue::get(0);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
@@ -788,23 +804,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16: {
- const llvm::Type *ValueType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ QualType T = E->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
IntrinsicTypes, 2);
- Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(2))) };
- return RValue::get(EmitCastFromInt(CGF, E->getType(),
- EmitCallWithBarrier(CGF, AtomF, Args,
- Args + 3)));
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(*this, Args[1], T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Result = EmitFromInt(*this, Result, T, ValueType);
+ return RValue::get(Result);
}
case Builtin::BI__sync_bool_compare_and_swap_1:
@@ -812,26 +834,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16: {
- const llvm::Type *ValueType =
- llvm::IntegerType::get(
- CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- const llvm::Type *PtrType = ValueType->getPointerTo();
- const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ QualType T = E->getArg(1)->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ const llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
IntrinsicTypes, 2);
- Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
- PtrType),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(1))),
- EmitCastToInt(CGF, ValueType,
- CGF.EmitScalarExpr(E->getArg(2))) };
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
Value *OldVal = Args[1];
Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
- return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
+ return RValue::get(Result);
}
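// Semantics being lowered here (a sketch): the "bool" flavor runs the same
// cmpxchg as the "val" flavor above, then tests the returned previous value
// against the expected one instead of handing it back:
//
//   T prev = cmpxchg(p, oldv, newv);   // full barriers on both sides
//   bool ok = (prev == oldv);          // CreateICmpEQ, then zext to result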
case Builtin::BI__sync_lock_test_and_set_1:
@@ -935,11 +961,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
Function *F = CGM.getIntrinsic(IntrinsicID);
const llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
- Value *ArgValue = EmitScalarExpr(E->getArg(i));
+ Value *ArgValue;
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ ArgValue = EmitScalarExpr(E->getArg(i));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ }
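// Bit i of ICEArguments is set when the builtin's signature requires
// argument i to be an integer constant expression (the 'I' modifier in
// Builtins.def); such arguments are folded eagerly so the intrinsic is
// guaranteed to see a ConstantInt:
//
//   bool mustBeICE = (ICEArguments & (1 << i)) != 0;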
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
@@ -956,7 +1001,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
QualType BuiltinRetType = E->getType();
- const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
+ const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -997,7 +1042,8 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
-const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
+static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
+ bool q) {
switch (type) {
default: break;
case 0:
@@ -1012,17 +1058,15 @@ const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
return 0;
}
-Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, bool widen) {
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
- if (widen)
- nElts <<= 1;
SmallVector<Constant*, 16> Indices(nElts, C);
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
- const char *name, bool splat,
+ const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
@@ -1032,10 +1076,6 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- if (splat) {
- Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
- Ops.resize(j);
- }
return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
}
@@ -1047,7 +1087,7 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
- return llvm::ConstantVector::get(CV.begin(), CV.size());
+ return llvm::ConstantVector::get(CV);
}
/// GetPointeeAlignment - Given an expression with a pointer type, find the
@@ -1099,9 +1139,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Determine the overloaded type of this builtin.
const llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
- Ty = llvm::Type::getFloatTy(VMContext);
+ Ty = llvm::Type::getFloatTy(getLLVMContext());
else
- Ty = llvm::Type::getDoubleTy(VMContext);
+ Ty = llvm::Type::getDoubleTy(getLLVMContext());
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
@@ -1117,9 +1157,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool usgn = type & 0x08;
bool quad = type & 0x10;
bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
- bool splat = false;
+ (void)poly; // Only used in assert()s.
+ bool rightShift = false;
- const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
+ const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
const llvm::Type *Ty = VTy;
if (!Ty)
return 0;
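// Layout of the NEON type code, as implied by the bit tests above and the
// asserts below (a reading of this code, not an authoritative spec):
//   bits 0-2: element type (4 = f32, 5/6 = polynomial, 7 = f16)
//   bit 3:    unsigned
//   bit 4:    quad (128-bit) vector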
@@ -1127,37 +1168,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
default: return 0;
- case ARM::BI__builtin_neon_vaba_v:
- case ARM::BI__builtin_neon_vabaq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- SmallVector<Value*, 2> Args;
- Args.push_back(Ops[1]);
- Args.push_back(Ops[2]);
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Args, "vaba");
- return Builder.CreateAdd(Ops[0], Ops[1], "vaba");
- }
- case ARM::BI__builtin_neon_vabal_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- SmallVector<Value*, 2> Args;
- Args.push_back(Ops[1]);
- Args.push_back(Ops[2]);
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Args, "vabal");
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- return Builder.CreateAdd(Ops[0], Ops[1], "vabal");
- }
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
- case ARM::BI__builtin_neon_vabdl_v: {
- Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Ops, "vabdl");
- return Builder.CreateZExt(Ops[0], Ty, "vabdl");
- }
case ARM::BI__builtin_neon_vabs_v:
case ARM::BI__builtin_neon_vabsq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
@@ -1165,51 +1179,28 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vaddhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
Ops, "vaddhn");
- case ARM::BI__builtin_neon_vaddl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateAdd(Ops[0], Ops[1], "vaddl");
- }
- case ARM::BI__builtin_neon_vaddw_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn)
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- else
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- return Builder.CreateAdd(Ops[0], Ops[1], "vaddw");
- }
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcage_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
return EmitNeonCall(F, Ops, "vcage");
}
case ARM::BI__builtin_neon_vcaleq_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcageq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
return EmitNeonCall(F, Ops, "vcage");
}
case ARM::BI__builtin_neon_vcalt_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcagt_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
return EmitNeonCall(F, Ops, "vcagt");
}
case ARM::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcagtq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
return EmitNeonCall(F, Ops, "vcagt");
}
case ARM::BI__builtin_neon_vcls_v:
@@ -1227,11 +1218,20 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
return EmitNeonCall(F, Ops, "vcnt");
}
- // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
+ case ARM::BI__builtin_neon_vcvt_f16_v: {
+ assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_f16: {
+ assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
case ARM::BI__builtin_neon_vcvt_f32_v:
case ARM::BI__builtin_neon_vcvtq_f32_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(VMContext, 4, quad);
+ Ty = GetNeonType(getLLVMContext(), 4, quad);
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
}
@@ -1239,13 +1239,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_u32_v:
case ARM::BI__builtin_neon_vcvtq_s32_v:
case ARM::BI__builtin_neon_vcvtq_u32_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(getLLVMContext(), 4, quad));
return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
+ const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
Function *F = CGM.getIntrinsic(Int, Tys, 2);
return EmitNeonCall(F, Ops, "vcvt_n");
@@ -1254,28 +1254,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
+ const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
Function *F = CGM.getIntrinsic(Int, Tys, 2);
return EmitNeonCall(F, Ops, "vcvt_n");
}
- case ARM::BI__builtin_neon_vdup_lane_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]));
- case ARM::BI__builtin_neon_vdupq_lane_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]), true);
case ARM::BI__builtin_neon_vext_v:
case ARM::BI__builtin_neon_vextq_v: {
- ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
- int CV = C->getSExtValue();
+ int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value *SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
}
case ARM::BI__builtin_neon_vget_lane_i8:
@@ -1386,6 +1379,27 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld2_dup_v:
case ARM::BI__builtin_neon_vld3_dup_v:
case ARM::BI__builtin_neon_vld4_dup_v: {
+ // Handle 64-bit elements as a special case. There is no "dup" needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4;
+ break;
+ default: assert(0 && "unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
switch (BuiltinID) {
case ARM::BI__builtin_neon_vld2_dup_v:
Int = Intrinsic::arm_neon_vld2lane;
@@ -1430,46 +1444,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vminq_v:
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
- case ARM::BI__builtin_neon_vmlal_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
- }
- case ARM::BI__builtin_neon_vmlal_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- if (usgn) {
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- Ops[2] = Builder.CreateZExt(Ops[2], Ty);
- } else {
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- Ops[2] = Builder.CreateSExt(Ops[2], Ty);
- }
- Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
- return Builder.CreateAdd(Ops[0], Ops[1], "vmlal");
- }
- case ARM::BI__builtin_neon_vmlsl_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
- }
- case ARM::BI__builtin_neon_vmlsl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
- if (usgn) {
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- Ops[2] = Builder.CreateZExt(Ops[2], Ty);
- } else {
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- Ops[2] = Builder.CreateSExt(Ops[2], Ty);
- }
- Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
- return Builder.CreateSub(Ops[0], Ops[1], "vmlsl");
- }
case ARM::BI__builtin_neon_vmovl_v: {
const llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
@@ -1482,38 +1456,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
- case ARM::BI__builtin_neon_vmull_lane_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- Ops[1] = EmitNeonSplat(Ops[1], cast<Constant>(Ops[2]));
- }
- case ARM::BI__builtin_neon_vmull_v: {
- if (poly)
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
- Ops, "vmull");
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateMul(Ops[0], Ops[1], "vmull");
- }
+ case ARM::BI__builtin_neon_vmul_v:
+ case ARM::BI__builtin_neon_vmulq_v:
+ assert(poly && "vmul builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
+ Ops, "vmul");
+ case ARM::BI__builtin_neon_vmull_v:
+ assert(poly && "vmull builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
+ Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
- case ARM::BI__builtin_neon_vpadalq_v:
+ case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ const llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ const llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ const llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal");
+ }
case ARM::BI__builtin_neon_vpadd_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
Ops, "vpadd");
case ARM::BI__builtin_neon_vpaddl_v:
- case ARM::BI__builtin_neon_vpaddlq_v:
+ case ARM::BI__builtin_neon_vpaddlq_v: {
Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ const llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ const llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ const llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl");
+ }
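// Concrete instance of the Ty/NarrowTy pairing built above (assumed
// shapes): for a <4 x i16> result, EltBits is 16, so NarrowTy becomes
// <8 x i8>, and vpaddl pairwise-adds adjacent i8 lanes into i16 lanes.
// vpadal above uses the same two-type overload, plus an accumulator.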
case ARM::BI__builtin_neon_vpmax_v:
Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
@@ -1528,28 +1505,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vqaddq_v:
Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
- case ARM::BI__builtin_neon_vqdmlal_lane_v:
- splat = true;
case ARM::BI__builtin_neon_vqdmlal_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
- Ops, "vqdmlal", splat);
- case ARM::BI__builtin_neon_vqdmlsl_lane_v:
- splat = true;
+ Ops, "vqdmlal");
case ARM::BI__builtin_neon_vqdmlsl_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
- Ops, "vqdmlsl", splat);
- case ARM::BI__builtin_neon_vqdmulh_lane_v:
- case ARM::BI__builtin_neon_vqdmulhq_lane_v:
- splat = true;
+ Ops, "vqdmlsl");
case ARM::BI__builtin_neon_vqdmulh_v:
case ARM::BI__builtin_neon_vqdmulhq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
- Ops, "vqdmulh", splat);
- case ARM::BI__builtin_neon_vqdmull_lane_v:
- splat = true;
+ Ops, "vqdmulh");
case ARM::BI__builtin_neon_vqdmull_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
- Ops, "vqdmull", splat);
+ Ops, "vqdmull");
case ARM::BI__builtin_neon_vqmovn_v:
Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
@@ -1557,26 +1525,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqneg_v:
+ case ARM::BI__builtin_neon_vqnegq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
Ops, "vqneg");
- case ARM::BI__builtin_neon_vqrdmulh_lane_v:
- case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
- splat = true;
case ARM::BI__builtin_neon_vqrdmulh_v:
case ARM::BI__builtin_neon_vqrdmulhq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
- Ops, "vqrdmulh", splat);
+ Ops, "vqrdmulh");
case ARM::BI__builtin_neon_vqrshl_v:
case ARM::BI__builtin_neon_vqrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
- Ops, "vqrshrun_n", false, 1, true);
+ Ops, "vqrshrun_n", 1, true);
case ARM::BI__builtin_neon_vqshl_v:
case ARM::BI__builtin_neon_vqshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
@@ -1584,7 +1550,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vqshl_n_v:
case ARM::BI__builtin_neon_vqshlq_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n",
1, false);
case ARM::BI__builtin_neon_vqshlu_n_v:
case ARM::BI__builtin_neon_vqshluq_n_v:
@@ -1592,11 +1558,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops, "vqshlu", 1, false);
case ARM::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n",
1, true);
case ARM::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
- Ops, "vqshrun_n", false, 1, true);
+ Ops, "vqshrun_n", 1, true);
case ARM::BI__builtin_neon_vqsub_v:
case ARM::BI__builtin_neon_vqsubq_v:
Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
@@ -1622,12 +1588,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
case ARM::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
- Ops, "vrshrn_n", false, 1, true);
+ Ops, "vrshrn_n", 1, true);
case ARM::BI__builtin_neon_vrshr_n_v:
case ARM::BI__builtin_neon_vrshrq_n_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
- 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true);
case ARM::BI__builtin_neon_vrsqrte_v:
case ARM::BI__builtin_neon_vrsqrteq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
@@ -1665,14 +1630,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
case ARM::BI__builtin_neon_vshll_n_v:
Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1);
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
- Ops, "vshrn_n", false, 1, true);
+ Ops, "vshrn_n", 1, true);
case ARM::BI__builtin_neon_vshr_n_v:
case ARM::BI__builtin_neon_vshrq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1683,10 +1648,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
case ARM::BI__builtin_neon_vsri_n_v:
case ARM::BI__builtin_neon_vsriq_n_v:
- poly = true;
+ rightShift = true;
case ARM::BI__builtin_neon_vsli_n_v:
case ARM::BI__builtin_neon_vsliq_n_v:
- Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
Ops, "vsli_n");
case ARM::BI__builtin_neon_vsra_n_v:
@@ -1743,29 +1708,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
Ops, "vsubhn");
- case ARM::BI__builtin_neon_vsubl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn) {
- Ops[0] = Builder.CreateZExt(Ops[0], Ty);
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- } else {
- Ops[0] = Builder.CreateSExt(Ops[0], Ty);
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- }
- return Builder.CreateSub(Ops[0], Ops[1], "vsubl");
- }
- case ARM::BI__builtin_neon_vsubw_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
- if (usgn)
- Ops[1] = Builder.CreateZExt(Ops[1], Ty);
- else
- Ops[1] = Builder.CreateSExt(Ops[1], Ty);
- return Builder.CreateSub(Ops[0], Ops[1], "vsubw");
- }
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
@@ -1804,7 +1746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1813,7 +1755,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1824,7 +1766,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1832,7 +1774,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1843,7 +1785,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV;
+ Value *SV = 0;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -1852,7 +1794,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
- SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
SV = Builder.CreateStore(SV, Addr);
}
@@ -1861,13 +1803,57 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
}
+llvm::Value *CodeGenFunction::
+BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ std::vector<llvm::Constant*> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return llvm::ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i));
+
+ return Result;
+}
+
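// BuildVector takes the all-constants fast path when it can (yielding a
// ConstantVector) and otherwise chains insertelement onto an undef vector.
// Sketch of the call-site pattern used below for the MMX vec_init builtins
// (names as in this patch):
//
//   llvm::Value *Vec = BuildVector(Ops);  // Ops.size() must be a power of 2
//   Vec = Builder.CreateBitCast(Vec,
//                               llvm::Type::getX86_MMXTy(getLLVMContext()));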
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
-
llvm::SmallVector<Value*, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
switch (BuiltinID) {
default: return 0;
@@ -1926,6 +1912,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return Builder.CreateBitCast(BuildVector(Ops),
+ llvm::Type::getX86_MMXTy(getLLVMContext()));
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ return Builder.CreateExtractElement(Ops[0],
+ llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_pslldi:
case X86::BI__builtin_ia32_psllqi:
case X86::BI__builtin_ia32_psllwi:
@@ -1987,7 +1981,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
@@ -1995,7 +1989,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -2037,7 +2031,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
@@ -2068,7 +2062,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
- Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
@@ -2112,7 +2106,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
{
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
Ops.pop_back();
@@ -2152,7 +2146,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
{
- Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
Ops.pop_back();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
index 179716f..7ffc6e7 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -1,4 +1,4 @@
-//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,12 +16,12 @@
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
@@ -60,13 +60,12 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
return true;
}
- // If any fields have a non-trivial destructor, we have to emit it
- // separately.
+ // If any field has a non-trivial destructor, we have to emit the
+ // destructor separately.
for (CXXRecordDecl::field_iterator I = Class->field_begin(),
E = Class->field_end(); I != E; ++I)
- if (const RecordType *RT = (*I)->getType()->getAs<RecordType>())
- if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
- return true;
+ if ((*I)->getType().isDestructedType())
+ return true;
// Try to find a unique base class with a non-trivial destructor.
const CXXRecordDecl *UniqueBase = 0;
@@ -103,7 +102,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
- if (ClassLayout.getBaseClassOffset(UniqueBase) != 0)
+ if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
@@ -180,7 +179,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
// Finally, set up the alias with its proper name and attributes.
- SetCommonAttributes(AliasDecl.getDecl(), Alias);
+ SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
@@ -227,7 +226,8 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
FPT->isVariadic());
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ /*ForVTable=*/false));
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -284,16 +284,15 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD,
+ /*ForVTable=*/false));
}
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *This, const llvm::Type *Ty) {
- Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
-
- llvm::Value *VTable = CGF.Builder.CreateBitCast(This, Ty);
- VTable = CGF.Builder.CreateLoad(VTable);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
return CGF.Builder.CreateLoad(VFuncPtr);
@@ -308,164 +307,84 @@ CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
+/// BuildAppleKextVirtualCall - This routine supports gcc's kext ABI, which
+/// makes indirect calls to virtual functions by indexing directly into the
+/// vtable.
llvm::Value *
-CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *&This, const llvm::Type *Ty) {
- DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
- uint64_t VTableIndex =
- CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
-
- return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
-}
-
-/// Implementation for CGCXXABI. Possibly this should be moved into
-/// the incomplete ABI implementations?
-
-CGCXXABI::~CGCXXABI() {}
-
-static void ErrorUnsupportedABI(CodeGenFunction &CGF,
- llvm::StringRef S) {
- Diagnostic &Diags = CGF.CGM.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot yet compile %1 in this ABI");
- Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
- DiagID)
- << S;
-}
-
-static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
- QualType T) {
- return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
-}
-
-const llvm::Type *
-CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
- return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
-}
-
-llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
- llvm::Value *&This,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "calls through member pointers");
-
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
- return llvm::Constant::getNullValue(FTy->getPointerTo());
-}
-
-llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
- llvm::Value *Base,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "loads of member pointers");
- const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
- return llvm::Constant::getNullValue(Ty);
-}
-
-llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
- const CastExpr *E,
- llvm::Value *Src) {
- ErrorUnsupportedABI(CGF, "member function pointer conversions");
- return GetBogusMemberPointer(CGM, E->getType());
+CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ const llvm::Type *Ty) {
+ llvm::Value *VTable = 0;
+ assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+ "BuildAppleKextVirtualCall - bad Qual kind");
+
+ const Type *QTy = Qual->getAsType();
+ QualType T = QualType(QTy, 0);
+ const RecordType *RT = T->getAs<RecordType>();
+ assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
+
+ VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+  assert(VTable && "BuildAppleKextVirtualCall - kext vtbl pointer is null");
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+ uint64_t AddressPoint =
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ return Builder.CreateLoad(VFuncPtr);
}
+/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect
+/// vtable call to a virtual destructor. It returns 0 if it cannot make the
+/// call.
llvm::Value *
-CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
- llvm::Value *L,
- llvm::Value *R,
- const MemberPointerType *MPT,
- bool Inequality) {
- ErrorUnsupportedABI(CGF, "member function pointer comparison");
- return CGF.Builder.getFalse();
+CodeGenFunction::BuildAppleKextVirtualDestructorCall(
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD) {
+ llvm::Value * Callee = 0;
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(DD);
+  // FIXME: the Dtor_Base dtor is always called directly; it needs to be
+  // inline-expanded into the caller somehow. -O does that, but we need to
+  // support -O0 as well.
+ if (MD->isVirtual() && Type != Dtor_Base) {
+ // Compute the function type we're calling.
+ const CGFunctionInfo *FInfo =
+ &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty
+ = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+
+ llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+ uint64_t AddressPoint =
+ CGM.getVTables().getAddressPoint(BaseSubobject(RD, 0), RD);
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ Callee = Builder.CreateLoad(VFuncPtr);
+ }
+ return Callee;
}
llvm::Value *
-CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
- llvm::Value *MemPtr,
- const MemberPointerType *MPT) {
- ErrorUnsupportedABI(CGF, "member function pointer null testing");
- return CGF.Builder.getFalse();
-}
-
-llvm::Constant *
-CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
- return GetBogusMemberPointer(CGM, E->getType());
-}
-
-llvm::Constant *
-CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- return GetBogusMemberPointer(CGM, QualType(MPT, 0));
-}
-
-llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
- return GetBogusMemberPointer(CGM,
- CGM.getContext().getMemberPointerType(MD->getType(),
- MD->getParent()->getTypeForDecl()));
-}
-
-llvm::Constant *CGCXXABI::EmitMemberPointer(const FieldDecl *FD) {
- return GetBogusMemberPointer(CGM,
- CGM.getContext().getMemberPointerType(FD->getType(),
- FD->getParent()->getTypeForDecl()));
-}
-
-bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
- // Fake answer.
- return true;
-}
-
-void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
-
- // FIXME: I'm not entirely sure I like using a fake decl just for code
- // generation. Maybe we can come up with a better way?
- ImplicitParamDecl *ThisDecl
- = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
- &CGM.getContext().Idents.get("this"),
- MD->getThisType(CGM.getContext()));
- Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
- getThisDecl(CGF) = ThisDecl;
-}
-
-void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
- /// Initialize the 'this' slot.
- assert(getThisDecl(CGF) && "no 'this' variable for function");
- getThisValue(CGF)
- = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
- "this");
-}
-
-void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
- RValue RV, QualType ResultType) {
- CGF.EmitReturnOfRValue(RV, ResultType);
-}
-
-CharUnits CGCXXABI::GetArrayCookieSize(QualType ElementType) {
- return CharUnits::Zero();
-}
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, const llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
-llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- QualType ElementType) {
- // Should never be called.
- ErrorUnsupportedABI(CGF, "array cookie initialization");
- return 0;
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- ErrorUnsupportedABI(CGF, "array cookie reading");
-
- // This should be enough to avoid assertions.
- NumElements = 0;
- AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
- CookieSize = CharUnits::Zero();
-}
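
[Editor's note — not part of the patch.] The BuildVirtualCall helpers above emit a fixed sequence: load the vtable pointer out of the object, index into the table, load the function pointer, and call it. This hand-rolled C++ sketch models that sequence; real layouts add RTTI slots, offsets, and address points, and the struct and table here are invented for illustration:

#include <cstdio>

// Hand-rolled model of the dispatch sequence BuildVirtualCall emits.
typedef void (*VFn)(void *self);

struct Object {
  VFn *vtable; // first field: the vtable pointer that GetVTablePtr loads
  int payload;
};

static void sayHello(void *self) {
  std::printf("hello %d\n", static_cast<Object *>(self)->payload);
}
static void sayBye(void *self) {
  std::printf("bye %d\n", static_cast<Object *>(self)->payload);
}

static VFn theVTable[] = { sayHello, sayBye };

int main() {
  Object o = { theVTable, 42 };
  unsigned vtableIndex = 1;          // like getMethodVTableIndex(...)
  VFn vfn = o.vtable[vtableIndex];   // the GEP + load of the vfn slot
  vfn(&o);                           // prints "bye 42"
  return 0;
}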
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
deleted file mode 100644
index 1e6adb0..0000000
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These classes wrap the information about a call or function
-// definition used to handle ABI compliancy.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_CGCXX_H
-#define CLANG_CODEGEN_CGCXX_H
-
-namespace clang {
-
-/// CXXCtorType - C++ constructor types
-enum CXXCtorType {
- Ctor_Complete, // Complete object ctor
- Ctor_Base, // Base object ctor
- Ctor_CompleteAllocating // Complete object allocating ctor
-};
-
-/// CXXDtorType - C++ destructor types
-enum CXXDtorType {
- Dtor_Deleting, // Deleting dtor
- Dtor_Complete, // Complete object dtor
- Dtor_Base // Base object dtor
-};
-
-} // end namespace clang
-
-#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
new file mode 100644
index 0000000..8373b66
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -0,0 +1,174 @@
+//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCXXABI::~CGCXXABI() { }
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ llvm::StringRef S) {
+ Diagnostic &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+const llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr, QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
+
+void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ ErrorUnsupportedABI(CGF, "static local variable initialization");
+}
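
[Editor's note — not part of the patch.] The new CGCXXABI.cpp centralizes a fallback pattern: for each unimplemented hook, report "cannot yet compile ... in this ABI" and return a type-correct dummy value (compare GetBogusMemberPointer) so code generation can keep going; concrete ABIs override the pieces they support. A minimal sketch of that pattern, with invented names:

#include <cstdio>

// Base class reports "unsupported" and returns a well-typed fallback;
// a concrete ABI overrides what it actually implements.
struct AbiBase {
  virtual ~AbiBase() {}
  virtual int emitMemberPointerComparison() {
    std::fprintf(stderr, "error: cannot yet compile this in this ABI\n");
    return 0; // bogus but well-typed, so callers can continue
  }
};

struct ConcreteAbi : AbiBase {
  virtual int emitMemberPointerComparison() { return 1; }
};

int main() {
  AbiBase base;
  ConcreteAbi concrete;
  std::printf("%d %d\n", base.emitMemberPointerComparison(),
              concrete.emitMemberPointerComparison());
  return 0;
}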
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index 367e345..de4df3d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -32,18 +32,20 @@ namespace clang {
class CXXMethodDecl;
class CXXRecordDecl;
class FieldDecl;
+ class MangleContext;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
- class MangleContext;
/// Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
+ llvm::OwningPtr<MangleContext> MangleCtx;
- CGCXXABI(CodeGenModule &CGM) : CGM(CGM) {}
+ CGCXXABI(CodeGenModule &CGM)
+ : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
@@ -74,7 +76,9 @@ public:
virtual ~CGCXXABI();
/// Gets the mangle context.
- virtual MangleContext &getMangleContext() = 0;
+ MangleContext &getMangleContext() {
+ return *MangleCtx;
+ }
/// Find the LLVM type used to represent the given member pointer
/// type.
@@ -118,7 +122,8 @@ public:
virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
/// Create a member pointer for the given field.
- virtual llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
/// Emit a comparison between two member pointers. Returns an i1.
virtual llvm::Value *
@@ -185,7 +190,7 @@ public:
///
/// \param ElementType - the allocated type of the expression,
/// i.e. the pointee type of the expression result type
- virtual CharUnits GetArrayCookieSize(QualType ElementType);
+ virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
/// Initialize the array cookie for the given allocation.
///
@@ -198,6 +203,7 @@ public:
virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
/// Reads the array cookie associated with the given pointer,
@@ -214,9 +220,21 @@ public:
/// \param CookieSize - an out parameter which will be initialized
/// with the size of the cookie, or zero if there is no cookie
virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ /*************************** Static local guards ****************************/
+
+ /// Emits the guarded initializer and destructor setup for the given
+ /// variable, given that it couldn't be emitted as a constant.
+ ///
+ /// The variable may be:
+ /// - a static local variable
+ /// - a static data member of a class template instantiation
+ virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr);
+
};
/// Creates an instance of a C++ ABI class.
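
[Editor's note — not part of the patch.] The GetArrayCookieSize / InitializeArrayCookie / ReadArrayCookie hooks above implement array cookies: new[] over-allocates, stores the element count in front of the array, and delete[] reads it back to know how many destructors to run. A rough, runnable model of the protocol — real cookie size and alignment are ABI-specific; this sketch uses a bare size_t:

#include <cstdio>
#include <cstdlib>
#include <cstring>

static void *allocateArray(size_t count, size_t elemSize) {
  char *raw = static_cast<char *>(std::malloc(sizeof(size_t) +
                                              count * elemSize));
  std::memcpy(raw, &count, sizeof(size_t)); // InitializeArrayCookie
  return raw + sizeof(size_t);              // NewPtr points past the cookie
}

static void deallocateArray(void *arr, size_t *numElements) {
  char *raw = static_cast<char *>(arr) - sizeof(size_t); // AllocPtr
  std::memcpy(numElements, raw, sizeof(size_t));         // ReadArrayCookie
  std::free(raw);
}

int main() {
  void *a = allocateArray(7, 4);
  size_t n = 0;
  deallocateArray(a, &n);
  std::printf("cookie said %lu elements\n", (unsigned long)n); // 7
  return 0;
}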
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 475dfa5..ae84b61 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -131,7 +131,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallVector<CanQualType, 16> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
@@ -170,7 +170,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
assert(isa<FunctionType>(FTy));
if (isa<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+ return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
assert(isa<FunctionProtoType>(FTy));
return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}
@@ -195,13 +195,13 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
-
+
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
return getFunctionInfo(CD, GD.getCtorType());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
return getFunctionInfo(DD, GD.getDtorType());
-
+
return getFunctionInfo(FD);
}
@@ -256,14 +256,14 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Compute ABI information.
getABIInfo().computeInfo(*FI);
-
+
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
ABIArgInfo &RetInfo = FI->getReturnInfo();
if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
-
+
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
@@ -274,7 +274,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// we *just* filled in the FunctionInfo for.
if (!IsRecursive && !PointersToResolve.empty())
HandleLateResolvedPointers();
-
+
return *FI;
}
@@ -288,7 +288,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
NoReturn(_NoReturn), RegParm(_RegParm)
{
NumArgs = NumArgTys;
-
+
// FIXME: Coallocate with the CGFunctionInfo object.
Args = new ArgInfo[1 + NumArgTys];
Args[0].type = ResTy;
@@ -386,20 +386,20 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
if (SrcSTy->getNumElements() == 0) return SrcPtr;
-
+
const llvm::Type *FirstElt = SrcSTy->getElementType(0);
-
+
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
- uint64_t FirstEltSize =
+ uint64_t FirstEltSize =
CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
- if (FirstEltSize < DstSize &&
+ if (FirstEltSize < DstSize &&
FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
return SrcPtr;
-
+
// GEP into the first element.
SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
-
+
// If the first element is a struct, recurse.
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
@@ -417,23 +417,23 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
CodeGenFunction &CGF) {
if (Val->getType() == Ty)
return Val;
-
+
if (isa<llvm::PointerType>(Val->getType())) {
// If this is Pointer->Pointer avoid conversion to and from int.
if (isa<llvm::PointerType>(Ty))
return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
-
+
// Convert the pointer to an integer so we can play with its width.
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
}
-
+
const llvm::Type *DestIntTy = Ty;
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
-
+
if (Val->getType() != DestIntTy)
Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
-
+
if (isa<llvm::PointerType>(Ty))
Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
return Val;
@@ -452,18 +452,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
-
+
// If SrcTy and Ty are the same, just do a load.
if (SrcTy == Ty)
return CGF.Builder.CreateLoad(SrcPtr);
-
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
-
+
if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
-
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
@@ -473,7 +473,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
}
-
+
// If load is legal, just bitcast the src pointer.
if (SrcSize >= DstSize) {
// Generally SrcSize is never greater than DstSize, since this means we are
@@ -489,7 +489,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
Load->setAlignment(1);
return Load;
}
-
+
// Otherwise do coercion through memory. This is stupid, but
// simple.
llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
@@ -518,14 +518,14 @@ static void CreateCoercedStore(llvm::Value *Src,
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
return;
}
-
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
-
+
if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
}
-
+
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
@@ -534,7 +534,7 @@ static void CreateCoercedStore(llvm::Value *Src,
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
return;
}
-
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
@@ -590,7 +590,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
-
+
// For definition purposes, don't consider a K&R function variadic.
bool Variadic = false;
if (const FunctionProtoType *FPT =
@@ -673,7 +673,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
+
if (!VerifyFuncTypeComplete(FPT)) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
@@ -688,7 +688,7 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
- AttributeListType &PAL,
+ AttributeListType &PAL,
unsigned &CallingConv) {
unsigned FuncAttrs = 0;
unsigned RetAttrs = 0;
@@ -755,10 +755,10 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: we need to honor command line settings also.
- // FIXME: RegParm should be reduced in case of nested functions and/or global
- // register variable.
+ // FIXME: RegParm should be reduced in case of global register variable.
signed RegParm = FI.getRegParm();
+ if (!RegParm)
+ RegParm = CodeGenOpts.NumRegisterParameters;
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
@@ -769,7 +769,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
- // sense to do it here because parameters are so fucked up.
+ // sense to do it here because parameters are so messed up.
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerType())
@@ -786,7 +786,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attributes |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
-
+
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
@@ -866,13 +866,32 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
+
if (hasAggregateLLVMType(Ty)) {
- // Do nothing, aggregates and complex variables are accessed by
- // reference.
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested
+ if (ArgI.getIndirectRealign()) {
+ llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+
+ // Copy from the incoming argument pointer to the temporary with the
+ // appropriate alignment.
+ //
+ // FIXME: We should have a common utility for generating an aggregate
+ // copy.
+ const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits Size = getContext().getTypeSizeInChars(Ty);
+ Builder.CreateMemCpy(Builder.CreateBitCast(AlignedTemp, I8PtrTy),
+ Builder.CreateBitCast(V, I8PtrTy),
+ llvm::ConstantInt::get(IntPtrTy,
+ Size.getQuantity()),
+ ArgI.getIndirectAlign(),
+ false);
+ V = AlignedTemp;
+ }
} else {
// Load scalar value from indirect argument.
- unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
- V = EmitLoadOfScalar(V, false, Alignment, Ty);
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -891,7 +910,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArgI.getDirectOffset() == 0) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
llvm::Value *V = AI;
-
+
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
@@ -905,33 +924,33 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
-
+
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
- unsigned AlignmentToUse =
- CGF.CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ unsigned AlignmentToUse =
+ CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
AlignmentToUse = std::max(AlignmentToUse,
(unsigned)getContext().getDeclAlign(Arg).getQuantity());
-
+
Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
llvm::Value *Ptr = V; // Pointer to store into.
-
+
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgI.getDirectOffset()) {
Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
- Ptr = Builder.CreateBitCast(Ptr,
+ Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
}
-
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
-
+
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
@@ -944,8 +963,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->setName(Arg->getName() + ".coerce");
CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
-
-
+
+
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
@@ -1024,13 +1043,13 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RetAI.getDirectOffset() == 0) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
-
+
// If the instruction right before the insertion point is a store to the
// return value, we can elide the load, zap the store, and usually zap the
// alloca.
llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
+ if (InsertBB->empty() ||
!(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
RV = Builder.CreateLoad(ReturnValue);
@@ -1039,7 +1058,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
SI->eraseFromParent();
-
+
// If that was the only use of the return value, nuke it as well now.
if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
@@ -1052,10 +1071,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
if (unsigned Offs = RetAI.getDirectOffset()) {
V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
V = Builder.CreateConstGEP1_32(V, Offs);
- V = Builder.CreateBitCast(V,
+ V = Builder.CreateBitCast(V,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
-
+
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
@@ -1079,7 +1098,7 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
llvm::Value *Local = GetAddrOfLocalVar(Param);
QualType ArgType = Param->getType();
-
+
// For the most part, we just need to load the alloca, except:
// 1) aggregate r-values are actually pointers to temporaries, and
// 2) references to aggregates are pointers directly to the aggregate.
@@ -1180,7 +1199,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Ignore:
break;
-
+
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
@@ -1204,16 +1223,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
-
+
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgInfo.getDirectOffset()) {
SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
- SrcPtr = Builder.CreateBitCast(SrcPtr,
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
}
-
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
@@ -1233,7 +1252,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
}
-
+
break;
}
@@ -1332,7 +1351,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we are ignoring an argument that had a result, make sure to
// construct the appropriate return value for our caller.
return GetUndefRValue(RetTy);
-
+
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
@@ -1355,25 +1374,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
return RValue::get(CI);
}
-
+
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
-
+
if (!DestPtr) {
DestPtr = CreateMemTemp(RetTy, "coerce");
DestIsVolatile = false;
}
-
+
// If the value is offset in memory, apply the offset now.
llvm::Value *StorePtr = DestPtr;
if (unsigned Offs = RetAI.getDirectOffset()) {
StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
+ StorePtr = Builder.CreateBitCast(StorePtr,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
-
+
unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
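
[Editor's note — not part of the patch.] Much of the CGCall.cpp churn above is whitespace, but the substantive logic is the coerced load/store machinery: when the in-memory type and the ABI coerce-to type differ, the fallback copies bytes through a temporary of the destination type. A small sketch of that idea, assuming the common case where a struct of two shorts and unsigned are both 4 bytes:

#include <cstdio>
#include <cstring>

// Model of the "coercion through memory" fallback in CreateCoercedLoad.
struct TwoShorts { short a, b; };

static unsigned coercedLoad(const TwoShorts &src) {
  unsigned tmp;                          // the CreateTempAlloca(Ty) temporary
  std::memcpy(&tmp, &src, sizeof(tmp));  // byte copy instead of a typed load
  return tmp;
}

int main() {
  TwoShorts s = { 1, 2 };
  std::printf("0x%08x\n", coercedLoad(s)); // 0x00020001 on little-endian
  return 0;
}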
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index bf26799..8e88beb 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
@@ -40,7 +41,7 @@ ComputeNonVirtualBaseClassOffset(ASTContext &Context,
cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
// Add the offset.
- Offset += Layout.getBaseClassOffset(BaseDecl);
+ Offset += Layout.getBaseClassOffsetInBits(BaseDecl);
RD = BaseDecl;
}
@@ -86,9 +87,9 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
uint64_t Offset;
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
if (BaseIsVirtual)
- Offset = Layout.getVBaseClassOffset(Base);
+ Offset = Layout.getVBaseClassOffsetInBits(Base);
else
- Offset = Layout.getBaseClassOffset(Base);
+ Offset = Layout.getBaseClassOffsetInBits(Base);
// Shift and cast down to the base type.
// TODO: for complete types, this should be possible with a GEP.
@@ -179,8 +180,17 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Value *VirtualOffset = 0;
- if (VBase)
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ if (VBase) {
+ if (Derived->hasAttr<FinalAttr>()) {
+ VirtualOffset = 0;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+
+ uint64_t VBaseOffset = Layout.getVBaseClassOffsetInBits(VBase);
+ NonVirtualOffset += VBaseOffset / 8;
+ } else
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ }
// Apply the offsets.
Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
@@ -294,7 +304,8 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
const ASTRecordLayout &Layout =
CGF.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = ForVirtualBase ?
- Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base);
+ Layout.getVBaseClassOffsetInBits(Base) :
+ Layout.getBaseClassOffsetInBits(Base);
SubVTTIndex =
CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
@@ -307,7 +318,7 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
} else {
// We're the complete constructor, so get the VTT by name.
- VTT = CGF.CGM.getVTables().getVTT(RD);
+ VTT = CGF.CGM.getVTables().GetAddrOfVTT(RD);
VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
}
@@ -334,11 +345,34 @@ namespace {
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
}
};
+
+ /// A visitor which checks whether an initializer uses 'this' in a
+ /// way which requires the vtable to be properly set.
+ struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
+ typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
+
+ bool UsesThis;
+
+ DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
+ };
+}
+
+static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
+ DynamicThisUseChecker Checker(C);
+ Checker.Visit(const_cast<Expr*>(Init));
+ return Checker.UsesThis;
}
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXBaseOrMemberInitializer *BaseInit,
+ CXXCtorInitializer *BaseInit,
CXXCtorType CtorType) {
assert(BaseInit->isBaseInitializer() &&
"Must have base initializer!");
@@ -355,6 +389,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
if (CtorType == Ctor_Base && isBaseVirtual)
return;
+ // If the initializer for the base (other than the constructor
+ // itself) accesses 'this' in any way, we need to initialize the
+ // vtables.
+ if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
+ CGF.InitializeVTablePointers(ClassDecl);
+
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
llvm::Value *V =
@@ -362,9 +402,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
- CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(V, false, /*Lifetime*/ true);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor())
+ if (CGF.CGM.getLangOptions().areExceptionsEnabled() &&
+ !BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
}
@@ -372,7 +415,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
llvm::Value *ArrayIndexVar,
- CXXBaseOrMemberInitializer *MemberInit,
+ CXXCtorInitializer *MemberInit,
QualType T,
unsigned Index) {
if (Index == MemberInit->getNumArrayIndices()) {
@@ -388,11 +431,11 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
CGF.Builder.CreateStore(Next, ArrayIndexVar);
}
+
+ AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.isVolatileQualified(),
+ /*Lifetime*/ true);
- CGF.EmitAggExpr(MemberInit->getInit(), Dest,
- LHS.isVolatileQualified(),
- /*IgnoreResult*/ false,
- /*IsInitializer*/ true);
+ CGF.EmitAggExpr(MemberInit->getInit(), Slot);
return;
}
@@ -476,29 +519,29 @@ namespace {
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXBaseOrMemberInitializer *MemberInit,
+ CXXCtorInitializer *MemberInit,
const CXXConstructorDecl *Constructor,
FunctionArgList &Args) {
- assert(MemberInit->isMemberInitializer() &&
+ assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
// non-static data member initializers.
- FieldDecl *Field = MemberInit->getMember();
+ FieldDecl *Field = MemberInit->getAnyMember();
QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS;
// If we are initializing an anonymous union field, drill down to the field.
- if (MemberInit->getAnonUnionMember()) {
- Field = MemberInit->getAnonUnionMember();
- LHS = CGF.EmitLValueForAnonRecordField(ThisPtr, Field, 0);
- FieldType = Field->getType();
+ if (MemberInit->isIndirectMemberInitializer()) {
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
+ MemberInit->getIndirectMember(), 0);
+ FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
} else {
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- // FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
+ // FIXME: If there's no initializer and the CXXCtorInitializer
// was implicitly generated, we shouldn't be zeroing memory.
RValue RHS;
if (FieldType->isReferenceType()) {
@@ -557,12 +600,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// Emit the block variables for the array indices, if any.
for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
- CGF.EmitLocalBlockVarDecl(*MemberInit->getArrayIndex(I));
+ CGF.EmitAutoVarDecl(*MemberInit->getArrayIndex(I));
}
EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
- if (!CGF.Exceptions)
+ if (!CGF.CGM.getLangOptions().areExceptionsEnabled())
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -674,12 +717,12 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
FunctionArgList &Args) {
const CXXRecordDecl *ClassDecl = CD->getParent();
- llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
+ llvm::SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
B != E; ++B) {
- CXXBaseOrMemberInitializer *Member = (*B);
+ CXXCtorInitializer *Member = (*B);
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
@@ -754,6 +797,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
assert(Dtor->isImplicit() && "bodyless dtor not implicit");
// nothing to do besides what's in the epilogue
}
+ // -fapple-kext must inline any call to this dtor into
+ // the caller's body.
+ if (getContext().getLangOptions().AppleKext)
+ CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
break;
}
@@ -1118,6 +1165,64 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
void
+CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+ EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ return;
+ }
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
+ clang::Ctor_Complete);
+ assert(D->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ D->getThisType(getContext())));
+
+
+ // Push the src ptr.
+ QualType QT = *(FPT->arg_type_begin());
+ const llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ Src = Builder.CreateBitCast(Src, t);
+ Args.push_back(std::make_pair(RValue::get(Src), QT));
+
+ // Skip over first argument (Src).
+ ++ArgBeg;
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
+ E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || FPT->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ QualType ResultType = FPT->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FPT->getExtInfo()),
+ Callee, ReturnValueSlot(), Args, D);
+}
+
+void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args) {
@@ -1164,7 +1269,13 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *This) {
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
- llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+ llvm::Value *Callee = 0;
+ if (getContext().getLangOptions().AppleKext)
+ Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
+ DD->getParent());
+
+ if (!Callee)
+ Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
@@ -1202,13 +1313,7 @@ llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
- llvm::Value *VTablePtr = Builder.CreateBitCast(This,
- Int8PtrTy->getPointerTo());
- VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
-
+ llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
int64_t VBaseOffsetOffset =
CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
@@ -1326,15 +1431,16 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(VTableClass);
- BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = Layout.getVBaseClassOffsetInBits(BaseDecl);
BaseOffsetFromNearestVBase = 0;
BaseDeclIsNonVirtualPrimaryBase = false;
} else {
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffset =
+ Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
BaseOffsetFromNearestVBase =
- OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ OffsetFromNearestVBase + Layout.getBaseClassOffsetInBits(BaseDecl);
BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
}
@@ -1361,3 +1467,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
/*BaseIsNonVirtualPrimaryBase=*/false,
VTable, RD, VBases);
}
+
+llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
+ const llvm::Type *Ty) {
+ llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+ return Builder.CreateLoad(VTablePtrSrc, "vtable");
+}
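
[Editor's note — not part of the patch.] The DynamicThisUseChecker added above is a visitor that answers one question: does a base initializer reference 'this' (and hence require the vtable pointers to be set up first)? A toy version of the same idea over an invented mini-AST — the real checker is an EvaluatedExprVisitor over clang ASTs:

#include <cstdio>
#include <vector>

struct Expr {
  enum Kind { ThisRef, IntLit, Call };
  Kind kind;
  std::vector<Expr *> children;
  Expr(Kind k) : kind(k) {}
};

// Walk the tree; report whether any node references 'this'.
static bool usesThis(const Expr *e) {
  if (e->kind == Expr::ThisRef)
    return true;
  for (size_t i = 0; i != e->children.size(); ++i)
    if (usesThis(e->children[i]))
      return true;
  return false;
}

int main() {
  Expr call(Expr::Call), lit(Expr::IntLit), self(Expr::ThisRef);
  call.children.push_back(&lit);
  std::printf("%d\n", usesThis(&call) ? 1 : 0); // 0: vtables not needed
  call.children.push_back(&self);
  std::printf("%d\n", usesThis(&call) ? 1 : 0); // 1: initializer uses 'this'
  return 0;
}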
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
new file mode 100644
index 0000000..374ede8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
@@ -0,0 +1,1144 @@
+//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
+ if (rv.isScalar())
+ return DominatingLLVMValue::needsSaving(rv.getScalarVal());
+ if (rv.isAggregate())
+ return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return true;
+}
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
+ if (rv.isScalar()) {
+ llvm::Value *V = rv.getScalarVal();
+
+ // These automatically dominate and don't need to be saved.
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, ScalarLiteral);
+
+ // Everything else needs an alloca.
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, ScalarAddress);
+ }
+
+ if (rv.isComplex()) {
+ CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
+ const llvm::Type *ComplexTy =
+ llvm::StructType::get(CGF.getLLVMContext(),
+ V.first->getType(), V.second->getType(),
+ (void*) 0);
+ llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
+ return saved_type(addr, ComplexAddress);
+ }
+
+ assert(rv.isAggregate());
+ llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, AggregateLiteral);
+
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, AggregateAddress);
+}
+
+/// Given a saved r-value produced by SaveRValue, perform the code
+/// necessary to restore it to usability at the current insertion
+/// point.
+RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ switch (K) {
+ case ScalarLiteral:
+ return RValue::get(Value);
+ case ScalarAddress:
+ return RValue::get(CGF.Builder.CreateLoad(Value));
+ case AggregateLiteral:
+ return RValue::getAggregate(Value);
+ case AggregateAddress:
+ return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ case ComplexAddress:
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
+ }
+
+ llvm_unreachable("bad saved r-value kind");
+ return RValue();
+}
+
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
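
[Editor's note — not part of the patch.] EHScopeStack::allocate above implements a downward-growing scope stack: the buffer grows on demand and storage is handed out from the end toward the start, so the innermost scope always begins at StartOfData. A minimal model of that scheme, with invented names; the real class also tracks cleanup metadata:

#include <cstddef>
#include <cstdio>
#include <cstring>

class DownwardStack {
  char *bufStart, *bufEnd, *dataStart;
public:
  DownwardStack() : bufStart(0), bufEnd(0), dataStart(0) {}
  ~DownwardStack() { delete [] bufStart; }

  char *allocate(size_t size) {
    if (!bufStart) {
      size_t cap = 1024;
      while (cap < size) cap *= 2;
      bufStart = new char[cap];
      dataStart = bufEnd = bufStart + cap;
    } else if (static_cast<size_t>(dataStart - bufStart) < size) {
      size_t cap = bufEnd - bufStart;
      size_t used = cap - (dataStart - bufStart);
      size_t newCap = cap;
      do newCap *= 2; while (newCap < used + size);
      char *newStart = new char[newCap];
      std::memcpy(newStart + newCap - used, dataStart, used); // keep at end
      delete [] bufStart;
      bufStart = newStart;
      bufEnd = newStart + newCap;
      dataStart = bufEnd - used;
    }
    dataStart -= size; // popping is just dataStart += size
    return dataStart;
  }
};

int main() {
  DownwardStack s;
  std::memset(s.allocate(16), 0, 16);
  std::memset(s.allocate(2048), 0, 2048); // triggers a regrow
  std::printf("ok\n");
  return 0;
}

Note that regrowth moves live entries, which is why the real EHScopeStack hands out stable_iterator offsets rather than raw pointers into the buffer.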
+
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
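+// A sketch of typical use (the template pushCleanup<T> wrapper in
+// CodeGenFunction.h, assumed here, normally hides the placement new):
+//
+//   struct CallFree : EHScopeStack::Cleanup {     // hypothetical
+//     llvm::Value *Ptr;
+//     CallFree(llvm::Value *P) : Ptr(P) {}
+//     void Emit(CodeGenFunction &CGF, bool IsForEH) { /* call free(Ptr) */ }
+//   };
+//   void *Mem = EHStack.pushCleanup(NormalAndEHCleanup, sizeof(CallFree));
+//   new (Mem) CallFree(Ptr);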
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
+ for (unsigned I = 0; I != NumHandlers; ++I)
+ Scope->getHandlers()[I].Index = getNextEHDestIndex();
+ return Scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope(getNextEHDestIndex());
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
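+/// Set up the active flag for a cleanup pushed during the
+/// conditionally-evaluated part of a full-expression. For example
+/// (a sketch):
+///
+///   use(cond ? A() : B());  // A's destructor must run only if cond
+///
+/// The flag is stored false where the full-expression's evaluation
+/// begins and true at the point the cleanup is actually pushed, and
+/// the emitted cleanup tests it before running.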
+void CodeGenFunction::initFullExprCleanup() {
+ // Create a variable to decide whether the cleanup needs to be run.
+ llvm::AllocaInst *active
+ = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+
+ // Initialize it to false at a site that's guaranteed to be run
+ // before each evaluation.
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(Builder.getFalse(), active, &block->back());
+
+ // Initialize it to true at the current location.
+ Builder.CreateStore(Builder.getTrue(), active);
+
+ // Set that as the active flag in the cleanup.
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
+ cleanup.setActiveFlag(active);
+
+ if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
+ if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
+}
+
+EHScopeStack::Cleanup::~Cleanup() {
+ llvm_unreachable("Cleanup is indestructable");
+}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+static void ResolveAllBranchFixups(CodeGenFunction &CGF,
+ llvm::SwitchInst *Switch,
+ llvm::BasicBlock *CleanupEntry) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set.
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+
+ // If there isn't an OptimisticBranchBlock, then InitialBranch is
+ // still pointing directly to its destination; forward it to the
+ // appropriate cleanup entry. This is required in the specific
+ // case of
+ // { std::string s; goto lbl; }
+ // lbl:
+ // i.e. where there's an unresolved fixup inside a single cleanup
+ // entry which we're currently popping.
+ if (Fixup.OptimisticBranchBlock == 0) {
+ new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
+ }
+
+ // Don't add this case to the switch statement twice.
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ CGF.EHStack.clearFixups();
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+ // If it doesn't have an optimistic branch block, LatestBranch is
+ // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isEHCleanup());
+ llvm::BasicBlock *Entry = Scope.getEHBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Replace all uses of the entry with the predecessor, in case there
+ // are phis in the cleanup.
+ Entry->replaceAllUsesWith(Pred);
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ bool ForEH,
+ llvm::Value *ActiveFlag) {
+ // EH cleanups always occur within a terminate scope.
+ if (ForEH) CGF.EHStack.pushTerminate();
+
+ // If there's an active flag, load it and skip the cleanup if it's
+ // false.
+ llvm::BasicBlock *ContBB = 0;
+ if (ActiveFlag) {
+ ContBB = CGF.createBasicBlock("cleanup.done");
+ llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+ llvm::Value *IsActive
+ = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+ CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+ CGF.EmitBlock(CleanupBB);
+ }
+
+ // Ask the cleanup to emit itself.
+ Fn->Emit(CGF, ForEH);
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+ // Emit the continuation block if there was an active flag.
+ if (ActiveFlag)
+ CGF.EmitBlock(ContBB);
+
+ // Leave the terminate scope.
+ if (ForEH) CGF.EHStack.popTerminate();
+}
+
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+ llvm::BasicBlock *From,
+ llvm::BasicBlock *To) {
+ // Exit is the exit block of a cleanup, so it always terminates in
+ // an unconditional branch or a switch.
+ llvm::TerminatorInst *Term = Exit->getTerminator();
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+ Br->setSuccessor(0, To);
+ } else {
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+ for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+ if (Switch->getSuccessor(I) == From)
+ Switch->setSuccessor(I, To);
+ }
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
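+///
+/// For example (a sketch), given
+///   while (...) { X x; if (c) break; f(); }
+/// the break is threaded through x's cleanup as a branch-after, and
+/// the fallthrough after f() is threaded through the same block; a
+/// switch on the cleanup destination slot then routes control if more
+/// than one destination remains live.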
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Remember activation information.
+ bool IsActive = Scope.isActive();
+ llvm::Value *NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
+ llvm::Value *EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ bool RequiresEHCleanup = Scope.hasEHBranches();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0 && IsActive);
+
+ // Branch-through fall-throughs leave the insertion point set to the
+ // end of the last cleanup, which points to the current scope. The
+ // rest of IR gen doesn't need to worry about this; it only happens
+ // during the execution of PopCleanupBlocks().
+ bool HasPrebranchedFallthrough =
+ (FallthroughSource && FallthroughSource->getTerminator());
+
+ // If this is a normal cleanup, then having a prebranched
+ // fallthrough implies that the fallthrough source unconditionally
+ // jumps here.
+ assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+ (Scope.getNormalBlock() &&
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock()));
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // Even if we don't need the normal cleanup, we might still have
+ // prebranched fallthrough to worry about.
+ if (Scope.isNormalCleanup() && !RequiresNormalCleanup &&
+ HasPrebranchedFallthrough) {
+ assert(!IsActive);
+
+ llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+
+ // If we're branching through this cleanup, just forward the
+ // prebranched fallthrough to the next cleanup, leaving the insert
+ // point in the old block.
+ if (FallthroughIsBranchThrough) {
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ llvm::BasicBlock *EnclosingEntry =
+ CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ NormalEntry, EnclosingEntry);
+ assert(NormalEntry->use_empty() &&
+ "uses of entry remain after forwarding?");
+ delete NormalEntry;
+
+ // Otherwise, we're branching out; just emit the next block.
+ } else {
+ EmitBlock(NormalEntry);
+ SimplifyCleanupEntry(*this, NormalEntry);
+ }
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ // We want to emit the EH cleanup after the normal cleanup, but go
+ // ahead and do the setup for the EH cleanup while the scope is still
+ // alive.
+ llvm::BasicBlock *EHEntry = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
+ if (RequiresEHCleanup) {
+ EHEntry = CreateEHEntry(*this, Scope);
+
+ // Figure out the branch-through dest if necessary.
+ llvm::BasicBlock *EHBranchThroughDest = 0;
+ if (Scope.hasEHBranchThroughs()) {
+ assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
+ EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
+ EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ // If we have exactly one branch-after and no branch-throughs, we
+ // can dispatch it without a switch.
+ if (!Scope.hasEHBranchThroughs() &&
+ Scope.getNumEHBranchAfters() == 1) {
+ assert(!EHBranchThroughDest);
+
+ // TODO: remove the spurious eh.cleanup.dest stores if this edge
+ // never went through any switches.
+ llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
+
+ // Otherwise, if we have any branch-afters, we need a switch.
+ } else if (Scope.getNumEHBranchAfters()) {
+ // The default of the switch belongs to the branch-throughs if
+ // they exist.
+ llvm::BasicBlock *Default =
+ (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
+
+ const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ EHInstsToAppend.push_back(Load);
+ EHInstsToAppend.push_back(Switch);
+
+ for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
+ Switch->addCase(Scope.getEHBranchAfterIndex(I),
+ Scope.getEHBranchAfterBlock(I));
+
+ // Otherwise, we have only branch-throughs; jump to the next EH
+ // cleanup.
+ } else {
+ assert(EHBranchThroughDest);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
+ }
+ }
+
+ if (!RequiresNormalCleanup) {
+ EHStack.popCleanup();
+ } else {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ // Fixups can cause us to optimistically create a normal block,
+ // only to later have no real uses for it. Just delete it in
+ // this case.
+ // TODO: we can potentially simplify all the uses after this.
+ if (Scope.getNormalBlock()) {
+ Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
+ delete Scope.getNormalBlock();
+ }
+
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // I. Set up the fallthrough edge in.
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough) {
+ if (!HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Otherwise, clear the IP if we don't have fallthrough because
+ // the cleanup is inactive. We don't need to save it because
+ // it's still just FallthroughSource.
+ } else if (FallthroughSource) {
+ assert(!IsActive && "source without fallthrough for active cleanup");
+ Builder.ClearInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ EmitBlock(NormalEntry);
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (FallthroughSource && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest || !IsActive);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (FallthroughSource && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ if (HasFallthrough)
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ // If there aren't any enclosing cleanups, we can resolve all
+ // the fixups now.
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(*this, Switch, NormalEntry);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // IV. Pop the cleanup and emit it.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but shouldn't branch to
+ // the cleanup because the cleanup is inactive.
+ if (!HasFallthrough && FallthroughSource) {
+ assert(!IsActive);
+
+ // If we have a prebranched fallthrough, that needs to be
+ // forwarded to the right block.
+ if (HasPrebranchedFallthrough) {
+ llvm::BasicBlock *Next;
+ if (FallthroughIsBranchThrough) {
+ Next = BranchThroughDest;
+ assert(!FallthroughDest);
+ } else {
+ Next = FallthroughDest;
+ }
+
+ ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
+ }
+ Builder.SetInsertPoint(FallthroughSource);
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (HasFallthrough && FallthroughDest) {
+ assert(!FallthroughIsBranchThrough);
+ EmitBlock(FallthroughDest);
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (HasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ Builder.ClearInsertionPoint();
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+ EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
+ EHExit->getInstList().push_back(EHInstsToAppend[I]);
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
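+///
+/// In outline (a sketch): the destination index is stored into the
+/// normal cleanup destination slot, the branch is redirected to the
+/// innermost active normal cleanup's entry block, and the destination
+/// is recorded as a branch-after on the last cleanup crossed and as a
+/// branch-through on each cleanup in between.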
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
+ // We should never get invalid scope depths for an UnwindDest; that
+ // implies that the destination wasn't set up correctly.
+ assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active cleanup.
+ EHScopeStack::stable_iterator
+ InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
+
+ // If the destination is in the same EH cleanup scope as us, we
+ // don't need to thread through anything.
+ if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
+ Builder.ClearInsertionPoint();
+ return;
+ }
+ assert(InnermostCleanup != EHStack.stable_end());
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
+ BI->setSuccessor(0, CreateEHEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ for (EHScopeStack::stable_iterator
+ I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
+ assert(E.strictlyEncloses(I));
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isEHCleanup());
+ I = Scope.getEnclosingEHCleanup();
+
+ // If this is the last cleanup we're propagating through, add this
+ // as a branch-after.
+ if (I == E) {
+ Scope.addEHBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, add it as a branch-through. If this isn't new
+ // information, all the rest of the work has been done before.
+ if (!Scope.addEHBranchThrough(Dest.getBlock()))
+ break;
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed a normal block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup();
+ I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) return true;
+ I = S.getEnclosingNormalCleanup();
+ }
+
+ return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed an EH block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostEHCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getEHBlock()) return true;
+ I = S.getEnclosingEHCleanup();
+ }
+
+ return false;
+}
+
+enum ForActivation_t {
+ ForActivation,
+ ForDeactivation
+};
+
+/// The given cleanup block is changing activation state. Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
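+///
+/// A typical client (a sketch): the cleanup that frees the storage of
+/// a new-expression is pushed active, then deactivated once the
+/// initializer has completed and can no longer throw.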
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+ EHScopeStack::stable_iterator C,
+ ForActivation_t Kind) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+
+ // We always need the flag if we're activating the cleanup, because
+ // we have to assume that the current location doesn't necessarily
+ // dominate all future uses of the cleanup.
+ bool NeedFlag = (Kind == ForActivation);
+
+ // Calculate whether the cleanup was used:
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
+ Scope.setTestFlagInNormalCleanup();
+ NeedFlag = true;
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
+ Scope.setTestFlagInEHCleanup();
+ NeedFlag = true;
+ }
+
+ // If it hasn't yet been used as either, we're done.
+ if (!NeedFlag) return;
+
+ llvm::AllocaInst *Var = Scope.getActiveFlag();
+ if (!Var) {
+ Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(Var);
+
+ // Initialize to true or false depending on whether it was
+ // active up to this point.
+ CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
+}
+
+/// Activate a cleanup that was created in an inactive state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ SetupCleanupBlockActivation(*this, C, ForActivation);
+
+ Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(Scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it.
+ if (C == EHStack.stable_begin()) {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ SetupCleanupBlockActivation(*this, C, ForDeactivation);
+
+ Scope.setActive(false);
+}
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
+ if (!EHCleanupDest)
+ EHCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
+ return EHCleanupDest;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
new file mode 100644
index 0000000..c93ec5b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
@@ -0,0 +1,560 @@
+//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for cleanups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCLEANUP_H
+#define CLANG_CODEGEN_CGCLEANUP_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGCleanup.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
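+ // The scope kind, packed into two bits so that derived scopes can
+ // pack their own flags into the rest of the same 32-bit allocation
+ // unit (hence BitsRemaining = 30 below).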
+ unsigned K : 2;
+
+protected:
+ enum { BitsRemaining = 30 };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective-C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ /// The unwind destination index for this handler.
+ unsigned Index;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ bool IsActive : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ bool TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ bool TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 17; // currently 13
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet.
+ llvm::AllocaInst *ActiveFlag;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+ /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+
+ /// The destinations of EH branch-afters and branch-throughs.
+ /// TODO: optimize for the extremely common case of a single
+ /// branch-through.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
+
+ /// EH branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ EHBranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
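+ ///
+ /// The cleanup object itself is stored immediately after this header
+ /// (see getCleanupBuffer), so the allocation is laid out roughly as
+ /// [EHCleanupScope][CleanupSize bytes of cleanup data].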
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupSize;
+ }
+
+ EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
+ unsigned CleanupSize, unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::Cleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive),
+ TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0)
+ {
+ assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ bool isActive() const { return IsActive; }
+ void setActive(bool A) { IsActive = A; }
+
+ llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
+ void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+
+ void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; }
+ bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; }
+
+ void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; }
+ bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
+
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ // Same stuff, only for EH branches instead of normal branches.
+ // It's quite possible that we could find a better representation
+ // for this.
+
+ bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
+ void addEHBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.EHBranches.insert(Block))
+ ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ unsigned getNumEHBranchAfters() const {
+ return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].second;
+ }
+
+ bool addEHBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().EHBranches.insert(Block);
+ }
+
+ bool hasEHBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+ unsigned DestIndex : BitsRemaining;
+public:
+ EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ unsigned getDestIndex() const { return DestIndex; }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
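+// A stable_iterator is stored as an offset back from EndOfBuffer, so
+// unlike a raw iterator it survives both reallocation in allocate()
+// and the pushing and popping of inner scopes.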
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveEHCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingEHCleanup();
+ }
+ return stable_end();
+}
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index 406db88..469b460 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -32,13 +33,14 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
using namespace clang;
using namespace clang::CodeGen;
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
- : CGM(CGM), DebugFactory(CGM.getModule()),
+ : CGM(CGM), DBuilder(CGM.getModule()),
BlockLiteralGenericSet(false) {
CreateCompileUnit();
}
@@ -53,28 +55,27 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
}
/// getContextDescriptor - Get context info for the decl.
-llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
- llvm::DIDescriptor &CompileUnit) {
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!Context)
- return CompileUnit;
+ return TheCU;
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
I = RegionMap.find(Context);
if (I != RegionMap.end())
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
- return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
if (!RDecl->isDependentType()) {
- llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- llvm::DIFile(CompileUnit));
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ getOrCreateMainFile());
return llvm::DIDescriptor(Ty);
}
}
- return CompileUnit;
+ return TheCU;
}
/// getFunctionName - Get function name for the given FunctionDecl. If the
@@ -100,9 +101,14 @@ llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
- if (const ObjCImplementationDecl *OID = dyn_cast<const ObjCImplementationDecl>(DC)) {
+ if (const ObjCImplementationDecl *OID =
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
OS << OID->getName();
- } else if (const ObjCCategoryImplDecl *OCD = dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ } else if (const ObjCInterfaceDecl *OID =
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD =
+ dyn_cast<const ObjCCategoryImplDecl>(DC)){
OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
OCD->getIdentifier()->getNameStart() << ')';
}
@@ -131,8 +137,8 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
NumArgs = TST->getNumArgs();
} else {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- Args = TemplateArgs.getFlatArgumentList();
- NumArgs = TemplateArgs.flat_size();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
}
Buffer = RD->getIdentifier()->getNameStart();
PrintingPolicy Policy(CGM.getLangOptions());
@@ -144,18 +150,21 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
memcpy(StrPtr, Buffer.data(), Buffer.length());
return llvm::StringRef(StrPtr, Buffer.length());
-
}
/// getOrCreateFile - Get the file debug info descriptor for the input location.
llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(),
- TheCU);
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid() || llvm::StringRef(PLoc.getFilename()).empty())
+ // If the location is not valid then use main input file.
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+
// Cache the results.
const char *fname = PLoc.getFilename();
llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
@@ -167,21 +176,25 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return llvm::DIFile(cast<llvm::MDNode>(it->second));
}
- llvm::DIFile F = DebugFactory.CreateFile(PLoc.getFilename(),
- getCurrentDirname(), TheCU);
+ llvm::DIFile F = DBuilder.CreateFile(PLoc.getFilename(), getCurrentDirname());
DIFileCache[fname] = F;
return F;
}
+/// getOrCreateMainFile - Get the file info for the main compile unit.
+llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
+ return DBuilder.CreateFile(TheCU.getFilename(), TheCU.getDirectory());
+}
+
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
assert (CurLoc.isValid() && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.getLine();
+ return PLoc.isValid()? PLoc.getLine() : 0;
}
/// getColumnNumber - Get column number for the location. If location is
@@ -190,7 +203,7 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
assert (CurLoc.isValid() && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.getColumn();
+ return PLoc.isValid()? PLoc.getColumn() : 0;
}
llvm::StringRef CGDebugInfo::getCurrentDirname() {
@@ -251,16 +264,17 @@ void CGDebugInfo::CreateCompileUnit() {
RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
// Create new compile unit.
- TheCU = DebugFactory.CreateCompileUnit(
+ DBuilder.CreateCompileUnit(
LangTag, Filename, getCurrentDirname(),
- Producer, true,
+ Producer,
LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+ // FIXME - Eliminate TheCU.
+ TheCU = llvm::DICompileUnit(DBuilder.getCU());
}
/// CreateType - Get the Basic type from the cache or create a new
/// one if necessary.
-llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
const char *BTName = NULL;
switch (BT->getKind()) {
@@ -268,10 +282,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_class", Unit, 0, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ return DBuilder.CreateStructType(TheCU, "objc_class",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
@@ -279,31 +293,31 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
// } *id;
llvm::DIType OCTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_class", Unit, 0, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ DBuilder.CreateStructType(TheCU, "objc_class",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType ISATy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- 0, Size, 0, 0, 0, OCTy);
-
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::DIType ISATy = DBuilder.CreatePointerType(OCTy, Size);
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
llvm::DIType FieldTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "isa", Unit,
- 0,Size, 0, 0, 0, ISATy);
+ DBuilder.CreateMemberType("isa", getOrCreateMainFile(),
+ 0,Size, 0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "objc_object", Unit, 0, 0, 0, 0,
- 0,
- llvm::DIType(), Elements);
+ return DBuilder.CreateStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0, Elements);
+ }
+ case BuiltinType::ObjCSel: {
+ return DBuilder.CreateStructType(TheCU, "objc_selector",
+ getOrCreateMainFile(), 0, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -335,17 +349,12 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
- uint64_t Offset = 0;
-
llvm::DIType DbgTy =
- DebugFactory.CreateBasicType(Unit, BTName,
- Unit, 0, Size, Align,
- Offset, /*flags*/ 0, Encoding);
+ DBuilder.CreateBasicType(BTName, Size, Align, Encoding);
return DbgTy;
}
-llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
// Bit size, align and offset of the type.
unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
@@ -353,12 +362,9 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- uint64_t Offset = 0;
-
llvm::DIType DbgTy =
- DebugFactory.CreateBasicType(Unit, "complex",
- Unit, 0, Size, Align,
- Offset, /*flags*/ 0, Encoding);
+ DBuilder.CreateBasicType("complex", Size, Align, Encoding);
+
return DbgTy;
}
@@ -389,13 +395,12 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
return getOrCreateType(QualType(T, 0), Unit);
}
- llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
- llvm::DIType DbgTy =
- DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
- 0, 0, 0, 0, 0, FromTy);
+ llvm::DIType DbgTy = DBuilder.CreateQualifiedType(Tag, FromTy);
+
return DbgTy;
}
@@ -413,24 +418,56 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
Ty->getPointeeType(), Unit);
}
+/// CreatePointeeType - Create pointee type. If the pointee is a record
+/// then emit only the record's forward declaration when debug info size
+/// reduction is enabled.
+llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ return getOrCreateType(PointeeTy, Unit);
+
+ if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
+ RecordDecl *RD = RTy->getDecl();
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
+
+ if (RD->isStruct())
+ return DBuilder.CreateStructType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIArray());
+ else if (RD->isUnion())
+ return DBuilder.CreateUnionType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIArray());
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
+ return DBuilder.CreateClassType(FDContext, RD->getName(), DefUnit,
+ Line, 0, 0, 0, llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ }
+ }
+ return getOrCreateType(PointeeTy, Unit);
+}
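A minimal sketch of the intended effect (type names assumed, not taken from the patch): with debug info size reduction enabled, describing a pointer member requires only a forward declaration of the pointee record.

    struct B;            // a forward declaration suffices for A's debug info
    struct A { B *b; };  // pointee record B is emitted as a fwd decl only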
+
llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
- llvm::DIType EltTy = getOrCreateType(PointeeTy, Unit);
+ if (Tag == llvm::dwarf::DW_TAG_reference_type)
+ return DBuilder.CreateReferenceType(CreatePointeeType(PointeeTy, Unit));
+
// Bit size, align and offset of the type.
-
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
uint64_t Size =
CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return
- DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
- 0, Size, Align, 0, 0, EltTy);
-
+ return
+ DBuilder.CreatePointerType(CreatePointeeType(PointeeTy, Unit), Size, Align);
}
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
@@ -438,16 +475,11 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
if (BlockLiteralGenericSet)
return BlockLiteralGeneric;
- unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
-
- llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
-
+ llvm::SmallVector<llvm::Value *, 8> EltTys;
llvm::DIType FieldTy;
-
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
-
llvm::DIArray Elements;
llvm::DIType EltTy, DescTy;
@@ -456,23 +488,20 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
- Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTys.clear();
- unsigned Flags = llvm::DIType::FlagAppleBlock;
+ unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
unsigned LineNo = getLineNumber(CurLoc);
- EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
- Unit, LineNo, FieldOffset, 0, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.CreateStructType(Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- LineNo, Size, Align, 0, 0, EltTy);
+ DescTy = DBuilder.CreatePointerType(EltTy, Size);
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
@@ -487,24 +516,20 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__descriptor", Unit,
- LineNo, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.CreateMemberType("__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
- Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ Elements = DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
- Unit, LineNo, FieldOffset, 0, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.CreateStructType(Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
BlockLiteralGenericSet = true;
- BlockLiteralGeneric
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
- "", Unit,
- LineNo, Size, Align, 0, 0, EltTy);
+ BlockLiteralGeneric = DBuilder.CreatePointerType(EltTy, Size);
return BlockLiteralGeneric;
}
@@ -513,46 +538,36 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
-
+ if (!Src.Verify())
+ return llvm::DIType();
// We don't set size information, but do specify where the typedef was
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
-
- llvm::DIDescriptor TyContext
- = getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
- Unit);
- llvm::DIType DbgTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef,
- TyContext,
- Ty->getDecl()->getName(), Unit,
- Line, 0, 0, 0, 0, Src);
+ llvm::DIType DbgTy = DBuilder.CreateTypedef(Src, Ty->getDecl()->getName(),
+ Unit, Line);
return DbgTy;
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile Unit) {
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
// Set up remainder of arguments if there is a prototype.
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
- if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ if (isa<FunctionNoProtoType>(Ty))
+ EltTys.push_back(DBuilder.CreateUnspecifiedParameter());
+ else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
- } else {
- // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
}
llvm::DIArray EltTypeArray =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0,
- llvm::DIType(), EltTypeArray);
+ llvm::DIType DbgTy = DBuilder.CreateSubroutineType(Unit, EltTypeArray);
return DbgTy;
}
@@ -560,7 +575,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
/// record fields. This is used while creating debug info entry for a Record.
void CGDebugInfo::
CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys) {
unsigned FieldNo = 0;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (RecordDecl::field_iterator I = RD->field_begin(),
@@ -595,17 +610,13 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
unsigned Flags = 0;
AccessSpecifier Access = I->getAccess();
if (Access == clang::AS_private)
- Flags |= llvm::DIType::FlagPrivate;
+ Flags |= llvm::DIDescriptor::FlagPrivate;
else if (Access == clang::AS_protected)
- Flags |= llvm::DIType::FlagProtected;
-
- // Create a DW_TAG_member node to remember the offset of this field in the
- // struct. FIXME: This is an absolutely insane way to capture this
- // information. When we gut debug info, this should be fixed.
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy);
+ Flags |= llvm::DIDescriptor::FlagProtected;
+
+ FieldTy = DBuilder.CreateMemberType(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
}
@@ -621,19 +632,12 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
0),
Unit);
- unsigned BFlags=0;
- AccessSpecifier Access = Method->getAccess();
- if (Access == clang::AS_private)
- BFlags |= llvm::DIType::FlagPrivate;
- else if (Access == clang::AS_protected)
- BFlags |= llvm::DIType::FlagProtected;
-
// Add "this" pointer.
llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
- llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
+ llvm::SmallVector<llvm::Value *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
@@ -645,7 +649,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
QualType ThisPtr =
Context.getPointerType(Context.getTagDeclType(Method->getParent()));
llvm::DIType ThisPtrType =
- DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+ DBuilder.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
@@ -656,15 +660,22 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Elts.push_back(Args.getElement(i));
llvm::DIArray EltTypeArray =
- DebugFactory.GetOrCreateArray(Elts.data(), Elts.size());
+ DBuilder.GetOrCreateArray(Elts.data(), Elts.size());
- return
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0,
- llvm::DIType(), EltTypeArray);
+ return DBuilder.CreateSubroutineType(Unit, EltTypeArray);
}
+/// isFunctionLocalClass - Return true if the CXXRecordDecl is defined
+/// (possibly transitively) inside a function.
+static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
+ if (const CXXRecordDecl *NRD =
+ dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ return isFunctionLocalClass(NRD);
+ else if (isa<FunctionDecl>(RD->getDeclContext()))
+ return true;
+ return false;
+}
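A sketch of what the predicate matches (example assumed): the recursion climbs enclosing record contexts until it either reaches a FunctionDecl or runs out of records.

    void f() {
      struct Outer {
        struct Local {};  // isFunctionLocalClass(Local): Local -> Outer -> f, true
      };
    }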
/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
/// a single member function GlobalDecl.
llvm::DISubprogram
@@ -680,7 +691,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
llvm::StringRef MethodLinkageName;
- if (!IsCtorOrDtor)
+ if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
MethodLinkageName = CGM.getMangledName(Method);
// Get the location for the method.
@@ -705,15 +716,32 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
ContainingType = RecordTy;
}
+ unsigned Flags = 0;
+ if (Method->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIDescriptor::FlagProtected;
+ if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ } else if (const CXXConversionDecl *CXXC =
+ dyn_cast<CXXConversionDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ }
+ if (Method->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+
llvm::DISubprogram SP =
- DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName,
- MethodLinkageName,
- MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false,
- /* isDefintion=*/ false,
- Virtuality, VIndex, ContainingType,
- Method->isImplicit(),
- CGM.getLangOptions().Optimize);
+ DBuilder.CreateMethod(RecordTy , MethodName, MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/ false,
+ Virtuality, VIndex, ContainingType,
+ Flags, CGM.getLangOptions().Optimize);
// Don't cache ctors or dtors since we have to emit multiple functions for
// a single ctor or dtor.
@@ -728,7 +756,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
for(CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
@@ -746,26 +774,15 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
BE = RD->friend_end(); BI != BE; ++BI) {
-
- TypeSourceInfo *TInfo = (*BI)->getFriendType();
- if(TInfo)
- {
- llvm::DIType Ty = getOrCreateType(TInfo->getType(), Unit);
-
- llvm::DIType DTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_friend,
- RecordTy, llvm::StringRef(),
- Unit, 0, 0, 0,
- 0, 0, Ty);
-
- EltTys.push_back(DTy);
- }
-
+ if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
+ EltTys.push_back(DBuilder.CreateFriend(RecordTy,
+ getOrCreateType(TInfo->getType(),
+ Unit)));
}
}
@@ -774,7 +791,7 @@ CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
@@ -790,23 +807,20 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
// virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
- BFlags = llvm::DIType::FlagVirtual;
+ BFlags = llvm::DIDescriptor::FlagVirtual;
} else
- BaseOffset = RL.getBaseClassOffset(Base);
+ BaseOffset = RL.getBaseClassOffsetInBits(Base);
AccessSpecifier Access = BI->getAccessSpecifier();
if (Access == clang::AS_private)
- BFlags |= llvm::DIType::FlagPrivate;
+ BFlags |= llvm::DIDescriptor::FlagPrivate;
else if (Access == clang::AS_protected)
- BFlags |= llvm::DIType::FlagProtected;
+ BFlags |= llvm::DIDescriptor::FlagProtected;
- llvm::DIType DTy =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
- RecordTy, llvm::StringRef(),
- Unit, 0, 0, 0,
- BaseOffset, BFlags,
- getOrCreateType(BI->getType(),
- Unit));
+ llvm::DIType DTy =
+ DBuilder.CreateInheritance(RecordTy,
+ getOrCreateType(BI->getType(), Unit),
+ BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
@@ -819,23 +833,13 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
ASTContext &Context = CGM.getContext();
/* Function type */
- llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
- llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
- llvm::DIType SubTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0, llvm::DIType(), SElements);
-
+ llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DBuilder.GetOrCreateArray(&STy, 1);
+ llvm::DIType SubTy = DBuilder.CreateSubroutineType(Unit, SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
- llvm::DIType vtbl_ptr_type
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "__vtbl_ptr_type", Unit,
- 0, Size, 0, 0, 0, SubTy);
-
- VTablePtrType =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
- Unit, "", Unit,
- 0, Size, 0, 0, 0, vtbl_ptr_type);
+ llvm::DIType vtbl_ptr_type = DBuilder.CreatePointerType(SubTy, Size, 0,
+ "__vtbl_ptr_type");
+ VTablePtrType = DBuilder.CreatePointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -855,7 +859,7 @@ llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
/// debug info entry in EltTys vector.
void CGDebugInfo::
CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
// If there is a primary base then it will hold vtable info.
@@ -868,27 +872,24 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
- = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- getVTableName(RD), Unit,
- 0, Size, 0, 0, 0,
- getOrCreateVTablePtrType(Unit));
+ = DBuilder.CreateMemberType(getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
}
+/// getOrCreateRecordType - Emit record type's standalone debug info.
+llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ DBuilder.RetainType(T);
+ return T;
+}
+
/// CreateType - get structure or union type.
-llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
-
- unsigned Tag;
- if (RD->isStruct())
- Tag = llvm::dwarf::DW_TAG_structure_type;
- else if (RD->isUnion())
- Tag = llvm::dwarf::DW_TAG_union_type;
- else {
- assert(RD->isClass() && "Unknown RecordType!");
- Tag = llvm::dwarf::DW_TAG_class_type;
- }
+ llvm::DIFile Unit = getOrCreateFile(RD->getLocation());
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
@@ -901,21 +902,21 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
llvm::DIDescriptor FDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
// If this is just a forward declaration, construct an appropriately
// marked node and just return it.
if (!RD->getDefinition()) {
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(),
- DefUnit, Line, 0, 0, 0,
- llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
+ llvm::DIType FwdDecl =
+ DBuilder.CreateStructType(FDContext, RD->getName(),
+ DefUnit, Line, 0, 0,
+ llvm::DIDescriptor::FlagFwdDecl,
+ llvm::DIArray());
return FwdDecl;
}
- llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+ llvm::DIType FwdDecl = DBuilder.CreateTemporaryType(DefUnit);
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -927,7 +928,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
@@ -951,36 +952,41 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
// Do not use DIGlobalVariable for enums.
if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
- DebugFactory.CreateGlobalVariable(FwdDecl, VName, VName, VName, VUnit,
- getLineNumber(V->getLocation()),
- VTy, true, true, CI);
+ DBuilder.CreateStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
}
}
}
}
CollectRecordFields(RD, Unit, EltTys);
- llvm::MDNode *ContainingType = NULL;
+ llvm::SmallVector<llvm::Value *, 16> TemplateParams;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
-
- // A class's primary base or the class itself contains the vtable.
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
- ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
- else if (CXXDecl->isDynamicClass())
- ContainingType = FwdDecl;
+ if (ClassTemplateSpecializationDecl *TSpecial
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ const TemplateArgumentList &TAL = TSpecial->getTemplateArgs();
+ for (unsigned i = 0, e = TAL.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAL[i];
+ if (TA.getKind() == TemplateArgument::Type) {
+ llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
+ llvm::DITemplateTypeParameter TTP =
+ DBuilder.CreateTemplateTypeParameter(TheCU, TTy.getName(), TTy);
+ TemplateParams.push_back(TTP);
+ } else if (TA.getKind() == TemplateArgument::Integral) {
+ llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ // FIXME: Get the parameter name instead of the parameter type name.
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.CreateTemplateValueParameter(TheCU, TTy.getName(), TTy,
+ TA.getAsIntegral()->getZExtValue());
+ TemplateParams.push_back(TVP);
+ }
+ }
+ }
}
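A worked example for the template-parameter loop above (source assumed): for the specialization below, T is recorded as a template type parameter bound to int, and N as a template value parameter with value 4.

    template <typename T, int N> struct Arr { T data[N]; };
    Arr<int, 4> a;  // debug info carries T = int and N = 4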
- llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
-
- // Bit size, align and offset of the type.
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
-
RegionStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
RegionMap.find(Ty->getDecl());
@@ -988,25 +994,54 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
RegionMap.erase(RI);
llvm::DIDescriptor RDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
-
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()));
llvm::StringRef RDName = RD->getName();
- // If this is a class, include the template arguments also.
- if (Tag == llvm::dwarf::DW_TAG_class_type)
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ llvm::DIArray Elements =
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::MDNode *RealDecl = NULL;
+
+ if (RD->isStruct())
+ RealDecl = DBuilder.CreateStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, Elements);
+ else if (RD->isUnion())
+ RealDecl = DBuilder.CreateUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, Elements);
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
RDName = getClassName(RD);
-
- llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(Tag, RDContext,
- RDName,
- DefUnit, Line, Size, Align, 0, 0,
- llvm::DIType(), Elements,
- 0, ContainingType);
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek the non-virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = FwdDecl;
+ llvm::DIArray TParamsArray =
+ DBuilder.GetOrCreateArray(TemplateParams.data(), TemplateParams.size());
+ RealDecl = DBuilder.CreateClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ Elements, ContainingType,
+ TParamsArray);
+ }
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
RegionMap[RD] = llvm::WeakVH(RealDecl);
- return RealDecl;
+ return llvm::DIType(RealDecl);
}
/// CreateType - get objective-c object type.
@@ -1020,7 +1055,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIFile Unit) {
ObjCInterfaceDecl *ID = Ty->getDecl();
- unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+ if (!ID)
+ return llvm::DIType();
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
@@ -1030,11 +1066,10 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// If this is just a forward declaration, return a special forward-declaration
// debug type.
if (ID->isForwardDecl()) {
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
- DefUnit, Line, 0, 0, 0, 0,
- llvm::DIType(), llvm::DIArray(),
- RuntimeLang);
+ llvm::DIType FwdDecl =
+ DBuilder.CreateStructType(Unit, ID->getName(),
+ DefUnit, Line, 0, 0, 0,
+ llvm::DIArray(), RuntimeLang);
return FwdDecl;
}
@@ -1044,7 +1079,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
+ llvm::DIType FwdDecl = DBuilder.CreateTemporaryType(DefUnit);
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -1056,27 +1091,29 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+ llvm::SmallVector<llvm::Value *, 16> EltTys;
ObjCInterfaceDecl *SClass = ID->getSuperClass();
if (SClass) {
llvm::DIType SClassTy =
getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ if (!SClassTy.isValid())
+ return llvm::DIType();
+
llvm::DIType InhTag =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
- Unit, "", Unit, 0, 0, 0,
- 0 /* offset */, 0, SClassTy);
+ DBuilder.CreateInheritance(FwdDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
unsigned FieldNo = 0;
- for (ObjCInterfaceDecl::ivar_iterator I = ID->ivar_begin(),
- E = ID->ivar_end(); I != E; ++I, ++FieldNo) {
- ObjCIvarDecl *Field = *I;
+ for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
+ Field = Field->getNextIvar(), ++FieldNo) {
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
-
+ if (!FieldTy.isValid())
+ return llvm::DIType();
+
llvm::StringRef FieldName = Field->getName();
// Ignore unnamed fields.
@@ -1105,22 +1142,18 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
- Flags = llvm::DIType::FlagProtected;
+ Flags = llvm::DIDescriptor::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
- Flags = llvm::DIType::FlagPrivate;
-
- // Create a DW_TAG_member node to remember the offset of this field in the
- // struct. FIXME: This is an absolutely insane way to capture this
- // information. When we gut debug info, this should be fixed.
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy);
+ Flags = llvm::DIDescriptor::FlagPrivate;
+
+ FieldTy = DBuilder.CreateMemberType(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
RegionStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
@@ -1132,10 +1165,10 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(), DefUnit,
- Line, Size, Align, 0, 0, llvm::DIType(),
- Elements, RuntimeLang);
+ llvm::DIType RealDecl =
+ DBuilder.CreateStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, 0,
+ Elements, RuntimeLang);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
@@ -1145,18 +1178,11 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return RealDecl;
}
-llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
- llvm::DIFile Unit) {
- return CreateEnumType(Ty->getDecl(), Unit);
-
-}
-
-llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty) {
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return CreateType(RT, Unit);
+ return CreateType(RT);
else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return CreateType(ET, Unit);
+ return CreateEnumType(ET->getDecl());
return llvm::DIType();
}
@@ -1168,17 +1194,14 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
if (NumElems > 0)
--NumElems;
- llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems);
- llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1);
+ llvm::Value *Subscript = DBuilder.GetOrCreateSubrange(0, NumElems);
+ llvm::DIArray SubscriptArray = DBuilder.GetOrCreateArray(&Subscript, 1);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
return
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
- Unit, "", Unit,
- 0, Size, Align, 0, 0,
- ElementTy, SubscriptArray);
+ DBuilder.CreateVectorType(Size, Align, ElementTy, SubscriptArray);
}
llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
@@ -1204,27 +1227,28 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
- llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ llvm::SmallVector<llvm::Value *, 8> Subscripts;
QualType EltTy(Ty, 0);
- while ((Ty = dyn_cast<ArrayType>(EltTy))) {
- uint64_t Upper = 0;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- if (CAT->getSize().getZExtValue())
- Upper = CAT->getSize().getZExtValue() - 1;
- // FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ if (Ty->isIncompleteArrayType())
EltTy = Ty->getElementType();
+ else {
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ if (CAT->getSize().getZExtValue())
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
}
llvm::DIArray SubscriptArray =
- DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+ DBuilder.GetOrCreateArray(Subscripts.data(), Subscripts.size());
llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
- Unit, "", Unit,
- 0, Size, Align, 0, 0,
- getOrCreateType(EltTy, Unit),
- SubscriptArray);
+ DBuilder.CreateArrayType(Size, Align, getOrCreateType(EltTy, Unit),
+ SubscriptArray);
return DbgTy;
}
@@ -1234,6 +1258,12 @@ llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
Ty, Ty->getPointeeType(), Unit);
}
+llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
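A small example of what this overload enables (assumed): rvalue references previously fell through to the "not yet supported" diagnostic removed further down; they are now described as DW_TAG_rvalue_reference_type.

    void take(int &&x);  // x's type now gets an rvalue-reference debug entry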
llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile U) {
QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
@@ -1249,47 +1279,46 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
uint64_t FieldOffset = 0;
- llvm::DIDescriptor ElementTypes[2];
+ llvm::Value *ElementTypes[2];
// FIXME: This should probably be a function type instead.
ElementTypes[0] =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
- "ptr", U, 0,
- Info.first, Info.second, FieldOffset, 0,
- PointerDiffDITy);
+ DBuilder.CreateMemberType("ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
FieldOffset += Info.first;
ElementTypes[1] =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
- "ptr", U, 0,
- Info.first, Info.second, FieldOffset, 0,
- PointerDiffDITy);
+ DBuilder.CreateMemberType("ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(&ElementTypes[0],
- llvm::array_lengthof(ElementTypes));
+ DBuilder.GetOrCreateArray(&ElementTypes[0],
+ llvm::array_lengthof(ElementTypes));
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- U, llvm::StringRef("test"),
- U, 0, FieldOffset,
- 0, 0, 0, llvm::DIType(), Elements);
+ return DBuilder.CreateStructType(U, llvm::StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, Elements);
}
/// CreateEnumType - get enumeration type.
-llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
- llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
+ llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
+ llvm::SmallVector<llvm::Value *, 16> Enumerators;
// Create DIEnumerator elements for each enumerator.
for (EnumDecl::enumerator_iterator
Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
Enum != EnumEnd; ++Enum) {
- Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
- Enum->getInitVal().getZExtValue()));
+ Enumerators.push_back(
+ DBuilder.CreateEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
}
// Return a CompositeType for the enum itself.
llvm::DIArray EltArray =
- DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+ DBuilder.GetOrCreateArray(Enumerators.data(), Enumerators.size());
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
@@ -1299,11 +1328,11 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
}
+ llvm::DIDescriptor EnumContext =
+ getContextDescriptor(dyn_cast<Decl>(ED->getDeclContext()));
llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
- Unit, ED->getName(), DefUnit, Line,
- Size, Align, 0, 0,
- llvm::DIType(), EltArray);
+ DBuilder.CreateEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray);
return DbgTy;
}
@@ -1316,20 +1345,23 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::TemplateSpecialization:
T = cast<TemplateSpecializationType>(T)->desugar();
break;
- case Type::TypeOfExpr: {
- TypeOfExprType *Ty = cast<TypeOfExprType>(T);
- T = Ty->getUnderlyingExpr()->getType();
+ case Type::TypeOfExpr:
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
break;
- }
case Type::TypeOf:
T = cast<TypeOfType>(T)->getUnderlyingType();
break;
case Type::Decltype:
T = cast<DecltypeType>(T)->getUnderlyingType();
break;
+ case Type::Attributed:
+ T = cast<AttributedType>(T)->getEquivalentType();
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
+ case Type::Paren:
+ T = cast<ParenType>(T)->getInnerType();
+ break;
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
@@ -1400,15 +1432,15 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return CreateType(cast<ObjCObjectType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
- case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
- case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Builtin: return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex: return CreateType(cast<ComplexType>(Ty));
case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
case Type::Enum:
- return CreateType(cast<TagType>(Ty), Unit);
+ return CreateType(cast<TagType>(Ty));
case Type::FunctionProto:
case Type::FunctionNoProto:
return CreateType(cast<FunctionType>(Ty), Unit);
@@ -1419,29 +1451,29 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::LValueReference:
return CreateType(cast<LValueReferenceType>(Ty), Unit);
+ case Type::RValueReference:
+ return CreateType(cast<RValueReferenceType>(Ty), Unit);
case Type::MemberPointer:
return CreateType(cast<MemberPointerType>(Ty), Unit);
+ case Type::Attributed:
case Type::TemplateSpecialization:
case Type::Elaborated:
+ case Type::Paren:
case Type::SubstTemplateTypeParm:
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::Auto:
llvm_unreachable("type should have been unwrapped!");
- return llvm::DIType();
-
- case Type::RValueReference:
- // FIXME: Implement!
- Diag = "rvalue references";
- break;
+ return llvm::DIType();
}
assert(Diag && "Fall through without a diagnostic?");
unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
"debug information for %0 is not yet supported");
- CGM.getDiags().Report(FullSourceLoc(), DiagID)
+ CGM.getDiags().Report(DiagID)
<< Diag;
return llvm::DIType();
}
@@ -1453,10 +1485,9 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
- Unit, Name, Unit, 0,
- FieldSize, FieldAlign,
- *Offset, 0, FieldTy);
+ llvm::DIType Ty = DBuilder.CreateMemberType(Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
*Offset += FieldSize;
return Ty;
}
@@ -1473,12 +1504,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(RegionStack.size());
const Decl *D = GD.getDecl();
+ unsigned Flags = 0;
+ llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ llvm::DIDescriptor FDContext(Unit);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
FI = SPCache.find(FD);
if (FI != SPCache.end()) {
- llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
RegionStack.push_back(SPN);
@@ -1489,13 +1523,20 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
LinkageName = CGM.getMangledName(GD);
+ if (LinkageName == Name)
+ LinkageName = llvm::StringRef();
+ if (FD->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
- LinkageName = Name;
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
} else {
- // Use llvm function name as linkage name.
+ // Use llvm function name.
Name = Fn->getName();
- LinkageName = Name;
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
}
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
@@ -1503,16 +1544,14 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// It is expected that CurLoc is set before using EmitFunctionStart.
// Usually, CurLoc points to the left bracket location of compound
// statement representing function body.
- llvm::DIFile Unit = getOrCreateFile(CurLoc);
unsigned LineNo = getLineNumber(CurLoc);
-
+ if (D->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::DISubprogram SP =
- DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
- getOrCreateType(FnType, Unit),
- Fn->hasInternalLinkage(), true/*definition*/,
- 0, 0, llvm::DIType(),
- D->isImplicit(),
- CGM.getLangOptions().Optimize, Fn);
+ DBuilder.CreateFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, getOrCreateType(FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/,
+ Flags, CGM.getLangOptions().Optimize, Fn);
// Push function on region stack.
llvm::MDNode *SPN = SP;
@@ -1556,7 +1595,8 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
- if (!strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
+ !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
return;
// If #line directive stack is empty then we are entering a new scope.
@@ -1570,9 +1610,10 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
&& "error handling #line regions!");
bool SeenThisFile = false;
+ // Check whether the current file has already been seen.
for(std::vector<const char *>::iterator I = LineDirectiveFiles.begin(),
E = LineDirectiveFiles.end(); I != E; ++I)
- if (!strcmp(PPLoc.getFilename(), *I)) {
+ if (!strcmp(PCLoc.getFilename(), *I)) {
SeenThisFile = true;
break;
}
@@ -1599,12 +1640,12 @@ void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
/// region - "llvm.dbg.region.start.".
void CGDebugInfo::EmitRegionStart(CGBuilderTy &Builder) {
llvm::DIDescriptor D =
- DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
- llvm::DIDescriptor() :
- llvm::DIDescriptor(RegionStack.back()),
- getOrCreateFile(CurLoc),
- getLineNumber(CurLoc),
- getColumnNumber(CurLoc));
+ DBuilder.CreateLexicalBlock(RegionStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(RegionStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
RegionStack.push_back(DN);
}
@@ -1637,8 +1678,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
uint64_t *XOffset) {
- llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
-
+ llvm::SmallVector<llvm::Value *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
@@ -1654,7 +1694,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
- bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+ bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type);
if (HasCopyAndDispose) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
@@ -1685,24 +1725,21 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FieldAlign = Align.getQuantity()*8;
*XOffset = FieldOffset;
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- VD->getName(), Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.CreateMemberType(VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ DBuilder.GetOrCreateArray(EltTys.data(), EltTys.size());
- unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
-
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
- Unit, "", Unit,
- 0, FieldOffset, 0, 0, Flags,
- llvm::DIType(), Elements);
+ unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
+ return DBuilder.CreateStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
+ Elements);
}
+
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage, CGBuilderTy &Builder) {
@@ -1721,37 +1758,119 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (!Ty)
return;
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) {
+ // If Storage is an aggregate returned as 'sret' then let debugger know
+ // about this.
+ if (Arg->hasStructRetAttr())
+ Ty = DBuilder.CreateReferenceType(Ty);
+ else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
+ // If an aggregate variable has a non-trivial destructor or a non-trivial
+ // copy constructor then it is passed indirectly. Let debug info know about
+ // this by using a reference to the aggregate type as the argument type.
+ if (!Record->hasTrivialCopyConstructor() || !Record->hasTrivialDestructor())
+ Ty = DBuilder.CreateReferenceType(Ty);
+ }
+ }
+
// Get location information.
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
-
- // Create the descriptor for the variable.
- llvm::DIVariable D =
- DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
- VD->getName(),
- Unit, Line, Ty, CGM.getLangOptions().Optimize);
- // Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
-
+ unsigned Flags = 0;
+ if (VD->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
llvm::MDNode *Scope = RegionStack.back();
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+
+ llvm::StringRef Name = VD->getName();
+ if (!Name.empty()) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ CharUnits offset = CharUnits::fromQuantity(32);
+ llvm::SmallVector<llvm::Value *, 9> addr;
+ const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset =
+ CharUnits::fromQuantity(CGM.getContext().Target.getPointerWidth(0)/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CharUnits::fromQuantity(XOffset/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.CreateComplexVariable(Tag,
+ llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr.data(), addr.size());
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.CreateLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ CGM.getLangOptions().Optimize, Flags);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+ // If VD is an anonymous union then Storage represents the value for
+ // all of the union's fields.
+ if (const RecordType *RT = dyn_cast<RecordType>(VD->getType()))
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(RT->getDecl()))
+ if (RD->isUnion()) {
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Use VarDecl's Tag, Scope and Line number.
+ llvm::DIVariable D =
+ DBuilder.CreateLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ FieldName, Unit, Line, FieldTy,
+ CGM.getLangOptions().Optimize, Flags);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ }
+ }
}
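An assumed example for the anonymous-union path above: all members share the same Storage, and each receives its own llvm.dbg.declare so the debugger can resolve either name.

    void g() {
      union { int i; float f; };  // one Storage, one dbg.declare per member
      i = 0;
    }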
/// EmitDeclare - Emit local variable declaration debug info.
-void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage, CGBuilderTy &Builder,
- CodeGenFunction *CGF) {
- const ValueDecl *VD = BDRE->getDecl();
+ const CGBlockInfo &blockInfo) {
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
return;
+ bool isByRef = VD->hasAttr<BlocksAttr>();
+
uint64_t XOffset = 0;
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
- if (VD->hasAttr<BlocksAttr>())
+ if (isByRef)
Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
else
Ty = getOrCreateType(VD->getType(), Unit);
@@ -1760,20 +1879,25 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
unsigned Line = getLineNumber(VD->getLocation());
unsigned Column = getColumnNumber(VD->getLocation());
- CharUnits offset = CGF->BlockDecls[VD];
+ const llvm::TargetData &target = CGM.getTargetData();
+
+ CharUnits offset = CharUnits::fromQuantity(
+ target.getStructLayout(blockInfo.StructureType)
+ ->getElementOffset(blockInfo.getCapture(VD).getIndex()));
+
llvm::SmallVector<llvm::Value *, 9> addr;
const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- if (BDRE->isByRef()) {
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ if (isByRef) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
- offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
+ offset = CharUnits::fromQuantity(target.getPointerSize()/8);
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of x field
offset = CharUnits::fromQuantity(XOffset/8);
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
@@ -1781,13 +1905,12 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
// Create the descriptor for the variable.
llvm::DIVariable D =
- DebugFactory.CreateComplexVariable(Tag,
- llvm::DIDescriptor(RegionStack.back()),
- VD->getName(), Unit, Line, Ty,
- addr);
+ DBuilder.CreateComplexVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr.data(), addr.size());
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
- DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+ DBuilder.InsertDeclare(Storage, D, Builder.GetInsertBlock());
llvm::MDNode *Scope = RegionStack.back();
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
@@ -1800,9 +1923,10 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
}
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
- const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
- CodeGenFunction *CGF) {
- EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+ const VarDecl *variable, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ EmitDeclare(variable, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder,
+ blockInfo);
}
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
@@ -1836,14 +1960,16 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
}
llvm::StringRef DeclName = D->getName();
llvm::StringRef LinkageName;
- if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()))
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext())
+ && !isa<ObjCMethodDecl>(D->getDeclContext()))
LinkageName = Var->getName();
+ if (LinkageName == DeclName)
+ LinkageName = llvm::StringRef();
llvm::DIDescriptor DContext =
- getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
- DebugFactory.CreateGlobalVariable(DContext, DeclName, DeclName, LinkageName,
- Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasInternalLinkage(),
- true/*definition*/, Var);
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ DBuilder.CreateStaticVariable(DContext, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
}
/// EmitGlobalVariable - Emit information about an objective-c interface.
@@ -1868,49 +1994,45 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ArrayType::Normal, 0);
}
- DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit, LineNo,
- getOrCreateType(T, Unit),
- Var->hasInternalLinkage(),
- true/*definition*/, Var);
+ DBuilder.CreateGlobalVariable(Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
}
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
- llvm::ConstantInt *Init,
- CGBuilderTy &Builder) {
+ llvm::Constant *Init) {
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::StringRef Name = VD->getName();
llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
- Ty = CreateEnumType(ED, Unit);
+ Ty = CreateEnumType(ED);
}
// Do not use DIGlobalVariable for enums.
if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
return;
- DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit,
- getLineNumber(VD->getLocation()),
- Ty, true, true, Init);
+ DBuilder.CreateStaticVariable(Unit, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, Init);
}
/// getOrCreateNameSpace - Return the namespace descriptor for the given
/// namespace decl.
llvm::DINameSpace
-CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
- llvm::DIDescriptor Unit) {
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
NameSpaceCache.find(NSDecl);
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
unsigned LineNo = getLineNumber(NSDecl->getLocation());
-
+ llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation());
llvm::DIDescriptor Context =
- getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
llvm::DINameSpace NS =
- DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
- llvm::DIFile(Unit), LineNo);
+ DBuilder.CreateNameSpace(Context, NSDecl->getName(), FileD, LineNo);
NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
return NS;
}
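
The body above is the same lookup-then-create caching discipline CGDebugInfo applies to types and files, with the WeakVH keeping the cached entry safe if the underlying MDNode is ever deleted. A minimal sketch of the pattern, with std::map and a Node type standing in for the LLVM pieces (both are assumptions, used only for illustration):

    #include <map>

    struct Node;
    Node *createNode(int key);  // hypothetical factory

    Node *getOrCreate(std::map<int, Node *> &cache, int key) {
      std::map<int, Node *>::iterator it = cache.find(key);
      if (it != cache.end())
        return it->second;      // cache hit: reuse the existing descriptor
      Node *n = createNode(key);
      cache[key] = n;           // remember it for later requests
      return n;
    }
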
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
index a1ad012..6a9ab9c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
@@ -36,13 +37,14 @@ namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
class GlobalDecl;
+ class CGBlockInfo;
/// CGDebugInfo - This class gathers all debug information during compilation
/// and is responsible for emitting to llvm globals or pass directly to
/// the backend.
class CGDebugInfo {
CodeGenModule &CGM;
- llvm::DIFactory DebugFactory;
+ llvm::DIBuilder DBuilder;
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
@@ -74,8 +76,8 @@ class CGDebugInfo {
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
/// Helper functions for getOrCreateType.
- llvm::DIType CreateType(const BuiltinType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const ComplexType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BuiltinType *Ty);
+ llvm::DIType CreateType(const ComplexType *Ty);
llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
@@ -83,22 +85,21 @@ class CGDebugInfo {
llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const TagType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const RecordType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TagType *Ty);
+ llvm::DIType CreateType(const RecordType *Ty);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const EnumType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
- llvm::DIType CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit);
+ llvm::DIType CreateEnumType(const EnumDecl *ED);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
- llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N,
- llvm::DIDescriptor Unit);
-
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
+ llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
llvm::DIType CreatePointerLikeType(unsigned Tag,
const Type *Ty, QualType PointeeTy,
llvm::DIFile F);
@@ -109,26 +110,26 @@ class CGDebugInfo {
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+ llvm::SmallVectorImpl<llvm::Value *> &E,
llvm::DIType T);
void CollectCXXFriends(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
void CollectCXXBases(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
+ llvm::SmallVectorImpl<llvm::Value *> &E);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
+ llvm::SmallVectorImpl<llvm::Value *> &EltTys);
public:
CGDebugInfo(CodeGenModule &CGM);
@@ -169,10 +170,10 @@ public:
/// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
/// imported variable declaration in a block.
- void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
- llvm::Value *AI,
+ void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
+ llvm::Value *storage,
CGBuilderTy &Builder,
- CodeGenFunction *CGF);
+ const CGBlockInfo &blockInfo);
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
@@ -186,17 +187,19 @@ public:
void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
/// EmitGlobalVariable - Emit global variable's debug info.
- void EmitGlobalVariable(const ValueDecl *VD, llvm::ConstantInt *Init,
- CGBuilderTy &Builder);
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+ /// getOrCreateRecordType - Emit record type's standalone debug info.
+ llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
CGBuilderTy &Builder);
- /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
- void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
- CGBuilderTy &Builder, CodeGenFunction *CGF);
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable
+ /// declaration from an enclosing block.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder, const CGBlockInfo &blockInfo);
// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
@@ -204,8 +207,7 @@ private:
uint64_t *OffSet);
/// getContextDescriptor - Get context info for the decl.
- llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
- llvm::DIDescriptor &CU);
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
/// getCurrentDirname - Return current directory name.
llvm::StringRef getCurrentDirname();
@@ -217,6 +219,9 @@ private:
/// location.
llvm::DIFile getOrCreateFile(SourceLocation Loc);
+ /// getOrCreateMainFile - Get the file info for main compile unit.
+ llvm::DIFile getOrCreateMainFile();
+
/// getOrCreateType - Get the type from the cache or create a new type if
/// necessary.
llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
@@ -232,6 +237,7 @@ private:
/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
llvm::StringRef getFunctionName(const FunctionDecl *FD);
+
/// getObjCMethodName - Returns the unmangled name of an Objective-C method.
/// This is the display name for the debugging info.
llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index 57e5236..a87dfae 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -44,6 +45,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::CXXDestructor:
case Decl::CXXConversion:
case Decl::Field:
+ case Decl::IndirectField:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::ParmVar:
@@ -68,7 +70,6 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
-
assert(0 && "Declaration not should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
@@ -80,14 +81,15 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::UsingDirective: // using namespace X; [C++]
case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ case Decl::Label: // __label__ x;
// None of these decls require codegen support.
return;
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
- assert(VD.isBlockVarDecl() &&
+ assert(VD.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
- return EmitBlockVarDecl(VD);
+ return EmitVarDecl(VD);
}
case Decl::Typedef: { // typedef int X;
@@ -100,17 +102,14 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
}
}
-/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
-void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
- if (D.hasAttr<AsmLabelAttr>())
- CGM.ErrorUnsupported(&D, "__asm__");
-
+void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
switch (D.getStorageClass()) {
case SC_None:
case SC_Auto:
case SC_Register:
- return EmitLocalBlockVarDecl(D);
+ return EmitAutoVarDecl(D);
case SC_Static: {
llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
@@ -124,7 +123,7 @@ void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
- return EmitStaticBlockVarDecl(D, Linkage);
+ return EmitStaticVarDecl(D, Linkage);
}
case SC_Extern:
case SC_PrivateExtern:
@@ -144,22 +143,32 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
}
std::string ContextName;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ if (!CGF.CurFuncDecl) {
+    // We had better be in a block declared at global scope.
+ const NamedDecl *ND = cast<NamedDecl>(&D);
+ const DeclContext *DC = ND->getDeclContext();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ MangleBuffer Name;
+ CGM.getBlockMangledName(GlobalDecl(), Name, BD);
+ ContextName = Name.getString();
+ }
+ else
+ assert(0 && "Unknown context for block static var decl");
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
llvm::StringRef Name = CGM.getMangledName(FD);
ContextName = Name.str();
} else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
ContextName = CGF.CurFn->getName();
else
- // FIXME: What about in a block??
- assert(0 && "Unknown context for block var decl");
+ assert(0 && "Unknown context for static var decl");
return ContextName + Separator + D.getNameAsString();
}
llvm::GlobalVariable *
-CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
- const char *Separator,
- llvm::GlobalValue::LinkageTypes Linkage) {
+CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
@@ -172,16 +181,18 @@ CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
CGM.EmitNullConstant(D.getType()), Name, 0,
D.isThreadSpecified(), Ty.getAddressSpace());
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GV->setVisibility(CurFn->getVisibility());
return GV;
}
-/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
-CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
- llvm::GlobalVariable *GV) {
+CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
// If constant emission failed, then this should be a C++ static
@@ -189,12 +200,12 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
if (!Init) {
if (!getContext().getLangOptions().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
- else {
+ else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
-
- EmitStaticCXXBlockVarDeclInit(D, GV);
+
+ EmitCXXGuardedInit(D, GV);
}
return GV;
}
@@ -209,8 +220,10 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
- 0, D.isThreadSpecified(),
+ /*InsertBefore*/ OldGV,
+ D.isThreadSpecified(),
D.getType().getAddressSpace());
+ GV->setVisibility(OldGV->getVisibility());
// Steal the name of the old global
GV->takeName(OldGV);
@@ -228,12 +241,12 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
return GV;
}
-void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
+void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
- llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);
+ llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
@@ -244,10 +257,14 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
EmitVLASize(D.getType());
+
+ // Local static block variables must be treated as globals as they may be
+  // referenced in their RHS initializer block-literal expression.
+ CGM.setStaticLocalDeclAddress(&D, GV);
// If this value has an initializer, emit it.
if (D.getInit())
- GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+ GV = AddInitializerToStaticVarDecl(D, GV);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
@@ -266,17 +283,13 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
if (D.hasAttr<UsedAttr>())
CGM.AddUsedGlobal(GV);
- if (getContext().getLangOptions().CPlusPlus)
- CGM.setStaticLocalDeclAddress(&D, GV);
-
// We may have to cast the constant because of the initializer
// mismatch above.
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- const llvm::Type *LPtrTy =
- llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
// Emit global variable debug descriptor for static vars.
@@ -293,6 +306,15 @@ unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
return ByRefValueInfo.find(VD)->second.second;
}
+llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V) {
+ llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
+ V->getNameAsString());
+ return Loc;
+}
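
BuildBlockByrefAddress emits GEP(1, "forwarding"), a load, and a GEP to the variable's own field; in source terms it computes the address of the live copy of a __block variable. A C++ mirror of that walk, assuming the descriptor layout documented at BuildByRefType just below (struct and field names here are hypothetical):

    // Hypothetical mirror of the descriptor for '__block int x'; field
    // names and order follow the struct documented in BuildByRefType.
    struct ByrefInt {
      void *isa;
      ByrefInt *forwarding;   // self until Block_copy moves the variable
      int flags, size;
      int x;                  // the declared 'T x'
    };

    // The GEP/load/GEP sequence above, expressed directly in source terms:
    // always go through 'forwarding' so the heap copy is found after a
    // Block_copy.
    static int *byrefAddr(ByrefInt *d) {
      return &d->forwarding->x;
    }
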
+
/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
@@ -307,7 +329,7 @@ unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
/// T x;
/// } x
///
-const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
if (Info.first)
return Info.first;
@@ -316,9 +338,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
std::vector<const llvm::Type *> Types;
- const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
- llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
// void *__isa;
Types.push_back(Int8PtrTy);
@@ -332,7 +352,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
// int32_t __size;
Types.push_back(Int32Ty);
- bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+ bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
if (HasCopyAndDispose) {
/// void *__copy_helper;
Types.push_back(Int8PtrTy);
@@ -343,7 +363,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
bool Packed = false;
CharUnits Align = getContext().getDeclAlign(D);
- if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
+ if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
// We have to insert padding.
// The struct above has 2 32-bit integers.
@@ -359,7 +379,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
// FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
@@ -375,7 +395,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
// T x;
Types.push_back(ConvertTypeForMem(Ty));
- const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+ const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);
cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
@@ -484,18 +504,101 @@ namespace {
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
- V = CGF.Builder.CreateLoad(V);
- CGF.BuildBlockRelease(V);
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
}
-/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+
+/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
+/// non-zero parts of the specified initializer with equal or fewer than
+/// NumStores scalar stores.
+static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
+ unsigned &NumStores) {
+  // Zero and Undef never require any extra stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return true;
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init))
+ return Init->isNullValue() || NumStores--;
+
+ // See if we can emit each element.
+ if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ // Anything else is hard and scary.
+ return false;
+}
+
+/// emitStoresForInitAfterMemset - For initializers for which
+/// canEmitInitWithFewStoresAfterMemset returned true, emit the scalar
+/// stores that would be required.
+static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
+ CGBuilderTy &Builder) {
+ // Zero doesn't require any stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return;
+
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init)) {
+ if (!Init->isNullValue())
+ Builder.CreateStore(Init, Loc);
+ return;
+ }
+
+ assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
+ "Unknown value type!");
+
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (Elt->isNullValue()) continue;
+
+ // Otherwise, get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ Builder);
+ }
+}
+
+
+/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
+/// plus some stores to initialize a local variable instead of using a memcpy
+/// from a constant global. It is beneficial to use memset if the global is all
+/// zeros, or mostly zeros and large.
+static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) return true;
+
+  // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is
+  // larger, use memset plus stores if doing so requires 6 or fewer scalar
+  // stores.
+  // TODO: Should the budget depend on the size?  Avoiding a large global
+  // warrants plopping in more stores.
+ unsigned StoreBudget = 6;
+ uint64_t SizeLimit = 32;
+
+ return GlobalSize > SizeLimit &&
+ canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+}
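
Taken together, these three helpers lower a large, mostly-zero local initializer as one memset plus a few scalar stores instead of a memcpy from a constant global. A sketch of an input that takes this path (use() is a hypothetical consumer, declared only to keep the example self-contained):

    extern void use(void *);

    struct Big { int pad[999]; int last; };  // ~4000 bytes, well over 32

    void f() {
      // All-zero except one trailing scalar: the initializer fits within
      // the 6-store budget, so this lowers to memset(&b, 0, sizeof(Big))
      // followed by a single store of 42, not a memcpy from a 4000-byte
      // constant global.
      Big b = { {}, 42 };
      use(&b);
    }
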
+
+
+/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
-void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
- SpecialInitFn *SpecialInit) {
+void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
bool isByRef = D.hasAttr<BlocksAttr>();
@@ -520,7 +623,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// If this variable is marked 'const', emit the value as a global.
if (CGM.getCodeGenOpts().MergeAllConstants &&
Ty.isConstant(getContext())) {
- EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
return;
}
@@ -542,9 +645,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
- const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
- llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
- NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
+ llvm::Value *Zero = Builder.getFalse();
+ NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
// Record the NRVO flag for this variable.
@@ -561,7 +664,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
Align = getContext().getDeclAlign(&D);
if (isByRef)
Align = std::max(Align,
- CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+ getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
Alloc->setAlignment(Align.getQuantity());
DeclPtr = Alloc;
}
@@ -569,9 +672,8 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
- DeclPtr = CreateStaticBlockVarDecl(D, Class,
- llvm::GlobalValue
- ::InternalLinkage);
+ DeclPtr = CreateStaticVarDecl(D, Class,
+ llvm::GlobalValue::InternalLinkage);
}
// FIXME: Can this happen?
@@ -582,8 +684,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (!DidCallStackSave) {
// Save the stack.
- const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
- llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+ llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
@@ -593,19 +694,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
DidCallStackSave = true;
// Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
- const llvm::Type *LElemPtrTy =
- llvm::PointerType::get(LElemTy, Ty.getAddressSpace());
+ const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
// Allocate memory for the array.
llvm::AllocaInst *VLA =
- Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+ Builder.CreateAlloca(llvm::Type::getInt8Ty(getLLVMContext()), VLASize, "vla");
VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());
DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
@@ -639,59 +740,65 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
}
if (isByRef) {
- const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
-
EnsureInsertPoint();
- llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
- llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
- llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
- llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
llvm::Value *V;
- int flag = 0;
- int flags = 0;
+
+ BlockFieldFlags fieldFlags;
+ bool fieldNeedsCopyDispose = false;
needsDispose = true;
if (Ty->isBlockPointerType()) {
- flag |= BLOCK_FIELD_IS_BLOCK;
- flags |= BLOCK_HAS_COPY_DISPOSE;
- } else if (BlockRequiresCopying(Ty)) {
- flag |= BLOCK_FIELD_IS_OBJECT;
- flags |= BLOCK_HAS_COPY_DISPOSE;
+ fieldFlags |= BLOCK_FIELD_IS_BLOCK;
+ fieldNeedsCopyDispose = true;
+ } else if (getContext().isObjCNSObjectType(Ty) ||
+ Ty->isObjCObjectPointerType()) {
+ fieldFlags |= BLOCK_FIELD_IS_OBJECT;
+ fieldNeedsCopyDispose = true;
+ } else if (getLangOptions().CPlusPlus) {
+ if (getContext().getBlockVarCopyInits(&D))
+ fieldNeedsCopyDispose = true;
+ else if (const CXXRecordDecl *record = D.getType()->getAsCXXRecordDecl())
+ fieldNeedsCopyDispose = !record->hasTrivialDestructor();
}
    // FIXME: Someone double-check this.
if (Ty.isObjCGCWeak())
- flag |= BLOCK_FIELD_IS_WEAK;
+ fieldFlags |= BLOCK_FIELD_IS_WEAK;
int isa = 0;
- if (flag&BLOCK_FIELD_IS_WEAK)
+ if (fieldFlags & BLOCK_FIELD_IS_WEAK)
isa = 1;
- V = llvm::ConstantInt::get(Int32Ty, isa);
- V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
- Builder.CreateStore(V, isa_field);
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ Builder.CreateStore(V, Builder.CreateStructGEP(DeclPtr, 0, "byref.isa"));
- Builder.CreateStore(DeclPtr, forwarding_field);
+ Builder.CreateStore(DeclPtr, Builder.CreateStructGEP(DeclPtr, 1,
+ "byref.forwarding"));
- V = llvm::ConstantInt::get(Int32Ty, flags);
- Builder.CreateStore(V, flags_field);
+ // Blocks ABI:
+ // c) the flags field is set to either 0 if no helper functions are
+ // needed or BLOCK_HAS_COPY_DISPOSE if they are,
+ BlockFlags flags;
+ if (fieldNeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(DeclPtr, 2, "byref.flags"));
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
- V = llvm::ConstantInt::get(Int32Ty,
- CGM.GetTargetTypeStoreSize(V1).getQuantity());
- Builder.CreateStore(V, size_field);
+ V = llvm::ConstantInt::get(IntTy, CGM.GetTargetTypeStoreSize(V1).getQuantity());
+ Builder.CreateStore(V, Builder.CreateStructGEP(DeclPtr, 3, "byref.size"));
- if (flags & BLOCK_HAS_COPY_DISPOSE) {
- BlockHasCopyDispose = true;
+ if (fieldNeedsCopyDispose) {
llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
- Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
- Align.getQuantity()),
+ Builder.CreateStore(CGM.BuildbyrefCopyHelper(DeclPtr->getType(),
+ fieldFlags,
+ Align.getQuantity(), &D),
copy_helper);
llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
- Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
- Align.getQuantity()),
+ Builder.CreateStore(CGM.BuildbyrefDestroyHelper(DeclPtr->getType(),
+ fieldFlags,
+ Align.getQuantity(), &D),
destroy_helper);
}
}
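
Restated at the source level, the byref header initialization just emitted performs four stores before the variable's own initializer runs. A sketch, with an assumed flag-bit value since the real constant lives elsewhere in CGBlocks:

    // Assumed bit value, for the sketch only; the real constant is
    // defined with the other block flags.
    enum { SKETCH_HAS_COPY_DISPOSE = 1 << 25 };

    struct ByrefHdr {
      void *isa;
      ByrefHdr *forwarding;
      int flags, size;
    };

    static void initByrefHeader(ByrefHdr *v, bool weak, bool copyDispose) {
      v->isa = reinterpret_cast<void *>(weak ? 1 : 0); // the IntToPtr above
      v->forwarding = v;              // points at itself while on the stack
      v->flags = copyDispose ? SKETCH_HAS_COPY_DISPOSE : 0;
      v->size = static_cast<int>(sizeof(*v));  // real code stores the full
                                               // descriptor size, incl. T x
    }
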
@@ -700,9 +807,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
SpecialInit(*this, D, DeclPtr);
} else if (Init) {
llvm::Value *Loc = DeclPtr;
- if (isByRef)
- Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
- D.getNameAsString());
bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();
@@ -712,27 +816,25 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty,this);
assert(Init != 0 && "Wasn't a simple constant init?");
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
- const llvm::Type *IntPtr =
- llvm::IntegerType::get(VMContext, LLVMPointerWidth);
llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtr,
+ llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(Ty).getQuantity());
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *BP = Int8PtrTy;
if (Loc->getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP, "tmp");
-
- llvm::Value *NotVolatile =
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
-
- // If the initializer is all zeros, codegen with memset.
- if (isa<llvm::ConstantAggregateZero>(Init)) {
- llvm::Value *Zero =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
- Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
- Loc, Zero, SizeVal, AlignVal, NotVolatile);
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(Init,
+ CGM.getTargetData().getTypeAllocSize(Init->getType()))) {
+ Builder.CreateMemSet(Loc, Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+ if (!Init->isNullValue()) {
+ Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(Init, Loc, Builder);
+ }
+
} else {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
@@ -747,20 +849,36 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (SrcPtr->getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
- Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
- SizeVal->getType()),
- Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, Align.getQuantity(), false);
}
} else if (Ty->isReferenceType()) {
RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
+ if (isByRef) {
+        // When the RHS has side effects, we must go through the
+        // "forwarding" field to get to the address of the __block
+        // variable's storage.
+ if (Init->HasSideEffects(getContext()))
+ Loc = BuildBlockByrefAddress(DeclPtr, &D);
+ else
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+ }
EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
} else if (Init->getType()->isAnyComplexType()) {
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
EmitComplexExprIntoAddr(Init, Loc, isVolatile);
} else {
- EmitAggExpr(Init, Loc, isVolatile);
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+ EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false));
}
}
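
The HasSideEffects check above guards against initializers that can themselves copy the descriptor to the heap mid-initialization. A sketch using Clang's -fblocks extension, where capture() is a hypothetical callee that may Block_copy its argument:

    extern int capture(int (^)(void));

    void g() {
      // capture() may Block_copy the block, moving 'x' to the heap before
      // the initializing store executes; the store must therefore go
      // through x's forwarding pointer rather than into the stack copy.
      __block int x = capture(^{ return x; });
      (void)x;
    }
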
@@ -805,9 +923,8 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
}
// If this is a block variable, clean it up.
- // FIXME: this should be an EH cleanup as well. rdar://problem/8224178
if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
- EHStack.pushCleanup<CallBlockRelease>(NormalCleanup, DeclPtr);
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, DeclPtr);
}
/// Emit an alloca (or GlobalValue depending on target)
@@ -817,7 +934,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
QualType Ty = D.getType();
- CanQualType CTy = getContext().getCanonicalType(Ty);
llvm::Value *DeclPtr;
// If this is an aggregate or variable sized value, reuse the input pointer.
@@ -829,8 +945,9 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
// Store the initial value into the alloca.
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
- EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty);
+ EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
+ getContext().getDeclAlign(&D).getQuantity(), Ty,
+ CGM.getTBAAInfo(Ty));
}
Arg->setName(D.getName());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index e2f1975..e295267 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
#include "CGCXXABI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
@@ -34,14 +35,24 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
if (!CGF.hasAggregateLLVMType(T)) {
llvm::Value *V = CGF.EmitScalarExpr(Init);
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
+ CodeGenModule &CGM = CGF.CGM;
+ Qualifiers::GC GCAttr = CGM.getContext().getObjCGCAttrKind(T);
+ if (GCAttr == Qualifiers::Strong)
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, V, DeclPtr,
+ D.isThreadSpecified());
+ else if (GCAttr == Qualifiers::Weak)
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, V, DeclPtr);
+ else
+ CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
- CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
+ CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, isVolatile, true));
}
}
+/// Emit code to cause the destruction of the given variable with
+/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *DeclPtr) {
CodeGenModule &CGM = CGF.CGM;
@@ -106,15 +117,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
return;
}
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
std::vector<const llvm::Type *> Params;
Params.push_back(Int8PtrTy);
// Get the destructor function type
const llvm::Type *DtorFnTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ Params, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
Params.clear();
@@ -138,6 +147,11 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
}
+void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr) {
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr);
+}
+
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
const llvm::FunctionType *FTy,
@@ -145,20 +159,22 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
+ if (!CGM.getContext().getLangOptions().AppleKext) {
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().Target.getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+ }
- // Set the section if needed.
- if (const char *Section =
- CGM.getContext().Target.getStaticInitSectionSpecifier())
- Fn->setSection(Section);
-
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
Fn->setDoesNotThrow();
return Fn;
}
void
-CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr) {
const llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -167,7 +183,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
- CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr);
if (D->hasAttr<InitPriorityAttr>()) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
@@ -240,13 +256,20 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
AddGlobalDtor(Fn);
}
+/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
- const VarDecl *D) {
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
- llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
- EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+ // Use guarded initialization if the global variable is weak due to
+ // being a class template's static data member.
+ if (Addr->hasWeakLinkage() && D->getInstantiatedFromStaticDataMember()) {
+ EmitCXXGuardedInit(*D, Addr);
+ } else {
+ EmitCXXGlobalVarDeclInit(*D, Addr);
+ }
FinishFunction();
}
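
The weak-linkage test above targets instantiated static data members: every translation unit that uses the member emits its own weak definition and its own __cxx_global_var_init, so the dynamic initializer must be guarded to run exactly once. For example (compute() is a stand-in for any non-constant initializer):

    extern int compute();

    template <class T> struct S { static int x; };
    template <class T> int S<T>::x = compute();  // weak: one copy per TU

    int touch() { return S<int>::x; }  // instantiation triggers guarded init
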
@@ -283,143 +306,6 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
FinishFunction();
}
-static llvm::Constant *getGuardAcquireFn(CodeGenFunction &CGF) {
- // int __cxa_guard_acquire(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.ConvertType(CGF.getContext().IntTy),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
-}
-
-static llvm::Constant *getGuardReleaseFn(CodeGenFunction &CGF) {
- // void __cxa_guard_release(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
-}
-
-static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
- // void __cxa_guard_abort(__int64_t *guard_object);
-
- const llvm::Type *Int64PtrTy =
- llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
-
- std::vector<const llvm::Type*> Args(1, Int64PtrTy);
-
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*isVarArg=*/false);
-
- return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
-}
-
-namespace {
- struct CallGuardAbort : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
- CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- // It shouldn't be possible for this to throw, but if it can,
- // this should allow for the possibility of an invoke.
- CGF.Builder.CreateCall(getGuardAbortFn(CGF), Guard)
- ->setDoesNotThrow();
- }
- };
-}
-
-void
-CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
- llvm::GlobalVariable *GV) {
- // Bail out early if this initializer isn't reachable.
- if (!Builder.GetInsertBlock()) return;
-
- bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
-
- llvm::SmallString<256> GuardVName;
- CGM.getCXXABI().getMangleContext().mangleGuardVariable(&D, GuardVName);
-
- // Create the guard variable.
- llvm::GlobalVariable *GuardVariable =
- new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
- false, GV->getLinkage(),
- llvm::Constant::getNullValue(Int64Ty),
- GuardVName.str());
-
- // Load the first byte of the guard variable.
- const llvm::Type *PtrTy
- = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
- llvm::Value *V =
- Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
-
- llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
- llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
-
- // Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"),
- InitCheckBlock, EndBlock);
-
- EmitBlock(InitCheckBlock);
-
- // Variables used when coping with thread-safe statics and exceptions.
- if (ThreadsafeStatics) {
- // Call __cxa_guard_acquire.
- V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
-
- llvm::BasicBlock *InitBlock = createBasicBlock("init");
-
- Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
- InitBlock, EndBlock);
-
- // Call __cxa_guard_abort along the exceptional edge.
- if (Exceptions)
- EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
-
- EmitBlock(InitBlock);
- }
-
- if (D.getType()->isReferenceType()) {
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
- QualType T = D.getType();
- RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
- EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, Alignment, T);
- } else
- EmitDeclInit(*this, D, GV);
-
- if (ThreadsafeStatics) {
- // Pop the guard-abort cleanup if we pushed one.
- if (Exceptions)
- PopCleanupBlock();
-
- // Call __cxa_guard_release. This cannot throw.
- Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
- } else {
- llvm::Value *One =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
- Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
- }
-
- // Register the call to the destructor.
- if (!D.getType()->isReferenceType())
- EmitDeclDestroy(*this, D, GV);
-
- EmitBlock(EndBlock);
-}
-
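
For reference, the thread-safe path of the code deleted here (now emitted via CGCXXABI::EmitGuardedInit) follows the Itanium C++ ABI guard protocol. A rough sketch, with the guard entry points declared locally and constructObject() standing in for the emitted initialization:

    #include <stdint.h>

    // Itanium ABI guard entry points, declared here only for the sketch;
    // the real signatures take the ABI's __guard type.
    extern "C" int  __cxa_guard_acquire(int64_t *);
    extern "C" void __cxa_guard_release(int64_t *);

    extern void constructObject();
    static int64_t guard;

    static void guardedInit() {
      if (*reinterpret_cast<char *>(&guard) == 0) { // fast path: done yet?
        if (__cxa_guard_acquire(&guard)) {          // nonzero: we initialize
          // The deleted code pushes a cleanup calling __cxa_guard_abort
          // here, so a throwing initializer resets the guard.
          constructObject();
          __cxa_guard_release(&guard);              // publish; cannot throw
        }
      }
    }
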
/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
/// invoked, calls the default destructor on array elements in reverse order of
/// construction.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index 7fb616e5..6181965 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -14,163 +14,18 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CGException.h"
+#include "CGCleanup.h"
#include "TargetInfo.h"
using namespace clang;
using namespace CodeGen;
-/// Push an entry of the given size onto this protected-scope stack.
-char *EHScopeStack::allocate(size_t Size) {
- if (!StartOfBuffer) {
- unsigned Capacity = 1024;
- while (Capacity < Size) Capacity *= 2;
- StartOfBuffer = new char[Capacity];
- StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
- } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
- unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
- unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
-
- unsigned NewCapacity = CurrentCapacity;
- do {
- NewCapacity *= 2;
- } while (NewCapacity < UsedCapacity + Size);
-
- char *NewStartOfBuffer = new char[NewCapacity];
- char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
- char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
- memcpy(NewStartOfData, StartOfData, UsedCapacity);
- delete [] StartOfBuffer;
- StartOfBuffer = NewStartOfBuffer;
- EndOfBuffer = NewEndOfBuffer;
- StartOfData = NewStartOfData;
- }
-
- assert(StartOfBuffer + Size <= StartOfData);
- StartOfData -= Size;
- return StartOfData;
-}
-
-EHScopeStack::stable_iterator
-EHScopeStack::getEnclosingEHCleanup(iterator it) const {
- assert(it != end());
- do {
- if (isa<EHCleanupScope>(*it)) {
- if (cast<EHCleanupScope>(*it).isEHCleanup())
- return stabilize(it);
- return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
- }
- ++it;
- } while (it != end());
- return stable_end();
-}
-
-
-void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
- assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
- char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
- bool IsNormalCleanup = Kind & NormalCleanup;
- bool IsEHCleanup = Kind & EHCleanup;
- bool IsActive = !(Kind & InactiveCleanup);
- EHCleanupScope *Scope =
- new (Buffer) EHCleanupScope(IsNormalCleanup,
- IsEHCleanup,
- IsActive,
- Size,
- BranchFixups.size(),
- InnermostNormalCleanup,
- InnermostEHCleanup);
- if (IsNormalCleanup)
- InnermostNormalCleanup = stable_begin();
- if (IsEHCleanup)
- InnermostEHCleanup = stable_begin();
-
- return Scope->getCleanupBuffer();
-}
-
-void EHScopeStack::popCleanup() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHCleanupScope>(*begin()));
- EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
- InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
- StartOfData += Cleanup.getAllocatedSize();
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- // Destroy the cleanup.
- Cleanup.~EHCleanupScope();
-
- // Check whether we can shrink the branch-fixups stack.
- if (!BranchFixups.empty()) {
- // If we no longer have any normal cleanups, all the fixups are
- // complete.
- if (!hasNormalCleanups())
- BranchFixups.clear();
-
- // Otherwise we can still trim out unnecessary nulls.
- else
- popNullFixups();
- }
-}
-
-EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
- char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
- CatchDepth++;
- return new (Buffer) EHFilterScope(NumFilters);
-}
-
-void EHScopeStack::popFilter() {
- assert(!empty() && "popping exception stack when not empty");
-
- EHFilterScope &Filter = cast<EHFilterScope>(*begin());
- StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched filter push/pop");
- CatchDepth--;
-}
-
-EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
- char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
- CatchDepth++;
- EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
- for (unsigned I = 0; I != NumHandlers; ++I)
- Scope->getHandlers()[I].Index = getNextEHDestIndex();
- return Scope;
-}
-
-void EHScopeStack::pushTerminate() {
- char *Buffer = allocate(EHTerminateScope::getSize());
- CatchDepth++;
- new (Buffer) EHTerminateScope(getNextEHDestIndex());
-}
-
-/// Remove any 'null' fixups on the stack. However, we can't pop more
-/// fixups than the fixup depth on the innermost normal cleanup, or
-/// else fixups that we try to add to that cleanup will end up in the
-/// wrong place. We *could* try to shrink fixup depths, but that's
-/// actually a lot of work for little benefit.
-void EHScopeStack::popNullFixups() {
- // We expect this to only be called when there's still an innermost
- // normal cleanup; otherwise there really shouldn't be any fixups.
- assert(hasNormalCleanups());
-
- EHScopeStack::iterator it = find(InnermostNormalCleanup);
- unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
- assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
-
- while (BranchFixups.size() > MinSize &&
- BranchFixups.back().Destination == 0)
- BranchFixups.pop_back();
-}
-
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
@@ -287,7 +142,7 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
}
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
- const char *Name) {
+ llvm::StringRef Name) {
const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
@@ -299,6 +154,7 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
}
const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
+const EHPersonality EHPersonality::GNU_C_SJLJ("__gcc_personality_sj0");
const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
@@ -306,6 +162,8 @@ const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
"objc_exception_throw");
static const EHPersonality &getCPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_C_SJLJ;
return EHPersonality::GNU_C;
}
@@ -357,24 +215,102 @@ const EHPersonality &EHPersonality::get(const LangOptions &L) {
return getCPersonality(L);
}
-static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF,
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
- const char *Name = Personality.getPersonalityFnName();
-
llvm::Constant *Fn =
- CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getInt32Ty(
- CGF.CGM.getLLVMContext()),
- true),
- Name);
- return llvm::ConstantExpr::getBitCast(Fn, CGF.CGM.PtrToInt8Ty);
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(CGM.getLLVMContext()),
+ true),
+ Personality.getPersonalityFnName());
+ return Fn;
+}
+
+static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+}
+
+/// Check whether a personality function could reasonably be swapped
+/// for a C++ personality function.
+static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
+ for (llvm::Constant::use_iterator
+ I = Fn->use_begin(), E = Fn->use_end(); I != E; ++I) {
+ llvm::User *User = *I;
+
+ // Conditionally white-list bitcasts.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
+ if (!PersonalityHasOnlyCXXUses(CE))
+ return false;
+ continue;
+ }
+
+ // Otherwise, it has to be a selector call.
+ if (!isa<llvm::EHSelectorInst>(User)) return false;
+
+ llvm::EHSelectorInst *Selector = cast<llvm::EHSelectorInst>(User);
+ for (unsigned I = 2, E = Selector->getNumArgOperands(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::GlobalVariable *GV
+ = dyn_cast<llvm::GlobalVariable>(Selector->getArgOperand(I));
+ if (!GV) continue;
+
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Try to use the C++ personality function in ObjC++. Not doing this
+/// can cause some incompatibilities with gcc, which is more
+/// aggressive about only using the ObjC++ personality in a function
+/// when it really needs it.
+void CodeGenModule::SimplifyPersonality() {
+ // For now, this is really a Darwin-specific operation.
+ if (Context.Target.getTriple().getOS() != llvm::Triple::Darwin)
+ return;
+
+ // If we're not in ObjC++ -fexceptions, there's nothing to do.
+ if (!Features.CPlusPlus || !Features.ObjC1 || !Features.Exceptions)
+ return;
+
+ const EHPersonality &ObjCXX = EHPersonality::get(Features);
+ const EHPersonality &CXX = getCXXPersonality(Features);
+ if (&ObjCXX == &CXX ||
+ ObjCXX.getPersonalityFnName() == CXX.getPersonalityFnName())
+ return;
+
+ llvm::Function *Fn =
+ getModule().getFunction(ObjCXX.getPersonalityFnName());
+
+ // Nothing to do if it's unused.
+ if (!Fn || Fn->use_empty()) return;
+
+ // Can't do the optimization if it has non-C++ uses.
+ if (!PersonalityHasOnlyCXXUses(Fn)) return;
+
+ // Create the C++ personality function and kill off the old
+ // function.
+ llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+
+ // This can happen if the user is screwing with us.
+ if (Fn->getType() != CXXFn->getType()) return;
+
+ Fn->replaceAllUsesWith(CXXFn);
+ Fn->eraseFromParent();
}
/// Returns the value to inject into a selector to indicate the
/// presence of a catch-all.
static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
// Possibly we should use @llvm.eh.catch.all.value here.
- return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+ return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
}
/// Returns the value to inject into a selector to indicate the
@@ -386,26 +322,11 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeExceptionCleanup : EHScopeStack::Cleanup {
- FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
- llvm::Value *ExnLocVar)
- : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
-
- llvm::Value *ShouldFreeVar;
- llvm::Value *ExnLocVar;
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
-
- llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
- "should-free-exnobj");
- CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
- CGF.EmitBlock(FreeBB);
- llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ struct FreeException {
+ static void Emit(CodeGenFunction &CGF, bool forEH,
+ llvm::Value *exn) {
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
->setDoesNotThrow();
- CGF.EmitBlock(DoneBB);
}
};
}
@@ -414,41 +335,17 @@ namespace {
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
-static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *ExnLoc) {
- // We want to release the allocated exception object if this
- // expression throws. We do this by pushing an EH-only cleanup
- // block which, furthermore, deactivates itself after the expression
- // is complete.
- llvm::AllocaInst *ShouldFreeVar =
- CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
- "should-free-exnobj.var");
- CGF.InitTempAlloca(ShouldFreeVar,
- llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
-
- // A variable holding the exception pointer. This is necessary
- // because the throw expression does not necessarily dominate the
- // cleanup, for example if it appears in a conditional expression.
- llvm::AllocaInst *ExnLocVar =
- CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
-
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
+ llvm::Value *addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- // FIXME: stmt expressions might require this to be a normal
- // cleanup, too.
- CGF.EHStack.pushCleanup<FreeExceptionCleanup>(EHCleanup,
- ShouldFreeVar,
- ExnLocVar);
- EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
-
- CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
- CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
- ShouldFreeVar);
+ CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- const llvm::Type *Ty = CGF.ConvertType(E->getType())->getPointerTo();
- llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
+ const llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -457,22 +354,10 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
- CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false);
-
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
- ShouldFreeVar);
+ CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
- // Technically, the exception object is like a temporary; it has to
- // be cleaned up when its full-expression is complete.
- // Unfortunately, the AST represents full-expressions by creating a
- // CXXExprWithTemporaries, which it only does when there are actually
- // temporaries.
- //
- // If any cleanups have been added since we pushed ours, they must
- // be from temporaries; this will get popped at the same time.
- // Otherwise we need to pop ours off. FIXME: this is very brittle.
- if (Cleanup == CGF.EHStack.stable_begin())
- CGF.PopCleanupBlock();
+ // Deactivate the cleanup block.
+ CGF.DeactivateCleanupBlock(cleanup);
}
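The rewritten EmitAnyExprToExn above swaps the old should-free flag variable for a push-then-deactivate cleanup. A toy model of that control flow (CleanupStack is purely illustrative; the real EHScopeStack allocates scopes in a byte buffer):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Toy cleanup stack: entries run in LIFO order unless deactivated.
struct CleanupStack {
  struct Entry { std::function<void()> fn; bool active; };
  std::vector<Entry> entries;

  std::size_t push(std::function<void()> fn) {
    entries.push_back({std::move(fn), true});
    return entries.size() - 1;
  }
  void deactivate(std::size_t i) { entries[i].active = false; }
  void runActive() {                 // the exceptional path
    for (auto it = entries.rbegin(); it != entries.rend(); ++it)
      if (it->active) it->fn();
  }
};

int main() {
  CleanupStack stack;
  // Corresponds to pushFullExprCleanup<FreeException>(EHCleanup, addr).
  std::size_t c = stack.push([] { std::cout << "freeing exception object\n"; });

  bool initializerThrew = false;     // flip to true to watch the cleanup fire
  if (initializerThrew)
    stack.runActive();               // EH path: FreeException releases the object
  else
    stack.deactivate(c);             // init succeeded: disarm, don't pop
}
```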
llvm::Value *CodeGenFunction::getExceptionSlot() {
@@ -495,8 +380,10 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
Builder.CreateUnreachable();
}
- // Clear the insertion point to indicate we are in unreachable code.
- Builder.ClearInsertionPoint();
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+
return;
}
@@ -517,7 +404,8 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
// Now throw the exception.
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, true);
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
+ /*ForEH=*/true);
// The address of the destructor. If the exception type has a
// trivial destructor (or isn't a record), we just pass null.
@@ -545,16 +433,13 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
Builder.CreateUnreachable();
}
- // Clear the insertion point to indicate we are in unreachable code.
- Builder.ClearInsertionPoint();
-
- // FIXME: For now, emit a dummy basic block because expr emitters in general
- // are not ready to handle emitting expressions at unreachable points.
- EnsureInsertPoint();
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -575,13 +460,14 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
for (unsigned I = 0; I != NumExceptions; ++I) {
QualType Ty = Proto->getExceptionType(I);
QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
Filter->setFilter(I, EHType);
}
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -625,7 +511,7 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
if (CaughtType->isObjCObjectPointerType())
TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
else
- TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, /*ForEH=*/true);
CatchScope->setHandler(I, TypeInfo, Handler);
} else {
// No exception decl indicates '...', a catch-all.
@@ -655,7 +541,7 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- if (!Exceptions)
+ if (!CGM.getLangOptions().areExceptionsEnabled())
return 0;
// Check the innermost scope for a cached landing pad. If this is
@@ -739,8 +625,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- const EHPersonality &Personality =
- EHPersonality::get(CGF.CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(getLangOptions());
// Create and configure the landing pad.
llvm::BasicBlock *LP = createBasicBlock("lpad");
@@ -757,7 +642,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Build the selector arguments.
llvm::SmallVector<llvm::Value*, 8> EHSelector;
EHSelector.push_back(Exn);
- EHSelector.push_back(getPersonalityFn(*this, Personality));
+ EHSelector.push_back(getOpaquePersonalityFn(CGM, Personality));
// Accumulate all the handlers in scope.
llvm::DenseMap<llvm::Value*, UnwindDest> EHHandlers;
@@ -837,7 +722,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// If we have a catch-all, add null to the selector.
if (CatchAll.isValid()) {
- EHSelector.push_back(getCatchAllValue(CGF));
+ EHSelector.push_back(getCatchAllValue(*this));
// If we have an EH filter, we need to add those handlers in the
// right place in the selector, which is to say, at the end.
@@ -853,14 +738,14 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Also check whether we need a cleanup.
if (UseInvokeInlineHack || HasEHCleanup)
EHSelector.push_back(UseInvokeInlineHack
- ? getCatchAllValue(CGF)
- : getCleanupValue(CGF));
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
// Otherwise, signal that we at least have cleanups.
} else if (UseInvokeInlineHack || HasEHCleanup) {
EHSelector.push_back(UseInvokeInlineHack
- ? getCatchAllValue(CGF)
- : getCleanupValue(CGF));
+ ? getCatchAllValue(*this)
+ : getCleanupValue(*this));
} else {
assert(LastToEmitInLoop > 2);
LastToEmitInLoop--;
@@ -902,7 +787,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Check whether the exception matches.
llvm::CallInst *Id
= Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Builder.CreateBitCast(Type, Int8PtrTy));
Id->setDoesNotThrow();
Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
Match, Next);
@@ -1141,55 +1026,54 @@ static void InitCatchParam(CodeGenFunction &CGF,
return;
}
- // FIXME: this *really* needs to be done via a proper, Sema-emitted
- // initializer expression.
-
- CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
- assert(RD && "aggregate catch type was not a record!");
+ assert(isa<RecordType>(CatchType) && "unexpected catch type!");
const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
- if (RD->hasTrivialCopyConstructor()) {
- llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
- CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ // Check for a copy expression. If we don't have a copy expression,
+ // that means a trivial copy is okay.
+ const Expr *copyExpr = CatchParam.getInit();
+ if (!copyExpr) {
+ llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
return;
}
// We have to call __cxa_get_exception_ptr to get the adjusted
// pointer before copying.
- llvm::CallInst *AdjustedExn =
+ llvm::CallInst *rawAdjustedExn =
CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
- AdjustedExn->setDoesNotThrow();
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ rawAdjustedExn->setDoesNotThrow();
- CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
- assert(CD && "record has no copy constructor!");
- llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+ // Cast that to the appropriate type.
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
- CD->getThisType(CGF.getContext())));
- CallArgs.push_back(std::make_pair(RValue::get(Cast),
- CD->getParamDecl(0)->getType()));
-
- const FunctionProtoType *FPT
- = CD->getType()->getAs<FunctionProtoType>();
+ // The copy expression is defined in terms of an OpaqueValueExpr.
+ // Find it and map it to the adjusted expression.
+ CodeGenFunction::OpaqueValueMapping
+ opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
+ CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
// Call the copy ctor in a terminate scope.
CGF.EHStack.pushTerminate();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- CopyCtor, ReturnValueSlot(), CallArgs, CD);
+
+ // Perform the copy construction.
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, false, false));
+
+ // Leave the terminate scope.
CGF.EHStack.popTerminate();
+ // Undo the opaque value mapping.
+ opaque.pop();
+
// Finally we can call __cxa_begin_catch.
CallBeginCatch(CGF, Exn, true);
}
/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
-static void BeginCatch(CodeGenFunction &CGF,
- const CXXCatchStmt *S) {
+static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
// We have to be very careful with the ordering of cleanups here:
// C++ [except.throw]p4:
// The destruction [of the exception temporary] occurs
@@ -1221,7 +1105,7 @@ static void BeginCatch(CodeGenFunction &CGF,
}
// Emit the local.
- CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam, &InitCatchParam);
}
namespace {
@@ -1428,7 +1312,7 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
// Whether the finally block is being executed for EH purposes.
- llvm::AllocaInst *ForEHVar = CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
"finally.for-eh");
InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
@@ -1502,7 +1386,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
// Tell the backend what the exception table should be:
// nothing but a catch-all.
- llvm::Value *Args[3] = { Exn, getPersonalityFn(*this, Personality),
+ llvm::Value *Args[3] = { Exn, getOpaquePersonalityFn(CGM, Personality),
getCatchAllValue(*this) };
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
Args, Args+3, "eh.selector")
@@ -1511,7 +1395,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
TerminateCall->setDoesNotThrow();
- CGF.Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
// Restore the saved insertion state.
Builder.restoreIP(SavedIP);
@@ -1553,8 +1437,9 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
+ llvm::StringRef RethrowName = Personality.getCatchallRethrowFnName();
llvm::Constant *RethrowFn;
- if (const char *RethrowName = Personality.getCatchallRethrowFnName())
+ if (!RethrowName.empty())
RethrowFn = getCatchallRethrowFn(*this, RethrowName);
else
RethrowFn = getUnwindResumeOrRethrowFn();
@@ -1569,6 +1454,3 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
return RethrowBlock;
}
-EHScopeStack::Cleanup::~Cleanup() {
- llvm_unreachable("Cleanup is indestructable");
-}
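SimplifyPersonality, added earlier in this file, rests on LLVM's replace-all-uses idiom. A sketch of that idiom against LLVM's C++ API (modern include paths and signatures, so treat them as assumptions relative to the older tree being patched):

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

// If OldName is present, referenced, and type-compatible with NewFn,
// redirect every use to NewFn and erase the old declaration.
static void replacePersonality(llvm::Module &M, llvm::StringRef OldName,
                               llvm::Function *NewFn) {
  llvm::Function *OldFn = M.getFunction(OldName);
  if (!OldFn || OldFn->use_empty())
    return;                               // nothing to do if it's unused
  if (OldFn->getType() != NewFn->getType())
    return;                               // RAUW requires matching types
  OldFn->replaceAllUsesWith(NewFn);       // rewrite all users in place
  OldFn->eraseFromParent();               // drop the now-dead function
}
```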
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
index f129474..1f9b896 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
@@ -15,578 +15,40 @@
#ifndef CLANG_CODEGEN_CGEXCEPTION_H
#define CLANG_CODEGEN_CGEXCEPTION_H
-/// EHScopeStack is defined in CodeGenFunction.h, but its
-/// implementation is in this file and in CGException.cpp.
-#include "CodeGenFunction.h"
-
-namespace llvm {
- class Value;
- class BasicBlock;
-}
+#include "llvm/ADT/StringRef.h"
namespace clang {
+class LangOptions;
+
namespace CodeGen {
/// The exceptions personality for a function.
class EHPersonality {
- const char *PersonalityFn;
+ llvm::StringRef PersonalityFn;
// If this is non-null, this personality requires a non-standard
// function for rethrowing an exception after a catchall cleanup.
// This function must have prototype void(void*).
- const char *CatchallRethrowFn;
+ llvm::StringRef CatchallRethrowFn;
- EHPersonality(const char *PersonalityFn,
- const char *CatchallRethrowFn = 0)
+ EHPersonality(llvm::StringRef PersonalityFn,
+ llvm::StringRef CatchallRethrowFn = llvm::StringRef())
: PersonalityFn(PersonalityFn),
CatchallRethrowFn(CatchallRethrowFn) {}
public:
static const EHPersonality &get(const LangOptions &Lang);
static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
static const EHPersonality GNU_ObjC;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
- const char *getPersonalityFnName() const { return PersonalityFn; }
- const char *getCatchallRethrowFnName() const { return CatchallRethrowFn; }
-};
-
-/// A protected scope for zero-cost EH handling.
-class EHScope {
- llvm::BasicBlock *CachedLandingPad;
-
- unsigned K : 2;
-
-protected:
- enum { BitsRemaining = 30 };
-
-public:
- enum Kind { Cleanup, Catch, Terminate, Filter };
-
- EHScope(Kind K) : CachedLandingPad(0), K(K) {}
-
- Kind getKind() const { return static_cast<Kind>(K); }
-
- llvm::BasicBlock *getCachedLandingPad() const {
- return CachedLandingPad;
- }
-
- void setCachedLandingPad(llvm::BasicBlock *Block) {
- CachedLandingPad = Block;
- }
-};
-
-/// A scope which attempts to handle some, possibly all, types of
-/// exceptions.
-///
-/// Objective C @finally blocks are represented using a cleanup scope
-/// after the catch scope.
-class EHCatchScope : public EHScope {
- unsigned NumHandlers : BitsRemaining;
-
- // In effect, we have a flexible array member
- // Handler Handlers[0];
- // But that's only standard in C99, not C++, so we have to do
- // annoying pointer arithmetic instead.
-
-public:
- struct Handler {
- /// A type info value, or null (C++ null, not an LLVM null pointer)
- /// for a catch-all.
- llvm::Value *Type;
-
- /// The catch handler for this type.
- llvm::BasicBlock *Block;
-
- /// The unwind destination index for this handler.
- unsigned Index;
- };
-
-private:
- friend class EHScopeStack;
-
- Handler *getHandlers() {
- return reinterpret_cast<Handler*>(this+1);
- }
-
- const Handler *getHandlers() const {
- return reinterpret_cast<const Handler*>(this+1);
- }
-
-public:
- static size_t getSizeForNumHandlers(unsigned N) {
- return sizeof(EHCatchScope) + N * sizeof(Handler);
- }
-
- EHCatchScope(unsigned NumHandlers)
- : EHScope(Catch), NumHandlers(NumHandlers) {
- }
-
- unsigned getNumHandlers() const {
- return NumHandlers;
- }
-
- void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
- setHandler(I, /*catchall*/ 0, Block);
- }
-
- void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
- assert(I < getNumHandlers());
- getHandlers()[I].Type = Type;
- getHandlers()[I].Block = Block;
- }
-
- const Handler &getHandler(unsigned I) const {
- assert(I < getNumHandlers());
- return getHandlers()[I];
- }
-
- typedef const Handler *iterator;
- iterator begin() const { return getHandlers(); }
- iterator end() const { return getHandlers() + getNumHandlers(); }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Catch;
- }
-};
-
-/// A cleanup scope which generates the cleanup blocks lazily.
-class EHCleanupScope : public EHScope {
- /// Whether this cleanup needs to be run along normal edges.
- bool IsNormalCleanup : 1;
-
- /// Whether this cleanup needs to be run along exception edges.
- bool IsEHCleanup : 1;
-
- /// Whether this cleanup was activated before all normal uses.
- bool ActivatedBeforeNormalUse : 1;
-
- /// Whether this cleanup was activated before all EH uses.
- bool ActivatedBeforeEHUse : 1;
-
- /// The amount of extra storage needed by the Cleanup.
- /// Always a multiple of the scope-stack alignment.
- unsigned CleanupSize : 12;
-
- /// The number of fixups required by enclosing scopes (not including
- /// this one). If this is the top cleanup scope, all the fixups
- /// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining - 16;
-
- /// The nearest normal cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingNormal;
-
- /// The nearest EH cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingEH;
-
- /// The dual entry/exit block along the normal edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *NormalBlock;
-
- /// The dual entry/exit block along the EH edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *EHBlock;
-
- /// An optional i1 variable indicating whether this cleanup has been
- /// activated yet. This has one of three states:
- /// - it is null if the cleanup is inactive
- /// - it is activeSentinel() if the cleanup is active and was not
- /// required before activation
- /// - it points to a valid variable
- llvm::AllocaInst *ActiveVar;
-
- /// Extra information required for cleanups that have resolved
- /// branches through them. This has to be allocated on the side
- /// because everything on the cleanup stack has to be trivially
- /// movable.
- struct ExtInfo {
- /// The destinations of normal branch-afters and branch-throughs.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
-
- /// Normal branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- BranchAfters;
-
- /// The destinations of EH branch-afters and branch-throughs.
- /// TODO: optimize for the extremely common case of a single
- /// branch-through.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
-
- /// EH branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- EHBranchAfters;
- };
- mutable struct ExtInfo *ExtInfo;
-
- struct ExtInfo &getExtInfo() {
- if (!ExtInfo) ExtInfo = new struct ExtInfo();
- return *ExtInfo;
- }
-
- const struct ExtInfo &getExtInfo() const {
- if (!ExtInfo) ExtInfo = new struct ExtInfo();
- return *ExtInfo;
- }
-
-public:
- /// Gets the size required for a lazy cleanup scope with the given
- /// cleanup-data requirements.
- static size_t getSizeForCleanupSize(size_t Size) {
- return sizeof(EHCleanupScope) + Size;
- }
-
- size_t getAllocatedSize() const {
- return sizeof(EHCleanupScope) + CleanupSize;
- }
-
- EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
- unsigned CleanupSize, unsigned FixupDepth,
- EHScopeStack::stable_iterator EnclosingNormal,
- EHScopeStack::stable_iterator EnclosingEH)
- : EHScope(EHScope::Cleanup),
- IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
- ActivatedBeforeNormalUse(IsActive),
- ActivatedBeforeEHUse(IsActive),
- CleanupSize(CleanupSize), FixupDepth(FixupDepth),
- EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
- NormalBlock(0), EHBlock(0),
- ActiveVar(IsActive ? activeSentinel() : 0),
- ExtInfo(0)
- {
- assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
- }
-
- ~EHCleanupScope() {
- delete ExtInfo;
- }
-
- bool isNormalCleanup() const { return IsNormalCleanup; }
- llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
- void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
-
- bool isEHCleanup() const { return IsEHCleanup; }
- llvm::BasicBlock *getEHBlock() const { return EHBlock; }
- void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
-
- static llvm::AllocaInst *activeSentinel() {
- return reinterpret_cast<llvm::AllocaInst*>(1);
- }
-
- bool isActive() const { return ActiveVar != 0; }
- llvm::AllocaInst *getActiveVar() const { return ActiveVar; }
- void setActiveVar(llvm::AllocaInst *Var) { ActiveVar = Var; }
-
- bool wasActivatedBeforeNormalUse() const { return ActivatedBeforeNormalUse; }
- void setActivatedBeforeNormalUse(bool B) { ActivatedBeforeNormalUse = B; }
-
- bool wasActivatedBeforeEHUse() const { return ActivatedBeforeEHUse; }
- void setActivatedBeforeEHUse(bool B) { ActivatedBeforeEHUse = B; }
-
- unsigned getFixupDepth() const { return FixupDepth; }
- EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
- return EnclosingNormal;
- }
- EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
- return EnclosingEH;
- }
-
- size_t getCleanupSize() const { return CleanupSize; }
- void *getCleanupBuffer() { return this + 1; }
-
- EHScopeStack::Cleanup *getCleanup() {
- return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
- }
-
- /// True if this cleanup scope has any branch-afters or branch-throughs.
- bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
-
- /// Add a branch-after to this cleanup scope. A branch-after is a
- /// branch from a point protected by this (normal) cleanup to a
- /// point in the normal cleanup scope immediately containing it.
- /// For example,
- /// for (;;) { A a; break; }
- /// contains a branch-after.
- ///
- /// Branch-afters each have their own destination out of the
- /// cleanup, guaranteed distinct from anything else threaded through
- /// it. Therefore branch-afters usually force a switch after the
- /// cleanup.
- void addBranchAfter(llvm::ConstantInt *Index,
- llvm::BasicBlock *Block) {
- struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.Branches.insert(Block))
- ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
- }
-
- /// Return the number of unique branch-afters on this scope.
- unsigned getNumBranchAfters() const {
- return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
- }
-
- llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
- assert(I < getNumBranchAfters());
- return ExtInfo->BranchAfters[I].first;
- }
-
- llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
- assert(I < getNumBranchAfters());
- return ExtInfo->BranchAfters[I].second;
- }
-
- /// Add a branch-through to this cleanup scope. A branch-through is
- /// a branch from a scope protected by this (normal) cleanup to an
- /// enclosing scope other than the immediately-enclosing normal
- /// cleanup scope.
- ///
- /// In the following example, the branch through B's scope is a
- /// branch-through, while the branch through A's scope is a
- /// branch-after:
- /// for (;;) { A a; B b; break; }
- ///
- /// All branch-throughs have a common destination out of the
- /// cleanup, one possibly shared with the fall-through. Therefore
- /// branch-throughs usually don't force a switch after the cleanup.
- ///
- /// \return true if the branch-through was new to this scope
- bool addBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().Branches.insert(Block);
- }
-
- /// Determines if this cleanup scope has any branch throughs.
- bool hasBranchThroughs() const {
- if (!ExtInfo) return false;
- return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
- }
-
- // Same stuff, only for EH branches instead of normal branches.
- // It's quite possible that we could find a better representation
- // for this.
-
- bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
- void addEHBranchAfter(llvm::ConstantInt *Index,
- llvm::BasicBlock *Block) {
- struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.EHBranches.insert(Block))
- ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
- }
-
- unsigned getNumEHBranchAfters() const {
- return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
- }
-
- llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].first;
- }
-
- llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].second;
- }
-
- bool addEHBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().EHBranches.insert(Block);
- }
-
- bool hasEHBranchThroughs() const {
- if (!ExtInfo) return false;
- return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
- }
-
- static bool classof(const EHScope *Scope) {
- return (Scope->getKind() == Cleanup);
- }
-};
-
-/// An exceptions scope which filters exceptions thrown through it.
-/// Only exceptions matching the filter types will be permitted to be
-/// thrown.
-///
-/// This is used to implement C++ exception specifications.
-class EHFilterScope : public EHScope {
- unsigned NumFilters : BitsRemaining;
-
- // Essentially ends in a flexible array member:
- // llvm::Value *FilterTypes[0];
-
- llvm::Value **getFilters() {
- return reinterpret_cast<llvm::Value**>(this+1);
- }
-
- llvm::Value * const *getFilters() const {
- return reinterpret_cast<llvm::Value* const *>(this+1);
- }
-
-public:
- EHFilterScope(unsigned NumFilters) :
- EHScope(Filter), NumFilters(NumFilters) {}
-
- static size_t getSizeForNumFilters(unsigned NumFilters) {
- return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
- }
-
- unsigned getNumFilters() const { return NumFilters; }
-
- void setFilter(unsigned I, llvm::Value *FilterValue) {
- assert(I < getNumFilters());
- getFilters()[I] = FilterValue;
- }
-
- llvm::Value *getFilter(unsigned I) const {
- assert(I < getNumFilters());
- return getFilters()[I];
- }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Filter;
- }
+ llvm::StringRef getPersonalityFnName() const { return PersonalityFn; }
+ llvm::StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
};
-/// An exceptions scope which calls std::terminate if any exception
-/// reaches it.
-class EHTerminateScope : public EHScope {
- unsigned DestIndex : BitsRemaining;
-public:
- EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
- static size_t getSize() { return sizeof(EHTerminateScope); }
-
- unsigned getDestIndex() const { return DestIndex; }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Terminate;
- }
-};
-
-/// A non-stable pointer into the scope stack.
-class EHScopeStack::iterator {
- char *Ptr;
-
- friend class EHScopeStack;
- explicit iterator(char *Ptr) : Ptr(Ptr) {}
-
-public:
- iterator() : Ptr(0) {}
-
- EHScope *get() const {
- return reinterpret_cast<EHScope*>(Ptr);
- }
-
- EHScope *operator->() const { return get(); }
- EHScope &operator*() const { return *get(); }
-
- iterator &operator++() {
- switch (get()->getKind()) {
- case EHScope::Catch:
- Ptr += EHCatchScope::getSizeForNumHandlers(
- static_cast<const EHCatchScope*>(get())->getNumHandlers());
- break;
-
- case EHScope::Filter:
- Ptr += EHFilterScope::getSizeForNumFilters(
- static_cast<const EHFilterScope*>(get())->getNumFilters());
- break;
-
- case EHScope::Cleanup:
- Ptr += static_cast<const EHCleanupScope*>(get())
- ->getAllocatedSize();
- break;
-
- case EHScope::Terminate:
- Ptr += EHTerminateScope::getSize();
- break;
- }
-
- return *this;
- }
-
- iterator next() {
- iterator copy = *this;
- ++copy;
- return copy;
- }
-
- iterator operator++(int) {
- iterator copy = *this;
- operator++();
- return copy;
- }
-
- bool encloses(iterator other) const { return Ptr >= other.Ptr; }
- bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
-
- bool operator==(iterator other) const { return Ptr == other.Ptr; }
- bool operator!=(iterator other) const { return Ptr != other.Ptr; }
-};
-
-inline EHScopeStack::iterator EHScopeStack::begin() const {
- return iterator(StartOfData);
-}
-
-inline EHScopeStack::iterator EHScopeStack::end() const {
- return iterator(EndOfBuffer);
-}
-
-inline void EHScopeStack::popCatch() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHCatchScope>(*begin()));
- StartOfData += EHCatchScope::getSizeForNumHandlers(
- cast<EHCatchScope>(*begin()).getNumHandlers());
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
-}
-
-inline void EHScopeStack::popTerminate() {
- assert(!empty() && "popping exception stack when not empty");
-
- assert(isa<EHTerminateScope>(*begin()));
- StartOfData += EHTerminateScope::getSize();
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
-}
-
-inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
- assert(sp.isValid() && "finding invalid savepoint");
- assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
- return iterator(EndOfBuffer - sp.Size);
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::stabilize(iterator ir) const {
- assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
- return stable_iterator(EndOfBuffer - ir.Ptr);
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveNormalCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingNormalCleanup();
- }
- return stable_end();
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveEHCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingEHCleanup();
- }
- return stable_end();
-}
-
}
}
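The header now reduces to the EHPersonality value class shown above. A compilable sketch of its new shape, with std::string_view standing in for llvm::StringRef (hasCatchallRethrowFn is an illustrative helper, not part of the patch):

```cpp
#include <string_view>

class EHPersonality {
  std::string_view PersonalityFn;
  std::string_view CatchallRethrowFn;  // empty string replaces the old null sentinel

public:
  constexpr EHPersonality(std::string_view Personality,
                          std::string_view CatchallRethrow = {})
      : PersonalityFn(Personality), CatchallRethrowFn(CatchallRethrow) {}

  constexpr std::string_view getPersonalityFnName() const { return PersonalityFn; }
  constexpr std::string_view getCatchallRethrowFnName() const {
    return CatchallRethrowFn;
  }
  // Callers such as getRethrowDest() now test for emptiness, not null.
  constexpr bool hasCatchallRethrowFn() const { return !CatchallRethrowFn.empty(); }
};

constexpr EHPersonality NeXT_ObjC{"__objc_personality_v0",
                                  "objc_exception_throw"};
static_assert(NeXT_ObjC.hasCatchallRethrowFn());
```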
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index 3750ab8..1b7e7a0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -29,6 +29,18 @@ using namespace CodeGen;
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
+llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
+ unsigned addressSpace =
+ cast<llvm::PointerType>(value->getType())->getAddressSpace();
+
+ const llvm::PointerType *destType = Int8PtrTy;
+ if (addressSpace)
+ destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
+
+ if (value->getType() == destType) return value;
+ return Builder.CreateBitCast(value, destType);
+}
+
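EmitCastToVoidPtr above builds an i8* in the pointer's own address space instead of assuming address space 0. The same logic sketched against LLVM's typed-pointer-era IRBuilder (current trees use opaque pointers, so take the API as an assumption):

```cpp
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

static llvm::Value *castToVoidPtr(llvm::IRBuilder<> &B, llvm::Value *V) {
  auto *SrcTy = llvm::cast<llvm::PointerType>(V->getType());
  unsigned AS = SrcTy->getAddressSpace();   // preserve the source address space
  llvm::Type *DestTy = B.getInt8PtrTy(AS);  // i8* in that same address space
  if (V->getType() == DestTy)
    return V;                               // no-op when already i8*
  return B.CreateBitCast(V, DestTy);
}
```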
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
@@ -68,7 +80,7 @@ llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
llvm::Value *MemPtr = EmitScalarExpr(E);
- return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
}
QualType BoolTy = getContext().BoolTy;
@@ -78,35 +90,40 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}
-/// EmitAnyExpr - Emit code to compute the specified expression which can have
-/// any type. The result is returned as an RValue struct. If this is an
-/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
- bool IsAggLocVolatile, bool IgnoreResult,
- bool IsInitializer) {
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
+ bool IgnoreResult) {
if (!hasAggregateLLVMType(E->getType()))
return RValue::get(EmitScalarExpr(E, IgnoreResult));
else if (E->getType()->isAnyComplexType())
- return RValue::getComplex(EmitComplexExpr(E, false, false,
- IgnoreResult, IgnoreResult));
+ return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
- EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
- return RValue::getAggregate(AggLoc, IsAggLocVolatile);
+ EmitAggExpr(E, AggSlot, IgnoreResult);
+ return AggSlot.asRValue();
}
/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
-RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
- bool IsAggLocVolatile,
- bool IsInitializer) {
- llvm::Value *AggLoc = 0;
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
+ AggValueSlot AggSlot = AggValueSlot::ignored();
if (hasAggregateLLVMType(E->getType()) &&
!E->getType()->isAnyComplexType())
- AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
- return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
- IsInitializer);
+ AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggSlot);
}
/// EmitAnyExprToMem - Evaluate an expression into a given memory
@@ -118,7 +135,7 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
if (E->getType()->isComplexType())
EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
@@ -126,34 +143,34 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
}
}
+namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
-struct SubobjectAdjustment {
- enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
-
- union {
- struct {
- const CastExpr *BasePath;
- const CXXRecordDecl *DerivedClass;
- } DerivedToBase;
-
- FieldDecl *Field;
+ struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment) {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment) {
+ this->Field = Field;
+ }
};
-
- SubobjectAdjustment(const CastExpr *BasePath,
- const CXXRecordDecl *DerivedClass)
- : Kind(DerivedToBaseAdjustment)
- {
- DerivedToBase.BasePath = BasePath;
- DerivedToBase.DerivedClass = DerivedClass;
- }
-
- SubobjectAdjustment(FieldDecl *Field)
- : Kind(FieldAdjustment)
- {
- this->Field = Field;
- }
-};
+}
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
@@ -161,8 +178,10 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
llvm::SmallString<256> Name;
- CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Name);
-
+ llvm::raw_svector_ostream Out(Name);
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
+ Out.flush();
+
const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
// Create the reference temporary.
@@ -180,14 +199,14 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
}
static llvm::Value *
-EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
+EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
const NamedDecl *InitializedDecl) {
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
- if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
+ if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
@@ -197,10 +216,9 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
RValue RV;
- if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
+ if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
-
if (LV.isSimple())
return LV.getAddress();
@@ -232,8 +250,8 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
continue;
}
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
- ME->getBase()->getType()->isRecordType()) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ assert(ME->getBase()->getType()->isRecordType());
if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
E = ME->getBase();
Adjustments.push_back(SubobjectAdjustment(Field));
@@ -247,13 +265,16 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
// Create a reference temporary if necessary.
+ AggValueSlot AggSlot = AggValueSlot::ignored();
if (CGF.hasAggregateLLVMType(E->getType()) &&
- !E->getType()->isAnyComplexType())
+ !E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
+ InitializedDecl != 0);
+ }
- RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false,
- /*IgnoreResult=*/false, InitializedDecl);
+ RV = CGF.EmitAnyExpr(E, AggSlot);
if (InitializedDecl) {
// Get the destructor for the reference temporary.
@@ -328,7 +349,7 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
RValue
-CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
@@ -343,8 +364,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
if (VD->hasGlobalStorage()) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
- CGF.EmitCXXGlobalDtorRegistration(DtorFn,
- cast<llvm::Constant>(ReferenceTemporary));
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
return RValue::get(Value);
}
@@ -370,14 +391,14 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
if (!CatchUndefined)
return;
- Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
+ // This needs to be to the standard address space.
+ Address = Builder.CreateBitCast(Address, Int8PtrTy);
const llvm::Type *IntPtrT = IntPtrTy;
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
- const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
// In time, people may want to control this and use a 1 here.
- llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
+ llvm::Value *Arg = Builder.getFalse();
llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
llvm::BasicBlock *Cont = createBasicBlock();
llvm::BasicBlock *Check = createBasicBlock();
@@ -468,7 +489,8 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
LValue LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
+ EmitCheck(LV.getAddress(),
+ getContext().getTypeSizeInChars(E->getType()).getQuantity());
return LV;
}
@@ -498,7 +520,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
case Expr::CompoundAssignOperatorClass:
- return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+ if (!E->getType()->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
@@ -523,8 +547,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
- case Expr::CXXExprWithTemporariesClass:
- return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
+ case Expr::ExprWithCleanupsClass:
+ return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
case Expr::CXXScalarValueInitExprClass:
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
@@ -538,11 +562,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
case Expr::ObjCPropertyRefExprClass:
return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
- case Expr::ObjCImplicitSetterGetterRefExprClass:
- return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
- case Expr::ObjCSuperExprClass:
- return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
-
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
@@ -557,8 +576,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
case Expr::ConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::BinaryConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::OpaqueValueExprClass:
+ return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -571,35 +594,58 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty) {
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo) {
llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
if (Volatile)
Load->setVolatile(true);
if (Alignment)
Load->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Load, TBAAInfo);
- // Bool can have different representation in memory than in registers.
- llvm::Value *V = Load;
- if (Ty->isBooleanType())
- if (V->getType() != llvm::Type::getInt1Ty(VMContext))
- V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
+ return EmitFromMemory(Load, Ty);
+}
- return V;
+static bool isBooleanUnderlyingType(QualType Ty) {
+ if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+ return false;
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment,
- QualType Ty) {
+llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ // This should really always be an i1, but sometimes it's already
+ // an i8, and it's awkward to track those cases down.
+ if (Value->getType()->isIntegerTy(1))
+ return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
+ assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
+ }
- if (Ty->isBooleanType()) {
- // Bool can have different representation in memory than in registers.
- const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
- Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
+ return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
+ return Value;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment,
+ QualType Ty,
+ llvm::MDNode *TBAAInfo) {
+ Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (Alignment)
Store->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Store, TBAAInfo);
}
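EmitToMemory/EmitFromMemory above encode the rule that bool is i1 in registers but i8 in memory. A conceptual model in plain C++, with uint8_t playing the i8 memory slot (an analogy for the zext/trunc pair, not actual codegen):

```cpp
#include <cassert>
#include <cstdint>

// Store path: widen the register value (i1) to the memory width (i8).
static uint8_t toMemory(bool reg) { return reg ? 1 : 0; }   // zext i1 -> i8

// Load path: narrow the memory byte back down to the register value.
static bool fromMemory(uint8_t mem) { return mem & 1; }     // trunc i8 -> i1

int main() {
  uint8_t slot = toMemory(true);    // what EmitStoreOfScalar does for bool
  assert(fromMemory(slot));         // what EmitLoadOfScalar recovers
  slot = toMemory(false);
  assert(!fromMemory(slot));
}
```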
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -622,7 +668,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
// Everything needs a load.
return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
- LV.getAlignment(), ExprType));
+ LV.getAlignment(), ExprType,
+ LV.getTBAAInfo()));
}
@@ -641,11 +688,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
if (LV.isBitField())
return EmitLoadOfBitfieldLValue(LV, ExprType);
- if (LV.isPropertyRef())
- return EmitLoadOfPropertyRefLValue(LV, ExprType);
-
- assert(LV.isKVCRef() && "Unknown LValue type!");
- return EmitLoadOfKVCRefLValue(LV, ExprType);
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return EmitLoadOfPropertyRefLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
@@ -671,13 +715,13 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Offset by the byte offset, if used.
if (AI.FieldByteOffset) {
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
- Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = EmitCastToVoidPtr(Ptr);
Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
+ AI.AccessWidth,
ExprType.getAddressSpace());
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -720,16 +764,6 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
return RValue::get(Res);
}
-RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
- QualType ExprType) {
- return EmitObjCPropertyGet(LV.getPropertyRefExpr());
-}
-
-RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
- QualType ExprType) {
- return EmitObjCPropertyGet(LV.getKVCRefExpr());
-}
-
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
@@ -757,9 +791,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
- Vec = Builder.CreateShuffleVector(Vec,
- llvm::UndefValue::get(Vec->getType()),
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
MaskV, "tmp");
return RValue::get(Vec);
}
@@ -790,11 +823,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isBitField())
return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
- if (Dst.isPropertyRef())
- return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
-
- assert(Dst.isKVCRef() && "Unknown LValue type");
- return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+ assert(Dst.isPropertyRef() && "Unknown LValue type");
+ return EmitStoreThroughPropertyRefLValue(Src, Dst);
}
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
@@ -831,7 +861,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
assert(Src.isScalar() && "Can't emit an agg store with this method");
EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
- Dst.isVolatileQualified(), Dst.getAlignment(), Ty);
+ Dst.isVolatileQualified(), Dst.getAlignment(), Ty,
+ Dst.getTBAAInfo());
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -877,6 +908,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the field pointer.
llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+ unsigned addressSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
// Only offset by the field index if used, so that incoming values are not
// required to be structures.
@@ -885,14 +918,15 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Offset by the byte offset, if used.
if (AI.FieldByteOffset) {
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
- Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = EmitCastToVoidPtr(Ptr);
Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
- Ty.getAddressSpace());
+ const llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
+
+ const llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Extract the piece of the bit-field value to write in this access, limited
@@ -904,8 +938,6 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
AI.TargetBitWidth));
// Extend or truncate to the access size.
- const llvm::Type *AccessLTy =
- llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
if (ResSizeInBits < AI.AccessWidth)
Val = Builder.CreateZExt(Val, AccessLTy);
else if (ResSizeInBits > AI.AccessWidth)
@@ -938,18 +970,6 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
}
-void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
- EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
-}
-
-void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
- EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
-}
-
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst,
QualType Ty) {
@@ -975,7 +995,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
MaskV, "tmp");
@@ -990,8 +1010,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
for (; i != NumDstElts; ++i)
ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
- llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
- ExtMask.size());
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
@@ -1006,7 +1025,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
unsigned Idx = getAccessedFieldNo(i, Elts);
Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
}
- llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
} else {
// We should never shorten the vector
@@ -1040,8 +1059,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
- if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
- VD->isFileVarDecl()) {
+ if (VD->hasGlobalStorage()) {
LV.setGlobalObjCRef(true);
LV.setThreadLocalRef(VD->isThreadSpecified());
}
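VarDecl::hasGlobalStorage() folds the two conditions the old code spelled out into a single predicate. What it covers, in source terms:

    int g;                 // file-scope variable: global storage
    void f(void) {
      static int s;        // static local: also global storage
      int l;               // automatic: local storage only
    }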
@@ -1116,7 +1134,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
const Expr *E, const FunctionDecl *FD) {
- llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
+ llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -1135,10 +1153,10 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- unsigned Alignment = CGF.getContext().getDeclAlign(ND).getQuantity();
+ unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
if (ND->hasAttr<WeakRefAttr>()) {
- const ValueDecl* VD = cast<ValueDecl>(ND);
+ const ValueDecl *VD = cast<ValueDecl>(ND);
llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
return MakeAddrLValue(Aliasee, E->getType(), Alignment);
}
@@ -1149,20 +1167,18 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->hasExternalStorage() || VD->isFileVarDecl())
return EmitGlobalVarDeclLValue(*this, E, VD);
- bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+ bool NonGCable = VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType() &&
+ !VD->hasAttr<BlocksAttr>();
llvm::Value *V = LocalDeclMap[VD];
- if (!V && getContext().getLangOptions().CPlusPlus &&
- VD->isStaticLocal())
+ if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (VD->hasAttr<BlocksAttr>()) {
- V = Builder.CreateStructGEP(V, 1, "forwarding");
- V = Builder.CreateLoad(V);
- V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
- VD->getNameAsString());
- }
+ if (VD->hasAttr<BlocksAttr>())
+ V = BuildBlockByrefAddress(V, VD);
+
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
@@ -1174,24 +1190,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
-
- // If we're emitting an instance method as an independent lvalue,
- // we're actually emitting a member pointer.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isInstance()) {
- llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(MD);
- return MakeAddrLValue(V, MD->getType(), Alignment);
- }
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
- return EmitFunctionDeclLValue(*this, E, FD);
-
- // If we're emitting a field as an independent lvalue, we're
- // actually emitting a member pointer.
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(ND)) {
- llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(FD);
- return MakeAddrLValue(V, FD->getType(), Alignment);
- }
-
+
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, fn);
+
assert(false && "Unhandled DeclRefExpr");
// an invalid LValue, but the assert will
// ensure that this point is never reached.
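Dropping the member-pointer cases looks safe because a member pointer is only ever formed through the address-of operator, so that emission presumably lives on the '&' path rather than on a bare DeclRefExpr; illustratively:

    struct S { void f(); int x; };
    void (S::*pmf)() = &S::f;   // '&' over the DeclRefExpr, not a lone ref
    int S::*pmd = &S::x;        // likewise for data members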
@@ -1201,7 +1203,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
unsigned Alignment =
- CGF.getContext().getDeclAlign(E->getDecl()).getQuantity();
+ getContext().getDeclAlign(E->getDecl()).getQuantity();
return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
@@ -1233,9 +1235,22 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
case UO_Real:
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+ llvm::Value *Addr = LV.getAddress();
+
+ // real and imag are valid on scalars. This is a faster way of
+ // testing that.
+ if (!cast<llvm::PointerType>(Addr->getType())
+ ->getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
unsigned Idx = E->getOpcode() == UO_Imag;
return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
- Idx, "idx"),
+ Idx, "idx"),
ExprTy);
}
case UO_PreInc:
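The added fast path distinguishes __real__/__imag__ on a genuine complex value, which is a two-element {re, im} struct and needs the struct GEP, from the GNU extension that also permits them on plain scalars:

    _Complex double c;
    double d;
    double r = __real__ c;   // GEP to element 0 of { double, double }
    double i = __imag__ c;   // GEP to element 1
    double s = __real__ d;   // scalar: the l-value is just 'd', no GEP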
@@ -1297,7 +1312,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
CurDecl = getContext().getTranslationUnitDecl();
std::string FunctionName =
- PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl);
+ (isa<BlockDecl>(CurDecl)
+ ? FnName.str()
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
llvm::Constant *C =
CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
@@ -1363,15 +1380,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vidx");
+ Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType().getCVRQualifiers());
}
// Extend or truncate the index type to 32 or 64-bits.
- if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
- Idx = Builder.CreateIntCast(Idx, IntPtrTy,
- IdxSigned, "idxprom");
+ if (Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
// FIXME: Once llvm implements the object size checking, this can come out.
if (CatchUndefined) {
@@ -1401,17 +1417,12 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, VLASize);
- QualType BaseType = getContext().getBaseElementType(VAT);
-
- CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
- Idx = Builder.CreateUDiv(Idx,
- llvm::ConstantInt::get(Idx->getType(),
- BaseTypeSize.getQuantity()));
-
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
-
- Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
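The reworked VLA branch above scales the index by the runtime element size (VLASize) and then walks bytes through an i8* GEP instead of dividing back down. The kind of source that reaches it:

    void f(int n, int m) {
      int a[n][m];
      a[1][2] = 0;   // indexing a[1] multiplies by the dynamic row size
    }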
@@ -1420,12 +1431,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
-
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
- Idx, "arrayidx");
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
@@ -1469,7 +1478,7 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
- return llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return llvm::ConstantVector::get(CElts);
}
LValue CodeGenFunction::
@@ -1485,7 +1494,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
Base = MakeAddrLValue(Ptr, PT->getPointeeType());
Base.getQuals().removeObjCGCAttr();
- } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
+ } else if (E->getBase()->isGLValue()) {
// Otherwise, if the base is an lvalue (as in the case of foo.x.x),
// emit the base as an lvalue.
assert(E->getBase()->getType()->isVectorType());
@@ -1507,7 +1516,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
+ llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV,
Base.getVRQualifiers());
}
@@ -1522,7 +1531,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
else
CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
}
- llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ llvm::Constant *CV = llvm::ConstantVector::get(CElts);
return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
Base.getVRQualifiers());
}
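These constant vectors are the shuffle masks produced by OpenCL-style swizzles on ext_vector types, for example:

    typedef float float2 __attribute__((ext_vector_type(2)));
    typedef float float4 __attribute__((ext_vector_type(4)));
    float2 lo(float4 v) { return v.xy; }     // mask {0, 1}
    float4 rev(float4 v) { return v.wzyx; }  // mask {3, 2, 1, 0}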
@@ -1539,12 +1548,6 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
const PointerType *PTy =
BaseExpr->getType()->getAs<PointerType>();
BaseQuals = PTy->getPointeeType().getQualifiers();
- } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
- isa<ObjCImplicitSetterGetterRefExpr>(
- BaseExpr->IgnoreParens())) {
- RValue RV = EmitObjCPropertyGet(BaseExpr);
- BaseValue = RV.getAggregateAddr();
- BaseQuals = BaseExpr->getType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
if (BaseLV.isNonGC())
@@ -1574,8 +1577,8 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
return LValue();
}
-LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
- const FieldDecl* Field,
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(Field->getParent());
@@ -1589,23 +1592,13 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
/// that the base value is a pointer to the enclosing record, derive
/// an lvalue for the ultimate field.
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
- const FieldDecl *Field,
+ const IndirectFieldDecl *Field,
unsigned CVRQualifiers) {
- llvm::SmallVector<const FieldDecl *, 8> Path;
- Path.push_back(Field);
-
- while (Field->getParent()->isAnonymousStructOrUnion()) {
- const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
- if (!isa<FieldDecl>(VD)) break;
- Field = cast<FieldDecl>(VD);
- Path.push_back(Field);
- }
-
- llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
- I = Path.rbegin(), E = Path.rend();
+ IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
+ IEnd = Field->chain_end();
while (true) {
- LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
- if (++I == E) return LV;
+ LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), CVRQualifiers);
+ if (++I == IEnd) return LV;
assert(LV.isSimple());
BaseValue = LV.getAddress();
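An IndirectFieldDecl records the whole chain from the outermost record to the named member, so the old loop that walked back up through anonymous parents becomes a straight iteration. In source terms (C11/GNU anonymous members):

    struct S {
      struct {        // anonymous struct
        int x;
      };
    };
    // For s.x the chain is roughly { <anonymous member>, x }; the loop
    // above emits one EmitLValueForField per link.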
@@ -1613,8 +1606,8 @@ LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
}
}
-LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
- const FieldDecl* Field,
+LValue CodeGenFunction::EmitLValueForField(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
if (Field->isBitField())
return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
@@ -1628,7 +1621,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
if (Field->getParent()->isUnion()) {
const llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(Field->getType());
- const llvm::PointerType * BaseTy =
+ const llvm::PointerType *BaseTy =
cast<llvm::PointerType>(BaseValue->getType());
unsigned AS = BaseTy->getAddressSpace();
V = Builder.CreateBitCast(V,
@@ -1650,8 +1643,8 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
}
LValue
-CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
- const FieldDecl* Field,
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
+ const FieldDecl *Field,
unsigned CVRQualifiers) {
QualType FieldType = Field->getType();
@@ -1669,71 +1662,74 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
return MakeAddrLValue(V, FieldType, Alignment);
}
-LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
- const Expr* InitExpr = E->getInitializer();
+ const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
- EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
+ EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
return Result;
}
-LValue
-CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
- if (E->isLvalue(getContext()) == Expr::LV_Valid) {
- if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
- Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
- if (Live)
- return EmitLValue(Live);
- }
+LValue CodeGenFunction::
+EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(expr->getType()) &&
+ !expr->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
- if (!E->getLHS())
- return EmitUnsupportedLValue(E, "conditional operator with missing LHS");
+ const Expr *condExpr = expr->getCond();
- llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
- llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
-
- EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ if (int condValue = ConstantFoldsToSimpleInteger(condExpr)) {
+ const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
+ if (condValue == -1) std::swap(live, dead);
+
+ if (!ContainsLabel(dead))
+ return EmitLValue(live);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+
+ llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
- // Any temporaries created here are conditional.
- BeginConditionalBranch();
- EmitBlock(LHSBlock);
- LValue LHS = EmitLValue(E->getLHS());
- EndConditionalBranch();
+ // Any temporaries created here are conditional.
+ EmitBlock(lhsBlock);
+ eval.begin(*this);
+ LValue lhs = EmitLValue(expr->getTrueExpr());
+ eval.end(*this);
- if (!LHS.isSimple())
- return EmitUnsupportedLValue(E, "conditional operator");
+ if (!lhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
- // FIXME: We shouldn't need an alloca for this.
- llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
- Builder.CreateStore(LHS.getAddress(), Temp);
- EmitBranch(ContBlock);
-
- // Any temporaries created here are conditional.
- BeginConditionalBranch();
- EmitBlock(RHSBlock);
- LValue RHS = EmitLValue(E->getRHS());
- EndConditionalBranch();
- if (!RHS.isSimple())
- return EmitUnsupportedLValue(E, "conditional operator");
-
- Builder.CreateStore(RHS.getAddress(), Temp);
- EmitBranch(ContBlock);
-
- EmitBlock(ContBlock);
+ lhsBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(contBlock);
- Temp = Builder.CreateLoad(Temp, "lv");
- return MakeAddrLValue(Temp, E->getType());
- }
-
- // ?: here should be an aggregate.
- assert((hasAggregateLLVMType(E->getType()) &&
- !E->getType()->isAnyComplexType()) &&
- "Unexpected conditional operator!");
+ // Any temporaries created here are conditional.
+ EmitBlock(rhsBlock);
+ eval.begin(*this);
+ LValue rhs = EmitLValue(expr->getFalseExpr());
+ eval.end(*this);
+ if (!rhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+ rhsBlock = Builder.GetInsertBlock();
- return EmitAggExprToLValue(E);
+ EmitBlock(contBlock);
+
+ llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(),
+ "cond-lvalue");
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(lhs.getAddress(), lhsBlock);
+ phi->addIncoming(rhs.getAddress(), rhsBlock);
+ return MakeAddrLValue(phi, expr->getType());
}
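The rewritten conditional-operator path emits each arm in its own block and joins the two resulting addresses with a phi, instead of spilling them through a temporary alloca. It serves glvalue conditionals such as:

    int a, b;
    void set(bool c) {
      (c ? a : b) = 42;   // C++ l-value ?:; the address is the phi
    }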
/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
@@ -1747,36 +1743,57 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_ToVoid:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
-
- case CK_NoOp:
- if (E->getSubExpr()->Classify(getContext()).getKind()
- != Expr::Classification::CL_PRValue) {
- LValue LV = EmitLValue(E->getSubExpr());
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getSubExpr()->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert(!RV.isScalar() && "EmitCastLValue-scalar cast of property ref");
- llvm::Value *V = RV.getAggregateAddr();
- return MakeAddrLValue(V, QT);
- }
- return LV;
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_GetObjCProperty: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isPropertyRef());
+ RValue RV = EmitLoadOfPropertyRefLValue(LV);
+
+ // Property is an aggregate r-value.
+ if (RV.isAggregate()) {
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}
+
+ // Implicit property returns an l-value.
+ assert(RV.isScalar());
+ return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
+ }
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ if (!E->getSubExpr()->Classify(getContext()).isPRValue()
+ || E->getType()->isRecordType())
+ return EmitLValue(E->getSubExpr());
// Fall through to synthesize a temporary.
-
- case CK_Unknown:
+
case CK_BitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToMemberPointer:
+ case CK_NullToPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
case CK_VectorSplat:
case CK_IntegralCast:
+ case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
@@ -1810,17 +1827,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getSubExpr()->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert (!RV.isScalar() && "EmitCastLValue");
- This = RV.getAggregateAddr();
- }
- else
- This = LV.getAddress();
+ llvm::Value *This = LV.getAddress();
// Perform the derived-to-base conversion
llvm::Value *Base =
@@ -1876,6 +1883,11 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
return LV;
}
+LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(e->isGLValue() || e->getType()->isRecordType());
+ return getOpaqueLValueMapping(e);
+}
+
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
@@ -1922,7 +1934,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
// Comma expressions just emit their LHS then their RHS as an l-value.
if (E->getOpcode() == BO_Comma) {
- EmitAnyExpr(E->getLHS());
+ EmitIgnoredExpr(E->getLHS());
EnsureInsertPoint();
return EmitLValue(E->getRHS());
}
@@ -1930,20 +1942,20 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemD ||
E->getOpcode() == BO_PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
-
- // Can only get l-value for binary operator expressions which are a
- // simple assignment of aggregate type.
- if (E->getOpcode() != BO_Assign)
- return EmitUnsupportedLValue(E, "binary l-value expression");
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
if (!hasAggregateLLVMType(E->getType())) {
- // Emit the LHS as an l-value.
+ // __block variables need the RHS evaluated first.
+ RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitLValue(E->getLHS());
- // Store the value through the l-value.
- EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType());
+ EmitStoreThroughLValue(RV, LV, E->getType());
return LV;
}
-
+
+ if (E->getType()->isAnyComplexType())
+ return EmitComplexAssignmentLValue(E);
+
return EmitAggExprToLValue(E);
}
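Evaluating the RHS before the LHS matters for __block variables (Clang's blocks extension): if the RHS copies a referencing block to the heap, the variable's storage moves, so its address must be taken afterwards. A sketch, assuming -fblocks; rhs() is a placeholder for any call that might trigger such a copy:

    __block int x = 0;
    // If rhs() copies a block capturing x to the heap, x relocates;
    // taking &x first would store into the stale stack slot.
    x = rhs();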
@@ -1966,9 +1978,11 @@ LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
}
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
- llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
- EmitCXXConstructExpr(Temp, E);
- return MakeAddrLValue(Temp, E->getType());
+ assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
+ && "binding l-value to type which needs a temporary");
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "tmp");
+ EmitCXXConstructExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
}
LValue
@@ -1978,9 +1992,11 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
- LValue LV = EmitLValue(E->getSubExpr());
- EmitCXXTemporary(E->getTemporary(), LV.getAddress());
- return LV;
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ Slot.setLifetimeExternallyManaged();
+ EmitAggExpr(E->getSubExpr(), Slot);
+ EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
@@ -2040,24 +2056,6 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
return LV;
}
-LValue
-CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
- // This is a special l-value that just issues sends when we load or store
- // through it.
- return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
-}
-
-LValue CodeGenFunction::EmitObjCKVCRefLValue(
- const ObjCImplicitSetterGetterRefExpr *E) {
- // This is a special l-value that just issues sends when we load or store
- // through it.
- return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
-}
-
-LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
- return EmitUnsupportedLValue(E, "use of super");
-}
-
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
@@ -2078,7 +2076,6 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const FunctionType *FnType
= cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
- QualType ResultType = FnType->getResultType();
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
@@ -2105,4 +2102,3 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
return MakeAddrLValue(AddV, MPT->getPointeeType());
}
-
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index 28c8b35..f992dc7 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -32,27 +32,28 @@ namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
- llvm::Value *DestPtr;
- bool VolatileDest;
+ AggValueSlot Dest;
bool IgnoreResult;
- bool IsInitializer;
- bool RequiresGCollection;
ReturnValueSlot getReturnValueSlot() const {
// If the destination slot requires garbage collection, we can't
// use the real return value slot, because we have to use the GC
// API.
- if (RequiresGCollection) return ReturnValueSlot();
+ if (Dest.requiresGCollection()) return ReturnValueSlot();
- return ReturnValueSlot(DestPtr, VolatileDest);
+ return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
+ }
+
+ AggValueSlot EnsureSlot(QualType T) {
+ if (!Dest.isIgnored()) return Dest;
+ return CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
public:
- AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
- bool ignore, bool isinit, bool requiresGCollection)
- : CGF(cgf), Builder(CGF.Builder),
- DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
- IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+ IgnoreResult(ignore) {
}
//===--------------------------------------------------------------------===//
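The DestPtr/VolatileDest/flag tuple is replaced throughout by AggValueSlot. Its rough shape, inferred only from the uses later in this patch rather than from the header:

    // AggValueSlot bundles the destination address with its qualifiers:
    //   AggValueSlot::forAddr(Addr, isVolatile, isLifetimeManaged, ...)
    //   AggValueSlot::forLValue(LV, ...)
    //   Dest.isIgnored()  // replaces the old "DestPtr == 0" convention
    //   Dest.isZeroed()   // memory already known to be zeroed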
@@ -114,9 +115,8 @@ public:
EmitAggLoadOfLValue(E);
}
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
- void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
@@ -125,10 +125,12 @@ public:
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
- void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
void VisitVAArgExpr(VAArgExpr *E);
void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
@@ -176,13 +178,13 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// directly into the return value slot. If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (RequiresGCollection) {
+ if (Dest.requiresGCollection()) {
std::pair<uint64_t, unsigned> TypeInfo =
CGF.getContext().getTypeInfo(E->getType());
unsigned long size = TypeInfo.first/8;
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
Src.getAggregateAddr(),
SizeVal);
}
@@ -192,13 +194,13 @@ void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
assert(Src.isAggregate() && "value must be aggregate value!");
- // If DestPtr is null, then we're evaluating an aggregate expression
+ // If Dest is ignored, then we're evaluating an aggregate expression
// in a context (like an expression statement) that doesn't care
// about the result. C says that an lvalue-to-rvalue conversion is
// performed in these cases; C++ says that it is not. In either
// case, we don't actually need to do anything unless the value is
// volatile.
- if (DestPtr == 0) {
+ if (Dest.isIgnored()) {
if (!Src.isVolatileQualified() ||
CGF.CGM.getLangOptions().CPlusPlus ||
(IgnoreResult && Ignore))
@@ -206,26 +208,27 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// If the source is volatile, we must read from it; to do that, we need
// some place to put it.
- DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
+ Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
}
- if (RequiresGCollection) {
+ if (Dest.requiresGCollection()) {
std::pair<uint64_t, unsigned> TypeInfo =
CGF.getContext().getTypeInfo(E->getType());
unsigned long size = TypeInfo.first/8;
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- DestPtr, Src.getAggregateAddr(),
- SizeVal);
+ Dest.getAddr(),
+ Src.getAggregateAddr(),
+ SizeVal);
return;
}
// If the result of the assignment is used, copy the LHS there also.
// FIXME: Pass VolatileDest as well. I think we also need to merge volatile
// from the source as well, as we can't eliminate it if either operand
// is volatile, unless the copy has volatile for both source and destination.
- CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
- VolatileDest|Src.isVolatileQualified());
+ CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
+ Dest.isVolatile()|Src.isVolatileQualified());
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
@@ -241,15 +244,17 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
// Visitor Methods
//===----------------------------------------------------------------------===//
+void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+}
+
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
- if (!DestPtr && E->getCastKind() != CK_Dynamic) {
+ if (Dest.isIgnored() && E->getCastKind() != CK_Dynamic) {
Visit(E->getSubExpr());
return;
}
switch (E->getCastKind()) {
- default: assert(0 && "Unhandled cast kind!");
-
case CK_Dynamic: {
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
@@ -259,8 +264,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
- if (DestPtr)
- CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ if (!Dest.isIgnored())
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
break;
}
@@ -268,7 +273,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
QualType PtrTy = CGF.getContext().getPointerType(Ty);
- llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
+ llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
CGF.ConvertType(PtrTy));
EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
Ty);
@@ -283,8 +288,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
- // FIXME: Remove the CK_Unknown check here.
- case CK_Unknown:
+ case CK_GetObjCProperty: {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ assert(LV.isPropertyRef());
+ RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
+ EmitGCMove(E, RV);
+ break;
+ }
+
+ case CK_LValueToRValue: // hope for downstream optimization
case CK_NoOp:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
@@ -293,10 +305,45 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"Implicit cast types must be compatible");
Visit(E->getSubExpr());
break;
-
+
case CK_LValueBitCast:
- llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
break;
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -316,24 +363,18 @@ void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
}
void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
- EmitGCMove(E, RV);
-}
-
-void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ llvm_unreachable("direct property access not surrounded by "
+ "lvalue-to-rvalue cast");
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
- CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
- /*IgnoreResult=*/false, IsInitializer);
+ CGF.EmitIgnoredExpr(E->getLHS());
+ Visit(E->getRHS());
}
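EmitIgnoredExpr makes the contract explicit: the LHS of an aggregate comma is evaluated for side effects only, and the RHS lands directly in the current slot:

    struct Agg { int a[8]; };
    Agg g; int side;
    void f() {
      Agg x = (side = 1, g);   // 'side = 1' ignored; 'g' fills x's slot
    }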
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
- CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
@@ -355,64 +396,62 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType())
&& "Invalid assignment");
+
+ // FIXME: __block variables need the RHS evaluated first!
LValue LHS = CGF.EmitLValue(E->getLHS());
// We have to special case property setters, otherwise we must have
// a simple lvalue (no aggregates inside vectors, bitfields).
if (LHS.isPropertyRef()) {
- llvm::Value *AggLoc = DestPtr;
- if (!AggLoc)
- AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
- RValue::getAggregate(AggLoc, VolatileDest));
- } else if (LHS.isKVCRef()) {
- llvm::Value *AggLoc = DestPtr;
- if (!AggLoc)
- AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
- RValue::getAggregate(AggLoc, VolatileDest));
+ AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), Slot);
+ CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
} else {
- bool RequiresGCollection = false;
+ bool GCollection = false;
if (CGF.getContext().getLangOptions().getGCMode())
- RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());
+ GCollection = TypeRequiresGCollection(E->getLHS()->getType());
// Codegen the RHS so that it stores directly into the LHS.
- CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
- false, false, RequiresGCollection);
+ AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
+ GCollection);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
EmitFinalDestCopy(E, LHS, true);
}
}
-void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
- if (!E->getLHS()) {
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- return;
- }
-
+void AggExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
- CGF.BeginConditionalBranch();
+ // Save whether the destination's lifetime is externally managed.
+ bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();
+
+ eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
+ Visit(E->getTrueExpr());
+ eval.end(CGF);
- // Handle the GNU extension for missing LHS.
- assert(E->getLHS() && "Must have LHS for aggregate value");
+ assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
+ CGF.Builder.CreateBr(ContBlock);
- Visit(E->getLHS());
- CGF.EndConditionalBranch();
- CGF.EmitBranch(ContBlock);
+ // If the result of an agg expression is unused, then the emission
+ // of the LHS might need to create a destination slot. That's fine
+ // with us, and we can safely emit the RHS into the same slot, but
+ // we shouldn't claim that its lifetime is externally managed.
+ Dest.setLifetimeExternallyManaged(DestLifetimeManaged);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
-
- Visit(E->getRHS());
- CGF.EndConditionalBranch();
- CGF.EmitBranch(ContBlock);
+ Visit(E->getFalseExpr());
+ eval.end(CGF);
CGF.EmitBlock(ContBlock);
}
@@ -434,65 +473,78 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
-
- // FIXME: volatile
- CGF.EmitAggExpr(E->getSubExpr(), Val, false);
- } else
- Visit(E->getSubExpr());
-
- // Don't make this a live temporary if we're emitting an initializer expr.
- if (!IsInitializer)
- CGF.EmitCXXTemporary(E->getTemporary(), Val);
+ // Ensure that we have a slot, but if we already do, remember
+ // whether its lifetime was externally managed.
+ bool WasManaged = Dest.isLifetimeExternallyManaged();
+ Dest = EnsureSlot(E->getType());
+ Dest.setLifetimeExternallyManaged();
+
+ Visit(E->getSubExpr());
+
+ // Set up the temporary's destructor if its lifetime wasn't already
+ // being managed.
+ if (!WasManaged)
+ CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
-
- CGF.EmitCXXConstructExpr(Val, E);
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitCXXConstructExpr(E, Slot);
}
-void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- llvm::Value *Val = DestPtr;
-
- CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.EmitExprWithCleanups(E, Dest);
}
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- llvm::Value *Val = DestPtr;
-
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
- E->getType());
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
- llvm::Value *Val = DestPtr;
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+}
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
- E->getType());
+/// isSimpleZero - If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
+ // (0)
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isSimpleZero(PE->getSubExpr(), CGF);
+ // 0
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return IL->getValue() == 0;
+ // +0.0
+ if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+ return FL->getValue().isPosZero();
+ // int()
+ if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ return true;
+ // (int*)0 - Null pointer expressions.
+ if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
+ return ICE->getCastKind() == CK_NullToPointer;
+ // '\0'
+ if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+ return CL->getValue() == 0;
+
+ // Otherwise, hard case: conservatively return false.
+ return false;
}
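Each case in isSimpleZero() corresponds to a source form; an initializer built only from them needs no stores at all once the slot is known zeroed:

    struct P { int i; float f; char c; int *p; };
    P p = { 0, +0.0f, '\0', (int *)0 };   // all four are 'simple zeros'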
+
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
- if (isa<ImplicitValueInitExpr>(E)) {
+ if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+ // Storing "i32 0" to a zero'd memory location is a noop.
+ } else if (isa<ImplicitValueInitExpr>(E)) {
EmitNullInitializationToLValue(LV, T);
} else if (T->isReferenceType()) {
RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
@@ -500,13 +552,19 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
} else if (CGF.hasAggregateLLVMType(T)) {
- CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
+ false, Dest.isZeroed()));
} else {
- CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
}
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ // If the destination slot is already zeroed out before the aggregate is
+ // copied into it, we don't have to emit any zeros here.
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
+ return;
+
if (!CGF.hasAggregateLLVMType(T)) {
// For non-aggregates, we can store zero
llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
@@ -534,9 +592,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
#endif
- if (E->hadArrayRangeDesignator()) {
+ if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- }
+
+ llvm::Value *DestPtr = Dest.getAddr();
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
@@ -563,13 +622,27 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: were we intentionally ignoring address spaces and GC attributes?
for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ // If we're done emitting initializers and the destination is known-zeroed
+ // then we're done.
+ if (i == NumInitElements &&
+ Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(ElementType))
+ break;
+
llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
+
if (i < NumInitElements)
EmitInitializationToLValue(E->getInit(i), LV, ElementType);
-
else
EmitNullInitializationToLValue(LV, ElementType);
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(NextVal))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
}
return;
}
@@ -583,16 +656,6 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
unsigned NumInitElements = E->getNumInits();
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
- // If we're initializing the whole aggregate, just do it in place.
- // FIXME: This is a hack around an AST bug (PR6537).
- if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
- EmitInitializationToLValue(E->getInit(0),
- CGF.MakeAddrLValue(DestPtr, E->getType()),
- E->getType());
- return;
- }
-
-
if (E->getType()->isUnionType()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
@@ -612,13 +675,13 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: volatility
FieldDecl *Field = E->getInitializedFieldInUnion();
- LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
if (NumInitElements) {
// Store the initializer into the field
EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
} else {
- // Default-initialize to null
+ // Default-initialize to null.
EmitNullInitializationToLValue(FieldLoc, Field->getType());
}
@@ -638,10 +701,16 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (Field->isUnnamedBitfield())
continue;
+ // Don't emit GEP before a noop store of zero.
+ if (CurInitVal == NumInitElements && Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ break;
+
// FIXME: volatility
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
// We never generate write-barries for initialized fields.
FieldLoc.setNonGC(true);
+
if (CurInitVal < NumInitElements) {
// Store the initializer into the field.
EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
@@ -650,6 +719,14 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We're out of initializers; default-initialize to null.
EmitNullInitializationToLValue(FieldLoc, Field->getType());
}
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (FieldLoc.isSimple())
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
}
}
@@ -657,31 +734,126 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Entry Points into this File
//===----------------------------------------------------------------------===//
+/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
+/// non-zero bytes that will be stored when outputting the initializer for the
+/// specified initializer expression.
+static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return GetNumNonZeroBytesInInit(PE->getSubExpr(), CGF);
+
+ // 0 and 0.0 won't require any non-zero stores!
+ if (isSimpleZero(E, CGF)) return 0;
+
+ // If this is an initlist expr, sum up the sizes of the (present)
+ // elements. If this is something weird, assume the whole thing is non-zero.
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+ return CGF.getContext().getTypeSize(E->getType())/8;
+
+ // InitListExprs for structs have to be handled carefully. If there are
+ // reference members, we need to consider the size of the reference, not the
+ // referencee. InitListExprs for unions and arrays can't have references.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (!RT->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ uint64_t NumNonZeroBytes = 0;
+
+ unsigned ILEElement = 0;
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member or run out of
+ // InitListExpr elements.
+ if (Field->getType()->isIncompleteArrayType() ||
+ ILEElement == ILE->getNumInits())
+ break;
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ const Expr *E = ILE->getInit(ILEElement++);
+
+ // Reference values are always non-null and have the width of a pointer.
+ if (Field->getType()->isReferenceType())
+ NumNonZeroBytes += CGF.getContext().Target.getPointerWidth(0);
+ else
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+ }
+
+ return NumNonZeroBytes;
+ }
+ }
+
+
+ uint64_t NumNonZeroBytes = 0;
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+ return NumNonZeroBytes;
+}
+
+/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
+/// zeros in it, emit a memset and avoid storing the individual zeros.
+///
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+ CodeGenFunction &CGF) {
+ // If the slot is already known to be zeroed, nothing to do. Don't mess with
+ // volatile stores.
+ if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
+
+ // If the type is 16-bytes or smaller, prefer individual stores over memset.
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ if (TypeInfo.first/8 <= 16)
+ return;
+
+ // Check to see if over 3/4 of the initializer is known to be zero. If so,
+ // we prefer to emit memset + individual stores for the rest.
+ uint64_t NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > TypeInfo.first/8)
+ return;
+
+ // Okay, it seems like a good idea to use an initial memset, emit the call.
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first/8);
+ unsigned Align = TypeInfo.second/8;
+
+ llvm::Value *Loc = Slot.getAddr();
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+
+ Loc = CGF.Builder.CreateBitCast(Loc, BP);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, Align, false);
+
+ // Tell the AggExprEmitter that the slot is known zero.
+ Slot.setZeroed();
+}
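Putting the two helpers together: a large, mostly-zero initializer gets one memset followed by individual stores for the few non-zero elements. A worked example (sizes assume a 32-bit int):

    int big[100] = { 1 };
    // 400 bytes, ~4 of them non-zero: 4*4 <= 400 and 400 > 16, so the
    // checks above emit memset(big, 0, 400) plus one 'store i32 1'.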
+
+
+
+
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type. The result is computed into DestPtr. Note that if DestPtr is null,
/// the value of the aggregate expression is not needed. If VolatileDest is
/// true, DestPtr cannot be 0.
+///
+/// \param IsInitializer - true if this evaluation is initializing an
+/// object whose lifetime is already being managed.
//
// FIXME: Take Qualifiers object.
-void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
- bool VolatileDest, bool IgnoreResult,
- bool IsInitializer,
- bool RequiresGCollection) {
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
+ bool IgnoreResult) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
- assert ((DestPtr != 0 || VolatileDest == false)
- && "volatile aggregate can't be 0");
+ assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
+ "slot has bits but no address");
- AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
- RequiresGCollection)
- .Visit(const_cast<Expr*>(E));
+ // Optimize the slot if possible.
+ CheckAggExprForMemSetUse(Slot, E, *this);
+
+ AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, Temp, LV.isVolatileQualified());
+ EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
return LV;
}
@@ -734,12 +906,12 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
const llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace());
+ llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
const llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace());
+ llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
@@ -766,11 +938,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
- Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
- IntPtrTy),
- DestPtr, SrcPtr,
- // TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
- Builder.getInt32(TypeInfo.second/8),
- Builder.getInt1(isVolatile));
+ Builder.CreateMemCpy(DestPtr, SrcPtr,
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ TypeInfo.second/8, isVolatile);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index 9a98281..bba7864 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -11,9 +11,11 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -51,9 +53,65 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
Callee, ReturnValue, Args, MD);
}
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
-static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
+ const Expr *Base,
+ const CXXMethodDecl *MD) {
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOptions().AppleKext)
+ return false;
+
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+ // struct A { virtual void f(); }
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+ // Similarly, if the class itself is marked 'final' it can't be overridden
+ // and we can therefore devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// This is a record decl. We know the type and can devirtualize it.
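Besides the final-class case in the comment above, the two attribute checks devirtualize a call whenever the method itself or its class is marked final (C++0x syntax, illustrative):

    struct A {
      virtual void f() final;   // no override can exist
    };
    void call(A *a) {
      a->f();   // emitted as a direct call, bypassing the vtable
    }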
@@ -74,11 +132,13 @@ static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
// Check if this is a call expr that returns a record type.
if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
return CE->getCallReturnType()->isRecordType();
-
+
// We can't devirtualize the call.
return false;
}
+// Note: This function also emits constructor calls to support an MSVC
+// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
ReturnValueSlot ReturnValue) {
if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
@@ -87,6 +147,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ && !isa<CallExpr>(ME->getBase())) {
+ QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ MD->getParent()->getLocation());
+ }
+ }
+
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
@@ -98,32 +168,47 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
llvm::Value *This;
if (ME->isArrow())
This = EmitScalarExpr(ME->getBase());
- else {
- LValue BaseLV = EmitLValue(ME->getBase());
- This = BaseLV.getAddress();
- }
+ else
+ This = EmitLValue(ME->getBase()).getAddress();
if (MD->isTrivial()) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
-
- assert(MD->isCopyAssignment() && "unknown trivial member function");
- // We don't like to generate the trivial copy assignment operator when
- // it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
- return RValue::get(This);
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
+ return RValue::get(0);
+
+ if (MD->isCopyAssignmentOperator()) {
+ // We don't like to generate the trivial copy assignment operator when
+ // it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
+ CE->arg_begin(), CE->arg_end());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
}
// Compute the function type we're calling.
- const CGFunctionInfo &FInfo =
- (isa<CXXDestructorDecl>(MD)
- ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete)
- : CGM.getTypes().getFunctionInfo(MD));
+ const CGFunctionInfo *FInfo = 0;
+ if (isa<CXXDestructorDecl>(MD))
+ FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ else if (isa<CXXConstructorDecl>(MD))
+ FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
+ else
+ FInfo = &CGM.getTypes().getFunctionInfo(MD);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
+ = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -131,20 +216,34 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
- bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
- && !canDevirtualizeMemberFunctionCalls(ME->getBase());
-
+ bool UseVirtualCall;
+ UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
+ && !canDevirtualizeMemberFunctionCalls(getContext(),
+ ME->getBase(), MD);
llvm::Value *Callee;
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ if (getContext().getLangOptions().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
}
+ } else if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(MD)) {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
} else if (UseVirtualCall) {
- Callee = BuildVirtualCall(MD, This, Ty);
+ Callee = BuildVirtualCall(MD, This, Ty);
} else {
- Callee = CGM.GetAddrOfFunction(MD, Ty);
+ if (getContext().getLangOptions().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
}
return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
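Two of the branches above hinge on ME->hasQualifier(). In standard C++ a qualified call suppresses virtual dispatch, which is why the non-kext path can take the function's address directly; per this diff, the -fapple-kext path still routes even qualified calls through the vtable. A small sketch of the language rule:

    struct Base { virtual const char *f() { return "Base"; } };
    struct Derived : Base { const char *f() { return "Derived"; } };

    const char *call(Derived *d) {
      d->f();              // virtual dispatch (unless devirtualizable)
      return d->Base::f(); // qualified per [class.virtual]p12: direct call, "Base"
    }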
@@ -180,7 +279,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *Callee =
- CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(CGF, This, MemFnPtr, MPT);
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
CallArgList Args;
@@ -203,29 +302,14 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- if (MD->isCopyAssignment()) {
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This = LV.getAddress();
+
+ if (MD->isCopyAssignmentOperator()) {
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
if (ClassDecl->hasTrivialCopyAssignment()) {
assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
"EmitCXXOperatorMemberCallExpr - user declared copy assignment");
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
- EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
- if (LV.isPropertyRef())
- EmitObjCPropertySet(LV.getPropertyRefExpr(),
- RValue::getAggregate(AggLoc,
- false /*VolatileDest*/));
- else
- EmitObjCPropertySet(LV.getKVCRefExpr(),
- RValue::getAggregate(AggLoc,
- false /*VolatileDest*/));
- return RValue::getAggregate(0, false);
- }
- else
- This = LV.getAddress();
-
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
EmitAggregateCopy(This, Src, Ty);
@@ -237,21 +321,10 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This;
- if (LV.isPropertyRef() || LV.isKVCRef()) {
- QualType QT = E->getArg(0)->getType();
- RValue RV =
- LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
- : EmitLoadOfKVCRefLValue(LV, QT);
- assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
- This = RV.getAggregateAddr();
- }
- else
- This = LV.getAddress();
-
llvm::Value *Callee;
- if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
+ if (MD->isVirtual() &&
+ !canDevirtualizeMemberFunctionCalls(getContext(),
+ E->getArg(0), MD))
Callee = BuildVirtualCall(MD, This, Ty);
else
Callee = CGM.GetAddrOfFunction(MD, Ty);
@@ -261,28 +334,29 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
}
void
-CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
- const CXXConstructExpr *E) {
- assert(Dest && "Must have a destination!");
+CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+ AggValueSlot Dest) {
+ assert(!Dest.isIgnored() && "Must have a destination!");
const CXXConstructorDecl *CD = E->getConstructor();
// If we require zero initialization before (or instead of) calling the
// constructor, as can be the case with a non-user-provided default
// constructor, emit the zero initialization now.
if (E->requiresZeroInitialization())
- EmitNullInitialization(Dest, E->getType());
-
+ EmitNullInitialization(Dest.getAddr(), E->getType());
// If this is a call to a trivial default constructor, do nothing.
if (CD->isTrivial() && CD->isDefaultConstructor())
return;
- // Code gen optimization to eliminate copy constructor and return
- // its first argument instead, if in fact that argument is a temporary
- // object.
+ // Elide the constructor if we're constructing from a temporary.
+ // The temporary check is required because Sema sets this on NRVO
+ // returns.
if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
- if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
- EmitAggExpr(Arg, Dest, false);
+ assert(getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(0)->getType()));
+ if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
+ EmitAggExpr(E->getArg(0), Dest);
return;
}
}
@@ -294,7 +368,7 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest, BasePtr);
+ Builder.CreateBitCast(Dest.getAddr(), BasePtr);
EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
E->arg_begin(), E->arg_end());
@@ -307,11 +381,36 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
// Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
E->arg_begin(), E->arg_end());
}
}
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
+ llvm::Value *Src,
+ const Expr *Exp) {
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
+ Exp = E->getSubExpr();
+ assert(isa<CXXConstructExpr>(Exp) &&
+ "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
+ const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+ const CXXConstructorDecl *CD = E->getConstructor();
+ RunCleanupsScope Scope(*this);
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ // FIXME. Do I still need this for a copy ctor synthesis?
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ assert(!getContext().getAsConstantArrayType(E->getType())
+ && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
+ E->arg_begin(), E->arg_end());
+}
+
/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
@@ -341,7 +440,7 @@ static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
return CharUnits::Zero();
- return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
@@ -387,7 +486,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
unsigned SizeWidth = NEC.getBitWidth();
// Determine if there is an overflow here by doing an extended multiply.
- NEC.zext(SizeWidth*2);
+ NEC = NEC.zext(SizeWidth*2);
llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
SC *= NEC;
@@ -396,8 +495,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
// overflow's already happened because SizeWithoutCookie isn't
// used if the allocator returns null or throws, as it should
// always do on an overflow.
- llvm::APInt SWC = SC;
- SWC.trunc(SizeWidth);
+ llvm::APInt SWC = SC.trunc(SizeWidth);
SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
// Add the cookie size.
@@ -405,7 +503,7 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
}
if (SC.countLeadingZeros() >= SizeWidth) {
- SC.trunc(SizeWidth);
+ SC = SC.trunc(SizeWidth);
Size = llvm::ConstantInt::get(SizeTy, SC);
} else {
// On overflow, produce a -1 so operator new throws.
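These hunks also track an LLVM API change: APInt::zext/trunc now return a new value instead of mutating in place. A self-contained sketch of the same double-width overflow test, with the helper name invented:

    #include "llvm/ADT/APInt.h"

    // True if count * elemSize does not fit in a width-bit size type.
    bool newAllocSizeOverflows(unsigned width, uint64_t count, uint64_t elemSize) {
      llvm::APInt NEC(width, count);
      NEC = NEC.zext(width * 2);            // value-returning, not in-place
      llvm::APInt SC(width * 2, elemSize);
      SC *= NEC;
      // The product fits iff all bits above the low `width` bits are clear.
      return SC.countLeadingZeros() < width;
    }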
@@ -531,8 +629,11 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
- else
- CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+ else {
+ AggValueSlot Slot
+ = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
+ CGF.EmitAggExpr(Init, Slot);
+ }
}
void
@@ -591,18 +692,10 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
llvm::Value *NewPtr, llvm::Value *Size) {
- llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
- if (NewPtr->getType() != BP)
- NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
-
- CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
- llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
- Size,
- llvm::ConstantInt::get(CGF.Int32Ty,
- CGF.getContext().getTypeAlign(T)/8),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
- 0));
+ CGF.EmitCastToVoidPtr(NewPtr);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
+ CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
+ Alignment.getQuantity(), false);
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
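The rewritten EmitZeroMemSet also switches the alignment argument from bits/8 to CharUnits, that is, plain bytes. A sketch of the new helper in isolation, assuming the same 2.9-era CreateMemSet(Ptr, Val, Size, Align, isVolatile) overload the hunk uses:

    // Zero out SizeInBytes bytes at Ptr; Align is in bytes (CharUnits), not bits.
    void emitZeroFill(llvm::IRBuilder<> &B, llvm::Value *Ptr,
                      llvm::Value *SizeInBytes, unsigned Align) {
      B.CreateMemSet(Ptr, B.getInt8(0), SizeInBytes, Align, /*isVolatile=*/false);
    }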
@@ -669,6 +762,163 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression.
+ class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *Ptr;
+ llvm::Value *AllocSize;
+
+ RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(RValue);
+ }
+
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ llvm::Value *AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, RValue Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2)
+ DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I)
+ DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression when the new expression is
+ /// conditional.
+ class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ DominatingValue<RValue>::saved_type Ptr;
+ DominatingValue<RValue>::saved_type AllocSize;
+
+ DominatingValue<RValue>::saved_type *getPlacementArgs() {
+ return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ }
+
+ CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ DominatingValue<RValue>::saved_type Ptr,
+ DominatingValue<RValue>::saved_type AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2) {
+ RValue RV = AllocSize.restore(CGF);
+ DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ }
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+ RValue RV = getPlacementArgs()[I].restore(CGF);
+ DeleteArgs.push_back(std::make_pair(RV, *AI++));
+ }
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+}
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *AllocSize,
+ const CallArgList &NewArgs) {
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!CGF.isInConditionalBranch()) {
+ CallDeleteDuringNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr, AllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I, NewArgs[I+1].first);
+
+ return;
+ }
+
+ // Otherwise, we need to save all this stuff.
+ DominatingValue<RValue>::saved_type SavedNewPtr =
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::saved_type SavedAllocSize =
+ DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+ CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I,
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
+
+ CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
+}
+
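The two cleanup classes and EnterNewDeleteCleanup implement a core language rule: if the initialization of a new-expression exits by an exception, the matching operator delete, including any placement form, is called with the same extra arguments. A standalone C++ demonstration (names invented):

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Throws {
      Throws() { throw 1; } // initializer throws after allocation succeeds
      static void *operator new(std::size_t n, const char *tag) {
        std::printf("operator new(%s)\n", tag);
        return std::malloc(n);
      }
      // Matching placement delete: invoked automatically when the ctor throws.
      static void operator delete(void *p, const char *tag) {
        std::printf("operator delete(%s)\n", tag);
        std::free(p);
      }
      static void operator delete(void *p) { std::free(p); }
    };

    int main() {
      try { new ("demo") Throws; } catch (int) {}
      // Prints: operator new(demo), then operator delete(demo).
    }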
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType AllocType = E->getAllocatedType();
if (AllocType->isArrayType())
@@ -757,13 +1007,22 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
CalculateCookiePadding(*this, E).isZero());
if (AllocSize != AllocSizeWithoutCookie) {
assert(E->isArray());
- NewPtr = CGM.getCXXABI().InitializeArrayCookie(CGF, NewPtr, NumElements,
- AllocType);
+ NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
+ E, AllocType);
+ }
+
+ // If there's an operator delete, enter a cleanup to call it if an
+ // exception is thrown.
+ EHScopeStack::stable_iterator CallOperatorDelete;
+ if (E->getOperatorDelete()) {
+ EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
+ CallOperatorDelete = EHStack.stable_begin();
}
const llvm::Type *ElementPtrTy
= ConvertTypeForMem(AllocType)->getPointerTo(AS);
NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+
if (E->isArray()) {
EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
@@ -776,6 +1035,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
} else {
EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
}
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (CallOperatorDelete.isValid())
+ DeactivateCleanupBlock(CallOperatorDelete);
if (NullCheckResult) {
Builder.CreateBr(NewEnd);
@@ -876,6 +1140,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
// Make sure that we call delete even if the dtor throws.
+ // This doesn't have to be a conditional cleanup because we're going
+ // to pop it off in a second.
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
@@ -950,18 +1216,19 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
- const FunctionDecl *OperatorDelete,
+ const CXXDeleteExpr *E,
llvm::Value *Ptr,
QualType ElementType) {
llvm::Value *NumElements = 0;
llvm::Value *AllocatedPtr = 0;
CharUnits CookieSize;
- CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
NumElements, AllocatedPtr, CookieSize);
assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
// Make sure that we call delete even if one of the dtors throws.
+ const FunctionDecl *OperatorDelete = E->getOperatorDelete();
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
AllocatedPtr, OperatorDelete,
NumElements, ElementType,
@@ -1031,7 +1298,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
cast<llvm::PointerType>(Ptr->getType())->getElementType());
if (E->isArrayForm()) {
- EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
}
@@ -1039,7 +1306,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
EmitBlock(DeleteEnd);
}
-llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
QualType Ty = E->getType();
const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
@@ -1059,8 +1326,6 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// FIXME: if subE is an lvalue do
LValue Obj = EmitLValue(subE);
llvm::Value *This = Obj.getAddress();
- LTy = LTy->getPointerTo()->getPointerTo();
- llvm::Value *V = Builder.CreateBitCast(This, LTy);
// We need to do a zero check for *p, unless it has NonNullAttr.
// FIXME: PointerType->hasAttr<NonNullAttr>()
bool CanBeZero = false;
@@ -1071,12 +1336,12 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
llvm::BasicBlock *NonZeroBlock = createBasicBlock();
llvm::BasicBlock *ZeroBlock = createBasicBlock();
- llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
- Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
+ llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
+ Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
NonZeroBlock, ZeroBlock);
EmitBlock(ZeroBlock);
/// Call __cxa_bad_typeid
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::FunctionType *FTy;
FTy = llvm::FunctionType::get(ResultType, false);
llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
@@ -1084,7 +1349,7 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
Builder.CreateUnreachable();
EmitBlock(NonZeroBlock);
}
- V = Builder.CreateLoad(V, "vtable");
+ llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
V = Builder.CreateLoad(V);
return V;
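The zero check emitted here implements [expr.typeid]: applying typeid to the result of dereferencing a null pointer to a polymorphic type throws std::bad_typeid, via the __cxa_bad_typeid runtime hook called above. In plain C++:

    #include <cstdio>
    #include <typeinfo>

    struct Poly { virtual ~Poly() {} }; // polymorphic: typeid reads the vtable

    int main() {
      Poly *p = 0;
      try {
        (void)typeid(*p);               // null glvalue operand: throws
      } catch (const std::bad_typeid &) {
        std::puts("caught bad_typeid");
      }
    }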
@@ -1141,23 +1406,20 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
// See if this is a dynamic_cast(void*)
if (ToVoid) {
llvm::Value *This = V;
- V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
- V = Builder.CreateLoad(V, "vtable");
+ V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
V = Builder.CreateLoad(V, "offset to top");
- This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
+ This = EmitCastToVoidPtr(This);
V = Builder.CreateInBoundsGEP(This, V);
V = Builder.CreateBitCast(V, LTy);
} else {
/// Call __dynamic_cast
- const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::Type *ResultType = Int8PtrTy;
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *PtrToInt8Ty
- = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(PtrDiffTy);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
@@ -1172,7 +1434,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
llvm::Value *DestArg
= CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
- V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
V, SrcArg, DestArg, hint);
V = Builder.CreateBitCast(V, LTy);
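The offset-to-top load in the ToVoid branch is the Itanium ABI lowering of dynamic_cast<void*>: the vtable stores the displacement back to the most derived object. The observable behavior, in standard C++:

    #include <cassert>

    struct A { virtual ~A() {} int a; };
    struct B { virtual ~B() {} int b; };
    struct D : A, B {};

    int main() {
      D d;
      B *b = &d;                               // points into the middle of d
      void *top = dynamic_cast<void *>(b);     // adjusted by "offset to top"
      assert(top == static_cast<void *>(&d));  // most derived object recovered
    }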
@@ -1182,7 +1444,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
EmitBlock(BadCastBlock);
/// Invoke __cxa_bad_cast
- ResultType = llvm::Type::getVoidTy(VMContext);
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::FunctionType *FBadTy;
FBadTy = llvm::FunctionType::get(ResultType, false);
llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
index 79e9dd4..7b0292b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -35,14 +35,9 @@ class ComplexExprEmitter
// True if we should ignore the value of a
bool IgnoreReal;
bool IgnoreImag;
- // True if we should ignore the value of a=b
- bool IgnoreRealAssign;
- bool IgnoreImagAssign;
public:
- ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
- bool irn=false, bool iin=false)
- : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
- IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
}
@@ -60,36 +55,36 @@ public:
IgnoreImag = false;
return I;
}
- bool TestAndClearIgnoreRealAssign() {
- bool I = IgnoreRealAssign;
- IgnoreRealAssign = false;
- return I;
- }
- bool TestAndClearIgnoreImagAssign() {
- bool I = IgnoreImagAssign;
- IgnoreImagAssign = false;
- return I;
- }
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
ComplexPairTy EmitLoadOfLValue(const Expr *E) {
- LValue LV = CGF.EmitLValue(E);
+ return EmitLoadOfLValue(CGF.EmitLValue(E));
+ }
+
+ ComplexPairTy EmitLoadOfLValue(LValue LV) {
if (LV.isSimple())
return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
- if (LV.isPropertyRef())
- return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
-
- assert(LV.isKVCRef() && "Unknown LValue type!");
- return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
}
/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
/// the real and imaginary pieces.
ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+ /// EmitStoreThroughLValue - Given an l-value of complex type, store
+ /// a complex number into it.
+ void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
+ if (LV.isSimple())
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
+
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ CGF.EmitStoreThroughPropertyRefLValue(RValue::getComplex(Val), LV);
+ }
+
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
@@ -102,6 +97,10 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
+ ComplexPairTy Visit(Expr *E) {
+ return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
+ }
+
ComplexPairTy VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
assert(0 && "Stmt can't have complex result type!");
@@ -117,10 +116,7 @@ public:
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- return EmitLoadOfLValue(E);
- }
- ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
+ assert(E->getObjectKind() == OK_Ordinary);
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -128,6 +124,11 @@ public:
}
ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ }
// FIXME: CompoundLiteralExpr
@@ -165,8 +166,6 @@ public:
ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
return Visit(E->getSubExpr());
}
ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
@@ -178,8 +177,8 @@ public:
ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
- ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
+ return CGF.EmitExprWithCleanups(E).getComplexVal();
}
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
@@ -202,6 +201,10 @@ public:
};
BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ ComplexPairTy &Val);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &));
@@ -211,15 +214,15 @@ public:
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
- ComplexPairTy VisitBinMul(const BinaryOperator *E) {
- return EmitBinMul(EmitBinOps(E));
- }
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
ComplexPairTy VisitBinSub(const BinaryOperator *E) {
return EmitBinSub(EmitBinOps(E));
}
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
return EmitBinDiv(EmitBinOps(E));
}
@@ -242,11 +245,15 @@ public:
// Logical and/or always return int, never complex.
// No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
ComplexPairTy VisitBinAssign (const BinaryOperator *E);
ComplexPairTy VisitBinComma (const BinaryOperator *E);
- ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
ComplexPairTy VisitInitListExpr(InitListExpr *E);
@@ -265,13 +272,13 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
bool isVolatile) {
llvm::Value *Real=0, *Imag=0;
- if (!IgnoreReal) {
+ if (!IgnoreReal || isVolatile) {
llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
SrcPtr->getName() + ".realp");
Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
}
- if (!IgnoreImag) {
+ if (!IgnoreImag || isVolatile) {
llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
SrcPtr->getName() + ".imagp");
Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
@@ -307,8 +314,7 @@ ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
ComplexPairTy ComplexExprEmitter::
VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
- return
- ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
}
@@ -320,6 +326,7 @@ ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
}
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
}
@@ -341,6 +348,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
QualType DestTy) {
+ switch (CK) {
+ case CK_GetObjCProperty: {
+ LValue LV = CGF.EmitLValue(Op);
+ assert(LV.isPropertyRef() && "Unknown LValue type!");
+ return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
+ }
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ return Visit(Op);
+
+ // TODO: do all of these
+ default:
+ break;
+ }
+
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
@@ -372,8 +395,6 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResR, *ResI;
@@ -390,8 +411,6 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
// ~(a+ib) = a + i*-b
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResI;
@@ -505,8 +524,6 @@ ComplexExprEmitter::BinOpInfo
ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
BinOpInfo Ops;
Ops.LHS = Visit(E->getLHS());
Ops.RHS = Visit(E->getRHS());
@@ -515,37 +532,31 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
}
-// Compound assignments.
-ComplexPairTy ComplexExprEmitter::
-EmitCompoundAssign(const CompoundAssignOperator *E,
- ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ ComplexPairTy &Val) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
- QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+ QualType LHSTy = E->getLHS()->getType();
BinOpInfo OpInfo;
// Load the RHS and LHS operands.
// __block variables need to have the rhs evaluated first, plus this should
- // improve codegen a little. It is possible for the RHS to be complex or
- // scalar.
+ // improve codegen a little.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(CK_Unknown, E->getRHS(), OpInfo.Ty);
+
+ // The RHS should have been converted to the computation type.
+ assert(OpInfo.Ty->isAnyComplexType());
+ assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
+ E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
LValue LHS = CGF.EmitLValue(E->getLHS());
- // We know the LHS is a complex lvalue.
- ComplexPairTy LHSComplexPair;
- if (LHS.isPropertyRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal();
- else if (LHS.isKVCRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal();
- else
- LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(),
- LHS.isVolatileQualified());
+
+ // Load from the l-value.
+ ComplexPairTy LHSComplexPair = EmitLoadOfLValue(LHS);
OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
@@ -554,108 +565,107 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// Truncate the result back to the LHS type.
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ Val = Result;
// Store the result value into the LHS lvalue.
- if (LHS.isPropertyRef())
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
- RValue::getComplex(Result));
- else if (LHS.isKVCRef())
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result));
- else
- EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
-
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
-
+ EmitStoreThroughLValue(Result, LHS);
+
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ ComplexPairTy Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
- return Result;
+ if (LV.isPropertyRef())
+ return Val;
- // Otherwise, reload the value.
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
-ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
- TestAndClearIgnoreReal();
- TestAndClearIgnoreImag();
- bool ignreal = TestAndClearIgnoreRealAssign();
- bool ignimag = TestAndClearIgnoreImagAssign();
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType()) &&
"Invalid assignment");
- // Emit the RHS.
- ComplexPairTy Val = Visit(E->getRHS());
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
+ // Emit the RHS. __block variables need the RHS evaluated first.
+ Val = Visit(E->getRHS());
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
// Store the result value into the LHS lvalue.
- if (LHS.isPropertyRef())
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
- else if (LHS.isKVCRef())
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
- else
- EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+ EmitStoreThroughLValue(Val, LHS);
+
+ return LHS;
+}
- // Restore the Ignore* flags.
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return Val;
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LV.isPropertyRef())
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
return Val;
- // Otherwise, reload the value.
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
- CGF.EnsureInsertPoint();
+ CGF.EmitIgnoredExpr(E->getLHS());
return Visit(E->getRHS());
}
ComplexPairTy ComplexExprEmitter::
-VisitConditionalOperator(const ConditionalOperator *E) {
- if (!E->getLHS()) {
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- const llvm::Type *EltTy =
- CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
- llvm::Value *U = llvm::UndefValue::get(EltTy);
- return ComplexPairTy(U, U);
- }
-
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- TestAndClearIgnoreRealAssign();
- TestAndClearIgnoreImagAssign();
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
-
- // Handle the GNU extension for missing LHS.
- assert(E->getLHS() && "Must have LHS for complex value");
-
- ComplexPairTy LHS = Visit(E->getLHS());
+ ComplexPairTy LHS = Visit(E->getTrueExpr());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
+ eval.end(CGF);
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
-
- ComplexPairTy RHS = Visit(E->getRHS());
+ ComplexPairTy RHS = Visit(E->getFalseExpr());
RHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
-
CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
// Create a PHI node for the real part.
llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
@@ -716,12 +726,11 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, ignoring the result.
ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
- bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ bool IgnoreImag) {
assert(E && E->getType()->isAnyComplexType() &&
"Invalid complex expression to emit");
- return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
- IgnoreImagAssign)
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
.Visit(const_cast<Expr*>(E));
}
@@ -749,3 +758,27 @@ ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
bool SrcIsVolatile) {
return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
}
+
+LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+ ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
+ switch (E->getOpcode()) {
+ case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
+ case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
+ case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
+ case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break;
+
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ Op = 0;
+ }
+
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+}
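The new EmitComplexCompoundAssignmentLValue selects the arithmetic emitter through a pointer-to-member-function. A minimal standalone sketch of that dispatch pattern (types and names invented):

    #include <cstdio>

    struct Emitter {
      int add(int a, int b) { return a + b; }
      int mul(int a, int b) { return a * b; }
    };

    int main() {
      int (Emitter::*Op)(int, int) = 0;
      char opcode = '*';
      switch (opcode) {
      case '+': Op = &Emitter::add; break;
      case '*': Op = &Emitter::mul; break;
      }
      Emitter e;
      std::printf("%d\n", (e.*Op)(6, 7)); // prints 42
    }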
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index 9c31c2a..40d7b6c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -142,11 +142,11 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
if (FieldSize > FieldValue.getBitWidth())
- FieldValue.zext(FieldSize);
+ FieldValue = FieldValue.zext(FieldSize);
// Truncate the size of FieldValue to the bit field size.
if (FieldSize < FieldValue.getBitWidth())
- FieldValue.trunc(FieldSize);
+ FieldValue = FieldValue.trunc(FieldSize);
if (FieldOffset < NextFieldOffsetInBytes * 8) {
// Either part of the field or the entire field can go into the previous
@@ -166,20 +166,20 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining high bits.
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
} else {
- Tmp.trunc(BitsInPreviousByte);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
// We want the remaining low bits.
FieldValue = FieldValue.lshr(BitsInPreviousByte);
- FieldValue.trunc(NewFieldWidth);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
}
}
- Tmp.zext(8);
+ Tmp = Tmp.zext(8);
if (CGM.getTargetData().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
@@ -231,13 +231,10 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
// We want the high bits.
- Tmp = FieldValue;
- Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
- Tmp.trunc(8);
+ Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
} else {
// We want the low bits.
- Tmp = FieldValue;
- Tmp.trunc(8);
+ Tmp = FieldValue.trunc(8);
FieldValue = FieldValue.lshr(8);
}
@@ -245,7 +242,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
NextFieldOffsetInBytes++;
- FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
assert(FieldValue.getBitWidth() > 0 &&
@@ -257,10 +254,9 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
if (CGM.getTargetData().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
- FieldValue.zext(8);
- FieldValue = FieldValue << (8 - BitWidth);
+ FieldValue = FieldValue.zext(8) << (8 - BitWidth);
} else
- FieldValue.zext(8);
+ FieldValue = FieldValue.zext(8);
}
// Append the last element.
@@ -372,7 +368,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
}
- uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+ uint64_t LayoutSizeInBytes = Layout.getSize().getQuantity();
if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
// If the struct is bigger than the size of the record type,
@@ -398,9 +394,9 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
// Append tail padding if necessary.
- AppendTailPadding(Layout.getSize());
+ AppendTailPadding(CGM.getContext().toBits(Layout.getSize()));
- assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ assert(Layout.getSize().getQuantity() == NextFieldOffsetInBytes &&
"Tail padding mismatch!");
return true;
@@ -454,17 +450,10 @@ public:
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
-
+
llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
- if (const MemberPointerType *MPT =
- E->getType()->getAs<MemberPointerType>()) {
- DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
- NamedDecl *ND = DRE->getDecl();
- if (MPT->isMemberFunctionPointer())
- return CGM.getCXXABI().EmitMemberPointer(cast<CXXMethodDecl>(ND));
- else
- return CGM.getCXXABI().EmitMemberPointer(cast<FieldDecl>(ND));
- }
+ if (E->getType()->isMemberPointerType())
+ return CGM.getMemberPointerConstant(E);
return 0;
}
@@ -755,7 +744,7 @@ public:
if (!VD->hasLocalStorage()) {
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
- else if (VD->isBlockVarDecl()) {
+ else if (VD->isLocalVarDecl()) {
assert(CGF && "Can't access static local vars without CGF");
return CGF->GetAddrOfStaticLocalVar(VD);
}
@@ -925,7 +914,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
else
Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
}
- return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ return llvm::ConstantVector::get(Inits);
}
}
}
@@ -938,6 +927,38 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return C;
}
+static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
+ const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
+ return layout.getFieldOffset(field->getFieldIndex());
+}
+
+llvm::Constant *
+CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
+ // Member pointer constants always have a very particular form.
+ const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
+ const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
+
+ // A member function pointer.
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
+ return getCXXABI().EmitMemberPointer(method);
+
+ // Otherwise, a member data pointer.
+ uint64_t fieldOffset;
+ if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
+ fieldOffset = getFieldOffset(getContext(), field);
+ else {
+ const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);
+
+ fieldOffset = 0;
+ for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
+ ce = ifield->chain_end(); ci != ce; ++ci)
+ fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
+ }
+
+ CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ return getCXXABI().EmitMemberDataPointer(type, chars);
+}
+
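getMemberPointerConstant sums offsets along an IndirectFieldDecl chain, which models members of anonymous aggregates. The total is simply the field's byte offset in the outer type, as offsetof shows; anonymous structs are a common extension in C++, and exactly the case IndirectFieldDecl covers:

    #include <cstddef>
    #include <cstdio>

    struct S {
      int a;
      struct {      // anonymous: b and c are indirect fields of S
        int b;
        int c;      // offset(c in S) = offset(anon in S) + offset(c in anon)
      };
    };

    int main() {
      std::printf("%zu\n", offsetof(S, c)); // typically 8 with 4-byte int
    }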
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
std::vector<llvm::Constant *> &Elements,
@@ -964,8 +985,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
if (I->isVirtual()) {
- // FIXME: We should initialize null pointer to data members in virtual
- // bases here.
+ // Ignore virtual bases.
continue;
}
@@ -980,7 +1000,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
if (CGM.getTypes().isZeroInitializable(BaseDecl))
continue;
- uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
FillInNullDataMemberPointers(CGM, I->getType(),
Elements, StartOffset + BaseOffset);
}
@@ -1005,9 +1025,10 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
uint64_t StartIndex = StartOffset / 8;
uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+ // FIXME: hardcodes Itanium member pointer representation!
llvm::Constant *NegativeOne =
llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
- -1ULL, /*isSigned=*/true);
+ -1ULL, /*isSigned*/true);
// Fill in the null data member pointer.
for (uint64_t I = StartIndex; I != EndIndex; ++I)
@@ -1015,6 +1036,124 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
}
}
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ const llvm::Type *baseType,
+ const CXXRecordDecl *base);
+
+static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
+ const CXXRecordDecl *record,
+ bool asCompleteObject) {
+ const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
+ const llvm::StructType *structure =
+ (asCompleteObject ? layout.getLLVMType()
+ : layout.getBaseSubobjectLLVMType());
+
+ unsigned numElements = structure->getNumElements();
+ std::vector<llvm::Constant *> elements(numElements);
+
+ // Fill in all the bases.
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
+
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+
+ // Fill in all the fields.
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+
+ // Ignore bit fields.
+ if (field->isBitField())
+ continue;
+
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // Fill in the virtual bases, if we're working with the complete object.
+ if (asCompleteObject) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getVirtualBaseIndex(base);
+
+ // We might have already laid this field out.
+ if (elements[fieldIndex]) continue;
+
+ const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != numElements; ++i) {
+ if (!elements[i])
+ elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(structure, elements);
+}
+
+/// Emit the null constant for a base subobject.
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ const llvm::Type *baseType,
+ const CXXRecordDecl *base) {
+ const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
+
+ // Just zero out bases that don't have any pointer to data members.
+ if (baseLayout.isZeroInitializableAsBase())
+ return llvm::Constant::getNullValue(baseType);
+
+ // If the base type is a struct, we can just use its null constant.
+ if (isa<llvm::StructType>(baseType)) {
+ return EmitNullConstant(CGM, base, /*complete*/ false);
+ }
+
+ // Otherwise, some bases are represented as arrays of i8 if the size
+ // of the base is smaller than its corresponding LLVM type. Figure
+ // out how many elements this base array has.
+ const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ unsigned numBaseElements = baseArrayType->getNumElements();
+
+ // Fill in null data member pointers.
+ std::vector<llvm::Constant *> baseElements(numBaseElements);
+ FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
+ baseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (numBaseElements) {
+ const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
+ for (unsigned i = 0; i != numBaseElements; ++i) {
+ if (!baseElements[i])
+ baseElements[i] = i8_zero;
+ }
+ }
+
+ return llvm::ConstantArray::get(baseArrayType, baseElements);
+}
+
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
if (getTypes().isZeroInitializable(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
@@ -1036,79 +1175,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- const llvm::StructType *STy =
- cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
- unsigned NumElements = STy->getNumElements();
- std::vector<llvm::Constant *> Elements(NumElements);
-
- const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
-
- // Go through all bases and fill in any null pointer to data members.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual()) {
- // FIXME: We should initialize null pointer to data members in virtual
- // bases here.
- continue;
- }
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore empty bases.
- if (BaseDecl->isEmpty())
- continue;
-
- // Ignore bases that don't have any pointer to data members.
- if (getTypes().isZeroInitializable(BaseDecl))
- continue;
-
- // Currently, all bases are arrays of i8. Figure out how many elements
- // this base array has.
- unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
- const llvm::ArrayType *BaseArrayTy =
- cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
-
- unsigned NumBaseElements = BaseArrayTy->getNumElements();
- std::vector<llvm::Constant *> BaseElements(NumBaseElements);
-
- // Now fill in null data member pointers.
- FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
-
- // Now go through all other elements and zero them out.
- if (NumBaseElements) {
- llvm::Constant *Zero =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
-
- for (unsigned I = 0; I != NumBaseElements; ++I) {
- if (!BaseElements[I])
- BaseElements[I] = Zero;
- }
- }
-
- Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
- BaseElements);
- }
-
- for (RecordDecl::field_iterator I = RD->field_begin(),
- E = RD->field_end(); I != E; ++I) {
- const FieldDecl *FD = *I;
-
- // Ignore bit fields.
- if (FD->isBitField())
- continue;
-
- unsigned FieldNo = Layout.getLLVMFieldNo(FD);
- Elements[FieldNo] = EmitNullConstant(FD->getType());
- }
-
- // Now go through all other fields and zero them out.
- for (unsigned i = 0; i != NumElements; ++i) {
- if (!Elements[i])
- Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
- }
-
- return llvm::ConstantStruct::get(STy, Elements);
+ return ::EmitNullConstant(*this, RD, /*complete object*/ true);
}
assert(T->isMemberPointerType() && "Should only see member pointers here!");
@@ -1117,6 +1184,5 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
- return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
- /*isSigned=*/true);
+ return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
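
The dispatch to the C++ ABI object matters because the Itanium rule quoted above only covers pointers to data members; a null member function pointer is instead a {ptr, adj} pair with a zero ptr field. A hedged sketch of the distinction:

struct C { int x; void f(); };

int C::*pd = 0;        // Itanium: lowered to the integer constant -1
void (C::*pf)() = 0;   // Itanium: lowered by clang to an all-zero { ptr, adj }
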
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 2318cc4..3e1debd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -11,10 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
@@ -37,6 +39,7 @@ using llvm::Value;
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//
+namespace {
struct BinOpInfo {
Value *LHS;
Value *RHS;
@@ -45,7 +48,13 @@ struct BinOpInfo {
const Expr *E; // Entire expr, for diagnostics; may not be a binop.
};
-namespace {
+static bool MustVisitNullValue(const Expr *E) {
+ // If a null pointer expression's type is the C++0x nullptr_t, then
+ // it's not necessarily a simple constant and it must be evaluated
+ // for its potential side effects.
+ return E->getType()->isNullPtrType();
+}
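
A nullptr_t operand can carry side effects even though its value is always a constant null, which is why the casts below still visit the subexpression. A minimal example (hypothetical, not from the patch):

#include <cstddef>

std::nullptr_t get_null() { /* arbitrary side effects could live here */ return nullptr; }

void g(int *);
void h() {
  g(get_null());   // CK_NullToPointer: the call must still be emitted for
                   // its side effects before the constant null is produced
}
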
+
class ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
@@ -101,10 +110,49 @@ public:
/// EmitNullValue - Emit a value that corresponds to null for the given type.
Value *EmitNullValue(QualType Ty);
+ /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
+ Value *EmitFloatToBoolConversion(Value *V) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
+ return Builder.CreateFCmpUNE(V, Zero, "tobool");
+ }
+
+ /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
+ Value *EmitPointerToBoolConversion(Value *V) {
+ Value *Zero = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(V->getType()));
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
+ Value *EmitIntToBoolConversion(Value *V) {
+ // Because of the type rules of C, we often end up computing a
+ // logical value, then zero extending it to int, then wanting it
+ // as a logical value again. Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
+ if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ const llvm::IntegerType *Ty = cast<llvm::IntegerType>(V->getType());
+ Value *Zero = llvm::ConstantInt::get(Ty, 0);
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
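
The zext peephole exists because C's promotion rules round-trip boolean values through int constantly; one genuine trigger is double negation (illustrative only):

int normalize(int x) {
  return !!x;   /* the inner !x yields an i1 that is zext'd to int; the
                   outer ! wants it as i1 again, so the zext is stripped
                   and, having no other uses, erased */
}
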
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
+ Value *Visit(Expr *E) {
+ return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
+ }
+
Value *VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
assert(0 && "Stmt can't have complex result type!");
@@ -112,7 +160,9 @@ public:
}
Value *VisitExpr(Expr *S);
- Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+ Value *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
@@ -133,11 +183,6 @@ public:
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
return EmitNullValue(E->getType());
}
- Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
- return llvm::ConstantInt::get(ConvertType(E->getType()),
- CGF.getContext().typesAreCompatible(
- E->getArgType1(), E->getArgType2()));
- }
Value *VisitOffsetOfExpr(OffsetOfExpr *E);
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
@@ -145,17 +190,45 @@ public:
return Builder.CreateBitCast(V, ConvertType(E->getType()));
}
+ Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ E->getPackLength());
+ }
+
+ Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getType());
+
+ // Otherwise, assume the mapping is the scalar directly.
+ return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ }
+
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
Expr::EvalResult Result;
- if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
- assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
- llvm::ConstantInt *CI
- = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
- CGF.EmitDeclRefExprDbgValue(E, CI);
- return CI;
+ if (!E->Evaluate(Result, CGF.getContext()))
+ return EmitLoadOfLValue(E);
+
+ assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
+
+ llvm::Constant *C;
+ if (Result.Val.isInt()) {
+ C = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ } else if (Result.Val.isFloat()) {
+ C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+ } else {
+ return EmitLoadOfLValue(E);
}
- return EmitLoadOfLValue(E);
+
+ // Make sure we emit a debug reference to the global variable.
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (!CGF.getContext().DeclMustBeEmitted(VD))
+ CGF.EmitDeclRefExprDbgValue(E, C);
+ } else if (isa<EnumConstantDecl>(E->getDecl())) {
+ CGF.EmitDeclRefExprDbgValue(E, C);
+ }
+
+ return C;
}
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
return CGF.EmitObjCSelectorExpr(E);
@@ -167,10 +240,8 @@ public:
return EmitLoadOfLValue(E);
}
Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- return EmitLoadOfLValue(E);
- }
- Value *VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
+ assert(E->getObjectKind() == OK_Ordinary &&
+ "reached property reference without lvalue-to-rvalue");
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -234,17 +305,26 @@ public:
return EmitScalarPrePostIncDec(E, LV, true, true);
}
+ llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal,
+ bool IsInc);
+
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
- // If the sub-expression is an instance member reference,
- // EmitDeclRefLValue will magically emit it with the appropriate
- // value as the "address".
+ if (isa<MemberPointerType>(E->getType())) // never sugared
+ return CGF.CGM.getMemberPointerConstant(E);
+
return EmitLValue(E->getSubExpr()).getAddress();
}
- Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryDeref(const UnaryOperator *E) {
+ if (E->getType()->isVoidType())
+ return Visit(E->getSubExpr()); // the actual value should be unused
+ return EmitLoadOfLValue(E);
+ }
Value *VisitUnaryPlus(const UnaryOperator *E) {
// This differs from gcc, though, most likely due to a bug in gcc.
TestAndClearIgnoreResultAssign();
@@ -267,8 +347,8 @@ public:
return CGF.LoadCXXThis();
}
- Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ Value *VisitExprWithCleanups(ExprWithCleanups *E) {
+ return CGF.EmitExprWithCleanups(E).getScalarVal();
}
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
@@ -278,8 +358,11 @@ public:
return 0;
}
Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
- return llvm::ConstantInt::get(Builder.getInt1Ty(),
- E->EvaluateTrait(CGF.getContext()));
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
+ Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
@@ -301,6 +384,10 @@ public:
return 0;
}
+ Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->hasSignedIntegerRepresentation()) {
@@ -318,9 +405,23 @@ public:
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
+ bool isTrapvOverflowBehavior() {
+ return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
+ == LangOptions::SOB_Trapping;
+ }
/// Create a binary op that checks for overflow.
/// Currently only supports +, - and *.
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ // Emit the overflow basic block when the -ftrapv option is enabled.
+ void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
+ Builder.SetInsertPoint(overflowBB);
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+ }
+ // Check for undefined division and modulus behaviors.
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv);
Value *EmitDiv(const BinOpInfo &Ops);
Value *EmitRem(const BinOpInfo &Ops);
Value *EmitAdd(const BinOpInfo &Ops);
@@ -391,7 +492,7 @@ public:
// Other Operators.
Value *VisitBlockExpr(const BlockExpr *BE);
- Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
Value *VisitChooseExpr(ChooseExpr *CE);
Value *VisitVAArgExpr(VAArgExpr *VE);
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
@@ -409,11 +510,8 @@ public:
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
- if (SrcType->isRealFloatingType()) {
- // Compare against 0.0 for fp scalars.
- llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
- return Builder.CreateFCmpUNE(Src, Zero, "tobool");
- }
+ if (SrcType->isRealFloatingType())
+ return EmitFloatToBoolConversion(Src);
if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
@@ -421,25 +519,11 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
"Unknown scalar type to convert");
- // Because of the type rules of C, we often end up computing a logical value,
- // then zero extending it to int, then wanting it as a logical value again.
- // Optimize this common case.
- if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
- if (ZI->getOperand(0)->getType() ==
- llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
- Value *Result = ZI->getOperand(0);
- // If there aren't any more uses, zap the instruction to save space.
- // Note that there can be more uses, for example if this
- // is the result of an assignment.
- if (ZI->use_empty())
- ZI->eraseFromParent();
- return Result;
- }
- }
+ if (isa<llvm::IntegerType>(Src->getType()))
+ return EmitIntToBoolConversion(Src);
- // Compare against an integer or pointer null.
- llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
- return Builder.CreateICmpNE(Src, Zero, "tobool");
+ assert(isa<llvm::PointerType>(Src->getType()));
+ return EmitPointerToBoolConversion(Src);
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
@@ -501,10 +585,10 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
- for (unsigned i = 0; i < NumElements; i++)
+ for (unsigned i = 0; i != NumElements; ++i)
Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
@@ -603,7 +687,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
}
- Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
+ Value* CV = llvm::ConstantVector::get(concat);
LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
LHSElts *= 2;
} else {
@@ -629,7 +713,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
MaskV.push_back(EltMask);
- Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
+ Value* MaskBits = llvm::ConstantVector::get(MaskV);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
// newv = undef
@@ -681,7 +765,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
indices.push_back(C);
}
- Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ Value *SV = llvm::ConstantVector::get(indices);
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
@@ -693,6 +777,17 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
EmitLValue(E->getBase());
return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
}
+
+ // Emit debug info for the aggregate now, if it was delayed to reduce
+ // debug info size.
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
+ if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ M->getParent()->getLocation());
+ }
return EmitLoadOfLValue(E);
}
@@ -790,7 +885,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
VIsUndefShuffle = false;
}
if (!Args.empty()) {
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
V = Builder.CreateShuffleVector(LHS, RHS, Mask);
++CurIdx;
continue;
@@ -845,7 +940,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = InitElts; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
@@ -862,7 +957,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// merging subsequent shuffles into this one.
if (CurIdx == 0)
std::swap(V, Init);
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
VIsUndefShuffle = isa<llvm::UndefValue>(Init);
CurIdx += InitElts;
@@ -916,11 +1011,8 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// a default case, so the compiler will warn on a missing case. The cases
// are in the same order as in the CastKind enum.
switch (Kind) {
- case CK_Unknown:
- // FIXME: All casts should have a known kind!
- //assert(0 && "Unknown cast kind!");
- break;
-
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
@@ -963,9 +1055,6 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
return CGF.EmitDynamicCast(V, DCE);
}
- case CK_ToUnion:
- assert(0 && "Should be unreachable!");
- break;
case CK_ArrayToPointerDecay: {
assert(E->getType()->isArrayType() &&
@@ -988,10 +1077,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
case CK_FunctionToPointerDecay:
return EmitLValue(E).getAddress();
+ case CK_NullToPointer:
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ return llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(ConvertType(DestTy)));
+
case CK_NullToMemberPointer: {
- // If the subexpression's type is the C++0x nullptr_t, emit the
- // subexpression, which may have side effects.
- if (E->getType()->isNullPtrType())
+ if (MustVisitNullValue(E))
(void) Visit(E);
const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
@@ -1011,11 +1105,30 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
-
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex:
case CK_ConstructorConversion:
- assert(0 && "Should be unreachable!");
+ case CK_ToUnion:
+ llvm_unreachable("scalar cast to non-scalar value");
break;
+ case CK_GetObjCProperty: {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
+ "CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
+ RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
+ return RV.getScalarVal();
+ }
+
+ case CK_LValueToRValue:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
+ return Visit(const_cast<Expr*>(E));
+
case CK_IntegralToPointer: {
Value *Src = Visit(const_cast<Expr*>(E));
@@ -1038,10 +1151,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
}
case CK_ToVoid: {
- if (E->Classify(CGF.getContext()).isGLValue())
- CGF.EmitLValue(E);
- else
- CGF.EmitAnyExpr(E, 0, false, true);
+ CGF.EmitIgnoredExpr(E);
return 0;
}
case CK_VectorSplat: {
@@ -1056,62 +1166,55 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Int32Ty, 0);
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ Args.push_back(Zero);
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
+
case CK_IntegralCast:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+ case CK_IntegralToBoolean:
+ return EmitIntToBoolConversion(Visit(E));
+ case CK_PointerToBoolean:
+ return EmitPointerToBoolConversion(Visit(E));
+ case CK_FloatingToBoolean:
+ return EmitFloatToBoolConversion(Visit(E));
case CK_MemberPointerToBoolean: {
llvm::Value *MemPtr = Visit(E);
const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
}
- }
-
- // Handle cases where the source is an non-complex type.
- if (!CGF.hasAggregateLLVMType(E->getType())) {
- Value *Src = Visit(const_cast<Expr*>(E));
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ return CGF.EmitComplexExpr(E, false, true).first;
- // Use EmitScalarConversion to perform the conversion.
- return EmitScalarConversion(Src, E->getType(), DestTy);
- }
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
- if (E->getType()->isAnyComplexType()) {
- // Handle cases where the source is a complex type.
- bool IgnoreImag = true;
- bool IgnoreImagAssign = true;
- bool IgnoreReal = IgnoreResultAssign;
- bool IgnoreRealAssign = IgnoreResultAssign;
- if (DestTy->isBooleanType())
- IgnoreImagAssign = IgnoreImag = false;
- else if (DestTy->isVoidType()) {
- IgnoreReal = IgnoreImag = false;
- IgnoreRealAssign = IgnoreImagAssign = true;
- }
- CodeGenFunction::ComplexPairTy V
- = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
- IgnoreImagAssign);
+ // TODO: kill this function off, inline appropriate case here
return EmitComplexToScalarConversion(V, E->getType(), DestTy);
}
- // Okay, this is a cast from an aggregate. It must be a cast to void. Just
- // evaluate the result and return.
- CGF.EmitAggExpr(E, 0, false, true);
+ }
+
+ llvm_unreachable("unknown scalar cast");
return 0;
}
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
- return CGF.EmitCompoundStmt(*E->getSubStmt(),
- !E->getType()->isVoidType()).getScalarVal();
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
+ .getScalarVal();
}
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
@@ -1126,108 +1229,147 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
//===----------------------------------------------------------------------===//
llvm::Value *ScalarExprEmitter::
-EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
- bool isInc, bool isPre) {
-
- QualType ValTy = E->getSubExpr()->getType();
- llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);
-
- int AmountVal = isInc ? 1 : -1;
-
- if (ValTy->isPointerType() &&
- ValTy->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition/subtraction needs to account for the VLA size
- CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal, bool IsInc) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ break;
}
+ assert(false && "Unknown SignedOverflowBehaviorTy");
+ return 0;
+}
+
+llvm::Value *
+ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType type = E->getSubExpr()->getType();
+ llvm::Value *value = EmitLoadOfLValue(LV, type);
+ llvm::Value *input = value;
+
+ int amount = (isInc ? 1 : -1);
+
+ // Special case of integer increment that we have to check first: bool++.
+ // Due to promotion rules, we get:
+ // bool++ -> bool = bool + 1
+ // -> bool = (int)bool + 1
+ // -> bool = ((int)bool + 1 != 0)
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
+ if (isInc && type->isBooleanType()) {
+ value = Builder.getTrue();
+
+ // Most common case by far: integer increment.
+ } else if (type->isIntegerType()) {
+
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ if (type->isSignedIntegerType())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+
+ // Unsigned integer inc is always two's complement.
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
- llvm::Value *NextVal;
- if (const llvm::PointerType *PT =
- dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
- if (!isa<llvm::FunctionType>(PT->getElementType())) {
- QualType PTEE = ValTy->getPointeeType();
- if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
- // Handle interface types, which are not represented with a concrete
- // type.
- int size = CGF.getContext().getTypeSize(OIT) / 8;
- if (!isInc)
- size = -size;
- Inc = llvm::ConstantInt::get(Inc->getType(), size);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- InVal = Builder.CreateBitCast(InVal, i8Ty);
- NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
- llvm::Value *lhs = LV.getAddress();
- lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = CGF.MakeAddrLValue(lhs, ValTy);
- } else
- NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ // Next most common: pointer increment.
+ } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType type = ptr->getPointeeType();
+
+ // VLA types don't have constant size.
+ if (type->isVariableArrayType()) {
+ llvm::Value *vlaSize =
+ CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
+ value = CGF.EmitCastToVoidPtr(value);
+ if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
+ value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // Arithmetic on function pointers (!) is just +-1.
+ } else if (type->isFunctionType()) {
+ llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
+
+ value = CGF.EmitCastToVoidPtr(value);
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // For everything else, we can just do a simple increment.
} else {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
- NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
- NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ llvm::Value *amt = llvm::ConstantInt::get(CGF.Int32Ty, amount);
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
}
- } else if (InVal->getType()->isIntegerTy(1) && isInc) {
- // Bool++ is an interesting case, due to promotion rules, we get:
- // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
- // Bool = ((int)Bool+1) != 0
- // An interesting aspect of this is that increment is always true.
- // Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue(VMContext);
- } else if (isa<llvm::IntegerType>(InVal->getType())) {
- NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
-
- if (!ValTy->isSignedIntegerType())
- // Unsigned integer inc is always two's complement.
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- else {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
- break;
- case LangOptions::SOB_Defined:
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- break;
- case LangOptions::SOB_Trapping:
- BinOpInfo BinOp;
- BinOp.LHS = InVal;
- BinOp.RHS = NextVal;
- BinOp.Ty = E->getType();
- BinOp.Opcode = BO_Add;
- BinOp.E = E;
- NextVal = EmitOverflowCheckedBinOp(BinOp);
- break;
- }
+
+ // Vector increment/decrement.
+ } else if (type->isVectorType()) {
+ if (type->hasIntegerRepresentation()) {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ if (type->hasSignedIntegerRepresentation())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ } else {
+ value = Builder.CreateFAdd(
+ value,
+ llvm::ConstantFP::get(value->getType(), amount),
+ isInc ? "inc" : "dec");
}
- } else {
+
+ // Floating point.
+ } else if (type->isRealFloatingType()) {
// Add the inc/dec to the real part.
- if (InVal->getType()->isFloatTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType()->isDoubleTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<double>(AmountVal)));
+ llvm::Value *amt;
+ if (value->getType()->isFloatTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(amount)));
+ else if (value->getType()->isDoubleTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(amount)));
else {
- llvm::APFloat F(static_cast<float>(AmountVal));
+ llvm::APFloat F(static_cast<float>(amount));
bool ignored;
F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
&ignored);
- NextVal = llvm::ConstantFP::get(VMContext, F);
+ amt = llvm::ConstantFP::get(VMContext, F);
}
- NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+
+ // Objective-C pointer types.
+ } else {
+ const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
+ value = CGF.EmitCastToVoidPtr(value);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
+ if (!isInc) size = -size;
+ llvm::Value *sizeValue =
+ llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
+
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateBitCast(value, input->getType());
}
// Store the updated result through the lvalue.
if (LV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, type, &value);
else
- CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV, type);
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
- return isPre ? NextVal : InVal;
+ return isPre ? value : input;
}
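
The bool++ special case is worth a concrete check: by the promotion chain spelled out in the comment above, increment always yields true. A small demonstration (bool increment was deprecated but well-defined C++ at the time):

#include <cassert>

int main() {
  bool b = false;
  b++;          // (int)false + 1 == 1, converted back to bool: true
  assert(b);
  b++;          // (int)true + 1 == 2, and 2 != 0: still true
  assert(b);
  return 0;
}
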
@@ -1346,7 +1488,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Compute the offset to the base.
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
- int64_t OffsetInt = RL.getBaseClassOffset(BaseRD) /
+ int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
CGF.getContext().getCharWidth();
Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
break;
@@ -1371,7 +1513,7 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
- CGF.EmitAnyExpr(E->getArgumentExpr());
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
return CGF.GetVLASize(VAT);
@@ -1387,21 +1529,38 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
return Visit(Op);
}
+
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
- if (Op->getType()->isAnyComplexType())
- return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (Op->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
+ .getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
- if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
- CGF.EmitLValue(Op);
- else
- CGF.EmitScalarExpr(Op, true);
+ CGF.EmitScalarExpr(Op, true);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
@@ -1478,8 +1637,12 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LHS.isPropertyRef())
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1490,8 +1653,51 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return EmitLoadOfLValue(LHS, E->getType());
}
+void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
+ const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv) {
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *contBB =
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);
+
+ const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::Value *IntMin =
+ llvm::ConstantInt::get(VMContext,
+ llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+
+ llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
+ llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
+ llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
+ Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
+ overflowBB, contBB);
+ } else {
+ CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
+ overflowBB, contBB);
+ }
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(contBB);
+}
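
The branch condition built above covers exactly the two integer divisions C leaves undefined; a hedged C illustration of what lands in the overflow block:

#include <limits.h>

int quot(int a, int b) {
  /* Trapped case 1: b == 0 (division by zero).
     Trapped case 2: a == INT_MIN && b == -1, since -INT_MIN is not
     representable in two's complement. Everything else falls through
     to the ordinary division in the continuation block. */
  return a / b;
}
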
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
+ else if (Ops.Ty->isRealFloatingType()) {
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
+ CGF.CurFn);
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
+ CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
+ overflowBB, DivCont);
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(DivCont);
+ }
+ }
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -1502,6 +1708,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ }
+
if (Ops.Ty->isUnsignedIntegerType())
return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
else
@@ -1544,21 +1757,56 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
// Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
// Handle overflow with llvm.trap.
- // TODO: it would be better to generate one of these blocks per function.
+ const std::string *handlerName =
+ &CGF.getContext().getLangOptions().OverflowHandler;
+ if (handlerName->empty()) {
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(continueBB);
+ return result;
+ }
+
+ // If an overflow handler is set, then we want to call it and then use its
+ // result, if it returns.
Builder.SetInsertPoint(overflowBB);
- llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
- Builder.CreateCall(Trap);
- Builder.CreateUnreachable();
-
- // Continue on.
+
+ // Get the overflow handler.
+ const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ std::vector<const llvm::Type*> argTypes;
+ argTypes.push_back(CGF.Int64Ty); argTypes.push_back(CGF.Int64Ty);
+ argTypes.push_back(Int8Ty); argTypes.push_back(Int8Ty);
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
+ llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+
+ // Sign extend the args to 64-bit, so that we can use the same handler for
+ // all types of overflow.
+ llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
+ llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
+
+ // Call the handler with the two arguments, the operation, and the size of
+ // the result.
+ llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
+ Builder.getInt8(OpID),
+ Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ // Truncate the result back to the desired size.
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+ Builder.CreateBr(continueBB);
+
Builder.SetInsertPoint(continueBB);
- return result;
+ llvm::PHINode *phi = Builder.CreatePHI(opTy);
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
}
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
@@ -1609,7 +1857,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
}
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.LLVMPointerWidth) {
+ if (Width < CGF.PointerWidthInBits) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
@@ -1682,7 +1930,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
// pointer - int
Value *Idx = Ops.RHS;
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.LLVMPointerWidth) {
+ if (Width < CGF.PointerWidthInBits) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = CGF.IntPtrTy;
@@ -1792,6 +2040,48 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}
+enum IntrinsicType { VCMPEQ, VCMPGT };
+// Return the corresponding comparison intrinsic for the given vector
+// element type.
+static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
+ BuiltinType::Kind ElemKind) {
+ switch (ElemKind) {
+ default: assert(0 && "unexpected element type");
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
+ break;
+ case BuiltinType::UShort:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
+ break;
+ case BuiltinType::Short:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
+ break;
+ case BuiltinType::Float:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
+ break;
+ }
+ return llvm::Intrinsic::not_intrinsic;
+}
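
In AltiVec mode a vector comparison therefore produces a scalar result via the "_p" predicate form of each intrinsic; a hedged source-level example (assumes compilation with AltiVec language support enabled):

vector signed int a, b;   /* AltiVec vector types */
int lt = (a < b);         /* lowered to vcmpgtsw_p with the operands
                             swapped, selecting a CR6 predicate bit */
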
+
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
unsigned SICmpOpc, unsigned FCmpOpc) {
TestAndClearIgnoreResultAssign();
@@ -1808,6 +2098,71 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
+ // If this is an AltiVec vector comparison, the result is a numeric type,
+ // so we use the predicate intrinsics, which compare whole vectors and
+ // yield 0 or 1 as the result.
+ if (LHSTy->isVectorType() && CGF.getContext().getLangOptions().AltiVec) {
+ // Constants mapping CR6 register bits to the predicate result.
+ enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
+
+ llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
+
+ // In several cases the vector argument order will be reversed.
+ Value *FirstVecArg = LHS,
+ *SecondVecArg = RHS;
+
+ QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
+ const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
+ BuiltinType::Kind ElementKind = BTy->getKind();
+
+ switch(E->getOpcode()) {
+ default: assert(0 && "is not a comparison operation");
+ case BO_EQ:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_NE:
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_LT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ break;
+ case BO_GT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ break;
+ case BO_LE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ }
+ break;
+ case BO_GE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ break;
+ }
+
+ Value *CR6Param = llvm::ConstantInt::get(CGF.Int32Ty, CR6);
+ llvm::Function *F = CGF.CGM.getIntrinsic(ID);
+ Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ }
+
if (LHS->getType()->isFPOrFPVectorTy()) {
Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
LHS, RHS, "cmp");
@@ -1881,8 +2236,12 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
if (Ignore)
return 0;
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOptions().CPlusPlus)
+ return RHS;
+
// Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef() || LHS.isKVCRef())
+ if (LHS.isPropertyRef())
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1913,6 +2272,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
// Branch on the LHS first. If it is false, go to the failure (cont) block.
CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
@@ -1926,10 +2287,10 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
- CGF.EndConditionalBranch();
+ eval.end(CGF);
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1963,6 +2324,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
// Branch on the LHS first. If it is true, go to the success (cont) block.
CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
@@ -1976,13 +2339,13 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
- CGF.BeginConditionalBranch();
+ eval.begin(CGF);
// Emit the RHS condition as a bool value.
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
- CGF.EndConditionalBranch();
+ eval.end(CGF);
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1997,7 +2360,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
- CGF.EmitStmt(E->getLHS());
+ CGF.EmitIgnoredExpr(E->getLHS());
CGF.EnsureInsertPoint();
return Visit(E->getRHS());
}
@@ -2034,95 +2397,107 @@ static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
Value *ScalarExprEmitter::
-VisitConditionalOperator(const ConditionalOperator *E) {
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
TestAndClearIgnoreResultAssign();
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ Expr *condExpr = E->getCond();
+ Expr *lhsExpr = E->getTrueExpr();
+ Expr *rhsExpr = E->getFalseExpr();
+
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm.
- if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
- Expr *Live = E->getLHS(), *Dead = E->getRHS();
- if (Cond == -1)
- std::swap(Live, Dead);
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(condExpr)){
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (Cond == -1) std::swap(live, dead);
// If the dead side doesn't have labels we need, and if the Live side isn't
// the gnu missing ?: extension (which we could handle, but don't bother
// to), just emit the Live part.
- if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
- Live) // Live part isn't missing.
- return Visit(Live);
+ if (!CGF.ContainsLabel(dead))
+ return Visit(live);
}
+ // OpenCL: If the condition is a vector, we can treat this condition like
+ // the select function.
+ if (CGF.getContext().getLangOptions().OpenCL
+ && condExpr->getType()->isVectorType()) {
+ llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+
+ const llvm::Type *condType = ConvertType(condExpr->getType());
+ const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+
+ unsigned numElem = vecTy->getNumElements();
+ const llvm::Type *elemType = vecTy->getElementType();
+
+ std::vector<llvm::Constant*> Zvals;
+ for (unsigned i = 0; i < numElem; ++i)
+ Zvals.push_back(llvm::ConstantInt::get(elemType,0));
+
+ llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
+ llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::VectorType::get(elemType,
+ numElem),
+ "sext");
+ llvm::Value *tmp2 = Builder.CreateNot(tmp);
+
+ // Cast float to int to perform ANDs if necessary.
+ llvm::Value *RHSTmp = RHS;
+ llvm::Value *LHSTmp = LHS;
+ bool wasCast = false;
+ const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ if (rhsVTy->getElementType()->isFloatTy()) {
+ RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
+ LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
+ wasCast = true;
+ }
+
+ llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
+ llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
+ llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
+ if (wasCast)
+ tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
+ return tmp5;
+ }
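
The OpenCL lowering above is the classic branch-free select; a scalar model of one lane (illustrative, not from the patch):

int select_lane(int cond, int lhs, int rhs) {
  int mask = -(cond < 0);              /* all ones iff the MSB of cond is set */
  return (lhs & mask) | (rhs & ~mask); /* lhs where set, rhs elsewhere */
}
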
+
// If this is a really simple expression (like x ? 4 : 5), emit this as a
// select instead of as control flow. We can only do this if it is cheap and
// safe to evaluate the LHS and RHS unconditionally.
- if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
- CGF) &&
- isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
- llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
- llvm::Value *LHS = Visit(E->getLHS());
- llvm::Value *RHS = Visit(E->getRHS());
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
- if (!E->getLHS() && CGF.getContext().getLangOptions().CPlusPlus) {
- // Does not support GNU missing condition extension in C++ yet (see #7726)
- CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
- return llvm::UndefValue::get(ConvertType(E->getType()));
- }
-
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
- Value *CondVal = 0;
-
- // If we don't have the GNU missing condition extension, emit a branch on bool
- // the normal way.
- if (E->getLHS()) {
- // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
- // the branch on bool.
- CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
- } else {
- // Otherwise, for the ?: extension, evaluate the conditional and then
- // convert it to bool the hard way. We do this explicitly because we need
- // the unconverted value for the missing middle value of the ?:.
- CondVal = CGF.EmitScalarExpr(E->getCond());
-
- // In some cases, EmitScalarConversion will delete the "CondVal" expression
- // if there are no extra uses (an optimization). Inhibit this by making an
- // extra dead use, because we're going to add a use of CondVal later. We
- // don't use the builder for this, because we don't want it to get optimized
- // away. This leaves dead code, but the ?: extension isn't common.
- new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
- Builder.GetInsertBlock());
-
- Value *CondBoolVal =
- CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
- CGF.getContext().BoolTy);
- Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
- }
-
- CGF.BeginConditionalBranch();
- CGF.EmitBlock(LHSBlock);
- // Handle the GNU extension for missing LHS.
- Value *LHS;
- if (E->getLHS())
- LHS = Visit(E->getLHS());
- else // Perform promotions, to handle cases like "short ?: int"
- LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+ eval.begin(CGF);
+ Value *LHS = Visit(lhsExpr);
+ eval.end(CGF);
- CGF.EndConditionalBranch();
LHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
+ Builder.CreateBr(ContBlock);
- CGF.BeginConditionalBranch();
CGF.EmitBlock(RHSBlock);
+ eval.begin(CGF);
+ Value *RHS = Visit(rhsExpr);
+ eval.end(CGF);
- Value *RHS = Visit(E->getRHS());
- CGF.EndConditionalBranch();
RHSBlock = Builder.GetInsertBlock();
- CGF.EmitBranch(ContBlock);
-
CGF.EmitBlock(ContBlock);
// If the LHS or RHS is a throw expression, it will be legitimately null.
@@ -2155,8 +2530,8 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return Builder.CreateLoad(ArgPtr);
}
-Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
- return CGF.BuildBlockLiteralTmp(BE);
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
+ return CGF.EmitBlockLiteral(block);
}
//===----------------------------------------------------------------------===//
@@ -2209,7 +2584,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
const llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
- if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+ if (BaseExpr->isRValue()) {
V = CreateTempAlloca(ClassPtrTy, "resval");
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
@@ -2229,7 +2604,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
}
-LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
const CompoundAssignOperator *E) {
ScalarExprEmitter Scalar(*this);
Value *Result = 0;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 6a6d63d..08c458b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -137,9 +138,49 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
E = OMD->param_end(); PI != E; ++PI)
Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+ CurGD = OMD;
+
StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocStart());
}
+void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
+ bool IsAtomic, bool IsStrong) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetGetStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList Args;
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
+ Size.getQuantity());
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *isAtomic =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsAtomic ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(isAtomic),
+ getContext().BoolTy));
+ llvm::Value *hasStrong =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsStrong ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(hasStrong),
+ getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+}
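
The call being assembled corresponds to the runtime's struct-copy helper; for reference, its C shape as commonly declared by the Objective-C runtimes (a hedged sketch, with BOOL spelled out):

typedef signed char BOOL;
void objc_copyStruct(void *dest, const void *src, long size,
                     BOOL atomic, BOOL hasStrong);
/* GenerateObjCGetterBody emits, in effect:
   objc_copyStruct(ReturnValue, &ivar, sizeof(ivar), IsAtomic, IsStrong); */
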
+
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
@@ -211,51 +252,30 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
} else {
- if (Ivar->getType()->isAnyComplexType()) {
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ QualType IVART = Ivar->getType();
+ if (IsAtomic &&
+ IVART->isScalarType() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, true, false);
+ }
+ else if (IVART->isAnyComplexType()) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
LV.isVolatileQualified());
StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
}
- else if (hasAggregateLLVMType(Ivar->getType())) {
+ else if (hasAggregateLLVMType(IVART)) {
bool IsStrong = false;
- if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
+ if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(IVART)))
&& CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
- && CGM.getObjCRuntime().GetCopyStructFunction()) {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetCopyStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- // objc_copyStruct (ReturnValue, &structIvar,
- // sizeof (Type of Ivar), isAtomic, false);
- CallArgList Args;
- RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- // sizeof (Type of Ivar)
- uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
- llvm::Value *isAtomic =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsAtomic ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(isAtomic),
- getContext().BoolTy));
- llvm::Value *hasStrong =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsStrong ? 1 : 0);
- Args.push_back(std::make_pair(RValue::get(hasStrong),
- getContext().BoolTy));
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
+ && CGM.getObjCRuntime().GetGetStructFunction()) {
+ GenerateObjCGetterBody(Ivar, IsAtomic, IsStrong);
}
else {
if (PID->getGetterCXXConstructor()) {
@@ -268,23 +288,61 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), IVART);
}
}
- } else {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ }
+ else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, IVART);
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
}
}
FinishFunction();
}
+void CodeGenFunction::GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
+ ObjCIvarDecl *Ivar) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetSetStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ CallArgList Args;
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ RValue RV =
+ RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsPtrTy =
+ Builder.CreateBitCast(Arg,
+ Types.ConvertType(getContext().VoidPtrTy));
+ RV = RValue::get(ArgAsPtrTy);
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
+ Size.getQuantity());
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+}
+
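GenerateObjCAtomicSetterBody is the mirror helper for setters: destination and source swap roles, and the atomic/hasStrong flags are hard-coded to true/false. Under the same assumptions as the getter sketch, with newValue standing in for the setter's single parameter:

  objc_copyStruct(&self->_ivar, &newValue,   // dest is the ivar, src the argument
                  sizeof(self->_ivar),
                  YES,                       // always atomic here
                  NO);                       // never strong here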
/// GenerateObjCSetter - Generate an Objective-C property setter
/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
/// is illegal within a category.
@@ -353,66 +411,49 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
} else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
!Ivar->getType()->isAnyComplexType() &&
IndirectObjCSetterArg(*CurFnInfo)
- && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ && CGM.getObjCRuntime().GetSetStructFunction()) {
// objc_copyStruct (&structIvar, &Arg,
// sizeof (struct something), true, false);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetCopyStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- CallArgList Args;
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
- RValue RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsPtrTy =
- Builder.CreateBitCast(Arg,
- Types.ConvertType(getContext().VoidPtrTy));
- RV = RValue::get(ArgAsPtrTy);
- Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
- // sizeof (Type of Ivar)
- uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
- Args.push_back(std::make_pair(RValue::get(SizeVal),
- getContext().LongTy));
- llvm::Value *True =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
- llvm::Value *False =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
- Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
} else if (PID->getSetterCXXAssignment()) {
- EmitAnyExpr(PID->getSetterCXXAssignment(), (llvm::Value *)0, false, true,
- false);
-
+ EmitIgnoredExpr(PID->getSetterCXXAssignment());
} else {
- // FIXME: Find a clean way to avoid AST node creation.
- SourceLocation Loc = PD->getLocation();
- ValueDecl *Self = OMD->getSelfDecl();
- ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
- DeclRefExpr Base(Self, Self->getType(), Loc);
- ParmVarDecl *ArgDecl = *OMD->param_begin();
- DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
- ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+ const llvm::Triple &Triple = getContext().Target.getTriple();
+ QualType IVART = Ivar->getType();
+ if (IsAtomic &&
+ IVART->isScalarType() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) &&
+ (getContext().getTypeSizeInChars(IVART)
+ > CharUnits::fromQuantity(4)) &&
+ CGM.getObjCRuntime().GetSetStructFunction()) {
+ GenerateObjCAtomicSetterBody(OMD, Ivar);
+ }
+ else {
+ // FIXME: Find a clean way to avoid AST node creation.
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), VK_RValue, Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), VK_LValue, Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
- // The property type can differ from the ivar type in some situations with
- // Objective-C pointer types, we can always bit cast the RHS in these cases.
- if (getContext().getCanonicalType(Ivar->getType()) !=
- getContext().getCanonicalType(ArgDecl->getType())) {
- ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
- Ivar->getType(), CK_BitCast, &Arg,
- VK_RValue);
- BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
- Ivar->getType(), Loc);
- EmitStmt(&Assign);
- } else {
- BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
- Ivar->getType(), Loc);
- EmitStmt(&Assign);
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; we can always bit cast the RHS in these cases.
+ if (getContext().getCanonicalType(Ivar->getType()) !=
+ getContext().getCanonicalType(ArgDecl->getType())) {
+ ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
+ Ivar->getType(), CK_BitCast, &Arg,
+ VK_RValue);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
+ Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
+ EmitStmt(&Assign);
+ } else {
+ BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
+ Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
+ EmitStmt(&Assign);
+ }
}
}
@@ -422,24 +463,22 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
- llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> IvarInitializers;
+ llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
StartObjCMethod(MD, IMP->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
- CXXBaseOrMemberInitializer *Member = (*B);
+ CXXCtorInitializer *Member = (*B);
IvarInitializers.push_back(Member);
}
if (ctor) {
for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
- CXXBaseOrMemberInitializer *IvarInit = IvarInitializers[I];
- FieldDecl *Field = IvarInit->getMember();
- QualType FieldType = Field->getType();
+ CXXCtorInitializer *IvarInit = IvarInitializers[I];
+ FieldDecl *Field = IvarInit->getAnyMember();
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
- EmitAggExpr(IvarInit->getInit(), LV.getAddress(),
- LV.isVolatileQualified(), false, true);
+ EmitAggExpr(IvarInit->getInit(), AggValueSlot::forLValue(LV, true));
}
// constructor returns 'self'.
CodeGenTypes &Types = CGM.getTypes();
@@ -450,7 +489,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
} else {
// dtor
for (size_t i = IvarInitializers.size(); i > 0; --i) {
- FieldDecl *Field = IvarInitializers[i - 1]->getMember();
+ FieldDecl *Field = IvarInitializers[i - 1]->getAnyMember();
QualType FieldType = Field->getType();
const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
@@ -511,138 +550,128 @@ QualType CodeGenFunction::TypeOfSelfObject() {
return PTy->getPointeeType();
}
-RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
- const Selector &S,
- ReturnValueSlot Return) {
- llvm::Value *Receiver = LoadObjCSelf();
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+
+ // For certain base kinds, we need to emit the base immediately.
+ llvm::Value *Base;
+ if (E->isSuperReceiver())
+ Base = LoadObjCSelf();
+ else if (E->isClassReceiver())
+ Base = CGM.getObjCRuntime().GetClass(Builder, E->getClassReceiver());
+ else
+ Base = EmitScalarExpr(E->getBase());
+ return LValue::MakePropertyRef(E, Base);
+}
+
+static RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector S,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs) {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CGF.CurFuncDecl);
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
- Return,
- Exp->getType(),
- S,
- OMD->getClassInterface(),
- isCategoryImpl,
- Receiver,
- isClassMessage,
- CallArgList());
-
+ return CGF.CGM.getObjCRuntime()
+ .GenerateMessageSendSuper(CGF, Return, ResultType,
+ S, OMD->getClassInterface(),
+ isCategoryImpl, Receiver,
+ isClassMessage, CallArgs);
}
-RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp,
- ReturnValueSlot Return) {
- Exp = Exp->IgnoreParens();
- // FIXME: Split it into two separate routines.
- if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
- Selector S = E->getProperty()->getGetterName();
- if (isa<ObjCSuperExpr>(E->getBase()))
- return EmitObjCSuperPropertyGet(E, S, Return);
- return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, Exp->getType(), S,
- EmitScalarExpr(E->getBase()),
- CallArgList());
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ ReturnValueSlot Return) {
+ const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
+ QualType ResultType;
+ Selector S;
+ if (E->isExplicitProperty()) {
+ const ObjCPropertyDecl *Property = E->getExplicitProperty();
+ S = Property->getGetterName();
+ ResultType = E->getType();
} else {
- const ObjCImplicitSetterGetterRefExpr *KE =
- cast<ObjCImplicitSetterGetterRefExpr>(Exp);
- Selector S = KE->getGetterMethod()->getSelector();
- llvm::Value *Receiver;
- if (KE->getInterfaceDecl()) {
- const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
- Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- } else if (isa<ObjCSuperExpr>(KE->getBase()))
- return EmitObjCSuperPropertyGet(KE, S, Return);
- else
- Receiver = EmitScalarExpr(KE->getBase());
- return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, Exp->getType(), S,
- Receiver,
- CallArgList(), KE->getInterfaceDecl());
+ const ObjCMethodDecl *Getter = E->getImplicitPropertyGetter();
+ S = Getter->getSelector();
+ ResultType = Getter->getResultType(); // with reference!
}
+
+ llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
+
+ // Accesses to 'super' follow a different code path.
+ if (E->isSuperReceiver())
+ return GenerateMessageSendSuper(*this, Return, ResultType,
+ S, Receiver, CallArgList());
+
+ const ObjCInterfaceDecl *ReceiverClass
+ = (E->isClassReceiver() ? E->getClassReceiver() : 0);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass);
}
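EmitLoadOfPropertyRefLValue thus turns a property load into a getter message send using the selector and base stashed in the l-value. For a hypothetical int property named count on an ordinary object receiver, the effect is roughly:

  #include <objc/message.h>   // objc_msgSend, sel_registerName
  // Direct objc_msgSend calls must be cast to the method's real signature.
  int n = ((int (*)(id, SEL))objc_msgSend)(receiver, sel_registerName("count"));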
-void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
- const Selector &S,
- RValue Src) {
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst) {
+ const ObjCPropertyRefExpr *E = Dst.getPropertyRefExpr();
+ Selector S = E->getSetterSelector();
+ QualType ArgType;
+ if (E->isImplicitProperty()) {
+ const ObjCMethodDecl *Setter = E->getImplicitPropertySetter();
+ ObjCMethodDecl::param_iterator P = Setter->param_begin();
+ ArgType = (*P)->getType();
+ } else {
+ ArgType = E->getType();
+ }
+ // FIXME. Other than scalars, AST is not adequate for setter and
+ // getter type mismatches which require conversion.
+ if (Src.isScalar()) {
+ llvm::Value *SrcVal = Src.getScalarVal();
+ QualType DstType = getContext().getCanonicalType(ArgType);
+ const llvm::Type *DstTy = ConvertType(DstType);
+ if (SrcVal->getType() != DstTy)
+ Src =
+ RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
+ }
+
CallArgList Args;
- llvm::Value *Receiver = LoadObjCSelf();
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
- bool isClassMessage = OMD->isClassMethod();
- bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- Args.push_back(std::make_pair(Src, Exp->getType()));
- CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
- ReturnValueSlot(),
- getContext().VoidTy,
- S,
- OMD->getClassInterface(),
- isCategoryImpl,
- Receiver,
- isClassMessage,
- Args);
- return;
-}
+ Args.push_back(std::make_pair(Src, ArgType));
-void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
- RValue Src) {
- // FIXME: Split it into two separate routines.
- if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
- Selector S = E->getProperty()->getSetterName();
- if (isa<ObjCSuperExpr>(E->getBase())) {
- EmitObjCSuperPropertySet(E, S, Src);
- return;
- }
- CallArgList Args;
- Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- getContext().VoidTy, S,
- EmitScalarExpr(E->getBase()),
- Args);
- } else if (const ObjCImplicitSetterGetterRefExpr *E =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
- const ObjCMethodDecl *SetterMD = E->getSetterMethod();
- Selector S = SetterMD->getSelector();
- CallArgList Args;
- llvm::Value *Receiver;
- if (E->getInterfaceDecl()) {
- const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
- Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- } else if (isa<ObjCSuperExpr>(E->getBase())) {
- EmitObjCSuperPropertySet(E, S, Src);
- return;
- } else
- Receiver = EmitScalarExpr(E->getBase());
- ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
- Args.push_back(std::make_pair(Src, (*P)->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- getContext().VoidTy, S,
- Receiver,
- Args, E->getInterfaceDecl());
- } else
- assert (0 && "bad expression node in EmitObjCPropertySet");
+ llvm::Value *Receiver = Dst.getPropertyRefBaseAddr();
+ QualType ResultType = getContext().VoidTy;
+
+ if (E->isSuperReceiver()) {
+ GenerateMessageSendSuper(*this, ReturnValueSlot(),
+ ResultType, S, Receiver, Args);
+ return;
+ }
+
+ const ObjCInterfaceDecl *ReceiverClass
+ = (E->isClassReceiver() ? E->getClassReceiver() : 0);
+
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, S, Receiver, Args,
+ ReceiverClass);
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
- llvm::Value *DeclAddress;
- QualType ElementTy;
if (!EnumerationMutationFn) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
}
- if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
- EmitStmt(SD);
- assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
- const Decl* D = SD->getSingleDecl();
- ElementTy = cast<ValueDecl>(D)->getType();
- DeclAddress = LocalDeclMap[D];
- } else {
- ElementTy = cast<Expr>(S.getElement())->getType();
- DeclAddress = 0;
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(Builder);
}
+ JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
+
// Fast enumeration state.
QualType StateTy = getContext().getObjCFastEnumerationStateType();
llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
@@ -651,7 +680,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Number of elements in the items array.
static const unsigned NumItems = 16;
- // Get selector
+ // Fetch the countByEnumeratingWithState:objects:count: selector.
IdentifierInfo *II[] = {
&CGM.getContext().Idents.get("countByEnumeratingWithState"),
&CGM.getContext().Idents.get("objects"),
@@ -666,79 +695,92 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+ // Emit the collection pointer.
llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+ // Send it our message:
CallArgList Args;
+
+ // The first argument is a temporary of the enumeration-state type.
Args.push_back(std::make_pair(RValue::get(StatePtr),
getContext().getPointerType(StateTy)));
+ // The second argument is a temporary array with space for NumItems
+ // pointers. We'll actually be loading elements from the array
+ // pointer written into the control state; this buffer is so that
+ // collections that *aren't* backed by arrays can still queue up
+ // batches of elements.
Args.push_back(std::make_pair(RValue::get(ItemsPtr),
getContext().getPointerType(ItemsTy)));
+ // The third argument is the capacity of that temporary array.
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
Args.push_back(std::make_pair(RValue::get(Count),
getContext().UnsignedLongTy));
+ // Start the enumeration.
RValue CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
- llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
- "limit.ptr");
- Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ // The initial number of objects that were returned in the buffer.
+ llvm::Value *initialBufferLimit = CountRV.getScalarVal();
- llvm::BasicBlock *NoElements = createBasicBlock("noelements");
- llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+ llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
+ llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
- llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
- llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+ llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
- llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
- Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+ // If the limit pointer was zero to begin with, the collection is
+ // empty; skip all this.
+ Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
+ EmptyBB, LoopInitBB);
- EmitBlock(SetStartMutations);
-
- llvm::Value *StartMutationsPtr = CreateMemTemp(getContext().UnsignedLongTy);
+ // Otherwise, initialize the loop.
+ EmitBlock(LoopInitBB);
+ // Save the initial mutations value. This is the value at an
+ // address that was written into the state object by
+ // countByEnumeratingWithState:objects:count:.
llvm::Value *StateMutationsPtrPtr =
Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
"mutationsptr");
- llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
- "mutations");
-
- Builder.CreateStore(StateMutations, StartMutationsPtr);
+ llvm::Value *initialMutations =
+ Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
- llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
- EmitBlock(LoopStart);
+ // Start looping. This is the point we return to whenever we have a
+ // fresh, non-empty batch of objects.
+ llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
+ EmitBlock(LoopBodyBB);
- llvm::Value *CounterPtr = CreateMemTemp(getContext().UnsignedLongTy,
- "counter.ptr");
- Builder.CreateStore(Zero, CounterPtr);
+ // The current index into the buffer.
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, "forcoll.index");
+ index->addIncoming(zero, LoopInitBB);
- llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
- EmitBlock(LoopBody);
+ // The current buffer size.
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, "forcoll.count");
+ count->addIncoming(initialBufferLimit, LoopInitBB);
+ // Check whether the mutations value has changed from where it was
+ // at start. StateMutationsPtr should actually be invariant between
+ // refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
- StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
-
- llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
- "mutations");
- llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
- StartMutations,
- "tobool");
+ llvm::Value *currentMutations
+ = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+ llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
+ llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
- llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
- llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+ Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
+ WasNotMutatedBB, WasMutatedBB);
- Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
-
- EmitBlock(WasMutated);
+ // If so, call the enumeration-mutation function.
+ EmitBlock(WasMutatedBB);
llvm::Value *V =
Builder.CreateBitCast(Collection,
ConvertType(getContext().getObjCIdType()),
@@ -752,81 +794,114 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
FunctionType::ExtInfo()),
EnumerationMutationFn, ReturnValueSlot(), Args2);
- EmitBlock(WasNotMutated);
+ // Otherwise, or if the mutation function returns, just continue.
+ EmitBlock(WasNotMutatedBB);
- llvm::Value *StateItemsPtr =
- Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ // Initialize the element variable.
+ RunCleanupsScope elementVariableScope(*this);
+ bool elementIsDecl;
+ LValue elementLValue;
+ QualType elementType;
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
- llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), D->getType(),
+ VK_LValue, SourceLocation());
+ elementLValue = EmitLValue(&tempDRE);
+ elementType = D->getType();
+ elementIsDecl = true;
+ } else {
+ elementLValue = LValue(); // suppress warning
+ elementType = cast<Expr>(S.getElement())->getType();
+ elementIsDecl = false;
+ }
+ const llvm::Type *convertedElementType = ConvertType(elementType);
- llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
- "stateitems");
+ // Fetch the buffer out of the enumeration state.
+ // TODO: this pointer should actually be invariant between
+ // refreshes, which would help us do certain loop optimizations.
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ llvm::Value *EnumStateItems =
+ Builder.CreateLoad(StateItemsPtr, "stateitems");
+ // Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr =
- Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
-
- llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
-
- // Cast the item to the right type.
- CurrentItem = Builder.CreateBitCast(CurrentItem,
- ConvertType(ElementTy), "tmp");
+ Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
- if (!DeclAddress) {
- LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+ // Cast that value to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
+ "currentitem");
- // Set the value to null.
- Builder.CreateStore(CurrentItem, LV.getAddress());
- } else
- Builder.CreateStore(CurrentItem, DeclAddress);
+ // Make sure we have an l-value. Yes, this gets evaluated every
+ // time through the loop.
+ if (!elementIsDecl)
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
- // Increment the counter.
- Counter = Builder.CreateAdd(Counter,
- llvm::ConstantInt::get(UnsignedLongLTy, 1));
- Builder.CreateStore(Counter, CounterPtr);
-
- JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
- JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, elementType);
+ // Perform the loop body, setting up break and continue labels.
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
-
- EmitStmt(S.getBody());
-
+ {
+ RunCleanupsScope Scope(*this);
+ EmitStmt(S.getBody());
+ }
BreakContinueStack.pop_back();
+ // Destroy the element variable now.
+ elementVariableScope.ForceCleanup();
+
+ // Check whether there are more elements.
EmitBlock(AfterBody.getBlock());
- llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+ llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
+
+ // First we check in the local buffer.
+ llvm::Value *indexPlusOne
+ = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
+
+ // If we haven't overrun the buffer yet, we can continue.
+ Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
+ LoopBodyBB, FetchMoreBB);
- Counter = Builder.CreateLoad(CounterPtr);
- Limit = Builder.CreateLoad(LimitPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
- Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+ index->addIncoming(indexPlusOne, AfterBody.getBlock());
+ count->addIncoming(count, AfterBody.getBlock());
- // Fetch more elements.
- EmitBlock(FetchMore);
+ // Otherwise, we have to fetch more elements.
+ EmitBlock(FetchMoreBB);
CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
- Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
- Limit = Builder.CreateLoad(LimitPtr);
- IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
- Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+ // If we got a zero count, we're done.
+ llvm::Value *refetchCount = CountRV.getScalarVal();
+
+ // (note that the message send might split FetchMoreBB)
+ index->addIncoming(zero, Builder.GetInsertBlock());
+ count->addIncoming(refetchCount, Builder.GetInsertBlock());
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
+ EmptyBB, LoopBodyBB);
// No more elements.
- EmitBlock(NoElements);
+ EmitBlock(EmptyBB);
- if (!DeclAddress) {
+ if (!elementIsDecl) {
// If the element was not a declaration, set it to be null.
- LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+ llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(null), elementLValue, elementType);
+ }
- // Set the value to null.
- Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
- LV.getAddress());
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getEnd());
+ DI->EmitRegionEnd(Builder);
}
EmitBlock(LoopEnd.getBlock());
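The rewritten loop threads index and count through PHI nodes instead of stack slots and reads the mutations pointer once up front. A hypothetical source-level equivalent of the control flow built above:

  NSFastEnumerationState state = { 0 };
  id buffer[16];                             // the NumItems temporary array
  unsigned long count =
      [collection countByEnumeratingWithState:&state objects:buffer count:16];
  if (count) {                               // forcoll.empty skips everything
    unsigned long mutations = *state.mutationsPtr;  // forcoll.initial-mutations
    unsigned long i = 0;                     // forcoll.index
    do {                                     // forcoll.loopbody
      if (*state.mutationsPtr != mutations)  // forcoll.mutated
        objc_enumerationMutation(collection);
      id element = state.itemsPtr[i];        // load from the batch buffer
      /* ... loop body (forcoll.next) ... */
      if (++i == count) {                    // forcoll.refetch
        i = 0;
        count = [collection countByEnumeratingWithState:&state
                                                 objects:buffer count:16];
      }
    } while (count);
  }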
@@ -846,5 +921,3 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
}
CGObjCRuntime::~CGObjCRuntime() {}
-
-
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index d7960be..d481e77 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -17,7 +17,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
+#include "CGCleanup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -62,7 +62,10 @@ private:
const llvm::IntegerType *IntTy;
const llvm::PointerType *PtrTy;
const llvm::IntegerType *LongTy;
+ const llvm::IntegerType *SizeTy;
+ const llvm::IntegerType *PtrDiffTy;
const llvm::PointerType *PtrToIntTy;
+ const llvm::Type *BoolTy;
llvm::GlobalAlias *ClassPtrAlias;
llvm::GlobalAlias *MetaClassPtrAlias;
std::vector<llvm::Constant*> Classes;
@@ -179,7 +182,8 @@ public:
virtual llvm::Function *ModuleInitFunction();
virtual llvm::Function *GetPropertyGetFunction();
virtual llvm::Function *GetPropertySetFunction();
- virtual llvm::Function *GetCopyStructFunction();
+ virtual llvm::Function *GetSetStructFunction();
+ virtual llvm::Function *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -212,8 +216,8 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) {
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
return NULLPtr;
}
};
@@ -273,6 +277,11 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+ SizeTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getSizeType()));
+ PtrDiffTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()));
+ BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Int8Ty = llvm::Type::getInt8Ty(VMContext);
// C string type. Used in lots of places.
@@ -318,8 +327,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
// id objc_assign_ivar(id, id, ptrdiff_t);
std::vector<const llvm::Type*> Args(1, IdTy);
Args.push_back(PtrToIdTy);
- // FIXME: ptrdiff_t
- Args.push_back(LongTy);
+ Args.push_back(PtrDiffTy);
llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
// id objc_assign_strongCast (id, id*)
@@ -342,8 +350,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
Args.clear();
Args.push_back(PtrToInt8Ty);
Args.push_back(PtrToInt8Ty);
- // FIXME: size_t
- Args.push_back(LongTy);
+ Args.push_back(SizeTy);
FTy = llvm::FunctionType::get(IdTy, Args, false);
MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
}
@@ -435,7 +442,7 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name);
+ linkage, C, Name);
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
@@ -443,7 +450,7 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name);
+ linkage, C, Name);
}
/// Generate an NSConstantString object.
@@ -995,7 +1002,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
PtrTy, // Should be a recursive pointer, but it's always NULL here.
- LongTy,//FIXME: Should be size_t
+ SizeTy,
ProtocolArrayTy,
NULL);
std::vector<llvm::Constant*> Elements;
@@ -1250,7 +1257,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
ExistingProtocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
PtrTy, // Should be a recursive pointer, but it's always NULL here.
- LongTy,//FIXME: Should be size_t
+ SizeTy,
ProtocolArrayTy,
NULL);
std::vector<llvm::Constant*> ProtocolElements;
@@ -1430,7 +1437,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
}
// Get the size of instances.
- int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+ int instanceSize =
+ Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
// Collect information about instance variables.
llvm::SmallVector<llvm::Constant*, 16> IvarNames;
@@ -1440,7 +1448,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
std::vector<llvm::Constant*> IvarOffsetValues;
int superInstanceSize = !SuperClassDecl ? 0 :
- Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
@@ -1469,7 +1477,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::ExternalLinkage,
- llvm::ConstantInt::get(IntTy, BaseOffset),
+ llvm::ConstantInt::get(IntTy, Offset),
"__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString()));
}
@@ -1538,7 +1546,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// allows code compiled for the non-Fragile ABI to inherit from code compiled
// for the legacy ABI, without causing problems. The converse is also
// possible, but causes all ivar accesses to be fragile.
- int i = 0;
+
// Offset pointer for getting at the correct field in the ivar list when
// setting up the alias. These are: The base address for the global, the
// ivar array (second field), the ivar in this list (set for each ivar), and
@@ -1548,15 +1556,16 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::ConstantInt::get(IndexTy, 1), 0,
llvm::ConstantInt::get(IndexTy, 2) };
- for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
- endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
- +(*iter)->getNameAsString();
- offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+ + IVD->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i);
// Get the correct ivar field
llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
IvarList, offsetPointerIndexes, 4);
- // Get the existing alias, if one exists.
+ // Get the existing variable, if one exists.
llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
if (offset) {
offset->setInitializer(offsetValue);
@@ -1585,13 +1594,15 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
- ClassPtrAlias->setAliasee(
+ ClassPtrAlias->replaceAllUsesWith(
llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
ClassPtrAlias = 0;
}
if (MetaClassPtrAlias) {
- MetaClassPtrAlias->setAliasee(
+ MetaClassPtrAlias->replaceAllUsesWith(
llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
MetaClassPtrAlias = 0;
}
@@ -1818,8 +1829,6 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
Params.push_back(IntTy);
@@ -1833,8 +1842,6 @@ llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
llvm::Function *CGObjCGNU::GetPropertySetFunction() {
std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
Params.push_back(IntTy);
@@ -1848,9 +1855,31 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() {
"objc_setProperty"));
}
-// FIXME. Implement this.
-llvm::Function *CGObjCGNU::GetCopyStructFunction() {
- return 0;
+llvm::Function *CGObjCGNU::GetGetStructFunction() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrDiffTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // objc_getPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getPropertyStruct"));
+}
+llvm::Function *CGObjCGNU::GetSetStructFunction() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ Params.push_back(PtrTy);
+ Params.push_back(PtrDiffTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // objc_setPropertyStruct (void*, void*, ptrdiff_t, BOOL, BOOL)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setPropertyStruct"));
}
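Both struct-property helpers register the same void prototype over (void *, void *, ptrdiff_t, BOOL, BOOL); only the symbol differs. Assuming the GNU runtime exports them, the C-level declarations would read:

  void objc_getPropertyStruct(void *dest, void *src, ptrdiff_t size,
                              BOOL atomic, BOOL strong);
  void objc_setPropertyStruct(void *dest, void *src, ptrdiff_t size,
                              BOOL atomic, BOOL strong);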
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
@@ -2008,7 +2037,7 @@ void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
}
@@ -2142,12 +2171,18 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// when linked against code which isn't (most of the time).
llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
if (!IvarOffsetPointer) {
- uint64_t Offset;
- if (ObjCImplementationDecl *OID =
- CGM.getContext().getObjCImplementation(
+ // This will cause a run-time crash if we accidentally use it. A value of
+ // 0 would seem more sensible, but will silently overwrite the isa pointer
+ // causing a great deal of confusion.
+ uint64_t Offset = -1;
+ // We can't call ComputeIvarBaseOffset() here if we have the
+ // implementation, because it will create an invalid ASTRecordLayout object
+ // that we are then stuck with forever, so we only initialize the ivar
+ // offset variable with a guess if we only have the interface. The
+ // initializer will be reset later anyway, when we are generating the class
+ // description.
+ if (!CGM.getContext().getObjCImplementation(
const_cast<ObjCInterfaceDecl *>(ID)))
- Offset = ComputeIvarBaseOffset(CGM, OID, Ivar);
- else
Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
llvm::ConstantInt *OffsetGuess =
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index 54d0ff2..7c679b9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -16,7 +16,8 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -77,6 +78,7 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
++Index;
}
assert(Index != Ivars.size() && "Ivar is not inside container!");
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
return RL->getFieldOffset(Index);
}
@@ -129,7 +131,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
// a synthesized ivar can never be a bit-field, so this is safe.
const ASTRecordLayout &RL =
CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
- uint64_t TypeSizeInBits = RL.getSize();
+ uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
uint64_t BitOffset = FieldBitOffset % 8;
uint64_t ContainingTypeAlign = 8;
@@ -462,7 +464,7 @@ public:
// void objc_exception_rethrow(void)
std::vector<const llvm::Type*> Args;
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
}
@@ -999,8 +1001,8 @@ public:
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &);
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
};
@@ -1156,7 +1158,8 @@ public:
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
- virtual llvm::Constant *GetCopyStructFunction();
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *GetSetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -1401,7 +1404,10 @@ public:
return ObjCTypes.getSetPropertyFn();
}
- virtual llvm::Constant *GetCopyStructFunction() {
+ virtual llvm::Constant *GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -1520,7 +1526,7 @@ llvm::Constant *CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
- CGM.GetAddrOfConstantNSString(SL));
+ CGM.GetAddrOfConstantString(SL));
}
/// Generates a message send where the super is the receiver. This is
@@ -1662,57 +1668,76 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
return Qualifiers::GCNone;
}
-llvm::Constant *CGObjCCommonMac::GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &DeclRefs) {
- llvm::Constant *NullPtr =
+llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if ((CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ||
- DeclRefs.empty())
- return NullPtr;
+
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return nullPtr;
+
bool hasUnion = false;
SkipIvars.clear();
IvarsInfo.clear();
unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
- for (size_t i = 0; i < DeclRefs.size(); ++i) {
- const BlockDeclRefExpr *BDRE = DeclRefs[i];
- const ValueDecl *VD = BDRE->getDecl();
- CharUnits Offset = CGF.BlockDecls[VD];
- uint64_t FieldOffset = Offset.getQuantity();
- QualType Ty = VD->getType();
- assert(!Ty->isArrayType() &&
- "Array block variable should have been caught");
- if ((Ty->isRecordType() || Ty->isUnionType()) && !BDRE->isByRef()) {
- BuildAggrIvarRecordLayout(Ty->getAs<RecordType>(),
- FieldOffset,
- true,
- hasUnion);
+ // __isa is the first field in the block descriptor and, by the runtime's
+ // convention, is assumed to be GC'able.
+ IvarsInfo.push_back(GC_IVAR(0, 1));
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
+
+ // __block variables are passed by their descriptor address.
+ if (ci->isByRef()) {
+ IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
+ continue;
+ }
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion);
continue;
}
- Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), Ty);
- unsigned FieldSize = CGM.getContext().getTypeSize(Ty);
- // __block variables are passed by their descriptior address. So, size
- // must reflect this.
- if (BDRE->isByRef())
- FieldSize = WordSizeInBits;
- if (GCAttr == Qualifiers::Strong || BDRE->isByRef())
- IvarsInfo.push_back(GC_IVAR(FieldOffset,
- FieldSize / WordSizeInBits));
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
+ unsigned fieldSize = CGM.getContext().getTypeSize(type);
+
+ if (GCAttr == Qualifiers::Strong)
+ IvarsInfo.push_back(GC_IVAR(fieldOffset,
+ fieldSize / WordSizeInBits));
else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
- SkipIvars.push_back(GC_IVAR(FieldOffset,
- FieldSize / ByteSizeInBits));
+ SkipIvars.push_back(GC_IVAR(fieldOffset,
+ fieldSize / ByteSizeInBits));
}
if (IvarsInfo.empty())
- return NullPtr;
- // Sort on byte position in case we encounterred a union nested in
- // block variable type's aggregate type.
- if (hasUnion && !IvarsInfo.empty())
- std::sort(IvarsInfo.begin(), IvarsInfo.end());
- if (hasUnion && !SkipIvars.empty())
- std::sort(SkipIvars.begin(), SkipIvars.end());
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
+ llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end());
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
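For intuition, a hypothetical block capturing one strong object pointer and one plain int would be classified by the capture walk roughly as follows; real offsets come from the StructureType layout queried above:

  struct block_literal_sketch {   // hypothetical layout, not an ABI definition
    void *isa;                    // word 0: always scanned, GC_IVAR(0, 1)
    int flags;
    int reserved;
    void (*invoke)(void *);
    void *descriptor;
    id strongCapture;             // Qualifiers::Strong  -> IvarsInfo
    int plainCapture;             // Qualifiers::GCNone  -> SkipIvars
  };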
@@ -2189,10 +2214,10 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getNumIvarInitializers())
Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
- CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
// FIXME: Set CXX-structors flag.
- if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
@@ -2277,7 +2302,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
unsigned Flags = eClassFlags_Meta;
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
- if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
std::vector<llvm::Constant*> Values(12);
@@ -2578,7 +2603,10 @@ llvm::Constant *CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
-llvm::Constant *CGObjCMac::GetCopyStructFunction() {
+llvm::Constant *CGObjCMac::GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+llvm::Constant *CGObjCMac::GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -2968,6 +2996,10 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
+ // A slot containing the exception to rethrow. Only needed when we
+ // have both a @catch and a @finally.
+ llvm::Value *PropagatingExnVar = 0;
+
// Push a normal cleanup to leave the try scope.
CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
SyncArgSlot,
@@ -3039,6 +3071,12 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::BasicBlock *CatchBlock = 0;
llvm::BasicBlock *CatchHandler = 0;
if (HasFinally) {
+ // Save the currently-propagating exception before
+ // objc_exception_try_enter clears the exception slot.
+ PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ "propagating_exception");
+ CGF.Builder.CreateStore(Caught, PropagatingExnVar);
+
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
@@ -3090,7 +3128,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
if (CatchParam) {
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
// These types work out because ConvertType(id) == i8*.
@@ -3134,7 +3172,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// the end of the catch body.
CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
// Initialize the catch variable.
@@ -3173,6 +3211,15 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// the try's write hazard and here.
//Hazards.emitWriteHazard();
+ // Extract the new exception and save it to the
+ // propagating-exception slot.
+ assert(PropagatingExnVar);
+ llvm::CallInst *NewCaught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ NewCaught->setDoesNotThrow();
+ CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
+
// Don't pop the catch handler; the throw already did.
CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
@@ -3193,13 +3240,21 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
CGF.EmitBlock(FinallyRethrow.getBlock(), true);
if (CGF.HaveInsertPoint()) {
- // Just look in the buffer for the exception to throw.
- llvm::CallInst *Caught =
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData);
- Caught->setDoesNotThrow();
+ // If we have a propagating-exception variable, check it.
+ llvm::Value *PropagatingExn;
+ if (PropagatingExnVar) {
+ PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
+
+ // Otherwise, just look in the buffer for the exception to throw.
+ } else {
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+ PropagatingExn = Caught;
+ }
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), Caught)
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), PropagatingExn)
->setDoesNotThrow();
CGF.Builder.CreateUnreachable();
}
@@ -3586,10 +3641,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
uint64_t MaxSkippedUnionIvarSize = 0;
FieldDecl *MaxField = 0;
FieldDecl *MaxSkippedField = 0;
- FieldDecl *LastFieldBitfield = 0;
+ FieldDecl *LastFieldBitfieldOrUnnamed = 0;
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
- uint64_t LastBitfieldOffset = 0;
+ uint64_t LastBitfieldOrUnnamedOffset = 0;
if (RecFields.empty())
return;
@@ -3609,12 +3664,12 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
// Skip over unnamed or bitfields
if (!Field->getIdentifier() || Field->isBitField()) {
- LastFieldBitfield = Field;
- LastBitfieldOffset = FieldOffset;
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
continue;
}
- LastFieldBitfield = 0;
+ LastFieldBitfieldOrUnnamed = 0;
QualType FQT = Field->getType();
if (FQT->isRecordType() || FQT->isUnionType()) {
if (FQT->isUnionType())
@@ -3641,7 +3696,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
assert(!FQT->isUnionType() &&
"layout for array of unions not supported");
- if (FQT->isRecordType()) {
+ if (FQT->isRecordType() && ElCount) {
int OldIndex = IvarsInfo.size() - 1;
int OldSkIndex = SkipIvars.size() -1;
@@ -3703,16 +3758,25 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
}
}
- if (LastFieldBitfield) {
- // Last field was a bitfield. Must update skip info.
- Expr *BitWidth = LastFieldBitfield->getBitWidth();
- uint64_t BitFieldSize =
- BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
- GC_IVAR skivar;
- skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
- skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
- + ((BitFieldSize % ByteSizeInBits) != 0);
- SkipIvars.push_back(skivar);
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfieldOrUnnamed->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() && "Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ unsigned FieldSize
+ = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType());
+ SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize / ByteSizeInBits));
+ }
}
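
The skip-info size above is a bit count rounded up to whole bytes. A minimal standalone restatement of that arithmetic (a sketch, not clang code):

    #include <cassert>
    #include <cstdint>

    // Mirrors (BitFieldSize / ByteSizeInBits) + ((BitFieldSize % ByteSizeInBits) != 0)
    // from the hunk above: ceiling division of bits into bytes.
    static uint64_t bitsToBytesCeil(uint64_t bits, uint64_t byteSizeInBits = 8) {
      return bits / byteSizeInBits + (bits % byteSizeInBits != 0);
    }

    int main() {
      assert(bitsToBytesCeil(1) == 1);  // a 1-bit field still skips a byte
      assert(bitsToBytesCeil(8) == 1);
      assert(bitsToBytesCeil(9) == 2);  // spills into a second byte
    }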
if (MaxField)
@@ -4886,7 +4950,7 @@ void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
CGM.getContext().getASTObjCImplementationLayout(OID);
// InstanceSize is really instance end.
- InstanceSize = llvm::RoundUpToAlignment(RL.getDataSize(), 8) / 8;
+ InstanceSize = RL.getDataSize().getQuantity();
// If there are no fields, the start is the same as the end.
if (!RL.getFieldCount())
@@ -4927,7 +4991,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::GlobalVariable *SuperClassGV, *IsAGV;
bool classIsHidden =
- CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
if (ID->getNumIvarInitializers())
@@ -5222,7 +5286,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
// well (i.e., in ObjCIvarOffsetVariable).
if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
Ivar->getAccessControl() == ObjCIvarDecl::Package ||
- CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ ID->getVisibility() == HiddenVisibility)
IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
else
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
@@ -6096,7 +6160,7 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.EmitAutoVarDecl(*CatchParam);
CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
}
@@ -6124,32 +6188,20 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
/// EmitThrowStmt - Generate code for a throw statement.
void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
- llvm::Value *Exception;
- llvm::Constant *FunctionThrowOrRethrow;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
- Exception = CGF.EmitScalarExpr(ThrowExpr);
- FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn();
- } else {
- assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
- "Unexpected rethrow outside @catch block.");
- Exception = CGF.ObjCEHValueStack.back();
- FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn();
- }
-
- llvm::Value *ExceptionAsObject =
- CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
- llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
- if (InvokeDest) {
- CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
- CGF.getUnreachableBlock(), InvokeDest,
- &ExceptionAsObject, &ExceptionAsObject + 1);
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy,
+ "tmp");
+ llvm::Value *Args[] = { Exception };
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(),
+ Args, Args+1)
+ .setDoesNotReturn();
} else {
- CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
- ->setDoesNotReturn();
- CGF.Builder.CreateUnreachable();
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn(), 0, 0)
+ .setDoesNotReturn();
}
- // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
@@ -6208,7 +6260,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
ID->getIdentifier()->getName()));
}
- if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ if (CGM.getLangOptions().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
ObjCTypes.EHTypeTy));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index 584760f..5ad3a50 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -56,6 +56,7 @@ namespace CodeGen {
namespace CodeGen {
class CodeGenModule;
+ class CGBlockInfo;
// FIXME: Several methods should be pure virtual but aren't, to avoid
// breaking partially-implemented subclasses.
@@ -176,8 +177,10 @@ public:
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
- // API for atomic copying of qualified aggregates in setter/getter.
- virtual llvm::Constant *GetCopyStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in getter.
+ virtual llvm::Constant *GetGetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter.
+ virtual llvm::Constant *GetSetStructFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
@@ -219,9 +222,8 @@ public:
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
llvm::Value *Size) = 0;
- virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
- const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) = 0;
-
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index 60df613..7ec0ee4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -30,6 +30,10 @@ class RTTIBuilder {
/// Fields - The fields of the RTTI descriptor currently being built.
llvm::SmallVector<llvm::Constant *, 16> Fields;
+ /// GetAddrOfTypeName - Returns the mangled type name of the given type.
+ llvm::GlobalVariable *
+ GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
+
/// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
/// descriptor of the given type.
llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
@@ -59,64 +63,10 @@ class RTTIBuilder {
void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
public:
- RTTIBuilder(CodeGenModule &cgm)
- : CGM(cgm), VMContext(cgm.getModule().getContext()),
- Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
-
- llvm::Constant *BuildName(QualType Ty, bool Hidden,
- llvm::GlobalVariable::LinkageTypes Linkage) {
- llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, OutName);
- llvm::StringRef Name = OutName.str();
-
- llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
- if (OGV && !OGV->isDeclaration())
- return llvm::ConstantExpr::getBitCast(OGV, Int8PtrTy);
-
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Name.substr(4));
-
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, Linkage,
- C, Name);
- if (OGV) {
- GV->takeName(OGV);
- llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV,
- OGV->getType());
- OGV->replaceAllUsesWith(NewPtr);
- OGV->eraseFromParent();
- }
- if (Hidden)
- GV->setVisibility(llvm::GlobalVariable::HiddenVisibility);
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
- }
+ RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
+ VMContext(CGM.getModule().getContext()),
+ Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
- // FIXME: unify with DecideExtern
- bool DecideHidden(QualType Ty) {
- // For this type, see if all components are never hidden.
- if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
- return (DecideHidden(MPT->getPointeeType())
- && DecideHidden(QualType(MPT->getClass(), 0)));
- if (const PointerType *PT = Ty->getAs<PointerType>())
- return DecideHidden(PT->getPointeeType());
- if (const FunctionType *FT = Ty->getAs<FunctionType>()) {
- if (DecideHidden(FT->getResultType()) == false)
- return false;
- if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()) {
- for (unsigned i = 0; i <FPT->getNumArgs(); ++i)
- if (DecideHidden(FPT->getArgType(i)) == false)
- return false;
- for (unsigned i = 0; i <FPT->getNumExceptions(); ++i)
- if (DecideHidden(FPT->getExceptionType(i)) == false)
- return false;
- return true;
- }
- }
- if (const RecordType *RT = Ty->getAs<RecordType>())
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return CGM.getDeclVisibilityMode(RD) == LangOptions::Hidden;
- return false;
- }
-
// Pointer type info flags.
enum {
/// PTI_Const - Type has const qualifier.
@@ -162,10 +112,34 @@ public:
};
}
+llvm::GlobalVariable *
+RTTIBuilder::GetAddrOfTypeName(QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
+ llvm::Constant *Init = llvm::ConstantArray::get(VMContext, Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+
+ GV->setInitializer(Init);
+
+ return GV;
+}
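
The substr(4) relies on the Itanium mangling scheme: the typeinfo-name symbol is the mangled type prefixed by the four characters "_ZTS". A small illustration (the class name Foo is an assumed example):

    #include <cassert>
    #include <string>

    int main() {
      // Itanium ABI: typeinfo-name symbol = "_ZTS" + <mangled type>.
      std::string typeNameSymbol = "_ZTS3Foo";            // for a class Foo
      std::string mangledType = typeNameSymbol.substr(4); // what gets stored
      assert(mangledType == "3Foo");
    }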
+
llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
// Look for an existing global.
@@ -187,15 +161,17 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
// Basic type information (e.g. for "int", "bool", etc.) will be kept in
// the run-time support library. Specifically, the run-time support
// library should contain type_info objects for the types X, X* and
- // X const*, for every X in: void, bool, wchar_t, char, unsigned char,
- // signed char, short, unsigned short, int, unsigned int, long,
- // unsigned long, long long, unsigned long long, float, double, long double,
- // char16_t, char32_t, and the IEEE 754r decimal and half-precision
- // floating point types.
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
switch (Ty->getKind()) {
case BuiltinType::Void:
+ case BuiltinType::NullPtr:
case BuiltinType::Bool:
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Char_U:
case BuiltinType::Char_S:
case BuiltinType::UChar:
@@ -219,12 +195,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::UndeducedAuto:
assert(false && "Should not see this type here!");
- case BuiltinType::NullPtr:
- assert(false && "FIXME: nullptr_t is not handled!");
-
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
@@ -270,8 +242,9 @@ static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
/// the given type exists somewhere else, and that we should not emit the type
/// information in this translation unit. Assumes that it is not a
/// standard-library type.
-static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
- QualType Ty) {
+static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
+ ASTContext &Context = CGM.getContext();
+
// If RTTI is disabled, don't consider key functions.
if (!Context.getLangOptions().RTTI) return false;
@@ -283,13 +256,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
if (!RD->isDynamicClass())
return false;
- // Get the key function.
- const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- if (KeyFunction && !KeyFunction->hasBody()) {
- // The class has a key function, but it is not defined in this translation
- // unit, so we should use the external descriptor for it.
- return true;
- }
+ return !CGM.getVTables().ShouldEmitVTableInThisTU(RD);
}
return false;
@@ -335,7 +302,8 @@ static bool ContainsIncompleteClassType(QualType Ty) {
/// getTypeInfoLinkage - Return the linkage that the type info and type info
/// name constants should have for the given type.
-static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
+static llvm::GlobalVariable::LinkageTypes
+getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
// Itanium C++ ABI 2.9.5p7:
// In addition, it and all of the intermediate abi::__pointer_type_info
// structs in the chain down to the abi::__class_type_info for the
@@ -355,16 +323,22 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
return llvm::GlobalValue::InternalLinkage;
case ExternalLinkage:
+ if (!CGM.getLangOptions().RTTI) {
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
if (RD->isDynamicClass())
- return CodeGenModule::getVTableLinkage(RD);
+ return CGM.getVTableLinkage(RD);
}
- return llvm::GlobalValue::WeakODRLinkage;
+ return llvm::GlobalValue::LinkOnceODRLinkage;
}
- return llvm::GlobalValue::WeakODRLinkage;
+ return llvm::GlobalValue::LinkOnceODRLinkage;
}
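
The new RTTI-disabled branch matters because exception handling still references type_info objects even under -fno-rtti. A source-level sketch of the case it covers:

    // Built with -fno-rtti but with exceptions enabled: no translation unit
    // can be relied on to "own" the typeinfo for E, yet the throw must still
    // reference it. Emitting the descriptor linkonce_odr lets every TU define
    // it and the linker fold the copies.
    struct E { };

    void fail() {
      throw E(); // requires typeinfo for E even though RTTI is off
    }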
// CanUseSingleInheritance - Return whether the given record decl has a "single,
@@ -513,23 +487,81 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
Fields.push_back(VTable);
}
+// maybeUpdateRTTILinkage - Will update the linkage of the RTTI data structures
+// from available_externally to the correct linkage if necessary. An example of
+// this is:
+//
+// struct A {
+// virtual void f();
+// };
+//
+// const std::type_info &g() {
+// return typeid(A);
+// }
+//
+// void A::f() { }
+//
+// When we're generating the typeid(A) expression, we do not yet know that
+// A's key function is defined in this translation unit, so we will give the
+// typeinfo and typename structures available_externally linkage. When A::f
+// forces the vtable to be generated, we need to change the linkage of the
+// typeinfo and typename structs, otherwise we'll end up with undefined
+// externals when linking.
+static void
+maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
+ QualType Ty) {
+ // We're only interested in globals with available_externally linkage.
+ if (!GV->hasAvailableExternallyLinkage())
+ return;
+
+ // Get the real linkage for the type.
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // If the variable is supposed to have available_externally linkage,
+ // we don't need to do anything.
+ if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
+ return;
+
+ // Update the typeinfo linkage.
+ GV->setLinkage(Linkage);
+
+ // Get the typename global.
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
+
+ assert(TypeNameGV->hasAvailableExternallyLinkage() &&
+ "Type name has different linkage from type info!");
+
+ // And update its linkage.
+ TypeNameGV->setLinkage(Linkage);
+}
+
llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
-
+
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
- if (OldGV && !OldGV->isDeclaration())
+ if (OldGV && !OldGV->isDeclaration()) {
+ maybeUpdateRTTILinkage(CGM, OldGV, Ty);
+
return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+ }
// Check if there is already an external RTTI descriptor for this type.
bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
- if (!Force &&
- (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty)))
+ if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
return GetAddrOfExternalRTTIDescriptor(Ty);
// Emit the standard library with external linkage.
@@ -537,13 +569,16 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
if (IsStdLib)
Linkage = llvm::GlobalValue::ExternalLinkage;
else
- Linkage = getTypeInfoLinkage(Ty);
+ Linkage = getTypeInfoLinkage(CGM, Ty);
// Add the vtable pointer.
BuildVTablePointer(cast<Type>(Ty));
// And the name.
- Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
+ llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -641,13 +676,28 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// type_infos themselves, so we can emit these as hidden symbols.
// But don't do this if we're worried about strict visibility
// compatibility.
- if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- CGM.setTypeVisibility(GV, cast<CXXRecordDecl>(RT->getDecl()),
- /*ForRTTI*/ true);
- else if (CGM.getCodeGenOpts().HiddenWeakVTables &&
- Linkage == llvm::GlobalValue::WeakODRLinkage)
- GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForRTTI);
+ CGM.setTypeVisibility(TypeName, RD, CodeGenModule::TVK_ForRTTIName);
+ } else {
+ Visibility TypeInfoVisibility = DefaultVisibility;
+ if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ TypeInfoVisibility = HiddenVisibility;
+
+ // The type name should have the same visibility as the type itself.
+ Visibility ExplicitVisibility = Ty->getVisibility();
+ TypeName->setVisibility(CodeGenModule::
+ GetLLVMVisibility(ExplicitVisibility));
+
+ TypeInfoVisibility = minVisibility(TypeInfoVisibility, Ty->getVisibility());
+ GV->setVisibility(CodeGenModule::GetLLVMVisibility(TypeInfoVisibility));
+ }
+
+ GV->setUnnamedAddr(true);
+
return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
}
@@ -701,12 +751,14 @@ void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
Fields.push_back(BaseTypeInfo);
}
-/// SeenBases - Contains virtual and non-virtual bases seen when traversing
-/// a class hierarchy.
-struct SeenBases {
- llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
- llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
-};
+namespace {
+ /// SeenBases - Contains virtual and non-virtual bases seen when traversing
+ /// a class hierarchy.
+ struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+ };
+}
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
@@ -827,7 +879,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
+ OffsetFlags = Layout.getBaseClassOffsetInBits(BaseDecl) / 8;
};
OffsetFlags <<= 8;
@@ -938,16 +990,16 @@ void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
}
void CodeGenModule::EmitFundamentalRTTIDescriptors() {
- QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
- Context.Char16Ty, Context.UnsignedLongLongTy,
- Context.LongLongTy, Context.WCharTy,
- Context.UnsignedShortTy, Context.ShortTy,
- Context.UnsignedLongTy, Context.LongTy,
- Context.UnsignedIntTy, Context.IntTy,
- Context.UnsignedCharTy, Context.FloatTy,
- Context.LongDoubleTy, Context.DoubleTy,
- Context.CharTy, Context.BoolTy,
- Context.SignedCharTy };
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.NullPtrTy,
+ Context.BoolTy, Context.WCharTy,
+ Context.CharTy, Context.UnsignedCharTy,
+ Context.SignedCharTy, Context.ShortTy,
+ Context.UnsignedShortTy, Context.IntTy,
+ Context.UnsignedIntTy, Context.LongTy,
+ Context.UnsignedLongTy, Context.LongLongTy,
+ Context.UnsignedLongLongTy, Context.FloatTy,
+ Context.DoubleTy, Context.LongDoubleTy,
+ Context.Char16Ty, Context.Char32Ty };
for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
index 9b4e9f8..30da05f 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -11,10 +11,11 @@
#define CLANG_CODEGEN_CGRECORDLAYOUT_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/DerivedTypes.h"
#include "clang/AST/Decl.h"
namespace llvm {
class raw_ostream;
- class Type;
+ class StructType;
}
namespace clang {
@@ -172,8 +173,13 @@ class CGRecordLayout {
void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
private:
- /// The LLVMType corresponding to this record layout.
- const llvm::Type *LLVMType;
+ /// The LLVM type corresponding to this record layout; used when
+ /// laying it out as a complete object.
+ llvm::PATypeHolder CompleteObjectType;
+
+ /// The LLVM type for the non-virtual part of this record layout;
+ /// used when laying it out as a base subobject.
+ llvm::PATypeHolder BaseSubobjectType;
/// Map from (non-bit-field) struct field to the corresponding llvm struct
/// type field no. This info is populated by record builder.
@@ -185,19 +191,41 @@ private:
// FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
// map for both virtual and non virtual bases.
- llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBaseFields;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
- /// Whether one of the fields in this record layout is a pointer to data
- /// member, or a struct that contains pointer to data member.
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a complete object, requires a non-zero bitpattern
+ /// when zero-initialized.
bool IsZeroInitializable : 1;
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a base subobject, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializableAsBase : 1;
+
public:
- CGRecordLayout(const llvm::Type *T, bool IsZeroInitializable)
- : LLVMType(T), IsZeroInitializable(IsZeroInitializable) {}
+ CGRecordLayout(const llvm::StructType *CompleteObjectType,
+ const llvm::StructType *BaseSubobjectType,
+ bool IsZeroInitializable,
+ bool IsZeroInitializableAsBase)
+ : CompleteObjectType(CompleteObjectType),
+ BaseSubobjectType(BaseSubobjectType),
+ IsZeroInitializable(IsZeroInitializable),
+ IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+ /// \brief Return the "complete object" LLVM type associated with
+ /// this record.
+ const llvm::StructType *getLLVMType() const {
+ return cast<llvm::StructType>(CompleteObjectType.get());
+ }
- /// \brief Return the LLVM type associated with this record.
- const llvm::Type *getLLVMType() const {
- return LLVMType;
+ /// \brief Return the "base subobject" LLVM type associated with
+ /// this record.
+ const llvm::StructType *getBaseSubobjectLLVMType() const {
+ return cast<llvm::StructType>(BaseSubobjectType.get());
}
/// \brief Check whether this struct can be C++ zero-initialized
@@ -206,6 +234,12 @@ public:
return IsZeroInitializable;
}
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer when considered as a base subobject.
+ bool isZeroInitializableAsBase() const {
+ return IsZeroInitializableAsBase;
+ }
+
/// \brief Return llvm::StructType element number that corresponds to the
/// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
@@ -215,8 +249,15 @@ public:
}
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
- assert(NonVirtualBaseFields.count(RD) && "Invalid non-virtual base!");
- return NonVirtualBaseFields.lookup(RD);
+ assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBases.lookup(RD);
+ }
+
+ /// \brief Return the LLVM field index corresponding to the given
+ /// virtual base. Only valid when operating on the complete object.
+ unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
+ assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
+ return CompleteObjectVirtualBases.lookup(base);
}
/// \brief Return the BitFieldInfo that corresponds to the field FD.
@@ -224,7 +265,7 @@ public:
assert(FD->isBitField() && "Invalid call for non bit-field decl!");
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
it = BitFields.find(FD);
- assert(it != BitFields.end() && "Unable to find bitfield info");
+ assert(it != BitFields.end() && "Unable to find bitfield info");
return it->second;
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 77a319f..4b19aef 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -14,6 +14,7 @@
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
@@ -27,28 +28,52 @@
using namespace clang;
using namespace CodeGen;
-namespace clang {
-namespace CodeGen {
+namespace {
class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
+ ///
std::vector<const llvm::Type *> FieldTypes;
- /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
- typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
- llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
-
- /// LLVMBitFieldInfo - Holds location and size information about a bit field.
- typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
- llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
-
- typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
- llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
+ /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
+ /// of the struct. For example, consider:
+ ///
+ /// struct A { int i; };
+ /// struct B { void *v; };
+ /// struct C : virtual A, B { };
+ ///
+ /// The LLVM type of C will be
+ /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
+ ///
+ /// And the LLVM type of the non-virtual base struct will be
+ /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
+ ///
+ /// This only gets initialized if the base subobject type is
+ /// different from the complete-object type.
+ const llvm::StructType *BaseSubobjectType;
+
+ /// Fields - Maps each field to its corresponding LLVM field number.
+ llvm::DenseMap<const FieldDecl *, unsigned> Fields;
+
+ /// BitFields - Holds location and size information about a bit field.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
+ /// avoid laying out virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
/// IsZeroInitializable - Whether this struct can be C++
/// zero-initialized with an LLVM zeroinitializer.
bool IsZeroInitializable;
+ bool IsZeroInitializableAsBase;
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
@@ -59,18 +84,14 @@ private:
/// Alignment - Contains the alignment of the RecordDecl.
//
// FIXME: This is not needed and should be removed.
- unsigned Alignment;
-
- /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
- /// LLVM types.
- unsigned AlignmentAsLLVMStruct;
+ CharUnits Alignment;
/// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
/// this will have the number of bits still available in the field.
char BitsAvailableInLastField;
- /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
- uint64_t NextFieldOffsetInBytes;
+ /// NextFieldOffset - Holds the next field offset.
+ CharUnits NextFieldOffset;
/// LayoutUnionField - Will layout a field in a union and return the type
/// that the field will have.
@@ -84,14 +105,30 @@ private:
/// Returns false if the operation failed because the struct is not packed.
bool LayoutFields(const RecordDecl *D);
+ /// Layout a single base, virtual or non-virtual.
+ void LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBase - layout a single virtual base.
+ void LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBases - layout the virtual bases of a record decl.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
/// LayoutNonVirtualBase - layout a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
- uint64_t BaseOffset);
+ void LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
- /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
void LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
+ /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
+ bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
+
/// LayoutField - layout a single field. Returns false if the operation failed
/// because the current struct is not packed.
bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
@@ -100,41 +137,47 @@ private:
void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
/// AppendField - Appends a field with the given offset and type.
- void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+ void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
- void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+ void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
+ /// getByteArrayType - Returns a byte array type with the given number of
+ /// elements.
+ const llvm::Type *getByteArrayType(CharUnits NumBytes);
+
/// AppendBytes - Append a given number of bytes to the record.
- void AppendBytes(uint64_t NumBytes);
+ void AppendBytes(CharUnits numBytes);
/// AppendTailPadding - Append enough tail padding so that the type will have
/// the passed size.
void AppendTailPadding(uint64_t RecordSize);
- unsigned getTypeAlignment(const llvm::Type *Ty) const;
+ CharUnits getTypeAlignment(const llvm::Type *Ty) const;
+
+ /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
+ /// LLVM element types.
+ CharUnits getAlignmentAsLLVMStruct() const;
/// CheckZeroInitializable - Check if the given type contains a pointer
/// to data member.
void CheckZeroInitializable(QualType T);
- void CheckZeroInitializable(const CXXRecordDecl *RD);
public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
- : IsZeroInitializable(true), Packed(false), Types(Types),
- Alignment(0), AlignmentAsLLVMStruct(1),
- BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+ : BaseSubobjectType(0),
+ IsZeroInitializable(true), IsZeroInitializableAsBase(true),
+ Packed(false), Types(Types), BitsAvailableInLastField(0) { }
/// Layout - Will layout a RecordDecl.
void Layout(const RecordDecl *D);
};
}
-}
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
- Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
Packed = D->hasAttr<PackedAttr>();
if (D->isUnion()) {
@@ -147,12 +190,12 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
// We weren't able to layout the struct. Try again with a packed struct
Packed = true;
- AlignmentAsLLVMStruct = 1;
- NextFieldOffsetInBytes = 0;
+ NextFieldOffset = CharUnits::Zero();
FieldTypes.clear();
- LLVMFields.clear();
- LLVMBitFields.clear();
- LLVMNonVirtualBases.clear();
+ Fields.clear();
+ BitFields.clear();
+ NonVirtualBases.clear();
+ VirtualBases.clear();
LayoutFields(D);
}
@@ -182,6 +225,12 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
FieldSize = TypeSizeInBits;
}
+ // On big-endian machines the first fields are in higher bit positions,
+ // so reverse the bit offset. The byte offsets are reversed back later.
+ if (Types.getTargetData().isBigEndian()) {
+ FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
+ }
+
// Compute the access components. The policy we use is to start by attempting
// to access using the width of the bit-field type itself and to always access
// at aligned indices of that type. If such an access would fail because it
@@ -189,7 +238,6 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// power of two and retry. The current algorithm assumes pow2 sized types,
// although this is easy to fix.
//
- // FIXME: This algorithm is wrong on big-endian systems, I think.
assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
CGBitFieldInfo::AccessInfo Components[3];
unsigned NumComponents = 0;
@@ -237,7 +285,15 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
// FIXME: We still follow the old access pattern of only using the field
// byte offset. We should switch this once we fix the struct layout to be
// pretty.
- AI.FieldByteOffset = AccessStart / 8;
+
+ // On big-endian machines we reversed the bit offset because the first fields
+ // are in higher bits. But this also reverses the bytes, so fix that here by
+ // reversing the byte offset on big-endian machines.
+ if (Types.getTargetData().isBigEndian()) {
+ AI.FieldByteOffset = (ContainingTypeSizeInBits - AccessStart - AccessWidth) / 8;
+ } else {
+ AI.FieldByteOffset = AccessStart / 8;
+ }
AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
AI.AccessWidth = AccessWidth;
AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
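
A worked instance of the mirroring above (illustrative arithmetic only, not clang code):

    #include <cassert>
    #include <cstdint>

    // On big-endian targets the first-declared bitfield occupies the highest
    // bits, so its offset inside the container is mirrored, as MakeInfo does.
    static uint64_t mirrorBitOffset(uint64_t containerBits,
                                    uint64_t fieldOffset, uint64_t fieldSize) {
      return containerBits - fieldOffset - fieldSize;
    }

    int main() {
      // struct { unsigned a : 4; unsigned b : 4; } in a 32-bit container:
      assert(mirrorBitOffset(32, 0, 4) == 28); // 'a' lands in the high nibble
      assert(mirrorBitOffset(32, 4, 4) == 24); // 'b' sits just below it
    }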
@@ -258,56 +314,54 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize) {
const RecordDecl *RD = FD->getParent();
const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
- uint64_t ContainingTypeSizeInBits = RL.getSize();
- unsigned ContainingTypeAlign = RL.getAlignment();
+ uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
+ unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
ContainingTypeAlign);
}
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
- uint64_t FieldOffset) {
- uint64_t FieldSize =
+ uint64_t fieldOffset) {
+ uint64_t fieldSize =
D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
- if (FieldSize == 0)
+ if (fieldSize == 0)
return;
- uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
- unsigned NumBytesToAppend;
+ uint64_t nextFieldOffsetInBits = NextFieldOffset.getQuantity() * 8;
+ unsigned numBytesToAppend;
- if (FieldOffset < NextFieldOffset) {
+ if (fieldOffset < nextFieldOffsetInBits) {
assert(BitsAvailableInLastField && "Bitfield size mismatch!");
- assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
+ assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
// The bitfield begins in the previous bit-field.
- NumBytesToAppend =
- llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
+ numBytesToAppend =
+ llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField, 8) / 8;
} else {
- assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
+ assert(fieldOffset % 8 == 0 && "Field offset not aligned correctly");
// Append padding if necessary.
- AppendBytes((FieldOffset - NextFieldOffset) / 8);
+ AppendPadding(CharUnits::fromQuantity(fieldOffset / 8), CharUnits::One());
- NumBytesToAppend =
- llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+ numBytesToAppend = llvm::RoundUpToAlignment(fieldSize, 8) / 8;
- assert(NumBytesToAppend && "No bytes to append!");
+ assert(numBytesToAppend && "No bytes to append!");
}
// Add the bit field info.
- LLVMBitFields.push_back(
- LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
- FieldSize)));
+ BitFields.insert(std::make_pair(D,
+ CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
- AppendBytes(NumBytesToAppend);
+ AppendBytes(CharUnits::fromQuantity(numBytesToAppend));
BitsAvailableInLastField =
- NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
+ NextFieldOffset.getQuantity() * 8 - (fieldOffset + fieldSize);
}
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
- uint64_t FieldOffset) {
+ uint64_t fieldOffset) {
// If the field is packed, then we need a packed struct.
if (!Packed && D->hasAttr<PackedAttr>())
return false;
@@ -318,54 +372,50 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (!Packed && !D->getDeclName())
return false;
- LayoutBitField(D, FieldOffset);
+ LayoutBitField(D, fieldOffset);
return true;
}
CheckZeroInitializable(D->getType());
- assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
- uint64_t FieldOffsetInBytes = FieldOffset / 8;
+ assert(fieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
+ CharUnits fieldOffsetInBytes = CharUnits::fromQuantity(fieldOffset / 8);
const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
- unsigned TypeAlignment = getTypeAlignment(Ty);
+ CharUnits typeAlignment = getTypeAlignment(Ty);
// If the type alignment is larger then the struct alignment, we must use
// a packed struct.
- if (TypeAlignment > Alignment) {
+ if (typeAlignment > Alignment) {
assert(!Packed && "Alignment is wrong even with packed struct!");
return false;
}
- if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
- const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
- if (const MaxFieldAlignmentAttr *MFAA =
- RD->getAttr<MaxFieldAlignmentAttr>()) {
- if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
- return false;
+ if (!Packed) {
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != typeAlignment.getQuantity() * 8)
+ return false;
+ }
}
}
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
+ CharUnits alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
- if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
assert(!Packed && "Could not place field even with packed struct!");
return false;
}
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
- // Even with alignment, the field offset is not at the right place,
- // insert padding.
- uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
-
- AppendBytes(PaddingInBytes);
- }
+ AppendPadding(fieldOffsetInBytes, typeAlignment);
// Now append the field.
- LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
- AppendField(FieldOffsetInBytes, Ty);
+ Fields[D] = FieldTypes.size();
+ AppendField(fieldOffsetInBytes, Ty);
return true;
}
@@ -389,95 +439,167 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
// Add the bit field info.
- LLVMBitFields.push_back(
- LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
- 0, FieldSize)));
+ BitFields.insert(std::make_pair(Field,
+ CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
return FieldTy;
}
// This is a regular union field.
- LLVMFields.push_back(LLVMFieldInfo(Field, 0));
+ Fields[Field] = 0;
return Types.ConvertTypeForMemRecursive(Field->getType());
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
- const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+ const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
- const llvm::Type *Ty = 0;
- uint64_t Size = 0;
- unsigned Align = 0;
+ const llvm::Type *unionType = 0;
+ CharUnits unionSize = CharUnits::Zero();
+ CharUnits unionAlign = CharUnits::Zero();
- bool HasOnlyZeroSizedBitFields = true;
+ bool hasOnlyZeroSizedBitFields = true;
- unsigned FieldNo = 0;
- for (RecordDecl::field_iterator Field = D->field_begin(),
- FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
- assert(Layout.getFieldOffset(FieldNo) == 0 &&
+ unsigned fieldNo = 0;
+ for (RecordDecl::field_iterator field = D->field_begin(),
+ fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
+ assert(layout.getFieldOffset(fieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
- const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);
+ const llvm::Type *fieldType = LayoutUnionField(*field, layout);
- if (!FieldTy)
+ if (!fieldType)
continue;
- HasOnlyZeroSizedBitFields = false;
+ hasOnlyZeroSizedBitFields = false;
- unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
- uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
+ CharUnits fieldAlign = CharUnits::fromQuantity(
+ Types.getTargetData().getABITypeAlignment(fieldType));
+ CharUnits fieldSize = CharUnits::fromQuantity(
+ Types.getTargetData().getTypeAllocSize(fieldType));
- if (FieldAlign < Align)
+ if (fieldAlign < unionAlign)
continue;
- if (FieldAlign > Align || FieldSize > Size) {
- Ty = FieldTy;
- Align = FieldAlign;
- Size = FieldSize;
+ if (fieldAlign > unionAlign || fieldSize > unionSize) {
+ unionType = fieldType;
+ unionAlign = fieldAlign;
+ unionSize = fieldSize;
}
}
// Now add our field.
- if (Ty) {
- AppendField(0, Ty);
+ if (unionType) {
+ AppendField(CharUnits::Zero(), unionType);
- if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
+ if (getTypeAlignment(unionType) > layout.getAlignment()) {
// We need a packed struct.
Packed = true;
- Align = 1;
+ unionAlign = CharUnits::One();
}
}
- if (!Align) {
- assert(HasOnlyZeroSizedBitFields &&
+ if (unionAlign.isZero()) {
+ assert(hasOnlyZeroSizedBitFields &&
"0-align record did not have all zero-sized bit-fields!");
- Align = 1;
+ unionAlign = CharUnits::One();
}
// Append tail padding.
- if (Layout.getSize() / 8 > Size)
- AppendPadding(Layout.getSize() / 8, Align);
+ CharUnits recordSize = layout.getSize();
+ if (recordSize > unionSize)
+ AppendPadding(recordSize, unionAlign);
+}
+
+void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset) {
+ AppendPadding(baseOffset, CharUnits::One());
+
+ const ASTRecordLayout &baseASTLayout
+ = Types.getContext().getASTRecordLayout(base);
+
+ // Fields and bases can be laid out in the tail padding of previous
+ // bases. If this happens, we need to allocate the base as an i8
+ // array; otherwise, we can use the subobject type. However,
+ // actually doing that would require knowledge of what immediately
+ // follows this base in the layout, so instead we do a conservative
+ // approximation, which is to use the base subobject type if it
+ // has the same LLVM storage size as the nvsize.
+
+ // The nvsize, i.e. the unpadded size of the base class.
+ CharUnits nvsize = baseASTLayout.getNonVirtualSize();
+
+#if 0
+ const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ const llvm::StructLayout *baseLLVMLayout =
+ Types.getTargetData().getStructLayout(subobjectType);
+ CharUnits stsize = CharUnits::fromQuantity(baseLLVMLayout->getSizeInBytes());
+
+ if (nvsize == stsize)
+ AppendField(baseOffset, subobjectType);
+ else
+#endif
+ AppendBytes(nvsize);
}
-void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
- uint64_t BaseOffset) {
- const ASTRecordLayout &Layout =
- Types.getContext().getASTRecordLayout(BaseDecl);
+void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return;
- uint64_t NonVirtualSize = Layout.getNonVirtualSize();
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializableAsBase) {
+ assert(IsZeroInitializable &&
+ "class zero-initializable as base but not as complete object");
- if (BaseDecl->isEmpty()) {
- // FIXME: Lay out empty bases.
- return;
+ IsZeroInitializable = IsZeroInitializableAsBase =
+ baseLayout.isZeroInitializableAsBase();
}
- CheckZeroInitializable(BaseDecl);
+ LayoutBase(base, baseLayout, baseOffset);
+ NonVirtualBases[base] = (FieldTypes.size() - 1);
+}
- // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
- AppendPadding(BaseOffset / 8, 1);
-
- // Append the base field.
- LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
+void
+CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializable)
+ IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
+
+ LayoutBase(base, baseLayout, baseOffset);
+ VirtualBases[base] = (FieldTypes.size() - 1);
+}
- AppendBytes(NonVirtualSize / 8);
+/// LayoutVirtualBases - layout the virtual bases of a record decl.
+void
+CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We only want to lay out virtual bases that aren't indirect primary bases
+ // of some other base.
+ if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
+ // Only lay out the base once.
+ if (!LaidOutVirtualBases.insert(BaseDecl))
+ continue;
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ LayoutVirtualBase(BaseDecl, vbaseOffset);
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ LayoutVirtualBases(BaseDecl, Layout);
+ }
}
void
@@ -493,13 +615,14 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
/*isVarArg=*/true);
const llvm::Type *VTableTy = FunctionType->getPointerTo();
- assert(NextFieldOffsetInBytes == 0 &&
+ assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
- AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
} else {
- // FIXME: Handle a virtual primary base.
- if (!Layout.getPrimaryBaseWasVirtual())
- LayoutNonVirtualBase(PrimaryBase, 0);
+ if (!Layout.isPrimaryBaseVirtual())
+ LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
+ else
+ LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
}
}
@@ -513,20 +636,61 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// We've already laid out the primary base.
- if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
+ if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
continue;
LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
}
}
+bool
+CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
+
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ // First check if we can use the same fields as for the complete class.
+ CharUnits RecordSize = Layout.getSize();
+ if (AlignedNonVirtualTypeSize == RecordSize)
+ return true;
+
+ // Check if we need padding.
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
+ assert(!Packed && "cannot layout even as packed struct");
+ return false; // Needs packing.
+ }
+
+ bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
+ if (needsPadding) {
+ CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
+ FieldTypes.push_back(getByteArrayType(NumBytes));
+ }
+
+ BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
+ FieldTypes, Packed);
+
+ if (needsPadding) {
+ // Pull the padding back off.
+ FieldTypes.pop_back();
+ }
+
+ return true;
+}
+
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
assert(!D->isUnion() && "Can't call LayoutFields on a union!");
- assert(Alignment && "Did not set alignment!");
+ assert(!Alignment.isZero() && "Did not set alignment!");
const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
+ if (RD)
LayoutNonVirtualBases(RD, Layout);
unsigned FieldNo = 0;
@@ -540,8 +704,23 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
}
}
+ if (RD) {
+ // We've laid out the non-virtual bases and the fields, now compute the
+ // non-virtual base field types.
+ if (!ComputeNonVirtualBaseType(RD)) {
+ assert(!Packed && "Could not layout even with a packed LLVM struct!");
+ return false;
+ }
+
+ // And lay out the virtual bases.
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+ LayoutVirtualBases(RD, Layout);
+ }
+
// Append tail padding if necessary.
- AppendTailPadding(Layout.getSize());
+ AppendTailPadding(Types.getContext().toBits(Layout.getSize()));
return true;
}
@@ -549,129 +728,137 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
assert(RecordSize % 8 == 0 && "Invalid record size!");
- uint64_t RecordSizeInBytes = RecordSize / 8;
- assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+ CharUnits RecordSizeInBytes = CharUnits::fromQuantity(RecordSize / 8);
+ assert(NextFieldOffset <= RecordSizeInBytes && "Size mismatch!");
- uint64_t AlignedNextFieldOffset =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
if (AlignedNextFieldOffset == RecordSizeInBytes) {
// We don't need any padding.
return;
}
- unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ CharUnits NumPadBytes = RecordSizeInBytes - NextFieldOffset;
AppendBytes(NumPadBytes);
}
-void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
- const llvm::Type *FieldTy) {
- AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
- getTypeAlignment(FieldTy));
+void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
+ const llvm::Type *fieldType) {
+ CharUnits fieldSize =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
- uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
+ FieldTypes.push_back(fieldType);
- FieldTypes.push_back(FieldTy);
-
- NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
+ NextFieldOffset = fieldOffset + fieldSize;
BitsAvailableInLastField = 0;
}
-void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
- unsigned FieldAlignment) {
- assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
+void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
+ CharUnits fieldAlignment) {
+ assert(NextFieldOffset <= fieldOffset &&
"Incorrect field layout!");
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ if (alignedNextFieldOffset < fieldOffset) {
// Even with alignment, the field offset is not at the right place,
// insert padding.
- uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+ CharUnits padding = fieldOffset - NextFieldOffset;
- AppendBytes(PaddingInBytes);
+ AppendBytes(padding);
}
}
-void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
- if (NumBytes == 0)
- return;
+const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+ assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ if (numBytes > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
+
+ return Ty;
+}
+
+void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
+ if (numBytes.isZero())
+ return;
// Append the padding field
- AppendField(NextFieldOffsetInBytes, Ty);
+ AppendField(NextFieldOffset, getByteArrayType(numBytes));
}
-unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
if (Packed)
- return 1;
+ return CharUnits::One();
- return Types.getTargetData().getABITypeAlignment(Ty);
+ return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}
+CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
+ if (Packed)
+ return CharUnits::One();
+
+ CharUnits maxAlignment = CharUnits::One();
+ for (size_t i = 0; i != FieldTypes.size(); ++i)
+ maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
+
+ return maxAlignment;
+}
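
CharUnits::RoundUpToAlignment, used throughout these hunks, is plain round-up-to-a-multiple arithmetic over byte quantities. An equivalent standalone sketch:

    #include <cassert>
    #include <cstdint>

    // Round offset up to the next multiple of align (align > 0): the byte-level
    // operation NextFieldOffset.RoundUpToAlignment(...) performs above.
    static int64_t roundUpToAlignment(int64_t offset, int64_t align) {
      return (offset + align - 1) / align * align;
    }

    int main() {
      assert(roundUpToAlignment(5, 4) == 8);
      assert(roundUpToAlignment(8, 4) == 8); // already aligned
      assert(roundUpToAlignment(0, 8) == 0);
    }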
+
+/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
// This record already contains a member pointer.
- if (!IsZeroInitializable)
+ if (!IsZeroInitializableAsBase)
return;
// Can only have member pointers if we're compiling C++.
if (!Types.getContext().getLangOptions().CPlusPlus)
return;
- T = Types.getContext().getBaseElementType(T);
+ const Type *elementType = T->getBaseElementTypeUnsafe();
- if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
if (!Types.getCXXABI().isZeroInitializable(MPT))
- IsZeroInitializable = false;
- } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- CheckZeroInitializable(RD);
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
}
}
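
CheckZeroInitializable now tracks zero-initializability separately for the complete object (IsZeroInitializable) and for base subobjects (IsZeroInitializableAsBase). The reason member pointers matter here is a runnable fact of the Itanium C++ ABI (e.g. x86-64 Linux): the null pointer-to-data-member is all-ones, not all-zeros, so a memset-to-zero would turn a null member pointer into a pointer to the first member.

    #include <cstdio>

    struct S { int a; int b; };

    int main() {
      int S::*null_pdm = 0;      // stored as -1 under the Itanium ABI
      int S::*first = &S::a;     // stored as offset 0
      // Prints 0: a zero bit pattern is NOT a null member pointer here.
      std::printf("null == first: %d\n", null_pdm == first);
      return 0;
    }
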
-void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
- // This record already contains a member pointer.
- if (!IsZeroInitializable)
- return;
-
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- Types.ConvertTagDeclType(RD);
-
- const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
-
- if (!Layout.isZeroInitializable())
- IsZeroInitializable = false;
-}
-
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
CGRecordLayoutBuilder Builder(*this);
Builder.Layout(D);
- const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
- Builder.FieldTypes,
- Builder.Packed);
+ const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
+ Builder.FieldTypes,
+ Builder.Packed);
+
+ // If we're in C++, compute the base subobject type.
+ const llvm::StructType *BaseTy = 0;
+ if (isa<CXXRecordDecl>(D)) {
+ BaseTy = Builder.BaseSubobjectType;
+ if (!BaseTy) BaseTy = Ty;
+ }
CGRecordLayout *RL =
- new CGRecordLayout(Ty, Builder.IsZeroInitializable);
+ new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
+ Builder.IsZeroInitializableAsBase);
- // Add all the non-virtual base field numbers.
- RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
- Builder.LLVMNonVirtualBases.end());
+ RL->NonVirtualBases.swap(Builder.NonVirtualBases);
+ RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
// Add all the field numbers.
- RL->FieldInfo.insert(Builder.LLVMFields.begin(),
- Builder.LLVMFields.end());
+ RL->FieldInfo.swap(Builder.Fields);
// Add bitfield info.
- RL->BitFields.insert(Builder.LLVMBitFields.begin(),
- Builder.LLVMBitFields.end());
+ RL->BitFields.swap(Builder.BitFields);
// Dump the layout, if requested.
if (getContext().getLangOptions().DumpRecordLayouts) {
@@ -684,10 +871,26 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
#ifndef NDEBUG
// Verify that the computed LLVM struct size matches the AST layout size.
- uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
+
+ uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
"Type size mismatch!");
+ if (BaseTy) {
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ uint64_t AlignedNonVirtualTypeSizeInBits =
+ getContext().toBits(AlignedNonVirtualTypeSize);
+
+ assert(AlignedNonVirtualTypeSizeInBits ==
+ getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ "Type size mismatch!");
+ }
+
// Verify that the LLVM and AST field offsets agree.
const llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
@@ -729,7 +932,9 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
void CGRecordLayout::print(llvm::raw_ostream &OS) const {
OS << "<CGRecordLayout\n";
- OS << " LLVMType:" << *LLVMType << "\n";
+ OS << " LLVMType:" << *CompleteObjectType << "\n";
+ if (BaseSubobjectType)
+ OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
OS << " BitFields:[\n";
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index 16145f7..cd23811 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
@@ -69,24 +70,54 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitStopPoint(S);
switch (S->getStmtClass()) {
- default:
- // Must be an expression in a stmt context. Emit the value (to get
- // side-effects) and ignore the result.
- if (!isa<Expr>(S))
- ErrorUnsupported(S, "statement");
-
- EmitAnyExpr(cast<Expr>(S), 0, false, true);
-
- // Expression emitters don't handle unreachable blocks yet, so look for one
- // explicitly here. This handles the common case of a call to a noreturn
- // function.
- if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty()) {
- CurBB->eraseFromParent();
- Builder.ClearInsertionPoint();
- }
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
}
break;
+ }
+
case Stmt::IndirectGotoStmtClass:
EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
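
A compilable illustration of the dead-block case the long comment above describes: after a call to a noreturn function, call emission leaves the builder in a fresh block with no predecessors, which this code erases so no empty block survives in the IR.

    #include <cstdlib>

    int f(int x) {
      if (x)
        std::exit(1);  // noreturn: the block opened after this call is dead
      return x;        // reached only when x == 0
    }
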
@@ -146,7 +177,7 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- llvm::Value *AggLoc, bool isAggVol) {
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -178,13 +209,13 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
// emitting them before we evaluate the subexpr.
const Stmt *LastStmt = S.body_back();
while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
- EmitLabel(*LS);
+ EmitLabel(LS->getDecl());
LastStmt = LS->getSubStmt();
}
EnsureInsertPoint();
- RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
}
return RV;
@@ -246,24 +277,24 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
}
CodeGenFunction::JumpDest
-CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
- JumpDest &Dest = LabelMap[S];
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
if (Dest.isValid()) return Dest;
// Create, but don't insert, the new block.
- Dest = JumpDest(createBasicBlock(S->getName()),
+ Dest = JumpDest(createBasicBlock(D->getName()),
EHScopeStack::stable_iterator::invalid(),
NextCleanupDestIndex++);
return Dest;
}
-void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- JumpDest &Dest = LabelMap[&S];
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
// If we didn't need a forward reference to this label, just go
// ahead and create a destination at the current scope.
if (!Dest.isValid()) {
- Dest = getJumpDestInCurrentScope(S.getName());
+ Dest = getJumpDestInCurrentScope(D->getName());
// Otherwise, we need to give this label a target depth and remove
// it from the branch-fixups list.
@@ -281,7 +312,7 @@ void CodeGenFunction::EmitLabel(const LabelStmt &S) {
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
- EmitLabel(S);
+ EmitLabel(S.getDecl());
EmitStmt(S.getSubStmt());
}
@@ -297,10 +328,14 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
// Ensure that we have an i8* for our PHI node.
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
- llvm::Type::getInt8PtrTy(VMContext),
- "addr");
+ Int8PtrTy, "addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
@@ -320,7 +355,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
@@ -395,7 +430,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -527,7 +562,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// declaration.
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (S.getConditionVariable()) {
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
}
// If there are any cleanups between here and the loop-exit scope,
@@ -621,11 +656,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
- if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
- const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
- llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
- Builder.CreateStore(One, NRVOFlag);
- }
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateStore(Builder.getTrue(), NRVOFlag);
} else if (!ReturnValue) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
@@ -643,7 +675,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, ReturnValue, false);
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -713,7 +745,8 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
// Range is small enough to add multiple switch instruction cases.
for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), LHS),
+ CaseDest);
LHS++;
}
return;
@@ -735,10 +768,10 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// Emit range check.
llvm::Value *Diff =
Builder.CreateSub(SwitchInsn->getCondition(),
- llvm::ConstantInt::get(VMContext, LHS), "tmp");
+ llvm::ConstantInt::get(getLLVMContext(), LHS), "tmp");
llvm::Value *Cond =
- Builder.CreateICmpULE(Diff,
- llvm::ConstantInt::get(VMContext, Range), "tmp");
+ Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(getLLVMContext(), Range),
+ "inbounds");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
// Restore the appropriate insertion point.
@@ -757,7 +790,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
// Recursively emitting the statement is acceptable, but is not wonderful for
// code where we have many case statements nested together, i.e.:
@@ -775,7 +809,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
while (NextCase && NextCase->getRHS() == 0) {
CurCase = NextCase;
CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
@@ -798,7 +833,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
llvm::Value *CondV = EmitScalarExpr(S.getCond());
@@ -861,14 +896,11 @@ static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
- std::string tmp;
while (*Constraint) {
switch (*Constraint) {
default:
- tmp = Target.convertConstraint(*Constraint);
- if (Result.find(tmp) == std::string::npos) // Combine unique constraints
- Result += tmp;
+ Result += Target.convertConstraint(*Constraint);
break;
// Ignore these
case '*':
@@ -877,8 +909,8 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
case '=': // Will see this and the following in mult-alt constraints.
case '+':
break;
- case ',': // FIXME - Until the back-end properly supports
- return Result; // multiple alternative constraints, we stop here.
+ case ',':
+ Result += "|";
break;
case 'g':
Result += "imr";
@@ -890,7 +922,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
bool result = Target.resolveSymbolicName(Constraint,
&(*OutCons)[0],
OutCons->size(), Index);
- assert(result && "Could not resolve symbolic name"); result=result;
+ assert(result && "Could not resolve symbolic name"); (void)result;
Result += llvm::utostr(Index);
break;
}
@@ -902,6 +934,33 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
return Result;
}
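
The change above stops truncating at the first ',' and instead translates GCC's multi-alternative constraint syntax into LLVM's '|' separator. An illustrative-only reduction of the translations this hunk touches (not the real SimplifyConstraint, which also resolves symbolic names and target-specific letters):

    #include <string>

    std::string simplifySketch(const std::string &constraint) {
      std::string result;
      for (char c : constraint) {
        switch (c) {
        case 'g': result += "imr"; break;  // general: imm, mem, or reg
        case ',': result += '|';   break;  // next alternative, kept now
        case '*': case '?': case '!':
        case '=': case '+':        break;  // modifiers/hints, dropped here
        default:  result += c;     break;
        }
      }
      return result;
    }
    // simplifySketch("r,m") == "r|m"; simplifySketch("g") == "imr"
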
+/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ llvm::StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // FIXME: We should check which registers are compatible with "r" or "x".
+ if (Constraint != "r" && Constraint != "x") {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ return "{" + Register.str() + "}";
+}
+
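
The source pattern AddVariableConstraints recognizes, as a compilable x86 example (GNU register-variable extension): the asm label on the variable turns a plain "r" constraint into an explicit "{ecx}" register constraint for LLVM.

    int getViaEcx(int v) {
      register int pinned asm("ecx") = v;  // VarDecl carries an AsmLabelAttr
      int out;
      asm("movl %1, %0" : "=r"(out) : "r"(pinned));  // "r" becomes "{ecx}"
      return out;
    }
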
llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
@@ -915,7 +974,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
- Ty = llvm::IntegerType::get(VMContext, Size);
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
@@ -946,6 +1005,35 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
}
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ llvm::SmallVector<llvm::Value *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ Str->getLocStart().getRawEncoding()));
+ llvm::StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOptions();
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
+ CGF.Target);
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ LineLoc.getRawEncoding()));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs.data(), Locs.size());
+}
+
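
What the richer !srcloc metadata buys: when the integrated assembler diagnoses line N of a multi-line asm string, the backend can now map the error to the correct source line, because each '\n' in the literal contributes its own location entry. For example:

    void multiline() {
      asm("nop\n\t"  // start of string: first !srcloc entry
          "nop\n\t"  // byte after the first '\n': second entry
          "nop");    // byte after the second '\n': third entry
    }
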
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
@@ -1010,6 +1098,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, Target,
+ CGM, S);
+
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
Constraints += ',';
@@ -1044,6 +1135,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
} else {
ArgTypes.push_back(Dest.getAddress()->getType());
Args.push_back(Dest.getAddress());
@@ -1083,6 +1178,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
&OutputConstraintInfos);
+ InputConstraint =
+ AddVariableConstraints(InputConstraint,
+ *InputExpr->IgnoreParenNoopCasts(getContext()),
+ Target, CGM, S);
+
llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
@@ -1107,7 +1207,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
-
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
@@ -1145,11 +1248,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const llvm::Type *ResultType;
if (ResultRegTypes.empty())
- ResultType = llvm::Type::getVoidTy(VMContext);
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
else if (ResultRegTypes.size() == 1)
ResultType = ResultRegTypes[0];
else
- ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
const llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
@@ -1162,10 +1265,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
- unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
- llvm::Value *LocIDC =
- llvm::ConstantInt::get(Int32Ty, LocID);
- Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
@@ -1192,16 +1292,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)ResSize));
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)TmpSize));
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
} else if (TruncTy->isIntegerTy()) {
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
}
}
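
The new isVectorTy branch handles asm results whose register type (possibly adjusted by the adjustInlineAsmType hook added earlier in this diff) differs from the C-level vector type; a bitcast, not a trunc or extend, reconciles them. A plausible motivating case, assuming an x86 target with MMX (the "y" constraint is what the x86 hook adjusts):

    typedef int v2si __attribute__((vector_size(8)));

    v2si mmxZero() {
      v2si r;
      asm("pxor %0, %0" : "=y"(r));  // result arrives as an MMX register
      return r;                      // value; bitcast back to <2 x i32>
    }
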
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
index dfb8dc6..3b4c509 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
@@ -16,39 +16,11 @@ using namespace clang;
using namespace CodeGen;
namespace {
- struct DestroyTemporary : EHScopeStack::Cleanup {
- const CXXTemporary *Temporary;
- llvm::Value *Addr;
- llvm::Value *CondPtr;
-
- DestroyTemporary(const CXXTemporary *Temporary, llvm::Value *Addr,
- llvm::Value *CondPtr)
- : Temporary(Temporary), Addr(Addr), CondPtr(CondPtr) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- llvm::BasicBlock *CondEnd = 0;
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (CondPtr) {
- llvm::BasicBlock *CondBlock =
- CGF.createBasicBlock("temp.cond-dtor.call");
- CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
-
- llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
- CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- CGF.EmitBlock(CondBlock);
- }
-
- CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Addr);
-
- if (CondPtr) {
- // Reset the condition to false.
- CGF.Builder.CreateStore(CGF.Builder.getFalse(), CondPtr);
- CGF.EmitBlock(CondEnd);
- }
+ struct DestroyTemporary {
+ static void Emit(CodeGenFunction &CGF, bool forEH,
+ const CXXDestructorDecl *dtor, llvm::Value *addr) {
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ addr);
}
};
}
@@ -56,37 +28,19 @@ namespace {
/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
llvm::Value *Ptr) {
- llvm::AllocaInst *CondPtr = 0;
-
- // Check if temporaries need to be conditional. If so, we'll create a
- // condition boolean, initialize it to 0 and
- if (ConditionalBranchLevel != 0) {
- CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
-
- // Initialize it to false. This initialization takes place right after
- // the alloca insert point.
- InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
-
- // Now set it to true.
- Builder.CreateStore(Builder.getTrue(), CondPtr);
- }
-
- EHStack.pushCleanup<DestroyTemporary>(NormalAndEHCleanup,
- Temporary, Ptr, CondPtr);
+ pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup,
+ Temporary->getDestructor(),
+ Ptr);
}
RValue
-CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
- llvm::Value *AggLoc,
- bool IsAggLocVolatile,
- bool IsInitializer) {
+CodeGenFunction::EmitExprWithCleanups(const ExprWithCleanups *E,
+ AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
- /*IgnoreResult=*/false, IsInitializer);
+ return EmitAnyExpr(E->getSubExpr(), Slot);
}
-LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
- const CXXExprWithTemporaries *E) {
+LValue CodeGenFunction::EmitExprWithCleanupsLValue(const ExprWithCleanups *E) {
RunCleanupsScope Scope(*this);
return EmitLValue(E->getSubExpr());
}
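
The deleted machinery above hand-rolled conditional destruction: an i1 flag alloca, initialized to false, set to true when the temporary was actually constructed, and tested in the cleanup. pushFullExprCleanup now provides that behavior generically. The source pattern that needs it, as a compilable sketch:

    struct T { T(); ~T(); int x; };

    int pick(bool b, int other) {
      // Whether ~T() must run at the end of the full-expression depends on
      // which arm of ?: was taken, so the cleanup must be conditional.
      return b ? T().x : other;
    }
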
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
index 56acfc8..78b2dbe 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -218,7 +218,7 @@ void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
+ Layout.getBaseClassOffsetInBits(BaseDecl);
// Layout the VTT for this base.
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
@@ -262,14 +262,15 @@ VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
if (!VBases.insert(BaseDecl))
continue;
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
BaseDeclIsMorallyVirtual = true;
} else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffset =
+ Base.getBaseOffset() + Layout.getBaseClassOffsetInBits(BaseDecl);
- if (!Layout.getPrimaryBaseWasVirtual() &&
+ if (!Layout.isPrimaryBaseVirtual() &&
Layout.getPrimaryBase() == BaseDecl)
BaseDeclIsNonVirtualPrimaryBase = true;
}
@@ -316,7 +317,7 @@ void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
continue;
uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
}
@@ -365,57 +366,52 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
}
-llvm::GlobalVariable *
-CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD) {
- // Only classes that have virtual bases need a VTT.
- if (RD->getNumVBases() == 0)
- return 0;
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true);
- llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, OutName);
- llvm::StringRef Name = OutName.str();
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ const llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::Constant *Init =
+ llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents().data(),
+ Builder.getVTTComponents().size());
+
+ VTT->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTT->setLinkage(Linkage);
- D1(printf("vtt %s\n", RD->getNameAsCString()));
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTT, RD, CodeGenModule::TVK_ForVTT);
+}
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
- if (GV == 0 || GV->isDeclaration()) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
- VTTBuilder Builder(CGM, RD, GenerateDefinition);
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
+ Out.flush();
+ llvm::StringRef Name = OutName.str();
- const llvm::ArrayType *Type =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
- llvm::Constant *Init = 0;
- if (GenerateDefinition)
- Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(),
- Builder.getVTTComponents().size());
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ const llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
- llvm::GlobalVariable *OldGV = GV;
- GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
- Linkage, Init, Name);
- CGM.setGlobalVisibility(GV, RD);
-
- if (OldGV) {
- GV->takeName(OldGV);
- llvm::Constant *NewPtr =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtr);
- OldGV->eraseFromParent();
- }
- }
-
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
return GV;
}
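
GetAddrOfVTT now only declares the VTT; EmitVTTDefinition fills it in, mirroring the vtable declaration/definition split below. As the assert says, only classes with virtual bases get a VTT. A minimal hierarchy that emits one (Itanium mangling _ZTT4Most alongside the vtable _ZTV4Most):

    struct VB { virtual ~VB(); };
    struct Left  : virtual VB { };
    struct Right : virtual VB { };
    // Most's base-object constructors receive the VTT so that Left and
    // Right can install the right vtable pointers mid-construction.
    struct Most : Left, Right { };
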
-llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) {
- return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
- /*GenerateDefinition=*/false, RD);
-}
-
bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index bed4670..891697f 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -240,7 +240,7 @@ static BaseOffset ComputeBaseOffset(ASTContext &Context,
const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
- NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ NonVirtualOffset += Layout.getBaseClassOffsetInBits(Base);
}
// FIXME: This should probably use CharUnits or something. Maybe we should
@@ -358,12 +358,12 @@ FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- uint64_t Offset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t Offset = Layout.getBaseClassOffsetInBits(BaseDecl);
BaseOffset = Base.getBaseOffset() + Offset;
BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
@@ -396,9 +396,9 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
continue;
}
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
- BaseOffset = Layout.getBaseClassOffset(BaseDecl) +
+ BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl) +
Base.getBaseOffset();
}
@@ -793,22 +793,22 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
// (Since we're emitting the vcall and vbase offsets in reverse order, we'll
// emit them for the primary base first).
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- bool PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+ bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
uint64_t PrimaryBaseOffset;
// Get the base offset of the primary base.
if (PrimaryBaseIsVirtual) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
@@ -849,9 +849,9 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Handle the primary base first.
// We only want to add vcall offsets if the base is non-virtual; a virtual
// primary base will have its vcall and vbase offsets emitted already.
- if (PrimaryBase && !Layout.getPrimaryBaseWasVirtual()) {
+ if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
// Get the base offset of the primary base.
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
@@ -903,7 +903,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Get the base offset of this base.
uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
+ Layout.getBaseClassOffsetInBits(BaseDecl);
AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
}
@@ -924,7 +924,7 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
// FIXME: We shouldn't use / 8 here.
int64_t Offset =
- (int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
+ (int64_t)(LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl) -
OffsetInLayoutClass) / 8;
// Add the vbase offset offset.
@@ -1372,7 +1372,7 @@ VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
/// Get the virtual base offset, relative to the most derived class
/// layout.
OffsetToBaseSubobject +=
- LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(Offset.VirtualBase);
} else {
// Otherwise, the non-virtual offset is relative to the derived class
// offset.
@@ -1520,8 +1520,8 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
if (!PrimaryBase)
break;
- if (Layout.getPrimaryBaseWasVirtual()) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should always be at offset 0!");
const ASTRecordLayout &LayoutClassLayout =
@@ -1529,13 +1529,13 @@ VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
// Now check if this is the primary base that is not a primary base in the
// most derived class.
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
FirstBaseOffsetInLayoutClass) {
// We found it, stop walking the chain.
break;
}
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should always be at offset 0!");
}
@@ -1586,23 +1586,23 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
uint64_t PrimaryBaseOffset;
uint64_t PrimaryBaseOffsetInLayoutClass;
- if (Layout.getPrimaryBaseWasVirtual()) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary vbase should have a zero offset!");
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
} else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
PrimaryBaseOffset = Base.getBaseOffset();
@@ -1664,9 +1664,18 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
if (ThisAdjustment.VCallOffsetOffset &&
Overrider.Method->getParent() == MostDerivedClass) {
+
+ // There's no return adjustment from OverriddenMD and MD,
+ // but that doesn't mean there isn't one between MD and
+ // the final overrider.
+ BaseOffset ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
// This is a virtual thunk for the most derived class, add it.
AddThunk(Overrider.Method,
- ThunkInfo(ThisAdjustment, ReturnAdjustment()));
+ ThunkInfo(ThisAdjustment, ReturnAdjustment));
}
}
@@ -1779,13 +1788,13 @@ VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
if (!PrimaryBase)
break;
- if (Layout.getPrimaryBaseWasVirtual()) {
+ if (Layout.isPrimaryBaseVirtual()) {
// Check if this virtual primary base is a primary base in the layout
// class. If it's not, we don't want to add it.
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ if (LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase) !=
OffsetInLayoutClass) {
// We don't want to add this class (or any of its primary bases).
break;
@@ -1835,7 +1844,7 @@ void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
}
// Get the base offset of this base.
- uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t RelativeBaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
@@ -1866,7 +1875,7 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
// Check if it's virtual.
- if (Layout.getPrimaryBaseWasVirtual()) {
+ if (Layout.isPrimaryBaseVirtual()) {
bool IsPrimaryVirtualBase = true;
if (isBuildingConstructorVTable()) {
@@ -1876,7 +1885,7 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
Context.getASTRecordLayout(LayoutClass);
uint64_t PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ LayoutClassLayout.getVBaseClassOffsetInBits(PrimaryBase);
// We know that the base is not a primary base in the layout class if
// the base offsets are different.
@@ -1904,10 +1913,11 @@ VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
- BaseOffsetInLayoutClass = LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
} else {
BaseOffsetInLayoutClass =
- OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ OffsetInLayoutClass + Layout.getBaseClassOffsetInBits(BaseDecl);
}
DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
@@ -1933,12 +1943,12 @@ VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
uint64_t BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ MostDerivedClassLayout.getVBaseClassOffsetInBits(BaseDecl);
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
uint64_t BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ LayoutClassLayout.getVBaseClassOffsetInBits(BaseDecl);
LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
/*BaseIsMorallyVirtual=*/true,
@@ -2230,6 +2240,19 @@ void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
}
+static void
+CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
+ while (RD) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ if (PrimaryBase)
+ PrimaryBases.insert(PrimaryBase);
+
+ RD = PrimaryBase;
+ }
+}
+
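
CollectPrimaryBases replaces the old primary-base iterator: it simply walks the chain of primary bases, since each dynamic class shares its vtable address point with its primary base. For example:

    struct A { virtual void f(); };
    struct B : A { virtual void g(); };  // A is B's primary base
    struct C : B { virtual void h(); };  // B is C's primary base; the
                                         // chain collected for C is {B, A}
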
void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
// Itanium C++ ABI 2.5.2:
@@ -2258,10 +2281,7 @@ void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
// Collect all the primary bases, so we can check whether methods override
// a method from the base.
VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
- for (ASTRecordLayout::primary_base_info_iterator
- I = Layout.primary_base_begin(), E = Layout.primary_base_end();
- I != E; ++I)
- PrimaryBases.insert((*I).getBase());
+ CollectPrimaryBases(RD, CGM.getContext(), PrimaryBases);
const CXXDestructorDecl *ImplicitVirtualDtor = 0;
@@ -2336,6 +2356,33 @@ void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
NumVirtualFunctionPointers[RD] = CurrentIndex;
}
+bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
+  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return false;
+
+ const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+ if (!KeyFunction)
+ return true;
+
+ // Itanium C++ ABI, 5.2.6 Instantiated Templates:
+ // An instantiation of a class template requires:
+ // - In the object where instantiated, the virtual table...
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return true;
+
+ // If we're building with optimization, we always emit VTables since that
+ // allows for virtual function calls to be devirtualized.
+ // (We don't want to do this in -fapple-kext mode however).
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOptions().AppleKext)
+ return true;
+
+ return KeyFunction->hasBody();
+}
+
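
ShouldEmitVTableInThisTU centralizes the key-function rule that the removed isKeyFunctionInAnotherTU helper expressed negatively: the vtable is owned by the translation unit that defines the class's first non-inline virtual member, with template instantiations and (non-kext) optimized builds as the exceptions coded above. Illustration:

    struct Widget {
      virtual void draw();    // first non-inline virtual: the key function
      virtual ~Widget() {}    // inline, so not the key function
    };
    // Only the TU containing this definition must emit Widget's vtable
    // (optimized builds may additionally emit speculative copies).
    void Widget::draw() {}
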
uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
NumVirtualFunctionPointers.find(RD);
@@ -2409,14 +2456,16 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
// Compute the mangled name.
llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
- Thunk.This, Name);
+ Thunk.This, Out);
else
- getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Name);
-
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
+ Out.flush();
+
const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
- return GetOrCreateLLVMFunction(Name, Ty, GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}
static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
@@ -2563,7 +2612,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
const llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
FPT->isVariadic());
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
const CGFunctionInfo &FnInfo =
CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
@@ -2621,7 +2670,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
}
if (!ResultType->isVoidType() && Slot.isNull())
- CGM.getCXXABI().EmitReturnFromThunk(CGF, RV, ResultType);
+ CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
FinishFunction();
@@ -2632,7 +2681,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
setThunkVisibility(CGM, MD, Thunk, Fn);
}
-void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage)
{
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
@@ -2667,9 +2717,42 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
OldThunkFn->eraseFromParent();
}
- // Actually generate the thunk body.
llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+
+ if (!ThunkFn->isDeclaration()) {
+ if (UseAvailableExternallyLinkage) {
+ // There is already a thunk emitted for this function, do nothing.
+ return;
+ }
+
+ // If a function has a body, it should have available_externally linkage.
+ assert(ThunkFn->hasAvailableExternallyLinkage() &&
+ "Function should have available_externally linkage!");
+
+ // Change the linkage.
+ CGM.setFunctionLinkage(cast<CXXMethodDecl>(GD.getDecl()), ThunkFn);
+ return;
+ }
+
+ // Actually generate the thunk body.
CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+
+ if (UseAvailableExternallyLinkage)
+ ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
+}
+
+void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // We only want to do this when building with optimizations.
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // We can't emit thunks for member functions with incomplete types.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ if (CGM.getTypes().VerifyFuncTypeComplete(MD->getType().getTypePtr()))
+ return;
+
+ EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
}
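
A source pattern that produces the kind of thunk MaybeEmitThunkAvailableExternally targets: D::f reached through the B2 subobject needs a this-adjusting thunk, and with optimizations on, a TU that merely uses D may now emit that thunk with available_externally linkage so the inliner can see through it.

    struct B1 { virtual void f(); };
    struct B2 { virtual void f(); };
    // D::f appears in B2's vtable behind a thunk that subtracts the
    // offset of the B2 subobject from 'this' before calling D::f.
    struct D : B1, B2 { virtual void f(); };
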
void CodeGenVTables::EmitThunks(GlobalDecl GD)
@@ -2694,7 +2777,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
const ThunkInfoVectorTy &ThunkInfoVector = I->second;
for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
- EmitThunk(GD, ThunkInfoVector[I]);
+ EmitThunk(GD, ThunkInfoVector[I], /*UseAvailableExternallyLinkage=*/false);
}
void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
@@ -2703,9 +2786,7 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
// We may need to generate a definition for this vtable.
if (RequireVTable && !Entry.getInt()) {
- if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
- RD->getTemplateSpecializationKind()
- != TSK_ExplicitInstantiationDeclaration)
+ if (ShouldEmitVTableInThisTU(RD))
CGM.DeferredVTables.push_back(RD);
Entry.setInt(true);
@@ -2719,7 +2800,14 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
+  // -fapple-kext adds an extra entry at the end of the vtable.
+ bool IsAppleKext = CGM.getContext().getLangOptions().AppleKext;
+ if (IsAppleKext)
+ NumVTableComponents += 1;
+
uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
+ if (IsAppleKext)
+ LayoutData[NumVTableComponents] = 0;
Entry.setPointer(LayoutData);
// Store the number of components.
@@ -2861,12 +2949,13 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
Init = CGM.GetAddrOfThunk(GD, Thunk);
-
+ MaybeEmitThunkAvailableExternally(GD, Thunk);
+
NextVTableThunkIndex++;
} else {
const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
- Init = CGM.GetAddrOfFunction(GD, Ty);
+ Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
}
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
@@ -2886,52 +2975,11 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
}
-/// GetGlobalVariable - Will return a global variable of the given type.
-/// If a variable with a different type already exists then a new variable
-/// with the right type will be created.
-/// FIXME: We should move this to CodeGenModule and rename it to something
-/// better and then use it in CGVTT and CGRTTI.
-static llvm::GlobalVariable *
-GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
- const llvm::Type *Ty,
- llvm::GlobalValue::LinkageTypes Linkage) {
-
- llvm::GlobalVariable *GV = Module.getNamedGlobal(Name);
- llvm::GlobalVariable *OldGV = 0;
-
- if (GV) {
- // Check if the variable has the right type.
- if (GV->getType()->getElementType() == Ty)
- return GV;
-
- assert(GV->isDeclaration() && "Declaration has wrong type!");
-
- OldGV = GV;
- }
-
- // Create a new variable.
- GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true,
- Linkage, 0, Name);
-
- if (OldGV) {
- // Replace occurrences of the old variable if needed.
- GV->takeName(OldGV);
-
- if (!OldGV->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtrForOldDecl);
- }
-
- OldGV->eraseFromParent();
- }
-
- return GV;
-}
-
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::SmallString<256> OutName;
- CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, OutName);
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
ComputeVTableRelatedInformation(RD, true);
@@ -2940,8 +2988,11 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
- return GetGlobalVariable(CGM.getModule(), Name, ArrayType,
- llvm::GlobalValue::ExternalLinkage);
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
+ return GV;
}
void
@@ -2970,7 +3021,7 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
VTable->setLinkage(Linkage);
// Set the right visibility.
- CGM.setTypeVisibility(VTable, RD, /*ForRTTI*/ false);
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
}
llvm::GlobalVariable *
@@ -2991,8 +3042,10 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Get the mangled construction vtable name.
llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().
- mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), OutName);
+ mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), Out);
+ Out.flush();
llvm::StringRef Name = OutName.str();
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
@@ -3001,8 +3054,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
- GetGlobalVariable(CGM.getModule(), Name, ArrayType,
- llvm::GlobalValue::InternalLinkage);
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::InternalLinkage);
// Add the thunks.
VTableThunksTy VTableThunks;
@@ -3034,7 +3087,10 @@ CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
VTable = GetAddrOfVTable(RD);
EmitVTableDefinition(VTable, Linkage, RD);
- GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+ if (RD->getNumVBases()) {
+ llvm::GlobalVariable *VTT = GetAddrOfVTT(RD);
+ EmitVTTDefinition(VTT, Linkage, RD);
+ }
// If this is the magic class __cxxabiv1::__fundamental_type_info,
// we will emit the typeinfo for the fundamental types. This is the
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
index abcafd6..7c119fa 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/GlobalVariable.h"
+#include "clang/Basic/ABI.h"
#include "GlobalDecl.h"
namespace clang {
@@ -24,94 +25,6 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
-/// ReturnAdjustment - A return adjustment.
-struct ReturnAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
- /// of the virtual base class offset.
- int64_t VBaseOffsetOffset;
-
- ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
-
- friend bool operator==(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
- }
-
- friend bool operator<(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
- }
-};
-
-/// ThisAdjustment - A 'this' pointer adjustment.
-struct ThisAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
- /// of the virtual call offset.
- int64_t VCallOffsetOffset;
-
- ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
-
- friend bool operator==(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
- }
-
- friend bool operator<(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
- }
-};
-
-/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
-/// adjustment for a thunk.
-struct ThunkInfo {
- /// This - The 'this' pointer adjustment.
- ThisAdjustment This;
-
- /// Return - The return adjustment.
- ReturnAdjustment Return;
-
- ThunkInfo() { }
-
- ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
- : This(This), Return(Return) { }
-
- friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- return LHS.This == RHS.This && LHS.Return == RHS.Return;
- }
-
- friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- if (LHS.This < RHS.This)
- return true;
-
- return LHS.This == RHS.This && LHS.Return < RHS.Return;
- }
-
- bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
-};
-
// BaseSubobject - Uniquely identifies a direct or indirect base class.
// Stores both the base class decl and the offset from the most derived class to
// the base class.
@@ -269,13 +182,16 @@ class CodeGenVTables {
void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
- llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD);
-
/// EmitThunk - Emit a single thunk.
- void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
-
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage);
+
+ /// MaybeEmitThunkAvailableExternally - Try to emit the given thunk with
+ /// available_externally linkage to allow for inlining of thunks.
+ /// This will be done iff optimizations are enabled and the member function
+ /// doesn't contain any incomplete types.
+ void MaybeEmitThunkAvailableExternally(GlobalDecl GD, const ThunkInfo &Thunk);
+
/// ComputeVTableRelatedInformation - Compute and store all vtable related
/// information (vtable layout, vbase offset offsets, thunks etc) for the
/// given record decl.
@@ -295,14 +211,9 @@ public:
CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM) { }
- // isKeyFunctionInAnotherTU - True if this record has a key function and it is
- // in another translation unit.
- static bool isKeyFunctionInAnotherTU(ASTContext &Context,
- const CXXRecordDecl *RD) {
- assert (RD->isDynamicClass() && "Non dynamic classes have no key.");
- const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- return KeyFunction && !KeyFunction->hasBody();
- }
+  /// \brief True if the VTable of this record must be emitted in this
+ /// translation unit.
+ bool ShouldEmitVTableInThisTU(const CXXRecordDecl *RD);
/// needsVTTParameter - Return whether the given global decl needs a VTT
/// parameter, which it does if it's a base constructor or destructor with
@@ -349,8 +260,15 @@ public:
GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
bool BaseIsVirtual,
VTableAddressPointsMapTy& AddressPoints);
-
- llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
+
+
+  /// GetAddrOfVTT - Get the address of the VTT for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD);
+
+  /// EmitVTTDefinition - Emit the definition of the given VTT.
+ void EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
/// EmitThunks - Emit the associated thunks for the given global decl.
void EmitThunks(GlobalDecl GD);
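
The GenerateVTT/getVTT pair is replaced above by a getter/emitter split, so a caller can create the VTT declaration early and decide separately whether this translation unit must provide its body. A hypothetical call site, sketching the assumed protocol (CGVT is the CodeGenVTables instance, RD a dynamic class):

    llvm::GlobalVariable *VTT = CGVT.GetAddrOfVTT(RD);   // declaration only
    if (CGVT.ShouldEmitVTableInThisTU(RD))               // key-function check
      CGVT.EmitVTTDefinition(VTT, llvm::GlobalVariable::ExternalLinkage, RD);
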
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
index f57ecd2..7f77d55 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -25,7 +25,6 @@ namespace llvm {
namespace clang {
class ObjCPropertyRefExpr;
- class ObjCImplicitSetterGetterRefExpr;
namespace CodeGen {
class CGBitFieldInfo;
@@ -109,10 +108,8 @@ class LValue {
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- PropertyRef, // This is an Objective-C property reference, use
+ PropertyRef // This is an Objective-C property reference, use
// getPropertyRefExpr
- KVCRef // This is an objective-c 'implicit' property ref,
- // use getKVCRefExpr
} LVType;
llvm::Value *V;
@@ -129,9 +126,6 @@ class LValue {
// Obj-C property reference expression
const ObjCPropertyRefExpr *PropertyRefExpr;
-
- // ObjC 'implicit' property reference expression
- const ObjCImplicitSetterGetterRefExpr *KVCRefExpr;
};
// 'const' is unused here
@@ -157,8 +151,13 @@ class LValue {
bool ThreadLocalRef : 1;
Expr *BaseIvarExp;
+
+ /// TBAAInfo - TBAA information to attach to dereferences of this LValue.
+ llvm::MDNode *TBAAInfo;
+
private:
- void Initialize(Qualifiers Quals, unsigned Alignment = 0) {
+ void Initialize(Qualifiers Quals, unsigned Alignment = 0,
+ llvm::MDNode *TBAAInfo = 0) {
this->Quals = Quals;
this->Alignment = Alignment;
assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
@@ -167,6 +166,7 @@ private:
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
this->ThreadLocalRef = false;
this->BaseIvarExp = 0;
+ this->TBAAInfo = TBAAInfo;
}
public:
@@ -175,7 +175,6 @@ public:
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
bool isPropertyRef() const { return LVType == PropertyRef; }
- bool isKVCRef() const { return LVType == KVCRef; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -208,6 +207,9 @@ public:
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+ llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
+ void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
+
const Qualifiers &getQuals() const { return Quals; }
Qualifiers &getQuals() { return Quals; }
@@ -240,26 +242,25 @@ public:
}
// property ref lvalue
+ llvm::Value *getPropertyRefBaseAddr() const {
+ assert(isPropertyRef());
+ return V;
+ }
const ObjCPropertyRefExpr *getPropertyRefExpr() const {
assert(isPropertyRef());
return PropertyRefExpr;
}
- // 'implicit' property ref lvalue
- const ObjCImplicitSetterGetterRefExpr *getKVCRefExpr() const {
- assert(isKVCRef());
- return KVCRefExpr;
- }
-
static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
- ASTContext &Context) {
- Qualifiers Quals = Context.getCanonicalType(T).getQualifiers();
+ ASTContext &Context,
+ llvm::MDNode *TBAAInfo = 0) {
+ Qualifiers Quals = T.getQualifiers();
Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
LValue R;
R.LVType = Simple;
R.V = V;
- R.Initialize(Quals, Alignment);
+ R.Initialize(Quals, Alignment, TBAAInfo);
return R;
}
@@ -303,21 +304,98 @@ public:
// the lvalue. However, this complicates the code a bit, and I haven't figured
// out how to make it go wrong yet.
static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
- unsigned CVR) {
+ llvm::Value *Base) {
LValue R;
R.LVType = PropertyRef;
+ R.V = Base;
R.PropertyRefExpr = E;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers());
return R;
}
+};
- static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
- unsigned CVR) {
- LValue R;
- R.LVType = KVCRef;
- R.KVCRefExpr = E;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
- return R;
+/// An aggregate value slot.
+class AggValueSlot {
+ /// The address.
+ llvm::Value *Addr;
+
+ // Associated flags.
+ bool VolatileFlag : 1;
+ bool LifetimeFlag : 1;
+ bool RequiresGCollection : 1;
+
+ /// IsZeroed - This is set to true if the destination is known to be zero
+ /// before the assignment into it. This means that zero fields don't need to
+ /// be set.
+ bool IsZeroed : 1;
+
+public:
+ /// ignored - Returns an aggregate value slot indicating that the
+ /// aggregate value is being ignored.
+ static AggValueSlot ignored() {
+ AggValueSlot AV;
+ AV.Addr = 0;
+    AV.VolatileFlag = AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed = 0;
+ return AV;
+ }
+
+ /// forAddr - Make a slot for an aggregate value.
+ ///
+ /// \param Volatile - true if the slot should be volatile-initialized
+ /// \param LifetimeExternallyManaged - true if the slot's lifetime
+ /// is being externally managed; false if a destructor should be
+ /// registered for any temporaries evaluated into the slot
+ /// \param RequiresGCollection - true if the slot is located
+ /// somewhere that ObjC GC calls should be emitted for
+ static AggValueSlot forAddr(llvm::Value *Addr, bool Volatile,
+ bool LifetimeExternallyManaged,
+ bool RequiresGCollection = false,
+ bool IsZeroed = false) {
+ AggValueSlot AV;
+ AV.Addr = Addr;
+ AV.VolatileFlag = Volatile;
+ AV.LifetimeFlag = LifetimeExternallyManaged;
+ AV.RequiresGCollection = RequiresGCollection;
+ AV.IsZeroed = IsZeroed;
+ return AV;
+ }
+
+ static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
+ bool RequiresGCollection = false) {
+ return forAddr(LV.getAddress(), LV.isVolatileQualified(),
+ LifetimeExternallyManaged, RequiresGCollection);
+ }
+
+ bool isLifetimeExternallyManaged() const {
+ return LifetimeFlag;
+ }
+ void setLifetimeExternallyManaged(bool Managed = true) {
+ LifetimeFlag = Managed;
+ }
+
+ bool isVolatile() const {
+ return VolatileFlag;
+ }
+
+ bool requiresGCollection() const {
+ return RequiresGCollection;
+ }
+
+ llvm::Value *getAddr() const {
+ return Addr;
+ }
+
+ bool isIgnored() const {
+ return Addr == 0;
+ }
+
+ RValue asRValue() const {
+ return RValue::getAggregate(getAddr(), isVolatile());
+ }
+
+ void setZeroed(bool V = true) { IsZeroed = V; }
+ bool isZeroed() const {
+ return IsZeroed;
}
};
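
AggValueSlot replaces the loose (address, volatile, lifetime) parameter triples with a single value. A sketch of the two common ways to build a slot; EmitAggExpr here stands in for whichever consumer ultimately takes it:

    // Evaluate E into a temporary we created; a destructor should be
    // registered for it, so lifetime is not externally managed.
    AggValueSlot Slot = AggValueSlot::forAddr(TmpAddr, /*Volatile=*/false,
                                              /*LifetimeExternallyManaged=*/false);
    CGF.EmitAggExpr(E, Slot);

    // Evaluate E purely for its side effects.
    CGF.EmitAggExpr(E, AggValueSlot::ignored());
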
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
index 51c55a1..a24bbc4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -28,7 +28,7 @@
using namespace clang;
using namespace llvm;
-namespace {
+namespace clang {
class BackendConsumer : public ASTConsumer {
Diagnostic &Diags;
BackendAction Action;
@@ -121,10 +121,10 @@ namespace {
// Install an inline asm handler so that diagnostics get printed through
// our diagnostics hooks.
LLVMContext &Ctx = TheModule->getContext();
- void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
+ LLVMContext::InlineAsmDiagHandlerTy OldHandler =
+ Ctx.getInlineAsmDiagnosticHandler();
void *OldContext = Ctx.getInlineAsmDiagnosticContext();
- Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
- this);
+ Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
TheModule.get(), Action, AsmOutStream);
@@ -209,8 +209,7 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
// issue as being an error in the source with a note showing the instantiated
// code.
if (LocCookie.isValid()) {
- Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
- diag::err_fe_inline_asm).AddString(Message);
+ Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
if (D.getLoc().isValid())
Diags.Report(Loc, diag::note_fe_inline_asm_here);
@@ -225,9 +224,15 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
//
-CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
+ : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
-CodeGenAction::~CodeGenAction() {}
+CodeGenAction::~CodeGenAction() {
+ TheModule.reset();
+ if (OwnsVMContext)
+ delete VMContext;
+}
bool CodeGenAction::hasIRSupport() const { return true; }
@@ -237,16 +242,18 @@ void CodeGenAction::EndSourceFileAction() {
return;
// Steal the module from the consumer.
- BackendConsumer *Consumer = static_cast<BackendConsumer*>(
- &getCompilerInstance().getASTConsumer());
-
- TheModule.reset(Consumer->takeModule());
+ TheModule.reset(BEConsumer->takeModule());
}
llvm::Module *CodeGenAction::takeModule() {
return TheModule.take();
}
+llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
+ OwnsVMContext = false;
+ return VMContext;
+}
+
static raw_ostream *GetOutputStream(CompilerInstance &CI,
llvm::StringRef InFile,
BackendAction Action) {
@@ -275,10 +282,12 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
if (BA != Backend_EmitNothing && !OS)
return 0;
- return new BackendConsumer(BA, CI.getDiagnostics(),
- CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
- CI.getLLVMContext());
+ BEConsumer =
+ new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ *VMContext);
+ return BEConsumer;
}
void CodeGenAction::ExecuteAction() {
@@ -303,7 +312,7 @@ void CodeGenAction::ExecuteAction() {
getCurrentFile().c_str());
llvm::SMDiagnostic Err;
- TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext()));
+ TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
if (!TheModule) {
// Translate from the diagnostic info to the SourceManager location.
SourceLocation Loc = SM.getLocation(
@@ -318,7 +327,7 @@ void CodeGenAction::ExecuteAction() {
unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
Msg);
- CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID);
+ CI.getDiagnostics().Report(Loc, DiagID);
return;
}
@@ -334,15 +343,20 @@ void CodeGenAction::ExecuteAction() {
//
-EmitAssemblyAction::EmitAssemblyAction()
- : CodeGenAction(Backend_EmitAssembly) {}
+EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitAssembly, _VMContext) {}
-EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitBC, _VMContext) {}
-EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitLL, _VMContext) {}
-EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitNothing, _VMContext) {}
-EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitMCNull, _VMContext) {}
-EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
+EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitObj, _VMContext) {}
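
The constructor and takeLLVMContext changes let an embedder share one LLVMContext across actions: the action deletes the context only if it allocated it, and takeLLVMContext hands ownership back out. A minimal usage sketch, assuming a configured CompilerInstance CI:

    llvm::LLVMContext Context;               // caller-owned, outlives the action
    clang::EmitLLVMOnlyAction Act(&Context); // OwnsVMContext stays false
    if (CI.ExecuteAction(Act)) {
      llvm::Module *M = Act.takeModule();    // module lives in our Context
      // ... both M and Context remain the caller's responsibility
    }
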
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 51d084e..f1b7286 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -29,25 +29,17 @@ using namespace clang;
using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
- : BlockFunction(cgm, *this, Builder), CGM(cgm),
- Target(CGM.getContext().Target),
- Builder(cgm.getModule().getContext()),
+ : CodeGenTypeCache(cgm), CGM(cgm),
+ Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
+ BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0),
DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
TrapBB(0) {
-
- // Get some frequently used types.
- LLVMPointerWidth = Target.getPointerWidth(0);
- llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
- IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
- Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
- Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
-
- Exceptions = getContext().getLangOptions().Exceptions;
+
CatchUndefined = getContext().getLangOptions().CatchUndefined;
CGM.getCXXABI().getMangleContext().startNewFunction();
}
@@ -125,7 +117,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// Emit function epilog (to return).
EmitReturnBlock();
- EmitFunctionInstrumentation("__cyg_profile_func_exit");
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -184,20 +177,16 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
- if (!ShouldInstrumentFunction())
- return;
-
const llvm::PointerType *PointerTy;
const llvm::FunctionType *FunctionTy;
std::vector<const llvm::Type*> ProfileFuncArgs;
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
- PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+ PointerTy = Int8PtrTy;
ProfileFuncArgs.push_back(PointerTy);
ProfileFuncArgs.push_back(PointerTy);
- FunctionTy = llvm::FunctionType::get(
- llvm::Type::getVoidTy(VMContext),
- ProfileFuncArgs, false);
+ FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
@@ -210,6 +199,15 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
CallSite);
}
+void CodeGenFunction::EmitMCountInstrumentation() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);
+
+ llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
+ Target.getMCountName());
+ Builder.CreateCall(MCountFn);
+}
+
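
EmitMCountInstrumentation implements -pg style profiling: a call to the target's mcount-like hook at every function entry. The effect is roughly as if each function had been written as below; the actual symbol comes from Target.getMCountName() and varies by platform (a sketch):

    extern "C" void mcount(void);   // assumed spelling; some targets use "_mcount"

    void foo(void) {
      mcount();                     // inserted at entry when profiling is enabled
      // ... original body
    }
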
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
const FunctionArgList &Args,
@@ -232,6 +230,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
break;
}
+ if (getContext().getLangOptions().OpenCL) {
+ // Add metadata for a kernel function.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ llvm::LLVMContext &Context = getLLVMContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+
+ llvm::Value *Op = Fn;
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, &Op, 1));
+ }
+ }
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
   // Create a marker to make it easy to insert allocas into the entry block
@@ -246,18 +257,23 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Builder.SetInsertPoint(EntryBB);
- QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
- false, false, 0, 0,
- /*FIXME?*/
- FunctionType::ExtInfo());
-
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
+ // FIXME: what is going on here and why does it ignore all these
+ // interesting type properties?
+ QualType FnType =
+ getContext().getFunctionType(RetTy, 0, 0,
+ FunctionProtoType::ExtProtoInfo());
+
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
- EmitFunctionInstrumentation("__cyg_profile_func_enter");
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ if (CGM.getCodeGenOpts().InstrumentForProfiling)
+ EmitMCountInstrumentation();
// FIXME: Leaked.
// CC info is ignored, hopefully?
@@ -384,8 +400,7 @@ bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
IgnoreCaseStmts = true;
// Scan subexpressions for verboten labels.
- for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
- I != E; ++I)
+ for (Stmt::const_child_range I = S->children(); I; ++I)
if (ContainsLabel(*I, IgnoreCaseStmts))
return true;
@@ -442,13 +457,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Emit the LHS as a conditional. If the LHS conditional is false, we
// want to jump to the FalseBlock.
llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+
+ ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
EmitBlock(LHSTrue);
// Any temporaries created here are conditional.
- BeginConditionalBranch();
+ eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
- EndConditionalBranch();
+ eval.end(*this);
return;
} else if (CondBOp->getOpcode() == BO_LOr) {
@@ -469,13 +486,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// Emit the LHS as a conditional. If the LHS conditional is true, we
// want to jump to the TrueBlock.
llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+
+ ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
EmitBlock(LHSFalse);
// Any temporaries created here are conditional.
- BeginConditionalBranch();
+ eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
- EndConditionalBranch();
+ eval.end(*this);
return;
}
@@ -495,11 +514,20 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+ ConditionalEvaluation cond(*this);
EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+
+ cond.begin(*this);
EmitBlock(LHSBlock);
EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ cond.begin(*this);
EmitBlock(RHSBlock);
EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
return;
}
}
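
All three forms above follow one pattern: the condition that always runs is emitted first, and each sub-expression that only sometimes runs is bracketed by begin/end on a ConditionalEvaluation, so its temporaries are registered as conditional. For the ternary case the lowering is roughly (comment sketch):

    // br(c ? x : y, T, F) becomes three conditional branches:
    //
    //               br(c, cond.true, cond.false)
    //   cond.true:    br(x, T, F)   // x evaluated only on this path
    //   cond.false:   br(y, T, F)   // y evaluated only on this path
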
@@ -516,6 +544,57 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
CGM.ErrorUnsupported(S, Type, OmitOnError);
}
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
+/// \param align - the total alignment of the VLA
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *sizeInChars) {
+ std::pair<CharUnits,CharUnits> baseSizeAndAlign
+ = CGF.getContext().getTypeInfoInChars(baseType);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *baseSizeInChars
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+
+ const llvm::Type *i8p = Builder.getInt8PtrTy();
+
+ llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
+ llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+
+ llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+ // Make a loop over the VLA. C99 guarantees that the VLA element
+ // count must be nonzero.
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = Builder.CreatePHI(i8p, "vla.cur");
+ cur->reserveOperandSpace(2);
+ cur->addIncoming(begin, originBB);
+
+ // memcpy the individual element bit-pattern.
+ Builder.CreateMemCpy(cur, src, baseSizeInChars,
+ baseSizeAndAlign.second.getQuantity(),
+ /*volatile*/ false);
+
+ // Go to the next element.
+ llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");
+
+ // Leave if that's the end of the VLA.
+ llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+ Builder.CreateCondBr(done, contBB, loopBB);
+ cur->addIncoming(next, loopBB);
+
+ CGF.EmitBlock(contBB);
+}
+
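
This helper exists because "zero" initialization is not always an all-zero bit pattern: under the Itanium C++ ABI, for example, a null pointer-to-data-member is all ones. For such element types a VLA cannot be cleared with memset; one element's pattern is emitted and then splatted over the array by the loop above. Illustration:

    struct A { int x; };
    int A::*pm = 0;   // stored representation is ~0 under the Itanium C++ ABI
    // A VLA of 'int A::*' therefore zero-initializes by memcpy-splatting one
    // element's bit pattern, not by memset(dest, 0, size).
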
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
@@ -529,26 +608,42 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Cast the dest ptr to the appropriate i8 pointer type.
unsigned DestAS =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::Type *BP =
- llvm::Type::getInt8PtrTy(VMContext, DestAS);
+ const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
- uint64_t Size = TypeInfo.first;
- unsigned Align = TypeInfo.second;
+ uint64_t Size = TypeInfo.first / 8;
+ unsigned Align = TypeInfo.second / 8;
- // Don't bother emitting a zero-byte memset.
- if (Size == 0)
- return;
+ llvm::Value *SizeVal;
+ const VariableArrayType *vla;
- llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
- llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);
+ // Don't bother emitting a zero-byte memset.
+ if (Size == 0) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (const VariableArrayType *vlaType =
+ dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ SizeVal = GetVLASize(vlaType);
+ vla = vlaType;
+ } else {
+ return;
+ }
+ } else {
+ SizeVal = llvm::ConstantInt::get(IntPtrTy, Size);
+ vla = 0;
+ }
// If the type contains a pointer to data member we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ // For a VLA, emit a single element, then splat that over the VLA.
+ if (vla) Ty = getContext().getBaseElementType(vla);
+
llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
llvm::GlobalVariable *NullVariable =
@@ -559,27 +654,20 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
llvm::Value *SrcPtr =
Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
- // FIXME: variable-size types?
+ if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
- llvm::Constant *Memcpy =
- CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
- Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
- /*volatile*/ Builder.getFalse());
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align, false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
-
- // FIXME: Handle variable sized types.
- Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
- Builder.getInt8(0),
- SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, Align, false);
}
-llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
// Make sure that there is a block for the indirect goto.
if (IndirectBranch == 0)
GetIndirectGotoBlock();
@@ -597,8 +685,6 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
// Create the PHI node that indirect gotos will add entries to.
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
@@ -621,6 +707,9 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
EnsureInsertPoint();
if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+    // An unknown size indication requires no size computation.
+ if (!VAT->getSizeExpr())
+ return 0;
llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
if (!SizeEntry) {
@@ -649,6 +738,11 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
return 0;
}
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ EmitVLASize(PT->getInnerType());
+ return 0;
+ }
+
const PointerType *PT = Ty->getAs<PointerType>();
assert(PT && "unknown VM type!");
EmitVLASize(PT->getPointeeType());
@@ -656,686 +750,41 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
}
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
- if (CGM.getContext().getBuiltinVaListType()->isArrayType())
+ if (getContext().getBuiltinVaListType()->isArrayType())
return EmitScalarExpr(E);
return EmitLValue(E).getAddress();
}
-/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
- assert(Old.isValid());
-
- while (EHStack.stable_begin() != Old) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
-
- // As long as Old strictly encloses the scope's enclosing normal
- // cleanup, we're going to emit another normal cleanup which
- // fallthrough can propagate through.
- bool FallThroughIsBranchThrough =
- Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
-
- PopCleanupBlock(FallThroughIsBranchThrough);
- }
-}
-
-static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
- EHCleanupScope &Scope) {
- assert(Scope.isNormalCleanup());
- llvm::BasicBlock *Entry = Scope.getNormalBlock();
- if (!Entry) {
- Entry = CGF.createBasicBlock("cleanup");
- Scope.setNormalBlock(Entry);
- }
- return Entry;
-}
-
-static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
- EHCleanupScope &Scope) {
- assert(Scope.isEHCleanup());
- llvm::BasicBlock *Entry = Scope.getEHBlock();
- if (!Entry) {
- Entry = CGF.createBasicBlock("eh.cleanup");
- Scope.setEHBlock(Entry);
- }
- return Entry;
-}
-
-/// Transitions the terminator of the given exit-block of a cleanup to
-/// be a cleanup switch.
-static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
- llvm::BasicBlock *Block) {
- // If it's a branch, turn it into a switch whose default
- // destination is its original target.
- llvm::TerminatorInst *Term = Block->getTerminator();
- assert(Term && "can't transition block without terminator");
-
- if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
- assert(Br->isUnconditional());
- llvm::LoadInst *Load =
- new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
- Br->eraseFromParent();
- return Switch;
- } else {
- return cast<llvm::SwitchInst>(Term);
- }
-}
-
-/// Attempts to reduce a cleanup's entry block to a fallthrough. This
-/// is basically llvm::MergeBlockIntoPredecessor, except
-/// simplified/optimized for the tighter constraints on cleanup blocks.
-///
-/// Returns the new block, whatever it is.
-static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
- llvm::BasicBlock *Entry) {
- llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
- if (!Pred) return Entry;
-
- llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
- if (!Br || Br->isConditional()) return Entry;
- assert(Br->getSuccessor(0) == Entry);
-
- // If we were previously inserting at the end of the cleanup entry
- // block, we'll need to continue inserting at the end of the
- // predecessor.
- bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
- assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
-
- // Kill the branch.
- Br->eraseFromParent();
-
- // Merge the blocks.
- Pred->getInstList().splice(Pred->end(), Entry->getInstList());
-
- // Kill the entry block.
- Entry->eraseFromParent();
-
- if (WasInsertBlock)
- CGF.Builder.SetInsertPoint(Pred);
-
- return Pred;
-}
-
-static void EmitCleanup(CodeGenFunction &CGF,
- EHScopeStack::Cleanup *Fn,
- bool ForEH) {
- if (ForEH) CGF.EHStack.pushTerminate();
- Fn->Emit(CGF, ForEH);
- if (ForEH) CGF.EHStack.popTerminate();
- assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
-}
-
-/// Pops a cleanup block. If the block includes a normal cleanup, the
-/// current insertion point is threaded through the cleanup, as are
-/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
- assert(!EHStack.empty() && "cleanup stack is empty!");
- assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
- assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
- assert(Scope.isActive() && "cleanup was still inactive when popped!");
-
- // Check whether we need an EH cleanup. This is only true if we've
- // generated a lazy EH cleanup block.
- bool RequiresEHCleanup = Scope.hasEHBranches();
-
- // Check the three conditions which might require a normal cleanup:
-
- // - whether there are branch fix-ups through this cleanup
- unsigned FixupDepth = Scope.getFixupDepth();
- bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
-
- // - whether there are branch-throughs or branch-afters
- bool HasExistingBranches = Scope.hasBranches();
-
- // - whether there's a fallthrough
- llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
- bool HasFallthrough = (FallthroughSource != 0);
-
- bool RequiresNormalCleanup = false;
- if (Scope.isNormalCleanup() &&
- (HasFixups || HasExistingBranches || HasFallthrough)) {
- RequiresNormalCleanup = true;
- }
-
- // If we don't need the cleanup at all, we're done.
- if (!RequiresNormalCleanup && !RequiresEHCleanup) {
- EHStack.popCleanup(); // safe because there are no fixups
- assert(EHStack.getNumBranchFixups() == 0 ||
- EHStack.hasNormalCleanups());
- return;
- }
-
- // Copy the cleanup emission data out. Note that SmallVector
- // guarantees maximal alignment for its buffer regardless of its
- // type parameter.
- llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
- CleanupBuffer.reserve(Scope.getCleanupSize());
- memcpy(CleanupBuffer.data(),
- Scope.getCleanupBuffer(), Scope.getCleanupSize());
- CleanupBuffer.set_size(Scope.getCleanupSize());
- EHScopeStack::Cleanup *Fn =
- reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
-
- // We want to emit the EH cleanup after the normal cleanup, but go
- // ahead and do the setup for the EH cleanup while the scope is still
- // alive.
- llvm::BasicBlock *EHEntry = 0;
- llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
- if (RequiresEHCleanup) {
- EHEntry = CreateEHEntry(*this, Scope);
-
- // Figure out the branch-through dest if necessary.
- llvm::BasicBlock *EHBranchThroughDest = 0;
- if (Scope.hasEHBranchThroughs()) {
- assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
- EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
- EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
- }
-
- // If we have exactly one branch-after and no branch-throughs, we
- // can dispatch it without a switch.
- if (!Scope.hasEHBranchThroughs() &&
- Scope.getNumEHBranchAfters() == 1) {
- assert(!EHBranchThroughDest);
-
- // TODO: remove the spurious eh.cleanup.dest stores if this edge
- // never went through any switches.
- llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
-
- // Otherwise, if we have any branch-afters, we need a switch.
- } else if (Scope.getNumEHBranchAfters()) {
- // The default of the switch belongs to the branch-throughs if
- // they exist.
- llvm::BasicBlock *Default =
- (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
-
- const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- EHInstsToAppend.push_back(Load);
- EHInstsToAppend.push_back(Switch);
-
- for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
- Switch->addCase(Scope.getEHBranchAfterIndex(I),
- Scope.getEHBranchAfterBlock(I));
-
- // Otherwise, we have only branch-throughs; jump to the next EH
- // cleanup.
- } else {
- assert(EHBranchThroughDest);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
- }
- }
-
- if (!RequiresNormalCleanup) {
- EHStack.popCleanup();
- } else {
-    // As a kind of crazy internal case, branch-through fall-throughs
- // leave the insertion point set to the end of the last cleanup.
- bool HasPrebranchedFallthrough =
- (HasFallthrough && FallthroughSource->getTerminator());
- assert(!HasPrebranchedFallthrough ||
- FallthroughSource->getTerminator()->getSuccessor(0)
- == Scope.getNormalBlock());
-
- // If we have a fallthrough and no other need for the cleanup,
- // emit it directly.
- if (HasFallthrough && !HasPrebranchedFallthrough &&
- !HasFixups && !HasExistingBranches) {
-
- // Fixups can cause us to optimistically create a normal block,
- // only to later have no real uses for it. Just delete it in
- // this case.
- // TODO: we can potentially simplify all the uses after this.
- if (Scope.getNormalBlock()) {
- Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
- delete Scope.getNormalBlock();
- }
-
- EHStack.popCleanup();
-
- EmitCleanup(*this, Fn, /*ForEH*/ false);
-
- // Otherwise, the best approach is to thread everything through
- // the cleanup block and then try to clean up after ourselves.
- } else {
- // Force the entry block to exist.
- llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
-
- // If there's a fallthrough, we need to store the cleanup
- // destination index. For fall-throughs this is always zero.
- if (HasFallthrough && !HasPrebranchedFallthrough)
- Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
-
- // Emit the entry block. This implicitly branches to it if we
- // have fallthrough. All the fixups and existing branches should
- // already be branched to it.
- EmitBlock(NormalEntry);
-
- bool HasEnclosingCleanups =
- (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
-
- // Compute the branch-through dest if we need it:
- // - if there are branch-throughs threaded through the scope
- // - if fall-through is a branch-through
- // - if there are fixups that will be optimistically forwarded
- // to the enclosing cleanup
- llvm::BasicBlock *BranchThroughDest = 0;
- if (Scope.hasBranchThroughs() ||
- (HasFallthrough && FallthroughIsBranchThrough) ||
- (HasFixups && HasEnclosingCleanups)) {
- assert(HasEnclosingCleanups);
- EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
- BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
- }
-
- llvm::BasicBlock *FallthroughDest = 0;
- llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
-
- // If there's exactly one branch-after and no other threads,
- // we can route it without a switch.
- if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
- Scope.getNumBranchAfters() == 1) {
- assert(!BranchThroughDest);
-
- // TODO: clean up the possibly dead stores to the cleanup dest slot.
- llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
- InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
-
- // Build a switch-out if we need it:
- // - if there are branch-afters threaded through the scope
- // - if fall-through is a branch-after
- // - if there are fixups that have nowhere left to go and
- // so must be immediately resolved
- } else if (Scope.getNumBranchAfters() ||
- (HasFallthrough && !FallthroughIsBranchThrough) ||
- (HasFixups && !HasEnclosingCleanups)) {
-
- llvm::BasicBlock *Default =
- (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
-
- // TODO: base this on the number of branch-afters and fixups
- const unsigned SwitchCapacity = 10;
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- InstsToAppend.push_back(Load);
- InstsToAppend.push_back(Switch);
-
- // Branch-after fallthrough.
- if (HasFallthrough && !FallthroughIsBranchThrough) {
- FallthroughDest = createBasicBlock("cleanup.cont");
- Switch->addCase(Builder.getInt32(0), FallthroughDest);
- }
-
- for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
- Switch->addCase(Scope.getBranchAfterIndex(I),
- Scope.getBranchAfterBlock(I));
- }
-
- if (HasFixups && !HasEnclosingCleanups)
- ResolveAllBranchFixups(Switch);
- } else {
- // We should always have a branch-through destination in this case.
- assert(BranchThroughDest);
- InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
- }
-
- // We're finally ready to pop the cleanup.
- EHStack.popCleanup();
- assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
-
- EmitCleanup(*this, Fn, /*ForEH*/ false);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
- NormalExit->getInstList().push_back(InstsToAppend[I]);
-
- // Optimistically hope that any fixups will continue falling through.
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I) {
- BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
- if (!Fixup.Destination) continue;
- if (!Fixup.OptimisticBranchBlock) {
- new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
- Fixup.InitialBranch->setSuccessor(0, NormalEntry);
- }
- Fixup.OptimisticBranchBlock = NormalExit;
- }
-
- if (FallthroughDest)
- EmitBlock(FallthroughDest);
- else if (!HasFallthrough)
- Builder.ClearInsertionPoint();
-
- // Check whether we can merge NormalEntry into a single predecessor.
- // This might invalidate (non-IR) pointers to NormalEntry.
- llvm::BasicBlock *NewNormalEntry =
- SimplifyCleanupEntry(*this, NormalEntry);
-
- // If it did invalidate those pointers, and NormalEntry was the same
- // as NormalExit, go back and patch up the fixups.
- if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I)
- CGF.EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
- }
- }
-
- assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
-
- // Emit the EH cleanup if required.
- if (RequiresEHCleanup) {
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-
- EmitBlock(EHEntry);
- EmitCleanup(*this, Fn, /*ForEH*/ true);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
- EHExit->getInstList().push_back(EHInstsToAppend[I]);
-
- Builder.restoreIP(SavedIP);
-
- SimplifyCleanupEntry(*this, EHEntry);
- }
-}
-
-/// Terminate the current block by emitting a branch which might leave
-/// the current cleanup-protected scope. The target scope may not yet
-/// be known, in which case this will require a fixup.
-///
-/// As a side-effect, this method clears the insertion point.
-void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
- assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
- && "stale jump destination");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active normal cleanup.
- EHScopeStack::stable_iterator
- TopCleanup = EHStack.getInnermostActiveNormalCleanup();
-
- // If we're not in an active normal cleanup scope, or if the
- // destination scope is within the innermost active normal cleanup
- // scope, we don't need to worry about fixups.
- if (TopCleanup == EHStack.stable_end() ||
- TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
- Builder.ClearInsertionPoint();
- return;
- }
-
- // If we can't resolve the destination cleanup scope, just add this
- // to the current cleanup scope as a branch fixup.
- if (!Dest.getScopeDepth().isValid()) {
- BranchFixup &Fixup = EHStack.addBranchFixup();
- Fixup.Destination = Dest.getBlock();
- Fixup.DestinationIndex = Dest.getDestIndex();
- Fixup.InitialBranch = BI;
- Fixup.OptimisticBranchBlock = 0;
-
- Builder.ClearInsertionPoint();
- return;
- }
-
- // Otherwise, thread through all the normal cleanups in scope.
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(TopCleanup));
- BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- EHScopeStack::stable_iterator I = TopCleanup;
- EHScopeStack::stable_iterator E = Dest.getScopeDepth();
- if (E.strictlyEncloses(I)) {
- while (true) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isNormalCleanup());
- I = Scope.getEnclosingNormalCleanup();
-
- // If this is the last cleanup we're propagating through, tell it
- // that there's a resolved jump moving through it.
- if (!E.strictlyEncloses(I)) {
- Scope.addBranchAfter(Index, Dest.getBlock());
- break;
- }
-
-      // Otherwise, tell the scope that there's a jump propagating
- // through it. If this isn't new information, all the rest of
- // the work has been done before.
- if (!Scope.addBranchThrough(Dest.getBlock()))
- break;
- }
- }
-
- Builder.ClearInsertionPoint();
-}
-
-void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
- // We should never get invalid scope depths for an UnwindDest; that
- // implies that the destination wasn't set up correctly.
- assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active cleanup.
- EHScopeStack::stable_iterator
- InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
-
- // If the destination is in the same EH cleanup scope as us, we
- // don't need to thread through anything.
- if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
- Builder.ClearInsertionPoint();
- return;
- }
- assert(InnermostCleanup != EHStack.stable_end());
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
- BI->setSuccessor(0, CreateEHEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- for (EHScopeStack::stable_iterator
- I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
- assert(E.strictlyEncloses(I));
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isEHCleanup());
- I = Scope.getEnclosingEHCleanup();
-
- // If this is the last cleanup we're propagating through, add this
- // as a branch-after.
- if (I == E) {
- Scope.addEHBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, add it as a branch-through. If this isn't new
- // information, all the rest of the work has been done before.
- if (!Scope.addEHBranchThrough(Dest.getBlock()))
- break;
- }
-
- Builder.ClearInsertionPoint();
-}
-
-/// All the branch fixups on the EH stack have propagated out past the
-/// outermost normal cleanup; resolve them all by adding cases to the
-/// given switch instruction.
-void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
-
- for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination isn't set or if we've
- // already treated it.
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (Fixup.Destination == 0) continue;
- if (!CasesAdded.insert(Fixup.Destination)) continue;
-
- Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
- Fixup.Destination);
- }
-
- EHStack.clearFixups();
-}
-
-void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
- assert(Block && "resolving a null target block");
- if (!EHStack.getNumBranchFixups()) return;
-
- assert(EHStack.hasNormalCleanups() &&
- "branch fixups exist with no normal cleanups on stack");
-
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
- bool ResolvedAny = false;
-
- for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination doesn't match.
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (Fixup.Destination != Block) continue;
-
- Fixup.Destination = 0;
- ResolvedAny = true;
-
- // If it doesn't have an optimistic branch block, LatestBranch is
- // already pointing to the right place.
- llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
- if (!BranchBB)
- continue;
-
- // Don't process the same optimistic branch block twice.
- if (!ModifiedOptimisticBlocks.insert(BranchBB))
- continue;
-
- llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
-
- // Add a case to the switch.
- Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
- }
-
- if (ResolvedAny)
- EHStack.popNullFixups();
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::Constant *Init) {
+  assert(Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
-/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
- assert(C != EHStack.stable_end() && "activating bottom of stack?");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
- assert(!Scope.isActive() && "double activation");
-
- // Calculate whether the cleanup was used:
- bool Used = false;
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+ // At the moment, the only aggressive peephole we do in IR gen
+ // is trunc(zext) folding, but if we add more, we can easily
+ // extend this protection.
- // - as a normal cleanup
- if (Scope.isNormalCleanup()) {
- bool NormalUsed = false;
- if (Scope.getNormalBlock()) {
- NormalUsed = true;
- } else {
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostNormalCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getNormalBlock()) {
- NormalUsed = true;
- break;
- }
- I = S.getEnclosingNormalCleanup();
- }
- }
+ if (!rvalue.isScalar()) return PeepholeProtection();
+ llvm::Value *value = rvalue.getScalarVal();
+ if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
- if (NormalUsed)
- Used = true;
- else
- Scope.setActivatedBeforeNormalUse(true);
- }
+ // Just make an extra bitcast.
+ assert(HaveInsertPoint());
+ llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+ Builder.GetInsertBlock());
- // - as an EH cleanup
- if (Scope.isEHCleanup()) {
- bool EHUsed = false;
- if (Scope.getEHBlock()) {
- EHUsed = true;
- } else {
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostEHCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getEHBlock()) {
- EHUsed = true;
- break;
- }
- I = S.getEnclosingEHCleanup();
- }
- }
-
- if (EHUsed)
- Used = true;
- else
- Scope.setActivatedBeforeEHUse(true);
- }
-
- llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
- if (Used) {
- Var = CreateTempAlloca(Builder.getInt1Ty());
- InitTempAlloca(Var, Builder.getFalse());
- }
- Scope.setActiveVar(Var);
+ PeepholeProtection protection;
+ protection.Inst = inst;
+ return protection;
}
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
- if (!NormalCleanupDest)
- NormalCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return NormalCleanupDest;
-}
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+ if (!protection.Inst) return;
-llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
- if (!EHCleanupDest)
- EHCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
- return EHCleanupDest;
-}
-
-void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
- llvm::ConstantInt *Init) {
- assert (Init && "Invalid DeclRefExpr initializer!");
- if (CGDebugInfo *Dbg = getDebugInfo())
- Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
+ // In theory, we could try to duplicate the peepholes now, but whatever.
+ protection.Inst->eraseFromParent();
}
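
protectFromPeepholes pins a scalar rvalue produced by a zext so the trunc(zext) fold cannot fire while the value must stay materialized; the "protection" is just a same-type bitcast that is erased again afterwards. A sketch of the bracket around some intervening emission:

    CodeGenFunction::PeepholeProtection Prot = CGF.protectFromPeepholes(RV);
    // ... emit code that must see RV in its materialized form ...
    CGF.unprotectFromPeepholes(Prot);   // erases the protecting bitcast, if any
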
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index 4f04205..67ef414 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -18,15 +18,14 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
+#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
#include "CodeGenModule.h"
-#include "CGBlocks.h"
#include "CGBuilder.h"
#include "CGCall.h"
-#include "CGCXX.h"
#include "CGValue.h"
namespace llvm {
@@ -46,6 +45,7 @@ namespace clang {
class CXXDestructorDecl;
class CXXTryStmt;
class Decl;
+ class LabelDecl;
class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
@@ -71,6 +71,8 @@ namespace CodeGen {
class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
+ class BlockFlags;
+ class BlockFieldFlags;
/// A branch fixup. These are required when emitting a goto to a
/// label which hasn't been emitted yet. The goto is optimistically
@@ -97,6 +99,28 @@ struct BranchFixup {
llvm::BranchInst *InitialBranch;
};
+template <class T> struct InvariantValue {
+ typedef T type;
+ typedef T saved_type;
+ static bool needsSaving(type value) { return false; }
+ static saved_type save(CodeGenFunction &CGF, type value) { return value; }
+ static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+ llvm::is_base_of<llvm::Value, T>::value &&
+ !llvm::is_base_of<llvm::Constant, T>::value &&
+ !llvm::is_base_of<llvm::BasicBlock, T>::value>
+struct DominatingPointer;
+template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
+// template <class T> struct DominatingPointer<T,true> at end of file
+
+template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
+
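
DominatingValue is the save/restore hook used by the conditional cleanups added below: invariant values (constants, ordinary pointers) pass through unchanged, while values that may be instructions need a saved form that dominates wherever the cleanup is finally emitted. A hypothetical specialization, showing only the required shape:

    // Hypothetical type Foo whose values must be spilled so a cleanup can
    // reload them from a dominating location (shape only, no definitions):
    template <> struct DominatingValue<Foo> {
      typedef Foo type;
      typedef llvm::Value *saved_type;   // e.g. an alloca holding the value
      static bool needsSaving(Foo value);
      static saved_type save(CodeGenFunction &CGF, Foo value);
      static Foo restore(CodeGenFunction &CGF, saved_type value);
    };
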
enum CleanupKind {
EHCleanup = 0x1,
NormalCleanup = 0x2,
@@ -175,6 +199,63 @@ public:
virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
};
+ /// UnconditionalCleanupN stores its N parameters and just passes
+ /// them to the real cleanup function.
+ template <class T, class A0>
+ class UnconditionalCleanup1 : public Cleanup {
+ A0 a0;
+ public:
+ UnconditionalCleanup1(A0 a0) : a0(a0) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ T::Emit(CGF, IsForEHCleanup, a0);
+ }
+ };
+
+ template <class T, class A0, class A1>
+ class UnconditionalCleanup2 : public Cleanup {
+ A0 a0; A1 a1;
+ public:
+ UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ T::Emit(CGF, IsForEHCleanup, a0, a1);
+ }
+ };
+
+ /// ConditionalCleanupN stores the saved form of its N parameters,
+ /// then restores them and performs the cleanup.
+ template <class T, class A0>
+ class ConditionalCleanup1 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ A0_saved a0_saved;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ T::Emit(CGF, IsForEHCleanup, a0);
+ }
+
+ public:
+ ConditionalCleanup1(A0_saved a0)
+ : a0_saved(a0) {}
+ };
+
+ template <class T, class A0, class A1>
+ class ConditionalCleanup2 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ T::Emit(CGF, IsForEHCleanup, a0, a1);
+ }
+
+ public:
+ ConditionalCleanup2(A0_saved a0, A1_saved a1)
+ : a0_saved(a0), a1_saved(a1) {}
+ };
+
private:
// The implementation for this class is in CGException.h and
// CGException.cpp; the definition is here because it's used as a
@@ -285,6 +366,25 @@ public:
(void) Obj;
}
+ // Feel free to add more variants of the following:
+
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
+ template <class T, class A0, class A1, class A2>
+ T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+ return new (Buffer) T(N, a0, a1, a2);
+ }
+
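
A cleanup using this hook stores a variable-length payload directly after the object; getExtraSize reports how many extra bytes to allocate for a given N, and the constructor receives N plus the three fixed arguments. A hypothetical sketch:

    // Sketch: a cleanup that records N extra llvm::Value* slots after itself.
    class DestroyValues : public EHScopeStack::Cleanup {
      size_t Count;
    public:
      static size_t getExtraSize(size_t N) { return N * sizeof(llvm::Value *); }
      DestroyValues(size_t N, llvm::Value *Dtor, bool UseEH, int Flags)
        : Count(N) { (void) Dtor; (void) UseEH; (void) Flags; }
      llvm::Value **getValues() {
        return reinterpret_cast<llvm::Value **>(this + 1);  // the extra storage
      }
      void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
        // walk getValues()[0..Count) and emit destruction for each (elided)
      }
    };
    // usage: EHStack.pushCleanupWithExtra<DestroyValues>(NormalCleanup, N,
    //                                                    Dtor, false, 0);
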
/// Pops a cleanup scope off the stack. This should only be called
/// by CodeGenFunction::PopCleanupBlock.
void popCleanup();
@@ -395,7 +495,7 @@ public:
void popNullFixups();
/// Clears the branch-fixups list. This should only be called by
- /// CodeGenFunction::ResolveAllBranchFixups.
+ /// ResolveAllBranchFixups.
void clearFixups() { BranchFixups.clear(); }
/// Gets the next EH destination index.
@@ -404,7 +504,7 @@ public:
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
-class CodeGenFunction : public BlockFunction {
+class CodeGenFunction : public CodeGenTypeCache {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
@@ -423,7 +523,7 @@ public:
llvm::BasicBlock *getBlock() const { return Block; }
EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
unsigned getDestIndex() const { return Index; }
-
+
private:
llvm::BasicBlock *Block;
EHScopeStack::stable_iterator ScopeDepth;
@@ -482,13 +582,11 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- // intptr_t, i32, i64
- const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
- uint32_t LLVMPointerWidth;
-
- bool Exceptions;
bool CatchUndefined;
-
+
+ const CodeGen::CGBlockInfo *BlockInfo;
+ llvm::Value *BlockPointer;
+
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -510,6 +608,15 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
+ template <class T>
+ typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+ return DominatingValue<T>::save(*this, value);
+ }
+
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
@@ -526,6 +633,45 @@ public:
llvm::Constant *RethrowFn);
void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
+ return EHStack.pushCleanup<CleanupType>(kind, a0);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+
+ typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
+ return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+
+ typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
+ initFullExprCleanup();
+ }
+
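In short: outside a conditional the arguments are stored verbatim; inside one they are first saved via DominatingValue<>::save (through saveValueInCond) and restored when the cleanup finally runs. A hypothetical call site, reusing the illustrative CallFreeOnPtr helper sketched earlier:

    // Release 'ptr' at the end of the full-expression, even if this
    // code sits on only one arm of a ?: or the RHS of a &&.
    pushFullExprCleanup<CallFreeOnPtr, llvm::Value*>(NormalAndEHCleanup, ptr);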
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -542,7 +688,14 @@ public:
/// process all branch fixups.
void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
- void ActivateCleanup(EHScopeStack::stable_iterator Cleanup);
+ /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+ /// The block cannot be reactivated. Pops it if it's the top of the
+ /// stack.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+
+ /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+ /// Cannot be used to resurrect a deactivated cleanup.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
@@ -557,11 +710,12 @@ public:
public:
/// \brief Enter a new cleanup scope.
- explicit RunCleanupsScope(CodeGenFunction &CGF)
- : CGF(CGF), PerformCleanup(true)
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : CGF(CGF), PerformCleanup(true)
{
CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
+ CGF.DidCallStackSave = false;
}
/// \brief Exit this cleanup scope, emitting any accumulated
@@ -593,7 +747,6 @@ public:
/// the cleanup blocks that have been added.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
- void ResolveAllBranchFixups(llvm::SwitchInst *Switch);
void ResolveBranchFixups(llvm::BasicBlock *Target);
/// The given basic block lies in the current EH scope, but may be a
@@ -608,7 +761,7 @@ public:
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
- JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
return getJumpDestInCurrentScope(createBasicBlock(Name));
}
@@ -626,27 +779,169 @@ public:
/// destination.
UnwindDest getRethrowDest();
- /// BeginConditionalBranch - Should be called before a conditional part of an
- /// expression is emitted. For example, before the RHS of the expression below
- /// is emitted:
- ///
- /// b && f(T());
- ///
- /// This is used to make sure that any temporaries created in the conditional
- /// branch are only destroyed if the branch is taken.
- void BeginConditionalBranch() {
- ++ConditionalBranchLevel;
- }
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ llvm::BasicBlock *StartBB;
- /// EndConditionalBranch - Should be called after a conditional part of an
- /// expression has been emitted.
- void EndConditionalBranch() {
- assert(ConditionalBranchLevel != 0 &&
- "Conditional branch mismatch!");
-
- --ConditionalBranchLevel;
- }
+ public:
+ ConditionalEvaluation(CodeGenFunction &CGF)
+ : StartBB(CGF.Builder.GetInsertBlock()) {}
+
+ void begin(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != this);
+ if (!CGF.OutermostConditional)
+ CGF.OutermostConditional = this;
+ }
+
+ void end(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != 0);
+ if (CGF.OutermostConditional == this)
+ CGF.OutermostConditional = 0;
+ }
+
+ /// Returns a block which will be executed prior to each
+ /// evaluation of the conditional code.
+ llvm::BasicBlock *getStartingBlock() const {
+ return StartBB;
+ }
+ };
+
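A minimal usage sketch, modeled on the b && f(T()) example from the comment this replaces (block names are illustrative; EmitBranchOnBoolExpr and EmitBlock are the usual CodeGenFunction helpers):

    ConditionalEvaluation eval(*this);
    llvm::BasicBlock *RHSBlock = createBasicBlock("land.rhs");
    llvm::BasicBlock *ContBlock = createBasicBlock("land.end");
    EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);

    EmitBlock(RHSBlock);
    eval.begin(*this);   // cleanups pushed from here on are conditional
    llvm::Value *RHS = EvaluateExprAsBool(E->getRHS());
    eval.end(*this);
    EmitBlock(ContBlock);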
+ /// isInConditionalBranch - Return true if we're currently emitting
+ /// one branch or the other of a conditional expression.
+ bool isInConditionalBranch() const { return OutermostConditional != 0; }
+
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CodeGenFunction &CGF;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *SavedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CodeGenFunction &CGF)
+ : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
+ CGF.OutermostConditional = 0;
+ }
+
+ ~StmtExprEvaluation() {
+ CGF.OutermostConditional = SavedOutermostConditional;
+ CGF.EnsureInsertPoint();
+ }
+ };
+
+ /// An object which temporarily prevents a value from being
+ /// destroyed by aggressive peephole optimizations that assume that
+ /// all uses of a value have been realized in the IR.
+ class PeepholeProtection {
+ llvm::Instruction *Inst;
+ friend class CodeGenFunction;
+
+ public:
+ PeepholeProtection() : Inst(0) {}
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ const OpaqueValueExpr *OpaqueValue;
+ bool BoundLValue;
+ CodeGenFunction::PeepholeProtection Protection;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return expr->isGLValue() || expr->getType()->isRecordType();
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+ /// enough pattern that this convenience constructor is really
+ /// helpful.
+ ///
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *op) : CGF(CGF) {
+ if (isa<ConditionalOperator>(op)) {
+ OpaqueValue = 0;
+ BoundLValue = false;
+ return;
+ }
+
+ const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
+ init(e->getOpaqueValue(), e->getCommon());
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
+ assert(opaqueValue && "no opaque value expression!");
+ assert(shouldBindAsLValue(opaqueValue));
+ initLValue(lvalue);
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
+ assert(opaqueValue && "no opaque value expression!");
+ assert(!shouldBindAsLValue(opaqueValue));
+ initRValue(rvalue);
+ }
+
+ void pop() {
+ assert(OpaqueValue && "mapping already popped!");
+ popImpl();
+ OpaqueValue = 0;
+ }
+
+ ~OpaqueValueMapping() {
+ if (OpaqueValue) popImpl();
+ }
+
+ private:
+ void popImpl() {
+ if (BoundLValue)
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+
+ void init(const OpaqueValueExpr *ov, const Expr *e) {
+ OpaqueValue = ov;
+ BoundLValue = shouldBindAsLValue(ov);
+ assert(BoundLValue == shouldBindAsLValue(e)
+ && "inconsistent expression value kinds!");
+ if (BoundLValue)
+ initLValue(CGF.EmitLValue(e));
+ else
+ initRValue(CGF.EmitAnyExpr(e));
+ }
+ void initLValue(const LValue &lv) {
+ CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
+ }
+
+ void initRValue(const RValue &rv) {
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ Protection = CGF.protectFromPeepholes(rv);
+ CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
+ }
+ };
+
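A hedged sketch of the intended use while emitting the GNU 'x ?: y' form (surrounding control flow elided):

    OpaqueValueMapping binding(CGF, E); // no-op for a plain ConditionalOperator
    // ... emit both arms; references to the OpaqueValueExpr now resolve
    // through getOpaqueLValueMapping / getOpaqueRValueMapping ...
    // The mapping is popped at scope exit, or explicitly via binding.pop().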
+ /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+ /// number that holds the value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ /// BuildBlockByrefAddress - Computes the address of a
+ /// variable declared as __block.
+ llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
@@ -658,10 +953,11 @@ private:
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
+ DeclMapTy LocalDeclMap;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
- llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
+ llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
@@ -682,6 +978,11 @@ private:
/// statement range in current switch instruction.
llvm::BasicBlock *CaseRangeBlock;
+ /// OpaqueLValues - Keeps track of the current set of opaque value
+ /// expressions.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
+
// VLASizeMap - This keeps track of the associated size for each VLA type.
// We track this by the size expression rather than the type itself because
// in certain situations, like a const qualifier applied to an VLA typedef,
@@ -708,21 +1009,17 @@ private:
/// VTT parameter.
ImplicitParamDecl *CXXVTTDecl;
llvm::Value *CXXVTTValue;
-
- /// ConditionalBranchLevel - Contains the nesting level of the current
- /// conditional branch. This is used so that we know if a temporary should be
- /// destroyed conditionally.
- unsigned ConditionalBranchLevel;
+
+ /// OutermostConditional - Points to the outermost active
+ /// conditional control. This is used so that we know if a
+ /// temporary should be destroyed conditionally.
+ ConditionalEvaluation *OutermostConditional;
/// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
unsigned> > ByRefValueInfo;
-
- /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
- /// number that holds the value.
- unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
@@ -735,6 +1032,8 @@ public:
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
+
/// Returns a pointer to the function's exception object slot, which
/// is assigned in every landing pad.
llvm::Value *getExceptionSlot();
@@ -755,7 +1054,7 @@ public:
return getInvokeDestImpl();
}
- llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
// Objective-C
@@ -769,6 +1068,10 @@ public:
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ void GenerateObjCGetterBody(ObjCIvarDecl *Ivar, bool IsAtomic, bool IsStrong);
+ void GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
+ ObjCIvarDecl *Ivar);
+
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
@@ -783,29 +1086,41 @@ public:
// Block Bits
//===--------------------------------------------------------------------===//
- llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
const llvm::StructType *,
- llvm::Constant *BlockVarLayout,
- std::vector<HelperInfo> *);
+ llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
- const BlockExpr *BExpr,
- CGBlockInfo &Info,
+ const CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- llvm::Constant *& BlockVarLayout,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm);
+ const DeclMapTy &ldm);
+
+ llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
+ BlockFieldFlags flags,
+ const VarDecl *BD);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ BlockFieldFlags flags,
+ const VarDecl *BD);
- llvm::Value *LoadBlockStruct();
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+
+ llvm::Value *LoadBlockStruct() {
+ assert(BlockPointer && "no block pointer set!");
+ return BlockPointer;
+ }
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
void AllocateBlockDecl(const BlockDeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
}
- llvm::Value *GetAddrOfBlockDecl(const ValueDecl *D, bool ByRef);
- const llvm::Type *BuildByRefType(const ValueDecl *D);
+ llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ const llvm::Type *BuildByRefType(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
void StartFunction(GlobalDecl GD, QualType RetTy,
@@ -827,21 +1142,21 @@ public:
/// GenerateThunk - Generate a thunk for the given method.
void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
-
+
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
- void InitializeVTablePointer(BaseSubobject Base,
+ void InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
uint64_t OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass);
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- void InitializeVTablePointers(BaseSubobject Base,
+ void InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
uint64_t OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
@@ -851,6 +1166,9 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+ /// GetVTablePtr - Return the Value of the vtable pointer member pointed
+ /// to by This.
+ llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
@@ -867,6 +1185,9 @@ public:
/// function instrumentation is enabled.
void EmitFunctionInstrumentation(const char *Fn);
+ /// EmitMCountInstrumentation - Emit call to .mcount.
+ void EmitMCountInstrumentation();
+
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
@@ -910,19 +1231,19 @@ public:
static bool hasAggregateLLVMType(QualType T);
/// createBasicBlock - Create an LLVM basic block.
- llvm::BasicBlock *createBasicBlock(const char *Name="",
- llvm::Function *Parent=0,
- llvm::BasicBlock *InsertBefore=0) {
+ llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
+ llvm::Function *parent = 0,
+ llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
- return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore);
+ return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
#else
- return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore);
+ return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
#endif
}
/// getBasicBlockForLabel - Return the LLVM basic block that the specified
/// label maps to.
- JumpDest getJumpDestForLabel(const LabelStmt *S);
+ JumpDest getJumpDestForLabel(const LabelDecl *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
@@ -974,7 +1295,8 @@ public:
//===--------------------------------------------------------------------===//
LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
- return LValue::MakeAddr(V, T, Alignment, getContext());
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
@@ -997,19 +1319,31 @@ public:
/// appropriate alignment.
llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+ /// CreateAggTemp - Create a temporary memory object for the given
+ /// aggregate type.
+ AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
+ }
+
+ /// Emit a cast to void* in the appropriate address space.
+ llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
+
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+ void EmitIgnoredExpr(const Expr *E);
+
/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
///
/// \param IgnoreResult - True if the resulting value isn't used.
- RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
- bool IsAggLocVolatile = false, bool IgnoreResult = false,
- bool IsInitializer = false);
+ RValue EmitAnyExpr(const Expr *E,
+ AggValueSlot AggSlot = AggValueSlot::ignored(),
+ bool IgnoreResult = false);
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
@@ -1017,14 +1351,13 @@ public:
/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(); however, the result will
/// always be accessible even if no aggregate location is provided.
- RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
- bool IsInitializer = false);
+ RValue EmitAnyExprToTemp(const Expr *E);
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
- bool IsLocationVolatile = false,
- bool IsInitializer = false);
+ bool IsLocationVolatile,
+ bool IsInitializer);
/// EmitAggregateCopy - Emit an aggregate copy.
///
@@ -1049,11 +1382,33 @@ public:
return Res;
}
+ /// getOpaqueLValueMapping - Given an opaque value expression (which
+ /// must be mapped to an l-value), return its mapping.
+ const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+ assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getOpaqueRValueMapping - Given an opaque value expression (which
+ /// must be mapped to an r-value), return its mapping.
+ const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+ assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
- llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+ llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
llvm::BasicBlock *GetIndirectGotoBlock();
/// EmitNullInitialization - Generate code to set a value of the given type to
@@ -1102,7 +1457,7 @@ public:
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
- llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
@@ -1117,7 +1472,7 @@ public:
llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl);
-
+
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args);
@@ -1125,6 +1480,11 @@ public:
bool ForVirtualBase, llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
@@ -1132,7 +1492,7 @@ public:
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
bool ZeroInitialization = false);
-
+
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
@@ -1154,7 +1514,7 @@ public:
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
-
+
void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
llvm::Value *NumElements);
@@ -1184,25 +1544,37 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitDecl(const Decl &D);
- /// EmitBlockVarDecl - Emit a block variable declaration.
+ /// EmitVarDecl - Emit a local variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitBlockVarDecl(const VarDecl &D);
+ void EmitVarDecl(const VarDecl &D);
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
- /// EmitLocalBlockVarDecl - Emit a local block variable declaration.
+ /// EmitAutoVarDecl - Emit an auto variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
+ void EmitAutoVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
- void EmitStaticBlockVarDecl(const VarDecl &D,
- llvm::GlobalValue::LinkageTypes Linkage);
+ void EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+ /// protectFromPeepholes - Protect a value that we're intending to
+ /// store to the side, but which will probably be used later, from
+ /// aggressive peephole optimizations that might delete it.
+ ///
+ /// Pass the result to unprotectFromPeepholes to declare that
+ /// protection is no longer required.
+ ///
+ /// There's no particular reason why this shouldn't apply to
+ /// l-values; it's just that no existing peepholes work on pointers.
+ PeepholeProtection protectFromPeepholes(RValue rvalue);
+ void unprotectFromPeepholes(PeepholeProtection protection);
+
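The pairing mirrors what OpaqueValueMapping::initRValue and popImpl above already do; sketched in isolation:

    PeepholeProtection protection = protectFromPeepholes(rv);
    // ... 'rv' sits unused in a side table for a while ...
    unprotectFromPeepholes(protection);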
//===--------------------------------------------------------------------===//
// Statement Emission
//===--------------------------------------------------------------------===//
@@ -1227,11 +1599,11 @@ public:
bool EmitSimpleStmt(const Stmt *S);
RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
- llvm::Value *AggLoc = 0, bool isAggVol = false);
+ AggValueSlot AVS = AggValueSlot::ignored());
/// EmitLabel - Emit the block for the given label. It is legal to call this
/// function even if there is no current insertion point.
- void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+ void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
void EmitLabelStmt(const LabelStmt &S);
void EmitGotoStmt(const GotoStmt &S);
@@ -1260,7 +1632,7 @@ public:
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
-
+
//===--------------------------------------------------------------------===//
// LValue Expression Emission
//===--------------------------------------------------------------------===//
@@ -1303,17 +1675,27 @@ public:
/// object.
LValue EmitCheckedLValue(const Expr *E);
+ /// EmitToMemory - Change a scalar value from its value
+ /// representation to its in-memory representation.
+ llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty);
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the LLVM value representation
/// to the memory representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment, QualType Ty);
+ bool Volatile, unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
@@ -1321,9 +1703,8 @@ public:
RValue EmitLoadOfLValue(LValue V, QualType LVType);
RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
- RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
- RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
-
+ RValue EmitLoadOfPropertyRefLValue(LValue LV,
+ ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
@@ -1331,8 +1712,7 @@ public:
void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
QualType Ty);
- void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
- void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
/// EmitStoreThroughLValue.
@@ -1343,9 +1723,13 @@ public:
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
llvm::Value **Result=0);
+ /// Emit an l-value for an assignment (simple or compound) of complex type.
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
+
+ // Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
- LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitCallExprLValue(const CallExpr *E);
// Note: only available for agg return types
@@ -1360,25 +1744,26 @@ public:
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
- LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
+ LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
-
+ LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForAnonRecordField(llvm::Value* Base,
- const FieldDecl* Field,
+ const IndirectFieldDecl* Field,
unsigned CVRQualifiers);
LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
unsigned CVRQualifiers);
-
+
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
/// and not the address of the value stored in the reference.
- LValue EmitLValueForFieldInitialization(llvm::Value* Base,
+ LValue EmitLValueForFieldInitialization(llvm::Value* Base,
const FieldDecl* Field,
unsigned CVRQualifiers);
-
+
LValue EmitLValueForIvar(QualType ObjectTy,
llvm::Value* Base, const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
@@ -1390,18 +1775,17 @@ public:
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
- LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
+ LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
-
+
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
- LValue EmitObjCKVCRefLValue(const ObjCImplicitSetterGetterRefExpr *E);
- LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
- void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::ConstantInt *Init);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
//===--------------------------------------------------------------------===//
@@ -1424,7 +1808,7 @@ public:
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const Decl *TargetDecl = 0);
- RValue EmitCallExpr(const CallExpr *E,
+ RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
@@ -1434,8 +1818,15 @@ public:
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty);
- llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *&This, const llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ const llvm::Type *Ty);
+
+ llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
llvm::Value *Callee,
@@ -1453,7 +1844,7 @@ public:
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
-
+
RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E);
@@ -1464,15 +1855,15 @@ public:
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- llvm::Value *EmitNeonCall(llvm::Function *F,
+ llvm::Value *EmitNeonCall(llvm::Function *F,
llvm::SmallVectorImpl<llvm::Value*> &O,
- const char *name, bool splat = false,
+ const char *name,
unsigned shift = 0, bool rightshift = false);
- llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
- bool widen = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
bool negateForRightShift);
-
+
+ llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -1481,17 +1872,10 @@ public:
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
- RValue EmitObjCPropertyGet(const Expr *E,
- ReturnValueSlot Return = ReturnValueSlot());
- RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S,
- ReturnValueSlot Return = ReturnValueSlot());
- void EmitObjCPropertySet(const Expr *E, RValue Src);
- void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
-
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
- RValue EmitReferenceBindingToExpr(const Expr* E,
+ RValue EmitReferenceBindingToExpr(const Expr* E,
const NamedDecl *InitializedDecl);
//===--------------------------------------------------------------------===//
@@ -1516,12 +1900,10 @@ public:
QualType DstTy);
- /// EmitAggExpr - Emit the computation of the specified expression of
- /// aggregate type. The result is computed into DestPtr. Note that if
- /// DestPtr is null, the value of the aggregate expression is not needed.
- void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
- bool IgnoreResult = false, bool IsInitializer = false,
- bool RequiresGCollection = false);
+ /// EmitAggExpr - Emit the computation of the specified expression
+ /// of aggregate type. The result is computed into the given slot,
+ /// which may be null to indicate that the value is not needed.
+ void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a temporary LValue.
@@ -1534,10 +1916,9 @@ public:
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
- ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
- bool IgnoreImag = false,
- bool IgnoreRealAssign = false,
- bool IgnoreImagAssign = false);
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
/// of complex type, storing into the specified Value*.
@@ -1550,25 +1931,20 @@ public:
/// LoadComplexFromAddr - Load a complex number from the specified address.
ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
- /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a
- /// static block var decl.
- llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
- const char *Separator,
+ /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
+ /// a static local variable.
+ llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
llvm::GlobalValue::LinkageTypes Linkage);
-
- /// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+
+ /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
- AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
- llvm::GlobalVariable *GV);
-
+ AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
- /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime
- /// initialized static block var decl.
- void EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
- llvm::GlobalVariable *GV);
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
@@ -1579,6 +1955,13 @@ public:
void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
llvm::Constant *DeclPtr);
+ /// Emit code in this function to perform a guarded variable
+ /// initialization. Guarded initializations are used when it's not
+ /// possible to prove that an initialization will be done exactly
+ /// once, e.g. with a static local variable or a static data member
+ /// of a class template.
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
+
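For example (plain C++ source, not part of the patch), this local needs a guard because its initializer must run at most once, on first entry:

    int nextID(); // non-constant initializer
    void f() {
      static int id = nextID(); // lowered with EmitCXXGuardedInit-style code
    }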
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
@@ -1591,14 +1974,16 @@ public:
const std::vector<std::pair<llvm::WeakVH,
llvm::Constant*> > &DtorsAndObjects);
- void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
+ llvm::GlobalVariable *Addr);
- void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+ void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
+
+ void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
+ const Expr *Exp);
- RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
- llvm::Value *AggLoc = 0,
- bool IsAggLocVolatile = false,
- bool IsInitializer = false);
+ RValue EmitExprWithCleanups(const ExprWithCleanups *E,
+ AggValueSlot Slot = AggValueSlot::ignored());
void EmitCXXThrowExpr(const CXXThrowExpr *E);
@@ -1626,7 +2011,7 @@ public:
/// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
/// generate a branch around the created basic block as necessary.
llvm::BasicBlock *getTrapBB();
-
+
/// EmitCallArg - Emit a single call argument.
RValue EmitCallArg(const Expr *E, QualType ArgType);
@@ -1678,12 +2063,26 @@ private:
E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
assert(Arg != ArgEnd && "Running over edge of argument list!");
QualType ArgType = *I;
-
+#ifndef NDEBUG
+ QualType ActualArgType = Arg->getType();
+ if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
+ QualType ActualBaseType =
+ ActualArgType->getAs<PointerType>()->getPointeeType();
+ QualType ArgBaseType =
+ ArgType->getAs<PointerType>()->getPointeeType();
+ if (ArgBaseType->isVariableArrayType()) {
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(ActualBaseType)) {
+ if (!VAT->getSizeExpr())
+ ActualArgType = ArgType;
+ }
+ }
+ }
assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
getTypePtr() ==
- getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ getContext().getCanonicalType(ActualArgType).getTypePtr() &&
"type mismatch in call argument!");
-
+#endif
Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
ArgType));
}
@@ -1710,36 +2109,78 @@ private:
void EmitDeclMetadata();
};
-/// CGBlockInfo - Information to generate a block literal.
-class CGBlockInfo {
-public:
- /// Name - The name of the block, kindof.
- const char *Name;
-
- /// DeclRefs - Variables from parent scopes that have been
- /// imported into this block.
- llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
-
- /// InnerBlocks - This block and the blocks it encloses.
- llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
-
- /// CXXThisRef - Non-null if 'this' was required somewhere, in
- /// which case this is that expression.
- const CXXThisExpr *CXXThisRef;
-
- /// NeedsObjCSelf - True if something in this block has an implicit
- /// reference to 'self'.
- bool NeedsObjCSelf;
-
- /// These are initialized by GenerateBlockFunction.
- bool BlockHasCopyDispose;
- CharUnits BlockSize;
- CharUnits BlockAlign;
- llvm::SmallVector<const Expr*, 8> BlockLayout;
-
- CGBlockInfo(const char *Name);
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ /// Try to save the given value.
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
+
+ // Otherwise we need an alloca.
+ llvm::Value *alloca =
+ CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
+
+ return saved_type(alloca, true);
+ }
+
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ if (!value.getInt()) return value.getPointer();
+ return CGF.Builder.CreateLoad(value.getPointer());
+ }
};
-
+
+/// A partial specialization of DominatingPointer for llvm::Values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ Kind K;
+ saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGExprCXX.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index d125b37..9e5d7cf 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -14,10 +14,10 @@
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
+#include "CodeGenTBAA.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
-#include "Mangle.h"
#include "TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
@@ -25,6 +25,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
@@ -36,6 +37,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
@@ -57,19 +59,19 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
Diagnostic &diags)
- : BlockModule(C, M, TD, Types, *this), Context(C),
- Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ : Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ TBAA(0),
VTables(*this), Runtime(0),
- CFConstantStringClassRef(0), NSConstantStringClassRef(0),
+ CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
- BlockObjectAssign(0), BlockObjectDispose(0){
-
+ BlockObjectAssign(0), BlockObjectDispose(0),
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
if (!Features.ObjC1)
Runtime = 0;
else if (!Features.NeXTRuntime)
@@ -79,13 +81,32 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
else
Runtime = CreateMacObjCRuntime(*this);
+ // Enable TBAA unless it's suppressed.
+ if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOptions(),
+ ABI.getMangleContext());
+
// If debug info generation is enabled, create the CGDebugInfo object.
DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+
+ Block.GlobalUniqueCount = 0;
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ PointerWidthInBits = C.Target.getPointerWidth(0);
+ IntTy = llvm::IntegerType::get(LLVMContext, C.Target.getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
}
CodeGenModule::~CodeGenModule() {
delete Runtime;
delete &ABI;
+ delete TBAA;
delete DebugInfo;
}
@@ -110,10 +131,23 @@ void CodeGenModule::Release() {
EmitAnnotations();
EmitLLVMUsed();
+ SimplifyPersonality();
+
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
}
+llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfo(QTy);
+}
+
+void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo) {
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+}
+
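Putting the two together, a scalar load of a QualType Ty could be tagged roughly as follows (a sketch from within CodeGenFunction; the TBAAInfo parameter added to EmitLoadOfScalar above is the real consumer):

    llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
    if (llvm::MDNode *TBAAInfo = CGM.getTBAAInfo(Ty))
      CGM.DecorateInstruction(Load, TBAAInfo);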
bool CodeGenModule::isTargetDarwin() const {
return getContext().Target.getTriple().getOS() == llvm::Triple::Darwin;
}
@@ -143,93 +177,34 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
-LangOptions::VisibilityMode
-CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
- if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- if (VD->getStorageClass() == SC_PrivateExtern)
- return LangOptions::Hidden;
-
- if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
- switch (attr->getVisibility()) {
- default: assert(0 && "Unknown visibility!");
- case VisibilityAttr::Default:
- return LangOptions::Default;
- case VisibilityAttr::Hidden:
- return LangOptions::Hidden;
- case VisibilityAttr::Protected:
- return LangOptions::Protected;
- }
- }
-
- if (getLangOptions().CPlusPlus) {
- // Entities subject to an explicit instantiation declaration get default
- // visibility.
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
- if (Function->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const ClassTemplateSpecializationDecl *ClassSpec
- = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- if (ClassSpec->getSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
- if (Record->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return LangOptions::Default;
- } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if (Var->isStaticDataMember() &&
- (Var->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration))
- return LangOptions::Default;
- }
-
- // If -fvisibility-inlines-hidden was provided, then inline C++ member
- // functions get "hidden" visibility by default.
- if (getLangOptions().InlineVisibilityHidden)
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isInlined())
- return LangOptions::Hidden;
- }
-
- // If this decl is contained in a class, it should have the same visibility
- // as the parent class.
- if (const DeclContext *DC = D->getDeclContext())
- if (DC->isRecord())
- return getDeclVisibilityMode(cast<Decl>(DC));
-
- return getLangOptions().getVisibilityMode();
-}
-
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
- const Decl *D) const {
+ const NamedDecl *D) const {
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
return;
}
- switch (getDeclVisibilityMode(D)) {
- default: assert(0 && "Unknown visibility!");
- case LangOptions::Default:
- return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- case LangOptions::Hidden:
- return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- case LangOptions::Protected:
- return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- }
+ // Set visibility for definitions.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.visibilityExplicit() || !GV->hasAvailableExternallyLinkage())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
/// Set the symbol visibility of type information (vtable and RTTI)
/// associated with the given type.
void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
const CXXRecordDecl *RD,
- bool IsForRTTI) const {
+ TypeVisibilityKind TVK) const {
setGlobalVisibility(GV, RD);
if (!CodeGenOpts.HiddenWeakVTables)
return;
+ // We never want to drop the visibility for RTTI names.
+ if (TVK == TVK_ForRTTIName)
+ return;
+
// We want to drop the visibility to hidden for weak type symbols.
// This isn't possible if there might be unresolved references
// elsewhere that rely on this symbol being visible.
@@ -238,7 +213,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// in CGVTables.cpp.
// Preconditions.
- if (GV->getLinkage() != llvm::GlobalVariable::WeakODRLinkage ||
+ if (GV->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage ||
GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
return;
@@ -273,12 +248,14 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// If there's a key function, there may be translation units
// that don't have the key function's definition. But ignore
// this if we're emitting RTTI under -fno-rtti.
- if (!IsForRTTI || Features.RTTI)
+ if (TVK == TVK_ForRTTI || Features.RTTI) {
if (Context.getKeyFunction(RD))
return;
+ }
// Otherwise, drop the visibility to hidden.
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setUnnamedAddr(true);
}
llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
@@ -297,16 +274,18 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
}
llvm::SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
- getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer);
+ getCXXABI().getMangleContext().mangleBlock(BD, Out);
else
- getCXXABI().getMangleContext().mangleName(ND, Buffer);
+ getCXXABI().getMangleContext().mangleName(ND, Out);
// Allocate space for the mangled name.
+ Out.flush();
size_t Length = Buffer.size();
char *Name = MangledNamesAllocator.Allocate<char>(Length);
std::copy(Buffer.begin(), Buffer.end(), Name);
@@ -316,9 +295,19 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
return Str;
}
-void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
- const BlockDecl *BD) {
- getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
+void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ MangleContext &MangleCtx = getCXXABI().getMangleContext();
+ const Decl *D = GD.getDecl();
+ llvm::raw_svector_ostream Out(Buffer.getBuffer());
+ if (D == 0)
+ MangleCtx.mangleGlobalBlock(BD, Out);
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
+ else
+ MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
@@ -342,9 +331,7 @@ void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- std::vector<const llvm::Type*>(),
- false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
@@ -412,14 +399,18 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
- return llvm::Function::LinkOnceODRLinkage;
+ return !Context.getLangOptions().AppleKext
+ ? llvm::Function::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
// An explicit instantiation of a template has weak linkage, since
// explicit instantiations can occur in multiple translation units
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
if (Linkage == GVA_ExplicitTemplateInstantiation)
- return llvm::Function::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext
+ ? llvm::Function::WeakODRLinkage
+ : llvm::Function::InternalLinkage;
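For reference (plain C++, not from the patch), the case handled here looks like:

    template <class T> T twice(T x) { return x + x; }
    template int twice<int>(int); // explicit instantiation definition:
                                  // weak_odr normally, internal under -fapple-kext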
// Otherwise, we have strong external linkage.
assert(Linkage == GVA_StrongExternal);
@@ -455,9 +446,15 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (D->hasAttr<AlwaysInlineAttr>())
F->addFnAttr(llvm::Attribute::AlwaysInline);
+ if (D->hasAttr<NakedAttr>())
+ F->addFnAttr(llvm::Attribute::Naked);
+
if (D->hasAttr<NoInlineAttr>())
F->addFnAttr(llvm::Attribute::NoInline);
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ F->setUnnamedAddr(true);
+
if (Features.getStackProtectorMode() == LangOptions::SSPOn)
F->addFnAttr(llvm::Attribute::StackProtect);
else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
@@ -474,7 +471,10 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
void CodeGenModule::SetCommonAttributes(const Decl *D,
llvm::GlobalValue *GV) {
- setGlobalVisibility(GV, D);
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ setGlobalVisibility(GV, ND);
+ else
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
if (D->hasAttr<UsedAttr>())
AddUsedGlobal(GV);
@@ -516,6 +516,11 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
F->setLinkage(llvm::Function::ExternalWeakLinkage);
} else {
F->setLinkage(llvm::Function::ExternalLinkage);
+
+ NamedDecl::LinkageInfo LV = FD->getLinkageAndVisibility();
+ if (LV.linkage() == ExternalLinkage && LV.visibilityExplicit()) {
+ F->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
}
if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
@@ -634,6 +639,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
new llvm::GlobalVariable(*M, unit->getType(), false,
llvm::GlobalValue::PrivateLinkage, unit,
".str");
+ unitGV->setUnnamedAddr(true);
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[4] = {
@@ -664,7 +670,8 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
@@ -796,7 +803,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D) {
+ GlobalDecl D, bool ForVTable) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -825,8 +832,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
- FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- std::vector<const llvm::Type*>(), false);
+ FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false);
IsIncompleteFunction = true;
}
@@ -846,30 +852,34 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
// list, and remove it from DeferredDecls (since we don't need it anymore).
DeferredDeclsToEmit.push_back(DDI->second);
DeferredDecls.erase(DDI);
- } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
- // If this the first reference to a C++ inline function in a class, queue up
- // the deferred function body for emission. These are not seen as
- // top-level declarations.
- if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
- DeferredDeclsToEmit.push_back(D);
- // A called constructor which has no definition or declaration need be
- // synthesized.
- else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- if (CD->isImplicit()) {
- assert(CD->isUsed() && "Sema doesn't consider constructor as used.");
- DeferredDeclsToEmit.push_back(D);
- }
- } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
- if (DD->isImplicit()) {
- assert(DD->isUsed() && "Sema doesn't consider destructor as used.");
- DeferredDeclsToEmit.push_back(D);
- }
- } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isCopyAssignment() && MD->isImplicit()) {
- assert(MD->isUsed() && "Sema doesn't consider CopyAssignment as used.");
- DeferredDeclsToEmit.push_back(D);
+
+ // Otherwise, there are cases we have to worry about where we're
+ // using a declaration for which we must emit a definition but where
+ // we might not find a top-level definition:
+ // - member functions defined inline in their classes
+ // - friend functions defined inline in some class
+ // - special member functions with implicit definitions
+ // If we ever change our AST traversal to walk into class methods,
+ // this will be unnecessary.
+ //
+ // We also don't emit a definition for a function if it's going to be an entry
+ // in a vtable, unless it's already marked as used.
+ } else if (getLangOptions().CPlusPlus && D.getDecl()) {
+ // Look for a declaration that's lexically in a record.
+ const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ do {
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ if (FD->isImplicit() && !ForVTable) {
+ assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ } else if (FD->isThisDeclarationADefinition()) {
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ }
}
- }
+ FD = FD->getPreviousDeclaration();
+ } while (FD);
}
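A sketch of the case the rewritten walk handles: a definition that exists only lexically inside a class and would otherwise never be seen as a top-level declaration. Names are illustrative:

    struct Widget {
      virtual ~Widget() {}    // defined inline in the class
      virtual void draw();    // key function defined elsewhere
    };

    // Emitting Widget's vtable references ~Widget(); the loop above finds
    // the in-class definition by walking the redeclaration chain and queues
    // it on DeferredDeclsToEmit. With ForVTable set, an implicit member is
    // not eagerly queued just because a vtable slot mentions it; it must
    // already have been marked used.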
// Make sure the result is of the requested type.
@@ -886,13 +896,14 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty) {
+ const llvm::Type *Ty,
+ bool ForVTable) {
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
llvm::StringRef MangledName = getMangledName(GD);
- return GetOrCreateLLVMFunction(MangledName, Ty, GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
}
/// CreateRuntimeFunction - Create a new runtime function with the specified
@@ -900,7 +911,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
llvm::StringRef Name) {
- return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
}
static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
@@ -924,7 +935,8 @@ static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *Ty,
- const VarDecl *D) {
+ const VarDecl *D,
+ bool UnnamedAddr) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -935,6 +947,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
WeakRefReferences.erase(Entry);
}
+ if (UnnamedAddr)
+ Entry->setUnnamedAddr(true);
+
if (Entry->getType() == Ty)
return Entry;
@@ -965,13 +980,20 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
// handling.
GV->setConstant(DeclIsConstantGlobal(Context, D));
- // FIXME: Merge with other attribute handling code.
- if (D->getStorageClass() == SC_PrivateExtern)
- GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
-
- if (D->hasAttr<WeakAttr>() ||
- D->hasAttr<WeakImportAttr>())
- GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // Set linkage and visibility in case we never see a definition.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.linkage() != ExternalLinkage) {
+ // Don't set internal linkage on declarations.
+ } else {
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ // Set visibility on a declaration only if it's explicit.
+ if (LV.visibilityExplicit())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
GV->setThreadLocal(D->isThreadSpecified());
}
@@ -980,6 +1002,45 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
}
+llvm::GlobalVariable *
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
+ const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ // Because of C++ name mangling, the only way we can end up with an already
+ // existing global with the same name is if it has been declared extern "C".
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
@@ -1003,7 +1064,8 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
llvm::StringRef Name) {
- return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
+ true);
}
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
@@ -1045,44 +1107,63 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (KeyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
+ // When compiling with optimizations turned on, we emit all vtables,
+ // even if the key function is not defined in the current translation
+ // unit. If this is the case, use available_externally linkage.
+ if (!Def && CodeGenOpts.OptimizationLevel)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
if (KeyFunction->isInlined())
- return llvm::GlobalVariable::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
case TSK_ExplicitInstantiationDefinition:
- return llvm::GlobalVariable::WeakODRLinkage;
-
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::WeakODRLinkage :
+ llvm::Function::InternalLinkage;
+
case TSK_ExplicitInstantiationDeclaration:
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return llvm::GlobalVariable::WeakODRLinkage;
+ return !Context.getLangOptions().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
}
}
+ if (Context.getLangOptions().AppleKext)
+ return llvm::Function::InternalLinkage;
+
switch (RD->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDefinition:
- return llvm::GlobalVariable::WeakODRLinkage;
-
- case TSK_ExplicitInstantiationDeclaration:
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return llvm::GlobalVariable::WeakODRLinkage;
+ case TSK_ExplicitInstantiationDeclaration:
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
}
// Silence GCC warning.
- return llvm::GlobalVariable::WeakODRLinkage;
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
}
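Reading the revised switches as inputs; the linkage comments are taken directly from the cases above, and extern template is the C++0x spelling of an explicit instantiation declaration:

    struct A { virtual void f(); };   // non-inline key function: external
                                      // vtable, emitted next to A::f()

    template <typename T> struct B { virtual void g() {} };

    B<float> bf;                      // implicit instantiation: linkonce_odr
    template struct B<int>;           // explicit instantiation definition:
                                      //   weak_odr
    extern template struct B<char>;   // explicit instantiation declaration:
                                      //   linkonce_odr for now, per the FIXME
                                      //   (available_externally breaks LLVM)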
CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
- return CharUnits::fromQuantity(
- TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
+ return Context.toCharUnitsFromBits(
+ TheTargetData.getTypeStoreSizeInBits(Ty));
}
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
@@ -1112,7 +1193,6 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
T = D->getType();
if (getLangOptions().CPlusPlus) {
- EmitCXXGlobalVarDeclInitFunc(D);
Init = EmitNullConstant(T);
NonConstInit = true;
} else {
@@ -1183,42 +1263,57 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setConstant(true);
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
-
+
// Set the llvm linkage type as appropriate.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ GetLLVMLinkageVarDefinition(D, GV);
+ GV->setLinkage(Linkage);
+ if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit the initializer function if necessary.
+ if (NonConstInit)
+ EmitCXXGlobalVarDeclInitFunc(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
if (Linkage == GVA_Internal)
- GV->setLinkage(llvm::Function::InternalLinkage);
+ return llvm::Function::InternalLinkage;
else if (D->hasAttr<DLLImportAttr>())
- GV->setLinkage(llvm::Function::DLLImportLinkage);
+ return llvm::Function::DLLImportLinkage;
else if (D->hasAttr<DLLExportAttr>())
- GV->setLinkage(llvm::Function::DLLExportLinkage);
+ return llvm::Function::DLLExportLinkage;
else if (D->hasAttr<WeakAttr>()) {
if (GV->isConstant())
- GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
+ return llvm::GlobalVariable::WeakODRLinkage;
else
- GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ return llvm::GlobalVariable::WeakAnyLinkage;
} else if (Linkage == GVA_TemplateInstantiation ||
Linkage == GVA_ExplicitTemplateInstantiation)
// FIXME: It seems like we can provide more specific linkage here
// (LinkOnceODR, WeakODR).
- GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
- else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
+ return llvm::GlobalVariable::WeakAnyLinkage;
+ else if (!getLangOptions().CPlusPlus &&
+ ((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
+ D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
!D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
// Thread local vars aren't considered common linkage.
- GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
- // common vars aren't constant even if declared const.
- GV->setConstant(false);
- } else
- GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
-
- SetCommonAttributes(D, GV);
-
- // Emit global variable debug information.
- if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D->getLocation());
- DI->EmitGlobalVariable(GV, D);
+ return llvm::GlobalVariable::CommonLinkage;
}
+ return llvm::GlobalVariable::ExternalLinkage;
}
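The factored-out helper preserves C's tentative-definition rules; a sketch of how the new conditions classify a few globals (the attribute spellings are assumed to be the GNU ones the CommonAttr/NoCommonAttr checks correspond to):

    /* C translation unit */
    int tentative;                     /* no initializer, no section,
                                          not thread-local: common linkage */
    __attribute__((nocommon)) int nc;  /* opts out: external linkage       */
    static int file_local;             /* GVA_Internal: internal linkage   */
    int defined = 42;                  /* has an initializer: external     */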
/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
@@ -1295,7 +1390,6 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
- getCXXABI().getMangleContext().mangleInitDiscriminator();
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1345,9 +1439,16 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
Entry = NewFn;
}
+ // We need to set linkage and visibility on the function before
+ // generating code for it because various parts of IR generation
+ // want to propagate this information down (e.g. to local static
+ // declarations).
llvm::Function *Fn = cast<llvm::Function>(Entry);
setFunctionLinkage(D, Fn);
+ // FIXME: this is redundant with part of SetFunctionDefinitionAttributes
+ setGlobalVisibility(Fn, D);
+
CodeGenFunction(*this).GenerateCode(D, Fn);
SetFunctionDefinitionAttributes(D, Fn);
@@ -1378,7 +1479,8 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// if a deferred decl.
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
- Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy), 0);
@@ -1444,7 +1546,7 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
const llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
- return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD));
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD), /*ForVTable=*/false);
}
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
@@ -1453,27 +1555,6 @@ llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
(llvm::Intrinsic::ID)IID, Tys, NumTys);
}
-
-llvm::Function *CodeGenModule::getMemCpyFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memcpy, ArgTypes, 3);
-}
-
-llvm::Function *CodeGenModule::getMemMoveFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memmove, ArgTypes, 3);
-}
-
-llvm::Function *CodeGenModule::getMemSetFn(const llvm::Type *DestType,
- const llvm::Type *SizeType) {
- const llvm::Type *ArgTypes[2] = { DestType, SizeType };
- return getIntrinsic(llvm::Intrinsic::memset, ArgTypes, 2);
-}
-
static llvm::StringMapEntry<llvm::Constant*> &
GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
const StringLiteral *Literal,
@@ -1494,18 +1575,9 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
- ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
- &ToPtr, ToPtr + NumBytes,
- strictConversion);
-
- // Check for conversion failure.
- if (Result != conversionOK) {
- // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
- // this duplicate code.
- assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
- StringLength = NumBytes;
- return Map.GetOrCreateValue(String);
- }
+ (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
@@ -1595,6 +1667,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
+ GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
@@ -1618,7 +1691,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
llvm::Constant *
-CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
+CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
@@ -1634,16 +1707,27 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get _NSConstantStringClassReference.
- if (!NSConstantStringClassRef) {
+ if (!ConstantStringClassRef) {
+ std::string StringClass(getLangOptions().ObjCConstantStringClass);
const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::Constant *GV = CreateRuntimeVariable(Ty,
- Features.ObjCNonFragileABI ?
- "OBJC_CLASS_$_NSConstantString" :
- "_NSConstantStringClassReference");
+ llvm::Constant *GV;
+ if (StringClass.empty())
+ GV = CreateRuntimeVariable(Ty,
+ Features.ObjCNonFragileABI ?
+ "OBJC_CLASS_$_NSConstantString" :
+ "_NSConstantStringClassReference");
+ else {
+ std::string str;
+ if (Features.ObjCNonFragileABI)
+ str = "OBJC_CLASS_$_" + StringClass;
+ else
+ str = "_" + StringClass + "ClassReference";
+ GV = CreateRuntimeVariable(Ty, str);
+ }
// Decay array -> ptr
- NSConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
}
QualType NSTy = getContext().getNSConstantStringType();
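What the new branch enables, as a hypothetical invocation; the two symbol names are exactly the strings assembled above:

    // clang -fconstant-string-class=MyString -c literals.m
    //
    //   id greeting = @"hello";
    //
    // The literal's class slot then references _MyStringClassReference
    // under the fragile ABI, or OBJC_CLASS_$_MyString under the non-fragile
    // ABI; with the flag unset, the NSConstantString defaults remain.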
@@ -1654,7 +1738,7 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
std::vector<llvm::Constant*> Fields(3);
// Class pointer.
- Fields[0] = NSConstantStringClassRef;
+ Fields[0] = ConstantStringClassRef;
// String pointer.
llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
@@ -1675,6 +1759,7 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
".str");
+ GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
@@ -1704,15 +1789,16 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
/// GetStringForStringLiteral - Return the appropriate bytes for a
/// string literal, properly padded to match the literal type.
std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const ASTContext &Context = getContext();
const ConstantArrayType *CAT =
- getContext().getAsConstantArrayType(E->getType());
+ Context.getAsConstantArrayType(E->getType());
assert(CAT && "String isn't pointer or array!");
// Resize the string to the right size.
uint64_t RealLen = CAT->getSize().getZExtValue();
if (E->isWide())
- RealLen *= getContext().Target.getWCharWidth()/8;
+ RealLen *= Context.Target.getWCharWidth() / Context.getCharWidth();
std::string Str = E->getString().str();
Str.resize(RealLen, '\0');
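A worked instance of the padding computation above, assuming a 32-bit wchar_t and 8-bit chars:

    wchar_t ws[6] = L"hi";
    // CAT->getSize() is 6, so RealLen = 6 * (32 / 8) = 24 bytes; the bytes
    // of L"hi" are kept and Str.resize(RealLen, '\0') zero-fills the rest.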
@@ -1756,9 +1842,12 @@ static llvm::Constant *GenerateStringLiteral(const std::string &str,
llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
// Create a global variable for this string
- return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
- llvm::GlobalValue::PrivateLinkage,
- C, GlobalName);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ GV->setUnnamedAddr(true);
+ return GV;
}
/// GetAddrOfConstantString - Returns a pointer to a character array
@@ -2097,7 +2186,7 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
return BlockObjectDispose =
@@ -2119,8 +2208,8 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(Int8PtrTy);
+ ArgTys.push_back(Int8PtrTy);
ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
return BlockObjectAssign =
@@ -2139,8 +2228,8 @@ llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
}
// Otherwise construct the variable by hand.
- return NSConcreteGlobalBlock = CreateRuntimeVariable(
- PtrToInt8Ty, "_NSConcreteGlobalBlock");
+ return NSConcreteGlobalBlock =
+ CreateRuntimeVariable(Int8PtrTy, "_NSConcreteGlobalBlock");
}
llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
@@ -2155,8 +2244,8 @@ llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
}
// Otherwise construct the variable by hand.
- return NSConcreteStackBlock = CreateRuntimeVariable(
- PtrToInt8Ty, "_NSConcreteStackBlock");
+ return NSConcreteStackBlock =
+ CreateRuntimeVariable(Int8PtrTy, "_NSConcreteStackBlock");
}
///@}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index cabff9e..b6bd37c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -14,17 +14,16 @@
#ifndef CLANG_CODEGEN_CODEGENMODULE_H
#define CLANG_CODEGEN_CODEGENMODULE_H
+#include "clang/Basic/ABI.h"
#include "clang/Basic/LangOptions.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "CGBlocks.h"
+#include "clang/AST/Mangle.h"
#include "CGCall.h"
-#include "CGCXX.h"
#include "CGVTables.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
-#include "Mangle.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
@@ -66,14 +65,16 @@ namespace clang {
class Diagnostic;
class AnnotateAttr;
class CXXDestructorDecl;
+ class MangleBuffer;
namespace CodeGen {
class CodeGenFunction;
+ class CodeGenTBAA;
class CGCXXABI;
class CGDebugInfo;
class CGObjCRuntime;
- class MangleBuffer;
+ class BlockFieldFlags;
struct OrderGlobalInits {
unsigned int priority;
@@ -93,10 +94,39 @@ namespace CodeGen {
return priority == RHS.priority && lex_order < RHS.lex_order;
}
};
+
+ struct CodeGenTypeCache {
+ /// i8, i32, and i64
+ const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+
+ /// int
+ const llvm::IntegerType *IntTy;
+
+ /// intptr_t and size_t, which we assume are the same
+ union {
+ const llvm::IntegerType *IntPtrTy;
+ const llvm::IntegerType *SizeTy;
+ };
+
+ /// void* in address space 0
+ union {
+ const llvm::PointerType *VoidPtrTy;
+ const llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ const llvm::PointerType *VoidPtrPtrTy;
+ const llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The width of a pointer in address space 0.
+ unsigned char PointerWidthInBits;
+ };
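The anonymous unions above are a naming device rather than a space optimization: both members share one slot, so a cached type can be read under whichever name suits the call site (VoidPtrTy in generic code, Int8PtrTy when byte arithmetic is meant). A standalone sketch of the idiom:

    #include <cassert>

    struct CacheDemo {
      union {                  // one slot, two domain-appropriate names
        const void *VoidPtrTy;
        const void *Int8PtrTy;
      };
    };

    int main() {
      static const int token = 0;
      CacheDemo cache;
      cache.VoidPtrTy = &token;
      assert(cache.Int8PtrTy == &token);  // same value, either spelling
    }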
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
-class CodeGenModule : public BlockModule {
+class CodeGenModule : public CodeGenTypeCache {
CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
@@ -111,6 +141,7 @@ class CodeGenModule : public BlockModule {
Diagnostic &Diags;
CGCXXABI &ABI;
CodeGenTypes Types;
+ CodeGenTBAA *TBAA;
/// VTables - Holds information about C++ vtables.
CodeGenVTables VTables;
@@ -183,9 +214,9 @@ class CodeGenModule : public BlockModule {
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
- /// NSConstantStringClassRef - Cached reference to the class for constant
+ /// ConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
- llvm::Constant *NSConstantStringClassRef;
+ llvm::Constant *ConstantStringClassRef;
/// Lazily create the Objective-C runtime
void createObjCRuntime();
@@ -205,6 +236,16 @@ class CodeGenModule : public BlockModule {
llvm::Constant *BlockObjectAssign;
llvm::Constant *BlockObjectDispose;
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ llvm::DenseMap<uint64_t, llvm::Constant *> AssignCache;
+ llvm::DenseMap<uint64_t, llvm::Constant *> DestroyCache;
+
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
@@ -246,21 +287,43 @@ public:
CodeGenVTables &getVTables() { return VTables; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Context.Target; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
const TargetCodeGenInfo &getTargetCodeGenInfo();
bool isTargetDarwin() const;
- /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
- LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ static void DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo);
/// setGlobalVisibility - Set the visibility for the given LLVM
/// GlobalValue.
- void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ /// TypeVisibilityKind - The kind of global variable that is passed to
+ /// setTypeVisibility.
+ enum TypeVisibilityKind {
+ TVK_ForVTT,
+ TVK_ForVTable,
+ TVK_ForRTTI,
+ TVK_ForRTTIName
+ };
/// setTypeVisibility - Set the visibility for the given global
/// value which holds information about a type.
void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
- bool IsForRTTI) const;
+ TypeVisibilityKind TVK) const;
+
+ static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
+ switch (V) {
+ case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
+ case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility;
+ case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown visibility!");
+ return llvm::GlobalValue::DefaultVisibility;
+ }
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
if (isa<CXXConstructorDecl>(GD.getDecl()))
@@ -275,6 +338,14 @@ public:
return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
}
+ /// CreateOrReplaceCXXRuntimeVariable - Will return a global variable of the
+ /// given type. If a variable with a different type already exists then a new
+ /// variable with the right type will be created and all uses of the old
+ /// variable will be replaced with a bitcast to the new variable.
+ llvm::GlobalVariable *
+ CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name, const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
@@ -286,7 +357,8 @@ public:
/// non-null, then this function will use the specified type if it has to
/// create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty = 0);
+ const llvm::Type *Ty = 0,
+ bool ForVTable = false);
/// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
/// for the given type.
@@ -304,6 +376,29 @@ public:
GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned Align,
+ const VarDecl *variable);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T,
+ BlockFieldFlags flags,
+ unsigned Align,
+ const VarDecl *variable);
+
+ /// getGlobalUniqueCount - Fetches the global unique block count.
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+
+ /// getBlockDescriptorType - Fetches the type of a generic block
+ /// descriptor.
+ const llvm::Type *getBlockDescriptorType();
+
+ /// getGenericBlockLiteralType - The type of a generic block literal.
+ const llvm::Type *getGenericBlockLiteralType();
+
+ /// GetAddrOfGlobalBlock - Gets the address of a block which
+ /// requires no captures.
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
/// GetStringForStringLiteral - Return the appropriate bytes for a string
/// literal, properly padded to match the literal type. If only the address of
@@ -314,9 +409,10 @@ public:
/// for the given string.
llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
- /// GetAddrOfConstantNSString - Return a pointer to a constant NSString object
- /// for the given string.
- llvm::Constant *GetAddrOfConstantNSString(const StringLiteral *Literal);
+ /// GetAddrOfConstantString - Return a pointer to a constant NSString object
+ /// for the given string, or to a user-defined string object as specified
+ /// via the -fconstant-string-class=class_name option.
+ llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
/// for the given string literal.
@@ -363,17 +459,6 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getMemCpyFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType);
-
- llvm::Function *getMemMoveFn(const llvm::Type *DestType,
- const llvm::Type *SrcType,
- const llvm::Type *SizeType);
-
- llvm::Function *getMemSetFn(const llvm::Type *DestType,
- const llvm::Type *SizeType);
-
llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
unsigned NumTys = 0);
@@ -417,6 +502,8 @@ public:
Types.UpdateCompletedType(TD);
}
+ llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+
/// EmitConstantExpr - Try to emit the given expression as a
/// constant; returns 0 if the expression cannot be emitted as a
/// constant.
@@ -485,7 +572,8 @@ public:
unsigned &CallingConv);
llvm::StringRef getMangledName(GlobalDecl GD);
- void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD);
+ void getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD);
void EmitTentativeDefinition(const VarDecl *D);
@@ -500,13 +588,18 @@ public:
/// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
/// and type information of the given class.
- static llvm::GlobalVariable::LinkageTypes
- getVTableLinkage(const CXXRecordDecl *RD);
+ llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
/// GetTargetTypeStoreSize - Return the store size, in character units, of
/// the given LLVM type.
CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
-
+
+ /// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
+ /// variable.
+ llvm::GlobalValue::LinkageTypes
+ GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV);
+
std::vector<const CXXRecordDecl*> DeferredVTables;
private:
@@ -514,10 +607,12 @@ private:
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D);
+ GlobalDecl D,
+ bool ForVTable);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *PTy,
- const VarDecl *D);
+ const VarDecl *D,
+ bool UnnamedAddr = false);
/// SetCommonAttributes - Set attributes which are common to any
/// form of a global definition (alias, Objective-C method,
@@ -547,7 +642,7 @@ private:
void EmitAliasDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
-
+
// C++ related functions.
bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
@@ -578,7 +673,8 @@ private:
/// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
void EmitCXXGlobalDtorFunc();
- void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr);
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
@@ -613,6 +709,10 @@ private:
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
bool MayDeferGeneration(const ValueDecl *D);
+
+ /// SimplifyPersonality - Check whether we can use a "simpler", more
+ /// core exceptions personality function.
+ void SimplifyPersonality();
};
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
new file mode 100644
index 0000000..3f2c6ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -0,0 +1,180 @@
+//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use. Relevant standards text includes:
+//
+// C99 6.5p7
+// C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTBAA.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Constants.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const LangOptions &Features, MangleContext &MContext)
+ : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ Root(0), Char(0) {
+}
+
+CodeGenTBAA::~CodeGenTBAA() {
+}
+
+llvm::MDNode *CodeGenTBAA::getRoot() {
+ // Define the root of the tree. This identifies the tree, so that
+ // if our LLVM IR is linked with LLVM IR from a different front-end
+ // (or a different version of this front-end), their TBAA trees will
+ // remain distinct, and the optimizer will treat them conservatively.
+ if (!Root)
+ Root = getTBAAInfoForNamedType("Simple C/C++ TBAA", 0);
+
+ return Root;
+}
+
+llvm::MDNode *CodeGenTBAA::getChar() {
+ // Define the root of the tree for user-accessible memory. C and C++
+ // give special powers to char and certain similar types. However,
+ // these special powers only cover user-accessible memory, and don't
+ // include things like vtables.
+ if (!Char)
+ Char = getTBAAInfoForNamedType("omnipotent char", getRoot());
+
+ return Char;
+}
+
+/// getTBAAInfoForNamedType - Create a TBAA tree node with the given string
+/// as its identifier, and the given Parent node as its tree parent.
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(llvm::StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly) {
+ // Currently there is only one flag defined - the readonly flag.
+ llvm::Value *Flags = 0;
+ if (Readonly)
+ Flags = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), true);
+
+ // Set up the mdnode operand list.
+ llvm::Value *Ops[] = {
+ llvm::MDString::get(VMContext, NameStr),
+ Parent,
+ Flags
+ };
+
+ // Create the mdnode.
+ return llvm::MDNode::get(VMContext, Ops, llvm::array_lengthof(Ops) - !Flags);
+}
+
+static bool TypeHasMayAlias(QualType QTy) {
+ // Tagged types have declarations, and therefore may have attributes.
+ if (const TagType *TTy = dyn_cast<TagType>(QTy))
+ return TTy->getDecl()->hasAttr<MayAliasAttr>();
+
+ // Typedef types have declarations, and therefore may have attributes.
+ if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) {
+ if (TTy->getDecl()->hasAttr<MayAliasAttr>())
+ return true;
+ // Also, their underlying types may have relevant attributes.
+ return TypeHasMayAlias(TTy->desugar());
+ }
+
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // If the type has the may_alias attribute (even on a typedef), it is
+ // effectively in the general char alias class.
+ if (TypeHasMayAlias(QTy))
+ return getChar();
+
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = MetadataCache[Ty])
+ return N;
+
+ // Handle builtin types.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
+ switch (BTy->getKind()) {
+ // Character types are special and can alias anything.
+ // In C++, this technically only includes "char" and "unsigned char",
+ // and not "signed char". In C, it includes all three. For now,
+ // the risk of exploiting this detail in C++ seems likely to outweigh
+ // the benefit.
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ return getChar();
+
+ // Unsigned types can alias their corresponding signed types.
+ case BuiltinType::UShort:
+ return getTBAAInfo(Context.ShortTy);
+ case BuiltinType::UInt:
+ return getTBAAInfo(Context.IntTy);
+ case BuiltinType::ULong:
+ return getTBAAInfo(Context.LongTy);
+ case BuiltinType::ULongLong:
+ return getTBAAInfo(Context.LongLongTy);
+ case BuiltinType::UInt128:
+ return getTBAAInfo(Context.Int128Ty);
+
+ // Treat all other builtin types as distinct types. This includes
+ // treating wchar_t, char16_t, and char32_t as distinct from their
+ // "underlying types".
+ default:
+ return MetadataCache[Ty] =
+ getTBAAInfoForNamedType(BTy->getName(Features), getChar());
+ }
+ }
+
+ // Handle pointers.
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct.
+ if (Ty->isPointerType())
+ return MetadataCache[Ty] = getTBAAInfoForNamedType("any pointer",
+ getChar());
+
+ // Enum types are distinct types. In C++ they have "underlying types",
+ // however they aren't related for TBAA.
+ if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ // In C mode, two anonymous enums are compatible iff their members
+ // are the same -- see C99 6.2.7p1. For now, be conservative. We could
+ // theoretically implement this by combining information about all the
+ // members into a single identifying MDNode.
+ if (!Features.CPlusPlus &&
+ ETy->getDecl()->getTypedefForAnonDecl())
+ return MetadataCache[Ty] = getChar();
+
+ // In C++ mode, types have linkage, so we can rely on the ODR and
+ // on their mangled names, if they're external.
+ // TODO: Is there a way to get a program-wide unique name for a
+ // decl with local linkage or no linkage?
+ if (Features.CPlusPlus &&
+ ETy->getDecl()->getLinkage() != ExternalLinkage)
+ return MetadataCache[Ty] = getChar();
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
+ Out.flush();
+ return MetadataCache[Ty] = getTBAAInfoForNamedType(OutName, getChar());
+ }
+
+ // For now, handle any other kind of type conservatively.
+ return MetadataCache[Ty] = getChar();
+}
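The policy above, viewed from the optimizer's side; a sketch in C of what the emitted metadata licenses (the folds assume optimization with TBAA enabled):

    /* int and float sit on distinct branches of the tree: no alias. */
    int fold(int *ip, float *fp) {
      *ip = 1;
      *fp = 2.0f;
      return *ip;              /* foldable to 1 */
    }

    /* "omnipotent char" aliases everything, so the fold is blocked. */
    int blocked(int *ip, char *cp) {
      *ip = 1;
      *cp = 0;
      return *ip;              /* must be reloaded */
    }

    /* may_alias (see TypeHasMayAlias) moves a type into the char class. */
    typedef int __attribute__((may_alias)) aliasing_int;
    int via_attr(aliasing_int *ip, float *fp) {
      *ip = 1;
      *fp = 2.0f;              /* now a potential alias of *ip */
      return *ip;              /* must be reloaded */
    }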
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
new file mode 100644
index 0000000..c458347
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -0,0 +1,76 @@
+//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTBAA_H
+#define CLANG_CODEGEN_CODEGENTBAA_H
+
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+ class LLVMContext;
+ class MDNode;
+}
+
+namespace clang {
+ class ASTContext;
+ class LangOptions;
+ class MangleContext;
+ class QualType;
+ class Type;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+/// CodeGenTBAA - This class organizes the per-module state that is used
+/// while building TBAA metadata for the LLVM IR being generated.
+class CodeGenTBAA {
+ ASTContext &Context;
+ llvm::LLVMContext& VMContext;
+ const LangOptions &Features;
+ MangleContext &MContext;
+
+ /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
+ llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+
+ llvm::MDNode *Root;
+ llvm::MDNode *Char;
+
+ /// getRoot - This is the mdnode for the root of the metadata type graph
+ /// for this translation unit.
+ llvm::MDNode *getRoot();
+
+ /// getChar - This is the mdnode for "char", which is special, and any types
+ /// considered to be equivalent to it.
+ llvm::MDNode *getChar();
+
+ llvm::MDNode *getTBAAInfoForNamedType(llvm::StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly = false);
+
+public:
+ CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const LangOptions &Features,
+ MangleContext &MContext);
+ ~CodeGenTBAA();
+
+ /// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
+ /// of the given type.
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index 5ab65c5..5254922 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -85,7 +85,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
T = Context.getCanonicalType(T);
// See if type is already cached.
- llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
I = TypeCache.find(T.getTypePtr());
// If type is found in map and this is not a definition for a opaque
// place holder type then use it. Otherwise, convert type T.
@@ -228,7 +228,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::ULong:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
- case BuiltinType::WChar:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
case BuiltinType::Char16:
case BuiltinType::Char32:
return llvm::IntegerType::get(getLLVMContext(),
@@ -252,7 +253,6 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::UndeducedAuto:
assert(0 && "Unexpected builtin type!");
break;
}
@@ -371,26 +371,30 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
const TagDecl *TD = cast<TagType>(Ty).getDecl();
const llvm::Type *Res = ConvertTagDeclType(TD);
- std::string TypeName(TD->getKindName());
- TypeName += '.';
+ llvm::SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << TD->getKindName() << '.';
// Name the codegen type after the typedef name
// if there is no tag type name available
- if (TD->getIdentifier())
+ if (TD->getIdentifier()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
- TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
- TD->getNameAsString();
- else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ if (TD->getDeclContext())
+ OS << TD->getQualifiedNameAsString();
+ else
+ TD->printName(OS);
+ } else if (const TypedefDecl *TDD = TD->getTypedefForAnonDecl()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
- TypeName += TdT->getDecl()->getDeclContext() ?
- TdT->getDecl()->getQualifiedNameAsString() :
- TdT->getDecl()->getNameAsString();
- else
- TypeName += "anon";
-
- TheModule.addTypeName(TypeName, Res);
+ if (TDD->getDeclContext())
+ OS << TDD->getQualifiedNameAsString();
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ TheModule.addTypeName(OS.str(), Res);
return Res;
}
@@ -424,9 +428,13 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
if (TDTI != TagDeclTypes.end())
return TDTI->second;
+ const EnumDecl *ED = dyn_cast<EnumDecl>(TD);
+
// If this is still a forward declaration, just define an opaque
// type to use for this tagged decl.
- if (!TD->isDefinition()) {
+ // C++0x: If this is an enumeration type with a fixed underlying type,
+ // consider it complete.
+ if (!TD->isDefinition() && !(ED && ED->isFixed())) {
llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
TagDeclTypes.insert(std::make_pair(Key, ResultType));
return ResultType;
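The C++0x case the new test admits, as input (the enum-base syntax is from the draft standard; a fixed underlying type pins the layout before any definition is seen):

    enum Color : unsigned char;               // complete despite no body
    Color *next(Color *c) { return c + 1; }   // layout already known
    enum Color : unsigned char { Red, Green, Blue };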
@@ -434,8 +442,8 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
// Okay, this is a definition of a type. Compile the implementation now.
- if (TD->isEnum()) // Don't bother storing enums in TagDeclTypes.
- return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+ if (ED) // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(ED->getIntegerType());
// This decl could well be recursive. In this case, insert an opaque
// definition of this type, which the recursive uses will get. We will then
@@ -474,11 +482,20 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
return ResultHolder.get();
}
-/// getCGRecordLayout - Return record layout info for the given llvm::Type.
+/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
-CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
- const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ if (!Layout) {
+ // Compute the type information.
+ ConvertTagDeclType(RD);
+
+ // Now try again.
+ Layout = CGRecordLayouts.lookup(Key);
+ }
+
assert(Layout && "Unable to find record layout information for type");
return *Layout;
}
@@ -506,11 +523,5 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
}
bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
-
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- ConvertTagDeclType(RD);
-
- const CGRecordLayout &Layout = getCGRecordLayout(RD);
- return Layout.isZeroInitializable();
+ return getCGRecordLayout(RD).isZeroInitializable();
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index 1fc2153..41513da 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -88,14 +88,14 @@ private:
/// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
/// used instead of llvm::Type because it allows us to bypass potential
/// dangling type pointers due to type refinement on llvm side.
- llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+ llvm::DenseMap<const Type *, llvm::PATypeHolder> TypeCache;
/// ConvertNewType - Convert type T into a llvm::Type. Do not use this
/// method directly because it does not do any type caching. This method
/// is available only for ConvertType(). ConvertType() is the preferred
/// interface to convert type T into a llvm::Type.
const llvm::Type *ConvertNewType(QualType T);
-
+
/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
/// pointers that are referenced but have not been converted yet. This is
/// used to handle cyclic structures properly.
@@ -139,11 +139,11 @@ public:
static const TagType *VerifyFuncTypeComplete(const Type* T);
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
- /// given a CXXMethodDecl. If the method to has an incomplete return type,
+ /// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
-
- const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
@@ -151,7 +151,7 @@ public:
/// getFunctionInfo - Get the function info for the specified function decl.
const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
-
+
const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
@@ -176,7 +176,7 @@ public:
/// pointers.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
-
+
/// getFunctionInfo - Get the function info for a function described by a
/// return type and argument types. If the calling convention is not
/// specified, the "C" calling convention will be used.
@@ -188,7 +188,7 @@ public:
const FunctionType::ExtInfo &Info);
/// Retrieves the ABI information for the given function signature.
- ///
+ ///
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
@@ -208,11 +208,11 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
bool IsRecursive);
-
+
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(QualType T);
-
+
/// IsZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const CXXRecordDecl *RD);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
index 26dea40..c2f36d2 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
@@ -15,9 +15,9 @@
#ifndef CLANG_CODEGEN_GLOBALDECL_H
#define CLANG_CODEGEN_GLOBALDECL_H
-#include "CGCXX.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/ABI.h"
namespace clang {
@@ -81,6 +81,12 @@ public:
GD.Value.setFromOpaqueValue(P);
return GD;
}
+
+ GlobalDecl getWithDecl(const Decl *D) {
+ GlobalDecl Result(*this);
+ Result.Value.setPointer(D);
+ return Result;
+ }
};
} // end namespace CodeGen
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
index eefc530..95654a3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -22,7 +22,7 @@
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
+#include <clang/AST/Mangle.h>
#include <clang/AST/Type.h>
#include <llvm/Target/TargetData.h>
#include <llvm/Value.h>
@@ -35,7 +35,6 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI {
private:
const llvm::IntegerType *PtrDiffTy;
protected:
- CodeGen::MangleContext MangleCtx;
bool IsARM;
// It's a little silly for us to cache this.
@@ -48,16 +47,13 @@ protected:
return PtrDiffTy;
}
- bool NeedsArrayCookie(QualType ElementType);
+ bool NeedsArrayCookie(const CXXNewExpr *expr);
+ bool NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType);
public:
ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
- CGCXXABI(CGM), PtrDiffTy(0), MangleCtx(getContext(), CGM.getDiags()),
- IsARM(IsARM) { }
-
- CodeGen::MangleContext &getMangleContext() {
- return MangleCtx;
- }
+ CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
bool isZeroInitializable(const MemberPointerType *MPT);
@@ -83,7 +79,8 @@ public:
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
- llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -111,14 +108,19 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- CharUnits GetArrayCookieSize(QualType ElementType);
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -143,12 +145,14 @@ public:
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
- CharUnits GetArrayCookieSize(QualType ElementType);
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType);
void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
@@ -352,11 +356,11 @@ ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
bool DerivedToBase =
E->getCastKind() == CK_DerivedToBaseMemberPointer;
- const CXXRecordDecl *BaseDecl, *DerivedDecl;
+ const CXXRecordDecl *DerivedDecl;
if (DerivedToBase)
- DerivedDecl = SrcDecl, BaseDecl = DestDecl;
+ DerivedDecl = SrcDecl;
else
- BaseDecl = SrcDecl, DerivedDecl = DestDecl;
+ DerivedDecl = DestDecl;
llvm::Constant *Adj =
CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
@@ -490,21 +494,13 @@ ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
/*Packed=*/false);
}
-llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+llvm::Constant *
+ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
// Itanium C++ ABI 2.3:
// A pointer to data member is an offset from the base address of
// the class object containing it, represented as a ptrdiff_t
-
- QualType ClassType = getContext().getTypeDeclType(FD->getParent());
- const llvm::StructType *ClassLTy =
- cast<llvm::StructType>(CGM.getTypes().ConvertType(ClassType));
-
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(FD->getParent());
- unsigned FieldNo = RL.getLLVMFieldNo(FD);
- uint64_t Offset =
- CGM.getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
-
- return llvm::ConstantInt::get(getPtrDiffTy(), Offset);
+ return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
@@ -799,67 +795,49 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
/************************** Array allocation cookies **************************/
-bool ItaniumCXXABI::NeedsArrayCookie(QualType ElementType) {
- ElementType = getContext().getBaseElementType(ElementType);
- const RecordType *RT = ElementType->getAs<RecordType>();
- if (!RT) return false;
-
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- // If the class has a non-trivial destructor, it always needs a cookie.
- if (!RD->hasTrivialDestructor()) return true;
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ expr->getAllocatedType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
// If the class's usual deallocation function takes two arguments,
- // it needs a cookie. Otherwise we don't need a cookie.
- const CXXMethodDecl *UsualDeallocationFunction = 0;
-
- // Usual deallocation functions of this form are always found on the
- // class.
- //
- // FIXME: what exactly is this code supposed to do if there's an
- // ambiguity? That's possible with using declarations.
- DeclarationName OpName =
- getContext().DeclarationNames.getCXXOperatorName(OO_Array_Delete);
- DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = RD->lookup(OpName); Op != OpEnd; ++Op) {
- const CXXMethodDecl *Delete =
- cast<CXXMethodDecl>((*Op)->getUnderlyingDecl());
-
- if (Delete->isUsualDeallocationFunction()) {
- UsualDeallocationFunction = Delete;
- break;
- }
- }
-
- // No usual deallocation function, we don't need a cookie.
- if (!UsualDeallocationFunction)
- return false;
-
- // The usual deallocation function doesn't take a size_t argument,
- // so we don't need a cookie.
- if (UsualDeallocationFunction->getNumParams() == 1)
- return false;
-
- assert(UsualDeallocationFunction->getNumParams() == 2 &&
- "Unexpected deallocation function type!");
- return true;
-}
-
-CharUnits ItaniumCXXABI::GetArrayCookieSize(QualType ElementType) {
- if (!NeedsArrayCookie(ElementType))
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ elementType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
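A sketch of the cases these two overloads distinguish (illustrative class names, assuming <cstddef> for std::size_t):

struct NoCookie { int x; };            // trivial dtor, one-arg delete[]: no cookie
struct HasDtor  { ~HasDtor(); };       // non-trivial dtor: cookie required
struct SizedDelete {
  // The usual deallocation function takes a trailing size_t, so
  // doesUsualArrayDeleteWantSize() is true and new[] must store a cookie.
  void operator delete[](void *p, std::size_t n);
};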
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
return CharUnits::Zero();
- // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
+ // Padding is the maximum of sizeof(size_t) and alignof(elementType)
ASTContext &Ctx = getContext();
return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
- Ctx.getTypeAlignInChars(ElementType));
+ Ctx.getTypeAlignInChars(expr->getAllocatedType()));
}
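Worked example of the computation above, assuming a typical LP64 target where sizeof(size_t) == 8:

// new HasDtor[n]  -> cookie = max(8, alignof(HasDtor) == 1) = 8 bytes
// struct Wide { ~Wide(); } __attribute__((aligned(16)));
// new Wide[n]     -> cookie = max(8, 16) = 16 bytes, so the first element
//                    after the cookie keeps the element type's alignment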
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(ElementType));
+ assert(NeedsArrayCookie(expr));
unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
@@ -892,6 +870,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType,
llvm::Value *&NumElements,
llvm::Value *&AllocPtr,
@@ -901,7 +880,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(ElementType)) {
+ if (!NeedsArrayCookie(expr, ElementType)) {
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
NumElements = 0;
CookieSize = CharUnits::Zero();
@@ -932,8 +911,8 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
-CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
- if (!NeedsArrayCookie(ElementType))
+CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
return CharUnits::Zero();
// On ARM, the cookie is always:
@@ -949,8 +928,9 @@ CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
+ const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(ElementType));
+ assert(NeedsArrayCookie(expr));
// NewPtr is a char*.
@@ -983,6 +963,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
QualType ElementType,
llvm::Value *&NumElements,
llvm::Value *&AllocPtr,
@@ -992,7 +973,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(ElementType)) {
+ if (!NeedsArrayCookie(expr, ElementType)) {
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
NumElements = 0;
CookieSize = CharUnits::Zero();
@@ -1005,7 +986,6 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
// The cookie size is always 2 * sizeof(size_t).
CookieSize = 2 * SizeSize;
- CharUnits NumElementsOffset = CookieSize - SizeSize;
// The allocated pointer is the input ptr, minus that amount.
AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
@@ -1021,3 +1001,168 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
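For reference, the read above assumes the fixed ARM cookie layout (per the ARM C++ ABI), shown for a 32-bit target where sizeof(size_t) == 4:

// allocation: [ element size : 4 ][ element count : 4 ][ elements ... ]
//             ^ AllocPtr                                ^ NewPtr
// CookieSize is always 2 * sizeof(size_t); the count is the second word,
// which is why NumElements is loaded from offset CookieSize - SizeSize.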
+/*********************** Static local initialization **************************/
+
+static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // int __cxa_guard_acquire(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_release(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
+ const llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_abort(__guard *guard_object);
+
+ std::vector<const llvm::Type*> Args(1, GuardPtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+}
+
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+/// The ARM code here follows the Itanium code closely enough that we
+/// just special-case it at particular places.
+void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // We only need to use thread-safe statics for local variables;
+ // global initialization is always single-threaded.
+ bool ThreadsafeStatics = (getContext().getLangOptions().ThreadsafeStatics &&
+ D.isLocalVarDecl());
+
+ // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
+ const llvm::IntegerType *GuardTy
+ = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+ const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
+
+ // Create the guard variable.
+ llvm::SmallString<256> GuardVName;
+ llvm::raw_svector_ostream Out(GuardVName);
+ getMangleContext().mangleItaniumGuardVariable(&D, Out);
+ Out.flush();
+
+ // Just absorb linkage and visibility from the variable.
+ llvm::GlobalVariable *GuardVariable =
+ new llvm::GlobalVariable(CGM.getModule(), GuardTy,
+ false, GV->getLinkage(),
+ llvm::ConstantInt::get(GuardTy, 0),
+ GuardVName.str());
+ GuardVariable->setVisibility(GV->getVisibility());
+
+ // Test whether the variable has completed initialization.
+ llvm::Value *IsInitialized;
+
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+ // guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ if (IsARM) {
+ llvm::Value *V = Builder.CreateLoad(GuardVariable);
+ V = Builder.CreateAnd(V, Builder.getInt32(1));
+ IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ // Itanium C++ ABI 3.3.2:
+ // The following is pseudo-code showing how these functions can be used:
+ // if (obj_guard.first_byte == 0) {
+ // if ( __cxa_guard_acquire (&obj_guard) ) {
+ // try {
+ // ... initialize the object ...;
+ // } catch (...) {
+ // __cxa_guard_abort (&obj_guard);
+ // throw;
+ // }
+ // ... queue object destructor with __cxa_atexit() ...;
+ // __cxa_guard_release (&obj_guard);
+ // }
+ // }
+ } else {
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy = Builder.getInt8PtrTy();
+ llvm::Value *V =
+ Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
+
+ IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+ }
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+
+ // Branch on the check: if the guard is still uninitialized, run the init path.
+ Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+
+ CGF.EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ if (ThreadsafeStatics) {
+ // Call __cxa_guard_acquire.
+ llvm::Value *V
+ = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
+
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
+
+ CGF.EmitBlock(InitBlock);
+ }
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, GV);
+
+ if (ThreadsafeStatics) {
+ // Pop the guard-abort cleanup if we pushed one.
+ CGF.PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
+ } else {
+ Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
+ }
+
+ CGF.EmitBlock(EndBlock);
+}
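Source-level trigger for EmitGuardedInit, with the rough shape of the lowering (a sketch; compute and get are invented names):

int compute();
int &get() {
  static int x = compute();   // guarded: compute() runs exactly once
  return x;
}
// Generic ABI: test the guard's first byte, then __cxa_guard_acquire /
// initialize / __cxa_guard_release (with __cxa_guard_abort on unwind).
// ARM: the guard is a 32-bit word and only its low bit is tested.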
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 9407335..3a63eba 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -16,107 +16,17 @@
#include "CGCXXABI.h"
#include "CodeGenModule.h"
-#include "Mangle.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclTemplate.h"
-#include "clang/AST/ExprCXX.h"
-#include "CGVTables.h"
using namespace clang;
using namespace CodeGen;
namespace {
-/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftCXXNameMangler {
- MangleContext &Context;
- llvm::raw_svector_ostream Out;
-
- ASTContext &getASTContext() const { return Context.getASTContext(); }
-
-public:
- MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res) { }
-
- void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
- void mangleName(const NamedDecl *ND);
- void mangleFunctionEncoding(const FunctionDecl *FD);
- void mangleVariableEncoding(const VarDecl *VD);
- void mangleNumber(int64_t Number);
- void mangleType(QualType T);
-
-private:
- void mangleUnqualifiedName(const NamedDecl *ND) {
- mangleUnqualifiedName(ND, ND->getDeclName());
- }
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
- void mangleSourceName(const IdentifierInfo *II);
- void manglePostfix(const DeclContext *DC, bool NoFunction=false);
- void mangleOperatorName(OverloadedOperatorKind OO);
- void mangleQualifiers(Qualifiers Quals, bool IsMember);
-
- void mangleObjCMethodName(const ObjCMethodDecl *MD);
-
- // Declare manglers for every type class.
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
-#include "clang/AST/TypeNodes.def"
-
- void mangleType(const TagType*);
- void mangleType(const FunctionType *T, const FunctionDecl *D,
- bool IsStructor, bool IsInstMethod);
- void mangleType(const ArrayType *T, bool IsGlobal);
- void mangleExtraDimensions(QualType T);
- void mangleFunctionClass(const FunctionDecl *FD);
- void mangleCallingConvention(const FunctionType *T);
- void mangleThrowSpecification(const FunctionProtoType *T);
-
-};
-
-/// MicrosoftMangleContext - Overrides the default MangleContext for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftMangleContext : public MangleContext {
-public:
- MicrosoftMangleContext(ASTContext &Context,
- Diagnostic &Diags) : MangleContext(Context, Diags) { }
- virtual bool shouldMangleDeclName(const NamedDecl *D);
- virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
- virtual void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &);
- virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &);
-};
-
class MicrosoftCXXABI : public CGCXXABI {
- MicrosoftMangleContext MangleCtx;
public:
- MicrosoftCXXABI(CodeGenModule &CGM)
- : CGCXXABI(CGM), MangleCtx(CGM.getContext(), CGM.getDiags()) {}
-
- MicrosoftMangleContext &getMangleContext() {
- return MangleCtx;
- }
+ MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
@@ -145,1071 +55,31 @@ public:
EmitThisParam(CGF);
// TODO: 'for base' flag
}
-};
-
-}
-
-static bool isInCLinkageSpecification(const Decl *D) {
- D = D->getCanonicalDecl();
- for (const DeclContext *DC = D->getDeclContext();
- !DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
- return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
- }
-
- return false;
-}
-
-bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
- // In C, functions with no attributes never need to be mangled. Fastpath them.
- if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
- return false;
-
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (D->hasAttr<AsmLabelAttr>())
- return true;
-
- // Clang's "overloadable" attribute extension to C/C++ implies name mangling
- // (always) as does passing a C++ member function and a function
- // whose name is not a simple identifier.
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
- !FD->getDeclName().isIdentifier()))
- return true;
-
- // Otherwise, no mangling is done outside C++ mode.
- if (!getASTContext().getLangOptions().CPlusPlus)
- return false;
-
- // Variables at global scope with internal linkage are not mangled.
- if (!FD) {
- const DeclContext *DC = D->getDeclContext();
- if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
- return false;
- }
-
- // C functions and "main" are not mangled.
- if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
- return false;
-
- return true;
-}
-
-void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
- llvm::StringRef Prefix) {
- // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
- // Therefore it's really important that we don't decorate the
- // name with leading underscores or leading/trailing at signs. So, emit an
- // asm marker at the start so we get the name right.
- Out << '\01'; // LLVM IR Marker for __asm("foo")
-
- // Any decl can be declared with __asm("foo") on it, and this takes precedence
- // over all other naming in the .o file.
- if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
- // If we have an asm name, then we use it as the mangling.
- Out << ALA->getLabel();
- return;
- }
-
- // <mangled-name> ::= ? <name> <type-encoding>
- Out << Prefix;
- mangleName(D);
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- mangleVariableEncoding(VD);
- // TODO: Fields? Can MSVC even mangle them?
-}
-
-void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
- // <type-encoding> ::= <function-class> <function-type>
-
- // Don't mangle in the type if this isn't a decl we should typically mangle.
- if (!Context.shouldMangleDeclName(FD))
- return;
-
- // We should never ever see a FunctionNoProtoType at this point.
- // We don't even know how to mangle their types anyway :).
- const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
-
- bool InStructor = false, InInstMethod = false;
- const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD) {
- if (MD->isInstance())
- InInstMethod = true;
- if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
- InStructor = true;
- }
-
- // First, the function class.
- mangleFunctionClass(FD);
-
- mangleType(FT, FD, InStructor, InInstMethod);
-}
-
-void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
- // <type-encoding> ::= <storage-class> <variable-type>
- // <storage-class> ::= 0 # private static member
- // ::= 1 # protected static member
- // ::= 2 # public static member
- // ::= 3 # global
- // ::= 4 # static local
-
- // The first character in the encoding (after the name) is the storage class.
- if (VD->isStaticDataMember()) {
- // If it's a static member, it also encodes the access level.
- switch (VD->getAccess()) {
- default:
- case AS_private: Out << '0'; break;
- case AS_protected: Out << '1'; break;
- case AS_public: Out << '2'; break;
- }
- }
- else if (!VD->isStaticLocal())
- Out << '3';
- else
- Out << '4';
- // Now mangle the type.
- // <variable-type> ::= <type> <cvr-qualifiers>
- // ::= <type> A # pointers, references, arrays
- // Pointers and references are odd. The type of 'int * const foo;' gets
- // mangled as 'QAHA' instead of 'PAHB', for example.
- QualType Ty = VD->getType();
- if (Ty->isPointerType() || Ty->isReferenceType()) {
- mangleType(Ty);
- Out << 'A';
- } else if (Ty->isArrayType()) {
- // Global arrays are funny, too.
- mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true);
- Out << 'A';
- } else {
- mangleType(Ty.getLocalUnqualifiedType());
- mangleQualifiers(Ty.getLocalQualifiers(), false);
- }
-}
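Worked examples of the variable encoding (well-known MSVC manglings, shown for orientation only):

// int x;                        ->  ?x@@3HA   ('3' global, 'H' int, 'A' plain)
// namespace N { int x; }        ->  ?x@N@@3HA
// struct S { static int m; };   ->  ?m@S@@2HA ('2' public static member)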
-
-void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
- // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
- const DeclContext *DC = ND->getDeclContext();
-
- // Always start with the unqualified name.
- mangleUnqualifiedName(ND);
-
- // If this is an extern variable declared locally, the relevant DeclContext
- // is that of the containing namespace, or the translation unit.
- if (isa<FunctionDecl>(DC) && ND->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
-
- manglePostfix(DC);
-
- // Terminate the whole name with an '@'.
- Out << '@';
-}
-
-void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
- // <number> ::= [?] <decimal digit> # <= 9
- // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
- if (Number < 0) {
- Out << '?';
- Number = -Number;
- }
- if (Number >= 1 && Number <= 10) {
- Out << Number-1;
- } else {
- // We have to build up the encoding in reverse order, so it will come
- // out right when we write it out.
- char Encoding[16];
- char *EndPtr = Encoding+sizeof(Encoding);
- char *CurPtr = EndPtr;
- while (Number) {
- *--CurPtr = 'A' + (Number % 16);
- Number /= 16;
- }
- Out.write(CurPtr, EndPtr-CurPtr);
- Out << '@';
- }
-}
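Worked values, derived directly from the code above (note the single-digit case actually covers values 1 through 10, stored as Number-1):

//   1 -> "0"        10 -> "9"
//  11 -> "L@"       (hex digits with 'A' = 0 ... 'P' = 15, '@'-terminated)
//  16 -> "BA@"      (0x10, most significant digit first)
//  -5 -> "?4"       ('?' marks a negative value)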
-
-void
-MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
- DeclarationName Name) {
- // <unqualified-name> ::= <operator-name>
- // ::= <ctor-dtor-name>
- // ::= <source-name>
- switch (Name.getNameKind()) {
- case DeclarationName::Identifier: {
- if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
- mangleSourceName(II);
- break;
- }
-
- // Otherwise, an anonymous entity. We must have a declaration.
- assert(ND && "mangling empty name without declaration");
-
- if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
- if (NS->isAnonymousNamespace()) {
- Out << "?A";
- break;
- }
- }
-
- // We must have an anonymous struct.
- const TagDecl *TD = cast<TagDecl>(ND);
- if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
- assert(TD->getDeclContext() == D->getDeclContext() &&
- "Typedef should not be in another decl context!");
- assert(D->getDeclName().getAsIdentifierInfo() &&
- "Typedef was not named!");
- mangleSourceName(D->getDeclName().getAsIdentifierInfo());
- break;
- }
-
- // When VC encounters an anonymous type with no tag and no typedef,
- // it literally emits '<unnamed-tag>'.
- Out << "<unnamed-tag>";
- break;
- }
-
- case DeclarationName::ObjCZeroArgSelector:
- case DeclarationName::ObjCOneArgSelector:
- case DeclarationName::ObjCMultiArgSelector:
- assert(false && "Can't mangle Objective-C selector names here!");
- break;
-
- case DeclarationName::CXXConstructorName:
- assert(false && "Can't mangle constructors yet!");
- break;
-
- case DeclarationName::CXXDestructorName:
- assert(false && "Can't mangle destructors yet!");
- break;
-
- case DeclarationName::CXXConversionFunctionName:
- // <operator-name> ::= ?B # (cast)
- // The target type is encoded as the return type.
- Out << "?B";
- break;
-
- case DeclarationName::CXXOperatorName:
- mangleOperatorName(Name.getCXXOverloadedOperator());
- break;
-
- case DeclarationName::CXXLiteralOperatorName:
- // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
- assert(false && "Don't know how to mangle literal operators yet!");
- break;
-
- case DeclarationName::CXXUsingDirective:
- assert(false && "Can't mangle a using directive name!");
- break;
- }
-}
-
-void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
- bool NoFunction) {
- // <postfix> ::= <unqualified-name> [<postfix>]
- // ::= <template-postfix> <template-args> [<postfix>]
- // ::= <template-param>
- // ::= <substitution> [<postfix>]
-
- if (!DC) return;
-
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
-
- if (DC->isTranslationUnit())
- return;
-
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
- llvm::SmallString<64> Name;
- Context.mangleBlock(GlobalDecl(), BD, Name);
- Out << Name << '@';
- return manglePostfix(DC->getParent(), NoFunction);
- }
-
- if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
- return;
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else {
- mangleUnqualifiedName(cast<NamedDecl>(DC));
- manglePostfix(DC->getParent(), NoFunction);
- }
-}
-
-void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
- switch (OO) {
- // ?0 # constructor
- // ?1 # destructor
- // <operator-name> ::= ?2 # new
- case OO_New: Out << "?2"; break;
- // <operator-name> ::= ?3 # delete
- case OO_Delete: Out << "?3"; break;
- // <operator-name> ::= ?4 # =
- case OO_Equal: Out << "?4"; break;
- // <operator-name> ::= ?5 # >>
- case OO_GreaterGreater: Out << "?5"; break;
- // <operator-name> ::= ?6 # <<
- case OO_LessLess: Out << "?6"; break;
- // <operator-name> ::= ?7 # !
- case OO_Exclaim: Out << "?7"; break;
- // <operator-name> ::= ?8 # ==
- case OO_EqualEqual: Out << "?8"; break;
- // <operator-name> ::= ?9 # !=
- case OO_ExclaimEqual: Out << "?9"; break;
- // <operator-name> ::= ?A # []
- case OO_Subscript: Out << "?A"; break;
- // ?B # conversion
- // <operator-name> ::= ?C # ->
- case OO_Arrow: Out << "?C"; break;
- // <operator-name> ::= ?D # *
- case OO_Star: Out << "?D"; break;
- // <operator-name> ::= ?E # ++
- case OO_PlusPlus: Out << "?E"; break;
- // <operator-name> ::= ?F # --
- case OO_MinusMinus: Out << "?F"; break;
- // <operator-name> ::= ?G # -
- case OO_Minus: Out << "?G"; break;
- // <operator-name> ::= ?H # +
- case OO_Plus: Out << "?H"; break;
- // <operator-name> ::= ?I # &
- case OO_Amp: Out << "?I"; break;
- // <operator-name> ::= ?J # ->*
- case OO_ArrowStar: Out << "?J"; break;
- // <operator-name> ::= ?K # /
- case OO_Slash: Out << "?K"; break;
- // <operator-name> ::= ?L # %
- case OO_Percent: Out << "?L"; break;
- // <operator-name> ::= ?M # <
- case OO_Less: Out << "?M"; break;
- // <operator-name> ::= ?N # <=
- case OO_LessEqual: Out << "?N"; break;
- // <operator-name> ::= ?O # >
- case OO_Greater: Out << "?O"; break;
- // <operator-name> ::= ?P # >=
- case OO_GreaterEqual: Out << "?P"; break;
- // <operator-name> ::= ?Q # ,
- case OO_Comma: Out << "?Q"; break;
- // <operator-name> ::= ?R # ()
- case OO_Call: Out << "?R"; break;
- // <operator-name> ::= ?S # ~
- case OO_Tilde: Out << "?S"; break;
- // <operator-name> ::= ?T # ^
- case OO_Caret: Out << "?T"; break;
- // <operator-name> ::= ?U # |
- case OO_Pipe: Out << "?U"; break;
- // <operator-name> ::= ?V # &&
- case OO_AmpAmp: Out << "?V"; break;
- // <operator-name> ::= ?W # ||
- case OO_PipePipe: Out << "?W"; break;
- // <operator-name> ::= ?X # *=
- case OO_StarEqual: Out << "?X"; break;
- // <operator-name> ::= ?Y # +=
- case OO_PlusEqual: Out << "?Y"; break;
- // <operator-name> ::= ?Z # -=
- case OO_MinusEqual: Out << "?Z"; break;
- // <operator-name> ::= ?_0 # /=
- case OO_SlashEqual: Out << "?_0"; break;
- // <operator-name> ::= ?_1 # %=
- case OO_PercentEqual: Out << "?_1"; break;
- // <operator-name> ::= ?_2 # >>=
- case OO_GreaterGreaterEqual: Out << "?_2"; break;
- // <operator-name> ::= ?_3 # <<=
- case OO_LessLessEqual: Out << "?_3"; break;
- // <operator-name> ::= ?_4 # &=
- case OO_AmpEqual: Out << "?_4"; break;
- // <operator-name> ::= ?_5 # |=
- case OO_PipeEqual: Out << "?_5"; break;
- // <operator-name> ::= ?_6 # ^=
- case OO_CaretEqual: Out << "?_6"; break;
- // ?_7 # vftable
- // ?_8 # vbtable
- // ?_9 # vcall
- // ?_A # typeof
- // ?_B # local static guard
- // ?_C # string
- // ?_D # vbase destructor
- // ?_E # vector deleting destructor
- // ?_F # default constructor closure
- // ?_G # scalar deleting destructor
- // ?_H # vector constructor iterator
- // ?_I # vector destructor iterator
- // ?_J # vector vbase constructor iterator
- // ?_K # virtual displacement map
- // ?_L # eh vector constructor iterator
- // ?_M # eh vector destructor iterator
- // ?_N # eh vector vbase constructor iterator
- // ?_O # copy constructor closure
- // ?_P<name> # udt returning <name>
- // ?_Q # <unknown>
- // ?_R0 # RTTI Type Descriptor
- // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
- // ?_R2 # RTTI Base Class Array
- // ?_R3 # RTTI Class Hierarchy Descriptor
- // ?_R4 # RTTI Complete Object Locator
- // ?_S # local vftable
- // ?_T # local vftable constructor closure
- // <operator-name> ::= ?_U # new[]
- case OO_Array_New: Out << "?_U"; break;
- // <operator-name> ::= ?_V # delete[]
- case OO_Array_Delete: Out << "?_V"; break;
-
- case OO_Conditional:
- assert(false && "Don't know how to mangle ?:");
- break;
-
- case OO_None:
- case NUM_OVERLOADED_OPERATORS:
- assert(false && "Not an overloaded operator");
- break;
- }
-}
-
-void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
- // <source name> ::= <identifier> @
- Out << II->getName() << '@';
-}
-
-void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Buffer;
- MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
- Out << Buffer;
-}
-
-void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
- bool IsMember) {
- // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
- // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
- // 'I' means __restrict (32/64-bit).
- // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
- // keyword!
- // <base-cvr-qualifiers> ::= A # near
- // ::= B # near const
- // ::= C # near volatile
- // ::= D # near const volatile
- // ::= E # far (16-bit)
- // ::= F # far const (16-bit)
- // ::= G # far volatile (16-bit)
- // ::= H # far const volatile (16-bit)
- // ::= I # huge (16-bit)
- // ::= J # huge const (16-bit)
- // ::= K # huge volatile (16-bit)
- // ::= L # huge const volatile (16-bit)
- // ::= M <basis> # based
- // ::= N <basis> # based const
- // ::= O <basis> # based volatile
- // ::= P <basis> # based const volatile
- // ::= Q # near member
- // ::= R # near const member
- // ::= S # near volatile member
- // ::= T # near const volatile member
- // ::= U # far member (16-bit)
- // ::= V # far const member (16-bit)
- // ::= W # far volatile member (16-bit)
- // ::= X # far const volatile member (16-bit)
- // ::= Y # huge member (16-bit)
- // ::= Z # huge const member (16-bit)
- // ::= 0 # huge volatile member (16-bit)
- // ::= 1 # huge const volatile member (16-bit)
- // ::= 2 <basis> # based member
- // ::= 3 <basis> # based const member
- // ::= 4 <basis> # based volatile member
- // ::= 5 <basis> # based const volatile member
- // ::= 6 # near function (pointers only)
- // ::= 7 # far function (pointers only)
- // ::= 8 # near method (pointers only)
- // ::= 9 # far method (pointers only)
- // ::= _A <basis> # based function (pointers only)
- // ::= _B <basis> # based function (far?) (pointers only)
- // ::= _C <basis> # based method (pointers only)
- // ::= _D <basis> # based method (far?) (pointers only)
- // ::= _E # block (Clang)
- // <basis> ::= 0 # __based(void)
- // ::= 1 # __based(segment)?
- // ::= 2 <name> # __based(name)
- // ::= 3 # ?
- // ::= 4 # ?
- // ::= 5 # not really based
- if (!IsMember) {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'A';
- else
- Out << 'B';
- } else {
- if (!Quals.hasConst())
- Out << 'C';
- else
- Out << 'D';
- }
- } else {
- if (!Quals.hasVolatile()) {
- if (!Quals.hasConst())
- Out << 'Q';
- else
- Out << 'R';
- } else {
- if (!Quals.hasConst())
- Out << 'S';
- else
- Out << 'T';
- }
- }
-
- // FIXME: For now, just drop all extension qualifiers on the floor.
-}
-
-void MicrosoftCXXNameMangler::mangleType(QualType T) {
- // Only operate on the canonical type!
- T = getASTContext().getCanonicalType(T);
-
- Qualifiers Quals = T.getLocalQualifiers();
- if (Quals) {
- // We have to mangle these now, while we still have enough information.
- // <pointer-cvr-qualifiers> ::= P # pointer
- // ::= Q # const pointer
- // ::= R # volatile pointer
- // ::= S # const volatile pointer
- if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- if (!Quals.hasVolatile())
- Out << 'Q';
- else {
- if (!Quals.hasConst())
- Out << 'R';
- else
- Out << 'S';
- }
- } else
- // Just emit qualifiers like normal.
- // NB: When we mangle a pointer/reference type, and the pointee
- // type has no qualifiers, the lack of qualifier gets mangled
- // in there.
- mangleQualifiers(Quals, false);
- } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
- T->isBlockPointerType()) {
- Out << 'P';
- }
- switch (T->getTypeClass()) {
-#define ABSTRACT_TYPE(CLASS, PARENT)
-#define NON_CANONICAL_TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
-return;
-#define TYPE(CLASS, PARENT) \
-case Type::CLASS: \
-mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
-break;
-#include "clang/AST/TypeNodes.def"
- }
-}
-
-void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
- // <type> ::= <builtin-type>
- // <builtin-type> ::= X # void
- // ::= C # signed char
- // ::= D # char
- // ::= E # unsigned char
- // ::= F # short
- // ::= G # unsigned short (or wchar_t if it's not a builtin)
- // ::= H # int
- // ::= I # unsigned int
- // ::= J # long
- // ::= K # unsigned long
- // L # <none>
- // ::= M # float
- // ::= N # double
- // ::= O # long double (__float80 is mangled differently)
- // ::= _D # __int8 (yup, it's a distinct type in MSVC)
- // ::= _E # unsigned __int8
- // ::= _F # __int16
- // ::= _G # unsigned __int16
- // ::= _H # __int32
- // ::= _I # unsigned __int32
- // ::= _J # long long, __int64
- // ::= _K # unsigned long long, __int64
- // ::= _L # __int128
- // ::= _M # unsigned __int128
- // ::= _N # bool
- // _O # <array in parameter>
- // ::= _T # __float80 (Intel)
- // ::= _W # wchar_t
- // ::= _Z # __float80 (Digital Mars)
- switch (T->getKind()) {
- case BuiltinType::Void: Out << 'X'; break;
- case BuiltinType::SChar: Out << 'C'; break;
- case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
- case BuiltinType::UChar: Out << 'E'; break;
- case BuiltinType::Short: Out << 'F'; break;
- case BuiltinType::UShort: Out << 'G'; break;
- case BuiltinType::Int: Out << 'H'; break;
- case BuiltinType::UInt: Out << 'I'; break;
- case BuiltinType::Long: Out << 'J'; break;
- case BuiltinType::ULong: Out << 'K'; break;
- case BuiltinType::Float: Out << 'M'; break;
- case BuiltinType::Double: Out << 'N'; break;
- // TODO: Determine size and mangle accordingly
- case BuiltinType::LongDouble: Out << 'O'; break;
- // TODO: __int8 and friends
- case BuiltinType::LongLong: Out << "_J"; break;
- case BuiltinType::ULongLong: Out << "_K"; break;
- case BuiltinType::Int128: Out << "_L"; break;
- case BuiltinType::UInt128: Out << "_M"; break;
- case BuiltinType::Bool: Out << "_N"; break;
- case BuiltinType::WChar: Out << "_W"; break;
-
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- assert(false &&
- "Overloaded and dependent types shouldn't get to name mangling");
- break;
- case BuiltinType::UndeducedAuto:
- assert(0 && "Should not see undeduced auto here");
- break;
- case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
- case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
- case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
-
- case BuiltinType::Char16:
- case BuiltinType::Char32:
- case BuiltinType::NullPtr:
- assert(false && "Don't know how to mangle this type");
- break;
- }
-}
-
-// <type> ::= <function-type>
-void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
- // Structors only appear in decls, so at this point we know it's not a
- // structor type.
- // I'll probably have mangleType(MemberPointerType) call the mangleType()
- // method directly.
- mangleType(T, NULL, false, false);
-}
-void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
- llvm_unreachable("Can't mangle K&R function prototypes");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
- const FunctionDecl *D,
- bool IsStructor,
- bool IsInstMethod) {
- // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
- // <return-type> <argument-list> <throw-spec>
- const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
-
- // If this is a C++ instance method, mangle the CVR qualifiers for the
- // this pointer.
- if (IsInstMethod)
- mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
-
- mangleCallingConvention(T);
-
- // <return-type> ::= <type>
- // ::= @ # structors (they have no declared return type)
- if (IsStructor)
- Out << '@';
- else
- mangleType(Proto->getResultType());
-
- // <argument-list> ::= X # void
- // ::= <type>+ @
- // ::= <type>* Z # varargs
- if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
- Out << 'X';
- } else {
- if (D) {
- // If we got a decl, use the "types-as-written" to make sure arrays
- // get mangled right.
- for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
- ParmEnd = D->param_end();
- Parm != ParmEnd; ++Parm)
- mangleType((*Parm)->getTypeSourceInfo()->getType());
- } else {
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- mangleType(*Arg);
- }
- // <builtin-type> ::= Z # ellipsis
- if (Proto->isVariadic())
- Out << 'Z';
- else
- Out << '@';
- }
-
- mangleThrowSpecification(Proto);
-}
-
-void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
- // <function-class> ::= A # private: near
- // ::= B # private: far
- // ::= C # private: static near
- // ::= D # private: static far
- // ::= E # private: virtual near
- // ::= F # private: virtual far
- // ::= G # private: thunk near
- // ::= H # private: thunk far
- // ::= I # protected: near
- // ::= J # protected: far
- // ::= K # protected: static near
- // ::= L # protected: static far
- // ::= M # protected: virtual near
- // ::= N # protected: virtual far
- // ::= O # protected: thunk near
- // ::= P # protected: thunk far
- // ::= Q # public: near
- // ::= R # public: far
- // ::= S # public: static near
- // ::= T # public: static far
- // ::= U # public: virtual near
- // ::= V # public: virtual far
- // ::= W # public: thunk near
- // ::= X # public: thunk far
- // ::= Y # global near
- // ::= Z # global far
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- switch (MD->getAccess()) {
- default:
- case AS_private:
- if (MD->isStatic())
- Out << 'C';
- else if (MD->isVirtual())
- Out << 'E';
- else
- Out << 'A';
- break;
- case AS_protected:
- if (MD->isStatic())
- Out << 'K';
- else if (MD->isVirtual())
- Out << 'M';
- else
- Out << 'I';
- break;
- case AS_public:
- if (MD->isStatic())
- Out << 'S';
- else if (MD->isVirtual())
- Out << 'U';
- else
- Out << 'Q';
- }
- } else
- Out << 'Y';
-}
-void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
- // <calling-convention> ::= A # __cdecl
- // ::= B # __export __cdecl
- // ::= C # __pascal
- // ::= D # __export __pascal
- // ::= E # __thiscall
- // ::= F # __export __thiscall
- // ::= G # __stdcall
- // ::= H # __export __stdcall
- // ::= I # __fastcall
- // ::= J # __export __fastcall
- // The 'export' calling conventions are from a bygone era
- // (*cough*Win16*cough*) when functions were declared for export with
- // that keyword. (It didn't actually export them, it just made them so
- // that they could be in a DLL and somebody from another module could call
- // them.)
- switch (T->getCallConv()) {
- case CC_Default:
- case CC_C: Out << 'A'; break;
- case CC_X86Pascal: Out << 'C'; break;
- case CC_X86ThisCall: Out << 'E'; break;
- case CC_X86StdCall: Out << 'G'; break;
- case CC_X86FastCall: Out << 'I'; break;
- }
-}
-void MicrosoftCXXNameMangler::mangleThrowSpecification(
- const FunctionProtoType *FT) {
- // <throw-spec> ::= Z # throw(...) (default)
- // ::= @ # throw() or __declspec/__attribute__((nothrow))
- // ::= <type>+
- // NOTE: Since the Microsoft compiler ignores throw specifications, they are
- // all actually mangled as 'Z'. (They're ignored because their associated
- // functionality isn't implemented, and probably never will be.)
- Out << 'Z';
-}
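Putting the pieces together, a classic worked example for a global cdecl function:

// int foo(int);  ->  ?foo@@YAHH@Z
//   ?     mangled-name prefix      foo@   source name       @   end of name
//   Y     global near              A      __cdecl
//   H     int return type          H      int parameter
//   @     end of parameter list    Z      throw(...) (always, as noted above)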
-
-void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
- assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
-}
-
-// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
-// <union-type> ::= T <name>
-// <struct-type> ::= U <name>
-// <class-type> ::= V <name>
-// <enum-type> ::= W <size> <name>
-void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
- mangleType(static_cast<const TagType*>(T));
-}
-void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
- switch (T->getDecl()->getTagKind()) {
- case TTK_Union:
- Out << 'T';
- break;
- case TTK_Struct:
- Out << 'U';
- break;
- case TTK_Class:
- Out << 'V';
- break;
- case TTK_Enum:
- Out << 'W';
- Out << getASTContext().getTypeSizeInChars(
- cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
- break;
- }
- mangleName(T->getDecl());
-}
-
-// <type> ::= <array-type>
-// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as global
-// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
-// <element-type> # as param
-// It's supposed to be the other way around, but for some strange reason, it
-// isn't. Today this behavior is retained for the sole purpose of backwards
-// compatibility.
-void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
- // This isn't a recursive mangling, so now we have to do it all in this
- // one call.
- if (IsGlobal)
- Out << 'P';
- else
- Out << 'Q';
- mangleExtraDimensions(T->getElementType());
-}
-void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
- mangleType(static_cast<const ArrayType *>(T), false);
-}
-void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
- llvm::SmallVector<llvm::APInt, 3> Dimensions;
- for (;;) {
- if (ElementTy->isConstantArrayType()) {
- const ConstantArrayType *CAT =
- static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
- Dimensions.push_back(CAT->getSize());
- ElementTy = CAT->getElementType();
- } else if (ElementTy->isVariableArrayType()) {
- assert(false && "Don't know how to mangle VLAs!");
- } else if (ElementTy->isDependentSizedArrayType()) {
- // The dependent expression has to be folded into a constant (TODO).
- assert(false && "Don't know how to mangle dependent-sized arrays!");
- } else if (ElementTy->isIncompleteArrayType()) continue;
- else break;
- }
- mangleQualifiers(ElementTy.getQualifiers(), false);
- // If there are any additional dimensions, mangle them now.
- if (Dimensions.size() > 0) {
- Out << 'Y';
- // <dimension-count> ::= <number> # number of extra dimensions
- mangleNumber(Dimensions.size());
- for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
- mangleNumber(Dimensions[Dim].getLimitedValue());
- }
- }
- mangleType(ElementTy.getLocalUnqualifiedType());
-}
-
-// <type> ::= <pointer-to-member-type>
-// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
-// <class name> <type>
-void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
- QualType PointeeType = T->getPointeeType();
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
- Out << '8';
- mangleName(cast<RecordType>(T->getClass())->getDecl());
- mangleType(FPT, NULL, false, true);
- } else {
- mangleQualifiers(PointeeType.getQualifiers(), true);
- mangleName(cast<RecordType>(T->getClass())->getDecl());
- mangleType(PointeeType.getLocalUnqualifiedType());
- }
-}
-void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
- assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
-}
-
-// <type> ::= <pointer-type>
-// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
- QualType PointeeTy = T->getPointeeType();
- if (PointeeTy->isArrayType()) {
- // Pointers to arrays are mangled like arrays.
- mangleExtraDimensions(T->getPointeeType());
- } else if (PointeeTy->isFunctionType()) {
- // Function pointers are special.
- Out << '6';
- mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
- NULL, false, false);
- } else {
- if (!PointeeTy.hasQualifiers())
- // Lack of qualifiers is mangled as 'A'.
- Out << 'A';
- mangleType(PointeeTy);
- }
-}
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
- // Object pointers never have qualifiers.
- Out << 'A';
- mangleType(T->getPointeeType());
-}
-
-// <type> ::= <reference-type>
-// <reference-type> ::= A <cvr-qualifiers> <type>
-void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
- Out << 'A';
- QualType PointeeTy = T->getPointeeType();
- if (!PointeeTy.hasQualifiers())
- // Lack of qualifiers is mangled as 'A'.
- Out << 'A';
- mangleType(PointeeTy);
-}
-
-void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
- assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
- assert(false && "Don't know how to mangle ComplexTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
- assert(false && "Don't know how to mangle VectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
- assert(false && "Don't know how to mangle ExtVectorTypes yet!");
-}
-void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
- assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
- // ObjC interfaces have structs underlying them.
- Out << 'U';
- mangleName(T->getDecl());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
- // We don't allow overloading by different protocol qualification,
- // so mangling them isn't necessary.
- mangleType(T->getBaseType());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
- Out << "_E";
- mangleType(T->getPointeeType());
-}
-
-void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
- assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
- assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
- assert(false && "Don't know how to mangle DependentNameTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(
- const DependentTemplateSpecializationType *T) {
- assert(false &&
- "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
- assert(false && "Don't know how to mangle TypeOfTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
- assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
-}
-
-void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
- assert(false && "Don't know how to mangle DecltypeTypes yet!");
-}
-
-void MicrosoftMangleContext::mangleName(const NamedDecl *D,
- llvm::SmallVectorImpl<char> &Name) {
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
-
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- getASTContext().getSourceManager(),
- "Mangling declaration");
+ // ==== Notes on array cookies =========
+ //
+ // MSVC seems to use cookies only when the class has a destructor; a
+ // two-argument usual array deallocation function isn't sufficient.
+ //
+ // For example, this code prints "100" and "1":
+ // struct A {
+ // char x;
+ // void *operator new[](size_t sz) {
+ // printf("%u\n", sz);
+ // return malloc(sz);
+ // }
+ // void operator delete[](void *p, size_t sz) {
+ // printf("%u\n", sz);
+ // free(p);
+ // }
+ // };
+ // int main() {
+ // A *p = new A[100];
+ // delete[] p;
+ // }
+ // Whereas it prints "104" and "104" if you give A a destructor.
+};
- MicrosoftCXXNameMangler Mangler(*this, Name);
- return Mangler.mangle(D);
-}
-void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle thunks!");
-}
-void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
- CXXDtorType Type,
- const ThisAdjustment &,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle destructor thunks!");
-}
-void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle guard variables!");
-}
-void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle virtual tables!");
-}
-void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<char> &) {
- llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
-}
-void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
- int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &) {
- llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
-}
-void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle RTTI!");
-}
-void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle RTTI names!");
-}
-void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle constructors!");
-}
-void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- llvm::SmallVectorImpl<char> &) {
- assert(false && "Can't yet mangle destructors!");
}
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index 6d9d277..8945028 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -71,6 +71,19 @@ namespace {
/// (because these can be defined in declspecs).
virtual void HandleTagDeclDefinition(TagDecl *D) {
Builder->UpdateCompletedType(D);
+
+ // In C++, we may have member functions that need to be emitted at this
+ // point.
+ if (Ctx->getLangOptions().CPlusPlus && !D->isDependentContext()) {
+ for (DeclContext::decl_iterator M = D->decls_begin(),
+ MEnd = D->decls_end();
+ M != MEnd; ++M)
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
+ if (Method->isThisDeclarationADefinition() &&
+ (Method->hasAttr<UsedAttr>() ||
+ Method->hasAttr<ConstructorAttr>()))
+ Builder->EmitTopLevelDecl(Method);
+ }
}
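A sketch of the case this handles (attribute spelling as in GNU C++): a method defined in-class that nothing in the translation unit references, yet must still be emitted.

struct S {
  __attribute__((used)) void keep() {}  // emitted when S's definition
};                                      // completes, not on first use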
virtual void HandleTranslationUnit(ASTContext &Ctx) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index 4d221d4..d74b3f3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -18,7 +18,6 @@
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -75,7 +74,8 @@ void ABIArgInfo::dump() const {
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
- << " Byal=" << getIndirectByVal();
+ << " Byal=" << getIndirectByVal()
+ << " Realign=" << getIndirectRealign();
break;
case Expand:
OS << "Expand";
@@ -330,12 +330,47 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+/// UseX86_MMXType - Return true if this is an MMX type that should use the special
+/// x86_mmx type.
+bool UseX86_MMXType(const llvm::Type *IRType) {
+ // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
+ // special x86_mmx type.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
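Worked cases for the predicate above:

// <2 x i32>, <4 x i16>, <8 x i8> -> true  (64-bit integer vectors: use x86_mmx)
// <1 x i64>   -> false (scalar size is 64; excluded by the last test)
// <2 x float> -> false (element type is not integer)
// <4 x i32>   -> false (128 bits: an SSE vector, not MMX)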
+
+static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) {
+ if (Constraint=="y" && Ty->isVectorTy())
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ return Ty;
+}
+
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
+ static const unsigned MinABIStackAlignInBytes = 4;
+
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
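A standalone model (hypothetical types, not the LLVM API) of the UseX86_MMXType predicate added above: a 64-bit integer vector whose element width is not itself 64 bits is the shape that maps to the opaque x86_mmx type.

#include <cassert>

struct VectorTy {
  unsigned NumElements;
  unsigned ElementBits;
  bool IntegerElements;
};

static bool useX86MMXType(const VectorTy &V) {
  unsigned TotalBits = V.NumElements * V.ElementBits;
  return TotalBits == 64 && V.IntegerElements && V.ElementBits != 64;
}

int main() {
  assert(useX86MMXType({2, 32, true}));    // <2 x i32> -> x86_mmx
  assert(useX86MMXType({4, 16, true}));    // <4 x i16> -> x86_mmx
  assert(useX86MMXType({8, 8, true}));     // <8 x i8>  -> x86_mmx
  assert(!useX86MMXType({1, 64, true}));   // <1 x i64> is left alone
  assert(!useX86MMXType({4, 32, true}));   // 128-bit SSE vector, not MMX
  assert(!useX86MMXType({2, 32, false}));  // floating-point elements
}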
@@ -349,6 +384,9 @@ class X86_32ABIInfo : public ABIInfo {
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+ /// \brief Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -385,6 +423,13 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
+
+ const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
};
}
@@ -547,17 +592,70 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isRecordWithSSEVectorType(Context, i->getType()))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ QualType FT = i->getType();
+
+ if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128)
+ return true;
+
+ if (isRecordWithSSEVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // If the alignment is less than or equal to the minimum ABI
+ // alignment, just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (isRecordWithSSEVectorType(getContext(), Ty))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
+
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
if (!ByVal)
return ABIArgInfo::getIndirect(0, false);
- // Compute the byval alignment. We trust the back-end to honor the
- // minimum ABI alignment for byval, to make cleaner IR.
- const unsigned MinABIAlign = 4;
- unsigned Align = getContext().getTypeAlign(Ty) / 8;
- if (Align > MinABIAlign)
- return ABIArgInfo::getIndirect(Align);
- return ABIArgInfo::getIndirect(0);
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(0);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ if (StackAlign < TypeAlign)
+ return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
+ /*Realign=*/true);
+
+ return ABIArgInfo::getIndirect(StackAlign);
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
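The byval decision above, restated with plain integers (a reader's model, no Clang types): the returned alignment 0 means "leave it to the backend's default", and realignment is requested only when the stack can't give the type what it wants.

#include <cassert>

struct ByValInfo { unsigned Align; bool Realign; };

static ByValInfo classifyByVal(unsigned TypeAlign, unsigned StackAlign) {
  if (StackAlign == 0)
    return {0, false};          // backend default is good enough
  if (StackAlign < TypeAlign)
    return {StackAlign, true};  // type wants more than the stack provides
  return {StackAlign, false};
}

int main() {
  assert(classifyByVal(4, 0).Align == 0); // small types: default alignment
  assert(classifyByVal(16, 4).Realign);   // 16-byte type on a 4-byte stack
  assert(!classifyByVal(4, 16).Realign);  // stack slot already aligned enough
}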
@@ -599,11 +697,18 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
}
-
+
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ if (UseX86_MMXType(IRType)) {
+ ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
+ AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
+ return AAI;
+ }
+
return ABIArgInfo::getDirect();
}
-
-
+
+
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -755,7 +860,8 @@ class X86_64ABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &neededInt,
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned &neededInt,
unsigned &neededSSE) const;
public:
@@ -768,9 +874,14 @@ public:
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
-class WinX86_64ABIInfo : public X86_64ABIInfo {
+class WinX86_64ABIInfo : public ABIInfo {
+
+ ABIArgInfo classify(QualType Ty) const;
+
public:
- WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -799,6 +910,13 @@ public:
return false;
}
+
+ const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
};
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -817,7 +935,7 @@ public:
const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
-
+
// 0-15 are the 16 integer registers.
// 16 is %rip.
AssignToArrayRange(Builder, Address, Eight8, 0, 16);
@@ -1065,7 +1183,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
@@ -1264,7 +1382,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
- unsigned BaseOffset = (unsigned)Layout.getBaseClassOffset(Base);
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
@@ -1443,7 +1561,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
unsigned HiAlign = TD.getABITypeAlignment(Hi);
unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
-
+
// To handle this, we have to increase the size of the low part so that the
// second element will start at an 8 byte offset. We can't increase the size
// of the second element because it might make us access off the end of the
@@ -1459,11 +1577,11 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
Lo = llvm::Type::getInt64Ty(Lo->getContext());
}
}
-
- const llvm::StructType *Result =
+
+ const llvm::StructType *Result =
llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
-
-
+
+
// Verify that the second element is at an 8-byte offset.
assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
"Invalid x86-64 argument pair!");
@@ -1592,7 +1710,7 @@ classifyReturnType(QualType RetTy) const {
}
break;
}
-
+
// If a high part was specified, merge it together with the low part. It is
// known to pass in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
@@ -1665,11 +1783,18 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
- case SSE:
+ case SSE: {
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ if (Hi != NoClass || !UseX86_MMXType(IRType))
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ else
+ // This is an MMX type. Treat it as such.
+ ResType = llvm::Type::getX86_MMXTy(getVMContext());
+
++neededSSE;
- ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
break;
}
+ }
const llvm::Type *HighPart = 0;
switch (Hi) {
@@ -1719,7 +1844,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
-
+
return ABIArgInfo::getDirect(ResType);
}
@@ -1965,76 +2090,48 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
-llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-//===----------------------------------------------------------------------===//
-// PIC16 ABI Implementation
-//===----------------------------------------------------------------------===//
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
-namespace {
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
-class PIC16ABIInfo : public ABIInfo {
-public:
- PIC16ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ uint64_t Size = getContext().getTypeSize(Ty);
- ABIArgInfo classifyReturnType(QualType RetTy) const;
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
+ RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ // FIXME: mingw64-gcc emits 128-bit struct as i128
+ if (Size <= 128 &&
+ (Size & (Size - 1)) == 0)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- PIC16TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PIC16ABIInfo(CGT)) {}
-};
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ return ABIArgInfo::getDirect();
}
-ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else {
- return ABIArgInfo::getDirect();
- }
-}
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty) const {
- return ABIArgInfo::getDirect();
+ QualType RetTy = FI.getReturnType();
+ FI.getReturnInfo() = classify(RetTy);
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classify(it->type);
}
-llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
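The new Win64 classify routine above passes a record directly only when its size is a power of two no larger than 128 bits. A standalone check of that test with plain integers ((Size & (Size - 1)) == 0 is the usual power-of-two trick; the zero guard here is added for the model's sake, the hunk's test doesn't special-case a zero-size record):

#include <cassert>

static bool passDirectlyAsInt(unsigned long long SizeInBits) {
  return SizeInBits != 0 && SizeInBits <= 128 &&
         (SizeInBits & (SizeInBits - 1)) == 0;
}

int main() {
  assert(passDirectlyAsInt(8) && passDirectlyAsInt(64) &&
         passDirectlyAsInt(128));
  assert(!passDirectlyAsInt(24));  // 3-byte struct: not a power of two
  assert(!passDirectlyAsInt(256)); // too large: passed indirectly
}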
@@ -2046,18 +2143,16 @@ llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
- uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
-
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
-
// PowerPC-32
namespace {
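The rounding added to the va_arg emission above keeps the ap pointer moving in whole 8-byte slots, as the Win64 convention requires. A quick model of llvm::RoundUpToAlignment(size, 8) with worked numbers (a sketch, not the LLVM function):

#include <cassert>

static unsigned long long roundUpTo8(unsigned long long Bytes) {
  return (Bytes + 7) & ~7ULL;
}

int main() {
  assert(roundUpTo8(1) == 8);   // a char still advances ap by a full slot
  assert(roundUpTo8(8) == 8);
  assert(roundUpTo8(12) == 16); // a 12-byte argument rounds to two slots
}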
@@ -2213,6 +2308,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ //
// FIXME: This is kind of nasty... but there isn't much choice because the ARM
// backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
@@ -2321,6 +2418,10 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return ABIArgInfo::getIndirect(0);
+
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -2407,21 +2508,6 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2502,6 +2588,116 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
}
//===----------------------------------------------------------------------===//
+// MBlaze ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MBlazeABIInfo : public ABIInfo {
+public:
+ MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // The MBlaze ABI requires all 8- and 16-bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M)
+ const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::CallingConv::ID CC = llvm::CallingConv::C;
+ if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
+ CC = llvm::CallingConv::MBLAZE_INTR;
+ else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
+ CC = llvm::CallingConv::MBLAZE_SVOL;
+
+ if (CC != llvm::CallingConv::C) {
+ // Handle 'interrupt_handler' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(CC);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ // Step 3: Emit _interrupt_handler alias.
+ if (CC == llvm::CallingConv::MBLAZE_INTR)
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "_interrupt_handler", GV, &M.getModule());
+}
+
+
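A source-level sketch of what the MBlaze hook above reacts to. The attribute spellings below are my reading of the MBlazeInterruptHandlerAttr and MBlazeSaveVolatilesAttr names in the hunk, so treat them as assumptions:

volatile int device_status;

// Gets CallingConv::MBLAZE_INTR, is marked noinline, and an alias named
// _interrupt_handler is emitted pointing at it.
__attribute__((interrupt_handler)) void isr(void) {
  device_status = 0;
}

// Gets CallingConv::MBLAZE_SVOL; no alias is emitted for this one.
__attribute__((save_volatiles)) void helper(void) {
  device_status = 1;
}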
+//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//
@@ -2534,8 +2730,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
// Step 3: Emit ISR vector alias.
unsigned Num = attr->getNumber() + 0xffe0;
new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
- "vector_" +
- llvm::LowercaseString(llvm::utohexstr(Num)),
+ "vector_" + llvm::Twine::utohexstr(Num),
GV, &M.getModule());
}
}
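The alias name is unchanged by the Twine rewrite above: llvm::utohexstr produced uppercase hex that LowercaseString then folded, while Twine::utohexstr emits lowercase directly. A worked example of the arithmetic, with a hypothetical attribute number:

#include <cassert>
#include <cstdio>
#include <cstring>

int main() {
  unsigned Num = 2 + 0xffe0;  // e.g. an ISR placed in vector slot 2
  char Name[32];
  std::snprintf(Name, sizeof Name, "vector_%x", Num);
  assert(std::strcmp(Name, "vector_ffe2") == 0); // lowercase hex, as before
}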
@@ -2621,15 +2816,15 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo =
new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
- case llvm::Triple::pic16:
- return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo(Types));
-
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::systemz:
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
+ case llvm::Triple::mblaze:
+ return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
+
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
@@ -2644,6 +2839,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
new X86_32TargetCodeGenInfo(Types, false, true));
@@ -2655,7 +2851,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::x86_64:
switch (Triple.getOS()) {
case llvm::Triple::Win32:
- case llvm::Triple::MinGW64:
+ case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
default:
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
index 9d4cf16..4f59eb6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -15,8 +15,11 @@
#ifndef CLANG_CODEGEN_TARGETINFO_H
#define CLANG_CODEGEN_TARGETINFO_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
class GlobalValue;
+ class Type;
class Value;
}
@@ -102,6 +105,12 @@ namespace clang {
llvm::Value *Address) const {
return Address;
}
+
+ virtual const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ const llvm::Type* Ty) const {
+ return Ty;
+ }
};
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
index f34971b..549ce0a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
@@ -1,4 +1,4 @@
-//===--- Action.cpp - Abstract compilation steps ------------------------*-===//
+//===--- Action.cpp - Abstract compilation steps --------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
index 83d0d26..f1177cf 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
@@ -1,4 +1,4 @@
-//===--- Arg.cpp - Argument Implementations -----------------------------*-===//
+//===--- Arg.cpp - Argument Implementations -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
index 9101523..596e2a7 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -1,4 +1,4 @@
-//===--- ArgList.cpp - Argument List Management -------------------------*-===//
+//===--- ArgList.cpp - Argument List Management ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -55,62 +55,59 @@ Arg *ArgList::getLastArgNoClaim(OptSpecifier Id) const {
}
Arg *ArgList::getLastArg(OptSpecifier Id) const {
- Arg *A = getLastArgNoClaim(Id);
- if (A)
- A->claim();
- return A;
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
}
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
Arg *Res = 0;
- for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1)) {
Res = *it;
- break;
+ Res->claim();
+
}
}
- if (Res)
- Res->claim();
-
return Res;
}
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2) const {
Arg *Res = 0;
- for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
(*it)->getOption().matches(Id2)) {
Res = *it;
- break;
+ Res->claim();
}
}
- if (Res)
- Res->claim();
-
return Res;
}
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2, OptSpecifier Id3) const {
Arg *Res = 0;
- for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
if ((*it)->getOption().matches(Id0) ||
(*it)->getOption().matches(Id1) ||
(*it)->getOption().matches(Id2) ||
(*it)->getOption().matches(Id3)) {
Res = *it;
- break;
+ Res->claim();
}
}
- if (Res)
- Res->claim();
-
return Res;
}
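A container-level model of the new getLastArg behaviour above: every matching argument is claimed (so unused-argument warnings stay quiet about all of them, not just the winner), and the last match is returned. Hypothetical types, not the driver's own Arg machinery:

#include <cassert>
#include <string>
#include <vector>

struct ModelArg { std::string Opt; bool Claimed; };

static ModelArg *getLastArgModel(std::vector<ModelArg> &Args,
                                 const std::string &Id) {
  ModelArg *Res = nullptr;
  for (ModelArg &A : Args)
    if (A.Opt == Id) {
      Res = &A;
      A.Claimed = true;  // every match is claimed, not only the winner
    }
  return Res;
}

int main() {
  std::vector<ModelArg> Args = {{"-ffoo", false}, {"-fbar", false},
                                {"-ffoo", false}};
  ModelArg *Res = getLastArgModel(Args, "-ffoo");
  assert(Res == &Args[2]);                    // last occurrence wins
  assert(Args[0].Claimed && Args[2].Claimed); // both matches claimed
  assert(!Args[1].Claimed);
}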
@@ -214,7 +211,8 @@ const char *ArgList::GetOrMakeJoinedArgString(unsigned Index,
//
-InputArgList::InputArgList(const char **ArgBegin, const char **ArgEnd)
+InputArgList::InputArgList(const char* const *ArgBegin,
+ const char* const *ArgEnd)
: NumInputArgStrings(ArgEnd - ArgBegin) {
ArgStrings.append(ArgBegin, ArgEnd);
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
index c059afd..5619212 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -1,4 +1,4 @@
-//===--- Compilation.cpp - Compilation Task Implementation --------------*-===//
+//===--- Compilation.cpp - Compilation Task Implementation ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,7 +17,7 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Program.h"
#include <sys/stat.h>
#include <errno.h>
using namespace clang::driver;
@@ -101,21 +101,15 @@ bool Compilation::CleanupFileList(const ArgStringList &Files,
llvm::sys::Path P(*it);
std::string Error;
- if (!P.isRegularFile()) {
- // If we have a special file in our list, i.e. /dev/null
- // then don't call eraseFromDisk() and just continue.
- continue;
- }
-
if (P.eraseFromDisk(false, &Error)) {
- // Failure is only failure if the file doesn't exist. There is a
- // race condition here due to the limited interface of
- // llvm::sys::Path, we want to know if the removal gave E_NOENT.
+ // Failure is only failure if the file exists and is "regular". There is
+ // a race condition here due to the limited interface of
+ // llvm::sys::Path; we want to know if the removal gave ENOENT.
// FIXME: Grumble, P.exists() is broken. PR3837.
struct stat buf;
- if (::stat(P.c_str(), &buf) == 0
- || errno != ENOENT) {
+ if (::stat(P.c_str(), &buf) == 0 ? (buf.st_mode & S_IFMT) == S_IFREG :
+ (errno != ENOENT)) {
if (IssueErrors)
getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
<< Error;
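The new error test above, restated as a standalone predicate: a removal failure is reported only when the path still exists as a regular file, or when stat itself failed for a reason other than "no such file". Special files like /dev/null fall through silently, which is what the deleted isRegularFile() early-continue used to handle.

#include <cassert>
#include <cerrno>

static bool shouldReportRemovalFailure(bool StatSucceeded, bool IsRegular,
                                       int StatErrno) {
  return StatSucceeded ? IsRegular : (StatErrno != ENOENT);
}

int main() {
  assert(!shouldReportRemovalFailure(true, false, 0));  // special file: quiet
  assert(shouldReportRemovalFailure(true, true, 0));    // regular file survived
  assert(!shouldReportRemovalFailure(false, false, ENOENT)); // already gone
  assert(shouldReportRemovalFailure(false, false, EACCES));  // other failure
}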
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
index b8400aa..4b6aef6 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -1,4 +1,4 @@
-//===--- Driver.cpp - Clang GCC Compatible Driver -----------------------*-===//
+//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,6 +7,10 @@
//
//===----------------------------------------------------------------------===//
+#ifdef HAVE_CLANG_CONFIG_H
+# include "clang/Config/config.h"
+#endif
+
#include "clang/Driver/Driver.h"
#include "clang/Driver/Action.h"
@@ -30,13 +34,21 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "InputInfo.h"
#include <map>
+#ifdef __CYGWIN__
+#include <cygwin/version.h>
+#if defined(CYGWIN_VERSION_DLL_MAJOR) && CYGWIN_VERSION_DLL_MAJOR<1007
+#define IS_CYGWIN15 1
+#endif
+#endif
+
using namespace clang::driver;
using namespace clang;
@@ -50,8 +62,9 @@ Driver::Driver(llvm::StringRef _ClangExecutable,
DefaultImageName(_DefaultImageName),
DriverTitle("clang \"gcc-compatible\" driver"),
Host(0),
- CCCGenericGCCName("gcc"), CCPrintOptionsFilename(0), CCCIsCXX(false),
+ CCPrintOptionsFilename(0), CCPrintHeadersFilename(0), CCCIsCXX(false),
CCCEcho(false), CCCPrintBindings(false), CCPrintOptions(false),
+ CCPrintHeaders(false), CCCGenericGCCName("gcc"),
CheckInputsExist(true), CCCUseClang(true), CCCUseClangCXX(true),
CCCUseClangCPP(true), CCCUsePCH(true), SuppressMissingInputWarning(false) {
if (IsProduction) {
@@ -69,16 +82,16 @@ Driver::Driver(llvm::StringRef _ClangExecutable,
CCCUseClangCXX = false;
}
- llvm::sys::Path Executable(ClangExecutable);
- Name = Executable.getBasename();
- Dir = Executable.getDirname();
+ Name = llvm::sys::path::stem(ClangExecutable);
+ Dir = llvm::sys::path::parent_path(ClangExecutable);
// Compute the path to the resource directory.
- llvm::sys::Path P(Dir);
- P.eraseComponent(); // Remove /bin from foo/bin
- P.appendComponent("lib");
- P.appendComponent("clang");
- P.appendComponent(CLANG_VERSION_STRING);
+ llvm::StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
+ llvm::SmallString<128> P(Dir);
+ if (ClangResourceDir != "")
+ llvm::sys::path::append(P, ClangResourceDir);
+ else
+ llvm::sys::path::append(P, "..", "lib", "clang", CLANG_VERSION_STRING);
ResourceDir = P.str();
}
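Tracing the new resource-dir computation above for a concrete install (a sketch with std::string standing in for SmallString and path::append; "X.Y" is a stand-in for CLANG_VERSION_STRING, which is elided here):

#include <cassert>
#include <string>

int main() {
  std::string Dir = "/usr/local/bin"; // parent_path of the clang binary
  std::string ClangResourceDir = "";  // CLANG_RESOURCE_DIR is empty by default
  std::string P = Dir;
  if (!ClangResourceDir.empty())
    P += "/" + ClangResourceDir;      // explicit override wins
  else
    P += "/../lib/clang/X.Y";         // default layout next to the binary
  assert(P == "/usr/local/bin/../lib/clang/X.Y");
}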
@@ -115,6 +128,7 @@ InputArgList *Driver::ParseArgStrings(const char **ArgBegin,
DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DerivedArgList *DAL = new DerivedArgList(Args);
+ bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
for (ArgList::const_iterator it = Args.begin(),
ie = Args.end(); it != ie; ++it) {
const Arg *A = *it;
@@ -157,6 +171,25 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
continue;
}
+ // Rewrite reserved library names.
+ if (A->getOption().matches(options::OPT_l)) {
+ llvm::StringRef Value = A->getValue(Args);
+
+ // Rewrite unless -nostdlib is present.
+ if (!HasNostdlib && Value == "stdc++") {
+ DAL->AddFlagArg(A, Opts->getOption(
+ options::OPT_Z_reserved_lib_stdcxx));
+ continue;
+ }
+
+ // Rewrite unconditionally.
+ if (Value == "cc_kext") {
+ DAL->AddFlagArg(A, Opts->getOption(
+ options::OPT_Z_reserved_lib_cckext));
+ continue;
+ }
+ }
+
DAL->append(*it);
}
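The reserved-library rewrite above in miniature: -lstdc++ becomes an internal flag unless -nostdlib is present, -lcc_kext is rewritten unconditionally, and everything else passes through. The returned tokens below are the internal option names from the hunk, not command-line spellings, and the string plumbing is a stand-in for the Arg/Option machinery:

#include <cassert>
#include <string>

static std::string translateLib(const std::string &Value, bool HasNostdlib) {
  if (!HasNostdlib && Value == "stdc++")
    return "Z_reserved_lib_stdcxx";  // flag added via AddFlagArg above
  if (Value == "cc_kext")
    return "Z_reserved_lib_cckext";  // rewritten unconditionally
  return "-l" + Value;               // passed through unchanged
}

int main() {
  assert(translateLib("stdc++", false) == "Z_reserved_lib_stdcxx");
  assert(translateLib("stdc++", true) == "-lstdc++"); // -nostdlib wins
  assert(translateLib("cc_kext", false) == "Z_reserved_lib_cckext");
  assert(translateLib("m", false) == "-lm");
}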
@@ -205,6 +238,13 @@ Compilation *Driver::BuildCompilation(int argc, const char **argv) {
CCCPrintActions = Args->hasArg(options::OPT_ccc_print_phases);
CCCPrintBindings = Args->hasArg(options::OPT_ccc_print_bindings);
CCCIsCXX = Args->hasArg(options::OPT_ccc_cxx) || CCCIsCXX;
+ if (CCCIsCXX) {
+#ifdef IS_CYGWIN15
+ CCCGenericGCCName = "g++-4";
+#else
+ CCCGenericGCCName = "g++";
+#endif
+ }
CCCEcho = Args->hasArg(options::OPT_ccc_echo);
if (const Arg *A = Args->getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue(*Args);
@@ -241,8 +281,12 @@ Compilation *Driver::BuildCompilation(int argc, const char **argv) {
DefaultHostTriple = A->getValue(*Args);
if (const Arg *A = Args->getLastArg(options::OPT_ccc_install_dir))
Dir = InstalledDir = A->getValue(*Args);
- if (const Arg *A = Args->getLastArg(options::OPT_B))
- PrefixDir = A->getValue(*Args);
+ for (arg_iterator it = Args->filtered_begin(options::OPT_B),
+ ie = Args->filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+ PrefixDirs.push_back(A->getValue(*Args, 0));
+ }
Host = GetHostInfo(DefaultHostTriple.c_str());
@@ -287,7 +331,7 @@ int Driver::ExecuteCompilation(const Compilation &C) const {
}
// If there were errors building the compilation, quit now.
- if (getDiags().getNumErrors())
+ if (getDiags().hasErrorOccurred())
return 1;
const Command *FailingCommand = 0;
@@ -365,7 +409,7 @@ void Driver::PrintVersion(const Compilation &C, llvm::raw_ostream &OS) const {
/// option.
static void PrintDiagnosticCategories(llvm::raw_ostream &OS) {
for (unsigned i = 1; // Skip the empty category.
- const char *CategoryName = Diagnostic::getCategoryNameFromID(i); ++i)
+ const char *CategoryName = DiagnosticIDs::getCategoryNameFromID(i); ++i)
OS << i << ',' << CategoryName << '\n';
}
@@ -373,8 +417,19 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
// The order these options are handled in gcc is all over the place, but we
// don't expect inconsistencies w.r.t. that to matter in practice.
+ if (C.getArgs().hasArg(options::OPT_dumpmachine)) {
+ llvm::outs() << C.getDefaultToolChain().getTripleString() << '\n';
+ return false;
+ }
+
if (C.getArgs().hasArg(options::OPT_dumpversion)) {
- llvm::outs() << CLANG_VERSION_STRING "\n";
+ // Since -dumpversion is only implemented for pedantic GCC compatibility, we
+ // return an answer which matches our definition of __VERSION__.
+ //
+ // If we want to return a more correct answer some day, then we should
+ // introduce a non-pedantically GCC compatible mode to Clang in which we
+ // provide sensible definitions for -dumpversion, __VERSION__, etc.
+ llvm::outs() << "4.2.1\n";
return false;
}
@@ -709,10 +764,20 @@ void Driver::BuildActions(const ToolChain &TC, const ArgList &Args,
}
// Check that the file exists, if enabled.
- if (CheckInputsExist && memcmp(Value, "-", 2) != 0 &&
- !llvm::sys::Path(Value).exists())
- Diag(clang::diag::err_drv_no_such_file) << A->getValue(Args);
- else
+ if (CheckInputsExist && memcmp(Value, "-", 2) != 0) {
+ llvm::SmallString<64> Path(Value);
+ if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory))
+ if (!llvm::sys::path::is_absolute(Path.str())) {
+ Path = WorkDir->getValue(Args);
+ llvm::sys::path::append(Path, Value);
+ }
+
+ bool exists = false;
+ if (/*error_code ec =*/llvm::sys::fs::exists(Path.str(), exists) || !exists)
+ Diag(clang::diag::err_drv_no_such_file) << Path.str();
+ else
+ Inputs.push_back(std::make_pair(Ty, A));
+ } else
Inputs.push_back(std::make_pair(Ty, A));
} else if (A->getOption().isLinkerInput()) {
@@ -726,7 +791,7 @@ void Driver::BuildActions(const ToolChain &TC, const ArgList &Args,
// Follow gcc behavior and treat as linker input for invalid -x
// options. It's not clear why we shouldn't just revert to unknown; but
- // this isn't very important, we might as well be bug comatible.
+ // this isn't very important, we might as well be bug compatible.
if (!InputType) {
Diag(clang::diag::err_drv_unknown_language) << A->getValue(Args);
InputType = types::TY_Object;
@@ -747,8 +812,7 @@ void Driver::BuildActions(const ToolChain &TC, const ArgList &Args,
// -{E,M,MM} only run the preprocessor.
if ((FinalPhaseArg = Args.getLastArg(options::OPT_E)) ||
- (FinalPhaseArg = Args.getLastArg(options::OPT_M)) ||
- (FinalPhaseArg = Args.getLastArg(options::OPT_MM))) {
+ (FinalPhaseArg = Args.getLastArg(options::OPT_M, options::OPT_MM))) {
FinalPhase = phases::Preprocess;
// -{fsyntax-only,-analyze,emit-ast,S} only run up to the compiler.
@@ -856,7 +920,7 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
case phases::Preprocess: {
types::ID OutputTy;
// -{M, MM} alter the output type.
- if (Args.hasArg(options::OPT_M) || Args.hasArg(options::OPT_MM)) {
+ if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
OutputTy = types::TY_Dependencies;
} else {
OutputTy = types::getPreprocessedType(Input->getType());
@@ -945,7 +1009,7 @@ void Driver::BuildJobs(Compilation &C) const {
// If the user passed -Qunused-arguments or there were errors, don't warn
// about any unused arguments.
- if (Diags.getNumErrors() ||
+ if (Diags.hasErrorOccurred() ||
C.getArgs().hasArg(options::OPT_Qunused_arguments))
return;
@@ -1141,8 +1205,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
}
- llvm::sys::Path BasePath(BaseInput);
- std::string BaseName(BasePath.getLast());
+ llvm::SmallString<128> BasePath(BaseInput);
+ llvm::StringRef BaseName = llvm::sys::path::filename(BasePath);
// Determine what the derived output name should be.
const char *NamedOutput;
@@ -1163,11 +1227,11 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
// As an annoying special case, PCH generation doesn't strip the pathname.
if (JA.getType() == types::TY_PCH) {
- BasePath.eraseComponent();
- if (BasePath.isEmpty())
+ llvm::sys::path::remove_filename(BasePath);
+ if (BasePath.empty())
BasePath = NamedOutput;
else
- BasePath.appendComponent(NamedOutput);
+ llvm::sys::path::append(BasePath, NamedOutput);
return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()));
} else {
return C.addResultFile(NamedOutput);
@@ -1177,10 +1241,12 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking up program paths.
- if (!PrefixDir.empty()) {
- llvm::sys::Path P(PrefixDir);
+ for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
+ ie = PrefixDirs.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
P.appendComponent(Name);
- if (P.exists())
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
return P.str();
}
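These Driver hunks all switch to the same idiom: llvm::sys::fs::exists returns an error_code and reports the answer through an out-parameter, so "!exists(p, b) && b" reads as "the call succeeded and the path is there". A self-contained model of that shape (hypothetical types, not the LLVM API):

#include <cassert>

// A stand-in for llvm::error_code: contextually true when the call failed.
struct ErrorCodeModel {
  bool Failed;
  explicit operator bool() const { return Failed; }
};

static ErrorCodeModel modelExists(bool OnDisk, bool &Result) {
  Result = OnDisk;               // pretend the filesystem query succeeded
  return ErrorCodeModel{false};
}

int main() {
  bool Exists;
  assert(!modelExists(true, Exists) && Exists);    // no error, path present
  assert(!(!modelExists(false, Exists) && Exists)); // no error, path absent
}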
@@ -1189,7 +1255,8 @@ std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
it = List.begin(), ie = List.end(); it != ie; ++it) {
llvm::sys::Path P(*it);
P.appendComponent(Name);
- if (P.exists())
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
return P.str();
}
@@ -1200,10 +1267,13 @@ std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
bool WantFile) const {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking up program paths.
- if (!PrefixDir.empty()) {
- llvm::sys::Path P(PrefixDir);
+ for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
+ ie = PrefixDirs.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
P.appendComponent(Name);
- if (WantFile ? P.exists() : P.canExecute())
+ bool Exists;
+ if (WantFile ? !llvm::sys::fs::exists(P.str(), Exists) && Exists
+ : P.canExecute())
return P.str();
}
@@ -1212,7 +1282,9 @@ std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
it = List.begin(), ie = List.end(); it != ie; ++it) {
llvm::sys::Path P(*it);
P.appendComponent(Name);
- if (WantFile ? P.exists() : P.canExecute())
+ bool Exists;
+ if (WantFile ? !llvm::sys::fs::exists(P.str(), Exists) && Exists
+ : P.canExecute())
return P.str();
}
@@ -1266,6 +1338,8 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
return createDragonFlyHostInfo(*this, Triple);
case llvm::Triple::OpenBSD:
return createOpenBSDHostInfo(*this, Triple);
+ case llvm::Triple::NetBSD:
+ return createNetBSDHostInfo(*this, Triple);
case llvm::Triple::FreeBSD:
return createFreeBSDHostInfo(*this, Triple);
case llvm::Triple::Minix:
@@ -1275,7 +1349,6 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
case llvm::Triple::Win32:
return createWindowsHostInfo(*this, Triple);
case llvm::Triple::MinGW32:
- case llvm::Triple::MinGW64:
return createMinGWHostInfo(*this, Triple);
default:
return createUnknownHostInfo(*this, Triple);
diff --git a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
index 72aaf56..f2d9af8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
@@ -1,4 +1,4 @@
-//===--- DriverOptions.cpp - Driver Options Table -----------------------*-===//
+//===--- DriverOptions.cpp - Driver Options Table -------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
index 7c5e430..cd413180 100644
--- a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
@@ -1,4 +1,4 @@
-//===--- HostInfo.cpp - Host specific information -----------------------*-===//
+//===--- HostInfo.cpp - Host specific information -------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -115,6 +115,7 @@ ToolChain *DarwinHostInfo::CreateToolChain(const ArgList &Args,
// If we recognized the arch, match it to the toolchains we support.
const char *UseNewToolChain = ::getenv("CCC_ENABLE_NEW_DARWIN_TOOLCHAIN");
if (UseNewToolChain ||
+ Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64 ||
Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
TC = new toolchains::DarwinClang(*this, TCTriple);
} else if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
@@ -371,6 +372,65 @@ ToolChain *FreeBSDHostInfo::CreateToolChain(const ArgList &Args,
return TC;
}
+// NetBSD Host Info
+
+/// NetBSDHostInfo - NetBSD host information implementation.
+class NetBSDHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ NetBSDHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {}
+ ~NetBSDHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual ToolChain *CreateToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+NetBSDHostInfo::~NetBSDHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool NetBSDHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *NetBSDHostInfo::CreateToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+ // Automatically handle some instances of -m32/-m64 we know about.
+ std::string Arch = getArchName();
+ ArchName = Arch.c_str();
+ if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
+ if (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64) {
+ ArchName =
+ (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
+ } else if (Triple.getArch() == llvm::Triple::ppc ||
+ Triple.getArch() == llvm::Triple::ppc64) {
+ ArchName =
+ (A->getOption().matches(options::OPT_m32)) ? "powerpc" : "powerpc64";
+ }
+ }
+
+ ToolChain *&TC = ToolChains[ArchName];
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(ArchName);
+
+ TC = new toolchains::NetBSD(*this, TCTriple);
+ }
+
+ return TC;
+}
+
// Minix Host Info
/// MinixHostInfo - Minix host information implementation.
@@ -623,6 +683,12 @@ clang::driver::createFreeBSDHostInfo(const Driver &D,
}
const HostInfo *
+clang::driver::createNetBSDHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new NetBSDHostInfo(D, Triple);
+}
+
+const HostInfo *
clang::driver::createMinixHostInfo(const Driver &D,
const llvm::Triple& Triple) {
return new MinixHostInfo(D, Triple);
diff --git a/contrib/llvm/tools/clang/lib/Driver/Job.cpp b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
index fa7d060..51055e9 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
@@ -1,4 +1,4 @@
-//===--- Job.cpp - Command to Execute -----------------------------------*-===//
+//===--- Job.cpp - Command to Execute -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
index 3c36314..c3d3048 100644
--- a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
@@ -1,4 +1,4 @@
-//===--- OptTable.cpp - Option Table Implementation ---------------------*-===//
+//===--- OptTable.cpp - Option Table Implementation -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -226,7 +226,8 @@ Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
return new Arg(TheUnknownOption, Index++, Str);
}
-InputArgList *OptTable::ParseArgs(const char **ArgBegin, const char **ArgEnd,
+InputArgList *OptTable::ParseArgs(const char* const *ArgBegin,
+ const char* const *ArgEnd,
unsigned &MissingArgIndex,
unsigned &MissingArgCount) const {
InputArgList *Args = new InputArgList(ArgBegin, ArgEnd);
diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
index 5396250..a992cef 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Option.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
@@ -1,4 +1,4 @@
-//===--- Option.cpp - Abstract Driver Options ---------------------------*-===//
+//===--- Option.cpp - Abstract Driver Options -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/Phases.cpp b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
index df4cdee..f360002 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
@@ -1,4 +1,4 @@
-//===--- Phases.cpp - Transformations on Driver Types -------------------*-===//
+//===--- Phases.cpp - Transformations on Driver Types ---------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tool.cpp b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
index fe01531..b93864f 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
@@ -1,4 +1,4 @@
-//===--- Tool.cpp - Compilation Tools -----------------------------------*-===//
+//===--- Tool.cpp - Compilation Tools -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
index 94c1c6b..e4051a1 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -1,4 +1,4 @@
-//===--- ToolChain.cpp - Collections of tools for one platform ----------*-===//
+//===--- ToolChain.cpp - Collections of tools for one platform ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -43,6 +43,10 @@ types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
return types::lookupTypeForExtension(Ext);
}
+bool ToolChain::HasNativeLLVMSupport() const {
+ return false;
+}
+
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targetting.
//
// FIXME: tblgen this.
@@ -174,3 +178,52 @@ std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args) const {
return ComputeLLVMTriple(Args);
}
+ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ llvm::StringRef Value = A->getValue(Args);
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ getDriver().Diag(clang::diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libstdcxx;
+}
+
+void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-cxx-system-include");
+ CmdArgs.push_back("/usr/include/c++/v1");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ // Currently handled by the mass of goop in InitHeaderSearch.
+ break;
+ }
+}
+
+void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
+void ToolChain::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-lcc_kext");
+}
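A compact model of the new GetCXXStdlibType parsing above, with std::string in place of StringRef and the diagnostic modeled as a bool out-flag; the value spellings are the ones the hunk itself compares against:

#include <cassert>
#include <string>

enum CXXStdlibType { CST_Libcxx, CST_Libstdcxx };

static CXXStdlibType getStdlibType(const std::string &Value, bool &Bad) {
  Bad = false;
  if (Value == "libc++")
    return CST_Libcxx;
  if (Value == "libstdc++")
    return CST_Libstdcxx;
  Bad = true;            // err_drv_invalid_stdlib_name in the real driver
  return CST_Libstdcxx;  // and libstdc++ remains the fallback
}

int main() {
  bool Bad;
  assert(getStdlibType("libc++", Bad) == CST_Libcxx && !Bad);
  assert(getStdlibType("libstdc++", Bad) == CST_Libstdcxx && !Bad);
  assert(getStdlibType("libfoo", Bad) == CST_Libstdcxx && Bad);
}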
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
index 596173d..8028fe0 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -1,4 +1,4 @@
-//===--- ToolChains.cpp - ToolChain Implementations ---------------------*-===//
+//===--- ToolChains.cpp - ToolChain Implementations -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,8 +23,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
#include <cstdlib> // ::getenv
@@ -63,6 +66,10 @@ types::ID Darwin::LookupTypeForExtension(const char *Ext) const {
return Ty;
}
+bool Darwin::HasNativeLLVMSupport() const {
+ return true;
+}
+
// FIXME: Can we tablegen this?
static const char *GetArmArchForMArch(llvm::StringRef Value) {
if (Value == "armv6k")
@@ -146,7 +153,8 @@ DarwinGCC::DarwinGCC(const HostInfo &Host, const llvm::Triple& Triple)
// Try the next major version if that tool chain dir is invalid.
std::string Tmp = "/usr/lib/gcc/" + ToolChainDir;
- if (!llvm::sys::Path(Tmp).exists()) {
+ bool Exists;
+ if (llvm::sys::fs::exists(Tmp, Exists) || !Exists) {
std::string Next = "i686-apple-darwin";
Next += llvm::utostr(DarwinVersion[0] + 1);
Next += "/";
@@ -160,7 +168,7 @@ DarwinGCC::DarwinGCC(const HostInfo &Host, const llvm::Triple& Triple)
//
// FIXME: Drop dependency on gcc's tool chain.
Tmp = "/usr/lib/gcc/" + Next;
- if (llvm::sys::Path(Tmp).exists())
+ if (!llvm::sys::fs::exists(Tmp, Exists) && Exists)
ToolChainDir = Next;
}
@@ -305,19 +313,20 @@ void DarwinGCC::AddLinkSearchPathArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-L/usr/lib/" + ToolChainDir));
Tmp = getDriver().Dir + "/../lib/gcc/" + ToolChainDir;
- if (llvm::sys::Path(Tmp).exists())
+ bool Exists;
+ if (!llvm::sys::fs::exists(Tmp, Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + Tmp));
Tmp = getDriver().Dir + "/../lib/gcc";
- if (llvm::sys::Path(Tmp).exists())
+ if (!llvm::sys::fs::exists(Tmp, Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + Tmp));
CmdArgs.push_back(Args.MakeArgString("-L/usr/lib/gcc/" + ToolChainDir));
// Intentionally duplicated for (temporary) gcc bug compatibility.
CmdArgs.push_back(Args.MakeArgString("-L/usr/lib/gcc/" + ToolChainDir));
Tmp = getDriver().Dir + "/../lib/" + ToolChainDir;
- if (llvm::sys::Path(Tmp).exists())
+ if (!llvm::sys::fs::exists(Tmp, Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + Tmp));
Tmp = getDriver().Dir + "/../lib";
- if (llvm::sys::Path(Tmp).exists())
+ if (!llvm::sys::fs::exists(Tmp, Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + Tmp));
CmdArgs.push_back(Args.MakeArgString("-L/usr/lib/gcc/" + ToolChainDir +
"/../../../" + ToolChainDir));
@@ -373,10 +382,30 @@ void DarwinGCC::AddLinkRuntimeLibArgs(const ArgList &Args,
DarwinClang::DarwinClang(const HostInfo &Host, const llvm::Triple& Triple)
: Darwin(Host, Triple)
{
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
// We expect 'as', 'ld', etc. to be adjacent to our install dir.
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
+
+ // For fallback, we need to know how to find the GCC cc1 executables, so we
+ // also add the GCC libexec paths. This is legacy code that can be removed once
+ // fallback is no longer useful.
+ std::string ToolChainDir = "i686-apple-darwin";
+ ToolChainDir += llvm::utostr(DarwinVersion[0]);
+ ToolChainDir += "/4.2.1";
+
+ std::string Path = getDriver().Dir;
+ Path += "/../libexec/gcc/";
+ Path += ToolChainDir;
+ getProgramPaths().push_back(Path);
+
+ Path = "/usr/libexec/gcc/";
+ Path += ToolChainDir;
+ getProgramPaths().push_back(Path);
}
void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
@@ -435,12 +464,14 @@ void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
if (ArchSpecificDir) {
P.appendComponent(ArchSpecificDir);
- if (P.exists())
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
P.eraseComponent();
}
- if (P.exists())
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
}
@@ -481,11 +512,20 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
else if (isMacosxVersionLT(10, 6))
CmdArgs.push_back("-lgcc_s.10.5");
- // For OS X, we only need a static runtime library when targetting 10.4, to
- // provide versions of the static functions which were omitted from
- // 10.4.dylib.
- if (isMacosxVersionLT(10, 5))
+ // For OS X, we thought we would only need a static runtime library when
+ // targetting 10.4, to provide versions of the static functions which were
+ // omitted from 10.4.dylib.
+ //
+ // Unfortunately, that turned out to not be true, because Darwin system
+ // headers can still use eprintf on i386, and it is not exported from
+ // libSystem. Therefore, we still must provide a runtime library just for
+ // the tiny tiny handful of projects that *might* use that symbol.
+ if (isMacosxVersionLT(10, 5)) {
DarwinStaticLib = "libclang_rt.10.4.a";
+ } else {
+ if (getTriple().getArch() == llvm::Triple::x86)
+ DarwinStaticLib = "libclang_rt.eprintf.a";
+ }
}
/// Add the target specific static library, if needed.
@@ -497,10 +537,8 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build.
- if (!P.exists())
- getDriver().Diag(clang::diag::warn_drv_missing_resource_library)
- << P.str();
- else
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
CmdArgs.push_back(Args.MakeArgString(P.str()));
}
}
@@ -578,6 +616,72 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
setTarget(iPhoneVersion, Major, Minor, Micro);
}
+void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx: {
+ // Unfortunately, -lstdc++ doesn't always exist in the standard search path;
+ // it was previously found in the gcc lib dir. However, for all the Darwin
+ // platforms we care about it was -lstdc++.6, so we search for that
+ // explicitly if we can't see an obvious -lstdc++ candidate.
+
+ // Check in the sysroot first.
+ bool Exists;
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ llvm::sys::Path P(A->getValue(Args));
+ P.appendComponent("usr");
+ P.appendComponent("lib");
+ P.appendComponent("libstdc++.dylib");
+
+ if (llvm::sys::fs::exists(P.str(), Exists) || !Exists) {
+ P.eraseComponent();
+ P.appendComponent("libstdc++.6.dylib");
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists) {
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+ return;
+ }
+ }
+ }
+
+ // Otherwise, look in the root.
+ if ((llvm::sys::fs::exists("/usr/lib/libstdc++.dylib", Exists) || !Exists)&&
+ (!llvm::sys::fs::exists("/usr/lib/libstdc++.6.dylib", Exists) && Exists)){
+ CmdArgs.push_back("/usr/lib/libstdc++.6.dylib");
+ return;
+ }
+
+ // Otherwise, let the linker search.
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+ }
+}
+
+void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+
+ // For Darwin platforms, use the compiler-rt-based support library
+ // instead of the gcc-provided one (which is also incidentally
+ // only present in the gcc lib dir, which makes it hard to find).
+
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("lib");
+ P.appendComponent("darwin");
+ P.appendComponent("libclang_rt.cc_kext.a");
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build.
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+}
+
DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
const char *BoundArch) const {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
@@ -599,6 +703,7 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
if (getArchName() != A->getValue(Args, 0))
continue;
+ Arg *OriginalArg = A;
unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(Args, 1));
unsigned Prev = Index;
Arg *XarchArg = Opts.ParseOneArg(Args, Index);
@@ -622,6 +727,20 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
A = XarchArg;
DAL->AddSynthesizedArg(A);
+
+ // Linker input arguments require custom handling. The problem is that we
+ // have already constructed the phase actions, so we can not treat them as
+ // "input arguments".
+ if (A->getOption().isLinkerInput()) {
+ // Convert the argument into individual Zlinker_input_args.
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
+ DAL->AddSeparateArg(OriginalArg,
+ Opts.getOption(options::OPT_Zlinker_input),
+ A->getValue(Args, i));
+
+ }
+ continue;
+ }
}
// Sob. This is strictly gcc compatible for the time being. Apple
@@ -941,7 +1060,7 @@ Tool &TCEToolChain::SelectTool(const Compilation &C,
/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
OpenBSD::OpenBSD(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
+ : Generic_ELF(Host, Triple) {
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back("/usr/lib");
}
@@ -953,11 +1072,20 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
else
Key = JA.getKind();
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
Tool *&T = Tools[Key];
if (!T) {
switch (Key) {
- case Action::AssembleJobClass:
- T = new tools::openbsd::Assemble(*this); break;
+ case Action::AssembleJobClass: {
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::openbsd::Assemble(*this);
+ break;
+ }
case Action::LinkJobClass:
T = new tools::openbsd::Link(*this); break;
default:
@@ -971,7 +1099,7 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
FreeBSD::FreeBSD(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
+ : Generic_ELF(Host, Triple) {
// Determine if we are compiling 32-bit code on an x86_64 platform.
bool Lib32 = false;
@@ -996,11 +1124,19 @@ Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
else
Key = JA.getKind();
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
Tool *&T = Tools[Key];
if (!T) {
switch (Key) {
case Action::AssembleJobClass:
- T = new tools::freebsd::Assemble(*this); break;
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::freebsd::Assemble(*this);
+ break;
case Action::LinkJobClass:
T = new tools::freebsd::Link(*this); break;
default:
@@ -1011,6 +1147,57 @@ Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
return *T;
}
+/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
+
+NetBSD::NetBSD(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_ELF(Host, Triple) {
+
+ // Determine if we are compiling 32-bit code on an x86_64 platform.
+ bool Lib32 = false;
+ if (Triple.getArch() == llvm::Triple::x86 &&
+ llvm::Triple(getDriver().DefaultHostTriple).getArch() ==
+ llvm::Triple::x86_64)
+ Lib32 = true;
+
+ getProgramPaths().push_back(getDriver().Dir + "/../libexec");
+ getProgramPaths().push_back("/usr/libexec");
+ if (Lib32) {
+ getFilePaths().push_back("/usr/lib/i386");
+ } else {
+ getFilePaths().push_back("/usr/lib");
+ }
+}
+
+Tool &NetBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::netbsd::Assemble(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::netbsd::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA);
+ }
+ }
+
+ return *T;
+}
+
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
Minix::Minix(const HostInfo &Host, const llvm::Triple& Triple)
@@ -1085,28 +1272,253 @@ Tool &AuroraUX::SelectTool(const Compilation &C, const JobAction &JA) const {
/// Linux toolchain (very bare-bones at the moment).
-Linux::Linux(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
- getFilePaths().push_back(getDriver().Dir +
- "/../lib/clang/" CLANG_VERSION_STRING "/");
- getFilePaths().push_back("/lib/");
- getFilePaths().push_back("/usr/lib/");
-
- // Depending on the Linux distribution, any combination of lib{,32,64} is
- // possible. E.g. Debian uses lib and lib32 for mixed i386/x86-64 systems,
- // openSUSE uses lib and lib64 for the same purpose.
- getFilePaths().push_back("/lib32/");
- getFilePaths().push_back("/usr/lib32/");
- getFilePaths().push_back("/lib64/");
- getFilePaths().push_back("/usr/lib64/");
-
- // FIXME: Figure out some way to get gcc's libdir
- // (e.g. /usr/lib/gcc/i486-linux-gnu/4.3/ for Ubuntu 32-bit); we need
- // crtbegin.o/crtend.o/etc., and want static versions of various
- // libraries. If we had our own crtbegin.o/crtend.o/etc, we could probably
- // get away with using shared versions in /usr/lib, though.
- // We could fall back to the approach we used for includes (a massive
- // list), but that's messy at best.
+enum LinuxDistro {
+ DebianLenny,
+ DebianSqueeze,
+ Exherbo,
+ Fedora13,
+ Fedora14,
+ OpenSuse11_3,
+ UbuntuJaunty,
+ UbuntuKarmic,
+ UbuntuLucid,
+ UbuntuMaverick,
+ UnknownDistro
+};
+
+static bool IsFedora(enum LinuxDistro Distro) {
+ return Distro == Fedora13 || Distro == Fedora14;
+}
+
+static bool IsOpenSuse(enum LinuxDistro Distro) {
+ return Distro == OpenSuse11_3;
+}
+
+static bool IsDebian(enum LinuxDistro Distro) {
+ return Distro == DebianLenny || Distro == DebianSqueeze;
+}
+
+static bool IsUbuntu(enum LinuxDistro Distro) {
+ return Distro == UbuntuLucid || Distro == UbuntuMaverick ||
+ Distro == UbuntuJaunty || Distro == UbuntuKarmic;
+}
+
+static bool IsDebianBased(enum LinuxDistro Distro) {
+ return IsDebian(Distro) || IsUbuntu(Distro);
+}
+
+static bool HasMultilib(llvm::Triple::ArchType Arch, enum LinuxDistro Distro) {
+ if (Arch == llvm::Triple::x86_64) {
+ bool Exists;
+ if (Distro == Exherbo &&
+ (llvm::sys::fs::exists("/usr/lib32/libc.so", Exists) || !Exists))
+ return false;
+
+ return true;
+ }
+ if (Arch == llvm::Triple::x86 && IsDebianBased(Distro))
+ return true;
+ return false;
+}
+
+static LinuxDistro DetectLinuxDistro(llvm::Triple::ArchType Arch) {
+ llvm::OwningPtr<llvm::MemoryBuffer> File;
+ if (!llvm::MemoryBuffer::getFile("/etc/lsb-release", File)) {
+ llvm::StringRef Data = File.get()->getBuffer();
+ llvm::SmallVector<llvm::StringRef, 8> Lines;
+ Data.split(Lines, "\n");
+ for (unsigned int i = 0, s = Lines.size(); i < s; ++i) {
+ if (Lines[i] == "DISTRIB_CODENAME=maverick")
+ return UbuntuMaverick;
+ else if (Lines[i] == "DISTRIB_CODENAME=lucid")
+ return UbuntuLucid;
+ else if (Lines[i] == "DISTRIB_CODENAME=jaunty")
+ return UbuntuJaunty;
+ else if (Lines[i] == "DISTRIB_CODENAME=karmic")
+ return UbuntuKarmic;
+ }
+ return UnknownDistro;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/redhat-release", File)) {
+ llvm::StringRef Data = File.get()->getBuffer();
+ if (Data.startswith("Fedora release 14 (Laughlin)"))
+ return Fedora14;
+ else if (Data.startswith("Fedora release 13 (Goddard)"))
+ return Fedora13;
+ return UnknownDistro;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/debian_version", File)) {
+ llvm::StringRef Data = File.get()->getBuffer();
+ if (Data[0] == '5')
+ return DebianLenny;
+ else if (Data.startswith("squeeze/sid"))
+ return DebianSqueeze;
+ return UnknownDistro;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/SuSE-release", File)) {
+ llvm::StringRef Data = File.get()->getBuffer();
+ if (Data.startswith("openSUSE 11.3"))
+ return OpenSuse11_3;
+ return UnknownDistro;
+ }
+
+ bool Exists;
+ if (!llvm::sys::fs::exists("/etc/exherbo-release", Exists) && Exists)
+ return Exherbo;
+
+ return UnknownDistro;
+}
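
Detection matches whole lines exactly, so it only fires on stock release files. A representative /etc/lsb-release for the Maverick case (field values illustrative):

    DISTRIB_ID=Ubuntu
    DISTRIB_RELEASE=10.10
    DISTRIB_CODENAME=maverick
    DISTRIB_DESCRIPTION="Ubuntu 10.10"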
+
+Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
+ : Generic_ELF(Host, Triple) {
+ llvm::Triple::ArchType Arch =
+ llvm::Triple(getDriver().DefaultHostTriple).getArch();
+
+ std::string Suffix32 = "";
+ if (Arch == llvm::Triple::x86_64)
+ Suffix32 = "/32";
+
+ std::string Suffix64 = "";
+ if (Arch == llvm::Triple::x86)
+ Suffix64 = "/64";
+
+ std::string Lib32 = "lib";
+
+ bool Exists;
+ if (!llvm::sys::fs::exists("/lib32", Exists) && Exists)
+ Lib32 = "lib32";
+
+ std::string Lib64 = "lib";
+ bool Symlink;
+ if (!llvm::sys::fs::exists("/lib64", Exists) && Exists &&
+ (llvm::sys::fs::is_symlink("/lib64", Symlink) || !Symlink))
+ Lib64 = "lib64";
+
+ std::string GccTriple = "";
+ if (Arch == llvm::Triple::arm) {
+ if (!llvm::sys::fs::exists("/usr/lib/gcc/arm-linux-gnueabi", Exists) &&
+ Exists)
+ GccTriple = "arm-linux-gnueabi";
+ } else if (Arch == llvm::Triple::x86_64) {
+ if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-linux-gnu", Exists) &&
+ Exists)
+ GccTriple = "x86_64-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-unknown-linux-gnu",
+ Exists) && Exists)
+ GccTriple = "x86_64-unknown-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-pc-linux-gnu",
+ Exists) && Exists)
+ GccTriple = "x86_64-pc-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-redhat-linux",
+ Exists) && Exists)
+ GccTriple = "x86_64-redhat-linux";
+ else if (!llvm::sys::fs::exists("/usr/lib64/gcc/x86_64-suse-linux",
+ Exists) && Exists)
+ GccTriple = "x86_64-suse-linux";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-manbo-linux-gnu",
+ Exists) && Exists)
+ GccTriple = "x86_64-manbo-linux-gnu";
+ } else if (Arch == llvm::Triple::x86) {
+ if (!llvm::sys::fs::exists("/usr/lib/gcc/i686-linux-gnu", Exists) && Exists)
+ GccTriple = "i686-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/i686-pc-linux-gnu", Exists) &&
+ Exists)
+ GccTriple = "i686-pc-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/i486-linux-gnu", Exists) &&
+ Exists)
+ GccTriple = "i486-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/i686-redhat-linux", Exists) &&
+ Exists)
+ GccTriple = "i686-redhat-linux";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/i586-suse-linux", Exists) &&
+ Exists)
+ GccTriple = "i586-suse-linux";
+ }
+
+ const char* GccVersions[] = {"4.5.1", "4.5", "4.4.5", "4.4.4", "4.4.3", "4.4",
+ "4.3.4", "4.3.3", "4.3.2"};
+ std::string Base = "";
+ for (unsigned i = 0; i < sizeof(GccVersions)/sizeof(char*); ++i) {
+ std::string Suffix = GccTriple + "/" + GccVersions[i];
+ std::string t1 = "/usr/lib/gcc/" + Suffix;
+ if (!llvm::sys::fs::exists(t1 + "/crtbegin.o", Exists) && Exists) {
+ Base = t1;
+ break;
+ }
+ std::string t2 = "/usr/lib64/gcc/" + Suffix;
+ if (!llvm::sys::fs::exists(t2 + "/crtbegin.o", Exists) && Exists) {
+ Base = t2;
+ break;
+ }
+ }
+
+ path_list &Paths = getFilePaths();
+ bool Is32Bits = getArch() == llvm::Triple::x86;
+
+ std::string Suffix;
+ std::string Lib;
+
+ if (Is32Bits) {
+ Suffix = Suffix32;
+ Lib = Lib32;
+ } else {
+ Suffix = Suffix64;
+ Lib = Lib64;
+ }
+
+ llvm::sys::Path LinkerPath(Base + "/../../../../" + GccTriple + "/bin/ld");
+ if (!llvm::sys::fs::exists(LinkerPath.str(), Exists) && Exists)
+ Linker = LinkerPath.str();
+ else
+ Linker = GetProgramPath("ld");
+
+ LinuxDistro Distro = DetectLinuxDistro(Arch);
+
+ if (IsUbuntu(Distro)) {
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("relro");
+ }
+
+ if (Arch == llvm::Triple::arm)
+ ExtraOpts.push_back("-X");
+
+ if (IsFedora(Distro) || Distro == UbuntuMaverick)
+ ExtraOpts.push_back("--hash-style=gnu");
+
+ if (IsDebian(Distro) || Distro == UbuntuLucid || Distro == UbuntuJaunty ||
+ Distro == UbuntuKarmic)
+ ExtraOpts.push_back("--hash-style=both");
+
+ if (IsFedora(Distro))
+ ExtraOpts.push_back("--no-add-needed");
+
+ if (Distro == DebianSqueeze || IsOpenSuse(Distro) ||
+ IsFedora(Distro) || Distro == UbuntuLucid || Distro == UbuntuMaverick ||
+ Distro == UbuntuKarmic)
+ ExtraOpts.push_back("--build-id");
+
+ Paths.push_back(Base + Suffix);
+ if (HasMultilib(Arch, Distro)) {
+ if (IsOpenSuse(Distro) && Is32Bits)
+ Paths.push_back(Base + "/../../../../" + GccTriple + "/lib/../lib");
+ Paths.push_back(Base + "/../../../../" + Lib);
+ Paths.push_back("/lib/../" + Lib);
+ Paths.push_back("/usr/lib/../" + Lib);
+ }
+ if (!Suffix.empty())
+ Paths.push_back(Base);
+ if (IsOpenSuse(Distro))
+ Paths.push_back(Base + "/../../../../" + GccTriple + "/lib");
+ Paths.push_back(Base + "/../../..");
+ if (Arch == getArch() && IsUbuntu(Distro))
+ Paths.push_back("/usr/lib/" + GccTriple);
+}
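
To make the path arithmetic concrete, assume a hypothetical Debian squeeze x86_64 host with gcc 4.4.5 installed: GccTriple becomes "x86_64-linux-gnu", the version scan finds crtbegin.o under /usr/lib/gcc/x86_64-linux-gnu/4.4.5, and Suffix/Lib resolve to "" and "lib", so the file search path ends up roughly:

    /usr/lib/gcc/x86_64-linux-gnu/4.4.5
    /usr/lib/gcc/x86_64-linux-gnu/4.4.5/../../../../lib   (multilib)
    /lib/../lib
    /usr/lib/../lib
    /usr/lib/gcc/x86_64-linux-gnu/4.4.5/../../..          (i.e. /usr/lib)

The squeeze detection above also turns on --hash-style=both and --build-id in ExtraOpts for the same configuration.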
+
+bool Linux::HasNativeLLVMSupport() const {
+ return true;
}
Tool &Linux::SelectTool(const Compilation &C, const JobAction &JA) const {
@@ -1116,11 +1528,21 @@ Tool &Linux::SelectTool(const Compilation &C, const JobAction &JA) const {
else
Key = JA.getKind();
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
Tool *&T = Tools[Key];
if (!T) {
switch (Key) {
case Action::AssembleJobClass:
- T = new tools::linuxtools::Assemble(*this); break;
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::linuxtools::Assemble(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::linuxtools::Link(*this); break;
default:
T = &Generic_GCC::SelectTool(C, JA);
}
@@ -1132,7 +1554,7 @@ Tool &Linux::SelectTool(const Compilation &C, const JobAction &JA) const {
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
DragonFly::DragonFly(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
+ : Generic_ELF(Host, Triple) {
// Path mangling to find libexec
getProgramPaths().push_back(getDriver().getInstalledDir());
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
index d1f1556..0e3645c 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -155,6 +155,8 @@ public:
virtual types::ID LookupTypeForExtension(const char *Ext) const;
+ virtual bool HasNativeLLVMSupport() const;
+
virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
const char *BoundArch) const;
@@ -174,6 +176,18 @@ public:
getTriple().getArch() == llvm::Triple::x86_64);
#endif
}
+ virtual bool IsStrictAliasingDefault() const {
+#ifdef DISABLE_DEFAULT_STRICT_ALIASING
+ return false;
+#else
+ return ToolChain::IsStrictAliasingDefault();
+#endif
+ }
+
+ virtual bool IsObjCDefaultSynthPropertiesDefault() const {
+ return false;
+ }
+
virtual bool IsObjCNonFragileABIDefault() const {
// Non-fragile ABI is default for everything but i386.
return getTriple().getArch() != llvm::Triple::x86;
@@ -221,6 +235,12 @@ public:
virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
+ virtual void AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ virtual void AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
/// }
};
@@ -258,6 +278,18 @@ public:
virtual const char *GetDefaultRelocationModel() const { return "pic"; }
};
+class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
+ public:
+ Generic_ELF(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_GCC(Host, Triple) {}
+
+ virtual bool IsIntegratedAssemblerDefault() const {
+ // Default integrated assembler to on for x86.
+ return (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64);
+ }
+};
+
class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
public:
AuroraUX(const HostInfo &Host, const llvm::Triple& Triple);
@@ -265,20 +297,27 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
public:
OpenBSD(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
public:
FreeBSD(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
+class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
+public:
+ NetBSD(const HostInfo &Host, const llvm::Triple& Triple);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+};
+
class LLVM_LIBRARY_VISIBILITY Minix : public Generic_GCC {
public:
Minix(const HostInfo &Host, const llvm::Triple& Triple);
@@ -286,18 +325,23 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
public:
DragonFly(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class LLVM_LIBRARY_VISIBILITY Linux : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
public:
Linux(const HostInfo &Host, const llvm::Triple& Triple);
+ virtual bool HasNativeLLVMSupport() const;
+
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+
+ std::string Linker;
+ std::vector<std::string> ExtraOpts;
};
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
index 5c16cd3..5a63213 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -1,4 +1,4 @@
-//===--- Tools.cpp - Tools Implementations ------------------------------*-===//
+//===--- Tools.cpp - Tools Implementations --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,10 +25,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Process.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Process.h"
#include "InputInfo.h"
#include "ToolChains.h"
@@ -85,6 +86,48 @@ static void QuoteTarget(llvm::StringRef Target,
}
}
+static void AddLinkerInputs(const ToolChain &TC,
+ const InputInfoList &Inputs, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+
+ // Add extra linker input arguments which are not treated as inputs
+ // (constructed via -Xarch_).
+ Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ if (!TC.HasNativeLLVMSupport()) {
+ // Don't try to pass LLVM inputs unless we have native support.
+ if (II.getType() == types::TY_LLVM_IR ||
+ II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC ||
+ II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << TC.getTripleString();
+ }
+
+ // Add filenames immediately.
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ continue;
+ }
+
+ // Otherwise, this is a linker input argument.
+ const Arg &A = II.getInputArg();
+
+ // Handle reserved library options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ } else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext)) {
+ TC.AddCCKextLibArgs(Args, CmdArgs);
+ } else
+ A.renderAsInput(Args, CmdArgs);
+ }
+}
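
This helper consolidates the input loop that was previously copied into each toolchain's link tool: it first drains any -Zlinker-input values synthesized by the -Xarch_ handling in ToolChains.cpp, rejects LLVM IR/bitcode inputs on toolchains without native LLVM support, and routes the reserved -Z-reserved-lib-stdc++/-Z-reserved-lib-cckext options to the new AddCXXStdlibLibArgs/AddCCKextLibArgs hooks. The per-toolchain loops in darwin::Link, auroraux::Link, openbsd::Link, and freebsd::Link are replaced with calls to it later in this patch.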
+
void Clang::AddPreprocessingOptions(const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs,
@@ -98,8 +141,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
Args.AddLastArg(CmdArgs, options::OPT_CC);
// Handle dependency file generation.
- if ((A = Args.getLastArg(options::OPT_M)) ||
- (A = Args.getLastArg(options::OPT_MM)) ||
+ if ((A = Args.getLastArg(options::OPT_M, options::OPT_MM)) ||
(A = Args.getLastArg(options::OPT_MD)) ||
(A = Args.getLastArg(options::OPT_MMD))) {
// Determine the output location.
@@ -130,11 +172,9 @@ void Clang::AddPreprocessingOptions(const Driver &D,
// Otherwise derive from the base input.
//
// FIXME: This should use the computed output file location.
- llvm::sys::Path P(Inputs[0].getBaseInput());
-
- P.eraseSuffix();
- P.appendSuffix("o");
- DepTarget = Args.MakeArgString(P.getLast());
+ llvm::SmallString<128> P(Inputs[0].getBaseInput());
+ llvm::sys::path::replace_extension(P, "o");
+ DepTarget = Args.MakeArgString(llvm::sys::path::filename(P));
}
CmdArgs.push_back("-MT");
@@ -174,20 +214,25 @@ void Clang::AddPreprocessingOptions(const Driver &D,
// wonky, but we include looking for .gch so we can support seamless
// replacement into a build system already set up to be generating
// .gch files.
+ bool RenderedImplicitInclude = false;
for (arg_iterator it = Args.filtered_begin(options::OPT_clang_i_Group),
ie = Args.filtered_end(); it != ie; ++it) {
const Arg *A = it;
if (A->getOption().matches(options::OPT_include)) {
+ bool IsFirstImplicitInclude = !RenderedImplicitInclude;
+ RenderedImplicitInclude = true;
+
// Use PCH if the user requested it.
bool UsePCH = D.CCCUsePCH;
bool FoundPTH = false;
bool FoundPCH = false;
llvm::sys::Path P(A->getValue(Args));
+ bool Exists;
if (UsePCH) {
P.appendSuffix("pch");
- if (P.exists())
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
FoundPCH = true;
else
P.eraseSuffix();
@@ -195,7 +240,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
if (!FoundPCH) {
P.appendSuffix("pth");
- if (P.exists())
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
FoundPTH = true;
else
P.eraseSuffix();
@@ -203,7 +248,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
if (!FoundPCH && !FoundPTH) {
P.appendSuffix("gch");
- if (P.exists()) {
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists) {
FoundPCH = UsePCH;
FoundPTH = !UsePCH;
}
@@ -212,13 +257,19 @@ void Clang::AddPreprocessingOptions(const Driver &D,
}
if (FoundPCH || FoundPTH) {
- A->claim();
- if (UsePCH)
- CmdArgs.push_back("-include-pch");
- else
- CmdArgs.push_back("-include-pth");
- CmdArgs.push_back(Args.MakeArgString(P.str()));
- continue;
+ if (IsFirstImplicitInclude) {
+ A->claim();
+ if (UsePCH)
+ CmdArgs.push_back("-include-pch");
+ else
+ CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+ continue;
+ } else {
+ // Ignore the PCH if it is not first on the command line, and emit a warning.
+ D.Diag(clang::diag::warn_drv_pch_not_first_include)
+ << P.str() << A->getAsString(Args);
+ }
}
}
@@ -230,6 +281,11 @@ void Clang::AddPreprocessingOptions(const Driver &D,
Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F);
+ // Add C++ include arguments, if needed.
+ types::ID InputType = Inputs[0].getType();
+ if (types::isCXX(InputType))
+ getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+
// Add -Wp, and -Xassembler if using the preprocessor.
// FIXME: There is a very unfortunate problem here, some troubled
@@ -242,6 +298,15 @@ void Clang::AddPreprocessingOptions(const Driver &D,
// -I- is a deprecated GCC feature, reject it.
if (Arg *A = Args.getLastArg(options::OPT_I_))
D.Diag(clang::diag::err_drv_I_dash_not_supported) << A->getAsString(Args);
+
+ // If we have a --sysroot, and don't have an explicit -isysroot flag, add an
+ // -isysroot to the CC1 invocation.
+ if (Arg *A = Args.getLastArg(options::OPT__sysroot_EQ)) {
+ if (!Args.hasArg(options::OPT_isysroot)) {
+ CmdArgs.push_back("-isysroot");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+ }
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM CPU we are targeting.
@@ -367,13 +432,16 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
ABIName = A->getValue(Args);
} else {
// Select the default based on the platform.
- llvm::StringRef env = Triple.getEnvironmentName();
- if (env == "gnueabi")
+ switch(Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABI:
ABIName = "aapcs-linux";
- else if (env == "eabi")
+ break;
+ case llvm::Triple::EABI:
ABIName = "aapcs";
- else
+ break;
+ default:
ABIName = "apcs-gnu";
+ }
}
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
@@ -420,8 +488,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
case llvm::Triple::Linux: {
- llvm::StringRef Env = getToolChain().getTriple().getEnvironmentName();
- if (Env == "gnueabi") {
+ if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUEABI) {
FloatABI = "softfp";
break;
}
@@ -429,10 +496,20 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// fall through
default:
- // Assume "soft", but warn the user we are guessing.
- FloatABI = "soft";
- D.Diag(clang::diag::warn_drv_assuming_mfloat_abi_is) << "soft";
- break;
+ switch(Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABI:
+ FloatABI = "softfp";
+ break;
+ case llvm::Triple::EABI:
+ // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
+ FloatABI = "softfp";
+ break;
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ FloatABI = "soft";
+ D.Diag(clang::diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
}
}
@@ -557,6 +634,51 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
}
}
+void Clang::AddSparcTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ llvm::StringRef MArch = A->getValue(Args);
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(MArch));
+ }
+
+ // Select the float ABI as determined by -msoft-float and -mhard-float.
+ llvm::StringRef FloatABI;
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
+ options::OPT_mhard_float)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ FloatABI = "soft";
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ FloatABI = "hard";
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (FloatABI.empty()) {
+ switch (getToolChain().getTriple().getOS()) {
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ FloatABI = "soft";
+ D.Diag(clang::diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
+ }
+
+ if (FloatABI == "soft") {
+ // Floating point operations and argument passing are soft.
+ //
+ // FIXME: This changes CPP defines, we need -target-soft-float.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("soft");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ } else {
+ assert(FloatABI == "hard" && "Invalid float abi!");
+ CmdArgs.push_back("-mhard-float");
+ }
+}
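
Because the OS switch above contains only the default case, every Sparc target without an explicit -msoft-float or -mhard-float falls through to the "soft" guess and its warning, so cc1 ends up receiving -msoft-float together with -target-feature +soft-float.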
+
void Clang::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
if (!Args.hasFlag(options::OPT_mred_zone,
@@ -604,11 +726,21 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
CPUName = "x86-64";
else if (getToolChain().getArchName() == "i386")
CPUName = "i486";
- } else {
+ } else if (getToolChain().getOS().startswith("netbsd")) {
if (getToolChain().getArchName() == "x86_64")
CPUName = "x86-64";
else if (getToolChain().getArchName() == "i386")
CPUName = "i486";
+ } else if (getToolChain().getOS().startswith("freebsd")) {
+ if (getToolChain().getArchName() == "x86_64")
+ CPUName = "x86-64";
+ else if (getToolChain().getArchName() == "i386")
+ CPUName = "i486";
+ } else {
+ if (getToolChain().getArchName() == "x86_64")
+ CPUName = "x86-64";
+ else if (getToolChain().getArchName() == "i386")
+ CPUName = "pentium4";
}
}
@@ -637,6 +769,7 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
static bool needsExceptions(const ArgList &Args, types::ID InputType,
const llvm::Triple &Triple) {
+ // Handle -fno-exceptions.
if (Arg *A = Args.getLastArg(options::OPT_fexceptions,
options::OPT_fno_exceptions)) {
if (A->getOption().matches(options::OPT_fexceptions))
@@ -644,25 +777,24 @@ static bool needsExceptions(const ArgList &Args, types::ID InputType,
else
return false;
}
- switch (InputType) {
- case types::TY_CXX: case types::TY_CXXHeader:
- case types::TY_PP_CXX: case types::TY_PP_CXXHeader:
- case types::TY_ObjCXX: case types::TY_ObjCXXHeader:
- case types::TY_PP_ObjCXX: case types::TY_PP_ObjCXXHeader:
+
+ // Otherwise, C++ inputs use exceptions.
+ if (types::isCXX(InputType))
return true;
- case types::TY_ObjC: case types::TY_ObjCHeader:
- case types::TY_PP_ObjC: case types::TY_PP_ObjCHeader:
+ // As do Objective-C non-fragile ABI inputs and all Objective-C inputs on
+ // x86_64 and ARM after SnowLeopard.
+ if (types::isObjC(InputType)) {
if (Args.hasArg(options::OPT_fobjc_nonfragile_abi))
return true;
if (Triple.getOS() != llvm::Triple::Darwin)
return false;
return (Triple.getDarwinMajorNumber() >= 9 &&
- Triple.getArch() == llvm::Triple::x86_64);
-
- default:
- return false;
+ (Triple.getArch() == llvm::Triple::x86_64 ||
+ Triple.getArch() == llvm::Triple::arm));
}
+
+ return false;
}
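
Net effect of the rewrite: an explicit -fexceptions/-fno-exceptions still wins; otherwise C++ inputs enable exceptions, Objective-C inputs enable them only under the non-fragile ABI or on Darwin >= 9 when targeting x86_64 or ARM, and everything else stays off.
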
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
@@ -709,10 +841,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!IsOpt))
CmdArgs.push_back("-mrelax-all");
- // When using an integrated assembler, we send -Wa, and -Xassembler options
- // to -cc1.
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
- options::OPT_Xassembler);
+ // When using an integrated assembler, translate -Wa, and -Xassembler
+ // options.
+ for (arg_iterator it = Args.filtered_begin(options::OPT_Wa_COMMA,
+ options::OPT_Xassembler),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
+ llvm::StringRef Value = A->getValue(Args, i);
+
+ if (Value == "-force_cpusubtype_ALL") {
+ // Do nothing, this is the default and we don't support anything else.
+ } else if (Value == "-L") {
+ // We don't support -L yet, but it isn't important enough to error
+ // on. No one should really be using it for a semantic change.
+ D.Diag(clang::diag::warn_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ } else {
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+ }
+
+ // Also ignore explicit -force_cpusubtype_ALL option.
+ (void) Args.hasArg(options::OPT_force__cpusubtype__ALL);
} else if (isa<PrecompileJobAction>(JA)) {
// Use PCH if the user requested it.
bool UsePCH = D.CCCUsePCH;
@@ -772,17 +927,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
- CmdArgs.push_back("-analyzer-check-dead-stores");
- // Do not enable the security-syntatic check since it
- // it needs to be refined (known issues).
- // CmdArgs.push_back("-analyzer-check-security-syntactic");
+ types::ID InputType = Inputs[0].getType();
+
+ // Checks to perform for all language types.
+
+ CmdArgs.push_back("-analyzer-checker=core");
+ if (getToolChain().getTriple().getOS() != llvm::Triple::Win32)
+ CmdArgs.push_back("-analyzer-checker=unix");
+ if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
+ CmdArgs.push_back("-analyzer-checker=macosx");
+
+ // Checks to perform for Objective-C/Objective-C++.
+ if (types::isObjC(InputType)) {
+ // Enable all checkers in 'cocoa' package.
+ CmdArgs.push_back("-analyzer-checker=cocoa");
+ }
+
+ // NOTE: Leaving -analyzer-check-objc-mem here is intentional.
+ // It also checks C code.
CmdArgs.push_back("-analyzer-check-objc-mem");
+
CmdArgs.push_back("-analyzer-eagerly-assume");
- CmdArgs.push_back("-analyzer-check-objc-methodsigs");
- // Do not enable the missing -dealloc check.
- // '-analyzer-check-objc-missing-dealloc',
- CmdArgs.push_back("-analyzer-check-objc-unused-ivars");
- CmdArgs.push_back("-analyzer-check-idempotent-operations");
}
// Set the output format. The default is plist, for (lame) historical
@@ -850,6 +1015,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// LLVM Code Generator Options.
+ if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
+ CmdArgs.push_back("-mregparm");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
// FIXME: Set --enable-unsafe-fp-math.
if (Args.hasFlag(options::OPT_fno_omit_frame_pointer,
options::OPT_fomit_frame_pointer))
@@ -857,12 +1027,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
options::OPT_fno_zero_initialized_in_bss))
CmdArgs.push_back("-mno-zero-initialized-in-bss");
+ if (!Args.hasFlag(options::OPT_fstrict_aliasing,
+ options::OPT_fno_strict_aliasing,
+ getToolChain().IsStrictAliasingDefault()))
+ CmdArgs.push_back("-relaxed-aliasing");
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
bool IsVerboseAsmDefault = getToolChain().IsIntegratedAssemblerDefault();
if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
- IsVerboseAsmDefault) ||
+ IsVerboseAsmDefault) ||
Args.hasArg(options::OPT_dA))
CmdArgs.push_back("-masm-verbose");
@@ -880,6 +1054,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (getToolChain().getTriple().getOS() != llvm::Triple::Darwin)
CmdArgs.push_back("-mconstructor-aliases");
+ if (Args.hasArg(options::OPT_mms_bitfields)) {
+ CmdArgs.push_back("-mms-bitfields");
+ }
+
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
@@ -920,6 +1098,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddMIPSTargetArgs(Args, CmdArgs);
break;
+ case llvm::Triple::sparc:
+ AddSparcTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::x86:
case llvm::Triple::x86_64:
AddX86TargetArgs(Args, CmdArgs);
@@ -932,9 +1114,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue(Args));
}
- // -mno-omit-leaf-frame-pointer is default.
+ // -mno-omit-leaf-frame-pointer is the default on Darwin.
if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
- options::OPT_mno_omit_leaf_frame_pointer, false))
+ options::OPT_mno_omit_leaf_frame_pointer,
+ getToolChain().getTriple().getOS() != llvm::Triple::Darwin))
CmdArgs.push_back("-momit-leaf-frame-pointer");
// -fno-math-errno is default.
@@ -946,23 +1129,29 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Explicitly error on some things we know we don't support and can't just
// ignore.
types::ID InputType = Inputs[0].getType();
- Arg *Unsupported;
- if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
- (Unsupported = Args.getLastArg(options::OPT_iframework)) ||
- (Unsupported = Args.getLastArg(options::OPT_fshort_enums)))
- D.Diag(clang::diag::err_drv_clang_unsupported)
- << Unsupported->getOption().getName();
-
- if (types::isCXX(InputType) &&
- getToolChain().getTriple().getOS() == llvm::Triple::Darwin &&
- getToolChain().getTriple().getArch() == llvm::Triple::x86) {
- if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)))
- D.Diag(clang::diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
+ if (!Args.hasArg(options::OPT_fallow_unsupported)) {
+ Arg *Unsupported;
+ if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
+ (Unsupported = Args.getLastArg(options::OPT_iframework)))
+ D.Diag(clang::diag::err_drv_clang_unsupported)
<< Unsupported->getOption().getName();
+
+ if (types::isCXX(InputType) &&
+ getToolChain().getTriple().getOS() == llvm::Triple::Darwin &&
+ getToolChain().getTriple().getArch() == llvm::Triple::x86) {
+ if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)))
+ D.Diag(clang::diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
+ << Unsupported->getOption().getName();
+ }
}
Args.AddAllArgs(CmdArgs, options::OPT_v);
Args.AddLastArg(CmdArgs, options::OPT_H);
+ if (D.CCPrintHeaders) {
+ CmdArgs.push_back("-header-include-file");
+ CmdArgs.push_back(D.CCPrintHeadersFilename ?
+ D.CCPrintHeadersFilename : "-");
+ }
Args.AddLastArg(CmdArgs, options::OPT_P);
Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
@@ -986,6 +1175,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-resource-dir");
CmdArgs.push_back(D.ResourceDir.c_str());
+ Args.AddLastArg(CmdArgs, options::OPT_working_directory);
+
// Add preprocessing options like -I, -D, etc. if we are using the
// preprocessor.
//
@@ -1001,6 +1192,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else if (A->getOption().matches(options::OPT_O) &&
A->getValue(Args)[0] == '\0')
CmdArgs.push_back("-O2");
+ else if (A->getOption().matches(options::OPT_O) &&
+ A->getValue(Args)[0] == 'z' &&
+ A->getValue(Args)[1] == '\0')
+ CmdArgs.push_back("-Os");
else
A->render(Args, CmdArgs);
}
@@ -1054,6 +1249,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue(Args));
}
+ if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
+ options::OPT_Wlarge_by_value_copy_def)) {
+ CmdArgs.push_back("-Wlarge-by-value-copy");
+ if (A->getNumValues())
+ CmdArgs.push_back(A->getValue(Args));
+ else
+ CmdArgs.push_back("64"); // default value for -Wlarge-by-value-copy.
+ }
+
if (Args.hasArg(options::OPT__relocatable_pch))
CmdArgs.push_back("-relocatable-pch");
@@ -1100,7 +1304,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
-
+
// -fhosted is default.
if (KernelOrKext || Args.hasFlag(options::OPT_ffreestanding,
options::OPT_fhosted,
@@ -1110,8 +1314,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -f (flag) options which we can pass directly.
Args.AddLastArg(CmdArgs, options::OPT_fcatch_undefined_behavior);
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
- Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fformat_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_flimit_debug_info);
+ Args.AddLastArg(CmdArgs, options::OPT_pg);
// -flax-vector-conversions is default.
if (!Args.hasFlag(options::OPT_flax_vector_conversions,
@@ -1133,12 +1339,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (Args.getLastArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-fapple-kext");
+
Args.AddLastArg(CmdArgs, options::OPT_fno_show_column);
Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
+ CmdArgs.push_back("-ftrapv-handler");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_fwrapv);
Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
Args.AddLastArg(CmdArgs, options::OPT_funroll_loops);
@@ -1164,8 +1379,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -f options with positive and negative forms; we translate
// these by hand.
+ if (Args.hasArg(options::OPT_mkernel)) {
+ if (!Args.hasArg(options::OPT_fapple_kext) && types::isCXX(InputType))
+ CmdArgs.push_back("-fapple-kext");
+ if (!Args.hasArg(options::OPT_fbuiltin))
+ CmdArgs.push_back("-fno-builtin");
+ }
// -fbuiltin is default.
- if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ else if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
CmdArgs.push_back("-fno-builtin");
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -1184,8 +1405,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fno-access-control");
+ // -felide-constructors is the default.
+ if (Args.hasFlag(options::OPT_fno_elide_constructors,
+ options::OPT_felide_constructors,
+ false))
+ CmdArgs.push_back("-fno-elide-constructors");
+
// -fexceptions=0 is default.
- if (needsExceptions(Args, InputType, getToolChain().getTriple()))
+ if (!KernelOrKext &&
+ needsExceptions(Args, InputType, getToolChain().getTriple()))
CmdArgs.push_back("-fexceptions");
if (getToolChain().UseSjLjExceptions())
@@ -1196,19 +1424,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti))
CmdArgs.push_back("-fno-rtti");
+ // -fshort-enums=0 is default.
+ // FIXME: Are there targets where -fshort-enums is on by default?
+ if (Args.hasFlag(options::OPT_fshort_enums,
+ options::OPT_fno_short_enums, false))
+ CmdArgs.push_back("-fshort-enums");
+
// -fsigned-char is default.
if (!Args.hasFlag(options::OPT_fsigned_char, options::OPT_funsigned_char,
isSignedCharDefault(getToolChain().getTriple())))
CmdArgs.push_back("-fno-signed-char");
// -fthreadsafe-static is default.
- if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
+ if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
options::OPT_fno_threadsafe_statics))
CmdArgs.push_back("-fno-threadsafe-statics");
// -fuse-cxa-atexit is default.
- if (KernelOrKext || !Args.hasFlag(options::OPT_fuse_cxa_atexit,
- options::OPT_fno_use_cxa_atexit))
+ if (KernelOrKext ||
+ !Args.hasFlag(options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
+ getToolChain().getTriple().getOS() != llvm::Triple::Cygwin &&
+ getToolChain().getTriple().getOS() != llvm::Triple::MinGW32))
CmdArgs.push_back("-fno-use-cxa-atexit");
// -fms-extensions=0 is default.
@@ -1216,6 +1452,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().getTriple().getOS() == llvm::Triple::Win32))
CmdArgs.push_back("-fms-extensions");
+ // -fmsc-version=1300 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ getToolChain().getTriple().getOS() == llvm::Triple::Win32) ||
+ Args.hasArg(options::OPT_fmsc_version)) {
+ llvm::StringRef msc_ver = Args.getLastArgValue(options::OPT_fmsc_version);
+ if (msc_ver.empty())
+ CmdArgs.push_back("-fmsc-version=1300");
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fmsc-version=" + msc_ver));
+ }
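
So a bare clang -fms-extensions t.c (or a Win32 target, where -fms-extensions defaults on) sends -fmsc-version=1300 to cc1, while an explicit -fmsc-version=1400 is forwarded as given.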
+
// -fborland-extensions=0 is default.
if (Args.hasFlag(options::OPT_fborland_extensions,
options::OPT_fno_borland_extensions, false))
@@ -1229,7 +1477,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fnext-runtime defaults to on when targeting Darwin and when rewriting
// Objective-C, and is the -cc1 default.
- bool NeXTRuntimeIsDefault =
+ bool NeXTRuntimeIsDefault =
IsRewriter || getToolChain().getTriple().getOS() == llvm::Triple::Darwin;
if (!Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
NeXTRuntimeIsDefault))
@@ -1237,20 +1485,52 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fobjc-nonfragile-abi=0 is default.
if (types::isObjC(InputType)) {
+ // Compute the Objective-C ABI "version" to use. Version numbers are
+ // slightly confusing for historical reasons:
+ // 1 - Traditional "fragile" ABI
+ // 2 - Non-fragile ABI, version 1
+ // 3 - Non-fragile ABI, version 2
unsigned Version = 1;
- if (Args.hasArg(options::OPT_fobjc_nonfragile_abi) ||
- getToolChain().IsObjCNonFragileABIDefault())
- Version = 2;
+ // If -fobjc-abi-version= is present, use that to set the version.
if (Arg *A = Args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
if (llvm::StringRef(A->getValue(Args)) == "1")
Version = 1;
else if (llvm::StringRef(A->getValue(Args)) == "2")
Version = 2;
+ else if (llvm::StringRef(A->getValue(Args)) == "3")
+ Version = 3;
else
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ } else {
+ // Otherwise, determine if we are using the non-fragile ABI.
+ if (Args.hasFlag(options::OPT_fobjc_nonfragile_abi,
+ options::OPT_fno_objc_nonfragile_abi,
+ getToolChain().IsObjCNonFragileABIDefault())) {
+ // Determine the non-fragile ABI version to use.
+#ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO
+ unsigned NonFragileABIVersion = 1;
+#else
+ unsigned NonFragileABIVersion = 2;
+#endif
+
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fobjc_nonfragile_abi_version_EQ)) {
+ if (llvm::StringRef(A->getValue(Args)) == "1")
+ NonFragileABIVersion = 1;
+ else if (llvm::StringRef(A->getValue(Args)) == "2")
+ NonFragileABIVersion = 2;
+ else
+ D.Diag(clang::diag::err_drv_clang_unsupported)
+ << A->getAsString(Args);
+ }
+
+ Version = 1 + NonFragileABIVersion;
+ } else {
+ Version = 1;
+ }
}
- if (Version == 2) {
+ if (Version == 2 || Version == 3) {
CmdArgs.push_back("-fobjc-nonfragile-abi");
// -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
@@ -1265,10 +1545,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // FIXME: -fobjc-nonfragile-abi2 is a transient option meant to expose
- // features in testing. It will eventually be removed.
- if (Args.hasArg(options::OPT_fobjc_nonfragile_abi2))
- CmdArgs.push_back("-fobjc-nonfragile-abi2");
+ // -fobjc-default-synthesize-properties=0 is default.
+ if (Args.hasFlag(options::OPT_fobjc_default_synthesize_properties,
+ options::OPT_fno_objc_default_synthesize_properties,
+ getToolChain().IsObjCDefaultSynthPropertiesDefault())) {
+ CmdArgs.push_back("-fobjc-default-synthesize-properties");
+ }
+
+ // -fobjc-exceptions is default.
+ if (!Args.hasFlag(options::OPT_fobjc_exceptions,
+ options::OPT_fno_objc_exceptions))
+ CmdArgs.push_back("-fno-objc-exceptions");
}
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -1301,17 +1588,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fpascal-strings");
+ if (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext)) {
+ if (!Args.hasArg(options::OPT_fcommon))
+ CmdArgs.push_back("-fno-common");
+ }
// -fcommon is default, only pass non-default.
- if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common))
+ else if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common))
CmdArgs.push_back("-fno-common");
// -fsigned-bitfields is default, and clang doesn't yet support
- // --funsigned-bitfields.
+ // -funsigned-bitfields.
if (!Args.hasFlag(options::OPT_fsigned_bitfields,
options::OPT_funsigned_bitfields))
D.Diag(clang::diag::warn_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
+ // -ffor-scope is default, and clang doesn't support -fno-for-scope.
+ if (!Args.hasFlag(options::OPT_ffor_scope,
+ options::OPT_fno_for_scope))
+ D.Diag(clang::diag::err_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args);
+
// -fcaret-diagnostics is default.
if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
options::OPT_fno_caret_diagnostics, true))
@@ -1322,8 +1620,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_diagnostics_fixit_info))
CmdArgs.push_back("-fno-diagnostics-fixit-info");
- Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_binary);
-
// Enable -fdiagnostics-show-option by default.
if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
options::OPT_fno_diagnostics_show_option))
@@ -1338,8 +1634,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Color diagnostics are the default, unless the terminal doesn't support
// them.
if (Args.hasFlag(options::OPT_fcolor_diagnostics,
- options::OPT_fno_color_diagnostics) &&
- llvm::sys::Process::StandardErrHasColors())
+ options::OPT_fno_color_diagnostics,
+ llvm::sys::Process::StandardErrHasColors()))
CmdArgs.push_back("-fcolor-diagnostics");
if (!Args.hasFlag(options::OPT_fshow_source_location,
@@ -1350,6 +1646,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_spell_checking))
CmdArgs.push_back("-fno-spell-checking");
+
+ // Silently ignore -fasm-blocks for now.
+ (void) Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks,
+ false);
+
if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ))
A->render(Args, CmdArgs);
@@ -1451,13 +1752,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
- // Explicitly warn that these options are unsupported, even though
- // we are allowing compilation to continue.
- for (arg_iterator it = Args.filtered_begin(options::OPT_pg),
- ie = Args.filtered_end(); it != ie; ++it) {
- (*it)->claim();
- D.Diag(clang::diag::warn_drv_clang_unsupported) << (*it)->getAsString(Args);
- }
+ if (Arg *A = Args.getLastArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fomit-frame-pointer" << A->getAsString(Args);
// Claim some arguments which clang supports automatically.
@@ -1482,6 +1780,9 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
assert(Inputs.size() == 1 && "Unexpected number of inputs.");
const InputInfo &Input = Inputs[0];
+ // Don't warn about "clang -w -c foo.s"
+ Args.ClaimAllArgs(options::OPT_w);
+
// Invoke ourselves in -cc1as mode.
//
// FIXME: Implement custom jobs for internal actions.
@@ -1615,12 +1916,21 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
if (II.isFilename())
CmdArgs.push_back(II.getFilename());
- else
+ else {
+ const Arg &A = II.getInputArg();
+
+ // Reverse translate some rewritten options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
+ CmdArgs.push_back("-lstdc++");
+ continue;
+ }
+
// Don't render as input, we need gcc to do the translations.
- II.getInputArg().render(Args, CmdArgs);
+ A.render(Args, CmdArgs);
+ }
}
- const char *GCCName = getToolChain().getDriver().CCCGenericGCCName.c_str();
+ const char *GCCName = getToolChain().getDriver().getCCCGenericGCCName().c_str();
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
@@ -1648,7 +1958,7 @@ void gcc::Compile::RenderExtraToolArgs(const JobAction &JA,
if (JA.getType() != types::TY_PP_Asm)
D.Diag(clang::diag::err_drv_invalid_gcc_output_type)
<< getTypeName(JA.getType());
-
+
CmdArgs.push_back("-S");
}
}
@@ -1685,15 +1995,15 @@ const char *darwin::CC1::getCC1Name(types::ID Type) const {
const char *darwin::CC1::getBaseInputName(const ArgList &Args,
const InputInfoList &Inputs) {
- llvm::sys::Path P(Inputs[0].getBaseInput());
- return Args.MakeArgString(P.getLast());
+ return Args.MakeArgString(
+ llvm::sys::path::filename(Inputs[0].getBaseInput()));
}
const char *darwin::CC1::getBaseInputStem(const ArgList &Args,
const InputInfoList &Inputs) {
const char *Str = getBaseInputName(Args, Inputs);
- if (const char *End = strchr(Str, '.'))
+ if (const char *End = strrchr(Str, '.'))
return Args.MakeArgString(std::string(Str, End));
return Str;
@@ -2188,7 +2498,8 @@ void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
CmdArgs.push_back("-force_cpusubtype_ALL");
}
-void darwin::Link::AddLinkArgs(const ArgList &Args,
+void darwin::Link::AddLinkArgs(Compilation &C,
+ const ArgList &Args,
ArgStringList &CmdArgs) const {
const Driver &D = getToolChain().getDriver();
@@ -2204,8 +2515,30 @@ void darwin::Link::AddLinkArgs(const ArgList &Args,
// Newer linkers support -demangle, pass it if supported and not disabled by
// the user.
- if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) {
- CmdArgs.push_back("-demangle");
+ //
+ // FIXME: We temporarily avoid passing -demangle to any iOS linker, because
+ // unfortunately we can't be guaranteed that the linker version used there
+ // will match the linker version detected at configure time. We need the
+ // universal driver.
+ if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle) &&
+ !getDarwinToolChain().isTargetIPhoneOS()) {
+ // Don't pass -demangle to ld_classic.
+ //
+ // FIXME: This is a temporary workaround, ld should be handling this.
+ bool UsesLdClassic = (getToolChain().getArch() == llvm::Triple::x86 &&
+ Args.hasArg(options::OPT_static));
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ for (arg_iterator it = Args.filtered_begin(options::OPT_Xlinker,
+ options::OPT_Wl_COMMA),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ if (llvm::StringRef(A->getValue(Args, i)) == "-kext")
+ UsesLdClassic = true;
+ }
+ }
+ if (!UsesLdClassic)
+ CmdArgs.push_back("-demangle");
}
// Derived from the "link" spec.
@@ -2362,7 +2695,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suit for ease of comparison.
- AddLinkArgs(Args, CmdArgs);
+ AddLinkArgs(C, Args, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_d_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_s);
@@ -2374,6 +2707,12 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_m_Separate);
Args.AddAllArgs(CmdArgs, options::OPT_r);
+ // Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
+ // members of static archive libraries which implement Objective-C classes or
+ // categories.
+ if (Args.hasArg(options::OPT_ObjC) || Args.hasArg(options::OPT_ObjCXX))
+ CmdArgs.push_back("-ObjC");
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -2459,14 +2798,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
getDarwinToolChain().AddLinkSearchPathArgs(Args, CmdArgs);
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (LinkingOutput) {
CmdArgs.push_back("-arch_multiple");
@@ -2485,10 +2817,8 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
- // FIXME: g++ is more complicated here, it tries to put -lstdc++
- // before -lm, for example.
if (getToolChain().getDriver().CCCIsCXX)
- CmdArgs.push_back("-lstdc++");
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
// link_ssp spec is empty.
@@ -2583,7 +2913,6 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
if ((!Args.hasArg(options::OPT_nostdlib)) &&
@@ -2638,21 +2967,7 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
-
- // Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
-
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -2720,6 +3035,8 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
} else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
CmdArgs.push_back("--eh-frame-hdr");
CmdArgs.push_back("-Bdynamic");
if (Args.hasArg(options::OPT_shared)) {
@@ -2760,26 +3077,12 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
-
- // Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
-
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
- CmdArgs.push_back("-lstdc++");
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
@@ -2821,7 +3124,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
if (getToolChain().getArchName() == "i386")
CmdArgs.push_back("--32");
-
+
// Set byte order explicitly
if (getToolChain().getArchName() == "mips")
CmdArgs.push_back("-EB");
@@ -2856,6 +3159,8 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
} else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
CmdArgs.push_back("--eh-frame-hdr");
if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-Bshareable");
@@ -2882,8 +3187,12 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crt1.o")));
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("gcrt1.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt1.o")));
CmdArgs.push_back(Args.MakeArgString(
getToolChain().GetFilePath("crti.o")));
CmdArgs.push_back(Args.MakeArgString(
@@ -2897,6 +3206,7 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
+ CmdArgs.push_back("-L/usr/lib");
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_s);
@@ -2904,26 +3214,182 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_r);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgcc_p");
+ else
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ else
+ CmdArgs.push_back("-lc_p");
+ CmdArgs.push_back("-lgcc_p");
+ } else {
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtn.o")));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
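
The -pg handling above reduces to one rule: when profiling is requested, link the _p variants of the system libraries. A minimal sketch of that rule as a helper (the helper itself is hypothetical; only the option test and library names come from the code above):

    static const char *ProfiledName(const ArgList &Args, const char *Normal,
                                    const char *Profiled) {
      // Under -pg, the FreeBSD link step swaps in the profiled build of
      // each system library.
      return Args.hasArg(options::OPT_pg) ? Profiled : Normal;
    }
    // e.g. CmdArgs.push_back(ProfiledName(Args, "-lm", "-lm_p"));
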
+
+void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on NetBSD/amd64, we have to explicitly
+ // instruct as in the base system to assemble 32-bit code.
+ if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--32");
+
+ // Set byte order explicitly
+ if (getToolChain().getArchName() == "mips")
+ CmdArgs.push_back("-EB");
+ else if (getToolChain().getArchName() == "mipsel")
+ CmdArgs.push_back("-EL");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
for (InputInfoList::const_iterator
it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
- // Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
+void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld.elf_so");
+ }
+ }
+
+ // When building 32-bit code on NetBSD/amd64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArchName() == "i386") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
}
if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
- CmdArgs.push_back("-lstdc++");
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
@@ -3004,6 +3470,185 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
+void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::Linux& ToolChain =
+ static_cast<const toolchains::Linux&>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and for "clang -g foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (Arg *A = Args.getLastArg(options::OPT__sysroot_EQ)) {
+ CmdArgs.push_back("--sysroot");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ for (std::vector<std::string>::const_iterator i = ToolChain.ExtraOpts.begin(),
+ e = ToolChain.ExtraOpts.end();
+ i != e; ++i)
+ CmdArgs.push_back(i->c_str());
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--eh-frame-hdr");
+ }
+
+ CmdArgs.push_back("-m");
+ if (ToolChain.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("elf_i386");
+ else if (ToolChain.getArch() == llvm::Triple::arm)
+ CmdArgs.push_back("armelf_linux_eabi");
+ else
+ CmdArgs.push_back("elf_x86_64");
+
+ if (Args.hasArg(options::OPT_static)) {
+ if (ToolChain.getArch() == llvm::Triple::arm)
+ CmdArgs.push_back("-Bstatic");
+ else
+ CmdArgs.push_back("-static");
+ } else if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ }
+
+ if (ToolChain.getArch() == llvm::Triple::arm ||
+ (!Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-dynamic-linker");
+ if (ToolChain.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("/lib/ld-linux.so.2");
+ else if (ToolChain.getArch() == llvm::Triple::arm)
+ CmdArgs.push_back("/lib/ld-linux.so.3");
+ else
+ CmdArgs.push_back("/lib64/ld-linux-x86-64.so.2");
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crt1 = NULL;
+ if (!Args.hasArg(options::OPT_shared)){
+ if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ const ToolChain::path_list Paths = ToolChain.getFilePaths();
+
+ for (ToolChain::path_list::const_iterator i = Paths.begin(),
+ e = Paths.end();
+ i != e; ++i) {
+ const std::string &s = *i;
+ CmdArgs.push_back(Args.MakeArgString(std::string("-L") + s));
+ }
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (D.CCCIsCXX && !Args.hasArg(options::OPT_nostdlib)) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--start-group");
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_static)) {
+ if (D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+ } else {
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-lgcc_eh");
+ else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads))
+ CmdArgs.push_back("-lpthread");
+
+ CmdArgs.push_back("-lc");
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--end-group");
+ else {
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--no-as-needed");
+
+ if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (!Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crtend;
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtend = "crtendS.o";
+ else
+ crtend = "crtend.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+ }
+
+ if (Args.hasArg(options::OPT_use_gold_plugin)) {
+ CmdArgs.push_back("-plugin");
+ std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+ }
+
+ C.addCommand(new Command(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
+}
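
The dynamic-linker choice above is a fixed mapping from architecture to an ELF interpreter path. Condensed for reference (a sketch assuming the paths hard-coded in this revision, before any multiarch handling):

    static const char *DynamicLinkerFor(llvm::Triple::ArchType Arch) {
      if (Arch == llvm::Triple::x86) return "/lib/ld-linux.so.2";
      if (Arch == llvm::Triple::arm) return "/lib/ld-linux.so.3";
      return "/lib64/ld-linux-x86-64.so.2";  // x86-64 default, as above
    }
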
void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -3053,26 +3698,12 @@ void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
-
- // Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
-
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
- CmdArgs.push_back("-lstdc++");
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
@@ -3184,21 +3815,7 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
-
- // Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
-
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -3221,7 +3838,7 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
if (D.CCCIsCXX) {
- CmdArgs.push_back("-lstdc++");
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
@@ -3268,11 +3885,11 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
if (Output.isFilename()) {
- CmdArgs.push_back(Args.MakeArgString(std::string("-out:") + Output.getFilename()));
+ CmdArgs.push_back(Args.MakeArgString(std::string("-out:") +
+ Output.getFilename()));
} else {
assert(Output.isNothing() && "Invalid output.");
}
@@ -3284,22 +3901,9 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-nologo");
- for (InputInfoList::const_iterator
- it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
- const InputInfo &II = *it;
-
- // Don't try to pass LLVM inputs to visual studio linker.
- if (II.getType() == types::TY_LLVM_BC)
- D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
-
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
- }
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("link.exe"));
+ Args.MakeArgString(getToolChain().GetProgramPath("link.exe"));
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
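
Most hunks in this file replace a hand-copied per-toolchain input loop with a single AddLinkerInputs call. A sketch of what such a shared helper has to do, reconstructed from the removed loops above (the exact upstream signature and any native-LLVM-support check may differ):

    static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
                                const ArgList &Args, ArgStringList &CmdArgs) {
      const Driver &D = TC.getDriver();
      for (InputInfoList::const_iterator it = Inputs.begin(), ie = Inputs.end();
           it != ie; ++it) {
        const InputInfo &II = *it;
        // A generic external linker cannot consume LLVM IR or bitcode inputs.
        if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
            II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
          D.Diag(clang::diag::err_drv_no_linker_llvm_support)
            << TC.getTripleString();
        if (II.isFilename())
          CmdArgs.push_back(II.getFilename());            // object/archive on disk
        else
          II.getInputArg().renderAsInput(Args, CmdArgs);  // linker flag inputs
      }
    }
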
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
index b5defa4..10c8839 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.h
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -36,6 +36,7 @@ namespace tools {
void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddSparcTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
@@ -232,7 +233,8 @@ namespace darwin {
};
class LLVM_LIBRARY_VISIBILITY Link : public DarwinTool {
- void AddLinkArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddLinkArgs(Compilation &C, const ArgList &Args,
+ ArgStringList &CmdArgs) const;
public:
Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
@@ -332,6 +334,35 @@ namespace freebsd {
};
} // end namespace freebsd
+ /// netbsd -- Directly call GNU Binutils assembler and linker
+namespace netbsd {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("netbsd::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("netbsd::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace netbsd
+
/// linux -- Directly call GNU Binutils assembler and linker
namespace linuxtools {
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
@@ -347,6 +378,18 @@ namespace linuxtools {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("linux::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
}
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
index 3c07cf2..4a4312b 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -1,4 +1,4 @@
-//===--- Types.cpp - Driver input & temporary type information ----------*-===//
+//===--- Types.cpp - Driver input & temporary type information ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -78,6 +78,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_Asm:
case TY_C: case TY_PP_C:
case TY_CL:
+ case TY_CUDA:
case TY_ObjC: case TY_PP_ObjC:
case TY_CXX: case TY_PP_CXX:
case TY_ObjCXX: case TY_PP_ObjCXX:
@@ -151,6 +152,7 @@ types::ID types::lookupTypeForExtension(const char *Ext) {
.Case("CC", TY_CXX)
.Case("cl", TY_CL)
.Case("cp", TY_CXX)
+ .Case("cu", TY_CUDA)
.Case("hh", TY_CXXHeader)
.Case("ll", TY_LLVM_IR)
.Case("hpp", TY_CXXHeader)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
index eb7f270..92fb1e8 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -24,7 +24,7 @@
#include "llvm/Module.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -354,8 +354,18 @@ void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
PrintDeclContext(DC, Indentation+2);
break;
}
+ case Decl::IndirectField: {
+ IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(*I);
+ Out << "<IndirectField> " << IFD << '\n';
+ break;
+ }
+ case Decl::Label: {
+ LabelDecl *LD = cast<LabelDecl>(*I);
+ Out << "<Label> " << LD << '\n';
+ break;
+ }
case Decl::Field: {
- FieldDecl* FD = cast<FieldDecl>(*I);
+ FieldDecl *FD = cast<FieldDecl>(*I);
Out << "<field> " << FD << '\n';
break;
}
@@ -423,29 +433,21 @@ ASTConsumer *clang::CreateDeclContextPrinter() {
}
//===----------------------------------------------------------------------===//
-/// InheritanceViewer - C++ Inheritance Visualization
+/// ASTDumpXML - In-depth XML dumping.
namespace {
-class InheritanceViewer : public ASTConsumer {
- const std::string clsname;
+class ASTDumpXML : public ASTConsumer {
+ llvm::raw_ostream &OS;
+
public:
- InheritanceViewer(const std::string& cname) : clsname(cname) {}
+ ASTDumpXML(llvm::raw_ostream &OS) : OS(OS) {}
void HandleTranslationUnit(ASTContext &C) {
- for (ASTContext::type_iterator I=C.types_begin(),E=C.types_end(); I!=E; ++I)
- if (RecordType *T = dyn_cast<RecordType>(*I)) {
- if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(T->getDecl())) {
- // FIXME: This lookup needs to be generalized to handle namespaces and
- // (when we support them) templates.
- if (D->getNameAsString() == clsname) {
- D->viewInheritance(C);
- }
- }
- }
- }
+ C.getTranslationUnitDecl()->dumpXML(OS);
+ }
};
}
-ASTConsumer *clang::CreateInheritanceViewer(const std::string& clsname) {
- return new InheritanceViewer(clsname);
+ASTConsumer *clang::CreateASTDumperXML(llvm::raw_ostream &OS) {
+ return new ASTDumpXML(OS);
}
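
Both the removed InheritanceViewer and its ASTDumpXML replacement follow the same ASTConsumer shape: stay idle during parsing, then walk the ASTContext once from HandleTranslationUnit. A minimal consumer in that style, using the same type_iterator API the removed code used (class name is illustrative):

    class TypeCounter : public ASTConsumer {
      unsigned NumTypes;
    public:
      TypeCounter() : NumTypes(0) {}
      void HandleTranslationUnit(ASTContext &C) {
        for (ASTContext::type_iterator I = C.types_begin(), E = C.types_end();
             I != E; ++I)
          ++NumTypes;                      // one visit per canonical type
        llvm::errs() << NumTypes << " types\n";
      }
    };
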
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
index b46212f..3905b99 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -38,23 +38,22 @@ void ASTMergeAction::ExecuteAction() {
CI.getASTContext().getLangOptions());
CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
&CI.getASTContext());
- llvm::IntrusiveRefCntPtr<Diagnostic> Diags(&CI.getDiagnostics());
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs>
+ DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
- ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags, false);
+ llvm::IntrusiveRefCntPtr<Diagnostic>
+ Diags(new Diagnostic(DiagIDs, CI.getDiagnostics().getClient(),
+ /*ShouldOwnClient=*/false));
+ ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags,
+ CI.getFileSystemOpts(), false);
if (!Unit)
continue;
- // Reset the argument -> string function so that it has the AST
- // context we want, since the Sema object created by
- // LoadFromASTFile will override it.
- CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
- &CI.getASTContext());
-
- ASTImporter Importer(CI.getDiagnostics(),
- CI.getASTContext(),
+ ASTImporter Importer(CI.getASTContext(),
CI.getFileManager(),
Unit->getASTContext(),
- Unit->getFileManager());
+ Unit->getFileManager(),
+ /*MinimalImport=*/false);
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
for (DeclContext::decl_iterator D = TU->decls_begin(),
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
index c76488b..4a5a51d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -25,17 +25,21 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/Utils.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTSerializationListener.h"
#include "clang/Serialization/ASTWriter.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Atomic.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"
#include <cstdlib>
@@ -43,20 +47,63 @@
#include <sys/stat.h>
using namespace clang;
+using llvm::TimeRecord;
+
+namespace {
+ class SimpleTimer {
+ bool WantTiming;
+ TimeRecord Start;
+ std::string Output;
+
+ public:
+ explicit SimpleTimer(bool WantTiming) : WantTiming(WantTiming) {
+ if (WantTiming)
+ Start = TimeRecord::getCurrentTime();
+ }
+
+ void setOutput(const llvm::Twine &Output) {
+ if (WantTiming)
+ this->Output = Output.str();
+ }
+
+ ~SimpleTimer() {
+ if (WantTiming) {
+ TimeRecord Elapsed = TimeRecord::getCurrentTime();
+ Elapsed -= Start;
+ llvm::errs() << Output << ':';
+ Elapsed.print(Elapsed, llvm::errs());
+ llvm::errs() << '\n';
+ }
+ }
+ };
+}
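
SimpleTimer replaces the llvm::Timer/TimerGroup bookkeeping that later hunks delete: timing is scoped to the object's lifetime and prints on destruction. Expected use, mirroring the call sites added further down (the surrounding scope is assumed):

    {
      SimpleTimer Timer(WantTiming);       // inert unless timing is enabled
      Timer.setOutput("Parsing " + getMainFileName());
      // ... timed work ...
    }   // destructor prints "Parsing <file>: <elapsed>" to stderr
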
+
/// \brief After failing to build a precompiled preamble (due to
/// errors in the source that occur in the preamble), the number of
/// reparses during which we'll skip even trying to precompile the
/// preamble.
const unsigned DefaultPreambleRebuildInterval = 5;
+/// \brief Tracks the number of ASTUnit objects that are currently active.
+///
+/// Used for debugging purposes only.
+static llvm::sys::cas_flag ActiveASTUnitObjects;
+
ASTUnit::ASTUnit(bool _MainFileIsAST)
: CaptureDiagnostics(false), MainFileIsAST(_MainFileIsAST),
- CompleteTranslationUnit(true), ConcurrencyCheckValue(CheckUnlocked),
+ CompleteTranslationUnit(true), WantTiming(getenv("LIBCLANG_TIMING")),
+ NumStoredDiagnosticsFromDriver(0),
+ ConcurrencyCheckValue(CheckUnlocked),
PreambleRebuildCounter(0), SavedMainFileBuffer(0), PreambleBuffer(0),
ShouldCacheCodeCompletionResults(false),
- NumTopLevelDeclsAtLastCompletionCache(0),
- CacheCodeCompletionCoolDown(0),
+ CompletionCacheTopLevelHashValue(0),
+ PreambleTopLevelHashValue(0),
+ CurrentTopLevelHashValue(0),
UnsafeToFree(false) {
+ if (getenv("LIBCLANG_OBJTRACKING")) {
+ llvm::sys::AtomicIncrement(&ActiveASTUnitObjects);
+ fprintf(stderr, "+++ %d translation units\n", ActiveASTUnitObjects);
+ }
}
ASTUnit::~ASTUnit() {
@@ -82,10 +129,12 @@ ASTUnit::~ASTUnit() {
delete SavedMainFileBuffer;
delete PreambleBuffer;
- ClearCachedCompletionResults();
+ ClearCachedCompletionResults();
- for (unsigned I = 0, N = Timers.size(); I != N; ++I)
- delete Timers[I];
+ if (getenv("LIBCLANG_OBJTRACKING")) {
+ llvm::sys::AtomicDecrement(&ActiveASTUnitObjects);
+ fprintf(stderr, "--- %d translation units\n", ActiveASTUnitObjects);
+ }
}
void ASTUnit::CleanTemporaryFiles() {
@@ -115,7 +164,8 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
| (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
| (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
| (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Type - 1));
+ | (1 << (CodeCompletionContext::CCC_Type - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
// In C++, types can appear in expressions contexts (for functional casts).
if (LangOpts.CPlusPlus)
@@ -141,12 +191,13 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
if (LangOpts.CPlusPlus)
IsNestedNameSpecifier = true;
- } else if (isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND))
+ } else if (isa<ClassTemplateDecl>(ND))
IsNestedNameSpecifier = true;
} else if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
// Values can appear in these contexts.
Contexts = (1 << (CodeCompletionContext::CCC_Statement - 1))
| (1 << (CodeCompletionContext::CCC_Expression - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
| (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
} else if (isa<ObjCProtocolDecl>(ND)) {
Contexts = (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1));
@@ -164,13 +215,8 @@ void ASTUnit::CacheCodeCompletionResults() {
if (!TheSema)
return;
- llvm::Timer *CachingTimer = 0;
- if (TimerGroup.get()) {
- CachingTimer = new llvm::Timer("Cache global code completions",
- *TimerGroup);
- CachingTimer->startTimer();
- Timers.push_back(CachingTimer);
- }
+ SimpleTimer Timer(WantTiming);
+ Timer.setOutput("Cache global code completions for " + getMainFileName());
// Clear out the previous results.
ClearCachedCompletionResults();
@@ -178,7 +224,8 @@ void ASTUnit::CacheCodeCompletionResults() {
// Gather the set of global code completions.
typedef CodeCompletionResult Result;
llvm::SmallVector<Result, 8> Results;
- TheSema->GatherGlobalCodeCompletions(Results);
+ CachedCompletionAllocator = new GlobalCodeCompletionAllocator;
+ TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator, Results);
// Translate global code completions into cached completions.
llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
@@ -188,7 +235,8 @@ void ASTUnit::CacheCodeCompletionResults() {
case Result::RK_Declaration: {
bool IsNestedNameSpecifier = false;
CachedCodeCompletionResult CachedResult;
- CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema);
+ CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator);
CachedResult.ShowInContexts = getDeclShowContexts(Results[I].Declaration,
Ctx->getLangOptions(),
IsNestedNameSpecifier);
@@ -237,7 +285,8 @@ void ASTUnit::CacheCodeCompletionResults() {
| (1 << (CodeCompletionContext::CCC_UnionTag - 1))
| (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1))
| (1 << (CodeCompletionContext::CCC_Type - 1))
- | (1 << (CodeCompletionContext::CCC_PotentiallyQualifiedName - 1));
+ | (1 << (CodeCompletionContext::CCC_PotentiallyQualifiedName - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
if (isa<NamespaceDecl>(Results[I].Declaration) ||
isa<NamespaceAliasDecl>(Results[I].Declaration))
@@ -249,7 +298,9 @@ void ASTUnit::CacheCodeCompletionResults() {
// nested-name-specifier but isn't already an option, create a
// nested-name-specifier completion.
Results[I].StartsNestedNameSpecifier = true;
- CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema);
+ CachedResult.Completion
+ = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator);
CachedResult.ShowInContexts = RemainingContexts;
CachedResult.Priority = CCP_NestedNameSpecifier;
CachedResult.TypeClass = STC_Void;
@@ -268,7 +319,9 @@ void ASTUnit::CacheCodeCompletionResults() {
case Result::RK_Macro: {
CachedCodeCompletionResult CachedResult;
- CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema);
+ CachedResult.Completion
+ = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator);
CachedResult.ShowInContexts
= (1 << (CodeCompletionContext::CCC_TopLevel - 1))
| (1 << (CodeCompletionContext::CCC_ObjCInterface - 1))
@@ -279,7 +332,9 @@ void ASTUnit::CacheCodeCompletionResults() {
| (1 << (CodeCompletionContext::CCC_Expression - 1))
| (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
| (1 << (CodeCompletionContext::CCC_MacroNameUse - 1))
- | (1 << (CodeCompletionContext::CCC_PreprocessorExpression - 1));
+ | (1 << (CodeCompletionContext::CCC_PreprocessorExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_OtherWithMacros - 1));
CachedResult.Priority = Results[I].Priority;
CachedResult.Kind = Results[I].CursorKind;
@@ -290,22 +345,16 @@ void ASTUnit::CacheCodeCompletionResults() {
break;
}
}
- Results[I].Destroy();
}
-
- if (CachingTimer)
- CachingTimer->stopTimer();
- // Make a note of the state when we performed this caching.
- NumTopLevelDeclsAtLastCompletionCache = top_level_size();
- CacheCodeCompletionCoolDown = 15;
+ // Save the current top-level hash value.
+ CompletionCacheTopLevelHashValue = CurrentTopLevelHashValue;
}
void ASTUnit::ClearCachedCompletionResults() {
- for (unsigned I = 0, N = CachedCompletionResults.size(); I != N; ++I)
- delete CachedCompletionResults[I].Completion;
CachedCompletionResults.clear();
CachedCompletionTypes.clear();
+ CachedCompletionAllocator = 0;
}
namespace {
@@ -378,7 +427,7 @@ class CaptureDroppedDiagnostics {
public:
CaptureDroppedDiagnostics(bool RequestCapture, Diagnostic &Diags,
- llvm::SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ llvm::SmallVectorImpl<StoredDiagnostic> &StoredDiags)
: Diags(Diags), Client(StoredDiags), PreviousClient(0)
{
if (RequestCapture || Diags.getClient() == 0) {
@@ -399,6 +448,9 @@ public:
void StoredDiagnosticClient::HandleDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticClient::HandleDiagnostic(Level, Info);
+
StoredDiags.push_back(StoredDiagnostic(Level, Info));
}
@@ -411,32 +463,48 @@ const std::string &ASTUnit::getASTFileName() {
return static_cast<ASTReader *>(Ctx->getExternalSource())->getFileName();
}
+llvm::MemoryBuffer *ASTUnit::getBufferForFile(llvm::StringRef Filename,
+ std::string *ErrorStr) {
+ assert(FileMgr);
+ return FileMgr->getBufferForFile(Filename, ErrorStr);
+}
+
+/// \brief Configure the diagnostics object for use with ASTUnit.
+void ASTUnit::ConfigureDiags(llvm::IntrusiveRefCntPtr<Diagnostic> &Diags,
+ const char **ArgBegin, const char **ArgEnd,
+ ASTUnit &AST, bool CaptureDiagnostics) {
+ if (!Diags.getPtr()) {
+ // No diagnostics engine was provided, so create our own diagnostics object
+ // with the default options.
+ DiagnosticOptions DiagOpts;
+ DiagnosticClient *Client = 0;
+ if (CaptureDiagnostics)
+ Client = new StoredDiagnosticClient(AST.StoredDiagnostics);
+ Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd - ArgBegin,
+ ArgBegin, Client);
+ } else if (CaptureDiagnostics) {
+ Diags->setClient(new StoredDiagnosticClient(AST.StoredDiagnostics));
+ }
+}
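
ConfigureDiags centralizes a pattern the LoadFrom* entry points below had each duplicated: construct a default engine when the caller supplied none, otherwise only attach the capturing client on request. The calling pattern, as used at the call site below (sketch):

    llvm::IntrusiveRefCntPtr<Diagnostic> Diags;          // deliberately null
    ConfigureDiags(Diags, 0, 0, *AST, /*CaptureDiagnostics=*/true);
    // Diags now owns a default-configured engine whose client records every
    // diagnostic into AST.StoredDiagnostics.
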
+
ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
llvm::IntrusiveRefCntPtr<Diagnostic> Diags,
+ const FileSystemOptions &FileSystemOpts,
bool OnlyLocalDecls,
RemappedFile *RemappedFiles,
unsigned NumRemappedFiles,
bool CaptureDiagnostics) {
llvm::OwningPtr<ASTUnit> AST(new ASTUnit(true));
-
- if (!Diags.getPtr()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- DiagnosticOptions DiagOpts;
- Diags = CompilerInstance::createDiagnostics(DiagOpts, 0, 0);
- }
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
- AST->CaptureDiagnostics = CaptureDiagnostics;
AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
- AST->FileMgr.reset(new FileManager);
- AST->SourceMgr.reset(new SourceManager(AST->getDiagnostics()));
+ AST->FileMgr.reset(new FileManager(FileSystemOpts));
+ AST->SourceMgr.reset(new SourceManager(AST->getDiagnostics(),
+ AST->getFileManager()));
AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager()));
- // If requested, capture diagnostics in the ASTUnit.
- CaptureDroppedDiagnostics Capture(CaptureDiagnostics, AST->getDiagnostics(),
- AST->StoredDiagnostics);
-
for (unsigned I = 0; I != NumRemappedFiles; ++I) {
// Create the file entry for the file that we're mapping from.
const FileEntry *FromFile
@@ -471,7 +539,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
Reader->setListener(new ASTInfoCollector(LangInfo, HeaderInfo, TargetTriple,
Predefines, Counter));
- switch (Reader->ReadAST(Filename)) {
+ switch (Reader->ReadAST(Filename, ASTReader::MainFile)) {
case ASTReader::Success:
break;
@@ -538,12 +606,69 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
namespace {
+/// \brief Preprocessor callback class that updates a hash value with the names
+/// of all macros that have been defined by the translation unit.
+class MacroDefinitionTrackerPPCallbacks : public PPCallbacks {
+ unsigned &Hash;
+
+public:
+ explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
+
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
+ }
+};
+
+/// \brief Add the given declaration to the hash of all top-level entities.
+void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
+ if (!D)
+ return;
+
+ DeclContext *DC = D->getDeclContext();
+ if (!DC)
+ return;
+
+ if (!(DC->isTranslationUnit() || DC->getLookupParent()->isTranslationUnit()))
+ return;
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ if (ND->getIdentifier())
+ Hash = llvm::HashString(ND->getIdentifier()->getName(), Hash);
+ else if (DeclarationName Name = ND->getDeclName()) {
+ std::string NameStr = Name.getAsString();
+ Hash = llvm::HashString(NameStr, Hash);
+ }
+ return;
+ }
+
+ if (ObjCForwardProtocolDecl *Forward
+ = dyn_cast<ObjCForwardProtocolDecl>(D)) {
+ for (ObjCForwardProtocolDecl::protocol_iterator
+ P = Forward->protocol_begin(),
+ PEnd = Forward->protocol_end();
+ P != PEnd; ++P)
+ AddTopLevelDeclarationToHash(*P, Hash);
+ return;
+ }
+
+ if (ObjCClassDecl *Class = llvm::dyn_cast<ObjCClassDecl>(D)) {
+ for (ObjCClassDecl::iterator I = Class->begin(), IEnd = Class->end();
+ I != IEnd; ++I)
+ AddTopLevelDeclarationToHash(I->getInterface(), Hash);
+ return;
+ }
+}
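
The hashing above chains llvm::HashString, seeding each call with the running value so that a single unsigned summarizes every top-level name seen so far. For example (names hypothetical):

    unsigned Hash = 0;
    Hash = llvm::HashString("main", Hash);    // seed with the previous value
    Hash = llvm::HashString("helper", Hash);
    // Any change in the set of names changes the final Hash; the preamble
    // and completion-cache comparisons later in this file rely on exactly
    // that property.
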
+
class TopLevelDeclTrackerConsumer : public ASTConsumer {
ASTUnit &Unit;
-
+ unsigned &Hash;
+
public:
- TopLevelDeclTrackerConsumer(ASTUnit &_Unit) : Unit(_Unit) {}
-
+ TopLevelDeclTrackerConsumer(ASTUnit &_Unit, unsigned &Hash)
+ : Unit(_Unit), Hash(Hash) {
+ Hash = 0;
+ }
+
void HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
Decl *D = *it;
@@ -553,6 +678,8 @@ public:
// fundamental problem in the parser right now.
if (isa<ObjCMethodDecl>(D))
continue;
+
+ AddTopLevelDeclarationToHash(D, Hash);
Unit.addTopLevelDecl(D);
}
}
@@ -567,7 +694,10 @@ public:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
- return new TopLevelDeclTrackerConsumer(Unit);
+ CI.getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
+ return new TopLevelDeclTrackerConsumer(Unit,
+ Unit.getCurrentTopLevelHashValue());
}
public:
@@ -579,15 +709,20 @@ public:
}
};
-class PrecompilePreambleConsumer : public PCHGenerator {
+class PrecompilePreambleConsumer : public PCHGenerator,
+ public ASTSerializationListener {
ASTUnit &Unit;
+ unsigned &Hash;
std::vector<Decl *> TopLevelDecls;
-
+
public:
PrecompilePreambleConsumer(ASTUnit &Unit,
const Preprocessor &PP, bool Chaining,
const char *isysroot, llvm::raw_ostream *Out)
- : PCHGenerator(PP, Chaining, isysroot, Out), Unit(Unit) { }
+ : PCHGenerator(PP, "", Chaining, isysroot, Out), Unit(Unit),
+ Hash(Unit.getCurrentTopLevelHashValue()) {
+ Hash = 0;
+ }
virtual void HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
@@ -598,6 +733,7 @@ public:
// fundamental problem in the parser right now.
if (isa<ObjCMethodDecl>(D))
continue;
+ AddTopLevelDeclarationToHash(D, Hash);
TopLevelDecls.push_back(D);
}
}
@@ -614,6 +750,15 @@ public:
getWriter().getDeclID(TopLevelDecls[I]));
}
}
+
+ virtual void SerializedPreprocessedEntity(PreprocessedEntity *Entity,
+ uint64_t Offset) {
+ Unit.addPreprocessedEntityFromPreamble(Offset);
+ }
+
+ virtual ASTSerializationListener *GetASTSerializationListener() {
+ return this;
+ }
};
class PrecompilePreambleAction : public ASTFrontendAction {
@@ -625,14 +770,18 @@ public:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
std::string Sysroot;
+ std::string OutputFile;
llvm::raw_ostream *OS = 0;
bool Chaining;
- if (GeneratePCHAction::ComputeASTConsumerArguments(CI, InFile, Sysroot,
+ if (GeneratePCHAction::ComputeASTConsumerArguments(CI, InFile, Sysroot,
+ OutputFile,
OS, Chaining))
return 0;
const char *isysroot = CI.getFrontendOpts().RelocatablePCH ?
Sysroot.c_str() : 0;
+ CI.getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
return new PrecompilePreambleConsumer(Unit, CI.getPreprocessor(), Chaining,
isysroot, OS);
}
@@ -666,11 +815,9 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang.setDiagnostics(&getDiagnostics());
- CaptureDroppedDiagnostics Capture(CaptureDiagnostics,
- getDiagnostics(),
- StoredDiagnostics);
// Create the target instance.
+ Clang.getTargetOpts().Features = TargetFeatures;
Clang.setTarget(TargetInfo::CreateTargetInfo(Clang.getDiagnostics(),
Clang.getTargetOpts()));
if (!Clang.hasTarget()) {
@@ -693,20 +840,25 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// Configure the various subsystems.
// FIXME: Should we retain the previous file manager?
- FileMgr.reset(new FileManager);
- SourceMgr.reset(new SourceManager(getDiagnostics()));
+ FileSystemOpts = Clang.getFileSystemOpts();
+ FileMgr.reset(new FileManager(Clang.getFileSystemOpts()));
+ SourceMgr.reset(new SourceManager(getDiagnostics(), *FileMgr));
TheSema.reset();
Ctx.reset();
PP.reset();
// Clear out old caches and data.
TopLevelDecls.clear();
+ PreprocessedEntities.clear();
CleanTemporaryFiles();
PreprocessedEntitiesByFile.clear();
if (!OverrideMainBuffer) {
- StoredDiagnostics.clear();
+ StoredDiagnostics.erase(
+ StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
+ StoredDiagnostics.end());
TopLevelDeclsInPreamble.clear();
+ PreprocessedEntitiesInPreamble.clear();
}
// Create a file manager object to provide access to and cache the filesystem.
@@ -728,19 +880,21 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
PreprocessorOpts.ImplicitPCHInclude = PreambleFile;
PreprocessorOpts.DisablePCHValidation = true;
- // Keep track of the override buffer;
- SavedMainFileBuffer = OverrideMainBuffer;
-
// The stored diagnostic has the old source manager in it; update
// the locations to refer into the new source manager. Since we've
// been careful to make sure that the source manager's state
// before and after are identical, so that we can reuse the source
// location itself.
- for (unsigned I = 0, N = StoredDiagnostics.size(); I != N; ++I) {
+ for (unsigned I = NumStoredDiagnosticsFromDriver,
+ N = StoredDiagnostics.size();
+ I < N; ++I) {
FullSourceLoc Loc(StoredDiagnostics[I].getLocation(),
getSourceManager());
StoredDiagnostics[I].setLocation(Loc);
}
+
+ // Keep track of the override buffer;
+ SavedMainFileBuffer = OverrideMainBuffer;
} else {
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
@@ -774,12 +928,6 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
}
Invocation.reset(Clang.takeInvocation());
-
- // If we were asked to cache code-completion results and don't have any
- // results yet, do so now.
- if (ShouldCacheCodeCompletionResults && CachedCompletionResults.empty())
- CacheCodeCompletionResults();
-
return false;
error:
@@ -787,11 +935,12 @@ error:
if (OverrideMainBuffer) {
PreprocessorOpts.eraseRemappedFile(
PreprocessorOpts.remapped_file_buffer_end() - 1);
- PreprocessorOpts.DisablePCHValidation = true;
PreprocessorOpts.ImplicitPCHInclude = PriorImplicitPCHInclude;
delete OverrideMainBuffer;
+ SavedMainFileBuffer = 0;
}
+ StoredDiagnostics.clear();
Clang.takeSourceManager();
Clang.takeFileManager();
Invocation.reset(Clang.takeInvocation());
@@ -803,15 +952,27 @@ static std::string GetPreamblePCHPath() {
// FIXME: This is lame; sys::Path should provide this function (in particular,
// it should know how to find the temporary files dir).
// FIXME: This is really lame. I copied this code from the Driver!
+ // FIXME: This is a hack so that we can override the preamble file during
+ // crash-recovery testing, which is the only case where the preamble files
+ // are not necessarily cleaned up.
+ const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE");
+ if (TmpFile)
+ return TmpFile;
+
std::string Error;
const char *TmpDir = ::getenv("TMPDIR");
if (!TmpDir)
TmpDir = ::getenv("TEMP");
if (!TmpDir)
TmpDir = ::getenv("TMP");
+#ifdef LLVM_ON_WIN32
+ if (!TmpDir)
+ TmpDir = ::getenv("USERPROFILE");
+#endif
if (!TmpDir)
TmpDir = "/tmp";
llvm::sys::Path P(TmpDir);
+ P.createDirectoryOnDisk(true);
P.appendComponent("preamble");
P.appendSuffix("pch");
if (P.createTemporaryFileOnDisk())
@@ -827,8 +988,7 @@ std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> >
ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
unsigned MaxLines, bool &CreatedBuffer) {
FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
- PreprocessorOptions &PreprocessorOpts
- = Invocation.getPreprocessorOpts();
+ PreprocessorOptions &PreprocessorOpts = Invocation.getPreprocessorOpts();
CreatedBuffer = false;
// Try to determine if the main file has been remapped, either from the
@@ -852,17 +1012,11 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
CreatedBuffer = false;
}
- Buffer = llvm::MemoryBuffer::getFile(M->second);
+ Buffer = getBufferForFile(M->second);
if (!Buffer)
return std::make_pair((llvm::MemoryBuffer*)0,
std::make_pair(0, true));
CreatedBuffer = true;
-
- // Remove this remapping. We've captured the buffer already.
- M = PreprocessorOpts.eraseRemappedFile(M);
- E = PreprocessorOpts.remapped_file_end();
- if (M == E)
- break;
}
}
}
@@ -884,12 +1038,6 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
}
Buffer = const_cast<llvm::MemoryBuffer *>(M->second);
-
- // Remove this remapping. We've captured the buffer already.
- M = PreprocessorOpts.eraseRemappedFile(M);
- E = PreprocessorOpts.remapped_file_buffer_end();
- if (M == E)
- break;
}
}
}
@@ -897,7 +1045,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
// If the main source file was not remapped, load it now.
if (!Buffer) {
- Buffer = llvm::MemoryBuffer::getFile(FrontendOpts.Inputs[0].second);
+ Buffer = getBufferForFile(FrontendOpts.Inputs[0].second);
if (!Buffer)
return std::make_pair((llvm::MemoryBuffer*)0, std::make_pair(0, true));
@@ -908,7 +1056,6 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
}
static llvm::MemoryBuffer *CreatePaddedMainFileBuffer(llvm::MemoryBuffer *Old,
- bool DeleteOld,
unsigned NewSize,
llvm::StringRef NewName) {
llvm::MemoryBuffer *Result
@@ -919,9 +1066,6 @@ static llvm::MemoryBuffer *CreatePaddedMainFileBuffer(llvm::MemoryBuffer *Old,
' ', NewSize - Old->getBufferSize() - 1);
const_cast<char*>(Result->getBufferEnd())[-1] = '\n';
- if (DeleteOld)
- delete Old;
-
return Result;
}
@@ -957,6 +1101,11 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> > NewPreamble
= ComputePreamble(PreambleInvocation, MaxLines, CreatedPreambleBuffer);
+ // If ComputePreamble() created the preamble buffer, take ownership of it.
+ llvm::OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
+ if (CreatedPreambleBuffer)
+ OwnedPreambleBuffer.reset(NewPreamble.first);
+
if (!NewPreamble.second.first) {
// We couldn't find a preamble in the main source. Clear out the current
// preamble, if we have one. It's obviously no good any more.
@@ -965,8 +1114,6 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
llvm::sys::Path(PreambleFile).eraseFromDisk();
PreambleFile.clear();
}
- if (CreatedPreambleBuffer)
- delete NewPreamble.first;
// The next time we actually see a preamble, precompile it.
PreambleRebuildCounter = 1;
@@ -1049,7 +1196,11 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Set the state of the diagnostic object to mimic its state
// after parsing the preamble.
+ // FIXME: This won't catch any #pragma push warning changes that
+ // have occurred in the preamble.
getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(),
+ PreambleInvocation.getDiagnosticOpts());
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
if (StoredDiagnostics.size() > NumStoredDiagnosticsInPreamble)
StoredDiagnostics.erase(
@@ -1059,7 +1210,6 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Create a version of the main file buffer that is padded to
// buffer size we reserved when creating the preamble.
return CreatePaddedMainFileBuffer(NewPreamble.first,
- CreatedPreambleBuffer,
PreambleReservedSize,
FrontendOpts.Inputs[0].second);
}
@@ -1069,7 +1219,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// return now.
if (!AllowRebuild)
return 0;
-
+
// We can't reuse the previously-computed preamble. Build a new one.
Preamble.clear();
llvm::sys::Path(PreambleFile).eraseFromDisk();
@@ -1088,14 +1238,19 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
return 0;
}
- // We did not previously compute a preamble, or it can't be reused anyway.
- llvm::Timer *PreambleTimer = 0;
- if (TimerGroup.get()) {
- PreambleTimer = new llvm::Timer("Precompiling preamble", *TimerGroup);
- PreambleTimer->startTimer();
- Timers.push_back(PreambleTimer);
+ // Create a temporary file for the precompiled preamble. In rare
+ // circumstances, this can fail.
+ std::string PreamblePCHPath = GetPreamblePCHPath();
+ if (PreamblePCHPath.empty()) {
+ // Try again next time.
+ PreambleRebuildCounter = 1;
+ return 0;
}
+ // We did not previously compute a preamble, or it can't be reused anyway.
+ SimpleTimer PreambleTimer(WantTiming);
+ PreambleTimer.setOutput("Precompiling preamble");
+
// Create a new buffer that stores the preamble. The buffer also contains
// extra space for the original contents of the file (which will be present
// when we actually parse the file) along with more room in case the file
@@ -1129,11 +1284,11 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
- // FIXME: Set ChainedPCH unconditionally, once it is ready.
- if (::getenv("LIBCLANG_CHAINING"))
- FrontendOpts.ChainedPCH = true;
+ FrontendOpts.ChainedPCH = true;
// FIXME: Generate the precompiled header into memory?
- FrontendOpts.OutputFile = GetPreamblePCHPath();
+ FrontendOpts.OutputFile = PreamblePCHPath;
+ PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
+ PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Create the compiler instance to use for building the precompiled preamble.
CompilerInstance Clang;
@@ -1142,20 +1297,14 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Set up diagnostics, capturing all of the diagnostics produced.
Clang.setDiagnostics(&getDiagnostics());
- CaptureDroppedDiagnostics Capture(CaptureDiagnostics,
- getDiagnostics(),
- StoredDiagnostics);
// Create the target instance.
+ Clang.getTargetOpts().Features = TargetFeatures;
Clang.setTarget(TargetInfo::CreateTargetInfo(Clang.getDiagnostics(),
Clang.getTargetOpts()));
if (!Clang.hasTarget()) {
llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
Preamble.clear();
- if (CreatedPreambleBuffer)
- delete NewPreamble.first;
- if (PreambleTimer)
- PreambleTimer->stopTimer();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.eraseRemappedFile(
PreprocessorOpts.remapped_file_buffer_end() - 1);
@@ -1176,15 +1325,22 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
"IR inputs not support here!");
// Clear out old caches and data.
- StoredDiagnostics.clear();
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Clang.getDiagnosticOpts());
+ StoredDiagnostics.erase(
+ StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
+ StoredDiagnostics.end());
TopLevelDecls.clear();
TopLevelDeclsInPreamble.clear();
+ PreprocessedEntities.clear();
+ PreprocessedEntitiesInPreamble.clear();
// Create a file manager object to provide access to and cache the filesystem.
- Clang.setFileManager(new FileManager);
+ Clang.setFileManager(new FileManager(Clang.getFileSystemOpts()));
// Create the source manager.
- Clang.setSourceManager(new SourceManager(getDiagnostics()));
+ Clang.setSourceManager(new SourceManager(getDiagnostics(),
+ Clang.getFileManager()));
llvm::OwningPtr<PrecompilePreambleAction> Act;
Act.reset(new PrecompilePreambleAction(*this));
@@ -1193,10 +1349,6 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
Clang.takeInvocation();
llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
Preamble.clear();
- if (CreatedPreambleBuffer)
- delete NewPreamble.first;
- if (PreambleTimer)
- PreambleTimer->stopTimer();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.eraseRemappedFile(
PreprocessorOpts.remapped_file_buffer_end() - 1);
@@ -1213,11 +1365,9 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// FIXME: Should we leave a note for ourselves to try again?
llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
Preamble.clear();
- if (CreatedPreambleBuffer)
- delete NewPreamble.first;
- if (PreambleTimer)
- PreambleTimer->stopTimer();
TopLevelDeclsInPreamble.clear();
+ PreprocessedEntities.clear();
+ PreprocessedEntitiesInPreamble.clear();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.eraseRemappedFile(
PreprocessorOpts.remapped_file_buffer_end() - 1);
@@ -1247,14 +1397,19 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
= std::make_pair(F->second->getSize(), File->getModificationTime());
}
- if (PreambleTimer)
- PreambleTimer->stopTimer();
-
PreambleRebuildCounter = 1;
PreprocessorOpts.eraseRemappedFile(
PreprocessorOpts.remapped_file_buffer_end() - 1);
+
+ // If the hash of top-level entities differs from the hash of the top-level
+ // entities the last time we rebuilt the preamble, clear out the completion
+ // cache.
+ if (CurrentTopLevelHashValue != PreambleTopLevelHashValue) {
+ CompletionCacheTopLevelHashValue = 0;
+ PreambleTopLevelHashValue = CurrentTopLevelHashValue;
+ }
+
return CreatePaddedMainFileBuffer(NewPreamble.first,
- CreatedPreambleBuffer,
PreambleReservedSize,
FrontendOpts.Inputs[0].second);
}
@@ -1274,14 +1429,90 @@ void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
TopLevelDecls.insert(TopLevelDecls.begin(), Resolved.begin(), Resolved.end());
}
+void ASTUnit::RealizePreprocessedEntitiesFromPreamble() {
+ if (!PP)
+ return;
+
+ PreprocessingRecord *PPRec = PP->getPreprocessingRecord();
+ if (!PPRec)
+ return;
+
+ ExternalPreprocessingRecordSource *External = PPRec->getExternalSource();
+ if (!External)
+ return;
+
+ for (unsigned I = 0, N = PreprocessedEntitiesInPreamble.size(); I != N; ++I) {
+ if (PreprocessedEntity *PE
+ = External->ReadPreprocessedEntityAtOffset(
+ PreprocessedEntitiesInPreamble[I]))
+ PreprocessedEntities.push_back(PE);
+ }
+
+ if (PreprocessedEntities.empty())
+ return;
+
+ PreprocessedEntities.insert(PreprocessedEntities.end(),
+ PPRec->begin(true), PPRec->end(true));
+}
+
+ASTUnit::pp_entity_iterator ASTUnit::pp_entity_begin() {
+ if (!PreprocessedEntitiesInPreamble.empty() &&
+ PreprocessedEntities.empty())
+ RealizePreprocessedEntitiesFromPreamble();
+
+ if (PreprocessedEntities.empty())
+ if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord())
+ return PPRec->begin(true);
+
+ return PreprocessedEntities.begin();
+}
+
+ASTUnit::pp_entity_iterator ASTUnit::pp_entity_end() {
+ if (!PreprocessedEntitiesInPreamble.empty() &&
+ PreprocessedEntities.empty())
+ RealizePreprocessedEntitiesFromPreamble();
+
+ if (PreprocessedEntities.empty())
+ if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord())
+ return PPRec->end(true);
+
+ return PreprocessedEntities.end();
+}
+
unsigned ASTUnit::getMaxPCHLevel() const {
if (!getOnlyLocalDecls())
return Decl::MaxPCHLevel;
- unsigned Result = 0;
- if (isMainFileAST() || SavedMainFileBuffer)
- ++Result;
- return Result;
+ return 0;
+}
+
+llvm::StringRef ASTUnit::getMainFileName() const {
+ return Invocation->getFrontendOpts().Inputs[0].second;
+}
+
+bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
+ if (!Invocation)
+ return true;
+
+ // We'll manage file buffers ourselves.
+ Invocation->getPreprocessorOpts().RetainRemappedFileBuffers = true;
+ Invocation->getFrontendOpts().DisableFree = false;
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+
+ // Save the target features.
+ TargetFeatures = Invocation->getTargetOpts().Features;
+
+ llvm::MemoryBuffer *OverrideMainBuffer = 0;
+ if (PrecompilePreamble) {
+ PreambleRebuildCounter = 2;
+ OverrideMainBuffer
+ = getMainBufferWithPrecompiledPreamble(*Invocation);
+ }
+
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Parsing " + getMainFileName());
+
+ return Parse(OverrideMainBuffer);
}
ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
@@ -1290,50 +1521,19 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
bool CaptureDiagnostics,
bool PrecompilePreamble,
bool CompleteTranslationUnit,
- bool CacheCodeCompletionResults) {
- if (!Diags.getPtr()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- DiagnosticOptions DiagOpts;
- Diags = CompilerInstance::createDiagnostics(DiagOpts, 0, 0);
- }
-
+ bool CacheCodeCompletionResults) {
// Create the AST unit.
llvm::OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
- AST->CaptureDiagnostics = CaptureDiagnostics;
AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
AST->CompleteTranslationUnit = CompleteTranslationUnit;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->Invocation.reset(CI);
- CI->getPreprocessorOpts().RetainRemappedFileBuffers = true;
-
- if (getenv("LIBCLANG_TIMING"))
- AST->TimerGroup.reset(
- new llvm::TimerGroup(CI->getFrontendOpts().Inputs[0].second));
-
- llvm::MemoryBuffer *OverrideMainBuffer = 0;
- // FIXME: When C++ PCH is ready, allow use of it for a precompiled preamble.
- if (PrecompilePreamble && !CI->getLangOpts().CPlusPlus) {
- AST->PreambleRebuildCounter = 1;
- OverrideMainBuffer
- = AST->getMainBufferWithPrecompiledPreamble(*AST->Invocation);
- }
-
- llvm::Timer *ParsingTimer = 0;
- if (AST->TimerGroup.get()) {
- ParsingTimer = new llvm::Timer("Initial parse", *AST->TimerGroup);
- ParsingTimer->startTimer();
- AST->Timers.push_back(ParsingTimer);
- }
-
- bool Failed = AST->Parse(OverrideMainBuffer);
- if (ParsingTimer)
- ParsingTimer->stopTimer();
-
- return Failed? 0 : AST.take();
+ return AST->LoadFromCompilerInvocation(PrecompilePreamble)? 0 : AST.take();
}
ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
@@ -1341,20 +1541,20 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
llvm::IntrusiveRefCntPtr<Diagnostic> Diags,
llvm::StringRef ResourceFilesPath,
bool OnlyLocalDecls,
+ bool CaptureDiagnostics,
RemappedFile *RemappedFiles,
unsigned NumRemappedFiles,
- bool CaptureDiagnostics,
bool PrecompilePreamble,
bool CompleteTranslationUnit,
- bool CacheCodeCompletionResults) {
- bool CreatedDiagnosticsObject = false;
-
+ bool CacheCodeCompletionResults,
+ bool CXXPrecompilePreamble,
+ bool CXXChainedPCH) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
DiagnosticOptions DiagOpts;
- Diags = CompilerInstance::createDiagnostics(DiagOpts, 0, 0);
- CreatedDiagnosticsObject = true;
+ Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd - ArgBegin,
+ ArgBegin);
}
llvm::SmallVector<const char *, 16> Args;
@@ -1365,40 +1565,49 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
// also want to force it to use clang.
Args.push_back("-fsyntax-only");
- // FIXME: We shouldn't have to pass in the path info.
- driver::Driver TheDriver("clang", llvm::sys::getHostTriple(),
- "a.out", false, false, *Diags);
-
- // Don't check that inputs exist, they have been remapped.
- TheDriver.setCheckInputsExist(false);
-
- llvm::OwningPtr<driver::Compilation> C(
- TheDriver.BuildCompilation(Args.size(), Args.data()));
+ llvm::SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+
+ llvm::OwningPtr<CompilerInvocation> CI;
- // We expect to get back exactly one command job, if we didn't something
- // failed.
- const driver::JobList &Jobs = C->getJobs();
- if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
- llvm::SmallString<256> Msg;
- llvm::raw_svector_ostream OS(Msg);
- C->PrintJob(OS, C->getJobs(), "; ", true);
- Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
- return 0;
- }
+ {
+ CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
+ StoredDiagnostics);
+
+ // FIXME: We shouldn't have to pass in the path info.
+ driver::Driver TheDriver("clang", llvm::sys::getHostTriple(),
+ "a.out", false, false, *Diags);
+
+ // Don't check that inputs exist, they have been remapped.
+ TheDriver.setCheckInputsExist(false);
+
+ llvm::OwningPtr<driver::Compilation> C(
+ TheDriver.BuildCompilation(Args.size(), Args.data()));
+
+ // We expect to get back exactly one command job; if we didn't, something
+ // failed.
+ const driver::JobList &Jobs = C->getJobs();
+ if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
+ llvm::SmallString<256> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+ C->PrintJob(OS, C->getJobs(), "; ", true);
+ Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
+ return 0;
+ }
- const driver::Command *Cmd = cast<driver::Command>(*Jobs.begin());
- if (llvm::StringRef(Cmd->getCreator().getName()) != "clang") {
- Diags->Report(diag::err_fe_expected_clang_command);
- return 0;
- }
+ const driver::Command *Cmd = cast<driver::Command>(*Jobs.begin());
+ if (llvm::StringRef(Cmd->getCreator().getName()) != "clang") {
+ Diags->Report(diag::err_fe_expected_clang_command);
+ return 0;
+ }
- const driver::ArgStringList &CCArgs = Cmd->getArguments();
- llvm::OwningPtr<CompilerInvocation> CI(new CompilerInvocation);
- CompilerInvocation::CreateFromArgs(*CI,
+ const driver::ArgStringList &CCArgs = Cmd->getArguments();
+ CI.reset(new CompilerInvocation);
+ CompilerInvocation::CreateFromArgs(*CI,
const_cast<const char **>(CCArgs.data()),
const_cast<const char **>(CCArgs.data()) +
- CCArgs.size(),
- *Diags);
+ CCArgs.size(),
+ *Diags);
+ }
// Override any files that need remapping
for (unsigned I = 0; I != NumRemappedFiles; ++I)
@@ -1408,26 +1617,44 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
- CI->getFrontendOpts().DisableFree = false;
- return LoadFromCompilerInvocation(CI.take(), Diags, OnlyLocalDecls,
- CaptureDiagnostics, PrecompilePreamble,
- CompleteTranslationUnit,
- CacheCodeCompletionResults);
+ // Check whether we should precompile the preamble and/or use chained PCH.
+ // FIXME: This is a temporary hack while we debug C++ chained PCH.
+ if (CI->getLangOpts().CPlusPlus) {
+ PrecompilePreamble = PrecompilePreamble && CXXPrecompilePreamble;
+
+ if (PrecompilePreamble && !CXXChainedPCH &&
+ !CI->getPreprocessorOpts().ImplicitPCHInclude.empty())
+ PrecompilePreamble = false;
+ }
+
+ // Create the AST unit.
+ llvm::OwningPtr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, ArgBegin, ArgEnd, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+
+ AST->FileMgr.reset(new FileManager(FileSystemOptions()));
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->CompleteTranslationUnit = CompleteTranslationUnit;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
+ AST->NumStoredDiagnosticsInPreamble = StoredDiagnostics.size();
+ AST->StoredDiagnostics.swap(StoredDiagnostics);
+ AST->Invocation.reset(CI.take());
+ return AST->LoadFromCompilerInvocation(PrecompilePreamble) ? 0 : AST.take();
}
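The new block scope around the driver logic exists so that CaptureDroppedDiagnostics stops capturing before the ASTUnit takes ownership of the stored diagnostics; whatever the driver reported is then replayed via NumStoredDiagnosticsFromDriver. A toy version of that RAII capture-and-restore pattern, with invented types (`DiagEngine`, `CaptureScope`):

```cpp
#include <functional>
#include <string>
#include <vector>

// Hypothetical diagnostic engine with a swappable sink.
struct DiagEngine {
  std::function<void(const std::string &)> Sink;
  void report(const std::string &Msg) { if (Sink) Sink(Msg); }
};

// While alive, reroute diagnostics into a vector; restore the old sink
// on scope exit, just as the closing brace above ends the capture.
class CaptureScope {
  DiagEngine &D;
  std::function<void(const std::string &)> Saved;

public:
  CaptureScope(DiagEngine &Eng, std::vector<std::string> &Out)
      : D(Eng), Saved(Eng.Sink) {
    D.Sink = [&Out](const std::string &M) { Out.push_back(M); };
  }
  ~CaptureScope() { D.Sink = Saved; }
};
```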
bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
if (!Invocation.get())
return true;
- llvm::Timer *ReparsingTimer = 0;
- if (TimerGroup.get()) {
- ReparsingTimer = new llvm::Timer("Reparse", *TimerGroup);
- ReparsingTimer->startTimer();
- Timers.push_back(ReparsingTimer);
- }
-
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Reparsing " + getMainFileName());
+
// Remap files.
PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+ PPOpts.DisableStatCache = true;
for (PreprocessorOptions::remapped_file_buffer_iterator
R = PPOpts.remapped_file_buffer_begin(),
REnd = PPOpts.remapped_file_buffer_end();
@@ -1447,21 +1674,20 @@ bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation);
// Clear out the diagnostics state.
- if (!OverrideMainBuffer)
+ if (!OverrideMainBuffer) {
getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+ }
// Parse the sources
- bool Result = Parse(OverrideMainBuffer);
- if (ReparsingTimer)
- ReparsingTimer->stopTimer();
-
- if (ShouldCacheCodeCompletionResults) {
- if (CacheCodeCompletionCoolDown > 0)
- --CacheCodeCompletionCoolDown;
- else if (top_level_size() != NumTopLevelDeclsAtLastCompletionCache)
- CacheCodeCompletionResults();
- }
+ bool Result = Parse(OverrideMainBuffer);
+ // If we're caching global code-completion results, and the top-level
+ // declarations have changed, clear out the code-completion cache.
+ if (!Result && ShouldCacheCodeCompletionResults &&
+ CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
+ CacheCodeCompletionResults();
+
return Result;
}
@@ -1496,8 +1722,10 @@ namespace {
| (1 << (CodeCompletionContext::CCC_Expression - 1))
| (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
| (1 << (CodeCompletionContext::CCC_MemberAccess - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1));
-
+ | (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_Recovery - 1));
+
if (AST.getASTContext().getLangOptions().CPlusPlus)
NormalContexts |= (1 << (CodeCompletionContext::CCC_EnumTag - 1))
| (1 << (CodeCompletionContext::CCC_UnionTag - 1))
@@ -1514,19 +1742,23 @@ namespace {
unsigned NumCandidates) {
Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
}
+
+ virtual CodeCompletionAllocator &getAllocator() {
+ return Next.getAllocator();
+ }
};
}
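NormalContexts packs one CodeCompletionContext kind per bit, shifting by `Kind - 1` because the kinds start at 1, and membership is later tested with a single AND in ProcessCodeCompleteResults. A compilable sketch of the encoding; the enum here is a stand-in, not Clang's real kind list:

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for CodeCompletionContext::Kind; the real enumerators differ.
enum Kind { CCC_TopLevel = 1, CCC_Statement, CCC_Expression, CCC_MemberAccess };

constexpr std::uint64_t bit(Kind K) {
  return std::uint64_t(1) << (K - 1);  // kinds start at 1, bit positions at 0
}

int main() {
  // Contexts in which cached "normal" completions may be merged in.
  std::uint64_t NormalContexts =
      bit(CCC_TopLevel) | bit(CCC_Statement) | bit(CCC_Expression);

  Kind Current = CCC_MemberAccess;
  assert(!(NormalContexts & bit(Current)));  // member access is excluded
  return 0;
}
```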
/// \brief Helper function that computes which global names are hidden by the
/// local code-completion results.
-void CalculateHiddenNames(const CodeCompletionContext &Context,
- CodeCompletionResult *Results,
- unsigned NumResults,
- ASTContext &Ctx,
- llvm::StringSet<> &HiddenNames) {
+static void CalculateHiddenNames(const CodeCompletionContext &Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults,
+ ASTContext &Ctx,
+ llvm::StringSet<llvm::BumpPtrAllocator> &HiddenNames){
bool OnlyTagNames = false;
switch (Context.getKind()) {
- case CodeCompletionContext::CCC_Other:
+ case CodeCompletionContext::CCC_Recovery:
case CodeCompletionContext::CCC_TopLevel:
case CodeCompletionContext::CCC_ObjCInterface:
case CodeCompletionContext::CCC_ObjCImplementation:
@@ -1540,6 +1772,7 @@ void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_Type:
case CodeCompletionContext::CCC_Name:
case CodeCompletionContext::CCC_PotentiallyQualifiedName:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
break;
case CodeCompletionContext::CCC_EnumTag:
@@ -1556,6 +1789,8 @@ void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_NaturalLanguage:
case CodeCompletionContext::CCC_SelectorName:
case CodeCompletionContext::CCC_TypeQualifiers:
+ case CodeCompletionContext::CCC_Other:
+ case CodeCompletionContext::CCC_OtherWithMacros:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -1600,12 +1835,11 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
// Merge the results we were given with the results we cached.
bool AddedResult = false;
unsigned InContexts
- = (Context.getKind() == CodeCompletionContext::CCC_Other? NormalContexts
+ = (Context.getKind() == CodeCompletionContext::CCC_Recovery? NormalContexts
: (1 << (Context.getKind() - 1)));
// Contains the set of names that are hidden by "local" completion results.
- llvm::StringSet<> HiddenNames;
- llvm::SmallVector<CodeCompletionString *, 4> StringsToDestroy;
+ llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
typedef CodeCompletionResult Result;
llvm::SmallVector<Result, 8> AllResults;
for (ASTUnit::cached_completion_iterator
@@ -1638,6 +1872,7 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
if (!Context.getPreferredType().isNull()) {
if (C->Kind == CXCursor_MacroDefinition) {
Priority = getMacroUsagePriority(C->Completion->getTypedText(),
+ S.getLangOptions(),
Context.getPreferredType()->isAnyPointerType());
} else if (C->Type) {
CanQualType Expected
@@ -1663,11 +1898,12 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
// Create a new code-completion string that just contains the
// macro name, without its arguments.
- Completion = new CodeCompletionString;
- Completion->AddTypedTextChunk(C->Completion->getTypedText());
- StringsToDestroy.push_back(Completion);
+ CodeCompletionBuilder Builder(getAllocator(), CCP_CodePattern,
+ C->Availability);
+ Builder.AddTypedTextChunk(C->Completion->getTypedText());
CursorKind = CXCursor_NotImplemented;
Priority = CCP_CodePattern;
+ Completion = Builder.TakeString();
}
AllResults.push_back(Result(Completion, Priority, CursorKind,
@@ -1683,9 +1919,6 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
Next.ProcessCodeCompleteResults(S, Context, AllResults.data(),
AllResults.size());
-
- for (unsigned I = 0, N = StringsToDestroy.size(); I != N; ++I)
- delete StringsToDestroy[I];
}
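StringsToDestroy and its cleanup loop can disappear because completion strings are now produced by CodeCompletionBuilder out of memory owned by getAllocator(); nothing is deleted individually, and everything is reclaimed when the allocator goes away. A toy arena-plus-builder showing why, under the simplifying assumption that the arena outlives every string it hands out (`BumpArena` and `Builder` are invented names, not LLVM's BumpPtrAllocator API):

```cpp
#include <cstddef>
#include <memory>
#include <string>
#include <string_view>
#include <vector>

// Toy bump arena: allocations are released all at once in the destructor,
// so individual completion strings never need an explicit delete.
class BumpArena {
  std::vector<std::unique_ptr<char[]>> Slabs;

public:
  char *allocate(std::size_t N) {
    Slabs.push_back(std::make_unique<char[]>(N));
    return Slabs.back().get();
  }
};

class Builder {
  BumpArena &A;
  std::string Text;

public:
  explicit Builder(BumpArena &Arena) : A(Arena) {}
  void addTypedText(std::string_view S) { Text.append(S); }

  // "TakeString": copy the accumulated text into arena-owned memory.
  std::string_view take() {
    char *Mem = A.allocate(Text.size());
    Text.copy(Mem, Text.size());
    return {Mem, Text.size()};
  }
};
```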
@@ -1703,16 +1936,9 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
if (!Invocation.get())
return;
- llvm::Timer *CompletionTimer = 0;
- if (TimerGroup.get()) {
- llvm::SmallString<128> TimerName;
- llvm::raw_svector_ostream TimerNameOut(TimerName);
- TimerNameOut << "Code completion @ " << File << ":" << Line << ":"
- << Column;
- CompletionTimer = new llvm::Timer(TimerNameOut.str(), *TimerGroup);
- CompletionTimer->startTimer();
- Timers.push_back(CompletionTimer);
- }
+ SimpleTimer CompletionTimer(WantTiming);
+ CompletionTimer.setOutput("Code completion @ " + File + ":" +
+ llvm::Twine(Line) + ":" + llvm::Twine(Column));
CompilerInvocation CCInvocation(*Invocation);
FrontendOptions &FrontendOpts = CCInvocation.getFrontendOpts();
@@ -1727,11 +1953,6 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
FrontendOpts.CodeCompletionAt.Line = Line;
FrontendOpts.CodeCompletionAt.Column = Column;
- // Turn on spell-checking when performing code completion. It leads
- // to better results.
- unsigned SpellChecking = CCInvocation.getLangOpts().SpellChecking;
- CCInvocation.getLangOpts().SpellChecking = 1;
-
// Set the language options appropriately.
LangOpts = CCInvocation.getLangOpts();
@@ -1741,16 +1962,17 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
// Set up diagnostics, capturing any diagnostics produced.
Clang.setDiagnostics(&Diag);
+ ProcessWarningOptions(Diag, CCInvocation.getDiagnosticOpts());
CaptureDroppedDiagnostics Capture(true,
- Clang.getDiagnostics(),
+ Clang.getDiagnostics(),
StoredDiagnostics);
// Create the target instance.
+ Clang.getTargetOpts().Features = TargetFeatures;
Clang.setTarget(TargetInfo::CreateTargetInfo(Clang.getDiagnostics(),
Clang.getTargetOpts()));
if (!Clang.hasTarget()) {
Clang.takeInvocation();
- CCInvocation.getLangOpts().SpellChecking = SpellChecking;
return;
}
@@ -1783,11 +2005,12 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
// Use the code completion consumer we were given, but adding any cached
// code-completion results.
- AugmentedCodeCompleteConsumer
- AugmentedConsumer(*this, Consumer, FrontendOpts.ShowMacrosInCodeCompletion,
- FrontendOpts.ShowCodePatternsInCodeCompletion,
- FrontendOpts.ShowGlobalSymbolsInCodeCompletion);
- Clang.setCodeCompletionConsumer(&AugmentedConsumer);
+ AugmentedCodeCompleteConsumer *AugmentedConsumer
+ = new AugmentedCodeCompleteConsumer(*this, Consumer,
+ FrontendOpts.ShowMacrosInCodeCompletion,
+ FrontendOpts.ShowCodePatternsInCodeCompletion,
+ FrontendOpts.ShowGlobalSymbolsInCodeCompletion);
+ Clang.setCodeCompletionConsumer(AugmentedConsumer);
// If we have a precompiled preamble, try to use it. We only allow
// the use of the precompiled preamble if the completion
@@ -1808,6 +2031,10 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
+ PreprocessorOpts.DisableStatCache = true;
+ StoredDiagnostics.insert(StoredDiagnostics.end(),
+ this->StoredDiagnostics.begin(),
+ this->StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver);
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
@@ -1819,12 +2046,14 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
// The stored diagnostics have the old source manager. Copy them
// to our output set of stored diagnostics, updating the source
// manager to the one we were given.
- for (unsigned I = 0, N = this->StoredDiagnostics.size(); I != N; ++I) {
+ for (unsigned I = NumStoredDiagnosticsFromDriver,
+ N = this->StoredDiagnostics.size();
+ I < N; ++I) {
StoredDiagnostics.push_back(this->StoredDiagnostics[I]);
FullSourceLoc Loc(StoredDiagnostics[I].getLocation(), SourceMgr);
StoredDiagnostics[I].setLocation(Loc);
}
-
+
OwnedBuffers.push_back(OverrideMainBuffer);
} else {
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
@@ -1839,15 +2068,10 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
Act->EndSourceFile();
}
- if (CompletionTimer)
- CompletionTimer->stopTimer();
-
// Steal back our resources.
Clang.takeFileManager();
Clang.takeSourceManager();
Clang.takeInvocation();
- Clang.takeCodeCompletionConsumer();
- CCInvocation.getLangOpts().SpellChecking = SpellChecking;
}
bool ASTUnit::Save(llvm::StringRef File) {
@@ -1865,7 +2089,7 @@ bool ASTUnit::Save(llvm::StringRef File) {
std::vector<unsigned char> Buffer;
llvm::BitstreamWriter Stream(Buffer);
ASTWriter Writer(Stream);
- Writer.WriteAST(getSema(), 0, 0);
+ Writer.WriteAST(getSema(), 0, std::string(), 0);
// Write the generated bitstream to "Out".
if (!Buffer.empty())
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
index 53f7362..ee3fdd8 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
@@ -13,18 +13,20 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
// FIXME: put this somewhere else?
#ifndef S_ISDIR
@@ -189,8 +191,6 @@ class PTHWriter {
void Emit16(uint32_t V) { ::Emit16(Out, V); }
- void Emit24(uint32_t V) { ::Emit24(Out, V); }
-
void Emit32(uint32_t V) { ::Emit32(Out, V); }
void EmitBuf(const char *Ptr, unsigned NumBytes) {
@@ -300,7 +300,7 @@ PTHEntry PTHWriter::LexTokens(Lexer& L) {
ParsingPreprocessorDirective = false;
}
- if (Tok.is(tok::identifier)) {
+ if (Tok.is(tok::raw_identifier)) {
PP.LookUpIdentifierInfo(Tok);
EmitToken(Tok);
continue;
@@ -320,13 +320,13 @@ PTHEntry PTHWriter::LexTokens(Lexer& L) {
// this case, discard both tokens.
if (NextTok.isAtStartOfLine())
goto NextToken;
-
+
// The token is the start of a directive. Emit it.
EmitToken(Tok);
Tok = NextTok;
// Did we see 'include'/'import'/'include_next'?
- if (Tok.isNot(tok::identifier)) {
+ if (Tok.isNot(tok::raw_identifier)) {
EmitToken(Tok);
continue;
}
@@ -353,7 +353,7 @@ PTHEntry PTHWriter::LexTokens(Lexer& L) {
L.LexIncludeFilename(Tok);
L.setParsingPreprocessorDirective(false);
assert(!Tok.isAtStartOfLine());
- if (Tok.is(tok::identifier))
+ if (Tok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(Tok);
break;
@@ -476,8 +476,7 @@ void PTHWriter::GeneratePTH(const std::string &MainFile) {
const FileEntry *FE = C.Entry;
// FIXME: Handle files with non-absolute paths.
- llvm::sys::Path P(FE->getName());
- if (!P.isAbsolute())
+ if (llvm::sys::path::is_relative(FE->getName()))
continue;
const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
@@ -512,26 +511,27 @@ namespace {
/// as input to PTH generation. StatListener populates the PTHWriter's
/// file map with stat information for directories as well as negative stats.
/// Stat information for files is populated elsewhere.
-class StatListener : public StatSysCallCache {
+class StatListener : public FileSystemStatCache {
PTHMap &PM;
public:
StatListener(PTHMap &pm) : PM(pm) {}
~StatListener() {}
- int stat(const char *path, struct stat *buf) {
- int result = StatSysCallCache::stat(path, buf);
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ LookupResult Result = statChained(Path, StatBuf, FileDescriptor);
- if (result != 0) // Failed 'stat'.
- PM.insert(PTHEntryKeyVariant(path), PTHEntry());
- else if (S_ISDIR(buf->st_mode)) {
+ if (Result == CacheMissing) // Failed 'stat'.
+ PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
+ else if (S_ISDIR(StatBuf.st_mode)) {
// Only cache directories with absolute paths.
- if (!llvm::sys::Path(path).isAbsolute())
- return result;
+ if (llvm::sys::path::is_relative(Path))
+ return Result;
- PM.insert(PTHEntryKeyVariant(buf, path), PTHEntry());
+ PM.insert(PTHEntryKeyVariant(&StatBuf, Path), PTHEntry());
}
- return result;
+ return Result;
}
};
} // end anonymous namespace
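StatListener's new getStat follows the FileSystemStatCache protocol: ask the next cache in the chain first (statChained), then record what came back, negative results always and directory results only for absolute paths. A stripped-down sketch of a chained stat cache in that spirit; `StatCache` and its memoization policy are invented, and the real interface returns a richer LookupResult than a bool:

```cpp
#include <string>
#include <unordered_map>
#include <sys/stat.h>

// Hypothetical chained stat cache: each layer may answer itself or
// delegate to the next layer, memoizing whatever comes back.
class StatCache {
  StatCache *Next = nullptr;                    // next cache in the chain
  std::unordered_map<std::string, bool> Known;  // path -> exists?

public:
  void chain(StatCache *N) { Next = N; }

  bool getStat(const std::string &Path, struct stat &Buf) {
    auto It = Known.find(Path);
    if (It != Known.end() && !It->second)
      return false;                             // cached negative result
    bool OK = Next ? Next->getStat(Path, Buf)
                   : ::stat(Path.c_str(), &Buf) == 0;
    Known[Path] = OK;                           // remember hits and misses
    return OK;
  }
};
```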
@@ -541,9 +541,9 @@ void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) {
// Get the name of the main file.
const SourceManager &SrcMgr = PP.getSourceManager();
const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
- llvm::sys::Path MainFilePath(MainFile->getName());
+ llvm::SmallString<128> MainFilePath(MainFile->getName());
- MainFilePath.makeAbsolute();
+ llvm::sys::fs::make_absolute(MainFilePath);
// Create the PTHWriter.
PTHWriter PW(*OS, PP);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
index ce0b072..fd593de 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -27,14 +27,16 @@
#include "clang/Frontend/Utils.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Sema/CodeCompleteConsumer.h"
-#include "llvm/LLVMContext.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Timer.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
using namespace clang;
CompilerInstance::CompilerInstance()
@@ -44,10 +46,6 @@ CompilerInstance::CompilerInstance()
CompilerInstance::~CompilerInstance() {
}
-void CompilerInstance::setLLVMContext(llvm::LLVMContext *Value) {
- LLVMContext.reset(Value);
-}
-
void CompilerInstance::setInvocation(CompilerInvocation *Value) {
Invocation.reset(Value);
}
@@ -89,26 +87,8 @@ void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
}
// Diagnostics
-namespace {
- class BinaryDiagnosticSerializer : public DiagnosticClient {
- llvm::raw_ostream &OS;
- SourceManager *SourceMgr;
- public:
- explicit BinaryDiagnosticSerializer(llvm::raw_ostream &OS)
- : OS(OS), SourceMgr(0) { }
-
- virtual void HandleDiagnostic(Diagnostic::Level DiagLevel,
- const DiagnosticInfo &Info);
- };
-}
-
-void BinaryDiagnosticSerializer::HandleDiagnostic(Diagnostic::Level DiagLevel,
- const DiagnosticInfo &Info) {
- StoredDiagnostic(DiagLevel, Info).Serialize(OS);
-}
-
static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
- unsigned argc, char **argv,
+ unsigned argc, const char* const *argv,
Diagnostic &Diags) {
std::string ErrorInfo;
llvm::OwningPtr<llvm::raw_ostream> OS(
@@ -130,33 +110,24 @@ static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
Diags.setClient(new ChainedDiagnosticClient(Diags.takeClient(), Logger));
}
-void CompilerInstance::createDiagnostics(int Argc, char **Argv) {
- Diagnostics = createDiagnostics(getDiagnosticOpts(), Argc, Argv);
+void CompilerInstance::createDiagnostics(int Argc, const char* const *Argv,
+ DiagnosticClient *Client) {
+ Diagnostics = createDiagnostics(getDiagnosticOpts(), Argc, Argv, Client);
}
llvm::IntrusiveRefCntPtr<Diagnostic>
CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
- int Argc, char **Argv) {
- llvm::IntrusiveRefCntPtr<Diagnostic> Diags(new Diagnostic());
+ int Argc, const char* const *Argv,
+ DiagnosticClient *Client) {
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(new Diagnostic(DiagID));
// Create the diagnostic client for reporting errors or for
// implementing -verify.
- llvm::OwningPtr<DiagnosticClient> DiagClient;
- if (Opts.BinaryOutput) {
- if (llvm::sys::Program::ChangeStderrToBinary()) {
- // We weren't able to set standard error to binary, which is a
- // bit of a problem. So, just create a text diagnostic printer
- // to complain about this problem, and pretend that the user
- // didn't try to use binary output.
- Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
- Diags->Report(diag::err_fe_stderr_binary);
- return Diags;
- } else {
- Diags->setClient(new BinaryDiagnosticSerializer(llvm::errs()));
- }
- } else {
+ if (Client)
+ Diags->setClient(Client);
+ else
Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
- }
// Chain in -verify checker, if requested.
if (Opts.VerifyDiagnostics)
@@ -174,13 +145,13 @@ CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
// File Manager
void CompilerInstance::createFileManager() {
- FileMgr.reset(new FileManager());
+ FileMgr.reset(new FileManager(getFileSystemOpts()));
}
// Source Manager
-void CompilerInstance::createSourceManager() {
- SourceMgr.reset(new SourceManager(getDiagnostics()));
+void CompilerInstance::createSourceManager(FileManager &FileMgr) {
+ SourceMgr.reset(new SourceManager(getDiagnostics(), FileMgr));
}
// Preprocessor
@@ -231,6 +202,16 @@ CompilerInstance::createPreprocessor(Diagnostic &Diags,
if (!DepOpts.OutputFile.empty())
AttachDependencyFileGen(*PP, DepOpts);
+ // Handle generating header include information, if requested.
+ if (DepOpts.ShowHeaderIncludes)
+ AttachHeaderIncludeGen(*PP);
+ if (!DepOpts.HeaderIncludeOutputFile.empty()) {
+ llvm::StringRef OutputPath = DepOpts.HeaderIncludeOutputFile;
+ if (OutputPath == "-")
+ OutputPath = "";
+ AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/true, OutputPath);
+ }
+
return PP;
}
@@ -248,12 +229,16 @@ void CompilerInstance::createASTContext() {
void CompilerInstance::createPCHExternalASTSource(llvm::StringRef Path,
bool DisablePCHValidation,
+ bool DisableStatCache,
void *DeserializationListener){
llvm::OwningPtr<ExternalASTSource> Source;
+ bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
- DisablePCHValidation,
+ DisablePCHValidation,
+ DisableStatCache,
getPreprocessor(), getASTContext(),
- DeserializationListener));
+ DeserializationListener,
+ Preamble));
getASTContext().setExternalSource(Source);
}
@@ -261,17 +246,20 @@ ExternalASTSource *
CompilerInstance::createPCHExternalASTSource(llvm::StringRef Path,
const std::string &Sysroot,
bool DisablePCHValidation,
+ bool DisableStatCache,
Preprocessor &PP,
ASTContext &Context,
- void *DeserializationListener) {
+ void *DeserializationListener,
+ bool Preamble) {
llvm::OwningPtr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, &Context,
Sysroot.empty() ? 0 : Sysroot.c_str(),
- DisablePCHValidation));
+ DisablePCHValidation, DisableStatCache));
Reader->setDeserializationListener(
static_cast<ASTDeserializationListener *>(DeserializationListener));
- switch (Reader->ReadAST(Path)) {
+ switch (Reader->ReadAST(Path,
+ Preamble ? ASTReader::Preamble : ASTReader::PCH)) {
case ASTReader::Success:
// Set the predefines buffer as suggested by the PCH reader. Typically, the
// predefines buffer will be empty.
@@ -316,7 +304,6 @@ void CompilerInstance::createCodeCompletionConsumer() {
CompletionConsumer.reset(
createCodeCompletionConsumer(getPreprocessor(),
Loc.FileName, Loc.Line, Loc.Column,
- getFrontendOpts().DebugCodeCompletionPrinter,
getFrontendOpts().ShowMacrosInCodeCompletion,
getFrontendOpts().ShowCodePatternsInCodeCompletion,
getFrontendOpts().ShowGlobalSymbolsInCodeCompletion,
@@ -345,7 +332,6 @@ CompilerInstance::createCodeCompletionConsumer(Preprocessor &PP,
const std::string &Filename,
unsigned Line,
unsigned Column,
- bool UseDebugPrinter,
bool ShowMacros,
bool ShowCodePatterns,
bool ShowGlobals,
@@ -354,11 +340,7 @@ CompilerInstance::createCodeCompletionConsumer(Preprocessor &PP,
return 0;
// Set up the creation routine for code-completion.
- if (UseDebugPrinter)
- return new PrintingCodeCompleteConsumer(ShowMacros, ShowCodePatterns,
- ShowGlobals, OS);
- else
- return new CIndexCodeCompleteConsumer(ShowMacros, ShowCodePatterns,
+ return new PrintingCodeCompleteConsumer(ShowMacros, ShowCodePatterns,
ShowGlobals, OS);
}
@@ -370,18 +352,34 @@ void CompilerInstance::createSema(bool CompleteTranslationUnit,
// Output Files
-void CompilerInstance::addOutputFile(llvm::StringRef Path,
- llvm::raw_ostream *OS) {
- assert(OS && "Attempt to add empty stream to output list!");
- OutputFiles.push_back(std::make_pair(Path, OS));
+void CompilerInstance::addOutputFile(const OutputFile &OutFile) {
+ assert(OutFile.OS && "Attempt to add empty stream to output list!");
+ OutputFiles.push_back(OutFile);
}
void CompilerInstance::clearOutputFiles(bool EraseFiles) {
- for (std::list< std::pair<std::string, llvm::raw_ostream*> >::iterator
+ for (std::list<OutputFile>::iterator
it = OutputFiles.begin(), ie = OutputFiles.end(); it != ie; ++it) {
- delete it->second;
- if (EraseFiles && !it->first.empty())
- llvm::sys::Path(it->first).eraseFromDisk();
+ delete it->OS;
+ if (!it->TempFilename.empty()) {
+ llvm::sys::Path TempPath(it->TempFilename);
+ if (EraseFiles)
+ TempPath.eraseFromDisk();
+ else {
+ std::string Error;
+ llvm::sys::Path NewOutFile(it->Filename);
+ // If '-working-directory' was passed, the output filename should be
+ // relative to that.
+ FileManager::FixupRelativePath(NewOutFile, getFileSystemOpts());
+ if (TempPath.renamePathOnDisk(NewOutFile, &Error)) {
+ getDiagnostics().Report(diag::err_fe_unable_to_rename_temp)
+ << it->TempFilename << it->Filename << Error;
+ TempPath.eraseFromDisk();
+ }
+ }
+ } else if (!it->Filename.empty() && EraseFiles)
+ llvm::sys::Path(it->Filename).eraseFromDisk();
+
}
OutputFiles.clear();
}
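clearOutputFiles is the commit/rollback half of a write-to-temporary protocol: createOutputFile (below) points the stream at a sibling temp file, and only a successful compile renames it over the real output, so a crash or failed run never leaves a truncated file behind. A minimal sketch of the protocol using std::filesystem as a stand-in for llvm::sys::Path:

```cpp
#include <filesystem>
#include <fstream>
#include <string>

namespace fs = std::filesystem;

// Write via a sibling temp file, then rename into place on success.
// rename() within one filesystem is atomic, so readers never observe
// a half-written output: the property clearOutputFiles relies on.
bool writeOutput(const fs::path &Target, const std::string &Data) {
  fs::path Temp = Target;
  Temp += ".tmp";                      // sibling path => same filesystem

  {
    std::ofstream OS(Temp, std::ios::binary);
    if (!OS.write(Data.data(), static_cast<std::streamsize>(Data.size())))
      return false;                    // failed write: Target untouched
  }

  std::error_code EC;
  fs::rename(Temp, Target, EC);        // commit
  if (EC) {
    fs::remove(Temp);                  // rollback: discard the temporary
    return false;
  }
  return true;
}
```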
@@ -391,18 +389,20 @@ CompilerInstance::createDefaultOutputFile(bool Binary,
llvm::StringRef InFile,
llvm::StringRef Extension) {
return createOutputFile(getFrontendOpts().OutputFile, Binary,
- InFile, Extension);
+ /*RemoveFileOnSignal=*/true, InFile, Extension);
}
llvm::raw_fd_ostream *
CompilerInstance::createOutputFile(llvm::StringRef OutputPath,
- bool Binary,
+ bool Binary, bool RemoveFileOnSignal,
llvm::StringRef InFile,
llvm::StringRef Extension) {
- std::string Error, OutputPathName;
+ std::string Error, OutputPathName, TempPathName;
llvm::raw_fd_ostream *OS = createOutputFile(OutputPath, Error, Binary,
+ RemoveFileOnSignal,
InFile, Extension,
- &OutputPathName);
+ &OutputPathName,
+ &TempPathName);
if (!OS) {
getDiagnostics().Report(diag::err_fe_unable_to_open_output)
<< OutputPath << Error;
@@ -411,7 +411,8 @@ CompilerInstance::createOutputFile(llvm::StringRef OutputPath,
// Add the output file -- but don't try to remove "-", since this means we are
// using stdin.
- addOutputFile((OutputPathName != "-") ? OutputPathName : "", OS);
+ addOutputFile(OutputFile((OutputPathName != "-") ? OutputPathName : "",
+ TempPathName, OS));
return OS;
}
@@ -420,10 +421,12 @@ llvm::raw_fd_ostream *
CompilerInstance::createOutputFile(llvm::StringRef OutputPath,
std::string &Error,
bool Binary,
+ bool RemoveFileOnSignal,
llvm::StringRef InFile,
llvm::StringRef Extension,
- std::string *ResultPathName) {
- std::string OutFile;
+ std::string *ResultPathName,
+ std::string *TempPathName) {
+ std::string OutFile, TempFile;
if (!OutputPath.empty()) {
OutFile = OutputPath;
} else if (InFile == "-") {
@@ -436,15 +439,39 @@ CompilerInstance::createOutputFile(llvm::StringRef OutputPath,
} else {
OutFile = "-";
}
+
+ if (OutFile != "-") {
+ llvm::sys::Path OutPath(OutFile);
+ // Only create the temporary if we can actually write to OutPath, otherwise
+ // we want to fail early.
+ bool Exists;
+ if ((llvm::sys::fs::exists(OutPath.str(), Exists) || !Exists) ||
+ (OutPath.isRegularFile() && OutPath.canWrite())) {
+ // Create a temporary file.
+ llvm::sys::Path TempPath(OutFile);
+ if (!TempPath.createTemporaryFileOnDisk())
+ TempFile = TempPath.str();
+ }
+ }
+
+ std::string OSFile = OutFile;
+ if (!TempFile.empty())
+ OSFile = TempFile;
llvm::OwningPtr<llvm::raw_fd_ostream> OS(
- new llvm::raw_fd_ostream(OutFile.c_str(), Error,
+ new llvm::raw_fd_ostream(OSFile.c_str(), Error,
(Binary ? llvm::raw_fd_ostream::F_Binary : 0)));
if (!Error.empty())
return 0;
+ // Make sure the out stream file gets removed if we crash.
+ if (RemoveFileOnSignal)
+ llvm::sys::RemoveFileOnSignal(llvm::sys::Path(OSFile));
+
if (ResultPathName)
*ResultPathName = OutFile;
+ if (TempPathName)
+ *TempPathName = TempFile;
return OS.take();
}
@@ -461,23 +488,32 @@ bool CompilerInstance::InitializeSourceManager(llvm::StringRef InputFile,
FileManager &FileMgr,
SourceManager &SourceMgr,
const FrontendOptions &Opts) {
- // Figure out where to get and map in the main file.
- if (InputFile != "-") {
+ // Figure out where to get and map in the main file, unless it's already
+ // been created (e.g., by a precompiled preamble).
+ if (!SourceMgr.getMainFileID().isInvalid()) {
+ // Do nothing: the main file has already been set.
+ } else if (InputFile != "-") {
const FileEntry *File = FileMgr.getFile(InputFile);
- if (File) SourceMgr.createMainFileID(File);
- if (SourceMgr.getMainFileID().isInvalid()) {
+ if (!File) {
Diags.Report(diag::err_fe_error_reading) << InputFile;
return false;
}
+ SourceMgr.createMainFileID(File);
} else {
- llvm::MemoryBuffer *SB = llvm::MemoryBuffer::getSTDIN();
- if (SB) SourceMgr.createMainFileIDForMemBuffer(SB);
- if (SourceMgr.getMainFileID().isInvalid()) {
+ llvm::OwningPtr<llvm::MemoryBuffer> SB;
+ if (llvm::MemoryBuffer::getSTDIN(SB)) {
+ // FIXME: Give ec.message() in this diag.
Diags.Report(diag::err_fe_error_reading_stdin);
return false;
}
+ const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
+ SB->getBufferSize(), 0);
+ SourceMgr.createMainFileID(File);
+ SourceMgr.overrideFileContents(File, SB.take());
}
+ assert(!SourceMgr.getMainFileID().isInvalid() &&
+ "Couldn't establish MainFileID!");
return true;
}
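For stdin, the new code no longer creates the main FileID straight from a memory buffer; it registers a virtual file entry of matching size and then overrides that entry's contents, so the rest of the pipeline sees an ordinary FileEntry. A much-simplified sketch of the same trick; `MiniFileManager` and everything in it is invented for illustration:

```cpp
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <utility>

// A "virtual file" has a name and contents but no on-disk backing;
// later lookups by name see the overridden buffer.
struct MiniFileManager {
  std::map<std::string, std::string> Overrides;

  const std::string *getVirtualFile(const std::string &Name,
                                    std::string Contents) {
    auto &Slot = Overrides[Name];   // create (or reuse) the entry
    Slot = std::move(Contents);     // overrideFileContents, in effect
    return &Slot;
  }
};

int main() {
  std::ostringstream SS;
  SS << std::cin.rdbuf();           // slurp stdin, as getSTDIN() does

  MiniFileManager FM;
  const std::string *Main = FM.getVirtualFile("<stdin>", SS.str());
  std::cout << "main file is " << Main->size() << " bytes\n";
}
```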
@@ -529,9 +565,10 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
if (getDiagnosticOpts().ShowCarets) {
- unsigned NumWarnings = getDiagnostics().getNumWarnings();
- unsigned NumErrors = getDiagnostics().getNumErrors() -
- getDiagnostics().getNumErrorsSuppressed();
+ // We can have multiple diagnostics sharing one diagnostic client.
+ // Get the total number of warnings/errors from the client.
+ unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
+ unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
if (NumWarnings)
OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
@@ -548,15 +585,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
OS << "\n";
}
- // Return the appropriate status when verifying diagnostics.
- //
- // FIXME: If we could make getNumErrors() do the right thing, we wouldn't need
- // this.
- if (getDiagnosticOpts().VerifyDiagnostics)
- return !static_cast<VerifyDiagnosticsClient&>(
- getDiagnosticClient()).HadErrors();
-
- return !getDiagnostics().getNumErrors();
+ return !getDiagnostics().getClient()->getNumErrors();
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index bf8b867..beb5ad2 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -10,6 +10,7 @@
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Version.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/CC1Options.h"
@@ -25,8 +26,8 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
using namespace clang;
static const char *getAnalysisName(Analyses Kind) {
@@ -99,6 +100,8 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
Res.push_back("-analyzer-display-progress");
if (Opts.AnalyzeNestedBlocks)
Res.push_back("-analyzer-opt-analyze-nested-blocks");
+ if (Opts.AnalyzerStats)
+ Res.push_back("-analyzer-stats");
if (Opts.EagerlyAssume)
Res.push_back("-analyzer-eagerly-assume");
if (!Opts.PurgeDead)
@@ -113,8 +116,17 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
Res.push_back("-analyzer-experimental-checks");
if (Opts.EnableExperimentalInternalChecks)
Res.push_back("-analyzer-experimental-internal-checks");
- if (Opts.IdempotentOps)
- Res.push_back("-analyzer-check-idempotent-operations");
+ if (Opts.BufferOverflows)
+ Res.push_back("-analyzer-check-buffer-overflows");
+
+ for (unsigned i = 0, e = Opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = Opts.CheckersControlList[i];
+ if (opt.second)
+ Res.push_back("-analyzer-checker");
+ else
+ Res.push_back("-analyzer-disable-checker");
+ Res.push_back(opt.first);
+ }
}
static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
@@ -150,7 +162,7 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
// TimePasses is only derived.
// UnitAtATime is unused.
// Inlining is only derived.
-
+
// UnrollLoops is derived, but also accepts an option, no
// harm in pushing it back here.
if (Opts.UnrollLoops)
@@ -195,6 +207,10 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-fobjc-dispatch-method=non-legacy");
break;
}
+ if (Opts.NumRegisterParameters) {
+ Res.push_back("-mregparm");
+ Res.push_back(llvm::utostr(Opts.NumRegisterParameters));
+ }
if (Opts.RelaxAll)
Res.push_back("-mrelax-all");
if (Opts.SoftFloat)
@@ -213,6 +229,12 @@ static void DependencyOutputOptsToArgs(const DependencyOutputOptions &Opts,
std::vector<std::string> &Res) {
if (Opts.IncludeSystemHeaders)
Res.push_back("-sys-header-deps");
+ if (Opts.ShowHeaderIncludes)
+ Res.push_back("-H");
+ if (!Opts.HeaderIncludeOutputFile.empty()) {
+ Res.push_back("-header-include-file");
+ Res.push_back(Opts.HeaderIncludeOutputFile);
+ }
if (Opts.UsePhonyTargets)
Res.push_back("-MP");
if (!Opts.OutputFile.empty()) {
@@ -251,8 +273,6 @@ static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
Res.push_back("-fcolor-diagnostics");
if (Opts.VerifyDiagnostics)
Res.push_back("-verify");
- if (Opts.BinaryOutput)
- Res.push_back("-fdiagnostics-binary");
if (Opts.ShowOptionNames)
Res.push_back("-fdiagnostics-show-option");
if (Opts.ShowCategories == 1)
@@ -301,6 +321,7 @@ static const char *getInputKindName(InputKind Kind) {
case IK_ObjC: return "objective-c";
case IK_ObjCXX: return "objective-c++";
case IK_OpenCL: return "cl";
+ case IK_CUDA: return "cuda";
case IK_PreprocessedC: return "cpp-output";
case IK_PreprocessedCXX: return "c++-cpp-output";
case IK_PreprocessedObjC: return "objective-c-cpp-output";
@@ -314,10 +335,10 @@ static const char *getInputKindName(InputKind Kind) {
static const char *getActionName(frontend::ActionKind Kind) {
switch (Kind) {
case frontend::PluginAction:
- case frontend::InheritanceView:
llvm_unreachable("Invalid kind!");
case frontend::ASTDump: return "-ast-dump";
+ case frontend::ASTDumpXML: return "-ast-dump-xml";
case frontend::ASTPrint: return "-ast-print";
case frontend::ASTPrintXML: return "-ast-print-xml";
case frontend::ASTView: return "-ast-view";
@@ -351,10 +372,16 @@ static const char *getActionName(frontend::ActionKind Kind) {
return 0;
}
+static void FileSystemOptsToArgs(const FileSystemOptions &Opts,
+ std::vector<std::string> &Res) {
+ if (!Opts.WorkingDir.empty()) {
+ Res.push_back("-working-directory");
+ Res.push_back(Opts.WorkingDir);
+ }
+}
+
static void FrontendOptsToArgs(const FrontendOptions &Opts,
std::vector<std::string> &Res) {
- if (!Opts.DebugCodeCompletionPrinter)
- Res.push_back("-no-code-completion-debug-printer");
if (Opts.DisableFree)
Res.push_back("-disable-free");
if (Opts.RelocatablePCH)
@@ -397,18 +424,13 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-o");
Res.push_back(Opts.OutputFile);
}
- if (!Opts.ViewClassInheritance.empty()) {
- Res.push_back("-cxx-inheritance-view");
- Res.push_back(Opts.ViewClassInheritance);
- }
if (!Opts.CodeCompletionAt.FileName.empty()) {
Res.push_back("-code-completion-at");
Res.push_back(Opts.CodeCompletionAt.FileName + ":" +
llvm::utostr(Opts.CodeCompletionAt.Line) + ":" +
llvm::utostr(Opts.CodeCompletionAt.Column));
}
- if (Opts.ProgramAction != frontend::InheritanceView &&
- Opts.ProgramAction != frontend::PluginAction)
+ if (Opts.ProgramAction != frontend::PluginAction)
Res.push_back(getActionName(Opts.ProgramAction));
if (!Opts.ActionName.empty()) {
Res.push_back("-plugin");
@@ -422,6 +444,14 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-load");
Res.push_back(Opts.Plugins[i]);
}
+ for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
+ Res.push_back("-add-plugin");
+ Res.push_back(Opts.AddPluginActions[i]);
+ for(unsigned ai = 0, ae = Opts.AddPluginArgs.size(); ai != ae; ++ai) {
+ Res.push_back("-plugin-arg-" + Opts.AddPluginActions[i]);
+ Res.push_back(Opts.AddPluginArgs[i][ai]);
+ }
+ }
for (unsigned i = 0, e = Opts.ASTMergeFiles.size(); i != e; ++i) {
Res.push_back("-ast-merge");
Res.push_back(Opts.ASTMergeFiles[i]);
@@ -443,6 +473,11 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
Res.push_back(Opts.Sysroot);
}
+ for (unsigned i = 0, e = Opts.CXXSystemIncludes.size(); i != e; ++i) {
+ Res.push_back("-cxx-system-include");
+ Res.push_back(Opts.CXXSystemIncludes[i]);
+ }
+
/// User specified include entries.
for (unsigned i = 0, e = Opts.UserEntries.size(); i != e; ++i) {
const HeaderSearchOptions::Entry &E = Opts.UserEntries[i];
@@ -526,12 +561,16 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fgnu-keywords");
if (Opts.Microsoft)
Res.push_back("-fms-extensions");
+ if (Opts.MSCVersion != 0)
+ Res.push_back("-fmsc-version=" + llvm::utostr(Opts.MSCVersion));
if (Opts.Borland)
Res.push_back("-fborland-extensions");
if (Opts.ObjCNonFragileABI)
Res.push_back("-fobjc-nonfragile-abi");
if (Opts.ObjCNonFragileABI2)
- Res.push_back("-fobjc-nonfragile-abi2");
+ Res.push_back("-fobjc-nonfragile-abi");
+ if (Opts.ObjCDefaultSynthProperties)
+ Res.push_back("-fobjc-default-synthesize-properties");
// NoInline is implicit.
if (!Opts.CXXOperatorNames)
Res.push_back("-fno-operator-names");
@@ -551,8 +590,12 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fexceptions");
if (Opts.SjLjExceptions)
Res.push_back("-fsjlj-exceptions");
+ if (!Opts.ObjCExceptions)
+ Res.push_back("-fno-objc-exceptions");
if (!Opts.RTTI)
Res.push_back("-fno-rtti");
+ if (Opts.MSBitfields)
+ Res.push_back("-mms-bitfields");
if (!Opts.NeXTRuntime)
Res.push_back("-fgnu-runtime");
if (Opts.Freestanding)
@@ -576,7 +619,12 @@ static void LangOptsToArgs(const LangOptions &Opts,
switch (Opts.getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined: break;
case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break;
- case LangOptions::SOB_Trapping: Res.push_back("-ftrapv"); break;
+ case LangOptions::SOB_Trapping:
+ Res.push_back("-ftrapv"); break;
+ if (!Opts.OverflowHandler.empty()) {
+ Res.push_back("-ftrapv-handler");
+ Res.push_back(Opts.OverflowHandler);
+ }
}
if (Opts.HeinousExtensions)
Res.push_back("-fheinous-gnu-extensions");
@@ -590,8 +638,6 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fdump-vtable-layouts");
if (Opts.NoBitFieldTypeAlign)
Res.push_back("-fno-bitfield-type-alignment");
- if (Opts.SjLjExceptions)
- Res.push_back("-fsjlj-exceptions");
if (Opts.PICLevel) {
Res.push_back("-pic-level");
Res.push_back(llvm::utostr(Opts.PICLevel));
@@ -616,19 +662,22 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fobjc-gc-only");
}
}
- if (Opts.getVisibilityMode() != LangOptions::Default) {
+ if (Opts.AppleKext)
+ Res.push_back("-fapple-kext");
+
+ if (Opts.getVisibilityMode() != DefaultVisibility) {
Res.push_back("-fvisibility");
- if (Opts.getVisibilityMode() == LangOptions::Hidden) {
+ if (Opts.getVisibilityMode() == HiddenVisibility) {
Res.push_back("hidden");
} else {
- assert(Opts.getVisibilityMode() == LangOptions::Protected &&
+ assert(Opts.getVisibilityMode() == ProtectedVisibility &&
"Invalid visibility!");
Res.push_back("protected");
}
}
if (Opts.InlineVisibilityHidden)
Res.push_back("-fvisibility-inlines-hidden");
-
+
if (Opts.getStackProtectorMode() != 0) {
Res.push_back("-stack-protector");
Res.push_back(llvm::utostr(Opts.getStackProtectorMode()));
@@ -694,8 +743,6 @@ static void PreprocessorOutputOptsToArgs(const PreprocessorOutputOptions &Opts,
else if (!Opts.ShowCPP && Opts.ShowMacros)
Res.push_back("-dM");
- if (Opts.ShowHeaderIncludes)
- Res.push_back("-H");
if (!Opts.ShowLineMarkers)
Res.push_back("-P");
if (Opts.ShowComments)
@@ -735,6 +782,7 @@ void CompilerInvocation::toArgs(std::vector<std::string> &Res) {
CodeGenOptsToArgs(getCodeGenOpts(), Res);
DependencyOutputOptsToArgs(getDependencyOutputOpts(), Res);
DiagnosticOptsToArgs(getDiagnosticOpts(), Res);
+ FileSystemOptsToArgs(getFileSystemOpts(), Res);
FrontendOptsToArgs(getFrontendOpts(), Res);
HeaderSearchOptsToArgs(getHeaderSearchOpts(), Res);
LangOptsToArgs(getLangOpts(), Res);
@@ -752,6 +800,16 @@ using namespace clang::driver::cc1options;
//
+static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
+ Diagnostic &Diags) {
+ unsigned DefaultOpt = 0;
+ if (IK == IK_OpenCL && !Args.hasArg(OPT_cl_opt_disable))
+ DefaultOpt = 2;
+ // -Os implies -O2
+ return Args.hasArg(OPT_Os) ? 2 :
+ Args.getLastArgIntValue(OPT_O, DefaultOpt, Diags);
+}
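getOptimizationLevel folds three inputs into one number: -Os forces level 2, otherwise the last -O wins, and the default is 0 except for OpenCL, which defaults to 2 unless -cl-opt-disable is given. The same precedence as a tiny pure function; the OPT_* spellings are Clang's, everything else here is illustrative:

```cpp
// Illustrative restatement of the precedence implemented above:
// -Os wins outright; otherwise the last -O<n>; otherwise the default,
// which OpenCL raises to 2 unless -cl-opt-disable is present.
unsigned optLevel(bool IsOpenCL, bool ClOptDisable, bool HasOs,
                  int LastO /* -1 when no -O was given */) {
  unsigned Default = (IsOpenCL && !ClOptDisable) ? 2u : 0u;
  if (HasOs)
    return 2;  // -Os implies -O2
  return LastO >= 0 ? static_cast<unsigned>(LastO) : Default;
}
```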
+
static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Diagnostic &Diags) {
using namespace cc1options;
@@ -812,33 +870,44 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
+ Opts.AnalyzerStats = Args.hasArg(OPT_analysis_AnalyzerStats);
Opts.PurgeDead = !Args.hasArg(OPT_analyzer_no_purge_dead);
Opts.EagerlyAssume = Args.hasArg(OPT_analyzer_eagerly_assume);
Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
+ Opts.CFGAddImplicitDtors = Args.hasArg(OPT_analysis_CFGAddImplicitDtors);
+ Opts.CFGAddInitializers = Args.hasArg(OPT_analysis_CFGAddInitializers);
Opts.EnableExperimentalChecks = Args.hasArg(OPT_analyzer_experimental_checks);
Opts.EnableExperimentalInternalChecks =
Args.hasArg(OPT_analyzer_experimental_internal_checks);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
- Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 3, Diags);
+ Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
+ Opts.EagerlyTrimEGraph = !Args.hasArg(OPT_analyzer_no_eagerly_trim_egraph);
Opts.InlineCall = Args.hasArg(OPT_analyzer_inline_call);
- Opts.IdempotentOps = Args.hasArg(OPT_analysis_WarnIdempotentOps);
+ Opts.BufferOverflows = Args.hasArg(OPT_analysis_WarnBufferOverflows);
+
+ Opts.CheckersControlList.clear();
+ for (arg_iterator it = Args.filtered_begin(OPT_analyzer_checker,
+ OPT_analyzer_disable_checker),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+ bool enable = (A->getOption().getID() == OPT_analyzer_checker);
+ Opts.CheckersControlList.push_back(std::make_pair(A->getValue(Args),
+ enable));
+ }
}
-static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
+static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diagnostic &Diags) {
using namespace cc1options;
- // -Os implies -O2
- if (Args.hasArg(OPT_Os))
- Opts.OptimizationLevel = 2;
- else {
- Opts.OptimizationLevel = Args.getLastArgIntValue(OPT_O, 0, Diags);
- if (Opts.OptimizationLevel > 3) {
- Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_O)->getAsString(Args) << Opts.OptimizationLevel;
- Opts.OptimizationLevel = 3;
- }
+
+ Opts.OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
+ if (Opts.OptimizationLevel > 3) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_O)->getAsString(Args) << Opts.OptimizationLevel;
+ Opts.OptimizationLevel = 3;
}
// We must always run at least the always inlining pass.
@@ -846,8 +915,10 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
: CodeGenOptions::OnlyAlwaysInlining;
Opts.DebugInfo = Args.hasArg(OPT_g);
+ Opts.LimitDebugInfo = Args.hasArg(OPT_flimit_debug_info);
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
+ Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
@@ -855,7 +926,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.OptimizeSize = Args.hasArg(OPT_Os);
Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
Args.hasArg(OPT_ffreestanding));
- Opts.UnrollLoops = Args.hasArg(OPT_funroll_loops) ||
+ Opts.UnrollLoops = Args.hasArg(OPT_funroll_loops) ||
(Opts.OptimizationLevel > 1 && !Opts.OptimizeSize);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
@@ -866,11 +937,17 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.DisableFPElim = Args.hasArg(OPT_mdisable_fp_elim);
Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
Opts.HiddenWeakVTables = Args.hasArg(OPT_fhidden_weak_vtables);
+ Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
+ Opts.NoInfsFPMath = Opts.NoNaNsFPMath = Args.hasArg(OPT_cl_finite_math_only)||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
+ Opts.NumRegisterParameters = Args.getLastArgIntValue(OPT_mregparm, 0, Diags);
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
@@ -881,6 +958,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
+ Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
llvm::StringRef Name = A->getValue(Args);
@@ -903,6 +981,8 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
Opts.Targets = Args.getAllArgValues(OPT_MT);
Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
+ Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
+ Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
}
static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
@@ -942,18 +1022,17 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fdiagnostics_show_category)->getAsString(Args)
<< ShowCategory;
-
+
Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
Opts.VerifyDiagnostics = Args.hasArg(OPT_verify);
- Opts.BinaryOutput = Args.hasArg(OPT_fdiagnostics_binary);
Opts.ErrorLimit = Args.getLastArgIntValue(OPT_ferror_limit, 0, Diags);
Opts.MacroBacktraceLimit
- = Args.getLastArgIntValue(OPT_fmacro_backtrace_limit,
+ = Args.getLastArgIntValue(OPT_fmacro_backtrace_limit,
DiagnosticOptions::DefaultMacroBacktraceLimit, Diags);
Opts.TemplateBacktraceLimit
- = Args.getLastArgIntValue(OPT_ftemplate_backtrace_limit,
- DiagnosticOptions::DefaultTemplateBacktraceLimit,
+ = Args.getLastArgIntValue(OPT_ftemplate_backtrace_limit,
+ DiagnosticOptions::DefaultTemplateBacktraceLimit,
Diags);
Opts.TabStop = Args.getLastArgIntValue(OPT_ftabstop,
DiagnosticOptions::DefaultTabStop, Diags);
@@ -967,6 +1046,10 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.Warnings = Args.getAllArgValues(OPT_W);
}
+static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
+ Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
+}
+
static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Diagnostic &Diags) {
using namespace cc1options;
@@ -977,6 +1060,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
assert(0 && "Invalid option in group!");
case OPT_ast_dump:
Opts.ProgramAction = frontend::ASTDump; break;
+ case OPT_ast_dump_xml:
+ Opts.ProgramAction = frontend::ASTDumpXML; break;
case OPT_ast_print:
Opts.ProgramAction = frontend::ASTPrint; break;
case OPT_ast_print_xml:
@@ -1049,6 +1134,16 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
}
}
+ Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin);
+ Opts.AddPluginArgs.resize(Opts.AddPluginActions.size());
+ for (int i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
+ for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
+ end = Args.filtered_end(); it != end; ++it) {
+ if ((*it)->getValue(Args, 0) == Opts.AddPluginActions[i])
+ Opts.AddPluginArgs[i].push_back((*it)->getValue(Args, 1));
+ }
+ }
+
if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
Opts.CodeCompletionAt =
ParsedSourceLocation::FromString(A->getValue(Args));
@@ -1056,8 +1151,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue(Args);
}
- Opts.DebugCodeCompletionPrinter =
- !Args.hasArg(OPT_no_code_completion_debug_printer);
Opts.DisableFree = Args.hasArg(OPT_disable_free);
Opts.OutputFile = Args.getLastArgValue(OPT_o);
@@ -1073,7 +1166,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ShowStats = Args.hasArg(OPT_print_stats);
Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
Opts.ShowVersion = Args.hasArg(OPT_version);
- Opts.ViewClassInheritance = Args.getLastArgValue(OPT_cxx_inheritance_view);
Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
@@ -1086,6 +1178,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("cl", IK_OpenCL)
.Case("c", IK_C)
.Case("cl", IK_OpenCL)
+ .Case("cuda", IK_CUDA)
.Case("c++", IK_CXX)
.Case("objective-c", IK_ObjC)
.Case("objective-c++", IK_ObjCXX)
@@ -1145,6 +1238,7 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
using namespace cc1options;
+ Opts.CXXSystemIncludes = Args.getAllArgValues(OPT_cxx_system_include);
Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
@@ -1188,10 +1282,8 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
// FIXME: Need options for the various environment variables!
}
-static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
- Diagnostic &Diags) {
- // FIXME: Cleanup per-file based stuff.
-
+void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
+ LangStandard::Kind LangStd) {
  // Set some properties which depend solely on the input kind; it would be nice
// to move these to the language standard, and have the driver resolve the
// input kind + language standard.
@@ -1204,18 +1296,6 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ObjC1 = Opts.ObjC2 = 1;
}
- LangStandard::Kind LangStd = LangStandard::lang_unspecified;
- if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
- LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
-#define LANGSTANDARD(id, name, desc, features) \
- .Case(name, LangStandard::lang_##id)
-#include "clang/Frontend/LangStandards.def"
- .Default(LangStandard::lang_unspecified);
- if (LangStd == LangStandard::lang_unspecified)
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue(Args);
- }
-
if (LangStd == LangStandard::lang_unspecified) {
// Based on the base language, pick one.
switch (IK) {
@@ -1226,6 +1306,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
case IK_OpenCL:
LangStd = LangStandard::lang_opencl;
break;
+ case IK_CUDA:
+ LangStd = LangStandard::lang_cuda;
+ break;
case IK_Asm:
case IK_C:
case IK_PreprocessedC:
@@ -1259,26 +1342,71 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.AltiVec = 1;
Opts.CXXOperatorNames = 1;
Opts.LaxVectorConversions = 1;
+ Opts.DefaultFPContract = 1;
}
+ if (LangStd == LangStandard::lang_cuda)
+ Opts.CUDA = 1;
+
// OpenCL and C++ both have bool, true, false keywords.
Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
+ Opts.GNUKeywords = Opts.GNUMode;
+ Opts.CXXOperatorNames = Opts.CPlusPlus;
+
+ // Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
+ // is specified, or -std is set to a conforming mode.
+ Opts.Trigraphs = !Opts.GNUMode;
+
+ Opts.DollarIdents = !Opts.AsmPreprocessor;
+}
+
+static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
+ Diagnostic &Diags) {
+ // FIXME: Clean up per-file-based stuff.
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified;
+ if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
+ LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
+#define LANGSTANDARD(id, name, desc, features) \
+ .Case(name, LangStandard::lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(LangStandard::lang_unspecified);
+ if (LangStd == LangStandard::lang_unspecified)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
+ if (strcmp(A->getValue(Args), "CL1.1") != 0) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ }
+ }
+
+ CompilerInvocation::setLangDefaults(Opts, IK, LangStd);
+
// We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
// keywords. This behavior is provided by GCC's poorly named '-fasm' flag,
// while a subset (the non-C++ GNU keywords) is provided by GCC's
// '-fgnu-keywords'. Clang conflates the two for simplicity under the single
// name, as the distinction doesn't seem useful.
Opts.GNUKeywords = Args.hasFlag(OPT_fgnu_keywords, OPT_fno_gnu_keywords,
- Opts.GNUMode);
+ Opts.GNUKeywords);
- if (Opts.CPlusPlus)
- Opts.CXXOperatorNames = !Args.hasArg(OPT_fno_operator_names);
+ if (Args.hasArg(OPT_fno_operator_names))
+ Opts.CXXOperatorNames = 0;
if (Args.hasArg(OPT_fobjc_gc_only))
Opts.setGCMode(LangOptions::GCOnly);
else if (Args.hasArg(OPT_fobjc_gc))
Opts.setGCMode(LangOptions::HybridGC);
+
+ if (Args.hasArg(OPT_fapple_kext)) {
+ if (!Opts.CPlusPlus)
+ Diags.Report(diag::warn_c_kext);
+ else
+ Opts.AppleKext = 1;
+ }
if (Args.hasArg(OPT_print_ivar_layout))
Opts.ObjCGCBitmapPrint = 1;
@@ -1293,34 +1421,36 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
llvm::StringRef Vis = Args.getLastArgValue(OPT_fvisibility, "default");
if (Vis == "default")
- Opts.setVisibilityMode(LangOptions::Default);
+ Opts.setVisibilityMode(DefaultVisibility);
else if (Vis == "hidden")
- Opts.setVisibilityMode(LangOptions::Hidden);
+ Opts.setVisibilityMode(HiddenVisibility);
else if (Vis == "protected")
- Opts.setVisibilityMode(LangOptions::Protected);
+ Opts.setVisibilityMode(ProtectedVisibility);
else
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis;
if (Args.hasArg(OPT_fvisibility_inlines_hidden))
Opts.InlineVisibilityHidden = 1;
-
- if (Args.hasArg(OPT_ftrapv))
- Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
+
+ if (Args.hasArg(OPT_ftrapv)) {
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
+ // Set the handler, if one is specified.
+ Opts.OverflowHandler =
+ Args.getLastArgValue(OPT_ftrapv_handler);
+ }
else if (Args.hasArg(OPT_fwrapv))
- Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
- // Mimicing gcc's behavior, trigraphs are only enabled if -trigraphs
- // is specified, or -std is set to a conforming mode.
- Opts.Trigraphs = !Opts.GNUMode;
if (Args.hasArg(OPT_trigraphs))
Opts.Trigraphs = 1;
Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
OPT_fno_dollars_in_identifiers,
- !Opts.AsmPreprocessor);
+ Opts.DollarIdents);
Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
Opts.Microsoft = Args.hasArg(OPT_fms_extensions);
+ Opts.MSCVersion = Args.getLastArgIntValue(OPT_fmsc_version, 0, Diags);
Opts.Borland = Args.hasArg(OPT_fborland_extensions);
Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
Opts.ConstStrings = Args.hasArg(OPT_Wwrite_strings);
@@ -1329,10 +1459,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_threadsafe_statics))
Opts.ThreadsafeStatics = 0;
Opts.Exceptions = Args.hasArg(OPT_fexceptions);
+ Opts.ObjCExceptions = !Args.hasArg(OPT_fno_objc_exceptions);
Opts.RTTI = !Args.hasArg(OPT_fno_rtti);
Opts.Blocks = Args.hasArg(OPT_fblocks);
Opts.CharIsSigned = !Args.hasArg(OPT_fno_signed_char);
Opts.ShortWChar = Args.hasArg(OPT_fshort_wchar);
+ Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
Opts.FormatExtensions = Args.hasArg(OPT_fformat_extensions);
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
@@ -1343,27 +1475,33 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.MathErrno = Args.hasArg(OPT_fmath_errno);
Opts.InstantiationDepth = Args.getLastArgIntValue(OPT_ftemplate_depth, 1024,
Diags);
+ Opts.NumLargeByValueCopy = Args.getLastArgIntValue(OPT_Wlarge_by_value_copy,
+ 0, Diags);
+ Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
Opts.NeXTRuntime = !Args.hasArg(OPT_fgnu_runtime);
Opts.ObjCConstantStringClass =
Args.getLastArgValue(OPT_fconstant_string_class);
Opts.ObjCNonFragileABI = Args.hasArg(OPT_fobjc_nonfragile_abi);
- Opts.ObjCNonFragileABI2 = Args.hasArg(OPT_fobjc_nonfragile_abi2);
- if (Opts.ObjCNonFragileABI2)
- Opts.ObjCNonFragileABI = true;
+ if (Opts.ObjCNonFragileABI)
+ Opts.ObjCNonFragileABI2 = true;
+ Opts.ObjCDefaultSynthProperties =
+ Args.hasArg(OPT_fobjc_default_synthesize_properties);
Opts.CatchUndefined = Args.hasArg(OPT_fcatch_undefined_behavior);
Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
Opts.PICLevel = Args.getLastArgIntValue(OPT_pic_level, 0, Diags);
Opts.SjLjExceptions = Args.hasArg(OPT_fsjlj_exceptions);
+ Opts.ObjCExceptions = !Args.hasArg(OPT_fno_objc_exceptions);
Opts.Static = Args.hasArg(OPT_static_define);
Opts.DumpRecordLayouts = Args.hasArg(OPT_fdump_record_layouts);
Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
+ Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
+ Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.OptimizeSize = 0;
// FIXME: Eliminate this dependency.
- unsigned Opt =
- Args.hasArg(OPT_Os) ? 2 : Args.getLastArgIntValue(OPT_O, 0, Diags);
+ unsigned Opt = getOptimizationLevel(Args, IK, Diags);
Opts.Optimize = Opt != 0;
// This is the __NO_INLINE__ define, which just depends on things like the
@@ -1386,6 +1524,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
+ FileManager &FileMgr,
Diagnostic &Diags) {
using namespace cc1options;
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
@@ -1398,12 +1537,19 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
+ Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
+ for (arg_iterator it = Args.filtered_begin(OPT_error_on_deserialized_pch_decl),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue(Args));
+ }
+
if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
llvm::StringRef Value(A->getValue(Args));
size_t Comma = Value.find(',');
unsigned Bytes = 0;
unsigned EndOfLine = 0;
-
+
if (Comma == llvm::StringRef::npos ||
Value.substr(0, Comma).getAsInteger(10, Bytes) ||
Value.substr(Comma + 1).getAsInteger(10, EndOfLine))
@@ -1413,7 +1559,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.PrecompiledPreambleBytes.second = (EndOfLine != 0);
}
}
-
+
// Add macros from the command line.
for (arg_iterator it = Args.filtered_begin(OPT_D, OPT_U),
ie = Args.filtered_end(); it != ie; ++it) {
@@ -1433,7 +1579,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
// PCH is handled specially; we need to extract the original include path.
if (A->getOption().matches(OPT_include_pch)) {
std::string OriginalFile =
- ASTReader::getOriginalSourceFile(A->getValue(Args), Diags);
+ ASTReader::getOriginalSourceFile(A->getValue(Args), FileMgr, Diags);
if (OriginalFile.empty())
continue;
@@ -1466,7 +1612,6 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
using namespace cc1options;
Opts.ShowCPP = !Args.hasArg(OPT_dM);
Opts.ShowComments = Args.hasArg(OPT_C);
- Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
Opts.ShowMacroComments = Args.hasArg(OPT_CC);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
@@ -1489,8 +1634,8 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
//
void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
- const char **ArgBegin,
- const char **ArgEnd,
+ const char *const *ArgBegin,
+ const char *const *ArgEnd,
Diagnostic &Diags) {
// Parse the arguments.
llvm::OwningPtr<OptTable> Opts(createCC1OptTable());
@@ -1509,14 +1654,21 @@ void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args);
ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags);
- ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, Diags);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, Diags);
+ ParseFileSystemArgs(Res.getFileSystemOpts(), *Args);
+ // FIXME: We shouldn't have to pass the DashX option around here
InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
+ ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags);
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args);
if (DashX != IK_AST && DashX != IK_LLVM_IR)
ParseLangArgs(Res.getLangOpts(), *Args, DashX, Diags);
- ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, Diags);
+ // FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
+ // the PCH file and find the original header name. Remove the need to do that
+ // in ParsePreprocessorArgs, and remove the FileManager parameter from the
+ // function and the "FileManager.h" #include.
+ FileManager FileMgr(Res.getFileSystemOpts());
+ ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, FileMgr, Diags);
ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args);
ParseTargetArgs(Res.getTargetOpts(), *Args);
}
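
The -add-plugin handling in ParseFrontendArgs above pairs every -plugin-arg with the action it names by comparing the first value of each filtered argument. A minimal standalone sketch of that pairing rule, with plain vectors standing in for the ArgList (the names here are illustrative, not clang API):

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Model each -plugin-arg as (action-name, argument-value).
    typedef std::pair<std::string, std::string> PluginArg;

    // For every requested action, keep the arguments whose first value names
    // that action -- the same O(|actions| * |args|) scan as in the hunk above.
    std::vector<std::vector<std::string> >
    pairPluginArgs(const std::vector<std::string> &Actions,
                   const std::vector<PluginArg> &Args) {
      std::vector<std::vector<std::string> > Out(Actions.size());
      for (std::size_t i = 0; i != Actions.size(); ++i)
        for (std::size_t j = 0; j != Args.size(); ++j)
          if (Args[j].first == Actions[i])
            Out[i].push_back(Args[j].second);
      return Out;
    }

Both counts stay small in practice, so the quadratic scan deliberately trades speed for simplicity.
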
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DeclXML.cpp b/contrib/llvm/tools/clang/lib/Frontend/DeclXML.cpp
index 97a7f55..8d3d225 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DeclXML.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DeclXML.cpp
@@ -38,10 +38,12 @@ class DocumentXML::DeclPrinter : public DeclVisitor<DocumentXML::DeclPrinter> {
}
void addSubNodes(RecordDecl* RD) {
- for (RecordDecl::field_iterator i = RD->field_begin(),
- e = RD->field_end(); i != e; ++i) {
- Visit(*i);
- Doc.toParent();
+ for (RecordDecl::decl_iterator i = RD->decls_begin(),
+ e = RD->decls_end(); i != e; ++i) {
+ if (!(*i)->isImplicit()) {
+ Visit(*i);
+ Doc.toParent();
+ }
}
}
@@ -71,15 +73,7 @@ class DocumentXML::DeclPrinter : public DeclVisitor<DocumentXML::DeclPrinter> {
Doc.addAttribute("is_virtual", base->isVirtual());
Doc.toParent();
}
-
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end(); i != e; ++i) {
- Visit(*i);
- Doc.toParent();
- }
-
}
-
}
void addSubNodes(EnumDecl* ED) {
@@ -110,7 +104,7 @@ class DocumentXML::DeclPrinter : public DeclVisitor<DocumentXML::DeclPrinter> {
Doc.PrintStmt(argDecl->getDefaultArg());
}
- void addSubNodes(NamespaceDecl* ns) {
+ void addSubNodes(DeclContext* ns) {
for (DeclContext::decl_iterator
d = ns->decls_begin(),
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
index cdff807..bc5a55d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
@@ -13,7 +13,6 @@
#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -22,7 +21,6 @@
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/raw_ostream.h"
-#include <string>
using namespace clang;
@@ -117,6 +115,16 @@ void DependencyFileCallback::FileChanged(SourceLocation Loc,
Files.push_back(Filename);
}
+/// PrintFilename - GCC escapes spaces, but apparently not ' or " or other
+/// scary characters.
+static void PrintFilename(llvm::raw_ostream &OS, llvm::StringRef Filename) {
+ for (unsigned i = 0, e = Filename.size(); i != e; ++i) {
+ if (Filename[i] == ' ')
+ OS << '\\';
+ OS << Filename[i];
+ }
+}
+
void DependencyFileCallback::OutputDependencyFile() {
// Write out the dependency targets, trying to avoid overly long
// lines when possible. We try our best to emit exactly the same
@@ -130,14 +138,15 @@ void DependencyFileCallback::OutputDependencyFile() {
unsigned N = I->length();
if (Columns == 0) {
Columns += N;
- *OS << *I;
} else if (Columns + N + 2 > MaxColumns) {
Columns = N + 2;
- *OS << " \\\n " << *I;
+ *OS << " \\\n ";
} else {
Columns += N + 1;
- *OS << ' ' << *I;
+ *OS << ' ';
}
+ // Targets already quoted as needed.
+ *OS << *I;
}
*OS << ':';
@@ -155,7 +164,8 @@ void DependencyFileCallback::OutputDependencyFile() {
*OS << " \\\n ";
Columns = 2;
}
- *OS << ' ' << *I;
+ *OS << ' ';
+ PrintFilename(*OS, *I);
Columns += N + 1;
}
*OS << '\n';
@@ -166,7 +176,8 @@ void DependencyFileCallback::OutputDependencyFile() {
for (std::vector<std::string>::iterator I = Files.begin() + 1,
E = Files.end(); I != E; ++I) {
*OS << '\n';
- *OS << *I << ":\n";
+ PrintFilename(*OS, *I);
+ *OS << ":\n";
}
}
}
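
PrintFilename above follows GCC's quoting rule for dependency files: spaces get a backslash, while quotes and other shell metacharacters pass through untouched. A self-contained sketch of the same rule:

    #include <iostream>
    #include <string>

    // Backslash-escape spaces only, mirroring GCC's .d-file output.
    static std::string escapeForMake(const std::string &Filename) {
      std::string Out;
      for (std::string::size_type i = 0, e = Filename.size(); i != e; ++i) {
        if (Filename[i] == ' ')
          Out += '\\';
        Out += Filename[i];
      }
      return Out;
    }

    int main() {
      std::cout << escapeForMake("my header.h") << '\n';  // prints: my\ header.h
    }
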
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DocumentXML.cpp b/contrib/llvm/tools/clang/lib/Frontend/DocumentXML.cpp
index 894f230..b24ece5 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DocumentXML.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DocumentXML.cpp
@@ -17,6 +17,8 @@
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/config.h"
+#include <cstdio>
namespace clang {
@@ -104,7 +106,11 @@ std::string DocumentXML::escapeString(const char* pStr,
if (isprint(C))
value += C;
else {
+#ifdef LLVM_ON_WIN32
sprintf(buffer, "\\%03o", C);
+#else
+ snprintf(buffer, sizeof(buffer), "\\%03o", C);
+#endif
value += buffer;
}
break;
@@ -321,9 +327,11 @@ PresumedLoc DocumentXML::addLocation(const SourceLocation& Loc)
if (!SpellingLoc.isInvalid())
{
PLoc = SM.getPresumedLoc(SpellingLoc);
- addSourceFileAttribute(PLoc.getFilename());
- addAttribute("line", PLoc.getLine());
- addAttribute("col", PLoc.getColumn());
+ if (PLoc.isValid()) {
+ addSourceFileAttribute(PLoc.getFilename());
+ addAttribute("line", PLoc.getLine());
+ addAttribute("col", PLoc.getColumn());
+ }
}
// else there is no error in some cases (eg. CXXThisExpr)
return PLoc;
@@ -340,8 +348,9 @@ void DocumentXML::addLocationRange(const SourceRange& R)
if (!SpellingLoc.isInvalid())
{
PresumedLoc PLoc = SM.getPresumedLoc(SpellingLoc);
- if (PStartLoc.isInvalid() ||
- strcmp(PLoc.getFilename(), PStartLoc.getFilename()) != 0) {
+ if (PLoc.isInvalid()) {
+ } else if (PStartLoc.isInvalid() ||
+ strcmp(PLoc.getFilename(), PStartLoc.getFilename()) != 0) {
addToMap(SourceFiles, PLoc.getFilename(), ID_FILE);
addAttribute("endfile", PLoc.getFilename());
addAttribute("endline", PLoc.getLine());
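
The escapeString change above bounds the octal expansion with snprintf where it is available, keeping plain sprintf only under LLVM_ON_WIN32. A sketch of the escape step for a single byte, with an explicit buffer size chosen here for illustration (the original's buffer is declared outside the hunk):

    #include <cctype>
    #include <cstdio>
    #include <string>

    // Printable bytes pass through; everything else becomes a \NNN octal
    // escape, as the XML printer does. Uses C++11's std::snprintf.
    static std::string escapeByte(unsigned char C) {
      if (std::isprint(C))
        return std::string(1, static_cast<char>(C));
      char buffer[8];  // room for "\NNN" plus the NUL terminator
      std::snprintf(buffer, sizeof(buffer), "\\%03o", C);
      return std::string(buffer);
    }
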
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index b244c5c..e3d8b85 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -10,18 +10,73 @@
#include "clang/Frontend/FrontendAction.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Parse/ParseAST.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+namespace {
+
+/// \brief Dumps deserialized declarations.
+class DeserializedDeclsDumper : public ASTDeserializationListener {
+ ASTDeserializationListener *Previous;
+
+public:
+ DeserializedDeclsDumper(ASTDeserializationListener *Previous)
+ : Previous(Previous) { }
+
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ llvm::outs() << "PCH DECL: " << D->getDeclKindName();
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ llvm::outs() << " - " << ND->getNameAsString();
+ llvm::outs() << "\n";
+
+ if (Previous)
+ Previous->DeclRead(ID, D);
+ }
+};
+
+ /// \brief Checks deserialized declarations and emits an error if a name
+ /// matches one given on the command line via -error-on-deserialized-decl.
+ class DeserializedDeclsChecker : public ASTDeserializationListener {
+ ASTContext &Ctx;
+ std::set<std::string> NamesToCheck;
+ ASTDeserializationListener *Previous;
+
+ public:
+ DeserializedDeclsChecker(ASTContext &Ctx,
+ const std::set<std::string> &NamesToCheck,
+ ASTDeserializationListener *Previous)
+ : Ctx(Ctx), NamesToCheck(NamesToCheck), Previous(Previous) { }
+
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
+ unsigned DiagID
+ = Ctx.getDiagnostics().getCustomDiagID(Diagnostic::Error,
+ "%0 was deserialized");
+ Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
+ << ND->getNameAsString();
+ }
+
+ if (Previous)
+ Previous->DeclRead(ID, D);
+ }
+};
+
+} // end anonymous namespace
+
FrontendAction::FrontendAction() : Instance(0) {}
FrontendAction::~FrontendAction() {}
@@ -33,6 +88,39 @@ void FrontendAction::setCurrentFile(llvm::StringRef Value, InputKind Kind,
CurrentASTUnit.reset(AST);
}
+ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ ASTConsumer* Consumer = CreateASTConsumer(CI, InFile);
+ if (!Consumer)
+ return 0;
+
+ if (CI.getFrontendOpts().AddPluginActions.size() == 0)
+ return Consumer;
+
+ // Make sure the non-plugin consumer is first, so that plugins can't
+ // modify the AST.
+ std::vector<ASTConsumer*> Consumers(1, Consumer);
+
+ for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
+ i != e; ++i) {
+ // This is O(|plugins| * |add_plugins|), but since both numbers are
+ // way below 50 in practice, that's ok.
+ for (FrontendPluginRegistry::iterator
+ it = FrontendPluginRegistry::begin(),
+ ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
+ llvm::OwningPtr<PluginASTAction> P(it->instantiate());
+ FrontendAction* c = P.get();
+ if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
+ Consumers.push_back(c->CreateASTConsumer(CI, InFile));
+ }
+ }
+ }
+
+ return new MultiplexConsumer(Consumers);
+}
+
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
llvm::StringRef Filename,
InputKind InputKind) {
@@ -51,7 +139,8 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
llvm::IntrusiveRefCntPtr<Diagnostic> Diags(&CI.getDiagnostics());
std::string Error;
- ASTUnit *AST = ASTUnit::LoadFromASTFile(Filename, Diags);
+ ASTUnit *AST = ASTUnit::LoadFromASTFile(Filename, Diags,
+ CI.getFileSystemOpts());
if (!AST)
goto failure;
@@ -69,7 +158,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
goto failure;
/// Create the AST consumer.
- CI.setASTConsumer(CreateASTConsumer(CI, Filename));
+ CI.setASTConsumer(CreateWrappedASTConsumer(CI, Filename));
if (!CI.hasASTConsumer())
goto failure;
@@ -80,7 +169,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.hasFileManager())
CI.createFileManager();
if (!CI.hasSourceManager())
- CI.createSourceManager();
+ CI.createSourceManager(CI.getFileManager());
// IR files bypass the rest of initialization.
if (InputKind == IK_LLVM_IR) {
@@ -113,16 +202,30 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!usesPreprocessorOnly()) {
CI.createASTContext();
- llvm::OwningPtr<ASTConsumer> Consumer(CreateASTConsumer(CI, Filename));
+ llvm::OwningPtr<ASTConsumer> Consumer(
+ CreateWrappedASTConsumer(CI, Filename));
+ if (!Consumer)
+ goto failure;
+
+ CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
/// Use PCH?
if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
assert(hasPCHSupport() && "This action does not have PCH support!");
+ ASTDeserializationListener *DeserialListener
+ = CI.getInvocation().getFrontendOpts().ChainedPCH ?
+ Consumer->GetASTDeserializationListener() : 0;
+ if (CI.getPreprocessorOpts().DumpDeserializedPCHDecls)
+ DeserialListener = new DeserializedDeclsDumper(DeserialListener);
+ if (!CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn.empty())
+ DeserialListener = new DeserializedDeclsChecker(CI.getASTContext(),
+ CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn,
+ DeserialListener);
CI.createPCHExternalASTSource(
CI.getPreprocessorOpts().ImplicitPCHInclude,
CI.getPreprocessorOpts().DisablePCHValidation,
- CI.getInvocation().getFrontendOpts().ChainedPCH?
- Consumer->GetASTDeserializationListener() : 0);
+ CI.getPreprocessorOpts().DisableStatCache,
+ DeserialListener);
if (!CI.getASTContext().getExternalSource())
goto failure;
}
@@ -137,7 +240,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.hasASTContext() || !CI.getASTContext().getExternalSource()) {
Preprocessor &PP = CI.getPreprocessor();
PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
- PP.getLangOptions().NoBuiltin);
+ PP.getLangOptions());
}
return true;
@@ -187,6 +290,9 @@ void FrontendAction::Execute() {
void FrontendAction::EndSourceFile() {
CompilerInstance &CI = getCompilerInstance();
+ // Inform the diagnostic client we are done with this source file.
+ CI.getDiagnosticClient().EndSourceFile();
+
// Finalize the action.
EndSourceFileAction();
@@ -223,10 +329,7 @@ void FrontendAction::EndSourceFile() {
// Cleanup the output streams, and erase the output files if we encountered
// an error.
- CI.clearOutputFiles(/*EraseFiles=*/CI.getDiagnostics().getNumErrors());
-
- // Inform the diagnostic client we are done with this source file.
- CI.getDiagnosticClient().EndSourceFile();
+ CI.clearOutputFiles(/*EraseFiles=*/CI.getDiagnostics().hasErrorOccurred());
if (isCurrentFileAST()) {
CI.takeSema();
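
DeserializedDeclsDumper and DeserializedDeclsChecker above are decorators: each does its own work in DeclRead and then forwards to the listener it wraps, so BeginSourceFile can stack dumping, checking, and the consumer's own listener in one chain. The shape of the pattern, reduced to a toy interface (illustrative names, not the clang API):

    #include <iostream>
    #include <string>

    struct Listener {
      virtual ~Listener() {}
      virtual void declRead(const std::string &Name) = 0;
    };

    // Decorator: act on the event, then forward to the wrapped listener.
    struct Dumper : Listener {
      Listener *Previous;
      explicit Dumper(Listener *P) : Previous(P) {}
      virtual void declRead(const std::string &Name) {
        std::cout << "DECL: " << Name << '\n';
        if (Previous)
          Previous->declRead(Name);
      }
    };

Because each wrapper tolerates a null Previous, the chain can be assembled conditionally, exactly as the PCH setup above builds the dumper and checker only when their options are present.
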
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index 5bc6506..d8e7d29 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -59,6 +59,17 @@ ASTConsumer *ASTDumpAction::CreateASTConsumer(CompilerInstance &CI,
return CreateASTDumper();
}
+ASTConsumer *ASTDumpXMLAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ llvm::raw_ostream *OS;
+ if (CI.getFrontendOpts().OutputFile.empty())
+ OS = &llvm::outs();
+ else
+ OS = CI.createDefaultOutputFile(false, InFile);
+ if (!OS) return 0;
+ return CreateASTDumperXML(*OS);
+}
+
ASTConsumer *ASTViewAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
return CreateASTViewer();
@@ -72,19 +83,21 @@ ASTConsumer *DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
std::string Sysroot;
+ std::string OutputFile;
llvm::raw_ostream *OS = 0;
bool Chaining;
- if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OS, Chaining))
+ if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS, Chaining))
return 0;
const char *isysroot = CI.getFrontendOpts().RelocatablePCH ?
Sysroot.c_str() : 0;
- return new PCHGenerator(CI.getPreprocessor(), Chaining, isysroot, OS);
+ return new PCHGenerator(CI.getPreprocessor(), OutputFile, Chaining, isysroot, OS);
}
bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
llvm::StringRef InFile,
std::string &Sysroot,
+ std::string &OutputFile,
llvm::raw_ostream *&OS,
bool &Chaining) {
Sysroot = CI.getHeaderSearchOpts().Sysroot;
@@ -93,20 +106,19 @@ bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
return true;
}
- OS = CI.createDefaultOutputFile(true, InFile);
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile);
if (!OS)
return true;
+ OutputFile = CI.getFrontendOpts().OutputFile;
Chaining = CI.getInvocation().getFrontendOpts().ChainedPCH &&
!CI.getPreprocessorOpts().ImplicitPCHInclude.empty();
return false;
}
-ASTConsumer *InheritanceViewAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- return CreateInheritanceViewer(CI.getFrontendOpts().ViewClassInheritance);
-}
-
ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
return new ASTConsumer();
@@ -193,6 +205,7 @@ void PrintPreambleAction::ExecuteAction() {
case IK_ObjC:
case IK_ObjCXX:
case IK_OpenCL:
+ case IK_CUDA:
break;
case IK_None:
@@ -207,7 +220,9 @@ void PrintPreambleAction::ExecuteAction() {
return;
}
- llvm::MemoryBuffer *Buffer = llvm::MemoryBuffer::getFile(getCurrentFile());
+ CompilerInstance &CI = getCompilerInstance();
+ llvm::MemoryBuffer *Buffer
+ = CI.getFileManager().getBufferForFile(getCurrentFile());
if (Buffer) {
unsigned Preamble = Lexer::ComputePreamble(Buffer).first;
llvm::outs().write(Buffer->getBufferStart(), Preamble);
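
ASTDumpXMLAction above picks its destination the way most dump actions do: stdout when no output file was requested, otherwise a freshly created file, with a null stream signalling failure. The selection logic, sketched with plain iostreams rather than the CompilerInstance API:

    #include <fstream>
    #include <iostream>
    #include <ostream>
    #include <string>

    // Return stdout when OutputFile is empty, else open the named file;
    // a null result means the open failed, as in CreateASTConsumer above.
    std::ostream *openDumpStream(const std::string &OutputFile,
                                 std::ofstream &Storage) {
      if (OutputFile.empty())
        return &std::cout;
      Storage.open(OutputFile.c_str());
      return Storage.is_open() ? &Storage : 0;
    }
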
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
index 9dfee24..0a20051 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
@@ -26,6 +26,7 @@ InputKind FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) {
.Cases("C", "cc", "cp", IK_CXX)
.Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX)
.Case("cl", IK_OpenCL)
+ .Case("cu", IK_CUDA)
.Cases("ll", "bc", IK_LLVM_IR)
.Default(IK_C);
}
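
getInputKindForExtension above adds the CUDA mapping: a "cu" extension now selects IK_CUDA. The same lookup written as a plain helper for readers without llvm::StringSwitch at hand (a sketch; the real table carries more cases than shown):

    #include <string>

    enum InputKind { IK_C, IK_CXX, IK_OpenCL, IK_CUDA, IK_LLVM_IR };

    InputKind kindForExtension(const std::string &Ext) {
      if (Ext == "cl")                                  return IK_OpenCL;
      if (Ext == "cu")                                  return IK_CUDA;
      if (Ext == "cpp" || Ext == "cc" || Ext == "cxx" ||
          Ext == "c++" || Ext == "C")                   return IK_CXX;
      if (Ext == "ll" || Ext == "bc")                   return IK_LLVM_IR;
      return IK_C;  // anything else defaults to C, as above
    }
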
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
new file mode 100644
index 0000000..45ff1d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -0,0 +1,113 @@
+//===--- HeaderIncludeGen.cpp - Generate Header Includes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+class HeaderIncludesCallback : public PPCallbacks {
+ SourceManager &SM;
+ llvm::raw_ostream *OutputFile;
+ unsigned CurrentIncludeDepth;
+ bool HasProcessedPredefines;
+ bool OwnsOutputFile;
+ bool ShowAllHeaders;
+
+public:
+ HeaderIncludesCallback(const Preprocessor *PP, bool ShowAllHeaders_,
+ llvm::raw_ostream *OutputFile_, bool OwnsOutputFile_)
+ : SM(PP->getSourceManager()), OutputFile(OutputFile_),
+ CurrentIncludeDepth(0), HasProcessedPredefines(false),
+ OwnsOutputFile(OwnsOutputFile_), ShowAllHeaders(ShowAllHeaders_) {}
+
+ ~HeaderIncludesCallback() {
+ if (OwnsOutputFile)
+ delete OutputFile;
+ }
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType);
+};
+}
+
+void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
+ llvm::StringRef OutputPath) {
+ llvm::raw_ostream *OutputFile = &llvm::errs();
+ bool OwnsOutputFile = false;
+
+ // Open the output file, if used.
+ if (!OutputPath.empty()) {
+ std::string Error;
+ llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
+ OutputPath.str().c_str(), Error, llvm::raw_fd_ostream::F_Append);
+ if (!Error.empty()) {
+ PP.getDiagnostics().Report(
+ clang::diag::warn_fe_cc_print_header_failure) << Error;
+ delete OS;
+ } else {
+ OS->SetUnbuffered();
+ OS->SetUseAtomicWrites(true);
+ OutputFile = OS;
+ OwnsOutputFile = true;
+ }
+ }
+
+ PP.addPPCallbacks(new HeaderIncludesCallback(&PP, ShowAllHeaders,
+ OutputFile, OwnsOutputFile));
+}
+
+void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType) {
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ // Adjust the current include depth.
+ if (Reason == PPCallbacks::EnterFile) {
+ ++CurrentIncludeDepth;
+ } else {
+ if (CurrentIncludeDepth)
+ --CurrentIncludeDepth;
+
+ // We track when we are done with the predefines by watching for the first
+ // place where we drop back to a nesting depth of 0.
+ if (CurrentIncludeDepth == 0 && !HasProcessedPredefines)
+ HasProcessedPredefines = true;
+ }
+
+ // Show the header if we are (a) past the predefines, or (b) showing all
+ // headers and in the predefines at a depth past the initial file and command
+ // line buffers.
+ bool ShowHeader = (HasProcessedPredefines ||
+ (ShowAllHeaders && CurrentIncludeDepth > 2));
+
+ // Dump the header include information if we are past the predefines buffer
+ // or are showing all headers.
+ if (ShowHeader && Reason == PPCallbacks::EnterFile) {
+ // Write to a temporary string to avoid unnecessary flushing on errs().
+ llvm::SmallString<512> Filename(UserLoc.getFilename());
+ Lexer::Stringify(Filename);
+
+ llvm::SmallString<256> Msg;
+ for (unsigned i = 0; i != CurrentIncludeDepth; ++i)
+ Msg += '.';
+ Msg += ' ';
+ Msg += Filename;
+ Msg += '\n';
+
+ OutputFile->write(Msg.data(), Msg.size());
+ }
+}
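
HeaderIncludesCallback above emits one line per entered header: a dot per include depth, a space, then the stringified filename, matching GCC's -H output. The formatting step on its own:

    #include <string>

    // Build a "-H" style line; headerLine(2, "stdio.h") yields ".. stdio.h\n".
    std::string headerLine(unsigned Depth, const std::string &Filename) {
      std::string Msg(Depth, '.');
      Msg += ' ';
      Msg += Filename;
      Msg += '\n';
      return Msg;
    }

Buffering the whole line before a single write, as the callback does with a SmallString, keeps the output atomic even when several compilations share one stderr.
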
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index cd2fef8..69199e2 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -11,6 +11,10 @@
//
//===----------------------------------------------------------------------===//
+#ifdef HAVE_CLANG_CONFIG_H
+# include "clang/Config/config.h"
+#endif
+
#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
@@ -24,7 +28,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/Config/config.h"
#ifdef _MSC_VER
#define WIN32_LEAN_AND_MEAN 1
@@ -45,13 +49,15 @@ class InitHeaderSearch {
std::vector<DirectoryLookup> IncludeGroup[4];
HeaderSearch& Headers;
bool Verbose;
- std::string isysroot;
+ std::string IncludeSysroot;
+ bool IsNotEmptyOrRoot;
public:
- InitHeaderSearch(HeaderSearch &HS,
- bool verbose = false, const std::string &iSysroot = "")
- : Headers(HS), Verbose(verbose), isysroot(iSysroot) {}
+ InitHeaderSearch(HeaderSearch &HS, bool verbose, llvm::StringRef sysroot)
+ : Headers(HS), Verbose(verbose), IncludeSysroot(sysroot),
+ IsNotEmptyOrRoot(!(sysroot.empty() || sysroot == "/")) {
+ }
/// AddPath - Add the specified path to the specified group list.
void AddPath(const llvm::Twine &Path, IncludeDirGroup Group,
@@ -66,7 +72,7 @@ public:
llvm::StringRef Dir64,
const llvm::Triple &triple);
- /// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to suport a MinGW
+ /// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to support a MinGW
/// libstdc++.
void AddMinGWCPlusPlusIncludePaths(llvm::StringRef Base,
llvm::StringRef Arch,
@@ -105,19 +111,18 @@ void InitHeaderSearch::AddPath(const llvm::Twine &Path,
FileManager &FM = Headers.getFileMgr();
// Compute the actual path, taking into consideration -isysroot.
- llvm::SmallString<256> MappedPathStr;
- llvm::raw_svector_ostream MappedPath(MappedPathStr);
+ llvm::SmallString<256> MappedPathStorage;
+ llvm::StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
// Handle isysroot.
- if (Group == System && !IgnoreSysRoot) {
- // FIXME: Portability. This should be a sys::Path interface, this doesn't
- // handle things like C:\ right, nor win32 \\network\device\blah.
- if (isysroot.size() != 1 || isysroot[0] != '/') // Add isysroot if present.
- MappedPath << isysroot;
+ if (Group == System && !IgnoreSysRoot &&
+ llvm::sys::path::is_absolute(MappedPathStr) &&
+ IsNotEmptyOrRoot) {
+ MappedPathStorage.clear();
+ MappedPathStr =
+ (IncludeSysroot + Path).toStringRef(MappedPathStorage);
}
- Path.print(MappedPath);
-
// Compute the DirectoryLookup type.
SrcMgr::CharacteristicKind Type;
if (Group == Quoted || Group == Angled)
@@ -129,7 +134,7 @@ void InitHeaderSearch::AddPath(const llvm::Twine &Path,
// If the directory exists, add it.
- if (const DirectoryEntry *DE = FM.getDirectory(MappedPath.str())) {
+ if (const DirectoryEntry *DE = FM.getDirectory(MappedPathStr)) {
IncludeGroup[Group].push_back(DirectoryLookup(DE, Type, isUserSupplied,
isFramework));
return;
@@ -138,7 +143,7 @@ void InitHeaderSearch::AddPath(const llvm::Twine &Path,
// Check to see if this is an apple-style headermap (which are not allowed to
// be frameworks).
if (!isFramework) {
- if (const FileEntry *FE = FM.getFile(MappedPath.str())) {
+ if (const FileEntry *FE = FM.getFile(MappedPathStr)) {
if (const HeaderMap *HM = Headers.CreateHeaderMap(FE)) {
// It is a headermap, add it to the search path.
IncludeGroup[Group].push_back(DirectoryLookup(HM, Type,isUserSupplied));
@@ -149,7 +154,7 @@ void InitHeaderSearch::AddPath(const llvm::Twine &Path,
if (Verbose)
llvm::errs() << "ignoring nonexistent directory \""
- << MappedPath.str() << "\"\n";
+ << MappedPathStr << "\"\n";
}
@@ -195,8 +200,6 @@ void InitHeaderSearch::AddGnuCPlusPlusIncludePaths(llvm::StringRef Base,
void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(llvm::StringRef Base,
llvm::StringRef Arch,
llvm::StringRef Version) {
- AddPath(Base + "/" + Arch + "/" + Version + "/include",
- System, true, false, false);
AddPath(Base + "/" + Arch + "/" + Version + "/include/c++",
System, true, false, false);
AddPath(Base + "/" + Arch + "/" + Version + "/include/c++/" + Arch,
@@ -348,7 +351,7 @@ static bool getVisualStudioDir(std::string &path) {
bool hasVCExpressDir = getSystemRegistryString(
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
"InstallDir", vsExpressIDEInstallDir, sizeof(vsExpressIDEInstallDir) - 1);
- // If we have both vc80 and vc90, pick version we were compiled with.
+ // If we have both vc80 and vc90, pick the version we were compiled with.
if (hasVCDir && vsIDEInstallDir[0]) {
char *p = (char*)strstr(vsIDEInstallDir, "\\Common7\\IDE");
if (p)
@@ -370,7 +373,7 @@ static bool getVisualStudioDir(std::string &path) {
const char* vs80comntools = getenv("VS80COMNTOOLS");
const char* vscomntools = NULL;
- // Try to find the version that we were compiled with
+ // Try to find the version that we were compiled with
if(false) {}
#if (_MSC_VER >= 1600) // VC100
else if(vs100comntools) {
@@ -413,7 +416,7 @@ static bool getWindowsSDKDir(std::string &path) {
bool hasSDKDir = getSystemRegistryString(
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
"InstallationFolder", windowsSDKInstallDir, sizeof(windowsSDKInstallDir) - 1);
- // If we have both vc80 and vc90, pick version we were compiled with.
+ // If we have both vc80 and vc90, pick the version we were compiled with.
if (hasSDKDir && windowsSDKInstallDir[0]) {
path = windowsSDKInstallDir;
return(true);
@@ -423,10 +426,17 @@ static bool getWindowsSDKDir(std::string &path) {
void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
- // FIXME: temporary hack: hard-coded paths.
-#ifndef __FreeBSD__
- AddPath(CLANG_PREFIX "/usr/local/include", System, true, false, false);
-#endif
+ llvm::Triple::OSType os = triple.getOS();
+
+ switch (os) {
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ break;
+ default:
+ // FIXME: temporary hack: hard-coded paths.
+ AddPath("/usr/local/include", System, true, false, false);
+ break;
+ }
// Builtin includes use #include_next directives and should be positioned
// just prior to the C include dirs.
@@ -445,47 +455,39 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
CIncludeDirs.split(dirs, ":");
for (llvm::SmallVectorImpl<llvm::StringRef>::iterator i = dirs.begin();
i != dirs.end();
- ++i)
+ ++i)
AddPath(*i, System, false, false, false);
return;
}
- llvm::Triple::OSType os = triple.getOS();
+
switch (os) {
- case llvm::Triple::Win32:
- {
- std::string VSDir;
- std::string WindowsSDKDir;
- if (getVisualStudioDir(VSDir)) {
- AddPath(VSDir + "\\VC\\include", System, false, false, false);
- if (getWindowsSDKDir(WindowsSDKDir))
- AddPath(WindowsSDKDir + "\\include", System, false, false, false);
- else
- AddPath(VSDir + "\\VC\\PlatformSDK\\Include",
- System, false, false, false);
- }
- else {
- // Default install paths.
- AddPath("C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
- System, false, false, false);
- AddPath("C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
- System, false, false, false);
- AddPath(
+ case llvm::Triple::Win32: {
+ std::string VSDir;
+ std::string WindowsSDKDir;
+ if (getVisualStudioDir(VSDir)) {
+ AddPath(VSDir + "\\VC\\include", System, false, false, false);
+ if (getWindowsSDKDir(WindowsSDKDir))
+ AddPath(WindowsSDKDir + "\\include", System, false, false, false);
+ else
+ AddPath(VSDir + "\\VC\\PlatformSDK\\Include",
+ System, false, false, false);
+ } else {
+ // Default install paths.
+ AddPath("C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
+ System, false, false, false);
+ AddPath("C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
+ System, false, false, false);
+ AddPath(
"C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
- System, false, false, false);
- AddPath("C:/Program Files/Microsoft Visual Studio 8/VC/include",
- System, false, false, false);
- AddPath(
+ System, false, false, false);
+ AddPath("C:/Program Files/Microsoft Visual Studio 8/VC/include",
+ System, false, false, false);
+ AddPath(
"C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include",
- System, false, false, false);
- // For some clang developers.
- AddPath("G:/Program Files/Microsoft Visual Studio 9.0/VC/include",
- System, false, false, false);
- AddPath(
- "G:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
- System, false, false, false);
- }
+ System, false, false, false);
}
break;
+ }
case llvm::Triple::Haiku:
AddPath("/boot/common/include", System, true, false, false);
AddPath("/boot/develop/headers/os", System, true, false, false);
@@ -493,7 +495,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath("/boot/develop/headers/os/arch", System, true, false, false);
AddPath("/boot/develop/headers/os/device", System, true, false, false);
AddPath("/boot/develop/headers/os/drivers", System, true, false, false);
- AddPath("/boot/develop/headers/os/game", System, true, false, false);
+ AddPath("/boot/develop/headers/os/game", System, true, false, false);
AddPath("/boot/develop/headers/os/interface", System, true, false, false);
AddPath("/boot/develop/headers/os/kernel", System, true, false, false);
AddPath("/boot/develop/headers/os/locale", System, true, false, false);
@@ -506,13 +508,13 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath("/boot/develop/headers/os/support", System, true, false, false);
AddPath("/boot/develop/headers/os/translation",
System, true, false, false);
- AddPath("/boot/develop/headers/os/add-ons/graphics",
+ AddPath("/boot/develop/headers/os/add-ons/graphics",
System, true, false, false);
- AddPath("/boot/develop/headers/os/add-ons/input_server",
+ AddPath("/boot/develop/headers/os/add-ons/input_server",
System, true, false, false);
- AddPath("/boot/develop/headers/os/add-ons/screen_saver",
+ AddPath("/boot/develop/headers/os/add-ons/screen_saver",
System, true, false, false);
- AddPath("/boot/develop/headers/os/add-ons/tracker",
+ AddPath("/boot/develop/headers/os/add-ons/tracker",
System, true, false, false);
AddPath("/boot/develop/headers/os/be_apps/Deskbar",
System, true, false, false);
@@ -521,7 +523,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath("/boot/develop/headers/os/be_apps/Tracker",
System, true, false, false);
AddPath("/boot/develop/headers/cpp", System, true, false, false);
- AddPath("/boot/develop/headers/cpp/i586-pc-haiku",
+ AddPath("/boot/develop/headers/cpp/i586-pc-haiku",
System, true, false, false);
AddPath("/boot/develop/headers/3rdparty", System, true, false, false);
AddPath("/boot/develop/headers/bsd", System, true, false, false);
@@ -529,7 +531,9 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath("/boot/develop/headers/posix", System, true, false, false);
AddPath("/boot/develop/headers", System, true, false, false);
break;
- case llvm::Triple::MinGW64:
+ case llvm::Triple::Cygwin:
+ AddPath("/usr/include/w32api", System, true, false, false);
+ break;
case llvm::Triple::MinGW32:
AddPath("c:/mingw/include", System, true, false, false);
break;
@@ -563,24 +567,24 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
// FIXME: temporary hack: hard-coded paths.
switch (os) {
case llvm::Triple::Cygwin:
- AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include",
- System, true, false, false);
- AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++",
- System, true, false, false);
- AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++/i686-pc-cygwin",
- System, true, false, false);
+ // Cygwin-1.7
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
+ // g++-4 / Cygwin-1.5
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
+ // FIXME: Do we support g++-3.4.4?
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "3.4.4");
break;
- case llvm::Triple::MinGW64:
- // Try gcc 4.5.0
- AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw64", "4.5.0");
- // Try gcc 4.4.0
- AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw64", "4.4.0");
- // Try gcc 4.3.0
- AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw64", "4.3.0");
- // Fall through.
case llvm::Triple::MinGW32:
- // Try gcc 4.5.0
- AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
+ // mingw-w64-20110207
+ AddPath("c:/MinGW/include/c++/4.5.3", System, true, false, false);
+ AddPath("c:/MinGW/include/c++/4.5.3/x86_64-w64-mingw32", System, true, false, false);
+ AddPath("c:/MinGW/include/c++/4.5.3/backward", System, true, false, false);
+ // mingw-w64-20101129
+ AddPath("c:/MinGW/include/c++/4.5.2", System, true, false, false);
+ AddPath("c:/MinGW/include/c++/4.5.2/x86_64-w64-mingw32", System, true, false, false);
+ AddPath("c:/MinGW/include/c++/4.5.2/backward", System, true, false, false);
+ // Try gcc 4.5.0
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
// Try gcc 4.4.0
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
// Try gcc 4.3.0
@@ -590,13 +594,13 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
switch (triple.getArch()) {
default: break;
- case llvm::Triple::ppc:
+ case llvm::Triple::ppc:
case llvm::Triple::ppc64:
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "powerpc-apple-darwin10", "", "ppc64",
+ "powerpc-apple-darwin10", "", "ppc64",
triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
- "powerpc-apple-darwin10", "", "ppc64",
+ "powerpc-apple-darwin10", "", "ppc64",
triple);
break;
@@ -625,6 +629,11 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
// Debian based distros.
// Note: these distros symlink /usr/include/c++/X.Y.Z -> X.Y
//===------------------------------------------------------------------===//
+ // Ubuntu 10.10 "Maverick Meerkat" -- gcc-4.4.5
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
+ "i686-linux-gnu", "", "64", triple);
+ // The rest of 10.10 is the same as previous versions.
+
// Ubuntu 10.04 LTS "Lucid Lynx" -- gcc-4.4.3
// Ubuntu 9.10 "Karmic Koala" -- gcc-4.4.1
// Debian 6.0 "squeeze" -- gcc-4.4.2
@@ -632,6 +641,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
"x86_64-linux-gnu", "32", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
"i486-linux-gnu", "", "64", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
+ "arm-linux-gnueabi", "", "", triple);
// Ubuntu 9.04 "Jaunty Jackalope" -- gcc-4.3.3
// Ubuntu 8.10 "Intrepid Ibex" -- gcc-4.3.2
// Debian 5.0 "lenny" -- gcc-4.3.2
@@ -656,6 +667,11 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
//===------------------------------------------------------------------===//
// Redhat based distros.
//===------------------------------------------------------------------===//
+ // Fedora 14
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.5.1",
+ "x86_64-redhat-linux", "32", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.5.1",
+ "i686-redhat-linux", "", "", triple);
// Fedora 13
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4",
"x86_64-redhat-linux", "32", "", triple);
@@ -711,11 +727,22 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
"i586-suse-linux", "", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
"x86_64-suse-linux", "", "", triple);
+
+ // openSUSE 11.4
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.5",
+ "i586-suse-linux", "", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.5",
+ "x86_64-suse-linux", "", "", triple);
+
// Arch Linux 2008-06-24
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3.1",
"i686-pc-linux-gnu", "", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3.1",
"x86_64-unknown-linux-gnu", "", "", triple);
+ // Gentoo x86 2010.0 stable
+ AddGnuCPlusPlusIncludePaths(
+ "/usr/lib/gcc/i686-pc-linux-gnu/4.4.3/include/g++-v4",
+ "i686-pc-linux-gnu", "", "", triple);
// Gentoo x86 2009.1 stable
AddGnuCPlusPlusIncludePaths(
"/usr/lib/gcc/i686-pc-linux-gnu/4.3.4/include/g++-v4",
@@ -728,26 +755,33 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
AddGnuCPlusPlusIncludePaths(
"/usr/lib/gcc/i686-pc-linux-gnu/4.1.2/include/g++-v4",
"i686-pc-linux-gnu", "", "", triple);
- // Gentoo amd64 stable
+
+ // Gentoo amd64 gcc 4.4.5
AddGnuCPlusPlusIncludePaths(
- "/usr/lib/gcc/x86_64-pc-linux-gnu/4.1.2/include/g++-v4",
- "i686-pc-linux-gnu", "", "", triple);
-
- // Gentoo amd64 gcc 4.3.2
+ "/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.5/include/g++-v4",
+ "x86_64-pc-linux-gnu", "32", "", triple);
+ // Gentoo amd64 gcc 4.4.4
AddGnuCPlusPlusIncludePaths(
- "/usr/lib/gcc/x86_64-pc-linux-gnu/4.3.2/include/g++-v4",
- "x86_64-pc-linux-gnu", "", "", triple);
-
+ "/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/include/g++-v4",
+ "x86_64-pc-linux-gnu", "32", "", triple);
// Gentoo amd64 gcc 4.4.3
AddGnuCPlusPlusIncludePaths(
"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.3/include/g++-v4",
"x86_64-pc-linux-gnu", "32", "", triple);
+ // Gentoo amd64 gcc 4.3.2
+ AddGnuCPlusPlusIncludePaths(
+ "/usr/lib/gcc/x86_64-pc-linux-gnu/4.3.2/include/g++-v4",
+ "x86_64-pc-linux-gnu", "", "", triple);
+ // Gentoo amd64 stable
+ AddGnuCPlusPlusIncludePaths(
+ "/usr/lib/gcc/x86_64-pc-linux-gnu/4.1.2/include/g++-v4",
+ "i686-pc-linux-gnu", "", "", triple);
// Gentoo amd64 llvm-gcc trunk
AddGnuCPlusPlusIncludePaths(
"/usr/lib/llvm-gcc-4.2-9999/include/c++/4.2.1",
"x86_64-pc-linux-gnu", "", "", triple);
-
+
break;
case llvm::Triple::FreeBSD:
// FreeBSD 8.0
@@ -787,8 +821,13 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
void InitHeaderSearch::AddDefaultSystemIncludePaths(const LangOptions &Lang,
const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
- if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes)
- AddDefaultCPlusPlusIncludePaths(triple);
+ if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes) {
+ if (!HSOpts.CXXSystemIncludes.empty()) {
+ for (unsigned i = 0, e = HSOpts.CXXSystemIncludes.size(); i != e; ++i)
+ AddPath(HSOpts.CXXSystemIncludes[i], System, true, false, false);
+ } else
+ AddDefaultCPlusPlusIncludePaths(triple);
+ }
AddDefaultCIncludePaths(triple, HSOpts);
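
The reworked AddPath above applies -isysroot to a path only when three things hold: the path is in the System group without IgnoreSysRoot, it is absolute, and the sysroot itself is neither empty nor "/". That decision, reduced to a predicate (a POSIX-only sketch; the real code uses llvm::sys::path on the Twine-rendered string):

    #include <string>

    // Should Path be re-rooted under Sysroot? Mirrors the checks above.
    bool shouldApplySysroot(const std::string &Sysroot, const std::string &Path,
                            bool IsSystemGroup, bool IgnoreSysRoot) {
      const bool IsNotEmptyOrRoot = !(Sysroot.empty() || Sysroot == "/");
      const bool IsAbsolute = !Path.empty() && Path[0] == '/';  // POSIX only
      return IsSystemGroup && !IgnoreSysRoot && IsAbsolute && IsNotEmptyOrRoot;
    }

When the predicate holds, the mapped path is simply Sysroot + Path; otherwise the original spelling is looked up directly.
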
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
index 0d07192..d0111a5 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -22,8 +22,9 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/APFloat.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
using namespace clang;
// Append a #define line to Buf for Macro. Macro should be of the form XXX,
@@ -55,9 +56,10 @@ std::string clang::NormalizeDashIncludePath(llvm::StringRef File) {
// it has no file entry. For now, work around this by using an
// absolute path if we find the file here, and otherwise letting
// header search handle it.
- llvm::sys::Path Path(File);
- Path.makeAbsolute();
- if (!Path.exists())
+ llvm::SmallString<128> Path(File);
+ llvm::sys::fs::make_absolute(Path);
+ bool exists;
+ if (llvm::sys::fs::exists(Path.str(), exists) || !exists)
Path = File;
return Lexer::Stringify(Path.str());
@@ -342,6 +344,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
Builder.append("class type_info;");
}
+
+ if (LangOpts.CPlusPlus0x) {
+ Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", "1");
+ }
}
if (LangOpts.Optimize)
@@ -465,6 +471,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (FEOpts.ProgramAction == frontend::RunAnalysis)
Builder.defineMacro("__clang_analyzer__");
+ if (LangOpts.FastRelaxedMath)
+ Builder.defineMacro("__FAST_RELAXED_MATH__");
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
@@ -515,8 +524,7 @@ static void InitializeFileRemapping(Diagnostic &Diags,
// Create the file entry for the file that we're mapping from.
const FileEntry *FromFile = FileMgr.getVirtualFile(Remap->first,
- ToFile->getSize(),
- 0);
+ ToFile->getSize(), 0);
if (!FromFile) {
Diags.Report(diag::err_fe_remap_missing_from_file)
<< Remap->first;
@@ -526,7 +534,7 @@ static void InitializeFileRemapping(Diagnostic &Diags,
// Load the contents of the file we're mapping to.
std::string ErrorStr;
const llvm::MemoryBuffer *Buffer
- = llvm::MemoryBuffer::getFile(ToFile->getName(), &ErrorStr);
+ = FileMgr.getBufferForFile(ToFile->getName(), &ErrorStr);
if (!Buffer) {
Diags.Report(diag::err_fe_error_opening)
<< Remap->second << ErrorStr;
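
NormalizeDashIncludePath above moves to the llvm::sys::fs API: make the path absolute, then keep the absolute form only if the file verifiably exists, falling back to the user's spelling on error or absence. The same control flow sketched with C++17 std::filesystem (an anachronism relative to this tree, used only for illustration):

    #include <filesystem>
    #include <string>
    #include <system_error>

    namespace fs = std::filesystem;

    std::string normalizeInclude(const std::string &File) {
      std::error_code EC;
      fs::path P = fs::absolute(File, EC);
      if (EC)
        return File;                  // could not absolutize: keep as given
      if (!fs::exists(P, EC) || EC)
        return File;                  // missing or unknowable: keep as given
      return P.string();
    }
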
diff --git a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
new file mode 100644
index 0000000..3649c3c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -0,0 +1,221 @@
+//===- MultiplexConsumer.cpp - AST Consumer for PCH Generation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MultiplexConsumer class. It also declares and defines
+// MultiplexASTDeserializationListener and MultiplexASTMutationListener, which
+// are implementation details of MultiplexConsumer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/MultiplexConsumer.h"
+
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+
+using namespace clang;
+
+namespace clang {
+
+// This ASTDeserializationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTDeserializationListener
+ : public ASTDeserializationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L);
+ virtual void ReaderInitialized(ASTReader *Reader);
+ virtual void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II);
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T);
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D);
+ virtual void SelectorRead(serialization::SelectorID iD, Selector Sel);
+ virtual void MacroDefinitionRead(serialization::MacroID,
+ MacroDefinition *MD);
+private:
+ std::vector<ASTDeserializationListener*> Listeners;
+};
+
+MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L)
+ : Listeners(L) {
+}
+
+void MultiplexASTDeserializationListener::ReaderInitialized(
+ ASTReader *Reader) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->ReaderInitialized(Reader);
+}
+
+void MultiplexASTDeserializationListener::IdentifierRead(
+ serialization::IdentID ID, IdentifierInfo *II) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->IdentifierRead(ID, II);
+}
+
+void MultiplexASTDeserializationListener::TypeRead(
+ serialization::TypeIdx Idx, QualType T) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->TypeRead(Idx, T);
+}
+
+void MultiplexASTDeserializationListener::DeclRead(
+ serialization::DeclID ID, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclRead(ID, D);
+}
+
+void MultiplexASTDeserializationListener::SelectorRead(
+ serialization::SelectorID ID, Selector Sel) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->SelectorRead(ID, Sel);
+}
+
+void MultiplexASTDeserializationListener::MacroDefinitionRead(
+ serialization::MacroID ID, MacroDefinition *MD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->MacroDefinitionRead(ID, MD);
+}
+
+// This ASTMutationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTMutationListener : public ASTMutationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTMutationListener(const std::vector<ASTMutationListener*>& L);
+ virtual void CompletedTagDefinition(const TagDecl *D);
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D);
+private:
+ std::vector<ASTMutationListener*> Listeners;
+};
+
+MultiplexASTMutationListener::MultiplexASTMutationListener(
+ const std::vector<ASTMutationListener*>& L)
+ : Listeners(L) {
+}
+
+void MultiplexASTMutationListener::CompletedTagDefinition(const TagDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->CompletedTagDefinition(D);
+}
+
+void MultiplexASTMutationListener::AddedVisibleDecl(
+ const DeclContext *DC, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedVisibleDecl(DC, D);
+}
+
+void MultiplexASTMutationListener::AddedCXXImplicitMember(
+ const CXXRecordDecl *RD, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXImplicitMember(RD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const ClassTemplateDecl *TD, const ClassTemplateSpecializationDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+
+} // end namespace clang
+
+
+MultiplexConsumer::MultiplexConsumer(const std::vector<ASTConsumer*>& C)
+ : Consumers(C), MutationListener(0), DeserializationListener(0) {
+  // Collect the mutation and deserialization listeners of all children,
+  // and create one multiplex listener for each kind that is present.
+ std::vector<ASTMutationListener*> mutationListeners;
+ std::vector<ASTDeserializationListener*> serializationListeners;
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i) {
+ ASTMutationListener* mutationListener =
+ Consumers[i]->GetASTMutationListener();
+ if (mutationListener)
+ mutationListeners.push_back(mutationListener);
+ ASTDeserializationListener* serializationListener =
+ Consumers[i]->GetASTDeserializationListener();
+ if (serializationListener)
+ serializationListeners.push_back(serializationListener);
+ }
+ if (mutationListeners.size()) {
+ MutationListener.reset(new MultiplexASTMutationListener(mutationListeners));
+ }
+ if (serializationListeners.size()) {
+ DeserializationListener.reset(
+ new MultiplexASTDeserializationListener(serializationListeners));
+ }
+}
+
+MultiplexConsumer::~MultiplexConsumer() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ delete Consumers[i];
+}
+
+void MultiplexConsumer::Initialize(ASTContext &Context) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->Initialize(Context);
+}
+
+void MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTopLevelDecl(D);
+}
+
+void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleInterestingDecl(D);
+}
+
+void MultiplexConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTranslationUnit(Ctx);
+}
+
+void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTagDeclDefinition(D);
+}
+
+void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->CompleteTentativeDefinition(D);
+}
+
+void MultiplexConsumer::HandleVTable(
+ CXXRecordDecl *RD, bool DefinitionRequired) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleVTable(RD, DefinitionRequired);
+}
+
+ASTMutationListener *MultiplexConsumer::GetASTMutationListener() {
+ return MutationListener.get();
+}
+
+ASTDeserializationListener *MultiplexConsumer::GetASTDeserializationListener() {
+ return DeserializationListener.get();
+}
+
+void MultiplexConsumer::PrintStats() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->PrintStats();
+}
+
+void MultiplexConsumer::InitializeSema(Sema &S) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ SC->InitializeSema(S);
+}
+
+void MultiplexConsumer::ForgetSema() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ SC->ForgetSema();
+}
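
The whole new file is one pattern applied three times: keep a flat vector of non-owned children and forward every callback to each of them in registration order. A stripped-down sketch of that fan-out, with illustrative names rather than the clang interfaces:

    #include <vector>

    class Listener {
    public:
      virtual ~Listener() {}
      virtual void onEvent(int Payload) = 0;
    };

    // Forwards each notification to every child, in order; like the
    // multiplex classes above, it does NOT own the elements of L.
    class MultiplexListener : public Listener {
      std::vector<Listener *> Listeners;
    public:
      explicit MultiplexListener(const std::vector<Listener *> &L)
          : Listeners(L) {}
      virtual void onEvent(int Payload) {
        for (size_t i = 0, e = Listeners.size(); i != e; ++i)
          Listeners[i]->onEvent(Payload);
      }
    };
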
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index cfaf8a2..922d743 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -86,9 +86,6 @@ public:
private:
unsigned CurLine;
- /// The current include nesting level, used by header include dumping (-H).
- unsigned CurrentIncludeDepth;
-
bool EmittedTokensOnThisLine;
bool EmittedMacroOnThisLine;
SrcMgr::CharacteristicKind FileType;
@@ -96,22 +93,19 @@ private:
bool Initialized;
bool DisableLineMarkers;
bool DumpDefines;
- bool DumpHeaderIncludes;
bool UseLineDirective;
- bool HasProcessedPredefines;
public:
PrintPPOutputPPCallbacks(Preprocessor &pp, llvm::raw_ostream &os,
- bool lineMarkers, bool defines, bool headers)
+ bool lineMarkers, bool defines)
: PP(pp), SM(PP.getSourceManager()),
ConcatInfo(PP), OS(os), DisableLineMarkers(lineMarkers),
- DumpDefines(defines), DumpHeaderIncludes(headers) {
- CurLine = CurrentIncludeDepth = 0;
+ DumpDefines(defines) {
+ CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
EmittedMacroOnThisLine = false;
FileType = SrcMgr::C_User;
Initialized = false;
- HasProcessedPredefines = false;
// If we're in microsoft mode, use normal #line instead of line markers.
UseLineDirective = PP.getLangOptions().Microsoft;
@@ -120,6 +114,8 @@ public:
void SetEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
+ bool StartNewLineIfNeeded();
+
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType);
virtual void Ident(SourceLocation Loc, const std::string &str);
@@ -129,7 +125,10 @@ public:
bool HandleFirstTokOnLine(Token &Tok);
bool MoveToLine(SourceLocation Loc) {
- return MoveToLine(SM.getPresumedLoc(Loc).getLine());
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid())
+ return false;
+ return MoveToLine(PLoc.getLine());
}
bool MoveToLine(unsigned LineNo);
@@ -138,15 +137,14 @@ public:
return ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok);
}
void WriteLineInfo(unsigned LineNo, const char *Extra=0, unsigned ExtraLen=0);
-
+ bool LineMarkersAreDisabled() const { return DisableLineMarkers; }
void HandleNewlinesInToken(const char *TokStr, unsigned Len);
/// MacroDefined - This hook is called whenever a macro definition is seen.
- void MacroDefined(const IdentifierInfo *II, const MacroInfo *MI);
+ void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI);
/// MacroUndefined - This hook is called whenever a macro #undef is seen.
- void MacroUndefined(SourceLocation Loc, const IdentifierInfo *II,
- const MacroInfo *MI);
+ void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI);
};
} // end anonymous namespace
@@ -162,11 +160,11 @@ void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
// Emit #line directives or GNU line markers depending on what mode we're in.
if (UseLineDirective) {
OS << "#line" << ' ' << LineNo << ' ' << '"';
- OS.write(&CurFilename[0], CurFilename.size());
+ OS.write(CurFilename.data(), CurFilename.size());
OS << '"';
} else {
OS << '#' << ' ' << LineNo << ' ' << '"';
- OS.write(&CurFilename[0], CurFilename.size());
+ OS.write(CurFilename.data(), CurFilename.size());
OS << '"';
if (ExtraLen)
@@ -213,6 +211,17 @@ bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
return true;
}
+bool PrintPPOutputPPCallbacks::StartNewLineIfNeeded() {
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ ++CurLine;
+ return true;
+ }
+
+ return false;
+}
/// FileChanged - Whenever the preprocessor enters or exits a #include file
/// it invokes this handler. Update our conception of the current source
@@ -225,10 +234,13 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
SourceManager &SourceMgr = SM;
PresumedLoc UserLoc = SourceMgr.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
unsigned NewLine = UserLoc.getLine();
if (Reason == PPCallbacks::EnterFile) {
- SourceLocation IncludeLoc = SourceMgr.getPresumedLoc(Loc).getIncludeLoc();
+ SourceLocation IncludeLoc = UserLoc.getIncludeLoc();
if (IncludeLoc.isValid())
MoveToLine(IncludeLoc);
} else if (Reason == PPCallbacks::SystemHeaderPragma) {
@@ -238,19 +250,6 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
// directive and emits a bunch of spaces that aren't needed. Emulate this
// strange behavior.
}
-
- // Adjust the current include depth.
- if (Reason == PPCallbacks::EnterFile) {
- ++CurrentIncludeDepth;
- } else {
- if (CurrentIncludeDepth)
- --CurrentIncludeDepth;
-
- // We track when we are done with the predefines by watching for the first
- // place where we drop back to a nesting depth of 0.
- if (CurrentIncludeDepth == 0 && !HasProcessedPredefines)
- HasProcessedPredefines = true;
- }
CurLine = NewLine;
@@ -259,18 +258,6 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
Lexer::Stringify(CurFilename);
FileType = NewFileType;
- // Dump the header include information, if enabled and we are past the
- // predefines buffer.
- if (DumpHeaderIncludes && HasProcessedPredefines &&
- Reason == PPCallbacks::EnterFile) {
- llvm::SmallString<256> Msg;
- llvm::raw_svector_ostream OS(Msg);
- for (unsigned i = 0; i != CurrentIncludeDepth; ++i)
- OS << '.';
- OS << ' ' << CurFilename << '\n';
- llvm::errs() << OS.str();
- }
-
if (DisableLineMarkers) return;
if (!Initialized) {
@@ -303,7 +290,7 @@ void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, const std::string &S) {
}
/// MacroDefined - This hook is called whenever a macro definition is seen.
-void PrintPPOutputPPCallbacks::MacroDefined(const IdentifierInfo *II,
+void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
const MacroInfo *MI) {
// Only print out macro definitions in -dD mode.
if (!DumpDefines ||
@@ -311,18 +298,17 @@ void PrintPPOutputPPCallbacks::MacroDefined(const IdentifierInfo *II,
MI->isBuiltinMacro()) return;
MoveToLine(MI->getDefinitionLoc());
- PrintMacroDefinition(*II, *MI, PP, OS);
+ PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
EmittedMacroOnThisLine = true;
}
-void PrintPPOutputPPCallbacks::MacroUndefined(SourceLocation Loc,
- const IdentifierInfo *II,
+void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
const MacroInfo *MI) {
  // Only print out macro #undefs in -dD mode.
if (!DumpDefines) return;
- MoveToLine(Loc);
- OS << "#undef " << II->getName();
+ MoveToLine(MacroNameTok.getLocation());
+ OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
EmittedMacroOnThisLine = true;
}
@@ -437,12 +423,14 @@ struct UnknownPragmaHandler : public PragmaHandler {
UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
: Prefix(prefix), Callbacks(callbacks) {}
- virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PragmaTok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
+ Callbacks->StartNewLineIfNeeded();
Callbacks->MoveToLine(PragmaTok.getLocation());
Callbacks->OS.write(Prefix, strlen(Prefix));
-
+ Callbacks->SetEmittedTokensOnThisLine();
// Read and print all of the pragma tokens.
while (PragmaTok.isNot(tok::eom)) {
if (PragmaTok.hasLeadingSpace())
@@ -451,7 +439,7 @@ struct UnknownPragmaHandler : public PragmaHandler {
Callbacks->OS.write(&TokSpell[0], TokSpell.size());
PP.LexUnexpandedToken(PragmaTok);
}
- Callbacks->OS << '\n';
+ Callbacks->StartNewLineIfNeeded();
}
};
} // end anonymous namespace
@@ -561,10 +549,11 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream *OS,
PrintPPOutputPPCallbacks *Callbacks =
new PrintPPOutputPPCallbacks(PP, *OS, !Opts.ShowLineMarkers,
- Opts.ShowMacros, Opts.ShowHeaderIncludes);
+ Opts.ShowMacros);
PP.AddPragmaHandler(new UnknownPragmaHandler("#pragma", Callbacks));
- PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC",
- Callbacks));
+  PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC", Callbacks));
+ PP.AddPragmaHandler("clang",
+ new UnknownPragmaHandler("#pragma clang", Callbacks));
PP.addPPCallbacks(Callbacks);
@@ -576,13 +565,20 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream *OS,
// start.
const SourceManager &SourceMgr = PP.getSourceManager();
Token Tok;
- do PP.Lex(Tok);
- while (Tok.isNot(tok::eof) && Tok.getLocation().isFileID() &&
- !strcmp(SourceMgr.getPresumedLoc(Tok.getLocation()).getFilename(),
- "<built-in>"));
+ do {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eof) || !Tok.getLocation().isFileID())
+ break;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isInvalid())
+ break;
+
+ if (strcmp(PLoc.getFilename(), "<built-in>"))
+ break;
+ } while (true);
// Read all the preprocessed tokens, printing them out to the stream.
PrintPreprocessedTokens(PP, Tok, Callbacks, *OS);
*OS << '\n';
}
-
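
Most of this diff is one defensive pattern: getPresumedLoc() may now return an invalid location (for example, for locations inside memory buffers), so every consumer checks isInvalid() before touching the fields. A sketch of the guard, with an illustrative stand-in for clang::PresumedLoc:

    // PresumedLocLike stands in for clang::PresumedLoc.
    struct PresumedLocLike {
      const char *Filename;
      unsigned Line;
      bool isInvalid() const { return Filename == 0; }
      unsigned getLine() const { return Line; }
    };

    // Mirrors the fixed MoveToLine above: bail out on an invalid location
    // instead of reading whatever the accessors would have returned.
    bool moveToLine(const PresumedLocLike &PLoc, unsigned &CurLine) {
      if (PLoc.isInvalid())
        return false;
      CurLine = PLoc.getLine();
      return true;
    }
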
diff --git a/contrib/llvm/tools/clang/lib/Frontend/StmtXML.cpp b/contrib/llvm/tools/clang/lib/Frontend/StmtXML.cpp
index b660734..c113cc1 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/StmtXML.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/StmtXML.cpp
@@ -61,8 +61,7 @@ namespace {
Doc.PrintDecl(*DI);
}
} else {
- for (Stmt::child_iterator i = S->child_begin(), e = S->child_end();
- i != e; ++i)
+ for (Stmt::child_range i = S->children(); i; ++i)
DumpSubTree(*i);
}
Doc.toParent();
@@ -133,7 +132,6 @@ namespace {
void VisitBinaryOperator(BinaryOperator *Node);
void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
void VisitAddrLabelExpr(AddrLabelExpr *Node);
- void VisitTypesCompatibleExpr(TypesCompatibleExpr *Node);
// C++
void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
@@ -150,7 +148,6 @@ namespace {
void VisitObjCImplicitSetterGetterRefExpr(
ObjCImplicitSetterGetterRefExpr *Node);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
- void VisitObjCSuperExpr(ObjCSuperExpr *Node);
#endif
};
}
@@ -357,12 +354,6 @@ void StmtXML::VisitAddrLabelExpr(AddrLabelExpr *Node) {
Doc.addAttribute("name", Node->getLabel()->getName());
}
-void StmtXML::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
- DumpExpr(Node);
- DumpTypeExpr(Node->getArgType1());
- DumpTypeExpr(Node->getArgType2());
-}
-
//===----------------------------------------------------------------------===//
// C++ Expressions
//===----------------------------------------------------------------------===//
@@ -428,11 +419,6 @@ void StmtXML::VisitObjCImplicitSetterGetterRefExpr(
Doc.addAttribute("Setter", Setter ? Setter->getSelector().getAsString().c_str() : "(null)");
}
-void StmtXML::VisitObjCSuperExpr(ObjCSuperExpr *Node) {
- DumpExpr(Node);
- Doc.addAttribute("super", "1");
-}
-
void StmtXML::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
DumpExpr(Node);
Doc.addAttribute("kind", Node->getDecl()->getDeclKindName());
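
The StmtXML change swaps an explicit begin/end iterator pair for the children() range object, which is advanced like an iterator and tests false once exhausted. A small sketch of that pre-C++11 range idiom (NodeRange is illustrative, not clang's Stmt::child_range):

    #include <iostream>
    #include <iterator>
    #include <vector>

    template <typename It>
    class NodeRange {
      It Cur, End;
    public:
      NodeRange(It B, It E) : Cur(B), End(E) {}
      operator bool() const { return Cur != End; }     // true while non-empty
      NodeRange &operator++() { ++Cur; return *this; } // advance the front
      typename std::iterator_traits<It>::reference operator*() const {
        return *Cur;
      }
    };

    int main() {
      std::vector<int> Kids;
      Kids.push_back(1); Kids.push_back(2); Kids.push_back(3);
      // Same shape as: for (Stmt::child_range i = S->children(); i; ++i)
      for (NodeRange<std::vector<int>::iterator> R(Kids.begin(), Kids.end());
           R; ++R)
        std::cout << *R << '\n';
    }
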
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
index fdf2ec8..069c86d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -20,6 +20,9 @@ using namespace clang;
///
void TextDiagnosticBuffer::HandleDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info) {
+  // Call the base implementation first: it maintains the warning/error counts.
+ DiagnosticClient::HandleDiagnostic(Level, Info);
+
llvm::SmallString<100> Buf;
Info.FormatDiagnostic(Buf);
switch (Level) {
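
This hunk (and the identical one in TextDiagnosticPrinter.cpp further down) chains to the base class first because the default HandleDiagnostic is what maintains the warning/error counts; an override that skips it silently reports zero errors. A sketch of the pitfall, with illustrative types:

    struct DiagClientBase {
      enum Level { Warning, Error };
      unsigned NumWarnings, NumErrors;
      DiagClientBase() : NumWarnings(0), NumErrors(0) {}
      virtual ~DiagClientBase() {}
      // Default bookkeeping: this is what overrides must not skip.
      virtual void handleDiagnostic(Level L) {
        if (L == Warning)
          ++NumWarnings;
        else
          ++NumErrors;
      }
    };

    struct BufferingClient : DiagClientBase {
      virtual void handleDiagnostic(Level L) {
        DiagClientBase::handleDiagnostic(L); // keep the counts accurate
        // ... then buffer or print the message ...
      }
    };
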
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 1e453a0..04c6a68 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Lex/Lexer.h"
@@ -57,7 +58,9 @@ PrintIncludeStack(SourceLocation Loc, const SourceManager &SM) {
if (Loc.isInvalid()) return;
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
-
+ if (PLoc.isInvalid())
+ return;
+
// Print out the other include frames first.
PrintIncludeStack(PLoc.getIncludeLoc(), SM);
@@ -137,8 +140,9 @@ void TextDiagnosticPrinter::HighlightRange(const CharSourceRange &R,
(SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
--EndColNo;
- // If the start/end passed each other, then we are trying to highlight a range
- // that just exists in whitespace, which must be some sort of other bug.
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace, which must be some sort of other
+ // bug.
assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
}
@@ -328,7 +332,9 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
if (!Suppressed) {
// Get the pretty name, according to #line directives etc.
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
-
+ if (PLoc.isInvalid())
+ return;
+
// If this diagnostic is not in the main file, print out the
// "included from" lines.
if (LastWarningLoc != PLoc.getIncludeLoc()) {
@@ -567,6 +573,10 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
// We specifically do not do word-wrapping or tab-expansion here,
// because this is supposed to be easy to parse.
+ PresumedLoc PLoc = SM.getPresumedLoc(B);
+ if (PLoc.isInvalid())
+ break;
+
OS << "fix-it:\"";
OS.write_escaped(SM.getPresumedLoc(B).getFilename());
OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
@@ -756,6 +766,9 @@ static bool PrintWordWrapped(llvm::raw_ostream &OS,
void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info) {
+  // Call the base implementation first: it maintains the warning/error counts.
+ DiagnosticClient::HandleDiagnostic(Level, Info);
+
  // Keeps track of the starting position of the location
// information (e.g., "foo.c:10:4:") that precedes the error
// message. We use this information to determine how long the
@@ -768,77 +781,96 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// If the location is specified, print out a file/line/col and include trace
// if enabled.
if (Info.getLocation().isValid()) {
- const SourceManager &SM = Info.getLocation().getManager();
+ const SourceManager &SM = Info.getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Info.getLocation());
- unsigned LineNo = PLoc.getLine();
-
- // First, if this diagnostic is not in the main file, print out the
- // "included from" lines.
- if (LastWarningLoc != PLoc.getIncludeLoc()) {
- LastWarningLoc = PLoc.getIncludeLoc();
- PrintIncludeStack(LastWarningLoc, SM);
- StartOfLocationInfo = OS.tell();
- }
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Info.getLocation());
+ if (!FID.isInvalid()) {
+ const FileEntry* FE = SM.getFileEntryForID(FID);
+ if (FE && FE->getName()) {
+ OS << FE->getName();
+ if (FE->getDevice() == 0 && FE->getInode() == 0
+ && FE->getFileMode() == 0) {
+          // "(in PCH)" is a guess, but a good one:
+ OS << " (in PCH)";
+ }
+ OS << ": ";
+ }
+ }
+ } else {
+ unsigned LineNo = PLoc.getLine();
- // Compute the column number.
- if (DiagOpts->ShowLocation) {
- if (DiagOpts->ShowColors)
- OS.changeColor(savedColor, true);
-
- // Emit a Visual Studio compatible line number syntax.
- if (LangOpts && LangOpts->Microsoft) {
- OS << PLoc.getFilename() << '(' << LineNo << ')';
- OS << " : ";
- } else {
- OS << PLoc.getFilename() << ':' << LineNo << ':';
- if (DiagOpts->ShowColumn)
- if (unsigned ColNo = PLoc.getColumn())
- OS << ColNo << ':';
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ if (LastWarningLoc != PLoc.getIncludeLoc()) {
+ LastWarningLoc = PLoc.getIncludeLoc();
+ PrintIncludeStack(LastWarningLoc, SM);
+ StartOfLocationInfo = OS.tell();
}
- if (DiagOpts->ShowSourceRanges && Info.getNumRanges()) {
- FileID CaretFileID =
- SM.getFileID(SM.getInstantiationLoc(Info.getLocation()));
- bool PrintedRange = false;
-
- for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) {
- // Ignore invalid ranges.
- if (!Info.getRange(i).isValid()) continue;
-
- SourceLocation B = Info.getRange(i).getBegin();
- SourceLocation E = Info.getRange(i).getEnd();
- B = SM.getInstantiationLoc(B);
- E = SM.getInstantiationLoc(E);
-
- // If the End location and the start location are the same and are a
- // macro location, then the range was something that came from a macro
- // expansion or _Pragma. If this is an object-like macro, the best we
- // can do is to highlight the range. If this is a function-like
- // macro, we'd also like to highlight the arguments.
- if (B == E && Info.getRange(i).getEnd().isMacroID())
- E = SM.getInstantiationRange(Info.getRange(i).getEnd()).second;
-
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
-
- // If the start or end of the range is in another file, just discard
- // it.
- if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
- continue;
-
- // Add in the length of the token, so that we cover multi-char tokens.
- unsigned TokSize = 0;
- if (Info.getRange(i).isTokenRange())
- TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
-
- OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
- << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
- << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
- << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize) << '}';
- PrintedRange = true;
- }
- if (PrintedRange)
- OS << ':';
+ // Compute the column number.
+ if (DiagOpts->ShowLocation && PLoc.isValid()) {
+ if (DiagOpts->ShowColors)
+ OS.changeColor(savedColor, true);
+
+ // Emit a Visual Studio compatible line number syntax.
+ if (LangOpts && LangOpts->Microsoft) {
+ OS << PLoc.getFilename() << '(' << LineNo << ')';
+ OS << " : ";
+ } else {
+ OS << PLoc.getFilename() << ':' << LineNo << ':';
+ if (DiagOpts->ShowColumn)
+ if (unsigned ColNo = PLoc.getColumn())
+ OS << ColNo << ':';
+ }
+ if (DiagOpts->ShowSourceRanges && Info.getNumRanges()) {
+ FileID CaretFileID =
+ SM.getFileID(SM.getInstantiationLoc(Info.getLocation()));
+ bool PrintedRange = false;
+
+ for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) {
+ // Ignore invalid ranges.
+ if (!Info.getRange(i).isValid()) continue;
+
+ SourceLocation B = Info.getRange(i).getBegin();
+ SourceLocation E = Info.getRange(i).getEnd();
+ B = SM.getInstantiationLoc(B);
+ E = SM.getInstantiationLoc(E);
+
+ // If the End location and the start location are the same and are a
+ // macro location, then the range was something that came from a
+ // macro expansion or _Pragma. If this is an object-like macro, the
+ // best we can do is to highlight the range. If this is a
+ // function-like macro, we'd also like to highlight the arguments.
+ if (B == E && Info.getRange(i).getEnd().isMacroID())
+ E = SM.getInstantiationRange(Info.getRange(i).getEnd()).second;
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char
+ // tokens.
+ unsigned TokSize = 0;
+ if (Info.getRange(i).isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
+
+ OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize)
+ << '}';
+ PrintedRange = true;
+ }
+
+ if (PrintedRange)
+ OS << ':';
+ }
}
OS << ' ';
if (DiagOpts->ShowColors)
@@ -873,7 +905,8 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
std::string OptionName;
if (DiagOpts->ShowOptionNames) {
- if (const char *Opt = Diagnostic::getWarningOptionForDiag(Info.getID())) {
+    if (const char *Opt =
+            DiagnosticIDs::getWarningOptionForDiag(Info.getID())) {
OptionName = "-W";
OptionName += Opt;
} else if (Info.getID() == diag::fatal_too_many_errors) {
@@ -882,7 +915,8 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// If the diagnostic is an extension diagnostic and not enabled by default
// then it must have been turned on with -pedantic.
bool EnabledByDefault;
- if (Diagnostic::isBuiltinExtensionDiag(Info.getID(), EnabledByDefault) &&
+ if (DiagnosticIDs::isBuiltinExtensionDiag(Info.getID(),
+ EnabledByDefault) &&
!EnabledByDefault)
OptionName = "-pedantic";
}
@@ -891,7 +925,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// If the user wants to see category information, include it too.
unsigned DiagCategory = 0;
if (DiagOpts->ShowCategories)
- DiagCategory = Diagnostic::getCategoryNumberForDiag(Info.getID());
+ DiagCategory = DiagnosticIDs::getCategoryNumberForDiag(Info.getID());
// If there is any categorization information, include it.
if (!OptionName.empty() || DiagCategory != 0) {
@@ -909,7 +943,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
OutStr += llvm::utostr(DiagCategory);
else {
assert(DiagOpts->ShowCategories == 2 && "Invalid ShowCategories value");
- OutStr += Diagnostic::getCategoryNameFromID(DiagCategory);
+ OutStr += DiagnosticIDs::getCategoryNameFromID(DiagCategory);
}
}
@@ -951,7 +985,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
(LastCaretDiagnosticWasNote && Level != Diagnostic::Note) ||
Info.getNumFixItHints())) {
// Cache the LastLoc, it allows us to omit duplicate source/caret spewage.
- LastLoc = Info.getLocation();
+ LastLoc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
LastCaretDiagnosticWasNote = (Level == Diagnostic::Note);
// Get the ranges into a local array we can hack on.
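
The new fallback path above deserves a second look: when the presumed location is invalid, the printer falls back to the FileEntry and guesses that an entry whose device, inode and mode are all zero never came from a real stat() and was therefore synthesized, most likely loaded from a PCH. A sketch of that heuristic (FileEntryLike is a stand-in for clang::FileEntry):

    struct FileEntryLike {
      const char *Name;
      unsigned Device, Inode, Mode;
    };

    const char *origin(const FileEntryLike &FE) {
      // A real on-disk file always has at least one nonzero stat field, so
      // all-zero means the entry was synthesized; "(in PCH)" is a guess,
      // but a good one, as the comment in the hunk puts it.
      if (FE.Device == 0 && FE.Inode == 0 && FE.Mode == 0)
        return " (in PCH)";
      return "";
    }
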
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TypeXML.cpp b/contrib/llvm/tools/clang/lib/Frontend/TypeXML.cpp
index be9db42..a8c8f75 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TypeXML.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TypeXML.cpp
@@ -29,7 +29,7 @@ public:
TypeWriter(DocumentXML& doc) : Doc(doc) {}
#define NODE_XML( CLASS, NAME ) \
- void Visit##CLASS(CLASS* T) { \
+ void Visit##CLASS(const CLASS* T) { \
Doc.addSubNode(NAME);
#define ID_ATTRIBUTE_XML // done by the Document class itself
@@ -82,7 +82,7 @@ public:
TypeAdder(DocumentXML& doc) : Doc(doc) {}
#define NODE_XML( CLASS, NAME ) \
- void Visit##CLASS(CLASS* T) \
+ void Visit##CLASS(const CLASS* T) \
{
#define ID_ATTRIBUTE_XML
@@ -101,7 +101,7 @@ public:
//---------------------------------------------------------
void DocumentXML::addParentTypes(const Type* pType) {
- TypeAdder(*this).Visit(const_cast<Type*>(pType));
+ TypeAdder(*this).Visit(pType);
}
//---------------------------------------------------------
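
TypeXML generates its visitors with an X-macro: an include expands NODE_XML once per node class, so changing the macro body to take a const pointer const-qualified every generated Visit method at once. A toy version of the trick:

    #include <cstdio>

    struct PointerType {};
    struct ArrayType {};

    struct TypePrinter {
      void emit(const char *Tag) { std::printf("<%s/>\n", Tag); }

    // One macro body, expanded per node kind; the diff above only had to
    // edit the parameter here to const-qualify every generated visitor.
    #define NODE_XML(CLASS, NAME) \
      void Visit##CLASS(const CLASS *T) { (void)T; emit(NAME); }

      NODE_XML(PointerType, "Pointer")
      NODE_XML(ArrayType, "Array")
    #undef NODE_XML
    };
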
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticsClient.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticsClient.cpp
index 31eb28f..51b351f 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticsClient.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticsClient.cpp
@@ -23,7 +23,7 @@ using namespace clang;
VerifyDiagnosticsClient::VerifyDiagnosticsClient(Diagnostic &_Diags,
DiagnosticClient *_Primary)
: Diags(_Diags), PrimaryClient(_Primary),
- Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0), NumErrors(0) {
+ Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0) {
}
VerifyDiagnosticsClient::~VerifyDiagnosticsClient() {
@@ -57,14 +57,6 @@ void VerifyDiagnosticsClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
Buffer->HandleDiagnostic(DiagLevel, Info);
}
-// FIXME: It would be nice to just get this from the primary diagnostic client
-// or something.
-bool VerifyDiagnosticsClient::HadErrors() {
- CheckDiagnostics();
-
- return NumErrors != 0;
-}
-
//===----------------------------------------------------------------------===//
// Checking diagnostics implementation.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 63c6287..4bb85e7 100644
--- a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/FrontendTool/Utils.h"
-#include "clang/Checker/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Driver/CC1Options.h"
#include "clang/Driver/OptTable.h"
@@ -24,7 +24,7 @@
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Rewrite/FrontendActions.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Support/DynamicLibrary.h"
using namespace clang;
static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
@@ -35,6 +35,7 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
llvm_unreachable("Invalid program action!");
case ASTDump: return new ASTDumpAction();
+ case ASTDumpXML: return new ASTDumpXMLAction();
case ASTPrint: return new ASTPrintAction();
case ASTPrintXML: return new ASTPrintXMLAction();
case ASTView: return new ASTViewAction();
@@ -52,7 +53,6 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case FixIt: return new FixItAction();
case GeneratePCH: return new GeneratePCHAction();
case GeneratePTH: return new GeneratePTHAction();
- case InheritanceView: return new InheritanceViewAction();
case InitOnly: return new InitOnlyAction();
case ParseSyntaxOnly: return new SyntaxOnlyAction();
@@ -79,7 +79,7 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case RewriteMacros: return new RewriteMacrosAction();
case RewriteObjC: return new RewriteObjCAction();
case RewriteTest: return new RewriteTestAction();
- case RunAnalysis: return new AnalysisAction();
+ case RunAnalysis: return new ento::AnalysisAction();
case RunPreprocessorOnly: return new PreprocessOnlyAction();
}
}
@@ -141,7 +141,7 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
// If there were errors in processing arguments, don't do anything else.
bool Success = false;
- if (!Clang->getDiagnostics().getNumErrors()) {
+ if (!Clang->getDiagnostics().hasErrorOccurred()) {
// Create and execute the frontend action.
llvm::OwningPtr<FrontendAction> Act(CreateFrontendAction(*Clang));
if (Act) {
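
CreateFrontendBaseAction is a plain switch-factory: each program-action enumerator maps to a freshly allocated FrontendAction and the caller owns the result. The shape, with illustrative names:

    struct Action { virtual ~Action() {} };
    struct DumpAction : Action {};
    struct AnalysisAction : Action {};

    enum ProgramAction { ASTDump, RunAnalysis };

    // Caller takes ownership, as ExecuteCompilerInvocation does via OwningPtr.
    Action *createAction(ProgramAction PA) {
      switch (PA) {
      case ASTDump:     return new DumpAction();
      case RunAnalysis: return new AnalysisAction();
      }
      return 0; // unreachable for valid enumerators
    }
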
diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h
index 89bd259..a225378 100644
--- a/contrib/llvm/tools/clang/lib/Headers/altivec.h
+++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h
@@ -20,9 +20,6 @@
*
\*===----------------------------------------------------------------------===*/
-// TODO: add functions for 'vector bool ..' and 'vector pixel' argument types according to
-// the 'AltiVec Technology Programming Interface Manual'
-
#ifndef __ALTIVEC_H
#define __ALTIVEC_H
@@ -43,7 +40,9 @@ static vector signed char __ATTRS_o_ai
vec_perm(vector signed char a, vector signed char b, vector unsigned char c);
static vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c);
+vec_perm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c);
static vector bool char __ATTRS_o_ai
vec_perm(vector bool char a, vector bool char b, vector unsigned char c);
@@ -52,7 +51,9 @@ static vector short __ATTRS_o_ai
vec_perm(vector short a, vector short b, vector unsigned char c);
static vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c);
+vec_perm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c);
static vector bool short __ATTRS_o_ai
vec_perm(vector bool short a, vector bool short b, vector unsigned char c);
@@ -99,7 +100,8 @@ vec_abs(vector signed int a)
static vector float __ATTRS_o_ai
vec_abs(vector float a)
{
- vector unsigned int res = (vector unsigned int)a & (vector unsigned int)(0x7FFFFFFF);
+ vector unsigned int res = (vector unsigned int)a
+ & (vector unsigned int)(0x7FFFFFFF);
return (vector float)res;
}
@@ -112,19 +114,22 @@ vec_abs(vector float a)
static vector signed char __ATTRS_o_ai
vec_abss(vector signed char a)
{
- return __builtin_altivec_vmaxsb(a, __builtin_altivec_vsubsbs((vector signed char)(0), a));
+ return __builtin_altivec_vmaxsb
+ (a, __builtin_altivec_vsubsbs((vector signed char)(0), a));
}
static vector signed short __ATTRS_o_ai
vec_abss(vector signed short a)
{
- return __builtin_altivec_vmaxsh(a, __builtin_altivec_vsubshs((vector signed short)(0), a));
+ return __builtin_altivec_vmaxsh
+ (a, __builtin_altivec_vsubshs((vector signed short)(0), a));
}
static vector signed int __ATTRS_o_ai
vec_abss(vector signed int a)
{
- return __builtin_altivec_vmaxsw(a, __builtin_altivec_vsubsws((vector signed int)(0), a));
+ return __builtin_altivec_vmaxsw
+ (a, __builtin_altivec_vsubsws((vector signed int)(0), a));
}
/* vec_add */
@@ -1634,7 +1639,7 @@ vec_dssall(void)
/* vec_dst */
static void __attribute__((__always_inline__))
-vec_dst(void *a, int b, int c)
+vec_dst(const void *a, int b, int c)
{
__builtin_altivec_dst(a, b, c);
}
@@ -1642,7 +1647,7 @@ vec_dst(void *a, int b, int c)
/* vec_dstst */
static void __attribute__((__always_inline__))
-vec_dstst(void *a, int b, int c)
+vec_dstst(const void *a, int b, int c)
{
__builtin_altivec_dstst(a, b, c);
}
@@ -1650,7 +1655,7 @@ vec_dstst(void *a, int b, int c)
/* vec_dststt */
static void __attribute__((__always_inline__))
-vec_dststt(void *a, int b, int c)
+vec_dststt(const void *a, int b, int c)
{
__builtin_altivec_dststt(a, b, c);
}
@@ -1658,7 +1663,7 @@ vec_dststt(void *a, int b, int c)
/* vec_dstt */
static void __attribute__((__always_inline__))
-vec_dstt(void *a, int b, int c)
+vec_dstt(const void *a, int b, int c)
{
__builtin_altivec_dstt(a, b, c);
}
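
The rest of this header diff is one mechanical fix repeated for every load and prefetch intrinsic: the pointer parameters gain a const pointee, so the intrinsics accept read-only buffers. A minimal illustration of what that buys callers:

    // New-style signature: const pointee, as vec_dst/vec_ld now declare.
    static int load_first(const int *p) { return *p; }

    void demo() {
      const int table[4] = {1, 2, 3, 4};
      int scratch[4] = {9, 9, 9, 9};
      load_first(table);   // OK now; ill-formed when the parameter was int *
      load_first(scratch); // still fine: int * converts to const int *
    }
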
@@ -1698,109 +1703,109 @@ vec_vrfim(vector float a)
/* vec_ld */
static vector signed char __ATTRS_o_ai
-vec_ld(int a, vector signed char *b)
+vec_ld(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvx(a, b);
}
static vector signed char __ATTRS_o_ai
-vec_ld(int a, signed char *b)
+vec_ld(int a, const signed char *b)
{
return (vector signed char)__builtin_altivec_lvx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_ld(int a, vector unsigned char *b)
+vec_ld(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_ld(int a, unsigned char *b)
+vec_ld(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvx(a, b);
}
static vector bool char __ATTRS_o_ai
-vec_ld(int a, vector bool char *b)
+vec_ld(int a, const vector bool char *b)
{
return (vector bool char)__builtin_altivec_lvx(a, b);
}
static vector short __ATTRS_o_ai
-vec_ld(int a, vector short *b)
+vec_ld(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvx(a, b);
}
static vector short __ATTRS_o_ai
-vec_ld(int a, short *b)
+vec_ld(int a, const short *b)
{
return (vector short)__builtin_altivec_lvx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_ld(int a, vector unsigned short *b)
+vec_ld(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_ld(int a, unsigned short *b)
+vec_ld(int a, const unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvx(a, b);
}
static vector bool short __ATTRS_o_ai
-vec_ld(int a, vector bool short *b)
+vec_ld(int a, const vector bool short *b)
{
return (vector bool short)__builtin_altivec_lvx(a, b);
}
static vector pixel __ATTRS_o_ai
-vec_ld(int a, vector pixel *b)
+vec_ld(int a, const vector pixel *b)
{
return (vector pixel)__builtin_altivec_lvx(a, b);
}
static vector int __ATTRS_o_ai
-vec_ld(int a, vector int *b)
+vec_ld(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvx(a, b);
}
static vector int __ATTRS_o_ai
-vec_ld(int a, int *b)
+vec_ld(int a, const int *b)
{
return (vector int)__builtin_altivec_lvx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_ld(int a, vector unsigned int *b)
+vec_ld(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_ld(int a, unsigned int *b)
+vec_ld(int a, const unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvx(a, b);
}
static vector bool int __ATTRS_o_ai
-vec_ld(int a, vector bool int *b)
+vec_ld(int a, const vector bool int *b)
{
return (vector bool int)__builtin_altivec_lvx(a, b);
}
static vector float __ATTRS_o_ai
-vec_ld(int a, vector float *b)
+vec_ld(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvx(a, b);
}
static vector float __ATTRS_o_ai
-vec_ld(int a, float *b)
+vec_ld(int a, const float *b)
{
return (vector float)__builtin_altivec_lvx(a, b);
}
@@ -1808,109 +1813,109 @@ vec_ld(int a, float *b)
/* vec_lvx */
static vector signed char __ATTRS_o_ai
-vec_lvx(int a, vector signed char *b)
+vec_lvx(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvx(a, b);
}
static vector signed char __ATTRS_o_ai
-vec_lvx(int a, signed char *b)
+vec_lvx(int a, const signed char *b)
{
return (vector signed char)__builtin_altivec_lvx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvx(int a, vector unsigned char *b)
+vec_lvx(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvx(int a, unsigned char *b)
+vec_lvx(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvx(a, b);
}
static vector bool char __ATTRS_o_ai
-vec_lvx(int a, vector bool char *b)
+vec_lvx(int a, const vector bool char *b)
{
return (vector bool char)__builtin_altivec_lvx(a, b);
}
static vector short __ATTRS_o_ai
-vec_lvx(int a, vector short *b)
+vec_lvx(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvx(a, b);
}
static vector short __ATTRS_o_ai
-vec_lvx(int a, short *b)
+vec_lvx(int a, const short *b)
{
return (vector short)__builtin_altivec_lvx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lvx(int a, vector unsigned short *b)
+vec_lvx(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lvx(int a, unsigned short *b)
+vec_lvx(int a, const unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvx(a, b);
}
static vector bool short __ATTRS_o_ai
-vec_lvx(int a, vector bool short *b)
+vec_lvx(int a, const vector bool short *b)
{
return (vector bool short)__builtin_altivec_lvx(a, b);
}
static vector pixel __ATTRS_o_ai
-vec_lvx(int a, vector pixel *b)
+vec_lvx(int a, const vector pixel *b)
{
return (vector pixel)__builtin_altivec_lvx(a, b);
}
static vector int __ATTRS_o_ai
-vec_lvx(int a, vector int *b)
+vec_lvx(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvx(a, b);
}
static vector int __ATTRS_o_ai
-vec_lvx(int a, int *b)
+vec_lvx(int a, const int *b)
{
return (vector int)__builtin_altivec_lvx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lvx(int a, vector unsigned int *b)
+vec_lvx(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lvx(int a, unsigned int *b)
+vec_lvx(int a, const unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvx(a, b);
}
static vector bool int __ATTRS_o_ai
-vec_lvx(int a, vector bool int *b)
+vec_lvx(int a, const vector bool int *b)
{
return (vector bool int)__builtin_altivec_lvx(a, b);
}
static vector float __ATTRS_o_ai
-vec_lvx(int a, vector float *b)
+vec_lvx(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvx(a, b);
}
static vector float __ATTRS_o_ai
-vec_lvx(int a, float *b)
+vec_lvx(int a, const float *b)
{
return (vector float)__builtin_altivec_lvx(a, b);
}
@@ -1918,43 +1923,43 @@ vec_lvx(int a, float *b)
/* vec_lde */
static vector signed char __ATTRS_o_ai
-vec_lde(int a, vector signed char *b)
+vec_lde(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvebx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lde(int a, vector unsigned char *b)
+vec_lde(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvebx(a, b);
}
static vector short __ATTRS_o_ai
-vec_lde(int a, vector short *b)
+vec_lde(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvehx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lde(int a, vector unsigned short *b)
+vec_lde(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvehx(a, b);
}
static vector int __ATTRS_o_ai
-vec_lde(int a, vector int *b)
+vec_lde(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvewx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lde(int a, vector unsigned int *b)
+vec_lde(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvewx(a, b);
}
static vector float __ATTRS_o_ai
-vec_lde(int a, vector float *b)
+vec_lde(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvewx(a, b);
}
@@ -1962,13 +1967,13 @@ vec_lde(int a, vector float *b)
/* vec_lvebx */
static vector signed char __ATTRS_o_ai
-vec_lvebx(int a, vector signed char *b)
+vec_lvebx(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvebx(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvebx(int a, vector unsigned char *b)
+vec_lvebx(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvebx(a, b);
}
@@ -1976,13 +1981,13 @@ vec_lvebx(int a, vector unsigned char *b)
/* vec_lvehx */
static vector short __ATTRS_o_ai
-vec_lvehx(int a, vector short *b)
+vec_lvehx(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvehx(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lvehx(int a, vector unsigned short *b)
+vec_lvehx(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvehx(a, b);
}
@@ -1990,19 +1995,19 @@ vec_lvehx(int a, vector unsigned short *b)
/* vec_lvewx */
static vector int __ATTRS_o_ai
-vec_lvewx(int a, vector int *b)
+vec_lvewx(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvewx(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lvewx(int a, vector unsigned int *b)
+vec_lvewx(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvewx(a, b);
}
static vector float __ATTRS_o_ai
-vec_lvewx(int a, vector float *b)
+vec_lvewx(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvewx(a, b);
}
@@ -2010,109 +2015,109 @@ vec_lvewx(int a, vector float *b)
/* vec_ldl */
static vector signed char __ATTRS_o_ai
-vec_ldl(int a, vector signed char *b)
+vec_ldl(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvxl(a, b);
}
static vector signed char __ATTRS_o_ai
-vec_ldl(int a, signed char *b)
+vec_ldl(int a, const signed char *b)
{
return (vector signed char)__builtin_altivec_lvxl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_ldl(int a, vector unsigned char *b)
+vec_ldl(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvxl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_ldl(int a, unsigned char *b)
+vec_ldl(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvxl(a, b);
}
static vector bool char __ATTRS_o_ai
-vec_ldl(int a, vector bool char *b)
+vec_ldl(int a, const vector bool char *b)
{
return (vector bool char)__builtin_altivec_lvxl(a, b);
}
static vector short __ATTRS_o_ai
-vec_ldl(int a, vector short *b)
+vec_ldl(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvxl(a, b);
}
static vector short __ATTRS_o_ai
-vec_ldl(int a, short *b)
+vec_ldl(int a, const short *b)
{
return (vector short)__builtin_altivec_lvxl(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_ldl(int a, vector unsigned short *b)
+vec_ldl(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvxl(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_ldl(int a, unsigned short *b)
+vec_ldl(int a, const unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvxl(a, b);
}
static vector bool short __ATTRS_o_ai
-vec_ldl(int a, vector bool short *b)
+vec_ldl(int a, const vector bool short *b)
{
return (vector bool short)__builtin_altivec_lvxl(a, b);
}
static vector pixel __ATTRS_o_ai
-vec_ldl(int a, vector pixel *b)
+vec_ldl(int a, const vector pixel *b)
{
  return (vector pixel)__builtin_altivec_lvxl(a, b);
}
static vector int __ATTRS_o_ai
-vec_ldl(int a, vector int *b)
+vec_ldl(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvxl(a, b);
}
static vector int __ATTRS_o_ai
-vec_ldl(int a, int *b)
+vec_ldl(int a, const int *b)
{
return (vector int)__builtin_altivec_lvxl(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_ldl(int a, vector unsigned int *b)
+vec_ldl(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvxl(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_ldl(int a, unsigned int *b)
+vec_ldl(int a, const unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvxl(a, b);
}
static vector bool int __ATTRS_o_ai
-vec_ldl(int a, vector bool int *b)
+vec_ldl(int a, const vector bool int *b)
{
return (vector bool int)__builtin_altivec_lvxl(a, b);
}
static vector float __ATTRS_o_ai
-vec_ldl(int a, vector float *b)
+vec_ldl(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvxl(a, b);
}
static vector float __ATTRS_o_ai
-vec_ldl(int a, float *b)
+vec_ldl(int a, const float *b)
{
return (vector float)__builtin_altivec_lvxl(a, b);
}
@@ -2120,109 +2125,109 @@ vec_ldl(int a, float *b)
/* vec_lvxl */
static vector signed char __ATTRS_o_ai
-vec_lvxl(int a, vector signed char *b)
+vec_lvxl(int a, const vector signed char *b)
{
return (vector signed char)__builtin_altivec_lvxl(a, b);
}
static vector signed char __ATTRS_o_ai
-vec_lvxl(int a, signed char *b)
+vec_lvxl(int a, const signed char *b)
{
return (vector signed char)__builtin_altivec_lvxl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvxl(int a, vector unsigned char *b)
+vec_lvxl(int a, const vector unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvxl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvxl(int a, unsigned char *b)
+vec_lvxl(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvxl(a, b);
}
static vector bool char __ATTRS_o_ai
-vec_lvxl(int a, vector bool char *b)
+vec_lvxl(int a, const vector bool char *b)
{
return (vector bool char)__builtin_altivec_lvxl(a, b);
}
static vector short __ATTRS_o_ai
-vec_lvxl(int a, vector short *b)
+vec_lvxl(int a, const vector short *b)
{
return (vector short)__builtin_altivec_lvxl(a, b);
}
static vector short __ATTRS_o_ai
-vec_lvxl(int a, short *b)
+vec_lvxl(int a, const short *b)
{
return (vector short)__builtin_altivec_lvxl(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lvxl(int a, vector unsigned short *b)
+vec_lvxl(int a, const vector unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvxl(a, b);
}
static vector unsigned short __ATTRS_o_ai
-vec_lvxl(int a, unsigned short *b)
+vec_lvxl(int a, const unsigned short *b)
{
return (vector unsigned short)__builtin_altivec_lvxl(a, b);
}
static vector bool short __ATTRS_o_ai
-vec_lvxl(int a, vector bool short *b)
+vec_lvxl(int a, const vector bool short *b)
{
return (vector bool short)__builtin_altivec_lvxl(a, b);
}
static vector pixel __ATTRS_o_ai
-vec_lvxl(int a, vector pixel *b)
+vec_lvxl(int a, const vector pixel *b)
{
return (vector pixel)__builtin_altivec_lvxl(a, b);
}
static vector int __ATTRS_o_ai
-vec_lvxl(int a, vector int *b)
+vec_lvxl(int a, const vector int *b)
{
return (vector int)__builtin_altivec_lvxl(a, b);
}
static vector int __ATTRS_o_ai
-vec_lvxl(int a, int *b)
+vec_lvxl(int a, const int *b)
{
return (vector int)__builtin_altivec_lvxl(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lvxl(int a, vector unsigned int *b)
+vec_lvxl(int a, const vector unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvxl(a, b);
}
static vector unsigned int __ATTRS_o_ai
-vec_lvxl(int a, unsigned int *b)
+vec_lvxl(int a, const unsigned int *b)
{
return (vector unsigned int)__builtin_altivec_lvxl(a, b);
}
static vector bool int __ATTRS_o_ai
-vec_lvxl(int a, vector bool int *b)
+vec_lvxl(int a, const vector bool int *b)
{
return (vector bool int)__builtin_altivec_lvxl(a, b);
}
static vector float __ATTRS_o_ai
-vec_lvxl(int a, vector float *b)
+vec_lvxl(int a, const vector float *b)
{
return (vector float)__builtin_altivec_lvxl(a, b);
}
static vector float __ATTRS_o_ai
-vec_lvxl(int a, float *b)
+vec_lvxl(int a, const float *b)
{
return (vector float)__builtin_altivec_lvxl(a, b);
}
@@ -2246,43 +2251,43 @@ vec_vlogefp(vector float a)
/* vec_lvsl */
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, signed char *b)
+vec_lvsl(int a, const signed char *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, unsigned char *b)
+vec_lvsl(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, short *b)
+vec_lvsl(int a, const short *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, unsigned short *b)
+vec_lvsl(int a, const unsigned short *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, int *b)
+vec_lvsl(int a, const int *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, unsigned int *b)
+vec_lvsl(int a, const unsigned int *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsl(int a, float *b)
+vec_lvsl(int a, const float *b)
{
return (vector unsigned char)__builtin_altivec_lvsl(a, b);
}
@@ -2290,43 +2295,43 @@ vec_lvsl(int a, float *b)
/* vec_lvsr */
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, signed char *b)
+vec_lvsr(int a, const signed char *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, unsigned char *b)
+vec_lvsr(int a, const unsigned char *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, short *b)
+vec_lvsr(int a, const short *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, unsigned short *b)
+vec_lvsr(int a, const unsigned short *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, int *b)
+vec_lvsr(int a, const int *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, unsigned int *b)
+vec_lvsr(int a, const unsigned int *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
static vector unsigned char __ATTRS_o_ai
-vec_lvsr(int a, float *b)
+vec_lvsr(int a, const float *b)
{
return (vector unsigned char)__builtin_altivec_lvsr(a, b);
}
@@ -2357,7 +2362,9 @@ vec_madds(vector signed short a, vector signed short b, vector signed short c)
/* vec_vmhaddshs */
static vector signed short __attribute__((__always_inline__))
-vec_vmhaddshs(vector signed short a, vector signed short b, vector signed short c)
+vec_vmhaddshs(vector signed short a,
+ vector signed short b,
+ vector signed short c)
{
return __builtin_altivec_vmhaddshs(a, b, c);
}
@@ -3261,7 +3268,9 @@ vec_mladd(vector unsigned short a, vector short b, vector short c)
}
static vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+vec_mladd(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
{
return a * b + c;
}
@@ -3287,7 +3296,9 @@ vec_vmladduhm(vector unsigned short a, vector short b, vector short c)
}
static vector unsigned short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+vec_vmladduhm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
{
return a * b + c;
}
@@ -3329,7 +3340,9 @@ vec_msum(vector short a, vector short b, vector int c)
}
static vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+vec_msum(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
{
return __builtin_altivec_vmsumuhm(a, b, c);
}
@@ -3345,7 +3358,9 @@ vec_vmsummbm(vector signed char a, vector unsigned char b, vector int c)
/* vec_vmsumubm */
static vector unsigned int __attribute__((__always_inline__))
-vec_vmsumubm(vector unsigned char a, vector unsigned char b, vector unsigned int c)
+vec_vmsumubm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned int c)
{
return __builtin_altivec_vmsumubm(a, b, c);
}
@@ -3361,7 +3376,9 @@ vec_vmsumshm(vector short a, vector short b, vector int c)
/* vec_vmsumuhm */
static vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhm(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+vec_vmsumuhm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
{
return __builtin_altivec_vmsumuhm(a, b, c);
}
@@ -3375,7 +3392,9 @@ vec_msums(vector short a, vector short b, vector int c)
}
static vector unsigned int __ATTRS_o_ai
-vec_msums(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+vec_msums(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
{
return __builtin_altivec_vmsumuhs(a, b, c);
}
@@ -3391,7 +3410,9 @@ vec_vmsumshs(vector short a, vector short b, vector int c)
/* vec_vmsumuhs */
static vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhs(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+vec_vmsumuhs(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
{
return __builtin_altivec_vmsumuhs(a, b, c);
}
@@ -4263,43 +4284,54 @@ vec_vpkswus(vector unsigned int a, vector unsigned int b)
vector signed char __ATTRS_o_ai
vec_perm(vector signed char a, vector signed char b, vector unsigned char c)
{
- return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+vec_perm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c)
{
- return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool char __ATTRS_o_ai
vec_perm(vector bool char a, vector bool char b, vector unsigned char c)
{
- return (vector bool char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector short __ATTRS_o_ai
vec_perm(vector short a, vector short b, vector unsigned char c)
{
- return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c)
+vec_perm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c)
{
- return (vector unsigned short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool short __ATTRS_o_ai
vec_perm(vector bool short a, vector bool short b, vector unsigned char c)
{
- return (vector bool short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector pixel __ATTRS_o_ai
vec_perm(vector pixel a, vector pixel b, vector unsigned char c)
{
- return (vector pixel)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector int __ATTRS_o_ai
@@ -4311,19 +4343,22 @@ vec_perm(vector int a, vector int b, vector unsigned char c)
vector unsigned int __ATTRS_o_ai
vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
{
- return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool int __ATTRS_o_ai
vec_perm(vector bool int a, vector bool int b, vector unsigned char c)
{
- return (vector bool int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector float __ATTRS_o_ai
vec_perm(vector float a, vector float b, vector unsigned char c)
{
- return (vector float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
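/* Semantics of the vec_perm overloads above (illustrative note): each result
   byte is selected from the 32-byte concatenation a:b by the low five bits
   of the corresponding byte of c. Every element type funnels into the one
   builtin, __builtin_altivec_vperm_4si; the overloads differ only in the
   casts on the way in and out:

     vector unsigned char idx;  /* permute control, e.g. from vec_lvsl */
     vector float merged = vec_perm(lo, hi, idx);  /* lo, hi: vector float */
*/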
/* vec_vperm */
@@ -4331,43 +4366,54 @@ vec_perm(vector float a, vector float b, vector unsigned char c)
vector signed char __ATTRS_o_ai
vec_vperm(vector signed char a, vector signed char b, vector unsigned char c)
{
- return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector unsigned char __ATTRS_o_ai
-vec_vperm(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+vec_vperm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c)
{
- return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool char __ATTRS_o_ai
vec_vperm(vector bool char a, vector bool char b, vector unsigned char c)
{
- return (vector bool char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector short __ATTRS_o_ai
vec_vperm(vector short a, vector short b, vector unsigned char c)
{
- return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector unsigned short __ATTRS_o_ai
-vec_vperm(vector unsigned short a, vector unsigned short b, vector unsigned char c)
+vec_vperm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c)
{
- return (vector unsigned short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool short __ATTRS_o_ai
vec_vperm(vector bool short a, vector bool short b, vector unsigned char c)
{
- return (vector bool short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector pixel __ATTRS_o_ai
vec_vperm(vector pixel a, vector pixel b, vector unsigned char c)
{
- return (vector pixel)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector int __ATTRS_o_ai
@@ -4379,19 +4425,22 @@ vec_vperm(vector int a, vector int b, vector unsigned char c)
vector unsigned int __ATTRS_o_ai
vec_vperm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
{
- return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector bool int __ATTRS_o_ai
vec_vperm(vector bool int a, vector bool int b, vector unsigned char c)
{
- return (vector bool int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
vector float __ATTRS_o_ai
vec_vperm(vector float a, vector float b, vector unsigned char c)
{
- return (vector float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
}
/* vec_re */
@@ -4575,7 +4624,9 @@ vec_sel(vector short a, vector short b, vector bool short c)
}
static vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+vec_sel(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
{
return (a & ~c) | (b & c);
}
@@ -4637,14 +4688,16 @@ vec_sel(vector bool int a, vector bool int b, vector bool int c)
static vector float __ATTRS_o_ai
vec_sel(vector float a, vector float b, vector unsigned int c)
{
- vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
return (vector float)res;
}
static vector float __ATTRS_o_ai
vec_sel(vector float a, vector float b, vector bool int c)
{
- vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
return (vector float)res;
}
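/* The identity behind vec_sel/vec_vsel (illustrative note): (a & ~c) | (b & c)
   takes each bit from b where c has a 1 and from a where c has a 0, so c acts
   as a per-bit mask. Scalar analogue:

     sel(0xF0, 0x0F, 0x33) == (0xF0 & ~0x33) | (0x0F & 0x33)
                           == 0xC0 | 0x03
                           == 0xC3

   The float overloads round-trip through vector int because the bitwise
   operators are not defined on vector float. */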
@@ -4699,7 +4752,9 @@ vec_vsel(vector short a, vector short b, vector bool short c)
}
static vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+vec_vsel(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
{
return (a & ~c) | (b & c);
}
@@ -4761,14 +4816,16 @@ vec_vsel(vector bool int a, vector bool int b, vector bool int c)
static vector float __ATTRS_o_ai
vec_vsel(vector float a, vector float b, vector unsigned int c)
{
- vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
return (vector float)res;
}
static vector float __ATTRS_o_ai
vec_vsel(vector float a, vector float b, vector bool int c)
{
- vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
return (vector float)res;
}
@@ -4997,37 +5054,43 @@ vec_vsldoi(vector float a, vector float b, unsigned char c)
static vector signed char __ATTRS_o_ai
vec_sll(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_sll(vector signed char a, vector unsigned short b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_sll(vector signed char a, vector unsigned int b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_sll(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_sll(vector unsigned char a, vector unsigned short b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_sll(vector unsigned char a, vector unsigned int b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool char __ATTRS_o_ai
@@ -5069,19 +5132,22 @@ vec_sll(vector short a, vector unsigned int b)
static vector unsigned short __ATTRS_o_ai
vec_sll(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_sll(vector unsigned short a, vector unsigned short b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_sll(vector unsigned short a, vector unsigned int b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool short __ATTRS_o_ai
@@ -5141,19 +5207,22 @@ vec_sll(vector int a, vector unsigned int b)
static vector unsigned int __ATTRS_o_ai
vec_sll(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_sll(vector unsigned int a, vector unsigned short b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_sll(vector unsigned int a, vector unsigned int b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool int __ATTRS_o_ai
@@ -5179,37 +5248,43 @@ vec_sll(vector bool int a, vector unsigned int b)
static vector signed char __ATTRS_o_ai
vec_vsl(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vsl(vector signed char a, vector unsigned short b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vsl(vector signed char a, vector unsigned int b)
{
- return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsl(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsl(vector unsigned char a, vector unsigned short b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsl(vector unsigned char a, vector unsigned int b)
{
- return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool char __ATTRS_o_ai
@@ -5251,19 +5326,22 @@ vec_vsl(vector short a, vector unsigned int b)
static vector unsigned short __ATTRS_o_ai
vec_vsl(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vsl(vector unsigned short a, vector unsigned short b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vsl(vector unsigned short a, vector unsigned int b)
{
- return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool short __ATTRS_o_ai
@@ -5323,19 +5401,22 @@ vec_vsl(vector int a, vector unsigned int b)
static vector unsigned int __ATTRS_o_ai
vec_vsl(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vsl(vector unsigned int a, vector unsigned short b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vsl(vector unsigned int a, vector unsigned int b)
{
- return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
}
static vector bool int __ATTRS_o_ai
@@ -5361,25 +5442,29 @@ vec_vsl(vector bool int a, vector unsigned int b)
static vector signed char __ATTRS_o_ai
vec_slo(vector signed char a, vector signed char b)
{
- return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_slo(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_slo(vector unsigned char a, vector signed char b)
{
- return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_slo(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector short __ATTRS_o_ai
@@ -5397,13 +5482,15 @@ vec_slo(vector short a, vector unsigned char b)
static vector unsigned short __ATTRS_o_ai
vec_slo(vector unsigned short a, vector signed char b)
{
- return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_slo(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector pixel __ATTRS_o_ai
@@ -5433,13 +5520,15 @@ vec_slo(vector int a, vector unsigned char b)
static vector unsigned int __ATTRS_o_ai
vec_slo(vector unsigned int a, vector signed char b)
{
- return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_slo(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector float __ATTRS_o_ai
@@ -5459,25 +5548,29 @@ vec_slo(vector float a, vector unsigned char b)
static vector signed char __ATTRS_o_ai
vec_vslo(vector signed char a, vector signed char b)
{
- return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vslo(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vslo(vector unsigned char a, vector signed char b)
{
- return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vslo(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector short __ATTRS_o_ai
@@ -5495,13 +5588,15 @@ vec_vslo(vector short a, vector unsigned char b)
static vector unsigned short __ATTRS_o_ai
vec_vslo(vector unsigned short a, vector signed char b)
{
- return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vslo(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector pixel __ATTRS_o_ai
@@ -5531,13 +5626,15 @@ vec_vslo(vector int a, vector unsigned char b)
static vector unsigned int __ATTRS_o_ai
vec_vslo(vector unsigned int a, vector signed char b)
{
- return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vslo(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
}
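/* vec_sll vs. vec_slo (illustrative note): both treat the operand as a single
   128-bit quantity. vsl shifts left by 0..7 bits, taken from the low three
   bits of the shift operand (which the ISA expects to be replicated across
   all its bytes); vslo shifts left by whole octets, taken from bits 121:124.
   A shift by an arbitrary bit count is conventionally vslo followed by vsl.
   The right-shift families (vec_srl/vec_vsr, vec_sro/vec_vsro) below mirror
   this split. */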
static vector float __ATTRS_o_ai
@@ -5576,64 +5673,72 @@ static vector short __ATTRS_o_ai
vec_splat(vector short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector unsigned short __ATTRS_o_ai
vec_splat(vector unsigned short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector bool short __ATTRS_o_ai
vec_splat(vector bool short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector pixel __ATTRS_o_ai
vec_splat(vector pixel a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector int __ATTRS_o_ai
vec_splat(vector int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector unsigned int __ATTRS_o_ai
vec_splat(vector unsigned int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector bool int __ATTRS_o_ai
vec_splat(vector bool int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector float __ATTRS_o_ai
vec_splat(vector float a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
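/* Note on the rewritten splat bodies (the rationale here is an assumption):
   hoisting b+1..b+3 into b1..b3 keeps each vector literal on a single line.
   For a word splat, the permute pattern is the four byte offsets of element
   b repeated four times; e.g. for b == 1:

     b *= 4;                     /* b == 4              */
     unsigned char b1 = b + 1;   /* 5; b2 == 6, b3 == 7 */
     /* pattern: 4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7 */
*/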
/* vec_vspltb */
@@ -5666,32 +5771,36 @@ static vector short __ATTRS_o_ai
vec_vsplth(vector short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector unsigned short __ATTRS_o_ai
vec_vsplth(vector unsigned short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector bool short __ATTRS_o_ai
vec_vsplth(vector bool short a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
static vector pixel __ATTRS_o_ai
vec_vsplth(vector pixel a, unsigned char b)
{
b *= 2;
+ unsigned char b1 = b + 1;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
}
/* vec_vspltw */
@@ -5702,32 +5811,36 @@ static vector int __ATTRS_o_ai
vec_vspltw(vector int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector unsigned int __ATTRS_o_ai
vec_vspltw(vector unsigned int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector bool int __ATTRS_o_ai
vec_vspltw(vector bool int a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
static vector float __ATTRS_o_ai
vec_vspltw(vector float a, unsigned char b)
{
b *= 4;
+ unsigned char b1 = b + 1, b2 = b + 2, b3 = b + 3;
return vec_perm(a, a, (vector unsigned char)
- (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
}
/* vec_splat_s8 */
@@ -5988,37 +6101,43 @@ vec_vsraw(vector unsigned int a, vector unsigned int b)
static vector signed char __ATTRS_o_ai
vec_srl(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_srl(vector signed char a, vector unsigned short b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_srl(vector signed char a, vector unsigned int b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_srl(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_srl(vector unsigned char a, vector unsigned short b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_srl(vector unsigned char a, vector unsigned int b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool char __ATTRS_o_ai
@@ -6060,19 +6179,22 @@ vec_srl(vector short a, vector unsigned int b)
static vector unsigned short __ATTRS_o_ai
vec_srl(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_srl(vector unsigned short a, vector unsigned short b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_srl(vector unsigned short a, vector unsigned int b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool short __ATTRS_o_ai
@@ -6132,19 +6254,22 @@ vec_srl(vector int a, vector unsigned int b)
static vector unsigned int __ATTRS_o_ai
vec_srl(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_srl(vector unsigned int a, vector unsigned short b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_srl(vector unsigned int a, vector unsigned int b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool int __ATTRS_o_ai
@@ -6170,37 +6295,43 @@ vec_srl(vector bool int a, vector unsigned int b)
static vector signed char __ATTRS_o_ai
vec_vsr(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vsr(vector signed char a, vector unsigned short b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vsr(vector signed char a, vector unsigned int b)
{
- return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsr(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsr(vector unsigned char a, vector unsigned short b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsr(vector unsigned char a, vector unsigned int b)
{
- return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool char __ATTRS_o_ai
@@ -6242,19 +6373,22 @@ vec_vsr(vector short a, vector unsigned int b)
static vector unsigned short __ATTRS_o_ai
vec_vsr(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vsr(vector unsigned short a, vector unsigned short b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vsr(vector unsigned short a, vector unsigned int b)
{
- return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool short __ATTRS_o_ai
@@ -6314,19 +6448,22 @@ vec_vsr(vector int a, vector unsigned int b)
static vector unsigned int __ATTRS_o_ai
vec_vsr(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vsr(vector unsigned int a, vector unsigned short b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vsr(vector unsigned int a, vector unsigned int b)
{
- return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
}
static vector bool int __ATTRS_o_ai
@@ -6352,25 +6489,29 @@ vec_vsr(vector bool int a, vector unsigned int b)
static vector signed char __ATTRS_o_ai
vec_sro(vector signed char a, vector signed char b)
{
- return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_sro(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_sro(vector unsigned char a, vector signed char b)
{
- return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_sro(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector short __ATTRS_o_ai
@@ -6388,13 +6529,15 @@ vec_sro(vector short a, vector unsigned char b)
static vector unsigned short __ATTRS_o_ai
vec_sro(vector unsigned short a, vector signed char b)
{
- return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_sro(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector pixel __ATTRS_o_ai
@@ -6424,13 +6567,15 @@ vec_sro(vector int a, vector unsigned char b)
static vector unsigned int __ATTRS_o_ai
vec_sro(vector unsigned int a, vector signed char b)
{
- return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_sro(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector float __ATTRS_o_ai
@@ -6450,25 +6595,29 @@ vec_sro(vector float a, vector unsigned char b)
static vector signed char __ATTRS_o_ai
vec_vsro(vector signed char a, vector signed char b)
{
- return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector signed char __ATTRS_o_ai
vec_vsro(vector signed char a, vector unsigned char b)
{
- return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsro(vector unsigned char a, vector signed char b)
{
- return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned char __ATTRS_o_ai
vec_vsro(vector unsigned char a, vector unsigned char b)
{
- return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector short __ATTRS_o_ai
@@ -6486,13 +6635,15 @@ vec_vsro(vector short a, vector unsigned char b)
static vector unsigned short __ATTRS_o_ai
vec_vsro(vector unsigned short a, vector signed char b)
{
- return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned short __ATTRS_o_ai
vec_vsro(vector unsigned short a, vector unsigned char b)
{
- return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector pixel __ATTRS_o_ai
@@ -6522,13 +6673,15 @@ vec_vsro(vector int a, vector unsigned char b)
static vector unsigned int __ATTRS_o_ai
vec_vsro(vector unsigned int a, vector signed char b)
{
- return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector unsigned int __ATTRS_o_ai
vec_vsro(vector unsigned int a, vector unsigned char b)
{
- return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
}
static vector float __ATTRS_o_ai
@@ -8379,7 +8532,1410 @@ vec_vxor(vector float a, vector bool int b)
return (vector float)res;
}
-/* ------------------------------ predicates ------------------------------------ */
+/* ------------------------ extensions for CBEA ----------------------------- */
+
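/* CBEA is the Cell Broadband Engine Architecture; the additions below mirror
   the extended intrinsics from the Cell SDK: element accessors
   (vec_extract/vec_insert), unaligned-load helpers (vec_lvlx/vec_lvrx and
   their LRU-hinted *l variants), and the matching unaligned stores. */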
+/* vec_extract */
+
+static signed char __ATTRS_o_ai
+vec_extract(vector signed char a, int b)
+{
+ return a[b];
+}
+
+static unsigned char __ATTRS_o_ai
+vec_extract(vector unsigned char a, int b)
+{
+ return a[b];
+}
+
+static short __ATTRS_o_ai
+vec_extract(vector short a, int b)
+{
+ return a[b];
+}
+
+static unsigned short __ATTRS_o_ai
+vec_extract(vector unsigned short a, int b)
+{
+ return a[b];
+}
+
+static int __ATTRS_o_ai
+vec_extract(vector int a, int b)
+{
+ return a[b];
+}
+
+static unsigned int __ATTRS_o_ai
+vec_extract(vector unsigned int a, int b)
+{
+ return a[b];
+}
+
+static float __ATTRS_o_ai
+vec_extract(vector float a, int b)
+{
+ return a[b];
+}
+
+/* vec_insert */
+
+static vector signed char __ATTRS_o_ai
+vec_insert(signed char a, vector signed char b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_insert(unsigned char a, vector unsigned char b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector short __ATTRS_o_ai
+vec_insert(short a, vector short b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_insert(unsigned short a, vector unsigned short b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector int __ATTRS_o_ai
+vec_insert(int a, vector int b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_insert(unsigned int a, vector unsigned int b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector float __ATTRS_o_ai
+vec_insert(float a, vector float b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
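/* Usage sketch for the element accessors above (illustrative; values are
   assumed): b and c index elements, not bytes, and must be in range.

     vector int v = (vector int)(1, 2, 3, 4);
     int e = vec_extract(v, 2);    /* 3            */
     v     = vec_insert(7, v, 0);  /* (7, 2, 3, 4) */

   The bodies rely on clang's vector subscripting (a[b]). */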
+/* vec_lvlx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, const signed char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, const vector signed char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, const unsigned char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvlx(int a, const vector bool char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, const short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, const vector short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int a, const unsigned short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvlx(int a, const vector bool short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvlx(int a, const vector pixel *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector pixel)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlx(int a, const int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlx(int a, const vector int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int a, const unsigned int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvlx(int a, const vector bool int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlx(int a, const float *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector float)(0),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlx(int a, const vector float *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector float)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
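/* How vec_lvlx builds an unaligned "load left" from aligned primitives
   (illustrative breakdown): vec_ld fetches the aligned quadword containing
   a + b, vec_lvsl turns the low four address bits into a rotate mask, and
   vec_perm rotates the wanted bytes to the front, zero-filling from the
   (vector T)(0) operand:

     aligned = vec_ld(a, b);                  /* aligned 16 bytes       */
     mask    = vec_lvsl(a, b);                /* (sh, sh+1, ..., sh+15) */
     result  = vec_perm(aligned, zero, mask);

   vec_lvlxl below is identical except that it loads with vec_ldl, which
   marks the touched cache line least-recently-used. */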
+/* vec_lvlxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int a, const signed char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int a, const vector signed char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlxl(int a, const short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlxl(int a, const vector short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvlxl(int a, const vector pixel *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector pixel)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlxl(int a, const int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlxl(int a, const vector int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlxl(int a, const float *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector float)(0),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlxl(int a, const vector float *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector float)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_lvrx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvrx(int a, const signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrx(int a, const vector signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int a, const unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvrx(int a, const vector bool char *b)
+{
+ return vec_perm((vector bool char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrx(int a, const short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrx(int a, const vector short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int a, const unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvrx(int a, const vector bool short *b)
+{
+ return vec_perm((vector bool short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvrx(int a, const vector pixel *b)
+{
+ return vec_perm((vector pixel)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrx(int a, const int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrx(int a, const vector int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int a, const unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvrx(int a, const vector bool int *b)
+{
+ return vec_perm((vector bool int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrx(int a, const float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrx(int a, const vector float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_lvrxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int a, const signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int a, const vector signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool char *b)
+{
+ return vec_perm((vector bool char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrxl(int a, const short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrxl(int a, const vector short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool short *b)
+{
+ return vec_perm((vector bool short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvrxl(int a, const vector pixel *b)
+{
+ return vec_perm((vector pixel)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrxl(int a, const int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrxl(int a, const vector int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool int *b)
+{
+ return vec_perm((vector bool int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrxl(int a, const float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrxl(int a, const vector float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_stvlx */
+
+static void __ATTRS_o_ai
+vec_stvlx(vector signed char a, int b, signed char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector signed char a, int b, vector signed char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool char a, int b, vector bool char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector short a, int b, short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector short a, int b, vector short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool short a, int b, vector bool short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector pixel a, int b, vector pixel *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector int a, int b, int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector int a, int b, vector int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool int a, int b, vector bool int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector float a, int b, vector float *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
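/* The stvlx family is a read-modify-write (illustrative summary): it loads
   the bytes that must survive with vec_lvrx, merges in the new data through
   vec_perm with a vec_lvsr mask, and stores the whole quadword back with
   vec_st. Because all 16 bytes are re-read and rewritten, the sequence is
   not atomic with respect to other writers of the same quadword. The
   vec_stvlxl variants below differ only in storing with vec_stl. */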
+/* vec_stvlxl */
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector signed char a, int b, signed char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector signed char a, int b, vector signed char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool char a, int b, vector bool char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector short a, int b, short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector short a, int b, vector short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool short a, int b, vector bool short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector pixel a, int b, vector pixel *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector int a, int b, int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector int a, int b, vector int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool int a, int b, vector bool int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector float a, int b, vector float *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_stvrx */
+
+static void __ATTRS_o_ai
+vec_stvrx(vector signed char a, int b, signed char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector signed char a, int b, vector signed char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool char a, int b, vector bool char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector short a, int b, short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector short a, int b, vector short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool short a, int b, vector bool short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector pixel a, int b, vector pixel *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector int a, int b, int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector int a, int b, vector int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool int a, int b, vector bool int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector float a, int b, vector float *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_stvrxl */
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector signed char a, int b, signed char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector signed char a, int b, vector signed char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool char a, int b, vector bool char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector short a, int b, short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector short a, int b, vector short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool short a, int b, vector bool short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector pixel a, int b, vector pixel *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector int a, int b, int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector int a, int b, vector int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool int a, int b, vector bool int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector float a, int b, vector float *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
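The vec_stvlx/vec_stvlxl/vec_stvrx/vec_stvrxl overloads above all follow one read-merge-write pattern: load the bytes that must survive (vec_lvrx or vec_lvlx), permute them together with the new data using a vec_lvsr mask, and store the merged quadword. A simplified scalar model of that idea, for orientation only — it glosses over the exact byte selection lvrx/lvsr perform, and the helper name is made up:

#include <string.h>

/* Illustrative scalar model of the stvlx-style merge; not the real codegen. */
static void store_left_unaligned(const unsigned char data[16], unsigned char *mem)
{
    unsigned long shift = (unsigned long)mem & 15; /* offset vec_lvsr encodes   */
    unsigned char *block = mem - shift;            /* containing aligned 16B    */
    unsigned char merged[16];
    memcpy(merged, block, 16);                /* preserve existing bytes (lvrx) */
    memcpy(merged + shift, data, 16 - shift); /* overlay left part of the data */
    memcpy(block, merged, 16);                /* one aligned store (vec_st)    */
}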
+/* vec_promote */
+
+static vector signed char __ATTRS_o_ai
+vec_promote(signed char a, int b)
+{
+ vector signed char res = (vector signed char)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_promote(unsigned char a, int b)
+{
+ vector unsigned char res = (vector unsigned char)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector short __ATTRS_o_ai
+vec_promote(short a, int b)
+{
+ vector short res = (vector short)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_promote(unsigned short a, int b)
+{
+ vector unsigned short res = (vector unsigned short)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector int __ATTRS_o_ai
+vec_promote(int a, int b)
+{
+ vector int res = (vector int)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_promote(unsigned int a, int b)
+{
+ vector unsigned int res = (vector unsigned int)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector float __ATTRS_o_ai
+vec_promote(float a, int b)
+{
+ vector float res = (vector float)(0);
+ res[b] = a;
+ return res;
+}
+
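vec_promote builds an all-zero vector and inserts the scalar at the requested lane. A minimal sketch of the same operation using the GNU C vector-subscript extension the header itself relies on; the vi4 typedef is an assumed stand-in for vector int:

typedef int vi4 __attribute__((__vector_size__(16)));

static vi4 promote_int(int a, int b)
{
    vi4 res = {0, 0, 0, 0}; /* all lanes zeroed, like (vector int)(0) */
    res[b] = a;             /* insert scalar; b must be a valid lane  */
    return res;
}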
+/* vec_splats */
+
+static vector signed char __ATTRS_o_ai
+vec_splats(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_splats(unsigned char a)
+{
+ return (vector unsigned char)(a);
+}
+
+static vector short __ATTRS_o_ai
+vec_splats(short a)
+{
+ return (vector short)(a);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_splats(unsigned short a)
+{
+ return (vector unsigned short)(a);
+}
+
+static vector int __ATTRS_o_ai
+vec_splats(int a)
+{
+ return (vector int)(a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_splats(unsigned int a)
+{
+ return (vector unsigned int)(a);
+}
+
+static vector float __ATTRS_o_ai
+vec_splats(float a)
+{
+ return (vector float)(a);
+}
+
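vec_splats is a broadcast: the AltiVec cast (vector int)(a) replicates the scalar into every lane. A one-liner sketch of the equivalent, reusing the vi4 typedef assumed above:

static vi4 splat_int(int a)
{
    vi4 res = {a, a, a, a}; /* scalar replicated across all four lanes */
    return res;
}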
+/* ----------------------------- predicates --------------------------------- */
/* vec_all_eq */
@@ -8440,37 +9996,43 @@ vec_all_eq(vector short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_eq(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_eq(vector unsigned short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_eq(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_eq(vector bool short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_eq(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_eq(vector pixel a, vector pixel b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
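In the vec_all_* predicates being rewrapped here, the __CR6_LT condition code asks whether the element-wise compare held in every lane. A scalar model of what vec_all_eq computes for the halfword forms — illustrative only, with a hypothetical helper name:

static int all_eq_u16(const unsigned short a[8], const unsigned short b[8])
{
    int i;
    for (i = 0; i < 8; ++i)
        if (a[i] != b[i])
            return 0; /* one differing lane falsifies vec_all_eq */
    return 1;
}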
static int __ATTRS_o_ai
@@ -8550,8 +10112,9 @@ vec_all_ge(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_all_ge(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -8563,8 +10126,9 @@ vec_all_ge(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_all_ge(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -8594,8 +10158,9 @@ vec_all_ge(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_ge(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -8607,8 +10172,9 @@ vec_all_ge(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_all_ge(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -8638,8 +10204,9 @@ vec_all_ge(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_all_ge(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -8651,8 +10218,9 @@ vec_all_ge(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_all_ge(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -8690,8 +10258,9 @@ vec_all_gt(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_all_gt(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -8703,8 +10272,9 @@ vec_all_gt(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_all_gt(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -8734,8 +10304,9 @@ vec_all_gt(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_gt(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -8747,8 +10318,9 @@ vec_all_gt(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_all_gt(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -8778,8 +10350,9 @@ vec_all_gt(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_all_gt(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -8791,8 +10364,9 @@ vec_all_gt(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_all_gt(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -8838,8 +10412,9 @@ vec_all_le(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_all_le(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -8851,8 +10426,9 @@ vec_all_le(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_all_le(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -8882,8 +10458,9 @@ vec_all_le(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_le(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -8895,8 +10472,9 @@ vec_all_le(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_all_le(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -8926,8 +10504,9 @@ vec_all_le(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_all_le(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -8939,8 +10518,9 @@ vec_all_le(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_all_le(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -8978,8 +10558,9 @@ vec_all_lt(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_all_lt(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -8991,8 +10572,9 @@ vec_all_lt(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_all_lt(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -9022,8 +10604,9 @@ vec_all_lt(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_lt(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9035,8 +10618,9 @@ vec_all_lt(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_all_lt(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9066,8 +10650,9 @@ vec_all_lt(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_all_lt(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -9079,8 +10664,9 @@ vec_all_lt(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_all_lt(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -9156,37 +10742,43 @@ vec_all_ne(vector short a, vector bool short b)
static int __ATTRS_o_ai
vec_all_ne(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_ne(vector unsigned short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_ne(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_ne(vector bool short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_ne(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
vec_all_ne(vector pixel a, vector pixel b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
static int __ATTRS_o_ai
@@ -9282,43 +10874,50 @@ vec_all_numeric(vector float a)
static int __ATTRS_o_ai
vec_any_eq(vector signed char a, vector signed char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector signed char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector unsigned char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector unsigned char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
@@ -9336,37 +10935,49 @@ vec_any_eq(vector short a, vector bool short b)
static int __ATTRS_o_ai
vec_any_eq(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector unsigned short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector pixel a, vector pixel b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
@@ -9384,31 +10995,36 @@ vec_any_eq(vector int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_eq(vector unsigned int a, vector unsigned int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector unsigned int a, vector bool int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool int a, vector unsigned int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_eq(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
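The vec_any_* family uses the reversed condition codes (__CR6_EQ_REV, __CR6_LT_REV), which flip the question to "did the compare hold in at least one lane?". A companion scalar model for the byte forms, again with an invented helper name:

static int any_eq_u8(const unsigned char a[16], const unsigned char b[16])
{
    int i;
    for (i = 0; i < 16; ++i)
        if (a[i] == b[i])
            return 1; /* a single matching lane satisfies vec_any_eq */
    return 0;
}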
static int __ATTRS_o_ai
@@ -9446,8 +11062,9 @@ vec_any_ge(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_any_ge(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -9459,8 +11076,9 @@ vec_any_ge(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_any_ge(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -9484,14 +11102,16 @@ vec_any_ge(vector unsigned short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_any_ge(vector unsigned short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)b, a);
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)b, a);
}
static int __ATTRS_o_ai
vec_any_ge(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9504,8 +11124,9 @@ vec_any_ge(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_any_ge(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9535,8 +11156,9 @@ vec_any_ge(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_ge(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -9548,8 +11170,9 @@ vec_any_ge(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_any_ge(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -9588,8 +11211,9 @@ vec_any_gt(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_any_gt(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -9602,8 +11226,9 @@ vec_any_gt(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_any_gt(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -9634,21 +11259,24 @@ vec_any_gt(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_any_gt(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
vec_any_gt(vector bool short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)a, b);
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)a, b);
}
static int __ATTRS_o_ai
vec_any_gt(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -9678,8 +11306,9 @@ vec_any_gt(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_gt(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -9691,8 +11320,9 @@ vec_any_gt(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_any_gt(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -9731,8 +11361,9 @@ vec_any_le(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_any_le(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -9745,8 +11376,9 @@ vec_any_le(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_any_le(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)a,
- (vector unsigned char)b);
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
}
static int __ATTRS_o_ai
@@ -9777,8 +11409,9 @@ vec_any_le(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_any_le(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -9791,8 +11424,9 @@ vec_any_le(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_any_le(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)a,
- (vector unsigned short)b);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
}
static int __ATTRS_o_ai
@@ -9822,8 +11456,9 @@ vec_any_le(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_le(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -9835,8 +11470,9 @@ vec_any_le(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_any_le(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)a,
- (vector unsigned int)b);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
}
static int __ATTRS_o_ai
@@ -9875,8 +11511,9 @@ vec_any_lt(vector unsigned char a, vector bool char b)
static int __ATTRS_o_ai
vec_any_lt(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -9889,8 +11526,9 @@ vec_any_lt(vector bool char a, vector unsigned char b)
static int __ATTRS_o_ai
vec_any_lt(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)b,
- (vector unsigned char)a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
}
static int __ATTRS_o_ai
@@ -9921,8 +11559,9 @@ vec_any_lt(vector unsigned short a, vector bool short b)
static int __ATTRS_o_ai
vec_any_lt(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9935,8 +11574,9 @@ vec_any_lt(vector bool short a, vector unsigned short b)
static int __ATTRS_o_ai
vec_any_lt(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)b,
- (vector unsigned short)a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
}
static int __ATTRS_o_ai
@@ -9966,8 +11606,9 @@ vec_any_lt(vector unsigned int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_lt(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -9979,8 +11620,9 @@ vec_any_lt(vector bool int a, vector unsigned int b)
static int __ATTRS_o_ai
vec_any_lt(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)b,
- (vector unsigned int)a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
}
static int __ATTRS_o_ai
@@ -10002,43 +11644,50 @@ vec_any_nan(vector float a)
static int __ATTRS_o_ai
vec_any_ne(vector signed char a, vector signed char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector signed char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector unsigned char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector unsigned char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool char a, vector signed char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool char a, vector bool char b)
{
- return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
static int __ATTRS_o_ai
@@ -10056,37 +11705,49 @@ vec_any_ne(vector short a, vector bool short b)
static int __ATTRS_o_ai
vec_any_ne(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector unsigned short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool short a, vector short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool short a, vector bool short b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector pixel a, vector pixel b)
{
- return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
}
static int __ATTRS_o_ai
@@ -10104,31 +11765,36 @@ vec_any_ne(vector int a, vector bool int b)
static int __ATTRS_o_ai
vec_any_ne(vector unsigned int a, vector unsigned int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector unsigned int a, vector bool int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool int a, vector int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool int a, vector unsigned int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
vec_any_ne(vector bool int a, vector bool int b)
{
- return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
static int __ATTRS_o_ai
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
index e5dfe26..11b2581 100644
--- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -777,11 +777,8 @@ _mm_xor_si128(__m128i a, __m128i b)
return a ^ b;
}
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_slli_si128(__m128i a, int imm)
-{
- return __builtin_ia32_pslldqi128(a, imm * 8);
-}
+#define _mm_slli_si128(VEC, IMM) \
+ ((__m128i)__builtin_ia32_pslldqi128((__m128i)(VEC), (IMM)*8))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi16(__m128i a, int count)
@@ -843,11 +840,9 @@ _mm_sra_epi32(__m128i a, __m128i count)
return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
}
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_srli_si128(__m128i a, int imm)
-{
- return __builtin_ia32_psrldqi128(a, imm * 8);
-}
+
+#define _mm_srli_si128(VEC, IMM) \
+ ((__m128i)__builtin_ia32_psrldqi128((__m128i)(VEC), (IMM)*8))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi16(__m128i a, int count)
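The emmintrin.h change converts _mm_slli_si128/_mm_srli_si128 from inline functions to macros because the underlying pslldq/psrldq builtins require a compile-time immediate, which a plain int parameter cannot guarantee. A usage sketch under that constraint, with a hypothetical function name:

#include <emmintrin.h>

static __m128i byte_shift_demo(__m128i v)
{
    __m128i left4  = _mm_slli_si128(v, 4); /* whole-register shift left 4 bytes  */
    __m128i right8 = _mm_srli_si128(v, 8); /* count must be a constant literal   */
    return _mm_or_si128(left4, right8);
}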
diff --git a/contrib/llvm/tools/clang/lib/Headers/limits.h b/contrib/llvm/tools/clang/lib/Headers/limits.h
index 2627533..ecd09a4 100644
--- a/contrib/llvm/tools/clang/lib/Headers/limits.h
+++ b/contrib/llvm/tools/clang/lib/Headers/limits.h
@@ -31,8 +31,12 @@
#define _GCC_LIMITS_H_
#endif
-/* System headers include a number of constants from POSIX in <limits.h>. */
+/* System headers include a number of constants from POSIX in <limits.h>.
+ Include it if we're hosted. */
+#if __STDC_HOSTED__ && \
+ defined(__has_include_next) && __has_include_next(<limits.h>)
#include_next <limits.h>
+#endif
/* Many system headers try to "help us out" by defining these. No really, we
know how big each datatype is. */
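The limits.h guard chains to the system header only when the environment is hosted and the compiler can confirm a next header actually exists. The same pattern, sketched for any wrapper header — <float.h> here is an assumed example, and the snippet must live in a header on the include path, not in a main source file:

#if __STDC_HOSTED__ && \
    defined(__has_include_next) && __has_include_next(<float.h>)
#include_next <float.h>
#endif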
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
index fba8651..e7da543 100644
--- a/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
+++ b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
@@ -24,39 +24,48 @@
#ifndef __MM_MALLOC_H
#define __MM_MALLOC_H
-#include <errno.h>
#include <stdlib.h>
-static __inline__ void *__attribute__((__always_inline__, __nodebug__))
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **memptr, size_t alignment, size_t size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **memptr, size_t alignment, size_t size);
+#endif
+#endif
+
+static __inline__ void *__attribute__((__always_inline__, __nodebug__,
+ __malloc__))
_mm_malloc(size_t size, size_t align)
{
- if (align & (align - 1)) {
- errno = EINVAL;
- return 0;
+ if (align == 1) {
+ return malloc(size);
}
- if (!size)
- return 0;
+ if (!(align & (align - 1)) && align < sizeof(void *))
+ align = sizeof(void *);
- if (align < 2 * sizeof(void *))
- align = 2 * sizeof(void *);
-
- void *mallocedMemory = malloc(size + align);
- if (!mallocedMemory)
+ void *mallocedMemory;
+#ifdef _WIN32
+ mallocedMemory = _aligned_malloc(size, align);
+#else
+ if (posix_memalign(&mallocedMemory, align, size))
return 0;
+#endif
- void *alignedMemory =
- (void *)(((size_t)mallocedMemory + align) & ~((size_t)align - 1));
- ((void **)alignedMemory)[-1] = mallocedMemory;
-
- return alignedMemory;
+ return mallocedMemory;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_free(void *p)
{
- if (p)
- free(((void **)p)[-1]);
+ free(p);
}
#endif /* __MM_MALLOC_H */
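With the rewrite above, alignment is delegated to posix_memalign (or _aligned_malloc on Windows) instead of over-allocating and stashing the base pointer, so _mm_free reduces to free(). A minimal usage sketch under the POSIX branch:

#include <mm_malloc.h>

static int alloc_demo(void)
{
    float *buf = (float *)_mm_malloc(64 * sizeof(float), 16); /* 16B aligned */
    if (!buf)
        return 1;
    /* ... use buf with aligned loads/stores ... */
    _mm_free(buf); /* now a thin wrapper over free() */
    return 0;
}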
diff --git a/contrib/llvm/tools/clang/lib/Headers/mmintrin.h b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
index bad9e1c..fefb42f 100644
--- a/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
@@ -43,14 +43,13 @@ _mm_empty(void)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_si64(int __i)
{
- return (__m64)(__v2si){__i, 0};
+ return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_si32(__m64 __m)
{
- __v2si __mmx_var2 = (__v2si)__m;
- return __mmx_var2[0];
+ return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -86,59 +85,55 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 8+4, 5,
- 8+5, 6, 8+6, 7, 8+7);
+ return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 4+2, 3,
- 4+3);
+ return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 2+1);
+ return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8+0, 1,
- 8+1, 2, 8+2, 3, 8+3);
+ return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4+0, 1,
- 4+1);
+ return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2+0);
+ return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v8qi)__m1 + (__v8qi)__m2);
+ return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v4hi)__m1 + (__v4hi)__m2);
+ return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v2si)__m1 + (__v2si)__m2);
+ return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -168,19 +163,19 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v8qi)__m1 - (__v8qi)__m2);
+ return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v4hi)__m1 - (__v4hi)__m2);
+ return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v2si)__m1 - (__v2si)__m2);
+ return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -222,7 +217,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v4hi)__m1 * (__v4hi)__m2);
+ return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -252,13 +247,13 @@ _mm_slli_pi32(__m64 __m, int __count)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_si64(__m64 __m, __m64 __count)
{
- return __builtin_ia32_psllq(__m, __count);
+ return (__m64)__builtin_ia32_psllq(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_si64(__m64 __m, int __count)
{
- return __builtin_ia32_psllqi(__m, __count);
+ return (__m64)__builtin_ia32_psllqi(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -318,67 +313,67 @@ _mm_srl_si64(__m64 __m, __m64 __count)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_si64(__m64 __m, int __count)
{
- return __builtin_ia32_psrlqi(__m, __count);
+ return (__m64)__builtin_ia32_psrlqi(__m, __count);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_and_si64(__m64 __m1, __m64 __m2)
{
- return __m1 & __m2;
+ return __builtin_ia32_pand(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si64(__m64 __m1, __m64 __m2)
{
- return ~__m1 & __m2;
+ return __builtin_ia32_pandn(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_or_si64(__m64 __m1, __m64 __m2)
{
- return __m1 | __m2;
+ return __builtin_ia32_por(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_xor_si64(__m64 __m1, __m64 __m2)
{
- return __m1 ^ __m2;
+ return __builtin_ia32_pxor(__m1, __m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v8qi)__m1 == (__v8qi)__m2);
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v4hi)__m1 == (__v4hi)__m2);
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v2si)__m1 == (__v2si)__m2);
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v8qi)__m1 > (__v8qi)__m2);
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
{
- return (__m64)((__v2si)__m1 > (__v2si)__m2);
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
@@ -390,57 +385,58 @@ _mm_setzero_si64(void)
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi32(int __i1, int __i0)
{
- return (__m64)(__v2si){ __i0, __i1 };
+ return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
{
- return (__m64)(__v4hi){ __s0, __s1, __s2, __s3 };
+ return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
char __b1, char __b0)
{
- return (__m64)(__v8qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7 };
+ return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi32(int __i)
{
- return (__m64)(__v2si){ __i, __i };
+ return _mm_set_pi32(__i, __i);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
-_mm_set1_pi16(short __s)
+_mm_set1_pi16(short __w)
{
- return (__m64)(__v4hi){ __s, __s, __s, __s };
+ return _mm_set_pi16(__w, __w, __w, __w);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi8(char __b)
{
- return (__m64)(__v8qi){ __b, __b, __b, __b, __b, __b, __b, __b };
+ return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi32(int __i1, int __i0)
{
- return (__m64)(__v2si){ __i1, __i0 };
+ return _mm_set_pi32(__i1, __i0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
-_mm_setr_pi16(short __s3, short __s2, short __s1, short __s0)
+_mm_setr_pi16(short __w3, short __w2, short __w1, short __w0)
{
- return (__m64)(__v4hi){ __s3, __s2, __s1, __s0 };
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
char __b1, char __b0)
{
- return (__m64)(__v8qi){ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0 };
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
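The mmintrin.h setters now funnel through the _mm_set_* forms (built on __builtin_ia32_vec_init_*) rather than constructing vector literals directly; the _mm_set1_* broadcasts simply repeat the scalar. A small sketch exercising the rewritten entry points:

#include <mmintrin.h>

static __m64 splat_demo(void)
{
    __m64 sevens = _mm_set1_pi16(7);         /* broadcast via _mm_set_pi16      */
    __m64 ramp   = _mm_set_pi16(3, 2, 1, 0); /* elements listed high to low     */
    return _mm_add_pi16(sevens, ramp);       /* addition now routed through paddw */
}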
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdbool.h b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
index e44a1f9..0467893 100644
--- a/contrib/llvm/tools/clang/lib/Headers/stdbool.h
+++ b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
@@ -26,11 +26,17 @@
#ifndef __STDBOOL_H
#define __STDBOOL_H
-/* Don't define bool, true, and false in C++ */
+/* Don't define bool, true, and false in C++, except as a GNU extension. */
#ifndef __cplusplus
#define bool _Bool
#define true 1
#define false 0
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Define _Bool, bool, false, true as a GNU extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
#endif
#define __bool_true_false_are_defined 1
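The new branch makes the header a no-op in GNU-mode C++ by defining each macro to itself, so translation units that include <stdbool.h> keep working whether compiled as C or as non-strict C++. A trivial sketch that compiles under both:

#include <stdbool.h>

static bool flag_demo(int x)
{
    bool ok = (x != 0); /* _Bool in C; the builtin bool in GNU C++ */
    return ok && true;
}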
diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h
index 84ec1a7..7cc0bc1 100644
--- a/contrib/llvm/tools/clang/lib/Headers/stddef.h
+++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h
@@ -46,13 +46,16 @@ typedef __WCHAR_TYPE__ wchar_t;
#define NULL ((void*)0)
#endif
-// Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
-// __WINT_TYPE__ directly; accomodate both by requiring __need_wint_t
-#if defined(__need_wint_t) && !defined(_WINT_T)
-#define _WINT_T
-typedef __WINT_TYPE__ wint_t;
-#endif
-
#define offsetof(t, d) __builtin_offsetof(t, d)
#endif /* __STDDEF_H */
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
+__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
+#if defined(__need_wint_t)
+#if !defined(_WINT_T)
+#define _WINT_T
+typedef __WINT_TYPE__ wint_t;
+#endif /* _WINT_T */
+#undef __need_wint_t
+#endif /* __need_wint_t */
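A C library drives the block above by defining __need_wint_t before including <stddef.h>; the header defines wint_t once and consumes the request macro. A sketch of that glibc-style protocol from the consumer's side:

#define __need_wint_t
#include <stddef.h> /* defines wint_t, then #undefs __need_wint_t */

static wint_t wide_eof_demo(void)
{
    return (wint_t)0;
}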
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index 8363b45..42dd3e8 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -34,7 +34,11 @@ typedef int __v4si __attribute__((__vector_size__(16)));
typedef float __v4sf __attribute__((__vector_size__(16)));
typedef float __m128 __attribute__((__vector_size__(16)));
+// This header should only be included in a hosted environment as it depends on
+// a standard library to provide allocation routines.
+#if __STDC_HOSTED__
#include <mm_malloc.h>
+#endif
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ss(__m128 a, __m128 b)
@@ -712,9 +716,7 @@ _mm_mulhi_pu16(__m64 a, __m64 b)
}
#define _mm_shuffle_pi16(a, n) \
- ((__m64)__builtin_shufflevector((__v4hi)(a), (__v4hi) {0}, \
- (n) & 0x3, ((n) & 0xc) >> 2, \
- ((n) & 0x30) >> 4, ((n) & 0xc0) >> 6))
+ ((__m64)__builtin_ia32_pshufw(a, n))
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 d, __m64 n, char *p)
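
Two independent xmmintrin.h fixes: <mm_malloc.h>, and with it the hosted allocator dependency, is skipped when __STDC_HOSTED__ is 0 (e.g. under -ffreestanding), and _mm_shuffle_pi16 drops the generic shufflevector expansion for the pshufw builtin. A hedged sketch of the hosted/freestanding split:

#include <xmmintrin.h>

#if __STDC_HOSTED__
/* Only hosted builds get _mm_malloc/_mm_free via <mm_malloc.h>. */
float *make_buf(void) { return (float *)_mm_malloc(4 * sizeof(float), 16); }
void drop_buf(float *p) { _mm_free(p); }
#endif
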
diff --git a/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h b/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h
index 943c720..0b8425b 100644
--- a/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h
+++ b/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h
@@ -108,8 +108,7 @@ public:
}
void VisitStmt(Stmt *Node) {
- for (Stmt::child_iterator
- I = Node->child_begin(), E = Node->child_end(); I != E; ++I)
+ for (Stmt::child_range I = Node->children(); I; ++I)
if (*I)
Visit(*I);
}
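
The child_begin()/child_end() iterator pairs give way to Stmt::child_range, which bundles the pair and tests as a boolean while elements remain; the same rewrite repeats in CallGraph.cpp below. A minimal sketch of the idiom against this vintage of the clang API, with visitChild as a hypothetical callback:

#include "clang/AST/Stmt.h"

void visitChild(clang::Stmt *S);   /* hypothetical */

void visitChildren(clang::Stmt *S) {
  /* child_range converts to false once exhausted; elements may be null. */
  for (clang::Stmt::child_range I = S->children(); I; ++I)
    if (clang::Stmt *Child = *I)
      visitChild(Child);
}
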
diff --git a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
index dedcc0e..bf3f5a8 100644
--- a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
@@ -40,7 +40,7 @@ public:
void VisitCallExpr(CallExpr *CE);
void VisitChildren(Stmt *S) {
- for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I != E;++I)
+ for (Stmt::child_range I = S->children(); I; ++I)
if (*I)
static_cast<CGBuilder*>(this)->Visit(*I);
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
index 4010d61..e424f91 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -15,9 +15,10 @@
#include "clang/Basic/FileManager.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <cctype>
#include <cstdio>
using namespace clang;
@@ -75,13 +76,12 @@ static inline unsigned HashHMapKey(llvm::StringRef Str) {
/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
/// If it looks like a HeaderMap but is obviously corrupted, it puts a reason
/// into the string error argument and returns null.
-const HeaderMap *HeaderMap::Create(const FileEntry *FE) {
+const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
// If the file is too small to be a header map, ignore it.
unsigned FileSize = FE->getSize();
if (FileSize <= sizeof(HMapHeader)) return 0;
- llvm::OwningPtr<const llvm::MemoryBuffer> FileBuffer(
- llvm::MemoryBuffer::getFile(FE->getName(), 0, FE->getSize()));
+ llvm::OwningPtr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
if (FileBuffer == 0) return 0; // Unreadable file?
const char *FileStart = FileBuffer->getBufferStart();
@@ -223,6 +223,6 @@ const FileEntry *HeaderMap::LookupFile(llvm::StringRef Filename,
llvm::SmallString<1024> DestPath;
DestPath += getString(B.Prefix);
DestPath += getString(B.Suffix);
- return FM.getFile(DestPath.begin(), DestPath.end());
+ return FM.getFile(DestPath.str());
}
}
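
The HeaderMap changes route file reads through the FileManager (getBufferForFile) and replace the (begin, end) pointer-pair getFile overload with the StringRef one; <cctype> covers the ctype calls used when hashing keys. The migrated call shape as a standalone sketch, with a hypothetical helper name and this era's headers assumed:

#include "clang/Basic/FileManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MemoryBuffer.h"

/* Join two path pieces and open the result through the FileManager. */
const llvm::MemoryBuffer *openConcat(clang::FileManager &FM,
                                     llvm::StringRef Prefix,
                                     llvm::StringRef Suffix) {
  llvm::SmallString<1024> Path;
  Path += Prefix;
  Path += Suffix;
  if (const clang::FileEntry *FE = FM.getFile(Path.str()))
    return FM.getBufferForFile(FE);   /* caller owns the buffer */
  return 0;
}
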
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index 4554aba..b028e33 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -15,7 +15,8 @@
#include "clang/Lex/HeaderMap.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/ADT/SmallString.h"
#include <cstdio>
using namespace clang;
@@ -32,11 +33,15 @@ HeaderFileInfo::getControllingMacro(ExternalIdentifierLookup *External) {
return ControllingMacro;
}
-HeaderSearch::HeaderSearch(FileManager &FM) : FileMgr(FM), FrameworkMap(64) {
+ExternalHeaderFileInfoSource::~ExternalHeaderFileInfoSource() {}
+
+HeaderSearch::HeaderSearch(FileManager &FM)
+ : FileMgr(FM), FrameworkMap(64) {
SystemDirIdx = 0;
NoCurDirSearch = false;
ExternalLookup = 0;
+ ExternalSource = 0;
NumIncluded = 0;
NumMultiIncludeFileOptzn = 0;
NumFrameworkLookups = NumSubFrameworkLookups = 0;
@@ -83,7 +88,7 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
return HeaderMaps[i].second;
}
- if (const HeaderMap *HM = HeaderMap::Create(FE)) {
+ if (const HeaderMap *HM = HeaderMap::Create(FE, FileMgr)) {
HeaderMaps.push_back(std::make_pair(FE, HM));
return HM;
}
@@ -118,7 +123,7 @@ const FileEntry *DirectoryLookup::LookupFile(llvm::StringRef Filename,
TmpDir += getDir()->getName();
TmpDir.push_back('/');
TmpDir.append(Filename.begin(), Filename.end());
- return HS.getFileMgr().getFile(TmpDir.begin(), TmpDir.end());
+ return HS.getFileMgr().getFile(TmpDir.str());
}
if (isFramework())
@@ -169,8 +174,8 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(llvm::StringRef Filename,
// If the framework dir doesn't exist, we fail.
// FIXME: It's probably more efficient to query this with FileMgr.getDir.
- if (!llvm::sys::Path(std::string(FrameworkName.begin(),
- FrameworkName.end())).exists())
+ bool Exists;
+ if (llvm::sys::fs::exists(FrameworkName.str(), Exists) || !Exists)
return 0;
// Otherwise, if it does, remember that this is the right direntry for this
@@ -183,16 +188,14 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(llvm::StringRef Filename,
FrameworkName += "Headers/";
FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end());
- if (const FileEntry *FE = FileMgr.getFile(FrameworkName.begin(),
- FrameworkName.end())) {
+ if (const FileEntry *FE = FileMgr.getFile(FrameworkName.str()))
return FE;
- }
// Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h"
const char *Private = "Private";
FrameworkName.insert(FrameworkName.begin()+OrigSize, Private,
Private+strlen(Private));
- return FileMgr.getFile(FrameworkName.begin(), FrameworkName.end());
+ return FileMgr.getFile(FrameworkName.str());
}
@@ -212,7 +215,7 @@ const FileEntry *HeaderSearch::LookupFile(llvm::StringRef Filename,
const DirectoryLookup *&CurDir,
const FileEntry *CurFileEnt) {
// If 'Filename' is absolute, check to see if it exists and no searching.
- if (llvm::sys::Path::isAbsolute(Filename.begin(), Filename.size())) {
+ if (llvm::sys::path::is_absolute(Filename)) {
CurDir = 0;
// If this was an #include_next "/absolute/file", fail.
@@ -329,7 +332,7 @@ LookupSubframeworkHeader(llvm::StringRef Filename,
FrameworkName += ".framework/";
llvm::StringMapEntry<const DirectoryEntry *> &CacheLookup =
- FrameworkMap.GetOrCreateValue(Filename.begin(), Filename.begin()+SlashPos);
+ FrameworkMap.GetOrCreateValue(Filename.substr(0, SlashPos));
// Some other location?
if (CacheLookup.getValue() &&
@@ -343,8 +346,7 @@ LookupSubframeworkHeader(llvm::StringRef Filename,
++NumSubFrameworkLookups;
// If the framework dir doesn't exist, we fail.
- const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.begin(),
- FrameworkName.end());
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str());
if (Dir == 0) return 0;
// Otherwise, if it does, remember that this is the right direntry for this
@@ -358,14 +360,13 @@ LookupSubframeworkHeader(llvm::StringRef Filename,
llvm::SmallString<1024> HeadersFilename(FrameworkName);
HeadersFilename += "Headers/";
HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
- if (!(FE = FileMgr.getFile(HeadersFilename.begin(),
- HeadersFilename.end()))) {
+ if (!(FE = FileMgr.getFile(HeadersFilename.str()))) {
// Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h"
HeadersFilename = FrameworkName;
HeadersFilename += "PrivateHeaders/";
HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
- if (!(FE = FileMgr.getFile(HeadersFilename.begin(), HeadersFilename.end())))
+ if (!(FE = FileMgr.getFile(HeadersFilename.str())))
return 0;
}
@@ -389,12 +390,19 @@ LookupSubframeworkHeader(llvm::StringRef Filename,
HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
if (FE->getUID() >= FileInfo.size())
FileInfo.resize(FE->getUID()+1);
- return FileInfo[FE->getUID()];
+
+ HeaderFileInfo &HFI = FileInfo[FE->getUID()];
+ if (ExternalSource && !HFI.Resolved) {
+ HFI = ExternalSource->GetHeaderFileInfo(FE);
+ HFI.Resolved = true;
+ }
+ return HFI;
}
void HeaderSearch::setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID) {
if (UID >= FileInfo.size())
FileInfo.resize(UID+1);
+ HFI.Resolved = true;
FileInfo[UID] = HFI;
}
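
The HeaderSearch hunks migrate from llvm::sys::Path to the new llvm::sys::fs/path routines, switch FileManager lookups to StringRef, and add an ExternalHeaderFileInfoSource so per-header info can be pulled lazily, e.g. from a precompiled header. The resolve-once caching pattern in getFileInfo, reduced to a generic sketch with all names hypothetical:

#include <vector>

struct Info { bool Resolved; int Payload; Info() : Resolved(false), Payload(0) {} };
struct External { Info fetch(unsigned UID); };

Info &getInfo(std::vector<Info> &Cache, External *Ext, unsigned UID) {
  if (UID >= Cache.size())
    Cache.resize(UID + 1);        /* UID-indexed, grown on demand */
  Info &I = Cache[UID];
  if (Ext && !I.Resolved) {       /* consult the external source at most once */
    I = Ext->fetch(UID);
    I.Resolved = true;            /* explicit stores also set the bit */
  }
  return I;
}
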
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index 917829b..b17198b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -212,6 +212,109 @@ void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
}
}
+//===----------------------------------------------------------------------===//
+// Token Spelling
+//===----------------------------------------------------------------------===//
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the characters used to represent the token in the source file
+/// after trigraph expansion and escaped-newline folding. In particular, this
+/// wants to get the true, uncanonicalized, spelling of things like digraphs,
+/// UCNs, etc.
+std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
+ const LangOptions &Features, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ // If this token contains nothing interesting, return it directly.
+ bool CharDataInvalid = false;
+ const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
+ &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid)
+ return std::string();
+
+ if (!Tok.needsCleaning())
+ return std::string(TokStart, TokStart+Tok.getLength());
+
+ std::string Result;
+ Result.reserve(Tok.getLength());
+
+ // Otherwise, hard case, relex the characters into the string.
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
+ Ptr += CharSize;
+ }
+ assert(Result.size() != unsigned(Tok.getLength()) &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+ return Result;
+}
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// preallocated buffer, instead of as an std::string. The caller is required
+/// to allocate enough space for the token, which is guaranteed to be at least
+/// Tok.getLength() bytes long. The actual length of the token is returned.
+///
+/// Note that this method may do two possible things: it may either fill in
+/// the buffer specified with characters, or it may *change the input pointer*
+/// to point to a constant buffer with the data already in it (avoiding a
+/// copy). The caller is not allowed to modify the returned buffer pointer
+/// if an internal buffer is returned.
+unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &Features, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ const char *TokStart = 0;
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.is(tok::raw_identifier))
+ TokStart = Tok.getRawIdentifierData();
+ else if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Just return the string from the identifier table, which is very quick.
+ Buffer = II->getNameStart();
+ return II->getLength();
+ }
+
+ // NOTE: this can be checked even after testing for an IdentifierInfo.
+ if (Tok.isLiteral())
+ TokStart = Tok.getLiteralData();
+
+ if (TokStart == 0) {
+ // Compute the start of the token in the input lexer buffer.
+ bool CharDataInvalid = false;
+ TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid) {
+ Buffer = "";
+ return 0;
+ }
+ }
+
+ // If this token contains nothing interesting, return it directly.
+ if (!Tok.needsCleaning()) {
+ Buffer = TokStart;
+ return Tok.getLength();
+ }
+
+ // Otherwise, hard case, relex the characters into the string.
+ char *OutBuf = const_cast<char*>(Buffer);
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
+ Ptr += CharSize;
+ }
+ assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+
+ return OutBuf-Buffer;
+}
+
+
+
static bool isWhitespace(unsigned char c);
/// MeasureTokenLength - Relex the token at the specified location and return
@@ -242,7 +345,8 @@ unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
return 0;
// Create a lexer starting at the beginning of this token.
- Lexer TheLexer(Loc, LangOpts, Buffer.begin(), StrData, Buffer.end());
+ Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
+ Buffer.begin(), StrData, Buffer.end());
TheLexer.SetCommentRetentionState(true);
Token TheTok;
TheLexer.LexFromRawLexer(TheTok);
@@ -253,6 +357,9 @@ SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return Loc;
+
bool Invalid = false;
llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
if (Invalid)
@@ -261,6 +368,9 @@ SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
// Back up from the current location until we hit the beginning of a line
// (or the buffer). We'll relex from that point.
const char *BufStart = Buffer.data();
+ if (LocInfo.second >= Buffer.size())
+ return Loc;
+
const char *StrData = BufStart+LocInfo.second;
if (StrData[0] == '\n' || StrData[0] == '\r')
return Loc;
@@ -371,10 +481,9 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer, unsigned MaxLines) {
// we don't have an identifier table available. Instead, just look at
// the raw identifier to recognize and categorize preprocessor directives.
TheLexer.LexFromRawLexer(TheTok);
- if (TheTok.getKind() == tok::identifier && !TheTok.needsCleaning()) {
- const char *IdStart = Buffer->getBufferStart()
- + TheTok.getLocation().getRawEncoding() - 1;
- llvm::StringRef Keyword(IdStart, TheTok.getLength());
+ if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
+ llvm::StringRef Keyword(TheTok.getRawIdentifierData(),
+ TheTok.getLength());
PreambleDirectiveKind PDK
= llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
.Case("include", PDK_Skipped)
@@ -443,6 +552,83 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer, unsigned MaxLines) {
: TheTok.isAtStartOfLine());
}
+
+/// AdvanceToTokenCharacter - Given a location that specifies the start of a
+/// token, return a new location that specifies a character within the token.
+SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned CharNo,
+ const SourceManager &SM,
+ const LangOptions &Features) {
+ // Figure out how many physical characters away the specified instantiation
+ // character is. This needs to take into consideration newlines and
+ // trigraphs.
+ bool Invalid = false;
+ const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);
+
+ // If they request the first char of the token, we're trivially done.
+ if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
+ return TokStart;
+
+ unsigned PhysOffset = 0;
+
+ // The usual case is that tokens don't contain anything interesting. Skip
+ // over the uninteresting characters. If a token only consists of simple
+ // chars, this method is extremely fast.
+ while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
+ if (CharNo == 0)
+ return TokStart.getFileLocWithOffset(PhysOffset);
+ ++TokPtr, --CharNo, ++PhysOffset;
+ }
+
+ // If we have a character that may be a trigraph or escaped newline, use a
+ // lexer to parse it correctly.
+ for (; CharNo; --CharNo) {
+ unsigned Size;
+ Lexer::getCharAndSizeNoWarn(TokPtr, Size, Features);
+ TokPtr += Size;
+ PhysOffset += Size;
+ }
+
+ // Final detail: if we end up on an escaped newline, we want to return the
+ // location of the actual byte of the token. For example foo\<newline>bar
+ // advanced by 3 should return the location of b, not of \\. One compounding
+ // detail of this is that the escape may be made by a trigraph.
+ if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
+ PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
+
+ return TokStart.getFileLocWithOffset(PhysOffset);
+}
+
+/// \brief Computes the source location just past the end of the
+/// token at this source location.
+///
+/// This routine can be used to produce a source location that
+/// points just past the end of the token referenced by \p Loc, and
+/// is generally used when a diagnostic needs to point just after a
+/// token where it expected something different from what it received. If
+/// the returned source location would not be meaningful (e.g., if
+/// it points into a macro), this routine returns an invalid
+/// source location.
+///
+/// \param Offset an offset from the end of the token, where the source
+/// location should refer to. The default offset (0) produces a source
+/// location pointing just past the end of the token; an offset of 1 produces
+/// a source location pointing to the last character in the token, etc.
+SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
+ const SourceManager &SM,
+ const LangOptions &Features) {
+ if (Loc.isInvalid() || !Loc.isFileID())
+ return SourceLocation();
+
+ unsigned Len = Lexer::MeasureTokenLength(Loc, SM, Features);
+ if (Len > Offset)
+ Len = Len - Offset;
+ else
+ return Loc;
+
+ return AdvanceToTokenCharacter(Loc, Len, SM, Features);
+}
+
//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//
@@ -584,10 +770,8 @@ static inline bool isNumberBody(unsigned char c) {
/// lexer buffer was all instantiated at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method. Do not allow it to be inlined.
-static DISABLE_INLINE SourceLocation GetMappedTokenLoc(Preprocessor &PP,
- SourceLocation FileLoc,
- unsigned CharNo,
- unsigned TokLen);
+static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
+ Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
SourceLocation FileLoc,
unsigned CharNo, unsigned TokLen) {
@@ -869,19 +1053,17 @@ void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
const char *IdStart = BufferPtr;
- FormTokenWithChars(Result, CurPtr, tok::identifier);
+ FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
+ Result.setRawIdentifierData(IdStart);
// If we are in raw mode, return this identifier raw. There is no need to
// look up identifier information or attempt to macro expand it.
- if (LexingRawMode) return;
-
- // Fill in Result.IdentifierInfo, looking up the identifier in the
- // identifier table.
- IdentifierInfo *II = PP->LookUpIdentifierInfo(Result, IdStart);
+ if (LexingRawMode)
+ return;
- // Change the kind of this identifier to the appropriate token kind, e.g.
- // turning "for" into a keyword.
- Result.setKind(II->getTokenID());
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
// Finally, now that we know we have an identifier, pass this off to the
// preprocessor, which may macro expand it or something.
@@ -980,7 +1162,7 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
if (C == 0 && PP && PP->isCodeCompletionFile(FileLoc))
PP->CodeCompleteNaturalLanguage();
else if (!isLexingRawMode() && !Features.AsmPreprocessor)
- Diag(BufferPtr, diag::err_unterminated_string);
+ Diag(BufferPtr, diag::warn_unterminated_string);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
}
@@ -1059,7 +1241,7 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
if (C == 0 && PP && PP->isCodeCompletionFile(FileLoc))
PP->CodeCompleteNaturalLanguage();
else if (!isLexingRawMode() && !Features.AsmPreprocessor)
- Diag(BufferPtr, diag::err_unterminated_char);
+ Diag(BufferPtr, diag::warn_unterminated_char);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
} else if (C == 0) {
@@ -2141,6 +2323,10 @@ LexNextToken:
// If this is actually a '<<<<<<<' version control conflict marker,
// recognize it as such and recover nicely.
goto LexNextToken;
+ } else if (Features.CUDA && After == '<') {
+ Kind = tok::lesslessless;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
} else {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::lessless;
@@ -2172,6 +2358,10 @@ LexNextToken:
} else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
// If this is '>>>>>>>' and we're in a conflict marker, ignore it.
goto LexNextToken;
+ } else if (Features.CUDA && After == '>') {
+ Kind = tok::greatergreatergreater;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
} else {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::greatergreater;
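
The Lexer grows Preprocessor-free spelling and location helpers (getSpelling, AdvanceToTokenCharacter, getLocForEndOfToken), lexes identifiers as tok::raw_identifier until LookUpIdentifierInfo resolves them, downgrades unterminated string and character literals to warnings, and accepts CUDA's <<< and >>> tokens. A sketch of how a diagnostic client might use the new static helper, assuming this era's clang API:

#include "clang/Lex/Lexer.h"

/* Where should a "missing ';'" fix-it go? Just past the token at Loc. */
clang::SourceLocation afterToken(clang::SourceLocation Loc,
                                 const clang::SourceManager &SM,
                                 const clang::LangOptions &LO) {
  /* Offset 0 = one past the token's last character; an invalid location
     comes back if Loc does not point into a file (e.g. a macro). */
  return clang::Lexer::getLocForEndOfToken(Loc, 0, SM, LO);
}
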
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index fb543d0..16d7b36 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -33,8 +33,8 @@ static int HexDigitValue(char C) {
/// either a character or a string literal.
static unsigned ProcessCharEscape(const char *&ThisTokBuf,
const char *ThisTokEnd, bool &HadError,
- SourceLocation Loc, bool IsWide,
- Preprocessor &PP, bool Complain) {
+ FullSourceLoc Loc, bool IsWide,
+ Diagnostic *Diags, const TargetInfo &Target) {
// Skip the '\' char.
++ThisTokBuf;
@@ -54,13 +54,13 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
ResultChar = 8;
break;
case 'e':
- if (Complain)
- PP.Diag(Loc, diag::ext_nonstandard_escape) << "e";
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape) << "e";
ResultChar = 27;
break;
case 'E':
- if (Complain)
- PP.Diag(Loc, diag::ext_nonstandard_escape) << "E";
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape) << "E";
ResultChar = 27;
break;
case 'f':
@@ -81,8 +81,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
case 'x': { // Hex escape.
ResultChar = 0;
if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
- if (Complain)
- PP.Diag(Loc, diag::err_hex_escape_no_digits);
+ if (Diags)
+ Diags->Report(Loc, diag::err_hex_escape_no_digits);
HadError = 1;
break;
}
@@ -99,9 +99,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
}
// See if any bits will be truncated when evaluated as a character.
- unsigned CharWidth = IsWide
- ? PP.getTargetInfo().getWCharWidth()
- : PP.getTargetInfo().getCharWidth();
+ unsigned CharWidth =
+ IsWide ? Target.getWCharWidth() : Target.getCharWidth();
if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
Overflow = true;
@@ -109,8 +108,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
}
// Check for overflow.
- if (Overflow && Complain) // Too many digits to fit in
- PP.Diag(Loc, diag::warn_hex_escape_too_large);
+ if (Overflow && Diags) // Too many digits to fit in the char width.
+ Diags->Report(Loc, diag::warn_hex_escape_too_large);
break;
}
case '0': case '1': case '2': case '3':
@@ -130,13 +129,12 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
ThisTokBuf[0] >= '0' && ThisTokBuf[0] <= '7');
// Check for overflow. Reject '\777', but not L'\777'.
- unsigned CharWidth = IsWide
- ? PP.getTargetInfo().getWCharWidth()
- : PP.getTargetInfo().getCharWidth();
+ unsigned CharWidth =
+ IsWide ? Target.getWCharWidth() : Target.getCharWidth();
if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
- if (Complain)
- PP.Diag(Loc, diag::warn_octal_escape_too_large);
+ if (Diags)
+ Diags->Report(Loc, diag::warn_octal_escape_too_large);
ResultChar &= ~0U >> (32-CharWidth);
}
break;
@@ -145,18 +143,20 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
// Otherwise, these are not valid escapes.
case '(': case '{': case '[': case '%':
// GCC accepts these as extensions. We warn about them as such though.
- if (Complain)
- PP.Diag(Loc, diag::ext_nonstandard_escape)
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape)
<< std::string()+(char)ResultChar;
break;
default:
- if (!Complain)
+ if (Diags == 0)
break;
- if (isgraph(ThisTokBuf[0]))
- PP.Diag(Loc, diag::ext_unknown_escape) << std::string()+(char)ResultChar;
+ if (isgraph(ResultChar))
+ Diags->Report(Loc, diag::ext_unknown_escape)
+ << std::string()+(char)ResultChar;
else
- PP.Diag(Loc, diag::ext_unknown_escape) << "x"+llvm::utohexstr(ResultChar);
+ Diags->Report(Loc, diag::ext_unknown_escape)
+ << "x"+llvm::utohexstr(ResultChar);
break;
}
@@ -164,16 +164,13 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
}
/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
-/// convert the UTF32 to UTF8. This is a subroutine of StringLiteralParser.
-/// When we decide to implement UCN's for character constants and identifiers,
-/// we will likely rework our support for UCN's.
-static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
- char *&ResultBuf, bool &HadError,
- SourceLocation Loc, Preprocessor &PP,
- bool wide,
- bool Complain) {
- // FIXME: Add a warning - UCN's are only valid in C++ & C99.
- // FIXME: Handle wide strings.
+/// return the UTF32.
+static bool ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
+ uint32_t &UcnVal, unsigned short &UcnLen,
+ FullSourceLoc Loc, Diagnostic *Diags,
+ const LangOptions &Features) {
+ if (!Features.CPlusPlus && !Features.C99 && Diags)
+ Diags->Report(Loc, diag::warn_ucn_not_valid_in_c89);
// Save the beginning of the string (for error diagnostics).
const char *ThisTokBegin = ThisTokBuf;
@@ -182,49 +179,87 @@ static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
ThisTokBuf += 2;
if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
- if (Complain)
- PP.Diag(Loc, diag::err_ucn_escape_no_digits);
- HadError = 1;
- return;
+ if (Diags)
+ Diags->Report(Loc, diag::err_ucn_escape_no_digits);
+ return false;
}
- typedef uint32_t UTF32;
-
- UTF32 UcnVal = 0;
- unsigned short UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
+ UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
unsigned short UcnLenSave = UcnLen;
- for (; ThisTokBuf != ThisTokEnd && UcnLen; ++ThisTokBuf, UcnLen--) {
+ for (; ThisTokBuf != ThisTokEnd && UcnLenSave; ++ThisTokBuf, UcnLenSave--) {
int CharVal = HexDigitValue(ThisTokBuf[0]);
if (CharVal == -1) break;
UcnVal <<= 4;
UcnVal |= CharVal;
}
// If we didn't consume the proper number of digits, there is a problem.
- if (UcnLen) {
- if (Complain)
- PP.Diag(PP.AdvanceToTokenCharacter(Loc, ThisTokBuf-ThisTokBegin),
- diag::err_ucn_escape_incomplete);
- HadError = 1;
- return;
+ if (UcnLenSave) {
+ if (Diags) {
+ SourceLocation L =
+ Lexer::AdvanceToTokenCharacter(Loc, ThisTokBuf-ThisTokBegin,
+ Loc.getManager(), Features);
+ Diags->Report(FullSourceLoc(L, Loc.getManager()),
+ diag::err_ucn_escape_incomplete);
+ }
+ return false;
}
// Check UCN constraints (C99 6.4.3p2).
if ((UcnVal < 0xa0 &&
(UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60 )) // $, @, `
|| (UcnVal >= 0xD800 && UcnVal <= 0xDFFF)
|| (UcnVal > 0x10FFFF)) /* the maximum legal UTF32 value */ {
- if (Complain)
- PP.Diag(Loc, diag::err_ucn_escape_invalid);
+ if (Diags)
+ Diags->Report(Loc, diag::err_ucn_escape_invalid);
+ return false;
+ }
+ return true;
+}
+
+/// EncodeUCNEscape - Read the Universal Character Name, check constraints and
+/// convert the UTF32 to UTF8 or UTF16. This is a subroutine of
+/// StringLiteralParser. When we decide to implement UCN's for identifiers,
+/// we will likely rework our support for UCN's.
+static void EncodeUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
+ char *&ResultBuf, bool &HadError,
+ FullSourceLoc Loc, bool wide, Diagnostic *Diags,
+ const LangOptions &Features) {
+ typedef uint32_t UTF32;
+ UTF32 UcnVal = 0;
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(ThisTokBuf, ThisTokEnd, UcnVal, UcnLen, Loc, Diags,
+ Features)) {
HadError = 1;
return;
}
+
if (wide) {
- (void)UcnLenSave;
- assert(UcnLenSave == 4 &&
- "ProcessUCNEscape - only ucn length of 4 supported");
- // little endian assumed.
- *ResultBuf++ = (UcnVal & 0x000000FF);
- *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
- *ResultBuf++ = (UcnVal & 0x00FF0000) >> 16;
- *ResultBuf++ = (UcnVal & 0xFF000000) >> 24;
+ (void)UcnLen;
+ assert((UcnLen== 4 || UcnLen== 8) && "only ucn length of 4 or 8 supported");
+
+ if (!Features.ShortWChar) {
+ // Note: our internal rep of wide char tokens is always little-endian.
+ *ResultBuf++ = (UcnVal & 0x000000FF);
+ *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
+ *ResultBuf++ = (UcnVal & 0x00FF0000) >> 16;
+ *ResultBuf++ = (UcnVal & 0xFF000000) >> 24;
+ return;
+ }
+
+ // Convert to UTF16.
+ if (UcnVal < (UTF32)0xFFFF) {
+ *ResultBuf++ = (UcnVal & 0x000000FF);
+ *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
+ return;
+ }
+ if (Diags) Diags->Report(Loc, diag::warn_ucn_escape_too_large);
+
+ typedef uint16_t UTF16;
+ UcnVal -= 0x10000;
+ UTF16 surrogate1 = 0xD800 + (UcnVal >> 10);
+ UTF16 surrogate2 = 0xDC00 + (UcnVal & 0x3FF);
+ *ResultBuf++ = (surrogate1 & 0x000000FF);
+ *ResultBuf++ = (surrogate1 & 0x0000FF00) >> 8;
+ *ResultBuf++ = (surrogate2 & 0x000000FF);
+ *ResultBuf++ = (surrogate2 & 0x0000FF00) >> 8;
return;
}
// Now that we've parsed/checked the UCN, we convert from UTF32->UTF8.
@@ -398,6 +433,7 @@ NumericLiteralParser(const char *begin, const char *end,
}
continue; // Success.
case 'i':
+ case 'I':
if (PP.getLangOptions().Microsoft) {
if (isFPConstant || isLong || isLongLong) break;
@@ -410,22 +446,33 @@ NumericLiteralParser(const char *begin, const char *end,
break;
case '1':
if (s + 2 == ThisTokEnd) break;
- if (s[2] == '6') s += 3; // i16 suffix
+ if (s[2] == '6') {
+ s += 3; // i16 suffix
+ isMicrosoftInteger = true;
+ }
else if (s[2] == '2') {
if (s + 3 == ThisTokEnd) break;
- if (s[3] == '8') s += 4; // i128 suffix
+ if (s[3] == '8') {
+ s += 4; // i128 suffix
+ isMicrosoftInteger = true;
+ }
}
- isMicrosoftInteger = true;
break;
case '3':
if (s + 2 == ThisTokEnd) break;
- if (s[2] == '2') s += 3; // i32 suffix
- isMicrosoftInteger = true;
+ if (s[2] == '2') {
+ s += 3; // i32 suffix
+ isLong = true;
+ isMicrosoftInteger = true;
+ }
break;
case '6':
if (s + 2 == ThisTokEnd) break;
- if (s[2] == '4') s += 3; // i64 suffix
- isMicrosoftInteger = true;
+ if (s[2] == '4') {
+ s += 3; // i64 suffix
+ isLongLong = true;
+ isMicrosoftInteger = true;
+ }
break;
default:
break;
@@ -434,7 +481,6 @@ NumericLiteralParser(const char *begin, const char *end,
}
}
// fall through.
- case 'I':
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
@@ -681,11 +727,29 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
bool Warned = false;
while (begin[0] != '\'') {
uint64_t ResultChar;
+
+ // Is this a Universal Character Name escape?
if (begin[0] != '\\') // If this is a normal character, consume it.
ResultChar = *begin++;
- else // Otherwise, this is an escape character.
- ResultChar = ProcessCharEscape(begin, end, HadError, Loc, IsWide, PP,
- /*Complain=*/true);
+ else { // Otherwise, this is an escape character.
+ // Check for UCN.
+ if (begin[1] == 'u' || begin[1] == 'U') {
+ uint32_t utf32 = 0;
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(begin, end, utf32, UcnLen,
+ FullSourceLoc(Loc, PP.getSourceManager()),
+ &PP.getDiagnostics(), PP.getLangOptions())) {
+ HadError = 1;
+ }
+ ResultChar = utf32;
+ } else {
+ // Otherwise, this is a non-UCN escape character. Process it.
+ ResultChar = ProcessCharEscape(begin, end, HadError,
+ FullSourceLoc(Loc,PP.getSourceManager()),
+ IsWide,
+ &PP.getDiagnostics(), PP.getTargetInfo());
+ }
+ }
// If this is a multi-character constant (e.g. 'abc'), handle it. These are
// implementation defined (C99 6.4.4.4p10).
@@ -725,6 +789,9 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// Transfer the value from APInt to uint64_t
Value = LitVal.getZExtValue();
+ if (IsWide && PP.getLangOptions().ShortWChar && Value > 0xFFFF)
+ PP.Diag(Loc, diag::warn_ucn_escape_too_large);
+
// If this is a single narrow character, sign extend it (e.g. '\xFF' is "-1")
// if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
// character constants are not sign extended in the this implementation:
@@ -771,7 +838,13 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
///
StringLiteralParser::
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
- Preprocessor &pp, bool Complain) : PP(pp) {
+ Preprocessor &PP, bool Complain)
+ : SM(PP.getSourceManager()), Features(PP.getLangOptions()),
+ Target(PP.getTargetInfo()), Diags(Complain ? &PP.getDiagnostics() : 0) {
+ init(StringToks, NumStringToks);
+}
+
+void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
// Scan all of the string portions, remember the max individual token length,
// computing a bound on the concatenated string length, and see whether any
// piece is a wide-string. If any of the string portions is a wide-string
@@ -806,7 +879,7 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
// query the target. As such, wchar_tByteWidth is only valid if AnyWide=true.
wchar_tByteWidth = ~0U;
if (AnyWide) {
- wchar_tByteWidth = PP.getTargetInfo().getWCharWidth();
+ wchar_tByteWidth = Target.getWCharWidth();
assert((wchar_tByteWidth & 7) == 0 && "Assumes wchar_t is byte multiple!");
wchar_tByteWidth /= 8;
}
@@ -835,8 +908,9 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
// that ThisTokBuf points to a buffer that is big enough for the whole token
// and 'spelled' tokens can only shrink.
bool StringInvalid = false;
- unsigned ThisTokLen = PP.getSpelling(StringToks[i], ThisTokBuf,
- &StringInvalid);
+ unsigned ThisTokLen =
+ Lexer::getSpelling(StringToks[i], ThisTokBuf, SM, Features,
+ &StringInvalid);
if (StringInvalid) {
hadError = 1;
continue;
@@ -856,7 +930,7 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
++ThisTokBuf;
// Check if this is a pascal string
- if (pp.getLangOptions().PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
+ if (Features.PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
// If the \p sequence is found in the first token, we have a pascal string
@@ -894,15 +968,16 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
}
// Is this a Universal Character Name escape?
if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
- ProcessUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
- hadError, StringToks[i].getLocation(), PP, wide,
- Complain);
+ EncodeUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
+ hadError, FullSourceLoc(StringToks[i].getLocation(),SM),
+ wide, Diags, Features);
continue;
}
// Otherwise, this is a non-UCN escape character. Process it.
- unsigned ResultChar = ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
- StringToks[i].getLocation(),
- AnyWide, PP, Complain);
+ unsigned ResultChar =
+ ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ AnyWide, Diags, Target);
// Note: our internal rep of wide char tokens is always little-endian.
*ResultPtr++ = ResultChar & 0xFF;
@@ -920,25 +995,24 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
ResultBuf[0] /= wchar_tByteWidth;
// Verify that pascal strings aren't too large.
- if (GetStringLength() > 256 && Complain) {
- PP.Diag(StringToks[0].getLocation(), diag::err_pascal_string_too_long)
- << SourceRange(StringToks[0].getLocation(),
- StringToks[NumStringToks-1].getLocation());
+ if (GetStringLength() > 256) {
+ if (Diags)
+ Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ diag::err_pascal_string_too_long)
+ << SourceRange(StringToks[0].getLocation(),
+ StringToks[NumStringToks-1].getLocation());
hadError = 1;
return;
}
- } else if (Complain) {
+ } else if (Diags) {
// Complain if this string literal has too many characters.
- unsigned MaxChars = PP.getLangOptions().CPlusPlus? 65536
- : PP.getLangOptions().C99 ? 4095
- : 509;
+ unsigned MaxChars = Features.CPlusPlus? 65536 : Features.C99 ? 4095 : 509;
if (GetNumStringChars() > MaxChars)
- PP.Diag(StringToks[0].getLocation(), diag::ext_string_too_long)
+ Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ diag::ext_string_too_long)
<< GetNumStringChars() << MaxChars
- << (PP.getLangOptions().CPlusPlus? 2
- : PP.getLangOptions().C99 ? 1
- : 0)
+ << (Features.CPlusPlus ? 2 : Features.C99 ? 1 : 0)
<< SourceRange(StringToks[0].getLocation(),
StringToks[NumStringToks-1].getLocation());
}
@@ -949,19 +1023,17 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
/// specified byte of the string data represented by Token. This handles
/// advancing over escape sequences in the string.
unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
- unsigned ByteNo,
- Preprocessor &PP,
- bool Complain) {
+ unsigned ByteNo) const {
// Get the spelling of the token.
- llvm::SmallString<16> SpellingBuffer;
+ llvm::SmallString<32> SpellingBuffer;
SpellingBuffer.resize(Tok.getLength());
bool StringInvalid = false;
const char *SpellingPtr = &SpellingBuffer[0];
- unsigned TokLen = PP.getSpelling(Tok, SpellingPtr, &StringInvalid);
- if (StringInvalid) {
+ unsigned TokLen = Lexer::getSpelling(Tok, SpellingPtr, SM, Features,
+ &StringInvalid);
+ if (StringInvalid)
return 0;
- }
assert(SpellingPtr[0] != 'L' && "Doesn't handle wide strings yet");
@@ -987,7 +1059,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
// Otherwise, this is an escape character. Advance over it.
bool HadError = false;
ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
- Tok.getLocation(), false, PP, Complain);
+ FullSourceLoc(Tok.getLocation(), SM),
+ false, Diags, Target);
assert(!HadError && "This method isn't valid on erroneous strings");
--ByteNo;
}
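
ProcessUCNEscape is split into validation, returning the raw UTF-32 value, and EncodeUCNEscape, which now also targets UTF-16 when wchar_t is short, emitting a surrogate pair for supplementary-plane characters. The surrogate arithmetic restated as a self-contained sketch:

#include <stdint.h>
#include <assert.h>

void toSurrogates(uint32_t Cp, uint16_t *Hi, uint16_t *Lo) {
  assert(Cp > 0xFFFF && Cp <= 0x10FFFF);       /* supplementary plane only */
  Cp -= 0x10000;                               /* 20 significant bits remain */
  *Hi = (uint16_t)(0xD800 + (Cp >> 10));       /* high surrogate: top 10 bits */
  *Lo = (uint16_t)(0xDC00 + (Cp & 0x3FF));     /* low surrogate: bottom 10 bits */
}
/* Example: U+1F600 yields 0xD83D, 0xDE00. */
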
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
index c6d0934..c819011 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -22,8 +22,9 @@ MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
IsBuiltinMacro = false;
IsFromAST = false;
IsDisabled = false;
- IsUsed = true;
+ IsUsed = false;
IsAllowRedefinitionsWithoutWarning = false;
+ IsWarnIfUnused = false;
ArgumentList = 0;
NumArguments = 0;
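
Flipping IsUsed's default to false and adding IsWarnIfUnused is the bookkeeping behind -Wunused-macros: macros defined in the main file are tracked (see the WarnUnusedMacroLocs changes in PPDirectives.cpp below) and reported when never expanded. A hypothetical input that should now draw the diagnostic:

/* clang -Wunused-macros demo.c */
#define USED   1
#define UNUSED 2      /* expect a "macro is not used" warning here */
int main(void) { return USED; }
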
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index 8da7def..0f0d25b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -17,6 +17,7 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/Pragma.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/APInt.h"
@@ -27,14 +28,23 @@ using namespace clang;
//===----------------------------------------------------------------------===//
MacroInfo *Preprocessor::AllocateMacroInfo() {
- MacroInfo *MI;
+ MacroInfoChain *MIChain;
- if (!MICache.empty()) {
- MI = MICache.back();
- MICache.pop_back();
- } else
- MI = (MacroInfo*) BP.Allocate<MacroInfo>();
- return MI;
+ if (MICache) {
+ MIChain = MICache;
+ MICache = MICache->Next;
+ }
+ else {
+ MIChain = BP.Allocate<MacroInfoChain>();
+ }
+
+ MIChain->Next = MIChainHead;
+ MIChain->Prev = 0;
+ if (MIChainHead)
+ MIChainHead->Prev = MIChain;
+ MIChainHead = MIChain;
+
+ return &(MIChain->MI);
}
MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
@@ -52,10 +62,23 @@ MacroInfo *Preprocessor::CloneMacroInfo(const MacroInfo &MacroToClone) {
/// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
/// be reused for allocating new MacroInfo objects.
void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
- MICache.push_back(MI);
- MI->FreeArgumentList();
-}
+ MacroInfoChain *MIChain = (MacroInfoChain*) MI;
+ if (MacroInfoChain *Prev = MIChain->Prev) {
+ MacroInfoChain *Next = MIChain->Next;
+ Prev->Next = Next;
+ if (Next)
+ Next->Prev = Prev;
+ }
+ else {
+ assert(MIChainHead == MIChain);
+ MIChainHead = MIChain->Next;
+ if (MIChainHead) MIChainHead->Prev = 0; // released MI may be the only element
+ }
+ MIChain->Next = MICache;
+ MICache = MIChain;
+ MI->Destroy();
+}
/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
/// current line until the tok::eom token is found.
@@ -222,7 +245,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
// something bogus), skip it.
- if (Tok.isNot(tok::identifier)) {
+ if (Tok.isNot(tok::raw_identifier)) {
CurPPLexer->ParsingPreprocessorDirective = false;
// Restore comment saving mode.
if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
@@ -234,12 +257,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// to spell an i/e in a strange way that is another letter. Skipping this
// allows us to avoid looking up the identifier info for #define/#undef and
// other common directives.
- bool Invalid = false;
- const char *RawCharData = SourceMgr.getCharacterData(Tok.getLocation(),
- &Invalid);
- if (Invalid)
- return;
-
+ const char *RawCharData = Tok.getRawIdentifierData();
+
char FirstChar = RawCharData[0];
if (FirstChar >= 'a' && FirstChar <= 'z' &&
FirstChar != 'i' && FirstChar != 'e') {
@@ -279,7 +298,10 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
DiscardUntilEndOfDirective();
CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
/*foundnonskip*/false,
- /*fnddelse*/false);
+ /*foundelse*/false);
+
+ if (Callbacks)
+ Callbacks->Endif();
}
} else if (Directive[0] == 'e') {
llvm::StringRef Sub = Directive.substr(1);
@@ -288,7 +310,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
PPConditionalInfo CondInfo;
CondInfo.WasSkipping = true; // Silence bogus warning.
bool InCond = CurPPLexer->popConditionalLevel(CondInfo);
- InCond = InCond; // Silence warning in no-asserts mode.
+ (void)InCond; // Silence warning in no-asserts mode.
assert(!InCond && "Can't be skipping if not in a conditional!");
// If we popped the outermost skipping block, we're done skipping!
@@ -307,6 +329,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// Note that we've seen a #else in this conditional.
CondInfo.FoundElse = true;
+ if (Callbacks)
+ Callbacks->Else();
+
// If the conditional is at the top level, and the #if block wasn't
// entered, enter the #else block now.
if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
@@ -317,6 +342,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
bool ShouldEnter;
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
// If this is in a skipping block or if we're already handled this #if
// block, don't bother parsing the condition.
if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
@@ -331,10 +357,14 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
CurPPLexer->LexingRawMode = true;
}
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+ if (Callbacks)
+ Callbacks->Elif(SourceRange(ConditionalBegin, ConditionalEnd));
+
// If this condition is true, enter it!
if (ShouldEnter) {
CondInfo.FoundNonSkip = true;
@@ -366,7 +396,7 @@ void Preprocessor::PTHSkipExcludedConditionalBlock() {
// have been consumed by the PTHLexer. Just pop off the condition level.
PPConditionalInfo CondInfo;
bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
- InCond = InCond; // Silence warning in no-asserts mode.
+ (void)InCond; // Silence warning in no-asserts mode.
assert(!InCond && "Can't be skipping if not in a conditional!");
break;
}
@@ -568,9 +598,11 @@ TryAgain:
// C99 6.10.2 - Source File Inclusion.
case tok::pp_include:
- return HandleIncludeDirective(Result); // Handle #include.
+ // Handle #include.
+ return HandleIncludeDirective(SavedHash.getLocation(), Result);
case tok::pp___include_macros:
- return HandleIncludeMacrosDirective(Result); // Handle -imacros.
+ // Handle -imacros.
+ return HandleIncludeMacrosDirective(SavedHash.getLocation(), Result);
// C99 6.10.3 - Macro Replacement.
case tok::pp_define:
@@ -588,13 +620,13 @@ TryAgain:
// C99 6.10.6 - Pragma Directive.
case tok::pp_pragma:
- return HandlePragmaDirective();
+ return HandlePragmaDirective(PIK_HashPragma);
// GNU Extensions.
case tok::pp_import:
- return HandleImportDirective(Result);
+ return HandleImportDirective(SavedHash.getLocation(), Result);
case tok::pp_include_next:
- return HandleIncludeNextDirective(Result);
+ return HandleIncludeNextDirective(SavedHash.getLocation(), Result);
case tok::pp_warning:
Diag(Result, diag::ext_pp_warning_directive);
@@ -622,6 +654,12 @@ TryAgain:
// Return the # and the token after it.
Toks[0] = SavedHash;
Toks[1] = Result;
+
+ // If the second token is a hashhash token, then we need to translate it to
+ // unknown so the token lexer doesn't try to perform token pasting.
+ if (Result.is(tok::hashhash))
+ Toks[1].setKind(tok::unknown);
+
// Enter this token stream so that we re-lex the tokens. Make sure to
// enable macro expansion, in case the token after the # is an identifier
// that is expanded.
@@ -779,7 +817,9 @@ static bool ReadLineMarkerFlags(bool &IsFileEntry, bool &IsFileExit,
FileID CurFileID =
SM.getDecomposedInstantiationLoc(FlagTok.getLocation()).first;
PresumedLoc PLoc = SM.getPresumedLoc(FlagTok.getLocation());
-
+ if (PLoc.isInvalid())
+ return true;
+
// If there is no include loc (main file) or if the include loc is in a
// different physical file, then we aren't in a "1" line marker flag region.
SourceLocation IncLoc = PLoc.getIncludeLoc();
@@ -1011,11 +1051,20 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
/// false if the > was found, otherwise it returns true if it finds and consumes
/// the EOM marker.
bool Preprocessor::ConcatenateIncludeName(
- llvm::SmallString<128> &FilenameBuffer) {
+ llvm::SmallString<128> &FilenameBuffer,
+ SourceLocation &End) {
Token CurTok;
Lex(CurTok);
while (CurTok.isNot(tok::eom)) {
+ End = CurTok.getLocation();
+
+ // FIXME: Provide code completion for #includes.
+ if (CurTok.is(tok::code_completion)) {
+ Lex(CurTok);
+ continue;
+ }
+
// Append the spelling of this token to the buffer. If there was a space
// before it, add it now.
if (CurTok.hasLeadingSpace())
@@ -1054,7 +1103,8 @@ bool Preprocessor::ConcatenateIncludeName(
/// routine with functionality shared between #include, #include_next and
/// #import. LookupFrom is set when this is a #include_next directive, it
/// specifies the file to start searching from.
-void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
+void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
+ Token &IncludeTok,
const DirectoryLookup *LookupFrom,
bool isImport) {
@@ -1064,7 +1114,8 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
// Reserve a buffer to get the spelling.
llvm::SmallString<128> FilenameBuffer;
llvm::StringRef Filename;
-
+ SourceLocation End;
+
switch (FilenameTok.getKind()) {
case tok::eom:
// If the token kind is EOM, the error has already been diagnosed.
@@ -1073,13 +1124,14 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
case tok::angle_string_literal:
case tok::string_literal:
Filename = getSpelling(FilenameTok, FilenameBuffer);
+ End = FilenameTok.getLocation();
break;
case tok::less:
// This could be a <foo/bar.h> file coming from a macro expansion. In this
// case, glue the tokens together into FilenameBuffer and interpret those.
FilenameBuffer.push_back('<');
- if (ConcatenateIncludeName(FilenameBuffer))
+ if (ConcatenateIncludeName(FilenameBuffer, End))
return; // Found <eom> but no ">"? Diagnostic already emitted.
Filename = FilenameBuffer.str();
break;
@@ -1118,6 +1170,11 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
return;
}
+ // Notify the callback object that we've seen an inclusion directive.
+ if (Callbacks)
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, File,
+ End);
+
// The #included file will be considered to be a system header if either it is
// in a system include directory, or if the #includer is a system include
// header.
@@ -1147,7 +1204,8 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
/// HandleIncludeNextDirective - Implements #include_next.
///
-void Preprocessor::HandleIncludeNextDirective(Token &IncludeNextTok) {
+void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
+ Token &IncludeNextTok) {
Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
// #include_next is like #include, except that we start searching after
@@ -1164,23 +1222,25 @@ void Preprocessor::HandleIncludeNextDirective(Token &IncludeNextTok) {
++Lookup;
}
- return HandleIncludeDirective(IncludeNextTok, Lookup);
+ return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup);
}
/// HandleImportDirective - Implements #import.
///
-void Preprocessor::HandleImportDirective(Token &ImportTok) {
+void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
+ Token &ImportTok) {
if (!Features.ObjC1) // #import is standard for ObjC.
Diag(ImportTok, diag::ext_pp_import_directive);
- return HandleIncludeDirective(ImportTok, 0, true);
+ return HandleIncludeDirective(HashLoc, ImportTok, 0, true);
}
/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
/// pseudo directive in the predefines buffer. This handles it by sucking all
/// tokens through the preprocessor and discarding them (only keeping the side
/// effects on the preprocessor).
-void Preprocessor::HandleIncludeMacrosDirective(Token &IncludeMacrosTok) {
+void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
+ Token &IncludeMacrosTok) {
// This directive should only occur in the predefines buffer. If not, emit an
// error and reject it.
SourceLocation Loc = IncludeMacrosTok.getLocation();
@@ -1193,7 +1253,7 @@ void Preprocessor::HandleIncludeMacrosDirective(Token &IncludeMacrosTok) {
// Treat this as a normal #include for checking purposes. If this is
// successful, it will push a new lexer onto the include stack.
- HandleIncludeDirective(IncludeMacrosTok, 0, false);
+ HandleIncludeDirective(HashLoc, IncludeMacrosTok, 0, false);
Token TmpTok;
do {
@@ -1457,11 +1517,6 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
}
}
- // If this is the primary source file, remember that this macro hasn't been
- // used yet.
- if (isInPrimaryFile())
- MI->setIsUsed(false);
-
MI->setDefinitionEndLoc(LastTok.getLocation());
// Finally, if this identifier already had a macro defined for it, verify that
@@ -1472,7 +1527,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
// then don't bother calling MacroInfo::isIdenticalTo.
if (!getDiagnostics().getSuppressSystemWarnings() ||
!SourceMgr.isInSystemHeader(DefineTok.getLocation())) {
- if (!OtherMI->isUsed())
+ if (!OtherMI->isUsed() && OtherMI->isWarnIfUnused())
Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
// Macros must be identical. This means all tokens and whitespace
@@ -1484,14 +1539,26 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
Diag(OtherMI->getDefinitionLoc(), diag::note_previous_definition);
}
}
+ if (OtherMI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(OtherMI->getDefinitionLoc());
ReleaseMacroInfo(OtherMI);
}
setMacroInfo(MacroNameTok.getIdentifierInfo(), MI);
+ assert(!MI->isUsed());
+ // If we need warning for not using the macro, add its location in the
+ // warn-because-unused-macro set. If it gets used it will be removed from set.
+ if (isInPrimaryFile() && // don't warn for include'd macros.
+ Diags->getDiagnosticLevel(diag::pp_macro_not_used,
+ MI->getDefinitionLoc()) != Diagnostic::Ignored) {
+ MI->setIsWarnIfUnused(true);
+ WarnUnusedMacroLocs.insert(MI->getDefinitionLoc());
+ }
+
// If the callbacks want to know, tell them about the macro definition.
if (Callbacks)
- Callbacks->MacroDefined(MacroNameTok.getIdentifierInfo(), MI);
+ Callbacks->MacroDefined(MacroNameTok, MI);
}
/// HandleUndefDirective - Implements #undef.
@@ -1520,8 +1587,10 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
// If the callbacks want to know, tell them about the macro #undef.
if (Callbacks)
- Callbacks->MacroUndefined(MacroNameTok.getLocation(),
- MacroNameTok.getIdentifierInfo(), MI);
+ Callbacks->MacroUndefined(MacroNameTok, MI);
+
+ if (MI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
// Free macro definition.
ReleaseMacroInfo(MI);
@@ -1575,7 +1644,7 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
// If there is a macro, process it.
if (MI) // Mark it used.
- MI->setIsUsed(true);
+ markMacroAsUsed(MI);
// Should we include the stuff contained by this directive?
if (!MI == isIfndef) {
@@ -1584,11 +1653,18 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
/*wasskip*/false, /*foundnonskip*/true,
/*foundelse*/false);
} else {
- // No, skip the contents of this block and return the first token after it.
+ // No, skip the contents of this block.
SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
/*Foundnonskip*/false,
/*FoundElse*/false);
}
+
+ if (Callbacks) {
+ if (isIfndef)
+ Callbacks->Ifndef(MacroNameTok);
+ else
+ Callbacks->Ifdef(MacroNameTok);
+ }
}
/// HandleIfDirective - Implements the #if directive.
@@ -1597,10 +1673,11 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
bool ReadAnyTokensBeforeDirective) {
++NumIf;
- // Parse and evaluation the conditional expression.
+ // Parse and evaluate the conditional expression.
IdentifierInfo *IfNDefMacro = 0;
- bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
-
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ const bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
// If this condition is equivalent to #ifndef X, and if this is the first
// directive seen, handle it for the multiple-include optimization.
@@ -1617,10 +1694,13 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
CurPPLexer->pushConditionalLevel(IfToken.getLocation(), /*wasskip*/false,
/*foundnonskip*/true, /*foundelse*/false);
} else {
- // No, skip the contents of this block and return the first token after it.
+ // No, skip the contents of this block.
SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
/*FoundElse*/false);
}
+
+ if (Callbacks)
+ Callbacks->If(SourceRange(ConditionalBegin, ConditionalEnd));
}
/// HandleEndifDirective - Implements the #endif directive.
@@ -1644,9 +1724,13 @@ void Preprocessor::HandleEndifDirective(Token &EndifToken) {
assert(!CondInfo.WasSkipping && !CurPPLexer->LexingRawMode &&
"This code should only be reachable in the non-skipping case!");
-}
+ if (Callbacks)
+ Callbacks->Endif();
+}
+/// HandleElseDirective - Implements the #else directive.
+///
void Preprocessor::HandleElseDirective(Token &Result) {
++NumElse;
@@ -1666,19 +1750,25 @@ void Preprocessor::HandleElseDirective(Token &Result) {
// If this is a #else with a #else before it, report the error.
if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
- // Finally, skip the rest of the contents of this block and return the first
- // token after it.
- return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
- /*FoundElse*/true);
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/true);
+
+ if (Callbacks)
+ Callbacks->Else();
}
+/// HandleElifDirective - Implements the #elif directive.
+///
void Preprocessor::HandleElifDirective(Token &ElifToken) {
++NumElse;
// #elif directive in a non-skipping conditional... start skipping.
// We don't care what the condition is, because we will always skip it (since
// the block immediately before it was included).
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
DiscardUntilEndOfDirective();
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
PPConditionalInfo CI;
if (CurPPLexer->popConditionalLevel(CI)) {
@@ -1693,8 +1783,10 @@ void Preprocessor::HandleElifDirective(Token &ElifToken) {
// If this is a #elif with a #else before it, report the error.
if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
- // Finally, skip the rest of the contents of this block and return the first
- // token after it.
- return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
- /*FoundElse*/CI.FoundElse);
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/CI.FoundElse);
+
+ if (Callbacks)
+ Callbacks->Elif(SourceRange(ConditionalBegin, ConditionalEnd));
}
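Taken together, these hooks expose the whole conditional structure to PPCallbacks clients. A minimal observer, assuming the callback signatures this change introduces (the calls above pass exactly these argument types):

class CondLogger : public clang::PPCallbacks {
public:
  virtual void If(clang::SourceRange ConditionRange) {
    // ConditionRange covers the #if condition text computed above.
  }
  virtual void Elif(clang::SourceRange ConditionRange) {}
  virtual void Ifdef(const clang::Token &MacroNameTok) {}
  virtual void Ifndef(const clang::Token &MacroNameTok) {}
  virtual void Else() {}
  virtual void Endif() {}
};
// Registered with PP.addPPCallbacks(new CondLogger()); the preprocessor
// takes ownership and chains it with any existing callbacks.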
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
index 163e869..1451c5a 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
@@ -112,7 +112,7 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// If there is a macro, mark it used.
if (Result.Val != 0 && ValueLive) {
MacroInfo *Macro = PP.getMacroInfo(II);
- Macro->setIsUsed(true);
+ PP.markMacroAsUsed(Macro);
}
// Consume identifier.
@@ -519,7 +519,6 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
RHS.Val.setIsUnsigned(Res.isUnsigned());
}
- // FIXME: All of these should detect and report overflow??
bool Overflow = false;
switch (Operator) {
default: assert(0 && "Unknown operator token!");
@@ -534,9 +533,10 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
break;
case tok::slash:
if (RHS.Val != 0) {
- Res = LHS.Val / RHS.Val;
- if (LHS.Val.isSigned()) // MININT/-1 --> overflow.
- Overflow = LHS.Val.isMinSignedValue() && RHS.Val.isAllOnesValue();
+ if (LHS.Val.isSigned())
+ Res = llvm::APSInt(LHS.Val.sdiv_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val / RHS.Val;
} else if (ValueLive) {
PP.Diag(OpLoc, diag::err_pp_division_by_zero)
<< LHS.getRange() << RHS.getRange();
@@ -545,23 +545,22 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
break;
case tok::star:
- Res = LHS.Val * RHS.Val;
- if (Res.isSigned() && LHS.Val != 0 && RHS.Val != 0)
- Overflow = Res/RHS.Val != LHS.Val || Res/LHS.Val != RHS.Val;
+ if (Res.isSigned())
+ Res = llvm::APSInt(LHS.Val.smul_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val * RHS.Val;
break;
case tok::lessless: {
// Determine whether overflow is about to happen.
unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
- if (ShAmt >= LHS.Val.getBitWidth())
- Overflow = true, ShAmt = LHS.Val.getBitWidth()-1;
- else if (LHS.isUnsigned())
- Overflow = false;
- else if (LHS.Val.isNonNegative()) // Don't allow sign change.
- Overflow = ShAmt >= LHS.Val.countLeadingZeros();
- else
- Overflow = ShAmt >= LHS.Val.countLeadingOnes();
-
- Res = LHS.Val << ShAmt;
+ if (LHS.isUnsigned()) {
+ Overflow = ShAmt >= LHS.Val.getBitWidth();
+ if (Overflow)
+ ShAmt = LHS.Val.getBitWidth()-1;
+ Res = LHS.Val << ShAmt;
+ } else {
+ Res = llvm::APSInt(LHS.Val.sshl_ov(ShAmt, Overflow), false);
+ }
break;
}
case tok::greatergreater: {
@@ -573,20 +572,16 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
break;
}
case tok::plus:
- Res = LHS.Val + RHS.Val;
if (LHS.isUnsigned())
- Overflow = false;
- else if (LHS.Val.isNonNegative() == RHS.Val.isNonNegative() &&
- Res.isNonNegative() != LHS.Val.isNonNegative())
- Overflow = true; // Overflow for signed addition.
+ Res = LHS.Val + RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.sadd_ov(RHS.Val, Overflow), false);
break;
case tok::minus:
- Res = LHS.Val - RHS.Val;
if (LHS.isUnsigned())
- Overflow = false;
- else if (LHS.Val.isNonNegative() != RHS.Val.isNonNegative() &&
- Res.isNonNegative() != LHS.Val.isNonNegative())
- Overflow = true; // Overflow for signed subtraction.
+ Res = LHS.Val - RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.ssub_ov(RHS.Val, Overflow), false);
break;
case tok::lessequal:
Res = LHS.Val <= RHS.Val;
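The *_ov helpers (sadd_ov, ssub_ov, smul_ov, sdiv_ov, sshl_ov) return the wrapped result and report overflow through an out-flag, replacing the manual sign checks deleted above. A standalone illustration with example values:

#include "llvm/ADT/APSInt.h"
bool Overflow = false;
llvm::APSInt LHS(llvm::APInt(32, 0x7fffffffU), /*isUnsigned=*/false); // INT_MAX
llvm::APSInt RHS(llvm::APInt(32, 1), /*isUnsigned=*/false);
llvm::APSInt Sum(LHS.sadd_ov(RHS, Overflow), /*isUnsigned=*/false);
// Overflow == true: INT_MAX + 1 does not fit in 32 signed bits, and Sum
// holds the wrapped value; this is the convention the code above relies on.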
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index 4a40405..eef42b6 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -250,15 +250,11 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
CurPPLexer = 0;
- // This is the end of the top-level file. If the diag::pp_macro_not_used
- // diagnostic is enabled, look for macros that have not been used.
- if (getDiagnostics().getDiagnosticLevel(diag::pp_macro_not_used) !=
- Diagnostic::Ignored) {
- for (macro_iterator I = macro_begin(false), E = macro_end(false);
- I != E; ++I)
- if (!I->second->isUsed())
- Diag(I->second->getDefinitionLoc(), diag::pp_macro_not_used);
- }
+ // This is the end of the top-level file. 'WarnUnusedMacroLocs' has collected
+ // all macro locations that we need to warn about because they were not used.
+ for (WarnUnusedMacroLocsTy::iterator
+ I = WarnUnusedMacroLocs.begin(), E = WarnUnusedMacroLocs.end(); I != E; ++I)
+ Diag(*I, diag::pp_macro_not_used);
return true;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index 9015c27..ba92614 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -20,12 +20,28 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
#include <ctime>
using namespace clang;
+MacroInfo *Preprocessor::getInfoForMacro(IdentifierInfo *II) const {
+ assert(II->hasMacroDefinition() && "Identifier is not a macro!");
+
+ llvm::DenseMap<IdentifierInfo*, MacroInfo*>::const_iterator Pos
+ = Macros.find(II);
+ if (Pos == Macros.end()) {
+ // Load this macro from the external source.
+ getExternalSource()->LoadMacroDefinition(II);
+ Pos = Macros.find(II);
+ }
+ assert(Pos != Macros.end() && "Identifier macro info is missing!");
+ return Pos->second;
+}
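This is a demand-load: an AST/PCH reader may set hasMacroDefinition() on an identifier before the MacroInfo itself is deserialized, and the first query triggers the load. The same cache-miss / external-load / re-find pattern in a generic, hypothetical rendering:

// Sketch only; 'External.load' stands in for LoadMacroDefinition.
#include <cassert>
template <typename MapT, typename KeyT, typename LoaderT>
typename MapT::mapped_type lookupOrLoad(MapT &Cache, const KeyT &Key,
                                        LoaderT &External) {
  typename MapT::const_iterator Pos = Cache.find(Key);
  if (Pos == Cache.end()) {
    External.load(Key);      // loader populates Cache as a side effect
    Pos = Cache.find(Key);
  }
  assert(Pos != Cache.end() && "loader failed to populate the cache");
  return Pos->second;
}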
+
/// setMacroInfo - Specify a macro for this identifier.
///
void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI) {
@@ -70,6 +86,7 @@ void Preprocessor::RegisterBuiltinMacros() {
// Clang Extensions.
Ident__has_feature = RegisterBuiltinMacro(*this, "__has_feature");
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
+ Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
@@ -206,7 +223,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
}
// Notice that this macro has been used.
- MI->setIsUsed(true);
+ markMacroAsUsed(MI);
// If we started lexing a macro, enter the macro expansion body.
@@ -231,6 +248,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
}
+ Identifier.setFlag(Token::LeadingEmptyMacro);
++NumFastMacroExpanded;
return false;
@@ -481,16 +499,25 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
};
- char TmpBuffer[100];
+ char TmpBuffer[32];
+#ifdef LLVM_ON_WIN32
sprintf(TmpBuffer, "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
TM->tm_year+1900);
+#else
+ snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
+ TM->tm_year+1900);
+#endif
Token TmpTok;
TmpTok.startToken();
PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
DATELoc = TmpTok.getLocation();
+#ifdef LLVM_ON_WIN32
sprintf(TmpBuffer, "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
+#else
+ snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
+#endif
PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
TIMELoc = TmpTok.getLocation();
}
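The shrink from 100 to 32 bytes is safe because the longest spelling is known: the quoted __DATE__ form is 13 characters plus the terminator. A quick standalone check of that bound:

#include <cstdio>
char Buf[32];
int N = snprintf(Buf, sizeof(Buf), "\"%s %2d %4d\"", "Jan", 1, 2011);
// N == 13 ("\"Jan  1 2011\""); even four-digit fields stay far below
// sizeof(Buf), so the bounded snprintf never truncates.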
@@ -505,40 +532,77 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("attribute_analyzer_noreturn", true)
.Case("attribute_cf_returns_not_retained", true)
.Case("attribute_cf_returns_retained", true)
+ .Case("attribute_deprecated_with_message", true)
.Case("attribute_ext_vector_type", true)
.Case("attribute_ns_returns_not_retained", true)
.Case("attribute_ns_returns_retained", true)
+ .Case("attribute_ns_consumes_self", true)
+ .Case("attribute_ns_consumed", true)
+ .Case("attribute_cf_consumed", true)
.Case("attribute_objc_ivar_unused", true)
.Case("attribute_overloadable", true)
+ .Case("attribute_unavailable_with_message", true)
.Case("blocks", LangOpts.Blocks)
- .Case("cxx_attributes", LangOpts.CPlusPlus0x)
- .Case("cxx_auto_type", LangOpts.CPlusPlus0x)
- .Case("cxx_decltype", LangOpts.CPlusPlus0x)
- .Case("cxx_deleted_functions", LangOpts.CPlusPlus0x)
.Case("cxx_exceptions", LangOpts.Exceptions)
.Case("cxx_rtti", LangOpts.RTTI)
- .Case("cxx_static_assert", LangOpts.CPlusPlus0x)
+ .Case("enumerator_attributes", true)
.Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
.Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
.Case("ownership_holds", true)
.Case("ownership_returns", true)
.Case("ownership_takes", true)
- .Case("cxx_inline_namespaces", true)
- //.Case("cxx_concepts", false)
+ // C++0x features
+ .Case("cxx_attributes", LangOpts.CPlusPlus0x)
+ .Case("cxx_auto_type", LangOpts.CPlusPlus0x)
+ .Case("cxx_decltype", LangOpts.CPlusPlus0x)
+ .Case("cxx_default_function_template_args", LangOpts.CPlusPlus0x)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus0x)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus0x)
//.Case("cxx_lambdas", false)
//.Case("cxx_nullptr", false)
- //.Case("cxx_rvalue_references", false)
- //.Case("cxx_variadic_templates", false)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus0x)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus0x)
+ .Case("cxx_strong_enums", LangOpts.CPlusPlus0x)
+ .Case("cxx_static_assert", LangOpts.CPlusPlus0x)
+ .Case("cxx_trailing_return", LangOpts.CPlusPlus0x)
+ .Case("cxx_variadic_templates", LangOpts.CPlusPlus0x)
+ // Type traits
+ .Case("has_nothrow_assign", LangOpts.CPlusPlus)
+ .Case("has_nothrow_copy", LangOpts.CPlusPlus)
+ .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_assign", LangOpts.CPlusPlus)
+ .Case("has_trivial_copy", LangOpts.CPlusPlus)
+ .Case("has_trivial_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_destructor", LangOpts.CPlusPlus)
+ .Case("has_virtual_destructor", LangOpts.CPlusPlus)
+ .Case("is_abstract", LangOpts.CPlusPlus)
+ .Case("is_base_of", LangOpts.CPlusPlus)
+ .Case("is_class", LangOpts.CPlusPlus)
+ .Case("is_convertible_to", LangOpts.CPlusPlus)
+ .Case("is_empty", LangOpts.CPlusPlus)
+ .Case("is_enum", LangOpts.CPlusPlus)
+ .Case("is_pod", LangOpts.CPlusPlus)
+ .Case("is_polymorphic", LangOpts.CPlusPlus)
+ .Case("is_union", LangOpts.CPlusPlus)
+ .Case("is_literal", LangOpts.CPlusPlus)
.Case("tls", PP.getTargetInfo().isTLSSupported())
.Default(false);
}
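On the user side these table entries are queried through __has_feature, typically guarded so the code still compiles with other compilers. For example:

#if defined(__has_feature)
# if __has_feature(cxx_rvalue_references)
    // Rvalue references are available; T&& overloads may be used here.
# endif
#endif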
+/// HasAttribute - Return true if we recognize and implement the attribute
+/// specified by the given identifier.
+static bool HasAttribute(const IdentifierInfo *II) {
+ return llvm::StringSwitch<bool>(II->getName())
+#include "clang/Lex/AttrSpellings.inc"
+ .Default(false);
+}
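AttrSpellings.inc is tblgen-generated, so the switch presumably expands to one .Case per known attribute spelling; schematically (illustrative entries only):

return llvm::StringSwitch<bool>(II->getName())
  .Case("noreturn", true)
  .Case("deprecated", true)
  // ... one generated .Case per recognized spelling ...
  .Default(false);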
+
/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
/// or '__has_include_next("path")' expression.
/// Returns true if successful.
-static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
- IdentifierInfo *II, Preprocessor &PP,
- const DirectoryLookup *LookupFrom) {
+static bool EvaluateHasIncludeCommon(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP,
+ const DirectoryLookup *LookupFrom) {
SourceLocation LParenLoc;
// Get '('.
@@ -559,7 +623,8 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
// Reserve a buffer to get the spelling.
llvm::SmallString<128> FilenameBuffer;
llvm::StringRef Filename;
-
+ SourceLocation EndLoc;
+
switch (Tok.getKind()) {
case tok::eom:
// If the token kind is EOM, the error has already been diagnosed.
@@ -578,7 +643,7 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
// This could be a <foo/bar.h> file coming from a macro expansion. In this
// case, glue the tokens together into FilenameBuffer and interpret those.
FilenameBuffer.push_back('<');
- if (PP.ConcatenateIncludeName(FilenameBuffer))
+ if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc))
return false; // Found <eom> but no ">"? Diagnostic already emitted.
Filename = FilenameBuffer.str();
break;
@@ -598,7 +663,7 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
const FileEntry *File = PP.LookupFile(Filename, isAngled, LookupFrom, CurDir);
// Get the result value. Result = true means the file exists.
- Result = File != 0;
+ bool Result = File != 0;
// Get ')'.
PP.LexNonComment(Tok);
@@ -610,19 +675,19 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
return false;
}
- return true;
+ return Result;
}
/// EvaluateHasInclude - Process a '__has_include("path")' expression.
/// Returns true if successful.
-static bool EvaluateHasInclude(bool &Result, Token &Tok, IdentifierInfo *II,
+static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
Preprocessor &PP) {
- return(EvaluateHasIncludeCommon(Result, Tok, II, PP, NULL));
+ return EvaluateHasIncludeCommon(Tok, II, PP, NULL);
}
/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
/// Returns true if successful.
-static bool EvaluateHasIncludeNext(bool &Result, Token &Tok,
+static bool EvaluateHasIncludeNext(Token &Tok,
IdentifierInfo *II, Preprocessor &PP) {
// __has_include_next is like __has_include, except that we start
// searching after the current found directory. If we can't do this,
@@ -638,7 +703,7 @@ static bool EvaluateHasIncludeNext(bool &Result, Token &Tok,
++Lookup;
}
- return(EvaluateHasIncludeCommon(Result, Tok, II, PP, Lookup));
+ return EvaluateHasIncludeCommon(Tok, II, PP, Lookup);
}
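With the result no longer threaded through an out-parameter, behavior from the user's perspective is unchanged; a typical guarded use of the builtin:

#if defined(__has_include)
# if __has_include(<tr1/unordered_map>)
#  include <tr1/unordered_map>
# endif
#endif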
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
@@ -683,7 +748,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
// __LINE__ expands to a simple numeric value.
- OS << PLoc.getLine();
+ OS << (PLoc.isValid()? PLoc.getLine() : 1);
Tok.setKind(tok::numeric_constant);
} else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
// C99 6.10.8: "__FILE__: The presumed name of the current source file (a
@@ -692,19 +757,24 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// __BASE_FILE__ is a GNU extension that returns the top of the presumed
// #include stack instead of the current file.
- if (II == Ident__BASE_FILE__) {
+ if (II == Ident__BASE_FILE__ && PLoc.isValid()) {
SourceLocation NextLoc = PLoc.getIncludeLoc();
while (NextLoc.isValid()) {
PLoc = SourceMgr.getPresumedLoc(NextLoc);
+ if (PLoc.isInvalid())
+ break;
+
NextLoc = PLoc.getIncludeLoc();
}
}
// Escape this filename. Turn '\' -> '\\' '"' -> '\"'
llvm::SmallString<128> FN;
- FN += PLoc.getFilename();
- Lexer::Stringify(FN);
- OS << '"' << FN.str() << '"';
+ if (PLoc.isValid()) {
+ FN += PLoc.getFilename();
+ Lexer::Stringify(FN);
+ OS << '"' << FN.str() << '"';
+ }
Tok.setKind(tok::string_literal);
} else if (II == Ident__DATE__) {
if (!DATELoc.isValid())
@@ -730,9 +800,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
unsigned Depth = 0;
PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
- PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
- for (; PLoc.isValid(); ++Depth)
+ if (PLoc.isValid()) {
PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ for (; PLoc.isValid(); ++Depth)
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ }
// __INCLUDE_LEVEL__ expands to a simple numeric value.
OS << Depth;
@@ -765,7 +837,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
OS << CounterValue++;
Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_feature ||
- II == Ident__has_builtin) {
+ II == Ident__has_builtin ||
+ II == Ident__has_attribute) {
// The argument to these builtins should be a parenthesized identifier.
SourceLocation StartLoc = Tok.getLocation();
@@ -793,7 +866,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
else if (II == Ident__has_builtin) {
// Check for a builtin is trivial.
Value = FeatureII->getBuiltinID() != 0;
- } else {
+ } else if (II == Ident__has_attribute)
+ Value = HasAttribute(FeatureII);
+ else {
assert(II == Ident__has_feature && "Must be feature check");
Value = HasFeature(*this, FeatureII);
}
@@ -805,12 +880,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// The argument to these two builtins should be a parenthesized
// file name string literal using angle brackets (<>) or
// double-quotes ("").
- bool Value = false;
- bool IsValid;
+ bool Value;
if (II == Ident__has_include)
- IsValid = EvaluateHasInclude(Value, Tok, II, *this);
+ Value = EvaluateHasInclude(Tok, II, *this);
else
- IsValid = EvaluateHasIncludeNext(Value, Tok, II, *this);
+ Value = EvaluateHasIncludeNext(Tok, II, *this);
OS << (int)Value;
Tok.setKind(tok::numeric_constant);
} else {
@@ -818,3 +892,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
}
CreateString(OS.str().data(), OS.str().size(), Tok, Tok.getLocation());
}
+
+void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
+ // If the 'used' status changed, and the macro requires 'unused' warning,
+ // remove its SourceLocation from the warn-for-unused-macro locations.
+ if (MI->isWarnIfUnused() && !MI->isUsed())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+ MI->setIsUsed(true);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
index 63b4823..975753b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -13,6 +13,7 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/OnDiskHashTable.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -25,7 +26,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
-#include <sys/stat.h>
+#include "llvm/Support/system_error.h"
using namespace clang;
using namespace clang::io;
@@ -434,23 +435,23 @@ static void InvalidPTH(Diagnostic &Diags, const char *Msg) {
Diags.Report(Diags.getCustomDiagID(Diagnostic::Error, Msg));
}
-PTHManager* PTHManager::Create(const std::string& file, Diagnostic &Diags) {
+PTHManager *PTHManager::Create(const std::string &file, Diagnostic &Diags) {
// Memory map the PTH file.
- llvm::OwningPtr<llvm::MemoryBuffer>
- File(llvm::MemoryBuffer::getFile(file.c_str()));
+ llvm::OwningPtr<llvm::MemoryBuffer> File;
- if (!File) {
+ if (llvm::MemoryBuffer::getFile(file, File)) {
+ // FIXME: Add ec.message() to this diag.
Diags.Report(diag::err_invalid_pth_file) << file;
return 0;
}
// Get the buffer ranges and check if there are at least three 32-bit
// words at the end of the file.
- const unsigned char* BufBeg = (unsigned char*)File->getBufferStart();
- const unsigned char* BufEnd = (unsigned char*)File->getBufferEnd();
+ const unsigned char *BufBeg = (unsigned char*)File->getBufferStart();
+ const unsigned char *BufEnd = (unsigned char*)File->getBufferEnd();
// Check the prologue of the file.
- if ((BufEnd - BufBeg) < (signed) (sizeof("cfe-pth") + 3 + 4) ||
+ if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 3 + 4) ||
memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth") - 1) != 0) {
Diags.Report(diag::err_invalid_pth_file) << file;
return 0;
@@ -668,7 +669,7 @@ public:
}
};
-class PTHStatCache : public StatSysCallCache {
+class PTHStatCache : public FileSystemStatCache {
typedef OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
CacheTy Cache;
@@ -679,29 +680,30 @@ public:
~PTHStatCache() {}
- int stat(const char *path, struct stat *buf) {
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
// Do the lookup for the file's data in the PTH file.
- CacheTy::iterator I = Cache.find(path);
+ CacheTy::iterator I = Cache.find(Path);
// If we don't get a hit in the PTH file just forward to 'stat'.
if (I == Cache.end())
- return StatSysCallCache::stat(path, buf);
+ return statChained(Path, StatBuf, FileDescriptor);
- const PTHStatData& Data = *I;
+ const PTHStatData &Data = *I;
if (!Data.hasStat)
- return 1;
+ return CacheMissing;
- buf->st_ino = Data.ino;
- buf->st_dev = Data.dev;
- buf->st_mtime = Data.mtime;
- buf->st_mode = Data.mode;
- buf->st_size = Data.size;
- return 0;
+ StatBuf.st_ino = Data.ino;
+ StatBuf.st_dev = Data.dev;
+ StatBuf.st_mtime = Data.mtime;
+ StatBuf.st_mode = Data.mode;
+ StatBuf.st_size = Data.size;
+ return CacheExists;
}
};
} // end anonymous namespace
-StatSysCallCache *PTHManager::createStatCache() {
+FileSystemStatCache *PTHManager::createStatCache() {
return new PTHStatCache(*((PTHFileLookup*) FileLookup));
}
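The FileSystemStatCache interface replaces the raw int-returning stat() hook with an explicit three-way result. A skeletal custom cache in the same shape, with a hypothetical in-memory backing store:

class InMemoryStatCache : public clang::FileSystemStatCache {
  llvm::StringMap<struct stat> Entries;   // hypothetical backing store
public:
  LookupResult getStat(const char *Path, struct stat &StatBuf,
                       int *FileDescriptor) {
    llvm::StringMap<struct stat>::iterator I = Entries.find(Path);
    if (I == Entries.end())
      return statChained(Path, StatBuf, FileDescriptor); // next cache or real stat
    StatBuf = I->second;
    return CacheExists;
  }
};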
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index a7b289e..f0475bc 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -35,7 +35,9 @@ PragmaHandler::~PragmaHandler() {
EmptyPragmaHandler::EmptyPragmaHandler() {}
-void EmptyPragmaHandler::HandlePragma(Preprocessor &PP, Token &FirstToken) {}
+void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstToken) {}
//===----------------------------------------------------------------------===//
// PragmaNamespace Implementation.
@@ -73,7 +75,9 @@ void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
Handlers.erase(Handler->getName());
}
-void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
+void PragmaNamespace::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
// Read the 'namespace' that the directive is in, e.g. STDC. Do not macro
// expand it; the user can have a STDC #define that should not affect this.
PP.LexUnexpandedToken(Tok);
@@ -89,7 +93,7 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
}
// Otherwise, pass it down.
- Handler->HandlePragma(PP, Tok);
+ Handler->HandlePragma(PP, Introducer, Tok);
}
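Because the introducer kind is now threaded through the namespace dispatch, any handler can tell #pragma, _Pragma(...), and the Microsoft __pragma(...) apart. A minimal custom handler under the new signature (hypothetical name):

struct PragmaFooHandler : public clang::PragmaHandler {
  PragmaFooHandler() : clang::PragmaHandler("foo") {}
  virtual void HandlePragma(clang::Preprocessor &PP,
                            clang::PragmaIntroducerKind Introducer,
                            clang::Token &FirstTok) {
    if (Introducer == clang::PIK__Pragma) {
      // Reached via _Pragma("foo ..."), e.g. out of a macro expansion.
    }
    // ... lex and process the rest of the directive line here ...
  }
};
// Installed with PP.AddPragmaHandler(new PragmaFooHandler());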
//===----------------------------------------------------------------------===//
@@ -98,12 +102,12 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
/// HandlePragmaDirective - The "#pragma" directive has been parsed. Lex the
/// rest of the pragma, passing it to the registered pragma handlers.
-void Preprocessor::HandlePragmaDirective() {
+void Preprocessor::HandlePragmaDirective(unsigned Introducer) {
++NumPragma;
// Invoke the first level of pragma handlers which reads the namespace id.
Token Tok;
- PragmaHandlers->HandlePragma(*this, Tok);
+ PragmaHandlers->HandlePragma(*this, PragmaIntroducerKind(Introducer), Tok);
// If the pragma handler didn't read the rest of the line, consume it now.
if (CurPPLexer && CurPPLexer->ParsingPreprocessorDirective)
@@ -170,7 +174,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
}
}
- Handle_Pragma(StrVal, PragmaLoc, RParenLoc);
+ Handle_Pragma(PIK__Pragma, StrVal, PragmaLoc, RParenLoc);
// Finally, return whatever came after the pragma directive.
return Lex(Tok);
@@ -216,13 +220,14 @@ void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
SourceLocation RParenLoc = Tok.getLocation();
- Handle_Pragma(StrVal, PragmaLoc, RParenLoc);
+ Handle_Pragma(PIK___pragma, StrVal, PragmaLoc, RParenLoc);
// Finally, return whatever came after the pragma directive.
return Lex(Tok);
}
-void Preprocessor::Handle_Pragma(const std::string &StrVal,
+void Preprocessor::Handle_Pragma(unsigned Introducer,
+ const std::string &StrVal,
SourceLocation PragmaLoc,
SourceLocation RParenLoc) {
@@ -241,7 +246,7 @@ void Preprocessor::Handle_Pragma(const std::string &StrVal,
EnterSourceFileWithLexer(TL, 0);
// With everything set up, lex this as a #pragma directive.
- HandlePragmaDirective();
+ HandlePragmaDirective(Introducer);
}
@@ -287,7 +292,7 @@ void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
if (Tok.is(tok::eom)) return;
// Can only poison identifiers.
- if (Tok.isNot(tok::identifier)) {
+ if (Tok.isNot(tok::raw_identifier)) {
Diag(Tok, diag::err_pp_invalid_poison);
return;
}
@@ -324,6 +329,9 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
PresumedLoc PLoc = SourceMgr.getPresumedLoc(SysHeaderTok.getLocation());
+ if (PLoc.isInvalid())
+ return;
+
unsigned FilenameLen = strlen(PLoc.getFilename());
unsigned FilenameID = SourceMgr.getLineTableFilenameID(PLoc.getFilename(),
FilenameLen);
@@ -478,23 +486,32 @@ void Preprocessor::HandlePragmaComment(Token &Tok) {
Callbacks->PragmaComment(CommentLoc, II, ArgumentString);
}
-/// HandlePragmaMessage - Handle the microsoft #pragma message extension. The
-/// syntax is:
-/// #pragma message(messagestring)
-/// messagestring is a string, which is fully macro expanded, and permits string
-/// concatenation, embedded escape characters etc. See MSDN for more details.
+/// HandlePragmaMessage - Handle the Microsoft and GCC #pragma message
+/// extension. The syntax is:
+/// #pragma message(string)
+/// OR, in GCC mode:
+/// #pragma message string
+/// string is a string literal, which is fully macro expanded, and permits
+/// string concatenation, embedded escape characters, etc. See MSDN for details.
void Preprocessor::HandlePragmaMessage(Token &Tok) {
SourceLocation MessageLoc = Tok.getLocation();
Lex(Tok);
- if (Tok.isNot(tok::l_paren)) {
+ bool ExpectClosingParen = false;
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // We have a MSVC style pragma message.
+ ExpectClosingParen = true;
+ // Read the string.
+ Lex(Tok);
+ break;
+ case tok::string_literal:
+ // We have a GCC style pragma message, and we just read the string.
+ break;
+ default:
Diag(MessageLoc, diag::err_pragma_message_malformed);
return;
}
- // Read the string.
- Lex(Tok);
-
-
// We need at least one string.
if (Tok.isNot(tok::string_literal)) {
Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
@@ -522,11 +539,13 @@ void Preprocessor::HandlePragmaMessage(Token &Tok) {
llvm::StringRef MessageString(Literal.GetString(), Literal.GetStringLength());
- if (Tok.isNot(tok::r_paren)) {
- Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
- return;
+ if (ExpectClosingParen) {
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+ Lex(Tok); // eat the r_paren.
}
- Lex(Tok); // eat the r_paren.
if (Tok.isNot(tok::eom)) {
Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
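For reference, the two spellings this parser now accepts:

#pragma message("building " __FILE__)   /* Microsoft style, parenthesized */
#pragma message "building " __FILE__    /* GCC style, bare string */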
@@ -580,7 +599,7 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
// Create a Token from the string.
Token MacroTok;
MacroTok.startToken();
- MacroTok.setKind(tok::identifier);
+ MacroTok.setKind(tok::raw_identifier);
CreateString(&StrVal[1], StrVal.size() - 2, MacroTok);
// Get the IdentifierInfo of MacroToPushTok.
@@ -611,7 +630,7 @@ void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
PragmaPushMacroInfo[IdentInfo].push_back(MacroCopyToPush);
}
-/// HandlePragmaPopMacro - Handle #pragma push_macro.
+/// HandlePragmaPopMacro - Handle #pragma pop_macro.
/// The syntax is:
/// #pragma pop_macro("macro")
void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
@@ -627,7 +646,11 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
if (iter != PragmaPushMacroInfo.end()) {
// Release the MacroInfo currently associated with IdentInfo.
MacroInfo *CurrentMI = getMacroInfo(IdentInfo);
- if (CurrentMI) ReleaseMacroInfo(CurrentMI);
+ if (CurrentMI) {
+ if (CurrentMI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(CurrentMI->getDefinitionLoc());
+ ReleaseMacroInfo(CurrentMI);
+ }
// Get the MacroInfo we want to reinstall.
MacroInfo *MacroToReInstall = iter->second.back();
@@ -700,11 +723,39 @@ void Preprocessor::RemovePragmaHandler(llvm::StringRef Namespace,
PragmaHandlers->RemovePragmaHandler(NS);
}
+bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("ON"))
+ Result = tok::OOS_ON;
+ else if (II->isStr("OFF"))
+ Result = tok::OOS_OFF;
+ else if (II->isStr("DEFAULT"))
+ Result = tok::OOS_DEFAULT;
+ else {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+
+ // Verify that this is followed by EOM.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eom))
+ Diag(Tok, diag::ext_pragma_syntax_eom);
+ return false;
+}
+
namespace {
/// PragmaOnceHandler - "#pragma once" marks the file as atomically included.
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler() : PragmaHandler("once") {}
- virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &OnceTok) {
PP.CheckEndOfDirective("pragma once");
PP.HandlePragmaOnce(OnceTok);
}
@@ -714,7 +765,8 @@ struct PragmaOnceHandler : public PragmaHandler {
/// rest of the line is not lexed.
struct PragmaMarkHandler : public PragmaHandler {
PragmaMarkHandler() : PragmaHandler("mark") {}
- virtual void HandlePragma(Preprocessor &PP, Token &MarkTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &MarkTok) {
PP.HandlePragmaMark();
}
};
@@ -722,7 +774,8 @@ struct PragmaMarkHandler : public PragmaHandler {
/// PragmaPoisonHandler - "#pragma poison x" marks x as not usable.
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler() : PragmaHandler("poison") {}
- virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PoisonTok) {
PP.HandlePragmaPoison(PoisonTok);
}
};
@@ -731,21 +784,24 @@ struct PragmaPoisonHandler : public PragmaHandler {
/// as a system header, which silences warnings in it.
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
- virtual void HandlePragma(Preprocessor &PP, Token &SHToken) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &SHToken) {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("pragma");
}
};
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler() : PragmaHandler("dependency") {}
- virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) {
PP.HandlePragmaDependency(DepToken);
}
};
struct PragmaDebugHandler : public PragmaHandler {
PragmaDebugHandler() : PragmaHandler("__debug") {}
- virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) {
Token Tok;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
@@ -783,7 +839,9 @@ struct PragmaDebugHandler : public PragmaHandler {
struct PragmaDiagnosticHandler : public PragmaHandler {
public:
explicit PragmaDiagnosticHandler() : PragmaHandler("diagnostic") {}
- virtual void HandlePragma(Preprocessor &PP, Token &DiagToken) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DiagToken) {
+ SourceLocation DiagLoc = DiagToken.getLocation();
Token Tok;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
@@ -802,12 +860,12 @@ public:
else if (II->isStr("fatal"))
Map = diag::MAP_FATAL;
else if (II->isStr("pop")) {
- if (!PP.getDiagnostics().popMappings())
+ if (!PP.getDiagnostics().popMappings(DiagLoc))
PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
return;
} else if (II->isStr("push")) {
- PP.getDiagnostics().pushMappings();
+ PP.getDiagnostics().pushMappings(DiagLoc);
return;
} else {
PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
@@ -857,7 +915,7 @@ public:
}
if (PP.getDiagnostics().setDiagnosticGroupMapping(WarningName.c_str()+2,
- Map))
+ Map, DiagLoc))
PP.Diag(StrToks[0].getLocation(),
diag::warn_pragma_diagnostic_unknown_warning) << WarningName;
}
@@ -866,7 +924,8 @@ public:
/// PragmaCommentHandler - "#pragma comment ...".
struct PragmaCommentHandler : public PragmaHandler {
PragmaCommentHandler() : PragmaHandler("comment") {}
- virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &CommentTok) {
PP.HandlePragmaComment(CommentTok);
}
};
@@ -874,7 +933,8 @@ struct PragmaCommentHandler : public PragmaHandler {
/// PragmaMessageHandler - "#pragma message("...")".
struct PragmaMessageHandler : public PragmaHandler {
PragmaMessageHandler() : PragmaHandler("message") {}
- virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &CommentTok) {
PP.HandlePragmaMessage(CommentTok);
}
};
@@ -883,7 +943,8 @@ struct PragmaMessageHandler : public PragmaHandler {
/// macro on the top of the stack.
struct PragmaPushMacroHandler : public PragmaHandler {
PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
- virtual void HandlePragma(Preprocessor &PP, Token &PushMacroTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PushMacroTok) {
PP.HandlePragmaPushMacro(PushMacroTok);
}
};
@@ -893,62 +954,23 @@ struct PragmaPushMacroHandler : public PragmaHandler {
/// macro to the value on the top of the stack.
struct PragmaPopMacroHandler : public PragmaHandler {
PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
- virtual void HandlePragma(Preprocessor &PP, Token &PopMacroTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PopMacroTok) {
PP.HandlePragmaPopMacro(PopMacroTok);
}
};
// Pragma STDC implementations.
-enum STDCSetting {
- STDC_ON, STDC_OFF, STDC_DEFAULT, STDC_INVALID
-};
-
-static STDCSetting LexOnOffSwitch(Preprocessor &PP) {
- Token Tok;
- PP.LexUnexpandedToken(Tok);
-
- if (Tok.isNot(tok::identifier)) {
- PP.Diag(Tok, diag::ext_stdc_pragma_syntax);
- return STDC_INVALID;
- }
- IdentifierInfo *II = Tok.getIdentifierInfo();
- STDCSetting Result;
- if (II->isStr("ON"))
- Result = STDC_ON;
- else if (II->isStr("OFF"))
- Result = STDC_OFF;
- else if (II->isStr("DEFAULT"))
- Result = STDC_DEFAULT;
- else {
- PP.Diag(Tok, diag::ext_stdc_pragma_syntax);
- return STDC_INVALID;
- }
-
- // Verify that this is followed by EOM.
- PP.LexUnexpandedToken(Tok);
- if (Tok.isNot(tok::eom))
- PP.Diag(Tok, diag::ext_stdc_pragma_syntax_eom);
- return Result;
-}
-
-/// PragmaSTDC_FP_CONTRACTHandler - "#pragma STDC FP_CONTRACT ...".
-struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler {
- PragmaSTDC_FP_CONTRACTHandler() : PragmaHandler("FP_CONTRACT") {}
- virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
- // We just ignore the setting of FP_CONTRACT. Since we don't do contractions
- // at all, our default is OFF and setting it to ON is an optimization hint
- // we can safely ignore. When we support -ffma or something, we would need
- // to diagnose that we are ignoring FMA.
- LexOnOffSwitch(PP);
- }
-};
-
/// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...".
struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
- virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
- if (LexOnOffSwitch(PP) == STDC_ON)
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+ if (OOS == tok::OOS_ON)
PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
}
};
@@ -957,15 +979,18 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
PragmaSTDC_CX_LIMITED_RANGEHandler()
: PragmaHandler("CX_LIMITED_RANGE") {}
- virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
- LexOnOffSwitch(PP);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ PP.LexOnOffSwitch(OOS);
}
};
/// PragmaSTDC_UnknownHandler - "#pragma STDC ...".
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
PragmaSTDC_UnknownHandler() {}
- virtual void HandlePragma(Preprocessor &PP, Token &UnknownTok) {
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &UnknownTok) {
// C99 6.10.6p2, unknown forms are not allowed.
PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
}
@@ -981,6 +1006,7 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler(new PragmaMarkHandler());
AddPragmaHandler(new PragmaPushMacroHandler());
AddPragmaHandler(new PragmaPopMacroHandler());
+ AddPragmaHandler(new PragmaMessageHandler());
// #pragma GCC ...
AddPragmaHandler("GCC", new PragmaPoisonHandler());
@@ -994,7 +1020,6 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler("clang", new PragmaDependencyHandler());
AddPragmaHandler("clang", new PragmaDiagnosticHandler());
- AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler());
AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
@@ -1002,6 +1027,5 @@ void Preprocessor::RegisterBuiltinPragmas() {
// MS extensions.
if (Features.Microsoft) {
AddPragmaHandler(new PragmaCommentHandler());
- AddPragmaHandler(new PragmaMessageHandler());
}
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
index c446d96..3a43ac1 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -14,11 +14,29 @@
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Token.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() { }
+
+InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
+ InclusionKind Kind,
+ llvm::StringRef FileName,
+ bool InQuotes, const FileEntry *File,
+ SourceRange Range)
+ : PreprocessingDirective(InclusionDirectiveKind, Range),
+ InQuotes(InQuotes), Kind(Kind), File(File)
+{
+ char *Memory
+ = (char*)PPRec.Allocate(FileName.size() + 1, llvm::alignOf<char>());
+ memcpy(Memory, FileName.data(), FileName.size());
+ Memory[FileName.size()] = 0;
+ this->FileName = llvm::StringRef(Memory, FileName.size());
+}
+
void PreprocessingRecord::MaybeLoadPreallocatedEntities() const {
if (!ExternalSource || LoadedPreallocatedEntities)
return;
@@ -109,17 +127,18 @@ void PreprocessingRecord::MacroExpands(const Token &Id, const MacroInfo* MI) {
Def));
}
-void PreprocessingRecord::MacroDefined(const IdentifierInfo *II,
+void PreprocessingRecord::MacroDefined(const Token &Id,
const MacroInfo *MI) {
SourceRange R(MI->getDefinitionLoc(), MI->getDefinitionEndLoc());
MacroDefinition *Def
- = new (*this) MacroDefinition(II, MI->getDefinitionLoc(), R);
+ = new (*this) MacroDefinition(Id.getIdentifierInfo(),
+ MI->getDefinitionLoc(),
+ R);
MacroDefinitions[MI] = Def;
PreprocessedEntities.push_back(Def);
}
-void PreprocessingRecord::MacroUndefined(SourceLocation Loc,
- const IdentifierInfo *II,
+void PreprocessingRecord::MacroUndefined(const Token &Id,
const MacroInfo *MI) {
llvm::DenseMap<const MacroInfo *, MacroDefinition *>::iterator Pos
= MacroDefinitions.find(MI);
@@ -127,3 +146,38 @@ void PreprocessingRecord::MacroUndefined(SourceLocation Loc,
MacroDefinitions.erase(Pos);
}
+void PreprocessingRecord::InclusionDirective(SourceLocation HashLoc,
+ const clang::Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ clang::SourceLocation EndLoc) {
+ InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ Kind = InclusionDirective::Include;
+ break;
+
+ case tok::pp_import:
+ Kind = InclusionDirective::Import;
+ break;
+
+ case tok::pp_include_next:
+ Kind = InclusionDirective::IncludeNext;
+ break;
+
+ case tok::pp___include_macros:
+ Kind = InclusionDirective::IncludeMacros;
+ break;
+
+ default:
+ llvm_unreachable("Unknown include directive kind");
+ return;
+ }
+
+ clang::InclusionDirective *ID
+ = new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
+ File, SourceRange(HashLoc, EndLoc));
+ PreprocessedEntities.push_back(ID);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index 5160acf..6fe414b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -53,10 +53,12 @@ Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
IdentifierInfoLookup* IILookup,
bool OwnsHeaders)
: Diags(&diags), Features(opts), Target(target),FileMgr(Headers.getFileMgr()),
- SourceMgr(SM), HeaderInfo(Headers), ExternalSource(0),
+ SourceMgr(SM),
+ HeaderInfo(Headers), ExternalSource(0),
Identifiers(opts, IILookup), BuiltinInfo(Target), CodeComplete(0),
CodeCompletionFile(0), SkipMainFilePreamble(0, true), CurPPLexer(0),
- CurDirLookup(0), Callbacks(0), MacroArgCache(0), Record(0) {
+ CurDirLookup(0), Callbacks(0), MacroArgCache(0), Record(0), MIChainHead(0),
+ MICache(0) {
ScratchBuf = new ScratchBuffer(SourceMgr);
CounterValue = 0; // __COUNTER__ starts at 0.
OwnsHeaderSearch = OwnsHeaders;
@@ -106,23 +108,8 @@ Preprocessor::~Preprocessor() {
}
// Free any macro definitions.
- for (llvm::DenseMap<IdentifierInfo*, MacroInfo*>::iterator I =
- Macros.begin(), E = Macros.end(); I != E; ++I) {
- // We don't need to free the MacroInfo objects directly. These
- // will be released when the BumpPtrAllocator 'BP' object gets
- // destroyed. We still need to run the dtor, however, to free
- // memory alocated by MacroInfo.
- I->second->Destroy();
- I->first->setHasMacroDefinition(false);
- }
- for (std::vector<MacroInfo*>::iterator I = MICache.begin(),
- E = MICache.end(); I != E; ++I) {
- // We don't need to free the MacroInfo objects directly. These
- // will be released when the BumpPtrAllocator 'BP' object gets
- // destroyed. We still need to run the dtor, however, to free
- // memory alocated by MacroInfo.
- (*I)->Destroy();
- }
+ for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
+ I->MI.Destroy();
// Free any cached macro expanders.
for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
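MIChainHead threads every allocator-resident MacroInfo onto one intrusive list, so the destructor can run each MacroInfo dtor with a single walk instead of visiting both the Macros map and the MICache vector. A sketch of the chain node (see Preprocessor.h for the real definition):

struct MacroInfoChain {
  clang::MacroInfo MI;
  MacroInfoChain *Next;   // nodes themselves live in the bump allocator
};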
@@ -291,114 +278,6 @@ void Preprocessor::CodeCompleteNaturalLanguage() {
CodeComplete->CodeCompleteNaturalLanguage();
}
-//===----------------------------------------------------------------------===//
-// Token Spelling
-//===----------------------------------------------------------------------===//
-
-/// getSpelling() - Return the 'spelling' of this token. The spelling of a
-/// token are the characters used to represent the token in the source file
-/// after trigraph expansion and escaped-newline folding. In particular, this
-/// wants to get the true, uncanonicalized, spelling of things like digraphs
-/// UCNs, etc.
-std::string Preprocessor::getSpelling(const Token &Tok,
- const SourceManager &SourceMgr,
- const LangOptions &Features,
- bool *Invalid) {
- assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
-
- // If this token contains nothing interesting, return it directly.
- bool CharDataInvalid = false;
- const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
- &CharDataInvalid);
- if (Invalid)
- *Invalid = CharDataInvalid;
- if (CharDataInvalid)
- return std::string();
-
- if (!Tok.needsCleaning())
- return std::string(TokStart, TokStart+Tok.getLength());
-
- std::string Result;
- Result.reserve(Tok.getLength());
-
- // Otherwise, hard case, relex the characters into the string.
- for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
- Ptr != End; ) {
- unsigned CharSize;
- Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
- Ptr += CharSize;
- }
- assert(Result.size() != unsigned(Tok.getLength()) &&
- "NeedsCleaning flag set on something that didn't need cleaning!");
- return Result;
-}
-
-/// getSpelling() - Return the 'spelling' of this token. The spelling of a
-/// token are the characters used to represent the token in the source file
-/// after trigraph expansion and escaped-newline folding. In particular, this
-/// wants to get the true, uncanonicalized, spelling of things like digraphs
-/// UCNs, etc.
-std::string Preprocessor::getSpelling(const Token &Tok, bool *Invalid) const {
- return getSpelling(Tok, SourceMgr, Features, Invalid);
-}
-
-/// getSpelling - This method is used to get the spelling of a token into a
-/// preallocated buffer, instead of as an std::string. The caller is required
-/// to allocate enough space for the token, which is guaranteed to be at least
-/// Tok.getLength() bytes long. The actual length of the token is returned.
-///
-/// Note that this method may do two possible things: it may either fill in
-/// the buffer specified with characters, or it may *change the input pointer*
-/// to point to a constant buffer with the data already in it (avoiding a
-/// copy). The caller is not allowed to modify the returned buffer pointer
-/// if an internal buffer is returned.
-unsigned Preprocessor::getSpelling(const Token &Tok,
- const char *&Buffer, bool *Invalid) const {
- assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
-
- // If this token is an identifier, just return the string from the identifier
- // table, which is very quick.
- if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
- Buffer = II->getNameStart();
- return II->getLength();
- }
-
- // Otherwise, compute the start of the token in the input lexer buffer.
- const char *TokStart = 0;
-
- if (Tok.isLiteral())
- TokStart = Tok.getLiteralData();
-
- if (TokStart == 0) {
- bool CharDataInvalid = false;
- TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
- if (Invalid)
- *Invalid = CharDataInvalid;
- if (CharDataInvalid) {
- Buffer = "";
- return 0;
- }
- }
-
- // If this token contains nothing interesting, return it directly.
- if (!Tok.needsCleaning()) {
- Buffer = TokStart;
- return Tok.getLength();
- }
-
- // Otherwise, hard case, relex the characters into the string.
- char *OutBuf = const_cast<char*>(Buffer);
- for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
- Ptr != End; ) {
- unsigned CharSize;
- *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
- Ptr += CharSize;
- }
- assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
- "NeedsCleaning flag set on something that didn't need cleaning!");
-
- return OutBuf-Buffer;
-}
/// getSpelling - This method is used to get the spelling of a token into a
/// SmallVector. Note that the returned StringRef may not point to the
@@ -406,9 +285,12 @@ unsigned Preprocessor::getSpelling(const Token &Tok,
llvm::StringRef Preprocessor::getSpelling(const Token &Tok,
llvm::SmallVectorImpl<char> &Buffer,
bool *Invalid) const {
- // Try the fast path.
- if (const IdentifierInfo *II = Tok.getIdentifierInfo())
- return II->getName();
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.isNot(tok::raw_identifier)) {
+ // Try the fast path.
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo())
+ return II->getName();
+ }
// Resize the buffer if we need to copy into it.
if (Tok.needsCleaning())
@@ -434,71 +316,14 @@ void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
InstantiationLoc, Len);
Tok.setLocation(Loc);
- // If this is a literal token, set the pointer data.
- if (Tok.isLiteral())
+ // If this is a raw identifier or a literal token, set the pointer data.
+ if (Tok.is(tok::raw_identifier))
+ Tok.setRawIdentifierData(DestPtr);
+ else if (Tok.isLiteral())
Tok.setLiteralData(DestPtr);
}
-/// AdvanceToTokenCharacter - Given a location that specifies the start of a
-/// token, return a new location that specifies a character within the token.
-SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
- unsigned CharNo) {
- // Figure out how many physical characters away the specified instantiation
- // character is. This needs to take into consideration newlines and
- // trigraphs.
- bool Invalid = false;
- const char *TokPtr = SourceMgr.getCharacterData(TokStart, &Invalid);
-
- // If they request the first char of the token, we're trivially done.
- if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
- return TokStart;
-
- unsigned PhysOffset = 0;
-
- // The usual case is that tokens don't contain anything interesting. Skip
- // over the uninteresting characters. If a token only consists of simple
- // chars, this method is extremely fast.
- while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
- if (CharNo == 0)
- return TokStart.getFileLocWithOffset(PhysOffset);
- ++TokPtr, --CharNo, ++PhysOffset;
- }
-
- // If we have a character that may be a trigraph or escaped newline, use a
- // lexer to parse it correctly.
- for (; CharNo; --CharNo) {
- unsigned Size;
- Lexer::getCharAndSizeNoWarn(TokPtr, Size, Features);
- TokPtr += Size;
- PhysOffset += Size;
- }
-
- // Final detail: if we end up on an escaped newline, we want to return the
- // location of the actual byte of the token. For example foo\<newline>bar
- // advanced by 3 should return the location of b, not of \\. One compounding
- // detail of this is that the escape may be made by a trigraph.
- if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
- PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
-
- return TokStart.getFileLocWithOffset(PhysOffset);
-}
-
-SourceLocation Preprocessor::getLocForEndOfToken(SourceLocation Loc,
- unsigned Offset) {
- if (Loc.isInvalid() || !Loc.isFileID())
- return SourceLocation();
-
- unsigned Len = Lexer::MeasureTokenLength(Loc, getSourceManager(), Features);
- if (Len > Offset)
- Len = Len - Offset;
- else
- return Loc;
-
- return AdvanceToTokenCharacter(Loc, Len);
-}
-
-
//===----------------------------------------------------------------------===//
// Preprocessor Initialization Methods
@@ -549,25 +374,29 @@ void Preprocessor::EndSourceFile() {
// Lexer Event Handling.
//===----------------------------------------------------------------------===//
-/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
-/// identifier information for the token and install it into the token.
-IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
- const char *BufPtr) const {
- assert(Identifier.is(tok::identifier) && "Not an identifier!");
- assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
+/// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
+/// identifier information for the token and install it into the token,
+/// updating the token kind accordingly.
+IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
+ assert(Identifier.getRawIdentifierData() != 0 && "No raw identifier data!");
// Look up this token, see if it is a macro, or if it is a language keyword.
IdentifierInfo *II;
- if (BufPtr && !Identifier.needsCleaning()) {
+ if (!Identifier.needsCleaning()) {
// No cleaning needed, just use the characters from the lexed buffer.
- II = getIdentifierInfo(llvm::StringRef(BufPtr, Identifier.getLength()));
+ II = getIdentifierInfo(llvm::StringRef(Identifier.getRawIdentifierData(),
+ Identifier.getLength()));
} else {
// Cleaning needed, alloca a buffer, clean into it, then use the buffer.
llvm::SmallString<64> IdentifierBuffer;
llvm::StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
II = getIdentifierInfo(CleanedStr);
}
+
+ // Update the token info (identifier info and appropriate token kind).
Identifier.setIdentifierInfo(II);
+ Identifier.setKind(II->getTokenID());
+
return II;
}
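Under the raw-identifier scheme a token lexed in raw mode carries only a pointer to its text; identifier resolution happens on demand. Sketch of the round trip, assuming 'RawLexer' is a clang::Lexer operating in raw mode:

clang::Token Tok;
RawLexer.LexFromRawLexer(Tok);             // raw mode: no identifier lookup
if (Tok.is(clang::tok::raw_identifier))
  PP.LookUpIdentifierInfo(Tok);            // installs the IdentifierInfo and
                                           // rewrites the kind (keyword or
                                           // tok::identifier)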
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
index fc6db21..3e9e855 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -13,6 +13,7 @@
#include "clang/Lex/TokenConcatenation.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -165,7 +166,14 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
}
switch (PrevKind) {
- default: assert(0 && "InitAvoidConcatTokenInfo built wrong");
+ default:
+ llvm_unreachable("InitAvoidConcatTokenInfo built wrong");
+ return true;
+
+ case tok::raw_identifier:
+ llvm_unreachable("tok::raw_identifier in non-raw lexing mode!");
+ return true;
+
case tok::identifier: // id+id or id+number or id+L"foo".
// id+'.'... will not append.
if (Tok.is(tok::numeric_constant))
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
index 94719b0..ea39b47 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -287,7 +287,7 @@ void TokenLexer::ExpandFunctionArguments() {
llvm::BumpPtrAllocator &Alloc = PP.getPreprocessorAllocator();
Token *Res =
static_cast<Token *>(Alloc.Allocate(sizeof(Token)*ResultToks.size(),
- llvm::alignof<Token>()));
+ llvm::alignOf<Token>()));
if (NumTokens)
memcpy(Res, &ResultToks[0], NumTokens*sizeof(Token));
Tokens = Res;
@@ -435,12 +435,13 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// Lex the resultant pasted token into Result.
Token Result;
- if (Tok.is(tok::identifier) && RHS.is(tok::identifier)) {
+ if (Tok.isAnyIdentifier() && RHS.isAnyIdentifier()) {
// Common paste case: identifier+identifier = identifier. Avoid creating
// a lexer and other overhead.
PP.IncrementPasteCounter(true);
Result.startToken();
- Result.setKind(tok::identifier);
+ Result.setKind(tok::raw_identifier);
+ Result.setRawIdentifierData(ResultTokStrPtr);
Result.setLocation(ResultTokLoc);
Result.setLength(LHSLen+RHSLen);
} else {
@@ -524,10 +525,10 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// Now that we got the result token, it will be subject to expansion. Since
// token pasting re-lexes the result token in raw mode, identifier information
// isn't looked up. As such, if the result is an identifier, look up id info.
- if (Tok.is(tok::identifier)) {
+ if (Tok.is(tok::raw_identifier)) {
// Look up the identifier info for the token. We disabled identifier lookup
// by saying we're skipping contents, so we need to do this manually.
- PP.LookUpIdentifierInfo(Tok, ResultTokStrPtr);
+ PP.LookUpIdentifierInfo(Tok);
}
return false;
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
index d027879..edb1675 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -25,26 +25,6 @@
using namespace clang;
-static void DumpRecordLayouts(ASTContext &C) {
- for (ASTContext::type_iterator I = C.types_begin(), E = C.types_end();
- I != E; ++I) {
- const RecordType *RT = dyn_cast<RecordType>(*I);
- if (!RT)
- continue;
-
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD || RD->isImplicit() || RD->isDependentType() ||
- RD->isInvalidDecl() || !RD->getDefinition())
- continue;
-
- // FIXME: Do we really need to hard code this?
- if (RD->getQualifiedNameAsString() == "__va_list_tag")
- continue;
-
- C.DumpRecordLayout(RD, llvm::errs());
- }
-}
-
//===----------------------------------------------------------------------===//
// Public interface to the file
//===----------------------------------------------------------------------===//
@@ -97,10 +77,6 @@ void clang::ParseAST(Sema &S, bool PrintStats) {
E = S.WeakTopLevelDecls().end(); I != E; ++I)
Consumer->HandleTopLevelDecl(DeclGroupRef(*I));
- // Dump record layouts, if requested.
- if (S.getLangOptions().DumpRecordLayouts)
- DumpRecordLayouts(S.getASTContext());
-
Consumer->HandleTranslationUnit(S.getASTContext());
if (PrintStats) {
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
index d327db4..3994738 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -20,10 +20,10 @@ using namespace clang;
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
/// Declarator is a well formed C++ inline method definition. Now lex its body
/// and store its tokens for parsing after the C++ class is complete.
-Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
- const ParsedTemplateInfo &TemplateInfo) {
- assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "This isn't a function declarator!");
+Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS) {
+ assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try)) &&
"Current token not a '{', ':' or 'try'!");
@@ -36,19 +36,29 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
// FIXME: Friend templates
FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D, true,
move(TemplateParams));
- else // FIXME: pass template information through
+ else { // FIXME: pass template information through
+ if (VS.isOverrideSpecified())
+ Diag(VS.getOverrideLoc(), diag::ext_override_inline) << "override";
+ if (VS.isFinalSpecified())
+ Diag(VS.getFinalLoc(), diag::ext_override_inline) << "final";
+ if (VS.isNewSpecified())
+ Diag(VS.getNewLoc(), diag::ext_override_inline) << "new";
+
FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
- move(TemplateParams), 0, 0,
- /*IsDefinition*/true);
+ move(TemplateParams), 0,
+ VS, 0, /*IsDefinition*/true);
+ }
HandleMemberFunctionDefaultArgs(D, FnD);
+ D.complete(FnD);
+
// Consume the tokens and store them for later parsing.
- getCurrentClass().MethodDefs.push_back(LexedMethod(FnD));
- getCurrentClass().MethodDefs.back().TemplateScope
- = getCurScope()->isTemplateParamScope();
- CachedTokens &Toks = getCurrentClass().MethodDefs.back().Toks;
+ LexedMethod* LM = new LexedMethod(this, FnD);
+ getCurrentClass().LateParsedDeclarations.push_back(LM);
+ LM->TemplateScope = getCurScope()->isTemplateParamScope();
+ CachedTokens &Toks = LM->Toks;
tok::TokenKind kind = Tok.getKind();
// We may have a constructor initializer or function-try-block here.
@@ -62,7 +72,8 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
// don't try to parse this method later.
Diag(Tok.getLocation(), diag::err_expected_lbrace);
ConsumeAnyToken();
- getCurrentClass().MethodDefs.pop_back();
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
return FnD;
}
}
@@ -86,13 +97,40 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
return FnD;
}
+Parser::LateParsedDeclaration::~LateParsedDeclaration() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDeclarations() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDefs() {}
+
+Parser::LateParsedClass::LateParsedClass(Parser *P, ParsingClass *C)
+ : Self(P), Class(C) {}
+
+Parser::LateParsedClass::~LateParsedClass() {
+ Self->DeallocateParsedClasses(Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclarations(*Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDefs(*Class);
+}
+
+void Parser::LateParsedMethodDeclaration::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclaration(*this);
+}
+
+void Parser::LexedMethod::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDef(*this);
+}
+
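
These definitions introduce one polymorphic list, LateParsedDeclarations, in place of the separate MethodDecls/MethodDefs queues, so nested classes and method bodies replay in declaration order through two virtual hooks. A standalone sketch of the dispatch pattern (names illustrative):

#include <iostream>
#include <memory>
#include <vector>

struct LateParsed {
  virtual ~LateParsed() = default;
  virtual void parseDeclarations() {}
  virtual void parseDefs() {}
};

struct Method : LateParsed {
  void parseDefs() override { std::cout << "method body\n"; }
};

struct NestedClass : LateParsed {
  std::vector<std::unique_ptr<LateParsed>> Inner;
  void parseDefs() override {            // recurse in source order
    for (auto &D : Inner) D->parseDefs();
  }
};

int main() {
  std::vector<std::unique_ptr<LateParsed>> L;
  L.push_back(std::make_unique<Method>());
  auto N = std::make_unique<NestedClass>();
  N->Inner.push_back(std::make_unique<Method>());
  L.push_back(std::move(N));
  for (auto &D : L) D->parseDefs();      // replays both bodies, in order
}
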
/// ParseLexedMethodDeclarations - We finished parsing the member
/// specification of a top (non-nested) C++ class. Now go over the
/// stack of method declarations with some parts for which parsing was
/// delayed (such as default arguments) and parse them.
void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
if (HasTemplateScope)
Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
@@ -104,75 +142,79 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
if (HasClassScope)
Actions.ActOnStartDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
- for (; !Class.MethodDecls.empty(); Class.MethodDecls.pop_front()) {
- LateParsedMethodDeclaration &LM = Class.MethodDecls.front();
-
- // If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- if (LM.TemplateScope)
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
-
- // Start the delayed C++ method declaration
- Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
-
- // Introduce the parameters into scope and parse their default
- // arguments.
- ParseScope PrototypeScope(this,
- Scope::FunctionPrototypeScope|Scope::DeclScope);
- for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
- // Introduce the parameter into scope.
- Actions.ActOnDelayedCXXMethodParameter(getCurScope(), LM.DefaultArgs[I].Param);
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDeclarations();
+ }
- if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
- // Save the current token position.
- SourceLocation origLoc = Tok.getLocation();
+ if (HasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
+}
- // Parse the default argument from its saved token stream.
- Toks->push_back(Tok); // So that the current token doesn't get lost
- PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ if (LM.TemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
+
+ // Start the delayed C++ method declaration
+ Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
+
+ // Introduce the parameters into scope and parse their default
+ // arguments.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+ for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
+ // Introduce the parameter into scope.
+ Actions.ActOnDelayedCXXMethodParameter(getCurScope(), LM.DefaultArgs[I].Param);
+
+ if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
+ // Parse the default argument from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // Consume the '='.
+ assert(Tok.is(tok::equal) && "Default argument not starting with '='");
+ SourceLocation EqualLoc = ConsumeToken();
+
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed);
+
+ ExprResult DefArgResult(ParseAssignmentExpression());
+ if (DefArgResult.isInvalid())
+ Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
+ else {
+ if (Tok.is(tok::cxx_defaultarg_end))
+ ConsumeToken();
+ else
+ Diag(Tok.getLocation(), diag::err_default_arg_unparsed);
+ Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc,
+ DefArgResult.take());
+ }
- // Consume the previously-pushed token.
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseAssignmentExpression went over the default arg tokens!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
ConsumeAnyToken();
- // Consume the '='.
- assert(Tok.is(tok::equal) && "Default argument not starting with '='");
- SourceLocation EqualLoc = ConsumeToken();
-
- ExprResult DefArgResult(ParseAssignmentExpression());
- if (DefArgResult.isInvalid())
- Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
- else {
- if (Tok.is(tok::cxx_defaultarg_end))
- ConsumeToken();
- else
- Diag(Tok.getLocation(), diag::err_default_arg_unparsed);
- Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc,
- DefArgResult.take());
- }
-
- assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
- Tok.getLocation()) &&
- "ParseAssignmentExpression went over the default arg tokens!");
- // There could be leftover tokens (e.g. because of an error).
- // Skip through until we reach the original token position.
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
- ConsumeAnyToken();
-
- delete Toks;
- LM.DefaultArgs[I].Toks = 0;
- }
+ delete Toks;
+ LM.DefaultArgs[I].Toks = 0;
}
- PrototypeScope.Exit();
-
- // Finish the delayed C++ method declaration.
- Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
}
+ PrototypeScope.Exit();
- for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
- ParseLexedMethodDeclarations(*Class.NestedClasses[I]);
-
- if (HasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
+ // Finish the delayed C++ method declaration.
+ Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
}
/// ParseLexedMethodDefs - We finished parsing the member specification of a top
@@ -180,7 +222,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
/// collected during its parsing and parse them all.
void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
if (HasTemplateScope)
Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
@@ -188,73 +230,72 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
HasClassScope);
- for (; !Class.MethodDefs.empty(); Class.MethodDefs.pop_front()) {
- LexedMethod &LM = Class.MethodDefs.front();
-
- // If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- if (LM.TemplateScope)
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
-
- // Save the current token position.
- SourceLocation origLoc = Tok.getLocation();
-
- assert(!LM.Toks.empty() && "Empty body!");
- // Append the current token at the end of the new token stream so that it
- // doesn't get lost.
- LM.Toks.push_back(Tok);
- PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
-
- // Consume the previously pushed token.
- ConsumeAnyToken();
- assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
- && "Inline method not starting with '{', ':' or 'try'");
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDefs();
+ }
+}
- // Parse the method body. Function body parsing code is similar enough
- // to be re-used for method bodies as well.
- ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
- Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
+void Parser::ParseLexedMethodDef(LexedMethod &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ if (LM.TemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
+
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
+ assert(!LM.Toks.empty() && "Empty body!");
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
+ && "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+ Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LM.D);
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseFunctionTryBlock went over the cached tokens!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ return;
+ }
+ if (Tok.is(tok::colon)) {
+ ParseConstructorInitializer(LM.D);
- if (Tok.is(tok::kw_try)) {
- ParseFunctionTryBlock(LM.D);
- assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
- Tok.getLocation()) &&
- "ParseFunctionTryBlock went over the cached tokens!");
- // There could be leftover tokens (e.g. because of an error).
- // Skip through until we reach the original token position.
+ // Error recovery.
+ if (!Tok.is(tok::l_brace)) {
+ Actions.ActOnFinishFunctionBody(LM.D, 0);
+ return;
+ }
+ } else
+ Actions.ActOnDefaultCtorInitializers(LM.D);
+
+ ParseFunctionStatementBody(LM.D);
+
+ if (Tok.getLocation() != origLoc) {
+ // Due to parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ origLoc))
while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
ConsumeAnyToken();
- continue;
- }
- if (Tok.is(tok::colon)) {
- ParseConstructorInitializer(LM.D);
-
- // Error recovery.
- if (!Tok.is(tok::l_brace)) {
- Actions.ActOnFinishFunctionBody(LM.D, 0);
- continue;
- }
- } else
- Actions.ActOnDefaultCtorInitializers(LM.D);
-
- ParseFunctionStatementBody(LM.D);
-
- if (Tok.getLocation() != origLoc) {
- // Due to parsing error, we either went over the cached tokens or
- // there are still cached tokens left. If it's the latter case skip the
- // leftover tokens.
- // Since this is an uncommon situation that should be avoided, use the
- // expensive isBeforeInTranslationUnit call.
- if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
- origLoc))
- while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
- ConsumeAnyToken();
- }
}
-
- for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
- ParseLexedMethodDefs(*Class.NestedClasses[I]);
}
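
The replay logic above appends the current token to the cached stream so it is not lost, parses from the cache, and on error consumes leftovers until the saved location reappears. A toy standalone model of that resynchronization (indices stand in for SourceLocations):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> Toks = {1, 2, 3, 4, 99};  // 99 = the appended current token
  std::size_t Pos = 0, OrigLoc = 4;          // position saved before replay

  bool Error = true;                         // pretend parsing stopped early
  Pos = 2;                                   // ... partway through the cache
  if (Error)
    while (Pos != OrigLoc && Pos < Toks.size())
      ++Pos;                                 // the ConsumeAnyToken() loop
  std::cout << Toks[Pos] << '\n';            // back at 99
}
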
/// ConsumeAndStoreUntil - Consume and store the token at the passed token
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
index 555fcf0..5a7fc7e 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -29,13 +29,14 @@ using namespace clang;
/// specifier-qualifier-list abstract-declarator[opt]
///
/// Called type-id in C++.
-TypeResult Parser::ParseTypeName(SourceRange *Range) {
+TypeResult Parser::ParseTypeName(SourceRange *Range,
+ Declarator::TheContext Context) {
// Parse the common declaration-specifiers piece.
DeclSpec DS;
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, Context);
ParseDeclarator(DeclaratorInfo);
if (Range)
*Range = DeclaratorInfo.getSourceRange();
@@ -82,21 +83,20 @@ TypeResult Parser::ParseTypeName(SourceRange *Range) {
/// attributes are very simple in practice. Until we find a bug, I don't see
/// a pressing need to implement the 2 token lookahead.
-AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
+void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
- AttributeList *CurrAttr = 0;
-
while (Tok.is(tok::kw___attribute)) {
ConsumeToken();
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
"attribute")) {
SkipUntil(tok::r_paren, true); // skip until ) or ;
- return CurrAttr;
+ return;
}
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "(")) {
SkipUntil(tok::r_paren, true); // skip until ) or ;
- return CurrAttr;
+ return;
}
// Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
while (Tok.is(tok::identifier) || isDeclarationSpecifier() ||
@@ -122,8 +122,8 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
if (Tok.is(tok::r_paren)) {
// __attribute__(( mode(byte) ))
ConsumeParen(); // ignore the right paren loc for now
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc,
- ParmName, ParmLoc, 0, 0, CurrAttr);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ ParmName, ParmLoc, 0, 0));
} else if (Tok.is(tok::comma)) {
ConsumeToken();
// __attribute__(( format(printf, 1, 2) ))
@@ -146,10 +146,9 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
}
if (ArgExprsOk && Tok.is(tok::r_paren)) {
ConsumeParen(); // ignore the right paren loc for now
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0,
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0,
AttrNameLoc, ParmName, ParmLoc,
- ArgExprs.take(), ArgExprs.size(),
- CurrAttr);
+ ArgExprs.take(), ArgExprs.size()));
}
}
} else { // not an identifier
@@ -158,8 +157,8 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
// parse a possibly empty comma separated list of expressions
// __attribute__(( nonnull() ))
ConsumeParen(); // ignore the right paren loc for now
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0, CurrAttr);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0));
break;
case tok::kw_char:
case tok::kw_wchar_t:
@@ -174,10 +173,12 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
case tok::kw_float:
case tok::kw_double:
case tok::kw_void:
- case tok::kw_typeof:
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0, CurrAttr);
- if (CurrAttr->getKind() == AttributeList::AT_IBOutletCollection)
+ case tok::kw_typeof: {
+ AttributeList *attr
+ = AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0);
+ attrs.add(attr);
+ if (attr->getKind() == AttributeList::AT_IBOutletCollection)
Diag(Tok, diag::err_iboutletcollection_builtintype);
// If it's a builtin type name, eat it and expect a rparen
// __attribute__(( vec_type_hint(char) ))
@@ -185,6 +186,7 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
if (Tok.is(tok::r_paren))
ConsumeParen();
break;
+ }
default:
// __attribute__(( aligned(16) ))
ExprVector ArgExprs(Actions);
@@ -207,17 +209,16 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
// Match the ')'.
if (ArgExprsOk && Tok.is(tok::r_paren)) {
ConsumeParen(); // ignore the right paren loc for now
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0,
- AttrNameLoc, 0, SourceLocation(), ArgExprs.take(),
- ArgExprs.size(),
- CurrAttr);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0,
+ AttrNameLoc, 0, SourceLocation(),
+ ArgExprs.take(), ArgExprs.size()));
}
break;
}
}
} else {
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0, CurrAttr);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0));
}
}
if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
@@ -226,10 +227,9 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
SkipUntil(tok::r_paren, false);
}
- if (EndLoc)
- *EndLoc = Loc;
+ if (endLoc)
+ *endLoc = Loc;
}
- return CurrAttr;
}
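
The rewritten attribute parsers accumulate into a ParsedAttributes sink via AttrFactory.Create instead of hand-threading an AttributeList* chain through every return, which is why the early exits can now simply return. A standalone sketch of the sink pattern (types illustrative, not Clang's):

#include <iostream>
#include <string>
#include <vector>

struct Attr { std::string Name; };
struct ParsedAttrs {
  std::vector<Attr> List;
  void add(Attr A) { List.push_back(std::move(A)); }
};

// Callers pass one container down; helpers add to it and can bail out
// early without returning a list head.
static void parseOneSpec(ParsedAttrs &Attrs, bool Malformed) {
  if (Malformed) return;
  Attrs.add({"aligned"});
}

int main() {
  ParsedAttrs Attrs;
  parseOneSpec(Attrs, false);
  parseOneSpec(Attrs, true);              // contributes nothing
  std::cout << Attrs.List.size() << '\n'; // 1
}
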
/// ParseMicrosoftDeclSpec - Parse an __declspec construct
@@ -241,14 +241,14 @@ AttributeList *Parser::ParseGNUAttributes(SourceLocation *EndLoc) {
/// extended-decl-modifier[opt]
/// extended-decl-modifier extended-decl-modifier-seq
-AttributeList* Parser::ParseMicrosoftDeclSpec(AttributeList *CurrAttr) {
+void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &attrs) {
assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
ConsumeToken();
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
"declspec")) {
SkipUntil(tok::r_paren, true); // skip until ) or ;
- return CurrAttr;
+ return;
}
while (Tok.getIdentifierInfo()) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
@@ -260,23 +260,22 @@ AttributeList* Parser::ParseMicrosoftDeclSpec(AttributeList *CurrAttr) {
ExprResult ArgExpr(ParseAssignmentExpression());
if (!ArgExpr.isInvalid()) {
Expr *ExprList = ArgExpr.take();
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), &ExprList, 1,
- CurrAttr, true);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), &ExprList, 1, true));
}
if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
SkipUntil(tok::r_paren, false);
} else {
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0, CurrAttr, true);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0, true));
}
}
if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
SkipUntil(tok::r_paren, false);
- return CurrAttr;
+ return;
}
-AttributeList* Parser::ParseMicrosoftTypeAttributes(AttributeList *CurrAttr) {
+void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
// FIXME: Allow Sema to distinguish between these and real attributes!
while (Tok.is(tok::kw___fastcall) || Tok.is(tok::kw___stdcall) ||
@@ -287,21 +286,34 @@ AttributeList* Parser::ParseMicrosoftTypeAttributes(AttributeList *CurrAttr) {
if (Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64))
// FIXME: Support these properly!
continue;
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), 0, 0, CurrAttr, true);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, true));
}
- return CurrAttr;
}
-AttributeList* Parser::ParseBorlandTypeAttributes(AttributeList *CurrAttr) {
+void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
while (Tok.is(tok::kw___pascal)) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
- CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
- SourceLocation(), 0, 0, CurrAttr, true);
+ attrs.add(AttrFactory.Create(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, true));
+ }
+}
+
+void Parser::ParseOpenCLAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
+ while (Tok.is(tok::kw___kernel)) {
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.add(AttrFactory.Create(PP.getIdentifierInfo("opencl_kernel_function"),
+ AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, false));
}
- return CurrAttr;
+}
+
+void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
+ Diag(attrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ << attrs.Range;
}
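
For illustration only (the exact diagnostics and accepted positions are assumptions, not quoted from this patch): the __kernel path folds an OpenCL keyword into an ordinary parsed attribute, and DiagnoseProhibitedAttributes rejects attributes in positions that cannot take them:

__kernel void square(__global float *out);  // OpenCL input: '__kernel' becomes
                                            // the opencl_kernel_function attribute
[[noreturn]] namespace N {}                 // attributes prohibited here:
                                            // err_attributes_not_allowed
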
/// ParseDeclaration - Parse a full 'declaration', which consists of
@@ -320,48 +332,43 @@ AttributeList* Parser::ParseBorlandTypeAttributes(AttributeList *CurrAttr) {
/// [C++0x] static_assert-declaration
/// others... [FIXME]
///
-Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
+ unsigned Context,
SourceLocation &DeclEnd,
- CXX0XAttributeList Attr) {
+ ParsedAttributesWithRange &attrs) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
Decl *SingleDecl = 0;
switch (Tok.getKind()) {
case tok::kw_template:
case tok::kw_export:
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd);
break;
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
if (getLang().CPlusPlus && NextToken().is(tok::kw_namespace)) {
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
SourceLocation InlineLoc = ConsumeToken();
SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
break;
}
- return ParseSimpleDeclaration(Context, DeclEnd, Attr.AttrList, true);
+ return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs,
+ true);
case tok::kw_namespace:
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
SingleDecl = ParseNamespace(Context, DeclEnd);
break;
case tok::kw_using:
- SingleDecl = ParseUsingDirectiveOrDeclaration(Context, DeclEnd, Attr);
+ SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
+ DeclEnd, attrs);
break;
case tok::kw_static_assert:
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
break;
default:
- return ParseSimpleDeclaration(Context, DeclEnd, Attr.AttrList, true);
+ return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs, true);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -376,16 +383,19 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
///
/// If RequireSemi is false, this does not check for a ';' at the end of the
/// declaration. If it is true, it checks for and eats it.
-Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(unsigned Context,
+Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
+ unsigned Context,
SourceLocation &DeclEnd,
- AttributeList *Attr,
+ ParsedAttributes &attrs,
bool RequireSemi) {
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this);
- if (Attr)
- DS.AddAttributes(Attr);
+ DS.takeAttributesFrom(attrs);
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none,
- getDeclSpecContextFromDeclaratorContext(Context));
+ getDeclSpecContextFromDeclaratorContext(Context));
+ StmtResult R = Actions.ActOnVlaStmt(DS);
+ if (R.isUsable())
+ Stmts.push_back(R.release());
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
@@ -474,11 +484,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// short __attribute__((common)) var; -> declspec
// short var __attribute__((common)); -> declarator
// short x, __attribute__((common)) var; -> declarator
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- D.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(D);
ParseDeclarator(D);
@@ -547,12 +553,7 @@ Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
D.SetRangeEnd(Loc);
}
- // If attributes are present, parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- D.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(D);
// Inform the current actions module that we just parsed this declarator.
Decl *ThisDecl = 0;
@@ -586,11 +587,19 @@ Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
}
}
+ bool TypeContainsAuto =
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
+
// Parse declarator '=' initializer.
- if (Tok.is(tok::equal)) {
+ if (isTokenEqualOrMistypedEqualEqual(
+ diag::err_invalid_equalequal_after_declarator)) {
ConsumeToken();
- if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) {
+ if (Tok.is(tok::kw_delete)) {
SourceLocation DelLoc = ConsumeToken();
+
+ if (!getLang().CPlusPlus0x)
+ Diag(DelLoc, diag::warn_deleted_function_accepted_as_extension);
+
Actions.SetDeclDeleted(ThisDecl, DelLoc);
} else {
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
@@ -616,7 +625,8 @@ Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
SkipUntil(tok::comma, true, true);
Actions.ActOnInitializerError(ThisDecl);
} else
- Actions.AddInitializerToDecl(ThisDecl, Init.take());
+ Actions.AddInitializerToDecl(ThisDecl, Init.take(),
+ /*DirectInit=*/false, TypeContainsAuto);
}
} else if (Tok.is(tok::l_paren)) {
// Parse C++ direct initializer: '(' expression-list ')'
@@ -650,12 +660,11 @@ Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
Actions.AddCXXDirectInitializerToDecl(ThisDecl, LParenLoc,
move_arg(Exprs),
- CommaLocs.data(), RParenLoc);
+ RParenLoc,
+ TypeContainsAuto);
}
} else {
- bool TypeContainsUndeducedAuto =
- D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
- Actions.ActOnUninitializedDecl(ThisDecl, TypeContainsUndeducedAuto);
+ Actions.ActOnUninitializedDecl(ThisDecl, TypeContainsAuto);
}
return ThisDecl;
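
TypeContainsAuto is now computed once and threaded through all three initialization paths ('=' initializer, direct initializer, none), so C++0x auto deduction behaves uniformly. Illustrative inputs:

auto i = 42;    // '=' initializer: deduced as int
auto j(3.14);   // direct initializer: deduced as double
auto k;         // no initializer: ActOnUninitializedDecl diagnoses
                // the undeduced 'auto'
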
@@ -675,7 +684,7 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS) {
// Validate declspec for type-name.
unsigned Specs = DS.getParsedSpecifiers();
if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
- !DS.getAttributes())
+ !DS.hasAttributes())
Diag(Tok, diag::err_typename_requires_specqual);
// Issue diagnostic and remove storage class if present.
@@ -868,6 +877,7 @@ Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
/// [C99] 'inline'
/// [C++] 'virtual'
/// [C++] 'explicit'
+/// [OpenCL] '__kernel'
/// 'friend': [C++ dcl.friend]
/// 'constexpr': [C++0x dcl.constexpr]
@@ -877,6 +887,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
AccessSpecifier AS,
DeclSpecContext DSContext) {
DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(Tok.getLocation());
while (1) {
bool isInvalid = false;
const char *PrevSpec = 0;
@@ -905,13 +916,16 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
= DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified());
- Actions.CodeCompleteDeclarator(getCurScope(), AllowNonIdentifiers,
- AllowNestedNameSpecifiers);
+ Actions.CodeCompleteDeclSpec(getCurScope(), DS,
+ AllowNonIdentifiers,
+ AllowNestedNameSpecifiers);
ConsumeCodeCompletionToken();
return;
}
- if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
+ if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
+ CCC = Sema::PCC_LocalDeclarationSpecifiers;
+ else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
: Sema::PCC_Template;
else if (DSContext == DSC_class)
@@ -1005,7 +1019,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumeToken(); // The C++ scope.
if (Tok.getAnnotationValue()) {
ParsedType T = getTypeAnnotation(Tok);
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc,
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(),
PrevSpec, DiagID, T);
}
else
@@ -1080,20 +1095,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
- // Objective-C interface. If we don't have Objective-C or a '<', this is
- // just a normal reference to a typedef name.
- if (!Tok.is(tok::less) || !getLang().ObjC1)
- continue;
-
- SourceLocation LAngleLoc, EndProtoLoc;
- llvm::SmallVector<Decl *, 8> ProtocolDecl;
- llvm::SmallVector<SourceLocation, 8> ProtocolLocs;
- ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
- LAngleLoc, EndProtoLoc);
- DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
- ProtocolLocs.data(), LAngleLoc);
-
- DS.SetRangeEnd(EndProtoLoc);
+ // Objective-C interface.
+ if (Tok.is(tok::less) && getLang().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
continue;
}
@@ -1150,21 +1155,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
- // Objective-C interface. If we don't have Objective-C or a '<', this is
- // just a normal reference to a typedef name.
- if (!Tok.is(tok::less) || !getLang().ObjC1)
- continue;
-
- SourceLocation LAngleLoc, EndProtoLoc;
- llvm::SmallVector<Decl *, 8> ProtocolDecl;
- llvm::SmallVector<SourceLocation, 8> ProtocolLocs;
- ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
- LAngleLoc, EndProtoLoc);
- DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
- ProtocolLocs.data(), LAngleLoc);
-
- DS.SetRangeEnd(EndProtoLoc);
-
+ // Objective-C interface.
+ if (Tok.is(tok::less) && getLang().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
// Need to support trailing type qualifiers (e.g. "id<p> const").
// If a type specifier follows, it will be diagnosed elsewhere.
continue;
@@ -1196,12 +1190,12 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// GNU attributes support.
case tok::kw___attribute:
- DS.AddAttributes(ParseGNUAttributes());
+ ParseGNUAttributes(DS.getAttributes());
continue;
// Microsoft declspec support.
case tok::kw___declspec:
- DS.AddAttributes(ParseMicrosoftDeclSpec());
+ ParseMicrosoftDeclSpec(DS.getAttributes());
continue;
// Microsoft single token adornments.
@@ -1215,34 +1209,39 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
- DS.AddAttributes(ParseMicrosoftTypeAttributes());
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
// Borland single token adornments.
case tok::kw___pascal:
- DS.AddAttributes(ParseBorlandTypeAttributes());
+ ParseBorlandTypeAttributes(DS.getAttributes());
+ continue;
+
+ // OpenCL single token adornments.
+ case tok::kw___kernel:
+ ParseOpenCLAttributes(DS.getAttributes());
continue;
// storage-class-specifier
case tok::kw_typedef:
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_typedef, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw_extern:
if (DS.isThreadSpecified())
Diag(Tok, diag::ext_thread_before) << "extern";
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_extern, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw___private_extern__:
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_private_extern, Loc,
- PrevSpec, DiagID);
+ PrevSpec, DiagID, getLang());
break;
case tok::kw_static:
if (DS.isThreadSpecified())
Diag(Tok, diag::ext_thread_before) << "static";
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_static, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw_auto:
if (getLang().CPlusPlus0x)
@@ -1250,15 +1249,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DiagID);
else
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_auto, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw_register:
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_register, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw_mutable:
isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_mutable, Loc, PrevSpec,
- DiagID);
+ DiagID, getLang());
break;
case tok::kw___thread:
isInvalid = DS.SetStorageClassSpecThread(Loc, PrevSpec, DiagID);
@@ -1354,8 +1353,16 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
break;
case tok::kw_bool:
case tok::kw__Bool:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec,
- DiagID);
+ if (Tok.is(tok::kw_bool) &&
+ DS.getTypeSpecType() != DeclSpec::TST_unspecified &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ PrevSpec = ""; // Not used by the diagnostic.
+ DiagID = diag::err_bool_redeclaration;
+ isInvalid = true;
+ } else {
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec,
+ DiagID);
+ }
break;
case tok::kw__Decimal32:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec,
@@ -1432,23 +1439,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (DS.hasTypeSpecifier() || !getLang().ObjC1)
goto DoneWithDeclSpec;
- {
- SourceLocation LAngleLoc, EndProtoLoc;
- llvm::SmallVector<Decl *, 8> ProtocolDecl;
- llvm::SmallVector<SourceLocation, 8> ProtocolLocs;
- ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
- LAngleLoc, EndProtoLoc);
- DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
- ProtocolLocs.data(), LAngleLoc);
- DS.SetRangeEnd(EndProtoLoc);
-
+ if (!ParseObjCProtocolQualifiers(DS))
Diag(Loc, diag::warn_objc_protocol_qualifier_missing_id)
<< FixItHint::CreateInsertion(Loc, "id")
- << SourceRange(Loc, EndProtoLoc);
- // Need to support trailing type qualifiers (e.g. "id<p> const").
- // If a type specifier follows, it will be diagnosed elsewhere.
- continue;
- }
+ << SourceRange(Loc, DS.getSourceRange().getEnd());
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
}
// If the specifier wasn't legal, issue a diagnostic.
if (isInvalid) {
@@ -1552,7 +1550,8 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
// simple-type-specifier:
case tok::annot_typename: {
if (ParsedType T = getTypeAnnotation(Tok)) {
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(), PrevSpec,
DiagID, T);
} else
DS.SetTypeSpecError();
@@ -1563,18 +1562,9 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
// Objective-C interface. If we don't have Objective-C or a '<', this is
// just a normal reference to a typedef name.
- if (!Tok.is(tok::less) || !getLang().ObjC1)
- return true;
-
- SourceLocation LAngleLoc, EndProtoLoc;
- llvm::SmallVector<Decl *, 8> ProtocolDecl;
- llvm::SmallVector<SourceLocation, 8> ProtocolLocs;
- ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
- LAngleLoc, EndProtoLoc);
- DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
- ProtocolLocs.data(), LAngleLoc);
-
- DS.SetRangeEnd(EndProtoLoc);
+ if (Tok.is(tok::less) && getLang().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
return true;
}
@@ -1706,11 +1696,11 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
- DS.AddAttributes(ParseMicrosoftTypeAttributes());
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
return true;
case tok::kw___pascal:
- DS.AddAttributes(ParseBorlandTypeAttributes());
+ ParseBorlandTypeAttributes(DS.getAttributes());
return true;
default:
@@ -1756,7 +1746,6 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
}
// Parse the common specifier-qualifiers-list piece.
- SourceLocation DSStart = Tok.getLocation();
ParseSpecifierQualifierList(DS);
// If there are no declarators, this is a free-standing declaration
@@ -1773,11 +1762,8 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
FieldDeclarator DeclaratorInfo(DS);
// Attributes are only allowed here on successive declarators.
- if (!FirstDeclarator && Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.D.AddAttributes(AttrList, Loc);
- }
+ if (!FirstDeclarator)
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
/// struct-declarator: declarator
/// struct-declarator: declarator[opt] ':' constant-expression
@@ -1797,11 +1783,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
}
// If attributes exist after the declarator, parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.D.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
// We're done with this declarator; invoke the callback.
Decl *D = Fields.invoke(DeclaratorInfo);
@@ -1922,20 +1904,18 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
- llvm::OwningPtr<AttributeList> AttrList;
+ ParsedAttributes attrs;
// If attributes exist after struct contents, parse them.
- if (Tok.is(tok::kw___attribute))
- AttrList.reset(ParseGNUAttributes());
+ MaybeParseGNUAttributes(attrs);
Actions.ActOnFields(getCurScope(),
RecordLoc, TagDecl, FieldDecls.data(), FieldDecls.size(),
LBraceLoc, RBraceLoc,
- AttrList.get());
+ attrs.getList());
StructScope.Exit();
Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, RBraceLoc);
}
-
/// ParseEnumSpecifier
/// enum-specifier: [C99 6.7.2.2]
/// 'enum' identifier[opt] '{' enumerator-list '}'
@@ -1945,6 +1925,21 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
/// 'enum' identifier
/// [GNU] 'enum' attributes[opt] identifier
///
+/// [C++0x] enum-head '{' enumerator-list[opt] '}'
+/// [C++0x] enum-head '{' enumerator-list ',' '}'
+///
+/// enum-head: [C++0x]
+/// enum-key attributes[opt] identifier[opt] enum-base[opt]
+/// enum-key attributes[opt] nested-name-specifier identifier enum-base[opt]
+///
+/// enum-key: [C++0x]
+/// 'enum'
+/// 'enum' 'class'
+/// 'enum' 'struct'
+///
+/// enum-base: [C++0x]
+/// ':' type-specifier-seq
+///
/// [C++] elaborated-type-specifier:
/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
///
@@ -1958,10 +1953,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
ConsumeCodeCompletionToken();
}
- llvm::OwningPtr<AttributeList> Attr;
// If attributes exist after tag, parse them.
- if (Tok.is(tok::kw___attribute))
- Attr.reset(ParseGNUAttributes());
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLang().CPlusPlus) {
@@ -1979,6 +1973,16 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
}
}
+ bool IsScopedEnum = false;
+ bool IsScopedUsingClassTag = false;
+
+ if (getLang().CPlusPlus0x &&
+ (Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
+ IsScopedEnum = true;
+ IsScopedUsingClassTag = Tok.is(tok::kw_class);
+ ConsumeToken();
+ }
+
// Must have either 'enum name' or 'enum {...}'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace)) {
Diag(Tok, diag::err_expected_ident_lbrace);
@@ -1996,6 +2000,69 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
NameLoc = ConsumeToken();
}
+ if (!Name && IsScopedEnum) {
+ // C++0x 7.2p2: The optional identifier shall not be omitted in the
+ // declaration of a scoped enumeration.
+ Diag(Tok, diag::err_scoped_enum_missing_identifier);
+ IsScopedEnum = false;
+ IsScopedUsingClassTag = false;
+ }
+
+ TypeResult BaseType;
+
+ // Parse the fixed underlying type.
+ if (getLang().CPlusPlus0x && Tok.is(tok::colon)) {
+ bool PossibleBitfield = false;
+ if (getCurScope()->getFlags() & Scope::ClassScope) {
+ // If we're in class scope, this can either be an enum declaration with
+ // an underlying type, or a declaration of a bitfield member. We try to
+ // use a simple disambiguation scheme first to catch the common cases
+ // (integer literal, sizeof); if it's still ambiguous, we then consider
+ // anything that's a simple-type-specifier followed by '(' as an
+ // expression. This suffices because function types are not valid
+ // underlying types anyway.
+ TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
+ // If the next token starts an expression, we know we're parsing a
+ // bit-field. This is the common case.
+ if (TPR == TPResult::True())
+ PossibleBitfield = true;
+ // If the next token starts a type-specifier-seq, it may be either a
+ // fixed underlying type or the start of a function-style cast in C++;
+ // lookahead one more token to see if it's obvious that we have a
+ // fixed underlying type.
+ else if (TPR == TPResult::False() &&
+ GetLookAheadToken(2).getKind() == tok::semi) {
+ // Consume the ':'.
+ ConsumeToken();
+ } else {
+ // We have the start of a type-specifier-seq, so we have to perform
+ // tentative parsing to determine whether we have an expression or a
+ // type.
+ TentativeParsingAction TPA(*this);
+
+ // Consume the ':'.
+ ConsumeToken();
+
+ if (isCXXDeclarationSpecifier() != TPResult::True()) {
+ // We'll parse this as a bitfield later.
+ PossibleBitfield = true;
+ TPA.Revert();
+ } else {
+ // We have a type-specifier-seq.
+ TPA.Commit();
+ }
+ }
+ } else {
+ // Consume the ':'.
+ ConsumeToken();
+ }
+
+ if (!PossibleBitfield) {
+ SourceRange Range;
+ BaseType = ParseTypeName(&Range);
+ }
+ }
+
// There are three options here. If we have 'enum foo;', then this is a
// forward declaration. If we have 'enum foo {...' then this is a
// definition. Otherwise we have something like 'enum foo xyz', a reference.
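
The grammar added above covers C++0x scoped enumerations and fixed underlying types; inside a class, a ':' after the enum name is ambiguous between an enum-base and an unnamed bit-field, which is what the tentative-parsing block resolves. Example inputs (illustrative):

enum class Color : unsigned char { Red, Green };  // enum-key 'class' + enum-base
enum Flags : unsigned;                            // a fixed base permits forward declaration

struct S {
  enum E : int { A };   // ':' starts an enum-base here ...
  enum E : 2;           // ... but an unnamed bit-field width here (the common
};                      // integer-literal case the first TPResult check catches)
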
@@ -2029,10 +2096,12 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
const char *PrevSpec = 0;
unsigned DiagID;
Decl *TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
- StartLoc, SS, Name, NameLoc, Attr.get(),
+ StartLoc, SS, Name, NameLoc, attrs.getList(),
AS,
MultiTemplateParamsArg(Actions),
- Owned, IsDependent);
+ Owned, IsDependent, IsScopedEnum,
+ IsScopedUsingClassTag, BaseType);
+
if (IsDependent) {
// This enum has a dependent nested-name-specifier. Handle it as a
// dependent tag.
@@ -2109,6 +2178,10 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
IdentifierInfo *Ident = Tok.getIdentifierInfo();
SourceLocation IdentLoc = ConsumeToken();
+ // If attributes exist after the enumerator, parse them.
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
+
SourceLocation EqualLoc;
ExprResult AssignedVal;
if (Tok.is(tok::equal)) {
@@ -2122,11 +2195,19 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
Decl *EnumConstDecl = Actions.ActOnEnumConstant(getCurScope(), EnumDecl,
LastEnumConstDecl,
IdentLoc, Ident,
- EqualLoc,
+ attrs.getList(), EqualLoc,
AssignedVal.release());
EnumConstantDecls.push_back(EnumConstDecl);
LastEnumConstDecl = EnumConstDecl;
+ if (Tok.is(tok::identifier)) {
+ // We're missing a comma between enumerators.
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_enumerator_list_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ continue;
+ }
+
if (Tok.isNot(tok::comma))
break;
SourceLocation CommaLoc = ConsumeToken();
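
The new check turns a missing ',' between enumerators into a recoverable error with a fix-it instead of aborting the list. Illustrative input:

enum Color {
  Red
  Green,   // error: missing ',' between enumerators;
  Blue     // the fix-it inserts ", " right after 'Red'
};
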
@@ -2141,14 +2222,13 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// Eat the }.
SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
- llvm::OwningPtr<AttributeList> Attr;
// If attributes exist after the identifier list, parse them.
- if (Tok.is(tok::kw___attribute))
- Attr.reset(ParseGNUAttributes()); // FIXME: where do they do?
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
Actions.ActOnEnumBody(StartLoc, LBraceLoc, RBraceLoc, EnumDecl,
EnumConstantDecls.data(), EnumConstantDecls.size(),
- getCurScope(), Attr.get());
+ getCurScope(), attrs.getList());
EnumScope.Exit();
Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl, RBraceLoc);
@@ -2296,7 +2376,10 @@ bool Parser::isTypeSpecifierQualifier() {
/// isDeclarationSpecifier() - Return true if the current token is part of a
/// declaration specifier.
-bool Parser::isDeclarationSpecifier() {
+///
+/// \param DisambiguatingWithExpression True to indicate that the purpose of
+/// this check is to disambiguate between an expression and a declaration.
+bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
switch (Tok.getKind()) {
default: return false;
@@ -2314,6 +2397,16 @@ bool Parser::isDeclarationSpecifier() {
return true;
if (Tok.is(tok::identifier))
return false;
+
+ // If we're in Objective-C and we have an Objective-C class type followed
+ // by an identifier and then either ':' or ']', in a place where an
+ // expression is permitted, then this is probably a class message send
+ // missing the initial '['. In this case, we won't consider this to be
+ // the start of a declaration.
+ if (DisambiguatingWithExpression &&
+ isStartOfObjCClassMessageMissingOpenBracket())
+ return false;
+
return isDeclarationSpecifier();
case tok::coloncolon: // ::foo::bar
@@ -2441,6 +2534,10 @@ bool Parser::isConstructorDeclarator() {
if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
DeclScopeObj.EnterDeclaratorScope();
+ // Optionally skip Microsoft attributes.
+ ParsedAttributes Attrs;
+ MaybeParseMicrosoftAttributes(Attrs);
+
// Check whether the next token(s) are part of a declaration
// specifier, in which case we have the start of a parameter and,
// therefore, we know that this is a constructor.
@@ -2466,9 +2563,10 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
bool CXX0XAttributesAllowed) {
if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
SourceLocation Loc = Tok.getLocation();
- CXX0XAttributeList Attr = ParseCXX0XAttributes();
+ ParsedAttributesWithRange attrs;
+ ParseCXX0XAttributes(attrs);
if (CXX0XAttributesAllowed)
- DS.AddAttributes(Attr.AttrList);
+ DS.takeAttributesFrom(attrs);
else
Diag(Loc, diag::err_attributes_not_allowed);
}
@@ -2504,19 +2602,19 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw___fastcall:
case tok::kw___thiscall:
if (VendorAttributesAllowed) {
- DS.AddAttributes(ParseMicrosoftTypeAttributes());
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
}
goto DoneWithTypeQuals;
case tok::kw___pascal:
if (VendorAttributesAllowed) {
- DS.AddAttributes(ParseBorlandTypeAttributes());
+ ParseBorlandTypeAttributes(DS.getAttributes());
continue;
}
goto DoneWithTypeQuals;
case tok::kw___attribute:
if (VendorAttributesAllowed) {
- DS.AddAttributes(ParseGNUAttributes());
+ ParseGNUAttributes(DS.getAttributes());
continue; // do *not* consume the next token!
}
// otherwise, FALL THROUGH!
@@ -2602,7 +2700,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
- Loc, DS.TakeAttributes()),
+ Loc, DS.takeAttributes()),
/* Don't replace range end. */SourceLocation());
return;
}
@@ -2636,12 +2734,12 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
if (Kind == tok::star)
// Remember that we parsed a pointer type, and remember the type-quals.
D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc,
- DS.TakeAttributes()),
+ DS.takeAttributes()),
SourceLocation());
else
// Remember that we parsed a Block type, and remember the type-quals.
D.AddTypeInfo(DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(),
- Loc, DS.TakeAttributes()),
+ Loc, DS.takeAttributes()),
SourceLocation());
} else {
// Is a reference
@@ -2650,7 +2748,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Complain about rvalue references in C++03, but then go on and build
// the declarator.
if (Kind == tok::ampamp && !getLang().CPlusPlus0x)
- Diag(Loc, diag::err_rvalue_reference);
+ Diag(Loc, diag::ext_rvalue_reference);
// C++ 8.3.2p1: cv-qualified references are ill-formed except when the
// cv-qualifiers are introduced through the use of a typedef or of a
@@ -2693,7 +2791,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Remember that we parsed a reference type. It doesn't have type-quals.
D.AddTypeInfo(DeclaratorChunk::getReference(DS.getTypeQualifiers(), Loc,
- DS.TakeAttributes(),
+ DS.takeAttributes(),
Kind == tok::amp),
SourceLocation());
}
@@ -2718,7 +2816,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
/// [C++] declarator-id
///
/// declarator-id: [C++ 8]
-/// id-expression
+/// '...'[opt] id-expression
/// '::'[opt] nested-name-specifier[opt] type-name
///
/// id-expression: [C++ 5.1]
@@ -2748,6 +2846,20 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
DeclScopeObj.EnterDeclaratorScope();
}
+ // C++0x [dcl.fct]p14:
+ // There is a syntactic ambiguity when an ellipsis occurs at the end
+ // of a parameter-declaration-clause without a preceding comma. In
+ // this case, the ellipsis is parsed as part of the
+ // abstract-declarator if the type of the parameter names a template
+ // parameter pack that has not been expanded; otherwise, it is parsed
+ // as part of the parameter-declaration-clause.
+ if (Tok.is(tok::ellipsis) &&
+ !((D.getContext() == Declarator::PrototypeContext ||
+ D.getContext() == Declarator::BlockLiteralContext) &&
+ NextToken().is(tok::r_paren) &&
+ !Actions.containsUnexpandedParameterPacks(D)))
+ D.setEllipsisLoc(ConsumeToken());
+
if (Tok.is(tok::identifier) || Tok.is(tok::kw_operator) ||
Tok.is(tok::annot_template_id) || Tok.is(tok::tilde)) {
// We found something that indicates the start of an unqualified-id.
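
The ellipsis check above implements C++0x [dcl.fct]p14: a trailing '...' belongs to the declarator only when the parameter's type names an unexpanded parameter pack. Illustrative declarations:

template<typename... Ts> void f(Ts... xs);  // pack unexpanded: '...' is part of the declarator
template<typename... Ts> void g(Ts...);     // same, with the declarator-id omitted
void h(int...);                             // no pack: '...' is the variadic parameter-clause
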
@@ -2830,12 +2942,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
"Haven't past the location of the identifier yet?");
// Don't parse attributes unless we have an identifier.
- if (D.getIdentifier() && getLang().CPlusPlus0x
- && isCXX0XAttributeSpecifier(true)) {
- SourceLocation AttrEndLoc;
- CXX0XAttributeList Attr = ParseCXX0XAttributes();
- D.AddAttributes(Attr.AttrList, AttrEndLoc);
- }
+ if (D.getIdentifier())
+ MaybeParseCXX0XAttributes(D);
while (1) {
if (Tok.is(tok::l_paren)) {
@@ -2849,7 +2957,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (!isCXXFunctionDeclarator(warnIfAmbiguous))
break;
}
- ParseFunctionDeclarator(ConsumeParen(), D);
+ ParsedAttributes attrs;
+ ParseFunctionDeclarator(ConsumeParen(), D, attrs);
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
} else {
@@ -2885,10 +2994,10 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// In either case, we need to eat any attributes to be able to determine what
// sort of paren this is.
//
- llvm::OwningPtr<AttributeList> AttrList;
+ ParsedAttributes attrs;
bool RequiresArg = false;
if (Tok.is(tok::kw___attribute)) {
- AttrList.reset(ParseGNUAttributes());
+ ParseGNUAttributes(attrs);
// We require that the argument list (if this is a non-grouping paren) be
// present even if the attribute list was empty.
@@ -2898,12 +3007,11 @@ void Parser::ParseParenDeclarator(Declarator &D) {
if (Tok.is(tok::kw___cdecl) || Tok.is(tok::kw___stdcall) ||
Tok.is(tok::kw___thiscall) || Tok.is(tok::kw___fastcall) ||
Tok.is(tok::kw___w64) || Tok.is(tok::kw___ptr64)) {
- AttrList.reset(ParseMicrosoftTypeAttributes(AttrList.take()));
+ ParseMicrosoftTypeAttributes(attrs);
}
// Eat any Borland extensions.
- if (Tok.is(tok::kw___pascal)) {
- AttrList.reset(ParseBorlandTypeAttributes(AttrList.take()));
- }
+ if (Tok.is(tok::kw___pascal))
+ ParseBorlandTypeAttributes(attrs);
// If we haven't passed the identifier yet (or where the identifier would be
// stored, if this is an abstract declarator), then this is probably just
@@ -2932,15 +3040,15 @@ void Parser::ParseParenDeclarator(Declarator &D) {
if (isGrouping) {
bool hadGroupingParens = D.hasGroupingParens();
D.setGroupingParens(true);
- if (AttrList)
- D.AddAttributes(AttrList.take(), SourceLocation());
+ if (!attrs.empty())
+ D.addAttributes(attrs.getList(), SourceLocation());
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
// Match the ')'.
- SourceLocation Loc = MatchRHSPunctuation(tok::r_paren, StartLoc);
+ SourceLocation EndLoc = MatchRHSPunctuation(tok::r_paren, StartLoc);
+ D.AddTypeInfo(DeclaratorChunk::getParen(StartLoc, EndLoc), EndLoc);
D.setGroupingParens(hadGroupingParens);
- D.SetRangeEnd(Loc);
return;
}
@@ -2950,7 +3058,7 @@ void Parser::ParseParenDeclarator(Declarator &D) {
  // ParseFunctionDeclarator to handle the argument list.
D.SetIdentifier(0, Tok.getLocation());
- ParseFunctionDeclarator(StartLoc, D, AttrList.take(), RequiresArg);
+ ParseFunctionDeclarator(StartLoc, D, attrs, RequiresArg);
}
/// ParseFunctionDeclarator - We are after the identifier and have parsed the
@@ -2982,37 +3090,51 @@ void Parser::ParseParenDeclarator(Declarator &D) {
/// '=' assignment-expression
/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
///
-/// For C++, after the parameter-list, it also parses "cv-qualifier-seq[opt]"
-/// and "exception-specification[opt]".
+/// For C++, after the parameter-list, it also parses "cv-qualifier-seq[opt]",
+/// C++0x "ref-qualifier[opt]" and "exception-specification[opt]".
///
void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
- AttributeList *AttrList,
+ ParsedAttributes &attrs,
bool RequiresArg) {
// lparen is already consumed!
assert(D.isPastIdentifier() && "Should not call before identifier!");
+ ParsedType TrailingReturnType;
+
// This parameter list may be empty.
if (Tok.is(tok::r_paren)) {
- if (RequiresArg) {
+ if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- delete AttrList;
- }
SourceLocation RParenLoc = ConsumeParen(); // Eat the closing ')'.
SourceLocation EndLoc = RParenLoc;
// cv-qualifier-seq[opt].
DeclSpec DS;
+ SourceLocation RefQualifierLoc;
+ bool RefQualifierIsLValueRef = true;
bool hasExceptionSpec = false;
SourceLocation ThrowLoc;
bool hasAnyExceptionSpec = false;
llvm::SmallVector<ParsedType, 2> Exceptions;
llvm::SmallVector<SourceRange, 2> ExceptionRanges;
if (getLang().CPlusPlus) {
+ MaybeParseCXX0XAttributes(attrs);
+
ParseTypeQualifierListOpt(DS, false /*no attributes*/);
if (!DS.getSourceRange().getEnd().isInvalid())
EndLoc = DS.getSourceRange().getEnd();
+ // Parse ref-qualifier[opt]
+ if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
+ if (!getLang().CPlusPlus0x)
+ Diag(Tok, diag::ext_ref_qualifier);
+
+ RefQualifierIsLValueRef = Tok.is(tok::amp);
+ RefQualifierLoc = ConsumeToken();
+ EndLoc = RefQualifierLoc;
+ }
+
// Parse exception-specification[opt].
if (Tok.is(tok::kw_throw)) {
hasExceptionSpec = true;
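
(A hedged sketch of the ref-qualifiers the new branch accepts; the type and member names are made up.)

    struct S {
      void lv() &;     // lvalue ref-qualifier: S s; s.lv(); is OK, S().lv() is not
      void rv() &&;    // rvalue ref-qualifier: S().rv(); is OK, s.rv() is not
    };
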
@@ -3022,21 +3144,30 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
assert(Exceptions.size() == ExceptionRanges.size() &&
"Produced different number of exception types and ranges.");
}
+
+ // Parse trailing-return-type.
+ if (getLang().CPlusPlus0x && Tok.is(tok::arrow)) {
+ TrailingReturnType = ParseTrailingReturnType().get();
+ }
}
// Remember that we parsed a function type, and remember the attributes.
// int() -> no prototype, no '...'.
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*prototype*/getLang().CPlusPlus,
+ D.AddTypeInfo(DeclaratorChunk::getFunction(attrs,
+ /*prototype*/getLang().CPlusPlus,
/*variadic*/ false,
SourceLocation(),
/*arglist*/ 0, 0,
DS.getTypeQualifiers(),
+ RefQualifierIsLValueRef,
+ RefQualifierLoc,
hasExceptionSpec, ThrowLoc,
hasAnyExceptionSpec,
Exceptions.data(),
ExceptionRanges.data(),
Exceptions.size(),
- LParenLoc, RParenLoc, D),
+ LParenLoc, RParenLoc, D,
+ TrailingReturnType),
EndLoc);
return;
}
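
(The trailing-return-type parsed above is the C++0x '-> type' form; a minimal sketch, with invented function names. One reason for the form is that the parameters are in scope in the return type.)

    auto add(int a, int b) -> int { return a + b; }

    template<typename T, typename U>
    auto sum(T t, U u) -> decltype(t + u) { return t + u; }
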
@@ -3048,10 +3179,8 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
if (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename)) {
// K&R identifier lists can't have typedefs as identifiers, per
// C99 6.7.5.3p11.
- if (RequiresArg) {
+ if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- delete AttrList;
- }
// Identifier list. Note that '(' identifier-list ')' is only allowed for
// normal declarators, not for abstract-declarators. Get the first
@@ -3101,17 +3230,20 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
break;
}
- SourceLocation DSStart = Tok.getLocation();
-
// Parse the declaration-specifiers.
// Just use the ParsingDeclaration "scope" of the declarator.
DeclSpec DS;
+
+ // Skip any Microsoft attributes before a param.
+ if (getLang().Microsoft && Tok.is(tok::l_square))
+ ParseMicrosoftAttributes(DS.getAttributes());
+
+ SourceLocation DSStart = Tok.getLocation();
// If the caller parsed attributes for the first argument, add them now.
- if (AttrList) {
- DS.AddAttributes(AttrList);
- AttrList = 0; // Only apply the attributes to the first parameter.
- }
+ // Take them so that we only apply the attributes to the first parameter.
+ DS.takeAttributesFrom(attrs);
+
ParseDeclarationSpecifiers(DS);
// Parse the declarator. This is "PrototypeContext", because we must
@@ -3120,11 +3252,7 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
ParseDeclarator(ParmDecl);
// Parse GNU attributes, if present.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- ParmDecl.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(ParmDecl);
// Remember this parsed parameter in ParamInfo.
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
@@ -3184,6 +3312,11 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// Consume the '='.
ConsumeToken();
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed);
+
ExprResult DefArgResult(ParseAssignmentExpression());
if (DefArgResult.isInvalid()) {
Actions.ActOnParamDefaultArgumentError(Param);
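
(A small illustration of the PotentiallyEvaluatedIfUsed context introduced above; next_id is a hypothetical helper.)

    int next_id();                // has a side effect when called
    void f(int x = next_id());    // the default argument is potentially
                                  // evaluated only at call sites that use it,
                                  // e.g. f(); it is not evaluated for f(42)
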
@@ -3222,14 +3355,13 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
ConsumeToken();
}
- // Leave prototype scope.
- PrototypeScope.Exit();
-
// If we have the closing ')', eat it.
SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
SourceLocation EndLoc = RParenLoc;
DeclSpec DS;
+ SourceLocation RefQualifierLoc;
+ bool RefQualifierIsLValueRef = true;
bool hasExceptionSpec = false;
SourceLocation ThrowLoc;
bool hasAnyExceptionSpec = false;
@@ -3237,11 +3369,23 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
llvm::SmallVector<SourceRange, 2> ExceptionRanges;
if (getLang().CPlusPlus) {
+ MaybeParseCXX0XAttributes(attrs);
+
// Parse cv-qualifier-seq[opt].
ParseTypeQualifierListOpt(DS, false /*no attributes*/);
if (!DS.getSourceRange().getEnd().isInvalid())
EndLoc = DS.getSourceRange().getEnd();
+ // Parse ref-qualifier[opt]
+ if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
+ if (!getLang().CPlusPlus0x)
+ Diag(Tok, diag::ext_ref_qualifier);
+
+ RefQualifierIsLValueRef = Tok.is(tok::amp);
+ RefQualifierLoc = ConsumeToken();
+ EndLoc = RefQualifierLoc;
+ }
+
// Parse exception-specification[opt].
if (Tok.is(tok::kw_throw)) {
hasExceptionSpec = true;
@@ -3251,19 +3395,34 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
assert(Exceptions.size() == ExceptionRanges.size() &&
"Produced different number of exception types and ranges.");
}
+
+ // Parse trailing-return-type.
+ if (getLang().CPlusPlus0x && Tok.is(tok::arrow)) {
+ TrailingReturnType = ParseTrailingReturnType().get();
+ }
}
+ // FIXME: We should leave the prototype scope before parsing the exception
+ // specification, and then reenter it when parsing the trailing return type.
+
+ // Leave prototype scope.
+ PrototypeScope.Exit();
+
// Remember that we parsed a function type, and remember the attributes.
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/true, IsVariadic,
+ D.AddTypeInfo(DeclaratorChunk::getFunction(attrs,
+ /*proto*/true, IsVariadic,
EllipsisLoc,
ParamInfo.data(), ParamInfo.size(),
DS.getTypeQualifiers(),
+ RefQualifierIsLValueRef,
+ RefQualifierLoc,
hasExceptionSpec, ThrowLoc,
hasAnyExceptionSpec,
Exceptions.data(),
ExceptionRanges.data(),
Exceptions.size(),
- LParenLoc, RParenLoc, D),
+ LParenLoc, RParenLoc, D,
+ TrailingReturnType),
EndLoc);
}
@@ -3333,10 +3492,12 @@ void Parser::ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc,
// Remember that we parsed a function type, and remember the attributes. This
// function type is always a K&R style function type, which is not varargs and
// has no prototype.
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/false, /*varargs*/false,
+ D.AddTypeInfo(DeclaratorChunk::getFunction(ParsedAttributes(),
+ /*proto*/false, /*varargs*/false,
SourceLocation(),
&ParamInfo[0], ParamInfo.size(),
/*TypeQuals*/0,
+ true, SourceLocation(),
/*exception*/false,
SourceLocation(), false, 0, 0, 0,
LParenLoc, RLoc, D),
@@ -3355,15 +3516,12 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
// This code does a fast path to handle some of the most obvious cases.
if (Tok.getKind() == tok::r_square) {
SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
- //FIXME: Use these
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier(true)) {
- Attr = ParseCXX0XAttributes();
- }
+ ParsedAttributes attrs;
+ MaybeParseCXX0XAttributes(attrs);
// Remember that we parsed the empty array type.
ExprResult NumElements;
- D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, 0,
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, attrs, false, false, 0,
StartLoc, EndLoc),
EndLoc);
return;
@@ -3374,18 +3532,12 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
ConsumeToken();
SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
- //FIXME: Use these
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
- Attr = ParseCXX0XAttributes();
- }
-
- // If there was an error parsing the assignment-expression, recover.
- if (ExprRes.isInvalid())
- ExprRes.release(); // Deallocate expr, just use [].
+ ParsedAttributes attrs;
+ MaybeParseCXX0XAttributes(attrs);
  // Remember that we parsed an array type, and remember its features.
- D.AddTypeInfo(DeclaratorChunk::getArray(0, false, 0, ExprRes.release(),
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, attrs, false, 0,
+ ExprRes.release(),
StartLoc, EndLoc),
EndLoc);
return;
@@ -3446,14 +3598,11 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
- //FIXME: Use these
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
- Attr = ParseCXX0XAttributes();
- }
+ ParsedAttributes attrs;
+ MaybeParseCXX0XAttributes(attrs);
  // Remember that we parsed an array type, and remember its features.
- D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
+ D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(), attrs,
StaticLoc.isValid(), isStar,
NumElements.release(),
StartLoc, EndLoc),
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index b277156..b3ad25b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -69,16 +69,16 @@ Decl *Parser::ParseNamespace(unsigned Context,
}
// Read label attributes, if present.
- llvm::OwningPtr<AttributeList> AttrList;
+ ParsedAttributes attrs;
if (Tok.is(tok::kw___attribute)) {
attrTok = Tok;
// FIXME: save these somewhere.
- AttrList.reset(ParseGNUAttributes());
+ ParseGNUAttributes(attrs);
}
if (Tok.is(tok::equal)) {
- if (AttrList)
+ if (!attrs.empty())
Diag(attrTok, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
Diag(InlineLoc, diag::err_inline_namespace_alias)
@@ -112,16 +112,16 @@ Decl *Parser::ParseNamespace(unsigned Context,
Decl *NamespcDecl =
Actions.ActOnStartNamespaceDef(getCurScope(), InlineLoc, IdentLoc, Ident,
- LBrace, AttrList.get());
+ LBrace, attrs.getList());
PrettyDeclStackTraceEntry CrashInfo(Actions, NamespcDecl, NamespaceLoc,
"parsing namespace");
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- Attr = ParseCXX0XAttributes();
- ParseExternalDeclaration(Attr);
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
}
// Leave the namespace scope.
@@ -181,11 +181,9 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
/// 'extern' string-literal '{' declaration-seq[opt] '}'
/// 'extern' string-literal declaration
///
-Decl *Parser::ParseLinkage(ParsingDeclSpec &DS,
- unsigned Context) {
+Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
assert(Tok.is(tok::string_literal) && "Not a string literal!");
llvm::SmallString<8> LangBuffer;
- // LangBuffer is guaranteed to be big enough.
bool Invalid = false;
llvm::StringRef Lang = PP.getSpelling(Tok, LangBuffer, &Invalid);
if (Invalid)
@@ -201,41 +199,40 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS,
Tok.is(tok::l_brace)? Tok.getLocation()
: SourceLocation());
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
- Attr = ParseCXX0XAttributes();
- }
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
if (Tok.isNot(tok::l_brace)) {
DS.setExternInLinkageSpec(true);
- ParseExternalDeclaration(Attr, &DS);
+ ParseExternalDeclaration(attrs, &DS);
return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
SourceLocation());
}
DS.abort();
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
SourceLocation LBrace = ConsumeBrace();
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- Attr = ParseCXX0XAttributes();
- ParseExternalDeclaration(Attr);
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
}
SourceLocation RBrace = MatchRHSPunctuation(tok::r_brace, LBrace);
- return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec, RBrace);
+ return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
+ RBrace);
}
/// ParseUsingDirectiveOrDeclaration - Parse a C++ using-declaration or
/// using-directive. Assumes that the current token is 'using'.
Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
- SourceLocation &DeclEnd,
- CXX0XAttributeList Attr) {
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs) {
assert(Tok.is(tok::kw_using) && "Not using token");
// Eat 'using'.
@@ -246,17 +243,24 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
ConsumeCodeCompletionToken();
}
- if (Tok.is(tok::kw_namespace))
- // Next token after 'using' is 'namespace' so it must be using-directive
- return ParseUsingDirective(Context, UsingLoc, DeclEnd, Attr.AttrList);
+ // 'using namespace' means this is a using-directive.
+ if (Tok.is(tok::kw_namespace)) {
+ // Template parameters are always an error here.
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive)
+ << R << FixItHint::CreateRemoval(R);
+ }
+
+ return ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ }
+
+ // Otherwise, it must be a using-declaration.
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ // Using declarations can't have attributes.
+ ProhibitAttributes(attrs);
- // Otherwise, it must be using-declaration.
- // Ignore illegal attributes (the caller should already have issued an error.
- return ParseUsingDeclaration(Context, UsingLoc, DeclEnd);
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd);
}
/// ParseUsingDirective - Parse a C++ using-directive; assumes
@@ -270,9 +274,9 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
/// namespace-name attributes[opt] ;
///
Decl *Parser::ParseUsingDirective(unsigned Context,
- SourceLocation UsingLoc,
- SourceLocation &DeclEnd,
- AttributeList *Attr) {
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs) {
assert(Tok.is(tok::kw_namespace) && "Not 'namespace' token");
// Eat 'namespace'.
@@ -307,17 +311,18 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
bool GNUAttr = false;
if (Tok.is(tok::kw___attribute)) {
GNUAttr = true;
- Attr = addAttributeLists(Attr, ParseGNUAttributes());
+ ParseGNUAttributes(attrs);
}
// Eat ';'.
DeclEnd = Tok.getLocation();
ExpectAndConsume(tok::semi,
- GNUAttr ? diag::err_expected_semi_after_attribute_list :
- diag::err_expected_semi_after_namespace_name, "", tok::semi);
+ GNUAttr ? diag::err_expected_semi_after_attribute_list
+ : diag::err_expected_semi_after_namespace_name,
+ "", tok::semi);
return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
- IdentLoc, NamespcName, Attr);
+ IdentLoc, NamespcName, attrs.getList());
}
/// ParseUsingDeclaration - Parse C++ using-declaration. Assumes that
@@ -329,13 +334,18 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
/// 'using' :: unqualified-id
///
Decl *Parser::ParseUsingDeclaration(unsigned Context,
- SourceLocation UsingLoc,
- SourceLocation &DeclEnd,
- AccessSpecifier AS) {
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
CXXScopeSpec SS;
SourceLocation TypenameLoc;
bool IsTypeName;
+ // TODO: in C++0x, if we have template parameters this must be a
+ // template alias:
+ // template <...> using id = type;
+
// Ignore optional 'typename'.
// FIXME: This is wrong; we should parse this as a typename-specifier.
if (Tok.is(tok::kw_typename)) {
@@ -370,18 +380,30 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
}
// Parse (optional) attributes (most likely GNU strong-using extension).
- llvm::OwningPtr<AttributeList> AttrList;
- if (Tok.is(tok::kw___attribute))
- AttrList.reset(ParseGNUAttributes());
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
// Eat ';'.
DeclEnd = Tok.getLocation();
ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
- AttrList ? "attributes list" : "using declaration",
+ !attrs.empty() ? "attributes list" : "using declaration",
tok::semi);
- return Actions.ActOnUsingDeclaration(getCurScope(), AS, true, UsingLoc, SS, Name,
- AttrList.get(), IsTypeName, TypenameLoc);
+ // Diagnose an attempt to declare a templated using-declaration.
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_declaration)
+ << R << FixItHint::CreateRemoval(R);
+
+ // Unfortunately, we have to bail out instead of recovering by
+ // ignoring the parameters, just in case the nested name specifier
+ // depends on the parameters.
+ return 0;
+ }
+
+ return Actions.ActOnUsingDeclaration(getCurScope(), AS, true, UsingLoc, SS,
+ Name, attrs.getList(),
+ IsTypeName, TypenameLoc);
}
/// ParseStaticAssertDeclaration - Parse C++0x static_assert-declaration.
@@ -422,7 +444,7 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
MatchRHSPunctuation(tok::r_paren, LParenLoc);
DeclEnd = Tok.getLocation();
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_static_assert);
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert);
return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc,
AssertExpr.take(),
@@ -652,20 +674,19 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SuppressingAccessChecks = true;
}
- AttributeList *AttrList = 0;
+ ParsedAttributes attrs;
// If attributes exist after tag, parse them.
if (Tok.is(tok::kw___attribute))
- AttrList = ParseGNUAttributes();
+ ParseGNUAttributes(attrs);
// If declspecs exist after tag, parse them.
while (Tok.is(tok::kw___declspec))
- AttrList = ParseMicrosoftDeclSpec(AttrList);
+ ParseMicrosoftDeclSpec(attrs);
// If C++0x attributes exist here, parse them.
// FIXME: Are we consistent with the ordering of parsing of different
// styles of attributes?
- if (isCXX0XAttributeSpecifier())
- AttrList = addAttributeLists(AttrList, ParseCXX0XAttributes().AttrList);
+ MaybeParseCXX0XAttributes(attrs);
if (TagType == DeclSpec::TST_struct && Tok.is(tok::kw___is_pod)) {
// GNU libstdc++ 4.2 uses __is_pod as the name of a struct template, but
@@ -786,9 +807,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// There are four options here. If we have 'struct foo;', then this
// is either a forward declaration or a friend declaration, which
- // have to be treated differently. If we have 'struct foo {...' or
- // 'struct foo :...' then this is a definition. Otherwise we have
- // something like 'struct foo xyz', a reference.
+ // have to be treated differently. If we have 'struct foo {...',
+ // 'struct foo :...' or 'struct foo <class-virt-specifier>' then this is a
+ // definition. Otherwise we have something like 'struct foo xyz', a reference.
// However, in some contexts, things look like declarations but are just
// references, e.g.
// new struct s;
@@ -798,7 +819,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Sema::TagUseKind TUK;
if (SuppressDeclarations)
TUK = Sema::TUK_Reference;
- else if (Tok.is(tok::l_brace) || (getLang().CPlusPlus && Tok.is(tok::colon))){
+ else if (Tok.is(tok::l_brace) ||
+ (getLang().CPlusPlus && Tok.is(tok::colon)) ||
+ isCXX0XClassVirtSpecifier() != ClassVirtSpecifiers::CVS_None) {
if (DS.isFriendSpecified()) {
// C++ [class.friend]p2:
// A class shall not be defined in a friend declaration.
@@ -859,7 +882,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateId->LAngleLoc,
TemplateArgsPtr,
TemplateId->RAngleLoc,
- AttrList);
+ attrs.getList());
// Friend template-ids are treated as references unless
// they have template headers, in which case they're ill-formed
@@ -875,7 +898,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateArgsPtr,
TemplateId->RAngleLoc);
- TypeResult = Actions.ActOnTagTemplateIdType(TypeResult, TUK,
+ TypeResult = Actions.ActOnTagTemplateIdType(SS, TypeResult, TUK,
TagType, StartLoc);
} else {
// This is an explicit specialization or a class template
@@ -921,7 +944,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateId->LAngleLoc,
TemplateArgsPtr,
TemplateId->RAngleLoc,
- AttrList,
+ attrs.getList(),
MultiTemplateParamsArg(Actions,
TemplateParams? &(*TemplateParams)[0] : 0,
TemplateParams? TemplateParams->size() : 0));
@@ -939,7 +962,16 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateInfo.ExternLoc,
TemplateInfo.TemplateLoc,
TagType, StartLoc, SS, Name,
- NameLoc, AttrList);
+ NameLoc, attrs.getList());
+ } else if (TUK == Sema::TUK_Friend &&
+ TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
+ TagOrTempResult =
+ Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
+ TagType, StartLoc, SS,
+ Name, NameLoc, attrs.getList(),
+ MultiTemplateParamsArg(Actions,
+ TemplateParams? &(*TemplateParams)[0] : 0,
+ TemplateParams? TemplateParams->size() : 0));
} else {
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Definition) {
@@ -948,25 +980,34 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
bool IsDependent = false;
+ // Don't pass down template parameter lists if this is just a tag
+ // reference. For example, we don't need the template parameters here:
+ // template <class T> class A *makeA(T t);
+ MultiTemplateParamsArg TParams;
+ if (TUK != Sema::TUK_Reference && TemplateParams)
+ TParams =
+ MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
+
// Declaration or definition of a class type
- TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc, SS,
- Name, NameLoc, AttrList, AS,
- MultiTemplateParamsArg(Actions,
- TemplateParams? &(*TemplateParams)[0] : 0,
- TemplateParams? TemplateParams->size() : 0),
- Owned, IsDependent);
+ TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
+ SS, Name, NameLoc, attrs.getList(), AS,
+ TParams, Owned, IsDependent, false,
+ false, clang::TypeResult());
// If ActOnTag said the type was dependent, try again with the
// less common call.
- if (IsDependent)
+ if (IsDependent) {
+ assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
SS, Name, StartLoc, NameLoc);
+ }
}
// If there is a body, parse it and inform the actions module.
if (TUK == Sema::TUK_Definition) {
assert(Tok.is(tok::l_brace) ||
- (getLang().CPlusPlus && Tok.is(tok::colon)));
+ (getLang().CPlusPlus && Tok.is(tok::colon)) ||
+ isCXX0XClassVirtSpecifier() != ClassVirtSpecifiers::CVS_None);
if (getLang().CPlusPlus)
ParseCXXMemberSpecification(StartLoc, TagType, TagOrTempResult.get());
else
@@ -1158,13 +1199,20 @@ Parser::BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
if (BaseType.isInvalid())
return true;
+ // Parse the optional ellipsis (for a pack expansion). The ellipsis is
+ // actually part of the base-specifier-list grammar productions, but we
+ // parse it here for convenience.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
// Find the complete source range for the base-specifier.
SourceRange Range(StartLoc, EndLocation);
// Notify semantic analysis that we have parsed a complete
// base-specifier.
return Actions.ActOnBaseSpecifier(ClassDecl, Range, IsVirtual, Access,
- BaseType.get(), BaseLoc);
+ BaseType.get(), BaseLoc, EllipsisLoc);
}
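
(For context, the base-specifier pack expansion now handed to Sema looks like the following sketch; Mixin and Bases are illustrative names.)

    template<typename ...Bases>
    struct Mixin : Bases... { };   // the '...' expands the pack into one
                                   // base-specifier per element of Bases
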
/// getAccessSpecifierIfPresent - Determine whether the next token is
@@ -1189,15 +1237,14 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
// has any default arguments, we'll need to parse them later.
LateParsedMethodDeclaration *LateMethod = 0;
DeclaratorChunk::FunctionTypeInfo &FTI
- = DeclaratorInfo.getTypeObject(0).Fun;
+ = DeclaratorInfo.getFunctionTypeInfo();
for (unsigned ParamIdx = 0; ParamIdx < FTI.NumArgs; ++ParamIdx) {
if (LateMethod || FTI.ArgInfo[ParamIdx].DefaultArgTokens) {
if (!LateMethod) {
// Push this method onto the stack of late-parsed method
// declarations.
- getCurrentClass().MethodDecls.push_back(
- LateParsedMethodDeclaration(ThisDecl));
- LateMethod = &getCurrentClass().MethodDecls.back();
+ LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
+ getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
// Add all of the parameters prior to this one (they don't
@@ -1217,6 +1264,122 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
}
}
+/// isCXX0XVirtSpecifier - Determine whether the next token is a C++0x
+/// virt-specifier.
+///
+/// virt-specifier:
+/// override
+/// final
+/// new
+VirtSpecifiers::Specifier Parser::isCXX0XVirtSpecifier() const {
+ if (!getLang().CPlusPlus)
+ return VirtSpecifiers::VS_None;
+
+ if (Tok.is(tok::kw_new))
+ return VirtSpecifiers::VS_New;
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // Initialize the contextual keywords.
+ if (!Ident_final) {
+ Ident_final = &PP.getIdentifierTable().get("final");
+ Ident_override = &PP.getIdentifierTable().get("override");
+ }
+
+ if (II == Ident_override)
+ return VirtSpecifiers::VS_Override;
+
+ if (II == Ident_final)
+ return VirtSpecifiers::VS_Final;
+ }
+
+ return VirtSpecifiers::VS_None;
+}
+
+/// ParseOptionalCXX0XVirtSpecifierSeq - Parse a virt-specifier-seq.
+///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
+ while (true) {
+ VirtSpecifiers::Specifier Specifier = isCXX0XVirtSpecifier();
+ if (Specifier == VirtSpecifiers::VS_None)
+ return;
+
+ // C++ [class.mem]p8:
+ // A virt-specifier-seq shall contain at most one of each virt-specifier.
+ const char *PrevSpec = 0;
+ if (VS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
+ Diag(Tok.getLocation(), diag::err_duplicate_virt_specifier)
+ << PrevSpec
+ << FixItHint::CreateRemoval(Tok.getLocation());
+
+ if (!getLang().CPlusPlus0x)
+ Diag(Tok.getLocation(), diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ ConsumeToken();
+ }
+}
+
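(A minimal sketch of the member virt-specifiers recognized above; note that 'new' was only a C++0x draft specifier and, to the best of my knowledge, did not survive into the final standard.)

    struct B {
      virtual void f();
      virtual void g();
    };
    struct D : B {
      void f() override;   // 'override': must override a virtual from a base
      void g() final;      // 'final': no further overriding in derived classes
    };
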
+/// isCXX0XClassVirtSpecifier - Determine whether the next token is a C++0x
+/// class-virt-specifier.
+///
+/// class-virt-specifier:
+/// final
+/// explicit
+ClassVirtSpecifiers::Specifier Parser::isCXX0XClassVirtSpecifier() const {
+ if (!getLang().CPlusPlus)
+ return ClassVirtSpecifiers::CVS_None;
+
+ if (Tok.is(tok::kw_explicit))
+ return ClassVirtSpecifiers::CVS_Explicit;
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // Initialize the contextual keywords.
+ if (!Ident_final) {
+ Ident_final = &PP.getIdentifierTable().get("final");
+ Ident_override = &PP.getIdentifierTable().get("override");
+ }
+
+ if (II == Ident_final)
+ return ClassVirtSpecifiers::CVS_Final;
+ }
+
+ return ClassVirtSpecifiers::CVS_None;
+}
+
+/// ParseOptionalCXX0XClassVirtSpecifierSeq - Parse a class-virt-specifier-seq.
+///
+/// class-virt-specifier-seq:
+/// class-virt-specifier
+/// class-virt-specifier-seq class-virt-specifier
+void Parser::ParseOptionalCXX0XClassVirtSpecifierSeq(ClassVirtSpecifiers &CVS) {
+ while (true) {
+ ClassVirtSpecifiers::Specifier Specifier = isCXX0XClassVirtSpecifier();
+ if (Specifier == ClassVirtSpecifiers::CVS_None)
+ return;
+
+ // C++ [class]p1:
+ // A class-virt-specifier-seq shall contain at most one of each
+ // class-virt-specifier.
+ const char *PrevSpec = 0;
+ if (CVS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
+ Diag(Tok.getLocation(), diag::err_duplicate_class_virt_specifier)
+ << PrevSpec
+ << FixItHint::CreateRemoval(Tok.getLocation());
+
+ if (!getLang().CPlusPlus0x)
+ Diag(Tok.getLocation(), diag::ext_override_control_keyword)
+ << ClassVirtSpecifiers::getSpecifierName(Specifier);
+
+ ConsumeToken();
+ }
+}
+
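(The class-virt-specifiers above attach to the class head rather than a member; a hedged sketch follows. 'explicit' on a class was likewise a draft-C++0x feature that was later dropped.)

    struct Sealed final { };   // 'final' after the class name: Sealed cannot
                               // be used as a base class
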
/// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
///
/// member-declaration:
@@ -1233,10 +1396,19 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
/// member-declarator-list ',' member-declarator
///
/// member-declarator:
-/// declarator pure-specifier[opt]
+/// declarator virt-specifier-seq[opt] pure-specifier[opt]
/// declarator constant-initializer[opt]
/// identifier[opt] ':' constant-expression
///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+///
+/// virt-specifier:
+/// override
+/// final
+/// new
+///
/// pure-specifier:
/// '= 0'
///
@@ -1315,17 +1487,15 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// is a bitfield.
ColonProtectionRAIIObject X(*this);
- CXX0XAttributeList AttrList;
+ ParsedAttributesWithRange attrs;
// Optional C++0x attribute-specifier
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- AttrList = ParseCXX0XAttributes();
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
if (Tok.is(tok::kw_using)) {
// FIXME: Check for template aliases
- if (AttrList.HasAttr)
- Diag(AttrList.Range.getBegin(), diag::err_attributes_not_allowed)
- << AttrList.Range;
+ ProhibitAttributes(attrs);
// Eat 'using'.
SourceLocation UsingLoc = ConsumeToken();
@@ -1336,16 +1506,16 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
} else {
SourceLocation DeclEnd;
// Otherwise, it must be using-declaration.
- ParseUsingDeclaration(Declarator::MemberContext, UsingLoc, DeclEnd, AS);
+ ParseUsingDeclaration(Declarator::MemberContext, TemplateInfo,
+ UsingLoc, DeclEnd, AS);
}
return;
}
- SourceLocation DSStart = Tok.getLocation();
// decl-specifier-seq:
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags);
- DS.AddAttributes(AttrList.AttrList);
+ DS.takeAttributesFrom(attrs);
ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class);
MultiTemplateParamsArg TemplateParams(Actions,
@@ -1361,6 +1531,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
ParsingDeclarator DeclaratorInfo(*this, DS, Declarator::MemberContext);
+ VirtSpecifiers VS;
if (Tok.isNot(tok::colon)) {
// Don't parse FOO:BAR as if it were a typo for FOO::BAR.
@@ -1377,12 +1548,10 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return;
}
+ ParseOptionalCXX0XVirtSpecifierSeq(VS);
+
// If attributes exist after the declarator, but before an '{', parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo);
// function-definition:
if (Tok.is(tok::l_brace)
@@ -1392,6 +1561,10 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
Diag(Tok, diag::err_func_def_no_params);
ConsumeBrace();
SkipUntil(tok::r_brace, true);
+
+ // Consume the optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
return;
}
@@ -1402,10 +1575,18 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// assumes the declarator represents a function, not a typedef.
ConsumeBrace();
SkipUntil(tok::r_brace, true);
+
+ // Consume the optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
return;
}
- ParseCXXInlineMethodDef(AS, DeclaratorInfo, TemplateInfo);
+ ParseCXXInlineMethodDef(AS, DeclaratorInfo, TemplateInfo, VS);
+ // Consume the optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+
return;
}
}
@@ -1431,6 +1612,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SkipUntil(tok::comma, true, true);
}
+ ParseOptionalCXX0XVirtSpecifierSeq(VS);
+
// pure-specifier:
// '= 0'
//
@@ -1442,7 +1625,9 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// '=' 'delete'
if (Tok.is(tok::equal)) {
ConsumeToken();
- if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) {
+ if (Tok.is(tok::kw_delete)) {
+ if (!getLang().CPlusPlus0x)
+ Diag(Tok, diag::warn_deleted_function_accepted_as_extension);
ConsumeToken();
Deleted = true;
} else {
@@ -1464,11 +1649,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
// If attributes exist after the declarator, parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo);
// NOTE: If Sema is the Action module and declarator is an instance field,
  // this call will *not* return the created decl; it will return null.
@@ -1485,7 +1666,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DeclaratorInfo,
move(TemplateParams),
BitfieldSize.release(),
- Init.release(),
+ VS, Init.release(),
/*IsDefinition*/Deleted,
Deleted);
}
@@ -1510,16 +1691,13 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Parse the next declarator.
DeclaratorInfo.clear();
+ VS.clear();
BitfieldSize = 0;
Init = 0;
Deleted = false;
// Attributes are only allowed on the second declarator.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo);
if (Tok.isNot(tok::colon))
ParseDeclarator(DeclaratorInfo);
@@ -1585,6 +1763,9 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (TagDecl)
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+ ClassVirtSpecifiers CVS;
+ ParseOptionalCXX0XClassVirtSpecifierSeq(CVS);
+
if (Tok.is(tok::colon)) {
ParseBaseClause(TagDecl);
@@ -1602,7 +1783,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation LBraceLoc = ConsumeBrace();
if (TagDecl)
- Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, LBraceLoc);
+ Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, CVS,
+ LBraceLoc);
// C++ 11p3: Members of a class defined with the keyword class are private
// by default. Members of a class defined with the keywords struct or union
@@ -1654,14 +1836,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
}
// If attributes exist after class contents, parse them.
- llvm::OwningPtr<AttributeList> AttrList;
- if (Tok.is(tok::kw___attribute))
- AttrList.reset(ParseGNUAttributes());
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
if (TagDecl)
Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl,
LBraceLoc, RBraceLoc,
- AttrList.get());
+ attrs.getList());
// C++ 9.2p2: Within the class member-specification, the class is regarded as
// complete within function bodies, default arguments,
@@ -1707,14 +1888,14 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
/// ':' mem-initializer-list
///
/// [C++] mem-initializer-list:
-/// mem-initializer
-/// mem-initializer , mem-initializer-list
+/// mem-initializer ...[opt]
+/// mem-initializer ...[opt] , mem-initializer-list
void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
assert(Tok.is(tok::colon) && "Constructor initializer always starts with ':'");
SourceLocation ColonLoc = ConsumeToken();
- llvm::SmallVector<CXXBaseOrMemberInitializer*, 4> MemInitializers;
+ llvm::SmallVector<CXXCtorInitializer*, 4> MemInitializers;
bool AnyErrors = false;
do {
@@ -1735,7 +1916,13 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
ConsumeToken();
else if (Tok.is(tok::l_brace))
break;
- else {
+ // If the next token looks like a base or member initializer, assume that
+ // we're just missing a comma.
+ else if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) {
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_ctor_init_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ } else {
// Skip over garbage, until we get to '{'. Don't eat the '{'.
Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
SkipUntil(tok::l_brace, true, true);
@@ -1801,11 +1988,15 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
TemplateTypeTy, IdLoc,
LParenLoc, ArgExprs.take(),
- ArgExprs.size(), CommaLocs.data(),
- RParenLoc);
+ ArgExprs.size(), RParenLoc,
+ EllipsisLoc);
}
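
(A sketch of the mem-initializer pack expansion whose '...' is consumed above; X and Mixins are invented names.)

    template<typename ...Mixins>
    struct X : Mixins... {
      X() : Mixins()... { }   // the trailing '...' expands to one
                              // mem-initializer per base in Mixins
    };
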
/// ParseExceptionSpecification - Parse a C++ exception-specification
@@ -1816,8 +2007,8 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
/// [MS] 'throw' '(' '...' ')'
///
/// type-id-list:
-/// type-id
-/// type-id-list ',' type-id
+///   type-id ...[opt]
+///   type-id-list ',' type-id ...[opt]
///
bool Parser::ParseExceptionSpecification(SourceLocation &EndLoc,
llvm::SmallVectorImpl<ParsedType>
@@ -1827,7 +2018,7 @@ bool Parser::ParseExceptionSpecification(SourceLocation &EndLoc,
bool &hasAnyExceptionSpec) {
assert(Tok.is(tok::kw_throw) && "expected throw");
- SourceLocation ThrowLoc = ConsumeToken();
+ ConsumeToken();
if (!Tok.is(tok::l_paren)) {
return Diag(Tok, diag::err_expected_lparen_after) << "throw";
@@ -1849,10 +2040,21 @@ bool Parser::ParseExceptionSpecification(SourceLocation &EndLoc,
SourceRange Range;
while (Tok.isNot(tok::r_paren)) {
TypeResult Res(ParseTypeName(&Range));
+
+ if (Tok.is(tok::ellipsis)) {
+ // C++0x [temp.variadic]p5:
+ // - In a dynamic-exception-specification (15.4); the pattern is a
+ // type-id.
+ SourceLocation Ellipsis = ConsumeToken();
+ if (!Res.isInvalid())
+ Res = Actions.ActOnPackExpansion(Res.get(), Ellipsis);
+ }
+
if (!Res.isInvalid()) {
Exceptions.push_back(Res.get());
Ranges.push_back(Range);
}
+
if (Tok.is(tok::comma))
ConsumeToken();
else
@@ -1863,20 +2065,41 @@ bool Parser::ParseExceptionSpecification(SourceLocation &EndLoc,
return false;
}
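
(For illustration, the dynamic-exception-specification pack expansion handled above; f and E are placeholder names.)

    template<typename ...E>
    void f() throw(E...);   // the type-id 'E' is the pattern; the '...'
                            // expands it to one type per element of E
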
+/// ParseTrailingReturnType - Parse a trailing return type on a new-style
+/// function declaration.
+TypeResult Parser::ParseTrailingReturnType() {
+ assert(Tok.is(tok::arrow) && "expected arrow");
+
+ ConsumeToken();
+
+ // FIXME: Need to suppress declarations when parsing this typename.
+ // Otherwise in this function definition:
+ //
+ // auto f() -> struct X {}
+ //
+ // struct X is parsed as class definition because of the trailing
+ // brace.
+
+ SourceRange Range;
+ return ParseTypeName(&Range);
+}
+
/// \brief We have just started parsing the definition of a new class,
/// so push that class onto our stack of classes that is currently
/// being parsed.
-void Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass) {
+Sema::ParsingClassState
+Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass) {
assert((NonNestedClass || !ClassStack.empty()) &&
"Nested class without outer class");
ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass));
+ return Actions.PushParsingClass();
}
/// \brief Deallocate the given parsed class and all of its nested
/// classes.
void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
- for (unsigned I = 0, N = Class->NestedClasses.size(); I != N; ++I)
- DeallocateParsedClasses(Class->NestedClasses[I]);
+ for (unsigned I = 0, N = Class->LateParsedDeclarations.size(); I != N; ++I)
+ delete Class->LateParsedDeclarations[I];
delete Class;
}
@@ -1889,9 +2112,11 @@ void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
///
/// \returns true if the class we've popped is a top-level class,
/// false otherwise.
-void Parser::PopParsingClass() {
+void Parser::PopParsingClass(Sema::ParsingClassState state) {
assert(!ClassStack.empty() && "Mismatched push/pop for class parsing");
+ Actions.PopParsingClass(state);
+
ParsingClass *Victim = ClassStack.top();
ClassStack.pop();
if (Victim->TopLevelClass) {
@@ -1902,13 +2127,12 @@ void Parser::PopParsingClass() {
}
assert(!ClassStack.empty() && "Missing top-level class?");
- if (Victim->MethodDecls.empty() && Victim->MethodDefs.empty() &&
- Victim->NestedClasses.empty()) {
+ if (Victim->LateParsedDeclarations.empty()) {
// The victim is a nested class, but we will not need to perform
// any processing after the definition of this class since it has
// no members whose handling was delayed. Therefore, we can just
// remove this nested class.
- delete Victim;
+ DeallocateParsedClasses(Victim);
return;
}
@@ -1916,7 +2140,7 @@ void Parser::PopParsingClass() {
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
- ClassStack.top()->NestedClasses.push_back(Victim);
+ ClassStack.top()->LateParsedDeclarations.push_back(new LateParsedClass(this, Victim));
Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
}
@@ -1955,12 +2179,12 @@ void Parser::PopParsingClass() {
/// '[' balanced-token-seq ']'
/// '{' balanced-token-seq '}'
/// any token but '(', ')', '[', ']', '{', or '}'
-CXX0XAttributeList Parser::ParseCXX0XAttributes(SourceLocation *EndLoc) {
+void Parser::ParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *endLoc) {
assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square)
&& "Not a C++0x attribute list");
SourceLocation StartLoc = Tok.getLocation(), Loc;
- AttributeList *CurrAttr = 0;
ConsumeBracket();
ConsumeBracket();
@@ -2004,21 +2228,16 @@ CXX0XAttributeList Parser::ParseCXX0XAttributes(SourceLocation *EndLoc) {
switch(AttributeList::getKind(AttrName))
{
// No arguments
- case AttributeList::AT_base_check:
case AttributeList::AT_carries_dependency:
- case AttributeList::AT_final:
- case AttributeList::AT_hiding:
- case AttributeList::AT_noreturn:
- case AttributeList::AT_override: {
+ case AttributeList::AT_noreturn: {
if (Tok.is(tok::l_paren)) {
Diag(Tok.getLocation(), diag::err_cxx0x_attribute_forbids_arguments)
<< AttrName->getName();
break;
}
- CurrAttr = new AttributeList(AttrName, AttrLoc, 0, AttrLoc, 0,
- SourceLocation(), 0, 0, CurrAttr, false,
- true);
+ attrs.add(AttrFactory.Create(AttrName, AttrLoc, 0, AttrLoc, 0,
+ SourceLocation(), 0, 0, false, true));
AttrParsed = true;
break;
}
@@ -2038,9 +2257,9 @@ CXX0XAttributeList Parser::ParseCXX0XAttributes(SourceLocation *EndLoc) {
ExprVector ArgExprs(Actions);
ArgExprs.push_back(ArgExpr.release());
- CurrAttr = new AttributeList(AttrName, AttrLoc, 0, AttrLoc,
- 0, ParamLoc, ArgExprs.take(), 1, CurrAttr,
- false, true);
+ attrs.add(AttrFactory.Create(AttrName, AttrLoc, 0, AttrLoc,
+ 0, ParamLoc, ArgExprs.take(), 1,
+ false, true));
AttrParsed = true;
break;
@@ -2065,8 +2284,7 @@ CXX0XAttributeList Parser::ParseCXX0XAttributes(SourceLocation *EndLoc) {
if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
SkipUntil(tok::r_square, false);
- CXX0XAttributeList Attr (CurrAttr, SourceRange(StartLoc, Loc), true);
- return Attr;
+ attrs.Range = SourceRange(StartLoc, Loc);
}
/// ParseCXX0XAlignArgument - Parse the argument to C++0x's [[align]]
@@ -2088,3 +2306,23 @@ ExprResult Parser::ParseCXX0XAlignArgument(SourceLocation Start) {
} else
return ParseConstantExpression();
}
+
+/// ParseMicrosoftAttributes - Parse a Microsoft attribute [Attr]
+///
+/// [MS] ms-attribute:
+/// '[' token-seq ']'
+///
+/// [MS] ms-attribute-seq:
+/// ms-attribute[opt]
+/// ms-attribute ms-attribute-seq
+void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
+
+ while (Tok.is(tok::l_square)) {
+ ConsumeBracket();
+ SkipUntil(tok::r_square, true, true);
+ if (endLoc) *endLoc = Tok.getLocation();
+ ExpectAndConsume(tok::r_square, diag::err_expected_rsquare);
+ }
+}
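
(Note that the loop above only skips the bracketed tokens rather than interpreting them; a sketch of the kind of Microsoft attribute input it tolerates, with an illustrative attribute.)

    [uuid("A1B2C3D4-0000-0000-0000-000000000000")] struct ComLike;
    // everything between '[' and ']' is consumed and discarded
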
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
index c4beab1..616c251 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -172,13 +172,11 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
/// = *= /= %= += -= <<= >>= &= ^= |=
///
/// expression: [C99 6.5.17]
-/// assignment-expression
-/// expression ',' assignment-expression
+/// assignment-expression ...[opt]
+/// expression ',' assignment-expression ...[opt]
///
ExprResult Parser::ParseExpression() {
ExprResult LHS(ParseAssignmentExpression());
- if (LHS.isInvalid()) return move(LHS);
-
return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
}
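
(The '...[opt]' added to the expression grammar covers pack expansion in expression contexts; a minimal sketch, where 'use' is a hypothetical function.)

    template<typename ...T>
    void forward_all(T ...args) {
      use(args...);   // 'args...' expands into the argument-expression-list,
                      // one argument per element of the pack
    }
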
@@ -190,8 +188,6 @@ ExprResult Parser::ParseExpression() {
ExprResult
Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
ExprResult LHS(ParseObjCAtExpression(AtLoc));
- if (LHS.isInvalid()) return move(LHS);
-
return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
}
@@ -206,14 +202,13 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
ExtensionRAIIObject O(Diags);
LHS = ParseCastExpression(false);
- if (LHS.isInvalid()) return move(LHS);
}
- LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
- LHS.take());
- if (LHS.isInvalid()) return move(LHS);
+ if (!LHS.isInvalid())
+ LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
+ LHS.take());
- return ParseRHSOfBinaryExpression(LHS.take(), prec::Comma);
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
}
/// ParseAssignmentExpression - Parse an expr that doesn't include commas.
@@ -228,9 +223,7 @@ ExprResult Parser::ParseAssignmentExpression() {
return ParseThrowExpression();
ExprResult LHS(ParseCastExpression(false));
- if (LHS.isInvalid()) return move(LHS);
-
- return ParseRHSOfBinaryExpression(LHS.take(), prec::Assignment);
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
}
/// ParseAssignmentExprWithObjCMessageExprStart - Parse an assignment expression
@@ -249,10 +242,8 @@ Parser::ParseAssignmentExprWithObjCMessageExprStart(SourceLocation LBracLoc,
ExprResult R
= ParseObjCMessageExpressionBody(LBracLoc, SuperLoc,
ReceiverType, ReceiverExpr);
- if (R.isInvalid()) return move(R);
- R = ParsePostfixExpressionSuffix(R.take());
- if (R.isInvalid()) return move(R);
- return ParseRHSOfBinaryExpression(R.take(), prec::Assignment);
+ R = ParsePostfixExpressionSuffix(R);
+ return ParseRHSOfBinaryExpression(R, prec::Assignment);
}
@@ -264,9 +255,7 @@ ExprResult Parser::ParseConstantExpression() {
Sema::Unevaluated);
ExprResult LHS(ParseCastExpression(false));
- if (LHS.isInvalid()) return move(LHS);
-
- return ParseRHSOfBinaryExpression(LHS.take(), prec::Conditional);
+ return ParseRHSOfBinaryExpression(LHS, prec::Conditional);
}
/// ParseRHSOfBinaryExpression - Parse a binary expression that starts with
@@ -301,8 +290,10 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// In particular, the RHS of the '?' is 'expression', not
// 'logical-OR-expression' as we might expect.
TernaryMiddle = ParseExpression();
- if (TernaryMiddle.isInvalid())
- return move(TernaryMiddle);
+ if (TernaryMiddle.isInvalid()) {
+ LHS = ExprError();
+ TernaryMiddle = 0;
+ }
} else {
// Special case handling of "X ? Y : Z" where Y is empty:
// logical-OR-expression '?' ':' conditional-expression [GNU]
@@ -362,9 +353,10 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
RHS = ParseAssignmentExpression();
else
RHS = ParseCastExpression(false);
- if (RHS.isInvalid())
- return move(RHS);
+ if (RHS.isInvalid())
+ LHS = ExprError();
+
// Remember the precedence of this operator and get the precedence of the
// operator immediately to the right of the RHS.
prec::Level ThisPrec = NextTokPrec;
@@ -384,10 +376,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// is okay, to bind exactly as tightly. For example, compile A=B=C=D as
// A=(B=(C=D)), where each paren is a level of recursion here.
// The function takes ownership of the RHS.
- RHS = ParseRHSOfBinaryExpression(RHS.get(),
+ RHS = ParseRHSOfBinaryExpression(RHS,
static_cast<prec::Level>(ThisPrec + !isRightAssoc));
+
if (RHS.isInvalid())
- return move(RHS);
+ LHS = ExprError();
NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
getLang().CPlusPlus0x);
@@ -426,9 +419,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ParsedType TypeOfCast) {
bool NotCastExpr;
ExprResult Res = ParseCastExpression(isUnaryExpression,
- isAddressOfOperand,
- NotCastExpr,
- TypeOfCast);
+ isAddressOfOperand,
+ NotCastExpr,
+ TypeOfCast);
if (NotCastExpr)
Diag(Tok, diag::err_expected_expression);
return move(Res);
@@ -451,12 +444,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// unary-operator cast-expression
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
+/// [C++0x] 'sizeof' '...' '(' identifier ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
/// [C++0x] 'alignof' '(' type-id ')'
/// [GNU] '&&' identifier
/// [C++] new-expression
/// [C++] delete-expression
+/// [C++0x] 'noexcept' '(' expression ')'
///
/// unary-operator: one of
/// '&' '*' '+' '-' '~' '!'
@@ -542,13 +537,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// '__is_polymorphic'
/// '__is_union'
///
-/// [GNU] binary-type-trait:
-/// '__is_base_of' [TODO]
+/// binary-type-trait:
+/// [GNU] '__is_base_of'
+/// [MS] '__is_convertible_to'
///
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
- bool isAddressOfOperand,
- bool &NotCastExpr,
- ParsedType TypeOfCast) {
+ bool isAddressOfOperand,
+ bool &NotCastExpr,
+ ParsedType TypeOfCast) {
ExprResult Res;
tok::TokenKind SavedKind = Tok.getKind();
NotCastExpr = false;
@@ -571,17 +567,15 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ParenParseOption ParenExprType =
(isUnaryExpression && !getLang().CPlusPlus)? CompoundLiteral : CastExpr;
ParsedType CastTy;
- SourceLocation LParenLoc = Tok.getLocation();
SourceLocation RParenLoc;
{
- // The inside of the parens don't need to be a colon protected scope.
+    // The inside of the parens doesn't need to be a colon-protected scope,
+    // and isn't immediately a message send.
ColonProtectionRAIIObject X(*this, false);
-
+
      Res = ParseParenExpression(ParenExprType, false/*stopIfCastExpr*/,
TypeOfCast, CastTy, RParenLoc);
- if (Res.isInvalid())
- return move(Res);
}
switch (ParenExprType) {
@@ -647,7 +641,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
(Actions.getTypeName(II, ILoc, getCurScope()) ||
// Allow the base to be 'super' if in an objc-method.
(&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
- SourceLocation DotLoc = ConsumeToken();
+ ConsumeToken();
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_property_name);
@@ -661,6 +655,54 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
break;
}
+ // In an Objective-C method, if we have "super" followed by an identifier,
+ // the token sequence is ill-formed. However, if there's a ':' or ']' after
+ // that identifier, this is probably a message send with a missing open
+ // bracket. Treat it as such.
+ if (getLang().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ getCurScope()->isInObjcMethodScope() &&
+ ((Tok.is(tok::identifier) &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
+ Tok.is(tok::code_completion))) {
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), ILoc, ParsedType(),
+ 0);
+ break;
+ }
+
+ // If we have an Objective-C class name followed by an identifier
+ // and either ':' or ']', this is an Objective-C class message
+  // send that's missing the opening '['. Recover
+ // appropriately. Also take this path if we're performing code
+ // completion after an Objective-C class name.
+ if (getLang().ObjC1 &&
+ ((Tok.is(tok::identifier) && !InMessageExpression) ||
+ Tok.is(tok::code_completion))) {
+ const Token& Next = NextToken();
+ if (Tok.is(tok::code_completion) ||
+ Next.is(tok::colon) || Next.is(tok::r_square))
+ if (ParsedType Typ = Actions.getTypeName(II, ILoc, getCurScope()))
+ if (Typ.get()->isObjCObjectOrInterfaceType()) {
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS;
+ DS.SetRangeStart(ILoc);
+ DS.SetRangeEnd(ILoc);
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
+ DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ Res = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), 0);
+ break;
+ }
+ }
+
// Make sure to pass down the right value for isAddressOfOperand.
if (isAddressOfOperand && isPostfixExpressionSuffixStart())
isAddressOfOperand = false;
@@ -692,7 +734,6 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___builtin_va_arg:
case tok::kw___builtin_offsetof:
case tok::kw___builtin_choose_expr:
- case tok::kw___builtin_types_compatible_p:
return ParseBuiltinPrimaryExpression();
case tok::kw___null:
return Actions.ActOnGNUNullExpr(ConsumeToken());
@@ -753,9 +794,13 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
if (Tok.isNot(tok::identifier))
return ExprError(Diag(Tok, diag::err_expected_ident));
+ if (getCurScope()->getFnParent() == 0)
+ return ExprError(Diag(Tok, diag::err_address_of_label_outside_fn));
+
Diag(AmpAmpLoc, diag::ext_gnu_address_of_label);
- Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(),
- Tok.getIdentifierInfo());
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
ConsumeToken();
return move(Res);
}
@@ -768,10 +813,39 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_typeid:
Res = ParseCXXTypeid();
break;
+ case tok::kw___uuidof:
+ Res = ParseCXXUuidof();
+ break;
case tok::kw_this:
Res = ParseCXXThis();
break;
+ case tok::annot_typename:
+ if (isStartOfObjCClassMessageMissingOpenBracket()) {
+ ParsedType Type = getTypeAnnotation(Tok);
+
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS;
+ DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(Tok.getLastLoc());
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, Type);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ ConsumeToken();
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ Ty.get(), 0);
+ break;
+ }
+ // Fall through
+
case tok::kw_char:
case tok::kw_wchar_t:
case tok::kw_char16_t:
@@ -787,8 +861,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_void:
case tok::kw_typename:
case tok::kw_typeof:
- case tok::kw___vector:
- case tok::annot_typename: {
+ case tok::kw___vector: {
if (!getLang().CPlusPlus) {
Diag(Tok, diag::err_expected_expression);
return ExprError();
@@ -888,6 +961,23 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_delete: // [C++] delete-expression
return ParseCXXDeleteExpression(false, Tok.getLocation());
+ case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
+ SourceLocation KeyLoc = ConsumeToken();
+ SourceLocation LParen = Tok.getLocation();
+ if (ExpectAndConsume(tok::l_paren,
+ diag::err_expected_lparen_after, "noexcept"))
+ return ExprError();
+ // C++ [expr.unary.noexcept]p1:
+ // The noexcept operator determines whether the evaluation of its operand,
+ // which is an unevaluated operand, can throw an exception.
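+ // For example (illustrative), "noexcept(f())" is a compile-time constant
+ // that is true iff the call f() cannot throw; f() itself is never
+ // evaluated.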
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ ExprResult Result = ParseExpression();
+ SourceLocation RParen = MatchRHSPunctuation(tok::r_paren, LParen);
+ if (!Result.isInvalid())
+ Result = Actions.ActOnNoexceptExpr(KeyLoc, LParen, Result.take(), RParen);
+ return move(Result);
+ }
+
case tok::kw___is_pod: // [GNU] unary-type-trait
case tok::kw___is_class:
case tok::kw___is_enum:
@@ -906,6 +996,11 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___has_virtual_destructor:
return ParseUnaryTypeTrait();
+ case tok::kw___builtin_types_compatible_p:
+ case tok::kw___is_base_of:
+ case tok::kw___is_convertible_to:
+ return ParseBinaryTypeTrait();
+
case tok::at: {
SourceLocation AtLoc = ConsumeToken();
return ParseObjCAtExpression(AtLoc);
@@ -928,8 +1023,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
// These can be followed by postfix-expr pieces.
- if (Res.isInvalid()) return move(Res);
- return ParsePostfixExpressionSuffix(Res.get());
+ return ParsePostfixExpressionSuffix(Res);
}
/// ParsePostfixExpressionSuffix - Once the leading part of a postfix-expression
@@ -947,8 +1041,8 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// '(' type-name ')' '{' initializer-list ',' '}'
///
/// argument-expression-list: [C99 6.5.2]
-/// argument-expression
-/// argument-expression-list ',' assignment-expression
+/// argument-expression ...[opt]
+/// argument-expression-list ',' assignment-expression ...[opt]
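+/// (The trailing '...[opt]' permits C++0x pack expansions, e.g. a call
+/// such as "f(args...)".)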
///
ExprResult
Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
@@ -957,6 +1051,28 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
SourceLocation Loc;
while (1) {
switch (Tok.getKind()) {
+ case tok::code_completion:
+ if (InMessageExpression)
+ return move(LHS);
+
+ Actions.CodeCompletePostfixExpression(getCurScope(), LHS);
+ ConsumeCodeCompletionToken();
+ LHS = ExprError();
+ break;
+
+ case tok::identifier:
+ // If we see identifier: after an expression, and we're not already in a
+ // message send, then this is probably a message send with a missing
+ // opening bracket '['.
+ if (getLang().ObjC1 && !InMessageExpression &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ ParsedType(), LHS.get());
+ break;
+ }
+
+ // Fall through; this isn't a message send.
+
default: // Not a postfix-expression suffix.
return move(LHS);
case tok::l_square: { // postfix-expression: p-e '[' expression ']'
@@ -986,45 +1102,87 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
break;
}
- case tok::l_paren: { // p-e: p-e '(' argument-expression-list[opt] ')'
- ExprVector ArgExprs(Actions);
- CommaLocsTy CommaLocs;
+ case tok::l_paren: // p-e: p-e '(' argument-expression-list[opt] ')'
+ case tok::lesslessless: { // p-e: p-e '<<<' argument-expression-list '>>>'
+ // '(' argument-expression-list[opt] ')'
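+ // (Illustrative: a CUDA kernel launch such as "kern<<<grid, block>>>(x)"
+ // first parses the <<<...>>> execution configuration, then the ordinary
+ // parenthesized argument list.)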
+ tok::TokenKind OpKind = Tok.getKind();
+ InMessageExpressionRAIIObject InMessage(*this, false);
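+ // (The RAII object saves InMessageExpression and restores it on exit;
+ // within call arguments we are no longer directly in a message send.)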
+
+ Expr *ExecConfig = 0;
- Loc = ConsumeParen();
+ if (OpKind == tok::lesslessless) {
+ ExprVector ExecConfigExprs(Actions);
+ CommaLocsTy ExecConfigCommaLocs;
+ SourceLocation LLLLoc, GGGLoc;
- if (LHS.isInvalid()) {
- SkipUntil(tok::r_paren);
- return ExprError();
+ LLLLoc = ConsumeToken();
+
+ if (ParseExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
+ LHS = ExprError();
+ }
+
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::greatergreatergreater);
+ } else if (Tok.isNot(tok::greatergreatergreater)) {
+ MatchRHSPunctuation(tok::greatergreatergreater, LLLLoc);
+ LHS = ExprError();
+ } else {
+ GGGLoc = ConsumeToken();
+ }
+
+ if (!LHS.isInvalid()) {
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen, ""))
+ LHS = ExprError();
+ else
+ Loc = PrevTokLocation;
+ }
+
+ if (!LHS.isInvalid()) {
+ ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
+ LLLLoc, move_arg(ExecConfigExprs), GGGLoc);
+ if (ECResult.isInvalid())
+ LHS = ExprError();
+ else
+ ExecConfig = ECResult.get();
+ }
+ } else {
+ Loc = ConsumeParen();
}
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteCall(getCurScope(), LHS.get(), 0, 0);
ConsumeCodeCompletionToken();
}
-
- if (Tok.isNot(tok::r_paren)) {
- if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall,
- LHS.get())) {
- SkipUntil(tok::r_paren);
- return ExprError();
+
+ if (OpKind == tok::l_paren || !LHS.isInvalid()) {
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall,
+ LHS.get())) {
+ SkipUntil(tok::r_paren);
+ LHS = ExprError();
+ }
}
}
// Match the ')'.
- if (Tok.isNot(tok::r_paren)) {
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ } else if (Tok.isNot(tok::r_paren)) {
MatchRHSPunctuation(tok::r_paren, Loc);
- return ExprError();
- }
-
- if (!LHS.isInvalid()) {
- assert((ArgExprs.size() == 0 || ArgExprs.size()-1 == CommaLocs.size())&&
+ LHS = ExprError();
+ } else {
+ assert((ArgExprs.size() == 0 ||
+ ArgExprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
LHS = Actions.ActOnCallExpr(getCurScope(), LHS.take(), Loc,
- move_arg(ArgExprs), CommaLocs.data(),
- Tok.getLocation());
+ move_arg(ArgExprs), Tok.getLocation(),
+ ExecConfig);
+ ConsumeParen();
}
- ConsumeParen();
break;
}
case tok::arrow:
@@ -1069,14 +1227,16 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// expression), or we didn't see a '~' in the right place. We
// can still parse a destructor name here, but in that case it
// names a real destructor.
+ // Allow explicit constructor calls in Microsoft mode.
+ // FIXME: Add support for explicit call of template constructor.
UnqualifiedId Name;
if (ParseUnqualifiedId(SS,
/*EnteringContext=*/false,
/*AllowDestructorName=*/true,
- /*AllowConstructorName=*/false,
+ /*AllowConstructorName=*/ getLang().Microsoft,
ObjectType,
Name))
- return ExprError();
+ LHS = ExprError();
if (!LHS.isInvalid())
LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.take(), OpLoc,
@@ -1189,6 +1349,7 @@ Parser::ParseExprAfterTypeofSizeofAlignof(const Token &OpTok,
/// unary-expression: [C99 6.5.3]
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
+/// [C++0x] 'sizeof' '...' '(' identifier ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
/// [C++0x] 'alignof' '(' type-id ')'
@@ -1199,6 +1360,46 @@ ExprResult Parser::ParseSizeofAlignofExpression() {
Token OpTok = Tok;
ConsumeToken();
+ // [C++0x] 'sizeof' '...' '(' identifier ')'
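+ // For example, "sizeof...(Args)" yields the number of elements in the
+ // parameter pack Args; a missing '(' as in "sizeof... Args" is accepted
+ // with a fix-it that inserts the parentheses.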
+ if (Tok.is(tok::ellipsis) && OpTok.is(tok::kw_sizeof)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ SourceLocation LParenLoc, RParenLoc;
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::l_paren)) {
+ LParenLoc = ConsumeParen();
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ if (RParenLoc.isInvalid())
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ } else {
+ Diag(Tok, diag::err_expected_parameter_pack);
+ SkipUntil(tok::r_paren);
+ }
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ LParenLoc = PP.getLocForEndOfToken(EllipsisLoc);
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ Diag(LParenLoc, diag::err_paren_sizeof_parameter_pack)
+ << Name
+ << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ } else {
+ Diag(Tok, diag::err_sizeof_parameter_pack);
+ }
+
+ if (!Name)
+ return ExprError();
+
+ return Actions.ActOnSizeofParameterPackExpr(getCurScope(),
+ OpTok.getLocation(),
+ *Name, NameLoc,
+ RParenLoc);
+ }
+
bool isCastExpr;
ParsedType CastTy;
SourceRange CastRange;
@@ -1256,21 +1457,18 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
default: assert(0 && "Not a builtin primary expression!");
case tok::kw___builtin_va_arg: {
ExprResult Expr(ParseAssignmentExpression());
- if (Expr.isInvalid()) {
- SkipUntil(tok::r_paren);
- return ExprError();
- }
if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
- return ExprError();
+ Expr = ExprError();
TypeResult Ty = ParseTypeName();
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_expected_rparen);
- return ExprError();
+ Expr = ExprError();
}
- if (Ty.isInvalid())
+
+ if (Expr.isInvalid() || Ty.isInvalid())
Res = ExprError();
else
Res = Actions.ActOnVAArg(StartLoc, Expr.take(), Ty.get(), ConsumeParen());
@@ -1378,25 +1576,6 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Expr2.take(), ConsumeParen());
break;
}
- case tok::kw___builtin_types_compatible_p:
- TypeResult Ty1 = ParseTypeName();
-
- if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
- return ExprError();
-
- TypeResult Ty2 = ParseTypeName();
-
- if (Tok.isNot(tok::r_paren)) {
- Diag(Tok, diag::err_expected_rparen);
- return ExprError();
- }
-
- if (Ty1.isInvalid() || Ty2.isInvalid())
- Res = ExprError();
- else
- Res = Actions.ActOnTypesCompatibleExpr(StartLoc, Ty1.get(), Ty2.get(),
- ConsumeParen());
- break;
}
if (Res.isInvalid())
@@ -1432,9 +1611,18 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
bool isAmbiguousTypeId;
CastTy = ParsedType();
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ ExprType >= CompoundLiteral? Sema::PCC_ParenthesizedExpression
+ : Sema::PCC_Expression);
+ ConsumeCodeCompletionToken();
+ return ExprError();
+ }
+
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
Diag(Tok, diag::ext_gnu_statement_expr);
- StmtResult Stmt(ParseCompoundStatement(0, true));
+ ParsedAttributes attrs;
+ StmtResult Stmt(ParseCompoundStatement(attrs, true));
ExprType = CompoundStmt;
// If the substmt parsed correctly, build the AST node.
@@ -1455,55 +1643,74 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
return ParseCXXAmbiguousParenExpression(ExprType, CastTy,
OpenLoc, RParenLoc);
- TypeResult Ty = ParseTypeName();
-
- // Match the ')'.
- if (Tok.is(tok::r_paren))
- RParenLoc = ConsumeParen();
- else
- MatchRHSPunctuation(tok::r_paren, OpenLoc);
-
- if (Tok.is(tok::l_brace)) {
- ExprType = CompoundLiteral;
- return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
+ TypeResult Ty;
+
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = ParseTypeName();
}
+
+ // If our type is followed by an identifier and either ':' or ']', then
+ // this is probably an Objective-C message send where the leading '[' is
+ // missing. Recover as if that were the case.
+ if (!Ty.isInvalid() && Tok.is(tok::identifier) && !InMessageExpression &&
+ getLang().ObjC1 && !Ty.get().get().isNull() &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square)) &&
+ Ty.get().get()->isObjCObjectOrInterfaceType()) {
+ Result = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), 0);
+ } else {
+ // Match the ')'.
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, OpenLoc);
+
+ if (Tok.is(tok::l_brace)) {
+ ExprType = CompoundLiteral;
+ return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
+ }
- if (ExprType == CastExpr) {
- // We parsed '(' type-name ')' and the thing after it wasn't a '{'.
+ if (ExprType == CastExpr) {
+ // We parsed '(' type-name ')' and the thing after it wasn't a '{'.
- if (Ty.isInvalid())
- return ExprError();
+ if (Ty.isInvalid())
+ return ExprError();
- CastTy = Ty.get();
+ CastTy = Ty.get();
- // Note that this doesn't parse the subsequent cast-expression, it just
- // returns the parsed type to the callee.
- if (stopIfCastExpr)
- return ExprResult();
-
- // Reject the cast of super idiom in ObjC.
- if (Tok.is(tok::identifier) && getLang().ObjC1 &&
- Tok.getIdentifierInfo() == Ident_super &&
- getCurScope()->isInObjcMethodScope() &&
- GetLookAheadToken(1).isNot(tok::period)) {
- Diag(Tok.getLocation(), diag::err_illegal_super_cast)
- << SourceRange(OpenLoc, RParenLoc);
- return ExprError();
+ // Note that this doesn't parse the subsequent cast-expression, it just
+ // returns the parsed type to the callee.
+ if (stopIfCastExpr)
+ return ExprResult();
+
+ // Reject the cast of super idiom in ObjC.
+ if (Tok.is(tok::identifier) && getLang().ObjC1 &&
+ Tok.getIdentifierInfo() == Ident_super &&
+ getCurScope()->isInObjcMethodScope() &&
+ GetLookAheadToken(1).isNot(tok::period)) {
+ Diag(Tok.getLocation(), diag::err_illegal_super_cast)
+ << SourceRange(OpenLoc, RParenLoc);
+ return ExprError();
+ }
+
+ // Parse the cast-expression that follows it next.
+ // TODO: For cast expression with CastTy.
+ Result = ParseCastExpression(false, false, CastTy);
+ if (!Result.isInvalid())
+ Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc, CastTy,
+ RParenLoc, Result.take());
+ return move(Result);
}
- // Parse the cast-expression that follows it next.
- // TODO: For cast expression with CastTy.
- Result = ParseCastExpression(false, false, CastTy);
- if (!Result.isInvalid())
- Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc, CastTy, RParenLoc,
- Result.take());
- return move(Result);
+ Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
+ return ExprError();
}
-
- Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
- return ExprError();
} else if (TypeOfCast) {
// Parse the expression-list.
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
ExprVector ArgExprs(Actions);
CommaLocsTy CommaLocs;
@@ -1513,6 +1720,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
move_arg(ArgExprs), TypeOfCast);
}
} else {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
Result = ParseExpression();
ExprType = SimpleExpr;
if (!Result.isInvalid() && Tok.is(tok::r_paren))
@@ -1582,8 +1791,8 @@ ExprResult Parser::ParseStringLiteralExpression() {
/// argument-expression-list , assignment-expression
///
/// [C++] expression-list:
-/// [C++] assignment-expression
-/// [C++] expression-list , assignment-expression
+/// [C++] assignment-expression ...[opt]
+/// [C++] expression-list , assignment-expression ...[opt]
///
bool Parser::ParseExpressionList(llvm::SmallVectorImpl<Expr*> &Exprs,
llvm::SmallVectorImpl<SourceLocation> &CommaLocs,
@@ -1596,10 +1805,14 @@ bool Parser::ParseExpressionList(llvm::SmallVectorImpl<Expr*> &Exprs,
if (Tok.is(tok::code_completion)) {
if (Completer)
(Actions.*Completer)(getCurScope(), Data, Exprs.data(), Exprs.size());
+ else
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
ConsumeCodeCompletionToken();
}
ExprResult Expr(ParseAssignmentExpression());
+ if (Tok.is(tok::ellipsis))
+ Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
if (Expr.isInvalid())
return true;
@@ -1618,6 +1831,11 @@ bool Parser::ParseExpressionList(llvm::SmallVectorImpl<Expr*> &Exprs,
/// [clang] specifier-qualifier-list block-declarator
///
void Parser::ParseBlockId() {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ ConsumeCodeCompletionToken();
+ }
+
// Parse the specifier-qualifier-list piece.
DeclSpec DS;
ParseSpecifierQualifierList(DS);
@@ -1627,14 +1845,9 @@ void Parser::ParseBlockId() {
ParseDeclarator(DeclaratorInfo);
// We do this for: ^ __attribute__((noreturn)) {, as DS has the attributes.
- DeclaratorInfo.AddAttributes(DS.TakeAttributes(),
- SourceLocation());
+ DeclaratorInfo.addAttributes(DS.takeAttributes());
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo);
// Inform sema that we are starting a block.
Actions.ActOnBlockArguments(DeclaratorInfo, getCurScope());
@@ -1692,11 +1905,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
return ExprError();
}
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- ParamInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(ParamInfo);
// Inform sema that we are starting a block.
Actions.ActOnBlockArguments(ParamInfo, getCurScope());
@@ -1704,20 +1913,18 @@ ExprResult Parser::ParseBlockLiteralExpression() {
ParseBlockId();
} else {
// Otherwise, pretend we saw (void).
- ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false,
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(ParsedAttributes(),
+ true, false,
SourceLocation(),
0, 0, 0,
+ true, SourceLocation(),
false, SourceLocation(),
false, 0, 0, 0,
CaretLoc, CaretLoc,
ParamInfo),
CaretLoc);
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- ParamInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(ParamInfo);
// Inform sema that we are starting a block.
Actions.ActOnBlockArguments(ParamInfo, getCurScope());
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
index 5041a21..e73578f 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -13,6 +13,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "llvm/Support/ErrorHandling.h"
@@ -500,9 +501,9 @@ ExprResult Parser::ParseCXXTypeid() {
TypeResult Ty = ParseTypeName();
// Match the ')'.
- MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
- if (Ty.isInvalid())
+ if (Ty.isInvalid() || RParenLoc.isInvalid())
return ExprError();
Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/true,
@@ -524,8 +525,10 @@ ExprResult Parser::ParseCXXTypeid() {
if (Result.isInvalid())
SkipUntil(tok::r_paren);
else {
- MatchRHSPunctuation(tok::r_paren, LParenLoc);
-
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ if (RParenLoc.isInvalid())
+ return ExprError();
+
Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/false,
Result.release(), RParenLoc);
}
@@ -534,6 +537,54 @@ ExprResult Parser::ParseCXXTypeid() {
return move(Result);
}
+/// ParseCXXUuidof - This handles the Microsoft C++ __uuidof expression.
+///
+/// '__uuidof' '(' expression ')'
+/// '__uuidof' '(' type-id ')'
+///
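+/// For example (illustrative), "__uuidof(IUnknown)" names the GUID
+/// associated with the IUnknown interface; in the expression form the
+/// operand is not evaluated.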
+ExprResult Parser::ParseCXXUuidof() {
+ assert(Tok.is(tok::kw___uuidof) && "Not '__uuidof'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LParenLoc = Tok.getLocation();
+ SourceLocation RParenLoc;
+
+ // __uuidof expressions are always parenthesized.
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "__uuidof"))
+ return ExprError();
+
+ ExprResult Result;
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, LParenLoc, /*isType=*/true,
+ Ty.get().getAsOpaquePtr(), RParenLoc);
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren);
+ else {
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, LParenLoc, /*isType=*/false,
+ Result.release(), RParenLoc);
+ }
+ }
+
+ return move(Result);
+}
+
/// \brief Parse a C++ pseudo-destructor expression after the base,
/// . or -> operator, and nested-name-specifier have already been
/// parsed.
@@ -670,6 +721,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert(Tok.is(tok::l_paren) && "Expected '('!");
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
+
SourceLocation LParenLoc = ConsumeParen();
ExprVector Exprs(Actions);
@@ -691,9 +744,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
- return Actions.ActOnCXXTypeConstructExpr(DS.getSourceRange(), TypeRep,
- LParenLoc, move_arg(Exprs),
- CommaLocs.data(), RParenLoc);
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, LParenLoc, move_arg(Exprs),
+ RParenLoc);
}
/// ParseCXXCondition - if/switch/while condition expression.
@@ -761,24 +813,22 @@ bool Parser::ParseCXXCondition(ExprResult &ExprOut,
}
// If attributes are present, parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- DeclaratorInfo.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(DeclaratorInfo);
// Type-check the declaration itself.
DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(getCurScope(),
- DeclaratorInfo);
+ DeclaratorInfo);
DeclOut = Dcl.get();
ExprOut = ExprError();
-
+
// '=' assignment-expression
- if (Tok.is(tok::equal)) {
- SourceLocation EqualLoc = ConsumeToken();
+ if (isTokenEqualOrMistypedEqualEqual(
+ diag::err_invalid_equalequal_after_declarator)) {
+ ConsumeToken();
ExprResult AssignExpr(ParseAssignmentExpression());
if (!AssignExpr.isInvalid())
- Actions.AddInitializerToDecl(DeclOut, AssignExpr.take());
+ Actions.AddInitializerToDecl(DeclOut, AssignExpr.take(), false,
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
} else {
// FIXME: C++0x allows a braced-init-list
Diag(Tok, diag::err_expected_equal_after_declarator);
@@ -862,9 +912,24 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
// type-name
case tok::annot_typename: {
- DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
- getTypeAnnotation(Tok));
- break;
+ if (getTypeAnnotation(Tok))
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
+ getTypeAnnotation(Tok));
+ else
+ DS.SetTypeSpecError();
+
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken();
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface. If we don't have Objective-C or a '<', this is
+ // just a normal reference to a typedef name.
+ if (Tok.is(tok::less) && getLang().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
+ DS.Finish(Diags, PP);
+ return;
}
// builtin types
@@ -943,7 +1008,7 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
// Parse one or more of the type specifiers.
if (!ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
ParsedTemplateInfo(), /*SuppressDeclarations*/true)) {
- Diag(Tok, diag::err_operator_missing_type_specifier);
+ Diag(Tok, diag::err_expected_type);
return true;
}
@@ -1667,7 +1732,8 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
first = false;
SourceLocation RLoc = MatchRHSPunctuation(tok::r_square, LLoc);
- D.AddTypeInfo(DeclaratorChunk::getArray(0, /*static=*/false, /*star=*/false,
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, ParsedAttributes(),
+ /*static=*/false, /*star=*/false,
Size.release(), LLoc, RLoc),
RLoc);
@@ -1738,7 +1804,7 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
switch(kind) {
- default: assert(false && "Not a known unary type trait.");
+ default: llvm_unreachable("Not a known unary type trait");
case tok::kw___has_nothrow_assign: return UTT_HasNothrowAssign;
case tok::kw___has_nothrow_copy: return UTT_HasNothrowCopy;
case tok::kw___has_nothrow_constructor: return UTT_HasNothrowConstructor;
@@ -1758,6 +1824,15 @@ static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
}
}
+static BinaryTypeTrait BinaryTypeTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known binary type trait");
+ case tok::kw___is_base_of: return BTT_IsBaseOf;
+ case tok::kw___builtin_types_compatible_p: return BTT_TypeCompatible;
+ case tok::kw___is_convertible_to: return BTT_IsConvertibleTo;
+ }
+}
+
/// ParseUnaryTypeTrait - Parse the built-in unary type-trait
/// pseudo-functions that allow implementation of the TR1/C++0x type traits
/// templates.
@@ -1783,7 +1858,44 @@ ExprResult Parser::ParseUnaryTypeTrait() {
if (Ty.isInvalid())
return ExprError();
- return Actions.ActOnUnaryTypeTrait(UTT, Loc, LParen, Ty.get(), RParen);
+ return Actions.ActOnUnaryTypeTrait(UTT, Loc, Ty.get(), RParen);
+}
+
+/// ParseBinaryTypeTrait - Parse the built-in binary type-trait
+/// pseudo-functions that allow implementation of the TR1/C++0x type traits
+/// templates.
+///
+/// primary-expression:
+/// [GNU] binary-type-trait '(' type-id ',' type-id ')'
+///
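+/// For example (illustrative), "__is_base_of(B, D)" and
+/// "__builtin_types_compatible_p(int, long)" are both parsed here.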
+ExprResult Parser::ParseBinaryTypeTrait() {
+ BinaryTypeTrait BTT = BinaryTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ SourceLocation LParen = Tok.getLocation();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen))
+ return ExprError();
+
+ TypeResult LhsTy = ParseTypeName();
+ if (LhsTy.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ TypeResult RhsTy = ParseTypeName();
+ if (RhsTy.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ SourceLocation RParen = MatchRHSPunctuation(tok::r_paren, LParen);
+
+ return Actions.ActOnBinaryTypeTrait(BTT, Loc, LhsTy.get(), RhsTy.get(), RParen);
}
/// ParseCXXAmbiguousParenExpression - We have parsed the left paren of a
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
index 4347294..82dda2b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -13,6 +13,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "RAIIObjectsForParser.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallString.h"
@@ -136,6 +137,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// [foo ... bar] -> array designator
// [4][foo bar] -> obsolete GNU designation with objc message send.
//
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
SourceLocation StartLoc = ConsumeBracket();
ExprResult Idx;
@@ -146,7 +149,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
if (getLang().ObjC1 && getLang().CPlusPlus) {
// Send to 'super'.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
- NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope()) {
+ NextToken().isNot(tok::period) &&
+ getCurScope()->isInObjcMethodScope()) {
CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
ConsumeToken(),
@@ -306,10 +310,12 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
/// [GNU] '{' '}'
///
/// initializer-list:
-/// designation[opt] initializer
-/// initializer-list ',' designation[opt] initializer
+/// designation[opt] initializer ...[opt]
+/// initializer-list ',' designation[opt] initializer ...[opt]
///
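+/// (The '...[opt]' permits C++0x pack expansions inside braced
+/// initializers, e.g. "T t = { xs... };".)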
ExprResult Parser::ParseBraceInitializer() {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
SourceLocation LBraceLoc = ConsumeBrace();
/// InitExprs - This is the actual list of expressions contained in the
@@ -338,6 +344,9 @@ ExprResult Parser::ParseBraceInitializer() {
else
SubElt = ParseInitializer();
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
// If we couldn't parse the subelement, bail out.
if (!SubElt.isInvalid()) {
InitExprs.push_back(SubElt.release());
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
index 6861ce9..f32a322 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -13,6 +13,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
@@ -39,10 +40,14 @@ Decl *Parser::ParseObjCAtDirectives() {
switch (Tok.getObjCKeywordID()) {
case tok::objc_class:
return ParseObjCAtClassDeclaration(AtLoc);
- case tok::objc_interface:
- return ParseObjCAtInterfaceDeclaration(AtLoc);
- case tok::objc_protocol:
- return ParseObjCAtProtocolDeclaration(AtLoc);
+ case tok::objc_interface: {
+ ParsedAttributes attrs;
+ return ParseObjCAtInterfaceDeclaration(AtLoc, attrs);
+ }
+ case tok::objc_protocol: {
+ ParsedAttributes attrs;
+ return ParseObjCAtProtocolDeclaration(AtLoc, attrs);
+ }
case tok::objc_implementation:
return ParseObjCAtImplementationDeclaration(AtLoc);
case tok::objc_end:
@@ -123,8 +128,8 @@ Decl *Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
/// __attribute__((unavailable))
/// __attribute__((objc_exception)) - used by NSException on 64-bit
///
-Decl *Parser::ParseObjCAtInterfaceDeclaration(
- SourceLocation atLoc, AttributeList *attrList) {
+Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
+ ParsedAttributes &attrs) {
assert(Tok.isObjCAtKeyword(tok::objc_interface) &&
"ParseObjCAtInterfaceDeclaration(): Expected @interface");
ConsumeToken(); // the "interface" identifier
@@ -145,7 +150,9 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(
SourceLocation nameLoc = ConsumeToken();
if (Tok.is(tok::l_paren) &&
!isKnownToBeTypeSpecifier(GetLookAheadToken(1))) { // we have a category.
- SourceLocation lparenLoc = ConsumeParen();
+ // TODO(dgregor): Use the return value from the next line to provide better
+ // recovery.
+ ConsumeParen();
SourceLocation categoryLoc, rparenLoc;
IdentifierInfo *categoryId = 0;
if (Tok.is(tok::code_completion)) {
@@ -177,7 +184,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(
LAngleLoc, EndProtoLoc))
return 0;
- if (attrList) // categories don't support attributes.
+ if (!attrs.empty()) // categories don't support attributes.
Diag(Tok, diag::err_objc_no_attributes_on_category);
Decl *CategoryType =
@@ -229,7 +236,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(
superClassId, superClassLoc,
ProtocolRefs.data(), ProtocolRefs.size(),
ProtocolLocs.data(),
- EndProtoLoc, attrList);
+ EndProtoLoc, attrs.getList());
if (Tok.is(tok::l_brace))
ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, atLoc);
@@ -364,7 +371,8 @@ void Parser::ParseObjCInterfaceDeclList(Decl *interfaceDecl,
// FIXME: as the name implies, this rule allows function definitions.
// We could pass a flag or check for functions during semantic analysis.
- allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(0));
+ ParsedAttributes attrs;
+ allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
continue;
}
@@ -401,7 +409,13 @@ void Parser::ParseObjCInterfaceDeclList(Decl *interfaceDecl,
// Skip until we see an '@' or '}' or ';'.
SkipUntil(tok::r_brace, tok::at);
break;
-
+
+ case tok::objc_implementation:
+ case tok::objc_interface:
+ Diag(Tok, diag::err_objc_missing_end);
+ ConsumeToken();
+ break;
+
case tok::objc_required:
case tok::objc_optional:
// This is only valid on protocols.
@@ -414,13 +428,12 @@ void Parser::ParseObjCInterfaceDeclList(Decl *interfaceDecl,
case tok::objc_property:
if (!getLang().ObjC2)
- Diag(AtLoc, diag::err_objc_propertoes_require_objc2);
+ Diag(AtLoc, diag::err_objc_properties_require_objc2);
ObjCDeclSpec OCDS;
// Parse property attribute list, if any.
if (Tok.is(tok::l_paren))
- ParseObjCPropertyAttribute(OCDS, interfaceDecl,
- allMethods.data(), allMethods.size());
+ ParseObjCPropertyAttribute(OCDS, interfaceDecl);
ObjCPropertyCallback Callback(*this, interfaceDecl, allProperties,
OCDS, AtLoc, MethodImplKind);
@@ -469,9 +482,7 @@ void Parser::ParseObjCInterfaceDeclList(Decl *interfaceDecl,
/// copy
/// nonatomic
///
-void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl,
- Decl **Methods,
- unsigned NumMethods) {
+void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl) {
assert(Tok.getKind() == tok::l_paren);
SourceLocation LHSLoc = ConsumeParen(); // consume '('
@@ -502,32 +513,40 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl,
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
else if (II->isStr("nonatomic"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ else if (II->isStr("atomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
else if (II->isStr("getter") || II->isStr("setter")) {
+ bool IsSetter = II->getNameStart()[0] == 's';
+
// getter/setter require extra treatment.
- if (ExpectAndConsume(tok::equal, diag::err_objc_expected_equal, "",
- tok::r_paren))
+ unsigned DiagID = IsSetter ? diag::err_objc_expected_equal_for_setter :
+ diag::err_objc_expected_equal_for_getter;
+
+ if (ExpectAndConsume(tok::equal, DiagID, "", tok::r_paren))
return;
if (Tok.is(tok::code_completion)) {
- if (II->getNameStart()[0] == 's')
- Actions.CodeCompleteObjCPropertySetter(getCurScope(), ClassDecl,
- Methods, NumMethods);
+ if (IsSetter)
+ Actions.CodeCompleteObjCPropertySetter(getCurScope(), ClassDecl);
else
- Actions.CodeCompleteObjCPropertyGetter(getCurScope(), ClassDecl,
- Methods, NumMethods);
+ Actions.CodeCompleteObjCPropertyGetter(getCurScope(), ClassDecl);
ConsumeCodeCompletionToken();
}
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_expected_ident);
+
+ SourceLocation SelLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(SelLoc);
+
+ if (!SelIdent) {
+ Diag(Tok, diag::err_objc_expected_selector_for_getter_setter)
+ << IsSetter;
SkipUntil(tok::r_paren);
return;
}
- if (II->getNameStart()[0] == 's') {
+ if (IsSetter) {
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
- DS.setSetterName(Tok.getIdentifierInfo());
- ConsumeToken(); // consume method name
+ DS.setSetterName(SelIdent);
if (ExpectAndConsume(tok::colon,
diag::err_expected_colon_after_setter_name, "",
@@ -535,8 +554,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl,
return;
} else {
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
- DS.setGetterName(Tok.getIdentifierInfo());
- ConsumeToken(); // consume method name
+ DS.setGetterName(SelIdent);
}
} else {
Diag(AttrName, diag::err_objc_expected_property_attr) << II;
@@ -705,7 +723,7 @@ bool Parser::isTokIdentifier_in() const {
void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS, bool IsParameter) {
while (1) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPassingType(getCurScope(), DS);
+ Actions.CodeCompleteObjCPassingType(getCurScope(), DS, IsParameter);
ConsumeCodeCompletionToken();
}
@@ -819,9 +837,9 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ReturnType = ParseObjCTypeName(DSRet, false);
// If attributes exist before the method, parse them.
- llvm::OwningPtr<AttributeList> MethodAttrs;
- if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- MethodAttrs.reset(ParseGNUAttributes());
+ ParsedAttributes attrs;
+ if (getLang().ObjC2)
+ MaybeParseGNUAttributes(attrs);
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
@@ -845,25 +863,25 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
llvm::SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- MethodAttrs.reset(addAttributeLists(MethodAttrs.take(),
- ParseGNUAttributes()));
+ if (getLang().ObjC2)
+ MaybeParseGNUAttributes(attrs);
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
Decl *Result
- = Actions.ActOnMethodDeclaration(mLoc, Tok.getLocation(),
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
mType, IDecl, DSRet, ReturnType, Sel,
0,
CParamInfo.data(), CParamInfo.size(),
- MethodAttrs.get(),
- MethodImplKind);
+ attrs.getList(), MethodImplKind);
PD.complete(Result);
return Result;
}
llvm::SmallVector<IdentifierInfo *, 12> KeyIdents;
llvm::SmallVector<Sema::ObjCArgInfo, 12> ArgInfos;
-
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+
while (1) {
Sema::ObjCArgInfo ArgInfo;
@@ -880,8 +898,11 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the argument name, parse them.
ArgInfo.ArgAttrs = 0;
- if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- ArgInfo.ArgAttrs = ParseGNUAttributes();
+ if (getLang().ObjC2) {
+ ParsedAttributes attrs;
+ MaybeParseGNUAttributes(attrs);
+ ArgInfo.ArgAttrs = attrs.getList();
+ }
// Code completion for the next piece of the selector.
if (Tok.is(tok::code_completion)) {
@@ -953,30 +974,30 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
}
- // FIXME: Add support for optional parmameter list...
+ // FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- MethodAttrs.reset(addAttributeLists(MethodAttrs.take(),
- ParseGNUAttributes()));
-
- if (KeyIdents.size() == 0)
+ if (getLang().ObjC2)
+ MaybeParseGNUAttributes(attrs);
+
+ if (KeyIdents.size() == 0) {
+ // Leave prototype scope.
+ PrototypeScope.Exit();
return 0;
+ }
+
Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
&KeyIdents[0]);
Decl *Result
- = Actions.ActOnMethodDeclaration(mLoc, Tok.getLocation(),
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
mType, IDecl, DSRet, ReturnType, Sel,
&ArgInfos[0],
CParamInfo.data(), CParamInfo.size(),
- MethodAttrs.get(),
+ attrs.getList(),
MethodImplKind, isVariadic);
- PD.complete(Result);
-
- // Delete referenced AttributeList objects.
- for (llvm::SmallVectorImpl<Sema::ObjCArgInfo>::iterator
- I = ArgInfos.begin(), E = ArgInfos.end(); I != E; ++I)
- delete I->ArgAttrs;
+ // Leave prototype scope.
+ PrototypeScope.Exit();
+ PD.complete(Result);
return Result;
}
@@ -1031,6 +1052,24 @@ ParseObjCProtocolReferences(llvm::SmallVectorImpl<Decl *> &Protocols,
return false;
}
+/// \brief Parse the Objective-C protocol qualifiers that follow a typename
+/// in a decl-specifier-seq, starting at the '<'.
+bool Parser::ParseObjCProtocolQualifiers(DeclSpec &DS) {
+ assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
+ assert(getLang().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+ SourceLocation LAngleLoc, EndProtoLoc;
+ llvm::SmallVector<Decl *, 8> ProtocolDecl;
+ llvm::SmallVector<SourceLocation, 8> ProtocolLocs;
+ bool Result = ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
+ LAngleLoc, EndProtoLoc);
+ DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
+ ProtocolLocs.data(), LAngleLoc);
+ if (EndProtoLoc.isValid())
+ DS.SetRangeEnd(EndProtoLoc);
+ return Result;
+}
+
/// objc-class-instance-variables:
/// '{' objc-instance-variable-decl-list[opt] '}'
///
@@ -1164,7 +1203,7 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
/// identifier-list ;": objc-interface-decl-list may not start with a
/// semicolon in the first alternative if objc-protocol-refs are omitted.
Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
- AttributeList *attrList) {
+ ParsedAttributes &attrs) {
assert(Tok.isObjCAtKeyword(tok::objc_protocol) &&
"ParseObjCAtProtocolDeclaration(): Expected @protocol");
ConsumeToken(); // the "protocol" identifier
@@ -1186,7 +1225,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
IdentifierLocPair ProtoInfo(protocolName, nameLoc);
ConsumeToken();
return Actions.ActOnForwardProtocolDeclaration(AtLoc, &ProtoInfo, 1,
- attrList);
+ attrs.getList());
}
if (Tok.is(tok::comma)) { // list of forward declarations.
@@ -1215,7 +1254,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
return Actions.ActOnForwardProtocolDeclaration(AtLoc,
&ProtocolRefs[0],
ProtocolRefs.size(),
- attrList);
+ attrs.getList());
}
// Last, and definitely not least, parse a protocol declaration.
@@ -1233,7 +1272,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
ProtocolRefs.data(),
ProtocolRefs.size(),
ProtocolLocs.data(),
- EndProtoLoc, attrList);
+ EndProtoLoc, attrs.getList());
ParseObjCInterfaceDeclList(ProtoType, tok::objc_protocol);
return ProtoType;
}
@@ -1270,7 +1309,7 @@ Decl *Parser::ParseObjCAtImplementationDeclaration(
if (Tok.is(tok::l_paren)) {
// we have a category implementation.
- SourceLocation lparenLoc = ConsumeParen();
+ ConsumeParen();
SourceLocation categoryLoc, rparenLoc;
IdentifierInfo *categoryId = 0;
@@ -1370,10 +1409,8 @@ Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
}
IdentifierInfo *classId = Tok.getIdentifierInfo();
SourceLocation classLoc = ConsumeToken(); // consume class-name;
- if (Tok.isNot(tok::semi)) {
- Diag(Tok, diag::err_expected_semi_after) << "@compatibility_alias";
- return 0;
- }
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ "@compatibility_alias");
return Actions.ActOnCompatiblityAlias(atLoc, aliasId, aliasLoc,
classId, classLoc);
}
@@ -1392,7 +1429,7 @@ Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
assert(Tok.isObjCAtKeyword(tok::objc_synthesize) &&
"ParseObjCPropertyDynamic(): Expected '@synthesize'");
- SourceLocation loc = ConsumeToken(); // consume synthesize
+ ConsumeToken(); // consume synthesize
while (true) {
if (Tok.is(tok::code_completion)) {
@@ -1409,6 +1446,7 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
IdentifierInfo *propertyIvar = 0;
IdentifierInfo *propertyId = Tok.getIdentifierInfo();
SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ SourceLocation propertyIvarLoc;
if (Tok.is(tok::equal)) {
// property '=' ivar-name
ConsumeToken(); // consume '='
@@ -1424,20 +1462,15 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
break;
}
propertyIvar = Tok.getIdentifierInfo();
- ConsumeToken(); // consume ivar-name
+ propertyIvarLoc = ConsumeToken(); // consume ivar-name
}
Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, true, ObjCImpDecl,
- propertyId, propertyIvar);
+ propertyId, propertyIvar, propertyIvarLoc);
if (Tok.isNot(tok::comma))
break;
ConsumeToken(); // consume ','
}
- if (Tok.isNot(tok::semi)) {
- Diag(Tok, diag::err_expected_semi_after) << "@synthesize";
- SkipUntil(tok::semi);
- }
- else
- ConsumeToken(); // consume ';'
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@synthesize");
return 0;
}
@@ -1451,7 +1484,7 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
assert(Tok.isObjCAtKeyword(tok::objc_dynamic) &&
"ParseObjCPropertyDynamic(): Expected '@dynamic'");
- SourceLocation loc = ConsumeToken(); // consume dynamic
+ ConsumeToken(); // consume dynamic
while (true) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCPropertyDefinition(getCurScope(), ObjCImpDecl);
@@ -1467,18 +1500,13 @@ Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
IdentifierInfo *propertyId = Tok.getIdentifierInfo();
SourceLocation propertyLoc = ConsumeToken(); // consume property name
Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, false, ObjCImpDecl,
- propertyId, 0);
+ propertyId, 0, SourceLocation());
if (Tok.isNot(tok::comma))
break;
ConsumeToken(); // consume ','
}
- if (Tok.isNot(tok::semi)) {
- Diag(Tok, diag::err_expected_semi_after) << "@dynamic";
- SkipUntil(tok::semi);
- }
- else
- ConsumeToken(); // consume ';'
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@dynamic");
return 0;
}
@@ -1689,6 +1717,10 @@ Decl *Parser::ParseObjCMethodDefinition() {
// specified Declarator for the method.
Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl);
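+ // During code completion the statements in the method body are not
+ // needed, so skip the body when possible and attach a null body instead.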
+ if (PP.isCodeCompletionEnabled())
+ if (trySkippingFunctionBodyForCodeCompletion())
+ return Actions.ActOnFinishFunctionBody(MDecl, 0);
+
StmtResult FnBody(ParseCompoundStatementBody());
// If the function body could not be parsed, make a bogus compoundstmt.
@@ -1731,7 +1763,7 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
}
// Otherwise, eat the semicolon.
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
return Actions.ActOnExprStmt(Actions.MakeFullExpr(Res.take()));
}
@@ -1785,6 +1817,8 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
/// simple-type-specifier
/// typename-specifier
bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope))
TryAnnotateTypeOrScopeToken();
@@ -1859,6 +1893,35 @@ bool Parser::isSimpleObjCMessageExpression() {
GetLookAheadToken(2).is(tok::identifier);
}
+bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
+ if (!getLang().ObjC1 || !NextToken().is(tok::identifier) ||
+ InMessageExpression)
+ return false;
+
+ ParsedType Type;
+
+ if (Tok.is(tok::annot_typename))
+ Type = getTypeAnnotation(Tok);
+ else if (Tok.is(tok::identifier))
+ Type = Actions.getTypeName(*Tok.getIdentifierInfo(), Tok.getLocation(),
+ getCurScope());
+ else
+ return false;
+
+ if (!Type.get().isNull() && Type.get()->isObjCObjectOrInterfaceType()) {
+ const Token &AfterNext = GetLookAheadToken(2);
+ if (AfterNext.is(tok::colon) || AfterNext.is(tok::r_square)) {
+ if (Tok.is(tok::identifier))
+ TryAnnotateTypeOrScopeToken();
+
+ return Tok.is(tok::annot_typename);
+ }
+ }
+
+ return false;
+}
+
/// objc-message-expr:
/// '[' objc-receiver objc-message-args ']'
///
@@ -1879,6 +1942,8 @@ ExprResult Parser::ParseObjCMessageExpression() {
return ExprError();
}
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
if (getLang().CPlusPlus) {
// We completely separate the C and C++ cases because C++ requires
// more complicated (read: slower) parsing.
@@ -1893,7 +1958,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
// Parse the receiver, which is either a type or an expression.
bool IsExpr;
- void *TypeOrExpr;
+ void *TypeOrExpr = NULL;
if (ParseObjCXXMessageReceiver(IsExpr, TypeOrExpr)) {
SkipUntil(tok::r_square);
return ExprError();
@@ -1992,14 +2057,18 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
ExprArg ReceiverExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
if (Tok.is(tok::code_completion)) {
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, 0, 0);
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, 0, 0,
+ false);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, 0, 0);
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, 0, 0,
+ false);
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
- 0, 0);
+ 0, 0, false);
ConsumeCodeCompletionToken();
}
@@ -2028,6 +2097,29 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
ConsumeToken(); // Eat the ':'.
/// Parse the expression after ':'
+
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ KeyIdents.data(),
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/true);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ KeyIdents.data(),
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/true);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ KeyIdents.data(),
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/true);
+
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
ExprResult Res(ParseAssignmentExpression());
if (Res.isInvalid()) {
// We must manually skip to a ']', otherwise the expression skipper will
@@ -2045,16 +2137,21 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents.data(),
- KeyIdents.size());
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/false);
else if (ReceiverType)
Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
KeyIdents.data(),
- KeyIdents.size());
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/false);
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
KeyIdents.data(),
- KeyIdents.size());
+ KeyIdents.size(),
+ /*AtArgumentExpression=*/false);
ConsumeCodeCompletionToken();
+ SkipUntil(tok::r_square);
+ return ExprError();
}
// Check for another keyword selector.
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
index ddba09a..dfd0da0 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -17,11 +17,24 @@
#include "clang/Lex/Preprocessor.h"
using namespace clang;
+/// \brief Handle the annotation token produced for #pragma unused(...)
+///
+/// Each annot_pragma_unused is followed by the argument token, so e.g.
+/// "#pragma unused(x,y)" becomes:
+/// annot_pragma_unused 'x' annot_pragma_unused 'y'
+void Parser::HandlePragmaUnused() {
+ assert(Tok.is(tok::annot_pragma_unused));
+ SourceLocation UnusedLoc = ConsumeToken();
+ Actions.ActOnPragmaUnused(Tok, getCurScope(), UnusedLoc);
+ ConsumeToken(); // The argument token.
+}
// #pragma GCC visibility comes in two variants:
// 'push' '(' [visibility] ')'
// 'pop'
-void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP, Token &VisTok) {
+void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &VisTok) {
SourceLocation VisLoc = VisTok.getLocation();
Token Tok;
@@ -74,7 +87,9 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP, Token &VisTok) {
// pack '(' [integer] ')'
// pack '(' 'show' ')'
// pack '(' ('push' | 'pop') [',' identifier] [, integer] ')'
-void PragmaPackHandler::HandlePragma(Preprocessor &PP, Token &PackTok) {
+void PragmaPackHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &PackTok) {
SourceLocation PackLoc = PackTok.getLocation();
Token Tok;
@@ -222,16 +237,22 @@ static void ParseAlignPragma(Sema &Actions, Preprocessor &PP, Token &FirstTok,
Actions.ActOnPragmaOptionsAlign(Kind, FirstTok.getLocation(), KindLoc);
}
-void PragmaAlignHandler::HandlePragma(Preprocessor &PP, Token &AlignTok) {
+void PragmaAlignHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &AlignTok) {
ParseAlignPragma(Actions, PP, AlignTok, /*IsOptions=*/false);
}
-void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) {
+void PragmaOptionsHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &OptionsTok) {
ParseAlignPragma(Actions, PP, OptionsTok, /*IsOptions=*/true);
}
// #pragma unused(identifier)
-void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
+void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &UnusedTok) {
// FIXME: Should we be expanding macros here? My guess is no.
SourceLocation UnusedLoc = UnusedTok.getLocation();
@@ -291,14 +312,27 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
assert(RParenLoc.isValid() && "Valid '#pragma unused' must have ')'");
assert(!Identifiers.empty() && "Valid '#pragma unused' must have arguments");
- // Perform the action to handle the pragma.
- Actions.ActOnPragmaUnused(Identifiers.data(), Identifiers.size(),
- parser.getCurScope(), UnusedLoc, LParenLoc, RParenLoc);
+ // For each identifier token, insert into the token stream an
+ // annot_pragma_unused token followed by the identifier token.
+ // This allows us to cache a "#pragma unused" that occurs inside an inline
+ // C++ member function.
+
+ Token *Toks = new Token[2*Identifiers.size()];
+ for (unsigned i=0; i != Identifiers.size(); i++) {
+ Token &pragmaUnusedTok = Toks[2*i], &idTok = Toks[2*i+1];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_unused);
+ pragmaUnusedTok.setLocation(UnusedLoc);
+ idTok = Identifiers[i];
+ }
+ PP.EnterTokenStream(Toks, 2*Identifiers.size(), /*DisableMacroExpansion=*/true, /*OwnsTokens=*/true);
}
// #pragma weak identifier
// #pragma weak identifier '=' identifier
-void PragmaWeakHandler::HandlePragma(Preprocessor &PP, Token &WeakTok) {
+void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &WeakTok) {
// FIXME: Should we be expanding macros here? My guess is no.
SourceLocation WeakLoc = WeakTok.getLocation();
@@ -337,3 +371,64 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP, Token &WeakTok) {
Actions.ActOnPragmaWeakID(WeakName, WeakLoc, WeakNameLoc);
}
}
+
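+// Handles "#pragma STDC FP_CONTRACT on-off-switch", where the switch is
+// ON, OFF, or DEFAULT; the resulting setting is forwarded to Sema.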
+void
+PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+
+ Actions.ActOnPragmaFPContract(OOS);
+}
+
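+// Handles "#pragma OPENCL EXTENSION <name> : enable|disable" (with "all"
+// toggling every known extension), updating the OpenCL option flags.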
+void
+PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "OPENCL";
+ return;
+ }
+ IdentifierInfo *ename = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::colon)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_colon) << ename;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+ IdentifierInfo *op = Tok.getIdentifierInfo();
+
+ unsigned state;
+ if (op->isStr("enable")) {
+ state = 1;
+ } else if (op->isStr("disable")) {
+ state = 0;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+
+ OpenCLOptions &f = Actions.getOpenCLOptions();
+ if (ename->isStr("all")) {
+#define OPENCLEXT(nm) f.nm = state;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
+#define OPENCLEXT(nm) else if (ename->isStr(#nm)) { f.nm = state; }
+#include "clang/Basic/OpenCLExtensions.def"
+ else {
+ PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
+ return;
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
index 0feaa99..bee6af3 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -25,7 +25,8 @@ class PragmaAlignHandler : public PragmaHandler {
public:
explicit PragmaAlignHandler(Sema &A) : PragmaHandler("align"), Actions(A) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
class PragmaGCCVisibilityHandler : public PragmaHandler {
@@ -34,7 +35,8 @@ public:
explicit PragmaGCCVisibilityHandler(Sema &A) : PragmaHandler("visibility"),
Actions(A) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
class PragmaOptionsHandler : public PragmaHandler {
@@ -43,7 +45,8 @@ public:
explicit PragmaOptionsHandler(Sema &A) : PragmaHandler("options"),
Actions(A) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
class PragmaPackHandler : public PragmaHandler {
@@ -52,7 +55,8 @@ public:
explicit PragmaPackHandler(Sema &A) : PragmaHandler("pack"),
Actions(A) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
class PragmaUnusedHandler : public PragmaHandler {
@@ -62,7 +66,8 @@ public:
PragmaUnusedHandler(Sema &A, Parser& p)
: PragmaHandler("unused"), Actions(A), parser(p) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
class PragmaWeakHandler : public PragmaHandler {
@@ -71,9 +76,32 @@ public:
explicit PragmaWeakHandler(Sema &A)
: PragmaHandler("weak"), Actions(A) {}
- virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
};
+class PragmaOpenCLExtensionHandler : public PragmaHandler {
+ Sema &Actions;
+ Parser &parser;
+public:
+ PragmaOpenCLExtensionHandler(Sema &S, Parser& p) :
+ PragmaHandler("EXTENSION"), Actions(S), parser(p) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+
+class PragmaFPContractHandler : public PragmaHandler {
+ Sema &Actions;
+ Parser &parser;
+public:
+ PragmaFPContractHandler(Sema &S, Parser& p) :
+ PragmaHandler("FP_CONTRACT"), Actions(S), parser(p) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index 6c240e6..2d97583 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -75,16 +75,14 @@ using namespace clang;
/// [OBC] '@' 'throw' ';'
///
StmtResult
-Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
+Parser::ParseStatementOrDeclaration(StmtVector &Stmts, bool OnlyStatement) {
const char *SemiError = 0;
StmtResult Res;
ParenBraceBracketBalancer BalancerRAIIObj(*this);
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- Attr = ParseCXX0XAttributes();
- llvm::OwningPtr<AttributeList> AttrList(Attr.AttrList);
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
// Cases in this switch statement should fall through if the parser expects
// the token to end in a semicolon (in which case SemiError should be set),
@@ -101,21 +99,20 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
case tok::code_completion:
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
ConsumeCodeCompletionToken();
- return ParseStatementOrDeclaration(OnlyStatement);
+ return ParseStatementOrDeclaration(Stmts, OnlyStatement);
case tok::identifier:
if (NextToken().is(tok::colon)) { // C99 6.8.1: labeled-statement
// identifier ':' statement
- return ParseLabeledStatement(AttrList.take());
+ return ParseLabeledStatement(attrs);
}
// PASS THROUGH.
default: {
if ((getLang().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- AttrList.take(); //Passing 'Attr' to ParseDeclaration transfers ownership.
- DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext, DeclEnd,
- Attr);
+ DeclGroupPtrTy Decl = ParseDeclaration(Stmts, Declarator::BlockContext,
+ DeclEnd, attrs);
return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
}
@@ -137,64 +134,65 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
return StmtError();
}
// Otherwise, eat the semicolon.
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
return Actions.ActOnExprStmt(Actions.MakeFullExpr(Expr.get()));
}
case tok::kw_case: // C99 6.8.1: labeled-statement
- return ParseCaseStatement(AttrList.take());
+ return ParseCaseStatement(attrs);
case tok::kw_default: // C99 6.8.1: labeled-statement
- return ParseDefaultStatement(AttrList.take());
+ return ParseDefaultStatement(attrs);
case tok::l_brace: // C99 6.8.2: compound-statement
- return ParseCompoundStatement(AttrList.take());
- case tok::semi: // C99 6.8.3p3: expression[opt] ';'
- return Actions.ActOnNullStmt(ConsumeToken());
+ return ParseCompoundStatement(attrs);
+ case tok::semi: { // C99 6.8.3p3: expression[opt] ';'
+ bool LeadingEmptyMacro = Tok.hasLeadingEmptyMacro();
+ return Actions.ActOnNullStmt(ConsumeToken(), LeadingEmptyMacro);
+ }
case tok::kw_if: // C99 6.8.4.1: if-statement
- return ParseIfStatement(AttrList.take());
+ return ParseIfStatement(attrs);
case tok::kw_switch: // C99 6.8.4.2: switch-statement
- return ParseSwitchStatement(AttrList.take());
+ return ParseSwitchStatement(attrs);
case tok::kw_while: // C99 6.8.5.1: while-statement
- return ParseWhileStatement(AttrList.take());
+ return ParseWhileStatement(attrs);
case tok::kw_do: // C99 6.8.5.2: do-statement
- Res = ParseDoStatement(AttrList.take());
+ Res = ParseDoStatement(attrs);
SemiError = "do/while";
break;
case tok::kw_for: // C99 6.8.5.3: for-statement
- return ParseForStatement(AttrList.take());
+ return ParseForStatement(attrs);
case tok::kw_goto: // C99 6.8.6.1: goto-statement
- Res = ParseGotoStatement(AttrList.take());
+ Res = ParseGotoStatement(attrs);
SemiError = "goto";
break;
case tok::kw_continue: // C99 6.8.6.2: continue-statement
- Res = ParseContinueStatement(AttrList.take());
+ Res = ParseContinueStatement(attrs);
SemiError = "continue";
break;
case tok::kw_break: // C99 6.8.6.3: break-statement
- Res = ParseBreakStatement(AttrList.take());
+ Res = ParseBreakStatement(attrs);
SemiError = "break";
break;
case tok::kw_return: // C99 6.8.6.4: return-statement
- Res = ParseReturnStatement(AttrList.take());
+ Res = ParseReturnStatement(attrs);
SemiError = "return";
break;
case tok::kw_asm: {
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
bool msAsm = false;
Res = ParseAsmStatement(msAsm);
+ Res = Actions.ActOnFinishFullStmt(Res.get());
if (msAsm) return move(Res);
SemiError = "asm";
break;
}
case tok::kw_try: // C++ 15: try-block
- return ParseCXXTryBlock(AttrList.take());
+ return ParseCXXTryBlock(attrs);
}
// If we reached this code, the statement must end in a semicolon.
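The LeadingEmptyMacro flag now threaded into ActOnNullStmt records whether the ';' was preceded by a macro that expanded to nothing, presumably so later empty-body diagnostics can tell a deliberate no-op from a stray semicolon. A hedged illustration:

    #define IGNORE(x)                 /* expands to nothing */
    void f(int a) {
      if (a)
        IGNORE(a);                    /* this ';' becomes a NullStmt for which
                                         Tok.hasLeadingEmptyMacro() was true */
    }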
@@ -218,11 +216,10 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
/// identifier ':' statement
/// [GNU] identifier ':' attributes[opt] statement
///
-StmtResult Parser::ParseLabeledStatement(AttributeList *Attr) {
+StmtResult Parser::ParseLabeledStatement(ParsedAttributes &attrs) {
assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
"Not an identifier!");
- llvm::OwningPtr<AttributeList> AttrList(Attr);
Token IdentTok = Tok; // Save the whole token.
ConsumeToken(); // eat the identifier.
@@ -232,19 +229,21 @@ StmtResult Parser::ParseLabeledStatement(AttributeList *Attr) {
SourceLocation ColonLoc = ConsumeToken();
// Read label attributes, if present.
- if (Tok.is(tok::kw___attribute))
- AttrList.reset(addAttributeLists(AttrList.take(), ParseGNUAttributes()));
+ MaybeParseGNUAttributes(attrs);
StmtResult SubStmt(ParseStatement());
// Broken substmt shouldn't prevent the label from being added to the AST.
if (SubStmt.isInvalid())
SubStmt = Actions.ActOnNullStmt(ColonLoc);
-
- // FIXME: use attributes?
- return Actions.ActOnLabelStmt(IdentTok.getLocation(),
- IdentTok.getIdentifierInfo(),
- ColonLoc, SubStmt.get());
+
+ LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
+ IdentTok.getLocation());
+ if (AttributeList *Attrs = attrs.getList())
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
+
+ return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
+ SubStmt.get());
}
/// ParseCaseStatement
@@ -252,10 +251,9 @@ StmtResult Parser::ParseLabeledStatement(AttributeList *Attr) {
/// 'case' constant-expression ':' statement
/// [GNU] 'case' constant-expression '...' constant-expression ':' statement
///
-StmtResult Parser::ParseCaseStatement(AttributeList *Attr) {
+StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs) {
assert(Tok.is(tok::kw_case) && "Not a case stmt!");
// FIXME: Use attributes?
- delete Attr;
// It is very very common for code to contain many case statements recursively
// nested, as in (but usually without indentation):
@@ -316,14 +314,22 @@ StmtResult Parser::ParseCaseStatement(AttributeList *Attr) {
ColonProtection.restore();
- if (Tok.isNot(tok::colon)) {
- Diag(Tok, diag::err_expected_colon_after) << "'case'";
- SkipUntil(tok::colon);
- return StmtError();
- }
-
- SourceLocation ColonLoc = ConsumeToken();
+ SourceLocation ColonLoc;
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ // Treat "case blah;" as a typo for "case blah:".
+ } else if (Tok.is(tok::semi)) {
+ ColonLoc = ConsumeToken();
+ Diag(ColonLoc, diag::err_expected_colon_after) << "'case'"
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_colon_after) << "'case'"
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
StmtResult Case =
Actions.ActOnCaseStmt(CaseLoc, LHS.get(), DotDotDotLoc,
RHS.get(), ColonLoc);
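This hunk, and the matching 'default' recovery below, replace an error-and-skip with a diagnostic plus FixIt. A hedged sketch of inputs that now recover; the comments paraphrase rather than quote the diagnostic text.

    void f(int n) {
      switch (n) {
      case 1;        // typo for "case 1:": replacement FixIt ';' -> ':'
        break;
      case 2         // ':' missing entirely: insertion FixIt after '2'
        break;
      default;       // the same ';'-for-':' recovery in ParseDefaultStatement
        break;
      }
    }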
@@ -379,21 +385,28 @@ StmtResult Parser::ParseCaseStatement(AttributeList *Attr) {
/// 'default' ':' statement
/// Note that this does not parse the 'statement' at the end.
///
-StmtResult Parser::ParseDefaultStatement(AttributeList *Attr) {
+StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
  // FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_default) && "Not a default stmt!");
SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
- if (Tok.isNot(tok::colon)) {
- Diag(Tok, diag::err_expected_colon_after) << "'default'";
- SkipUntil(tok::colon);
- return StmtError();
- }
-
- SourceLocation ColonLoc = ConsumeToken();
+ SourceLocation ColonLoc;
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ // Treat "default;" as a typo for "default:".
+ } else if (Tok.is(tok::semi)) {
+ ColonLoc = ConsumeToken();
+ Diag(ColonLoc, diag::err_expected_colon_after) << "'default'"
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_colon_after) << "'default'"
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
// Diagnose the common error "switch (X) {... default: }", which is not valid.
if (Tok.is(tok::r_brace)) {
Diag(Tok, diag::err_label_end_of_compound_statement);
@@ -436,10 +449,9 @@ StmtResult Parser::ParseDefaultStatement(AttributeList *Attr) {
/// [OMP] barrier-directive
/// [OMP] flush-directive
///
-StmtResult Parser::ParseCompoundStatement(AttributeList *Attr,
+StmtResult Parser::ParseCompoundStatement(ParsedAttributes &attrs,
bool isStmtExpr) {
  // FIXME: Use attributes?
- delete Attr;
  assert(Tok.is(tok::l_brace) && "Not a compound stmt!");
@@ -460,18 +472,53 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
Tok.getLocation(),
"in compound statement ('{}')");
-
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
SourceLocation LBraceLoc = ConsumeBrace(); // eat the '{'.
- // TODO: "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
- // only allowed at the start of a compound stmt regardless of the language.
+ StmtVector Stmts(Actions);
- typedef StmtVector StmtsTy;
- StmtsTy Stmts(Actions);
+ // "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
+ // only allowed at the start of a compound stmt regardless of the language.
+ while (Tok.is(tok::kw___label__)) {
+ SourceLocation LabelLoc = ConsumeToken();
+ Diag(LabelLoc, diag::ext_gnu_local_label);
+
+ llvm::SmallVector<Decl *, 8> DeclsInGroup;
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ break;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+ DeclsInGroup.push_back(Actions.LookupOrCreateLabel(II, IdLoc, true));
+
+ if (!Tok.is(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ DeclSpec DS;
+ DeclGroupPtrTy Res = Actions.FinalizeDeclaratorGroup(getCurScope(), DS,
+ DeclsInGroup.data(), DeclsInGroup.size());
+ StmtResult R = Actions.ActOnDeclStmt(Res, LabelLoc, Tok.getLocation());
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ if (R.isUsable())
+ Stmts.push_back(R.release());
+ }
+
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::annot_pragma_unused)) {
+ HandlePragmaUnused();
+ continue;
+ }
+
StmtResult R;
if (Tok.isNot(tok::kw___extension__)) {
- R = ParseStatementOrDeclaration(false);
+ R = ParseStatementOrDeclaration(Stmts, false);
} else {
// __extension__ can start declarations and it can also be a unary
// operator for expressions. Consume multiple __extension__ markers here
@@ -481,9 +528,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
while (Tok.is(tok::kw___extension__))
ConsumeToken();
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- Attr = ParseCXX0XAttributes();
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
// If this is the start of a declaration, parse it as such.
if (isDeclarationStatement()) {
@@ -492,8 +538,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExtensionRAIIObject O(Diags);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Res = ParseDeclaration(Declarator::BlockContext, DeclEnd,
- Attr);
+ DeclGroupPtrTy Res = ParseDeclaration(Stmts,
+ Declarator::BlockContext, DeclEnd,
+ attrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
// Otherwise this was a unary __extension__ marker.
@@ -507,7 +554,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
// FIXME: Use attributes?
// Eat the semicolon at the end of stmt and convert the expr into a
// statement.
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
R = Actions.ActOnExprStmt(Actions.MakeFullExpr(Res.get()));
}
}
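The new kw___label__ loop accepts GNU local-label declarations at the top of a compound statement (diagnosed as ext_gnu_local_label) and turns them into a DeclStmt. A hedged example of source it now parses:

    void g(int n) {
      {
        __label__ retry, done;   // consumed by the loop above
      retry:
        if (n-- > 0) goto retry;
        goto done;
      done:;
      }
    }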
@@ -543,12 +590,9 @@ bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
Decl *&DeclResult,
SourceLocation Loc,
bool ConvertToBoolean) {
- bool ParseError = false;
-
SourceLocation LParenLoc = ConsumeParen();
if (getLang().CPlusPlus)
- ParseError = ParseCXXCondition(ExprResult, DeclResult, Loc,
- ConvertToBoolean);
+ ParseCXXCondition(ExprResult, DeclResult, Loc, ConvertToBoolean);
else {
ExprResult = ParseExpression();
DeclResult = 0;
@@ -583,9 +627,8 @@ bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
/// [C++] 'if' '(' condition ')' statement
/// [C++] 'if' '(' condition ')' statement 'else' statement
///
-StmtResult Parser::ParseIfStatement(AttributeList *Attr) {
+StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_if) && "Not an if stmt!");
SourceLocation IfLoc = ConsumeToken(); // eat the 'if'.
@@ -706,9 +749,8 @@ StmtResult Parser::ParseIfStatement(AttributeList *Attr) {
/// switch-statement:
/// 'switch' '(' expression ')' statement
/// [C++] 'switch' '(' condition ')' statement
-StmtResult Parser::ParseSwitchStatement(AttributeList *Attr) {
+StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_switch) && "Not a switch stmt!");
SourceLocation SwitchLoc = ConsumeToken(); // eat the 'switch'.
@@ -792,9 +834,8 @@ StmtResult Parser::ParseSwitchStatement(AttributeList *Attr) {
/// while-statement: [C99 6.8.5.1]
/// 'while' '(' expression ')' statement
/// [C++] 'while' '(' condition ')' statement
-StmtResult Parser::ParseWhileStatement(AttributeList *Attr) {
+StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_while) && "Not a while stmt!");
SourceLocation WhileLoc = Tok.getLocation();
@@ -867,9 +908,8 @@ StmtResult Parser::ParseWhileStatement(AttributeList *Attr) {
/// do-statement: [C99 6.8.5.2]
/// 'do' statement 'while' '(' expression ')' ';'
/// Note: this lets the caller parse the end ';'.
-StmtResult Parser::ParseDoStatement(AttributeList *Attr) {
+StmtResult Parser::ParseDoStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_do) && "Not a do stmt!");
SourceLocation DoLoc = ConsumeToken(); // eat the 'do'.
@@ -944,9 +984,8 @@ StmtResult Parser::ParseDoStatement(AttributeList *Attr) {
/// [C++] expression-statement
/// [C++] simple-declaration
///
-StmtResult Parser::ParseForStatement(AttributeList *Attr) {
+StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_for) && "Not a for stmt!");
SourceLocation ForLoc = ConsumeToken(); // eat the 'for'.
@@ -1010,13 +1049,13 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
- AttributeList *AttrList = 0;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- AttrList = ParseCXX0XAttributes().AttrList;
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(Declarator::ForContext, DeclEnd,
- AttrList, false);
+ StmtVector Stmts(Actions);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(Stmts, Declarator::ForContext,
+ DeclEnd, attrs, false);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (Tok.is(tok::semi)) { // for (int x = 4;
@@ -1033,18 +1072,23 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
Collection = ParseExpression();
} else {
Diag(Tok, diag::err_expected_semi_for);
- SkipUntil(tok::semi);
}
} else {
Value = ParseExpression();
+ ForEach = isTokIdentifier_in();
+
// Turn the expression into a stmt.
- if (!Value.isInvalid())
- FirstPart = Actions.ActOnExprStmt(Actions.MakeFullExpr(Value.get()));
+ if (!Value.isInvalid()) {
+ if (ForEach)
+ FirstPart = Actions.ActOnForEachLValueExpr(Value.get());
+ else
+ FirstPart = Actions.ActOnExprStmt(Actions.MakeFullExpr(Value.get()));
+ }
if (Tok.is(tok::semi)) {
ConsumeToken();
- } else if ((ForEach = isTokIdentifier_in())) {
+ } else if (ForEach) {
ConsumeToken(); // consume 'in'
if (Tok.is(tok::code_completion)) {
@@ -1053,8 +1097,14 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
}
Collection = ParseExpression();
} else {
- if (!Value.isInvalid()) Diag(Tok, diag::err_expected_semi_for);
- SkipUntil(tok::semi);
+ if (!Value.isInvalid()) {
+ Diag(Tok, diag::err_expected_semi_for);
+ } else {
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
}
}
if (!ForEach) {
@@ -1062,6 +1112,8 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
// Parse the second part of the for specifier.
if (Tok.is(tok::semi)) { // for (...;;
// no second part.
+ } else if (Tok.is(tok::r_paren)) {
+ // missing both semicolons.
} else {
ExprResult Second;
if (getLang().CPlusPlus)
@@ -1076,12 +1128,16 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
SecondPart = Actions.MakeFullExpr(Second.get());
}
+ if (Tok.isNot(tok::semi)) {
+ if (!SecondPartIsInvalid || SecondVar)
+ Diag(Tok, diag::err_expected_semi_for);
+ else
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, true, true);
+ }
+
if (Tok.is(tok::semi)) {
ConsumeToken();
- } else {
- if (!SecondPartIsInvalid || SecondVar)
- Diag(Tok, diag::err_expected_semi_for);
- SkipUntil(tok::semi);
}
// Parse the third part of the for specifier.
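The reworked recovery only skips tokens when the init expression itself was broken; a merely missing ';' now yields one diagnostic and parsing continues inside the parens. A hedged sketch:

    void h(int n) {
      int i;
      for (i = 0 i < n; ++i)   // missing first ';': one diagnostic, then the
        ;                      // condition and increment still parse normally
      for (i = 0)              // ')' right after the init: the new r_paren
        ;                      // case avoids a second cascading error
    }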
@@ -1137,17 +1193,17 @@ StmtResult Parser::ParseForStatement(AttributeList *Attr) {
///
/// Note: this lets the caller parse the end ';'.
///
-StmtResult Parser::ParseGotoStatement(AttributeList *Attr) {
+StmtResult Parser::ParseGotoStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_goto) && "Not a goto stmt!");
SourceLocation GotoLoc = ConsumeToken(); // eat the 'goto'.
StmtResult Res;
if (Tok.is(tok::identifier)) {
- Res = Actions.ActOnGotoStmt(GotoLoc, Tok.getLocation(),
- Tok.getIdentifierInfo());
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnGotoStmt(GotoLoc, Tok.getLocation(), LD);
ConsumeToken();
} else if (Tok.is(tok::star)) {
// GNU indirect goto extension.
@@ -1173,9 +1229,8 @@ StmtResult Parser::ParseGotoStatement(AttributeList *Attr) {
///
/// Note: this lets the caller parse the end ';'.
///
-StmtResult Parser::ParseContinueStatement(AttributeList *Attr) {
+StmtResult Parser::ParseContinueStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'.
return Actions.ActOnContinueStmt(ContinueLoc, getCurScope());
@@ -1187,9 +1242,8 @@ StmtResult Parser::ParseContinueStatement(AttributeList *Attr) {
///
/// Note: this lets the caller parse the end ';'.
///
-StmtResult Parser::ParseBreakStatement(AttributeList *Attr) {
+StmtResult Parser::ParseBreakStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'.
return Actions.ActOnBreakStmt(BreakLoc, getCurScope());
@@ -1198,9 +1252,8 @@ StmtResult Parser::ParseBreakStatement(AttributeList *Attr) {
/// ParseReturnStatement
/// jump-statement:
/// 'return' expression[opt] ';'
-StmtResult Parser::ParseReturnStatement(AttributeList *Attr) {
+StmtResult Parser::ParseReturnStatement(ParsedAttributes &attrs) {
// FIXME: Use attributes?
- delete Attr;
assert(Tok.is(tok::kw_return) && "Not a return stmt!");
SourceLocation ReturnLoc = ConsumeToken(); // eat the 'return'.
@@ -1225,10 +1278,12 @@ StmtResult Parser::ParseReturnStatement(AttributeList *Attr) {
/// FuzzyParseMicrosoftAsmStatement. When -fms-extensions is enabled, this
/// routine is called to skip/ignore tokens that comprise the MS asm statement.
-StmtResult Parser::FuzzyParseMicrosoftAsmStatement() {
+StmtResult Parser::FuzzyParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
+ SourceLocation EndLoc;
if (Tok.is(tok::l_brace)) {
unsigned short savedBraceCount = BraceCount;
do {
+ EndLoc = Tok.getLocation();
ConsumeAnyToken();
} while (BraceCount > savedBraceCount && Tok.isNot(tok::eof));
} else {
@@ -1238,6 +1293,7 @@ StmtResult Parser::FuzzyParseMicrosoftAsmStatement() {
SourceLocation TokLoc = Tok.getLocation();
unsigned LineNo = SrcMgr.getInstantiationLineNumber(TokLoc);
do {
+ EndLoc = TokLoc;
ConsumeAnyToken();
TokLoc = Tok.getLocation();
} while ((SrcMgr.getInstantiationLineNumber(TokLoc) == LineNo) &&
@@ -1253,10 +1309,10 @@ StmtResult Parser::FuzzyParseMicrosoftAsmStatement() {
ExprVector Constraints(Actions);
ExprVector Exprs(Actions);
ExprVector Clobbers(Actions);
- return Actions.ActOnAsmStmt(Tok.getLocation(), true, true, 0, 0, 0,
+ return Actions.ActOnAsmStmt(AsmLoc, true, true, 0, 0, 0,
move_arg(Constraints), move_arg(Exprs),
AsmString.take(), move_arg(Clobbers),
- Tok.getLocation(), true);
+ EndLoc, true);
}
/// ParseAsmStatement - Parse a GNU extended asm statement.
@@ -1292,7 +1348,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
if (getLang().Microsoft && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
msAsm = true;
- return FuzzyParseMicrosoftAsmStatement();
+ return FuzzyParseMicrosoftAsmStatement(AsmLoc);
}
DeclSpec DS;
SourceLocation Loc = Tok.getLocation();
@@ -1470,6 +1526,10 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl) {
assert(Tok.is(tok::l_brace));
SourceLocation LBraceLoc = Tok.getLocation();
+ if (PP.isCodeCompletionEnabled())
+ if (trySkippingFunctionBodyForCodeCompletion())
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
+
PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
"parsing function body");
@@ -1502,6 +1562,10 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl) {
if (Tok.is(tok::colon))
ParseConstructorInitializer(Decl);
+ if (PP.isCodeCompletionEnabled())
+ if (trySkippingFunctionBodyForCodeCompletion())
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
+
SourceLocation LBraceLoc = Tok.getLocation();
StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc));
// If we failed to parse the try-catch, we just give the function an empty
@@ -1513,14 +1577,32 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl) {
return Actions.ActOnFinishFunctionBody(Decl, FnBody.take());
}
+bool Parser::trySkippingFunctionBodyForCodeCompletion() {
+ assert(Tok.is(tok::l_brace));
+ assert(PP.isCodeCompletionEnabled() &&
+ "Should only be called when in code-completion mode");
+
+ // We're in code-completion mode. Skip parsing for all function bodies unless
+ // the body contains the code-completion point.
+ TentativeParsingAction PA(*this);
+ ConsumeBrace();
+ if (SkipUntil(tok::r_brace, /*StopAtSemi=*/false, /*DontConsume=*/false,
+ /*StopAtCodeCompletion=*/true)) {
+ PA.Commit();
+ return true;
+ }
+
+ PA.Revert();
+ return false;
+}
+
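A hedged illustration of the effect: a body that does not contain the code-completion point is skipped wholesale (SkipUntil reaches the matching '}', so the tentative action commits), while the body holding the point is reverted and parsed for real.

    // Hypothetical completion scenario; <cursor> marks the completion token.
    void f() { int unrelated = 0; }   // skipped: PA.Commit(), body never parsed
    void g() {
      int x = <cursor>                // SkipUntil stops here and returns false,
    }                                 // so PA.Revert() re-parses g normally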
/// ParseCXXTryBlock - Parse a C++ try-block.
///
/// try-block:
/// 'try' compound-statement handler-seq
///
-StmtResult Parser::ParseCXXTryBlock(AttributeList* Attr) {
+StmtResult Parser::ParseCXXTryBlock(ParsedAttributes &attrs) {
// FIXME: Add attributes?
- delete Attr;
assert(Tok.is(tok::kw_try) && "Expected 'try'");
@@ -1544,16 +1626,15 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
if (Tok.isNot(tok::l_brace))
return StmtError(Diag(Tok, diag::err_expected_lbrace));
// FIXME: Possible draft standard bug: attribute-specifier should be allowed?
- StmtResult TryBlock(ParseCompoundStatement(0));
+ ParsedAttributesWithRange attrs;
+ StmtResult TryBlock(ParseCompoundStatement(attrs));
if (TryBlock.isInvalid())
return move(TryBlock);
StmtVector Handlers(Actions);
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
- CXX0XAttributeList Attr = ParseCXX0XAttributes();
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
- }
+ MaybeParseCXX0XAttributes(attrs);
+ ProhibitAttributes(attrs);
+
if (Tok.isNot(tok::kw_catch))
return StmtError(Diag(Tok, diag::err_expected_catch));
while (Tok.is(tok::kw_catch)) {
@@ -1614,7 +1695,8 @@ StmtResult Parser::ParseCXXCatchBlock() {
return StmtError(Diag(Tok, diag::err_expected_lbrace));
// FIXME: Possible draft standard bug: attribute-specifier should be allowed?
- StmtResult Block(ParseCompoundStatement(0));
+ ParsedAttributes attrs;
+ StmtResult Block(ParseCompoundStatement(attrs));
if (Block.isInvalid())
return move(Block);
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
index dfb4785..8387c88 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -196,12 +196,18 @@ Parser::ParseSingleDeclarationAfterTemplate(
return 0;
}
+ ParsedAttributesWithRange prefixAttrs;
+ MaybeParseCXX0XAttributes(prefixAttrs);
+
+ if (Tok.is(tok::kw_using))
+ return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
+ prefixAttrs);
+
// Parse the declaration specifiers, stealing the accumulated
// diagnostics from the template parameters.
ParsingDeclSpec DS(DiagsFromTParams);
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- DS.AddAttributes(ParseCXX0XAttributes().AttrList);
+ DS.takeAttributesFrom(prefixAttrs);
ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
getDeclSpecContextFromDeclaratorContext(Context));
@@ -240,7 +246,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
// Eat the semi colon after the declaration.
ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
- DS.complete(ThisDecl);
+ DeclaratorInfo.complete(ThisDecl);
return ThisDecl;
}
@@ -337,7 +343,7 @@ Parser::ParseTemplateParameterList(unsigned Depth,
// subsumed by whatever goes on in ParseTemplateParameter.
// TODO: This could match >>, and it would be nice to avoid those
// silly errors with template <vec<T>>.
- // Diag(Tok.getLocation(), diag::err_expected_comma_greater);
+ Diag(Tok.getLocation(), diag::err_expected_comma_greater);
SkipUntil(tok::greater, true, true);
return false;
}
@@ -415,12 +421,14 @@ bool Parser::isStartOfTemplateTypeParameter() {
/// parameter-declaration
///
/// type-parameter: (see below)
-/// 'class' ...[opt][C++0x] identifier[opt]
+/// 'class' ...[opt] identifier[opt]
/// 'class' identifier[opt] '=' type-id
-/// 'typename' ...[opt][C++0x] identifier[opt]
+/// 'typename' ...[opt] identifier[opt]
/// 'typename' identifier[opt] '=' type-id
-/// 'template' ...[opt][C++0x] '<' template-parameter-list '>' 'class' identifier[opt]
-/// 'template' '<' template-parameter-list '>' 'class' identifier[opt] = id-expression
+/// 'template' '<' template-parameter-list '>'
+/// 'class' ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// = id-expression
Decl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
if (isStartOfTemplateTypeParameter())
return ParseTypeParameter(Depth, Position);
@@ -459,7 +467,7 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
EllipsisLoc = ConsumeToken();
if (!getLang().CPlusPlus0x)
- Diag(EllipsisLoc, diag::err_variadic_templates);
+ Diag(EllipsisLoc, diag::ext_variadic_templates);
}
// Grab the template parameter name (if given)
@@ -496,8 +504,10 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
/// template parameters.
///
/// type-parameter: [C++ temp.param]
-/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
-/// 'template' '<' template-parameter-list '>' 'class' identifier[opt] = id-expression
+/// 'template' '<' template-parameter-list '>' 'class'
+/// ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// = id-expression
Decl *
Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
@@ -521,8 +531,17 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
<< PP.getSpelling(Tok);
return 0;
}
- SourceLocation ClassLoc = ConsumeToken();
+ ConsumeToken();
+ // Parse the ellipsis, if given.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis)) {
+ EllipsisLoc = ConsumeToken();
+
+ if (!getLang().CPlusPlus0x)
+ Diag(EllipsisLoc, diag::ext_variadic_templates);
+ }
+
// Get the identifier, if given.
SourceLocation NameLoc;
IdentifierInfo* ParamName = 0;
@@ -540,7 +559,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
TemplateParamsTy *ParamList =
Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
TemplateLoc, LAngleLoc,
- &TemplateParams[0],
+ TemplateParams.data(),
TemplateParams.size(),
RAngleLoc);
@@ -563,9 +582,9 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
}
return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc,
- ParamList, ParamName,
- NameLoc, Depth, Position,
- EqualLoc, DefaultArg);
+ ParamList, EllipsisLoc,
+ ParamName, NameLoc, Depth,
+ Position, EqualLoc, DefaultArg);
}
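With the ellipsis parsed between 'class' and the parameter name, template template parameter packs now round-trip, drawing ext_variadic_templates rather than a hard error outside C++0x mode. A hedged sketch:

    template<typename T> struct Vec {};

    template<template<typename> class... Containers>   // EllipsisLoc consumed above
    struct TypeList {};

    TypeList<Vec, Vec> tl;   // hypothetical use: each argument binds to the pack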
/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
@@ -576,8 +595,6 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
/// parameter-declaration
Decl *
Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
- SourceLocation StartLoc = Tok.getLocation();
-
// Parse the declaration-specifiers (i.e., the type).
// FIXME: The type should probably be restricted in some way... Not all
// declarators (parts of declarators?) are accepted for parameters.
@@ -658,7 +675,7 @@ Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
bool Invalid = false;
{
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- if (Tok.isNot(tok::greater))
+ if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater))
Invalid = ParseTemplateArgumentList(TemplateArgs);
if (Invalid) {
@@ -888,7 +905,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// We parse an id-expression that refers to a class template or template
// alias. The grammar we parse is:
//
- // nested-name-specifier[opt] template[opt] identifier
+ // nested-name-specifier[opt] template[opt] identifier ...[opt]
//
// followed by a token that terminates a template argument, such as ',',
// '>', or (in some cases) '>>'.
@@ -896,6 +913,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
/*EnteringContext=*/false);
+ ParsedTemplateArgument Result;
+ SourceLocation EllipsisLoc;
if (SS.isSet() && Tok.is(tok::kw_template)) {
// Parse the optional 'template' keyword following the
// nested-name-specifier.
@@ -907,6 +926,10 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
ConsumeToken(); // the identifier
+ // Parse the ellipsis.
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
// If the next token signals the end of a template argument,
// then we have a dependent template name that could be a template
// template argument.
@@ -917,7 +940,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
/*ObjectType=*/ ParsedType(),
/*EnteringContext=*/false,
Template))
- return ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
}
} else if (Tok.is(tok::identifier)) {
// We may have a (non-dependent) template name.
@@ -926,6 +949,10 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
ConsumeToken(); // the identifier
+ // Parse the ellipsis.
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
if (isEndOfTemplateArgument(Tok)) {
bool MemberOfUnknownSpecialization;
TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
@@ -938,13 +965,16 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
if (TNK == TNK_Dependent_template_name || TNK == TNK_Type_template) {
// We have an id-expression that refers to a class template or
// (C++0x) template alias.
- return ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
}
}
}
- // We don't have a template template argument.
- return ParsedTemplateArgument();
+ // If this is a pack expansion, build it as such.
+ if (EllipsisLoc.isValid() && !Result.isInvalid())
+ Result = Actions.ActOnPackExpansion(Result, EllipsisLoc);
+
+ return Result;
}
/// ParseTemplateArgument - Parse a C++ template argument (C++ [temp.names]).
@@ -962,7 +992,8 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
// Therefore, we initially try to parse a type-id.
if (isCXXTypeId(TypeIdAsTemplateArgument)) {
SourceLocation Loc = Tok.getLocation();
- TypeResult TypeArg = ParseTypeName();
+ TypeResult TypeArg = ParseTypeName(/*Range=*/0,
+ Declarator::TemplateTypeArgContext);
if (TypeArg.isInvalid())
return ParsedTemplateArgument();
@@ -1037,6 +1068,11 @@ bool
Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
while (true) {
ParsedTemplateArgument Arg = ParseTemplateArgument();
+ if (Tok.is(tok::ellipsis)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
+ }
+
if (Arg.isInvalid()) {
SkipUntil(tok::comma, tok::greater, true, true);
return true;
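An argument followed by '...' is now wrapped via ActOnPackExpansion before the validity check, so pack expansions parse anywhere in a template-argument-list. A hedged example:

    template<typename... Ts> struct Tuple {};

    template<typename... Us>
    struct Wrap {
      typedef Tuple<Us...> type;   // 'Us...' takes the new ellipsis branch above
    };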
@@ -1075,3 +1111,14 @@ Decl *Parser::ParseExplicitInstantiation(SourceLocation ExternLoc,
ParsingTemplateParams,
DeclEnd, AS_none);
}
+
+SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
+ if (TemplateParams)
+ return getTemplateParamsRange(TemplateParams->data(),
+ TemplateParams->size());
+
+ SourceRange R(TemplateLoc);
+ if (ExternLoc.isValid())
+ R.setBegin(ExternLoc);
+ return R;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
index c22d99f..a603c37 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -111,10 +111,7 @@ bool Parser::isCXXSimpleDeclaration() {
// We need tentative parsing...
TentativeParsingAction PA(*this);
-
TPR = TryParseSimpleDeclaration();
- SourceLocation TentativeParseLoc = Tok.getLocation();
-
PA.Revert();
// In case of an error, let the declaration parsing code handle it.
@@ -139,9 +136,13 @@ Parser::TPResult Parser::TryParseSimpleDeclaration() {
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
- else
+ else {
ConsumeToken();
-
+
+ if (getLang().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+
assert(Tok.is(tok::l_paren) && "Expected '('");
TPResult TPR = TryParseInitDeclaratorList();
@@ -242,8 +243,12 @@ bool Parser::isCXXConditionDeclaration() {
// type-specifier-seq
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
- else
+ else {
ConsumeToken();
+
+ if (getLang().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
assert(Tok.is(tok::l_paren) && "Expected '('");
// declarator
@@ -313,8 +318,13 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
// type-specifier-seq
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
- else
+ else {
ConsumeToken();
+
+ if (getLang().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+
assert(Tok.is(tok::l_paren) && "Expected '('");
// declarator
@@ -448,6 +458,7 @@ bool Parser::isCXX0XAttributeSpecifier (bool CheckClosing,
/// abstract-declarator:
/// ptr-operator abstract-declarator[opt]
/// direct-abstract-declarator
+/// ...
///
/// direct-abstract-declarator:
/// direct-abstract-declarator[opt]
@@ -470,7 +481,7 @@ bool Parser::isCXX0XAttributeSpecifier (bool CheckClosing,
/// 'volatile'
///
/// declarator-id:
-/// id-expression
+/// '...'[opt] id-expression
///
/// id-expression:
/// unqualified-id
@@ -495,6 +506,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
return TPResult::Error();
if (Tok.is(tok::star) || Tok.is(tok::amp) || Tok.is(tok::caret) ||
+ Tok.is(tok::ampamp) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
// ptr-operator
ConsumeToken();
@@ -509,7 +521,9 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// direct-declarator:
// direct-abstract-declarator:
-
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
if ((Tok.is(tok::identifier) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) &&
mayHaveIdentifier) {
@@ -532,7 +546,12 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// '(' declarator ')'
// '(' attributes declarator ')'
// '(' abstract-declarator ')'
- if (Tok.is(tok::kw___attribute))
+ if (Tok.is(tok::kw___attribute) ||
+ Tok.is(tok::kw___declspec) ||
+ Tok.is(tok::kw___cdecl) ||
+ Tok.is(tok::kw___stdcall) ||
+ Tok.is(tok::kw___fastcall) ||
+ Tok.is(tok::kw___thiscall))
return TPResult::True(); // attributes indicate declaration
TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
if (TPR != TPResult::Ambiguous())
@@ -548,6 +567,10 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
while (1) {
TPResult TPR(TPResult::Ambiguous());
+ // abstract-declarator: ...
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
if (Tok.is(tok::l_paren)) {
// Check whether we have a function declarator or a possible ctor-style
// initializer that follows the declarator. Note that ctor-style
@@ -575,6 +598,118 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
return TPResult::Ambiguous();
}
+Parser::TPResult
+Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
+ switch (Kind) {
+ // Obviously starts an expression.
+ case tok::numeric_constant:
+ case tok::char_constant:
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::l_square:
+ case tok::l_paren:
+ case tok::amp:
+ case tok::ampamp:
+ case tok::star:
+ case tok::plus:
+ case tok::plusplus:
+ case tok::minus:
+ case tok::minusminus:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::kw_sizeof:
+ case tok::kw___func__:
+ case tok::kw_const_cast:
+ case tok::kw_delete:
+ case tok::kw_dynamic_cast:
+ case tok::kw_false:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_typeid:
+ case tok::kw_alignof:
+ case tok::kw_noexcept:
+ case tok::kw_nullptr:
+ case tok::kw___null:
+ case tok::kw___alignof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_types_compatible_p:
+ case tok::kw___builtin_va_arg:
+ case tok::kw___imag:
+ case tok::kw___real:
+ case tok::kw___FUNCTION__:
+ case tok::kw___PRETTY_FUNCTION__:
+ case tok::kw___has_nothrow_assign:
+ case tok::kw___has_nothrow_copy:
+ case tok::kw___has_nothrow_constructor:
+ case tok::kw___has_trivial_assign:
+ case tok::kw___has_trivial_copy:
+ case tok::kw___has_trivial_constructor:
+ case tok::kw___has_trivial_destructor:
+ case tok::kw___has_virtual_destructor:
+ case tok::kw___is_abstract:
+ case tok::kw___is_base_of:
+ case tok::kw___is_class:
+ case tok::kw___is_convertible_to:
+ case tok::kw___is_empty:
+ case tok::kw___is_enum:
+ case tok::kw___is_pod:
+ case tok::kw___is_polymorphic:
+ case tok::kw___is_union:
+ case tok::kw___is_literal:
+ case tok::kw___uuidof:
+ return TPResult::True();
+
+ // Obviously starts a type-specifier-seq:
+ case tok::kw_char:
+ case tok::kw_const:
+ case tok::kw_double:
+ case tok::kw_enum:
+ case tok::kw_float:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_restrict:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw_class:
+ case tok::kw_typename:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_decltype:
+ case tok::kw_thread_local:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___thread:
+ case tok::kw_typeof:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___vector:
+ case tok::kw___pixel:
+ return TPResult::False();
+
+ default:
+ break;
+ }
+
+ return TPResult::Ambiguous();
+}
+
/// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a declaration
/// specifier, TPResult::False() if it is not, TPResult::Ambiguous() if it could
/// be either a decl-specifier or a function-style cast, and TPResult::Error()
@@ -803,6 +938,28 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
// simple-type-specifier:
+ case tok::annot_typename:
+ case_typename:
+ // In Objective-C, we might have a protocol-qualified type.
+ if (getLang().ObjC1 && NextToken().is(tok::less)) {
+ // Tentatively parse the
+ TentativeParsingAction PA(*this);
+ ConsumeToken(); // The type token
+
+ TPResult TPR = TryParseProtocolQualifiers();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error())
+ return TPResult::Error();
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous();
+
+ return TPResult::True();
+ }
+
case tok::kw_char:
case tok::kw_wchar_t:
case tok::kw_char16_t:
@@ -816,11 +973,12 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
case tok::kw_float:
case tok::kw_double:
case tok::kw_void:
- case tok::annot_typename:
- case_typename:
if (NextToken().is(tok::l_paren))
return TPResult::Ambiguous();
+ if (isStartOfObjCClassMessageMissingOpenBracket())
+ return TPResult::False();
+
return TPResult::True();
// GNU typeof support.
@@ -870,6 +1028,30 @@ Parser::TPResult Parser::TryParseTypeofSpecifier() {
return TPResult::Ambiguous();
}
+/// [ObjC] protocol-qualifiers:
+///   '<' identifier-list '>'
+Parser::TPResult Parser::TryParseProtocolQualifiers() {
+ assert(Tok.is(tok::less) && "Expected '<' for qualifier list");
+ ConsumeToken();
+ do {
+ if (Tok.isNot(tok::identifier))
+ return TPResult::Error();
+ ConsumeToken();
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::greater)) {
+ ConsumeToken();
+ return TPResult::Ambiguous();
+ }
+ } while (false);
+
+ return TPResult::Error();
+}
+
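In Objective-C++ the tentative parser has to scan past a protocol-qualifier list before it can classify the construct, which is what this helper provides. A hedged sketch of the ambiguous shape:

    // Hypothetical Objective-C++ input: after a type name, '<' could open a
    // protocol list or be a less-than expression.
    @protocol Copying
    @end
    void use(void) {
      id<Copying> obj;   // the '<Copying>' run is what the new scan consumes
    }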
Parser::TPResult Parser::TryParseDeclarationSpecifier() {
TPResult TPR = isCXXDeclarationSpecifier();
if (TPR != TPResult::Ambiguous())
@@ -877,8 +1059,12 @@ Parser::TPResult Parser::TryParseDeclarationSpecifier() {
if (Tok.is(tok::kw_typeof))
TryParseTypeofSpecifier();
- else
+ else {
ConsumeToken();
+
+ if (getLang().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
assert(Tok.is(tok::l_paren) && "Expected '('!");
return TPResult::Ambiguous();
@@ -940,10 +1126,11 @@ bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
/// parameter-declaration-list ',' parameter-declaration
///
/// parameter-declaration:
-/// decl-specifier-seq declarator
-/// decl-specifier-seq declarator '=' assignment-expression
-/// decl-specifier-seq abstract-declarator[opt]
-/// decl-specifier-seq abstract-declarator[opt] '=' assignment-expression
+/// decl-specifier-seq declarator attributes[opt]
+/// decl-specifier-seq declarator attributes[opt] '=' assignment-expression
+/// decl-specifier-seq abstract-declarator[opt] attributes[opt]
+/// decl-specifier-seq abstract-declarator[opt] attributes[opt]
+/// '=' assignment-expression
///
Parser::TPResult Parser::TryParseParameterDeclarationClause() {
@@ -964,6 +1151,9 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause() {
return TPResult::True(); // '...' is a sign of a function declarator.
}
+ ParsedAttributes attrs;
+ MaybeParseMicrosoftAttributes(attrs);
+
// decl-specifier-seq
TPResult TPR = TryParseDeclarationSpecifier();
if (TPR != TPResult::Ambiguous())
@@ -975,11 +1165,15 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause() {
if (TPR != TPResult::Ambiguous())
return TPR;
+ // [GNU] attributes[opt]
+ if (Tok.is(tok::kw___attribute))
+ return TPResult::True();
+
if (Tok.is(tok::equal)) {
// '=' assignment-expression
// Parse through assignment-expression.
- tok::TokenKind StopToks[3] ={ tok::comma, tok::ellipsis, tok::r_paren };
- if (!SkipUntil(StopToks, 3, true/*StopAtSemi*/, true/*DontConsume*/))
+ tok::TokenKind StopToks[2] ={ tok::comma, tok::r_paren };
+ if (!SkipUntil(StopToks, 2, true/*StopAtSemi*/, true/*DontConsume*/))
return TPResult::Error();
}
@@ -1029,6 +1223,10 @@ Parser::TPResult Parser::TryParseFunctionDeclarator() {
Tok.is(tok::kw_restrict) )
ConsumeToken();
+ // ref-qualifier[opt]
+ if (Tok.is(tok::amp) || Tok.is(tok::ampamp))
+ ConsumeToken();
+
// exception-specification
if (Tok.is(tok::kw_throw)) {
ConsumeToken();
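Consuming an optional ref-qualifier keeps the tentative function-declarator scan aligned with C++0x declarators; a hedged sketch of declarations it must now step over instead of ending the scan early:

    struct S {
      int get() &;    // lvalue ref-qualifier
      int take() &&;  // rvalue ref-qualifier
    };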
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index 44bd0fb..a50763a 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -23,8 +23,8 @@ using namespace clang;
Parser::Parser(Preprocessor &pp, Sema &actions)
: CrashInfo(*this), PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
- GreaterThanIsOperator(true), ColonIsSacred(false),
- TemplateParameterDepth(0) {
+ GreaterThanIsOperator(true), ColonIsSacred(false),
+ InMessageExpression(false), TemplateParameterDepth(0) {
Tok.setKind(tok::eof);
Actions.CurScope = 0;
NumCachedScopes = 0;
@@ -50,6 +50,17 @@ Parser::Parser(Preprocessor &pp, Sema &actions)
WeakHandler.reset(new PragmaWeakHandler(actions));
PP.AddPragmaHandler(WeakHandler.get());
+
+ FPContractHandler.reset(new PragmaFPContractHandler(actions, *this));
+ PP.AddPragmaHandler("STDC", FPContractHandler.get());
+
+ if (getLang().OpenCL) {
+ OpenCLExtensionHandler.reset(
+ new PragmaOpenCLExtensionHandler(actions, *this));
+ PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+
+ PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
+ }
PP.setCodeCompletionHandler(*this);
}
@@ -78,7 +89,7 @@ void PrettyStackTraceParserEntry::print(llvm::raw_ostream &OS) const {
DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, PP.getSourceManager()), DiagID);
+ return Diags.Report(Loc, DiagID);
}
DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
@@ -126,6 +137,8 @@ SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
case tok::greater: LHSName = "<"; DID = diag::err_expected_greater; break;
+ case tok::greatergreatergreater:
+ LHSName = "<<<"; DID = diag::err_expected_ggg; break;
}
Diag(Tok, DID);
Diag(LHSLoc, diag::note_matching) << LHSName;
@@ -133,6 +146,13 @@ SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
return R;
}
+static bool IsCommonTypo(tok::TokenKind ExpectedTok, const Token &Tok) {
+ switch (ExpectedTok) {
+ case tok::semi: return Tok.is(tok::colon); // : for ;
+ default: return false;
+ }
+}
+
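A hedged sketch of the one typo the table currently covers, ':' where ';' was expected; the FixIt swaps the character and parsing continues as if nothing happened.

    int typo(void) {
      int x = 1:   // ':' for ';': diagnosed with a replacement FixIt, then
      return x;    // consumed so the next statement parses cleanly
    }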
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
@@ -146,6 +166,19 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
return false;
}
+ // Detect common single-character typos and resume.
+ if (IsCommonTypo(ExpectedTok, Tok)) {
+ SourceLocation Loc = Tok.getLocation();
+ Diag(Loc, DiagID)
+ << Msg
+ << FixItHint::CreateReplacement(SourceRange(Loc),
+ getTokenSimpleSpelling(ExpectedTok));
+ ConsumeAnyToken();
+
+ // Pretend there wasn't a problem.
+ return false;
+ }
+
const char *Spelling = 0;
SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
if (EndLoc.isValid() &&
@@ -162,6 +195,25 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
return true;
}
+bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
+ if (Tok.is(tok::semi) || Tok.is(tok::code_completion)) {
+ ConsumeAnyToken();
+ return false;
+ }
+
+ if ((Tok.is(tok::r_paren) || Tok.is(tok::r_square)) &&
+ NextToken().is(tok::semi)) {
+ Diag(Tok, diag::err_extraneous_token_before_semi)
+ << PP.getSpelling(Tok)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeAnyToken(); // The ')' or ']'.
+ ConsumeToken(); // The ';'.
+ return false;
+ }
+
+ return ExpectAndConsume(tok::semi, DiagID);
+}
+
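The helper also swallows a single stray ')' or ']' sitting directly before the ';', attaching a removal FixIt instead of cascading. A hedged sketch:

    void callee(void);
    void caller(void) {
      callee());   // stray ')' before ';': err_extraneous_token_before_semi
    }              // with a removal FixIt; both tokens are then consumed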
//===----------------------------------------------------------------------===//
// Error recovery.
//===----------------------------------------------------------------------===//
@@ -175,7 +227,8 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
- bool StopAtSemi, bool DontConsume) {
+ bool StopAtSemi, bool DontConsume,
+ bool StopAtCodeCompletion) {
// We always want this function to skip at least one token if the first token
// isn't T and if not at EOF.
bool isFirstTokenSkipped = true;
@@ -198,23 +251,24 @@ bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
return false;
case tok::code_completion:
- ConsumeToken();
+ if (!StopAtCodeCompletion)
+ ConsumeToken();
return false;
case tok::l_paren:
// Recursively skip properly-nested parens.
ConsumeParen();
- SkipUntil(tok::r_paren, false);
+ SkipUntil(tok::r_paren, false, false, StopAtCodeCompletion);
break;
case tok::l_square:
// Recursively skip properly-nested square brackets.
ConsumeBracket();
- SkipUntil(tok::r_square, false);
+ SkipUntil(tok::r_square, false, false, StopAtCodeCompletion);
break;
case tok::l_brace:
// Recursively skip properly-nested braces.
ConsumeBrace();
- SkipUntil(tok::r_brace, false);
+ SkipUntil(tok::r_brace, false, false, StopAtCodeCompletion);
break;
// Okay, we found a ']' or '}' or ')', which we think should be balanced.
@@ -266,9 +320,8 @@ void Parser::EnterScope(unsigned ScopeFlags) {
N->Init(getCurScope(), ScopeFlags);
Actions.CurScope = N;
} else {
- Actions.CurScope = new Scope(getCurScope(), ScopeFlags);
+ Actions.CurScope = new Scope(getCurScope(), ScopeFlags, Diags);
}
- getCurScope()->setNumErrorsAtStart(Diags.getNumErrors());
}
/// ExitScope - Pop a scope off the scope stack.
@@ -318,6 +371,15 @@ Parser::~Parser() {
UnusedHandler.reset();
PP.RemovePragmaHandler(WeakHandler.get());
WeakHandler.reset();
+
+ if (getLang().OpenCL) {
+ PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+ OpenCLExtensionHandler.reset();
+ PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
+ }
+
+ PP.RemovePragmaHandler("STDC", FPContractHandler.get());
+ FPContractHandler.reset();
PP.clearCodeCompletionHandler();
}
@@ -347,6 +409,9 @@ void Parser::Initialize() {
ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
}
+ Ident_final = 0;
+ Ident_override = 0;
+
Ident_super = &PP.getIdentifierTable().get("super");
if (getLang().AltiVec) {
@@ -358,16 +423,21 @@ void Parser::Initialize() {
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to. This returns true if the EOF was encountered.
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+
+ while (Tok.is(tok::annot_pragma_unused))
+ HandlePragmaUnused();
+
Result = DeclGroupPtrTy();
if (Tok.is(tok::eof)) {
Actions.ActOnEndOfTranslationUnit();
return true;
}
- CXX0XAttributeList Attr;
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- Attr = ParseCXX0XAttributes();
- Result = ParseExternalDeclaration(Attr);
+ ParsedAttributesWithRange attrs;
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ Result = ParseExternalDeclaration(attrs);
return false;
}
@@ -408,8 +478,9 @@ void Parser::ParseTranslationUnit() {
/// ';'
///
/// [C++0x/GNU] 'extern' 'template' declaration
-Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr,
- ParsingDeclSpec *DS) {
+Parser::DeclGroupPtrTy
+Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
Decl *SingleDecl = 0;
@@ -433,12 +504,10 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr,
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseExternalDeclaration(Attr);
+ return ParseExternalDeclaration(attrs);
}
case tok::kw_asm: {
- if (Attr.HasAttr)
- Diag(Attr.Range.getBegin(), diag::err_attributes_not_allowed)
- << Attr.Range;
+ ProhibitAttributes(attrs);
ExprResult Result(ParseSimpleAsm());
@@ -470,7 +539,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr,
ObjCImpDecl? Sema::PCC_ObjCImplementation
: Sema::PCC_Namespace);
ConsumeCodeCompletionToken();
- return ParseExternalDeclaration(Attr);
+ return ParseExternalDeclaration(attrs);
case tok::kw_using:
case tok::kw_namespace:
case tok::kw_typedef:
@@ -480,14 +549,42 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr,
// A function definition cannot start with a these keywords.
{
SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, Attr);
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
}
+ case tok::kw_static:
+ // Parse (then ignore) 'static' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (getLang().CPlusPlus && NextToken().is(tok::kw_template)) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 0;
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+ goto dont_know;
+
case tok::kw_inline:
- if (getLang().CPlusPlus && NextToken().is(tok::kw_namespace)) {
+ if (getLang().CPlusPlus) {
+ tok::TokenKind NextKind = NextToken().getKind();
+
// Inline namespaces. Allowed as an extension even in C++03.
- SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, Attr);
+ if (NextKind == tok::kw_namespace) {
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+
+ // Parse (then ignore) 'inline' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (NextKind == tok::kw_template) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 1;
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
}
goto dont_know;
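Both new keyword cases use the same one-token lookahead shape. Reduced to its skeleton (token and diagnostic names as in the hunk):

    // Peek at the token after 'static'/'inline' without consuming anything.
    // If an explicit template instantiation follows, warn, consume the
    // ignored keyword, and parse the declaration normally; otherwise fall
    // through to the generic path.
    if (getLang().CPlusPlus && NextToken().is(tok::kw_template)) {
      Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
        << 0; // 0 selects 'static', 1 selects 'inline' in the diagnostic text
      // ... ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
    }
    goto dont_know;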
@@ -506,10 +603,12 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr,
default:
dont_know:
// We can't tell whether this is a function-definition or declaration yet.
- if (DS)
- return ParseDeclarationOrFunctionDefinition(*DS, Attr.AttrList);
- else
- return ParseDeclarationOrFunctionDefinition(Attr.AttrList);
+ if (DS) {
+ DS->takeAttributesFrom(attrs);
+ return ParseDeclarationOrFunctionDefinition(*DS);
+ } else {
+ return ParseDeclarationOrFunctionDefinition(attrs);
+ }
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -532,14 +631,13 @@ bool Parser::isDeclarationAfterDeclarator() const {
/// \brief Determine whether the current token, if it occurs after a
/// declarator, indicates the start of a function definition.
bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
- assert(Declarator.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "Isn't a function declarator");
+ assert(Declarator.isFunctionDeclarator() && "Isn't a function declarator");
if (Tok.is(tok::l_brace)) // int X() {}
return true;
// Handle K&R C argument lists: int X(f) int f; {}
if (!getLang().CPlusPlus &&
- Declarator.getTypeObject(0).Fun.isKNRPrototype())
+ Declarator.getFunctionTypeInfo().isKNRPrototype())
return isDeclarationSpecifier();
return Tok.is(tok::colon) || // X() : Base() {} (used for ctors)
@@ -564,12 +662,8 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
///
Parser::DeclGroupPtrTy
Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
- AttributeList *Attr,
AccessSpecifier AS) {
// Parse the common declaration-specifiers piece.
- if (Attr)
- DS.AddAttributes(Attr);
-
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
@@ -622,10 +716,11 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
}
Parser::DeclGroupPtrTy
-Parser::ParseDeclarationOrFunctionDefinition(AttributeList *Attr,
+Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
AccessSpecifier AS) {
ParsingDeclSpec DS(*this);
- return ParseDeclarationOrFunctionDefinition(DS, Attr, AS);
+ DS.takeAttributesFrom(attrs);
+ return ParseDeclarationOrFunctionDefinition(DS, AS);
}
/// ParseFunctionDefinition - We parsed and verified that the specified
@@ -643,11 +738,8 @@ Parser::ParseDeclarationOrFunctionDefinition(AttributeList *Attr,
/// decl-specifier-seq[opt] declarator function-try-block
///
Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo) {
- const DeclaratorChunk &FnTypeInfo = D.getTypeObject(0);
- assert(FnTypeInfo.Kind == DeclaratorChunk::Function &&
- "This isn't a function declarator!");
- const DeclaratorChunk::FunctionTypeInfo &FTI = FnTypeInfo.Fun;
+ const ParsedTemplateInfo &TemplateInfo) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// If this is C90 and the declspecs were completely missing, fudge in an
// implicit int. We do this here because this is the only place where
@@ -669,8 +761,9 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// We should have either an opening brace or, in a C++ constructor,
// we may have a colon.
- if (Tok.isNot(tok::l_brace) && Tok.isNot(tok::colon) &&
- Tok.isNot(tok::kw_try)) {
+ if (Tok.isNot(tok::l_brace) &&
+ (!getLang().CPlusPlus ||
+ (Tok.isNot(tok::colon) && Tok.isNot(tok::kw_try)))) {
Diag(Tok, diag::err_expected_fn_body);
// Skip over garbage, until we get to '{'. Don't eat the '{'.
@@ -724,7 +817,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
/// types for a function with a K&R-style identifier list for arguments.
void Parser::ParseKNRParamDeclarations(Declarator &D) {
// We know that the top-level of this declarator is a function.
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// Enter function-declaration scope, limiting any declarators to the
// function prototype scope, including parameter declarators.
@@ -770,11 +863,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
// Handle the full declarator list.
while (1) {
// If attributes are present, parse them.
- if (Tok.is(tok::kw___attribute)) {
- SourceLocation Loc;
- AttributeList *AttrList = ParseGNUAttributes(&Loc);
- ParmDeclarator.AddAttributes(AttrList, Loc);
- }
+ MaybeParseGNUAttributes(ParmDeclarator);
// Ask the actions module to compute the type for this declarator.
Decl *Param =
@@ -994,7 +1083,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// Determine whether the identifier is a type name.
if (ParsedType Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
Tok.getLocation(), getCurScope(),
- &SS)) {
+ &SS, false,
+ NextToken().is(tok::period))) {
// This is a typename. Replace the current token in-place with an
// annotation type token.
Tok.setKind(tok::annot_typename);
@@ -1115,6 +1205,20 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
return false;
}
+bool Parser::isTokenEqualOrMistypedEqualEqual(unsigned DiagID) {
+ if (Tok.is(tok::equalequal)) {
+ // We have '==' in a context where we expect '='.
+ // The user probably made a typo, intending to type '='. Emit a diagnostic
+ // with a fixit hint to turn '==' into '=', then continue as if '=' was typed.
+ Diag(Tok, DiagID)
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()),
+ getTokenSimpleSpelling(tok::equal));
+ return true;
+ }
+
+ return Tok.is(tok::equal);
+}
+
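isTokenEqualOrMistypedEqualEqual is the standard fixit-recovery shape: diagnose at the offending token, attach a replacement hint, and keep parsing as if the intended token had been seen. A hypothetical call site (the exact diagnostic ID here is an assumption, not taken from this diff):

    // In a context that requires '=' (e.g. after a declarator):
    if (isTokenEqualOrMistypedEqualEqual(
            diag::err_invalid_equalequal_after_declarator)) {
      ConsumeToken();          // eat the '=' (or the '==' we recovered from)
      // ... parse the initializer expression ...
    }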
void Parser::CodeCompletionRecovery() {
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->getFlags() & Scope::FnScope) {
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
index addc795..583f184 100644
--- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -80,6 +80,22 @@ namespace clang {
}
};
+ class InMessageExpressionRAIIObject {
+ bool &InMessageExpression;
+ bool OldValue;
+
+ public:
+ InMessageExpressionRAIIObject(Parser &P, bool Value)
+ : InMessageExpression(P.InMessageExpression),
+ OldValue(P.InMessageExpression) {
+ InMessageExpression = Value;
+ }
+
+ ~InMessageExpressionRAIIObject() {
+ InMessageExpression = OldValue;
+ }
+ };
+
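InMessageExpressionRAIIObject is the usual save/set/restore idiom. The same shape in self-contained standard C++ (illustrative only, independent of the parser):

    #include <cassert>

    // Saves a flag on construction, overwrites it, and restores the saved
    // value on destruction -- so the flag is correct even on early returns
    // or exceptions.
    class FlagSaver {
      bool &Flag;
      bool OldValue;
    public:
      FlagSaver(bool &F, bool NewValue) : Flag(F), OldValue(F) {
        Flag = NewValue;
      }
      ~FlagSaver() { Flag = OldValue; }
    };

    int main() {
      bool InMessage = false;
      {
        FlagSaver Guard(InMessage, true);
        assert(InMessage);
      }                      // Guard destroyed: old value restored
      assert(!InMessage);
      return 0;
    }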
/// \brief RAII object that makes sure paren/bracket/brace count is correct
/// after declaration/statement parsing, even when there's a parsing error.
class ParenBraceBracketBalancer {
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
index 5820969..8dcc5dc 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
@@ -19,7 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/ADT/OwningPtr.h"
#include <cstdio>
@@ -80,6 +80,9 @@ bool FixItRewriter::IncludeInDiagnosticCounts() const {
void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info) {
+ // Chain to the default implementation, which keeps the warning/error counts.
+ DiagnosticClient::HandleDiagnostic(DiagLevel, Info);
+
Client->HandleDiagnostic(DiagLevel, Info);
// Skip over any diagnostics that are ignored or notes.
@@ -141,7 +144,7 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
}
/// \brief Emit a diagnostic via the adapted diagnostic client.
-void FixItRewriter::Diag(FullSourceLoc Loc, unsigned DiagID) {
+void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
// When producing this diagnostic, we temporarily bypass ourselves,
// clear out any current diagnostic, and let the downstream client
// format the diagnostic.
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
index 977e0cf..33e79ed 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
@@ -20,7 +20,7 @@
#include "clang/Rewrite/Rewriters.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -42,6 +42,7 @@ ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
return new ASTConsumer();
}
+namespace {
class FixItRewriteInPlace : public FixItOptions {
public:
std::string RewriteFilename(const std::string &Filename) { return Filename; }
@@ -57,13 +58,13 @@ public:
}
std::string RewriteFilename(const std::string &Filename) {
- llvm::sys::Path Path(Filename);
- std::string Suffix = Path.getSuffix();
- Path.eraseSuffix();
- Path.appendSuffix(NewSuffix + "." + Suffix);
- return Path.c_str();
+ llvm::SmallString<128> Path(Filename);
+ llvm::sys::path::replace_extension(Path,
+ NewSuffix + llvm::sys::path::extension(Path));
+ return Path.str();
}
};
+} // end anonymous namespace
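The rewritten RewriteFilename is the llvm::sys::path replacement for the removed sys::Path suffix methods. A self-contained sketch of the same transformation ("foo.c" with NewSuffix "fixit" becomes "foo.fixit.c"; addSuffix is a hypothetical name):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"
    #include <string>

    std::string addSuffix(const std::string &Filename,
                          const std::string &NewSuffix) {
      llvm::SmallString<128> Path(Filename);
      // extension() keeps the leading '.', so "fixit" + ".c" == "fixit.c";
      // replace_extension() reinserts the dot, yielding "foo.fixit.c".
      llvm::sys::path::replace_extension(
          Path, NewSuffix + llvm::sys::path::extension(Path).str());
      return Path.str().str();
    }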
bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
llvm::StringRef Filename) {
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
index b461df4..df08cd7 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -378,14 +379,16 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
unsigned TokLen = Tok.getLength();
switch (Tok.getKind()) {
default: break;
- case tok::identifier: {
- // Fill in Result.IdentifierInfo, looking up the identifier in the
- // identifier table.
- const IdentifierInfo *II =
- PP.LookUpIdentifierInfo(Tok, BufferStart+TokOffs);
+ case tok::identifier:
+ llvm_unreachable("tok::identifier in raw lexing mode!");
+ break;
+ case tok::raw_identifier: {
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ PP.LookUpIdentifierInfo(Tok);
// If this is a pp-identifier, for a keyword, highlight it as such.
- if (II->getTokenID() != tok::identifier)
+ if (Tok.isNot(tok::identifier))
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
"<span class='keyword'>", "</span>");
break;
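Raw-mode lexing now produces tok::raw_identifier tokens whose IdentifierInfo has not yet been resolved; LookUpIdentifierInfo() both fills it in and retargets the token kind (e.g. turning "for" into a keyword token). The pattern the Rewrite hunks converge on:

    // Tok came from a raw lexer, so identifiers are still 'raw'.
    if (Tok.is(tok::raw_identifier))
      PP.LookUpIdentifierInfo(Tok);  // resolves the info, updates the kind
    // After the lookup, keywords are detectable on the token itself:
    if (Tok.isNot(tok::identifier)) {
      // 'Tok' is a keyword (or other non-identifier); highlight/handle it.
    }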
@@ -473,11 +476,8 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// If this raw token is an identifier, the raw lexer won't have looked up
// the corresponding identifier info for it. Do this now so that it will be
// macro expanded when we re-preprocess it.
- if (Tok.is(tok::identifier)) {
- // Change the kind of this identifier to the appropriate token kind, e.g.
- // turning "for" into a keyword.
- Tok.setKind(PP.LookUpIdentifierInfo(Tok)->getTokenID());
- }
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
TokenStream.push_back(Tok);
@@ -486,7 +486,8 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Temporarily change the diagnostics object so that we ignore any generated
// diagnostics from this pass.
- Diagnostic TmpDiags(new IgnoringDiagClient);
+ Diagnostic TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
+ new IgnoringDiagClient);
// FIXME: This is a huge hack; we reuse the input preprocessor because we want
// its state, but we aren't actually changing it (we hope). This should really
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
index 910fa6b..0453098 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
@@ -17,7 +17,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include "llvm/ADT/OwningPtr.h"
#include <cstdio>
@@ -78,7 +78,7 @@ static void LexRawTokensFromMainFile(Preprocessor &PP,
// If we have an identifier with no identifier info for our raw token, look
// up the identifier info. This is important for equality comparison of
// identifier tokens.
- if (RawTok.is(tok::identifier) && !RawTok.getIdentifierInfo())
+ if (RawTok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(RawTok);
RawTokens.push_back(RawTok);
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
index 578a063..875a0c7 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
@@ -33,14 +33,14 @@ using llvm::utostr;
namespace {
class RewriteObjC : public ASTConsumer {
enum {
- BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
block, ... */
BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
__block variable */
- BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
helpers */
- BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
};
@@ -139,10 +139,10 @@ namespace {
llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
// This maps a property to its assignment statement.
- llvm::DenseMap<ObjCPropertyRefExpr *, BinaryOperator *> PropSetters;
+ llvm::DenseMap<Expr *, BinaryOperator *> PropSetters;
// This maps a property to its synthesized message expression.
// This allows us to rewrite chained getters (e.g. o.a.b.c).
- llvm::DenseMap<ObjCPropertyRefExpr *, Stmt *> PropGetters;
+ llvm::DenseMap<Expr *, Stmt *> PropGetters;
// This maps an original source AST to its rewritten form. This allows
// us to avoid rewriting the same node twice (which is very uncommon).
@@ -155,7 +155,7 @@ namespace {
bool DisableReplaceStmt;
- static const int OBJC_ABI_VERSION =7 ;
+ static const int OBJC_ABI_VERSION = 7;
public:
virtual void Initialize(ASTContext &context);
@@ -195,7 +195,7 @@ namespace {
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
- // Measaure the old text.
+ // Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
@@ -247,11 +247,12 @@ namespace {
ObjCCategoryImplDecl *CID);
void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
void RewriteImplementationDecl(Decl *Dcl);
- void RewriteObjCMethodDecl(ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
void RewriteTypeIntoString(QualType T, std::string &ResultStr,
const FunctionType *&FPRetType);
void RewriteByRefString(std::string &ResultStr, const std::string &Name,
- ValueDecl *VD);
+ ValueDecl *VD, bool def=false);
void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
void RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *Dcl);
@@ -281,8 +282,8 @@ namespace {
Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV, SourceLocation OrigStart,
bool &replaced);
Stmt *RewriteObjCNestedIvarRefExpr(Stmt *S, bool &replaced);
- Stmt *RewritePropertyGetter(ObjCPropertyRefExpr *PropRefExpr);
- Stmt *RewritePropertySetter(BinaryOperator *BinOp, Expr *newStmt,
+ Stmt *RewritePropertyOrImplicitGetter(Expr *PropOrGetterRefExpr);
+ Stmt *RewritePropertyOrImplicitSetter(BinaryOperator *BinOp, Expr *newStmt,
SourceRange SrcRange);
Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
@@ -344,8 +345,7 @@ namespace {
std::string &Result);
void SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result);
- void SynthesizeIvarOffsetComputation(ObjCContainerDecl *IDecl,
- ObjCIvarDecl *ivar,
+ void SynthesizeIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result);
void RewriteImplementations();
void SynthesizeMetaDataIntoBuffer(std::string &Result);
@@ -404,6 +404,16 @@ namespace {
return false;
}
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType())
+ T = Context->getObjCIdType();
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType())
+ T = Context->getObjCIdType();
+ }
+
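The helper above strips protocol qualifiers so the rewriter can emit plain C types. The mapping it implements, spelled out:

    // id<P>              -> id     (isObjCQualifiedIdType)
    // Class<P>           -> Class  (isObjCQualifiedClassType)
    // SomeInterface<P> * -> id     (pointer to a qualified interface)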
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
@@ -423,6 +433,7 @@ namespace {
return false;
}
bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
void GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen);
void RewriteCastExpr(CStyleCastExpr *CE);
@@ -439,20 +450,30 @@ namespace {
To += From[i];
}
}
+
+ QualType getSimpleFunctionType(QualType result,
+ const QualType *args,
+ unsigned numArgs,
+ bool variadic = false) {
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, numArgs, fpi);
+ }
};
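getSimpleFunctionType wraps the new ExtProtoInfo-based ASTContext::getFunctionType() signature, replacing the long positional-argument form that the Synth*FunctionDecl hunks below delete. Sketch of direct use (Context and ArgTys as elsewhere in this file):

    // Build an 'id (id, SEL, ...)' prototype. Only fields that differ from
    // the ExtProtoInfo defaults need to be set explicitly.
    FunctionProtoType::ExtProtoInfo fpi;
    fpi.Variadic = true;
    QualType msgSendType =
        Context->getFunctionType(Context->getObjCIdType(),
                                 &ArgTys[0], ArgTys.size(), fpi);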
// Helper function: create a CStyleCastExpr with trivial type source info.
CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
- return CStyleCastExpr::Create(*Ctx, Ty, Kind, E, 0, TInfo,
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
SourceLocation(), SourceLocation());
}
}
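Much of the remaining churn in this file is mechanical: expression constructors now take an explicit value kind (VK_RValue/VK_LValue, plus OK_Ordinary where an object kind is expected), and the placeholder CK_Unknown cast kind gives way to a specific kind, usually CK_BitCast. The helper above already shows the new shape:

    return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
                                  SourceLocation(), SourceLocation());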
void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
NamedDecl *D) {
- if (FunctionProtoType *fproto = dyn_cast<FunctionProtoType>(funcType)) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
E = fproto->arg_type_end(); I && (I != E); ++I)
if (isTopLevelBlockPointerType(*I)) {
@@ -733,8 +754,8 @@ void RewriteObjC::RewriteInclude() {
}
}
-static std::string getIvarAccessString(ObjCInterfaceDecl *ClassDecl,
- ObjCIvarDecl *OID) {
+static std::string getIvarAccessString(ObjCIvarDecl *OID) {
+ const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
std::string S;
S = "((struct ";
S += ClassDecl->getIdentifier()->getName();
@@ -762,64 +783,67 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'getter' function.
ObjCPropertyDecl *PD = PID->getPropertyDecl();
- ObjCInterfaceDecl *ClassDecl = PD->getGetterMethodDecl()->getClassInterface();
ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
if (!OID)
return;
unsigned Attributes = PD->getPropertyAttributes();
- bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
- (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy));
- std::string Getr;
- if (GenGetProperty && !objcGetPropertyDefined) {
- objcGetPropertyDefined = true;
- // FIXME. Is this attribute correct in all cases?
- Getr = "\nextern \"C\" __declspec(dllimport) "
- "id objc_getProperty(id, SEL, long, bool);\n";
- }
- RewriteObjCMethodDecl(PD->getGetterMethodDecl(), Getr);
- Getr += "{ ";
- // Synthesize an explicit cast to gain access to the ivar.
- // See objc-act.c:objc_synthesize_new_getter() for details.
- if (GenGetProperty) {
- // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
- Getr += "typedef ";
- const FunctionType *FPRetType = 0;
- RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
- FPRetType);
- Getr += " _TYPE";
- if (FPRetType) {
- Getr += ")"; // close the precedence "scope" for "*".
+ if (!PD->getGetterMethodDecl()->isDefined()) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = 0;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
- // Now, emit the argument types (if any).
- if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
- Getr += "(";
- for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
- if (i) Getr += ", ";
- std::string ParamStr = FT->getArgType(i).getAsString(
- Context->PrintingPolicy);
- Getr += ParamStr;
- }
- if (FT->isVariadic()) {
- if (FT->getNumArgs()) Getr += ", ";
- Getr += "...";
- }
- Getr += ")";
- } else
- Getr += "()";
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->PrintingPolicy);
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ SynthesizeIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
}
- Getr += ";\n";
- Getr += "return (_TYPE)";
- Getr += "objc_getProperty(self, _cmd, ";
- SynthesizeIvarOffsetComputation(ClassDecl, OID, Getr);
- Getr += ", 1)";
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr);
}
- else
- Getr += "return " + getIvarAccessString(ClassDecl, OID);
- Getr += "; }";
- InsertText(onePastSemiLoc, Getr);
- if (PD->isReadOnly())
+
+ if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
return;
// Generate the 'setter' function.
@@ -833,13 +857,14 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
"void objc_setProperty (id, SEL, long, id, bool, bool);\n";
}
- RewriteObjCMethodDecl(PD->getSetterMethodDecl(), Setr);
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
Setr += "{ ";
// Synthesize an explicit cast to initialize the ivar.
// See objc-act.c:objc_synthesize_new_setter() for details.
if (GenSetProperty) {
Setr += "objc_setProperty (self, _cmd, ";
- SynthesizeIvarOffsetComputation(ClassDecl, OID, Setr);
+ SynthesizeIvarOffsetComputation(OID, Setr);
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
@@ -853,7 +878,7 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += "0)";
}
else {
- Setr += getIvarAccessString(ClassDecl, OID) + " = ";
+ Setr += getIvarAccessString(OID) + " = ";
Setr += PD->getName();
}
Setr += "; }";
@@ -962,6 +987,10 @@ void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
I != E; ++I)
RewriteMethodDeclaration(*I);
+ for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
// Lastly, comment out the @end.
SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
ReplaceText(LocEnd, strlen("@end"), "/* @end */");
@@ -1014,7 +1043,8 @@ void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
ResultStr += T.getAsString(Context->PrintingPolicy);
}
-void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
+void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
std::string &ResultStr) {
//fprintf(stderr,"In RewriteObjCMethodDecl\n");
const FunctionType *FPRetType = 0;
@@ -1030,7 +1060,7 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
else
NameStr += "_C_";
- NameStr += OMD->getClassInterface()->getNameAsString();
+ NameStr += IDecl->getNameAsString();
NameStr += "_";
if (ObjCCategoryImplDecl *CID =
@@ -1056,14 +1086,14 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
// invisible arguments
if (OMD->isInstanceMethod()) {
- QualType selfTy = Context->getObjCInterfaceType(OMD->getClassInterface());
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
selfTy = Context->getPointerType(selfTy);
if (!LangOpts.Microsoft) {
- if (ObjCSynthesizedStructs.count(OMD->getClassInterface()))
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
ResultStr += "struct ";
}
// When rewriting for Microsoft, explicitly omit the structure name.
- ResultStr += OMD->getClassInterface()->getNameAsString();
+ ResultStr += IDecl->getNameAsString();
ResultStr += " *";
}
else
@@ -1131,7 +1161,7 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
I != E; ++I) {
std::string ResultStr;
ObjCMethodDecl *OMD = *I;
- RewriteObjCMethodDecl(OMD, ResultStr);
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1146,7 +1176,7 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
I != E; ++I) {
std::string ResultStr;
ObjCMethodDecl *OMD = *I;
- RewriteObjCMethodDecl(OMD, ResultStr);
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
@@ -1199,42 +1229,74 @@ void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
"/* @end */");
}
-Stmt *RewriteObjC::RewritePropertySetter(BinaryOperator *BinOp, Expr *newStmt,
+Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(BinaryOperator *BinOp, Expr *newStmt,
SourceRange SrcRange) {
- // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr.
+ ObjCMethodDecl *OMD = 0;
+ QualType Ty;
+ Selector Sel;
+ Stmt *Receiver = 0;
+ bool Super = false;
+ QualType SuperTy;
+ SourceLocation SuperLocation;
+ SourceLocation SelectorLoc;
+ // Synthesize an ObjCMessageExpr from an ObjCPropertyRefExpr or an ObjCImplicitSetterGetterRefExpr.
// This allows us to reuse all the fun and games in SynthMessageExpr().
- ObjCPropertyRefExpr *PropRefExpr = dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS());
- ObjCMessageExpr *MsgExpr;
- ObjCPropertyDecl *PDecl = PropRefExpr->getProperty();
+ if (ObjCPropertyRefExpr *PropRefExpr =
+ dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS())) {
+ SelectorLoc = PropRefExpr->getLocation();
+ if (PropRefExpr->isExplicitProperty()) {
+ ObjCPropertyDecl *PDecl = PropRefExpr->getExplicitProperty();
+ OMD = PDecl->getSetterMethodDecl();
+ Ty = PDecl->getType();
+ Sel = PDecl->getSetterName();
+ } else {
+ OMD = PropRefExpr->getImplicitPropertySetter();
+ Sel = OMD->getSelector();
+ Ty = PropRefExpr->getType();
+ }
+ Super = PropRefExpr->isSuperReceiver();
+ if (!Super) {
+ Receiver = PropRefExpr->getBase();
+ } else {
+ SuperTy = PropRefExpr->getSuperReceiverType();
+ SuperLocation = PropRefExpr->getReceiverLocation();
+ }
+ }
+
+ assert(OMD && "RewritePropertyOrImplicitSetter - null OMD");
llvm::SmallVector<Expr *, 1> ExprVec;
ExprVec.push_back(newStmt);
- Stmt *Receiver = PropRefExpr->getBase();
- ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(Receiver);
- if (PRE && PropGetters[PRE]) {
- // This allows us to handle chain/nested property getters.
- Receiver = PropGetters[PRE];
- }
- if (isa<ObjCSuperExpr>(Receiver))
+ ObjCMessageExpr *MsgExpr;
+ if (Super)
MsgExpr = ObjCMessageExpr::Create(*Context,
- PDecl->getType().getNonReferenceType(),
+ Ty.getNonReferenceType(),
+ Expr::getValueKindForType(Ty),
/*FIXME?*/SourceLocation(),
- Receiver->getLocStart(),
+ SuperLocation,
/*IsInstanceSuper=*/true,
- cast<Expr>(Receiver)->getType(),
- PDecl->getSetterName(),
- PDecl->getSetterMethodDecl(),
+ SuperTy,
+ Sel, SelectorLoc, OMD,
&ExprVec[0], 1,
/*FIXME:*/SourceLocation());
- else
+ else {
+ // FIXME. Refactor this into common code with that in
+ // RewritePropertyOrImplicitGetter
+ assert(Receiver && "RewritePropertyOrImplicitSetter - null Receiver");
+ if (Expr *Exp = dyn_cast<Expr>(Receiver))
+ if (PropGetters[Exp])
+ // This allows us to handle chain/nested property/implicit getters.
+ Receiver = PropGetters[Exp];
+
MsgExpr = ObjCMessageExpr::Create(*Context,
- PDecl->getType().getNonReferenceType(),
+ Ty.getNonReferenceType(),
+ Expr::getValueKindForType(Ty),
/*FIXME: */SourceLocation(),
cast<Expr>(Receiver),
- PDecl->getSetterName(),
- PDecl->getSetterMethodDecl(),
+ Sel, SelectorLoc, OMD,
&ExprVec[0], 1,
/*FIXME:*/SourceLocation());
+ }
Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr);
// Now do the actual rewrite.
@@ -1246,57 +1308,93 @@ Stmt *RewriteObjC::RewritePropertySetter(BinaryOperator *BinOp, Expr *newStmt,
return ReplacingStmt;
}
-Stmt *RewriteObjC::RewritePropertyGetter(ObjCPropertyRefExpr *PropRefExpr) {
- // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr.
+Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(Expr *PropOrGetterRefExpr) {
+ // Synthesize an ObjCMessageExpr from an ObjCPropertyRefExpr or an implicit getter.
// This allows us to reuse all the fun and games in SynthMessageExpr().
- ObjCMessageExpr *MsgExpr;
- ObjCPropertyDecl *PDecl = PropRefExpr->getProperty();
-
- Stmt *Receiver = PropRefExpr->getBase();
-
- ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(Receiver);
- if (PRE && PropGetters[PRE]) {
- // This allows us to handle chain/nested property getters.
- Receiver = PropGetters[PRE];
+ Stmt *Receiver = 0;
+ ObjCMethodDecl *OMD = 0;
+ QualType Ty;
+ Selector Sel;
+ bool Super = false;
+ QualType SuperTy;
+ SourceLocation SuperLocation;
+ SourceLocation SelectorLoc;
+ if (ObjCPropertyRefExpr *PropRefExpr =
+ dyn_cast<ObjCPropertyRefExpr>(PropOrGetterRefExpr)) {
+ SelectorLoc = PropRefExpr->getLocation();
+ if (PropRefExpr->isExplicitProperty()) {
+ ObjCPropertyDecl *PDecl = PropRefExpr->getExplicitProperty();
+ OMD = PDecl->getGetterMethodDecl();
+ Ty = PDecl->getType();
+ Sel = PDecl->getGetterName();
+ } else {
+ OMD = PropRefExpr->getImplicitPropertyGetter();
+ Sel = OMD->getSelector();
+ Ty = PropRefExpr->getType();
+ }
+ Super = PropRefExpr->isSuperReceiver();
+ if (!Super)
+ Receiver = PropRefExpr->getBase();
+ else {
+ SuperTy = PropRefExpr->getSuperReceiverType();
+ SuperLocation = PropRefExpr->getReceiverLocation();
+ }
}
-
- if (isa<ObjCSuperExpr>(Receiver))
+
+ assert (OMD && "RewritePropertyOrImplicitGetter - OMD is null");
+
+ ObjCMessageExpr *MsgExpr;
+ if (Super)
MsgExpr = ObjCMessageExpr::Create(*Context,
- PDecl->getType().getNonReferenceType(),
- /*FIXME:*/SourceLocation(),
- Receiver->getLocStart(),
+ Ty.getNonReferenceType(),
+ Expr::getValueKindForType(Ty),
+ /*FIXME?*/SourceLocation(),
+ SuperLocation,
/*IsInstanceSuper=*/true,
- cast<Expr>(Receiver)->getType(),
- PDecl->getGetterName(),
- PDecl->getGetterMethodDecl(),
+ SuperTy,
+ Sel, SelectorLoc, OMD,
0, 0,
/*FIXME:*/SourceLocation());
- else
+ else {
+ assert (Receiver && "RewritePropertyOrImplicitGetter - Receiver is null");
+ if (Expr *Exp = dyn_cast<Expr>(Receiver))
+ if (PropGetters[Exp])
+ // This allows us to handle chain/nested property/implicit getters.
+ Receiver = PropGetters[Exp];
MsgExpr = ObjCMessageExpr::Create(*Context,
- PDecl->getType().getNonReferenceType(),
+ Ty.getNonReferenceType(),
+ Expr::getValueKindForType(Ty),
/*FIXME:*/SourceLocation(),
cast<Expr>(Receiver),
- PDecl->getGetterName(),
- PDecl->getGetterMethodDecl(),
+ Sel, SelectorLoc, OMD,
0, 0,
/*FIXME:*/SourceLocation());
+ }
Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr);
if (!PropParentMap)
PropParentMap = new ParentMap(CurrentBody);
-
- Stmt *Parent = PropParentMap->getParent(PropRefExpr);
- if (Parent && isa<ObjCPropertyRefExpr>(Parent)) {
+ bool NestedPropertyRef = false;
+ Stmt *Parent = PropParentMap->getParent(PropOrGetterRefExpr);
+ ImplicitCastExpr *ICE = 0;
+ if (Parent)
+ if ((ICE = dyn_cast<ImplicitCastExpr>(Parent))) {
+ assert((ICE->getCastKind() == CK_GetObjCProperty)
+ && "RewritePropertyOrImplicitGetter");
+ Parent = PropParentMap->getParent(Parent);
+ NestedPropertyRef = (Parent && isa<ObjCPropertyRefExpr>(Parent));
+ }
+ if (NestedPropertyRef) {
// We stash away the ReplacingStmt since actually doing the
// replacement/rewrite won't work for nested getters (e.g. obj.p.i)
- PropGetters[PropRefExpr] = ReplacingStmt;
+ PropGetters[ICE] = ReplacingStmt;
// NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
// to things that stay around.
Context->Deallocate(MsgExpr);
- return PropRefExpr; // return the original...
+ return PropOrGetterRefExpr; // return the original...
} else {
- ReplaceStmt(PropRefExpr, ReplacingStmt);
+ ReplaceStmt(PropOrGetterRefExpr, ReplacingStmt);
// delete PropRefExpr; elsewhere...
// NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
// to things that stay around.
@@ -1312,7 +1410,7 @@ Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
const Expr *BaseExpr = IV->getBase();
if (CurMethodDef) {
if (BaseExpr->getType()->isObjCObjectPointerType()) {
- ObjCInterfaceType *iFaceDecl =
+ const ObjCInterfaceType *iFaceDecl =
dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
// lookup which class implements the instance variable.
@@ -1330,19 +1428,20 @@ Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
- CK_Unknown,
+ CK_BitCast,
IV->getBase());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
- IV->getBase()->getLocEnd(),
- castExpr);
+ IV->getBase()->getLocEnd(),
+ castExpr);
replaced = true;
if (IV->isFreeIvar() &&
CurMethodDef->getClassInterface() == iFaceDecl->getDecl()) {
MemberExpr *ME = new (Context) MemberExpr(PE, true, D,
- IV->getLocation(),
- D->getType());
- // delete IV; leak for now, see RewritePropertySetter() usage for more info.
+ IV->getLocation(),
+ D->getType(),
+ VK_LValue, OK_Ordinary);
+ // delete IV; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ME;
}
// Get the new text
@@ -1358,7 +1457,7 @@ Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
// Explicit ivar refs need to have a cast inserted.
// FIXME: consider sharing some of this code with the code above.
if (BaseExpr->getType()->isObjCObjectPointerType()) {
- ObjCInterfaceType *iFaceDecl =
+ const ObjCInterfaceType *iFaceDecl =
dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
// lookup which class implements the instance variable.
ObjCInterfaceDecl *clsDeclared = 0;
@@ -1375,7 +1474,7 @@ Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
- CK_Unknown,
+ CK_BitCast,
IV->getBase());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
@@ -1392,8 +1491,7 @@ Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
}
Stmt *RewriteObjC::RewriteObjCNestedIvarRefExpr(Stmt *S, bool &replaced) {
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI) {
if (*CI) {
Stmt *newStmt = RewriteObjCNestedIvarRefExpr(*CI, replaced);
if (newStmt)
@@ -1711,7 +1809,7 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
std::string syncBuf;
syncBuf += " objc_sync_exit(";
Expr *syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown,
+ CK_BitCast,
S->getSynchExpr());
std::string syncExprBufS;
llvm::raw_string_ostream syncExprBuf(syncExprBufS);
@@ -1738,8 +1836,7 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S)
{
// Perform a bottom up traversal of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI)
WarnAboutReturnGotoStmts(*CI);
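The four traversal routines here all switch from the child_begin()/child_end() iterator pair to Stmt::children(), whose child_range tests false once exhausted. The shared loop shape (Visit stands in for each pass's recursive callee):

    // Bottom-up traversal over all children; child slots may be null.
    for (Stmt::child_range CI = S->children(); CI; ++CI)
      if (*CI)
        Visit(*CI);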
@@ -1753,8 +1850,7 @@ void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S)
void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns)
{
// Perform a bottom up traversal of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI)
HasReturnStmts(*CI, hasReturns);
@@ -1765,8 +1861,7 @@ void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns)
void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
// Perform a bottom up traversal of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
RewriteTryReturnStmts(*CI);
}
@@ -1789,8 +1884,7 @@ void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
// Perform a bottom up traversal of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
RewriteSyncReturnStmts(*CI, syncExitBuf);
}
@@ -1853,7 +1947,6 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
buf += "}";
ReplaceText(lastCurlyLoc, 1, buf);
}
- bool sawIdTypedCatch = false;
Stmt *lastCatchBody = 0;
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
@@ -1886,7 +1979,6 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
if (t == Context->getObjCIdType()) {
buf += "1) { ";
ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
- sawIdTypedCatch = true;
} else if (const ObjCObjectPointerType *Ptr =
t->getAs<ObjCObjectPointerType>()) {
// Should be a pointer to a class.
@@ -2020,7 +2112,7 @@ Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
ReplaceStmt(Exp, Replacement);
// Replace this subexpr in the parent.
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return Replacement;
}
@@ -2038,7 +2130,7 @@ Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size());
ReplaceStmt(Exp, SelExp);
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return SelExp;
}
@@ -2049,19 +2141,21 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, msgSendType, SourceLocation());
+ DeclRefExpr *DRE =
+ new (Context) DeclRefExpr(FD, msgSendType, VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
- ImplicitCastExpr::Create(*Context, pToFunc, CK_Unknown,
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
DRE, 0, VK_RValue);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
new (Context) CallExpr(*Context, ICE, args, nargs,
- FT->getCallResultType(*Context), EndLoc);
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
return Exp;
}
@@ -2105,6 +2199,10 @@ bool RewriteObjC::needToScanForQualifiers(QualType T) {
T = T->getPointeeType();
return T->isObjCQualifiedInterfaceType();
}
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
return false;
}
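needToScanForQualifiers() now looks through arrays, so a declaration like 'id<P> objs[4]' gets the same rewriting as a scalar 'id<P>'. The added logic in isolation:

    if (T->isArrayType()) {
      // Strip all array dimensions and test the innermost element type.
      QualType ElemTy = Context->getBaseElementType(T);
      return needToScanForQualifiers(ElemTy);
    }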
@@ -2257,11 +2355,8 @@ void RewriteObjC::SynthSelGetUidFunctionDecl() {
IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
- QualType getFuncType = Context->getFunctionType(Context->getObjCSelType(),
- &ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size());
SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SelGetUidIdent, getFuncType, 0,
@@ -2356,11 +2451,8 @@ void RewriteObjC::SynthSuperContructorFunctionDecl() {
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- false, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2378,11 +2470,9 @@ void RewriteObjC::SynthMsgSendFunctionDecl() {
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2403,11 +2493,9 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2425,11 +2513,9 @@ void RewriteObjC::SynthMsgSendStretFunctionDecl() {
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2452,11 +2538,9 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2474,11 +2558,9 @@ void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
- QualType msgSendType = Context->getFunctionType(Context->DoubleTy,
- &ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2491,11 +2573,8 @@ void RewriteObjC::SynthGetClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
- QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2509,11 +2588,8 @@ void RewriteObjC::SynthGetSuperClassFunctionDecl() {
&Context->Idents.get("class_getSuperclass");
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getObjCClassType());
- QualType getClassType = Context->getFunctionType(Context->getObjCClassType(),
- &ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ &ArgTys[0], ArgTys.size());
GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getSuperClassIdent,
@@ -2528,11 +2604,8 @@ void RewriteObjC::SynthGetMetaClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
- QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
- &ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2572,15 +2645,17 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
&Context->Idents.get(S), strType, 0,
SC_Static, SC_None);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, strType, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, strType, VK_LValue,
+ SourceLocation());
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
- SourceLocation());
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
- CK_Unknown, Unop);
+ CK_BitCast, Unop);
ReplaceStmt(Exp, cast);
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return cast;
}
@@ -2692,10 +2767,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// set the receiver to self, the first argument to all methods.
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown,
+ CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
- Context->getObjCIdType(),
- SourceLocation()))
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
@@ -2713,7 +2789,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
- CK_Unknown, Cls);
+ CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
@@ -2725,7 +2801,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back( // set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context,
Context->getObjCIdType(),
- CK_Unknown, Cls));
+ CK_BitCast, Cls));
// struct objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
@@ -2734,10 +2810,12 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SynthSuperContructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
- superType, SourceLocation());
+ superType, VK_LValue,
+ SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
InitExprs.size(),
- superType, SourceLocation());
+ superType, VK_LValue,
+ SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2746,10 +2824,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
- SourceLocation());
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
- CK_Unknown, SuperRep);
+ CK_BitCast, SuperRep);
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
@@ -2759,11 +2838,13 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
- superType, ILE, false);
+ superType, VK_LValue,
+ ILE, false);
// struct objc_super *
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
- SourceLocation());
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
}
MsgExprs.push_back(SuperRep);
break;
@@ -2798,10 +2879,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown,
+ CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
- Context->getObjCIdType(),
- SourceLocation()))
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
@@ -2818,7 +2899,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
- CK_Unknown, Cls);
+ CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
@@ -2830,7 +2911,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
// set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown, Cls));
+ CK_BitCast, Cls));
// struct objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
@@ -2839,10 +2920,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SynthSuperContructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
- superType, SourceLocation());
+ superType, VK_LValue,
+ SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
InitExprs.size(),
- superType, SourceLocation());
+ superType, VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2851,10 +2933,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
- CK_Unknown, SuperRep);
+ CK_BitCast, SuperRep);
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
@@ -2864,7 +2947,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
- superType, ILE, false);
+ superType, VK_RValue, ILE,
+ false);
}
MsgExprs.push_back(SuperRep);
break;
@@ -2877,7 +2961,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
recExpr = CE->getSubExpr();
recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown, recExpr);
+ CK_BitCast, recExpr);
MsgExprs.push_back(recExpr);
break;
}
@@ -2907,7 +2991,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
: ICE->getType();
// Make sure we convert "type (^)(...)" to "type (*)(...)".
(void)convertBlockPointerToFunctionPointer(type);
- userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK_Unknown,
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK_BitCast,
userExpr);
}
// Make id<P...> cast into an 'id' cast.
@@ -2916,13 +3000,13 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
userExpr = CE->getSubExpr();
userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_Unknown, userExpr);
+ CK_BitCast, userExpr);
}
}
MsgExprs.push_back(userExpr);
// We've transferred the ownership to MsgExprs. For now, we *don't* null
// out the argument in the original expression (since we aren't deleting
- // the ObjCMessageExpr). See RewritePropertySetter() usage for more info.
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
//Exp->setArg(i, 0);
}
// Generate the funky cast.
@@ -2958,7 +3042,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, msgSendType,
- SourceLocation());
+ VK_LValue, SourceLocation());
// Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid).
// If we don't do this cast, we get the following bizarre warning/note:
@@ -2966,17 +3050,15 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// xx.m:13: note: if this code is reached, the program will abort
cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
- CK_Unknown, DRE);
+ CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
- QualType castType = Context->getFunctionType(returnType,
- &ArgTypes[0], ArgTypes.size(),
- // If we don't have a method decl, force a variadic cast.
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ // If we don't have a method decl, force a variadic cast.
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true);
castType = Context->getPointerType(castType);
- cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_Unknown,
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
@@ -2985,7 +3067,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
MsgExprs.size(),
- FT->getResultType(), EndLoc);
+ FT->getResultType(), VK_RValue,
+ EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -2995,19 +3078,16 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// Create a reference to the objc_msgSend_stret() declaration.
DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor, msgSendType,
- SourceLocation());
+ VK_LValue, SourceLocation());
// Need to cast objc_msgSend_stret to "void *" (see above comment).
cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
- CK_Unknown, STDRE);
+ CK_BitCast, STDRE);
// Now do the "normal" pointer to function cast.
- castType = Context->getFunctionType(returnType,
- &ArgTypes[0], ArgTypes.size(),
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
castType = Context->getPointerType(castType);
- cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_Unknown,
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
@@ -3016,7 +3096,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
FT = msgSendType->getAs<FunctionType>();
CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
MsgExprs.size(),
- FT->getResultType(), SourceLocation());
+ FT->getResultType(), VK_RValue,
+ SourceLocation());
// Build sizeof(returnType)
SizeOfAlignOfExpr *sizeofExpr = new (Context) SizeOfAlignOfExpr(true,
@@ -3033,20 +3114,19 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::APInt(IntSize, 8),
Context->IntTy,
SourceLocation());
- BinaryOperator *lessThanExpr = new (Context) BinaryOperator(sizeofExpr, limit,
- BO_LE,
- Context->IntTy,
- SourceLocation());
+ BinaryOperator *lessThanExpr =
+ new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
+ VK_RValue, OK_Ordinary, SourceLocation());
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
SourceLocation(), CE,
- SourceLocation(), STCE, (Expr*)0,
- returnType);
+ SourceLocation(), STCE,
+ returnType, VK_RValue, OK_Ordinary);
ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
CondExpr);
}
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
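
The conditional synthesized above picks a message-send entry point at run time from the size of the returned aggregate. A self-contained sketch of just that size test; the real rewriter emits calls through function-pointer casts of objc_msgSend and objc_msgSend_stret as shown in the hunks, and the helper names here are invented.

    #include <cstdio>

    struct Small { long x; };        // fits in a register pair
    struct Big { double v[4]; };     // needs a hidden sret pointer

    static void viaRegisters() { std::puts("objc_msgSend path"); }
    static void viaSret()      { std::puts("objc_msgSend_stret path"); }

    template <typename Ret> static void dispatch() {
      // The same test the synthesized conditional performs at run time.
      if (sizeof(Ret) <= 8)
        viaRegisters();
      else
        viaSret();
    }

    int main() {
      dispatch<Small>();   // objc_msgSend path
      dispatch<Big>();     // objc_msgSend_stret path
    }
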
@@ -3057,7 +3137,7 @@ Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
@@ -3084,16 +3164,17 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
ID, getProtocolType(), 0,
SC_Extern, SC_None);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, getProtocolType(), SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, getProtocolType(), VK_LValue,
+ SourceLocation());
Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
- SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
- CK_Unknown,
+ CK_BitCast,
DerefExpr);
ReplaceStmt(Exp, castExpr);
ProtocolExprDecls.insert(Exp->getProtocol());
- // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return castExpr;
}
@@ -3637,8 +3718,7 @@ void RewriteObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
/// SynthesizeIvarOffsetComputation - This routine synthesizes computation of
/// ivar offset.
-void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCContainerDecl *IDecl,
- ObjCIvarDecl *ivar,
+void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) {
if (ivar->isBitField()) {
// FIXME: The hack below doesn't work for bitfields. For now, we simply
@@ -3646,7 +3726,7 @@ void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCContainerDecl *IDecl,
Result += "0";
} else {
Result += "__OFFSETOFIVAR__(struct ";
- Result += IDecl->getNameAsString();
+ Result += ivar->getContainingInterface()->getNameAsString();
if (LangOpts.Microsoft)
Result += "_IMPL";
Result += ", ";
@@ -3729,7 +3809,7 @@ void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
- SynthesizeIvarOffsetComputation(IDecl, *IVI, Result);
+ SynthesizeIvarOffsetComputation(*IVI, Result);
Result += "}\n";
for (++IVI; IVI != IVE; ++IVI) {
Result += "\t ,{\"";
@@ -3740,7 +3820,7 @@ void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
- SynthesizeIvarOffsetComputation(IDecl, (*IVI), Result);
+ SynthesizeIvarOffsetComputation((*IVI), Result);
Result += "}\n";
}
@@ -3764,11 +3844,13 @@ void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- InstanceMethods.push_back(Getter);
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
- InstanceMethods.push_back(Setter);
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
}
RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
true, "", IDecl->getName(), Result);
@@ -4031,10 +4113,12 @@ void RewriteObjC::SynthesizeMetaDataIntoBuffer(std::string &Result) {
void RewriteObjC::RewriteByRefString(std::string &ResultStr,
const std::string &Name,
- ValueDecl *VD) {
+ ValueDecl *VD, bool def) {
assert(BlockByRefDeclNo.count(VD) &&
"RewriteByRefString: ByRef decl missing");
- ResultStr += "struct __Block_byref_" + Name +
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
"_" + utostr(BlockByRefDeclNo[VD]) ;
}
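
RewriteByRefString gains a def flag so one helper can emit either the definition spelling or a bare reference to the byref struct. A standalone sketch of the naming scheme, assuming the number comes from a per-decl counter as BlockByRefDeclNo provides:

    #include <cstdio>
    #include <string>

    static std::string byrefName(const std::string &Name, unsigned No, bool Def) {
      std::string S;
      if (Def)
        S += "struct ";   // only the definition site spells the tag keyword
      S += "__Block_byref_" + Name + "_" + std::to_string(No);
      return S;
    }

    int main() {
      std::printf("%s\n", byrefName("x", 0, true).c_str());   // struct __Block_byref_x_0
      std::printf("%s\n", byrefName("x", 0, false).c_str());  // __Block_byref_x_0
    }
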
@@ -4373,6 +4457,12 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
+ // Imported objects in the inner blocks that are not used in the outer
+ // block must be copied/disposed of in the outer block as well.
+ if (Exp->isByRef() ||
+ VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
}
std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
@@ -4450,8 +4540,7 @@ void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
}
void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
GetBlockDeclRefExprs(CBE->getBody());
@@ -4467,8 +4556,9 @@ void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
if (HasLocalVariableExternalStorage(DRE->getDecl())) {
BlockDeclRefExpr *BDRE =
- new (Context)BlockDeclRefExpr(DRE->getDecl(), DRE->getType(),
- DRE->getLocation(), false);
+ new (Context)BlockDeclRefExpr(cast<VarDecl>(DRE->getDecl()),
+ DRE->getType(),
+ VK_LValue, DRE->getLocation(), false);
BlockDeclRefs.push_back(BDRE);
}
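
Several traversals in this file move from the child_begin()/child_end() iterator pair to Stmt::children(), whose result tests true while children remain. A stand-in range type with the same shape, purely illustrative:

    #include <cstdio>
    #include <vector>

    // Minimal stand-in with the same shape as clang's old child_range: an
    // iterator pair that is truthy while elements remain.
    struct ChildRange {
      std::vector<int>::iterator I, E;
      explicit operator bool() const { return I != E; }  // "while (CI)" test
      ChildRange &operator++() { ++I; return *this; }
      int operator*() const { return *I; }
    };

    int main() {
      std::vector<int> Kids = {1, 2, 3};
      for (ChildRange CI{Kids.begin(), Kids.end()}; CI; ++CI)
        std::printf("%d\n", *CI);
    }
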
@@ -4478,8 +4568,7 @@ void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs,
llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
@@ -4534,9 +4623,7 @@ QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
// FIXME: Does this work if the block takes no arguments but has a return
// type which is of block type?
if (HasBlockType)
- FuncType = Context->getFunctionType(Res,
- &ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0,
- false, false, 0, 0, FunctionType::ExtInfo());
+ FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size());
else FuncType = QualType(FT, 0);
return FuncType;
}
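
getSimpleFunctionType replaces the long Context->getFunctionType(...) incantations throughout this file. A sketch of the wrapper pattern with invented types; the real helper's signature is not shown in this diff:

    #include <cstdio>
    #include <vector>

    struct FuncType {
      int Ret;
      std::vector<int> Params;
      bool Variadic;
    };

    // Thin wrapper: the rarely-varied trailing arguments get defaults, so
    // call sites state only return type, parameters, and variadic-ness.
    static FuncType getSimpleFunctionType(int Ret, const int *Params,
                                          unsigned N, bool Variadic = false) {
      return FuncType{Ret, std::vector<int>(Params, Params + N), Variadic};
    }

    int main() {
      int Params[] = {1, 2};
      FuncType FT = getSimpleFunctionType(7, Params, 2);
      std::printf("%zu params, variadic=%d\n", FT.Params.size(), (int)FT.Variadic);
    }
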
@@ -4569,8 +4656,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
new (Context) ConditionalOperator(CONDExp,
SourceLocation(), cast<Expr>(LHSStmt),
SourceLocation(), cast<Expr>(RHSStmt),
- (Expr*)0,
- Exp->getType());
+ Exp->getType(), VK_RValue, OK_Ordinary);
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
@@ -4598,20 +4684,19 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
E = FTP->arg_type_end(); I && (I != E); ++I) {
QualType t = *I;
// Make sure we convert "t (^)(...)" to "t (*)(...)".
- (void)convertBlockPointerToFunctionPointer(t);
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
ArgTypes.push_back(t);
}
}
// Now do the pointer to function cast.
- QualType PtrToFuncCastType = Context->getFunctionType(Exp->getType(),
- &ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
+ QualType PtrToFuncCastType
+ = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size());
PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
- CK_Unknown,
+ CK_BitCast,
const_cast<Expr*>(BlockExp));
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
@@ -4622,10 +4707,12 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
&Context->Idents.get("FuncPtr"), Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true);
MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
- FD->getType());
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
- CK_Unknown, ME);
+ CK_BitCast, ME);
PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
llvm::SmallVector<Expr*, 8> BlkExprs;
@@ -4638,7 +4725,8 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
}
CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
BlkExprs.size(),
- Exp->getType(), SourceLocation());
+ Exp->getType(), VK_RValue,
+ SourceLocation());
return CE;
}
@@ -4673,7 +4761,8 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(Expr *DeclRefExp) {
/*BitWidth=*/0, /*Mutable=*/true);
MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
FD, SourceLocation(),
- FD->getType());
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
llvm::StringRef Name = VD->getName();
FD = FieldDecl::Create(*Context, 0, SourceLocation(),
@@ -4681,7 +4770,7 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(Expr *DeclRefExp) {
Context->VoidPtrTy, 0,
/*BitWidth=*/0, /*Mutable=*/true);
ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
- DeclRefExp->getType());
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
@@ -4700,8 +4789,9 @@ Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
- Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref,
- DRE->getType(), DRE->getLocation());
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -4802,6 +4892,30 @@ bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
return false;
}
+bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "PointerTypeTakesAnyObjCQualifiedType(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I) {
+ if ((*I)->isObjCQualifiedIdType())
+ return true;
+ if ((*I)->isObjCObjectPointerType() &&
+ (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+
+ }
+ return false;
+}
+
void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen) {
const char *argPtr = strchr(Name, '(');
@@ -4845,28 +4959,57 @@ void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
// scan backward (from the decl location) for the end of the previous decl.
while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
startBuf--;
-
+ SourceLocation Start = DeclLoc.getFileLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
// *startBuf != '^' if we are dealing with a pointer to function that
// may take block argument types (which will be handled below).
if (*startBuf == '^') {
// Replace the '^' with '*', computing a negative offset.
- DeclLoc = DeclLoc.getFileLocWithOffset(startBuf-endBuf);
- ReplaceText(DeclLoc, 1, "*");
+ buf = '*';
+ startBuf++;
+ OrigLength++;
}
- if (PointerTypeTakesAnyBlockArguments(DeclT)) {
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
// Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<P>*/
DeclLoc = ND->getLocation();
startBuf = SM->getCharacterData(DeclLoc);
const char *argListBegin, *argListEnd;
GetExtentOfArgList(startBuf, argListBegin, argListEnd);
while (argListBegin < argListEnd) {
- if (*argListBegin == '^') {
- SourceLocation CaretLoc = DeclLoc.getFileLocWithOffset(argListBegin-startBuf);
- ReplaceText(CaretLoc, 1, "*");
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+ OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
}
+ else
+ buf += *argListBegin;
argListBegin++;
+ OrigLength++;
}
+ buf += ')';
+ OrigLength++;
}
+ ReplaceText(Start, OrigLength, buf);
+
return;
}
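
Putting the pieces together, the rewrite above turns every '^' in the declarator and argument list into '*' and preserves each protocol list inside a C comment so the generated file stays plain C. A runnable sketch mirroring the argument-list loop on a plain string:

    #include <cstdio>
    #include <string>

    // '^' becomes '*'; '<...>' protocol lists are kept but wrapped in /* */.
    static std::string rewriteArgs(const std::string &In) {
      std::string Out;
      for (size_t i = 0; i < In.size(); ++i) {
        char c = In[i];
        if (c == '^') {
          Out += '*';
        } else if (c == '<') {
          Out += "/*";
          while (i < In.size() && In[i] != '>')
            Out += In[i++];
          if (i < In.size())
            Out += In[i];        // the closing '>'
          Out += "*/";
        } else {
          Out += c;
        }
      }
      return Out;
    }

    int main() {
      std::printf("%s\n", rewriteArgs("void (^cb)(id<Copying>)").c_str());
      // prints: void (*cb)(id/*<Copying>*/)
    }
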
@@ -4964,7 +5107,7 @@ void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
const char *endBuf = SM->getCharacterData(X);
std::string Name(ND->getNameAsString());
std::string ByrefType;
- RewriteByRefString(ByrefType, Name, ND);
+ RewriteByRefString(ByrefType, Name, ND, true);
ByrefType += " {\n";
ByrefType += " void *__isa;\n";
RewriteByRefString(ByrefType, Name, ND);
@@ -5125,6 +5268,7 @@ FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(llvm::StringRef name) {
Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
const llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs) {
+ const BlockDecl *block = Exp->getBlockDecl();
Blocks.push_back(Exp);
CollectBlockDeclRefInfo(Exp);
@@ -5183,16 +5327,17 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// Simulate a constructor call...
FD = SynthBlockInitFunctionDecl(Tag);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, FType, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, FType, VK_RValue,
+ SourceLocation());
llvm::SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
- DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, FD->getType(),
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
- CK_Unknown, Arg);
+ CK_BitCast, Arg);
InitExprs.push_back(castExpr);
// Initialize the block descriptor.
@@ -5202,12 +5347,15 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
&Context->Idents.get(DescData.c_str()),
Context->VoidPtrTy, 0,
SC_Static, SC_None);
- UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
- new (Context) DeclRefExpr(NewVD,
- Context->VoidPtrTy, SourceLocation()),
- UO_AddrOf,
- Context->getPointerType(Context->VoidPtrTy),
- SourceLocation());
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -5219,26 +5367,29 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT,
- SourceLocation());
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Arg = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ Arg = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
- CK_Unknown, Arg);
+ CK_BitCast, Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT,
- SourceLocation());
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
}
}
@@ -5250,7 +5401,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
ValueDecl *ND = (*I);
std::string Name(ND->getNameAsString());
std::string RecName;
- RewriteByRefString(RecName, Name, ND);
+ RewriteByRefString(RecName, Name, ND, true);
IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ sizeof("struct"));
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
@@ -5259,11 +5410,27 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()),
- SourceLocation());
- Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_Unknown, Exp);
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ if (variable == ND && ci->isNested()) {
+ assert (ci->isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+ // A captured nested byref variable already has its address passed.
+ // Do not take its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
}
@@ -5277,11 +5444,11 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
InitExprs.push_back(FlagExp);
}
NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
- FType, SourceLocation());
+ FType, VK_LValue, SourceLocation());
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
- SourceLocation());
- NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_Unknown,
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
@@ -5304,15 +5471,14 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// we get this right.
void RewriteObjC::CollectPropertySetters(Stmt *S) {
// Perform a bottom up traversal of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI)
CollectPropertySetters(*CI);
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
if (BinOp->isAssignmentOp()) {
- if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS()))
- PropSetters[PRE] = BinOp;
+ if (isa<ObjCPropertyRefExpr>(BinOp->getLHS()))
+ PropSetters[BinOp->getLHS()] = BinOp;
}
}
}
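
Keying PropSetters by the raw LHS expression lets implicit setters share the property-setter path. A toy version of the collect-then-look-up pattern with placeholder node types; note the lookup relies on operator[] default-constructing a null entry on a miss, just as the later PropSetters[Exp] checks do:

    #include <cstdio>
    #include <map>

    struct Expr { int Id; };
    struct BinOp { Expr *LHS; bool IsAssign; };

    int main() {
      Expr A{1}, B{2};
      BinOp S1{&A, true}, S2{&B, false};
      std::map<Expr *, BinOp *> PropSetters;
      for (BinOp *Op : {&S1, &S2})
        if (Op->IsAssign)
          PropSetters[Op->LHS] = Op;   // remember the whole assignment tree
      // Later, the bare LHS is enough to recover its assignment.
      std::printf("A %s a setter LHS\n", PropSetters[&A] ? "is" : "is not");
    }
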
@@ -5329,8 +5495,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
SourceRange OrigStmtRange = S->getSourceRange();
// Perform a bottom up rewrite of all children.
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
- CI != E; ++CI)
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
Stmt *newStmt;
Stmt *S = (*CI);
@@ -5349,6 +5514,17 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
newStmt = RewriteFunctionBodyOrGlobalInitializer(S);
if (newStmt)
*CI = newStmt;
+ // If we are dealing with an assignment whose LHS is a property reference
+ // expression, the entire assignment tree is rewritten into a property
+ // setter message. This involves the RHS too; do not attempt to rewrite
+ // the RHS again.
+ if (Expr *Exp = dyn_cast<Expr>(S))
+ if (isa<ObjCPropertyRefExpr>(Exp)) {
+ if (PropSetters[Exp]) {
+ ++CI;
+ continue;
+ }
+ }
}
if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
@@ -5359,7 +5535,12 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
GetInnerBlockDeclRefExprs(BE->getBody(),
InnerBlockDeclRefs, InnerContexts);
// Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = 0;
RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = 0;
ImportedLocalExternalDecls.clear();
// Now we snarf the rewritten text and stash it away for later use.
std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
@@ -5375,8 +5556,11 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
return RewriteAtEncode(AtEncode);
- if (ObjCPropertyRefExpr *PropRefExpr = dyn_cast<ObjCPropertyRefExpr>(S)) {
- BinaryOperator *BinOp = PropSetters[PropRefExpr];
+ if (isa<ObjCPropertyRefExpr>(S)) {
+ Expr *PropOrImplicitRefExpr = dyn_cast<Expr>(S);
+ assert(PropOrImplicitRefExpr && "Property or implicit setter/getter is null");
+
+ BinaryOperator *BinOp = PropSetters[PropOrImplicitRefExpr];
if (BinOp) {
// Because the rewriter doesn't allow us to rewrite rewritten code,
// we need to rewrite the right hand side prior to rewriting the setter.
@@ -5388,6 +5572,12 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// ^(NSURL *errorURL, NSError *error) { return (BOOL)1; };
SourceRange SrcRange = BinOp->getSourceRange();
Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(BinOp->getRHS());
+ // Rewrite the ivar access expression if necessary.
+ if (isa<ObjCIvarRefExpr>(newStmt)) {
+ bool replaced = false;
+ newStmt = RewriteObjCNestedIvarRefExpr(newStmt, replaced);
+ }
+
DisableReplaceStmt = false;
//
// Unlike the main iterator, we explicitly avoid changing 'BinOp'. If
@@ -5414,18 +5604,19 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// (CStyleCastExpr 0x231d220 'void *'
// (DeclRefExpr 0x231d200 'id (id, SEL, ...)' FunctionDecl='objc_msgSend' 0x231cdc0))))
//
- // Note that 'newStmt' is passed to RewritePropertySetter so that it
+ // Note that 'newStmt' is passed to RewritePropertyOrImplicitSetter so that it
// can be used as the setter argument. ReplaceStmt() will still 'see'
// the original RHS (since we haven't altered BinOp).
//
// This implies the Rewrite* routines can no longer delete the original
// node. As a result, we now leak the original AST nodes.
//
- return RewritePropertySetter(BinOp, dyn_cast<Expr>(newStmt), SrcRange);
+ return RewritePropertyOrImplicitSetter(BinOp, dyn_cast<Expr>(newStmt), SrcRange);
} else {
- return RewritePropertyGetter(PropRefExpr);
+ return RewritePropertyOrImplicitGetter(PropOrImplicitRefExpr);
}
}
+
if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
return RewriteAtSelector(AtSelector);
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
index e290921..cfedd4b 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
@@ -119,7 +119,7 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- static inline bool classof(const RopePieceBTreeNode *) { return true; }
+ //static inline bool classof(const RopePieceBTreeNode *) { return true; }
};
} // end anonymous namespace
@@ -223,7 +223,7 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- static inline bool classof(const RopePieceBTreeLeaf *) { return true; }
+ //static inline bool classof(const RopePieceBTreeLeaf *) { return true; }
static inline bool classof(const RopePieceBTreeNode *N) {
return N->isLeaf();
}
@@ -455,7 +455,7 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- static inline bool classof(const RopePieceBTreeInterior *) { return true; }
+ //static inline bool classof(const RopePieceBTreeInterior *) { return true; }
static inline bool classof(const RopePieceBTreeNode *N) {
return !N->isLeaf();
}
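
These always-true classof overloads are being retired because LLVM-style RTTI only needs each class to classify a pointer to the base. A self-contained sketch of the pattern:

    #include <cassert>

    struct Node {
      bool Leaf;
      explicit Node(bool L) : Leaf(L) {}
      bool isLeaf() const { return Leaf; }
    };

    struct LeafNode : Node {
      LeafNode() : Node(true) {}
      // The one overload isa<>-style checks actually need: classify a *base*
      // pointer. The always-true same-type overload added nothing.
      static bool classof(const Node *N) { return N->isLeaf(); }
    };

    template <typename To, typename From>
    static bool isa_like(const From *F) {
      return To::classof(F);
    }

    int main() {
      LeafNode L;
      Node *N = &L;
      assert(isa_like<LeafNode>(N));
      (void)N;
    }
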
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
index 789d53f..03ce63e 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
@@ -34,10 +34,10 @@ TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
RawLex.LexFromRawLexer(RawTok);
while (RawTok.isNot(tok::eof)) {
#if 0
- if (Tok.is(tok::identifier)) {
+ if (Tok.is(tok::raw_identifier)) {
// Look up the identifier info for the token. This should use
// IdentifierTable directly instead of PP.
- Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
+ PP.LookUpIdentifierInfo(Tok);
}
#endif
@@ -73,7 +73,7 @@ TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(),
Where)).second;
assert(InsertSuccess && "Token location already in rewriter!");
- InsertSuccess = InsertSuccess;
+ (void)InsertSuccess;
return Where;
}
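
The self-assignment hack gives way to a cast to void, the idiomatic way to keep a variable that only feeds an assert from drawing unused-variable warnings when NDEBUG compiles the assert away. Minimal sketch:

    #include <cassert>

    int main() {
      bool Inserted = true;   // stands in for the result of a map insert
      assert(Inserted && "Token location already in rewriter!");
      (void)Inserted;         // no-op; silences -Wunused-variable in NDEBUG
    }
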
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
index cfebed6..63f561d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -16,6 +16,7 @@
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -25,6 +26,7 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Analysis/Analyses/UninitializedValuesV2.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Casting.h"
@@ -108,24 +110,41 @@ static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
bool HasFakeEdge = false;
bool HasPlainEdge = false;
bool HasAbnormalEdge = false;
- for (CFGBlock::pred_iterator I=cfg->getExit().pred_begin(),
- E = cfg->getExit().pred_end();
- I != E;
- ++I) {
- CFGBlock& B = **I;
+
+ // Ignore default cases that aren't likely to be reachable because all
+ // enum values in a switch(X) have explicit case statements.
+ CFGBlock::FilterOptions FO;
+ FO.IgnoreDefaultsWithCoveredEnums = 1;
+
+ for (CFGBlock::filtered_pred_iterator
+ I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
+ const CFGBlock& B = **I;
if (!live[B.getBlockID()])
continue;
- if (B.size() == 0) {
+
+ // Destructors can appear after the 'return' in the CFG. This is
+ // normal. We need to look past the destructors for the return
+ // statement (if it exists).
+ CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
+ for ( ; ri != re ; ++ri) {
+ CFGElement CE = *ri;
+ if (isa<CFGStmt>(CE))
+ break;
+ }
+
+ // No more CFGElements in the block?
+ if (ri == re) {
if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
HasAbnormalEdge = true;
continue;
}
-
// A labeled empty statement, or the entry block...
HasPlainEdge = true;
continue;
}
- Stmt *S = B[B.size()-1];
+
+ CFGStmt CS = cast<CFGStmt>(*ri);
+ Stmt *S = CS.getStmt();
if (isa<ReturnStmt>(S)) {
HasLiveReturn = true;
continue;
@@ -169,19 +188,6 @@ static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
}
}
}
- // FIXME: Remove this hack once temporaries and their destructors are
- // modeled correctly by the CFG.
- if (CXXExprWithTemporaries *E = dyn_cast<CXXExprWithTemporaries>(S)) {
- for (unsigned I = 0, N = E->getNumTemporaries(); I != N; ++I) {
- const FunctionDecl *FD = E->getTemporary(I)->getDestructor();
- if (FD->hasAttr<NoReturnAttr>() ||
- FD->getType()->getAs<FunctionType>()->getNoReturnAttr()) {
- NoReturnEdge = true;
- HasFakeEdge = true;
- break;
- }
- }
- }
// FIXME: Add noreturn message sends.
if (NoReturnEdge == false)
HasPlainEdge = true;
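
The reverse scan above lets the fall-through check see a return statement even when implicit destructors follow it in the CFG block. A sketch of the scan with invented element kinds:

    #include <cstdio>
    #include <vector>

    enum ElemKind { Statement, ImplicitDtor };

    int main() {
      // "return x;" followed by two implicit destructors, in CFG order.
      std::vector<ElemKind> Block = {Statement, ImplicitDtor, ImplicitDtor};
      std::vector<ElemKind>::reverse_iterator ri = Block.rbegin(),
                                              re = Block.rend();
      for (; ri != re; ++ri)
        if (*ri == Statement)
          break;               // first non-destructor element from the end
      std::printf(ri == re ? "block has no statements\n"
                           : "found the trailing statement\n");
    }
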
@@ -208,9 +214,11 @@ struct CheckFallThroughDiagnostics {
unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
unsigned diag_NeverFallThroughOrReturn;
bool funMode;
+ SourceLocation FuncLoc;
static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
CheckFallThroughDiagnostics D;
+ D.FuncLoc = Func->getLocation();
D.diag_MaybeFallThrough_HasNoReturn =
diag::warn_falloff_noreturn_function;
D.diag_MaybeFallThrough_ReturnsNonVoid =
@@ -255,18 +263,22 @@ struct CheckFallThroughDiagnostics {
bool checkDiagnostics(Diagnostic &D, bool ReturnsVoid,
bool HasNoReturn) const {
if (funMode) {
- return (D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function)
- == Diagnostic::Ignored || ReturnsVoid)
- && (D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr)
- == Diagnostic::Ignored || !HasNoReturn)
- && (D.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
- == Diagnostic::Ignored || !ReturnsVoid);
+ return (ReturnsVoid ||
+ D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
+ FuncLoc) == Diagnostic::Ignored)
+ && (!HasNoReturn ||
+ D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
+ FuncLoc) == Diagnostic::Ignored)
+ && (!ReturnsVoid ||
+ D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
+ == Diagnostic::Ignored);
}
// For blocks.
return ReturnsVoid && !HasNoReturn
- && (D.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
- == Diagnostic::Ignored || !ReturnsVoid);
+ && (!ReturnsVoid ||
+ D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
+ == Diagnostic::Ignored);
}
};
@@ -343,6 +355,112 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
}
//===----------------------------------------------------------------------===//
+// -Wuninitialized
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct SLocSort {
+ bool operator()(const Expr *a, const Expr *b) {
+ SourceLocation aLoc = a->getLocStart();
+ SourceLocation bLoc = b->getLocStart();
+ return aLoc.getRawEncoding() < bLoc.getRawEncoding();
+ }
+};
+
+class UninitValsDiagReporter : public UninitVariablesHandler {
+ Sema &S;
+ typedef llvm::SmallVector<const Expr *, 2> UsesVec;
+ typedef llvm::DenseMap<const VarDecl *, UsesVec*> UsesMap;
+ UsesMap *uses;
+
+public:
+ UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
+ ~UninitValsDiagReporter() {
+ flushDiagnostics();
+ }
+
+ void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd) {
+ if (!uses)
+ uses = new UsesMap();
+
+ UsesVec *&vec = (*uses)[vd];
+ if (!vec)
+ vec = new UsesVec();
+
+ vec->push_back(ex);
+ }
+
+ void flushDiagnostics() {
+ if (!uses)
+ return;
+
+ for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
+ const VarDecl *vd = i->first;
+ UsesVec *vec = i->second;
+
+ bool fixitIssued = false;
+
+ // Sort the uses by their SourceLocations. While not strictly
+ // guaranteed to produce them in line/column order, this will provide
+ // a stable ordering.
+ std::sort(vec->begin(), vec->end(), SLocSort());
+
+ for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve; ++vi)
+ {
+ if (const DeclRefExpr *dr = dyn_cast<DeclRefExpr>(*vi)) {
+ S.Diag(dr->getLocStart(), diag::warn_uninit_var)
+ << vd->getDeclName() << dr->getSourceRange();
+ }
+ else {
+ const BlockExpr *be = cast<BlockExpr>(*vi);
+ S.Diag(be->getLocStart(), diag::warn_uninit_var_captured_by_block)
+ << vd->getDeclName();
+ }
+
+ // Report where the variable was declared.
+ S.Diag(vd->getLocStart(), diag::note_uninit_var_def)
+ << vd->getDeclName();
+
+ // Only report the fixit once.
+ if (fixitIssued)
+ continue;
+
+ fixitIssued = true;
+
+ // Suggest possible initialization (if any).
+ const char *initialization = 0;
+ QualType vdTy = vd->getType().getCanonicalType();
+
+ if (vdTy->getAs<ObjCObjectPointerType>()) {
+ // Check if 'nil' is defined.
+ if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("nil")))
+ initialization = " = nil";
+ else
+ initialization = " = 0";
+ }
+ else if (vdTy->isRealFloatingType())
+ initialization = " = 0.0";
+ else if (vdTy->isBooleanType() && S.Context.getLangOptions().CPlusPlus)
+ initialization = " = false";
+ else if (vdTy->isEnumeralType())
+ continue;
+ else if (vdTy->isScalarType())
+ initialization = " = 0";
+
+ if (initialization) {
+ SourceLocation loc = S.PP.getLocForEndOfToken(vd->getLocEnd());
+ S.Diag(loc, diag::note_var_fixit_add_initialization)
+ << FixItHint::CreateInsertion(loc, initialization);
+ }
+ }
+ delete vec;
+ }
+ delete uses;
+ }
+};
+}
+
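
UninitValsDiagReporter batches every use per variable and reports them from flushDiagnostics(), sorted by location. The shape of that accumulate-then-flush pattern, with plain standard-library types standing in for Sema and the AST:

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // (variable, use location) pairs in the order the analysis finds them.
      std::vector<std::pair<std::string, int>> Uses = {
          {"x", 14}, {"y", 3}, {"x", 7}};

      std::map<std::string, std::vector<int>> ByVar;
      for (const auto &U : Uses)
        ByVar[U.first].push_back(U.second);   // handleUseOfUninitVariable()

      for (auto &Entry : ByVar) {             // flushDiagnostics()
        // Sort by location for a stable, reader-friendly order.
        std::sort(Entry.second.begin(), Entry.second.end());
        for (int Loc : Entry.second)
          std::printf("warning: '%s' may be used uninitialized (line %d)\n",
                      Entry.first.c_str(), Loc);
      }
    }
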
+//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//
@@ -355,7 +473,8 @@ clang::sema::AnalysisBasedWarnings::Policy::Policy() {
clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) : S(s) {
Diagnostic &D = S.getDiagnostics();
DefaultPolicy.enableCheckUnreachable = (unsigned)
- (D.getDiagnosticLevel(diag::warn_unreachable) != Diagnostic::Ignored);
+ (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
+ Diagnostic::Ignored);
}
void clang::sema::
@@ -390,7 +509,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// Don't generate EH edges for CallExprs, as we'd like to avoid the n^2
// explosion for destructors that can result and the compile-time hit.
- AnalysisContext AC(D, 0, false);
+ AnalysisContext AC(D, 0, /*useUnoptimizedCFG=*/false, /*addehedges=*/false,
+ /*addImplicitDtors=*/true, /*addInitializers=*/true);
// Warning: check missing 'return'
if (P.enableCheckFallThrough) {
@@ -403,6 +523,32 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// Warning: check for unreachable code
if (P.enableCheckUnreachable)
CheckUnreachable(S, AC);
+
+ if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
+ != Diagnostic::Ignored) {
+ ASTContext &ctx = D->getASTContext();
+ llvm::OwningPtr<CFG> tmpCFG;
+ bool useAlternateCFG = false;
+ if (ctx.getLangOptions().CPlusPlus) {
+ // Temporary workaround: implicit dtors in the CFG can confuse
+ // the path-sensitivity in the uninitialized values analysis.
+ // For now create (if necessary) a separate CFG without implicit dtors.
+ // FIXME: We should not need to do this, as it results in multiple
+ // CFGs getting constructed.
+ CFG::BuildOptions B;
+ B.AddEHEdges = false;
+ B.AddImplicitDtors = false;
+ B.AddInitializers = true;
+ tmpCFG.reset(CFG::buildCFG(D, AC.getBody(), &ctx, B));
+ useAlternateCFG = true;
+ }
+ CFG *cfg = useAlternateCFG ? tmpCFG.get() : AC.getCFG();
+ if (cfg) {
+ UninitValsDiagReporter reporter(S);
+ runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
+ reporter);
+ }
+ }
}
void clang::sema::
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
index 8ccb2ca..c0a3053 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -16,37 +16,26 @@
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
-AttributeList::AttributeList(IdentifierInfo *aName, SourceLocation aLoc,
+AttributeList::AttributeList(llvm::BumpPtrAllocator &Alloc,
+ IdentifierInfo *aName, SourceLocation aLoc,
IdentifierInfo *sName, SourceLocation sLoc,
IdentifierInfo *pName, SourceLocation pLoc,
Expr **ExprList, unsigned numArgs,
- AttributeList *n, bool declspec, bool cxx0x)
- : AttrName(aName), AttrLoc(aLoc), ScopeName(sName), ScopeLoc(sLoc),
- ParmName(pName), ParmLoc(pLoc), NumArgs(numArgs), Next(n),
+ bool declspec, bool cxx0x)
+ : AttrName(aName), AttrLoc(aLoc), ScopeName(sName),
+ ScopeLoc(sLoc),
+ ParmName(pName), ParmLoc(pLoc), NumArgs(numArgs), Next(0),
DeclspecAttribute(declspec), CXX0XAttribute(cxx0x), Invalid(false) {
if (numArgs == 0)
Args = 0;
else {
- Args = new Expr*[numArgs];
+ // Allocate the Args array using the BumpPtrAllocator.
+ Args = Alloc.Allocate<Expr*>(numArgs);
memcpy(Args, ExprList, numArgs*sizeof(Args[0]));
}
}
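
The Args array now comes from a BumpPtrAllocator, and the destructor below is deleted outright: arena allocation trades per-node cleanup for one cheap release of the whole pool. A toy bump allocator showing the idea; llvm::BumpPtrAllocator is the real implementation:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy fixed-size bump allocator. No per-object free: everything dies
    // with the pool, which is why AttributeList can drop its destructor.
    struct BumpAlloc {
      std::vector<char> Pool;
      size_t Off;
      BumpAlloc() : Pool(1 << 12), Off(0) {}
      void *Allocate(size_t Bytes, size_t Align) {
        Off = (Off + Align - 1) & ~(Align - 1);  // no overflow check in sketch
        void *P = Pool.data() + Off;
        Off += Bytes;
        return P;
      }
    };

    int main() {
      BumpAlloc A;
      // The Expr* argument array lives in the arena; no delete[] needed.
      void **Args = static_cast<void **>(
          A.Allocate(3 * sizeof(void *), alignof(void *)));
      Args[0] = Args[1] = Args[2] = 0;
      std::printf("used %zu bytes of the pool\n", A.Off);
    }
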
-AttributeList::~AttributeList() {
- if (Args) {
- // FIXME: before we delete the vector, we need to make sure the Expr's
- // have been deleted. Since ActionBase::ExprTy is "void", we are dependent
- // on the actions module for actually freeing the memory. The specific
- // hooks are ActOnDeclarator, ActOnTypeName, ActOnParamDeclaratorType,
- // ParseField, ParseTag. Once these routines have freed the expression,
- // they should zero out the Args slot (to indicate the memory has been
- // freed). If any element of the vector is non-null, we should assert.
- delete [] Args;
- }
- delete Next;
-}
-
AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
llvm::StringRef AttrName = Name->getName();
@@ -62,17 +51,17 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("used", AT_used)
.Case("alias", AT_alias)
.Case("align", AT_aligned)
- .Case("final", AT_final)
.Case("cdecl", AT_cdecl)
.Case("const", AT_const)
+ .Case("__const", AT_const) // some GCC headers do contain this spelling
.Case("blocks", AT_blocks)
.Case("format", AT_format)
- .Case("hiding", AT_hiding)
.Case("malloc", AT_malloc)
.Case("packed", AT_packed)
.Case("unused", AT_unused)
.Case("aligned", AT_aligned)
.Case("cleanup", AT_cleanup)
+ .Case("naked", AT_naked)
.Case("nodebug", AT_nodebug)
.Case("nonnull", AT_nonnull)
.Case("nothrow", AT_nothrow)
@@ -87,12 +76,11 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("iboutletcollection", AT_IBOutletCollection)
.Case("noreturn", AT_noreturn)
.Case("noinline", AT_noinline)
- .Case("override", AT_override)
.Case("sentinel", AT_sentinel)
.Case("NSObject", AT_nsobject)
.Case("dllimport", AT_dllimport)
.Case("dllexport", AT_dllexport)
- .Case("may_alias", IgnoredAttribute) // FIXME: TBAA
+ .Case("may_alias", AT_may_alias)
.Case("base_check", AT_base_check)
.Case("deprecated", AT_deprecated)
.Case("visibility", AT_visibility)
@@ -111,12 +99,18 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("vec_type_hint", IgnoredAttribute)
.Case("objc_exception", AT_objc_exception)
.Case("ext_vector_type", AT_ext_vector_type)
+ .Case("neon_vector_type", AT_neon_vector_type)
+ .Case("neon_polyvector_type", AT_neon_polyvector_type)
.Case("transparent_union", AT_transparent_union)
.Case("analyzer_noreturn", AT_analyzer_noreturn)
.Case("warn_unused_result", AT_warn_unused_result)
.Case("carries_dependency", AT_carries_dependency)
+ .Case("ns_consumed", AT_ns_consumed)
+ .Case("ns_consumes_self", AT_ns_consumes_self)
+ .Case("ns_returns_autoreleased", AT_ns_returns_autoreleased)
.Case("ns_returns_not_retained", AT_ns_returns_not_retained)
.Case("ns_returns_retained", AT_ns_returns_retained)
+ .Case("cf_consumed", AT_cf_consumed)
.Case("cf_returns_not_retained", AT_cf_returns_not_retained)
.Case("cf_returns_retained", AT_cf_returns_retained)
.Case("ownership_returns", AT_ownership_returns)
@@ -126,11 +120,22 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("init_priority", AT_init_priority)
.Case("no_instrument_function", AT_no_instrument_function)
.Case("thiscall", AT_thiscall)
+ .Case("bounded", IgnoredAttribute) // OpenBSD
.Case("pascal", AT_pascal)
.Case("__cdecl", AT_cdecl)
.Case("__stdcall", AT_stdcall)
.Case("__fastcall", AT_fastcall)
.Case("__thiscall", AT_thiscall)
.Case("__pascal", AT_pascal)
+ .Case("constant", AT_constant)
+ .Case("device", AT_device)
+ .Case("global", AT_global)
+ .Case("host", AT_host)
+ .Case("shared", AT_shared)
+ .Case("launch_bounds", AT_launch_bounds)
+ .Case("common", AT_common)
+ .Case("nocommon", AT_nocommon)
+ .Case("opencl_kernel_function", AT_opencl_kernel_function)
+ .Case("uuid", AT_uuid)
.Default(UnknownAttribute);
}
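
The table above is an llvm::StringSwitch (llvm/ADT/StringSwitch.h): .Case() entries checked in order with a .Default() fallback, keeping the spelling-to-kind map declarative. A local stand-in to make the control flow concrete:

    #include <cstdio>
    #include <cstring>

    enum Kind { AT_const, AT_packed, UnknownAttribute };

    struct StringSwitchLite {
      const char *Str;
      Kind Result;
      bool Done;
      explicit StringSwitchLite(const char *S)
          : Str(S), Result(UnknownAttribute), Done(false) {}
      StringSwitchLite &Case(const char *S, Kind K) {
        if (!Done && std::strcmp(Str, S) == 0) { Result = K; Done = true; }
        return *this;
      }
      Kind Default(Kind K) const { return Done ? Result : K; }
    };

    int main() {
      Kind K = StringSwitchLite("__const")
                   .Case("const", AT_const)
                   .Case("__const", AT_const)   // the GCC-header spelling
                   .Case("packed", AT_packed)
                   .Default(UnknownAttribute);
      std::printf("kind = %d\n", (int)K);
    }
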
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
index 58a1627..b7037ce 100644
--- a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -19,6 +19,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang-c/Index.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
@@ -28,9 +29,51 @@ using namespace clang;
using llvm::StringRef;
//===----------------------------------------------------------------------===//
+// Code completion context implementation
+//===----------------------------------------------------------------------===//
+
+bool CodeCompletionContext::wantConstructorResults() const {
+ switch (Kind) {
+ case CCC_Recovery:
+ case CCC_Statement:
+ case CCC_Expression:
+ case CCC_ObjCMessageReceiver:
+ case CCC_ParenthesizedExpression:
+ return true;
+
+ case CCC_TopLevel:
+ case CCC_ObjCInterface:
+ case CCC_ObjCImplementation:
+ case CCC_ObjCIvarList:
+ case CCC_ClassStructUnion:
+ case CCC_MemberAccess:
+ case CCC_EnumTag:
+ case CCC_UnionTag:
+ case CCC_ClassOrStructTag:
+ case CCC_ObjCProtocolName:
+ case CCC_Namespace:
+ case CCC_Type:
+ case CCC_Name:
+ case CCC_PotentiallyQualifiedName:
+ case CCC_MacroName:
+ case CCC_MacroNameUse:
+ case CCC_PreprocessorExpression:
+ case CCC_PreprocessorDirective:
+ case CCC_NaturalLanguage:
+ case CCC_SelectorName:
+ case CCC_TypeQualifiers:
+ case CCC_Other:
+ case CCC_OtherWithMacros:
+ return false;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
// Code completion string implementation
//===----------------------------------------------------------------------===//
-CodeCompletionString::Chunk::Chunk(ChunkKind Kind, llvm::StringRef Text)
+CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text)
: Kind(Kind), Text("")
{
switch (Kind) {
@@ -39,13 +82,9 @@ CodeCompletionString::Chunk::Chunk(ChunkKind Kind, llvm::StringRef Text)
case CK_Placeholder:
case CK_Informative:
case CK_ResultType:
- case CK_CurrentParameter: {
- char *New = new char [Text.size() + 1];
- std::memcpy(New, Text.data(), Text.size());
- New[Text.size()] = '\0';
- this->Text = New;
+ case CK_CurrentParameter:
+ this->Text = Text;
break;
- }
case CK_Optional:
llvm_unreachable("Optional strings cannot be created from text");
@@ -110,112 +149,48 @@ CodeCompletionString::Chunk::Chunk(ChunkKind Kind, llvm::StringRef Text)
}
CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateText(StringRef Text) {
+CodeCompletionString::Chunk::CreateText(const char *Text) {
return Chunk(CK_Text, Text);
}
CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateOptional(
- std::auto_ptr<CodeCompletionString> Optional) {
+CodeCompletionString::Chunk::CreateOptional(CodeCompletionString *Optional) {
Chunk Result;
Result.Kind = CK_Optional;
- Result.Optional = Optional.release();
+ Result.Optional = Optional;
return Result;
}
CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreatePlaceholder(StringRef Placeholder) {
+CodeCompletionString::Chunk::CreatePlaceholder(const char *Placeholder) {
return Chunk(CK_Placeholder, Placeholder);
}
CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateInformative(StringRef Informative) {
+CodeCompletionString::Chunk::CreateInformative(const char *Informative) {
return Chunk(CK_Informative, Informative);
}
CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateResultType(StringRef ResultType) {
+CodeCompletionString::Chunk::CreateResultType(const char *ResultType) {
return Chunk(CK_ResultType, ResultType);
}
CodeCompletionString::Chunk
CodeCompletionString::Chunk::CreateCurrentParameter(
- StringRef CurrentParameter) {
+ const char *CurrentParameter) {
return Chunk(CK_CurrentParameter, CurrentParameter);
}
-CodeCompletionString::Chunk CodeCompletionString::Chunk::Clone() const {
- switch (Kind) {
- case CK_TypedText:
- case CK_Text:
- case CK_Placeholder:
- case CK_Informative:
- case CK_ResultType:
- case CK_CurrentParameter:
- case CK_LeftParen:
- case CK_RightParen:
- case CK_LeftBracket:
- case CK_RightBracket:
- case CK_LeftBrace:
- case CK_RightBrace:
- case CK_LeftAngle:
- case CK_RightAngle:
- case CK_Comma:
- case CK_Colon:
- case CK_SemiColon:
- case CK_Equal:
- case CK_HorizontalSpace:
- case CK_VerticalSpace:
- return Chunk(Kind, Text);
-
- case CK_Optional: {
- std::auto_ptr<CodeCompletionString> Opt(Optional->Clone());
- return CreateOptional(Opt);
- }
- }
-
- // Silence GCC warning.
- return Chunk();
-}
-
-void
-CodeCompletionString::Chunk::Destroy() {
- switch (Kind) {
- case CK_Optional:
- delete Optional;
- break;
-
- case CK_TypedText:
- case CK_Text:
- case CK_Placeholder:
- case CK_Informative:
- case CK_ResultType:
- case CK_CurrentParameter:
- delete [] Text;
- break;
-
- case CK_LeftParen:
- case CK_RightParen:
- case CK_LeftBracket:
- case CK_RightBracket:
- case CK_LeftBrace:
- case CK_RightBrace:
- case CK_LeftAngle:
- case CK_RightAngle:
- case CK_Comma:
- case CK_Colon:
- case CK_SemiColon:
- case CK_Equal:
- case CK_HorizontalSpace:
- case CK_VerticalSpace:
- break;
- }
-}
-
-void CodeCompletionString::clear() {
- std::for_each(Chunks.begin(), Chunks.end(),
- std::mem_fun_ref(&Chunk::Destroy));
- Chunks.clear();
+CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
+ unsigned NumChunks,
+ unsigned Priority,
+ CXAvailabilityKind Availability)
+ : NumChunks(NumChunks), Priority(Priority), Availability(Availability)
+{
+ Chunk *StoredChunks = reinterpret_cast<Chunk *>(this + 1);
+ for (unsigned I = 0; I != NumChunks; ++I)
+ StoredChunks[I] = Chunks[I];
}
std::string CodeCompletionString::getAsString() const {
@@ -247,140 +222,30 @@ const char *CodeCompletionString::getTypedText() const {
return 0;
}
-CodeCompletionString *
-CodeCompletionString::Clone(CodeCompletionString *Result) const {
- if (!Result)
- Result = new CodeCompletionString;
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C)
- Result->AddChunk(C->Clone());
- return Result;
+const char *CodeCompletionAllocator::CopyString(llvm::StringRef String) {
+ char *Mem = (char *)Allocate(String.size() + 1, 1);
+ std::copy(String.begin(), String.end(), Mem);
+ Mem[String.size()] = 0;
+ return Mem;
}
-static void WriteUnsigned(llvm::raw_ostream &OS, unsigned Value) {
- OS.write((const char *)&Value, sizeof(unsigned));
+const char *CodeCompletionAllocator::CopyString(llvm::Twine String) {
+ // FIXME: It would be more efficient to teach Twine to tell us its size and
+ // then add a routine there to fill in an allocated char* with the contents
+ // of the string.
+ llvm::SmallString<128> Data;
+ return CopyString(String.toStringRef(Data));
}
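
CopyString moves completion text into the allocator: size plus one bytes, the characters, then a trailing NUL, so chunks can hold plain const char* pointers that live exactly as long as the allocator. A sketch with a vector-backed arena; unlike a real allocator the vector could move its storage, so the sketch reserves space up front:

    #include <cstdio>
    #include <cstring>
    #include <vector>

    static const char *copyString(std::vector<char> &Arena, const char *S) {
      size_t Len = std::strlen(S);
      size_t Off = Arena.size();
      Arena.resize(Off + Len + 1);
      std::memcpy(&Arena[Off], S, Len);
      Arena[Off + Len] = 0;          // NUL-terminate inside the arena
      return &Arena[Off];
    }

    int main() {
      std::vector<char> Arena;
      Arena.reserve(1024);           // keep earlier pointers valid (sketch)
      const char *P = copyString(Arena, "completion text");
      std::printf("%s\n", P);
    }
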
-static bool ReadUnsigned(const char *&Memory, const char *MemoryEnd,
- unsigned &Value) {
- if (Memory + sizeof(unsigned) > MemoryEnd)
- return true;
-
- memmove(&Value, Memory, sizeof(unsigned));
- Memory += sizeof(unsigned);
- return false;
-}
-
-void CodeCompletionString::Serialize(llvm::raw_ostream &OS) const {
- // Write the number of chunks.
- WriteUnsigned(OS, size());
-
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) {
- WriteUnsigned(OS, C->Kind);
-
- switch (C->Kind) {
- case CK_TypedText:
- case CK_Text:
- case CK_Placeholder:
- case CK_Informative:
- case CK_ResultType:
- case CK_CurrentParameter: {
- const char *Text = C->Text;
- unsigned StrLen = strlen(Text);
- WriteUnsigned(OS, StrLen);
- OS.write(Text, StrLen);
- break;
- }
-
- case CK_Optional:
- C->Optional->Serialize(OS);
- break;
-
- case CK_LeftParen:
- case CK_RightParen:
- case CK_LeftBracket:
- case CK_RightBracket:
- case CK_LeftBrace:
- case CK_RightBrace:
- case CK_LeftAngle:
- case CK_RightAngle:
- case CK_Comma:
- case CK_Colon:
- case CK_SemiColon:
- case CK_Equal:
- case CK_HorizontalSpace:
- case CK_VerticalSpace:
- break;
- }
- }
-}
-
-bool CodeCompletionString::Deserialize(const char *&Str, const char *StrEnd) {
- if (Str == StrEnd || *Str == 0)
- return false;
-
- unsigned NumBlocks;
- if (ReadUnsigned(Str, StrEnd, NumBlocks))
- return false;
-
- for (unsigned I = 0; I != NumBlocks; ++I) {
- if (Str + 1 >= StrEnd)
- break;
-
- // Parse the next kind.
- unsigned KindValue;
- if (ReadUnsigned(Str, StrEnd, KindValue))
- return false;
-
- switch (ChunkKind Kind = (ChunkKind)KindValue) {
- case CK_TypedText:
- case CK_Text:
- case CK_Placeholder:
- case CK_Informative:
- case CK_ResultType:
- case CK_CurrentParameter: {
- unsigned StrLen;
- if (ReadUnsigned(Str, StrEnd, StrLen) || (Str + StrLen > StrEnd))
- return false;
-
- AddChunk(Chunk(Kind, StringRef(Str, StrLen)));
- Str += StrLen;
- break;
- }
-
- case CK_Optional: {
- std::auto_ptr<CodeCompletionString> Optional(new CodeCompletionString());
- if (Optional->Deserialize(Str, StrEnd))
- AddOptionalChunk(Optional);
- break;
- }
-
- case CK_LeftParen:
- case CK_RightParen:
- case CK_LeftBracket:
- case CK_RightBracket:
- case CK_LeftBrace:
- case CK_RightBrace:
- case CK_LeftAngle:
- case CK_RightAngle:
- case CK_Comma:
- case CK_Colon:
- case CK_SemiColon:
- case CK_Equal:
- case CK_HorizontalSpace:
- case CK_VerticalSpace:
- AddChunk(Chunk(Kind));
- break;
- }
- };
-
- return true;
-}
-
-void CodeCompletionResult::Destroy() {
- if (Kind == RK_Pattern) {
- delete Pattern;
- Pattern = 0;
- }
+CodeCompletionString *CodeCompletionBuilder::TakeString() {
+ void *Mem = Allocator.Allocate(
+ sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size(),
+ llvm::alignOf<CodeCompletionString>());
+ CodeCompletionString *Result
+ = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
+ Priority, Availability);
+ Chunks.clear();
+ return Result;
}
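
TakeString() carves one allocation holding the CodeCompletionString header and its Chunk array back to back, and the constructor above copies the chunks into the space at (this + 1). A compact sketch of that header-plus-trailing-array layout with placement new:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    // Header and payload in one allocation: the element array starts at
    // (this + 1), exactly as the constructor above assumes.
    struct Packed {
      unsigned N;
      Packed(const int *Vals, unsigned Count) : N(Count) {
        int *Tail = reinterpret_cast<int *>(this + 1);
        for (unsigned I = 0; I != N; ++I)
          Tail[I] = Vals[I];
      }
      const int *data() const { return reinterpret_cast<const int *>(this + 1); }
    };

    int main() {
      int Vals[] = {10, 20, 30};
      void *Mem = std::malloc(sizeof(Packed) + sizeof(Vals));
      Packed *P = new (Mem) Packed(Vals, 3);  // placement new into the block
      std::printf("%d %d %d\n", P->data()[0], P->data()[1], P->data()[2]);
      std::free(Mem);
    }
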
unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
@@ -389,8 +254,15 @@ unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
// Context-based decisions.
DeclContext *DC = ND->getDeclContext()->getRedeclContext();
- if (DC->isFunctionOrMethod() || isa<BlockDecl>(DC))
+ if (DC->isFunctionOrMethod() || isa<BlockDecl>(DC)) {
+ // _cmd is relatively rare
+ if (ImplicitParamDecl *ImplicitParam = dyn_cast<ImplicitParamDecl>(ND))
+ if (ImplicitParam->getIdentifier() &&
+ ImplicitParam->getIdentifier()->isStr("_cmd"))
+ return CCP_ObjC_cmd;
+
return CCP_LocalDeclaration;
+ }
if (DC->isRecord() || isa<ObjCContainerDecl>(DC))
return CCP_MemberDeclaration;
@@ -399,6 +271,7 @@ unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
return CCP_Constant;
if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
return CCP_Type;
+
return CCP_Declaration;
}
@@ -454,9 +327,8 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
if (Results[I].Hidden)
OS << " (Hidden)";
if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef)) {
+ = Results[I].CreateCodeCompletionString(SemaRef, Allocator)) {
OS << " : " << CCS->getAsString();
- delete CCS;
}
OS << '\n';
@@ -469,9 +341,8 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
case CodeCompletionResult::RK_Macro: {
OS << Results[I].Macro->getName();
if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef)) {
+ = Results[I].CreateCodeCompletionString(SemaRef, Allocator)) {
OS << " : " << CCS->getAsString();
- delete CCS;
}
OS << '\n';
break;
@@ -493,9 +364,9 @@ PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
unsigned NumCandidates) {
for (unsigned I = 0; I != NumCandidates; ++I) {
if (CodeCompletionString *CCS
- = Candidates[I].CreateSignatureString(CurrentArg, SemaRef)) {
+ = Candidates[I].CreateSignatureString(CurrentArg, SemaRef,
+ Allocator)) {
OS << "OVERLOAD: " << CCS->getAsString() << "\n";
- delete CCS;
}
}
}
@@ -587,37 +458,3 @@ bool clang::operator<(const CodeCompletionResult &X,
return false;
}
-
-void
-CIndexCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
- CodeCompletionContext Context,
- CodeCompletionResult *Results,
- unsigned NumResults) {
- // Print the results.
- for (unsigned I = 0; I != NumResults; ++I) {
- WriteUnsigned(OS, Results[I].CursorKind);
- WriteUnsigned(OS, Results[I].Priority);
- WriteUnsigned(OS, Results[I].Availability);
- CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(SemaRef);
- assert(CCS && "No code-completion string?");
- CCS->Serialize(OS);
- delete CCS;
- }
-}
-
-void
-CIndexCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
- unsigned CurrentArg,
- OverloadCandidate *Candidates,
- unsigned NumCandidates) {
- for (unsigned I = 0; I != NumCandidates; ++I) {
- WriteUnsigned(OS, CXCursor_NotImplemented);
- WriteUnsigned(OS, /*Priority=*/I);
- WriteUnsigned(OS, /*Availability=*/CXAvailability_Available);
- CodeCompletionString *CCS
- = Candidates[I].CreateSignatureString(CurrentArg, SemaRef);
- assert(CCS && "No code-completion string?");
- CCS->Serialize(OS);
- delete CCS;
- }
-}
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
index b46e8af..bc289ec 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -23,8 +23,8 @@ using namespace clang;
static DiagnosticBuilder Diag(Diagnostic &D, SourceLocation Loc,
- SourceManager &SrcMgr, unsigned DiagID) {
- return D.Report(FullSourceLoc(Loc, SrcMgr), DiagID);
+ unsigned DiagID) {
+ return D.Report(Loc, DiagID);
}
@@ -46,11 +46,14 @@ void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
/// "TheDeclarator" is the declarator that this will be added to.
-DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
+DeclaratorChunk DeclaratorChunk::getFunction(const ParsedAttributes &attrs,
+ bool hasProto, bool isVariadic,
SourceLocation EllipsisLoc,
ParamInfo *ArgInfo,
unsigned NumArgs,
unsigned TypeQuals,
+ bool RefQualifierIsLvalueRef,
+ SourceLocation RefQualifierLoc,
bool hasExceptionSpec,
SourceLocation ThrowLoc,
bool hasAnyExceptionSpec,
@@ -59,11 +62,13 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
unsigned NumExceptions,
SourceLocation LPLoc,
SourceLocation RPLoc,
- Declarator &TheDeclarator) {
+ Declarator &TheDeclarator,
+ ParsedType TrailingReturnType) {
DeclaratorChunk I;
I.Kind = Function;
I.Loc = LPLoc;
I.EndLoc = RPLoc;
+ I.Fun.AttrList = attrs.getList();
I.Fun.hasPrototype = hasProto;
I.Fun.isVariadic = isVariadic;
I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
@@ -71,11 +76,14 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.Fun.TypeQuals = TypeQuals;
I.Fun.NumArgs = NumArgs;
I.Fun.ArgInfo = 0;
+ I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
+ I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
I.Fun.hasExceptionSpec = hasExceptionSpec;
I.Fun.ThrowLoc = ThrowLoc.getRawEncoding();
I.Fun.hasAnyExceptionSpec = hasAnyExceptionSpec;
I.Fun.NumExceptions = NumExceptions;
I.Fun.Exceptions = 0;
+ I.Fun.TrailingReturnType = TrailingReturnType.getAsOpaquePtr();
// new[] an argument array if needed.
if (NumArgs) {
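
For illustration (not part of the patch), the new RefQualifierIsLValueRef, RefQualifierLoc, and TrailingReturnType fields record C++0x declarator syntax like the following hypothetical overloads:

    struct Widget {
      // Lvalue ref-qualifier with a trailing return type.
      auto name() & -> const char * { return "lvalue Widget"; }
      // Rvalue ref-qualifier: selected when *this is an rvalue.
      auto name() && -> const char * { return "rvalue Widget"; }
    };
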
@@ -218,7 +226,25 @@ const char *DeclSpec::getSpecifierName(TQ T) {
bool DeclSpec::SetStorageClassSpec(SCS S, SourceLocation Loc,
const char *&PrevSpec,
- unsigned &DiagID) {
+ unsigned &DiagID,
+ const LangOptions &Lang) {
+ // OpenCL prohibits extern, auto, register, and static
+ // It seems sensible to prohibit private_extern too
+ if (Lang.OpenCL) {
+ switch (S) {
+ case SCS_extern:
+ case SCS_private_extern:
+ case SCS_auto:
+ case SCS_register:
+ case SCS_static:
+ DiagID = diag::err_not_opencl_storage_class_specifier;
+ PrevSpec = getSpecifierName(S);
+ return true;
+ default:
+ break;
+ }
+ }
+
if (StorageClassSpec != SCS_unspecified) {
// Changing storage class is allowed only if the previous one
// was the 'extern' that is part of a linkage specification and
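
A minimal sketch of what the new check rejects, assuming an OpenCL compilation (Lang.OpenCL set); the kernel itself is hypothetical:

    __kernel void count_calls(void) {
      static int calls;   // error: err_not_opencl_storage_class_specifier
      register int tmp;   // likewise rejected in OpenCL
    }
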
@@ -481,7 +507,7 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
writtenBS.Type = getTypeSpecType();
// Search the list of attributes for the presence of a mode attribute.
writtenBS.ModeAttr = false;
- AttributeList* attrs = getAttributes();
+ AttributeList* attrs = getAttributes().getList();
while (attrs) {
if (attrs->getKind() == AttributeList::AT_mode) {
writtenBS.ModeAttr = true;
@@ -510,28 +536,27 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
SaveStorageSpecifierAsWritten();
// Check the type specifier components first.
- SourceManager &SrcMgr = PP.getSourceManager();
// Validate and finalize AltiVec vector declspec.
if (TypeAltiVecVector) {
if (TypeAltiVecBool) {
// Sign specifiers are not allowed with vector bool. (PIM 2.1)
if (TypeSpecSign != TSS_unspecified) {
- Diag(D, TSSLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ Diag(D, TSSLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSS)TypeSpecSign);
}
// Only char/int are valid with vector bool. (PIM 2.1)
if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
(TypeSpecType != TST_int)) || TypeAltiVecPixel) {
- Diag(D, TSTLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ Diag(D, TSTLoc, diag::err_invalid_vector_bool_decl_spec)
<< (TypeAltiVecPixel ? "__pixel" :
getSpecifierName((TST)TypeSpecType));
}
// Only 'short' is valid with vector bool. (PIM 2.1)
if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short))
- Diag(D, TSWLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSW)TypeSpecWidth);
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
@@ -555,7 +580,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
- Diag(D, TSSLoc, SrcMgr, diag::err_invalid_sign_spec)
+ Diag(D, TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType);
// signed double -> double.
TypeSpecSign = TSS_unspecified;
@@ -570,7 +595,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // short -> short int, long long -> long long int.
else if (TypeSpecType != TST_int) {
- Diag(D, TSWLoc, SrcMgr,
+ Diag(D, TSWLoc,
TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec
: diag::err_invalid_longlong_spec)
<< getSpecifierName((TST)TypeSpecType);
@@ -582,7 +607,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // long -> long int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
- Diag(D, TSWLoc, SrcMgr, diag::err_invalid_long_spec)
+ Diag(D, TSWLoc, diag::err_invalid_long_spec)
<< getSpecifierName((TST)TypeSpecType);
TypeSpecType = TST_int;
TypeSpecOwned = false;
@@ -594,16 +619,16 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
// disallow their use. Need information about the backend.
if (TypeSpecComplex != TSC_unspecified) {
if (TypeSpecType == TST_unspecified) {
- Diag(D, TSCLoc, SrcMgr, diag::ext_plain_complex)
+ Diag(D, TSCLoc, diag::ext_plain_complex)
<< FixItHint::CreateInsertion(
PP.getLocForEndOfToken(getTypeSpecComplexLoc()),
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
// Note that this intentionally doesn't include _Complex _Bool.
- Diag(D, TSTLoc, SrcMgr, diag::ext_integer_complex);
+ Diag(D, TSTLoc, diag::ext_integer_complex);
} else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
- Diag(D, TSCLoc, SrcMgr, diag::err_invalid_complex_spec)
+ Diag(D, TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType);
TypeSpecComplex = TSC_unspecified;
}
@@ -619,7 +644,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
SourceLocation SCLoc = getStorageClassSpecLoc();
SourceLocation SCEndLoc = SCLoc.getFileLocWithOffset(strlen(SpecName));
- Diag(D, SCLoc, SrcMgr, diag::err_friend_storage_spec)
+ Diag(D, SCLoc, diag::err_friend_storage_spec)
<< SpecName
<< FixItHint::CreateRemoval(SourceRange(SCLoc, SCEndLoc));
@@ -665,3 +690,58 @@ void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
EndLocation = SymbolLocations[I];
}
}
+
+bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (Specifiers & VS) {
+ PrevSpec = getSpecifierName(VS);
+ return true;
+ }
+
+ Specifiers |= VS;
+
+ switch (VS) {
+ default: assert(0 && "Unknown specifier!");
+ case VS_Override: VS_overrideLoc = Loc; break;
+ case VS_Final: VS_finalLoc = Loc; break;
+ case VS_New: VS_newLoc = Loc; break;
+ }
+
+ return false;
+}
+
+const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
+ switch (VS) {
+ default: assert(0 && "Unknown specifier");
+ case VS_Override: return "override";
+ case VS_Final: return "final";
+ case VS_New: return "new";
+ }
+}
+
+bool ClassVirtSpecifiers::SetSpecifier(Specifier CVS, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (Specifiers & CVS) {
+ PrevSpec = getSpecifierName(CVS);
+ return true;
+ }
+
+ Specifiers |= CVS;
+
+ switch (CVS) {
+ default: assert(0 && "Unknown specifier!");
+ case CVS_Final: CVS_finalLoc = Loc; break;
+ case CVS_Explicit: CVS_explicitLoc = Loc; break;
+ }
+
+ return false;
+}
+
+const char *ClassVirtSpecifiers::getSpecifierName(Specifier CVS) {
+ switch (CVS) {
+ default: assert(0 && "Unknown specifier");
+ case CVS_Final: return "final";
+ case CVS_Explicit: return "explicit";
+ }
+}
+
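
For reference, the member virt-specifiers (override, final, new) and the class virt-specifiers (final, explicit) parsed above come from the C++0x drafts of the period; override and final survived into the final standard, while the member 'new' and class-level 'explicit' forms were later dropped. A sketch of the surviving syntax:

    struct Base {
      virtual void draw();
    };
    struct Circle final : Base {  // class virt-specifier: no further derivation
      void draw() override;       // member virt-specifier: must override Base::draw
    };
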
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
index b23f615..b73f0e9 100644
--- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -62,7 +62,7 @@ class JumpScopeChecker {
llvm::SmallVector<Stmt*, 16> Jumps;
llvm::SmallVector<IndirectGotoStmt*, 4> IndirectJumps;
- llvm::SmallVector<LabelStmt*, 4> IndirectJumpTargets;
+ llvm::SmallVector<LabelDecl*, 4> IndirectJumpTargets;
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
@@ -71,7 +71,7 @@ private:
void VerifyJumps();
void VerifyIndirectJumps();
void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
- LabelStmt *Target, unsigned TargetScope);
+ LabelDecl *Target, unsigned TargetScope);
void CheckJump(Stmt *From, Stmt *To,
SourceLocation DiagLoc, unsigned JumpDiag);
@@ -186,6 +186,17 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
break;
case Stmt::IndirectGotoStmtClass:
+ // "goto *&&lbl;" is a special case which we treat as equivalent
+ // to a normal goto. In addition, we don't calculate scope in the
+ // operand (to avoid recording the address-of-label use), which
+ // works only because of the restricted set of expressions which
+ // we detect as constant targets.
+ if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ return;
+ }
+
LabelAndGotoScopes[S] = ParentScope;
IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
break;
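
Both forms distinguished here rely on the GNU labels-as-values extension; a small sketch:

    void dispatch(int cond) {
      void *target = cond ? &&first : &&second;
      goto *target;     // true indirect goto: checked via VerifyIndirectJumps
    first:
      return;
    second:
      goto *&&first;    // constant target: handled like a plain 'goto first;'
    }
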
@@ -210,8 +221,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
break;
}
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E;
- ++CI) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI) {
if (SkipFirstSubStmt) {
SkipFirstSubStmt = false;
continue;
@@ -225,12 +235,12 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
// order to avoid blowing out the stack.
while (true) {
Stmt *Next;
- if (isa<CaseStmt>(SubStmt))
- Next = cast<CaseStmt>(SubStmt)->getSubStmt();
- else if (isa<DefaultStmt>(SubStmt))
- Next = cast<DefaultStmt>(SubStmt)->getSubStmt();
- else if (isa<LabelStmt>(SubStmt))
- Next = cast<LabelStmt>(SubStmt)->getSubStmt();
+ if (CaseStmt *CS = dyn_cast<CaseStmt>(SubStmt))
+ Next = CS->getSubStmt();
+ else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SubStmt))
+ Next = DS->getSubStmt();
+ else if (LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
+ Next = LS->getSubStmt();
else
break;
@@ -336,7 +346,15 @@ void JumpScopeChecker::VerifyJumps() {
// With a goto,
if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
- CheckJump(GS, GS->getLabel(), GS->getGotoLoc(),
+ CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
+ diag::err_goto_into_protected_scope);
+ continue;
+ }
+
+ // We only get indirect gotos here when they have a constant target.
+ if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
+ LabelDecl *Target = IGS->getConstantTarget();
+ CheckJump(IGS, Target->getStmt(), IGS->getGotoLoc(),
diag::err_goto_into_protected_scope);
continue;
}
@@ -406,15 +424,15 @@ void JumpScopeChecker::VerifyIndirectJumps() {
// Collect a single representative of every scope containing a
// label whose address was taken somewhere in the function.
// For most code bases, there will be only one such scope.
- llvm::DenseMap<unsigned, LabelStmt*> TargetScopes;
- for (llvm::SmallVectorImpl<LabelStmt*>::iterator
+ llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
+ for (llvm::SmallVectorImpl<LabelDecl*>::iterator
I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
I != E; ++I) {
- LabelStmt *TheLabel = *I;
- assert(LabelAndGotoScopes.count(TheLabel) &&
+ LabelDecl *TheLabel = *I;
+ assert(LabelAndGotoScopes.count(TheLabel->getStmt()) &&
"Referenced label didn't get added to scopes?");
- unsigned LabelScope = LabelAndGotoScopes[TheLabel];
- LabelStmt *&Target = TargetScopes[LabelScope];
+ unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
+ LabelDecl *&Target = TargetScopes[LabelScope];
if (!Target) Target = TheLabel;
}
@@ -427,10 +445,10 @@ void JumpScopeChecker::VerifyIndirectJumps() {
// entered, then verify that every jump scope can be trivially
// exited to reach a scope in S.
llvm::BitVector Reachable(Scopes.size(), false);
- for (llvm::DenseMap<unsigned,LabelStmt*>::iterator
+ for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
unsigned TargetScope = TI->first;
- LabelStmt *TargetLabel = TI->second;
+ LabelDecl *TargetLabel = TI->second;
Reachable.reset();
@@ -493,12 +511,12 @@ void JumpScopeChecker::VerifyIndirectJumps() {
/// Diagnose an indirect jump which is known to cross scopes.
void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
unsigned JumpScope,
- LabelStmt *Target,
+ LabelDecl *Target,
unsigned TargetScope) {
assert(JumpScope != TargetScope);
- S.Diag(Jump->getGotoLoc(), diag::warn_indirect_goto_in_protected_scope);
- S.Diag(Target->getIdentLoc(), diag::note_indirect_goto_target);
+ S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
unsigned Common = GetDeepestCommonScope(JumpScope, TargetScope);
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 17817d4..23a3c24 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -19,7 +19,9 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/APFloat.h"
#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
@@ -29,6 +31,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
@@ -37,15 +40,14 @@ using namespace sema;
FunctionScopeInfo::~FunctionScopeInfo() { }
-void FunctionScopeInfo::Clear(unsigned NumErrors) {
+void FunctionScopeInfo::Clear() {
HasBranchProtectedScope = false;
HasBranchIntoScope = false;
HasIndirectGoto = false;
- LabelMap.clear();
SwitchStack.clear();
Returns.clear();
- NumErrorsAtStartOfFunction = NumErrors;
+ ErrorTrap.reset();
}
BlockScopeInfo::~BlockScopeInfo() { }
@@ -129,15 +131,18 @@ void Sema::ActOnTranslationUnitScope(Scope *S) {
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
bool CompleteTranslationUnit,
CodeCompleteConsumer *CodeCompleter)
- : TheTargetAttributesSema(0),
+ : TheTargetAttributesSema(0), FPFeatures(pp.getLangOptions()),
LangOpts(pp.getLangOptions()), PP(pp), Context(ctxt), Consumer(consumer),
Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
ExternalSource(0), CodeCompleter(CodeCompleter), CurContext(0),
- PackContext(0), VisContext(0), ParsingDeclDepth(0),
- IdResolver(pp.getLangOptions()), GlobalNewDeleteDeclared(false),
+ PackContext(0), VisContext(0),
+ IdResolver(pp.getLangOptions()), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
+ GlobalNewDeleteDeclared(false),
CompleteTranslationUnit(CompleteTranslationUnit),
- NumSFINAEErrors(0), SuppressAccessChecking(false),
- NonInstantiationEntries(0), CurrentInstantiationScope(0), TyposCorrected(0),
+ NumSFINAEErrors(0), SuppressAccessChecking(false),
+ AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
+ NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
+ CurrentInstantiationScope(0), TyposCorrected(0),
AnalysisWarnings(*this)
{
TUScope = 0;
@@ -151,7 +156,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ExprEvalContexts.push_back(
ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0));
- FunctionScopes.push_back(new FunctionScopeInfo(Diags.getNumErrors()));
+ FunctionScopes.push_back(new FunctionScopeInfo(Diags));
}
void Sema::Initialize() {
@@ -201,15 +206,6 @@ void Sema::ImpCastExprToType(Expr *&Expr, QualType Ty,
if (ExprTy == TypeTy)
return;
- if (Expr->getType()->isPointerType() && Ty->isPointerType()) {
- QualType ExprBaseType = cast<PointerType>(ExprTy)->getPointeeType();
- QualType BaseType = cast<PointerType>(TypeTy)->getPointeeType();
- if (ExprBaseType.getAddressSpace() != BaseType.getAddressSpace()) {
- Diag(Expr->getExprLoc(), diag::err_implicit_pointer_address_space_cast)
- << Expr->getSourceRange();
- }
- }
-
// If this is a derived-to-base cast to a through a virtual base, we
// need a vtable.
if (Kind == CK_DerivedToBase &&
@@ -275,32 +271,103 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
return false;
}
+namespace {
+ struct UndefinedInternal {
+ NamedDecl *decl;
+ FullSourceLoc useLoc;
+
+ UndefinedInternal(NamedDecl *decl, FullSourceLoc useLoc)
+ : decl(decl), useLoc(useLoc) {}
+ };
+
+ bool operator<(const UndefinedInternal &l, const UndefinedInternal &r) {
+ return l.useLoc.isBeforeInTranslationUnitThan(r.useLoc);
+ }
+}
+
+/// checkUndefinedInternals - Check for undefined objects with internal linkage.
+static void checkUndefinedInternals(Sema &S) {
+ if (S.UndefinedInternals.empty()) return;
+
+ // Collect all the still-undefined entities with internal linkage.
+ llvm::SmallVector<UndefinedInternal, 16> undefined;
+ for (llvm::DenseMap<NamedDecl*,SourceLocation>::iterator
+ i = S.UndefinedInternals.begin(), e = S.UndefinedInternals.end();
+ i != e; ++i) {
+ NamedDecl *decl = i->first;
+
+ // Ignore declarations that have become invalid.
+ if (decl->isInvalidDecl()) continue;
+
+ // __attribute__((weakref)) is basically a definition.
+ if (decl->hasAttr<WeakRefAttr>()) continue;
+
+ if (FunctionDecl *fn = dyn_cast<FunctionDecl>(decl)) {
+ if (fn->isPure() || fn->hasBody())
+ continue;
+ } else {
+ if (cast<VarDecl>(decl)->hasDefinition() != VarDecl::DeclarationOnly)
+ continue;
+ }
+
+ // We build a FullSourceLoc so that we can sort with array_pod_sort.
+ FullSourceLoc loc(i->second, S.Context.getSourceManager());
+ undefined.push_back(UndefinedInternal(decl, loc));
+ }
+
+ if (undefined.empty()) return;
+
+ // Sort (in order of use site) so that we're not (as) dependent on
+ // the iteration order through an llvm::DenseMap.
+ llvm::array_pod_sort(undefined.begin(), undefined.end());
+
+ for (llvm::SmallVectorImpl<UndefinedInternal>::iterator
+ i = undefined.begin(), e = undefined.end(); i != e; ++i) {
+ NamedDecl *decl = i->decl;
+ S.Diag(decl->getLocation(), diag::warn_undefined_internal)
+ << isa<VarDecl>(decl) << decl;
+ S.Diag(i->useLoc, diag::note_used_here);
+ }
+}
+
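
A translation unit the new check flags — a used but never-defined entity with internal linkage (hypothetical):

    static int helper();   // warn_undefined_internal points at this declaration

    int main() {
      return helper();     // note_used_here points at this call
    }
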
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
// At PCH writing, implicit instantiations and VTable handling info are
// stored and performed when the PCH is included.
- if (CompleteTranslationUnit)
- while (1) {
- // C++: Perform implicit template instantiations.
- //
- // FIXME: When we perform these implicit instantiations, we do not
- // carefully keep track of the point of instantiation (C++ [temp.point]).
- // This means that name lookup that occurs within the template
- // instantiation will always happen at the end of the translation unit,
- // so it will find some names that should not be found. Although this is
- // common behavior for C++ compilers, it is technically wrong. In the
- // future, we either need to be able to filter the results of name lookup
- // or we need to perform template instantiations earlier.
- PerformPendingInstantiations();
-
- /// If DefinedUsedVTables ends up marking any virtual member
- /// functions it might lead to more pending template
- /// instantiations, which is why we need to loop here.
- if (!DefineUsedVTables())
- break;
+ if (CompleteTranslationUnit) {
+ // If any dynamic classes have their key function defined within
+ // this translation unit, then those vtables are considered "used" and must
+ // be emitted.
+ for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) {
+ assert(!DynamicClasses[I]->isDependentType() &&
+ "Should not see dependent types here!");
+ if (const CXXMethodDecl *KeyFunction
+ = Context.getKeyFunction(DynamicClasses[I])) {
+ const FunctionDecl *Definition = 0;
+ if (KeyFunction->hasBody(Definition))
+ MarkVTableUsed(Definition->getLocation(), DynamicClasses[I], true);
+ }
}
+
+ // If DefineUsedVTables ends up marking any virtual member functions it
+ // might lead to more pending template instantiations, which we then need
+ // to instantiate.
+ DefineUsedVTables();
+
+ // C++: Perform implicit template instantiations.
+ //
+ // FIXME: When we perform these implicit instantiations, we do not
+ // carefully keep track of the point of instantiation (C++ [temp.point]).
+ // This means that name lookup that occurs within the template
+ // instantiation will always happen at the end of the translation unit,
+ // so it will find some names that should not be found. Although this is
+ // common behavior for C++ compilers, it is technically wrong. In the
+ // future, we either need to be able to filter the results of name lookup
+ // or we need to perform template instantiations earlier.
+ PerformPendingInstantiations();
+ }
// Remove file scoped decls that turned out to be used.
UnusedFileScopedDecls.erase(std::remove_if(UnusedFileScopedDecls.begin(),
@@ -371,26 +438,32 @@ void Sema::ActOnEndOfTranslationUnit() {
Consumer.CompleteTentativeDefinition(VD);
}
-
- // Output warning for unused file scoped decls.
- for (llvm::SmallVectorImpl<const DeclaratorDecl*>::iterator
- I = UnusedFileScopedDecls.begin(),
- E = UnusedFileScopedDecls.end(); I != E; ++I) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
- const FunctionDecl *DiagD;
- if (!FD->hasBody(DiagD))
- DiagD = FD;
- Diag(DiagD->getLocation(),
- isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
- : diag::warn_unused_function)
- << DiagD->getDeclName();
- } else {
- const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
- if (!DiagD)
- DiagD = cast<VarDecl>(*I);
- Diag(DiagD->getLocation(), diag::warn_unused_variable)
- << DiagD->getDeclName();
+
+ // If there were errors, disable 'unused' warnings since they will mostly be
+ // noise.
+ if (!Diags.hasErrorOccurred()) {
+ // Output warning for unused file scoped decls.
+ for (llvm::SmallVectorImpl<const DeclaratorDecl*>::iterator
+ I = UnusedFileScopedDecls.begin(),
+ E = UnusedFileScopedDecls.end(); I != E; ++I) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionDecl *DiagD;
+ if (!FD->hasBody(DiagD))
+ DiagD = FD;
+ Diag(DiagD->getLocation(),
+ isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
+ : diag::warn_unused_function)
+ << DiagD->getDeclName();
+ } else {
+ const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
+ if (!DiagD)
+ DiagD = cast<VarDecl>(*I);
+ Diag(DiagD->getLocation(), diag::warn_unused_variable)
+ << DiagD->getDeclName();
+ }
}
+
+ checkUndefinedInternals(*this);
}
TUScope = 0;
@@ -431,6 +504,50 @@ NamedDecl *Sema::getCurFunctionOrMethodDecl() {
}
Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
+ if (!isActive())
+ return;
+
+ if (llvm::Optional<TemplateDeductionInfo*> Info = SemaRef.isSFINAEContext()) {
+ switch (DiagnosticIDs::getDiagnosticSFINAEResponse(getDiagID())) {
+ case DiagnosticIDs::SFINAE_Report:
+ // Fall through; we'll report the diagnostic below.
+ break;
+
+ case DiagnosticIDs::SFINAE_AccessControl:
+ // Unless access checking is specifically called out as a SFINAE
+ // error, report this diagnostic.
+ if (!SemaRef.AccessCheckingSFINAE)
+ break;
+
+ case DiagnosticIDs::SFINAE_SubstitutionFailure:
+ // Count this failure so that we know that template argument deduction
+ // has failed.
+ ++SemaRef.NumSFINAEErrors;
+ SemaRef.Diags.setLastDiagnosticIgnored();
+ SemaRef.Diags.Clear();
+ Clear();
+ return;
+
+ case DiagnosticIDs::SFINAE_Suppress:
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ FlushCounts();
+ DiagnosticInfo DiagInfo(&SemaRef.Diags);
+
+ if (*Info)
+ (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo,
+ SemaRef.Context.getDiagAllocator()));
+
+ // Suppress this diagnostic.
+ SemaRef.Diags.setLastDiagnosticIgnored();
+ SemaRef.Diags.Clear();
+ Clear();
+ return;
+ }
+ }
+
+ // Emit the diagnostic.
if (!this->Emit())
return;
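
The SFINAE_SubstitutionFailure branch above is what keeps deduction failures like the following sketch silent — the failed candidate is counted and discarded rather than reported:

    template <typename T>
    auto probe(T t) -> decltype(t.size()) { return t.size(); } // fails for int
    long probe(...) { return -1; }                             // fallback

    long n = probe(42);  // substitution failure for T = int is suppressed and
                         // overload resolution quietly picks the fallback
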
@@ -438,7 +555,7 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
// that is different from the last template instantiation where
// we emitted an error, print a template instantiation
// backtrace.
- if (!SemaRef.Diags.isBuiltinNote(DiagID) &&
+ if (!DiagnosticIDs::isBuiltinNote(DiagID) &&
!SemaRef.ActiveTemplateInstantiations.empty() &&
SemaRef.ActiveTemplateInstantiations.back()
!= SemaRef.LastTemplateInstantiationErrorContext) {
@@ -449,26 +566,7 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
}
Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID) {
- if (isSFINAEContext()) {
- switch (Diagnostic::getDiagnosticSFINAEResponse(DiagID)) {
- case Diagnostic::SFINAE_Report:
- // Fall through; we'll report the diagnostic below.
- break;
-
- case Diagnostic::SFINAE_SubstitutionFailure:
- // Count this failure so that we know that template argument deduction
- // has failed.
- ++NumSFINAEErrors;
- // Fall through
-
- case Diagnostic::SFINAE_Suppress:
- // Suppress this diagnostic.
- Diags.setLastDiagnosticIgnored();
- return SemaDiagnosticBuilder(*this);
- }
- }
-
- DiagnosticBuilder DB = Diags.Report(FullSourceLoc(Loc, SourceMgr), DiagID);
+ DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
@@ -514,17 +612,16 @@ void Sema::PushFunctionScope() {
if (FunctionScopes.size() == 1) {
// Use the "top" function scope rather than having to allocate
// memory for a new scope.
- FunctionScopes.back()->Clear(getDiagnostics().getNumErrors());
+ FunctionScopes.back()->Clear();
FunctionScopes.push_back(FunctionScopes.back());
return;
}
- FunctionScopes.push_back(
- new FunctionScopeInfo(getDiagnostics().getNumErrors()));
+ FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
}
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
- FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics().getNumErrors(),
+ FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
BlockScope, Block));
}
@@ -538,8 +635,7 @@ void Sema::PopFunctionOrBlockScope() {
/// \brief Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyErrorsInThisFunction() const {
- return getCurFunction()->NumErrorsAtStartOfFunction
- != getDiagnostics().getNumErrors();
+ return getCurFunction()->ErrorTrap.hasErrorOccurred();
}
BlockScopeInfo *Sema::getCurBlock() {
@@ -552,6 +648,11 @@ BlockScopeInfo *Sema::getCurBlock() {
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
+std::pair<ObjCMethodList, ObjCMethodList>
+ExternalSemaSource::ReadMethodPool(Selector Sel) {
+ return std::pair<ObjCMethodList, ObjCMethodList>();
+}
+
void PrettyDeclStackTraceEntry::print(llvm::raw_ostream &OS) const {
SourceLocation Loc = this->Loc;
if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
index e629f0f..4f9bf1c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -134,7 +134,7 @@ struct EffectiveContext {
bool Dependent;
};
-/// Like sema:;AccessedEntity, but kindly lets us scribble all over
+/// Like sema::AccessedEntity, but kindly lets us scribble all over
/// it.
struct AccessTarget : public AccessedEntity {
AccessTarget(const AccessedEntity &Entity)
@@ -516,6 +516,11 @@ static AccessResult MatchesFriend(Sema &S,
static AccessResult MatchesFriend(Sema &S,
const EffectiveContext &EC,
FriendDecl *FriendD) {
+ // Whitelist accesses if there's an invalid or unsupported friend
+ // declaration.
+ if (FriendD->isInvalidDecl() || FriendD->isUnsupportedFriend())
+ return AR_accessible;
+
if (TypeSourceInfo *T = FriendD->getFriendType())
return MatchesFriend(S, EC, T->getType()->getCanonicalTypeUnqualified());
@@ -1003,9 +1008,51 @@ static void DiagnoseAccessPath(Sema &S,
TryDiagnoseProtectedAccess(S, EC, Entity))
return;
+ // Find an original declaration.
+ while (D->isOutOfLine()) {
+ NamedDecl *PrevDecl = 0;
+ if (isa<VarDecl>(D))
+ PrevDecl = cast<VarDecl>(D)->getPreviousDeclaration();
+ else if (isa<FunctionDecl>(D))
+ PrevDecl = cast<FunctionDecl>(D)->getPreviousDeclaration();
+ else if (isa<TypedefDecl>(D))
+ PrevDecl = cast<TypedefDecl>(D)->getPreviousDeclaration();
+ else if (isa<TagDecl>(D)) {
+ if (isa<RecordDecl>(D) && cast<RecordDecl>(D)->isInjectedClassName())
+ break;
+ PrevDecl = cast<TagDecl>(D)->getPreviousDeclaration();
+ }
+ if (!PrevDecl) break;
+ D = PrevDecl;
+ }
+
+ CXXRecordDecl *DeclaringClass = FindDeclaringClass(D);
+ Decl *ImmediateChild;
+ if (D->getDeclContext() == DeclaringClass)
+ ImmediateChild = D;
+ else {
+ DeclContext *DC = D->getDeclContext();
+ while (DC->getParent() != DeclaringClass)
+ DC = DC->getParent();
+ ImmediateChild = cast<Decl>(DC);
+ }
+
+ // Check whether there's an AccessSpecDecl preceding this in the
+ // chain of the DeclContext.
+ bool Implicit = true;
+ for (CXXRecordDecl::decl_iterator
+ I = DeclaringClass->decls_begin(), E = DeclaringClass->decls_end();
+ I != E; ++I) {
+ if (*I == ImmediateChild) break;
+ if (isa<AccessSpecDecl>(*I)) {
+ Implicit = false;
+ break;
+ }
+ }
+
S.Diag(D->getLocation(), diag::note_access_natural)
<< (unsigned) (Access == AS_protected)
- << /*FIXME: not implicitly*/ 0;
+ << Implicit;
return;
}
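
The new Implicit computation distinguishes, roughly, these two situations (hypothetical classes):

    class C1 {
      int x;      // no AccessSpecDecl precedes 'x' -> Implicit stays true
    };
    class C2 {
    private:
      int y;      // an AccessSpecDecl precedes 'y' -> Implicit becomes false
    };
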
@@ -1213,13 +1260,19 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
if (S.SuppressAccessChecking)
return Sema::AR_accessible;
- // If we're currently parsing a top-level declaration, delay
- // diagnostics. This is the only case where parsing a declaration
- // can actually change our effective context for the purposes of
- // access control.
- if (S.CurContext->isFileContext() && S.ParsingDeclDepth) {
- S.DelayedDiagnostics.push_back(
- DelayedDiagnostic::makeAccess(Loc, Entity));
+ // If we're currently parsing a declaration, we may need to delay
+ // access control checking, because our effective context might be
+ // different based on what the declaration comes out as.
+ //
+ // For example, we might be parsing a declaration with a scope
+ // specifier, like this:
+ // A::private_type A::foo() { ... }
+ //
+ // Or we might be parsing something that will turn out to be a friend:
+ // void foo(A::private_type);
+ // void B::foo(A::private_type);
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity));
return Sema::AR_delayed;
}
@@ -1233,16 +1286,20 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
return Sema::AR_accessible;
}
-void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *Ctx) {
- // Pretend we did this from the context of the newly-parsed
- // declaration. If that declaration itself forms a declaration context,
- // include it in the effective context so that parameters and return types of
- // befriended functions have that function's access priveledges.
- DeclContext *DC = Ctx->getDeclContext();
- if (isa<FunctionDecl>(Ctx))
- DC = cast<DeclContext>(Ctx);
- else if (FunctionTemplateDecl *FnTpl = dyn_cast<FunctionTemplateDecl>(Ctx))
- DC = cast<DeclContext>(FnTpl->getTemplatedDecl());
+void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *decl) {
+ // Access control for names used in the declarations of functions
+ // and function templates should normally be evaluated in the context
+ // of the declaration, just in case it's a friend of something.
+ // However, this does not apply to local extern declarations.
+
+ DeclContext *DC = decl->getDeclContext();
+ if (FunctionDecl *fn = dyn_cast<FunctionDecl>(decl)) {
+ if (!DC->isFunctionOrMethod()) DC = fn;
+ } else if (FunctionTemplateDecl *fnt = dyn_cast<FunctionTemplateDecl>(decl)) {
+ // Never a local declaration.
+ DC = fnt->getTemplatedDecl();
+ }
+
EffectiveContext EC(DC);
AccessTarget Target(DD.getAccessData());
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
index 0921156..794b0b1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -263,37 +263,35 @@ void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
}
}
-void Sema::ActOnPragmaUnused(const Token *Identifiers, unsigned NumIdentifiers,
- Scope *curScope,
- SourceLocation PragmaLoc,
- SourceLocation LParenLoc,
- SourceLocation RParenLoc) {
-
- for (unsigned i = 0; i < NumIdentifiers; ++i) {
- const Token &Tok = Identifiers[i];
- IdentifierInfo *Name = Tok.getIdentifierInfo();
- LookupResult Lookup(*this, Name, Tok.getLocation(), LookupOrdinaryName);
- LookupParsedName(Lookup, curScope, NULL, true);
-
- if (Lookup.empty()) {
- Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var)
- << Name << SourceRange(Tok.getLocation());
- continue;
- }
+void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
+ SourceLocation PragmaLoc) {
- VarDecl *VD = Lookup.getAsSingle<VarDecl>();
- if (!VD || !VD->hasLocalStorage()) {
- Diag(PragmaLoc, diag::warn_pragma_unused_expected_localvar)
- << Name << SourceRange(Tok.getLocation());
- continue;
- }
+ IdentifierInfo *Name = IdTok.getIdentifierInfo();
+ LookupResult Lookup(*this, Name, IdTok.getLocation(), LookupOrdinaryName);
+ LookupParsedName(Lookup, curScope, NULL, true);
+
+ if (Lookup.empty()) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
+ }
- VD->addAttr(::new (Context) UnusedAttr(Tok.getLocation(), Context));
+ VarDecl *VD = Lookup.getAsSingle<VarDecl>();
+ if (!VD) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_expected_var_arg)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
}
+
+ // Warn if this was used before being marked unused.
+ if (VD->isUsed())
+ Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
+
+ VD->addAttr(::new (Context) UnusedAttr(IdTok.getLocation(), Context));
}
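
For reference, #pragma unused marks a local variable so unused-variable warnings stay quiet; the reworked handler additionally warns when the variable was in fact already used. A sketch:

    void f(void) {
      int scratch;
      #pragma unused(scratch)  // fine: suppresses the unused-variable warning

      int n = 0;
      ++n;
      #pragma unused(n)        // warn_used_but_marked_unused: 'n' was used
    }
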
-typedef std::vector<std::pair<VisibilityAttr::VisibilityType,
- SourceLocation> > VisStack;
+typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
+enum { NoVisibility = (unsigned) -1 };
void Sema::AddPushedVisibilityAttribute(Decl *D) {
if (!VisContext)
@@ -303,7 +301,11 @@ void Sema::AddPushedVisibilityAttribute(Decl *D) {
return;
VisStack *Stack = static_cast<VisStack*>(VisContext);
- VisibilityAttr::VisibilityType type = Stack->back().first;
+ unsigned rawType = Stack->back().first;
+ if (rawType == NoVisibility) return;
+
+ VisibilityAttr::VisibilityType type
+ = (VisibilityAttr::VisibilityType) rawType;
SourceLocation loc = Stack->back().second;
D->addAttr(::new (Context) VisibilityAttr(loc, Context, type));
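
The stack being manipulated here backs the GCC-compatible visibility pragma; the NoVisibility sentinel added above models a context that overrides the stack without contributing a visibility of its own. A usage sketch:

    #pragma GCC visibility push(hidden)
    void internal_fn();   // receives VisibilityAttr(hidden) from the stack
    #pragma GCC visibility pop
    void exported_fn();   // back to default visibility
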
@@ -315,8 +317,7 @@ void Sema::FreeVisContext() {
VisContext = 0;
}
-static void PushPragmaVisibility(Sema &S, VisibilityAttr::VisibilityType type,
- SourceLocation loc) {
+static void PushPragmaVisibility(Sema &S, unsigned type, SourceLocation loc) {
// Put visibility on stack.
if (!S.VisContext)
S.VisContext = new VisStack;
@@ -349,8 +350,26 @@ void Sema::ActOnPragmaVisibility(bool IsPush, const IdentifierInfo* VisType,
}
}
-void Sema::PushVisibilityAttr(const VisibilityAttr *Attr) {
- PushPragmaVisibility(*this, Attr->getVisibility(), Attr->getLocation());
+void Sema::ActOnPragmaFPContract(tok::OnOffSwitch OOS) {
+ switch (OOS) {
+ case tok::OOS_ON:
+ FPFeatures.fp_contract = 1;
+ break;
+ case tok::OOS_OFF:
+ FPFeatures.fp_contract = 0;
+ break;
+ case tok::OOS_DEFAULT:
+ FPFeatures.fp_contract = getLangOptions().DefaultFPContract;
+ break;
+ }
+}
+
+void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr) {
+ // Visibility calculations will consider the namespace's visibility.
+ // Here we just want to note that we're in a visibility context
+ // which overrides any enclosing #pragma context, but doesn't itself
+ // contribute visibility.
+ PushPragmaVisibility(*this, NoVisibility, SourceLocation());
}
void Sema::PopPragmaVisibility() {
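
ActOnPragmaFPContract implements the C99 on-off-switch pragma; a usage sketch:

    float fused(float a, float b, float c) {
      #pragma STDC FP_CONTRACT ON  // tok::OOS_ON -> FPFeatures.fp_contract = 1
      return a * b + c;            // may now be contracted into a single fma
    }
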
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
index 21b1a73..506d261 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
@@ -21,6 +21,8 @@
#include <set>
using namespace clang;
+
+
enum TryCastResult {
TC_NotApplicable, ///< The cast method is not applicable.
TC_Success, ///< The cast method is appropriate and successful.
@@ -37,18 +39,25 @@ enum CastType {
CT_Functional ///< Type(expr)
};
+
+
+
static void CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ ExprValueKind &VK,
const SourceRange &OpRange,
const SourceRange &DestRange);
static void CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ ExprValueKind &VK,
const SourceRange &OpRange,
const SourceRange &DestRange,
CastKind &Kind);
static void CheckStaticCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ ExprValueKind &VK,
const SourceRange &OpRange,
CastKind &Kind,
CXXCastPath &BasePath);
static void CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ ExprValueKind &VK,
const SourceRange &OpRange,
const SourceRange &DestRange,
CastKind &Kind,
@@ -68,7 +77,10 @@ static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType);
// %1: Source Type
// %2: Destination Type
static TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
- QualType DestType, unsigned &msg);
+ QualType DestType, bool CStyle,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ unsigned &msg);
static TryCastResult TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
@@ -109,12 +121,24 @@ static TryCastResult TryStaticCast(Sema &Self, Expr *&SrcExpr,
CXXCastPath &BasePath);
static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
bool CStyle, unsigned &msg);
-static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
+static TryCastResult TryReinterpretCast(Sema &Self, Expr *&SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
unsigned &msg,
CastKind &Kind);
+
+static ExprResult
+ResolveAndFixSingleFunctionTemplateSpecialization(
+ Sema &Self, Expr *SrcExpr,
+ bool DoFunctionPointerConverion = false,
+ bool Complain = false,
+ const SourceRange& OpRangeForComplaining = SourceRange(),
+ QualType DestTypeForComplaining = QualType(),
+ unsigned DiagIDForComplaining = 0);
+
+
+
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
@@ -146,51 +170,147 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
// FIXME: should we check this in a more fine-grained manner?
bool TypeDependent = DestType->isDependentType() || Ex->isTypeDependent();
+ if (Ex->isBoundMemberFunction(Context))
+ Diag(Ex->getLocStart(), diag::err_invalid_use_of_bound_member_func)
+ << Ex->getSourceRange();
+
+ ExprValueKind VK = VK_RValue;
+ if (TypeDependent)
+ VK = Expr::getValueKindForType(DestType);
+
switch (Kind) {
- default: assert(0 && "Unknown C++ cast!");
+ default: llvm_unreachable("Unknown C++ cast!");
case tok::kw_const_cast:
if (!TypeDependent)
- CheckConstCast(*this, Ex, DestType, OpRange, DestRange);
+ CheckConstCast(*this, Ex, DestType, VK, OpRange, DestRange);
return Owned(CXXConstCastExpr::Create(Context,
DestType.getNonLValueExprType(Context),
- Ex, DestTInfo, OpLoc));
+ VK, Ex, DestTInfo, OpLoc,
+ Parens.getEnd()));
case tok::kw_dynamic_cast: {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Dependent;
CXXCastPath BasePath;
if (!TypeDependent)
- CheckDynamicCast(*this, Ex, DestType, OpRange, DestRange, Kind, BasePath);
+ CheckDynamicCast(*this, Ex, DestType, VK, OpRange, DestRange,
+ Kind, BasePath);
return Owned(CXXDynamicCastExpr::Create(Context,
DestType.getNonLValueExprType(Context),
- Kind, Ex, &BasePath, DestTInfo,
- OpLoc));
+ VK, Kind, Ex, &BasePath, DestTInfo,
+ OpLoc, Parens.getEnd()));
}
case tok::kw_reinterpret_cast: {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Dependent;
if (!TypeDependent)
- CheckReinterpretCast(*this, Ex, DestType, OpRange, DestRange, Kind);
+ CheckReinterpretCast(*this, Ex, DestType, VK, OpRange, DestRange, Kind);
return Owned(CXXReinterpretCastExpr::Create(Context,
DestType.getNonLValueExprType(Context),
- Kind, Ex, 0,
- DestTInfo, OpLoc));
+ VK, Kind, Ex, 0,
+ DestTInfo, OpLoc, Parens.getEnd()));
}
case tok::kw_static_cast: {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Dependent;
CXXCastPath BasePath;
if (!TypeDependent)
- CheckStaticCast(*this, Ex, DestType, OpRange, Kind, BasePath);
+ CheckStaticCast(*this, Ex, DestType, VK, OpRange, Kind, BasePath);
return Owned(CXXStaticCastExpr::Create(Context,
DestType.getNonLValueExprType(Context),
- Kind, Ex, &BasePath,
- DestTInfo, OpLoc));
+ VK, Kind, Ex, &BasePath,
+ DestTInfo, OpLoc, Parens.getEnd()));
}
}
return ExprError();
}
+/// Try to diagnose a failed overloaded cast. Returns true if
+/// diagnostics were emitted.
+static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
+ SourceRange range, Expr *src,
+ QualType destType) {
+ switch (CT) {
+ // These cast kinds don't consider user-defined conversions.
+ case CT_Const:
+ case CT_Reinterpret:
+ case CT_Dynamic:
+ return false;
+
+ // These do.
+ case CT_Static:
+ case CT_CStyle:
+ case CT_Functional:
+ break;
+ }
+
+ QualType srcType = src->getType();
+ if (!destType->isRecordType() && !srcType->isRecordType())
+ return false;
+
+ InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
+ InitializationKind initKind
+ = InitializationKind::CreateCast(/*type range?*/ range,
+ (CT == CT_CStyle || CT == CT_Functional));
+ InitializationSequence sequence(S, entity, initKind, &src, 1);
+
+ assert(sequence.getKind() == InitializationSequence::FailedSequence &&
+ "initialization succeeded on second try?");
+ switch (sequence.getFailureKind()) {
+ default: return false;
+
+ case InitializationSequence::FK_ConstructorOverloadFailed:
+ case InitializationSequence::FK_UserConversionOverloadFailed:
+ break;
+ }
+
+ OverloadCandidateSet &candidates = sequence.getFailedCandidateSet();
+
+ unsigned msg = 0;
+ OverloadCandidateDisplayKind howManyCandidates = OCD_AllCandidates;
+
+ switch (sequence.getFailedOverloadResult()) {
+ case OR_Success: llvm_unreachable("successful failed overload");
+ return false;
+ case OR_No_Viable_Function:
+ if (candidates.empty())
+ msg = diag::err_ovl_no_conversion_in_cast;
+ else
+ msg = diag::err_ovl_no_viable_conversion_in_cast;
+ howManyCandidates = OCD_AllCandidates;
+ break;
+
+ case OR_Ambiguous:
+ msg = diag::err_ovl_ambiguous_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+
+ case OR_Deleted:
+ msg = diag::err_ovl_deleted_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+ }
+
+ S.Diag(range.getBegin(), msg)
+ << CT << srcType << destType
+ << range << src->getSourceRange();
+
+ candidates.NoteCandidates(S, howManyCandidates, &src, 1);
+
+ return true;
+}
+
+/// Diagnose a failed cast.
+static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
+ SourceRange opRange, Expr *src, QualType destType) {
+ if (msg == diag::err_bad_cxx_cast_generic &&
+ tryDiagnoseOverloadedCast(S, castType, opRange, src, destType))
+ return;
+
+ S.Diag(opRange.getBegin(), msg) << castType
+ << src->getType() << destType << opRange << src->getSourceRange();
+}
+
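
A cast that reaches tryDiagnoseOverloadedCast, sketched below: the user-defined conversion is ambiguous, so the new path emits err_ovl_ambiguous_conversion_in_cast and notes the viable candidates instead of the old generic message:

    struct From {
      operator int() const;
      operator double() const;
    };
    struct To {
      To(int);
      To(double);
    };
    To t = static_cast<To>(From());  // ambiguous: via int or via double?
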
/// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes,
/// this removes one level of indirection from both types, provided that they're
/// the same kind of pointer (plain or to-member). Unlike the Sema function,
@@ -297,7 +417,7 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
// Test if they're compatible.
return SrcConstruct != DestConstruct &&
- !Self.IsQualificationConversion(SrcConstruct, DestConstruct);
+ !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false);
}
/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
@@ -305,7 +425,7 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
/// checked downcasts in class hierarchies.
static void
CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
- const SourceRange &OpRange,
+ ExprValueKind &VK, const SourceRange &OpRange,
const SourceRange &DestRange, CastKind &Kind,
CXXCastPath &BasePath) {
QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();
@@ -316,11 +436,14 @@ CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
QualType DestPointee;
const PointerType *DestPointer = DestType->getAs<PointerType>();
- const ReferenceType *DestReference = DestType->getAs<ReferenceType>();
+ const ReferenceType *DestReference = 0;
if (DestPointer) {
DestPointee = DestPointer->getPointeeType();
- } else if (DestReference) {
+ } else if ((DestReference = DestType->getAs<ReferenceType>())) {
DestPointee = DestReference->getPointeeType();
+ VK = isa<LValueReferenceType>(DestReference) ? VK_LValue
+ : isa<RValueReferenceType>(DestReference) ? VK_XValue
+ : VK_RValue;
} else {
Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ref_or_ptr)
<< OrigDestType << DestRange;
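
The value-kind computation above, applied to user code (a hypothetical polymorphic hierarchy):

    struct B { virtual ~B(); };
    struct D : B {};

    void g(B &b) {
      D  &lv = dynamic_cast<D&>(b);                     // VK_LValue
      D &&xv = dynamic_cast<D&&>(static_cast<B&&>(b));  // VK_XValue
      D  *pr = dynamic_cast<D*>(&b);                    // pointer: VK_RValue
      (void)lv; (void)xv; (void)pr;
    }
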
@@ -343,10 +466,8 @@ CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
// C++0x 5.2.7p2: If T is a pointer type, v shall be an rvalue of a pointer to
// complete class type, [...]. If T is an lvalue reference type, v shall be
- // an lvalue of a complete class type, [...]. If T is an rvalue reference
- // type, v shall be an expression having a complete effective class type,
- // [...]
-
+ // an lvalue of a complete class type, [...]. If T is an rvalue reference
+ // type, v shall be an expression having a complete class type, [...]
QualType SrcType = Self.Context.getCanonicalType(OrigSrcType);
QualType SrcPointee;
if (DestPointer) {
@@ -358,7 +479,7 @@ CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
return;
}
} else if (DestReference->isLValueReferenceType()) {
- if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ if (!SrcExpr->isLValue()) {
Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
<< CT_Dynamic << OrigSrcType << OrigDestType << OpRange;
}
@@ -437,9 +558,10 @@ CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void
-CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType, ExprValueKind &VK,
const SourceRange &OpRange, const SourceRange &DestRange) {
- if (!DestType->isLValueReferenceType())
+ VK = Expr::getValueKindForType(DestType);
+ if (VK == VK_RValue)
Self.DefaultFunctionArrayLvalueConversion(SrcExpr);
unsigned msg = diag::err_bad_cxx_cast_generic;
@@ -456,17 +578,28 @@ CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
void
CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
- const SourceRange &OpRange, const SourceRange &DestRange,
- CastKind &Kind) {
- if (!DestType->isLValueReferenceType())
+ ExprValueKind &VK, const SourceRange &OpRange,
+ const SourceRange &DestRange, CastKind &Kind) {
+ VK = Expr::getValueKindForType(DestType);
+ if (VK == VK_RValue)
Self.DefaultFunctionArrayLvalueConversion(SrcExpr);
unsigned msg = diag::err_bad_cxx_cast_generic;
if (TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/false, OpRange,
msg, Kind)
!= TC_Success && msg != 0)
- Self.Diag(OpRange.getBegin(), msg) << CT_Reinterpret
- << SrcExpr->getType() << DestType << OpRange;
+ {
+ if (SrcExpr->getType() == Self.Context.OverloadTy) {
+ // FIXME: &f<int>; is overloaded and resolvable
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_overload)
+ << OverloadExpr::find(SrcExpr).Expression->getName()
+ << DestType << OpRange;
+ Self.NoteAllOverloadCandidates(SrcExpr);
+
+ } else {
+ diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr, DestType);
+ }
+ }
}
@@ -475,25 +608,47 @@ CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
/// implicit conversions explicit and getting rid of data loss warnings.
void
CheckStaticCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
- const SourceRange &OpRange, CastKind &Kind,
- CXXCastPath &BasePath) {
+ ExprValueKind &VK, const SourceRange &OpRange,
+ CastKind &Kind, CXXCastPath &BasePath) {
// This test is outside everything else because it's the only case where
// a non-lvalue-reference target type does not lead to decay.
// C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
if (DestType->isVoidType()) {
- Kind = CK_ToVoid;
+ Self.IgnoredValueConversions(SrcExpr);
+ if (SrcExpr->getType() == Self.Context.OverloadTy) {
+ ExprResult SingleFunctionExpression =
+ ResolveAndFixSingleFunctionTemplateSpecialization(Self, SrcExpr,
+ false, // Decay Function to ptr
+ true, // Complain
+ OpRange, DestType, diag::err_bad_static_cast_overload);
+ if (SingleFunctionExpression.isUsable()) {
+ SrcExpr = SingleFunctionExpression.release();
+ Kind = CK_ToVoid;
+ }
+ }
+ else
+ Kind = CK_ToVoid;
return;
}
- if (!DestType->isLValueReferenceType() && !DestType->isRecordType())
+ VK = Expr::getValueKindForType(DestType);
+ if (VK == VK_RValue && !DestType->isRecordType())
Self.DefaultFunctionArrayLvalueConversion(SrcExpr);
unsigned msg = diag::err_bad_cxx_cast_generic;
if (TryStaticCast(Self, SrcExpr, DestType, /*CStyle*/false, OpRange, msg,
- Kind, BasePath) != TC_Success && msg != 0)
- Self.Diag(OpRange.getBegin(), msg) << CT_Static
- << SrcExpr->getType() << DestType << OpRange;
- else if (Kind == CK_Unknown || Kind == CK_BitCast)
+ Kind, BasePath) != TC_Success && msg != 0) {
+ if (SrcExpr->getType() == Self.Context.OverloadTy) {
+ OverloadExpr* oe = OverloadExpr::find(SrcExpr).Expression;
+ Self.Diag(OpRange.getBegin(), diag::err_bad_static_cast_overload)
+ << oe->getName() << DestType << OpRange << oe->getQualifierRange();
+ Self.NoteAllOverloadCandidates(SrcExpr);
+ } else {
+ diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr, DestType);
+ }
+ }
+ else if (Kind == CK_BitCast)
Self.CheckCastAlign(SrcExpr, DestType, OpRange);
}
@@ -530,13 +685,13 @@ static TryCastResult TryStaticCast(Sema &Self, Expr *&SrcExpr,
if (tcr != TC_NotApplicable)
return tcr;
- // N2844 5.2.9p3: An lvalue of type "cv1 T1" can be cast to type "rvalue
- // reference to cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
- tcr = TryLValueToRValueCast(Self, SrcExpr, DestType, msg);
- if (tcr != TC_NotApplicable) {
- Kind = CK_NoOp;
+ // C++0x [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
+ // T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ tcr = TryLValueToRValueCast(Self, SrcExpr, DestType, CStyle, Kind, BasePath,
+ msg);
+ if (tcr != TC_NotApplicable)
return tcr;
- }
// C++ 5.2.9p2: An expression e can be explicitly converted to a type T
// [...] if the declaration "T t(e);" is well-formed, [...].
@@ -553,10 +708,26 @@ static TryCastResult TryStaticCast(Sema &Self, Expr *&SrcExpr,
// In the CStyle case, the earlier attempt to const_cast should have taken
// care of reverse qualification conversions.
- QualType OrigSrcType = SrcExpr->getType();
-
QualType SrcType = Self.Context.getCanonicalType(SrcExpr->getType());
+ // C++0x 5.2.9p9: A value of a scoped enumeration type can be explicitly
+ // converted to an integral type. [...] A value of a scoped enumeration type
+ // can also be explicitly converted to a floating-point type [...].
+ if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
+ if (Enum->getDecl()->isScoped()) {
+ if (DestType->isBooleanType()) {
+ Kind = CK_IntegralToBoolean;
+ return TC_Success;
+ } else if (DestType->isIntegralType(Self.Context)) {
+ Kind = CK_IntegralCast;
+ return TC_Success;
+ } else if (DestType->isRealFloatingType()) {
+ Kind = CK_IntegralToFloating;
+ return TC_Success;
+ }
+ }
+ }
+
// Reverse integral promotion/conversion. All such conversions are themselves
// again integral promotions or conversions and are thus already handled by
// p2 (TryDirectInitialization above).
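
The new branch corresponds directly to C++0x scoped enumerations; a sketch:

    enum class Color : unsigned { Red = 1, Green = 2 };

    unsigned u = static_cast<unsigned>(Color::Green);  // CK_IntegralCast
    bool     b = static_cast<bool>(Color::Red);        // CK_IntegralToBoolean
    double   d = static_cast<double>(Color::Red);      // CK_IntegralToFloating
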
@@ -623,8 +794,10 @@ static TryCastResult TryStaticCast(Sema &Self, Expr *&SrcExpr,
}
// Allow arbitrary Objective-C pointer conversion with static casts.
if (SrcType->isObjCObjectPointerType() &&
- DestType->isObjCObjectPointerType())
+ DestType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
return TC_Success;
+ }
// We tried everything. Everything! Nothing works! :-(
return TC_NotApplicable;
@@ -633,14 +806,16 @@ static TryCastResult TryStaticCast(Sema &Self, Expr *&SrcExpr,
/// Tests whether a conversion according to N2844 is valid.
TryCastResult
TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, CastKind &Kind, CXXCastPath &BasePath,
unsigned &msg) {
- // N2844 5.2.9p3: An lvalue of type "cv1 T1" can be cast to type "rvalue
- // reference to cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ // C++0x [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
+ // cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
const RValueReferenceType *R = DestType->getAs<RValueReferenceType>();
if (!R)
return TC_NotApplicable;
- if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid)
+ if (!SrcExpr->isGLValue())
return TC_NotApplicable;
// Because we try the reference downcast before this function, from now on
@@ -648,16 +823,32 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
// FIXME: Should allow casting away constness if CStyle.
bool DerivedToBase;
bool ObjCConversion;
+ QualType FromType = SrcExpr->getType();
+ QualType ToType = R->getPointeeType();
+ if (CStyle) {
+ FromType = FromType.getUnqualifiedType();
+ ToType = ToType.getUnqualifiedType();
+ }
+
if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
- SrcExpr->getType(), R->getPointeeType(),
+ ToType, FromType,
DerivedToBase, ObjCConversion) <
Sema::Ref_Compatible_With_Added_Qualification) {
msg = diag::err_bad_lvalue_to_rvalue_cast;
return TC_Failed;
}
- // FIXME: We should probably have an AST node for lvalue-to-rvalue
- // conversions.
+ if (DerivedToBase) {
+ Kind = CK_DerivedToBase;
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(SrcExpr->getType(), R->getPointeeType(), Paths))
+ return TC_NotApplicable;
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ } else
+ Kind = CK_NoOp;
+
return TC_Success;
}
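
The rule implemented above, including the derived-to-base path the rewrite now materializes, looks like this in user code (hypothetical types):

    struct Base {};
    struct Derived : Base {};

    Derived d;
    Derived &&same = static_cast<Derived&&>(d);  // CK_NoOp
    Base    &&up   = static_cast<Base&&>(d);     // CK_DerivedToBase + base path
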
@@ -681,7 +872,7 @@ TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
return TC_NotApplicable;
}
bool RValueRef = DestReference->isRValueReferenceType();
- if (!RValueRef && SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ if (!RValueRef && !SrcExpr->isLValue()) {
// We know the left side is an lvalue reference, so we can suggest a reason.
msg = diag::err_bad_cxx_cast_rvalue;
return TC_NotApplicable;
@@ -818,12 +1009,20 @@ TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
return TC_Failed;
}
- if (!CStyle && Self.CheckBaseClassAccess(OpRange.getBegin(),
- SrcType, DestType,
- Paths.front(),
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ SrcType, DestType,
+ Paths.front(),
diag::err_downcast_from_inaccessible_base)) {
- msg = 0;
- return TC_Failed;
+ case Sema::AR_accessible:
+ case Sema::AR_delayed: // be optimistic
+ case Sema::AR_dependent: // be optimistic
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
}
Self.BuildBasePathArray(Paths, BasePath);
@@ -887,7 +1086,7 @@ TryStaticMemberPointerUpcast(Sema &Self, Expr *&SrcExpr, QualType SrcType,
Paths.setRecordingPaths(true);
bool StillOkay = Self.IsDerivedFrom(SrcClass, DestClass, Paths);
assert(StillOkay);
- StillOkay = StillOkay;
+ (void)StillOkay;
std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
Self.Diag(OpRange.getBegin(), diag::err_ambiguous_memptr_conv)
<< 1 << SrcClass << DestClass << PathDisplayStr << OpRange;
@@ -902,12 +1101,22 @@ TryStaticMemberPointerUpcast(Sema &Self, Expr *&SrcExpr, QualType SrcType,
return TC_Failed;
}
- if (!CStyle && Self.CheckBaseClassAccess(OpRange.getBegin(),
- DestClass, SrcClass,
- Paths.front(),
- diag::err_upcast_to_inaccessible_base)) {
- msg = 0;
- return TC_Failed;
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ DestClass, SrcClass,
+ Paths.front(),
+ diag::err_upcast_to_inaccessible_base)) {
+ case Sema::AR_accessible:
+ case Sema::AR_delayed:
+ case Sema::AR_dependent:
+ // Optimistically assume that the delayed and dependent cases
+ // will work out.
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
}
if (WasOverloadedFunction) {
@@ -951,17 +1160,19 @@ TryStaticImplicitCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
}
}
- // At this point of CheckStaticCast, if the destination is a reference,
- // this has to work. There is no other way that works.
- // On the other hand, if we're checking a C-style cast, we've still got
- // the reinterpret_cast way.
InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
InitializationKind InitKind
- = InitializationKind::CreateCast(/*FIXME:*/OpRange,
- CStyle);
+ = InitializationKind::CreateCast(/*FIXME:*/OpRange, CStyle);
InitializationSequence InitSeq(Self, Entity, InitKind, &SrcExpr, 1);
+
+ // At this point of CheckStaticCast, if the destination is a reference
+ // or the expression is an overload expression, this has to work.
+ // There is no other way that works.
+ // On the other hand, if we're checking a C-style cast, we've still got
+ // the reinterpret_cast way.
+
if (InitSeq.getKind() == InitializationSequence::FailedSequence &&
- (CStyle || !DestType->isReferenceType()))
+ (CStyle || !DestType->isReferenceType()))
return TC_NotApplicable;
ExprResult Result
@@ -986,9 +1197,8 @@ static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
bool CStyle, unsigned &msg) {
DestType = Self.Context.getCanonicalType(DestType);
QualType SrcType = SrcExpr->getType();
- if (const LValueReferenceType *DestTypeTmp =
- DestType->getAs<LValueReferenceType>()) {
- if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
+ if (DestTypeTmp->isLValueReferenceType() && !SrcExpr->isLValue()) {
// Cannot const_cast non-lvalue to lvalue reference type. But if this
// is C-style, static_cast might find a way, so we simply suggest a
// message and tell the parent to keep searching.
@@ -1049,7 +1259,43 @@ static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
return TC_Success;
}
-static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
+// A helper function to resolve and fix an overloaded expression that
+// can be resolved because it identifies a single function template
+// specialization.
+// The last three arguments should only be supplied if Complain = true.
+static ExprResult ResolveAndFixSingleFunctionTemplateSpecialization(
+ Sema &Self, Expr *SrcExpr,
+ bool DoFunctionPointerConversion,
+ bool Complain,
+ const SourceRange& OpRangeForComplaining,
+ QualType DestTypeForComplaining,
+ unsigned DiagIDForComplaining) {
+ assert(SrcExpr->getType() == Self.Context.OverloadTy);
+ DeclAccessPair Found;
+ Expr* SingleFunctionExpression = 0;
+ if (FunctionDecl* Fn = Self.ResolveSingleFunctionTemplateSpecialization(
+ SrcExpr, false, // false -> do not complain
+ &Found)) {
+ if (!Self.DiagnoseUseOfDecl(Fn, SrcExpr->getSourceRange().getBegin())) {
+ // mark the expression as resolved to Fn
+ SingleFunctionExpression = Self.FixOverloadedFunctionReference(SrcExpr,
+ Found, Fn);
+
+ if (DoFunctionPointerConversion)
+ Self.DefaultFunctionArrayLvalueConversion(SingleFunctionExpression);
+ }
+ }
+ if (!SingleFunctionExpression && Complain) {
+ OverloadExpr* oe = OverloadExpr::find(SrcExpr).Expression;
+ Self.Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
+ << oe->getName() << DestTypeForComplaining << OpRangeForComplaining
+ << oe->getQualifierRange();
+ Self.NoteAllOverloadCandidates(SrcExpr);
+ }
+ return SingleFunctionExpression;
+}
+
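
A sketch of the case the new helper handles: an overloaded name that nonetheless denotes exactly one function template specialization (the template name here is hypothetical):

    template <typename T> void foo(T);

    void demo() {
      // '&foo<int>' is an overload expression, but it resolves to a single
      // specialization, so the cast below can be checked normally.
      void (*fp)(int) = reinterpret_cast<void (*)(int)>(&foo<int>);
      (void)fp;
    }
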
+static TryCastResult TryReinterpretCast(Sema &Self, Expr *&SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
unsigned &msg,
@@ -1058,11 +1304,28 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
DestType = Self.Context.getCanonicalType(DestType);
QualType SrcType = SrcExpr->getType();
+
+ // Is the source an overloaded name? (i.e. &foo)
+ // If so, reinterpret_cast cannot help us here (13.4, p1, bullet 5) ...
+ if (SrcType == Self.Context.OverloadTy) {
+ // ... unless foo<int> resolves to an lvalue unambiguously
+ ExprResult SingleFunctionExpr =
+ ResolveAndFixSingleFunctionTemplateSpecialization(Self, SrcExpr,
+ Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
+ );
+ if (SingleFunctionExpr.isUsable()) {
+ SrcExpr = SingleFunctionExpr.release();
+ SrcType = SrcExpr->getType();
+ }
+ else
+ return TC_NotApplicable;
+ }
+
if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
bool LValue = DestTypeTmp->isLValueReferenceType();
- if (LValue && SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
- // Cannot cast non-lvalue to reference type. See the similar comment in
- // const_cast.
+ if (LValue && !SrcExpr->isLValue()) {
+ // Cannot cast non-lvalue to lvalue reference type. See the similar
+ // comment in const_cast.
msg = diag::err_bad_cxx_cast_rvalue;
return TC_NotApplicable;
}
@@ -1073,6 +1336,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
// This code does this transformation for the checked types.
DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
SrcType = Self.Context.getPointerType(SrcType);
+
IsLValueCast = true;
}
@@ -1193,6 +1457,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
assert(destIsPtr && "One type must be a pointer");
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
+ // C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
+ // necessarily converted to a null pointer value.]
Kind = CK_IntegralToPointer;
return TC_Success;
}
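
A small example of the CK_IntegralToPointer path, including the cited note that a zero of integral type is not guaranteed to become a null pointer value:

    void demo(long addr) {
      int *p = reinterpret_cast<int *>(addr);  // integral -> pointer
      int *q = reinterpret_cast<int *>(0L);    // need not be a null pointer value
      (void)p; (void)q;
    }
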
@@ -1257,26 +1523,54 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
// So we finish by allowing everything that remains - it's got to be two
// object pointers.
return TC_Success;
-}
+}
bool
-Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, Expr *&CastExpr,
- CastKind &Kind,
+Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, ExprValueKind &VK,
+ Expr *&CastExpr, CastKind &Kind,
CXXCastPath &BasePath,
bool FunctionalStyle) {
+ if (CastExpr->isBoundMemberFunction(Context))
+ return Diag(CastExpr->getLocStart(),
+ diag::err_invalid_use_of_bound_member_func)
+ << CastExpr->getSourceRange();
+
// This test is outside everything else because it's the only case where
// a non-lvalue-reference target type does not lead to decay.
// C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
if (CastTy->isVoidType()) {
- Kind = CK_ToVoid;
- return false;
+ IgnoredValueConversions(CastExpr);
+ bool ret = false; // false is 'able to convert'
+ if (CastExpr->getType() == Context.OverloadTy) {
+ ExprResult SingleFunctionExpr =
+ ResolveAndFixSingleFunctionTemplateSpecialization(*this,
+ CastExpr,
+ /* Decay Function to ptr */ false,
+ /* Complain */ true,
+ R, CastTy, diag::err_bad_cstyle_cast_overload);
+ if (SingleFunctionExpr.isUsable()) {
+ CastExpr = SingleFunctionExpr.release();
+ Kind = CK_ToVoid;
+ }
+ else
+ ret = true;
+ }
+ else
+ Kind = CK_ToVoid;
+ return ret;
}
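
A sketch of the new cast-to-void handling of overloaded names (the template is hypothetical): the cast succeeds only when the name resolves to one specialization.

    template <typename T> T twice(T x) { return x + x; }

    void demo() {
      (void)twice<int>;   // resolves to a single specialization: OK, CK_ToVoid
      // (void)twice;     // unresolved overload set: err_bad_cstyle_cast_overload
    }
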
+ // Make sure we determine the value kind before we bail out for
+ // dependent types.
+ VK = Expr::getValueKindForType(CastTy);
+
// If the type is dependent, we won't do any other semantic analysis now.
- if (CastTy->isDependentType() || CastExpr->isTypeDependent())
+ if (CastTy->isDependentType() || CastExpr->isTypeDependent()) {
+ Kind = CK_Dependent;
return false;
+ }
- if (!CastTy->isLValueReferenceType() && !CastTy->isRecordType())
+ if (VK == VK_RValue && !CastTy->isRecordType())
DefaultFunctionArrayLvalueConversion(CastExpr);
// C++ [expr.cast]p5: The conversions performed by
@@ -1307,10 +1601,24 @@ Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, Expr *&CastExpr,
}
}
- if (tcr != TC_Success && msg != 0)
- Diag(R.getBegin(), msg) << (FunctionalStyle ? CT_Functional : CT_CStyle)
- << CastExpr->getType() << CastTy << R;
- else if (Kind == CK_Unknown || Kind == CK_BitCast)
+ if (tcr != TC_Success && msg != 0) {
+ if (CastExpr->getType() == Context.OverloadTy) {
+ DeclAccessPair Found;
+ FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(CastExpr,
+ CastTy,
+ /* Complain */ true,
+ Found);
+
+ assert(!Fn
+ && "cast failed but able to resolve overload expression!!");
+ (void)Fn;
+
+ } else {
+ diagnoseBadCast(*this, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
+ R, CastExpr, CastTy);
+ }
+ }
+ else if (Kind == CK_BitCast)
CheckCastAlign(CastExpr, CastTy, R);
return tcr != TC_Success;
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 631308e..aa0efcd 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -24,14 +24,26 @@
using namespace clang;
/// \brief Find the current instantiation that is associated with the given type.
-static CXXRecordDecl *getCurrentInstantiationOf(QualType T) {
+static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
+ DeclContext *CurContext) {
if (T.isNull())
return 0;
const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
- if (isa<RecordType>(Ty))
- return cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
- else if (isa<InjectedClassNameType>(Ty))
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!T->isDependentType())
+ return Record;
+
+ // This may be a member of a class template or class template partial
+ // specialization. If it's part of the current semantic context, then it's
+ // an injected-class-name.
+ for (; !CurContext->isFileContext(); CurContext = CurContext->getParent())
+ if (CurContext->Equals(Record))
+ return Record;
+
+ return 0;
+ } else if (isa<InjectedClassNameType>(Ty))
return cast<InjectedClassNameType>(Ty)->getDecl();
else
return 0;
@@ -45,10 +57,11 @@ static CXXRecordDecl *getCurrentInstantiationOf(QualType T) {
/// or NULL if the declaration context cannot be computed (e.g., because it is
/// dependent and not the current instantiation).
DeclContext *Sema::computeDeclContext(QualType T) {
- if (const TagType *Tag = T->getAs<TagType>())
- return Tag->getDecl();
+ if (!T->isDependentType())
+ if (const TagType *Tag = T->getAs<TagType>())
+ return Tag->getDecl();
- return ::getCurrentInstantiationOf(T);
+ return ::getCurrentInstantiationOf(T, CurContext);
}
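
Roughly, the effect of threading CurContext through: a dependent record type only counts as the current instantiation when we are lexically inside it. A hedged sketch:

    template <typename T> struct X {
      typedef T type;
      void f(typename X<T>::type);  // inside X, 'X<T>' denotes the current
                                    // instantiation, so 'type' resolves now
    };

    // Outside X, 'X<T>' (with T dependent) is not the current instantiation;
    // computeDeclContext returns null and lookup is deferred to instantiation.
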
/// \brief Compute the DeclContext that is associated with the given
@@ -174,7 +187,7 @@ CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
return 0;
QualType T = QualType(NNS->getAsType(), 0);
- return ::getCurrentInstantiationOf(T);
+ return ::getCurrentInstantiationOf(T, CurContext);
}
/// \brief Require that the context specified by SS be complete.
@@ -518,7 +531,6 @@ Sema::CXXScopeTy *Sema::BuildCXXNestedNameSpecifier(Scope *S,
// a declaration context.
if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD))
return NestedNameSpecifier::Create(Context, Prefix,
-
Alias->getNamespace());
QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index 8ce95ae..24cdaef 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -25,82 +25,23 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
-#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
#include <limits>
using namespace clang;
using namespace sema;
-/// getLocationOfStringLiteralByte - Return a source location that points to the
-/// specified byte of the specified string literal.
-///
-/// Strings are amazingly complex. They can be formed from multiple tokens and
-/// can have escape sequences in them in addition to the usual trigraph and
-/// escaped newline business. This routine handles this complexity.
-///
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const {
- assert(!SL->isWide() && "This doesn't work for wide strings yet");
-
- // Loop over all of the tokens in this string until we find the one that
- // contains the byte we're looking for.
- unsigned TokNo = 0;
- while (1) {
- assert(TokNo < SL->getNumConcatenated() && "Invalid byte number!");
- SourceLocation StrTokLoc = SL->getStrTokenLoc(TokNo);
-
- // Get the spelling of the string so that we can get the data that makes up
- // the string literal, not the identifier for the macro it is potentially
- // expanded through.
- SourceLocation StrTokSpellingLoc = SourceMgr.getSpellingLoc(StrTokLoc);
-
- // Re-lex the token to get its length and original spelling.
- std::pair<FileID, unsigned> LocInfo =
- SourceMgr.getDecomposedLoc(StrTokSpellingLoc);
- bool Invalid = false;
- llvm::StringRef Buffer = SourceMgr.getBufferData(LocInfo.first, &Invalid);
- if (Invalid)
- return StrTokSpellingLoc;
-
- const char *StrData = Buffer.data()+LocInfo.second;
-
- // Create a langops struct and enable trigraphs. This is sufficient for
- // relexing tokens.
- LangOptions LangOpts;
- LangOpts.Trigraphs = true;
-
- // Create a lexer starting at the beginning of this token.
- Lexer TheLexer(StrTokSpellingLoc, LangOpts, Buffer.begin(), StrData,
- Buffer.end());
- Token TheTok;
- TheLexer.LexFromRawLexer(TheTok);
-
- // Use the StringLiteralParser to compute the length of the string in bytes.
- StringLiteralParser SLP(&TheTok, 1, PP, /*Complain=*/false);
- unsigned TokNumBytes = SLP.GetStringLength();
-
- // If the byte is in this token, return the location of the byte.
- if (ByteNo < TokNumBytes ||
- (ByteNo == TokNumBytes && TokNo == SL->getNumConcatenated())) {
- unsigned Offset =
- StringLiteralParser::getOffsetOfStringByte(TheTok, ByteNo, PP,
- /*Complain=*/false);
-
- // Now that we know the offset of the token in the spelling, use the
- // preprocessor to get the offset in the original source.
- return PP.AdvanceToTokenCharacter(StrTokLoc, Offset);
- }
-
- // Move to the next string token.
- ++TokNo;
- ByteNo -= TokNumBytes;
- }
+ return SL->getLocationOfByte(ByteNo, PP.getSourceManager(),
+ PP.getLangOptions(), PP.getTargetInfo());
}
+
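
The replaced logic matters because a single StringLiteral may be built from several concatenated tokens, so a byte index must be mapped back to the right token. For instance:

    const char *s = "foo" "bar";  // one literal, two tokens;
                                  // byte 4 ('a') lies in the second token
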
/// CheckablePrintfAttr - does a function call have a "printf" attribute
/// and arguments that merit checking?
@@ -129,6 +70,24 @@ ExprResult
Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
ExprResult TheCallResult(Owned(TheCall));
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ if (Error != ASTContext::GE_None)
+ ICEArguments = 0; // Don't diagnose previously diagnosed errors.
+
+ // If any arguments are required to be ICE's, check and diagnose.
+ for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
+ // Skip arguments not required to be ICE's.
+ if ((ICEArguments & (1 << ArgNo)) == 0) continue;
+
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ return true;
+ ICEArguments &= ~(1 << ArgNo);
+ }
+
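
With the generic ICE check in place, per-builtin checks like the ones removed below become redundant. A sketch of what it enforces, using a builtin whose argument is declared as an ICE:

    void *demo(unsigned n) {
      void *fp = __builtin_frame_address(0);  // OK: 0 is an ICE
      // __builtin_frame_address(n) would be rejected: the argument must be
      // an integer constant expression.
      return fp;
    }
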
switch (BuiltinID) {
case Builtin::BI__builtin___CFStringMakeConstantString:
assert(TheCall->getNumArgs() == 1 &&
@@ -162,19 +121,6 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (SemaBuiltinFPClassification(TheCall, 1))
return ExprError();
break;
- case Builtin::BI__builtin_return_address:
- case Builtin::BI__builtin_frame_address: {
- llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, 0, Result))
- return ExprError();
- break;
- }
- case Builtin::BI__builtin_eh_return_data_regno: {
- llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, 0, Result))
- return ExprError();
- break;
- }
case Builtin::BI__builtin_shufflevector:
return SemaBuiltinShuffleVector(TheCall);
// TheCall will be freed by the smart pointer here, but that's fine, since
@@ -191,6 +137,16 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (SemaBuiltinLongjmp(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_constant_p:
+ if (TheCall->getNumArgs() == 0)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << 1 << 0 << TheCall->getSourceRange();
+ if (TheCall->getNumArgs() > 1)
+ return Diag(TheCall->getArg(1)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 1 << TheCall->getNumArgs()
+ << TheCall->getArg(1)->getSourceRange();
+ break;
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
@@ -217,11 +173,6 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
default:
break;
}
@@ -230,19 +181,6 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return move(TheCallResult);
}
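
The new __builtin_constant_p case only checks arity; a quick sketch of the accepted and rejected forms:

    int demo(int x) {
      return __builtin_constant_p(x);     // OK: exactly one argument
      // __builtin_constant_p()     -> err_typecheck_call_too_few_args
      // __builtin_constant_p(x, x) -> err_typecheck_call_too_many_args
    }
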
-bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- switch (BuiltinID) {
- case X86::BI__builtin_ia32_palignr128:
- case X86::BI__builtin_ia32_palignr: {
- llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, 2, Result))
- return true;
- break;
- }
- }
- return false;
-}
-
// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false) {
bool quad = t & 0x10;
@@ -260,11 +198,9 @@ static unsigned RFT(unsigned t, bool shift = false) {
assert(!shift && "cannot shift float types!");
return (2 << (int)quad) - 1;
case 5: // poly8
- assert(!shift && "cannot shift polynomial types!");
- return (8 << (int)quad) - 1;
+ return shift ? 7 : (8 << (int)quad) - 1;
case 6: // poly16
- assert(!shift && "cannot shift polynomial types!");
- return (4 << (int)quad) - 1;
+ return shift ? 15 : (4 << (int)quad) - 1;
case 7: // float16
assert(!shift && "cannot shift float types!");
return (4 << (int)quad) - 1;
@@ -339,8 +275,12 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
// more efficient. For example, just map function ids to custom
// handlers.
- // Printf checking.
- if (const FormatAttr *Format = FDecl->getAttr<FormatAttr>()) {
+ // Printf and scanf checking.
+ for (specific_attr_iterator<FormatAttr>
+ i = FDecl->specific_attr_begin<FormatAttr>(),
+ e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+
+ const FormatAttr *Format = *i;
const bool b = Format->getType() == "scanf";
if (b || CheckablePrintfAttr(Format, TheCall)) {
bool HasVAListArg = Format->getFirstArg() == 0;
@@ -351,12 +291,11 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
}
}
- specific_attr_iterator<NonNullAttr>
- i = FDecl->specific_attr_begin<NonNullAttr>(),
- e = FDecl->specific_attr_end<NonNullAttr>();
-
- for (; i != e; ++i)
+ for (specific_attr_iterator<NonNullAttr>
+ i = FDecl->specific_attr_begin<NonNullAttr>(),
+ e = FDecl->specific_attr_end<NonNullAttr>(); i != e; ++i) {
CheckNonNullArguments(*i, TheCall);
+ }
return false;
}
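
The loop above means every format attribute on a declaration is now checked, not just the first one found. A hypothetical declaration carrying both a printf-style and a scanf-style attribute:

    __attribute__((format(printf, 1, 3)))
    __attribute__((format(scanf, 2, 3)))
    void log_and_parse(const char *out_fmt, const char *in_fmt, ...);
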
@@ -422,7 +361,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
QualType ValType =
FirstArg->getType()->getAs<PointerType>()->getPointeeType();
- if (!ValType->isIntegerType() && !ValType->isPointerType() &&
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType()) {
Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
<< FirstArg->getType() << FirstArg->getSourceRange();
@@ -545,9 +484,10 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// GCC does an implicit conversion to the pointer or integer ValType. This
// can fail in some cases (1i -> int**), check for this error case now.
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Invalid;
+ ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
- if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, BasePath))
+ if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, VK, BasePath))
return ExprError();
// Okay, we have something that *can* be converted to the right type. Check
@@ -556,7 +496,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// pass in 42. The 42 gets converted to char. This is even more strange
// for things like 45.123 -> char, etc.
// FIXME: Do this check.
- ImpCastExprToType(Arg, ValType, Kind, VK_RValue, &BasePath);
+ ImpCastExprToType(Arg, ValType, Kind, VK, &BasePath);
TheCall->setArg(i+1, Arg);
}
@@ -581,9 +521,6 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
-/// FIXME: GCC currently emits the following warning:
-/// "warning: input conversion stopped due to an input byte that does not
-/// belong to the input codeset UTF-8"
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
bool Sema::CheckObjCString(Expr *Arg) {
@@ -602,7 +539,21 @@ bool Sema::CheckObjCString(Expr *Arg) {
diag::warn_cfstring_literal_contains_nul_character)
<< Arg->getSourceRange();
}
-
+ if (Literal->containsNonAsciiOrNull()) {
+ llvm::StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+ llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
+ const UTF8 *FromPtr = (UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+ // Check for conversion failure.
+ if (Result != conversionOK)
+ Diag(Arg->getLocStart(),
+ diag::warn_cfstring_truncated) << Arg->getSourceRange();
+ }
return false;
}
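
A hedged sketch of the new check: CFString literals whose bytes are not valid UTF-8 fail the UTF-16 conversion and draw warn_cfstring_truncated. The macro shown is the usual CFSTR definition, assumed here:

    #define CFSTR(s) __builtin___CFStringMakeConstantString("" s "")
    // CFSTR("caf\xc3\xa9")  // "café" in UTF-8: converts to UTF-16 cleanly
    // CFSTR("caf\xe9")      // a lone 0xE9 byte is not valid UTF-8 -> warning
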
@@ -798,7 +749,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
} else if (numElements != numResElements) {
QualType eltType = LHSType->getAs<VectorType>()->getElementType();
resType = Context.getVectorType(eltType, numResElements,
- VectorType::NotAltiVec);
+ VectorType::GenericVector);
}
}
@@ -929,31 +880,44 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
bool isPrintf) {
-
+ tryAgain:
if (E->isTypeDependent() || E->isValueDependent())
return false;
switch (E->getStmtClass()) {
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: {
- const ConditionalOperator *C = cast<ConditionalOperator>(E);
+ const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E);
return SemaCheckStringLiteral(C->getTrueExpr(), TheCall, HasVAListArg,
format_idx, firstDataArg, isPrintf)
- && SemaCheckStringLiteral(C->getRHS(), TheCall, HasVAListArg,
+ && SemaCheckStringLiteral(C->getFalseExpr(), TheCall, HasVAListArg,
format_idx, firstDataArg, isPrintf);
}
+ case Stmt::IntegerLiteralClass:
+ // Technically -Wformat-nonliteral does not warn about this case.
+ // The behavior of printf and friends in this case is implementation
+ // dependent. Ideally, if the format string cannot be null, it should
+ // have a 'nonnull' attribute in the function prototype.
+ return true;
+
case Stmt::ImplicitCastExprClass: {
- const ImplicitCastExpr *Expr = cast<ImplicitCastExpr>(E);
- return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
- format_idx, firstDataArg, isPrintf);
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto tryAgain;
}
case Stmt::ParenExprClass: {
- const ParenExpr *Expr = cast<ParenExpr>(E);
- return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
- format_idx, firstDataArg, isPrintf);
+ E = cast<ParenExpr>(E)->getSubExpr();
+ goto tryAgain;
}
+ case Stmt::OpaqueValueExprClass:
+ if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
+ E = src;
+ goto tryAgain;
+ }
+ return false;
+
case Stmt::DeclRefExprClass: {
const DeclRefExpr *DR = cast<DeclRefExpr>(E);
@@ -1072,12 +1036,16 @@ Sema::CheckPrintfScanfArguments(const CallExpr *TheCall, bool HasVAListArg,
// of member functions is counted. However, it doesn't appear in our own
// lists, so decrement format_idx in that case.
if (isa<CXXMemberCallExpr>(TheCall)) {
- // Catch a format attribute mistakenly referring to the object argument.
- if (format_idx == 0)
- return;
- --format_idx;
- if(firstDataArg != 0)
- --firstDataArg;
+ const CXXMethodDecl *method_decl =
+ dyn_cast<CXXMethodDecl>(TheCall->getCalleeDecl());
+ if (method_decl && method_decl->isInstance()) {
+ // Catch a format attribute mistakenly referring to the object argument.
+ if (format_idx == 0)
+ return;
+ --format_idx;
+ if (firstDataArg != 0)
+ --firstDataArg;
+ }
}
// CHECK: printf/scanf-like function is called with no format string.
@@ -1564,6 +1532,8 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
}
// Check each flag does not conflict with any other component.
+ if (!FS.hasValidThousandsGroupingPrefix())
+ HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
if (!FS.hasValidLeadingZeros())
HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
if (!FS.hasValidPlusPrefix())
@@ -1618,9 +1588,12 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// or 'short' to an 'int'. This is done because printf is a varargs
// function.
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Ex))
- if (ICE->getType() == S.Context.IntTy)
- if (ATR.matchesType(S.Context, ICE->getSubExpr()->getType()))
+ if (ICE->getType() == S.Context.IntTy) {
+ // All further checking is done on the subexpression.
+ Ex = ICE->getSubExpr();
+ if (ATR.matchesType(S.Context, Ex->getType()))
return true;
+ }
// We may be able to offer a FixItHint if it is a supported type.
PrintfSpecifier fixedFS = FS;
@@ -1829,8 +1802,8 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
//===--- CHECK: Return Address of Stack Variable --------------------------===//
-static DeclRefExpr* EvalVal(Expr *E);
-static DeclRefExpr* EvalAddr(Expr* E);
+static Expr *EvalVal(Expr *E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars);
+static Expr *EvalAddr(Expr* E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars);
/// CheckReturnStackAddr - Check if a return statement returns the address
/// of a stack variable.
@@ -1838,45 +1811,79 @@ void
Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc) {
- // Perform checking for returned stack addresses.
- if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
- if (DeclRefExpr *DR = EvalAddr(RetValExp))
- Diag(DR->getLocStart(), diag::warn_ret_stack_addr)
- << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
-
- // Skip over implicit cast expressions when checking for block expressions.
- RetValExp = RetValExp->IgnoreParenCasts();
+ Expr *stackE = 0;
+ llvm::SmallVector<DeclRefExpr *, 8> refVars;
- if (BlockExpr *C = dyn_cast<BlockExpr>(RetValExp))
- if (C->hasBlockDeclRefExprs())
- Diag(C->getLocStart(), diag::err_ret_local_block)
- << C->getSourceRange();
+ // Perform checking for returned stack addresses, local blocks,
+ // label addresses or references to temporaries.
+ if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
+ stackE = EvalAddr(RetValExp, refVars);
+ } else if (lhsType->isReferenceType()) {
+ stackE = EvalVal(RetValExp, refVars);
+ }
- if (AddrLabelExpr *ALE = dyn_cast<AddrLabelExpr>(RetValExp))
- Diag(ALE->getLocStart(), diag::warn_ret_addr_label)
- << ALE->getSourceRange();
+ if (stackE == 0)
+ return; // Nothing suspicious was found.
- } else if (lhsType->isReferenceType()) {
- // Perform checking for stack values returned by reference.
- // Check for a reference to the stack
- if (DeclRefExpr *DR = EvalVal(RetValExp))
- Diag(DR->getLocStart(), diag::warn_ret_stack_ref)
- << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
+ SourceLocation diagLoc;
+ SourceRange diagRange;
+ if (refVars.empty()) {
+ diagLoc = stackE->getLocStart();
+ diagRange = stackE->getSourceRange();
+ } else {
+ // We followed through a reference variable. 'stackE' contains the
+ // problematic expression but we will warn at the return statement pointing
+ // at the reference variable. We will later display the "trail" of
+ // reference variables using notes.
+ diagLoc = refVars[0]->getLocStart();
+ diagRange = refVars[0]->getSourceRange();
+ }
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { // address of local var.
+ Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
+ : diag::warn_ret_stack_addr)
+ << DR->getDecl()->getDeclName() << diagRange;
+ } else if (isa<BlockExpr>(stackE)) { // local block.
+ Diag(diagLoc, diag::err_ret_local_block) << diagRange;
+ } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
+ Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
+ } else { // local temporary.
+ Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_local_temp_ref
+ : diag::warn_ret_local_temp_addr)
+ << diagRange;
+ }
+
+ // Display the "trail" of reference variables that we followed until we
+ // found the problematic expression using notes.
+ for (unsigned i = 0, e = refVars.size(); i != e; ++i) {
+ VarDecl *VD = cast<VarDecl>(refVars[i]->getDecl());
+ // If this var binds to another reference var, show the range of the next
+ // var, otherwise the var binds to the problematic expression, in which case
+ // show the range of the expression.
+ SourceRange range = (i < e-1) ? refVars[i+1]->getSourceRange()
+ : stackE->getSourceRange();
+ Diag(VD->getLocation(), diag::note_ref_var_local_bind)
+ << VD->getDeclName() << range;
}
}
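
A minimal example of the new "trail" diagnostics: the warning points at the reference named in the return statement, and notes walk each binding back to the stack variable.

    int &leak() {
      int x = 0;
      int &r1 = x;   // note: binds to x
      int &r2 = r1;  // note: binds to r1
      return r2;     // warn_ret_stack_ref, reported at 'r2'
    }
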
/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
/// check if the expression in a return statement evaluates to an address
-/// to a location on the stack. The recursion is used to traverse the
+/// to a location on the stack, a local block, an address of a label, or a
+/// reference to local temporary. The recursion is used to traverse the
/// AST of the return expression, with recursion backtracking when we
-/// encounter a subexpression that (1) clearly does not lead to the address
-/// of a stack variable or (2) is something we cannot determine leads to
-/// the address of a stack variable based on such local checking.
+/// encounter a subexpression that (1) clearly does not lead to one of the
+/// above problematic expressions or (2) is something we cannot determine
+/// leads to a problematic expression based on such local checking.
+///
+/// Both EvalAddr and EvalVal follow through reference variables to evaluate
+/// the expression that they point to. Such variables are added to the
+/// 'refVars' vector so that we know what the reference variable "trail" was.
///
/// EvalAddr processes expressions that are pointers that are used as
/// references (and not L-values). EvalVal handles all other values.
-/// At the base case of the recursion is a check for a DeclRefExpr* in
-/// the refers to a stack variable.
+/// At the base case of the recursion is a check for the above problematic
+/// expressions.
///
/// This implementation handles:
///
@@ -1886,7 +1893,10 @@ Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
/// * arbitrary interplay between "&" and "*" operators
/// * pointer arithmetic from an address of a stack variable
/// * taking the address of an array element where the array is on the stack
-static DeclRefExpr* EvalAddr(Expr *E) {
+static Expr *EvalAddr(Expr *E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars) {
+ if (E->isTypeDependent())
+ return NULL;
+
// We should only be called for evaluating pointer expressions.
assert((E->getType()->isAnyPointerType() ||
E->getType()->isBlockPointerType() ||
@@ -1899,7 +1909,23 @@ static DeclRefExpr* EvalAddr(Expr *E) {
switch (E->getStmtClass()) {
case Stmt::ParenExprClass:
// Ignore parentheses.
- return EvalAddr(cast<ParenExpr>(E)->getSubExpr());
+ return EvalAddr(cast<ParenExpr>(E)->getSubExpr(), refVars);
+
+ case Stmt::DeclRefExprClass: {
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ // If this is a reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasLocalStorage() &&
+ V->getType()->isReferenceType() && V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalAddr(V->getInit(), refVars);
+ }
+
+ return NULL;
+ }
case Stmt::UnaryOperatorClass: {
// The only unary operator that make sense to handle here
@@ -1907,7 +1933,7 @@ static DeclRefExpr* EvalAddr(Expr *E) {
UnaryOperator *U = cast<UnaryOperator>(E);
if (U->getOpcode() == UO_AddrOf)
- return EvalVal(U->getSubExpr());
+ return EvalVal(U->getSubExpr(), refVars);
else
return NULL;
}
@@ -1928,7 +1954,7 @@ static DeclRefExpr* EvalAddr(Expr *E) {
if (!Base->getType()->isPointerType()) Base = B->getRHS();
assert (Base->getType()->isPointerType());
- return EvalAddr(Base);
+ return EvalAddr(Base, refVars);
}
// For conditional operators we need to see if either the LHS or RHS are
@@ -1937,12 +1963,27 @@ static DeclRefExpr* EvalAddr(Expr *E) {
ConditionalOperator *C = cast<ConditionalOperator>(E);
// Handle the GNU extension for missing LHS.
- if (Expr *lhsExpr = C->getLHS())
- if (DeclRefExpr* LHS = EvalAddr(lhsExpr))
- return LHS;
+ if (Expr *lhsExpr = C->getLHS()) {
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (!lhsExpr->getType()->isVoidType())
+ if (Expr* LHS = EvalAddr(lhsExpr, refVars))
+ return LHS;
+ }
- return EvalAddr(C->getRHS());
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (C->getRHS()->getType()->isVoidType())
+ return NULL;
+
+ return EvalAddr(C->getRHS(), refVars);
}
+
+ case Stmt::BlockExprClass:
+ if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
+ return E; // local block.
+ return NULL;
+
+ case Stmt::AddrLabelExprClass:
+ return E; // address of label.
// For casts, we need to handle conversions from arrays to
// pointer values, and pointer-to-pointer conversions.
@@ -1955,9 +1996,9 @@ static DeclRefExpr* EvalAddr(Expr *E) {
if (SubExpr->getType()->isPointerType() ||
SubExpr->getType()->isBlockPointerType() ||
SubExpr->getType()->isObjCQualifiedIdType())
- return EvalAddr(SubExpr);
+ return EvalAddr(SubExpr, refVars);
else if (T->isArrayType())
- return EvalVal(SubExpr);
+ return EvalVal(SubExpr, refVars);
else
return 0;
}
@@ -1976,7 +2017,7 @@ static DeclRefExpr* EvalAddr(Expr *E) {
case Stmt::CXXReinterpretCastExprClass: {
Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
- return EvalAddr(S);
+ return EvalAddr(S, refVars);
else
return NULL;
}
@@ -1990,7 +2031,7 @@ static DeclRefExpr* EvalAddr(Expr *E) {
/// EvalVal - This function complements EvalAddr in the mutual recursion.
/// See the comments for EvalAddr for more details.
-static DeclRefExpr* EvalVal(Expr *E) {
+static Expr *EvalVal(Expr *E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars) {
do {
// We should only be called for evaluating non-pointer expressions, or
// expressions with a pointer type that are not used as references but instead
@@ -2010,13 +2051,24 @@ do {
}
case Stmt::DeclRefExprClass: {
- // DeclRefExpr: the base case. When we hit a DeclRefExpr we are looking
- // at code that refers to a variable's name. We check if it has local
- // storage within the function, and if so, return the expression.
+ // When we hit a DeclRefExpr we are looking at code that refers to a
+ // variable's name. If it's not a reference variable we check if it has
+ // local storage within the function, and if so, return the expression.
DeclRefExpr *DR = cast<DeclRefExpr>(E);
if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
- if (V->hasLocalStorage() && !V->getType()->isReferenceType()) return DR;
+ if (V->hasLocalStorage()) {
+ if (!V->getType()->isReferenceType())
+ return DR;
+
+ // Reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalVal(V->getInit(), refVars);
+ }
+ }
return NULL;
}
@@ -2034,7 +2086,7 @@ do {
UnaryOperator *U = cast<UnaryOperator>(E);
if (U->getOpcode() == UO_Deref)
- return EvalAddr(U->getSubExpr());
+ return EvalAddr(U->getSubExpr(), refVars);
return NULL;
}
@@ -2043,20 +2095,20 @@ do {
// Array subscripts are potential references to data on the stack. We
// retrieve the DeclRefExpr* for the array variable if it indeed
// has local storage.
- return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase());
+ return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars);
}
case Stmt::ConditionalOperatorClass: {
// For conditional operators we need to see if either the LHS or RHS are
- // non-NULL DeclRefExpr's. If one is non-NULL, we return it.
+ // non-NULL Expr's. If one is non-NULL, we return it.
ConditionalOperator *C = cast<ConditionalOperator>(E);
// Handle the GNU extension for missing LHS.
if (Expr *lhsExpr = C->getLHS())
- if (DeclRefExpr *LHS = EvalVal(lhsExpr))
+ if (Expr *LHS = EvalVal(lhsExpr, refVars))
return LHS;
- return EvalVal(C->getRHS());
+ return EvalVal(C->getRHS(), refVars);
}
// Accesses to members are potential references to data on the stack.
@@ -2072,11 +2124,16 @@ do {
if (M->getMemberDecl()->getType()->isReferenceType())
return NULL;
- return EvalVal(M->getBase());
+ return EvalVal(M->getBase(), refVars);
}
- // Everything else: we simply don't reason about them.
default:
+ // Check that we don't return or take the address of a reference to a
+ // temporary. This is only useful in C++.
+ if (!E->isTypeDependent() && E->isRValue())
+ return E;
+
+ // Everything else: we simply don't reason about them.
return NULL;
}
} while (true);
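
The new default case lets EvalVal flag rvalues directly, which catches references bound to local temporaries. A small C++ sketch:

    const int &dangle() {
      return 42;   // warn_ret_local_temp_ref: reference to a local temporary
    }
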
@@ -2090,8 +2147,8 @@ do {
void Sema::CheckFloatComparison(SourceLocation loc, Expr* lex, Expr *rex) {
bool EmitWarning = true;
- Expr* LeftExprSansParen = lex->IgnoreParens();
- Expr* RightExprSansParen = rex->IgnoreParens();
+ Expr* LeftExprSansParen = lex->IgnoreParenImpCasts();
+ Expr* RightExprSansParen = rex->IgnoreParenImpCasts();
// Special case: check for x == x (which is OK).
// Do not emit warnings for such cases.
@@ -2152,19 +2209,19 @@ struct IntRange {
: Width(Width), NonNegative(NonNegative)
{}
- // Returns the range of the bool type.
+ /// Returns the range of the bool type.
static IntRange forBoolType() {
return IntRange(1, true);
}
- // Returns the range of an integral type.
- static IntRange forType(ASTContext &C, QualType T) {
- return forCanonicalType(C, T->getCanonicalTypeInternal().getTypePtr());
+ /// Returns the range of an opaque value of the given integral type.
+ static IntRange forValueOfType(ASTContext &C, QualType T) {
+ return forValueOfCanonicalType(C,
+ T->getCanonicalTypeInternal().getTypePtr());
}
- // Returns the range of an integeral type based on its canonical
- // representation.
- static IntRange forCanonicalType(ASTContext &C, const Type *T) {
+ /// Returns the range of an opaque value of a canonical integral type.
+ static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
assert(T->isCanonicalUnqualified());
if (const VectorType *VT = dyn_cast<VectorType>(T))
@@ -2172,8 +2229,12 @@ struct IntRange {
if (const ComplexType *CT = dyn_cast<ComplexType>(T))
T = CT->getElementType().getTypePtr();
+ // For enum types, use the known bit width of the enumerators.
if (const EnumType *ET = dyn_cast<EnumType>(T)) {
EnumDecl *Enum = ET->getDecl();
+ if (!Enum->isDefinition())
+ return IntRange(C.getIntWidth(QualType(T, 0)), false);
+
unsigned NumPositive = Enum->getNumPositiveBits();
unsigned NumNegative = Enum->getNumNegativeBits();
@@ -2186,13 +2247,34 @@ struct IntRange {
return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
}
- // Returns the supremum of two ranges: i.e. their conservative merge.
+ /// Returns the "target" range of a canonical integral type, i.e.
+ /// the range of values expressible in the type.
+ ///
+ /// This matches forValueOfCanonicalType except that enums have the
+ /// full range of their type, not the range of their enumerators.
+ static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
+ if (const VectorType *VT = dyn_cast<VectorType>(T))
+ T = VT->getElementType().getTypePtr();
+ if (const ComplexType *CT = dyn_cast<ComplexType>(T))
+ T = CT->getElementType().getTypePtr();
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = ET->getDecl()->getIntegerType().getTypePtr();
+
+ const BuiltinType *BT = cast<BuiltinType>(T);
+ assert(BT->isInteger());
+
+ return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
+ }
+
+ /// Returns the supremum of two ranges: i.e. their conservative merge.
static IntRange join(IntRange L, IntRange R) {
return IntRange(std::max(L.Width, R.Width),
L.NonNegative && R.NonNegative);
}
- // Returns the infinum of two ranges: i.e. their aggressive merge.
+ /// Returns the infimum of two ranges: i.e. their aggressive merge.
static IntRange meet(IntRange L, IntRange R) {
return IntRange(std::min(L.Width, R.Width),
L.NonNegative || R.NonNegative);
@@ -2204,7 +2286,7 @@ IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
return IntRange(value.getMinSignedBits(), false);
if (value.getBitWidth() > MaxWidth)
- value.trunc(MaxWidth);
+ value = value.trunc(MaxWidth);
// isNonNegative() just checks the sign bit without considering
// signedness.
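
The one-line change reflects the updated APInt/APSInt API in this import: trunc (like sext and zext) now returns the resized value instead of mutating in place. A sketch:

    #include "llvm/ADT/APSInt.h"

    void demo() {
      llvm::APSInt v(64, /*isUnsigned=*/true);
      v = 300;
      v = v.trunc(8);  // must reassign: v now holds the low 8 bits, i.e. 44
    }
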
@@ -2259,11 +2341,9 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
if (CE->getCastKind() == CK_NoOp)
return GetExprRange(C, CE->getSubExpr(), MaxWidth);
- IntRange OutputTypeRange = IntRange::forType(C, CE->getType());
+ IntRange OutputTypeRange = IntRange::forValueOfType(C, CE->getType());
bool isIntegerCast = (CE->getCastKind() == CK_IntegralCast);
- if (!isIntegerCast && CE->getCastKind() == CK_Unknown)
- isIntegerCast = CE->getSubExpr()->getType()->isIntegerType();
// Assume that non-integer casts can span the full range of the type.
if (!isIntegerCast)
@@ -2318,12 +2398,12 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
case BO_RemAssign:
case BO_AddAssign:
case BO_SubAssign:
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
// Operations with opaque sources are black-listed.
case BO_PtrMemD:
case BO_PtrMemI:
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
// Bitwise-and uses the *infinum* of the two source ranges.
case BO_And:
@@ -2338,14 +2418,14 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
if (IntegerLiteral *I
= dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
if (I->getValue() == 1) {
- IntRange R = IntRange::forType(C, E->getType());
+ IntRange R = IntRange::forValueOfType(C, E->getType());
return IntRange(R.Width, /*NonNegative*/ true);
}
}
// fallthrough
case BO_ShlAssign:
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
// Right shift by a constant can narrow its left argument.
case BO_Shr:
@@ -2374,7 +2454,7 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
// Black-list pointer subtractions.
case BO_Sub:
if (BO->getLHS()->getType()->isPointerType())
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
// fallthrough
default:
@@ -2397,7 +2477,7 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
// Operations with opaque sources are black-listed.
case UO_Deref:
case UO_AddrOf: // should be impossible
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
default:
return GetExprRange(C, UO->getSubExpr(), MaxWidth);
@@ -2405,7 +2485,7 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
}
if (dyn_cast<OffsetOfExpr>(E)) {
- IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
}
FieldDecl *BitField = E->getBitField();
@@ -2416,7 +2496,7 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
return IntRange(BitWidth, BitField->getType()->isUnsignedIntegerType());
}
- return IntRange::forType(C, E->getType());
+ return IntRange::forValueOfType(C, E->getType());
}
IntRange GetExprRange(ASTContext &C, Expr *E) {
@@ -2461,30 +2541,55 @@ bool IsSameFloatAfterCast(const APValue &value,
IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}
-void AnalyzeImplicitConversions(Sema &S, Expr *E);
+void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
+
+static bool IsZero(Sema &S, Expr *E) {
+ // Suppress cases where we are comparing against an enum constant.
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (isa<EnumConstantDecl>(DR->getDecl()))
+ return false;
+
+ // Suppress cases where the '0' value is expanded from a macro.
+ if (E->getLocStart().isMacroID())
+ return false;
-bool IsZero(Sema &S, Expr *E) {
llvm::APSInt Value;
return E->isIntegerConstantExpr(Value, S.Context) && Value == 0;
}
+static bool HasEnumType(Expr *E) {
+ // Strip off implicit integral promotions.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() != CK_IntegralCast &&
+ ICE->getCastKind() != CK_NoOp)
+ break;
+ E = ICE->getSubExpr();
+ }
+
+ return E->getType()->isEnumeralType();
+}
+
void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
BinaryOperatorKind op = E->getOpcode();
+ if (E->isValueDependent())
+ return;
+
if (op == BO_LT && IsZero(S, E->getRHS())) {
S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
- << "< 0" << "false"
+ << "< 0" << "false" << HasEnumType(E->getLHS())
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
} else if (op == BO_GE && IsZero(S, E->getRHS())) {
S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
- << ">= 0" << "true"
+ << ">= 0" << "true" << HasEnumType(E->getLHS())
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
} else if (op == BO_GT && IsZero(S, E->getLHS())) {
S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
- << "0 >" << "false"
+ << "0 >" << "false" << HasEnumType(E->getRHS())
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
} else if (op == BO_LE && IsZero(S, E->getLHS())) {
S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
- << "0 <=" << "true"
+ << "0 <=" << "true" << HasEnumType(E->getRHS())
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
}
}
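
What the updated diagnostic fires on, sketched below; the new %select operand reports whether the operand had enum type, and the rewritten IsZero stays quiet for enum constants and macro-expanded zeros.

    void demo(unsigned u) {
      if (u < 0)  {}  // always false: warn_lunsigned_always_true_comparison
      if (u >= 0) {}  // always true
    }
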
@@ -2492,8 +2597,8 @@ void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
- AnalyzeImplicitConversions(S, E->getLHS());
- AnalyzeImplicitConversions(S, E->getRHS());
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}
/// \brief Implements -Wsign-compare.
@@ -2511,7 +2616,11 @@ void AnalyzeComparison(Sema &S, BinaryOperator *E) {
// We don't do anything special if this isn't an unsigned integral
// comparison: we're only interested in integral comparisons, and
// signed comparisons only happen in cases we don't care to warn about.
- if (!T->hasUnsignedIntegerRepresentation())
+ //
+ // We also don't care about value-dependent expressions or expressions
+ // whose result is a constant.
+ if (!T->hasUnsignedIntegerRepresentation()
+ || E->isValueDependent() || E->isIntegerConstantExpr(S.Context))
return AnalyzeImpConvsInComparison(S, E);
Expr *lex = E->getLHS()->IgnoreParenImpCasts();
@@ -2538,8 +2647,8 @@ void AnalyzeComparison(Sema &S, BinaryOperator *E) {
// Go ahead and analyze implicit conversions in the operands. Note
// that we skip the implicit conversions on both sides.
- AnalyzeImplicitConversions(S, lex);
- AnalyzeImplicitConversions(S, rex);
+ AnalyzeImplicitConversions(S, lex, E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, rex, E->getOperatorLoc());
// If the signed range is non-negative, -Wsign-compare won't fire,
// but we should still check for comparisons which are always true
@@ -2568,13 +2677,103 @@ void AnalyzeComparison(Sema &S, BinaryOperator *E) {
<< lex->getSourceRange() << rex->getSourceRange();
}
+/// Analyzes an attempt to assign the given value to a bitfield.
+///
+/// Returns true if there was something fishy about the attempt.
+bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
+ SourceLocation InitLoc) {
+ assert(Bitfield->isBitField());
+ if (Bitfield->isInvalidDecl())
+ return false;
+
+ // White-list bool bitfields.
+ if (Bitfield->getType()->isBooleanType())
+ return false;
+
+ // Ignore value- or type-dependent expressions.
+ if (Bitfield->getBitWidth()->isValueDependent() ||
+ Bitfield->getBitWidth()->isTypeDependent() ||
+ Init->isValueDependent() ||
+ Init->isTypeDependent())
+ return false;
+
+ Expr *OriginalInit = Init->IgnoreParenImpCasts();
+
+ llvm::APSInt Width(32);
+ Expr::EvalResult InitValue;
+ if (!Bitfield->getBitWidth()->isIntegerConstantExpr(Width, S.Context) ||
+ !OriginalInit->Evaluate(InitValue, S.Context) ||
+ !InitValue.Val.isInt())
+ return false;
+
+ const llvm::APSInt &Value = InitValue.Val.getInt();
+ unsigned OriginalWidth = Value.getBitWidth();
+ unsigned FieldWidth = Width.getZExtValue();
+
+ if (OriginalWidth <= FieldWidth)
+ return false;
+
+ llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
+
+ // It's fairly common to write values into signed bitfields
+ // that, if sign-extended, would end up becoming a different
+ // value. We don't want to warn about that.
+ if (Value.isSigned() && Value.isNegative())
+ TruncatedValue = TruncatedValue.sext(OriginalWidth);
+ else
+ TruncatedValue = TruncatedValue.zext(OriginalWidth);
+
+ if (Value == TruncatedValue)
+ return false;
+
+ std::string PrettyValue = Value.toString(10);
+ std::string PrettyTrunc = TruncatedValue.toString(10);
+
+ S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
+ << PrettyValue << PrettyTrunc << OriginalInit->getType()
+ << Init->getSourceRange();
+
+ return true;
+}
+
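
A small example of what AnalyzeBitFieldAssignment flags, and of the sign-extension escape hatch it deliberately allows:

    struct Flags { int mode : 3; };   // holds -4..3

    void demo(Flags f) {
      f.mode = 17;  // warns: 17 truncates to 1 in a 3-bit field
      f.mode = -1;  // quiet: sign-extending the truncated bits restores -1
    }
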
+/// Analyze the given simple or compound assignment for warning-worthy
+/// operations.
+void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
+ // Just recurse on the LHS.
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+
+ // We want to recurse on the RHS as normal unless we're assigning to
+ // a bitfield.
+ if (FieldDecl *Bitfield = E->getLHS()->getBitField()) {
+ if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
+ E->getOperatorLoc())) {
+ // Recurse, ignoring any implicit conversions on the RHS.
+ return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
+ E->getOperatorLoc());
+ }
+ }
+
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+}
+
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
-void DiagnoseImpCast(Sema &S, Expr *E, QualType T, unsigned diag) {
- S.Diag(E->getExprLoc(), diag) << E->getType() << T << E->getSourceRange();
+void DiagnoseImpCast(Sema &S, Expr *E, QualType T, SourceLocation CContext,
+ unsigned diag) {
+ S.Diag(E->getExprLoc(), diag)
+ << E->getType() << T << E->getSourceRange() << SourceRange(CContext);
+}
+
+std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
+ if (!Range.Width) return "0";
+
+ llvm::APSInt ValueInRange = Value;
+ ValueInRange.setIsSigned(!Range.NonNegative);
+ ValueInRange = ValueInRange.trunc(Range.Width);
+ return ValueInRange.toString(10);
}
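
PrettyPrintInRange feeds the new default-on constant-conversion diagnostic; assuming a signed 8-bit char, for example:

    void demo() {
      char c = 300;  // warns: implicit conversion changes value from 300 to 44
      (void)c;
    }
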
void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
- bool *ICContext = 0) {
+ SourceLocation CC, bool *ICContext = 0) {
if (E->isTypeDependent() || E->isValueDependent()) return;
const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
@@ -2582,6 +2781,13 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (Source == Target) return;
if (Target->isDependentType()) return;
+ // If the conversion context location is invalid or instantiated
+ // from a system macro, don't complain.
+ if (CC.isInvalid() ||
+ (CC.isMacroID() && S.Context.getSourceManager().isInSystemHeader(
+ S.Context.getSourceManager().getSpellingLoc(CC))))
+ return;
+
// Never diagnose implicit casts to bool.
if (Target->isSpecificBuiltinType(BuiltinType::Bool))
return;
@@ -2589,7 +2795,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip vector types.
if (isa<VectorType>(Source)) {
if (!isa<VectorType>(Target))
- return DiagnoseImpCast(S, E, T, diag::warn_impcast_vector_scalar);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
Source = cast<VectorType>(Source)->getElementType().getTypePtr();
Target = cast<VectorType>(Target)->getElementType().getTypePtr();
@@ -2598,7 +2804,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip complex types.
if (isa<ComplexType>(Source)) {
if (!isa<ComplexType>(Target))
- return DiagnoseImpCast(S, E, T, diag::warn_impcast_complex_scalar);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_complex_scalar);
Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
@@ -2626,15 +2832,21 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
- DiagnoseImpCast(S, E, T, diag::warn_impcast_float_precision);
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
}
return;
}
// If the target is integral, always warn.
- if ((TargetBT && TargetBT->isInteger()))
- // TODO: don't warn for integer values?
- DiagnoseImpCast(S, E, T, diag::warn_impcast_float_integer);
+ if ((TargetBT && TargetBT->isInteger())) {
+ Expr *InnerE = E->IgnoreParenImpCasts();
+ if (FloatingLiteral *LiteralExpr = dyn_cast<FloatingLiteral>(InnerE)) {
+ DiagnoseImpCast(S, LiteralExpr, T, CC,
+ diag::warn_impcast_literal_float_to_integer);
+ } else {
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_integer);
+ }
+ }
return;
}
@@ -2643,14 +2855,27 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
IntRange SourceRange = GetExprRange(S.Context, E);
- IntRange TargetRange = IntRange::forCanonicalType(S.Context, Target);
+ IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
if (SourceRange.Width > TargetRange.Width) {
+ // If the source is a constant, use a default-on diagnostic.
+ // TODO: this should happen for bitfield stores, too.
+ llvm::APSInt Value(32);
+ if (E->isIntegerConstantExpr(Value, S.Context)) {
+ std::string PrettySourceValue = Value.toString(10);
+ std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
+
+ S.Diag(E->getExprLoc(), diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue
+ << E->getType() << T << E->getSourceRange() << clang::SourceRange(CC);
+ return;
+ }
+
// People want to build with -Wshorten-64-to-32 and not -Wconversion
// and by god we'll let them.
if (SourceRange.Width == 64 && TargetRange.Width == 32)
- return DiagnoseImpCast(S, E, T, diag::warn_impcast_integer_64_32);
- return DiagnoseImpCast(S, E, T, diag::warn_impcast_integer_precision);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
}
if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
@@ -2668,7 +2893,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
*ICContext = true;
}
- return DiagnoseImpCast(S, E, T, DiagID);
+ return DiagnoseImpCast(S, E, T, CC, DiagID);
}
return;
@@ -2677,35 +2902,38 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T);
void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
- bool &ICContext) {
+ SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
if (isa<ConditionalOperator>(E))
return CheckConditionalOperator(S, cast<ConditionalOperator>(E), T);
- AnalyzeImplicitConversions(S, E);
+ AnalyzeImplicitConversions(S, E, CC);
if (E->getType() != T)
- return CheckImplicitConversion(S, E, T, &ICContext);
+ return CheckImplicitConversion(S, E, T, CC, &ICContext);
return;
}
void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T) {
- AnalyzeImplicitConversions(S, E->getCond());
+ SourceLocation CC = E->getQuestionLoc();
+
+ AnalyzeImplicitConversions(S, E->getCond(), CC);
bool Suspicious = false;
- CheckConditionalOperand(S, E->getTrueExpr(), T, Suspicious);
- CheckConditionalOperand(S, E->getFalseExpr(), T, Suspicious);
+ CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
+ CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
// If -Wconversion would have warned about either of the candidates
// for a signedness conversion to the context type...
if (!Suspicious) return;
// ...but it's currently ignored...
- if (S.Diags.getDiagnosticLevel(diag::warn_impcast_integer_sign_conditional))
+ if (S.Diags.getDiagnosticLevel(diag::warn_impcast_integer_sign_conditional,
+ CC))
return;
// ...and -Wsign-compare isn't...
- if (!S.Diags.getDiagnosticLevel(diag::warn_mixed_sign_conditional))
+ if (!S.Diags.getDiagnosticLevel(diag::warn_mixed_sign_conditional, CC))
return;
// ...then check whether it would have warned about either of the
@@ -2713,10 +2941,10 @@ void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T) {
if (E->getType() != T) {
Suspicious = false;
CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
- E->getType(), &Suspicious);
+ E->getType(), CC, &Suspicious);
if (!Suspicious)
CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
- E->getType(), &Suspicious);
+ E->getType(), CC, &Suspicious);
if (!Suspicious)
return;
}
@@ -2732,7 +2960,7 @@ void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T) {
/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
-void AnalyzeImplicitConversions(Sema &S, Expr *OrigE) {
+void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
QualType T = OrigE->getType();
Expr *E = OrigE->IgnoreParenImpCasts();
@@ -2748,19 +2976,25 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE) {
// The non-canonical typecheck is just an optimization;
// CheckImplicitConversion will filter out dead implicit conversions.
if (E->getType() != T)
- CheckImplicitConversion(S, E, T);
+ CheckImplicitConversion(S, E, T, CC);
// Now continue drilling into this expression.
// Skip past explicit casts.
if (isa<ExplicitCastExpr>(E)) {
E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
- return AnalyzeImplicitConversions(S, E);
+ return AnalyzeImplicitConversions(S, E, CC);
}
- // Do a somewhat different check with comparison operators.
- if (isa<BinaryOperator>(E) && cast<BinaryOperator>(E)->isComparisonOp())
- return AnalyzeComparison(S, cast<BinaryOperator>(E));
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ // Do a somewhat different check with comparison operators.
+ if (BO->isComparisonOp())
+ return AnalyzeComparison(S, BO);
+
+ // And with assignments and compound assignments.
+ if (BO->isAssignmentOp())
+ return AnalyzeAssignment(S, BO);
+ }
// These break the otherwise-useful invariant below. Fortunately,
// we don't really need to recurse into them, because any internal
@@ -2772,9 +3006,9 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE) {
if (isa<SizeOfAlignOfExpr>(E)) return;
// Now just recurse over the expression's children.
- for (Stmt::child_iterator I = E->child_begin(), IE = E->child_end();
- I != IE; ++I)
- AnalyzeImplicitConversions(S, cast<Expr>(*I));
+ CC = E->getExprLoc();
+ for (Stmt::child_range I = E->children(); I; ++I)
+ AnalyzeImplicitConversions(S, cast<Expr>(*I), CC);
}
} // end anonymous namespace
@@ -2782,7 +3016,11 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE) {
/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
-void Sema::CheckImplicitConversions(Expr *E) {
+///
+/// \param CC the "context" location of the implicit conversion, i.e.
+/// the location of the syntactic entity requiring the implicit
+/// conversion
+void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
// Don't diagnose in unevaluated contexts.
if (ExprEvalContexts.back().Context == Sema::Unevaluated)
return;
@@ -2791,7 +3029,14 @@ void Sema::CheckImplicitConversions(Expr *E) {
if (E->isTypeDependent() || E->isValueDependent())
return;
- AnalyzeImplicitConversions(*this, E);
+ // This is not the right CC for (e.g.) a variable initialization.
+ AnalyzeImplicitConversions(*this, E, CC);
+}
+
+void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
+ FieldDecl *BitField,
+ Expr *Init) {
+ (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}
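
The new entry point gives bit-field initializers the same width analysis as bit-field assignments. A minimal sketch of the kind of initialization it is meant to catch, assuming AnalyzeBitFieldAssignment warns when constant bits are dropped:

    struct S {
      S() : bits(17) {}   // 17 does not fit in 3 bits; stored value is 1
      int bits : 3;
    };
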
/// CheckParmsForFunctionDef - Check that the parameters of the given
@@ -2799,11 +3044,12 @@ void Sema::CheckImplicitConversions(Expr *E) {
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
-bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) {
+bool Sema::CheckParmsForFunctionDef(ParmVarDecl **P, ParmVarDecl **PEnd,
+ bool CheckParameterNames) {
bool HasInvalidParm = false;
- for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
-
+ for (; P != PEnd; ++P) {
+ ParmVarDecl *Param = *P;
+
// C99 6.7.5.3p4: the parameters in a parameter type list in a
// function declarator that is part of a function definition of
// that function shall not have incomplete type.
@@ -2818,7 +3064,8 @@ bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) {
// C99 6.9.1p5: If the declarator includes a parameter type list, the
// declaration of each parameter shall include an identifier.
- if (Param->getIdentifier() == 0 &&
+ if (CheckParameterNames &&
+ Param->getIdentifier() == 0 &&
!Param->isImplicit() &&
!getLangOptions().CPlusPlus)
Diag(Param->getLocation(), diag::err_parameter_name_omitted);
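
CheckParmsForFunctionDef now walks an explicit [P, PEnd) range and makes the C99 parameter-name check optional, so callers that have only a parameter list rather than a FunctionDecl can reuse it. The check itself is unchanged; in C it still rejects definitions such as:

    void f(int) { }   /* err_parameter_name_omitted (C99 6.9.1p5); fine in C++ */
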
@@ -2846,7 +3093,8 @@ bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) {
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
// This is actually a lot of work to potentially be doing on every
// cast; don't do it if we're ignoring -Wcast_align (as is the default).
- if (getDiagnostics().getDiagnosticLevel(diag::warn_cast_align)
+ if (getDiagnostics().getDiagnosticLevel(diag::warn_cast_align,
+ TRange.getBegin())
== Diagnostic::Ignored)
return;
@@ -2885,3 +3133,47 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
<< TRange << Op->getSourceRange();
}
+void Sema::CheckArrayAccess(const clang::ArraySubscriptExpr *E) {
+ const Expr *BaseExpr = E->getBase()->IgnoreParenImpCasts();
+ const ConstantArrayType *ArrayTy =
+ Context.getAsConstantArrayType(BaseExpr->getType());
+ if (!ArrayTy)
+ return;
+
+ const Expr *IndexExpr = E->getIdx();
+ if (IndexExpr->isValueDependent())
+ return;
+ llvm::APSInt index;
+ if (!IndexExpr->isIntegerConstantExpr(index, Context))
+ return;
+
+ if (!index.isNegative()) {
+ llvm::APInt size = ArrayTy->getSize();
+ if (!size.isStrictlyPositive())
+ return;
+ if (size.getBitWidth() > index.getBitWidth())
+ index = index.sext(size.getBitWidth());
+ else if (size.getBitWidth() < index.getBitWidth())
+ size = size.sext(index.getBitWidth());
+
+ if (index.slt(size))
+ return;
+
+ Diag(E->getBase()->getLocStart(), diag::warn_array_index_exceeds_bounds)
+ << index.toString(10, true) << size.toString(10, true)
+ << IndexExpr->getSourceRange();
+ } else {
+ Diag(E->getBase()->getLocStart(), diag::warn_array_index_precedes_bounds)
+ << index.toString(10, true) << IndexExpr->getSourceRange();
+ }
+
+ const NamedDecl *ND = NULL;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ if (ND)
+ Diag(ND->getLocStart(), diag::note_array_index_out_of_bounds)
+ << ND->getDeclName();
+}
+
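
CheckArrayAccess warns when a constant subscript provably falls outside a constant-sized array, and adds a note at the array's declaration. A sketch of both diagnostics (wording approximated):

    int buf[4];
    int f(void) {
      return buf[4]    /* index 4 is past the end of 'buf' (size 4) */
           + buf[-1];  /* index -1 precedes the start of 'buf' */
    }
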
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
index f00d1cd..bab665a 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
@@ -116,6 +117,9 @@ namespace {
/// \brief The semantic analysis object for which results are being
/// produced.
Sema &SemaRef;
+
+ /// \brief The allocator used to allocate new code-completion strings.
+ CodeCompletionAllocator &Allocator;
/// \brief If non-NULL, a filter function used to remove any code-completion
/// results that are not desirable.
@@ -146,12 +150,44 @@ namespace {
/// \brief The selector that we prefer.
Selector PreferredSelector;
- void AdjustResultPriorityForPreferredType(Result &R);
+ /// \brief The completion context in which we are gathering results.
+ CodeCompletionContext CompletionContext;
+
+ /// \brief If we are in an instance method definition, the @implementation
+ /// object.
+ ObjCImplementationDecl *ObjCImplementation;
+
+ void AdjustResultPriorityForDecl(Result &R);
+ void MaybeAddConstructorResults(Result R);
+
public:
- explicit ResultBuilder(Sema &SemaRef, LookupFilter Filter = 0)
- : SemaRef(SemaRef), Filter(Filter), AllowNestedNameSpecifiers(false),
- HasObjectTypeQualifiers(false) { }
+ explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ const CodeCompletionContext &CompletionContext,
+ LookupFilter Filter = 0)
+ : SemaRef(SemaRef), Allocator(Allocator), Filter(Filter),
+ AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
+ CompletionContext(CompletionContext),
+ ObjCImplementation(0)
+ {
+ // If this is an Objective-C instance method definition, dig out the
+ // corresponding implementation.
+ switch (CompletionContext.getKind()) {
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Recovery:
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->isInstanceMethod())
+ if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
+ ObjCImplementation = Interface->getImplementation();
+ break;
+
+ default:
+ break;
+ }
+ }
/// \brief Whether we should include code patterns in the completion
/// results.
@@ -165,10 +201,6 @@ namespace {
this->Filter = Filter;
}
- typedef std::vector<Result>::iterator iterator;
- iterator begin() { return Results.begin(); }
- iterator end() { return Results.end(); }
-
Result *data() { return Results.empty()? 0 : &Results.front(); }
unsigned size() const { return Results.size(); }
bool empty() const { return Results.empty(); }
@@ -198,12 +230,25 @@ namespace {
void setPreferredSelector(Selector Sel) {
PreferredSelector = Sel;
}
+
+ /// \brief Retrieve the code-completion context for which results are
+ /// being collected.
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
+ }
/// \brief Specify whether nested-name-specifiers are allowed.
void allowNestedNameSpecifiers(bool Allow = true) {
AllowNestedNameSpecifiers = Allow;
}
+ /// \brief Return the semantic analysis object for which we are collecting
+ /// code completion results.
+ Sema &getSema() const { return SemaRef; }
+
+ /// \brief Retrieve the allocator used to allocate code completion strings.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
/// \brief Determine whether the given declaration is at all interesting
/// as a code-completion result.
///
@@ -278,6 +323,7 @@ namespace {
bool IsObjCIvar(NamedDecl *ND) const;
bool IsObjCMessageReceiver(NamedDecl *ND) const;
bool IsObjCCollection(NamedDecl *ND) const;
+ bool IsImpossibleToSatisfy(NamedDecl *ND) const;
//@}
};
}
@@ -324,11 +370,11 @@ public:
return *this;
}
- iterator operator++(int) {
+ /*iterator operator++(int) {
iterator tmp(*this);
++(*this);
return tmp;
- }
+ }*/
reference operator*() const {
if (NamedDecl *ND = DeclOrIterator.dyn_cast<NamedDecl *>())
@@ -464,15 +510,20 @@ bool ResultBuilder::isInterestingDecl(NamedDecl *ND,
return false;
}
}
-
- // C++ constructors are never found by name lookup.
- if (isa<CXXConstructorDecl>(ND))
+
+ // Skip out-of-line declarations and definitions.
+ // NOTE: Unless it's an Objective-C property, method, or ivar, where
+ // the contexts can be messy.
+ if (!ND->getDeclContext()->Equals(ND->getLexicalDeclContext()) &&
+ !(isa<ObjCPropertyDecl>(ND) || isa<ObjCIvarDecl>(ND) ||
+ isa<ObjCMethodDecl>(ND)))
return false;
-
+
if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
Filter != &ResultBuilder::IsNamespace &&
- Filter != &ResultBuilder::IsNamespaceOrAlias))
+ Filter != &ResultBuilder::IsNamespaceOrAlias &&
+ Filter != 0))
AsNestedNameSpecifier = true;
// Filter out any unwanted results.
@@ -535,7 +586,6 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::UndeducedAuto:
return STC_Other;
case BuiltinType::ObjCId:
@@ -621,23 +671,66 @@ QualType clang::getDeclUsageType(ASTContext &C, NamedDecl *ND) {
return T.getNonReferenceType();
}
-void ResultBuilder::AdjustResultPriorityForPreferredType(Result &R) {
- QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
- if (T.isNull())
- return;
+void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
+ // If this is an Objective-C method declaration whose selector matches our
+ // preferred selector, give it a priority boost.
+ if (!PreferredSelector.isNull())
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
+ if (PreferredSelector == Method->getSelector())
+ R.Priority += CCD_SelectorMatch;
- CanQualType TC = SemaRef.Context.getCanonicalType(T);
- // Check for exactly-matching types (modulo qualifiers).
- if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC)) {
- if (PreferredType->isVoidType())
- R.Priority += CCD_VoidMatch;
- else
- R.Priority /= CCF_ExactTypeMatch;
- } // Check for nearly-matching types, based on classification of each.
- else if ((getSimplifiedTypeClass(PreferredType)
+ // If we have a preferred type, adjust the priority for results with exactly-
+ // matching or nearly-matching types.
+ if (!PreferredType.isNull()) {
+ QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ if (!T.isNull()) {
+ CanQualType TC = SemaRef.Context.getCanonicalType(T);
+ // Check for exactly-matching types (modulo qualifiers).
+ if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
+ R.Priority /= CCF_ExactTypeMatch;
+ // Check for nearly-matching types, based on classification of each.
+ else if ((getSimplifiedTypeClass(PreferredType)
== getSimplifiedTypeClass(TC)) &&
- !(PreferredType->isEnumeralType() && TC->isEnumeralType()))
- R.Priority /= CCF_SimilarTypeMatch;
+ !(PreferredType->isEnumeralType() && TC->isEnumeralType()))
+ R.Priority /= CCF_SimilarTypeMatch;
+ }
+ }
+}
+
+void ResultBuilder::MaybeAddConstructorResults(Result R) {
+ if (!SemaRef.getLangOptions().CPlusPlus || !R.Declaration ||
+ !CompletionContext.wantConstructorResults())
+ return;
+
+ ASTContext &Context = SemaRef.Context;
+ NamedDecl *D = R.Declaration;
+ CXXRecordDecl *Record = 0;
+ if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(D))
+ Record = ClassTemplate->getTemplatedDecl();
+ else if ((Record = dyn_cast<CXXRecordDecl>(D))) {
+ // Skip specializations and partial specializations.
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ return;
+ } else {
+ // There are no constructors here.
+ return;
+ }
+
+ Record = Record->getDefinition();
+ if (!Record)
+ return;
+
+
+ QualType RecordTy = Context.getTypeDeclType(Record);
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(RecordTy));
+ for (DeclContext::lookup_result Ctors = Record->lookup(ConstructorName);
+ Ctors.first != Ctors.second; ++Ctors.first) {
+ R.Declaration = *Ctors.first;
+ R.CursorKind = getCursorKindForDecl(R.Declaration);
+ Results.push_back(R);
+ }
}
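
Because C++ constructors are never found by ordinary name lookup, MaybeAddConstructorResults re-adds each constructor explicitly whenever the completion context wants constructor results, e.g. after 'new'. A sketch of the effect:

    struct Point {
      Point();
      Point(int x, int y);
    };
    // Completing "new Poi" can now offer Point() and Point(int x, int y)
    // as separate results instead of just the class name.
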
void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
@@ -662,6 +755,10 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
return;
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
ShadowMap &SMap = ShadowMaps.back();
ShadowMapEntry::iterator I, IEnd;
ShadowMap::iterator NamePos = SMap.find(R.Declaration->getDeclName());
@@ -719,20 +816,13 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (!AllDeclsFound.insert(CanonDecl))
return;
- // If this is an Objective-C method declaration whose selector matches our
- // preferred selector, give it a priority boost.
- if (!PreferredSelector.isNull())
- if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
- if (PreferredSelector == Method->getSelector())
- R.Priority += CCD_SelectorMatch;
-
// If the filter is for nested-name-specifiers, then this result starts a
// nested-name-specifier.
if (AsNestedNameSpecifier) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
- } else if (!PreferredType.isNull())
- AdjustResultPriorityForPreferredType(R);
+ } else
+ AdjustResultPriorityForDecl(R);
// If this result is supposed to have an informative qualifier, add one.
if (R.QualifierIsInformative && !R.Qualifier &&
@@ -751,6 +841,9 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
// map.
SMap[R.Declaration->getDeclName()].Add(R.Declaration, Results.size());
Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
}
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
@@ -771,6 +864,10 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
return;
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
return;
@@ -806,15 +903,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (InBaseClass)
R.Priority += CCD_InBaseClass;
- // If this is an Objective-C method declaration whose selector matches our
- // preferred selector, give it a priority boost.
- if (!PreferredSelector.isNull())
- if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
- if (PreferredSelector == Method->getSelector())
- R.Priority += CCD_SelectorMatch;
-
- if (!PreferredType.isNull())
- AdjustResultPriorityForPreferredType(R);
+ AdjustResultPriorityForDecl(R);
if (HasObjectTypeQualifiers)
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
@@ -832,6 +921,9 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
// Insert this result into the set of results.
Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
}
void ResultBuilder::AddResult(Result R) {
@@ -864,9 +956,14 @@ bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary;
if (SemaRef.getLangOptions().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND))
- return true;
-
+ else if (SemaRef.getLangOptions().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ if (isa<ObjCPropertyDecl>(ND) &&
+ SemaRef.canSynthesizeProvisionalIvar(cast<ObjCPropertyDecl>(ND)))
+ return true;
+ }
+
return ND->getIdentifierNamespace() & IDNS;
}
@@ -880,9 +977,14 @@ bool ResultBuilder::IsOrdinaryNonTypeName(NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary;
if (SemaRef.getLangOptions().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND))
- return true;
-
+ else if (SemaRef.getLangOptions().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ if (isa<ObjCPropertyDecl>(ND) &&
+ SemaRef.canSynthesizeProvisionalIvar(cast<ObjCPropertyDecl>(ND)))
+ return true;
+ }
+
return ND->getIdentifierNamespace() & IDNS;
}
@@ -1038,6 +1140,10 @@ bool ResultBuilder::IsObjCCollection(NamedDecl *ND) const {
(SemaRef.getLangOptions().CPlusPlus && T->isRecordType());
}
+bool ResultBuilder::IsImpossibleToSatisfy(NamedDecl *ND) const {
+ return false;
+}
+
/// \brief Determines whether the given declaration is an Objective-C
/// instance variable.
bool ResultBuilder::IsObjCIvar(NamedDecl *ND) const {
@@ -1088,32 +1194,32 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("restrict", CCP_Type));
}
+ CodeCompletionBuilder Builder(Results.getAllocator());
if (LangOpts.CPlusPlus) {
// C++-specific
- Results.AddResult(Result("bool", CCP_Type));
+ Results.AddResult(Result("bool", CCP_Type +
+ (LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
// typename qualified-id
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typename");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualifier");
- Pattern->AddTextChunk("::");
- Pattern->AddPlaceholderChunk("name");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
if (LangOpts.CPlusPlus0x) {
Results.AddResult(Result("auto", CCP_Type));
Results.AddResult(Result("char16_t", CCP_Type));
Results.AddResult(Result("char32_t", CCP_Type));
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("decltype");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("decltype");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
}
}
@@ -1124,18 +1230,16 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
// Results.AddResult(Result("_Decimal64"));
// Results.AddResult(Result("_Decimal128"));
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typeof");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
-
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typeof");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
}
}
@@ -1180,6 +1284,8 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
case Sema::PCC_Condition:
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
break;
}
}
@@ -1198,20 +1304,17 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
static void AddTypedefResult(ResultBuilder &Results) {
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typedef");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("name");
- Results.AddResult(CodeCompletionResult(Pattern));
+ CodeCompletionBuilder Builder(Results.getAllocator());
+ Builder.AddTypedTextChunk("typedef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
const LangOptions &LangOpts) {
- if (LangOpts.CPlusPlus)
- return true;
-
switch (CCC) {
case Sema::PCC_Namespace:
case Sema::PCC_Class:
@@ -1221,16 +1324,20 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
case Sema::PCC_Statement:
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
return true;
- case Sema::PCC_ObjCInterface:
- case Sema::PCC_ObjCImplementation:
case Sema::PCC_Expression:
case Sema::PCC_Condition:
+ return LangOpts.CPlusPlus;
+
+ case Sema::PCC_ObjCInterface:
+ case Sema::PCC_ObjCImplementation:
return false;
case Sema::PCC_ForInit:
- return LangOpts.ObjC1 || LangOpts.C99;
+ return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
}
return false;
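
WantTypesInContext no longer short-circuits on C++: expression and condition contexts offer type names only in C++ (where functional casts and condition declarations make them useful), and a C++ for-init-statement may declare, so:

    void count(int n) {
      for (int i = 0; i != n; ++i) { }   // 'int' is now offered in the for-init
    }
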
@@ -1241,58 +1348,53 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Scope *S,
Sema &SemaRef,
ResultBuilder &Results) {
+ CodeCompletionBuilder Builder(Results.getAllocator());
+
typedef CodeCompletionResult Result;
switch (CCC) {
case Sema::PCC_Namespace:
if (SemaRef.getLangOptions().CPlusPlus) {
- CodeCompletionString *Pattern = 0;
-
if (Results.includeCodePatterns()) {
// namespace <identifier> { declarations }
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("declarations");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("declarations");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
// namespace identifier = identifier ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("name");
- Pattern->AddChunk(CodeCompletionString::CK_Equal);
- Pattern->AddPlaceholderChunk("namespace");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+ Builder.AddPlaceholderChunk("namespace");
+ Results.AddResult(Result(Builder.TakeString()));
// Using directives
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("using");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
// asm(string-literal)
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("asm");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("string-literal");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("asm");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("string-literal");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// Explicit template instantiation
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("template");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("declaration");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("declaration");
+ Results.AddResult(Result(Builder.TakeString()));
}
}
@@ -1305,47 +1407,42 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
case Sema::PCC_Class:
if (SemaRef.getLangOptions().CPlusPlus) {
// Using declaration
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("using");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualifier");
- Pattern->AddTextChunk("::");
- Pattern->AddPlaceholderChunk("name");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("using");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("typename");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualifier");
- Pattern->AddTextChunk("::");
- Pattern->AddPlaceholderChunk("name");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
}
if (CCC == Sema::PCC_Class) {
AddTypedefResult(Results);
// public:
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("public");
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("public");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
// protected:
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("protected");
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("protected");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
// private:
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("private");
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("private");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
}
}
// Fall through
@@ -1354,12 +1451,11 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
case Sema::PCC_MemberTemplate:
if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
// template < parameters >
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("template");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("parameters");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("parameters");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Results.AddResult(Result(Builder.TakeString()));
}
AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
@@ -1386,136 +1482,126 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
case Sema::PCC_Statement: {
AddTypedefResult(Results);
- CodeCompletionString *Pattern = 0;
if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("try");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("catch");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("declaration");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("try");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("declaration");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
if (SemaRef.getLangOptions().ObjC1)
AddObjCStatementResults(Results, true);
if (Results.includeCodePatterns()) {
// if (condition) { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("if");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOptions().CPlusPlus)
- Pattern->AddPlaceholderChunk("condition");
+ Builder.AddPlaceholderChunk("condition");
else
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
// switch (condition) { }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("switch");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTypedTextChunk("switch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOptions().CPlusPlus)
- Pattern->AddPlaceholderChunk("condition");
+ Builder.AddPlaceholderChunk("condition");
else
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
// Switch-specific statements.
if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
// case expression:
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("case");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("case");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
// default:
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("default");
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("default");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
}
if (Results.includeCodePatterns()) {
/// while (condition) { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("while");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTypedTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOptions().CPlusPlus)
- Pattern->AddPlaceholderChunk("condition");
+ Builder.AddPlaceholderChunk("condition");
else
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
// do { statements } while ( expression );
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("do");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("while");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("do");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// for ( for-init-statement ; condition ; expression ) { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("for");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTypedTextChunk("for");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOptions().CPlusPlus || SemaRef.getLangOptions().C99)
- Pattern->AddPlaceholderChunk("init-statement");
+ Builder.AddPlaceholderChunk("init-statement");
else
- Pattern->AddPlaceholderChunk("init-expression");
- Pattern->AddChunk(CodeCompletionString::CK_SemiColon);
- Pattern->AddPlaceholderChunk("condition");
- Pattern->AddChunk(CodeCompletionString::CK_SemiColon);
- Pattern->AddPlaceholderChunk("inc-expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddPlaceholderChunk("init-expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("condition");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("inc-expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
if (S->getContinueParent()) {
// continue ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("continue");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("continue");
+ Results.AddResult(Result(Builder.TakeString()));
}
if (S->getBreakParent()) {
// break ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("break");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("break");
+ Results.AddResult(Result(Builder.TakeString()));
}
// "return expression ;" or "return ;", depending on whether we
@@ -1529,29 +1615,26 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("return");
+ Builder.AddTypedTextChunk("return");
if (!isVoid) {
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
}
- Results.AddResult(Result(Pattern));
+ Results.AddResult(Result(Builder.TakeString()));
// goto identifier ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("goto");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("label");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("goto");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("label");
+ Results.AddResult(Result(Builder.TakeString()));
// Using directives
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("using");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
}
// Fall through (for statement expressions).
@@ -1560,8 +1643,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
// Fall through: conditions and statements can have expressions.
+ case Sema::PCC_ParenthesizedExpression:
case Sema::PCC_Expression: {
- CodeCompletionString *Pattern = 0;
if (SemaRef.getLangOptions().CPlusPlus) {
// 'this', if we're in a non-static member function.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(SemaRef.CurContext))
@@ -1573,103 +1656,93 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("false"));
// dynamic_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("dynamic_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("dynamic_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// static_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("static_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("static_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// reinterpret_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("reinterpret_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("reinterpret_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// const_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("const_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("const_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// typeid ( expression-or-type )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typeid");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("typeid");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// new T ( ... )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("new");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expressions");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// new T [ ] ( ... )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("new");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("type");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
- Pattern->AddPlaceholderChunk("size");
- Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expressions");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddPlaceholderChunk("size");
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// delete expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("delete");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
// delete [] expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("delete");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
- Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
// throw expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("throw");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("throw");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
// FIXME: Rethrow?
}
@@ -1687,16 +1760,16 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
// sizeof expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("sizeof");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk("sizeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
break;
}
case Sema::PCC_Type:
+ case Sema::PCC_LocalDeclarationSpecifiers:
break;
}
@@ -1707,16 +1780,56 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("operator"));
}
+/// \brief Retrieve the string representation of the given type as a string
+/// that has the appropriate lifetime for code completion.
+///
+/// This routine provides a fast path where we provide constant strings for
+/// common type names.
+const char *GetCompletionTypeString(QualType T,
+ ASTContext &Context,
+ CodeCompletionAllocator &Allocator) {
+ PrintingPolicy Policy(Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+
+ if (!T.getLocalQualifiers()) {
+ // Built-in type names are constant strings.
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
+ return BT->getName(Context.getLangOptions());
+
+ // Anonymous tag types are constant strings.
+ if (const TagType *TagT = dyn_cast<TagType>(T))
+ if (TagDecl *Tag = TagT->getDecl())
+ if (!Tag->getIdentifier() && !Tag->getTypedefForAnonDecl()) {
+ switch (Tag->getTagKind()) {
+ case TTK_Struct: return "struct <anonymous>";
+ case TTK_Class: return "class <anonymous>";
+ case TTK_Union: return "union <anonymous>";
+ case TTK_Enum: return "enum <anonymous>";
+ }
+ }
+ }
+
+ // Slow path: format the type as a string.
+ std::string Result;
+ T.getAsStringInternal(Result, Policy);
+ return Allocator.CopyString(Result);
+}
+
/// \brief If the given declaration has an associated type, add it as a result
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
NamedDecl *ND,
- CodeCompletionString *Result) {
+ CodeCompletionBuilder &Result) {
if (!ND)
return;
-
+
+ // Skip constructors and conversion functions, which have their return types
+ // built into their names.
+ if (isa<CXXConstructorDecl>(ND) || isa<CXXConversionDecl>(ND))
+ return;
+
// Determine the type of the declaration (if it has a type).
- QualType T;
+ QualType T;
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
T = Function->getResultType();
else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
@@ -1735,25 +1848,21 @@ static void AddResultTypeChunk(ASTContext &Context,
if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
return;
- PrintingPolicy Policy(Context.PrintingPolicy);
- Policy.AnonymousTagLocations = false;
-
- std::string TypeStr;
- T.getAsStringInternal(TypeStr, Policy);
- Result->AddResultTypeChunk(TypeStr);
+ Result.AddResultTypeChunk(GetCompletionTypeString(T, Context,
+ Result.getAllocator()));
}
static void MaybeAddSentinel(ASTContext &Context, NamedDecl *FunctionOrMethod,
- CodeCompletionString *Result) {
+ CodeCompletionBuilder &Result) {
if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
if (Sentinel->getSentinel() == 0) {
if (Context.getLangOptions().ObjC1 &&
Context.Idents.get("nil").hasMacroDefinition())
- Result->AddTextChunk(", nil");
+ Result.AddTextChunk(", nil");
else if (Context.Idents.get("NULL").hasMacroDefinition())
- Result->AddTextChunk(", NULL");
+ Result.AddTextChunk(", NULL");
else
- Result->AddTextChunk(", (void*)0");
+ Result.AddTextChunk(", (void*)0");
}
}
@@ -1784,7 +1893,8 @@ static std::string FormatFunctionParameter(ASTContext &Context,
// The argument for a block pointer parameter is a block literal with
// the appropriate type.
- FunctionProtoTypeLoc *Block = 0;
+ FunctionTypeLoc *Block = 0;
+ FunctionProtoTypeLoc *BlockProto = 0;
TypeLoc TL;
if (TypeSourceInfo *TSInfo = Param->getTypeSourceInfo()) {
TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
@@ -1808,8 +1918,9 @@ static std::string FormatFunctionParameter(ASTContext &Context,
// then we're done.
if (BlockPointerTypeLoc *BlockPtr
= dyn_cast<BlockPointerTypeLoc>(&TL)) {
- TL = BlockPtr->getPointeeLoc();
- Block = dyn_cast<FunctionProtoTypeLoc>(&TL);
+ TL = BlockPtr->getPointeeLoc().IgnoreParens();
+ Block = dyn_cast<FunctionTypeLoc>(&TL);
+ BlockProto = dyn_cast<FunctionProtoTypeLoc>(&TL);
}
break;
}
@@ -1834,47 +1945,65 @@ static std::string FormatFunctionParameter(ASTContext &Context,
// We have the function prototype behind the block pointer type, as it was
// written in the source.
- std::string Result = "(^)(";
- for (unsigned I = 0, N = Block->getNumArgs(); I != N; ++I) {
- if (I)
- Result += ", ";
- Result += FormatFunctionParameter(Context, Block->getArg(I));
-
- if (I == N - 1 && Block->getTypePtr()->isVariadic())
- Result += ", ...";
- }
- if (Block->getTypePtr()->isVariadic() && Block->getNumArgs() == 0)
- Result += "...";
- else if (Block->getNumArgs() == 0 && !Context.getLangOptions().CPlusPlus)
- Result += "void";
-
- Result += ")";
- Block->getTypePtr()->getResultType().getAsStringInternal(Result,
- Context.PrintingPolicy);
+ std::string Result;
+ QualType ResultType = Block->getTypePtr()->getResultType();
+ if (!ResultType->isVoidType())
+ ResultType.getAsStringInternal(Result, Context.PrintingPolicy);
+
+ Result = '^' + Result;
+ if (!BlockProto || Block->getNumArgs() == 0) {
+ if (BlockProto && BlockProto->getTypePtr()->isVariadic())
+ Result += "(...)";
+ else
+ Result += "(void)";
+ } else {
+ Result += "(";
+ for (unsigned I = 0, N = Block->getNumArgs(); I != N; ++I) {
+ if (I)
+ Result += ", ";
+ Result += FormatFunctionParameter(Context, Block->getArg(I));
+
+ if (I == N - 1 && BlockProto->getTypePtr()->isVariadic())
+ Result += ", ...";
+ }
+ Result += ")";
+ }
+
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+
return Result;
}
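
The rewritten block formatting prints the result type only when it is non-void, prints "(void)" or "(...)" for empty parameter lists, and appends the parameter's own name, so a block parameter now completes as something like ^int(int x, int y)compare. A runnable sketch, assuming the parameters arrive as pre-formatted strings:

#include <cstddef>
#include <string>
#include <vector>

std::string FormatBlock(const std::string &ResultTy,      // "" when void
                        const std::vector<std::string> &Params,
                        bool Variadic, const std::string &Name) {
  std::string Out = "^" + ResultTy;
  if (Params.empty()) {
    Out += Variadic ? "(...)" : "(void)";
  } else {
    Out += "(";
    for (std::size_t I = 0; I != Params.size(); ++I) {
      if (I) Out += ", ";
      Out += Params[I];
    }
    if (Variadic) Out += ", ...";
    Out += ")";
  }
  return Out + Name;              // e.g. "^int(int x, int y)compare"
}
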
/// \brief Add function parameter chunks to the given code completion string.
static void AddFunctionParameterChunks(ASTContext &Context,
FunctionDecl *Function,
- CodeCompletionString *Result) {
+ CodeCompletionBuilder &Result,
+ unsigned Start = 0,
+ bool InOptional = false) {
typedef CodeCompletionString::Chunk Chunk;
+ bool FirstParameter = true;
- CodeCompletionString *CCStr = Result;
-
- for (unsigned P = 0, N = Function->getNumParams(); P != N; ++P) {
+ for (unsigned P = Start, N = Function->getNumParams(); P != N; ++P) {
ParmVarDecl *Param = Function->getParamDecl(P);
- if (Param->hasDefaultArg()) {
+ if (Param->hasDefaultArg() && !InOptional) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
- CodeCompletionString *Opt = new CodeCompletionString;
- CCStr->AddOptionalChunk(std::auto_ptr<CodeCompletionString>(Opt));
- CCStr = Opt;
+ CodeCompletionBuilder Opt(Result.getAllocator());
+ if (!FirstParameter)
+ Opt.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ AddFunctionParameterChunks(Context, Function, Opt, P, true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
}
- if (P != 0)
- CCStr->AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+
+ InOptional = false;
// Format the placeholder string.
std::string PlaceholderStr = FormatFunctionParameter(Context, Param);
@@ -1883,34 +2012,36 @@ static void AddFunctionParameterChunks(ASTContext &Context,
PlaceholderStr += ", ...";
// Add the placeholder string.
- CCStr->AddPlaceholderChunk(PlaceholderStr);
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
}
if (const FunctionProtoType *Proto
= Function->getType()->getAs<FunctionProtoType>())
if (Proto->isVariadic()) {
if (Proto->getNumArgs() == 0)
- CCStr->AddPlaceholderChunk("...");
+ Result.AddPlaceholderChunk("...");
- MaybeAddSentinel(Context, Function, CCStr);
+ MaybeAddSentinel(Context, Function, Result);
}
}
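
Default arguments now fold into nested optional chunks by recursing with Start/InOptional instead of redirecting writes to a second CodeCompletionString, so f(int a, int b = 1, int c = 2) renders as a{, b{, c}} if braces are read as optional regions. A toy rendering in that made-up notation:

#include <cstddef>
#include <string>
#include <vector>

struct Param { std::string Name; bool HasDefault; };

std::string Render(const std::vector<Param> &Ps, std::size_t Start = 0,
                   bool InOptional = false) {
  std::string Out;
  bool First = true;
  for (std::size_t P = Start; P != Ps.size(); ++P) {
    if (Ps[P].HasDefault && !InOptional) {
      // Fold this and every later parameter into one nested optional chunk.
      Out += "{" + std::string(First ? "" : ", ") + Render(Ps, P, true) + "}";
      break;
    }
    if (First) First = false; else Out += ", ";
    InOptional = false;           // the next default starts a new chunk
    Out += Ps[P].Name;
  }
  return Out;
}
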
/// \brief Add template parameter chunks to the given code completion string.
static void AddTemplateParameterChunks(ASTContext &Context,
TemplateDecl *Template,
- CodeCompletionString *Result,
- unsigned MaxParameters = 0) {
+ CodeCompletionBuilder &Result,
+ unsigned MaxParameters = 0,
+ unsigned Start = 0,
+ bool InDefaultArg = false) {
typedef CodeCompletionString::Chunk Chunk;
-
- CodeCompletionString *CCStr = Result;
bool FirstParameter = true;
TemplateParameterList *Params = Template->getTemplateParameters();
TemplateParameterList::iterator PEnd = Params->end();
if (MaxParameters)
PEnd = Params->begin() + MaxParameters;
- for (TemplateParameterList::iterator P = Params->begin(); P != PEnd; ++P) {
+ for (TemplateParameterList::iterator P = Params->begin() + Start;
+ P != PEnd; ++P) {
bool HasDefaultArg = false;
std::string PlaceholderStr;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
@@ -1926,7 +2057,7 @@ static void AddTemplateParameterChunks(ASTContext &Context,
HasDefaultArg = TTP->hasDefaultArgument();
} else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
NTTP->getType().getAsStringInternal(PlaceholderStr,
@@ -1947,28 +2078,35 @@ static void AddTemplateParameterChunks(ASTContext &Context,
HasDefaultArg = TTP->hasDefaultArgument();
}
- if (HasDefaultArg) {
+ if (HasDefaultArg && !InDefaultArg) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
- CodeCompletionString *Opt = new CodeCompletionString;
- CCStr->AddOptionalChunk(std::auto_ptr<CodeCompletionString>(Opt));
- CCStr = Opt;
+ CodeCompletionBuilder Opt(Result.getAllocator());
+ if (!FirstParameter)
+ Opt.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ AddTemplateParameterChunks(Context, Template, Opt, MaxParameters,
+ P - Params->begin(), true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
}
+ InDefaultArg = false;
+
if (FirstParameter)
FirstParameter = false;
else
- CCStr->AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
// Add the placeholder string.
- CCStr->AddPlaceholderChunk(PlaceholderStr);
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
}
}
/// \brief Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
static void
-AddQualifierToCompletionString(CodeCompletionString *Result,
+AddQualifierToCompletionString(CodeCompletionBuilder &Result,
NestedNameSpecifier *Qualifier,
bool QualifierIsInformative,
ASTContext &Context) {
@@ -1981,18 +2119,38 @@ AddQualifierToCompletionString(CodeCompletionString *Result,
Qualifier->print(OS, Context.PrintingPolicy);
}
if (QualifierIsInformative)
- Result->AddInformativeChunk(PrintedNNS);
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(PrintedNNS));
else
- Result->AddTextChunk(PrintedNNS);
+ Result.AddTextChunk(Result.getAllocator().CopyString(PrintedNNS));
}
-static void AddFunctionTypeQualsToCompletionString(CodeCompletionString *Result,
- FunctionDecl *Function) {
+static void
+AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
+ FunctionDecl *Function) {
const FunctionProtoType *Proto
= Function->getType()->getAs<FunctionProtoType>();
if (!Proto || !Proto->getTypeQuals())
return;
+ // FIXME: Add ref-qualifier!
+
+ // Handle single qualifiers without copying
+ if (Proto->getTypeQuals() == Qualifiers::Const) {
+ Result.AddInformativeChunk(" const");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Volatile) {
+ Result.AddInformativeChunk(" volatile");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Restrict) {
+ Result.AddInformativeChunk(" restrict");
+ return;
+ }
+
+ // Handle multiple qualifiers.
std::string QualsStr;
if (Proto->getTypeQuals() & Qualifiers::Const)
QualsStr += " const";
@@ -2000,7 +2158,82 @@ static void AddFunctionTypeQualsToCompletionString(CodeCompletionString *Result,
QualsStr += " volatile";
if (Proto->getTypeQuals() & Qualifiers::Restrict)
QualsStr += " restrict";
- Result->AddInformativeChunk(QualsStr);
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
+}
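
The single-qualifier cases dodge the string copy entirely: " const", " volatile", and " restrict" are literals, and only combinations pay for a built-up, allocator-copied string. A sketch of the split, using a flag to stand in for the literal-versus-copied distinction:

#include <string>

enum Qual { Q_Const = 1, Q_Volatile = 2, Q_Restrict = 4 };

std::string QualText(unsigned Quals, bool &IsLiteral) {
  IsLiteral = true;               // literal: no allocator copy needed
  if (Quals == Q_Const)    return " const";
  if (Quals == Q_Volatile) return " volatile";
  if (Quals == Q_Restrict) return " restrict";
  IsLiteral = false;              // combined: copy into the allocator
  std::string S;
  if (Quals & Q_Const)    S += " const";
  if (Quals & Q_Volatile) S += " volatile";
  if (Quals & Q_Restrict) S += " restrict";
  return S;
}
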
+
+/// \brief Add the name of the given declaration as a typed-text chunk.
+static void AddTypedNameChunk(ASTContext &Context, NamedDecl *ND,
+ CodeCompletionBuilder &Result) {
+ typedef CodeCompletionString::Chunk Chunk;
+
+ DeclarationName Name = ND->getDeclName();
+ if (!Name)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXOperatorName: {
+ const char *OperatorName = 0;
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ OperatorName = "operator";
+ break;
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name: OperatorName = "operator" Spelling; break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case OO_New: OperatorName = "operator new"; break;
+ case OO_Delete: OperatorName = "operator delete"; break;
+ case OO_Array_New: OperatorName = "operator new[]"; break;
+ case OO_Array_Delete: OperatorName = "operator delete[]"; break;
+ case OO_Call: OperatorName = "operator()"; break;
+ case OO_Subscript: OperatorName = "operator[]"; break;
+ }
+ Result.AddTypedTextChunk(OperatorName);
+ break;
+ }
+
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ break;
+
+ case DeclarationName::CXXConstructorName: {
+ CXXRecordDecl *Record = 0;
+ QualType Ty = Name.getCXXNameType();
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>())
+ Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ else if (const InjectedClassNameType *InjectedTy
+ = Ty->getAs<InjectedClassNameType>())
+ Record = InjectedTy->getDecl();
+ else {
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+ }
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Record->getNameAsString()));
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
+ AddTemplateParameterChunks(Context, Template, Result);
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ }
+ break;
+ }
+ }
}
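
The operator spellings come from expanding clang/Basic/OperatorKinds.def with the OVERLOADED_OPERATOR X-macro, concatenating the literal "operator" with each spelling at compile time. A self-contained toy of the same X-macro trick over a local table (not the real .def file):

#define TOY_OPERATORS(X) X(Plus, "+") X(Minus, "-") X(Call, "()")

enum ToyOp {
#define X(Name, Spelling) ToyOp_##Name,
  TOY_OPERATORS(X)
#undef X
  ToyOp_Count
};

const char *ToyOpName(ToyOp Op) {
  switch (Op) {
#define X(Name, Spelling) case ToyOp_##Name: return "operator" Spelling;
  TOY_OPERATORS(X)
#undef X
  default: return "operator";     // mirrors the OO_None fallback above
  }
}
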
/// \brief If possible, create a new code completion string for the given
@@ -2011,39 +2244,42 @@ static void AddFunctionTypeQualsToCompletionString(CodeCompletionString *Result,
/// result is all that is needed.
CodeCompletionString *
CodeCompletionResult::CreateCodeCompletionString(Sema &S,
- CodeCompletionString *Result) {
+ CodeCompletionAllocator &Allocator) {
typedef CodeCompletionString::Chunk Chunk;
+ CodeCompletionBuilder Result(Allocator, Priority, Availability);
- if (Kind == RK_Pattern)
- return Pattern->Clone(Result);
+ if (Kind == RK_Pattern) {
+ Pattern->Priority = Priority;
+ Pattern->Availability = Availability;
+ return Pattern;
+ }
- if (!Result)
- Result = new CodeCompletionString;
-
if (Kind == RK_Keyword) {
- Result->AddTypedTextChunk(Keyword);
- return Result;
+ Result.AddTypedTextChunk(Keyword);
+ return Result.TakeString();
}
if (Kind == RK_Macro) {
MacroInfo *MI = S.PP.getMacroInfo(Macro);
assert(MI && "Not a macro?");
- Result->AddTypedTextChunk(Macro->getName());
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Macro->getName()));
if (!MI->isFunctionLike())
- return Result;
+ return Result.TakeString();
// Format a function-like macro with placeholders for the arguments.
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
for (MacroInfo::arg_iterator A = MI->arg_begin(), AEnd = MI->arg_end();
A != AEnd; ++A) {
if (A != MI->arg_begin())
- Result->AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
if (!MI->isVariadic() || A != AEnd - 1) {
// Non-variadic argument.
- Result->AddPlaceholderChunk((*A)->getName());
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString((*A)->getName()));
continue;
}
@@ -2051,24 +2287,25 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
// variadic macros, providing a single placeholder for the rest of the
// arguments.
if ((*A)->isStr("__VA_ARGS__"))
- Result->AddPlaceholderChunk("...");
+ Result.AddPlaceholderChunk("...");
else {
std::string Arg = (*A)->getName();
Arg += "...";
- Result->AddPlaceholderChunk(Arg);
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
}
}
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightParen));
- return Result;
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ return Result.TakeString();
}
assert(Kind == RK_Declaration && "Missed a result kind?");
NamedDecl *ND = Declaration;
if (StartsNestedNameSpecifier) {
- Result->AddTypedTextChunk(ND->getNameAsString());
- Result->AddTextChunk("::");
- return Result;
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.AddTextChunk("::");
+ return Result.TakeString();
}
AddResultTypeChunk(S.Context, ND, Result);
@@ -2076,20 +2313,20 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
S.Context);
- Result->AddTypedTextChunk(Function->getNameAsString());
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ AddTypedNameChunk(S.Context, ND, Result);
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
AddFunctionParameterChunks(S.Context, Function, Result);
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
AddFunctionTypeQualsToCompletionString(Result, Function);
- return Result;
+ return Result.TakeString();
}
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
S.Context);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
- Result->AddTypedTextChunk(Function->getNameAsString());
-
+ AddTypedNameChunk(S.Context, Function, Result);
+
// Figure out which template parameters are deduced (or have default
// arguments).
llvm::SmallVector<bool, 16> Deduced;
@@ -2103,7 +2340,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
// FIXME: We need to abstract template parameters better!
bool HasDefaultArg = false;
NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
- LastDeducibleArgument - 1);
+ LastDeducibleArgument - 1);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
HasDefaultArg = TTP->hasDefaultArgument();
else if (NonTypeTemplateParmDecl *NTTP
@@ -2124,48 +2361,50 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
// Some of the function template arguments cannot be deduced from a
// function call, so we introduce an explicit template argument list
// containing all of the arguments up to the first deducible argument.
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
AddTemplateParameterChunks(S.Context, FunTmpl, Result,
LastDeducibleArgument);
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
}
// Add the function parameters
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
AddFunctionParameterChunks(S.Context, Function, Result);
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
AddFunctionTypeQualsToCompletionString(Result, Function);
- return Result;
+ return Result.TakeString();
}
if (TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
S.Context);
- Result->AddTypedTextChunk(Template->getNameAsString());
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Template->getNameAsString()));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
AddTemplateParameterChunks(S.Context, Template, Result);
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
- return Result;
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ return Result.TakeString();
}
if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
Selector Sel = Method->getSelector();
if (Sel.isUnarySelector()) {
- Result->AddTypedTextChunk(Sel.getIdentifierInfoForSlot(0)->getName());
- return Result;
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ return Result.TakeString();
}
- std::string SelName = Sel.getIdentifierInfoForSlot(0)->getName().str();
+ std::string SelName = Sel.getNameForSlot(0).str();
SelName += ':';
if (StartParameter == 0)
- Result->AddTypedTextChunk(SelName);
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(SelName));
else {
- Result->AddInformativeChunk(SelName);
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(SelName));
// If there is only one parameter, and we're past it, add an empty
// typed-text chunk since there is nothing to type.
if (Method->param_size() == 1)
- Result->AddTypedTextChunk("");
+ Result.AddTypedTextChunk("");
}
unsigned Idx = 0;
for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
@@ -2174,16 +2413,14 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
if (Idx > 0) {
std::string Keyword;
if (Idx > StartParameter)
- Result->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
Keyword += II->getName().str();
Keyword += ":";
if (Idx < StartParameter || AllParametersAreInformative)
- Result->AddInformativeChunk(Keyword);
- else if (Idx == StartParameter)
- Result->AddTypedTextChunk(Keyword);
- else
- Result->AddTextChunk(Keyword);
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Keyword));
+ else
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(Keyword));
}
// If we're before the starting parameter, skip the placeholder.
@@ -2206,44 +2443,47 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
Arg += ", ...";
if (DeclaringEntity)
- Result->AddTextChunk(Arg);
+ Result.AddTextChunk(Result.getAllocator().CopyString(Arg));
else if (AllParametersAreInformative)
- Result->AddInformativeChunk(Arg);
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Arg));
else
- Result->AddPlaceholderChunk(Arg);
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
}
if (Method->isVariadic()) {
if (Method->param_size() == 0) {
if (DeclaringEntity)
- Result->AddTextChunk(", ...");
+ Result.AddTextChunk(", ...");
else if (AllParametersAreInformative)
- Result->AddInformativeChunk(", ...");
+ Result.AddInformativeChunk(", ...");
else
- Result->AddPlaceholderChunk(", ...");
+ Result.AddPlaceholderChunk(", ...");
}
MaybeAddSentinel(S.Context, Method, Result);
}
- return Result;
+ return Result.TakeString();
}
if (Qualifier)
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
S.Context);
- Result->AddTypedTextChunk(ND->getNameAsString());
- return Result;
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ return Result.TakeString();
}
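
Every branch of CreateCodeCompletionString now ends in Result.TakeString(): chunks accumulate in a stack-allocated builder and the finished string lands in the allocator, replacing the old new/Destroy ownership that HandleCodeCompleteResults used to clean up. A minimal model of that handoff, with stand-in types rather than the clang classes:

#include <deque>
#include <string>

struct ArenaString { std::string Text; };     // lives as long as the arena

struct Builder {
  std::deque<ArenaString> &Arena;             // deque keeps addresses stable
  std::string Pending;
  explicit Builder(std::deque<ArenaString> &A) : Arena(A) {}
  void AddChunk(const std::string &S) { Pending += S; }
  const ArenaString *TakeString() {           // finalize into the arena
    Arena.push_back(ArenaString{Pending});
    Pending.clear();                          // the builder can be reused
    return &Arena.back();
  }
};
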
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
unsigned CurrentArg,
- Sema &S) const {
+ Sema &S,
+ CodeCompletionAllocator &Allocator) const {
typedef CodeCompletionString::Chunk Chunk;
- CodeCompletionString *Result = new CodeCompletionString;
+ // FIXME: Set priority, availability appropriately.
+ CodeCompletionBuilder Result(Allocator, 1, CXAvailability_Available);
FunctionDecl *FDecl = getFunction();
AddResultTypeChunk(S.Context, FDecl, Result);
const FunctionProtoType *Proto
@@ -2252,25 +2492,28 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
// Function without a prototype. Just give the return type and a
// highlighted ellipsis.
const FunctionType *FT = getFunctionType();
- Result->AddTextChunk(
- FT->getResultType().getAsString(S.Context.PrintingPolicy));
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
- Result->AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightParen));
- return Result;
+ Result.AddTextChunk(GetCompletionTypeString(FT->getResultType(),
+ S.Context,
+ Result.getAllocator()));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ return Result.TakeString();
}
if (FDecl)
- Result->AddTextChunk(FDecl->getNameAsString());
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(FDecl->getNameAsString()));
else
- Result->AddTextChunk(
- Proto->getResultType().getAsString(S.Context.PrintingPolicy));
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(
+ Proto->getResultType().getAsString(S.Context.PrintingPolicy)));
- Result->AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
unsigned NumParams = FDecl? FDecl->getNumParams() : Proto->getNumArgs();
for (unsigned I = 0; I != NumParams; ++I) {
if (I)
- Result->AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
std::string ArgString;
QualType ArgType;
@@ -2285,34 +2528,44 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
ArgType.getAsStringInternal(ArgString, S.Context.PrintingPolicy);
if (I == CurrentArg)
- Result->AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter,
- ArgString));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter,
+ Result.getAllocator().CopyString(ArgString)));
else
- Result->AddTextChunk(ArgString);
+ Result.AddTextChunk(Result.getAllocator().CopyString(ArgString));
}
if (Proto && Proto->isVariadic()) {
- Result->AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
if (CurrentArg < NumParams)
- Result->AddTextChunk("...");
+ Result.AddTextChunk("...");
else
- Result->AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
}
- Result->AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
- return Result;
+ return Result.TakeString();
}
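
CreateSignatureString renders the whole overload and wraps the active argument in a CK_CurrentParameter chunk so clients can highlight it. A toy formatter that marks the current argument with <#...#>, an arbitrary marker rather than clang's actual chunk encoding:

#include <cstddef>
#include <string>
#include <vector>

std::string Signature(const std::string &Name,
                      const std::vector<std::string> &Args,
                      std::size_t CurrentArg) {
  std::string Out = Name + "(";
  for (std::size_t I = 0; I != Args.size(); ++I) {
    if (I) Out += ", ";
    Out += (I == CurrentArg) ? "<#" + Args[I] + "#>" : Args[I];
  }
  return Out + ")";
}
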
unsigned clang::getMacroUsagePriority(llvm::StringRef MacroName,
+ const LangOptions &LangOpts,
bool PreferredTypeIsPointer) {
unsigned Priority = CCP_Macro;
- // Treat the "nil" and "NULL" macros as null pointer constants.
- if (MacroName.equals("nil") || MacroName.equals("NULL")) {
+ // Treat the "nil", "Nil" and "NULL" macros as null pointer constants.
+ if (MacroName.equals("nil") || MacroName.equals("NULL") ||
+ MacroName.equals("Nil")) {
Priority = CCP_Constant;
if (PreferredTypeIsPointer)
Priority = Priority / CCF_SimilarTypeMatch;
- }
+ }
+ // Treat "YES", "NO", "true", and "false" as constants.
+ else if (MacroName.equals("YES") || MacroName.equals("NO") ||
+ MacroName.equals("true") || MacroName.equals("false"))
+ Priority = CCP_Constant;
+ // Treat "bool" as a type.
+ else if (MacroName.equals("bool"))
+ Priority = CCP_Type + (LangOpts.ObjC1? CCD_bool_in_ObjC : 0);
+
return Priority;
}
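
The priority heuristic ranks a handful of well-known macros like language constants: lower numbers rank higher, so dividing by CCF_SimilarTypeMatch boosts nil/NULL/Nil when a pointer is expected. A self-contained sketch with placeholder constants (the real CCP_*/CCD_*/CCF_* values live in clang's headers):

#include <string>

unsigned MacroPriority(const std::string &Name, bool ObjC1,
                       bool PreferredTypeIsPointer) {
  const unsigned CCP_Macro = 70, CCP_Constant = 65, CCP_Type = 50;
  const unsigned CCD_bool_in_ObjC = 1, CCF_SimilarTypeMatch = 5;
  unsigned Priority = CCP_Macro;
  if (Name == "nil" || Name == "NULL" || Name == "Nil") {
    Priority = CCP_Constant;
    if (PreferredTypeIsPointer)
      Priority /= CCF_SimilarTypeMatch;       // smaller value ranks higher
  } else if (Name == "YES" || Name == "NO" ||
             Name == "true" || Name == "false") {
    Priority = CCP_Constant;
  } else if (Name == "bool") {
    Priority = CCP_Type + (ObjC1 ? CCD_bool_in_ObjC : 0);
  }
  return Priority;
}
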
@@ -2385,14 +2638,18 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
typedef CodeCompletionResult Result;
Results.EnterNewScope();
+
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
Results.AddResult(Result(M->first,
getMacroUsagePriority(M->first->getName(),
+ PP.getLangOptions(),
TargetTypeIsPointer)));
}
+
Results.ExitScope();
+
}
static void AddPrettyFunctionResults(const LangOptions &LangOpts,
@@ -2400,6 +2657,7 @@ static void AddPrettyFunctionResults(const LangOptions &LangOpts,
typedef CodeCompletionResult Result;
Results.EnterNewScope();
+
Results.AddResult(Result("__PRETTY_FUNCTION__", CCP_Constant));
Results.AddResult(Result("__FUNCTION__", CCP_Constant));
if (LangOpts.C99 || LangOpts.CPlusPlus0x)
@@ -2414,9 +2672,6 @@ static void HandleCodeCompleteResults(Sema *S,
unsigned NumResults) {
if (CodeCompleter)
CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
-
- for (unsigned I = 0; I != NumResults; ++I)
- Results[I].Destroy();
}
static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
@@ -2439,11 +2694,24 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
- case Sema::PCC_RecoveryInFunction:
- return CodeCompletionContext::CCC_Other;
+ if (S.CurContext->isFileContext())
+ return CodeCompletionContext::CCC_TopLevel;
+ else if (S.CurContext->isRecord())
+ return CodeCompletionContext::CCC_ClassStructUnion;
+ else
+ return CodeCompletionContext::CCC_Other;
- case Sema::PCC_Expression:
+ case Sema::PCC_RecoveryInFunction:
+ return CodeCompletionContext::CCC_Recovery;
+
case Sema::PCC_ForInit:
+ if (S.getLangOptions().CPlusPlus || S.getLangOptions().C99 ||
+ S.getLangOptions().ObjC1)
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+ else
+ return CodeCompletionContext::CCC_Expression;
+
+ case Sema::PCC_Expression:
case Sema::PCC_Condition:
return CodeCompletionContext::CCC_Expression;
@@ -2452,6 +2720,12 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_Type:
return CodeCompletionContext::CCC_Type;
+
+ case Sema::PCC_ParenthesizedExpression:
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ return CodeCompletionContext::CCC_Type;
}
return CodeCompletionContext::CCC_Other;
@@ -2490,7 +2764,7 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
MEnd = Method->end_overridden_methods();
M != MEnd; ++M) {
- CodeCompletionString *Pattern = new CodeCompletionString;
+ CodeCompletionBuilder Builder(Results.getAllocator());
CXXMethodDecl *Overridden = const_cast<CXXMethodDecl *>(*M);
if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
continue;
@@ -2504,13 +2778,14 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
std::string Str;
llvm::raw_string_ostream OS(Str);
NNS->print(OS, S.Context.PrintingPolicy);
- Pattern->AddTextChunk(OS.str());
+ Builder.AddTextChunk(Results.getAllocator().CopyString(OS.str()));
}
} else if (!InContext->Equals(Overridden->getDeclContext()))
continue;
- Pattern->AddTypedTextChunk(Overridden->getNameAsString());
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
+ Overridden->getNameAsString()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
bool FirstParam = true;
for (CXXMethodDecl::param_iterator P = Method->param_begin(),
PEnd = Method->param_end();
@@ -2518,12 +2793,13 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
if (FirstParam)
FirstParam = false;
else
- Pattern->AddChunk(CodeCompletionString::CK_Comma);
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
- Pattern->AddPlaceholderChunk((*P)->getIdentifier()->getName());
+ Builder.AddPlaceholderChunk(Results.getAllocator().CopyString(
+ (*P)->getIdentifier()->getName()));
}
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Pattern,
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
CCP_SuperCompletion,
CXCursor_CXXMethod));
Results.Ignore(Overridden);
@@ -2533,9 +2809,10 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
void Sema::CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ mapCodeCompletionContext(*this, CompletionContext));
Results.EnterNewScope();
-
+
// Determine how to filter results, e.g., so that the names of
// values (functions, enumerators, function templates, etc.) are
// only allowed where we can have an expression.
@@ -2548,16 +2825,12 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_Template:
case PCC_MemberTemplate:
case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
break;
case PCC_Statement:
- // For statements that are expressions, we prefer to call 'void' functions
- // rather than functions that return a result, since then the result would
- // be ignored.
- Results.setPreferredType(Context.VoidTy);
- // Fall through
-
+ case PCC_ParenthesizedExpression:
case PCC_Expression:
case PCC_ForInit:
case PCC_Condition:
@@ -2590,6 +2863,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
Results.ExitScope();
switch (CompletionContext) {
+ case PCC_ParenthesizedExpression:
case PCC_Expression:
case PCC_Statement:
case PCC_RecoveryInFunction:
@@ -2607,22 +2881,33 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_ForInit:
case PCC_Condition:
case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
break;
}
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results);
- HandleCodeCompleteResults(this, CodeCompleter,
- mapCodeCompletionContext(*this, CompletionContext),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
-void Sema::CodeCompleteDeclarator(Scope *S,
- bool AllowNonIdentifiers,
- bool AllowNestedNameSpecifiers) {
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results);
+
+void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ AllowNestedNameSpecifiers
+ ? CodeCompletionContext::CCC_PotentiallyQualifiedName
+ : CodeCompletionContext::CCC_Name);
Results.EnterNewScope();
// Type qualifiers can come after names.
@@ -2639,20 +2924,41 @@ void Sema::CodeCompleteDeclarator(Scope *S,
// Add nested-name-specifiers.
if (AllowNestedNameSpecifiers) {
Results.allowNestedNameSpecifiers();
+ Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
CodeCompleter->includeGlobals());
+ Results.setFilter(0);
}
}
Results.ExitScope();
+ // If we're in a context where we might have an expression (rather than a
+ // declaration), and what we've seen so far is an Objective-C type that could
+ // be a receiver of a class message, this may be a class message send with
+ // the initial opening bracket '[' missing. Add appropriate completions.
+ if (AllowNonIdentifiers && !AllowNestedNameSpecifiers &&
+ DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ DS.getStorageClassSpecAsWritten() == DeclSpec::SCS_unspecified &&
+ !DS.isThreadSpecified() && !DS.isExternInLinkageSpec() &&
+ DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
+ DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ DS.getTypeQualifiers() == 0 &&
+ S &&
+ (S->getFlags() & Scope::DeclScope) != 0 &&
+ (S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope)) == 0) {
+ ParsedType T = DS.getRepAsType();
+ if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
+ AddClassMessageCompletions(*this, S, T, 0, 0, false, false, Results);
+ }
+
// Note that we intentionally suppress macro results here, since we do not
// encourage using macros to produce the names of entities.
- HandleCodeCompleteResults(this, CodeCompleter,
- AllowNestedNameSpecifiers
- ? CodeCompletionContext::CCC_PotentiallyQualifiedName
- : CodeCompletionContext::CCC_Name,
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
Results.data(), Results.size());
}
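
CodeCompleteDeclSpec now guesses at a common Objective-C slip: a bare class type at statement scope, e.g. NSArray followed by a completion point, is more likely a class message send missing its leading '[' than a declaration, so class-message completions are mixed in. The long condition above condenses to roughly this boolean-parameter sketch (the real checks interrogate DeclSpec and Scope; these names are made up):

bool LooksLikeBareObjCReceiver(bool AllowNonIdentifiers,
                               bool AllowNestedNames,
                               bool SpecIsBareTypename,  // no storage class,
                               bool InPlainDeclScope,    // qualifiers, etc.
                               bool TypeIsObjCObject) {
  return AllowNonIdentifiers && !AllowNestedNames && SpecIsBareTypename &&
         InPlainDeclScope && TypeIsObjCObject;
}
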
@@ -2675,8 +2981,8 @@ struct Sema::CodeCompleteExpressionData {
void Sema::CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
-
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Expression);
if (Data.ObjCCollection)
Results.setFilter(&ResultBuilder::IsObjCCollection);
else if (Data.IntegralConstantExpression)
@@ -2720,10 +3026,21 @@ void Sema::CodeCompleteExpression(Scope *S,
Results.data(),Results.size());
}
+void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
+ if (E.isInvalid())
+ CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
+ else if (getLangOptions().ObjC1)
+ CodeCompleteObjCInstanceMessage(S, E.take(), 0, 0, false);
+}
+
+/// \brief The set of properties that have already been added, referenced by
+/// property name.
+typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
static void AddObjCProperties(ObjCContainerDecl *Container,
bool AllowCategories,
DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
@@ -2731,40 +3048,46 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
for (ObjCContainerDecl::prop_iterator P = Container->prop_begin(),
PEnd = Container->prop_end();
P != PEnd;
- ++P)
- Results.MaybeAddResult(Result(*P, 0), CurContext);
+ ++P) {
+ if (AddedProperties.insert(P->getIdentifier()))
+ Results.MaybeAddResult(Result(*P, 0), CurContext);
+ }
// Add properties in referenced protocols.
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
PEnd = Protocol->protocol_end();
P != PEnd; ++P)
- AddObjCProperties(*P, AllowCategories, CurContext, Results);
+ AddObjCProperties(*P, AllowCategories, CurContext, AddedProperties,
+ Results);
} else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
if (AllowCategories) {
// Look through categories.
for (ObjCCategoryDecl *Category = IFace->getCategoryList();
Category; Category = Category->getNextClassCategory())
- AddObjCProperties(Category, AllowCategories, CurContext, Results);
+ AddObjCProperties(Category, AllowCategories, CurContext,
+ AddedProperties, Results);
}
// Look through protocols.
for (ObjCInterfaceDecl::all_protocol_iterator
I = IFace->all_referenced_protocol_begin(),
E = IFace->all_referenced_protocol_end(); I != E; ++I)
- AddObjCProperties(*I, AllowCategories, CurContext, Results);
+ AddObjCProperties(*I, AllowCategories, CurContext, AddedProperties,
+ Results);
// Look in the superclass.
if (IFace->getSuperClass())
AddObjCProperties(IFace->getSuperClass(), AllowCategories, CurContext,
- Results);
+ AddedProperties, Results);
} else if (const ObjCCategoryDecl *Category
= dyn_cast<ObjCCategoryDecl>(Container)) {
// Look through protocols.
for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
PEnd = Category->protocol_end();
P != PEnd; ++P)
- AddObjCProperties(*P, AllowCategories, CurContext, Results);
+ AddObjCProperties(*P, AllowCategories, CurContext, AddedProperties,
+ Results);
}
}
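
Threading AddedPropertiesSet through the recursion makes the walk report a property exactly once even when it is redeclared by a protocol, category, or superclass; whichever container is visited first wins. The idiom rests on insert() reporting whether the name was new, as in this std::set-based sketch with toy container types:

#include <set>
#include <string>
#include <vector>

void Collect(const std::vector<std::string> &ContainerProps,
             std::set<std::string> &Added,
             std::vector<std::string> &Out) {
  for (const std::string &P : ContainerProps)
    if (Added.insert(P).second)   // only the first declaration wins
      Out.push_back(P);
}
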
@@ -2788,7 +3111,10 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *BaseE,
return;
}
- ResultBuilder Results(*this, &ResultBuilder::IsMember);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext(CodeCompletionContext::CCC_MemberAccess,
+ BaseType),
+ &ResultBuilder::IsMember);
Results.EnterNewScope();
if (const RecordType *Record = BaseType->getAs<RecordType>()) {
// Indicate that we are performing a member access, and the cv-qualifiers
@@ -2821,18 +3147,20 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *BaseE,
}
} else if (!IsArrow && BaseType->getAsObjCInterfacePointerType()) {
// Objective-C property reference.
+ AddedPropertiesSet AddedProperties;
// Add property results based on our interface.
const ObjCObjectPointerType *ObjCPtr
= BaseType->getAsObjCInterfacePointerType();
assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
- AddObjCProperties(ObjCPtr->getInterfaceDecl(), true, CurContext, Results);
+ AddObjCProperties(ObjCPtr->getInterfaceDecl(), true, CurContext,
+ AddedProperties, Results);
// Add properties from the protocols in a qualified interface.
for (ObjCObjectPointerType::qual_iterator I = ObjCPtr->qual_begin(),
E = ObjCPtr->qual_end();
I != E; ++I)
- AddObjCProperties(*I, true, CurContext, Results);
+ AddObjCProperties(*I, true, CurContext, AddedProperties, Results);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
// Objective-C instance variable access.
@@ -2858,8 +3186,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *BaseE,
// Hand off the results found for code completion.
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext(CodeCompletionContext::CCC_MemberAccess,
- BaseType),
+ Results.getCompletionContext(),
Results.data(),Results.size());
}
@@ -2893,7 +3220,7 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
return;
}
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(), ContextKind);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
// First pass: look for tags.
@@ -2907,12 +3234,13 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer);
}
- HandleCodeCompleteResults(this, CodeCompleter, ContextKind,
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_TypeQualifiers);
Results.EnterNewScope();
if (!(DS.getTypeQualifiers() & DeclSpec::TQ_const))
Results.AddResult("const");
@@ -2923,7 +3251,7 @@ void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
Results.AddResult("restrict");
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_TypeQualifiers,
+ Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -2993,7 +3321,8 @@ void Sema::CodeCompleteCase(Scope *S) {
}
// Add any enumerators that have not yet been mentioned.
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Expression);
Results.EnterNewScope();
for (EnumDecl::enumerator_iterator E = Enum->enumerator_begin(),
EEnd = Enum->enumerator_end();
@@ -3001,15 +3330,16 @@ void Sema::CodeCompleteCase(Scope *S) {
if (EnumeratorsSeen.count(*E))
continue;
- Results.AddResult(CodeCompletionResult(*E, Qualifier),
- CurContext, 0, false);
+ CodeCompletionResult R(*E, Qualifier);
+ R.Priority = CCP_EnumInCase;
+ Results.AddResult(R, CurContext, 0, false);
}
Results.ExitScope();
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results);
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Expression,
+ CodeCompletionContext::CCC_OtherWithMacros,
Results.data(),Results.size());
}
@@ -3196,9 +3526,10 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
return;
- ResultBuilder Results(*this);
-
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Name);
Results.EnterNewScope();
+
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
@@ -3226,7 +3557,9 @@ void Sema::CodeCompleteUsing(Scope *S) {
if (!CodeCompleter)
return;
- ResultBuilder Results(*this, &ResultBuilder::IsNestedNameSpecifier);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
+ &ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
// If we aren't in class scope, we could see the "namespace" keyword.
@@ -3241,7 +3574,7 @@ void Sema::CodeCompleteUsing(Scope *S) {
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
Results.data(),Results.size());
}
@@ -3251,7 +3584,9 @@ void Sema::CodeCompleteUsingDirective(Scope *S) {
// After "using namespace", we expect to see a namespace name or namespace
// alias.
- ResultBuilder Results(*this, &ResultBuilder::IsNamespaceOrAlias);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
Results.EnterNewScope();
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
@@ -3266,12 +3601,20 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!CodeCompleter)
return;
- ResultBuilder Results(*this, &ResultBuilder::IsNamespace);
DeclContext *Ctx = (DeclContext *)S->getEntity();
if (!S->getParent())
Ctx = Context.getTranslationUnitDecl();
- if (Ctx && Ctx->isFileContext()) {
+ bool SuppressedGlobalResults
+ = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ SuppressedGlobalResults
+ ? CodeCompletionContext::CCC_Namespace
+ : CodeCompletionContext::CCC_Other,
+ &ResultBuilder::IsNamespace);
+
+ if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
// We only want to see those namespaces that have already been defined
// within this scope, because it's likely that the user is creating an
// extended namespace declaration. Keep track of the most recent
@@ -3294,7 +3637,7 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
}
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ Results.getCompletionContext(),
Results.data(),Results.size());
}
@@ -3303,12 +3646,14 @@ void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
return;
// After "namespace", we expect to see a namespace or alias.
- ResultBuilder Results(*this, &ResultBuilder::IsNamespaceOrAlias);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
CodeCompleter->includeGlobals());
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Namespace,
+ Results.getCompletionContext(),
Results.data(),Results.size());
}
@@ -3317,7 +3662,9 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
return;
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, &ResultBuilder::IsType);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Type,
+ &ResultBuilder::IsType);
Results.EnterNewScope();
// Add the names of overloadable operators.
@@ -3342,14 +3689,15 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
}
void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
- CXXBaseOrMemberInitializer** Initializers,
+ CXXCtorInitializer** Initializers,
unsigned NumInitializers) {
CXXConstructorDecl *Constructor
= static_cast<CXXConstructorDecl *>(ConstructorD);
if (!Constructor)
return;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName);
Results.EnterNewScope();
// Fill in any already-initialized fields or base classes.
@@ -3360,10 +3708,12 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
InitializedBases.insert(
Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
else
- InitializedFields.insert(cast<FieldDecl>(Initializers[I]->getMember()));
+ InitializedFields.insert(cast<FieldDecl>(
+ Initializers[I]->getAnyMember()));
}
// Add completions for base classes.
+ CodeCompletionBuilder Builder(Results.getAllocator());
bool SawLastInitializer = (NumInitializers == 0);
CXXRecordDecl *ClassDecl = Constructor->getParent();
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
@@ -3378,13 +3728,13 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
continue;
}
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(
- Base->getType().getAsString(Context.PrintingPolicy));
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("args");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Pattern,
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(
+ Base->getType().getAsString(Context.PrintingPolicy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration));
SawLastInitializer = false;
@@ -3403,13 +3753,13 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
continue;
}
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(
- Base->getType().getAsString(Context.PrintingPolicy));
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("args");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Pattern,
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Base->getType().getAsString(Context.PrintingPolicy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration));
SawLastInitializer = false;
@@ -3422,28 +3772,28 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))) {
SawLastInitializer
= NumInitializers > 0 &&
- Initializers[NumInitializers - 1]->isMemberInitializer() &&
- Initializers[NumInitializers - 1]->getMember() == *Field;
+ Initializers[NumInitializers - 1]->isAnyMemberInitializer() &&
+ Initializers[NumInitializers - 1]->getAnyMember() == *Field;
continue;
}
if (!Field->getDeclName())
continue;
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(Field->getIdentifier()->getName());
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("args");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Pattern,
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Field->getIdentifier()->getName()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration));
+ : CCP_MemberDeclaration,
+ CXCursor_MemberRef));
SawLastInitializer = false;
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Name,
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
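
Bases and fields are offered in declaration order, and the candidate immediately after the last initializer the user wrote gets the strong CCP_NextInitializer priority, steering completion toward conventional initialization order. A toy ranking with placeholder priority values (not clang's actual constants):

#include <string>
#include <vector>

struct Candidate { std::string Name; unsigned Priority; };

std::vector<Candidate> RankInitializers(
    const std::vector<std::string> &DeclOrder,
    const std::string &LastWritten) {         // "" when nothing written yet
  const unsigned CCP_NextInitializer = 7, CCP_MemberDeclaration = 60;
  std::vector<Candidate> Out;
  bool SawLast = LastWritten.empty();
  for (const std::string &M : DeclOrder) {
    if (M == LastWritten) { SawLast = true; continue; }  // already written
    Out.push_back({M, SawLast ? CCP_NextInitializer
                              : CCP_MemberDeclaration});
    SawLast = false;              // only the successor gets the bump
  }
  return Out;
}
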
@@ -3457,21 +3807,19 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
// Since we have an implementation, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
- CodeCompletionString *Pattern = 0;
+ CodeCompletionBuilder Builder(Results.getAllocator());
if (LangOpts.ObjC2) {
// @dynamic
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,dynamic));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("property");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,dynamic));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
// @synthesize
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synthesize));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("property");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synthesize));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
}
}
@@ -3497,54 +3845,50 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionString *Pattern = 0;
+ CodeCompletionBuilder Builder(Results.getAllocator());
// @class name ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("name");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// @interface name
// FIXME: Could introduce the whole pattern, including superclasses and
// such.
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("class");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
// @protocol name
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("protocol");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("protocol");
+ Results.AddResult(Result(Builder.TakeString()));
// @implementation name
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("class");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
}
// @compatibility_alias name
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,compatibility_alias));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("alias");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("class");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,compatibility_alias));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("alias");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
}
void Sema::CodeCompleteObjCAtDirective(Scope *S, Decl *ObjCImpDecl,
bool InInterface) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
if (ObjCImpDecl)
AddObjCImplementationResults(getLangOptions(), Results, false);
@@ -3560,78 +3904,72 @@ void Sema::CodeCompleteObjCAtDirective(Scope *S, Decl *ObjCImpDecl,
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionString *Pattern = 0;
+ CodeCompletionBuilder Builder(Results.getAllocator());
// @encode ( type-name )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,encode));
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("type-name");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,encode));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// @protocol ( protocol-name )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("protocol-name");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("protocol-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
// @selector ( selector )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,selector));
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("selector");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,selector));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
}
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionString *Pattern = 0;
+ CodeCompletionBuilder Builder(Results.getAllocator());
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
// { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("@catch");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("parameter");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("@finally");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@finally");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
// @throw
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,throw));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,throw));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// @synchronized ( expression ) { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
}
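// Illustrative sketch (not part of this patch): flattened the same way,
// the statement patterns above display roughly as
//
//   @try{<#statements#>}@catch(<#parameter#>){<#statements#>}@finally{<#statements#>}
//   @throw <#expression#>
//   @synchronized (<#expression#>){<#statements#>}
//
// Clients normally reflow the brace chunks onto separate lines.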
@@ -3647,7 +3985,8 @@ static void AddObjCVisibilityResults(const LangOptions &LangOpts,
}
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCVisibilityResults(getLangOptions(), Results, false);
Results.ExitScope();
@@ -3657,7 +3996,8 @@ void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
}
void Sema::CodeCompleteObjCAtStatement(Scope *S) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCStatementResults(Results, false);
AddObjCExpressionResults(Results, false);
@@ -3668,7 +4008,8 @@ void Sema::CodeCompleteObjCAtStatement(Scope *S) {
}
void Sema::CodeCompleteObjCAtExpression(Scope *S) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCExpressionResults(Results, false);
Results.ExitScope();
@@ -3714,7 +4055,8 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
unsigned Attributes = ODS.getPropertyAttributes();
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
Results.AddResult(CodeCompletionResult("readonly"));
@@ -3729,18 +4071,18 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
Results.AddResult(CodeCompletionResult("nonatomic"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
- CodeCompletionString *Setter = new CodeCompletionString;
- Setter->AddTypedTextChunk("setter");
- Setter->AddTextChunk(" = ");
- Setter->AddPlaceholderChunk("method");
- Results.AddResult(CodeCompletionResult(Setter));
+ CodeCompletionBuilder Setter(Results.getAllocator());
+ Setter.AddTypedTextChunk("setter");
+ Setter.AddTextChunk(" = ");
+ Setter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Setter.TakeString()));
}
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
- CodeCompletionString *Getter = new CodeCompletionString;
- Getter->AddTypedTextChunk("getter");
- Getter->AddTextChunk(" = ");
- Getter->AddPlaceholderChunk("method");
- Results.AddResult(CodeCompletionResult(Getter));
+ CodeCompletionBuilder Getter(Results.getAllocator());
+ Getter.AddTypedTextChunk("getter");
+ Getter.AddTextChunk(" = ");
+ Getter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Getter.TakeString()));
}
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -3759,7 +4101,8 @@ enum ObjCMethodKind {
static bool isAcceptableObjCSelector(Selector Sel,
ObjCMethodKind WantKind,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents) {
+ unsigned NumSelIdents,
+ bool AllowSameLength = true) {
if (NumSelIdents > Sel.getNumArgs())
return false;
@@ -3769,6 +4112,9 @@ static bool isAcceptableObjCSelector(Selector Sel,
case MK_OneArgSelector: return Sel.getNumArgs() == 1;
}
+ if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
+ return false;
+
for (unsigned I = 0; I != NumSelIdents; ++I)
if (SelIdents[I] != Sel.getIdentifierInfoForSlot(I))
return false;
@@ -3779,11 +4125,18 @@ static bool isAcceptableObjCSelector(Selector Sel,
static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
ObjCMethodKind WantKind,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents) {
+ unsigned NumSelIdents,
+ bool AllowSameLength = true) {
return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents,
- NumSelIdents);
+ NumSelIdents, AllowSameLength);
}
-
+
+namespace {
+ /// \brief A set of selectors, which is used to avoid introducing multiple
+ /// completions with the same selector into the result set.
+ typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
+}
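// A minimal sketch of the dedup idiom this set enables (the names are the
// locals used below): in this LLVM snapshot SmallPtrSet::insert() returns
// false when the value was already present, so the first result added for
// a selector wins and later ones are dropped:
//
//   VisitedSelectorSet Selectors;
//   if (!Selectors.insert(Method->getSelector()))
//     continue;  // already offered under this selector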
+
/// \brief Add all of the Objective-C methods in the given Objective-C
/// container to the set of results.
///
@@ -3800,6 +4153,9 @@ static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
/// \param CurContext the context in which we're performing the lookup that
/// finds methods.
///
+/// \param AllowSameLength Whether we allow a method to be added to the list
+/// when it has the same number of parameters as we have selector identifiers.
+///
/// \param Results the structure into which we'll add results.
static void AddObjCMethods(ObjCContainerDecl *Container,
bool WantInstanceMethods,
@@ -3807,6 +4163,8 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
IdentifierInfo **SelIdents,
unsigned NumSelIdents,
DeclContext *CurContext,
+ VisitedSelectorSet &Selectors,
+ bool AllowSameLength,
ResultBuilder &Results,
bool InOriginalClass = true) {
typedef CodeCompletionResult Result;
@@ -3816,9 +4174,13 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
if ((*M)->isInstanceMethod() == WantInstanceMethods) {
// Check whether the selector identifiers we've been given are a
// subset of the identifiers for this particular method.
- if (!isAcceptableObjCMethod(*M, WantKind, SelIdents, NumSelIdents))
+ if (!isAcceptableObjCMethod(*M, WantKind, SelIdents, NumSelIdents,
+ AllowSameLength))
continue;
+ if (!Selectors.insert((*M)->getSelector()))
+ continue;
+
Result R = Result(*M, 0);
R.StartParameter = NumSelIdents;
R.AllParametersAreInformative = (WantKind != MK_Any);
@@ -3828,6 +4190,17 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
}
}
+ // Visit the protocols of protocols.
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, NumSelIdents,
+ CurContext, Selectors, AllowSameLength, Results, false);
+ }
+
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
if (!IFace)
return;
@@ -3838,13 +4211,14 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
E = Protocols.end();
I != E; ++I)
AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, NumSelIdents,
- CurContext, Results, false);
+ CurContext, Selectors, AllowSameLength, Results, false);
// Add methods in categories.
for (ObjCCategoryDecl *CatDecl = IFace->getCategoryList(); CatDecl;
CatDecl = CatDecl->getNextClassCategory()) {
AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
- NumSelIdents, CurContext, Results, InOriginalClass);
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
// Add a category's protocol methods.
const ObjCList<ObjCProtocolDecl> &Protocols
@@ -3853,29 +4227,31 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
E = Protocols.end();
I != E; ++I)
AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
- NumSelIdents, CurContext, Results, false);
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, false);
// Add methods in category implementations.
if (ObjCCategoryImplDecl *Impl = CatDecl->getImplementation())
AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
- NumSelIdents, CurContext, Results, InOriginalClass);
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
}
// Add methods in superclass.
if (IFace->getSuperClass())
AddObjCMethods(IFace->getSuperClass(), WantInstanceMethods, WantKind,
- SelIdents, NumSelIdents, CurContext, Results, false);
+ SelIdents, NumSelIdents, CurContext, Selectors,
+ AllowSameLength, Results, false);
// Add methods in our implementation, if any.
if (ObjCImplementationDecl *Impl = IFace->getImplementation())
AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
- NumSelIdents, CurContext, Results, InOriginalClass);
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
}
-void Sema::CodeCompleteObjCPropertyGetter(Scope *S, Decl *ClassDecl,
- Decl **Methods,
- unsigned NumMethods) {
+void Sema::CodeCompleteObjCPropertyGetter(Scope *S, Decl *ClassDecl) {
typedef CodeCompletionResult Result;
// Try to find the interface where getters might live.
@@ -3890,32 +4266,20 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S, Decl *ClassDecl,
}
// Find all of the potential getters.
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- // FIXME: We need to do this because Objective-C methods don't get
- // pushed into DeclContexts early enough. Argh!
- for (unsigned I = 0; I != NumMethods; ++I) {
- if (ObjCMethodDecl *Method
- = dyn_cast_or_null<ObjCMethodDecl>(Methods[I]))
- if (Method->isInstanceMethod() &&
- isAcceptableObjCMethod(Method, MK_ZeroArgSelector, 0, 0)) {
- Result R = Result(Method, 0);
- R.AllParametersAreInformative = true;
- Results.MaybeAddResult(R, CurContext);
- }
- }
-
- AddObjCMethods(Class, true, MK_ZeroArgSelector, 0, 0, CurContext, Results);
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_ZeroArgSelector, 0, 0, CurContext, Selectors,
+ /*AllowSameLength=*/true, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
}
-void Sema::CodeCompleteObjCPropertySetter(Scope *S, Decl *ObjCImplDecl,
- Decl **Methods,
- unsigned NumMethods) {
+void Sema::CodeCompleteObjCPropertySetter(Scope *S, Decl *ObjCImplDecl) {
typedef CodeCompletionResult Result;
// Try to find the interface where setters might live.
@@ -3931,23 +4295,13 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S, Decl *ObjCImplDecl,
}
// Find all of the potential setters.
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- // FIXME: We need to do this because Objective-C methods don't get
- // pushed into DeclContexts early enough. Argh!
- for (unsigned I = 0; I != NumMethods; ++I) {
- if (ObjCMethodDecl *Method
- = dyn_cast_or_null<ObjCMethodDecl>(Methods[I]))
- if (Method->isInstanceMethod() &&
- isAcceptableObjCMethod(Method, MK_OneArgSelector, 0, 0)) {
- Result R = Result(Method, 0);
- R.AllParametersAreInformative = true;
- Results.MaybeAddResult(R, CurContext);
- }
- }
-
- AddObjCMethods(Class, true, MK_OneArgSelector, 0, 0, CurContext, Results);
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_OneArgSelector, 0, 0, CurContext,
+ Selectors, /*AllowSameLength=*/true, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -3955,9 +4309,11 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S, Decl *ObjCImplDecl,
Results.data(),Results.size());
}
-void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS) {
+void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Type);
Results.EnterNewScope();
// Add context-sensitive, Objective-C parameter-passing keywords.
@@ -3982,6 +4338,26 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS) {
Results.AddResult("oneway");
}
+ // If we're completing the return type of an Objective-C method and the
+ // identifier IBAction refers to a macro, provide a completion item for
+ // an action, e.g.,
+ // IBAction)<#selector#>:(id)sender
+ if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
+ Context.Idents.get("IBAction").hasMacroDefinition()) {
+ typedef CodeCompletionString::Chunk Chunk;
+ CodeCompletionBuilder Builder(Results.getAllocator(), CCP_CodePattern,
+ CXAvailability_Available);
+ Builder.AddTypedTextChunk("IBAction");
+ Builder.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(Chunk(CodeCompletionString::CK_Colon));
+ Builder.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Builder.AddTextChunk("sender");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ }
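// Illustrative note (assumes the usual Cocoa definition): the guard above
// only fires when a macro such as
//
//   #define IBAction void
//
// is in effect, i.e. Context.Idents.get("IBAction").hasMacroDefinition()
// holds, so translation units that never import the Cocoa headers are not
// offered this pattern.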
+
// Add various builtin type names and specifiers.
AddOrdinaryNameResults(PCC_Type, S, *this, Results);
Results.ExitScope();
@@ -4005,7 +4381,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS) {
/// common uses of Objective-C. This routine returns that class type,
/// or NULL if no better result could be determined.
static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
- ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E);
+ ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
if (!Msg)
return 0;
@@ -4102,10 +4478,21 @@ static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
// Try to find a superclass method with the same selector.
ObjCMethodDecl *SuperMethod = 0;
- while ((Class = Class->getSuperClass()) && !SuperMethod)
+ while ((Class = Class->getSuperClass()) && !SuperMethod) {
+ // Check in the class itself.
SuperMethod = Class->getMethod(CurMethod->getSelector(),
CurMethod->isInstanceMethod());
+ // Check in categories or class extensions.
+ if (!SuperMethod) {
+ for (ObjCCategoryDecl *Category = Class->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ if ((SuperMethod = Category->getMethod(CurMethod->getSelector(),
+ CurMethod->isInstanceMethod())))
+ break;
+ }
+ }
+
if (!SuperMethod)
return 0;
@@ -4129,45 +4516,52 @@ static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
}
// We have a superclass method. Now, form the send-to-super completion.
- CodeCompletionString *Pattern = new CodeCompletionString;
+ CodeCompletionBuilder Builder(Results.getAllocator());
// Give this completion a return type.
- AddResultTypeChunk(S.Context, SuperMethod, Pattern);
+ AddResultTypeChunk(S.Context, SuperMethod, Builder);
// If we need the "super" keyword, add it (plus some spacing).
if (NeedSuperKeyword) {
- Pattern->AddTypedTextChunk("super");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("super");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
}
Selector Sel = CurMethod->getSelector();
if (Sel.isUnarySelector()) {
if (NeedSuperKeyword)
- Pattern->AddTextChunk(Sel.getIdentifierInfoForSlot(0)->getName());
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
else
- Pattern->AddTypedTextChunk(Sel.getIdentifierInfoForSlot(0)->getName());
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
} else {
ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
if (I > NumSelIdents)
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
if (I < NumSelIdents)
- Pattern->AddInformativeChunk(
- Sel.getIdentifierInfoForSlot(I)->getName().str() + ":");
+ Builder.AddInformativeChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
else if (NeedSuperKeyword || I > NumSelIdents) {
- Pattern->AddTextChunk(
- Sel.getIdentifierInfoForSlot(I)->getName().str() + ":");
- Pattern->AddPlaceholderChunk((*CurP)->getIdentifier()->getName());
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
} else {
- Pattern->AddTypedTextChunk(
- Sel.getIdentifierInfoForSlot(I)->getName().str() + ":");
- Pattern->AddPlaceholderChunk((*CurP)->getIdentifier()->getName());
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
}
}
}
- Results.AddResult(CodeCompletionResult(Pattern, CCP_SuperCompletion,
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(), CCP_SuperCompletion,
SuperMethod->isInstanceMethod()
? CXCursor_ObjCInstanceMethodDecl
: CXCursor_ObjCClassMethodDecl));
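// Illustrative sketch (not part of this patch): when the current method is
// - (void)setName:(NSString *)name and no selector pieces have been typed,
// the completion built above renders as
//
//   super setName:<#name#>
//
// with the leading "super " present only when NeedSuperKeyword is true;
// selector pieces the user has already typed become informative chunks.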
@@ -4176,10 +4570,10 @@ static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_ObjCMessageReceiver,
+ &ResultBuilder::IsObjCMessageReceiver);
- // Find anything that looks like it could be a message receiver.
- Results.setFilter(&ResultBuilder::IsObjCMessageReceiver);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
Results.EnterNewScope();
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
@@ -4199,15 +4593,15 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results);
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCMessageReceiver,
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents) {
+ unsigned NumSelIdents,
+ bool AtArgumentExpression) {
ObjCInterfaceDecl *CDecl = 0;
if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
// Figure out which interface we're in.
@@ -4223,15 +4617,11 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
if (CurMethod->isInstanceMethod()) {
// We are inside an instance method, which means that the message
// send [super ...] is actually calling an instance method on the
- // current object. Build the super expression and handle this like
- // an instance method.
- QualType SuperTy = Context.getObjCInterfaceType(CDecl);
- SuperTy = Context.getObjCObjectPointerType(SuperTy);
- ExprResult Super
- = Owned(new (Context) ObjCSuperExpr(SuperLoc, SuperTy));
- return CodeCompleteObjCInstanceMessage(S, (Expr *)Super.get(),
+ // current object.
+ return CodeCompleteObjCInstanceMessage(S, 0,
SelIdents, NumSelIdents,
- /*IsSuper=*/true);
+ AtArgumentExpression,
+ CDecl);
}
// Fall through to send to the superclass in CDecl.
@@ -4256,7 +4646,8 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
id.setIdentifier(Super, SuperLoc);
ExprResult SuperExpr = ActOnIdExpression(S, SS, id, false, false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
- SelIdents, NumSelIdents);
+ SelIdents, NumSelIdents,
+ AtArgumentExpression);
}
// Fall through
@@ -4266,71 +4657,105 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
if (CDecl)
Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
- NumSelIdents, /*IsSuper=*/true);
+ NumSelIdents, AtArgumentExpression,
+ /*IsSuper=*/true);
}
-void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
- IdentifierInfo **SelIdents,
- unsigned NumSelIdents) {
- CodeCompleteObjCClassMessage(S, Receiver, SelIdents, NumSelIdents, false);
+/// \brief Given a set of code-completion results for the argument of a message
+/// send, determine the preferred type (if any) for that argument expression.
+static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
+ unsigned NumSelIdents) {
+ typedef CodeCompletionResult Result;
+ ASTContext &Context = Results.getSema().Context;
+
+ QualType PreferredType;
+ unsigned BestPriority = CCP_Unlikely * 2;
+ Result *ResultsData = Results.data();
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ Result &R = ResultsData[I];
+ if (R.Kind == Result::RK_Declaration &&
+ isa<ObjCMethodDecl>(R.Declaration)) {
+ if (R.Priority <= BestPriority) {
+ ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
+ if (NumSelIdents <= Method->param_size()) {
+ QualType MyPreferredType = Method->param_begin()[NumSelIdents - 1]
+ ->getType();
+ if (R.Priority < BestPriority || PreferredType.isNull()) {
+ BestPriority = R.Priority;
+ PreferredType = MyPreferredType;
+ } else if (!Context.hasSameUnqualifiedType(PreferredType,
+ MyPreferredType)) {
+ PreferredType = QualType();
+ }
+ }
+ }
+ }
+ }
+
+ return PreferredType;
}
-void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
- IdentifierInfo **SelIdents,
- unsigned NumSelIdents,
- bool IsSuper) {
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results) {
typedef CodeCompletionResult Result;
ObjCInterfaceDecl *CDecl = 0;
-
+
// If the given name refers to an interface type, retrieve the
// corresponding declaration.
if (Receiver) {
- QualType T = GetTypeFromParser(Receiver, 0);
+ QualType T = SemaRef.GetTypeFromParser(Receiver, 0);
if (!T.isNull())
if (const ObjCObjectType *Interface = T->getAs<ObjCObjectType>())
CDecl = Interface->getInterface();
}
-
+
// Add all of the factory methods in this Objective-C class, its protocols,
// superclasses, categories, implementation, etc.
- ResultBuilder Results(*this);
Results.EnterNewScope();
-
+
// If this is a send-to-super, try to add the special "super" send
// completion.
if (IsSuper) {
if (ObjCMethodDecl *SuperMethod
- = AddSuperSendCompletion(*this, false, SelIdents, NumSelIdents,
- Results))
+ = AddSuperSendCompletion(SemaRef, false, SelIdents, NumSelIdents,
+ Results))
Results.Ignore(SuperMethod);
}
-
+
// If we're inside an Objective-C method definition, prefer its selector to
// others.
- if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
Results.setPreferredSelector(CurMethod->getSelector());
-
+
+ VisitedSelectorSet Selectors;
if (CDecl)
- AddObjCMethods(CDecl, false, MK_Any, SelIdents, NumSelIdents, CurContext,
+ AddObjCMethods(CDecl, false, MK_Any, SelIdents, NumSelIdents,
+ SemaRef.CurContext, Selectors, AtArgumentExpression,
Results);
else {
// We're messaging "id" as a type; provide all class/factory methods.
-
+
// If we have an external source, load the entire class method
// pool from the AST file.
- if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ if (SemaRef.ExternalSource) {
+ for (uint32_t I = 0,
+ N = SemaRef.ExternalSource->GetNumExternalSelectors();
I != N; ++I) {
- Selector Sel = ExternalSource->GetExternalSelector(I);
- if (Sel.isNull() || MethodPool.count(Sel))
+ Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
continue;
-
- ReadMethodPool(Sel);
+
+ SemaRef.ReadMethodPool(Sel);
}
}
-
- for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+
+ for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
+ MEnd = SemaRef.MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
MethList && MethList->Method;
@@ -4338,16 +4763,43 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
NumSelIdents))
continue;
-
+
Result R(MethList->Method, 0);
R.StartParameter = NumSelIdents;
R.AllParametersAreInformative = false;
- Results.MaybeAddResult(R, CurContext);
+ Results.MaybeAddResult(R, SemaRef.CurContext);
}
}
}
+
+ Results.ExitScope();
+}
+
+void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
+ AddClassMessageCompletions(*this, S, Receiver, SelIdents, NumSelIdents,
+ AtArgumentExpression, IsSuper, Results);
+
+ // If we're actually at the argument expression (rather than prior to the
+ // selector), we're performing code completion for an expression.
+ // Determine whether we have a single best method. If so, we can
+ // code-complete the expression using the corresponding parameter type as
+ // our preferred type, improving completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ NumSelIdents);
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
- Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(), Results.size());
@@ -4355,30 +4807,45 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
IdentifierInfo **SelIdents,
- unsigned NumSelIdents) {
- CodeCompleteObjCInstanceMessage(S, Receiver, SelIdents, NumSelIdents, false);
-}
-
-void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
- IdentifierInfo **SelIdents,
unsigned NumSelIdents,
- bool IsSuper) {
+ bool AtArgumentExpression,
+ ObjCInterfaceDecl *Super) {
typedef CodeCompletionResult Result;
Expr *RecExpr = static_cast<Expr *>(Receiver);
// If necessary, apply function/array conversion to the receiver.
// C99 6.7.5.3p[7,8].
- DefaultFunctionArrayLvalueConversion(RecExpr);
- QualType ReceiverType = RecExpr->getType();
+ if (RecExpr)
+ DefaultFunctionArrayLvalueConversion(RecExpr);
+ QualType ReceiverType = RecExpr? RecExpr->getType()
+ : Super? Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(Super))
+ : Context.getObjCIdType();
+ // If we're messaging an expression with type "id" or "Class", check
+ // whether we know something special about the receiver that allows
+ // us to assume a more-specific receiver type.
+ if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType())
+ if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
+ if (ReceiverType->isObjCClassType())
+ return CodeCompleteObjCClassMessage(S,
+ ParsedType::make(Context.getObjCInterfaceType(IFace)),
+ SelIdents, NumSelIdents,
+ AtArgumentExpression, Super);
+
+ ReceiverType = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(IFace));
+ }
+
// Build the set of methods we can see.
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
// If this is a send-to-super, try to add the special "super" send
// completion.
- if (IsSuper) {
+ if (Super) {
if (ObjCMethodDecl *SuperMethod
= AddSuperSendCompletion(*this, false, SelIdents, NumSelIdents,
Results))
@@ -4389,14 +4856,9 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
// others.
if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
Results.setPreferredSelector(CurMethod->getSelector());
-
- // If we're messaging an expression with type "id" or "Class", check
- // whether we know something special about the receiver that allows
- // us to assume a more-specific receiver type.
- if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType())
- if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr))
- ReceiverType = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(IFace));
+
+ // Keep track of the selectors we've already added.
+ VisitedSelectorSet Selectors;
// Handle messages to Class. This really isn't a message to an instance
// method, so we treat it the same way we would treat a message send to a
@@ -4406,7 +4868,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, NumSelIdents,
- CurContext, Results);
+ CurContext, Selectors, AtArgumentExpression, Results);
}
}
// Handle messages to a qualified ID ("id<foo>").
@@ -4417,21 +4879,22 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
E = QualID->qual_end();
I != E; ++I)
AddObjCMethods(*I, true, MK_Any, SelIdents, NumSelIdents, CurContext,
- Results);
+ Selectors, AtArgumentExpression, Results);
}
// Handle messages to a pointer to interface type.
else if (const ObjCObjectPointerType *IFacePtr
= ReceiverType->getAsObjCInterfacePointerType()) {
// Search the class, its superclasses, etc., for instance methods.
AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
- NumSelIdents, CurContext, Results);
+ NumSelIdents, CurContext, Selectors, AtArgumentExpression,
+ Results);
// Search protocols for instance methods.
for (ObjCObjectPointerType::qual_iterator I = IFacePtr->qual_begin(),
E = IFacePtr->qual_end();
I != E; ++I)
AddObjCMethods(*I, true, MK_Any, SelIdents, NumSelIdents, CurContext,
- Results);
+ Selectors, AtArgumentExpression, Results);
}
// Handle messages to "id".
else if (ReceiverType->isObjCIdType()) {
@@ -4460,7 +4923,10 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
NumSelIdents))
continue;
-
+
+ if (!Selectors.insert(MethList->Method->getSelector()))
+ continue;
+
Result R(MethList->Method, 0);
R.StartParameter = NumSelIdents;
R.AllParametersAreInformative = false;
@@ -4468,8 +4934,24 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
}
}
}
-
Results.ExitScope();
+
+
+ // If we're actually at the argument expression (rather than prior to the
+ // selector), we're performing code completion for an expression.
+ // Determine whether we have a single best method. If so, we can
+ // code-complete the expression using the corresponding parameter type as
+ // our preferred type, improving completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ NumSelIdents);
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
+
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
@@ -4506,7 +4988,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
}
}
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_SelectorName);
Results.EnterNewScope();
for (GlobalMethodPool::iterator M = MethodPool.begin(),
MEnd = MethodPool.end();
@@ -4516,10 +4999,11 @@ void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents, NumSelIdents))
continue;
- CodeCompletionString *Pattern = new CodeCompletionString;
+ CodeCompletionBuilder Builder(Results.getAllocator());
if (Sel.isUnarySelector()) {
- Pattern->AddTypedTextChunk(Sel.getIdentifierInfoForSlot(0)->getName());
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ Results.AddResult(Builder.TakeString());
continue;
}
@@ -4527,16 +5011,17 @@ void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
if (I == NumSelIdents) {
if (!Accumulator.empty()) {
- Pattern->AddInformativeChunk(Accumulator);
+ Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
+ Accumulator));
Accumulator.clear();
}
}
- Accumulator += Sel.getIdentifierInfoForSlot(I)->getName().str();
+ Accumulator += Sel.getNameForSlot(I).str();
Accumulator += ':';
}
- Pattern->AddTypedTextChunk(Accumulator);
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(Accumulator));
+ Results.AddResult(Builder.TakeString());
}
Results.ExitScope();
@@ -4575,35 +5060,46 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols) {
- ResultBuilder Results(*this);
- Results.EnterNewScope();
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
- // Tell the result set to ignore all of the protocols we have
- // already seen.
- for (unsigned I = 0; I != NumProtocols; ++I)
- if (ObjCProtocolDecl *Protocol = LookupProtocol(Protocols[I].first,
- Protocols[I].second))
- Results.Ignore(Protocol);
-
- // Add all protocols.
- AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, false,
- Results);
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Tell the result set to ignore all of the protocols we have
+ // already seen.
+ // FIXME: This doesn't work when caching code-completion results.
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Protocols[I].first,
+ Protocols[I].second))
+ Results.Ignore(Protocol);
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, false,
+ Results);
- Results.ExitScope();
+ Results.ExitScope();
+ }
+
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_ObjCProtocolName,
Results.data(),Results.size());
}
void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
- ResultBuilder Results(*this);
- Results.EnterNewScope();
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
- // Add all protocols.
- AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
- Results);
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
+ Results);
- Results.ExitScope();
+ Results.ExitScope();
+ }
+
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_ObjCProtocolName,
Results.data(),Results.size());
@@ -4639,7 +5135,8 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
}
void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
// Add all classes.
@@ -4647,6 +5144,8 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
false, Results);
Results.ExitScope();
+ // FIXME: Add a special context for this, use cached global completion
+ // results.
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
@@ -4654,7 +5153,8 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
// Make sure that we ignore the class we're currently defining.
@@ -4668,13 +5168,16 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
false, Results);
Results.ExitScope();
+ // FIXME: Add a special context for this, use cached global completion
+ // results.
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
}
void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
// Add all unimplemented classes.
@@ -4682,6 +5185,8 @@ void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
true, Results);
Results.ExitScope();
+ // FIXME: Add a special context for this, use cached global completion
+ // results.
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
@@ -4692,7 +5197,8 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
SourceLocation ClassNameLoc) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
// Ignore any categories we find that have already been implemented by this
// interface.
@@ -4734,7 +5240,8 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
if (!Class)
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
// Add all of the categories that have corresponding interface
// declarations in this class and any of its superclasses, except for
@@ -4761,7 +5268,8 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S, Decl *ObjCImpDecl) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container
@@ -4779,14 +5287,15 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S, Decl *ObjCImpDecl) {
Results.Ignore(PropertyImpl->getPropertyDecl());
// Add any properties that we find.
+ AddedPropertiesSet AddedProperties;
Results.EnterNewScope();
if (ObjCImplementationDecl *ClassImpl
= dyn_cast<ObjCImplementationDecl>(Container))
AddObjCProperties(ClassImpl->getClassInterface(), false, CurContext,
- Results);
+ AddedProperties, Results);
else
AddObjCProperties(cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
- false, CurContext, Results);
+ false, CurContext, AddedProperties, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -4798,7 +5307,8 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName,
Decl *ObjCImpDecl) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container
@@ -4847,7 +5357,6 @@ static void FindImplementableMethods(ASTContext &Context,
ObjCContainerDecl *Container,
bool WantInstanceMethods,
QualType ReturnType,
- bool IsInImplementation,
KnownMethodsMap &KnownMethods,
bool InOriginalClass = true) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
@@ -4855,28 +5364,23 @@ static void FindImplementableMethods(ASTContext &Context,
const ObjCList<ObjCProtocolDecl> &Protocols
= IFace->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
- IsInImplementation, KnownMethods,
- InOriginalClass);
+ KnownMethods, InOriginalClass);
- // If we're not in the implementation of a class, also visit the
- // superclass.
- if (!IsInImplementation && IFace->getSuperClass())
- FindImplementableMethods(Context, IFace->getSuperClass(),
- WantInstanceMethods, ReturnType,
- IsInImplementation, KnownMethods,
- false);
-
- // Add methods from any class extensions (but not from categories;
- // those should go into category implementations).
- for (const ObjCCategoryDecl *Cat = IFace->getFirstClassExtension(); Cat;
- Cat = Cat->getNextClassExtension())
+ // Add methods from any class extensions and categories.
+ for (const ObjCCategoryDecl *Cat = IFace->getCategoryList(); Cat;
+ Cat = Cat->getNextClassCategory())
FindImplementableMethods(Context, const_cast<ObjCCategoryDecl*>(Cat),
WantInstanceMethods, ReturnType,
- IsInImplementation, KnownMethods,
- InOriginalClass);
+ KnownMethods, false);
+
+ // Visit the superclass.
+ if (IFace->getSuperClass())
+ FindImplementableMethods(Context, IFace->getSuperClass(),
+ WantInstanceMethods, ReturnType,
+ KnownMethods, false);
}
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
@@ -4884,11 +5388,16 @@ static void FindImplementableMethods(ASTContext &Context,
const ObjCList<ObjCProtocolDecl> &Protocols
= Category->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
- IsInImplementation, KnownMethods,
- InOriginalClass);
+ KnownMethods, InOriginalClass);
+
+ // If this category is the original class, jump to the interface.
+ if (InOriginalClass && Category->getClassInterface())
+ FindImplementableMethods(Context, Category->getClassInterface(),
+ WantInstanceMethods, ReturnType, KnownMethods,
+ false);
}
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
@@ -4899,7 +5408,7 @@ static void FindImplementableMethods(ASTContext &Context,
E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
- IsInImplementation, KnownMethods, false);
+ KnownMethods, false);
}
// Add methods in this container. This operation occurs last because
@@ -4918,6 +5427,632 @@ static void FindImplementableMethods(ASTContext &Context,
}
}
+/// \brief Add the parenthesized return or parameter type chunk to a code
+/// completion string.
+static void AddObjCPassingTypeChunk(QualType Type,
+ ASTContext &Context,
+ CodeCompletionBuilder &Builder) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk(GetCompletionTypeString(Type, Context,
+ Builder.getAllocator()));
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+}
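// Illustrative: for a property of type NSString *, the three chunks added
// above flatten to "(NSString *)", the parenthesized type that precedes a
// parameter or result in a method declaration:
//
//   - (void)setName:(NSString *)name;
//                   ^~~~~~~~~~~~ produced by AddObjCPassingTypeChunk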
+
+/// \brief Determine whether the given class is or inherits from a class by
+/// the given name.
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
+ llvm::StringRef Name) {
+ if (!Class)
+ return false;
+
+ if (Class->getIdentifier() && Class->getIdentifier()->getName() == Name)
+ return true;
+
+ return InheritsFromClassNamed(Class->getSuperClass(), Name);
+}
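// A minimal usage sketch: the walk above climbs the superclass chain, so a
// property typed NSMutableArray * (which inherits from NSArray) leaves the
// priorities below untouched, while an unrelated class fails every check
// and its collection-style completions are penalized by
// CCD_ProbablyNotObjCCollection.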
+
+/// \brief Add code completions for Objective-C Key-Value Coding (KVC) and
+/// Key-Value Observing (KVO).
+static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
+ bool IsInstanceMethod,
+ QualType ReturnType,
+ ASTContext &Context,
+ const KnownMethodsMap &KnownMethods,
+ ResultBuilder &Results) {
+ IdentifierInfo *PropName = Property->getIdentifier();
+ if (!PropName || PropName->getLength() == 0)
+ return;
+
+
+ // Builder that will create each code completion.
+ typedef CodeCompletionResult Result;
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator);
+
+ // The selector table.
+ SelectorTable &Selectors = Context.Selectors;
+
+ // The property name, copied into the code completion allocation region
+ // on demand.
+ struct KeyHolder {
+ CodeCompletionAllocator &Allocator;
+ llvm::StringRef Key;
+ const char *CopiedKey;
+
+ KeyHolder(CodeCompletionAllocator &Allocator, llvm::StringRef Key)
+ : Allocator(Allocator), Key(Key), CopiedKey(0) { }
+
+ operator const char *() {
+ if (CopiedKey)
+ return CopiedKey;
+
+ return CopiedKey = Allocator.CopyString(Key);
+ }
+ } Key(Allocator, PropName->getName());
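// A minimal sketch of the lazy-copy idiom above: the conversion operator
// copies the property name into the completion allocator on first use and
// caches the result, so any number of uses costs one allocation:
//
//   Builder.AddTypedTextChunk(Key);  // first use: CopyString() runs
//   Builder.AddTextChunk(Key);       // later uses return cached CopiedKey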
+
+ // The uppercased name of the property name.
+ std::string UpperKey = PropName->getName();
+ if (!UpperKey.empty())
+ UpperKey[0] = toupper(UpperKey[0]);
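// Worked example (illustrative): a property named "employees" yields
// UpperKey == "Employees", from which the code below derives KVC/KVO
// selector candidates such as isEmployees, setEmployees:, countOfEmployees,
// objectInEmployeesAtIndex:, insertObject:inEmployeesAtIndex: and
// removeObjectFromEmployeesAtIndex:.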
+
+ bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Property->getType());
+ bool ReturnTypeMatchesVoid
+ = ReturnType.isNull() || ReturnType->isVoidType();
+
+ // Add the normal accessor -(type)key.
+ if (IsInstanceMethod &&
+ !KnownMethods.count(Selectors.getNullarySelector(PropName)) &&
+ ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
+ if (ReturnType.isNull())
+ AddObjCPassingTypeChunk(Property->getType(), Context, Builder);
+
+ Builder.AddTypedTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+
+ // If we have an integral or boolean property (or the user has provided
+ // an integral or boolean return type), add the accessor -(type)isKey.
+ if (IsInstanceMethod &&
+ ((!ReturnType.isNull() &&
+ (ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
+ (ReturnType.isNull() &&
+ (Property->getType()->isIntegerType() ||
+ Property->getType()->isBooleanType())))) {
+ std::string SelectorName = (llvm::Twine("is") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("BOOL");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add the normal mutator.
+ if (IsInstanceMethod && ReturnTypeMatchesVoid &&
+ !Property->getSetterMethodDecl()) {
+ std::string SelectorName = (llvm::Twine("set") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(":");
+ AddObjCPassingTypeChunk(Property->getType(), Context, Builder);
+ Builder.AddTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed and unordered accessors
+ unsigned IndexedGetterPriority = CCP_CodePattern;
+ unsigned IndexedSetterPriority = CCP_CodePattern;
+ unsigned UnorderedGetterPriority = CCP_CodePattern;
+ unsigned UnorderedSetterPriority = CCP_CodePattern;
+ if (const ObjCObjectPointerType *ObjCPointer
+ = Property->getType()->getAs<ObjCObjectPointerType>()) {
+ if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
+ // If this interface type is not provably derived from a known
+ // collection, penalize the corresponding completions.
+ if (!InheritsFromClassNamed(IFace, "NSMutableArray")) {
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSArray"))
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ if (!InheritsFromClassNamed(IFace, "NSMutableSet")) {
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSSet"))
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+ }
+ } else {
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ // Add -(NSUInteger)countOf<key>
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isIntegerType())) {
+ std::string SelectorName = (llvm::Twine("countOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority,
+ UnorderedGetterPriority),
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed getters
+ // Add -(id)objectInKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName
+ = (llvm::Twine("objectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(NSArray *)keyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSArray"))) {
+ std::string SelectorName
+ = (llvm::Twine(Property->getName()) + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(void)getKey:(type **)buffer range:(NSRange)inRange
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("get") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")
+ };
+
+ if (!KnownMethods.count(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" **");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("buffer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("range:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSRange");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("inRange");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable indexed accessors
+
+ // - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("in") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)
+ };
+
+ if (!KnownMethods.count(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk("insertObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("insert") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")
+ };
+
+ if (!KnownMethods.count(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("atIndexes:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (llvm::Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKeyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (llvm::Twine("remove") + UpperKey + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (llvm::Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")
+ };
+
+ if (!KnownMethods.count(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("withObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName1
+ = (llvm::Twine("replace") + UpperKey + "AtIndexes").str();
+ std::string SelectorName2 = (llvm::Twine("with") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)
+ };
+
+ if (!KnownMethods.count(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName1 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName2 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
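For reference, the indexed mutators above all derive their selector names from one convention: capitalize the key, then splice it into fixed templates. A standalone sketch of that convention for a hypothetical property named "employees" (illustrative only, not part of the patch):

#include <cctype>
#include <iostream>
#include <string>

int main() {
  std::string Key = "employees";                          // hypothetical property
  std::string UpperKey = Key;
  UpperKey[0] = std::toupper((unsigned char)UpperKey[0]); // "Employees"

  // The mutable indexed accessor families suggested above:
  std::cout << "insertObject:in" << UpperKey << "AtIndex:\n"
            << "insert" << UpperKey << ":atIndexes:\n"
            << "removeObjectFrom" << UpperKey << "AtIndex:\n"
            << "remove" << UpperKey << "AtIndexes:\n"
            << "replaceObjectIn" << UpperKey << "AtIndex:withObject:\n"
            << "replace" << UpperKey << "AtIndexes:with" << UpperKey << ":\n";
  return 0;
}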
+
+ // Unordered getters
+ // - (NSEnumerator *)enumeratorOfKey
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSEnumerator"))) {
+ std::string SelectorName = (llvm::Twine("enumeratorOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSEnumerator *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (type *)memberOfKey:(type *)object
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName = (llvm::Twine("memberOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (ReturnType.isNull()) {
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ } else {
+ Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
+ Builder.getAllocator()));
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable unordered accessors
+ // - (void)addKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (llvm::Twine("add") + UpperKey + llvm::Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)addKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("add") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (llvm::Twine("remove") + UpperKey + llvm::Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("remove") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)intersectKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (llvm::Twine("intersect") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Key-Value Observing
+ // + (NSSet *)keyPathsForValuesAffectingKey
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSSet"))) {
+ std::string SelectorName
+ = (llvm::Twine("keyPathsForValuesAffecting") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (!KnownMethods.count(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCClassMethodDecl));
+ }
+ }
+}
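Every pattern in this function is guarded the same way: it is offered only when the selector is absent from KnownMethods, i.e. not already declared or inherited. A minimal sketch of that gate for the nullary KVO selector, reusing the surrounding function's Context, Selectors, and KnownMethods (the key "Employees" is hypothetical):

std::string SelectorName = "keyPathsForValuesAffectingEmployees";
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (!KnownMethods.count(Selectors.getNullarySelector(SelectorId))) {
  // Selector not yet declared or overridden: build the chunks and
  // add the completion result, as each block above does.
}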
+
void Sema::CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnTy,
@@ -4926,33 +6061,27 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
// provided.
QualType ReturnType = GetTypeFromParser(ReturnTy);
- // Determine where we should start searching for methods, and where we
- ObjCContainerDecl *SearchDecl = 0, *CurrentDecl = 0;
+ // Determine where we should start searching for methods.
+ ObjCContainerDecl *SearchDecl = 0;
bool IsInImplementation = false;
if (Decl *D = IDecl) {
if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
SearchDecl = Impl->getClassInterface();
- CurrentDecl = Impl;
IsInImplementation = true;
} else if (ObjCCategoryImplDecl *CatImpl
- = dyn_cast<ObjCCategoryImplDecl>(D)) {
+ = dyn_cast<ObjCCategoryImplDecl>(D)) {
SearchDecl = CatImpl->getCategoryDecl();
- CurrentDecl = CatImpl;
IsInImplementation = true;
- } else {
+ } else
SearchDecl = dyn_cast<ObjCContainerDecl>(D);
- CurrentDecl = SearchDecl;
- }
}
if (!SearchDecl && S) {
- if (DeclContext *DC = static_cast<DeclContext *>(S->getEntity())) {
+ if (DeclContext *DC = static_cast<DeclContext *>(S->getEntity()))
SearchDecl = dyn_cast<ObjCContainerDecl>(DC);
- CurrentDecl = SearchDecl;
- }
}
- if (!SearchDecl || !CurrentDecl) {
+ if (!SearchDecl) {
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
0, 0);
@@ -4962,24 +6091,12 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
// Find all of the methods that we could declare/implement here.
KnownMethodsMap KnownMethods;
FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
- ReturnType, IsInImplementation, KnownMethods);
+ ReturnType, KnownMethods);
- // Erase any methods that have already been declared or
- // implemented here.
- for (ObjCContainerDecl::method_iterator M = CurrentDecl->meth_begin(),
- MEnd = CurrentDecl->meth_end();
- M != MEnd; ++M) {
- if ((*M)->isInstanceMethod() != IsInstanceMethod)
- continue;
-
- KnownMethodsMap::iterator Pos = KnownMethods.find((*M)->getSelector());
- if (Pos != KnownMethods.end())
- KnownMethods.erase(Pos);
- }
-
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
PrintingPolicy Policy(Context.PrintingPolicy);
Policy.AnonymousTagLocations = false;
@@ -4987,22 +6104,18 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
MEnd = KnownMethods.end();
M != MEnd; ++M) {
ObjCMethodDecl *Method = M->second.first;
- CodeCompletionString *Pattern = new CodeCompletionString;
+ CodeCompletionBuilder Builder(Results.getAllocator());
// If the result type was not already provided, add it to the
// pattern as (type).
- if (ReturnType.isNull()) {
- std::string TypeStr;
- Method->getResultType().getAsStringInternal(TypeStr, Policy);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddTextChunk(TypeStr);
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- }
+ if (ReturnType.isNull())
+ AddObjCPassingTypeChunk(Method->getResultType(), Context, Builder);
Selector Sel = Method->getSelector();
// Add the first part of the selector to the pattern.
- Pattern->AddTypedTextChunk(Sel.getIdentifierInfoForSlot(0)->getName());
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
// Add parameters to the pattern.
unsigned I = 0;
@@ -5011,59 +6124,82 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
P != PEnd; (void)++P, ++I) {
// Add the part of the selector name.
if (I == 0)
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddTypedTextChunk(":");
else if (I < Sel.getNumArgs()) {
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk(Sel.getIdentifierInfoForSlot(I)->getName());
- Pattern->AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
} else
break;
// Add the parameter type.
- std::string TypeStr;
- (*P)->getOriginalType().getAsStringInternal(TypeStr, Policy);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddTextChunk(TypeStr);
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ AddObjCPassingTypeChunk((*P)->getOriginalType(), Context, Builder);
if (IdentifierInfo *Id = (*P)->getIdentifier())
- Pattern->AddTextChunk(Id->getName());
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
}
if (Method->isVariadic()) {
if (Method->param_size() > 0)
- Pattern->AddChunk(CodeCompletionString::CK_Comma);
- Pattern->AddTextChunk("...");
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+ Builder.AddTextChunk("...");
}
if (IsInImplementation && Results.includeCodePatterns()) {
// We will be defining the method here, so add a compound statement.
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
if (!Method->getResultType()->isVoidType()) {
// If the result type is not void, add a return clause.
- Pattern->AddTextChunk("return");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddTextChunk("return");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
} else
- Pattern->AddPlaceholderChunk("statements");
+ Builder.AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
}
unsigned Priority = CCP_CodePattern;
if (!M->second.second)
Priority += CCD_InBaseClass;
- Results.AddResult(Result(Pattern, Priority,
+ Results.AddResult(Result(Builder.TakeString(), Priority,
Method->isInstanceMethod()
? CXCursor_ObjCInstanceMethodDecl
: CXCursor_ObjCClassMethodDecl));
}
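The loop above simply concatenates chunks: an optional (result type), the first selector piece with its colon, then a (parameter type) and name for each argument. A standalone mock of the text that falls out for a hypothetical method - (BOOL)writeToFile:atomically::

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Chunk sequence mirroring the loop above for a two-argument method.
  std::vector<std::string> Chunks = {
      "(BOOL)", "writeToFile:", "(NSString *)", "path",
      " ", "atomically:", "(BOOL)", "flag"};
  std::string Pattern;
  for (const std::string &Chunk : Chunks)
    Pattern += Chunk;
  // Prints: (BOOL)writeToFile:(NSString *)path atomically:(BOOL)flag
  std::cout << Pattern << "\n";
  return 0;
}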
+ // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
+ // the properties in this class and its categories.
+ if (Context.getLangOptions().ObjC2) {
+ llvm::SmallVector<ObjCContainerDecl *, 4> Containers;
+ Containers.push_back(SearchDecl);
+
+ ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
+ if (!IFace)
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
+ IFace = Category->getClassInterface();
+
+ if (IFace) {
+ for (ObjCCategoryDecl *Category = IFace->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ Containers.push_back(Category);
+ }
+
+ for (unsigned I = 0, N = Containers.size(); I != N; ++I) {
+ for (ObjCContainerDecl::prop_iterator P = Containers[I]->prop_begin(),
+ PEnd = Containers[I]->prop_end();
+ P != PEnd; ++P) {
+ AddObjCKeyValueCompletions(*P, IsInstanceMethod, ReturnType, Context,
+ KnownMethods, Results);
+ }
+ }
+ }
+
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -5092,7 +6228,8 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
// Build the set of methods we can see.
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_Other);
if (ReturnTy)
Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
@@ -5114,9 +6251,10 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) {
ParmVarDecl *Param = MethList->Method->param_begin()[NumSelIdents-1];
if (Param->getIdentifier()) {
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(Param->getIdentifier()->getName());
- Results.AddResult(Pattern);
+ CodeCompletionBuilder Builder(Results.getAllocator());
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Param->getIdentifier()->getName()));
+ Results.AddResult(Builder.TakeString());
}
}
@@ -5138,167 +6276,149 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
}
void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_PreprocessorDirective);
Results.EnterNewScope();
// #if <condition>
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("if");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("condition");
- Results.AddResult(Pattern);
+ CodeCompletionBuilder Builder(Results.getAllocator());
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
// #ifdef <macro>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("ifdef");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("macro");
- Results.AddResult(Pattern);
-
+ Builder.AddTypedTextChunk("ifdef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
// #ifndef <macro>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("ifndef");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("macro");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("ifndef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
if (InConditional) {
// #elif <condition>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("elif");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("condition");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("elif");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
// #else
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("else");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("else");
+ Results.AddResult(Builder.TakeString());
// #endif
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("endif");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("endif");
+ Results.AddResult(Builder.TakeString());
}
// #include "header"
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("include");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("\"");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk("\"");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
// #include <header>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("include");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("<");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk(">");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
// #define <macro>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("define");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("macro");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
// #define <macro>(<args>)
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("define");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("macro");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("args");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
// #undef <macro>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("undef");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("macro");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("undef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
// #line <number>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("line");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("number");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Results.AddResult(Builder.TakeString());
// #line <number> "filename"
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("line");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("number");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("\"");
- Pattern->AddPlaceholderChunk("filename");
- Pattern->AddTextChunk("\"");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("filename");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
// #error <message>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("error");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("message");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("error");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
// #pragma <arguments>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("pragma");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("arguments");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("pragma");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("arguments");
+ Results.AddResult(Builder.TakeString());
if (getLangOptions().ObjC1) {
// #import "header"
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("import");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("\"");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk("\"");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
// #import <header>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("import");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("<");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk(">");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
}
// #include_next "header"
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("include_next");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("\"");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk("\"");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
// #include_next <header>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("include_next");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("<");
- Pattern->AddPlaceholderChunk("header");
- Pattern->AddTextChunk(">");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
// #warning <message>
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("warning");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("message");
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk("warning");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
// Note: #ident and #sccs are such crazy anachronisms that we don't provide
// completions for them. And __include_macros is a Clang-internal extension
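The conversion in this function leans on one CodeCompletionBuilder property: TakeString() hands off the accumulated chunks and leaves the builder empty, which is why a single local Builder now serves every directive where the old code allocated a fresh CodeCompletionString each time. A sketch of the idiom (assuming TakeString() resets the builder, as its repeated use above implies):

CodeCompletionBuilder Builder(Results.getAllocator());

Builder.AddTypedTextChunk("ifdef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());  // Builder is empty again here

Builder.AddTypedTextChunk("endif");       // safe to start the next pattern
Results.AddResult(Builder.TakeString());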
@@ -5319,43 +6439,45 @@ void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
}
void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ IsDefinition? CodeCompletionContext::CCC_MacroName
+ : CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
// Add just the names of macros, not their arguments.
+ CodeCompletionBuilder Builder(Results.getAllocator());
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(M->first->getName());
- Results.AddResult(Pattern);
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ M->first->getName()));
+ Results.AddResult(Builder.TakeString());
}
Results.ExitScope();
} else if (IsDefinition) {
// FIXME: Can we detect when the user just wrote an include guard above?
}
- HandleCodeCompleteResults(this, CodeCompleter,
- IsDefinition? CodeCompletionContext::CCC_MacroName
- : CodeCompletionContext::CCC_MacroNameUse,
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
void Sema::CodeCompletePreprocessorExpression() {
- ResultBuilder Results(*this);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompletionContext::CCC_PreprocessorExpression);
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Results);
// defined (<macro>)
Results.EnterNewScope();
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("defined");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("macro");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Pattern);
+ CodeCompletionBuilder Builder(Results.getAllocator());
+ Builder.AddTypedTextChunk("defined");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -5381,10 +6503,9 @@ void Sema::CodeCompleteNaturalLanguage() {
0, 0);
}
-void Sema::GatherGlobalCodeCompletions(
+void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
llvm::SmallVectorImpl<CodeCompletionResult> &Results) {
- ResultBuilder Builder(*this);
-
+ ResultBuilder Builder(*this, Allocator, CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
CodeCompletionDeclConsumer Consumer(Builder,
Context.getTranslationUnitDecl());
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index f5e045a..dd30c12 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -26,6 +26,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/AST/CharUnits.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -59,7 +60,7 @@ Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr) {
/// and then return NULL.
ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS,
- bool isClassName,
+ bool isClassName, bool HasTrailingDot,
ParsedType ObjectTypePtr) {
// Determine where we will perform name lookup.
DeclContext *LookupCtx = 0;
@@ -192,13 +193,15 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
T = getElaboratedType(ETK_None, *SS, T);
} else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
- T = Context.getObjCInterfaceType(IDecl);
- } else {
+ if (!HasTrailingDot)
+ T = Context.getObjCInterfaceType(IDecl);
+ }
+
+ if (T.isNull()) {
// If it's not plausibly a type, suppress diagnostics.
Result.suppressDiagnostics();
return ParsedType();
}
-
return ParsedType::make(T);
}
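The new HasTrailingDot flag lets the caller report that the identifier is immediately followed by '.', in which case a bare Objective-C interface name no longer yields a type: T stays null, diagnostics are suppressed, and a null ParsedType comes back. A hedged caller-side sketch; Actions, II, NameLoc, and S are hypothetical parser-side names:

// E.g. after "NSObject." the parser should not commit to the interface type.
ParsedType T = Actions.getTypeName(II, NameLoc, S, /*SS=*/0,
                                   /*isClassName=*/false,
                                   /*HasTrailingDot=*/true);
if (!T) {
  // Not a plausible type here; fall back to ordinary name lookup.
}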
@@ -544,7 +547,9 @@ static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
return CD->isCopyConstructor();
- return D->isCopyAssignment();
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return Method->isCopyAssignmentOperator();
+ return false;
}
bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
@@ -554,11 +559,8 @@ bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
return false;
// Ignore class templates.
- if (D->getDeclContext()->isDependentContext())
- return false;
-
- // We warn for unused decls internal to the translation unit.
- if (D->getLinkage() == ExternalLinkage)
+ if (D->getDeclContext()->isDependentContext() ||
+ D->getLexicalDeclContext()->isDependentContext())
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
@@ -575,25 +577,32 @@ bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
return false;
}
- if (FD->isThisDeclarationADefinition())
- return !Context.DeclMustBeEmitted(FD);
- return true;
- }
+ if (FD->isThisDeclarationADefinition() &&
+ Context.DeclMustBeEmitted(FD))
+ return false;
+
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl() ||
+ VD->getType().isConstant(Context) ||
+ Context.DeclMustBeEmitted(VD))
+ return false;
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return false;
- if ( VD->isFileVarDecl() &&
- !VD->getType().isConstant(Context))
- return !Context.DeclMustBeEmitted(VD);
+ } else {
+ return false;
}
- return false;
- }
+ // Only warn for unused decls internal to the translation unit.
+ if (D->getLinkage() == ExternalLinkage)
+ return false;
+
+ return true;
+}
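Restated as inputs, the rewritten predicate warns only for file-scoped declarations with internal linkage that nothing forces to be emitted. Illustrative cases (one hypothetical translation unit):

static int unused_counter;        // warns: internal linkage, never used
static const int kLimit = 8;      // no warning: constant file-scope variable
extern int shared_state;          // no warning: external linkage

static int used_counter;
int bump() { return ++used_counter; }  // 'used_counter' is used: no warning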
- void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
+void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
if (!D)
return;
@@ -620,6 +629,9 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isUsed() || D->hasAttr<UnusedAttr>())
return false;
+ if (isa<LabelDecl>(D))
+ return true;
+
// White-list anything that isn't a local variable.
if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D) ||
!D->getDeclContext()->isFunctionOrMethod())
@@ -662,16 +674,29 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
return true;
}
+/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
+/// unless they are marked attr(unused).
void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
if (!ShouldDiagnoseUnusedDecl(D))
return;
+ unsigned DiagID;
if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable())
- Diag(D->getLocation(), diag::warn_unused_exception_param)
- << D->getDeclName();
+ DiagID = diag::warn_unused_exception_param;
+ else if (isa<LabelDecl>(D))
+ DiagID = diag::warn_unused_label;
else
- Diag(D->getLocation(), diag::warn_unused_variable)
- << D->getDeclName();
+ DiagID = diag::warn_unused_variable;
+
+ Diag(D->getLocation(), DiagID) << D->getDeclName();
+}
+
+static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
+ // Verify that we have no forward references left. If any remain, there was
+ // a goto or an address of a label taken, but no definition of that label.
+ // Label forward definitions are indicated with a null substmt.
+ if (L->getStmt() == 0)
+ S.Diag(L->getLocation(), diag::err_undeclared_label_use) << L->getDeclName();
}
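Between DiagnoseUnusedDecl (which now accepts LabelDecls) and CheckPoppedLabel, both label mistakes are caught when the scope pops. An input that elicits each diagnostic:

void f() {
done:;            // defined but never targeted: warn_unused_label
}

void g() {
  goto missing;   // referenced but never defined: the label's substmt is
}                 // still null at scope pop, so err_undeclared_label_use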
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
@@ -690,9 +715,13 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
if (!D->getDeclName()) continue;
// Diagnose unused variables in this scope.
- if (S->getNumErrorsAtStart() == getDiagnostics().getNumErrors())
+ if (!S->hasErrorOccurred())
DiagnoseUnusedDecl(D);
+ // If this was a forward reference to a label, verify it was defined.
+ if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
+ CheckPoppedLabel(LD, *this);
+
// Remove this name from our lexical scope.
IdResolver.RemoveDecl(D);
}
@@ -769,17 +798,6 @@ Scope *Sema::getNonFieldDeclScope(Scope *S) {
return S;
}
-void Sema::InitBuiltinVaListType() {
- if (!Context.getBuiltinVaListType().isNull())
- return;
-
- IdentifierInfo *VaIdent = &Context.Idents.get("__builtin_va_list");
- NamedDecl *VaDecl = LookupSingleName(TUScope, VaIdent, SourceLocation(),
- LookupOrdinaryName, ForRedeclaration);
- TypedefDecl *VaTypedef = cast<TypedefDecl>(VaDecl);
- Context.setBuiltinVaListType(Context.getTypedefType(VaTypedef));
-}
-
/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
/// file scope. Lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
@@ -789,9 +807,6 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
SourceLocation Loc) {
Builtin::ID BID = (Builtin::ID)bid;
- if (Context.BuiltinInfo.hasVAListUse(BID))
- InitBuiltinVaListType();
-
ASTContext::GetBuiltinTypeError Error;
QualType R = Context.GetBuiltinType(BID, Error);
switch (Error) {
@@ -801,13 +816,13 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
case ASTContext::GE_Missing_stdio:
if (ForRedeclaration)
- Diag(Loc, diag::err_implicit_decl_requires_stdio)
+ Diag(Loc, diag::warn_implicit_decl_requires_stdio)
<< Context.BuiltinInfo.GetName(BID);
return 0;
case ASTContext::GE_Missing_setjmp:
if (ForRedeclaration)
- Diag(Loc, diag::err_implicit_decl_requires_setjmp)
+ Diag(Loc, diag::warn_implicit_decl_requires_setjmp)
<< Context.BuiltinInfo.GetName(BID);
return 0;
}
@@ -817,7 +832,7 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
<< Context.BuiltinInfo.GetName(BID)
<< R;
if (Context.BuiltinInfo.getHeaderName(BID) &&
- Diags.getDiagnosticLevel(diag::ext_implicit_lib_function_decl)
+ Diags.getDiagnosticLevel(diag::ext_implicit_lib_function_decl, Loc)
!= Diagnostic::Ignored)
Diag(Loc, diag::note_please_include_header)
<< Context.BuiltinInfo.getHeaderName(BID)
@@ -834,7 +849,7 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
// Create Decl objects for each parameter, adding them to the
// FunctionDecl.
- if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
llvm::SmallVector<ParmVarDecl*, 16> Params;
for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
Params.push_back(ParmVarDecl::Create(Context, New, SourceLocation(), 0,
@@ -1026,11 +1041,11 @@ static void MergeDeclAttributes(Decl *New, Decl *Old, ASTContext &C) {
// we process them.
if (!New->hasAttrs())
New->setAttrs(AttrVec());
- for (Decl::attr_iterator i = Old->attr_begin(), e = Old->attr_end(); i != e;
- ++i) {
- // FIXME: Make this more general than just checking for Overloadable.
- if (!DeclHasAttr(New, *i) && (*i)->getKind() != attr::Overloadable) {
- Attr *NewAttr = (*i)->clone(C);
+ for (specific_attr_iterator<InheritableAttr>
+ i = Old->specific_attr_begin<InheritableAttr>(),
+ e = Old->specific_attr_end<InheritableAttr>(); i != e; ++i) {
+ if (!DeclHasAttr(New, *i)) {
+ InheritableAttr *NewAttr = cast<InheritableAttr>((*i)->clone(C));
NewAttr->setInherited(true);
New->addAttr(NewAttr);
}
@@ -1061,7 +1076,8 @@ Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
if (isa<CXXDestructorDecl>(MD))
return Sema::CXXDestructor;
- assert(MD->isCopyAssignment() && "Must have copy assignment operator");
+ assert(MD->isCopyAssignmentOperator() &&
+ "Must have copy assignment operator");
return Sema::CXXCopyAssignment;
}
@@ -1147,15 +1163,15 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
//
// Note also that we DO NOT return at this point, because we still have
// other tests to run.
- const FunctionType *OldType = OldQType->getAs<FunctionType>();
+ const FunctionType *OldType = cast<FunctionType>(OldQType);
const FunctionType *NewType = New->getType()->getAs<FunctionType>();
- const FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
- const FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
+ FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
+ FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
+ bool RequiresAdjustment = false;
if (OldTypeInfo.getCC() != CC_Default &&
NewTypeInfo.getCC() == CC_Default) {
- NewQType = Context.getCallConvType(NewQType, OldTypeInfo.getCC());
- New->setType(NewQType);
- NewQType = Context.getCanonicalType(NewQType);
+ NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
+ RequiresAdjustment = true;
} else if (!Context.isSameCallConv(OldTypeInfo.getCC(),
NewTypeInfo.getCC())) {
// Calling conventions really aren't compatible, so complain.
@@ -1169,25 +1185,29 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
}
// FIXME: diagnose the other way around?
- if (OldType->getNoReturnAttr() && !NewType->getNoReturnAttr()) {
- NewQType = Context.getNoReturnType(NewQType);
- New->setType(NewQType);
- assert(NewQType.isCanonical());
+ if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) {
+ NewTypeInfo = NewTypeInfo.withNoReturn(true);
+ RequiresAdjustment = true;
}
// Merge regparm attribute.
- if (OldType->getRegParmType() != NewType->getRegParmType()) {
- if (NewType->getRegParmType()) {
+ if (OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) {
+ if (NewTypeInfo.getRegParm()) {
Diag(New->getLocation(), diag::err_regparm_mismatch)
<< NewType->getRegParmType()
<< OldType->getRegParmType();
Diag(Old->getLocation(), diag::note_previous_declaration);
return true;
}
-
- NewQType = Context.getRegParmType(NewQType, OldType->getRegParmType());
- New->setType(NewQType);
- assert(NewQType.isCanonical());
+
+ NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm());
+ RequiresAdjustment = true;
+ }
+
+ if (RequiresAdjustment) {
+ NewType = Context.adjustFunctionType(NewType, NewTypeInfo);
+ New->setType(QualType(NewType, 0));
+ NewQType = Context.getCanonicalType(New->getType());
}
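The net effect of the ExtInfo merging above is that a redeclaration quietly inherits the extended function-type bits (calling convention, noreturn, regparm) of the prior declaration, and the type is rebuilt once via adjustFunctionType() instead of being patched attribute by attribute. An illustrative pair, using GNU attribute syntax:

__attribute__((noreturn)) void fatal(const char *reason);
void fatal(const char *reason);   // OldTypeInfo has noreturn, New does not:
                                  // NewTypeInfo.withNoReturn(true) fires and
                                  // one adjustFunctionType() call rebuilds
                                  // the redeclaration's type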
if (getLangOptions().CPlusPlus) {
@@ -1195,17 +1215,19 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
// Certain function declarations cannot be overloaded:
// -- Function declarations that differ only in the return type
// cannot be overloaded.
- QualType OldReturnType
- = cast<FunctionType>(OldQType.getTypePtr())->getResultType();
- QualType NewReturnType
- = cast<FunctionType>(NewQType.getTypePtr())->getResultType();
+ QualType OldReturnType = OldType->getResultType();
+ QualType NewReturnType = cast<FunctionType>(NewQType)->getResultType();
QualType ResQT;
if (OldReturnType != NewReturnType) {
if (NewReturnType->isObjCObjectPointerType()
&& OldReturnType->isObjCObjectPointerType())
ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
if (ResQT.isNull()) {
- Diag(New->getLocation(), diag::err_ovl_diff_return_type);
+ if (New->isCXXClassMember() && New->isOutOfLine())
+ Diag(New->getLocation(),
+ diag::err_member_def_does_not_match_ret_type) << New;
+ else
+ Diag(New->getLocation(), diag::err_ovl_diff_return_type);
Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
return true;
}
@@ -1268,9 +1290,19 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
// (C++98 8.3.5p3):
// All declarations for a function shall agree exactly in both the
// return type and the parameter-type-list.
- // attributes should be ignored when comparing.
- if (Context.getNoReturnType(OldQType, false) ==
- Context.getNoReturnType(NewQType, false))
+ // We also want to respect all the extended bits except noreturn.
+
+ // noreturn should now match unless the old type info didn't have it.
+ QualType OldQTypeForComparison = OldQType;
+ if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
+ assert(OldQType == QualType(OldType, 0));
+ const FunctionType *OldTypeForComparison
+ = Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
+ OldQTypeForComparison = QualType(OldTypeForComparison, 0);
+ assert(OldQTypeForComparison.isCanonical());
+ }
+
+ if (OldQTypeForComparison == NewQType)
return MergeCompatibleFunctionDecls(New, Old);
// Fall through for conflicting redeclarations and redefinitions.
@@ -1292,10 +1324,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
OldProto->arg_type_end());
NewQType = Context.getFunctionType(NewFuncType->getResultType(),
ParamTypes.data(), ParamTypes.size(),
- OldProto->isVariadic(),
- OldProto->getTypeQuals(),
- false, false, 0, 0,
- OldProto->getExtInfo());
+ OldProto->getExtProtoInfo());
New->setType(NewQType);
New->setHasInheritedPrototype();
@@ -1377,9 +1406,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
New->setType(Context.getFunctionType(MergedReturn, &ArgTypes[0],
ArgTypes.size(),
- OldProto->isVariadic(), 0,
- false, false, 0, 0,
- OldProto->getExtInfo()));
+ OldProto->getExtProtoInfo()));
return MergeCompatibleFunctionDecls(New, Old);
}
@@ -1444,48 +1471,26 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
return false;
}
-/// MergeVarDecl - We just parsed a variable 'New' which has the same name
-/// and scope as a previous declaration 'Old'. Figure out how to resolve this
-/// situation, merging decls or emitting diagnostics as appropriate.
+/// MergeVarDecl - We parsed a variable 'New' which has the same name and scope
+/// as a previous declaration 'Old'. Figure out how to merge their types,
+/// emitting diagnostics as appropriate.
///
-/// Tentative definition rules (C99 6.9.2p2) are checked by
-/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
-/// definitions here, since the initializer hasn't been attached.
+/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
+/// to here in AddInitializerToDecl and AddCXXDirectInitializerToDecl. We can't
+/// check them before the initializer is attached.
///
-void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
- // If the new decl is already invalid, don't do any other checking.
- if (New->isInvalidDecl())
+void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old) {
+ if (New->isInvalidDecl() || Old->isInvalidDecl())
return;
- // Verify the old decl was also a variable.
- VarDecl *Old = 0;
- if (!Previous.isSingleResult() ||
- !(Old = dyn_cast<VarDecl>(Previous.getFoundDecl()))) {
- Diag(New->getLocation(), diag::err_redefinition_different_kind)
- << New->getDeclName();
- Diag(Previous.getRepresentativeDecl()->getLocation(),
- diag::note_previous_definition);
- return New->setInvalidDecl();
- }
-
- // C++ [class.mem]p1:
- // A member shall not be declared twice in the member-specification [...]
- //
- // Here, we need only consider static data members.
- if (Old->isStaticDataMember() && !New->isOutOfLine()) {
- Diag(New->getLocation(), diag::err_duplicate_member)
- << New->getIdentifier();
- Diag(Old->getLocation(), diag::note_previous_declaration);
- New->setInvalidDecl();
- }
-
- MergeDeclAttributes(New, Old, Context);
-
- // Merge the types
QualType MergedT;
if (getLangOptions().CPlusPlus) {
- if (Context.hasSameType(New->getType(), Old->getType()))
- MergedT = New->getType();
+ AutoType *AT = New->getType()->getContainedAutoType();
+ if (AT && !AT->isDeduced()) {
+ // We don't know what the new type is until the initializer is attached.
+ return;
+ } else if (Context.hasSameType(New->getType(), Old->getType()))
+ return;
// C++ [basic.link]p10:
// [...] the types specified by all declarations referring to a given
// object or function shall be identical, except that declarations for an
@@ -1509,7 +1514,8 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
MergedT = Old->getType();
} else if (New->getType()->isObjCObjectPointerType()
&& Old->getType()->isObjCObjectPointerType()) {
- MergedT = Context.mergeObjCGCQualifiers(New->getType(), Old->getType());
+ MergedT = Context.mergeObjCGCQualifiers(New->getType(),
+ Old->getType());
}
} else {
MergedT = Context.mergeTypes(New->getType(), Old->getType());
@@ -1521,6 +1527,49 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
New->setType(MergedT);
+}
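The undeduced-auto early return above is what makes C++0x auto redeclarations work: there is nothing to merge until the initializer fixes the type, at which point AddInitializerToDecl calls back into MergeVarDeclTypes. An illustrative redeclaration:

extern int n;   // first declaration: type fully known
auto n = 42;    // while parsing 'auto n' the type is still undeduced, so the
                // merge is deferred; once '= 42' attaches, both are 'int'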
+
+/// MergeVarDecl - We just parsed a variable 'New' which has the same name
+/// and scope as a previous declaration 'Old'. Figure out how to resolve this
+/// situation, merging decls or emitting diagnostics as appropriate.
+///
+/// Tentative definition rules (C99 6.9.2p2) are checked by
+/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
+/// definitions here, since the initializer hasn't been attached.
+///
+void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
+ // If the new decl is already invalid, don't do any other checking.
+ if (New->isInvalidDecl())
+ return;
+
+ // Verify the old decl was also a variable.
+ VarDecl *Old = 0;
+ if (!Previous.isSingleResult() ||
+ !(Old = dyn_cast<VarDecl>(Previous.getFoundDecl()))) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(Previous.getRepresentativeDecl()->getLocation(),
+ diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // C++ [class.mem]p1:
+ // A member shall not be declared twice in the member-specification [...]
+ //
+ // Here, we need only consider static data members.
+ if (Old->isStaticDataMember() && !New->isOutOfLine()) {
+ Diag(New->getLocation(), diag::err_duplicate_member)
+ << New->getIdentifier();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->setInvalidDecl();
+ }
+
+ MergeDeclAttributes(New, Old, Context);
+
+ // Merge the types.
+ MergeVarDeclTypes(New, Old);
+ if (New->isInvalidDecl())
+ return;
// C99 6.2.2p4: Check if we have a static decl followed by a non-static.
if (New->getStorageClass() == SC_Static &&
@@ -1547,6 +1596,20 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
+ // Check if extern is followed by non-extern and vice-versa.
+ if (New->hasExternalStorage() &&
+ !Old->hasLinkage() && Old->isLocalVarDecl()) {
+ Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ if (Old->hasExternalStorage() &&
+ !New->hasLinkage() && New->isLocalVarDecl()) {
+ Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
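Inputs the two new checks reject, both at block scope where the first declaration has no linkage:

void f() {
  int i = 0;
  extern int i;   // err_extern_non_extern: extern follows non-extern 'i'
}

void g() {
  extern int j;
  int j = 0;      // err_non_extern_extern: non-extern follows extern 'j'
}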
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
// FIXME: The test for external storage here seems wrong? We still
@@ -1602,7 +1665,6 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
/// no declarator (e.g. "struct foo;") is parsed.
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS) {
- // FIXME: Error on auto/register at file scope
// FIXME: Error on inline/virtual/explicit
// FIXME: Warn on useless __thread
// FIXME: Warn on useless const/volatile
@@ -1635,15 +1697,15 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
}
if (DS.isFriendSpecified()) {
- // If we're dealing with a class template decl, assume that the
- // template routines are handling it.
- if (TagD && isa<ClassTemplateDecl>(TagD))
+ // If we're dealing with a decl but not a TagDecl, assume that
+ // whatever routines created it handled the friendship aspect.
+ if (TagD && !Tag)
return 0;
return ActOnFriendTypeDecl(S, DS, MultiTemplateParamsArg(*this, 0, 0));
}
if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
- ProcessDeclAttributeList(S, Record, DS.getAttributes());
+ ProcessDeclAttributeList(S, Record, DS.getAttributes().getList());
if (!Record->getDeclName() && Record->isDefinition() &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
@@ -1654,12 +1716,24 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
Diag(DS.getSourceRange().getBegin(), diag::ext_no_declarators)
<< DS.getSourceRange();
}
+ }
- // Microsoft allows unnamed struct/union fields. Don't complain
- // about them.
- // FIXME: Should we support Microsoft's extensions in this area?
- if (Record->getDeclName() && getLangOptions().Microsoft)
- return Tag;
+ // Check for Microsoft C extension: anonymous struct.
+ if (getLangOptions().Microsoft && !getLangOptions().CPlusPlus &&
+ CurContext->isRecord() &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
+ // Handle two kinds of anonymous struct:
+ // struct STRUCT;
+ // and
+ // STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
+ RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag);
+ if ((Record && Record->getDeclName() && !Record->isDefinition()) ||
+ (DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ DS.getRepAsType().get()->isStructureType())) {
+ Diag(DS.getSourceRange().getBegin(), diag::ext_ms_anonymous_struct)
+ << DS.getSourceRange();
+ return BuildMicrosoftCAnonymousStruct(S, DS, Record);
+ }
}
if (getLangOptions().CPlusPlus &&
@@ -1688,6 +1762,25 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
return TagD;
}
+/// ActOnVlaStmt - If this routine finds a VLA expression in a decl spec, it
+/// builds a statement for it and returns it so that it is evaluated.
+StmtResult Sema::ActOnVlaStmt(const DeclSpec &DS) {
+ StmtResult R;
+ if (DS.getTypeSpecType() == DeclSpec::TST_typeofExpr) {
+ Expr *Exp = DS.getRepAsExpr();
+ QualType Ty = Exp->getType();
+ if (Ty->isPointerType()) {
+ do
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ while (Ty->isPointerType());
+ }
+ if (Ty->isVariableArrayType()) {
+ R = ActOnExprStmt(MakeFullExpr(Exp));
+ }
+ }
+ return R;
+}
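A hedged sketch of how a parser-side caller might consume this; Actions and Stmts are hypothetical caller names, and the commented input is GNU C:

//   void g(int n) { typeof(int (*)[n]) p; }   // 'n' must still be evaluated
StmtResult VlaStmt = Actions.ActOnVlaStmt(DS);
if (VlaStmt.isUsable())
  Stmts.push_back(VlaStmt.take());  // evaluate the VLA bound with the decl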
+
/// We are trying to inject an anonymous member into the given scope;
/// check if there's an existing declaration that can't be overloaded.
///
@@ -1707,11 +1800,10 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
// Pick a representative declaration.
NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
- if (PrevDecl && Owner->isRecord()) {
- RecordDecl *Record = cast<RecordDecl>(Owner);
- if (!SemaRef.isDeclInScope(PrevDecl, Record, S))
- return false;
- }
+ assert(PrevDecl && "Expected a non-null Decl");
+
+ if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
+ return false;
SemaRef.Diag(NameLoc, diagnostic) << Name;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
@@ -1738,18 +1830,24 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
DeclContext *Owner,
RecordDecl *AnonRecord,
- AccessSpecifier AS) {
+ AccessSpecifier AS,
+ llvm::SmallVector<NamedDecl*, 2> &Chaining,
+ bool MSAnonStruct) {
unsigned diagKind
= AnonRecord->isUnion() ? diag::err_anonymous_union_member_redecl
: diag::err_anonymous_struct_member_redecl;
bool Invalid = false;
- for (RecordDecl::field_iterator F = AnonRecord->field_begin(),
- FEnd = AnonRecord->field_end();
- F != FEnd; ++F) {
- if ((*F)->getDeclName()) {
- if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, (*F)->getDeclName(),
- (*F)->getLocation(), diagKind)) {
+
+ // Look at every FieldDecl and IndirectFieldDecl with a name.
+ for (RecordDecl::decl_iterator D = AnonRecord->decls_begin(),
+ DEnd = AnonRecord->decls_end();
+ D != DEnd; ++D) {
+ if ((isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D)) &&
+ cast<NamedDecl>(*D)->getDeclName()) {
+ ValueDecl *VD = cast<ValueDecl>(*D);
+ if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
+ VD->getLocation(), diagKind)) {
// C++ [class.union]p2:
// The names of the members of an anonymous union shall be
// distinct from the names of any other entity in the
@@ -1761,20 +1859,34 @@ static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
// definition, the members of the anonymous union are
// considered to have been defined in the scope in which the
// anonymous union is declared.
- Owner->makeDeclVisibleInContext(*F);
- S->AddDecl(*F);
- SemaRef.IdResolver.AddDecl(*F);
+ unsigned OldChainingSize = Chaining.size();
+ if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
+ for (IndirectFieldDecl::chain_iterator PI = IF->chain_begin(),
+ PE = IF->chain_end(); PI != PE; ++PI)
+ Chaining.push_back(*PI);
+ else
+ Chaining.push_back(VD);
+
+ assert(Chaining.size() >= 2);
+ NamedDecl **NamedChain =
+ new (SemaRef.Context)NamedDecl*[Chaining.size()];
+ for (unsigned i = 0; i < Chaining.size(); i++)
+ NamedChain[i] = Chaining[i];
+
+ IndirectFieldDecl* IndirectField =
+ IndirectFieldDecl::Create(SemaRef.Context, Owner, VD->getLocation(),
+ VD->getIdentifier(), VD->getType(),
+ NamedChain, Chaining.size());
+
+ IndirectField->setAccess(AS);
+ IndirectField->setImplicit();
+ SemaRef.PushOnScopeChains(IndirectField, S);
// That includes picking up the appropriate access specifier.
- if (AS != AS_none) (*F)->setAccess(AS);
+ if (AS != AS_none) IndirectField->setAccess(AS);
+
+ Chaining.resize(OldChainingSize);
}
- } else if (const RecordType *InnerRecordType
- = (*F)->getType()->getAs<RecordType>()) {
- RecordDecl *InnerRecord = InnerRecordType->getDecl();
- if (InnerRecord->isAnonymousStructOrUnion())
- Invalid = Invalid ||
- InjectAnonymousStructOrUnionMembers(SemaRef, S, Owner,
- InnerRecord, AS);
}
}
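To make the chaining concrete, here is a sketch (illustrative, not from the patch) of the chain built for a doubly nested anonymous member; the caller seeds Chaining with the outer Anon field, which is why the assert demands at least two entries:

    struct S {
      struct {          // outer anonymous struct -> unnamed FieldDecl 'Anon'
        struct {        // inner anonymous struct -> another unnamed field
          int x;        // named member
        };
      };
    };
    // Lookup of 's.x' goes through an IndirectFieldDecl whose chain is
    // { outer Anon, inner Anon, x }. Chaining is truncated back to
    // OldChainingSize after each member so sibling members reuse the prefix.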
@@ -1819,7 +1931,7 @@ StorageClassSpecToFunctionDeclStorageClass(DeclSpec::SCS StorageClassSpec) {
llvm_unreachable("unknown storage class specifier");
}
-/// ActOnAnonymousStructOrUnion - Handle the declaration of an
+/// BuildAnonymousStructOrUnion - Handle the declaration of an
/// anonymous structure or union. Anonymous unions are a C++ feature
/// (C++ [class.union]) and a GNU C extension; anonymous structures
/// are a GNU C and GNU C++ extension.
@@ -1852,7 +1964,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Recover by adding 'static'.
DS.SetStorageClassSpec(DeclSpec::SCS_static, SourceLocation(),
- PrevSpec, DiagID);
+ PrevSpec, DiagID, getLangOptions());
}
// C++ [class.union]p3:
// A storage class is not allowed in a declaration of an
@@ -1865,7 +1977,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Recover by removing the storage specifier.
DS.SetStorageClassSpec(DeclSpec::SCS_unspecified, SourceLocation(),
- PrevSpec, DiagID);
+ PrevSpec, DiagID, getLangOptions());
}
// C++ [class.union]p2:
@@ -1898,10 +2010,16 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
} else if (RecordDecl *MemRecord = dyn_cast<RecordDecl>(*Mem)) {
if (!MemRecord->isAnonymousStructOrUnion() &&
MemRecord->getDeclName()) {
- // This is a nested type declaration.
- Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
- << (int)Record->isUnion();
- Invalid = true;
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOptions().Microsoft)
+ Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ else {
+ // This is a nested type declaration.
+ Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ Invalid = true;
+ }
}
} else if (isa<AccessSpecDecl>(*Mem)) {
// Any access specifier is fine.
@@ -1915,9 +2033,17 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
DK = diag::err_anonymous_record_with_function;
else if (isa<VarDecl>(*Mem))
DK = diag::err_anonymous_record_with_static;
- Diag((*Mem)->getLocation(), DK)
+
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOptions().Microsoft &&
+ DK == diag::err_anonymous_record_with_type)
+ Diag((*Mem)->getLocation(), diag::ext_anonymous_record_with_type)
<< (int)Record->isUnion();
+ else {
+ Diag((*Mem)->getLocation(), DK)
+ << (int)Record->isUnion();
Invalid = true;
+ }
}
}
}
@@ -1942,11 +2068,8 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
TInfo,
/*BitWidth=*/0, /*Mutable=*/false);
Anon->setAccess(AS);
- if (getLangOptions().CPlusPlus) {
+ if (getLangOptions().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
- if (!cast<CXXRecordDecl>(Record)->isEmpty())
- cast<CXXRecordDecl>(OwningClass)->setEmpty(false);
- }
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
assert(SCSpec != DeclSpec::SCS_typedef &&
@@ -1978,7 +2101,11 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Inject the members of the anonymous struct/union into the owning
// context and into the identifier resolver chain for name lookup
// purposes.
- if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS))
+ llvm::SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS,
+ Chain, false))
Invalid = true;
// Mark this as an anonymous struct/union type. Note that we do not
@@ -1995,6 +2122,57 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
return Anon;
}
+/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
+/// Microsoft C anonymous structure.
+/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
+/// Example:
+///
+/// struct A { int a; };
+/// struct B { struct A; int b; };
+///
+/// void foo() {
+/// B var;
+/// var.a = 3;
+/// }
+///
+Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ RecordDecl *Record) {
+
+ // If there is no Record, get the record via the typedef.
+ if (!Record)
+ Record = DS.getRepAsType().get()->getAsStructureType()->getDecl();
+
+ // Mock up a declarator.
+ Declarator Dc(DS, Declarator::TypeNameContext);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ assert(TInfo && "couldn't build declarator info for anonymous struct");
+
+ // Create a declaration for this anonymous struct.
+ NamedDecl* Anon = FieldDecl::Create(Context,
+ cast<RecordDecl>(CurContext),
+ DS.getSourceRange().getBegin(),
+ /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ TInfo,
+ /*BitWidth=*/0, /*Mutable=*/false);
+ Anon->setImplicit();
+
+ // Add the anonymous struct object to the current context.
+ CurContext->addDecl(Anon);
+
+ // Inject the members of the anonymous struct into the current
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ llvm::SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ if (InjectAnonymousStructOrUnionMembers(*this, S, CurContext,
+ Record->getDefinition(),
+ AS_none, Chain, true))
+ Anon->setInvalidDecl();
+
+ return Anon;
+}
/// GetNameForDeclarator - Determine the full declaration name for the
/// given Declarator.
@@ -2216,7 +2394,8 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
diag::err_declarator_need_ident)
<< D.getDeclSpec().getSourceRange() << D.getSourceRange();
return 0;
- }
+ } else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
+ return 0;
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
@@ -2228,6 +2407,10 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
if (D.getCXXScopeSpec().isInvalid())
D.setInvalidType();
else if (D.getCXXScopeSpec().isSet()) {
+ if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
+ UPPC_DeclarationQualifier))
+ return 0;
+
bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
if (!DC) {
@@ -2248,11 +2431,30 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
return 0;
- if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
- Diag(D.getIdentifierLoc(),
- diag::err_member_def_undefined_record)
- << Name << DC << D.getCXXScopeSpec().getRange();
- D.setInvalidType();
+ if (isa<CXXRecordDecl>(DC)) {
+ if (!cast<CXXRecordDecl>(DC)->hasDefinition()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_member_def_undefined_record)
+ << Name << DC << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ } else if (isa<CXXRecordDecl>(CurContext) &&
+ !D.getDeclSpec().isFriendSpecified()) {
+ // The user provided a superfluous scope specifier inside a class
+ // definition:
+ //
+ // class X {
+ // void X::f();
+ // };
+ if (CurContext->Equals(DC))
+ Diag(D.getIdentifierLoc(), diag::warn_member_extra_qualification)
+ << Name << FixItHint::CreateRemoval(D.getCXXScopeSpec().getRange());
+ else
+ Diag(D.getIdentifierLoc(), diag::err_member_qualification)
+ << Name << D.getCXXScopeSpec().getRange();
+
+ // Pretend that this qualifier was not here.
+ D.getCXXScopeSpec().clear();
+ }
}
// Check whether we need to rebuild the type of the given
@@ -2264,12 +2466,33 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
D.setInvalidType();
}
}
-
+
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every static data member of class T;
+ // - every member function of class T;
+ // - every member of class T that is itself a type;
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getIdentifier() && Record->getDeclName() == Name) {
+ Diag(D.getIdentifierLoc(), diag::err_member_name_of_class)
+ << Name;
+
+ // If this is a typedef, we'll end up spewing multiple diagnostics.
+ // Just return early; it's safer.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return 0;
+ }
+
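Minimal ill-formed examples (assumptions) for the new [class.mem]p13 enforcement; the typedef form returns early to avoid a cascade of follow-on diagnostics:

    struct X {
      static int X;   // error: member 'X' has the same name as its class
    };
    struct Y {
      typedef int Y;  // same error; HandleDeclarator bails out immediately
    };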
NamedDecl *New;
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType R = TInfo->getType();
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DeclarationType))
+ D.setInvalidType();
+
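An illustrative ill-formed declaration (assumed, not from the patch) caught by the new check on the declarator's type:

    template<typename ...Ts>
    struct Tuple {
      Ts members;   // error: declaration type contains an unexpanded
                    // parameter pack 'Ts'; the declarator is marked invalid
    };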
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
ForRedeclaration);
@@ -2425,7 +2648,16 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
Oversized);
if (FixedType.isNull()) return FixedType;
FixedType = Context.getPointerType(FixedType);
- return Qs.apply(FixedType);
+ return Qs.apply(Context, FixedType);
+ }
+ if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) {
+ QualType Inner = PTy->getInnerType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative,
+ Oversized);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getParenType(FixedType);
+ return Qs.apply(Context, FixedType);
}
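A sketch (assumed input) of what the new ParenType case enables: error recovery can now fold a constant bound through a parenthesized declarator just as it already does through a pointer:

    static const int N = 8;  // in C this is not an ICE, so 'a' below is a VLA
    static int (a)[N];       // file-scope VLA behind a ParenType: recovery
                             // can now rewrite it to int[8] as well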
const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
@@ -2595,6 +2827,8 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
Context.setjmp_bufDecl(NewTD);
else if (II->isStr("sigjmp_buf"))
Context.setsigjmp_bufDecl(NewTD);
+ else if (II->isStr("__builtin_va_list"))
+ Context.setBuiltinVaListType(Context.getTypedefType(NewTD));
}
return NewTD;
@@ -2666,7 +2900,7 @@ static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
}
NamedDecl*
-Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
QualType R, TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
@@ -2715,73 +2949,99 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
D.setInvalidType();
}
}
- if (DC->isRecord() && !CurContext->isRecord()) {
- // This is an out-of-line definition of a static data member.
+
+ bool isExplicitSpecialization = false;
+ VarDecl *NewVD;
+ if (!getLangOptions().CPlusPlus) {
+ NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(),
+ II, R, TInfo, SC, SCAsWritten);
+
+ if (D.isInvalidType())
+ NewVD->setInvalidDecl();
+ } else {
+ if (DC->isRecord() && !CurContext->isRecord()) {
+ // This is an out-of-line definition of a static data member.
+ if (SC == SC_Static) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ } else if (SC == SC_None)
+ SC = SC_Static;
+ }
if (SC == SC_Static) {
- Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::err_static_out_of_line)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
- } else if (SC == SC_None)
- SC = SC_Static;
- }
- if (SC == SC_Static) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
- if (RD->isLocalClass())
- Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_local_class)
- << Name << RD->getDeclName();
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ if (RD->isLocalClass())
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_local_class)
+ << Name << RD->getDeclName();
+
+ // C++ [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed.
+ //
+ // We also disallow static data members in anonymous structs.
+ if (CurContext->isRecord() && (RD->isUnion() || !RD->getDeclName()))
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_union_or_anon_struct)
+ << Name << RD->isUnion();
+ }
}
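Illustrative ill-formed inputs (assumptions) covered by the new check:

    union U {
      static int n;     // error: static data member in a union
    };
    struct S {
      struct {
        static int m;   // error: static data member in an anonymous struct
      };
    };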
- }
- // Match up the template parameter lists with the scope specifier, then
- // determine whether we have a template or a template specialization.
- bool isExplicitSpecialization = false;
- unsigned NumMatchedTemplateParamLists = TemplateParamLists.size();
- bool Invalid = false;
- if (TemplateParameterList *TemplateParams
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ isExplicitSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = TemplateParamLists.size();
+ bool Invalid = false;
+ if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getSourceRange().getBegin(),
+ D.getDeclSpec().getSourceRange().getBegin(),
D.getCXXScopeSpec(),
- (TemplateParameterList**)TemplateParamLists.get(),
- TemplateParamLists.size(),
+ TemplateParamLists.get(),
+ TemplateParamLists.size(),
/*never a friend*/ false,
isExplicitSpecialization,
Invalid)) {
- // All but one template parameter lists have been matching.
- --NumMatchedTemplateParamLists;
-
- if (TemplateParams->size() > 0) {
- // There is no such thing as a variable template.
- Diag(D.getIdentifierLoc(), diag::err_template_variable)
- << II
- << SourceRange(TemplateParams->getTemplateLoc(),
- TemplateParams->getRAngleLoc());
- return 0;
- } else {
- // There is an extraneous 'template<>' for this variable. Complain
- // about it, but allow the declaration of the variable.
- Diag(TemplateParams->getTemplateLoc(),
- diag::err_template_variable_noparams)
- << II
- << SourceRange(TemplateParams->getTemplateLoc(),
- TemplateParams->getRAngleLoc());
+ // All but one template parameter list has been matched.
+ --NumMatchedTemplateParamLists;
+
+ if (TemplateParams->size() > 0) {
+ // There is no such thing as a variable template.
+ Diag(D.getIdentifierLoc(), diag::err_template_variable)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ return 0;
+ } else {
+ // There is an extraneous 'template<>' for this variable. Complain
+ // about it, but allow the declaration of the variable.
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_variable_noparams)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
- isExplicitSpecialization = true;
+ isExplicitSpecialization = true;
+ }
}
- }
- VarDecl *NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(),
- II, R, TInfo, SC, SCAsWritten);
+ NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(),
+ II, R, TInfo, SC, SCAsWritten);
- if (D.isInvalidType() || Invalid)
- NewVD->setInvalidDecl();
+ // If this decl has an auto type in need of deduction, mark the VarDecl so
+ // we can diagnose uses of it in its own initializer.
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto) {
+ NewVD->setParsingAutoInit(R->getContainedAutoType());
+ }
+
+ if (D.isInvalidType() || Invalid)
+ NewVD->setInvalidDecl();
- SetNestedNameSpecifier(NewVD, D);
+ SetNestedNameSpecifier(NewVD, D);
- if (NumMatchedTemplateParamLists > 0 && D.getCXXScopeSpec().isSet()) {
- NewVD->setTemplateParameterListsInfo(Context,
- NumMatchedTemplateParamLists,
- (TemplateParameterList**)TemplateParamLists.release());
+ if (NumMatchedTemplateParamLists > 0 && D.getCXXScopeSpec().isSet()) {
+ NewVD->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ TemplateParamLists.release());
+ }
}
if (D.getDeclSpec().isThreadSpecified()) {
@@ -2801,11 +3061,29 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
ProcessDeclAttributes(S, NewVD, D);
// Handle GNU asm-label extension (encoded as an attribute).
- if (Expr *E = (Expr*) D.getAsmLabel()) {
+ if (Expr *E = (Expr*)D.getAsmLabel()) {
// The parser guarantees this is a string.
StringLiteral *SE = cast<StringLiteral>(E);
- NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
- Context, SE->getString()));
+ llvm::StringRef Label = SE->getString();
+ if (S->getFnParent() != 0) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ if (!Context.Target.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ break;
+ }
+ }
+
+ NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
+ Context, Label));
}
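Illustrative declarations (assumed) exercising each arm of the new switch for block-scope asm labels:

    void f() {
      int a asm("a_sym");           // warns: asm label on an automatic var
      register int r asm("bogus");  // error if 'bogus' is not a valid GCC
                                    //   register name for the target
      static int s asm("s_sym");    // accepted: static storage duration
      extern int e asm("e_sym");    // accepted: extern
    }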
// Diagnose shadowed variables before filtering for scope.
@@ -2817,33 +3095,37 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// declaration has linkage).
FilterLookupForScope(*this, Previous, DC, S, NewVD->hasLinkage());
- // Merge the decl with the existing one if appropriate.
- if (!Previous.empty()) {
- if (Previous.isSingleResult() &&
- isa<FieldDecl>(Previous.getFoundDecl()) &&
- D.getCXXScopeSpec().isSet()) {
- // The user tried to define a non-static data member
- // out-of-line (C++ [dcl.meaning]p1).
- Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
+ if (!getLangOptions().CPlusPlus)
+ CheckVariableDeclaration(NewVD, Previous, Redeclaration);
+ else {
+ // Merge the decl with the existing one if appropriate.
+ if (!Previous.empty()) {
+ if (Previous.isSingleResult() &&
+ isa<FieldDecl>(Previous.getFoundDecl()) &&
+ D.getCXXScopeSpec().isSet()) {
+ // The user tried to define a non-static data member
+ // out-of-line (C++ [dcl.meaning]p1).
+ Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
+ << D.getCXXScopeSpec().getRange();
+ Previous.clear();
+ NewVD->setInvalidDecl();
+ }
+ } else if (D.getCXXScopeSpec().isSet()) {
+ // No previous declaration in the qualifying scope.
+ Diag(D.getIdentifierLoc(), diag::err_no_member)
+ << Name << computeDeclContext(D.getCXXScopeSpec(), true)
<< D.getCXXScopeSpec().getRange();
- Previous.clear();
NewVD->setInvalidDecl();
}
- } else if (D.getCXXScopeSpec().isSet()) {
- // No previous declaration in the qualifying scope.
- Diag(D.getIdentifierLoc(), diag::err_no_member)
- << Name << computeDeclContext(D.getCXXScopeSpec(), true)
- << D.getCXXScopeSpec().getRange();
- NewVD->setInvalidDecl();
- }
-
- CheckVariableDeclaration(NewVD, Previous, Redeclaration);
- // This is an explicit specialization of a static data member. Check it.
- if (isExplicitSpecialization && !NewVD->isInvalidDecl() &&
- CheckMemberSpecialization(NewVD, Previous))
- NewVD->setInvalidDecl();
+ CheckVariableDeclaration(NewVD, Previous, Redeclaration);
+ // This is an explicit specialization of a static data member. Check it.
+ if (isExplicitSpecialization && !NewVD->isInvalidDecl() &&
+ CheckMemberSpecialization(NewVD, Previous))
+ NewVD->setInvalidDecl();
+ }
+
// attributes declared post-definition are currently ignored
// FIXME: This should be handled in attribute merging, not
// here.
@@ -2866,7 +3148,7 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// member, set the visibility of this variable.
if (NewVD->getLinkage() == ExternalLinkage && !DC->isRecord())
AddPushedVisibilityAttribute(NewVD);
-
+
MarkUnusedFileScopedDecl(NewVD);
return NewVD;
@@ -2883,13 +3165,13 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
///
void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
// Return if warning is ignored.
- if (Diags.getDiagnosticLevel(diag::warn_decl_shadow) == Diagnostic::Ignored)
+ if (Diags.getDiagnosticLevel(diag::warn_decl_shadow, R.getNameLoc()) ==
+ Diagnostic::Ignored)
return;
- // Don't diagnose declarations at file scope. The scope might not
- // have a DeclContext if (e.g.) we're parsing a function prototype.
- DeclContext *NewDC = static_cast<DeclContext*>(S->getEntity());
- if (NewDC && NewDC->isFileContext())
+ // Don't diagnose declarations at file scope.
+ DeclContext *NewDC = D->getDeclContext();
+ if (NewDC->isFileContext())
return;
// Only diagnose if we're shadowing an unambiguous field or variable.
@@ -2900,6 +3182,36 @@ void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
if (!isa<VarDecl>(ShadowedDecl) && !isa<FieldDecl>(ShadowedDecl))
return;
+ // Fields are not shadowed by variables in C++ static methods.
+ if (isa<FieldDecl>(ShadowedDecl))
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC))
+ if (MD->isStatic())
+ return;
+
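A sketch (assumption) of why the static-method case is exempt: the field is not nameable without an object there, so a same-named local hides nothing:

    struct S {
      int m;
      static void f() {
        int m = 0;   // no -Wshadow: S::m is inaccessible in a static method
        (void)m;
      }
    };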
+ if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl))
+ if (shadowedVar->isExternC()) {
+ // Don't warn for this case:
+ //
+ // @code
+ // extern int bob;
+ // void f() {
+ // extern int bob;
+ // }
+ // @endcode
+ if (D->isExternC())
+ return;
+
+ // For shadowing external vars, make sure that we point to the global
+ // declaration, not a locally scoped extern declaration.
+ for (VarDecl::redecl_iterator
+ I = shadowedVar->redecls_begin(), E = shadowedVar->redecls_end();
+ I != E; ++I)
+ if (I->isFileVarDecl()) {
+ ShadowedDecl = *I;
+ break;
+ }
+ }
+
DeclContext *OldDC = ShadowedDecl->getDeclContext();
// Only warn about certain kinds of shadowing for class members.
@@ -2937,6 +3249,10 @@ void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
/// \brief Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
+ if (Diags.getDiagnosticLevel(diag::warn_decl_shadow, D->getLocation()) ==
+ Diagnostic::Ignored)
+ return;
+
LookupResult R(*this, D->getDeclName(), D->getLocation(),
Sema::LookupOrdinaryName, Sema::ForRedeclaration);
LookupName(R, S);
@@ -2971,7 +3287,7 @@ void Sema::CheckVariableDeclaration(VarDecl *NewVD,
// This includes arrays of objects with address space qualifiers, but not
// automatic variables that point to other address spaces.
// ISO/IEC TR 18037 S5.1.2
- if (NewVD->hasLocalStorage() && (T.getAddressSpace() != 0)) {
+ if (NewVD->hasLocalStorage() && T.getAddressSpace() != 0) {
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl);
return NewVD->setInvalidDecl();
}
@@ -2979,7 +3295,7 @@ void Sema::CheckVariableDeclaration(VarDecl *NewVD,
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>())
Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
-
+
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
@@ -3115,23 +3431,42 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
-void Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
// Look for virtual methods in base classes that this method might override.
CXXBasePaths Paths;
FindOverriddenMethodData Data;
Data.Method = MD;
Data.S = this;
+ bool AddedAny = false;
if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) {
for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(),
E = Paths.found_decls_end(); I != E; ++I) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
- !CheckOverridingFunctionAttributes(MD, OldMD))
+ !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
MD->addOverriddenMethod(OldMD->getCanonicalDecl());
+ AddedAny = true;
+ }
}
}
}
+
+ return AddedAny;
+}
+
+static void DiagnoseInvalidRedeclaration(Sema &S, FunctionDecl *NewFD) {
+ LookupResult Prev(S, NewFD->getDeclName(), NewFD->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ S.LookupQualifiedName(Prev, NewFD->getDeclContext());
+ assert(!Prev.isAmbiguous() &&
+ "Cannot have an ambiguity in previous-declaration lookup");
+ for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
+ Func != FuncEnd; ++Func) {
+ if (isa<FunctionDecl>(*Func) &&
+ isNearlyMatchingFunction(S.Context, cast<FunctionDecl>(*Func), NewFD))
+ S.Diag((*Func)->getLocation(), diag::note_member_def_close_match);
+ }
}
NamedDecl*
@@ -3171,315 +3506,336 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
SC = SC_Static;
break;
}
- case DeclSpec::SCS_private_extern: SC = SC_PrivateExtern;break;
+ case DeclSpec::SCS_private_extern: SC = SC_PrivateExtern; break;
}
if (D.getDeclSpec().isThreadSpecified())
Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
- bool isFriend = D.getDeclSpec().isFriendSpecified();
- bool isInline = D.getDeclSpec().isInlineSpecified();
- bool isVirtual = D.getDeclSpec().isVirtualSpecified();
- bool isExplicit = D.getDeclSpec().isExplicitSpecified();
-
- DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpecAsWritten();
- FunctionDecl::StorageClass SCAsWritten
- = StorageClassSpecToFunctionDeclStorageClass(SCSpec);
-
- // Check that the return type is not an abstract class type.
- // For record types, this is done by the AbstractClassUsageDiagnoser once
- // the class has been completely parsed.
- if (!DC->isRecord() &&
- RequireNonAbstractType(D.getIdentifierLoc(),
- R->getAs<FunctionType>()->getResultType(),
- diag::err_abstract_type_in_decl,
- AbstractReturnType))
- D.setInvalidType();
-
// Do not allow returning a objc interface by-value.
if (R->getAs<FunctionType>()->getResultType()->isObjCObjectType()) {
Diag(D.getIdentifierLoc(),
diag::err_object_cannot_be_passed_returned_by_value) << 0
- << R->getAs<FunctionType>()->getResultType();
+ << R->getAs<FunctionType>()->getResultType();
D.setInvalidType();
}
-
- bool isVirtualOkay = false;
+
FunctionDecl *NewFD;
-
- if (isFriend) {
- // C++ [class.friend]p5
- // A function can be defined in a friend declaration of a
- // class . . . . Such a function is implicitly inline.
- isInline |= IsFunctionDefinition;
- }
-
- if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
- // This is a C++ constructor declaration.
- assert(DC->isRecord() &&
- "Constructors can only be declared in a member context");
-
- R = CheckConstructorDeclarator(D, R, SC);
-
- // Create the new declaration
- NewFD = CXXConstructorDecl::Create(Context,
- cast<CXXRecordDecl>(DC),
- NameInfo, R, TInfo,
- isExplicit, isInline,
- /*isImplicitlyDeclared=*/false);
- } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
- // This is a C++ destructor declaration.
- if (DC->isRecord()) {
- R = CheckDestructorDeclarator(D, R, SC);
-
- NewFD = CXXDestructorDecl::Create(Context,
- cast<CXXRecordDecl>(DC),
- NameInfo, R,
- isInline,
- /*isImplicitlyDeclared=*/false);
- NewFD->setTypeSourceInfo(TInfo);
-
- isVirtualOkay = true;
- } else {
- Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
-
- // Create a FunctionDecl to satisfy the function definition parsing
- // code path.
- NewFD = FunctionDecl::Create(Context, DC, D.getIdentifierLoc(),
- Name, R, TInfo, SC, SCAsWritten, isInline,
- /*hasPrototype=*/true);
- D.setInvalidType();
- }
- } else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
- if (!DC->isRecord()) {
- Diag(D.getIdentifierLoc(),
- diag::err_conv_function_not_member);
- return 0;
- }
-
- CheckConversionDeclarator(D, R, SC);
- NewFD = CXXConversionDecl::Create(Context, cast<CXXRecordDecl>(DC),
- NameInfo, R, TInfo,
- isInline, isExplicit);
-
- isVirtualOkay = true;
- } else if (DC->isRecord()) {
- // If the of the function is the same as the name of the record, then this
- // must be an invalid constructor that has a return type.
- // (The parser checks for a return type and makes the declarator a
- // constructor if it has no return type).
- // must have an invalid constructor that has a return type
- if (Name.getAsIdentifierInfo() &&
- Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
- Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
- << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
- << SourceRange(D.getIdentifierLoc());
- return 0;
- }
-
- bool isStatic = SC == SC_Static;
-
- // [class.free]p1:
- // Any allocation function for a class T is a static member
- // (even if not explicitly declared static).
- if (Name.getCXXOverloadedOperator() == OO_New ||
- Name.getCXXOverloadedOperator() == OO_Array_New)
- isStatic = true;
-
- // [class.free]p6 Any deallocation function for a class X is a static member
- // (even if not explicitly declared static).
- if (Name.getCXXOverloadedOperator() == OO_Delete ||
- Name.getCXXOverloadedOperator() == OO_Array_Delete)
- isStatic = true;
-
- // This is a C++ method declaration.
- NewFD = CXXMethodDecl::Create(Context, cast<CXXRecordDecl>(DC),
- NameInfo, R, TInfo,
- isStatic, SCAsWritten, isInline);
-
- isVirtualOkay = !isStatic;
- } else {
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+ bool isFriend = false;
+ DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpecAsWritten();
+ FunctionDecl::StorageClass SCAsWritten
+ = StorageClassSpecToFunctionDeclStorageClass(SCSpec);
+ FunctionTemplateDecl *FunctionTemplate = 0;
+ bool isExplicitSpecialization = false;
+ bool isFunctionTemplateSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = 0;
+
+ if (!getLangOptions().CPlusPlus) {
// Determine whether the function was written with a
// prototype. This is true when:
- // - we're in C++ (where every function has a prototype),
// - there is a prototype in the declarator, or
// - the type R of the function is some kind of typedef or other reference
// to a type name (which eventually refers to a function type).
bool HasPrototype =
- getLangOptions().CPlusPlus ||
- (D.getNumTypeObjects() && D.getTypeObject(0).Fun.hasPrototype) ||
- (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
-
+ (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
+ (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
+
NewFD = FunctionDecl::Create(Context, DC,
NameInfo, R, TInfo, SC, SCAsWritten, isInline,
HasPrototype);
- }
-
- if (D.isInvalidType())
- NewFD->setInvalidDecl();
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Set the lexical context.
+ NewFD->setLexicalDeclContext(CurContext);
+ // Filter out previous declarations that don't match the scope.
+ FilterLookupForScope(*this, Previous, DC, S, NewFD->hasLinkage());
+ } else {
+ isFriend = D.getDeclSpec().isFriendSpecified();
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+ bool isVirtualOkay = false;
+
+ // Check that the return type is not an abstract class type.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!DC->isRecord() &&
+ RequireNonAbstractType(D.getIdentifierLoc(),
+ R->getAs<FunctionType>()->getResultType(),
+ diag::err_abstract_type_in_decl,
+ AbstractReturnType))
+ D.setInvalidType();
- SetNestedNameSpecifier(NewFD, D);
- // Set the lexical context. If the declarator has a C++
- // scope specifier, or is the object of a friend declaration, the
- // lexical context will be different from the semantic context.
- NewFD->setLexicalDeclContext(CurContext);
+ if (isFriend) {
+ // C++ [class.friend]p5
+ // A function can be defined in a friend declaration of a
+ // class . . . . Such a function is implicitly inline.
+ isInline |= IsFunctionDefinition;
+ }
+
+ if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
+ // This is a C++ constructor declaration.
+ assert(DC->isRecord() &&
+ "Constructors can only be declared in a member context");
+
+ R = CheckConstructorDeclarator(D, R, SC);
+
+ // Create the new declaration
+ NewFD = CXXConstructorDecl::Create(Context,
+ cast<CXXRecordDecl>(DC),
+ NameInfo, R, TInfo,
+ isExplicit, isInline,
+ /*isImplicitlyDeclared=*/false);
+ } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // This is a C++ destructor declaration.
+ if (DC->isRecord()) {
+ R = CheckDestructorDeclarator(D, R, SC);
+
+ NewFD = CXXDestructorDecl::Create(Context,
+ cast<CXXRecordDecl>(DC),
+ NameInfo, R, TInfo,
+ isInline,
+ /*isImplicitlyDeclared=*/false);
+ isVirtualOkay = true;
+ } else {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
- // Match up the template parameter lists with the scope specifier, then
- // determine whether we have a template or a template specialization.
- FunctionTemplateDecl *FunctionTemplate = 0;
- bool isExplicitSpecialization = false;
- bool isFunctionTemplateSpecialization = false;
- unsigned NumMatchedTemplateParamLists = TemplateParamLists.size();
- bool Invalid = false;
- if (TemplateParameterList *TemplateParams
- = MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getSourceRange().getBegin(),
- D.getCXXScopeSpec(),
- (TemplateParameterList**)TemplateParamLists.get(),
- TemplateParamLists.size(),
- isFriend,
- isExplicitSpecialization,
- Invalid)) {
- // All but one template parameter lists have been matching.
- --NumMatchedTemplateParamLists;
+ // Create a FunctionDecl to satisfy the function definition parsing
+ // code path.
+ NewFD = FunctionDecl::Create(Context, DC, D.getIdentifierLoc(),
+ Name, R, TInfo, SC, SCAsWritten, isInline,
+ /*hasPrototype=*/true);
+ D.setInvalidType();
+ }
+ } else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ if (!DC->isRecord()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_conv_function_not_member);
+ return 0;
+ }
- if (TemplateParams->size() > 0) {
- // This is a function template
+ CheckConversionDeclarator(D, R, SC);
+ NewFD = CXXConversionDecl::Create(Context, cast<CXXRecordDecl>(DC),
+ NameInfo, R, TInfo,
+ isInline, isExplicit);
- // Check that we can declare a template here.
- if (CheckTemplateDeclScope(S, TemplateParams))
+ isVirtualOkay = true;
+ } else if (DC->isRecord()) {
+ // If the name of the function is the same as the name of the record,
+ // then this must be an invalid constructor that has a return type.
+ // (The parser checks for a return type and makes the declarator a
+ // constructor if it has no return type.)
+ if (Name.getAsIdentifierInfo() &&
+ Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
+ Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
return 0;
+ }
+
+ bool isStatic = SC == SC_Static;
+
+ // [class.free]p1:
+ // Any allocation function for a class T is a static member
+ // (even if not explicitly declared static).
+ if (Name.getCXXOverloadedOperator() == OO_New ||
+ Name.getCXXOverloadedOperator() == OO_Array_New)
+ isStatic = true;
+
+ // [class.free]p6 Any deallocation function for a class X is a static member
+ // (even if not explicitly declared static).
+ if (Name.getCXXOverloadedOperator() == OO_Delete ||
+ Name.getCXXOverloadedOperator() == OO_Array_Delete)
+ isStatic = true;
+
+ // This is a C++ method declaration.
+ NewFD = CXXMethodDecl::Create(Context, cast<CXXRecordDecl>(DC),
+ NameInfo, R, TInfo,
+ isStatic, SCAsWritten, isInline);
- FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
+ isVirtualOkay = !isStatic;
+ } else {
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ // - we're in C++ (where every function has a prototype),
+ NewFD = FunctionDecl::Create(Context, DC,
+ NameInfo, R, TInfo, SC, SCAsWritten, isInline,
+ true/*HasPrototype*/);
+ }
+ SetNestedNameSpecifier(NewFD, D);
+ isExplicitSpecialization = false;
+ isFunctionTemplateSpecialization = false;
+ NumMatchedTemplateParamLists = TemplateParamLists.size();
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Set the lexical context. If the declarator has a C++
+ // scope specifier, or is the object of a friend declaration, the
+ // lexical context will be different from the semantic context.
+ NewFD->setLexicalDeclContext(CurContext);
+
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ bool Invalid = false;
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getSourceRange().getBegin(),
+ D.getCXXScopeSpec(),
+ TemplateParamLists.get(),
+ TemplateParamLists.size(),
+ isFriend,
+ isExplicitSpecialization,
+ Invalid)) {
+ // All but one template parameter list has been matched.
+ --NumMatchedTemplateParamLists;
+
+ if (TemplateParams->size() > 0) {
+ // This is a function template
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return 0;
+
+ FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
NewFD->getLocation(),
Name, TemplateParams,
NewFD);
- FunctionTemplate->setLexicalDeclContext(CurContext);
- NewFD->setDescribedFunctionTemplate(FunctionTemplate);
- } else {
- // This is a function template specialization.
- isFunctionTemplateSpecialization = true;
-
- // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
- if (isFriend && isFunctionTemplateSpecialization) {
- // We want to remove the "template<>", found here.
- SourceRange RemoveRange = TemplateParams->getSourceRange();
-
- // If we remove the template<> and the name is not a
- // template-id, we're actually silently creating a problem:
- // the friend declaration will refer to an untemplated decl,
- // and clearly the user wants a template specialization. So
- // we need to insert '<>' after the name.
- SourceLocation InsertLoc;
- if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
- InsertLoc = D.getName().getSourceRange().getEnd();
- InsertLoc = PP.getLocForEndOfToken(InsertLoc);
+ FunctionTemplate->setLexicalDeclContext(CurContext);
+ NewFD->setDescribedFunctionTemplate(FunctionTemplate);
+ } else {
+ // This is a function template specialization.
+ isFunctionTemplateSpecialization = true;
+
+ // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
+ if (isFriend && isFunctionTemplateSpecialization) {
+ // We want to remove the "template<>", found here.
+ SourceRange RemoveRange = TemplateParams->getSourceRange();
+
+ // If we remove the template<> and the name is not a
+ // template-id, we're actually silently creating a problem:
+ // the friend declaration will refer to an untemplated decl,
+ // and clearly the user wants a template specialization. So
+ // we need to insert '<>' after the name.
+ SourceLocation InsertLoc;
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ InsertLoc = D.getName().getSourceRange().getEnd();
+ InsertLoc = PP.getLocForEndOfToken(InsertLoc);
+ }
+
+ Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
+ << Name << RemoveRange
+ << FixItHint::CreateRemoval(RemoveRange)
+ << FixItHint::CreateInsertion(InsertLoc, "<>");
+ }
+ }
}
- Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
- << Name << RemoveRange
- << FixItHint::CreateRemoval(RemoveRange)
- << FixItHint::CreateInsertion(InsertLoc, "<>");
- }
+ if (NumMatchedTemplateParamLists > 0 && D.getCXXScopeSpec().isSet()) {
+ NewFD->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ TemplateParamLists.release());
}
- }
- if (NumMatchedTemplateParamLists > 0 && D.getCXXScopeSpec().isSet()) {
- NewFD->setTemplateParameterListsInfo(Context,
- NumMatchedTemplateParamLists,
- (TemplateParameterList**)TemplateParamLists.release());
- }
-
- if (Invalid) {
- NewFD->setInvalidDecl();
- if (FunctionTemplate)
- FunctionTemplate->setInvalidDecl();
- }
+ if (Invalid) {
+ NewFD->setInvalidDecl();
+ if (FunctionTemplate)
+ FunctionTemplate->setInvalidDecl();
+ }
- // C++ [dcl.fct.spec]p5:
- // The virtual specifier shall only be used in declarations of
- // nonstatic class member functions that appear within a
- // member-specification of a class declaration; see 10.3.
- //
- if (isVirtual && !NewFD->isInvalidDecl()) {
- if (!isVirtualOkay) {
- Diag(D.getDeclSpec().getVirtualSpecLoc(),
- diag::err_virtual_non_function);
- } else if (!CurContext->isRecord()) {
- // 'virtual' was specified outside of the class.
- Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_out_of_class)
- << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
- } else {
- // Okay: Add virtual to the method.
- CXXRecordDecl *CurClass = cast<CXXRecordDecl>(DC);
- CurClass->setMethodAsVirtual(NewFD);
+ // C++ [dcl.fct.spec]p5:
+ // The virtual specifier shall only be used in declarations of
+ // nonstatic class member functions that appear within a
+ // member-specification of a class declaration; see 10.3.
+ //
+ if (isVirtual && !NewFD->isInvalidDecl()) {
+ if (!isVirtualOkay) {
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+ } else if (!CurContext->isRecord()) {
+ // 'virtual' was specified outside of the class.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else if (NewFD->getDescribedFunctionTemplate()) {
+ // C++ [temp.mem]p3:
+ // A member function template shall not be virtual.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_member_function_template)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else {
+ // Okay: Add virtual to the method.
+ NewFD->setVirtualAsWritten(true);
+ }
}
- }
- // C++ [dcl.fct.spec]p3:
- // The inline specifier shall not appear on a block scope function declaration.
- if (isInline && !NewFD->isInvalidDecl() && getLangOptions().CPlusPlus) {
- if (CurContext->isFunctionOrMethod()) {
- // 'inline' is not allowed on block scope function declaration.
- Diag(D.getDeclSpec().getInlineSpecLoc(),
- diag::err_inline_declaration_block_scope) << Name
- << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ // C++ [dcl.fct.spec]p3:
+ // The inline specifier shall not appear on a block scope function declaration.
+ if (isInline && !NewFD->isInvalidDecl()) {
+ if (CurContext->isFunctionOrMethod()) {
+ // 'inline' is not allowed on block scope function declaration.
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_inline_declaration_block_scope) << Name
+ << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ }
}
- }
- // C++ [dcl.fct.spec]p6:
- // The explicit specifier shall be used only in the declaration of a
- // constructor or conversion function within its class definition; see 12.3.1
- // and 12.3.2.
- if (isExplicit && !NewFD->isInvalidDecl()) {
- if (!CurContext->isRecord()) {
- // 'explicit' was specified outside of the class.
- Diag(D.getDeclSpec().getExplicitSpecLoc(),
- diag::err_explicit_out_of_class)
- << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
- } else if (!isa<CXXConstructorDecl>(NewFD) &&
- !isa<CXXConversionDecl>(NewFD)) {
- // 'explicit' was specified on a function that wasn't a constructor
- // or conversion function.
- Diag(D.getDeclSpec().getExplicitSpecLoc(),
- diag::err_explicit_non_ctor_or_conv_function)
- << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
- }
- }
-
- // Filter out previous declarations that don't match the scope.
- FilterLookupForScope(*this, Previous, DC, S, NewFD->hasLinkage());
-
- if (isFriend) {
- // DC is the namespace in which the function is being declared.
- assert((DC->isFileContext() || !Previous.empty()) &&
- "previously-undeclared friend function being created "
- "in a non-namespace context");
-
- // For now, claim that the objects have no previous declaration.
- if (FunctionTemplate) {
- FunctionTemplate->setObjectOfFriendDecl(false);
- FunctionTemplate->setAccess(AS_public);
+ // C++ [dcl.fct.spec]p6:
+ // The explicit specifier shall be used only in the declaration of a
+ // constructor or conversion function within its class definition; see 12.3.1
+ // and 12.3.2.
+ if (isExplicit && !NewFD->isInvalidDecl()) {
+ if (!CurContext->isRecord()) {
+ // 'explicit' was specified outside of the class.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ } else if (!isa<CXXConstructorDecl>(NewFD) &&
+ !isa<CXXConversionDecl>(NewFD)) {
+ // 'explicit' was specified on a function that wasn't a constructor
+ // or conversion function.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_non_ctor_or_conv_function)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ }
+ }
+
+ // Filter out previous declarations that don't match the scope.
+ FilterLookupForScope(*this, Previous, DC, S, NewFD->hasLinkage());
+
+ if (isFriend) {
+ // For now, claim that the objects have no previous declaration.
+ if (FunctionTemplate) {
+ FunctionTemplate->setObjectOfFriendDecl(false);
+ FunctionTemplate->setAccess(AS_public);
+ }
+ NewFD->setObjectOfFriendDecl(false);
+ NewFD->setAccess(AS_public);
}
- NewFD->setObjectOfFriendDecl(false);
- NewFD->setAccess(AS_public);
- }
- if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
- !CurContext->isRecord()) {
- // C++ [class.static]p1:
- // A data or function member of a class may be declared static
- // in a class definition, in which case it is a static member of
- // the class.
+ if (isa<CXXMethodDecl>(NewFD) && DC == CurContext && IsFunctionDefinition) {
+ // A method is implicitly inline if it's defined in its class
+ // definition.
+ NewFD->setImplicitlyInline();
+ }
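For illustration (assumed example), this marks in-class definitions implicitly inline, per C++ [class.mfct]p2:

    struct S {
      void f() { }   // DC == CurContext and IsFunctionDefinition hold, so
    };               // NewFD->setImplicitlyInline() applies to 'f'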
- // Complain about the 'static' specifier if it's on an out-of-line
- // member function definition.
- Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::err_static_out_of_line)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
- }
+ if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
+ !CurContext->isRecord()) {
+ // C++ [class.static]p1:
+ // A data or function member of a class may be declared static
+ // in a class definition, in which case it is a static member of
+ // the class.
+ // Complain about the 'static' specifier if it's on an out-of-line
+ // member function definition.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ }
+ }
+
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*) D.getAsmLabel()) {
// The parser guarantees this is a string.
@@ -3491,8 +3847,8 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Copy the parameter declarations from the declarator D to the function
// declaration NewFD, if they are available. First scavenge them into Params.
llvm::SmallVector<ParmVarDecl*, 16> Params;
- if (D.getNumTypeObjects() > 0) {
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ if (D.isFunctionDeclarator()) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
// function that takes no arguments, not a function that takes a
@@ -3510,7 +3866,6 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
if (getLangOptions().CPlusPlus &&
Param->getType().getUnqualifiedType() != Context.VoidTy)
Diag(Param->getLocation(), diag::err_param_typedef_of_void);
- // FIXME: Leaks decl?
} else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) {
for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
@@ -3547,168 +3902,209 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Finally, we know we have the right number of parameters, install them.
NewFD->setParams(Params.data(), Params.size());
- // If the declarator is a template-id, translate the parser's template
- // argument list into our AST format.
- bool HasExplicitTemplateArgs = false;
- TemplateArgumentListInfo TemplateArgs;
- if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
- TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
- TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
- TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(*this,
- TemplateId->getTemplateArgs(),
- TemplateId->NumArgs);
- translateTemplateArguments(TemplateArgsPtr,
- TemplateArgs);
- TemplateArgsPtr.release();
+ // Process the non-inheritable attributes on this declaration.
+ ProcessDeclAttributes(S, NewFD, D,
+ /*NonInheritable=*/true, /*Inheritable=*/false);
+
+ if (!getLangOptions().CPlusPlus) {
+ // Perform semantic checking on the function declaration.
+ bool isExplctSpecialization = false;
+ CheckFunctionDeclaration(S, NewFD, Previous, isExplctSpecialization,
+ Redeclaration);
+ assert((NewFD->isInvalidDecl() || !Redeclaration ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
+ } else {
+ // If the declarator is a template-id, translate the parser's template
+ // argument list into our AST format.
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
+ TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr,
+ TemplateArgs);
+ TemplateArgsPtr.release();
- HasExplicitTemplateArgs = true;
+ HasExplicitTemplateArgs = true;
- if (FunctionTemplate) {
- // FIXME: Diagnose function template with explicit template
- // arguments.
- HasExplicitTemplateArgs = false;
- } else if (!isFunctionTemplateSpecialization &&
- !D.getDeclSpec().isFriendSpecified()) {
- // We have encountered something that the user meant to be a
- // specialization (because it has explicitly-specified template
- // arguments) but that was not introduced with a "template<>" (or had
- // too few of them).
- Diag(D.getIdentifierLoc(), diag::err_template_spec_needs_header)
- << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)
- << FixItHint::CreateInsertion(
- D.getDeclSpec().getSourceRange().getBegin(),
- "template<> ");
- isFunctionTemplateSpecialization = true;
- } else {
- // "friend void foo<>(int);" is an implicit specialization decl.
- isFunctionTemplateSpecialization = true;
- }
- } else if (isFriend && isFunctionTemplateSpecialization) {
- // This combination is only possible in a recovery case; the user
- // wrote something like:
- // template <> friend void foo(int);
- // which we're recovering from as if the user had written:
- // friend void foo<>(int);
- // Go ahead and fake up a template id.
- HasExplicitTemplateArgs = true;
- TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
- TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
- }
-
- // If it's a friend (and only if it's a friend), it's possible
- // that either the specialized function type or the specialized
- // template is dependent, and therefore matching will fail. In
- // this case, don't check the specialization yet.
- if (isFunctionTemplateSpecialization && isFriend &&
- (NewFD->getType()->isDependentType() || DC->isDependentContext())) {
- assert(HasExplicitTemplateArgs &&
- "friend function specialization without template args");
- if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
- Previous))
- NewFD->setInvalidDecl();
- } else if (isFunctionTemplateSpecialization) {
- if (CheckFunctionTemplateSpecialization(NewFD,
- (HasExplicitTemplateArgs ? &TemplateArgs : 0),
- Previous))
- NewFD->setInvalidDecl();
- } else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) {
- if (CheckMemberSpecialization(NewFD, Previous))
- NewFD->setInvalidDecl();
- }
+ if (FunctionTemplate) {
+ // Function template with explicit template arguments.
+ Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
+
+ HasExplicitTemplateArgs = false;
+ } else if (!isFunctionTemplateSpecialization &&
+ !D.getDeclSpec().isFriendSpecified()) {
+ // We have encountered something that the user meant to be a
+ // specialization (because it has explicitly-specified template
+ // arguments) but that was not introduced with a "template<>" (or had
+ // too few of them).
+ Diag(D.getIdentifierLoc(), diag::err_template_spec_needs_header)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)
+ << FixItHint::CreateInsertion(
+ D.getDeclSpec().getSourceRange().getBegin(),
+ "template<> ");
+ isFunctionTemplateSpecialization = true;
+ } else {
+ // "friend void foo<>(int);" is an implicit specialization decl.
+ isFunctionTemplateSpecialization = true;
+ }
+ } else if (isFriend && isFunctionTemplateSpecialization) {
+ // This combination is only possible in a recovery case; the user
+ // wrote something like:
+ // template <> friend void foo(int);
+ // which we're recovering from as if the user had written:
+ // friend void foo<>(int);
+ // Go ahead and fake up a template id.
+ HasExplicitTemplateArgs = true;
+ TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
+ TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
+ }
+
+ // If it's a friend (and only if it's a friend), it's possible
+ // that either the specialized function type or the specialized
+ // template is dependent, and therefore matching will fail. In
+ // this case, don't check the specialization yet.
+ if (isFunctionTemplateSpecialization && isFriend &&
+ (NewFD->getType()->isDependentType() || DC->isDependentContext())) {
+ assert(HasExplicitTemplateArgs &&
+ "friend function specialization without template args");
+ if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
+ Previous))
+ NewFD->setInvalidDecl();
+ } else if (isFunctionTemplateSpecialization) {
+ if (CheckFunctionTemplateSpecialization(NewFD,
+ (HasExplicitTemplateArgs ? &TemplateArgs : 0),
+ Previous))
+ NewFD->setInvalidDecl();
+ } else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) {
+ if (CheckMemberSpecialization(NewFD, Previous))
+ NewFD->setInvalidDecl();
+ }
- // Perform semantic checking on the function declaration.
- bool OverloadableAttrRequired = false; // FIXME: HACK!
- CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization,
- Redeclaration, /*FIXME:*/OverloadableAttrRequired);
+ // Perform semantic checking on the function declaration.
+ CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization,
+ Redeclaration);
- assert((NewFD->isInvalidDecl() || !Redeclaration ||
- Previous.getResultKind() != LookupResult::FoundOverloaded) &&
- "previous declaration set still overloaded");
+ assert((NewFD->isInvalidDecl() || !Redeclaration ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
- NamedDecl *PrincipalDecl = (FunctionTemplate
- ? cast<NamedDecl>(FunctionTemplate)
- : NewFD);
+ NamedDecl *PrincipalDecl = (FunctionTemplate
+ ? cast<NamedDecl>(FunctionTemplate)
+ : NewFD);
- if (isFriend && Redeclaration) {
- AccessSpecifier Access = AS_public;
- if (!NewFD->isInvalidDecl())
- Access = NewFD->getPreviousDeclaration()->getAccess();
+ if (isFriend && Redeclaration) {
+ AccessSpecifier Access = AS_public;
+ if (!NewFD->isInvalidDecl())
+ Access = NewFD->getPreviousDeclaration()->getAccess();
- NewFD->setAccess(Access);
- if (FunctionTemplate) FunctionTemplate->setAccess(Access);
+ NewFD->setAccess(Access);
+ if (FunctionTemplate) FunctionTemplate->setAccess(Access);
- PrincipalDecl->setObjectOfFriendDecl(true);
- }
+ PrincipalDecl->setObjectOfFriendDecl(true);
+ }
- if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
- PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
- PrincipalDecl->setNonMemberOperator();
+ if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
+ PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ PrincipalDecl->setNonMemberOperator();
- // If we have a function template, check the template parameter
- // list. This will check and merge default template arguments.
- if (FunctionTemplate) {
- FunctionTemplateDecl *PrevTemplate = FunctionTemplate->getPreviousDeclaration();
- CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
- PrevTemplate? PrevTemplate->getTemplateParameters() : 0,
- D.getDeclSpec().isFriendSpecified()? TPC_FriendFunctionTemplate
- : TPC_FunctionTemplate);
- }
+ // If we have a function template, check the template parameter
+ // list. This will check and merge default template arguments.
+ if (FunctionTemplate) {
+ FunctionTemplateDecl *PrevTemplate = FunctionTemplate->getPreviousDeclaration();
+ CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
+ PrevTemplate? PrevTemplate->getTemplateParameters() : 0,
+ D.getDeclSpec().isFriendSpecified()
+ ? (IsFunctionDefinition
+ ? TPC_FriendFunctionTemplateDefinition
+ : TPC_FriendFunctionTemplate)
+ : (D.getCXXScopeSpec().isSet() &&
+ DC && DC->isRecord() &&
+ DC->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_FunctionTemplate);
+ }
+
+ if (NewFD->isInvalidDecl()) {
+ // Ignore all the rest of this.
+ } else if (!Redeclaration) {
+ // Fake up an access specifier if it's supposed to be a class member.
+ if (isa<CXXRecordDecl>(NewFD->getDeclContext()))
+ NewFD->setAccess(AS_public);
+
+ // Qualified decls generally require a previous declaration.
+ if (D.getCXXScopeSpec().isSet()) {
+ // ...with the major exception of templated-scope or
+ // dependent-scope friend declarations.
+
+ // TODO: we currently also suppress this check in dependent
+ // contexts because (1) the parameter depth will be off when
+ // matching friend templates and (2) we might actually be
+ // selecting a friend based on a dependent factor. But there
+ // are situations where these conditions don't apply and we
+ // can actually do this check immediately.
+ if (isFriend &&
+ (NumMatchedTemplateParamLists ||
+ D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ CurContext->isDependentContext())) {
+ // ignore these
+ } else {
+ // The user tried to provide an out-of-line definition for a
+ // function that is a member of a class or namespace, but there
+ // was no such member function declared (C++ [class.mfct]p2,
+ // C++ [namespace.memdef]p2). For example:
+ //
+ // class X {
+ // void f() const;
+ // };
+ //
+ // void X::f() { } // ill-formed
+ //
+ // Complain about this problem, and attempt to suggest close
+ // matches (e.g., those that differ only in cv-qualifiers and
+ // whether the parameter types are references).
+ Diag(D.getIdentifierLoc(), diag::err_member_def_does_not_match)
+ << Name << DC << D.getCXXScopeSpec().getRange();
+ NewFD->setInvalidDecl();
+
+ DiagnoseInvalidRedeclaration(*this, NewFD);
+ }
- if (D.getCXXScopeSpec().isSet() && !NewFD->isInvalidDecl()) {
- // Fake up an access specifier if it's supposed to be a class member.
- if (!Redeclaration && isa<CXXRecordDecl>(NewFD->getDeclContext()))
- NewFD->setAccess(AS_public);
+ // Unqualified local friend declarations are required to resolve
+ // to something.
+ } else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) {
+ Diag(D.getIdentifierLoc(), diag::err_no_matching_local_friend);
+ NewFD->setInvalidDecl();
+ DiagnoseInvalidRedeclaration(*this, NewFD);
+ }
- // An out-of-line member function declaration must also be a
- // definition (C++ [dcl.meaning]p1).
- // Note that this is not the case for explicit specializations of
- // function templates or member functions of class templates, per
- // C++ [temp.expl.spec]p2. We also allow these declarations as an extension
- // for compatibility with old SWIG code which likes to generate them.
- if (!IsFunctionDefinition && !isFriend &&
- !isFunctionTemplateSpecialization && !isExplicitSpecialization) {
+ } else if (!IsFunctionDefinition && D.getCXXScopeSpec().isSet() &&
+ !isFriend && !isFunctionTemplateSpecialization &&
+ !isExplicitSpecialization) {
+ // An out-of-line member function declaration must also be a
+ // definition (C++ [dcl.meaning]p1).
+ // Note that this is not the case for explicit specializations of
+ // function templates or member functions of class templates, per
+ // C++ [temp.expl.spec]p2. We also allow these declarations as an extension
+ // for compatibility with old SWIG code which likes to generate them.
Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
<< D.getCXXScopeSpec().getRange();
}
- if (!Redeclaration && !(isFriend && CurContext->isDependentContext())) {
- // The user tried to provide an out-of-line definition for a
- // function that is a member of a class or namespace, but there
- // was no such member function declared (C++ [class.mfct]p2,
- // C++ [namespace.memdef]p2). For example:
- //
- // class X {
- // void f() const;
- // };
- //
- // void X::f() { } // ill-formed
- //
- // Complain about this problem, and attempt to suggest close
- // matches (e.g., those that differ only in cv-qualifiers and
- // whether the parameter types are references).
- Diag(D.getIdentifierLoc(), diag::err_member_def_does_not_match)
- << Name << DC << D.getCXXScopeSpec().getRange();
- NewFD->setInvalidDecl();
-
- LookupResult Prev(*this, Name, D.getIdentifierLoc(), LookupOrdinaryName,
- ForRedeclaration);
- LookupQualifiedName(Prev, DC);
- assert(!Prev.isAmbiguous() &&
- "Cannot have an ambiguity in previous-declaration lookup");
- for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
- Func != FuncEnd; ++Func) {
- if (isa<FunctionDecl>(*Func) &&
- isNearlyMatchingFunction(Context, cast<FunctionDecl>(*Func), NewFD))
- Diag((*Func)->getLocation(), diag::note_member_def_close_match);
- }
- }
}
-
+
+
// Handle attributes. We need to have merged decls when handling attributes
// (for example to check for conflicts, etc).
// FIXME: This needs to happen before we merge declarations. Then,
// let attribute merging cope with attribute conflicts.
- ProcessDeclAttributes(S, NewFD, D);
+ ProcessDeclAttributes(S, NewFD, D,
+ /*NonInheritable=*/false, /*Inheritable=*/true);
// attributes declared post-definition are currently ignored
// FIXME: This should happen during attribute merging
@@ -3723,17 +4119,6 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
AddKnownFunctionAttributes(NewFD);
- if (OverloadableAttrRequired && !NewFD->hasAttr<OverloadableAttr>()) {
- // If a function name is overloadable in C, then every function
- // with that name must be marked "overloadable".
- Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
- << Redeclaration << NewFD;
- if (!Previous.empty())
- Diag(Previous.getRepresentativeDecl()->getLocation(),
- diag::note_attribute_overloadable_prev_overload);
- NewFD->addAttr(::new (Context) OverloadableAttr(SourceLocation(), Context));
- }
-
if (NewFD->hasAttr<OverloadableAttr>() &&
!NewFD->getType()->getAs<FunctionProtoType>()) {
Diag(NewFD->getLocation(),
@@ -3742,9 +4127,11 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Turn this into a variadic function with no parameters.
const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
- QualType R = Context.getFunctionType(FT->getResultType(),
- 0, 0, true, 0, false, false, 0, 0,
- FT->getExtInfo());
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ EPI.ExtInfo = FT->getExtInfo();
+
+ QualType R = Context.getFunctionType(FT->getResultType(), 0, 0, EPI);
NewFD->setType(R);
}
@@ -3762,14 +4149,28 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Set this FunctionDecl's range up to the right paren.
NewFD->setLocEnd(D.getSourceRange().getEnd());
- if (FunctionTemplate && NewFD->isInvalidDecl())
- FunctionTemplate->setInvalidDecl();
-
- if (FunctionTemplate)
- return FunctionTemplate;
+ if (getLangOptions().CPlusPlus) {
+ if (FunctionTemplate) {
+ if (NewFD->isInvalidDecl())
+ FunctionTemplate->setInvalidDecl();
+ return FunctionTemplate;
+ }
+ }
MarkUnusedFileScopedDecl(NewFD);
+ if (getLangOptions().CUDA)
+ if (IdentifierInfo *II = NewFD->getIdentifier())
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (II->isStr("cudaConfigureCall")) {
+ if (!R->getAs<FunctionType>()->getResultType()->isScalarType())
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return);
+
+ Context.setcudaConfigureCallDecl(NewFD);
+ }
+ }
+
return NewFD;
}
@@ -3790,8 +4191,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
bool IsExplicitSpecialization,
- bool &Redeclaration,
- bool &OverloadableAttrRequired) {
+ bool &Redeclaration) {
// If NewFD is already known erroneous, don't do any of this checking.
if (NewFD->isInvalidDecl()) {
// If this is a class member, mark the class invalid immediately.
@@ -3836,9 +4236,6 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
Redeclaration = true;
OldDecl = Previous.getFoundDecl();
} else {
- if (!getLangOptions().CPlusPlus)
- OverloadableAttrRequired = true;
-
switch (CheckOverload(S, NewFD, Previous, OldDecl,
/*NewIsUsingDecl*/ false)) {
case Ovl_Match:
@@ -3853,6 +4250,23 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
Redeclaration = false;
break;
}
+
+ if (!getLangOptions().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
+ // If a function name is overloadable in C, then every function
+ // with that name must be marked "overloadable".
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
+ << Redeclaration << NewFD;
+ NamedDecl *OverloadedDecl = 0;
+ if (Redeclaration)
+ OverloadedDecl = OldDecl;
+ else if (!Previous.empty())
+ OverloadedDecl = Previous.getRepresentativeDecl();
+ if (OverloadedDecl)
+ Diag(OverloadedDecl->getLocation(),
+ diag::note_attribute_overloadable_prev_overload);
+ NewFD->addAttr(::new (Context) OverloadableAttr(SourceLocation(),
+ Context));
+ }
}
if (Redeclaration) {
@@ -3907,23 +4321,11 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(ClassType));
-// NewFD->getDeclName().dump();
-// Name.dump();
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
return NewFD->setInvalidDecl();
}
}
-
- Record->setUserDeclaredDestructor(true);
- // C++ [class]p4: A POD-struct is an aggregate class that has [...] no
- // user-defined destructor.
- Record->setPOD(false);
-
- // C++ [class.dtor]p3: A destructor is trivial if it is an implicitly-
- // declared destructor.
- // FIXME: C++0x: don't do this for "= default" destructors
- Record->setHasTrivialDestructor(false);
} else if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(NewFD)) {
ActOnConversionDeclarator(Conversion);
@@ -3932,8 +4334,23 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// Find any virtual functions that this function overrides.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
if (!Method->isFunctionTemplateSpecialization() &&
- !Method->getDescribedFunctionTemplate())
- AddOverriddenMethods(Method->getParent(), Method);
+ !Method->getDescribedFunctionTemplate()) {
+ if (AddOverriddenMethods(Method->getParent(), Method)) {
+ // If the function was marked as "static", we have a problem.
+ if (NewFD->getStorageClass() == SC_Static) {
+ Diag(NewFD->getLocation(), diag::err_static_overrides_virtual)
+ << NewFD->getDeclName();
+ for (CXXMethodDecl::method_iterator
+ Overridden = Method->begin_overridden_methods(),
+ OverriddenEnd = Method->end_overridden_methods();
+ Overridden != OverriddenEnd;
+ ++Overridden) {
+ Diag((*Overridden)->getLocation(),
+ diag::note_overridden_virtual_function);
+ }
+ }
+ }
+ }
}
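Illustrative of the new err_static_overrides_virtual path above (a sketch, not from the patch): a 'static' member that matches a virtual function in a base class is now diagnosed, with a note at each overridden method.

    struct Base {
      virtual void f();
    };
    struct Derived : Base {
      static void f();  // error: 'static' member function overrides a virtual
    };                  // note: overridden virtual function is 'Base::f'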
// Extra checking for C++ overloaded operators (C++ [over.oper]).
@@ -3951,6 +4368,18 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// during delayed parsing anyway.
if (!CurContext->isRecord())
CheckCXXDefaultArguments(NewFD);
+
+ // If this function declares a builtin function, check the type of this
+ // declaration against the expected type for the builtin.
+ if (unsigned BuiltinID = NewFD->getBuiltinID()) {
+ ASTContext::GetBuiltinTypeError Error;
+ QualType T = Context.GetBuiltinType(BuiltinID, Error);
+ if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) {
+ // The type of this function differs from the type of the builtin,
+ // so forget about the builtin entirely.
+ Context.BuiltinInfo.ForgetBuiltin(BuiltinID, Context.Idents);
+ }
+ }
}
}
@@ -3976,7 +4405,6 @@ void Sema::CheckMain(FunctionDecl* FD) {
const FunctionType* FT = T->getAs<FunctionType>();
if (!Context.hasSameUnqualifiedType(FT->getResultType(), Context.IntTy)) {
- // TODO: add a replacement fixit to turn the return type into 'int'.
Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint);
FD->setInvalidDecl(true);
}
@@ -4043,6 +4471,11 @@ void Sema::CheckMain(FunctionDecl* FD) {
if (nparams == 1 && !FD->isInvalidDecl()) {
Diag(FD->getLocation(), diag::warn_main_one_arg);
}
+
+ if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
+ Diag(FD->getLocation(), diag::err_main_template_decl);
+ FD->setInvalidDecl();
+ }
}
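The added err_main_template_decl check rejects a templated main, e.g. (assumed example):

    template<typename T>
    int main() {  // error: 'main' cannot be declared as a function template
      return 0;
    }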
bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
@@ -4061,17 +4494,14 @@ bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
return true;
}
-void Sema::AddInitializerToDecl(Decl *dcl, Expr *init) {
- AddInitializerToDecl(dcl, init, /*DirectInit=*/false);
-}
-
/// AddInitializerToDecl - Adds the initializer Init to the
/// declaration dcl. If DirectInit is true, this is C++ direct
/// initialization rather than copy initialization.
-void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
+void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
+ bool DirectInit, bool TypeMayContainAuto) {
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
- if (RealDecl == 0)
+ if (RealDecl == 0 || RealDecl->isInvalidDecl())
return;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
@@ -4095,14 +4525,32 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
if (getLangOptions().CPlusPlus &&
RealDecl->getLexicalDeclContext()->isRecord() &&
isa<NamedDecl>(RealDecl))
- Diag(RealDecl->getLocation(), diag::err_member_initialization)
- << cast<NamedDecl>(RealDecl)->getDeclName();
+ Diag(RealDecl->getLocation(), diag::err_member_initialization);
else
Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
RealDecl->setInvalidDecl();
return;
}
+ // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
+ VDecl->setParsingAutoInit(false);
+
+ QualType DeducedType;
+ if (!DeduceAutoType(VDecl->getType(), Init, DeducedType)) {
+ Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getType()
+ << Init->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ }
+ VDecl->setType(DeducedType);
+
+ // If this is a redeclaration, check that the type we just deduced matches
+ // the previously declared type.
+ if (VarDecl *Old = VDecl->getPreviousDeclaration())
+ MergeVarDeclTypes(VDecl, Old);
+ }
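A minimal sketch of the C++0x 'auto' deduction wired in here (illustrative; deduction failures are reported via err_auto_var_deduction_failure above):

    auto i = 42;   // 'auto' stands for int
    auto *p = &i;  // deduced as int, so p has type int*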
// A definition must end up with a complete type, which means it must be
@@ -4132,27 +4580,34 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
- // C++ [class.static.data]p4
- // If a static data member is of const integral or const
- // enumeration type, its declaration in the class definition can
- // specify a constant-initializer which shall be an integral
- // constant expression (5.19). In that case, the member can appear
- // in integral constant expressions. The member shall still be
- // defined in a namespace scope if it is used in the program and the
- // namespace scope definition shall not contain an initializer.
- //
- // We already performed a redefinition check above, but for static
- // data members we also need to check whether there was an in-class
- // declaration with an initializer.
const VarDecl* PrevInit = 0;
- if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
- Diag(VDecl->getLocation(), diag::err_redefinition) << VDecl->getDeclName();
- Diag(PrevInit->getLocation(), diag::note_previous_definition);
- return;
- }
+ if (getLangOptions().CPlusPlus) {
+ // C++ [class.static.data]p4
+ // If a static data member is of const integral or const
+ // enumeration type, its declaration in the class definition can
+ // specify a constant-initializer which shall be an integral
+ // constant expression (5.19). In that case, the member can appear
+ // in integral constant expressions. The member shall still be
+ // defined in a namespace scope if it is used in the program and the
+ // namespace scope definition shall not contain an initializer.
+ //
+ // We already performed a redefinition check above, but for static
+ // data members we also need to check whether there was an in-class
+ // declaration with an initializer.
+ if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
+ Diag(VDecl->getLocation(), diag::err_redefinition) << VDecl->getDeclName();
+ Diag(PrevInit->getLocation(), diag::note_previous_definition);
+ return;
+ }
- if (getLangOptions().CPlusPlus && VDecl->hasLocalStorage())
- getCurFunction()->setHasBranchProtectedScope();
+ if (VDecl->hasLocalStorage())
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ }
// Capture the variable that is being initialized and the style of
// initialization.
@@ -4169,7 +4624,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Get the decls type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
- if (VDecl->isBlockVarDecl()) {
+ if (VDecl->isLocalVarDecl()) {
if (VDecl->hasExternalStorage()) { // C99 6.7.8p5
Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
VDecl->setInvalidDecl();
@@ -4200,34 +4655,38 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// static const int value = 17;
// };
- // Attach the initializer
- VDecl->setInit(Init);
+ // Try to perform the initialization regardless.
+ if (!VDecl->isInvalidDecl()) {
+ InitializationSequence InitSeq(*this, Entity, Kind, &Init, 1);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, &Init, 1),
+ &DclT);
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ Init = Result.takeAs<Expr>();
+ }
// C++ [class.mem]p4:
// A member-declarator can contain a constant-initializer only
// if it declares a static member (9.4) of const integral or
// const enumeration type, see 9.4.2.
QualType T = VDecl->getType();
- if (!T->isDependentType() &&
- (!Context.getCanonicalType(T).isConstQualified() ||
- !T->isIntegralOrEnumerationType())) {
- Diag(VDecl->getLocation(), diag::err_member_initialization)
- << VDecl->getDeclName() << Init->getSourceRange();
+
+ // Do nothing on dependent types.
+ if (T->isDependentType()) {
+
+ // Require constness.
+ } else if (!T.isConstQualified()) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
+ << Init->getSourceRange();
VDecl->setInvalidDecl();
- } else {
- // C++ [class.static.data]p4:
- // If a static data member is of const integral or const
- // enumeration type, its declaration in the class definition
- // can specify a constant-initializer which shall be an
- // integral constant expression (5.19).
- if (!Init->isTypeDependent() &&
- !Init->getType()->isIntegralOrEnumerationType()) {
- // We have a non-dependent, non-integral or enumeration type.
- Diag(Init->getSourceRange().getBegin(),
- diag::err_in_class_initializer_non_integral_type)
- << Init->getType() << Init->getSourceRange();
- VDecl->setInvalidDecl();
- } else if (!Init->isTypeDependent() && !Init->isValueDependent()) {
+
+ // We allow integer constant expressions in all cases.
+ } else if (T->isIntegralOrEnumerationType()) {
+ if (!Init->isValueDependent()) {
// Check whether the expression is a constant expression.
llvm::APSInt Value;
SourceLocation Loc;
@@ -4235,12 +4694,37 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
Diag(Loc, diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
- } else if (!VDecl->getType()->isDependentType())
- ImpCastExprToType(Init, VDecl->getType(), CK_IntegralCast);
+ }
+ }
+
+ // We allow floating-point constants as an extension in C++03, and
+ // C++0x has far more complicated rules that we don't really
+ // implement fully.
+ } else {
+ bool Allowed = false;
+ if (getLangOptions().CPlusPlus0x) {
+ Allowed = T->isLiteralType();
+ } else if (T->isFloatingType()) { // also permits complex, which is ok
+ Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
+ << T << Init->getSourceRange();
+ Allowed = true;
+ }
+
+ if (!Allowed) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
+ << T << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+
+ // TODO: there are probably expressions that pass here that shouldn't.
+ } else if (!Init->isValueDependent() &&
+ !Init->isConstantInitializer(Context, false)) {
+ Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
}
}
} else if (VDecl->isFileVarDecl()) {
- if (VDecl->getStorageClass() == SC_Extern &&
+ if (VDecl->getStorageClassAsWritten() == SC_Extern &&
(!getLangOptions().CPlusPlus ||
!Context.getBaseElementType(VDecl->getType()).isConstQualified()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
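Taken together, the rewritten in-class initializer checks above sort out cases roughly like the following (illustrative, C++03 mode assumed):

    struct S {
      static const int a = 17;           // OK: const integral constant expression
      static const float b = 1.0f;       // extension: ext_in_class_initializer_float_type
      static const char *const c = "x";  // error: err_in_class_initializer_bad_type
      static int d = 0;                  // error: err_in_class_initializer_non_const
    };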
@@ -4273,28 +4757,35 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
Init->setType(DclT);
}
- Init = MaybeCreateCXXExprWithTemporaries(Init);
+
+ // If this variable is a local declaration with record type, make sure it
+ // doesn't have a flexible member initialization. We only support this as a
+ // global/static definition.
+ if (VDecl->hasLocalStorage())
+ if (const RecordType *RT = VDecl->getType()->getAs<RecordType>())
+ if (RT->getDecl()->hasFlexibleArrayMember()) {
+ // Check whether the initializer tries to initialize the flexible
+ // array member itself to anything other than an empty initializer list.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ unsigned Index = std::distance(RT->getDecl()->field_begin(),
+ RT->getDecl()->field_end()) - 1;
+ if (Index < ILE->getNumInits() &&
+ !(isa<InitListExpr>(ILE->getInit(Index)) &&
+ cast<InitListExpr>(ILE->getInit(Index))->getNumInits() == 0)) {
+ Diag(VDecl->getLocation(), diag::err_nonstatic_flexible_variable);
+ VDecl->setInvalidDecl();
+ }
+ }
+ }
+
+ // Check any implicit conversions within the expression.
+ CheckImplicitConversions(Init, VDecl->getLocation());
+
+ Init = MaybeCreateExprWithCleanups(Init);
// Attach the initializer to the decl.
VDecl->setInit(Init);
- if (getLangOptions().CPlusPlus) {
- if (!VDecl->isInvalidDecl() &&
- !VDecl->getDeclContext()->isDependentContext() &&
- VDecl->hasGlobalStorage() && !VDecl->isStaticLocal() &&
- !Init->isConstantInitializer(Context,
- VDecl->getType()->isReferenceType()))
- Diag(VDecl->getLocation(), diag::warn_global_constructor)
- << Init->getSourceRange();
-
- // Make sure we mark the destructor as used if necessary.
- QualType InitType = VDecl->getType();
- while (const ArrayType *Array = Context.getAsArrayType(InitType))
- InitType = Context.getBaseElementType(Array);
- if (const RecordType *Record = InitType->getAs<RecordType>())
- FinalizeVarWithDestructor(VDecl, Record);
- }
-
- return;
+ CheckCompleteVariableDeclaration(VDecl);
}
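The new flexible-array check above only rejects non-empty initialization of the flexible member for locals; a sketch (flexible array members themselves being an extension in C++):

    struct Packet { int len; char data[]; };

    Packet global = { 4, { 'a', 'b', 'c', 'd' } };  // file scope: unaffected

    void f() {
      Packet a = { 1, { 'a' } };  // error: err_nonstatic_flexible_variable
      Packet b = { 1, {} };       // OK: flexible member left empty
    }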
/// ActOnInitializerError - Given that there was an error parsing an
@@ -4308,6 +4799,13 @@ void Sema::ActOnInitializerError(Decl *D) {
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) return;
+ // Auto types are meaningless if we can't make sense of the initializer.
+ if (VD->isParsingAutoInit()) {
+ VD->setParsingAutoInit(false);
+ VD->setInvalidDecl();
+ return;
+ }
+
QualType Ty = VD->getType();
if (Ty->isDependentType()) return;
@@ -4332,7 +4830,7 @@ void Sema::ActOnInitializerError(Decl *D) {
}
void Sema::ActOnUninitializedDecl(Decl *RealDecl,
- bool TypeContainsUndeducedAuto) {
+ bool TypeMayContainAuto) {
// If there is no declaration, there was an error parsing it. Just ignore it.
if (RealDecl == 0)
return;
@@ -4341,7 +4839,9 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
QualType Type = Var->getType();
// C++0x [dcl.spec.auto]p3
- if (TypeContainsUndeducedAuto) {
+ if (TypeMayContainAuto && Type->getContainedAutoType()) {
+ Var->setParsingAutoInit(false);
+
Diag(Var->getLocation(), diag::err_auto_var_requires_init)
<< Var->getDeclName() << Type;
Var->setInvalidDecl();
@@ -4365,7 +4865,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
// Block scope. C99 6.7p7: If an identifier for an object is
// declared with no linkage (C99 6.2.2p6), the type for the
// object shall be complete.
- if (!Type->isDependentType() && Var->isBlockVarDecl() &&
+ if (!Type->isDependentType() && Var->isLocalVarDecl() &&
!Var->getLinkage() && !Var->isInvalidDecl() &&
RequireCompleteType(Var->getLocation(), Type,
diag::err_typecheck_decl_incomplete_type))
@@ -4488,22 +4988,62 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
MultiExprArg(*this, 0, 0));
if (Init.isInvalid())
Var->setInvalidDecl();
- else if (Init.get()) {
- Var->setInit(MaybeCreateCXXExprWithTemporaries(Init.takeAs<Expr>()));
-
- if (getLangOptions().CPlusPlus && !Var->isInvalidDecl() &&
- Var->hasGlobalStorage() && !Var->isStaticLocal() &&
- !Var->getDeclContext()->isDependentContext() &&
- !Var->getInit()->isConstantInitializer(Context, false))
- Diag(Var->getLocation(), diag::warn_global_constructor);
- }
+ else if (Init.get())
+ Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
}
- if (!Var->isInvalidDecl() && getLangOptions().CPlusPlus && Record)
- FinalizeVarWithDestructor(Var, Record);
+ CheckCompleteVariableDeclaration(Var);
}
}
+void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
+ if (var->isInvalidDecl()) return;
+
+ // All the following checks are C++ only.
+ if (!getLangOptions().CPlusPlus) return;
+
+ QualType baseType = Context.getBaseElementType(var->getType());
+ if (baseType->isDependentType()) return;
+
+ // __block variables might require us to capture a copy-initializer.
+ if (var->hasAttr<BlocksAttr>()) {
+ // It's currently invalid to ever have a __block variable with an
+ // array type; should we diagnose that here?
+
+ // Regardless, we don't want to ignore array nesting when
+ // constructing this copy.
+ QualType type = var->getType();
+
+ if (type->isStructureOrClassType()) {
+ SourceLocation poi = var->getLocation();
+ Expr *varRef = new (Context) DeclRefExpr(var, type, VK_LValue, poi);
+ ExprResult result =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(poi, type, false),
+ poi, Owned(varRef));
+ if (!result.isInvalid()) {
+ result = MaybeCreateExprWithCleanups(result);
+ Expr *init = result.takeAs<Expr>();
+ Context.setBlockVarCopyInits(var, init);
+ }
+ }
+ }
+
+ // Check for global constructors.
+ if (!var->getDeclContext()->isDependentContext() &&
+ var->hasGlobalStorage() &&
+ !var->isStaticLocal() &&
+ var->getInit() &&
+ !var->getInit()->isConstantInitializer(Context,
+ baseType->isReferenceType()))
+ Diag(var->getLocation(), diag::warn_global_constructor)
+ << var->getInit()->getSourceRange();
+
+ // Require the destructor.
+ if (const RecordType *recordType = baseType->getAs<RecordType>())
+ FinalizeVarWithDestructor(var, recordType);
+}
+
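The consolidated CheckCompleteVariableDeclaration above centralizes the global-constructor warning and destructor handling; e.g. (illustrative):

    struct Logger { Logger(); ~Logger(); };
    Logger log_instance;    // warn_global_constructor: dynamic initialization
    const int answer = 42;  // constant initializer: no warning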
Sema::DeclGroupPtrTy
Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
Decl **Group, unsigned NumDecls) {
@@ -4512,6 +5052,41 @@ Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
if (DS.isTypeSpecOwned())
Decls.push_back(DS.getRepAsDecl());
+ // C++0x [dcl.spec.auto]p7:
+ // If the type deduced for the template parameter U is not the same in each
+ // deduction, the program is ill-formed.
+ // FIXME: When initializer-list support is added, a distinction is needed
+ // between the deduced type U and the deduced type which 'auto' stands for.
+ // auto a = 0, b = { 1, 2, 3 };
+ // is legal because the deduced type U is 'int' in both cases.
+ bool TypeContainsAuto = DS.getTypeSpecType() == DeclSpec::TST_auto;
+ if (TypeContainsAuto && NumDecls > 1) {
+ QualType Deduced;
+ CanQualType DeducedCanon;
+ VarDecl *DeducedDecl = 0;
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ if (VarDecl *D = dyn_cast<VarDecl>(Group[i])) {
+ AutoType *AT = D->getType()->getContainedAutoType();
+ if (AT && AT->isDeduced()) {
+ QualType U = AT->getDeducedType();
+ CanQualType UCanon = Context.getCanonicalType(U);
+ if (Deduced.isNull()) {
+ Deduced = U;
+ DeducedCanon = UCanon;
+ DeducedDecl = D;
+ } else if (DeducedCanon != UCanon) {
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_auto_different_deductions)
+ << Deduced << DeducedDecl->getDeclName()
+ << U << D->getDeclName()
+ << DeducedDecl->getInit()->getSourceRange()
+ << D->getInit()->getSourceRange();
+ break;
+ }
+ }
+ }
+ }
+ }
+
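A sketch of the inconsistent-deduction case this rejects:

    auto a = 1, b = 2;    // OK: 'auto' is int for both declarators
    auto c = 1, d = 2.0;  // error: err_auto_different_deductions (int vs. double)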
for (unsigned i = 0; i != NumDecls; ++i)
if (Decl *D = Group[i])
Decls.push_back(D);
@@ -4543,24 +5118,42 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
DiagnoseFunctionSpecifiers(D);
- // Check that there are no default arguments inside the type of this
- // parameter (C++ only).
- if (getLangOptions().CPlusPlus)
- CheckExtraCXXDefaultArguments(D);
-
TagDecl *OwnedDecl = 0;
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl);
QualType parmDeclType = TInfo->getType();
- if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) {
- // C++ [dcl.fct]p6:
- // Types shall not be defined in return or parameter types.
- Diag(OwnedDecl->getLocation(), diag::err_type_defined_in_param_type)
- << Context.getTypeDeclType(OwnedDecl);
+ if (getLangOptions().CPlusPlus) {
+ // Check that there are no default arguments inside the type of this
+ // parameter.
+ CheckExtraCXXDefaultArguments(D);
+
+ if (OwnedDecl && OwnedDecl->isDefinition()) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ Diag(OwnedDecl->getLocation(), diag::err_type_defined_in_param_type)
+ << Context.getTypeDeclType(OwnedDecl);
+ }
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.getCXXScopeSpec().clear();
+ }
+ }
+
+ // Ensure we have a valid name
+ IdentifierInfo *II = 0;
+ if (D.hasName()) {
+ II = D.getIdentifier();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
+ << GetNameForDeclarator(D).getName().getAsString();
+ D.setInvalidType(true);
+ }
}
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
- IdentifierInfo *II = D.getIdentifier();
if (II) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
ForRedeclaration);
@@ -4595,13 +5188,6 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (D.isInvalidType())
New->setInvalidDecl();
- // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
- if (D.getCXXScopeSpec().isSet()) {
- Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
- << D.getCXXScopeSpec().getRange();
- New->setInvalidDecl();
- }
-
// Add the parameter declaration into this scope.
S->AddDecl(New);
if (II)
@@ -4629,10 +5215,6 @@ ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
ParmVarDecl * const *ParamEnd) {
- if (Diags.getDiagnosticLevel(diag::warn_unused_parameter) ==
- Diagnostic::Ignored)
- return;
-
// Don't diagnose unused-parameter errors in template instantiations; we
// will already have done so in the template itself.
if (!ActiveTemplateInstantiations.empty())
@@ -4647,6 +5229,35 @@ void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
}
}
+void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
+ ParmVarDecl * const *ParamEnd,
+ QualType ReturnTy,
+ NamedDecl *D) {
+ if (LangOpts.NumLargeByValueCopy == 0) // No check.
+ return;
+
+ // Warn if the return value is pass-by-value and larger than the specified
+ // threshold.
+ if (ReturnTy->isPODType()) {
+ unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag(D->getLocation(), diag::warn_return_value_size)
+ << D->getDeclName() << Size;
+ }
+
+ // Warn if any parameter is pass-by-value and larger than the specified
+ // threshold.
+ for (; Param != ParamEnd; ++Param) {
+ QualType T = (*Param)->getType();
+ if (!T->isPODType())
+ continue;
+ unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag((*Param)->getLocation(), diag::warn_parameter_size)
+ << (*Param)->getDeclName() << Size;
+ }
+}
+
ParmVarDecl *Sema::CheckParameter(DeclContext *DC,
TypeSourceInfo *TSInfo, QualType T,
IdentifierInfo *Name,
@@ -4688,9 +5299,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC,
void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls) {
- assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "Not a function declarator!");
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
// for a K&R function.
@@ -4724,14 +5333,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope,
Declarator &D) {
assert(getCurFunctionDecl() == 0 && "Function parsing confused");
- assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "Not a function declarator!");
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
-
- if (FTI.hasPrototype) {
- // FIXME: Diagnose arguments without names in C.
- }
-
+ assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
Decl *DP = HandleDeclarator(ParentScope, D,
@@ -4806,7 +5408,12 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
const FunctionDecl *Definition;
if (FD->hasBody(Definition) &&
!canRedefineFunction(Definition, getLangOptions())) {
- Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
+ if (getLangOptions().GNUMode && Definition->isInlineSpecified() &&
+ Definition->getStorageClass() == SC_Extern)
+ Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
+ << FD->getDeclName() << getLangOptions().CPlusPlus;
+ else
+ Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
}
@@ -4839,10 +5446,8 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
PushDeclContext(FnBodyScope, FD);
// Check the validity of our function parameters
- CheckParmsForFunctionDef(FD);
-
- bool ShouldCheckShadow =
- Diags.getDiagnosticLevel(diag::warn_decl_shadow) != Diagnostic::Ignored;
+ CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
+ /*CheckParameterNames=*/true);
// Introduce our parameters into the function scope
for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
@@ -4851,8 +5456,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
// If this has an identifier, add it to the scope stack.
if (Param->getIdentifier() && FnBodyScope) {
- if (ShouldCheckShadow)
- CheckShadow(FnBodyScope, Param);
+ CheckShadow(FnBodyScope, Param);
PushOnScopeChains(Param, FnBodyScope);
}
@@ -4945,6 +5549,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (!FD->isInvalidDecl()) {
DiagnoseUnusedParameters(FD->param_begin(), FD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(),
+ FD->getResultType(), FD);
// If this is a constructor, we need a vtable.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
@@ -4957,55 +5563,18 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
- MD->setEndLoc(Body->getLocEnd());
- if (!MD->isInvalidDecl())
+ if (Body)
+ MD->setEndLoc(Body->getLocEnd());
+ if (!MD->isInvalidDecl()) {
DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(), MD->param_end(),
+ MD->getResultType(), MD);
+ }
} else {
return 0;
}
// Verify and clean out per-function state.
-
- // Check goto/label use.
- FunctionScopeInfo *CurFn = getCurFunction();
- for (llvm::DenseMap<IdentifierInfo*, LabelStmt*>::iterator
- I = CurFn->LabelMap.begin(), E = CurFn->LabelMap.end(); I != E; ++I) {
- LabelStmt *L = I->second;
-
- // Verify that we have no forward references left. If so, there was a goto
- // or address of a label taken, but no definition of it. Label fwd
- // definitions are indicated with a null substmt.
- if (L->getSubStmt() != 0)
- continue;
-
- // Emit error.
- Diag(L->getIdentLoc(), diag::err_undeclared_label_use) << L->getName();
-
- // At this point, we have gotos that use the bogus label. Stitch it into
- // the function body so that they aren't leaked and that the AST is well
- // formed.
- if (Body == 0) {
- // The whole function wasn't parsed correctly.
- continue;
- }
-
- // Otherwise, the body is valid: we want to stitch the label decl into the
- // function somewhere so that it is properly owned and so that the goto
- // has a valid target. Do this by creating a new compound stmt with the
- // label in it.
-
- // Give the label a sub-statement.
- L->setSubStmt(new (Context) NullStmt(L->getIdentLoc()));
-
- CompoundStmt *Compound = isa<CXXTryStmt>(Body) ?
- cast<CXXTryStmt>(Body)->getTryBlock() :
- cast<CompoundStmt>(Body);
- llvm::SmallVector<Stmt*, 64> Elements(Compound->body_begin(),
- Compound->body_end());
- Elements.push_back(L);
- Compound->setStmts(Context, Elements.data(), Elements.size());
- }
-
if (Body) {
// C++ constructors that have function-try-blocks can't have return
// statements in the handlers of that block. (C++ [except.handle]p14)
@@ -5091,11 +5660,13 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
DeclSpec DS;
unsigned DiagID;
bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID);
- Error = Error; // Silence warning.
+ (void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
Declarator D(DS, Declarator::BlockContext);
- D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
- 0, 0, false, SourceLocation(),
+ D.AddTypeInfo(DeclaratorChunk::getFunction(ParsedAttributes(),
+ false, false, SourceLocation(), 0,
+ 0, 0, true, SourceLocation(),
+ false, SourceLocation(),
false, 0,0,0, Loc, Loc, D),
SourceLocation());
D.SetIdentifier(&II, Loc);
@@ -5154,8 +5725,6 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
}
- if (Context.BuiltinInfo.isNoReturn(BuiltinID))
- FD->setType(Context.getNoReturnType(FD->getType()));
if (Context.BuiltinInfo.isNoThrow(BuiltinID))
FD->addAttr(::new (Context) NoThrowAttr(FD->getLocation(), Context));
if (Context.BuiltinInfo.isConst(BuiltinID))
@@ -5210,17 +5779,46 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
D.getIdentifier(),
TInfo);
- if (const TagType *TT = T->getAs<TagType>()) {
- TagDecl *TD = TT->getDecl();
+ // Bail out immediately if we have an invalid declaration.
+ if (D.isInvalidType()) {
+ NewTD->setInvalidDecl();
+ return NewTD;
+ }
+
+ // C++ [dcl.typedef]p8:
+ // If the typedef declaration defines an unnamed class (or
+ // enum), the first typedef-name declared by the declaration
+ // to be that class type (or enum type) is used to denote the
+ // class type (or enum type) for linkage purposes only.
+ // We need to check whether the type was declared in the declaration.
+ switch (D.getDeclSpec().getTypeSpecType()) {
+ case TST_enum:
+ case TST_struct:
+ case TST_union:
+ case TST_class: {
+ TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+
+ // Do nothing if the tag is not anonymous or already has an
+ // associated typedef (from an earlier typedef in this decl group).
+ if (tagFromDeclSpec->getIdentifier()) break;
+ if (tagFromDeclSpec->getTypedefForAnonDecl()) break;
+
+ // A well-formed anonymous tag must always be a TUK_Definition.
+ assert(tagFromDeclSpec->isThisDeclarationADefinition());
+
+ // The type must match the tag exactly; no qualifiers allowed.
+ if (!Context.hasSameType(T, Context.getTagDeclType(tagFromDeclSpec)))
+ break;
- // If the TagDecl that the TypedefDecl points to is an anonymous decl
- // keep track of the TypedefDecl.
- if (!TD->getIdentifier() && !TD->getTypedefForAnonDecl())
- TD->setTypedefForAnonDecl(NewTD);
+ // Otherwise, set this as the anon-decl typedef for the tag.
+ tagFromDeclSpec->setTypedefForAnonDecl(NewTD);
+ break;
+ }
+
+ default:
+ break;
}
- if (D.isInvalidType())
- NewTD->setInvalidDecl();
return NewTD;
}
@@ -5273,14 +5871,17 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
/// reference/declaration/definition of a tag.
Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
- SourceLocation KWLoc, CXXScopeSpec &SS,
- IdentifierInfo *Name, SourceLocation NameLoc,
- AttributeList *Attr, AccessSpecifier AS,
- MultiTemplateParamsArg TemplateParameterLists,
- bool &OwnedDecl, bool &IsDependent) {
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ MultiTemplateParamsArg TemplateParameterLists,
+ bool &OwnedDecl, bool &IsDependent,
+ bool ScopedEnum, bool ScopedEnumUsesClassTag,
+ TypeResult UnderlyingType) {
// If this is not a definition, it must have a name.
assert((Name != 0 || TUK == TUK_Definition) &&
"Nameless record must be a definition!");
+ assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
@@ -5289,11 +5890,16 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
bool isExplicitSpecialization = false;
unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size();
bool Invalid = false;
- if (TUK != TUK_Reference) {
+
+ // We only need to do this matching if we have template parameters
+ // or a scope specifier, which also conveniently avoids this work
+ // for non-C++ cases.
+ if (NumMatchedTemplateParamLists ||
+ (SS.isNotEmpty() && TUK != TUK_Reference)) {
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(KWLoc, SS,
- (TemplateParameterList**)TemplateParameterLists.get(),
- TemplateParameterLists.size(),
+ TemplateParameterLists.get(),
+ TemplateParameterLists.size(),
TUK == TUK_Friend,
isExplicitSpecialization,
Invalid)) {
@@ -5322,6 +5928,41 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
}
+ // Figure out the underlying type if this is an enum declaration. We need to do
+ // this early, because it's needed to detect if this is an incompatible
+ // redeclaration.
+ llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
+
+ if (Kind == TTK_Enum) {
+ if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
+ // No underlying type explicitly specified, or we failed to parse the
+ // type, default to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ else if (UnderlyingType.get()) {
+ // C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
+ // integral type; any cv-qualification is ignored.
+ TypeSourceInfo *TI = 0;
+ QualType T = GetTypeFromParser(UnderlyingType.get(), &TI);
+ EnumUnderlying = TI;
+
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+
+ if (!T->isDependentType() && !T->isIntegralType(Context)) {
+ Diag(UnderlyingLoc, diag::err_enum_invalid_underlying)
+ << T;
+ // Recover by falling back to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ }
+
+ if (DiagnoseUnexpandedParameterPack(UnderlyingLoc, TI,
+ UPPC_FixedUnderlyingType))
+ EnumUnderlying = Context.IntTy.getTypePtr();
+
+ } else if (getLangOptions().Microsoft)
+ // Microsoft enums are always of int type.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ }
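These rules accept C++0x fixed underlying types and recover from invalid ones, e.g. (illustrative):

    enum Small : unsigned char { A, B };  // OK: fixed underlying type
    enum Bad : float { C };  // error: err_enum_invalid_underlying; recovers
                             // by falling back to int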
+
DeclContext *SearchDC = CurContext;
DeclContext *DC = CurContext;
bool isStdBadAlloc = false;
@@ -5374,7 +6015,9 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// and that current instantiation has any dependent base
// classes, we might find something at instantiation time: treat
// this as a dependent elaborated-type-specifier.
- if (Previous.wasNotFoundInCurrentInstantiation()) {
+ // But this only makes any sense for reference-like lookups.
+ if (Previous.wasNotFoundInCurrentInstantiation() &&
+ (TUK == TUK_Reference || TUK == TUK_Friend)) {
IsDependent = true;
return 0;
}
@@ -5406,6 +6049,10 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
SearchDC = SearchDC->getParent();
}
+ } else if (S->isFunctionPrototypeScope()) {
+ // If this is an enum declaration in function prototype scope, set its
+ // initial context to the translation unit.
+ SearchDC = Context.getTranslationUnitDecl();
}
if (Previous.isSingleResult() &&
@@ -5467,7 +6114,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Find the context where we'll be declaring the tag.
// FIXME: We would like to maintain the current DeclContext as the
// lexical context,
- while (SearchDC->isRecord())
+ while (SearchDC->isRecord() || SearchDC->isTransparentContext())
SearchDC = SearchDC->getParent();
// Find the scope where we'll be declaring the tag.
@@ -5554,6 +6201,41 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
}
+ if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
+ const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
+
+ // All conflicts with previous declarations are recovered by
+ // returning the previous declaration.
+ if (ScopedEnum != PrevEnum->isScoped()) {
+ Diag(KWLoc, diag::err_enum_redeclare_scoped_mismatch)
+ << PrevEnum->isScoped();
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+ return PrevTagDecl;
+ }
+ else if (EnumUnderlying && PrevEnum->isFixed()) {
+ QualType T;
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ T = TI->getType();
+ else
+ T = QualType(EnumUnderlying.get<const Type*>(), 0);
+
+ if (!Context.hasSameUnqualifiedType(T, PrevEnum->getIntegerType())) {
+ Diag(NameLoc.isValid() ? NameLoc : KWLoc,
+ diag::err_enum_redeclare_type_mismatch)
+ << T
+ << PrevEnum->getIntegerType();
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+ return PrevTagDecl;
+ }
+ }
+ else if (!EnumUnderlying.isNull() != PrevEnum->isFixed()) {
+ Diag(KWLoc, diag::err_enum_redeclare_fixed_mismatch)
+ << PrevEnum->isFixed();
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+ return PrevTagDecl;
+ }
+ }
+
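The three redeclaration conflicts diagnosed above correspond to patterns like these (a sketch; each conflict is recovered by returning the previous declaration):

    enum class E1;   // scoped
    enum E1;         // error: err_enum_redeclare_scoped_mismatch

    enum E2 : int;   // fixed underlying type
    enum E2 : long;  // error: err_enum_redeclare_type_mismatch

    enum E3 : int;
    enum E3;         // error: err_enum_redeclare_fixed_mismatch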
if (!Invalid) {
// If this is a use, just return the declaration we found.
@@ -5587,7 +6269,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
} else {
// If the type is currently being defined, complain
// about a nested redefinition.
- TagType *Tag = cast<TagType>(Context.getTagDeclType(PrevTagDecl));
+ const TagType *Tag
+ = cast<TagType>(Context.getTagDeclType(PrevTagDecl));
if (Tag->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
@@ -5689,24 +6372,49 @@ CreateNewDecl:
// PrevDecl.
TagDecl *New;
+ bool IsForwardReference = false;
if (Kind == TTK_Enum) {
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// enum X { A, B, C } D; D should chain to X.
New = EnumDecl::Create(Context, SearchDC, Loc, Name, KWLoc,
- cast_or_null<EnumDecl>(PrevDecl));
+ cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
+ ScopedEnumUsesClassTag, !EnumUnderlying.isNull());
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
+ if (getLangOptions().CPlusPlus0x && cast<EnumDecl>(New)->isFixed()) {
+ // C++0x: 7.2p2: opaque-enum-declaration.
+ // Conflicts are diagnosed above. Do nothing.
+ }
+ else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
Diag(Loc, diag::ext_forward_ref_enum_def)
<< New;
Diag(Def->getLocation(), diag::note_previous_definition);
} else {
- Diag(Loc,
- getLangOptions().CPlusPlus? diag::err_forward_ref_enum
- : diag::ext_forward_ref_enum);
+ unsigned DiagID = diag::ext_forward_ref_enum;
+ if (getLangOptions().Microsoft)
+ DiagID = diag::ext_ms_forward_ref_enum;
+ else if (getLangOptions().CPlusPlus)
+ DiagID = diag::err_forward_ref_enum;
+ Diag(Loc, DiagID);
+
+ // If this is a forward-declared reference to an enumeration, make a
+ // note of it; we won't actually be introducing the declaration into
+ // the declaration context.
+ if (TUK == TUK_Reference)
+ IsForwardReference = true;
}
}
+
+ if (EnumUnderlying) {
+ EnumDecl *ED = cast<EnumDecl>(New);
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
+ ED->setPromotionType(ED->getIntegerType());
+ }
+
} else {
// struct/union/class
@@ -5798,7 +6506,10 @@ CreateNewDecl:
PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
} else if (Name) {
S = getNonFieldDeclScope(S);
- PushOnScopeChains(New, S);
+ PushOnScopeChains(New, S, !IsForwardReference);
+ if (IsForwardReference)
+ SearchDC->makeDeclVisibleInContext(New, /* Recoverable = */ false);
+
} else {
CurContext->addDecl(New);
}
@@ -5823,6 +6534,7 @@ void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
}
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
+ ClassVirtSpecifiers &CVS,
SourceLocation LBraceLoc) {
AdjustDeclIfTemplate(TagD);
CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
@@ -5832,6 +6544,11 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
if (!Record->getIdentifier())
return;
+ if (CVS.isFinalSpecified())
+ Record->addAttr(new (Context) FinalAttr(CVS.getFinalLoc(), Context));
+ if (CVS.isExplicitSpecified())
+ Record->addAttr(new (Context) ExplicitAttr(CVS.getExplicitLoc(), Context));
+
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
// class itself; this is known as the injected-class-name. For
@@ -5842,7 +6559,9 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
CurContext, Record->getLocation(),
Record->getIdentifier(),
Record->getTagKeywordLoc(),
- Record);
+ /*PrevDecl=*/0,
+ /*DelayTypeCreation=*/true);
+ Context.getTypeDeclType(InjectedClassName, Record);
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
@@ -5899,7 +6618,9 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
<< FieldName << FieldTy << BitWidth->getSourceRange();
return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
<< FieldTy << BitWidth->getSourceRange();
- }
+ } else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
+ UPPC_BitFieldWidth))
+ return true;
// If the bit-width is type- or value-dependent, don't try to check
// it now.
@@ -5974,9 +6695,17 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
- if (getLangOptions().CPlusPlus)
+ if (getLangOptions().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DataMemberType)) {
+ D.setInvalidType();
+ T = Context.IntTy;
+ TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ }
+ }
+
DiagnoseFunctionSpecifiers(D);
if (D.getDeclSpec().isThreadSpecified())
@@ -6128,35 +6857,27 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
}
if (!InvalidDecl && getLangOptions().CPlusPlus) {
- CXXRecordDecl* CXXRecord = cast<CXXRecordDecl>(Record);
+ if (Record->isUnion()) {
+ if (const RecordType *RT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (RDecl->getDefinition()) {
+ // C++ [class.union]p1: An object of a class with a non-trivial
+ // constructor, a non-trivial copy constructor, a non-trivial
+ // destructor, or a non-trivial copy assignment operator
+ // cannot be a member of a union, nor can an array of such
+ // objects.
+ // TODO: C++0x alters this restriction significantly.
+ if (CheckNontrivialField(NewFD))
+ NewFD->setInvalidDecl();
+ }
+ }
- if (!T->isPODType())
- CXXRecord->setPOD(false);
- if (!ZeroWidth)
- CXXRecord->setEmpty(false);
- if (T->isReferenceType())
- CXXRecord->setHasTrivialConstructor(false);
-
- if (const RecordType *RT = EltTy->getAs<RecordType>()) {
- CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (RDecl->getDefinition()) {
- if (!RDecl->hasTrivialConstructor())
- CXXRecord->setHasTrivialConstructor(false);
- if (!RDecl->hasTrivialCopyConstructor())
- CXXRecord->setHasTrivialCopyConstructor(false);
- if (!RDecl->hasTrivialCopyAssignment())
- CXXRecord->setHasTrivialCopyAssignment(false);
- if (!RDecl->hasTrivialDestructor())
- CXXRecord->setHasTrivialDestructor(false);
-
- // C++ 9.5p1: An object of a class with a non-trivial
- // constructor, a non-trivial copy constructor, a non-trivial
- // destructor, or a non-trivial copy assignment operator
- // cannot be a member of a union, nor can an array of such
- // objects.
- // TODO: C++0x alters this restriction significantly.
- if (Record->isUnion() && CheckNontrivialField(NewFD))
- NewFD->setInvalidDecl();
+ // C++ [class.union]p1: If a union contains a member of reference type,
+ // the program is ill-formed.
+ if (EltTy->isReferenceType()) {
+ Diag(NewFD->getLocation(), diag::err_union_member_of_reference_type)
+ << NewFD->getDeclName() << EltTy;
+ NewFD->setInvalidDecl();
}
}
}
@@ -6171,18 +6892,6 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
Diag(Loc, diag::warn_attribute_weak_on_field);
NewFD->setAccess(AS);
-
- // C++ [dcl.init.aggr]p1:
- // An aggregate is an array or a class (clause 9) with [...] no
- // private or protected non-static data members (clause 11).
- // A POD must be an aggregate.
- if (getLangOptions().CPlusPlus &&
- (AS == AS_private || AS == AS_protected)) {
- CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
- CXXRecord->setAggregate(false);
- CXXRecord->setPOD(false);
- }
-
return NewFD;
}
@@ -6532,7 +7241,7 @@ void Sema::ActOnFields(Scope* S,
FieldDecl *FD = cast<FieldDecl>(Fields[i]);
// Get the type for the field.
- Type *FDTy = FD->getType().getTypePtr();
+ const Type *FDTy = FD->getType().getTypePtr();
if (!FD->isAnonymousStructOrUnion()) {
// Remember all fields written by the user.
@@ -6563,10 +7272,22 @@ void Sema::ActOnFields(Scope* S,
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
- } else if (FDTy->isIncompleteArrayType() && i == NumFields - 1 &&
- Record && !Record->isUnion()) {
+ } else if (FDTy->isIncompleteArrayType() && Record &&
+ ((i == NumFields - 1 && !Record->isUnion()) ||
+ (getLangOptions().Microsoft &&
+ (i == NumFields - 1 || Record->isUnion())))) {
// Flexible array member.
- if (NumNamedMembers < 1) {
+ // Microsoft is more permissive regarding flexible arrays: it will
+ // accept a flexible array in a union and also as the sole element
+ // of a struct/class.
+ if (getLangOptions().Microsoft) {
+ if (Record->isUnion())
+ Diag(FD->getLocation(), diag::ext_flexible_array_union)
+ << FD->getDeclName();
+ else if (NumFields == 1)
+ Diag(FD->getLocation(), diag::ext_flexible_array_empty_aggregate)
+ << FD->getDeclName() << Record->getTagKind();
+ } else if (NumNamedMembers < 1) {
Diag(FD->getLocation(), diag::err_flexible_array_empty_struct)
<< FD->getDeclName();
FD->setInvalidDecl();
@@ -6581,7 +7302,6 @@ void Sema::ActOnFields(Scope* S,
EnclosingDecl->setInvalidDecl();
continue;
}
-
// Okay, we have a legal flexible array member at the end of the struct.
if (Record)
Record->setHasFlexibleArrayMember(true);
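A sketch of what the Microsoft extension accepts here (illustrative only; the first case is standard C99, the other two are diagnosed as extensions in Microsoft mode):

    struct S    { int n; int tail[]; };  // standard flexible array member
    struct Sole { int tail[]; };         // MS extension: sole member of an aggregate
    union  U    { int tail[]; int x; };  // MS extension: flexible array in a union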
@@ -6641,7 +7361,64 @@ void Sema::ActOnFields(Scope* S,
// Okay, we successfully defined 'Record'.
if (Record) {
- Record->completeDefinition();
+ bool Completed = false;
+ if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ if (!CXXRecord->isInvalidDecl()) {
+ // Set access bits correctly on the directly-declared conversions.
+ UnresolvedSetImpl *Convs = CXXRecord->getConversionFunctions();
+ for (UnresolvedSetIterator I = Convs->begin(), E = Convs->end();
+ I != E; ++I)
+ Convs->setAccess(I, (*I)->getAccess());
+
+ if (!CXXRecord->isDependentType()) {
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+
+ // If we have virtual base classes, we may end up finding multiple
+ // final overriders for a given virtual function. Check for this
+ // problem now.
+ if (CXXRecord->getNumVBases()) {
+ CXXFinalOverriderMap FinalOverriders;
+ CXXRecord->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
+ MEnd = FinalOverriders.end();
+ M != MEnd; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd; ++SO) {
+ assert(SO->second.size() > 0 &&
+ "Virtual function without overridding functions?");
+ if (SO->second.size() == 1)
+ continue;
+
+ // C++ [class.virtual]p2:
+ // In a derived class, if a virtual member function of a base
+ // class subobject has more than one final overrider the
+ // program is ill-formed.
+ Diag(Record->getLocation(), diag::err_multiple_final_overriders)
+ << (NamedDecl *)M->first << Record;
+ Diag(M->first->getLocation(),
+ diag::note_overridden_virtual_function);
+ for (OverridingMethods::overriding_iterator
+ OM = SO->second.begin(),
+ OMEnd = SO->second.end();
+ OM != OMEnd; ++OM)
+ Diag(OM->Method->getLocation(), diag::note_final_overrider)
+ << (NamedDecl *)M->first << OM->Method->getParent();
+
+ Record->setInvalidDecl();
+ }
+ }
+ CXXRecord->completeDefinition(&FinalOverriders);
+ Completed = true;
+ }
+ }
+ }
+ }
+
+ if (!Completed)
+ Record->completeDefinition();
} else {
ObjCIvarDecl **ClsFields =
reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
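The new final-overrider check catches the classic diamond where two siblings each override the same virtual function of a shared virtual base. A hypothetical example of code it rejects:

    struct A { virtual void f(); };
    struct B : virtual A { void f(); };
    struct C : virtual A { void f(); };
    struct D : B, C {};  // error: virtual function 'A::f' has more than one
                         // final overrider in 'D' (B::f and C::f)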
@@ -6695,9 +7472,11 @@ static bool isRepresentableIntegerValue(ASTContext &Context,
assert(T->isIntegralType(Context) && "Integral type required!");
unsigned BitWidth = Context.getIntWidth(T);
- if (Value.isUnsigned() || Value.isNonNegative())
- return Value.getActiveBits() < BitWidth;
-
+ if (Value.isUnsigned() || Value.isNonNegative()) {
+ if (T->isSignedIntegerType())
+ --BitWidth;
+ return Value.getActiveBits() <= BitWidth;
+ }
return Value.getMinSignedBits() <= BitWidth;
}
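The fix reserves one bit for the sign when the target type is signed, so a non-negative value must fit in BitWidth - 1 value bits. A minimal standalone sketch of the corrected logic (illustrative names; __builtin_clzll is a GCC/Clang builtin):

    static bool representable(unsigned long long value, unsigned bitWidth,
                              bool isSigned) {
      // Number of bits needed to hold 'value'; zero needs none.
      unsigned activeBits = value ? 64 - __builtin_clzll(value) : 0;
      if (isSigned)
        --bitWidth;  // one bit is reserved for the sign
      return activeBits <= bitWidth;
    }
    // representable(127, 8, true) == true; representable(128, 8, true) == false,
    // though 128 still fits the unsigned case: representable(128, 8, false) == true.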
@@ -6734,6 +7513,10 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
unsigned IntWidth = Context.Target.getIntWidth();
llvm::APSInt EnumVal(IntWidth);
QualType EltTy;
+
+ if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
+ Val = 0;
+
if (Val) {
if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
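DiagnoseUnexpandedParameterPack now rejects enumerator initializers that mention a template parameter pack without expanding it. A hypothetical C++0x example:

    template <typename ...Ts>
    struct X {
      enum E { ok  = sizeof...(Ts) };  // fine: the pack is expanded
      enum F { bad = sizeof(Ts) };     // error: unexpanded parameter pack 'Ts'
    };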
@@ -6761,12 +7544,30 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
}
}
- // C++0x [dcl.enum]p5:
- // If the underlying type is not fixed, the type of each enumerator
- // is the type of its initializing value:
- // - If an initializer is specified for an enumerator, the
- // initializing value has the same type as the expression.
- EltTy = Val->getType();
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+
+ // C++0x [dcl.enum]p5:
+ // ... if the initializing value of an enumerator cannot be
+ // represented by the underlying type, the program is ill-formed.
+ if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
+ if (getLangOptions().Microsoft) {
+ Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
+ ImpCastExprToType(Val, EltTy, CK_IntegralCast);
+ } else
+ Diag(IdLoc, diag::err_enumerator_too_large)
+ << EltTy;
+ } else
+ ImpCastExprToType(Val, EltTy, CK_IntegralCast);
+ }
+ else {
+ // C++0x [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ // - If an initializer is specified for an enumerator, the
+ // initializing value has the same type as the expression.
+ EltTy = Val->getType();
+ }
}
}
}
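With a fixed underlying type, an initializer that cannot be represented is now ill-formed, except in Microsoft mode where it is downgraded to an extension warning and cast into range. Illustrative example:

    enum E : unsigned char {
      ok  = 255,  // representable in the fixed underlying type
      bad = 256   // error: enumerator value is not representable in 'unsigned char'
    };            // (only a warning, ext_enumerator_too_large, in Microsoft mode)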
@@ -6783,7 +7584,12 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
//
// GCC uses 'int' for its unspecified integral type, as does
// C99 6.7.2.2p3.
- EltTy = Context.IntTy;
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+ }
+ else {
+ EltTy = Context.IntTy;
+ }
} else {
// Assign the last value + 1.
EnumVal = LastEnumConst->getInitVal();
@@ -6803,13 +7609,20 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
// sufficient to contain the incremented value. If no such type
// exists, the program is ill-formed.
QualType T = getNextLargerIntegralType(Context, EltTy);
- if (T.isNull()) {
+ if (T.isNull() || Enum->isFixed()) {
// There is no integral type large enough to represent this
// value. Complain, then allow the value to wrap around.
EnumVal = LastEnumConst->getInitVal();
- EnumVal.zext(EnumVal.getBitWidth() * 2);
- Diag(IdLoc, diag::warn_enumerator_too_large)
- << EnumVal.toString(10);
+ EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
+ ++EnumVal;
+ if (Enum->isFixed())
+ // When the underlying type is fixed, this is ill-formed.
+ Diag(IdLoc, diag::err_enumerator_wrapped)
+ << EnumVal.toString(10)
+ << EltTy;
+ else
+ Diag(IdLoc, diag::warn_enumerator_too_large)
+ << EnumVal.toString(10);
} else {
EltTy = T;
}
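The same rule applies when the implicit +1 increment overflows a fixed underlying type; without a fixed type the old behavior of warning and widening is kept. A hypothetical example of the new error path:

    enum Wrap : unsigned char {
      last = 255,
      next         // error (err_enumerator_wrapped): 256 is not representable
    };             // in the fixed underlying type 'unsigned char'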
@@ -6819,7 +7632,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
// value, then increment.
EnumVal = LastEnumConst->getInitVal();
EnumVal.setIsSigned(EltTy->isSignedIntegerType());
- EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
++EnumVal;
// If we're not in C++, diagnose the overflow of enumerator values,
@@ -6841,7 +7654,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
- EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
EnumVal.setIsSigned(EltTy->isSignedIntegerType());
}
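The EnumVal = EnumVal.zextOrTrunc(...) changes track an LLVM API change: APInt's width-changing operations (zext, trunc, zextOrTrunc, extOrTrunc) now return the resized value instead of mutating in place, so the result must be assigned back. A minimal sketch:

    llvm::APSInt V(8);
    V = V.zextOrTrunc(32);  // correct: keep the returned 32-bit value
    // V.zextOrTrunc(32);   // old in-place idiom: the result would now be discarded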
@@ -6850,11 +7663,10 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
}
-Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl,
- Decl *lastEnumConst,
- SourceLocation IdLoc,
- IdentifierInfo *Id,
- SourceLocation EqualLoc, ExprTy *val) {
+Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ AttributeList *Attr,
+ SourceLocation EqualLoc, ExprTy *val) {
EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
EnumConstantDecl *LastEnumConst =
cast_or_null<EnumConstantDecl>(lastEnumConst);
@@ -6890,11 +7702,25 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl,
}
}
- EnumConstantDecl *New = CheckEnumConstant(TheEnumDecl, LastEnumConst,
- IdLoc, Id, Val);
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every enumerator of every member of class T that is an enumerated
+ // type
+ if (CXXRecordDecl *Record
+ = dyn_cast<CXXRecordDecl>(
+ TheEnumDecl->getDeclContext()->getRedeclContext()))
+ if (Record->getIdentifier() && Record->getIdentifier() == Id)
+ Diag(IdLoc, diag::err_member_name_of_class) << Id;
+
+ EnumConstantDecl *New =
+ CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
- // Register this decl in the current scope stack.
if (New) {
+ // Process attributes.
+ if (Attr) ProcessDeclAttributeList(S, New, Attr);
+
+ // Register this decl in the current scope stack.
New->setAccess(TheEnumDecl->getAccess());
PushOnScopeChains(New, S);
}
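The new check implements C++ [class.mem]p13 for enumerators. A hypothetical example of what it diagnoses:

    struct X {
      enum E { X };  // error: member 'X' has the same name as its class
    };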
@@ -6961,7 +7787,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
}
// Figure out the type that should be used for this enum.
- // FIXME: Support -fshort-enums.
QualType BestType;
unsigned BestWidth;
@@ -6977,8 +7802,19 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
QualType BestPromotionType;
bool Packed = Enum->getAttr<PackedAttr>() ? true : false;
-
- if (NumNegativeBits) {
+ // -fshort-enums is equivalent to specifying the packed attribute on all
+ // enum definitions.
+ if (LangOpts.ShortEnums)
+ Packed = true;
+
+ if (Enum->isFixed()) {
+ BestType = BestPromotionType = Enum->getIntegerType();
+ // We don't need to set BestWidth, because BestType is going to be the type
+ // of the enumerators, but we do anyway because otherwise some compilers
+ // warn that it might be used uninitialized.
+ BestWidth = CharWidth;
+ }
+ else if (NumNegativeBits) {
// If there is a negative value, figure out the smallest integer type (of
// int/long/longlong) that fits.
// If it's packed, check also if it fits a char or a short.
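-fshort-enums now simply forces the packed path, so layout picks the smallest integer type that holds all enumerators, while fixed-type enums bypass the search entirely. Illustrative (sizes assume a typical target with 32-bit int):

    enum Small { A, B };      // sizeof == 1 under -fshort-enums, 4 otherwise
    enum Fixed : long { C };  // always sizeof(long); never packed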
@@ -7081,12 +7917,13 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
}
// Adjust the APSInt value.
- InitVal.extOrTrunc(NewWidth);
+ InitVal = InitVal.extOrTrunc(NewWidth);
InitVal.setIsSigned(NewSign);
ECD->setInitVal(InitVal);
// Adjust the Expr initializer and type.
- if (ECD->getInitExpr())
+ if (ECD->getInitExpr() &&
+ !Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
CK_IntegralCast,
ECD->getInitExpr(),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index 25af73a..cbc940f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -78,6 +78,13 @@ static bool isFunctionOrMethodOrBlock(const Decl *d) {
return isa<BlockDecl>(d);
}
+/// Return true if the given decl has a declarator that should have
+/// been processed by Sema::GetTypeForDeclarator.
+static bool hasDeclarator(const Decl *d) {
+ // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
+ return isa<DeclaratorDecl>(d) || isa<BlockDecl>(d) || isa<TypedefDecl>(d);
+}
+
/// hasFunctionProto - Return true if the given decl has argument
/// information. This decl should have already passed
/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
@@ -127,6 +134,12 @@ static bool isFunctionOrMethodVariadic(const Decl *d) {
}
}
+static bool isInstanceMethod(const Decl *d) {
+ if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(d))
+ return MethodDecl->isInstance();
+ return false;
+}
+
static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
if (!PT)
@@ -191,7 +204,7 @@ static void HandleExtVectorTypeAttr(Scope *scope, Decl *d,
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
return;
}
- sizeExpr = static_cast<Expr *>(Attr.getArg(0));
+ sizeExpr = Attr.getArg(0);
}
// Instantiate/Install the vector type, and let Sema build the type for us.
@@ -242,7 +255,7 @@ static void HandleIBAction(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- S.Diag(Attr.getLoc(), diag::err_attribute_ibaction) << Attr.getName();
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ibaction) << Attr.getName();
}
static void HandleIBOutlet(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -259,7 +272,7 @@ static void HandleIBOutlet(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- S.Diag(Attr.getLoc(), diag::err_attribute_iboutlet) << Attr.getName();
+ S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
}
static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
@@ -274,7 +287,7 @@ static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
// The IBOutletCollection attributes only apply to instance variables of
// Objective-C classes.
if (!(isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d))) {
- S.Diag(Attr.getLoc(), diag::err_attribute_iboutlet) << Attr.getName();
+ S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
return;
}
if (const ValueDecl *VD = dyn_cast<ValueDecl>(d))
@@ -323,7 +336,10 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+ // In C++ the implicit 'this' function parameter also counts, and arguments
+ // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(d);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
// The nonnull attribute only applies to pointers.
llvm::SmallVector<unsigned, 10> NonNullArgs;
@@ -333,7 +349,7 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// The argument must be an integer constant expression.
- Expr *Ex = static_cast<Expr *>(*I);
+ Expr *Ex = *I;
llvm::APSInt ArgNum(32);
if (Ex->isTypeDependent() || Ex->isValueDependent() ||
!Ex->isIntegerConstantExpr(ArgNum, S.Context)) {
@@ -351,9 +367,18 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
--x;
+ if (HasImplicitThisParam) {
+ if (x == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_invalid_implicit_this_argument)
+ << "nonnull" << Ex->getSourceRange();
+ return;
+ }
+ --x;
+ }
// Is the function argument a pointer type?
- QualType T = getFunctionOrMethodArgType(d, x);
+ QualType T = getFunctionOrMethodArgType(d, x).getNonReferenceType();
if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
// FIXME: Should also highlight argument in decl.
S.Diag(Attr.getLoc(), diag::warn_nonnull_pointers_only)
@@ -368,13 +393,30 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// arguments have a nonnull attribute.
if (NonNullArgs.empty()) {
for (unsigned I = 0, E = getFunctionOrMethodNumArgs(d); I != E; ++I) {
- QualType T = getFunctionOrMethodArgType(d, I);
+ QualType T = getFunctionOrMethodArgType(d, I).getNonReferenceType();
if (T->isAnyPointerType() || T->isBlockPointerType())
NonNullArgs.push_back(I);
+ else if (const RecordType *UT = T->getAsUnionType()) {
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ RecordDecl *UD = UT->getDecl();
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ T = it->getType();
+ if (T->isAnyPointerType() || T->isBlockPointerType()) {
+ NonNullArgs.push_back(I);
+ break;
+ }
+ }
+ }
+ }
}
+ // No pointer arguments?
if (NonNullArgs.empty()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
+ // Warn about the trivial case only if the attribute does not come from a
+ // macro instantiation.
+ if (Attr.getLoc().isFileID())
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
return;
}
}
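With the implicit 'this' counted, attribute argument indices on C++ instance methods start at 1 for 'this' itself, which may never be named. A hypothetical example:

    struct S {
      void f(int *p) __attribute__((nonnull(2)));  // OK: index 2 names 'p'
      void g(int *p) __attribute__((nonnull(1)));  // error: index 1 is the
    };                                             // implicit 'this' parameter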
@@ -437,7 +479,10 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
return;
}
- unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+ // In C++ the implicit 'this' function parameter also counts, and arguments
+ // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(d);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
llvm::StringRef Module = AL.getParameterName()->getName();
@@ -450,7 +495,7 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
for (AttributeList::arg_iterator I = AL.arg_begin(), E = AL.arg_end(); I != E;
++I) {
- Expr *IdxExpr = static_cast<Expr *>(*I);
+ Expr *IdxExpr = *I;
llvm::APSInt ArgNum(32);
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent()
|| !IdxExpr->isIntegerConstantExpr(ArgNum, S.Context)) {
@@ -467,6 +512,15 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
continue;
}
--x;
+ if (HasImplicitThisParam) {
+ if (x == 0) {
+ S.Diag(AL.getLoc(), diag::err_attribute_invalid_implicit_this_argument)
+ << "ownership" << IdxExpr->getSourceRange();
+ return;
+ }
+ --x;
+ }
+
switch (K) {
case OwnershipAttr::Takes:
case OwnershipAttr::Holds: {
@@ -485,7 +539,7 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
case OwnershipAttr::Returns: {
if (AL.getNumArgs() > 1) {
// Is the function argument an integer type?
- Expr *IdxExpr = static_cast<Expr *>(AL.getArg(0));
+ Expr *IdxExpr = AL.getArg(0);
llvm::APSInt ArgNum(32);
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent()
|| !IdxExpr->isIntegerConstantExpr(ArgNum, S.Context)) {
@@ -532,11 +586,23 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
start, size));
}
-static bool isStaticVarOrStaticFunciton(Decl *D) {
- if (VarDecl *VD = dyn_cast<VarDecl>(D))
- return VD->getStorageClass() == SC_Static;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- return FD->getStorageClass() == SC_Static;
+/// Whether this declaration has internal linkage for the purposes of
+/// things that want to complain about things not having internal linkage.
+static bool hasEffectivelyInternalLinkage(NamedDecl *D) {
+ switch (D->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ return true;
+
+ // Template instantiations that go from external to unique-external
+ // shouldn't get diagnosed.
+ case UniqueExternalLinkage:
+ return true;
+
+ case ExternalLinkage:
+ return false;
+ }
+ llvm_unreachable("unknown linkage kind!");
return false;
}
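Replacing the storage-class test with a linkage test lets weakref accept anything that is effectively internal, including unique-external template instantiations. Illustrative usage:

    static void impl(void);
    static void f(void) __attribute__((weakref("impl")));  // OK: internal linkage
    void g(void) __attribute__((weakref("impl")));         // error: external linkage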
@@ -547,6 +613,14 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
+ if (!isa<VarDecl>(d) && !isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variables and functions*/;
+ return;
+ }
+
+ NamedDecl *nd = cast<NamedDecl>(d);
+
// gcc rejects
// class c {
// static int a __attribute__((weakref ("v2")));
@@ -560,7 +634,7 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
const DeclContext *Ctx = d->getDeclContext()->getRedeclContext();
if (!Ctx->isFileContext()) {
S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) <<
- dyn_cast<NamedDecl>(d)->getNameAsString();
+ nd->getNameAsString();
return;
}
@@ -582,9 +656,8 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// This looks like a bug in gcc. We reject that for now. We should revisit
// it if this behaviour is actually used.
- if (!isStaticVarOrStaticFunciton(d)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_static) <<
- dyn_cast<NamedDecl>(d)->getNameAsString();
+ if (!hasEffectivelyInternalLinkage(nd)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_static);
return;
}
@@ -593,7 +666,7 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// Should we? How to check that weakref is before or after alias?
if (Attr.getNumArgs() == 1) {
- Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Expr *Arg = Attr.getArg(0);
Arg = Arg->IgnoreParenCasts();
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
@@ -604,7 +677,8 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// GCC will accept anything as the argument of weakref. Should we
// check for an existing decl?
- d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context, Str->getString()));
+ d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
+ Str->getString()));
}
d->addAttr(::new (S.Context) WeakRefAttr(Attr.getLoc(), S.Context));
@@ -617,7 +691,7 @@ static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Expr *Arg = Attr.getArg(0);
Arg = Arg->IgnoreParenCasts();
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
@@ -627,14 +701,37 @@ static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
+ if (S.Context.Target.getTriple().getOS() == llvm::Triple::Darwin) {
+ S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_darwin);
+ return;
+ }
+
// FIXME: check if target symbol exists in current file
- d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context, Str->getString()));
+ d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
+ Str->getString()));
+}
+
+static void HandleNakedAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NakedAttr(Attr.getLoc(), S.Context));
}
static void HandleAlwaysInlineAttr(Decl *d, const AttributeList &Attr,
Sema &S) {
- // check the attribute arguments.
+ // Check the attribute arguments.
if (Attr.getNumArgs() != 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
@@ -642,7 +739,7 @@ static void HandleAlwaysInlineAttr(Decl *d, const AttributeList &Attr,
if (!isa<FunctionDecl>(d)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 0 /*function*/;
+ << Attr.getName() << 0 /*function*/;
return;
}
@@ -650,7 +747,7 @@ static void HandleAlwaysInlineAttr(Decl *d, const AttributeList &Attr,
}
static void HandleMallocAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- // check the attribute arguments.
+ // Check the attribute arguments.
if (Attr.getNumArgs() != 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
@@ -667,10 +764,56 @@ static void HandleMallocAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::warn_attribute_malloc_pointer_only);
}
-static void HandleNoReturnAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- /* Diagnostics (if any) was emitted by Sema::ProcessFnAttr(). */
+static void HandleMayAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) MayAliasAttr(Attr.getLoc(), S.Context));
+}
+
+static void HandleNoCommonAttr(Decl *d, const AttributeList &Attr, Sema &S) {
assert(Attr.isInvalid() == false);
- d->addAttr(::new (S.Context) NoReturnAttr(Attr.getLoc(), S.Context));
+ if (isa<VarDecl>(d))
+ d->addAttr(::new (S.Context) NoCommonAttr(Attr.getLoc(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 12 /* variable */;
+}
+
+static void HandleCommonAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ assert(Attr.isInvalid() == false);
+ if (isa<VarDecl>(d))
+ d->addAttr(::new (S.Context) CommonAttr(Attr.getLoc(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 12 /* variable */;
+}
+
+static void HandleNoReturnAttr(Decl *d, const AttributeList &attr, Sema &S) {
+ if (hasDeclarator(d)) return;
+
+ if (S.CheckNoReturnAttr(attr)) return;
+
+ if (!isa<ObjCMethodDecl>(d)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NoReturnAttr(attr.getLoc(), S.Context));
+}
+
+bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
+ if (attr.getNumArgs() != 0) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ attr.setInvalid();
+ return true;
+ }
+
+ return false;
}
static void HandleAnalyzerNoReturnAttr(Decl *d, const AttributeList &Attr,
@@ -705,10 +848,11 @@ static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
/*
Returning a Vector Class in Registers
- According to the PPU ABI specifications, a class with a single member of vector type is returned in
- memory when used as the return value of a function. This results in inefficient code when implementing
- vector classes. To return the value in a single vector register, add the vecreturn attribute to the class
- definition. This attribute is also applicable to struct types.
+ According to the PPU ABI specifications, a class with a single member of
+ vector type is returned in memory when used as the return value of a function.
+ This results in inefficient code when implementing vector classes. To return
+ the value in a single vector register, add the vecreturn attribute to the
+ class definition. This attribute is also applicable to struct types.
Example:
@@ -724,7 +868,7 @@ static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
return result; // This will be returned in a register
}
*/
- if (!isa<CXXRecordDecl>(d)) {
+ if (!isa<RecordDecl>(d)) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
<< Attr.getName() << 9 /*class*/;
return;
@@ -735,6 +879,28 @@ static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
return;
}
+ RecordDecl *record = cast<RecordDecl>(d);
+ int count = 0;
+
+ if (!isa<CXXRecordDecl>(record)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+
+ if (!cast<CXXRecordDecl>(record)->isPOD()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_pod_record);
+ return;
+ }
+
+ for (RecordDecl::field_iterator iter = record->field_begin();
+ iter != record->field_end(); iter++) {
+ if ((count == 1) || !iter->getType()->isVectorType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+ count++;
+ }
+
d->addAttr(::new (S.Context) VecReturnAttr(Attr.getLoc(), S.Context));
}
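The added checks pin down vecreturn's contract: the class must be a POD C++ record whose only field has vector type. A hypothetical example (vector_size is a GNU extension):

    typedef float v4f __attribute__((vector_size(16)));

    struct __attribute__((vecreturn)) Ok  { v4f v; };         // accepted
    struct __attribute__((vecreturn)) Bad { v4f v; int n; };  // error: only one
                                                              // vector member allowed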
@@ -755,9 +921,9 @@ static void HandleUnusedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
if (!isa<VarDecl>(d) && !isa<ObjCIvarDecl>(d) && !isFunctionOrMethod(d) &&
- !isa<TypeDecl>(d)) {
+ !isa<TypeDecl>(d) && !isa<LabelDecl>(d)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 2 /*variable and function*/;
+ << Attr.getName() << 14 /*variable, function, labels*/;
return;
}
@@ -795,7 +961,7 @@ static void HandleConstructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
int priority = 65535; // FIXME: Do not hardcode such constants.
if (Attr.getNumArgs() > 0) {
- Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ Expr *E = Attr.getArg(0);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
@@ -812,7 +978,8 @@ static void HandleConstructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) ConstructorAttr(Attr.getLoc(), S.Context, priority));
+ d->addAttr(::new (S.Context) ConstructorAttr(Attr.getLoc(), S.Context,
+ priority));
}
static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -825,7 +992,7 @@ static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
int priority = 65535; // FIXME: Do not hardcode such constants.
if (Attr.getNumArgs() > 0) {
- Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ Expr *E = Attr.getArg(0);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
@@ -842,27 +1009,59 @@ static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) DestructorAttr(Attr.getLoc(), S.Context, priority));
+ d->addAttr(::new (S.Context) DestructorAttr(Attr.getLoc(), S.Context,
+ priority));
}
static void HandleDeprecatedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
+ int noArgs = Attr.getNumArgs();
+ if (noArgs > 1) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_wrong_number_arguments) << "0 or 1";
+ return;
+ }
+ // Handle the case where deprecated attribute has a text message.
+ StringLiteral *SE;
+ if (noArgs == 1) {
+ Expr *ArgExpr = Attr.getArg(0);
+ SE = dyn_cast<StringLiteral>(ArgExpr);
+ if (!SE) {
+ S.Diag(ArgExpr->getLocStart(),
+ diag::err_attribute_not_string) << "deprecated";
+ return;
+ }
}
+ else
+ SE = StringLiteral::CreateEmpty(S.Context, 1);
- d->addAttr(::new (S.Context) DeprecatedAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) DeprecatedAttr(Attr.getLoc(), S.Context,
+ SE->getString()));
}
static void HandleUnavailableAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ int noArgs = Attr.getNumArgs();
+ if (noArgs > 1) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_wrong_number_arguments) << "0 or 1";
return;
}
-
- d->addAttr(::new (S.Context) UnavailableAttr(Attr.getLoc(), S.Context));
+ // Handle the case where unavailable attribute has a text message.
+ StringLiteral *SE;
+ if (noArgs == 1) {
+ Expr *ArgExpr = Attr.getArg(0);
+ SE = dyn_cast<StringLiteral>(ArgExpr);
+ if (!SE) {
+ S.Diag(ArgExpr->getLocStart(),
+ diag::err_attribute_not_string) << "unavailable";
+ return;
+ }
+ }
+ else
+ SE = StringLiteral::CreateEmpty(S.Context, 1);
+ d->addAttr(::new (S.Context) UnavailableAttr(Attr.getLoc(), S.Context,
+ SE->getString()));
}
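Both attributes now take an optional message, stored on the attribute and emitted with the diagnostic. Illustrative usage:

    void old_api(void) __attribute__((deprecated("use new_api() instead")));
    void gone(void)    __attribute__((unavailable("removed in this release")));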
static void HandleVisibilityAttr(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -872,7 +1071,7 @@ static void HandleVisibilityAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Expr *Arg = Attr.getArg(0);
Arg = Arg->IgnoreParenCasts();
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
@@ -982,7 +1181,7 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
int sentinel = 0;
if (Attr.getNumArgs() > 0) {
- Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ Expr *E = Attr.getArg(0);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
@@ -1001,7 +1200,7 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
int nullPos = 0;
if (Attr.getNumArgs() > 1) {
- Expr *E = static_cast<Expr *>(Attr.getArg(1));
+ Expr *E = Attr.getArg(1);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
@@ -1046,7 +1245,7 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
QualType Ty = V->getType();
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(d)
- : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
+ : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
if (!cast<FunctionProtoType>(FT)->isVariadic()) {
int m = Ty->isFunctionPointerType() ? 0 : 1;
S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
@@ -1062,7 +1261,8 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
<< Attr.getName() << 6 /*function, method or block */;
return;
}
- d->addAttr(::new (S.Context) SentinelAttr(Attr.getLoc(), S.Context, sentinel, nullPos));
+ d->addAttr(::new (S.Context) SentinelAttr(Attr.getLoc(), S.Context, sentinel,
+ nullPos));
}
static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S) {
@@ -1093,28 +1293,28 @@ static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S)
D->addAttr(::new (S.Context) WarnUnusedResultAttr(Attr.getLoc(), S.Context));
}
-static void HandleWeakAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+static void HandleWeakAttr(Decl *d, const AttributeList &attr, Sema &S) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (attr.getNumArgs() != 0) {
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- /* weak only applies to non-static declarations */
- if (isStaticVarOrStaticFunciton(D)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_weak_static) <<
- dyn_cast<NamedDecl>(D)->getNameAsString();
+ if (!isa<VarDecl>(d) && !isa<FunctionDecl>(d)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << 2 /*variables and functions*/;
return;
}
- // TODO: could also be applied to methods?
- if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 2 /*variable and function*/;
+ NamedDecl *nd = cast<NamedDecl>(d);
+
+ // 'weak' only applies to declarations with external linkage.
+ if (hasEffectivelyInternalLinkage(nd)) {
+ S.Diag(attr.getLoc(), diag::err_attribute_weak_static);
return;
}
- D->addAttr(::new (S.Context) WeakAttr(Attr.getLoc(), S.Context));
+ nd->addAttr(::new (S.Context) WeakAttr(attr.getLoc(), S.Context));
}
static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
@@ -1163,7 +1363,7 @@ static void HandleReqdWorkGroupSize(Decl *D, const AttributeList &Attr,
unsigned WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
- Expr *E = static_cast<Expr *>(Attr.getArg(i));
+ Expr *E = Attr.getArg(i);
llvm::APSInt ArgNum(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(ArgNum, S.Context)) {
@@ -1187,7 +1387,7 @@ static void HandleSectionAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// Make sure that there is a string literal as the section's single
// argument.
- Expr *ArgExpr = static_cast<Expr *>(Attr.getArg(0));
+ Expr *ArgExpr = Attr.getArg(0);
StringLiteral *SE = dyn_cast<StringLiteral>(ArgExpr);
if (!SE) {
S.Diag(ArgExpr->getLocStart(), diag::err_attribute_not_string) << "section";
@@ -1208,7 +1408,8 @@ static void HandleSectionAttr(Decl *D, const AttributeList &Attr, Sema &S) {
return;
}
- D->addAttr(::new (S.Context) SectionAttr(Attr.getLoc(), S.Context, SE->getString()));
+ D->addAttr(::new (S.Context) SectionAttr(Attr.getLoc(), S.Context,
+ SE->getString()));
}
@@ -1262,27 +1463,27 @@ static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// Look up the function
// FIXME: Lookup probably isn't looking in the right place
- // FIXME: The lookup source location should be in the attribute, not the
- // start of the attribute.
NamedDecl *CleanupDecl
- = S.LookupSingleName(S.TUScope, Attr.getParameterName(), Attr.getLoc(),
- Sema::LookupOrdinaryName);
+ = S.LookupSingleName(S.TUScope, Attr.getParameterName(),
+ Attr.getParameterLoc(), Sema::LookupOrdinaryName);
if (!CleanupDecl) {
- S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_arg_not_found) <<
+ S.Diag(Attr.getParameterLoc(), diag::err_attribute_cleanup_arg_not_found) <<
Attr.getParameterName();
return;
}
FunctionDecl *FD = dyn_cast<FunctionDecl>(CleanupDecl);
if (!FD) {
- S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_arg_not_function) <<
- Attr.getParameterName();
+ S.Diag(Attr.getParameterLoc(),
+ diag::err_attribute_cleanup_arg_not_function)
+ << Attr.getParameterName();
return;
}
if (FD->getNumParams() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_func_must_take_one_arg) <<
- Attr.getParameterName();
+ S.Diag(Attr.getParameterLoc(),
+ diag::err_attribute_cleanup_func_must_take_one_arg)
+ << Attr.getParameterName();
return;
}
@@ -1290,14 +1491,16 @@ static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// If this ever proves to be a problem it should be easy to fix.
QualType Ty = S.Context.getPointerType(VD->getType());
QualType ParamTy = FD->getParamDecl(0)->getType();
- if (S.CheckAssignmentConstraints(ParamTy, Ty) != Sema::Compatible) {
- S.Diag(Attr.getLoc(),
+ if (S.CheckAssignmentConstraints(FD->getParamDecl(0)->getLocation(),
+ ParamTy, Ty) != Sema::Compatible) {
+ S.Diag(Attr.getParameterLoc(),
diag::err_attribute_cleanup_func_arg_incompatible_type) <<
Attr.getParameterName() << ParamTy << Ty;
return;
}
d->addAttr(::new (S.Context) CleanupAttr(Attr.getLoc(), S.Context, FD));
+ S.MarkDeclarationReferenced(Attr.getParameterLoc(), FD);
}
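The diagnostics now point at the attribute's argument, and the cleanup function is marked as referenced so it is not flagged or dropped as unused. A minimal usage sketch of the construct being checked (illustrative names):

    static void close_int(int *p) { /* release whatever *p holds */ }

    void f(void) {
      int fd __attribute__((cleanup(close_int))) = 0;
      // ...
    }  // close_int(&fd) runs when 'fd' goes out of scope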
/// Handle __attribute__((format_arg((idx)))) attribute based on
@@ -1312,12 +1515,15 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
<< Attr.getName() << 0 /*function*/;
return;
}
- // FIXME: in C++ the implicit 'this' function parameter also counts. this is
- // needed in order to be compatible with GCC the index must start with 1.
- unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+
+ // In C++ the implicit 'this' function parameter also counts, and arguments
+ // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(d);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
unsigned FirstIdx = 1;
+
// checks for the 2nd argument
- Expr *IdxExpr = static_cast<Expr *>(Attr.getArg(0));
+ Expr *IdxExpr = Attr.getArg(0);
llvm::APSInt Idx(32);
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
!IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
@@ -1334,6 +1540,15 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
unsigned ArgIdx = Idx.getZExtValue() - 1;
+ if (HasImplicitThisParam) {
+ if (ArgIdx == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_implicit_this_argument)
+ << "format_arg" << IdxExpr->getSourceRange();
+ return;
+ }
+ ArgIdx--;
+ }
+
// make sure the format string is really a string
QualType Ty = getFunctionOrMethodArgType(d, ArgIdx);
@@ -1360,7 +1575,8 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) FormatArgAttr(Attr.getLoc(), S.Context, Idx.getZExtValue()));
+ d->addAttr(::new (S.Context) FormatArgAttr(Attr.getLoc(), S.Context,
+ Idx.getZExtValue()));
}
enum FormatAttrKind {
@@ -1387,7 +1603,8 @@ static FormatAttrKind getFormatAttrKind(llvm::StringRef Format) {
if (Format == "scanf" || Format == "printf" || Format == "printf0" ||
Format == "strfmon" || Format == "cmn_err" || Format == "strftime" ||
Format == "NSString" || Format == "CFString" || Format == "vcmn_err" ||
- Format == "zcmn_err")
+ Format == "zcmn_err" ||
+ Format == "kprintf") // OpenBSD.
return SupportedFormat;
if (Format == "gcc_diag" || Format == "gcc_cdiag" ||
@@ -1425,7 +1642,7 @@ static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr,
Attr.setInvalid();
return;
}
- Expr *priorityExpr = static_cast<Expr *>(Attr.getArg(0));
+ Expr *priorityExpr = Attr.getArg(0);
llvm::APSInt priority(32);
if (priorityExpr->isTypeDependent() || priorityExpr->isValueDependent() ||
@@ -1442,7 +1659,8 @@ static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr,
Attr.setInvalid();
return;
}
- d->addAttr(::new (S.Context) InitPriorityAttr(Attr.getLoc(), S.Context, prioritynum));
+ d->addAttr(::new (S.Context) InitPriorityAttr(Attr.getLoc(), S.Context,
+ prioritynum));
}
/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
@@ -1466,7 +1684,10 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+ // In C++ the implicit 'this' function parameter also counts, and arguments
+ // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(d);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
unsigned FirstIdx = 1;
llvm::StringRef Format = Attr.getParameterName()->getName();
@@ -1488,7 +1709,7 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// checks for the 2nd argument
- Expr *IdxExpr = static_cast<Expr *>(Attr.getArg(0));
+ Expr *IdxExpr = Attr.getArg(0);
llvm::APSInt Idx(32);
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
!IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
@@ -1497,16 +1718,6 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- // FIXME: We should handle the implicit 'this' parameter in a more generic
- // way that can be used for other arguments.
- bool HasImplicitThisParam = false;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(d)) {
- if (MD->isInstance()) {
- HasImplicitThisParam = true;
- NumArgs++;
- }
- }
-
if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
<< "format" << 2 << IdxExpr->getSourceRange();
@@ -1518,8 +1729,9 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
if (HasImplicitThisParam) {
if (ArgIdx == 0) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange();
+ S.Diag(Attr.getLoc(),
+ diag::err_format_attribute_implicit_this_format_string)
+ << IdxExpr->getSourceRange();
return;
}
ArgIdx--;
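As with nonnull, format indices on instance methods now count the implicit 'this' as argument 1, and naming it as the format string is an error. A hypothetical example:

    struct Logger {
      // Index 2 is the format string; the implicit 'this' occupies index 1.
      void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
    };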
@@ -1552,7 +1764,7 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// check the 3rd argument
- Expr *FirstArgExpr = static_cast<Expr *>(Attr.getArg(1));
+ Expr *FirstArgExpr = Attr.getArg(1);
llvm::APSInt FirstArg(32);
if (FirstArgExpr->isTypeDependent() || FirstArgExpr->isValueDependent() ||
!FirstArgExpr->isIntegerConstantExpr(FirstArg, S.Context)) {
@@ -1665,7 +1877,7 @@ static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
return;
}
- Expr *ArgExpr = static_cast<Expr *>(Attr.getArg(0));
+ Expr *ArgExpr = Attr.getArg(0);
StringLiteral *SE = dyn_cast<StringLiteral>(ArgExpr);
// Make sure that there is a string literal as the annotation's single
@@ -1674,7 +1886,8 @@ static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(ArgExpr->getLocStart(), diag::err_attribute_not_string) <<"annotate";
return;
}
- d->addAttr(::new (S.Context) AnnotateAttr(Attr.getLoc(), S.Context, SE->getString()));
+ d->addAttr(::new (S.Context) AnnotateAttr(Attr.getLoc(), S.Context,
+ SE->getString()));
}
static void HandleAlignedAttr(Decl *D, const AttributeList &Attr, Sema &S) {
@@ -1693,7 +1906,7 @@ static void HandleAlignedAttr(Decl *D, const AttributeList &Attr, Sema &S) {
return;
}
- S.AddAlignedAttr(Attr.getLoc(), D, static_cast<Expr *>(Attr.getArg(0)));
+ S.AddAlignedAttr(Attr.getLoc(), D, Attr.getArg(0));
}
void Sema::AddAlignedAttr(SourceLocation AttrLoc, Decl *D, Expr *E) {
@@ -1943,7 +2156,123 @@ static void HandleNoInstrumentFunctionAttr(Decl *d, const AttributeList &Attr,
return;
}
- d->addAttr(::new (S.Context) NoInstrumentFunctionAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) NoInstrumentFunctionAttr(Attr.getLoc(),
+ S.Context));
+}
+
+static void HandleConstantAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 12 /*variable*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CUDAConstantAttr(Attr.getLoc(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "constant";
+ }
+}
+
+static void HandleDeviceAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d) && !isa<VarDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CUDADeviceAttr(Attr.getLoc(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "device";
+ }
+}
+
+static void HandleGlobalAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ FunctionDecl *FD = cast<FunctionDecl>(d);
+ if (!FD->getResultType()->isVoidType()) {
+ TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
+ if (FunctionTypeLoc* FTL = dyn_cast<FunctionTypeLoc>(&TL)) {
+ S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+ << FD->getType()
+ << FixItHint::CreateReplacement(FTL->getResultLoc().getSourceRange(),
+ "void");
+ } else {
+ S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+ << FD->getType();
+ }
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CUDAGlobalAttr(Attr.getLoc(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "global";
+ }
+}
+
+static void HandleHostAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CUDAHostAttr(Attr.getLoc(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "host";
+ }
+}
+
+static void HandleSharedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 12 /*variable*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CUDASharedAttr(Attr.getLoc(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "shared";
+ }
}
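These handlers accept the CUDA location attributes only in CUDA mode and enforce that kernels return void. Illustrative declarations (in real CUDA sources, __global__ and friends are macros expanding to these attributes):

    __attribute__((global)) void kernel(int *out);  // OK
    __attribute__((global)) int  bad(int *out);     // error: kernel function
                                                    // must have void return type
    __attribute__((device)) int helper(int x);      // OK: device function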
static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -1968,26 +2297,36 @@ static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
d->addAttr(::new (S.Context) GNUInlineAttr(Attr.getLoc(), S.Context));
}
-static void HandleCallConvAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- // Diagnostic is emitted elsewhere: here we store the (valid) Attr
+static void HandleCallConvAttr(Decl *d, const AttributeList &attr, Sema &S) {
+ if (hasDeclarator(d)) return;
+
+ // Diagnostic is emitted elsewhere: here we store the (valid) attr
// in the Decl node for syntactic reasoning, e.g., pretty-printing.
- assert(Attr.isInvalid() == false);
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(attr, CC))
+ return;
- switch (Attr.getKind()) {
+ if (!isa<ObjCMethodDecl>(d)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ switch (attr.getKind()) {
case AttributeList::AT_fastcall:
- d->addAttr(::new (S.Context) FastCallAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) FastCallAttr(attr.getLoc(), S.Context));
return;
case AttributeList::AT_stdcall:
- d->addAttr(::new (S.Context) StdCallAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) StdCallAttr(attr.getLoc(), S.Context));
return;
case AttributeList::AT_thiscall:
- d->addAttr(::new (S.Context) ThisCallAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) ThisCallAttr(attr.getLoc(), S.Context));
return;
case AttributeList::AT_cdecl:
- d->addAttr(::new (S.Context) CDeclAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) CDeclAttr(attr.getLoc(), S.Context));
return;
case AttributeList::AT_pascal:
- d->addAttr(::new (S.Context) PascalAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) PascalAttr(attr.getLoc(), S.Context));
return;
default:
llvm_unreachable("unexpected attribute kind");
@@ -1995,214 +2334,336 @@ static void HandleCallConvAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
}
-static void HandleRegparmAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- // check the attribute arguments.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
- return;
+static void HandleOpenCLKernelAttr(Decl *d, const AttributeList &Attr, Sema &S){
+ assert(Attr.isInvalid() == false);
+ d->addAttr(::new (S.Context) OpenCLKernelAttr(Attr.getLoc(), S.Context));
+}
+
+bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
+ if (attr.isInvalid())
+ return true;
+
+ if (attr.getNumArgs() != 0) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ attr.setInvalid();
+ return true;
}
- if (!isFunctionOrMethod(d)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 0 /*function*/;
+ // TODO: diagnose uses of these conventions on the wrong target.
+ switch (attr.getKind()) {
+ case AttributeList::AT_cdecl: CC = CC_C; break;
+ case AttributeList::AT_fastcall: CC = CC_X86FastCall; break;
+ case AttributeList::AT_stdcall: CC = CC_X86StdCall; break;
+ case AttributeList::AT_thiscall: CC = CC_X86ThisCall; break;
+ case AttributeList::AT_pascal: CC = CC_X86Pascal; break;
+ default: llvm_unreachable("unexpected attribute kind"); return true;
+ }
+
+ return false;
+}
+
+static void HandleRegparmAttr(Decl *d, const AttributeList &attr, Sema &S) {
+ if (hasDeclarator(d)) return;
+
+ unsigned numParams;
+ if (S.CheckRegparmAttr(attr, numParams))
+ return;
+
+ if (!isa<ObjCMethodDecl>(d)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << 0 /*function*/;
return;
}
- Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArg(0));
+ d->addAttr(::new (S.Context) RegparmAttr(attr.getLoc(), S.Context, numParams));
+}
+
+/// Checks a regparm attribute, returning true if it is ill-formed and
+/// otherwise setting numParams to the appropriate value.
+bool Sema::CheckRegparmAttr(const AttributeList &attr, unsigned &numParams) {
+ if (attr.isInvalid())
+ return true;
+
+ if (attr.getNumArgs() != 1) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ attr.setInvalid();
+ return true;
+ }
+
+ Expr *NumParamsExpr = attr.getArg(0);
llvm::APSInt NumParams(32);
if (NumParamsExpr->isTypeDependent() || NumParamsExpr->isValueDependent() ||
- !NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ !NumParamsExpr->isIntegerConstantExpr(NumParams, Context)) {
+ Diag(attr.getLoc(), diag::err_attribute_argument_not_int)
<< "regparm" << NumParamsExpr->getSourceRange();
- return;
+ attr.setInvalid();
+ return true;
}
- if (S.Context.Target.getRegParmMax() == 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ if (Context.Target.getRegParmMax() == 0) {
+ Diag(attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
<< NumParamsExpr->getSourceRange();
- return;
+ attr.setInvalid();
+ return true;
}
- if (NumParams.getLimitedValue(255) > S.Context.Target.getRegParmMax()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
- << S.Context.Target.getRegParmMax() << NumParamsExpr->getSourceRange();
- return;
+ numParams = NumParams.getZExtValue();
+ if (numParams > Context.Target.getRegParmMax()) {
+ Diag(attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ << Context.Target.getRegParmMax() << NumParamsExpr->getSourceRange();
+ attr.setInvalid();
+ return true;
}
- d->addAttr(::new (S.Context) RegparmAttr(Attr.getLoc(), S.Context,
- NumParams.getZExtValue()));
+ return false;
}
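CheckRegparmAttr centralizes the validation so the type system can reuse it; the count must not exceed the target's register-parameter maximum, and the attribute is rejected outright where that maximum is 0. Illustrative (the maximum is 3 on typical x86 targets):

    void f(int a, int b) __attribute__((regparm(2)));  // OK on x86
    void g(int a) __attribute__((regparm(8)));         // error: exceeds the
                                                       // target maximum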
-static void HandleFinalAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- // check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
+static void HandleLaunchBoundsAttr(Decl *d, const AttributeList &Attr, Sema &S){
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << "1 or 2";
+ return;
+ }
- if (!isa<CXXRecordDecl>(d)
- && (!isa<CXXMethodDecl>(d) || !cast<CXXMethodDecl>(d)->isVirtual())) {
- S.Diag(Attr.getLoc(),
- Attr.isCXX0XAttribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 7 /*virtual method or class*/;
- return;
- }
-
- // FIXME: Conform to C++0x redeclaration rules.
-
- if (d->getAttr<FinalAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "final";
- return;
- }
+ if (!isFunctionOrMethod(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ Expr *MaxThreadsExpr = Attr.getArg(0);
+ llvm::APSInt MaxThreads(32);
+ if (MaxThreadsExpr->isTypeDependent() ||
+ MaxThreadsExpr->isValueDependent() ||
+ !MaxThreadsExpr->isIntegerConstantExpr(MaxThreads, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "launch_bounds" << 1 << MaxThreadsExpr->getSourceRange();
+ return;
+ }
- d->addAttr(::new (S.Context) FinalAttr(Attr.getLoc(), S.Context));
+ llvm::APSInt MinBlocks(32);
+ if (Attr.getNumArgs() > 1) {
+ Expr *MinBlocksExpr = Attr.getArg(1);
+ if (MinBlocksExpr->isTypeDependent() ||
+ MinBlocksExpr->isValueDependent() ||
+ !MinBlocksExpr->isIntegerConstantExpr(MinBlocks, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "launch_bounds" << 2 << MinBlocksExpr->getSourceRange();
+ return;
+ }
+ }
+
+ d->addAttr(::new (S.Context) CUDALaunchBoundsAttr(Attr.getLoc(), S.Context,
+ MaxThreads.getZExtValue(),
+ MinBlocks.getZExtValue()));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "launch_bounds";
+ }
}
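launch_bounds takes a required maxThreadsPerBlock and an optional minBlocksPerMultiprocessor, both integer constant expressions. An illustrative declaration (CUDA's __launch_bounds__ macro expands to this attribute):

    __attribute__((global, launch_bounds(256, 2)))
    void kernel(float *data);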
//===----------------------------------------------------------------------===//
-// C++0x member checking attributes
+// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
-static void HandleBaseCheckAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
-
- if (!isa<CXXRecordDecl>(d)) {
- S.Diag(Attr.getLoc(),
- Attr.isCXX0XAttribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 9 /*class*/;
- return;
- }
-
- if (d->getAttr<BaseCheckAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "base_check";
- return;
- }
-
- d->addAttr(::new (S.Context) BaseCheckAttr(Attr.getLoc(), S.Context));
+static bool isValidSubjectOfNSAttribute(Sema &S, QualType type) {
+ return type->isObjCObjectPointerType() || S.Context.isObjCNSObjectType(type);
+}
+static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) {
+ return type->isPointerType() || isValidSubjectOfNSAttribute(S, type);
}
-static void HandleHidingAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+static void HandleNSConsumedAttr(Decl *d, const AttributeList &attr, Sema &S) {
+ ParmVarDecl *param = dyn_cast<ParmVarDecl>(d);
+ if (!param) {
+ S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << SourceRange(attr.getLoc()) << attr.getName() << 4 /*parameter*/;
return;
}
- if (!isa<RecordDecl>(d->getDeclContext())) {
- // FIXME: It's not the type that's the problem
- S.Diag(Attr.getLoc(),
- Attr.isCXX0XAttribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 11 /*member*/;
- return;
+ bool typeOK, cf;
+ if (attr.getKind() == AttributeList::AT_ns_consumed) {
+ typeOK = isValidSubjectOfNSAttribute(S, param->getType());
+ cf = false;
+ } else {
+ typeOK = isValidSubjectOfCFAttribute(S, param->getType());
+ cf = true;
}
- // FIXME: Conform to C++0x redeclaration rules.
-
- if (d->getAttr<HidingAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "hiding";
+ if (!typeOK) {
+ S.Diag(d->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << SourceRange(attr.getLoc()) << attr.getName() << cf;
return;
}
- d->addAttr(::new (S.Context) HidingAttr(Attr.getLoc(), S.Context));
+ if (cf)
+ param->addAttr(::new (S.Context) CFConsumedAttr(attr.getLoc(), S.Context));
+ else
+ param->addAttr(::new (S.Context) NSConsumedAttr(attr.getLoc(), S.Context));
}
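ns_consumed applies to Objective-C object parameters and cf_consumed to any pointer parameter; both mark the callee as taking ownership of the argument. A C-visible sketch of the CF form (ObjRef is an illustrative stand-in for a real CF type):

    typedef struct Obj *ObjRef;
    void consume(ObjRef __attribute__((cf_consumed)) o);  // OK: pointer parameter
    void bad(int __attribute__((cf_consumed)) x);         // warning: wrong
                                                          // parameter type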
-static void HandleOverrideAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
-
- if (!isa<CXXMethodDecl>(d) || !cast<CXXMethodDecl>(d)->isVirtual()) {
- // FIXME: It's not the type that's the problem
- S.Diag(Attr.getLoc(),
- Attr.isCXX0XAttribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 10 /*virtual method*/;
- return;
- }
-
- // FIXME: Conform to C++0x redeclaration rules.
-
- if (d->getAttr<OverrideAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "override";
+static void HandleNSConsumesSelfAttr(Decl *d, const AttributeList &attr,
+ Sema &S) {
+ if (!isa<ObjCMethodDecl>(d)) {
+ S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << SourceRange(attr.getLoc()) << attr.getName() << 13 /*method*/;
return;
}
- d->addAttr(::new (S.Context) OverrideAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) NSConsumesSelfAttr(attr.getLoc(), S.Context));
}
-//===----------------------------------------------------------------------===//
-// Checker-specific attribute handlers.
-//===----------------------------------------------------------------------===//
-
-static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &Attr,
+static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &attr,
Sema &S) {
- QualType RetTy;
+ QualType returnType;
if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d))
- RetTy = MD->getResultType();
+ returnType = MD->getResultType();
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(d))
- RetTy = FD->getResultType();
+ returnType = FD->getResultType();
else {
- SourceLocation L = Attr.getLoc();
S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << SourceRange(L, L) << Attr.getName() << 3 /* function or method */;
+ << SourceRange(attr.getLoc()) << attr.getName()
+ << 3 /* function or method */;
return;
}
- if (!(S.Context.isObjCNSObjectType(RetTy) || RetTy->getAs<PointerType>()
- || RetTy->getAs<ObjCObjectPointerType>())) {
- SourceLocation L = Attr.getLoc();
+ bool typeOK;
+ bool cf;
+ switch (attr.getKind()) {
+ default: llvm_unreachable("invalid ownership attribute"); return;
+ case AttributeList::AT_ns_returns_autoreleased:
+ case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_ns_returns_not_retained:
+ typeOK = isValidSubjectOfNSAttribute(S, returnType);
+ cf = false;
+ break;
+
+ case AttributeList::AT_cf_returns_retained:
+ case AttributeList::AT_cf_returns_not_retained:
+ typeOK = isValidSubjectOfCFAttribute(S, returnType);
+ cf = true;
+ break;
+ }
+
+ if (!typeOK) {
S.Diag(d->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
- << SourceRange(L, L) << Attr.getName();
+ << SourceRange(attr.getLoc())
+ << attr.getName() << isa<ObjCMethodDecl>(d) << cf;
return;
}
- switch (Attr.getKind()) {
+ switch (attr.getKind()) {
default:
assert(0 && "invalid ownership attribute");
return;
+ case AttributeList::AT_ns_returns_autoreleased:
+ d->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(attr.getLoc(),
+ S.Context));
+ return;
case AttributeList::AT_cf_returns_not_retained:
- d->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(attr.getLoc(),
+ S.Context));
return;
case AttributeList::AT_ns_returns_not_retained:
- d->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(attr.getLoc(),
+ S.Context));
return;
case AttributeList::AT_cf_returns_retained:
- d->addAttr(::new (S.Context) CFReturnsRetainedAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) CFReturnsRetainedAttr(attr.getLoc(),
+ S.Context));
return;
case AttributeList::AT_ns_returns_retained:
- d->addAttr(::new (S.Context) NSReturnsRetainedAttr(Attr.getLoc(), S.Context));
+ d->addAttr(::new (S.Context) NSReturnsRetainedAttr(attr.getLoc(),
+ S.Context));
return;
};
}
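
These handlers validate the static analyzer's Cocoa/CoreFoundation
ownership annotations; typical uses look like the following
(illustrative declarations, not from the patch):

    @interface Cache : NSObject
    // Return value comes back at +1 retain count.
    - (NSString *)copyName __attribute__((ns_returns_retained));
    // The method consumes a reference held by 'obj'.
    - (void)addObject:(id) __attribute__((ns_consumed)) obj;
    // The receiver itself is consumed, as in release-like methods.
    - (void)releaseSelf __attribute__((ns_consumes_self));
    @end

    // The cf_ variants additionally accept plain C pointer types.
    CFStringRef CopyLabel(void) __attribute__((cf_returns_retained));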
static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
return Attr.getKind() == AttributeList::AT_dllimport ||
- Attr.getKind() == AttributeList::AT_dllexport;
+ Attr.getKind() == AttributeList::AT_dllexport ||
+ Attr.getKind() == AttributeList::AT_uuid;
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
+static void HandleUuidAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (S.LangOpts.Microsoft || S.LangOpts.Borland) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ Expr *Arg = Attr.getArg(0);
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+ if (Str == 0 || Str->isWide()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "uuid" << 1;
+ return;
+ }
+
+ llvm::StringRef StrRef = Str->getString();
+
+ bool IsCurly = StrRef.size() > 1 && StrRef.front() == '{' &&
+ StrRef.back() == '}';
+
+ // Validate GUID length.
+ if (IsCurly && StrRef.size() != 38) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ if (!IsCurly && StrRef.size() != 36) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+
+ // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
+ // "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}"
+ llvm::StringRef::iterator I = StrRef.begin();
+ if (IsCurly) // Skip the optional '{'
+ ++I;
+
+ for (int i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (*I != '-') {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ } else if (!isxdigit(*I)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ I++;
+ }
+
+ d->addAttr(::new (S.Context) UuidAttr(Attr.getLoc(), S.Context,
+ Str->getString()));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "uuid";
}
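
Concretely, the two GUID spellings the validation loop accepts are
(hypothetical interface names and values):

    struct __declspec(uuid("{6B29FC40-CA47-1067-B31D-00DD010662DA}")) IThing;
    struct __declspec(uuid("6B29FC40-CA47-1067-B31D-00DD010662DA")) IOther;
    // Wrong length, a misplaced '-', or a non-hex digit is rejected
    // with err_attribute_uuid_malformed_guid.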
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
-/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
-/// the attribute applies to decls. If the attribute is a type attribute, just
-/// silently ignore it if a GNU attribute. FIXME: Applying a C++0x attribute to
-/// the wrong thing is illegal (C++0x [dcl.attr.grammar]/4).
-static void ProcessDeclAttribute(Scope *scope, Decl *D,
- const AttributeList &Attr, Sema &S) {
- if (Attr.isInvalid())
- return;
+static void ProcessNonInheritableDeclAttr(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) {
+ switch (Attr.getKind()) {
+ case AttributeList::AT_device: HandleDeviceAttr (D, Attr, S); break;
+ case AttributeList::AT_host: HandleHostAttr (D, Attr, S); break;
+ case AttributeList::AT_overloadable:HandleOverloadableAttr(D, Attr, S); break;
+ default:
+ break;
+ }
+}
- if (Attr.isDeclspecAttribute() && !isKnownDeclSpecAttr(Attr))
- // FIXME: Try to deal with other __declspec attributes!
- return;
+static void ProcessInheritableDeclAttr(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) {
switch (Attr.getKind()) {
case AttributeList::AT_IBAction: HandleIBAction(D, Attr, S); break;
case AttributeList::AT_IBOutlet: HandleIBOutlet(D, Attr, S); break;
@@ -2211,9 +2672,17 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_address_space:
case AttributeList::AT_objc_gc:
case AttributeList::AT_vector_size:
+ case AttributeList::AT_neon_vector_type:
+ case AttributeList::AT_neon_polyvector_type:
// Ignore these; they are type attributes, handled by
// ProcessTypeAttributes.
break;
+ case AttributeList::AT_device:
+ case AttributeList::AT_host:
+ case AttributeList::AT_overloadable:
+ // Ignore these; they are non-inheritable attributes, handled
+ // by ProcessNonInheritableDeclAttr.
+ break;
case AttributeList::AT_alias: HandleAliasAttr (D, Attr, S); break;
case AttributeList::AT_aligned: HandleAlignedAttr (D, Attr, S); break;
case AttributeList::AT_always_inline:
@@ -2221,33 +2690,45 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_analyzer_noreturn:
HandleAnalyzerNoReturnAttr (D, Attr, S); break;
case AttributeList::AT_annotate: HandleAnnotateAttr (D, Attr, S); break;
- case AttributeList::AT_base_check: HandleBaseCheckAttr (D, Attr, S); break;
case AttributeList::AT_carries_dependency:
HandleDependencyAttr (D, Attr, S); break;
+ case AttributeList::AT_common: HandleCommonAttr (D, Attr, S); break;
+ case AttributeList::AT_constant: HandleConstantAttr (D, Attr, S); break;
case AttributeList::AT_constructor: HandleConstructorAttr (D, Attr, S); break;
case AttributeList::AT_deprecated: HandleDeprecatedAttr (D, Attr, S); break;
case AttributeList::AT_destructor: HandleDestructorAttr (D, Attr, S); break;
case AttributeList::AT_ext_vector_type:
HandleExtVectorTypeAttr(scope, D, Attr, S);
break;
- case AttributeList::AT_final: HandleFinalAttr (D, Attr, S); break;
case AttributeList::AT_format: HandleFormatAttr (D, Attr, S); break;
case AttributeList::AT_format_arg: HandleFormatArgAttr (D, Attr, S); break;
+ case AttributeList::AT_global: HandleGlobalAttr (D, Attr, S); break;
case AttributeList::AT_gnu_inline: HandleGNUInlineAttr (D, Attr, S); break;
- case AttributeList::AT_hiding: HandleHidingAttr (D, Attr, S); break;
+ case AttributeList::AT_launch_bounds:
+ HandleLaunchBoundsAttr(D, Attr, S);
+ break;
case AttributeList::AT_mode: HandleModeAttr (D, Attr, S); break;
case AttributeList::AT_malloc: HandleMallocAttr (D, Attr, S); break;
+ case AttributeList::AT_may_alias: HandleMayAliasAttr (D, Attr, S); break;
+ case AttributeList::AT_nocommon: HandleNoCommonAttr (D, Attr, S); break;
case AttributeList::AT_nonnull: HandleNonNullAttr (D, Attr, S); break;
case AttributeList::AT_ownership_returns:
case AttributeList::AT_ownership_takes:
case AttributeList::AT_ownership_holds:
HandleOwnershipAttr (D, Attr, S); break;
+ case AttributeList::AT_naked: HandleNakedAttr (D, Attr, S); break;
case AttributeList::AT_noreturn: HandleNoReturnAttr (D, Attr, S); break;
case AttributeList::AT_nothrow: HandleNothrowAttr (D, Attr, S); break;
- case AttributeList::AT_override: HandleOverrideAttr (D, Attr, S); break;
+ case AttributeList::AT_shared: HandleSharedAttr (D, Attr, S); break;
case AttributeList::AT_vecreturn: HandleVecReturnAttr (D, Attr, S); break;
// Checker-specific.
+ case AttributeList::AT_cf_consumed:
+ case AttributeList::AT_ns_consumed: HandleNSConsumedAttr (D, Attr, S); break;
+ case AttributeList::AT_ns_consumes_self:
+ HandleNSConsumesSelfAttr(D, Attr, S); break;
+
+ case AttributeList::AT_ns_returns_autoreleased:
case AttributeList::AT_ns_returns_not_retained:
case AttributeList::AT_cf_returns_not_retained:
case AttributeList::AT_ns_returns_retained:
@@ -2277,7 +2758,6 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_objc_exception:
HandleObjCExceptionAttr(D, Attr, S);
break;
- case AttributeList::AT_overloadable:HandleOverloadableAttr(D, Attr, S); break;
case AttributeList::AT_nsobject: HandleObjCNSObject (D, Attr, S); break;
case AttributeList::AT_blocks: HandleBlocksAttr (D, Attr, S); break;
case AttributeList::AT_sentinel: HandleSentinelAttr (D, Attr, S); break;
@@ -2300,6 +2780,12 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_pascal:
HandleCallConvAttr(D, Attr, S);
break;
+ case AttributeList::AT_opencl_kernel_function:
+ HandleOpenCLKernelAttr(D, Attr, S);
+ break;
+ case AttributeList::AT_uuid:
+ HandleUuidAttr(D, Attr, S);
+ break;
default:
// Ask target about the attribute.
const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema();
@@ -2310,17 +2796,40 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
}
}
+/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
+/// the attribute applies to decls. If the attribute is a type attribute, just
+/// silently ignore it if it is a GNU attribute. FIXME: Applying a C++0x
+/// attribute to the wrong thing is illegal (C++0x [dcl.attr.grammar]/4).
+static void ProcessDeclAttribute(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S,
+ bool NonInheritable, bool Inheritable) {
+ if (Attr.isInvalid())
+ return;
+
+ if (Attr.isDeclspecAttribute() && !isKnownDeclSpecAttr(Attr))
+ // FIXME: Try to deal with other __declspec attributes!
+ return;
+
+ if (NonInheritable)
+ ProcessNonInheritableDeclAttr(scope, D, Attr, S);
+
+ if (Inheritable)
+ ProcessInheritableDeclAttr(scope, D, Attr, S);
+}
+
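
The NonInheritable/Inheritable split lets a caller run the two classes
of attributes at different points in declaration processing; a
hypothetical caller might look like this (sketch only, using the new
ProcessDeclAttributeList signature from this patch):

    // Apply non-inheritable attributes (device/host/overloadable)
    // first, then the inheritable ones once the decl is fully formed.
    static void processAttrsTwoPhase(Scope *S, Decl *D,
                                     const AttributeList *List,
                                     Sema &SemaRef) {
      SemaRef.ProcessDeclAttributeList(S, D, List, /*NonInheritable=*/true,
                                       /*Inheritable=*/false);
      // ... work that depends only on the non-inheritable set ...
      SemaRef.ProcessDeclAttributeList(S, D, List, /*NonInheritable=*/false,
                                       /*Inheritable=*/true);
    }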
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
/// attribute list to the specified decl, ignoring any type attributes.
-void Sema::ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AttrList) {
+void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
+ const AttributeList *AttrList,
+ bool NonInheritable, bool Inheritable) {
for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- ProcessDeclAttribute(S, D, *l, *this);
+ ProcessDeclAttribute(S, D, *l, *this, NonInheritable, Inheritable);
}
// GCC accepts
// static int a9 __attribute__((weakref));
// but that looks really pointless. We reject it.
- if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
+ if (Inheritable && D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias) <<
dyn_cast<NamedDecl>(D)->getNameAsString();
return;
@@ -2380,22 +2889,27 @@ void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
/// ProcessDeclAttributes - Given a declarator (PD) with attributes indicated in
/// it, apply them to D. This is a bit tricky because PD can have attributes
/// specified in many different places, and we need to find and apply them all.
-void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
- // Handle #pragma weak
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- if (ND->hasLinkage()) {
- WeakInfo W = WeakUndeclaredIdentifiers.lookup(ND->getIdentifier());
- if (W != WeakInfo()) {
- // Identifier referenced by #pragma weak before it was declared
- DeclApplyPragmaWeak(S, ND, W);
- WeakUndeclaredIdentifiers[ND->getIdentifier()] = W;
+void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
+ bool NonInheritable, bool Inheritable) {
+ // It's valid to "forward-declare" #pragma weak, in which case we
+ // have to apply the pending weak info once the declaration appears.
+ if (Inheritable && !WeakUndeclaredIdentifiers.empty()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ if (IdentifierInfo *Id = ND->getIdentifier()) {
+ llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator I
+ = WeakUndeclaredIdentifiers.find(Id);
+ if (I != WeakUndeclaredIdentifiers.end() && ND->hasLinkage()) {
+ WeakInfo W = I->second;
+ DeclApplyPragmaWeak(S, ND, W);
+ WeakUndeclaredIdentifiers[Id] = W;
+ }
}
}
}
// Apply decl attributes from the DeclSpec if present.
- if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes())
- ProcessDeclAttributeList(S, D, Attrs);
+ if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes().getList())
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
// Walk the declarator structure, applying decl attributes that were in a type
// position to the decl itself. This handles cases like:
@@ -2403,66 +2917,84 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// when X is a decl attribute.
for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs())
- ProcessDeclAttributeList(S, D, Attrs);
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
// Finally, apply any attributes on the decl itself.
if (const AttributeList *Attrs = PD.getAttributes())
- ProcessDeclAttributeList(S, D, Attrs);
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
}
-/// PushParsingDeclaration - Enter a new "scope" of deprecation
-/// warnings.
-///
-/// The state token we use is the start index of this scope
-/// on the warning stack.
-Sema::ParsingDeclStackState Sema::PushParsingDeclaration() {
- ParsingDeclDepth++;
- return (ParsingDeclStackState) DelayedDiagnostics.size();
+// This duplicates a vector push_back but hides the need to know the
+// size of the type.
+void Sema::DelayedDiagnostics::add(const DelayedDiagnostic &diag) {
+ assert(StackSize <= StackCapacity);
+
+ // Grow the stack if necessary.
+ if (StackSize == StackCapacity) {
+ unsigned newCapacity = 2 * StackCapacity + 2;
+ char *newBuffer = new char[newCapacity * sizeof(DelayedDiagnostic)];
+ const char *oldBuffer = (const char*) Stack;
+
+ if (StackCapacity)
+ memcpy(newBuffer, oldBuffer, StackCapacity * sizeof(DelayedDiagnostic));
+
+ delete[] oldBuffer;
+ Stack = reinterpret_cast<sema::DelayedDiagnostic*>(newBuffer);
+ StackCapacity = newCapacity;
+ }
+
+ assert(StackSize < StackCapacity);
+ new (&Stack[StackSize++]) DelayedDiagnostic(diag);
}
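
The add() above hand-rolls amortized-doubling growth over raw character
storage so the header does not need DelayedDiagnostic's size. The same
pattern in isolation (generic sketch; like the original, it is safe
only for types that tolerate being moved with memcpy):

    #include <cstring>
    #include <new>

    template <typename T> struct GrowStack {
      T *Stack = nullptr;
      unsigned Size = 0, Capacity = 0;

      void push(const T &V) {
        if (Size == Capacity) {
          unsigned NewCap = 2 * Capacity + 2;   // same growth policy
          char *NewBuf = new char[NewCap * sizeof(T)];
          if (Capacity)                          // bitwise move of old data
            std::memcpy(NewBuf, reinterpret_cast<char *>(Stack),
                        Capacity * sizeof(T));
          delete[] reinterpret_cast<char *>(Stack);
          Stack = reinterpret_cast<T *>(NewBuf);
          Capacity = NewCap;
        }
        new (&Stack[Size++]) T(V);               // placement-new copy
      }
    };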
-void Sema::PopParsingDeclaration(ParsingDeclStackState S, Decl *D) {
- assert(ParsingDeclDepth > 0 && "empty ParsingDeclaration stack");
- ParsingDeclDepth--;
+void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
+ Decl *decl) {
+ DelayedDiagnostics &DD = S.DelayedDiagnostics;
- if (DelayedDiagnostics.empty())
- return;
+ // Check the invariants.
+ assert(DD.StackSize >= state.SavedStackSize);
+ assert(state.SavedStackSize >= DD.ActiveStackBase);
+ assert(DD.ParsingDepth > 0);
- unsigned SavedIndex = (unsigned) S;
- assert(SavedIndex <= DelayedDiagnostics.size() &&
- "saved index is out of bounds");
+ // Drop the parsing depth.
+ DD.ParsingDepth--;
- unsigned E = DelayedDiagnostics.size();
+ // If there are no active diagnostics, we're done.
+ if (DD.StackSize == DD.ActiveStackBase)
+ return;
// We only want to actually emit delayed diagnostics when we
// successfully parsed a decl.
- if (D) {
- // We really do want to start with 0 here. We get one push for a
+ if (decl) {
+ // We emit all the active diagnostics, not just those starting
+ // from the saved state. The idea is this: we get one push for a
// decl spec and another for each declarator; in a decl group like:
// deprecated_typedef foo, *bar, baz();
// only the declarator pops will be passed decls. This is correct;
// we really do need to consider delayed diagnostics from the decl spec
// for each of the different declarations.
- for (unsigned I = 0; I != E; ++I) {
- if (DelayedDiagnostics[I].Triggered)
+ for (unsigned i = DD.ActiveStackBase, e = DD.StackSize; i != e; ++i) {
+ DelayedDiagnostic &diag = DD.Stack[i];
+ if (diag.Triggered)
continue;
- switch (DelayedDiagnostics[I].Kind) {
+ switch (diag.Kind) {
case DelayedDiagnostic::Deprecation:
- HandleDelayedDeprecationCheck(DelayedDiagnostics[I], D);
+ S.HandleDelayedDeprecationCheck(diag, decl);
break;
case DelayedDiagnostic::Access:
- HandleDelayedAccessCheck(DelayedDiagnostics[I], D);
+ S.HandleDelayedAccessCheck(diag, decl);
break;
}
}
}
// Destroy all the delayed diagnostics we're about to pop off.
- for (unsigned I = SavedIndex; I != E; ++I)
- DelayedDiagnostics[I].destroy();
+ for (unsigned i = state.SavedStackSize, e = DD.StackSize; i != e; ++i)
+ DD.Stack[i].destroy();
- DelayedDiagnostics.set_size(SavedIndex);
+ DD.StackSize = state.SavedStackSize;
}
static bool isDeclDeprecated(Decl *D) {
@@ -2479,20 +3011,34 @@ void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
return;
DD.Triggered = true;
- Diag(DD.Loc, diag::warn_deprecated)
- << DD.DeprecationData.Decl->getDeclName();
+ if (!DD.getDeprecationMessage().empty())
+ Diag(DD.Loc, diag::warn_deprecated_message)
+ << DD.getDeprecationDecl()->getDeclName()
+ << DD.getDeprecationMessage();
+ else
+ Diag(DD.Loc, diag::warn_deprecated)
+ << DD.getDeprecationDecl()->getDeclName();
}
-void Sema::EmitDeprecationWarning(NamedDecl *D, SourceLocation Loc) {
+void Sema::EmitDeprecationWarning(NamedDecl *D, llvm::StringRef Message,
+ SourceLocation Loc,
+ bool UnknownObjCClass) {
// Delay if we're currently parsing a declaration.
- if (ParsingDeclDepth) {
- DelayedDiagnostics.push_back(DelayedDiagnostic::makeDeprecation(Loc, D));
+ if (DelayedDiagnostics.shouldDelayDiagnostics()) {
+ DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D, Message));
return;
}
// Otherwise, don't warn if our current context is deprecated.
if (isDeclDeprecated(cast<Decl>(CurContext)))
return;
-
- Diag(Loc, diag::warn_deprecated) << D->getDeclName();
+ if (!Message.empty())
+ Diag(Loc, diag::warn_deprecated_message) << D->getDeclName()
+ << Message;
+ else {
+ if (!UnknownObjCClass)
+ Diag(Loc, diag::warn_deprecated) << D->getDeclName();
+ else
+ Diag(Loc, diag::warn_deprecated_fwdclass_message) << D->getDeclName();
+ }
}
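
The new message-aware path corresponds to the deprecated attribute
taking a string argument; for example (illustrative):

    void oldAPI(void) __attribute__((deprecated("use newAPI() instead")));

    void caller(void) {
      oldAPI();   // emits warn_deprecated_message carrying the text
                  // "use newAPI() instead"; with no message string the
                  // plain warn_deprecated form is used instead
    }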
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index 63acdb5..e8abab8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -63,8 +63,7 @@ namespace {
/// VisitExpr - Visit all of the children of this expression.
bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
bool IsInvalid = false;
- for (Stmt::child_iterator I = Node->child_begin(),
- E = Node->child_end(); I != E; ++I)
+ for (Stmt::child_range I = Node->children(); I; ++I)
IsInvalid |= Visit(*I);
return IsInvalid;
}
@@ -90,7 +89,7 @@ namespace {
// C++ [dcl.fct.default]p7
// Local variables shall not be used in default argument
// expressions.
- if (VDecl->isBlockVarDecl())
+ if (VDecl->isLocalVarDecl())
return S->Diag(DRE->getSourceRange().getBegin(),
diag::err_param_default_argument_references_local)
<< VDecl->getDeclName() << DefaultArg->getSourceRange();
@@ -125,21 +124,35 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
// the same semantic constraints as the initializer expression in
// a declaration of a variable of the parameter type, using the
// copy-initialization semantics (8.5).
- InitializedEntity Entity = InitializedEntity::InitializeParameter(Param);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Param);
InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(),
EqualLoc);
InitializationSequence InitSeq(*this, Entity, Kind, &Arg, 1);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &Arg, 1));
+ MultiExprArg(*this, &Arg, 1));
if (Result.isInvalid())
return true;
Arg = Result.takeAs<Expr>();
- Arg = MaybeCreateCXXExprWithTemporaries(Arg);
+ CheckImplicitConversions(Arg, EqualLoc);
+ Arg = MaybeCreateExprWithCleanups(Arg);
// Okay: add the default argument to the parameter
Param->setDefaultArg(Arg);
+ // We have already instantiated this parameter; provide each of the
+ // instantiations with the uninstantiated default argument.
+ UnparsedDefaultArgInstantiationsMap::iterator InstPos
+ = UnparsedDefaultArgInstantiations.find(Param);
+ if (InstPos != UnparsedDefaultArgInstantiations.end()) {
+ for (unsigned I = 0, N = InstPos->second.size(); I != N; ++I)
+ InstPos->second[I]->setUninstantiatedDefaultArg(Arg);
+
+ // We're done tracking this parameter's instantiations.
+ UnparsedDefaultArgInstantiations.erase(InstPos);
+ }
+
return false;
}
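
The UnparsedDefaultArgInstantiations update covers parameters whose
default argument is still unparsed while instantiations of the
surrounding declaration already exist; roughly (illustrative):

    template <typename T> struct S {
      // The '= 42' below is attached only once the class is complete;
      // any instantiation of f() created before that point is tracked
      // and handed the argument retroactively by the loop above.
      void f(int x = 42);
    };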
@@ -163,6 +176,12 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
return;
}
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
+ Param->setInvalidDecl();
+ return;
+ }
+
// Check that the default argument is well-formed
CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this);
if (DefaultArgChecker.Visit(DefaultArg)) {
@@ -297,7 +316,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
} else if (OldParam->hasDefaultArg()) {
// Merge the old default argument into the new parameter.
// It's important to use getInit() here; getDefaultArg()
- // strips off any top-level CXXExprWithTemporaries.
+ // strips off any top-level ExprWithCleanups.
NewParam->setHasInheritedDefaultArg();
if (OldParam->hasUninstantiatedDefaultArg())
NewParam->setUninstantiatedDefaultArg(
@@ -444,7 +463,8 @@ CXXBaseSpecifier *
Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
- TypeSourceInfo *TInfo) {
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
// C++ [class.union]p1:
@@ -455,10 +475,17 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
return 0;
}
+ if (EllipsisLoc.isValid() &&
+ !TInfo->getType()->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << TInfo->getTypeLoc().getSourceRange();
+ EllipsisLoc = SourceLocation();
+ }
+
if (BaseType->isDependentType())
return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
Class->getTagKind() == TTK_Class,
- Access, TInfo);
+ Access, TInfo, EllipsisLoc);
SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
@@ -493,90 +520,25 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
CXXRecordDecl * CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
assert(CXXBaseDecl && "Base type is not a C++ type");
- // C++0x CWG Issue #817 indicates that [[final]] classes shouldn't be bases.
+ // C++ [class.derived]p2:
+ // If a class is marked with the class-virt-specifier final and it appears
+ // as a base-type-specifier in a base-clause (10 class.derived), the program
+ // is ill-formed.
if (CXXBaseDecl->hasAttr<FinalAttr>()) {
- Diag(BaseLoc, diag::err_final_base) << BaseType.getAsString();
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << CXXBaseDecl->getDeclName();
Diag(CXXBaseDecl->getLocation(), diag::note_previous_decl)
- << BaseType;
+ << CXXBaseDecl->getDeclName();
return 0;
}
- SetClassDeclAttributesFromBase(Class, CXXBaseDecl, Virtual);
-
if (BaseDecl->isInvalidDecl())
Class->setInvalidDecl();
// Create the base specifier.
return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
Class->getTagKind() == TTK_Class,
- Access, TInfo);
-}
-
-void Sema::SetClassDeclAttributesFromBase(CXXRecordDecl *Class,
- const CXXRecordDecl *BaseClass,
- bool BaseIsVirtual) {
- // A class with a non-empty base class is not empty.
- // FIXME: Standard ref?
- if (!BaseClass->isEmpty())
- Class->setEmpty(false);
-
- // C++ [class.virtual]p1:
- // A class that [...] inherits a virtual function is called a polymorphic
- // class.
- if (BaseClass->isPolymorphic())
- Class->setPolymorphic(true);
-
- // C++ [dcl.init.aggr]p1:
- // An aggregate is [...] a class with [...] no base classes [...].
- Class->setAggregate(false);
-
- // C++ [class]p4:
- // A POD-struct is an aggregate class...
- Class->setPOD(false);
-
- if (BaseIsVirtual) {
- // C++ [class.ctor]p5:
- // A constructor is trivial if its class has no virtual base classes.
- Class->setHasTrivialConstructor(false);
-
- // C++ [class.copy]p6:
- // A copy constructor is trivial if its class has no virtual base classes.
- Class->setHasTrivialCopyConstructor(false);
-
- // C++ [class.copy]p11:
- // A copy assignment operator is trivial if its class has no virtual
- // base classes.
- Class->setHasTrivialCopyAssignment(false);
-
- // C++0x [meta.unary.prop] is_empty:
- // T is a class type, but not a union type, with ... no virtual base
- // classes
- Class->setEmpty(false);
- } else {
- // C++ [class.ctor]p5:
- // A constructor is trivial if all the direct base classes of its
- // class have trivial constructors.
- if (!BaseClass->hasTrivialConstructor())
- Class->setHasTrivialConstructor(false);
-
- // C++ [class.copy]p6:
- // A copy constructor is trivial if all the direct base classes of its
- // class have trivial copy constructors.
- if (!BaseClass->hasTrivialCopyConstructor())
- Class->setHasTrivialCopyConstructor(false);
-
- // C++ [class.copy]p11:
- // A copy assignment operator is trivial if all the direct base classes
- // of its class have trivial copy assignment operators.
- if (!BaseClass->hasTrivialCopyAssignment())
- Class->setHasTrivialCopyAssignment(false);
- }
-
- // C++ [class.ctor]p3:
- // A destructor is trivial if all the direct base classes of its class
- // have trivial destructors.
- if (!BaseClass->hasTrivialDestructor())
- Class->setHasTrivialDestructor(false);
+ Access, TInfo, EllipsisLoc);
}
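
The rewritten diagnostic matches the C++0x wording for final classes
used as bases, e.g. (illustrative):

    struct B final { };
    struct D : B { };   // err_class_marked_final_used_as_base, with a
                        // note at B's declaration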
/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
@@ -587,7 +549,8 @@ void Sema::SetClassDeclAttributesFromBase(CXXRecordDecl *Class,
BaseResult
Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
- ParsedType basetype, SourceLocation BaseLoc) {
+ ParsedType basetype, SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc) {
if (!classdecl)
return true;
@@ -598,8 +561,15 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
TypeSourceInfo *TInfo = 0;
GetTypeFromParser(basetype, &TInfo);
+
+ if (EllipsisLoc.isInvalid() &&
+ DiagnoseUnexpandedParameterPack(SpecifierRange.getBegin(), TInfo,
+ UPPC_BaseType))
+ return true;
+
if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
- Virtual, Access, TInfo))
+ Virtual, Access, TInfo,
+ EllipsisLoc))
return BaseSpec;
return true;
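
The EllipsisLoc plumbing added here supports C++0x pack expansions in
base-specifier lists, as in (illustrative):

    template <typename ...Mixins>
    struct Widget : Mixins... {   // each element of the pack is a base
    };

    struct NotAPack { };
    // struct Bad : NotAPack... { };   // error: pack expansion without
    //                                 // parameter packs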
@@ -885,6 +855,63 @@ Decl *Sema::ActOnAccessSpecifier(AccessSpecifier Access,
return ASDecl;
}
+/// CheckOverrideControl - Check C++0x override control semantics.
+void Sema::CheckOverrideControl(const Decl *D) {
+ const CXXMethodDecl *MD = llvm::dyn_cast<CXXMethodDecl>(D);
+ if (!MD || !MD->isVirtual())
+ return;
+
+ if (MD->isDependentContext())
+ return;
+
+ // C++0x [class.virtual]p3:
+ // If a virtual function is marked with the virt-specifier override and does
+ // not override a member function of a base class,
+ // the program is ill-formed.
+ bool HasOverriddenMethods =
+ MD->begin_overridden_methods() != MD->end_overridden_methods();
+ if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods) {
+ Diag(MD->getLocation(),
+ diag::err_function_marked_override_not_overriding)
+ << MD->getDeclName();
+ return;
+ }
+
+ // C++0x [class.derived]p8:
+ // In a class definition marked with the class-virt-specifier explicit,
+ // if a virtual member function that is neither implicitly-declared nor a
+ // destructor overrides a member function of a base class and it is not
+ // marked with the virt-specifier override, the program is ill-formed.
+ if (MD->getParent()->hasAttr<ExplicitAttr>() && !isa<CXXDestructorDecl>(MD) &&
+ HasOverriddenMethods && !MD->hasAttr<OverrideAttr>()) {
+ llvm::SmallVector<const CXXMethodDecl*, 4>
+ OverriddenMethods(MD->begin_overridden_methods(),
+ MD->end_overridden_methods());
+
+ Diag(MD->getLocation(), diag::err_function_overriding_without_override)
+ << MD->getDeclName()
+ << (unsigned)OverriddenMethods.size();
+
+ for (unsigned I = 0; I != OverriddenMethods.size(); ++I)
+ Diag(OverriddenMethods[I]->getLocation(),
+ diag::note_overridden_virtual_function);
+ }
+}
+
+/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+/// function overrides a virtual member function marked 'final', according to
+/// C++0x [class.virtual]p3.
+bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ if (!Old->hasAttr<FinalAttr>())
+ return false;
+
+ Diag(New->getLocation(), diag::err_final_function_overridden)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+}
+
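
Together these checks implement the C++0x virt-specifier rules quoted
above; the kind of code they accept and reject (illustrative):

    struct B {
      virtual void f();
      virtual void g() final;
    };

    struct D : B {
      virtual void f() override;   // OK: overrides B::f
      virtual void h() override;   // error: marked override, overrides nothing
      virtual void g();            // error: overrides the final B::g
    };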
/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
/// bitfield width if there is one and 'InitExpr' specifies the initializer if
@@ -892,12 +919,18 @@ Decl *Sema::ActOnAccessSpecifier(AccessSpecifier Access,
Decl *
Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
- ExprTy *BW, ExprTy *InitExpr, bool IsDefinition,
+ ExprTy *BW, const VirtSpecifiers &VS,
+ ExprTy *InitExpr, bool IsDefinition,
bool Deleted) {
const DeclSpec &DS = D.getDeclSpec();
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
SourceLocation Loc = NameInfo.getLoc();
+
+ // For anonymous bitfields, the location should point to the type.
+ if (Loc.isInvalid())
+ Loc = D.getSourceRange().getBegin();
+
Expr *BitWidth = static_cast<Expr*>(BW);
Expr *Init = static_cast<Expr*>(InitExpr);
@@ -951,7 +984,29 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
Decl *Member;
if (isInstField) {
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+
+
+ if (SS.isSet() && !SS.isInvalid()) {
+ // The user provided a superfluous scope specifier inside a class
+ // definition:
+ //
+ // class X {
+ // int X::member;
+ // };
+ DeclContext *DC = 0;
+ if ((DC = computeDeclContext(SS, false)) && DC->Equals(CurContext))
+ Diag(D.getIdentifierLoc(), diag::warn_member_extra_qualification)
+ << Name << FixItHint::CreateRemoval(SS.getRange());
+ else
+ Diag(D.getIdentifierLoc(), diag::err_member_qualification)
+ << Name << SS.getRange();
+
+ SS.clear();
+ }
+
// FIXME: Check for template parameters!
+ // FIXME: Check that the name is an identifier!
Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth,
AS);
assert(Member && "HandleField never returns null");
@@ -994,17 +1049,37 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
FunTmpl->getTemplatedDecl()->setAccess(AS);
}
+ if (VS.isOverrideSpecified()) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD || !MD->isVirtual()) {
+ Diag(Member->getLocStart(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "override" << FixItHint::CreateRemoval(VS.getOverrideLoc());
+ } else
+ MD->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context));
+ }
+ if (VS.isFinalSpecified()) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD || !MD->isVirtual()) {
+ Diag(Member->getLocStart(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "final" << FixItHint::CreateRemoval(VS.getFinalLoc());
+ } else
+ MD->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context));
+ }
+
+ CheckOverrideControl(Member);
+
assert((Name || isInstField) && "No identifier for non-field ?");
if (Init)
- AddInitializerToDecl(Member, Init, false);
+ AddInitializerToDecl(Member, Init, false,
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
if (Deleted) // FIXME: Source location is not very good.
SetDeclDeleted(Member, D.getSourceRange().getBegin());
- if (isInstField) {
+ if (isInstField)
FieldCollector->Add(cast<FieldDecl>(Member));
- return 0;
- }
return Member;
}
@@ -1063,8 +1138,8 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ExprTy **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ SourceLocation EllipsisLoc) {
if (!ConstructorD)
return true;
@@ -1084,12 +1159,12 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
// C++ [class.base.init]p2:
// Names in a mem-initializer-id are looked up in the scope of the
- // constructor’s class and, if not found in that scope, are looked
- // up in the scope containing the constructor’s
- // definition. [Note: if the constructor’s class contains a member
- // with the same name as a direct or virtual base class of the
- // class, a mem-initializer-id naming the member or base class and
- // composed of a single identifier refers to the class member. A
+ // constructor's class and, if not found in that scope, are looked
+ // up in the scope containing the constructor's definition.
+ // [Note: if the constructor's class contains a member with the
+ // same name as a direct or virtual base class of the class, a
+ // mem-initializer-id naming the member or base class and composed
+ // of a single identifier refers to the class member. A
// mem-initializer-id for the hidden base class may be specified
// using a qualified name. ]
if (!SS.getScopeRep() && !TemplateTypeTy) {
@@ -1097,14 +1172,30 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
FieldDecl *Member = 0;
DeclContext::lookup_result Result
= ClassDecl->lookup(MemberOrBase);
- if (Result.first != Result.second)
+ if (Result.first != Result.second) {
Member = dyn_cast<FieldDecl>(*Result.first);
-
- // FIXME: Handle members of an anonymous union.
-
- if (Member)
- return BuildMemberInitializer(Member, (Expr**)Args, NumArgs, IdLoc,
+
+ if (Member) {
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
+ << MemberOrBase << SourceRange(IdLoc, RParenLoc);
+
+ return BuildMemberInitializer(Member, (Expr**)Args, NumArgs, IdLoc,
LParenLoc, RParenLoc);
+ }
+
+ // Handle anonymous union case.
+ if (IndirectFieldDecl* IndirectField
+ = dyn_cast<IndirectFieldDecl>(*Result.first)) {
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
+ << MemberOrBase << SourceRange(IdLoc, RParenLoc);
+
+ return BuildMemberInitializer(IndirectField, (Expr**)Args,
+ NumArgs, IdLoc,
+ LParenLoc, RParenLoc);
+ }
+ }
}
// It didn't name a member, so see if it names a class.
QualType BaseType;
@@ -1212,7 +1303,7 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
TInfo = Context.getTrivialTypeSourceInfo(BaseType, IdLoc);
return BuildBaseInitializer(BaseType, TInfo, (Expr **)Args, NumArgs,
- LParenLoc, RParenLoc, ClassDecl);
+ LParenLoc, RParenLoc, ClassDecl, EllipsisLoc);
}
/// Checks an initializer expression for use of uninitialized fields, such as
@@ -1220,8 +1311,10 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
/// uninitialized field was used and updates the SourceLocation parameter; false
/// otherwise.
static bool InitExprContainsUninitializedFields(const Stmt *S,
- const FieldDecl *LhsField,
+ const ValueDecl *LhsField,
SourceLocation *L) {
+ assert(isa<FieldDecl>(LhsField) || isa<IndirectFieldDecl>(LhsField));
+
if (isa<CallExpr>(S)) {
// Do not descend into function calls or constructors, as the use
// of an uninitialized field may be valid. One would have to inspect
@@ -1232,6 +1325,20 @@ static bool InitExprContainsUninitializedFields(const Stmt *S,
}
if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
const NamedDecl *RhsField = ME->getMemberDecl();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(RhsField)) {
+ // The member expression points to a static data member.
+ assert(VD->isStaticDataMember() &&
+ "Member points to non-static data member!");
+ (void)VD;
+ return false;
+ }
+
+ if (isa<EnumConstantDecl>(RhsField)) {
+ // The member expression points to an enum constant.
+ return false;
+ }
+
if (RhsField == LhsField) {
// Initializing a field with itself. Throw a warning.
// But wait; there are exceptions!
@@ -1247,9 +1354,16 @@ static bool InitExprContainsUninitializedFields(const Stmt *S,
*L = ME->getMemberLoc();
return true;
}
+ } else if (isa<SizeOfAlignOfExpr>(S)) {
+ // sizeof/alignof doesn't reference contents; do not warn.
+ return false;
+ } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(S)) {
+ // address-of doesn't reference contents (the pointer may be dereferenced
+ // in the same expression, but that would be rare and weird).
+ if (UOE->getOpcode() == UO_AddrOf)
+ return false;
}
- for (Stmt::const_child_iterator it = S->child_begin(), e = S->child_end();
- it != e; ++it) {
+ for (Stmt::const_child_range it = S->children(); it; ++it) {
if (!*it) {
// An expression such as 'member(arg ?: "")' may trigger this.
continue;
@@ -1261,10 +1375,18 @@ static bool InitExprContainsUninitializedFields(const Stmt *S,
}
MemInitResult
-Sema::BuildMemberInitializer(FieldDecl *Member, Expr **Args,
+Sema::BuildMemberInitializer(ValueDecl *Member, Expr **Args,
unsigned NumArgs, SourceLocation IdLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
+ FieldDecl *DirectMember = dyn_cast<FieldDecl>(Member);
+ IndirectFieldDecl *IndirectMember = dyn_cast<IndirectFieldDecl>(Member);
+ assert((DirectMember || IndirectMember) &&
+ "Member must be a FieldDecl or IndirectFieldDecl");
+
+ if (Member->isInvalidDecl())
+ return true;
+
// Diagnose value-uses of fields to initialize themselves, e.g.
// foo(foo)
// where foo is not also a parameter to the constructor.
@@ -1286,12 +1408,12 @@ Sema::BuildMemberInitializer(FieldDecl *Member, Expr **Args,
for (unsigned i = 0; i < NumArgs; i++)
HasDependentArg |= Args[i]->isTypeDependent();
+ Expr *Init;
if (Member->getType()->isDependentType() || HasDependentArg) {
// Can't check initialization for a member of dependent type or when
// any of the arguments are type-dependent expressions.
- Expr *Init
- = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc);
+ Init = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
+ RParenLoc);
// Erase any temporaries within this evaluation context; we're not
// going to track them in the AST, since we'll be rebuilding the
@@ -1299,69 +1421,78 @@ Sema::BuildMemberInitializer(FieldDecl *Member, Expr **Args,
ExprTemporaries.erase(
ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
ExprTemporaries.end());
-
- return new (Context) CXXBaseOrMemberInitializer(Context, Member, IdLoc,
- LParenLoc,
- Init,
- RParenLoc);
-
+ } else {
+ // Initialize the member.
+ InitializedEntity MemberEntity =
+ DirectMember ? InitializedEntity::InitializeMember(DirectMember, 0)
+ : InitializedEntity::InitializeMember(IndirectMember, 0);
+ InitializationKind Kind =
+ InitializationKind::CreateDirect(IdLoc, LParenLoc, RParenLoc);
+
+ InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs);
+
+ ExprResult MemberInit =
+ InitSeq.Perform(*this, MemberEntity, Kind,
+ MultiExprArg(*this, Args, NumArgs), 0);
+ if (MemberInit.isInvalid())
+ return true;
+
+ CheckImplicitConversions(MemberInit.get(), LParenLoc);
+
+ // C++0x [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ MemberInit = MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the member
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ Init = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
+ RParenLoc);
+ else
+ Init = MemberInit.get();
}
-
- if (Member->isInvalidDecl())
- return true;
-
- // Initialize the member.
- InitializedEntity MemberEntity =
- InitializedEntity::InitializeMember(Member, 0);
- InitializationKind Kind =
- InitializationKind::CreateDirect(IdLoc, LParenLoc, RParenLoc);
-
- InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs);
-
- ExprResult MemberInit =
- InitSeq.Perform(*this, MemberEntity, Kind,
- MultiExprArg(*this, Args, NumArgs), 0);
- if (MemberInit.isInvalid())
- return true;
-
- // C++0x [class.base.init]p7:
- // The initialization of each base and member constitutes a
- // full-expression.
- MemberInit = MaybeCreateCXXExprWithTemporaries(MemberInit.get());
- if (MemberInit.isInvalid())
- return true;
-
- // If we are in a dependent context, template instantiation will
- // perform this type-checking again. Just save the arguments that we
- // received in a ParenListExpr.
- // FIXME: This isn't quite ideal, since our ASTs don't capture all
- // of the information that we have about the member
- // initializer. However, deconstructing the ASTs is a dicey process,
- // and this approach is far more likely to get the corner cases right.
- if (CurContext->isDependentContext()) {
- // Bump the reference count of all of the arguments.
- for (unsigned I = 0; I != NumArgs; ++I)
- Args[I]->Retain();
- Expr *Init = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc);
- return new (Context) CXXBaseOrMemberInitializer(Context, Member, IdLoc,
- LParenLoc,
- Init,
+ if (DirectMember) {
+ return new (Context) CXXCtorInitializer(Context, DirectMember,
+ IdLoc, LParenLoc, Init,
+ RParenLoc);
+ } else {
+ return new (Context) CXXCtorInitializer(Context, IndirectMember,
+ IdLoc, LParenLoc, Init,
RParenLoc);
}
+}
- return new (Context) CXXBaseOrMemberInitializer(Context, Member, IdLoc,
- LParenLoc,
- MemberInit.get(),
- RParenLoc);
+MemInitResult
+Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc) {
+ SourceLocation Loc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ if (!LangOpts.CPlusPlus0x)
+ return Diag(Loc, diag::err_delegation_0x_only)
+ << TInfo->getTypeLoc().getLocalSourceRange();
+
+ return Diag(Loc, diag::err_delegation_unimplemented)
+ << TInfo->getTypeLoc().getLocalSourceRange();
}
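
BuildDelegatingInitializer recognizes C++0x delegating constructors but
does not implement them yet; the construct it diagnoses (illustrative):

    struct S {
      S(int n);
      S() : S(0) { }   // accepted grammatically in C++0x mode, but
                       // currently rejected with err_delegation_unimplemented
    };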
MemInitResult
Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
Expr **Args, unsigned NumArgs,
SourceLocation LParenLoc, SourceLocation RParenLoc,
- CXXRecordDecl *ClassDecl) {
+ CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc) {
bool HasDependentArg = false;
for (unsigned i = 0; i < NumArgs; i++)
HasDependentArg |= Args[i]->isTypeDependent();
@@ -1375,16 +1506,40 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// C++ [class.base.init]p2:
// [...] Unless the mem-initializer-id names a nonstatic data
- // member of the constructor’s class or a direct or virtual base
+ // member of the constructor's class or a direct or virtual base
// of that class, the mem-initializer is ill-formed. A
// mem-initializer-list can initialize a base class using any
// name that denotes that base class type.
bool Dependent = BaseType->isDependentType() || HasDependentArg;
+ if (EllipsisLoc.isValid()) {
+ // This is a pack expansion.
+ if (!BaseType->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(BaseLoc, RParenLoc);
+
+ EllipsisLoc = SourceLocation();
+ }
+ } else {
+ // Check for any unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(BaseLoc, BaseTInfo, UPPC_Initializer))
+ return true;
+
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (DiagnoseUnexpandedParameterPack(Args[I]))
+ return true;
+ }
+
// Check for direct and virtual base classes.
const CXXBaseSpecifier *DirectBaseSpec = 0;
const CXXBaseSpecifier *VirtualBaseSpec = 0;
if (!Dependent) {
+ if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
+ BaseType))
+ return BuildDelegatingInitializer(BaseTInfo, Args, NumArgs,
+ LParenLoc, RParenLoc, ClassDecl,
+ EllipsisLoc);
+
FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
VirtualBaseSpec);
@@ -1421,11 +1576,12 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
ExprTemporaries.end());
- return new (Context) CXXBaseOrMemberInitializer(Context, BaseTInfo,
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
/*IsVirtual=*/false,
LParenLoc,
BaseInit.takeAs<Expr>(),
- RParenLoc);
+ RParenLoc,
+ EllipsisLoc);
}
// C++ [base.class.init]p2:
@@ -1454,11 +1610,13 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
MultiExprArg(*this, Args, NumArgs), 0);
if (BaseInit.isInvalid())
return true;
+
+ CheckImplicitConversions(BaseInit.get(), LParenLoc);
// C++0x [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
- BaseInit = MaybeCreateCXXExprWithTemporaries(BaseInit.get());
+ BaseInit = MaybeCreateExprWithCleanups(BaseInit);
if (BaseInit.isInvalid())
return true;
@@ -1470,25 +1628,23 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// initializer. However, deconstructing the ASTs is a dicey process,
// and this approach is far more likely to get the corner cases right.
if (CurContext->isDependentContext()) {
- // Bump the reference count of all of the arguments.
- for (unsigned I = 0; I != NumArgs; ++I)
- Args[I]->Retain();
-
ExprResult Init
= Owned(new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
RParenLoc));
- return new (Context) CXXBaseOrMemberInitializer(Context, BaseTInfo,
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
BaseSpec->isVirtual(),
LParenLoc,
Init.takeAs<Expr>(),
- RParenLoc);
+ RParenLoc,
+ EllipsisLoc);
}
- return new (Context) CXXBaseOrMemberInitializer(Context, BaseTInfo,
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
BaseSpec->isVirtual(),
LParenLoc,
BaseInit.takeAs<Expr>(),
- RParenLoc);
+ RParenLoc,
+ EllipsisLoc);
}
/// ImplicitInitializerKind - How an implicit base or member initializer should
@@ -1504,7 +1660,7 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
ImplicitInitializerKind ImplicitInitKind,
CXXBaseSpecifier *BaseSpec,
bool IsInheritedVirtualBase,
- CXXBaseOrMemberInitializer *&CXXBaseInit) {
+ CXXCtorInitializer *&CXXBaseInit) {
InitializedEntity InitEntity
= InitializedEntity::InitializeBase(SemaRef.Context, BaseSpec,
IsInheritedVirtualBase);
@@ -1527,7 +1683,8 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
Expr *CopyCtorArg =
DeclRefExpr::Create(SemaRef.Context, 0, SourceRange(), Param,
- Constructor->getLocation(), ParamType, 0);
+ Constructor->getLocation(), ParamType,
+ VK_LValue, 0);
// Cast to the base class to avoid ambiguities.
QualType ArgTy =
@@ -1554,20 +1711,18 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
assert(false && "Unhandled initializer kind!");
}
- if (BaseInit.isInvalid())
- return true;
-
- BaseInit = SemaRef.MaybeCreateCXXExprWithTemporaries(BaseInit.get());
+ BaseInit = SemaRef.MaybeCreateExprWithCleanups(BaseInit);
if (BaseInit.isInvalid())
return true;
CXXBaseInit =
- new (SemaRef.Context) CXXBaseOrMemberInitializer(SemaRef.Context,
+ new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
SemaRef.Context.getTrivialTypeSourceInfo(BaseSpec->getType(),
SourceLocation()),
BaseSpec->isVirtual(),
SourceLocation(),
BaseInit.takeAs<Expr>(),
+ SourceLocation(),
SourceLocation());
return false;
@@ -1577,7 +1732,7 @@ static bool
BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
ImplicitInitializerKind ImplicitInitKind,
FieldDecl *Field,
- CXXBaseOrMemberInitializer *&CXXMemberInit) {
+ CXXCtorInitializer *&CXXMemberInit) {
if (Field->isInvalidDecl())
return true;
@@ -1589,7 +1744,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
Expr *MemberExprBase =
DeclRefExpr::Create(SemaRef.Context, 0, SourceRange(), Param,
- Loc, ParamType, 0);
+ Loc, ParamType, VK_LValue, 0);
// Build a reference to this field within the parameter.
CXXScopeSpec SS;
@@ -1634,7 +1789,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
// Create a reference to the iteration variable.
ExprResult IterationVarRef
- = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, Loc);
+ = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_RValue, Loc);
assert(!IterationVarRef.isInvalid() &&
"Reference to invented variable cannot fail!");
@@ -1671,12 +1826,12 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
ExprResult MemberInit
= InitSeq.Perform(SemaRef, Entities.back(), InitKind,
MultiExprArg(&CopyCtorArgE, 1));
- MemberInit = SemaRef.MaybeCreateCXXExprWithTemporaries(MemberInit.get());
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
if (MemberInit.isInvalid())
return true;
CXXMemberInit
- = CXXBaseOrMemberInitializer::Create(SemaRef.Context, Field, Loc, Loc,
+ = CXXCtorInitializer::Create(SemaRef.Context, Field, Loc, Loc,
MemberInit.takeAs<Expr>(), Loc,
IndexVariables.data(),
IndexVariables.size());
@@ -1696,15 +1851,13 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0);
ExprResult MemberInit =
InitSeq.Perform(SemaRef, InitEntity, InitKind, MultiExprArg());
- if (MemberInit.isInvalid())
- return true;
- MemberInit = SemaRef.MaybeCreateCXXExprWithTemporaries(MemberInit.get());
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
if (MemberInit.isInvalid())
return true;
CXXMemberInit =
- new (SemaRef.Context) CXXBaseOrMemberInitializer(SemaRef.Context,
+ new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
Field, Loc, Loc,
MemberInit.get(),
Loc);
@@ -1742,8 +1895,8 @@ struct BaseAndFieldInfo {
CXXConstructorDecl *Ctor;
bool AnyErrorsInInits;
ImplicitInitializerKind IIK;
- llvm::DenseMap<const void *, CXXBaseOrMemberInitializer*> AllBaseFields;
- llvm::SmallVector<CXXBaseOrMemberInitializer*, 8> AllToInit;
+ llvm::DenseMap<const void *, CXXCtorInitializer*> AllBaseFields;
+ llvm::SmallVector<CXXCtorInitializer*, 8> AllToInit;
BaseAndFieldInfo(Sema &S, CXXConstructorDecl *Ctor, bool ErrorsInInits)
: S(S), Ctor(Ctor), AnyErrorsInInits(ErrorsInInits) {
@@ -1756,26 +1909,12 @@ struct BaseAndFieldInfo {
};
}
-static void RecordFieldInitializer(BaseAndFieldInfo &Info,
- FieldDecl *Top, FieldDecl *Field,
- CXXBaseOrMemberInitializer *Init) {
- // If the member doesn't need to be initialized, Init will still be null.
- if (!Init)
- return;
-
- Info.AllToInit.push_back(Init);
- if (Field != Top) {
- Init->setMember(Top);
- Init->setAnonUnionMember(Field);
- }
-}
-
static bool CollectFieldInitializer(BaseAndFieldInfo &Info,
FieldDecl *Top, FieldDecl *Field) {
// Overwhelmingly common case: we have a direct initializer for this field.
- if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(Field)) {
- RecordFieldInitializer(Info, Top, Field, Init);
+ if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(Field)) {
+ Info.AllToInit.push_back(Init);
return false;
}
@@ -1794,8 +1933,8 @@ static bool CollectFieldInitializer(BaseAndFieldInfo &Info,
// First check for an explicit initializer for one field.
for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(),
EA = FieldClassDecl->field_end(); FA != EA; FA++) {
- if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(*FA)) {
- RecordFieldInitializer(Info, Top, *FA, Init);
+ if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(*FA)) {
+ Info.AllToInit.push_back(Init);
// Once we've initialized a field of an anonymous union, the union
// field in the class is also initialized, so exit immediately.
@@ -1828,29 +1967,31 @@ static bool CollectFieldInitializer(BaseAndFieldInfo &Info,
if (Info.AnyErrorsInInits)
return false;
- CXXBaseOrMemberInitializer *Init = 0;
+ CXXCtorInitializer *Init = 0;
if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field, Init))
return true;
- RecordFieldInitializer(Info, Top, Field, Init);
+ if (Init)
+ Info.AllToInit.push_back(Init);
+
return false;
}
bool
-Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
- CXXBaseOrMemberInitializer **Initializers,
+Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Initializers,
unsigned NumInitializers,
bool AnyErrors) {
if (Constructor->getDeclContext()->isDependentContext()) {
// Just store the initializers as written, they will be checked during
// instantiation.
if (NumInitializers > 0) {
- Constructor->setNumBaseOrMemberInitializers(NumInitializers);
- CXXBaseOrMemberInitializer **baseOrMemberInitializers =
- new (Context) CXXBaseOrMemberInitializer*[NumInitializers];
+ Constructor->setNumCtorInitializers(NumInitializers);
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[NumInitializers];
memcpy(baseOrMemberInitializers, Initializers,
- NumInitializers * sizeof(CXXBaseOrMemberInitializer*));
- Constructor->setBaseOrMemberInitializers(baseOrMemberInitializers);
+ NumInitializers * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
}
return false;
@@ -1867,12 +2008,12 @@ Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
bool HadError = false;
for (unsigned i = 0; i < NumInitializers; i++) {
- CXXBaseOrMemberInitializer *Member = Initializers[i];
+ CXXCtorInitializer *Member = Initializers[i];
if (Member->isBaseInitializer())
Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
else
- Info.AllBaseFields[Member->getMember()] = Member;
+ Info.AllBaseFields[Member->getAnyMember()] = Member;
}
// Keep track of the direct virtual bases.
@@ -1887,12 +2028,12 @@ Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
for (CXXRecordDecl::base_class_iterator VBase = ClassDecl->vbases_begin(),
E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
- if (CXXBaseOrMemberInitializer *Value
+ if (CXXCtorInitializer *Value
= Info.AllBaseFields.lookup(VBase->getType()->getAs<RecordType>())) {
Info.AllToInit.push_back(Value);
} else if (!AnyErrors) {
bool IsInheritedVirtualBase = !DirectVBases.count(VBase);
- CXXBaseOrMemberInitializer *CXXBaseInit;
+ CXXCtorInitializer *CXXBaseInit;
if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
VBase, IsInheritedVirtualBase,
CXXBaseInit)) {
@@ -1911,11 +2052,11 @@ Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
if (Base->isVirtual())
continue;
- if (CXXBaseOrMemberInitializer *Value
+ if (CXXCtorInitializer *Value
= Info.AllBaseFields.lookup(Base->getType()->getAs<RecordType>())) {
Info.AllToInit.push_back(Value);
} else if (!AnyErrors) {
- CXXBaseOrMemberInitializer *CXXBaseInit;
+ CXXCtorInitializer *CXXBaseInit;
if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
Base, /*IsInheritedVirtualBase=*/false,
CXXBaseInit)) {
@@ -1941,12 +2082,12 @@ Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
NumInitializers = Info.AllToInit.size();
if (NumInitializers > 0) {
- Constructor->setNumBaseOrMemberInitializers(NumInitializers);
- CXXBaseOrMemberInitializer **baseOrMemberInitializers =
- new (Context) CXXBaseOrMemberInitializer*[NumInitializers];
+ Constructor->setNumCtorInitializers(NumInitializers);
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[NumInitializers];
memcpy(baseOrMemberInitializers, Info.AllToInit.data(),
- NumInitializers * sizeof(CXXBaseOrMemberInitializer*));
- Constructor->setBaseOrMemberInitializers(baseOrMemberInitializers);
+ NumInitializers * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
// Constructors implicitly reference the base and member
// destructors.
@@ -1967,25 +2108,18 @@ static void *GetKeyForTopLevelField(FieldDecl *Field) {
}
static void *GetKeyForBase(ASTContext &Context, QualType BaseType) {
- return Context.getCanonicalType(BaseType).getTypePtr();
+ return const_cast<Type*>(Context.getCanonicalType(BaseType).getTypePtr());
}
static void *GetKeyForMember(ASTContext &Context,
- CXXBaseOrMemberInitializer *Member,
- bool MemberMaybeAnon = false) {
- if (!Member->isMemberInitializer())
+ CXXCtorInitializer *Member) {
+ if (!Member->isAnyMemberInitializer())
return GetKeyForBase(Context, QualType(Member->getBaseClass(), 0));
// For fields injected into the class via declaration of an anonymous union,
// use its anonymous union class declaration as the unique key.
- FieldDecl *Field = Member->getMember();
-
- // After SetBaseOrMemberInitializers call, Field is the anonymous union
- // data member of the class. Data member used in the initializer list is
- // in AnonUnionMember field.
- if (MemberMaybeAnon && Field->isAnonymousStructOrUnion())
- Field = Member->getAnonUnionMember();
-
+ FieldDecl *Field = Member->getAnyMember();
+
// If the field is a member of an anonymous struct or union, our key
// is the anonymous record decl that's a direct child of the class.
RecordDecl *RD = Field->getParent();
@@ -2007,13 +2141,24 @@ static void *GetKeyForMember(ASTContext &Context,
static void
DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
const CXXConstructorDecl *Constructor,
- CXXBaseOrMemberInitializer **Inits,
+ CXXCtorInitializer **Inits,
unsigned NumInits) {
if (Constructor->getDeclContext()->isDependentContext())
return;
- if (SemaRef.Diags.getDiagnosticLevel(diag::warn_initializer_out_of_order)
- == Diagnostic::Ignored)
+  // Don't check initializer order unless the warning is enabled at the
+ // location of at least one initializer.
+ bool ShouldCheckOrder = false;
+ for (unsigned InitIndex = 0; InitIndex != NumInits; ++InitIndex) {
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ if (SemaRef.Diags.getDiagnosticLevel(diag::warn_initializer_out_of_order,
+ Init->getSourceLocation())
+ != Diagnostic::Ignored) {
+ ShouldCheckOrder = true;
+ break;
+ }
+ }
+ if (!ShouldCheckOrder)
return;
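A minimal sketch of the kind of user code this check flags (hypothetical
example; members are always initialized in declaration order, whatever order
the mem-initializer list is written in):

    struct S {
      int a;
      int b;
      S() : b(1), a(2) {}  // warn_initializer_out_of_order: 'b' is written
    };                     // first but is initialized after 'a'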
// Build the list of bases and members in the order that they'll
@@ -2045,10 +2190,10 @@ DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
unsigned NumIdealInits = IdealInitKeys.size();
unsigned IdealIndex = 0;
- CXXBaseOrMemberInitializer *PrevInit = 0;
+ CXXCtorInitializer *PrevInit = 0;
for (unsigned InitIndex = 0; InitIndex != NumInits; ++InitIndex) {
- CXXBaseOrMemberInitializer *Init = Inits[InitIndex];
- void *InitKey = GetKeyForMember(SemaRef.Context, Init, true);
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ void *InitKey = GetKeyForMember(SemaRef.Context, Init);
// Scan forward to try to find this initializer in the idealized
// initializers list.
@@ -2064,13 +2209,13 @@ DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
SemaRef.Diag(PrevInit->getSourceLocation(),
diag::warn_initializer_out_of_order);
- if (PrevInit->isMemberInitializer())
- D << 0 << PrevInit->getMember()->getDeclName();
+ if (PrevInit->isAnyMemberInitializer())
+ D << 0 << PrevInit->getAnyMember()->getDeclName();
else
D << 1 << PrevInit->getBaseClassInfo()->getType();
- if (Init->isMemberInitializer())
- D << 0 << Init->getMember()->getDeclName();
+ if (Init->isAnyMemberInitializer())
+ D << 0 << Init->getAnyMember()->getDeclName();
else
D << 1 << Init->getBaseClassInfo()->getType();
@@ -2089,8 +2234,8 @@ DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
namespace {
bool CheckRedundantInit(Sema &S,
- CXXBaseOrMemberInitializer *Init,
- CXXBaseOrMemberInitializer *&PrevInit) {
+ CXXCtorInitializer *Init,
+ CXXCtorInitializer *&PrevInit) {
if (!PrevInit) {
PrevInit = Init;
return false;
@@ -2102,7 +2247,7 @@ bool CheckRedundantInit(Sema &S,
<< Field->getDeclName()
<< Init->getSourceRange();
else {
- Type *BaseClass = Init->getBaseClass();
+ const Type *BaseClass = Init->getBaseClass();
assert(BaseClass && "neither field nor base");
S.Diag(Init->getSourceLocation(),
diag::err_multiple_base_initialization)
@@ -2115,13 +2260,13 @@ bool CheckRedundantInit(Sema &S,
return true;
}
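For reference, the redundant-initializer paths above and below correspond to
constructor definitions like these (hypothetical examples):

    struct B {};
    struct R : B {
      int x;
      R() : x(1), x(2) {}   // error: multiple initializations of member 'x'
      R(int) : B(), B() {}  // err_multiple_base_initialization for base 'B'
    };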
-typedef std::pair<NamedDecl *, CXXBaseOrMemberInitializer *> UnionEntry;
+typedef std::pair<NamedDecl *, CXXCtorInitializer *> UnionEntry;
typedef llvm::DenseMap<RecordDecl*, UnionEntry> RedundantUnionMap;
bool CheckRedundantUnionInit(Sema &S,
- CXXBaseOrMemberInitializer *Init,
+ CXXCtorInitializer *Init,
RedundantUnionMap &Unions) {
- FieldDecl *Field = Init->getMember();
+ FieldDecl *Field = Init->getAnyMember();
RecordDecl *Parent = Field->getParent();
if (!Parent->isAnonymousStructOrUnion())
return false;
@@ -2170,26 +2315,26 @@ void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
return;
}
- CXXBaseOrMemberInitializer **MemInits =
- reinterpret_cast<CXXBaseOrMemberInitializer **>(meminits);
+ CXXCtorInitializer **MemInits =
+ reinterpret_cast<CXXCtorInitializer **>(meminits);
// Mapping for the duplicate initializers check.
// For member initializers, this is keyed with a FieldDecl*.
// For base initializers, this is keyed with a Type*.
- llvm::DenseMap<void*, CXXBaseOrMemberInitializer *> Members;
+ llvm::DenseMap<void*, CXXCtorInitializer *> Members;
// Mapping for the inconsistent anonymous-union initializers check.
RedundantUnionMap MemberUnions;
bool HadError = false;
for (unsigned i = 0; i < NumMemInits; i++) {
- CXXBaseOrMemberInitializer *Init = MemInits[i];
+ CXXCtorInitializer *Init = MemInits[i];
// Set the source order index.
Init->setSourceOrder(i);
- if (Init->isMemberInitializer()) {
- FieldDecl *Field = Init->getMember();
+ if (Init->isAnyMemberInitializer()) {
+ FieldDecl *Field = Init->getAnyMember();
if (CheckRedundantInit(*this, Init, Members[Field]) ||
CheckRedundantUnionInit(*this, Init, MemberUnions))
HadError = true;
@@ -2205,7 +2350,7 @@ void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
DiagnoseBaseOrMemInitializerOrder(*this, Constructor, MemInits, NumMemInits);
- SetBaseOrMemberInitializers(Constructor, MemInits, NumMemInits, AnyErrors);
+ SetCtorInitializers(Constructor, MemInits, NumMemInits, AnyErrors);
}
void
@@ -2304,7 +2449,7 @@ void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(CDtorDecl))
- SetBaseOrMemberInitializers(Constructor, 0, 0, /*AnyErrors=*/false);
+ SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false);
}
bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
@@ -2391,7 +2536,7 @@ void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
Diag(SO->second.front().Method->getLocation(),
diag::note_pure_virtual_function)
- << SO->second.front().Method->getDeclName();
+ << SO->second.front().Method->getDeclName() << RD->getDeclName();
}
}
@@ -2566,84 +2711,9 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
/// completing, introducing implicitly-declared members, checking for
/// abstract types, etc.
void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
- if (!Record || Record->isInvalidDecl())
- return;
-
- if (!Record->isDependentType())
- AddImplicitlyDeclaredMembersToClass(Record);
-
- if (Record->isInvalidDecl())
+ if (!Record)
return;
- // Set access bits correctly on the directly-declared conversions.
- UnresolvedSetImpl *Convs = Record->getConversionFunctions();
- for (UnresolvedSetIterator I = Convs->begin(), E = Convs->end(); I != E; ++I)
- Convs->setAccess(I, (*I)->getAccess());
-
- // Determine whether we need to check for final overriders. We do
- // this either when there are virtual base classes (in which case we
- // may end up finding multiple final overriders for a given virtual
- // function) or any of the base classes is abstract (in which case
- // we might detect that this class is abstract).
- bool CheckFinalOverriders = false;
- if (Record->isPolymorphic() && !Record->isInvalidDecl() &&
- !Record->isDependentType()) {
- if (Record->getNumVBases())
- CheckFinalOverriders = true;
- else if (!Record->isAbstract()) {
- for (CXXRecordDecl::base_class_const_iterator B = Record->bases_begin(),
- BEnd = Record->bases_end();
- B != BEnd; ++B) {
- CXXRecordDecl *BaseDecl
- = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
- if (BaseDecl->isAbstract()) {
- CheckFinalOverriders = true;
- break;
- }
- }
- }
- }
-
- if (CheckFinalOverriders) {
- CXXFinalOverriderMap FinalOverriders;
- Record->getFinalOverriders(FinalOverriders);
-
- for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
- MEnd = FinalOverriders.end();
- M != MEnd; ++M) {
- for (OverridingMethods::iterator SO = M->second.begin(),
- SOEnd = M->second.end();
- SO != SOEnd; ++SO) {
- assert(SO->second.size() > 0 &&
- "All virtual functions have overridding virtual functions");
- if (SO->second.size() == 1) {
- // C++ [class.abstract]p4:
- // A class is abstract if it contains or inherits at least one
- // pure virtual function for which the final overrider is pure
- // virtual.
- if (SO->second.front().Method->isPure())
- Record->setAbstract(true);
- continue;
- }
-
- // C++ [class.virtual]p2:
- // In a derived class, if a virtual member function of a base
- // class subobject has more than one final overrider the
- // program is ill-formed.
- Diag(Record->getLocation(), diag::err_multiple_final_overriders)
- << (NamedDecl *)M->first << Record;
- Diag(M->first->getLocation(), diag::note_overridden_virtual_function);
- for (OverridingMethods::overriding_iterator OM = SO->second.begin(),
- OMEnd = SO->second.end();
- OM != OMEnd; ++OM)
- Diag(OM->Method->getLocation(), diag::note_final_overrider)
- << (NamedDecl *)M->first << OM->Method->getParent();
-
- Record->setInvalidDecl();
- }
- }
- }
-
if (Record->isAbstract() && !Record->isInvalidDecl()) {
AbstractUsageInfo Info(*this, Record);
CheckAbstractClassUsage(Info, Record);
@@ -2673,8 +2743,149 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
}
- if (Record->isDynamicClass())
+ if (Record->isDynamicClass() && !Record->isDependentType())
DynamicClasses.push_back(Record);
+
+ if (Record->getIdentifier()) {
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every member of every anonymous union that is a member of class T.
+ //
+ // C++ [class.mem]p14:
+ // In addition, if class T has a user-declared constructor (12.1), every
+ // non-static data member of class T shall have a name different from T.
+ for (DeclContext::lookup_result R = Record->lookup(Record->getDeclName());
+ R.first != R.second; ++R.first) {
+ NamedDecl *D = *R.first;
+ if ((isa<FieldDecl>(D) && Record->hasUserDeclaredConstructor()) ||
+ isa<IndirectFieldDecl>(D)) {
+ Diag(D->getLocation(), diag::err_member_name_of_class)
+ << D->getDeclName();
+ break;
+ }
+ }
+ }
+
+ // Warn if the class has virtual methods but non-virtual public destructor.
+ if (Record->isPolymorphic() && !Record->isDependentType()) {
+ CXXDestructorDecl *dtor = Record->getDestructor();
+ if (!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public))
+ Diag(dtor ? dtor->getLocation() : Record->getLocation(),
+ diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
+ }
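Illustrative case for the new destructor warning (a sketch, not from the
patch): deleting a derived object through a base pointer in this situation is
undefined behavior, which is what the diagnostic guards against.

    struct Base {
      virtual void f();
      // implicit destructor: public and non-virtual -> warn_non_virtual_dtor
    };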
+
+ // See if a method overloads virtual methods in a base
+  // class without overriding any.
+ if (!Record->isDependentType()) {
+ for (CXXRecordDecl::method_iterator M = Record->method_begin(),
+ MEnd = Record->method_end();
+ M != MEnd; ++M) {
+ DiagnoseHiddenVirtualMethods(Record, *M);
+ }
+ }
+
+ // Declare inherited constructors. We do this eagerly here because:
+ // - The standard requires an eager diagnostic for conflicting inherited
+ // constructors from different classes.
+  //   - The other implicit constructors are declared lazily only to avoid
+  //     wasting space and time on classes that are not meant to be
+  //     instantiated (e.g. meta-functions). This doesn't apply to classes that
+ // have inherited constructors.
+ DeclareInheritedConstructors(Record);
+}
+
+/// \brief Data used with FindHiddenVirtualMethod
+struct FindHiddenVirtualMethodData {
+ Sema *S;
+ CXXMethodDecl *Method;
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
+ llvm::SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
+};
+
+/// \brief Member lookup function that determines whether a given C++
+/// method overloads virtual methods in a base class without overriding any,
+/// to be used with CXXRecordDecl::lookupInBases().
+static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *UserData) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ FindHiddenVirtualMethodData &Data
+ = *static_cast<FindHiddenVirtualMethodData*>(UserData);
+
+ DeclarationName Name = Data.Method->getDeclName();
+ assert(Name.getNameKind() == DeclarationName::Identifier);
+
+ bool foundSameNameMethod = false;
+ llvm::SmallVector<CXXMethodDecl *, 8> overloadedMethods;
+ for (Path.Decls = BaseRecord->lookup(Name);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ NamedDecl *D = *Path.Decls.first;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ MD = MD->getCanonicalDecl();
+ foundSameNameMethod = true;
+ // Interested only in hidden virtual methods.
+ if (!MD->isVirtual())
+ continue;
+ // If the method we are checking overrides a method from its base
+      // If the method we are checking overrides a method from its base,
+ if (!Data.S->IsOverload(Data.Method, MD, false))
+ return true;
+      // Collect the overload only if it's hidden.
+ if (!Data.OverridenAndUsingBaseMethods.count(MD))
+ overloadedMethods.push_back(MD);
+ }
+ }
+
+ if (foundSameNameMethod)
+ Data.OverloadedMethods.append(overloadedMethods.begin(),
+ overloadedMethods.end());
+ return foundSameNameMethod;
+}
+
+/// \brief See if a method overloads virtual methods in a base class without
+/// overriding any.
+void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+ if (Diags.getDiagnosticLevel(diag::warn_overloaded_virtual,
+ MD->getLocation()) == Diagnostic::Ignored)
+ return;
+ if (MD->getDeclName().getNameKind() != DeclarationName::Identifier)
+ return;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
+ /*bool RecordPaths=*/false,
+ /*bool DetectVirtual=*/false);
+ FindHiddenVirtualMethodData Data;
+ Data.Method = MD;
+ Data.S = this;
+
+  // Keep the base methods that were overridden or introduced in the subclass
+ // by 'using' in a set. A base method not in this set is hidden.
+ for (DeclContext::lookup_result res = DC->lookup(MD->getDeclName());
+ res.first != res.second; ++res.first) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*res.first))
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ Data.OverridenAndUsingBaseMethods.insert((*I)->getCanonicalDecl());
+ if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*res.first))
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(shad->getTargetDecl()))
+ Data.OverridenAndUsingBaseMethods.insert(MD->getCanonicalDecl());
+ }
+
+ if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths) &&
+ !Data.OverloadedMethods.empty()) {
+ Diag(MD->getLocation(), diag::warn_overloaded_virtual)
+ << MD << (Data.OverloadedMethods.size() > 1);
+
+ for (unsigned i = 0, e = Data.OverloadedMethods.size(); i != e; ++i) {
+ CXXMethodDecl *overloadedMD = Data.OverloadedMethods[i];
+ Diag(overloadedMD->getLocation(),
+ diag::note_hidden_overloaded_virtual_declared_here) << overloadedMD;
+ }
+ }
}
void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
@@ -2915,7 +3126,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
SC = SC_None;
}
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
if (FTI.TypeQuals != 0) {
if (FTI.TypeQuals & Qualifiers::Const)
Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
@@ -2926,20 +3137,31 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
if (FTI.TypeQuals & Qualifiers::Restrict)
Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
<< "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
}
+ // C++0x [class.ctor]p4:
+ // A constructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_constructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
+
// Rebuild the function type "R" without any type qualifiers (in
// case any of the errors above fired) and with "void" as the
// return type, since constructors don't have return types.
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ if (Proto->getResultType() == Context.VoidTy && !D.isInvalidType())
+ return R;
+
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(),
- Proto->getNumArgs(),
- Proto->isVariadic(), 0,
- Proto->hasExceptionSpec(),
- Proto->hasAnyExceptionSpec(),
- Proto->getNumExceptions(),
- Proto->exception_begin(),
- Proto->getExtInfo());
+ Proto->getNumArgs(), EPI);
}
/// CheckConstructor - Checks a fully-formed constructor for
@@ -2977,13 +3199,6 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
Constructor->setInvalidDecl();
}
}
-
- // Notify the class that we've added a constructor. In principle we
- // don't need to do this for out-of-line declarations; in practice
- // we only instantiate the most recent declaration of a method, so
- // we have to call this for everything but friends.
- if (!Constructor->getFriendObjectKind())
- ClassDecl->addedConstructor(Context, Constructor);
}
/// CheckDestructor - Checks a fully-formed destructor definition for
@@ -3071,7 +3286,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
<< SourceRange(D.getIdentifierLoc());
}
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
if (FTI.TypeQuals != 0 && !D.isInvalidType()) {
if (FTI.TypeQuals & Qualifiers::Const)
Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
@@ -3085,6 +3300,15 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
D.setInvalidType();
}
+ // C++0x [class.dtor]p2:
+ // A destructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_destructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
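Both ref-qualifier checks reject declarations of this shape (hypothetical
examples, written against the C++0x grammar):

    struct X {
      X() & ;    // error: ref-qualifier '&' is not allowed on a constructor
      ~X() && ;  // error: ref-qualifier '&&' is not allowed on a destructor
    };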
+
// Make sure we don't have any parameters.
if (FTI.NumArgs > 0 && !FTIHasSingleVoidArgument(FTI)) {
Diag(D.getIdentifierLoc(), diag::err_destructor_with_params);
@@ -3104,16 +3328,15 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
// parameters (in case any of the errors above fired) and with
// "void" as the return type, since destructors don't have return
// types.
+ if (!D.isInvalidType())
+ return R;
+
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
- if (!Proto)
- return QualType();
-
- return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0,
- Proto->hasExceptionSpec(),
- Proto->hasAnyExceptionSpec(),
- Proto->getNumExceptions(),
- Proto->exception_begin(),
- Proto->getExtInfo());
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.Variadic = false;
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+ return Context.getFunctionType(Context.VoidTy, 0, 0, EPI);
}
/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
@@ -3161,7 +3384,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
// Delete the parameters.
- D.getTypeObject(0).Fun.freeArgs();
+ D.getFunctionTypeInfo().freeArgs();
D.setInvalidType();
} else if (Proto->isVariadic()) {
Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
@@ -3193,15 +3416,8 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// Rebuild the function type "R" without any parameters (in case any
// of the errors above fired) and with the conversion type as the
// return type.
- if (D.isInvalidType()) {
- R = Context.getFunctionType(ConvType, 0, 0, false,
- Proto->getTypeQuals(),
- Proto->hasExceptionSpec(),
- Proto->hasAnyExceptionSpec(),
- Proto->getNumExceptions(),
- Proto->exception_begin(),
- Proto->getExtInfo());
- }
+ if (D.isInvalidType())
+ R = Context.getFunctionType(ConvType, 0, 0, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
if (D.getDeclSpec().isExplicitSpecified() && !getLangOptions().CPlusPlus0x)
@@ -3234,7 +3450,10 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
= Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
ConvType = ConvTypeRef->getPointeeType();
- if (ConvType->isRecordType()) {
+ if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
+ Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ /* Suppress diagnostics for instantiations. */;
+ else if (ConvType->isRecordType()) {
ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
if (ConvType == ClassType)
Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used)
@@ -3247,25 +3466,10 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
<< ClassType << ConvType;
}
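The conversion-to-self warning fires on declarations like this sketch
(hypothetical): such an operator can never be selected, since no conversion
is needed from a class type to itself.

    struct V {
      operator V();  // warn_conv_to_self_not_used
    };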
- if (Conversion->getPrimaryTemplate()) {
- // ignore specializations
- } else if (Conversion->getPreviousDeclaration()) {
- if (FunctionTemplateDecl *ConversionTemplate
- = Conversion->getDescribedFunctionTemplate()) {
- if (ClassDecl->replaceConversion(
- ConversionTemplate->getPreviousDeclaration(),
- ConversionTemplate))
- return ConversionTemplate;
- } else if (ClassDecl->replaceConversion(Conversion->getPreviousDeclaration(),
- Conversion))
- return Conversion;
- assert(Conversion->isInvalidDecl() && "Conversion should not get here.");
- } else if (FunctionTemplateDecl *ConversionTemplate
- = Conversion->getDescribedFunctionTemplate())
- ClassDecl->addConversionFunction(ConversionTemplate);
- else
- ClassDecl->addConversionFunction(Conversion);
-
+ if (FunctionTemplateDecl *ConversionTemplate
+ = Conversion->getDescribedFunctionTemplate())
+ return ConversionTemplate;
+
return Conversion;
}
@@ -3293,20 +3497,22 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
- if (const VisibilityAttr *attr = Namespc->getAttr<VisibilityAttr>())
- PushVisibilityAttr(attr);
+ if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
+ PushNamespaceVisibilityAttr(Attr);
if (II) {
// C++ [namespace.def]p2:
- // The identifier in an original-namespace-definition shall not have been
- // previously defined in the declarative region in which the
- // original-namespace-definition appears. The identifier in an
- // original-namespace-definition is the name of the namespace. Subsequently
- // in that declarative region, it is treated as an original-namespace-name.
-
- NamedDecl *PrevDecl
- = LookupSingleName(DeclRegionScope, II, IdentLoc, LookupOrdinaryName,
- ForRedeclaration);
+ // The identifier in an original-namespace-definition shall not
+ // have been previously defined in the declarative region in
+ // which the original-namespace-definition appears. The
+ // identifier in an original-namespace-definition is the name of
+ // the namespace. Subsequently in that declarative region, it is
+ // treated as an original-namespace-name.
+ //
+ // Since namespace names are unique in their scope, and we don't
+      // look through using directives, just look for any ordinary names.
+ DeclContext::lookup_result R = CurContext->getRedeclContext()->lookup(II);
+ NamedDecl *PrevDecl = R.first == R.second? 0 : *R.first;
if (NamespaceDecl *OrigNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl)) {
// This is an extended namespace definition.
@@ -3485,6 +3691,10 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
assert(NamespcName && "Invalid NamespcName.");
assert(IdentLoc.isValid() && "Invalid NamespcName location.");
+
+ // This can only happen along a recovery path.
+ while (S->getFlags() & Scope::TemplateParamScope)
+ S = S->getParent();
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
UsingDirectiveDecl *UDir = 0;
@@ -3562,7 +3772,6 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
}
// FIXME: We ignore attributes for now.
- delete AttrList;
return UDir;
}
@@ -3580,14 +3789,14 @@ void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
Decl *Sema::ActOnUsingDeclaration(Scope *S,
- AccessSpecifier AS,
- bool HasUsingKeyword,
- SourceLocation UsingLoc,
- CXXScopeSpec &SS,
- UnqualifiedId &Name,
- AttributeList *AttrList,
- bool IsTypeName,
- SourceLocation TypenameLoc) {
+ AccessSpecifier AS,
+ bool HasUsingKeyword,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ AttributeList *AttrList,
+ bool IsTypeName,
+ SourceLocation TypenameLoc) {
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
switch (Name.getKind()) {
@@ -3633,6 +3842,10 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
<< FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
}
+ if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) ||
+ DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
+ return 0;
+
NamedDecl *UD = BuildUsingDeclaration(S, AS, UsingLoc, SS,
TargetNameInfo, AttrList,
/* IsInstantiation */ false,
@@ -3809,20 +4022,16 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
= UsingShadowDecl::Create(Context, CurContext,
UD->getLocation(), UD, Target);
UD->addShadowDecl(Shadow);
-
+
+ Shadow->setAccess(UD->getAccess());
+ if (Orig->isInvalidDecl() || UD->isInvalidDecl())
+ Shadow->setInvalidDecl();
+
if (S)
PushOnScopeChains(Shadow, S);
else
CurContext->addDecl(Shadow);
- Shadow->setAccess(UD->getAccess());
- // Register it as a conversion if appropriate.
- if (Shadow->getDeclName().getNameKind()
- == DeclarationName::CXXConversionFunctionName)
- cast<CXXRecordDecl>(CurContext)->addConversionFunction(Shadow);
-
- if (Orig->isInvalidDecl() || UD->isInvalidDecl())
- Shadow->setInvalidDecl();
return Shadow;
}
@@ -3893,7 +4102,6 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
assert(IdentLoc.isValid() && "Invalid TargetName location.");
// FIXME: We ignore attributes for now.
- delete AttrList;
if (SS.isEmpty()) {
Diag(IdentLoc, diag::err_using_requires_qualname);
@@ -3921,8 +4129,7 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
LookupQualifiedName(Previous, CurContext);
}
- NestedNameSpecifier *NNS =
- static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ NestedNameSpecifier *NNS = SS.getScopeRep();
// Check for invalid redeclarations.
if (CheckUsingDeclRedeclaration(UsingLoc, IsTypeName, SS, IdentLoc, Previous))
@@ -3962,7 +4169,14 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
return UD;
}
- // Look up the target name.
+ // Constructor inheriting using decls get special treatment.
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+  // Constructor-inheriting using decls get special treatment.
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ // Otherwise, look up the target name.
LookupResult R(*this, NameInfo, LookupOrdinaryName);
@@ -4026,6 +4240,42 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
return UD;
}
+/// Additional checks for a using declaration referring to a constructor name.
+bool Sema::CheckInheritedConstructorUsingDecl(UsingDecl *UD) {
+ if (UD->isTypeName()) {
+ // FIXME: Cannot specify typename when specifying constructor
+ return true;
+ }
+
+ const Type *SourceType = UD->getTargetNestedNameDecl()->getAsType();
+ assert(SourceType &&
+ "Using decl naming constructor doesn't have type in scope spec.");
+ CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext);
+
+ // Check whether the named type is a direct base class.
+ CanQualType CanonicalSourceType = SourceType->getCanonicalTypeUnqualified();
+ CXXRecordDecl::base_class_iterator BaseIt, BaseE;
+ for (BaseIt = TargetClass->bases_begin(), BaseE = TargetClass->bases_end();
+ BaseIt != BaseE; ++BaseIt) {
+ CanQualType BaseType = BaseIt->getType()->getCanonicalTypeUnqualified();
+ if (CanonicalSourceType == BaseType)
+ break;
+ }
+
+ if (BaseIt == BaseE) {
+ // Did not find SourceType in the bases.
+ Diag(UD->getUsingLocation(),
+ diag::err_using_decl_constructor_not_in_direct_base)
+ << UD->getNameInfo().getSourceRange()
+ << QualType(SourceType, 0) << TargetClass;
+ return true;
+ }
+
+ BaseIt->setInheritConstructors();
+
+ return false;
+}
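A sketch of what this direct-base requirement rejects (hypothetical code):

    struct A { A(int); };
    struct B : A {};
    struct C : B {
      using A::A;  // err_using_decl_constructor_not_in_direct_base:
    };             // 'A' is an indirect base of 'C', not a direct one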
+
/// Checks that the given using declaration is not an invalid
/// redeclaration. Note that this is checking only for the using decl
/// itself, not for any ill-formedness among the UsingShadowDecls.
@@ -4132,6 +4382,10 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
return true;
}
+ if (!NamedContext->isDependentContext() &&
+ RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), NamedContext))
+ return true;
+
if (getLangOptions().CPlusPlus0x) {
// C++0x [namespace.udecl]p3:
// In a using-declaration used as a member-declaration, the
@@ -4301,13 +4555,12 @@ namespace {
/// to implicitly define the body of a C++ member function;
class ImplicitlyDefinedFunctionScope {
Sema &S;
- DeclContext *PreviousContext;
+ Sema::ContextRAII SavedContext;
public:
ImplicitlyDefinedFunctionScope(Sema &S, CXXMethodDecl *Method)
- : S(S), PreviousContext(S.CurContext)
+ : S(S), SavedContext(S, Method)
{
- S.CurContext = Method;
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
@@ -4315,11 +4568,32 @@ namespace {
~ImplicitlyDefinedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionOrBlockScope();
- S.CurContext = PreviousContext;
}
};
}
+static CXXConstructorDecl *getDefaultConstructorUnsafe(Sema &Self,
+ CXXRecordDecl *D) {
+ ASTContext &Context = Self.Context;
+ QualType ClassType = Context.getTypeDeclType(D);
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType.getUnqualifiedType()));
+
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = D->lookup(ConstructorName);
+ Con != ConEnd; ++Con) {
+ // FIXME: In C++0x, a constructor template can be a default constructor.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isDefaultConstructor())
+ return Constructor;
+ }
+ return 0;
+}
+
CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl) {
// C++ [class.ctor]p5:
@@ -4347,8 +4621,8 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
if (!BaseClassDecl->hasDeclaredDefaultConstructor())
ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl));
- else if (CXXConstructorDecl *Constructor
- = BaseClassDecl->getDefaultConstructor())
+ else if (CXXConstructorDecl *Constructor
+ = getDefaultConstructorUnsafe(*this, BaseClassDecl))
ExceptSpec.CalledDecl(Constructor);
}
}
@@ -4362,7 +4636,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
if (!BaseClassDecl->hasDeclaredDefaultConstructor())
ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl));
else if (CXXConstructorDecl *Constructor
- = BaseClassDecl->getDefaultConstructor())
+ = getDefaultConstructorUnsafe(*this, BaseClassDecl))
ExceptSpec.CalledDecl(Constructor);
}
}
@@ -4378,11 +4652,16 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
ExceptSpec.CalledDecl(
DeclareImplicitDefaultConstructor(FieldClassDecl));
else if (CXXConstructorDecl *Constructor
- = FieldClassDecl->getDefaultConstructor())
+ = getDefaultConstructorUnsafe(*this, FieldClassDecl))
ExceptSpec.CalledDecl(Constructor);
}
}
-
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasExceptionSpec = ExceptSpec.hasExceptionSpecification();
+ EPI.HasAnyExceptionSpec = ExceptSpec.hasAnyExceptionSpecification();
+ EPI.NumExceptions = ExceptSpec.size();
+ EPI.Exceptions = ExceptSpec.data();
// Create the actual constructor declaration.
CanQualType ClassType
@@ -4393,12 +4672,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
CXXConstructorDecl *DefaultCon
= CXXConstructorDecl::Create(Context, ClassDecl, NameInfo,
Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0,
- ExceptSpec.hasExceptionSpecification(),
- ExceptSpec.hasAnyExceptionSpecification(),
- ExceptSpec.size(),
- ExceptSpec.data(),
- FunctionType::ExtInfo()),
+ 0, 0, EPI),
/*TInfo=*/0,
/*isExplicit=*/false,
/*isInline=*/true,
@@ -4408,7 +4682,6 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
DefaultCon->setTrivial(ClassDecl->hasTrivialConstructor());
// Note that we have declared this constructor.
- ClassDecl->setDeclaredDefaultConstructor(true);
++ASTContext::NumImplicitDefaultConstructorsDeclared;
if (Scope *S = getScopeForContext(ClassDecl))
@@ -4428,15 +4701,193 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor");
ImplicitlyDefinedFunctionScope Scope(*this, Constructor);
- ErrorTrap Trap(*this);
- if (SetBaseOrMemberInitializers(Constructor, 0, 0, /*AnyErrors=*/false) ||
+ DiagnosticErrorTrap Trap(Diags);
+ if (SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false) ||
Trap.hasErrorOccurred()) {
Diag(CurrentLocation, diag::note_member_synthesized_at)
<< CXXConstructor << Context.getTagDeclType(ClassDecl);
Constructor->setInvalidDecl();
- } else {
- Constructor->setUsed();
- MarkVTableUsed(CurrentLocation, ClassDecl);
+ return;
+ }
+
+ SourceLocation Loc = Constructor->getLocation();
+ Constructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+
+ Constructor->setUsed();
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+}
+
+void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
+ // We start with an initial pass over the base classes to collect those that
+ // inherit constructors from. If there are none, we can forgo all further
+ // processing.
+ typedef llvm::SmallVector<const RecordType *, 4> BasesVector;
+ BasesVector BasesToInheritFrom;
+ for (CXXRecordDecl::base_class_iterator BaseIt = ClassDecl->bases_begin(),
+ BaseE = ClassDecl->bases_end();
+ BaseIt != BaseE; ++BaseIt) {
+ if (BaseIt->getInheritConstructors()) {
+ QualType Base = BaseIt->getType();
+ if (Base->isDependentType()) {
+ // If we inherit constructors from anything that is dependent, just
+ // abort processing altogether. We'll get another chance for the
+ // instantiations.
+ return;
+ }
+ BasesToInheritFrom.push_back(Base->castAs<RecordType>());
+ }
+ }
+ if (BasesToInheritFrom.empty())
+ return;
+
+ // Now collect the constructors that we already have in the current class.
+ // Those take precedence over inherited constructors.
+ // C++0x [class.inhctor]p3: [...] a constructor is implicitly declared [...]
+ // unless there is a user-declared constructor with the same signature in
+ // the class where the using-declaration appears.
+ llvm::SmallSet<const Type *, 8> ExistingConstructors;
+ for (CXXRecordDecl::ctor_iterator CtorIt = ClassDecl->ctor_begin(),
+ CtorE = ClassDecl->ctor_end();
+ CtorIt != CtorE; ++CtorIt) {
+ ExistingConstructors.insert(
+ Context.getCanonicalType(CtorIt->getType()).getTypePtr());
+ }
+
+ Scope *S = getScopeForContext(ClassDecl);
+ DeclarationName CreatedCtorName =
+ Context.DeclarationNames.getCXXConstructorName(
+ ClassDecl->getTypeForDecl()->getCanonicalTypeUnqualified());
+
+ // Now comes the true work.
+ // First, we keep a map from constructor types to the base that introduced
+ // them. Needed for finding conflicting constructors. We also keep the
+ // actually inserted declarations in there, for pretty diagnostics.
+ typedef std::pair<CanQualType, CXXConstructorDecl *> ConstructorInfo;
+ typedef llvm::DenseMap<const Type *, ConstructorInfo> ConstructorToSourceMap;
+ ConstructorToSourceMap InheritedConstructors;
+ for (BasesVector::iterator BaseIt = BasesToInheritFrom.begin(),
+ BaseE = BasesToInheritFrom.end();
+ BaseIt != BaseE; ++BaseIt) {
+ const RecordType *Base = *BaseIt;
+ CanQualType CanonicalBase = Base->getCanonicalTypeUnqualified();
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getDecl());
+ for (CXXRecordDecl::ctor_iterator CtorIt = BaseDecl->ctor_begin(),
+ CtorE = BaseDecl->ctor_end();
+ CtorIt != CtorE; ++CtorIt) {
+ // Find the using declaration for inheriting this base's constructors.
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXConstructorName(CanonicalBase);
+ UsingDecl *UD = dyn_cast_or_null<UsingDecl>(
+          LookupSingleName(S, Name, SourceLocation(), LookupUsingDeclName));
+ SourceLocation UsingLoc = UD ? UD->getLocation() :
+ ClassDecl->getLocation();
+
+ // C++0x [class.inhctor]p1: The candidate set of inherited constructors
+ // from the class X named in the using-declaration consists of actual
+ // constructors and notional constructors that result from the
+ // transformation of defaulted parameters as follows:
+ // - all non-template default constructors of X, and
+ // - for each non-template constructor of X that has at least one
+ // parameter with a default argument, the set of constructors that
+ // results from omitting any ellipsis parameter specification and
+ // successively omitting parameters with a default argument from the
+ // end of the parameter-type-list.
+ CXXConstructorDecl *BaseCtor = *CtorIt;
+ bool CanBeCopyOrMove = BaseCtor->isCopyOrMoveConstructor();
+ const FunctionProtoType *BaseCtorType =
+ BaseCtor->getType()->getAs<FunctionProtoType>();
+
+ for (unsigned params = BaseCtor->getMinRequiredArguments(),
+ maxParams = BaseCtor->getNumParams();
+ params <= maxParams; ++params) {
+ // Skip default constructors. They're never inherited.
+ if (params == 0)
+ continue;
+ // Skip copy and move constructors for the same reason.
+ if (CanBeCopyOrMove && params == 1)
+ continue;
+
+ // Build up a function type for this particular constructor.
+ // FIXME: The working paper does not consider that the exception spec
+ // for the inheriting constructor might be larger than that of the
+ // source. This code doesn't yet, either.
+ const Type *NewCtorType;
+ if (params == maxParams)
+ NewCtorType = BaseCtorType;
+ else {
+ llvm::SmallVector<QualType, 16> Args;
+ for (unsigned i = 0; i < params; ++i) {
+ Args.push_back(BaseCtorType->getArgType(i));
+ }
+ FunctionProtoType::ExtProtoInfo ExtInfo =
+ BaseCtorType->getExtProtoInfo();
+ ExtInfo.Variadic = false;
+ NewCtorType = Context.getFunctionType(BaseCtorType->getResultType(),
+ Args.data(), params, ExtInfo)
+ .getTypePtr();
+ }
+ const Type *CanonicalNewCtorType =
+ Context.getCanonicalType(NewCtorType);
+
+ // Now that we have the type, first check if the class already has a
+ // constructor with this signature.
+ if (ExistingConstructors.count(CanonicalNewCtorType))
+ continue;
+
+ // Then we check if we have already declared an inherited constructor
+ // with this signature.
+ std::pair<ConstructorToSourceMap::iterator, bool> result =
+ InheritedConstructors.insert(std::make_pair(
+ CanonicalNewCtorType,
+ std::make_pair(CanonicalBase, (CXXConstructorDecl*)0)));
+ if (!result.second) {
+ // Already in the map. If it came from a different class, that's an
+ // error. Not if it's from the same.
+ CanQualType PreviousBase = result.first->second.first;
+ if (CanonicalBase != PreviousBase) {
+ const CXXConstructorDecl *PrevCtor = result.first->second.second;
+ const CXXConstructorDecl *PrevBaseCtor =
+ PrevCtor->getInheritedConstructor();
+ assert(PrevBaseCtor && "Conflicting constructor was not inherited");
+
+ Diag(UsingLoc, diag::err_using_decl_constructor_conflict);
+ Diag(BaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_current_ctor);
+ Diag(PrevBaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_ctor);
+ Diag(PrevCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_using);
+ }
+ continue;
+ }
+
+ // OK, we're there, now add the constructor.
+ // C++0x [class.inhctor]p8: [...] that would be performed by a
+          // user-written inline constructor [...]
+ DeclarationNameInfo DNI(CreatedCtorName, UsingLoc);
+ CXXConstructorDecl *NewCtor = CXXConstructorDecl::Create(
+ Context, ClassDecl, DNI, QualType(NewCtorType, 0), /*TInfo=*/0,
+ BaseCtor->isExplicit(), /*Inline=*/true,
+ /*ImplicitlyDeclared=*/true);
+ NewCtor->setAccess(BaseCtor->getAccess());
+
+ // Build up the parameter decls and add them.
+ llvm::SmallVector<ParmVarDecl *, 16> ParamDecls;
+ for (unsigned i = 0; i < params; ++i) {
+ ParamDecls.push_back(ParmVarDecl::Create(Context, NewCtor, UsingLoc,
+ /*IdentifierInfo=*/0,
+ BaseCtorType->getArgType(i),
+ /*TInfo=*/0, SC_None,
+ SC_None, /*DefaultArg=*/0));
+ }
+ NewCtor->setParams(ParamDecls.data(), ParamDecls.size());
+ NewCtor->setInheritedConstructor(BaseCtor);
+
+ PushOnScopeChains(NewCtor, S, false);
+ ClassDecl->addDecl(NewCtor);
+ result.first->second.second = NewCtor;
+ }
+ }
}
}
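Putting the loop above together, a hypothetical class using C++0x inheriting
constructors would receive these implicit declarations:

    struct B {
      B(int);
      B(int, double = 0.0);
    };
    struct D : B {
      using B::B;  // declares D(int) and D(int, double); default, copy, and
    };             // move constructors are deliberately never inherited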
@@ -4483,13 +4934,12 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
}
// Create the actual destructor declaration.
- QualType Ty = Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0,
- ExceptSpec.hasExceptionSpecification(),
- ExceptSpec.hasAnyExceptionSpecification(),
- ExceptSpec.size(),
- ExceptSpec.data(),
- FunctionType::ExtInfo());
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasExceptionSpec = ExceptSpec.hasExceptionSpecification();
+ EPI.HasAnyExceptionSpec = ExceptSpec.hasAnyExceptionSpecification();
+ EPI.NumExceptions = ExceptSpec.size();
+ EPI.Exceptions = ExceptSpec.data();
+ QualType Ty = Context.getFunctionType(Context.VoidTy, 0, 0, EPI);
CanQualType ClassType
= Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
@@ -4497,7 +4947,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
= Context.DeclarationNames.getCXXDestructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassDecl->getLocation());
CXXDestructorDecl *Destructor
- = CXXDestructorDecl::Create(Context, ClassDecl, NameInfo, Ty,
+ = CXXDestructorDecl::Create(Context, ClassDecl, NameInfo, Ty, 0,
/*isInline=*/true,
/*isImplicitlyDeclared=*/true);
Destructor->setAccess(AS_public);
@@ -4505,7 +4955,6 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
// Note that we have declared this destructor.
- ClassDecl->setDeclaredDestructor(true);
++ASTContext::NumImplicitDestructorsDeclared;
// Introduce this destructor into its scope.
@@ -4533,7 +4982,7 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
ImplicitlyDefinedFunctionScope Scope(*this, Destructor);
- ErrorTrap Trap(*this);
+ DiagnosticErrorTrap Trap(Diags);
MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
Destructor->getParent());
@@ -4545,6 +4994,9 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
return;
}
+ SourceLocation Loc = Destructor->getLocation();
+ Destructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+
Destructor->setUsed();
MarkVTableUsed(CurrentLocation, ClassDecl);
}
@@ -4644,8 +5096,8 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// Build the call to the assignment operator.
ExprResult Call = S.BuildCallToMemberFunction(/*Scope=*/0,
- OpEqualRef.takeAs<Expr>(),
- Loc, &From, 1, 0, Loc);
+ OpEqualRef.takeAs<Expr>(),
+ Loc, &From, 1, Loc);
if (Call.isInvalid())
return StmtError();
@@ -4693,26 +5145,25 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// Create a reference to the iteration variable; we'll use this several
// times throughout.
Expr *IterationVarRef
- = S.BuildDeclRefExpr(IterationVar, SizeType, Loc).takeAs<Expr>();
+ = S.BuildDeclRefExpr(IterationVar, SizeType, VK_RValue, Loc).take();
assert(IterationVarRef && "Reference to invented variable cannot fail!");
// Create the DeclStmt that holds the iteration variable.
Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc);
// Create the comparison against the array bound.
- llvm::APInt Upper = ArrayTy->getSize();
- Upper.zextOrTrunc(S.Context.getTypeSize(SizeType));
+ llvm::APInt Upper
+ = ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
Expr *Comparison
- = new (S.Context) BinaryOperator(IterationVarRef->Retain(),
- IntegerLiteral::Create(S.Context,
- Upper, SizeType, Loc),
- BO_NE, S.Context.BoolTy, Loc);
+ = new (S.Context) BinaryOperator(IterationVarRef,
+ IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
+ BO_NE, S.Context.BoolTy,
+ VK_RValue, OK_Ordinary, Loc);
// Create the pre-increment of the iteration variable.
Expr *Increment
- = new (S.Context) UnaryOperator(IterationVarRef->Retain(),
- UO_PreInc,
- SizeType, Loc);
+ = new (S.Context) UnaryOperator(IterationVarRef, UO_PreInc, SizeType,
+ VK_LValue, OK_Ordinary, Loc);
// Subscript the "from" and "to" expressions with the iteration variable.
From = AssertSuccess(S.CreateBuiltinArraySubscriptExpr(From, Loc,
@@ -4721,10 +5172,9 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
IterationVarRef, Loc));
// Build the copy for an individual element of the array.
- StmtResult Copy = BuildSingleCopyAssign(S, Loc,
- ArrayTy->getElementType(),
- To, From,
- CopyingBaseSubobject, Depth+1);
+ StmtResult Copy = BuildSingleCopyAssign(S, Loc, ArrayTy->getElementType(),
+ To, From, CopyingBaseSubobject,
+ Depth + 1);
if (Copy.isInvalid())
return StmtError();
@@ -4881,24 +5331,22 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
// An implicitly-declared copy assignment operator is an inline public
// member of its class.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasExceptionSpec = ExceptSpec.hasExceptionSpecification();
+ EPI.HasAnyExceptionSpec = ExceptSpec.hasAnyExceptionSpecification();
+ EPI.NumExceptions = ExceptSpec.size();
+ EPI.Exceptions = ExceptSpec.data();
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
DeclarationNameInfo NameInfo(Name, ClassDecl->getLocation());
CXXMethodDecl *CopyAssignment
= CXXMethodDecl::Create(Context, ClassDecl, NameInfo,
- Context.getFunctionType(RetType, &ArgType, 1,
- false, 0,
- ExceptSpec.hasExceptionSpecification(),
- ExceptSpec.hasAnyExceptionSpecification(),
- ExceptSpec.size(),
- ExceptSpec.data(),
- FunctionType::ExtInfo()),
+ Context.getFunctionType(RetType, &ArgType, 1, EPI),
/*TInfo=*/0, /*isStatic=*/false,
/*StorageClassAsWritten=*/SC_None,
/*isInline=*/true);
CopyAssignment->setAccess(AS_public);
CopyAssignment->setImplicit();
CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment());
- CopyAssignment->setCopyAssignment(true);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
@@ -4910,7 +5358,6 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setParams(&FromParam, 1);
// Note that we have added this copy-assignment operator.
- ClassDecl->setDeclaredCopyAssignment(true);
++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
if (Scope *S = getScopeForContext(ClassDecl))
@@ -4939,7 +5386,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CopyAssignOperator->setUsed();
ImplicitlyDefinedFunctionScope Scope(*this, CopyAssignOperator);
- ErrorTrap Trap(*this);
+ DiagnosticErrorTrap Trap(Diags);
// C++0x [class.copy]p30:
// The implicitly-defined or explicitly-defaulted copy assignment operator
@@ -4967,7 +5414,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Construct a reference to the "other" object. We'll be using this
// throughout the generated ASTs.
- Expr *OtherRef = BuildDeclRefExpr(Other, OtherRefType, Loc).takeAs<Expr>();
+ Expr *OtherRef = BuildDeclRefExpr(Other, OtherRefType, VK_LValue, Loc).take();
assert(OtherRef && "Reference to parameter cannot fail!");
// Construct the "this" pointer. We'll be using this throughout the generated
@@ -4982,10 +5429,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Form the assignment:
// static_cast<Base*>(this)->Base::operator=(static_cast<Base&>(other));
QualType BaseType = Base->getType().getUnqualifiedType();
- CXXRecordDecl *BaseClassDecl = 0;
- if (const RecordType *BaseRecordT = BaseType->getAs<RecordType>())
- BaseClassDecl = cast<CXXRecordDecl>(BaseRecordT->getDecl());
- else {
+ if (!BaseType->isRecordType()) {
Invalid = true;
continue;
}
@@ -4995,7 +5439,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Construct the "from" expression, which is an implicit cast to the
// appropriately-qualified base type.
- Expr *From = OtherRef->Retain();
+ Expr *From = OtherRef;
ImpCastExprToType(From, Context.getQualifiedType(BaseType, OtherQuals),
CK_UncheckedDerivedToBase,
VK_LValue, &BasePath);
@@ -5073,11 +5517,11 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
MemberLookup.addDecl(*Field);
MemberLookup.resolveKind();
ExprResult From = BuildMemberReferenceExpr(OtherRef, OtherRefType,
- Loc, /*IsArrow=*/false,
- SS, 0, MemberLookup, 0);
+ Loc, /*IsArrow=*/false,
+ SS, 0, MemberLookup, 0);
ExprResult To = BuildMemberReferenceExpr(This, This->getType(),
- Loc, /*IsArrow=*/true,
- SS, 0, MemberLookup, 0);
+ Loc, /*IsArrow=*/true,
+ SS, 0, MemberLookup, 0);
assert(!From.isInvalid() && "Implicit field reference cannot fail");
assert(!To.isInvalid() && "Implicit field reference cannot fail");
@@ -5097,8 +5541,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
= Context.getAsConstantArrayType(FieldType);
Array;
Array = Context.getAsConstantArrayType(Array->getElementType())) {
- llvm::APInt ArraySize = Array->getSize();
- ArraySize.zextOrTrunc(Size.getBitWidth());
+ llvm::APInt ArraySize
+ = Array->getSize().zextOrTrunc(Size.getBitWidth());
Size *= ArraySize;
}
@@ -5128,7 +5572,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
CollectableMemCpy->getType(),
- Loc, 0).takeAs<Expr>();
+ VK_LValue, Loc, 0).take();
assert(CollectableMemCpyRef && "Builtin reference cannot fail");
}
}
@@ -5148,7 +5592,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy,
BuiltinMemCpy->getType(),
- Loc, 0).takeAs<Expr>();
+ VK_LValue, Loc, 0).take();
assert(BuiltinMemCpyRef && "Builtin reference cannot fail");
}
@@ -5156,20 +5600,17 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CallArgs.push_back(To.takeAs<Expr>());
CallArgs.push_back(From.takeAs<Expr>());
CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc));
- llvm::SmallVector<SourceLocation, 4> Commas; // FIXME: Silly
- Commas.push_back(Loc);
- Commas.push_back(Loc);
ExprResult Call = ExprError();
if (NeedsCollectableMemCpy)
Call = ActOnCallExpr(/*Scope=*/0,
CollectableMemCpyRef,
Loc, move_arg(CallArgs),
- Commas.data(), Loc);
+ Loc);
else
Call = ActOnCallExpr(/*Scope=*/0,
BuiltinMemCpyRef,
Loc, move_arg(CallArgs),
- Commas.data(), Loc);
+ Loc);
assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
Statements.push_back(Call.takeAs<Expr>());
@@ -5352,6 +5793,11 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// An implicitly-declared copy constructor is an inline public
// member of its class.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasExceptionSpec = ExceptSpec.hasExceptionSpecification();
+ EPI.HasAnyExceptionSpec = ExceptSpec.hasAnyExceptionSpecification();
+ EPI.NumExceptions = ExceptSpec.size();
+ EPI.Exceptions = ExceptSpec.data();
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(ClassType));
@@ -5359,23 +5805,15 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CXXConstructorDecl *CopyConstructor
= CXXConstructorDecl::Create(Context, ClassDecl, NameInfo,
Context.getFunctionType(Context.VoidTy,
- &ArgType, 1,
- false, 0,
- ExceptSpec.hasExceptionSpecification(),
- ExceptSpec.hasAnyExceptionSpecification(),
- ExceptSpec.size(),
- ExceptSpec.data(),
- FunctionType::ExtInfo()),
+ &ArgType, 1, EPI),
/*TInfo=*/0,
/*isExplicit=*/false,
/*isInline=*/true,
/*isImplicitlyDeclared=*/true);
CopyConstructor->setAccess(AS_public);
- CopyConstructor->setImplicit();
CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
// Note that we have declared this constructor.
- ClassDecl->setDeclaredCopyConstructor(true);
++ASTContext::NumImplicitCopyConstructorsDeclared;
// Add the parameter to the constructor.
@@ -5405,9 +5843,9 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor");
ImplicitlyDefinedFunctionScope Scope(*this, CopyConstructor);
- ErrorTrap Trap(*this);
+ DiagnosticErrorTrap Trap(Diags);
- if (SetBaseOrMemberInitializers(CopyConstructor, 0, 0, /*AnyErrors=*/false) ||
+ if (SetCtorInitializers(CopyConstructor, 0, 0, /*AnyErrors=*/false) ||
Trap.hasErrorOccurred()) {
Diag(CurrentLocation, diag::note_member_synthesized_at)
<< CXXCopyConstructor << Context.getTagDeclType(ClassDecl);
@@ -5428,7 +5866,8 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor,
MultiExprArg ExprArgs,
bool RequiresZeroInit,
- unsigned ConstructKind) {
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
bool Elidable = false;
// C++0x [class.copy]p34:
@@ -5441,17 +5880,15 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
// with the same cv-unqualified type, the copy/move operation
// can be omitted by constructing the temporary object
// directly into the target of the omitted copy/move
- if (Constructor->isCopyConstructor() && ExprArgs.size() >= 1) {
+ if (ConstructKind == CXXConstructExpr::CK_Complete &&
+ Constructor->isCopyOrMoveConstructor() && ExprArgs.size() >= 1) {
Expr *SubExpr = ((Expr **)ExprArgs.get())[0];
- Elidable = SubExpr->isTemporaryObject() &&
- ConstructKind == CXXConstructExpr::CK_Complete &&
- Context.hasSameUnqualifiedType(SubExpr->getType(),
- Context.getTypeDeclType(Constructor->getParent()));
+ Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent());
}
return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor,
Elidable, move(ExprArgs), RequiresZeroInit,
- ConstructKind);
+ ConstructKind, ParenRange);
}
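// Illustration of the elision rule above (hypothetical user code, not
// part of this file):
//   struct S { S(); S(const S &); };
//   S s = S();  // the source is a temporary of the same class; with
//               // CK_Complete construction the copy is marked Elidable.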
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
@@ -5461,7 +5898,8 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg ExprArgs,
bool RequiresZeroInit,
- unsigned ConstructKind) {
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
unsigned NumExprs = ExprArgs.size();
Expr **Exprs = (Expr **)ExprArgs.release();
@@ -5469,21 +5907,25 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc,
Constructor, Elidable, Exprs, NumExprs,
RequiresZeroInit,
- static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind)));
+ static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
+ ParenRange));
}
bool Sema::InitializeVarWithConstructor(VarDecl *VD,
CXXConstructorDecl *Constructor,
MultiExprArg Exprs) {
+ // FIXME: Provide the correct paren SourceRange when available.
ExprResult TempResult =
BuildCXXConstructExpr(VD->getLocation(), VD->getType(), Constructor,
- move(Exprs), false, CXXConstructExpr::CK_Complete);
+ move(Exprs), false, CXXConstructExpr::CK_Complete,
+ SourceRange());
if (TempResult.isInvalid())
return true;
Expr *Temp = TempResult.takeAs<Expr>();
+ CheckImplicitConversions(Temp, VD->getLocation());
MarkDeclarationReferenced(VD->getLocation(), Constructor);
- Temp = MaybeCreateCXXExprWithTemporaries(Temp);
+ Temp = MaybeCreateExprWithCleanups(Temp);
VD->setInit(Temp);
return false;
@@ -5500,7 +5942,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
<< VD->getDeclName()
<< VD->getType());
- if (!VD->isInvalidDecl() && VD->hasGlobalStorage())
+ // TODO: this should be re-enabled for static locals when !CXAAtExit (-fno-use-cxa-atexit)

+ if (!VD->isInvalidDecl() && VD->hasGlobalStorage() && !VD->isStaticLocal())
Diag(VD->getLocation(), diag::warn_global_destructor);
}
}
@@ -5511,8 +5954,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
SourceLocation LParenLoc,
MultiExprArg Exprs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ bool TypeMayContainAuto) {
assert(Exprs.size() != 0 && Exprs.get() && "missing expressions");
// If there is no declaration, there was an error parsing it. Just ignore
@@ -5527,6 +5970,37 @@ void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
return;
}
+ // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
+ VDecl->setParsingAutoInit(false);
+
+ // FIXME: n3225 doesn't actually seem to indicate this is ill-formed
+ if (Exprs.size() > 1) {
+ Diag(Exprs.get()[1]->getSourceRange().getBegin(),
+ diag::err_auto_var_init_multiple_expressions)
+ << VDecl->getDeclName() << VDecl->getType()
+ << VDecl->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ Expr *Init = Exprs.get()[0];
+ QualType DeducedType;
+ if (!DeduceAutoType(VDecl->getType(), Init, DeducedType)) {
+ Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getType()
+ << Init->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ }
+ VDecl->setType(DeducedType);
+
+ // If this is a redeclaration, check that the type we just deduced matches
+ // the previously declared type.
+ if (VarDecl *Old = VDecl->getPreviousDeclaration())
+ MergeVarDeclTypes(VDecl, Old);
+ }
+
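// Illustration of the deduction rule above (hypothetical user code):
//   auto x(42);    // OK: 'auto' is deduced as int
//   auto y(1, 2);  // error: err_auto_var_init_multiple_expressions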
// We will represent direct-initialization similarly to copy-initialization:
// int x(1); -as-> int x = 1;
// ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
@@ -5583,11 +6057,21 @@ void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
return;
}
+ bool IsDependent = false;
+ for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
+ if (DiagnoseUnexpandedParameterPack(Exprs.get()[I], UPPC_Expression)) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ if (Exprs.get()[I]->isTypeDependent())
+ IsDependent = true;
+ }
+
// If either the declaration has a dependent type or if any of the
// expressions is type-dependent, we represent the initialization
// via a ParenListExpr for later use during template instantiation.
- if (VDecl->getType()->isDependentType() ||
- Expr::hasAnyTypeDependentArguments((Expr **)Exprs.get(), Exprs.size())) {
+ if (VDecl->getType()->isDependentType() || IsDependent) {
// Let clients know that initialization was done with a direct initializer.
VDecl->setCXXDirectInitializer(true);
@@ -5615,21 +6099,14 @@ void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
VDecl->setInvalidDecl();
return;
}
+
+ CheckImplicitConversions(Result.get(), LParenLoc);
- Result = MaybeCreateCXXExprWithTemporaries(Result.get());
+ Result = MaybeCreateExprWithCleanups(Result);
VDecl->setInit(Result.takeAs<Expr>());
VDecl->setCXXDirectInitializer(true);
- if (!VDecl->isInvalidDecl() &&
- !VDecl->getDeclContext()->isDependentContext() &&
- VDecl->hasGlobalStorage() &&
- !VDecl->getInit()->isConstantInitializer(Context,
- VDecl->getType()->isReferenceType()))
- Diag(VDecl->getLocation(), diag::warn_global_constructor)
- << VDecl->getInit()->getSourceRange();
-
- if (const RecordType *Record = VDecl->getType()->getAs<RecordType>())
- FinalizeVarWithDestructor(VDecl, Record);
+ CheckCompleteVariableDeclaration(VDecl);
}
/// \brief Given a constructor and the set of arguments provided for the
@@ -5926,15 +6403,6 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
<< LastParam->getType() << (Op == OO_MinusMinus);
}
- // Notify the class if it got an assignment operator.
- if (Op == OO_Equal) {
- // Would have returned earlier otherwise.
- assert(isa<CXXMethodDecl>(FnDecl) &&
- "Overloaded = not member, but not filtered.");
- CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
- Method->getParent()->addedAssignmentOperator(Context, Method);
- }
-
return false;
}
@@ -5964,8 +6432,6 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
cast<NonTypeTemplateParmDecl>(Params->getParam(0));
// The template parameter must be a char parameter pack.
- // FIXME: This test will always fail because non-type parameter packs
- // have not been implemented.
if (PmDecl && PmDecl->isTemplateParameterPack() &&
Context.hasSameType(PmDecl->getType(), Context.CharTy))
Valid = true;
@@ -6042,11 +6508,10 @@ FinishedParams:
/// by Lang/StrSize. LBraceLoc, if valid, provides the location of
/// the '{' brace. Otherwise, this linkage specification does not
/// have any braces.
-Decl *Sema::ActOnStartLinkageSpecification(Scope *S,
- SourceLocation ExternLoc,
- SourceLocation LangLoc,
- llvm::StringRef Lang,
- SourceLocation LBraceLoc) {
+Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ llvm::StringRef Lang,
+ SourceLocation LBraceLoc) {
LinkageSpecDecl::LanguageIDs Language;
if (Lang == "\"C\"")
Language = LinkageSpecDecl::lang_c;
@@ -6082,13 +6547,13 @@ Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
/// \brief Perform semantic analysis for the variable declaration that
/// occurs within a C++ catch clause, returning the newly-created
/// variable.
-VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
+VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
TypeSourceInfo *TInfo,
IdentifierInfo *Name,
- SourceLocation Loc,
- SourceRange Range) {
+ SourceLocation Loc) {
bool Invalid = false;
-
+ QualType ExDeclType = TInfo->getType();
+
// Arrays and functions decay.
if (ExDeclType->isArrayType())
ExDeclType = Context.getArrayDecayedType(ExDeclType);
@@ -6100,7 +6565,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
// incomplete type, other than [cv] void*.
// N2844 forbids rvalue references.
if (!ExDeclType->isDependentType() && ExDeclType->isRValueReferenceType()) {
- Diag(Loc, diag::err_catch_rvalue_ref) << Range;
+ Diag(Loc, diag::err_catch_rvalue_ref);
Invalid = true;
}
@@ -6161,7 +6626,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
ExDecl->setExceptionVariable(true);
if (!Invalid) {
- if (const RecordType *RecordTy = ExDeclType->getAs<RecordType>()) {
+ if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
// C++ [except.handle]p16:
// The object declared in an exception-declaration or, if the
// exception-declaration does not specify a name, a temporary (12.2) is
@@ -6171,18 +6636,32 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
//
// We just pretend to initialize the object with itself, then make sure
// it can be destroyed later.
- InitializedEntity Entity = InitializedEntity::InitializeVariable(ExDecl);
- Expr *ExDeclRef = DeclRefExpr::Create(Context, 0, SourceRange(), ExDecl,
- Loc, ExDeclType, 0);
- InitializationKind Kind = InitializationKind::CreateCopy(Loc,
- SourceLocation());
- InitializationSequence InitSeq(*this, Entity, Kind, &ExDeclRef, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &ExDeclRef, 1));
- if (Result.isInvalid())
+ QualType initType = ExDeclType;
+
+ InitializedEntity entity =
+ InitializedEntity::InitializeVariable(ExDecl);
+ InitializationKind initKind =
+ InitializationKind::CreateCopy(Loc, SourceLocation());
+
+ Expr *opaqueValue =
+ new (Context) OpaqueValueExpr(Loc, initType, VK_LValue, OK_Ordinary);
+ InitializationSequence sequence(*this, entity, initKind, &opaqueValue, 1);
+ ExprResult result = sequence.Perform(*this, entity, initKind,
+ MultiExprArg(&opaqueValue, 1));
+ if (result.isInvalid())
Invalid = true;
- else
- FinalizeVarWithDestructor(ExDecl, RecordTy);
+ else {
+ // If the constructor used was non-trivial, set this as the
+ // "initializer".
+ CXXConstructExpr *construct = cast<CXXConstructExpr>(result.take());
+ if (!construct->getConstructor()->isTrivial()) {
+ Expr *init = MaybeCreateExprWithCleanups(construct);
+ ExDecl->setInit(init);
+ }
+
+ // And make sure it's destructible.
+ FinalizeVarWithDestructor(ExDecl, recordType);
+ }
}
}
@@ -6196,9 +6675,16 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
/// handler.
Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
- QualType ExDeclType = TInfo->getType();
-
bool Invalid = D.isInvalidType();
+
+ // Check for unexpanded parameter packs.
+ if (TInfo && DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_ExceptionType)) {
+ TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy,
+ D.getIdentifierLoc());
+ Invalid = true;
+ }
+
IdentifierInfo *II = D.getIdentifier();
if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(),
LookupOrdinaryName,
@@ -6218,10 +6704,9 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
Invalid = true;
}
- VarDecl *ExDecl = BuildExceptionDeclaration(S, ExDeclType, TInfo,
+ VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo,
D.getIdentifier(),
- D.getIdentifierLoc(),
- D.getDeclSpec().getSourceRange());
+ D.getIdentifierLoc());
if (Invalid)
ExDecl->setInvalidDecl();
@@ -6255,6 +6740,9 @@ Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation AssertLoc,
}
}
+ if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
+ return 0;
+
Decl *Decl = StaticAssertDecl::Create(Context, CurContext, AssertLoc,
AssertExpr, AssertMessage);
@@ -6318,6 +6806,110 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation FriendLoc,
return FriendDecl::Create(Context, CurContext, FriendLoc, TSInfo, FriendLoc);
}
+/// Handle a friend tag declaration where the scope specifier was
+/// templated.
+Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TempParamLists) {
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+
+ bool isExplicitSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = TempParamLists.size();
+ bool Invalid = false;
+
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(TagLoc, SS,
+ TempParamLists.get(),
+ TempParamLists.size(),
+ /*friend*/ true,
+ isExplicitSpecialization,
+ Invalid)) {
+ --NumMatchedTemplateParamLists;
+
+ if (TemplateParams->size() > 0) {
+ // This is a declaration of a class template.
+ if (Invalid)
+ return 0;
+
+ return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc,
+ SS, Name, NameLoc, Attr,
+ TemplateParams, AS_public).take();
+ } else {
+ // The "template<>" header is extraneous.
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
+ isExplicitSpecialization = true;
+ }
+ }
+
+ if (Invalid) return 0;
+
+ assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?");
+
+ bool isAllExplicitSpecializations = true;
+ for (unsigned I = 0; I != NumMatchedTemplateParamLists; ++I) {
+ if (TempParamLists.get()[I]->size()) {
+ isAllExplicitSpecializations = false;
+ break;
+ }
+ }
+
+ // FIXME: don't ignore attributes.
+
+ // If it's explicit specializations all the way down, just forget
+ // about the template header and build an appropriate non-templated
+ // friend. TODO: for source fidelity, remember the headers.
+ if (isAllExplicitSpecializations) {
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = CheckTypenameType(Keyword, SS.getScopeRep(), *Name,
+ TagLoc, SS.getRange(), NameLoc);
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
+ TL.setKeywordLoc(TagLoc);
+ TL.setQualifierRange(SS.getRange());
+ TL.setNameLoc(NameLoc);
+ } else {
+ ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
+ TL.setKeywordLoc(TagLoc);
+ TL.setQualifierRange(SS.getRange());
+ cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(NameLoc);
+ }
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ return Friend;
+ }
+
+ // Handle the case of a templated-scope friend class. e.g.
+ // template <class T> class A<T>::B;
+ // FIXME: we don't support these right now.
+ ElaboratedTypeKeyword ETK = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = Context.getDependentNameType(ETK, SS.getScopeRep(), Name);
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
+ TL.setKeywordLoc(TagLoc);
+ TL.setQualifierRange(SS.getRange());
+ TL.setNameLoc(NameLoc);
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc);
+ Friend->setAccess(AS_public);
+ Friend->setUnsupportedFriend(true);
+ CurContext->addDecl(Friend);
+ return Friend;
+}
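// Sketch of the unsupported fallback above, mirroring the inline FIXME
// example (hypothetical user code):
//   template <class T> class A { class B; };
//   class C {
//     template <class T> friend class A<T>::B;  // templated-scope friend
//   };
// The friend is built as a DependentNameType and flagged via
// setUnsupportedFriend(true) rather than rejected outright.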
+
+
/// Handle a friend type declaration. This works in tandem with
/// ActOnTag.
///
@@ -6336,7 +6928,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation FriendLoc,
/// parameters present at all, require proper matching, i.e.
/// template <> template <class T> friend class A<int>::B;
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
- MultiTemplateParamsArg TempParams) {
+ MultiTemplateParamsArg TempParams) {
SourceLocation Loc = DS.getSourceRange().getBegin();
assert(DS.isFriendSpecified());
@@ -6351,6 +6943,9 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
if (TheDeclarator.isInvalidType())
return 0;
+ if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration))
+ return 0;
+
// This is definitely an error in C++98. It's probably meant to
// be forbidden in C++0x, too, but the specification is just
// poorly written.
@@ -6386,7 +6981,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
if (unsigned NumTempParamLists = TempParams.size())
D = FriendTemplateDecl::Create(Context, CurContext, Loc,
NumTempParamLists,
- (TemplateParameterList**) TempParams.release(),
+ TempParams.release(),
TSI,
DS.getFriendSpecLoc());
else
@@ -6401,10 +6996,8 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
return D;
}
-Decl *Sema::ActOnFriendFunctionDecl(Scope *S,
- Declarator &D,
- bool IsDefinition,
- MultiTemplateParamsArg TemplateParams) {
+Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D, bool IsDefinition,
+ MultiTemplateParamsArg TemplateParams) {
const DeclSpec &DS = D.getDeclSpec();
assert(DS.isFriendSpecified());
@@ -6447,26 +7040,94 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S,
// declared as a friend, scopes outside the innermost enclosing
// namespace scope are not considered.
- CXXScopeSpec &ScopeQual = D.getCXXScopeSpec();
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
assert(Name);
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, TInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(NameInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(SS, UPPC_FriendDeclaration))
+ return 0;
+
// The context we found the declaration in, or in which we should
// create the declaration.
DeclContext *DC;
+ Scope *DCScope = S;
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
- // FIXME: handle local classes
+ // FIXME: there are different rules in local classes
+ // There are four cases here.
+ // - There's no scope specifier, in which case we just go to the
+ // appropriate scope and look for a function or function template
+ // there as appropriate.
// Recover from invalid scope qualifiers as if they just weren't there.
- LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForRedeclaration);
- if (!ScopeQual.isInvalid() && ScopeQual.isSet()) {
- DC = computeDeclContext(ScopeQual);
+ if (SS.isInvalid() || !SS.isSet()) {
+ // C++0x [namespace.memdef]p3:
+ // If the name in a friend declaration is neither qualified nor
+ // a template-id and the declaration is a function or an
+ // elaborated-type-specifier, the lookup to determine whether
+ // the entity has been previously declared shall not consider
+ // any scopes outside the innermost enclosing namespace.
+ // C++0x [class.friend]p11:
+ // If a friend declaration appears in a local class and the name
+ // specified is an unqualified name, a prior declaration is
+ // looked up without considering scopes that are outside the
+ // innermost enclosing non-class scope. For a friend function
+ // declaration, if there is no prior declaration, the program is
+ // ill-formed.
+ bool isLocal = cast<CXXRecordDecl>(CurContext)->isLocalClass();
+ bool isTemplateId = D.getName().getKind() == UnqualifiedId::IK_TemplateId;
+
+ // Find the appropriate context according to the above.
+ DC = CurContext;
+ while (true) {
+ // Skip class contexts. If someone can cite chapter and verse
+ // for this behavior, that would be nice --- it's what GCC and
+ // EDG do, and it seems like a reasonable intent, but the spec
+ // really only says that checks for unqualified existing
+ // declarations should stop at the nearest enclosing namespace,
+ // not that they should only consider the nearest enclosing
+ // namespace.
+ while (DC->isRecord())
+ DC = DC->getParent();
+
+ LookupQualifiedName(Previous, DC);
- // FIXME: handle dependent contexts
+ // TODO: decide what we think about using declarations.
+ if (isLocal || !Previous.empty())
+ break;
+
+ if (isTemplateId) {
+ if (isa<TranslationUnitDecl>(DC)) break;
+ } else {
+ if (DC->isFileContext()) break;
+ }
+ DC = DC->getParent();
+ }
+
+ // C++ [class.friend]p1: A friend of a class is a function or
+ // class that is not a member of the class . . .
+ // C++0x changes this for both friend types and functions.
+ // Most C++ 98 compilers do seem to give an error here, so
+ // we do, too.
+ if (!Previous.empty() && DC->Equals(CurContext)
+ && !getLangOptions().CPlusPlus0x)
+ Diag(DS.getFriendSpecLoc(), diag::err_friend_is_member);
+
+ DCScope = getScopeForDeclContext(S, DC);
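// Illustration of this unqualified case (hypothetical user code):
//   namespace N {
//     void f(int);
//     struct S { friend void f(int); };  // lookup stops at namespace N
//   }                                    // and befriends N::f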
+
+ // - There's a non-dependent scope specifier, in which case we
+ // compute it and do a previous lookup there for a function
+ // or function template.
+ } else if (!SS.getScopeRep()->isDependent()) {
+ DC = computeDeclContext(SS);
if (!DC) return 0;
- if (RequireCompleteDeclContext(ScopeQual, DC)) return 0;
+
+ if (RequireCompleteDeclContext(SS, DC)) return 0;
LookupQualifiedName(Previous, DC);
@@ -6493,43 +7154,17 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S,
if (DC->Equals(CurContext))
Diag(DS.getFriendSpecLoc(), diag::err_friend_is_member);
- // Otherwise walk out to the nearest namespace scope looking for matches.
+ // - There's a scope specifier that does not match any template
+ // parameter lists, in which case we use some arbitrary context,
+ // create a method or method template, and wait for instantiation.
+ // - There's a scope specifier that does match some template
+ // parameter lists, which we don't handle right now.
} else {
- // TODO: handle local class contexts.
-
DC = CurContext;
- while (true) {
- // Skip class contexts. If someone can cite chapter and verse
- // for this behavior, that would be nice --- it's what GCC and
- // EDG do, and it seems like a reasonable intent, but the spec
- // really only says that checks for unqualified existing
- // declarations should stop at the nearest enclosing namespace,
- // not that they should only consider the nearest enclosing
- // namespace.
- while (DC->isRecord())
- DC = DC->getParent();
-
- LookupQualifiedName(Previous, DC);
-
- // TODO: decide what we think about using declarations.
- if (!Previous.empty())
- break;
-
- if (DC->isFileContext()) break;
- DC = DC->getParent();
- }
-
- // C++ [class.friend]p1: A friend of a class is a function or
- // class that is not a member of the class . . .
- // C++0x changes this for both friend types and functions.
- // Most C++ 98 compilers do seem to give an error here, so
- // we do, too.
- if (!Previous.empty() && DC->Equals(CurContext)
- && !getLangOptions().CPlusPlus0x)
- Diag(DS.getFriendSpecLoc(), diag::err_friend_is_member);
+ assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?");
}
- if (DC->isFileContext()) {
+ if (!DC->isRecord()) {
// This implies that it has to be an operator or function.
if (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ||
D.getName().getKind() == UnqualifiedId::IK_DestructorName ||
@@ -6542,7 +7177,7 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S,
}
bool Redeclaration = false;
- NamedDecl *ND = ActOnFunctionDeclarator(S, D, DC, T, TInfo, Previous,
+ NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, T, TInfo, Previous,
move(TemplateParams),
IsDefinition,
Redeclaration);
@@ -6570,6 +7205,20 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S,
FrD->setAccess(AS_public);
CurContext->addDecl(FrD);
+ if (ND->isInvalidDecl())
+ FrD->setInvalidDecl();
+ else {
+ FunctionDecl *FD;
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(ND);
+
+ // Mark templated-scope function declarations as unsupported.
+ if (FD->getNumTemplateParameterLists())
+ FrD->setUnsupportedFriend(true);
+ }
+
return ND;
}
@@ -6591,8 +7240,7 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
}
static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
- for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E;
- ++CI) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI) {
Stmt *SubStmt = *CI;
if (!SubStmt)
continue;
@@ -6676,6 +7324,10 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
diag::err_covariant_return_ambiguous_derived_to_base_conv,
// FIXME: Should this point to the return type?
New->getLocation(), SourceRange(), New->getDeclName(), 0)) {
+ // FIXME: this note won't trigger for delayed access control
+ // diagnostics, and it's impossible to get an undelayed error
+ // here from access control during the original parse because
+ // the ParsingDeclSpec/ParsingDeclarator are still in scope.
Diag(Old->getLocation(), diag::note_overridden_virtual_function);
return true;
}
@@ -6703,19 +7355,6 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
return false;
}
-bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
- const CXXMethodDecl *Old)
-{
- if (Old->hasAttr<FinalAttr>()) {
- Diag(New->getLocation(), diag::err_final_function_overridden)
- << New->getDeclName();
- Diag(Old->getLocation(), diag::note_overridden_virtual_function);
- return true;
- }
-
- return false;
-}
-
/// \brief Mark the given method pure.
///
/// \param Method the method to be marked pure.
@@ -6724,9 +7363,6 @@ bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
if (Method->isVirtual() || Method->getParent()->isDependentContext()) {
Method->setPure();
-
- // A class is abstract if at least one function is pure virtual.
- Method->getParent()->setAbstract(true);
return false;
}
@@ -6831,21 +7467,9 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
}
bool Sema::DefineUsedVTables() {
- // If any dynamic classes have their key function defined within
- // this translation unit, then those vtables are considered "used" and must
- // be emitted.
- for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) {
- if (const CXXMethodDecl *KeyFunction
- = Context.getKeyFunction(DynamicClasses[I])) {
- const FunctionDecl *Definition = 0;
- if (KeyFunction->hasBody(Definition))
- MarkVTableUsed(Definition->getLocation(), DynamicClasses[I], true);
- }
- }
-
if (VTableUses.empty())
return false;
-
+
// Note: The VTableUses vector could grow as a result of marking
// the members of a class as "used", so we check the size each
// time through the loop and prefer indices (which are stable) to
@@ -6954,13 +7578,13 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
CollectIvarsToConstructOrDestruct(OID, ivars);
if (ivars.empty())
return;
- llvm::SmallVector<CXXBaseOrMemberInitializer*, 32> AllToInit;
+ llvm::SmallVector<CXXCtorInitializer*, 32> AllToInit;
for (unsigned i = 0; i < ivars.size(); i++) {
FieldDecl *Field = ivars[i];
if (Field->isInvalidDecl())
continue;
- CXXBaseOrMemberInitializer *Member;
+ CXXCtorInitializer *Member;
InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
InitializationKind InitKind =
InitializationKind::CreateDefault(ObjCImplementation->getLocation());
@@ -6968,18 +7592,17 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
InitializationSequence InitSeq(*this, InitEntity, InitKind, 0, 0);
ExprResult MemberInit =
InitSeq.Perform(*this, InitEntity, InitKind, MultiExprArg());
- MemberInit = MaybeCreateCXXExprWithTemporaries(MemberInit.get());
+ MemberInit = MaybeCreateExprWithCleanups(MemberInit);
// Note, MemberInit could actually come back empty if no initialization
// is required (e.g., because it would call a trivial default constructor)
if (!MemberInit.get() || MemberInit.isInvalid())
continue;
-
+
Member =
- new (Context) CXXBaseOrMemberInitializer(Context,
- Field, SourceLocation(),
- SourceLocation(),
- MemberInit.takeAs<Expr>(),
- SourceLocation());
+ new (Context) CXXCtorInitializer(Context, Field, SourceLocation(),
+ SourceLocation(),
+ MemberInit.takeAs<Expr>(),
+ SourceLocation());
AllToInit.push_back(Member);
// Be sure that the destructor is accessible and is marked as referenced.
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index a6902a3..66f6f2b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -24,6 +24,19 @@
using namespace clang;
+static void DiagnoseObjCImplementedDeprecations(Sema &S,
+ NamedDecl *ND,
+ SourceLocation ImplLoc,
+ int select) {
+ if (ND && ND->getAttr<DeprecatedAttr>()) {
+ S.Diag(ImplLoc, diag::warn_deprecated_def) << select;
+ if (select == 0)
+ S.Diag(ND->getLocation(), diag::note_method_declared_at);
+ else
+ S.Diag(ND->getLocation(), diag::note_previous_decl) << "class";
+ }
+}
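// Illustration (hypothetical user code, compiled with
// -Wdeprecated-implementations):
//   @interface I
//   - (void)m __attribute__((deprecated));
//   @end
//   @implementation I
//   - (void)m {}  // warn_deprecated_def fires here, with a note
//   @end          // pointing back at the declaration of -m.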
+
/// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible
/// and user declared, in the method definition's AST.
void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
@@ -55,9 +68,23 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Introduce all of the other parameters into this scope.
for (ObjCMethodDecl::param_iterator PI = MDecl->param_begin(),
- E = MDecl->param_end(); PI != E; ++PI)
+ E = MDecl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *Param = (*PI);
+ if (!Param->isInvalidDecl() &&
+ RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type))
+ Param->setInvalidDecl();
if ((*PI)->getIdentifier())
PushOnScopeChains(*PI, FnBodyScope);
+ }
+ // Warn about implementing deprecated methods under the
+ // -Wdeprecated-implementations flag.
+ if (ObjCInterfaceDecl *IC = MDecl->getClassInterface())
+ if (ObjCMethodDecl *IMD =
+ IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()))
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IMD),
+ MDecl->getLocation(), 0);
}
Decl *Sema::
@@ -531,8 +558,14 @@ Decl *Sema::ActOnStartCategoryImplementation(
<< CatName;
Diag(CatIDecl->getImplementation()->getLocation(),
diag::note_previous_definition);
- } else
+ } else {
CatIDecl->setImplementation(CDecl);
+ // Warn about implementing a category of a deprecated class under
+ // the -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ CDecl->getLocation(), 2);
+ }
}
CheckObjCDeclScope(CDecl);
@@ -641,6 +674,11 @@ Decl *Sema::ActOnStartClassImplementation(
} else { // add it to the list.
IDecl->setImplementation(IMPDecl);
PushOnScopeChains(IMPDecl, TUScope);
+ // Warn about implementing a deprecated class under the
+ // -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ IMPDecl->getLocation(), 1);
}
return IMPDecl;
}
@@ -739,36 +777,164 @@ void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
Diag(ImpLoc, diag::warn_incomplete_impl);
IncompleteImpl = true;
}
- Diag(method->getLocation(), DiagID)
- << method->getDeclName();
+ if (DiagID == diag::warn_unimplemented_protocol_method)
+ Diag(ImpLoc, DiagID) << method->getDeclName();
+ else
+ Diag(method->getLocation(), DiagID) << method->getDeclName();
}
+/// Determines if type B can be substituted for type A. Returns true if we can
+/// guarantee that anything that the user will do to an object of type A can
+/// also be done to an object of type B. This is trivially true if the two
+/// types are the same, or if B is a subclass of A. It becomes more complex
+/// in cases where protocols are involved.
+///
+/// Object types in Objective-C describe the minimum requirements for an
+/// object, rather than providing a complete description of a type. For
+/// example, if A is a subclass of B, then B* may refer to an instance of A.
+/// The principle of substitutability means that we may use an instance of A
+/// anywhere that we may use an instance of B - it will implement all of the
+/// ivars of B and all of the methods of B.
+///
+/// This substitutability is important when type checking methods, because
+/// the implementation may have stricter type definitions than the interface.
+/// The interface specifies minimum requirements, but the implementation may
+/// have more accurate ones. For example, a method may privately accept
+/// instances of B, but only publish that it accepts instances of A. Any
+/// object passed to it will be type checked against B, and so will implicitly
+/// by a valid A*. Similarly, a method may return a subclass of the class that
+/// it is declared as returning.
+///
+/// This is most important when considering subclassing. A method in a
+/// subclass must accept any object as an argument that its superclass's
+/// implementation accepts. It may, however, accept a more general type
+/// without breaking substitutability (i.e. you can still use the subclass
+/// anywhere that you can use the superclass, but not vice versa). The
+/// converse requirement applies to return types: the return type for a
+/// subclass method must be a valid object of the kind that the superclass
+/// advertises, but it may be specified more accurately. This avoids the need
+/// for explicit down-casting by callers.
+///
+/// Note: This is a stricter requirement than for assignment.
+static bool isObjCTypeSubstitutable(ASTContext &Context,
+ const ObjCObjectPointerType *A,
+ const ObjCObjectPointerType *B,
+ bool rejectId) {
+ // Reject a protocol-unqualified id.
+ if (rejectId && B->isObjCIdType()) return false;
+
+ // If B is a qualified id, then A must also be a qualified id and it must
+ // implement all of the protocols in B. It may not be a qualified class.
+ // For example, MyClass<A> can be assigned to id<A>, but MyClass<A> is a
+ // stricter definition so it is not substitutable for id<A>.
+ if (B->isObjCQualifiedIdType()) {
+ return A->isObjCQualifiedIdType() &&
+ Context.ObjCQualifiedIdTypesAreCompatible(QualType(A, 0),
+ QualType(B,0),
+ false);
+ }
+
+ /*
+ // id is a special type that bypasses type checking completely. We want a
+ // warning when it is used in one place but not another.
+ if (C.isObjCIdType(A) || C.isObjCIdType(B)) return false;
+
+
+ // If B is a qualified id, then A must also be a qualified id (which it isn't
+ // if we've got this far)
+ if (B->isObjCQualifiedIdType()) return false;
+ */
+
+ // Now we know that A and B are (potentially-qualified) class types. The
+ // normal rules for assignment apply.
+ return Context.canAssignObjCInterfaces(A, B);
+}
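// Restating the comment's example above (hypothetical declarations):
//   @protocol P @end
//   @interface MyClass : NSObject <P> @end
// A MyClass * may be assigned to id<P>, but MyClass is the stricter
// description, so it is not substitutable for id<P> under this check.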
+
+static SourceRange getTypeRange(TypeSourceInfo *TSI) {
+ return (TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange());
+}
+
+static void CheckMethodOverrideReturn(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodIface) {
+ if (S.Context.hasSameUnqualifiedType(MethodImpl->getResultType(),
+ MethodIface->getResultType()))
+ return;
+
+ unsigned DiagID = diag::warn_conflicting_ret_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ MethodImpl->getResultType()->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ MethodIface->getResultType()->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching return types as long as they don't violate
+ // the principle of substitutability. Specifically, we permit
+ // return types that are subclasses of the declared return type,
+ // or that are more-qualified versions of the declared type.
+ if (isObjCTypeSubstitutable(S.Context, IfacePtrTy, ImplPtrTy, false))
+ return;
+
+ DiagID = diag::warn_non_covariant_ret_types;
+ }
+ }
+
+ S.Diag(MethodImpl->getLocation(), DiagID)
+ << MethodImpl->getDeclName()
+ << MethodIface->getResultType()
+ << MethodImpl->getResultType()
+ << getTypeRange(MethodImpl->getResultTypeSourceInfo());
+ S.Diag(MethodIface->getLocation(), diag::note_previous_definition)
+ << getTypeRange(MethodIface->getResultTypeSourceInfo());
+}
+
+static void CheckMethodOverrideParam(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodIface,
+ ParmVarDecl *ImplVar,
+ ParmVarDecl *IfaceVar) {
+ QualType ImplTy = ImplVar->getType();
+ QualType IfaceTy = IfaceVar->getType();
+ if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
+ return;
+
+ unsigned DiagID = diag::warn_conflicting_param_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ ImplTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ IfaceTy->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching argument types as long as they don't
+ // violate the principle of substitutability. Specifically, the
+ // implementation must accept any objects that the superclass
+ // accepts, however it may also accept others.
+ if (isObjCTypeSubstitutable(S.Context, ImplPtrTy, IfacePtrTy, true))
+ return;
+
+ DiagID = diag::warn_non_contravariant_param_types;
+ }
+ }
+
+ S.Diag(ImplVar->getLocation(), DiagID)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName() << IfaceTy << ImplTy;
+ S.Diag(IfaceVar->getLocation(), diag::note_previous_definition)
+ << getTypeRange(IfaceVar->getTypeSourceInfo());
+}
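// Sketch of what the two checks permit (hypothetical declarations):
//   @interface Base @end
//   @interface Derived : Base @end
//   @interface I : NSObject
//   - (Base *)make;                  // declared return type
//   - (void)take:(Derived *)obj;     // declared parameter type
//   @end
//   @implementation I
//   - (Derived *)make { /* ... */ }        // OK: covariant (narrower) return
//   - (void)take:(Base *)obj { /* ... */ } // OK: contravariant (wider) param
//   @end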
+
+
void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
ObjCMethodDecl *IntfMethodDecl) {
- if (!Context.typesAreCompatible(IntfMethodDecl->getResultType(),
- ImpMethodDecl->getResultType()) &&
- !Context.QualifiedIdConformsQualifiedId(IntfMethodDecl->getResultType(),
- ImpMethodDecl->getResultType())) {
- Diag(ImpMethodDecl->getLocation(), diag::warn_conflicting_ret_types)
- << ImpMethodDecl->getDeclName() << IntfMethodDecl->getResultType()
- << ImpMethodDecl->getResultType();
- Diag(IntfMethodDecl->getLocation(), diag::note_previous_definition);
- }
+ CheckMethodOverrideReturn(*this, ImpMethodDecl, IntfMethodDecl);
for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
IF = IntfMethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
- IM != EM; ++IM, ++IF) {
- QualType ParmDeclTy = (*IF)->getType().getUnqualifiedType();
- QualType ParmImpTy = (*IM)->getType().getUnqualifiedType();
- if (Context.typesAreCompatible(ParmDeclTy, ParmImpTy) ||
- Context.QualifiedIdConformsQualifiedId(ParmDeclTy, ParmImpTy))
- continue;
+ IM != EM; ++IM, ++IF)
+ CheckMethodOverrideParam(*this, ImpMethodDecl, IntfMethodDecl, *IM, *IF);
- Diag((*IM)->getLocation(), diag::warn_conflicting_param_types)
- << ImpMethodDecl->getDeclName() << (*IF)->getType()
- << (*IM)->getType();
- Diag((*IF)->getLocation(), diag::note_previous_definition);
- }
if (ImpMethodDecl->isVariadic() != IntfMethodDecl->isVariadic()) {
Diag(ImpMethodDecl->getLocation(), diag::warn_conflicting_variadic);
Diag(IntfMethodDecl->getLocation(), diag::note_previous_declaration);
@@ -835,8 +1001,10 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
IDecl->lookupInstanceMethod(method->getSelector());
if (!MethodInClass || !MethodInClass->isSynthesized()) {
unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (Diags.getDiagnosticLevel(DIAG) != Diagnostic::Ignored) {
+ if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
+ != Diagnostic::Ignored) {
WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
+ Diag(method->getLocation(), diag::note_method_declared_at);
Diag(CDecl->getLocation(), diag::note_required_for_protocol_at)
<< PDecl->getDeclName();
}
@@ -852,8 +1020,9 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
!ClsMap.count(method->getSelector()) &&
(!Super || !Super->lookupClassMethod(method->getSelector()))) {
unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (Diags.getDiagnosticLevel(DIAG) != Diagnostic::Ignored) {
+ if (Diags.getDiagnosticLevel(DIAG, ImpLoc) != Diagnostic::Ignored) {
WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
+ Diag(method->getLocation(), diag::note_method_declared_at);
Diag(IDecl->getLocation(), diag::note_required_for_protocol_at) <<
PDecl->getDeclName();
}
@@ -921,7 +1090,16 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
WarnConflictingTypedMethods(ImpMethodDecl, IntfMethodDecl);
}
}
+
if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ // Methods in class extensions also need to be looked at next.
+ for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ const_cast<ObjCCategoryDecl *>(ClsExtDecl),
+ IncompleteImpl, false);
+
// Check for any implementation of a methods declared in protocol.
for (ObjCInterfaceDecl::all_protocol_iterator
PI = I->all_referenced_protocol_begin(),
@@ -949,7 +1127,8 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
// Check and see if properties declared in the interface have either 1)
// an implementation or 2) there is a @synthesize/@dynamic implementation
// of the property in the @implementation.
- if (isa<ObjCInterfaceDecl>(CDecl) && !LangOpts.ObjCNonFragileABI2)
+ if (isa<ObjCInterfaceDecl>(CDecl) &&
+ !(LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2))
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
llvm::DenseSet<Selector> ClsMap;
@@ -1156,7 +1335,21 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
// signature.
for (ObjCMethodList *List = &Entry; List; List = List->Next)
if (MatchTwoMethodDeclarations(Method, List->Method)) {
- List->Method->setDefined(impl);
+ ObjCMethodDecl *PrevObjCMethod = List->Method;
+ PrevObjCMethod->setDefined(impl);
+ // If a method is deprecated, push it into the global pool.
+ // This is used for better diagnostics.
+ if (Method->getAttr<DeprecatedAttr>()) {
+ if (!PrevObjCMethod->getAttr<DeprecatedAttr>())
+ List->Method = Method;
+ }
+ // If the new method is unavailable, push it into the global pool
+ // unless the previous one is deprecated.
+ if (Method->getAttr<UnavailableAttr>()) {
+ if (!PrevObjCMethod->getAttr<UnavailableAttr>() &&
+ !PrevObjCMethod->getAttr<DeprecatedAttr>())
+ List->Method = Method;
+ }
return;
}
@@ -1180,7 +1373,8 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
bool strictSelectorMatch = receiverIdOrClass && warn &&
- (Diags.getDiagnosticLevel(diag::warn_strict_multiple_method_decl) !=
+ (Diags.getDiagnosticLevel(diag::warn_strict_multiple_method_decl,
+ R.getBegin()) !=
Diagnostic::Ignored);
if (warn && MethList.Method && MethList.Next) {
bool issueWarning = false;
@@ -1314,8 +1508,6 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(L, diag::warn_missing_atend);
}
- DeclContext *DC = dyn_cast<DeclContext>(ClassDecl);
-
// FIXME: Remove these and use the ObjCContainerDecl/DeclContext.
llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap;
llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap;
@@ -1335,8 +1527,8 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(Method->getLocation(), diag::err_duplicate_method_decl)
<< Method->getDeclName();
Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
} else {
- DC->addDecl(Method);
InsMap[Method->getSelector()] = Method;
/// The following allows us to typecheck messages to "id".
AddInstanceMethodToGlobalPool(Method);
@@ -1354,8 +1546,8 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(Method->getLocation(), diag::err_duplicate_method_decl)
<< Method->getDeclName();
Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
} else {
- DC->addDecl(Method);
ClsMap[Method->getSelector()] = Method;
/// The following allows us to typecheck messages to "Class".
AddFactoryMethodToGlobalPool(Method);
@@ -1377,8 +1569,10 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
// Compare protocol properties with those in category
CompareProperties(C, C);
- if (C->IsClassExtension())
- DiagnoseClassExtensionDupMethods(C, C->getClassInterface());
+ if (C->IsClassExtension()) {
+ ObjCInterfaceDecl *CCPrimary = C->getClassInterface();
+ DiagnoseClassExtensionDupMethods(C, CCPrimary);
+ }
}
if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
if (CDecl->getIdentifier())
@@ -1394,7 +1588,40 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
IC->setAtEndRange(AtEnd);
if (ObjCInterfaceDecl* IDecl = IC->getClassInterface()) {
- if (LangOpts.ObjCNonFragileABI2)
+ // Any property declared in a class extension might have a user-
+ // declared setter or getter in the current class extension or one
+ // of the other class extensions. Mark them as synthesized, as the
+ // property will be synthesized when a property with the same name
+ // is seen in the @implementation.
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ IDecl->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ for (ObjCContainerDecl::prop_iterator I = ClsExtDecl->prop_begin(),
+ E = ClsExtDecl->prop_end(); I != E; ++I) {
+ ObjCPropertyDecl *Property = (*I);
+ // Skip over properties declared @dynamic
+ if (const ObjCPropertyImplDecl *PIDecl
+ = IC->FindPropertyImplDecl(Property->getIdentifier()))
+ if (PIDecl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic)
+ continue;
+
+ for (const ObjCCategoryDecl *CExtDecl =
+ IDecl->getFirstClassExtension();
+ CExtDecl; CExtDecl = CExtDecl->getNextClassExtension()) {
+ if (ObjCMethodDecl *GetterMethod =
+ CExtDecl->getInstanceMethod(Property->getGetterName()))
+ GetterMethod->setSynthesized(true);
+ if (!Property->isReadOnly())
+ if (ObjCMethodDecl *SetterMethod =
+ CExtDecl->getInstanceMethod(Property->getSetterName()))
+ SetterMethod->setSynthesized(true);
+ }
+ }
+ }
+
+ if (LangOpts.ObjCDefaultSynthProperties &&
+ LangOpts.ObjCNonFragileABI2)
DefaultSynthesizeProperties(S, IC, IDecl);
ImplMethodsVsClassMethods(S, IC, IDecl);
AtomicPropertySetterGetterRules(IC, IDecl);
@@ -1469,6 +1696,7 @@ bool containsInvalidMethodImplAttribute(const AttrVec &A) {
}
Decl *Sema::ActOnMethodDeclaration(
+ Scope *S,
SourceLocation MethodLoc, SourceLocation EndLoc,
tok::TokenKind MethodType, Decl *ClassDecl,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
@@ -1482,7 +1710,6 @@ Decl *Sema::ActOnMethodDeclaration(
// Make sure we can establish a context for the method.
if (!ClassDecl) {
Diag(MethodLoc, diag::error_missing_method_context);
- getCurFunction()->LabelMap.clear();
return 0;
}
QualType resultDeclType;
@@ -1526,6 +1753,20 @@ Decl *Sema::ActOnMethodDeclaration(
ArgType = adjustParameterType(ArgType);
}
+ LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc,
+ LookupOrdinaryName, ForRedeclaration);
+ LookupName(R, S);
+ if (R.isSingleResult()) {
+ NamedDecl *PrevDecl = R.getFoundDecl();
+ if (S->isDeclScope(PrevDecl)) {
+ // FIXME: This should be an error, but it would break existing projects.
+ Diag(ArgInfo[i].NameLoc, diag::warn_method_param_redefinition)
+ << ArgInfo[i].Name;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_declaration);
+ }
+ }
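// Illustration of the warning above (hypothetical method):
//   - (void)moveToX:(int)p y:(int)p;  // the second 'p' redeclares the
//                                     // first: warn_method_param_redefinition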
+
ParmVarDecl* Param
= ParmVarDecl::Create(Context, ObjCMethod, ArgInfo[i].NameLoc,
ArgInfo[i].Name, ArgType, DI,
@@ -1544,9 +1785,12 @@ Decl *Sema::ActOnMethodDeclaration(
// Apply the attributes to the parameter.
ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+
Params.push_back(Param);
}
-
+
for (unsigned i = 0, e = CNumArgs; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(CParamInfo[i].Param);
QualType ArgType = Param->getType();
@@ -1562,8 +1806,7 @@ Decl *Sema::ActOnMethodDeclaration(
Param->setInvalidDecl();
}
Param->setDeclContext(ObjCMethod);
- if (Param->getDeclName())
- IdResolver.RemoveDecl(Param);
+
Params.push_back(Param);
}
@@ -1578,10 +1821,7 @@ Decl *Sema::ActOnMethodDeclaration(
const ObjCMethodDecl *InterfaceMD = 0;
- // For implementations (which can be very "coarse grain"), we add the
- // method now. This allows the AST to implement lookup methods that work
- // incrementally (without waiting until we parse the @end). It also allows
- // us to flag multiple declaration errors as they occur.
+ // Add the method now.
if (ObjCImplementationDecl *ImpDecl =
dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
if (MethodType == tok::minus) {
@@ -1608,6 +1848,8 @@ Decl *Sema::ActOnMethodDeclaration(
if (ObjCMethod->hasAttrs() &&
containsInvalidMethodImplAttribute(ObjCMethod->getAttrs()))
Diag(EndLoc, diag::warn_attribute_method_def);
+ } else {
+ cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
}
if (PrevMethod) {
// You can never have two method definitions with the same name.
@@ -1619,9 +1861,13 @@ Decl *Sema::ActOnMethodDeclaration(
// If the interface declared this method, and it was deprecated there,
// mark it deprecated here.
if (InterfaceMD)
- if (Attr *DA = InterfaceMD->getAttr<DeprecatedAttr>())
- ObjCMethod->addAttr(::new (Context) DeprecatedAttr(DA->getLocation(),
- Context));
+ if (Attr *DA = InterfaceMD->getAttr<DeprecatedAttr>()) {
+ StringLiteral *SE = StringLiteral::CreateEmpty(Context, 1);
+ ObjCMethod->addAttr(::new (Context)
+ DeprecatedAttr(DA->getLocation(),
+ Context,
+ SE->getString()));
+ }
return ObjCMethod;
}
@@ -1783,20 +2029,24 @@ void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
}
void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
- CXXBaseOrMemberInitializer ** initializers,
+ CXXCtorInitializer ** initializers,
unsigned numInitializers) {
if (numInitializers > 0) {
NumIvarInitializers = numInitializers;
- CXXBaseOrMemberInitializer **ivarInitializers =
- new (C) CXXBaseOrMemberInitializer*[NumIvarInitializers];
+ CXXCtorInitializer **ivarInitializers =
+ new (C) CXXCtorInitializer*[NumIvarInitializers];
memcpy(ivarInitializers, initializers,
- numInitializers * sizeof(CXXBaseOrMemberInitializer*));
+ numInitializers * sizeof(CXXCtorInitializer*));
IvarInitializers = ivarInitializers;
}
}
void Sema::DiagnoseUseOfUnimplementedSelectors() {
- if (ReferencedSelectors.empty())
+ // The warning is issued only when the selector table is
+ // generated (which means there is at least one implementation
+ // in the TU). This is to match gcc's behavior.
+ if (ReferencedSelectors.empty() ||
+ !Context.AnyObjCImplementation())
return;
for (llvm::DenseMap<Selector, SourceLocation>::iterator S =
ReferencedSelectors.begin(),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
index c902e77..5d7993b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -113,6 +113,9 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
if (!MissingExceptionSpecification && !MissingEmptyExceptionSpecification)
return true;
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+
// The new function declaration is only missing an empty exception
// specification "throw()". If the throw() specification came from a
// function in a system header that has C linkage, just add an empty
@@ -121,42 +124,38 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// to many libc functions as an optimization. Unfortunately, that
// optimization isn't permitted by the C++ standard, so we're forced
// to work around it here.
- if (MissingEmptyExceptionSpecification &&
- isa<FunctionProtoType>(New->getType()) &&
+ if (MissingEmptyExceptionSpecification && NewProto &&
(Old->getLocation().isInvalid() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation())) &&
Old->isExternC()) {
- const FunctionProtoType *NewProto
- = cast<FunctionProtoType>(New->getType());
+ FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
+ EPI.HasExceptionSpec = true;
+ EPI.HasAnyExceptionSpec = false;
+ EPI.NumExceptions = 0;
QualType NewType = Context.getFunctionType(NewProto->getResultType(),
NewProto->arg_type_begin(),
NewProto->getNumArgs(),
- NewProto->isVariadic(),
- NewProto->getTypeQuals(),
- true, false, 0, 0,
- NewProto->getExtInfo());
+ EPI);
New->setType(NewType);
return false;
}
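// Illustration of the workaround above (assuming a glibc-style header):
//   // in <pthread.h>, C linkage, system header:
//   extern int pthread_equal(pthread_t, pthread_t) throw();
//   // user redeclaration without the empty spec:
//   extern "C" int pthread_equal(pthread_t, pthread_t);
// Instead of diagnosing the mismatch, the empty throw() is copied
// onto the new declaration's type.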
- if (MissingExceptionSpecification && isa<FunctionProtoType>(New->getType())) {
- const FunctionProtoType *NewProto
- = cast<FunctionProtoType>(New->getType());
+ if (MissingExceptionSpecification && NewProto) {
const FunctionProtoType *OldProto
= Old->getType()->getAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
+ EPI.HasExceptionSpec = OldProto->hasExceptionSpec();
+ EPI.HasAnyExceptionSpec = OldProto->hasAnyExceptionSpec();
+ EPI.NumExceptions = OldProto->getNumExceptions();
+ EPI.Exceptions = OldProto->exception_begin();
+
// Update the type of the function with the appropriate exception
// specification.
QualType NewType = Context.getFunctionType(NewProto->getResultType(),
NewProto->arg_type_begin(),
NewProto->getNumArgs(),
- NewProto->isVariadic(),
- NewProto->getTypeQuals(),
- OldProto->hasExceptionSpec(),
- OldProto->hasAnyExceptionSpec(),
- OldProto->getNumExceptions(),
- OldProto->exception_begin(),
- NewProto->getExtInfo());
+ EPI);
New->setType(NewType);
// If exceptions are disabled, suppress the warning about missing
@@ -197,7 +196,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
SourceLocation AfterParenLoc;
if (TypeSourceInfo *TSInfo = New->getTypeSourceInfo()) {
- TypeLoc TL = TSInfo->getTypeLoc();
+ TypeLoc TL = TSInfo->getTypeLoc().IgnoreParens();
if (const FunctionTypeLoc *FTLoc = dyn_cast<FunctionTypeLoc>(&TL))
AfterParenLoc = PP.getLocForEndOfToken(FTLoc->getRParenLoc());
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index 80b4652..65b57c3 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -48,21 +49,57 @@ using namespace sema;
/// used, or produce an error (and return true) if a C++0x deleted
/// function is being used.
///
-/// If IgnoreDeprecated is set to true, this should not want about deprecated
+/// If IgnoreDeprecated is set to true, this should not warn about deprecated
/// decls.
///
/// \returns true if there was an error (this declaration cannot be
/// referenced), false otherwise.
///
-bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc) {
- // See if the decl is deprecated.
- if (D->getAttr<DeprecatedAttr>()) {
- EmitDeprecationWarning(D, Loc);
+bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+ bool UnknownObjCClass) {
+ if (getLangOptions().CPlusPlus && isa<FunctionDecl>(D)) {
+ // If there were any diagnostics suppressed by template argument deduction,
+ // emit them now.
+ llvm::DenseMap<Decl *, llvm::SmallVector<PartialDiagnosticAt, 1> >::iterator
+ Pos = SuppressedDiagnostics.find(D->getCanonicalDecl());
+ if (Pos != SuppressedDiagnostics.end()) {
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Suppressed = Pos->second;
+ for (unsigned I = 0, N = Suppressed.size(); I != N; ++I)
+ Diag(Suppressed[I].first, Suppressed[I].second);
+
+ // Clear out the list of suppressed diagnostics, so that we don't emit
+ // them again for this specialization. However, we don't remove this
+ // entry from the table, because we want to avoid ever emitting these
+ // diagnostics again.
+ Suppressed.clear();
+ }
+ }
+
+ // See if this is an auto-typed variable whose initializer we are parsing.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->isParsingAutoInit()) {
+ Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
+ << D->getDeclName();
+ return true;
+ }
}
+ // See if the decl is deprecated.
+ if (const DeprecatedAttr *DA = D->getAttr<DeprecatedAttr>())
+ EmitDeprecationWarning(D, DA->getMessage(), Loc, UnknownObjCClass);
+
// See if the decl is unavailable
- if (D->getAttr<UnavailableAttr>()) {
- Diag(Loc, diag::err_unavailable) << D->getDeclName();
+ if (const UnavailableAttr *UA = D->getAttr<UnavailableAttr>()) {
+ if (UA->getMessage().empty()) {
+ if (!UnknownObjCClass)
+ Diag(Loc, diag::err_unavailable) << D->getDeclName();
+ else
+ Diag(Loc, diag::warn_unavailable_fwdclass_message)
+ << D->getDeclName();
+ }
+ else
+ Diag(Loc, diag::err_unavailable_message)
+ << D->getDeclName() << UA->getMessage();
Diag(D->getLocation(), diag::note_unavailable_here) << 0;
}
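// Illustration of the message-carrying case (hypothetical user code):
//   void f() __attribute__((unavailable("use g() instead")));
//   f();  // err_unavailable_message: 'f' is unavailable: use g() instead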
@@ -75,6 +112,10 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc) {
}
}
+ // Warn if this is used but marked unused.
+ if (D->hasAttr<UnusedAttr>())
+ Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+
return false;
}
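The reworked DiagnoseUseOfDecl above drives diagnostics like the following; a minimal sketch using clang's GNU attribute spellings (diagnostic wording approximate, names illustrative):

int old_api(void) __attribute__((deprecated("use new_api")));
int gone(void) __attribute__((unavailable("removed")));
int helper(void) __attribute__((unused));

int use(void) {
  helper();       // warn_used_but_marked_unused: 'helper' marked unused
  old_api();      // deprecation warning now carries the attribute's message
  return gone();  // err_unavailable_message: 'gone' is unavailable: removed
}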
@@ -167,6 +208,10 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
if (!sentinelExpr) return;
if (sentinelExpr->isTypeDependent()) return;
if (sentinelExpr->isValueDependent()) return;
+
+ // nullptr_t is always treated as null.
+ if (sentinelExpr->getType()->isNullPtrType()) return;
+
if (sentinelExpr->getType()->isAnyPointerType() &&
sentinelExpr->IgnoreParenCasts()->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull))
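With the nullptr_t check above, a C++0x nullptr terminator satisfies a sentinel attribute without a pointer cast; a minimal sketch (names illustrative):

void exec(const char *cmd, ...) __attribute__((sentinel));

void demo() {
  exec("ls", "-l", (void*)0);  // previously the reliable spelling
  exec("ls", "-l", nullptr);   // now accepted: nullptr_t is treated as null
}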
@@ -208,32 +253,66 @@ void Sema::DefaultFunctionArrayConversion(Expr *&E) {
// An lvalue or rvalue of type "array of N T" or "array of unknown bound of
// T" can be converted to an rvalue of type "pointer to T".
//
- if (getLangOptions().C99 || getLangOptions().CPlusPlus ||
- E->isLvalue(Context) == Expr::LV_Valid)
+ if (getLangOptions().C99 || getLangOptions().CPlusPlus || E->isLValue())
ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
CK_ArrayToPointerDecay);
}
}
-void Sema::DefaultFunctionArrayLvalueConversion(Expr *&E) {
- DefaultFunctionArrayConversion(E);
+void Sema::DefaultLvalueConversion(Expr *&E) {
+ // C++ [conv.lval]p1:
+ // A glvalue of a non-function, non-array type T can be
+ // converted to a prvalue.
+ if (!E->isGLValue()) return;
- QualType Ty = E->getType();
- assert(!Ty.isNull() && "DefaultFunctionArrayLvalueConversion - missing type");
- if (!Ty->isDependentType() && Ty.hasQualifiers() &&
- (!getLangOptions().CPlusPlus || !Ty->isRecordType()) &&
- E->isLvalue(Context) == Expr::LV_Valid) {
- // C++ [conv.lval]p1:
- // [...] If T is a non-class type, the type of the rvalue is the
- // cv-unqualified version of T. Otherwise, the type of the
- // rvalue is T
- //
- // C99 6.3.2.1p2:
- // If the lvalue has qualified type, the value has the unqualified
- // version of the type of the lvalue; otherwise, the value has the
- // type of the lvalue.
- ImpCastExprToType(E, Ty.getUnqualifiedType(), CK_NoOp);
+ QualType T = E->getType();
+ assert(!T.isNull() && "r-value conversion on typeless expression?");
+
+ // Create a load out of an ObjCProperty l-value, if necessary.
+ if (E->getObjectKind() == OK_ObjCProperty) {
+ ConvertPropertyForRValue(E);
+ if (!E->isGLValue())
+ return;
}
+
+ // We don't want to throw lvalue-to-rvalue casts on top of
+ // expressions of certain types in C++.
+ if (getLangOptions().CPlusPlus &&
+ (E->getType() == Context.OverloadTy ||
+ T->isDependentType() ||
+ T->isRecordType()))
+ return;
+
+ // The C standard is actually really unclear on this point, and
+ // DR106 tells us what the result should be but not why. It's
+ // generally best to say that void types just don't undergo
+ // lvalue-to-rvalue at all. Note that expressions of unqualified
+ // 'void' type are never l-values, but qualified void can be.
+ if (T->isVoidType())
+ return;
+
+ // C++ [conv.lval]p1:
+ // [...] If T is a non-class type, the type of the prvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the
+ // rvalue is T.
+ //
+ // C99 6.3.2.1p2:
+ // If the lvalue has qualified type, the value has the unqualified
+ // version of the type of the lvalue; otherwise, the value has the
+ // type of the lvalue.
+ if (T.hasQualifiers())
+ T = T.getUnqualifiedType();
+
+ if (const ArraySubscriptExpr *ae = dyn_cast<ArraySubscriptExpr>(E))
+ CheckArrayAccess(ae);
+
+ E = ImplicitCastExpr::Create(Context, T, CK_LValueToRValue,
+ E, 0, VK_RValue);
+}
+
+void Sema::DefaultFunctionArrayLvalueConversion(Expr *&E) {
+ DefaultFunctionArrayConversion(E);
+ DefaultLvalueConversion(E);
}
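The split above separates the two decays from the final load; a minimal sketch of what each half now covers (names illustrative):

void take(int);

void convert() {
  int arr[4];
  const int ci = 0;
  int *p = arr;   // DefaultFunctionArrayConversion: CK_ArrayToPointerDecay
  take(ci);       // DefaultLvalueConversion: CK_LValueToRValue, and the
                  // resulting prvalue has the cv-unqualified type 'int'
  (void)p;
}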
@@ -242,36 +321,43 @@ void Sema::DefaultFunctionArrayLvalueConversion(Expr *&E) {
/// sometimes suppressed. For example, the array->pointer conversion doesn't
/// apply if the array is an argument to the sizeof or address (&) operators.
/// In these instances, this routine should *not* be called.
-Expr *Sema::UsualUnaryConversions(Expr *&Expr) {
- QualType Ty = Expr->getType();
+Expr *Sema::UsualUnaryConversions(Expr *&E) {
+ // First, convert to an r-value.
+ DefaultFunctionArrayLvalueConversion(E);
+
+ QualType Ty = E->getType();
assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
-
- // C99 6.3.1.1p2:
- //
- // The following may be used in an expression wherever an int or
- // unsigned int may be used:
- // - an object or expression with an integer type whose integer
- // conversion rank is less than or equal to the rank of int
- // and unsigned int.
- // - A bit-field of type _Bool, int, signed int, or unsigned int.
- //
- // If an int can represent all values of the original type, the
- // value is converted to an int; otherwise, it is converted to an
- // unsigned int. These are called the integer promotions. All
- // other types are unchanged by the integer promotions.
- QualType PTy = Context.isPromotableBitField(Expr);
- if (!PTy.isNull()) {
- ImpCastExprToType(Expr, PTy, CK_IntegralCast);
- return Expr;
- }
- if (Ty->isPromotableIntegerType()) {
- QualType PT = Context.getPromotedIntegerType(Ty);
- ImpCastExprToType(Expr, PT, CK_IntegralCast);
- return Expr;
+
+ // Try to perform integral promotions if the object has a theoretically
+ // promotable type.
+ if (Ty->isIntegralOrUnscopedEnumerationType()) {
+ // C99 6.3.1.1p2:
+ //
+ // The following may be used in an expression wherever an int or
+ // unsigned int may be used:
+ // - an object or expression with an integer type whose integer
+ // conversion rank is less than or equal to the rank of int
+ // and unsigned int.
+ // - A bit-field of type _Bool, int, signed int, or unsigned int.
+ //
+ // If an int can represent all values of the original type, the
+ // value is converted to an int; otherwise, it is converted to an
+ // unsigned int. These are called the integer promotions. All
+ // other types are unchanged by the integer promotions.
+
+ QualType PTy = Context.isPromotableBitField(E);
+ if (!PTy.isNull()) {
+ ImpCastExprToType(E, PTy, CK_IntegralCast);
+ return E;
+ }
+ if (Ty->isPromotableIntegerType()) {
+ QualType PT = Context.getPromotedIntegerType(Ty);
+ ImpCastExprToType(E, PT, CK_IntegralCast);
+ return E;
+ }
}
- DefaultFunctionArrayLvalueConversion(Expr);
- return Expr;
+ return E;
}
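The reordering above converts the operand to an r-value before attempting promotion; the promotions themselves are unchanged, e.g. (a minimal sketch):

void promote() {
  short s = 1;
  unsigned char c = 2;
  // Both ranks are below int and int can represent all their values,
  // so each operand is promoted to int; s + c has type int.
  int r = s + c;
  (void)r;
}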
/// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
@@ -281,12 +367,11 @@ void Sema::DefaultArgumentPromotion(Expr *&Expr) {
QualType Ty = Expr->getType();
assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type");
+ UsualUnaryConversions(Expr);
+
// If this is a 'float' (CVR qualified or typedef) promote to double.
if (Ty->isSpecificBuiltinType(BuiltinType::Float))
- return ImpCastExprToType(Expr, Context.DoubleTy,
- CK_FloatingCast);
-
- UsualUnaryConversions(Expr);
+ return ImpCastExprToType(Expr, Context.DoubleTy, CK_FloatingCast);
}
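After this reordering the usual unary conversions run first and the float-to-double promotion second; the observable result for a variadic call is unchanged (a minimal sketch):

#include <cstdio>

void vararg() {
  float f = 1.5f;
  std::printf("%f\n", f);  // f undergoes CK_FloatingCast to double,
                           // so "%f" is the correct format
}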
/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
@@ -318,7 +403,6 @@ bool Sema::DefaultVariadicArgumentPromotion(Expr *&Expr, VariadicCallType CT,
return false;
}
-
/// UsualArithmeticConversions - Performs various conversions that are common to
/// binary operators (C99 6.3.1.8). If both operands aren't arithmetic, this
/// routine returns the first non-arithmetic type found. The client is
@@ -348,19 +432,271 @@ QualType Sema::UsualArithmeticConversions(Expr *&lhsExpr, Expr *&rhsExpr,
if (!lhs->isArithmeticType() || !rhs->isArithmeticType())
return lhs;
- // Perform bitfield promotions.
+ // Apply unary and bitfield promotions to the LHS's type.
+ QualType lhs_unpromoted = lhs;
+ if (lhs->isPromotableIntegerType())
+ lhs = Context.getPromotedIntegerType(lhs);
QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(lhsExpr);
if (!LHSBitfieldPromoteTy.isNull())
lhs = LHSBitfieldPromoteTy;
- QualType RHSBitfieldPromoteTy = Context.isPromotableBitField(rhsExpr);
- if (!RHSBitfieldPromoteTy.isNull())
- rhs = RHSBitfieldPromoteTy;
+ if (lhs != lhs_unpromoted && !isCompAssign)
+ ImpCastExprToType(lhsExpr, lhs, CK_IntegralCast);
- QualType destType = Context.UsualArithmeticConversionsType(lhs, rhs);
- if (!isCompAssign)
- ImpCastExprToType(lhsExpr, destType, CK_Unknown);
- ImpCastExprToType(rhsExpr, destType, CK_Unknown);
- return destType;
+ // If both types are identical, no conversion is needed.
+ if (lhs == rhs)
+ return lhs;
+
+ // At this point, we have two different arithmetic types.
+
+ // Handle complex types first (C99 6.3.1.8p1).
+ bool LHSComplexFloat = lhs->isComplexType();
+ bool RHSComplexFloat = rhs->isComplexType();
+ if (LHSComplexFloat || RHSComplexFloat) {
+ // if we have an integer operand, the result is the complex type.
+
+ if (!RHSComplexFloat && !rhs->isRealFloatingType()) {
+ if (rhs->isIntegerType()) {
+ QualType fp = cast<ComplexType>(lhs)->getElementType();
+ ImpCastExprToType(rhsExpr, fp, CK_IntegralToFloating);
+ ImpCastExprToType(rhsExpr, lhs, CK_FloatingRealToComplex);
+ } else {
+ assert(rhs->isComplexIntegerType());
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralComplexToFloatingComplex);
+ }
+ return lhs;
+ }
+
+ if (!LHSComplexFloat && !lhs->isRealFloatingType()) {
+ if (!isCompAssign) {
+ // int -> float -> _Complex float
+ if (lhs->isIntegerType()) {
+ QualType fp = cast<ComplexType>(rhs)->getElementType();
+ ImpCastExprToType(lhsExpr, fp, CK_IntegralToFloating);
+ ImpCastExprToType(lhsExpr, rhs, CK_FloatingRealToComplex);
+ } else {
+ assert(lhs->isComplexIntegerType());
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralComplexToFloatingComplex);
+ }
+ }
+ return rhs;
+ }
+
+ // This handles complex/complex, complex/float, or float/complex.
+ // When both operands are complex, the shorter operand is converted to the
+ // type of the longer, and that is the type of the result. This corresponds
+ // to what is done when combining two real floating-point operands.
+ // The fun begins when size promotion occurs across type domains.
+ // From H&S 6.3.4: When one operand is complex and the other is a real
+ // floating-point type, the less precise type is converted, within its
+ // real or complex domain, to the precision of the other type. For example,
+ // when combining a "long double" with a "double _Complex", the
+ // "double _Complex" is promoted to "long double _Complex".
+ int order = Context.getFloatingTypeOrder(lhs, rhs);
+
+ // If both are complex, just cast to the more precise type.
+ if (LHSComplexFloat && RHSComplexFloat) {
+ if (order > 0) {
+ // _Complex float -> _Complex double
+ ImpCastExprToType(rhsExpr, lhs, CK_FloatingComplexCast);
+ return lhs;
+
+ } else if (order < 0) {
+ // _Complex float -> _Complex double
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_FloatingComplexCast);
+ return rhs;
+ }
+ return lhs;
+ }
+
+ // If just the LHS is complex, the RHS needs to be converted,
+ // and the LHS might need to be promoted.
+ if (LHSComplexFloat) {
+ if (order > 0) { // LHS is wider
+ // float -> _Complex double
+ QualType fp = cast<ComplexType>(lhs)->getElementType();
+ ImpCastExprToType(rhsExpr, fp, CK_FloatingCast);
+ ImpCastExprToType(rhsExpr, lhs, CK_FloatingRealToComplex);
+ return lhs;
+ }
+
+ // RHS is at least as wide. Find its corresponding complex type.
+ QualType result = (order == 0 ? lhs : Context.getComplexType(rhs));
+
+ // double -> _Complex double
+ ImpCastExprToType(rhsExpr, result, CK_FloatingRealToComplex);
+
+ // _Complex float -> _Complex double
+ if (!isCompAssign && order < 0)
+ ImpCastExprToType(lhsExpr, result, CK_FloatingComplexCast);
+
+ return result;
+ }
+
+ // Just the RHS is complex, so the LHS needs to be converted
+ // and the RHS might need to be promoted.
+ assert(RHSComplexFloat);
+
+ if (order < 0) { // RHS is wider
+ // float -> _Complex double
+ if (!isCompAssign) {
+ QualType fp = cast<ComplexType>(rhs)->getElementType();
+ ImpCastExprToType(lhsExpr, fp, CK_FloatingCast);
+ ImpCastExprToType(lhsExpr, rhs, CK_FloatingRealToComplex);
+ }
+ return rhs;
+ }
+
+ // LHS is at least as wide. Find its corresponding complex type.
+ QualType result = (order == 0 ? rhs : Context.getComplexType(lhs));
+
+ // double -> _Complex double
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, result, CK_FloatingRealToComplex);
+
+ // _Complex float -> _Complex double
+ if (order > 0)
+ ImpCastExprToType(rhsExpr, result, CK_FloatingComplexCast);
+
+ return result;
+ }
+
+ // Now handle "real" floating types (i.e. float, double, long double).
+ bool LHSFloat = lhs->isRealFloatingType();
+ bool RHSFloat = rhs->isRealFloatingType();
+ if (LHSFloat || RHSFloat) {
+ // If we have two real floating types, convert the smaller operand
+ // to the bigger result.
+ if (LHSFloat && RHSFloat) {
+ int order = Context.getFloatingTypeOrder(lhs, rhs);
+ if (order > 0) {
+ ImpCastExprToType(rhsExpr, lhs, CK_FloatingCast);
+ return lhs;
+ }
+
+ assert(order < 0 && "illegal float comparison");
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_FloatingCast);
+ return rhs;
+ }
+
+ // If we have an integer operand, the result is the real floating type.
+ if (LHSFloat) {
+ if (rhs->isIntegerType()) {
+ // Convert rhs to the lhs floating point type.
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralToFloating);
+ return lhs;
+ }
+
+ // Convert both sides to the appropriate complex float.
+ assert(rhs->isComplexIntegerType());
+ QualType result = Context.getComplexType(lhs);
+
+ // _Complex int -> _Complex float
+ ImpCastExprToType(rhsExpr, result, CK_IntegralComplexToFloatingComplex);
+
+ // float -> _Complex float
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, result, CK_FloatingRealToComplex);
+
+ return result;
+ }
+
+ assert(RHSFloat);
+ if (lhs->isIntegerType()) {
+ // Convert lhs to the rhs floating point type.
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralToFloating);
+ return rhs;
+ }
+
+ // Convert both sides to the appropriate complex float.
+ assert(lhs->isComplexIntegerType());
+ QualType result = Context.getComplexType(rhs);
+
+ // _Complex int -> _Complex float
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, result, CK_IntegralComplexToFloatingComplex);
+
+ // float -> _Complex float
+ ImpCastExprToType(rhsExpr, result, CK_FloatingRealToComplex);
+
+ return result;
+ }
+
+ // Handle GCC complex int extension.
+ // FIXME: if the operands are (int, _Complex long), we currently
+ // don't promote the complex. Also, signedness?
+ const ComplexType *lhsComplexInt = lhs->getAsComplexIntegerType();
+ const ComplexType *rhsComplexInt = rhs->getAsComplexIntegerType();
+ if (lhsComplexInt && rhsComplexInt) {
+ int order = Context.getIntegerTypeOrder(lhsComplexInt->getElementType(),
+ rhsComplexInt->getElementType());
+ assert(order && "unequal types with equal element ordering");
+ if (order > 0) {
+ // _Complex int -> _Complex long
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralComplexCast);
+ return lhs;
+ }
+
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralComplexCast);
+ return rhs;
+ } else if (lhsComplexInt) {
+ // int -> _Complex int
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralRealToComplex);
+ return lhs;
+ } else if (rhsComplexInt) {
+ // int -> _Complex int
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralRealToComplex);
+ return rhs;
+ }
+
+ // Finally, we have two differing integer types.
+ // The rules for this case are in C99 6.3.1.8
+ int compare = Context.getIntegerTypeOrder(lhs, rhs);
+ bool lhsSigned = lhs->hasSignedIntegerRepresentation(),
+ rhsSigned = rhs->hasSignedIntegerRepresentation();
+ if (lhsSigned == rhsSigned) {
+ // Same signedness; use the higher-ranked type
+ if (compare >= 0) {
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralCast);
+ return lhs;
+ } else if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralCast);
+ return rhs;
+ } else if (compare != (lhsSigned ? 1 : -1)) {
+ // The unsigned type has greater than or equal rank to the
+ // signed type, so use the unsigned type
+ if (rhsSigned) {
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralCast);
+ return lhs;
+ } else if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralCast);
+ return rhs;
+ } else if (Context.getIntWidth(lhs) != Context.getIntWidth(rhs)) {
+ // The two types are different widths; if we are here, that
+ // means the signed type is larger than the unsigned type, so
+ // use the signed type.
+ if (lhsSigned) {
+ ImpCastExprToType(rhsExpr, lhs, CK_IntegralCast);
+ return lhs;
+ } else if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, rhs, CK_IntegralCast);
+ return rhs;
+ } else {
+ // The signed type is higher-ranked than the unsigned type,
+ // but isn't actually any bigger (like unsigned int and long
+ // on most 32-bit systems). Use the unsigned type corresponding
+ // to the signed type.
+ QualType result =
+ Context.getCorrespondingUnsignedType(lhsSigned ? lhs : rhs);
+ ImpCastExprToType(rhsExpr, result, CK_IntegralCast);
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, result, CK_IntegralCast);
+ return result;
+ }
}
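The integer branch at the end of this rewrite encodes the C99 6.3.1.8 rank rules; two of its cases, as user-visible behavior (a minimal sketch; the LP64 remark is an assumption about the target):

void ranks() {
  // unsigned int has rank >= int: -1 converts to unsigned int, so false.
  bool a = -1 < 1u;
  // long outranks unsigned int; on LP64 long is also wider, so 1u
  // converts to long and the comparison is true there. Where long and
  // unsigned int have equal width, the corresponding unsigned type
  // (unsigned long) is used instead and -1L converts to ULONG_MAX.
  bool b = -1L < 1u;
  (void)a; (void)b;
}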
//===----------------------------------------------------------------------===//
@@ -409,245 +745,366 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
StringTokLocs.size()));
}
-/// ShouldSnapshotBlockValueReference - Return true if a reference inside of
-/// CurBlock to VD should cause it to be snapshotted (as we do for auto
-/// variables defined outside the block) or false if this is not needed (e.g.
-/// for values inside the block or for globals).
+enum CaptureResult {
+ /// No capture is required.
+ CR_NoCapture,
+
+ /// A capture is required.
+ CR_Capture,
+
+ /// A by-ref capture is required.
+ CR_CaptureByRef,
+
+ /// An error occurred when trying to capture the given variable.
+ CR_Error
+};
+
+/// Diagnose an uncapturable value reference.
///
-/// This also keeps the 'hasBlockDeclRefExprs' in the BlockScopeInfo records
-/// up-to-date.
+/// \param var - the variable referenced
+/// \param DC - the context which we couldn't capture through
+static CaptureResult
+diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
+ VarDecl *var, DeclContext *DC) {
+ switch (S.ExprEvalContexts.back().Context) {
+ case Sema::Unevaluated:
+ // The argument will never be evaluated, so don't complain.
+ return CR_NoCapture;
+
+ case Sema::PotentiallyEvaluated:
+ case Sema::PotentiallyEvaluatedIfUsed:
+ break;
+
+ case Sema::PotentiallyPotentiallyEvaluated:
+ // FIXME: delay these!
+ break;
+ }
+
+ // Don't diagnose the capture failure if we're not actually in code right
+ // now; in general, there are more appropriate places that will
+ // diagnose this.
+ if (!S.CurContext->isFunctionOrMethod()) return CR_NoCapture;
+
+ // This particular madness can happen in ill-formed default
+ // arguments; claim it's okay and let downstream code handle it.
+ if (isa<ParmVarDecl>(var) &&
+ S.CurContext == var->getDeclContext()->getParent())
+ return CR_NoCapture;
+
+ DeclarationName functionName;
+ if (FunctionDecl *fn = dyn_cast<FunctionDecl>(var->getDeclContext()))
+ functionName = fn->getDeclName();
+ // FIXME: variable from enclosing block that we couldn't capture from!
+
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
+ << var->getIdentifier() << functionName;
+ S.Diag(var->getLocation(), diag::note_local_variable_declared_here)
+ << var->getIdentifier();
+
+ return CR_Error;
+}
+
+/// There is a well-formed capture at a particular scope level;
+/// propagate it through all the nested blocks.
+static CaptureResult propagateCapture(Sema &S, unsigned validScopeIndex,
+ const BlockDecl::Capture &capture) {
+ VarDecl *var = capture.getVariable();
+
+ // Update all the inner blocks with the capture information.
+ for (unsigned i = validScopeIndex + 1, e = S.FunctionScopes.size();
+ i != e; ++i) {
+ BlockScopeInfo *innerBlock = cast<BlockScopeInfo>(S.FunctionScopes[i]);
+ innerBlock->Captures.push_back(
+ BlockDecl::Capture(capture.getVariable(), capture.isByRef(),
+ /*nested*/ true, capture.getCopyExpr()));
+ innerBlock->CaptureMap[var] = innerBlock->Captures.size(); // +1
+ }
+
+ return capture.isByRef() ? CR_CaptureByRef : CR_Capture;
+}
+
+/// shouldCaptureValueReference - Determine if a reference to the
+/// given value in the current context requires a variable capture.
///
-static bool ShouldSnapshotBlockValueReference(Sema &S, BlockScopeInfo *CurBlock,
- ValueDecl *VD) {
- // If the value is defined inside the block, we couldn't snapshot it even if
- // we wanted to.
- if (CurBlock->TheDecl == VD->getDeclContext())
- return false;
+/// This also keeps the captures set in the BlockScopeInfo records
+/// up-to-date.
+static CaptureResult shouldCaptureValueReference(Sema &S, SourceLocation loc,
+ ValueDecl *value) {
+ // Only variables ever require capture.
+ VarDecl *var = dyn_cast<VarDecl>(value);
+ if (!var) return CR_NoCapture;
- // If this is an enum constant or function, it is constant, don't snapshot.
- if (isa<EnumConstantDecl>(VD) || isa<FunctionDecl>(VD))
- return false;
+ // Fast path: variables from the current context never require capture.
+ DeclContext *DC = S.CurContext;
+ if (var->getDeclContext() == DC) return CR_NoCapture;
- // If this is a reference to an extern, static, or global variable, no need to
- // snapshot it.
+ // Only variables with local storage require capture.
// FIXME: What about 'const' variables in C++?
- if (const VarDecl *Var = dyn_cast<VarDecl>(VD))
- if (!Var->hasLocalStorage())
- return false;
+ if (!var->hasLocalStorage()) return CR_NoCapture;
- // Blocks that have these can't be constant.
- CurBlock->hasBlockDeclRefExprs = true;
+ // Otherwise, we need to capture.
- // If we have nested blocks, the decl may be declared in an outer block (in
- // which case that outer block doesn't get "hasBlockDeclRefExprs") or it may
- // be defined outside all of the current blocks (in which case the blocks do
- // all get the bit). Walk the nesting chain.
- for (unsigned I = S.FunctionScopes.size() - 1; I; --I) {
- BlockScopeInfo *NextBlock = dyn_cast<BlockScopeInfo>(S.FunctionScopes[I]);
+ unsigned functionScopesIndex = S.FunctionScopes.size() - 1;
+ do {
+ // Only blocks (and eventually C++0x closures) can capture; other
+ // scopes don't work.
+ if (!isa<BlockDecl>(DC))
+ return diagnoseUncapturableValueReference(S, loc, var, DC);
- if (!NextBlock)
- continue;
+ BlockScopeInfo *blockScope =
+ cast<BlockScopeInfo>(S.FunctionScopes[functionScopesIndex]);
+ assert(blockScope->TheDecl == static_cast<BlockDecl*>(DC));
- // If we found the defining block for the variable, don't mark the block as
- // having a reference outside it.
- if (NextBlock->TheDecl == VD->getDeclContext())
- break;
+ // Check whether we've already captured it in this block. If so,
+ // we're done.
+ if (unsigned indexPlus1 = blockScope->CaptureMap[var])
+ return propagateCapture(S, functionScopesIndex,
+ blockScope->Captures[indexPlus1 - 1]);
+
+ functionScopesIndex--;
+ DC = cast<BlockDecl>(DC)->getDeclContext();
+ } while (var->getDeclContext() != DC);
- // Otherwise, the DeclRef from the inner block causes the outer one to need
- // a snapshot as well.
- NextBlock->hasBlockDeclRefExprs = true;
+ // Okay, we descended all the way to the block that defines the variable.
+ // Actually try to capture it.
+ QualType type = var->getType();
+
+ // Prohibit variably-modified types.
+ if (type->isVariablyModifiedType()) {
+ S.Diag(loc, diag::err_ref_vm_type);
+ S.Diag(var->getLocation(), diag::note_declared_at);
+ return CR_Error;
}
- return true;
-}
+ // Prohibit arrays, even in __block variables, but not references to
+ // them.
+ if (type->isArrayType()) {
+ S.Diag(loc, diag::err_ref_array_type);
+ S.Diag(var->getLocation(), diag::note_declared_at);
+ return CR_Error;
+ }
+
+ S.MarkDeclarationReferenced(loc, var);
+
+ // The BlocksAttr indicates the variable is bound by-reference.
+ bool byRef = var->hasAttr<BlocksAttr>();
+
+ // Build a copy expression.
+ Expr *copyExpr = 0;
+ if (!byRef && S.getLangOptions().CPlusPlus &&
+ !type->isDependentType() && type->isStructureOrClassType()) {
+ // According to the blocks spec, the capture of a variable from
+ // the stack requires a const copy constructor. This is not true
+ // of the copy/move done to move a __block variable to the heap.
+ type.addConst();
+
+ Expr *declRef = new (S.Context) DeclRefExpr(var, type, VK_LValue, loc);
+ ExprResult result =
+ S.PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(var->getLocation(),
+ type, false),
+ loc, S.Owned(declRef));
+
+ // Build a full-expression copy expression if initialization
+ // succeeded and used a non-trivial constructor. Recover from
+ // errors by pretending that the copy isn't necessary.
+ if (!result.isInvalid() &&
+ !cast<CXXConstructExpr>(result.get())->getConstructor()->isTrivial()) {
+ result = S.MaybeCreateExprWithCleanups(result);
+ copyExpr = result.take();
+ }
+ }
+
+ // We're currently at the declarer; go back to the closure.
+ functionScopesIndex++;
+ BlockScopeInfo *blockScope =
+ cast<BlockScopeInfo>(S.FunctionScopes[functionScopesIndex]);
+
+ // Build a valid capture in this scope.
+ blockScope->Captures.push_back(
+ BlockDecl::Capture(var, byRef, /*nested*/ false, copyExpr));
+ blockScope->CaptureMap[var] = blockScope->Captures.size(); // +1
+
+ // Propagate that to inner captures if necessary.
+ return propagateCapture(S, functionScopesIndex,
+ blockScope->Captures.back());
+}
+
+static ExprResult BuildBlockDeclRefExpr(Sema &S, ValueDecl *vd,
+ const DeclarationNameInfo &NameInfo,
+ bool byRef) {
+ assert(isa<VarDecl>(vd) && "capturing non-variable");
+
+ VarDecl *var = cast<VarDecl>(vd);
+ assert(var->hasLocalStorage() && "capturing non-local");
+ assert(byRef == var->hasAttr<BlocksAttr>() && "byref set wrong");
+
+ QualType exprType = var->getType().getNonReferenceType();
+
+ BlockDeclRefExpr *BDRE;
+ if (!byRef) {
+ // The variable will be bound by copy; make it const within the
+ // closure, but record that this was done in the expression.
+ bool constAdded = !exprType.isConstQualified();
+ exprType.addConst();
+
+ BDRE = new (S.Context) BlockDeclRefExpr(var, exprType, VK_LValue,
+ NameInfo.getLoc(), false,
+ constAdded);
+ } else {
+ BDRE = new (S.Context) BlockDeclRefExpr(var, exprType, VK_LValue,
+ NameInfo.getLoc(), true);
+ }
+ return S.Owned(BDRE);
+}
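The capture machinery above replaces the old hasBlockDeclRefExprs bookkeeping; in terms of user code it models captures like these (a minimal sketch, assuming clang's -fblocks extension):

void blocks_demo() {
  int byCopy = 10;        // captured by copy: const inside the block
  __block int byRef = 0;  // BlocksAttr: captured by reference
  void (^blk)(void) = ^{
    byRef = byCopy + 1;   // reads the const copy, writes through the ref
    // byCopy = 1;        // would be an error: read-only capture
  };
  blk();
}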
ExprResult
-Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, SourceLocation Loc,
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ SourceLocation Loc,
const CXXScopeSpec *SS) {
DeclarationNameInfo NameInfo(D->getDeclName(), Loc);
- return BuildDeclRefExpr(D, Ty, NameInfo, SS);
+ return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS);
}
-/// BuildDeclRefExpr - Build a DeclRefExpr.
+/// BuildDeclRefExpr - Build an expression that references a
+/// declaration that does not require a closure capture.
ExprResult
-Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty,
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS) {
- if (Context.getCanonicalType(Ty) == Context.UndeducedAutoTy) {
- Diag(NameInfo.getLoc(),
- diag::err_auto_variable_cannot_appear_in_own_initializer)
- << D->getDeclName();
- return ExprError();
- }
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (isa<NonTypeTemplateParmDecl>(VD)) {
- // Non-type template parameters can be referenced anywhere they are
- // visible.
- Ty = Ty.getNonLValueExprType(Context);
- } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) {
- if (const FunctionDecl *FD = MD->getParent()->isLocalClass()) {
- if (VD->hasLocalStorage() && VD->getDeclContext() != CurContext) {
- Diag(NameInfo.getLoc(),
- diag::err_reference_to_local_var_in_enclosing_function)
- << D->getIdentifier() << FD->getDeclName();
- Diag(D->getLocation(), diag::note_local_variable_declared_here)
- << D->getIdentifier();
- return ExprError();
- }
- }
- }
- }
-
MarkDeclarationReferenced(NameInfo.getLoc(), D);
- return Owned(DeclRefExpr::Create(Context,
+ Expr *E = DeclRefExpr::Create(Context,
SS? (NestedNameSpecifier *)SS->getScopeRep() : 0,
- SS? SS->getRange() : SourceRange(),
- D, NameInfo, Ty));
-}
+ SS? SS->getRange() : SourceRange(),
+ D, NameInfo, Ty, VK);
-/// \brief Given a field that represents a member of an anonymous
-/// struct/union, build the path from that field's context to the
-/// actual member.
-///
-/// Construct the sequence of field member references we'll have to
-/// perform to get to the field in the anonymous union/struct. The
-/// list of members is built from the field outward, so traverse it
-/// backwards to go from an object in the current context to the field
-/// we found.
-///
-/// \returns The variable from which the field access should begin,
-/// for an anonymous struct/union that is not a member of another
-/// class. Otherwise, returns NULL.
-VarDecl *Sema::BuildAnonymousStructUnionMemberPath(FieldDecl *Field,
- llvm::SmallVectorImpl<FieldDecl *> &Path) {
- assert(Field->getDeclContext()->isRecord() &&
- cast<RecordDecl>(Field->getDeclContext())->isAnonymousStructOrUnion()
- && "Field must be stored inside an anonymous struct or union");
-
- Path.push_back(Field);
- VarDecl *BaseObject = 0;
- DeclContext *Ctx = Field->getDeclContext();
- do {
- RecordDecl *Record = cast<RecordDecl>(Ctx);
- ValueDecl *AnonObject = Record->getAnonymousStructOrUnionObject();
- if (FieldDecl *AnonField = dyn_cast<FieldDecl>(AnonObject))
- Path.push_back(AnonField);
- else {
- BaseObject = cast<VarDecl>(AnonObject);
- break;
- }
- Ctx = Ctx->getParent();
- } while (Ctx->isRecord() &&
- cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion());
+ // Just in case we're building an illegal pointer-to-member.
+ if (isa<FieldDecl>(D) && cast<FieldDecl>(D)->getBitWidth())
+ E->setObjectKind(OK_BitField);
- return BaseObject;
+ return Owned(E);
}
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo);
+
ExprResult
-Sema::BuildAnonymousStructUnionMemberReference(SourceLocation Loc,
- FieldDecl *Field,
- Expr *BaseObjectExpr,
- SourceLocation OpLoc) {
- llvm::SmallVector<FieldDecl *, 4> AnonFields;
- VarDecl *BaseObject = BuildAnonymousStructUnionMemberPath(Field,
- AnonFields);
-
- // Build the expression that refers to the base object, from
- // which we will build a sequence of member references to each
- // of the anonymous union objects and, eventually, the field we
- // found via name lookup.
- bool BaseObjectIsPointer = false;
- Qualifiers BaseQuals;
- if (BaseObject) {
- // BaseObject is an anonymous struct/union variable (and is,
- // therefore, not part of another non-anonymous record).
- MarkDeclarationReferenced(Loc, BaseObject);
- BaseObjectExpr = new (Context) DeclRefExpr(BaseObject,BaseObject->getType(),
- SourceLocation());
- BaseQuals
- = Context.getCanonicalType(BaseObject->getType()).getQualifiers();
- } else if (BaseObjectExpr) {
+Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation loc,
+ IndirectFieldDecl *indirectField,
+ Expr *baseObjectExpr,
+ SourceLocation opLoc) {
+ // First, build the expression that refers to the base object.
+
+ bool baseObjectIsPointer = false;
+ Qualifiers baseQuals;
+
+ // Case 1: the base of the indirect field is not a field.
+ VarDecl *baseVariable = indirectField->getVarDecl();
+ CXXScopeSpec EmptySS;
+ if (baseVariable) {
+ assert(baseVariable->getType()->isRecordType());
+
+ // In principle we could have a member access expression that
+ // accesses an anonymous struct/union that's a static member of
+ // the base object's class. However, under the current standard,
+ // static data members cannot be anonymous structs or unions.
+ // Supporting this is as easy as building a MemberExpr here.
+ assert(!baseObjectExpr && "anonymous struct/union is static data member?");
+
+ DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
+
+ ExprResult result =
+ BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
+ if (result.isInvalid()) return ExprError();
+
+ baseObjectExpr = result.take();
+ baseObjectIsPointer = false;
+ baseQuals = baseObjectExpr->getType().getQualifiers();
+
+ // Case 2: the base of the indirect field is a field and the user
+ // wrote a member expression.
+ } else if (baseObjectExpr) {
// The caller provided the base object expression. Determine
// whether it's a pointer and whether it adds any qualifiers to the
// anonymous struct/union fields we're looking into.
- QualType ObjectType = BaseObjectExpr->getType();
- if (const PointerType *ObjectPtr = ObjectType->getAs<PointerType>()) {
- BaseObjectIsPointer = true;
- ObjectType = ObjectPtr->getPointeeType();
+ QualType objectType = baseObjectExpr->getType();
+
+ if (const PointerType *ptr = objectType->getAs<PointerType>()) {
+ baseObjectIsPointer = true;
+ objectType = ptr->getPointeeType();
+ } else {
+ baseObjectIsPointer = false;
}
- BaseQuals
- = Context.getCanonicalType(ObjectType).getQualifiers();
+ baseQuals = objectType.getQualifiers();
+
+ // Case 3: the base of the indirect field is a field and we should
+ // build an implicit member access.
} else {
// We've found a member of an anonymous struct/union that is
// inside a non-anonymous struct/union, so in a well-formed
// program our base object expression is "this".
- DeclContext *DC = getFunctionLevelDeclContext();
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) {
- if (!MD->isStatic()) {
- QualType AnonFieldType
- = Context.getTagDeclType(
- cast<RecordDecl>(AnonFields.back()->getDeclContext()));
- QualType ThisType = Context.getTagDeclType(MD->getParent());
- if ((Context.getCanonicalType(AnonFieldType)
- == Context.getCanonicalType(ThisType)) ||
- IsDerivedFrom(ThisType, AnonFieldType)) {
- // Our base object expression is "this".
- BaseObjectExpr = new (Context) CXXThisExpr(Loc,
- MD->getThisType(Context),
- /*isImplicit=*/true);
- BaseObjectIsPointer = true;
- }
- } else {
- return ExprError(Diag(Loc,diag::err_invalid_member_use_in_static_method)
- << Field->getDeclName());
- }
- BaseQuals = Qualifiers::fromCVRMask(MD->getTypeQualifiers());
+ CXXMethodDecl *method = tryCaptureCXXThis();
+ if (!method) {
+ Diag(loc, diag::err_invalid_member_use_in_static_method)
+ << indirectField->getDeclName();
+ return ExprError();
}
- if (!BaseObjectExpr)
- return ExprError(Diag(Loc, diag::err_invalid_non_static_member_use)
- << Field->getDeclName());
+ // Our base object expression is "this".
+ baseObjectExpr =
+ new (Context) CXXThisExpr(loc, method->getThisType(Context),
+ /*isImplicit=*/ true);
+ baseObjectIsPointer = true;
+ baseQuals = Qualifiers::fromCVRMask(method->getTypeQualifiers());
}
// Build the implicit member references to the field of the
// anonymous struct/union.
- Expr *Result = BaseObjectExpr;
- Qualifiers ResultQuals = BaseQuals;
- for (llvm::SmallVector<FieldDecl *, 4>::reverse_iterator
- FI = AnonFields.rbegin(), FIEnd = AnonFields.rend();
- FI != FIEnd; ++FI) {
- QualType MemberType = (*FI)->getType();
- Qualifiers MemberTypeQuals =
- Context.getCanonicalType(MemberType).getQualifiers();
+ Expr *result = baseObjectExpr;
+ IndirectFieldDecl::chain_iterator
+ FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
- // CVR attributes from the base are picked up by members,
- // except that 'mutable' members don't pick up 'const'.
- if ((*FI)->isMutable())
- ResultQuals.removeConst();
+ // Build the first member access in the chain with full information.
+ if (!baseVariable) {
+ FieldDecl *field = cast<FieldDecl>(*FI);
- // GC attributes are never picked up by members.
- ResultQuals.removeObjCGCAttr();
+ // FIXME: use the real found-decl info!
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
- // TR 18037 does not allow fields to be declared with address spaces.
- assert(!MemberTypeQuals.hasAddressSpace());
+ // Make a nameInfo that properly uses the anonymous name.
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
- Qualifiers NewQuals = ResultQuals + MemberTypeQuals;
- if (NewQuals != MemberTypeQuals)
- MemberType = Context.getQualifiedType(MemberType, NewQuals);
+ result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
+ EmptySS, field, foundDecl,
+ memberNameInfo).take();
+ baseObjectIsPointer = false;
- MarkDeclarationReferenced(Loc, *FI);
- PerformObjectMemberConversion(Result, /*FIXME:Qualifier=*/0, *FI, *FI);
- // FIXME: Might this end up being a qualified name?
- Result = new (Context) MemberExpr(Result, BaseObjectIsPointer, *FI,
- OpLoc, MemberType);
- BaseObjectIsPointer = false;
- ResultQuals = NewQuals;
+ // FIXME: check qualified member access
}
- return Owned(Result);
+ // In all cases, we should now skip the first declaration in the chain.
+ ++FI;
+
+ while (FI != FEnd) {
+ FieldDecl *field = cast<FieldDecl>(*FI++);
+
+ // FIXME: these are somewhat meaningless
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
+
+ result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
+ (FI == FEnd? SS : EmptySS), field,
+ foundDecl, memberNameInfo)
+ .take();
+ }
+
+ return Owned(result);
}
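The IndirectFieldDecl chain walked above corresponds to source like the following; for 's.b' the chain is the anonymous union's field followed by 'b' (a minimal sketch, names illustrative):

struct S {
  union {      // anonymous union: its members are injected into S
    int a;
    float b;
  };
};

void touch(S &s) {
  s.b = 1.0f;  // rebuilt as s.<anonymous>.b via the indirect field's chain
}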
/// Decomposes the given name into a DeclarationNameInfo, its location, and
@@ -684,28 +1141,6 @@ static void DecomposeUnqualifiedId(Sema &SemaRef,
}
}
-/// Determines whether the given record is "fully-formed" at the given
-/// location, i.e. whether a qualified lookup into it is assured of
-/// getting consistent results already.
-static bool IsFullyFormedScope(Sema &SemaRef, CXXRecordDecl *Record) {
- if (!Record->hasDefinition())
- return false;
-
- for (CXXRecordDecl::base_class_iterator I = Record->bases_begin(),
- E = Record->bases_end(); I != E; ++I) {
- CanQualType BaseT = SemaRef.Context.getCanonicalType((*I).getType());
- CanQual<RecordType> BaseRT = BaseT->getAs<RecordType>();
- if (!BaseRT) return false;
-
- CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
- if (!BaseRecord->hasDefinition() ||
- !IsFullyFormedScope(SemaRef, BaseRecord))
- return false;
- }
-
- return true;
-}
-
/// Determines if the given class is provably not derived from all of
/// the prospective base classes.
static bool IsProvablyNotDerivedFrom(Sema &SemaRef,
@@ -757,10 +1192,6 @@ enum IMAKind {
/// context is not an instance method.
IMA_Unresolved_StaticContext,
- /// The reference is to a member of an anonymous structure in a
- /// non-class context.
- IMA_AnonymousMember,
-
/// All possible referents are instance members and the current
/// context is not an instance method.
IMA_Error_StaticContext,
@@ -790,19 +1221,16 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
// Collect all the declaring classes of instance members we find.
bool hasNonInstance = false;
+ bool hasField = false;
llvm::SmallPtrSet<CXXRecordDecl*, 4> Classes;
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
NamedDecl *D = *I;
+
if (D->isCXXInstanceMember()) {
- CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
+ if (dyn_cast<FieldDecl>(D))
+ hasField = true;
- // If this is a member of an anonymous record, move out to the
- // innermost non-anonymous struct or union. If there isn't one,
- // that's a special case.
- while (R->isAnonymousStructOrUnion()) {
- R = dyn_cast<CXXRecordDecl>(R->getParent());
- if (!R) return IMA_AnonymousMember;
- }
+ CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
Classes.insert(R->getCanonicalDecl());
}
else
@@ -816,8 +1244,24 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
// If the current context is not an instance method, it can't be
// an implicit member reference.
- if (isStaticContext)
- return (hasNonInstance ? IMA_Mixed_StaticContext : IMA_Error_StaticContext);
+ if (isStaticContext) {
+ if (hasNonInstance)
+ return IMA_Mixed_StaticContext;
+
+ if (SemaRef.getLangOptions().CPlusPlus0x && hasField) {
+ // C++0x [expr.prim.general]p10:
+ // An id-expression that denotes a non-static data member or non-static
+ // member function of a class can only be used:
+ // (...)
+ // - if that id-expression denotes a non-static data member and it appears in an unevaluated operand.
+ const Sema::ExpressionEvaluationContextRecord& record = SemaRef.ExprEvalContexts.back();
+ bool isUnevaluatedExpression = record.Context == Sema::Unevaluated;
+ if (isUnevaluatedExpression)
+ return IMA_Mixed_StaticContext;
+ }
+
+ return IMA_Error_StaticContext;
+ }
// If we can prove that the current context is unrelated to all the
// declaring classes, it can't be an implicit member reference (in
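The C++0x carve-out added above permits naming a non-static data member in an unevaluated operand even from a static context; a minimal sketch (names illustrative):

struct W {
  int member;
  static void probe() {
    (void)sizeof(member);  // OK in C++0x: unevaluated operand
    // member = 1;         // still an error: evaluated use with no object
  }
};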
@@ -833,23 +1277,24 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
/// Diagnose a reference to a field with no object available.
static void DiagnoseInstanceReference(Sema &SemaRef,
const CXXScopeSpec &SS,
- const LookupResult &R) {
- SourceLocation Loc = R.getNameLoc();
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
+ SourceLocation Loc = nameInfo.getLoc();
SourceRange Range(Loc);
if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
- if (R.getAsSingle<FieldDecl>()) {
+ if (isa<FieldDecl>(rep) || isa<IndirectFieldDecl>(rep)) {
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(SemaRef.CurContext)) {
if (MD->isStatic()) {
// "invalid use of member 'x' in static member function"
SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
- << Range << R.getLookupName();
+ << Range << nameInfo.getName();
return;
}
}
SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
- << R.getLookupName() << Range;
+ << nameInfo.getName() << Range;
return;
}
@@ -1001,25 +1446,41 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
return true;
}
-static ObjCPropertyDecl *OkToSynthesizeProvisionalIvar(Sema &SemaRef,
- IdentifierInfo *II,
- SourceLocation NameLoc) {
- ObjCMethodDecl *CurMeth = SemaRef.getCurMethodDecl();
+ObjCPropertyDecl *Sema::canSynthesizeProvisionalIvar(IdentifierInfo *II) {
+ ObjCMethodDecl *CurMeth = getCurMethodDecl();
ObjCInterfaceDecl *IDecl = CurMeth->getClassInterface();
if (!IDecl)
return 0;
ObjCImplementationDecl *ClassImpDecl = IDecl->getImplementation();
if (!ClassImpDecl)
return 0;
- ObjCPropertyDecl *property = SemaRef.LookupPropertyDecl(IDecl, II);
+ ObjCPropertyDecl *property = LookupPropertyDecl(IDecl, II);
if (!property)
return 0;
if (ObjCPropertyImplDecl *PIDecl = ClassImpDecl->FindPropertyImplDecl(II))
- if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic ||
+ PIDecl->getPropertyIvarDecl())
return 0;
return property;
}
+bool Sema::canSynthesizeProvisionalIvar(ObjCPropertyDecl *Property) {
+ ObjCMethodDecl *CurMeth = getCurMethodDecl();
+ ObjCInterfaceDecl *IDecl = CurMeth->getClassInterface();
+ if (!IDecl)
+ return false;
+ ObjCImplementationDecl *ClassImpDecl = IDecl->getImplementation();
+ if (!ClassImpDecl)
+ return false;
+ if (ObjCPropertyImplDecl *PIDecl
+ = ClassImpDecl->FindPropertyImplDecl(Property->getIdentifier()))
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic ||
+ PIDecl->getPropertyIvarDecl())
+ return false;
+
+ return true;
+}
+
static ObjCIvarDecl *SynthesizeProvisionalIvar(Sema &SemaRef,
LookupResult &Lookup,
IdentifierInfo *II,
@@ -1032,7 +1493,8 @@ static ObjCIvarDecl *SynthesizeProvisionalIvar(Sema &SemaRef,
LookForIvars = false;
else
LookForIvars = (Lookup.isSingleResult() &&
- Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod());
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod() &&
+ (Lookup.getAsSingle<VarDecl>() != 0));
if (!LookForIvars)
return 0;
@@ -1046,15 +1508,20 @@ static ObjCIvarDecl *SynthesizeProvisionalIvar(Sema &SemaRef,
ObjCPropertyDecl *property = SemaRef.LookupPropertyDecl(IDecl, II);
if (!property)
return 0;
- if (ObjCPropertyImplDecl *PIDecl = ClassImpDecl->FindPropertyImplDecl(II))
+ if (ObjCPropertyImplDecl *PIDecl = ClassImpDecl->FindPropertyImplDecl(II)) {
DynamicImplSeen =
(PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+ // The property implementation has a designated ivar. No need to
+ // synthesize a new one.
+ if (!DynamicImplSeen && PIDecl->getPropertyIvarDecl())
+ return 0;
+ }
if (!DynamicImplSeen) {
QualType PropType = SemaRef.Context.getCanonicalType(property->getType());
ObjCIvarDecl *Ivar = ObjCIvarDecl::Create(SemaRef.Context, ClassImpDecl,
NameLoc,
II, PropType, /*Dinfo=*/0,
- ObjCIvarDecl::Protected,
+ ObjCIvarDecl::Private,
(Expr *)0, true);
ClassImpDecl->addDecl(Ivar);
IDecl->makeDeclVisibleInContext(Ivar, false);
@@ -1102,22 +1569,18 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
Name.getCXXNameType()->isDependentType()) {
DependentID = true;
} else if (SS.isSet()) {
- DeclContext *DC = computeDeclContext(SS, false);
- if (DC) {
+ if (DeclContext *DC = computeDeclContext(SS, false)) {
if (RequireCompleteDeclContext(SS, DC))
return ExprError();
- // FIXME: We should be checking whether DC is the current instantiation.
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
- DependentID = !IsFullyFormedScope(*this, RD);
} else {
DependentID = true;
}
}
- if (DependentID) {
+ if (DependentID)
return ActOnDependentIdExpression(SS, NameInfo, isAddressOfOperand,
TemplateArgs);
- }
+
bool IvarLookupFollowUp = false;
// Perform the required lookup.
LookupResult R(*this, NameInfo, LookupOrdinaryName);
@@ -1130,10 +1593,21 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
bool MemberOfUnknownSpecialization;
LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
MemberOfUnknownSpecialization);
+
+ if (MemberOfUnknownSpecialization ||
+ (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
+ return ActOnDependentIdExpression(SS, NameInfo, isAddressOfOperand,
+ TemplateArgs);
} else {
IvarLookupFollowUp = (!SS.isSet() && II && getCurMethodDecl());
LookupParsedName(R, S, &SS, !IvarLookupFollowUp);
+ // If the result might be in a dependent base class, this is a dependent
+ // id-expression.
+ if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ return ActOnDependentIdExpression(SS, NameInfo, isAddressOfOperand,
+ TemplateArgs);
+
// If this reference is in an Objective-C method, then we need to do
// some special Objective-C lookup, too.
if (IvarLookupFollowUp) {
@@ -1141,13 +1615,21 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
if (E.isInvalid())
return ExprError();
- Expr *Ex = E.takeAs<Expr>();
- if (Ex) return Owned(Ex);
- // Synthesize ivars lazily
- if (getLangOptions().ObjCNonFragileABI2) {
- if (SynthesizeProvisionalIvar(*this, R, II, NameLoc))
+ if (Expr *Ex = E.takeAs<Expr>())
+ return Owned(Ex);
+
+ // Synthesize ivars lazily.
+ if (getLangOptions().ObjCDefaultSynthProperties &&
+ getLangOptions().ObjCNonFragileABI2) {
+ if (SynthesizeProvisionalIvar(*this, R, II, NameLoc)) {
+ if (const ObjCPropertyDecl *Property =
+ canSynthesizeProvisionalIvar(II)) {
+ Diag(NameLoc, diag::warn_synthesized_ivar_access) << II;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
return ActOnIdExpression(S, SS, Id, HasTrailingLParen,
isAddressOfOperand);
+ }
}
// For further use, this must be set to false if we are in a class method.
IvarLookupFollowUp = getCurMethodDecl()->isInstanceMethod();
@@ -1195,33 +1677,16 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
if (VarDecl *Var = R.getAsSingle<VarDecl>()) {
if (getLangOptions().ObjCNonFragileABI && IvarLookupFollowUp &&
- !getLangOptions().ObjCNonFragileABI2 &&
+ !(getLangOptions().ObjCDefaultSynthProperties &&
+ getLangOptions().ObjCNonFragileABI2) &&
Var->isFileVarDecl()) {
- ObjCPropertyDecl *Property =
- OkToSynthesizeProvisionalIvar(*this, II, NameLoc);
+ ObjCPropertyDecl *Property = canSynthesizeProvisionalIvar(II);
if (Property) {
Diag(NameLoc, diag::warn_ivar_variable_conflict) << Var->getDeclName();
Diag(Property->getLocation(), diag::note_property_declare);
Diag(Var->getLocation(), diag::note_global_declared_at);
}
}
- } else if (FunctionDecl *Func = R.getAsSingle<FunctionDecl>()) {
- if (!getLangOptions().CPlusPlus && !Func->hasPrototype()) {
- // C99 DR 316 says that, if a function type comes from a
- // function definition (without a prototype), that type is only
- // used for checking compatibility. Therefore, when referencing
- // the function, we pretend that we don't have the full function
- // type.
- if (DiagnoseUseOfDecl(Func, NameLoc))
- return ExprError();
-
- QualType T = Func->getType();
- QualType NoProtoType = T;
- if (const FunctionProtoType *Proto = T->getAs<FunctionProtoType>())
- NoProtoType = Context.getFunctionNoProtoType(Proto->getResultType(),
- Proto->getExtInfo());
- return BuildDeclRefExpr(Func, NoProtoType, NameLoc, &SS);
- }
}
// Check whether this might be a C++ implicit instance member access.
@@ -1259,7 +1724,8 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
else if (R.isUnresolvableResult())
MightBeImplicitMember = true;
else
- MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl());
+ MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl()) ||
+ isa<IndirectFieldDecl>(R.getFoundDecl());
if (MightBeImplicitMember)
return BuildPossibleImplicitMemberExpr(SS, R, TemplateArgs);
@@ -1280,11 +1746,6 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
case IMA_Instance:
return BuildImplicitMemberExpr(SS, R, TemplateArgs, true);
- case IMA_AnonymousMember:
- assert(R.isSingleResult());
- return BuildAnonymousStructUnionMemberReference(R.getNameLoc(),
- R.getAsSingle<FieldDecl>());
-
case IMA_Mixed:
case IMA_Mixed_Unrelated:
case IMA_Unresolved:
@@ -1299,7 +1760,8 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
case IMA_Error_StaticContext:
case IMA_Error_Unrelated:
- DiagnoseInstanceReference(*this, SS, R);
+ DiagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
return ExprError();
}
@@ -1400,11 +1862,17 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
SelfName.setIdentifier(&II, SourceLocation());
CXXScopeSpec SelfScopeSpec;
ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec,
- SelfName, false, false);
+ SelfName, false, false);
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ Expr *SelfE = SelfExpr.take();
+ DefaultLvalueConversion(SelfE);
+
MarkDeclarationReferenced(Loc, IV);
return Owned(new (Context)
ObjCIvarRefExpr(IV, IV->getType(), Loc,
- SelfExpr.takeAs<Expr>(), true, true));
+ SelfE, true, true));
}
} else if (CurMethod->isInstanceMethod()) {
// We should warn if a local variable hides an ivar.
@@ -1607,6 +2075,7 @@ static MemberExpr *BuildMemberExpr(ASTContext &C, Expr *Base, bool isArrow,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo,
QualType Ty,
+ ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = 0) {
NestedNameSpecifier *Qualifier = 0;
SourceRange QualifierRange;
@@ -1617,7 +2086,65 @@ static MemberExpr *BuildMemberExpr(ASTContext &C, Expr *Base, bool isArrow,
return MemberExpr::Create(C, Base, isArrow, Qualifier, QualifierRange,
Member, FoundDecl, MemberNameInfo,
- TemplateArgs, Ty);
+ TemplateArgs, Ty, VK, OK);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo) {
+ // x.a is an l-value if 'a' has a reference type. Otherwise:
+ // x.a is an l-value/x-value/pr-value if the base is (and note
+ // that *x is always an l-value), except that if the base isn't
+ // an ordinary object then we must have an rvalue.
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+ if (!IsArrow) {
+ if (BaseExpr->getObjectKind() == OK_Ordinary)
+ VK = BaseExpr->getValueKind();
+ else
+ VK = VK_RValue;
+ }
+ if (VK != VK_RValue && Field->isBitField())
+ OK = OK_BitField;
+
+ // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
+ QualType MemberType = Field->getType();
+ if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
+ MemberType = Ref->getPointeeType();
+ VK = VK_LValue;
+ } else {
+ QualType BaseType = BaseExpr->getType();
+ if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+
+ // GC attributes are never picked up by members.
+ BaseQuals.removeObjCGCAttr();
+
+ // CVR attributes from the base are picked up by members,
+ // except that 'mutable' members don't pick up 'const'.
+ if (Field->isMutable()) BaseQuals.removeConst();
+
+ Qualifiers MemberQuals
+ = S.Context.getCanonicalType(MemberType).getQualifiers();
+
+ // TR 18037 does not allow fields to be declared with address spaces.
+ assert(!MemberQuals.hasAddressSpace());
+
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ MemberType = S.Context.getQualifiedType(MemberType, Combined);
+ }
+
+ S.MarkDeclarationReferenced(MemberNameInfo.getLoc(), Field);
+ if (S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
+ FoundDecl, Field))
+ return ExprError();
+ return S.Owned(BuildMemberExpr(S.Context, BaseExpr, IsArrow, SS,
+ Field, FoundDecl, MemberNameInfo,
+ MemberType, VK, OK));
}
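The qualifier merging in BuildFieldReferenceExpr is user-visible in cases like this (a minimal sketch, names illustrative): members pick up the base's cv-qualifiers, except that mutable members shed const:

struct C {
  int plain;
  mutable int cached;
};

void quals(const C &c) {
  c.cached = 42;   // OK: a mutable member doesn't pick up the base's const
  // c.plain = 42; // error: c.plain has type 'const int'
}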
/// Builds an implicit member access expression. The current context
@@ -1631,29 +2158,30 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
bool IsKnownInstance) {
assert(!R.empty() && !R.isAmbiguous());
- SourceLocation Loc = R.getNameLoc();
+ SourceLocation loc = R.getNameLoc();
// We may have found a field within an anonymous union or struct
// (C++ [class.union]).
- // FIXME: This needs to happen post-isImplicitMemberReference?
// FIXME: template-ids inside anonymous structs?
- if (FieldDecl *FD = R.getAsSingle<FieldDecl>())
- if (cast<RecordDecl>(FD->getDeclContext())->isAnonymousStructOrUnion())
- return BuildAnonymousStructUnionMemberReference(Loc, FD);
+ if (IndirectFieldDecl *FD = R.getAsSingle<IndirectFieldDecl>())
+ return BuildAnonymousStructUnionMemberReference(SS, R.getNameLoc(), FD);
- // If this is known to be an instance access, go ahead and build a
+ // If this is known to be an instance access, go ahead and build an
+ // implicit 'this' expression now.
// 'this' expression now.
- DeclContext *DC = getFunctionLevelDeclContext();
- QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
- Expr *This = 0; // null signifies implicit access
+ CXXMethodDecl *method = tryCaptureCXXThis();
+ assert(method && "didn't correctly pre-flight capture of 'this'");
+
+ QualType thisType = method->getThisType(Context);
+ Expr *baseExpr = 0; // null signifies implicit access
if (IsKnownInstance) {
SourceLocation Loc = R.getNameLoc();
if (SS.getRange().isValid())
Loc = SS.getRange().getBegin();
- This = new (Context) CXXThisExpr(Loc, ThisType, /*isImplicit=*/true);
+ baseExpr = new (Context) CXXThisExpr(loc, thisType, /*isImplicit=*/true);
}
- return BuildMemberReferenceExpr(This, ThisType,
+ return BuildMemberReferenceExpr(baseExpr, thisType,
/*OpLoc*/ SourceLocation(),
/*IsArrow*/ true,
SS,
@@ -1762,10 +2290,8 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// we've picked a target.
R.suppressDiagnostics();
- bool Dependent
- = UnresolvedLookupExpr::ComputeDependence(R.begin(), R.end(), 0);
UnresolvedLookupExpr *ULE
- = UnresolvedLookupExpr::Create(Context, Dependent, R.getNamingClass(),
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
(NestedNameSpecifier*) SS.getScopeRep(),
SS.getRange(), R.getLookupNameInfo(),
NeedsADL, R.isOverloadedResult(),
@@ -1774,7 +2300,6 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return Owned(ULE);
}
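For reference, the UnresolvedLookupExpr built here carries an overload set that may still need argument-dependent lookup at the call site, roughly this situation (a minimal sketch, names illustrative):

namespace N {
  struct X {};
  void f(X);   // only reachable from the call below via ADL
}

void call(N::X x) {
  f(x);        // unqualified 'f': the lookup result is resolved with ADL
}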
-
/// \brief Complete semantic analysis for a reference to the given declaration.
ExprResult
Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
@@ -1817,6 +2342,14 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
if (VD->isInvalidDecl())
return ExprError();
+ // Handle members of anonymous structs and unions. If we got here,
+ // and the reference is to a class member indirect field, then this
+ // must be the subject of a pointer-to-member expression.
+ if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD))
+ if (!indirectField->isCXXClassMember())
+ return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
+ indirectField);
+
// If the identifier reference is inside a block, and it refers to a value
// that is outside the block, create a BlockDeclRefExpr instead of a
// DeclRefExpr. This ensures the value is treated as a copy-in snapshot when
@@ -1825,59 +2358,140 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// We do not do this for things like enum constants, global variables, etc,
// as they do not get snapshotted.
//
- if (getCurBlock() &&
- ShouldSnapshotBlockValueReference(*this, getCurBlock(), VD)) {
- if (VD->getType().getTypePtr()->isVariablyModifiedType()) {
- Diag(Loc, diag::err_ref_vm_type);
- Diag(D->getLocation(), diag::note_declared_at);
+ switch (shouldCaptureValueReference(*this, NameInfo.getLoc(), VD)) {
+ case CR_Error:
+ return ExprError();
+
+ case CR_Capture:
+ assert(!SS.isSet() && "referenced local variable with scope specifier?");
+ return BuildBlockDeclRefExpr(*this, VD, NameInfo, /*byref*/ false);
+
+ case CR_CaptureByRef:
+ assert(!SS.isSet() && "referenced local variable with scope specifier?");
+ return BuildBlockDeclRefExpr(*this, VD, NameInfo, /*byref*/ true);
+
+ case CR_NoCapture: {
+ // If this reference is not in a block or if the referenced
+ // variable is within the block, create a normal DeclRefExpr.
+
+ QualType type = VD->getType();
+ ExprValueKind valueKind = VK_RValue;
+
+ switch (D->getKind()) {
+ // Ignore all the non-ValueDecl kinds.
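+ // (Including DeclNodes.inc with VALUE defined to nothing expands to a
+ // case label for every declaration kind that is not a ValueDecl.)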
+#define ABSTRACT_DECL(kind)
+#define VALUE(type, base)
+#define DECL(type, base) \
+ case Decl::type:
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("invalid value decl kind");
return ExprError();
- }
- if (VD->getType()->isArrayType()) {
- Diag(Loc, diag::err_ref_array_type);
- Diag(D->getLocation(), diag::note_declared_at);
+ // These shouldn't make it here.
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCIvar:
+ llvm_unreachable("forming non-member reference to ivar?");
return ExprError();
+
+ // Enum constants are always r-values and never references.
+ // Unresolved using declarations are dependent.
+ case Decl::EnumConstant:
+ case Decl::UnresolvedUsingValue:
+ valueKind = VK_RValue;
+ break;
+
+ // Fields and indirect fields that got here must be for
+ // pointer-to-member expressions; we just call them l-values for
+ // internal consistency, because this subexpression doesn't really
+ // exist in the high-level semantics.
+ case Decl::Field:
+ case Decl::IndirectField:
+ assert(getLangOptions().CPlusPlus &&
+ "building reference to field in C?");
+
+ // These can't have reference type in well-formed programs, but
+ // for internal consistency we do this anyway.
+ type = type.getNonReferenceType();
+ valueKind = VK_LValue;
+ break;
+
+ // Non-type template parameters are either l-values or r-values
+ // depending on the type.
+ case Decl::NonTypeTemplateParm: {
+ if (const ReferenceType *reftype = type->getAs<ReferenceType>()) {
+ type = reftype->getPointeeType();
+ valueKind = VK_LValue; // even if the parameter is an r-value reference
+ break;
+ }
+
+ // For non-references, we need to strip qualifiers just in case
+ // the template parameter was declared as 'const int' or whatever.
+ valueKind = VK_RValue;
+ type = type.getUnqualifiedType();
+ break;
}
- MarkDeclarationReferenced(Loc, VD);
- QualType ExprTy = VD->getType().getNonReferenceType();
- // The BlocksAttr indicates the variable is bound by-reference.
- if (VD->getAttr<BlocksAttr>())
- return Owned(new (Context) BlockDeclRefExpr(VD, ExprTy, Loc, true));
- // This is to record that a 'const' was actually synthesize and added.
- bool constAdded = !ExprTy.isConstQualified();
- // Variable will be bound by-copy, make it const within the closure.
-
- ExprTy.addConst();
- QualType T = VD->getType();
- BlockDeclRefExpr *BDRE = new (Context) BlockDeclRefExpr(VD,
- ExprTy, Loc, false,
- constAdded);
- if (getLangOptions().CPlusPlus) {
- if (!T->isDependentType() && !T->isReferenceType()) {
- Expr *E = new (Context)
- DeclRefExpr(const_cast<ValueDecl*>(BDRE->getDecl()), T,
- SourceLocation());
+ case Decl::Var:
+ // In C, "extern void blah;" is valid and is an r-value.
+ if (!getLangOptions().CPlusPlus &&
+ !type.hasQualifiers() &&
+ type->isVoidType()) {
+ valueKind = VK_RValue;
+ break;
+ }
+ // fallthrough
+
+ case Decl::ImplicitParam:
+ case Decl::ParmVar:
+ // These are always l-values.
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
+ break;
+
+ case Decl::Function: {
+ // Functions are l-values in C++.
+ if (getLangOptions().CPlusPlus) {
+ valueKind = VK_LValue;
+ break;
+ }
- ExprResult Res = PerformCopyInitialization(
- InitializedEntity::InitializeBlock(VD->getLocation(),
- T, false),
- SourceLocation(),
- Owned(E));
- if (!Res.isInvalid()) {
- Res = MaybeCreateCXXExprWithTemporaries(Res.get());
- Expr *Init = Res.takeAs<Expr>();
- BDRE->setCopyConstructorExpr(Init);
- }
+ // C99 DR 316 says that, if a function type comes from a
+ // function definition (without a prototype), that type is only
+ // used for checking compatibility. Therefore, when referencing
+ // the function, we pretend that we don't have the full function
+ // type.
+ if (!cast<FunctionDecl>(VD)->hasPrototype())
+ if (const FunctionProtoType *proto = type->getAs<FunctionProtoType>())
+ type = Context.getFunctionNoProtoType(proto->getResultType(),
+ proto->getExtInfo());
+
+ // Functions are r-values in C.
+ valueKind = VK_RValue;
+ break;
+ }
+
+ case Decl::CXXMethod:
+ // C++ methods are l-values if static, r-values if non-static.
+ if (cast<CXXMethodDecl>(VD)->isStatic()) {
+ valueKind = VK_LValue;
+ break;
}
+ // fallthrough
+
+ case Decl::CXXConversion:
+ case Decl::CXXDestructor:
+ case Decl::CXXConstructor:
+ valueKind = VK_RValue;
+ break;
}
- return Owned(BDRE);
+
+ return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS);
}
- // If this reference is not in a block or if the referenced variable is
- // within the block, create a normal DeclRefExpr.
- return BuildDeclRefExpr(VD, VD->getType().getNonReferenceType(),
- NameInfo, &SS);
+ }
+
+ llvm_unreachable("unknown capture result");
+ return ExprError();
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc,
@@ -2008,6 +2622,9 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
bool isExact = (result == APFloat::opOK);
Res = FloatingLiteral::Create(Context, Val, isExact, Ty, Tok.getLocation());
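+ // In single-precision-constant mode (e.g. OpenCL's
+ // -cl-single-precision-constant), demote double literals to float.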
+ if (getLangOptions().SinglePrecisionConstants && Ty == Context.DoubleTy)
+ ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast);
+
} else if (!Literal.isIntegerLiteral()) {
return ExprError();
} else {
@@ -2074,7 +2691,10 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
// Does it fit in an unsigned long long?
if (ResultVal.isIntN(LongLongSize)) {
// Does it fit in a signed long long?
- if (!Literal.isUnsigned && ResultVal[LongLongSize-1] == 0)
+ // To be compatible with MSVC, hex integer literals ending with the
+ // LL or i64 suffix are always signed in Microsoft mode.
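+ // For example, 0x8000000000000000LL gets type 'long long' here even
+ // though its value only fits in 'unsigned long long'.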
+ if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
+ (getLangOptions().Microsoft && Literal.isLongLong)))
Ty = Context.LongLongTy;
else if (AllowUnsigned)
Ty = Context.UnsignedLongLongTy;
@@ -2091,7 +2711,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
}
if (ResultVal.getBitWidth() != Width)
- ResultVal.trunc(Width);
+ ResultVal = ResultVal.trunc(Width);
}
Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation());
}
@@ -2114,7 +2734,7 @@ ExprResult Sema::ActOnParenExpr(SourceLocation L,
/// See C99 6.3.2.1p[2-4] for more details.
bool Sema::CheckSizeOfAlignOfOperand(QualType exprType,
SourceLocation OpLoc,
- const SourceRange &ExprRange,
+ SourceRange ExprRange,
bool isSizeof) {
if (exprType->isDependentType())
return false;
@@ -2153,17 +2773,11 @@ bool Sema::CheckSizeOfAlignOfOperand(QualType exprType,
return true;
}
- if (Context.hasSameUnqualifiedType(exprType, Context.OverloadTy)) {
- Diag(OpLoc, diag::err_sizeof_alignof_overloaded_function_type)
- << !isSizeof << ExprRange;
- return true;
- }
-
return false;
}
-bool Sema::CheckAlignOfExpr(Expr *E, SourceLocation OpLoc,
- const SourceRange &ExprRange) {
+static bool CheckAlignOfExpr(Sema &S, Expr *E, SourceLocation OpLoc,
+ SourceRange ExprRange) {
E = E->IgnoreParens();
// alignof decl is always ok.
@@ -2175,7 +2789,7 @@ bool Sema::CheckAlignOfExpr(Expr *E, SourceLocation OpLoc,
return false;
if (E->getBitField()) {
- Diag(OpLoc, diag::err_sizeof_alignof_bitfield) << 1 << ExprRange;
+ S.Diag(OpLoc, diag::err_sizeof_alignof_bitfield) << 1 << ExprRange;
return true;
}
@@ -2185,7 +2799,7 @@ bool Sema::CheckAlignOfExpr(Expr *E, SourceLocation OpLoc,
if (isa<FieldDecl>(ME->getMemberDecl()))
return false;
- return CheckSizeOfAlignOfOperand(E->getType(), OpLoc, ExprRange, false);
+ return S.CheckSizeOfAlignOfOperand(E->getType(), OpLoc, ExprRange, false);
}
/// \brief Build a sizeof or alignof expression given a type operand.
@@ -2218,10 +2832,14 @@ Sema::CreateSizeOfAlignOfExpr(Expr *E, SourceLocation OpLoc,
if (E->isTypeDependent()) {
// Delay type-checking for type-dependent expressions.
} else if (!isSizeOf) {
- isInvalid = CheckAlignOfExpr(E, OpLoc, R);
+ isInvalid = CheckAlignOfExpr(*this, E, OpLoc, R);
} else if (E->getBitField()) { // C99 6.5.3.4p1.
Diag(OpLoc, diag::err_sizeof_alignof_bitfield) << 0;
isInvalid = true;
+ } else if (E->getType()->isPlaceholderType()) {
+ ExprResult PE = CheckPlaceholderExpr(E, OpLoc);
+ if (PE.isInvalid()) return ExprError();
+ return CreateSizeOfAlignOfExpr(PE.take(), OpLoc, isSizeOf, R);
} else {
isInvalid = CheckSizeOfAlignOfOperand(E->getType(), OpLoc, R, true);
}
@@ -2257,9 +2875,14 @@ Sema::ActOnSizeOfAlignOfExpr(SourceLocation OpLoc, bool isSizeof, bool isType,
return move(Result);
}
-QualType Sema::CheckRealImagOperand(Expr *&V, SourceLocation Loc, bool isReal) {
+static QualType CheckRealImagOperand(Sema &S, Expr *&V, SourceLocation Loc,
+ bool isReal) {
if (V->isTypeDependent())
- return Context.DependentTy;
+ return S.Context.DependentTy;
+
+ // _Real and _Imag are only l-values for normal l-values.
+ if (V->getObjectKind() != OK_Ordinary)
+ S.DefaultLvalueConversion(V);
// These operators return the element type of a complex type.
if (const ComplexType *CT = V->getType()->getAs<ComplexType>())
@@ -2269,8 +2892,16 @@ QualType Sema::CheckRealImagOperand(Expr *&V, SourceLocation Loc, bool isReal) {
if (V->getType()->isArithmeticType())
return V->getType();
+ // Test for placeholders.
+ ExprResult PR = S.CheckPlaceholderExpr(V, Loc);
+ if (PR.isInvalid()) return QualType();
+ if (PR.take() != V) {
+ V = PR.take();
+ return CheckRealImagOperand(S, V, Loc, isReal);
+ }
+
// Reject anything else.
- Diag(Loc, diag::err_realimag_invalid_type) << V->getType()
+ S.Diag(Loc, diag::err_realimag_invalid_type) << V->getType()
<< (isReal ? "__real" : "__imag");
return QualType();
}
@@ -2290,6 +2921,19 @@ Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
return BuildUnaryOp(S, OpLoc, Opc, Input);
}
+/// Expressions of certain arbitrary types are forbidden by C from
+/// having l-value type. These are:
+/// - 'void', but not qualified void
+/// - function types
+///
+/// The exact rule here is C99 6.3.2.1:
+/// An lvalue is an expression with an object type or an incomplete
+/// type other than void.
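+///
+/// For example, the GNU void-pointer subscript extension handled below
+/// yields an expression of unqualified 'void' type, which this rule
+/// forces to be an r-value.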
+static bool IsCForbiddenLValueType(ASTContext &C, QualType T) {
+ return ((T->isVoidType() && !T.hasQualifiers()) ||
+ T->isFunctionType());
+}
+
ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
@@ -2303,7 +2947,9 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
if (getLangOptions().CPlusPlus &&
(LHSExp->isTypeDependent() || RHSExp->isTypeDependent())) {
return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
- Context.DependentTy, RLoc));
+ Context.DependentTy,
+ VK_LValue, OK_Ordinary,
+ RLoc));
}
if (getLangOptions().CPlusPlus &&
@@ -2330,6 +2976,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
DefaultFunctionArrayLvalueConversion(RHSExp);
QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType();
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
// C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent
// to the expression *((e1)+(e2)). This means the array "Base" may actually be
@@ -2364,6 +3012,9 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
} else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
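+    // Subscripting an l-value vector yields a vector-component l-value;
+    // subscripting an r-value vector stays an ordinary r-value.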
+ VK = LHSExp->getValueKind();
+ if (VK != VK_RValue)
+ OK = OK_VectorComponent;
// FIXME: need to deal with const...
ResultType = VTy->getElementType();
@@ -2417,7 +3068,15 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
return ExprError();
}
- if (!ResultType->isDependentType() &&
+ if (ResultType->isVoidType() && !getLangOptions().CPlusPlus) {
+ // GNU extension: subscripting on pointer to void
+ Diag(LLoc, diag::ext_gnu_void_ptr)
+ << BaseExpr->getSourceRange();
+
+ // C forbids expressions of unqualified void type from being l-values.
+ // See IsCForbiddenLValueType.
+ if (!ResultType.hasQualifiers()) VK = VK_RValue;
+ } else if (!ResultType->isDependentType() &&
RequireCompleteType(LLoc, ResultType,
PDiag(diag::err_subscript_incomplete_type)
<< BaseExpr->getSourceRange()))
@@ -2430,13 +3089,20 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
return ExprError();
}
+ assert(VK == VK_RValue || LangOpts.CPlusPlus ||
+ !IsCForbiddenLValueType(Context, ResultType));
+
return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
- ResultType, RLoc));
+ ResultType, VK, OK, RLoc));
}
-QualType Sema::
-CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
- const IdentifierInfo *CompName,
+/// Check an ext-vector component access expression.
+///
+/// VK should be set in advance to the value kind of the base
+/// expression.
+static QualType
+CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
+ SourceLocation OpLoc, const IdentifierInfo *CompName,
SourceLocation CompLoc) {
// FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements,
// see FIXME there.
@@ -2457,25 +3123,36 @@ CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
// indicating that it is a string of hex values to be used as vector indices.
bool HexSwizzle = *compStr == 's' || *compStr == 'S';
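+
+  // Track which elements the swizzle names so that repeated components
+  // (e.g. "xyx") can be detected; such swizzles become r-values below,
+  // since they cannot be stored to.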
+ bool HasRepeated = false;
+ bool HasIndex[16] = {};
+
+ int Idx;
+
// Check that we've found one of the special components, or that the component
// names must come from the same set.
if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
!strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
HalvingSwizzle = true;
- } else if (vecType->getPointAccessorIdx(*compStr) != -1) {
- do
+ } else if (!HexSwizzle &&
+ (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
+ do {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
compStr++;
- while (*compStr && vecType->getPointAccessorIdx(*compStr) != -1);
- } else if (HexSwizzle || vecType->getNumericAccessorIdx(*compStr) != -1) {
- do
+ } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
+ } else {
+ if (HexSwizzle) compStr++;
+ while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
compStr++;
- while (*compStr && vecType->getNumericAccessorIdx(*compStr) != -1);
+ }
}
if (!HalvingSwizzle && *compStr) {
// We didn't get to the end of the string. This means the component names
// didn't come from the same set *or* we encountered an illegal name.
- Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
+ S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
<< llvm::StringRef(compStr, 1) << SourceRange(CompLoc);
return QualType();
}
@@ -2490,7 +3167,7 @@ CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
while (*compStr) {
if (!vecType->isAccessorWithinNumElements(*compStr++)) {
- Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
+ S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
<< baseType << SourceRange(CompLoc);
return QualType();
}
@@ -2510,48 +3187,51 @@ CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
if (CompSize == 1)
return vecType->getElementType();
- QualType VT = Context.getExtVectorType(vecType->getElementType(), CompSize);
+ if (HasRepeated) VK = VK_RValue;
+
+ QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
// Now look up the TypeDefDecl from the vector type. Without this,
// diagnostics look bad. We want extended vector types to appear built-in.
- for (unsigned i = 0, E = ExtVectorDecls.size(); i != E; ++i) {
- if (ExtVectorDecls[i]->getUnderlyingType() == VT)
- return Context.getTypedefType(ExtVectorDecls[i]);
+ for (unsigned i = 0, E = S.ExtVectorDecls.size(); i != E; ++i) {
+ if (S.ExtVectorDecls[i]->getUnderlyingType() == VT)
+ return S.Context.getTypedefType(S.ExtVectorDecls[i]);
}
return VT; // should never get here (a typedef type should always be found).
}
-static Decl *FindGetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
+static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
IdentifierInfo *Member,
const Selector &Sel,
ASTContext &Context) {
-
- if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
- return PD;
+ if (Member)
+ if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
+ return PD;
if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
return OMD;
for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
E = PDecl->protocol_end(); I != E; ++I) {
- if (Decl *D = FindGetterNameDeclFromProtocolList(*I, Member, Sel,
- Context))
+ if (Decl *D = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context))
return D;
}
return 0;
}
-static Decl *FindGetterNameDecl(const ObjCObjectPointerType *QIdTy,
- IdentifierInfo *Member,
- const Selector &Sel,
- ASTContext &Context) {
+static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
// Check protocols on qualified interfaces.
Decl *GDecl = 0;
for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
E = QIdTy->qual_end(); I != E; ++I) {
- if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
- GDecl = PD;
- break;
- }
- // Also must look for a getter name which uses property syntax.
+ if (Member)
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+ GDecl = PD;
+ break;
+ }
+ // Also must look for a getter or setter name which uses property syntax.
if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Sel)) {
GDecl = OMD;
break;
@@ -2561,7 +3241,8 @@ static Decl *FindGetterNameDecl(const ObjCObjectPointerType *QIdTy,
for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
E = QIdTy->qual_end(); I != E; ++I) {
// Search in the protocol-qualifier list of current protocol.
- GDecl = FindGetterNameDeclFromProtocolList(*I, Member, Sel, Context);
+ GDecl = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context);
if (GDecl)
return GDecl;
}
@@ -2617,14 +3298,15 @@ static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
Expr *BaseExpr,
QualType BaseType,
const CXXScopeSpec &SS,
- const LookupResult &R) {
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
// If this is an implicit member access, use a different set of
// diagnostics.
if (!BaseExpr)
- return DiagnoseInstanceReference(SemaRef, SS, R);
+ return DiagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
- SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_of_unrelated)
- << SS.getRange() << R.getRepresentativeDecl() << BaseType;
+ SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
+ << SS.getRange() << rep << BaseType;
}
// Check whether the declarations we found through a nested-name
@@ -2673,7 +3355,9 @@ bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
return false;
}
- DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS, R);
+ DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS,
+ R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
return true;
}
@@ -2846,18 +3530,12 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
// Construct an unresolved result if we in fact got an unresolved
// result.
if (R.isOverloadedResult() || R.isUnresolvableResult()) {
- bool Dependent =
- BaseExprType->isDependentType() ||
- R.isUnresolvableResult() ||
- OverloadExpr::ComputeDependence(R.begin(), R.end(), TemplateArgs);
-
// Suppress any lookup-related diagnostics; we'll do these when we
// pick a member.
R.suppressDiagnostics();
UnresolvedMemberExpr *MemExpr
- = UnresolvedMemberExpr::Create(Context, Dependent,
- R.isUnresolvableResult(),
+ = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(),
BaseExpr, BaseExprType,
IsArrow, OpLoc,
Qualifier, SS.getRange(),
@@ -2905,58 +3583,44 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
return ExprError();
}
- if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
- // We may have found a field within an anonymous union or struct
- // (C++ [class.union]).
- if (cast<RecordDecl>(FD->getDeclContext())->isAnonymousStructOrUnion() &&
- !BaseType->getAs<RecordType>()->getDecl()->isAnonymousStructOrUnion())
- return BuildAnonymousStructUnionMemberReference(MemberLoc, FD,
- BaseExpr, OpLoc);
-
- // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
- QualType MemberType = FD->getType();
- if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>())
- MemberType = Ref->getPointeeType();
- else {
- Qualifiers BaseQuals = BaseType.getQualifiers();
- BaseQuals.removeObjCGCAttr();
- if (FD->isMutable()) BaseQuals.removeConst();
+ // Perform a property load on the base regardless of whether we
+ // actually need it for the declaration.
+ if (BaseExpr->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(BaseExpr);
- Qualifiers MemberQuals
- = Context.getCanonicalType(MemberType).getQualifiers();
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
+ return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow,
+ SS, FD, FoundDecl, MemberNameInfo);
- Qualifiers Combined = BaseQuals + MemberQuals;
- if (Combined != MemberQuals)
- MemberType = Context.getQualifiedType(MemberType, Combined);
- }
-
- MarkDeclarationReferenced(MemberLoc, FD);
- if (PerformObjectMemberConversion(BaseExpr, Qualifier, FoundDecl, FD))
- return ExprError();
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- FD, FoundDecl, MemberNameInfo,
- MemberType));
- }
+ if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl))
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD,
+ BaseExpr, OpLoc);
if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
MarkDeclarationReferenced(MemberLoc, Var);
return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
Var, FoundDecl, MemberNameInfo,
- Var->getType().getNonReferenceType()));
+ Var->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary));
}
- if (FunctionDecl *MemberFn = dyn_cast<FunctionDecl>(MemberDecl)) {
+ if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
MarkDeclarationReferenced(MemberLoc, MemberDecl);
return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
MemberFn, FoundDecl, MemberNameInfo,
- MemberFn->getType()));
+ MemberFn->getType(),
+ MemberFn->isInstance() ? VK_RValue : VK_LValue,
+ OK_Ordinary));
}
+ assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
MarkDeclarationReferenced(MemberLoc, MemberDecl);
return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
Enum, FoundDecl, MemberNameInfo,
- Enum->getType()));
+ Enum->getType(), VK_RValue, OK_Ordinary));
}
Owned(BaseExpr);
@@ -2975,6 +3639,38 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
return ExprError();
}
+/// Given that normal member access failed on the given expression,
+/// and given that the expression's type involves builtin-id or
+/// builtin-Class, decide whether substituting in the redefinition
+/// types would be profitable. The redefinition type is whatever
+/// this translation unit tried to typedef to id/Class; we store
+/// it to the side and then re-use it in places like this.
+static bool ShouldTryAgainWithRedefinitionType(Sema &S, Expr *&base) {
+ const ObjCObjectPointerType *opty
+ = base->getType()->getAs<ObjCObjectPointerType>();
+ if (!opty) return false;
+
+ const ObjCObjectType *ty = opty->getObjectType();
+
+ QualType redef;
+ if (ty->isObjCId()) {
+ redef = S.Context.ObjCIdRedefinitionType;
+ } else if (ty->isObjCClass()) {
+ redef = S.Context.ObjCClassRedefinitionType;
+ } else {
+ return false;
+ }
+
+ // Do the substitution as long as the redefinition type isn't just a
+ // possibly-qualified pointer to builtin-id or builtin-Class again.
+ opty = redef->getAs<ObjCObjectPointerType>();
+  if (opty && !opty->getObjectType()->getInterface())
+ return false;
+
+ S.ImpCastExprToType(base, redef, CK_BitCast);
+ return true;
+}
+
/// Look up the given member of the given non-type-dependent
/// expression. This can return in one of two ways:
/// * If it returns a sentinel null-but-valid result, the caller will
@@ -2994,6 +3690,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
// Perform default conversions.
DefaultFunctionArrayConversion(BaseExpr);
+ if (IsArrow) DefaultLvalueConversion(BaseExpr);
QualType BaseType = BaseExpr->getType();
assert(!BaseType->isDependentType());
@@ -3001,96 +3698,235 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
DeclarationName MemberName = R.getLookupName();
SourceLocation MemberLoc = R.getNameLoc();
- // If the user is trying to apply -> or . to a function pointer
- // type, it's probably because they forgot parentheses to call that
- // function. Suggest the addition of those parentheses, build the
- // call, and continue on.
- if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
- if (const FunctionProtoType *Fun
- = Ptr->getPointeeType()->getAs<FunctionProtoType>()) {
- QualType ResultTy = Fun->getResultType();
- if (Fun->getNumArgs() == 0 &&
- ((!IsArrow && ResultTy->isRecordType()) ||
- (IsArrow && ResultTy->isPointerType() &&
- ResultTy->getAs<PointerType>()->getPointeeType()
- ->isRecordType()))) {
- SourceLocation Loc = PP.getLocForEndOfToken(BaseExpr->getLocEnd());
- Diag(Loc, diag::err_member_reference_needs_call)
- << QualType(Fun, 0)
- << FixItHint::CreateInsertion(Loc, "()");
+ // For later type-checking purposes, turn arrow accesses into dot
+ // accesses. The only access type we support that doesn't follow
+ // the C equivalence "a->b === (*a).b" is ObjC property accesses,
+ // and those never use arrows, so this is unaffected.
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (const ObjCObjectPointerType *Ptr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isRecordType()) {
+ // Recover from arrow accesses to records, e.g.:
+ // struct MyRecord foo;
+ // foo->bar
+ // This is actually well-formed in C++ if MyRecord has an
+ // overloaded operator->, but that should have been dealt with
+ // by now.
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ IsArrow = false;
+ } else {
+ Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+ }
- ExprResult NewBase
- = ActOnCallExpr(0, BaseExpr, Loc,
- MultiExprArg(*this, 0, 0), 0, Loc);
- BaseExpr = 0;
- if (NewBase.isInvalid())
- return ExprError();
+ // Handle field access to simple records.
+ if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
+ if (LookupMemberExprInRecord(*this, R, BaseExpr->getSourceRange(),
+ RTy, OpLoc, SS, HasTemplateArgs))
+ return ExprError();
- BaseExpr = NewBase.takeAs<Expr>();
- DefaultFunctionArrayConversion(BaseExpr);
- BaseType = BaseExpr->getType();
- }
- }
+ // Returning valid-but-null is how we indicate to the caller that
+ // the lookup result was filled in.
+ return Owned((Expr*) 0);
}
- // If this is an Objective-C pseudo-builtin and a definition is provided then
- // use that.
- if (BaseType->isObjCIdType()) {
- if (IsArrow) {
- // Handle the following exceptional case PObj->isa.
- if (const ObjCObjectPointerType *OPT =
- BaseType->getAs<ObjCObjectPointerType>()) {
- if (OPT->getObjectType()->isObjCId() &&
- MemberName.getAsIdentifierInfo()->isStr("isa"))
- return Owned(new (Context) ObjCIsaExpr(BaseExpr, true, MemberLoc,
- Context.getObjCClassType()));
- }
+ // Handle ivar access to Objective-C objects.
+ if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) {
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ // There are three cases for the base type:
+ // - builtin id (qualified or unqualified)
+ // - builtin Class (qualified or unqualified)
+ // - an interface
+ ObjCInterfaceDecl *IDecl = OTy->getInterface();
+ if (!IDecl) {
+ // There's an implicit 'isa' ivar on all objects.
+ // But we only actually find it this way on objects of type 'id',
+ // apparently.
+ if (OTy->isObjCId() && Member->isStr("isa"))
+ return Owned(new (Context) ObjCIsaExpr(BaseExpr, IsArrow, MemberLoc,
+ Context.getObjCClassType()));
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ goto fail;
}
- // We have an 'id' type. Rather than fall through, we check if this
- // is a reference to 'isa'.
- if (BaseType != Context.ObjCIdRedefinitionType) {
- BaseType = Context.ObjCIdRedefinitionType;
- ImpCastExprToType(BaseExpr, BaseType, CK_BitCast);
+
+ ObjCInterfaceDecl *ClassDeclared;
+ ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
+
+ if (!IV) {
+ // Attempt to correct for typos in ivar names.
+ LookupResult Res(*this, R.getLookupName(), R.getNameLoc(),
+ LookupMemberName);
+ if (CorrectTypo(Res, 0, 0, IDecl, false,
+ IsArrow ? CTC_ObjCIvarLookup
+ : CTC_ObjCPropertyLookup) &&
+ (IV = Res.getAsSingle<ObjCIvarDecl>())) {
+ Diag(R.getNameLoc(),
+ diag::err_typecheck_member_reference_ivar_suggest)
+ << IDecl->getDeclName() << MemberName << IV->getDeclName()
+ << FixItHint::CreateReplacement(R.getNameLoc(),
+ IV->getNameAsString());
+ Diag(IV->getLocation(), diag::note_previous_decl)
+ << IV->getDeclName();
+ } else {
+ Res.clear();
+ Res.setLookupName(Member);
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
+ << IDecl->getDeclName() << MemberName
+ << BaseExpr->getSourceRange();
+ return ExprError();
+ }
}
- }
- // If this is an Objective-C pseudo-builtin and a definition is provided then
- // use that.
- if (Context.isObjCSelType(BaseType)) {
- // We have an 'SEL' type. Rather than fall through, we check if this
- // is a reference to 'sel_id'.
- if (BaseType != Context.ObjCSelRedefinitionType) {
- BaseType = Context.ObjCSelRedefinitionType;
- ImpCastExprToType(BaseExpr, BaseType, CK_BitCast);
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check whether we can reference this field.
+ if (DiagnoseUseOfDecl(IV, MemberLoc))
+ return ExprError();
+ if (IV->getAccessControl() != ObjCIvarDecl::Public &&
+ IV->getAccessControl() != ObjCIvarDecl::Package) {
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (ObjCImpDecl && getCurFunctionDecl()) {
+ // Case of a c-function declared inside an objc implementation.
+ // FIXME: For a c-style function nested inside an objc implementation
+ // class, there is no implementation context available, so we pass
+ // down the context as argument to this routine. Ideally, this context
+ // need be passed down in the AST node and somehow calculated from the
+ // needs to be passed down in the AST node and somehow calculated from the
+ if (ObjCImplementationDecl *IMPD =
+ dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (ClassDeclared != IDecl ||
+ ClassOfMethodDecl != ClassDeclared)
+ Diag(MemberLoc, diag::error_private_ivar_access)
+ << IV->getDeclName();
+ } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
+ // @protected
+ Diag(MemberLoc, diag::error_protected_ivar_access)
+ << IV->getDeclName();
}
+
+ return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ MemberLoc, BaseExpr,
+ IsArrow));
}
- assert(!BaseType.isNull() && "no type for member expression");
+ // Objective-C property access.
+ const ObjCObjectPointerType *OPT;
+ if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) {
+ // This actually uses the base as an r-value.
+ DefaultLvalueConversion(BaseExpr);
+ assert(Context.hasSameUnqualifiedType(BaseType, BaseExpr->getType()));
- // Handle properties on ObjC 'Class' types.
- if (!IsArrow && BaseType->isObjCClassType()) {
- // Also must look for a getter name which uses property syntax.
IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
- if (ObjCMethodDecl *MD = getCurMethodDecl()) {
+
+ const ObjCObjectType *OT = OPT->getObjectType();
+
+ // id, with and without qualifiers.
+ if (OT->isObjCId()) {
+ // Check protocols on qualified interfaces.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ if (Decl *PMDecl = FindGetterSetterNameDecl(OPT, Member, Sel, Context)) {
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
+ // Check the use of this declaration
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ BaseExpr));
+ }
+
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(OMD, MemberLoc))
+ return ExprError();
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *SMD = 0;
+ if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/0,
+ SetterSel, Context))
+ SMD = dyn_cast<ObjCMethodDecl>(SDecl);
+ QualType PType = OMD->getSendResultType();
+
+ ExprValueKind VK = VK_LValue;
+ if (!getLangOptions().CPlusPlus &&
+ IsCForbiddenLValueType(Context, PType))
+ VK = VK_RValue;
+ ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
+
+ return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD, PType,
+ VK, OK,
+ MemberLoc, BaseExpr));
+ }
+ }
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // 'Class', unqualified only.
+ if (OT->isObjCClass()) {
+ // Only works in a method declaration (??!).
+ ObjCMethodDecl *MD = getCurMethodDecl();
+ if (!MD) {
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ goto fail;
+ }
+
+ // Also must look for a getter name which uses property syntax.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
ObjCInterfaceDecl *IFace = MD->getClassInterface();
ObjCMethodDecl *Getter;
- // FIXME: need to also look locally in the implementation.
if ((Getter = IFace->lookupClassMethod(Sel))) {
// Check the use of this method.
if (DiagnoseUseOfDecl(Getter, MemberLoc))
return ExprError();
- }
+ } else
+ Getter = IFace->lookupPrivateMethod(Sel, false);
// If we found a getter then this may be a valid dot-reference, we
// will look for the matching setter, in case it is needed.
Selector SetterSel =
- SelectorTable::constructSetterName(PP.getIdentifierTable(),
- PP.getSelectorTable(), Member);
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
if (!Setter) {
// If this reference is in an @implementation, also check for 'private'
// methods.
- Setter = IFace->lookupPrivateInstanceMethod(SetterSel);
+ Setter = IFace->lookupPrivateMethod(SetterSel, false);
}
// Look through local category implementations associated with the class.
if (!Setter)
@@ -3102,49 +3938,73 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
if (Getter || Setter) {
QualType PType;
- if (Getter)
+ ExprValueKind VK = VK_LValue;
+ if (Getter) {
PType = Getter->getSendResultType();
- else
+ if (!getLangOptions().CPlusPlus &&
+ IsCForbiddenLValueType(Context, PType))
+ VK = VK_RValue;
+ } else {
// Get the expression type from Setter's incoming parameter.
PType = (*(Setter->param_end() -1))->getType();
+ }
+ ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
+
// FIXME: we must check that the setter has property type.
- return Owned(new (Context) ObjCImplicitSetterGetterRefExpr(Getter,
- PType,
- Setter, MemberLoc, BaseExpr));
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ PType, VK, OK,
+ MemberLoc, BaseExpr));
}
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
return ExprError(Diag(MemberLoc, diag::err_property_not_found)
- << MemberName << BaseType);
+ << MemberName << BaseType);
}
- }
- if (BaseType->isObjCClassType() &&
- BaseType != Context.ObjCClassRedefinitionType) {
- BaseType = Context.ObjCClassRedefinitionType;
- ImpCastExprToType(BaseExpr, BaseType, CK_BitCast);
+ // Normal property access.
+ return HandleExprPropertyRefExpr(OPT, BaseExpr, MemberName, MemberLoc,
+ SourceLocation(), QualType(), false);
}
- if (IsArrow) {
- if (const PointerType *PT = BaseType->getAs<PointerType>())
- BaseType = PT->getPointeeType();
- else if (BaseType->isObjCObjectPointerType())
- ;
- else if (BaseType->isRecordType()) {
- // Recover from arrow accesses to records, e.g.:
- // struct MyRecord foo;
- // foo->bar
- // This is actually well-formed in C++ if MyRecord has an
- // overloaded operator->, but that should have been dealt with
- // by now.
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << BaseType << int(IsArrow) << BaseExpr->getSourceRange()
- << FixItHint::CreateReplacement(OpLoc, ".");
- IsArrow = false;
- } else {
- Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
- << BaseType << BaseExpr->getSourceRange();
+ // Handle 'field access' to vectors, such as 'V.xx'.
+ if (BaseType->isExtVectorType()) {
+ // FIXME: this expr should store IsArrow.
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr->getValueKind());
+ QualType ret = CheckExtVectorComponent(*this, BaseType, VK, OpLoc,
+ Member, MemberLoc);
+ if (ret.isNull())
return ExprError();
- }
- } else {
+
+ return Owned(new (Context) ExtVectorElementExpr(ret, VK, BaseExpr,
+ *Member, MemberLoc));
+ }
+
+ // Adjust builtin-sel to the appropriate redefinition type if that's
+ // not just a pointer to builtin-sel again.
+ if (IsArrow &&
+ BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) &&
+ !Context.ObjCSelRedefinitionType->isObjCSelType()) {
+ ImpCastExprToType(BaseExpr, Context.ObjCSelRedefinitionType, CK_BitCast);
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ // Failure cases.
+ fail:
+
+ // There's a possible road to recovery for function types.
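+ // (For example, given 'struct S (*pf)(void);', the member access
+ // 'pf.x' can be recovered by rewriting it as 'pf().x'.)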
+ const FunctionType *Fun = 0;
+ SourceLocation ParenInsertionLoc =
+ PP.getLocForEndOfToken(BaseExpr->getLocEnd());
+
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ if ((Fun = Ptr->getPointeeType()->getAs<FunctionType>())) {
+ // fall out, handled below.
+
// Recover from dot accesses to pointers, e.g.:
// type *foo;
// foo.bar
@@ -3152,165 +4012,113 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
// - 'type' is an Objective C type
// - 'bar' is a pseudo-destructor name which happens to refer to
// the appropriate pointer type
- if (MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
- const PointerType *PT = BaseType->getAs<PointerType>();
- if (PT && PT->getPointeeType()->isRecordType()) {
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << BaseType << int(IsArrow) << BaseExpr->getSourceRange()
- << FixItHint::CreateReplacement(OpLoc, "->");
- BaseType = PT->getPointeeType();
- IsArrow = true;
- }
- }
- }
+ } else if (!IsArrow && Ptr->getPointeeType()->isRecordType() &&
+ MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, "->");
- // Handle field access to simple records.
- if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
- if (LookupMemberExprInRecord(*this, R, BaseExpr->getSourceRange(),
- RTy, OpLoc, SS, HasTemplateArgs))
- return ExprError();
- return Owned((Expr*) 0);
+ // Recurse as an -> access.
+ IsArrow = true;
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+ } else {
+ Fun = BaseType->getAs<FunctionType>();
}
- // Handle access to Objective-C instance variables, such as "Obj->ivar" and
- // (*Obj).ivar.
- if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
- (!IsArrow && BaseType->isObjCObjectType())) {
- const ObjCObjectPointerType *OPT = BaseType->getAs<ObjCObjectPointerType>();
- ObjCInterfaceDecl *IDecl =
- OPT ? OPT->getInterfaceDecl()
- : BaseType->getAs<ObjCObjectType>()->getInterface();
- if (IDecl) {
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
-
- ObjCInterfaceDecl *ClassDeclared;
- ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
-
- if (!IV) {
- // Attempt to correct for typos in ivar names.
- LookupResult Res(*this, R.getLookupName(), R.getNameLoc(),
- LookupMemberName);
- if (CorrectTypo(Res, 0, 0, IDecl, false, CTC_MemberLookup) &&
- (IV = Res.getAsSingle<ObjCIvarDecl>())) {
- Diag(R.getNameLoc(),
- diag::err_typecheck_member_reference_ivar_suggest)
- << IDecl->getDeclName() << MemberName << IV->getDeclName()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- IV->getNameAsString());
- Diag(IV->getLocation(), diag::note_previous_decl)
- << IV->getDeclName();
- } else {
- Res.clear();
- Res.setLookupName(Member);
+ // If the user is trying to apply -> or . to a function pointer
+ // type, it's probably because they forgot parentheses to call that
+ // function. Suggest the addition of those parentheses, build the
+ // call, and continue on.
+ if (Fun || BaseType == Context.OverloadTy) {
+ bool TryCall;
+ if (BaseType == Context.OverloadTy) {
+ // Plunder the overload set for something that would make the member
+ // expression valid.
+ const OverloadExpr *Overloads = cast<OverloadExpr>(BaseExpr);
+ UnresolvedSet<4> CandidateOverloads;
+ bool HasZeroArgCandidateOverload = false;
+ for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
+ DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
+ const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*it);
+ QualType ResultTy = OverloadDecl->getResultType();
+ if ((!IsArrow && ResultTy->isRecordType()) ||
+ (IsArrow && ResultTy->isPointerType() &&
+ ResultTy->getPointeeType()->isRecordType())) {
+ CandidateOverloads.addDecl(*it);
+ if (OverloadDecl->getNumParams() == 0) {
+ HasZeroArgCandidateOverload = true;
+ }
}
}
-
- if (IV) {
- // If the decl being referenced had an error, return an error for this
- // sub-expr without emitting another error, in order to avoid cascading
- // error cases.
- if (IV->isInvalidDecl())
- return ExprError();
-
- // Check whether we can reference this field.
- if (DiagnoseUseOfDecl(IV, MemberLoc))
- return ExprError();
- if (IV->getAccessControl() != ObjCIvarDecl::Public &&
- IV->getAccessControl() != ObjCIvarDecl::Package) {
- ObjCInterfaceDecl *ClassOfMethodDecl = 0;
- if (ObjCMethodDecl *MD = getCurMethodDecl())
- ClassOfMethodDecl = MD->getClassInterface();
- else if (ObjCImpDecl && getCurFunctionDecl()) {
- // Case of a c-function declared inside an objc implementation.
- // FIXME: For a c-style function nested inside an objc implementation
- // class, there is no implementation context available, so we pass
- // down the context as argument to this routine. Ideally, this context
- // need be passed down in the AST node and somehow calculated from the
- // AST for a function decl.
- if (ObjCImplementationDecl *IMPD =
- dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
- ClassOfMethodDecl = IMPD->getClassInterface();
- else if (ObjCCategoryImplDecl* CatImplClass =
- dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
- ClassOfMethodDecl = CatImplClass->getClassInterface();
+ if (HasZeroArgCandidateOverload && CandidateOverloads.size() == 1) {
+ // We have one reasonable overload, and there's only one way to call it,
+ // so emit a fixit and try to recover
+ Diag(ParenInsertionLoc, diag::err_member_reference_needs_call)
+ << 1
+ << BaseExpr->getSourceRange()
+ << FixItHint::CreateInsertion(ParenInsertionLoc, "()");
+ TryCall = true;
+ } else {
+ Diag(BaseExpr->getExprLoc(), diag::err_member_reference_needs_call)
+ << 0
+ << BaseExpr->getSourceRange();
+ int CandidateOverloadCount = CandidateOverloads.size();
+ int I;
+ for (I = 0; I < CandidateOverloadCount; ++I) {
+ // FIXME: Magic number for max shown overloads stolen from
+ // OverloadCandidateSet::NoteCandidates.
+ if (I >= 4 && Diags.getShowOverloads() == Diagnostic::Ovl_Best) {
+ break;
}
-
- if (IV->getAccessControl() == ObjCIvarDecl::Private) {
- if (ClassDeclared != IDecl ||
- ClassOfMethodDecl != ClassDeclared)
- Diag(MemberLoc, diag::error_private_ivar_access)
- << IV->getDeclName();
- } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
- // @protected
- Diag(MemberLoc, diag::error_protected_ivar_access)
- << IV->getDeclName();
+ Diag(CandidateOverloads[I].getDecl()->getSourceRange().getBegin(),
+ diag::note_member_ref_possible_intended_overload);
+ }
+ if (I != CandidateOverloadCount) {
+ Diag(BaseExpr->getExprLoc(), diag::note_ovl_too_many_candidates)
+ << int(CandidateOverloadCount - I);
}
+ return ExprError();
+ }
+ } else {
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Fun)) {
+ TryCall = (FPT->getNumArgs() == 0);
+ } else {
+ TryCall = true;
+ }
- return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
- MemberLoc, BaseExpr,
- IsArrow));
+ if (TryCall) {
+ QualType ResultTy = Fun->getResultType();
+ TryCall = (!IsArrow && ResultTy->isRecordType()) ||
+ (IsArrow && ResultTy->isPointerType() &&
+ ResultTy->getAs<PointerType>()->getPointeeType()->isRecordType());
}
- return ExprError(Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
- << IDecl->getDeclName() << MemberName
- << BaseExpr->getSourceRange());
}
- }
- // Handle properties on 'id' and qualified "id".
- if (!IsArrow && (BaseType->isObjCIdType() ||
- BaseType->isObjCQualifiedIdType())) {
- const ObjCObjectPointerType *QIdTy = BaseType->getAs<ObjCObjectPointerType>();
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
-
- // Check protocols on qualified interfaces.
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
- if (Decl *PMDecl = FindGetterNameDecl(QIdTy, Member, Sel, Context)) {
- if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
- // Check the use of this declaration
- if (DiagnoseUseOfDecl(PD, MemberLoc))
- return ExprError();
- return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
- MemberLoc, BaseExpr));
- }
- if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
- // Check the use of this method.
- if (DiagnoseUseOfDecl(OMD, MemberLoc))
- return ExprError();
- return Owned(ObjCMessageExpr::Create(Context,
- OMD->getSendResultType(),
- OpLoc, BaseExpr, Sel,
- OMD, NULL, 0, MemberLoc));
+ if (TryCall) {
+ if (Fun) {
+ Diag(BaseExpr->getExprLoc(),
+ diag::err_member_reference_needs_call_zero_arg)
+ << QualType(Fun, 0)
+ << FixItHint::CreateInsertion(ParenInsertionLoc, "()");
}
- }
- return ExprError(Diag(MemberLoc, diag::err_property_not_found)
- << MemberName << BaseType);
- }
+ ExprResult NewBase
+ = ActOnCallExpr(0, BaseExpr, ParenInsertionLoc,
+ MultiExprArg(*this, 0, 0), ParenInsertionLoc);
+ if (NewBase.isInvalid())
+ return ExprError();
+ BaseExpr = NewBase.takeAs<Expr>();
- // Handle Objective-C property access, which is "Obj.property" where Obj is a
- // pointer to a (potentially qualified) interface type.
- if (!IsArrow)
- if (const ObjCObjectPointerType *OPT =
- BaseType->getAsObjCInterfacePointerType())
- return HandleExprPropertyRefExpr(OPT, BaseExpr, MemberName, MemberLoc);
- // Handle the following exceptional case (*Obj).isa.
- if (!IsArrow &&
- BaseType->isObjCObjectType() &&
- BaseType->getAs<ObjCObjectType>()->isObjCId() &&
- MemberName.getAsIdentifierInfo()->isStr("isa"))
- return Owned(new (Context) ObjCIsaExpr(BaseExpr, false, MemberLoc,
- Context.getObjCClassType()));
+ DefaultFunctionArrayConversion(BaseExpr);
+ BaseType = BaseExpr->getType();
- // Handle 'field access' to vectors, such as 'V.xx'.
- if (BaseType->isExtVectorType()) {
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
- QualType ret = CheckExtVectorComponent(BaseType, OpLoc, Member, MemberLoc);
- if (ret.isNull())
- return ExprError();
- return Owned(new (Context) ExtVectorElementExpr(ret, BaseExpr, *Member,
- MemberLoc));
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
}
Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
@@ -3333,15 +4141,21 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
/// this is an ugly hack around the fact that ObjC @implementations
/// aren't properly put in the context chain
ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
- SourceLocation OpLoc,
- tok::TokenKind OpKind,
- CXXScopeSpec &SS,
- UnqualifiedId &Id,
- Decl *ObjCImpDecl,
- bool HasTrailingLParen) {
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Id,
+ Decl *ObjCImpDecl,
+ bool HasTrailingLParen) {
if (SS.isSet() && SS.isInvalid())
return ExprError();
+ // Warn about the explicit constructor calls Microsoft extension.
+ if (getLangOptions().Microsoft &&
+ Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Diag(Id.getSourceRange().getBegin(),
+ diag::ext_ms_explicit_constructor_call);
+
TemplateArgumentListInfo TemplateArgsBuffer;
// Decompose the name into its component parts.
@@ -3399,60 +4213,76 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
}
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
- FunctionDecl *FD,
- ParmVarDecl *Param) {
+ FunctionDecl *FD,
+ ParmVarDecl *Param) {
if (Param->hasUnparsedDefaultArg()) {
- Diag (CallLoc,
- diag::err_use_of_default_argument_to_function_declared_later) <<
+ Diag(CallLoc,
+ diag::err_use_of_default_argument_to_function_declared_later) <<
FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
Diag(UnparsedDefaultArgLocs[Param],
- diag::note_default_argument_declared_here);
- } else {
- if (Param->hasUninstantiatedDefaultArg()) {
- Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
-
- // Instantiate the expression.
- MultiLevelTemplateArgumentList ArgList
- = getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true);
-
- std::pair<const TemplateArgument *, unsigned> Innermost
- = ArgList.getInnermost();
- InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first,
- Innermost.second);
-
- ExprResult Result = SubstExpr(UninstExpr, ArgList);
- if (Result.isInvalid())
- return ExprError();
+ diag::note_default_argument_declared_here);
+ return ExprError();
+ }
+
+ if (Param->hasUninstantiatedDefaultArg()) {
+ Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+
+ // Instantiate the expression.
+ MultiLevelTemplateArgumentList ArgList
+ = getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true);
+
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = ArgList.getInnermost();
+ InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first,
+ Innermost.second);
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ Result = SubstExpr(UninstExpr, ArgList);
+ }
+ if (Result.isInvalid())
+ return ExprError();
- // Check the expression as an initializer for the parameter.
- InitializedEntity Entity
- = InitializedEntity::InitializeParameter(Param);
- InitializationKind Kind
- = InitializationKind::CreateCopy(Param->getLocation(),
- /*FIXME:EqualLoc*/UninstExpr->getSourceRange().getBegin());
- Expr *ResultE = Result.takeAs<Expr>();
-
- InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1);
- Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &ResultE, 1));
- if (Result.isInvalid())
- return ExprError();
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Param->getLocation(),
+ /*FIXME:EqualLoc*/UninstExpr->getSourceRange().getBegin());
+ Expr *ResultE = Result.takeAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1);
+ Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, &ResultE, 1));
+ if (Result.isInvalid())
+ return ExprError();
- // Build the default argument expression.
- return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param,
- Result.takeAs<Expr>()));
- }
+ // Build the default argument expression.
+ return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param,
+ Result.takeAs<Expr>()));
+ }
- // If the default expression creates temporaries, we need to
- // push them to the current stack of expression temporaries so they'll
- // be properly destroyed.
- // FIXME: We should really be rebuilding the default argument with new
- // bound temporaries; see the comment in PR5810.
- for (unsigned i = 0, e = Param->getNumDefaultArgTemporaries(); i != e; ++i)
- ExprTemporaries.push_back(Param->getDefaultArgTemporary(i));
+ // If the default expression creates temporaries, we need to
+ // push them to the current stack of expression temporaries so they'll
+ // be properly destroyed.
+ // FIXME: We should really be rebuilding the default argument with new
+ // bound temporaries; see the comment in PR5810.
+ for (unsigned i = 0, e = Param->getNumDefaultArgTemporaries(); i != e; ++i) {
+ CXXTemporary *Temporary = Param->getDefaultArgTemporary(i);
+ MarkDeclarationReferenced(Param->getDefaultArg()->getLocStart(),
+ const_cast<CXXDestructorDecl*>(Temporary->getDestructor()));
+ ExprTemporaries.push_back(Temporary);
}
- // We already type-checked the argument, so we know it works.
+ // We already type-checked the argument, so we know it works.
+ // Just mark all of the declarations in this potentially-evaluated expression
+ // as being "referenced".
+ MarkDeclarationsReferencedInExpr(Param->getDefaultArg());
return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param));
}
@@ -3549,13 +4379,12 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
if (FDecl && i < FDecl->getNumParams())
Param = FDecl->getParamDecl(i);
-
InitializedEntity Entity =
- Param? InitializedEntity::InitializeParameter(Param)
- : InitializedEntity::InitializeParameter(ProtoArgType);
+ Param? InitializedEntity::InitializeParameter(Context, Param)
+ : InitializedEntity::InitializeParameter(Context, ProtoArgType);
ExprResult ArgE = PerformCopyInitialization(Entity,
- SourceLocation(),
- Owned(Arg));
+ SourceLocation(),
+ Owned(Arg));
if (ArgE.isInvalid())
return true;
@@ -3590,8 +4419,8 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
/// locations.
ExprResult
Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
- MultiExprArg args,
- SourceLocation *CommaLocs, SourceLocation RParenLoc) {
+ MultiExprArg args, SourceLocation RParenLoc,
+ Expr *ExecConfig) {
unsigned NumArgs = args.size();
// Since this might be a postfix expression, get rid of ParenListExprs.
@@ -3615,7 +4444,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
}
return Owned(new (Context) CallExpr(Context, Fn, 0, 0, Context.VoidTy,
- RParenLoc));
+ VK_RValue, RParenLoc));
}
// Determine whether this is a dependent call inside a C++ template,
@@ -3628,14 +4457,22 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
else if (Expr::hasAnyTypeDependentArguments(Args, NumArgs))
Dependent = true;
- if (Dependent)
- return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs,
- Context.DependentTy, RParenLoc));
+ if (Dependent) {
+ if (ExecConfig) {
+ return Owned(new (Context) CUDAKernelCallExpr(
+ Context, Fn, cast<CallExpr>(ExecConfig), Args, NumArgs,
+ Context.DependentTy, VK_RValue, RParenLoc));
+ } else {
+ return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs,
+ Context.DependentTy, VK_RValue,
+ RParenLoc));
+ }
+ }
// Determine whether this is a call to an object (C++ [over.call.object]).
if (Fn->getType()->isRecordType())
return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, Args, NumArgs,
- CommaLocs, RParenLoc));
+ RParenLoc));
Expr *NakedFn = Fn->IgnoreParens();
@@ -3652,7 +4489,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
(void)MemE;
return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
- CommaLocs, RParenLoc);
+ RParenLoc);
}
// Determine whether this is a call to a member function.
@@ -3660,7 +4497,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
NamedDecl *MemDecl = MemExpr->getMemberDecl();
if (isa<CXXMethodDecl>(MemDecl))
return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
- CommaLocs, RParenLoc);
+ RParenLoc);
}
// Determine whether this is a call to a pointer-to-member function.
@@ -3670,10 +4507,31 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
if (const FunctionProtoType *FPT
= BO->getType()->getAs<FunctionProtoType>()) {
QualType ResultTy = FPT->getCallResultType(Context);
-
+ ExprValueKind VK = Expr::getValueKindForType(FPT->getResultType());
+
+ // Check that the object type isn't more qualified than the
+ // member function we're calling.
+ Qualifiers FuncQuals = Qualifiers::fromCVRMask(FPT->getTypeQuals());
+ Qualifiers ObjectQuals
+ = BO->getOpcode() == BO_PtrMemD
+ ? BO->getLHS()->getType().getQualifiers()
+ : BO->getLHS()->getType()->getAs<PointerType>()
+ ->getPointeeType().getQualifiers();
+
+ Qualifiers Difference = ObjectQuals - FuncQuals;
+ Difference.removeObjCGCAttr();
+ Difference.removeAddressSpace();
+ if (Difference) {
+ std::string QualsString = Difference.getAsString();
+ Diag(LParenLoc, diag::err_pointer_to_member_call_drops_quals)
+ << BO->getType().getUnqualifiedType()
+ << QualsString
+ << (QualsString.find(' ') == std::string::npos? 1 : 2);
+ }
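
The new check rejects calls that would silently drop qualifiers from the object; a hedged illustration with hypothetical types:

    struct A { void f(); };                  // non-const member function
    void call(const A &a, void (A::*pmf)()) {
      (a.*pmf)();  // object is 'const' but the function type is not:
                   // err_pointer_to_member_call_drops_quals
    }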
+
CXXMemberCallExpr *TheCall
- = new (Context) CXXMemberCallExpr(Context, BO, Args,
- NumArgs, ResultTy,
+ = new (Context) CXXMemberCallExpr(Context, Fn, Args,
+ NumArgs, ResultTy, VK,
RParenLoc);
if (CheckCallReturnType(FPT->getResultType(),
@@ -3702,14 +4560,34 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
if (isa<UnresolvedLookupExpr>(NakedFn)) {
UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(NakedFn);
return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, Args, NumArgs,
- CommaLocs, RParenLoc);
+ RParenLoc, ExecConfig);
}
NamedDecl *NDecl = 0;
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(NakedFn))
+ if (UnOp->getOpcode() == UO_AddrOf)
+ NakedFn = UnOp->getSubExpr()->IgnoreParens();
+
if (isa<DeclRefExpr>(NakedFn))
NDecl = cast<DeclRefExpr>(NakedFn)->getDecl();
- return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, Args, NumArgs, RParenLoc);
+ return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, Args, NumArgs, RParenLoc,
+ ExecConfig);
+}
+
+ExprResult
+Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg execConfig, SourceLocation GGGLoc) {
+ FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
+ if (!ConfigDecl)
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << "cudaConfigureCall");
+ QualType ConfigQTy = ConfigDecl->getType();
+
+ DeclRefExpr *ConfigDR = new (Context) DeclRefExpr(
+ ConfigDecl, ConfigQTy, VK_LValue, LLLLoc);
+
+ return ActOnCallExpr(S, ConfigDR, LLLLoc, execConfig, GGGLoc, 0);
}
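
For reference, the construct handled here is CUDA's kernel-launch syntax (illustrative CUDA source, not part of the patch); LLLLoc and GGGLoc are the locations of the '<<<' and '>>>' tokens:

    __global__ void kern(int *p);
    void host(int *p) {
      kern<<<1, 128>>>(p);  // the execution configuration becomes a call to
                            // cudaConfigureCall(), and the kernel call is
                            // represented as a CUDAKernelCallExpr around it
    }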
/// BuildResolvedCallExpr - Build a call to a resolved expression,
@@ -3722,7 +4600,8 @@ ExprResult
Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ Expr *Config) {
FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
// Promote the function operand.
@@ -3730,10 +4609,21 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Make the call expr early, before semantic checks. This guarantees cleanup
// of arguments and function on error.
- CallExpr *TheCall = new (Context) CallExpr(Context, Fn,
- Args, NumArgs,
- Context.BoolTy,
- RParenLoc);
+ CallExpr *TheCall;
+ if (Config) {
+ TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
+ cast<CallExpr>(Config),
+ Args, NumArgs,
+ Context.BoolTy,
+ VK_RValue,
+ RParenLoc);
+ } else {
+ TheCall = new (Context) CallExpr(Context, Fn,
+ Args, NumArgs,
+ Context.BoolTy,
+ VK_RValue,
+ RParenLoc);
+ }
const FunctionType *FuncT;
if (!Fn->getType()->isBlockPointerType()) {
@@ -3760,6 +4650,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// We know the result type of the call, set it.
TheCall->setType(FuncT->getCallResultType(Context));
+ TheCall->setValueKind(Expr::getValueKindForType(FuncT->getResultType()));
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, NumArgs,
@@ -3773,24 +4664,45 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// on our knowledge of the function definition.
const FunctionDecl *Def = 0;
if (FDecl->hasBody(Def) && NumArgs != Def->param_size()) {
- const FunctionProtoType *Proto =
- Def->getType()->getAs<FunctionProtoType>();
- if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size())) {
+ const FunctionProtoType *Proto
+ = Def->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size()))
Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments)
<< (NumArgs > Def->param_size()) << FDecl << Fn->getSourceRange();
- }
}
+
+ // If the function we're calling isn't a function prototype, but we have
+  // a function prototype from a prior declaration, use that prototype.
+ if (!FDecl->hasPrototype())
+ Proto = FDecl->getType()->getAs<FunctionProtoType>();
}
// Promote the arguments (C99 6.5.2.2p6).
for (unsigned i = 0; i != NumArgs; i++) {
Expr *Arg = Args[i];
- DefaultArgumentPromotion(Arg);
+
+ if (Proto && i < Proto->getNumArgs()) {
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context,
+ Proto->getArgType(i));
+ ExprResult ArgE = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(Arg));
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.takeAs<Expr>();
+
+ } else {
+ DefaultArgumentPromotion(Arg);
+ }
+
if (RequireCompleteType(Arg->getSourceRange().getBegin(),
Arg->getType(),
PDiag(diag::err_call_incomplete_argument)
<< Arg->getSourceRange()))
return ExprError();
+
TheCall->setArg(i, Arg);
}
}
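
These branches only run for calls without prototype information; a C sketch:

    void f();      /* declared without a prototype */
    void g(void) {
      f(1.0f);     /* no parameter type to initialize against, so
                      DefaultArgumentPromotion widens the float to double
                      (C99 6.5.2.2p6) */
    }

When a prior declaration did supply a prototype, the new code digs it out of the function's type and copy-initializes each matched argument against it instead.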
@@ -3840,6 +4752,11 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
QualType literalType = TInfo->getType();
if (literalType->isArrayType()) {
+ if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
+ PDiag(diag::err_illegal_decl_array_incomplete_type)
+ << SourceRange(LParenLoc,
+ literalExpr->getSourceRange().getEnd())))
+ return ExprError();
if (literalType->isVariableArrayType())
return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
<< SourceRange(LParenLoc, literalExpr->getSourceRange().getEnd()));
@@ -3869,8 +4786,11 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
return ExprError();
}
+ // In C, compound literals are l-values for some reason.
+ ExprValueKind VK = getLangOptions().CPlusPlus ? VK_RValue : VK_LValue;
+
return Owned(new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
- literalExpr, isFileScope));
+ VK, literalExpr, isFileScope));
}
ExprResult
@@ -3888,60 +4808,171 @@ Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg initlist,
return Owned(E);
}
-static CastKind getScalarCastKind(ASTContext &Context,
- QualType SrcTy, QualType DestTy) {
- if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
+/// Prepares for a scalar cast, performing all the necessary stages
+/// except the final cast and returning the kind required.
+static CastKind PrepareScalarCast(Sema &S, Expr *&Src, QualType DestTy) {
+ // Both Src and Dest are scalar types, i.e. arithmetic or pointer.
+ // Also, callers should have filtered out the invalid cases with
+ // pointers. Everything else should be possible.
+
+ QualType SrcTy = Src->getType();
+ if (S.Context.hasSameUnqualifiedType(SrcTy, DestTy))
return CK_NoOp;
- if (SrcTy->hasPointerRepresentation()) {
- if (DestTy->hasPointerRepresentation())
+ switch (SrcTy->getScalarTypeKind()) {
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+
+ case Type::STK_Pointer:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_Pointer:
return DestTy->isObjCObjectPointerType() ?
CK_AnyPointerToObjCPointerCast :
CK_BitCast;
- if (DestTy->isIntegerType())
+ case Type::STK_Bool:
+ return CK_PointerToBoolean;
+ case Type::STK_Integral:
return CK_PointerToIntegral;
- }
+ case Type::STK_Floating:
+ case Type::STK_FloatingComplex:
+ case Type::STK_IntegralComplex:
+ case Type::STK_MemberPointer:
+ llvm_unreachable("illegal cast from pointer");
+ }
+ break;
- if (SrcTy->isIntegerType()) {
- if (DestTy->isIntegerType())
- return CK_IntegralCast;
- if (DestTy->hasPointerRepresentation())
+ case Type::STK_Bool: // casting from bool is like casting from an integer
+ case Type::STK_Integral:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_Pointer:
+ if (Src->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull))
+ return CK_NullToPointer;
return CK_IntegralToPointer;
- if (DestTy->isRealFloatingType())
+ case Type::STK_Bool:
+ return CK_IntegralToBoolean;
+ case Type::STK_Integral:
+ return CK_IntegralCast;
+ case Type::STK_Floating:
return CK_IntegralToFloating;
- }
+ case Type::STK_IntegralComplex:
+ S.ImpCastExprToType(Src, DestTy->getAs<ComplexType>()->getElementType(),
+ CK_IntegralCast);
+ return CK_IntegralRealToComplex;
+ case Type::STK_FloatingComplex:
+ S.ImpCastExprToType(Src, DestTy->getAs<ComplexType>()->getElementType(),
+ CK_IntegralToFloating);
+ return CK_FloatingRealToComplex;
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ break;
- if (SrcTy->isRealFloatingType()) {
- if (DestTy->isRealFloatingType())
+ case Type::STK_Floating:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_Floating:
return CK_FloatingCast;
- if (DestTy->isIntegerType())
+ case Type::STK_Bool:
+ return CK_FloatingToBoolean;
+ case Type::STK_Integral:
return CK_FloatingToIntegral;
+ case Type::STK_FloatingComplex:
+ S.ImpCastExprToType(Src, DestTy->getAs<ComplexType>()->getElementType(),
+ CK_FloatingCast);
+ return CK_FloatingRealToComplex;
+ case Type::STK_IntegralComplex:
+ S.ImpCastExprToType(Src, DestTy->getAs<ComplexType>()->getElementType(),
+ CK_FloatingToIntegral);
+ return CK_IntegralRealToComplex;
+ case Type::STK_Pointer:
+ llvm_unreachable("valid float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ break;
+
+ case Type::STK_FloatingComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_FloatingComplexCast;
+ case Type::STK_IntegralComplex:
+ return CK_FloatingComplexToIntegralComplex;
+ case Type::STK_Floating: {
+ QualType ET = SrcTy->getAs<ComplexType>()->getElementType();
+ if (S.Context.hasSameType(ET, DestTy))
+ return CK_FloatingComplexToReal;
+ S.ImpCastExprToType(Src, ET, CK_FloatingComplexToReal);
+ return CK_FloatingCast;
+ }
+ case Type::STK_Bool:
+ return CK_FloatingComplexToBoolean;
+ case Type::STK_Integral:
+ S.ImpCastExprToType(Src, SrcTy->getAs<ComplexType>()->getElementType(),
+ CK_FloatingComplexToReal);
+ return CK_FloatingToIntegral;
+ case Type::STK_Pointer:
+ llvm_unreachable("valid complex float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ break;
+
+ case Type::STK_IntegralComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_IntegralComplexToFloatingComplex;
+ case Type::STK_IntegralComplex:
+ return CK_IntegralComplexCast;
+ case Type::STK_Integral: {
+ QualType ET = SrcTy->getAs<ComplexType>()->getElementType();
+ if (S.Context.hasSameType(ET, DestTy))
+ return CK_IntegralComplexToReal;
+ S.ImpCastExprToType(Src, ET, CK_IntegralComplexToReal);
+ return CK_IntegralCast;
+ }
+ case Type::STK_Bool:
+ return CK_IntegralComplexToBoolean;
+ case Type::STK_Floating:
+ S.ImpCastExprToType(Src, SrcTy->getAs<ComplexType>()->getElementType(),
+ CK_IntegralComplexToReal);
+ return CK_IntegralToFloating;
+ case Type::STK_Pointer:
+ llvm_unreachable("valid complex int->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ break;
}
- // FIXME: Assert here.
- // assert(false && "Unhandled cast combination!");
- return CK_Unknown;
+ llvm_unreachable("Unhandled scalar cast");
+ return CK_BitCast;
}
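
As a worked example of the staged conversions, a real-to-complex cast is split into an element-type cast plus a real-to-complex step (a sketch using the '_Complex' extension):

    _Complex float fc = 3;  // ImpCastExprToType applies CK_IntegralToFloating
                            // (3 -> 3.0f); PrepareScalarCast then returns
                            // CK_FloatingRealToComplex for the final cast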
/// CheckCastTypes - Check type constraints for casting between types.
-bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
- CastKind& Kind,
- CXXCastPath &BasePath,
- bool FunctionalStyle) {
+bool Sema::CheckCastTypes(SourceRange TyR, QualType castType,
+ Expr *&castExpr, CastKind& Kind, ExprValueKind &VK,
+ CXXCastPath &BasePath, bool FunctionalStyle) {
if (getLangOptions().CPlusPlus)
- return CXXCheckCStyleCast(TyR, castType, castExpr, Kind, BasePath,
+ return CXXCheckCStyleCast(SourceRange(TyR.getBegin(),
+ castExpr->getLocEnd()),
+ castType, VK, castExpr, Kind, BasePath,
FunctionalStyle);
- DefaultFunctionArrayLvalueConversion(castExpr);
+ // We only support r-value casts in C.
+ VK = VK_RValue;
// C99 6.5.4p2: the cast type needs to be void or scalar and the expression
// type needs to be scalar.
if (castType->isVoidType()) {
+ // We don't necessarily do lvalue-to-rvalue conversions on this.
+ IgnoredValueConversions(castExpr);
+
// Cast to void allows any expr type.
Kind = CK_ToVoid;
return false;
}
+ DefaultFunctionArrayLvalueConversion(castExpr);
+
if (RequireCompleteType(TyR.getBegin(), castType,
diag::err_typecheck_cast_to_incomplete))
return true;
@@ -3964,7 +4995,8 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
for (Field = RD->field_begin(), FieldEnd = RD->field_end();
Field != FieldEnd; ++Field) {
if (Context.hasSameUnqualifiedType(Field->getType(),
- castExpr->getType())) {
+ castExpr->getType()) &&
+ !Field->isUnnamedBitfield()) {
Diag(TyR.getBegin(), diag::ext_typecheck_cast_to_union)
<< castExpr->getSourceRange();
break;
@@ -3982,6 +5014,9 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
<< castType << castExpr->getSourceRange();
}
+ // The type we're casting to is known to be a scalar or vector.
+
+ // Require the operand to be a scalar or vector.
if (!castExpr->getType()->isScalarType() &&
!castExpr->getType()->isVectorType()) {
return Diag(castExpr->getLocStart(),
@@ -3997,9 +5032,16 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
if (castExpr->getType()->isVectorType())
return CheckVectorCast(TyR, castExpr->getType(), castType, Kind);
+ // The source and target types are both scalars, i.e.
+ // - arithmetic types (fundamental, enum, and complex)
+ // - all kinds of pointers
+ // Note that member pointers were filtered out with C++, above.
+
if (isa<ObjCSelectorExpr>(castExpr))
return Diag(castExpr->getLocStart(), diag::err_cast_selector_expr);
+ // If either type is a pointer, the other type has to be either an
+ // integer or a pointer.
if (!castType->isArithmeticType()) {
QualType castExprType = castExpr->getType();
if (!castExprType->isIntegralType(Context) &&
@@ -4014,9 +5056,9 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
<< castType << castExpr->getSourceRange();
}
- Kind = getScalarCastKind(Context, castExpr->getType(), castType);
+ Kind = PrepareScalarCast(*this, castExpr, castType);
- if (Kind == CK_Unknown || Kind == CK_BitCast)
+ if (Kind == CK_BitCast)
CheckCastAlign(castExpr, castType, TyR);
return false;
@@ -4068,7 +5110,7 @@ bool Sema::CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *&CastExpr,
QualType DestElemTy = DestTy->getAs<ExtVectorType>()->getElementType();
ImpCastExprToType(CastExpr, DestElemTy,
- getScalarCastKind(Context, SrcTy, DestElemTy));
+ PrepareScalarCast(*this, CastExpr, DestElemTy));
Kind = CK_VectorSplat;
return false;
@@ -4096,15 +5138,16 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc, ParsedType Ty,
ExprResult
Sema::BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
SourceLocation RParenLoc, Expr *castExpr) {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Invalid;
+ ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
if (CheckCastTypes(SourceRange(LParenLoc, RParenLoc), Ty->getType(), castExpr,
- Kind, BasePath))
+ Kind, VK, BasePath))
return ExprError();
return Owned(CStyleCastExpr::Create(Context,
Ty->getType().getNonLValueExprType(Context),
- Kind, castExpr, &BasePath, Ty,
+ VK, Kind, castExpr, &BasePath, Ty,
LParenLoc, RParenLoc));
}
@@ -4188,14 +5231,73 @@ ExprResult Sema::ActOnParenOrParenListExpr(SourceLocation L,
return Owned(expr);
}
+/// \brief Emit a specialized diagnostic when one expression is a null pointer
+/// constant and the other is not a pointer.
+bool Sema::DiagnoseConditionalForNull(Expr *LHS, Expr *RHS,
+ SourceLocation QuestionLoc) {
+ Expr *NullExpr = LHS;
+ Expr *NonPointerExpr = RHS;
+ Expr::NullPointerConstantKind NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+
+ if (NullKind == Expr::NPCK_NotNull) {
+ NullExpr = RHS;
+ NonPointerExpr = LHS;
+ NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+ }
+
+ if (NullKind == Expr::NPCK_NotNull)
+ return false;
+
+ if (NullKind == Expr::NPCK_ZeroInteger) {
+ // In this case, check to make sure that we got here from a "NULL"
+ // string in the source code.
+ NullExpr = NullExpr->IgnoreParenImpCasts();
+ SourceManager& SM = Context.getSourceManager();
+ SourceLocation Loc = SM.getInstantiationLoc(NullExpr->getExprLoc());
+ unsigned Len =
+ Lexer::MeasureTokenLength(Loc, SM, Context.getLangOptions());
+ if (Len != 4 || memcmp(SM.getCharacterData(Loc), "NULL", 4))
+ return false;
+ }
+
+ int DiagType = (NullKind == Expr::NPCK_CXX0X_nullptr);
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands_null)
+ << NonPointerExpr->getType() << DiagType
+ << NonPointerExpr->getSourceRange();
+ return true;
+}
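
A hedged example of the C case this diagnoses, where NULL expands to a null pointer constant:

    #include <stddef.h>
    void f(int i, int b) {
      (void)(b ? i : NULL);  /* 'i' is not a pointer; the user most likely
                                meant '&i', so point at that instead of the
                                generic incompatible-operands error */
    }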
+
/// Note that lhs is not null here, even if this is the gnu "x ?: y" extension.
/// In that case, lhs = cond.
/// C99 6.5.15
QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
+ ExprValueKind &VK, ExprObjectKind &OK,
SourceLocation QuestionLoc) {
+ // If both LHS and RHS are overloaded functions, try to resolve them.
+ if (Context.hasSameType(LHS->getType(), RHS->getType()) &&
+ LHS->getType()->isSpecificBuiltinType(BuiltinType::Overload)) {
+ ExprResult LHSResult = CheckPlaceholderExpr(LHS, QuestionLoc);
+ if (LHSResult.isInvalid())
+ return QualType();
+
+ ExprResult RHSResult = CheckPlaceholderExpr(RHS, QuestionLoc);
+ if (RHSResult.isInvalid())
+ return QualType();
+
+ LHS = LHSResult.take();
+ RHS = RHSResult.take();
+ }
+
// C++ is sufficiently different to merit its own checker.
if (getLangOptions().CPlusPlus)
- return CXXCheckConditionalOperands(Cond, LHS, RHS, QuestionLoc);
+ return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc);
+
+ VK = VK_RValue;
+ OK = OK_Ordinary;
UsualUnaryConversions(Cond);
UsualUnaryConversions(LHS);
@@ -4206,15 +5308,47 @@ QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
// first, check the condition.
if (!CondTy->isScalarType()) { // C99 6.5.15p2
- Diag(Cond->getLocStart(), diag::err_typecheck_cond_expect_scalar)
- << CondTy;
- return QualType();
+ // OpenCL: Sec 6.3.i says the condition is allowed to be a vector or scalar.
+      // Throw an error if it's neither.
+ if (getLangOptions().OpenCL) {
+ if (!CondTy->isVectorType()) {
+ Diag(Cond->getLocStart(),
+ diag::err_typecheck_cond_expect_scalar_or_vector)
+ << CondTy;
+ return QualType();
+ }
+ }
+ else {
+ Diag(Cond->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return QualType();
+ }
}
// Now check the two expressions.
if (LHSTy->isVectorType() || RHSTy->isVectorType())
return CheckVectorOperands(QuestionLoc, LHS, RHS);
+ // OpenCL: If the condition is a vector, and both operands are scalar,
+  // attempt to implicitly convert them to the vector type to act like the
+  // built-in select.
+ if (getLangOptions().OpenCL && CondTy->isVectorType()) {
+ // Both operands should be of scalar type.
+ if (!LHSTy->isScalarType()) {
+ Diag(LHS->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return QualType();
+ }
+ if (!RHSTy->isScalarType()) {
+ Diag(RHS->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return QualType();
+ }
+    // Implicitly convert these scalars to the type of the condition.
+ ImpCastExprToType(LHS, CondTy, CK_IntegralCast);
+ ImpCastExprToType(RHS, CondTy, CK_IntegralCast);
+ }
+
// If both operands have arithmetic type, do the usual arithmetic conversions
// to find a common type: C99 6.5.15p3,5.
if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
@@ -4251,12 +5385,12 @@ QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
if ((LHSTy->isAnyPointerType() || LHSTy->isBlockPointerType()) &&
RHS->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
// promote the null to a pointer.
- ImpCastExprToType(RHS, LHSTy, CK_Unknown);
+ ImpCastExprToType(RHS, LHSTy, CK_NullToPointer);
return LHSTy;
}
if ((RHSTy->isAnyPointerType() || RHSTy->isBlockPointerType()) &&
LHS->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- ImpCastExprToType(LHS, RHSTy, CK_Unknown);
+ ImpCastExprToType(LHS, RHSTy, CK_NullToPointer);
return RHSTy;
}
@@ -4364,7 +5498,8 @@ QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
return LHSTy;
}
- // GCC compatibility: soften pointer/integer mismatch.
+ // GCC compatibility: soften pointer/integer mismatch. Note that
+ // null pointers have been filtered out by this point.
if (RHSTy->isPointerType() && LHSTy->isIntegerType()) {
Diag(QuestionLoc, diag::warn_typecheck_cond_pointer_integer_mismatch)
<< LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
@@ -4378,6 +5513,12 @@ QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
return LHSTy;
}
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is not a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (DiagnoseConditionalForNull(LHS, RHS, QuestionLoc))
+ return QualType();
+
// Otherwise, the operands are not compatible.
Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
@@ -4395,34 +5536,34 @@ QualType Sema::FindCompositeObjCPointerType(Expr *&LHS, Expr *&RHS,
// to the pseudo-builtin, because that will be implicitly cast back to the
// redefinition type if an attempt is made to access its fields.
if (LHSTy->isObjCClassType() &&
- (RHSTy.getDesugaredType() == Context.ObjCClassRedefinitionType)) {
+ (Context.hasSameType(RHSTy, Context.ObjCClassRedefinitionType))) {
ImpCastExprToType(RHS, LHSTy, CK_BitCast);
return LHSTy;
}
if (RHSTy->isObjCClassType() &&
- (LHSTy.getDesugaredType() == Context.ObjCClassRedefinitionType)) {
+ (Context.hasSameType(LHSTy, Context.ObjCClassRedefinitionType))) {
ImpCastExprToType(LHS, RHSTy, CK_BitCast);
return RHSTy;
}
// And the same for struct objc_object* / id
if (LHSTy->isObjCIdType() &&
- (RHSTy.getDesugaredType() == Context.ObjCIdRedefinitionType)) {
+ (Context.hasSameType(RHSTy, Context.ObjCIdRedefinitionType))) {
ImpCastExprToType(RHS, LHSTy, CK_BitCast);
return LHSTy;
}
if (RHSTy->isObjCIdType() &&
- (LHSTy.getDesugaredType() == Context.ObjCIdRedefinitionType)) {
+ (Context.hasSameType(LHSTy, Context.ObjCIdRedefinitionType))) {
ImpCastExprToType(LHS, RHSTy, CK_BitCast);
return RHSTy;
}
// And the same for struct objc_selector* / SEL
if (Context.isObjCSelType(LHSTy) &&
- (RHSTy.getDesugaredType() == Context.ObjCSelRedefinitionType)) {
+ (Context.hasSameType(RHSTy, Context.ObjCSelRedefinitionType))) {
ImpCastExprToType(RHS, LHSTy, CK_BitCast);
return LHSTy;
}
if (Context.isObjCSelType(RHSTy) &&
- (LHSTy.getDesugaredType() == Context.ObjCSelRedefinitionType)) {
+ (Context.hasSameType(LHSTy, Context.ObjCSelRedefinitionType))) {
ImpCastExprToType(LHS, RHSTy, CK_BitCast);
return RHSTy;
}
@@ -4512,60 +5653,84 @@ QualType Sema::FindCompositeObjCPointerType(Expr *&LHS, Expr *&RHS,
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
- SourceLocation ColonLoc,
- Expr *CondExpr, Expr *LHSExpr,
- Expr *RHSExpr) {
+ SourceLocation ColonLoc,
+ Expr *CondExpr, Expr *LHSExpr,
+ Expr *RHSExpr) {
// If this is the gnu "x ?: y" extension, analyze the types as though the LHS
// was the condition.
- bool isLHSNull = LHSExpr == 0;
- Expr *SAVEExpr = 0;
- if (isLHSNull) {
- LHSExpr = SAVEExpr = CondExpr;
- }
-
- QualType result = CheckConditionalOperands(CondExpr, LHSExpr,
- RHSExpr, QuestionLoc);
+ OpaqueValueExpr *opaqueValue = 0;
+ Expr *commonExpr = 0;
+ if (LHSExpr == 0) {
+ commonExpr = CondExpr;
+
+ // We usually want to apply unary conversions *before* saving, except
+ // in the special case of a C++ l-value conditional.
+ if (!(getLangOptions().CPlusPlus
+ && !commonExpr->isTypeDependent()
+ && commonExpr->getValueKind() == RHSExpr->getValueKind()
+ && commonExpr->isGLValue()
+ && commonExpr->isOrdinaryOrBitFieldObject()
+ && RHSExpr->isOrdinaryOrBitFieldObject()
+ && Context.hasSameType(commonExpr->getType(), RHSExpr->getType()))) {
+ UsualUnaryConversions(commonExpr);
+ }
+
+ opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(),
+ commonExpr->getType(),
+ commonExpr->getValueKind(),
+ commonExpr->getObjectKind());
+ LHSExpr = CondExpr = opaqueValue;
+ }
+
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType result = CheckConditionalOperands(CondExpr, LHSExpr, RHSExpr,
+ VK, OK, QuestionLoc);
if (result.isNull())
return ExprError();
- return Owned(new (Context) ConditionalOperator(CondExpr, QuestionLoc,
- LHSExpr, ColonLoc,
- RHSExpr, SAVEExpr,
- result));
+ if (!commonExpr)
+ return Owned(new (Context) ConditionalOperator(CondExpr, QuestionLoc,
+ LHSExpr, ColonLoc,
+ RHSExpr, result, VK, OK));
+
+ return Owned(new (Context)
+ BinaryConditionalOperator(commonExpr, opaqueValue, CondExpr, LHSExpr,
+ RHSExpr, QuestionLoc, ColonLoc, result, VK, OK));
}
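
The restructuring models GNU's binary conditional so the shared operand is evaluated only once; a small sketch:

    // GNU extension: 'p ?: q' tests and returns 'p' without re-evaluating it.
    // The common expression becomes an OpaqueValueExpr referenced as both the
    // condition and the true arm of a BinaryConditionalOperator.
    int *pick(int *p, int *q) { return p ?: q; }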
-// CheckPointerTypesForAssignment - This is a very tricky routine (despite
+// checkPointerTypesForAssignment - This is a very tricky routine (despite
// being closely modeled after the C99 spec :-). The odd characteristic of this
// routine is that it effectively ignores the qualifiers on the top-level pointee.
// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
// FIXME: add a couple examples in this comment.
-Sema::AssignConvertType
-Sema::CheckPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
- QualType lhptee, rhptee;
-
- if ((lhsType->isObjCClassType() &&
- (rhsType.getDesugaredType() == Context.ObjCClassRedefinitionType)) ||
- (rhsType->isObjCClassType() &&
- (lhsType.getDesugaredType() == Context.ObjCClassRedefinitionType))) {
- return Compatible;
- }
+static Sema::AssignConvertType
+checkPointerTypesForAssignment(Sema &S, QualType lhsType, QualType rhsType) {
+ assert(lhsType.isCanonical() && "LHS not canonicalized!");
+ assert(rhsType.isCanonical() && "RHS not canonicalized!");
// get the "pointed to" type (ignoring qualifiers at the top level)
- lhptee = lhsType->getAs<PointerType>()->getPointeeType();
- rhptee = rhsType->getAs<PointerType>()->getPointeeType();
+ const Type *lhptee, *rhptee;
+ Qualifiers lhq, rhq;
+ llvm::tie(lhptee, lhq) = cast<PointerType>(lhsType)->getPointeeType().split();
+ llvm::tie(rhptee, rhq) = cast<PointerType>(rhsType)->getPointeeType().split();
- // make sure we operate on the canonical type
- lhptee = Context.getCanonicalType(lhptee);
- rhptee = Context.getCanonicalType(rhptee);
-
- AssignConvertType ConvTy = Compatible;
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
// C99 6.5.16.1p1: This following citation is common to constraints
// 3 & 4 (below). ...and the type *pointed to* by the left has all the
// qualifiers of the type *pointed to* by the right;
- // FIXME: Handle ExtQualType
- if (!lhptee.isAtLeastAsQualifiedAs(rhptee))
- ConvTy = CompatiblePointerDiscardsQualifiers;
+ Qualifiers lq;
+
+ if (!lhq.compatiblyIncludes(rhq)) {
+ // Treat address-space mismatches as fatal. TODO: address subspaces
+ if (lhq.getAddressSpace() != rhq.getAddressSpace())
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
+ // For GCC compatibility, other qualifier mismatches are treated
+ // as still compatible in C.
+ else ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+ }
// C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or
// incomplete type and the other is a pointer to a qualified or unqualified
@@ -4576,7 +5741,7 @@ Sema::CheckPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
// As an extension, we allow cast to/from void* to function pointer.
assert(rhptee->isFunctionType());
- return FunctionVoidPointer;
+ return Sema::FunctionVoidPointer;
}
if (rhptee->isVoidType()) {
@@ -4585,123 +5750,136 @@ Sema::CheckPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
// As an extension, we allow cast to/from void* to function pointer.
assert(lhptee->isFunctionType());
- return FunctionVoidPointer;
+ return Sema::FunctionVoidPointer;
}
+
// C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
// unqualified versions of compatible types, ...
- lhptee = lhptee.getUnqualifiedType();
- rhptee = rhptee.getUnqualifiedType();
- if (!Context.typesAreCompatible(lhptee, rhptee)) {
+ QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0);
+ if (!S.Context.typesAreCompatible(ltrans, rtrans)) {
// Check if the pointee types are compatible ignoring the sign.
// We explicitly check for char so that we catch "char" vs
// "unsigned char" on systems where "char" is unsigned.
if (lhptee->isCharType())
- lhptee = Context.UnsignedCharTy;
+ ltrans = S.Context.UnsignedCharTy;
else if (lhptee->hasSignedIntegerRepresentation())
- lhptee = Context.getCorrespondingUnsignedType(lhptee);
+ ltrans = S.Context.getCorrespondingUnsignedType(ltrans);
if (rhptee->isCharType())
- rhptee = Context.UnsignedCharTy;
+ rtrans = S.Context.UnsignedCharTy;
else if (rhptee->hasSignedIntegerRepresentation())
- rhptee = Context.getCorrespondingUnsignedType(rhptee);
+ rtrans = S.Context.getCorrespondingUnsignedType(rtrans);
- if (lhptee == rhptee) {
+ if (ltrans == rtrans) {
// Types are compatible ignoring the sign. Qualifier incompatibility
// takes priority over sign incompatibility because the sign
// warning can be disabled.
- if (ConvTy != Compatible)
+ if (ConvTy != Sema::Compatible)
return ConvTy;
- return IncompatiblePointerSign;
+
+ return Sema::IncompatiblePointerSign;
}
// If we are a multi-level pointer, it's possible that our issue is simply
// one of qualification - e.g. char ** -> const char ** is not allowed. If
// the eventual target type is the same and the pointers have the same
// level of indirection, this must be the issue.
- if (lhptee->isPointerType() && rhptee->isPointerType()) {
+ if (isa<PointerType>(lhptee) && isa<PointerType>(rhptee)) {
do {
- lhptee = lhptee->getAs<PointerType>()->getPointeeType();
- rhptee = rhptee->getAs<PointerType>()->getPointeeType();
-
- lhptee = Context.getCanonicalType(lhptee);
- rhptee = Context.getCanonicalType(rhptee);
- } while (lhptee->isPointerType() && rhptee->isPointerType());
+ lhptee = cast<PointerType>(lhptee)->getPointeeType().getTypePtr();
+ rhptee = cast<PointerType>(rhptee)->getPointeeType().getTypePtr();
+ } while (isa<PointerType>(lhptee) && isa<PointerType>(rhptee));
- if (Context.hasSameUnqualifiedType(lhptee, rhptee))
- return IncompatibleNestedPointerQualifiers;
+ if (lhptee == rhptee)
+ return Sema::IncompatibleNestedPointerQualifiers;
}
// General pointer incompatibility takes priority over qualifiers.
- return IncompatiblePointer;
+ return Sema::IncompatiblePointer;
}
return ConvTy;
}
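
The multi-level case this catches, in C (a minimal sketch):

    void f(char **pp, const char **cpp) {
      cpp = pp;  /* same depth and same eventual pointee, so the problem is
                    qualification only: IncompatibleNestedPointerQualifiers */
    }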
-/// CheckBlockPointerTypesForAssignment - This routine determines whether two
+/// checkBlockPointerTypesForAssignment - This routine determines whether two
/// block pointer types are compatible or whether a block pointer and a normal
/// pointer are compatible. It is more restrictive than comparing two function
/// pointer types.
-Sema::AssignConvertType
-Sema::CheckBlockPointerTypesForAssignment(QualType lhsType,
- QualType rhsType) {
+static Sema::AssignConvertType
+checkBlockPointerTypesForAssignment(Sema &S, QualType lhsType,
+ QualType rhsType) {
+ assert(lhsType.isCanonical() && "LHS not canonicalized!");
+ assert(rhsType.isCanonical() && "RHS not canonicalized!");
+
QualType lhptee, rhptee;
// get the "pointed to" type (ignoring qualifiers at the top level)
- lhptee = lhsType->getAs<BlockPointerType>()->getPointeeType();
- rhptee = rhsType->getAs<BlockPointerType>()->getPointeeType();
+ lhptee = cast<BlockPointerType>(lhsType)->getPointeeType();
+ rhptee = cast<BlockPointerType>(rhsType)->getPointeeType();
- // make sure we operate on the canonical type
- lhptee = Context.getCanonicalType(lhptee);
- rhptee = Context.getCanonicalType(rhptee);
+ // In C++, the types have to match exactly.
+ if (S.getLangOptions().CPlusPlus)
+ return Sema::IncompatibleBlockPointer;
- AssignConvertType ConvTy = Compatible;
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
// For blocks we enforce that qualifiers are identical.
- if (lhptee.getLocalCVRQualifiers() != rhptee.getLocalCVRQualifiers())
- ConvTy = CompatiblePointerDiscardsQualifiers;
+ if (lhptee.getLocalQualifiers() != rhptee.getLocalQualifiers())
+ ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+
+ if (!S.Context.typesAreBlockPointerCompatible(lhsType, rhsType))
+ return Sema::IncompatibleBlockPointer;
- if (!getLangOptions().CPlusPlus) {
- if (!Context.typesAreBlockPointerCompatible(lhsType, rhsType))
- return IncompatibleBlockPointer;
- }
- else if (!Context.typesAreCompatible(lhptee, rhptee))
- return IncompatibleBlockPointer;
return ConvTy;
}
-/// CheckObjCPointerTypesForAssignment - Compares two objective-c pointer types
+/// checkObjCPointerTypesForAssignment - Compares two objective-c pointer types
/// for assignment compatibility.
-Sema::AssignConvertType
-Sema::CheckObjCPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
+static Sema::AssignConvertType
+checkObjCPointerTypesForAssignment(Sema &S, QualType lhsType, QualType rhsType) {
+ assert(lhsType.isCanonical() && "LHS was not canonicalized!");
+ assert(rhsType.isCanonical() && "RHS was not canonicalized!");
+
if (lhsType->isObjCBuiltinType()) {
// Class is not compatible with ObjC object pointers.
if (lhsType->isObjCClassType() && !rhsType->isObjCBuiltinType() &&
!rhsType->isObjCQualifiedClassType())
- return IncompatiblePointer;
- return Compatible;
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
}
if (rhsType->isObjCBuiltinType()) {
// Class is not compatible with ObjC object pointers.
if (rhsType->isObjCClassType() && !lhsType->isObjCBuiltinType() &&
!lhsType->isObjCQualifiedClassType())
- return IncompatiblePointer;
- return Compatible;
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
}
QualType lhptee =
lhsType->getAs<ObjCObjectPointerType>()->getPointeeType();
QualType rhptee =
rhsType->getAs<ObjCObjectPointerType>()->getPointeeType();
- // make sure we operate on the canonical type
- lhptee = Context.getCanonicalType(lhptee);
- rhptee = Context.getCanonicalType(rhptee);
+
if (!lhptee.isAtLeastAsQualifiedAs(rhptee))
- return CompatiblePointerDiscardsQualifiers;
+ return Sema::CompatiblePointerDiscardsQualifiers;
- if (Context.typesAreCompatible(lhsType, rhsType))
- return Compatible;
+ if (S.Context.typesAreCompatible(lhsType, rhsType))
+ return Sema::Compatible;
if (lhsType->isObjCQualifiedIdType() || rhsType->isObjCQualifiedIdType())
- return IncompatibleObjCQualifiedId;
- return IncompatiblePointer;
+ return Sema::IncompatibleObjCQualifiedId;
+ return Sema::IncompatiblePointer;
+}
+
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(SourceLocation Loc,
+ QualType lhsType, QualType rhsType) {
+ // Fake up an opaque expression. We don't actually care about what
+ // cast operations are required, so if CheckAssignmentConstraints
+ // adds casts to this they'll be wasted, but fortunately that doesn't
+ // usually happen on valid code.
+ OpaqueValueExpr rhs(Loc, rhsType, VK_RValue);
+ Expr *rhsPtr = &rhs;
+ CastKind K = CK_Invalid;
+
+ return CheckAssignmentConstraints(lhsType, rhsPtr, K);
}
/// CheckAssignmentConstraints (C99 6.5.16) - This routine currently
@@ -4720,21 +5898,21 @@ Sema::CheckObjCPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
/// As a result, the code for dealing with pointers is more complex than the
/// C99 spec dictates.
///
+/// Sets 'Kind' for any result kind except Incompatible.
Sema::AssignConvertType
-Sema::CheckAssignmentConstraints(QualType lhsType, QualType rhsType) {
+Sema::CheckAssignmentConstraints(QualType lhsType, Expr *&rhs,
+ CastKind &Kind) {
+ QualType rhsType = rhs->getType();
+
// Get canonical types. We're not formatting these types, just comparing
// them.
lhsType = Context.getCanonicalType(lhsType).getUnqualifiedType();
rhsType = Context.getCanonicalType(rhsType).getUnqualifiedType();
- if (lhsType == rhsType)
- return Compatible; // Common case: fast path an exact match.
-
- if ((lhsType->isObjCClassType() &&
- (rhsType.getDesugaredType() == Context.ObjCClassRedefinitionType)) ||
- (rhsType->isObjCClassType() &&
- (lhsType.getDesugaredType() == Context.ObjCClassRedefinitionType))) {
- return Compatible;
+ // Common case: no conversion required.
+ if (lhsType == rhsType) {
+ Kind = CK_NoOp;
+ return Compatible;
}
// If the left-hand side is a reference type, then we are in a
@@ -4745,144 +5923,220 @@ Sema::CheckAssignmentConstraints(QualType lhsType, QualType rhsType) {
// lhsType so that the resulting expression does not have reference
// type.
if (const ReferenceType *lhsTypeRef = lhsType->getAs<ReferenceType>()) {
- if (Context.typesAreCompatible(lhsTypeRef->getPointeeType(), rhsType))
+ if (Context.typesAreCompatible(lhsTypeRef->getPointeeType(), rhsType)) {
+ Kind = CK_LValueBitCast;
return Compatible;
+ }
return Incompatible;
}
+
// Allow scalar to ExtVector assignments, and assignments of an ExtVector type
// to the same ExtVector type.
if (lhsType->isExtVectorType()) {
if (rhsType->isExtVectorType())
- return lhsType == rhsType ? Compatible : Incompatible;
- if (rhsType->isArithmeticType())
+ return Incompatible;
+ if (rhsType->isArithmeticType()) {
+ // CK_VectorSplat does T -> vector T, so first cast to the
+ // element type.
+ QualType elType = cast<ExtVectorType>(lhsType)->getElementType();
+ if (elType != rhsType) {
+ Kind = PrepareScalarCast(*this, rhs, elType);
+ ImpCastExprToType(rhs, elType, Kind);
+ }
+ Kind = CK_VectorSplat;
return Compatible;
+ }
}
+ // Conversions to or from vector type.
if (lhsType->isVectorType() || rhsType->isVectorType()) {
if (lhsType->isVectorType() && rhsType->isVectorType()) {
+ // Allow assignments of an AltiVec vector type to an equivalent GCC
+ // vector type and vice versa
+ if (Context.areCompatibleVectorTypes(lhsType, rhsType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
// If we are allowing lax vector conversions, and LHS and RHS are both
// vectors, the total size only needs to be the same. This is a bitcast;
// no bits are changed but the result type is different.
if (getLangOptions().LaxVectorConversions &&
- (Context.getTypeSize(lhsType) == Context.getTypeSize(rhsType)))
+ (Context.getTypeSize(lhsType) == Context.getTypeSize(rhsType))) {
+ Kind = CK_BitCast;
return IncompatibleVectors;
-
- // Allow assignments of an AltiVec vector type to an equivalent GCC
- // vector type and vice versa
- if (Context.areCompatibleVectorTypes(lhsType, rhsType))
- return Compatible;
+ }
}
return Incompatible;
}
+ // Arithmetic conversions.
if (lhsType->isArithmeticType() && rhsType->isArithmeticType() &&
- !(getLangOptions().CPlusPlus && lhsType->isEnumeralType()))
+ !(getLangOptions().CPlusPlus && lhsType->isEnumeralType())) {
+ Kind = PrepareScalarCast(*this, rhs, lhsType);
return Compatible;
+ }
- if (isa<PointerType>(lhsType)) {
- if (rhsType->isIntegerType())
- return IntToPointer;
+ // Conversions to normal pointers.
+ if (const PointerType *lhsPointer = dyn_cast<PointerType>(lhsType)) {
+ // U* -> T*
+ if (isa<PointerType>(rhsType)) {
+ Kind = CK_BitCast;
+ return checkPointerTypesForAssignment(*this, lhsType, rhsType);
+ }
- if (isa<PointerType>(rhsType))
- return CheckPointerTypesForAssignment(lhsType, rhsType);
+ // int -> T*
+ if (rhsType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null?
+ return IntToPointer;
+ }
- // In general, C pointers are not compatible with ObjC object pointers.
+ // C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
if (isa<ObjCObjectPointerType>(rhsType)) {
- if (lhsType->isVoidPointerType()) // an exception to the rule.
+ // - conversions to void*
+ if (lhsPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_AnyPointerToObjCPointerCast;
return Compatible;
+ }
+
+ // - conversions from 'Class' to the redefinition type
+ if (rhsType->isObjCClassType() &&
+ Context.hasSameType(lhsType, Context.ObjCClassRedefinitionType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ Kind = CK_BitCast;
return IncompatiblePointer;
}
- if (rhsType->getAs<BlockPointerType>()) {
- if (lhsType->getAs<PointerType>()->getPointeeType()->isVoidType())
- return Compatible;
- // Treat block pointers as objects.
- if (getLangOptions().ObjC1 && lhsType->isObjCIdType())
+ // U^ -> void*
+ if (rhsType->getAs<BlockPointerType>()) {
+ if (lhsPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_BitCast;
return Compatible;
+ }
}
+
return Incompatible;
}
+ // Conversions to block pointers.
if (isa<BlockPointerType>(lhsType)) {
- if (rhsType->isIntegerType())
+ // U^ -> T^
+ if (rhsType->isBlockPointerType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return checkBlockPointerTypesForAssignment(*this, lhsType, rhsType);
+ }
+
+ // int or null -> T^
+ if (rhsType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
return IntToBlockPointer;
+ }
- // Treat block pointers as objects.
- if (getLangOptions().ObjC1 && rhsType->isObjCIdType())
+ // id -> T^
+ if (getLangOptions().ObjC1 && rhsType->isObjCIdType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
return Compatible;
+ }
- if (rhsType->isBlockPointerType())
- return CheckBlockPointerTypesForAssignment(lhsType, rhsType);
-
- if (const PointerType *RHSPT = rhsType->getAs<PointerType>()) {
- if (RHSPT->getPointeeType()->isVoidType())
+ // void* -> T^
+ if (const PointerType *RHSPT = rhsType->getAs<PointerType>())
+ if (RHSPT->getPointeeType()->isVoidType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
return Compatible;
- }
+ }
+
return Incompatible;
}
+ // Conversions to Objective-C pointers.
if (isa<ObjCObjectPointerType>(lhsType)) {
- if (rhsType->isIntegerType())
+ // A* -> B*
+ if (rhsType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
+ return checkObjCPointerTypesForAssignment(*this, lhsType, rhsType);
+ }
+
+ // int or null -> A*
+ if (rhsType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
return IntToPointer;
+ }
- // In general, C pointers are not compatible with ObjC object pointers.
+ // In general, C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
if (isa<PointerType>(rhsType)) {
- if (rhsType->isVoidPointerType()) // an exception to the rule.
+ // - conversions from 'void*'
+ if (rhsType->isVoidPointerType()) {
+ Kind = CK_AnyPointerToObjCPointerCast;
return Compatible;
- return IncompatiblePointer;
- }
- if (rhsType->isObjCObjectPointerType()) {
- return CheckObjCPointerTypesForAssignment(lhsType, rhsType);
- }
- if (const PointerType *RHSPT = rhsType->getAs<PointerType>()) {
- if (RHSPT->getPointeeType()->isVoidType())
+ }
+
+ // - conversions to 'Class' from its redefinition type
+ if (lhsType->isObjCClassType() &&
+ Context.hasSameType(rhsType, Context.ObjCClassRedefinitionType)) {
+ Kind = CK_BitCast;
return Compatible;
+ }
+
+ Kind = CK_AnyPointerToObjCPointerCast;
+ return IncompatiblePointer;
}
- // Treat block pointers as objects.
- if (rhsType->isBlockPointerType())
+
+ // T^ -> A*
+ if (rhsType->isBlockPointerType()) {
+ Kind = CK_AnyPointerToObjCPointerCast;
return Compatible;
+ }
+
return Incompatible;
}
+
+ // Conversions from pointers that are not covered by the above.
if (isa<PointerType>(rhsType)) {
- // C99 6.5.16.1p1: the left operand is _Bool and the right is a pointer.
- if (lhsType == Context.BoolTy)
+ // T* -> _Bool
+ if (lhsType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
return Compatible;
+ }
- if (lhsType->isIntegerType())
+ // T* -> int
+ if (lhsType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
return PointerToInt;
+ }
- if (isa<PointerType>(lhsType))
- return CheckPointerTypesForAssignment(lhsType, rhsType);
-
- if (isa<BlockPointerType>(lhsType) &&
- rhsType->getAs<PointerType>()->getPointeeType()->isVoidType())
- return Compatible;
return Incompatible;
}
+
+ // Conversions from Objective-C pointers that are not covered by the above.
if (isa<ObjCObjectPointerType>(rhsType)) {
- // C99 6.5.16.1p1: the left operand is _Bool and the right is a pointer.
- if (lhsType == Context.BoolTy)
+ // T* -> _Bool
+ if (lhsType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
return Compatible;
+ }
- if (lhsType->isIntegerType())
+ // T* -> int
+ if (lhsType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
return PointerToInt;
-
- // In general, C pointers are not compatible with ObjC object pointers.
- if (isa<PointerType>(lhsType)) {
- if (lhsType->isVoidPointerType()) // an exception to the rule.
- return Compatible;
- return IncompatiblePointer;
}
- if (isa<BlockPointerType>(lhsType) &&
- rhsType->getAs<PointerType>()->getPointeeType()->isVoidType())
- return Compatible;
+
return Incompatible;
}
+ // struct A -> struct B
if (isa<TagType>(lhsType) && isa<TagType>(rhsType)) {
- if (Context.typesAreCompatible(lhsType, rhsType))
+ if (Context.typesAreCompatible(lhsType, rhsType)) {
+ Kind = CK_NoOp;
return Compatible;
+ }
}
+
return Incompatible;
}
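
For instance, the ExtVector branch above now emits a two-step conversion; a sketch using Clang's ext_vector_type extension:

    typedef float float4 __attribute__((ext_vector_type(4)));
    void f(float4 *v, int i) {
      *v = i;  // 'i' is first cast to the element type 'float' via
               // PrepareScalarCast, then splatted with CK_VectorSplat
    }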
@@ -4902,7 +6156,7 @@ static void ConstructTransparentUnion(ASTContext &C, Expr *&E,
// union type from this initializer list.
TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType);
E = new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType,
- Initializer, false);
+ VK_RValue, Initializer, false);
}
Sema::AssignConvertType
@@ -4935,14 +6189,18 @@ Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType, Expr *&rExpr) {
if (rExpr->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull)) {
- ImpCastExprToType(rExpr, it->getType(), CK_IntegralToPointer);
+ ImpCastExprToType(rExpr, it->getType(), CK_NullToPointer);
InitField = *it;
break;
}
}
- if (CheckAssignmentConstraints(it->getType(), rExpr->getType())
+ Expr *rhs = rExpr;
+ CastKind Kind = CK_Invalid;
+ if (CheckAssignmentConstraints(it->getType(), rhs, Kind)
== Compatible) {
+ ImpCastExprToType(rhs, it->getType(), Kind);
+ rExpr = rhs;
InitField = *it;
break;
}
@@ -4970,7 +6228,7 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
// FIXME: Currently, we fall through and treat C++ classes like C
// structures.
- }
+ }
// C99 6.5.16.1p1: the left operand is a pointer and the right is
// a null pointer constant.
@@ -4979,7 +6237,7 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
lhsType->isBlockPointerType())
&& rExpr->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull)) {
- ImpCastExprToType(rExpr, lhsType, CK_Unknown);
+ ImpCastExprToType(rExpr, lhsType, CK_NullToPointer);
return Compatible;
}
@@ -4992,8 +6250,9 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
if (!lhsType->isReferenceType())
DefaultFunctionArrayLvalueConversion(rExpr);
+ CastKind Kind = CK_Invalid;
Sema::AssignConvertType result =
- CheckAssignmentConstraints(lhsType, rExpr->getType());
+ CheckAssignmentConstraints(lhsType, rExpr, Kind);
// C99 6.5.16.1p2: The value of the right operand is converted to the
// type of the assignment expression.
@@ -5002,8 +6261,7 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
// The getNonReferenceType() call makes sure that the resulting expression
// does not have reference type.
if (result != Incompatible && rExpr->getType() != lhsType)
- ImpCastExprToType(rExpr, lhsType.getNonLValueExprType(Context),
- CK_Unknown);
+ ImpCastExprToType(rExpr, lhsType.getNonLValueExprType(Context), Kind);
return result;
}
@@ -5071,16 +6329,22 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, Expr *&lex, Expr *&rex) {
if (const ExtVectorType *LV = lhsType->getAs<ExtVectorType>()) {
QualType EltTy = LV->getElementType();
if (EltTy->isIntegralType(Context) && rhsType->isIntegralType(Context)) {
- if (Context.getIntegerTypeOrder(EltTy, rhsType) >= 0) {
- ImpCastExprToType(rex, lhsType, CK_IntegralCast);
+ int order = Context.getIntegerTypeOrder(EltTy, rhsType);
+ if (order > 0)
+ ImpCastExprToType(rex, EltTy, CK_IntegralCast);
+ if (order >= 0) {
+ ImpCastExprToType(rex, lhsType, CK_VectorSplat);
if (swapped) std::swap(rex, lex);
return lhsType;
}
}
if (EltTy->isRealFloatingType() && rhsType->isScalarType() &&
rhsType->isRealFloatingType()) {
- if (Context.getFloatingTypeOrder(EltTy, rhsType) >= 0) {
- ImpCastExprToType(rex, lhsType, CK_FloatingCast);
+ int order = Context.getFloatingTypeOrder(EltTy, rhsType);
+ if (order > 0)
+ ImpCastExprToType(rex, EltTy, CK_FloatingCast);
+ if (order >= 0) {
+ ImpCastExprToType(rex, lhsType, CK_VectorSplat);
if (swapped) std::swap(rex, lex);
return lhsType;
}
@@ -5359,6 +6623,12 @@ QualType Sema::CheckSubtractionOperands(Expr *&lex, Expr *&rex,
return InvalidOperands(Loc, lex, rex);
}
+static bool isScopedEnumerationType(QualType T) {
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ return ET->getDecl()->isScoped();
+ return false;
+}
+
// C99 6.5.7
QualType Sema::CheckShiftOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
bool isCompAssign) {
@@ -5367,21 +6637,28 @@ QualType Sema::CheckShiftOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
!rex->getType()->hasIntegerRepresentation())
return InvalidOperands(Loc, lex, rex);
+  // C++0x: Don't allow scoped enums. FIXME: Use a predicate better than
+  // hasIntegerRepresentation() above so this separate check isn't needed.
+ if (isScopedEnumerationType(lex->getType()) ||
+ isScopedEnumerationType(rex->getType())) {
+ return InvalidOperands(Loc, lex, rex);
+ }
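
What the new check rejects (C++0x):

    enum class E { A = 4 };        // scoped enumeration
    int f() { return E::A >> 1; }  // invalid: scoped enums do not implicitly
                                   // convert to an integer type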
+
// Vector shifts promote their scalar inputs to vector type.
if (lex->getType()->isVectorType() || rex->getType()->isVectorType())
return CheckVectorOperands(Loc, lex, rex);
// Shifts don't perform usual arithmetic conversions; they just do integer
// promotions on each operand (C99 6.5.7p3).
- QualType LHSTy = Context.isPromotableBitField(lex);
- if (LHSTy.isNull()) {
- LHSTy = lex->getType();
- if (LHSTy->isPromotableIntegerType())
- LHSTy = Context.getPromotedIntegerType(LHSTy);
- }
- if (!isCompAssign)
- ImpCastExprToType(lex, LHSTy, CK_IntegralCast);
+ // For the LHS, do usual unary conversions, but then reset them away
+ // if this is a compound assignment.
+ Expr *old_lex = lex;
+ UsualUnaryConversions(lex);
+ QualType LHSTy = lex->getType();
+ if (isCompAssign) lex = old_lex;
+
+ // The RHS is simpler.
UsualUnaryConversions(rex);
// Sanity-check shift operands
@@ -5425,8 +6702,28 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
QualType lType = lex->getType();
QualType rType = rex->getType();
+ Expr *LHSStripped = lex->IgnoreParenImpCasts();
+ Expr *RHSStripped = rex->IgnoreParenImpCasts();
+ QualType LHSStrippedType = LHSStripped->getType();
+ QualType RHSStrippedType = RHSStripped->getType();
+
+ // Two different enums will raise a warning when compared.
+ if (const EnumType *LHSEnumType = LHSStrippedType->getAs<EnumType>()) {
+ if (const EnumType *RHSEnumType = RHSStrippedType->getAs<EnumType>()) {
+ if (LHSEnumType->getDecl()->getIdentifier() &&
+ RHSEnumType->getDecl()->getIdentifier() &&
+ !Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType)) {
+ Diag(Loc, diag::warn_comparison_of_mixed_enum_types)
+ << LHSStrippedType << RHSStrippedType
+ << lex->getSourceRange() << rex->getSourceRange();
+ }
+ }
+ }
+
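The new warning fires for code like the following sketch, where two distinct named enumerations are compared:

    enum Fruit { Apple };
    enum Tool  { Hammer };
    bool same = (Apple == Hammer);  // warn_comparison_of_mixed_enum_types
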
if (!lType->hasFloatingRepresentation() &&
- !(lType->isBlockPointerType() && isRelational)) {
+ !(lType->isBlockPointerType() && isRelational) &&
+ !lex->getLocStart().isMacroID() &&
+ !rex->getLocStart().isMacroID()) {
// For non-floating point types, check for self-comparisons of the form
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
// often indicate logic errors in the program.
@@ -5437,11 +6734,9 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
// obvious cases in the definition of the template anyways. The idea is to
// warn when the typed comparison operator will always evaluate to the same
// result.
- Expr *LHSStripped = lex->IgnoreParens();
- Expr *RHSStripped = rex->IgnoreParens();
if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped)) {
if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped)) {
- if (DRL->getDecl() == DRR->getDecl() && !Loc.isMacroID() &&
+ if (DRL->getDecl() == DRR->getDecl() &&
!IsWithinTemplateSpecialization(DRL->getDecl())) {
DiagRuntimeBehavior(Loc, PDiag(diag::warn_comparison_always)
<< 0 // self-
@@ -5525,7 +6820,7 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
rType = rex->getType();
// The result of comparisons is 'bool' in C++, 'int' in C.
- QualType ResultTy = getLangOptions().CPlusPlus ? Context.BoolTy:Context.IntTy;
+ QualType ResultTy = Context.getLogicalOperationType();
if (isRelational) {
if (lType->isRealType() && rType->isRealType())
@@ -5576,6 +6871,7 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
return ResultTy;
}
}
+
// C++ [expr.rel]p2:
// [...] Pointer conversions (4.10) and qualification
// conversions (4.4) are performed on pointer operands (or on
@@ -5629,24 +6925,28 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
}
if (getLangOptions().CPlusPlus) {
+ // Comparison of nullptr_t with itself.
+ if (lType->isNullPtrType() && rType->isNullPtrType())
+ return ResultTy;
+
// Comparison of pointers with null pointer constants and equality
// comparisons of member pointers to null pointer constants.
if (RHSIsNull &&
- (lType->isPointerType() ||
+ ((lType->isPointerType() || lType->isNullPtrType()) ||
(!isRelational && lType->isMemberPointerType()))) {
ImpCastExprToType(rex, lType,
lType->isMemberPointerType()
? CK_NullToMemberPointer
- : CK_IntegralToPointer);
+ : CK_NullToPointer);
return ResultTy;
}
if (LHSIsNull &&
- (rType->isPointerType() ||
+ ((rType->isPointerType() || rType->isNullPtrType()) ||
(!isRelational && rType->isMemberPointerType()))) {
ImpCastExprToType(lex, rType,
rType->isMemberPointerType()
? CK_NullToMemberPointer
- : CK_IntegralToPointer);
+ : CK_NullToPointer);
return ResultTy;
}
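
With these changes, null pointer constants are converted with the dedicated cast kinds; a C++0x sketch:

    struct A { int m; };
    void f(int *p, int A::*pm) {
      (void)(p == nullptr);   // nullptr -> 'int *' via CK_NullToPointer
      (void)(pm == nullptr);  // nullptr -> 'int A::*' via CK_NullToMemberPointer
    }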
@@ -5681,10 +6981,6 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
ImpCastExprToType(rex, T, CK_BitCast);
return ResultTy;
}
-
- // Comparison of nullptr_t with itself.
- if (lType->isNullPtrType() && rType->isNullPtrType())
- return ResultTy;
}
// Handle block pointer types.
@@ -5765,21 +7061,23 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
}
if (lType->isIntegerType())
- ImpCastExprToType(lex, rType, CK_IntegralToPointer);
+ ImpCastExprToType(lex, rType,
+ LHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
else
- ImpCastExprToType(rex, lType, CK_IntegralToPointer);
+ ImpCastExprToType(rex, lType,
+ RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
return ResultTy;
}
// Handle block pointers.
if (!isRelational && RHSIsNull
&& lType->isBlockPointerType() && rType->isIntegerType()) {
- ImpCastExprToType(rex, lType, CK_IntegralToPointer);
+ ImpCastExprToType(rex, lType, CK_NullToPointer);
return ResultTy;
}
if (!isRelational && LHSIsNull
&& lType->isIntegerType() && rType->isBlockPointerType()) {
- ImpCastExprToType(lex, rType, CK_IntegralToPointer);
+ ImpCastExprToType(lex, rType, CK_NullToPointer);
return ResultTy;
}
return InvalidOperands(Loc, lex, rex);
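For orientation, a minimal user-level sketch (not part of the patch) of the comparisons the branches above now classify with a dedicated null-to-pointer conversion instead of a general integral-to-pointer cast:

    int *p = 0;
    bool b1 = (p == 0);        // RHS null pointer constant: CK_NullToPointer
    bool b2 = (0 == p);        // symmetric LHS case, same conversion kind
    // With C++0x nullptr, comparing nullptr_t against itself is now
    // accepted directly, before the pointer checks run.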
@@ -5798,6 +7096,11 @@ QualType Sema::CheckVectorCompareOperands(Expr *&lex, Expr *&rex,
if (vType.isNull())
return vType;
+  // In AltiVec mode, the comparison results in a numeric type, i.e.
+  // bool for C++, int for C.
+ if (getLangOptions().AltiVec)
+ return Context.getLogicalOperationType();
+
QualType lType = lex->getType();
QualType rType = rex->getType();
@@ -5851,7 +7154,8 @@ inline QualType Sema::CheckBitwiseOperands(
QualType compType = UsualArithmeticConversions(lex, rex, isCompAssign);
- if (lex->getType()->isIntegerType() && rex->getType()->isIntegerType())
+ if (lex->getType()->isIntegralOrUnscopedEnumerationType() &&
+ rex->getType()->isIntegralOrUnscopedEnumerationType())
return compType;
return InvalidOperands(Loc, lex, rex);
}
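A hedged sketch of what the relaxed bitwise-operand check admits: after the usual arithmetic conversions, operands whose types are unscoped enumerations now pass alongside plain integers.

    enum Flags { Read = 1, Write = 2 };   // unscoped enumeration
    int mode = Read | Write;              // passes the integral-or-unscoped-enum test
    // A C++0x scoped enumeration (enum class) would fail the check and
    // fall through to InvalidOperands.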
@@ -5912,14 +7216,18 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
static bool IsReadonlyProperty(Expr *E, Sema &S) {
if (E->getStmtClass() == Expr::ObjCPropertyRefExprClass) {
const ObjCPropertyRefExpr* PropExpr = cast<ObjCPropertyRefExpr>(E);
- if (ObjCPropertyDecl *PDecl = PropExpr->getProperty()) {
- QualType BaseType = PropExpr->getBase()->getType();
- if (const ObjCObjectPointerType *OPT =
- BaseType->getAsObjCInterfacePointerType())
- if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
- if (S.isPropertyReadonly(PDecl, IFace))
- return true;
- }
+ if (PropExpr->isImplicitProperty()) return false;
+
+ ObjCPropertyDecl *PDecl = PropExpr->getExplicitProperty();
+ QualType BaseType = PropExpr->isSuperReceiver() ?
+ PropExpr->getSuperReceiverType() :
+ PropExpr->getBase()->getType();
+
+ if (const ObjCObjectPointerType *OPT =
+ BaseType->getAsObjCInterfacePointerType())
+ if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
+ if (S.isPropertyReadonly(PDecl, IFace))
+ return true;
}
return false;
}
@@ -6005,17 +7313,8 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
if (CompoundType.isNull()) {
QualType LHSTy(LHSType);
// Simple assignment "x = y".
- if (const ObjCImplicitSetterGetterRefExpr *OISGE =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(LHS)) {
- // If using property-dot syntax notation for assignment, and there is a
- // setter, RHS expression is being passed to the setter argument. So,
- // type conversion (and comparison) is RHS to setter's argument type.
- if (const ObjCMethodDecl *SetterMD = OISGE->getSetterMethod()) {
- ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
- LHSTy = (*P)->getType();
- }
- }
-
+ if (LHS->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForLValue(LHS, RHS, LHSTy);
ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
// Special case of NSObject attributes on c-style pointer types.
if (ConvTy == IncompatiblePointer &&
@@ -6025,6 +7324,12 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
LHSType->isObjCObjectPointerType())))
ConvTy = Compatible;
+ if (ConvTy == Compatible &&
+ getLangOptions().ObjCNonFragileABI &&
+ LHSType->isObjCObjectType())
+ Diag(Loc, diag::err_assignment_requires_nonfragile_object)
+ << LHSType;
+
  // If the RHS is a unary plus or minus, check to see if the = and the + are
// right next to each other. If so, the user may have typo'd "x =+ 4"
// instead of "x += 4".
@@ -6048,7 +7353,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
}
} else {
// Compound assignment "x += y"
- ConvTy = CheckAssignmentConstraints(LHSType, RHSType);
+ ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType);
}
if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType,
@@ -6072,6 +7377,11 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
Diag(UO->getOperatorLoc(), diag::note_indirection_through_null);
}
+ // Check for trivial buffer overflows.
+ if (const ArraySubscriptExpr *ae
+ = dyn_cast<ArraySubscriptExpr>(LHS->IgnoreParenCasts()))
+ CheckArrayAccess(ae);
+
// C99 6.5.16p3: The type of an assignment expression is the type of the
// left operand unless the left operand has qualified type, in which case
// it is the unqualified version of the type of the left operand.
@@ -6079,42 +7389,61 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
// is converted to the type of the assignment expression (above).
// C++ 5.17p1: the type of the assignment expression is that of its left
// operand.
- return LHSType.getUnqualifiedType();
+ return (getLangOptions().CPlusPlus
+ ? LHSType : LHSType.getUnqualifiedType());
}
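The new return value is observable from user code; a small illustration (C++11 traits used only for compactness):

    #include <type_traits>
    volatile int v = 0;
    // C++ 5.17p1: 'v = 1' is an lvalue of the left operand's qualified type.
    static_assert(std::is_same<decltype(v = 1), volatile int&>::value,
                  "C++ keeps the qualified type");
    // In C, per C99 6.5.16p3, the same expression is an rvalue of the
    // unqualified type 'int'.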
// C99 6.5.17
-QualType Sema::CheckCommaOperands(Expr *LHS, Expr *&RHS, SourceLocation Loc) {
- DiagnoseUnusedExprResult(LHS);
+static QualType CheckCommaOperands(Sema &S, Expr *&LHS, Expr *&RHS,
+ SourceLocation Loc) {
+ S.DiagnoseUnusedExprResult(LHS);
- // Comma performs lvalue conversion (C99 6.3.2.1), but not unary conversions.
- // C++ does not perform this conversion (C++ [expr.comma]p1).
- if (!getLangOptions().CPlusPlus)
- DefaultFunctionArrayLvalueConversion(RHS);
+ ExprResult LHSResult = S.CheckPlaceholderExpr(LHS, Loc);
+ if (LHSResult.isInvalid())
+ return QualType();
+
+ ExprResult RHSResult = S.CheckPlaceholderExpr(RHS, Loc);
+ if (RHSResult.isInvalid())
+ return QualType();
+ RHS = RHSResult.take();
- // FIXME: Check that RHS type is complete in C mode (it's legal for it to be
- // incomplete in C++).
+ // C's comma performs lvalue conversion (C99 6.3.2.1) on both its
+ // operands, but not unary promotions.
+ // C++'s comma does not do any conversions at all (C++ [expr.comma]p1).
+
+  // So we treat the LHS as an ignored value, and in C++ we allow the
+ // containing site to determine what should be done with the RHS.
+ S.IgnoredValueConversions(LHS);
+
+ if (!S.getLangOptions().CPlusPlus) {
+ S.DefaultFunctionArrayLvalueConversion(RHS);
+ if (!RHS->getType()->isVoidType())
+ S.RequireCompleteType(Loc, RHS->getType(), diag::err_incomplete_type);
+ }
return RHS->getType();
}
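To make the C/C++ split concrete, an illustrative fragment (not from the patch): C's comma applies lvalue conversion to both operands, so an array right operand decays, while C++'s comma leaves the right operand untouched.

    int arr[10];
    int (&ref)[10] = (0, arr);   // C++: result is an lvalue of type int[10]
    // In C, (0, arr) would have decayed to int*, and the new code also
    // requires the RHS type to be complete in C unless it is void.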
/// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine
/// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions.
-QualType Sema::CheckIncrementDecrementOperand(Expr *Op, SourceLocation OpLoc,
- bool isInc, bool isPrefix) {
+static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
+ ExprValueKind &VK,
+ SourceLocation OpLoc,
+ bool isInc, bool isPrefix) {
if (Op->isTypeDependent())
- return Context.DependentTy;
+ return S.Context.DependentTy;
QualType ResType = Op->getType();
assert(!ResType.isNull() && "no type for increment/decrement expression");
- if (getLangOptions().CPlusPlus && ResType->isBooleanType()) {
+ if (S.getLangOptions().CPlusPlus && ResType->isBooleanType()) {
// Decrement of bool is not allowed.
if (!isInc) {
- Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
+ S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
return QualType();
}
// Increment of bool sets it to true, but is deprecated.
- Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
+ S.Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
} else if (ResType->isRealType()) {
// OK!
} else if (ResType->isAnyPointerType()) {
@@ -6122,53 +7451,128 @@ QualType Sema::CheckIncrementDecrementOperand(Expr *Op, SourceLocation OpLoc,
// C99 6.5.2.4p2, 6.5.6p2
if (PointeeTy->isVoidType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(OpLoc, diag::err_typecheck_pointer_arith_void_type)
+ if (S.getLangOptions().CPlusPlus) {
+ S.Diag(OpLoc, diag::err_typecheck_pointer_arith_void_type)
<< Op->getSourceRange();
return QualType();
}
// Pointer to void is a GNU extension in C.
- Diag(OpLoc, diag::ext_gnu_void_ptr) << Op->getSourceRange();
+ S.Diag(OpLoc, diag::ext_gnu_void_ptr) << Op->getSourceRange();
} else if (PointeeTy->isFunctionType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(OpLoc, diag::err_typecheck_pointer_arith_function_type)
+ if (S.getLangOptions().CPlusPlus) {
+ S.Diag(OpLoc, diag::err_typecheck_pointer_arith_function_type)
<< Op->getType() << Op->getSourceRange();
return QualType();
}
- Diag(OpLoc, diag::ext_gnu_ptr_func_arith)
+ S.Diag(OpLoc, diag::ext_gnu_ptr_func_arith)
<< ResType << Op->getSourceRange();
- } else if (RequireCompleteType(OpLoc, PointeeTy,
- PDiag(diag::err_typecheck_arithmetic_incomplete_type)
+ } else if (S.RequireCompleteType(OpLoc, PointeeTy,
+ S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
<< Op->getSourceRange()
<< ResType))
return QualType();
// Diagnose bad cases where we step over interface counts.
- else if (PointeeTy->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
- Diag(OpLoc, diag::err_arithmetic_nonfragile_interface)
+ else if (PointeeTy->isObjCObjectType() && S.LangOpts.ObjCNonFragileABI) {
+ S.Diag(OpLoc, diag::err_arithmetic_nonfragile_interface)
<< PointeeTy << Op->getSourceRange();
return QualType();
}
} else if (ResType->isAnyComplexType()) {
// C99 does not support ++/-- on complex types, we allow as an extension.
- Diag(OpLoc, diag::ext_integer_increment_complex)
+ S.Diag(OpLoc, diag::ext_integer_increment_complex)
<< ResType << Op->getSourceRange();
+ } else if (ResType->isPlaceholderType()) {
+ ExprResult PR = S.CheckPlaceholderExpr(Op, OpLoc);
+ if (PR.isInvalid()) return QualType();
+ return CheckIncrementDecrementOperand(S, PR.take(), VK, OpLoc,
+ isInc, isPrefix);
+ } else if (S.getLangOptions().AltiVec && ResType->isVectorType()) {
+    // OK! (C/C++ Language Extensions for CBEA, Version 2.6, section 10.3)
} else {
- Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
+ S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
<< ResType << int(isInc) << Op->getSourceRange();
return QualType();
}
// At this point, we know we have a real, complex or pointer type.
// Now make sure the operand is a modifiable lvalue.
- if (CheckForModifiableLvalue(Op, OpLoc, *this))
+ if (CheckForModifiableLvalue(Op, OpLoc, S))
return QualType();
// In C++, a prefix increment is the same type as the operand. Otherwise
// (in C or with postfix), the increment is the unqualified type of the
// operand.
- return isPrefix && getLangOptions().CPlusPlus
- ? ResType : ResType.getUnqualifiedType();
+ if (isPrefix && S.getLangOptions().CPlusPlus) {
+ VK = VK_LValue;
+ return ResType;
+ } else {
+ VK = VK_RValue;
+ return ResType.getUnqualifiedType();
+ }
+}
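A quick sketch of the value-kind split encoded above: only a C++ prefix increment or decrement stays an lvalue of the operand's type; every other form yields an rvalue of the unqualified type.

    int x = 0;
    ++x = 5;      // OK in C++: prefix ++ yields VK_LValue; ill-formed in C
    // x++ = 5;   // error in both languages: postfix ++ yields VK_RValue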
+
+void Sema::ConvertPropertyForRValue(Expr *&E) {
+ assert(E->getValueKind() == VK_LValue &&
+ E->getObjectKind() == OK_ObjCProperty);
+ const ObjCPropertyRefExpr *PRE = E->getObjCProperty();
+
+ ExprValueKind VK = VK_RValue;
+ if (PRE->isImplicitProperty()) {
+ if (const ObjCMethodDecl *GetterMethod =
+ PRE->getImplicitPropertyGetter()) {
+ QualType Result = GetterMethod->getResultType();
+ VK = Expr::getValueKindForType(Result);
+ }
+ else {
+ Diag(PRE->getLocation(), diag::err_getter_not_found)
+ << PRE->getBase()->getType();
+ }
+ }
+
+ E = ImplicitCastExpr::Create(Context, E->getType(), CK_GetObjCProperty,
+ E, 0, VK);
+
+ ExprResult Result = MaybeBindToTemporary(E);
+ if (!Result.isInvalid())
+ E = Result.take();
+}
+
+void Sema::ConvertPropertyForLValue(Expr *&LHS, Expr *&RHS, QualType &LHSTy) {
+ assert(LHS->getValueKind() == VK_LValue &&
+ LHS->getObjectKind() == OK_ObjCProperty);
+ const ObjCPropertyRefExpr *PRE = LHS->getObjCProperty();
+
+ if (PRE->isImplicitProperty()) {
+    // If property-dot syntax is used for the assignment and there is a
+    // setter, the RHS expression is passed as the setter's argument, so
+    // type conversion (and comparison) is done against the setter's
+    // argument type.
+ if (const ObjCMethodDecl *SetterMD = PRE->getImplicitPropertySetter()) {
+ ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
+ LHSTy = (*P)->getType();
+
+ // Otherwise, if the getter returns an l-value, just call that.
+ } else {
+ QualType Result = PRE->getImplicitPropertyGetter()->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(Result);
+ if (VK == VK_LValue) {
+ LHS = ImplicitCastExpr::Create(Context, LHS->getType(),
+ CK_GetObjCProperty, LHS, 0, VK);
+ return;
+ }
+ }
+ }
+
+ if (getLangOptions().CPlusPlus && LHSTy->isRecordType()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(Context, LHSTy);
+ Expr *Arg = RHS;
+ ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(),
+ Owned(Arg));
+ if (!ArgE.isInvalid())
+ RHS = ArgE.takeAs<Expr>();
+ }
}
+
/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
/// This routine allows us to typecheck complex/recursive expressions
@@ -6182,7 +7586,7 @@ QualType Sema::CheckIncrementDecrementOperand(Expr *Op, SourceLocation OpLoc,
/// - *(x + 1) -> x, if x is an array
/// - &"123"[2] -> 0
/// - & __real__ x -> x
-static NamedDecl *getPrimaryDecl(Expr *E) {
+static ValueDecl *getPrimaryDecl(Expr *E) {
switch (E->getStmtClass()) {
case Stmt::DeclRefExprClass:
return cast<DeclRefExpr>(E)->getDecl();
@@ -6234,16 +7638,21 @@ static NamedDecl *getPrimaryDecl(Expr *E) {
/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
/// In C++, the operand might be an overloaded function name, in which case
/// we allow the '&' but retain the overloaded-function type.
-QualType Sema::CheckAddressOfOperand(Expr *OrigOp, SourceLocation OpLoc) {
+static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
+ SourceLocation OpLoc) {
if (OrigOp->isTypeDependent())
- return Context.DependentTy;
- if (OrigOp->getType() == Context.OverloadTy)
- return Context.OverloadTy;
+ return S.Context.DependentTy;
+ if (OrigOp->getType() == S.Context.OverloadTy)
+ return S.Context.OverloadTy;
+
+ ExprResult PR = S.CheckPlaceholderExpr(OrigOp, OpLoc);
+ if (PR.isInvalid()) return QualType();
+ OrigOp = PR.take();
// Make sure to ignore parentheses in subsequent checks
Expr *op = OrigOp->IgnoreParens();
- if (getLangOptions().C99) {
+ if (S.getLangOptions().C99) {
// Implement C99-only parts of addressof rules.
if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
if (uOp->getOpcode() == UO_Deref)
@@ -6254,24 +7663,25 @@ QualType Sema::CheckAddressOfOperand(Expr *OrigOp, SourceLocation OpLoc) {
// Technically, there should be a check for array subscript
// expressions here, but the result of one is always an lvalue anyway.
}
- NamedDecl *dcl = getPrimaryDecl(op);
- Expr::isLvalueResult lval = op->isLvalue(Context);
+ ValueDecl *dcl = getPrimaryDecl(op);
+ Expr::LValueClassification lval = op->ClassifyLValue(S.Context);
if (lval == Expr::LV_ClassTemporary) {
- Diag(OpLoc, isSFINAEContext()? diag::err_typecheck_addrof_class_temporary
- : diag::ext_typecheck_addrof_class_temporary)
+ bool sfinae = S.isSFINAEContext();
+ S.Diag(OpLoc, sfinae ? diag::err_typecheck_addrof_class_temporary
+ : diag::ext_typecheck_addrof_class_temporary)
<< op->getType() << op->getSourceRange();
- if (isSFINAEContext())
+ if (sfinae)
return QualType();
} else if (isa<ObjCSelectorExpr>(op)) {
- return Context.getPointerType(op->getType());
+ return S.Context.getPointerType(op->getType());
} else if (lval == Expr::LV_MemberFunction) {
// If it's an instance method, make a member pointer.
// The expression must have exactly the form &A::foo.
// If the underlying expression isn't a decl ref, give up.
if (!isa<DeclRefExpr>(op)) {
- Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
<< OrigOp->getSourceRange();
return QualType();
}
@@ -6280,45 +7690,41 @@ QualType Sema::CheckAddressOfOperand(Expr *OrigOp, SourceLocation OpLoc) {
// The id-expression was parenthesized.
if (OrigOp != DRE) {
- Diag(OpLoc, diag::err_parens_pointer_member_function)
+ S.Diag(OpLoc, diag::err_parens_pointer_member_function)
<< OrigOp->getSourceRange();
// The method was named without a qualifier.
} else if (!DRE->getQualifier()) {
- Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ S.Diag(OpLoc, diag::err_unqualified_pointer_member_function)
<< op->getSourceRange();
}
- return Context.getMemberPointerType(op->getType(),
- Context.getTypeDeclType(MD->getParent()).getTypePtr());
+ return S.Context.getMemberPointerType(op->getType(),
+ S.Context.getTypeDeclType(MD->getParent()).getTypePtr());
} else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
// C99 6.5.3.2p1
// The operand must be either an l-value or a function designator
if (!op->getType()->isFunctionType()) {
// FIXME: emit more specific diag...
- Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
<< op->getSourceRange();
return QualType();
}
- } else if (op->getBitField()) { // C99 6.5.3.2p1
+ } else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1
// The operand cannot be a bit-field
- Diag(OpLoc, diag::err_typecheck_address_of)
+ S.Diag(OpLoc, diag::err_typecheck_address_of)
<< "bit-field" << op->getSourceRange();
return QualType();
- } else if (op->refersToVectorElement()) {
+ } else if (op->getObjectKind() == OK_VectorComponent) {
// The operand cannot be an element of a vector
- Diag(OpLoc, diag::err_typecheck_address_of)
+ S.Diag(OpLoc, diag::err_typecheck_address_of)
<< "vector element" << op->getSourceRange();
return QualType();
- } else if (isa<ObjCPropertyRefExpr>(op)) {
+ } else if (op->getObjectKind() == OK_ObjCProperty) {
// cannot take address of a property expression.
- Diag(OpLoc, diag::err_typecheck_address_of)
+ S.Diag(OpLoc, diag::err_typecheck_address_of)
<< "property expression" << op->getSourceRange();
return QualType();
- } else if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(op)) {
- // FIXME: Can LHS ever be null here?
- if (!CheckAddressOfOperand(CO->getTrueExpr(), OpLoc).isNull())
- return CheckAddressOfOperand(CO->getFalseExpr(), OpLoc);
} else if (dcl) { // C99 6.5.3.2p1
// We have an lvalue with a decl. Make sure the decl is not declared
// with the register storage-class specifier.
@@ -6326,29 +7732,31 @@ QualType Sema::CheckAddressOfOperand(Expr *OrigOp, SourceLocation OpLoc) {
      // in C++ it is not an error to take the address of a register
      // variable (C++03 7.1.1p3)
if (vd->getStorageClass() == SC_Register &&
- !getLangOptions().CPlusPlus) {
- Diag(OpLoc, diag::err_typecheck_address_of)
+ !S.getLangOptions().CPlusPlus) {
+ S.Diag(OpLoc, diag::err_typecheck_address_of)
<< "register variable" << op->getSourceRange();
return QualType();
}
} else if (isa<FunctionTemplateDecl>(dcl)) {
- return Context.OverloadTy;
- } else if (FieldDecl *FD = dyn_cast<FieldDecl>(dcl)) {
+ return S.Context.OverloadTy;
+ } else if (isa<FieldDecl>(dcl) || isa<IndirectFieldDecl>(dcl)) {
// Okay: we can take the address of a field.
// Could be a pointer to member, though, if there is an explicit
// scope qualifier for the class.
if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier()) {
DeclContext *Ctx = dcl->getDeclContext();
if (Ctx && Ctx->isRecord()) {
- if (FD->getType()->isReferenceType()) {
- Diag(OpLoc,
- diag::err_cannot_form_pointer_to_member_of_reference_type)
- << FD->getDeclName() << FD->getType();
+ if (dcl->getType()->isReferenceType()) {
+ S.Diag(OpLoc,
+ diag::err_cannot_form_pointer_to_member_of_reference_type)
+ << dcl->getDeclName() << dcl->getType();
return QualType();
}
- return Context.getMemberPointerType(op->getType(),
- Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+ while (cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion())
+ Ctx = Ctx->getParent();
+ return S.Context.getMemberPointerType(op->getType(),
+ S.Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
}
}
} else if (!isa<FunctionDecl>(dcl))
@@ -6359,21 +7767,22 @@ QualType Sema::CheckAddressOfOperand(Expr *OrigOp, SourceLocation OpLoc) {
// Taking the address of a void variable is technically illegal, but we
// allow it in cases which are otherwise valid.
// Example: "extern void x; void* y = &x;".
- Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange();
+ S.Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange();
}
// If the operand has type "type", the result has type "pointer to type".
if (op->getType()->isObjCObjectType())
- return Context.getObjCObjectPointerType(op->getType());
- return Context.getPointerType(op->getType());
+ return S.Context.getObjCObjectPointerType(op->getType());
+ return S.Context.getPointerType(op->getType());
}
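For reference, a hedged sketch of the pointer-to-member forms enforced above: the operand must be spelled exactly &A::foo, qualified and unparenthesized.

    struct A { void foo(); };
    void (A::*pmf)() = &A::foo;      // OK: qualified, unparenthesized
    // void (A::*e1)() = &(A::foo);  // err_parens_pointer_member_function
    // Naming the method without the 'A::' qualifier would instead hit
    // err_unqualified_pointer_member_function.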
/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
-QualType Sema::CheckIndirectionOperand(Expr *Op, SourceLocation OpLoc) {
+static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
+ SourceLocation OpLoc) {
if (Op->isTypeDependent())
- return Context.DependentTy;
+ return S.Context.DependentTy;
- UsualUnaryConversions(Op);
+ S.UsualUnaryConversions(Op);
QualType OpTy = Op->getType();
QualType Result;
@@ -6386,12 +7795,26 @@ QualType Sema::CheckIndirectionOperand(Expr *Op, SourceLocation OpLoc) {
else if (const ObjCObjectPointerType *OPT =
OpTy->getAs<ObjCObjectPointerType>())
Result = OPT->getPointeeType();
+ else {
+ ExprResult PR = S.CheckPlaceholderExpr(Op, OpLoc);
+ if (PR.isInvalid()) return QualType();
+ if (PR.take() != Op)
+ return CheckIndirectionOperand(S, PR.take(), VK, OpLoc);
+ }
if (Result.isNull()) {
- Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
+ S.Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
<< OpTy << Op->getSourceRange();
return QualType();
}
+
+ // Dereferences are usually l-values...
+ VK = VK_LValue;
+
+ // ...except that certain expressions are never l-values in C.
+ if (!S.getLangOptions().CPlusPlus &&
+ IsCForbiddenLValueType(S.Context, Result))
+ VK = VK_RValue;
return Result;
}
@@ -6457,25 +7880,67 @@ static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode(
return Opc;
}
+/// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself.
+/// This warning is only emitted for builtin assignment operations. It is also
+/// suppressed in the event of macro expansions.
+static void DiagnoseSelfAssignment(Sema &S, Expr *lhs, Expr *rhs,
+ SourceLocation OpLoc) {
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+ if (OpLoc.isInvalid() || OpLoc.isMacroID())
+ return;
+ lhs = lhs->IgnoreParenImpCasts();
+ rhs = rhs->IgnoreParenImpCasts();
+ const DeclRefExpr *LeftDeclRef = dyn_cast<DeclRefExpr>(lhs);
+ const DeclRefExpr *RightDeclRef = dyn_cast<DeclRefExpr>(rhs);
+ if (!LeftDeclRef || !RightDeclRef ||
+ LeftDeclRef->getLocation().isMacroID() ||
+ RightDeclRef->getLocation().isMacroID())
+ return;
+ const ValueDecl *LeftDecl =
+ cast<ValueDecl>(LeftDeclRef->getDecl()->getCanonicalDecl());
+ const ValueDecl *RightDecl =
+ cast<ValueDecl>(RightDeclRef->getDecl()->getCanonicalDecl());
+ if (LeftDecl != RightDecl)
+ return;
+ if (LeftDecl->getType().isVolatileQualified())
+ return;
+ if (const ReferenceType *RefTy = LeftDecl->getType()->getAs<ReferenceType>())
+ if (RefTy->getPointeeType().isVolatileQualified())
+ return;
+
+ S.Diag(OpLoc, diag::warn_self_assignment)
+ << LeftDeclRef->getType()
+ << lhs->getSourceRange() << rhs->getSourceRange();
+}
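A minimal example of what DiagnoseSelfAssignment fires on, and the exemptions the checks above carve out for macros and volatile objects:

    int a = 0;
    a = a;                 // warn_self_assignment
    volatile int v = 0;
    v = v;                 // no warning: volatile accesses are intentional
    #define SELF(x) x = x
    SELF(a);               // no warning: OpLoc is a macro location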
+
/// CreateBuiltinBinOp - Creates a new built-in binary operation with
/// operator @p Opc at location @c TokLoc. This routine only supports
/// built-in operations; ActOnBinOp handles overloaded operators.
ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
- unsigned Op,
+ BinaryOperatorKind Opc,
Expr *lhs, Expr *rhs) {
QualType ResultTy; // Result type of the binary operator.
- BinaryOperatorKind Opc = (BinaryOperatorKind) Op;
// The following two variables are used for compound assignment operators
QualType CompLHSTy; // Type of LHS after promotions for computation
QualType CompResultTy; // Type of computation result
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, QualType());
+ if (getLangOptions().CPlusPlus &&
+ lhs->getObjectKind() != OK_ObjCProperty) {
+ VK = lhs->getValueKind();
+ OK = lhs->getObjectKind();
+ }
+ if (!ResultTy.isNull())
+ DiagnoseSelfAssignment(*this, lhs, rhs, OpLoc);
break;
case BO_PtrMemD:
case BO_PtrMemI:
- ResultTy = CheckPointerToMemberOperands(lhs, rhs, OpLoc,
+ ResultTy = CheckPointerToMemberOperands(lhs, rhs, VK, OpLoc,
Opc == BO_PtrMemI);
break;
case BO_Mul:
@@ -6518,7 +7983,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_MulAssign:
case BO_DivAssign:
CompResultTy = CheckMultiplyDivideOperands(lhs, rhs, OpLoc, true,
- Opc == BO_DivAssign);
+ Opc == BO_DivAssign);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull())
ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
@@ -6555,22 +8020,26 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
break;
case BO_Comma:
- ResultTy = CheckCommaOperands(lhs, rhs, OpLoc);
+ ResultTy = CheckCommaOperands(*this, lhs, rhs, OpLoc);
+ if (getLangOptions().CPlusPlus) {
+ VK = rhs->getValueKind();
+ OK = rhs->getObjectKind();
+ }
break;
}
if (ResultTy.isNull())
return ExprError();
- if (ResultTy->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
- if (Opc >= BO_Assign && Opc <= BO_OrAssign)
- Diag(OpLoc, diag::err_assignment_requires_nonfragile_object)
- << ResultTy;
- }
if (CompResultTy.isNull())
- return Owned(new (Context) BinaryOperator(lhs, rhs, Opc, ResultTy, OpLoc));
- else
- return Owned(new (Context) CompoundAssignOperator(lhs, rhs, Opc, ResultTy,
- CompLHSTy, CompResultTy,
- OpLoc));
+ return Owned(new (Context) BinaryOperator(lhs, rhs, Opc, ResultTy,
+ VK, OK, OpLoc));
+
+ if (getLangOptions().CPlusPlus && lhs->getObjectKind() != OK_ObjCProperty) {
+ VK = VK_LValue;
+ OK = lhs->getObjectKind();
+ }
+ return Owned(new (Context) CompoundAssignOperator(lhs, rhs, Opc, ResultTy,
+ VK, OK, CompLHSTy,
+ CompResultTy, OpLoc));
}
/// SuggestParentheses - Emit a diagnostic together with a fixit hint that wraps
@@ -6660,13 +8129,87 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
rhs->getSourceRange());
}
+/// \brief Given a '&&' expression nested inside a '||' one, emit a
+/// diagnostic together with a fixit hint that wraps the '&&' expression
+/// in parentheses.
+static void
+EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
+ Expr *E) {
+ assert(isa<BinaryOperator>(E) &&
+ cast<BinaryOperator>(E)->getOpcode() == BO_LAnd);
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::warn_logical_and_in_logical_or)
+ << E->getSourceRange(),
+ Self.PDiag(diag::note_logical_and_in_logical_or_silence),
+ E->getSourceRange(),
+ Self.PDiag(0), SourceRange());
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'true'.
+static bool EvaluatesAsTrue(Sema &S, Expr *E) {
+ bool Res;
+ return E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res;
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'false'.
+static bool EvaluatesAsFalse(Sema &S, Expr *E) {
+ bool Res;
+ return E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res;
+}
+
+/// \brief Look for '&&' in the left hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
+ Expr *OrLHS, Expr *OrRHS) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrLHS)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "a && b || 0" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, OrRHS))
+ return;
+ // If it's "1 && a || b" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getLHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ } else if (Bop->getOpcode() == BO_LOr) {
+ if (BinaryOperator *RBop = dyn_cast<BinaryOperator>(Bop->getRHS())) {
+ // If it's "a || b && 1 || c" we didn't warn earlier for
+ // "a || b && 1", but warn now.
+ if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop);
+ }
+ }
+ }
+}
+
+/// \brief Look for '&&' in the right hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
+ Expr *OrLHS, Expr *OrRHS) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrRHS)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "0 || a && b" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, OrLHS))
+ return;
+ // If it's "a || b && 1" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ }
+ }
+}
+
/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
-/// precedence. This currently diagnoses only "arg1 'bitwise' arg2 'eq' arg3".
-/// But it could also warn about arg1 && arg2 || arg3, as GCC 4.3+ does.
+/// precedence.
static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
SourceLocation OpLoc, Expr *lhs, Expr *rhs){
+ // Diagnose "arg1 'bitwise' arg2 'eq' arg3".
if (BinaryOperator::isBitwiseOp(Opc))
- DiagnoseBitwisePrecedence(Self, Opc, OpLoc, lhs, rhs);
+ return DiagnoseBitwisePrecedence(Self, Opc, OpLoc, lhs, rhs);
+
+ // Warn about arg1 || arg2 && arg3, as GCC 4.3+ does.
+ // We don't warn for 'assert(a || b && "bad")' since this is safe.
+  if (Opc == BO_LOr && !OpLoc.isMacroID() /* Don't warn in macros. */) {
+ DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, lhs, rhs);
+ DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, lhs, rhs);
+ }
}
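An illustrative sketch of the new '&&'-inside-'||' diagnostic and its constant-operand suppressions:

    bool f(bool a, bool b, bool c) {
      return a || b && c;      // warns; fixit suggests a || (b && c)
    }
    bool g(bool a, bool b) {
      return a || b && true;   // quiet: precedence cannot change the result
    }
    // assert(a || b && "bad") also stays quiet, since OpLoc is a macro
    // location.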
// Binary Operators. 'Tok' is the token for the operator.
@@ -6686,22 +8229,34 @@ ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc,
Expr *lhs, Expr *rhs) {
- if (getLangOptions().CPlusPlus &&
- (lhs->getType()->isOverloadableType() ||
- rhs->getType()->isOverloadableType())) {
- // Find all of the overloaded operators visible from this
- // point. We perform both an operator-name lookup from the local
- // scope and an argument-dependent lookup based on the types of
- // the arguments.
- UnresolvedSet<16> Functions;
- OverloadedOperatorKind OverOp = BinaryOperator::getOverloadedOperator(Opc);
- if (S && OverOp != OO_None)
- LookupOverloadedOperatorName(OverOp, S, lhs->getType(), rhs->getType(),
- Functions);
+ if (getLangOptions().CPlusPlus) {
+ bool UseBuiltinOperator;
- // Build the (potentially-overloaded, potentially-dependent)
- // binary operation.
- return CreateOverloadedBinOp(OpLoc, Opc, Functions, lhs, rhs);
+ if (lhs->isTypeDependent() || rhs->isTypeDependent()) {
+ UseBuiltinOperator = false;
+ } else if (Opc == BO_Assign && lhs->getObjectKind() == OK_ObjCProperty) {
+ UseBuiltinOperator = true;
+ } else {
+ UseBuiltinOperator = !lhs->getType()->isOverloadableType() &&
+ !rhs->getType()->isOverloadableType();
+ }
+
+ if (!UseBuiltinOperator) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp
+ = BinaryOperator::getOverloadedOperator(Opc);
+ if (S && OverOp != OO_None)
+ LookupOverloadedOperatorName(OverOp, S, lhs->getType(), rhs->getType(),
+ Functions);
+
+ // Build the (potentially-overloaded, potentially-dependent)
+ // binary operation.
+ return CreateOverloadedBinOp(OpLoc, Opc, Functions, lhs, rhs);
+ }
}
// Build a built-in binary operation.
@@ -6709,28 +8264,28 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
}
ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
- unsigned OpcIn,
- Expr *Input) {
- UnaryOperatorKind Opc = static_cast<UnaryOperatorKind>(OpcIn);
-
+ UnaryOperatorKind Opc,
+ Expr *Input) {
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
QualType resultType;
switch (Opc) {
case UO_PreInc:
case UO_PreDec:
case UO_PostInc:
case UO_PostDec:
- resultType = CheckIncrementDecrementOperand(Input, OpLoc,
+ resultType = CheckIncrementDecrementOperand(*this, Input, VK, OpLoc,
Opc == UO_PreInc ||
Opc == UO_PostInc,
Opc == UO_PreInc ||
Opc == UO_PreDec);
break;
case UO_AddrOf:
- resultType = CheckAddressOfOperand(Input, OpLoc);
+ resultType = CheckAddressOfOperand(*this, Input, OpLoc);
break;
case UO_Deref:
DefaultFunctionArrayLvalueConversion(Input);
- resultType = CheckIndirectionOperand(Input, OpLoc);
+ resultType = CheckIndirectionOperand(*this, Input, VK, OpLoc);
break;
case UO_Plus:
case UO_Minus:
@@ -6748,6 +8303,11 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
Opc == UO_Plus &&
resultType->isPointerType())
break;
+ else if (resultType->isPlaceholderType()) {
+ ExprResult PR = CheckPlaceholderExpr(Input, OpLoc);
+ if (PR.isInvalid()) return ExprError();
+ return CreateBuiltinUnaryOp(OpLoc, Opc, PR.take());
+ }
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input->getSourceRange());
@@ -6761,9 +8321,16 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// C99 does not support '~' for complex conjugation.
Diag(OpLoc, diag::ext_integer_complement_complex)
<< resultType << Input->getSourceRange();
- else if (!resultType->hasIntegerRepresentation())
+ else if (resultType->hasIntegerRepresentation())
+ break;
+ else if (resultType->isPlaceholderType()) {
+ ExprResult PR = CheckPlaceholderExpr(Input, OpLoc);
+ if (PR.isInvalid()) return ExprError();
+ return CreateBuiltinUnaryOp(OpLoc, Opc, PR.take());
+ } else {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input->getSourceRange());
+ }
break;
case UO_LNot: // logical negation
// Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
@@ -6771,25 +8338,40 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
resultType = Input->getType();
if (resultType->isDependentType())
break;
- if (!resultType->isScalarType()) // C99 6.5.3.3p1
+ if (resultType->isScalarType()) { // C99 6.5.3.3p1
+ // ok, fallthrough
+ } else if (resultType->isPlaceholderType()) {
+ ExprResult PR = CheckPlaceholderExpr(Input, OpLoc);
+ if (PR.isInvalid()) return ExprError();
+ return CreateBuiltinUnaryOp(OpLoc, Opc, PR.take());
+ } else {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input->getSourceRange());
+ }
+
// LNot always has type int. C99 6.5.3.3p5.
// In C++, it's bool. C++ 5.3.1p8
- resultType = getLangOptions().CPlusPlus ? Context.BoolTy : Context.IntTy;
+ resultType = Context.getLogicalOperationType();
break;
case UO_Real:
case UO_Imag:
- resultType = CheckRealImagOperand(Input, OpLoc, Opc == UO_Real);
+ resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
+ // _Real and _Imag map ordinary l-values into ordinary l-values.
+ if (Input->getValueKind() != VK_RValue &&
+ Input->getObjectKind() == OK_Ordinary)
+ VK = Input->getValueKind();
break;
case UO_Extension:
resultType = Input->getType();
+ VK = Input->getValueKind();
+ OK = Input->getObjectKind();
break;
}
if (resultType.isNull())
return ExprError();
- return Owned(new (Context) UnaryOperator(Input, Opc, resultType, OpLoc));
+ return Owned(new (Context) UnaryOperator(Input, Opc, resultType,
+ VK, OK, OpLoc));
}
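A compact illustration of what Context.getLogicalOperationType() returns for logical negation, folding the old 'CPlusPlus ? BoolTy : IntTy' ternary:

    int x = 2;
    // C99 6.5.3.3p5: !x has type int, so sizeof(!x) == sizeof(int).
    // C++ 5.3.1p8:   !x has type bool, so sizeof(!x) == sizeof(bool).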
ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
@@ -6815,24 +8397,16 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
// Unary Operators. 'Tok' is the token for the operator.
ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
- tok::TokenKind Op, Expr *Input) {
+ tok::TokenKind Op, Expr *Input) {
return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input);
}
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
-ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc,
- SourceLocation LabLoc,
- IdentifierInfo *LabelII) {
- // Look up the record for this label identifier.
- LabelStmt *&LabelDecl = getCurFunction()->LabelMap[LabelII];
-
- // If we haven't seen this label yet, create a forward reference. It
- // will be validated and/or cleaned up in ActOnFinishFunctionBody.
- if (LabelDecl == 0)
- LabelDecl = new (Context) LabelStmt(LabLoc, LabelII, 0);
-
+ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
+ LabelDecl *TheDecl) {
+ TheDecl->setUsed();
// Create the AST node. The address of a label always has type 'void*'.
- return Owned(new (Context) AddrLabelExpr(OpLoc, LabLoc, LabelDecl,
+ return Owned(new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl,
Context.getPointerType(Context.VoidTy)));
}
@@ -6854,28 +8428,54 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
// If there are sub stmts in the compound stmt, take the type of the last one
// as the type of the stmtexpr.
QualType Ty = Context.VoidTy;
-
+ bool StmtExprMayBindToTemp = false;
if (!Compound->body_empty()) {
Stmt *LastStmt = Compound->body_back();
+ LabelStmt *LastLabelStmt = 0;
// If LastStmt is a label, skip down through into the body.
- while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt))
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt)) {
+ LastLabelStmt = Label;
LastStmt = Label->getSubStmt();
+ }
+ if (Expr *LastExpr = dyn_cast<Expr>(LastStmt)) {
+ // Do function/array conversion on the last expression, but not
+ // lvalue-to-rvalue. However, initialize an unqualified type.
+ DefaultFunctionArrayConversion(LastExpr);
+ Ty = LastExpr->getType().getUnqualifiedType();
- if (Expr *LastExpr = dyn_cast<Expr>(LastStmt))
- Ty = LastExpr->getType();
+ if (!Ty->isDependentType() && !LastExpr->isTypeDependent()) {
+ ExprResult Res = PerformCopyInitialization(
+ InitializedEntity::InitializeResult(LPLoc,
+ Ty,
+ false),
+ SourceLocation(),
+ Owned(LastExpr));
+ if (Res.isInvalid())
+ return ExprError();
+ if ((LastExpr = Res.takeAs<Expr>())) {
+ if (!LastLabelStmt)
+ Compound->setLastStmt(LastExpr);
+ else
+ LastLabelStmt->setSubStmt(LastExpr);
+ StmtExprMayBindToTemp = true;
+ }
+ }
+ }
}
// FIXME: Check that expression type is complete/non-abstract; statement
// expressions are not lvalues.
-
- return Owned(new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc));
+ Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc);
+ if (StmtExprMayBindToTemp)
+ return MaybeBindToTemporary(ResStmtExpr);
+ return Owned(ResStmtExpr);
}
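For context, a hedged GNU statement-expression example of what the rewritten tail handling computes: the last expression statement supplies the value after function/array decay but without lvalue-to-rvalue conversion, and the result type is unqualified.

    int n = ({ int tmp = 21; tmp * 2; });   // n == 42; the ({...}) has type int
    // When the last expression has class type in C++, the copy-initialization
    // above can create a temporary, hence the MaybeBindToTemporary call.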
ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
- TypeSourceInfo *TInfo,
- OffsetOfComponent *CompPtr,
- unsigned NumComponents,
- SourceLocation RParenLoc) {
+ TypeSourceInfo *TInfo,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc) {
QualType ArgTy = TInfo->getType();
bool Dependent = ArgTy->isDependentType();
SourceRange TypeRange = TInfo->getTypeLoc().getLocalSourceRange();
@@ -6974,6 +8574,12 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
LookupResult R(*this, OC.U.IdentInfo, OC.LocStart, LookupMemberName);
LookupQualifiedName(R, RD);
FieldDecl *MemberDecl = R.getAsSingle<FieldDecl>();
+ IndirectFieldDecl *IndirectMemberDecl = 0;
+ if (!MemberDecl) {
+ if ((IndirectMemberDecl = R.getAsSingle<IndirectFieldDecl>()))
+ MemberDecl = IndirectMemberDecl->getAnonField();
+ }
+
if (!MemberDecl)
return ExprError(Diag(BuiltinLoc, diag::err_no_member)
<< OC.U.IdentInfo << RD << SourceRange(OC.LocStart,
@@ -6992,12 +8598,8 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
}
RecordDecl *Parent = MemberDecl->getParent();
- bool AnonStructUnion = Parent->isAnonymousStructOrUnion();
- if (AnonStructUnion) {
- do {
- Parent = cast<RecordDecl>(Parent->getParent());
- } while (Parent->isAnonymousStructOrUnion());
- }
+ if (IndirectMemberDecl)
+ Parent = cast<RecordDecl>(IndirectMemberDecl->getDeclContext());
// If the member was found in a base class, introduce OffsetOfNodes for
// the base class indirections.
@@ -7010,15 +8612,17 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
Comps.push_back(OffsetOfNode(B->Base));
}
- if (AnonStructUnion) {
- llvm::SmallVector<FieldDecl*, 4> Path;
- BuildAnonymousStructUnionMemberPath(MemberDecl, Path);
- unsigned n = Path.size();
- for (int j = n - 1; j > -1; --j)
- Comps.push_back(OffsetOfNode(OC.LocStart, Path[j], OC.LocEnd));
- } else {
+ if (IndirectMemberDecl) {
+ for (IndirectFieldDecl::chain_iterator FI =
+ IndirectMemberDecl->chain_begin(),
+ FEnd = IndirectMemberDecl->chain_end(); FI != FEnd; FI++) {
+ assert(isa<FieldDecl>(*FI));
+ Comps.push_back(OffsetOfNode(OC.LocStart,
+ cast<FieldDecl>(*FI), OC.LocEnd));
+ }
+ } else
Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd));
- }
+
CurrentType = MemberDecl->getType().getNonReferenceType();
}
@@ -7028,13 +8632,13 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
}
ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
- SourceLocation BuiltinLoc,
- SourceLocation TypeLoc,
- ParsedType argty,
- OffsetOfComponent *CompPtr,
- unsigned NumComponents,
- SourceLocation RPLoc) {
-
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ ParsedType argty,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RPLoc) {
+
TypeSourceInfo *ArgTInfo;
QualType ArgTy = GetTypeFromParser(argty, &ArgTInfo);
if (ArgTy.isNull())
@@ -7048,41 +8652,14 @@ ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
}
-ExprResult Sema::ActOnTypesCompatibleExpr(SourceLocation BuiltinLoc,
- ParsedType arg1,ParsedType arg2,
- SourceLocation RPLoc) {
- TypeSourceInfo *argTInfo1;
- QualType argT1 = GetTypeFromParser(arg1, &argTInfo1);
- TypeSourceInfo *argTInfo2;
- QualType argT2 = GetTypeFromParser(arg2, &argTInfo2);
-
- assert((!argT1.isNull() && !argT2.isNull()) && "Missing type argument(s)");
-
- return BuildTypesCompatibleExpr(BuiltinLoc, argTInfo1, argTInfo2, RPLoc);
-}
-
-ExprResult
-Sema::BuildTypesCompatibleExpr(SourceLocation BuiltinLoc,
- TypeSourceInfo *argTInfo1,
- TypeSourceInfo *argTInfo2,
- SourceLocation RPLoc) {
- if (getLangOptions().CPlusPlus) {
- Diag(BuiltinLoc, diag::err_types_compatible_p_in_cplusplus)
- << SourceRange(BuiltinLoc, RPLoc);
- return ExprError();
- }
-
- return Owned(new (Context) TypesCompatibleExpr(Context.IntTy, BuiltinLoc,
- argTInfo1, argTInfo2, RPLoc));
-}
-
-
ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
- Expr *CondExpr,
- Expr *LHSExpr, Expr *RHSExpr,
- SourceLocation RPLoc) {
+ Expr *CondExpr,
+ Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation RPLoc) {
assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
QualType resType;
bool ValueDependent = false;
if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
@@ -7098,13 +8675,16 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
<< CondExpr->getSourceRange());
  // If the condition is > zero, then the AST type is the same as the LHSExpr.
- resType = condEval.getZExtValue() ? LHSExpr->getType() : RHSExpr->getType();
- ValueDependent = condEval.getZExtValue() ? LHSExpr->isValueDependent()
- : RHSExpr->isValueDependent();
+ Expr *ActiveExpr = condEval.getZExtValue() ? LHSExpr : RHSExpr;
+
+ resType = ActiveExpr->getType();
+ ValueDependent = ActiveExpr->isValueDependent();
+ VK = ActiveExpr->getValueKind();
+ OK = ActiveExpr->getObjectKind();
}
return Owned(new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
- resType, RPLoc,
+ resType, VK, OK, RPLoc,
resType->isDependentType(),
ValueDependent));
}
@@ -7126,32 +8706,51 @@ void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *BlockScope) {
void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!");
+ assert(ParamInfo.getContext() == Declarator::BlockLiteralContext);
BlockScopeInfo *CurBlock = getCurBlock();
TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
- CurBlock->TheDecl->setSignatureAsWritten(Sig);
QualType T = Sig->getType();
- bool isVariadic;
- QualType RetTy;
- if (const FunctionType *Fn = T->getAs<FunctionType>()) {
- CurBlock->FunctionType = T;
- RetTy = Fn->getResultType();
- isVariadic =
- !isa<FunctionProtoType>(Fn) || cast<FunctionProtoType>(Fn)->isVariadic();
- } else {
- RetTy = T;
- isVariadic = false;
- }
+ // GetTypeForDeclarator always produces a function type for a block
+ // literal signature. Furthermore, it is always a FunctionProtoType
+ // unless the function was written with a typedef.
+ assert(T->isFunctionType() &&
+ "GetTypeForDeclarator made a non-function block signature");
- CurBlock->TheDecl->setIsVariadic(isVariadic);
+ // Look for an explicit signature in that function type.
+ FunctionProtoTypeLoc ExplicitSignature;
- // Don't allow returning an array by value.
- if (RetTy->isArrayType()) {
- Diag(ParamInfo.getSourceRange().getBegin(), diag::err_block_returns_array);
- return;
+ TypeLoc tmp = Sig->getTypeLoc().IgnoreParens();
+ if (isa<FunctionProtoTypeLoc>(tmp)) {
+ ExplicitSignature = cast<FunctionProtoTypeLoc>(tmp);
+
+ // Check whether that explicit signature was synthesized by
+ // GetTypeForDeclarator. If so, don't save that as part of the
+ // written signature.
+ if (ExplicitSignature.getLParenLoc() ==
+ ExplicitSignature.getRParenLoc()) {
+ // This would be much cheaper if we stored TypeLocs instead of
+ // TypeSourceInfos.
+ TypeLoc Result = ExplicitSignature.getResultLoc();
+ unsigned Size = Result.getFullDataSize();
+ Sig = Context.CreateTypeSourceInfo(Result.getType(), Size);
+ Sig->getTypeLoc().initializeFullCopy(Result, Size);
+
+ ExplicitSignature = FunctionProtoTypeLoc();
+ }
}
+ CurBlock->TheDecl->setSignatureAsWritten(Sig);
+ CurBlock->FunctionType = T;
+
+ const FunctionType *Fn = T->getAs<FunctionType>();
+ QualType RetTy = Fn->getResultType();
+ bool isVariadic =
+ (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
+
+ CurBlock->TheDecl->setIsVariadic(isVariadic);
+
  // Don't allow returning an Objective-C interface by value.
if (RetTy->isObjCObjectType()) {
Diag(ParamInfo.getSourceRange().getBegin(),
@@ -7168,10 +8767,9 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
// Push block parameters from the declarator if we had them.
llvm::SmallVector<ParmVarDecl*, 8> Params;
- if (isa<FunctionProtoType>(T)) {
- FunctionProtoTypeLoc TL = cast<FunctionProtoTypeLoc>(Sig->getTypeLoc());
- for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
- ParmVarDecl *Param = TL.getArg(I);
+ if (ExplicitSignature) {
+ for (unsigned I = 0, E = ExplicitSignature.getNumArgs(); I != E; ++I) {
+ ParmVarDecl *Param = ExplicitSignature.getArg(I);
if (Param->getIdentifier() == 0 &&
!Param->isImplicit() &&
!Param->isInvalidDecl() &&
@@ -7194,9 +8792,13 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
}
// Set the parameters on the block decl.
- if (!Params.empty())
+ if (!Params.empty()) {
CurBlock->TheDecl->setParams(Params.data(), Params.size());
-
+ CheckParmsForFunctionDef(CurBlock->TheDecl->param_begin(),
+ CurBlock->TheDecl->param_end(),
+ /*CheckParameterNames=*/false);
+ }
+
// Finally we can process decl attributes.
ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
@@ -7211,17 +8813,13 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
if (Params.empty())
return;
- bool ShouldCheckShadow =
- Diags.getDiagnosticLevel(diag::warn_decl_shadow) != Diagnostic::Ignored;
-
for (BlockDecl::param_iterator AI = CurBlock->TheDecl->param_begin(),
E = CurBlock->TheDecl->param_end(); AI != E; ++AI) {
(*AI)->setOwningFunction(CurBlock->TheDecl);
// If this has an identifier, add it to the scope stack.
if ((*AI)->getIdentifier()) {
- if (ShouldCheckShadow)
- CheckShadow(CurBlock->TheScope, *AI);
+ CheckShadow(CurBlock->TheScope, *AI);
PushOnScopeChains(*AI, CurBlock->TheScope);
}
@@ -7234,13 +8832,12 @@ void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
// Pop off CurBlock, handle nested blocks.
PopDeclContext();
PopFunctionOrBlockScope();
- // FIXME: Delete the ParmVarDecl objects as well???
}
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
- Stmt *Body, Scope *CurScope) {
+ Stmt *Body, Scope *CurScope) {
// If blocks are disabled, emit an error.
if (!LangOpts.Blocks)
Diag(CaretLoc, diag::err_blocks_disable);
@@ -7256,6 +8853,10 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
bool NoReturn = BSI->TheDecl->getAttr<NoReturnAttr>();
QualType BlockTy;
+ // Set the captured variables on the block.
+ BSI->TheDecl->setCaptures(Context, BSI->Captures.begin(), BSI->Captures.end(),
+ BSI->CapturesCXXThis);
+
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>();
@@ -7265,8 +8866,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// Turn protoless block types into nullary block types.
if (isa<FunctionNoProtoType>(FTy)) {
- BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0,
- false, false, 0, 0, Ext);
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = Ext;
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, EPI);
// Otherwise, if we don't need to change anything about the function type,
// preserve its sugar structure.
@@ -7277,26 +8879,22 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// Otherwise, make the minimal modifications to the function type.
} else {
const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals = 0; // FIXME: silently?
+ EPI.ExtInfo = Ext;
BlockTy = Context.getFunctionType(RetTy,
FPT->arg_type_begin(),
FPT->getNumArgs(),
- FPT->isVariadic(),
- /*quals*/ 0,
- FPT->hasExceptionSpec(),
- FPT->hasAnyExceptionSpec(),
- FPT->getNumExceptions(),
- FPT->exception_begin(),
- Ext);
+ EPI);
}
// If we don't have a function type, just build one from nothing.
} else {
- BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo(NoReturn, 0, CC_Default));
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo(NoReturn, 0, CC_Default);
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, EPI);
}
- // FIXME: Check that return/parameter types are complete/non-abstract
DiagnoseUnusedParameters(BSI->TheDecl->param_begin(),
BSI->TheDecl->param_end());
BlockTy = Context.getBlockPointerType(BlockTy);
@@ -7307,28 +8905,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
- bool Good = true;
- // Check goto/label use.
- for (llvm::DenseMap<IdentifierInfo*, LabelStmt*>::iterator
- I = BSI->LabelMap.begin(), E = BSI->LabelMap.end(); I != E; ++I) {
- LabelStmt *L = I->second;
-
- // Verify that we have no forward references left. If so, there was a goto
- // or address of a label taken, but no definition of it.
- if (L->getSubStmt() != 0)
- continue;
-
- // Emit error.
- Diag(L->getIdentLoc(), diag::err_undeclared_label_use) << L->getName();
- Good = false;
- }
- if (!Good) {
- PopFunctionOrBlockScope();
- return ExprError();
- }
-
- BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy,
- BSI->hasBlockDeclRefExprs);
+ BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
// Issue any analysis-based warnings.
const sema::AnalysisBasedWarnings::Policy &WP =
@@ -7343,17 +8920,15 @@ ExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc,
Expr *expr, ParsedType type,
SourceLocation RPLoc) {
TypeSourceInfo *TInfo;
- QualType T = GetTypeFromParser(type, &TInfo);
+ GetTypeFromParser(type, &TInfo);
return BuildVAArgExpr(BuiltinLoc, expr, TInfo, RPLoc);
}
ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
- Expr *E, TypeSourceInfo *TInfo,
- SourceLocation RPLoc) {
+ Expr *E, TypeSourceInfo *TInfo,
+ SourceLocation RPLoc) {
Expr *OrigExpr = E;
- InitBuiltinVaListType();
-
// Get the va_list type
QualType VaListType = Context.getBuiltinVaListType();
if (VaListType->isArrayType()) {
@@ -7389,10 +8964,17 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
// The type of __null will be int or long, depending on the size of
// pointers on the target.
QualType Ty;
- if (Context.Target.getPointerWidth(0) == Context.Target.getIntWidth())
+ unsigned pw = Context.Target.getPointerWidth(0);
+ if (pw == Context.Target.getIntWidth())
Ty = Context.IntTy;
- else
+ else if (pw == Context.Target.getLongWidth())
Ty = Context.LongTy;
+ else if (pw == Context.Target.getLongLongWidth())
+ Ty = Context.LongLongTy;
+ else {
+ assert(!"I don't know size of pointer!");
+ Ty = Context.IntTy;
+ }
return Owned(new (Context) GNUNullExpr(Ty, TokenLoc));
}
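A short illustration of the widened __null logic: the literal's integer type now follows the target's pointer width, covering LLP64-style targets where only 'long long' is pointer-sized.

    void *p = __null;   // GNU C++ extension; __null has type int, long, or
                        // long long, whichever matches the pointer width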
@@ -7454,15 +9036,29 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
case FunctionVoidPointer:
DiagKind = diag::ext_typecheck_convert_pointer_void_func;
break;
+ case IncompatiblePointerDiscardsQualifiers: {
+ // Perform array-to-pointer decay if necessary.
+ if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType);
+
+ Qualifiers lhq = SrcType->getPointeeType().getQualifiers();
+ Qualifiers rhq = DstType->getPointeeType().getQualifiers();
+ if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
+ DiagKind = diag::err_typecheck_incompatible_address_space;
+ break;
+ }
+
+ llvm_unreachable("unknown error case for discarding qualifiers!");
+ // fallthrough
+ }
case CompatiblePointerDiscardsQualifiers:
// If the qualifiers lost were because we were applying the
// (deprecated) C++ conversion from a string literal to a char*
// (or wchar_t*), then there was no error (C++ 4.2p2). FIXME:
// Ideally, this check would be performed in
- // CheckPointerTypesForAssignment. However, that would require a
+ // checkPointerTypesForAssignment. However, that would require a
// bit of refactoring (so that the second argument is an
// expression, rather than a type), which should be done as part
- // of a larger effort to fix CheckPointerTypesForAssignment for
+ // of a larger effort to fix checkPointerTypesForAssignment for
// C++ semantics.
if (getLangOptions().CPlusPlus &&
IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
@@ -7548,7 +9144,8 @@ bool Sema::VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result){
E->getSourceRange();
if (EvalResult.Diag &&
- Diags.getDiagnosticLevel(diag::ext_expr_not_ice) != Diagnostic::Ignored)
+ Diags.getDiagnosticLevel(diag::ext_expr_not_ice, EvalResult.DiagLoc)
+ != Diagnostic::Ignored)
Diag(EvalResult.DiagLoc, EvalResult.Diag);
if (Result)
@@ -7625,7 +9222,7 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
// -Wunused-parameters)
if (isa<ParmVarDecl>(D) ||
(isa<VarDecl>(D) && D->getDeclContext()->isFunctionOrMethod())) {
- D->setUsed(true);
+ D->setUsed();
return;
}
@@ -7653,6 +9250,11 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
// potentially evaluated.
ExprEvalContexts.back().addReferencedDecl(Loc, D);
return;
+
+ case PotentiallyEvaluatedIfUsed:
+ // Referenced declarations will only be used if the construct in the
+ // containing expression is used.
+ return;
}
// Note that this declaration has been used.
@@ -7684,6 +9286,9 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
MarkVTableUsed(Loc, MethodDecl->getParent());
}
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // Recursive functions should be marked when used from another function.
+ if (CurContext == Function) return;
+
// Implicit instantiation of function templates and member functions of
// class templates.
if (Function->isImplicitlyInstantiable()) {
@@ -7712,19 +9317,23 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
else
PendingInstantiations.push_back(std::make_pair(Function, Loc));
}
- } else // Walk redefinitions, as some of them may be instantiable.
+ } else {
+ // Walk redefinitions, as some of them may be instantiable.
for (FunctionDecl::redecl_iterator i(Function->redecls_begin()),
e(Function->redecls_end()); i != e; ++i) {
if (!i->isUsed(false) && i->isImplicitlyInstantiable())
MarkDeclarationReferenced(Loc, *i);
}
+ }
- // FIXME: keep track of references to static functions
-
- // Recursive functions should be marked when used from another function.
- if (CurContext != Function)
- Function->setUsed(true);
+ // Keep track of used but undefined functions.
+ if (!Function->isPure() && !Function->hasBody() &&
+ Function->getLinkage() != ExternalLinkage) {
+ SourceLocation &old = UndefinedInternals[Function->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
+ }
+ Function->setUsed(true);
return;
}
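
The UndefinedInternals map now records the first use of an internal-linkage function that has no body. A sketch of the pattern it is meant to catch, assuming a used-but-never-defined diagnostic is later issued from this map:

    static void helper();           // internal linkage, declared only
    void caller() { helper(); }     // first use recorded in UndefinedInternals
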
@@ -7741,7 +9350,12 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
}
}
- // FIXME: keep track of references to static data?
+ // Keep track of used but undefined variables.
+ if (Var->hasDefinition() == VarDecl::DeclarationOnly
+ && Var->getLinkage() != ExternalLinkage) {
+ SourceLocation &old = UndefinedInternals[Var->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
+ }
D->setUsed(true);
return;
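
The same bookkeeping now applies to internal-linkage variables that are only declared; a sketch (that anonymous-namespace members have non-external linkage at this Clang revision is an assumption):

    namespace { extern int counter; }   // declaration only
    int read() { return counter; }      // use recorded; flagged if undefined
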
@@ -7779,8 +9393,7 @@ bool MarkReferencedDecls::TraverseRecordType(RecordType *T) {
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) {
const TemplateArgumentList &Args = Spec->getTemplateArgs();
- return TraverseTemplateArguments(Args.getFlatArgumentList(),
- Args.flat_size());
+ return TraverseTemplateArguments(Args.data(), Args.size());
}
return true;
@@ -7791,6 +9404,70 @@ void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
Marker.TraverseType(Context.getCanonicalType(T));
}
+namespace {
+ /// \brief Helper class that marks all of the declarations referenced by
+ /// potentially-evaluated subexpressions as "referenced".
+ class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
+ Sema &S;
+
+ public:
+ typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
+
+ explicit EvaluatedExprMarker(Sema &S) : Inherited(S.Context), S(S) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ S.MarkDeclarationReferenced(E->getLocation(), E->getDecl());
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ S.MarkDeclarationReferenced(E->getMemberLoc(), E->getMemberDecl());
+ Inherited::VisitMemberExpr(E);
+ }
+
+ void VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->getConstructor())
+ S.MarkDeclarationReferenced(E->getLocStart(), E->getConstructor());
+ if (E->getOperatorNew())
+ S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorNew());
+ if (E->getOperatorDelete())
+ S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorDelete());
+ Inherited::VisitCXXNewExpr(E);
+ }
+
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->getOperatorDelete())
+ S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorDelete());
+ QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ S.MarkDeclarationReferenced(E->getLocStart(),
+ S.LookupDestructor(Record));
+ }
+
+ Inherited::VisitCXXDeleteExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ S.MarkDeclarationReferenced(E->getLocStart(), E->getConstructor());
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ S.MarkDeclarationReferenced(E->getLocation(), E->getDecl());
+ }
+
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ Visit(E->getExpr());
+ }
+ };
+}
+
+/// \brief Mark any declarations that appear within this expression or any
+/// potentially-evaluated subexpressions as "referenced".
+void Sema::MarkDeclarationsReferencedInExpr(Expr *E) {
+ EvaluatedExprMarker(*this).Visit(E);
+}
+
/// \brief Emit a diagnostic that describes an effect on the run-time behavior
/// of the program being compiled.
///
@@ -7815,6 +9492,7 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc,
break;
case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
Diag(Loc, PD);
return true;
@@ -7848,40 +9526,42 @@ bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
return false;
}
-// Diagnose the common s/=/==/ typo. Note that adding parentheses
+// Diagnose the s/=/==/ and s/\|=/!=/ typos. Note that adding parentheses
// will prevent this condition from triggering, which is what we want.
void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
SourceLocation Loc;
unsigned diagnostic = diag::warn_condition_is_assignment;
+ bool IsOrAssign = false;
if (isa<BinaryOperator>(E)) {
BinaryOperator *Op = cast<BinaryOperator>(E);
- if (Op->getOpcode() != BO_Assign)
+ if (Op->getOpcode() != BO_Assign && Op->getOpcode() != BO_OrAssign)
return;
+ IsOrAssign = Op->getOpcode() == BO_OrAssign;
+
// Greylist some idioms by putting them into a warning subcategory.
if (ObjCMessageExpr *ME
= dyn_cast<ObjCMessageExpr>(Op->getRHS()->IgnoreParenCasts())) {
Selector Sel = ME->getSelector();
// self = [<foo> init...]
- if (isSelfExpr(Op->getLHS())
- && Sel.getIdentifierInfoForSlot(0)->getName().startswith("init"))
+ if (isSelfExpr(Op->getLHS()) && Sel.getNameForSlot(0).startswith("init"))
diagnostic = diag::warn_condition_is_idiomatic_assignment;
// <foo> = [<bar> nextObject]
- else if (Sel.isUnarySelector() &&
- Sel.getIdentifierInfoForSlot(0)->getName() == "nextObject")
+ else if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "nextObject")
diagnostic = diag::warn_condition_is_idiomatic_assignment;
}
Loc = Op->getOperatorLoc();
} else if (isa<CXXOperatorCallExpr>(E)) {
CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(E);
- if (Op->getOperator() != OO_Equal)
+ if (Op->getOperator() != OO_Equal && Op->getOperator() != OO_PipeEqual)
return;
+ IsOrAssign = Op->getOperator() == OO_PipeEqual;
Loc = Op->getOperatorLoc();
} else {
// Not an assignment.
@@ -7892,29 +9572,63 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
SourceLocation Close = PP.getLocForEndOfToken(E->getSourceRange().getEnd());
Diag(Loc, diagnostic) << E->getSourceRange();
- Diag(Loc, diag::note_condition_assign_to_comparison)
- << FixItHint::CreateReplacement(Loc, "==");
+
+ if (IsOrAssign)
+ Diag(Loc, diag::note_condition_or_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "!=");
+ else
+ Diag(Loc, diag::note_condition_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "==");
+
Diag(Loc, diag::note_condition_assign_silence)
<< FixItHint::CreateInsertion(Open, "(")
<< FixItHint::CreateInsertion(Close, ")");
}
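
A sketch of the conditions this now diagnoses, including the newly covered |= case; the fix-it wording follows the notes emitted above:

    void f(int x, int y) {
      if (x = y) { }     // warn: assignment as condition; note suggests '=='
      if (x |= y) { }    // newly covered; note suggests '!='
      if ((x = y)) { }   // extra parentheses silence the warning by design
    }
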
+/// \brief Redundant parentheses around an equality comparison can indicate
+/// that the user intended an assignment to be used as the condition.
+void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *parenE) {
+ // Don't warn if the parens came from a macro.
+ SourceLocation parenLoc = parenE->getLocStart();
+ if (parenLoc.isInvalid() || parenLoc.isMacroID())
+ return;
+
+ Expr *E = parenE->IgnoreParens();
+
+ if (BinaryOperator *opE = dyn_cast<BinaryOperator>(E))
+ if (opE->getOpcode() == BO_EQ &&
+ opE->getLHS()->IgnoreParenImpCasts()->isModifiableLvalue(Context)
+ == Expr::MLV_Valid) {
+ SourceLocation Loc = opE->getOperatorLoc();
+
+ Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange();
+ Diag(Loc, diag::note_equality_comparison_to_assign)
+ << FixItHint::CreateReplacement(Loc, "=");
+ Diag(Loc, diag::note_equality_comparison_silence)
+ << FixItHint::CreateRemoval(parenE->getSourceRange().getBegin())
+ << FixItHint::CreateRemoval(parenE->getSourceRange().getEnd());
+ }
+}
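
Conversely, a sketch of the new redundant-parentheses warning, which fires only when the left-hand side is a modifiable lvalue and the parentheses did not come from a macro:

    void g(int x, int y) {
      if ((x == y)) { }   // warn: equality with extra parens; did you mean '='?
    }
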
+
bool Sema::CheckBooleanCondition(Expr *&E, SourceLocation Loc) {
DiagnoseAssignmentAsCondition(E);
+ if (ParenExpr *parenE = dyn_cast<ParenExpr>(E))
+ DiagnoseEqualityWithExtraParens(parenE);
if (!E->isTypeDependent()) {
+ if (E->isBoundMemberFunction(Context))
+ return Diag(E->getLocStart(), diag::err_invalid_use_of_bound_member_func)
+ << E->getSourceRange();
+
+ if (getLangOptions().CPlusPlus)
+ return CheckCXXBooleanCondition(E); // C++ 6.4p4
+
DefaultFunctionArrayLvalueConversion(E);
QualType T = E->getType();
-
- if (getLangOptions().CPlusPlus) {
- if (CheckCXXBooleanCondition(E)) // C++ 6.4p4
- return true;
- } else if (!T->isScalarType()) { // C99 6.8.4.1p1
- Diag(Loc, diag::err_typecheck_statement_requires_scalar)
- << T << E->getSourceRange();
- return true;
- }
+ if (!T->isScalarType()) // C99 6.8.4.1p1
+ return Diag(Loc, diag::err_typecheck_statement_requires_scalar)
+ << T << E->getSourceRange();
}
return false;
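
A sketch of the new bound-member-function check in boolean conditions; the exact diagnostic text is assumed from the diag name:

    struct S { void m(); };
    void h(S s) {
      if (s.m) { }   // err_invalid_use_of_bound_member_func
    }
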
@@ -7930,3 +9644,26 @@ ExprResult Sema::ActOnBooleanCondition(Scope *S, SourceLocation Loc,
return Owned(Sub);
}
+
+/// Check for operands with placeholder types and complain if found.
+/// Returns ExprError() if there was an error from which no recovery was possible.
+ExprResult Sema::CheckPlaceholderExpr(Expr *E, SourceLocation Loc) {
+ const BuiltinType *BT = E->getType()->getAs<BuiltinType>();
+ if (!BT || !BT->isPlaceholderType()) return Owned(E);
+
+  // If this has the 'overload' placeholder type, check for a single overload.
+ assert(BT->getKind() == BuiltinType::Overload);
+
+ if (FunctionDecl *Specialization
+ = ResolveSingleFunctionTemplateSpecialization(E)) {
+ // The access doesn't really matter in this case.
+ DeclAccessPair Found = DeclAccessPair::make(Specialization,
+ Specialization->getAccess());
+ E = FixOverloadedFunctionReference(E, Found, Specialization);
+ if (!E) return ExprError();
+ return Owned(E);
+ }
+
+ Diag(Loc, diag::err_ovl_unresolvable) << E->getSourceRange();
+ return ExprError();
+}
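
A sketch of the input CheckPlaceholderExpr is designed to fix up; the operand position shown is an assumed illustration:

    // 'f<int>' names exactly one specialization, so an operand carrying the
    // 'overload' placeholder type can be resolved in place.
    template <typename T> void f(T);
    void (*p)(int) = &f<int>;
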
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index 5720d93..f9c2c9a 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -16,6 +16,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
@@ -31,7 +32,7 @@ using namespace clang;
using namespace sema;
ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
- IdentifierInfo &II,
+ IdentifierInfo &II,
SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectTypePtr,
@@ -71,26 +72,26 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
if (SS.isSet()) {
NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
-
+
bool AlreadySearched = false;
bool LookAtPrefix = true;
// C++ [basic.lookup.qual]p6:
- // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
+ // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
// the type-names are looked up as types in the scope designated by the
// nested-name-specifier. In a qualified-id of the form:
- //
- // ::[opt] nested-name-specifier ̃ class-name
+ //
+ // ::[opt] nested-name-specifier ~ class-name
//
// where the nested-name-specifier designates a namespace scope, and in
// a qualified-id of the form:
//
- // ::opt nested-name-specifier class-name :: ̃ class-name
+ // ::opt nested-name-specifier class-name :: ~ class-name
//
- // the class-names are looked up as types in the scope designated by
+ // the class-names are looked up as types in the scope designated by
// the nested-name-specifier.
//
// Here, we check the first case (completely) and determine whether the
- // code below is permitted to look at the prefix of the
+ // code below is permitted to look at the prefix of the
// nested-name-specifier.
DeclContext *DC = computeDeclContext(SS, EnteringContext);
if (DC && DC->isFileContext()) {
@@ -99,7 +100,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
isDependent = false;
} else if (DC && isa<CXXRecordDecl>(DC))
LookAtPrefix = false;
-
+
// The second case from the C++03 rules quoted further above.
NestedNameSpecifier *Prefix = 0;
if (AlreadySearched) {
@@ -116,7 +117,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
LookupCtx = computeDeclContext(SS, EnteringContext);
isDependent = LookupCtx && LookupCtx->isDependentContext();
}
-
+
LookInScope = false;
} else if (ObjectTypePtr) {
// C++ [basic.lookup.classref]p3:
@@ -128,7 +129,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// cv-qualified) T.
LookupCtx = computeDeclContext(SearchType);
isDependent = SearchType->isDependentType();
- assert((isDependent || !SearchType->isIncompleteType()) &&
+ assert((isDependent || !SearchType->isIncompleteType()) &&
"Caller should have completed object type");
LookInScope = true;
@@ -170,7 +171,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// nested-name-specifier (if present) or the object type, then
// this is the destructor for that class.
// FIXME: This is a workaround until we get real drafting for core
- // issue 399, for which there isn't even an obvious direction.
+ // issue 399, for which there isn't even an obvious direction.
if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) {
QualType MemberOfType;
if (SS.isSet()) {
@@ -182,7 +183,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
}
if (MemberOfType.isNull())
MemberOfType = SearchType;
-
+
if (MemberOfType.isNull())
continue;
@@ -199,7 +200,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
continue;
}
-
+
// We're referring to an unresolved class template
  // specialization. Determine whether the class template we found
// is the same as the template being specialized or, if we don't
@@ -220,7 +221,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// The class template we found has the same name as the
// (dependent) template name being specialized.
- if (DependentTemplateName *DepTemplate
+ if (DependentTemplateName *DepTemplate
= SpecName.getAsDependentTemplateName()) {
if (DepTemplate->isIdentifier() &&
DepTemplate->getIdentifier() == Template->getIdentifier())
@@ -253,7 +254,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
if (ObjectTypePtr)
Diag(NameLoc, diag::err_ident_in_pseudo_dtor_not_a_type)
- << &II;
+ << &II;
else
Diag(NameLoc, diag::err_destructor_class_name);
@@ -262,13 +263,13 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
/// \brief Build a C++ typeid expression with a type operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- TypeSourceInfo *Operand,
- SourceLocation RParenLoc) {
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
// C++ [expr.typeid]p4:
- // The top-level cv-qualifiers of the lvalue expression or the type-id
+ // The top-level cv-qualifiers of the lvalue expression or the type-id
// that is the operand of typeid are always ignored.
- // If the type of the type-id is a class type or a reference to a class
+ // If the type of the type-id is a class type or a reference to a class
// type, the class shall be completely-defined.
Qualifiers Quals;
QualType T
@@ -277,7 +278,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
if (T->getAs<RecordType>() &&
RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
return ExprError();
-
+
return Owned(new (Context) CXXTypeidExpr(TypeInfoType.withConst(),
Operand,
SourceRange(TypeidLoc, RParenLoc)));
@@ -285,9 +286,9 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
/// \brief Build a C++ typeid expression with an expression operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- Expr *E,
- SourceLocation RParenLoc) {
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
bool isUnevaluatedOperand = true;
if (E && !E->isTypeDependent()) {
QualType T = E->getType();
@@ -298,7 +299,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// shall be completely-defined.
if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
return ExprError();
-
+
// C++ [expr.typeid]p3:
// When typeid is applied to an expression other than an glvalue of a
// polymorphic class type [...] [the] expression is an unevaluated
@@ -310,11 +311,11 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
MarkVTableUsed(TypeidLoc, RecordD);
}
}
-
+
// C++ [expr.typeid]p4:
// [...] If the type of the type-id is a reference to a possibly
- // cv-qualified type, the result of the typeid expression refers to a
- // std::type_info object representing the cv-unqualified referenced
+ // cv-qualified type, the result of the typeid expression refers to a
+ // std::type_info object representing the cv-unqualified referenced
// type.
Qualifiers Quals;
QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
@@ -323,16 +324,16 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
ImpCastExprToType(E, UnqualT, CK_NoOp, CastCategory(E));
}
}
-
+
// If this is an unevaluated operand, clear out the set of
// declaration references we have been computing and eliminate any
// temporaries introduced in its computation.
if (isUnevaluatedOperand)
ExprEvalContexts.back().Context = Unevaluated;
-
+
return Owned(new (Context) CXXTypeidExpr(TypeInfoType.withConst(),
E,
- SourceRange(TypeidLoc, RParenLoc)));
+ SourceRange(TypeidLoc, RParenLoc)));
}
/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
@@ -343,15 +344,17 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
if (!StdNamespace)
return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
- IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
- LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
- LookupQualifiedName(R, getStdNamespace());
- RecordDecl *TypeInfoRecordDecl = R.getAsSingle<RecordDecl>();
- if (!TypeInfoRecordDecl)
- return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
-
- QualType TypeInfoType = Context.getTypeDeclType(TypeInfoRecordDecl);
-
+ if (!CXXTypeInfoDecl) {
+ IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
+ LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, getStdNamespace());
+ CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ if (!CXXTypeInfoDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+ }
+
+ QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
+
if (isType) {
// The operand is a type; handle it as such.
TypeSourceInfo *TInfo = 0;
@@ -359,17 +362,102 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
&TInfo);
if (T.isNull())
return ExprError();
-
+
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);
}
- // The operand is an expression.
+ // The operand is an expression.
return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
}
+/// Retrieve the UuidAttr associated with QT.
+static UuidAttr *GetUuidAttrOfType(QualType QT) {
+ // Optionally remove one level of pointer, reference or array indirection.
+  const Type *Ty = QT.getTypePtr();
+ if (QT->isPointerType() || QT->isReferenceType())
+ Ty = QT->getPointeeType().getTypePtr();
+ else if (QT->isArrayType())
+ Ty = cast<ArrayType>(QT)->getElementType().getTypePtr();
+
+  // Walk the class's declarations and definitions looking for a uuid attribute.
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ while (RD) {
+ if (UuidAttr *Uuid = RD->getAttr<UuidAttr>())
+ return Uuid;
+ RD = RD->getPreviousDeclaration();
+ }
+ return 0;
+}
+
+/// \brief Build a Microsoft __uuidof expression with a type operand.
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ if (!Operand->getType()->isDependentType()) {
+ if (!GetUuidAttrOfType(Operand->getType()))
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+
+ // FIXME: add __uuidof semantic analysis for type operand.
+ return Owned(new (Context) CXXUuidofExpr(TypeInfoType.withConst(),
+ Operand,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
+
+/// \brief Build a Microsoft __uuidof expression with an expression operand.
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
+ if (!E->getType()->isDependentType()) {
+ if (!GetUuidAttrOfType(E->getType()) &&
+ !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+  // FIXME: add __uuidof semantic analysis for the expression operand.
+ return Owned(new (Context) CXXUuidofExpr(TypeInfoType.withConst(),
+ E,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
+
+/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
+ExprResult
+Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // If MSVCGuidDecl has not been cached, do the lookup.
+ if (!MSVCGuidDecl) {
+ IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID");
+ LookupResult R(*this, GuidII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ MSVCGuidDecl = R.getAsSingle<RecordDecl>();
+ if (!MSVCGuidDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof));
+ }
+
+ QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl);
+
+ if (isType) {
+ // The operand is a type; handle it as such.
+ TypeSourceInfo *TInfo = 0;
+ QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
+ &TInfo);
+ if (T.isNull())
+ return ExprError();
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
+ }
+
+ // The operand is an expression.
+ return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+}
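
A sketch of the Microsoft extension being wired up here; it assumes -fms-extensions and, as the lookup above requires, a declaration of struct _GUID somewhere in the translation unit:

    struct _GUID { unsigned long Data1; /* ... */ };
    struct __declspec(uuid("00000000-0000-0000-C000-000000000046")) IUnknown;
    const _GUID *g = &__uuidof(IUnknown);  // ok: type carries a uuid attribute
    // __uuidof(int) would produce err_uuidof_without_guid.
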
+
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult
Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
@@ -388,6 +476,9 @@ Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
/// ActOnCXXThrow - Parse throw expressions.
ExprResult
Sema::ActOnCXXThrow(SourceLocation OpLoc, Expr *Ex) {
+ if (!getLangOptions().Exceptions)
+ Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
+
if (Ex && !Ex->isTypeDependent() && CheckCXXThrowOperand(OpLoc, Ex))
return ExprError();
return Owned(new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc));
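
A sketch of the new front-end check: with exceptions disabled, any throw-expression is now rejected immediately via err_exceptions_disabled:

    // Compile with -fno-exceptions:
    void f() { throw 42; }   // error: cannot use 'throw' with exceptions disabled
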
@@ -399,12 +490,12 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
// A throw-expression initializes a temporary object, called the exception
// object, the type of which is determined by removing any top-level
// cv-qualifiers from the static type of the operand of throw and adjusting
- // the type from "array of T" or "function returning T" to "pointer to T"
+ // the type from "array of T" or "function returning T" to "pointer to T"
// or "pointer to function returning T", [...]
if (E->getType().hasQualifiers())
ImpCastExprToType(E, E->getType().getUnqualifiedType(), CK_NoOp,
CastCategory(E));
-
+
DefaultFunctionArrayConversion(E);
// If the type of the exception would be an incomplete type or a pointer
@@ -430,13 +521,14 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
// Initialize the exception result. This implicitly weeds out
// abstract types or types with inaccessible copy constructors.
- // FIXME: Determine whether we can elide this copy per C++0x [class.copy]p34.
+ const VarDecl *NRVOVariable = getCopyElisionCandidate(QualType(), E, false);
+
+ // FIXME: Determine whether we can elide this copy per C++0x [class.copy]p32.
InitializedEntity Entity =
- InitializedEntity::InitializeException(ThrowLoc, E->getType(),
- /*NRVO=*/false);
- ExprResult Res = PerformCopyInitialization(Entity,
- SourceLocation(),
- Owned(E));
+ InitializedEntity::InitializeException(ThrowLoc, E->getType(),
+ /*NRVO=*/false);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOVariable,
+ QualType(), E);
if (Res.isInvalid())
return true;
E = Res.takeAs<Expr>();
@@ -451,11 +543,15 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
// exception handling will make use of the vtable.
MarkVTableUsed(ThrowLoc, RD);
+ // If a pointer is thrown, the referenced object will not be destroyed.
+ if (isPointer)
+ return false;
+
// If the class has a non-trivial destructor, we must be able to call it.
if (RD->hasTrivialDestructor())
return false;
- CXXDestructorDecl *Destructor
+ CXXDestructorDecl *Destructor
= const_cast<CXXDestructorDecl*>(LookupDestructor(RD));
if (!Destructor)
return false;
@@ -466,30 +562,48 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
return false;
}
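
A sketch of why the early return for thrown pointers matters: only the pointer value is copied, so the pointee's destructor need not be accessible:

    class P { ~P(); public: static P *make(); };
    void f() { throw P::make(); }   // ok: pointer thrown, ~P not required
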
-ExprResult Sema::ActOnCXXThis(SourceLocation ThisLoc) {
+CXXMethodDecl *Sema::tryCaptureCXXThis() {
+ // Ignore block scopes: we can capture through them.
+ // Ignore nested enum scopes: we'll diagnose non-constant expressions
+ // where they're invalid, and other uses are legitimate.
+ // Don't ignore nested class scopes: you can't use 'this' in a local class.
+ DeclContext *DC = CurContext;
+ while (true) {
+ if (isa<BlockDecl>(DC)) DC = cast<BlockDecl>(DC)->getDeclContext();
+ else if (isa<EnumDecl>(DC)) DC = cast<EnumDecl>(DC)->getDeclContext();
+ else break;
+ }
+
+ // If we're not in an instance method, error out.
+ CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC);
+ if (!method || !method->isInstance())
+ return 0;
+
+ // Mark that we're closing on 'this' in all the block scopes, if applicable.
+ for (unsigned idx = FunctionScopes.size() - 1;
+ isa<BlockScopeInfo>(FunctionScopes[idx]);
+ --idx)
+ cast<BlockScopeInfo>(FunctionScopes[idx])->CapturesCXXThis = true;
+
+ return method;
+}
+
+ExprResult Sema::ActOnCXXThis(SourceLocation loc) {
/// C++ 9.3.2: In the body of a non-static member function, the keyword this
/// is a non-lvalue expression whose value is the address of the object for
/// which the function is called.
- DeclContext *DC = getFunctionLevelDeclContext();
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
- if (MD->isInstance())
- return Owned(new (Context) CXXThisExpr(ThisLoc,
- MD->getThisType(Context),
- /*isImplicit=*/false));
+ CXXMethodDecl *method = tryCaptureCXXThis();
+ if (!method) return Diag(loc, diag::err_invalid_this_use);
- return ExprError(Diag(ThisLoc, diag::err_invalid_this_use));
+ return Owned(new (Context) CXXThisExpr(loc, method->getThisType(Context),
+ /*isImplicit=*/false));
}
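
A sketch of tryCaptureCXXThis with the blocks extension (assumes -fblocks): using 'this' inside a block in an instance method marks each enclosing block scope as capturing it.

    struct S {
      int n;
      void f() {
        void (^b)(void) = ^{ (void)this->n; };  // block scopes capture 'this'
        (void)b;
      }
    };
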
-/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
-/// Can be interpreted either as function-style casting ("int(x)")
-/// or class type construction ("ClassType(x,y,z)")
-/// or creation of a value-initialized type ("int()").
ExprResult
-Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, ParsedType TypeRep,
+Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg exprs,
- SourceLocation *CommaLocs,
SourceLocation RParenLoc) {
if (!TypeRep)
return ExprError();
@@ -498,17 +612,30 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, ParsedType TypeRep,
QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ return BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc);
+}
+
+/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+ExprResult
+Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg exprs,
+ SourceLocation RParenLoc) {
+ QualType Ty = TInfo->getType();
unsigned NumExprs = exprs.size();
Expr **Exprs = (Expr**)exprs.get();
- SourceLocation TyBeginLoc = TypeRange.getBegin();
+ SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
SourceRange FullRange = SourceRange(TyBeginLoc, RParenLoc);
if (Ty->isDependentType() ||
CallExpr::hasAnyTypeDependentArguments(Exprs, NumExprs)) {
exprs.release();
- return Owned(CXXUnresolvedConstructExpr::Create(Context,
- TypeRange.getBegin(), Ty,
+ return Owned(CXXUnresolvedConstructExpr::Create(Context, TInfo,
LParenLoc,
Exprs, NumExprs,
RParenLoc));
@@ -522,7 +649,7 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, ParsedType TypeRep,
PDiag(diag::err_invalid_incomplete_type_use)
<< FullRange))
return ExprError();
-
+
if (RequireNonAbstractType(TyBeginLoc, Ty,
diag::err_allocation_of_abstract_type))
return ExprError();
@@ -534,55 +661,91 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, ParsedType TypeRep,
// corresponding cast expression.
//
if (NumExprs == 1) {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Invalid;
+ ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
- if (CheckCastTypes(TypeRange, Ty, Exprs[0], Kind, BasePath,
+ if (CheckCastTypes(TInfo->getTypeLoc().getSourceRange(), Ty, Exprs[0],
+ Kind, VK, BasePath,
/*FunctionalStyle=*/true))
return ExprError();
exprs.release();
return Owned(CXXFunctionalCastExpr::Create(Context,
- Ty.getNonLValueExprType(Context),
- TInfo, TyBeginLoc, Kind,
+ Ty.getNonLValueExprType(Context),
+ VK, TInfo, TyBeginLoc, Kind,
Exprs[0], &BasePath,
RParenLoc));
}
- if (Ty->isRecordType()) {
- InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty);
- InitializationKind Kind
- = NumExprs ? InitializationKind::CreateDirect(TypeRange.getBegin(),
- LParenLoc, RParenLoc)
- : InitializationKind::CreateValue(TypeRange.getBegin(),
- LParenLoc, RParenLoc);
- InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- move(exprs));
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
+ InitializationKind Kind
+ = NumExprs ? InitializationKind::CreateDirect(TyBeginLoc,
+ LParenLoc, RParenLoc)
+ : InitializationKind::CreateValue(TyBeginLoc,
+ LParenLoc, RParenLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(exprs));
+
+ // FIXME: Improve AST representation?
+ return move(Result);
+}
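
A sketch of the three paths through the rewritten function; note that construction of non-class types with multiple arguments is now rejected by InitializationSequence rather than by the ad-hoc diagnostic removed below:

    struct S { S(int, int); };
    int a = int(3);    // one argument: functional-style cast
    int b = int();     // no arguments: value-initialization
    S   s = S(1, 2);   // class type: direct-initialization of a temporary
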
+
+/// doesUsualArrayDeleteWantSize - Answers whether the usual
+/// operator delete[] for the given type has a size_t parameter.
+static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
+ QualType allocType) {
+ const RecordType *record =
+ allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ if (!record) return false;
+
+ // Try to find an operator delete[] in class scope.
+
+ DeclarationName deleteName =
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(ops, record->getDecl());
+
+ // We're just doing this for information.
+ ops.suppressDiagnostics();
+
+ // Very likely: there's no operator delete[].
+ if (ops.empty()) return false;
+
+ // If it's ambiguous, it should be illegal to call operator delete[]
+ // on this thing, so it doesn't matter if we allocate extra space or not.
+ if (ops.isAmbiguous()) return false;
+
+ LookupResult::Filter filter = ops.makeFilter();
+ while (filter.hasNext()) {
+ NamedDecl *del = filter.next()->getUnderlyingDecl();
- // FIXME: Improve AST representation?
- return move(Result);
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (isa<FunctionTemplateDecl>(del)) {
+ filter.erase();
+ continue;
+ }
+
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // If class T does not declare [an operator delete[] with one
+ // parameter] but does declare a member deallocation function
+ // named operator delete[] with exactly two parameters, the
+ // second of which has type std::size_t, then this function
+ // is a usual deallocation function.
+ if (!cast<CXXMethodDecl>(del)->isUsualDeallocationFunction()) {
+ filter.erase();
+ continue;
+ }
}
+ filter.done();
- // C++ [expr.type.conv]p1:
- // If the expression list specifies more than a single value, the type shall
- // be a class with a suitably declared constructor.
- //
- if (NumExprs > 1)
- return ExprError(Diag(CommaLocs[0],
- diag::err_builtin_func_cast_more_than_one_arg)
- << FullRange);
-
- assert(NumExprs == 0 && "Expected 0 expressions");
- // C++ [expr.type.conv]p2:
- // The expression T(), where T is a simple-type-specifier for a non-array
- // complete object type or the (possibly cv-qualified) void type, creates an
- // rvalue of the specified type, which is value-initialized.
- //
- exprs.release();
- return Owned(new (Context) CXXScalarValueInitExpr(Ty, TyBeginLoc, RParenLoc));
-}
+ if (!ops.isSingleResult()) return false;
+ const FunctionDecl *del = cast<FunctionDecl>(ops.getFoundDecl());
+ return (del->getNumParams() == 2);
+}
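
A sketch of the two-parameter usual operator delete[] this helper detects (C++ [basic.stc.dynamic.deallocation]p2); when present, the array new-expression must set aside space for the element count:

    #include <cstddef>
    struct T {
      void operator delete[](void *p, std::size_t n);  // usual, wants size
    };
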
/// ActOnCXXNew - Parsed a C++ 'new' expression (C++ 5.3.4), as in e.g.:
/// @code new (memory) int[size][4] @endcode
@@ -592,15 +755,20 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, ParsedType TypeRep,
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
- SourceLocation PlacementRParen, SourceRange TypeIdParens,
+ SourceLocation PlacementRParen, SourceRange TypeIdParens,
Declarator &D, SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
SourceLocation ConstructorRParen) {
+ bool TypeContainsAuto = D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
+
Expr *ArraySize = 0;
// If the specified type is an array, unwrap it and save the expression.
if (D.getNumTypeObjects() > 0 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
DeclaratorChunk &Chunk = D.getTypeObject(0);
+ if (TypeContainsAuto)
+ return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
+ << D.getSourceRange());
if (Chunk.Arr.hasStatic)
return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
<< D.getSourceRange());
@@ -630,25 +798,24 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- //FIXME: Store TypeSourceInfo in CXXNew expression.
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0, /*OwnedDecl=*/0,
+ /*AllowAuto=*/true);
QualType AllocType = TInfo->getType();
if (D.isInvalidType())
return ExprError();
-
- SourceRange R = TInfo->getTypeLoc().getSourceRange();
+
return BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
move(PlacementArgs),
PlacementRParen,
TypeIdParens,
AllocType,
- D.getSourceRange().getBegin(),
- R,
+ TInfo,
ArraySize,
ConstructorLParen,
move(ConstructorArgs),
- ConstructorRParen);
+ ConstructorRParen,
+ TypeContainsAuto);
}
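
A sketch of the C++0x 'auto' cases that the new-expression handling now accepts or rejects (assumes -std=c++0x):

    void f() {
      int *p = new auto(1);   // ok: deduced as int
      delete p;
      // new auto;            // error: requires a constructor argument
      // new auto[4];         // error: err_new_array_of_auto
    }
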
ExprResult
@@ -658,15 +825,37 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
- SourceLocation TypeLoc,
- SourceRange TypeRange,
+ TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen) {
- if (CheckAllocatedType(AllocType, TypeLoc, TypeRange))
- return ExprError();
+ SourceLocation ConstructorRParen,
+ bool TypeMayContainAuto) {
+ SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
+
+ // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && AllocType->getContainedAutoType()) {
+ if (ConstructorArgs.size() == 0)
+ return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
+ << AllocType << TypeRange);
+ if (ConstructorArgs.size() != 1) {
+ Expr *FirstBad = ConstructorArgs.get()[1];
+ return ExprError(Diag(FirstBad->getSourceRange().getBegin(),
+ diag::err_auto_new_ctor_multiple_expressions)
+ << AllocType << TypeRange);
+ }
+ QualType DeducedType;
+ if (!DeduceAutoType(AllocType, ConstructorArgs.get()[0], DeducedType))
+ return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
+ << AllocType
+ << ConstructorArgs.get()[0]->getType()
+ << TypeRange
+ << ConstructorArgs.get()[0]->getSourceRange());
+ AllocType = DeducedType;
+ AllocTypeInfo = Context.getTrivialTypeSourceInfo(AllocType, StartLoc);
+ }
+
// Per C++0x [expr.new]p5, the type being constructed may be a
// typedef of an array type.
if (!ArraySize) {
@@ -679,14 +868,17 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
+ if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
+ return ExprError();
+
QualType ResultType = Context.getPointerType(AllocType);
// C++ 5.3.4p6: "The expression in a direct-new-declarator shall have integral
// or enumeration type with a non-negative value."
if (ArraySize && !ArraySize->isTypeDependent()) {
-
+
QualType SizeType = ArraySize->getType();
-
+
ExprResult ConvertedSize
= ConvertToIntegralOrEnumerationType(StartLoc, ArraySize,
PDiag(diag::err_array_size_not_integral),
@@ -696,16 +888,16 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
PDiag(diag::note_array_size_conversion),
PDiag(diag::err_array_size_ambiguous_conversion),
PDiag(diag::note_array_size_conversion),
- PDiag(getLangOptions().CPlusPlus0x? 0
+ PDiag(getLangOptions().CPlusPlus0x? 0
: diag::ext_array_size_conversion));
if (ConvertedSize.isInvalid())
return ExprError();
-
+
ArraySize = ConvertedSize.take();
SizeType = ArraySize->getType();
- if (!SizeType->isIntegralOrEnumerationType())
+ if (!SizeType->isIntegralOrUnscopedEnumerationType())
return ExprError();
-
+
// Let's see if this is a constant < 0. If so, we reject it out of hand.
// We don't care about special rules, so we tell the machinery it's not
// evaluated - it gives us a result in more cases.
@@ -713,17 +905,17 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
llvm::APSInt Value;
if (ArraySize->isIntegerConstantExpr(Value, Context, 0, false)) {
if (Value < llvm::APSInt(
- llvm::APInt::getNullValue(Value.getBitWidth()),
+ llvm::APInt::getNullValue(Value.getBitWidth()),
Value.isUnsigned()))
return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
diag::err_typecheck_negative_array_size)
<< ArraySize->getSourceRange());
-
+
if (!AllocType->isDependentType()) {
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
- Diag(ArraySize->getSourceRange().getBegin(),
+ Diag(ArraySize->getSourceRange().getBegin(),
diag::err_array_too_large)
<< Value.toString(10)
<< ArraySize->getSourceRange();
@@ -736,11 +928,11 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
<< ArraySize->getSourceRange()
<< FixItHint::CreateRemoval(TypeIdParens.getBegin())
<< FixItHint::CreateRemoval(TypeIdParens.getEnd());
-
+
TypeIdParens = SourceRange();
}
}
-
+
ImpCastExprToType(ArraySize, Context.getSizeType(),
CK_IntegralCast);
}
@@ -749,7 +941,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
FunctionDecl *OperatorDelete = 0;
Expr **PlaceArgs = (Expr**)PlacementArgs.get();
unsigned NumPlaceArgs = PlacementArgs.size();
-
+
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlaceArgs, NumPlaceArgs) &&
FindAllocationFunctions(StartLoc,
@@ -757,24 +949,32 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
UseGlobal, AllocType, ArraySize, PlaceArgs,
NumPlaceArgs, OperatorNew, OperatorDelete))
return ExprError();
+
+ // If this is an array allocation, compute whether the usual array
+ // deallocation function for the type has a size_t parameter.
+ bool UsualArrayDeleteWantsSize = false;
+ if (ArraySize && !AllocType->isDependentType())
+ UsualArrayDeleteWantsSize
+ = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
+
llvm::SmallVector<Expr *, 8> AllPlaceArgs;
if (OperatorNew) {
// Add default arguments, if any.
- const FunctionProtoType *Proto =
+ const FunctionProtoType *Proto =
OperatorNew->getType()->getAs<FunctionProtoType>();
- VariadicCallType CallType =
+ VariadicCallType CallType =
Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply;
-
+
if (GatherArgumentsForCall(PlacementLParen, OperatorNew,
- Proto, 1, PlaceArgs, NumPlaceArgs,
+ Proto, 1, PlaceArgs, NumPlaceArgs,
AllPlaceArgs, CallType))
return ExprError();
-
+
NumPlaceArgs = AllPlaceArgs.size();
if (NumPlaceArgs > 0)
PlaceArgs = &AllPlaceArgs[0];
}
-
+
bool Init = ConstructorLParen.isValid();
// --- Choosing a constructor ---
CXXConstructorDecl *Constructor = 0;
@@ -786,7 +986,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (NumConsArgs && (ResultType->isArrayType() || ArraySize)) {
SourceRange InitRange(ConsArgs[0]->getLocStart(),
ConsArgs[NumConsArgs - 1]->getLocEnd());
-
+
Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
return ExprError();
}
@@ -800,22 +1000,22 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
// - If the new-initializer is omitted, the object is default-
// initialized (8.5); if no initialization is performed,
// the object has indeterminate value
- = !Init? InitializationKind::CreateDefault(TypeLoc)
- // - Otherwise, the new-initializer is interpreted according to the
+ = !Init? InitializationKind::CreateDefault(TypeRange.getBegin())
+ // - Otherwise, the new-initializer is interpreted according to the
// initialization rules of 8.5 for direct-initialization.
- : InitializationKind::CreateDirect(TypeLoc,
- ConstructorLParen,
+ : InitializationKind::CreateDirect(TypeRange.getBegin(),
+ ConstructorLParen,
ConstructorRParen);
-
+
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, AllocType);
InitializationSequence InitSeq(*this, Entity, Kind, ConsArgs, NumConsArgs);
- ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
+ ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
move(ConstructorArgs));
if (FullInit.isInvalid())
return ExprError();
-
- // FullInit is our initializer; walk through it to determine if it's a
+
+ // FullInit is our initializer; walk through it to determine if it's a
// constructor call, which CXXNewExpr handles directly.
if (Expr *FullInitExpr = (Expr *)FullInit.get()) {
if (CXXBindTemporaryExpr *Binder
@@ -827,7 +1027,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
for (CXXConstructExpr::arg_iterator A = Construct->arg_begin(),
AEnd = Construct->arg_end();
A != AEnd; ++A)
- ConvertedConstructorArgs.push_back(A->Retain());
+ ConvertedConstructorArgs.push_back(*A);
} else {
// Take the converted initializer.
ConvertedConstructorArgs.push_back(FullInit.release());
@@ -835,12 +1035,12 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
} else {
// No initialization required.
}
-
+
// Take the converted arguments and use them for the new expression.
NumConsArgs = ConvertedConstructorArgs.size();
ConsArgs = (Expr **)ConvertedConstructorArgs.take();
}
-
+
// Mark the new and delete operators as referenced.
if (OperatorNew)
MarkDeclarationReferenced(StartLoc, OperatorNew);
@@ -848,18 +1048,20 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
MarkDeclarationReferenced(StartLoc, OperatorDelete);
// FIXME: Also check that the destructor is accessible. (C++ 5.3.4p16)
-
+
PlacementArgs.release();
ConstructorArgs.release();
-
- // FIXME: The TypeSourceInfo should also be included in CXXNewExpr.
+
return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew,
PlaceArgs, NumPlaceArgs, TypeIdParens,
ArraySize, Constructor, Init,
ConsArgs, NumConsArgs, OperatorDelete,
- ResultType, StartLoc,
+ UsualArrayDeleteWantsSize,
+ ResultType, AllocTypeInfo,
+ StartLoc,
Init ? ConstructorRParen :
- TypeRange.getEnd()));
+ TypeRange.getEnd(),
+ ConstructorLParen, ConstructorRParen));
}
/// CheckAllocatedType - Checks that a type is suitable as the allocated type
@@ -883,6 +1085,9 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
else if (RequireNonAbstractType(Loc, AllocType,
diag::err_allocation_of_abstract_type))
return true;
+ else if (AllocType->isVariablyModifiedType())
+ return Diag(Loc, diag::err_variably_modified_new_type)
+ << AllocType;
return false;
}
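
A sketch of the new variably-modified-type rejection; VLA typedefs reach C++ only through the GNU extension:

    void f(int n) {
      typedef int VLA[n];
      // new VLA;   // error: err_variably_modified_new_type
    }
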
@@ -931,10 +1136,10 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// C++ [expr.new]p8:
// If the allocated type is a non-array type, the allocation
- // function’s name is operator new and the deallocation function’s
+ // function's name is operator new and the deallocation function's
// name is operator delete. If the allocated type is an array
- // type, the allocation function’s name is operator new[] and the
- // deallocation function’s name is operator delete[].
+ // type, the allocation function's name is operator new[] and the
+ // deallocation function's name is operator delete[].
DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
IsArray ? OO_Array_New : OO_New);
DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
@@ -975,12 +1180,12 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// C++ [expr.new]p19:
//
// If the new-expression begins with a unary :: operator, the
- // deallocation function’s name is looked up in the global
+ // deallocation function's name is looked up in the global
// scope. Otherwise, if the allocated type is a class type T or an
- // array thereof, the deallocation function’s name is looked up in
+ // array thereof, the deallocation function's name is looked up in
// the scope of T. If this lookup fails to find the name, or if
// the allocated type is not a class type or array thereof, the
- // deallocation function’s name is looked up in the global scope.
+ // deallocation function's name is looked up in the global scope.
LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
if (AllocElemType->isRecordType() && !UseGlobal) {
CXXRecordDecl *RD
@@ -999,39 +1204,49 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
llvm::SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
- if (NumPlaceArgs > 0) {
+ // Whether we're looking for a placement operator delete is dictated
+ // by whether we selected a placement operator new, not by whether
+ // we had explicit placement arguments. This matters for things like
+ // struct A { void *operator new(size_t, int = 0); ... };
+  //   A *a = new A();
+ bool isPlacementNew = (NumPlaceArgs > 0 || OperatorNew->param_size() != 1);
+
+ if (isPlacementNew) {
// C++ [expr.new]p20:
// A declaration of a placement deallocation function matches the
// declaration of a placement allocation function if it has the
// same number of parameters and, after parameter transformations
// (8.3.5), all parameter types except the first are
// identical. [...]
- //
+ //
// To perform this comparison, we compute the function type that
// the deallocation function should have, and use that type both
// for template argument deduction and for comparison purposes.
+ //
+ // FIXME: this comparison should ignore CC and the like.
QualType ExpectedFunctionType;
{
const FunctionProtoType *Proto
= OperatorNew->getType()->getAs<FunctionProtoType>();
+
llvm::SmallVector<QualType, 4> ArgTypes;
- ArgTypes.push_back(Context.VoidPtrTy);
+ ArgTypes.push_back(Context.VoidPtrTy);
for (unsigned I = 1, N = Proto->getNumArgs(); I < N; ++I)
ArgTypes.push_back(Proto->getArgType(I));
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Proto->isVariadic();
+
ExpectedFunctionType
= Context.getFunctionType(Context.VoidTy, ArgTypes.data(),
- ArgTypes.size(),
- Proto->isVariadic(),
- 0, false, false, 0, 0,
- FunctionType::ExtInfo());
+ ArgTypes.size(), EPI);
}
- for (LookupResult::iterator D = FoundDelete.begin(),
+ for (LookupResult::iterator D = FoundDelete.begin(),
DEnd = FoundDelete.end();
D != DEnd; ++D) {
FunctionDecl *Fn = 0;
- if (FunctionTemplateDecl *FnTmpl
+ if (FunctionTemplateDecl *FnTmpl
= dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
// Perform template argument deduction to try to match the
// expected function type.
@@ -1048,7 +1263,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// C++ [expr.new]p20:
// [...] Any non-placement deallocation function matches a
// non-placement allocation function. [...]
- for (LookupResult::iterator D = FoundDelete.begin(),
+ for (LookupResult::iterator D = FoundDelete.begin(),
DEnd = FoundDelete.end();
D != DEnd; ++D) {
if (FunctionDecl *Fn = dyn_cast<FunctionDecl>((*D)->getUnderlyingDecl()))
@@ -1073,7 +1288,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
if (NumPlaceArgs && getLangOptions().CPlusPlus0x &&
isNonPlacementDeallocationFunction(OperatorDelete)) {
Diag(StartLoc, diag::err_placement_new_non_placement_delete)
- << SourceRange(PlaceArgs[0]->getLocStart(),
+ << SourceRange(PlaceArgs[0]->getLocStart(),
PlaceArgs[NumPlaceArgs - 1]->getLocEnd());
Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
<< DeleteName;
@@ -1107,7 +1322,7 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
R.suppressDiagnostics();
OverloadCandidateSet Candidates(StartLoc);
- for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
+ for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
Alloc != AllocEnd; ++Alloc) {
// Even member operator new/delete are implicitly treated as
// static, so don't use AddMemberCandidate.
@@ -1140,12 +1355,13 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
for (unsigned i = 0; (i < NumArgs && i < NumArgsInFnDecl); ++i) {
ExprResult Result
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
FnDecl->getParamDecl(i)),
SourceLocation(),
- Owned(Args[i]->Retain()));
+ Owned(Args[i]));
if (Result.isInvalid())
return true;
-
+
Args[i] = Result.takeAs<Expr>();
}
Operator = FnDecl;
@@ -1190,18 +1406,18 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
void Sema::DeclareGlobalNewDelete() {
if (GlobalNewDeleteDeclared)
return;
-
+
// C++ [basic.std.dynamic]p2:
- // [...] The following allocation and deallocation functions (18.4) are
- // implicitly declared in global scope in each translation unit of a
+ // [...] The following allocation and deallocation functions (18.4) are
+ // implicitly declared in global scope in each translation unit of a
// program
- //
+ //
// void* operator new(std::size_t) throw(std::bad_alloc);
- // void* operator new[](std::size_t) throw(std::bad_alloc);
- // void operator delete(void*) throw();
+ // void* operator new[](std::size_t) throw(std::bad_alloc);
+ // void operator delete(void*) throw();
// void operator delete[](void*) throw();
//
- // These implicit declarations introduce only the function names operator
+ // These implicit declarations introduce only the function names operator
// new, operator new[], operator delete, operator delete[].
//
// Here, we need to refer to std::bad_alloc, so we will implicitly declare
@@ -1211,14 +1427,14 @@ void Sema::DeclareGlobalNewDelete() {
if (!StdBadAlloc) {
// The "std::bad_alloc" class has not yet been declared, so build it
// implicitly.
- StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
- getOrCreateStdNamespace(),
- SourceLocation(),
- &PP.getIdentifierTable().get("bad_alloc"),
+ StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
+ getOrCreateStdNamespace(),
+ SourceLocation(),
+ &PP.getIdentifierTable().get("bad_alloc"),
SourceLocation(), 0);
getStdBadAlloc()->setImplicit(true);
}
-
+
GlobalNewDeleteDeclared = true;
QualType VoidPtr = Context.getPointerType(Context.VoidTy);
@@ -1268,28 +1484,31 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
}
QualType BadAllocType;
- bool HasBadAllocExceptionSpec
+ bool HasBadAllocExceptionSpec
= (Name.getCXXOverloadedOperator() == OO_New ||
Name.getCXXOverloadedOperator() == OO_Array_New);
if (HasBadAllocExceptionSpec) {
assert(StdBadAlloc && "Must have std::bad_alloc declared");
BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
}
-
- QualType FnType = Context.getFunctionType(Return, &Argument, 1, false, 0,
- true, false,
- HasBadAllocExceptionSpec? 1 : 0,
- &BadAllocType,
- FunctionType::ExtInfo());
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasExceptionSpec = true;
+ if (HasBadAllocExceptionSpec) {
+ EPI.NumExceptions = 1;
+ EPI.Exceptions = &BadAllocType;
+ }
+
+ QualType FnType = Context.getFunctionType(Return, &Argument, 1, EPI);
FunctionDecl *Alloc =
FunctionDecl::Create(Context, GlobalCtx, SourceLocation(), Name,
FnType, /*TInfo=*/0, SC_None,
SC_None, false, true);
Alloc->setImplicit();
-
+
if (AddMallocAttr)
Alloc->addAttr(::new (Context) MallocAttr(SourceLocation(), Context));
-
+
ParmVarDecl *Param = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
0, Argument, /*TInfo=*/0,
SC_None,
@@ -1308,7 +1527,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
// Try to find operator delete/operator delete[] in class scope.
LookupQualifiedName(Found, RD);
-
+
if (Found.isAmbiguous())
return true;
@@ -1352,7 +1571,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
if (!Found.empty()) {
Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
<< Name << RD;
-
+
for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
F != FEnd; ++F)
Diag((*F)->getUnderlyingDecl()->getLocation(),
@@ -1364,7 +1583,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
// Look for a global declaration.
DeclareGlobalNewDelete();
DeclContext *TUDecl = Context.getTranslationUnitDecl();
-
+
CXXNullPtrLiteralExpr Null(Context.VoidPtrTy, SourceLocation());
Expr* DeallocArgs[1];
DeallocArgs[0] = &Null;
@@ -1391,19 +1610,21 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// DR599 amends "pointer type" to "pointer to object type" in both cases.
FunctionDecl *OperatorDelete = 0;
+ bool ArrayFormAsWritten = ArrayForm;
+ bool UsualArrayDeleteWantsSize = false;
if (!Ex->isTypeDependent()) {
QualType Type = Ex->getType();
if (const RecordType *Record = Type->getAs<RecordType>()) {
- if (RequireCompleteType(StartLoc, Type,
+ if (RequireCompleteType(StartLoc, Type,
PDiag(diag::err_delete_incomplete_class_type)))
return ExprError();
-
+
llvm::SmallVector<CXXConversionDecl*, 4> ObjectPtrConversions;
CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
- const UnresolvedSetImpl *Conversions = RD->getVisibleConversionFunctions();
+ const UnresolvedSetImpl *Conversions = RD->getVisibleConversionFunctions();
for (UnresolvedSetImpl::iterator I = Conversions->begin(),
E = Conversions->end(); I != E; ++I) {
NamedDecl *D = I.getDecl();
@@ -1413,9 +1634,9 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// Skip over templated conversion functions; they aren't considered.
if (isa<FunctionTemplateDecl>(D))
continue;
-
+
CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
-
+
QualType ConvType = Conv->getConversionType().getNonReferenceType();
if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
@@ -1446,7 +1667,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
QualType Pointee = Type->getAs<PointerType>()->getPointeeType();
if (Pointee->isVoidType() && !isSFINAEContext()) {
- // The C++ standard bans deleting a pointer to a non-object type, which
+ // The C++ standard bans deleting a pointer to a non-object type, which
// effectively bans deletion of "void*". However, most compilers support
// this, so we treat it as a warning unless we're in a SFINAE context.
Diag(StartLoc, diag::ext_delete_void_ptr_operand)
@@ -1461,13 +1682,20 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
return ExprError();
// C++ [expr.delete]p2:
- // [Note: a pointer to a const type can be the operand of a
- // delete-expression; it is not necessary to cast away the constness
- // (5.2.11) of the pointer expression before it is used as the operand
+ // [Note: a pointer to a const type can be the operand of a
+ // delete-expression; it is not necessary to cast away the constness
+ // (5.2.11) of the pointer expression before it is used as the operand
// of the delete-expression. ]
- ImpCastExprToType(Ex, Context.getPointerType(Context.VoidTy),
+ ImpCastExprToType(Ex, Context.getPointerType(Context.VoidTy),
CK_NoOp);
-
+
+ if (Pointee->isArrayType() && !ArrayForm) {
+ Diag(StartLoc, diag::warn_delete_array_type)
+ << Type << Ex->getSourceRange()
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(StartLoc), "[]");
+ ArrayForm = true;
+ }
+
DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
ArrayForm ? OO_Array_Delete : OO_Delete);
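
A sketch of the new warning when a pointer to an array type is deleted without the array form; the fix-it inserts "[]" and sema recovers as if delete[] had been written:

    typedef int Arr[4];
    void f(Arr *p) {
      delete p;   // warn_delete_array_type; fix-it: delete [] p;
    }
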
@@ -1475,16 +1703,33 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (const RecordType *RT = PointeeElem->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (!UseGlobal &&
+ if (!UseGlobal &&
FindDeallocationFunction(StartLoc, RD, DeleteName, OperatorDelete))
return ExprError();
-
+
+ // If we're allocating an array of records, check whether the
+ // usual operator delete[] has a size_t parameter.
+ if (ArrayForm) {
+ // If the user specifically asked to use the global allocator,
+ // we'll need to do the lookup into the class.
+ if (UseGlobal)
+ UsualArrayDeleteWantsSize =
+ doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem);
+
+ // Otherwise, the usual operator delete[] should be the
+ // function we just found.
+ else if (isa<CXXMethodDecl>(OperatorDelete))
+ UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2);
+ }
+
if (!RD->hasTrivialDestructor())
- if (const CXXDestructorDecl *Dtor = LookupDestructor(RD))
+ if (CXXDestructorDecl *Dtor = LookupDestructor(RD)) {
MarkDeclarationReferenced(StartLoc,
const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, StartLoc);
+ }
}
-
+
if (!OperatorDelete) {
// Look for a global declaration.
DeclareGlobalNewDelete();
@@ -1496,38 +1741,49 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
}
MarkDeclarationReferenced(StartLoc, OperatorDelete);
+
+ // Check access and ambiguity of operator delete and destructor.
+ if (const RecordType *RT = PointeeElem->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (CXXDestructorDecl *Dtor = LookupDestructor(RD)) {
+ CheckDestructorAccess(Ex->getExprLoc(), Dtor,
+ PDiag(diag::err_access_dtor) << PointeeElem);
+ }
+ }
- // FIXME: Check access and ambiguity of operator delete and destructor.
}
return Owned(new (Context) CXXDeleteExpr(Context.VoidTy, UseGlobal, ArrayForm,
+ ArrayFormAsWritten,
+ UsualArrayDeleteWantsSize,
OperatorDelete, Ex, StartLoc));
}
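// NOTE (illustrative sketch, not part of this patch): the new
// UsualArrayDeleteWantsSize logic above detects the two-parameter form of
// a usual array deallocation function, which must receive the allocation
// size at the delete site:
//
//   #include <cstddef>
//   struct S {
//     ~S();
//     void operator delete[](void *p, std::size_t n);  // wants the size
//   };
//   void f(S *p) { delete [] p; }   // the size argument must be passed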
/// \brief Check the use of the given variable as a C++ condition in an if,
/// while, do-while, or switch statement.
ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
- SourceLocation StmtLoc,
- bool ConvertToBoolean) {
+ SourceLocation StmtLoc,
+ bool ConvertToBoolean) {
QualType T = ConditionVar->getType();
-
+
// C++ [stmt.select]p2:
// The declarator shall not specify a function or an array.
if (T->isFunctionType())
- return ExprError(Diag(ConditionVar->getLocation(),
+ return ExprError(Diag(ConditionVar->getLocation(),
diag::err_invalid_use_of_function_type)
<< ConditionVar->getSourceRange());
else if (T->isArrayType())
- return ExprError(Diag(ConditionVar->getLocation(),
+ return ExprError(Diag(ConditionVar->getLocation(),
diag::err_invalid_use_of_array_type)
<< ConditionVar->getSourceRange());
Expr *Condition = DeclRefExpr::Create(Context, 0, SourceRange(), ConditionVar,
- ConditionVar->getLocation(),
- ConditionVar->getType().getNonReferenceType());
+ ConditionVar->getLocation(),
+ ConditionVar->getType().getNonReferenceType(),
+ VK_LValue);
if (ConvertToBoolean && CheckBooleanCondition(Condition, StmtLoc))
return ExprError();
-
+
return Owned(Condition);
}
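// NOTE (illustrative sketch, not part of this patch): the checks above
// reject function- and array-typed condition variables and build an
// lvalue DeclRefExpr for the valid cases:
//
//   if (int *p = find(k)) use(*p);   // OK; then converted to bool
//   if (char buf[8] = "") {}         // error: array type in a condition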
@@ -1575,42 +1831,46 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
return false;
}
-static ExprResult BuildCXXCastArgument(Sema &S,
+static ExprResult BuildCXXCastArgument(Sema &S,
SourceLocation CastLoc,
QualType Ty,
CastKind Kind,
CXXMethodDecl *Method,
+ NamedDecl *FoundDecl,
Expr *From) {
switch (Kind) {
default: assert(0 && "Unhandled cast kind!");
case CK_ConstructorConversion: {
ASTOwningVector<Expr*> ConstructorArgs(S);
-
+
if (S.CompleteConstructorCall(cast<CXXConstructorDecl>(Method),
MultiExprArg(&From, 1),
CastLoc, ConstructorArgs))
return ExprError();
-
- ExprResult Result =
- S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
+
+ ExprResult Result =
+ S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
move_arg(ConstructorArgs),
- /*ZeroInit*/ false, CXXConstructExpr::CK_Complete);
+ /*ZeroInit*/ false, CXXConstructExpr::CK_Complete,
+ SourceRange());
if (Result.isInvalid())
return ExprError();
-
+
return S.MaybeBindToTemporary(Result.takeAs<Expr>());
}
-
+
case CK_UserDefinedConversion: {
assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
-
+
// Create an implicit call expr that calls it.
- // FIXME: pass the FoundDecl for the user-defined conversion here
- CXXMemberCallExpr *CE = S.BuildCXXMemberCallExpr(From, Method, Method);
- return S.MaybeBindToTemporary(CE);
+ ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Method);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return S.MaybeBindToTemporary(Result.get());
}
}
-}
+}
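// NOTE (illustrative sketch, not part of this patch): for
// CK_UserDefinedConversion the call is now built via BuildCXXMemberCallExpr
// with the FoundDecl threaded through, resolving the old FIXME, e.g.:
//
//   struct Celsius { operator double() const; };
//   double d = Celsius();   // lowers to a member call of operator double()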
/// PerformImplicitConversion - Perform an implicit conversion of the
/// expression From to the type ToType using the pre-computed implicit
@@ -1621,52 +1881,51 @@ static ExprResult BuildCXXCastArgument(Sema &S,
bool
Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
const ImplicitConversionSequence &ICS,
- AssignmentAction Action, bool IgnoreBaseAccess) {
+ AssignmentAction Action, bool CStyle) {
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
if (PerformImplicitConversion(From, ToType, ICS.Standard, Action,
- IgnoreBaseAccess))
+ CStyle))
return true;
break;
case ImplicitConversionSequence::UserDefinedConversion: {
-
+
FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
- CastKind CastKind = CK_Unknown;
+ CastKind CastKind;
QualType BeforeToType;
if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
CastKind = CK_UserDefinedConversion;
-
+
// If the user-defined conversion is specified by a conversion function,
// the initial standard conversion sequence converts the source type to
// the implicit object parameter of the conversion function.
BeforeToType = Context.getTagDeclType(Conv->getParent());
- } else if (const CXXConstructorDecl *Ctor =
- dyn_cast<CXXConstructorDecl>(FD)) {
+ } else {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
CastKind = CK_ConstructorConversion;
// Do no conversion if dealing with ... for the first conversion.
if (!ICS.UserDefined.EllipsisConversion) {
- // If the user-defined conversion is specified by a constructor, the
+ // If the user-defined conversion is specified by a constructor, the
// initial standard conversion sequence converts the source type to the
// type required by the argument of the constructor
BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
}
- }
- else
- assert(0 && "Unknown conversion function kind!");
- // Whatch out for elipsis conversion.
+ }
+ // Watch out for ellipsis conversion.
if (!ICS.UserDefined.EllipsisConversion) {
- if (PerformImplicitConversion(From, BeforeToType,
+ if (PerformImplicitConversion(From, BeforeToType,
ICS.UserDefined.Before, AA_Converting,
- IgnoreBaseAccess))
+ CStyle))
return true;
}
-
- ExprResult CastArg
+
+ ExprResult CastArg
= BuildCXXCastArgument(*this,
From->getLocStart(),
ToType.getNonReferenceType(),
- CastKind, cast<CXXMethodDecl>(FD),
+ CastKind, cast<CXXMethodDecl>(FD),
+ ICS.UserDefined.FoundConversionFunction,
From);
if (CastArg.isInvalid())
@@ -1675,7 +1934,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
From = CastArg.takeAs<Expr>();
return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
- AA_Converting, IgnoreBaseAccess);
+ AA_Converting, CStyle);
}
case ImplicitConversionSequence::AmbiguousConversion:
@@ -1683,7 +1942,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
PDiag(diag::err_typecheck_ambiguous_condition)
<< From->getSourceRange());
return true;
-
+
case ImplicitConversionSequence::EllipsisConversion:
assert(false && "Cannot perform an ellipsis conversion");
return false;
@@ -1705,7 +1964,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
bool
Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
const StandardConversionSequence& SCS,
- AssignmentAction Action, bool IgnoreBaseAccess) {
+ AssignmentAction Action, bool CStyle) {
// Overall FIXME: we are recomputing too many types here and doing far too
// much extra work. What this means is that we need to keep track of more
// information that is computed when we try the implicit conversion initially,
@@ -1719,7 +1978,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
ASTOwningVector<Expr*> ConstructorArgs(*this);
if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor),
MultiExprArg(*this, &From, 1),
- /*FIXME:ConstructLoc*/SourceLocation(),
+ /*FIXME:ConstructLoc*/SourceLocation(),
ConstructorArgs))
return true;
ExprResult FromResult =
@@ -1727,7 +1986,8 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
ToType, SCS.CopyConstructor,
move_arg(ConstructorArgs),
/*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete);
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
if (FromResult.isInvalid())
return true;
From = FromResult.takeAs<Expr>();
@@ -1738,7 +1998,8 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
ToType, SCS.CopyConstructor,
MultiExprArg(*this, &From, 1),
/*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete);
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
if (FromResult.isInvalid())
return true;
@@ -1765,10 +2026,25 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
// Perform the first implicit conversion.
switch (SCS.First) {
case ICK_Identity:
- case ICK_Lvalue_To_Rvalue:
// Nothing to do.
break;
+ case ICK_Lvalue_To_Rvalue:
+ // Should this get its own ICK?
+ if (From->getObjectKind() == OK_ObjCProperty) {
+ ConvertPropertyForRValue(From);
+ if (!From->isGLValue()) break;
+ }
+
+ // Check for trivial buffer overflows.
+ if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(From))
+ CheckArrayAccess(AE);
+
+ FromType = FromType.getUnqualifiedType();
+ From = ImplicitCastExpr::Create(Context, FromType, CK_LValueToRValue,
+ From, 0, VK_RValue);
+ break;
+
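// NOTE (illustrative sketch, not part of this patch): the explicit
// lvalue-to-rvalue step above also performs the trivial bounds check:
//
//   int a[4];
//   int x = a[4];   // CheckArrayAccess warns: index is one past the end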
case ICK_Array_To_Pointer:
FromType = Context.getArrayDecayedType(FromType);
ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay);
@@ -1798,12 +2074,11 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
// If both sides are functions (or pointers/references to them), there could
// be incompatible exception declarations.
if (CheckExceptionSpecCompatibility(From, ToType))
- return true;
-
- ImpCastExprToType(From, Context.getNoReturnType(From->getType(), false),
- CK_NoOp);
+ return true;
+
+ ImpCastExprToType(From, ToType, CK_NoOp);
break;
-
+
case ICK_Integral_Promotion:
case ICK_Integral_Conversion:
ImpCastExprToType(From, ToType, CK_IntegralCast);
@@ -1815,9 +2090,23 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
break;
case ICK_Complex_Promotion:
- case ICK_Complex_Conversion:
- ImpCastExprToType(From, ToType, CK_Unknown);
+ case ICK_Complex_Conversion: {
+ QualType FromEl = From->getType()->getAs<ComplexType>()->getElementType();
+ QualType ToEl = ToType->getAs<ComplexType>()->getElementType();
+ CastKind CK;
+ if (FromEl->isRealFloatingType()) {
+ if (ToEl->isRealFloatingType())
+ CK = CK_FloatingComplexCast;
+ else
+ CK = CK_FloatingComplexToIntegralComplex;
+ } else if (ToEl->isRealFloatingType()) {
+ CK = CK_IntegralComplexToFloatingComplex;
+ } else {
+ CK = CK_IntegralComplexCast;
+ }
+ ImpCastExprToType(From, ToType, CK);
break;
+ }
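// NOTE (illustrative sketch, not part of this patch): how the element
// types select the cast kind above:
//
//   _Complex float cf;  _Complex int ci;
//   _Complex double cd = cf;   // CK_FloatingComplexCast
//   ci = cf;                   // CK_FloatingComplexToIntegralComplex
//   cf = ci;                   // CK_IntegralComplexToFloatingComplex
//   _Complex long cl = ci;     // CK_IntegralComplexCast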
case ICK_Floating_Integral:
if (ToType->isRealFloatingType())
@@ -1831,7 +2120,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
break;
case ICK_Pointer_Conversion: {
- if (SCS.IncompatibleObjC) {
+ if (SCS.IncompatibleObjC && Action != AA_Casting) {
// Diagnose incompatible Objective-C conversions
Diag(From->getSourceRange().getBegin(),
diag::ext_typecheck_convert_incompatible_pointer)
@@ -1839,20 +2128,18 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
<< From->getSourceRange();
}
-
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Invalid;
CXXCastPath BasePath;
- if (CheckPointerConversion(From, ToType, Kind, BasePath, IgnoreBaseAccess))
+ if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle))
return true;
ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath);
break;
}
-
+
case ICK_Pointer_Member: {
- CastKind Kind = CK_Unknown;
+ CastKind Kind = CK_Invalid;
CXXCastPath BasePath;
- if (CheckMemberPointerConversion(From, ToType, Kind, BasePath,
- IgnoreBaseAccess))
+ if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle))
return true;
if (CheckExceptionSpecCompatibility(From, ToType))
return true;
@@ -1860,22 +2147,29 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
break;
}
case ICK_Boolean_Conversion: {
- CastKind Kind = CK_Unknown;
- if (FromType->isMemberPointerType())
- Kind = CK_MemberPointerToBoolean;
-
+ CastKind Kind = CK_Invalid;
+ switch (FromType->getScalarTypeKind()) {
+ case Type::STK_Pointer: Kind = CK_PointerToBoolean; break;
+ case Type::STK_MemberPointer: Kind = CK_MemberPointerToBoolean; break;
+ case Type::STK_Bool: llvm_unreachable("bool -> bool conversion?");
+ case Type::STK_Integral: Kind = CK_IntegralToBoolean; break;
+ case Type::STK_Floating: Kind = CK_FloatingToBoolean; break;
+ case Type::STK_IntegralComplex: Kind = CK_IntegralComplexToBoolean; break;
+ case Type::STK_FloatingComplex: Kind = CK_FloatingComplexToBoolean; break;
+ }
+
ImpCastExprToType(From, Context.BoolTy, Kind);
break;
}
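// NOTE (illustrative sketch, not part of this patch): the scalar-kind
// switch above selects one boolean-conversion cast kind per source kind:
//
//   struct S;  int i;  float f;  int *p;  int S::*mp;  _Complex float cf;
//   bool b1 = i;    // CK_IntegralToBoolean
//   bool b2 = f;    // CK_FloatingToBoolean
//   bool b3 = p;    // CK_PointerToBoolean
//   bool b4 = mp;   // CK_MemberPointerToBoolean
//   bool b5 = cf;   // CK_FloatingComplexToBoolean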
case ICK_Derived_To_Base: {
CXXCastPath BasePath;
- if (CheckDerivedToBaseConversion(From->getType(),
+ if (CheckDerivedToBaseConversion(From->getType(),
ToType.getNonReferenceType(),
From->getLocStart(),
- From->getSourceRange(),
+ From->getSourceRange(),
&BasePath,
- IgnoreBaseAccess))
+ CStyle))
return true;
ImpCastExprToType(From, ToType.getNonReferenceType(),
@@ -1891,10 +2185,60 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
case ICK_Vector_Splat:
ImpCastExprToType(From, ToType, CK_VectorSplat);
break;
-
+
case ICK_Complex_Real:
- ImpCastExprToType(From, ToType, CK_Unknown);
+ // Case 1. x -> _Complex y
+ if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
+ QualType ElType = ToComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, From->getType())) {
+ // do nothing
+ } else if (From->getType()->isRealFloatingType()) {
+ ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral);
+ } else {
+ assert(From->getType()->isIntegerType());
+ ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast);
+ }
+ // y -> _Complex y
+ ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingRealToComplex
+ : CK_IntegralRealToComplex);
+
+ // Case 2. _Complex x -> y
+ } else {
+ const ComplexType *FromComplex = From->getType()->getAs<ComplexType>();
+ assert(FromComplex);
+
+ QualType ElType = FromComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // _Complex x -> x
+ ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingComplexToReal
+ : CK_IntegralComplexToReal);
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, ToType)) {
+ // do nothing
+ } else if (ToType->isRealFloatingType()) {
+ ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating);
+ } else {
+ assert(ToType->isIntegerType());
+ ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast);
+ }
+ }
break;
+
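// NOTE (illustrative sketch, not part of this patch): a worked example of
// the real <-> complex lowering above:
//
//   int i = 3;  _Complex double cd;
//   cd = i;          // CK_IntegralToFloating (int -> double), then
//                    // CK_FloatingRealToComplex (double -> _Complex double)
//   double d = cd;   // CK_FloatingComplexToReal; element types match, so
//                    // no second cast is needed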
+ case ICK_Block_Pointer_Conversion: {
+ ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast, VK_RValue);
+ break;
+ }
case ICK_Lvalue_To_Rvalue:
case ICK_Array_To_Pointer:
@@ -1933,31 +2277,409 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
return false;
}
-ExprResult Sema::ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
- SourceLocation KWLoc,
- SourceLocation LParen,
- ParsedType Ty,
- SourceLocation RParen) {
- QualType T = GetTypeFromParser(Ty);
+ExprResult Sema::ActOnUnaryTypeTrait(UnaryTypeTrait UTT,
+ SourceLocation KWLoc,
+ ParsedType Ty,
+ SourceLocation RParen) {
+ TypeSourceInfo *TSInfo;
+ QualType T = GetTypeFromParser(Ty, &TSInfo);
+
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T);
+ return BuildUnaryTypeTrait(UTT, KWLoc, TSInfo, RParen);
+}
+
+static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT, QualType T,
+ SourceLocation KeyLoc) {
+ // FIXME: For many of these traits, we need a complete type before we can
+ // check these properties.
+ assert(!T->isDependentType() &&
+ "Cannot evaluate traits for dependent types.");
+ ASTContext &C = Self.Context;
+ switch(UTT) {
+ default: assert(false && "Unknown type trait or not implemented");
+ case UTT_IsPOD: return T->isPODType();
+ case UTT_IsLiteral: return T->isLiteralType();
+ case UTT_IsClass: // Fallthrough
+ case UTT_IsUnion:
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ bool Union = Record->getDecl()->isUnion();
+ return UTT == UTT_IsUnion ? Union : !Union;
+ }
+ return false;
+ case UTT_IsEnum: return T->isEnumeralType();
+ case UTT_IsPolymorphic:
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ // Type traits are only parsed in C++, so we've got CXXRecords.
+ return cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic();
+ }
+ return false;
+ case UTT_IsAbstract:
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->isAbstract();
+ return false;
+ case UTT_IsEmpty:
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ return !Record->getDecl()->isUnion()
+ && cast<CXXRecordDecl>(Record->getDecl())->isEmpty();
+ }
+ return false;
+ case UTT_HasTrivialConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true then the trait is true, else if type is
+ // a cv class or union type (or array thereof) with a trivial default
+ // constructor ([class.ctor]) then the trait is true, else it is false.
+ if (T->isPODType())
+ return true;
+ if (const RecordType *RT =
+ C.getBaseElementType(T)->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialConstructor();
+ return false;
+ case UTT_HasTrivialCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true or type is a reference type then
+ // the trait is true, else if type is a cv class or union type
+ // with a trivial copy constructor ([class.copy]) then the trait
+ // is true, else it is false.
+ if (T->isPODType() || T->isReferenceType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyConstructor();
+ return false;
+ case UTT_HasTrivialAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __is_pod (type) is true then the
+ // trait is true, else if type is a cv class or union type with
+ // a trivial copy assignment ([class.copy]) then the trait is
+ // true, else it is false.
+ // Note: the const and reference restrictions are interesting,
+ // given that const and reference members don't prevent a class
+ // from having a trivial copy assignment operator (but do cause
+ // errors if the copy assignment operator is actually used, q.v.
+ // [class.copy]p12).
+
+ if (C.getBaseElementType(T).isConstQualified())
+ return false;
+ if (T->isPODType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyAssignment();
+ return false;
+ case UTT_HasTrivialDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true or type is a reference type
+ // then the trait is true, else if type is a cv class or union
+ // type (or array thereof) with a trivial destructor
+ // ([class.dtor]) then the trait is true, else it is
+ // false.
+ if (T->isPODType() || T->isReferenceType())
+ return true;
+ if (const RecordType *RT =
+ C.getBaseElementType(T)->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor();
+ return false;
+ // TODO: Propagate nothrowness for implicitly declared special members.
+ case UTT_HasNothrowAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __has_trivial_assign (type)
+ // is true then the trait is true, else if type is a cv class
+ // or union type with copy assignment operators that are known
+ // not to throw an exception then the trait is true, else it is
+ // false.
+ if (C.getBaseElementType(T).isConstQualified())
+ return false;
+ if (T->isReferenceType())
+ return false;
+ if (T->isPODType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ CXXRecordDecl* RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialCopyAssignment())
+ return true;
+
+ bool FoundAssign = false;
+ bool AllNoThrow = true;
+ DeclarationName Name = C.DeclarationNames.getCXXOperatorName(OO_Equal);
+ LookupResult Res(Self, DeclarationNameInfo(Name, KeyLoc),
+ Sema::LookupOrdinaryName);
+ if (Self.LookupQualifiedName(Res, RD)) {
+ for (LookupResult::iterator Op = Res.begin(), OpEnd = Res.end();
+ Op != OpEnd; ++Op) {
+ CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
+ if (Operator->isCopyAssignmentOperator()) {
+ FoundAssign = true;
+ const FunctionProtoType *CPT
+ = Operator->getType()->getAs<FunctionProtoType>();
+ if (!CPT->hasEmptyExceptionSpec()) {
+ AllNoThrow = false;
+ break;
+ }
+ }
+ }
+ }
+
+ return FoundAssign && AllNoThrow;
+ }
+ return false;
+ case UTT_HasNothrowCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __has_trivial_copy (type) is true then the trait is true, else
+ // if type is a cv class or union type with copy constructors that are
+ // known not to throw an exception then the trait is true, else it is
+ // false.
+ if (T->isPODType() || T->isReferenceType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialCopyConstructor())
+ return true;
+
+ bool FoundConstructor = false;
+ bool AllNoThrow = true;
+ unsigned FoundTQs;
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = Self.LookupConstructors(RD);
+ Con != ConEnd; ++Con) {
+ // A template constructor is never a copy constructor.
+ // FIXME: However, it may actually be selected at the actual overload
+ // resolution point.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
+ FoundConstructor = true;
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ // TODO: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ if (!CPT->hasEmptyExceptionSpec() || CPT->getNumArgs() > 1) {
+ AllNoThrow = false;
+ break;
+ }
+ }
+ }
+
+ return FoundConstructor && AllNoThrow;
+ }
+ return false;
+ case UTT_HasNothrowConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __has_trivial_constructor (type) is true then the trait is
+ // true, else if type is a cv class or union type (or array
+ // thereof) with a default constructor that is known not to
+ // throw an exception then the trait is true, else it is false.
+ if (T->isPODType())
+ return true;
+ if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialConstructor())
+ return true;
+
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = Self.LookupConstructors(RD);
+ Con != ConEnd; ++Con) {
+ // FIXME: In C++0x, a constructor template can be a default constructor.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isDefaultConstructor()) {
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ // TODO: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ return CPT->hasEmptyExceptionSpec() && CPT->getNumArgs() == 0;
+ }
+ }
+ }
+ return false;
+ case UTT_HasVirtualDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is a class type with a virtual destructor ([class.dtor])
+ // then the trait is true, else it is false.
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (CXXDestructorDecl *Destructor = Self.LookupDestructor(RD))
+ return Destructor->isVirtual();
+ }
+ return false;
+ }
+}
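// NOTE (illustrative sketch, not part of this patch): spot checks for the
// evaluation above, following the documented GCC semantics:
//
//   struct POD  { int x; };
//   struct Virt { virtual ~Virt(); };
//   __is_pod(POD)                     // true
//   __is_polymorphic(Virt)            // true
//   __has_trivial_destructor(POD[4])  // true: array of POD
//   __has_virtual_destructor(Virt)    // true
//   __has_nothrow_copy(POD)           // true: trivial copy constructor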
+
+ExprResult Sema::BuildUnaryTypeTrait(UnaryTypeTrait UTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *TSInfo,
+ SourceLocation RParen) {
+ QualType T = TSInfo->getType();
// According to http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
// all traits except __is_class, __is_enum and __is_union require the type
- // to be complete.
- if (OTT != UTT_IsClass && OTT != UTT_IsEnum && OTT != UTT_IsUnion) {
- if (RequireCompleteType(KWLoc, T,
+ // to be complete, an array of unknown bound, or void.
+ if (UTT != UTT_IsClass && UTT != UTT_IsEnum && UTT != UTT_IsUnion) {
+ QualType E = T;
+ if (T->isIncompleteArrayType())
+ E = Context.getAsArrayType(T)->getElementType();
+ if (!T->isVoidType() &&
+ RequireCompleteType(KWLoc, E,
diag::err_incomplete_type_used_in_type_trait_expr))
return ExprError();
}
- // There is no point in eagerly computing the value. The traits are designed
- // to be used from type trait templates, so Ty will be a template parameter
- // 99% of the time.
- return Owned(new (Context) UnaryTypeTraitExpr(KWLoc, OTT, T,
+ bool Value = false;
+ if (!T->isDependentType())
+ Value = EvaluateUnaryTypeTrait(*this, UTT, T, KWLoc);
+
+ return Owned(new (Context) UnaryTypeTraitExpr(KWLoc, UTT, TSInfo, Value,
RParen, Context.BoolTy));
}
-QualType Sema::CheckPointerToMemberOperands(
- Expr *&lex, Expr *&rex, SourceLocation Loc, bool isIndirect) {
+ExprResult Sema::ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ ParsedType LhsTy,
+ ParsedType RhsTy,
+ SourceLocation RParen) {
+ TypeSourceInfo *LhsTSInfo;
+ QualType LhsT = GetTypeFromParser(LhsTy, &LhsTSInfo);
+ if (!LhsTSInfo)
+ LhsTSInfo = Context.getTrivialTypeSourceInfo(LhsT);
+
+ TypeSourceInfo *RhsTSInfo;
+ QualType RhsT = GetTypeFromParser(RhsTy, &RhsTSInfo);
+ if (!RhsTSInfo)
+ RhsTSInfo = Context.getTrivialTypeSourceInfo(RhsT);
+
+ return BuildBinaryTypeTrait(BTT, KWLoc, LhsTSInfo, RhsTSInfo, RParen);
+}
+
+static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
+ QualType LhsT, QualType RhsT,
+ SourceLocation KeyLoc) {
+ assert(!LhsT->isDependentType() && !RhsT->isDependentType() &&
+ "Cannot evaluate traits for dependent types.");
+
+ switch(BTT) {
+ case BTT_IsBaseOf: {
+ // C++0x [meta.rel]p2
+ // Base is a base class of Derived without regard to cv-qualifiers or
+ // Base and Derived are not unions and name the same class type without
+ // regard to cv-qualifiers.
+
+ const RecordType *lhsRecord = LhsT->getAs<RecordType>();
+ if (!lhsRecord) return false;
+
+ const RecordType *rhsRecord = RhsT->getAs<RecordType>();
+ if (!rhsRecord) return false;
+
+ assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT)
+ == (lhsRecord == rhsRecord));
+
+ if (lhsRecord == rhsRecord)
+ return !lhsRecord->getDecl()->isUnion();
+
+ // C++0x [meta.rel]p2:
+ // If Base and Derived are class types and are different types
+ // (ignoring possible cv-qualifiers) then Derived shall be a
+ // complete type.
+ if (Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ return cast<CXXRecordDecl>(rhsRecord->getDecl())
+ ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl()));
+ }
+
+ case BTT_TypeCompatible:
+ return Self.Context.typesAreCompatible(LhsT.getUnqualifiedType(),
+ RhsT.getUnqualifiedType());
+
+ case BTT_IsConvertibleTo: {
+ // C++0x [meta.rel]p4:
+ // Given the following function prototype:
+ //
+ // template <class T>
+ // typename add_rvalue_reference<T>::type create();
+ //
+ // the predicate condition for a template specialization
+ // is_convertible<From, To> shall be satisfied if and only if
+ // the return expression in the following code would be
+ // well-formed, including any implicit conversions to the return
+ // type of the function:
+ //
+ // To test() {
+ // return create<From>();
+ // }
+ //
+ // Access checking is performed as if in a context unrelated to To and
+ // From. Only the validity of the immediate context of the expression
+ // of the return-statement (including conversions to the return type)
+ // is considered.
+ //
+ // We model the initialization as a copy-initialization of a temporary
+ // of the appropriate type, which for this expression is identical to the
+ // return statement (since NRVO doesn't apply).
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+
+ InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
+ OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ Expr *FromPtr = &From;
+ InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
+ SourceLocation()));
+
+ // Perform the initialization within a SFINAE trap at translation unit
+ // scope.
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ InitializationSequence Init(Self, To, Kind, &FromPtr, 1);
+ if (Init.getKind() == InitializationSequence::FailedSequence)
+ return false;
+
+ ExprResult Result = Init.Perform(Self, To, Kind, MultiExprArg(&FromPtr, 1));
+ return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
+ }
+ }
+ llvm_unreachable("Unknown type trait or not implemented");
+}
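// NOTE (illustrative sketch, not part of this patch): expectations for
// the binary traits evaluated above:
//
//   struct B {};  struct D : B {};
//   __is_base_of(B, D)            // true; D must be complete here
//   __is_base_of(B, B)            // true: same non-union class type
//   __is_convertible_to(D, B)     // true; modeled as copy-initializing a
//                                 // temporary inside a SFINAE trap
//   __is_convertible_to(B, D)     // false: no conversion from a base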
+
+ExprResult Sema::BuildBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *LhsTSInfo,
+ TypeSourceInfo *RhsTSInfo,
+ SourceLocation RParen) {
+ QualType LhsT = LhsTSInfo->getType();
+ QualType RhsT = RhsTSInfo->getType();
+
+ if (BTT == BTT_TypeCompatible) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(KWLoc, diag::err_types_compatible_p_in_cplusplus)
+ << SourceRange(KWLoc, RParen);
+ return ExprError();
+ }
+ }
+
+ bool Value = false;
+ if (!LhsT->isDependentType() && !RhsT->isDependentType())
+ Value = EvaluateBinaryTypeTrait(*this, BTT, LhsT, RhsT, KWLoc);
+
+ // Select trait result type.
+ QualType ResultType;
+ switch (BTT) {
+ case BTT_IsBaseOf: ResultType = Context.BoolTy; break;
+ case BTT_TypeCompatible: ResultType = Context.IntTy; break;
+ case BTT_IsConvertibleTo: ResultType = Context.BoolTy; break;
+ }
+
+ return Owned(new (Context) BinaryTypeTraitExpr(KWLoc, BTT, LhsTSInfo,
+ RhsTSInfo, Value, RParen,
+ ResultType));
+}
+
+QualType Sema::CheckPointerToMemberOperands(Expr *&lex, Expr *&rex,
+ ExprValueKind &VK,
+ SourceLocation Loc,
+ bool isIndirect) {
const char *OpSpelling = isIndirect ? "->*" : ".*";
// C++ 5.5p2
// The binary operator .* [p3: ->*] binds its second operand, which shall
@@ -1973,8 +2695,11 @@ QualType Sema::CheckPointerToMemberOperands(
QualType Class(MemPtr->getClass(), 0);
- if (RequireCompleteType(Loc, Class, diag::err_memptr_rhs_to_incomplete))
- return QualType();
+ // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
+ // member pointer points must be completely-defined. However, there is no
+ // reason for this semantic distinction, and the rule is not enforced by
+ // other compilers. Therefore, we do not check this property, as it is
+ // likely to be considered a defect.
// C++ 5.5p2
// [...] to its first operand, which shall be of class T or of a class of
@@ -1983,7 +2708,7 @@ QualType Sema::CheckPointerToMemberOperands(
QualType LType = lex->getType();
if (isIndirect) {
if (const PointerType *Ptr = LType->getAs<PointerType>())
- LType = Ptr->getPointeeType().getNonReferenceType();
+ LType = Ptr->getPointeeType();
else {
Diag(Loc, diag::err_bad_memptr_lhs)
<< OpSpelling << 1 << LType
@@ -2024,6 +2749,7 @@ QualType Sema::CheckPointerToMemberOperands(
Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
return QualType();
}
+
// C++ 5.5p2
// The result is an object or a function of the type specified by the
// second operand.
@@ -2038,6 +2764,47 @@ QualType Sema::CheckPointerToMemberOperands(
// We probably need a "MemberFunctionClosureType" or something like that.
QualType Result = MemPtr->getPointeeType();
Result = Context.getCVRQualifiedType(Result, LType.getCVRQualifiers());
+
+ // C++0x [expr.mptr.oper]p6:
+ // In a .* expression whose object expression is an rvalue, the program is
+ // ill-formed if the second operand is a pointer to member function with
+ // ref-qualifier &. In a ->* expression or in a .* expression whose object
+ // expression is an lvalue, the program is ill-formed if the second operand
+ // is a pointer to member function with ref-qualifier &&.
+ if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
+ switch (Proto->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing
+ break;
+
+ case RQ_LValue:
+ if (!isIndirect && !lex->Classify(Context).isLValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RType << 1 << lex->getSourceRange();
+ break;
+
+ case RQ_RValue:
+ if (isIndirect || !lex->Classify(Context).isRValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RType << 0 << lex->getSourceRange();
+ break;
+ }
+ }
+
+ // C++ [expr.mptr.oper]p6:
+ // The result of a .* expression whose second operand is a pointer
+ // to a data member is of the same value category as its
+ // first operand. The result of a .* expression whose second
+ // operand is a pointer to a member function is a prvalue. The
+ // result of an ->* expression is an lvalue if its second operand
+ // is a pointer to data member and a prvalue otherwise.
+ if (Result->isFunctionType())
+ VK = VK_RValue;
+ else if (isIndirect)
+ VK = VK_LValue;
+ else
+ VK = lex->getValueKind();
+
return Result;
}
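// NOTE (illustrative sketch, not part of this patch): the C++0x
// ref-qualifier and value-category rules enforced above:
//
//   struct X {};
//   void (X::*pl)() &;    // lvalue-ref-qualified member function pointer
//   void (X::*pr)() &&;   // rvalue-ref-qualified member function pointer
//   X x;
//   (x.*pl)();            // OK: lvalue object with '&'
//   (X().*pl)();          // ill-formed: rvalue object with '&'
//   (x.*pr)();            // ill-formed: lvalue object with '&&'
//
// A '.*' naming a data member yields the value kind of its left operand,
// a '->*' naming a data member yields an lvalue, and any member function
// case yields a prvalue, matching the VK computation above.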
@@ -2054,29 +2821,29 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
QualType &ToType) {
HaveConversion = false;
ToType = To->getType();
-
- InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(),
+
+ InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(),
SourceLocation());
// C++0x 5.16p3
// The process for determining whether an operand expression E1 of type T1
// can be converted to match an operand expression E2 of type T2 is defined
// as follows:
// -- If E2 is an lvalue:
- bool ToIsLvalue = (To->isLvalue(Self.Context) == Expr::LV_Valid);
+ bool ToIsLvalue = To->isLValue();
if (ToIsLvalue) {
// E1 can be converted to match E2 if E1 can be implicitly converted to
// type "lvalue reference to T2", subject to the constraint that in the
// conversion the reference must bind directly to E1.
QualType T = Self.Context.getLValueReferenceType(ToType);
InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
-
+
InitializationSequence InitSeq(Self, Entity, Kind, &From, 1);
if (InitSeq.isDirectReferenceBinding()) {
ToType = T;
HaveConversion = true;
return false;
}
-
+
if (InitSeq.isAmbiguous())
return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
}
@@ -2088,9 +2855,9 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
QualType TTy = To->getType();
const RecordType *FRec = FTy->getAs<RecordType>();
const RecordType *TRec = TTy->getAs<RecordType>();
- bool FDerivedFromT = FRec && TRec && FRec != TRec &&
+ bool FDerivedFromT = FRec && TRec && FRec != TRec &&
Self.IsDerivedFrom(FTy, TTy);
- if (FRec && TRec &&
+ if (FRec && TRec &&
(FRec == TRec || FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) {
// E1 can be converted to match E2 if the class of T2 is the
// same type as, or a base class of, the class of T1, and
@@ -2103,28 +2870,28 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
HaveConversion = true;
return false;
}
-
+
if (InitSeq.isAmbiguous())
return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
- }
+ }
}
-
+
return false;
}
-
+
// -- Otherwise: E1 can be converted to match E2 if E1 can be
// implicitly converted to the type that expression E2 would have
- // if E2 were converted to an rvalue (or the type it has, if E2 is
+ // if E2 were converted to an rvalue (or the type it has, if E2 is
// an rvalue).
//
// This actually refers very narrowly to the lvalue-to-rvalue conversion, not
// to the array-to-pointer or function-to-pointer conversions.
if (!TTy->getAs<TagType>())
TTy = TTy.getUnqualifiedType();
-
+
InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
InitializationSequence InitSeq(Self, Entity, Kind, &From, 1);
- HaveConversion = InitSeq.getKind() != InitializationSequence::FailedSequence;
+ HaveConversion = InitSeq.getKind() != InitializationSequence::FailedSequence;
ToType = TTy;
if (InitSeq.isAmbiguous())
return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
@@ -2138,13 +2905,14 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
/// value operand is a class type, overload resolution is used to find a
/// conversion to a common type.
static bool FindConditionalOverload(Sema &Self, Expr *&LHS, Expr *&RHS,
- SourceLocation Loc) {
+ SourceLocation QuestionLoc) {
Expr *Args[2] = { LHS, RHS };
- OverloadCandidateSet CandidateSet(Loc);
- Self.AddBuiltinOperatorCandidates(OO_Conditional, Loc, Args, 2, CandidateSet);
+ OverloadCandidateSet CandidateSet(QuestionLoc);
+ Self.AddBuiltinOperatorCandidates(OO_Conditional, QuestionLoc, Args, 2,
+ CandidateSet);
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(Self, Loc, Best)) {
+ switch (CandidateSet.BestViableFunction(Self, QuestionLoc, Best)) {
case OR_Success:
// We found a match. Perform the conversions on the arguments and move on.
if (Self.PerformImplicitConversion(LHS, Best->BuiltinTypes.ParamTypes[0],
@@ -2155,13 +2923,20 @@ static bool FindConditionalOverload(Sema &Self, Expr *&LHS, Expr *&RHS,
return false;
case OR_No_Viable_Function:
- Self.Diag(Loc, diag::err_typecheck_cond_incompatible_operands)
+
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (Self.DiagnoseConditionalForNull(LHS, RHS, QuestionLoc))
+ return true;
+
+ Self.Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHS->getType() << RHS->getType()
<< LHS->getSourceRange() << RHS->getSourceRange();
return true;
case OR_Ambiguous:
- Self.Diag(Loc, diag::err_conditional_ambiguous_ovl)
+ Self.Diag(QuestionLoc, diag::err_conditional_ambiguous_ovl)
<< LHS->getType() << RHS->getType()
<< LHS->getSourceRange() << RHS->getSourceRange();
// FIXME: Print the possible common types by printing the return types of
@@ -2185,7 +2960,7 @@ static bool ConvertForConditional(Sema &Self, Expr *&E, QualType T) {
ExprResult Result = InitSeq.Perform(Self, Entity, Kind, MultiExprArg(&E, 1));
if (Result.isInvalid())
return true;
-
+
E = Result.takeAs<Expr>();
return false;
}
@@ -2195,6 +2970,7 @@ static bool ConvertForConditional(Sema &Self, Expr *&E, QualType T) {
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
/// extension. In this case, LHS == Cond. (But they're not aliases.)
QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
+ ExprValueKind &VK, ExprObjectKind &OK,
SourceLocation QuestionLoc) {
// FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++
// interface pointers.
@@ -2206,6 +2982,10 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
return QualType();
}
+ // Assume r-value.
+ VK = VK_RValue;
+ OK = OK_Ordinary;
+
// Either of the arguments dependent?
if (LHS->isTypeDependent() || RHS->isTypeDependent())
return Context.DependentTy;
@@ -2252,7 +3032,7 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
// Otherwise, if the second and third operand have different types, and
// either has (cv) class type, and attempt is made to convert each of those
// operands to the other.
- if (!Context.hasSameType(LTy, RTy) &&
+ if (!Context.hasSameType(LTy, RTy) &&
(LTy->isRecordType() || RTy->isRecordType())) {
ImplicitConversionSequence ICSLeftToRight, ICSRightToLeft;
// These return true if a single direction is already ambiguous.
@@ -2262,7 +3042,7 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
return QualType();
if (TryClassUnification(*this, RHS, LHS, QuestionLoc, HaveR2L, R2LType))
return QualType();
-
+
// If both can be converted, [...] the program is ill-formed.
if (HaveL2R && HaveR2L) {
Diag(QuestionLoc, diag::err_conditional_ambiguous)
@@ -2285,12 +3065,24 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
}
// C++0x 5.16p4
- // If the second and third operands are lvalues and have the same type,
- // the result is of that type [...]
+ // If the second and third operands are glvalues of the same value
+ // category and have the same type, the result is of that type and
+ // value category and it is a bit-field if the second or the third
+ // operand is a bit-field, or if both are bit-fields.
+ // We only extend this to bitfields, not to the crazy other kinds of
+ // l-values.
bool Same = Context.hasSameType(LTy, RTy);
- if (Same && LHS->isLvalue(Context) == Expr::LV_Valid &&
- RHS->isLvalue(Context) == Expr::LV_Valid)
+ if (Same &&
+ LHS->isGLValue() &&
+ LHS->getValueKind() == RHS->getValueKind() &&
+ LHS->isOrdinaryOrBitFieldObject() &&
+ RHS->isOrdinaryOrBitFieldObject()) {
+ VK = LHS->getValueKind();
+ if (LHS->getObjectKind() == OK_BitField ||
+ RHS->getObjectKind() == OK_BitField)
+ OK = OK_BitField;
return LTy;
+ }
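// NOTE (illustrative sketch, not part of this patch): consequences of the
// glvalue rule above:
//
//   bool c;  int x, y;
//   (c ? x : y) = 1;         // OK: both operands are int lvalues, so the
//                            // conditional is itself an lvalue
//   struct S { int bf : 4; } a, b;
//   (c ? a.bf : b.bf) = 2;   // OK: the result is an lvalue bit-field, so
//                            // OK is set to OK_BitField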
// C++0x 5.16p5
// Otherwise, the result is an rvalue. If the second and third operands
@@ -2321,18 +3113,18 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
if (LTy->isRecordType()) {
// The operands have class type. Make a temporary copy.
InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
- ExprResult LHSCopy = PerformCopyInitialization(Entity,
- SourceLocation(),
- Owned(LHS));
+ ExprResult LHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(LHS));
if (LHSCopy.isInvalid())
return QualType();
-
- ExprResult RHSCopy = PerformCopyInitialization(Entity,
- SourceLocation(),
- Owned(RHS));
+
+ ExprResult RHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(RHS));
if (RHSCopy.isInvalid())
return QualType();
-
+
LHS = LHSCopy.takeAs<Expr>();
RHS = RHSCopy.takeAs<Expr>();
}
@@ -2341,7 +3133,7 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
}
// Extension: conditional operator involving vector types.
- if (LTy->isVectorType() || RTy->isVectorType())
+ if (LTy->isVectorType() || RTy->isVectorType())
return CheckVectorOperands(QuestionLoc, LHS, RHS);
// -- The second and third operands have arithmetic or enumeration type;
@@ -2367,19 +3159,23 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
isSFINAEContext()? 0 : &NonStandardCompositeType);
if (!Composite.isNull()) {
if (NonStandardCompositeType)
- Diag(QuestionLoc,
+ Diag(QuestionLoc,
diag::ext_typecheck_cond_incompatible_operands_nonstandard)
<< LTy << RTy << Composite
<< LHS->getSourceRange() << RHS->getSourceRange();
-
+
return Composite;
}
-
+
// Similarly, attempt to find composite type of two objective-c pointers.
Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
if (!Composite.isNull())
return Composite;
+ // Check if we are using a null with a non-pointer type.
+ if (DiagnoseConditionalForNull(LHS, RHS, QuestionLoc))
+ return QualType();
+
Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHS->getType() << RHS->getType()
<< LHS->getSourceRange() << RHS->getSourceRange();
@@ -2400,12 +3196,12 @@ QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
/// a non-standard (but still sane) composite type to which both expressions
/// can be converted. When such a type is chosen, \c *NonStandardCompositeType
/// will be set true.
-QualType Sema::FindCompositePointerType(SourceLocation Loc,
+QualType Sema::FindCompositePointerType(SourceLocation Loc,
Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType) {
if (NonStandardCompositeType)
*NonStandardCompositeType = false;
-
+
assert(getLangOptions().CPlusPlus && "This function assumes C++");
QualType T1 = E1->getType(), T2 = E2->getType();
@@ -2422,14 +3218,14 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
if (T2->isMemberPointerType())
ImpCastExprToType(E1, T2, CK_NullToMemberPointer);
else
- ImpCastExprToType(E1, T2, CK_IntegralToPointer);
+ ImpCastExprToType(E1, T2, CK_NullToPointer);
return T2;
}
if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
if (T1->isMemberPointerType())
ImpCastExprToType(E2, T1, CK_NullToMemberPointer);
else
- ImpCastExprToType(E2, T1, CK_IntegralToPointer);
+ ImpCastExprToType(E2, T1, CK_NullToPointer);
return T1;
}
@@ -2456,20 +3252,20 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
ContainingClassVector MemberOfClass;
QualType Composite1 = Context.getCanonicalType(T1),
Composite2 = Context.getCanonicalType(T2);
- unsigned NeedConstBefore = 0;
+ unsigned NeedConstBefore = 0;
do {
const PointerType *Ptr1, *Ptr2;
if ((Ptr1 = Composite1->getAs<PointerType>()) &&
(Ptr2 = Composite2->getAs<PointerType>())) {
Composite1 = Ptr1->getPointeeType();
Composite2 = Ptr2->getPointeeType();
-
+
// If we're allowed to create a non-standard composite type, keep track
- // of where we need to fill in additional 'const' qualifiers.
+ // of where we need to fill in additional 'const' qualifiers.
if (NonStandardCompositeType &&
Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
NeedConstBefore = QualifierUnion.size();
-
+
QualifierUnion.push_back(
Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
MemberOfClass.push_back(std::make_pair((const Type *)0, (const Type *)0));
@@ -2481,13 +3277,13 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
(MemPtr2 = Composite2->getAs<MemberPointerType>())) {
Composite1 = MemPtr1->getPointeeType();
Composite2 = MemPtr2->getPointeeType();
-
+
// If we're allowed to create a non-standard composite type, keep track
- // of where we need to fill in additional 'const' qualifiers.
+ // of where we need to fill in additional 'const' qualifiers.
if (NonStandardCompositeType &&
Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
NeedConstBefore = QualifierUnion.size();
-
+
QualifierUnion.push_back(
Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
MemberOfClass.push_back(std::make_pair(MemPtr1->getClass(),
@@ -2503,7 +3299,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
if (NeedConstBefore && NonStandardCompositeType) {
// Extension: Add 'const' to qualifiers that come before the first qualifier
- // mismatch, so that our (non-standard!) composite type meets the
+ // mismatch, so that our (non-standard!) composite type meets the
// requirements of C++ [conv.qual]p4 bullet 3.
for (unsigned I = 0; I != NeedConstBefore; ++I) {
if ((QualifierUnion[I] & Qualifiers::Const) == 0) {
@@ -2512,7 +3308,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
}
}
}
-
+
// Rewrap the composites as pointers or member pointers with the union CVRs.
ContainingClassVector::reverse_iterator MOC
= MemberOfClass.rbegin();
@@ -2575,7 +3371,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
if (E2Result.isInvalid())
return QualType();
E2 = E2Result.takeAs<Expr>();
-
+
return Composite1;
}
@@ -2586,25 +3382,28 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
InitializationSequence E2ToC2(*this, Entity2, Kind, &E2, 1);
if (!E1ToC2 || !E2ToC2)
return QualType();
-
+
// Convert E1 to Composite2
ExprResult E1Result
= E1ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E1, 1));
if (E1Result.isInvalid())
return QualType();
E1 = E1Result.takeAs<Expr>();
-
+
// Convert E2 to Composite2
ExprResult E2Result
= E2ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E2, 1));
if (E2Result.isInvalid())
return QualType();
E2 = E2Result.takeAs<Expr>();
-
+
return Composite2;
}
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
+ if (!E)
+ return ExprError();
+
if (!Context.getLangOptions().CPlusPlus)
return Owned(E);
@@ -2614,17 +3413,9 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (!RT)
return Owned(E);
- // If this is the result of a call or an Objective-C message send expression,
- // our source might actually be a reference, in which case we shouldn't bind.
- if (CallExpr *CE = dyn_cast<CallExpr>(E)) {
- if (CE->getCallReturnType()->isReferenceType())
- return Owned(E);
- } else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
- if (const ObjCMethodDecl *MD = ME->getMethodDecl()) {
- if (MD->getResultType()->isReferenceType())
- return Owned(E);
- }
- }
+ // If the result is a glvalue, we shouldn't bind it.
+ if (E->Classify(Context).isGLValue())
+ return Owned(E);
// That should be enough to guarantee that this type is complete.
// If it has a trivial destructor, we can avoid the extra copy.
@@ -2644,48 +3435,49 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
return Owned(CXXBindTemporaryExpr::Create(Context, Temp, E));
}
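// NOTE (illustrative sketch, not part of this patch): the glvalue
// classification above subsumes the old call/message special cases:
//
//   struct T { ~T(); };
//   T  makeT();   // prvalue of class type: wrapped in CXXBindTemporaryExpr
//   T &getT();    // glvalue: returned unbound, no temporary is created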
-Expr *Sema::MaybeCreateCXXExprWithTemporaries(Expr *SubExpr) {
+Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
assert(SubExpr && "sub expression can't be null!");
- // Check any implicit conversions within the expression.
- CheckImplicitConversions(SubExpr);
-
unsigned FirstTemporary = ExprEvalContexts.back().NumTemporaries;
assert(ExprTemporaries.size() >= FirstTemporary);
if (ExprTemporaries.size() == FirstTemporary)
return SubExpr;
- Expr *E = CXXExprWithTemporaries::Create(Context, SubExpr,
- &ExprTemporaries[FirstTemporary],
- ExprTemporaries.size() - FirstTemporary);
+ Expr *E = ExprWithCleanups::Create(Context, SubExpr,
+ &ExprTemporaries[FirstTemporary],
+ ExprTemporaries.size() - FirstTemporary);
ExprTemporaries.erase(ExprTemporaries.begin() + FirstTemporary,
ExprTemporaries.end());
return E;
}
-ExprResult
-Sema::MaybeCreateCXXExprWithTemporaries(ExprResult SubExpr) {
+ExprResult
+Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
if (SubExpr.isInvalid())
return ExprError();
-
- return Owned(MaybeCreateCXXExprWithTemporaries(SubExpr.takeAs<Expr>()));
+
+ return Owned(MaybeCreateExprWithCleanups(SubExpr.take()));
}
-FullExpr Sema::CreateFullExpr(Expr *SubExpr) {
+Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
+ assert(SubStmt && "sub statement can't be null!");
+
unsigned FirstTemporary = ExprEvalContexts.back().NumTemporaries;
assert(ExprTemporaries.size() >= FirstTemporary);
-
- unsigned NumTemporaries = ExprTemporaries.size() - FirstTemporary;
- CXXTemporary **Temporaries =
- NumTemporaries == 0 ? 0 : &ExprTemporaries[FirstTemporary];
-
- FullExpr E = FullExpr::Create(Context, SubExpr, Temporaries, NumTemporaries);
-
- ExprTemporaries.erase(ExprTemporaries.begin() + FirstTemporary,
- ExprTemporaries.end());
-
- return E;
+ if (ExprTemporaries.size() == FirstTemporary)
+ return SubStmt;
+
+ // FIXME: In order to attach the temporaries, wrap the statement into
+ // a StmtExpr; currently this is only used for asm statements.
+ // This is hacky; either create a new CXXStmtWithTemporaries statement or
+ // a new AsmStmtWithTemporaries.
+ CompoundStmt *CompStmt = new (Context) CompoundStmt(Context, &SubStmt, 1,
+ SourceLocation(),
+ SourceLocation());
+ Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(),
+ SourceLocation());
+ return MaybeCreateExprWithCleanups(E);
}
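// NOTE (illustrative sketch, not part of this patch): the net effect of
// the wrapping above is that an asm statement whose operands created
// temporaries becomes, roughly,
//
//   ({ asm-statement; })   // a void StmtExpr inside an ExprWithCleanups
//
// so the temporaries' destructors still run at the end of the statement.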
ExprResult
@@ -2706,7 +3498,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
if (OpKind == tok::arrow)
if (const PointerType *Ptr = BaseType->getAs<PointerType>())
BaseType = Ptr->getPointeeType();
-
+
ObjectType = ParsedType::make(BaseType);
MayBePseudoDestructor = true;
return Owned(Base);
@@ -2720,7 +3512,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
llvm::SmallPtrSet<CanQualType,8> CTypes;
llvm::SmallVector<SourceLocation, 8> Locations;
CTypes.insert(Context.getCanonicalType(BaseType));
-
+
while (BaseType->isRecordType()) {
Result = BuildOverloadedArrowExpr(S, Base, OpLoc);
if (Result.isInvalid())
@@ -2760,10 +3552,10 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
// The object type must be complete (or dependent).
if (!BaseType->isDependentType() &&
- RequireCompleteType(OpLoc, BaseType,
+ RequireCompleteType(OpLoc, BaseType,
PDiag(diag::err_incomplete_member_access)))
return ExprError();
-
+
// C++ [basic.lookup.classref]p2:
// If the id-expression in a class member access (5.2.5) is an
// unqualified-id, and the type of the object expression is of a class
@@ -2779,12 +3571,11 @@ ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc,
Diag(MemExpr->getLocStart(), diag::err_dtor_expr_without_call)
<< isa<CXXPseudoDestructorExpr>(MemExpr)
<< FixItHint::CreateInsertion(ExpectedLParenLoc, "()");
-
+
return ActOnCallExpr(/*Scope*/ 0,
MemExpr,
/*LPLoc*/ ExpectedLParenLoc,
MultiExprArg(),
- /*CommaLocs*/ 0,
/*RPLoc*/ ExpectedLParenLoc);
}
@@ -2798,11 +3589,11 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
PseudoDestructorTypeStorage Destructed,
bool HasTrailingLParen) {
TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
-
+
// C++ [expr.pseudo]p2:
- // The left-hand side of the dot operator shall be of scalar type. The
+ // The left-hand side of the dot operator shall be of scalar type. The
// left-hand side of the arrow operator shall be of pointer to scalar type.
- // This scalar type is the object type.
+ // This scalar type is the object type.
QualType ObjectType = Base->getType();
if (OpKind == tok::arrow) {
if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
@@ -2814,11 +3605,11 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
<< FixItHint::CreateReplacement(OpLoc, ".");
if (isSFINAEContext())
return ExprError();
-
+
OpKind = tok::period;
}
}
-
+
if (!ObjectType->isDependentType() && !ObjectType->isScalarType()) {
Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
<< ObjectType << Base->getSourceRange();
@@ -2826,7 +3617,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
}
// C++ [expr.pseudo]p2:
- // [...] The cv-unqualified versions of the object type and of the type
+ // [...] The cv-unqualified versions of the object type and of the type
// designated by the pseudo-destructor-name shall be the same type.
if (DestructedTypeInfo) {
QualType DestructedType = DestructedTypeInfo->getType();
@@ -2837,7 +3628,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
<< ObjectType << DestructedType << Base->getSourceRange()
<< DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
-
+
// Recover by setting the destructed type to the object type.
DestructedType = ObjectType;
DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
@@ -2845,29 +3636,29 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
}
}
-
+
// C++ [expr.pseudo]p2:
// [...] Furthermore, the two type-names in a pseudo-destructor-name of the
// form
//
- // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
+ // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
//
// shall designate the same scalar type.
if (ScopeTypeInfo) {
QualType ScopeType = ScopeTypeInfo->getType();
if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
!Context.hasSameUnqualifiedType(ScopeType, ObjectType)) {
-
+
Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(),
diag::err_pseudo_dtor_type_mismatch)
<< ObjectType << ScopeType << Base->getSourceRange()
<< ScopeTypeInfo->getTypeLoc().getLocalSourceRange();
-
+
ScopeType = QualType();
ScopeTypeInfo = 0;
}
}
-
+
Expr *Result
= new (Context) CXXPseudoDestructorExpr(Context, Base,
OpKind == tok::arrow, OpLoc,
@@ -2876,10 +3667,10 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
CCLoc,
TildeLoc,
Destructed);
-
+
if (HasTrailingLParen)
return Owned(Result);
-
+
return DiagnoseDtorReference(Destructed.getLocation(), Result);
}
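// NOTE (illustrative sketch, not part of this patch): the pseudo-destructor
// forms built above:
//
//   typedef int Int;
//   void f(int *p) {
//     p->~Int();        // OK: scalar type; the call itself has no effect
//     p->Int::~Int();   // explicit scope type, which must match the object
//   }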
@@ -2900,9 +3691,9 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
"Invalid second type name in pseudo-destructor");
// C++ [expr.pseudo]p2:
- // The left-hand side of the dot operator shall be of scalar type. The
+ // The left-hand side of the dot operator shall be of scalar type. The
// left-hand side of the arrow operator shall be of pointer to scalar type.
- // This scalar type is the object type.
+ // This scalar type is the object type.
QualType ObjectType = Base->getType();
if (OpKind == tok::arrow) {
if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
@@ -2914,7 +3705,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
<< FixItHint::CreateReplacement(OpLoc, ".");
if (isSFINAEContext())
return ExprError();
-
+
OpKind = tok::period;
}
}
@@ -2928,32 +3719,32 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
else if (ObjectType->isDependentType())
ObjectTypePtrForLookup = ParsedType::make(Context.DependentTy);
}
-
- // Convert the name of the type being destructed (following the ~) into a
+
+ // Convert the name of the type being destructed (following the ~) into a
// type (with source-location information).
QualType DestructedType;
TypeSourceInfo *DestructedTypeInfo = 0;
PseudoDestructorTypeStorage Destructed;
if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) {
- ParsedType T = getTypeName(*SecondTypeName.Identifier,
+ ParsedType T = getTypeName(*SecondTypeName.Identifier,
SecondTypeName.StartLocation,
- S, &SS, true, ObjectTypePtrForLookup);
- if (!T &&
+ S, &SS, true, false, ObjectTypePtrForLookup);
+ if (!T &&
((SS.isSet() && !computeDeclContext(SS, false)) ||
(!SS.isSet() && ObjectType->isDependentType()))) {
- // The name of the type being destroyed is a dependent name, and we
+ // The name of the type being destroyed is a dependent name, and we
// couldn't find anything useful in scope. Just store the identifier and
// its location, and we'll perform (qualified) name lookup again at
// template instantiation time.
Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
SecondTypeName.StartLocation);
} else if (!T) {
- Diag(SecondTypeName.StartLocation,
+ Diag(SecondTypeName.StartLocation,
diag::err_pseudo_dtor_destructor_non_type)
<< SecondTypeName.Identifier << ObjectType;
if (isSFINAEContext())
return ExprError();
-
+
// Recover by assuming we had the right type all along.
DestructedType = ObjectType;
} else
@@ -2975,8 +3766,8 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
} else
DestructedType = GetTypeFromParser(T.get(), &DestructedTypeInfo);
}
-
- // If we've performed some kind of recovery, (re-)build the type source
+
+ // If we've performed some kind of recovery, (re-)build the type source
// information.
if (!DestructedType.isNull()) {
if (!DestructedTypeInfo)
@@ -2984,24 +3775,24 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SecondTypeName.StartLocation);
Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
}
-
+
// Convert the name of the scope type (the type prior to '::') into a type.
TypeSourceInfo *ScopeTypeInfo = 0;
QualType ScopeType;
- if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
FirstTypeName.Identifier) {
if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) {
- ParsedType T = getTypeName(*FirstTypeName.Identifier,
+ ParsedType T = getTypeName(*FirstTypeName.Identifier,
FirstTypeName.StartLocation,
- S, &SS, false, ObjectTypePtrForLookup);
+ S, &SS, false, false, ObjectTypePtrForLookup);
if (!T) {
- Diag(FirstTypeName.StartLocation,
+ Diag(FirstTypeName.StartLocation,
diag::err_pseudo_dtor_destructor_non_type)
<< FirstTypeName.Identifier << ObjectType;
-
+
if (isSFINAEContext())
return ExprError();
-
+
// Just drop this type. It's unnecessary anyway.
ScopeType = QualType();
} else
@@ -3021,39 +3812,100 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
// Recover by dropping this type.
ScopeType = QualType();
} else
- ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo);
+ ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo);
}
}
-
+
if (!ScopeType.isNull() && !ScopeTypeInfo)
ScopeTypeInfo = Context.getTrivialTypeSourceInfo(ScopeType,
FirstTypeName.StartLocation);
-
+
return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
ScopeTypeInfo, CCLoc, TildeLoc,
Destructed, HasTrailingLParen);
}
-CXXMemberCallExpr *Sema::BuildCXXMemberCallExpr(Expr *Exp,
- NamedDecl *FoundDecl,
- CXXMethodDecl *Method) {
+ExprResult Sema::BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
+ CXXMethodDecl *Method) {
if (PerformObjectArgumentInitialization(Exp, /*Qualifier=*/0,
FoundDecl, Method))
- assert(0 && "Calling BuildCXXMemberCallExpr with invalid call?");
+ return true;
- MemberExpr *ME =
+ MemberExpr *ME =
new (Context) MemberExpr(Exp, /*IsArrow=*/false, Method,
- SourceLocation(), Method->getType());
- QualType ResultType = Method->getCallResultType();
+ SourceLocation(), Method->getType(),
+ VK_RValue, OK_Ordinary);
+ QualType ResultType = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
MarkDeclarationReferenced(Exp->getLocStart(), Method);
CXXMemberCallExpr *CE =
- new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType,
+ new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, VK,
Exp->getLocEnd());
return CE;
}
+ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
+ SourceLocation RParen) {
+ return Owned(new (Context) CXXNoexceptExpr(Context.BoolTy, Operand,
+ Operand->CanThrow(Context),
+ KeyLoc, RParen));
+}
+
+ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
+ Expr *Operand, SourceLocation RParen) {
+ return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
+}
+
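+
(A sketch of the operator BuildCXXNoexceptExpr models; hypothetical C++0x
input, with the result derived from Operand->CanThrow(Context).)

    void safe() throw();
    void risky();
    bool a = noexcept(safe());   // true: the call cannot throw
    bool b = noexcept(risky());  // false: potentially-throwing call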
+/// Perform the conversions required for an expression used in a
+/// context that ignores the result.
+void Sema::IgnoredValueConversions(Expr *&E) {
+ // C99 6.3.2.1:
+ // [Except in specific positions,] an lvalue that does not have
+ // array type is converted to the value stored in the
+ // designated object (and is no longer an lvalue).
+ if (E->isRValue()) return;
+
+ // We always want to do this on ObjC property references.
+ if (E->getObjectKind() == OK_ObjCProperty) {
+ ConvertPropertyForRValue(E);
+ if (E->isRValue()) return;
+ }
+
+ // Otherwise, this rule does not apply in C++, at least not for the moment.
+ if (getLangOptions().CPlusPlus) return;
+
+ // GCC seems to also exclude expressions of incomplete enum type.
+ if (const EnumType *T = E->getType()->getAs<EnumType>()) {
+ if (!T->getDecl()->isComplete()) {
+ // FIXME: stupid workaround for a codegen bug!
+ ImpCastExprToType(E, Context.VoidTy, CK_ToVoid);
+ return;
+ }
+ }
+
+ DefaultFunctionArrayLvalueConversion(E);
+ if (!E->getType()->isVoidType())
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_incomplete_type);
+}
+
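+
(The C99 rule IgnoredValueConversions implements, as a hypothetical C input;
per the comment above, the conversion is deliberately skipped in C++.)

    volatile int flag;
    void poll(void) {
      flag;  // C99 6.3.2.1: lvalue-to-rvalue applies, so the volatile load occurs
    }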
ExprResult Sema::ActOnFinishFullExpr(Expr *FullExpr) {
- if (!FullExpr) return ExprError();
- return MaybeCreateCXXExprWithTemporaries(FullExpr);
+ if (!FullExpr)
+ return ExprError();
+
+ if (DiagnoseUnexpandedParameterPack(FullExpr))
+ return ExprError();
+
+ IgnoredValueConversions(FullExpr);
+ CheckImplicitConversions(FullExpr);
+ return MaybeCreateExprWithCleanups(FullExpr);
+}
+
+StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
+ if (!FullStmt) return StmtError();
+
+ return MaybeCreateStmtWithCleanups(FullStmt);
}
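
(DiagnoseUnexpandedParameterPack now runs on every full-expression; a
hypothetical C++0x input showing what it rejects.)

    template <typename ...Ts> int g(Ts ...ts);
    template <typename ...Ts>
    void h(Ts ...ts) {
      g(ts...);   // ok: the pack is expanded
      // g(ts);   // ill-formed: 'ts' is an unexpanded parameter pack
    }
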
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
index b56159c..4d03b06 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -14,6 +14,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -23,6 +24,7 @@
#include "clang/Lex/Preprocessor.h"
using namespace clang;
+using namespace sema;
ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **strings,
@@ -77,7 +79,14 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
if (!Ty.isNull()) {
Ty = Context.getObjCObjectPointerType(Ty);
} else if (getLangOptions().NoConstantCFStrings) {
- IdentifierInfo *NSIdent = &Context.Idents.get("NSConstantString");
+ IdentifierInfo *NSIdent = 0;
+ std::string StringClass(getLangOptions().ObjCConstantStringClass);
+
+ if (StringClass.empty())
+ NSIdent = &Context.Idents.get("NSConstantString");
+ else
+ NSIdent = &Context.Idents.get(StringClass);
+
NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLocs[0],
LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
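
(The new lookup honors LangOptions::ObjCConstantStringClass; assuming the
usual driver spelling of the flag, a hypothetical invocation.)

    // clang -fno-constant-cfstrings -fconstant-string-class=MyString file.m
    @interface MyString   // @"..." literals are now typed against this class
    @end
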
@@ -188,11 +197,49 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, RParenLoc);
}
+/// Try to capture an implicit reference to 'self'.
+ObjCMethodDecl *Sema::tryCaptureObjCSelf() {
+ // Ignore block scopes: we can capture through them.
+ DeclContext *DC = CurContext;
+ while (true) {
+ if (isa<BlockDecl>(DC)) DC = cast<BlockDecl>(DC)->getDeclContext();
+ else if (isa<EnumDecl>(DC)) DC = cast<EnumDecl>(DC)->getDeclContext();
+ else break;
+ }
+
+ // If we're not in an ObjC method, error out. Note that, unlike the
+ // C++ case, we don't require an instance method --- class methods
+ // still have a 'self', and we really do still need to capture it!
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(DC);
+ if (!method)
+ return 0;
+
+ ImplicitParamDecl *self = method->getSelfDecl();
+ assert(self && "capturing 'self' in non-definition?");
+
+ // Mark that we're closing on 'self' in all the block scopes, if applicable.
+ for (unsigned idx = FunctionScopes.size() - 1;
+ isa<BlockScopeInfo>(FunctionScopes[idx]);
+ --idx) {
+ BlockScopeInfo *blockScope = cast<BlockScopeInfo>(FunctionScopes[idx]);
+ unsigned &captureIndex = blockScope->CaptureMap[self];
+ if (captureIndex) break;
+
+ bool nested = isa<BlockScopeInfo>(FunctionScopes[idx-1]);
+ blockScope->Captures.push_back(
+ BlockDecl::Capture(self, /*byref*/ false, nested, /*copy*/ 0));
+ captureIndex = blockScope->Captures.size(); // stored as index + 1; 0 in CaptureMap means 'not captured'
+ }
+
+ return method;
+}
+
+
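+
(What tryCaptureObjCSelf arranges, as a hypothetical input: an implicit
reference to 'self' inside nested blocks in an Objective-C method.)

    @interface Widget
    - (void)ping;
    - (void)schedule;
    @end
    @implementation Widget
    - (void)ping {}
    - (void)schedule {
      void (^outer)(void) = ^{
        void (^inner)(void) = ^{ [self ping]; };  // 'self' is captured by
        inner();                                  // 'inner' and, as a nested
      };                                          // capture, by 'outer' too
      outer();
    }
    @end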
bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
Selector Sel, ObjCMethodDecl *Method,
bool isClassMessage,
SourceLocation lbrac, SourceLocation rbrac,
- QualType &ReturnType) {
+ QualType &ReturnType, ExprValueKind &VK) {
if (!Method) {
// Apply default argument promotion as for (C99 6.5.2.2p6).
for (unsigned i = 0; i != NumArgs; i++) {
@@ -207,10 +254,12 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
Diag(lbrac, DiagID)
<< Sel << isClassMessage << SourceRange(lbrac, rbrac);
ReturnType = Context.getObjCIdType();
+ VK = VK_RValue;
return false;
}
ReturnType = Method->getSendResultType();
+ VK = Expr::getValueKindForType(Method->getResultType());
unsigned NumNamedArgs = Sel.getNumArgs();
// Method might have more arguments than selector indicates. This is due
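
(The new ExprValueKind out-parameter matters mostly in Objective-C++, where a
method can return a reference; a hypothetical fragment.)

    @interface Counter
    - (int &)slot;
    @end
    void bump(Counter *c) {
      [c slot] += 1;  // VK_LValue: result type 'int &' makes the send assignable
    }
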
@@ -219,8 +268,8 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
NumNamedArgs = Method->param_size();
// FIXME: this needs to be cleaned up.
if (NumArgs < NumNamedArgs) {
- Diag(lbrac, diag::err_typecheck_call_too_few_args) << 2
- << NumNamedArgs << NumArgs;
+ Diag(lbrac, diag::err_typecheck_call_too_few_args)
+ << 2 << NumNamedArgs << NumArgs;
return false;
}
@@ -241,10 +290,9 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
<< argExpr->getSourceRange()))
return true;
- InitializedEntity Entity = InitializedEntity::InitializeParameter(Param);
- ExprResult ArgE = PerformCopyInitialization(Entity,
- SourceLocation(),
- Owned(argExpr->Retain()));
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Param);
+ ExprResult ArgE = PerformCopyInitialization(Entity, lbrac, Owned(argExpr));
if (ArgE.isInvalid())
IsError = true;
else
@@ -276,6 +324,9 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
}
bool Sema::isSelfExpr(Expr *RExpr) {
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RExpr))
+ if (ICE->getCastKind() == CK_LValueToRValue)
+ RExpr = ICE->getSubExpr();
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RExpr))
if (DRE->getDecl()->getIdentifier() == &Context.Idents.get("self"))
return true;
@@ -335,11 +386,19 @@ ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
ExprResult Sema::
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr, DeclarationName MemberName,
- SourceLocation MemberLoc) {
+ SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType,
+ bool Super) {
const ObjCInterfaceType *IFaceT = OPT->getInterfaceType();
ObjCInterfaceDecl *IFace = IFaceT->getDecl();
IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ if (IFace->isForwardDecl()) {
+ Diag(MemberLoc, diag::err_property_not_found_forward_class)
+ << MemberName << QualType(OPT, 0);
+ Diag(IFace->getLocation(), diag::note_forward_class);
+ return ExprError();
+ }
// Search for a declared property first.
if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) {
// Check whether we can reference this property.
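
(The new isForwardDecl check rejects property access through a class that is
only forward-declared; hypothetical input.)

    @class Remote;  // forward declaration; no @interface in scope
    int poll(Remote *r) {
      return r.status;  // error: property on an object of a forward class
    }
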
@@ -349,9 +408,17 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
if (DiagnosePropertyAccessorMismatch(PD, Getter, MemberLoc))
- ResTy = Getter->getSendResultType();
- return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
- MemberLoc, BaseExpr));
+ ResTy = Getter->getResultType();
+
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc, BaseExpr));
}
// Check protocols on qualified interfaces.
for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
@@ -360,9 +427,18 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
-
- return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
- MemberLoc, BaseExpr));
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ BaseExpr));
}
// If that failed, look for an "implicit" property by seeing if the nullary
// selector is implemented.
@@ -375,7 +451,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// If this reference is in an @implementation, check for 'private' methods.
if (!Getter)
- Getter = IFace->lookupPrivateInstanceMethod(Sel);
+ Getter = IFace->lookupPrivateMethod(Sel);
// Look through local category implementations associated with the class.
if (!Getter)
@@ -394,7 +470,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (!Setter) {
// If this reference is in an @implementation, also check for 'private'
// methods.
- Setter = IFace->lookupPrivateInstanceMethod(SetterSel);
+ Setter = IFace->lookupPrivateMethod(SetterSel);
}
// Look through local category implementations associated with the class.
if (!Setter)
@@ -403,11 +479,31 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
return ExprError();
- if (Getter) {
+ if (Getter || Setter) {
QualType PType;
- PType = Getter->getSendResultType();
- return Owned(new (Context) ObjCImplicitSetterGetterRefExpr(Getter, PType,
- Setter, MemberLoc, BaseExpr));
+ if (Getter)
+ PType = Getter->getSendResultType();
+ else {
+ ParmVarDecl *ArgDecl = *Setter->param_begin();
+ PType = ArgDecl->getType();
+ }
+
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_ObjCProperty;
+ if (!getLangOptions().CPlusPlus && !PType.hasQualifiers() &&
+ PType->isVoidType())
+ VK = VK_RValue, OK = OK_Ordinary;
+
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ PType, VK, OK,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ PType, VK, OK,
+ MemberLoc, BaseExpr));
+
}
// Attempt to correct for typos in property names.
@@ -421,14 +517,31 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
ObjCPropertyDecl *Property = Res.getAsSingle<ObjCPropertyDecl>();
Diag(Property->getLocation(), diag::note_previous_decl)
<< Property->getDeclName();
- return HandleExprPropertyRefExpr(OPT, BaseExpr, TypoResult, MemberLoc);
+ return HandleExprPropertyRefExpr(OPT, BaseExpr, TypoResult, MemberLoc,
+ SuperLoc, SuperType, Super);
+ }
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *Ivar =
+ IFace->lookupInstanceVariable(Member, ClassDeclared)) {
+ QualType T = Ivar->getType();
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ if (ObjCInterfaceDecl *IFace = IFaceT->getDecl())
+ if (IFace->isForwardDecl()) {
+ Diag(MemberLoc, diag::err_property_not_as_forward_class)
+ << MemberName << IFace;
+ Diag(IFace->getLocation(), diag::note_forward_class);
+ return ExprError();
+ }
+ }
}
Diag(MemberLoc, diag::err_property_not_found)
<< MemberName << QualType(OPT, 0);
- if (Setter && !Getter)
+ if (Setter)
Diag(Setter->getLocation(), diag::note_getter_unavailable)
- << MemberName << BaseExpr->getSourceRange();
+ << MemberName << BaseExpr->getSourceRange();
return ExprError();
}
@@ -446,23 +559,24 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (IFace == 0) {
// If the "receiver" is 'super' in a method, handle it as an expression-like
// property reference.
- if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
- if (receiverNamePtr->isStr("super")) {
+ if (receiverNamePtr->isStr("super")) {
+ if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf()) {
if (CurMethod->isInstanceMethod()) {
QualType T =
Context.getObjCInterfaceType(CurMethod->getClassInterface());
T = Context.getObjCObjectPointerType(T);
- Expr *SuperExpr = new (Context) ObjCSuperExpr(receiverNameLoc, T);
return HandleExprPropertyRefExpr(T->getAsObjCInterfacePointerType(),
- SuperExpr, &propertyName,
- propertyNameLoc);
+ /*BaseExpr*/0, &propertyName,
+ propertyNameLoc,
+ receiverNameLoc, T, true);
}
// Otherwise, if this is a class method, try dispatching to our
// superclass.
IFace = CurMethod->getClassInterface()->getSuperClass();
}
+ }
if (IFace == 0) {
Diag(receiverNameLoc, diag::err_expected_ident_or_lparen);
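
(The rewritten path supports property dot-syntax on 'super' without
synthesizing an ObjCSuperExpr; hypothetical input.)

    @interface Base
    @property int count;
    @end
    @interface Derived : Base
    @end
    @implementation Derived
    - (void)bump {
      super.count = super.count + 1;  // expression-like property ref on 'super'
    }
    @end
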
@@ -512,16 +626,25 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (Getter || Setter) {
QualType PType;
- if (Getter)
+ ExprValueKind VK = VK_LValue;
+ if (Getter) {
PType = Getter->getSendResultType();
- else {
+ if (!getLangOptions().CPlusPlus &&
+ !PType.hasQualifiers() && PType->isVoidType())
+ VK = VK_RValue;
+ } else {
for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
E = Setter->param_end(); PI != E; ++PI)
PType = (*PI)->getType();
+ VK = VK_LValue;
}
- return Owned(new (Context) ObjCImplicitSetterGetterRefExpr(
- Getter, PType, Setter,
- propertyNameLoc, IFace, receiverNameLoc));
+
+ ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
+
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ PType, VK, OK,
+ propertyNameLoc,
+ receiverNameLoc, IFace));
}
return ExprError(Diag(propertyNameLoc, diag::err_property_not_found)
<< &propertyName << Context.getObjCInterfaceType(IFace));
@@ -536,9 +659,10 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
ReceiverType = ParsedType();
// If the identifier is "super" and there is no trailing dot, we're
- // messaging super.
- if (IsSuper && !HasTrailingDot && S->isInObjcMethodScope())
- return ObjCSuperMessage;
+ // messaging super. If the identifier is "super" and there is a
+ // trailing dot, it's an instance message.
+ if (IsSuper && S->isInObjcMethodScope())
+ return HasTrailingDot? ObjCInstanceMessage : ObjCSuperMessage;
LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
LookupName(Result, S);
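
(The classifier now treats 'super' followed by a dot as an ordinary
instance-message receiver; hypothetical fragment, assuming a 'delegate'
property on the superclass and 'ping' methods.)

    - (void)relay {
      [super ping];           // bare 'super': ObjCSuperMessage
      [super.delegate ping];  // 'super.': the receiver is a property access,
                              // so this is an ObjCInstanceMessage
    }
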
@@ -547,14 +671,15 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
case LookupResult::NotFound:
// Normal name lookup didn't find anything. If we're in an
// Objective-C method, look for ivars. If we find one, we're done!
- // FIXME: This is a hack. Ivar lookup should be part of normal lookup.
+ // FIXME: This is a hack. Ivar lookup should be part of normal
+ // lookup.
if (ObjCMethodDecl *Method = getCurMethodDecl()) {
ObjCInterfaceDecl *ClassDeclared;
if (Method->getClassInterface()->lookupInstanceVariable(Name,
ClassDeclared))
return ObjCInstanceMessage;
}
-
+
// Break out; we'll perform typo correction below.
break;
@@ -566,6 +691,10 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
return ObjCInstanceMessage;
case LookupResult::Found: {
+ // Regardless of whether the identifier names a class, a trailing dot
+ // means this is an instance message.
+ if (HasTrailingDot)
+ return ObjCInstanceMessage;
// We found something. If it's a type, then we have a class
// message. Otherwise, it's an instance message.
NamedDecl *ND = Result.getFoundDecl();
@@ -616,7 +745,6 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
Diag(NameLoc, diag::err_unknown_receiver_suggest)
<< Name << Corrected
<< FixItHint::CreateReplacement(SourceRange(NameLoc), "super");
- Name = Corrected.getAsIdentifierInfo();
return ObjCSuperMessage;
}
}
@@ -626,14 +754,14 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
}
ExprResult Sema::ActOnSuperMessage(Scope *S,
- SourceLocation SuperLoc,
- Selector Sel,
- SourceLocation LBracLoc,
- SourceLocation SelectorLoc,
- SourceLocation RBracLoc,
- MultiExprArg Args) {
+ SourceLocation SuperLoc,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
// Determine whether we are inside a method or not.
- ObjCMethodDecl *Method = getCurMethodDecl();
+ ObjCMethodDecl *Method = tryCaptureObjCSelf();
if (!Method) {
Diag(SuperLoc, diag::err_invalid_receiver_to_message_super);
return ExprError();
@@ -649,7 +777,8 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
ObjCInterfaceDecl *Super = Class->getSuperClass();
if (!Super) {
// The current class does not have a superclass.
- Diag(SuperLoc, diag::error_no_super_class) << Class->getIdentifier();
+ Diag(SuperLoc, diag::error_root_class_cannot_use_super)
+ << Class->getIdentifier();
return ExprError();
}
@@ -661,16 +790,16 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
QualType SuperTy = Context.getObjCInterfaceType(Super);
SuperTy = Context.getObjCObjectPointerType(SuperTy);
return BuildInstanceMessage(0, SuperTy, SuperLoc,
- Sel, /*Method=*/0, LBracLoc, RBracLoc,
- move(Args));
+ Sel, /*Method=*/0,
+ LBracLoc, SelectorLoc, RBracLoc, move(Args));
}
// Since we are in a class method, this is a class message to
// the superclass.
return BuildClassMessage(/*ReceiverTypeInfo=*/0,
Context.getObjCInterfaceType(Super),
- SuperLoc, Sel, /*Method=*/0, LBracLoc, RBracLoc,
- move(Args));
+ SuperLoc, Sel, /*Method=*/0,
+ LBracLoc, SelectorLoc, RBracLoc, move(Args));
}
/// \brief Build an Objective-C class message expression.
@@ -702,27 +831,34 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
///
/// \param Args The message arguments.
ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
- QualType ReceiverType,
- SourceLocation SuperLoc,
- Selector Sel,
- ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
- SourceLocation RBracLoc,
- MultiExprArg ArgsIn) {
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn) {
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin();
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+
if (ReceiverType->isDependentType()) {
// If the receiver type is dependent, we can't type-check anything
// at this point. Build a dependent expression.
unsigned NumArgs = ArgsIn.size();
Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
- return Owned(ObjCMessageExpr::Create(Context, ReceiverType, LBracLoc,
- ReceiverTypeInfo, Sel, /*Method=*/0,
+ return Owned(ObjCMessageExpr::Create(Context, ReceiverType,
+ VK_RValue, LBracLoc, ReceiverTypeInfo,
+ Sel, SelectorLoc, /*Method=*/0,
Args, NumArgs, RBracLoc));
}
- SourceLocation Loc = SuperLoc.isValid()? SuperLoc
- : ReceiverTypeInfo->getTypeLoc().getLocalSourceRange().getBegin();
-
// Find the class to which we are sending this message.
ObjCInterfaceDecl *Class = 0;
const ObjCObjectType *ClassType = ReceiverType->getAs<ObjCObjectType>();
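
(Recovery sketch, hypothetical input: with the new check, a message send whose
opening '[' was dropped now gets a diagnostic plus a fixit instead of a bare
parse failure.)

    Thing *t = Thing alloc];  // error: missing '[' at start of message send;
                              // the fixit inserts '[' before the receiver
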
@@ -757,23 +893,30 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// Check the argument types and determine the result type.
QualType ReturnType;
+ ExprValueKind VK = VK_RValue;
+
unsigned NumArgs = ArgsIn.size();
Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, true,
- LBracLoc, RBracLoc, ReturnType))
+ LBracLoc, RBracLoc, ReturnType, VK))
+ return ExprError();
+
+ if (Method && !Method->getResultType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getResultType(),
+ diag::err_illegal_message_expr_incomplete_type))
return ExprError();
// Construct the appropriate ObjCMessageExpr.
Expr *Result;
if (SuperLoc.isValid())
- Result = ObjCMessageExpr::Create(Context, ReturnType, LBracLoc,
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
SuperLoc, /*IsInstanceSuper=*/false,
- ReceiverType, Sel, Method, Args,
- NumArgs, RBracLoc);
+ ReceiverType, Sel, SelectorLoc,
+ Method, Args, NumArgs, RBracLoc);
else
- Result = ObjCMessageExpr::Create(Context, ReturnType, LBracLoc,
- ReceiverTypeInfo, Sel, Method, Args,
- NumArgs, RBracLoc);
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ ReceiverTypeInfo, Sel, SelectorLoc,
+ Method, Args, NumArgs, RBracLoc);
return MaybeBindToTemporary(Result);
}
@@ -781,12 +924,12 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// ArgExprs is optional - if it is present, the number of expressions
// is obtained from Sel.getNumArgs().
ExprResult Sema::ActOnClassMessage(Scope *S,
- ParsedType Receiver,
- Selector Sel,
- SourceLocation LBracLoc,
- SourceLocation SelectorLoc,
- SourceLocation RBracLoc,
- MultiExprArg Args) {
+ ParsedType Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
TypeSourceInfo *ReceiverTypeInfo;
QualType ReceiverType = GetTypeFromParser(Receiver, &ReceiverTypeInfo);
if (ReceiverType.isNull())
@@ -798,7 +941,7 @@ ExprResult Sema::ActOnClassMessage(Scope *S,
return BuildClassMessage(ReceiverTypeInfo, ReceiverType,
/*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
- LBracLoc, RBracLoc, move(Args));
+ LBracLoc, SelectorLoc, RBracLoc, move(Args));
}
/// \brief Build an Objective-C instance message expression.
@@ -830,13 +973,23 @@ ExprResult Sema::ActOnClassMessage(Scope *S,
///
/// \param Args The message arguments.
ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
- QualType ReceiverType,
- SourceLocation SuperLoc,
- Selector Sel,
- ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
- SourceLocation RBracLoc,
- MultiExprArg ArgsIn) {
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ SourceLocation SelectorLoc,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn) {
+ // The location of the receiver.
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
+
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+
// If we have a receiver expression, perform appropriate promotions
// and determine receiver type.
if (Receiver) {
@@ -847,9 +1000,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return Owned(ObjCMessageExpr::Create(Context, Context.DependentTy,
- LBracLoc, Receiver, Sel,
- /*Method=*/0, Args, NumArgs,
- RBracLoc));
+ VK_RValue, LBracLoc, Receiver, Sel,
+ SelectorLoc, /*Method=*/0,
+ Args, NumArgs, RBracLoc));
}
// If necessary, apply function/array conversion to the receiver.
@@ -858,9 +1011,6 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
ReceiverType = Receiver->getType();
}
- // The location of the receiver.
- SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
-
if (!Method) {
// Handle messages to id.
bool receiverIsId = ReceiverType->isObjCIdType();
@@ -946,6 +1096,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
break;
}
}
+ bool forwardClass = false;
if (!Method) {
// If we have implementations in scope, check "private" methods.
Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
@@ -956,14 +1107,15 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// compatibility. FIXME: should we deviate??
if (OCIType->qual_empty()) {
Method = LookupInstanceMethodInGlobalPool(Sel,
- SourceRange(LBracLoc, RBracLoc));
- if (Method && !OCIType->getInterfaceDecl()->isForwardDecl())
+ SourceRange(LBracLoc, RBracLoc));
+ forwardClass = OCIType->getInterfaceDecl()->isForwardDecl();
+ if (Method && !forwardClass)
Diag(Loc, diag::warn_maynot_respond)
<< OCIType->getInterfaceDecl()->getIdentifier() << Sel;
}
}
}
- if (Method && DiagnoseUseOfDecl(Method, Loc))
+ if (Method && DiagnoseUseOfDecl(Method, Loc, forwardClass))
return ExprError();
} else if (!Context.getObjCIdType().isNull() &&
(ReceiverType->isPointerType() ||
@@ -975,9 +1127,13 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (ReceiverType->isPointerType())
ImpCastExprToType(Receiver, Context.getObjCIdType(),
CK_BitCast);
- else
+ else {
+ // TODO: specialized warning on null receivers?
+ bool IsNull = Receiver->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull);
ImpCastExprToType(Receiver, Context.getObjCIdType(),
- CK_IntegralToPointer);
+ IsNull ? CK_NullToPointer : CK_IntegralToPointer);
+ }
ReceiverType = Receiver->getType();
}
else if (getLangOptions().CPlusPlus &&
@@ -991,7 +1147,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
SuperLoc,
Sel,
Method,
- LBracLoc,
+ LBracLoc,
+ SelectorLoc,
RBracLoc,
move(ArgsIn));
} else {
@@ -1007,26 +1164,29 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
unsigned NumArgs = ArgsIn.size();
Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
QualType ReturnType;
- if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, false,
- LBracLoc, RBracLoc, ReturnType))
+ ExprValueKind VK = VK_RValue;
+ bool ClassMessage = (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType());
+ if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, ClassMessage,
+ LBracLoc, RBracLoc, ReturnType, VK))
return ExprError();
- if (!ReturnType->isVoidType()) {
- if (RequireCompleteType(LBracLoc, ReturnType,
- diag::err_illegal_message_expr_incomplete_type))
- return ExprError();
- }
+ if (Method && !Method->getResultType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getResultType(),
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
// Construct the appropriate ObjCMessageExpr instance.
Expr *Result;
if (SuperLoc.isValid())
- Result = ObjCMessageExpr::Create(Context, ReturnType, LBracLoc,
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
SuperLoc, /*IsInstanceSuper=*/true,
- ReceiverType, Sel, Method,
+ ReceiverType, Sel, SelectorLoc, Method,
Args, NumArgs, RBracLoc);
else
- Result = ObjCMessageExpr::Create(Context, ReturnType, LBracLoc, Receiver,
- Sel, Method, Args, NumArgs, RBracLoc);
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ Receiver, Sel, SelectorLoc, Method,
+ Args, NumArgs, RBracLoc);
return MaybeBindToTemporary(Result);
}
@@ -1045,6 +1205,6 @@ ExprResult Sema::ActOnInstanceMessage(Scope *S,
return BuildInstanceMessage(Receiver, Receiver->getType(),
/*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
- LBracLoc, RBracLoc, move(Args));
+ LBracLoc, SelectorLoc, RBracLoc, move(Args));
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
index a28fd7f..b9a6a57 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -11,8 +11,6 @@
// point is Sema::CheckInitList(), but all of the work is performed
// within the InitListChecker class.
//
-// This file also implements Sema::CheckInitializerTypes.
-//
//===----------------------------------------------------------------------===//
#include "clang/Sema/Designator.h"
@@ -122,7 +120,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, Sema &S) {
/// responsible for moving that Index forward as it consumes elements.
///
/// Each Check* routine also has StructuredList/StructuredIndex
-/// arguments, which contains the current the "structured" (semantic)
+/// arguments, which contain the current "structured" (semantic)
/// initializer list and the index into that initializer list where we
/// are copying initializers as we map them over to the semantic
/// list. Once we have completed our recursive walk of the subobject
@@ -231,11 +229,11 @@ public:
void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
const InitializedEntity &ParentEntity,
- InitListExpr *ILE,
+ InitListExpr *ILE,
bool &RequiresSecondPass) {
SourceLocation Loc = ILE->getSourceRange().getBegin();
unsigned NumInits = ILE->getNumInits();
- InitializedEntity MemberEntity
+ InitializedEntity MemberEntity
= InitializedEntity::InitializeMember(Field, &ParentEntity);
if (Init >= NumInits || !ILE->getInit(Init)) {
// FIXME: We probably don't need to handle references
@@ -254,7 +252,7 @@ void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
hadError = true;
return;
}
-
+
InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
true);
InitializationSequence InitSeq(SemaRef, MemberEntity, Kind, 0, 0);
@@ -263,14 +261,14 @@ void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
hadError = true;
return;
}
-
+
ExprResult MemberInit
= InitSeq.Perform(SemaRef, MemberEntity, Kind, MultiExprArg());
if (MemberInit.isInvalid()) {
hadError = true;
return;
}
-
+
if (hadError) {
// Do nothing
} else if (Init < NumInits) {
@@ -286,14 +284,14 @@ void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
}
} else if (InitListExpr *InnerILE
= dyn_cast<InitListExpr>(ILE->getInit(Init)))
- FillInValueInitializations(MemberEntity, InnerILE,
- RequiresSecondPass);
+ FillInValueInitializations(MemberEntity, InnerILE,
+ RequiresSecondPass);
}
/// Recursively replaces NULL values within the given initializer list
/// with expressions that perform value-initialization of the
/// appropriate type.
-void
+void
InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
InitListExpr *ILE,
bool &RequiresSecondPass) {
@@ -344,17 +342,17 @@ InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
ElementType = AType->getElementType();
if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType))
NumElements = CAType->getSize().getZExtValue();
- ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
0, Entity);
} else if (const VectorType *VType = ILE->getType()->getAs<VectorType>()) {
ElementType = VType->getElementType();
NumElements = VType->getNumElements();
- ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
0, Entity);
} else
ElementType = ILE->getType();
-
+
for (unsigned Init = 0; Init != NumElements; ++Init) {
if (hadError)
return;
@@ -409,7 +407,7 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
unsigned newStructuredIndex = 0;
FullyStructuredList
= getStructuredSubobjectInit(IL, newIndex, T, 0, 0, IL->getSourceRange());
- CheckExplicitInitList(Entity, IL, T, newIndex,
+ CheckExplicitInitList(Entity, IL, T, newIndex,
FullyStructuredList, newStructuredIndex,
/*TopLevelObject=*/true);
@@ -417,7 +415,7 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
bool RequiresSecondPass = false;
FillInValueInitializations(Entity, FullyStructuredList, RequiresSecondPass);
if (RequiresSecondPass && !hadError)
- FillInValueInitializations(Entity, FullyStructuredList,
+ FillInValueInitializations(Entity, FullyStructuredList,
RequiresSecondPass);
}
}
@@ -482,7 +480,7 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
// Check the element types and build the structural subobject.
unsigned StartIndex = Index;
- CheckListElementTypes(Entity, ParentIList, T,
+ CheckListElementTypes(Entity, ParentIList, T,
/*SubobjectIsDesignatorContext=*/false, Index,
StructuredSubobjectInitList,
StructuredSubobjectInitIndex,
@@ -497,16 +495,16 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
= ParentIList->getInit(EndIndex)->getSourceRange().getEnd();
StructuredSubobjectInitList->setRBraceLoc(EndLoc);
}
-
+
// Warn about missing braces.
if (T->isArrayType() || T->isRecordType()) {
SemaRef.Diag(StructuredSubobjectInitList->getLocStart(),
diag::warn_missing_braces)
<< StructuredSubobjectInitList->getSourceRange()
- << FixItHint::CreateInsertion(StructuredSubobjectInitList->getLocStart(),
+ << FixItHint::CreateInsertion(StructuredSubobjectInitList->getLocStart(),
"{")
<< FixItHint::CreateInsertion(SemaRef.PP.getLocForEndOfToken(
- StructuredSubobjectInitList->getLocEnd()),
+ StructuredSubobjectInitList->getLocEnd()),
"}");
}
}
@@ -520,7 +518,7 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
assert(IList->isExplicit() && "Illegal Implicit InitListExpr");
SyntacticToSemantic[IList] = StructuredList;
StructuredList->setSyntacticForm(IList);
- CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
+ CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
Index, StructuredList, StructuredIndex, TopLevelObject);
QualType ExprTy = T.getNonLValueExprType(SemaRef.Context);
IList->setType(ExprTy);
@@ -585,7 +583,7 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
CheckScalarType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
} else if (DeclType->isVectorType()) {
- CheckVectorType(Entity, IList, DeclType, Index,
+ CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
} else if (DeclType->isAggregateType()) {
if (DeclType->isRecordType()) {
@@ -598,7 +596,7 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
llvm::APSInt Zero(
SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
false);
- CheckArrayType(Entity, IList, DeclType, Zero,
+ CheckArrayType(Entity, IList, DeclType, Zero,
SubobjectIsDesignatorContext, Index,
StructuredList, StructuredIndex);
} else
@@ -658,7 +656,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
UpdateStructuredListElement(StructuredList, StructuredIndex, Str);
++Index;
} else if (ElemType->isScalarType()) {
- CheckScalarType(Entity, IList, ElemType, Index,
+ CheckScalarType(Entity, IList, ElemType, Index,
StructuredList, StructuredIndex);
} else if (ElemType->isReferenceType()) {
CheckReferenceType(Entity, IList, ElemType, Index,
@@ -672,17 +670,17 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// member, the member is initialized. [...]
// FIXME: Better EqualLoc?
- InitializationKind Kind =
+ InitializationKind Kind =
InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation());
InitializationSequence Seq(SemaRef, Entity, Kind, &expr, 1);
-
+
if (Seq) {
- ExprResult Result =
+ ExprResult Result =
Seq.Perform(SemaRef, Entity, Kind, MultiExprArg(&expr, 1));
if (Result.isInvalid())
hadError = true;
-
- UpdateStructuredListElement(StructuredList, StructuredIndex,
+
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
Result.takeAs<Expr>());
++Index;
return;
@@ -699,7 +697,9 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// initial value of the object, including unnamed members, is
// that of the expression.
if ((ElemType->isRecordType() || ElemType->isVectorType()) &&
- SemaRef.Context.hasSameUnqualifiedType(expr->getType(), ElemType)) {
+ SemaRef.CheckSingleAssignmentConstraints(ElemType, expr)
+ == Sema::Compatible) {
+ SemaRef.DefaultFunctionArrayLvalueConversion(expr);
UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
++Index;
return;
@@ -721,9 +721,8 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
} else {
// We cannot initialize this element, so let
// PerformCopyInitialization produce the appropriate diagnostic.
- SemaRef.PerformCopyInitialization(Entity, SourceLocation(),
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(),
SemaRef.Owned(expr));
- IList->setInit(Index, 0);
hadError = true;
++Index;
++StructuredIndex;
@@ -736,48 +735,7 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
unsigned &Index,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
- if (Index < IList->getNumInits()) {
- Expr *expr = IList->getInit(Index);
- if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) {
- SemaRef.Diag(SubIList->getLocStart(),
- diag::warn_many_braces_around_scalar_init)
- << SubIList->getSourceRange();
-
- CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
- StructuredIndex);
- return;
- } else if (isa<DesignatedInitExpr>(expr)) {
- SemaRef.Diag(expr->getSourceRange().getBegin(),
- diag::err_designator_for_scalar_init)
- << DeclType << expr->getSourceRange();
- hadError = true;
- ++Index;
- ++StructuredIndex;
- return;
- }
-
- ExprResult Result =
- SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(),
- SemaRef.Owned(expr));
-
- Expr *ResultExpr = 0;
-
- if (Result.isInvalid())
- hadError = true; // types weren't compatible.
- else {
- ResultExpr = Result.takeAs<Expr>();
-
- if (ResultExpr != expr) {
- // The type was promoted, update initializer list.
- IList->setInit(Index, ResultExpr);
- }
- }
- if (hadError)
- ++StructuredIndex;
- else
- UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
- ++Index;
- } else {
+ if (Index >= IList->getNumInits()) {
SemaRef.Diag(IList->getLocStart(), diag::err_empty_scalar_initializer)
<< IList->getSourceRange();
hadError = true;
@@ -785,6 +743,47 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
++StructuredIndex;
return;
}
+
+ Expr *expr = IList->getInit(Index);
+ if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) {
+ SemaRef.Diag(SubIList->getLocStart(),
+ diag::warn_many_braces_around_scalar_init)
+ << SubIList->getSourceRange();
+
+ CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
+ StructuredIndex);
+ return;
+ } else if (isa<DesignatedInitExpr>(expr)) {
+ SemaRef.Diag(expr->getSourceRange().getBegin(),
+ diag::err_designator_for_scalar_init)
+ << DeclType << expr->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(),
+ SemaRef.Owned(expr));
+
+ Expr *ResultExpr = 0;
+
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.takeAs<Expr>();
+
+ if (ResultExpr != expr) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
+ }
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
+ ++Index;
}
void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
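
(The restructured CheckScalarType, early return first and then the
straight-line path, still emits the classic diagnostics; hypothetical C
inputs.)

    int a = { { 3 } };   // warning: too many braces around scalar initializer
    int b = { .x = 1 };  // error: designator in initializer for scalar type
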
@@ -839,66 +838,95 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
unsigned &Index,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
- if (Index < IList->getNumInits()) {
- const VectorType *VT = DeclType->getAs<VectorType>();
- unsigned maxElements = VT->getNumElements();
- unsigned numEltsInit = 0;
- QualType elementType = VT->getElementType();
-
- if (!SemaRef.getLangOptions().OpenCL) {
- InitializedEntity ElementEntity =
- InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
-
- for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) {
- // Don't attempt to go past the end of the init list
- if (Index >= IList->getNumInits())
- break;
-
- ElementEntity.setElementIndex(Index);
- CheckSubElementType(ElementEntity, IList, elementType, Index,
- StructuredList, StructuredIndex);
- }
- } else {
- InitializedEntity ElementEntity =
- InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
-
- // OpenCL initializers allows vectors to be constructed from vectors.
- for (unsigned i = 0; i < maxElements; ++i) {
- // Don't attempt to go past the end of the init list
- if (Index >= IList->getNumInits())
- break;
-
- ElementEntity.setElementIndex(Index);
-
- QualType IType = IList->getInit(Index)->getType();
- if (!IType->isVectorType()) {
- CheckSubElementType(ElementEntity, IList, elementType, Index,
- StructuredList, StructuredIndex);
- ++numEltsInit;
- } else {
- QualType VecType;
- const VectorType *IVT = IType->getAs<VectorType>();
- unsigned numIElts = IVT->getNumElements();
-
- if (IType->isExtVectorType())
- VecType = SemaRef.Context.getExtVectorType(elementType, numIElts);
- else
- VecType = SemaRef.Context.getVectorType(elementType, numIElts,
- IVT->getAltiVecSpecific());
- CheckSubElementType(ElementEntity, IList, VecType, Index,
- StructuredList, StructuredIndex);
- numEltsInit += numIElts;
+ if (Index >= IList->getNumInits())
+ return;
+
+ const VectorType *VT = DeclType->getAs<VectorType>();
+ unsigned maxElements = VT->getNumElements();
+ unsigned numEltsInit = 0;
+ QualType elementType = VT->getElementType();
+
+ if (!SemaRef.getLangOptions().OpenCL) {
+ // If the initializing element is a vector, try to copy-initialize
+ // instead of breaking it apart (which is doomed to failure anyway).
+ Expr *Init = IList->getInit(Index);
+ if (!isa<InitListExpr>(Init) && Init->getType()->isVectorType()) {
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, Init->getLocStart(),
+ SemaRef.Owned(Init));
+
+ Expr *ResultExpr = 0;
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.takeAs<Expr>();
+
+ if (ResultExpr != Init) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
}
}
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
+ ++Index;
+ return;
}
- // OpenCL requires all elements to be initialized.
- if (numEltsInit != maxElements)
- if (SemaRef.getLangOptions().OpenCL)
- SemaRef.Diag(IList->getSourceRange().getBegin(),
- diag::err_vector_incorrect_num_initializers)
- << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits())
+ break;
+
+ ElementEntity.setElementIndex(Index);
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+ return;
+ }
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ // OpenCL initializers allow vectors to be constructed from vectors.
+ for (unsigned i = 0; i < maxElements; ++i) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits())
+ break;
+
+ ElementEntity.setElementIndex(Index);
+
+ QualType IType = IList->getInit(Index)->getType();
+ if (!IType->isVectorType()) {
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++numEltsInit;
+ } else {
+ QualType VecType;
+ const VectorType *IVT = IType->getAs<VectorType>();
+ unsigned numIElts = IVT->getNumElements();
+
+ if (IType->isExtVectorType())
+ VecType = SemaRef.Context.getExtVectorType(elementType, numIElts);
+ else
+ VecType = SemaRef.Context.getVectorType(elementType, numIElts,
+ IVT->getVectorKind());
+ CheckSubElementType(ElementEntity, IList, VecType, Index,
+ StructuredList, StructuredIndex);
+ numEltsInit += numIElts;
+ }
}
+
+ // OpenCL requires all elements to be initialized.
+ if (numEltsInit != maxElements)
+ if (SemaRef.getLangOptions().OpenCL)
+ SemaRef.Diag(IList->getSourceRange().getBegin(),
+ diag::err_vector_incorrect_num_initializers)
+ << (numEltsInit < maxElements) << maxElements << numEltsInit;
}
void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
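
(The non-OpenCL fast path now copy-initializes when the initializing element
is itself a vector; hypothetical input using the ext_vector_type attribute.)

    typedef float float4 __attribute__((ext_vector_type(4)));
    float4 dup(float4 v) {
      float4 w = { v };  // whole-vector initializer: copy-initialized,
      return w;          // not broken apart element by element
    }
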
@@ -945,7 +973,7 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
if (const ConstantArrayType *CAT =
SemaRef.Context.getAsConstantArrayType(DeclType)) {
maxElements = CAT->getSize();
- elementIndex.extOrTrunc(maxElements.getBitWidth());
+ elementIndex = elementIndex.extOrTrunc(maxElements.getBitWidth());
elementIndex.setIsUnsigned(maxElements.isUnsigned());
maxElementsKnown = true;
}
@@ -972,9 +1000,9 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
}
if (elementIndex.getBitWidth() > maxElements.getBitWidth())
- maxElements.extend(elementIndex.getBitWidth());
+ maxElements = maxElements.extend(elementIndex.getBitWidth());
else if (elementIndex.getBitWidth() < maxElements.getBitWidth())
- elementIndex.extend(maxElements.getBitWidth());
+ elementIndex = elementIndex.extend(maxElements.getBitWidth());
elementIndex.setIsUnsigned(maxElements.isUnsigned());
// If the array is of incomplete type, keep track of the number of
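
(These APSInt edits track the LLVM API change in this import: extend and
extOrTrunc now return the adjusted value instead of mutating in place, so the
result must be reassigned. A minimal sketch.)

    #include "llvm/ADT/APSInt.h"
    void widen() {
      llvm::APSInt Index(32, /*isUnsigned=*/true);
      // Old, removed style mutated in place: Index.extOrTrunc(64);
      Index = Index.extOrTrunc(64);  // new API returns the adjusted value
      Index = Index.extend(128);     // extend likewise must be reassigned
    }
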
@@ -991,7 +1019,7 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
break;
InitializedEntity ElementEntity =
- InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex,
+ InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex,
Entity);
// Check this element.
CheckSubElementType(ElementEntity, IList, elementType, Index,
@@ -1118,7 +1146,7 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
}
// Emit warnings for missing struct field initializers.
- if (InitializedSomething && CheckForMissingFields && Field != FieldEnd &&
+ if (InitializedSomething && CheckForMissingFields && Field != FieldEnd &&
!Field->getType()->isIncompleteArrayType() && !DeclType->isUnionType()) {
// It is possible we have one or more unnamed bitfields remaining.
// Find first (if any) named field and emit warning.
@@ -1158,12 +1186,12 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
-
+
if (isa<InitListExpr>(IList->getInit(Index)))
- CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
else
- CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
+ CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
}
@@ -1171,34 +1199,25 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
/// anonymous struct or union into a series of field designators that
/// refers to the field within the appropriate subobject.
///
-/// Field/FieldIndex will be updated to point to the (new)
-/// currently-designated field.
static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
DesignatedInitExpr *DIE,
unsigned DesigIdx,
- FieldDecl *Field,
- RecordDecl::field_iterator &FieldIter,
- unsigned &FieldIndex) {
+ IndirectFieldDecl *IndirectField) {
typedef DesignatedInitExpr::Designator Designator;
- // Build the path from the current object to the member of the
- // anonymous struct/union (backwards).
- llvm::SmallVector<FieldDecl *, 4> Path;
- SemaRef.BuildAnonymousStructUnionMemberPath(Field, Path);
-
// Build the replacement designators.
llvm::SmallVector<Designator, 4> Replacements;
- for (llvm::SmallVector<FieldDecl *, 4>::reverse_iterator
- FI = Path.rbegin(), FIEnd = Path.rend();
- FI != FIEnd; ++FI) {
- if (FI + 1 == FIEnd)
+ for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(),
+ PE = IndirectField->chain_end(); PI != PE; ++PI) {
+ if (PI + 1 == PE)
Replacements.push_back(Designator((IdentifierInfo *)0,
DIE->getDesignator(DesigIdx)->getDotLoc(),
DIE->getDesignator(DesigIdx)->getFieldLoc()));
else
Replacements.push_back(Designator((IdentifierInfo *)0, SourceLocation(),
SourceLocation()));
- Replacements.back().setField(*FI);
+ assert(isa<FieldDecl>(*PI));
+ Replacements.back().setField(cast<FieldDecl>(*PI));
}
// Expand the current designator into the set of replacement
@@ -1206,23 +1225,20 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
// member of the anonymous struct/union is actually stored.
DIE->ExpandDesignator(SemaRef.Context, DesigIdx, &Replacements[0],
&Replacements[0] + Replacements.size());
+}
- // Update FieldIter/FieldIndex;
- RecordDecl *Record = cast<RecordDecl>(Path.back()->getDeclContext());
- FieldIter = Record->field_begin();
- FieldIndex = 0;
- for (RecordDecl::field_iterator FEnd = Record->field_end();
- FieldIter != FEnd; ++FieldIter) {
- if (FieldIter->isUnnamedBitfield())
- continue;
-
- if (*FieldIter == Path.back())
- return;
-
- ++FieldIndex;
+/// \brief Given an implicit anonymous field, search for the IndirectFieldDecl
+/// that corresponds to FieldName.
+static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField,
+ IdentifierInfo *FieldName) {
+ assert(AnonField->isAnonymousStructOrUnion());
+ Decl *NextDecl = AnonField->getNextDeclInContext();
+ while (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(NextDecl)) {
+ if (FieldName && FieldName == IF->getAnonField()->getIdentifier())
+ return IF;
+ NextDecl = NextDecl->getNextDeclInContext();
}
-
- assert(false && "Unable to find anonymous struct/union field");
+ return 0;
}
/// @brief Check the well-formedness of a C99 designated initializer.
@@ -1343,7 +1359,19 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (Field->isUnnamedBitfield())
continue;
- if (KnownField == *Field || Field->getIdentifier() == FieldName)
+ // If we find a field representing an anonymous field, look in the
+ // IndirectFieldDecls that follow it for the designated initializer.
+ if (!KnownField && Field->isAnonymousStructOrUnion()) {
+ if (IndirectFieldDecl *IF =
+ FindIndirectFieldDesignator(*Field, FieldName)) {
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IF);
+ D = DIE->getDesignator(DesigIdx);
+ break;
+ }
+ }
+ if (KnownField && KnownField == *Field)
+ break;
+ if (FieldName && FieldName == Field->getIdentifier())
break;
++FieldIndex;
@@ -1360,19 +1388,19 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (Lookup.first == Lookup.second) {
// Name lookup didn't find anything. Determine whether this
// was a typo for another field name.
- LookupResult R(SemaRef, FieldName, D->getFieldLoc(),
+ LookupResult R(SemaRef, FieldName, D->getFieldLoc(),
Sema::LookupMemberName);
if (SemaRef.CorrectTypo(R, /*Scope=*/0, /*SS=*/0, RT->getDecl(), false,
- Sema::CTC_NoKeywords) &&
+ Sema::CTC_NoKeywords) &&
(ReplacementField = R.getAsSingle<FieldDecl>()) &&
ReplacementField->getDeclContext()->getRedeclContext()
->Equals(RT->getDecl())) {
- SemaRef.Diag(D->getFieldLoc(),
+ SemaRef.Diag(D->getFieldLoc(),
diag::err_field_designator_unknown_suggest)
<< FieldName << CurrentObjectType << R.getLookupName()
<< FixItHint::CreateReplacement(D->getFieldLoc(),
R.getLookupName().getAsString());
- SemaRef.Diag(ReplacementField->getLocation(),
+ SemaRef.Diag(ReplacementField->getLocation(),
diag::note_previous_decl)
<< ReplacementField->getDeclName();
} else {
@@ -1381,9 +1409,6 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
++Index;
return true;
}
- } else if (!KnownField) {
- // Determine whether we found a field at all.
- ReplacementField = dyn_cast<FieldDecl>(*Lookup.first);
}
if (!ReplacementField) {
@@ -1396,16 +1421,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true;
}
- if (!KnownField &&
- cast<RecordDecl>((ReplacementField)->getDeclContext())
- ->isAnonymousStructOrUnion()) {
- // Handle an field designator that refers to a member of an
- // anonymous struct or union.
- ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx,
- ReplacementField,
- Field, FieldIndex);
- D = DIE->getDesignator(DesigIdx);
- } else if (!KnownField) {
+ if (!KnownField) {
// The replacement field comes from typo correction; find it
// in the list of fields.
FieldIndex = 0;
@@ -1414,19 +1430,13 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (Field->isUnnamedBitfield())
continue;
- if (ReplacementField == *Field ||
+ if (ReplacementField == *Field ||
Field->getIdentifier() == ReplacementField->getIdentifier())
break;
++FieldIndex;
}
}
- } else if (!KnownField &&
- cast<RecordDecl>((*Field)->getDeclContext())
- ->isAnonymousStructOrUnion()) {
- ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, *Field,
- Field, FieldIndex);
- D = DIE->getDesignator(DesigIdx);
}
// All of the fields of a union are located at the same place in
@@ -1461,7 +1471,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Invalid = true;
}
- if (!hadError && !isa<InitListExpr>(DIE->getInit())) {
+ if (!hadError && !isa<InitListExpr>(DIE->getInit()) &&
+ !isa<StringLiteral>(DIE->getInit())) {
// The initializer is not an initializer list.
SemaRef.Diag(DIE->getInit()->getSourceRange().getBegin(),
diag::err_flexible_array_init_needs_braces)
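Illustration (not part of the diff): the added StringLiteral check lets a
designated flexible-array-member initializer be a bare string literal instead
of requiring braces. Hypothetical example (flexible array members are a C99
feature, a Clang extension in C++):

    struct Packet {
      unsigned len;
      char payload[];                  // flexible array member
    };
    struct Packet p = { .len = 3, .payload = "hi" };  // braces no longer demanded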
@@ -1511,11 +1522,11 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Recurse to check later designated subobjects.
QualType FieldType = (*Field)->getType();
unsigned newStructuredIndex = FieldIndex;
-
+
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
- if (CheckDesignatedInitializer(MemberEntity, IList, DIE, DesigIdx + 1,
- FieldType, 0, 0, Index,
+ if (CheckDesignatedInitializer(MemberEntity, IList, DIE, DesigIdx + 1,
+ FieldType, 0, 0, Index,
StructuredList, newStructuredIndex,
true, false))
return true;
@@ -1544,7 +1555,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Check the remaining fields within this class/struct/union subobject.
bool prevHadError = hadError;
-
+
CheckStructUnionTypes(Entity, IList, CurrentObjectType, Field, false, Index,
StructuredList, FieldIndex);
return hadError && !prevHadError;
@@ -1582,22 +1593,29 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
} else {
assert(D->isArrayRangeDesignator() && "Need array-range designator");
-
DesignatedStartIndex =
DIE->getArrayRangeStart(*D)->EvaluateAsInt(SemaRef.Context);
DesignatedEndIndex =
DIE->getArrayRangeEnd(*D)->EvaluateAsInt(SemaRef.Context);
IndexExpr = DIE->getArrayRangeEnd(*D);
- if (DesignatedStartIndex.getZExtValue() !=DesignatedEndIndex.getZExtValue())
+ // Codegen can't handle evaluating array range designators that have side
+ // effects, because we replicate the AST value for each initialized element.
+ // As such, set the sawArrayRangeDesignator() bit if we initialize multiple
+ // elements with something that has a side effect, so codegen can emit an
+ // "error unsupported" error instead of miscompiling the app.
+ if (DesignatedStartIndex.getZExtValue()!=DesignatedEndIndex.getZExtValue()&&
+ DIE->getInit()->HasSideEffects(SemaRef.Context))
FullyStructuredList->sawArrayRangeDesignator();
}
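A sketch of the case the new comment describes (not part of the diff): a GNU
array-range designator whose initializer has a side effect would have that
value replicated once per element, so it is now flagged for codegen to reject
rather than miscompile:

    int next(void);
    void fill(void) {
      int a[4] = { [0 ... 2] = next() };  // next() would be re-evaluated per element
    }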
if (isa<ConstantArrayType>(AT)) {
llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
- DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedStartIndex
+ = DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
- DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
if (DesignatedEndIndex >= MaxElements) {
SemaRef.Diag(IndexExpr->getSourceRange().getBegin(),
@@ -1610,10 +1628,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
} else {
// Make sure the bit-widths and signedness match.
if (DesignatedStartIndex.getBitWidth() > DesignatedEndIndex.getBitWidth())
- DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
else if (DesignatedStartIndex.getBitWidth() <
DesignatedEndIndex.getBitWidth())
- DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
+ DesignatedStartIndex
+ = DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
DesignatedStartIndex.setIsUnsigned(true);
DesignatedEndIndex.setIsUnsigned(true);
}
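Context for these hunks (an assumption drawn from the surrounding changes):
LLVM's APInt/APSInt extend and extOrTrunc became const, value-returning
methods rather than in-place mutators, so each call site must assign the
result back. Sketch of the idiom:

    #include "llvm/ADT/APSInt.h"
    void widen(llvm::APSInt &Start, const llvm::APSInt &End) {
      // Post-change API: extend() returns the widened value; discarding the
      // result (the old in-place style) would silently do nothing.
      Start = Start.extend(End.getBitWidth());
    }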
@@ -1630,7 +1650,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Move to the next designator
unsigned ElementIndex = DesignatedStartIndex.getZExtValue();
unsigned OldIndex = Index;
-
+
InitializedEntity ElementEntity =
InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
@@ -1638,10 +1658,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Recurse to check later designated subobjects.
QualType ElementType = AT->getElementType();
Index = OldIndex;
-
+
ElementEntity.setElementIndex(ElementIndex);
- if (CheckDesignatedInitializer(ElementEntity, IList, DIE, DesigIdx + 1,
- ElementType, 0, 0, Index,
+ if (CheckDesignatedInitializer(ElementEntity, IList, DIE, DesigIdx + 1,
+ ElementType, 0, 0, Index,
StructuredList, ElementIndex,
(DesignatedStartIndex == DesignatedEndIndex),
false))
@@ -1666,7 +1686,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Check the remaining elements within this array subobject.
bool prevHadError = hadError;
- CheckArrayType(Entity, IList, CurrentObjectType, DesignatedStartIndex,
+ CheckArrayType(Entity, IList, CurrentObjectType, DesignatedStartIndex,
/*SubobjectIsDesignatorContext=*/false, Index,
StructuredList, ElementIndex);
return hadError && !prevHadError;
@@ -1812,9 +1832,9 @@ CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
}
ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
- SourceLocation Loc,
- bool GNUSyntax,
- ExprResult Init) {
+ SourceLocation Loc,
+ bool GNUSyntax,
+ ExprResult Init) {
typedef DesignatedInitExpr::Designator ASTDesignator;
bool Invalid = false;
@@ -1865,9 +1885,9 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
if (StartDependent || EndDependent) {
// Nothing to compute.
} else if (StartValue.getBitWidth() > EndValue.getBitWidth())
- EndValue.extend(StartValue.getBitWidth());
+ EndValue = EndValue.extend(StartValue.getBitWidth());
else if (StartValue.getBitWidth() < EndValue.getBitWidth())
- StartValue.extend(EndValue.getBitWidth());
+ StartValue = StartValue.extend(EndValue.getBitWidth());
if (!StartDependent && !EndDependent && EndValue < StartValue) {
Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
@@ -1899,6 +1919,11 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
Designators.data(), Designators.size(),
InitExpressions.data(), InitExpressions.size(),
Loc, GNUSyntax, Init.takeAs<Expr>());
+
+ if (getLangOptions().CPlusPlus)
+ Diag(DIE->getLocStart(), diag::ext_designated_init)
+ << DIE->getSourceRange();
+
return Owned(DIE);
}
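Illustration (not part of the diff): with the added diagnostic, C++ code using
designated initializers now draws an extension warning instead of being
accepted silently:

    struct Point { int x, y; };
    Point p = { .x = 1, .y = 2 };  // warning: designated initializers are a C99 feature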
@@ -1915,9 +1940,9 @@ bool Sema::CheckInitList(const InitializedEntity &Entity,
// Initialization entity
//===----------------------------------------------------------------------===//
-InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
+InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
const InitializedEntity &Parent)
- : Parent(&Parent), Index(Index)
+ : Parent(&Parent), Index(Index)
{
if (const ArrayType *AT = Context.getAsArrayType(Parent.getType())) {
Kind = EK_ArrayElement;
@@ -1928,7 +1953,7 @@ InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
}
}
-InitializedEntity InitializedEntity::InitializeBase(ASTContext &Context,
+InitializedEntity InitializedEntity::InitializeBase(ASTContext &Context,
CXXBaseSpecifier *Base,
bool IsInheritedVirtualBase)
{
@@ -1937,7 +1962,7 @@ InitializedEntity InitializedEntity::InitializeBase(ASTContext &Context,
Result.Base = reinterpret_cast<uintptr_t>(Base);
if (IsInheritedVirtualBase)
Result.Base |= 0x01;
-
+
Result.Type = Base->getType();
return Result;
}
@@ -1963,7 +1988,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_BlockElement:
return DeclarationName();
}
-
+
// Silence GCC warning
return DeclarationName();
}
@@ -1985,7 +2010,7 @@ DeclaratorDecl *InitializedEntity::getDecl() const {
case EK_BlockElement:
return 0;
}
-
+
// Silence GCC warning
return 0;
}
@@ -1995,7 +2020,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Result:
case EK_Exception:
return LocAndNRVO.NRVO;
-
+
case EK_Variable:
case EK_Parameter:
case EK_Member:
@@ -2035,7 +2060,7 @@ void InitializationSequence::Step::Destroy() {
case SK_StringInit:
case SK_ObjCObjectConversion:
break;
-
+
case SK_ConversionSequence:
delete ICS;
}
@@ -2048,7 +2073,7 @@ bool InitializationSequence::isDirectReferenceBinding() const {
bool InitializationSequence::isAmbiguous() const {
if (getKind() != FailedSequence)
return false;
-
+
switch (getFailureKind()) {
case FK_TooManyInitsForReference:
case FK_ArrayNeedsInitList:
@@ -2066,13 +2091,13 @@ bool InitializationSequence::isAmbiguous() const {
case FK_DefaultInitOfConst:
case FK_Incomplete:
return false;
-
+
case FK_ReferenceInitOverloadFailed:
case FK_UserConversionOverloadFailed:
case FK_ConstructorOverloadFailed:
return FailedOverloadResult == OR_Ambiguous;
}
-
+
return false;
}
@@ -2091,7 +2116,7 @@ void InitializationSequence::AddAddressOverloadResolutionStep(
Steps.push_back(S);
}
-void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
+void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
ExprValueKind VK) {
Step S;
switch (VK) {
@@ -2104,7 +2129,7 @@ void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
Steps.push_back(S);
}
-void InitializationSequence::AddReferenceBindingStep(QualType T,
+void InitializationSequence::AddReferenceBindingStep(QualType T,
bool BindingTemporary) {
Step S;
S.Kind = BindingTemporary? SK_BindReferenceToTemporary : SK_BindReference;
@@ -2166,7 +2191,7 @@ void InitializationSequence::AddListInitializationStep(QualType T) {
Steps.push_back(S);
}
-void
+void
InitializationSequence::AddConstructorInitializationStep(
CXXConstructorDecl *Constructor,
AccessSpecifier Access,
@@ -2207,7 +2232,7 @@ void InitializationSequence::AddObjCObjectConversionStep(QualType T) {
Steps.push_back(S);
}
-void InitializationSequence::SetOverloadFailure(FailureKind Failure,
+void InitializationSequence::SetOverloadFailure(FailureKind Failure,
OverloadingResult Result) {
SequenceKind = FailedSequence;
this->Failure = Failure;
@@ -2218,8 +2243,8 @@ void InitializationSequence::SetOverloadFailure(FailureKind Failure,
// Attempt initialization
//===----------------------------------------------------------------------===//
-/// \brief Attempt list initialization (C++0x [dcl.init.list])
-static void TryListInitialization(Sema &S,
+/// \brief Attempt list initialization (C++0x [dcl.init.list])
+static void TryListInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
InitListExpr *InitList,
@@ -2235,7 +2260,7 @@ static void TryListInitialization(Sema &S,
QualType DestType = Entity.getType();
// C++ [dcl.init]p13:
- // If T is a scalar type, then a declaration of the form
+ // If T is a scalar type, then a declaration of the form
//
// T x = { a };
//
@@ -2282,7 +2307,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
bool DerivedToBase;
bool ObjCConversion;
- assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
+ assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
T1, T2, DerivedToBase,
ObjCConversion) &&
"Must have incompatible references when binding via conversion");
@@ -2297,7 +2322,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
bool AllowExplicit = Kind.getKind() == InitializationKind::IK_Direct;
-
+
const RecordType *T1RecordType = 0;
if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
!S.RequireCompleteType(Kind.getLocation(), T1, 0)) {
@@ -2319,22 +2344,24 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
ConstructorTmpl->getTemplatedDecl());
else
Constructor = cast<CXXConstructorDecl>(D);
-
+
if (!Constructor->isInvalidDecl() &&
Constructor->isConvertingConstructor(AllowExplicit)) {
if (ConstructorTmpl)
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
- &Initializer, 1, CandidateSet);
+ &Initializer, 1, CandidateSet,
+ /*SuppressUserConversions=*/true);
else
S.AddOverloadCandidate(Constructor, FoundDecl,
- &Initializer, 1, CandidateSet);
+ &Initializer, 1, CandidateSet,
+ /*SuppressUserConversions=*/true);
}
- }
+ }
}
if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
return OR_No_Viable_Function;
-
+
const RecordType *T2RecordType = 0;
if ((T2RecordType = T2->getAs<RecordType>()) &&
!S.RequireCompleteType(Kind.getLocation(), T2, 0)) {
@@ -2342,11 +2369,6 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// functions.
CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
- // Determine the type we are converting to. If we are allowed to
- // convert to an rvalue, take the type that the destination type
- // refers to.
- QualType ToType = AllowRValues? cv1T1 : DestType;
-
const UnresolvedSetImpl *Conversions
= T2RecordDecl->getVisibleConversionFunctions();
for (UnresolvedSetImpl::const_iterator I = Conversions->begin(),
@@ -2355,41 +2377,41 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
-
+
FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
CXXConversionDecl *Conv;
if (ConvTemplate)
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
Conv = cast<CXXConversionDecl>(D);
-
+
// If the conversion function doesn't return a reference type,
// it can't be considered for this conversion unless we're allowed to
// consider rvalues.
- // FIXME: Do we need to make sure that we only consider conversion
- // candidates with reference-compatible results? That might be needed to
+ // FIXME: Do we need to make sure that we only consider conversion
+ // candidates with reference-compatible results? That might be needed to
// break recursion.
if ((AllowExplicit || !Conv->isExplicit()) &&
(AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
ActingDC, Initializer,
- ToType, CandidateSet);
+ DestType, CandidateSet);
else
S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
- Initializer, ToType, CandidateSet);
+ Initializer, DestType, CandidateSet);
}
}
}
if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
return OR_No_Viable_Function;
-
+
SourceLocation DeclLoc = Initializer->getLocStart();
- // Perform overload resolution. If it fails, return the failed result.
+ // Perform overload resolution. If it fails, return the failed result.
OverloadCandidateSet::iterator Best;
- if (OverloadingResult Result
- = CandidateSet.BestViableFunction(S, DeclLoc, Best))
+ if (OverloadingResult Result
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true))
return Result;
FunctionDecl *Function = Best->Function;
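Illustration (not part of the diff): this function handles reference bindings
that go through a user-defined conversion; the edits suppress user conversions
on the candidates' arguments and run BestViableFunction in user-defined-
conversion mode. A minimal pattern it resolves, with hypothetical types:

    struct B { };
    struct A { operator B&(); };
    A a;
    B &b = a;   // bound through A::operator B&(), selected by overload resolution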
@@ -2404,7 +2426,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
Sequence.AddUserConversionStep(Function, Best->FoundDecl,
T2.getNonLValueExprType(S.Context));
- // Determine whether we need to perform derived-to-base or
+ // Determine whether we need to perform derived-to-base or
// cv-qualification adjustments.
ExprValueKind VK = VK_RValue;
if (T2->isLValueReferenceType())
@@ -2415,7 +2437,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
bool NewDerivedToBase = false;
bool NewObjCConversion = false;
Sema::ReferenceCompareResult NewRefRelationship
- = S.CompareReferenceRelationship(DeclLoc, T1,
+ = S.CompareReferenceRelationship(DeclLoc, T1,
T2.getNonLValueExprType(S.Context),
NewDerivedToBase, NewObjCConversion);
if (NewRefRelationship == Sema::Ref_Incompatible) {
@@ -2431,7 +2453,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
} else if (NewDerivedToBase)
Sequence.AddDerivedToBaseCastStep(
S.Context.getQualifiedType(T1,
- T2.getNonReferenceType().getQualifiers()),
+ T2.getNonReferenceType().getQualifiers()),
VK);
else if (NewObjCConversion)
Sequence.AddObjCObjectConversionStep(
@@ -2440,13 +2462,13 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
if (cv1T1.getQualifiers() != T2.getNonReferenceType().getQualifiers())
Sequence.AddQualificationConversionStep(cv1T1, VK);
-
+
Sequence.AddReferenceBindingStep(cv1T1, !T2->isReferenceType());
return OR_Success;
}
-
-/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
-static void TryReferenceInitialization(Sema &S,
+
+/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
+static void TryReferenceInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr *Initializer,
@@ -2467,18 +2489,17 @@ static void TryReferenceInitialization(Sema &S,
// type of the resulting function.
if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
DeclAccessPair Found;
- FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Initializer,
- T1,
- false,
- Found);
- if (!Fn) {
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Initializer,
+ T1,
+ false,
+ Found)) {
+ Sequence.AddAddressOverloadResolutionStep(Fn, Found);
+ cv2T2 = Fn->getType();
+ T2 = cv2T2.getUnqualifiedType();
+ } else if (!T1->isRecordType()) {
Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
return;
}
-
- Sequence.AddAddressOverloadResolutionStep(Fn, Found);
- cv2T2 = Fn->getType();
- T2 = cv2T2.getUnqualifiedType();
}
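Illustration (not part of the diff): the restructured branch resolves the
overload set eagerly when it can, and otherwise falls through (for class T1)
to the conversion-function path instead of failing outright. The eager case:

    void f(int);
    void f(double);
    void (&fr)(int) = f;   // f resolved against the reference's function type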
// Compute some basic properties of the types and the initializer.
@@ -2492,10 +2513,10 @@ static void TryReferenceInitialization(Sema &S,
ObjCConversion);
// C++0x [dcl.init.ref]p5:
- // A reference to type "cv1 T1" is initialized by an expression of type
+ // A reference to type "cv1 T1" is initialized by an expression of type
// "cv2 T2" as follows:
//
- // - If the reference is an lvalue reference and the initializer
+ // - If the reference is an lvalue reference and the initializer
// expression
// Note the analogous bullet points for rvalue refs to functions. Because
// there are no function rvalues in C++, rvalue refs to functions are treated
@@ -2503,19 +2524,21 @@ static void TryReferenceInitialization(Sema &S,
OverloadingResult ConvOvlResult = OR_Success;
bool T1Function = T1->isFunctionType();
if (isLValueRef || T1Function) {
- if (InitCategory.isLValue() &&
- RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
- // - is an lvalue (but is not a bit-field), and "cv1 T1" is
+ if (InitCategory.isLValue() &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related))) {
+ // - is an lvalue (but is not a bit-field), and "cv1 T1" is
// reference-compatible with "cv2 T2," or
//
- // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
+ // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
// bit-field when we're determining whether the reference initialization
// can occur. However, we do pay attention to whether it is a bit-field
// to decide whether we're actually binding to a temporary created from
// the bit-field.
if (DerivedToBase)
Sequence.AddDerivedToBaseCastStep(
- S.Context.getQualifiedType(T1, T2Quals),
+ S.Context.getQualifiedType(T1, T2Quals),
VK_LValue);
else if (ObjCConversion)
Sequence.AddObjCObjectConversionStep(
@@ -2528,18 +2551,18 @@ static void TryReferenceInitialization(Sema &S,
Sequence.AddReferenceBindingStep(cv1T1, BindingTemporary);
return;
}
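Illustration (not part of the diff): the widened condition additionally
accepts a merely reference-related binding when it originates from a C-style
or functional cast. The ordinary reference-compatible case, for contrast:

    struct Base { };
    struct Derived : Base { };
    Derived d;
    const Base &b = d;   // lvalue, reference-compatible: binds directly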
-
- // - has a class type (i.e., T2 is a class type), where T1 is not
- // reference-related to T2, and can be implicitly converted to an
- // lvalue of type "cv3 T3," where "cv1 T1" is reference-compatible
- // with "cv3 T3" (this conversion is selected by enumerating the
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // lvalue of type "cv3 T3," where "cv1 T1" is reference-compatible
+ // with "cv3 T3" (this conversion is selected by enumerating the
// applicable conversion functions (13.3.1.6) and choosing the best
// one through overload resolution (13.3)),
// If we have an rvalue ref to function type here, the rhs must be
// an rvalue.
if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
(isLValueRef || InitCategory.isRValue())) {
- ConvOvlResult = TryRefInitWithConversionFunction(S, Entity, Kind,
+ ConvOvlResult = TryRefInitWithConversionFunction(S, Entity, Kind,
Initializer,
/*AllowRValues=*/isRValueRef,
Sequence);
@@ -2553,37 +2576,39 @@ static void TryReferenceInitialization(Sema &S,
}
}
- // - Otherwise, the reference shall be an lvalue reference to a
+ // - Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
- // shall be an rvalue reference and the initializer expression shall
- // be an rvalue or have a function type.
- // We handled the function type stuff above.
- if (!((isLValueRef && T1Quals.hasConst() && !T1Quals.hasVolatile()) ||
- (isRValueRef && InitCategory.isRValue()))) {
- if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
+ // shall be an rvalue reference.
+ if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
ConvOvlResult);
- else if (isLValueRef)
+ else
Sequence.SetFailed(InitCategory.isLValue()
? (RefRelationship == Sema::Ref_Related
? InitializationSequence::FK_ReferenceInitDropsQualifiers
: InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
: InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
- else
- Sequence.SetFailed(
- InitializationSequence::FK_RValueReferenceBindingToLValue);
return;
}
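Illustration (not part of the diff): the failure block now fires only for
lvalue references (the rvalue-reference-to-lvalue rule moves to a later
check); the classic rejection it still diagnoses:

    int make();
    int &r = make();         // error: non-const lvalue reference to a temporary
    const int &cr = make();  // fine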
- // - [If T1 is not a function type], if T2 is a class type and
- if (!T1Function && T2->isRecordType()) {
- bool isXValue = InitCategory.isXValue();
- // - the initializer expression is an rvalue and "cv1 T1" is
- // reference-compatible with "cv2 T2", or
- if (InitCategory.isRValue() &&
- RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
+ // - If the initializer expression
+ // - is an xvalue, class prvalue, array prvalue, or function lvalue and
+ // "cv1 T1" is reference-compatible with "cv2 T2"
+ // Note: functions are handled below.
+ if (!T1Function &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related)) &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && T2->isRecordType()) ||
+ (InitCategory.isPRValue() && T2->isArrayType()))) {
+ ExprValueKind ValueKind = InitCategory.isXValue()? VK_XValue : VK_RValue;
+ if (InitCategory.isPRValue() && T2->isRecordType()) {
// The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
// compiler the freedom to perform a copy here or bind to the
// object, while C++0x requires that we bind directly to the
@@ -2593,29 +2618,29 @@ static void TryReferenceInitialization(Sema &S,
//
// The constructor that would be used to make the copy shall
// be callable whether or not the copy is actually done.
- if (!S.getLangOptions().CPlusPlus0x)
+ if (!S.getLangOptions().CPlusPlus0x && !S.getLangOptions().Microsoft)
Sequence.AddExtraneousCopyToTemporary(cv2T2);
-
- if (DerivedToBase)
- Sequence.AddDerivedToBaseCastStep(
- S.Context.getQualifiedType(T1, T2Quals),
- isXValue ? VK_XValue : VK_RValue);
- else if (ObjCConversion)
- Sequence.AddObjCObjectConversionStep(
- S.Context.getQualifiedType(T1, T2Quals));
-
- if (T1Quals != T2Quals)
- Sequence.AddQualificationConversionStep(cv1T1,
- isXValue ? VK_XValue : VK_RValue);
- Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/!isXValue);
- return;
}
- // - T1 is not reference-related to T2 and the initializer expression
- // can be implicitly converted to an rvalue of type "cv3 T3" (this
- // conversion is selected by enumerating the applicable conversion
- // functions (13.3.1.6) and choosing the best one through overload
- // resolution (13.3)),
+ if (DerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(S.Context.getQualifiedType(T1, T2Quals),
+ ValueKind);
+ else if (ObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1, T2Quals));
+
+ if (T1Quals != T2Quals)
+ Sequence.AddQualificationConversionStep(cv1T1, ValueKind);
+ Sequence.AddReferenceBindingStep(cv1T1,
+ /*bindingTemporary=*/(InitCategory.isPRValue() && !T2->isArrayType()));
+ return;
+ }
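Illustration (not part of the diff): the rewritten bullet binds xvalues and
class or array prvalues directly to the reference when the types are
reference-compatible, instead of forcing a copy. Sketch:

    struct T { };
    T make();
    T &&rr = make();   // binds straight to the prvalue; no extra temporary copy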
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // xvalue, class prvalue, or function lvalue of type "cv3 T3",
+ // where "cv1 T1" is reference-compatible with "cv3 T3",
+ if (T2->isRecordType()) {
if (RefRelationship == Sema::Ref_Incompatible) {
ConvOvlResult = TryRefInitWithConversionFunction(S, Entity,
Kind, Initializer,
@@ -2625,23 +2650,17 @@ static void TryReferenceInitialization(Sema &S,
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
ConvOvlResult);
-
+
return;
}
-
+
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
return;
}
-
- // - If the initializer expression is an rvalue, with T2 an array type,
- // and "cv1 T1" is reference-compatible with "cv2 T2," the reference
- // is bound to the object represented by the rvalue (see 3.10).
- // FIXME: How can an array type be reference-compatible with anything?
- // Don't we mean the element types of T1 and T2?
-
- // - Otherwise, a temporary of type “cv1 T1” is created and initialized
+
+ // - Otherwise, a temporary of type "cv1 T1" is created and initialized
// from the initializer expression using the rules for a non-reference
- // copy initialization (8.5). The reference is then bound to the
+ // copy initialization (8.5). The reference is then bound to the
// temporary. [...]
// Determine whether we are allowed to call explicit constructors or
@@ -2653,7 +2672,8 @@ static void TryReferenceInitialization(Sema &S,
if (S.TryImplicitConversion(Sequence, TempEntity, Initializer,
/*SuppressUserConversions*/ false,
AllowExplicit,
- /*FIXME:InOverloadResolution=*/false)) {
+ /*FIXME:InOverloadResolution=*/false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast())) {
// FIXME: Use the conversion function set stored in ICS to turn
// this into an overloading ambiguity diagnostic. However, we need
// to keep that set as an OverloadCandidateSet rather than as some
@@ -2662,6 +2682,8 @@ static void TryReferenceInitialization(Sema &S,
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
ConvOvlResult);
+ else if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
else
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
return;
@@ -2672,19 +2694,28 @@ static void TryReferenceInitialization(Sema &S,
// than, cv2; otherwise, the program is ill-formed.
unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
- if (RefRelationship == Sema::Ref_Related &&
+ if (RefRelationship == Sema::Ref_Related &&
(T1CVRQuals | T2CVRQuals) != T1CVRQuals) {
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
return;
}
+ // [...] If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
+ if (RefRelationship >= Sema::Ref_Related && !isLValueRef &&
+ InitCategory.isLValue()) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_RValueReferenceBindingToLValue);
+ return;
+ }
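Illustration (not part of the diff): the late check added here enforces the
companion rule that a reference-related rvalue reference must not bind to an
lvalue:

    int x = 0;
    int &&r = x;   // ill-formed: rvalue reference binding to an lvalue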
+
Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
return;
}
/// \brief Attempt character array initialization from a string literal
-/// (C++ [dcl.init.string], C99 6.7.8).
-static void TryStringLiteralInitialization(Sema &S,
+/// (C++ [dcl.init.string], C99 6.7.8).
+static void TryStringLiteralInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr *Initializer,
@@ -2696,19 +2727,19 @@ static void TryStringLiteralInitialization(Sema &S,
/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
/// enumerates the constructors of the initialized entity and performs overload
/// resolution to select the best.
-static void TryConstructorInitialization(Sema &S,
+static void TryConstructorInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr **Args, unsigned NumArgs,
QualType DestType,
InitializationSequence &Sequence) {
Sequence.setSequenceKind(InitializationSequence::ConstructorInitialization);
-
+
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
CandidateSet.clear();
-
+
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
bool AllowExplicit = (Kind.getKind() == InitializationKind::IK_Direct ||
@@ -2720,21 +2751,21 @@ static void TryConstructorInitialization(Sema &S,
Sequence.SetFailed(InitializationSequence::FK_Incomplete);
return;
}
-
+
// The type we're converting to is a class type. Enumerate its constructors
// to see if one is suitable.
const RecordType *DestRecordType = DestType->getAs<RecordType>();
- assert(DestRecordType && "Constructor initialization requires record type");
+ assert(DestRecordType && "Constructor initialization requires record type");
CXXRecordDecl *DestRecordDecl
= cast<CXXRecordDecl>(DestRecordType->getDecl());
-
+
DeclContext::lookup_iterator Con, ConEnd;
for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
bool SuppressUserConversions = false;
-
+
// Find the constructor (which may be a template).
CXXConstructorDecl *Constructor = 0;
FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
@@ -2744,14 +2775,14 @@ static void TryConstructorInitialization(Sema &S,
else {
Constructor = cast<CXXConstructorDecl>(D);
- // If we're performing copy initialization using a copy constructor, we
+ // If we're performing copy initialization using a copy constructor, we
// suppress user-defined conversions on the arguments.
// FIXME: Move constructors?
if (Kind.getKind() == InitializationKind::IK_Copy &&
Constructor->isCopyConstructor())
SuppressUserConversions = true;
}
-
+
if (!Constructor->isInvalidDecl() &&
(AllowExplicit || !Constructor->isExplicit())) {
if (ConstructorTmpl)
@@ -2764,16 +2795,16 @@ static void TryConstructorInitialization(Sema &S,
Args, NumArgs, CandidateSet,
SuppressUserConversions);
}
- }
-
+ }
+
SourceLocation DeclLoc = Kind.getLocation();
-
- // Perform overload resolution. If it fails, return the failed result.
+
+ // Perform overload resolution. If it fails, return the failed result.
OverloadCandidateSet::iterator Best;
- if (OverloadingResult Result
+ if (OverloadingResult Result
= CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
Sequence.SetOverloadFailure(
- InitializationSequence::FK_ConstructorOverloadFailed,
+ InitializationSequence::FK_ConstructorOverloadFailed,
Result);
return;
}
@@ -2792,13 +2823,13 @@ static void TryConstructorInitialization(Sema &S,
// Add the constructor initialization step. Any cv-qualification conversion is
// subsumed by the initialization.
Sequence.AddConstructorInitializationStep(
- cast<CXXConstructorDecl>(Best->Function),
+ cast<CXXConstructorDecl>(Best->Function),
Best->FoundDecl.getAccess(),
DestType);
}
/// \brief Attempt value initialization (C++ [dcl.init]p7).
-static void TryValueInitialization(Sema &S,
+static void TryValueInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
InitializationSequence &Sequence) {
@@ -2806,11 +2837,11 @@ static void TryValueInitialization(Sema &S,
//
// To value-initialize an object of type T means:
QualType T = Entity.getType();
-
+
// -- if T is an array type, then each element is value-initialized;
while (const ArrayType *AT = S.Context.getAsArrayType(T))
T = AT->getElementType();
-
+
if (const RecordType *RT = T->getAs<RecordType>()) {
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
// -- if T is a class type (clause 9) with a user-declared
@@ -2822,15 +2853,15 @@ static void TryValueInitialization(Sema &S,
// but Entity doesn't have a way to capture that (yet).
if (ClassDecl->hasUserDeclaredConstructor())
return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
-
+
// -- if T is a (possibly cv-qualified) non-union class type
// without a user-provided constructor, then the object is
- // zero-initialized and, if T’s implicitly-declared default
+ // zero-initialized and, if T's implicitly-declared default
// constructor is non-trivial, that constructor is called.
if ((ClassDecl->getTagKind() == TTK_Class ||
ClassDecl->getTagKind() == TTK_Struct)) {
Sequence.AddZeroInitializationStep(Entity.getType());
- return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
}
}
}
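Illustration (not part of the diff): the two branches above in miniature,
assuming the C++0x wording the comments quote:

    struct Plain { int i; };           // no user-declared constructor
    Plain p = Plain();                 // zero-initialized, so p.i == 0
    struct WithCtor { WithCtor(); };
    WithCtor w = WithCtor();           // the user-declared constructor runs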
@@ -2845,14 +2876,14 @@ static void TryDefaultInitialization(Sema &S,
const InitializationKind &Kind,
InitializationSequence &Sequence) {
assert(Kind.getKind() == InitializationKind::IK_Default);
-
+
// C++ [dcl.init]p6:
// To default-initialize an object of type T means:
// - if T is an array type, each element is default-initialized;
QualType DestType = Entity.getType();
while (const ArrayType *Array = S.Context.getAsArrayType(DestType))
DestType = Array->getElementType();
-
+
// - if T is a (possibly cv-qualified) class type (Clause 9), the default
// constructor for T is called (and the initialization is ill-formed if
// T has no accessible default constructor);
@@ -2860,12 +2891,12 @@ static void TryDefaultInitialization(Sema &S,
TryConstructorInitialization(S, Entity, Kind, 0, 0, DestType, Sequence);
return;
}
-
+
// - otherwise, no initialization is performed.
Sequence.setSequenceKind(InitializationSequence::NoInitialization);
-
+
// If a program calls for the default initialization of an object of
- // a const-qualified type T, T shall be a class type with a user-provided
+ // a const-qualified type T, T shall be a class type with a user-provided
// default constructor.
if (DestType.isConstQualified() && S.getLangOptions().CPlusPlus)
Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
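Illustration (not part of the diff): the final check rejects default
initialization of a const-qualified object in C++ when no user-provided
default constructor would run:

    struct P { int v; };
    const P p;   // error: default initialization of a const object of type P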
@@ -2874,42 +2905,42 @@ static void TryDefaultInitialization(Sema &S,
/// \brief Attempt a user-defined conversion between two types (C++ [dcl.init]),
/// which enumerates all conversion functions and performs overload resolution
/// to select the best.
-static void TryUserDefinedConversion(Sema &S,
+static void TryUserDefinedConversion(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr *Initializer,
InitializationSequence &Sequence) {
Sequence.setSequenceKind(InitializationSequence::UserDefinedConversion);
-
+
QualType DestType = Entity.getType();
assert(!DestType->isReferenceType() && "References are handled elsewhere");
QualType SourceType = Initializer->getType();
assert((DestType->isRecordType() || SourceType->isRecordType()) &&
"Must have a class type to perform a user-defined conversion");
-
+
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
CandidateSet.clear();
-
+
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
bool AllowExplicit = Kind.getKind() == InitializationKind::IK_Direct;
-
+
if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
CXXRecordDecl *DestRecordDecl
= cast<CXXRecordDecl>(DestRecordType->getDecl());
-
+
// Try to complete the type we're converting to.
- if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
DeclContext::lookup_iterator Con, ConEnd;
for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
-
+
// Find the constructor (which may be a template).
CXXConstructorDecl *Constructor = 0;
FunctionTemplateDecl *ConstructorTmpl
@@ -2919,7 +2950,7 @@ static void TryUserDefinedConversion(Sema &S,
ConstructorTmpl->getTemplatedDecl());
else
Constructor = cast<CXXConstructorDecl>(D);
-
+
if (!Constructor->isInvalidDecl() &&
Constructor->isConvertingConstructor(AllowExplicit)) {
if (ConstructorTmpl)
@@ -2932,7 +2963,7 @@ static void TryUserDefinedConversion(Sema &S,
&Initializer, 1, CandidateSet,
/*SuppressUserConversions=*/true);
}
- }
+ }
}
}
@@ -2947,24 +2978,24 @@ static void TryUserDefinedConversion(Sema &S,
if (!S.RequireCompleteType(DeclLoc, SourceType, 0)) {
CXXRecordDecl *SourceRecordDecl
= cast<CXXRecordDecl>(SourceRecordType->getDecl());
-
+
const UnresolvedSetImpl *Conversions
= SourceRecordDecl->getVisibleConversionFunctions();
for (UnresolvedSetImpl::const_iterator I = Conversions->begin(),
- E = Conversions->end();
+ E = Conversions->end();
I != E; ++I) {
NamedDecl *D = *I;
CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
-
+
FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
CXXConversionDecl *Conv;
if (ConvTemplate)
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
Conv = cast<CXXConversionDecl>(D);
-
+
if (AllowExplicit || !Conv->isExplicit()) {
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
@@ -2977,19 +3008,19 @@ static void TryUserDefinedConversion(Sema &S,
}
}
}
-
- // Perform overload resolution. If it fails, return the failed result.
+
+ // Perform overload resolution. If it fails, return the failed result.
OverloadCandidateSet::iterator Best;
if (OverloadingResult Result
- = CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
Sequence.SetOverloadFailure(
- InitializationSequence::FK_UserConversionOverloadFailed,
+ InitializationSequence::FK_UserConversionOverloadFailed,
Result);
return;
}
FunctionDecl *Function = Best->Function;
-
+
if (isa<CXXConstructorDecl>(Function)) {
// Add the user-defined conversion step. Any cv-qualification conversion is
// subsumed by the initialization.
@@ -3011,7 +3042,7 @@ static void TryUserDefinedConversion(Sema &S,
}
Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType);
-
+
// If the conversion following the call to the conversion function
// is interesting, add it as a separate step.
if (Best->FinalConversion.First || Best->FinalConversion.Second ||
@@ -3030,12 +3061,12 @@ InitializationSequence::InitializationSequence(Sema &S,
unsigned NumArgs)
: FailedCandidateSet(Kind.getLocation()) {
ASTContext &Context = S.Context;
-
+
// C++0x [dcl.init]p16:
- // The semantics of initializers are as follows. The destination type is
- // the type of the object or reference being initialized and the source
+ // The semantics of initializers are as follows. The destination type is
+ // the type of the object or reference being initialized and the source
// type is the type of the initializer expression. The source type is not
- // defined when the initializer is a braced-init-list or when it is a
+ // defined when the initializer is a braced-init-list or when it is a
// parenthesized list of expressions.
QualType DestType = Entity.getType();
@@ -3045,6 +3076,10 @@ InitializationSequence::InitializationSequence(Sema &S,
return;
}
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I]->getObjectKind() == OK_ObjCProperty)
+ S.ConvertPropertyForRValue(Args[I]);
+
QualType SourceType;
Expr *Initializer = 0;
if (NumArgs == 1) {
@@ -3052,14 +3087,14 @@ InitializationSequence::InitializationSequence(Sema &S,
if (!isa<InitListExpr>(Initializer))
SourceType = Initializer->getType();
}
-
- // - If the initializer is a braced-init-list, the object is
+
+ // - If the initializer is a braced-init-list, the object is
// list-initialized (8.5.4).
if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
TryListInitialization(S, Entity, Kind, InitList, *this);
return;
}
-
+
// - If the destination type is a reference type, see 8.5.3.
if (DestType->isReferenceType()) {
// C++0x [dcl.init.ref]p1:
@@ -3073,36 +3108,36 @@ InitializationSequence::InitializationSequence(Sema &S,
TryReferenceInitialization(S, Entity, Kind, Args[0], *this);
return;
}
-
- // - If the destination type is an array of characters, an array of
- // char16_t, an array of char32_t, or an array of wchar_t, and the
+
+ // - If the destination type is an array of characters, an array of
+ // char16_t, an array of char32_t, or an array of wchar_t, and the
// initializer is a string literal, see 8.5.2.
if (Initializer && IsStringInit(Initializer, DestType, Context)) {
TryStringLiteralInitialization(S, Entity, Kind, Initializer, *this);
return;
}
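Illustration (not part of the diff): the string-literal branch covers the
familiar character-array forms from C++ [dcl.init.string] and C99 6.7.8:

    char buf[6] = "hello";   // exactly fits, terminating '\0' included
    wchar_t ws[] = L"wide";  // wide string literal into a wchar_t array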
-
+
// - If the initializer is (), the object is value-initialized.
if (Kind.getKind() == InitializationKind::IK_Value ||
(Kind.getKind() == InitializationKind::IK_Direct && NumArgs == 0)) {
TryValueInitialization(S, Entity, Kind, *this);
return;
}
-
+
// Handle default initialization.
- if (Kind.getKind() == InitializationKind::IK_Default){
+ if (Kind.getKind() == InitializationKind::IK_Default) {
TryDefaultInitialization(S, Entity, Kind, *this);
return;
}
- // - Otherwise, if the destination type is an array, the program is
+ // - Otherwise, if the destination type is an array, the program is
// ill-formed.
if (const ArrayType *AT = Context.getAsArrayType(DestType)) {
if (AT->getElementType()->isAnyCharacterType())
SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
else
SetFailed(FK_ArrayNeedsInitList);
-
+
return;
}
@@ -3112,22 +3147,22 @@ InitializationSequence::InitializationSequence(Sema &S,
AddCAssignmentStep(DestType);
return;
}
-
+
// - If the destination type is a (possibly cv-qualified) class type:
if (DestType->isRecordType()) {
- // - If the initialization is direct-initialization, or if it is
- // copy-initialization where the cv-unqualified version of the
- // source type is the same class as, or a derived class of, the
+ // - If the initialization is direct-initialization, or if it is
+ // copy-initialization where the cv-unqualified version of the
+ // source type is the same class as, or a derived class of, the
// class of the destination, constructors are considered. [...]
if (Kind.getKind() == InitializationKind::IK_Direct ||
(Kind.getKind() == InitializationKind::IK_Copy &&
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
S.IsDerivedFrom(SourceType, DestType))))
- TryConstructorInitialization(S, Entity, Kind, Args, NumArgs,
+ TryConstructorInitialization(S, Entity, Kind, Args, NumArgs,
Entity.getType(), *this);
- // - Otherwise (i.e., for the remaining copy-initialization cases),
+ // - Otherwise (i.e., for the remaining copy-initialization cases),
// user-defined conversion sequences that can convert from the source
- // type to the destination type or (when a conversion function is
+ // type to the destination type or (when a conversion function is
// used) to a derived class thereof are enumerated as described in
// 13.3.1.4, and the best one is chosen through overload resolution
// (13.3).
@@ -3135,30 +3170,39 @@ InitializationSequence::InitializationSequence(Sema &S,
TryUserDefinedConversion(S, Entity, Kind, Initializer, *this);
return;
}
-
+
if (NumArgs > 1) {
SetFailed(FK_TooManyInitsForScalar);
return;
}
assert(NumArgs == 1 && "Zero-argument case handled above");
-
- // - Otherwise, if the source type is a (possibly cv-qualified) class
+
+ // - Otherwise, if the source type is a (possibly cv-qualified) class
// type, conversion functions are considered.
if (!SourceType.isNull() && SourceType->isRecordType()) {
TryUserDefinedConversion(S, Entity, Kind, Initializer, *this);
return;
}
-
+
// - Otherwise, the initial value of the object being initialized is the
// (possibly converted) value of the initializer expression. Standard
// conversions (Clause 4) will be used, if necessary, to convert the
- // initializer expression to the cv-unqualified version of the
+ // initializer expression to the cv-unqualified version of the
// destination type; no user-defined conversions are considered.
if (S.TryImplicitConversion(*this, Entity, Initializer,
/*SuppressUserConversions*/ true,
/*AllowExplicitConversions*/ false,
- /*InOverloadResolution*/ false))
- SetFailed(InitializationSequence::FK_ConversionFailed);
+ /*InOverloadResolution*/ false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast()))
+ {
+ DeclAccessPair dap;
+ if (Initializer->getType() == Context.OverloadTy &&
+ !S.ResolveAddressOfOverloadedFunction(Initializer
+ , DestType, false, dap))
+ SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else
+ SetFailed(InitializationSequence::FK_ConversionFailed);
+ }
else
setSequenceKind(StandardConversion);
}
@@ -3173,15 +3217,17 @@ InitializationSequence::~InitializationSequence() {
//===----------------------------------------------------------------------===//
// Perform initialization
//===----------------------------------------------------------------------===//
-static Sema::AssignmentAction
+static Sema::AssignmentAction
getAssignmentAction(const InitializedEntity &Entity) {
switch(Entity.getKind()) {
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Base:
return Sema::AA_Initializing;
case InitializedEntity::EK_Parameter:
- if (Entity.getDecl() &&
+ if (Entity.getDecl() &&
isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
return Sema::AA_Sending;
@@ -3190,15 +3236,10 @@ getAssignmentAction(const InitializedEntity &Entity) {
case InitializedEntity::EK_Result:
return Sema::AA_Returning;
- case InitializedEntity::EK_Exception:
- case InitializedEntity::EK_Base:
- llvm_unreachable("No assignment action for C++-specific initialization");
- break;
-
case InitializedEntity::EK_Temporary:
// FIXME: Can we tell apart casting vs. converting?
return Sema::AA_Casting;
-
+
case InitializedEntity::EK_Member:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
@@ -3223,12 +3264,12 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Exception:
case InitializedEntity::EK_BlockElement:
return false;
-
+
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Temporary:
return true;
}
-
+
llvm_unreachable("missed an InitializedEntity kind?");
}
@@ -3243,7 +3284,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_BlockElement:
return false;
-
+
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Temporary:
@@ -3251,8 +3292,8 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Exception:
return true;
}
-
- llvm_unreachable("missed an InitializedEntity kind?");
+
+ llvm_unreachable("missed an InitializedEntity kind?");
}
/// \brief Make a (potentially elidable) temporary copy of the object
@@ -3261,7 +3302,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
///
/// \param S The Sema object used for type-checking.
///
-/// \param T The type of the temporary object, which must either by
+/// \param T The type of the temporary object, which must either be
/// the type of the initializer expression or a superclass thereof.
///
/// \param Enter The entity being initialized.
@@ -3276,19 +3317,19 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
/// a temporary object, or an error expression if a copy could not be
/// created.
static ExprResult CopyObject(Sema &S,
- QualType T,
- const InitializedEntity &Entity,
- ExprResult CurInit,
- bool IsExtraneousCopy) {
+ QualType T,
+ const InitializedEntity &Entity,
+ ExprResult CurInit,
+ bool IsExtraneousCopy) {
// Determine which class type we're copying to.
Expr *CurInitExpr = (Expr *)CurInit.get();
- CXXRecordDecl *Class = 0;
+ CXXRecordDecl *Class = 0;
if (const RecordType *Record = T->getAs<RecordType>())
Class = cast<CXXRecordDecl>(Record->getDecl());
if (!Class)
return move(CurInit);
- // C++0x [class.copy]p34:
+ // C++0x [class.copy]p32:
// When certain criteria are met, an implementation is allowed to
// omit the copy/move construction of a class object, even if the
// copy/move constructor and/or destructor for the object have
@@ -3298,23 +3339,22 @@ static ExprResult CopyObject(Sema &S,
// with the same cv-unqualified type, the copy/move operation
// can be omitted by constructing the temporary object
// directly into the target of the omitted copy/move
- //
+ //
// Note that the other three bullets are handled elsewhere. Copy
// elision for return statements and throw expressions is handled as part
- // of constructor initialization, while copy elision for exception handlers
+ // of constructor initialization, while copy elision for exception handlers
// is handled by the run-time.
- bool Elidable = CurInitExpr->isTemporaryObject() &&
- S.Context.hasSameUnqualifiedType(T, CurInitExpr->getType());
+ bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
SourceLocation Loc;
switch (Entity.getKind()) {
case InitializedEntity::EK_Result:
Loc = Entity.getReturnLoc();
break;
-
+
case InitializedEntity::EK_Exception:
Loc = Entity.getThrowLoc();
break;
-
+
case InitializedEntity::EK_Variable:
Loc = Entity.getDecl()->getLocation();
break;
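Illustration (not part of the diff): CopyObject models the copy C++03 requires
when binding an rvalue to a reference; the edits compute elidability with the
two-argument isTemporaryObject and let move constructors and constructor
templates compete as candidates. The pattern being modeled:

    struct C { C(); C(const C &); };
    C source();
    const C &r = source();   // the copy ctor must be callable, though the
                             // copy itself may be elided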
@@ -3331,33 +3371,57 @@ static ExprResult CopyObject(Sema &S,
break;
}
- // Make sure that the type we are copying is complete.
+ // Make sure that the type we are copying is complete.
if (S.RequireCompleteType(Loc, T, S.PDiag(diag::err_temp_copy_incomplete)))
return move(CurInit);
- // Perform overload resolution using the class's copy constructors.
+ // Perform overload resolution using the class's copy/move constructors.
DeclContext::lookup_iterator Con, ConEnd;
OverloadCandidateSet CandidateSet(Loc);
for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class);
Con != ConEnd; ++Con) {
- // Only consider copy constructors.
- CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(*Con);
- if (!Constructor || Constructor->isInvalidDecl() ||
- !Constructor->isCopyConstructor() ||
- !Constructor->isConvertingConstructor(/*AllowExplicit=*/false))
+ // Only consider copy/move constructors and constructor templates. Per
+ // C++0x [dcl.init]p16, second bullet for class types, this
+ // initialization is direct-initialization.
+ CXXConstructorDecl *Constructor = 0;
+
+ if ((Constructor = dyn_cast<CXXConstructorDecl>(*Con))) {
+ // Handle copy/move constructors only.
+ if (!Constructor || Constructor->isInvalidDecl() ||
+ !Constructor->isCopyOrMoveConstructor() ||
+ !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(Constructor, Constructor->getAccess());
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ &CurInitExpr, 1, CandidateSet);
+ continue;
+ }
+
+ // Handle constructor templates.
+ FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(*Con);
+ if (ConstructorTmpl->isInvalidDecl())
+ continue;
+
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
continue;
+ // FIXME: Do we need to limit this to copy-constructor-like
+ // candidates?
DeclAccessPair FoundDecl
- = DeclAccessPair::make(Constructor, Constructor->getAccess());
- S.AddOverloadCandidate(Constructor, FoundDecl,
- &CurInitExpr, 1, CandidateSet);
+ = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, 0,
+ &CurInitExpr, 1, CandidateSet, true);
}
-
+
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(S, Loc, Best)) {
case OR_Success:
break;
-
+
case OR_No_Viable_Function:
S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
? diag::ext_rvalue_to_reference_temp_copy_no_viable
@@ -3368,14 +3432,14 @@ static ExprResult CopyObject(Sema &S,
if (!IsExtraneousCopy || S.isSFINAEContext())
return ExprError();
return move(CurInit);
-
+
case OR_Ambiguous:
S.Diag(Loc, diag::err_temp_copy_ambiguous)
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
CandidateSet.NoteCandidates(S, OCD_ViableCandidates, &CurInitExpr, 1);
return ExprError();
-
+
case OR_Deleted:
S.Diag(Loc, diag::err_temp_copy_deleted)
<< (int)Entity.getKind() << CurInitExpr->getType()
@@ -3417,7 +3481,7 @@ static ExprResult CopyObject(Sema &S,
return S.Owned(CurInitExpr);
}
-
+
// Determine the arguments required to actually perform the
// constructor call (we might have derived-to-base conversions, or
// the copy constructor may have default arguments).
@@ -3429,8 +3493,9 @@ static ExprResult CopyObject(Sema &S,
CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable,
move_arg(ConstructorArgs),
/*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete);
-
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+
// If we're supposed to bind temporaries, do so.
if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
@@ -3451,7 +3516,7 @@ void InitializationSequence::PrintInitLocationNote(Sema &S,
}
}
-ExprResult
+ExprResult
InitializationSequence::Perform(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -3462,7 +3527,7 @@ InitializationSequence::Perform(Sema &S,
Diagnose(S, Entity, Kind, (Expr **)Args.release(), NumArgs);
return ExprError();
}
-
+
if (SequenceKind == DependentSequence) {
// If the declaration is a non-dependent, incomplete array type
// that has an initializer, then its type will be completed once
@@ -3512,14 +3577,14 @@ InitializationSequence::Perform(Sema &S,
unsigned NumArgs = Args.size();
return S.Owned(new (S.Context) ParenListExpr(S.Context,
SourceLocation(),
- (Expr **)Args.release(),
+ (Expr **)Args.release(),
NumArgs,
SourceLocation()));
}
if (SequenceKind == NoInitialization)
return S.Owned((Expr *)0);
-
+
QualType DestType = Entity.getType().getNonReferenceType();
// FIXME: Ugly hack around the fact that Entity.getType() is not
// the same as Entity.getDecl()->getType() in cases involving type merging,
@@ -3529,10 +3594,10 @@ InitializationSequence::Perform(Sema &S,
Entity.getType();
ExprResult CurInit = S.Owned((Expr *)0);
-
+
assert(!Steps.empty() && "Cannot have an empty initialization sequence");
-
- // For initialization steps that start with a single initializer,
+
+ // For initialization steps that start with a single initializer,
// grab the only argument out of the Args and place it into the "current"
// initializer.
switch (Steps.front().Kind) {
@@ -3551,32 +3616,38 @@ InitializationSequence::Perform(Sema &S,
case SK_ListInitialization:
case SK_CAssignment:
case SK_StringInit:
- case SK_ObjCObjectConversion:
+ case SK_ObjCObjectConversion: {
assert(Args.size() == 1);
- CurInit = ExprResult(((Expr **)(Args.get()))[0]->Retain());
- if (CurInit.isInvalid())
- return ExprError();
+ Expr *CurInitExpr = Args.get()[0];
+ if (!CurInitExpr) return ExprError();
+
+ // Read from a property when initializing something with it.
+ if (CurInitExpr->getObjectKind() == OK_ObjCProperty)
+ S.ConvertPropertyForRValue(CurInitExpr);
+
+ CurInit = ExprResult(CurInitExpr);
break;
-
+ }
+
case SK_ConstructorInitialization:
case SK_ZeroInitialization:
break;
}
-
- // Walk through the computed steps for the initialization sequence,
+
+ // Walk through the computed steps for the initialization sequence,
// performing the specified conversions along the way.
bool ConstructorInitRequiresZeroInit = false;
for (step_iterator Step = step_begin(), StepEnd = step_end();
Step != StepEnd; ++Step) {
if (CurInit.isInvalid())
return ExprError();
-
- Expr *CurInitExpr = (Expr *)CurInit.get();
+
+ Expr *CurInitExpr = CurInit.get();
QualType SourceType = CurInitExpr? CurInitExpr->getType() : QualType();
-
+
switch (Step->Kind) {
case SK_ResolveAddressOfOverloadedFunction:
- // Overload resolution determined which function to invoke; update the
+ // Overload resolution determined which function to invoke; update the
// initializer to reflect that choice.
S.CheckAddressOfMemberAccess(CurInitExpr, Step->Function.FoundDecl);
S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation());
@@ -3584,29 +3655,29 @@ InitializationSequence::Perform(Sema &S,
Step->Function.FoundDecl,
Step->Function.Function);
break;
-
+
case SK_CastDerivedToBaseRValue:
case SK_CastDerivedToBaseXValue:
case SK_CastDerivedToBaseLValue: {
// We have a derived-to-base cast that produces either an rvalue or an
// lvalue. Perform that cast.
-
+
CXXCastPath BasePath;
// Casts to inaccessible base classes are allowed with C-style casts.
bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast();
if (S.CheckDerivedToBaseConversion(SourceType, Step->Type,
CurInitExpr->getLocStart(),
- CurInitExpr->getSourceRange(),
+ CurInitExpr->getSourceRange(),
&BasePath, IgnoreBaseAccess))
return ExprError();
-
+
if (S.BasePathInvolvesVirtualBase(BasePath)) {
QualType T = SourceType;
if (const PointerType *Pointer = T->getAs<PointerType>())
T = Pointer->getPointeeType();
if (const RecordType *RecordTy = T->getAs<RecordType>())
- S.MarkVTableUsed(CurInitExpr->getLocStart(),
+ S.MarkVTableUsed(CurInitExpr->getLocStart(),
cast<CXXRecordDecl>(RecordTy->getDecl()));
}
@@ -3623,7 +3694,7 @@ InitializationSequence::Perform(Sema &S,
&BasePath, VK));
break;
}
-
+
case SK_BindReference:
if (FieldDecl *BitField = CurInitExpr->getBitField()) {
// References cannot bind to bit fields (C++ [dcl.init.ref]p5).
@@ -3643,7 +3714,7 @@ InitializationSequence::Perform(Sema &S,
PrintInitLocationNote(S, Entity);
return ExprError();
}
-
+
// Reference binding does not have any corresponding ASTs.
// Check exception specifications
@@ -3660,16 +3731,16 @@ InitializationSequence::Perform(Sema &S,
return ExprError();
break;
-
+
case SK_ExtraneousCopyToTemporary:
- CurInit = CopyObject(S, Step->Type, Entity, move(CurInit),
+ CurInit = CopyObject(S, Step->Type, Entity, move(CurInit),
/*IsExtraneousCopy=*/true);
break;
case SK_UserConversion: {
// We have a user-defined conversion that invokes either a constructor
// or a conversion function.
- CastKind CastKind = CK_Unknown;
+ CastKind CastKind;
bool IsCopy = false;
FunctionDecl *Fn = Step->Function.Function;
DeclAccessPair FoundFn = Step->Function.FoundDecl;
@@ -3687,25 +3758,26 @@ InitializationSequence::Perform(Sema &S,
MultiExprArg(&CurInitExpr, 1),
Loc, ConstructorArgs))
return ExprError();
-
+
// Build an expression that constructs a temporary.
- CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
+ CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
move_arg(ConstructorArgs),
/*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete);
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
if (CurInit.isInvalid())
return ExprError();
S.CheckConstructorAccess(Kind.getLocation(), Constructor, Entity,
FoundFn.getAccess());
S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation());
-
+
CastKind = CK_ConstructorConversion;
QualType Class = S.Context.getTypeDeclType(Constructor->getParent());
if (S.Context.hasSameUnqualifiedType(SourceType, Class) ||
S.IsDerivedFrom(SourceType, Class))
IsCopy = true;
-
+
CreatedObject = true;
} else {
// Build a call to the conversion function.
@@ -3714,8 +3786,8 @@ InitializationSequence::Perform(Sema &S,
S.CheckMemberOperatorAccess(Kind.getLocation(), CurInitExpr, 0,
FoundFn);
S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation());
-
- // FIXME: Should we move this initialization into a separate
+
+ // FIXME: Should we move this initialization into a separate
// derived-to-base conversion? I believe the answer is "no", because
// we don't want to turn off access control here for c-style casts.
if (S.PerformObjectArgumentInitialization(CurInitExpr, /*Qualifier=*/0,
@@ -3725,19 +3797,18 @@ InitializationSequence::Perform(Sema &S,
// Do a little dance to make sure that CurInit has the proper
// pointer.
CurInit.release();
-
+
// Build the actual call to the conversion function.
- CurInit = S.Owned(S.BuildCXXMemberCallExpr(CurInitExpr, FoundFn,
- Conversion));
+ CurInit = S.BuildCXXMemberCallExpr(CurInitExpr, FoundFn, Conversion);
if (CurInit.isInvalid() || !CurInit.get())
return ExprError();
-
+
CastKind = CK_UserDefinedConversion;
-
+
CreatedObject = Conversion->getResultType()->isRecordType();
}
-
- bool RequiresCopy = !IsCopy &&
+
+ bool RequiresCopy = !IsCopy &&
getKind() != InitializationSequence::ReferenceBinding;
if (RequiresCopy || shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
@@ -3745,21 +3816,22 @@ InitializationSequence::Perform(Sema &S,
CurInitExpr = static_cast<Expr *>(CurInit.get());
QualType T = CurInitExpr->getType();
if (const RecordType *Record = T->getAs<RecordType>()) {
- CXXDestructorDecl *Destructor
+ CXXDestructorDecl *Destructor
= S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
- S.CheckDestructorAccess(CurInitExpr->getLocStart(), Destructor,
+ S.CheckDestructorAccess(CurInitExpr->getLocStart(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
S.MarkDeclarationReferenced(CurInitExpr->getLocStart(), Destructor);
+ S.DiagnoseUseOfDecl(Destructor, CurInitExpr->getLocStart());
}
}
-
+
CurInitExpr = CurInit.takeAs<Expr>();
// FIXME: xvalues
CurInit = S.Owned(ImplicitCastExpr::Create(S.Context,
CurInitExpr->getType(),
CastKind, CurInitExpr, 0,
IsLvalue ? VK_LValue : VK_RValue));
-
+
if (RequiresCopy)
CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
move(CurInit), /*IsExtraneousCopy=*/false);
@@ -3784,17 +3856,16 @@ InitializationSequence::Perform(Sema &S,
}
case SK_ConversionSequence: {
- bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast();
-
if (S.PerformImplicitConversion(CurInitExpr, Step->Type, *Step->ICS,
- Sema::AA_Converting, IgnoreBaseAccess))
+ getAssignmentAction(Entity),
+ Kind.isCStyleOrFunctionalCast()))
return ExprError();
-
+
CurInit.release();
CurInit = S.Owned(CurInitExpr);
break;
}
-
+
case SK_ListInitialization: {
InitListExpr *InitList = cast<InitListExpr>(CurInitExpr);
QualType Ty = Step->Type;
@@ -3810,7 +3881,7 @@ InitializationSequence::Perform(Sema &S,
unsigned NumArgs = Args.size();
CXXConstructorDecl *Constructor
= cast<CXXConstructorDecl>(Step->Function.Function);
-
+
// Build a call to the selected constructor.
ASTOwningVector<Expr*> ConstructorArgs(S);
SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
@@ -3830,11 +3901,11 @@ InitializationSequence::Perform(Sema &S,
// Determine the arguments required to actually perform the constructor
// call.
- if (S.CompleteConstructorCall(Constructor, move(Args),
+ if (S.CompleteConstructorCall(Constructor, move(Args),
Loc, ConstructorArgs))
return ExprError();
-
-
+
+
if (Entity.getKind() == InitializedEntity::EK_Temporary &&
NumArgs != 1 && // FIXME: Hack to work around cast weirdness
(Kind.getKind() == InitializationKind::IK_Direct ||
@@ -3843,24 +3914,34 @@ InitializationSequence::Perform(Sema &S,
unsigned NumExprs = ConstructorArgs.size();
Expr **Exprs = (Expr **)ConstructorArgs.take();
S.MarkDeclarationReferenced(Loc, Constructor);
+ S.DiagnoseUseOfDecl(Constructor, Loc);
+
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
+
CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context,
Constructor,
- Entity.getType(),
- Loc,
- Exprs,
+ TSInfo,
+ Exprs,
NumExprs,
- Kind.getParenRange().getEnd(),
+ Kind.getParenRange(),
ConstructorInitRequiresZeroInit));
} else {
CXXConstructExpr::ConstructionKind ConstructKind =
CXXConstructExpr::CK_Complete;
-
+
if (Entity.getKind() == InitializedEntity::EK_Base) {
ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
- CXXConstructExpr::CK_VirtualBase :
+ CXXConstructExpr::CK_VirtualBase :
CXXConstructExpr::CK_NonVirtualBase;
- }
-
+ }
+
+ // Only get the parenthesis range if it is a direct construction.
+ SourceRange parenRange =
+ Kind.getKind() == InitializationKind::IK_Direct ?
+ Kind.getParenRange() : SourceRange();
+
// If the entity allows NRVO, mark the construction as elidable
// unconditionally.
if (Entity.allowsNRVO())
@@ -3868,13 +3949,15 @@ InitializationSequence::Perform(Sema &S,
Constructor, /*Elidable=*/true,
move_arg(ConstructorArgs),
ConstructorInitRequiresZeroInit,
- ConstructKind);
+ ConstructKind,
+ parenRange);
else
CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
- Constructor,
+ Constructor,
move_arg(ConstructorArgs),
ConstructorInitRequiresZeroInit,
- ConstructKind);
+ ConstructKind,
+ parenRange);
}
if (CurInit.isInvalid())
return ExprError();
@@ -3883,17 +3966,17 @@ InitializationSequence::Perform(Sema &S,
S.CheckConstructorAccess(Loc, Constructor, Entity,
Step->Function.FoundDecl.getAccess());
S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Loc);
-
+
if (shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
-
+
break;
}
-
+
case SK_ZeroInitialization: {
step_iterator NextStep = Step;
++NextStep;
- if (NextStep != StepEnd &&
+ if (NextStep != StepEnd &&
NextStep->Kind == SK_ConstructorInitialization) {
// The need for zero-initialization is recorded directly into
// the call to the object's constructor within the next step.
@@ -3901,8 +3984,14 @@ InitializationSequence::Perform(Sema &S,
} else if (Kind.getKind() == InitializationKind::IK_Value &&
S.getLangOptions().CPlusPlus &&
!Kind.isImplicitValueInit()) {
- CurInit = S.Owned(new (S.Context) CXXScalarValueInitExpr(Step->Type,
- Kind.getRange().getBegin(),
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Step->Type,
+ Kind.getRange().getBegin());
+
+ CurInit = S.Owned(new (S.Context) CXXScalarValueInitExpr(
+ TSInfo->getType().getNonLValueExprType(S.Context),
+ TSInfo,
Kind.getRange().getEnd()));
} else {
CurInit = S.Owned(new (S.Context) ImplicitValueInitExpr(Step->Type));
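A minimal sketch, assuming standard C++ semantics, of the construct the branch above models: value-initializing a scalar with `T()` yields a zero value (this is what a `CXXScalarValueInitExpr` node represents), while a default-initialized local scalar would be left indeterminate.

    #include <cassert>

    int main() {
      int n = int();        // value-initialized: guaranteed to be 0
      double d = double();  // likewise, 0.0
      assert(n == 0 && d == 0.0);
      return 0;
    }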
@@ -3925,7 +4014,7 @@ InitializationSequence::Perform(Sema &S,
bool Complained;
if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
Step->Type, SourceType,
- CurInitExpr,
+ CurInitExpr,
getAssignmentAction(Entity),
&Complained)) {
PrintInitLocationNote(S, Entity);
@@ -3945,7 +4034,7 @@ InitializationSequence::Perform(Sema &S,
}
case SK_ObjCObjectConversion:
- S.ImpCastExprToType(CurInitExpr, Step->Type,
+ S.ImpCastExprToType(CurInitExpr, Step->Type,
CK_ObjCObjectLValueCast,
S.CastCategory(CurInitExpr));
CurInit.release();
@@ -3953,20 +4042,27 @@ InitializationSequence::Perform(Sema &S,
break;
}
}
-
+
+ // Diagnose non-fatal problems with the completed initialization.
+ if (Entity.getKind() == InitializedEntity::EK_Member &&
+ cast<FieldDecl>(Entity.getDecl())->isBitField())
+ S.CheckBitFieldInitialization(Kind.getLocation(),
+ cast<FieldDecl>(Entity.getDecl()),
+ CurInit.get());
+
return move(CurInit);
}
//===----------------------------------------------------------------------===//
// Diagnose initialization failures
//===----------------------------------------------------------------------===//
-bool InitializationSequence::Diagnose(Sema &S,
+bool InitializationSequence::Diagnose(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr **Args, unsigned NumArgs) {
if (SequenceKind != FailedSequence)
return false;
-
+
QualType DestType = Entity.getType();
switch (Failure) {
case FK_TooManyInitsForReference:
@@ -3978,22 +4074,22 @@ bool InitializationSequence::Diagnose(Sema &S,
S.Diag(Kind.getLocation(), diag::err_reference_has_multiple_inits)
<< SourceRange(Args[0]->getLocStart(), Args[NumArgs - 1]->getLocEnd());
break;
-
+
case FK_ArrayNeedsInitList:
case FK_ArrayNeedsInitListOrStringLiteral:
S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list)
<< (Failure == FK_ArrayNeedsInitListOrStringLiteral);
break;
-
+
case FK_AddressOfOverloadFailed: {
DeclAccessPair Found;
- S.ResolveAddressOfOverloadedFunction(Args[0],
+ S.ResolveAddressOfOverloadedFunction(Args[0],
DestType.getNonReferenceType(),
true,
Found);
break;
}
-
+
case FK_ReferenceInitOverloadFailed:
case FK_UserConversionOverloadFailed:
switch (FailedOverloadResult) {
@@ -4009,21 +4105,22 @@ bool InitializationSequence::Diagnose(Sema &S,
FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args, NumArgs);
break;
-
+
case OR_No_Viable_Function:
S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
<< Args[0]->getType() << DestType.getNonReferenceType()
<< Args[0]->getSourceRange();
FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args, NumArgs);
break;
-
+
case OR_Deleted: {
S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
<< Args[0]->getType() << DestType.getNonReferenceType()
<< Args[0]->getSourceRange();
OverloadCandidateSet::iterator Best;
OverloadingResult Ovl
- = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best,
+ true);
if (Ovl == OR_Deleted) {
S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
<< Best->Function->isDeleted();
@@ -4032,16 +4129,16 @@ bool InitializationSequence::Diagnose(Sema &S,
}
break;
}
-
+
case OR_Success:
llvm_unreachable("Conversion did not fail!");
break;
}
break;
-
+
case FK_NonConstLValueReferenceBindingToTemporary:
case FK_NonConstLValueReferenceBindingToUnrelated:
- S.Diag(Kind.getLocation(),
+ S.Diag(Kind.getLocation(),
Failure == FK_NonConstLValueReferenceBindingToTemporary
? diag::err_lvalue_reference_bind_to_temporary
: diag::err_lvalue_reference_bind_to_unrelated)
@@ -4050,47 +4147,54 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getType()
<< Args[0]->getSourceRange();
break;
-
+
case FK_RValueReferenceBindingToLValue:
S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
+ << DestType.getNonReferenceType() << Args[0]->getType()
<< Args[0]->getSourceRange();
break;
-
+
case FK_ReferenceInitDropsQualifiers:
S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
<< DestType.getNonReferenceType()
<< Args[0]->getType()
<< Args[0]->getSourceRange();
break;
-
+
case FK_ReferenceInitFailed:
S.Diag(Kind.getLocation(), diag::err_reference_bind_failed)
<< DestType.getNonReferenceType()
- << (Args[0]->isLvalue(S.Context) == Expr::LV_Valid)
+ << Args[0]->isLValue()
<< Args[0]->getType()
<< Args[0]->getSourceRange();
break;
-
- case FK_ConversionFailed:
+
+ case FK_ConversionFailed: {
+ QualType FromType = Args[0]->getType();
S.Diag(Kind.getLocation(), diag::err_init_conversion_failed)
<< (int)Entity.getKind()
<< DestType
- << (Args[0]->isLvalue(S.Context) == Expr::LV_Valid)
- << Args[0]->getType()
+ << Args[0]->isLValue()
+ << FromType
<< Args[0]->getSourceRange();
break;
-
+ }
case FK_TooManyInitsForScalar: {
SourceRange R;
if (InitListExpr *InitList = dyn_cast<InitListExpr>(Args[0]))
- R = SourceRange(InitList->getInit(1)->getLocStart(),
+ R = SourceRange(InitList->getInit(0)->getLocEnd(),
InitList->getLocEnd());
else
- R = SourceRange(Args[0]->getLocStart(), Args[NumArgs - 1]->getLocEnd());
+ R = SourceRange(Args[0]->getLocEnd(), Args[NumArgs - 1]->getLocEnd());
- S.Diag(Kind.getLocation(), diag::err_excess_initializers)
- << /*scalar=*/2 << R;
+ R.setBegin(S.PP.getLocForEndOfToken(R.getBegin()));
+ if (Kind.isCStyleOrFunctionalCast())
+ S.Diag(Kind.getLocation(), diag::err_builtin_func_cast_more_than_one_arg)
+ << R;
+ else
+ S.Diag(Kind.getLocation(), diag::err_excess_initializers)
+ << /*scalar=*/2 << R;
break;
}
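Both branches above reject deliberately ill-formed code; a short sketch of the two forms being distinguished (these lines are meant to fail to compile):

    int a = { 1, 2 };   // excess initializers for a scalar
    int b = int(1, 2);  // a functional cast on a builtin type takes one argument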
@@ -4103,13 +4207,13 @@ bool InitializationSequence::Diagnose(Sema &S,
S.Diag(Kind.getLocation(), diag::err_init_list_bad_dest_type)
<< (DestType->isRecordType()) << DestType << Args[0]->getSourceRange();
break;
-
+
case FK_ConstructorOverloadFailed: {
SourceRange ArgsRange;
if (NumArgs)
- ArgsRange = SourceRange(Args[0]->getLocStart(),
+ ArgsRange = SourceRange(Args[0]->getLocStart(),
Args[NumArgs - 1]->getLocEnd());
-
+
// FIXME: Using "DestType" for the entity we're printing is probably
// bad.
switch (FailedOverloadResult) {
@@ -4119,7 +4223,7 @@ bool InitializationSequence::Diagnose(Sema &S,
FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates,
Args, NumArgs);
break;
-
+
case OR_No_Viable_Function:
if (Kind.getKind() == InitializationKind::IK_Default &&
(Entity.getKind() == InitializedEntity::EK_Base ||
@@ -4153,7 +4257,7 @@ bool InitializationSequence::Diagnose(Sema &S,
if (const RecordType *Record
= Entity.getType()->getAs<RecordType>())
- S.Diag(Record->getDecl()->getLocation(),
+ S.Diag(Record->getDecl()->getLocation(),
diag::note_previous_decl)
<< S.Context.getTagDeclType(Record->getDecl());
}
@@ -4164,7 +4268,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< DestType << ArgsRange;
FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args, NumArgs);
break;
-
+
case OR_Deleted: {
S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
<< true << DestType << ArgsRange;
@@ -4179,14 +4283,14 @@ bool InitializationSequence::Diagnose(Sema &S,
}
break;
}
-
+
case OR_Success:
llvm_unreachable("Conversion did not fail!");
break;
}
break;
}
-
+
case FK_DefaultInitOfConst:
if (Entity.getKind() == InitializedEntity::EK_Member &&
isa<CXXConstructorDecl>(S.CurContext)) {
@@ -4206,13 +4310,13 @@ bool InitializationSequence::Diagnose(Sema &S,
<< DestType << (bool)DestType->getAs<RecordType>();
}
break;
-
+
case FK_Incomplete:
- S.RequireCompleteType(Kind.getLocation(), DestType,
+ S.RequireCompleteType(Kind.getLocation(), DestType,
diag::err_init_incomplete_type);
- break;
+ break;
}
-
+
PrintInitLocationNote(S, Entity);
return true;
}
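Hedged sketches of the last two failure kinds handled above, both deliberately ill-formed:

    struct Incomplete;   // declared but never defined

    void probe() {
      const int c;       // default initialization of a const variable
      Incomplete x;      // initialization of an incomplete type
    }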
@@ -4225,95 +4329,95 @@ void InitializationSequence::dump(llvm::raw_ostream &OS) const {
case FK_TooManyInitsForReference:
OS << "too many initializers for reference";
break;
-
+
case FK_ArrayNeedsInitList:
OS << "array requires initializer list";
break;
-
+
case FK_ArrayNeedsInitListOrStringLiteral:
OS << "array requires initializer list or string literal";
break;
-
+
case FK_AddressOfOverloadFailed:
OS << "address of overloaded function failed";
break;
-
+
case FK_ReferenceInitOverloadFailed:
OS << "overload resolution for reference initialization failed";
break;
-
+
case FK_NonConstLValueReferenceBindingToTemporary:
OS << "non-const lvalue reference bound to temporary";
break;
-
+
case FK_NonConstLValueReferenceBindingToUnrelated:
OS << "non-const lvalue reference bound to unrelated type";
break;
-
+
case FK_RValueReferenceBindingToLValue:
OS << "rvalue reference bound to an lvalue";
break;
-
+
case FK_ReferenceInitDropsQualifiers:
OS << "reference initialization drops qualifiers";
break;
-
+
case FK_ReferenceInitFailed:
OS << "reference initialization failed";
break;
-
+
case FK_ConversionFailed:
OS << "conversion failed";
break;
-
+
case FK_TooManyInitsForScalar:
OS << "too many initializers for scalar";
break;
-
+
case FK_ReferenceBindingToInitList:
OS << "referencing binding to initializer list";
break;
-
+
case FK_InitListBadDestinationType:
OS << "initializer list for non-aggregate, non-scalar type";
break;
-
+
case FK_UserConversionOverloadFailed:
OS << "overloading failed for user-defined conversion";
break;
-
+
case FK_ConstructorOverloadFailed:
OS << "constructor overloading failed";
break;
-
+
case FK_DefaultInitOfConst:
OS << "default initialization of a const variable";
break;
-
+
case FK_Incomplete:
OS << "initialization of incomplete type";
break;
- }
+ }
OS << '\n';
return;
}
-
+
case DependentSequence:
OS << "Dependent sequence: ";
return;
-
+
case UserDefinedConversion:
OS << "User-defined conversion sequence: ";
break;
-
+
case ConstructorInitialization:
OS << "Constructor initialization sequence: ";
break;
-
+
case ReferenceBinding:
OS << "Reference binding: ";
break;
-
+
case ListInitialization:
OS << "List initialization: ";
break;
@@ -4321,54 +4425,54 @@ void InitializationSequence::dump(llvm::raw_ostream &OS) const {
case ZeroInitialization:
OS << "Zero initialization\n";
return;
-
+
case NoInitialization:
OS << "No initialization\n";
return;
-
+
case StandardConversion:
OS << "Standard conversion: ";
break;
-
+
case CAssignment:
OS << "C assignment: ";
break;
-
+
case StringInit:
OS << "String initialization: ";
break;
}
-
+
for (step_iterator S = step_begin(), SEnd = step_end(); S != SEnd; ++S) {
if (S != step_begin()) {
OS << " -> ";
}
-
+
switch (S->Kind) {
case SK_ResolveAddressOfOverloadedFunction:
OS << "resolve address of overloaded function";
break;
-
+
case SK_CastDerivedToBaseRValue:
OS << "derived-to-base case (rvalue" << S->Type.getAsString() << ")";
break;
-
+
case SK_CastDerivedToBaseXValue:
OS << "derived-to-base case (xvalue" << S->Type.getAsString() << ")";
break;
-
+
case SK_CastDerivedToBaseLValue:
OS << "derived-to-base case (lvalue" << S->Type.getAsString() << ")";
break;
-
+
case SK_BindReference:
OS << "bind reference to lvalue";
break;
-
+
case SK_BindReferenceToTemporary:
OS << "bind reference to a temporary";
break;
-
+
case SK_ExtraneousCopyToTemporary:
OS << "extraneous C++03 copy to temporary";
break;
@@ -4386,29 +4490,29 @@ void InitializationSequence::dump(llvm::raw_ostream &OS) const {
case SK_QualificationConversionLValue:
OS << "qualification conversion (lvalue)";
break;
-
+
case SK_ConversionSequence:
OS << "implicit conversion sequence (";
S->ICS->DebugPrint(); // FIXME: use OS
OS << ")";
break;
-
+
case SK_ListInitialization:
OS << "list initialization";
break;
-
+
case SK_ConstructorInitialization:
OS << "constructor initialization";
break;
-
+
case SK_ZeroInitialization:
OS << "zero initialization";
break;
-
+
case SK_CAssignment:
OS << "C assignment";
break;
-
+
case SK_StringInit:
OS << "string initialization";
break;
@@ -4427,14 +4531,14 @@ void InitializationSequence::dump() const {
//===----------------------------------------------------------------------===//
// Initialization helper functions
//===----------------------------------------------------------------------===//
-ExprResult
+ExprResult
Sema::PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init) {
if (Init.isInvalid())
return ExprError();
- Expr *InitE = (Expr *)Init.get();
+ Expr *InitE = Init.get();
assert(InitE && "No initialization expression?");
if (EqualLoc.isInvalid())
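A minimal sketch, assuming standard C++ semantics, of the distinction this entry point models (the `Meters` type is illustrative only): copy-initialization considers only non-explicit constructors, while direct-initialization may also use explicit ones.

    struct Meters {
      explicit Meters(double v) : value(v) {}
      double value;
    };

    Meters direct(3.0);    // direct-initialization: the explicit ctor is viable
    // Meters copy = 3.0;  // copy-initialization: ill-formed, ctor is explicit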
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
index 306e95a..0fd0e08 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -31,7 +31,9 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
+#include <limits>
#include <list>
#include <set>
#include <vector>
@@ -89,7 +91,7 @@ namespace {
UnqualUsingDirectiveSet() {}
void visitScopeChain(Scope *S, Scope *InnermostFileScope) {
- // C++ [namespace.udir]p1:
+ // C++ [namespace.udir]p1:
// During unqualified name lookup, the names appear as if they
// were declared in the nearest enclosing namespace which contains
// both the using-directive and the nominated namespace.
@@ -104,7 +106,7 @@ namespace {
} else {
Scope::udir_iterator I = S->using_directives_begin(),
End = S->using_directives_end();
-
+
for (; I != End; ++I)
visit(*I, InnermostFileDC);
}
@@ -175,7 +177,7 @@ namespace {
while (!Common->Encloses(EffectiveDC))
Common = Common->getParent();
Common = Common->getPrimaryContext();
-
+
list.push_back(UnqualUsingEntry(UD->getNominatedNamespace(), Common));
}
@@ -183,11 +185,8 @@ namespace {
std::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
}
- typedef ListTy::iterator iterator;
typedef ListTy::const_iterator const_iterator;
-
- iterator begin() { return list.begin(); }
- iterator end() { return list.end(); }
+
const_iterator begin() const { return list.begin(); }
const_iterator end() const { return list.end(); }
@@ -211,7 +210,8 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
IDNS = Decl::IDNS_Ordinary;
if (CPlusPlus) {
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace;
- if (Redeclaration) IDNS |= Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend;
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend;
}
break;
@@ -237,7 +237,10 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
IDNS = Decl::IDNS_Tag;
}
break;
-
+ case Sema::LookupLabel:
+ IDNS = Decl::IDNS_Label;
+ break;
+
case Sema::LookupMemberName:
IDNS = Decl::IDNS_Member;
if (CPlusPlus)
@@ -260,9 +263,9 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
case Sema::LookupObjCProtocolName:
IDNS = Decl::IDNS_ObjCProtocol;
break;
-
+
case Sema::LookupAnyName:
- IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member
| Decl::IDNS_Using | Decl::IDNS_Namespace | Decl::IDNS_ObjCProtocol
| Decl::IDNS_Type;
break;
@@ -271,8 +274,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
}
void LookupResult::configure() {
- IDNS = getIDNS(LookupKind,
- SemaRef.getLangOptions().CPlusPlus,
+ IDNS = getIDNS(LookupKind, SemaRef.getLangOptions().CPlusPlus,
isForRedeclaration());
// If we're looking for one of the allocation or deallocation
@@ -293,7 +295,6 @@ void LookupResult::configure() {
}
}
-#ifndef NDEBUG
void LookupResult::sanity() const {
assert(ResultKind != NotFound || Decls.size() == 0);
assert(ResultKind != Found || Decls.size() == 1);
@@ -302,12 +303,12 @@ void LookupResult::sanity() const {
isa<FunctionTemplateDecl>((*begin())->getUnderlyingDecl())));
assert(ResultKind != FoundUnresolvedValue || sanityCheckUnresolved());
assert(ResultKind != Ambiguous || Decls.size() > 1 ||
- (Decls.size() == 1 && Ambiguity == AmbiguousBaseSubobjects));
+ (Decls.size() == 1 && (Ambiguity == AmbiguousBaseSubobjects ||
+ Ambiguity == AmbiguousBaseSubobjectTypes)));
assert((Paths != NULL) == (ResultKind == Ambiguous &&
(Ambiguity == AmbiguousBaseSubobjectTypes ||
Ambiguity == AmbiguousBaseSubobjects)));
}
-#endif
// Necessary because CXXBasePaths is not complete in Sema.h
void LookupResult::deletePaths(CXXBasePaths *Paths) {
@@ -317,7 +318,7 @@ void LookupResult::deletePaths(CXXBasePaths *Paths) {
/// Resolves the result kind of this lookup.
void LookupResult::resolveKind() {
unsigned N = Decls.size();
-
+
// Fast case: no possible ambiguity.
if (N == 0) {
assert(ResultKind == NotFound || ResultKind == NotFoundInCurrentInstantiation);
@@ -340,13 +341,13 @@ void LookupResult::resolveKind() {
llvm::SmallPtrSet<NamedDecl*, 16> Unique;
llvm::SmallPtrSet<QualType, 16> UniqueTypes;
-
+
bool Ambiguous = false;
bool HasTag = false, HasFunction = false, HasNonFunction = false;
bool HasFunctionTemplate = false, HasUnresolved = false;
unsigned UniqueTagIndex = 0;
-
+
unsigned I = 0;
while (I < N) {
NamedDecl *D = Decls[I]->getUnderlyingDecl();
@@ -367,14 +368,14 @@ void LookupResult::resolveKind() {
}
}
}
-
+
if (!Unique.insert(D)) {
// If it's not unique, pull something off the back (and
// continue at this index).
Decls[I] = Decls[--N];
continue;
- }
-
+ }
+
// Otherwise, do some decl type analysis and then continue.
if (isa<UnresolvedUsingValueDecl>(D)) {
@@ -407,8 +408,13 @@ void LookupResult::resolveKind() {
// But it's still an error if there are distinct tag types found,
// even if they're not visible. (ref?)
if (HideTags && HasTag && !Ambiguous &&
- (HasFunction || HasNonFunction || HasUnresolved))
- Decls[UniqueTagIndex] = Decls[--N];
+ (HasFunction || HasNonFunction || HasUnresolved)) {
+ if (Decls[UniqueTagIndex]->getDeclContext()->getRedeclContext()->Equals(
+ Decls[UniqueTagIndex? 0 : N-1]->getDeclContext()->getRedeclContext()))
+ Decls[UniqueTagIndex] = Decls[--N];
+ else
+ Ambiguous = true;
+ }
Decls.set_size(N);
@@ -453,7 +459,7 @@ void LookupResult::print(llvm::raw_ostream &Out) {
Out << Decls.size() << " result(s)";
if (isAmbiguous()) Out << ", ambiguous";
if (Paths) Out << ", base paths present";
-
+
for (iterator I = begin(), E = end(); I != E; ++I) {
Out << "\n";
(*I)->print(Out, 2);
@@ -480,12 +486,21 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
S.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return false;
- NamedDecl *D = S.LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID,
- S.TUScope, R.isForRedeclaration(),
- R.getNameLoc());
- if (D)
+ if (NamedDecl *D = S.LazilyCreateBuiltin((IdentifierInfo *)II,
+ BuiltinID, S.TUScope,
+ R.isForRedeclaration(),
+ R.getNameLoc())) {
R.addDecl(D);
- return (D != NULL);
+ return true;
+ }
+
+ if (R.isForRedeclaration()) {
+ // If we're redeclaring this function anyway, forget that
+ // this was a builtin at all.
+ S.Context.BuiltinInfo.ForgetBuiltin(BuiltinID, S.Context.Idents);
+ }
+
+ return false;
}
}
}
@@ -500,16 +515,16 @@ static bool CanDeclareSpecialMemberFunction(ASTContext &Context,
// Don't do it if the class is invalid.
if (Class->isInvalidDecl())
return false;
-
+
// We need to have a definition for the class.
if (!Class->getDefinition() || Class->isDependentContext())
return false;
-
+
// We can't be in the middle of defining the class.
if (const RecordType *RecordTy
= Context.getTypeDeclType(Class)->getAs<RecordType>())
return !RecordTy->isBeingDefined();
-
+
return false;
}
@@ -520,46 +535,46 @@ void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
// If the default constructor has not yet been declared, do so now.
if (!Class->hasDeclaredDefaultConstructor())
DeclareImplicitDefaultConstructor(Class);
-
+
// If the copy constructor has not yet been declared, do so now.
if (!Class->hasDeclaredCopyConstructor())
DeclareImplicitCopyConstructor(Class);
-
+
// If the copy assignment operator has not yet been declared, do so now.
if (!Class->hasDeclaredCopyAssignment())
DeclareImplicitCopyAssignment(Class);
// If the destructor has not yet been declared, do so now.
if (!Class->hasDeclaredDestructor())
- DeclareImplicitDestructor(Class);
+ DeclareImplicitDestructor(Class);
}
-/// \brief Determine whether this is the name of an implicitly-declared
+/// \brief Determine whether this is the name of an implicitly-declared
/// special member function.
static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
switch (Name.getNameKind()) {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
return true;
-
+
case DeclarationName::CXXOperatorName:
return Name.getCXXOverloadedOperator() == OO_Equal;
-
+
default:
- break;
+ break;
}
-
+
return false;
}
/// \brief If there are any implicit member functions with the given name
/// that need to be declared in the given declaration context, do so.
-static void DeclareImplicitMemberFunctionsWithName(Sema &S,
+static void DeclareImplicitMemberFunctionsWithName(Sema &S,
DeclarationName Name,
const DeclContext *DC) {
if (!DC)
return;
-
+
switch (Name.getNameKind()) {
case DeclarationName::CXXConstructorName:
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
@@ -572,26 +587,26 @@ static void DeclareImplicitMemberFunctionsWithName(Sema &S,
S.DeclareImplicitCopyConstructor(const_cast<CXXRecordDecl *>(Record));
}
break;
-
+
case DeclarationName::CXXDestructorName:
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
if (Record->getDefinition() && !Record->hasDeclaredDestructor() &&
CanDeclareSpecialMemberFunction(S.Context, Record))
S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record));
break;
-
+
case DeclarationName::CXXOperatorName:
if (Name.getCXXOverloadedOperator() != OO_Equal)
break;
-
+
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
if (Record->getDefinition() && !Record->hasDeclaredCopyAssignment() &&
CanDeclareSpecialMemberFunction(S.Context, Record))
S.DeclareImplicitCopyAssignment(const_cast<CXXRecordDecl *>(Record));
break;
-
+
default:
- break;
+ break;
}
}
@@ -603,7 +618,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
// Lazily declare C++ special member functions.
if (S.getLangOptions().CPlusPlus)
DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC);
-
+
// Perform lookup into this declaration context.
DeclContext::lookup_const_iterator I, E;
for (llvm::tie(I, E) = DC->lookup(R.getLookupName()); I != E; ++I) {
@@ -624,7 +639,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
return Found;
// C++ [temp.mem]p6:
- // A specialization of a conversion function template is not found by
+ // A specialization of a conversion function template is not found by
// name lookup. Instead, any conversion function templates visible in the
// context of the use are considered. [...]
const CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
@@ -632,50 +647,51 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
return Found;
const UnresolvedSetImpl *Unresolved = Record->getConversionFunctions();
- for (UnresolvedSetImpl::iterator U = Unresolved->begin(),
+ for (UnresolvedSetImpl::iterator U = Unresolved->begin(),
UEnd = Unresolved->end(); U != UEnd; ++U) {
FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(*U);
if (!ConvTemplate)
continue;
-
+
// When we're performing lookup for the purposes of redeclaration, just
- // add the conversion function template. When we deduce template
- // arguments for specializations, we'll end up unifying the return
+ // add the conversion function template. When we deduce template
+ // arguments for specializations, we'll end up unifying the return
// type of the new declaration with the type of the function template.
if (R.isForRedeclaration()) {
R.addDecl(ConvTemplate);
Found = true;
continue;
}
-
+
// C++ [temp.mem]p6:
- // [...] For each such operator, if argument deduction succeeds
- // (14.9.2.3), the resulting specialization is used as if found by
+ // [...] For each such operator, if argument deduction succeeds
+ // (14.9.2.3), the resulting specialization is used as if found by
// name lookup.
//
// When referencing a conversion function for any purpose other than
// a redeclaration (such that we'll be building an expression with the
- // result), perform template argument deduction and place the
+ // result), perform template argument deduction and place the
// specialization into the result set. We do this to avoid forcing all
// callers to perform special deduction for conversion functions.
TemplateDeductionInfo Info(R.getSema().Context, R.getNameLoc());
FunctionDecl *Specialization = 0;
-
- const FunctionProtoType *ConvProto
+
+ const FunctionProtoType *ConvProto
= ConvTemplate->getTemplatedDecl()->getType()->getAs<FunctionProtoType>();
assert(ConvProto && "Nonsensical conversion function template type");
// Compute the type of the function that we would expect the conversion
// function to have, if it were to match the name given.
// FIXME: Calling convention!
- FunctionType::ExtInfo ConvProtoInfo = ConvProto->getExtInfo();
+ FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo();
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_Default);
+ EPI.HasExceptionSpec = false;
+ EPI.HasAnyExceptionSpec = false;
+ EPI.NumExceptions = 0;
QualType ExpectedType
= R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(),
- 0, 0, ConvProto->isVariadic(),
- ConvProto->getTypeQuals(),
- false, false, 0, 0,
- ConvProtoInfo.withCallingConv(CC_Default));
-
+ 0, 0, EPI);
+
// Perform template argument deduction against the type that we would
// expect the function to have.
if (R.getSema().DeduceTemplateArguments(ConvTemplate, 0, ExpectedType,
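A minimal sketch of the C++ [temp.mem]p6 behavior handled above, with an illustrative `AnyPtr` type: the specialization of a conversion function template is not found by name lookup itself; instead, template arguments are deduced against the type the conversion is expected to produce.

    struct AnyPtr {
      template <typename T>
      operator T *() const { return 0; }  // conversion function template
    };

    int *p = AnyPtr();  // deduces T = int, as if 'operator int*' were found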
@@ -691,7 +707,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
// Performs C++ unqualified lookup into the given file context.
static bool
-CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
+CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
DeclContext *NS, UnqualUsingDirectiveSet &UDirs) {
assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!");
@@ -729,7 +745,7 @@ static bool isNamespaceOrTranslationUnitScope(Scope *S) {
static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
DeclContext *DC = static_cast<DeclContext *>(S->getEntity());
DeclContext *Lexical = 0;
- for (Scope *OuterS = S->getParent(); OuterS;
+ for (Scope *OuterS = S->getParent(); OuterS;
OuterS = OuterS->getParent()) {
if (OuterS->getEntity()) {
Lexical = static_cast<DeclContext *>(OuterS->getEntity());
@@ -745,12 +761,12 @@ static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
//
// Example:
//
- // namespace N {
- // class C { };
+ // namespace N {
+ // class C { };
//
// template<class T> class B {
// void f(T);
- // };
+ // };
// }
//
// template<class C> void N::B<C>::f(C) {
@@ -759,24 +775,24 @@ static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
//
// In this example, the lexical context we return is the
// TranslationUnit, while the semantic context is the namespace N.
- if (!Lexical || !DC || !S->getParent() ||
+ if (!Lexical || !DC || !S->getParent() ||
!S->getParent()->isTemplateParamScope())
return std::make_pair(Lexical, false);
- // Find the outermost template parameter scope.
+ // Find the outermost template parameter scope.
// For the example, this is the scope for the template parameters of
// template<class C>.
Scope *OutermostTemplateScope = S->getParent();
while (OutermostTemplateScope->getParent() &&
OutermostTemplateScope->getParent()->isTemplateParamScope())
OutermostTemplateScope = OutermostTemplateScope->getParent();
-
+
// Find the namespace context in which the original scope occurs. In
// the example, this is namespace N.
DeclContext *Semantic = DC;
while (!Semantic->isFileContext())
Semantic = Semantic->getParent();
-
+
// Find the declaration context just outside of the template
// parameter scope. This is the context in which the template is
// being lexically declared (a namespace context). In the
@@ -800,7 +816,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (DeclContext *DC = static_cast<DeclContext *>(PreS->getEntity()))
DeclareImplicitMemberFunctionsWithName(*this, Name, DC);
}
-
+
// Implicitly declare member functions with the name we're looking for, if in
// fact we are in a scope where it matters.
@@ -883,7 +899,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (ObjCInterfaceDecl *Class = Method->getClassInterface()) {
ObjCInterfaceDecl *ClassDeclared;
if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(
- Name.getAsIdentifierInfo(),
+ Name.getAsIdentifierInfo(),
ClassDeclared)) {
if (R.isAcceptableDecl(Ivar)) {
R.addDecl(Ivar);
@@ -913,6 +929,10 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
// FIXME: This really, really shouldn't be happening.
if (!S) return false;
+ // If we are looking for members, no need to look into global/namespace scope.
+ if (R.getLookupKind() == LookupMemberName)
+ return false;
+
// Collect UsingDirectiveDecls in all scopes, and recursively all
// nominated namespaces by those using-directives.
//
@@ -958,7 +978,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
Ctx = OutsideOfTemplateParamDC;
OutsideOfTemplateParamDC = 0;
}
-
+
if (Ctx) {
DeclContext *OuterCtx;
bool SearchAfterTemplateScope;
@@ -972,24 +992,24 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
// non-transparent context.
if (Ctx->isTransparentContext())
continue;
-
+
// If we have a context, and it's not a context stashed in the
// template parameter scope for an out-of-line definition, also
// look into that context.
if (!(Found && S && S->isTemplateParamScope())) {
assert(Ctx->isFileContext() &&
"We should have been looking only at file context here already.");
-
+
// Look into context considering using-directives.
if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs))
Found = true;
}
-
+
if (Found) {
R.resolveKind();
return true;
}
-
+
if (R.isForRedeclaration() && !Ctx->isTransparentContext())
return false;
}
@@ -1042,7 +1062,6 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
if (!getLangOptions().CPlusPlus) {
// Unqualified name lookup in C/Objective-C is purely lexical, so
// search in the declarations attached to the name.
-
if (NameKind == Sema::LookupRedeclarationWithLinkage) {
// Find the nearest non-transparent declaration scope.
while (!(S->getFlags() & Scope::DeclScope) ||
@@ -1228,16 +1247,48 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
}
/// \brief Callback that looks for any member of a class with the given name.
-static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
+static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *Name) {
RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
-
+
DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
Path.Decls = BaseRecord->lookup(N);
return Path.Decls.first != Path.Decls.second;
}
+/// \brief Determine whether the given set of member declarations contains only
+/// static members, nested types, and enumerators.
+template<typename InputIterator>
+static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
+ Decl *D = (*First)->getUnderlyingDecl();
+ if (isa<VarDecl>(D) || isa<TypeDecl>(D) || isa<EnumConstantDecl>(D))
+ return true;
+
+ if (isa<CXXMethodDecl>(D)) {
+ // Determine whether all of the methods are static.
+ bool AllMethodsAreStatic = true;
+ for(; First != Last; ++First) {
+ D = (*First)->getUnderlyingDecl();
+
+ if (!isa<CXXMethodDecl>(D)) {
+ assert(isa<TagDecl>(D) && "Non-function must be a tag decl");
+ break;
+ }
+
+ if (!cast<CXXMethodDecl>(D)->isStatic()) {
+ AllMethodsAreStatic = false;
+ break;
+ }
+ }
+
+ if (AllMethodsAreStatic)
+ return true;
+ }
+
+ return false;
+}
+
/// \brief Perform qualified name lookup into a given context.
///
/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
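A minimal sketch, with illustrative names, of the rule `HasOnlyStaticMembers` supports (C++ [class.member.lookup]p5): a static member is found unambiguously even when it is reachable through more than one base-class subobject, whereas a non-static member in the same position would make the lookup ambiguous.

    struct Base { static int tag; };
    int Base::tag = 0;

    struct Left : Base {};
    struct Right : Base {};
    struct Bottom : Left, Right {};  // two distinct Base subobjects

    int probe() { return Bottom::tag; }  // OK: tag is static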
@@ -1256,7 +1307,7 @@ static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
/// search. If the lookup criteria permits, name lookup may also search
/// in the parent contexts or (for C++ classes) base classes.
///
-/// \param InUnqualifiedLookup true if this is qualified name lookup that
+/// \param InUnqualifiedLookup true if this is qualified name lookup that
/// occurs as part of unqualified name lookup.
///
/// \returns true if lookup succeeded, false if it failed.
@@ -1307,7 +1358,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// If we're performing qualified name lookup into a dependent class,
// then we are actually looking into a current instantiation. If we have any
- // dependent base classes, then we either have to delay lookup until
+ // dependent base classes, then we either have to delay lookup until
// template instantiation time (at which point all bases will be available)
// or we have to fail.
if (!InUnqualifiedLookup && LookupRec->isDependentContext() &&
@@ -1315,7 +1366,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
R.setNotFoundInCurrentInstantiation();
return false;
}
-
+
// Perform lookup into our base classes.
CXXBasePaths Paths;
Paths.setOrigin(LookupRec);
@@ -1328,7 +1379,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
case LookupRedeclarationWithLinkage:
BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
break;
-
+
case LookupTagName:
BaseCallback = &CXXRecordDecl::FindTagMember;
break;
@@ -1336,21 +1387,22 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
case LookupAnyName:
BaseCallback = &LookupAnyMember;
break;
-
+
case LookupUsingDeclName:
// This lookup is for redeclarations only.
-
+
case LookupOperatorName:
case LookupNamespaceName:
case LookupObjCProtocolName:
+ case LookupLabel:
// These lookups will never find a member in a C++ class (or base class).
return false;
-
+
case LookupNestedNameSpecifierName:
BaseCallback = &CXXRecordDecl::FindNestedNameSpecifierMember;
break;
}
-
+
if (!LookupRec->lookupInBases(BaseCallback,
R.getLookupName().getAsOpaquePtr(), Paths))
return false;
@@ -1363,10 +1415,10 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// and includes members from distinct sub-objects, there is an
// ambiguity and the program is ill-formed. Otherwise that set is
// the result of the lookup.
- // FIXME: support using declarations!
QualType SubobjectType;
int SubobjectNumber = 0;
AccessSpecifier SubobjectAccess = AS_none;
+
for (CXXBasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end();
Path != PathEnd; ++Path) {
const CXXBasePathElement &PathElement = Path->back();
@@ -1374,51 +1426,54 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// Pick the best (i.e. most permissive i.e. numerically lowest) access
// across all paths.
SubobjectAccess = std::min(SubobjectAccess, Path->Access);
-
+
// Determine whether we're looking at a distinct sub-object or not.
if (SubobjectType.isNull()) {
// This is the first subobject we've looked at. Record its type.
SubobjectType = Context.getCanonicalType(PathElement.Base->getType());
SubobjectNumber = PathElement.SubobjectNumber;
- } else if (SubobjectType
+ continue;
+ }
+
+ if (SubobjectType
!= Context.getCanonicalType(PathElement.Base->getType())) {
// We found members of the given name in two subobjects of
- // different types. This lookup is ambiguous.
+ // different types. If the declaration sets aren't the same, this
+ // lookup is ambiguous.
+ if (HasOnlyStaticMembers(Path->Decls.first, Path->Decls.second)) {
+ CXXBasePaths::paths_iterator FirstPath = Paths.begin();
+ DeclContext::lookup_iterator FirstD = FirstPath->Decls.first;
+ DeclContext::lookup_iterator CurrentD = Path->Decls.first;
+
+ while (FirstD != FirstPath->Decls.second &&
+ CurrentD != Path->Decls.second) {
+ if ((*FirstD)->getUnderlyingDecl()->getCanonicalDecl() !=
+ (*CurrentD)->getUnderlyingDecl()->getCanonicalDecl())
+ break;
+
+ ++FirstD;
+ ++CurrentD;
+ }
+
+ if (FirstD == FirstPath->Decls.second &&
+ CurrentD == Path->Decls.second)
+ continue;
+ }
+
R.setAmbiguousBaseSubobjectTypes(Paths);
return true;
- } else if (SubobjectNumber != PathElement.SubobjectNumber) {
+ }
+
+ if (SubobjectNumber != PathElement.SubobjectNumber) {
// We have a different subobject of the same type.
// C++ [class.member.lookup]p5:
// A static member, a nested type or an enumerator defined in
// a base class T can unambiguously be found even if an object
// has more than one base class subobject of type T.
- Decl *FirstDecl = *Path->Decls.first;
- if (isa<VarDecl>(FirstDecl) ||
- isa<TypeDecl>(FirstDecl) ||
- isa<EnumConstantDecl>(FirstDecl))
+ if (HasOnlyStaticMembers(Path->Decls.first, Path->Decls.second))
continue;
- if (isa<CXXMethodDecl>(FirstDecl)) {
- // Determine whether all of the methods are static.
- bool AllMethodsAreStatic = true;
- for (DeclContext::lookup_iterator Func = Path->Decls.first;
- Func != Path->Decls.second; ++Func) {
- if (!isa<CXXMethodDecl>(*Func)) {
- assert(isa<TagDecl>(*Func) && "Non-function must be a tag decl");
- break;
- }
-
- if (!cast<CXXMethodDecl>(*Func)->isStatic()) {
- AllMethodsAreStatic = false;
- break;
- }
- }
-
- if (AllMethodsAreStatic)
- continue;
- }
-
// We have found a nonstatic member name in multiple, distinct
// subobjects. Name lookup is ambiguous.
R.setAmbiguousBaseSubobjects(Paths);
@@ -1453,13 +1508,6 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
///
/// @param SS An optional C++ scope-specifier, e.g., "::N::M".
///
-/// @param Name The name of the entity that name lookup will
-/// search for.
-///
-/// @param Loc If provided, the source location where we're performing
-/// name lookup. At present, this is only used to produce diagnostics when
-/// C library functions (like "malloc") are implicitly declared.
-///
/// @param EnteringContext Indicates whether we are going to enter the
/// context of the scope-specifier SS (if present).
///
@@ -1525,21 +1573,21 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
Diag(NameLoc, diag::err_ambiguous_member_multiple_subobjects)
<< Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths)
<< LookupRange;
-
+
DeclContext::lookup_iterator Found = Paths->front().Decls.first;
while (isa<CXXMethodDecl>(*Found) &&
cast<CXXMethodDecl>(*Found)->isStatic())
++Found;
-
+
Diag((*Found)->getLocation(), diag::note_ambiguous_member_found);
-
+
return true;
}
case LookupResult::AmbiguousBaseSubobjectTypes: {
Diag(NameLoc, diag::err_ambiguous_member_multiple_subobject_types)
<< Name << LookupRange;
-
+
CXXBasePaths *Paths = Result.getBasePaths();
std::set<Decl *> DeclsPrinted;
for (CXXBasePaths::paths_iterator Path = Paths->begin(),
@@ -1582,7 +1630,7 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
case LookupResult::AmbiguousReference: {
Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange;
-
+
LookupResult::iterator DI = Result.begin(), DE = Result.end();
for (; DI != DE; ++DI)
Diag((*DI)->getLocation(), diag::note_ambiguous_candidate) << *DI;
@@ -1648,11 +1696,12 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
addAssociatedClassesAndNamespaces(Result, Arg.getAsType());
break;
- case TemplateArgument::Template: {
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
// [...] the namespaces in which any template template arguments are
// defined; and the classes in which any member templates used as
// template template arguments are defined.
- TemplateName Template = Arg.getAsTemplate();
+ TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
if (ClassTemplateDecl *ClassTemplate
= dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) {
DeclContext *Ctx = ClassTemplate->getDeclContext();
@@ -1663,7 +1712,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
}
break;
}
-
+
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::Expression:
@@ -1713,7 +1762,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
// -- If T is a template-id, its associated namespaces and classes are
// the namespace in which the template is defined; for member
- // templates, the member template’s class; the namespaces and classes
+ // templates, the member template's class; the namespaces and classes
// associated with the types of the template arguments provided for
// template type parameters (excluding template template parameters); the
// namespaces in which any template template arguments are defined; and
@@ -1840,7 +1889,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// -- If T is an enumeration type, its associated namespace is
// the namespace in which it is defined. If it is class
- // member, its associated class is the member’s class; else
+ // member, its associated class is the member's class; else
// it has no associated class.
case Type::Enum: {
EnumDecl *Enum = cast<EnumType>(T)->getDecl();
@@ -2032,7 +2081,7 @@ NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
}
/// \brief Find the protocol with the given name, if any.
-ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
+ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
SourceLocation IdLoc) {
Decl *D = LookupSingleName(TUScope, II, IdLoc,
LookupObjCProtocolName);
@@ -2089,7 +2138,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
if (!Class->hasDeclaredCopyConstructor())
DeclareImplicitCopyConstructor(Class);
}
-
+
CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
return Class->lookup(Name);
@@ -2097,7 +2146,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
/// \brief Look for the destructor of the given class.
///
-/// During semantic analysis, this routine should be used in lieu of
+/// During semantic analysis, this routine should be used in lieu of
/// CXXRecordDecl::getDestructor().
///
/// \returns The destructor for this class.
@@ -2233,7 +2282,7 @@ public:
/// of declarations.
class ShadowMapEntry {
typedef llvm::SmallVector<NamedDecl *, 4> DeclVector;
-
+
/// \brief Contains either the solitary NamedDecl * or a vector
/// of declarations.
llvm::PointerUnion<NamedDecl *, DeclVector*> DeclOrVector;
@@ -2245,7 +2294,7 @@ public:
void Destroy();
// Iteration.
- typedef NamedDecl **iterator;
+ typedef NamedDecl * const *iterator;
iterator begin();
iterator end();
};
@@ -2315,7 +2364,7 @@ void VisibleDeclsRecord::ShadowMapEntry::Add(NamedDecl *ND) {
DeclOrVector = ND;
return;
}
-
+
if (NamedDecl *PrevND = DeclOrVector.dyn_cast<NamedDecl *>()) {
// 1 -> 2 elements: create the vector of results and push in the
// existing declaration.
@@ -2335,18 +2384,18 @@ void VisibleDeclsRecord::ShadowMapEntry::Destroy() {
}
}
-VisibleDeclsRecord::ShadowMapEntry::iterator
+VisibleDeclsRecord::ShadowMapEntry::iterator
VisibleDeclsRecord::ShadowMapEntry::begin() {
if (DeclOrVector.isNull())
return 0;
- if (DeclOrVector.dyn_cast<NamedDecl *>())
- return &reinterpret_cast<NamedDecl*&>(DeclOrVector);
+ if (DeclOrVector.is<NamedDecl *>())
+ return DeclOrVector.getAddrOf<NamedDecl *>();
return DeclOrVector.get<DeclVector *>()->begin();
}
-VisibleDeclsRecord::ShadowMapEntry::iterator
+VisibleDeclsRecord::ShadowMapEntry::iterator
VisibleDeclsRecord::ShadowMapEntry::end() {
if (DeclOrVector.isNull())
return 0;
@@ -2360,7 +2409,7 @@ VisibleDeclsRecord::ShadowMapEntry::end() {
NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
// Look through using declarations.
ND = ND->getUnderlyingDecl();
-
+
unsigned IDNS = ND->getIdentifierNamespace();
std::list<ShadowMap>::reverse_iterator SM = ShadowMaps.rbegin();
for (std::list<ShadowMap>::reverse_iterator SMEnd = ShadowMaps.rend();
@@ -2369,12 +2418,12 @@ NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
if (Pos == SM->end())
continue;
- for (ShadowMapEntry::iterator I = Pos->second.begin(),
+ for (ShadowMapEntry::iterator I = Pos->second.begin(),
IEnd = Pos->second.end();
I != IEnd; ++I) {
// A tag declaration does not hide a non-tag declaration.
if ((*I)->hasTagIdentifierNamespace() &&
- (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
+ (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
Decl::IDNS_ObjCProtocol)))
continue;
@@ -2391,7 +2440,7 @@ NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
ND->isFunctionOrFunctionTemplate() &&
SM == ShadowMaps.rbegin())
continue;
-
+
// We've found a declaration that hides this one.
return *I;
}
@@ -2411,22 +2460,44 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// Make sure we don't visit the same context twice.
if (Visited.visitedContext(Ctx->getPrimaryContext()))
return;
-
+
if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
Result.getSema().ForceDeclarationOfImplicitMembers(Class);
// Enumerate all of the results in this context.
- for (DeclContext *CurCtx = Ctx->getPrimaryContext(); CurCtx;
+ for (DeclContext *CurCtx = Ctx->getPrimaryContext(); CurCtx;
CurCtx = CurCtx->getNextContext()) {
- for (DeclContext::decl_iterator D = CurCtx->decls_begin(),
+ for (DeclContext::decl_iterator D = CurCtx->decls_begin(),
DEnd = CurCtx->decls_end();
D != DEnd; ++D) {
- if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
if (Result.isAcceptableDecl(ND)) {
Consumer.FoundDecl(ND, Visited.checkHidden(ND), InBaseClass);
Visited.add(ND);
}
-
+ } else if (ObjCForwardProtocolDecl *ForwardProto
+ = dyn_cast<ObjCForwardProtocolDecl>(*D)) {
+ for (ObjCForwardProtocolDecl::protocol_iterator
+ P = ForwardProto->protocol_begin(),
+ PEnd = ForwardProto->protocol_end();
+ P != PEnd;
+ ++P) {
+ if (Result.isAcceptableDecl(*P)) {
+ Consumer.FoundDecl(*P, Visited.checkHidden(*P), InBaseClass);
+ Visited.add(*P);
+ }
+ }
+ } else if (ObjCClassDecl *Class = dyn_cast<ObjCClassDecl>(*D)) {
+ for (ObjCClassDecl::iterator I = Class->begin(), IEnd = Class->end();
+ I != IEnd; ++I) {
+ ObjCInterfaceDecl *IFace = I->getInterface();
+ if (Result.isAcceptableDecl(IFace)) {
+ Consumer.FoundDecl(IFace, Visited.checkHidden(IFace), InBaseClass);
+ Visited.add(IFace);
+ }
+ }
+ }
+
// Visit transparent contexts and inline namespaces inside this context.
if (DeclContext *InnerCtx = dyn_cast<DeclContext>(*D)) {
if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
@@ -2441,7 +2512,7 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
ShadowContextRAII Shadow(Visited);
DeclContext::udir_iterator I, E;
for (llvm::tie(I, E) = Ctx->getUsingDirectives(); I != E; ++I) {
- LookupVisibleDecls((*I)->getNominatedNamespace(), Result,
+ LookupVisibleDecls((*I)->getNominatedNamespace(), Result,
QualifiedNameLookup, InBaseClass, Consumer, Visited);
}
}
@@ -2455,16 +2526,16 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
BEnd = Record->bases_end();
B != BEnd; ++B) {
QualType BaseType = B->getType();
-
+
// Don't look into dependent bases, because name lookup can't look
// there anyway.
if (BaseType->isDependentType())
continue;
-
+
const RecordType *Record = BaseType->getAs<RecordType>();
if (!Record)
continue;
-
+
// FIXME: It would be nice to be able to determine whether referencing
// a particular member would be ambiguous. For example, given
//
@@ -2483,21 +2554,21 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// or
//
// c->A::member
-
+
// Find results in this base class (and its bases).
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(Record->getDecl(), Result, QualifiedNameLookup,
true, Consumer, Visited);
}
}
-
+
// Traverse the contexts of Objective-C classes.
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx)) {
// Traverse categories.
for (ObjCCategoryDecl *Category = IFace->getCategoryList();
Category; Category = Category->getNextClassCategory()) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(Category, Result, QualifiedNameLookup, false,
+ LookupVisibleDecls(Category, Result, QualifiedNameLookup, false,
Consumer, Visited);
}
@@ -2506,7 +2577,7 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
I = IFace->all_referenced_protocol_begin(),
E = IFace->all_referenced_protocol_end(); I != E; ++I) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
Visited);
}
@@ -2516,35 +2587,35 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
LookupVisibleDecls(IFace->getSuperClass(), Result, QualifiedNameLookup,
true, Consumer, Visited);
}
-
+
// If there is an implementation, traverse it. We do this to find
// synthesized ivars.
if (IFace->getImplementation()) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(IFace->getImplementation(), Result,
+ LookupVisibleDecls(IFace->getImplementation(), Result,
QualifiedNameLookup, true, Consumer, Visited);
}
} else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
for (ObjCProtocolDecl::protocol_iterator I = Protocol->protocol_begin(),
E = Protocol->protocol_end(); I != E; ++I) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
Visited);
}
} else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
for (ObjCCategoryDecl::protocol_iterator I = Category->protocol_begin(),
E = Category->protocol_end(); I != E; ++I) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
Visited);
}
-
+
// If there is an implementation, traverse it.
if (Category->getImplementation()) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(Category->getImplementation(), Result,
+ LookupVisibleDecls(Category->getImplementation(), Result,
QualifiedNameLookup, true, Consumer, Visited);
- }
+ }
}
}
@@ -2555,8 +2626,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
if (!S)
return;
- if (!S->getEntity() ||
- (!S->getParent() &&
+ if (!S->getEntity() ||
+ (!S->getParent() &&
!Visited.alreadyVisitedContext((DeclContext *)S->getEntity())) ||
((DeclContext *)S->getEntity())->isFunctionOrMethod()) {
// Walk through the declarations in this Scope.
@@ -2569,7 +2640,7 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
}
}
}
-
+
// FIXME: C++ [temp.local]p8
DeclContext *Entity = 0;
if (S->getEntity()) {
@@ -2578,7 +2649,7 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
// where we hit the context stored in the next outer scope.
Entity = (DeclContext *)S->getEntity();
DeclContext *OuterCtx = findOuterContext(S).first; // FIXME
-
+
for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
Ctx = Ctx->getLookupParent()) {
if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
@@ -2586,9 +2657,27 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
// For instance methods, look for ivars in the method's interface.
LookupResult IvarResult(Result.getSema(), Result.getLookupName(),
Result.getNameLoc(), Sema::LookupMemberName);
- if (ObjCInterfaceDecl *IFace = Method->getClassInterface())
- LookupVisibleDecls(IFace, IvarResult, /*QualifiedNameLookup=*/false,
+ if (ObjCInterfaceDecl *IFace = Method->getClassInterface()) {
+ LookupVisibleDecls(IFace, IvarResult, /*QualifiedNameLookup=*/false,
/*InBaseClass=*/false, Consumer, Visited);
+
+ // Look for properties from which we can synthesize ivars, if
+ // permitted.
+ if (Result.getSema().getLangOptions().ObjCNonFragileABI2 &&
+ IFace->getImplementation() &&
+ Result.getLookupKind() == Sema::LookupOrdinaryName) {
+ for (ObjCInterfaceDecl::prop_iterator
+ P = IFace->prop_begin(),
+ PEnd = IFace->prop_end();
+ P != PEnd; ++P) {
+ if (Result.getSema().canSynthesizeProvisionalIvar(*P) &&
+ !IFace->lookupInstanceVariable((*P)->getIdentifier())) {
+ Consumer.FoundDecl(*P, Visited.checkHidden(*P), false);
+ Visited.add(*P);
+ }
+ }
+ }
+ }
}
// We've already performed all of the name lookup that we need
@@ -2599,8 +2688,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
if (Ctx->isFunctionOrMethod())
continue;
-
- LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
+
+ LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
/*InBaseClass=*/false, Consumer, Visited);
}
} else if (!S->getParent()) {
@@ -2610,14 +2699,14 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
// FIXME: We would like the translation unit's Scope object to point to the
// translation unit, so we don't need this special "if" branch. However,
// doing so would force the normal C++ name-lookup code to look into the
- // translation unit decl when the IdentifierInfo chains would suffice.
+ // translation unit decl when the IdentifierInfo chains would suffice.
// Once we fix that problem (which is part of a more general "don't look
// in DeclContexts unless we have to" optimization), we can eliminate this.
Entity = Result.getSema().Context.getTranslationUnitDecl();
- LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
+ LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
/*InBaseClass=*/false, Consumer, Visited);
- }
-
+ }
+
if (Entity) {
// Lookup visible declarations in any namespaces found by using
// directives.
@@ -2625,7 +2714,7 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
llvm::tie(UI, UEnd) = UDirs.getNamespacesFor(Entity);
for (; UI != UEnd; ++UI)
LookupVisibleDecls(const_cast<DeclContext *>(UI->getNominatedNamespace()),
- Result, /*QualifiedNameLookup=*/false,
+ Result, /*QualifiedNameLookup=*/false,
/*InBaseClass=*/false, Consumer, Visited);
}
@@ -2667,13 +2756,41 @@ void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
if (!IncludeGlobalScope)
Visited.visitedContext(Context.getTranslationUnitDecl());
ShadowContextRAII Shadow(Visited);
- ::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
+ ::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
/*InBaseClass=*/false, Consumer, Visited);
}
-//----------------------------------------------------------------------------
+/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
+/// If isLocalLabel is true, then this is a definition of an __label__ label
+/// name, otherwise it is a normal label definition or use.
+LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
+ bool isLocalLabel) {
+ // Do a lookup to see if we have a label with this name already.
+ NamedDecl *Res = 0;
+
+ // Local label definitions always shadow existing labels.
+ if (!isLocalLabel)
+ Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration);
+
+ // If we found a label, check to see if it is in the same context as us. When
+ // in a Block, we don't want to reuse a label in an enclosing function.
+ if (Res && Res->getDeclContext() != CurContext)
+ Res = 0;
+
+ if (Res == 0) {
+ // If not forward referenced or defined already, create the backing decl.
+ Res = LabelDecl::Create(Context, CurContext, Loc, II);
+ Scope *S = isLocalLabel ? CurScope : CurScope->getFnParent();
+ assert(S && "Not in a function?");
+ PushOnScopeChains(Res, S, true);
+ }
+
+ return cast<LabelDecl>(Res);
+}
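
The helper added above concentrates the lookup-then-create policy for labels in one place: ordinary label uses are looked up per function and reused, while __label__ declarations always shadow. Below is a minimal self-contained sketch of that policy; the names are invented rather than Sema API, and the block-context check that prevents reusing a label from an enclosing function is omitted:

  #include <map>
  #include <string>

  struct LabelTable {
    std::map<std::string, int> FnLabels; // labels seen in the current function
    int NextId = 0;

    int lookupOrCreate(const std::string &Name, bool IsLocalLabel) {
      if (IsLocalLabel)
        return NextId++;                  // __label__ always introduces a
                                          // fresh label (innermost scope)
      auto It = FnLabels.find(Name);
      if (It != FnLabels.end())
        return It->second;                // reuse a label seen earlier
      return FnLabels[Name] = NextId++;   // first sighting: create and record
    }
  };
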
+
+//===----------------------------------------------------------------------===//
// Typo correction
-//----------------------------------------------------------------------------
+//===----------------------------------------------------------------------===//
namespace {
class TypoCorrectionConsumer : public VisibleDeclConsumer {
@@ -2682,46 +2799,44 @@ class TypoCorrectionConsumer : public VisibleDeclConsumer {
/// \brief The results found that have the smallest edit distance
/// found (so far) with the typo name.
- llvm::SmallVector<NamedDecl *, 4> BestResults;
+ ///
+ /// The boolean value indicates whether there is a keyword with this name.
+ llvm::StringMap<bool, llvm::BumpPtrAllocator> BestResults;
- /// \brief The keywords that have the smallest edit distance.
- llvm::SmallVector<IdentifierInfo *, 4> BestKeywords;
-
/// \brief The best edit distance found so far.
unsigned BestEditDistance;
-
+
public:
explicit TypoCorrectionConsumer(IdentifierInfo *Typo)
- : Typo(Typo->getName()) { }
+ : Typo(Typo->getName()),
+ BestEditDistance((std::numeric_limits<unsigned>::max)()) { }
virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, bool InBaseClass);
+ void FoundName(llvm::StringRef Name);
void addKeywordResult(ASTContext &Context, llvm::StringRef Keyword);
- typedef llvm::SmallVector<NamedDecl *, 4>::const_iterator iterator;
- iterator begin() const { return BestResults.begin(); }
- iterator end() const { return BestResults.end(); }
- void clear_decls() { BestResults.clear(); }
-
- bool empty() const { return BestResults.empty() && BestKeywords.empty(); }
-
- typedef llvm::SmallVector<IdentifierInfo *, 4>::const_iterator
- keyword_iterator;
- keyword_iterator keyword_begin() const { return BestKeywords.begin(); }
- keyword_iterator keyword_end() const { return BestKeywords.end(); }
- bool keyword_empty() const { return BestKeywords.empty(); }
- unsigned keyword_size() const { return BestKeywords.size(); }
-
- unsigned getBestEditDistance() const { return BestEditDistance; }
+ typedef llvm::StringMap<bool, llvm::BumpPtrAllocator>::iterator iterator;
+ iterator begin() { return BestResults.begin(); }
+ iterator end() { return BestResults.end(); }
+ void erase(iterator I) { BestResults.erase(I); }
+ unsigned size() const { return BestResults.size(); }
+ bool empty() const { return BestResults.empty(); }
+
+ bool &operator[](llvm::StringRef Name) {
+ return BestResults[Name];
+ }
+
+ unsigned getBestEditDistance() const { return BestEditDistance; }
};
}
-void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
+void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
bool InBaseClass) {
// Don't consider hidden names for typo correction.
if (Hiding)
return;
-
+
// Only consider entities with identifiers for names, ignoring
// special names (constructors, overloaded operators, selectors,
// etc.).
@@ -2729,48 +2844,114 @@ void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
if (!Name)
return;
+ FoundName(Name->getName());
+}
+
+void TypoCorrectionConsumer::FoundName(llvm::StringRef Name) {
+ using namespace std;
+
+ // Use a simple length-based heuristic to determine the minimum possible
+ // edit distance. If the minimum isn't good enough, bail out early.
+ unsigned MinED = abs((int)Name.size() - (int)Typo.size());
+ if (MinED > BestEditDistance || (MinED && Typo.size() / MinED < 3))
+ return;
+
+ // Compute an upper bound on the allowable edit distance, so that the
+ // edit-distance algorithm can short-circuit.
+ unsigned UpperBound = min(unsigned((Typo.size() + 2) / 3), BestEditDistance);
+
// Compute the edit distance between the typo and the name of this
// entity. If this edit distance is not worse than the best edit
// distance we've seen so far, add it to the list of results.
- unsigned ED = Typo.edit_distance(Name->getName());
- if (!BestResults.empty() || !BestKeywords.empty()) {
- if (ED < BestEditDistance) {
- // This result is better than any we've seen before; clear out
- // the previous results.
- BestResults.clear();
- BestKeywords.clear();
- BestEditDistance = ED;
- } else if (ED > BestEditDistance) {
- // This result is worse than the best results we've seen so far;
- // ignore it.
- return;
- }
- } else
+ unsigned ED = Typo.edit_distance(Name, true, UpperBound);
+ if (ED == 0)
+ return;
+
+ if (ED < BestEditDistance) {
+ // This result is better than any we've seen before; clear out
+ // the previous results.
+ BestResults.clear();
BestEditDistance = ED;
+ } else if (ED > BestEditDistance) {
+ // This result is worse than the best results we've seen so far;
+ // ignore it.
+ return;
+ }
- BestResults.push_back(ND);
+ // Add this name to the list of results. By not assigning a value, we
+ // keep the current value if we've seen this name before (either as a
+ // keyword or as a declaration), or get the default value (not a keyword)
+ // if we haven't seen it before.
+ (void)BestResults[Name];
}
-void TypoCorrectionConsumer::addKeywordResult(ASTContext &Context,
+void TypoCorrectionConsumer::addKeywordResult(ASTContext &Context,
llvm::StringRef Keyword) {
// Compute the edit distance between the typo and this keyword.
// If this edit distance is not worse than the best edit
// distance we've seen so far, add it to the list of results.
unsigned ED = Typo.edit_distance(Keyword);
- if (!BestResults.empty() || !BestKeywords.empty()) {
- if (ED < BestEditDistance) {
- BestResults.clear();
- BestKeywords.clear();
- BestEditDistance = ED;
- } else if (ED > BestEditDistance) {
- // This result is worse than the best results we've seen so far;
- // ignore it.
- return;
- }
- } else
+ if (ED < BestEditDistance) {
+ BestResults.clear();
BestEditDistance = ED;
-
- BestKeywords.push_back(&Context.Idents.get(Keyword));
+ } else if (ED > BestEditDistance) {
+ // This result is worse than the best results we've seen so far;
+ // ignore it.
+ return;
+ }
+
+ BestResults[Keyword] = true;
+}
+
+/// \brief Perform name lookup for a possible result for typo correction.
+static void LookupPotentialTypoResult(Sema &SemaRef,
+ LookupResult &Res,
+ IdentifierInfo *Name,
+ Scope *S, CXXScopeSpec *SS,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ Sema::CorrectTypoContext CTC) {
+ Res.suppressDiagnostics();
+ Res.clear();
+ Res.setLookupName(Name);
+ if (MemberContext) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(MemberContext)) {
+ if (CTC == Sema::CTC_ObjCIvarLookup) {
+ if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(Name)) {
+ Res.addDecl(Ivar);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ if (ObjCPropertyDecl *Prop = Class->FindPropertyDeclaration(Name)) {
+ Res.addDecl(Prop);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ SemaRef.LookupQualifiedName(Res, MemberContext);
+ return;
+ }
+
+ SemaRef.LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false,
+ EnteringContext);
+
+ // Fake ivar lookup; this should really be part of
+ // LookupParsedName.
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ if (Method->isInstanceMethod() && Method->getClassInterface() &&
+ (Res.empty() ||
+ (Res.isSingleResult() &&
+ Res.getFoundDecl()->isDefinedOutsideFunctionOrMethod()))) {
+ if (ObjCIvarDecl *IV
+ = Method->getClassInterface()->lookupInstanceVariable(Name)) {
+ Res.addDecl(IV);
+ Res.resolveKind();
+ }
+ }
+ }
}
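
LookupPotentialTypoResult gives candidate names a strict preference order: in an Objective-C member context, instance variables win when doing ivar lookup, then properties, and only then does ordinary qualified or parsed-name lookup run. A minimal self-contained sketch of that ordering, with invented names and stubbed-out lookups:

  #include <optional>
  #include <string>

  using Result = std::string;

  // Stubs standing in for the real lookups; each returns a hit or nothing.
  static std::optional<Result> lookupIvar(const std::string &) { return std::nullopt; }
  static std::optional<Result> lookupProperty(const std::string &) { return std::nullopt; }
  static std::optional<Result> lookupOrdinary(const std::string &N) { return "decl:" + N; }

  static std::optional<Result>
  lookupPotentialTypoResult(const std::string &Name, bool IvarLookup) {
    if (IvarLookup)
      if (auto R = lookupIvar(Name))    // ivars first, in ivar contexts
        return R;
    if (auto R = lookupProperty(Name))  // then properties
      return R;
    return lookupOrdinary(Name);        // then ordinary lookup
  }

  int main() {
    auto R = lookupPotentialTypoResult("ivarName", /*IvarLookup=*/true);
    (void)R; // falls through to ordinary lookup, since the stubs find nothing
  }
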
/// \brief Try to "correct" a typo in the source code by finding
@@ -2790,7 +2971,7 @@ void TypoCorrectionConsumer::addKeywordResult(ASTContext &Context,
/// \param MemberContext if non-NULL, the context in which to look for
/// a member access expression.
///
-/// \param EnteringContext whether we're entering the context described by
+/// \param EnteringContext whether we're entering the context described by
/// the nested-name-specifier SS.
///
/// \param CTC The context in which typo correction occurs, which impacts the
@@ -2804,21 +2985,13 @@ void TypoCorrectionConsumer::addKeywordResult(ASTContext &Context,
/// may contain the results of name lookup for the correct name or it may be
/// empty.
DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
- DeclContext *MemberContext,
+ DeclContext *MemberContext,
bool EnteringContext,
CorrectTypoContext CTC,
const ObjCObjectPointerType *OPT) {
if (Diags.hasFatalErrorOccurred() || !getLangOptions().SpellChecking)
return DeclarationName();
- // Provide a stop gap for files that are just seriously broken. Trying
- // to correct all typos can turn into a HUGE performance penalty, causing
- // some files to take minutes to get rejected by the parser.
- // FIXME: Is this the right solution?
- if (TyposCorrected == 20)
- return DeclarationName();
- ++TyposCorrected;
-
// We only attempt to correct typos for identifiers.
IdentifierInfo *Typo = Res.getLookupName().getAsIdentifierInfo();
if (!Typo)
@@ -2833,17 +3006,18 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
// instantiation.
if (!ActiveTemplateInstantiations.empty())
return DeclarationName();
-
+
TypoCorrectionConsumer Consumer(Typo);
-
+
// Perform name lookup to find visible, similarly-named entities.
+ bool IsUnqualifiedLookup = false;
if (MemberContext) {
LookupVisibleDecls(MemberContext, Res.getLookupKind(), Consumer);
// Look in qualified interfaces.
if (OPT) {
- for (ObjCObjectPointerType::qual_iterator
- I = OPT->qual_begin(), E = OPT->qual_end();
+ for (ObjCObjectPointerType::qual_iterator
+ I = OPT->qual_begin(), E = OPT->qual_end();
I != E; ++I)
LookupVisibleDecls(*I, Res.getLookupKind(), Consumer);
}
@@ -2851,10 +3025,54 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
DeclContext *DC = computeDeclContext(*SS, EnteringContext);
if (!DC)
return DeclarationName();
-
+
+ // Provide a stop gap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
+ return DeclarationName();
+ ++TyposCorrected;
+
LookupVisibleDecls(DC, Res.getLookupKind(), Consumer);
} else {
- LookupVisibleDecls(S, Res.getLookupKind(), Consumer);
+ IsUnqualifiedLookup = true;
+ UnqualifiedTyposCorrectedMap::iterator Cached
+ = UnqualifiedTyposCorrected.find(Typo);
+ if (Cached == UnqualifiedTyposCorrected.end()) {
+ // Provide a stop gap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
+ return DeclarationName();
+
+ // For unqualified lookup, look through all of the names that we have
+ // seen in this translation unit.
+ for (IdentifierTable::iterator I = Context.Idents.begin(),
+ IEnd = Context.Idents.end();
+ I != IEnd; ++I)
+ Consumer.FoundName(I->getKey());
+
+ // Walk through identifiers in external identifier sources.
+ if (IdentifierInfoLookup *External
+ = Context.Idents.getExternalIdentifierLookup()) {
+ llvm::OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ llvm::StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
+
+ Consumer.FoundName(Name);
+ } while (true);
+ }
+ } else {
+ // Use the cached value, unless it's a keyword. In the keyword case, we'll
+ // end up adding the keyword below.
+ if (Cached->second.first.empty())
+ return DeclarationName();
+
+ if (!Cached->second.second)
+ Consumer.FoundName(Cached->second.first);
+ }
}
// Add context-dependent keywords.
@@ -2868,39 +3086,46 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
WantExpressionKeywords = true;
WantCXXNamedCasts = true;
WantRemainingKeywords = true;
-
+
if (ObjCMethodDecl *Method = getCurMethodDecl())
if (Method->getClassInterface() &&
Method->getClassInterface()->getSuperClass())
Consumer.addKeywordResult(Context, "super");
-
+
break;
-
+
case CTC_NoKeywords:
break;
-
+
case CTC_Type:
WantTypeSpecifiers = true;
break;
-
+
case CTC_ObjCMessageReceiver:
Consumer.addKeywordResult(Context, "super");
// Fall through to handle message receivers like expressions.
-
+
case CTC_Expression:
if (getLangOptions().CPlusPlus)
WantTypeSpecifiers = true;
WantExpressionKeywords = true;
// Fall through to get C++ named casts.
-
+
case CTC_CXXCasts:
WantCXXNamedCasts = true;
break;
-
+
+ case CTC_ObjCPropertyLookup:
+ // FIXME: Add "isa"?
+ break;
+
case CTC_MemberLookup:
if (getLangOptions().CPlusPlus)
Consumer.addKeywordResult(Context, "template");
break;
+
+ case CTC_ObjCIvarLookup:
+ break;
}
if (WantTypeSpecifiers) {
@@ -2912,67 +3137,67 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
// storage-specifiers as well
"extern", "inline", "static", "typedef"
};
-
+
const unsigned NumCTypeSpecs = sizeof(CTypeSpecs) / sizeof(CTypeSpecs[0]);
for (unsigned I = 0; I != NumCTypeSpecs; ++I)
Consumer.addKeywordResult(Context, CTypeSpecs[I]);
-
+
if (getLangOptions().C99)
Consumer.addKeywordResult(Context, "restrict");
if (getLangOptions().Bool || getLangOptions().CPlusPlus)
Consumer.addKeywordResult(Context, "bool");
-
+
if (getLangOptions().CPlusPlus) {
Consumer.addKeywordResult(Context, "class");
Consumer.addKeywordResult(Context, "typename");
Consumer.addKeywordResult(Context, "wchar_t");
-
+
if (getLangOptions().CPlusPlus0x) {
Consumer.addKeywordResult(Context, "char16_t");
Consumer.addKeywordResult(Context, "char32_t");
Consumer.addKeywordResult(Context, "constexpr");
Consumer.addKeywordResult(Context, "decltype");
Consumer.addKeywordResult(Context, "thread_local");
- }
+ }
}
-
+
if (getLangOptions().GNUMode)
Consumer.addKeywordResult(Context, "typeof");
}
-
+
if (WantCXXNamedCasts && getLangOptions().CPlusPlus) {
Consumer.addKeywordResult(Context, "const_cast");
Consumer.addKeywordResult(Context, "dynamic_cast");
Consumer.addKeywordResult(Context, "reinterpret_cast");
Consumer.addKeywordResult(Context, "static_cast");
}
-
+
if (WantExpressionKeywords) {
Consumer.addKeywordResult(Context, "sizeof");
if (getLangOptions().Bool || getLangOptions().CPlusPlus) {
Consumer.addKeywordResult(Context, "false");
Consumer.addKeywordResult(Context, "true");
}
-
+
if (getLangOptions().CPlusPlus) {
- const char *CXXExprs[] = {
- "delete", "new", "operator", "throw", "typeid"
+ const char *CXXExprs[] = {
+ "delete", "new", "operator", "throw", "typeid"
};
const unsigned NumCXXExprs = sizeof(CXXExprs) / sizeof(CXXExprs[0]);
for (unsigned I = 0; I != NumCXXExprs; ++I)
Consumer.addKeywordResult(Context, CXXExprs[I]);
-
+
if (isa<CXXMethodDecl>(CurContext) &&
cast<CXXMethodDecl>(CurContext)->isInstance())
Consumer.addKeywordResult(Context, "this");
-
+
if (getLangOptions().CPlusPlus0x) {
Consumer.addKeywordResult(Context, "alignof");
Consumer.addKeywordResult(Context, "nullptr");
}
}
}
-
+
if (WantRemainingKeywords) {
if (getCurFunctionOrMethodDecl() || getCurBlock()) {
// Statements.
@@ -2981,18 +3206,18 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
const unsigned NumCStmts = sizeof(CStmts) / sizeof(CStmts[0]);
for (unsigned I = 0; I != NumCStmts; ++I)
Consumer.addKeywordResult(Context, CStmts[I]);
-
+
if (getLangOptions().CPlusPlus) {
Consumer.addKeywordResult(Context, "catch");
Consumer.addKeywordResult(Context, "try");
}
-
+
if (S && S->getBreakParent())
Consumer.addKeywordResult(Context, "break");
-
+
if (S && S->getContinueParent())
Consumer.addKeywordResult(Context, "continue");
-
+
if (!getCurFunction()->SwitchStack.empty()) {
Consumer.addKeywordResult(Context, "case");
Consumer.addKeywordResult(Context, "default");
@@ -3013,7 +3238,7 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
Consumer.addKeywordResult(Context, "virtual");
}
}
-
+
if (getLangOptions().CPlusPlus) {
Consumer.addKeywordResult(Context, "using");
@@ -3021,122 +3246,134 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
Consumer.addKeywordResult(Context, "static_assert");
}
}
-
+
// If we haven't found anything, we're done.
- if (Consumer.empty())
+ if (Consumer.empty()) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
+
return DeclarationName();
+ }
- // Only allow a single, closest name in the result set (it's okay to
- // have overloads of that name, though).
- DeclarationName BestName;
- NamedDecl *BestIvarOrPropertyDecl = 0;
- bool FoundIvarOrPropertyDecl = false;
-
- // Check all of the declaration results to find the best name so far.
- for (TypoCorrectionConsumer::iterator I = Consumer.begin(),
+ // Make sure that the user typed at least 3 characters for each correction
+  // made. Otherwise, we don't even bother looking at the results.
+
+ // We also suppress exact matches; those should be handled by a
+ // different mechanism (e.g., one that introduces qualification in
+ // C++).
+ unsigned ED = Consumer.getBestEditDistance();
+ if (ED > 0 && Typo->getName().size() / ED < 3) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
+
+ return DeclarationName();
+ }
+
+ // Weed out any names that could not be found by name lookup.
+ bool LastLookupWasAccepted = false;
+ for (TypoCorrectionConsumer::iterator I = Consumer.begin(),
IEnd = Consumer.end();
- I != IEnd; ++I) {
- if (!BestName)
- BestName = (*I)->getDeclName();
- else if (BestName != (*I)->getDeclName())
- return DeclarationName();
+ I != IEnd; /* Increment in loop. */) {
+ // Keywords are always found.
+ if (I->second) {
+ ++I;
+ continue;
+ }
+
+ // Perform name lookup on this name.
+ IdentifierInfo *Name = &Context.Idents.get(I->getKey());
+ LookupPotentialTypoResult(*this, Res, Name, S, SS, MemberContext,
+ EnteringContext, CTC);
- // \brief Keep track of either an Objective-C ivar or a property, but not
- // both.
- if (isa<ObjCIvarDecl>(*I) || isa<ObjCPropertyDecl>(*I)) {
- if (FoundIvarOrPropertyDecl)
- BestIvarOrPropertyDecl = 0;
- else {
- BestIvarOrPropertyDecl = *I;
- FoundIvarOrPropertyDecl = true;
+ switch (Res.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ // We didn't find this name in our scope, or didn't like what we found;
+ // ignore it.
+ Res.suppressDiagnostics();
+ {
+ TypoCorrectionConsumer::iterator Next = I;
+ ++Next;
+ Consumer.erase(I);
+ I = Next;
}
+ LastLookupWasAccepted = false;
+ break;
+
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ ++I;
+ LastLookupWasAccepted = true;
+ break;
+ }
+
+ if (Res.isAmbiguous()) {
+ // We don't deal with ambiguities.
+ Res.suppressDiagnostics();
+ Res.clear();
+ return DeclarationName();
}
}
- // Now check all of the keyword results to find the best name.
- switch (Consumer.keyword_size()) {
- case 0:
- // No keywords matched.
- break;
-
- case 1:
- // If we already have a name
- if (!BestName) {
- // We did not have anything previously,
- BestName = *Consumer.keyword_begin();
- } else if (BestName.getAsIdentifierInfo() == *Consumer.keyword_begin()) {
- // We have a declaration with the same name as a context-sensitive
- // keyword. The keyword takes precedence.
- BestIvarOrPropertyDecl = 0;
- FoundIvarOrPropertyDecl = false;
- Consumer.clear_decls();
- } else if (CTC == CTC_ObjCMessageReceiver &&
- (*Consumer.keyword_begin())->isStr("super")) {
- // In an Objective-C message send, give the "super" keyword a slight
- // edge over entities not in function or method scope.
- for (TypoCorrectionConsumer::iterator I = Consumer.begin(),
- IEnd = Consumer.end();
- I != IEnd; ++I) {
- if ((*I)->getDeclName() == BestName) {
- if ((*I)->getDeclContext()->isFunctionOrMethod())
- return DeclarationName();
- }
- }
-
- // Everything found was outside a function or method; the 'super'
- // keyword takes precedence.
- BestIvarOrPropertyDecl = 0;
- FoundIvarOrPropertyDecl = false;
- Consumer.clear_decls();
- BestName = *Consumer.keyword_begin();
- } else {
- // Name collision; we will not correct typos.
+ // If only a single name remains, return that result.
+ if (Consumer.size() == 1) {
+ IdentifierInfo *Name = &Context.Idents.get(Consumer.begin()->getKey());
+ if (Consumer.begin()->second) {
+ Res.suppressDiagnostics();
+ Res.clear();
+
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0) {
+ Res.setLookupName(Typo);
return DeclarationName();
}
- break;
-
- default:
- // Name collision; we will not correct typos.
- return DeclarationName();
- }
-
- // BestName is the closest viable name to what the user
- // typed. However, to make sure that we don't pick something that's
- // way off, make sure that the user typed at least 3 characters for
- // each correction.
- unsigned ED = Consumer.getBestEditDistance();
- if (ED == 0 || !BestName.getAsIdentifierInfo() ||
- (BestName.getAsIdentifierInfo()->getName().size() / ED) < 3)
- return DeclarationName();
- // Perform name lookup again with the name we chose, and declare
- // success if we found something that was not ambiguous.
- Res.clear();
- Res.setLookupName(BestName);
-
- // If we found an ivar or property, add that result; no further
- // lookup is required.
- if (BestIvarOrPropertyDecl)
- Res.addDecl(BestIvarOrPropertyDecl);
- // If we're looking into the context of a member, perform qualified
- // name lookup on the best name.
- else if (!Consumer.keyword_empty()) {
- // The best match was a keyword. Return it.
- return BestName;
- } else if (MemberContext)
- LookupQualifiedName(Res, MemberContext);
- // Perform lookup as if we had just parsed the best name.
- else
- LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false,
- EnteringContext);
+ } else if (!LastLookupWasAccepted) {
+ // Perform name lookup on this name.
+ LookupPotentialTypoResult(*this, Res, Name, S, SS, MemberContext,
+ EnteringContext, CTC);
+ }
- if (Res.isAmbiguous()) {
+ // Record the correction for unqualified lookup.
+ if (IsUnqualifiedLookup)
+ UnqualifiedTyposCorrected[Typo]
+ = std::make_pair(Name->getName(), Consumer.begin()->second);
+
+ return &Context.Idents.get(Consumer.begin()->getKey());
+ }
+ else if (Consumer.size() > 1 && CTC == CTC_ObjCMessageReceiver
+ && Consumer["super"]) {
+    // Prefer 'super' when we're completing in a message-receiver
+ // context.
Res.suppressDiagnostics();
- return DeclarationName();
+ Res.clear();
+
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0) {
+ Res.setLookupName(Typo);
+ return DeclarationName();
+ }
+
+ // Record the correction for unqualified lookup.
+ if (IsUnqualifiedLookup)
+ UnqualifiedTyposCorrected[Typo]
+ = std::make_pair("super", Consumer.begin()->second);
+
+ return &Context.Idents.get("super");
}
- if (Res.getResultKind() != LookupResult::NotFound)
- return BestName;
-
+ Res.suppressDiagnostics();
+ Res.setLookupName(Typo);
+ Res.clear();
+ // Record the correction for unqualified lookup.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
+
return DeclarationName();
}
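
Two cheap filters now gate the edit-distance work in FoundName: a length-difference lower bound that can reject a name before the O(n*m) computation runs, and the rule that a correction may cost at most one edit per three characters typed. Below is a minimal self-contained sketch of that candidate filter; the names are invented, std::map stands in for llvm::StringMap, and the UpperBound short-circuit taken by the real edit_distance call is omitted:

  #include <algorithm>
  #include <cstdlib>
  #include <limits>
  #include <map>
  #include <string>
  #include <vector>

  // Plain Levenshtein distance (single rolling row).
  static unsigned editDistance(const std::string &A, const std::string &B) {
    std::vector<unsigned> Row(B.size() + 1);
    for (unsigned j = 0; j <= B.size(); ++j) Row[j] = j;
    for (unsigned i = 1; i <= A.size(); ++i) {
      unsigned Prev = Row[0]; Row[0] = i;
      for (unsigned j = 1; j <= B.size(); ++j) {
        unsigned Cur = std::min({ Row[j] + 1, Row[j - 1] + 1,
                                  Prev + (A[i - 1] != B[j - 1] ? 1u : 0u) });
        Prev = Row[j]; Row[j] = Cur;
      }
    }
    return Row[B.size()];
  }

  struct TypoConsumer {
    std::string Typo;
    unsigned Best = std::numeric_limits<unsigned>::max();
    std::map<std::string, bool> BestResults; // name -> "is a keyword"

    void foundName(const std::string &Name) {
      // The length difference bounds the edit distance from below, so a
      // badly mismatched length rejects the name without computing anything.
      unsigned MinED = std::abs((int)Name.size() - (int)Typo.size());
      if (MinED > Best || (MinED && Typo.size() / MinED < 3))
        return;
      unsigned ED = editDistance(Typo, Name);
      if (ED == 0 || ED > Best)
        return;                 // exact matches and worse candidates: skip
      if (ED < Best) {
        BestResults.clear();    // strictly better: drop everything older
        Best = ED;
      }
      BestResults[Name];        // ties: keep, preserving any keyword flag
    }
  };

  int main() {
    TypoConsumer C{"namespce"};
    for (const char *N : { "namespace", "nullptr", "noexcept" })
      C.foundName(N);
    // C.BestResults now holds only "namespace", at distance 1.
  }
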
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
index 7181d58..b086ca7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -31,7 +31,8 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
Selector SetterSel,
Decl *ClassCategory,
bool *isOverridingProperty,
- tok::ObjCKeywordKind MethodImplKind) {
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
// default is readwrite!
@@ -70,6 +71,9 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
GetterSel, SetterSel,
isAssign, isReadWrite,
Attributes, TSI, MethodImplKind);
+ if (lexicalDC)
+ Res->setLexicalDeclContext(lexicalDC);
+
// Validate the attributes on the @property.
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
return Res;
@@ -89,16 +93,25 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
// Diagnose if this property is already in continuation class.
DeclContext *DC = cast<DeclContext>(CDecl);
IdentifierInfo *PropertyId = FD.D.getIdentifier();
-
- if (ObjCPropertyDecl *prevDecl =
- ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) {
- Diag(AtLoc, diag::err_duplicate_property);
- Diag(prevDecl->getLocation(), diag::note_property_declare);
- return 0;
- }
-
+ ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
+
+ if (CCPrimary)
+ // Check for duplicate declaration of this property in current and
+ // other class extensions.
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ CCPrimary->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ if (ObjCPropertyDecl *prevDecl =
+ ObjCPropertyDecl::findPropertyDecl(ClsExtDecl, PropertyId)) {
+ Diag(AtLoc, diag::err_duplicate_property);
+ Diag(prevDecl->getLocation(), diag::note_property_declare);
+ return 0;
+ }
+ }
+
// Create a new ObjCPropertyDecl with the DeclContext being
// the class extension.
+ // FIXME. We should really be using CreatePropertyDecl for this.
ObjCPropertyDecl *PDecl =
ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(),
PropertyId, AtLoc, T);
@@ -106,12 +119,13 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
-
+ // Set setter/getter selector name. Needed later.
+ PDecl->setGetterName(GetterSel);
+ PDecl->setSetterName(SetterSel);
DC->addDecl(PDecl);
// We need to look in the @interface to see if the @property was
// already declared.
- ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
if (!CCPrimary) {
Diag(CDecl->getLocation(), diag::err_continuation_class);
*isOverridingProperty = true;
@@ -137,9 +151,9 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
// A case of continuation class adding a new property in the class. This
// is not what it was meant for. However, gcc supports it and so should we.
// Make sure setter/getters are declared here.
- ProcessPropertyDecl(PDecl, CCPrimary);
+ ProcessPropertyDecl(PDecl, CCPrimary, /* redeclaredProperty = */ 0,
+ /* lexicalDC = */ CDecl);
return PDecl;
-
}
// The property 'PIDecl's readonly attribute will be over-ridden
@@ -172,7 +186,8 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
PIDecl->getGetterName(),
PIDecl->getSetterName(),
CCPrimary, isOverridingProperty,
- MethodImplKind);
+ MethodImplKind,
+ /* lexicalDC = */ CDecl);
PIDecl = cast<ObjCPropertyDecl>(ProtocolPtrTy);
}
PIDecl->makeitReadWriteAttribute();
@@ -182,13 +197,22 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
PIDecl->setSetterName(SetterSel);
} else {
- Diag(AtLoc, diag::err_use_continuation_class)
+ // Tailor the diagnostics for the common case where a readwrite
+ // property is declared both in the @interface and the continuation.
+ // This is a common error where the user often intended the original
+ // declaration to be readonly.
+ unsigned diag =
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
+ (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
+ Diag(AtLoc, diag)
<< CCPrimary->getDeclName();
Diag(PIDecl->getLocation(), diag::note_property_declare);
}
*isOverridingProperty = true;
// Make sure setter decl is synthesized, and added to primary class's list.
- ProcessPropertyDecl(PIDecl, CCPrimary);
+ ProcessPropertyDecl(PIDecl, CCPrimary, PDecl, CDecl);
return 0;
}
@@ -275,6 +299,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ else if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
PDecl->setPropertyAttributesAsWritten(PDecl->getPropertyAttributes());
@@ -297,7 +323,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
bool Synthesize,
Decl *ClassCatImpDecl,
IdentifierInfo *PropertyId,
- IdentifierInfo *PropertyIvar) {
+ IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc) {
ObjCContainerDecl *ClassImpDecl =
cast_or_null<ObjCContainerDecl>(ClassCatImpDecl);
// Make sure we have a context for the property implementation declaration.
@@ -324,6 +351,16 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName();
return 0;
}
+ unsigned PIkind = property->getPropertyAttributesAsWritten();
+ if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic |
+ ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) {
+ if (AtLoc.isValid())
+ Diag(AtLoc, diag::warn_implicit_atomic_property);
+ else
+ Diag(IC->getLocation(), diag::warn_auto_implicit_atomic_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
if (const ObjCCategoryDecl *CD =
dyn_cast<ObjCCategoryDecl>(property->getDeclContext())) {
if (!CD->IsClassExtension()) {
@@ -373,7 +410,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!Ivar) {
Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl, PropertyLoc,
PropertyIvar, PropType, /*Dinfo=*/0,
- ObjCIvarDecl::Protected,
+ ObjCIvarDecl::Private,
(Expr *)0, true);
ClassImpDecl->addDecl(Ivar);
IDecl->makeDeclVisibleInContext(Ivar, false);
@@ -403,8 +440,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Context.canAssignObjCInterfaces(
PropType->getAs<ObjCObjectPointerType>(),
IvarType->getAs<ObjCObjectPointerType>());
- else
- compat = (CheckAssignmentConstraints(PropType, IvarType) == Compatible);
+ else {
+ SourceLocation Loc = PropertyIvarLoc;
+ if (Loc.isInvalid())
+ Loc = PropertyLoc;
+ compat = (CheckAssignmentConstraints(Loc, PropType, IvarType)
+ == Compatible);
+ }
if (!compat) {
Diag(PropertyLoc, diag::error_property_ivar_type)
<< property->getDeclName() << PropType
@@ -452,17 +494,18 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
(Synthesize ?
ObjCPropertyImplDecl::Synthesize
: ObjCPropertyImplDecl::Dynamic),
- Ivar);
+ Ivar, PropertyIvarLoc);
if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) {
getterMethod->createImplicitParams(Context, IDecl);
- if (getLangOptions().CPlusPlus && Synthesize) {
+ if (getLangOptions().CPlusPlus && Synthesize &&
+ Ivar->getType()->isRecordType()) {
// For Objective-C++, need to synthesize the AST for the IVAR object to be
// returned by the getter as it must conform to C++'s copy-return rules.
// FIXME. Eventually we want to do this for Objective-C as well.
ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl,SelfDecl->getType(),
- SourceLocation());
+ new (Context) DeclRefExpr(SelfDecl, SelfDecl->getType(),
+ VK_RValue, SourceLocation());
Expr *IvarRefExpr =
new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
SelfExpr, true, true);
@@ -476,27 +519,28 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!Res.isInvalid()) {
Expr *ResExpr = Res.takeAs<Expr>();
if (ResExpr)
- ResExpr = MaybeCreateCXXExprWithTemporaries(ResExpr);
+ ResExpr = MaybeCreateExprWithCleanups(ResExpr);
PIDecl->setGetterCXXConstructor(ResExpr);
}
}
}
if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
setterMethod->createImplicitParams(Context, IDecl);
- if (getLangOptions().CPlusPlus && Synthesize) {
+ if (getLangOptions().CPlusPlus && Synthesize
+ && Ivar->getType()->isRecordType()) {
// FIXME. Eventually we want to do this for Objective-C as well.
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl,SelfDecl->getType(),
- SourceLocation());
+ new (Context) DeclRefExpr(SelfDecl, SelfDecl->getType(),
+ VK_RValue, SourceLocation());
Expr *lhs =
new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
SelfExpr, true, true);
ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
ParmVarDecl *Param = (*P);
- Expr *rhs = new (Context) DeclRefExpr(Param,Param->getType(),
- SourceLocation());
- ExprResult Res = BuildBinOp(S, SourceLocation(),
+ Expr *rhs = new (Context) DeclRefExpr(Param, Param->getType(),
+ VK_LValue, SourceLocation());
+ ExprResult Res = BuildBinOp(S, lhs->getLocEnd(),
BO_Assign, lhs, rhs);
PIDecl->setSetterCXXAssignment(Res.takeAs<Expr>());
}
@@ -519,11 +563,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return 0;
}
IC->addPropertyImplementation(PIDecl);
- if (getLangOptions().ObjCNonFragileABI2) {
+ if (getLangOptions().ObjCDefaultSynthProperties &&
+ getLangOptions().ObjCNonFragileABI2) {
    // Diagnose if an ivar was lazily synthesized due to a previous
// use and if 1) property is @dynamic or 2) property is synthesized
// but it requires an ivar of different name.
- ObjCInterfaceDecl *ClassDeclared;
+ ObjCInterfaceDecl *ClassDeclared=0;
ObjCIvarDecl *Ivar = 0;
if (!Synthesize)
Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared);
@@ -622,7 +667,7 @@ bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
GetterMethod->getResultType() != property->getType()) {
AssignConvertType result = Incompatible;
if (property->getType()->isObjCObjectPointerType())
- result = CheckAssignmentConstraints(GetterMethod->getResultType(),
+ result = CheckAssignmentConstraints(Loc, GetterMethod->getResultType(),
property->getType());
if (result != Compatible) {
Diag(Loc, diag::warn_accessor_property_type_mismatch)
@@ -966,10 +1011,16 @@ void Sema::DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
continue;
}
- ActOnPropertyImplDecl(S, IMPDecl->getLocation(), IMPDecl->getLocation(),
- true, IMPDecl,
- Prop->getIdentifier(), Prop->getIdentifier());
- }
+
+ // We use invalid SourceLocations for the synthesized ivars since they
+ // aren't really synthesized at a particular location; they just exist.
+ // Saying that they are located at the @implementation isn't really going
+ // to help users.
+ ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
+ true,IMPDecl,
+ Prop->getIdentifier(), Prop->getIdentifier(),
+ SourceLocation());
+ }
}
void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
@@ -1030,7 +1081,32 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
E = IDecl->prop_end();
I != E; ++I) {
ObjCPropertyDecl *Property = (*I);
+ ObjCMethodDecl *GetterMethod = 0;
+ ObjCMethodDecl *SetterMethod = 0;
+ bool LookedUpGetterSetter = false;
+
unsigned Attributes = Property->getPropertyAttributes();
+ unsigned AttributesAsWrittern = Property->getPropertyAttributesAsWritten();
+
+ if (!(AttributesAsWrittern & ObjCPropertyDecl::OBJC_PR_atomic) &&
+ !(AttributesAsWrittern & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ LookedUpGetterSetter = true;
+ if (GetterMethod) {
+ Diag(GetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 0;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ if (SetterMethod) {
+ Diag(SetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 1;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ }
+
// We only care about readwrite atomic property.
if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) ||
!(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite))
@@ -1039,10 +1115,11 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
= IMPDecl->FindPropertyImplDecl(Property->getIdentifier())) {
if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
- ObjCMethodDecl *GetterMethod =
- IMPDecl->getInstanceMethod(Property->getGetterName());
- ObjCMethodDecl *SetterMethod =
- IMPDecl->getInstanceMethod(Property->getSetterName());
+ if (!LookedUpGetterSetter) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ LookedUpGetterSetter = true;
+ }
if ((GetterMethod && !SetterMethod) || (!GetterMethod && SetterMethod)) {
SourceLocation MethodLoc =
(GetterMethod ? GetterMethod->getLocation()
@@ -1055,13 +1132,27 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
}
+/// AddPropertyAttrs - Propagates attributes from a property to the
+/// implicitly-declared getter or setter for that property.
+static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
+ ObjCPropertyDecl *Property) {
+ // Should we just clone all attributes over?
+ if (DeprecatedAttr *A = Property->getAttr<DeprecatedAttr>())
+ PropertyMethod->addAttr(A->clone(S.Context));
+ if (UnavailableAttr *A = Property->getAttr<UnavailableAttr>())
+ PropertyMethod->addAttr(A->clone(S.Context));
+}
+
/// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods
/// have the property type and issue diagnostics if they don't.
/// Also synthesize a getter/setter method if none exist (and update the
/// appropriate lookup tables). FIXME: Should reconsider if adding synthesized
/// methods is the "right" thing to do.
void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
- ObjCContainerDecl *CD) {
+ ObjCContainerDecl *CD,
+ ObjCPropertyDecl *redeclaredProperty,
+ ObjCContainerDecl *lexicalDC) {
+
ObjCMethodDecl *GetterMethod, *SetterMethod;
GetterMethod = CD->getInstanceMethod(property->getGetterName());
@@ -1096,8 +1187,12 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
// No instance method of same name as property getter name was found.
// Declare a getter method and add it to the list of methods
// for this class.
- GetterMethod = ObjCMethodDecl::Create(Context, property->getLocation(),
- property->getLocation(), property->getGetterName(),
+ SourceLocation Loc = redeclaredProperty ?
+ redeclaredProperty->getLocation() :
+ property->getLocation();
+
+ GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getGetterName(),
property->getType(), 0, CD, true, false, true,
false,
(property->getPropertyImplementation() ==
@@ -1105,11 +1200,13 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCMethodDecl::Optional :
ObjCMethodDecl::Required);
CD->addDecl(GetterMethod);
+
+ AddPropertyAttrs(*this, GetterMethod, property);
+
// FIXME: Eventually this shouldn't be needed, as the lexical context
// and the real context should be the same.
- if (DeclContext *lexicalDC = property->getLexicalDeclContext())
+ if (lexicalDC)
GetterMethod->setLexicalDeclContext(lexicalDC);
-
} else
    // A user-declared getter will be synthesized when @synthesize of
// the property with the same name is seen in the @implementation
@@ -1123,19 +1220,22 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
// No instance method of same name as property setter name was found.
// Declare a setter method and add it to the list of methods
// for this class.
- SetterMethod = ObjCMethodDecl::Create(Context, property->getLocation(),
- property->getLocation(),
- property->getSetterName(),
- Context.VoidTy, 0, CD, true, false, true,
- false,
+ SourceLocation Loc = redeclaredProperty ?
+ redeclaredProperty->getLocation() :
+ property->getLocation();
+
+ SetterMethod =
+ ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getSetterName(), Context.VoidTy, 0,
+ CD, true, false, true, false,
(property->getPropertyImplementation() ==
ObjCPropertyDecl::Optional) ?
- ObjCMethodDecl::Optional :
- ObjCMethodDecl::Required);
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+
// Invent the arguments for the setter. We don't bother making a
// nice name for the argument.
- ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod,
- property->getLocation(),
+ ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod, Loc,
property->getIdentifier(),
property->getType(),
/*TInfo=*/0,
@@ -1143,10 +1243,13 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
SC_None,
0);
SetterMethod->setMethodParams(Context, &Argument, 1, 1);
+
+ AddPropertyAttrs(*this, SetterMethod, property);
+
CD->addDecl(SetterMethod);
// FIXME: Eventually this shouldn't be needed, as the lexical context
// and the real context should be the same.
- if (DeclContext *lexicalDC = property->getLexicalDeclContext())
+ if (lexicalDC)
SetterMethod->setLexicalDeclContext(lexicalDC);
} else
    // A user-declared setter will be synthesized when @synthesize of
@@ -1253,6 +1356,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
}
if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
+ &&!(Attributes & ObjCDeclSpec::DQ_PR_readonly)
&& getLangOptions().getGCMode() == LangOptions::GCOnly
&& PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
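
Several of the SemaObjCProperty changes pivot on the attributes as written rather than the effective attributes: a property that spells neither atomic nor nonatomic is implicitly atomic, and the new warnings fire when such a property meets an @synthesize or a hand-written accessor. A minimal sketch of the underlying flag test, with invented names:

  #include <cstdio>

  enum PropertyAttr : unsigned {
    PA_atomic    = 1u << 0,
    PA_nonatomic = 1u << 1,
  };

  static void checkImplicitAtomic(unsigned AttrsAsWritten, bool HasCustomAccessor) {
    // Neither spelling present: the property is implicitly atomic.
    if ((AttrsAsWritten & (PA_atomic | PA_nonatomic)) == 0 && HasCustomAccessor)
      std::printf("warning: property is implicitly atomic; "
                  "spell 'atomic' or 'nonatomic' explicitly\n");
  }

  int main() {
    checkImplicitAtomic(0, true);            // warns
    checkImplicitAtomic(PA_nonatomic, true); // quiet: atomicity was spelled out
  }
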
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
index 11b4bb3..6fb789c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -23,8 +23,10 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
@@ -32,9 +34,20 @@
namespace clang {
using namespace sema;
+/// A convenience routine for creating a decayed reference to a
+/// function.
+static Expr *
+CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn,
+ SourceLocation Loc = SourceLocation()) {
+ Expr *E = new (S.Context) DeclRefExpr(Fn, Fn->getType(), VK_LValue, Loc);
+ S.DefaultFunctionArrayConversion(E);
+ return E;
+}
+
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
- StandardConversionSequence &SCS);
+ StandardConversionSequence &SCS,
+ bool CStyle);
static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence& User,
@@ -158,7 +171,10 @@ void StandardConversionSequence::setAsIdentityConversion() {
DeprecatedStringLiteralToCharPtr = false;
ReferenceBinding = false;
DirectBinding = false;
- RRefBinding = false;
+ IsLvalueReference = true;
+ BindsToFunctionLvalue = false;
+ BindsToRvalue = false;
+ BindsImplicitObjectArgumentWithoutRefQualifier = false;
CopyConstructor = 0;
}
@@ -189,6 +205,7 @@ bool StandardConversionSequence::isPointerConversionToBool() const {
(getFromType()->isPointerType() ||
getFromType()->isObjCObjectPointerType() ||
getFromType()->isBlockPointerType() ||
+ getFromType()->isNullPtrType() ||
First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
return true;
@@ -323,7 +340,7 @@ namespace {
TemplateArgument SecondArg;
};
}
-
+
/// \brief Convert from Sema's representation of template deduction information
/// to the form used in overload-candidate information.
OverloadCandidate::DeductionFailureInfo
@@ -339,12 +356,12 @@ static MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
break;
-
+
case Sema::TDK_Incomplete:
case Sema::TDK_InvalidExplicitArguments:
Result.Data = Info.Param.getOpaqueValue();
break;
-
+
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified: {
// FIXME: Should allocate from normal heap so that we can free this later.
@@ -355,16 +372,16 @@ static MakeDeductionFailureInfo(ASTContext &Context,
Result.Data = Saved;
break;
}
-
+
case Sema::TDK_SubstitutionFailure:
Result.Data = Info.take();
break;
-
+
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
- break;
+ break;
}
-
+
return Result;
}
@@ -377,7 +394,7 @@ void OverloadCandidate::DeductionFailureInfo::Destroy() {
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
break;
-
+
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
// FIXME: Destroy the data?
@@ -388,15 +405,15 @@ void OverloadCandidate::DeductionFailureInfo::Destroy() {
    // FIXME: Destroy the template argument list?
Data = 0;
break;
-
+
// Unhandled
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
break;
}
}
-
-TemplateParameter
+
+TemplateParameter
OverloadCandidate::DeductionFailureInfo::getTemplateParameter() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
@@ -405,24 +422,24 @@ OverloadCandidate::DeductionFailureInfo::getTemplateParameter() {
case Sema::TDK_TooFewArguments:
case Sema::TDK_SubstitutionFailure:
return TemplateParameter();
-
+
case Sema::TDK_Incomplete:
case Sema::TDK_InvalidExplicitArguments:
- return TemplateParameter::getFromOpaqueValue(Data);
+ return TemplateParameter::getFromOpaqueValue(Data);
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
return static_cast<DFIParamWithArguments*>(Data)->Param;
-
+
// Unhandled
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
break;
}
-
+
return TemplateParameter();
}
-
+
TemplateArgumentList *
OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
@@ -438,7 +455,7 @@ OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() {
case Sema::TDK_SubstitutionFailure:
return static_cast<TemplateArgumentList*>(Data);
-
+
// Unhandled
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
@@ -461,16 +478,16 @@ const TemplateArgument *OverloadCandidate::DeductionFailureInfo::getFirstArg() {
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
- return &static_cast<DFIParamWithArguments*>(Data)->FirstArg;
+ return &static_cast<DFIParamWithArguments*>(Data)->FirstArg;
// Unhandled
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
break;
}
-
+
return 0;
-}
+}
const TemplateArgument *
OverloadCandidate::DeductionFailureInfo::getSecondArg() {
@@ -493,7 +510,7 @@ OverloadCandidate::DeductionFailureInfo::getSecondArg() {
case Sema::TDK_FailedOverloadResolution:
break;
}
-
+
return 0;
}
@@ -501,7 +518,7 @@ void OverloadCandidateSet::clear() {
inherited::clear();
Functions.clear();
}
-
+
// IsOverload - Determine whether the given New declaration is an
// overload of the declarations in Old. This routine returns false if
// New and Old cannot be overloaded, e.g., if New has the same
@@ -582,10 +599,12 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
Match = *I;
return Ovl_Match;
}
- } else if (isa<UsingDecl>(OldD) || isa<TagDecl>(OldD)) {
+ } else if (isa<UsingDecl>(OldD)) {
// We can overload with these, which can show up when doing
// redeclaration checks for UsingDecls.
assert(Old.getLookupKind() == LookupUsingDeclName);
+ } else if (isa<TagDecl>(OldD)) {
+ // We can always overload with tags by hiding them.
} else if (isa<UnresolvedUsingValueDecl>(OldD)) {
// Optimistically assume that an unresolved using decl will
// overload; if it doesn't, we'll have to diagnose during
@@ -632,8 +651,8 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
isa<FunctionNoProtoType>(NewQType.getTypePtr()))
return false;
- FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
- FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);
+ const FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
+ const FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);
// The signature of a function includes the types of its
// parameters (C++ 1.3.10), which includes the presence or absence
@@ -664,7 +683,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return true;
// If the function is a class member, its signature includes the
- // cv-qualifiers (if any) on the function itself.
+ // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
//
// As part of this, also check whether one of the member functions
// is static, in which case they are not overloads (C++
@@ -675,9 +694,26 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
if (OldMethod && NewMethod &&
!OldMethod->isStatic() && !NewMethod->isStatic() &&
- OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers())
+ (OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers() ||
+ OldMethod->getRefQualifier() != NewMethod->getRefQualifier())) {
+ if (!UseUsingDeclRules &&
+ OldMethod->getRefQualifier() != NewMethod->getRefQualifier() &&
+ (OldMethod->getRefQualifier() == RQ_None ||
+ NewMethod->getRefQualifier() == RQ_None)) {
+ // C++0x [over.load]p2:
+ // - Member function declarations with the same name and the same
+ // parameter-type-list as well as member function template
+ // declarations with the same name, the same parameter-type-list, and
+ // the same template parameter lists cannot be overloaded if any of
+ // them, but not all, have a ref-qualifier (8.3.5).
+ Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
+ << NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
+ Diag(OldMethod->getLocation(), diag::note_previous_declaration);
+ }
+
return true;
-
+ }
+
// The signatures match; this is not an overload.
return false;
}
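A minimal sketch of the [over.load]p2 rule the new diagnostic enforces
(hypothetical code, C++0x mode assumed):

    struct S {
      void f() &;    // every 'f' overload has a ref-qualifier: these overload
      void f() &&;
      void g() &;
      void g();      // error: only some 'g' overloads have a ref-qualifier
    };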
@@ -708,11 +744,12 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
static ImplicitConversionSequence
TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
- bool InOverloadResolution) {
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle) {
ImplicitConversionSequence ICS;
if (IsStandardConversion(S, From, ToType, InOverloadResolution,
- ICS.Standard)) {
+ ICS.Standard, CStyle)) {
ICS.setStandard();
return ICS;
}
@@ -737,20 +774,20 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
ICS.Standard.setAsIdentityConversion();
ICS.Standard.setFromType(FromType);
ICS.Standard.setAllToTypes(ToType);
-
+
// We don't actually check at this point whether there is a valid
// copy/move constructor, since overloading just assumes that it
// exists. When we actually perform initialization, we'll find the
// appropriate constructor to copy the returned object, if needed.
ICS.Standard.CopyConstructor = 0;
-
+
// Determine whether this is considered a derived-to-base conversion.
if (!S.Context.hasSameUnqualifiedType(FromType, ToType))
ICS.Standard.Second = ICK_Derived_To_Base;
-
+
return ICS;
}
-
+
if (SuppressUserConversions) {
// We're not in the case above, so there is no conversion that
// we can perform.
@@ -823,12 +860,14 @@ bool Sema::TryImplicitConversion(InitializationSequence &Sequence,
Expr *Initializer,
bool SuppressUserConversions,
bool AllowExplicitConversions,
- bool InOverloadResolution) {
+ bool InOverloadResolution,
+ bool CStyle) {
ImplicitConversionSequence ICS
= clang::TryImplicitConversion(*this, Initializer, Entity.getType(),
SuppressUserConversions,
- AllowExplicitConversions,
- InOverloadResolution);
+ AllowExplicitConversions,
+ InOverloadResolution,
+ CStyle);
if (ICS.isBad()) return true;
// Perform the actual conversion.
@@ -856,34 +895,66 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
ICS = clang::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
AllowExplicit,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
-
-/// \brief Determine whether the conversion from FromType to ToType is a valid
+
+/// \brief Determine whether the conversion from FromType to ToType is a valid
/// conversion that strips "noreturn" off the nested function type.
-static bool IsNoReturnConversion(ASTContext &Context, QualType FromType,
+static bool IsNoReturnConversion(ASTContext &Context, QualType FromType,
QualType ToType, QualType &ResultTy) {
if (Context.hasSameUnqualifiedType(FromType, ToType))
return false;
-
- // Strip the noreturn off the type we're converting from; noreturn can
- // safely be removed.
- FromType = Context.getNoReturnType(FromType, false);
- if (!Context.hasSameUnqualifiedType(FromType, ToType))
- return false;
- ResultTy = FromType;
+ // Permit the conversion F(t __attribute__((noreturn))) -> F(t)
+ // where F adds one of the following at most once:
+ // - a pointer
+ // - a member pointer
+ // - a block pointer
+ CanQualType CanTo = Context.getCanonicalType(ToType);
+ CanQualType CanFrom = Context.getCanonicalType(FromType);
+ Type::TypeClass TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) {
+ if (TyClass == Type::Pointer) {
+ CanTo = CanTo.getAs<PointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<PointerType>()->getPointeeType();
+ } else if (TyClass == Type::BlockPointer) {
+ CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType();
+ } else if (TyClass == Type::MemberPointer) {
+ CanTo = CanTo.getAs<MemberPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<MemberPointerType>()->getPointeeType();
+ } else {
+ return false;
+ }
+
+ TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto)
+ return false;
+ }
+
+ const FunctionType *FromFn = cast<FunctionType>(CanFrom);
+ FunctionType::ExtInfo EInfo = FromFn->getExtInfo();
+ if (!EInfo.getNoReturn()) return false;
+
+ FromFn = Context.adjustFunctionType(FromFn, EInfo.withNoReturn(false));
+ assert(QualType(FromFn, 0).isCanonical());
+ if (QualType(FromFn, 0) != CanTo) return false;
+
+ ResultTy = ToType;
return true;
}
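User-visible effect of the rewritten IsNoReturnConversion check, sketched with
the GNU attribute spelling (names hypothetical):

    __attribute__((noreturn)) void fail(const char *msg);
    // OK: noreturn is stripped through one level of pointer formation.
    void (*handler)(const char *) = fail;
    // The reverse is still rejected: the conversion may drop noreturn,
    // never add it.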
-
+
/// \brief Determine whether the conversion from FromType to ToType is a valid
/// vector conversion.
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
/// conversion.
-static bool IsVectorConversion(ASTContext &Context, QualType FromType,
- QualType ToType, ImplicitConversionKind &ICK) {
+static bool IsVectorConversion(ASTContext &Context, QualType FromType,
+ QualType ToType, ImplicitConversionKind &ICK) {
// We need at least one of these types to be a vector type to have a vector
// conversion.
if (!ToType->isVectorType() && !FromType->isVectorType())
@@ -899,7 +970,7 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
// identity conversion.
if (FromType->isExtVectorType())
return false;
-
+
// Vector splat from any arithmetic type to a vector.
if (FromType->isArithmeticType()) {
ICK = ICK_Vector_Splat;
@@ -922,7 +993,7 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
return false;
}
-
+
/// IsStandardConversion - Determines whether there is a standard
/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
/// expression From to the type ToType. Standard conversion sequences
@@ -933,9 +1004,10 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
/// routine will return false and the value of SCS is unspecified.
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
- StandardConversionSequence &SCS) {
+ StandardConversionSequence &SCS,
+ bool CStyle) {
QualType FromType = From->getType();
-
+
// Standard conversions (C++ [conv])
SCS.setAsIdentityConversion();
SCS.DeprecatedStringLiteralToCharPtr = false;
@@ -959,38 +1031,53 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
if (FromType == S.Context.OverloadTy) {
DeclAccessPair AccessPair;
if (FunctionDecl *Fn
- = S.ResolveAddressOfOverloadedFunction(From, ToType, false,
+ = S.ResolveAddressOfOverloadedFunction(From, ToType, false,
AccessPair)) {
// We were able to resolve the address of the overloaded function,
// so we can convert to the type of that function.
FromType = Fn->getType();
+
+      // We can sometimes resolve &foo<int> regardless of ToType, so check
+      // whether the type matches (identity) or we are converting to bool.
+ if (!S.Context.hasSameUnqualifiedType(
+ S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
+ QualType resultTy;
+        // If the function type matches except for [[noreturn]], it's OK.
+ if (!IsNoReturnConversion(S.Context, FromType,
+ S.ExtractUnqualifiedFunctionType(ToType), resultTy))
+          // Otherwise, only a boolean conversion is standard.
+ if (!ToType->isBooleanType())
+ return false;
+
+ }
+
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
if (!Method->isStatic()) {
- Type *ClassType
+ const Type *ClassType
= S.Context.getTypeDeclType(Method->getParent()).getTypePtr();
FromType = S.Context.getMemberPointerType(FromType, ClassType);
}
}
-
+
// If the "from" expression takes the address of the overloaded
// function, update the type of the resulting expression accordingly.
if (FromType->getAs<FunctionType>())
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(From->IgnoreParens()))
if (UnOp->getOpcode() == UO_AddrOf)
FromType = S.Context.getPointerType(FromType);
-
+
// Check that we've computed the proper type after overload resolution.
assert(S.Context.hasSameType(FromType,
S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
} else {
return false;
}
- }
+ }
// Lvalue-to-rvalue conversion (C++ 4.1):
// An lvalue (3.10) of a non-function, non-array type T can be
// converted to an rvalue.
- Expr::isLvalueResult argIsLvalue = From->isLvalue(S.Context);
- if (argIsLvalue == Expr::LV_Valid &&
+ bool argIsLValue = From->isLValue();
+ if (argIsLValue &&
!FromType->isFunctionType() && !FromType->isArrayType() &&
S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
SCS.First = ICK_Lvalue_To_Rvalue;
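What the &foo<int> check added earlier in this hunk accepts and rejects, as a
sketch (hypothetical declarations):

    template<typename T> void f(T);
    void (*p)(int) = &f<int>;  // resolves, then identity: OK
    bool b = &f<int>;          // resolves, then a boolean conversion: OK
    // int i = &f<int>;        // error: neither identity nor bool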
@@ -1022,7 +1109,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.setAllToTypes(FromType);
return true;
}
- } else if (FromType->isFunctionType() && argIsLvalue == Expr::LV_Valid) {
+ } else if (FromType->isFunctionType() && argIsLValue) {
// Function-to-pointer conversion (C++ 4.3).
SCS.First = ICK_Function_To_Pointer;
@@ -1060,17 +1147,26 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Complex promotion (Clang extension)
SCS.Second = ICK_Complex_Promotion;
FromType = ToType.getUnqualifiedType();
- } else if (FromType->isIntegralOrEnumerationType() &&
+ } else if (ToType->isBooleanType() &&
+ (FromType->isArithmeticType() ||
+ FromType->isAnyPointerType() ||
+ FromType->isBlockPointerType() ||
+ FromType->isMemberPointerType() ||
+ FromType->isNullPtrType())) {
+ // Boolean conversions (C++ 4.12).
+ SCS.Second = ICK_Boolean_Conversion;
+ FromType = S.Context.BoolTy;
+ } else if (FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isIntegralType(S.Context)) {
// Integral conversions (C++ 4.7).
SCS.Second = ICK_Integral_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if (FromType->isComplexType() && ToType->isComplexType()) {
+ } else if (FromType->isAnyComplexType() && ToType->isComplexType()) {
// Complex conversions (C99 6.3.1.6)
SCS.Second = ICK_Complex_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if ((FromType->isComplexType() && ToType->isArithmeticType()) ||
- (ToType->isComplexType() && FromType->isArithmeticType())) {
+ } else if ((FromType->isAnyComplexType() && ToType->isArithmeticType()) ||
+ (ToType->isAnyComplexType() && FromType->isArithmeticType())) {
// Complex-real conversions (C99 6.3.1.7)
SCS.Second = ICK_Complex_Real;
FromType = ToType.getUnqualifiedType();
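The reordering above lets the boolean-conversion case match before the
integral cases, and scoped enumerations drop out entirely; a sketch:

    int *p = 0;
    bool b1 = p;        // pointer -> bool (C++ 4.12), now checked first
    enum class E { e };
    // bool b2 = E::e;  // error: scoped enumerations are excluded from
                        // every conversion branch without a cast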
@@ -1078,32 +1174,24 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Floating point conversions (C++ 4.8).
SCS.Second = ICK_Floating_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if ((FromType->isRealFloatingType() &&
- ToType->isIntegralType(S.Context) && !ToType->isBooleanType()) ||
- (FromType->isIntegralOrEnumerationType() &&
+ } else if ((FromType->isRealFloatingType() &&
+ ToType->isIntegralType(S.Context)) ||
+ (FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
+ } else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Block_Pointer_Conversion;
} else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
FromType, IncompatibleObjC)) {
// Pointer conversions (C++ 4.10).
SCS.Second = ICK_Pointer_Conversion;
SCS.IncompatibleObjC = IncompatibleObjC;
- } else if (S.IsMemberPointerConversion(From, FromType, ToType,
+ } else if (S.IsMemberPointerConversion(From, FromType, ToType,
InOverloadResolution, FromType)) {
// Pointer to member conversions (4.11).
SCS.Second = ICK_Pointer_Member;
- } else if (ToType->isBooleanType() &&
- (FromType->isArithmeticType() ||
- FromType->isEnumeralType() ||
- FromType->isAnyPointerType() ||
- FromType->isBlockPointerType() ||
- FromType->isMemberPointerType() ||
- FromType->isNullPtrType())) {
- // Boolean conversions (C++ 4.12).
- SCS.Second = ICK_Boolean_Conversion;
- FromType = S.Context.BoolTy;
} else if (IsVectorConversion(S.Context, FromType, ToType, SecondICK)) {
SCS.Second = SecondICK;
FromType = ToType.getUnqualifiedType();
@@ -1124,7 +1212,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
QualType CanonFrom;
QualType CanonTo;
// The third conversion can be a qualification conversion (C++ 4p1).
- if (S.IsQualificationConversion(FromType, ToType)) {
+ if (S.IsQualificationConversion(FromType, ToType, CStyle)) {
SCS.Third = ICK_Qualification;
FromType = ToType;
CanonFrom = S.Context.getCanonicalType(FromType);
@@ -1139,7 +1227,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// a conversion. [...]
CanonFrom = S.Context.getCanonicalType(FromType);
CanonTo = S.Context.getCanonicalType(ToType);
- if (CanonFrom.getLocalUnqualifiedType()
+ if (CanonFrom.getLocalUnqualifiedType()
== CanonTo.getLocalUnqualifiedType() &&
(CanonFrom.getLocalCVRQualifiers() != CanonTo.getLocalCVRQualifiers()
|| CanonFrom.getObjCGCAttr() != CanonTo.getObjCGCAttr())) {
@@ -1187,23 +1275,47 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
return To->getKind() == BuiltinType::UInt;
}
- // An rvalue of type wchar_t (3.9.1) or an enumeration type (7.2)
- // can be converted to an rvalue of the first of the following types
- // that can represent all the values of its underlying type: int,
- // unsigned int, long, or unsigned long (C++ 4.5p2).
+  // C++0x [conv.prom]p3:
+  //   A prvalue of an unscoped enumeration type whose underlying type is not
+  //   fixed (7.2) can be converted to a prvalue of the first of the
+  //   following types that can represent all the values of the enumeration
+  //   (i.e., the values in the range bmin to bmax as described in 7.2): int,
+  //   unsigned int, long int, unsigned long int, long long int, or unsigned
+  //   long long int. If none of the types in that list can represent all the
+  //   values of the enumeration, a prvalue of an unscoped enumeration type
+  //   can be converted to a prvalue of the extended integer type with lowest
+  //   integer conversion rank (4.13) greater than the rank of long long in
+  //   which all the values of the enumeration can be represented. If there
+  //   are two such extended types, the signed one is chosen.
+ if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
+ // C++0x 7.2p9: Note that this implicit enum to int conversion is not
+ // provided for a scoped enumeration.
+ if (FromEnumType->getDecl()->isScoped())
+ return false;
- // We pre-calculate the promotion type for enum types.
- if (const EnumType *FromEnumType = FromType->getAs<EnumType>())
- if (ToType->isIntegerType())
+ // We have already pre-calculated the promotion type, so this is trivial.
+ if (ToType->isIntegerType() &&
+ !RequireCompleteType(From->getLocStart(), FromType, PDiag()))
return Context.hasSameUnqualifiedType(ToType,
FromEnumType->getDecl()->getPromotionType());
+ }
- if (FromType->isWideCharType() && ToType->isIntegerType()) {
+  // C++0x [conv.prom]p2:
+  //   A prvalue of type char16_t, char32_t, or wchar_t (3.9.1) can be
+  //   converted to a prvalue of the first of the following types that can
+  //   represent all the values of its underlying type: int, unsigned int,
+  //   long int, unsigned long int, long long int, or unsigned long long int.
+  //   If none of the types in that list can represent all the values of its
+  //   underlying type, a prvalue of type char16_t, char32_t, or wchar_t can
+  //   be converted to a prvalue of its underlying type.
+ if (FromType->isAnyCharacterType() && !FromType->isCharType() &&
+ ToType->isIntegerType()) {
// Determine whether the type we're converting from is signed or
// unsigned.
bool FromIsSigned;
uint64_t FromSize = Context.getTypeSize(FromType);
-
+
// FIXME: Is wchar_t signed or unsigned? We assume it's signed for now.
FromIsSigned = true;
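Behavioral sketch of the scoped-enumeration check added above (C++0x mode
assumed):

    enum Color { Red };               // unscoped: promotes per [conv.prom]
    enum class Mode { Fast };         // scoped: no implicit promotion
    int a = Red;                          // OK
    // int b = Mode::Fast;                // error: requires a cast
    int c = static_cast<int>(Mode::Fast); // OK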
@@ -1321,10 +1433,19 @@ bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
/// if non-empty, will be a pointer to ToType that may or may not have
/// the right set of qualifiers on its pointee.
static QualType
-BuildSimilarlyQualifiedPointerType(const PointerType *FromPtr,
+BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
QualType ToPointee, QualType ToType,
ASTContext &Context) {
- QualType CanonFromPointee = Context.getCanonicalType(FromPtr->getPointeeType());
+ assert((FromPtr->getTypeClass() == Type::Pointer ||
+ FromPtr->getTypeClass() == Type::ObjCObjectPointer) &&
+ "Invalid similarly-qualified pointer type");
+
+  // Conversions to 'id' subsume cv-qualifier conversions.
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ return ToType.getUnqualifiedType();
+
+ QualType CanonFromPointee
+ = Context.getCanonicalType(FromPtr->getPointeeType());
QualType CanonToPointee = Context.getCanonicalType(ToPointee);
Qualifiers Quals = CanonFromPointee.getQualifiers();
@@ -1336,34 +1457,20 @@ BuildSimilarlyQualifiedPointerType(const PointerType *FromPtr,
// Build a pointer to ToPointee. It has the right qualifiers
// already.
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(ToPointee);
return Context.getPointerType(ToPointee);
}
// Just build a canonical type that has the right qualifiers.
- return Context.getPointerType(
- Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(),
- Quals));
-}
+ QualType QualifiedCanonToPointee
+ = Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(), Quals);
-/// BuildSimilarlyQualifiedObjCObjectPointerType - In a pointer conversion from
-/// the FromType, which is an objective-c pointer, to ToType, which may or may
-/// not have the right set of qualifiers.
-static QualType
-BuildSimilarlyQualifiedObjCObjectPointerType(QualType FromType,
- QualType ToType,
- ASTContext &Context) {
- QualType CanonFromType = Context.getCanonicalType(FromType);
- QualType CanonToType = Context.getCanonicalType(ToType);
- Qualifiers Quals = CanonFromType.getQualifiers();
-
- // Exact qualifier match -> return the pointer type we're converting to.
- if (CanonToType.getLocalQualifiers() == Quals)
- return ToType;
-
- // Just build a canonical type that has the right qualifiers.
- return Context.getQualifiedType(CanonToType.getLocalUnqualifiedType(), Quals);
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(QualifiedCanonToPointee);
+ return Context.getPointerType(QualifiedCanonToPointee);
}
-
+
static bool isNullPointerConstantForConversion(Expr *Expr,
bool InOverloadResolution,
ASTContext &Context) {
@@ -1399,7 +1506,8 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
IncompatibleObjC = false;
- if (isObjCPointerConversion(FromType, ToType, ConvertedType, IncompatibleObjC))
+ if (isObjCPointerConversion(FromType, ToType, ConvertedType,
+ IncompatibleObjC))
return true;
// Conversion from a null pointer constant to any Objective-C pointer type.
@@ -1441,14 +1549,15 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
return true;
}
- // Beyond this point, both types need to be pointers
+  // Beyond this point, both types need to be pointers,
   // including Objective-C pointers.
QualType ToPointeeType = ToTypePtr->getPointeeType();
if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType()) {
- ConvertedType = BuildSimilarlyQualifiedObjCObjectPointerType(FromType,
+ ConvertedType = BuildSimilarlyQualifiedPointerType(
+ FromType->getAs<ObjCObjectPointerType>(),
+ ToPointeeType,
ToType, Context);
return true;
-
}
const PointerType *FromTypePtr = FromType->getAs<PointerType>();
if (!FromTypePtr)
@@ -1456,7 +1565,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
QualType FromPointeeType = FromTypePtr->getPointeeType();
- // If the unqualified pointee types are the same, this can't be a
+ // If the unqualified pointee types are the same, this can't be a
// pointer conversion, so don't do all of the work below.
if (Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType))
return false;
@@ -1517,13 +1626,20 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
bool &IncompatibleObjC) {
if (!getLangOptions().ObjC1)
return false;
-
+
// First, we handle all conversions on ObjC object pointer types.
- const ObjCObjectPointerType* ToObjCPtr = ToType->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType* ToObjCPtr =
+ ToType->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *FromObjCPtr =
FromType->getAs<ObjCObjectPointerType>();
if (ToObjCPtr && FromObjCPtr) {
+ // If the pointee types are the same (ignoring qualifications),
+ // then this is not a pointer conversion.
+ if (Context.hasSameUnqualifiedType(ToObjCPtr->getPointeeType(),
+ FromObjCPtr->getPointeeType()))
+ return false;
+
// Objective C++: We're able to convert between "id" or "Class" and a
// pointer to any interface (in both directions).
if (ToObjCPtr->isObjCBuiltinType() && FromObjCPtr->isObjCBuiltinType()) {
@@ -1547,7 +1663,9 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
!ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs(
FromObjCPtr->getPointeeType()))
return false;
- ConvertedType = ToType;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
return true;
}
@@ -1556,7 +1674,9 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
// interfaces, which is permitted. However, we're going to
// complain about it.
IncompatibleObjC = true;
- ConvertedType = FromType;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
return true;
}
}
@@ -1564,7 +1684,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType ToPointeeType;
if (const PointerType *ToCPtr = ToType->getAs<PointerType>())
ToPointeeType = ToCPtr->getPointeeType();
- else if (const BlockPointerType *ToBlockPtr =
+ else if (const BlockPointerType *ToBlockPtr =
ToType->getAs<BlockPointerType>()) {
// Objective C++: We're able to convert from a pointer to any object
// to a block pointer type.
@@ -1574,9 +1694,9 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
}
ToPointeeType = ToBlockPtr->getPointeeType();
}
- else if (FromType->getAs<BlockPointerType>() &&
+ else if (FromType->getAs<BlockPointerType>() &&
ToObjCPtr && ToObjCPtr->isObjCBuiltinType()) {
- // Objective C++: We're able to convert from a block pointer type to a
+ // Objective C++: We're able to convert from a block pointer type to a
// pointer to any object.
ConvertedType = ToType;
return true;
@@ -1587,7 +1707,8 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType FromPointeeType;
if (const PointerType *FromCPtr = FromType->getAs<PointerType>())
FromPointeeType = FromCPtr->getPointeeType();
- else if (const BlockPointerType *FromBlockPtr = FromType->getAs<BlockPointerType>())
+ else if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
FromPointeeType = FromBlockPtr->getPointeeType();
else
return false;
@@ -1599,7 +1720,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
IncompatibleObjC)) {
// We always complain about this conversion.
IncompatibleObjC = true;
- ConvertedType = ToType;
+ ConvertedType = Context.getPointerType(ConvertedType);
return true;
}
// Allow conversion of pointee being objective-c pointer to another one;
@@ -1608,10 +1729,10 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
ToPointeeType->getAs<ObjCObjectPointerType>() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
- ConvertedType = ToType;
+ ConvertedType = Context.getPointerType(ConvertedType);
return true;
}
-
+
// If we have pointers to functions or blocks, check whether the only
// differences in the argument and result types are in Objective-C
// pointer conversions. If so, we permit the conversion (but
@@ -1677,17 +1798,102 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
return false;
}
-
+
+bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType) {
+ QualType ToPointeeType;
+ if (const BlockPointerType *ToBlockPtr =
+ ToType->getAs<BlockPointerType>())
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ else
+ return false;
+
+ QualType FromPointeeType;
+ if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+  // We have pointers to blocks; check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion.
+
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAs<FunctionProtoType>();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAs<FunctionProtoType>();
+
+ if (!FromFunctionType || !ToFunctionType)
+ return false;
+
+ if (Context.hasSameType(FromPointeeType, ToPointeeType))
+ return true;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
+ return false;
+
+ FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
+ FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
+ if (FromEInfo != ToEInfo)
+ return false;
+
+ bool IncompatibleObjC = false;
+ if (Context.hasSameType(FromFunctionType->getResultType(),
+ ToFunctionType->getResultType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else {
+ QualType RHS = FromFunctionType->getResultType();
+ QualType LHS = ToFunctionType->getResultType();
+ if ((!getLangOptions().CPlusPlus || !RHS->isRecordType()) &&
+ !RHS.hasQualifiers() && LHS.hasQualifiers())
+ LHS = LHS.getUnqualifiedType();
+
+    if (Context.hasSameType(RHS, LHS)) {
+      // OK: exact match.
+ } else if (isObjCPointerConversion(RHS, LHS,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ }
+ else
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ IncompatibleObjC = false;
+ QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+ if (Context.hasSameType(FromArgType, ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(ToArgType, FromArgType,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ } else
+ // Argument types are too different. Abort.
+ return false;
+ }
+ ConvertedType = ToType;
+ return true;
+}
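A sketch of what the new IsBlockPointerConversion accepts, in Objective-C++
with the blocks extension (-fblocks; interfaces hypothetical):

    @interface Base @end
    @interface Derived : Base @end

    Derived *(^makeDerived)(void);
    Base *(^makeBase)(void) = makeDerived; // OK: results differ only by an
                                           // Objective-C pointer conversion
    // int (^other)(void) = makeDerived;   // error: result types genuinely
                                           // differ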
+
/// FunctionArgTypesAreEqual - This routine checks two function proto types
/// for equality of their argument types. The caller has already checked that
/// they have the same number of arguments. This routine assumes that
/// Objective-C pointer types which only differ in their protocol qualifiers
/// are equal.
-bool Sema::FunctionArgTypesAreEqual(FunctionProtoType* OldType,
- FunctionProtoType* NewType){
+bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType) {
if (!getLangOptions().ObjC1)
return std::equal(OldType->arg_type_begin(), OldType->arg_type_end(),
NewType->arg_type_begin());
-
+
for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
N = NewType->arg_type_begin(),
E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
@@ -1704,12 +1910,12 @@ bool Sema::FunctionArgTypesAreEqual(FunctionProtoType* OldType,
}
else if (const ObjCObjectPointerType *PTTo =
ToType->getAs<ObjCObjectPointerType>()) {
- if (const ObjCObjectPointerType *PTFr =
+ if (const ObjCObjectPointerType *PTFr =
FromType->getAs<ObjCObjectPointerType>())
if (PTTo->getInterfaceDecl() == PTFr->getInterfaceDecl())
continue;
}
- return false;
+ return false;
}
}
return true;
@@ -1726,10 +1932,13 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
CXXCastPath& BasePath,
bool IgnoreBaseAccess) {
QualType FromType = From->getType();
+ bool IsCStyleOrFunctionalCast = IgnoreBaseAccess;
+
+ Kind = CK_BitCast;
if (CXXBoolLiteralExpr* LitBool
= dyn_cast<CXXBoolLiteralExpr>(From->IgnoreParens()))
- if (LitBool->getValue() == false)
+ if (!IsCStyleOrFunctionalCast && LitBool->getValue() == false)
Diag(LitBool->getExprLoc(), diag::warn_init_pointer_from_false)
<< ToType;
@@ -1747,13 +1956,13 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
From->getSourceRange(), &BasePath,
IgnoreBaseAccess))
return true;
-
+
// The conversion was successful.
Kind = CK_DerivedToBase;
}
}
if (const ObjCObjectPointerType *FromPtrType =
- FromType->getAs<ObjCObjectPointerType>())
+ FromType->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *ToPtrType =
ToType->getAs<ObjCObjectPointerType>()) {
// Objective-C++ conversions are always okay.
@@ -1761,8 +1970,14 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
// Objective-C++ implicit conversions.
if (FromPtrType->isObjCBuiltinType() || ToPtrType->isObjCBuiltinType())
return false;
-
+ }
}
+
+ // We shouldn't fall into this case unless it's valid for other
+ // reasons.
+ if (From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Kind = CK_NullToPointer;
+
return false;
}
@@ -1772,7 +1987,7 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
/// If so, returns true and places the converted type (that might differ from
/// ToType in its cv-qualifiers at some level) into ConvertedType.
bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
- QualType ToType,
+ QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType) {
const MemberPointerType *ToTypePtr = ToType->getAs<MemberPointerType>();
@@ -1796,9 +2011,10 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
// where D is derived from B (C++ 4.11p2).
QualType FromClass(FromTypePtr->getClass(), 0);
QualType ToClass(ToTypePtr->getClass(), 0);
- // FIXME: What happens when these are dependent? Is this function even called?
- if (IsDerivedFrom(ToClass, FromClass)) {
+ if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
+ !RequireCompleteType(From->getLocStart(), ToClass, PDiag()) &&
+ IsDerivedFrom(ToClass, FromClass)) {
ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
ToClass.getTypePtr());
return true;
@@ -1806,7 +2022,7 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
return false;
}
-
+
/// CheckMemberPointerConversion - Check the member pointer conversion from the
/// expression From to the type ToType. This routine checks for ambiguous or
/// virtual or inaccessible base-to-derived member pointer conversions
@@ -1821,7 +2037,7 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
const MemberPointerType *FromPtrType = FromType->getAs<MemberPointerType>();
if (!FromPtrType) {
// This must be a null pointer to member pointer conversion
- assert(From->isNullPointerConstant(Context,
+ assert(From->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull) &&
"Expr must be null pointer constant!");
Kind = CK_NullToMemberPointer;
@@ -1876,7 +2092,8 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
/// an rvalue of type FromType to ToType is a qualification conversion
/// (C++ 4.4).
bool
-Sema::IsQualificationConversion(QualType FromType, QualType ToType) {
+Sema::IsQualificationConversion(QualType FromType, QualType ToType,
+ bool CStyle) {
FromType = Context.getCanonicalType(FromType);
ToType = Context.getCanonicalType(ToType);
@@ -1901,12 +2118,12 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType) {
// -- for every j > 0, if const is in cv 1,j then const is in cv
// 2,j, and similarly for volatile.
- if (!ToType.isAtLeastAsQualifiedAs(FromType))
+ if (!CStyle && !ToType.isAtLeastAsQualifiedAs(FromType))
return false;
// -- if the cv 1,j and cv 2,j are different, then const is in
// every cv for 0 < k < j.
- if (FromType.getCVRQualifiers() != ToType.getCVRQualifiers()
+ if (!CStyle && FromType.getCVRQualifiers() != ToType.getCVRQualifiers()
&& !PreviousToQualsIncludeConst)
return false;
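The CStyle flag threaded through here relaxes the qualification checks for
C-style and functional casts; a user-visible sketch:

    const int *cp = 0;
    // int *p1 = cp;      // error: the implicit path keeps the usual checks
    int *p2 = (int *)cp;  // OK: the cast paths pass CStyle = true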
@@ -1977,13 +2194,13 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
= cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
else
Constructor = cast<CXXConstructorDecl>(D);
-
+
if (!Constructor->isInvalidDecl() &&
Constructor->isConvertingConstructor(AllowExplicit)) {
if (ConstructorTmpl)
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
- &From, 1, CandidateSet,
+ &From, 1, CandidateSet,
/*SuppressUserConversions=*/
!ConstructorsOnly);
else
@@ -2039,7 +2256,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
}
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best)) {
+ switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) {
case OR_Success:
// Record the standard conversion we used and the conversion function.
if (CXXConstructorDecl *Constructor
@@ -2058,6 +2275,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
User.EllipsisConversion = false;
}
User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl.getDecl();
User.After.setAsIdentityConversion();
User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
User.After.setAllToTypes(ToType);
@@ -2072,6 +2290,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// implicit object parameter of the conversion function.
User.Before = Best->Conversions[0].Standard;
User.ConversionFunction = Conversion;
+ User.FoundConversionFunction = Best->FoundDecl.getDecl();
User.EllipsisConversion = false;
// C++ [over.ics.user]p2:
@@ -2095,19 +2314,19 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
case OR_Deleted:
// No conversion here! We're done.
return OR_Deleted;
-
+
case OR_Ambiguous:
return OR_Ambiguous;
}
return OR_No_Viable_Function;
}
-
+
bool
Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
ImplicitConversionSequence ICS;
OverloadCandidateSet CandidateSet(From->getExprLoc());
- OverloadingResult OvResult =
+ OverloadingResult OvResult =
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
CandidateSet, false);
if (OvResult == OR_Ambiguous)
@@ -2121,7 +2340,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
else
return false;
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, &From, 1);
- return true;
+ return true;
}
/// CompareImplicitConversionSequences - Compare two implicit
@@ -2182,12 +2401,12 @@ static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
Qualifiers Quals;
T1 = Context.getUnqualifiedArrayType(T1, Quals);
- T2 = Context.getUnqualifiedArrayType(T2, Quals);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals);
}
-
+
return Context.hasSameUnqualifiedType(T1, T2);
}
-
+
// Per 13.3.3.2p3, compare the given standard conversion sequences to
// determine if one is a proper subset of the other.
static ImplicitConversionSequence::CompareKind
@@ -2197,7 +2416,7 @@ compareStandardConversionSubsets(ASTContext &Context,
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
- // the identity conversion sequence is considered to be a subsequence of
+  // The identity conversion sequence is considered to be a subsequence of
// any non-identity conversion sequence
if (SCS1.ReferenceBinding == SCS2.ReferenceBinding) {
if (SCS1.isIdentityConversion() && !SCS2.isIdentityConversion())
@@ -2205,7 +2424,7 @@ compareStandardConversionSubsets(ASTContext &Context,
else if (!SCS1.isIdentityConversion() && SCS2.isIdentityConversion())
return ImplicitConversionSequence::Worse;
}
-
+
if (SCS1.Second != SCS2.Second) {
if (SCS1.Second == ICK_Identity)
Result = ImplicitConversionSequence::Better;
@@ -2230,10 +2449,37 @@ compareStandardConversionSubsets(ASTContext &Context,
return Result == ImplicitConversionSequence::Better
? ImplicitConversionSequence::Indistinguishable
: ImplicitConversionSequence::Worse;
-
+
return ImplicitConversionSequence::Indistinguishable;
}
+/// \brief Determine whether one of the given reference bindings is better
+/// than the other based on what kind of bindings they are.
+static bool isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
+ const StandardConversionSequence &SCS2) {
+ // C++0x [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
+ // implicit object parameter of a non-static member function declared
+ // without a ref-qualifier, and *either* S1 binds an rvalue reference
+ // to an rvalue and S2 binds an lvalue reference *or S1 binds an
+ // lvalue reference to a function lvalue and S2 binds an rvalue
+ // reference*.
+ //
+ // FIXME: Rvalue references. We're going rogue with the above edits,
+ // because the semantics in the current C++0x working paper (N3225 at the
+ // time of this writing) break the standard definition of std::forward
+ // and std::reference_wrapper when dealing with references to functions.
+ // Proposed wording changes submitted to CWG for consideration.
+ if (SCS1.BindsImplicitObjectArgumentWithoutRefQualifier ||
+ SCS2.BindsImplicitObjectArgumentWithoutRefQualifier)
+ return false;
+
+ return (!SCS1.IsLvalueReference && SCS1.BindsToRvalue &&
+ SCS2.IsLvalueReference) ||
+ (SCS1.IsLvalueReference && SCS1.BindsToFunctionLvalue &&
+ !SCS2.IsLvalueReference);
+}
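An example of the ranking this predicate produces (sketch, C++0x mode
assumed):

    void f(const int &);  // binds an lvalue reference
    void f(int &&);       // binds an rvalue reference to an rvalue
    int g();
    // f(g()) selects f(int &&): per the edited [over.ics.rank]p3b4 above,
    // the rvalue-reference binding is the better reference binding kind.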
+
/// CompareStandardConversionSequences - Compare two standard
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2p3).
@@ -2339,17 +2585,11 @@ CompareStandardConversionSequences(Sema &S,
return QualCK;
if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) {
- // C++0x [over.ics.rank]p3b4:
- // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
- // implicit object parameter of a non-static member function declared
- // without a ref-qualifier, and S1 binds an rvalue reference to an
- // rvalue and S2 binds an lvalue reference.
- // FIXME: We don't know if we're dealing with the implicit object parameter,
- // or if the member function in this case has a ref qualifier.
- // (Of course, we don't have ref qualifiers yet.)
- if (SCS1.RRefBinding != SCS2.RRefBinding)
- return SCS1.RRefBinding ? ImplicitConversionSequence::Better
- : ImplicitConversionSequence::Worse;
+ // Check for a better reference binding based on the kind of bindings.
+ if (isBetterReferenceBindingKind(SCS1, SCS2))
+ return ImplicitConversionSequence::Better;
+ else if (isBetterReferenceBindingKind(SCS2, SCS1))
+ return ImplicitConversionSequence::Worse;
// C++ [over.ics.rank]p3b4:
// -- S1 and S2 are reference bindings (8.5.3), and the types to
@@ -2365,8 +2605,8 @@ CompareStandardConversionSequences(Sema &S,
QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
if (UnqualT1 == UnqualT2) {
- // If the type is an array type, promote the element qualifiers to the type
- // for comparison.
+ // If the type is an array type, promote the element qualifiers to the
+ // type for comparison.
if (isa<ArrayType>(T1) && T1Quals)
T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
if (isa<ArrayType>(T2) && T2Quals)
@@ -2513,9 +2753,6 @@ CompareDerivedToBaseConversions(Sema &S,
// If class B is derived directly or indirectly from class A and
// class C is derived directly or indirectly from B,
//
- // For Objective-C, we let A, B, and C also be Objective-C
- // interfaces.
-
// Compare based on pointer conversions.
if (SCS1.Second == ICK_Pointer_Conversion &&
SCS2.Second == ICK_Pointer_Conversion &&
@@ -2531,24 +2768,12 @@ CompareDerivedToBaseConversions(Sema &S,
QualType ToPointee2
= ToType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
- const ObjCObjectType* FromIface1 = FromPointee1->getAs<ObjCObjectType>();
- const ObjCObjectType* FromIface2 = FromPointee2->getAs<ObjCObjectType>();
- const ObjCObjectType* ToIface1 = ToPointee1->getAs<ObjCObjectType>();
- const ObjCObjectType* ToIface2 = ToPointee2->getAs<ObjCObjectType>();
-
// -- conversion of C* to B* is better than conversion of C* to A*,
if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
if (S.IsDerivedFrom(ToPointee1, ToPointee2))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
return ImplicitConversionSequence::Worse;
-
- if (ToIface1 && ToIface2) {
- if (S.Context.canAssignObjCInterfaces(ToIface2, ToIface1))
- return ImplicitConversionSequence::Better;
- else if (S.Context.canAssignObjCInterfaces(ToIface1, ToIface2))
- return ImplicitConversionSequence::Worse;
- }
}
// -- conversion of B* to A* is better than conversion of C* to A*,
@@ -2557,27 +2782,90 @@ CompareDerivedToBaseConversions(Sema &S,
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
return ImplicitConversionSequence::Worse;
+ }
+ } else if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion) {
+ const ObjCObjectPointerType *FromPtr1
+ = FromType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *FromPtr2
+ = FromType2->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr1
+ = ToType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr2
+ = ToType2->getAs<ObjCObjectPointerType>();
+
+ if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
+ // Apply the same conversion ranking rules for Objective-C pointer types
+ // that we do for C++ pointers to class types. However, we employ the
+ // Objective-C pseudo-subtyping relationship used for assignment of
+ // Objective-C pointer types.
+ bool FromAssignLeft
+ = S.Context.canAssignObjCInterfaces(FromPtr1, FromPtr2);
+ bool FromAssignRight
+ = S.Context.canAssignObjCInterfaces(FromPtr2, FromPtr1);
+ bool ToAssignLeft
+ = S.Context.canAssignObjCInterfaces(ToPtr1, ToPtr2);
+ bool ToAssignRight
+ = S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
+
+      // A conversion to a non-id object pointer type or qualified 'id'
+ // type is better than a conversion to 'id'.
+ if (ToPtr1->isObjCIdType() &&
+ (ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCIdType() &&
+ (ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-id object pointer type is better than a
+ // conversion to a qualified 'id' type
+ if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
+
+      // A conversion to a non-Class object pointer type or qualified 'Class'
+ // type is better than a conversion to 'Class'.
+ if (ToPtr1->isObjCClassType() &&
+ (ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCClassType() &&
+ (ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-Class object pointer type is better than a
+ // conversion to a qualified 'Class' type.
+ if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedClassType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
- if (FromIface1 && FromIface2) {
- if (S.Context.canAssignObjCInterfaces(FromIface1, FromIface2))
- return ImplicitConversionSequence::Better;
- else if (S.Context.canAssignObjCInterfaces(FromIface2, FromIface1))
- return ImplicitConversionSequence::Worse;
- }
+ // -- "conversion of C* to B* is better than conversion of C* to A*,"
+ if (S.Context.hasSameType(FromType1, FromType2) &&
+ !FromPtr1->isObjCIdType() && !FromPtr1->isObjCClassType() &&
+ (ToAssignLeft != ToAssignRight))
+ return ToAssignLeft? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+
+ // -- "conversion of B* to A* is better than conversion of C* to A*,"
+ if (S.Context.hasSameUnqualifiedType(ToType1, ToType2) &&
+ (FromAssignLeft != FromAssignRight))
+ return FromAssignLeft? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
}
}
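How the new Objective-C ranking plays out in overload resolution
(Objective-C++ sketch, hypothetical interfaces):

    @interface Base @end
    @interface Derived : Base @end
    void h(id);      // #1
    void h(Base *);  // #2
    // Given Derived *d, the call h(d) needs a pointer conversion either way,
    // but the conversion to Base * outranks the conversion to 'id', so #2
    // wins.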
-
+
// Ranking of member-pointer types.
if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
ToType1->isMemberPointerType() && ToType2->isMemberPointerType()) {
- const MemberPointerType * FromMemPointer1 =
+ const MemberPointerType * FromMemPointer1 =
FromType1->getAs<MemberPointerType>();
- const MemberPointerType * ToMemPointer1 =
+ const MemberPointerType * ToMemPointer1 =
ToType1->getAs<MemberPointerType>();
- const MemberPointerType * FromMemPointer2 =
+ const MemberPointerType * FromMemPointer2 =
FromType2->getAs<MemberPointerType>();
- const MemberPointerType * ToMemPointer2 =
+ const MemberPointerType * ToMemPointer2 =
ToType2->getAs<MemberPointerType>();
const Type *FromPointeeType1 = FromMemPointer1->getClass();
const Type *ToPointeeType1 = ToMemPointer1->getClass();
@@ -2602,7 +2890,7 @@ CompareDerivedToBaseConversions(Sema &S,
return ImplicitConversionSequence::Worse;
}
}
-
+
if (SCS1.Second == ICK_Derived_To_Base) {
// -- conversion of C to B is better than conversion of C to A,
// -- binding of an expression of type C to a reference of type
@@ -2708,10 +2996,6 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
CXXRecordDecl *T2RecordDecl
= dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
- QualType ToType
- = AllowRvalues? DeclType->getAs<ReferenceType>()->getPointeeType()
- : DeclType;
-
OverloadCandidateSet CandidateSet(DeclLoc);
const UnresolvedSetImpl *Conversions
= T2RecordDecl->getVisibleConversionFunctions();
@@ -2730,20 +3014,22 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
else
Conv = cast<CXXConversionDecl>(D);
- // If this is an explicit conversion, and we're not allowed to consider
+ // If this is an explicit conversion, and we're not allowed to consider
// explicit conversions, skip it.
if (!AllowExplicit && Conv->isExplicit())
continue;
-
+
if (AllowRvalues) {
bool DerivedToBase = false;
bool ObjCConversion = false;
if (!ConvTemplate &&
- S.CompareReferenceRelationship(DeclLoc,
- Conv->getConversionType().getNonReferenceType().getUnqualifiedType(),
- DeclType.getNonReferenceType().getUnqualifiedType(),
- DerivedToBase, ObjCConversion)
- == Sema::Ref_Incompatible)
+ S.CompareReferenceRelationship(
+ DeclLoc,
+ Conv->getConversionType().getNonReferenceType()
+ .getUnqualifiedType(),
+ DeclType.getNonReferenceType().getUnqualifiedType(),
+ DerivedToBase, ObjCConversion) ==
+ Sema::Ref_Incompatible)
continue;
} else {
// If the conversion function doesn't return a reference type,
@@ -2757,17 +3043,17 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
!RefType->getPointeeType()->isFunctionType()))
continue;
}
-
+
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
- Init, ToType, CandidateSet);
+ Init, DeclType, CandidateSet);
else
S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
- ToType, CandidateSet);
+ DeclType, CandidateSet);
}
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
+ switch (CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
case OR_Success:
// C++ [over.ics.ref]p1:
//
@@ -2786,6 +3072,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
ICS.UserDefined.Before = Best->Conversions[0].Standard;
ICS.UserDefined.After = Best->FinalConversion;
ICS.UserDefined.ConversionFunction = Best->Function;
+ ICS.UserDefined.FoundConversionFunction = Best->FoundDecl.getDecl();
ICS.UserDefined.EllipsisConversion = false;
assert(ICS.UserDefined.After.ReferenceBinding &&
ICS.UserDefined.After.DirectBinding &&
@@ -2806,7 +3093,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
// conversion; continue with other checks.
return false;
}
-
+
return false;
}
@@ -2851,9 +3138,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// of type "cv2 T2" as follows:
// -- If reference is an lvalue reference and the initializer expression
- // The next bullet point (T1 is a function) is pretty much equivalent to this
- // one, so it's handled here.
- if (!isRValRef || T1->isFunctionType()) {
+ if (!isRValRef) {
// -- is an lvalue (but is not a bit-field), and "cv1 T1" is
// reference-compatible with "cv2 T2," or
//
@@ -2879,7 +3164,10 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
ICS.Standard.setToType(2, T1);
ICS.Standard.ReferenceBinding = true;
ICS.Standard.DirectBinding = true;
- ICS.Standard.RRefBinding = isRValRef;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = false;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
ICS.Standard.CopyConstructor = 0;
// Nothing more to do: the inaccessibility/ambiguity check for
@@ -2897,7 +3185,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// conversion functions (13.3.1.6) and choosing the best
// one through overload resolution (13.3)),
if (!SuppressUserConversions && T2->isRecordType() &&
- !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ !S.RequireCompleteType(DeclLoc, T2, 0) &&
RefRelationship == Sema::Ref_Incompatible) {
if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
Init, T2, /*AllowRvalues=*/false,
@@ -2908,9 +3196,8 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// -- Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
- // shall be an rvalue reference and the initializer expression shall be
- // an rvalue or have a function type.
- //
+ // shall be an rvalue reference.
+ //
// We actually handle one oddity of C++ [over.ics.ref] at this
// point, which is that, due to p2 (which short-circuits reference
// binding by only attempting a simple conversion for non-direct
@@ -2920,70 +3207,70 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// qualifier.
// This is also the point where rvalue references and lvalue inits no longer
// go together.
- if ((!isRValRef && !T1.isConstQualified()) ||
- (isRValRef && InitCategory.isLValue()))
+ if (!isRValRef && !T1.isConstQualified())
return ICS;
- // -- If T1 is a function type, then
- // -- if T2 is the same type as T1, the reference is bound to the
- // initializer expression lvalue;
- // -- if T2 is a class type and the initializer expression can be
- // implicitly converted to an lvalue of type T1 [...], the
- // reference is bound to the function lvalue that is the result
- // of the conversion;
- // This is the same as for the lvalue case above, so it was handled there.
- // -- otherwise, the program is ill-formed.
- // This is the one difference to the lvalue case.
- if (T1->isFunctionType())
+ // -- If the initializer expression
+ //
+  //     -- is an xvalue, class prvalue, array prvalue, or function
+  //        lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
+ if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
+ (InitCategory.isLValue() && T2->isFunctionType()))) {
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
+ : ObjCConversion? ICK_Compatible_Conversion
+ : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ // In C++0x, this is always a direct binding. In C++98/03, it's a direct
+ // binding unless we're binding to a class prvalue.
+ // Note: Although xvalues wouldn't normally show up in C++98/03 code, we
+ // allow the use of rvalue references in C++98/03 for the benefit of
+ // standard library implementors; therefore, we need the xvalue check here.
+ ICS.Standard.DirectBinding =
+ S.getLangOptions().CPlusPlus0x ||
+ (InitCategory.isPRValue() && !T2->isRecordType());
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = InitCategory.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.CopyConstructor = 0;
return ICS;
+ }
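The function-lvalue case that motivated the "going rogue" edits above, as a
sketch (C++0x mode assumed):

    void fn();
    void (&&rf)() = fn;  // OK: a function lvalue binds to an rvalue
                         // reference under the edited rule
    struct A {};
    A makeA();
    A &&ra = makeA();    // OK: class prvalue; a direct binding in C++0x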
- // -- Otherwise, if T2 is a class type and
- // -- the initializer expression is an rvalue and "cv1 T1"
- // is reference-compatible with "cv2 T2," or
- //
- // -- T1 is not reference-related to T2 and the initializer
- // expression can be implicitly converted to an rvalue
- // of type "cv3 T3" (this conversion is selected by
- // enumerating the applicable conversion functions
- // (13.3.1.6) and choosing the best one through overload
- // resolution (13.3)),
+ // -- has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to
+ // an xvalue, class prvalue, or function lvalue of type
+ // "cv3 T3", where "cv1 T1" is reference-compatible with
+ // "cv3 T3",
//
- // then the reference is bound to the initializer
- // expression rvalue in the first case and to the object
- // that is the result of the conversion in the second case
- // (or, in either case, to the appropriate base class
- // subobject of the object).
- if (T2->isRecordType()) {
- // First case: "cv1 T1" is reference-compatible with "cv2 T2". This is a
- // direct binding in C++0x but not in C++03.
- if (InitCategory.isRValue() &&
- RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
- ICS.setStandard();
- ICS.Standard.First = ICK_Identity;
- ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
- : ObjCConversion? ICK_Compatible_Conversion
- : ICK_Identity;
- ICS.Standard.Third = ICK_Identity;
- ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
- ICS.Standard.setToType(0, T2);
- ICS.Standard.setToType(1, T1);
- ICS.Standard.setToType(2, T1);
- ICS.Standard.ReferenceBinding = true;
- ICS.Standard.DirectBinding = S.getLangOptions().CPlusPlus0x;
- ICS.Standard.RRefBinding = isRValRef;
- ICS.Standard.CopyConstructor = 0;
- return ICS;
- }
-
- // Second case: not reference-related.
- if (RefRelationship == Sema::Ref_Incompatible &&
- !S.RequireCompleteType(DeclLoc, T2, 0) &&
- FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
- Init, T2, /*AllowRvalues=*/true,
- AllowExplicit))
- return ICS;
+ // then the reference is bound to the value of the initializer
+ // expression in the first case and to the result of the conversion
+ // in the second case (or, in either case, to an appropriate base
+ // class subobject).
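+ // For example, given "struct A { operator int&&(); };", the
+ // initialization "int &&r = A();" takes this path: int is not
+ // reference-related to A, and the conversion function yields an
+ // xvalue of type int.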
+ if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
+ T2->isRecordType() && !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
+ Init, T2, /*AllowRvalues=*/true,
+ AllowExplicit)) {
+ // In the second case, if the reference is an rvalue reference
+ // and the second standard conversion sequence of the
+ // user-defined conversion sequence includes an lvalue-to-rvalue
+ // conversion, the program is ill-formed.
+ if (ICS.isUserDefined() && isRValRef &&
+ ICS.UserDefined.After.First == ICK_Lvalue_To_Rvalue)
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
+
+ return ICS;
}
-
+
// -- Otherwise, a temporary of type "cv1 T1" is created and
// initialized from the initializer expression using the
// rules for a non-reference copy initialization (8.5). The
@@ -3008,6 +3295,12 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
(T1->isRecordType() || T2->isRecordType()))
return ICS;
+ // If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
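+ // For example, given "int i;", the initialization "int &&r = i;" is
+ // rejected here.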
+ if (RefRelationship >= Sema::Ref_Related &&
+ isRValRef && Init->Classify(S.Context).isLValue())
+ return ICS;
+
// C++ [over.ics.ref]p2:
// When a parameter of reference type is not bound directly to
// an argument expression, the conversion sequence is the one
@@ -3020,16 +3313,24 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// and does not constitute a conversion.
ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
/*AllowExplicit=*/false,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false);
// Of course, that's still a reference binding.
if (ICS.isStandard()) {
ICS.Standard.ReferenceBinding = true;
- ICS.Standard.RRefBinding = isRValRef;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = true;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
} else if (ICS.isUserDefined()) {
ICS.UserDefined.After.ReferenceBinding = true;
- ICS.UserDefined.After.RRefBinding = isRValRef;
+    ICS.UserDefined.After.IsLvalueReference = !isRValRef;
+    ICS.UserDefined.After.BindsToFunctionLvalue = T2->isFunctionType();
+    ICS.UserDefined.After.BindsToRvalue = true;
+    ICS.UserDefined.After.BindsImplicitObjectArgumentWithoutRefQualifier
+      = false;
}
+
return ICS;
}
@@ -3041,7 +3342,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
/// do not permit any user-defined conversion sequences.
static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
- bool SuppressUserConversions,
+ bool SuppressUserConversions,
bool InOverloadResolution) {
if (ToType->isReferenceType())
return TryReferenceInit(S, From, ToType,
@@ -3052,7 +3353,8 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
/*AllowExplicit=*/false,
- InOverloadResolution);
+ InOverloadResolution,
+ /*CStyle=*/false);
}
/// TryObjectArgumentInitialization - Try to initialize the object
@@ -3060,6 +3362,7 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
/// expression @p From.
static ImplicitConversionSequence
TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
+ Expr::Classification FromClassification,
CXXMethodDecl *Method,
CXXRecordDecl *ActingContext) {
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
@@ -3075,24 +3378,37 @@ TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
// We need to have an object of class type.
QualType FromType = OrigFromType;
- if (const PointerType *PT = FromType->getAs<PointerType>())
+ if (const PointerType *PT = FromType->getAs<PointerType>()) {
FromType = PT->getPointeeType();
+    // When we had a pointer, it's implicitly dereferenced, so we'd
+    // better have an lvalue.
+ assert(FromClassification.isLValue());
+ }
+
assert(FromType->isRecordType());
- // The implicit object parameter is has the type "reference to cv X",
- // where X is the class of which the function is a member
- // (C++ [over.match.funcs]p4). However, when finding an implicit
- // conversion sequence for the argument, we are not allowed to
- // create temporaries or perform user-defined conversions
+ // C++0x [over.match.funcs]p4:
+ // For non-static member functions, the type of the implicit object
+ // parameter is
+ //
+ // - "lvalue reference to cv X" for functions declared without a
+ // ref-qualifier or with the & ref-qualifier
+ // - "rvalue reference to cv X" for functions declared with the &&
+ // ref-qualifier
+ //
+ // where X is the class of which the function is a member and cv is the
+ // cv-qualification on the member function declaration.
+ //
+ // However, when finding an implicit conversion sequence for the argument, we
+ // are not allowed to create temporaries or perform user-defined conversions
// (C++ [over.match.funcs]p5). We perform a simplified version of
// reference binding here, that allows class rvalues to bind to
// non-constant references.
- // First check the qualifiers. We don't care about lvalue-vs-rvalue
- // with the implicit object parameter (C++ [over.match.funcs]p5).
+ // First check the qualifiers.
QualType FromTypeCanon = S.Context.getCanonicalType(FromType);
- if (ImplicitParamType.getCVRQualifiers()
+ if (ImplicitParamType.getCVRQualifiers()
!= FromTypeCanon.getLocalCVRQualifiers() &&
!ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) {
ICS.setBad(BadConversionSequence::bad_qualifiers,
@@ -3114,6 +3430,31 @@ TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
return ICS;
}
+ // Check the ref-qualifier.
+ switch (Method->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing; we don't care about lvalueness or rvalueness.
+ break;
+
+ case RQ_LValue:
+ if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
+ // non-const lvalue reference cannot bind to an rvalue
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+
+ case RQ_RValue:
+ if (!FromClassification.isRValue()) {
+ // rvalue reference cannot bind to an lvalue
+ ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+ }
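+  // For example, given "struct X { void f() &&; };", the call "X().f()"
+  // satisfies the RQ_RValue check above, while "X x; x.f();" does not.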
+
// Success. Mark this as a reference binding.
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
@@ -3122,7 +3463,11 @@ TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
ICS.Standard.setAllToTypes(ImplicitParamType);
ICS.Standard.ReferenceBinding = true;
ICS.Standard.DirectBinding = true;
- ICS.Standard.RRefBinding = false;
+ ICS.Standard.IsLvalueReference = Method->getRefQualifier() != RQ_RValue;
+ ICS.Standard.BindsToFunctionLvalue = false;
+ ICS.Standard.BindsToRvalue = FromClassification.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier
+ = (Method->getRefQualifier() == RQ_None);
return ICS;
}
@@ -3130,31 +3475,50 @@ TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
/// the implicit object parameter for the given Method with the given
/// expression.
bool
-Sema::PerformObjectArgumentInitialization(Expr *&From,
- NestedNameSpecifier *Qualifier,
+Sema::PerformObjectArgumentInitialization(Expr *&From,
+ NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
QualType ImplicitParamRecordType =
Method->getThisType(Context)->getAs<PointerType>()->getPointeeType();
+ Expr::Classification FromClassification;
if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
FromRecordType = PT->getPointeeType();
DestType = Method->getThisType(Context);
+ FromClassification = Expr::Classification::makeSimpleLValue();
} else {
FromRecordType = From->getType();
DestType = ImplicitParamRecordType;
+ FromClassification = From->Classify(Context);
}
// Note that we always use the true parent context when performing
// the actual argument initialization.
ImplicitConversionSequence ICS
- = TryObjectArgumentInitialization(*this, From->getType(), Method,
- Method->getParent());
- if (ICS.isBad())
+ = TryObjectArgumentInitialization(*this, From->getType(), FromClassification,
+ Method, Method->getParent());
+ if (ICS.isBad()) {
+ if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
+ Qualifiers FromQs = FromRecordType.getQualifiers();
+ Qualifiers ToQs = DestType.getQualifiers();
+ unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
+ if (CVR) {
+ Diag(From->getSourceRange().getBegin(),
+ diag::err_member_function_call_bad_cvr)
+ << Method->getDeclName() << FromRecordType << (CVR - 1)
+ << From->getSourceRange();
+ Diag(Method->getLocation(), diag::note_previous_decl)
+ << Method->getDeclName();
+ return true;
+ }
+ }
+
return Diag(From->getSourceRange().getBegin(),
diag::err_implicit_object_parameter_init)
<< ImplicitParamRecordType << FromRecordType << From->getSourceRange();
+ }
if (ICS.Standard.Second == ICK_Derived_To_Base)
return PerformObjectMemberConversion(From, Qualifier, FoundDecl, Method);
@@ -3174,7 +3538,8 @@ TryContextuallyConvertToBool(Sema &S, Expr *From) {
// FIXME: Are these flags correct?
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false);
}
/// PerformContextuallyConvertToBool - Perform a contextual conversion
@@ -3183,14 +3548,14 @@ bool Sema::PerformContextuallyConvertToBool(Expr *&From) {
ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From);
if (!ICS.isBad())
return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
-
+
if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
return Diag(From->getSourceRange().getBegin(),
diag::err_typecheck_bool_condition)
<< From->getType() << From->getSourceRange();
return true;
}
-
+
/// TryContextuallyConvertToObjCId - Attempt to contextually convert the
/// expression From to 'id'.
static ImplicitConversionSequence
@@ -3200,7 +3565,8 @@ TryContextuallyConvertToObjCId(Sema &S, Expr *From) {
// FIXME: Are these flags correct?
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false);
}
/// PerformContextuallyConvertToObjCId - Perform a contextual conversion
@@ -3213,7 +3579,7 @@ bool Sema::PerformContextuallyConvertToObjCId(Expr *&From) {
return true;
}
-/// \brief Attempt to convert the given expression to an integral or
+/// \brief Attempt to convert the given expression to an integral or
/// enumeration type.
///
/// This routine will attempt to convert an expression of class type to an
@@ -3241,7 +3607,7 @@ bool Sema::PerformContextuallyConvertToObjCId(Expr *&From) {
/// \param AmbigDiag The diagnostic to be emitted if there is more than one
/// conversion function that could convert to integral or enumeration type.
///
-/// \param AmbigNote The note to be emitted with \p AmbigDiag for each
+/// \param AmbigNote The note to be emitted with \p AmbigDiag for each
/// usable conversion function.
///
/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion
@@ -3249,7 +3615,7 @@ bool Sema::PerformContextuallyConvertToObjCId(Expr *&From) {
///
/// \returns The expression, converted to an integral or enumeration type if
/// successful.
-ExprResult
+ExprResult
Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
const PartialDiagnostic &NotIntDiag,
const PartialDiagnostic &IncompleteDiag,
@@ -3261,7 +3627,7 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
// We can't perform any more checking for type-dependent expressions.
if (From->isTypeDependent())
return Owned(From);
-
+
// If the expression already has integral or enumeration type, we're golden.
QualType T = From->getType();
if (T->isIntegralOrEnumerationType())
@@ -3269,7 +3635,7 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
// FIXME: Check for missing '()' if T is a function type?
- // If we don't have a class type in C++, there's no way we can get an
+ // If we don't have a class type in C++, there's no way we can get an
// expression of integral or enumeration type.
const RecordType *RecordTy = T->getAs<RecordType>();
if (!RecordTy || !getLangOptions().CPlusPlus) {
@@ -3277,20 +3643,20 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
<< T << From->getSourceRange();
return Owned(From);
}
-
+
// We must have a complete class type.
if (RequireCompleteType(Loc, T, IncompleteDiag))
return Owned(From);
-
+
// Look for a conversion to an integral or enumeration type.
UnresolvedSet<4> ViableConversions;
UnresolvedSet<4> ExplicitConversions;
const UnresolvedSetImpl *Conversions
= cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
-
+
for (UnresolvedSetImpl::iterator I = Conversions->begin(),
- E = Conversions->end();
- I != E;
+ E = Conversions->end();
+ I != E;
++I) {
if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl()))
@@ -3302,21 +3668,21 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
ViableConversions.addDecl(I.getDecl(), I.getAccess());
}
}
-
+
switch (ViableConversions.size()) {
case 0:
if (ExplicitConversions.size() == 1) {
DeclAccessPair Found = ExplicitConversions[0];
CXXConversionDecl *Conversion
= cast<CXXConversionDecl>(Found->getUnderlyingDecl());
-
+
// The user probably meant to invoke the given explicit
// conversion; use it.
QualType ConvTy
= Conversion->getConversionType().getNonReferenceType();
std::string TypeStr;
ConvTy.getAsStringInternal(TypeStr, Context.PrintingPolicy);
-
+
Diag(Loc, ExplicitConvDiag)
<< T << ConvTy
<< FixItHint::CreateInsertion(From->getLocStart(),
@@ -3325,41 +3691,49 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
")");
Diag(Conversion->getLocation(), ExplicitConvNote)
<< ConvTy->isEnumeralType() << ConvTy;
-
- // If we aren't in a SFINAE context, build a call to the
+
+ // If we aren't in a SFINAE context, build a call to the
// explicit conversion function.
if (isSFINAEContext())
return ExprError();
-
+
CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
- From = BuildCXXMemberCallExpr(From, Found, Conversion);
+ ExprResult Result = BuildCXXMemberCallExpr(From, Found, Conversion);
+ if (Result.isInvalid())
+ return ExprError();
+
+ From = Result.get();
}
-
+
// We'll complain below about a non-integral condition type.
break;
-
+
case 1: {
// Apply this conversion.
DeclAccessPair Found = ViableConversions[0];
CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
-
+
CXXConversionDecl *Conversion
= cast<CXXConversionDecl>(Found->getUnderlyingDecl());
QualType ConvTy
- = Conversion->getConversionType().getNonReferenceType();
+ = Conversion->getConversionType().getNonReferenceType();
if (ConvDiag.getDiagID()) {
if (isSFINAEContext())
return ExprError();
-
+
Diag(Loc, ConvDiag)
<< T << ConvTy->isEnumeralType() << ConvTy << From->getSourceRange();
}
-
- From = BuildCXXMemberCallExpr(From, Found,
+
+ ExprResult Result = BuildCXXMemberCallExpr(From, Found,
cast<CXXConversionDecl>(Found->getUnderlyingDecl()));
+ if (Result.isInvalid())
+ return ExprError();
+
+ From = Result.get();
break;
}
-
+
default:
Diag(Loc, AmbigDiag)
<< T << From->getSourceRange();
@@ -3372,7 +3746,7 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
}
return Owned(From);
}
-
+
if (!From->getType()->isIntegralOrEnumerationType())
Diag(Loc, NotIntDiag)
<< From->getType() << From->getSourceRange();
@@ -3411,7 +3785,8 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// object argument (C++ [over.call.func]p3), and the acting context
// is irrelevant.
AddMethodCandidate(Method, FoundDecl, Method->getParent(),
- QualType(), Args, NumArgs, CandidateSet,
+ QualType(), Expr::Classification::makeSimpleLValue(),
+ Args, NumArgs, CandidateSet,
SuppressUserConversions);
return;
}
@@ -3430,13 +3805,13 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
- if (NumArgs == 1 &&
- Constructor->isCopyConstructorLikeSpecialization() &&
+ if (NumArgs == 1 &&
+ Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
IsDerivedFrom(Args[0]->getType(), ClassType)))
return;
}
-
+
// Add this candidate
CandidateSet.push_back(OverloadCandidate());
OverloadCandidate& Candidate = CandidateSet.back();
@@ -3445,13 +3820,14 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.Viable = true;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = NumArgs;
unsigned NumArgsInProto = Proto->getNumArgs();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
- if ((NumArgs + (PartialOverloading && NumArgs)) > NumArgsInProto &&
+ if ((NumArgs + (PartialOverloading && NumArgs)) > NumArgsInProto &&
!Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
@@ -3483,7 +3859,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
QualType ParamType = Proto->getArgType(ArgIdx);
Candidate.Conversions[ArgIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
- SuppressUserConversions,
+ SuppressUserConversions,
/*InOverloadResolution=*/true);
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
@@ -3511,7 +3887,8 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
cast<CXXMethodDecl>(FD)->getParent(),
- Args[0]->getType(), Args + 1, NumArgs - 1,
+ Args[0]->getType(), Args[0]->Classify(Context),
+ Args + 1, NumArgs - 1,
CandidateSet, SuppressUserConversions);
else
AddOverloadCandidate(FD, F.getPair(), Args, NumArgs, CandidateSet,
@@ -3523,7 +3900,9 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
AddMethodTemplateCandidate(FunTmpl, F.getPair(),
cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
/*FIXME: explicit args */ 0,
- Args[0]->getType(), Args + 1, NumArgs - 1,
+ Args[0]->getType(),
+ Args[0]->Classify(Context),
+ Args + 1, NumArgs - 1,
CandidateSet,
SuppressUserConversions);
else
@@ -3539,6 +3918,7 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
/// method) as a method candidate to the given overload set.
void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
@@ -3547,18 +3927,18 @@ void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
if (isa<UsingShadowDecl>(Decl))
Decl = cast<UsingShadowDecl>(Decl)->getTargetDecl();
-
+
if (FunctionTemplateDecl *TD = dyn_cast<FunctionTemplateDecl>(Decl)) {
assert(isa<CXXMethodDecl>(TD->getTemplatedDecl()) &&
"Expected a member function template");
AddMethodTemplateCandidate(TD, FoundDecl, ActingContext,
/*ExplicitArgs*/ 0,
- ObjectType, Args, NumArgs,
+ ObjectType, ObjectClassification, Args, NumArgs,
CandidateSet,
SuppressUserConversions);
} else {
AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
- ObjectType, Args, NumArgs,
+ ObjectType, ObjectClassification, Args, NumArgs,
CandidateSet, SuppressUserConversions);
}
}
@@ -3573,6 +3953,7 @@ void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
void
Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
@@ -3595,6 +3976,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.Function = Method;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = NumArgs;
unsigned NumArgsInProto = Proto->getNumArgs();
@@ -3630,8 +4012,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// Determine the implicit conversion sequence for the object
// parameter.
Candidate.Conversions[0]
- = TryObjectArgumentInitialization(*this, ObjectType, Method,
- ActingContext);
+ = TryObjectArgumentInitialization(*this, ObjectType, ObjectClassification,
+ Method, ActingContext);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -3650,7 +4032,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
QualType ParamType = Proto->getArgType(ArgIdx);
Candidate.Conversions[ArgIdx + 1]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
- SuppressUserConversions,
+ SuppressUserConversions,
/*InOverloadResolution=*/true);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
@@ -3665,7 +4047,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
}
}
}
-
+
/// \brief Add a C++ member function template as a candidate to the candidate
/// set, using template argument deduction to produce an appropriate member
/// function template specialization.
@@ -3675,6 +4057,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
CXXRecordDecl *ActingContext,
const TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
+ Expr::Classification ObjectClassification,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
@@ -3703,7 +4086,8 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
}
@@ -3714,8 +4098,8 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
assert(isa<CXXMethodDecl>(Specialization) &&
"Specialization is not a member function?");
AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl,
- ActingContext, ObjectType, Args, NumArgs,
- CandidateSet, SuppressUserConversions);
+ ActingContext, ObjectType, ObjectClassification,
+ Args, NumArgs, CandidateSet, SuppressUserConversions);
}
/// \brief Add a C++ function template specialization as a candidate
@@ -3753,7 +4137,8 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
}
@@ -3798,10 +4183,11 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
Candidate.FinalConversion.setAllToTypes(ToType);
Candidate.Viable = true;
Candidate.Conversions.resize(1);
+ Candidate.ExplicitCallArguments = 1;
// C++ [over.match.funcs]p4:
- // For conversion functions, the function is considered to be a member of
- // the class of the implicit implied object argument for the purpose of
+ // For conversion functions, the function is considered to be a member of
+ // the class of the implicit implied object argument for the purpose of
// defining the type of the implicit object parameter.
//
// Determine the implicit conversion sequence for the implicit
@@ -3811,18 +4197,19 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
ImplicitParamType = FromPtrType->getPointeeType();
CXXRecordDecl *ConversionContext
= cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl());
-
+
Candidate.Conversions[0]
- = TryObjectArgumentInitialization(*this, From->getType(), Conversion,
- ConversionContext);
-
+ = TryObjectArgumentInitialization(*this, From->getType(),
+ From->Classify(Context),
+ Conversion, ConversionContext);
+
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
- // We won't go through a user-define type conversion function to convert a
+  // We won't go through a user-defined type conversion function to convert a
// derived to base as such conversions are given Conversion Rank. They only
// go through a copy constructor. 13.3.3.1.2-p4 [over.ics.user]
QualType FromCanon
@@ -3833,7 +4220,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
Candidate.FailureKind = ovl_fail_trivial_conversion;
return;
}
-
+
// To determine what the conversion from the result of calling the
// conversion function to the type we're eventually trying to
// convert to (ToType), we need to synthesize a call to the
@@ -3843,17 +4230,26 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// call on the stack and we don't need its arguments to be
// well-formed.
DeclRefExpr ConversionRef(Conversion, Conversion->getType(),
- From->getLocStart());
+ VK_LValue, From->getLocStart());
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
CK_FunctionToPointerDecay,
&ConversionRef, VK_RValue);
+ QualType CallResultType
+ = Conversion->getConversionType().getNonLValueExprType(Context);
+ if (RequireCompleteType(From->getLocStart(), CallResultType, 0)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ return;
+ }
+
+ ExprValueKind VK = Expr::getValueKindForType(Conversion->getConversionType());
+
// Note that it is safe to allocate CallExpr on the stack here because
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
- CallExpr Call(Context, &ConversionFn, 0, 0,
- Conversion->getConversionType().getNonLValueExprType(Context),
+ CallExpr Call(Context, &ConversionFn, 0, 0, CallResultType, VK,
From->getLocStart());
ImplicitConversionSequence ICS =
TryCopyInitialization(*this, &Call, ToType,
@@ -3863,17 +4259,27 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
Candidate.FinalConversion = ICS.Standard;
-
+
// C++ [over.ics.user]p3:
// If the user-defined conversion is specified by a specialization of a
- // conversion function template, the second standard conversion sequence
+ // conversion function template, the second standard conversion sequence
// shall have exact match rank.
if (Conversion->getPrimaryTemplate() &&
GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_final_conversion_not_exact;
}
-
+
+ // C++0x [dcl.init.ref]p5:
+ // In the second case, if the reference is an rvalue reference and
+ // the second standard conversion sequence of the user-defined
+ // conversion sequence includes an lvalue-to-rvalue conversion, the
+ // program is ill-formed.
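+  // For example, given "struct A { operator int&(); };", initializing
+  // "double &&r = A();" would require an lvalue-to-rvalue conversion in
+  // the second standard conversion sequence, so it is flagged here.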
+ if (ToType->isRValueReferenceType() &&
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ }
break;
case ImplicitConversionSequence::BadConversion:
@@ -3917,7 +4323,8 @@ Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Candidate.ExplicitCallArguments = 1;
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
}
@@ -3938,7 +4345,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
- QualType ObjectType,
+ Expr *Object,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet) {
if (!CandidateSet.isNewCandidate(Conversion))
@@ -3956,12 +4363,14 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
Candidate.IsSurrogate = true;
Candidate.IgnoreObjectArgument = false;
Candidate.Conversions.resize(NumArgs + 1);
+ Candidate.ExplicitCallArguments = NumArgs;
// Determine the implicit conversion sequence for the implicit
// object parameter.
ImplicitConversionSequence ObjectInit
- = TryObjectArgumentInitialization(*this, ObjectType, Conversion,
- ActingContext);
+ = TryObjectArgumentInitialization(*this, Object->getType(),
+ Object->Classify(Context),
+ Conversion, ActingContext);
if (ObjectInit.isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -3976,6 +4385,8 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard;
Candidate.Conversions[0].UserDefined.EllipsisConversion = false;
Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion;
+ Candidate.Conversions[0].UserDefined.FoundConversionFunction
+ = FoundDecl.getDecl();
Candidate.Conversions[0].UserDefined.After
= Candidate.Conversions[0].UserDefined.Before;
Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
@@ -4071,7 +4482,8 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
Oper != OperEnd;
++Oper)
AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
- Args + 1, NumArgs - 1, CandidateSet,
+ Args[0]->Classify(Context), Args + 1, NumArgs - 1,
+ CandidateSet,
/* SuppressUserConversions = */ false);
}
}
@@ -4107,6 +4519,7 @@ void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
// arguments.
Candidate.Viable = true;
Candidate.Conversions.resize(NumArgs);
+ Candidate.ExplicitCallArguments = NumArgs;
for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
// C++ [over.match.oper]p4:
// For the built-in assignment operators, conversions of the
@@ -4159,10 +4572,17 @@ class BuiltinCandidateTypeSet {
/// used in the built-in candidates.
TypeSet EnumerationTypes;
- /// \brief The set of vector types that will be used in the built-in
+ /// \brief The set of vector types that will be used in the built-in
/// candidates.
TypeSet VectorTypes;
-
+
+  /// \brief A flag indicating whether non-record types are viable candidates.
+ bool HasNonRecordTypes;
+
+ /// \brief A flag indicating whether either arithmetic or enumeration types
+ /// were present in the candidate set.
+ bool HasArithmeticOrEnumeralTypes;
+
/// Sema - The semantic analysis instance where we are building the
/// candidate type set.
Sema &SemaRef;
@@ -4179,9 +4599,12 @@ public:
typedef TypeSet::iterator iterator;
BuiltinCandidateTypeSet(Sema &SemaRef)
- : SemaRef(SemaRef), Context(SemaRef.Context) { }
+ : HasNonRecordTypes(false),
+ HasArithmeticOrEnumeralTypes(false),
+ SemaRef(SemaRef),
+ Context(SemaRef.Context) { }
- void AddTypesConvertedFrom(QualType Ty,
+ void AddTypesConvertedFrom(QualType Ty,
SourceLocation Loc,
bool AllowUserConversions,
bool AllowExplicitConversions,
@@ -4204,9 +4627,12 @@ public:
/// enumeration_end - Past the last enumeration type found;
iterator enumeration_end() { return EnumerationTypes.end(); }
-
+
iterator vector_begin() { return VectorTypes.begin(); }
iterator vector_end() { return VectorTypes.end(); }
+
+ bool hasNonRecordTypes() { return HasNonRecordTypes; }
+ bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
};
/// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to
@@ -4225,7 +4651,7 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
// Insert this type.
if (!PointerTypes.insert(Ty))
return false;
-
+
QualType PointeeTy;
const PointerType *PointerTy = Ty->getAs<PointerType>();
bool buildObjCPtr = false;
@@ -4239,7 +4665,7 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
}
else
PointeeTy = PointerTy->getPointeeType();
-
+
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
@@ -4251,7 +4677,7 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
BaseCVR = Array->getElementType().getCVRQualifiers();
bool hasVolatile = VisibleQuals.hasVolatile();
bool hasRestrict = VisibleQuals.hasRestrict();
-
+
// Iterate through all strict supersets of BaseCVR.
for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
if ((CVR | BaseCVR) != CVR) continue;
@@ -4302,9 +4728,10 @@ BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
if ((CVR | BaseCVR) != CVR) continue;
-
+
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
- MemberPointerTypes.insert(Context.getMemberPointerType(QPointeeTy, ClassTy));
+ MemberPointerTypes.insert(
+ Context.getMemberPointerType(QPointeeTy, ClassTy));
}
return true;
@@ -4332,12 +4759,21 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
if (const ReferenceType *RefTy = Ty->getAs<ReferenceType>())
Ty = RefTy->getPointeeType();
- // We don't care about qualifiers on the type.
- Ty = Ty.getLocalUnqualifiedType();
-
// If we're dealing with an array type, decay to the pointer.
if (Ty->isArrayType())
Ty = SemaRef.Context.getArrayDecayedType(Ty);
+
+ // Otherwise, we don't care about qualifiers on the type.
+ Ty = Ty.getLocalUnqualifiedType();
+
+ // Flag if we ever add a non-record type.
+ const RecordType *TyRec = Ty->getAs<RecordType>();
+ HasNonRecordTypes = HasNonRecordTypes || !TyRec;
+
+ // Flag if we encounter an arithmetic type.
+ HasArithmeticOrEnumeralTypes =
+ HasArithmeticOrEnumeralTypes || Ty->isArithmeticType();
+
if (Ty->isObjCIdType() || Ty->isObjCClassType())
PointerTypes.insert(Ty);
else if (Ty->getAs<PointerType>() || Ty->getAs<ObjCObjectPointerType>()) {
@@ -4350,35 +4786,36 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty))
return;
} else if (Ty->isEnumeralType()) {
+ HasArithmeticOrEnumeralTypes = true;
EnumerationTypes.insert(Ty);
} else if (Ty->isVectorType()) {
+ // We treat vector types as arithmetic types in many contexts as an
+ // extension.
+ HasArithmeticOrEnumeralTypes = true;
VectorTypes.insert(Ty);
- } else if (AllowUserConversions) {
- if (const RecordType *TyRec = Ty->getAs<RecordType>()) {
- if (SemaRef.RequireCompleteType(Loc, Ty, 0)) {
- // No conversion functions in incomplete types.
- return;
- }
+ } else if (AllowUserConversions && TyRec) {
+ // No conversion functions in incomplete types.
+ if (SemaRef.RequireCompleteType(Loc, Ty, 0))
+ return;
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
- const UnresolvedSetImpl *Conversions
- = ClassDecl->getVisibleConversionFunctions();
- for (UnresolvedSetImpl::iterator I = Conversions->begin(),
- E = Conversions->end(); I != E; ++I) {
- NamedDecl *D = I.getDecl();
- if (isa<UsingShadowDecl>(D))
- D = cast<UsingShadowDecl>(D)->getTargetDecl();
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ const UnresolvedSetImpl *Conversions
+ = ClassDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = I.getDecl();
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
- // Skip conversion function templates; they don't tell us anything
- // about which builtin types we can convert to.
- if (isa<FunctionTemplateDecl>(D))
- continue;
+ // Skip conversion function templates; they don't tell us anything
+ // about which builtin types we can convert to.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
- CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
- if (AllowExplicitConversions || !Conv->isExplicit()) {
- AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false,
- VisibleQuals);
- }
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+ if (AllowExplicitConversions || !Conv->isExplicit()) {
+ AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false,
+ VisibleQuals);
}
}
}
@@ -4426,14 +4863,14 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
VRQuals.addRestrict();
return VRQuals;
}
-
+
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
if (!ClassDecl->hasDefinition())
return VRQuals;
const UnresolvedSetImpl *Conversions =
ClassDecl->getVisibleConversionFunctions();
-
+
for (UnresolvedSetImpl::iterator I = Conversions->begin(),
E = Conversions->end(); I != E; ++I) {
NamedDecl *D = I.getDecl();
@@ -4449,7 +4886,7 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
while (!done) {
if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>())
CanTy = ResTypePtr->getPointeeType();
- else if (const MemberPointerType *ResTypeMPtr =
+ else if (const MemberPointerType *ResTypeMPtr =
CanTy->getAs<MemberPointerType>())
CanTy = ResTypeMPtr->getPointeeType();
else
@@ -4465,765 +4902,1174 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
}
return VRQuals;
}
-
-/// AddBuiltinOperatorCandidates - Add the appropriate built-in
-/// operator overloads to the candidate set (C++ [over.built]), based
-/// on the operator @p Op and the arguments given. For example, if the
-/// operator is a binary '+', this routine might add "int
-/// operator+(int, int)" to cover integer addition.
-void
-Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
- SourceLocation OpLoc,
- Expr **Args, unsigned NumArgs,
- OverloadCandidateSet& CandidateSet) {
- // The set of "promoted arithmetic types", which are the arithmetic
- // types are that preserved by promotion (C++ [over.built]p2). Note
- // that the first few of these types are the promoted integral
- // types; these types need to be first.
- // FIXME: What about complex?
- const unsigned FirstIntegralType = 0;
- const unsigned LastIntegralType = 13;
- const unsigned FirstPromotedIntegralType = 7,
- LastPromotedIntegralType = 13;
- const unsigned FirstPromotedArithmeticType = 7,
- LastPromotedArithmeticType = 16;
- const unsigned NumArithmeticTypes = 16;
- QualType ArithmeticTypes[NumArithmeticTypes] = {
- Context.BoolTy, Context.CharTy, Context.WCharTy,
-// FIXME: Context.Char16Ty, Context.Char32Ty,
- Context.SignedCharTy, Context.ShortTy,
- Context.UnsignedCharTy, Context.UnsignedShortTy,
- Context.IntTy, Context.LongTy, Context.LongLongTy,
- Context.UnsignedIntTy, Context.UnsignedLongTy, Context.UnsignedLongLongTy,
- Context.FloatTy, Context.DoubleTy, Context.LongDoubleTy
- };
- assert(ArithmeticTypes[FirstPromotedIntegralType] == Context.IntTy &&
- "Invalid first promoted integral type");
- assert(ArithmeticTypes[LastPromotedIntegralType - 1]
- == Context.UnsignedLongLongTy &&
- "Invalid last promoted integral type");
- assert(ArithmeticTypes[FirstPromotedArithmeticType] == Context.IntTy &&
- "Invalid first promoted arithmetic type");
- assert(ArithmeticTypes[LastPromotedArithmeticType - 1]
- == Context.LongDoubleTy &&
- "Invalid last promoted arithmetic type");
-
- // Find all of the types that the arguments can convert to, but only
- // if the operator we're looking at has built-in operator candidates
- // that make use of these types.
- Qualifiers VisibleTypeConversionsQuals;
- VisibleTypeConversionsQuals.addConst();
- for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
- VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
-
- BuiltinCandidateTypeSet CandidateTypes(*this);
- for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
- CandidateTypes.AddTypesConvertedFrom(Args[ArgIdx]->getType(),
- OpLoc,
- true,
- (Op == OO_Exclaim ||
- Op == OO_AmpAmp ||
- Op == OO_PipePipe),
- VisibleTypeConversionsQuals);
-
- bool isComparison = false;
- switch (Op) {
- case OO_None:
- case NUM_OVERLOADED_OPERATORS:
- assert(false && "Expected an overloaded operator");
- break;
- case OO_Star: // '*' is either unary or binary
- if (NumArgs == 1)
- goto UnaryStar;
- else
- goto BinaryStar;
- break;
+namespace {
- case OO_Plus: // '+' is either unary or binary
- if (NumArgs == 1)
- goto UnaryPlus;
- else
- goto BinaryPlus;
- break;
+/// \brief Helper class to manage the addition of builtin operator overload
+/// candidates. It provides shared state and utility methods used throughout
+/// the process, as well as a helper method to add each group of builtin
+/// operator overloads from the standard to a candidate set.
+class BuiltinOperatorOverloadBuilder {
+ // Common instance state available to all overload candidate addition methods.
+ Sema &S;
+ Expr **Args;
+ unsigned NumArgs;
+ Qualifiers VisibleTypeConversionsQuals;
+ bool HasArithmeticOrEnumeralCandidateType;
+ llvm::SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
+ OverloadCandidateSet &CandidateSet;
+
+  // Define some constants used to index and iterate over the arithmetic types
+  // provided via the getArithmeticType() method below.
+  // The "promoted arithmetic types" are the arithmetic
+  // types that are preserved by promotion (C++ [over.built]p2).
+ static const unsigned FirstIntegralType = 3;
+ static const unsigned LastIntegralType = 18;
+ static const unsigned FirstPromotedIntegralType = 3,
+ LastPromotedIntegralType = 9;
+ static const unsigned FirstPromotedArithmeticType = 0,
+ LastPromotedArithmeticType = 9;
+ static const unsigned NumArithmeticTypes = 18;
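+  // In the getArithmeticType() table below, indices [0,3) are the
+  // promoted floating-point types, [3,9) the promoted integral types,
+  // and [9,18) the remaining (unpromoted) integral types.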
+
+ /// \brief Get the canonical type for a given arithmetic type index.
+ CanQualType getArithmeticType(unsigned index) {
+ assert(index < NumArithmeticTypes);
+ static CanQualType ASTContext::* const
+ ArithmeticTypes[NumArithmeticTypes] = {
+ // Start of promoted types.
+ &ASTContext::FloatTy,
+ &ASTContext::DoubleTy,
+ &ASTContext::LongDoubleTy,
+
+ // Start of integral types.
+ &ASTContext::IntTy,
+ &ASTContext::LongTy,
+ &ASTContext::LongLongTy,
+ &ASTContext::UnsignedIntTy,
+ &ASTContext::UnsignedLongTy,
+ &ASTContext::UnsignedLongLongTy,
+ // End of promoted types.
+
+ &ASTContext::BoolTy,
+ &ASTContext::CharTy,
+ &ASTContext::WCharTy,
+ &ASTContext::Char16Ty,
+ &ASTContext::Char32Ty,
+ &ASTContext::SignedCharTy,
+ &ASTContext::ShortTy,
+ &ASTContext::UnsignedCharTy,
+ &ASTContext::UnsignedShortTy,
+ // End of integral types.
+ // FIXME: What about complex?
+ };
+ return S.Context.*ArithmeticTypes[index];
+ }
+
+  /// \brief Gets the canonical type resulting from the usual arithmetic
+  /// conversions for the given arithmetic types.
+ CanQualType getUsualArithmeticConversions(unsigned L, unsigned R) {
+ // Accelerator table for performing the usual arithmetic conversions.
+ // The rules are basically:
+ // - if either is floating-point, use the wider floating-point
+ // - if same signedness, use the higher rank
+ // - if same size, use unsigned of the higher rank
+ // - use the larger type
+ // These rules, together with the axiom that higher ranks are
+ // never smaller, are sufficient to precompute all of these results
+ // *except* when dealing with signed types of higher rank.
+ // (we could precompute SLL x UI for all known platforms, but it's
+ // better not to make any assumptions).
+ enum PromotedType {
+ Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL, Dep=-1
+ };
+ static PromotedType ConversionsTable[LastPromotedArithmeticType]
+ [LastPromotedArithmeticType] = {
+ /* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt },
+ /* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl },
+ /*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl },
+ /* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL },
+ /* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, Dep, UL, ULL },
+ /* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, Dep, Dep, ULL },
+ /* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, UI, UL, ULL },
+ /* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, UL, UL, ULL },
+ /* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, ULL, ULL, ULL },
+ };
- case OO_Minus: // '-' is either unary or binary
- if (NumArgs == 1)
- goto UnaryMinus;
- else
- goto BinaryMinus;
- break;
+ assert(L < LastPromotedArithmeticType);
+ assert(R < LastPromotedArithmeticType);
+ int Idx = ConversionsTable[L][R];
+
+ // Fast path: the table gives us a concrete answer.
+ if (Idx != Dep) return getArithmeticType(Idx);
+
+ // Slow path: we need to compare widths.
+ // An invariant is that the signed type has higher rank.
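+    // For example, (SL, UI) is Dep in the table: on LP64 targets long is
+    // wider than unsigned int and the result is long, while on ILP32
+    // targets the widths match and the result is unsigned long.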
+ CanQualType LT = getArithmeticType(L),
+ RT = getArithmeticType(R);
+ unsigned LW = S.Context.getIntWidth(LT),
+ RW = S.Context.getIntWidth(RT);
+
+ // If they're different widths, use the signed type.
+ if (LW > RW) return LT;
+ else if (LW < RW) return RT;
+
+ // Otherwise, use the unsigned type of the signed type's rank.
+ if (L == SL || R == SL) return S.Context.UnsignedLongTy;
+ assert(L == SLL || R == SLL);
+ return S.Context.UnsignedLongLongTy;
+ }
+
+ /// \brief Helper method to factor out the common pattern of adding overloads
+ /// for '++' and '--' builtin operators.
+ void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
+ bool HasVolatile) {
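+    // For example, for CandidateTy == int, the prefix form below adds the
+    // candidate "int &operator++(int &)" and the postfix form adds
+    // "int operator++(int &, int)", plus volatile variants when
+    // HasVolatile is set.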
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(CandidateTy),
+ S.Context.IntTy
+ };
- case OO_Amp: // '&' is either unary or binary
+ // Non-volatile version.
if (NumArgs == 1)
- goto UnaryAmp;
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
else
- goto BinaryAmp;
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+
+    // Use a heuristic to reduce the number of builtin candidates in the set:
+    // add the volatile version only if there are conversions to a volatile
+    // type.
+ if (HasVolatile) {
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(
+ S.Context.getVolatileType(CandidateTy));
+ if (NumArgs == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+
+public:
+ BuiltinOperatorOverloadBuilder(
+ Sema &S, Expr **Args, unsigned NumArgs,
+ Qualifiers VisibleTypeConversionsQuals,
+ bool HasArithmeticOrEnumeralCandidateType,
+ llvm::SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
+ OverloadCandidateSet &CandidateSet)
+ : S(S), Args(Args), NumArgs(NumArgs),
+ VisibleTypeConversionsQuals(VisibleTypeConversionsQuals),
+ HasArithmeticOrEnumeralCandidateType(
+ HasArithmeticOrEnumeralCandidateType),
+ CandidateTypes(CandidateTypes),
+ CandidateSet(CandidateSet) {
+ // Validate some of our static helper constants in debug builds.
+ assert(getArithmeticType(FirstPromotedIntegralType) == S.Context.IntTy &&
+ "Invalid first promoted integral type");
+ assert(getArithmeticType(LastPromotedIntegralType - 1)
+ == S.Context.UnsignedLongLongTy &&
+ "Invalid last promoted integral type");
+ assert(getArithmeticType(FirstPromotedArithmeticType)
+ == S.Context.FloatTy &&
+ "Invalid first promoted arithmetic type");
+ assert(getArithmeticType(LastPromotedArithmeticType - 1)
+ == S.Context.UnsignedLongLongTy &&
+ "Invalid last promoted arithmetic type");
+ }
+
+ // C++ [over.built]p3:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type, and VQ
+ // is either volatile or empty, there exist candidate operator
+ // functions of the form
+ //
+ // VQ T& operator++(VQ T&);
+ // T operator++(VQ T&, int);
+ //
+ // C++ [over.built]p4:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type other
+ // than bool, and VQ is either volatile or empty, there exist
+ // candidate operator functions of the form
+ //
+ // VQ T& operator--(VQ T&);
+ // T operator--(VQ T&, int);
+ void addPlusPlusMinusMinusArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- case OO_PlusPlus:
- case OO_MinusMinus:
- // C++ [over.built]p3:
- //
- // For every pair (T, VQ), where T is an arithmetic type, and VQ
- // is either volatile or empty, there exist candidate operator
- // functions of the form
- //
- // VQ T& operator++(VQ T&);
- // T operator++(VQ T&, int);
- //
- // C++ [over.built]p4:
- //
- // For every pair (T, VQ), where T is an arithmetic type other
- // than bool, and VQ is either volatile or empty, there exist
- // candidate operator functions of the form
- //
- // VQ T& operator--(VQ T&);
- // T operator--(VQ T&, int);
for (unsigned Arith = (Op == OO_PlusPlus? 0 : 1);
Arith < NumArithmeticTypes; ++Arith) {
- QualType ArithTy = ArithmeticTypes[Arith];
- QualType ParamTypes[2]
- = { Context.getLValueReferenceType(ArithTy), Context.IntTy };
-
- // Non-volatile version.
- if (NumArgs == 1)
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
- else
- AddBuiltinCandidate(ArithTy, ParamTypes, Args, 2, CandidateSet);
- // heuristic to reduce number of builtin candidates in the set.
- // Add volatile version only if there are conversions to a volatile type.
- if (VisibleTypeConversionsQuals.hasVolatile()) {
- // Volatile version
- ParamTypes[0]
- = Context.getLValueReferenceType(Context.getVolatileType(ArithTy));
- if (NumArgs == 1)
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
- else
- AddBuiltinCandidate(ArithTy, ParamTypes, Args, 2, CandidateSet);
- }
+ addPlusPlusMinusMinusStyleOverloads(
+ getArithmeticType(Arith),
+ VisibleTypeConversionsQuals.hasVolatile());
}
+ }
- // C++ [over.built]p5:
- //
- // For every pair (T, VQ), where T is a cv-qualified or
- // cv-unqualified object type, and VQ is either volatile or
- // empty, there exist candidate operator functions of the form
- //
- // T*VQ& operator++(T*VQ&);
- // T*VQ& operator--(T*VQ&);
- // T* operator++(T*VQ&, int);
- // T* operator--(T*VQ&, int);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ // C++ [over.built]p5:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type, and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator++(T*VQ&);
+ // T*VQ& operator--(T*VQ&);
+ // T* operator++(T*VQ&, int);
+ // T* operator--(T*VQ&, int);
+ void addPlusPlusMinusMinusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
// Skip pointer types that aren't pointers to object types.
- if (!(*Ptr)->getPointeeType()->isIncompleteOrObjectType())
+ if (!(*Ptr)->getPointeeType()->isObjectType())
continue;
- QualType ParamTypes[2] = {
- Context.getLValueReferenceType(*Ptr), Context.IntTy
- };
-
- // Without volatile
- if (NumArgs == 1)
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
- else
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
-
- if (!Context.getCanonicalType(*Ptr).isVolatileQualified() &&
- VisibleTypeConversionsQuals.hasVolatile()) {
- // With volatile
- ParamTypes[0]
- = Context.getLValueReferenceType(Context.getVolatileType(*Ptr));
- if (NumArgs == 1)
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
- else
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
- }
+ addPlusPlusMinusMinusStyleOverloads(*Ptr,
+ (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()));
}
- break;
+ }
- UnaryStar:
- // C++ [over.built]p6:
- // For every cv-qualified or cv-unqualified object type T, there
- // exist candidate operator functions of the form
- //
- // T& operator*(T*);
- //
- // C++ [over.built]p7:
- // For every function type T, there exist candidate operator
- // functions of the form
- // T& operator*(T*);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ // C++ [over.built]p6:
+ // For every cv-qualified or cv-unqualified object type T, there
+ // exist candidate operator functions of the form
+ //
+ // T& operator*(T*);
+ //
+ // C++ [over.built]p7:
+ // For every function type T that does not have cv-qualifiers or a
+ // ref-qualifier, there exist candidate operator functions of the form
+ // T& operator*(T*);
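+  //
+  // For example, an operand of type "int *" contributes the candidate
+  // "int &operator*(int *)".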
+ void addUnaryStarPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
QualType ParamTy = *Ptr;
QualType PointeeTy = ParamTy->getPointeeType();
- AddBuiltinCandidate(Context.getLValueReferenceType(PointeeTy),
- &ParamTy, Args, 1, CandidateSet);
- }
- break;
+ if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType())
+ continue;
- UnaryPlus:
- // C++ [over.built]p8:
- // For every type T, there exist candidate operator functions of
- // the form
- //
- // T* operator+(T*);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType ParamTy = *Ptr;
- AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet);
+      if (const FunctionProtoType *Proto
+            = PointeeTy->getAs<FunctionProtoType>())
+ if (Proto->getTypeQuals() || Proto->getRefQualifier())
+ continue;
+
+ S.AddBuiltinCandidate(S.Context.getLValueReferenceType(PointeeTy),
+ &ParamTy, Args, 1, CandidateSet);
}
+ }
- // Fall through
+ // C++ [over.built]p9:
+ // For every promoted arithmetic type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator+(T);
+ // T operator-(T);
+ void addUnaryPlusOrMinusArithmeticOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- UnaryMinus:
- // C++ [over.built]p9:
- // For every promoted arithmetic type T, there exist candidate
- // operator functions of the form
- //
- // T operator+(T);
- // T operator-(T);
for (unsigned Arith = FirstPromotedArithmeticType;
Arith < LastPromotedArithmeticType; ++Arith) {
- QualType ArithTy = ArithmeticTypes[Arith];
- AddBuiltinCandidate(ArithTy, &ArithTy, Args, 1, CandidateSet);
+ QualType ArithTy = getArithmeticType(Arith);
+ S.AddBuiltinCandidate(ArithTy, &ArithTy, Args, 1, CandidateSet);
}
-
+
// Extension: We also add these operators for vector types.
- for (BuiltinCandidateTypeSet::iterator Vec = CandidateTypes.vector_begin(),
- VecEnd = CandidateTypes.vector_end();
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
Vec != VecEnd; ++Vec) {
QualType VecTy = *Vec;
- AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
}
- break;
+ }
+
+ // C++ [over.built]p8:
+ // For every type T, there exist candidate operator functions of
+ // the form
+ //
+ // T* operator+(T*);
+ void addUnaryPlusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTy = *Ptr;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p10:
+ // For every promoted integral type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator~(T);
+ void addUnaryTildePromotedIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- case OO_Tilde:
- // C++ [over.built]p10:
- // For every promoted integral type T, there exist candidate
- // operator functions of the form
- //
- // T operator~(T);
for (unsigned Int = FirstPromotedIntegralType;
Int < LastPromotedIntegralType; ++Int) {
- QualType IntTy = ArithmeticTypes[Int];
- AddBuiltinCandidate(IntTy, &IntTy, Args, 1, CandidateSet);
+ QualType IntTy = getArithmeticType(Int);
+ S.AddBuiltinCandidate(IntTy, &IntTy, Args, 1, CandidateSet);
}
-
+
// Extension: We also add this operator for vector types.
- for (BuiltinCandidateTypeSet::iterator Vec = CandidateTypes.vector_begin(),
- VecEnd = CandidateTypes.vector_end();
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
Vec != VecEnd; ++Vec) {
QualType VecTy = *Vec;
- AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
- }
- break;
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
+ }
+ }
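+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator unsigned char(); };
+  //   S s;
+  //   auto n = ~s; // unsigned char promotes to int, so the builtin
+  //                // candidate "int operator~(int)" yields an int result.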
- case OO_New:
- case OO_Delete:
- case OO_Array_New:
- case OO_Array_Delete:
- case OO_Call:
- assert(false && "Special operators don't use AddBuiltinOperatorCandidates");
- break;
+ // C++ [over.match.oper]p16:
+ // For every pointer to member type T, there exist candidate operator
+ // functions of the form
+ //
+ // bool operator==(T,T);
+ // bool operator!=(T,T);
+ void addEqualEqualOrNotEqualMemberPointerOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd;
+ ++MemPtr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ continue;
- case OO_Comma:
- UnaryAmp:
- case OO_Arrow:
- // C++ [over.match.oper]p3:
- // -- For the operator ',', the unary operator '&', or the
- // operator '->', the built-in candidates set is empty.
- break;
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
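+
+  // For example (informal sketch; the types are hypothetical):
+  //
+  //   struct A { int x; };
+  //   struct P { operator int A::*(); };
+  //   P p;
+  //   bool b = (p == &A::x); // builtin "bool operator==(int A::*, int A::*)"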
- case OO_EqualEqual:
- case OO_ExclaimEqual:
- // C++ [over.match.oper]p16:
- // For every pointer to member type T, there exist candidate operator
- // functions of the form
+ // C++ [over.built]p15:
+ //
+ // For every pointer or enumeration type T, there exist
+ // candidate operator functions of the form
+ //
+ // bool operator<(T, T);
+ // bool operator>(T, T);
+ // bool operator<=(T, T);
+ // bool operator>=(T, T);
+ // bool operator==(T, T);
+ // bool operator!=(T, T);
+ void addRelationalPointerOrEnumeralOverloads() {
+ // C++ [over.built]p1:
+ // If there is a user-written candidate with the same name and parameter
+ // types as a built-in candidate operator function, the built-in operator
+ // function is hidden and is not included in the set of candidate
+ // functions.
//
- // bool operator==(T,T);
- // bool operator!=(T,T);
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes.member_pointer_begin(),
- MemPtrEnd = CandidateTypes.member_pointer_end();
- MemPtr != MemPtrEnd;
- ++MemPtr) {
- QualType ParamTypes[2] = { *MemPtr, *MemPtr };
- AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet);
- }
+ // The text is actually in a note, but if we don't implement it then we end
+ // up with ambiguities when the user provides an overloaded operator for
+ // an enumeration type. Note that only enumeration types have this problem,
+ // so we track which enumeration types we've seen operators for. Also, the
+  // only other overloaded operator with enumeration arguments, operator=,
+ // cannot be overloaded for enumeration types, so this is the only place
+ // where we must suppress candidates like this.
+ llvm::DenseSet<std::pair<CanQualType, CanQualType> >
+ UserDefinedBinaryOperators;
+
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ if (CandidateTypes[ArgIdx].enumeration_begin() !=
+ CandidateTypes[ArgIdx].enumeration_end()) {
+ for (OverloadCandidateSet::iterator C = CandidateSet.begin(),
+ CEnd = CandidateSet.end();
+ C != CEnd; ++C) {
+ if (!C->Viable || !C->Function || C->Function->getNumParams() != 2)
+ continue;
- // Fall through
+ QualType FirstParamType =
+ C->Function->getParamDecl(0)->getType().getUnqualifiedType();
+ QualType SecondParamType =
+ C->Function->getParamDecl(1)->getType().getUnqualifiedType();
- case OO_Less:
- case OO_Greater:
- case OO_LessEqual:
- case OO_GreaterEqual:
- // C++ [over.built]p15:
- //
- // For every pointer or enumeration type T, there exist
- // candidate operator functions of the form
- //
- // bool operator<(T, T);
- // bool operator>(T, T);
- // bool operator<=(T, T);
- // bool operator>=(T, T);
- // bool operator==(T, T);
- // bool operator!=(T, T);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, *Ptr };
- AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet);
- }
- for (BuiltinCandidateTypeSet::iterator Enum
- = CandidateTypes.enumeration_begin();
- Enum != CandidateTypes.enumeration_end(); ++Enum) {
- QualType ParamTypes[2] = { *Enum, *Enum };
- AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet);
+ // Skip if either parameter isn't of enumeral type.
+ if (!FirstParamType->isEnumeralType() ||
+ !SecondParamType->isEnumeralType())
+ continue;
+
+ // Add this operator to the set of known user-defined operators.
+ UserDefinedBinaryOperators.insert(
+ std::make_pair(S.Context.getCanonicalType(FirstParamType),
+ S.Context.getCanonicalType(SecondParamType)));
+ }
+ }
}
- // Fall through.
- isComparison = true;
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
- BinaryPlus:
- BinaryMinus:
- if (!isComparison) {
- // We didn't fall through, so we must have OO_Plus or OO_Minus.
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
- // C++ [over.built]p13:
- //
- // For every cv-qualified or cv-unqualified object type T
- // there exist candidate operator functions of the form
- //
- // T* operator+(T*, ptrdiff_t);
- // T& operator[](T*, ptrdiff_t); [BELOW]
- // T* operator-(T*, ptrdiff_t);
- // T* operator+(ptrdiff_t, T*);
- // T& operator[](ptrdiff_t, T*); [BELOW]
- //
- // C++ [over.built]p14:
- //
- // For every T, where T is a pointer to object type, there
- // exist candidate operator functions of the form
- //
- // ptrdiff_t operator-(T, T);
- for (BuiltinCandidateTypeSet::iterator Ptr
- = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, Context.getPointerDiffType() };
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ CanQualType CanonType = S.Context.getCanonicalType(*Enum);
+
+ // Don't add the same builtin candidate twice, or if a user defined
+ // candidate exists.
+ if (!AddedTypes.insert(CanonType) ||
+ UserDefinedBinaryOperators.count(std::make_pair(CanonType,
+ CanonType)))
+ continue;
- // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
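+
+  // For example (informal sketch; 'E' and its operator are hypothetical):
+  //
+  //   enum E { e1, e2 };
+  //   bool operator<(E, E); // user-written candidate
+  //   bool b = e1 < e2;     // the builtin "bool operator<(E, E)" is
+  //                         // suppressed here, avoiding an ambiguity.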
- if (Op == OO_Plus) {
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T
+ // there exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t);
+ // T& operator[](T*, ptrdiff_t); [BELOW]
+ // T* operator-(T*, ptrdiff_t);
+ // T* operator+(ptrdiff_t, T*);
+ // T& operator[](ptrdiff_t, T*); [BELOW]
+ //
+ // C++ [over.built]p14:
+ //
+ // For every T, where T is a pointer to object type, there
+ // exist candidate operator functions of the form
+ //
+ // ptrdiff_t operator-(T, T);
+ void addBinaryPlusOrMinusPointerOverloads(OverloadedOperatorKind Op) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (int Arg = 0; Arg < 2; ++Arg) {
+      QualType AsymmetricParamTypes[2] = {
+ S.Context.getPointerDiffType(),
+ S.Context.getPointerDiffType(),
+ };
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[Arg].pointer_begin(),
+ PtrEnd = CandidateTypes[Arg].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType PointeeTy = (*Ptr)->getPointeeType();
+ if (!PointeeTy->isObjectType())
+ continue;
+
+        AsymmetricParamTypes[Arg] = *Ptr;
+ if (Arg == 0 || Op == OO_Plus) {
+ // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
// T* operator+(ptrdiff_t, T*);
- ParamTypes[0] = ParamTypes[1];
- ParamTypes[1] = *Ptr;
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
- } else {
+          S.AddBuiltinCandidate(*Ptr, AsymmetricParamTypes, Args, 2,
+ CandidateSet);
+ }
+ if (Op == OO_Minus) {
// ptrdiff_t operator-(T, T);
- ParamTypes[1] = *Ptr;
- AddBuiltinCandidate(Context.getPointerDiffType(), ParamTypes,
- Args, 2, CandidateSet);
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.getPointerDiffType(), ParamTypes,
+ Args, 2, CandidateSet);
}
}
}
- // Fall through
+ }
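+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator int*(); };
+  //   S s; int *q = 0;
+  //   int *a = s + 1;      // T* operator+(T*, ptrdiff_t)
+  //   int *b = 1 + s;      // T* operator+(ptrdiff_t, T*)
+  //   ptrdiff_t d = s - q; // ptrdiff_t operator-(T, T)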
+
+ // C++ [over.built]p12:
+ //
+ // For every pair of promoted arithmetic types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator*(L, R);
+ // LR operator/(L, R);
+ // LR operator+(L, R);
+ // LR operator-(L, R);
+ // bool operator<(L, R);
+ // bool operator>(L, R);
+ // bool operator<=(L, R);
+ // bool operator>=(L, R);
+ // bool operator==(L, R);
+ // bool operator!=(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ //
+ // C++ [over.built]p24:
+ //
+ // For every pair of promoted arithmetic types L and R, there exist
+ // candidate operator functions of the form
+ //
+ // LR operator?(bool, L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ // Our candidates ignore the first parameter.
+ void addGenericBinaryArithmeticOverloads(bool isComparison) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- case OO_Slash:
- BinaryStar:
- Conditional:
- // C++ [over.built]p12:
- //
- // For every pair of promoted arithmetic types L and R, there
- // exist candidate operator functions of the form
- //
- // LR operator*(L, R);
- // LR operator/(L, R);
- // LR operator+(L, R);
- // LR operator-(L, R);
- // bool operator<(L, R);
- // bool operator>(L, R);
- // bool operator<=(L, R);
- // bool operator>=(L, R);
- // bool operator==(L, R);
- // bool operator!=(L, R);
- //
- // where LR is the result of the usual arithmetic conversions
- // between types L and R.
- //
- // C++ [over.built]p24:
- //
- // For every pair of promoted arithmetic types L and R, there exist
- // candidate operator functions of the form
- //
- // LR operator?(bool, L, R);
- //
- // where LR is the result of the usual arithmetic conversions
- // between types L and R.
- // Our candidates ignore the first parameter.
for (unsigned Left = FirstPromotedArithmeticType;
Left < LastPromotedArithmeticType; ++Left) {
for (unsigned Right = FirstPromotedArithmeticType;
Right < LastPromotedArithmeticType; ++Right) {
- QualType LandR[2] = { ArithmeticTypes[Left], ArithmeticTypes[Right] };
- QualType Result
- = isComparison
- ? Context.BoolTy
- : Context.UsualArithmeticConversionsType(LandR[0], LandR[1]);
- AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
+ QualType Result =
+ isComparison ? S.Context.BoolTy
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
}
}
// Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
// conditional operator for vector types.
- for (BuiltinCandidateTypeSet::iterator Vec1 = CandidateTypes.vector_begin(),
- Vec1End = CandidateTypes.vector_end();
- Vec1 != Vec1End; ++Vec1)
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes.vector_begin(),
- Vec2End = CandidateTypes.vector_end();
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
Vec2 != Vec2End; ++Vec2) {
QualType LandR[2] = { *Vec1, *Vec2 };
- QualType Result;
- if (isComparison)
- Result = Context.BoolTy;
- else {
+ QualType Result = S.Context.BoolTy;
+ if (!isComparison) {
if ((*Vec1)->isExtVectorType() || !(*Vec2)->isExtVectorType())
Result = *Vec1;
else
Result = *Vec2;
}
-
- AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
}
-
- break;
+ }
+ }
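+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator int(); };
+  //   S s;
+  //   auto x = s * 2.5; // L = int, R = double, so LR = double and the
+  //                     // selected candidate is "double operator*(double,
+  //                     // double)" after the usual arithmetic conversions.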
+
+ // C++ [over.built]p17:
+ //
+ // For every pair of promoted integral types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator%(L, R);
+ // LR operator&(L, R);
+ // LR operator^(L, R);
+ // LR operator|(L, R);
+ // L operator<<(L, R);
+ // L operator>>(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- case OO_Percent:
- BinaryAmp:
- case OO_Caret:
- case OO_Pipe:
- case OO_LessLess:
- case OO_GreaterGreater:
- // C++ [over.built]p17:
- //
- // For every pair of promoted integral types L and R, there
- // exist candidate operator functions of the form
- //
- // LR operator%(L, R);
- // LR operator&(L, R);
- // LR operator^(L, R);
- // LR operator|(L, R);
- // L operator<<(L, R);
- // L operator>>(L, R);
- //
- // where LR is the result of the usual arithmetic conversions
- // between types L and R.
for (unsigned Left = FirstPromotedIntegralType;
Left < LastPromotedIntegralType; ++Left) {
for (unsigned Right = FirstPromotedIntegralType;
Right < LastPromotedIntegralType; ++Right) {
- QualType LandR[2] = { ArithmeticTypes[Left], ArithmeticTypes[Right] };
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
QualType Result = (Op == OO_LessLess || Op == OO_GreaterGreater)
? LandR[0]
- : Context.UsualArithmeticConversionsType(LandR[0], LandR[1]);
- AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
}
}
- break;
+ }
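+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator unsigned short(); };
+  //   S s;
+  //   auto x = s << 1; // for shifts the result type is L itself: unsigned
+  //                    // short promotes to int, so 'x' has type int.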
- case OO_Equal:
- // C++ [over.built]p20:
- //
- // For every pair (T, VQ), where T is an enumeration or
- // pointer to member type and VQ is either volatile or
- // empty, there exist candidate operator functions of the form
- //
- // VQ T& operator=(VQ T&, T);
- for (BuiltinCandidateTypeSet::iterator
- Enum = CandidateTypes.enumeration_begin(),
- EnumEnd = CandidateTypes.enumeration_end();
- Enum != EnumEnd; ++Enum)
- AddBuiltinAssignmentOperatorCandidates(*this, *Enum, Args, 2,
- CandidateSet);
- for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes.member_pointer_begin(),
- MemPtrEnd = CandidateTypes.member_pointer_end();
- MemPtr != MemPtrEnd; ++MemPtr)
- AddBuiltinAssignmentOperatorCandidates(*this, *MemPtr, Args, 2,
- CandidateSet);
-
- // Fall through.
+ // C++ [over.built]p20:
+ //
+ // For every pair (T, VQ), where T is an enumeration or
+ // pointer to member type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // VQ T& operator=(VQ T&, T);
+ void addAssignmentMemberPointerOrEnumeralOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
+ continue;
- case OO_PlusEqual:
- case OO_MinusEqual:
- // C++ [over.built]p19:
- //
- // For every pair (T, VQ), where T is any type and VQ is either
- // volatile or empty, there exist candidate operator functions
- // of the form
- //
- // T*VQ& operator=(T*VQ&, T*);
- //
- // C++ [over.built]p21:
- //
- // For every pair (T, VQ), where T is a cv-qualified or
- // cv-unqualified object type and VQ is either volatile or
- // empty, there exist candidate operator functions of the form
- //
- // T*VQ& operator+=(T*VQ&, ptrdiff_t);
- // T*VQ& operator-=(T*VQ&, ptrdiff_t);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType ParamTypes[2];
- ParamTypes[1] = (Op == OO_Equal)? *Ptr : Context.getPointerDiffType();
+ AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, 2,
+ CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ continue;
+
+ AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
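+
+  // For example (informal sketch; 'E' and 'S' are hypothetical):
+  //
+  //   enum E { e1 };
+  //   struct S { operator E(); };
+  //   E e; S s;
+  //   e = s; // builtin "E& operator=(E&, E)" (VQ empty), with 's'
+  //          // converting to E.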
+
+ // C++ [over.built]p19:
+ //
+ // For every pair (T, VQ), where T is any type and VQ is either
+ // volatile or empty, there exist candidate operator functions
+ // of the form
+ //
+ // T*VQ& operator=(T*VQ&, T*);
+ //
+ // C++ [over.built]p21:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator+=(T*VQ&, ptrdiff_t);
+ // T*VQ& operator-=(T*VQ&, ptrdiff_t);
+ void addAssignmentPointerOverloads(bool isEqualOp) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // If this is operator=, keep track of the builtin candidates we added.
+ if (isEqualOp)
+ AddedTypes.insert(S.Context.getCanonicalType(*Ptr));
+ else if (!(*Ptr)->getPointeeType()->isObjectType())
+ continue;
// non-volatile version
- ParamTypes[0] = Context.getLValueReferenceType(*Ptr);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
+ };
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+                            /*IsAssignmentOperator=*/isEqualOp);
- if (!Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile()) {
// volatile version
- ParamTypes[0]
- = Context.getLValueReferenceType(Context.getVolatileType(*Ptr));
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
}
}
- // Fall through.
- case OO_StarEqual:
- case OO_SlashEqual:
- // C++ [over.built]p18:
- //
- // For every triple (L, VQ, R), where L is an arithmetic type,
- // VQ is either volatile or empty, and R is a promoted
- // arithmetic type, there exist candidate operator functions of
- // the form
- //
- // VQ L& operator=(VQ L&, R);
- // VQ L& operator*=(VQ L&, R);
- // VQ L& operator/=(VQ L&, R);
- // VQ L& operator+=(VQ L&, R);
- // VQ L& operator-=(VQ L&, R);
+ if (isEqualOp) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Make sure we don't add the same candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ *Ptr,
+ };
+
+ // non-volatile version
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+                              /*IsAssignmentOperator=*/true);
+
+ if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()) {
+ // volatile version
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+                                CandidateSet, /*IsAssignmentOperator=*/true);
+ }
+ }
+ }
+ }
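+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator ptrdiff_t(); };
+  //   int *p; S s;
+  //   p += s; // builtin "int*& operator+=(int*&, ptrdiff_t)"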
+
+ // C++ [over.built]p18:
+ //
+ // For every triple (L, VQ, R), where L is an arithmetic type,
+ // VQ is either volatile or empty, and R is a promoted
+ // arithmetic type, there exist candidate operator functions of
+ // the form
+ //
+ // VQ L& operator=(VQ L&, R);
+ // VQ L& operator*=(VQ L&, R);
+ // VQ L& operator/=(VQ L&, R);
+ // VQ L& operator+=(VQ L&, R);
+ // VQ L& operator-=(VQ L&, R);
+ void addAssignmentArithmeticOverloads(bool isEqualOp) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) {
for (unsigned Right = FirstPromotedArithmeticType;
Right < LastPromotedArithmeticType; ++Right) {
QualType ParamTypes[2];
- ParamTypes[1] = ArithmeticTypes[Right];
+ ParamTypes[1] = getArithmeticType(Right);
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = Context.getLValueReferenceType(ArithmeticTypes[Left]);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = Context.getVolatileType(ArithmeticTypes[Left]);
- ParamTypes[0] = Context.getLValueReferenceType(ParamTypes[0]);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
+ ParamTypes[0] =
+ S.Context.getVolatileType(getArithmeticType(Left));
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet,
+                                /*IsAssignmentOperator=*/isEqualOp);
}
}
}
-
+
// Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
- for (BuiltinCandidateTypeSet::iterator Vec1 = CandidateTypes.vector_begin(),
- Vec1End = CandidateTypes.vector_end();
- Vec1 != Vec1End; ++Vec1)
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes.vector_begin(),
- Vec2End = CandidateTypes.vector_end();
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
Vec2 != Vec2End; ++Vec2) {
QualType ParamTypes[2];
ParamTypes[1] = *Vec2;
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = Context.getLValueReferenceType(*Vec1);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
-
+ ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+                              /*IsAssignmentOperator=*/isEqualOp);
+
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = Context.getVolatileType(*Vec1);
- ParamTypes[0] = Context.getLValueReferenceType(ParamTypes[0]);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
- /*IsAssigmentOperator=*/Op == OO_Equal);
+ ParamTypes[0] = S.Context.getVolatileType(*Vec1);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet,
+                                /*IsAssignmentOperator=*/isEqualOp);
}
}
- break;
+ }
+ }
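+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator double(); };
+  //   short v; S s;
+  //   v *= s; // builtin "short& operator*=(short&, double)", with L = short
+  //           // (any arithmetic type) and R = double (a promoted type).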
+
+ // C++ [over.built]p22:
+ //
+ // For every triple (L, VQ, R), where L is an integral type, VQ
+ // is either volatile or empty, and R is a promoted integral
+ // type, there exist candidate operator functions of the form
+ //
+ // VQ L& operator%=(VQ L&, R);
+ // VQ L& operator<<=(VQ L&, R);
+ // VQ L& operator>>=(VQ L&, R);
+ // VQ L& operator&=(VQ L&, R);
+ // VQ L& operator^=(VQ L&, R);
+ // VQ L& operator|=(VQ L&, R);
+ void addAssignmentIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
- case OO_PercentEqual:
- case OO_LessLessEqual:
- case OO_GreaterGreaterEqual:
- case OO_AmpEqual:
- case OO_CaretEqual:
- case OO_PipeEqual:
- // C++ [over.built]p22:
- //
- // For every triple (L, VQ, R), where L is an integral type, VQ
- // is either volatile or empty, and R is a promoted integral
- // type, there exist candidate operator functions of the form
- //
- // VQ L& operator%=(VQ L&, R);
- // VQ L& operator<<=(VQ L&, R);
- // VQ L& operator>>=(VQ L&, R);
- // VQ L& operator&=(VQ L&, R);
- // VQ L& operator^=(VQ L&, R);
- // VQ L& operator|=(VQ L&, R);
for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
for (unsigned Right = FirstPromotedIntegralType;
Right < LastPromotedIntegralType; ++Right) {
QualType ParamTypes[2];
- ParamTypes[1] = ArithmeticTypes[Right];
+ ParamTypes[1] = getArithmeticType(Right);
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = Context.getLValueReferenceType(ArithmeticTypes[Left]);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
if (VisibleTypeConversionsQuals.hasVolatile()) {
// Add this built-in operator as a candidate (VQ is 'volatile').
- ParamTypes[0] = ArithmeticTypes[Left];
- ParamTypes[0] = Context.getVolatileType(ParamTypes[0]);
- ParamTypes[0] = Context.getLValueReferenceType(ParamTypes[0]);
- AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
+ ParamTypes[0] = getArithmeticType(Left);
+ ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet);
}
}
}
- break;
-
- case OO_Exclaim: {
- // C++ [over.operator]p23:
- //
- // There also exist candidate operator functions of the form
- //
- // bool operator!(bool);
- // bool operator&&(bool, bool); [BELOW]
- // bool operator||(bool, bool); [BELOW]
- QualType ParamTy = Context.BoolTy;
- AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet,
- /*IsAssignmentOperator=*/false,
- /*NumContextualBoolArguments=*/1);
- break;
- }
-
- case OO_AmpAmp:
- case OO_PipePipe: {
- // C++ [over.operator]p23:
- //
- // There also exist candidate operator functions of the form
- //
- // bool operator!(bool); [ABOVE]
- // bool operator&&(bool, bool);
- // bool operator||(bool, bool);
- QualType ParamTypes[2] = { Context.BoolTy, Context.BoolTy };
- AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet,
- /*IsAssignmentOperator=*/false,
- /*NumContextualBoolArguments=*/2);
- break;
}
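+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator int(); };
+  //   unsigned u; S s;
+  //   u %= s; // builtin "unsigned& operator%=(unsigned&, int)"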
- case OO_Subscript:
- // C++ [over.built]p13:
- //
- // For every cv-qualified or cv-unqualified object type T there
- // exist candidate operator functions of the form
- //
- // T* operator+(T*, ptrdiff_t); [ABOVE]
- // T& operator[](T*, ptrdiff_t);
- // T* operator-(T*, ptrdiff_t); [ABOVE]
- // T* operator+(ptrdiff_t, T*); [ABOVE]
- // T& operator[](ptrdiff_t, T*);
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, Context.getPointerDiffType() };
+ // C++ [over.operator]p23:
+ //
+ // There also exist candidate operator functions of the form
+ //
+ // bool operator!(bool);
+ // bool operator&&(bool, bool);
+ // bool operator||(bool, bool);
+ void addExclaimOverload() {
+ QualType ParamTy = S.Context.BoolTy;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/1);
+ }
+ void addAmpAmpOrPipePipeOverload() {
+ QualType ParamTypes[2] = { S.Context.BoolTy, S.Context.BoolTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/2);
+ }
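+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator bool(); };
+  //   S a, b;
+  //   bool r = a && b; // each operand is contextually converted to bool and
+  //                    // the builtin "bool operator&&(bool, bool)" applies.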
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T there
+ // exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t); [ABOVE]
+ // T& operator[](T*, ptrdiff_t);
+ // T* operator-(T*, ptrdiff_t); [ABOVE]
+ // T* operator+(ptrdiff_t, T*); [ABOVE]
+ // T& operator[](ptrdiff_t, T*);
+ void addSubscriptOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
QualType PointeeType = (*Ptr)->getPointeeType();
- QualType ResultTy = Context.getLValueReferenceType(PointeeType);
+ if (!PointeeType->isObjectType())
+ continue;
+
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
// T& operator[](T*, ptrdiff_t)
- AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
- // T& operator[](ptrdiff_t, T*);
- ParamTypes[0] = ParamTypes[1];
- ParamTypes[1] = *Ptr;
- AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
- }
- break;
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
+ QualType PointeeType = (*Ptr)->getPointeeType();
+ if (!PointeeType->isObjectType())
+ continue;
- case OO_ArrowStar:
- // C++ [over.built]p11:
- // For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
- // C1 is the same type as C2 or is a derived class of C2, T is an object
- // type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
- // there exist candidate operator functions of the form
- // CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
- // where CV12 is the union of CV1 and CV2.
- {
- for (BuiltinCandidateTypeSet::iterator Ptr =
- CandidateTypes.pointer_begin();
- Ptr != CandidateTypes.pointer_end(); ++Ptr) {
- QualType C1Ty = (*Ptr);
- QualType C1;
- QualifierCollector Q1;
- C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
- if (!isa<RecordType>(C1))
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](ptrdiff_t, T*)
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
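+
+  // For example (informal sketch; 'S' is hypothetical):
+  //
+  //   struct S { operator int*(); };
+  //   S s;
+  //   int &a = s[2]; // T& operator[](T*, ptrdiff_t)
+  //   int &b = 2[s]; // T& operator[](ptrdiff_t, T*)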
+
+ // C++ [over.built]p11:
+ // For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
+ // C1 is the same type as C2 or is a derived class of C2, T is an object
+ // type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
+ // there exist candidate operator functions of the form
+ //
+ // CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
+ //
+ // where CV12 is the union of CV1 and CV2.
+ void addArrowStarOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType C1Ty = (*Ptr);
+ QualType C1;
+ QualifierCollector Q1;
+ C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
+ if (!isa<RecordType>(C1))
+ continue;
+      // Heuristic to reduce the number of builtin candidates in the set:
+      // add the volatile/restrict versions only if there are conversions
+      // to a volatile/restrict type.
+ if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
+ continue;
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[1].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[1].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
+ QualType C2 = QualType(mptr->getClass(), 0);
+ C2 = C2.getUnqualifiedType();
+ if (C1 != C2 && !S.IsDerivedFrom(C1, C2))
+ break;
+ QualType ParamTypes[2] = { *Ptr, *MemPtr };
+ // build CV12 T&
+ QualType T = mptr->getPointeeType();
+ if (!VisibleTypeConversionsQuals.hasVolatile() &&
+ T.isVolatileQualified())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() &&
+ T.isRestrictQualified())
continue;
- // heuristic to reduce number of builtin candidates in the set.
- // Add volatile/restrict version only if there are conversions to a
- // volatile/restrict type.
- if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
+ T = Q1.apply(S.Context, T);
+ QualType ResultTy = S.Context.getLValueReferenceType(T);
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+ }
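+
+  // For example (informal sketch; the types are hypothetical):
+  //
+  //   struct A { int x; };
+  //   struct P { operator A*(); };
+  //   P p;
+  //   int &r = p->*(&A::x); // builtin "int& operator->*(A*, int A::*)"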
+
+ // Note that we don't consider the first argument, since it has been
+ // contextually converted to bool long ago. The candidates below are
+ // therefore added as binary.
+ //
+ // C++ [over.built]p25:
+ // For every type T, where T is a pointer, pointer-to-member, or scoped
+ // enumeration type, there exist candidate operator functions of the form
+ //
+ // T operator?(bool, T, T);
+ //
+ void addConditionalOperatorOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
continue;
- if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
continue;
+
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(*MemPtr, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ if (S.getLangOptions().CPlusPlus0x) {
for (BuiltinCandidateTypeSet::iterator
- MemPtr = CandidateTypes.member_pointer_begin(),
- MemPtrEnd = CandidateTypes.member_pointer_end();
- MemPtr != MemPtrEnd; ++MemPtr) {
- const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
- QualType C2 = QualType(mptr->getClass(), 0);
- C2 = C2.getUnqualifiedType();
- if (C1 != C2 && !IsDerivedFrom(C1, C2))
- break;
- QualType ParamTypes[2] = { *Ptr, *MemPtr };
- // build CV12 T&
- QualType T = mptr->getPointeeType();
- if (!VisibleTypeConversionsQuals.hasVolatile() &&
- T.isVolatileQualified())
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
continue;
- if (!VisibleTypeConversionsQuals.hasRestrict() &&
- T.isRestrictQualified())
+
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
continue;
- T = Q1.apply(T);
- QualType ResultTy = Context.getLValueReferenceType(T);
- AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(*Enum, ParamTypes, Args, 2, CandidateSet);
}
}
}
+ }
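+
+  // For example (informal sketch; the scoped-enum case needs -std=c++0x):
+  //
+  //   enum class E { a, b };
+  //   struct S { operator E(); };
+  //   bool c = true;
+  //   E r = c ? S() : E::a; // builtin "E operator?(bool, E, E)"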
+};
+
+} // end anonymous namespace
+
+/// AddBuiltinOperatorCandidates - Add the appropriate built-in
+/// operator overloads to the candidate set (C++ [over.built]), based
+/// on the operator @p Op and the arguments given. For example, if the
+/// operator is a binary '+', this routine might add "int
+/// operator+(int, int)" to cover integer addition.
+void
+Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet) {
+ // Find all of the types that the arguments can convert to, but only
+ // if the operator we're looking at has built-in operator candidates
+  // that make use of these types. Also record whether we encounter any
+  // non-record candidate types and whether we encounter arithmetic or
+  // enumeral candidate types.
+ Qualifiers VisibleTypeConversionsQuals;
+ VisibleTypeConversionsQuals.addConst();
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
+
+ bool HasNonRecordCandidateType = false;
+ bool HasArithmeticOrEnumeralCandidateType = false;
+ llvm::SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ CandidateTypes.push_back(BuiltinCandidateTypeSet(*this));
+ CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(),
+ OpLoc,
+ true,
+ (Op == OO_Exclaim ||
+ Op == OO_AmpAmp ||
+ Op == OO_PipePipe),
+ VisibleTypeConversionsQuals);
+ HasNonRecordCandidateType = HasNonRecordCandidateType ||
+ CandidateTypes[ArgIdx].hasNonRecordTypes();
+ HasArithmeticOrEnumeralCandidateType =
+ HasArithmeticOrEnumeralCandidateType ||
+ CandidateTypes[ArgIdx].hasArithmeticOrEnumeralTypes();
+ }
+
+ // Exit early when no non-record types have been added to the candidate set
+ // for any of the arguments to the operator.
+ if (!HasNonRecordCandidateType)
+ return;
+
+  // Set up an object to manage the common state for building overloads.
+ BuiltinOperatorOverloadBuilder OpBuilder(*this, Args, NumArgs,
+ VisibleTypeConversionsQuals,
+ HasArithmeticOrEnumeralCandidateType,
+ CandidateTypes, CandidateSet);
+
+ // Dispatch over the operation to add in only those overloads which apply.
+ switch (Op) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Expected an overloaded operator");
break;
- case OO_Conditional:
- // Note that we don't consider the first argument, since it has been
- // contextually converted to bool long ago. The candidates below are
- // therefore added as binary.
- //
- // C++ [over.built]p24:
- // For every type T, where T is a pointer or pointer-to-member type,
- // there exist candidate operator functions of the form
- //
- // T operator?(bool, T, T);
- //
- for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin(),
- E = CandidateTypes.pointer_end(); Ptr != E; ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, *Ptr };
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
- }
- for (BuiltinCandidateTypeSet::iterator Ptr =
- CandidateTypes.member_pointer_begin(),
- E = CandidateTypes.member_pointer_end(); Ptr != E; ++Ptr) {
- QualType ParamTypes[2] = { *Ptr, *Ptr };
- AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Call:
+ assert(false && "Special operators don't use AddBuiltinOperatorCandidates");
+ break;
+
+ case OO_Comma:
+ case OO_Arrow:
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ case OO_Plus: // '+' is either unary or binary
+ if (NumArgs == 1)
+ OpBuilder.addUnaryPlusPointerOverloads();
+ // Fall through.
+
+ case OO_Minus: // '-' is either unary or binary
+ if (NumArgs == 1) {
+ OpBuilder.addUnaryPlusOrMinusArithmeticOverloads();
+ } else {
+ OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
}
- goto Conditional;
+ break;
+
+ case OO_Star: // '*' is either unary or binary
+ if (NumArgs == 1)
+ OpBuilder.addUnaryStarPointerOverloads();
+ else
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_Slash:
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ OpBuilder.addPlusPlusMinusMinusArithmeticOverloads(Op);
+ OpBuilder.addPlusPlusMinusMinusPointerOverloads();
+ break;
+
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ OpBuilder.addEqualEqualOrNotEqualMemberPointerOverloads();
+ // Fall through.
+
+ case OO_Less:
+ case OO_Greater:
+ case OO_LessEqual:
+ case OO_GreaterEqual:
+ OpBuilder.addRelationalPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/true);
+ break;
+
+ case OO_Percent:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Amp: // '&' is either unary or binary
+ if (NumArgs == 1)
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Tilde:
+ OpBuilder.addUnaryTildePromotedIntegralOverloads();
+ break;
+
+ case OO_Equal:
+ OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
+ // Fall through.
+
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
+ // Fall through.
+
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ OpBuilder.addAssignmentArithmeticOverloads(Op == OO_Equal);
+ break;
+
+ case OO_PercentEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_AmpEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ OpBuilder.addAssignmentIntegralOverloads();
+ break;
+
+ case OO_Exclaim:
+ OpBuilder.addExclaimOverload();
+ break;
+
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ OpBuilder.addAmpAmpOrPipePipeOverload();
+ break;
+
+ case OO_Subscript:
+ OpBuilder.addSubscriptOverloads();
+ break;
+
+ case OO_ArrowStar:
+ OpBuilder.addArrowStarOverloads();
+ break;
+
+ case OO_Conditional:
+ OpBuilder.addConditionalOperatorOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
}
}
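+
+// For example (informal sketch; 'S' is hypothetical): given
+//
+//   struct S { operator int(); };
+//   S s;
+//   int n = s + 1;
+//
+// the '+' reaches AddBuiltinOperatorCandidates with NumArgs == 2, the
+// builder adds the [over.built]p12/p13 candidates, and the candidate
+// "int operator+(int, int)" is selected.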
@@ -5270,7 +6116,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
if (ExplicitTemplateArgs)
continue;
-
+
AddOverloadCandidate(FD, FoundDecl, Args, NumArgs, CandidateSet,
false, PartialOverloading);
} else
@@ -5284,9 +6130,10 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool
isBetterOverloadCandidate(Sema &S,
- const OverloadCandidate& Cand1,
- const OverloadCandidate& Cand2,
- SourceLocation Loc) {
+ const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2,
+ SourceLocation Loc,
+ bool UserDefinedConversion) {
// Define viable functions to be better candidates than non-viable
// functions.
if (!Cand2.Viable)
@@ -5346,14 +6193,16 @@ isBetterOverloadCandidate(Sema &S,
// according to the partial ordering rules described in 14.5.5.2, or,
// if not that,
if (Cand1.Function && Cand1.Function->getPrimaryTemplate() &&
- Cand2.Function && Cand2.Function->getPrimaryTemplate())
+ Cand2.Function && Cand2.Function->getPrimaryTemplate()) {
if (FunctionTemplateDecl *BetterTemplate
= S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
Cand2.Function->getPrimaryTemplate(),
Loc,
- isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
- : TPOC_Call))
+ isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
+ : TPOC_Call,
+ Cand1.ExplicitCallArguments))
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
+ }
// -- the context is an initialization by user-defined conversion
// (see 8.5, 13.3.1.5) and the standard conversion sequence
@@ -5361,7 +6210,7 @@ isBetterOverloadCandidate(Sema &S,
// the type of the entity being initialized) is a better
// conversion sequence than the standard conversion sequence
// from the return type of F2 to the destination type.
- if (Cand1.Function && Cand2.Function &&
+ if (UserDefinedConversion && Cand1.Function && Cand2.Function &&
isa<CXXConversionDecl>(Cand1.Function) &&
isa<CXXConversionDecl>(Cand2.Function)) {
switch (CompareStandardConversionSequences(S,
@@ -5398,12 +6247,14 @@ isBetterOverloadCandidate(Sema &S,
/// \returns The result of overload resolution.
OverloadingResult
OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
- iterator& Best) {
+ iterator &Best,
+ bool UserDefinedConversion) {
// Find the best viable function.
Best = end();
for (iterator Cand = begin(); Cand != end(); ++Cand) {
if (Cand->Viable)
- if (Best == end() || isBetterOverloadCandidate(S, *Cand, *Best, Loc))
+ if (Best == end() || isBetterOverloadCandidate(S, *Cand, *Best, Loc,
+ UserDefinedConversion))
Best = Cand;
}
@@ -5416,7 +6267,8 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
for (iterator Cand = begin(); Cand != end(); ++Cand) {
if (Cand->Viable &&
Cand != Best &&
- !isBetterOverloadCandidate(S, *Best, *Cand, Loc)) {
+ !isBetterOverloadCandidate(S, *Best, *Cand, Loc,
+ UserDefinedConversion)) {
Best = end();
return OR_Ambiguous;
}
@@ -5436,6 +6288,7 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
// placement new (5.3.4), as well as non-default initialization (8.5).
if (Best->Function)
S.MarkDeclarationReferenced(Loc, Best->Function);
+
return OR_Success;
}
@@ -5450,7 +6303,8 @@ enum OverloadCandidateKind {
oc_constructor_template,
oc_implicit_default_constructor,
oc_implicit_copy_constructor,
- oc_implicit_copy_assignment
+ oc_implicit_copy_assignment,
+ oc_implicit_inherited_constructor
};
OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
@@ -5468,6 +6322,9 @@ OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
if (!Ctor->isImplicit())
return isTemplate ? oc_constructor_template : oc_constructor;
+ if (Ctor->getInheritedConstructor())
+ return oc_implicit_inherited_constructor;
+
return Ctor->isCopyConstructor() ? oc_implicit_copy_constructor
: oc_implicit_default_constructor;
}
@@ -5478,7 +6335,7 @@ OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
if (!Meth->isImplicit())
return isTemplate ? oc_method_template : oc_method;
- assert(Meth->isCopyAssignment()
+ assert(Meth->isCopyAssignmentOperator()
&& "implicit method is not copy assignment operator?");
return oc_implicit_copy_assignment;
}
@@ -5486,6 +6343,16 @@ OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
return isTemplate ? oc_function_template : oc_function;
}
+void MaybeEmitInheritedConstructorNote(Sema &S, FunctionDecl *Fn) {
+ const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn);
+ if (!Ctor) return;
+
+ Ctor = Ctor->getInheritedConstructor();
+ if (!Ctor) return;
+
+ S.Diag(Ctor->getLocation(), diag::note_ovl_candidate_inherited_constructor);
+}
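+
+// For example (informal sketch; C++0x inheriting constructors, with
+// hypothetical types):
+//
+//   struct B { B(int); };
+//   struct D : B { using B::B; }; // D inherits B(int)
+//   D d("oops"); // resolution fails; the note points at B(int), the
+//                // constructor D inherited.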
+
} // end anonymous namespace
// Notes the location of an overload candidate.
@@ -5494,6 +6361,28 @@ void Sema::NoteOverloadCandidate(FunctionDecl *Fn) {
OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
Diag(Fn->getLocation(), diag::note_ovl_candidate)
<< (unsigned) K << FnDesc;
+ MaybeEmitInheritedConstructorNote(*this, Fn);
+}
+
+// Notes the location of all overload candidates designated through
+// OverloadedExpr.
+void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr) {
+ assert(OverloadedExpr->getType() == Context.OverloadTy);
+
+ OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
+ OverloadExpr *OvlExpr = Ovl.Expression;
+
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ IEnd = OvlExpr->decls_end();
+ I != IEnd; ++I) {
+ if (FunctionTemplateDecl *FunTmpl =
+ dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(FunTmpl->getTemplatedDecl());
+ } else if (FunctionDecl *Fun
+ = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(Fun);
+ }
+ }
}
/// Diagnoses an ambiguous conversion. The partial diagnostic is the
@@ -5548,6 +6437,7 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
<< (unsigned) FnKind << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< ToTy << Name << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5582,6 +6472,7 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
<< FromTy
<< FromQs.getAddressSpace() << ToQs.getAddressSpace()
<< (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5599,6 +6490,7 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << (CVR - 1) << I+1;
}
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5613,6 +6505,7 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
<< (unsigned) FnKind << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5624,7 +6517,7 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
FromPtrTy->getPointeeType()) &&
!FromPtrTy->getPointeeType()->isIncompleteType() &&
!ToPtrTy->getPointeeType()->isIncompleteType() &&
- S.IsDerivedFrom(ToPtrTy->getPointeeType(),
+ S.IsDerivedFrom(ToPtrTy->getPointeeType(),
FromPtrTy->getPointeeType()))
BaseToDerivedConversion = 1;
}
@@ -5645,22 +6538,24 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy))
BaseToDerivedConversion = 3;
}
-
+
if (BaseToDerivedConversion) {
- S.Diag(Fn->getLocation(),
+ S.Diag(Fn->getLocation(),
diag::note_ovl_candidate_bad_base_to_derived_conv)
<< (unsigned) FnKind << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< (BaseToDerivedConversion - 1)
- << FromTy << ToTy << I+1;
+ << FromTy << ToTy << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
-
+
// TODO: specialize more based on the kind of mismatch
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv)
<< (unsigned) FnKind << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
}
void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
@@ -5671,15 +6566,15 @@ void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
unsigned MinParams = Fn->getMinRequiredArguments();
-
+
// at least / at most / exactly
- // FIXME: variadic templates "at most" should account for parameter packs
unsigned mode, modeCount;
if (NumFormalArgs < MinParams) {
assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
(Cand->FailureKind == ovl_fail_bad_deduction &&
Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
- if (MinParams != FnTy->getNumArgs() || FnTy->isVariadic())
+ if (MinParams != FnTy->getNumArgs() ||
+ FnTy->isVariadic() || FnTy->isTemplateVariadic())
mode = 0; // "at least"
else
mode = 2; // "exactly"
@@ -5699,8 +6594,9 @@ void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, Description);
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
- << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
<< modeCount << NumFormalArgs;
+ MaybeEmitInheritedConstructorNote(S, Fn);
}
/// Diagnose a failed template-argument deduction.
@@ -5721,6 +6617,7 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
assert(ParamD && "no parameter found for incomplete deduction result");
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_incomplete_deduction)
<< ParamD->getDeclName();
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5732,9 +6629,9 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
// Param will have been canonicalized, but it should just be a
// qualified version of ParamD, so move the qualifiers to that.
- QualifierCollector Qs(S.Context);
+ QualifierCollector Qs;
Qs.strip(Param);
- QualType NonCanonParam = Qs.apply(TParam->getTypeForDecl());
+ QualType NonCanonParam = Qs.apply(S.Context, TParam->getTypeForDecl());
assert(S.Context.hasSameType(Param, NonCanonParam));
// Arg has also been canonicalized, but there's nothing we can do
@@ -5745,11 +6642,12 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_underqualified)
<< ParamD->getDeclName() << Arg << NonCanonParam;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
case Sema::TDK_Inconsistent: {
- assert(ParamD && "no parameter found for inconsistent deduction result");
+ assert(ParamD && "no parameter found for inconsistent deduction result");
int which = 0;
if (isa<TemplateTypeParmDecl>(ParamD))
which = 0;
@@ -5758,18 +6656,19 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
else {
which = 2;
}
-
+
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_inconsistent_deduction)
- << which << ParamD->getDeclName()
+ << which << ParamD->getDeclName()
<< *Cand->DeductionFailure.getFirstArg()
<< *Cand->DeductionFailure.getSecondArg();
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
case Sema::TDK_InvalidExplicitArguments:
- assert(ParamD && "no parameter found for invalid explicit arguments");
+ assert(ParamD && "no parameter found for invalid explicit arguments");
if (ParamD->getDeclName())
- S.Diag(Fn->getLocation(),
+ S.Diag(Fn->getLocation(),
diag::note_ovl_candidate_explicit_arg_mismatch_named)
<< ParamD->getDeclName();
else {
@@ -5781,12 +6680,13 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
index = NTTP->getIndex();
else
index = cast<TemplateTemplateParmDecl>(ParamD)->getIndex();
- S.Diag(Fn->getLocation(),
+ S.Diag(Fn->getLocation(),
diag::note_ovl_candidate_explicit_arg_mismatch_unnamed)
<< (index + 1);
}
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
-
+
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
DiagnoseArityMismatch(S, Cand, NumArgs);
@@ -5794,6 +6694,7 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
case Sema::TDK_InstantiationDepth:
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_instantiation_depth);
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
case Sema::TDK_SubstitutionFailure: {
@@ -5805,14 +6706,16 @@ void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
*Args);
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_substitution_failure)
<< ArgString;
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
-
+
// TODO: diagnose these individually, then kill off
// note_ovl_candidate_bad_deduction, which is uselessly vague.
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_deduction);
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
}
@@ -5841,6 +6744,7 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
<< FnKind << FnDesc << Fn->isDeleted();
+ MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -5868,7 +6772,7 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
for (unsigned N = Cand->Conversions.size(); I != N; ++I)
if (Cand->Conversions[I].isBad())
return DiagnoseBadConversion(S, Cand, I);
-
+
// FIXME: this currently happens when we're called from SemaInit
// when user-conversion overload fails. Figure out how to handle
// those conditions and diagnose them well.
@@ -5907,6 +6811,7 @@ void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
<< FnType;
+ MaybeEmitInheritedConstructorNote(S, Cand->Surrogate);
}
void NoteBuiltinOperatorCandidate(Sema &S,
@@ -6083,7 +6988,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
Cand->Conversions[ConvIdx]
= TryCopyInitialization(S, Args[ConvIdx],
Cand->BuiltinTypes.ParamTypes[ConvIdx],
- SuppressUserConversions,
+ SuppressUserConversions,
/*InOverloadResolution*/ true);
return;
}
@@ -6094,7 +6999,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
if (ArgIdx < NumArgsInProto)
Cand->Conversions[ConvIdx]
= TryCopyInitialization(S, Args[ArgIdx], Proto->getArgType(ArgIdx),
- SuppressUserConversions,
+ SuppressUserConversions,
/*InOverloadResolution=*/true);
else
Cand->Conversions[ConvIdx].setEllipsis();
@@ -6129,7 +7034,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
std::sort(Cands.begin(), Cands.end(),
CompareOverloadCandidatesForDisplay(S));
-
+
bool ReportedAmbiguousConversions = false;
llvm::SmallVectorImpl<OverloadCandidate*>::iterator I, E;
@@ -6180,186 +7085,207 @@ static bool CheckUnresolvedAccess(Sema &S, OverloadExpr *E, DeclAccessPair D) {
return S.CheckUnresolvedMemberAccess(cast<UnresolvedMemberExpr>(E), D);
}
-/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
-/// an overloaded function (C++ [over.over]), where @p From is an
-/// expression with overloaded function type and @p ToType is the type
-/// we're trying to resolve to. For example:
-///
-/// @code
-/// int f(double);
-/// int f(int);
-///
-/// int (*pfd)(double) = f; // selects f(double)
-/// @endcode
-///
-/// This routine returns the resulting FunctionDecl if it could be
-/// resolved, and NULL otherwise. When @p Complain is true, this
-/// routine will emit diagnostics if there is an error.
-FunctionDecl *
-Sema::ResolveAddressOfOverloadedFunction(Expr *From, QualType ToType,
- bool Complain,
- DeclAccessPair &FoundResult) {
- QualType FunctionType = ToType;
- bool IsMember = false;
- if (const PointerType *ToTypePtr = ToType->getAs<PointerType>())
- FunctionType = ToTypePtr->getPointeeType();
- else if (const ReferenceType *ToTypeRef = ToType->getAs<ReferenceType>())
- FunctionType = ToTypeRef->getPointeeType();
- else if (const MemberPointerType *MemTypePtr =
- ToType->getAs<MemberPointerType>()) {
- FunctionType = MemTypePtr->getPointeeType();
- IsMember = true;
- }
- // C++ [over.over]p1:
- // [...] [Note: any redundant set of parentheses surrounding the
- // overloaded function name is ignored (5.1). ]
- // C++ [over.over]p1:
- // [...] The overloaded function name can be preceded by the &
- // operator.
- // However, remember whether the expression has member-pointer form:
- // C++ [expr.unary.op]p4:
- // A pointer to member is only formed when an explicit & is used
- // and its operand is a qualified-id not enclosed in
- // parentheses.
- OverloadExpr::FindResult Ovl = OverloadExpr::find(From);
- OverloadExpr *OvlExpr = Ovl.Expression;
-
- // We expect a pointer or reference to function, or a function pointer.
- FunctionType = Context.getCanonicalType(FunctionType).getUnqualifiedType();
- if (!FunctionType->isFunctionType()) {
- if (Complain)
- Diag(From->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
- << OvlExpr->getName() << ToType;
-
- return 0;
- }
- // If the overload expression doesn't have the form of a pointer to
- // member, don't try to convert it to a pointer-to-member type.
- if (IsMember && !Ovl.HasFormOfMemberPointer) {
- if (!Complain) return 0;
- // TODO: Should we condition this on whether any functions might
- // have matched, or is it more appropriate to do that in callers?
- // TODO: a fixit wouldn't hurt.
- Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier)
- << ToType << OvlExpr->getSourceRange();
- return 0;
- }
+// [PossiblyAFunctionType] --> [Return]
+// NonFunctionType --> NonFunctionType
+// R (A) --> R(A)
+// R (*)(A) --> R (A)
+// R (&)(A) --> R (A)
+// R (S::*)(A) --> R (A)
+QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
+ QualType Ret = PossiblyAFunctionType;
+ if (const PointerType *ToTypePtr =
+ PossiblyAFunctionType->getAs<PointerType>())
+ Ret = ToTypePtr->getPointeeType();
+ else if (const ReferenceType *ToTypeRef =
+ PossiblyAFunctionType->getAs<ReferenceType>())
+ Ret = ToTypeRef->getPointeeType();
+ else if (const MemberPointerType *MemTypePtr =
+ PossiblyAFunctionType->getAs<MemberPointerType>())
+ Ret = MemTypePtr->getPointeeType();
+  Ret = Context.getCanonicalType(Ret).getUnqualifiedType();
+ return Ret;
+}
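
// Aside: a standalone sketch (plain ISO C++, not the clang API) of the
// mapping performed above -- pointer, reference, and member-pointer sugar
// is peeled off to expose the underlying function type R(A). The
// remove_memptr trait is an invented helper for this sketch, not a std:: one.
#include <type_traits>

struct S {};

template <typename T> struct remove_memptr { using type = T; };
template <typename F, typename C> struct remove_memptr<F C::*> {
  using type = F;
};

static_assert(std::is_same<std::remove_pointer<int (*)(double)>::type,
                           int(double)>::value, "R (*)(A) --> R (A)");
static_assert(std::is_same<std::remove_reference<int (&)(double)>::type,
                           int(double)>::value, "R (&)(A) --> R (A)");
static_assert(std::is_same<remove_memptr<int (S::*)(double)>::type,
                           int(double)>::value, "R (S::*)(A) --> R (A)");

int main() {}
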
- TemplateArgumentListInfo ETABuffer, *ExplicitTemplateArgs = 0;
- if (OvlExpr->hasExplicitTemplateArgs()) {
- OvlExpr->getExplicitTemplateArgs().copyInto(ETABuffer);
- ExplicitTemplateArgs = &ETABuffer;
- }
+// A helper class for address-of-function resolution; it saves us from
+// passing the same large set of parameters through every helper call.
+class AddressOfFunctionResolver
+{
+ Sema& S;
+ Expr* SourceExpr;
+ const QualType& TargetType;
+ QualType TargetFunctionType; // Extracted function type from target type
+
+ bool Complain;
+ //DeclAccessPair& ResultFunctionAccessPair;
+ ASTContext& Context;
- assert(From->getType() == Context.OverloadTy);
+ bool TargetTypeIsNonStaticMemberFunction;
+ bool FoundNonTemplateFunction;
- // Look through all of the overloaded functions, searching for one
- // whose type matches exactly.
+ OverloadExpr::FindResult OvlExprInfo;
+ OverloadExpr *OvlExpr;
+ TemplateArgumentListInfo OvlExplicitTemplateArgs;
llvm::SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
- llvm::SmallVector<FunctionDecl *, 4> NonMatches;
-
- bool FoundNonTemplateFunction = false;
- for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- E = OvlExpr->decls_end(); I != E; ++I) {
- // Look through any using declarations to find the underlying function.
- NamedDecl *Fn = (*I)->getUnderlyingDecl();
-
- // C++ [over.over]p3:
- // Non-member functions and static member functions match
- // targets of type "pointer-to-function" or "reference-to-function."
- // Nonstatic member functions match targets of
- // type "pointer-to-member-function."
- // Note that according to DR 247, the containing class does not matter.
-
- if (FunctionTemplateDecl *FunctionTemplate
- = dyn_cast<FunctionTemplateDecl>(Fn)) {
- if (CXXMethodDecl *Method
- = dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
- // Skip non-static function templates when converting to pointer, and
- // static when converting to member pointer.
- if (Method->isStatic() == IsMember)
- continue;
- } else if (IsMember)
- continue;
- // C++ [over.over]p2:
- // If the name is a function template, template argument deduction is
- // done (14.8.2.2), and if the argument deduction succeeds, the
- // resulting template argument list is used to generate a single
- // function template specialization, which is added to the set of
- // overloaded functions considered.
- // FIXME: We don't really want to build the specialization here, do we?
- FunctionDecl *Specialization = 0;
- TemplateDeductionInfo Info(Context, OvlExpr->getNameLoc());
- if (TemplateDeductionResult Result
- = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
- FunctionType, Specialization, Info)) {
- // FIXME: make a note of the failed deduction for diagnostics.
- (void)Result;
- } else {
- // FIXME: If the match isn't exact, shouldn't we just drop this as
- // a candidate? Find a testcase before changing the code.
- assert(FunctionType
- == Context.getCanonicalType(Specialization->getType()));
- Matches.push_back(std::make_pair(I.getPair(),
- cast<FunctionDecl>(Specialization->getCanonicalDecl())));
+public:
+ AddressOfFunctionResolver(Sema &S, Expr* SourceExpr,
+ const QualType& TargetType, bool Complain)
+ : S(S), SourceExpr(SourceExpr), TargetType(TargetType),
+ Complain(Complain), Context(S.getASTContext()),
+ TargetTypeIsNonStaticMemberFunction(
+ !!TargetType->getAs<MemberPointerType>()),
+ FoundNonTemplateFunction(false),
+ OvlExprInfo(OverloadExpr::find(SourceExpr)),
+ OvlExpr(OvlExprInfo.Expression)
+ {
+ ExtractUnqualifiedFunctionTypeFromTargetType();
+
+ if (!TargetFunctionType->isFunctionType()) {
+ if (OvlExpr->hasExplicitTemplateArgs()) {
+ DeclAccessPair dap;
+      if (FunctionDecl *Fn = S.ResolveSingleFunctionTemplateSpecialization(
+              OvlExpr, false, &dap)) {
+        Matches.push_back(std::make_pair(dap, Fn));
+ }
+ }
+ return;
+ }
+
+ if (OvlExpr->hasExplicitTemplateArgs())
+ OvlExpr->getExplicitTemplateArgs().copyInto(OvlExplicitTemplateArgs);
+
+ if (FindAllFunctionsThatMatchTargetTypeExactly()) {
+ // C++ [over.over]p4:
+ // If more than one function is selected, [...]
+ if (Matches.size() > 1) {
+ if (FoundNonTemplateFunction)
+ EliminateAllTemplateMatches();
+ else
+ EliminateAllExceptMostSpecializedTemplate();
}
-
- continue;
}
+ }
+
+private:
+ bool isTargetTypeAFunction() const {
+ return TargetFunctionType->isFunctionType();
+ }
+
+ // [ToType] [Return]
+ // R (*)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (&)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (S::*)(A) --> R (A), IsNonStaticMemberFunction = true
+  inline void ExtractUnqualifiedFunctionTypeFromTargetType() {
+ TargetFunctionType = S.ExtractUnqualifiedFunctionType(TargetType);
+ }
+
+  // Returns true if any matching specializations were found.
+ bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
+ const DeclAccessPair& CurAccessFunPair) {
+ if (CXXMethodDecl *Method
+ = dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
+ // Skip non-static function templates when converting to pointer, and
+ // static when converting to member pointer.
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
+
+ // C++ [over.over]p2:
+ // If the name is a function template, template argument deduction is
+ // done (14.8.2.2), and if the argument deduction succeeds, the
+ // resulting template argument list is used to generate a single
+ // function template specialization, which is added to the set of
+ // overloaded functions considered.
+ FunctionDecl *Specialization = 0;
+ TemplateDeductionInfo Info(Context, OvlExpr->getNameLoc());
+ if (Sema::TemplateDeductionResult Result
+ = S.DeduceTemplateArguments(FunctionTemplate,
+ &OvlExplicitTemplateArgs,
+ TargetFunctionType, Specialization,
+ Info)) {
+ // FIXME: make a note of the failed deduction for diagnostics.
+ (void)Result;
+ return false;
+ }
+
+ // Template argument deduction ensures that we have an exact match.
+    // This function template specialization works.
+ Specialization = cast<FunctionDecl>(Specialization->getCanonicalDecl());
+ assert(TargetFunctionType
+ == Context.getCanonicalType(Specialization->getType()));
+ Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
+ return true;
+ }
+
+ bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
+ const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
// Skip non-static functions when converting to pointer, and static
// when converting to member pointer.
- if (Method->isStatic() == IsMember)
- continue;
-
- // If we have explicit template arguments, skip non-templates.
- if (OvlExpr->hasExplicitTemplateArgs())
- continue;
- } else if (IsMember)
- continue;
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
QualType ResultTy;
- if (Context.hasSameUnqualifiedType(FunctionType, FunDecl->getType()) ||
- IsNoReturnConversion(Context, FunDecl->getType(), FunctionType,
+ if (Context.hasSameUnqualifiedType(TargetFunctionType,
+ FunDecl->getType()) ||
+ IsNoReturnConversion(Context, FunDecl->getType(), TargetFunctionType,
ResultTy)) {
- Matches.push_back(std::make_pair(I.getPair(),
- cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
+ Matches.push_back(std::make_pair(CurAccessFunPair,
+ cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
FoundNonTemplateFunction = true;
+ return true;
}
}
+
+ return false;
}
+
+ bool FindAllFunctionsThatMatchTargetTypeExactly() {
+ bool Ret = false;
+
+ // If the overload expression doesn't have the form of a pointer to
+ // member, don't try to convert it to a pointer-to-member type.
+ if (IsInvalidFormOfPointerToMemberFunction())
+ return false;
- // If there were 0 or 1 matches, we're done.
- if (Matches.empty()) {
- if (Complain) {
- Diag(From->getLocStart(), diag::err_addr_ovl_no_viable)
- << OvlExpr->getName() << FunctionType;
- for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- E = OvlExpr->decls_end();
- I != E; ++I)
- if (FunctionDecl *F = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()))
- NoteOverloadCandidate(F);
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ E = OvlExpr->decls_end();
+ I != E; ++I) {
+ // Look through any using declarations to find the underlying function.
+ NamedDecl *Fn = (*I)->getUnderlyingDecl();
+
+ // C++ [over.over]p3:
+ // Non-member functions and static member functions match
+ // targets of type "pointer-to-function" or "reference-to-function."
+ // Nonstatic member functions match targets of
+ // type "pointer-to-member-function."
+ // Note that according to DR 247, the containing class does not matter.
+ if (FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(Fn)) {
+ if (AddMatchingTemplateFunction(FunctionTemplate, I.getPair()))
+ Ret = true;
+ }
+ // If we have explicit template arguments supplied, skip non-templates.
+ else if (!OvlExpr->hasExplicitTemplateArgs() &&
+ AddMatchingNonTemplateFunction(Fn, I.getPair()))
+ Ret = true;
}
-
- return 0;
- } else if (Matches.size() == 1) {
- FunctionDecl *Result = Matches[0].second;
- FoundResult = Matches[0].first;
- MarkDeclarationReferenced(From->getLocStart(), Result);
- if (Complain)
- CheckAddressOfMemberAccess(OvlExpr, Matches[0].first);
- return Result;
+ assert(Ret || Matches.empty());
+ return Ret;
}
- // C++ [over.over]p4:
- // If more than one function is selected, [...]
- if (!FoundNonTemplateFunction) {
+ void EliminateAllExceptMostSpecializedTemplate() {
// [...] and any given function template specialization F1 is
// eliminated if the set contains a second function template
// specialization whose function template is more specialized
@@ -6374,84 +7300,159 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *From, QualType ToType,
UnresolvedSet<4> MatchesCopy; // TODO: avoid!
for (unsigned I = 0, E = Matches.size(); I != E; ++I)
MatchesCopy.addDecl(Matches[I].second, Matches[I].first.getAccess());
-
+
UnresolvedSetIterator Result =
- getMostSpecialized(MatchesCopy.begin(), MatchesCopy.end(),
- TPOC_Other, From->getLocStart(),
- PDiag(),
- PDiag(diag::err_addr_ovl_ambiguous)
- << Matches[0].second->getDeclName(),
- PDiag(diag::note_ovl_candidate)
- << (unsigned) oc_function_template);
- assert(Result != MatchesCopy.end() && "no most-specialized template");
- MarkDeclarationReferenced(From->getLocStart(), *Result);
- FoundResult = Matches[Result - MatchesCopy.begin()].first;
- if (Complain) {
- CheckUnresolvedAccess(*this, OvlExpr, FoundResult);
- DiagnoseUseOfDecl(FoundResult, OvlExpr->getNameLoc());
- }
- return cast<FunctionDecl>(*Result);
- }
-
- // [...] any function template specializations in the set are
- // eliminated if the set also contains a non-template function, [...]
- for (unsigned I = 0, N = Matches.size(); I != N; ) {
- if (Matches[I].second->getPrimaryTemplate() == 0)
- ++I;
- else {
- Matches[I] = Matches[--N];
- Matches.set_size(N);
+ S.getMostSpecialized(MatchesCopy.begin(), MatchesCopy.end(),
+ TPOC_Other, 0, SourceExpr->getLocStart(),
+ S.PDiag(),
+ S.PDiag(diag::err_addr_ovl_ambiguous)
+ << Matches[0].second->getDeclName(),
+ S.PDiag(diag::note_ovl_candidate)
+ << (unsigned) oc_function_template,
+ Complain);
+
+ if (Result != MatchesCopy.end()) {
+ // Make it the first and only element
+ Matches[0].first = Matches[Result - MatchesCopy.begin()].first;
+ Matches[0].second = cast<FunctionDecl>(*Result);
+ Matches.resize(1);
+ }
+ }
+
+ void EliminateAllTemplateMatches() {
+ // [...] any function template specializations in the set are
+ // eliminated if the set also contains a non-template function, [...]
+ for (unsigned I = 0, N = Matches.size(); I != N; ) {
+ if (Matches[I].second->getPrimaryTemplate() == 0)
+ ++I;
+ else {
+ Matches[I] = Matches[--N];
+ Matches.set_size(N);
+ }
}
}
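
// Aside: a minimal language-level illustration of the two eliminations
// above (C++ [over.over]p4); the identifiers are invented for the sketch.
template <typename T> void h(T) {}
void h(int) {}
void (*p1)(int) = &h;    // non-template h(int) eliminates h<int>

template <typename T> void g(T) {}
template <typename T> void g(T *) {}
void (*p2)(int *) = &g;  // g<int>(int*) is the more specialized match

int main() { p1(0); int x = 0; p2(&x); }
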
+
+public:
+ void ComplainNoMatchesFound() const {
+ assert(Matches.empty());
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
+ << OvlExpr->getName() << TargetFunctionType
+ << OvlExpr->getSourceRange();
+ S.NoteAllOverloadCandidates(OvlExpr);
+ }
- // [...] After such eliminations, if any, there shall remain exactly one
- // selected function.
- if (Matches.size() == 1) {
- MarkDeclarationReferenced(From->getLocStart(), Matches[0].second);
- FoundResult = Matches[0].first;
- if (Complain) {
- CheckUnresolvedAccess(*this, OvlExpr, Matches[0].first);
- DiagnoseUseOfDecl(Matches[0].first, OvlExpr->getNameLoc());
- }
- return cast<FunctionDecl>(Matches[0].second);
- }
-
- // FIXME: We should probably return the same thing that BestViableFunction
- // returns (even if we issue the diagnostics here).
- Diag(From->getLocStart(), diag::err_addr_ovl_ambiguous)
- << Matches[0].second->getDeclName();
- for (unsigned I = 0, E = Matches.size(); I != E; ++I)
- NoteOverloadCandidate(Matches[I].second);
- return 0;
+ bool IsInvalidFormOfPointerToMemberFunction() const {
+ return TargetTypeIsNonStaticMemberFunction &&
+ !OvlExprInfo.HasFormOfMemberPointer;
+ }
+
+ void ComplainIsInvalidFormOfPointerToMemberFunction() const {
+ // TODO: Should we condition this on whether any functions might
+ // have matched, or is it more appropriate to do that in callers?
+ // TODO: a fixit wouldn't hurt.
+ S.Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier)
+ << TargetType << OvlExpr->getSourceRange();
+ }
+
+ void ComplainOfInvalidConversion() const {
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
+ << OvlExpr->getName() << TargetType;
+ }
+
+ void ComplainMultipleMatchesFound() const {
+ assert(Matches.size() > 1);
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
+ << OvlExpr->getName()
+ << OvlExpr->getSourceRange();
+ S.NoteAllOverloadCandidates(OvlExpr);
+ }
+
+ int getNumMatches() const { return Matches.size(); }
+
+ FunctionDecl* getMatchingFunctionDecl() const {
+ if (Matches.size() != 1) return 0;
+ return Matches[0].second;
+ }
+
+ const DeclAccessPair* getMatchingFunctionAccessPair() const {
+ if (Matches.size() != 1) return 0;
+ return &Matches[0].first;
+ }
+};
+
+/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
+/// an overloaded function (C++ [over.over]), where @p From is an
+/// expression with overloaded function type and @p ToType is the type
+/// we're trying to resolve to. For example:
+///
+/// @code
+/// int f(double);
+/// int f(int);
+///
+/// int (*pfd)(double) = f; // selects f(double)
+/// @endcode
+///
+/// This routine returns the resulting FunctionDecl if it could be
+/// resolved, and NULL otherwise. When @p Complain is true, this
+/// routine will emit diagnostics if there is an error.
+FunctionDecl *
+Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
+ bool Complain,
+ DeclAccessPair &FoundResult) {
+
+ assert(AddressOfExpr->getType() == Context.OverloadTy);
+
+ AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType, Complain);
+ int NumMatches = Resolver.getNumMatches();
+ FunctionDecl* Fn = 0;
+  if (NumMatches == 0 && Complain) {
+ if (Resolver.IsInvalidFormOfPointerToMemberFunction())
+ Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
+ else
+ Resolver.ComplainNoMatchesFound();
+ }
+ else if (NumMatches > 1 && Complain)
+ Resolver.ComplainMultipleMatchesFound();
+ else if (NumMatches == 1) {
+ Fn = Resolver.getMatchingFunctionDecl();
+ assert(Fn);
+ FoundResult = *Resolver.getMatchingFunctionAccessPair();
+ MarkDeclarationReferenced(AddressOfExpr->getLocStart(), Fn);
+ if (Complain)
+ CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
+ }
+
+ return Fn;
}
-/// \brief Given an expression that refers to an overloaded function, try to
+/// \brief Given an expression that refers to an overloaded function, try to
/// resolve that overloaded function expression down to a single function.
///
/// This routine can only resolve template-ids that refer to a single function
/// template, where that template-id refers to a single template whose template
-/// arguments are either provided by the template-id or have defaults,
+/// arguments are either provided by the template-id or have defaults,
/// as described in C++0x [temp.arg.explicit]p3.
-FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From) {
+FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From,
+ bool Complain,
+ DeclAccessPair* FoundResult) {
// C++ [over.over]p1:
// [...] [Note: any redundant set of parentheses surrounding the
// overloaded function name is ignored (5.1). ]
// C++ [over.over]p1:
// [...] The overloaded function name can be preceded by the &
// operator.
-
if (From->getType() != Context.OverloadTy)
return 0;
OverloadExpr *OvlExpr = OverloadExpr::find(From).Expression;
-
+
// If we didn't actually find any template-ids, we're done.
if (!OvlExpr->hasExplicitTemplateArgs())
return 0;
TemplateArgumentListInfo ExplicitTemplateArgs;
OvlExpr->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
-
+
// Look through all of the overloaded functions, searching for one
// whose type matches exactly.
FunctionDecl *Matched = 0;
@@ -6459,13 +7460,13 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From) {
E = OvlExpr->decls_end(); I != E; ++I) {
// C++0x [temp.arg.explicit]p3:
// [...] In contexts where deduction is done and fails, or in contexts
- // where deduction is not done, if a template argument list is
- // specified and it, along with any default template arguments,
- // identifies a single function template specialization, then the
+ // where deduction is not done, if a template argument list is
+ // specified and it, along with any default template arguments,
+ // identifies a single function template specialization, then the
// template-id is an lvalue for the function template specialization.
FunctionTemplateDecl *FunctionTemplate
= cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl());
-
+
// C++ [over.over]p2:
// If the name is a function template, template argument deduction is
// done (14.8.2.2), and if the argument deduction succeeds, the
@@ -6480,18 +7481,28 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From) {
// FIXME: make a note of the failed deduction for diagnostics.
(void)Result;
continue;
- }
-
+ }
+
// Multiple matches; we can't resolve to a single declaration.
- if (Matched)
+ if (Matched) {
+ if (FoundResult)
+ *FoundResult = DeclAccessPair();
+
+ if (Complain) {
+ Diag(From->getLocStart(), diag::err_addr_ovl_ambiguous)
+ << OvlExpr->getName();
+ NoteAllOverloadCandidates(OvlExpr);
+ }
return 0;
-
- Matched = Specialization;
+ }
+
+ if ((Matched = Specialization) && FoundResult)
+ *FoundResult = I.getPair();
}
return Matched;
}
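
// Aside: the C++0x [temp.arg.explicit]p3 behavior this routine implements,
// as a small example (names invented). The template-id twice<int> identifies
// a single specialization, so it resolves without deducing anything from
// the target type.
template <typename T> int twice(T x) { return x + x; }

int (*fp)(int) = &twice<int>;
int main() { return fp(21) == 42 ? 0 : 1; }
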
-
+
/// \brief Add a single candidate to the overload set.
static void AddOverloadedCallCandidate(Sema &S,
DeclAccessPair FoundDecl,
@@ -6522,7 +7533,7 @@ static void AddOverloadedCallCandidate(Sema &S,
// do nothing?
}
-
+
/// \brief Add the overload candidates named by callee and/or found by argument
/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
@@ -6570,7 +7581,7 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
E = ULE->decls_end(); I != E; ++I)
AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs,
- Args, NumArgs, CandidateSet,
+ Args, NumArgs, CandidateSet,
PartialOverloading);
if (ULE->requiresADL())
@@ -6578,7 +7589,7 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
Args, NumArgs,
ExplicitTemplateArgs,
CandidateSet,
- PartialOverloading);
+ PartialOverloading);
}
/// Attempts to recover from a call where no functions were found.
@@ -6589,7 +7600,6 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
SourceLocation RParenLoc) {
CXXScopeSpec SS;
@@ -6616,7 +7626,8 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// casts and such from the call, we don't really care.
ExprResult NewFn = ExprError();
if ((*R.begin())->isCXXClassMember())
- NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, R, ExplicitTemplateArgs);
+ NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, R,
+ ExplicitTemplateArgs);
else if (ExplicitTemplateArgs)
NewFn = SemaRef.BuildTemplateIdExpr(SS, R, false, *ExplicitTemplateArgs);
else
@@ -6629,8 +7640,7 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// an expression with non-empty lookup results, which should never
// end up here.
return SemaRef.ActOnCallExpr(/*Scope*/ 0, NewFn.take(), LParenLoc,
- MultiExprArg(Args, NumArgs),
- CommaLocs, RParenLoc);
+ MultiExprArg(Args, NumArgs), RParenLoc);
}
/// ResolveOverloadedCallFn - Given the call expression that calls Fn
@@ -6644,8 +7654,8 @@ ExprResult
Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ Expr *ExecConfig) {
#ifndef NDEBUG
if (ULE->requiresADL()) {
// To do ADL, we must have found an unqualified name.
@@ -6658,7 +7668,7 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
(F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
F->getBuiltinID() && F->isImplicit())
assert(0 && "performing ADL for builtin");
-
+
// We don't perform ADL in C.
assert(getLangOptions().CPlusPlus && "ADL enabled in C");
}
@@ -6675,16 +7685,18 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
// bailout out if it fails.
if (CandidateSet.empty())
return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc, Args, NumArgs,
- CommaLocs, RParenLoc);
+ RParenLoc);
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best)) {
case OR_Success: {
FunctionDecl *FDecl = Best->Function;
CheckUnresolvedLookupAccess(ULE, Best->FoundDecl);
- DiagnoseUseOfDecl(Best->FoundDecl, ULE->getNameLoc());
+ DiagnoseUseOfDecl(FDecl? FDecl : Best->FoundDecl.getDecl(),
+ ULE->getNameLoc());
Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
- return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc);
+ return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc,
+ ExecConfig);
}
case OR_No_Viable_Function:
@@ -6746,6 +7758,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// TODO: provide better source location info.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
+ if (Input->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Input);
+
Expr *Args[2] = { Input, 0 };
unsigned NumArgs = 1;
@@ -6762,19 +7777,21 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
if (Input->isTypeDependent()) {
if (Fns.empty())
return Owned(new (Context) UnaryOperator(Input,
- Opc,
+ Opc,
Context.DependentTy,
+ VK_RValue, OK_Ordinary,
OpLoc));
-
+
CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators
UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, /*Dependent*/ true, NamingClass,
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
0, SourceRange(), OpNameInfo,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
&Args[0], NumArgs,
Context.DependentTy,
+ VK_RValue,
OpLoc));
}
@@ -6818,8 +7835,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// Convert the arguments.
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
FnDecl->getParamDecl(0)),
- SourceLocation(),
+ SourceLocation(),
Input);
if (InputInit.isInvalid())
return ExprError();
@@ -6828,20 +7846,20 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
- // Determine the result type
- QualType ResultTy = FnDecl->getCallResultType();
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
// Build the actual expression node.
- Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
- SourceLocation());
- UsualUnaryConversions(FnExpr);
+ Expr *FnExpr = CreateFunctionRefExpr(*this, FnDecl);
Args[0] = Input;
CallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr,
- Args, NumArgs, ResultTy, OpLoc);
+ Args, NumArgs, ResultTy, VK, OpLoc);
- if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
+ if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
FnDecl))
return ExprError();
@@ -6864,8 +7882,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
break;
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
<< UnaryOperator::getOpcodeStr(Opc)
+ << Input->getType()
<< Input->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
Args, NumArgs,
@@ -6920,14 +7939,18 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// expression.
if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
if (Fns.empty()) {
- // If there are no functions to store, just build a dependent
+ // If there are no functions to store, just build a dependent
// BinaryOperator or CompoundAssignment.
if (Opc <= BO_Assign || Opc > BO_OrAssign)
return Owned(new (Context) BinaryOperator(Args[0], Args[1], Opc,
- Context.DependentTy, OpLoc));
-
+ Context.DependentTy,
+ VK_RValue, OK_Ordinary,
+ OpLoc));
+
return Owned(new (Context) CompoundAssignOperator(Args[0], Args[1], Opc,
Context.DependentTy,
+ VK_LValue,
+ OK_Ordinary,
Context.DependentTy,
Context.DependentTy,
OpLoc));
@@ -6938,20 +7961,48 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// TODO: provide better source location info in DNLoc component.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, /*Dependent*/ true, NamingClass,
- 0, SourceRange(), OpNameInfo,
- /*ADL*/ true, IsOverloaded(Fns),
+ = UnresolvedLookupExpr::Create(Context, NamingClass, 0, SourceRange(),
+ OpNameInfo, /*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
Args, 2,
Context.DependentTy,
+ VK_RValue,
OpLoc));
}
- // If this is the .* operator, which is not overloadable, just
- // create a built-in binary operator.
- if (Opc == BO_PtrMemD)
- return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ // Always do property rvalue conversions on the RHS.
+ if (Args[1]->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Args[1]);
+
+ // The LHS is more complicated.
+ if (Args[0]->getObjectKind() == OK_ObjCProperty) {
+
+ // There's a tension for assignment operators between primitive
+ // property assignment and the overloaded operators.
+ if (BinaryOperator::isAssignmentOp(Opc)) {
+ const ObjCPropertyRefExpr *PRE = LHS->getObjCProperty();
+
+ // Is the property "logically" settable?
+ bool Settable = (PRE->isExplicitProperty() ||
+ PRE->getImplicitPropertySetter());
+
+      // To avoid gratuitously inventing semantics, use the primitive
+      // form unless the property isn't settable. Thoughts in case we
+      // ever really care:
+ // - If the property isn't logically settable, we have to
+ // load and hope.
+ // - If the property is settable and this is simple assignment,
+ // we really should use the primitive.
+ // - If the property is settable, then we could try overloading
+ // on a generic lvalue of the appropriate type; if it works
+ // out to a builtin candidate, we would do that same operation
+ // on the property, and otherwise just error.
+ if (Settable)
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ }
+
+ ConvertPropertyForRValue(Args[0]);
+ }
// If this is the assignment operator, we only perform overload resolution
// if the left-hand side is a class or enumeration type. This is actually
@@ -6962,6 +8013,11 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType())
return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ // If this is the .* operator, which is not overloadable, just
+ // create a built-in binary operator.
+ if (Opc == BO_PtrMemD)
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+
// Build an empty overload set.
OverloadCandidateSet CandidateSet(OpLoc);
@@ -6996,37 +8052,33 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// Best->Access is only meaningful for class members.
CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl);
- ExprResult Arg1
- = PerformCopyInitialization(
- InitializedEntity::InitializeParameter(
- FnDecl->getParamDecl(0)),
- SourceLocation(),
- Owned(Args[1]));
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Owned(Args[1]));
if (Arg1.isInvalid())
return ExprError();
- if (PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
+ if (PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
Best->FoundDecl, Method))
return ExprError();
Args[1] = RHS = Arg1.takeAs<Expr>();
} else {
// Convert the arguments.
- ExprResult Arg0
- = PerformCopyInitialization(
- InitializedEntity::InitializeParameter(
- FnDecl->getParamDecl(0)),
- SourceLocation(),
- Owned(Args[0]));
+ ExprResult Arg0 = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Owned(Args[0]));
if (Arg0.isInvalid())
return ExprError();
- ExprResult Arg1
- = PerformCopyInitialization(
- InitializedEntity::InitializeParameter(
- FnDecl->getParamDecl(1)),
- SourceLocation(),
- Owned(Args[1]));
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(1)),
+ SourceLocation(), Owned(Args[1]));
if (Arg1.isInvalid())
return ExprError();
Args[0] = LHS = Arg0.takeAs<Expr>();
@@ -7035,21 +8087,19 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
- // Determine the result type
- QualType ResultTy
- = FnDecl->getType()->getAs<FunctionType>()
- ->getCallResultType(Context);
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
// Build the actual expression node.
- Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
- OpLoc);
- UsualUnaryConversions(FnExpr);
+ Expr *FnExpr = CreateFunctionRefExpr(*this, FnDecl, OpLoc);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr,
- Args, 2, ResultTy, OpLoc);
-
- if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
+ Args, 2, ResultTy, VK, OpLoc);
+
+ if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
FnDecl))
return ExprError();
@@ -7076,11 +8126,11 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Opc == BO_Comma)
break;
- // For class as left operand for assignment or compound assigment operator
- // do not fall through to handling in built-in, but report that no overloaded
- // assignment operator found
+    // For a class as the left operand of an assignment or compound
+    // assignment operator, do not fall through to the built-in handling,
+    // but report that no overloaded assignment operator was found.
ExprResult Result = ExprError();
- if (Args[0]->getType()->isRecordType() &&
+ if (Args[0]->getType()->isRecordType() &&
Opc >= BO_Assign && Opc <= BO_OrAssign) {
Diag(OpLoc, diag::err_ovl_no_viable_oper)
<< BinaryOperator::getOpcodeStr(Opc)
@@ -7090,7 +8140,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// produce an error. Then, show the non-viable candidates.
Result = CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
}
- assert(Result.isInvalid() &&
+ assert(Result.isInvalid() &&
"C++ binary operator overloading is missing candidates!");
if (Result.isInvalid())
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, 2,
@@ -7099,8 +8149,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
}
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary)
<< BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType() << Args[1]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, 2,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
@@ -7136,7 +8187,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
DeclarationNameInfo OpNameInfo(OpName, LLoc);
OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, /*Dependent*/ true, NamingClass,
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
0, SourceRange(), OpNameInfo,
/*ADL*/ true, /*Overloaded*/ false,
UnresolvedSetIterator(),
@@ -7146,9 +8197,15 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
return Owned(new (Context) CXXOperatorCallExpr(Context, OO_Subscript, Fn,
Args, 2,
Context.DependentTy,
+ VK_RValue,
RLoc));
}
+ if (Args[0]->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Args[0]);
+ if (Args[1]->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Args[1]);
+
// Build an empty overload set.
OverloadCandidateSet CandidateSet(LLoc);
@@ -7176,15 +8233,16 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// Convert the arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
- if (PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
+ if (PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
Best->FoundDecl, Method))
return ExprError();
// Convert the arguments.
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
FnDecl->getParamDecl(0)),
- SourceLocation(),
+ SourceLocation(),
Owned(Args[1]));
if (InputInit.isInvalid())
return ExprError();
@@ -7192,19 +8250,17 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
Args[1] = InputInit.takeAs<Expr>();
// Determine the result type
- QualType ResultTy
- = FnDecl->getType()->getAs<FunctionType>()
- ->getCallResultType(Context);
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
// Build the actual expression node.
- Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
- LLoc);
- UsualUnaryConversions(FnExpr);
+ Expr *FnExpr = CreateFunctionRefExpr(*this, FnDecl, LLoc);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
FnExpr, Args, 2,
- ResultTy, RLoc);
+ ResultTy, VK, RLoc);
if (CheckCallReturnType(FnDecl->getResultType(), LLoc, TheCall,
FnDecl))
@@ -7240,8 +8296,10 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
}
case OR_Ambiguous:
- Diag(LLoc, diag::err_ovl_ambiguous_oper)
- << "[]" << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ Diag(LLoc, diag::err_ovl_ambiguous_oper_binary)
+ << "[]"
+ << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, 2,
"[]", LLoc);
return ExprError();
@@ -7269,12 +8327,11 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ExprResult
Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
SourceLocation LParenLoc, Expr **Args,
- unsigned NumArgs, SourceLocation *CommaLocs,
- SourceLocation RParenLoc) {
+ unsigned NumArgs, SourceLocation RParenLoc) {
// Dig out the member expression. This holds both the object
// argument and the member function we're referring to.
Expr *NakedMemExpr = MemExprE->IgnoreParens();
-
+
MemberExpr *MemExpr;
CXXMethodDecl *Method = 0;
DeclAccessPair FoundDecl = DeclAccessPair::make(0, AS_public);
@@ -7287,8 +8344,11 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
} else {
UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
Qualifier = UnresExpr->getQualifier();
-
+
QualType ObjectType = UnresExpr->getBaseType();
+ Expr::Classification ObjectClassification
+ = UnresExpr->isArrow()? Expr::Classification::makeSimpleLValue()
+ : UnresExpr->getBase()->Classify(Context);
// Add overload candidates
OverloadCandidateSet CandidateSet(UnresExpr->getMemberLoc());
@@ -7308,20 +8368,26 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (isa<UsingShadowDecl>(Func))
Func = cast<UsingShadowDecl>(Func)->getTargetDecl();
- if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
+
+ // Microsoft supports direct constructor calls.
+ if (getLangOptions().Microsoft && isa<CXXConstructorDecl>(Func)) {
+ AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(), Args, NumArgs,
+ CandidateSet);
+ } else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
// If explicit template arguments were provided, we can't call a
// non-template member function.
if (TemplateArgs)
continue;
-
+
AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
- Args, NumArgs,
- CandidateSet, /*SuppressUserConversions=*/false);
+ ObjectClassification,
+ Args, NumArgs, CandidateSet,
+ /*SuppressUserConversions=*/false);
} else {
AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func),
I.getPair(), ActingDC, TemplateArgs,
- ObjectType, Args, NumArgs,
- CandidateSet,
+ ObjectType, ObjectClassification,
+ Args, NumArgs, CandidateSet,
                                 /*SuppressUserConversions=*/false);
}
}
@@ -7330,7 +8396,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
- Best)) {
+ Best)) {
case OR_Success:
Method = cast<CXXMethodDecl>(Best->Function);
FoundDecl = Best->FoundDecl;
@@ -7374,17 +8440,20 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
}
+ QualType ResultType = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
assert(Method && "Member call to something that isn't a method?");
- CXXMemberCallExpr *TheCall =
+ CXXMemberCallExpr *TheCall =
new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs,
- Method->getCallResultType(),
- RParenLoc);
+ ResultType, VK, RParenLoc);
// Check for a valid return type.
- if (CheckCallReturnType(Method->getResultType(), MemExpr->getMemberLoc(),
+ if (CheckCallReturnType(Method->getResultType(), MemExpr->getMemberLoc(),
TheCall, Method))
return ExprError();
-
+
// Convert the object argument (for a non-static member function call).
// We only need to do this if there was actually an overload; otherwise
// it was done at lookup.
@@ -7396,7 +8465,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
MemExpr->setBase(ObjectArg);
// Convert the rest of the arguments
- const FunctionProtoType *Proto = Method->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args, NumArgs,
RParenLoc))
return ExprError();
@@ -7415,8 +8485,10 @@ ExprResult
Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
- SourceLocation *CommaLocs,
SourceLocation RParenLoc) {
+ if (Object->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Object);
+
assert(Object->getType()->isRecordType() && "Requires object type argument");
const RecordType *Record = Object->getType()->getAs<RecordType>();
@@ -7430,11 +8502,11 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
OverloadCandidateSet CandidateSet(LParenLoc);
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
- if (RequireCompleteType(LParenLoc, Object->getType(),
+ if (RequireCompleteType(LParenLoc, Object->getType(),
PDiag(diag::err_incomplete_object_call)
<< Object->getSourceRange()))
return true;
-
+
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
LookupQualifiedName(R, Record->getDecl());
R.suppressDiagnostics();
@@ -7442,10 +8514,10 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Object->getType(),
- Args, NumArgs, CandidateSet,
+ Object->Classify(Context), Args, NumArgs, CandidateSet,
/*SuppressUserConversions=*/ false);
}
-
+
// C++ [over.call.object]p2:
// In addition, for each conversion function declared in T of the
// form
@@ -7471,7 +8543,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
-
+
// Skip over templated conversion functions; they aren't
// surrogates.
if (isa<FunctionTemplateDecl>(D))
@@ -7487,8 +8559,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>())
AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto,
- Object->getType(), Args, NumArgs,
- CandidateSet);
+ Object, Args, NumArgs, CandidateSet);
}
// Perform overload resolution.
@@ -7544,14 +8615,15 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
// We selected one of the surrogate functions that converts the
// object parameter to a function pointer. Perform the conversion
// on the object argument, then let ActOnCallExpr finish the job.
-
+
// Create an implicit member expr to refer to the conversion operator.
// and then call it.
- CXXMemberCallExpr *CE = BuildCXXMemberCallExpr(Object, Best->FoundDecl,
- Conv);
-
- return ActOnCallExpr(S, CE, LParenLoc, MultiExprArg(Args, NumArgs),
- CommaLocs, RParenLoc);
+ ExprResult Call = BuildCXXMemberCallExpr(Object, Best->FoundDecl, Conv);
+ if (Call.isInvalid())
+ return ExprError();
+
+ return ActOnCallExpr(S, Call.get(), LParenLoc, MultiExprArg(Args, NumArgs),
+ RParenLoc);
}
CheckMemberOperatorAccess(LParenLoc, Object, 0, Best->FoundDecl);
@@ -7561,7 +8633,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
// that calls this method, using Object for the implicit object
// parameter and passing along the remaining arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
- const FunctionProtoType *Proto = Method->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
unsigned NumArgsInProto = Proto->getNumArgs();
unsigned NumArgsToCheck = NumArgs;
@@ -7580,23 +8653,24 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
MethodArgs[ArgIdx + 1] = Args[ArgIdx];
- Expr *NewFn = new (Context) DeclRefExpr(Method, Method->getType(),
- SourceLocation());
- UsualUnaryConversions(NewFn);
+ Expr *NewFn = CreateFunctionRefExpr(*this, Method);
// Once we've built TheCall, all of the expressions are properly
// owned.
- QualType ResultTy = Method->getCallResultType();
+ QualType ResultTy = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn,
MethodArgs, NumArgs + 1,
- ResultTy, RParenLoc);
+ ResultTy, VK, RParenLoc);
delete [] MethodArgs;
- if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall,
+ if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall,
Method))
return true;
-
+
// We may have default arguments. If so, we need to allocate more
// slots in the call for them.
if (NumArgs < NumArgsInProto)
@@ -7607,7 +8681,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
bool IsError = false;
// Initialize the implicit object parameter.
- IsError |= PerformObjectArgumentInitialization(Object, /*Qualifier=*/0,
+ IsError |= PerformObjectArgumentInitialization(Object, /*Qualifier=*/0,
Best->FoundDecl, Method);
TheCall->setArg(0, Object);
@@ -7622,9 +8696,10 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
Method->getParamDecl(i)),
SourceLocation(), Arg);
-
+
IsError |= InputInit.isInvalid();
Arg = InputInit.takeAs<Expr>();
} else {
@@ -7634,7 +8709,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
IsError = true;
break;
}
-
+
Arg = DefArg.takeAs<Expr>();
}
@@ -7664,7 +8739,11 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
/// @c Member is the name of the member we're trying to find.
ExprResult
Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
- assert(Base->getType()->isRecordType() && "left-hand side must have class type");
+ assert(Base->getType()->isRecordType() &&
+ "left-hand side must have class type");
+
+ if (Base->getObjectKind() == OK_ObjCProperty)
+ ConvertPropertyForRValue(Base);
SourceLocation Loc = Base->getExprLoc();
@@ -7674,7 +8753,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
// for a class object x of type T if T::operator->() exists and if
// the operator is selected as the best match function by the
// overload resolution mechanism (13.3).
- DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
OverloadCandidateSet CandidateSet(Loc);
const RecordType *BaseRecord = Base->getType()->getAs<RecordType>();
@@ -7689,8 +8769,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
- AddMethodCandidate(Oper.getPair(), Base->getType(), 0, 0, CandidateSet,
- /*SuppressUserConversions=*/false);
+ AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
+ 0, 0, CandidateSet, /*SuppressUserConversions=*/false);
}
// Perform overload resolution.
@@ -7711,8 +8791,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
return ExprError();
case OR_Ambiguous:
- Diag(OpLoc, diag::err_ovl_ambiguous_oper)
- << "->" << Base->getSourceRange();
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
+ << "->" << Base->getType() << Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, &Base, 1);
return ExprError();
@@ -7734,16 +8814,16 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
return ExprError();
// Build the operator call.
- Expr *FnExpr = new (Context) DeclRefExpr(Method, Method->getType(),
- SourceLocation());
- UsualUnaryConversions(FnExpr);
-
- QualType ResultTy = Method->getCallResultType();
+ Expr *FnExpr = CreateFunctionRefExpr(*this, Method);
+
+ QualType ResultTy = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr,
- &Base, 1, ResultTy, OpLoc);
+ new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr,
+ &Base, 1, ResultTy, VK, OpLoc);
- if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall,
+ if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall,
Method))
return ExprError();
return Owned(TheCall);
@@ -7760,27 +8840,27 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(),
Found, Fn);
if (SubExpr == PE->getSubExpr())
- return PE->Retain();
-
+ return PE;
+
return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr);
- }
-
+ }
+
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(),
Found, Fn);
- assert(Context.hasSameType(ICE->getSubExpr()->getType(),
+ assert(Context.hasSameType(ICE->getSubExpr()->getType(),
SubExpr->getType()) &&
"Implicit cast type cannot be determined from overload");
assert(ICE->path_empty() && "fixing up hierarchy conversion?");
if (SubExpr == ICE->getSubExpr())
- return ICE->Retain();
-
- return ImplicitCastExpr::Create(Context, ICE->getType(),
+ return ICE;
+
+ return ImplicitCastExpr::Create(Context, ICE->getType(),
ICE->getCastKind(),
SubExpr, 0,
ICE->getValueKind());
- }
-
+ }
+
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
assert(UnOp->getOpcode() == UO_AddrOf &&
"Can only take the address of an overloaded function");
@@ -7795,7 +8875,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
Found, Fn);
if (SubExpr == UnOp->getSubExpr())
- return UnOp->Retain();
+ return UnOp;
assert(isa<DeclRefExpr>(SubExpr)
&& "fixed to something other than a decl ref");
@@ -7810,19 +8890,21 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
QualType MemPtrType
= Context.getMemberPointerType(Fn->getType(), ClassType.getTypePtr());
- return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
- MemPtrType, UnOp->getOperatorLoc());
+ return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
+ VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc());
}
}
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
Found, Fn);
if (SubExpr == UnOp->getSubExpr())
- return UnOp->Retain();
-
+ return UnOp;
+
return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
Context.getPointerType(SubExpr->getType()),
+ VK_RValue, OK_Ordinary,
UnOp->getOperatorLoc());
- }
+ }
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
// FIXME: avoid copy.
@@ -7838,6 +8920,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
Fn,
ULE->getNameLoc(),
Fn->getType(),
+ VK_LValue,
TemplateArgs);
}
@@ -7851,7 +8934,8 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
Expr *Base;
- // If we're filling in
+ // If we're filling in a static method where we used to have an
+ // implicit member access, rewrite to a simple decl ref.
if (MemExpr->isImplicitAccess()) {
if (cast<CXXMethodDecl>(Fn)->isStatic()) {
return DeclRefExpr::Create(Context,
@@ -7860,6 +8944,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
Fn,
MemExpr->getMemberLoc(),
Fn->getType(),
+ VK_LValue,
TemplateArgs);
} else {
SourceLocation Loc = MemExpr->getMemberLoc();
@@ -7870,24 +8955,27 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
/*isImplicit=*/true);
}
} else
- Base = MemExpr->getBase()->Retain();
+ Base = MemExpr->getBase();
return MemberExpr::Create(Context, Base,
- MemExpr->isArrow(),
- MemExpr->getQualifier(),
+ MemExpr->isArrow(),
+ MemExpr->getQualifier(),
MemExpr->getQualifierRange(),
- Fn,
+ Fn,
Found,
MemExpr->getMemberNameInfo(),
TemplateArgs,
- Fn->getType());
+ Fn->getType(),
+ cast<CXXMethodDecl>(Fn)->isStatic()
+ ? VK_LValue : VK_RValue,
+ OK_Ordinary);
}
-
- assert(false && "Invalid reference to overloaded function");
- return E->Retain();
+
+ llvm_unreachable("Invalid reference to overloaded function");
+ return E;
}
-ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
+ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
DeclAccessPair Found,
FunctionDecl *Fn) {
return Owned(FixOverloadedFunctionReference((Expr *)E.get(), Found, Fn));
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 083e4db..e995e8f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -32,7 +32,9 @@ using namespace sema;
StmtResult Sema::ActOnExprStmt(FullExprArg expr) {
Expr *E = expr.get();
- assert(E && "ActOnExprStmt(): missing expression");
+ if (!E) // FIXME: FullExprArg has no error state?
+ return StmtError();
+
// C99 6.8.3p2: The expression in an expression statement is evaluated as a
// void expression for its side effects. Conversion to void allows any
// operand, even incomplete types.
@@ -42,13 +44,12 @@ StmtResult Sema::ActOnExprStmt(FullExprArg expr) {
}
-StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc) {
- return Owned(new (Context) NullStmt(SemiLoc));
+StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc, bool LeadingEmptyMacro) {
+ return Owned(new (Context) NullStmt(SemiLoc, LeadingEmptyMacro));
}
-StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
DeclGroupRef DG = dg.getAsVal<DeclGroupRef>();
// If we have an invalid decl, just return an error.
@@ -59,7 +60,7 @@ StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg,
void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
DeclGroupRef DG = dg.getAsVal<DeclGroupRef>();
-
+
// If we have an invalid decl, just return.
if (DG.isNull() || !DG.isSingleDecl()) return;
// suppress any potential 'unused variable' warning.
@@ -67,10 +68,19 @@ void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
}
void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
+ if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
+ return DiagnoseUnusedExprResult(Label->getSubStmt());
+
const Expr *E = dyn_cast_or_null<Expr>(S);
if (!E)
return;
+ if (E->isBoundMemberFunction(Context)) {
+ Diag(E->getLocStart(), diag::err_invalid_use_of_bound_member_func)
+ << E->getSourceRange();
+ return;
+ }
+
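The new isBoundMemberFunction check fires when an unused expression statement names a member function without calling it. A minimal sketch of the construct it diagnoses (the names S, f, and g are illustrative):

    struct S { void f(); };
    void g(S s) {
      s.f;  // error: a bound member function may only be called
            // (err_invalid_use_of_bound_member_func)
    }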
SourceLocation Loc;
SourceRange R1, R2;
if (!E->isUnusedResultAWarning(Loc, R1, R2, Context))
@@ -80,13 +90,10 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
unsigned DiagID = diag::warn_unused_expr;
- E = E->IgnoreParens();
- if (isa<ObjCImplicitSetterGetterRefExpr>(E))
- DiagID = diag::warn_unused_property_expr;
-
- if (const CXXExprWithTemporaries *Temps = dyn_cast<CXXExprWithTemporaries>(E))
+ if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
E = Temps->getSubExpr();
-
+
+ E = E->IgnoreParenImpCasts();
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
if (E->getType()->isVoidType())
return;
@@ -106,14 +113,15 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
return;
}
- }
- }
- else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ }
+ } else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
Diag(Loc, diag::warn_unused_call) << R1 << R2 << "warn_unused_result";
return;
}
+ } else if (isa<ObjCPropertyRefExpr>(E)) {
+ DiagID = diag::warn_unused_property_expr;
} else if (const CXXFunctionalCastExpr *FC
= dyn_cast<CXXFunctionalCastExpr>(E)) {
if (isa<CXXConstructExpr>(FC->getSubExpr()) ||
@@ -223,30 +231,23 @@ Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
}
StmtResult
-Sema::ActOnLabelStmt(SourceLocation IdentLoc, IdentifierInfo *II,
+Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt) {
- // Look up the record for this label identifier.
- LabelStmt *&LabelDecl = getCurFunction()->LabelMap[II];
-
- // If not forward referenced or defined already, just create a new LabelStmt.
- if (LabelDecl == 0)
- return Owned(LabelDecl = new (Context) LabelStmt(IdentLoc, II, SubStmt));
-
- assert(LabelDecl->getID() == II && "Label mismatch!");
-
- // Otherwise, this label was either forward reference or multiply defined. If
- // multiply defined, reject it now.
- if (LabelDecl->getSubStmt()) {
- Diag(IdentLoc, diag::err_redefinition_of_label) << LabelDecl->getID();
- Diag(LabelDecl->getIdentLoc(), diag::note_previous_definition);
+
+ // If the label was multiply defined, reject it now.
+ if (TheDecl->getStmt()) {
+ Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName();
+ Diag(TheDecl->getLocation(), diag::note_previous_definition);
return Owned(SubStmt);
}
- // Otherwise, this label was forward declared, and we just found its real
- // definition. Fill in the forward definition and return it.
- LabelDecl->setIdentLoc(IdentLoc);
- LabelDecl->setSubStmt(SubStmt);
- return Owned(LabelDecl);
+ // Otherwise, things are good. Fill in the declaration and return it.
+
+ LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
+ TheDecl->setStmt(LS);
+ TheDecl->setLocation(IdentLoc);
+ return Owned(LS);
}
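With labels modeled as LabelDecls, a redefinition is detected simply by asking whether the declaration already owns a statement. A sketch of the rejected construct (names illustrative):

    void f() {
      goto done;
    done: ;
    done: ;  // error: redefinition of label 'done'; the first label
    }        // already filled in TheDecl->getStmt()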
StmtResult
@@ -265,21 +266,29 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
Expr *ConditionExpr = CondResult.takeAs<Expr>();
if (!ConditionExpr)
return StmtError();
-
+
DiagnoseUnusedExprResult(thenStmt);
  // Warn if the if block has a null body without an else clause. This
  // helps prevent bugs due to typos, such as
// if (condition);
// do_stuff();
+ //
if (!elseStmt) {
if (NullStmt* stmt = dyn_cast<NullStmt>(thenStmt))
- Diag(stmt->getSemiLoc(), diag::warn_empty_if_body);
+ // But do not warn if the body is a macro that expands to nothing, e.g:
+ //
+ // #define CALL(x)
+ // if (condition)
+ // CALL(0);
+ //
+ if (!stmt->hasLeadingEmptyMacro())
+ Diag(stmt->getSemiLoc(), diag::warn_empty_if_body);
}
DiagnoseUnusedExprResult(elseStmt);
- return Owned(new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
+ return Owned(new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
thenStmt, ElseLoc, elseStmt));
}
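Together with the new LeadingEmptyMacro flag threaded through ActOnNullStmt, the empty-body warning can now tell a stray semicolon from a macro that expands to nothing. A sketch of both cases (LOG and work are illustrative):

    #define LOG(x)
    void work() {}
    void f(bool cond) {
      if (cond);     // warning: if statement has empty body
        work();
      if (cond)
        LOG("hit");  // no warning: this null statement has
    }                // hasLeadingEmptyMacro() set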
@@ -293,7 +302,7 @@ void Sema::ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &Val,
// Perform a conversion to the promoted condition type if needed.
if (NewWidth > Val.getBitWidth()) {
// If this is an extension, just do it.
- Val.extend(NewWidth);
+ Val = Val.extend(NewWidth);
Val.setIsSigned(NewSign);
// If the input was signed and negative and the output is
@@ -303,21 +312,21 @@ void Sema::ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &Val,
} else if (NewWidth < Val.getBitWidth()) {
// If this is a truncation, check for overflow.
llvm::APSInt ConvVal(Val);
- ConvVal.trunc(NewWidth);
+ ConvVal = ConvVal.trunc(NewWidth);
ConvVal.setIsSigned(NewSign);
- ConvVal.extend(Val.getBitWidth());
+ ConvVal = ConvVal.extend(Val.getBitWidth());
ConvVal.setIsSigned(Val.isSigned());
if (ConvVal != Val)
Diag(Loc, DiagID) << Val.toString(10) << ConvVal.toString(10);
// Regardless of whether a diagnostic was emitted, really do the
// truncation.
- Val.trunc(NewWidth);
+ Val = Val.trunc(NewWidth);
Val.setIsSigned(NewSign);
} else if (NewSign != Val.isSigned()) {
// Convert the sign to match the sign of the condition. This can cause
// overflow as well: unsigned(INTMIN)
- // We don't diagnose this overflow, because it is implementation-defined
+ // We don't diagnose this overflow, because it is implementation-defined
// behavior.
// FIXME: Introduce a second, default-ignored warning for this case?
llvm::APSInt OldVal(Val);
@@ -386,7 +395,7 @@ static QualType GetTypeBeforeIntegralPromotion(const Expr* expr) {
}
StmtResult
-Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
+Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
Decl *CondVar) {
ExprResult CondResult;
@@ -396,15 +405,15 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
CondResult = CheckConditionVariable(ConditionVar, SourceLocation(), false);
if (CondResult.isInvalid())
return StmtError();
-
+
Cond = CondResult.release();
}
-
+
if (!Cond)
return StmtError();
-
+
CondResult
- = ConvertToIntegralOrEnumerationType(SwitchLoc, Cond,
+ = ConvertToIntegralOrEnumerationType(SwitchLoc, Cond,
PDiag(diag::err_typecheck_statement_requires_integer),
PDiag(diag::err_switch_incomplete_class_type)
<< Cond->getSourceRange(),
@@ -415,21 +424,30 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
PDiag(0));
if (CondResult.isInvalid()) return StmtError();
Cond = CondResult.take();
-
+
if (!CondVar) {
- CondResult = MaybeCreateCXXExprWithTemporaries(Cond);
+ CheckImplicitConversions(Cond, SwitchLoc);
+ CondResult = MaybeCreateExprWithCleanups(Cond);
if (CondResult.isInvalid())
return StmtError();
Cond = CondResult.take();
}
getCurFunction()->setHasBranchIntoScope();
-
+
SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, Cond);
getCurFunction()->SwitchStack.push_back(SS);
return Owned(SS);
}
+static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
+ if (Val.getBitWidth() < BitWidth)
+ Val = Val.extend(BitWidth);
+ else if (Val.getBitWidth() > BitWidth)
+ Val = Val.trunc(BitWidth);
+ Val.setIsSigned(IsSigned);
+}
+
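The extend/trunc rewrites above track the updated APInt/APSInt interface, where both operations return the resized value instead of mutating in place; AdjustAPSInt wraps the recurring pattern. A minimal sketch, assuming the post-change signatures:

    llvm::APSInt Val(llvm::APInt(16, 42), /*isUnsigned=*/false);
    Val = Val.extend(32);  // returns the widened value; 'Val' only
    Val = Val.trunc(8);    // changes because it is reassigned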
StmtResult
Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
Stmt *BodyStmt) {
@@ -442,7 +460,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (SS->getCond() == 0)
return StmtError();
-
+
Expr *CondExpr = SS->getCond();
Expr *CondExprBeforePromotion = CondExpr;
QualType CondTypeBeforePromotion =
@@ -462,7 +480,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// the pre-promotion type of the switch condition.
if (!CondExpr->isTypeDependent()) {
// We have already converted the expression to an integral or enumeration
- // type, when we started the switch statement. If we don't have an
+ // type, when we started the switch statement. If we don't have an
// appropriate type now, just return an error.
if (!CondType->isIntegralOrEnumerationType())
return StmtError();
@@ -531,7 +549,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Convert the value to the same width/sign as the condition.
ConvertIntegerToTypeWarnOnOverflow(LoVal, CondWidth, CondIsSigned,
- CS->getLHS()->getLocStart(),
+ Lo->getLocStart(),
diag::warn_case_value_overflow);
// If the LHS is not the same type as the condition, insert an implicit
@@ -610,7 +628,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Convert the value to the same width/sign as the condition.
ConvertIntegerToTypeWarnOnOverflow(HiVal, CondWidth, CondIsSigned,
- CR->getRHS()->getLocStart(),
+ Hi->getLocStart(),
diag::warn_case_value_overflow);
// If the LHS is not the same type as the condition, insert an implicit
@@ -622,7 +640,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (LoVal > HiVal) {
Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
<< SourceRange(CR->getLHS()->getLocStart(),
- CR->getRHS()->getLocEnd());
+ Hi->getLocEnd());
CaseRanges.erase(CaseRanges.begin()+i);
--i, --e;
continue;
@@ -696,14 +714,14 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
// Check to see if switch is over an Enum and handles all of its
- // values. We don't need to do this if there's a default
- // statement or if we have a constant condition.
+  // values. We only issue a warning if there is no 'default:', but
+  // we still do the analysis to preserve this information in the AST
+  // (which can be used by flow-based analyses).
//
- // TODO: we might want to check whether case values are out of the
- // enum even if we don't want to check whether all cases are handled.
- const EnumType* ET = CondTypeBeforePromotion->getAs<EnumType>();
+ const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
+
// If switch has default case, then ignore it.
- if (!CaseListIsErroneous && !TheDefaultStmt && !HasConstantCond && ET) {
+ if (!CaseListIsErroneous && !HasConstantCond && ET) {
const EnumDecl *ED = ET->getDecl();
typedef llvm::SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy;
EnumValsTy EnumVals;
@@ -711,71 +729,105 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Gather all enum values, set their type and sort them,
// allowing easier comparison with CaseVals.
for (EnumDecl::enumerator_iterator EDI = ED->enumerator_begin();
- EDI != ED->enumerator_end(); EDI++) {
- llvm::APSInt Val = (*EDI)->getInitVal();
- if(Val.getBitWidth() < CondWidth)
- Val.extend(CondWidth);
- else if (Val.getBitWidth() > CondWidth)
- Val.trunc(CondWidth);
- Val.setIsSigned(CondIsSigned);
- EnumVals.push_back(std::make_pair(Val, (*EDI)));
+ EDI != ED->enumerator_end(); ++EDI) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, CondWidth, CondIsSigned);
+ EnumVals.push_back(std::make_pair(Val, *EDI));
}
std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
EnumValsTy::iterator EIend =
std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
- // See which case values aren't in enum
- EnumValsTy::const_iterator EI = EnumVals.begin();
- for (CaseValsTy::const_iterator CI = CaseVals.begin();
+
+ // See which case values aren't in enum.
+ // TODO: we might want to check whether case values are out of the
+ // enum even if we don't want to check whether all cases are handled.
+ if (!TheDefaultStmt) {
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ for (CaseValsTy::const_iterator CI = CaseVals.begin();
CI != CaseVals.end(); CI++) {
- while (EI != EIend && EI->first < CI->first)
- EI++;
- if (EI == EIend || EI->first > CI->first)
+ while (EI != EIend && EI->first < CI->first)
+ EI++;
+ if (EI == EIend || EI->first > CI->first)
Diag(CI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
<< ED->getDeclName();
- }
- // See which of case ranges aren't in enum
- EI = EnumVals.begin();
- for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
- RI != CaseRanges.end() && EI != EIend; RI++) {
- while (EI != EIend && EI->first < RI->first)
- EI++;
-
- if (EI == EIend || EI->first != RI->first) {
- Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
- << ED->getDeclName();
}
+ // See which of case ranges aren't in enum
+ EI = EnumVals.begin();
+ for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ RI != CaseRanges.end() && EI != EIend; RI++) {
+ while (EI != EIend && EI->first < RI->first)
+ EI++;
+
+ if (EI == EIend || EI->first != RI->first) {
+ Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
+ << ED->getDeclName();
+ }
- llvm::APSInt Hi = RI->second->getRHS()->EvaluateAsInt(Context);
- while (EI != EIend && EI->first < Hi)
- EI++;
- if (EI == EIend || EI->first != Hi)
- Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
- << ED->getDeclName();
+ llvm::APSInt Hi = RI->second->getRHS()->EvaluateAsInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+ while (EI != EIend && EI->first < Hi)
+ EI++;
+ if (EI == EIend || EI->first != Hi)
+ Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
+ << ED->getDeclName();
+ }
}
- //Check which enum vals aren't in switch
+
+ // Check which enum vals aren't in switch
CaseValsTy::const_iterator CI = CaseVals.begin();
CaseRangesTy::const_iterator RI = CaseRanges.begin();
- EI = EnumVals.begin();
- for (; EI != EIend; EI++) {
- //Drop unneeded case values
+ bool hasCasesNotInSwitch = false;
+
+ llvm::SmallVector<DeclarationName,8> UnhandledNames;
+
+ for (EnumValsTy::const_iterator EI = EnumVals.begin(); EI != EIend; EI++){
+ // Drop unneeded case values
llvm::APSInt CIVal;
while (CI != CaseVals.end() && CI->first < EI->first)
CI++;
-
+
if (CI != CaseVals.end() && CI->first == EI->first)
continue;
- //Drop unneeded case ranges
+ // Drop unneeded case ranges
for (; RI != CaseRanges.end(); RI++) {
llvm::APSInt Hi = RI->second->getRHS()->EvaluateAsInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
if (EI->first <= Hi)
break;
}
- if (RI == CaseRanges.end() || EI->first < RI->first)
- Diag(CondExpr->getExprLoc(), diag::warn_missing_cases)
- << EI->second->getDeclName();
+ if (RI == CaseRanges.end() || EI->first < RI->first) {
+ hasCasesNotInSwitch = true;
+ if (!TheDefaultStmt)
+ UnhandledNames.push_back(EI->second->getDeclName());
+ }
+ }
+
+ // Produce a nice diagnostic if multiple values aren't handled.
+ switch (UnhandledNames.size()) {
+ case 0: break;
+ case 1:
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_case1)
+ << UnhandledNames[0];
+ break;
+ case 2:
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_case2)
+ << UnhandledNames[0] << UnhandledNames[1];
+ break;
+ case 3:
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_case3)
+ << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
+ break;
+ default:
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_cases)
+ << (unsigned)UnhandledNames.size()
+ << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
+ break;
}
+
+ if (!hasCasesNotInSwitch)
+ SS->setAllEnumCasesCovered();
}
}
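The counted warn_missing_case1/2/3 diagnostics let a switch that misses only a few enumerators name them directly, while warn_missing_cases summarizes larger gaps. For example (enum and function names illustrative):

    enum Color { Red, Green, Blue, Alpha };
    int f(Color c) {
      switch (c) {   // warning: enumeration values 'Blue' and 'Alpha'
      case Red:      // not handled in switch (warn_missing_case2)
        return 0;
      case Green:
        return 1;
      }
      return -1;     // a 'default:' silences the warning, but the
    }                // coverage analysis still runs to mark the AST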
@@ -788,10 +840,10 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
StmtResult
-Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
+Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
Decl *CondVar, Stmt *Body) {
ExprResult CondResult(Cond.release());
-
+
VarDecl *ConditionVar = 0;
if (CondVar) {
ConditionVar = cast<VarDecl>(CondVar);
@@ -802,7 +854,7 @@ Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
Expr *ConditionExpr = CondResult.take();
if (!ConditionExpr)
return StmtError();
-
+
DiagnoseUnusedExprResult(Body);
return Owned(new (Context) WhileStmt(Context, ConditionVar, ConditionExpr,
@@ -818,11 +870,12 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
if (CheckBooleanCondition(Cond, DoLoc))
return StmtError();
- ExprResult CondResult = MaybeCreateCXXExprWithTemporaries(Cond);
+ CheckImplicitConversions(Cond, DoLoc);
+ ExprResult CondResult = MaybeCreateExprWithCleanups(Cond);
if (CondResult.isInvalid())
return StmtError();
Cond = CondResult.take();
-
+
DiagnoseUnusedExprResult(Body);
return Owned(new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen));
@@ -841,7 +894,7 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
DI!=DE; ++DI) {
VarDecl *VD = dyn_cast<VarDecl>(*DI);
- if (VD && VD->isBlockVarDecl() && !VD->hasLocalStorage())
+ if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
VD = 0;
if (VD == 0)
Diag((*DI)->getLocation(), diag::err_non_variable_decl_in_for);
@@ -858,19 +911,30 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
if (SecondResult.isInvalid())
return StmtError();
}
-
+
Expr *Third = third.release().takeAs<Expr>();
-
+
DiagnoseUnusedExprResult(First);
DiagnoseUnusedExprResult(Third);
DiagnoseUnusedExprResult(Body);
- return Owned(new (Context) ForStmt(Context, First,
- SecondResult.take(), ConditionVar,
- Third, Body, ForLoc, LParenLoc,
+ return Owned(new (Context) ForStmt(Context, First,
+ SecondResult.take(), ConditionVar,
+ Third, Body, ForLoc, LParenLoc,
RParenLoc));
}
+/// In an Objective-C collection iteration statement:
+/// for (x in y)
+/// x can be an arbitrary l-value expression. Bind it up as a
+/// full-expression.
+StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
+ CheckImplicitConversions(E);
+ ExprResult Result = MaybeCreateExprWithCleanups(E);
+ if (Result.isInvalid()) return StmtError();
+ return Owned(static_cast<Stmt*>(Result.get()));
+}
+
StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
@@ -889,13 +953,12 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
// declare identifiers for objects having storage class 'auto' or
// 'register'.
VarDecl *VD = cast<VarDecl>(D);
- if (VD->isBlockVarDecl() && !VD->hasLocalStorage())
+ if (VD->isLocalVarDecl() && !VD->hasLocalStorage())
return StmtError(Diag(VD->getLocation(),
diag::err_non_variable_decl_in_for));
} else {
Expr *FirstE = cast<Expr>(First);
- if (!FirstE->isTypeDependent() &&
- FirstE->isLvalue(Context) != Expr::LV_Valid)
+ if (!FirstE->isTypeDependent() && !FirstE->isLValue())
return StmtError(Diag(First->getLocStart(),
diag::err_selector_element_not_lvalue)
<< First->getSourceRange());
@@ -917,7 +980,7 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
else if (const ObjCObjectPointerType *OPT =
SecondType->getAsObjCInterfacePointerType()) {
llvm::SmallVector<IdentifierInfo *, 4> KeyIdents;
- IdentifierInfo* selIdent =
+ IdentifierInfo* selIdent =
&Context.Idents.get("countByEnumeratingWithState");
KeyIdents.push_back(selIdent);
selIdent = &Context.Idents.get("objects");
@@ -926,7 +989,7 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
KeyIdents.push_back(selIdent);
Selector CSelector = Context.Selectors.getSelector(3, &KeyIdents[0]);
if (ObjCInterfaceDecl *IDecl = OPT->getInterfaceDecl()) {
- if (!IDecl->isForwardDecl() &&
+ if (!IDecl->isForwardDecl() &&
!IDecl->lookupInstanceMethod(CSelector)) {
// Must further look into private implementation methods.
if (!LookupPrivateInstanceMethod(CSelector, IDecl))
@@ -940,19 +1003,12 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
ForLoc, RParenLoc));
}
-StmtResult
-Sema::ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
- IdentifierInfo *LabelII) {
- // Look up the record for this label identifier.
- LabelStmt *&LabelDecl = getCurFunction()->LabelMap[LabelII];
-
+StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ LabelDecl *TheDecl) {
getCurFunction()->setHasBranchIntoScope();
-
- // If we haven't seen this label yet, create a forward reference.
- if (LabelDecl == 0)
- LabelDecl = new (Context) LabelStmt(LabelLoc, LabelII, 0);
-
- return Owned(new (Context) GotoStmt(LabelDecl, GotoLoc, LabelLoc));
+ TheDecl->setUsed();
+ return Owned(new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc));
}
StmtResult
@@ -995,45 +1051,130 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
return Owned(new (Context) BreakStmt(BreakLoc));
}
-/// \brief Determine whether a return statement is a candidate for the named
-/// return value optimization (C++0x 12.8p34, bullet 1).
+/// \brief Determine whether the given expression is a candidate for
+/// copy elision in either a return statement or a throw expression.
///
-/// \param Ctx The context in which the return expression and type occur.
+/// \param ReturnType If we're determining the copy elision candidate for
+/// a return statement, this is the return type of the function. If we're
+/// determining the copy elision candidate for a throw expression, this will
+/// be a NULL type.
///
-/// \param RetType The return type of the function or block.
+/// \param E The expression being returned from the function or block, or
+/// being thrown.
///
-/// \param RetExpr The expression being returned from the function or block.
+/// \param AllowFunctionParameter Whether a function parameter may be
+/// returned as the candidate; this is true for the move-construction
+/// check but false when computing the NRVO candidate itself.
///
/// \returns The NRVO candidate variable, if the return statement may use the
/// NRVO, or NULL if there is no such candidate.
-static const VarDecl *getNRVOCandidate(ASTContext &Ctx, QualType RetType,
- Expr *RetExpr) {
- QualType ExprType = RetExpr->getType();
+const VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
+ Expr *E,
+ bool AllowFunctionParameter) {
+ QualType ExprType = E->getType();
// - in a return statement in a function with ...
// ... a class return type ...
- if (!RetType->isRecordType())
- return 0;
- // ... the same cv-unqualified type as the function return type ...
- if (!Ctx.hasSameUnqualifiedType(RetType, ExprType))
- return 0;
- // ... the expression is the name of a non-volatile automatic object ...
- // We ignore parentheses here.
- // FIXME: Is this compliant? (Everyone else does it)
- const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetExpr->IgnoreParens());
+ if (!ReturnType.isNull()) {
+ if (!ReturnType->isRecordType())
+ return 0;
+ // ... the same cv-unqualified type as the function return type ...
+ if (!Context.hasSameUnqualifiedType(ReturnType, ExprType))
+ return 0;
+ }
+
+ // ... the expression is the name of a non-volatile automatic object
+ // (other than a function or catch-clause parameter)) ...
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
if (!DR)
return 0;
const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
return 0;
-
- if (VD->getKind() == Decl::Var && VD->hasLocalStorage() &&
+
+ if (VD->hasLocalStorage() && !VD->isExceptionVariable() &&
!VD->getType()->isReferenceType() && !VD->hasAttr<BlocksAttr>() &&
- !VD->getType().isVolatileQualified())
+ !VD->getType().isVolatileQualified() &&
+ ((VD->getKind() == Decl::Var) ||
+ (AllowFunctionParameter && VD->getKind() == Decl::ParmVar)))
return VD;
-
+
return 0;
}
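The AllowFunctionParameter flag is what lets one routine serve both NRVO proper, where a parameter never qualifies, and the C++0x move check below, where it does. Roughly (types and names illustrative):

    struct S {
      S() {}
      S(const S &) {}
      S(S &&) {}
    };
    S f(S param, bool b) {
      S local;
      if (b)
        return local;  // candidate with AllowFunctionParameter = false
      return param;    // candidate only when AllowFunctionParameter is
    }                  // true, i.e. for the move check, never for NRVO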
+/// \brief Perform the initialization of a potentially-movable value, which
+/// is the result of return value.
+///
+/// This routine implements C++0x [class.copy]p33, which attempts to treat
+/// returned lvalues as rvalues in certain cases (to prefer move construction),
+/// then falls back to treating them as lvalues if that failed.
+ExprResult
+Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *Value) {
+ // C++0x [class.copy]p33:
+ // When the criteria for elision of a copy operation are met or would
+ // be met save for the fact that the source object is a function
+ // parameter, and the object to be copied is designated by an lvalue,
+ // overload resolution to select the constructor for the copy is first
+ // performed as if the object were designated by an rvalue.
+ ExprResult Res = ExprError();
+ if (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true)) {
+ ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
+ Value->getType(), CK_LValueToRValue,
+ Value, VK_XValue);
+
+ Expr *InitExpr = &AsRvalue;
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Value->getLocStart(),
+ Value->getLocStart());
+ InitializationSequence Seq(*this, Entity, Kind, &InitExpr, 1);
+
+ // [...] If overload resolution fails, or if the type of the first
+ // parameter of the selected constructor is not an rvalue reference
+ // to the object's type (possibly cv-qualified), overload resolution
+ // is performed again, considering the object as an lvalue.
+ if (Seq.getKind() != InitializationSequence::FailedSequence) {
+ for (InitializationSequence::step_iterator Step = Seq.step_begin(),
+ StepEnd = Seq.step_end();
+ Step != StepEnd; ++Step) {
+ if (Step->Kind
+ != InitializationSequence::SK_ConstructorInitialization)
+ continue;
+
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step->Function.Function);
+
+ const RValueReferenceType *RRefType
+ = Constructor->getParamDecl(0)->getType()
+ ->getAs<RValueReferenceType>();
+
+ // If we don't meet the criteria, break out now.
+ if (!RRefType ||
+ !Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
+ Context.getTypeDeclType(Constructor->getParent())))
+ break;
+
+ // Promote "AsRvalue" to the heap, since we now need this
+ // expression node to persist.
+ Value = ImplicitCastExpr::Create(Context, Value->getType(),
+ CK_LValueToRValue, Value, 0,
+ VK_XValue);
+
+ // Complete type-checking the initialization of the return type
+ // using the constructor we found.
+ Res = Seq.Perform(*this, Entity, Kind, MultiExprArg(&Value, 1));
+ }
+ }
+ }
+
+ // Either we didn't meet the criteria for treating an lvalue as an rvalue,
+ // above, or overload resolution failed. Either way, we need to try
+ // (again) now with the return value expression as written.
+ if (Res.isInvalid())
+ Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
+
+ return Res;
+}
+
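In effect, C++0x [class.copy]p33 gives a returned local one chance at a move constructor before falling back to the copy. A sketch of the behavior this routine arranges (type T is illustrative):

    struct T {
      T() {}
      T(const T &) {}  // fallback
      T(T &&) {}       // tried first
    };
    T f() {
      T t;
      return t;  // overload resolution first treats 't' as an xvalue,
    }            // selecting T(T&&); if that fails, 't' is an lvalue again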
/// ActOnBlockReturnStmt - Utility routine to figure out block's return type.
///
StmtResult
@@ -1052,14 +1193,14 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// part of the implementation spec. and not the actual qualifier for
// the variable.
if (CDRE->isConstQualAdded())
- CurBlock->ReturnType.removeConst();
+ CurBlock->ReturnType.removeLocalConst(); // FIXME: local???
}
} else
CurBlock->ReturnType = Context.VoidTy;
}
QualType FnRetType = CurBlock->ReturnType;
- if (CurBlock->TheDecl->hasAttr<NoReturnAttr>()) {
+ if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr)
<< getCurFunctionOrMethodDecl()->getDeclName();
return StmtError();
@@ -1079,7 +1220,7 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
} else {
const VarDecl *NRVOCandidate = 0;
-
+
if (!FnRetType->isDependentType() && !RetValExp->isTypeDependent()) {
// we have a non-void block with an expression, continue checking
@@ -1089,35 +1230,36 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization.
// the C version of which boils down to CheckSingleAssignmentConstraints.
- NRVOCandidate = getNRVOCandidate(Context, FnRetType, RetValExp);
- ExprResult Res = PerformCopyInitialization(
- InitializedEntity::InitializeResult(ReturnLoc,
- FnRetType,
- NRVOCandidate != 0),
- SourceLocation(),
- Owned(RetValExp));
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ FnRetType,
+ NRVOCandidate != 0);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ FnRetType, RetValExp);
if (Res.isInvalid()) {
// FIXME: Cleanup temporaries here, anyway?
return StmtError();
}
-
- if (RetValExp)
- RetValExp = MaybeCreateCXXExprWithTemporaries(RetValExp);
+
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
RetValExp = Res.takeAs<Expr>();
- if (RetValExp)
+ if (RetValExp)
CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
}
-
+
Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
}
- // If we need to check for the named return value optimization, save the
+ // If we need to check for the named return value optimization, save the
// return statement in our scope for later processing.
if (getLangOptions().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
-
+
return Owned(Result);
}
@@ -1145,6 +1287,10 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
unsigned D = diag::ext_return_has_expr;
if (RetValExp->getType()->isVoidType())
D = diag::ext_return_has_void_expr;
+ else {
+ IgnoredValueConversions(RetValExp);
+ ImpCastExprToType(RetValExp, Context.VoidTy, CK_ToVoid);
+ }
// return (some void expression); is legal in C++.
if (D != diag::ext_return_has_void_expr ||
@@ -1155,9 +1301,10 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
<< RetValExp->getSourceRange();
}
- RetValExp = MaybeCreateCXXExprWithTemporaries(RetValExp);
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
}
-
+
Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, 0);
} else if (!RetValExp && !FnRetType->isDependentType()) {
unsigned DiagID = diag::warn_return_missing_expr; // C90 6.6.6.4p4
@@ -1180,34 +1327,35 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization.
// the C version of which boils down to CheckSingleAssignmentConstraints.
- NRVOCandidate = getNRVOCandidate(Context, FnRetType, RetValExp);
- ExprResult Res = PerformCopyInitialization(
- InitializedEntity::InitializeResult(ReturnLoc,
- FnRetType,
- NRVOCandidate != 0),
- SourceLocation(),
- Owned(RetValExp));
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ FnRetType,
+ NRVOCandidate != 0);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ FnRetType, RetValExp);
if (Res.isInvalid()) {
// FIXME: Cleanup temporaries here, anyway?
return StmtError();
}
RetValExp = Res.takeAs<Expr>();
- if (RetValExp)
+ if (RetValExp)
CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
}
-
- if (RetValExp)
- RetValExp = MaybeCreateCXXExprWithTemporaries(RetValExp);
+
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
}
-
- // If we need to check for the named return value optimization, save the
+
+ // If we need to check for the named return value optimization, save the
// return statement in our scope for later processing.
if (getLangOptions().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
-
+
return Owned(Result);
}
@@ -1222,14 +1370,14 @@ static bool CheckAsmLValue(const Expr *E, Sema &S) {
// Type dependent expressions will be checked during instantiation.
if (E->isTypeDependent())
return false;
-
- if (E->isLvalue(S.Context) == Expr::LV_Valid)
+
+ if (E->isLValue())
return false; // Cool, this is an lvalue.
// Okay, this is not an lvalue, but perhaps it is the result of a cast that we
// are supposed to allow.
const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
- if (E != E2 && E2->isLvalue(S.Context) == Expr::LV_Valid) {
+ if (E != E2 && E2->isLValue()) {
if (!S.getLangOptions().HeinousExtensions)
S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
<< E->getSourceRange();
@@ -1358,8 +1506,8 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc,
}
AsmStmt *NS =
- new (Context) AsmStmt(Context, AsmLoc, IsSimple, IsVolatile, MSAsm,
- NumOutputs, NumInputs, Names, Constraints, Exprs,
+ new (Context) AsmStmt(Context, AsmLoc, IsSimple, IsVolatile, MSAsm,
+ NumOutputs, NumInputs, Names, Constraints, Exprs,
AsmString, NumClobbers, Clobbers, RParenLoc);
// Validate the asm string, ensuring it makes sense given the operands we
// have.
@@ -1393,7 +1541,7 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc,
enum AsmDomain {
AD_Int, AD_FP, AD_Other
} InputDomain, OutputDomain;
-
+
if (InTy->isIntegerType() || InTy->isPointerType())
InputDomain = AD_Int;
else if (InTy->isRealFloatingType())
@@ -1407,19 +1555,19 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc,
OutputDomain = AD_FP;
else
OutputDomain = AD_Other;
-
+
// They are ok if they are the same size and in the same domain. This
// allows tying things like:
// void* to int*
// void* to int if they are the same size.
// double to long double if they are the same size.
- //
+ //
uint64_t OutSize = Context.getTypeSize(OutTy);
uint64_t InSize = Context.getTypeSize(InTy);
if (OutSize == InSize && InputDomain == OutputDomain &&
InputDomain != AD_Other)
continue;
-
+
// If the smaller input/output operand is not mentioned in the asm string,
// then we can promote it and the asm string won't notice. Check this
// case now.
@@ -1471,7 +1619,7 @@ Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
VarDecl *Var = cast_or_null<VarDecl>(Parm);
if (Var && Var->isInvalidDecl())
return StmtError();
-
+
return Owned(new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body));
}
@@ -1481,8 +1629,11 @@ Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
}
StmtResult
-Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
+Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg CatchStmts, Stmt *Finally) {
+ if (!getLangOptions().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
+
getCurFunction()->setHasBranchProtectedScope();
unsigned NumCatchStmts = CatchStmts.size();
return Owned(ObjCAtTryStmt::Create(Context, AtLoc, Try,
@@ -1494,6 +1645,8 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc,
Expr *Throw) {
if (Throw) {
+ DefaultLvalueConversion(Throw);
+
QualType ThrowType = Throw->getType();
// Make sure the expression type is an ObjC pointer or "void *".
if (!ThrowType->isDependentType() &&
@@ -1504,13 +1657,16 @@ StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc,
<< Throw->getType() << Throw->getSourceRange());
}
}
-
+
return Owned(new (Context) ObjCAtThrowStmt(AtLoc, Throw));
}
StmtResult
-Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
+Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope) {
+ if (!getLangOptions().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
+
if (!Throw) {
    // @throw without an expression designates a rethrow (which must occur
// in the context of an @catch clause).
@@ -1519,8 +1675,8 @@ Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
AtCatchParent = AtCatchParent->getParent();
if (!AtCatchParent)
return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
- }
-
+ }
+
return BuildObjCAtThrowStmt(AtLoc, Throw);
}
@@ -1529,6 +1685,8 @@ Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
Stmt *SyncBody) {
getCurFunction()->setHasBranchProtectedScope();
+ DefaultLvalueConversion(SyncExpr);
+
// Make sure the expression type is an ObjC pointer or "void *".
if (!SyncExpr->getType()->isDependentType() &&
!SyncExpr->getType()->isObjCObjectPointerType()) {
@@ -1589,6 +1747,9 @@ public:
StmtResult
Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
MultiStmtArg RawHandlers) {
+ if (!getLangOptions().Exceptions)
+ Diag(TryLoc, diag::err_exceptions_disabled) << "try";
+
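Both the Objective-C and C++ entry points now consult the corresponding LangOptions flag, so the construct is rejected in Sema rather than surfacing later. For the C++ side, a sketch assuming exceptions were disabled with -fno-exceptions:

    void f() {
      try {            // error: cannot use 'try' with exceptions disabled
      } catch (...) {  // (err_exceptions_disabled)
      }
    }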
unsigned NumHandlers = RawHandlers.size();
assert(NumHandlers > 0 &&
"The parser shouldn't call this if there are no handlers.");
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index 0fc8392..f0a0103 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -20,6 +20,8 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeVisitor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Basic/LangOptions.h"
@@ -28,6 +30,14 @@
using namespace clang;
using namespace sema;
+// Exported for use by Parser.
+SourceRange
+clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
+ unsigned N) {
+ if (!N) return SourceRange();
+ return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
+}
+
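The exported helper collapses a sequence of template parameter lists into one source range, from the first 'template' keyword to the final '>'. For an out-of-line member template (names illustrative):

    template<typename T> struct Outer {
      template<int N> void f();
    };
    template<typename T>  // Ps[0]->getTemplateLoc(): range begins here
    template<int N>       // Ps[1]->getRAngleLoc(): range ends at this '>'
    void Outer<T>::f() {}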
/// \brief Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
@@ -78,12 +88,12 @@ static void FilterAcceptableTemplateNames(ASTContext &C, LookupResult &R) {
else if (Repl != Orig) {
// C++ [temp.local]p3:
- // A lookup that finds an injected-class-name (10.2) can result in an
+ // A lookup that finds an injected-class-name (10.2) can result in an
// ambiguity in certain cases (for example, if it is found in more than
- // one base class). If all of the injected-class-names that are found
- // refer to specializations of the same class template, and if the name
- // is followed by a template-argument-list, the reference refers to the
- // class template itself and not a specialization thereof, and is not
+ // one base class). If all of the injected-class-names that are found
+ // refer to specializations of the same class template, and if the name
+ // is followed by a template-argument-list, the reference refers to the
+ // class template itself and not a specialization thereof, and is not
// ambiguous.
//
// FIXME: Will we eventually have to do the same for alias templates?
@@ -116,12 +126,12 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
DeclarationName TName;
MemberOfUnknownSpecialization = false;
-
+
switch (Name.getKind()) {
case UnqualifiedId::IK_Identifier:
TName = DeclarationName(Name.Identifier);
break;
-
+
case UnqualifiedId::IK_OperatorFunctionId:
TName = Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator);
@@ -137,7 +147,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
QualType ObjectType = ObjectTypePtr.get();
- LookupResult R(*this, TName, Name.getSourceRange().getBegin(),
+ LookupResult R(*this, TName, Name.getSourceRange().getBegin(),
LookupOrdinaryName);
LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization);
@@ -190,7 +200,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
return TemplateKind;
}
-bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
+bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
@@ -202,14 +212,14 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
if (!SS || !SS->isSet() || !isDependentScopeSpecifier(*SS) ||
computeDeclContext(*SS))
return false;
-
+
// The code is missing a 'template' keyword prior to the dependent template
// name.
NestedNameSpecifier *Qualifier = (NestedNameSpecifier*)SS->getScopeRep();
Diag(IILoc, diag::err_template_kw_missing)
<< Qualifier << II.getName()
<< FixItHint::CreateInsertion(IILoc, "template ");
- SuggestedTemplate
+ SuggestedTemplate
= TemplateTy::make(Context.getDependentTemplateName(Qualifier, &II));
SuggestedKind = TNK_Dependent_template_name;
return true;
@@ -230,14 +240,14 @@ void Sema::LookupTemplateName(LookupResult &Found,
assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
isDependent = ObjectType->isDependentType();
- assert((isDependent || !ObjectType->isIncompleteType()) &&
+ assert((isDependent || !ObjectType->isIncompleteType()) &&
"Caller should have completed object type");
} else if (SS.isSet()) {
// This nested-name-specifier occurs after another nested-name-specifier,
    // so look into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext);
isDependent = isDependentScopeSpecifier(SS);
-
+
// The declaration context must be complete.
if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
return;
@@ -277,7 +287,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
if (Found.empty() && !isDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
- if (DeclarationName Corrected = CorrectTypo(Found, S, &SS, LookupCtx,
+ if (DeclarationName Corrected = CorrectTypo(Found, S, &SS, LookupCtx,
false, CTC_CXXCasts)) {
FilterAcceptableTemplateNames(Context, Found);
if (!Found.empty()) {
@@ -318,7 +328,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
LookupOrdinaryName);
LookupName(FoundOuter, S);
FilterAcceptableTemplateNames(Context, FoundOuter);
-
+
if (FoundOuter.empty()) {
// - if the name is not found, the name found in the class of the
// object expression is used, otherwise
@@ -333,7 +343,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
if (!Found.isSingleResult() ||
Found.getFoundDecl()->getCanonicalDecl()
!= FoundOuter.getFoundDecl()->getCanonicalDecl()) {
- Diag(Found.getNameLoc(),
+ Diag(Found.getNameLoc(),
diag::ext_nested_name_member_ref_lookup_ambiguous)
<< Found.getLookupName()
<< ObjectType;
@@ -362,12 +372,12 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
= static_cast<NestedNameSpecifier*>(SS.getScopeRep());
DeclContext *DC = getFunctionLevelDeclContext();
-
+
if (!isAddressOfOperand &&
isa<CXXMethodDecl>(DC) &&
cast<CXXMethodDecl>(DC)->isInstance()) {
QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
-
+
// Since the 'this' expression is synthesized, we don't need to
// perform the double-lookup check.
NamedDecl *FirstQualifierInScope = 0;
@@ -427,35 +437,52 @@ TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) {
return 0;
}
+ParsedTemplateArgument ParsedTemplateArgument::getTemplatePackExpansion(
+ SourceLocation EllipsisLoc) const {
+ assert(Kind == Template &&
+ "Only template template arguments can be pack expansions here");
+ assert(getAsTemplate().get().containsUnexpandedParameterPack() &&
+ "Template template argument pack expansion without packs");
+ ParsedTemplateArgument Result(*this);
+ Result.EllipsisLoc = EllipsisLoc;
+ return Result;
+}
+
static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
const ParsedTemplateArgument &Arg) {
-
+
switch (Arg.getKind()) {
case ParsedTemplateArgument::Type: {
TypeSourceInfo *DI;
QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
- if (!DI)
+ if (!DI)
DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
return TemplateArgumentLoc(TemplateArgument(T), DI);
}
-
+
case ParsedTemplateArgument::NonType: {
Expr *E = static_cast<Expr *>(Arg.getAsExpr());
return TemplateArgumentLoc(TemplateArgument(E), E);
}
-
+
case ParsedTemplateArgument::Template: {
TemplateName Template = Arg.getAsTemplate().get();
- return TemplateArgumentLoc(TemplateArgument(Template),
+ TemplateArgument TArg;
+ if (Arg.getEllipsisLoc().isValid())
+ TArg = TemplateArgument(Template, llvm::Optional<unsigned int>());
+ else
+ TArg = Template;
+ return TemplateArgumentLoc(TArg,
Arg.getScopeSpec().getRange(),
- Arg.getLocation());
+ Arg.getLocation(),
+ Arg.getEllipsisLoc());
}
}
-
+
llvm_unreachable("Unhandled parsed template argument");
return TemplateArgumentLoc();
}
-
+
/// \brief Translates template arguments as provided by the parser
/// into template arguments used by semantic analysis.
void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
@@ -464,7 +491,7 @@ void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
TemplateArgs.addArgument(translateTemplateArgument(*this,
TemplateArgsIn[I]));
}
-
+
/// ActOnTypeParameter - Called when a C++ template type parameter
/// (e.g., "typename T") has been parsed. Typename specifies whether
/// the keyword "typename" was used to declare the type parameter
@@ -512,30 +539,35 @@ Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
IdResolver.AddDecl(Param);
}
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (DefaultArg && Ellipsis) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ DefaultArg = ParsedType();
+ }
+
// Handle the default argument, if provided.
if (DefaultArg) {
TypeSourceInfo *DefaultTInfo;
GetTypeFromParser(DefaultArg, &DefaultTInfo);
-
+
assert(DefaultTInfo && "expected source information for type");
-
- // C++0x [temp.param]p9:
- // A default template-argument may be specified for any kind of
- // template-parameter that is not a template parameter pack.
- if (Ellipsis) {
- Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, DefaultTInfo,
+ UPPC_DefaultArgument))
return Param;
- }
-
+
// Check the template argument itself.
if (CheckTemplateArgument(Param, DefaultTInfo)) {
Param->setInvalidDecl();
return Param;
}
-
+
Param->setDefaultArgument(DefaultTInfo, false);
}
-
+
return Param;
}
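Hoisting the C++0x [temp.param]p9 check ahead of the default-argument processing means the argument is dropped before it is ever type-checked. The rejected form:

    template<typename ...Ts = int>  // error: template parameter pack
    struct X {};                    // cannot have a default argument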
@@ -582,7 +614,7 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
else if (T->isFunctionType())
// FIXME: Keep the type prior to promotion?
return Context.getPointerType(T);
-
+
Diag(Loc, diag::err_template_nontype_parm_bad_type)
<< T;
@@ -617,10 +649,12 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
Invalid = true;
}
+ bool IsParameterPack = D.hasEllipsis();
NonTypeTemplateParmDecl *Param
= NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
D.getIdentifierLoc(),
- Depth, Position, ParamName, T, TInfo);
+ Depth, Position, ParamName, T,
+ IsParameterPack, TInfo);
if (Invalid)
Param->setInvalidDecl();
@@ -629,18 +663,30 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
-
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (Default && IsParameterPack) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = 0;
+ }
+
// Check the well-formedness of the default template argument, if provided.
- if (Default) {
+ if (Default) {
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
+ return Param;
+
TemplateArgument Converted;
if (CheckTemplateArgument(Param, Param->getType(), Default, Converted)) {
Param->setInvalidDecl();
return Param;
}
-
+
Param->setDefaultArgument(Default, false);
}
-
+
return Param;
}
@@ -650,29 +696,46 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
SourceLocation TmpLoc,
TemplateParamsTy *Params,
+ SourceLocation EllipsisLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
- const ParsedTemplateArgument &Default) {
+ ParsedTemplateArgument Default) {
assert(S->isTemplateParamScope() &&
"Template template parameter not in template parameter scope!");
// Construct the parameter object.
+ bool IsParameterPack = EllipsisLoc.isValid();
+ // FIXME: Pack-ness is dropped
TemplateTemplateParmDecl *Param =
TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
- NameLoc.isInvalid()? TmpLoc : NameLoc,
- Depth, Position, Name,
- (TemplateParameterList*)Params);
+ NameLoc.isInvalid()? TmpLoc : NameLoc,
+ Depth, Position, IsParameterPack,
+ Name, Params);
- // If the template template parameter has a name, then link the identifier
+ // If the template template parameter has a name, then link the identifier
// into the scope and lookup mechanisms.
if (Name) {
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
+ if (Params->size() == 0) {
+ Diag(Param->getLocation(), diag::err_template_template_parm_no_parms)
+ << SourceRange(Params->getLAngleLoc(), Params->getRAngleLoc());
+ Param->setInvalidDecl();
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (IsParameterPack && !Default.isInvalid()) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = ParsedTemplateArgument();
+ }
+
if (!Default.isInvalid()) {
// Check only that we have a template template argument. We don't want to
// try to check well-formedness now, because our template template parameter
@@ -688,10 +751,16 @@ Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
<< DefaultArg.getSourceRange();
return Param;
}
-
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg.getLocation(),
+ DefaultArg.getArgument().getAsTemplate(),
+ UPPC_DefaultArgument))
+ return Param;
+
Param->setDefaultArgument(DefaultArg, false);
}
-
+
return Param;
}
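The new empty-parameter-list check rejects a template template parameter that declares no parameters of its own, e.g.:

    template<template<> class X>  // error: template template parameter
    struct Y {};                  // must have its own template parameters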
@@ -708,7 +777,7 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
Diag(ExportLoc, diag::warn_template_export_unsupported);
return TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
- (NamedDecl**)Params, NumParams,
+ (NamedDecl**)Params, NumParams,
RAngleLoc);
}
@@ -765,7 +834,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Previous.isAmbiguous())
return true;
-
+
NamedDecl *PrevDecl = 0;
if (Previous.begin() != Previous.end())
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
@@ -776,12 +845,12 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
= dyn_cast_or_null<ClassTemplateDecl>(PrevDecl);
// We may have found the injected-class-name of a class template,
- // class template partial specialization, or class template specialization.
+ // class template partial specialization, or class template specialization.
// In these cases, grab the template that is being defined or specialized.
- if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
+ if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) {
PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext());
- PrevClassTemplate
+ PrevClassTemplate
= cast<CXXRecordDecl>(PrevDecl)->getDescribedClassTemplate();
if (!PrevClassTemplate && isa<ClassTemplateSpecializationDecl>(PrevDecl)) {
PrevClassTemplate
@@ -792,8 +861,8 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (TUK == TUK_Friend) {
// C++ [namespace.memdef]p3:
- // [...] When looking for a prior declaration of a class or a function
- // declared as a friend, and when the name of the friend class or
+ // [...] When looking for a prior declaration of a class or a function
+ // declared as a friend, and when the name of the friend class or
// function is neither a qualified name nor a template-id, scopes outside
// the innermost enclosing namespace scope are not considered.
if (!SS.isSet()) {
@@ -807,7 +876,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SemanticContext = PrevDecl->getDeclContext();
} else {
// Declarations in outer scopes don't matter. However, the outermost
- // context we computed is the semantic context for our new
+ // context we computed is the semantic context for our new
// declaration.
PrevDecl = PrevClassTemplate = 0;
SemanticContext = OutermostContext;
@@ -823,7 +892,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
} else if (PrevDecl && !isDeclInScope(PrevDecl, SemanticContext, S))
PrevDecl = PrevClassTemplate = 0;
-
+
if (PrevClassTemplate) {
// Ensure that the template parameter lists are compatible.
if (!TemplateParameterListsAreEqual(TemplateParams,
@@ -877,18 +946,22 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// template declaration.
if (CheckTemplateParameterList(TemplateParams,
PrevClassTemplate? PrevClassTemplate->getTemplateParameters() : 0,
- TPC_ClassTemplate))
+ (SS.isSet() && SemanticContext &&
+ SemanticContext->isRecord() &&
+ SemanticContext->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_ClassTemplate))
Invalid = true;
if (SS.isSet()) {
- // If the name of the template was qualified, we must be defining the
+ // If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate &&
!(TUK == TUK_Friend && CurContext->isDependentContext()))
Diag(NameLoc, diag::err_member_def_does_not_match)
<< Name << SemanticContext << SS.getRange();
- }
-
+ }
+
CXXRecordDecl *NewClass =
CXXRecordDecl::Create(Context, Kind, SemanticContext, NameLoc, Name, KWLoc,
PrevClassTemplate?
@@ -908,12 +981,12 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
assert(T->isDependentType() && "Class template type is not dependent?");
(void)T;
- // If we are providing an explicit specialization of a member that is a
+ // If we are providing an explicit specialization of a member that is a
// class template, make a note of that.
- if (PrevClassTemplate &&
+ if (PrevClassTemplate &&
PrevClassTemplate->getInstantiatedFromMemberTemplate())
PrevClassTemplate->setMemberSpecialization();
-
+
// Set the access specifier.
if (!Invalid && TUK != TUK_Friend)
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
@@ -938,16 +1011,16 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
NewTemplate->setObjectOfFriendDecl(/* PreviouslyDeclared = */
PrevClassTemplate != NULL);
-
+
// Friend templates are visible in fairly strange ways.
if (!CurContext->isDependentContext()) {
DeclContext *DC = SemanticContext->getRedeclContext();
DC->makeDeclVisibleInContext(NewTemplate, /* Recoverable = */ false);
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(NewTemplate, EnclosingScope,
- /* AddToContext = */ false);
+ /* AddToContext = */ false);
}
-
+
FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
NewClass->getLocation(),
NewTemplate,
@@ -967,7 +1040,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
/// template parameter, which is ill-formed in certain contexts.
///
/// \returns true if the default template argument should be dropped.
-static bool DiagnoseDefaultTemplateArgument(Sema &S,
+static bool DiagnoseDefaultTemplateArgument(Sema &S,
Sema::TemplateParamListContext TPC,
SourceLocation ParamLoc,
SourceRange DefArgRange) {
@@ -976,14 +1049,18 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
return false;
case Sema::TPC_FunctionTemplate:
- // C++ [temp.param]p9:
+ case Sema::TPC_FriendFunctionTemplateDefinition:
+ // C++ [temp.param]p9:
// A default template-argument shall not be specified in a
// function template declaration or a function template
// definition [...]
- // (This sentence is not in C++0x, per DR226).
+ // If a friend function template declaration specifies a default
+ // template-argument, that declaration shall be a definition and shall be
+ // the only declaration of the function template in the translation unit.
+ // (C++98/03 doesn't have this wording; see DR226).
if (!S.getLangOptions().CPlusPlus0x)
- S.Diag(ParamLoc,
- diag::err_template_parameter_default_in_function_template)
+ S.Diag(ParamLoc,
+ diag::ext_template_parameter_default_in_function_template)
<< DefArgRange;
return false;
@@ -1012,6 +1089,30 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
return false;
}
+/// \brief Check for unexpanded parameter packs within the template parameters
+/// of a template template parameter, recursively.
+bool DiagnoseUnexpandedParameterPacks(Sema &S, TemplateTemplateParmDecl *TTP){
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ NamedDecl *P = Params->getParam(I);
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
+ if (S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
+ NTTP->getTypeSourceInfo(),
+ Sema::UPPC_NonTypeTemplateParameterType))
+ return true;
+
+ continue;
+ }
+
+ if (TemplateTemplateParmDecl *InnerTTP
+ = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (DiagnoseUnexpandedParameterPacks(S, InnerTTP))
+ return true;
+ }
+
+ return false;
+}
+
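The shape of declaration the new recursive walker rejects, sketched with illustrative names:

    template<typename ...Types>
    struct Outer {
      // 'Types' appears unexpanded in the type of a non-type template
      // parameter inside the template template parameter's own list.
      template<template<Types> class TT>   // error: unexpanded pack 'Types'
      struct Inner { };
    };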
/// \brief Checks the validity of a template parameter list, possibly
/// considering the template parameter list from a previous
/// declaration.
@@ -1056,6 +1157,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
if (OldParams)
OldParam = OldParams->begin();
+ bool RemoveDefaultArguments = false;
for (TemplateParameterList::iterator NewParam = NewParams->begin(),
NewParamEnd = NewParams->end();
NewParam != NewParamEnd; ++NewParam) {
@@ -1068,9 +1170,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
bool MissingDefaultArg = false;
// C++0x [temp.param]p11:
- // If a template parameter of a class template is a template parameter pack,
- // it must be the last template parameter.
- if (SawParameterPack) {
+ // If a template parameter of a primary class template is a template
+ // parameter pack, it shall be the last template parameter.
+ if (SawParameterPack && TPC == TPC_ClassTemplate) {
Diag(ParameterPackLoc,
diag::err_template_param_pack_must_be_last_template_parameter);
Invalid = true;
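A sketch of the narrowed rule: only primary class templates still require the pack to come last.

    template<typename ...Ts, typename T>
    struct A;                        // still an error: pack not last

    template<typename ...Ts, typename T>
    void f(T, Ts ...rest);           // now accepted: not TPC_ClassTemplate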
@@ -1079,9 +1181,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
if (TemplateTypeParmDecl *NewTypeParm
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
// Check the presence of a default argument here.
- if (NewTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewTypeParm->getLocation(),
+ if (NewTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTypeParm->getLocation(),
NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
.getSourceRange()))
NewTypeParm->removeDefaultArgument();
@@ -1116,10 +1218,18 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
MissingDefaultArg = true;
} else if (NonTypeTemplateParmDecl *NewNonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getTypeSourceInfo(),
+ UPPC_NonTypeTemplateParameterType)) {
+ Invalid = true;
+ continue;
+ }
+
// Check the presence of a default argument here.
- if (NewNonTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewNonTypeParm->getLocation(),
+ if (NewNonTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewNonTypeParm->getLocation(),
NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
NewNonTypeParm->removeDefaultArgument();
}
@@ -1127,7 +1237,12 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// Merge default arguments for non-type template parameters
NonTypeTemplateParmDecl *OldNonTypeParm
= OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : 0;
- if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
+ if (NewNonTypeParm->isParameterPack()) {
+ assert(!NewNonTypeParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ ParameterPackLoc = NewNonTypeParm->getLocation();
+ } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
NewNonTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
@@ -1139,7 +1254,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// new declaration.
SawDefaultArgument = true;
// FIXME: We need to create a new kind of "default argument"
- // expression that points to a previous template template
+ // expression that points to a previous non-type template
// parameter.
NewNonTypeParm->setDefaultArgument(
OldNonTypeParm->getDefaultArgument(),
@@ -1154,16 +1269,28 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// Check the presence of a default argument here.
TemplateTemplateParmDecl *NewTemplateParm
= cast<TemplateTemplateParmDecl>(*NewParam);
- if (NewTemplateParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewTemplateParm->getLocation(),
+
+ // Check for unexpanded parameter packs, recursively.
+ if (DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
+ Invalid = true;
+ continue;
+ }
+
+ if (NewTemplateParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTemplateParm->getLocation(),
NewTemplateParm->getDefaultArgument().getSourceRange()))
NewTemplateParm->removeDefaultArgument();
// Merge default arguments for template template parameters
TemplateTemplateParmDecl *OldTemplateParm
= OldParams? cast<TemplateTemplateParmDecl>(*OldParam) : 0;
- if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
+ if (NewTemplateParm->isParameterPack()) {
+ assert(!NewTemplateParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ ParameterPackLoc = NewTemplateParm->getLocation();
+ } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
NewTemplateParm->hasDefaultArgument()) {
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
@@ -1196,15 +1323,17 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
Invalid = true;
- } else if (MissingDefaultArg) {
+ } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
// C++ [temp.param]p11:
- // If a template-parameter has a default template-argument,
- // all subsequent template-parameters shall have a default
- // template-argument supplied.
+ // If a template-parameter of a class template has a default
+ // template-argument, each subsequent template-parameter shall either
+ // have a default template-argument supplied or be a template parameter
+ // pack.
Diag((*NewParam)->getLocation(),
diag::err_template_param_default_arg_missing);
Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
Invalid = true;
+ RemoveDefaultArguments = true;
}
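The updated wording and the new recovery flag, sketched with illustrative declarations:

    template<typename T = int, typename ...Ts>
    struct A;        // OK: a parameter pack may follow a defaulted parameter

    template<typename T = int, typename U>
    struct B;        // error; RemoveDefaultArguments then strips T's default
                     // below to avoid cascading diagnostics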
// If we have an old template parameter list that we're merging
@@ -1213,9 +1342,89 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
++OldParam;
}
+ // We were missing some default arguments at the end of the list, so remove
+ // all of the default arguments.
+ if (RemoveDefaultArguments) {
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*NewParam))
+ TTP->removeDefaultArgument();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam))
+ NTTP->removeDefaultArgument();
+ else
+ cast<TemplateTemplateParmDecl>(*NewParam)->removeDefaultArgument();
+ }
+ }
+
return Invalid;
}
+namespace {
+
+/// A class which looks for a use of a certain level of template
+/// parameter.
+struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
+ typedef RecursiveASTVisitor<DependencyChecker> super;
+
+ unsigned Depth;
+ bool Match;
+
+ DependencyChecker(TemplateParameterList *Params) : Match(false) {
+ NamedDecl *ND = Params->getParam(0);
+ if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else {
+ Depth = cast<TemplateTemplateParmDecl>(ND)->getDepth();
+ }
+ }
+
+ bool Matches(unsigned ParmDepth) {
+ if (ParmDepth >= Depth) {
+ Match = true;
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ return !Matches(T->getDepth());
+ }
+
+ bool TraverseTemplateName(TemplateName N) {
+ if (TemplateTemplateParmDecl *PD =
+ dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl()))
+ if (Matches(PD->getDepth())) return false;
+ return super::TraverseTemplateName(N);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(E->getDecl())) {
+ if (PD->getDepth() == Depth) {
+ Match = true;
+ return false;
+ }
+ }
+ return super::VisitDeclRefExpr(E);
+ }
+};
+}
+
+/// Determines whether a template-id depends on the given parameter
+/// list.
+static bool
+DependsOnTemplateParameters(const TemplateSpecializationType *TemplateId,
+ TemplateParameterList *Params) {
+ DependencyChecker Checker(Params);
+ Checker.TraverseType(QualType(TemplateId, 0));
+ return Checker.Match;
+}
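Plausibly the shape of friend declaration this enables (a sketch, not from the patch): the checker sees that Outer<int> never uses U, so the <typename U> list is left to match Inner.

    template<typename T> struct Outer {
      template<typename U> struct Inner;
    };

    struct S {
      template<typename U> friend struct Outer<int>::Inner;
    };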
+
/// \brief Match the given template parameter lists to the given scope
/// specifier, returning the template parameter list that applies to the
/// name.
@@ -1254,7 +1463,7 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
bool &IsExplicitSpecialization,
bool &Invalid) {
IsExplicitSpecialization = false;
-
+
// Find the template-ids that occur within the nested-name-specifier. These
// template-ids will match up with the template parameter lists.
llvm::SmallVector<const TemplateSpecializationType *, 4>
@@ -1275,8 +1484,8 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
//
// Following the existing practice of GNU and EDG, we allow a typedef of a
// template specialization type.
- if (const TypedefType *TT = dyn_cast<TypedefType>(T))
- T = TT->LookThroughTypedefs().getTypePtr();
+ while (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ T = TT->getDecl()->getUnderlyingType().getTypePtr();
if (const TemplateSpecializationType *SpecType
= dyn_cast<TemplateSpecializationType>(T)) {
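With the loop, chains of typedefs unwrap one declaration at a time; a sketch of the GNU/EDG-style usage this supports:

    template<typename T> struct X { void f(); };
    typedef X<int> A;
    typedef A B;                 // two levels deep

    template<> void B::f() { }   // both typedefs unwrap to X<int>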
@@ -1309,12 +1518,24 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
// Match the template-ids found in the specifier to the template parameter
// lists.
- unsigned Idx = 0;
+ unsigned ParamIdx = 0, TemplateIdx = 0;
for (unsigned NumTemplateIds = TemplateIdsInSpecifier.size();
- Idx != NumTemplateIds; ++Idx) {
- QualType TemplateId = QualType(TemplateIdsInSpecifier[Idx], 0);
+ TemplateIdx != NumTemplateIds; ++TemplateIdx) {
+ const TemplateSpecializationType *TemplateId
+ = TemplateIdsInSpecifier[TemplateIdx];
bool DependentTemplateId = TemplateId->isDependentType();
- if (Idx >= NumParamLists) {
+
+ // In friend declarations we can have template-ids which don't
+ // depend on the corresponding template parameter lists. But
+ // assume that empty parameter lists are supposed to match this
+ // template-id.
+ if (IsFriend && ParamIdx < NumParamLists && ParamLists[ParamIdx]->size()) {
+ if (!DependentTemplateId ||
+ !DependsOnTemplateParameters(TemplateId, ParamLists[ParamIdx]))
+ continue;
+ }
+
+ if (ParamIdx >= NumParamLists) {
// We have a template-id without a corresponding template parameter
// list.
@@ -1328,7 +1549,7 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
// FIXME: the location information here isn't great.
Diag(SS.getRange().getBegin(),
diag::err_template_spec_needs_template_parameters)
- << TemplateId
+ << QualType(TemplateId, 0)
<< SS.getRange();
Invalid = true;
} else {
@@ -1357,35 +1578,38 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
}
if (ExpectedTemplateParams)
- TemplateParameterListsAreEqual(ParamLists[Idx],
+ TemplateParameterListsAreEqual(ParamLists[ParamIdx],
ExpectedTemplateParams,
true, TPL_TemplateMatch);
- CheckTemplateParameterList(ParamLists[Idx], 0, TPC_ClassTemplateMember);
- } else if (ParamLists[Idx]->size() > 0)
- Diag(ParamLists[Idx]->getTemplateLoc(),
+ CheckTemplateParameterList(ParamLists[ParamIdx], 0,
+ TPC_ClassTemplateMember);
+ } else if (ParamLists[ParamIdx]->size() > 0)
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
diag::err_template_param_list_matches_nontemplate)
<< TemplateId
- << ParamLists[Idx]->getSourceRange();
+ << ParamLists[ParamIdx]->getSourceRange();
else
IsExplicitSpecialization = true;
+
+ ++ParamIdx;
}
// If there were at least as many template-ids as there were template
// parameter lists, then there are no template parameter lists remaining for
// the declaration itself.
- if (Idx >= NumParamLists)
+ if (ParamIdx >= NumParamLists)
return 0;
// If there were too many template parameter lists, complain about that now.
- if (Idx != NumParamLists - 1) {
- while (Idx < NumParamLists - 1) {
- bool isExplicitSpecHeader = ParamLists[Idx]->size() == 0;
- Diag(ParamLists[Idx]->getTemplateLoc(),
+ if (ParamIdx != NumParamLists - 1) {
+ while (ParamIdx < NumParamLists - 1) {
+ bool isExplicitSpecHeader = ParamLists[ParamIdx]->size() == 0;
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
isExplicitSpecHeader? diag::warn_template_spec_extra_headers
: diag::err_template_spec_extra_headers)
- << SourceRange(ParamLists[Idx]->getTemplateLoc(),
- ParamLists[Idx]->getRAngleLoc());
+ << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
+ ParamLists[ParamIdx]->getRAngleLoc());
if (isExplicitSpecHeader && !ExplicitSpecializationsInSpecifier.empty()) {
Diag(ExplicitSpecializationsInSpecifier.back()->getLocation(),
@@ -1394,13 +1618,13 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
ExplicitSpecializationsInSpecifier.pop_back();
}
- // We have a template parameter list with no corresponding scope, which
+ // We have a template parameter list with no corresponding scope, which
// means that the resulting template declaration can't be instantiated
// properly (we'll end up with dependent nodes when we shouldn't).
if (!isExplicitSpecHeader)
Invalid = true;
-
- ++Idx;
+
+ ++ParamIdx;
}
}
@@ -1421,14 +1645,12 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Check that the template argument list is well-formed for this
// template.
- TemplateArgumentListBuilder Converted(Template->getTemplateParameters(),
- TemplateArgs.size());
+ llvm::SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
false, Converted))
return QualType();
- assert((Converted.structuredSize() ==
- Template->getTemplateParameters()->size()) &&
+ assert((Converted.size() == Template->getTemplateParameters()->size()) &&
"Converted template argument list is too short!");
QualType CanonType;
@@ -1445,8 +1667,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// template<typename T, typename U = T> struct A;
TemplateName CanonName = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(CanonName,
- Converted.getFlatArguments(),
- Converted.flatSize());
+ Converted.data(),
+ Converted.size());
// FIXME: CanonType is not actually the canonical type, and unfortunately
// it is a TemplateSpecializationType that we will never use again.
@@ -1474,7 +1696,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
if (!isa<ClassTemplatePartialSpecializationDecl>(Record) &&
!Record->getDescribedClassTemplate())
continue;
-
+
// Fetch the injected class name type and check whether its
// injected type is equal to the type we just built.
QualType ICNT = Context.getTypeDeclType(Record);
@@ -1497,8 +1719,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// corresponds to these arguments.
void *InsertPos = 0;
ClassTemplateSpecializationDecl *Decl
- = ClassTemplate->findSpecialization(Converted.getFlatArguments(),
- Converted.flatSize(), InsertPos);
+ = ClassTemplate->findSpecialization(Converted.data(), Converted.size(),
+ InsertPos);
if (!Decl) {
// This is the first time we have referenced this class template
// specialization. Create the canonical declaration and add it to
@@ -1507,8 +1729,9 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
ClassTemplate->getTemplatedDecl()->getTagKind(),
ClassTemplate->getDeclContext(),
ClassTemplate->getLocation(),
- ClassTemplate,
- Converted, 0);
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(), 0);
ClassTemplate->AddSpecialization(Decl, InsertPos);
Decl->setLexicalDeclContext(CurContext);
}
@@ -1553,14 +1776,14 @@ Sema::ActOnTemplateIdType(TemplateTy TemplateD, SourceLocation TemplateLoc,
return CreateParsedType(Result, DI);
}
-TypeResult Sema::ActOnTagTemplateIdType(TypeResult TypeResult,
+TypeResult Sema::ActOnTagTemplateIdType(CXXScopeSpec &SS,
+ TypeResult TypeResult,
TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc) {
if (TypeResult.isInvalid())
return ::TypeResult();
- // FIXME: preserve source info, ideally without copying the DI.
TypeSourceInfo *DI;
QualType Type = GetTypeFromParser(TypeResult.get(), &DI);
@@ -1585,7 +1808,12 @@ TypeResult Sema::ActOnTagTemplateIdType(TypeResult TypeResult,
= TypeWithKeyword::getKeywordForTagTypeKind(TagKind);
QualType ElabType = Context.getElaboratedType(Keyword, /*NNS=*/0, Type);
- return ParsedType::make(ElabType);
+ TypeSourceInfo *ElabDI = Context.CreateTypeSourceInfo(ElabType);
+ ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(ElabDI->getTypeLoc());
+ TL.setKeywordLoc(TagLoc);
+ TL.setQualifierRange(SS.getRange());
+ TL.getNamedTypeLoc().initializeFullCopy(DI->getTypeLoc());
+ return CreateParsedType(ElabType, ElabDI);
}
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
@@ -1596,6 +1824,11 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// template arguments that we have against the template name, if the template
// name refers to a single template. That's not a terribly common case,
// though.
+  // In principle, foo<int> could identify a single function template
+  // unambiguously, but that approach does not work: a call such as
+  // f<int>(1) is resolved before we would fall back to overload
+  // resolution, e.g. given
+  //   template<class T> void f(double);
+  //   template<class T, class U> void f(U);
// These should be filtered out by our callers.
assert(!R.empty() && "empty lookup results when building templateid");
@@ -1610,15 +1843,12 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// We don't want lookup warnings at this point.
R.suppressDiagnostics();
-
- bool Dependent
- = UnresolvedLookupExpr::ComputeDependence(R.begin(), R.end(),
- &TemplateArgs);
+
UnresolvedLookupExpr *ULE
- = UnresolvedLookupExpr::Create(Context, Dependent, R.getNamingClass(),
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
Qualifier, QualifierRange,
R.getLookupNameInfo(),
- RequiresADL, TemplateArgs,
+ RequiresADL, TemplateArgs,
R.begin(), R.end());
return Owned(ULE);
@@ -1642,7 +1872,7 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
if (R.isAmbiguous())
return ExprError();
-
+
if (R.empty()) {
Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_non_template)
<< NameInfo.getName() << SS.getRange();
@@ -1667,7 +1897,7 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
/// example, given "MetaFun::template apply", the scope specifier \p
/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
-TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
+TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
SourceLocation TemplateKWLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
@@ -1677,8 +1907,8 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent() &&
!getLangOptions().CPlusPlus0x)
Diag(TemplateKWLoc, diag::ext_template_outside_of_template)
- << FixItHint::CreateRemoval(TemplateKWLoc);
-
+ << FixItHint::CreateRemoval(TemplateKWLoc);
+
DeclContext *LookupCtx = 0;
if (SS.isSet())
LookupCtx = computeDeclContext(SS, EnteringContext);
@@ -1710,7 +1940,7 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()) {
// This is a dependent template. Handle it below.
} else if (TNK == TNK_Non_template) {
- Diag(Name.getSourceRange().getBegin(),
+ Diag(Name.getSourceRange().getBegin(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name).getName()
<< Name.getSourceRange()
@@ -1724,13 +1954,13 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
NestedNameSpecifier *Qualifier
= static_cast<NestedNameSpecifier *>(SS.getScopeRep());
-
+
switch (Name.getKind()) {
case UnqualifiedId::IK_Identifier:
- Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.Identifier));
return TNK_Dependent_template_name;
-
+
case UnqualifiedId::IK_OperatorFunctionId:
Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.OperatorFunctionId.Operator));
@@ -1742,8 +1972,8 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
default:
break;
}
-
- Diag(Name.getSourceRange().getBegin(),
+
+ Diag(Name.getSourceRange().getBegin(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name).getName()
<< Name.getSourceRange()
@@ -1753,7 +1983,7 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
const TemplateArgumentLoc &AL,
- TemplateArgumentListBuilder &Converted) {
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
const TemplateArgument &Arg = AL.getArgument();
// Check template type parameter.
@@ -1790,7 +2020,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
return true;
// Add the converted template type argument.
- Converted.Append(
+ Converted.push_back(
TemplateArgument(Context.getCanonicalType(Arg.getAsType())));
return false;
}
@@ -1801,7 +2031,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
-/// \param Template the template that we are synthesizing template arguments
+/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
@@ -1823,23 +2053,23 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
TemplateTypeParmDecl *Param,
- TemplateArgumentListBuilder &Converted) {
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
if (ArgType->getType()->isDependentType()) {
- TemplateArgumentList TemplateArgs(SemaRef.Context, Converted,
- /*TakeArgs=*/false);
-
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
MultiLevelTemplateArgumentList AllTemplateArgs
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.getFlatArguments(),
- Converted.flatSize(),
+ Template, Converted.data(),
+ Converted.size(),
SourceRange(TemplateLoc, RAngleLoc));
-
+
ArgType = SemaRef.SubstType(ArgType, AllTemplateArgs,
Param->getDefaultArgumentLoc(),
Param->getDeclName());
@@ -1854,7 +2084,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
-/// \param Template the template that we are synthesizing template arguments
+/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
@@ -1876,16 +2106,16 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
NonTypeTemplateParmDecl *Param,
- TemplateArgumentListBuilder &Converted) {
- TemplateArgumentList TemplateArgs(SemaRef.Context, Converted,
- /*TakeArgs=*/false);
-
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
MultiLevelTemplateArgumentList AllTemplateArgs
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
-
+
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.getFlatArguments(),
- Converted.flatSize(),
+ Template, Converted.data(),
+ Converted.size(),
SourceRange(TemplateLoc, RAngleLoc));
return SemaRef.SubstExpr(Param->getDefaultArgument(), AllTemplateArgs);
@@ -1897,7 +2127,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
-/// \param Template the template that we are synthesizing template arguments
+/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
@@ -1919,34 +2149,34 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
TemplateTemplateParmDecl *Param,
- TemplateArgumentListBuilder &Converted) {
- TemplateArgumentList TemplateArgs(SemaRef.Context, Converted,
- /*TakeArgs=*/false);
-
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
MultiLevelTemplateArgumentList AllTemplateArgs
= SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
-
+
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Template, Converted.getFlatArguments(),
- Converted.flatSize(),
+ Template, Converted.data(),
+ Converted.size(),
SourceRange(TemplateLoc, RAngleLoc));
-
+
return SemaRef.SubstTemplateName(
Param->getDefaultArgument().getArgument().getAsTemplate(),
- Param->getDefaultArgument().getTemplateNameLoc(),
+ Param->getDefaultArgument().getTemplateNameLoc(),
AllTemplateArgs);
}
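The substitution these three helpers perform, shown in its simplest (type-parameter) form:

    template<typename T, typename U = T*> struct P;
    P<int> p;   // U's default 'T*' is substituted against {T = int},
                // yielding U = int*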
/// \brief If the given template parameter has a default template
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
-TemplateArgumentLoc
+TemplateArgumentLoc
Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
- TemplateArgumentListBuilder &Converted) {
- if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
+ if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (!TypeParm->hasDefaultArgument())
return TemplateArgumentLoc();
@@ -1984,45 +2214,75 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
return TemplateArgumentLoc();
TemplateName TName = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
+ TemplateLoc,
RAngleLoc,
TempTempParm,
Converted);
if (TName.isNull())
return TemplateArgumentLoc();
- return TemplateArgumentLoc(TemplateArgument(TName),
+ return TemplateArgumentLoc(TemplateArgument(TName),
TempTempParm->getDefaultArgument().getTemplateQualifierRange(),
TempTempParm->getDefaultArgument().getTemplateNameLoc());
}
/// \brief Check that the given template argument corresponds to the given
/// template parameter.
+///
+/// \param Param The template parameter against which the argument will be
+/// checked.
+///
+/// \param Arg The template argument.
+///
+/// \param Template The template in which the template argument resides.
+///
+/// \param TemplateLoc The location of the template name for the template
+/// whose argument list we're matching.
+///
+/// \param RAngleLoc The location of the right angle bracket ('>') that closes
+/// the template argument list.
+///
+/// \param ArgumentPackIndex The index into the argument pack where this
+/// argument will be placed. Only valid if the parameter is a parameter pack.
+///
+/// \param Converted The checked, converted argument will be added to the
+/// end of this small vector.
+///
+/// \param CTAK Describes how we arrived at this particular template argument:
+/// explicitly written, deduced, etc.
+///
+/// \returns true on error, false otherwise.
bool Sema::CheckTemplateArgument(NamedDecl *Param,
const TemplateArgumentLoc &Arg,
- TemplateDecl *Template,
+ NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
- TemplateArgumentListBuilder &Converted,
+ unsigned ArgumentPackIndex,
+ llvm::SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK) {
// Check template type parameters.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
return CheckTemplateTypeArgument(TTP, Arg, Converted);
-
+
// Check non-type template parameters.
- if (NonTypeTemplateParmDecl *NTTP =dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (NonTypeTemplateParmDecl *NTTP =dyn_cast<NonTypeTemplateParmDecl>(Param)) {
// Do substitution on the type of the non-type template parameter
- // with the template arguments we've seen thus far.
+ // with the template arguments we've seen thus far. But if the
+ // template has a dependent context then we cannot substitute yet.
QualType NTTPType = NTTP->getType();
- if (NTTPType->isDependentType()) {
+ if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack())
+ NTTPType = NTTP->getExpansionType(ArgumentPackIndex);
+
+ if (NTTPType->isDependentType() &&
+ !isa<TemplateTemplateParmDecl>(Template) &&
+ !Template->getDeclContext()->isDependentContext()) {
// Do substitution on the type of the non-type template parameter.
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- NTTP, Converted.getFlatArguments(),
- Converted.flatSize(),
+ NTTP, Converted.data(), Converted.size(),
SourceRange(TemplateLoc, RAngleLoc));
-
- TemplateArgumentList TemplateArgs(Context, Converted,
- /*TakeArgs=*/false);
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
NTTPType = SubstType(NTTPType,
MultiLevelTemplateArgumentList(TemplateArgs),
NTTP->getLocation(),
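The expanded-pack case queried above arises from declarations of this shape (a sketch following the example in [temp.param]):

    template<typename ...Types>
    struct Outer {
      // Values expands Types; once Outer<int, char> is instantiated, it is
      // an "expanded" pack with one known type per element.
      template<Types ...Values> struct Inner { };
    };

    Outer<int, char>::Inner<1, 'a'> i;   // getExpansionType(0) == int,
                                         // getExpansionType(1) == char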
@@ -2035,34 +2295,36 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
if (NTTPType.isNull())
return true;
}
-
+
switch (Arg.getArgument().getKind()) {
case TemplateArgument::Null:
assert(false && "Should never see a NULL template argument here");
return true;
-
+
case TemplateArgument::Expression: {
Expr *E = Arg.getArgument().getAsExpr();
TemplateArgument Result;
if (CheckTemplateArgument(NTTP, NTTPType, E, Result, CTAK))
return true;
-
- Converted.Append(Result);
+
+ Converted.push_back(Result);
break;
}
-
+
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
// We've already checked this template argument, so just copy
// it to the list of converted arguments.
- Converted.Append(Arg.getArgument());
+ Converted.push_back(Arg.getArgument());
break;
-
+
case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
// We were given a template template argument. It may not be ill-formed;
// see below.
if (DependentTemplateName *DTN
- = Arg.getArgument().getAsTemplate().getAsDependentTemplateName()) {
+ = Arg.getArgument().getAsTemplateOrTemplatePattern()
+ .getAsDependentTemplateName()) {
// We have a template argument such as \c T::template X, which we
// parsed as a template template argument. However, since we now
// know that we need a non-type template argument, convert this
@@ -2075,28 +2337,39 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
DTN->getQualifier(),
Arg.getTemplateQualifierRange(),
NameInfo);
-
+
+ // If we parsed the template argument as a pack expansion, create a
+ // pack expansion expression.
+ if (Arg.getArgument().getKind() == TemplateArgument::TemplateExpansion){
+ ExprResult Expansion = ActOnPackExpansion(E,
+ Arg.getTemplateEllipsisLoc());
+ if (Expansion.isInvalid())
+ return true;
+
+ E = Expansion.get();
+ }
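A heavily hedged sketch of this recovery: each 'Ts::template node' parses as a template name, is rebuilt as a dependent expression, and the ellipsis is then re-applied ('node' is a hypothetical member):

    template<int ...Ns> struct IntList { };

    template<typename ...Ts> struct Gather {
      IntList<Ts::template node...> l;
    };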
+
TemplateArgument Result;
if (CheckTemplateArgument(NTTP, NTTPType, E, Result))
return true;
-
- Converted.Append(Result);
+
+ Converted.push_back(Result);
break;
}
-
+
// We have a template argument that actually does refer to a class
// template, template alias, or template template parameter, and
// therefore cannot be a non-type template argument.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr)
<< Arg.getSourceRange();
-
+
Diag(Param->getLocation(), diag::note_template_param_here);
return true;
-
+
case TemplateArgument::Type: {
// We have a non-type template parameter but the template
// argument is a type.
-
+
// C++ [temp.arg]p2:
// In a template-argument, an ambiguity between a type-id and
// an expression is resolved to a type-id, regardless of the
@@ -2113,19 +2386,19 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
-
+
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
break;
}
-
+
return false;
- }
-
-
+ }
+
+
// Check template template parameters.
TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param);
-
+
// Substitute into the template parameter list of the template
// template parameter, since previously-supplied template arguments
// may appear within the template template parameter.
@@ -2133,40 +2406,38 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// Set up a template instantiation context.
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- TempParm, Converted.getFlatArguments(),
- Converted.flatSize(),
+ TempParm, Converted.data(), Converted.size(),
SourceRange(TemplateLoc, RAngleLoc));
-
- TemplateArgumentList TemplateArgs(Context, Converted,
- /*TakeArgs=*/false);
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
TempParm = cast_or_null<TemplateTemplateParmDecl>(
- SubstDecl(TempParm, CurContext,
+ SubstDecl(TempParm, CurContext,
MultiLevelTemplateArgumentList(TemplateArgs)));
if (!TempParm)
return true;
-
- // FIXME: TempParam is leaked.
}
-
+
switch (Arg.getArgument().getKind()) {
case TemplateArgument::Null:
assert(false && "Should never see a NULL template argument here");
return true;
-
+
case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
if (CheckTemplateArgument(TempParm, Arg))
return true;
-
- Converted.Append(Arg.getArgument());
+
+ Converted.push_back(Arg.getArgument());
break;
-
+
case TemplateArgument::Expression:
case TemplateArgument::Type:
// We have a template template parameter but the template
// argument does not refer to a template.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_template);
return true;
-
+
case TemplateArgument::Declaration:
llvm_unreachable(
"Declaration argument with template template parameter");
@@ -2175,12 +2446,12 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
llvm_unreachable(
"Integral argument with template template parameter");
break;
-
+
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
break;
}
-
+
return false;
}
@@ -2190,7 +2461,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
- TemplateArgumentListBuilder &Converted) {
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
TemplateParameterList *Params = Template->getTemplateParameters();
unsigned NumParams = Params->size();
unsigned NumArgs = TemplateArgs.size();
@@ -2226,44 +2497,67 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// a template-id shall match the type and form specified for the
// corresponding parameter declared by the template in its
// template-parameter-list.
+ llvm::SmallVector<TemplateArgument, 2> ArgumentPack;
+ TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
unsigned ArgIdx = 0;
- for (TemplateParameterList::iterator Param = Params->begin(),
- ParamEnd = Params->end();
- Param != ParamEnd; ++Param, ++ArgIdx) {
+ LocalInstantiationScope InstScope(*this, true);
+ while (Param != ParamEnd) {
if (ArgIdx > NumArgs && PartialTemplateArgs)
break;
- // If we have a template parameter pack, check every remaining template
- // argument against that template parameter pack.
- if ((*Param)->isTemplateParameterPack()) {
- Converted.BeginPack();
- for (; ArgIdx < NumArgs; ++ArgIdx) {
- if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template,
- TemplateLoc, RAngleLoc, Converted)) {
- Invalid = true;
- break;
+ if (ArgIdx < NumArgs) {
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (NTTP->isExpandedParameterPack() &&
+ ArgumentPack.size() >= NTTP->getNumExpansionTypes()) {
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << true
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
}
}
- Converted.EndPack();
- continue;
- }
-
- if (ArgIdx < NumArgs) {
+
// Check the template argument we were given.
- if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template,
- TemplateLoc, RAngleLoc, Converted))
+ if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template,
+ TemplateLoc, RAngleLoc,
+ ArgumentPack.size(), Converted))
return true;
-
+
+ if ((*Param)->isTemplateParameterPack()) {
+ // The template parameter was a template parameter pack, so take the
+ // deduced argument and place it on the argument pack. Note that we
+ // stay on the same template parameter so that we can deduce more
+ // arguments.
+ ArgumentPack.push_back(Converted.back());
+ Converted.pop_back();
+ } else {
+ // Move to the next template parameter.
+ ++Param;
+ }
+ ++ArgIdx;
continue;
}
-
+
+ // If we have a template parameter pack with no more corresponding
+ // arguments, just break out now and we'll fill in the argument pack below.
+ if ((*Param)->isTemplateParameterPack())
+ break;
+
// We have a default template argument that we will use.
TemplateArgumentLoc Arg;
-
+
// Retrieve the default template argument from the template
// parameter. For each kind of template parameter, we substitute the
// template arguments provided thus far and any "outer" template arguments
- // (when the template parameter was part of a nested template) into
+ // (when the template parameter was part of a nested template) into
// the default argument.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
if (!TTP->hasDefaultArgument()) {
@@ -2271,7 +2565,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
break;
}
- TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
+ TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
Template,
TemplateLoc,
RAngleLoc,
@@ -2279,7 +2573,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
Converted);
if (!ArgType)
return true;
-
+
Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
@@ -2290,9 +2584,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
}
ExprResult E = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- NTTP,
+ TemplateLoc,
+ RAngleLoc,
+ NTTP,
Converted);
if (E.isInvalid())
return true;
@@ -2309,34 +2603,282 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
}
TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
+ TemplateLoc,
+ RAngleLoc,
TempParm,
Converted);
if (Name.isNull())
return true;
-
- Arg = TemplateArgumentLoc(TemplateArgument(Name),
+
+ Arg = TemplateArgumentLoc(TemplateArgument(Name),
TempParm->getDefaultArgument().getTemplateQualifierRange(),
TempParm->getDefaultArgument().getTemplateNameLoc());
}
-
+
// Introduce an instantiation record that describes where we are using
// the default template argument.
InstantiatingTemplate Instantiating(*this, RAngleLoc, Template, *Param,
- Converted.getFlatArguments(),
- Converted.flatSize(),
- SourceRange(TemplateLoc, RAngleLoc));
-
+ Converted.data(), Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
// Check the default template argument.
if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
- RAngleLoc, Converted))
+ RAngleLoc, 0, Converted))
return true;
+
+ // Move to the next template parameter and argument.
+ ++Param;
+ ++ArgIdx;
+ }
+
+ // Form argument packs for each of the parameter packs remaining.
+ while (Param != ParamEnd) {
+  // If we're checking a partial list of template arguments, represent a
+  // parameter pack that received no arguments as a null template argument
+  // rather than as an empty pack.
+
+ if ((*Param)->isTemplateParameterPack()) {
+ if (PartialTemplateArgs && ArgumentPack.empty()) {
+ Converted.push_back(TemplateArgument());
+ } else if (ArgumentPack.empty())
+ Converted.push_back(TemplateArgument(0, 0));
+ else {
+ Converted.push_back(TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+ }
+ }
+
+ ++Param;
}
return Invalid;
}
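How the rewritten loop consumes arguments, sketched:

    template<typename T, typename ...Rest> struct Tuple { };

    Tuple<int, float, double> t;
    // T takes 'int'; the loop then stays on Rest, pushing 'float' and
    // 'double' onto ArgumentPack before forming one pack argument at the end.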
+namespace {
+ class UnnamedLocalNoLinkageFinder
+ : public TypeVisitor<UnnamedLocalNoLinkageFinder, bool>
+ {
+ Sema &S;
+ SourceRange SR;
+
+ typedef TypeVisitor<UnnamedLocalNoLinkageFinder, bool> inherited;
+
+ public:
+ UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }
+
+ bool Visit(QualType T) {
+ return inherited::Visit(T.getTypePtr());
+ }
+
+#define TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *);
+#define ABSTRACT_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#define NON_CANONICAL_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#include "clang/AST/TypeNodes.def"
+
+ bool VisitTagDecl(const TagDecl *Tag);
+ bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ };
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBuiltinType(const BuiltinType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitComplexType(const ComplexType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPointerType(const PointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBlockPointerType(
+ const BlockPointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitLValueReferenceType(
+ const LValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRValueReferenceType(
+ const RValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitMemberPointerType(
+ const MemberPointerType* T) {
+ return Visit(T->getPointeeType()) || Visit(QualType(T->getClass(), 0));
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitConstantArrayType(
+ const ConstantArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitIncompleteArrayType(
+ const IncompleteArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVariableArrayType(
+ const VariableArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedArrayType(
+ const DependentSizedArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
+ const FunctionProtoType* T) {
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ if (Visit(*A))
+ return true;
+ }
+
+ return Visit(T->getResultType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
+ const FunctionNoProtoType* T) {
+ return Visit(T->getResultType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
+ const UnresolvedUsingType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
+ return Visit(T->getUnderlyingType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitAutoType(const AutoType *T) {
+ return Visit(T->getDeducedType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
+ const TemplateTypeParmType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
+ const TemplateSpecializationType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
+ const InjectedClassNameType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
+ const DependentNameType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
+ const PackExpansionType* T) {
+ return Visit(T->getPattern());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectType(const ObjCObjectType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCInterfaceType(
+ const ObjCInterfaceType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectPointerType(
+ const ObjCObjectPointerType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
+ if (Tag->getDeclContext()->isFunctionOrMethod()) {
+ S.Diag(SR.getBegin(), diag::ext_template_arg_local_type)
+ << S.Context.getTypeDeclType(Tag) << SR;
+ return true;
+ }
+
+ if (!Tag->getDeclName() && !Tag->getTypedefForAnonDecl()) {
+ S.Diag(SR.getBegin(), diag::ext_template_arg_unnamed_type) << SR;
+ S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
+ return true;
+ }
+
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
+ NestedNameSpecifier *NNS) {
+ if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
+ return true;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::Global:
+ return false;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ return Visit(QualType(NNS->getAsType(), 0));
+ }
+ return false;
+}
+
+
/// \brief Check a template argument against its corresponding
/// template type parameter.
///
@@ -2346,36 +2888,24 @@ bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *ArgInfo) {
assert(ArgInfo && "invalid TypeSourceInfo");
QualType Arg = ArgInfo->getType();
+ SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
+
+ if (Arg->isVariablyModifiedType()) {
+ return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
+ } else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
+ return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
+ }
// C++03 [temp.arg.type]p2:
// A local type, a type with no linkage, an unnamed type or a type
// compounded from any of these types shall not be used as a
// template-argument for a template type-parameter.
+ //
// C++0x allows these, and even in C++03 we allow them as an extension with
// a warning.
- SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
- if (!LangOpts.CPlusPlus0x) {
- const TagType *Tag = 0;
- if (const EnumType *EnumT = Arg->getAs<EnumType>())
- Tag = EnumT;
- else if (const RecordType *RecordT = Arg->getAs<RecordType>())
- Tag = RecordT;
- if (Tag && Tag->getDecl()->getDeclContext()->isFunctionOrMethod()) {
- SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
- Diag(SR.getBegin(), diag::ext_template_arg_local_type)
- << QualType(Tag, 0) << SR;
- } else if (Tag && !Tag->getDecl()->getDeclName() &&
- !Tag->getDecl()->getTypedefForAnonDecl()) {
- Diag(SR.getBegin(), diag::ext_template_arg_unnamed_type) << SR;
- Diag(Tag->getDecl()->getLocation(),
- diag::note_template_unnamed_type_here);
- }
- }
-
- if (Arg->isVariablyModifiedType()) {
- return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
- } else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
- return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
+ if (!LangOpts.CPlusPlus0x && Arg->hasUnnamedOrLocalType()) {
+ UnnamedLocalNoLinkageFinder Finder(*this, SR);
+ (void)Finder.Visit(Context.getCanonicalType(Arg));
}
return false;
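What the visitor catches that the old two-case check missed: types merely compounded from a local or unnamed type. A sketch:

    template<typename T> struct Wrap { };

    void f() {
      struct Local { };
      Wrap<Local>  a;   // warned on before and after this change
      Wrap<Local*> b;   // only the recursive finder sees the local type
                        // through the pointer (C++98/03 extension warning)
    }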
@@ -2383,7 +2913,7 @@ bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
/// \brief Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
-static bool
+static bool
CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
NonTypeTemplateParmDecl *Param,
QualType ParamType,
@@ -2410,13 +2940,15 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// corresponding template-parameter is a reference; or
DeclRefExpr *DRE = 0;
- // Ignore (and complain about) any excess parentheses.
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
- if (!Invalid) {
+ if (!Invalid && !ExtraParens && !S.getLangOptions().CPlusPlus0x) {
S.Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_extra_parens)
+ diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
- Invalid = true;
+ ExtraParens = true;
}
Arg = Parens->getSubExpr();
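The DR773-motivated relaxation, sketched:

    template<int *P> struct Addr { };

    int n;
    Addr<(&n)> a;   // C++98/03: a single extension warning, however many
                    // parentheses appear; C++0x: accepted silently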
@@ -2443,7 +2975,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// Stop checking the precise nature of the argument if it is value dependent,
// it should be checked when instantiated.
if (Arg->isValueDependent()) {
- Converted = TemplateArgument(ArgIn->Retain());
+ Converted = TemplateArgument(ArgIn);
return false;
}
@@ -2522,7 +3054,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
- S.Diag(Arg->getSourceRange().getBegin(),
+ S.Diag(Arg->getSourceRange().getBegin(),
diag::err_template_arg_reference_var)
<< Var->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
@@ -2586,9 +3118,9 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
}
- if (ParamType->isPointerType() &&
+ if (ParamType->isPointerType() &&
!ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
- S.IsQualificationConversion(ArgType, ParamType)) {
+ S.IsQualificationConversion(ArgType, ParamType, false)) {
// For pointer-to-object types, qualification conversions are
// permitted.
} else {
@@ -2613,7 +3145,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
<< Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
- }
+ }
}
}
@@ -2642,7 +3174,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
/// \brief Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
-bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
+bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
TemplateArgument &Converted) {
bool Invalid = false;
@@ -2658,13 +3190,15 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
// -- a pointer to member expressed as described in 5.3.1.
DeclRefExpr *DRE = 0;
- // Ignore (and complain about) any excess parentheses.
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
- if (!Invalid) {
+ if (!Invalid && !ExtraParens && !getLangOptions().CPlusPlus0x) {
Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_extra_parens)
+ diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
- Invalid = true;
+ ExtraParens = true;
}
Arg = Parens->getSubExpr();
@@ -2677,26 +3211,26 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
if (DRE && !DRE->getQualifier())
DRE = 0;
}
- }
+ }
// A constant of pointer-to-member type.
else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
if (ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) {
if (VD->getType()->isMemberPointerType()) {
if (isa<NonTypeTemplateParmDecl>(VD) ||
- (isa<VarDecl>(VD) &&
+ (isa<VarDecl>(VD) &&
Context.getCanonicalType(VD->getType()).isConstQualified())) {
if (Arg->isTypeDependent() || Arg->isValueDependent())
- Converted = TemplateArgument(Arg->Retain());
+ Converted = TemplateArgument(Arg);
else
Converted = TemplateArgument(VD->getCanonicalDecl());
return Invalid;
}
}
}
-
+
DRE = 0;
}
-
+
if (!DRE)
return Diag(Arg->getSourceRange().getBegin(),
diag::err_template_arg_not_pointer_to_member_form)
@@ -2710,7 +3244,7 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
// Okay: this is the address of a non-static member, and therefore
// a member pointer constant.
if (Arg->isTypeDependent() || Arg->isValueDependent())
- Converted = TemplateArgument(Arg->Retain());
+ Converted = TemplateArgument(Arg);
else
Converted = TemplateArgument(DRE->getDecl()->getCanonicalDecl());
return Invalid;
@@ -2803,7 +3337,10 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
<< ArgType << ParamType;
Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
+ return true;
+ } else if (ParamType->isBooleanType()) {
+ // This is an integral-to-boolean conversion.
+ ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean);
} else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
!ParamType->isEnumeralType()) {
// This is an integral promotion or conversion.
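The new boolean branch, sketched:

    template<bool B> struct Flag { };

    Flag<2> f;   // the argument now takes the integral-to-boolean path
                 // (CK_IntegralToBoolean), yielding B == true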
@@ -2823,12 +3360,12 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (!Arg->isValueDependent()) {
llvm::APSInt OldValue = Value;
-
- // Coerce the template argument's value to the value it will have
+
+ // Coerce the template argument's value to the value it will have
// based on the template parameter's type.
unsigned AllowedBits = Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
- Value.extOrTrunc(AllowedBits);
+ Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerType());
// Complain if an unsigned parameter received a negative value.
@@ -2879,7 +3416,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// from a template argument of type std::nullptr_t to a non-type
// template parameter of type pointer to object, pointer to
// function, or pointer-to-member, respectively.
- if (ArgType->isNullPtrType() &&
+ if (ArgType->isNullPtrType() &&
(ParamType->isPointerType() || ParamType->isMemberPointerType())) {
Converted = TemplateArgument((NamedDecl *)0);
return false;
@@ -2910,7 +3447,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
->isFunctionType())) {
if (Arg->getType() == Context.OverloadTy) {
- if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
true,
FoundResult)) {
if (DiagnoseUseOfDecl(Fn, Arg->getSourceRange().getBegin()))
@@ -2921,13 +3458,14 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
} else
return true;
}
-
+
if (!ParamType->isMemberPointerType())
return CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
- ParamType,
+ ParamType,
Arg, Converted);
- if (IsQualificationConversion(ArgType, ParamType.getNonReferenceType())) {
+ if (IsQualificationConversion(ArgType, ParamType.getNonReferenceType(),
+ false)) {
ImpCastExprToType(Arg, ParamType, CK_NoOp, CastCategory(Arg));
} else if (!Context.hasSameUnqualifiedType(ArgType,
ParamType.getNonReferenceType())) {
@@ -2950,7 +3488,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
"Only object pointers allowed here");
- return CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ return CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
ParamType,
Arg, Converted);
}
@@ -2966,8 +3504,8 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
"Only object references allowed here");
if (Arg->getType() == Context.OverloadTy) {
- if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
- ParamRefType->getPointeeType(),
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
+ ParamRefType->getPointeeType(),
true,
FoundResult)) {
if (DiagnoseUseOfDecl(Fn, Arg->getSourceRange().getBegin()))
@@ -2978,8 +3516,8 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
} else
return true;
}
-
- return CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+
+ return CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
ParamType,
Arg, Converted);
}
@@ -2990,7 +3528,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (Context.hasSameUnqualifiedType(ParamType, ArgType)) {
// Types match exactly: nothing more to do here.
- } else if (IsQualificationConversion(ArgType, ParamType)) {
+ } else if (IsQualificationConversion(ArgType, ParamType, false)) {
ImpCastExprToType(Arg, ParamType, CK_NoOp, CastCategory(Arg));
} else {
// We can't perform this conversion.
@@ -3041,7 +3579,7 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
Param->getTemplateParameters(),
- true,
+ true,
TPL_TemplateTemplateArgumentMatch,
Arg.getLocation());
}
@@ -3050,7 +3588,7 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
/// declaration and the type of its corresponding non-type template
/// parameter, produce an expression that properly refers to that
/// declaration.
-ExprResult
+ExprResult
Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc) {
@@ -3058,7 +3596,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
"Only declaration template arguments permitted here");
ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
- if (VD->getDeclContext()->isRecord() &&
+ if (VD->getDeclContext()->isRecord() &&
(isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD))) {
// If the value is a class member, we might have a pointer-to-member.
// Determine whether the non-type template template parameter is of
@@ -3073,37 +3611,46 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
ClassType.getTypePtr());
CXXScopeSpec SS;
SS.setScopeRep(Qualifier);
- ExprResult RefExpr = BuildDeclRefExpr(VD,
- VD->getType().getNonReferenceType(),
- Loc,
- &SS);
+
+ // The actual value-ness of this is unimportant, but for
+ // internal consistency's sake, references to instance methods
+ // are r-values.
+ ExprValueKind VK = VK_LValue;
+ if (isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())
+ VK = VK_RValue;
+
+ ExprResult RefExpr = BuildDeclRefExpr(VD,
+ VD->getType().getNonReferenceType(),
+ VK,
+ Loc,
+ &SS);
if (RefExpr.isInvalid())
return ExprError();
-
+
RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
-
+
// We might need to perform a trailing qualification conversion, since
// the element type on the parameter could be more qualified than the
// element type in the expression we constructed.
if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
- ParamType.getUnqualifiedType())) {
+ ParamType.getUnqualifiedType(), false)) {
Expr *RefE = RefExpr.takeAs<Expr>();
ImpCastExprToType(RefE, ParamType.getUnqualifiedType(), CK_NoOp);
RefExpr = Owned(RefE);
}
-
+
assert(!RefExpr.isInvalid() &&
Context.hasSameType(((Expr*) RefExpr.get())->getType(),
ParamType.getUnqualifiedType()));
return move(RefExpr);
}
}
-
+
QualType T = VD->getType().getNonReferenceType();
if (ParamType->isPointerType()) {
// When the non-type template parameter is a pointer, take the
// address of the declaration.
- ExprResult RefExpr = BuildDeclRefExpr(VD, T, Loc);
+ ExprResult RefExpr = BuildDeclRefExpr(VD, T, VK_LValue, Loc);
if (RefExpr.isInvalid())
return ExprError();
@@ -3118,18 +3665,23 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
return move(RefExpr);
}
-
+
// Take the address of everything else
return CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
}
+ ExprValueKind VK = VK_RValue;
+
// If the non-type template parameter has reference type, qualify the
// resulting declaration reference with the extra qualifiers on the
// type that the reference refers to.
- if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>())
- T = Context.getQualifiedType(T, TargetRef->getPointeeType().getQualifiers());
-
- return BuildDeclRefExpr(VD, T, Loc);
+ if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>()) {
+ VK = VK_LValue;
+ T = Context.getQualifiedType(T,
+ TargetRef->getPointeeType().getQualifiers());
+ }
+
+ return BuildDeclRefExpr(VD, T, VK, Loc);
}
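The value-kind logic above rebuilds a declaration argument as an lvalue only when the parameter itself has reference type; for pointer parameters the substituted expression is the address-of, an rvalue. A sketch with illustrative names:

    extern int g;
    template<int*> struct P { };
    template<int&> struct R { };
    P<&g> p;  // substitution yields &g (an rvalue)
    R<g>  r;  // substitution yields g itself (an lvalue)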
/// \brief Construct a new expression that refers to the given
@@ -3139,11 +3691,11 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
/// This routine takes care of the mapping from an integral template
/// argument (which may have any integral type) to the appropriate
/// literal value.
-ExprResult
+ExprResult
Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc) {
assert(Arg.getKind() == TemplateArgument::Integral &&
- "Operation is only value for integral template arguments");
+ "Operation is only valid for integral template arguments");
QualType T = Arg.getIntegralType();
if (T->isCharType() || T->isWideCharType())
return Owned(new (Context) CharacterLiteral(
@@ -3157,9 +3709,149 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
T,
Loc));
- return Owned(IntegerLiteral::Create(Context, *Arg.getAsIntegral(), T, Loc));
+ QualType BT;
+ if (const EnumType *ET = T->getAs<EnumType>())
+ BT = ET->getDecl()->getPromotionType();
+ else
+ BT = T;
+
+ Expr *E = IntegerLiteral::Create(Context, *Arg.getAsIntegral(), BT, Loc);
+ if (T->isEnumeralType()) {
+ // FIXME: This is a hack. We need a better way to handle substituted
+ // non-type template parameters.
+ E = CStyleCastExpr::Create(Context, T, VK_RValue, CK_IntegralCast,
+ E, 0,
+ Context.getTrivialTypeSourceInfo(T, Loc),
+ Loc, Loc);
+ }
+
+ return Owned(E);
}
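A sketch of why the C-style cast above is needed: an integral argument of enumeration type is rebuilt as a literal of the enum's promotion type and then cast back, so the substituted expression keeps the enumeration type.

    enum Color { Red = 0 };
    template<Color C> struct Paint { };
    Paint<Red> p;  // substitution produces (Color)0, not a bare int 0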
+/// \brief Match two template parameters within template parameter lists.
+static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
+ bool Complain,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ // Check the actual kind (type, non-type, template).
+ if (Old->getKind() != New->getKind()) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_param_different_kind;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_different_kind;
+ }
+ S.Diag(New->getLocation(), NextDiag)
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(Old->getLocation(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch);
+ }
+
+ return false;
+ }
+
+ // Check that both are parameter packs or neither are parameter packs.
+ // However, if we are matching a template template argument to a
+ // template template parameter, the template template parameter can have
+ // a parameter pack where the template template argument does not.
+ if (Old->isTemplateParameterPack() != New->isTemplateParameterPack() &&
+ !(Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ Old->isTemplateParameterPack())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_parameter_pack_non_pack;
+ }
+
+ unsigned ParamKind = isa<TemplateTypeParmDecl>(New)? 0
+ : isa<NonTypeTemplateParmDecl>(New)? 1
+ : 2;
+ S.Diag(New->getLocation(), NextDiag)
+ << ParamKind << New->isParameterPack();
+ S.Diag(Old->getLocation(), diag::note_template_parameter_pack_here)
+ << ParamKind << Old->isParameterPack();
+ }
+
+ return false;
+ }
+
+ // For non-type template parameters, check the type of the parameter.
+ if (NonTypeTemplateParmDecl *OldNTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Old)) {
+ NonTypeTemplateParmDecl *NewNTTP = cast<NonTypeTemplateParmDecl>(New);
+
+ // If we are matching a template template argument to a template
+ // template parameter and one of the non-type template parameter types
+ // is dependent, then we must wait until template instantiation time
+ // to actually compare the arguments.
+ if (Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ (OldNTTP->getType()->isDependentType() ||
+ NewNTTP->getType()->isDependentType()))
+ return true;
+
+ if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_nontype_parm_different_type;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_nontype_parm_different_type;
+ }
+ S.Diag(NewNTTP->getLocation(), NextDiag)
+ << NewNTTP->getType()
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(OldNTTP->getLocation(),
+ diag::note_template_nontype_parm_prev_declaration)
+ << OldNTTP->getType();
+ }
+
+ return false;
+ }
+
+ return true;
+ }
+
+ // For template template parameters, check the template parameter types.
+ // The template parameter lists of template template
+ // parameters must agree.
+ if (TemplateTemplateParmDecl *OldTTP
+ = dyn_cast<TemplateTemplateParmDecl>(Old)) {
+ TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
+ return S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
+ OldTTP->getTemplateParameters(),
+ Complain,
+ (Kind == Sema::TPL_TemplateMatch
+ ? Sema::TPL_TemplateTemplateParmMatch
+ : Kind),
+ TemplateArgLoc);
+ }
+
+ return true;
+}
+
+/// \brief Diagnose a known arity mismatch when comparing template argument
+/// lists.
+static
+void DiagnoseTemplateParameterListArityMismatch(Sema &S,
+ TemplateParameterList *New,
+ TemplateParameterList *Old,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ unsigned NextDiag = diag::err_template_param_list_different_arity;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_list_different_arity;
+ }
+ S.Diag(New->getTemplateLoc(), NextDiag)
+ << (New->size() > Old->size())
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
+ S.Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
+}
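An instance of the arity mismatch the helper above reports:

    template<template<typename> class C> struct Apply { };
    template<typename, typename> struct Pair { };
    Apply<Pair> a;  // error: Pair takes two template parameters, C takes one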
/// \brief Determine whether the given template parameter lists are
/// equivalent.
@@ -3190,120 +3882,66 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc) {
- if (Old->size() != New->size()) {
- if (Complain) {
- unsigned NextDiag = diag::err_template_param_list_different_arity;
- if (TemplateArgLoc.isValid()) {
- Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
- NextDiag = diag::note_template_param_list_different_arity;
- }
- Diag(New->getTemplateLoc(), NextDiag)
- << (New->size() > Old->size())
- << (Kind != TPL_TemplateMatch)
- << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
- Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
- << (Kind != TPL_TemplateMatch)
- << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
- }
+ if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
return false;
}
+ // C++0x [temp.arg.template]p3:
+ // A template-argument matches a template template-parameter (call it P)
+ // when each of the template parameters in the template-parameter-list of
+ // the template-argument's corresponding class template or template alias
+ // (call it A) matches the corresponding template parameter in the
+ // template-parameter-list of P. [...]
+ TemplateParameterList::iterator NewParm = New->begin();
+ TemplateParameterList::iterator NewParmEnd = New->end();
for (TemplateParameterList::iterator OldParm = Old->begin(),
- OldParmEnd = Old->end(), NewParm = New->begin();
- OldParm != OldParmEnd; ++OldParm, ++NewParm) {
- if ((*OldParm)->getKind() != (*NewParm)->getKind()) {
- if (Complain) {
- unsigned NextDiag = diag::err_template_param_different_kind;
- if (TemplateArgLoc.isValid()) {
- Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
- NextDiag = diag::note_template_param_different_kind;
- }
- Diag((*NewParm)->getLocation(), NextDiag)
- << (Kind != TPL_TemplateMatch);
- Diag((*OldParm)->getLocation(), diag::note_template_prev_declaration)
- << (Kind != TPL_TemplateMatch);
- }
- return false;
- }
+ OldParmEnd = Old->end();
+ OldParm != OldParmEnd; ++OldParm) {
+ if (Kind != TPL_TemplateTemplateArgumentMatch ||
+ !(*OldParm)->isTemplateParameterPack()) {
+ if (NewParm == NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
- if (TemplateTypeParmDecl *OldTTP
- = dyn_cast<TemplateTypeParmDecl>(*OldParm)) {
- // Template type parameters are equivalent if either both are template
- // type parameter packs or neither are (since we know we're at the same
- // index).
- TemplateTypeParmDecl *NewTTP = cast<TemplateTypeParmDecl>(*NewParm);
- if (OldTTP->isParameterPack() != NewTTP->isParameterPack()) {
- // FIXME: Implement the rules in C++0x [temp.arg.template]p5 that
- // allow one to match a template parameter pack in the template
- // parameter list of a template template parameter to one or more
- // template parameters in the template parameter list of the
- // corresponding template template argument.
- if (Complain) {
- unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
- if (TemplateArgLoc.isValid()) {
- Diag(TemplateArgLoc,
- diag::err_template_arg_template_params_mismatch);
- NextDiag = diag::note_template_parameter_pack_non_pack;
- }
- Diag(NewTTP->getLocation(), NextDiag)
- << 0 << NewTTP->isParameterPack();
- Diag(OldTTP->getLocation(), diag::note_template_parameter_pack_here)
- << 0 << OldTTP->isParameterPack();
- }
return false;
}
- } else if (NonTypeTemplateParmDecl *OldNTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*OldParm)) {
- // The types of non-type template parameters must agree.
- NonTypeTemplateParmDecl *NewNTTP
- = cast<NonTypeTemplateParmDecl>(*NewParm);
-
- // If we are matching a template template argument to a template
- // template parameter and one of the non-type template parameter types
- // is dependent, then we must wait until template instantiation time
- // to actually compare the arguments.
- if (Kind == TPL_TemplateTemplateArgumentMatch &&
- (OldNTTP->getType()->isDependentType() ||
- NewNTTP->getType()->isDependentType()))
- continue;
-
- if (Context.getCanonicalType(OldNTTP->getType()) !=
- Context.getCanonicalType(NewNTTP->getType())) {
- if (Complain) {
- unsigned NextDiag = diag::err_template_nontype_parm_different_type;
- if (TemplateArgLoc.isValid()) {
- Diag(TemplateArgLoc,
- diag::err_template_arg_template_params_mismatch);
- NextDiag = diag::note_template_nontype_parm_different_type;
- }
- Diag(NewNTTP->getLocation(), NextDiag)
- << NewNTTP->getType()
- << (Kind != TPL_TemplateMatch);
- Diag(OldNTTP->getLocation(),
- diag::note_template_nontype_parm_prev_declaration)
- << OldNTTP->getType();
- }
+
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
return false;
- }
- } else {
- // The template parameter lists of template template
- // parameters must agree.
- assert(isa<TemplateTemplateParmDecl>(*OldParm) &&
- "Only template template parameters handled here");
- TemplateTemplateParmDecl *OldTTP
- = cast<TemplateTemplateParmDecl>(*OldParm);
- TemplateTemplateParmDecl *NewTTP
- = cast<TemplateTemplateParmDecl>(*NewParm);
- if (!TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
- OldTTP->getTemplateParameters(),
- Complain,
- (Kind == TPL_TemplateMatch? TPL_TemplateTemplateParmMatch : Kind),
- TemplateArgLoc))
+
+ ++NewParm;
+ continue;
+ }
+
+ // C++0x [temp.arg.template]p3:
+ // [...] When P's template-parameter-list contains a template parameter
+ // pack (14.5.3), the template parameter pack will match zero or more
+ // template parameters or template parameter packs in the
+ // template-parameter-list of A with the same type and form as the
+ // template parameter pack in P (ignoring whether those template
+ // parameters are template parameter packs).
+ for (; NewParm != NewParmEnd; ++NewParm) {
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
return false;
}
}
+ // Make sure we exhausted all of the arguments.
+ if (NewParm != NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
return true;
}
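A sketch of the C++0x pack-matching rule now implemented above:

    template<template<typename...> class C> struct A { };
    template<typename T, typename U> struct Pair { };
    A<Pair> a;  // OK in C++0x: the parameter pack in C matches both of
                // Pair's template parameters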
@@ -3343,18 +3981,18 @@ Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
static TemplateSpecializationKind getTemplateSpecializationKind(NamedDecl *D) {
if (!D)
return TSK_Undeclared;
-
+
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
return Record->getTemplateSpecializationKind();
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
return Function->getTemplateSpecializationKind();
if (VarDecl *Var = dyn_cast<VarDecl>(D))
return Var->getTemplateSpecializationKind();
-
+
return TSK_Undeclared;
}
-/// \brief Check whether a specialization is well-formed in the current
+/// \brief Check whether a specialization is well-formed in the current
/// context.
///
/// This routine determines whether a template specialization can be declared
@@ -3365,7 +4003,7 @@ static TemplateSpecializationKind getTemplateSpecializationKind(NamedDecl *D) {
///
/// \param Specialized the entity being specialized or instantiated, which
/// may be a kind of template (class template, function template, etc.) or
-/// a member of a class template (member function, static data member,
+/// a member of a class template (member function, static data member,
/// member class).
///
/// \param PrevDecl the previous declaration of this entity, if any.
@@ -3386,14 +4024,11 @@ static bool CheckTemplateSpecializationScope(Sema &S,
// Keep these "kind" numbers in sync with the %select statements in the
// various diagnostics emitted by this routine.
int EntityKind = 0;
- bool isTemplateSpecialization = false;
- if (isa<ClassTemplateDecl>(Specialized)) {
+ if (isa<ClassTemplateDecl>(Specialized))
EntityKind = IsPartialSpecialization? 1 : 0;
- isTemplateSpecialization = true;
- } else if (isa<FunctionTemplateDecl>(Specialized)) {
+ else if (isa<FunctionTemplateDecl>(Specialized))
EntityKind = 2;
- isTemplateSpecialization = true;
- } else if (isa<CXXMethodDecl>(Specialized))
+ else if (isa<CXXMethodDecl>(Specialized))
EntityKind = 3;
else if (isa<VarDecl>(Specialized))
EntityKind = 4;
@@ -3429,38 +4064,53 @@ static bool CheckTemplateSpecializationScope(Sema &S,
<< Specialized;
return true;
}
-
+
// C++ [temp.class.spec]p6:
// A class template partial specialization may be declared or redeclared
- // in any namespace scope in which its definition may be defined (14.5.1
- // and 14.5.2).
+ // in any namespace scope in which its definition may be defined (14.5.1
+ // and 14.5.2).
bool ComplainedAboutScope = false;
- DeclContext *SpecializedContext
+ DeclContext *SpecializedContext
= Specialized->getDeclContext()->getEnclosingNamespaceContext();
DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
- if ((!PrevDecl ||
+ if ((!PrevDecl ||
getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
getTemplateSpecializationKind(PrevDecl) == TSK_ImplicitInstantiation)){
- // There is no prior declaration of this entity, so this
- // specialization must be in the same context as the template
- // itself, or in the enclosing namespace set.
- if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace of which
+ // the template is a member, or, for member templates, in the namespace
+ // of which the enclosing class or enclosing class template is a member.
+ // An explicit specialization of a member function, member class or
+ // static data member of a class template shall be declared in the
+ // namespace of which the class template is a member.
+ //
+ // C++0x [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in a namespace enclosing
+ // the specialized template.
+ if (!DC->InEnclosingNamespaceSetOf(SpecializedContext) &&
+ !(S.getLangOptions().CPlusPlus0x && DC->Encloses(SpecializedContext))) {
+ bool IsCPlusPlus0xExtension
+ = !S.getLangOptions().CPlusPlus0x && DC->Encloses(SpecializedContext);
if (isa<TranslationUnitDecl>(SpecializedContext))
- S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
- << EntityKind << Specialized;
+ S.Diag(Loc, IsCPlusPlus0xExtension
+ ? diag::ext_template_spec_decl_out_of_scope_global
+ : diag::err_template_spec_decl_out_of_scope_global)
+ << EntityKind << Specialized;
else if (isa<NamespaceDecl>(SpecializedContext))
- S.Diag(Loc, diag::err_template_spec_decl_out_of_scope)
- << EntityKind << Specialized
- << cast<NamedDecl>(SpecializedContext);
-
+ S.Diag(Loc, IsCPlusPlus0xExtension
+ ? diag::ext_template_spec_decl_out_of_scope
+ : diag::err_template_spec_decl_out_of_scope)
+ << EntityKind << Specialized
+ << cast<NamedDecl>(SpecializedContext);
+
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
ComplainedAboutScope = true;
}
}
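A sketch of the C++0x relaxation implemented above:

    namespace N { template<typename T> struct W { }; }
    template<> struct N::W<int> { };  // C++0x: any enclosing namespace is fine;
                                      // C++98/03: accepted here as an extension
                                      // via the new ext_ diagnostics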
-
- // Make sure that this redeclaration (or definition) occurs in an enclosing
+
+ // Make sure that this redeclaration (or definition) occurs in an enclosing
// namespace.
- // Note that HandleDeclarator() performs this check for explicit
+ // Note that HandleDeclarator() performs this check for explicit
// specializations of function templates, static data members, and member
// functions, so we skip the check here for those kinds of entities.
// FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
@@ -3475,77 +4125,44 @@ static bool CheckTemplateSpecializationScope(Sema &S,
S.Diag(Loc, diag::err_template_spec_redecl_out_of_scope)
<< EntityKind << Specialized
<< cast<NamedDecl>(SpecializedContext);
-
+
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
}
-
+
// FIXME: check for specialization-after-instantiation errors and such.
-
+
return false;
}
-
-/// \brief Check the non-type template arguments of a class template
-/// partial specialization according to C++ [temp.class.spec]p9.
-///
-/// \param TemplateParams the template parameters of the primary class
-/// template.
-///
-/// \param TemplateArg the template arguments of the class template
-/// partial specialization.
-///
-/// \param MirrorsPrimaryTemplate will be set true if the class
-/// template partial specialization arguments are identical to the
-/// implicit template arguments of the primary template. This is not
-/// necessarily an error (C++0x), and it is left to the caller to diagnose
-/// this condition when it is an error.
-///
-/// \returns true if there was an error, false otherwise.
-bool Sema::CheckClassTemplatePartialSpecializationArgs(
- TemplateParameterList *TemplateParams,
- const TemplateArgumentListBuilder &TemplateArgs,
- bool &MirrorsPrimaryTemplate) {
- // FIXME: the interface to this function will have to change to
- // accommodate variadic templates.
- MirrorsPrimaryTemplate = true;
-
- const TemplateArgument *ArgList = TemplateArgs.getFlatArguments();
- for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
- // Determine whether the template argument list of the partial
- // specialization is identical to the implicit argument list of
- // the primary template. The caller may need to diagnostic this as
- // an error per C++ [temp.class.spec]p9b3.
- if (MirrorsPrimaryTemplate) {
- if (TemplateTypeParmDecl *TTP
- = dyn_cast<TemplateTypeParmDecl>(TemplateParams->getParam(I))) {
- if (Context.getCanonicalType(Context.getTypeDeclType(TTP)) !=
- Context.getCanonicalType(ArgList[I].getAsType()))
- MirrorsPrimaryTemplate = false;
- } else if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(
- TemplateParams->getParam(I))) {
- TemplateName Name = ArgList[I].getAsTemplate();
- TemplateTemplateParmDecl *ArgDecl
- = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl());
- if (!ArgDecl ||
- ArgDecl->getIndex() != TTP->getIndex() ||
- ArgDecl->getDepth() != TTP->getDepth())
- MirrorsPrimaryTemplate = false;
- }
- }
+/// \brief Subroutine of Sema::CheckClassTemplatePartialSpecializationArgs
+/// that checks non-type template partial specialization arguments.
+static bool CheckNonTypeClassTemplatePartialSpecializationArgs(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].getKind() == TemplateArgument::Pack) {
+ if (CheckNonTypeClassTemplatePartialSpecializationArgs(S, Param,
+ Args[I].pack_begin(),
+ Args[I].pack_size()))
+ return true;
- NonTypeTemplateParmDecl *Param
- = dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
- if (!Param) {
continue;
}
- Expr *ArgExpr = ArgList[I].getAsExpr();
+ Expr *ArgExpr = Args[I].getAsExpr();
if (!ArgExpr) {
- MirrorsPrimaryTemplate = false;
continue;
}
+ // We can have a pack expansion of any of the bullets below.
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
+ ArgExpr = Expansion->getPattern();
+
+ // Strip off any implicit casts we added as part of type checking.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ ArgExpr = ICE->getSubExpr();
+
// C++ [temp.class.spec]p8:
// A non-type argument is non-specialized if it is the name of a
// non-type parameter. All other non-type arguments are
@@ -3555,15 +4172,8 @@ bool Sema::CheckClassTemplatePartialSpecializationArgs(
// specialized non-type arguments, so skip any non-specialized
// arguments.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr))
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl())) {
- if (MirrorsPrimaryTemplate &&
- (Param->getIndex() != NTTP->getIndex() ||
- Param->getDepth() != NTTP->getDepth()))
- MirrorsPrimaryTemplate = false;
-
+ if (isa<NonTypeTemplateParmDecl>(DRE->getDecl()))
continue;
- }
// C++ [temp.class.spec]p9:
// Within the argument list of a class template partial
@@ -3573,7 +4183,7 @@ bool Sema::CheckClassTemplatePartialSpecializationArgs(
// specialization except when the argument expression is a
// simple identifier.
if (ArgExpr->isTypeDependent() || ArgExpr->isValueDependent()) {
- Diag(ArgExpr->getLocStart(),
+ S.Diag(ArgExpr->getLocStart(),
diag::err_dependent_non_type_arg_in_partial_spec)
<< ArgExpr->getSourceRange();
return true;
@@ -3583,15 +4193,42 @@ bool Sema::CheckClassTemplatePartialSpecializationArgs(
// specialized non-type argument shall not be dependent on a
// parameter of the specialization.
if (Param->getType()->isDependentType()) {
- Diag(ArgExpr->getLocStart(),
+ S.Diag(ArgExpr->getLocStart(),
diag::err_dependent_typed_non_type_arg_in_partial_spec)
<< Param->getType()
<< ArgExpr->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
+ }
+
+ return false;
+}
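The classic case these checks reject, per C++ [temp.class.spec]p9:

    template<int I, int J> struct A { };
    template<int I> struct A<I, I*2> { };  // error: the argument I*2 depends
                                           // on a parameter of the partial
                                           // specialization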
- MirrorsPrimaryTemplate = false;
+/// \brief Check the non-type template arguments of a class template
+/// partial specialization according to C++ [temp.class.spec]p9.
+///
+/// \param TemplateParams the template parameters of the primary class
+/// template.
+///
+/// \param TemplateArgs the template arguments of the class template
+/// partial specialization.
+///
+/// \returns true if there was an error, false otherwise.
+static bool CheckClassTemplatePartialSpecializationArgs(Sema &S,
+ TemplateParameterList *TemplateParams,
+ llvm::SmallVectorImpl<TemplateArgument> &TemplateArgs) {
+ const TemplateArgument *ArgList = TemplateArgs.data();
+
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NonTypeTemplateParmDecl *Param
+ = dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
+ if (!Param)
+ continue;
+
+ if (CheckNonTypeClassTemplatePartialSpecializationArgs(S, Param,
+ &ArgList[I], 1))
+ return true;
}
return false;
@@ -3635,7 +4272,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (!ClassTemplate) {
Diag(TemplateNameLoc, diag::err_not_class_template_specialization)
- << (Name.getAsTemplateDecl() &&
+ << (Name.getAsTemplateDecl() &&
isa<TemplateTemplateParmDecl>(Name.getAsTemplateDecl()));
return true;
}
@@ -3657,7 +4294,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Invalid);
if (Invalid)
return true;
-
+
unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size();
if (TemplateParams)
--NumMatchedTemplateParamLists;
@@ -3665,6 +4302,12 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
+ if (TUK == TUK_Friend) {
+ Diag(KWLoc, diag::err_partial_specialization_friend)
+ << SourceRange(LAngleLoc, RAngleLoc);
+ return true;
+ }
+
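A sketch of the new friend rejection above:

    template<typename T> struct X { };
    struct S {
      template<typename T> friend struct X<T*>;  // error: a partial
                                                 // specialization cannot be
                                                 // declared as a friend
    };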
// C++ [temp.class.spec]p10:
// The template parameter list of a specialization shall not
// contain default template argument values.
@@ -3731,48 +4374,33 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TemplateArgs.setRAngleLoc(RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+ // Check for unexpanded parameter packs in any of the template arguments.
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
+ UPPC_PartialSpecialization))
+ return true;
+
// Check that the template argument list is well-formed for this
// template.
- TemplateArgumentListBuilder Converted(ClassTemplate->getTemplateParameters(),
- TemplateArgs.size());
+ llvm::SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted))
return true;
- assert((Converted.structuredSize() ==
- ClassTemplate->getTemplateParameters()->size()) &&
+ assert((Converted.size() == ClassTemplate->getTemplateParameters()->size()) &&
"Converted template argument list is too short!");
// Find the class template (partial) specialization declaration that
// corresponds to these arguments.
if (isPartialSpecialization) {
- bool MirrorsPrimaryTemplate;
- if (CheckClassTemplatePartialSpecializationArgs(
+ if (CheckClassTemplatePartialSpecializationArgs(*this,
ClassTemplate->getTemplateParameters(),
- Converted, MirrorsPrimaryTemplate))
+ Converted))
return true;
- if (MirrorsPrimaryTemplate) {
- // C++ [temp.class.spec]p9b3:
- //
- // -- The argument list of the specialization shall not be identical
- // to the implicit argument list of the primary template.
- Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
- << (TUK == TUK_Definition)
- << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
- return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
- ClassTemplate->getIdentifier(),
- TemplateNameLoc,
- Attr,
- TemplateParams,
- AS_none);
- }
-
- // FIXME: Diagnose friend partial specializations
-
- if (!Name.isDependent() &&
+ if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs.getArgumentArray(),
+ TemplateArgs.getArgumentArray(),
TemplateArgs.size())) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
@@ -3786,27 +4414,27 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (isPartialSpecialization)
// FIXME: Template parameter list matters, too
PrevDecl
- = ClassTemplate->findPartialSpecialization(Converted.getFlatArguments(),
- Converted.flatSize(),
+ = ClassTemplate->findPartialSpecialization(Converted.data(),
+ Converted.size(),
InsertPos);
else
PrevDecl
- = ClassTemplate->findSpecialization(Converted.getFlatArguments(),
- Converted.flatSize(), InsertPos);
+ = ClassTemplate->findSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
ClassTemplateSpecializationDecl *Specialization = 0;
// Check whether we can declare a class template specialization in
// the current scope.
if (TUK != TUK_Friend &&
- CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
- TemplateNameLoc,
+ CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
+ TemplateNameLoc,
isPartialSpecialization))
return true;
-
+
// The canonical type
QualType CanonType;
- if (PrevDecl &&
+ if (PrevDecl &&
(PrevDecl->getSpecializationKind() == TSK_Undeclared ||
TUK == TUK_Friend)) {
// Since the only prior class template specialization with these
@@ -3823,8 +4451,25 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// arguments of the class template partial specialization.
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(CanonTemplate,
- Converted.getFlatArguments(),
- Converted.flatSize());
+ Converted.data(),
+ Converted.size());
+
+ if (Context.hasSameType(CanonType,
+ ClassTemplate->getInjectedClassNameSpecialization())) {
+ // C++ [temp.class.spec]p9b3:
+ //
+ // -- The argument list of the specialization shall not be identical
+ // to the implicit argument list of the primary template.
+ Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
+ << (TUK == TUK_Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
+ ClassTemplate->getIdentifier(),
+ TemplateNameLoc,
+ Attr,
+ TemplateParams,
+ AS_none);
+ }
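An instance of the p9b3 rule, now detected via the injected-class-name comparison above:

    template<typename T> struct Y { };
    template<typename T> struct Y<T> { };  // error: the argument list is
                                           // identical to the primary
                                           // template's implicit argument list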
// Create a new class template partial specialization declaration node.
ClassTemplatePartialSpecializationDecl *PrevPartial
@@ -3837,7 +4482,8 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TemplateNameLoc,
TemplateParams,
ClassTemplate,
- Converted,
+ Converted.data(),
+ Converted.size(),
TemplateArgs,
CanonType,
PrevPartial,
@@ -3853,18 +4499,18 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
ClassTemplate->AddPartialSpecialization(Partial, InsertPos);
Specialization = Partial;
- // If we are providing an explicit specialization of a member class
+ // If we are providing an explicit specialization of a member class
// template specialization, make a note of that.
if (PrevPartial && PrevPartial->getInstantiatedFromMember())
PrevPartial->setMemberSpecialization();
-
+
// Check that all of the template parameters of the class template
// partial specialization are deducible from the template
// arguments. If not, this class template partial specialization
// will never be used.
llvm::SmallVector<bool, 8> DeducibleParams;
DeducibleParams.resize(TemplateParams->size());
- MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
+ MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
TemplateParams->getDepth(),
DeducibleParams);
unsigned NumNonDeducible = 0;
@@ -3898,7 +4544,8 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
ClassTemplate->getDeclContext(),
TemplateNameLoc,
ClassTemplate,
- Converted,
+ Converted.data(),
+ Converted.size(),
PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
if (NumMatchedTemplateParamLists > 0 && SS.isSet()) {
@@ -3915,9 +4562,9 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
- // explicitly specialized then that specialization shall be declared
+ // explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
- // instantiation to take place, in every translation unit in which such a
+ // instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
bool Okay = false;
@@ -3934,14 +4581,14 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
<< Context.getTypeDeclType(Specialization) << Range;
- Diag(PrevDecl->getPointOfInstantiation(),
+ Diag(PrevDecl->getPointOfInstantiation(),
diag::note_instantiation_required_here)
- << (PrevDecl->getTemplateSpecializationKind()
+ << (PrevDecl->getTemplateSpecializationKind()
!= TSK_ImplicitInstantiation);
return true;
}
}
-
+
// If this is not a friend, note that this is an explicit specialization.
if (TUK != TUK_Friend)
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
@@ -3958,6 +4605,9 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
}
}
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -4015,9 +4665,7 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D) {
assert(getCurFunctionDecl() == 0 && "Function parsing confused");
- assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "Not a function declarator!");
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
if (FTI.hasPrototype) {
// FIXME: Diagnose arguments without names in C.
@@ -4047,12 +4695,12 @@ static void StripImplicitInstantiation(NamedDecl *D) {
}
}
-/// \brief Diagnose cases where we have an explicit template specialization
+/// \brief Diagnose cases where we have an explicit template specialization
/// before/after an explicit template instantiation, producing diagnostics
-/// for those cases where they are required and determining whether the
+/// for those cases where they are required and determining whether the
/// new specialization/instantiation will have any effect.
///
-/// \param NewLoc the location of the new explicit specialization or
+/// \param NewLoc the location of the new explicit specialization or
/// instantiation.
///
/// \param NewTSK the kind of the new explicit specialization or instantiation.
@@ -4061,10 +4709,10 @@ static void StripImplicitInstantiation(NamedDecl *D) {
///
/// \param PrevTSK the kind of the old explicit specialization or instantiation.
///
-/// \param PrevPointOfInstantiation if valid, indicates where the previus
+/// \param PrevPointOfInstantiation if valid, indicates where the previous
/// declaration was instantiated (either implicitly or explicitly).
///
-/// \param HasNoEffect will be set to true to indicate that the new
+/// \param HasNoEffect will be set to true to indicate that the new
/// specialization or instantiation has no effect and should be ignored.
///
/// \returns true if there was an error that should prevent the introduction of
@@ -4077,18 +4725,18 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
SourceLocation PrevPointOfInstantiation,
bool &HasNoEffect) {
HasNoEffect = false;
-
+
switch (NewTSK) {
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
assert(false && "Don't check implicit instantiations here");
return false;
-
+
case TSK_ExplicitSpecialization:
switch (PrevTSK) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
- // Okay, we're just specializing something that is either already
+ // Okay, we're just specializing something that is either already
// explicitly specialized or has merely been mentioned without any
// instantiation.
return false;
@@ -4101,17 +4749,17 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
return false;
}
// Fall through
-
+
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
- assert((PrevTSK == TSK_ImplicitInstantiation ||
- PrevPointOfInstantiation.isValid()) &&
+ assert((PrevTSK == TSK_ImplicitInstantiation ||
+ PrevPointOfInstantiation.isValid()) &&
"Explicit instantiation without point of instantiation?");
-
+
// C++ [temp.expl.spec]p6:
- // If a template, a member template or the member of a class template
+ // If a template, a member template or the member of a class template
// is explicitly specialized then that specialization shall be declared
- // before the first use of that specialization that would cause an
+ // before the first use of that specialization that would cause an
// implicit instantiation to take place, in every translation unit in
// which such a use occurs; no diagnostic is required.
for (NamedDecl *Prev = PrevDecl; Prev; Prev = getPreviousDecl(Prev)) {
@@ -4124,41 +4772,41 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
<< PrevDecl;
Diag(PrevPointOfInstantiation, diag::note_instantiation_required_here)
<< (PrevTSK != TSK_ImplicitInstantiation);
-
+
return true;
}
break;
-
+
case TSK_ExplicitInstantiationDeclaration:
switch (PrevTSK) {
case TSK_ExplicitInstantiationDeclaration:
// This explicit instantiation declaration is redundant (that's okay).
HasNoEffect = true;
return false;
-
+
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
// We're explicitly instantiating something that may have already been
// implicitly instantiated; that's fine.
return false;
-
+
case TSK_ExplicitSpecialization:
// C++0x [temp.explicit]p4:
// For a given set of template parameters, if an explicit instantiation
- // of a template appears after a declaration of an explicit
+ // of a template appears after a declaration of an explicit
// specialization for that template, the explicit instantiation has no
// effect.
HasNoEffect = true;
return false;
-
+
case TSK_ExplicitInstantiationDefinition:
// C++0x [temp.explicit]p10:
- // If an entity is the subject of both an explicit instantiation
- // declaration and an explicit instantiation definition in the same
+ // If an entity is the subject of both an explicit instantiation
+ // declaration and an explicit instantiation definition in the same
// translation unit, the definition shall follow the declaration.
- Diag(NewLoc,
+ Diag(NewLoc,
diag::err_explicit_instantiation_declaration_after_definition);
- Diag(PrevPointOfInstantiation,
+ Diag(PrevPointOfInstantiation,
diag::note_explicit_instantiation_definition_here);
assert(PrevPointOfInstantiation.isValid() &&
"Explicit instantiation without point of instantiation?");
@@ -4166,7 +4814,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
return false;
}
break;
-
+
case TSK_ExplicitInstantiationDefinition:
switch (PrevTSK) {
case TSK_Undeclared:
@@ -4174,7 +4822,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// We're explicitly instantiating something that may have already been
// implicitly instantiated; that's fine.
return false;
-
+
case TSK_ExplicitSpecialization:
// C++ DR 259, C++0x [temp.explicit]p4:
// For a given set of template parameters, if an explicit
@@ -4182,7 +4830,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// an explicit specialization for that template, the explicit
// instantiation has no effect.
//
- // In C++98/03 mode, we only give an extension warning here, because it
+ // In C++98/03 mode, we only give an extension warning here, because it
// is not harmful to try to explicitly instantiate something that
// has been explicitly specialized.
if (!getLangOptions().CPlusPlus0x) {
@@ -4193,12 +4841,12 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
}
HasNoEffect = true;
return false;
-
+
case TSK_ExplicitInstantiationDeclaration:
// We're explicitly instantiating a definition for something for which we
- // were previously asked to suppress instantiations. That's fine.
+ // were previously asked to suppress instantiations. That's fine.
return false;
-
+
case TSK_ExplicitInstantiationDefinition:
// C++0x [temp.spec]p5:
// For a given template and a given set of template-arguments,
@@ -4206,16 +4854,16 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// in a program,
Diag(NewLoc, diag::err_explicit_instantiation_duplicate)
<< PrevDecl;
- Diag(PrevPointOfInstantiation,
+ Diag(PrevPointOfInstantiation,
diag::note_previous_explicit_instantiation);
HasNoEffect = true;
- return false;
+ return false;
}
break;
}
-
+
assert(false && "Missing specialization/instantiation case?");
-
+
return false;
}
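A sketch of the ordering these checks enforce for explicit instantiations (C++0x [temp.explicit]p10):

    template<typename T> struct Z { };
    extern template struct Z<int>;  // explicit instantiation declaration
    template struct Z<int>;         // OK: the definition follows the
                                    // declaration; the reverse order is
                                    // diagnosed as an error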
@@ -4281,21 +4929,21 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
// The set of function template specializations that could match this
// explicit function template specialization.
UnresolvedSet<8> Candidates;
-
+
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
NamedDecl *Ovl = (*I)->getUnderlyingDecl();
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Ovl)) {
- // Only consider templates found within the same semantic lookup scope as
+ // Only consider templates found within the same semantic lookup scope as
// FD.
if (!FDLookupContext->InEnclosingNamespaceSetOf(
Ovl->getDeclContext()->getRedeclContext()))
continue;
-
+
// C++ [temp.expl.spec]p11:
- // A trailing template-argument can be left unspecified in the
- // template-id naming an explicit function template specialization
+ // A trailing template-argument can be left unspecified in the
+ // template-id naming an explicit function template specialization
// provided it can be deduced from the function argument type.
// Perform template argument deduction to determine whether we may be
// specializing this template.
@@ -4312,17 +4960,17 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
(void)TDK;
continue;
}
-
+
// Record this candidate.
Candidates.addDecl(Specialization, I.getAccess());
}
}
-
+
// Find the most specialized function template.
UnresolvedSetIterator Result
= getMostSpecialized(Candidates.begin(), Candidates.end(),
- TPOC_Other, FD->getLocation(),
- PDiag(diag::err_function_template_spec_no_match)
+ TPOC_Other, 0, FD->getLocation(),
+ PDiag(diag::err_function_template_spec_no_match)
<< FD->getDeclName(),
PDiag(diag::err_function_template_spec_ambiguous)
<< FD->getDeclName() << (ExplicitTemplateArgs != 0),
@@ -4333,27 +4981,27 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
// Ignore access information; it doesn't figure into redeclaration checking.
FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
Specialization->setLocation(FD->getLocation());
-
+
// FIXME: Check if the prior specialization has a point of instantiation.
// If so, we have run afoul of .
// If this is a friend declaration, then we're not really declaring
// an explicit specialization.
bool isFriend = (FD->getFriendObjectKind() != Decl::FOK_None);
-
+
// Check the scope of this explicit specialization.
if (!isFriend &&
- CheckTemplateSpecializationScope(*this,
+ CheckTemplateSpecializationScope(*this,
Specialization->getPrimaryTemplate(),
- Specialization, FD->getLocation(),
+ Specialization, FD->getLocation(),
false))
return true;
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
- // explicitly specialized then that specialization shall be declared
+ // explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
- // instantiation to take place, in every translation unit in which such a
+ // instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
FunctionTemplateSpecializationInfo *SpecInfo
= Specialization->getTemplateSpecializationInfo();
@@ -4368,14 +5016,14 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
SpecInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
-
+
// Mark the prior declaration as an explicit specialization, so that later
// clients know that this is an explicit specialization.
if (!isFriend) {
SpecInfo->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
MarkUnusedFileScopedDecl(Specialization);
}
-
+
// Turn the given function declaration into a function template
// specialization, with the template arguments from the previous
// specialization.
@@ -4399,7 +5047,7 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
/// \brief Perform semantic analysis for the given non-template member
/// specialization.
///
-/// This routine performs all of the semantic analysis required for an
+/// This routine performs all of the semantic analysis required for an
/// explicit member function specialization. On successful completion,
/// the function declaration \p FD will become a member function
/// specialization.
@@ -4410,7 +5058,7 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
/// \param Previous the set of declarations, one of which may be specialized
/// by this function specialization; the set will be modified to contain the
/// redeclared member.
-bool
+bool
Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
@@ -4452,7 +5100,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
MSInfo = PrevRecord->getMemberSpecializationInfo();
}
}
-
+
if (!Instantiation) {
// There is no previous declaration that matches. Since member
// specializations are always out-of-line, the caller will complain about
@@ -4478,7 +5126,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
Previous.addDecl(Instantiation);
return false;
}
-
+
// Make sure that this is a specialization of a member.
if (!InstantiatedFrom) {
Diag(Member->getLocation(), diag::err_spec_member_not_instantiated)
@@ -4486,12 +5134,12 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
Diag(Instantiation->getLocation(), diag::note_specialized_decl);
return true;
}
-
+
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
- // explicitly specialized then that spe- cialization shall be declared
+ // explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
- // instantiation to take place, in every translation unit in which such a
+ // instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
assert(MSInfo && "Member specialization info missing?");
@@ -4503,11 +5151,11 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
MSInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
-
+
// Check the scope of this explicit specialization.
- if (CheckTemplateSpecializationScope(*this,
+ if (CheckTemplateSpecializationScope(*this,
InstantiatedFrom,
- Instantiation, Member->getLocation(),
+ Instantiation, Member->getLocation(),
false))
return true;
@@ -4523,7 +5171,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
TSK_ExplicitSpecialization);
InstantiationFunction->setLocation(Member->getLocation());
}
-
+
cast<FunctionDecl>(Member)->setInstantiationOfMemberFunction(
cast<CXXMethodDecl>(InstantiatedFrom),
TSK_ExplicitSpecialization);
@@ -4536,7 +5184,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
TSK_ExplicitSpecialization);
InstantiationVar->setLocation(Member->getLocation());
}
-
+
Context.setInstantiatedFromStaticDataMember(cast<VarDecl>(Member),
cast<VarDecl>(InstantiatedFrom),
TSK_ExplicitSpecialization);
@@ -4550,12 +5198,12 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
TSK_ExplicitSpecialization);
InstantiationClass->setLocation(Member->getLocation());
}
-
+
cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
cast<CXXRecordDecl>(InstantiatedFrom),
TSK_ExplicitSpecialization);
}
-
+
// Save the caller the trouble of having to figure out which declaration
// this specialization matches.
Previous.clear();
@@ -4571,28 +5219,28 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
bool WasQualifiedName) {
DeclContext *OrigContext= D->getDeclContext()->getEnclosingNamespaceContext();
DeclContext *CurContext = S.CurContext->getRedeclContext();
-
+
if (CurContext->isRecord()) {
S.Diag(InstLoc, diag::err_explicit_instantiation_in_class)
<< D;
return true;
}
-
+
// C++0x [temp.explicit]p2:
- // An explicit instantiation shall appear in an enclosing namespace of its
+ // An explicit instantiation shall appear in an enclosing namespace of its
// template.
//
// This is DR275, which we do not retroactively apply to C++98/03.
- if (S.getLangOptions().CPlusPlus0x &&
+ if (S.getLangOptions().CPlusPlus0x &&
!CurContext->Encloses(OrigContext)) {
if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext))
- S.Diag(InstLoc,
- S.getLangOptions().CPlusPlus0x?
+ S.Diag(InstLoc,
+ S.getLangOptions().CPlusPlus0x?
diag::err_explicit_instantiation_out_of_scope
: diag::warn_explicit_instantiation_out_of_scope_0x)
<< D << NS;
else
- S.Diag(InstLoc,
+ S.Diag(InstLoc,
S.getLangOptions().CPlusPlus0x?
diag::err_explicit_instantiation_must_be_global
: diag::warn_explicit_instantiation_out_of_scope_0x)
@@ -4602,8 +5250,8 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
}
// C++0x [temp.explicit]p2:
- // If the name declared in the explicit instantiation is an unqualified
- // name, the explicit instantiation shall appear in the namespace where
+ // If the name declared in the explicit instantiation is an unqualified
+ // name, the explicit instantiation shall appear in the namespace where
// its template is declared or, if that namespace is inline (7.3.1), any
// namespace from its enclosing namespace set.
if (WasQualifiedName)
@@ -4612,7 +5260,7 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
return false;
- S.Diag(InstLoc,
+ S.Diag(InstLoc,
S.getLangOptions().CPlusPlus0x?
diag::err_explicit_instantiation_unqualified_wrong_namespace
: diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
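A sketch of the unqualified-name rule checked above:

    namespace N { template<typename T> void f(T) { } }
    using namespace N;
    template void f<int>(int);     // diagnosed: unqualified and outside N
    template void N::f<int>(int);  // OK: qualified names skip this check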
@@ -4625,9 +5273,9 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
if (!SS.isSet())
return false;
-
+
// C++0x [temp.explicit]p2:
- // If the explicit instantiation is for a member function, a member class
+ // If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
@@ -4635,7 +5283,7 @@ static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
// C++98 has the same restriction, just worded differently.
for (NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
NNS; NNS = NNS->getPrefix())
- if (Type *T = NNS->getAsType())
+ if (const Type *T = NNS->getAsType())
if (isa<TemplateSpecializationType>(T))
return true;
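An illustration of the simple-template-id requirement checked above:

    template<typename T> struct Outer { struct Inner { }; };
    template struct Outer<int>::Inner;  // OK: Outer<int> is a simple-template-id
    typedef Outer<int> OI;
    template struct OI::Inner;          // diagnosed: the specialization is not
                                        // named by a simple-template-id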
@@ -4680,34 +5328,32 @@ Sema::ActOnExplicitInstantiation(Scope *S,
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
- // definition and an explicit instantiation declaration. An explicit
- // instantiation declaration begins with the extern keyword. [...]
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
TemplateSpecializationKind TSK
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
-
+
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
// Check that the template argument list is well-formed for this
// template.
- TemplateArgumentListBuilder Converted(ClassTemplate->getTemplateParameters(),
- TemplateArgs.size());
+ llvm::SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted))
return true;
- assert((Converted.structuredSize() ==
- ClassTemplate->getTemplateParameters()->size()) &&
+ assert((Converted.size() == ClassTemplate->getTemplateParameters()->size()) &&
"Converted template argument list is too short!");
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = 0;
ClassTemplateSpecializationDecl *PrevDecl
- = ClassTemplate->findSpecialization(Converted.getFlatArguments(),
- Converted.flatSize(), InsertPos);
+ = ClassTemplate->findSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
TemplateSpecializationKind PrevDecl_TSK
= PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
@@ -4720,10 +5366,9 @@ Sema::ActOnExplicitInstantiation(Scope *S,
if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
SS.isSet()))
return true;
-
+
ClassTemplateSpecializationDecl *Specialization = 0;
- bool ReusedDecl = false;
bool HasNoEffect = false;
if (PrevDecl) {
if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK,
@@ -4745,7 +5390,6 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Specialization = PrevDecl;
Specialization->setLocation(TemplateNameLoc);
PrevDecl = 0;
- ReusedDecl = true;
}
}
@@ -4757,7 +5401,9 @@ Sema::ActOnExplicitInstantiation(Scope *S,
ClassTemplate->getDeclContext(),
TemplateNameLoc,
ClassTemplate,
- Converted, PrevDecl);
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
if (!HasNoEffect && !PrevDecl) {
@@ -4851,7 +5497,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
KWLoc, SS, Name, NameLoc, Attr, AS_none,
MultiTemplateParamsArg(*this, 0, 0),
- Owned, IsDependent);
+ Owned, IsDependent, false, false,
+ TypeResult());
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -4866,7 +5513,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
if (Tag->isInvalidDecl())
return true;
-
+
CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag);
CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
if (!Pattern) {
@@ -4877,32 +5524,32 @@ Sema::ActOnExplicitInstantiation(Scope *S,
}
// C++0x [temp.explicit]p2:
- // If the explicit instantiation is for a class or member class, the
- // elaborated-type-specifier in the declaration shall include a
+ // If the explicit instantiation is for a class or member class, the
+ // elaborated-type-specifier in the declaration shall include a
// simple-template-id.
//
// C++98 has the same restriction, just worded differently.
if (!ScopeSpecifierHasTemplateId(SS))
Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id)
<< Record << SS.getRange();
-
+
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
- // definition and an explicit instantiation declaration. An explicit
+ // definition and an explicit instantiation declaration. An explicit
// instantiation declaration begins with the extern keyword. [...]
TemplateSpecializationKind TSK
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
-
+
// C++0x [temp.explicit]p2:
// [...] An explicit instantiation shall appear in an enclosing
// namespace of its template. [...]
//
// This is C++ DR 275.
CheckExplicitInstantiationScope(*this, Record, NameLoc, true);
-
+
// Verify that it is okay to explicitly instantiate here.
- CXXRecordDecl *PrevDecl
+ CXXRecordDecl *PrevDecl
= cast_or_null<CXXRecordDecl>(Record->getPreviousDeclaration());
if (!PrevDecl && Record->getDefinition())
PrevDecl = Record;
@@ -4910,23 +5557,23 @@ Sema::ActOnExplicitInstantiation(Scope *S,
MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo();
bool HasNoEffect = false;
assert(MSInfo && "No member specialization information?");
- if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
+ if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
PrevDecl,
MSInfo->getTemplateSpecializationKind(),
- MSInfo->getPointOfInstantiation(),
+ MSInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
if (HasNoEffect)
return TagD;
}
-
+
CXXRecordDecl *RecordDef
= cast_or_null<CXXRecordDecl>(Record->getDefinition());
if (!RecordDef) {
// C++ [temp.explicit]p3:
- // A definition of a member class of a class template shall be in scope
+ // A definition of a member class of a class template shall be in scope
// at the point of an explicit instantiation of the member class.
- CXXRecordDecl *Def
+ CXXRecordDecl *Def
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (!Def) {
Diag(TemplateLoc, diag::err_explicit_instantiation_undefined_member)
@@ -4944,8 +5591,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
if (!RecordDef)
return true;
}
- }
-
+ }
+
// Instantiate all of the members of the class.
InstantiateClassMembers(NameLoc, RecordDef,
getTemplateInstantiationArgs(Record), TSK);
@@ -4974,7 +5621,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
diag::err_explicit_instantiation_requires_name)
<< D.getDeclSpec().getSourceRange()
<< D.getSourceRange();
-
+
return true;
}
@@ -4989,7 +5636,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
QualType R = T->getType();
if (R.isNull())
return true;
-
+
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
// Cannot explicitly instantiate a typedef.
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef)
@@ -5003,31 +5650,31 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// Presumably, this also applies to member functions of class templates as
// well.
if (D.getDeclSpec().isInlineSpecified() && getLangOptions().CPlusPlus0x)
- Diag(D.getDeclSpec().getInlineSpecLoc(),
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::err_explicit_instantiation_inline)
<<FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
-
+
// FIXME: check for constexpr specifier.
-
+
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
- // definition and an explicit instantiation declaration. An explicit
- // instantiation declaration begins with the extern keyword. [...]
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
TemplateSpecializationKind TSK
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
-
+
LookupResult Previous(*this, NameInfo, LookupOrdinaryName);
LookupParsedName(Previous, S, &D.getCXXScopeSpec());
if (!R->isFunctionType()) {
// C++ [temp.explicit]p1:
- // A [...] static data member of a class template can be explicitly
- // instantiated from the member definition associated with its class
+ // A [...] static data member of a class template can be explicitly
+ // instantiated from the member definition associated with its class
// template.
if (Previous.isAmbiguous())
return true;
-
+
VarDecl *Prev = Previous.getAsSingle<VarDecl>();
if (!Prev || !Prev->isStaticDataMember()) {
        // We expect to see a static data member here.
@@ -5038,54 +5685,54 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
Diag((*P)->getLocation(), diag::note_explicit_instantiation_here);
return true;
}
-
+
if (!Prev->getInstantiatedFromStaticDataMember()) {
// FIXME: Check for explicit specialization?
- Diag(D.getIdentifierLoc(),
+ Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_data_member_not_instantiated)
<< Prev;
Diag(Prev->getLocation(), diag::note_explicit_instantiation_here);
// FIXME: Can we provide a note showing where this was declared?
return true;
}
-
+
// C++0x [temp.explicit]p2:
- // If the explicit instantiation is for a member function, a member class
+ // If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
- Diag(D.getIdentifierLoc(),
+ Diag(D.getIdentifierLoc(),
diag::ext_explicit_instantiation_without_qualified_id)
<< Prev << D.getCXXScopeSpec().getRange();
-
+
// Check the scope of this explicit instantiation.
CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);
-
+
// Verify that it is okay to explicitly instantiate here.
MemberSpecializationInfo *MSInfo = Prev->getMemberSpecializationInfo();
assert(MSInfo && "Missing static data member specialization info?");
bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
MSInfo->getTemplateSpecializationKind(),
- MSInfo->getPointOfInstantiation(),
+ MSInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
if (HasNoEffect)
return (Decl*) 0;
-
+
// Instantiate static data member.
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateStaticDataMemberDefinition(D.getIdentifierLoc(), Prev);
-
+
// FIXME: Create an ExplicitInstantiation node?
return (Decl*) 0;
}
-
- // If the declarator is a template-id, translate the parser's template
+
+ // If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
@@ -5100,11 +5747,11 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
HasExplicitTemplateArgs = true;
TemplateArgsPtr.release();
}
-
+
// C++ [temp.explicit]p1:
- // A [...] function [...] can be explicitly instantiated from its template.
- // A member function [...] of a class template can be explicitly
- // instantiated from the member definition associated with its class
+ // A [...] function [...] can be explicitly instantiated from its template.
+ // A member function [...] of a class template can be explicitly
+ // instantiated from the member definition associated with its class
// template.
UnresolvedSet<8> Matches;
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
@@ -5121,7 +5768,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
}
}
-
+
FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Prev);
if (!FunTmpl)
continue;
@@ -5129,21 +5776,21 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
TemplateDeductionInfo Info(Context, D.getIdentifierLoc());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult TDK
- = DeduceTemplateArguments(FunTmpl,
+ = DeduceTemplateArguments(FunTmpl,
(HasExplicitTemplateArgs ? &TemplateArgs : 0),
R, Specialization, Info)) {
// FIXME: Keep track of almost-matches?
(void)TDK;
continue;
}
-
+
Matches.addDecl(Specialization, P.getAccess());
}
-
+
// Find the most specialized function template specialization.
UnresolvedSetIterator Result
- = getMostSpecialized(Matches.begin(), Matches.end(), TPOC_Other,
- D.getIdentifierLoc(),
+ = getMostSpecialized(Matches.begin(), Matches.end(), TPOC_Other, 0,
+ D.getIdentifierLoc(),
PDiag(diag::err_explicit_instantiation_not_known) << Name,
PDiag(diag::err_explicit_instantiation_ambiguous) << Name,
PDiag(diag::note_explicit_instantiation_candidate));
@@ -5153,17 +5800,17 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// Ignore access control bits, we don't need them for redeclaration checking.
FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
-
+
if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) {
- Diag(D.getIdentifierLoc(),
+ Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_member_function_not_instantiated)
<< Specialization
<< (Specialization->getTemplateSpecializationKind() ==
TSK_ExplicitSpecialization);
Diag(Specialization->getLocation(), diag::note_explicit_instantiation_here);
return true;
- }
-
+ }
+
FunctionDecl *PrevDecl = Specialization->getPreviousDeclaration();
if (!PrevDecl && Specialization->isThisDeclarationADefinition())
PrevDecl = Specialization;
@@ -5171,12 +5818,12 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (PrevDecl) {
bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK,
- PrevDecl,
- PrevDecl->getTemplateSpecializationKind(),
+ PrevDecl,
+ PrevDecl->getTemplateSpecializationKind(),
PrevDecl->getPointOfInstantiation(),
HasNoEffect))
return true;
-
+
// FIXME: We may still want to build some representation of this
// explicit specialization.
if (HasNoEffect)
@@ -5184,12 +5831,12 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
-
+
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization);
-
+
// C++0x [temp.explicit]p2:
- // If the explicit instantiation is for a member function, a member class
+ // If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
@@ -5197,18 +5844,18 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// C++98 has the same restriction, just worded differently.
FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate();
if (D.getName().getKind() != UnqualifiedId::IK_TemplateId && !FunTmpl &&
- D.getCXXScopeSpec().isSet() &&
+ D.getCXXScopeSpec().isSet() &&
!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
- Diag(D.getIdentifierLoc(),
+ Diag(D.getIdentifierLoc(),
diag::ext_explicit_instantiation_without_qualified_id)
<< Specialization << D.getCXXScopeSpec().getRange();
-
+
CheckExplicitInstantiationScope(*this,
- FunTmpl? (NamedDecl *)FunTmpl
+ FunTmpl? (NamedDecl *)FunTmpl
: Specialization->getInstantiatedFromMemberFunction(),
- D.getIdentifierLoc(),
+ D.getIdentifierLoc(),
D.getCXXScopeSpec().isSet());
-
+
// FIXME: Create some kind of ExplicitInstantiationDecl here.
return (Decl*) 0;
}
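
[Editorial aside, not part of the patch: the two forms of explicit instantiation handled throughout this function differ only in the extern keyword, as the quoted C++0x [temp.explicit]p2 text says. A minimal sketch with hypothetical names:

    template<typename T> T twice(T x) { return x + x; }

    // Explicit instantiation definition: emits twice<int> in this TU.
    template int twice<int>(int);

    // Explicit instantiation declaration ("extern template"): suppresses
    // implicit instantiation; the definition must be provided elsewhere.
    extern template double twice<double>(double);
]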
@@ -5238,8 +5885,8 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
TypeResult
-Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS, const IdentifierInfo &II,
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc) {
NestedNameSpecifier *NNS
= static_cast<NestedNameSpecifier *>(SS.getScopeRep());
@@ -5249,8 +5896,8 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
!getLangOptions().CPlusPlus0x)
Diag(TypenameLoc, diag::ext_typename_outside_of_template)
- << FixItHint::CreateRemoval(TypenameLoc);
-
+ << FixItHint::CreateRemoval(TypenameLoc);
+
QualType T = CheckTypenameType(ETK_Typename, NNS, II,
TypenameLoc, SS.getRange(), IdLoc);
if (T.isNull())
@@ -5268,23 +5915,23 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
TL.setQualifierRange(SS.getRange());
cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(IdLoc);
}
-
+
return CreateParsedType(T, TSI);
}
TypeResult
-Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS, SourceLocation TemplateLoc,
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, SourceLocation TemplateLoc,
ParsedType Ty) {
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
!getLangOptions().CPlusPlus0x)
Diag(TypenameLoc, diag::ext_typename_outside_of_template)
- << FixItHint::CreateRemoval(TypenameLoc);
-
+ << FixItHint::CreateRemoval(TypenameLoc);
+
TypeSourceInfo *InnerTSI = 0;
QualType T = GetTypeFromParser(Ty, &InnerTSI);
- assert(isa<TemplateSpecializationType>(T) &&
+ assert(isa<TemplateSpecializationType>(T) &&
"Expected a template specialization type");
if (computeDeclContext(SS, false)) {
@@ -5297,7 +5944,8 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
if (InnerTSI)
Builder.pushFullCopy(InnerTSI->getTypeLoc());
else
- Builder.push<TemplateSpecializationTypeLoc>(T).initialize(TemplateLoc);
+ Builder.push<TemplateSpecializationTypeLoc>(T).initialize(Context,
+ TemplateLoc);
/* Note: NNS already embedded in template specialization type T. */
T = Context.getElaboratedType(ETK_Typename, /*NNS=*/0, T);
@@ -5311,7 +5959,7 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
// TODO: it's really silly that we make a template specialization
// type earlier only to drop it again here.
- TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
+ const TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
DependentTemplateName *DTN =
TST->getTemplateName().getAsDependentTemplateName();
assert(DTN && "dependent template has non-dependent name?");
@@ -5333,7 +5981,8 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
for (unsigned I = 0, E = TST->getNumArgs(); I != E; ++I)
TL.setArgLocInfo(I, TSTL.getArgLocInfo(I));
} else {
- TL.initializeLocal(SourceLocation());
+ // FIXME: Poor source-location information here.
+ TL.initializeLocal(Context, TemplateLoc);
}
TL.setKeywordLoc(TypenameLoc);
TL.setQualifierRange(SS.getRange());
@@ -5377,13 +6026,30 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
case LookupResult::NotFound:
DiagID = diag::err_typename_nested_not_found;
break;
-
+
+ case LookupResult::FoundUnresolvedValue: {
+ // We found a using declaration that is a value. Most likely, the using
+ // declaration itself is meant to have the 'typename' keyword.
+ SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : NNSRange.getBegin(),
+ IILoc);
+ Diag(IILoc, diag::err_typename_refers_to_using_value_decl)
+ << Name << Ctx << FullRange;
+ if (UnresolvedUsingValueDecl *Using
+ = dyn_cast<UnresolvedUsingValueDecl>(Result.getRepresentativeDecl())){
+ SourceLocation Loc = Using->getTargetNestedNameRange().getBegin();
+ Diag(Loc, diag::note_using_value_decl_missing_typename)
+ << FixItHint::CreateInsertion(Loc, "typename ");
+ }
+ }
+ // Fall through to create a dependent typename type, from which we can recover
+ // better.
+
case LookupResult::NotFoundInCurrentInstantiation:
// Okay, it's a member of an unknown instantiation.
return Context.getDependentNameType(Keyword, NNS, &II);
case LookupResult::Found:
- if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// We found a type. Build an ElaboratedType, since the
// typename-specifier was just sugar.
return Context.getElaboratedType(ETK_Typename, NNS,
@@ -5394,7 +6060,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
Referenced = Result.getFoundDecl();
break;
- case LookupResult::FoundUnresolvedValue:
+
llvm_unreachable("unresolved using decl in non-dependent context");
return QualType();
@@ -5427,7 +6093,7 @@ namespace {
public:
typedef TreeTransform<CurrentInstantiationRebuilder> inherited;
-
+
CurrentInstantiationRebuilder(Sema &SemaRef,
SourceLocation Loc,
DeclarationName Entity)
@@ -5507,7 +6173,7 @@ bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
NestedNameSpecifier *NNS = static_cast<NestedNameSpecifier*>(SS.getScopeRep());
CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(),
DeclarationName());
- NestedNameSpecifier *Rebuilt =
+ NestedNameSpecifier *Rebuilt =
Rebuilder.TransformNestedNameSpecifier(NNS, SS.getRange());
if (!Rebuilt) return true;
@@ -5520,99 +6186,38 @@ bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args) {
- // FIXME: For variadic templates, we'll need to get the structured list.
- return getTemplateArgumentBindingsText(Params, Args.getFlatArgumentList(),
- Args.flat_size());
+ return getTemplateArgumentBindingsText(Params, Args.data(), Args.size());
}
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs) {
- std::string Result;
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream Out(Str);
if (!Params || Params->size() == 0 || NumArgs == 0)
- return Result;
-
+ return std::string();
+
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
if (I >= NumArgs)
break;
-
+
if (I == 0)
- Result += "[with ";
+ Out << "[with ";
else
- Result += ", ";
-
+ Out << ", ";
+
if (const IdentifierInfo *Id = Params->getParam(I)->getIdentifier()) {
- Result += Id->getName();
+ Out << Id->getName();
} else {
- Result += '$';
- Result += llvm::utostr(I);
- }
-
- Result += " = ";
-
- switch (Args[I].getKind()) {
- case TemplateArgument::Null:
- Result += "<no value>";
- break;
-
- case TemplateArgument::Type: {
- std::string TypeStr;
- Args[I].getAsType().getAsStringInternal(TypeStr,
- Context.PrintingPolicy);
- Result += TypeStr;
- break;
- }
-
- case TemplateArgument::Declaration: {
- bool Unnamed = true;
- if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Args[I].getAsDecl())) {
- if (ND->getDeclName()) {
- Unnamed = false;
- Result += ND->getNameAsString();
- }
- }
-
- if (Unnamed) {
- Result += "<anonymous>";
- }
- break;
- }
-
- case TemplateArgument::Template: {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
- Args[I].getAsTemplate().print(OS, Context.PrintingPolicy);
- Result += OS.str();
- break;
- }
-
- case TemplateArgument::Integral: {
- Result += Args[I].getAsIntegral()->toString(10);
- break;
- }
-
- case TemplateArgument::Expression: {
- // FIXME: This is non-optimal, since we're regurgitating the
- // expression we were given.
- std::string Str;
- {
- llvm::raw_string_ostream OS(Str);
- Args[I].getAsExpr()->printPretty(OS, Context, 0,
- Context.PrintingPolicy);
- }
- Result += Str;
- break;
- }
-
- case TemplateArgument::Pack:
- // FIXME: Format template argument packs
- Result += "<template argument pack>";
- break;
+ Out << '$' << I;
}
+
+ Out << " = ";
+ Args[I].print(Context.PrintingPolicy, Out);
}
-
- Result += ']';
- return Result;
+
+ Out << ']';
+ return Out.str();
}
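
[Editorial aside, not part of the patch: the rewritten getTemplateArgumentBindingsText streams into a stack buffer instead of concatenating std::string pieces, and delegates per-argument printing to TemplateArgument::print. A minimal sketch of the same buffering pattern, assuming the LLVM APIs of this era; formatBinding is a hypothetical helper:

    #include <string>
    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    // Build a short message in a 128-byte stack buffer, spilling to the
    // heap only if the text outgrows it.
    std::string formatBinding(llvm::StringRef Name, unsigned Index) {
      llvm::SmallString<128> Str;
      llvm::raw_svector_ostream Out(Str);
      Out << "[with " << Name << " = $" << Index << ']';
      return Out.str().str(); // flush, then copy out of the buffer
    }
]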
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
index 5c77ed6..bd0a618 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -12,6 +12,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/SemaDiagnostic.h" // FIXME: temporary!
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/AST/ASTContext.h"
@@ -20,6 +21,8 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/BitVector.h"
+#include "TreeTransform.h"
#include <algorithm>
namespace clang {
@@ -47,7 +50,10 @@ namespace clang {
/// \brief Allow non-dependent types to differ, e.g., when performing
/// template argument deduction from a function call where conversions
/// may apply.
- TDF_SkipNonDependent = 0x08
+ TDF_SkipNonDependent = 0x08,
+ /// \brief Whether we are performing template argument deduction for
+ /// parameters and arguments in a top-level parameter-type-list
+ TDF_TopLevelParameterTypeList = 0x10
};
}
@@ -57,9 +63,9 @@ using namespace clang;
/// necessary to compare their values regardless of underlying type.
static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
if (Y.getBitWidth() > X.getBitWidth())
- X.extend(Y.getBitWidth());
+ X = X.extend(Y.getBitWidth());
else if (Y.getBitWidth() < X.getBitWidth())
- Y.extend(X.getBitWidth());
+ Y = Y.extend(X.getBitWidth());
// If there is a signedness mismatch, correct it.
if (X.isSigned() != Y.isSigned()) {
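
[Editorial aside, not part of the patch: this hunk tracks the APInt/APSInt API change from mutating resize operations to value-returning ones, hence the new X = X.extend(...) form. A minimal sketch of the value-returning usage; widenToMatch is a hypothetical helper:

    #include <cassert>
    #include "llvm/ADT/APSInt.h"

    // Widen the narrower of two APSInts so both have the same bit width;
    // extend() returns the widened value rather than resizing in place.
    void widenToMatch(llvm::APSInt &X, llvm::APSInt &Y) {
      if (Y.getBitWidth() > X.getBitWidth())
        X = X.extend(Y.getBitWidth());
      else if (Y.getBitWidth() < X.getBitWidth())
        Y = Y.extend(X.getBitWidth());
      assert(X.getBitWidth() == Y.getBitWidth());
    }
]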
@@ -78,9 +84,54 @@ static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S,
TemplateParameterList *TemplateParams,
const TemplateArgument &Param,
- const TemplateArgument &Arg,
+ TemplateArgument Arg,
TemplateDeductionInfo &Info,
- llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced);
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced);
+
+/// \brief Whether template argument deduction for two reference parameters
+/// resulted in the argument type, parameter type, or neither type being more
+/// qualified than the other.
+enum DeductionQualifierComparison {
+ NeitherMoreQualified = 0,
+ ParamMoreQualified,
+ ArgMoreQualified
+};
+
+/// \brief Stores the result of comparing two reference parameters while
+/// performing template argument deduction for partial ordering of function
+/// templates.
+struct RefParamPartialOrderingComparison {
+ /// \brief Whether the parameter type is an rvalue reference type.
+ bool ParamIsRvalueRef;
+ /// \brief Whether the argument type is an rvalue reference type.
+ bool ArgIsRvalueRef;
+
+ /// \brief Whether the parameter or argument (or neither) is more qualified.
+ DeductionQualifierComparison Qualifiers;
+};
+
+
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false,
+ llvm::SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons = 0);
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ bool NumberOfArgumentsMustMatch = true);
/// \brief If the given expression is of a form that permits the deduction
/// of a non-type template parameter, return the declaration of that
@@ -95,6 +146,141 @@ static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
return 0;
}
+/// \brief Determine whether two declaration pointers refer to the same
+/// declaration.
+static bool isSameDeclaration(Decl *X, Decl *Y) {
+ if (!X || !Y)
+ return !X && !Y;
+
+ if (NamedDecl *NX = dyn_cast<NamedDecl>(X))
+ X = NX->getUnderlyingDecl();
+ if (NamedDecl *NY = dyn_cast<NamedDecl>(Y))
+ Y = NY->getUnderlyingDecl();
+
+ return X->getCanonicalDecl() == Y->getCanonicalDecl();
+}
+
+/// \brief Verify that the given, deduced template arguments are compatible.
+///
+/// \returns The deduced template argument, or a NULL template argument if
+/// the deduced template arguments were incompatible.
+static DeducedTemplateArgument
+checkDeducedTemplateArguments(ASTContext &Context,
+ const DeducedTemplateArgument &X,
+ const DeducedTemplateArgument &Y) {
+ // We have no deduction for one or both of the arguments; they're compatible.
+ if (X.isNull())
+ return Y;
+ if (Y.isNull())
+ return X;
+
+ switch (X.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Non-deduced template arguments handled above");
+
+ case TemplateArgument::Type:
+ // If two template type arguments have the same type, they're compatible.
+ if (Y.getKind() == TemplateArgument::Type &&
+ Context.hasSameType(X.getAsType(), Y.getAsType()))
+ return X;
+
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Integral:
+ // If we deduced a constant in one case and either a dependent expression or
+ // declaration in another case, keep the integral constant.
+ // If both are integral constants with the same value, keep that value.
+ if (Y.getKind() == TemplateArgument::Expression ||
+ Y.getKind() == TemplateArgument::Declaration ||
+ (Y.getKind() == TemplateArgument::Integral &&
+ hasSameExtendedValue(*X.getAsIntegral(), *Y.getAsIntegral())))
+ return DeducedTemplateArgument(X,
+ X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Template:
+ if (Y.getKind() == TemplateArgument::Template &&
+ Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::TemplateExpansion:
+ if (Y.getKind() == TemplateArgument::TemplateExpansion &&
+ Context.hasSameTemplateName(X.getAsTemplateOrTemplatePattern(),
+ Y.getAsTemplateOrTemplatePattern()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Expression:
+ // If we deduced a dependent expression in one case and either an integral
+ // constant or a declaration in another case, keep the integral constant
+ // or declaration.
+ if (Y.getKind() == TemplateArgument::Integral ||
+ Y.getKind() == TemplateArgument::Declaration)
+ return DeducedTemplateArgument(Y, X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ if (Y.getKind() == TemplateArgument::Expression) {
+ // Compare the expressions for equality
+ llvm::FoldingSetNodeID ID1, ID2;
+ X.getAsExpr()->Profile(ID1, Context, true);
+ Y.getAsExpr()->Profile(ID2, Context, true);
+ if (ID1 == ID2)
+ return X;
+ }
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Declaration:
+ // If we deduced a declaration and a dependent expression, keep the
+ // declaration.
+ if (Y.getKind() == TemplateArgument::Expression)
+ return X;
+
+ // If we deduced a declaration and an integral constant, keep the
+ // integral constant.
+ if (Y.getKind() == TemplateArgument::Integral)
+ return Y;
+
+ // If we deduced two declarations, make sure they refer to the
+ // same declaration.
+ if (Y.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Pack:
+ if (Y.getKind() != TemplateArgument::Pack ||
+ X.pack_size() != Y.pack_size())
+ return DeducedTemplateArgument();
+
+ for (TemplateArgument::pack_iterator XA = X.pack_begin(),
+ XAEnd = X.pack_end(),
+ YA = Y.pack_begin();
+ XA != XAEnd; ++XA, ++YA) {
+ if (checkDeducedTemplateArguments(Context,
+ DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()))
+ .isNull())
+ return DeducedTemplateArgument();
+ }
+
+ return X;
+ }
+
+ return DeducedTemplateArgument();
+}
+
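
[Editorial aside, not part of the patch: checkDeducedTemplateArguments centralizes the consistency rule of C++ [temp.deduct.type]p2, under which different parameter/argument pairs that yield different deduced values make deduction fail. The user-visible effect, with hypothetical names:

    template<typename T> void same(T, T) {}

    void demo() {
      same(1, 2);      // OK: both pairs deduce T = int.
      // same(1, 2.0); // Error: T deduced as both int and double;
      //               // reported through TDK_Inconsistent above.
    }
]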
/// \brief Deduce the value of the given non-type template parameter
/// from the given constant.
static Sema::TemplateDeductionResult
@@ -107,31 +293,18 @@ DeduceNonTypeTemplateArgument(Sema &S,
assert(NTTP->getDepth() == 0 &&
"Cannot deduce non-type template argument with depth > 0");
- if (Deduced[NTTP->getIndex()].isNull()) {
- Deduced[NTTP->getIndex()] = DeducedTemplateArgument(Value, ValueType,
- DeducedFromArrayBound);
- return Sema::TDK_Success;
- }
-
- if (Deduced[NTTP->getIndex()].getKind() != TemplateArgument::Integral) {
+ DeducedTemplateArgument NewDeduced(Value, ValueType, DeducedFromArrayBound);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
Info.Param = NTTP;
Info.FirstArg = Deduced[NTTP->getIndex()];
- Info.SecondArg = TemplateArgument(Value, ValueType);
- return Sema::TDK_Inconsistent;
- }
-
- // Extent the smaller of the two values.
- llvm::APSInt PrevValue = *Deduced[NTTP->getIndex()].getAsIntegral();
- if (!hasSameExtendedValue(PrevValue, Value)) {
- Info.Param = NTTP;
- Info.FirstArg = Deduced[NTTP->getIndex()];
- Info.SecondArg = TemplateArgument(Value, ValueType);
+ Info.SecondArg = NewDeduced;
return Sema::TDK_Inconsistent;
}
- if (!DeducedFromArrayBound)
- Deduced[NTTP->getIndex()].setDeducedFromArrayBound(false);
-
+ Deduced[NTTP->getIndex()] = Result;
return Sema::TDK_Success;
}
@@ -150,30 +323,19 @@ DeduceNonTypeTemplateArgument(Sema &S,
assert((Value->isTypeDependent() || Value->isValueDependent()) &&
"Expression template argument must be type- or value-dependent.");
- if (Deduced[NTTP->getIndex()].isNull()) {
- Deduced[NTTP->getIndex()] = TemplateArgument(Value->Retain());
- return Sema::TDK_Success;
- }
-
- if (Deduced[NTTP->getIndex()].getKind() == TemplateArgument::Integral) {
- // Okay, we deduced a constant in one case and a dependent expression
- // in another case. FIXME: Later, we will check that instantiating the
- // dependent expression gives us the constant value.
- return Sema::TDK_Success;
- }
+ DeducedTemplateArgument NewDeduced(Value);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
- if (Deduced[NTTP->getIndex()].getKind() == TemplateArgument::Expression) {
- // Compare the expressions for equality
- llvm::FoldingSetNodeID ID1, ID2;
- Deduced[NTTP->getIndex()].getAsExpr()->Profile(ID1, S.Context, true);
- Value->Profile(ID2, S.Context, true);
- if (ID1 == ID2)
- return Sema::TDK_Success;
-
- // FIXME: Fill in argument mismatch information
- return Sema::TDK_NonDeducedMismatch;
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
}
+ Deduced[NTTP->getIndex()] = Result;
return Sema::TDK_Success;
}
@@ -189,28 +351,19 @@ DeduceNonTypeTemplateArgument(Sema &S,
llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
assert(NTTP->getDepth() == 0 &&
"Cannot deduce non-type template argument with depth > 0");
-
- if (Deduced[NTTP->getIndex()].isNull()) {
- Deduced[NTTP->getIndex()] = TemplateArgument(D->getCanonicalDecl());
- return Sema::TDK_Success;
- }
-
- if (Deduced[NTTP->getIndex()].getKind() == TemplateArgument::Expression) {
- // Okay, we deduced a declaration in one case and a dependent expression
- // in another case.
- return Sema::TDK_Success;
- }
-
- if (Deduced[NTTP->getIndex()].getKind() == TemplateArgument::Declaration) {
- // Compare the declarations for equality
- if (Deduced[NTTP->getIndex()].getAsDecl()->getCanonicalDecl() ==
- D->getCanonicalDecl())
- return Sema::TDK_Success;
-
- // FIXME: Fill in argument mismatch information
- return Sema::TDK_NonDeducedMismatch;
+
+ DeducedTemplateArgument NewDeduced(D? D->getCanonicalDecl() : 0);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
}
-
+
+ Deduced[NTTP->getIndex()] = Result;
return Sema::TDK_Success;
}
@@ -227,33 +380,28 @@ DeduceTemplateArguments(Sema &S,
// so there is nothing that we can deduce.
return Sema::TDK_Success;
}
-
+
if (TemplateTemplateParmDecl *TempParam
= dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
- // Bind the template template parameter to the given template name.
- TemplateArgument &ExistingArg = Deduced[TempParam->getIndex()];
- if (ExistingArg.isNull()) {
- // This is the first deduction for this template template parameter.
- ExistingArg = TemplateArgument(S.Context.getCanonicalTemplateName(Arg));
- return Sema::TDK_Success;
+ DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg));
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[TempParam->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = TempParam;
+ Info.FirstArg = Deduced[TempParam->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
}
-
- // Verify that the previous binding matches this deduction.
- assert(ExistingArg.getKind() == TemplateArgument::Template);
- if (S.Context.hasSameTemplateName(ExistingArg.getAsTemplate(), Arg))
- return Sema::TDK_Success;
-
- // Inconsistent deduction.
- Info.Param = TempParam;
- Info.FirstArg = ExistingArg;
- Info.SecondArg = TemplateArgument(Arg);
- return Sema::TDK_Inconsistent;
+
+ Deduced[TempParam->getIndex()] = Result;
+ return Sema::TDK_Success;
}
-
+
// Verify that the two template names are equivalent.
if (S.Context.hasSameTemplateName(Param, Arg))
return Sema::TDK_Success;
-
+
// Mismatch of non-dependent template parameter to argument.
Info.FirstArg = TemplateArgument(Param);
Info.SecondArg = TemplateArgument(Arg);
@@ -300,17 +448,13 @@ DeduceTemplateArguments(Sema &S,
// Perform template argument deduction on each template
- // argument.
- unsigned NumArgs = std::min(SpecArg->getNumArgs(), Param->getNumArgs());
- for (unsigned I = 0; I != NumArgs; ++I)
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- Param->getArg(I),
- SpecArg->getArg(I),
- Info, Deduced))
- return Result;
-
- return Sema::TDK_Success;
+ // argument. Ignore any missing/extra arguments, since they could be
+ // filled in by default arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getArgs(), SpecArg->getNumArgs(),
+ Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/false);
}
// If the argument type is a class template specialization, we
@@ -334,20 +478,12 @@ DeduceTemplateArguments(Sema &S,
Info, Deduced))
return Result;
- unsigned NumArgs = Param->getNumArgs();
- const TemplateArgumentList &ArgArgs = SpecArg->getTemplateArgs();
- if (NumArgs != ArgArgs.size())
- return Sema::TDK_NonDeducedMismatch;
-
- for (unsigned I = 0; I != NumArgs; ++I)
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- Param->getArg(I),
- ArgArgs.get(I),
- Info, Deduced))
- return Result;
-
- return Sema::TDK_Success;
+ // Perform template argument deduction for the template arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getTemplateArgs().data(),
+ SpecArg->getTemplateArgs().size(),
+ Info, Deduced);
}
/// \brief Determines whether the given type is an opaque type that
@@ -359,6 +495,7 @@ static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
case Type::DependentName:
case Type::Decltype:
case Type::UnresolvedUsing:
+ case Type::TemplateTypeParm:
return true;
case Type::ConstantArray:
@@ -373,6 +510,296 @@ static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
}
}
+/// \brief Retrieve the depth and index of a template parameter.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+/// \brief Retrieve the depth and index of an unexpanded parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(UnexpandedParameterPack UPP) {
+ if (const TemplateTypeParmType *TTP
+ = UPP.first.dyn_cast<const TemplateTypeParmType *>())
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ return getDepthAndIndex(UPP.first.get<NamedDecl *>());
+}
+
+/// \brief Helper function to build a TemplateParameter when we don't
+/// know its type statically.
+static TemplateParameter makeTemplateParameter(Decl *D) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TemplateParameter(TTP);
+ else if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return TemplateParameter(NTTP);
+
+ return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
+}
+
+/// \brief Prepare to perform template argument deduction for all of the
+/// arguments in a set of argument packs.
+static void PrepareArgumentPackDeduction(Sema &S,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ const llvm::SmallVectorImpl<unsigned> &PackIndices,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
+ llvm::SmallVectorImpl<
+ llvm::SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks) {
+ // Save the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ // Save the previously-deduced argument pack, then clear it out so that we
+ // can deduce a new argument pack.
+ SavedPacks[I] = Deduced[PackIndices[I]];
+ Deduced[PackIndices[I]] = TemplateArgument();
+
+ // If the template argument pack was explicitly specified, add that to
+ // the set of deduced arguments.
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (NamedDecl *PartiallySubstitutedPack
+ = S.CurrentInstantiationScope->getPartiallySubstitutedPack(
+ &ExplicitArgs,
+ &NumExplicitArgs)) {
+ if (getDepthAndIndex(PartiallySubstitutedPack).second == PackIndices[I])
+ NewlyDeducedPacks[I].append(ExplicitArgs,
+ ExplicitArgs + NumExplicitArgs);
+ }
+ }
+}
+
+/// \brief Finish template argument deduction for a set of argument packs,
+/// producing the argument packs and checking for consistency with prior
+/// deductions.
+static Sema::TemplateDeductionResult
+FinishArgumentPackDeduction(Sema &S,
+ TemplateParameterList *TemplateParams,
+ bool HasAnyArguments,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ const llvm::SmallVectorImpl<unsigned> &PackIndices,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
+ llvm::SmallVectorImpl<
+ llvm::SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks,
+ TemplateDeductionInfo &Info) {
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ if (HasAnyArguments && NewlyDeducedPacks[I].empty()) {
+ // We were not able to deduce anything for this parameter pack,
+ // so just restore the saved argument pack.
+ Deduced[PackIndices[I]] = SavedPacks[I];
+ continue;
+ }
+
+ DeducedTemplateArgument NewPack;
+
+ if (NewlyDeducedPacks[I].empty()) {
+ // If we deduced an empty argument pack, create it now.
+ NewPack = DeducedTemplateArgument(TemplateArgument(0, 0));
+ } else {
+ TemplateArgument *ArgumentPack
+ = new (S.Context) TemplateArgument [NewlyDeducedPacks[I].size()];
+ std::copy(NewlyDeducedPacks[I].begin(), NewlyDeducedPacks[I].end(),
+ ArgumentPack);
+ NewPack
+ = DeducedTemplateArgument(TemplateArgument(ArgumentPack,
+ NewlyDeducedPacks[I].size()),
+ NewlyDeducedPacks[I][0].wasDeducedFromArrayBound());
+ }
+
+ DeducedTemplateArgument Result
+ = checkDeducedTemplateArguments(S.Context, SavedPacks[I], NewPack);
+ if (Result.isNull()) {
+ Info.Param
+ = makeTemplateParameter(TemplateParams->getParam(PackIndices[I]));
+ Info.FirstArg = SavedPacks[I];
+ Info.SecondArg = NewPack;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[PackIndices[I]] = Result;
+ }
+
+ return Sema::TDK_Success;
+}
+
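
[Editorial aside, not part of the patch: Prepare/FinishArgumentPackDeduction bracket the per-pack bookkeeping: prior deductions are saved and cleared, each expansion's deductions are collected, then rebuilt into TemplateArgument packs and checked against the saved values. The user-visible effect, with hypothetical names:

    template<typename... Ts> void collect(Ts...) {}

    void demo() {
      collect(1, 2.0, 'c'); // Ts deduced as the pack {int, double, char}
      collect();            // an empty pack is deduced
    }
]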
+/// \brief Deduce the template arguments by comparing the list of parameter
+/// types to the list of argument types, as in the parameter-type-lists of
+/// function types (C++ [temp.deduct.type]p10).
+///
+/// \param S The semantic analysis object within which we are deducing
+///
+/// \param TemplateParams The template parameters that we are deducing
+///
+/// \param Params The list of parameter types
+///
+/// \param NumParams The number of types in \c Params
+///
+/// \param Args The list of argument types
+///
+/// \param NumArgs The number of types in \c Args
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
+/// how template argument deduction is performed.
+///
+/// \param PartialOrdering If true, we are performing template argument
+/// deduction during partial ordering for a call
+/// (C++0x [temp.deduct.partial]).
+///
+/// \param RefParamComparisons If we're performing template argument deduction
+/// in the context of partial ordering, the set of qualifier comparisons.
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const QualType *Params, unsigned NumParams,
+ const QualType *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false,
+ llvm::SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons = 0) {
+ // Fast-path check to see if we have too many/too few arguments.
+ if (NumParams != NumArgs &&
+ !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) &&
+ !(NumArgs && isa<PackExpansionType>(Args[NumArgs - 1])))
+ return Sema::TDK_NonDeducedMismatch;
+
+ // C++0x [temp.deduct.type]p10:
+ // Similarly, if P has a form that contains (T), then each parameter type
+ // Pi of the respective parameter-type-list of P is compared with the
+ // corresponding parameter type Ai of the corresponding parameter-type-list
+ // of A. [...]
+ unsigned ArgIdx = 0, ParamIdx = 0;
+ for (; ParamIdx != NumParams; ++ParamIdx) {
+ // Check argument types.
+ const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(Params[ParamIdx]);
+ if (!Expansion) {
+ // Simple case: compare the parameter and argument types at this point.
+
+ // Make sure we have an argument.
+ if (ArgIdx >= NumArgs)
+ return Sema::TDK_NonDeducedMismatch;
+
+ if (isa<PackExpansionType>(Args[ArgIdx])) {
+ // C++0x [temp.deduct.type]p22:
+ // If the original function parameter associated with A is a function
+ // parameter pack and the function parameter associated with P is not
+ // a function parameter pack, then template argument deduction fails.
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Params[ParamIdx],
+ Args[ArgIdx],
+ Info, Deduced, TDF,
+ PartialOrdering,
+ RefParamComparisons))
+ return Result;
+
+ ++ArgIdx;
+ continue;
+ }
+
+ // C++0x [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // - A function parameter pack that does not occur at the end of the
+ // parameter-declaration-clause.
+ if (ParamIdx + 1 < NumParams)
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p10:
+ // If the parameter-declaration corresponding to Pi is a function
+ // parameter pack, then the type of its declarator-id is compared with
+ // each remaining parameter type in the parameter-type-list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by the function parameter pack.
+
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ llvm::SmallVector<unsigned, 2> PackIndices;
+ QualType Pattern = Expansion->getPattern();
+ {
+ llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
+ }
+ }
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ llvm::SmallVector<llvm::SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ llvm::SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(S, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+
+ bool HasAnyArguments = false;
+ for (; ArgIdx < NumArgs; ++ArgIdx) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
+ Info, Deduced, TDF, PartialOrdering,
+ RefParamComparisons))
+ return Result;
+
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (Sema::TemplateDeductionResult Result
+ = FinishArgumentPackDeduction(S, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
+ return Result;
+ }
+
+ // Make sure we don't have any extra arguments.
+ if (ArgIdx < NumArgs)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return Sema::TDK_Success;
+}
+
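
[Editorial aside, not part of the patch: this routine implements C++0x [temp.deduct.type]p10 for parameter-type-lists, including a trailing function parameter pack that absorbs the remaining argument types. A sketch with hypothetical names:

    template<typename R, typename... Args>
    void inspect(R (*)(Args...)) {}

    int add(int, int)  { return 0; }
    double neg(double) { return 0.0; }

    void demo() {
      inspect(add); // R = int,    Args = {int, int}
      inspect(neg); // R = double, Args = {double}
    }
]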
/// \brief Deduce the template arguments by comparing the parameter type and
/// the argument type (C++ [temp.deduct.type]).
///
@@ -391,6 +818,12 @@ static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
/// how template argument deduction is performed.
///
+/// \param PartialOrdering Whether we're performing template argument deduction
+/// in the context of partial ordering (C++0x [temp.deduct.partial]).
+///
+/// \param RefParamComparisons If we're performing template argument deduction
+/// in the context of partial ordering, the set of qualifier comparisons.
+///
/// \returns the result of template argument deduction so far. Note that a
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
@@ -400,31 +833,105 @@ DeduceTemplateArguments(Sema &S,
QualType ParamIn, QualType ArgIn,
TemplateDeductionInfo &Info,
llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF) {
+ unsigned TDF,
+ bool PartialOrdering,
+ llvm::SmallVectorImpl<RefParamPartialOrderingComparison> *RefParamComparisons) {
// We only want to look at the canonical types, since typedefs and
// sugar are not part of template argument deduction.
QualType Param = S.Context.getCanonicalType(ParamIn);
QualType Arg = S.Context.getCanonicalType(ArgIn);
- // C++0x [temp.deduct.call]p4 bullet 1:
- // - If the original P is a reference type, the deduced A (i.e., the type
- // referred to by the reference) can be more cv-qualified than the
- // transformed A.
- if (TDF & TDF_ParamWithReferenceType) {
- Qualifiers Quals;
- QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals);
- Quals.setCVRQualifiers(Quals.getCVRQualifiers() &
- Arg.getCVRQualifiersThroughArrayTypes());
- Param = S.Context.getQualifiedType(UnqualParam, Quals);
+ // If the argument type is a pack expansion, look at its pattern.
+ // This isn't explicitly called out in the standard.
+ if (const PackExpansionType *ArgExpansion
+ = dyn_cast<PackExpansionType>(Arg))
+ Arg = ArgExpansion->getPattern();
+
+ if (PartialOrdering) {
+ // C++0x [temp.deduct.partial]p5:
+ // Before the partial ordering is done, certain transformations are
+ // performed on the types used for partial ordering:
+ // - If P is a reference type, P is replaced by the type referred to.
+ const ReferenceType *ParamRef = Param->getAs<ReferenceType>();
+ if (ParamRef)
+ Param = ParamRef->getPointeeType();
+
+ // - If A is a reference type, A is replaced by the type referred to.
+ const ReferenceType *ArgRef = Arg->getAs<ReferenceType>();
+ if (ArgRef)
+ Arg = ArgRef->getPointeeType();
+
+ if (RefParamComparisons && ParamRef && ArgRef) {
+ // C++0x [temp.deduct.partial]p6:
+ // If both P and A were reference types (before being replaced with the
+ // type referred to above), determine which of the two types (if any) is
+ // more cv-qualified than the other; otherwise the types are considered
+ // to be equally cv-qualified for partial ordering purposes. The result
+ // of this determination will be used below.
+ //
+ // We save this information for later, using it only when deduction
+ // succeeds in both directions.
+ RefParamPartialOrderingComparison Comparison;
+ Comparison.ParamIsRvalueRef = ParamRef->getAs<RValueReferenceType>();
+ Comparison.ArgIsRvalueRef = ArgRef->getAs<RValueReferenceType>();
+ Comparison.Qualifiers = NeitherMoreQualified;
+ if (Param.isMoreQualifiedThan(Arg))
+ Comparison.Qualifiers = ParamMoreQualified;
+ else if (Arg.isMoreQualifiedThan(Param))
+ Comparison.Qualifiers = ArgMoreQualified;
+ RefParamComparisons->push_back(Comparison);
+ }
+
+ // C++0x [temp.deduct.partial]p7:
+ // Remove any top-level cv-qualifiers:
+ // - If P is a cv-qualified type, P is replaced by the cv-unqualified
+ // version of P.
+ Param = Param.getUnqualifiedType();
+ // - If A is a cv-qualified type, A is replaced by the cv-unqualified
+ // version of A.
+ Arg = Arg.getUnqualifiedType();
+ } else {
+ // C++0x [temp.deduct.call]p4 bullet 1:
+ // - If the original P is a reference type, the deduced A (i.e., the type
+ // referred to by the reference) can be more cv-qualified than the
+ // transformed A.
+ if (TDF & TDF_ParamWithReferenceType) {
+ Qualifiers Quals;
+ QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals);
+ Quals.setCVRQualifiers(Quals.getCVRQualifiers() &
+ Arg.getCVRQualifiers());
+ Param = S.Context.getQualifiedType(UnqualParam, Quals);
+ }
+
+ if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) {
+ // C++0x [temp.deduct.type]p10:
+ // If P and A are function types that originated from deduction when
+ // taking the address of a function template (14.8.2.2) or when deducing
+ // template arguments from a function declaration (14.8.2.6) and Pi and
+ // Ai are parameters of the top-level parameter-type-list of P and A,
+ // respectively, Pi is adjusted if it is an rvalue reference to a
+ // cv-unqualified template parameter and Ai is an lvalue reference, in
+ // which case the type of Pi is changed to be the template parameter
+ // type (i.e., T&& is changed to simply T). [ Note: As a result, when
+ // Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be
+ // deduced as X&. - end note ]
+ TDF &= ~TDF_TopLevelParameterTypeList;
+
+ if (const RValueReferenceType *ParamRef
+ = Param->getAs<RValueReferenceType>()) {
+ if (isa<TemplateTypeParmType>(ParamRef->getPointeeType()) &&
+ !ParamRef->getPointeeType().getQualifiers())
+ if (Arg->isLValueReferenceType())
+ Param = ParamRef->getPointeeType();
+ }
+ }
}
// If the parameter type is not dependent, there is nothing to deduce.
if (!Param->isDependentType()) {
- if (!(TDF & TDF_SkipNonDependent) && Param != Arg) {
-
+ if (!(TDF & TDF_SkipNonDependent) && Param != Arg)
return Sema::TDK_NonDeducedMismatch;
- }
-
+
return Sema::TDK_Success;
}
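
[Editorial aside, not part of the patch: the new TDF_TopLevelParameterTypeList handling implements the adjustment quoted above: when deducing from a function type, an rvalue-reference parameter T&& (with cv-unqualified T) is compared as plain T against an lvalue-reference argument type. A minimal sketch with hypothetical names:

    template<typename T> void f(T&&) {}

    void demo() {
      // P is void(T&&), A is void(int&). Pi = T&& is adjusted to T,
      // so T is deduced as int& and T&& collapses back to int&.
      void (*fp)(int&) = &f;
      (void)fp;
    }
]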
@@ -464,27 +971,24 @@ DeduceTemplateArguments(Sema &S,
assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0");
assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
QualType DeducedType = Arg;
- DeducedType.removeCVRQualifiers(Param.getCVRQualifiers());
+
+ // local manipulation is okay because it's canonical
+ DeducedType.removeLocalCVRQualifiers(Param.getCVRQualifiers());
if (RecanonicalizeArg)
DeducedType = S.Context.getCanonicalType(DeducedType);
- if (Deduced[Index].isNull())
- Deduced[Index] = TemplateArgument(DeducedType);
- else {
- // C++ [temp.deduct.type]p2:
- // [...] If type deduction cannot be done for any P/A pair, or if for
- // any pair the deduction leads to more than one possible set of
- // deduced values, or if different pairs yield different deduced
- // values, or if any template argument remains neither deduced nor
- // explicitly specified, template argument deduction fails.
- if (Deduced[Index].getAsType() != DeducedType) {
- Info.Param
- = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
- Info.FirstArg = Deduced[Index];
- Info.SecondArg = TemplateArgument(Arg);
- return Sema::TDK_Inconsistent;
- }
+ DeducedTemplateArgument NewDeduced(DeducedType);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[Index],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = Deduced[Index];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
}
+
+ Deduced[Index] = Result;
return Sema::TDK_Success;
}
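
[Editorial aside, not part of the patch: here the parameter's cv-qualifiers are stripped from the deduced type, now via removeLocalCVRQualifiers, which is safe because the type is canonical. The observable rule, with hypothetical names:

    template<typename T> void g(const T&) {}

    void demo() {
      const int ci = 0;
      g(ci); // P = const T&: 'const' is removed, so T = int, not const int
      g(42); // likewise T = int
    }
]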
@@ -492,6 +996,13 @@ DeduceTemplateArguments(Sema &S,
Info.FirstArg = TemplateArgument(ParamIn);
Info.SecondArg = TemplateArgument(ArgIn);
+ // If the parameter is an already-substituted template parameter
+ // pack, do nothing: we don't know which of its arguments to look
+ // at, so we have to wait until all of the parameter packs in this
+ // expansion have arguments.
+ if (isa<SubstTemplateTypeParmPackType>(Param))
+ return Sema::TDK_Success;
+
// Check the cv-qualifiers on the parameter and argument types.
if (!(TDF & TDF_IgnoreQualifiers)) {
if (TDF & TDF_ParamWithReferenceType) {
@@ -615,16 +1126,17 @@ DeduceTemplateArguments(Sema &S,
if (const ConstantArrayType *ConstantArrayArg
= dyn_cast<ConstantArrayType>(ArrayArg)) {
llvm::APSInt Size(ConstantArrayArg->getSize());
- return DeduceNonTypeTemplateArgument(S, NTTP, Size,
+ return DeduceNonTypeTemplateArgument(S, NTTP, Size,
S.Context.getSizeType(),
/*ArrayBound=*/true,
Info, Deduced);
}
if (const DependentSizedArrayType *DependentArrayArg
= dyn_cast<DependentSizedArrayType>(ArrayArg))
- return DeduceNonTypeTemplateArgument(S, NTTP,
- DependentArrayArg->getSizeExpr(),
- Info, Deduced);
+ if (DependentArrayArg->getSizeExpr())
+ return DeduceNonTypeTemplateArgument(S, NTTP,
+ DependentArrayArg->getSizeExpr(),
+ Info, Deduced);
// Incomplete type does not match a dependently-sized array type
return Sema::TDK_NonDeducedMismatch;
@@ -634,6 +1146,7 @@ DeduceTemplateArguments(Sema &S,
// T(*)()
// T(*)(T)
case Type::FunctionProto: {
+ unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList;
const FunctionProtoType *FunctionProtoArg =
dyn_cast<FunctionProtoType>(Arg);
if (!FunctionProtoArg)
@@ -642,14 +1155,11 @@ DeduceTemplateArguments(Sema &S,
const FunctionProtoType *FunctionProtoParam =
cast<FunctionProtoType>(Param);
- if (FunctionProtoParam->getTypeQuals() !=
- FunctionProtoArg->getTypeQuals())
- return Sema::TDK_NonDeducedMismatch;
-
- if (FunctionProtoParam->getNumArgs() != FunctionProtoArg->getNumArgs())
- return Sema::TDK_NonDeducedMismatch;
-
- if (FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
+ if (FunctionProtoParam->getTypeQuals()
+ != FunctionProtoArg->getTypeQuals() ||
+ FunctionProtoParam->getRefQualifier()
+ != FunctionProtoArg->getRefQualifier() ||
+ FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
return Sema::TDK_NonDeducedMismatch;
// Check return types.
@@ -660,17 +1170,12 @@ DeduceTemplateArguments(Sema &S,
Info, Deduced, 0))
return Result;
- for (unsigned I = 0, N = FunctionProtoParam->getNumArgs(); I != N; ++I) {
- // Check argument types.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- FunctionProtoParam->getArgType(I),
- FunctionProtoArg->getArgType(I),
- Info, Deduced, 0))
- return Result;
- }
-
- return Sema::TDK_Success;
+ return DeduceTemplateArguments(S, TemplateParams,
+ FunctionProtoParam->arg_type_begin(),
+ FunctionProtoParam->getNumArgs(),
+ FunctionProtoArg->arg_type_begin(),
+ FunctionProtoArg->getNumArgs(),
+ Info, Deduced, SubTDF);
}
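
For illustration, a call that exercises the FunctionProto case above (an
editorial example, not part of the patch):

    template<typename T> void h(T (*)(T));
    int identity(int);
    h(&identity);  // P = T(*)(T), A = int(*)(int): qualifiers, ref-qualifier,
                   // and variadic-ness match, then the return and parameter
                   // types are deduced pairwise, giving T = int.
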
case Type::InjectedClassName: {
@@ -721,6 +1226,8 @@ DeduceTemplateArguments(Sema &S,
llvm::SmallVector<const RecordType *, 8> ToVisit;
ToVisit.push_back(RecordT);
bool Successful = false;
+ llvm::SmallVectorImpl<DeducedTemplateArgument> DeducedOrig(0);
+ DeducedOrig = Deduced;
while (!ToVisit.empty()) {
// Retrieve the next class in the inheritance hierarchy.
const RecordType *NextT = ToVisit.back();
@@ -738,9 +1245,14 @@ DeduceTemplateArguments(Sema &S,
QualType(NextT, 0), Info, Deduced);
// If template argument deduction for this base was successful,
- // note that we had some success.
- if (BaseResult == Sema::TDK_Success)
+ // note that we had some success. Otherwise, ignore any deductions
+ // from this base class.
+ if (BaseResult == Sema::TDK_Success) {
Successful = true;
+ DeducedOrig = Deduced;
+ }
+ else
+ Deduced = DeducedOrig;
}
// Visit base classes
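
A minimal editorial example of this base-class walk, in which deductions from a
non-matching base are now rolled back to DeducedOrig (not part of the patch):

    template<typename T> struct Base {};
    struct Derived : Base<int> {};
    template<typename T> void k(Base<T>);
    k(Derived());  // Derived itself is not a Base<T> specialization, so its
                   // bases are visited and T is deduced as int from Base<int>.
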
@@ -828,9 +1340,15 @@ static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S,
TemplateParameterList *TemplateParams,
const TemplateArgument &Param,
- const TemplateArgument &Arg,
+ TemplateArgument Arg,
TemplateDeductionInfo &Info,
llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ // If the template argument is a pack expansion, perform template argument
+ // deduction against the pattern of that expansion. This only occurs during
+ // partial ordering.
+ if (Arg.isPackExpansion())
+ Arg = Arg.getPackExpansionPattern();
+
switch (Param.getKind()) {
case TemplateArgument::Null:
assert(false && "Null template argument in parameter list");
@@ -843,22 +1361,26 @@ DeduceTemplateArguments(Sema &S,
Info.FirstArg = Param;
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
-
+
case TemplateArgument::Template:
if (Arg.getKind() == TemplateArgument::Template)
- return DeduceTemplateArguments(S, TemplateParams,
+ return DeduceTemplateArguments(S, TemplateParams,
Param.getAsTemplate(),
Arg.getAsTemplate(), Info, Deduced);
Info.FirstArg = Param;
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
-
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("caller should handle pack expansions");
+ break;
+
case TemplateArgument::Declaration:
if (Arg.getKind() == TemplateArgument::Declaration &&
Param.getAsDecl()->getCanonicalDecl() ==
Arg.getAsDecl()->getCanonicalDecl())
return Sema::TDK_Success;
-
+
Info.FirstArg = Param;
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
@@ -898,7 +1420,7 @@ DeduceTemplateArguments(Sema &S,
if (Arg.getKind() == TemplateArgument::Declaration)
return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsDecl(),
Info, Deduced);
-
+
Info.FirstArg = Param;
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
@@ -908,31 +1430,209 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_Success;
}
case TemplateArgument::Pack:
- assert(0 && "FIXME: Implement!");
- break;
+ llvm_unreachable("Argument packs should be expanded by the caller!");
}
return Sema::TDK_Success;
}
+/// \brief Determine whether there is a template argument to be used for
+/// deduction.
+///
+/// This routine "expands" argument packs in-place, overwriting its input
+/// parameters so that \c Args[ArgIdx] will be the available template argument.
+///
+/// \returns true if there is another template argument (which will be at
+/// \c Args[ArgIdx]), false otherwise.
+static bool hasTemplateArgumentForDeduction(const TemplateArgument *&Args,
+ unsigned &ArgIdx,
+ unsigned &NumArgs) {
+ if (ArgIdx == NumArgs)
+ return false;
+
+ const TemplateArgument &Arg = Args[ArgIdx];
+ if (Arg.getKind() != TemplateArgument::Pack)
+ return true;
+
+ assert(ArgIdx == NumArgs - 1 && "Pack not at the end of argument list?");
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ return ArgIdx < NumArgs;
+}
+
+/// \brief Determine whether the given set of template arguments has a pack
+/// expansion that is not the last template argument.
+static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ unsigned ArgIdx = 0;
+ while (ArgIdx < NumArgs) {
+ const TemplateArgument &Arg = Args[ArgIdx];
+
+ // Unwrap argument packs.
+ if (Args[ArgIdx].getKind() == TemplateArgument::Pack) {
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ continue;
+ }
+
+ ++ArgIdx;
+ if (ArgIdx == NumArgs)
+ return false;
+
+ if (Arg.isPackExpansion())
+ return true;
+ }
+
+ return false;
+}
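
An editorial sketch of the non-deduced-context rule that
hasPackExpansionBeforeEnd implements (not part of the patch):

    template<typename... Ts> struct tuple {};
    template<typename... Ts> void m(tuple<Ts..., int>);
    m(tuple<float, int>());  // Ts... is not the last template argument of P,
                             // so <Ts..., int> is a non-deduced context; Ts
                             // must be supplied explicitly, as in m<float>(...).
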
+
static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S,
TemplateParameterList *TemplateParams,
- const TemplateArgumentList &ParamList,
- const TemplateArgumentList &ArgList,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
TemplateDeductionInfo &Info,
- llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- assert(ParamList.size() == ArgList.size());
- for (unsigned I = 0, N = ParamList.size(); I != N; ++I) {
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ bool NumberOfArgumentsMustMatch) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (hasPackExpansionBeforeEnd(Params, NumParams))
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p9:
+ // If P has a form that contains <T> or <i>, then each argument Pi of the
+ // respective template argument list P is compared with the corresponding
+ // argument Ai of the corresponding template argument list of A.
+ unsigned ArgIdx = 0, ParamIdx = 0;
+ for (; hasTemplateArgumentForDeduction(Params, ParamIdx, NumParams);
+ ++ParamIdx) {
+ if (!Params[ParamIdx].isPackExpansion()) {
+ // The simple case: deduce template arguments by matching Pi and Ai.
+
+ // Check whether we have enough arguments.
+ if (!hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ return NumberOfArgumentsMustMatch? Sema::TDK_NonDeducedMismatch
+ : Sema::TDK_Success;
+
+ if (Args[ArgIdx].isPackExpansion()) {
+ // FIXME: We follow the logic of C++0x [temp.deduct.type]p22 here,
+ // but applied to pack expansions that are template arguments.
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // Perform deduction for this Pi/Ai pair.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ // Move to the next argument.
+ ++ArgIdx;
+ continue;
+ }
+
+ // The parameter is a pack expansion.
+
+ // C++0x [temp.deduct.type]p9:
+ // If Pi is a pack expansion, then the pattern of Pi is compared with
+ // each remaining argument in the template argument list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by Pi.
+ TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern();
+
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ llvm::SmallVector<unsigned, 2> PackIndices;
+ {
+ llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
+ }
+ }
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // FIXME: If there are no remaining arguments, we can bail out early
+ // and set any deduced parameter packs to an empty argument pack.
+ // The latter part of this is a (minor) correctness issue.
+
+ // Save the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ llvm::SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ llvm::SmallVector<llvm::SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(S, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ bool HasAnyArguments = false;
+ while (hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs)) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+
+ ++ArgIdx;
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- ParamList[I], ArgList[I],
- Info, Deduced))
+ = FinishArgumentPackDeduction(S, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
return Result;
}
+
+ // If there is an argument remaining, then we had too many arguments.
+ if (NumberOfArgumentsMustMatch &&
+ hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ return Sema::TDK_NonDeducedMismatch;
+
return Sema::TDK_Success;
}
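
For illustration, the trailing pack-expansion comparison the loop above
performs (an editorial example, not part of the patch):

    template<typename... Ts> struct tuple {};
    template<typename T, typename... Rest> void n(tuple<T, Rest...>);
    n(tuple<int, float, double>());  // T is deduced as int; the pattern Rest
                                     // is then compared with each remaining
                                     // argument, deducing Rest = {float, double}.
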
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ TemplateDeductionInfo &Info,
+ llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ return DeduceTemplateArguments(S, TemplateParams,
+ ParamList.data(), ParamList.size(),
+ ArgList.data(), ArgList.size(),
+ Info, Deduced);
+}
+
/// \brief Determine whether two template arguments are the same.
static bool isSameTemplateArg(ASTContext &Context,
const TemplateArgument &X,
@@ -954,18 +1654,19 @@ static bool isSameTemplateArg(ASTContext &Context,
Y.getAsDecl()->getCanonicalDecl();
case TemplateArgument::Template:
- return Context.getCanonicalTemplateName(X.getAsTemplate())
- .getAsVoidPointer() ==
- Context.getCanonicalTemplateName(Y.getAsTemplate())
- .getAsVoidPointer();
-
+ case TemplateArgument::TemplateExpansion:
+ return Context.getCanonicalTemplateName(
+ X.getAsTemplateOrTemplatePattern()).getAsVoidPointer() ==
+ Context.getCanonicalTemplateName(
+ Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();
+
case TemplateArgument::Integral:
return *X.getAsIntegral() == *Y.getAsIntegral();
case TemplateArgument::Expression: {
llvm::FoldingSetNodeID XID, YID;
X.getAsExpr()->Profile(XID, Context, true);
- Y.getAsExpr()->Profile(YID, Context, true);
+ Y.getAsExpr()->Profile(YID, Context, true);
return XID == YID;
}
@@ -986,51 +1687,192 @@ static bool isSameTemplateArg(ASTContext &Context,
return false;
}
-/// \brief Helper function to build a TemplateParameter when we don't
-/// know its type statically.
-static TemplateParameter makeTemplateParameter(Decl *D) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D))
- return TemplateParameter(TTP);
- else if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
- return TemplateParameter(NTTP);
+/// \brief Allocate a TemplateArgumentLoc where all locations have
+/// been initialized to the given location.
+///
+/// \param S The semantic analysis object.
+///
+/// \param Arg The template argument we are producing template argument
+/// location information for.
+///
+/// \param NTTPType For a declaration template argument, the type of
+/// the non-type template parameter that corresponds to this template
+/// argument.
+///
+/// \param Loc The source location to use for the resulting template
+/// argument.
+static TemplateArgumentLoc
+getTrivialTemplateArgumentLoc(Sema &S,
+ const TemplateArgument &Arg,
+ QualType NTTPType,
+ SourceLocation Loc) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Can't get a NULL template argument here");
+ break;
- return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
+ case TemplateArgument::Type:
+ return TemplateArgumentLoc(Arg,
+ S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+
+ case TemplateArgument::Declaration: {
+ Expr *E
+ = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::Integral: {
+ Expr *E
+ = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgumentLoc(Arg, SourceRange(), Loc);
+
+ case TemplateArgument::TemplateExpansion:
+ return TemplateArgumentLoc(Arg, SourceRange(), Loc, Loc);
+
+ case TemplateArgument::Expression:
+ return TemplateArgumentLoc(Arg, Arg.getAsExpr());
+
+ case TemplateArgument::Pack:
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
+ }
+
+ return TemplateArgumentLoc();
+}
+
+
+/// \brief Convert the given deduced template argument and add it to the set of
+/// fully-converted template arguments.
+static bool ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
+ DeducedTemplateArgument Arg,
+ NamedDecl *Template,
+ QualType NTTPType,
+ unsigned ArgumentPackIndex,
+ TemplateDeductionInfo &Info,
+ bool InFunctionTemplate,
+ llvm::SmallVectorImpl<TemplateArgument> &Output) {
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ // This is a template argument pack, so check each of its arguments against
+ // the template parameter.
+ llvm::SmallVector<TemplateArgument, 2> PackedArgsBuilder;
+ for (TemplateArgument::pack_iterator PA = Arg.pack_begin(),
+ PAEnd = Arg.pack_end();
+ PA != PAEnd; ++PA) {
+ // When converting the deduced template argument, append it to the
+ // general output list. We need to do this so that the template argument
+ // checking logic has all of the prior template arguments available.
+ DeducedTemplateArgument InnerArg(*PA);
+ InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound());
+ if (ConvertDeducedTemplateArgument(S, Param, InnerArg, Template,
+ NTTPType, PackedArgsBuilder.size(),
+ Info, InFunctionTemplate, Output))
+ return true;
+
+ // Move the converted template argument into our argument pack.
+ PackedArgsBuilder.push_back(Output.back());
+ Output.pop_back();
+ }
+
+ // Create the resulting argument pack.
+ Output.push_back(TemplateArgument::CreatePackCopy(S.Context,
+ PackedArgsBuilder.data(),
+ PackedArgsBuilder.size()));
+ return false;
+ }
+
+ // Convert the deduced template argument into a template
+ // argument that we can check, almost as if the user had written
+ // the template argument explicitly.
+ TemplateArgumentLoc ArgLoc = getTrivialTemplateArgumentLoc(S, Arg, NTTPType,
+ Info.getLocation());
+
+ // Check the template argument, converting it as necessary.
+ return S.CheckTemplateArgument(Param, ArgLoc,
+ Template,
+ Template->getLocation(),
+ Template->getSourceRange().getEnd(),
+ ArgumentPackIndex,
+ Output,
+ InFunctionTemplate
+ ? (Arg.wasDeducedFromArrayBound()
+ ? Sema::CTAK_DeducedFromArrayBound
+ : Sema::CTAK_Deduced)
+ : Sema::CTAK_Specified);
}
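
A small editorial example of the pack branch above: each element of a deduced
argument pack is checked against the pack's parameter as if written explicitly,
then the pack is rebuilt with TemplateArgument::CreatePackCopy (not from the
patch):

    template<int... Ns> struct IntSeq {};
    template<int... Ns> void f(IntSeq<Ns...>);
    f(IntSeq<0, 1, 2>());  // The deduced pack {0, 1, 2} is converted element
                           // by element against the parameter Ns.
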
/// Complete template argument deduction for a class template partial
/// specialization.
static Sema::TemplateDeductionResult
-FinishTemplateArgumentDeduction(Sema &S,
+FinishTemplateArgumentDeduction(Sema &S,
ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info) {
// Trap errors.
Sema::SFINAETrap Trap(S);
-
+
Sema::ContextRAII SavedContext(S, Partial);
// C++ [temp.deduct.type]p2:
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
- TemplateArgumentListBuilder Builder(Partial->getTemplateParameters(),
- Deduced.size());
- for (unsigned I = 0, N = Deduced.size(); I != N; ++I) {
+ llvm::SmallVector<TemplateArgument, 4> Builder;
+ TemplateParameterList *PartialParams = Partial->getTemplateParameters();
+ for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
+ NamedDecl *Param = PartialParams->getParam(I);
if (Deduced[I].isNull()) {
- Decl *Param
- = const_cast<NamedDecl *>(
- Partial->getTemplateParameters()->getParam(I));
Info.Param = makeTemplateParameter(Param);
return Sema::TDK_Incomplete;
}
-
- Builder.Append(Deduced[I]);
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = S.SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context,
+ Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(S, Param, Deduced[I],
+ Partial, NTTPType, 0, Info, false,
+ Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
}
-
+
// Form the template argument list from the deduced template arguments.
TemplateArgumentList *DeducedArgumentList
- = new (S.Context) TemplateArgumentList(S.Context, Builder,
- /*TakeArgs=*/true);
+ = TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size());
+
Info.reset(DeducedArgumentList);
// Substitute the deduced template arguments into the template
@@ -1038,60 +1880,40 @@ FinishTemplateArgumentDeduction(Sema &S,
// verify that the instantiated template arguments are both valid
// and are equivalent to the template arguments originally provided
// to the class template.
- // FIXME: Do we have to correct the types of deduced non-type template
- // arguments (in particular, integral non-type template arguments?).
LocalInstantiationScope InstScope(S);
ClassTemplateDecl *ClassTemplate = Partial->getSpecializedTemplate();
const TemplateArgumentLoc *PartialTemplateArgs
= Partial->getTemplateArgsAsWritten();
- unsigned N = Partial->getNumTemplateArgsAsWritten();
// Note that we don't provide the langle and rangle locations.
TemplateArgumentListInfo InstArgs;
- for (unsigned I = 0; I != N; ++I) {
- Decl *Param = const_cast<NamedDecl *>(
- ClassTemplate->getTemplateParameters()->getParam(I));
- TemplateArgumentLoc InstArg;
- if (S.Subst(PartialTemplateArgs[I], InstArg,
- MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
- Info.Param = makeTemplateParameter(Param);
- Info.FirstArg = PartialTemplateArgs[I].getArgument();
- return Sema::TDK_SubstitutionFailure;
- }
- InstArgs.addArgument(InstArg);
- }
+ if (S.Subst(PartialTemplateArgs,
+ Partial->getNumTemplateArgsAsWritten(),
+ InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
+ unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
+ if (ParamIdx >= Partial->getTemplateParameters()->size())
+ ParamIdx = Partial->getTemplateParameters()->size() - 1;
- TemplateArgumentListBuilder ConvertedInstArgs(
- ClassTemplate->getTemplateParameters(), N);
+ Decl *Param
+ = const_cast<NamedDecl *>(
+ Partial->getTemplateParameters()->getParam(ParamIdx));
+ Info.Param = makeTemplateParameter(Param);
+ Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
+ return Sema::TDK_SubstitutionFailure;
+ }
+ llvm::SmallVector<TemplateArgument, 4> ConvertedInstArgs;
if (S.CheckTemplateArgumentList(ClassTemplate, Partial->getLocation(),
- InstArgs, false, ConvertedInstArgs))
+ InstArgs, false, ConvertedInstArgs))
return Sema::TDK_SubstitutionFailure;
-
- for (unsigned I = 0, E = ConvertedInstArgs.flatSize(); I != E; ++I) {
- TemplateArgument InstArg = ConvertedInstArgs.getFlatArguments()[I];
-
- Decl *Param = const_cast<NamedDecl *>(
- ClassTemplate->getTemplateParameters()->getParam(I));
-
- if (InstArg.getKind() == TemplateArgument::Expression) {
- // When the argument is an expression, check the expression result
- // against the actual template parameter to get down to the canonical
- // template argument.
- Expr *InstExpr = InstArg.getAsExpr();
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (S.CheckTemplateArgument(NTTP, NTTP->getType(), InstExpr, InstArg)) {
- Info.Param = makeTemplateParameter(Param);
- Info.FirstArg = Partial->getTemplateArgs()[I];
- return Sema::TDK_SubstitutionFailure;
- }
- }
- }
+ TemplateParameterList *TemplateParams
+ = ClassTemplate->getTemplateParameters();
+ for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
+ TemplateArgument InstArg = ConvertedInstArgs.data()[I];
if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
- Info.Param = makeTemplateParameter(Param);
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
Info.FirstArg = TemplateArgs[I];
Info.SecondArg = InstArg;
return Sema::TDK_NonDeducedMismatch;
@@ -1127,14 +1949,14 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
return Result;
InstantiatingTemplate Inst(*this, Partial->getLocation(), Partial,
- Deduced.data(), Deduced.size());
+ Deduced.data(), Deduced.size(), Info);
if (Inst)
return TDK_InstantiationDepth;
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
-
- return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
+
+ return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
Deduced, Info);
}
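
For illustration, the kind of deduction this entry point performs (an editorial
example, not part of the patch):

    template<typename T> struct A {};
    template<typename T> struct A<T*> {};
    A<int*> a;  // <int*> is matched against the partial specialization's <T*>,
                // deducing T = int; FinishTemplateArgumentDeduction then
                // substitutes the result back and checks that it reproduces
                // <int*>.
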
@@ -1206,26 +2028,24 @@ Sema::SubstituteExplicitTemplateArguments(
// declaration order of their corresponding template-parameters. The
// template argument list shall not specify more template-arguments than
// there are corresponding template-parameters.
- TemplateArgumentListBuilder Builder(TemplateParams,
- ExplicitTemplateArgs.size());
+ llvm::SmallVector<TemplateArgument, 4> Builder;
// Enter a new template instantiation context where we check the
// explicitly-specified template arguments against this function template,
// and then substitute them into the function parameter types.
InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
FunctionTemplate, Deduced.data(), Deduced.size(),
- ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution);
+ ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
+ Info);
if (Inst)
return TDK_InstantiationDepth;
- ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
-
if (CheckTemplateArgumentList(FunctionTemplate,
SourceLocation(),
ExplicitTemplateArgs,
true,
Builder) || Trap.hasErrorOccurred()) {
- unsigned Index = Builder.structuredSize();
+ unsigned Index = Builder.size();
if (Index >= TemplateParams->size())
Index = TemplateParams->size() - 1;
Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
@@ -1235,25 +2055,38 @@ Sema::SubstituteExplicitTemplateArguments(
// Form the template argument list from the explicitly-specified
// template arguments.
TemplateArgumentList *ExplicitArgumentList
- = new (Context) TemplateArgumentList(Context, Builder, /*TakeArgs=*/true);
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
Info.reset(ExplicitArgumentList);
- // Instantiate the types of each of the function parameters given the
- // explicitly-specified template arguments.
- for (FunctionDecl::param_iterator P = Function->param_begin(),
- PEnd = Function->param_end();
- P != PEnd;
- ++P) {
- QualType ParamType
- = SubstType((*P)->getType(),
- MultiLevelTemplateArgumentList(*ExplicitArgumentList),
- (*P)->getLocation(), (*P)->getDeclName());
- if (ParamType.isNull() || Trap.hasErrorOccurred())
- return TDK_SubstitutionFailure;
+ // Template argument deduction and the final substitution should be
+ // done in the context of the templated declaration. Explicit
+ // argument substitution, on the other hand, needs to happen in the
+ // calling context.
+ ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
- ParamTypes.push_back(ParamType);
+ // If we deduced template arguments for a template parameter pack,
+ // note that the template argument pack is partially substituted and record
+ // the explicit template arguments. They'll be used as part of deduction
+ // for this template parameter pack.
+ for (unsigned I = 0, N = Builder.size(); I != N; ++I) {
+ const TemplateArgument &Arg = Builder[I];
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ CurrentInstantiationScope->SetPartiallySubstitutedPack(
+ TemplateParams->getParam(I),
+ Arg.pack_begin(),
+ Arg.pack_size());
+ break;
+ }
}
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments.
+ if (SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+
// If the caller wants a full function type back, instantiate the return
// type and form that function type.
if (FunctionType) {
@@ -1274,6 +2107,7 @@ Sema::SubstituteExplicitTemplateArguments(
ParamTypes.data(), ParamTypes.size(),
Proto->isVariadic(),
Proto->getTypeQuals(),
+ Proto->getRefQualifier(),
Function->getLocation(),
Function->getDeclName(),
Proto->getExtInfo());
@@ -1287,67 +2121,20 @@ Sema::SubstituteExplicitTemplateArguments(
// template arguments can be deduced, they may all be omitted; in this
// case, the empty template argument list <> itself may also be omitted.
//
- // Take all of the explicitly-specified arguments and put them into the
- // set of deduced template arguments.
+ // Take all of the explicitly-specified arguments and put them into
+ // the set of deduced template arguments. Explicitly-specified
+ // parameter packs, however, will be set to NULL since the deduction
+ // mechanisms handle explicitly-specified argument packs directly.
Deduced.reserve(TemplateParams->size());
- for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I)
- Deduced.push_back(ExplicitArgumentList->get(I));
-
- return TDK_Success;
-}
-
-/// \brief Allocate a TemplateArgumentLoc where all locations have
-/// been initialized to the given location.
-///
-/// \param S The semantic analysis object.
-///
-/// \param The template argument we are producing template argument
-/// location information for.
-///
-/// \param NTTPType For a declaration template argument, the type of
-/// the non-type template parameter that corresponds to this template
-/// argument.
-///
-/// \param Loc The source location to use for the resulting template
-/// argument.
-static TemplateArgumentLoc
-getTrivialTemplateArgumentLoc(Sema &S,
- const TemplateArgument &Arg,
- QualType NTTPType,
- SourceLocation Loc) {
- switch (Arg.getKind()) {
- case TemplateArgument::Null:
- llvm_unreachable("Can't get a NULL template argument here");
- break;
-
- case TemplateArgument::Type:
- return TemplateArgumentLoc(Arg,
- S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
-
- case TemplateArgument::Declaration: {
- Expr *E
- = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
- .takeAs<Expr>();
- return TemplateArgumentLoc(TemplateArgument(E), E);
- }
-
- case TemplateArgument::Integral: {
- Expr *E
- = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).takeAs<Expr>();
- return TemplateArgumentLoc(TemplateArgument(E), E);
- }
-
- case TemplateArgument::Template:
- return TemplateArgumentLoc(Arg, SourceRange(), Loc);
-
- case TemplateArgument::Expression:
- return TemplateArgumentLoc(Arg, Arg.getAsExpr());
-
- case TemplateArgument::Pack:
- llvm_unreachable("Template parameter packs are not yet supported");
+ for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
+ const TemplateArgument &Arg = ExplicitArgumentList->get(I);
+ if (Arg.getKind() == TemplateArgument::Pack)
+ Deduced.push_back(DeducedTemplateArgument());
+ else
+ Deduced.push_back(Arg);
}
- return TemplateArgumentLoc();
+ return TDK_Success;
}
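
An editorial sketch of the partially-substituted pack behavior set up above
(not part of the patch):

    template<typename... Ts> void f(Ts...);
    f<int>(1, 2.5);  // The explicit <int> seeds Ts as a partially substituted
                     // pack; deduction against the remaining argument extends
                     // it, so Ts ends up as {int, double}.
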
/// \brief Finish template argument deduction for a function template,
@@ -1370,7 +2157,8 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// actual function declaration.
InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
FunctionTemplate, Deduced.data(), Deduced.size(),
- ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution);
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ Info);
if (Inst)
return TDK_InstantiationDepth;
@@ -1379,18 +2167,16 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// C++ [temp.deduct.type]p2:
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
- TemplateArgumentListBuilder Builder(TemplateParams, Deduced.size());
- for (unsigned I = 0, N = Deduced.size(); I != N; ++I) {
- NamedDecl *Param = FunctionTemplate->getTemplateParameters()->getParam(I);
+ llvm::SmallVector<TemplateArgument, 4> Builder;
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NamedDecl *Param = TemplateParams->getParam(I);
+
if (!Deduced[I].isNull()) {
- if (I < NumExplicitlySpecified ||
- Deduced[I].getKind() == TemplateArgument::Type) {
+ if (I < NumExplicitlySpecified) {
// We have already fully type-checked and converted this
- // argument (because it was explicitly-specified) or no
- // additional checking is necessary (because it's a template
- // type parameter). Just record the presence of this
- // parameter.
- Builder.Append(Deduced[I]);
+ // argument, because it was explicitly-specified. Just record the
+ // presence of this argument.
+ Builder.push_back(Deduced[I]);
continue;
}
@@ -1401,55 +2187,61 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// initialized by a declaration, we need the type of the
// corresponding non-type template parameter.
QualType NTTPType;
- if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (Deduced[I].getKind() == TemplateArgument::Declaration) {
- NTTPType = NTTP->getType();
- if (NTTPType->isDependentType()) {
- TemplateArgumentList TemplateArgs(Context, Builder,
- /*TakeArgs=*/false);
- NTTPType = SubstType(NTTPType,
- MultiLevelTemplateArgumentList(TemplateArgs),
- NTTP->getLocation(),
- NTTP->getDeclName());
- if (NTTPType.isNull()) {
- Info.Param = makeTemplateParameter(Param);
- Info.reset(new (Context) TemplateArgumentList(Context, Builder,
- /*TakeArgs=*/true));
- return TDK_SubstitutionFailure;
- }
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context,
+ Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
}
}
}
- // Convert the deduced template argument into a template
- // argument that we can check, almost as if the user had written
- // the template argument explicitly.
- TemplateArgumentLoc Arg = getTrivialTemplateArgumentLoc(*this,
- Deduced[I],
- NTTPType,
- SourceLocation());
-
- // Check the template argument, converting it as necessary.
- if (CheckTemplateArgument(Param, Arg,
- FunctionTemplate,
- FunctionTemplate->getLocation(),
- FunctionTemplate->getSourceRange().getEnd(),
- Builder,
- Deduced[I].wasDeducedFromArrayBound()
- ? CTAK_DeducedFromArrayBound
- : CTAK_Deduced)) {
- Info.Param = makeTemplateParameter(
- const_cast<NamedDecl *>(TemplateParams->getParam(I)));
- Info.reset(new (Context) TemplateArgumentList(Context, Builder,
- /*TakeArgs=*/true));
+ if (ConvertDeducedTemplateArgument(*this, Param, Deduced[I],
+ FunctionTemplate, NTTPType, 0, Info,
+ true, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
return TDK_SubstitutionFailure;
}
continue;
}
- // Substitute into the default template argument, if available.
+ // C++0x [temp.arg.explicit]p3:
+ // A trailing template parameter pack (14.5.3) not otherwise deduced will
+ // be deduced to an empty sequence of template arguments.
+ // FIXME: Where did the word "trailing" come from?
+ if (Param->isTemplateParameterPack()) {
+ // We may have had explicitly-specified template arguments for this
+ // template parameter pack. If so, our empty deduction extends the
+ // explicitly-specified set (C++0x [temp.arg.explicit]p9).
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
+ &NumExplicitArgs)
+ == Param)
+ Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs));
+ else
+ Builder.push_back(TemplateArgument(0, 0));
+
+ continue;
+ }
+
+ // Substitute into the default template argument, if available.
TemplateArgumentLoc DefArg
= SubstDefaultTemplateArgumentIfAvailable(FunctionTemplate,
FunctionTemplate->getLocation(),
@@ -1463,18 +2255,19 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
const_cast<NamedDecl *>(TemplateParams->getParam(I)));
return TDK_Incomplete;
}
-
+
// Check whether we can actually use the default argument.
if (CheckTemplateArgument(Param, DefArg,
FunctionTemplate,
FunctionTemplate->getLocation(),
FunctionTemplate->getSourceRange().getEnd(),
- Builder,
+ 0, Builder,
CTAK_Deduced)) {
Info.Param = makeTemplateParameter(
const_cast<NamedDecl *>(TemplateParams->getParam(I)));
- Info.reset(new (Context) TemplateArgumentList(Context, Builder,
- /*TakeArgs=*/true));
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
return TDK_SubstitutionFailure;
}
@@ -1483,7 +2276,7 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// Form the template argument list from the deduced template arguments.
TemplateArgumentList *DeducedArgumentList
- = new (Context) TemplateArgumentList(Context, Builder, /*TakeArgs=*/true);
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
Info.reset(DeducedArgumentList);
// Substitute the deduced template arguments into the function template
@@ -1497,9 +2290,9 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
if (!Specialization)
return TDK_SubstitutionFailure;
- assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
+ assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
FunctionTemplate->getCanonicalDecl());
-
+
// If the template argument list is owned by the function template
// specialization, release it.
if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList &&
@@ -1514,6 +2307,18 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
return TDK_SubstitutionFailure;
}
+ // If we suppressed any diagnostics while performing template argument
+ // deduction, and if we haven't already instantiated this declaration,
+ // keep track of these diagnostics. They'll be emitted if this specialization
+ // is actually used.
+ if (Info.diag_begin() != Info.diag_end()) {
+ llvm::DenseMap<Decl *, llvm::SmallVector<PartialDiagnosticAt, 1> >::iterator
+ Pos = SuppressedDiagnostics.find(Specialization->getCanonicalDecl());
+ if (Pos == SuppressedDiagnostics.end())
+ SuppressedDiagnostics[Specialization->getCanonicalDecl()]
+ .append(Info.diag_begin(), Info.diag_end());
+ }
+
return TDK_Success;
}
@@ -1544,7 +2349,7 @@ static QualType
ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
Expr *Arg, QualType ParamType,
bool ParamWasReference) {
-
+
OverloadExpr::FindResult R = OverloadExpr::find(Arg);
OverloadExpr *Ovl = R.Expression;
@@ -1592,10 +2397,10 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
if (ArgType.isNull()) continue;
// Function-to-pointer conversion.
- if (!ParamWasReference && ParamType->isPointerType() &&
+ if (!ParamWasReference && ParamType->isPointerType() &&
ArgType->isFunctionType())
ArgType = S.Context.getPointerType(ArgType);
-
+
// - If the argument is an overload set (not containing function
// templates), trial argument deduction is attempted using each
// of the members of the set. If deduction succeeds for only one
@@ -1608,7 +2413,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// Type deduction is done independently for each P/A pair, and
// the deduced template argument values are then combined.
// So we do not reject deductions which were made elsewhere.
- llvm::SmallVector<DeducedTemplateArgument, 8>
+ llvm::SmallVector<DeducedTemplateArgument, 8>
Deduced(TemplateParams->size());
TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
Sema::TemplateDeductionResult Result
@@ -1623,6 +2428,113 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
return Match;
}
+/// \brief Perform the adjustments to the parameter and argument types
+/// described in C++ [temp.deduct.call].
+///
+/// \returns true if the caller should not attempt to perform any template
+/// argument deduction based on this P/A pair.
+static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType &ParamType,
+ QualType &ArgType,
+ Expr *Arg,
+ unsigned &TDF) {
+ // C++0x [temp.deduct.call]p3:
+ // If P is a cv-qualified type, the top level cv-qualifiers of P's type
+ // are ignored for type deduction.
+ if (ParamType.getCVRQualifiers())
+ ParamType = ParamType.getLocalUnqualifiedType();
+ const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>();
+ if (ParamRefType) {
+ QualType PointeeType = ParamRefType->getPointeeType();
+
+ // [C++0x] If P is an rvalue reference to a cv-unqualified
+ // template parameter and the argument is an lvalue, the type
+ // "lvalue reference to A" is used in place of A for type
+ // deduction.
+ if (isa<RValueReferenceType>(ParamType)) {
+ if (!PointeeType.getQualifiers() &&
+ isa<TemplateTypeParmType>(PointeeType) &&
+ Arg->Classify(S.Context).isLValue())
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ }
+
+ // [...] If P is a reference type, the type referred to by P is used
+ // for type deduction.
+ ParamType = PointeeType;
+ }
+
+ // Overload sets usually make this parameter an undeduced
+ // context, but there are sometimes special circumstances.
+ if (ArgType == S.Context.OverloadTy) {
+ ArgType = ResolveOverloadForDeduction(S, TemplateParams,
+ Arg, ParamType,
+ ParamRefType != 0);
+ if (ArgType.isNull())
+ return true;
+ }
+
+ if (ParamRefType) {
+ // C++0x [temp.deduct.call]p3:
+ // [...] If P is of the form T&&, where T is a template parameter, and
+ // the argument is an lvalue, the type A& is used in place of A for
+ // type deduction.
+ if (ParamRefType->isRValueReferenceType() &&
+ ParamRefType->getAs<TemplateTypeParmType>() &&
+ Arg->isLValue())
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ } else {
+ // C++ [temp.deduct.call]p2:
+ // If P is not a reference type:
+ // - If A is an array type, the pointer type produced by the
+ // array-to-pointer standard conversion (4.2) is used in place of
+ // A for type deduction; otherwise,
+ if (ArgType->isArrayType())
+ ArgType = S.Context.getArrayDecayedType(ArgType);
+ // - If A is a function type, the pointer type produced by the
+ // function-to-pointer standard conversion (4.3) is used in place
+ // of A for type deduction; otherwise,
+ else if (ArgType->isFunctionType())
+ ArgType = S.Context.getPointerType(ArgType);
+ else {
+ // - If A is a cv-qualified type, the top level cv-qualifiers of A's
+ // type are ignored for type deduction.
+ if (ArgType.getCVRQualifiers())
+ ArgType = ArgType.getUnqualifiedType();
+ }
+ }
+
+ // C++0x [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ TDF = TDF_SkipNonDependent;
+
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
+ if (ParamRefType)
+ TDF |= TDF_ParamWithReferenceType;
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion (4.4).
+ if (ArgType->isPointerType() || ArgType->isMemberPointerType() ||
+ ArgType->isObjCObjectPointerType())
+ TDF |= TDF_IgnoreQualifiers;
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. Likewise,
+ // if P is a pointer to a class of the form simple-template-id, the
+ // transformed A can be a pointer to a derived class pointed to by
+ // the deduced A.
+ if (isSimpleTemplateIdType(ParamType) ||
+ (isa<PointerType>(ParamType) &&
+ isSimpleTemplateIdType(
+ ParamType->getAs<PointerType>()->getPointeeType())))
+ TDF |= TDF_DerivedClass;
+
+ return false;
+}
+
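
For illustration, the adjustments this helper applies (an editorial example,
not part of the patch):

    template<typename T> void f(T&&);
    template<typename T> void g(T);
    int i = 0;
    int arr[4];
    f(i);    // lvalue against T&&: A becomes int&, so T is deduced as int&
    f(1);    // rvalue: T is deduced as int
    g(arr);  // array-to-pointer decay: A becomes int*, so T = int*
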
/// \brief Perform template argument deduction from a function call
/// (C++ [temp.deduct.call]).
///
@@ -1639,7 +2551,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
/// \param Name the name of the function being called. This is only significant
/// when the function template is a conversion function template, in which
/// case this routine will also perform template argument deduction based on
-/// the function to which
+/// the type to which the conversion function converts.
///
/// \param Specialization if template argument deduction was successful,
/// this will be set to the function template specialization produced by
@@ -1667,10 +2579,12 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
else if (NumArgs > Function->getNumParams()) {
const FunctionProtoType *Proto
= Function->getType()->getAs<FunctionProtoType>();
- if (!Proto->isVariadic())
+ if (Proto->isTemplateVariadic())
+ /* Do nothing */;
+ else if (Proto->isVariadic())
+ CheckArgs = Function->getNumParams();
+ else
return TDK_TooManyArguments;
-
- CheckArgs = Function->getNumParams();
}
// The types of the parameters from which we will perform template argument
@@ -1695,105 +2609,126 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
NumExplicitlySpecified = Deduced.size();
} else {
// Just fill in the parameter types from the function declaration.
- for (unsigned I = 0; I != CheckArgs; ++I)
+ for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
ParamTypes.push_back(Function->getParamDecl(I)->getType());
}
// Deduce template arguments from the function parameters.
Deduced.resize(TemplateParams->size());
- for (unsigned I = 0; I != CheckArgs; ++I) {
- QualType ParamType = ParamTypes[I];
- QualType ArgType = Args[I]->getType();
+ unsigned ArgIdx = 0;
+ for (unsigned ParamIdx = 0, NumParams = ParamTypes.size();
+ ParamIdx != NumParams; ++ParamIdx) {
+ QualType ParamType = ParamTypes[ParamIdx];
+
+ const PackExpansionType *ParamExpansion
+ = dyn_cast<PackExpansionType>(ParamType);
+ if (!ParamExpansion) {
+ // Simple case: matching a function parameter to a function argument.
+ if (ArgIdx >= CheckArgs)
+ break;
- // C++0x [temp.deduct.call]p3:
- // If P is a cv-qualified type, the top level cv-qualifiers of P’s type
- // are ignored for type deduction.
- if (ParamType.getCVRQualifiers())
- ParamType = ParamType.getLocalUnqualifiedType();
- const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>();
- if (ParamRefType) {
- // [...] If P is a reference type, the type referred to by P is used
- // for type deduction.
- ParamType = ParamRefType->getPointeeType();
- }
-
- // Overload sets usually make this parameter an undeduced
- // context, but there are sometimes special circumstances.
- if (ArgType == Context.OverloadTy) {
- ArgType = ResolveOverloadForDeduction(*this, TemplateParams,
- Args[I], ParamType,
- ParamRefType != 0);
- if (ArgType.isNull())
+ Expr *Arg = Args[ArgIdx++];
+ QualType ArgType = Arg->getType();
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF))
continue;
+
+ if (TemplateDeductionResult Result
+ = ::DeduceTemplateArguments(*this, TemplateParams,
+ ParamType, ArgType, Info, Deduced,
+ TDF))
+ return Result;
+
+ // FIXME: we need to check that the deduced A is the same as A,
+ // modulo the various allowed differences.
+ continue;
}
- if (ParamRefType) {
- // C++0x [temp.deduct.call]p3:
- // [...] If P is of the form T&&, where T is a template parameter, and
- // the argument is an lvalue, the type A& is used in place of A for
- // type deduction.
- if (ParamRefType->isRValueReferenceType() &&
- ParamRefType->getAs<TemplateTypeParmType>() &&
- Args[I]->isLvalue(Context) == Expr::LV_Valid)
- ArgType = Context.getLValueReferenceType(ArgType);
- } else {
- // C++ [temp.deduct.call]p2:
- // If P is not a reference type:
- // - If A is an array type, the pointer type produced by the
- // array-to-pointer standard conversion (4.2) is used in place of
- // A for type deduction; otherwise,
- if (ArgType->isArrayType())
- ArgType = Context.getArrayDecayedType(ArgType);
- // - If A is a function type, the pointer type produced by the
- // function-to-pointer standard conversion (4.3) is used in place
- // of A for type deduction; otherwise,
- else if (ArgType->isFunctionType())
- ArgType = Context.getPointerType(ArgType);
- else {
- // - If A is a cv-qualified type, the top level cv-qualifiers of A’s
- // type are ignored for type deduction.
- QualType CanonArgType = Context.getCanonicalType(ArgType);
- if (ArgType.getCVRQualifiers())
- ArgType = ArgType.getUnqualifiedType();
+ // C++0x [temp.deduct.call]p1:
+ // For a function parameter pack that occurs at the end of the
+ // parameter-declaration-list, the type A of each remaining argument of
+ // the call is compared with the type P of the declarator-id of the
+ // function parameter pack. Each comparison deduces template arguments
+ // for subsequent positions in the template parameter packs expanded by
+ // the function parameter pack. For a function parameter pack that does
+ // not occur at the end of the parameter-declaration-list, the type of
+ // the parameter pack is a non-deduced context.
+ if (ParamIdx + 1 < NumParams)
+ break;
+
+ QualType ParamPattern = ParamExpansion->getPattern();
+ llvm::SmallVector<unsigned, 2> PackIndices;
+ {
+ llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(ParamPattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
}
}
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ llvm::SmallVector<llvm::SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ llvm::SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(*this, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+ bool HasAnyArguments = false;
+ for (; ArgIdx < NumArgs; ++ArgIdx) {
+ HasAnyArguments = true;
+
+ ParamType = ParamPattern;
+ Expr *Arg = Args[ArgIdx];
+ QualType ArgType = Arg->getType();
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF)) {
+ // We can't actually perform any deduction for this argument, so stop
+ // deduction at this point.
+ ++ArgIdx;
+ break;
+ }
- // C++0x [temp.deduct.call]p4:
- // In general, the deduction process attempts to find template argument
- // values that will make the deduced A identical to A (after the type A
- // is transformed as described above). [...]
- unsigned TDF = TDF_SkipNonDependent;
-
- // - If the original P is a reference type, the deduced A (i.e., the
- // type referred to by the reference) can be more cv-qualified than
- // the transformed A.
- if (ParamRefType)
- TDF |= TDF_ParamWithReferenceType;
- // - The transformed A can be another pointer or pointer to member
- // type that can be converted to the deduced A via a qualification
- // conversion (4.4).
- if (ArgType->isPointerType() || ArgType->isMemberPointerType() ||
- ArgType->isObjCObjectPointerType())
- TDF |= TDF_IgnoreQualifiers;
- // - If P is a class and P has the form simple-template-id, then the
- // transformed A can be a derived class of the deduced A. Likewise,
- // if P is a pointer to a class of the form simple-template-id, the
- // transformed A can be a pointer to a derived class pointed to by
- // the deduced A.
- if (isSimpleTemplateIdType(ParamType) ||
- (isa<PointerType>(ParamType) &&
- isSimpleTemplateIdType(
- ParamType->getAs<PointerType>()->getPointeeType())))
- TDF |= TDF_DerivedClass;
+ if (TemplateDeductionResult Result
+ = ::DeduceTemplateArguments(*this, TemplateParams,
+ ParamType, ArgType, Info, Deduced,
+ TDF))
+ return Result;
- if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this, TemplateParams,
- ParamType, ArgType, Info, Deduced,
- TDF))
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (Sema::TemplateDeductionResult Result
+ = FinishArgumentPackDeduction(*this, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
return Result;
- // FIXME: we need to check that the deduced A is the same as A,
- // modulo the various allowed differences.
+    // After we've matched against a parameter pack, we're done.
+ break;
}
return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
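
A minimal editorial example of the trailing function parameter pack handled
above (not from the patch):

    template<typename... Ts> void f(int first, Ts... rest);
    f(1, 2.5, 'x');  // 'first' is matched normally; the pack pattern is then
                     // compared with each remaining argument, deducing
                     // Ts = {double, char}.
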
@@ -1808,7 +2743,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
-/// \param ExplicitTemplateArguments the explicitly-specified template
+/// \param ExplicitTemplateArguments the explicitly-specified template
/// arguments.
///
/// \param ArgFunctionType the function type that will be used as the
@@ -1862,13 +2797,23 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
if (TemplateDeductionResult Result
= ::DeduceTemplateArguments(*this, TemplateParams,
FunctionType, ArgFunctionType, Info,
- Deduced, 0))
+ Deduced, TDF_TopLevelParameterTypeList))
return Result;
}
-
- return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
- NumExplicitlySpecified,
- Specialization, Info);
+
+ if (TemplateDeductionResult Result
+ = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info))
+ return Result;
+
+ // If the requested function type does not match the actual type of the
+ // specialization, template argument deduction fails.
+ if (!ArgFunctionType.isNull() &&
+ !Context.hasSameType(ArgFunctionType, Specialization->getType()))
+ return TDK_NonDeducedMismatch;
+
+ return TDK_Success;
}
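
For illustration, the final consistency check added above (an editorial sketch,
not part of the patch):

    template<typename T> T f(T);
    int (*p)(int) = f;  // Deduction against the target type int(int) yields
                        // T = int; the specialization's type must then match
                        // ArgFunctionType exactly, or deduction fails with
                        // TDK_NonDeducedMismatch.
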
/// \brief Deduce template arguments for a templated conversion
@@ -1915,12 +2860,12 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
else if (P->isFunctionType())
P = Context.getPointerType(P);
// - If P is a cv-qualified type, the top level cv-qualifiers of
- // P’s type are ignored for type deduction.
+ // P's type are ignored for type deduction.
else
P = P.getUnqualifiedType();
// C++0x [temp.deduct.conv]p3:
- // If A is a cv-qualified type, the top level cv-qualifiers of A’s
+ // If A is a cv-qualified type, the top level cv-qualifiers of A's
// type are ignored for type deduction.
A = A.getUnqualifiedType();
}
@@ -1950,7 +2895,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
if (ToType->isReferenceType())
TDF |= TDF_ParamWithReferenceType;
// - The deduced A can be another pointer or pointer to member
-  //     type that can be converted to A via a qualiﬁcation
+ // type that can be converted to A via a qualification
// conversion.
//
// (C++0x [temp.deduct.conv]p6 clarifies that this only happens when
@@ -1971,7 +2916,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
LocalInstantiationScope InstScope(*this);
FunctionDecl *Spec = 0;
TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, 0, Spec,
+ = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, 0, Spec,
Info);
Specialization = cast_or_null<CXXConversionDecl>(Spec);
return Result;
@@ -1983,7 +2928,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
-/// \param ExplicitTemplateArguments the explicitly-specified template
+/// \param ExplicitTemplateArguments the explicitly-specified template
/// arguments.
///
/// \param Specialization if template argument deduction was successful,
@@ -2003,88 +2948,93 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType(), Specialization, Info);
}
-/// \brief Stores the result of comparing the qualifiers of two types.
-enum DeductionQualifierComparison {
- NeitherMoreQualified = 0,
- ParamMoreQualified,
- ArgMoreQualified
-};
+namespace {
+ /// Substitute the 'auto' type specifier within a type for a given replacement
+ /// type.
+ class SubstituteAutoTransform :
+ public TreeTransform<SubstituteAutoTransform> {
+ QualType Replacement;
+ public:
+ SubstituteAutoTransform(Sema &SemaRef, QualType Replacement) :
+ TreeTransform<SubstituteAutoTransform>(SemaRef), Replacement(Replacement) {
+ }
+ QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) {
+ // If we're building the type pattern to deduce against, don't wrap the
+ // substituted type in an AutoType. Certain template deduction rules
+ // apply only when a template type parameter appears directly (and not if
+ // the parameter is found through desugaring). For instance:
+ // auto &&lref = lvalue;
+ // must transform into "rvalue reference to T" not "rvalue reference to
+ // auto type deduced as T" in order for [temp.deduct.call]p3 to apply.
+ if (isa<TemplateTypeParmType>(Replacement)) {
+ QualType Result = Replacement;
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ } else {
+ QualType Result = RebuildAutoType(Replacement);
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+ }
+ };
+}
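
A minimal illustration (editor's sketch, not part of the patch) of why the
transform substitutes the bare template parameter rather than an AutoType
wrapper: reference collapsing under [temp.deduct.call]p3 must see "T &&"
directly.

    int n = 0;            // an lvalue
    auto &&r = n;         // deduced as if by: template<class T> void f(T &&); f(n);
                          // T = int&, so T&& collapses to int& and r binds as
                          // an lvalue reference.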
-/// \brief Deduce the template arguments during partial ordering by comparing
-/// the parameter type and the argument type (C++0x [temp.deduct.partial]).
-///
-/// \param S the semantic analysis object within which we are deducing
+/// \brief Deduce the type for an auto type-specifier (C++0x [dcl.spec.auto]p6)
///
-/// \param TemplateParams the template parameters that we are deducing
-///
-/// \param ParamIn the parameter type
-///
-/// \param ArgIn the argument type
+/// \param Type the type pattern using the auto type-specifier.
///
-/// \param Info information about the template argument deduction itself
+/// \param Init the initializer for the variable whose type is to be deduced.
///
-/// \param Deduced the deduced template arguments
+/// \param Result if type deduction was successful, this will be set to the
+/// deduced type. This may still contain undeduced autos if the type is
+/// dependent.
///
-/// \returns the result of template argument deduction so far. Note that a
-/// "success" result means that template argument deduction has not yet failed,
-/// but it may still fail, later, for other reasons.
-static Sema::TemplateDeductionResult
-DeduceTemplateArgumentsDuringPartialOrdering(Sema &S,
- TemplateParameterList *TemplateParams,
- QualType ParamIn, QualType ArgIn,
- TemplateDeductionInfo &Info,
- llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- llvm::SmallVectorImpl<DeductionQualifierComparison> *QualifierComparisons) {
- CanQualType Param = S.Context.getCanonicalType(ParamIn);
- CanQualType Arg = S.Context.getCanonicalType(ArgIn);
-
- // C++0x [temp.deduct.partial]p5:
- // Before the partial ordering is done, certain transformations are
- // performed on the types used for partial ordering:
- // - If P is a reference type, P is replaced by the type referred to.
- CanQual<ReferenceType> ParamRef = Param->getAs<ReferenceType>();
- if (!ParamRef.isNull())
- Param = ParamRef->getPointeeType();
-
- // - If A is a reference type, A is replaced by the type referred to.
- CanQual<ReferenceType> ArgRef = Arg->getAs<ReferenceType>();
- if (!ArgRef.isNull())
- Arg = ArgRef->getPointeeType();
-
- if (QualifierComparisons && !ParamRef.isNull() && !ArgRef.isNull()) {
- // C++0x [temp.deduct.partial]p6:
- // If both P and A were reference types (before being replaced with the
- // type referred to above), determine which of the two types (if any) is
- // more cv-qualified than the other; otherwise the types are considered to
- // be equally cv-qualified for partial ordering purposes. The result of this
- // determination will be used below.
- //
- // We save this information for later, using it only when deduction
- // succeeds in both directions.
- DeductionQualifierComparison QualifierResult = NeitherMoreQualified;
- if (Param.isMoreQualifiedThan(Arg))
- QualifierResult = ParamMoreQualified;
- else if (Arg.isMoreQualifiedThan(Param))
- QualifierResult = ArgMoreQualified;
- QualifierComparisons->push_back(QualifierResult);
- }
-
- // C++0x [temp.deduct.partial]p7:
- // Remove any top-level cv-qualifiers:
- // - If P is a cv-qualified type, P is replaced by the cv-unqualified
- // version of P.
- Param = Param.getUnqualifiedType();
- // - If A is a cv-qualified type, A is replaced by the cv-unqualified
- // version of A.
- Arg = Arg.getUnqualifiedType();
-
- // C++0x [temp.deduct.partial]p8:
- // Using the resulting types P and A the deduction is then done as
- // described in 14.9.2.5. If deduction succeeds for a given type, the type
- // from the argument template is considered to be at least as specialized
- // as the type from the parameter template.
- return DeduceTemplateArguments(S, TemplateParams, Param, Arg, Info,
- Deduced, TDF_None);
+/// \returns true if deduction succeeded, false if it failed.
+bool
+Sema::DeduceAutoType(QualType Type, Expr *Init, QualType &Result) {
+ if (Init->isTypeDependent()) {
+ Result = Type;
+ return true;
+ }
+
+ SourceLocation Loc = Init->getExprLoc();
+
+ LocalInstantiationScope InstScope(*this);
+
+ // Build template<class TemplParam> void Func(FuncParam);
+ NamedDecl *TemplParam
+ = TemplateTypeParmDecl::Create(Context, 0, Loc, 0, 0, 0, false, false);
+ TemplateParameterList *TemplateParams
+ = TemplateParameterList::Create(Context, Loc, Loc, &TemplParam, 1, Loc);
+
+ QualType TemplArg = Context.getTemplateTypeParmType(0, 0, false);
+ QualType FuncParam =
+ SubstituteAutoTransform(*this, TemplArg).TransformType(Type);
+
+ // Deduce type of TemplParam in Func(Init)
+ llvm::SmallVector<DeducedTemplateArgument, 1> Deduced;
+ Deduced.resize(1);
+ QualType InitType = Init->getType();
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ FuncParam, InitType, Init,
+ TDF))
+ return false;
+
+ TemplateDeductionInfo Info(Context, Loc);
+ if (::DeduceTemplateArguments(*this, TemplateParams,
+ FuncParam, InitType, Info, Deduced,
+ TDF))
+ return false;
+
+ QualType DeducedType = Deduced[0].getAsType();
+ if (DeducedType.isNull())
+ return false;
+
+ Result = SubstituteAutoTransform(*this, DeducedType).TransformType(Type);
+ return true;
}
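
A short worked example (editor's sketch, not part of the patch) of the
invented template that DeduceAutoType builds:

    int i = 0;
    const auto *p = &i;   // pattern: template<class T> void Func(const T *FuncParam);
                          // deduction for Func(&i) gives T = int, so the
                          // resulting type of p is 'const int *'.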
static void
@@ -2092,7 +3042,31 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
bool OnlyDeduced,
unsigned Level,
llvm::SmallVectorImpl<bool> &Deduced);
-
+
+/// \brief If this is a non-static member function, add the type of its
+/// implicit object parameter to the given list of argument types.
+static void MaybeAddImplicitObjectParameterType(ASTContext &Context,
+ CXXMethodDecl *Method,
+ llvm::SmallVectorImpl<QualType> &ArgTypes) {
+ if (Method->isStatic())
+ return;
+
+ // C++ [over.match.funcs]p4:
+ //
+ // For non-static member functions, the type of the implicit
+ // object parameter is
+ // - "lvalue reference to cv X" for functions declared without a
+ // ref-qualifier or with the & ref-qualifier
+ // - "rvalue reference to cv X" for functions declared with the
+ // && ref-qualifier
+ //
+ // FIXME: We don't have ref-qualifiers yet, so we don't do that part.
+ QualType ArgTy = Context.getTypeDeclType(Method->getParent());
+ ArgTy = Context.getQualifiedType(ArgTy,
+ Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ ArgTy = Context.getLValueReferenceType(ArgTy);
+ ArgTypes.push_back(ArgTy);
+}
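
A brief illustration (editor's note, not part of the patch) of the synthesized
implicit object parameter; per the FIXME above, ref-qualifiers are not yet
modeled, so the type is always an lvalue reference to cv X.

    struct X {
      template<class T> void f(T) const;   // implicit object parameter: const X &
    };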
+
/// \brief Determine whether the function template \p FT1 is at least as
/// specialized as \p FT2.
static bool isAtLeastAsSpecializedAs(Sema &S,
@@ -2100,12 +3074,13 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
TemplatePartialOrderingContext TPOC,
- llvm::SmallVectorImpl<DeductionQualifierComparison> *QualifierComparisons) {
+ unsigned NumCallArguments,
+ llvm::SmallVectorImpl<RefParamPartialOrderingComparison> *RefParamComparisons) {
FunctionDecl *FD1 = FT1->getTemplatedDecl();
- FunctionDecl *FD2 = FT2->getTemplatedDecl();
+ FunctionDecl *FD2 = FT2->getTemplatedDecl();
const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>();
const FunctionProtoType *Proto2 = FD2->getType()->getAs<FunctionProtoType>();
-
+
assert(Proto1 && Proto2 && "Function templates must have prototypes");
TemplateParameterList *TemplateParams = FT2->getTemplateParameters();
llvm::SmallVector<DeducedTemplateArgument, 4> Deduced;
@@ -2115,55 +3090,89 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
// The types used to determine the ordering depend on the context in which
// the partial ordering is done:
TemplateDeductionInfo Info(S.Context, Loc);
+ CXXMethodDecl *Method1 = 0;
+ CXXMethodDecl *Method2 = 0;
+ bool IsNonStatic2 = false;
+ bool IsNonStatic1 = false;
+ unsigned Skip2 = 0;
switch (TPOC) {
case TPOC_Call: {
// - In the context of a function call, the function parameter types are
// used.
- unsigned NumParams = std::min(Proto1->getNumArgs(), Proto2->getNumArgs());
- for (unsigned I = 0; I != NumParams; ++I)
- if (DeduceTemplateArgumentsDuringPartialOrdering(S,
- TemplateParams,
- Proto2->getArgType(I),
- Proto1->getArgType(I),
- Info,
- Deduced,
- QualifierComparisons))
+ Method1 = dyn_cast<CXXMethodDecl>(FD1);
+ Method2 = dyn_cast<CXXMethodDecl>(FD2);
+ IsNonStatic1 = Method1 && !Method1->isStatic();
+ IsNonStatic2 = Method2 && !Method2->isStatic();
+
+ // C++0x [temp.func.order]p3:
+ // [...] If only one of the function templates is a non-static
+ // member, that function template is considered to have a new
+ // first parameter inserted in its function parameter list. The
+ // new parameter is of type "reference to cv A," where cv are
+ // the cv-qualifiers of the function template (if any) and A is
+ // the class of which the function template is a member.
+ //
+ // C++98/03 doesn't have this provision, so instead we drop the
+ // first argument of the free function or static member, which
+ // seems to match existing practice.
+ llvm::SmallVector<QualType, 4> Args1;
+ unsigned Skip1 = !S.getLangOptions().CPlusPlus0x &&
+ IsNonStatic2 && !IsNonStatic1;
+ if (S.getLangOptions().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2)
+ MaybeAddImplicitObjectParameterType(S.Context, Method1, Args1);
+ Args1.insert(Args1.end(),
+ Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end());
+
+ llvm::SmallVector<QualType, 4> Args2;
+ Skip2 = !S.getLangOptions().CPlusPlus0x &&
+ IsNonStatic1 && !IsNonStatic2;
+ if (S.getLangOptions().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ MaybeAddImplicitObjectParameterType(S.Context, Method2, Args2);
+ Args2.insert(Args2.end(),
+ Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end());
+
+ // C++ [temp.func.order]p5:
+ // The presence of unused ellipsis and default arguments has no effect on
+ // the partial ordering of function templates.
+ if (Args1.size() > NumCallArguments)
+ Args1.resize(NumCallArguments);
+ if (Args2.size() > NumCallArguments)
+ Args2.resize(NumCallArguments);
+ if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
+ Args1.data(), Args1.size(), Info, Deduced,
+ TDF_None, /*PartialOrdering=*/true,
+ RefParamComparisons))
return false;
-
+
break;
}
-
+
case TPOC_Conversion:
// - In the context of a call to a conversion operator, the return types
// of the conversion function templates are used.
- if (DeduceTemplateArgumentsDuringPartialOrdering(S,
- TemplateParams,
- Proto2->getResultType(),
- Proto1->getResultType(),
- Info,
- Deduced,
- QualifierComparisons))
+ if (DeduceTemplateArguments(S, TemplateParams, Proto2->getResultType(),
+ Proto1->getResultType(), Info, Deduced,
+ TDF_None, /*PartialOrdering=*/true,
+ RefParamComparisons))
return false;
break;
-
+
case TPOC_Other:
- // - In other contexts (14.6.6.2) the function template’s function type
+ // - In other contexts (14.6.6.2) the function template's function type
// is used.
- if (DeduceTemplateArgumentsDuringPartialOrdering(S,
- TemplateParams,
- FD2->getType(),
- FD1->getType(),
- Info,
- Deduced,
- QualifierComparisons))
+ // FIXME: Don't we actually want to perform the adjustments on the parameter
+ // types?
+ if (DeduceTemplateArguments(S, TemplateParams, FD2->getType(),
+ FD1->getType(), Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true, RefParamComparisons))
return false;
break;
}
-
+
// C++0x [temp.deduct.partial]p11:
- // In most cases, all template parameters must have values in order for
- // deduction to succeed, but for partial ordering purposes a template
- // parameter may remain without a value provided it is not used in the
+ // In most cases, all template parameters must have values in order for
+ // deduction to succeed, but for partial ordering purposes a template
+ // parameter may remain without a value provided it is not used in the
// types being used for partial ordering. [ Note: a template parameter used
// in a non-deduced context is considered used. -end note]
unsigned ArgIdx = 0, NumArgs = Deduced.size();
@@ -2172,7 +3181,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
break;
if (ArgIdx == NumArgs) {
- // All template arguments were deduced. FT1 is at least as specialized
+ // All template arguments were deduced. FT1 is at least as specialized
// as FT2.
return true;
}
@@ -2182,37 +3191,62 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
UsedParameters.resize(TemplateParams->size());
switch (TPOC) {
case TPOC_Call: {
- unsigned NumParams = std::min(Proto1->getNumArgs(), Proto2->getNumArgs());
- for (unsigned I = 0; I != NumParams; ++I)
- ::MarkUsedTemplateParameters(S, Proto2->getArgType(I), false,
+ unsigned NumParams = std::min(NumCallArguments,
+ std::min(Proto1->getNumArgs(),
+ Proto2->getNumArgs()));
+ if (S.getLangOptions().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ ::MarkUsedTemplateParameters(S, Method2->getThisType(S.Context), false,
+ TemplateParams->getDepth(), UsedParameters);
+ for (unsigned I = Skip2; I < NumParams; ++I)
+ ::MarkUsedTemplateParameters(S, Proto2->getArgType(I), false,
TemplateParams->getDepth(),
UsedParameters);
break;
}
-
+
case TPOC_Conversion:
- ::MarkUsedTemplateParameters(S, Proto2->getResultType(), false,
+ ::MarkUsedTemplateParameters(S, Proto2->getResultType(), false,
TemplateParams->getDepth(),
UsedParameters);
break;
-
+
case TPOC_Other:
- ::MarkUsedTemplateParameters(S, FD2->getType(), false,
+ ::MarkUsedTemplateParameters(S, FD2->getType(), false,
TemplateParams->getDepth(),
UsedParameters);
break;
}
-
+
for (; ArgIdx != NumArgs; ++ArgIdx)
// If this argument had no value deduced but was used in one of the types
// used for partial ordering, then deduction fails.
if (Deduced[ArgIdx].isNull() && UsedParameters[ArgIdx])
return false;
-
+
+ return true;
+}
+
+/// \brief Determine whether this is a function template whose parameter-type-list
+/// ends with a function parameter pack.
+static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
+ FunctionDecl *Function = FunTmpl->getTemplatedDecl();
+ unsigned NumParams = Function->getNumParams();
+ if (NumParams == 0)
+ return false;
+
+ ParmVarDecl *Last = Function->getParamDecl(NumParams - 1);
+ if (!Last->isParameterPack())
+ return false;
+
+ // Make sure that no previous parameter is a parameter pack.
+ while (--NumParams > 0) {
+ if (Function->getParamDecl(NumParams - 1)->isParameterPack())
+ return false;
+ }
+
return true;
}
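
Examples (editor's sketch, not part of the patch) of what this predicate
accepts and rejects:

    template<class... Ts> void f(Ts...);        // true: trailing function parameter pack
    template<class... Ts> void g(Ts..., int);   // false: the last parameter is not a pack
    template<class T>     void h(T);            // false: no parameter pack at all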
-
-
+
/// \brief Returns the more specialized function template according
/// to the rules of function template partial ordering (C++ [temp.func.order]).
///
@@ -2223,77 +3257,113 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
/// \param TPOC the context in which we are performing partial ordering of
/// function templates.
///
+/// \param NumCallArguments The number of arguments in a call, used only
+/// when \c TPOC is \c TPOC_Call.
+///
/// \returns the more specialized function template. If neither
/// template is more specialized, returns NULL.
FunctionTemplateDecl *
Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
- TemplatePartialOrderingContext TPOC) {
- llvm::SmallVector<DeductionQualifierComparison, 4> QualifierComparisons;
- bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC, 0);
- bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
- &QualifierComparisons);
-
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments) {
+ llvm::SmallVector<RefParamPartialOrderingComparison, 4> RefParamComparisons;
+ bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
+ NumCallArguments, 0);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
+ NumCallArguments,
+ &RefParamComparisons);
+
if (Better1 != Better2) // We have a clear winner
return Better1? FT1 : FT2;
-
+
if (!Better1 && !Better2) // Neither is better than the other
return 0;
-
// C++0x [temp.deduct.partial]p10:
- // If for each type being considered a given template is at least as
+ // If for each type being considered a given template is at least as
// specialized for all types and more specialized for some set of types and
- // the other template is not more specialized for any types or is not at
+ // the other template is not more specialized for any types or is not at
// least as specialized for any types, then the given template is more
// specialized than the other template. Otherwise, neither template is more
// specialized than the other.
Better1 = false;
Better2 = false;
- for (unsigned I = 0, N = QualifierComparisons.size(); I != N; ++I) {
+ for (unsigned I = 0, N = RefParamComparisons.size(); I != N; ++I) {
// C++0x [temp.deduct.partial]p9:
// If, for a given type, deduction succeeds in both directions (i.e., the
- // types are identical after the transformations above) and if the type
- // from the argument template is more cv-qualified than the type from the
- // parameter template (as described above) that type is considered to be
- // more specialized than the other. If neither type is more cv-qualified
- // than the other then neither type is more specialized than the other.
- switch (QualifierComparisons[I]) {
- case NeitherMoreQualified:
- break;
-
- case ParamMoreQualified:
- Better1 = true;
- if (Better2)
- return 0;
- break;
-
- case ArgMoreQualified:
- Better2 = true;
- if (Better1)
- return 0;
- break;
+ // types are identical after the transformations above) and both P and A
+ // were reference types (before being replaced with the type referred to
+ // above):
+
+ // -- if the type from the argument template was an lvalue reference
+ // and the type from the parameter template was not, the argument
+ // type is considered to be more specialized than the other;
+ // otherwise,
+ if (!RefParamComparisons[I].ArgIsRvalueRef &&
+ RefParamComparisons[I].ParamIsRvalueRef) {
+ Better2 = true;
+ if (Better1)
+ return 0;
+ continue;
+ } else if (!RefParamComparisons[I].ParamIsRvalueRef &&
+ RefParamComparisons[I].ArgIsRvalueRef) {
+ Better1 = true;
+ if (Better2)
+ return 0;
+ continue;
}
+
+ // -- if the type from the argument template is more cv-qualified than
+ // the type from the parameter template (as described above), the
+ // argument type is considered to be more specialized than the
+ // other; otherwise,
+ switch (RefParamComparisons[I].Qualifiers) {
+ case NeitherMoreQualified:
+ break;
+
+ case ParamMoreQualified:
+ Better1 = true;
+ if (Better2)
+ return 0;
+ continue;
+
+ case ArgMoreQualified:
+ Better2 = true;
+ if (Better1)
+ return 0;
+ continue;
+ }
+
+ // -- neither type is more specialized than the other.
}
-
+
assert(!(Better1 && Better2) && "Should have broken out in the loop above");
if (Better1)
return FT1;
else if (Better2)
return FT2;
- else
- return 0;
+
+ // FIXME: This mimics what GCC implements, but doesn't match up with the
+ // proposed resolution for core issue 692. This area needs to be sorted out,
+ // but for now we attempt to maintain compatibility.
+ bool Variadic1 = isVariadicFunctionTemplate(FT1);
+ bool Variadic2 = isVariadicFunctionTemplate(FT2);
+ if (Variadic1 != Variadic2)
+ return Variadic1? FT2 : FT1;
+
+ return 0;
}
/// \brief Determine if the two templates are equivalent.
static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
if (T1 == T2)
return true;
-
+
if (!T1 || !T2)
return false;
-
+
return T1->getCanonicalDecl() == T2->getCanonicalDecl();
}
@@ -2309,7 +3379,10 @@ static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
/// \param TPOC the partial ordering context to use to compare the function
/// template specializations.
///
-/// \param Loc the location where the ambiguity or no-specializations
+/// \param NumCallArguments The number of arguments in a call, used only
+/// when \c TPOC is \c TPOC_Call.
+///
+/// \param Loc the location where the ambiguity or no-specializations
/// diagnostic should occur.
///
/// \param NoneDiag partial diagnostic used to diagnose cases where there are
@@ -2323,35 +3396,38 @@ static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
/// in this diagnostic should be unbound, which will correspond to the string
/// describing the template arguments for the function template specialization.
///
-/// \param Index if non-NULL and the result of this function is non-NULL, 
+/// \param Index if non-NULL and the result of this function is non-NULL,
/// receives the index corresponding to the resulting function template
/// specialization.
///
-/// \returns the most specialized function template specialization, if
+/// \returns the most specialized function template specialization, if
/// found. Otherwise, returns SpecEnd.
///
-/// \todo FIXME: Consider passing in the "also-ran" candidates that failed
+/// \todo FIXME: Consider passing in the "also-ran" candidates that failed
/// template argument deduction.
UnresolvedSetIterator
Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
- UnresolvedSetIterator SpecEnd,
+ UnresolvedSetIterator SpecEnd,
TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
- const PartialDiagnostic &CandidateDiag) {
+ const PartialDiagnostic &CandidateDiag,
+ bool Complain) {
if (SpecBegin == SpecEnd) {
- Diag(Loc, NoneDiag);
+ if (Complain)
+ Diag(Loc, NoneDiag);
return SpecEnd;
}
-
- if (SpecBegin + 1 == SpecEnd)
+
+ if (SpecBegin + 1 == SpecEnd)
return SpecBegin;
-
+
// Find the function template that is better than all of the templates it
// has been compared to.
UnresolvedSetIterator Best = SpecBegin;
- FunctionTemplateDecl *BestTemplate
+ FunctionTemplateDecl *BestTemplate
= cast<FunctionDecl>(*Best)->getPrimaryTemplate();
assert(BestTemplate && "Not a function template specialization?");
for (UnresolvedSetIterator I = SpecBegin + 1; I != SpecEnd; ++I) {
@@ -2359,13 +3435,13 @@ Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
= cast<FunctionDecl>(*I)->getPrimaryTemplate();
assert(Challenger && "Not a function template specialization?");
if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
- Loc, TPOC),
+ Loc, TPOC, NumCallArguments),
Challenger)) {
Best = I;
BestTemplate = Challenger;
}
}
-
+
// Make sure that the "best" function template is more specialized than all
// of the others.
bool Ambiguous = false;
@@ -2373,29 +3449,31 @@ Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
FunctionTemplateDecl *Challenger
= cast<FunctionDecl>(*I)->getPrimaryTemplate();
if (I != Best &&
- !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
- Loc, TPOC),
+ !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
+ Loc, TPOC, NumCallArguments),
BestTemplate)) {
Ambiguous = true;
break;
}
}
-
+
if (!Ambiguous) {
// We found an answer. Return it.
return Best;
}
-
+
// Diagnose the ambiguity.
- Diag(Loc, AmbigDiag);
-
+ if (Complain)
+ Diag(Loc, AmbigDiag);
+
+ if (Complain)
// FIXME: Can we order the candidates in some sane way?
- for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I)
- Diag((*I)->getLocation(), CandidateDiag)
- << getTemplateArgumentBindingsText(
- cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I)
+ Diag((*I)->getLocation(), CandidateDiag)
+ << getTemplateArgumentBindingsText(
+ cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
*cast<FunctionDecl>(*I)->getTemplateSpecializationArgs());
-
+
return SpecEnd;
}
@@ -2416,17 +3494,17 @@ Sema::getMoreSpecializedPartialSpecialization(
SourceLocation Loc) {
// C++ [temp.class.order]p1:
// For two class template partial specializations, the first is at least as
- // specialized as the second if, given the following rewrite to two
- // function templates, the first function template is at least as
- // specialized as the second according to the ordering rules for function
+ // specialized as the second if, given the following rewrite to two
+ // function templates, the first function template is at least as
+ // specialized as the second according to the ordering rules for function
// templates (14.6.6.2):
// - the first function template has the same template parameters as the
- // first partial specialization and has a single function parameter
- // whose type is a class template specialization with the template
+ // first partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
// arguments of the first partial specialization, and
// - the second function template has the same template parameters as the
- // second partial specialization and has a single function parameter
- // whose type is a class template specialization with the template
+ // second partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
// arguments of the second partial specialization.
//
// Rather than synthesize function templates, we merely perform the
@@ -2443,39 +3521,39 @@ Sema::getMoreSpecializedPartialSpecialization(
QualType PT1 = PS1->getInjectedSpecializationType();
QualType PT2 = PS2->getInjectedSpecializationType();
-
+
// Determine whether PS1 is at least as specialized as PS2
Deduced.resize(PS2->getTemplateParameters()->size());
- bool Better1 = !DeduceTemplateArgumentsDuringPartialOrdering(*this,
- PS2->getTemplateParameters(),
- PT2,
- PT1,
- Info,
- Deduced,
- 0);
- if (Better1)
- Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
- PS1->getTemplateArgs(),
+ bool Better1 = !::DeduceTemplateArguments(*this, PS2->getTemplateParameters(),
+ PT2, PT1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ /*RefParamComparisons=*/0);
+ if (Better1) {
+ InstantiatingTemplate Inst(*this, PS2->getLocation(), PS2,
+ Deduced.data(), Deduced.size(), Info);
+ Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
+ PS1->getTemplateArgs(),
Deduced, Info);
-
+ }
+
// Determine whether PS2 is at least as specialized as PS1
Deduced.clear();
Deduced.resize(PS1->getTemplateParameters()->size());
- bool Better2 = !DeduceTemplateArgumentsDuringPartialOrdering(*this,
- PS1->getTemplateParameters(),
- PT1,
- PT2,
- Info,
- Deduced,
- 0);
- if (Better2)
- Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
- PS2->getTemplateArgs(),
+ bool Better2 = !::DeduceTemplateArguments(*this, PS1->getTemplateParameters(),
+ PT1, PT2, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ /*RefParamComparisons=*/0);
+ if (Better2) {
+ InstantiatingTemplate Inst(*this, PS1->getLocation(), PS1,
+ Deduced.data(), Deduced.size(), Info);
+ Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
+ PS2->getTemplateArgs(),
Deduced, Info);
-
+ }
+
if (Better1 == Better2)
return 0;
-
+
return Better1? PS1 : PS2;
}
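
A short example (editor's sketch, not part of the patch) of this ordering:

    template<class T, class U> struct A {};           // primary template
    template<class T, class U> struct A<T*, U> {};    // PS1
    template<class T>          struct A<T*, int> {};  // PS2
    A<char*, int> a;   // deducing PS1's pattern from PS2's succeeds, but not
                       // the reverse, so PS2 is more specialized and is chosen.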
@@ -2494,7 +3572,15 @@ MarkUsedTemplateParameters(Sema &SemaRef,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallVectorImpl<bool> &Used) {
- // FIXME: if !OnlyDeduced, we have to walk the whole subexpression to
+ // We can deduce from a pack expansion.
+ if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
+ E = Expansion->getPattern();
+
+ // Skip through any implicit casts we added while type-checking.
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
+
+ // FIXME: if !OnlyDeduced, we have to walk the whole subexpression to
// find other occurrences of template parameters.
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
if (!DRE)
@@ -2519,13 +3605,13 @@ MarkUsedTemplateParameters(Sema &SemaRef,
llvm::SmallVectorImpl<bool> &Used) {
if (!NNS)
return;
-
+
MarkUsedTemplateParameters(SemaRef, NNS->getPrefix(), OnlyDeduced, Depth,
Used);
- MarkUsedTemplateParameters(SemaRef, QualType(NNS->getAsType(), 0),
+ MarkUsedTemplateParameters(SemaRef, QualType(NNS->getAsType(), 0),
OnlyDeduced, Depth, Used);
}
-
+
/// \brief Mark the template parameters that are used by the given
/// template name.
static void
@@ -2542,12 +3628,12 @@ MarkUsedTemplateParameters(Sema &SemaRef,
}
return;
}
-
+
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName())
- MarkUsedTemplateParameters(SemaRef, QTN->getQualifier(), OnlyDeduced,
+ MarkUsedTemplateParameters(SemaRef, QTN->getQualifier(), OnlyDeduced,
Depth, Used);
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName())
- MarkUsedTemplateParameters(SemaRef, DTN->getQualifier(), OnlyDeduced,
+ MarkUsedTemplateParameters(SemaRef, DTN->getQualifier(), OnlyDeduced,
Depth, Used);
}
@@ -2560,7 +3646,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
llvm::SmallVectorImpl<bool> &Used) {
if (T.isNull())
return;
-
+
// Non-dependent types have nothing deducible
if (!T->isDependentType())
return;
@@ -2626,7 +3712,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
= cast<DependentSizedExtVectorType>(T);
MarkUsedTemplateParameters(SemaRef, VecType->getElementType(), OnlyDeduced,
Depth, Used);
- MarkUsedTemplateParameters(SemaRef, VecType->getSizeExpr(), OnlyDeduced,
+ MarkUsedTemplateParameters(SemaRef, VecType->getSizeExpr(), OnlyDeduced,
Depth, Used);
break;
}
@@ -2648,6 +3734,17 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
break;
}
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst
+ = cast<SubstTemplateTypeParmPackType>(T);
+ MarkUsedTemplateParameters(SemaRef,
+ QualType(Subst->getReplacedParameter(), 0),
+ OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(SemaRef, Subst->getArgumentPack(),
+ OnlyDeduced, Depth, Used);
+ break;
+ }
+
case Type::InjectedClassName:
T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
// fall through
@@ -2657,6 +3754,15 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
= cast<TemplateSpecializationType>(T);
MarkUsedTemplateParameters(SemaRef, Spec->getTemplateName(), OnlyDeduced,
Depth, Used);
+
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ break;
+
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth,
Used);
@@ -2665,7 +3771,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::Complex:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(SemaRef,
cast<ComplexType>(T)->getElementType(),
OnlyDeduced, Depth, Used);
break;
@@ -2683,6 +3789,15 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
if (!OnlyDeduced)
MarkUsedTemplateParameters(SemaRef, Spec->getQualifier(),
OnlyDeduced, Depth, Used);
+
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ break;
+
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth,
Used);
@@ -2710,6 +3825,17 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
OnlyDeduced, Depth, Used);
break;
+ case Type::PackExpansion:
+ MarkUsedTemplateParameters(SemaRef,
+ cast<PackExpansionType>(T)->getPattern(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Auto:
+ MarkUsedTemplateParameters(SemaRef,
+ cast<AutoType>(T)->getDeducedType(),
+ OnlyDeduced, Depth, Used);
+    // fall through
+
// None of these types have any template parameters in them.
case Type::Builtin:
case Type::VariableArray:
@@ -2749,15 +3875,17 @@ MarkUsedTemplateParameters(Sema &SemaRef,
break;
case TemplateArgument::Template:
- MarkUsedTemplateParameters(SemaRef, TemplateArg.getAsTemplate(),
+ case TemplateArgument::TemplateExpansion:
+ MarkUsedTemplateParameters(SemaRef,
+ TemplateArg.getAsTemplateOrTemplatePattern(),
OnlyDeduced, Depth, Used);
break;
case TemplateArgument::Expression:
- MarkUsedTemplateParameters(SemaRef, TemplateArg.getAsExpr(), OnlyDeduced,
+ MarkUsedTemplateParameters(SemaRef, TemplateArg.getAsExpr(), OnlyDeduced,
Depth, Used);
break;
-
+
case TemplateArgument::Pack:
for (TemplateArgument::pack_iterator P = TemplateArg.pack_begin(),
PEnd = TemplateArg.pack_end();
@@ -2780,21 +3908,29 @@ void
Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced, unsigned Depth,
llvm::SmallVectorImpl<bool> &Used) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(TemplateArgs.data(), TemplateArgs.size()))
+ return;
+
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- ::MarkUsedTemplateParameters(*this, TemplateArgs[I], OnlyDeduced,
+ ::MarkUsedTemplateParameters(*this, TemplateArgs[I], OnlyDeduced,
Depth, Used);
}
/// \brief Marks all of the template parameters that will be deduced by a
/// call to the given function template.
-void
+void
Sema::MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
llvm::SmallVectorImpl<bool> &Deduced) {
- TemplateParameterList *TemplateParams
+ TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
Deduced.clear();
Deduced.resize(TemplateParams->size());
-
+
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
::MarkUsedTemplateParameters(*this, Function->getParamDecl(I)->getType(),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 4d4c181..44f5913 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -147,8 +147,10 @@ Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange)
- : SemaRef(SemaRef) {
-
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = CheckInstantiationDepth(PointOfInstantiation,
InstantiationRange);
if (!Invalid) {
@@ -159,6 +161,7 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Inst.TemplateArgs = 0;
Inst.NumTemplateArgs = 0;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
}
}
@@ -169,8 +172,10 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
SourceRange InstantiationRange)
- : SemaRef(SemaRef) {
-
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = CheckInstantiationDepth(PointOfInstantiation,
InstantiationRange);
if (!Invalid) {
@@ -182,6 +187,7 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
}
}
@@ -192,9 +198,12 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange)
-: SemaRef(SemaRef) {
-
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = CheckInstantiationDepth(PointOfInstantiation,
InstantiationRange);
if (!Invalid) {
@@ -204,7 +213,9 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
Inst.Entity = reinterpret_cast<uintptr_t>(FunctionTemplate);
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.DeductionInfo = &DeductionInfo;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
if (!Inst.isInstantiationRecord())
@@ -217,9 +228,12 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
ClassTemplatePartialSpecializationDecl *PartialSpec,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange)
- : SemaRef(SemaRef) {
-
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = false;
ActiveTemplateInstantiation Inst;
@@ -228,7 +242,9 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
Inst.Entity = reinterpret_cast<uintptr_t>(PartialSpec);
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.DeductionInfo = &DeductionInfo;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
assert(!Inst.isInstantiationRecord());
@@ -241,8 +257,10 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
SourceRange InstantiationRange)
- : SemaRef(SemaRef) {
-
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
if (!Invalid) {
@@ -254,17 +272,22 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
}
}
Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
+ NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
- SourceRange InstantiationRange) : SemaRef(SemaRef) {
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = false;
ActiveTemplateInstantiation Inst;
@@ -275,6 +298,7 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
assert(!Inst.isInstantiationRecord());
@@ -283,11 +307,15 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Sema::InstantiatingTemplate::
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
- TemplateDecl *Template,
+ NamedDecl *Template,
TemplateTemplateParmDecl *Param,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
- SourceRange InstantiationRange) : SemaRef(SemaRef) {
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = false;
ActiveTemplateInstantiation Inst;
Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
@@ -297,6 +325,7 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
assert(!Inst.isInstantiationRecord());
@@ -309,7 +338,11 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Param,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
- SourceRange InstantiationRange) : SemaRef(SemaRef) {
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
Invalid = false;
ActiveTemplateInstantiation Inst;
@@ -320,6 +353,7 @@ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Inst.TemplateArgs = TemplateArgs;
Inst.NumTemplateArgs = NumTemplateArgs;
Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
SemaRef.ActiveTemplateInstantiations.push_back(Inst);
assert(!Inst.isInstantiationRecord());
@@ -332,7 +366,8 @@ void Sema::InstantiatingTemplate::Clear() {
assert(SemaRef.NonInstantiationEntries > 0);
--SemaRef.NonInstantiationEntries;
}
-
+ SemaRef.InNonInstantiationSFINAEContext
+ = SavedInNonInstantiationSFINAEContext;
SemaRef.ActiveTemplateInstantiations.pop_back();
Invalid = true;
}
@@ -379,7 +414,7 @@ void Sema::PrintInstantiationStack() {
if (InstantiationIdx >= SkipStart && InstantiationIdx < SkipEnd) {
if (InstantiationIdx == SkipStart) {
// Note that we're skipping instantiations.
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_instantiation_contexts_suppressed)
<< unsigned(ActiveTemplateInstantiations.size() - Limit);
}
@@ -393,8 +428,7 @@ void Sema::PrintInstantiationStack() {
unsigned DiagID = diag::note_template_member_class_here;
if (isa<ClassTemplateSpecializationDecl>(Record))
DiagID = diag::note_template_class_instantiation_here;
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
- DiagID)
+ Diags.Report(Active->PointOfInstantiation, DiagID)
<< Context.getTypeDeclType(Record)
<< Active->InstantiationRange;
} else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
@@ -403,12 +437,11 @@ void Sema::PrintInstantiationStack() {
DiagID = diag::note_function_template_spec_here;
else
DiagID = diag::note_template_member_function_here;
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
- DiagID)
+ Diags.Report(Active->PointOfInstantiation, DiagID)
<< Function
<< Active->InstantiationRange;
} else {
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_template_static_data_member_def_here)
<< cast<VarDecl>(D)
<< Active->InstantiationRange;
@@ -423,7 +456,7 @@ void Sema::PrintInstantiationStack() {
Active->TemplateArgs,
Active->NumTemplateArgs,
Context.PrintingPolicy);
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_default_arg_instantiation_here)
<< (Template->getNameAsString() + TemplateArgsStr)
<< Active->InstantiationRange;
@@ -433,7 +466,7 @@ void Sema::PrintInstantiationStack() {
case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution: {
FunctionTemplateDecl *FnTmpl
= cast<FunctionTemplateDecl>((Decl *)Active->Entity);
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_explicit_template_arg_substitution_here)
<< FnTmpl
<< getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
@@ -447,7 +480,7 @@ void Sema::PrintInstantiationStack() {
if (ClassTemplatePartialSpecializationDecl *PartialSpec
= dyn_cast<ClassTemplatePartialSpecializationDecl>(
(Decl *)Active->Entity)) {
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_partial_spec_deduct_instantiation_here)
<< Context.getTypeDeclType(PartialSpec)
<< getTemplateArgumentBindingsText(
@@ -458,7 +491,7 @@ void Sema::PrintInstantiationStack() {
} else {
FunctionTemplateDecl *FnTmpl
= cast<FunctionTemplateDecl>((Decl *)Active->Entity);
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_function_template_deduction_instantiation_here)
<< FnTmpl
<< getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
@@ -477,7 +510,7 @@ void Sema::PrintInstantiationStack() {
Active->TemplateArgs,
Active->NumTemplateArgs,
Context.PrintingPolicy);
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ Diags.Report(Active->PointOfInstantiation,
diag::note_default_function_arg_instantiation_here)
<< (FD->getNameAsString() + TemplateArgsStr)
<< Active->InstantiationRange;
@@ -489,13 +522,19 @@ void Sema::PrintInstantiationStack() {
std::string Name;
if (!Parm->getName().empty())
Name = std::string(" '") + Parm->getName().str() + "'";
-
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+
+ TemplateParameterList *TemplateParams = 0;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+ Diags.Report(Active->PointOfInstantiation,
diag::note_prior_template_arg_substitution)
<< isa<TemplateTemplateParmDecl>(Parm)
<< Name
- << getTemplateArgumentBindingsText(
- Active->Template->getTemplateParameters(),
+ << getTemplateArgumentBindingsText(TemplateParams,
Active->TemplateArgs,
Active->NumTemplateArgs)
<< Active->InstantiationRange;
@@ -503,10 +542,17 @@ void Sema::PrintInstantiationStack() {
}
case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking: {
- Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ TemplateParameterList *TemplateParams = 0;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+
+ Diags.Report(Active->PointOfInstantiation,
diag::note_template_default_arg_checking)
- << getTemplateArgumentBindingsText(
- Active->Template->getTemplateParameters(),
+ << getTemplateArgumentBindingsText(TemplateParams,
Active->TemplateArgs,
Active->NumTemplateArgs)
<< Active->InstantiationRange;
@@ -516,8 +562,11 @@ void Sema::PrintInstantiationStack() {
}
}
-bool Sema::isSFINAEContext() const {
+llvm::Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
using llvm::SmallVector;
+ if (InNonInstantiationSFINAEContext)
+ return llvm::Optional<TemplateDeductionInfo *>(0);
+
for (SmallVector<ActiveTemplateInstantiation, 16>::const_reverse_iterator
Active = ActiveTemplateInstantiations.rbegin(),
ActiveEnd = ActiveTemplateInstantiations.rend();
@@ -525,10 +574,10 @@ bool Sema::isSFINAEContext() const {
++Active)
{
switch(Active->Kind) {
- case ActiveTemplateInstantiation::TemplateInstantiation:
case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
+ case ActiveTemplateInstantiation::TemplateInstantiation:
// This is a template instantiation, so there is no SFINAE.
- return false;
+ return llvm::Optional<TemplateDeductionInfo *>();
case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation:
case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution:
@@ -542,11 +591,25 @@ bool Sema::isSFINAEContext() const {
case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
// We're either substitution explicitly-specified template arguments
// or deduced template arguments, so SFINAE applies.
- return true;
+ assert(Active->DeductionInfo && "Missing deduction info pointer");
+ return Active->DeductionInfo;
}
}
- return false;
+ return llvm::Optional<TemplateDeductionInfo *>();
+}
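
A usage sketch (editor's note, not part of the patch) of the new result: an
empty Optional means no SFINAE, a null pointer means a non-instantiation
SFINAE context, and otherwise it points at the active deduction record.

    if (llvm::Optional<TemplateDeductionInfo *> Info = S.isSFINAEContext()) {
      // Substitution failure is not an error; *Info, when non-null, is where
      // the failure gets recorded.
    } else {
      // Ordinary instantiation: substitution failures are hard errors.
    }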
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
}
//===----------------------------------------------------------------------===/
@@ -587,7 +650,58 @@ namespace {
this->Loc = Loc;
this->Entity = Entity;
}
+
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ const UnexpandedParameterPack *Unexpanded,
+ unsigned NumUnexpanded,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ return getSema().CheckParameterPacksForExpansion(EllipsisLoc,
+ PatternRange, Unexpanded,
+ NumUnexpanded,
+ TemplateArgs,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions);
+ }
+
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) {
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(Pack);
+ }
+
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ TemplateArgument Result;
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ Result = TemplateArgs(Depth, Index);
+ TemplateArgs.setArgument(Depth, Index, TemplateArgument());
+ }
+ }
+
+ return Result;
+ }
+
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) {
+ if (Arg.isNull())
+ return;
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ TemplateArgs.setArgument(Depth, Index, Arg);
+ }
+ }
+
/// \brief Transform the given declaration by instantiating a reference to
/// this declaration.
Decl *TransformDecl(SourceLocation Loc, Decl *D);
@@ -602,10 +716,10 @@ namespace {
/// \brief Rebuild the exception declaration and register the declaration
/// as an instantiated local.
- VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl, QualType T,
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *Declarator,
IdentifierInfo *Name,
- SourceLocation Loc, SourceRange TypeRange);
+ SourceLocation Loc);
/// \brief Rebuild the Objective-C exception declaration and register the
/// declaration as an instantiated local.
@@ -614,25 +728,37 @@ namespace {
/// \brief Check for tag mismatches when instantiating an
/// elaborated type.
- QualType RebuildElaboratedType(ElaboratedTypeKeyword Keyword,
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS, QualType T);
+ TemplateName TransformTemplateName(TemplateName Name,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = 0);
+
ExprResult TransformPredefinedExpr(PredefinedExpr *E);
ExprResult TransformDeclRefExpr(DeclRefExpr *E);
ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E,
- NonTypeTemplateParmDecl *D);
-
+ NonTypeTemplateParmDecl *D);
+ ExprResult TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E);
+
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- QualType ObjectType);
- ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm);
+ FunctionProtoTypeLoc TL);
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ llvm::Optional<unsigned> NumExpansions);
/// \brief Transforms a template type parameter type by performing
/// substitution of the corresponding template type argument.
QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL,
- QualType ObjectType);
+ TemplateTypeParmTypeLoc TL);
+
+ /// \brief Transforms an already-substituted template type parameter pack
+ /// into either itself (if we aren't substituting into its pack expansion)
+ /// or the appropriate substituted argument.
+ QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL);
ExprResult TransformCallExpr(CallExpr *CE) {
getSema().CallsUndergoingInstantiation.push_back(CE);
@@ -669,8 +795,18 @@ Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
TTP->getPosition()))
return D;
- TemplateName Template
- = TemplateArgs(TTP->getDepth(), TTP->getPosition()).getAsTemplate();
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ assert(getSema().ArgumentPackSubstitutionIndex >= 0);
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && Template.getAsTemplateDecl() &&
"Wrong kind of template template argument");
return Template.getAsTemplateDecl();
@@ -700,8 +836,23 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
if (TemplateTypeParmDecl *TTPD = dyn_cast_or_null<TemplateTypeParmDecl>(D)) {
const TemplateTypeParmType *TTP
= cast<TemplateTypeParmType>(getSema().Context.getTypeDeclType(TTPD));
+
if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
- QualType T = TemplateArgs(TTP->getDepth(), TTP->getIndex()).getAsType();
+ // FIXME: This needs testing w/ member access expressions.
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getIndex());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return 0;
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ QualType T = Arg.getAsType();
if (T.isNull())
return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
@@ -719,13 +870,11 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
VarDecl *
TemplateInstantiator::RebuildExceptionDecl(VarDecl *ExceptionDecl,
- QualType T,
TypeSourceInfo *Declarator,
IdentifierInfo *Name,
- SourceLocation Loc,
- SourceRange TypeRange) {
- VarDecl *Var = inherited::RebuildExceptionDecl(ExceptionDecl, T, Declarator,
- Name, Loc, TypeRange);
+ SourceLocation Loc) {
+ VarDecl *Var = inherited::RebuildExceptionDecl(ExceptionDecl, Declarator,
+ Name, Loc);
if (Var)
getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
return Var;
@@ -741,14 +890,14 @@ VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
}
QualType
-TemplateInstantiator::RebuildElaboratedType(ElaboratedTypeKeyword Keyword,
+TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
QualType T) {
if (const TagType *TT = T->getAs<TagType>()) {
TagDecl* TD = TT->getDecl();
- // FIXME: this location is very wrong; we really need typelocs.
- SourceLocation TagLocation = TD->getTagKeywordLoc();
+ SourceLocation TagLocation = KeywordLoc;
// FIXME: type might be anonymous.
IdentifierInfo *Id = TD->getIdentifier();
@@ -767,14 +916,69 @@ TemplateInstantiator::RebuildElaboratedType(ElaboratedTypeKeyword Keyword,
}
}
- return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(Keyword,
+ return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
+ Keyword,
NNS, T);
}
+TemplateName TemplateInstantiator::TransformTemplateName(TemplateName Name,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return Name;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack to substitute, but we're not
+ // actually expanding the enclosing pack expansion yet. So, just
+ // keep the entire argument pack.
+ return getSema().Context.getSubstTemplateTemplateParmPack(TTP, Arg);
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
+ assert(!Template.isNull() && Template.getAsTemplateDecl() &&
+ "Wrong kind of template template argument");
+ return Template;
+ }
+ }
+
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return Name;
+
+ const TemplateArgument &ArgPack = SubstPack->getArgumentPack();
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)ArgPack.pack_size() &&
+ "Pack substitution index out-of-range");
+ return ArgPack.pack_begin()[getSema().ArgumentPackSubstitutionIndex]
+ .getAsTemplate();
+ }
+
+ return inherited::TransformTemplateName(Name, ObjectType,
+ FirstQualifierInScope);
+}
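
A hypothetical source fragment (illustration only, not from the patch) that exercises the template template parameter pack path above:

template<typename T> struct Vec {};
template<typename T> struct List {};

template<template<typename> class ...TT>   // TT: template template pack
struct Apply {};

// Substituting TT selects Vec, then List, one expansion index at a time;
// outside an expansion the whole pack is retained via
// getSubstTemplateTemplateParmPack.
Apply<Vec, List> a;
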
+
ExprResult
TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
FunctionDecl *currentDecl = getSema().getCurFunctionDecl();
assert(currentDecl && "Must have current function declaration when "
@@ -802,15 +1006,37 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
// arguments left unspecified.
if (!TemplateArgs.hasTemplateArgument(NTTP->getDepth(),
NTTP->getPosition()))
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
- const TemplateArgument &Arg = TemplateArgs(NTTP->getDepth(),
- NTTP->getPosition());
+ TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
+ if (NTTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have an argument pack, but we can't select a particular argument
+ // out of it yet. Therefore, we'll build an expression to hold on to that
+ // argument pack.
+ QualType TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
+ E->getLocation(),
+ NTTP->getDeclName());
+ if (TargetType.isNull())
+ return ExprError();
+
+ return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(TargetType,
+ NTTP,
+ E->getLocation(),
+ Arg);
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
// The template argument itself might be an expression, in which
// case we just return that expression.
if (Arg.getKind() == TemplateArgument::Expression)
- return SemaRef.Owned(Arg.getAsExpr()->Retain());
+ return SemaRef.Owned(Arg.getAsExpr());
if (Arg.getKind() == TemplateArgument::Declaration) {
ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
@@ -825,9 +1051,19 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
// Derive the type we want the substituted decl to have. This had
// better be non-dependent, or these checks will have serious problems.
- QualType TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
- E->getLocation(),
- DeclarationName());
+ QualType TargetType;
+ if (NTTP->isExpandedParameterPack())
+ TargetType = NTTP->getExpansionType(
+ getSema().ArgumentPackSubstitutionIndex);
+ else if (NTTP->isParameterPack() &&
+ isa<PackExpansionType>(NTTP->getType())) {
+ TargetType = SemaRef.SubstType(
+ cast<PackExpansionType>(NTTP->getType())->getPattern(),
+ TemplateArgs, E->getLocation(),
+ NTTP->getDeclName());
+ } else
+ TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
+ E->getLocation(), NTTP->getDeclName());
assert(!TargetType.isNull() && "type substitution failed for param type");
assert(!TargetType->isDependentType() && "param type still dependent");
return SemaRef.BuildExpressionFromDeclTemplateArgument(Arg,
@@ -839,6 +1075,50 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
E->getSourceRange().getBegin());
}
+ExprResult
+TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ return getSema().Owned(E);
+ }
+
+ const TemplateArgument &ArgPack = E->getArgumentPack();
+ unsigned Index = (unsigned)getSema().ArgumentPackSubstitutionIndex;
+ assert(Index < ArgPack.pack_size() && "Substitution index out-of-range");
+
+ const TemplateArgument &Arg = ArgPack.pack_begin()[Index];
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return SemaRef.Owned(Arg.getAsExpr());
+
+ if (Arg.getKind() == TemplateArgument::Declaration) {
+ ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
+
+ // Find the instantiation of the template argument. This is
+ // required for nested templates.
+ VD = cast_or_null<ValueDecl>(
+ getSema().FindInstantiatedDecl(E->getParameterPackLocation(),
+ VD, TemplateArgs));
+ if (!VD)
+ return ExprError();
+
+ QualType T;
+ NonTypeTemplateParmDecl *NTTP = E->getParameterPack();
+ if (NTTP->isExpandedParameterPack())
+ T = NTTP->getExpansionType(getSema().ArgumentPackSubstitutionIndex);
+ else if (const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(NTTP->getType()))
+ T = SemaRef.SubstType(Expansion->getPattern(), TemplateArgs,
+ E->getParameterPackLocation(), NTTP->getDeclName());
+ else
+ T = E->getType();
+ return SemaRef.BuildExpressionFromDeclTemplateArgument(Arg, T,
+ E->getParameterPackLocation());
+ }
+
+ return SemaRef.BuildExpressionFromIntegralTemplateArgument(Arg,
+ E->getParameterPackLocation());
+}
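
The non-type case handled above arises in code like this illustrative fragment (not from the patch):

template<int ...Ns>
struct Sum { static const int value = 0; };

template<int N, int ...Rest>
struct Sum<N, Rest...> {
  // While substituting into 'Sum<Rest...>' before the enclosing expansion
  // is expanded, 'Rest' is held in a SubstNonTypeTemplateParmPackExpr;
  // once an index is chosen, the corresponding pack element is selected.
  static const int value = N + Sum<Rest...>::value;
};

static const int six = Sum<1, 2, 3>::value;  // 6
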
ExprResult
TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
@@ -865,23 +1145,23 @@ ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
}
QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- QualType ObjectType) {
+ FunctionProtoTypeLoc TL) {
// We need a local instantiation scope for this function prototype.
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return inherited::TransformFunctionProtoType(TLB, TL, ObjectType);
+ return inherited::TransformFunctionProtoType(TLB, TL);
}
ParmVarDecl *
-TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm) {
- return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs);
+TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ llvm::Optional<unsigned> NumExpansions) {
+ return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs,
+ NumExpansions);
}
QualType
TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL,
- QualType ObjectType) {
- TemplateTypeParmType *T = TL.getTypePtr();
+ TemplateTypeParmTypeLoc TL) {
+ const TemplateTypeParmType *T = TL.getTypePtr();
if (T->getDepth() < TemplateArgs.getNumLevels()) {
// Replace the template type parameter with its corresponding
// template argument.
@@ -897,12 +1177,32 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
return TL.getType();
}
- assert(TemplateArgs(T->getDepth(), T->getIndex()).getKind()
- == TemplateArgument::Type &&
+ TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+
+ if (T->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack, but we're not expanding the
+ // enclosing pack expansion yet. Just save the template argument
+ // pack for later substitution.
+ QualType Result
+ = getSema().Context.getSubstTemplateTypeParmPackType(T, Arg);
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ assert(Arg.getKind() == TemplateArgument::Type &&
"Template argument kind mismatch");
- QualType Replacement
- = TemplateArgs(T->getDepth(), T->getIndex()).getAsType();
+ QualType Replacement = Arg.getAsType();
// TODO: only do this uniquing once, at the start of instantiation.
QualType Result
@@ -928,6 +1228,32 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
return Result;
}
+QualType
+TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(TL.getType());
+ NewTL.setNameLoc(TL.getNameLoc());
+ return TL.getType();
+ }
+
+ const TemplateArgument &ArgPack = TL.getTypePtr()->getArgumentPack();
+ unsigned Index = (unsigned)getSema().ArgumentPackSubstitutionIndex;
+ assert(Index < ArgPack.pack_size() && "Substitution index out-of-range");
+
+ QualType Result = ArgPack.pack_begin()[Index].getAsType();
+ Result = getSema().Context.getSubstTemplateTypeParmType(
+ TL.getTypePtr()->getReplacedParameter(),
+ Result);
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
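
For the type case, an illustrative fragment (not from the patch) in which a SubstTemplateTypeParmPackType is produced and later resolved:

template<typename ...Ts> struct Tuple {};

template<typename ...Ts>
struct PtrTuple {
  // Substituting Ts first yields the retained pack; expanding the
  // pattern 'Ts*' then selects one pack element per expansion index.
  typedef Tuple<Ts*...> type;
};

typedef PtrTuple<int, char>::type T;  // Tuple<int*, char*>
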
+
/// \brief Perform substitution on the type T with a given set of template
/// arguments.
///
@@ -971,6 +1297,36 @@ TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
return Instantiator.TransformType(T);
}
+TypeSourceInfo *Sema::SubstType(TypeLoc TL,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (TL.getType().isNull())
+ return 0;
+
+ if (!TL.getType()->isDependentType() &&
+ !TL.getType()->isVariablyModifiedType()) {
+ // FIXME: Make a copy of the TypeLoc data here, so that we can
+ // return a new TypeSourceInfo. Inefficient!
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(TL);
+ return TLB.getTypeSourceInfo(Context, TL.getType());
+ }
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ TypeLocBuilder TLB;
+ TLB.reserve(TL.getFullDataSize());
+ QualType Result = Instantiator.TransformType(TLB, TL);
+ if (Result.isNull())
+ return 0;
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
/// Deprecated form of the above.
QualType Sema::SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -992,7 +1348,7 @@ static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
if (T->getType()->isDependentType() || T->getType()->isVariablyModifiedType())
return true;
- TypeLoc TL = T->getTypeLoc();
+ TypeLoc TL = T->getTypeLoc().IgnoreParens();
if (!isa<FunctionProtoTypeLoc>(TL))
return false;
@@ -1003,7 +1359,7 @@ static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
// TODO: currently we always rebuild expressions. When we
// properly get lazier about this, we should use the same
// logic to avoid rebuilding prototypes here.
- if (P->hasInit())
+ if (P->hasDefaultArg())
return true;
}
@@ -1031,7 +1387,7 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
TypeLoc TL = T->getTypeLoc();
TLB.reserve(TL.getFullDataSize());
- QualType Result = Instantiator.TransformType(TLB, TL, QualType());
+ QualType Result = Instantiator.TransformType(TLB, TL);
if (Result.isNull())
return 0;
@@ -1039,10 +1395,34 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
}
ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::Optional<unsigned> NumExpansions) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
- TypeSourceInfo *NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
- OldParm->getDeclName());
+ TypeSourceInfo *NewDI = 0;
+
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ if (isa<PackExpansionTypeLoc>(OldTL)) {
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
+
+ // We have a function parameter pack. Substitute into the pattern of the
+ // expansion.
+ NewDI = SubstType(ExpansionTL.getPatternLoc(), TemplateArgs,
+ OldParm->getLocation(), OldParm->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ if (NewDI->getType()->containsUnexpandedParameterPack()) {
+ // We still have unexpanded parameter packs, which means that
+ // our function parameter is still a function parameter pack.
+ // Therefore, make its type a pack expansion type.
+ NewDI = CheckPackExpansion(NewDI, ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ }
+ } else {
+ NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
+ OldParm->getDeclName());
+ }
+
if (!NewDI)
return 0;
@@ -1064,12 +1444,24 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
if (OldParm->hasUninstantiatedDefaultArg()) {
Expr *Arg = OldParm->getUninstantiatedDefaultArg();
NewParm->setUninstantiatedDefaultArg(Arg);
+ } else if (OldParm->hasUnparsedDefaultArg()) {
+ NewParm->setUnparsedDefaultArg();
+ UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg())
NewParm->setUninstantiatedDefaultArg(Arg);
NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
- CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
+ // FIXME: When OldParm is a parameter pack and NewParm is not a parameter
+ // pack, we actually have a set of instantiated locations. Maintain this set!
+ if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
+ // Add the new parameter to the instantiated parameter pack.
+ CurrentInstantiationScope->InstantiatedLocalPackArg(OldParm, NewParm);
+ } else {
+ // Introduce an Old -> New mapping
+ CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
+ }
+
// FIXME: OldParm may come from a FunctionProtoType, in which case CurContext
// can be anything, is this right ?
NewParm->setDeclContext(CurContext);
@@ -1077,6 +1469,24 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
return NewParm;
}
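
The pack handling in SubstParmVarDecl corresponds to function parameter packs such as this sketch (illustration only):

template<typename ...Ts>
void f(Ts ...args) {}  // one written ParmVarDecl: a function parameter pack

// Instantiating f<int, double> substitutes the pattern 'Ts' twice; the
// resulting parameters are recorded with InstantiatedLocalPackArg rather
// than a single Old -> New InstantiatedLocal mapping.
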
+/// \brief Substitute the given template arguments into the given set of
+/// parameters, producing the set of parameter types that would be generated
+/// from such a substitution.
+bool Sema::SubstParmTypes(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::SmallVectorImpl<QualType> &ParamTypes,
+ llvm::SmallVectorImpl<ParmVarDecl *> *OutParams) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ DeclarationName());
+ return Instantiator.TransformFunctionTypeParams(Loc, Params, NumParams, 0,
+ ParamTypes, OutParams);
+}
+
/// \brief Perform substitution on the base class specifiers of the
/// given class template specialization.
///
@@ -1093,17 +1503,64 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
Base = Pattern->bases_begin(), BaseEnd = Pattern->bases_end();
Base != BaseEnd; ++Base) {
if (!Base->getType()->isDependentType()) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
-
- // Make sure to set the attributes from the base.
- SetClassDeclAttributesFromBase(Instantiation, BaseDecl,
- Base->isVirtual());
-
InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(*Base));
continue;
}
+ SourceLocation EllipsisLoc;
+ if (Base->isPackExpansion()) {
+ // This is a pack expansion. See whether we should expand it now, or
+ // wait until later.
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(Base->getTypeSourceInfo()->getTypeLoc(),
+ Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Base->getEllipsisLoc(),
+ Base->getSourceRange(),
+ Unexpanded.data(), Unexpanded.size(),
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ Invalid = true;
+ continue;
+ }
+
+ // If we should expand this pack expansion now, do so.
+ if (ShouldExpand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ TypeSourceInfo *BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+ TemplateArgs,
+ Base->getSourceRange().getBegin(),
+ DeclarationName());
+ if (!BaseTypeLoc) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base->getSourceRange(),
+ Base->isVirtual(),
+ Base->getAccessSpecifierAsWritten(),
+ BaseTypeLoc,
+ SourceLocation()))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ continue;
+ }
+
+ // The resulting base specifier will (still) be a pack expansion.
+ EllipsisLoc = Base->getEllipsisLoc();
+ }
+
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
TypeSourceInfo *BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
TemplateArgs,
Base->getSourceRange().getBegin(),
@@ -1118,7 +1575,8 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
Base->getSourceRange(),
Base->isVirtual(),
Base->getAccessSpecifierAsWritten(),
- BaseTypeLoc))
+ BaseTypeLoc,
+ EllipsisLoc))
InstantiatedBases.push_back(InstantiatedBase);
else
Invalid = true;
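
The base-specifier expansion above targets class definitions like this sketch (hypothetical types):

struct Counter {};
struct Logger {};

template<typename ...Mixins>
struct Widget : Mixins... {};  // base-specifier pack expansion

Widget<Counter, Logger> w;     // expands to bases Counter and Logger
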
@@ -1221,11 +1679,29 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (SubstBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
Invalid = true;
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
llvm::SmallVector<Decl*, 4> Fields;
for (RecordDecl::decl_iterator Member = Pattern->decls_begin(),
MemberEnd = Pattern->decls_end();
Member != MemberEnd; ++Member) {
- Decl *NewMember = SubstDecl(*Member, Instantiation, TemplateArgs);
+ // Don't instantiate members not belonging in this semantic context.
+ // e.g. for:
+ // @code
+ // template <int i> class A {
+ // class B *g;
+ // };
+ // @endcode
+ // 'class B' has the template as lexical context but semantically it is
+ // introduced in namespace scope.
+ if ((*Member)->getDeclContext() != Pattern)
+ continue;
+
+ if ((*Member)->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ Decl *NewMember = Instantiator.Visit(*Member);
if (NewMember) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember))
Fields.push_back(Field);
@@ -1245,7 +1721,22 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CheckCompletedCXXClass(Instantiation);
if (Instantiation->isInvalidDecl())
Invalid = true;
-
+ else {
+ // Instantiate any out-of-line class template partial
+ // specializations now.
+ for (TemplateDeclInstantiator::delayed_partial_spec_iterator
+ P = Instantiator.delayed_partial_spec_begin(),
+ PEnd = Instantiator.delayed_partial_spec_end();
+ P != PEnd; ++P) {
+ if (!Instantiator.InstantiateClassTemplatePartialSpecialization(
+ P->first,
+ P->second)) {
+ Invalid = true;
+ break;
+ }
+ }
+ }
+
// Exit the scope of this instantiation.
SavedContext.pop();
@@ -1261,6 +1752,15 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return Invalid;
}
+namespace {
+ /// \brief A partial specialization whose template arguments have matched
+ /// a given template-id.
+ struct PartialSpecMatchResult {
+ ClassTemplatePartialSpecializationDecl *Partial;
+ TemplateArgumentList *Args;
+ };
+}
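
The delayed partial-specialization machinery added above covers member templates specialized out of line, e.g. (illustration only):

template<typename T>
struct Outer {
  template<typename U> struct Inner {};
};

// Out-of-line partial specialization: queued while Outer<T> is being
// instantiated and only instantiated once the enclosing class is complete.
template<typename T>
template<typename U>
struct Outer<T>::Inner<U*> {};
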
+
bool
Sema::InstantiateClassTemplateSpecialization(
SourceLocation PointOfInstantiation,
@@ -1310,8 +1810,7 @@ Sema::InstantiateClassTemplateSpecialization(
// matching the template arguments of the class template
// specialization with the template argument lists of the partial
// specializations.
- typedef std::pair<ClassTemplatePartialSpecializationDecl *,
- TemplateArgumentList *> MatchResult;
+ typedef PartialSpecMatchResult MatchResult;
llvm::SmallVector<MatchResult, 4> Matched;
llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
Template->getPartialSpecializations(PartialSpecs);
@@ -1326,10 +1825,17 @@ Sema::InstantiateClassTemplateSpecialization(
// diagnostics, later.
(void)Result;
} else {
- Matched.push_back(std::make_pair(Partial, Info.take()));
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
}
}
+ // If we're dealing with a member template where the template parameters
+ // have been instantiated, this provides the original template parameters
+ // from which the member template's parameters were instantiated.
+ llvm::SmallVector<const NamedDecl *, 4> InstantiatedTemplateParameters;
+
if (Matched.size() >= 1) {
llvm::SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
if (Matched.size() == 1) {
@@ -1347,9 +1853,9 @@ Sema::InstantiateClassTemplateSpecialization(
for (llvm::SmallVector<MatchResult, 4>::iterator P = Best + 1,
PEnd = Matched.end();
P != PEnd; ++P) {
- if (getMoreSpecializedPartialSpecialization(P->first, Best->first,
+ if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
PointOfInstantiation)
- == P->first)
+ == P->Partial)
Best = P;
}
@@ -1360,9 +1866,9 @@ Sema::InstantiateClassTemplateSpecialization(
PEnd = Matched.end();
P != PEnd; ++P) {
if (P != Best &&
- getMoreSpecializedPartialSpecialization(P->first, Best->first,
+ getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
PointOfInstantiation)
- != Best->first) {
+ != Best->Partial) {
Ambiguous = true;
break;
}
@@ -1378,16 +1884,17 @@ Sema::InstantiateClassTemplateSpecialization(
for (llvm::SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
PEnd = Matched.end();
P != PEnd; ++P)
- Diag(P->first->getLocation(), diag::note_partial_spec_match)
- << getTemplateArgumentBindingsText(P->first->getTemplateParameters(),
- *P->second);
+ Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
+ << getTemplateArgumentBindingsText(
+ P->Partial->getTemplateParameters(),
+ *P->Args);
return true;
}
}
// Instantiate using the best class template partial specialization.
- ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->first;
+ ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->Partial;
while (OrigPartialSpec->getInstantiatedFromMember()) {
// If we've found an explicit specialization of this class template,
// stop here and use that as the pattern.
@@ -1398,7 +1905,7 @@ Sema::InstantiateClassTemplateSpecialization(
}
Pattern = OrigPartialSpec;
- ClassTemplateSpec->setInstantiationOf(Best->first, Best->second);
+ ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
} else {
// -- If no matches are found, the instantiation is generated
// from the primary template.
@@ -1449,7 +1956,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
Function,
MSInfo->getTemplateSpecializationKind(),
- MSInfo->getPointOfInstantiation(),
+ MSInfo->getPointOfInstantiation(),
SuppressNew) ||
SuppressNew)
continue;
@@ -1485,7 +1992,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
Var,
MSInfo->getTemplateSpecializationKind(),
- MSInfo->getPointOfInstantiation(),
+ MSInfo->getPointOfInstantiation(),
SuppressNew) ||
SuppressNew)
continue;
@@ -1520,11 +2027,11 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (MSInfo->getTemplateSpecializationKind()
== TSK_ExplicitSpecialization)
continue;
-
+
if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
Record,
MSInfo->getTemplateSpecializationKind(),
- MSInfo->getPointOfInstantiation(),
+ MSInfo->getPointOfInstantiation(),
SuppressNew) ||
SuppressNew)
continue;
@@ -1551,6 +2058,13 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
InstantiateClass(PointOfInstantiation, Record, Pattern,
TemplateArgs,
TSK);
+ } else {
+ if (TSK == TSK_ExplicitInstantiationDefinition &&
+ Record->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration) {
+ Record->setTemplateSpecializationKind(TSK);
+ MarkVTableUsed(PointOfInstantiation, Record, true);
+ }
}
Pattern = cast_or_null<CXXRecordDecl>(Record->getDefinition());
@@ -1604,6 +2118,18 @@ Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
return Instantiator.TransformExpr(E);
}
+bool Sema::SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ llvm::SmallVectorImpl<Expr *> &Outputs) {
+ if (NumExprs == 0)
+ return false;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformExprs(Exprs, NumExprs, IsCall, Outputs);
+}
+
/// \brief Do template substitution on a nested-name-specifier.
NestedNameSpecifier *
Sema::SubstNestedNameSpecifier(NestedNameSpecifier *NNS,
@@ -1631,35 +2157,107 @@ Sema::SubstTemplateName(TemplateName Name, SourceLocation Loc,
return Instantiator.TransformTemplateName(Name);
}
-bool Sema::Subst(const TemplateArgumentLoc &Input, TemplateArgumentLoc &Output,
+bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs) {
TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
DeclarationName());
-
- return Instantiator.TransformTemplateArgument(Input, Output);
+
+ return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
}
-Decl *LocalInstantiationScope::getInstantiationOf(const Decl *D) {
- for (LocalInstantiationScope *Current = this; Current;
+llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *
+LocalInstantiationScope::findInstantiationOf(const Decl *D) {
+ for (LocalInstantiationScope *Current = this; Current;
Current = Current->Outer) {
+
// Check if we found something within this scope.
- llvm::DenseMap<const Decl *, Decl *>::iterator Found
- = Current->LocalDecls.find(D);
- if (Found != Current->LocalDecls.end())
- return Found->second;
-
+ const Decl *CheckD = D;
+ do {
+ LocalDeclsMap::iterator Found = Current->LocalDecls.find(CheckD);
+ if (Found != Current->LocalDecls.end())
+ return &Found->second;
+
+ // If this is a tag declaration, it's possible that we need to look for
+ // a previous declaration.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(CheckD))
+ CheckD = Tag->getPreviousDeclaration();
+ else
+ CheckD = 0;
+ } while (CheckD);
+
// If we aren't combined with our outer scope, we're done.
if (!Current->CombineWithOuterScope)
break;
}
-
- assert(D->isInvalidDecl() &&
- "declaration was not instantiated in this scope!");
+
+ // If we didn't find the decl, then we either have a sema bug, or we have a
+ // forward reference to a label declaration. Return null to indicate that
+ // we have an uninstantiated label.
+ assert(isa<LabelDecl>(D) && "declaration not instantiated in this scope");
return 0;
}
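
The label case mentioned in the comment above comes from forward-referenced labels, as in this sketch (illustration only):

template<typename T>
void g(T t) {
  goto done;   // 'done' is referenced before its LabelDecl is seen, so
  (void)t;     // findInstantiationOf legitimately returns null for it
done:
  return;
}
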
void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
- Decl *&Stored = LocalDecls[D];
- assert((!Stored || Stored == Inst)&& "Already instantiated this local");
- Stored = Inst;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ if (Stored.isNull())
+ Stored = Inst;
+ else if (Stored.is<Decl *>()) {
+ assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
+ Stored = Inst;
+ } else
+ LocalDecls[D].get<DeclArgumentPack *>()->push_back(Inst);
+}
+
+void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
+ Decl *Inst) {
+ DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
+ Pack->push_back(Inst);
+}
+
+void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ assert(Stored.isNull() && "Already instantiated this local");
+ DeclArgumentPack *Pack = new DeclArgumentPack;
+ Stored = Pack;
+ ArgumentPacks.push_back(Pack);
+}
+
+void LocalInstantiationScope::SetPartiallySubstitutedPack(NamedDecl *Pack,
+ const TemplateArgument *ExplicitArgs,
+ unsigned NumExplicitArgs) {
+ assert((!PartiallySubstitutedPack || PartiallySubstitutedPack == Pack) &&
+ "Already have a partially-substituted pack");
+ assert((!PartiallySubstitutedPack
+ || NumArgsInPartiallySubstitutedPack == NumExplicitArgs) &&
+ "Wrong number of arguments in partially-substituted pack");
+ PartiallySubstitutedPack = Pack;
+ ArgsInPartiallySubstitutedPack = ExplicitArgs;
+ NumArgsInPartiallySubstitutedPack = NumExplicitArgs;
+}
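
A partially-substituted pack arises when explicit template arguments cover only a prefix of a pack, e.g. (illustration only):

template<typename ...Ts> void h(Ts ...ts) {}

// h<int>(1, 2.0): the explicit argument fixes the prefix Ts = {int};
// deduction from the call completes the pack as {int, double}. The scope
// records the explicitly-specified prefix via SetPartiallySubstitutedPack.
void use() { h<int>(1, 2.0); }
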
+
+NamedDecl *LocalInstantiationScope::getPartiallySubstitutedPack(
+ const TemplateArgument **ExplicitArgs,
+ unsigned *NumExplicitArgs) const {
+ if (ExplicitArgs)
+ *ExplicitArgs = 0;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = 0;
+
+ for (const LocalInstantiationScope *Current = this; Current;
+ Current = Current->Outer) {
+ if (Current->PartiallySubstitutedPack) {
+ if (ExplicitArgs)
+ *ExplicitArgs = Current->ArgsInPartiallySubstitutedPack;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = Current->NumArgsInPartiallySubstitutedPack;
+
+ return Current->PartiallySubstitutedPack;
+ }
+
+ if (!Current->CombineWithOuterScope)
+ break;
+ }
+
+ return 0;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 1c7869f..c0150c0 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -25,85 +25,6 @@
using namespace clang;
-namespace {
- class TemplateDeclInstantiator
- : public DeclVisitor<TemplateDeclInstantiator, Decl *> {
- Sema &SemaRef;
- DeclContext *Owner;
- const MultiLevelTemplateArgumentList &TemplateArgs;
-
- public:
- TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
- const MultiLevelTemplateArgumentList &TemplateArgs)
- : SemaRef(SemaRef), Owner(Owner), TemplateArgs(TemplateArgs) { }
-
- // FIXME: Once we get closer to completion, replace these manually-written
- // declarations with automatically-generated ones from
- // clang/AST/DeclNodes.inc.
- Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
- Decl *VisitNamespaceDecl(NamespaceDecl *D);
- Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
- Decl *VisitTypedefDecl(TypedefDecl *D);
- Decl *VisitVarDecl(VarDecl *D);
- Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
- Decl *VisitFieldDecl(FieldDecl *D);
- Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
- Decl *VisitEnumDecl(EnumDecl *D);
- Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
- Decl *VisitFriendDecl(FriendDecl *D);
- Decl *VisitFunctionDecl(FunctionDecl *D,
- TemplateParameterList *TemplateParams = 0);
- Decl *VisitCXXRecordDecl(CXXRecordDecl *D);
- Decl *VisitCXXMethodDecl(CXXMethodDecl *D,
- TemplateParameterList *TemplateParams = 0);
- Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
- Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
- Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
- ParmVarDecl *VisitParmVarDecl(ParmVarDecl *D);
- Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
- Decl *VisitClassTemplatePartialSpecializationDecl(
- ClassTemplatePartialSpecializationDecl *D);
- Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
- Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
- Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
- Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
- Decl *VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
- Decl *VisitUsingDecl(UsingDecl *D);
- Decl *VisitUsingShadowDecl(UsingShadowDecl *D);
- Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
- Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
-
- // Base case. FIXME: Remove once we can instantiate everything.
- Decl *VisitDecl(Decl *D) {
- unsigned DiagID = SemaRef.getDiagnostics().getCustomDiagID(
- Diagnostic::Error,
- "cannot instantiate %0 yet");
- SemaRef.Diag(D->getLocation(), DiagID)
- << D->getDeclKindName();
-
- return 0;
- }
-
- // Helper functions for instantiating methods.
- TypeSourceInfo *SubstFunctionType(FunctionDecl *D,
- llvm::SmallVectorImpl<ParmVarDecl *> &Params);
- bool InitFunctionInstantiation(FunctionDecl *New, FunctionDecl *Tmpl);
- bool InitMethodInstantiation(CXXMethodDecl *New, CXXMethodDecl *Tmpl);
-
- TemplateParameterList *
- SubstTemplateParams(TemplateParameterList *List);
-
- bool SubstQualifier(const DeclaratorDecl *OldDecl,
- DeclaratorDecl *NewDecl);
- bool SubstQualifier(const TagDecl *OldDecl,
- TagDecl *NewDecl);
-
- bool InstantiateClassTemplatePartialSpecialization(
- ClassTemplateDecl *ClassTemplate,
- ClassTemplatePartialSpecializationDecl *PartialSpec);
- };
-}
-
bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
DeclaratorDecl *NewDecl) {
NestedNameSpecifier *OldQual = OldDecl->getQualifier();
@@ -151,15 +72,15 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
if (Aligned->isAlignmentExpr()) {
ExprResult Result = SubstExpr(Aligned->getAlignmentExpr(),
- TemplateArgs);
+ TemplateArgs);
if (!Result.isInvalid())
AddAlignedAttr(Aligned->getLocation(), New, Result.takeAs<Expr>());
}
else {
TypeSourceInfo *Result = SubstType(Aligned->getAlignmentType(),
- TemplateArgs,
- Aligned->getLocation(),
- DeclarationName());
+ TemplateArgs,
+ Aligned->getLocation(),
+ DeclarationName());
if (Result)
AddAlignedAttr(Aligned->getLocation(), New, Result);
}
@@ -180,6 +101,14 @@ TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
}
Decl *
+TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
+ LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *
TemplateDeclInstantiator::VisitNamespaceDecl(NamespaceDecl *D) {
assert(false && "Namespaces cannot be instantiated");
return D;
@@ -222,13 +151,15 @@ Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
if (Invalid)
Typedef->setInvalidDecl();
- if (const TagType *TT = DI->getType()->getAs<TagType>()) {
- TagDecl *TD = TT->getDecl();
-
- // If the TagDecl that the TypedefDecl points to is an anonymous decl
- // keep track of the TypedefDecl.
- if (!TD->getIdentifier() && !TD->getTypedefForAnonDecl())
- TD->setTypedefForAnonDecl(Typedef);
+ // If the old typedef was the name for linkage purposes of an anonymous
+ // tag decl, re-establish that relationship for the new typedef.
+ if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) {
+ TagDecl *oldTag = oldTagType->getDecl();
+ if (oldTag->getTypedefForAnonDecl() == D) {
+ TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl();
+ assert(!newTag->getIdentifier() && !newTag->getTypedefForAnonDecl());
+ newTag->setTypedefForAnonDecl(Typedef);
+ }
}
if (TypedefDecl *Prev = D->getPreviousDeclaration()) {
@@ -245,35 +176,6 @@ Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
return Typedef;
}
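
The typedef-for-anonymous-tag relinking above matters for members like this sketch (illustration only):

template<typename T>
struct S {
  typedef struct { T value; } Rec;  // typedef names the anonymous struct
};

// Instantiating S<int> must point the new anonymous tag at the new
// typedef so 'Rec' remains its name for linkage purposes.
S<int>::Rec r = { 42 };
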
-/// \brief Instantiate the arguments provided as part of initialization.
-///
-/// \returns true if an error occurred, false otherwise.
-static bool InstantiateInitializationArguments(Sema &SemaRef,
- Expr **Args, unsigned NumArgs,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- llvm::SmallVectorImpl<SourceLocation> &FakeCommaLocs,
- ASTOwningVector<Expr*> &InitArgs) {
- for (unsigned I = 0; I != NumArgs; ++I) {
- // When we hit the first defaulted argument, break out of the loop:
- // we don't pass those default arguments on.
- if (Args[I]->isDefaultArgument())
- break;
-
- ExprResult Arg = SemaRef.SubstExpr(Args[I], TemplateArgs);
- if (Arg.isInvalid())
- return true;
-
- Expr *ArgExpr = (Expr *)Arg.get();
- InitArgs.push_back(Arg.release());
-
- // FIXME: We're faking all of the comma locations. Do we need them?
- FakeCommaLocs.push_back(
- SemaRef.PP.getLocForEndOfToken(ArgExpr->getLocEnd()));
- }
-
- return false;
-}
-
/// \brief Instantiate an initializer, breaking it into separate
/// initialization arguments.
///
@@ -290,8 +192,7 @@ static bool InstantiateInitializationArguments(Sema &SemaRef,
static bool InstantiateInitializer(Sema &S, Expr *Init,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation &LParenLoc,
- llvm::SmallVector<SourceLocation, 4> &CommaLocs,
- ASTOwningVector<Expr*> &NewArgs,
+ ASTOwningVector<Expr*> &NewArgs,
SourceLocation &RParenLoc) {
NewArgs.clear();
LParenLoc = SourceLocation();
@@ -300,7 +201,7 @@ static bool InstantiateInitializer(Sema &S, Expr *Init,
if (!Init)
return false;
- if (CXXExprWithTemporaries *ExprTemp = dyn_cast<CXXExprWithTemporaries>(Init))
+ if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
Init = ExprTemp->getSubExpr();
while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init))
@@ -312,24 +213,19 @@ static bool InstantiateInitializer(Sema &S, Expr *Init,
if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
LParenLoc = ParenList->getLParenLoc();
RParenLoc = ParenList->getRParenLoc();
- return InstantiateInitializationArguments(S, ParenList->getExprs(),
- ParenList->getNumExprs(),
- TemplateArgs, CommaLocs,
- NewArgs);
+ return S.SubstExprs(ParenList->getExprs(), ParenList->getNumExprs(),
+ true, TemplateArgs, NewArgs);
}
if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init)) {
if (!isa<CXXTemporaryObjectExpr>(Construct)) {
- if (InstantiateInitializationArguments(S,
- Construct->getArgs(),
- Construct->getNumArgs(),
- TemplateArgs,
- CommaLocs, NewArgs))
+ if (S.SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true,
+ TemplateArgs, NewArgs))
return true;
// FIXME: Fake locations!
LParenLoc = S.PP.getLocForEndOfToken(Init->getLocStart());
- RParenLoc = CommaLocs.empty()? LParenLoc : CommaLocs.back();
+ RParenLoc = LParenLoc;
return false;
}
}
@@ -358,6 +254,12 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
if (!DI)
return 0;
+ if (DI->getType()->isFunctionType()) {
+ SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function)
+ << D->isStaticDataMember() << DI->getType();
+ return 0;
+ }
+
// Build the instantiated declaration
VarDecl *Var = VarDecl::Create(SemaRef.Context, Owner,
D->getLocation(), D->getIdentifier(),
@@ -419,24 +321,24 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
// Instantiate the initializer.
SourceLocation LParenLoc, RParenLoc;
- llvm::SmallVector<SourceLocation, 4> CommaLocs;
ASTOwningVector<Expr*> InitArgs(SemaRef);
if (!InstantiateInitializer(SemaRef, D->getInit(), TemplateArgs, LParenLoc,
- CommaLocs, InitArgs, RParenLoc)) {
- // Attach the initializer to the declaration.
- if (D->hasCXXDirectInitializer()) {
+ InitArgs, RParenLoc)) {
+ bool TypeMayContainAuto = true;
+ // Attach the initializer to the declaration, if we have one.
+ if (InitArgs.size() == 0)
+ SemaRef.ActOnUninitializedDecl(Var, TypeMayContainAuto);
+ else if (D->hasCXXDirectInitializer()) {
// Add the direct initializer to the declaration.
SemaRef.AddCXXDirectInitializerToDecl(Var,
LParenLoc,
move_arg(InitArgs),
- CommaLocs.data(),
- RParenLoc);
- } else if (InitArgs.size() == 1) {
- Expr *Init = InitArgs.take()[0];
- SemaRef.AddInitializerToDecl(Var, Init, false);
+ RParenLoc,
+ TypeMayContainAuto);
} else {
- assert(InitArgs.size() == 0);
- SemaRef.ActOnUninitializedDecl(Var, false);
+ assert(InitArgs.size() == 1);
+ Expr *Init = InitArgs.take()[0];
+ SemaRef.AddInitializerToDecl(Var, Init, false, TypeMayContainAuto);
}
} else {
// FIXME: Not too happy about invalidating the declaration
@@ -540,6 +442,30 @@ Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
return Field;
}
+Decl *TemplateDeclInstantiator::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ NamedDecl **NamedChain =
+ new (SemaRef.Context)NamedDecl*[D->getChainingSize()];
+
+ int i = 0;
+ for (IndirectFieldDecl::chain_iterator PI =
+ D->chain_begin(), PE = D->chain_end();
+ PI != PE; ++PI)
+ NamedChain[i++] = (SemaRef.FindInstantiatedDecl(D->getLocation(),
+ *PI, TemplateArgs));
+
+ QualType T = cast<FieldDecl>(NamedChain[i-1])->getType();
+ IndirectFieldDecl* IndirectField
+ = IndirectFieldDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier(), T,
+ NamedChain, D->getChainingSize());
+
+
+ IndirectField->setAccess(D->getAccess());
+ Owner->addDecl(IndirectField);
+ return IndirectField;
+}
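
IndirectFieldDecls come from anonymous unions and structs; the new visitor handles members such as this sketch (illustration only):

template<typename T>
struct U {
  union {      // anonymous union: 'first' and 'second' are visible in U
    T first;   // through IndirectFieldDecls chaining via the union object
    int second;
  };
};

U<float> u;  // instantiation rebuilds each chain with FindInstantiatedDecl
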
+
Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
// Handle friend type expressions by simply substituting template
// parameters into the pattern type and checking the result.
@@ -555,6 +481,7 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
return 0;
FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
Owner->addDecl(FD);
return FD;
}
@@ -573,6 +500,7 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
FriendDecl::Create(SemaRef.Context, Owner, D->getLocation(),
cast<NamedDecl>(NewND), D->getFriendLoc());
FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
Owner->addDecl(FD);
return FD;
}
@@ -589,7 +517,7 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
return 0;
ExprResult Message(D->getMessage());
- D->getMessage()->Retain();
+ D->getMessage();
return SemaRef.ActOnStaticAssertDeclaration(D->getLocation(),
InstantiatedAssertExpr.get(),
Message.get());
@@ -599,7 +527,31 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner,
D->getLocation(), D->getIdentifier(),
D->getTagKeywordLoc(),
- /*PrevDecl=*/0);
+ /*PrevDecl=*/0, D->isScoped(),
+ D->isScopedUsingClassTag(), D->isFixed());
+ if (D->isFixed()) {
+ if (TypeSourceInfo* TI = D->getIntegerTypeSourceInfo()) {
+ // If we have type source information for the underlying type, it means it
+ // has been explicitly set by the user. Perform substitution on it before
+ // moving on.
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ Enum->setIntegerTypeSourceInfo(SemaRef.SubstType(TI,
+ TemplateArgs,
+ UnderlyingLoc,
+ DeclarationName()));
+
+ if (!Enum->getIntegerTypeSourceInfo())
+ Enum->setIntegerType(SemaRef.Context.IntTy);
+ }
+ else {
+ assert(!D->getIntegerType()->isDependentType()
+ && "Dependent type without type source info");
+ Enum->setIntegerType(D->getIntegerType());
+ }
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Enum);
+
Enum->setInstantiationOfMemberEnum(D);
Enum->setAccess(D->getAccess());
if (SubstQualifier(D, Enum)) return 0;
@@ -644,6 +596,8 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
}
if (EnumConst) {
+ SemaRef.InstantiateAttrs(TemplateArgs, *EC, EnumConst);
+
EnumConst->setAccess(Enum->getAccess());
Enum->addDecl(EnumConst);
Enumerators.push_back(EnumConst);
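
The fixed-underlying-type handling added above applies to scoped enums whose underlying type is dependent, e.g. (sketch, hypothetical names):

template<typename T>
struct Flags {
  enum class Kind : T { None = 0, Some = 1 };  // underlying type is T
};

// Instantiation substitutes T into the written underlying type before
// completing the enum.
Flags<unsigned char>::Kind k = Flags<unsigned char>::Kind::Some;
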
@@ -699,6 +653,15 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *PrevDecl = 0;
ClassTemplateDecl *PrevClassTemplate = 0;
+ if (!isFriend && Pattern->getPreviousDeclaration()) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (Found.first != Found.second) {
+ PrevClassTemplate = dyn_cast<ClassTemplateDecl>(*Found.first);
+ if (PrevClassTemplate)
+ PrevDecl = PrevClassTemplate->getTemplatedDecl();
+ }
+ }
+
// If this isn't a friend, then it's a member template, in which
// case we just want to build the instantiation in the
// specialization. If it is a friend, we want to build it in
@@ -813,7 +776,8 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// friend target decl?
} else {
Inst->setAccess(D->getAccess());
- Inst->setInstantiatedFromMemberTemplate(D);
+ if (!PrevClassTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
}
// Trigger creation of the type for the instantiation.
@@ -827,14 +791,18 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
Owner->addDecl(Inst);
-
- // Instantiate all of the partial specializations of this member class
- // template.
- llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
- D->getPartialSpecializations(PartialSpecs);
- for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I)
- InstantiateClassTemplatePartialSpecialization(Inst, PartialSpecs[I]);
-
+
+ if (!PrevClassTemplate) {
+ // Queue up any out-of-line partial specializations of this member
+ // class template; the client will force their instantiation once
+ // the enclosing class has been instantiated.
+ llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ D->getPartialSpecializations(PartialSpecs);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I)
+ if (PartialSpecs[I]->isOutOfLine())
+ OutOfLinePartialSpecs.push_back(std::make_pair(Inst, PartialSpecs[I]));
+ }
+
return Inst;
}
@@ -855,7 +823,11 @@ TemplateDeclInstantiator::VisitClassTemplatePartialSpecializationDecl(
if (!InstClassTemplate)
return 0;
- return InstClassTemplate->findPartialSpecInstantiatedFromMember(D);
+ if (ClassTemplatePartialSpecializationDecl *Result
+ = InstClassTemplate->findPartialSpecInstantiatedFromMember(D))
+ return Result;
+
+ return InstantiateClassTemplatePartialSpecialization(InstClassTemplate, D);
}
Decl *
@@ -1040,7 +1012,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// Attach the parameters
for (unsigned P = 0; P < Params.size(); ++P)
- Params[P]->setOwningFunction(Function);
+ if (Params[P])
+ Params[P]->setOwningFunction(Function);
Function->setParams(Params.data(), Params.size());
SourceLocation InstantiateAtPOI;
@@ -1078,7 +1051,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
std::pair<const TemplateArgument *, unsigned> Innermost
= TemplateArgs.getInnermost();
Function->setFunctionTemplateSpecialization(FunctionTemplate,
- new (SemaRef.Context) TemplateArgumentList(SemaRef.Context,
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
Innermost.first,
Innermost.second),
InsertPos);
@@ -1092,7 +1065,6 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Function->setInvalidDecl();
bool Redeclaration = false;
- bool OverloadableAttrRequired = false;
bool isExplicitSpecialization = false;
LookupResult Previous(SemaRef, Function->getDeclName(), SourceLocation(),
@@ -1108,13 +1080,9 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// Instantiate the explicit template arguments.
TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
Info->getRAngleLoc());
- for (unsigned I = 0, E = Info->getNumTemplateArgs(); I != E; ++I) {
- TemplateArgumentLoc Loc;
- if (SemaRef.Subst(Info->getTemplateArg(I), Loc, TemplateArgs))
- return 0;
-
- ExplicitArgs.addArgument(Loc);
- }
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return 0;
// Map the candidate templates to their instantiations.
for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
@@ -1148,8 +1116,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
}
SemaRef.CheckFunctionDeclaration(/*Scope*/ 0, Function, Previous,
- isExplicitSpecialization, Redeclaration,
- /*FIXME:*/OverloadableAttrRequired);
+ isExplicitSpecialization, Redeclaration);
NamedDecl *PrincipalDecl = (TemplateParams
? cast<NamedDecl>(FunctionTemplate)
@@ -1256,6 +1223,20 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+ // Instantiate enclosing template arguments for friends.
+ llvm::SmallVector<TemplateParameterList *, 4> TempParamLists;
+ unsigned NumTempParamLists = 0;
+ if (isFriend && (NumTempParamLists = D->getNumTemplateParameterLists())) {
+ TempParamLists.set_size(NumTempParamLists);
+ for (unsigned I = 0; I != NumTempParamLists; ++I) {
+ TemplateParameterList *TempParams = D->getTemplateParameterList(I);
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return NULL;
+ TempParamLists[I] = InstParams;
+ }
+ }
+
llvm::SmallVector<ParmVarDecl *, 4> Params;
TypeSourceInfo *TInfo = D->getTypeSourceInfo();
TInfo = SubstFunctionType(D, Params);
@@ -1263,25 +1244,22 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
return 0;
QualType T = TInfo->getType();
- // \brief If the type of this function is not *directly* a function
- // type, then we're instantiating the a function that was declared
- // via a typedef, e.g.,
+ // \brief If the type of this function, after ignoring parentheses,
+ // is not *directly* a function type, then we're instantiating a function
+ // that was declared via a typedef, e.g.,
//
// typedef int functype(int, int);
// functype func;
//
// In this case, we'll just go instantiate the ParmVarDecls that we
// synthesized in the method declaration.
- if (!isa<FunctionProtoType>(T)) {
+ if (!isa<FunctionProtoType>(T.IgnoreParens())) {
assert(!Params.size() && "Instantiating type could not yield parameters");
- for (unsigned I = 0, N = D->getNumParams(); I != N; ++I) {
- ParmVarDecl *P = SemaRef.SubstParmVarDecl(D->getParamDecl(I),
- TemplateArgs);
- if (!P)
- return 0;
-
- Params.push_back(P);
- }
+ llvm::SmallVector<QualType, 4> ParamTypes;
+ if (SemaRef.SubstParmTypes(D->getLocation(), D->param_begin(),
+ D->getNumParams(), TemplateArgs, ParamTypes,
+ &Params))
+ return 0;
}
NestedNameSpecifier *Qualifier = D->getQualifier();
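
The SubstParmTypes path above replaces per-parameter substitution for methods declared through a typedef, as in this sketch (illustration only):

typedef int FnType(int, int);

template<typename T>
struct M {
  // Declares 'int member(int, int);' via the typedef: there are no
  // written ParmVarDecls, so instantiation synthesizes the parameter
  // types with SubstParmTypes.
  FnType member;
};
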
@@ -1299,6 +1277,9 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
SS.setScopeRep(Qualifier);
SS.setRange(D->getQualifierRange());
DC = SemaRef.computeDeclContext(SS);
+
+ if (DC && SemaRef.RequireCompleteDeclContext(SS, DC))
+ return 0;
} else {
DC = SemaRef.FindInstantiatedContext(D->getLocation(),
D->getDeclContext(),
@@ -1321,7 +1302,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
false);
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
- NameInfo, T,
+ NameInfo, T, TInfo,
Destructor->isInlineSpecified(),
false);
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
@@ -1369,9 +1350,9 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
std::pair<const TemplateArgument *, unsigned> Innermost
= TemplateArgs.getInnermost();
Method->setFunctionTemplateSpecialization(FunctionTemplate,
- new (SemaRef.Context) TemplateArgumentList(SemaRef.Context,
- Innermost.first,
- Innermost.second),
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
+ Innermost.first,
+ Innermost.second),
InsertPos);
} else if (!isFriend) {
// Record that this is an instantiation of a member function.
@@ -1382,6 +1363,11 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
// out-of-line, the instantiation will have the same lexical
// context (which will be a namespace scope) as the template.
if (isFriend) {
+ if (NumTempParamLists)
+ Method->setTemplateParameterListsInfo(SemaRef.Context,
+ NumTempParamLists,
+ TempParamLists.data());
+
Method->setLexicalDeclContext(Owner);
Method->setObjectOfFriendDecl(true);
} else if (D->isOutOfLine())
@@ -1410,15 +1396,15 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
}
bool Redeclaration = false;
- bool OverloadableAttrRequired = false;
- SemaRef.CheckFunctionDeclaration(0, Method, Previous, false, Redeclaration,
- /*FIXME:*/OverloadableAttrRequired);
+ SemaRef.CheckFunctionDeclaration(0, Method, Previous, false, Redeclaration);
if (D->isPure())
SemaRef.CheckPureMethod(Method, SourceRange());
Method->setAccess(D->getAccess());
+ SemaRef.CheckOverrideControl(Method);
+
if (FunctionTemplate) {
// If there's a function template, let our caller handle it.
} else if (Method->isInvalidDecl() && !Previous.empty()) {
@@ -1449,7 +1435,7 @@ Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
}
ParmVarDecl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
- return SemaRef.SubstParmVarDecl(D, TemplateArgs);
+ return SemaRef.SubstParmVarDecl(D, TemplateArgs, llvm::Optional<unsigned>());
}
Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
@@ -1462,7 +1448,7 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
TemplateTypeParmDecl *Inst =
TemplateTypeParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
TTPT->getDepth() - TemplateArgs.getNumLevels(),
- TTPT->getIndex(),TTPT->getName(),
+ TTPT->getIndex(), D->getIdentifier(),
D->wasDeclaredWithTypename(),
D->isParameterPack());
@@ -1479,33 +1465,140 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
NonTypeTemplateParmDecl *D) {
// Substitute into the type of the non-type template parameter.
+ TypeLoc TL = D->getTypeSourceInfo()->getTypeLoc();
+ llvm::SmallVector<TypeSourceInfo *, 4> ExpandedParameterPackTypesAsWritten;
+ llvm::SmallVector<QualType, 4> ExpandedParameterPackTypes;
+ bool IsExpandedParameterPack = false;
+ TypeSourceInfo *DI;
QualType T;
- TypeSourceInfo *DI = D->getTypeSourceInfo();
- if (DI) {
- DI = SemaRef.SubstType(DI, TemplateArgs, D->getLocation(),
- D->getDeclName());
- if (DI) T = DI->getType();
- } else {
- T = SemaRef.SubstType(D->getType(), TemplateArgs, D->getLocation(),
- D->getDeclName());
- DI = 0;
- }
- if (T.isNull())
- return 0;
-
- // Check that this type is acceptable for a non-type template parameter.
bool Invalid = false;
- T = SemaRef.CheckNonTypeTemplateParameterType(T, D->getLocation());
- if (T.isNull()) {
- T = SemaRef.Context.IntTy;
- Invalid = true;
+
+ if (D->isExpandedParameterPack()) {
+ // The non-type template parameter pack is an already-expanded pack
+ // expansion of types. Substitute into each of the expanded types.
+ ExpandedParameterPackTypes.reserve(D->getNumExpansionTypes());
+ ExpandedParameterPackTypesAsWritten.reserve(D->getNumExpansionTypes());
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ TypeSourceInfo *NewDI = SemaRef.SubstType(D->getExpansionTypeSourceInfo(I),
+ TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+ QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(NewDI->getType(),
+ D->getLocation());
+ if (NewT.isNull())
+ return 0;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else if (isa<PackExpansionTypeLoc>(TL)) {
+ // The non-type template parameter pack's type is a pack expansion of types.
+ // Determine whether we need to expand this parameter pack into separate
+ // types.
+ PackExpansionTypeLoc Expansion = cast<PackExpansionTypeLoc>(TL);
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = Expansion.getTypePtr()->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ TemplateArgs,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return 0;
+
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeSourceInfo *NewDI = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+ QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(
+ NewDI->getType(),
+ D->getLocation());
+ if (NewT.isNull())
+ return 0;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ // Note that we have an expanded parameter pack. The "type" of this
+ // expanded parameter pack is the original expansion type, but callers
+ // will end up using the expanded parameter pack types for type-checking.
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else {
+ // We cannot fully expand the pack expansion now, so substitute into the
+ // pattern and create a new pack expansion type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ TypeSourceInfo *NewPattern = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewPattern)
+ return 0;
+
+ DI = SemaRef.CheckPackExpansion(NewPattern, Expansion.getEllipsisLoc(),
+ NumExpansions);
+ if (!DI)
+ return 0;
+
+ T = DI->getType();
+ }
+ } else {
+ // Simple case: substitution into a parameter that is not a parameter pack.
+ DI = SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI)
+ return 0;
+
+ // Check that this type is acceptable for a non-type template parameter.
+ bool Invalid = false;
+ T = SemaRef.CheckNonTypeTemplateParameterType(DI->getType(),
+ D->getLocation());
+ if (T.isNull()) {
+ T = SemaRef.Context.IntTy;
+ Invalid = true;
+ }
}
- NonTypeTemplateParmDecl *Param
- = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ NonTypeTemplateParmDecl *Param;
+ if (IsExpandedParameterPack)
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
D->getDepth() - TemplateArgs.getNumLevels(),
- D->getPosition(), D->getIdentifier(), T,
- DI);
+ D->getPosition(),
+ D->getIdentifier(), T,
+ DI,
+ ExpandedParameterPackTypes.data(),
+ ExpandedParameterPackTypes.size(),
+ ExpandedParameterPackTypesAsWritten.data());
+ else
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), T,
+ D->isParameterPack(), DI);
+
if (Invalid)
Param->setInvalidDecl();
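// Editorial illustration (not part of the patch): the construct the new
// VisitNonTypeTemplateParmDecl logic above handles. The non-type template
// parameter pack 'Vs' has the pack expansion type 'Ts...', so once the outer
// pack is known, 'Vs' expands into one parameter per element of 'Ts'.
template<typename ...Ts>
struct outer {
  template<Ts ...Vs>          // non-type parameter pack with expansion type
  struct inner { };
};

outer<int, char>::inner<1, 'a'> x;   // 'Ts...' expands to {int, char}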
@@ -1536,8 +1629,8 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
TemplateTemplateParmDecl *Param
= TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
D->getDepth() - TemplateArgs.getNumLevels(),
- D->getPosition(), D->getIdentifier(),
- InstParams);
+ D->getPosition(), D->isParameterPack(),
+ D->getIdentifier(), InstParams);
Param->setDefaultArgument(D->getDefaultArgument(), false);
// Introduce this template parameter's instantiation into the instantiation
@@ -1562,8 +1655,24 @@ Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
}
Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
- // The nested name specifier is non-dependent, so no transformation
- // is required. The same holds for the name info.
+
+ // The nested name specifier may be dependent; for example:
+ // template <typename T> struct t {
+ // struct s1 { T f1(); };
+ // struct s2 : s1 { using s1::f1; };
+ // };
+ // template struct t<int>;
+ // Here, in using s1::f1, s1 refers to t<T>::s1;
+ // we need to substitute for t<int>::s1.
+ NestedNameSpecifier *NNS =
+ SemaRef.SubstNestedNameSpecifier(D->getTargetNestedNameDecl(),
+ D->getNestedNameRange(),
+ TemplateArgs);
+ if (!NNS)
+ return 0;
+
+ // The name info is non-dependent, so no transformation
+ // is required.
DeclarationNameInfo NameInfo = D->getNameInfo();
// We only need to do redeclaration lookups if we're in a class
@@ -1577,12 +1686,12 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
UsingDecl *NewUD = UsingDecl::Create(SemaRef.Context, Owner,
D->getNestedNameRange(),
D->getUsingLocation(),
- D->getTargetNestedNameDecl(),
+ NNS,
NameInfo,
D->isTypeName());
CXXScopeSpec SS;
- SS.setScopeRep(D->getTargetNestedNameDecl());
+ SS.setScopeRep(NNS);
SS.setRange(D->getNestedNameRange());
if (CheckRedeclaration) {
@@ -1746,8 +1855,9 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
/// \param PartialSpec the (uninstantiated) class template partial
/// specialization that we are instantiating.
///
-/// \returns true if there was an error, false otherwise.
-bool
+/// \returns The instantiated partial specialization, if successful; otherwise,
+/// NULL to indicate an error.
+ClassTemplatePartialSpecializationDecl *
TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
ClassTemplateDecl *ClassTemplate,
ClassTemplatePartialSpecializationDecl *PartialSpec) {
@@ -1761,47 +1871,39 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
if (!InstParams)
- return true;
+ return 0;
// Substitute into the template arguments of the class template partial
// specialization.
- const TemplateArgumentLoc *PartialSpecTemplateArgs
- = PartialSpec->getTemplateArgsAsWritten();
- unsigned N = PartialSpec->getNumTemplateArgsAsWritten();
-
TemplateArgumentListInfo InstTemplateArgs; // no angle locations
- for (unsigned I = 0; I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (SemaRef.Subst(PartialSpecTemplateArgs[I], Loc, TemplateArgs))
- return true;
- InstTemplateArgs.addArgument(Loc);
- }
+ if (SemaRef.Subst(PartialSpec->getTemplateArgsAsWritten(),
+ PartialSpec->getNumTemplateArgsAsWritten(),
+ InstTemplateArgs, TemplateArgs))
+ return 0;
-
// Check that the template argument list is well-formed for this
// class template.
- TemplateArgumentListBuilder Converted(ClassTemplate->getTemplateParameters(),
- InstTemplateArgs.size());
+ llvm::SmallVector<TemplateArgument, 4> Converted;
if (SemaRef.CheckTemplateArgumentList(ClassTemplate,
PartialSpec->getLocation(),
InstTemplateArgs,
false,
Converted))
- return true;
+ return 0;
// Figure out where to insert this class template partial specialization
// in the member template's set of class template partial specializations.
void *InsertPos = 0;
ClassTemplateSpecializationDecl *PrevDecl
- = ClassTemplate->findPartialSpecialization(Converted.getFlatArguments(),
- Converted.flatSize(), InsertPos);
+ = ClassTemplate->findPartialSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
// Build the canonical type that describes the converted template
// arguments of the class template partial specialization.
QualType CanonType
= SemaRef.Context.getTemplateSpecializationType(TemplateName(ClassTemplate),
- Converted.getFlatArguments(),
- Converted.flatSize());
+ Converted.data(),
+ Converted.size());
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
@@ -1834,10 +1936,10 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
// Outer<int, int> outer; // error: the partial specializations of Inner
// // have the same signature.
SemaRef.Diag(PartialSpec->getLocation(), diag::err_partial_spec_redeclared)
- << WrittenTy;
+ << WrittenTy->getType();
SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
<< SemaRef.Context.getTypeDeclType(PrevDecl);
- return true;
+ return 0;
}
@@ -1849,7 +1951,8 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
PartialSpec->getLocation(),
InstParams,
ClassTemplate,
- Converted,
+ Converted.data(),
+ Converted.size(),
InstTemplateArgs,
CanonType,
0,
@@ -1864,7 +1967,7 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
// Add this partial specialization to the set of class template partial
// specializations.
ClassTemplate->AddPartialSpecialization(InstPartialSpec, InsertPos);
- return false;
+ return InstPartialSpec;
}
TypeSourceInfo*
@@ -1882,25 +1985,47 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
if (NewTInfo != OldTInfo) {
// Get parameters from the new type info.
- TypeLoc OldTL = OldTInfo->getTypeLoc();
+ TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens();
if (FunctionProtoTypeLoc *OldProtoLoc
= dyn_cast<FunctionProtoTypeLoc>(&OldTL)) {
- TypeLoc NewTL = NewTInfo->getTypeLoc();
+ TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens();
FunctionProtoTypeLoc *NewProtoLoc = cast<FunctionProtoTypeLoc>(&NewTL);
assert(NewProtoLoc && "Missing prototype?");
- for (unsigned i = 0, i_end = NewProtoLoc->getNumArgs(); i != i_end; ++i) {
- // FIXME: Variadic templates will break this.
- Params.push_back(NewProtoLoc->getArg(i));
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(
- OldProtoLoc->getArg(i),
- NewProtoLoc->getArg(i));
+ unsigned NewIdx = 0, NumNewParams = NewProtoLoc->getNumArgs();
+ for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc->getNumArgs();
+ OldIdx != NumOldParams; ++OldIdx) {
+ ParmVarDecl *OldParam = OldProtoLoc->getArg(OldIdx);
+ if (!OldParam->isParameterPack() ||
+ (NewIdx < NumNewParams &&
+ NewProtoLoc->getArg(NewIdx)->isParameterPack())) {
+ // Simple case: normal parameter, or a parameter pack that's
+ // instantiated to a (still-dependent) parameter pack.
+ ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
+ Params.push_back(NewParam);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam,
+ NewParam);
+ continue;
+ }
+
+ // Parameter pack: make the instantiation an argument pack.
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(
+ OldParam);
+ unsigned NumArgumentsInExpansion
+ = SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
+ TemplateArgs);
+ while (NumArgumentsInExpansion--) {
+ ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
+ Params.push_back(NewParam);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocalPackArg(OldParam,
+ NewParam);
+ }
}
}
} else {
// The function type itself was not dependent and therefore no
// substitution occurred. However, we still need to instantiate
// the function parameters themselves.
- TypeLoc OldTL = OldTInfo->getTypeLoc();
+ TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens();
if (FunctionProtoTypeLoc *OldProtoLoc
= dyn_cast<FunctionProtoTypeLoc>(&OldTL)) {
for (unsigned i = 0, i_end = OldProtoLoc->getNumArgs(); i != i_end; ++i) {
@@ -1957,6 +2082,67 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
llvm::SmallVector<QualType, 4> Exceptions;
for (unsigned I = 0, N = Proto->getNumExceptions(); I != N; ++I) {
// FIXME: Poor location information!
+ if (const PackExpansionType *PackExpansion
+ = Proto->getExceptionType(I)->getAs<PackExpansionType>()) {
+ // We have a pack expansion. Instantiate it.
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
+ Unexpanded);
+ assert(!Unexpanded.empty() &&
+ "Pack expansion without parameter packs?");
+
+ bool Expand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions
+ = PackExpansion->getNumExpansions();
+ if (SemaRef.CheckParameterPacksForExpansion(New->getLocation(),
+ SourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ TemplateArgs,
+ Expand,
+ RetainExpansion,
+ NumExpansions))
+ break;
+
+ if (!Expand) {
+ // We can't expand this pack expansion into separate arguments yet;
+ // just substitute into the pattern and create a new pack expansion
+ // type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
+ TemplateArgs,
+ New->getLocation(), New->getDeclName());
+ if (T.isNull())
+ break;
+
+ T = SemaRef.Context.getPackExpansionType(T, NumExpansions);
+ Exceptions.push_back(T);
+ continue;
+ }
+
+ // Substitute into the pack expansion pattern for each argument in the pack.
+ bool Invalid = false;
+ for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, ArgIdx);
+
+ QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
+ TemplateArgs,
+ New->getLocation(), New->getDeclName());
+ if (T.isNull()) {
+ Invalid = true;
+ break;
+ }
+
+ Exceptions.push_back(T);
+ }
+
+ if (Invalid)
+ break;
+
+ continue;
+ }
+
QualType T
= SemaRef.SubstType(Proto->getExceptionType(I), TemplateArgs,
New->getLocation(), New->getDeclName());
@@ -1969,19 +2155,20 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
// Rebuild the function type
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.HasExceptionSpec = Proto->hasExceptionSpec();
+ EPI.HasAnyExceptionSpec = Proto->hasAnyExceptionSpec();
+ EPI.NumExceptions = Exceptions.size();
+ EPI.Exceptions = Exceptions.data();
+ EPI.ExtInfo = Proto->getExtInfo();
+
const FunctionProtoType *NewProto
= New->getType()->getAs<FunctionProtoType>();
assert(NewProto && "Template instantiation without function prototype?");
New->setType(SemaRef.Context.getFunctionType(NewProto->getResultType(),
NewProto->arg_type_begin(),
NewProto->getNumArgs(),
- NewProto->isVariadic(),
- NewProto->getTypeQuals(),
- Proto->hasExceptionSpec(),
- Proto->hasAnyExceptionSpec(),
- Exceptions.size(),
- Exceptions.data(),
- Proto->getExtInfo()));
+ EPI));
}
SemaRef.InstantiateAttrs(TemplateArgs, Tmpl, New);
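// Editorial illustration (an assumed example, not from the patch) of the
// exception specification the loop above expands: a pack expansion among the
// listed types of a dynamic exception specification.
template<typename ...Ts>
void raises() throw(int, Ts...);   // raises<float, char> instantiates to
                                   // throw(int, float, char)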
@@ -2000,10 +2187,9 @@ TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
if (InitFunctionInstantiation(New, Tmpl))
return true;
- CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
New->setAccess(Tmpl->getAccess());
if (Tmpl->isVirtualAsWritten())
- Record->setMethodAsVirtual(New);
+ New->setVirtualAsWritten(true);
// FIXME: attributes
// FIXME: New needs a pointer to Tmpl
@@ -2084,9 +2270,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate later,
// while we're still within our own instantiation context.
+ llvm::SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
- if (Recursive)
+ if (Recursive) {
+ VTableUses.swap(SavedVTableUses);
PendingInstantiations.swap(SavedPendingInstantiations);
+ }
EnterExpressionEvaluationContext EvalContext(*this,
Sema::PotentiallyEvaluated);
@@ -2105,17 +2294,33 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Introduce the instantiated function parameters into the local
// instantiation scope, and set the parameter names to those used
// in the template.
+ unsigned FParamIdx = 0;
for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) {
const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I);
- ParmVarDecl *FunctionParam = Function->getParamDecl(I);
- FunctionParam->setDeclName(PatternParam->getDeclName());
- Scope.InstantiatedLocal(PatternParam, FunctionParam);
+ if (!PatternParam->isParameterPack()) {
+ // Simple case: not a parameter pack.
+ assert(FParamIdx < Function->getNumParams());
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ Scope.InstantiatedLocal(PatternParam, FunctionParam);
+ ++FParamIdx;
+ continue;
+ }
+
+ // Expand the parameter pack.
+ Scope.MakeInstantiatedLocalArgPack(PatternParam);
+ for (unsigned NumFParams = Function->getNumParams();
+ FParamIdx < NumFParams;
+ ++FParamIdx) {
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ }
}
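// Editorial illustration (not part of the patch) for the loop above: a
// function parameter pack instantiates to zero or more parameters, which is
// why FParamIdx advances independently of the pattern index I.
template<typename ...Ts>
void g(int first, Ts ...rest);   // g<char, bool> has parameters
                                 // (int, char, bool): 'rest' expands to two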
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
- DeclContext *PreviousContext = CurContext;
- CurContext = Function;
+ Sema::ContextRAII savedContext(*this, Function);
MultiLevelTemplateArgumentList TemplateArgs =
getTemplateInstantiationArgs(Function, 0, false, PatternDecl);
@@ -2138,7 +2343,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
PerformDependentDiagnostics(PatternDecl, TemplateArgs);
- CurContext = PreviousContext;
+ savedContext.pop();
DeclGroupRef DG(Function);
Consumer.HandleTopLevelDecl(DG);
@@ -2149,10 +2354,16 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Scope.Exit();
if (Recursive) {
+ // Define any pending vtables.
+ DefineUsedVTables();
+
// Instantiate any pending implicit instantiations found during the
// instantiation of this template.
PerformPendingInstantiations();
+ // Restore the set of pending vtables.
+ VTableUses.swap(SavedVTableUses);
+
// Restore the set of pending implicit instantiations.
PendingInstantiations.swap(SavedPendingInstantiations);
}
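// Editorial sketch of the save/drain/restore idiom used above for the
// VTableUses and PendingInstantiations queues; the generic names below are
// assumptions, not Sema's own API. A recursive instantiation runs against
// empty queues, drains the work it caused itself, then restores the caller's
// pending work.
#include <deque>

struct Work { };
static std::deque<Work> Queue;

static void process(Work) { /* handle one pending item */ }

static void instantiateRecursively() {
  std::deque<Work> Saved;
  Queue.swap(Saved);           // start with an empty queue of our own
  // ... instantiate; this may push new Work onto Queue ...
  while (!Queue.empty()) {     // drain everything this instantiation caused
    Work W = Queue.front();
    Queue.pop_front();
    process(W);
  }
  Queue.swap(Saved);           // restore the outer queue
}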
@@ -2233,13 +2444,13 @@ void Sema::InstantiateStaticDataMemberDefinition(
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
- DeclContext *PreviousContext = CurContext;
- CurContext = Var->getDeclContext();
+ ContextRAII previousContext(*this, Var->getDeclContext());
VarDecl *OldVar = Var;
Var = cast_or_null<VarDecl>(SubstDecl(Def, Var->getDeclContext(),
- getTemplateInstantiationArgs(Var)));
- CurContext = PreviousContext;
+ getTemplateInstantiationArgs(Var)));
+
+ previousContext.pop();
if (Var) {
MemberSpecializationInfo *MSInfo = OldVar->getMemberSpecializationInfo();
@@ -2272,7 +2483,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
for (CXXConstructorDecl::init_const_iterator Inits = Tmpl->init_begin(),
InitsEnd = Tmpl->init_end();
Inits != InitsEnd; ++Inits) {
- CXXBaseOrMemberInitializer *Init = *Inits;
+ CXXCtorInitializer *Init = *Inits;
// Only instantiate written initializers, let Sema re-construct implicit
// ones.
@@ -2281,11 +2492,75 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
SourceLocation LParenLoc, RParenLoc;
ASTOwningVector<Expr*> NewArgs(*this);
- llvm::SmallVector<SourceLocation, 4> CommaLocs;
+
+ SourceLocation EllipsisLoc;
+
+ if (Init->isPackExpansion()) {
+ // This is a pack expansion. We should expand it now.
+ TypeLoc BaseTL = Init->getBaseClassInfo()->getTypeLoc();
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(BaseTL, Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(),
+ BaseTL.getSourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+ assert(ShouldExpand && "Partial instantiation of base initializer?");
+
+ // Loop over all of the arguments in the argument pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ // Instantiate the initializer.
+ if (InstantiateInitializer(*this, Init->getInit(), TemplateArgs,
+ LParenLoc, NewArgs, RParenLoc)) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Instantiate the base type.
+ TypeSourceInfo *BaseTInfo = SubstType(Init->getBaseClassInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!BaseTInfo) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Build the initializer.
+ MemInitResult NewInit = BuildBaseInitializer(BaseTInfo->getType(),
+ BaseTInfo,
+ (Expr **)NewArgs.data(),
+ NewArgs.size(),
+ Init->getLParenLoc(),
+ Init->getRParenLoc(),
+ New->getParent(),
+ SourceLocation());
+ if (NewInit.isInvalid()) {
+ AnyErrors = true;
+ break;
+ }
+
+ NewInits.push_back(NewInit.get());
+ NewArgs.clear();
+ }
+
+ continue;
+ }
// Instantiate the initializer.
if (InstantiateInitializer(*this, Init->getInit(), TemplateArgs,
- LParenLoc, CommaLocs, NewArgs, RParenLoc)) {
+ LParenLoc, NewArgs, RParenLoc)) {
AnyErrors = true;
continue;
}
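// Editorial illustration (an assumed example, not from the patch) of the
// base-initializer pack expansion handled by the Init->isPackExpansion()
// branch above:
template<typename ...Bases>
struct Derived : Bases... {
  Derived() : Bases()... { }   // one base initializer per element of Bases
};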
@@ -2307,24 +2582,30 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
NewArgs.size(),
Init->getLParenLoc(),
Init->getRParenLoc(),
- New->getParent());
+ New->getParent(),
+ EllipsisLoc);
} else if (Init->isMemberInitializer()) {
- FieldDecl *Member;
-
- // Is this an anonymous union?
- if (FieldDecl *UnionInit = Init->getAnonUnionMember())
- Member = cast<FieldDecl>(FindInstantiatedDecl(Init->getMemberLocation(),
- UnionInit, TemplateArgs));
- else
- Member = cast<FieldDecl>(FindInstantiatedDecl(Init->getMemberLocation(),
- Init->getMember(),
- TemplateArgs));
+ FieldDecl *Member = cast<FieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getMember(),
+ TemplateArgs));
NewInit = BuildMemberInitializer(Member, (Expr **)NewArgs.data(),
NewArgs.size(),
Init->getSourceLocation(),
Init->getLParenLoc(),
Init->getRParenLoc());
+ } else if (Init->isIndirectMemberInitializer()) {
+ IndirectFieldDecl *IndirectMember =
+ cast<IndirectFieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getIndirectMember(), TemplateArgs));
+
+ NewInit = BuildMemberInitializer(IndirectMember, (Expr **)NewArgs.data(),
+ NewArgs.size(),
+ Init->getSourceLocation(),
+ Init->getLParenLoc(),
+ Init->getRParenLoc());
}
if (NewInit.isInvalid()) {
@@ -2589,7 +2870,27 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
(ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
- return cast<NamedDecl>(CurrentInstantiationScope->getInstantiationOf(D));
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
+ = CurrentInstantiationScope->findInstantiationOf(D);
+
+ if (Found) {
+ if (Decl *FD = Found->dyn_cast<Decl *>())
+ return cast<NamedDecl>(FD);
+
+ unsigned PackIdx = ArgumentPackSubstitutionIndex;
+ return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
+ }
+
+ // If we didn't find the decl, then we must have a label decl that hasn't
+ // been found yet. Lazily instantiate it and return it now.
+ assert(isa<LabelDecl>(D));
+
+ Decl *Inst = SubstDecl(D, CurContext, TemplateArgs);
+ assert(Inst && "Failed to instantiate label??");
+
+ CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return cast<LabelDecl>(Inst);
}
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
@@ -2693,6 +2994,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (!Tag->isBeingDefined() &&
RequireCompleteType(Loc, T, diag::err_incomplete_type))
return 0;
+
+ ParentDC = Tag->getDecl();
}
}
@@ -2800,4 +3103,3 @@ void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
}
}
}
-
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
new file mode 100644
index 0000000..0da801c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -0,0 +1,748 @@
+//===------- SemaTemplateVariadic.cpp - C++ Variadic Templates -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++0x variadic templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Visitor that collects unexpanded parameter packs
+//----------------------------------------------------------------------------
+
+namespace {
+ /// \brief A class that collects unexpanded parameter packs.
+ class CollectUnexpandedParameterPacksVisitor :
+ public RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ {
+ typedef RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ inherited;
+
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded;
+
+ public:
+ explicit CollectUnexpandedParameterPacksVisitor(
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded)
+ : Unexpanded(Unexpanded) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ //------------------------------------------------------------------------
+ // Recording occurrences of (unexpanded) parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Record occurrences of template type parameter packs.
+ bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ if (TL.getTypePtr()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TL.getTypePtr(), TL.getNameLoc()));
+ return true;
+ }
+
+ /// \brief Record occurrences of template type parameter packs
+ /// when we don't have proper source-location information for
+ /// them.
+ ///
+ /// Ideally, this routine would never be used.
+ bool VisitTemplateTypeParmType(TemplateTypeParmType *T) {
+ if (T->isParameterPack())
+ Unexpanded.push_back(std::make_pair(T, SourceLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of function and non-type template
+ /// parameter packs in an expression.
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of function and non-type template parameter
+ /// packs in a block-captured expression.
+ bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ if (E->getDecl()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of template template parameter packs.
+ bool TraverseTemplateName(TemplateName Template) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl()))
+ if (TTP->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TTP, SourceLocation()));
+
+ return inherited::TraverseTemplateName(Template);
+ }
+
+ //------------------------------------------------------------------------
+ // Pruning the search for unexpanded parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Suppress traversal into statements and expressions that
+ /// do not contain unexpanded parameter packs.
+ bool TraverseStmt(Stmt *S) {
+ if (Expr *E = dyn_cast_or_null<Expr>(S))
+ if (E->containsUnexpandedParameterPack())
+ return inherited::TraverseStmt(E);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal into types that do not contain
+ /// unexpanded parameter packs.
+ bool TraverseType(QualType T) {
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return inherited::TraverseType(T);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal into types with location information
+ /// that do not contain unexpanded parameter packs.
+ bool TraverseTypeLoc(TypeLoc TL) {
+ if (!TL.getType().isNull() &&
+ TL.getType()->containsUnexpandedParameterPack())
+ return inherited::TraverseTypeLoc(TL);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of non-parameter declarations, since
+ /// they cannot contain unexpanded parameter packs.
+ bool TraverseDecl(Decl *D) {
+ if (D && isa<ParmVarDecl>(D))
+ return inherited::TraverseDecl(D);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgument(const TemplateArgument &Arg) {
+ if (Arg.isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgument(Arg);
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
+ if (ArgLoc.getArgument().isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgumentLoc(ArgLoc);
+ }
+ };
+}
+
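// Editorial sketch (not part of the file) of what the visitor above looks
// for: an "unexpanded" parameter pack is a use of a pack that no '...'
// expands, which C++0x [temp.variadic]p5 makes ill-formed. 'tuple' here is a
// stand-in type, not the standard library's.
template<typename ...Ts> struct tuple { };

template<typename ...Ts>
struct Example {
  tuple<Ts...> ok;    // 'Ts' is expanded by the enclosing 'Ts...'
  // tuple<Ts> bad;   // 'Ts' would be unexpanded here -> ill-formed
};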
+/// \brief Diagnose all of the unexpanded parameter packs in the given
+/// vector.
+static void
+DiagnoseUnexpandedParameterPacks(Sema &S, SourceLocation Loc,
+ Sema::UnexpandedParameterPackContext UPPC,
+ const llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ llvm::SmallVector<SourceLocation, 4> Locations;
+ llvm::SmallVector<IdentifierInfo *, 4> Names;
+ llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown;
+
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ IdentifierInfo *Name = 0;
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>())
+ Name = TTP->getName();
+ else
+ Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();
+
+ if (Name && NamesKnown.insert(Name))
+ Names.push_back(Name);
+
+ if (Unexpanded[I].second.isValid())
+ Locations.push_back(Unexpanded[I].second);
+ }
+
+ DiagnosticBuilder DB
+ = Names.size() == 0? S.Diag(Loc, diag::err_unexpanded_parameter_pack_0)
+ << (int)UPPC
+ : Names.size() == 1? S.Diag(Loc, diag::err_unexpanded_parameter_pack_1)
+ << (int)UPPC << Names[0]
+ : Names.size() == 2? S.Diag(Loc, diag::err_unexpanded_parameter_pack_2)
+ << (int)UPPC << Names[0] << Names[1]
+ : S.Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more)
+ << (int)UPPC << Names[0] << Names[1];
+
+ for (unsigned I = 0, N = Locations.size(); I != N; ++I)
+ DB << SourceRange(Locations[I]);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TypeSourceInfo *T,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!T->getType()->containsUnexpandedParameterPack())
+ return false;
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(
+ T->getTypeLoc());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, Loc, UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!E->containsUnexpandedParameterPack())
+ return false;
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, E->getLocStart(), UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!SS.getScopeRep() ||
+ !SS.getScopeRep()->containsUnexpandedParameterPack())
+ return false;
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifier(SS.getScopeRep());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, SS.getRange().getBegin(),
+ UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ switch (NameInfo.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ // FIXME: We shouldn't need this null check!
+ if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo())
+ return DiagnoseUnexpandedParameterPack(NameInfo.getLoc(), TSInfo, UPPC);
+
+ if (!NameInfo.getName().getCXXNameType()->containsUnexpandedParameterPack())
+ return false;
+
+ break;
+ }
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseType(NameInfo.getName().getCXXNameType());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, NameInfo.getLoc(), UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TemplateName Template,
+ UnexpandedParameterPackContext UPPC) {
+
+ if (Template.isNull() || !Template.containsUnexpandedParameterPack())
+ return false;
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateName(Template);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, Loc, UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
+ UnexpandedParameterPackContext UPPC) {
+ if (Arg.getArgument().isNull() ||
+ !Arg.getArgument().containsUnexpandedParameterPack())
+ return false;
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(*this, Arg.getLocation(), UPPC, Unexpanded);
+ return true;
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgument(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(QualType T,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(T);
+}
+
+void Sema::collectUnexpandedParameterPacks(TypeLoc TL,
+ llvm::SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL);
+}
+
+ParsedTemplateArgument
+Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
+ SourceLocation EllipsisLoc) {
+ if (Arg.isInvalid())
+ return Arg;
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type: {
+ TypeResult Result = ActOnPackExpansion(Arg.getAsType(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::NonType: {
+ ExprResult Result = ActOnPackExpansion(Arg.getAsExpr(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::Template:
+ if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) {
+ SourceRange R(Arg.getLocation());
+ if (Arg.getScopeSpec().isValid())
+ R.setBegin(Arg.getScopeSpec().getBeginLoc());
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << R;
+ return ParsedTemplateArgument();
+ }
+
+ return Arg.getTemplatePackExpansion(EllipsisLoc);
+ }
+ llvm_unreachable("Unhandled template argument kind?");
+ return ParsedTemplateArgument();
+}
+
+TypeResult Sema::ActOnPackExpansion(ParsedType Type,
+ SourceLocation EllipsisLoc) {
+ TypeSourceInfo *TSInfo;
+ GetTypeFromParser(Type, &TSInfo);
+ if (!TSInfo)
+ return true;
+
+ TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc,
+ llvm::Optional<unsigned>());
+ if (!TSResult)
+ return true;
+
+ return CreateParsedType(TSResult->getType(), TSResult);
+}
+
+TypeSourceInfo *Sema::CheckPackExpansion(TypeSourceInfo *Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ // Create the pack expansion type and source-location information.
+ QualType Result = CheckPackExpansion(Pattern->getType(),
+ Pattern->getTypeLoc().getSourceRange(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isNull())
+ return 0;
+
+ TypeSourceInfo *TSResult = Context.CreateTypeSourceInfo(Result);
+ PackExpansionTypeLoc TL = cast<PackExpansionTypeLoc>(TSResult->getTypeLoc());
+ TL.setEllipsisLoc(EllipsisLoc);
+
+ // Copy over the source-location information from the type.
+ memcpy(TL.getNextTypeLoc().getOpaqueData(),
+ Pattern->getTypeLoc().getOpaqueData(),
+ Pattern->getTypeLoc().getFullDataSize());
+ return TSResult;
+}
+
+QualType Sema::CheckPackExpansion(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << PatternRange;
+ return QualType();
+ }
+
+ return Context.getPackExpansionType(Pattern, NumExpansions);
+}
+
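// Editorial sketch of the rule checked above, the converse of an unexpanded
// pack: a '...' whose pattern names no parameter pack at all. 'tuple' is a
// stand-in type, not the standard library's.
template<typename ...Ts> struct tuple { };

template<typename ...Ts>
struct P {
  tuple<Ts*...> ok;     // pattern 'Ts*' names the pack Ts
  // tuple<int...> bad; // ill-formed: the pattern contains no parameter pack
};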
+ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) {
+ return CheckPackExpansion(Pattern, EllipsisLoc, llvm::Optional<unsigned>());
+}
+
+ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ if (!Pattern)
+ return ExprError();
+
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << Pattern->getSourceRange();
+ return ExprError();
+ }
+
+ // Create the pack expansion expression and source-location information.
+ return Owned(new (Context) PackExpansionExpr(Context.DependentTy, Pattern,
+ EllipsisLoc, NumExpansions));
+}
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+bool Sema::CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ const UnexpandedParameterPack *Unexpanded,
+ unsigned NumUnexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ ShouldExpand = true;
+ RetainExpansion = false;
+ std::pair<IdentifierInfo *, SourceLocation> FirstPack;
+ bool HaveFirstPack = false;
+
+ for (unsigned I = 0; I != NumUnexpanded; ++I) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth = 0, Index = 0;
+ IdentifierInfo *Name;
+ bool IsFunctionParameterPack = false;
+
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ Name = TTP->getName();
+ } else {
+ NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND))
+ IsFunctionParameterPack = true;
+ else
+ llvm::tie(Depth, Index) = getDepthAndIndex(ND);
+
+ Name = ND->getIdentifier();
+ }
+
+ // Determine the size of this argument pack.
+ unsigned NewPackSize;
+ if (IsFunctionParameterPack) {
+ // Figure out whether we're instantiating to an argument pack or not.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>()) {
+ // We could expand this function parameter pack.
+ NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
+ } else {
+ // We can't expand this function parameter pack, so we can't expand
+ // the pack expansion.
+ ShouldExpand = false;
+ continue;
+ }
+ } else {
+ // If we don't have a template argument at this depth/index, then we
+ // cannot expand the pack expansion. Make a note of this, but we still
+ // want to check any parameter packs we *do* have arguments for.
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ ShouldExpand = false;
+ continue;
+ }
+
+ // Determine the size of the argument pack.
+ NewPackSize = TemplateArgs(Depth, Index).pack_size();
+ }
+
+ // C++0x [temp.arg.explicit]p9:
+ // Template argument deduction can extend the sequence of template
+ // arguments corresponding to a template parameter pack, even when the
+ // sequence contains explicitly specified template arguments.
+ if (!IsFunctionParameterPack) {
+ if (NamedDecl *PartialPack
+ = CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ unsigned PartialDepth, PartialIndex;
+ llvm::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
+ if (PartialDepth == Depth && PartialIndex == Index)
+ RetainExpansion = true;
+ }
+ }
+
+ if (!NumExpansions) {
+ // This is the first pack we've seen for which we have an argument.
+ // Record it.
+ NumExpansions = NewPackSize;
+ FirstPack.first = Name;
+ FirstPack.second = Unexpanded[I].second;
+ HaveFirstPack = true;
+ continue;
+ }
+
+ if (NewPackSize != *NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // All of the parameter packs expanded by a pack expansion shall have
+ // the same number of arguments specified.
+ if (HaveFirstPack)
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(Unexpanded[I].second);
+ else
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(Unexpanded[I].second);
+ return true;
+ }
+ }
+
+ return false;
+}
+
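// Editorial sketch of the C++0x [temp.variadic]p5 rule enforced above: all
// packs expanded by a single '...' must have the same number of arguments.
// 'tuple' and 'pair' are simplified stand-ins, not the standard library's.
template<typename ...Ts> struct tuple { };
template<typename T, typename U> struct pair { };

template<typename ...Ts, typename ...Us>
void zip(tuple<Ts...>, tuple<Us...>, pair<Ts, Us> ...ps);

// zip(tuple<int, char>(), tuple<float>()) cannot expand pair<Ts, Us>...:
// Ts has two arguments but Us has one, a pack expansion length conflict.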
+unsigned Sema::getNumArgumentsInExpansion(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ QualType Pattern = cast<PackExpansionType>(T)->getPattern();
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
+
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth;
+ unsigned Index;
+
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ } else {
+ NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND)) {
+ // Function parameter pack.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>())
+ return Instantiation->get<DeclArgumentPack *>()->size();
+
+ continue;
+ }
+
+ llvm::tie(Depth, Index) = getDepthAndIndex(ND);
+ }
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index))
+ continue;
+
+ // Determine the size of the argument pack.
+ return TemplateArgs(Depth, Index).pack_size();
+ }
+
+ llvm_unreachable("No unexpanded parameter packs in type expansion.");
+ return 0;
+}
+
+bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+ switch (DS.getTypeSpecType()) {
+ case TST_typename:
+ case TST_typeofType: {
+ QualType T = DS.getRepAsType().get();
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+
+ case TST_typeofExpr:
+ case TST_decltype:
+ if (DS.getRepAsExpr() &&
+ DS.getRepAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case TST_unspecified:
+ case TST_void:
+ case TST_char:
+ case TST_wchar:
+ case TST_char16:
+ case TST_char32:
+ case TST_int:
+ case TST_float:
+ case TST_double:
+ case TST_bool:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_decimal128:
+ case TST_enum:
+ case TST_union:
+ case TST_struct:
+ case TST_class:
+ case TST_auto:
+ case TST_error:
+ break;
+ }
+
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Paren:
+ // These declarator chunks cannot contain any parameter packs.
+ break;
+
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ // Syntactically, these kinds of declarator chunks all come after the
+ // declarator-id (conceptually), so the parser should not invoke this
+ // routine at this time.
+ llvm_unreachable("Could not have seen this kind of declarator chunk");
+ break;
+
+ case DeclaratorChunk::MemberPointer:
+ if (Chunk.Mem.Scope().getScopeRep() &&
+ Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Called when an expression computing the size of a parameter pack
+/// is parsed.
+///
+/// \code
+/// template<typename ...Types> struct count {
+/// static const unsigned value = sizeof...(Types);
+/// };
+/// \endcode
+///
+/// \param OpLoc The location of the "sizeof" keyword.
+/// \param Name The name of the parameter pack whose size will be determined.
+/// \param NameLoc The source location of the name of the parameter pack.
+/// \param RParenLoc The location of the closing parenthesis.
+ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
+ SourceLocation OpLoc,
+ IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ SourceLocation RParenLoc) {
+ // C++0x [expr.sizeof]p5:
+ // The identifier in a sizeof... expression shall name a parameter pack.
+ LookupResult R(*this, &Name, NameLoc, LookupOrdinaryName);
+ LookupName(R, S);
+
+ NamedDecl *ParameterPack = 0;
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ ParameterPack = R.getFoundDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (DeclarationName CorrectedName = CorrectTypo(R, S, 0, 0, false,
+ CTC_NoKeywords)) {
+ if (NamedDecl *CorrectedResult = R.getAsSingle<NamedDecl>())
+ if (CorrectedResult->isParameterPack()) {
+ ParameterPack = CorrectedResult;
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name_suggest)
+ << &Name << CorrectedName
+ << FixItHint::CreateReplacement(NameLoc,
+ CorrectedName.getAsString());
+ Diag(ParameterPack->getLocation(), diag::note_parameter_pack_here)
+ << CorrectedName;
+ }
+ }
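+ // Fall through.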
+
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+
+ case LookupResult::Ambiguous:
+ DiagnoseAmbiguousLookup(R);
+ return ExprError();
+ }
+
+ if (!ParameterPack || !ParameterPack->isParameterPack()) {
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name)
+ << &Name;
+ return ExprError();
+ }
+
+ return new (Context) SizeOfPackExpr(Context.getSizeType(), OpLoc,
+ ParameterPack, NameLoc, RParenLoc);
+}
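// Editorial illustration of the expression handled above, mirroring the
// \code example in the documentation comment:
template<typename ...Types>
struct count {
  static const unsigned value = sizeof...(Types);   // a SizeOfPackExpr
};

static const unsigned three = count<int, char, bool>::value;   // three == 3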
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
index aa30b5c..c88baa5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -70,37 +70,455 @@ static bool isOmittedBlockReturnType(const Declarator &D) {
return false;
}
-typedef std::pair<const AttributeList*,QualType> DelayedAttribute;
-typedef llvm::SmallVectorImpl<DelayedAttribute> DelayedAttributeSet;
-
-static void ProcessTypeAttributeList(Sema &S, QualType &Type,
- bool IsDeclSpec,
- const AttributeList *Attrs,
- DelayedAttributeSet &DelayedFnAttrs);
-static bool ProcessFnAttr(Sema &S, QualType &Type, const AttributeList &Attr);
-
-static void ProcessDelayedFnAttrs(Sema &S, QualType &Type,
- DelayedAttributeSet &Attrs) {
- for (DelayedAttributeSet::iterator I = Attrs.begin(),
- E = Attrs.end(); I != E; ++I)
- if (ProcessFnAttr(S, Type, *I->first)) {
- S.Diag(I->first->getLoc(), diag::warn_function_attribute_wrong_type)
- << I->first->getName() << I->second;
- // Avoid any further processing of this attribute.
- I->first->setInvalid();
- }
- Attrs.clear();
+// objc_gc applies to Objective-C pointers or, otherwise, to the
+// smallest available pointer type (i.e. 'void*' in 'void**').
+#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_objc_gc
+
+// Function type attributes.
+#define FUNCTION_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_noreturn: \
+ case AttributeList::AT_cdecl: \
+ case AttributeList::AT_fastcall: \
+ case AttributeList::AT_stdcall: \
+ case AttributeList::AT_thiscall: \
+ case AttributeList::AT_pascal: \
+ case AttributeList::AT_regparm
+
+namespace {
+ /// An object which stores processing state for the entire
+ /// GetTypeForDeclarator process.
+ class TypeProcessingState {
+ Sema &sema;
+
+ /// The declarator being processed.
+ Declarator &declarator;
+
+ /// The index of the declarator chunk we're currently processing.
+ /// May be the total number of valid chunks, indicating the
+ /// DeclSpec.
+ unsigned chunkIndex;
+
+ /// Whether there are non-trivial modifications to the decl spec.
+ bool trivial;
+
+ /// The original set of attributes on the DeclSpec.
+ llvm::SmallVector<AttributeList*, 2> savedAttrs;
+
+ /// A list of attributes to diagnose the uselessness of when the
+ /// processing is complete.
+ llvm::SmallVector<AttributeList*, 2> ignoredTypeAttrs;
+
+ public:
+ TypeProcessingState(Sema &sema, Declarator &declarator)
+ : sema(sema), declarator(declarator),
+ chunkIndex(declarator.getNumTypeObjects()),
+ trivial(true) {}
+
+ Sema &getSema() const {
+ return sema;
+ }
+
+ Declarator &getDeclarator() const {
+ return declarator;
+ }
+
+ unsigned getCurrentChunkIndex() const {
+ return chunkIndex;
+ }
+
+ void setCurrentChunkIndex(unsigned idx) {
+ assert(idx <= declarator.getNumTypeObjects());
+ chunkIndex = idx;
+ }
+
+ AttributeList *&getCurrentAttrListRef() const {
+ assert(chunkIndex <= declarator.getNumTypeObjects());
+ if (chunkIndex == declarator.getNumTypeObjects())
+ return getMutableDeclSpec().getAttributes().getListRef();
+ return declarator.getTypeObject(chunkIndex).getAttrListRef();
+ }
+
+ /// Save the current set of attributes on the DeclSpec.
+ void saveDeclSpecAttrs() {
+ // Don't try to save them multiple times.
+ if (!savedAttrs.empty()) return;
+
+ DeclSpec &spec = getMutableDeclSpec();
+ for (AttributeList *attr = spec.getAttributes().getList(); attr;
+ attr = attr->getNext())
+ savedAttrs.push_back(attr);
+ trivial &= savedAttrs.empty();
+ }
+
+ /// Record that we had nowhere to put the given type attribute.
+ /// We will diagnose such attributes later.
+ void addIgnoredTypeAttr(AttributeList &attr) {
+ ignoredTypeAttrs.push_back(&attr);
+ }
+
+ /// Diagnose all the ignored type attributes, given that the
+ /// declarator worked out to the given type.
+ void diagnoseIgnoredTypeAttrs(QualType type) const {
+ for (llvm::SmallVectorImpl<AttributeList*>::const_iterator
+ i = ignoredTypeAttrs.begin(), e = ignoredTypeAttrs.end();
+ i != e; ++i) {
+ AttributeList &attr = **i;
+ getSema().Diag(attr.getLoc(), diag::warn_function_attribute_wrong_type)
+ << attr.getName() << type;
+ }
+ }
+
+ ~TypeProcessingState() {
+ if (trivial) return;
+
+ restoreDeclSpecAttrs();
+ }
+
+ private:
+ DeclSpec &getMutableDeclSpec() const {
+ return const_cast<DeclSpec&>(declarator.getDeclSpec());
+ }
+
+ void restoreDeclSpecAttrs() {
+ assert(!savedAttrs.empty());
+ getMutableDeclSpec().getAttributes().set(savedAttrs[0]);
+ for (unsigned i = 0, e = savedAttrs.size() - 1; i != e; ++i)
+ savedAttrs[i]->setNext(savedAttrs[i+1]);
+ savedAttrs.back()->setNext(0);
+ }
+ };
+
+ /// Basically std::pair except that we really want to avoid an
+ /// implicit operator= for safety concerns. It's also a minor
+ /// link-time optimization for this to be a private type.
+ struct AttrAndList {
+ /// The attribute.
+ AttributeList &first;
+
+ /// The head of the list the attribute is currently in.
+ AttributeList *&second;
+
+ AttrAndList(AttributeList &attr, AttributeList *&head)
+ : first(attr), second(head) {}
+ };
+}
+
+namespace llvm {
+ template <> struct isPodLike<AttrAndList> {
+ static const bool value = true;
+ };
+}
+
+static void spliceAttrIntoList(AttributeList &attr, AttributeList *&head) {
+ attr.setNext(head);
+ head = &attr;
+}
+
+static void spliceAttrOutOfList(AttributeList &attr, AttributeList *&head) {
+ if (head == &attr) {
+ head = attr.getNext();
+ return;
+ }
+
+ AttributeList *cur = head;
+ while (true) {
+ assert(cur && cur->getNext() && "ran out of attrs?");
+ if (cur->getNext() == &attr) {
+ cur->setNext(attr.getNext());
+ return;
+ }
+ cur = cur->getNext();
+ }
+}
+
+static void moveAttrFromListToList(AttributeList &attr,
+ AttributeList *&fromList,
+ AttributeList *&toList) {
+ spliceAttrOutOfList(attr, fromList);
+ spliceAttrIntoList(attr, toList);
+}
+
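// Editorial sketch of the splice helpers above with a simplified node type
// (an assumption, not the real AttributeList): attributes live on intrusive
// singly-linked lists, so moving one is an unlink from the source list plus
// a push onto the front of the destination list.
struct Node { Node *next; };

static void spliceIn(Node &n, Node *&head) {
  n.next = head;
  head = &n;
}

static void spliceOut(Node &n, Node *&head) {
  if (head == &n) { head = n.next; return; }
  for (Node *cur = head; cur; cur = cur->next)
    if (cur->next == &n) { cur->next = n.next; return; }
}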
+static void processTypeAttrs(TypeProcessingState &state,
+ QualType &type, bool isDeclSpec,
+ AttributeList *attrs);
+
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type);
+
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
+static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type) {
+ // Right now, we have exactly one of these attributes: objc_gc.
+ assert(attr.getKind() == AttributeList::AT_objc_gc);
+ return handleObjCGCTypeAttr(state, attr, type);
+}
+
+/// Given that an objc_gc attribute was written somewhere on a
+/// declaration *other* than on the declarator itself (for which, use
+/// distributeObjCPointerTypeAttrFromDeclarator), and given that it
+/// didn't apply in whatever position it was written in, try to move
+/// it to a more appropriate position.
+static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ // Don't walk through these.
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ goto error;
+ }
+ }
+ error:
+
+ state.getSema().Diag(attr.getLoc(), diag::warn_function_attribute_wrong_type)
+ << attr.getName() << type;
+}
+
+/// Distribute an objc_gc type attribute that was written on the
+/// declarator.
+static void
+distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // objc_gc goes on the innermost pointer to something that's not a
+ // pointer.
+ unsigned innermost = -1U;
+ bool considerDeclSpec = true;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ innermost = i;
+ continue;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ case DeclaratorChunk::Function:
+ considerDeclSpec = false;
+ goto done;
+ }
+ }
+ done:
+
+ // That might actually be the decl spec if we weren't blocked by
+ // anything in the declarator.
+ if (considerDeclSpec) {
+ if (handleObjCPointerTypeAttr(state, attr, declSpecType))
+ return;
+ }
+
+ // Otherwise, if we found an appropriate chunk, splice the attribute
+ // into it.
+ if (innermost != -1U) {
+ moveAttrFromListToList(attr, declarator.getAttrListRef(),
+ declarator.getTypeObject(innermost).getAttrListRef());
+ return;
+ }
+
+ // Otherwise, diagnose when we're done building the type.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
+
+/// A function type attribute was written somewhere in a declaration
+/// *other* than on the declarator itself or in the decl spec. Given
+/// that it didn't apply in whatever position it was written in, try
+/// to move it to a more appropriate position.
+static void distributeFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to push the attribute from the return type of a function to
+ // the function itself.
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Function:
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ continue;
+ }
+ }
+
+ state.getSema().Diag(attr.getLoc(), diag::warn_function_attribute_wrong_type)
+ << attr.getName() << type;
+}
+
+/// Try to distribute a function type attribute to the innermost
+/// function chunk or type. Returns true if the attribute was
+/// distributed, false if no location was found.
+static bool
+distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
+ AttributeList &attr,
+ AttributeList *&attrList,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Put it on the innermost function chunk, if there is one.
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ if (chunk.Kind != DeclaratorChunk::Function) continue;
+
+ moveAttrFromListToList(attr, attrList, chunk.getAttrListRef());
+ return true;
+ }
+
+ return handleFunctionTypeAttr(state, attr, declSpecType);
+}
+
+/// A function type attribute was written in the decl spec. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ state.saveDeclSpecAttrs();
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ state.getCurrentAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ state.addIgnoredTypeAttr(attr);
}
-static void DiagnoseDelayedFnAttrs(Sema &S, DelayedAttributeSet &Attrs) {
- for (DelayedAttributeSet::iterator I = Attrs.begin(),
- E = Attrs.end(); I != E; ++I) {
- S.Diag(I->first->getLoc(), diag::warn_function_attribute_wrong_type)
- << I->first->getName() << I->second;
- // Avoid any further processing of this attribute.
- I->first->setInvalid();
+/// A function type attribute was written on the declarator. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ declarator.getAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
+
+/// \brief Given that there are attributes written on the declarator
+/// itself, try to distribute any type attributes to the appropriate
+/// declarator chunk.
+///
+/// These are attributes like the following:
+/// int f ATTR;
+/// int (f ATTR)();
+/// but not necessarily this:
+/// int f() ATTR;
+static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
+ QualType &declSpecType) {
+ // Collect all the type attributes from the declarator itself.
+ assert(state.getDeclarator().getAttributes() && "declarator has no attrs!");
+ AttributeList *attr = state.getDeclarator().getAttributes();
+ AttributeList *next;
+ do {
+ next = attr->getNext();
+
+ switch (attr->getKind()) {
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ default:
+ break;
+ }
+ } while ((attr = next));
+}
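As a concrete illustration of the placements the doc comment above distinguishes (GNU syntax; a sketch, since whether each position is accepted depends on the particular attribute):

    /* On the declarator itself: the cases distributed by this function. */
    void (f __attribute__((noreturn)))(void);
    /* In the decl spec: handled by the FromDeclSpec path instead. */
    __attribute__((noreturn)) void g(void);
    /* Trailing position: applies where it stands, no distribution. */
    void h(void) __attribute__((noreturn));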
+
+/// Add a synthetic '()' to a block-literal declarator if it is
+/// required, given the return type.
+static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
+ QualType declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // First, check whether the declarator would produce a function,
+ // i.e. whether the innermost semantic chunk is a function.
+ if (declarator.isFunctionDeclarator()) {
+ // If so, make that declarator a prototyped declarator.
+ declarator.getFunctionTypeInfo().hasPrototype = true;
+ return;
}
- Attrs.clear();
+
+ // If there are any type objects, the type as written won't name a
+ // function, regardless of the decl spec type. This is because a
+ // block signature declarator is always an abstract-declarator, and
+ // abstract-declarators can't just be parentheses chunks. Therefore
+ // we need to build a function chunk unless there are no type
+ // objects and the decl spec type is a function.
+ if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType())
+ return;
+
+ // Note that there *are* cases with invalid declarators where
+ // declarators consist solely of parentheses. In general, these
+ // occur only in failed efforts to make function declarators, so
+ // faking up the function chunk is still the right thing to do.
+
+ // Otherwise, we need to fake up a function declarator.
+ SourceLocation loc = declarator.getSourceRange().getBegin();
+
+ // ...and *prepend* it to the declarator.
+ declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
+ ParsedAttributes(),
+ /*proto*/ true,
+ /*variadic*/ false, SourceLocation(),
+ /*args*/ 0, 0,
+ /*type quals*/ 0,
+ /*ref-qualifier*/true, SourceLocation(),
+ /*EH*/ false, SourceLocation(), false, 0, 0, 0,
+ /*parens*/ loc, loc,
+ declarator));
+
+ // For consistency, make sure the state still has us as processing
+ // the decl spec.
+ assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1);
+ state.setCurrentChunkIndex(declarator.getNumTypeObjects());
}
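For example, under the blocks extension, a literal written with no declarator at all still needs a function chunk (a sketch):

    /* No parameter list is written, so the declarator has zero type
       objects; the synthesized '()' gives the literal the prototyped
       type 'int (^)(void)'. */
    int (^answer)(void) = ^ { return 42; };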
/// \brief Convert the specified declspec to the appropriate type
@@ -108,17 +526,17 @@ static void DiagnoseDelayedFnAttrs(Sema &S, DelayedAttributeSet &Attrs) {
/// \param D the declarator containing the declaration specifier.
/// \returns The type described by the declaration specifiers. This function
/// never returns null.
-static QualType ConvertDeclSpecToType(Sema &TheSema,
- Declarator &TheDeclarator,
- DelayedAttributeSet &Delayed) {
+static QualType ConvertDeclSpecToType(Sema &S, TypeProcessingState &state) {
// FIXME: Should move the logic from DeclSpec::Finish to here for validity
// checking.
- const DeclSpec &DS = TheDeclarator.getDeclSpec();
- SourceLocation DeclLoc = TheDeclarator.getIdentifierLoc();
+
+ Declarator &declarator = state.getDeclarator();
+ const DeclSpec &DS = declarator.getDeclSpec();
+ SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
DeclLoc = DS.getSourceRange().getBegin();
- ASTContext &Context = TheSema.Context;
+ ASTContext &Context = S.Context;
QualType Result;
switch (DS.getTypeSpecType()) {
@@ -140,13 +558,13 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
Result = Context.WCharTy;
else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
- TheSema.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType());
Result = Context.getSignedWCharType();
} else {
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
"Unknown TSS value");
- TheSema.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
<< DS.getSpecifierName(DS.getTypeSpecType());
Result = Context.getUnsignedWCharType();
}
@@ -173,7 +591,7 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
// If this is a missing declspec in a block literal return context, then it
// is inferred from the return statements inside the block.
- if (isOmittedBlockReturnType(TheDeclarator)) {
+ if (isOmittedBlockReturnType(declarator)) {
Result = Context.DependentTy;
break;
}
@@ -185,11 +603,11 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
// allowed to be completely missing a declspec. This is handled in the
// parser already though by it pretending to have seen an 'int' in this
// case.
- if (TheSema.getLangOptions().ImplicitInt) {
+ if (S.getLangOptions().ImplicitInt) {
// In C89 mode, we only warn if there is a completely missing declspec
// when one is not allowed.
if (DS.isEmpty()) {
- TheSema.Diag(DeclLoc, diag::ext_missing_declspec)
+ S.Diag(DeclLoc, diag::ext_missing_declspec)
<< DS.getSourceRange()
<< FixItHint::CreateInsertion(DS.getSourceRange().getBegin(), "int");
}
@@ -199,17 +617,17 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
// FIXME: Does Microsoft really have the implicit int extension in C++?
- if (TheSema.getLangOptions().CPlusPlus &&
- !TheSema.getLangOptions().Microsoft) {
- TheSema.Diag(DeclLoc, diag::err_missing_type_specifier)
+ if (S.getLangOptions().CPlusPlus &&
+ !S.getLangOptions().Microsoft) {
+ S.Diag(DeclLoc, diag::err_missing_type_specifier)
<< DS.getSourceRange();
// When this occurs in C++ code, often something is very broken with the
// value being declared, poison it as invalid so we don't get chains of
// errors.
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
} else {
- TheSema.Diag(DeclLoc, diag::ext_missing_type_specifier)
+ S.Diag(DeclLoc, diag::ext_missing_type_specifier)
<< DS.getSourceRange();
}
}
@@ -225,9 +643,9 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Result = Context.LongLongTy;
// long long is a C99 feature.
- if (!TheSema.getLangOptions().C99 &&
- !TheSema.getLangOptions().CPlusPlus0x)
- TheSema.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
+ if (!S.getLangOptions().C99 &&
+ !S.getLangOptions().CPlusPlus0x)
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
break;
}
} else {
@@ -239,9 +657,9 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Result = Context.UnsignedLongLongTy;
// long long is a C99 feature.
- if (!TheSema.getLangOptions().C99 &&
- !TheSema.getLangOptions().CPlusPlus0x)
- TheSema.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
+ if (!S.getLangOptions().C99 &&
+ !S.getLangOptions().CPlusPlus0x)
+ S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
break;
}
}
@@ -253,14 +671,19 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Result = Context.LongDoubleTy;
else
Result = Context.DoubleTy;
+
+ if (S.getLangOptions().OpenCL && !S.getOpenCLOptions().cl_khr_fp64) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_double_requires_fp64);
+ declarator.setInvalidType(true);
+ }
break;
case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
case DeclSpec::TST_decimal32: // _Decimal32
case DeclSpec::TST_decimal64: // _Decimal64
case DeclSpec::TST_decimal128: // _Decimal128
- TheSema.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
Result = Context.IntTy;
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
break;
case DeclSpec::TST_class:
case DeclSpec::TST_enum:
@@ -270,12 +693,12 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
if (!D) {
// This can happen in C++ with ambiguous lookups.
Result = Context.IntTy;
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
break;
}
// If the type is deprecated or unavailable, diagnose it.
- TheSema.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeLoc());
+ S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeLoc());
assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
@@ -284,23 +707,22 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Result = Context.getTypeDeclType(D);
// In C++, make an ElaboratedType.
- if (TheSema.getLangOptions().CPlusPlus) {
+ if (S.getLangOptions().CPlusPlus) {
ElaboratedTypeKeyword Keyword
= ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
- Result = TheSema.getElaboratedType(Keyword, DS.getTypeSpecScope(),
- Result);
+ Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
}
if (D->isInvalidDecl())
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
break;
}
case DeclSpec::TST_typename: {
assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
DS.getTypeSpecSign() == 0 &&
"Can't handle qualifiers on typedef names yet!");
- Result = TheSema.GetTypeFromParser(DS.getRepAsType());
+ Result = S.GetTypeFromParser(DS.getRepAsType());
if (Result.isNull())
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
else if (DeclSpec::ProtocolQualifierListTy PQ
= DS.getProtocolQualifiers()) {
if (const ObjCObjectType *ObjT = Result->getAs<ObjCObjectType>()) {
@@ -326,9 +748,9 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
DS.getNumProtocolQualifiers());
Result = Context.getObjCObjectPointerType(Result);
} else {
- TheSema.Diag(DeclLoc, diag::err_invalid_protocol_qualifiers)
+ S.Diag(DeclLoc, diag::err_invalid_protocol_qualifiers)
<< DS.getSourceRange();
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
}
}
@@ -337,8 +759,11 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
}
case DeclSpec::TST_typeofType:
// FIXME: Preserve type source info.
- Result = TheSema.GetTypeFromParser(DS.getRepAsType());
+ Result = S.GetTypeFromParser(DS.getRepAsType());
assert(!Result.isNull() && "Didn't get a type for typeof?");
+ if (!Result->isDependentType())
+ if (const TagType *TT = Result->getAs<TagType>())
+ S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
// TypeQuals handled by caller.
Result = Context.getTypeOfType(Result);
break;
@@ -346,10 +771,10 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for typeof?");
// TypeQuals handled by caller.
- Result = TheSema.BuildTypeofExprType(E);
+ Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
}
break;
}
@@ -357,48 +782,55 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for decltype?");
// TypeQuals handled by caller.
- Result = TheSema.BuildDecltypeType(E);
+ Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
}
break;
}
case DeclSpec::TST_auto: {
// TypeQuals handled by caller.
- Result = Context.UndeducedAutoTy;
+ Result = Context.getAutoType(QualType());
break;
}
case DeclSpec::TST_error:
Result = Context.IntTy;
- TheDeclarator.setInvalidType(true);
+ declarator.setInvalidType(true);
break;
}
// Handle complex types.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
- if (TheSema.getLangOptions().Freestanding)
- TheSema.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
+ if (S.getLangOptions().Freestanding)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
Result = Context.getComplexType(Result);
} else if (DS.isTypeAltiVecVector()) {
unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
- VectorType::AltiVecSpecific AltiVecSpec = VectorType::AltiVec;
+ VectorType::VectorKind VecKind = VectorType::AltiVecVector;
if (DS.isTypeAltiVecPixel())
- AltiVecSpec = VectorType::Pixel;
+ VecKind = VectorType::AltiVecPixel;
else if (DS.isTypeAltiVecBool())
- AltiVecSpec = VectorType::Bool;
- Result = Context.getVectorType(Result, 128/typeSize, AltiVecSpec);
+ VecKind = VectorType::AltiVecBool;
+ Result = Context.getVectorType(Result, 128/typeSize, VecKind);
}
- assert(DS.getTypeSpecComplex() != DeclSpec::TSC_imaginary &&
- "FIXME: imaginary types not supported yet!");
+ // FIXME: Imaginary.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);
- // See if there are any attributes on the declspec that apply to the type (as
- // opposed to the decl).
- if (const AttributeList *AL = DS.getAttributes())
- ProcessTypeAttributeList(TheSema, Result, true, AL, Delayed);
+ // Before we process any type attributes, synthesize a block literal
+ // function declarator if necessary.
+ if (declarator.getContext() == Declarator::BlockLiteralContext)
+ maybeSynthesizeBlockSignature(state, Result);
+
+ // Apply any type attributes from the decl spec. This may cause the
+ // list of type attributes to be temporarily saved while the type
+ // attributes are pushed around.
+ if (AttributeList *attrs = DS.getAttributes().getList())
+ processTypeAttrs(state, Result, true, attrs);
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
@@ -419,14 +851,14 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
// If we have a pointer or reference, the pointee must have an object
// incomplete type.
if (!EltTy->isIncompleteOrObjectType()) {
- TheSema.Diag(DS.getRestrictSpecLoc(),
+ S.Diag(DS.getRestrictSpecLoc(),
diag::err_typecheck_invalid_restrict_invalid_pointee)
<< EltTy << DS.getSourceRange();
TypeQuals &= ~DeclSpec::TQ_restrict; // Remove the restrict qualifier.
}
} else {
- TheSema.Diag(DS.getRestrictSpecLoc(),
- diag::err_typecheck_invalid_restrict_not_pointer)
+ S.Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_not_pointer)
<< Result << DS.getSourceRange();
TypeQuals &= ~DeclSpec::TQ_restrict; // Remove the restrict qualifier.
}
@@ -447,7 +879,7 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
"Has CVR quals but not C, V, or R?");
Loc = DS.getRestrictSpecLoc();
}
- TheSema.Diag(Loc, diag::warn_typecheck_function_qualifiers)
+ S.Diag(Loc, diag::warn_typecheck_function_qualifiers)
<< Result << DS.getSourceRange();
}
@@ -516,6 +948,11 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
return Context.getQualifiedType(T, Qs);
}
+/// \brief Build a paren type including \p T.
+QualType Sema::BuildParenType(QualType T) {
+ return Context.getParenType(T);
+}
+
/// \brief Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
@@ -560,14 +997,14 @@ QualType Sema::BuildPointerType(QualType T,
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
SourceLocation Loc,
DeclarationName Entity) {
+ // C++0x [dcl.ref]p6:
+ // If a typedef (7.1.3), a type template-parameter (14.3.1), or a
+ // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
+ // type T, an attempt to create the type "lvalue reference to cv TR" creates
+ // the type "lvalue reference to T", while an attempt to create the type
+ // "rvalue reference to cv TR" creates the type TR.
bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
- // C++0x [dcl.typedef]p9: If a typedef TD names a type that is a
- // reference to a type T, and attempt to create the type "lvalue
- // reference to cv TD" creates the type "lvalue reference to T".
- // We use the qualifiers (restrict or none) of the original reference,
- // not the new ones. This is consistent with GCC.
-
// C++ [dcl.ref]p4: There shall be no references to references.
//
// According to C++ DR 106, references to references are only
@@ -579,8 +1016,8 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
//
// Parser::ParseDeclaratorInternal diagnoses the case where
// references are written directly; here, we handle the
- // collapsing of references-to-references as described in C++
- // DR 106 and amended by C++ DR 540.
+ // collapsing of references-to-references as described in C++0x.
+ // DR 106 and 540 introduce reference-collapsing into C++98/03.
// C++ [dcl.ref]p1:
// A declarator that specifies the type "reference to cv void"
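The collapsing rules quoted above, checked directly in a standalone C++0x/C++11 sketch:

    #include <type_traits>

    typedef int  &LRef;   // TR = "lvalue reference to int"
    typedef int &&RRef;   // TR = "rvalue reference to int"

    static_assert(std::is_same<LRef &,  int &>::value,  "lvalue ref to TR -> int&");
    static_assert(std::is_same<LRef &&, int &>::value,  "rvalue ref to TR -> TR, i.e. int&");
    static_assert(std::is_same<RRef &,  int &>::value,  "lvalue ref to TR -> int&");
    static_assert(std::is_same<RRef &&, int &&>::value, "rvalue ref to TR -> TR, i.e. int&&");

    int main() {}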
@@ -654,9 +1091,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
- if (Context.getCanonicalType(T) == Context.UndeducedAutoTy) {
- Diag(Loc, diag::err_illegal_decl_array_of_auto)
- << getPrintableNameForEntity(Entity);
+ if (T->getContainedAutoType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_auto)
+ << getPrintableNameForEntity(Entity) << T;
return QualType();
}
@@ -670,9 +1107,15 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
+ // Do lvalue-to-rvalue conversions on the array size expression.
+ if (ArraySize && !ArraySize->isRValue())
+ DefaultLvalueConversion(ArraySize);
+
// C99 6.7.5.2p1: The size expression shall have integer type.
+ // TODO: in theory, if we were insane, we could allow contextual
+ // conversions to integer type here.
if (ArraySize && !ArraySize->isTypeDependent() &&
- !ArraySize->getType()->isIntegerType()) {
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
<< ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
@@ -695,9 +1138,12 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
// C99 6.7.5.2p1: If the expression is a constant expression, it shall
// have a value greater than zero.
if (ConstVal.isSigned() && ConstVal.isNegative()) {
- Diag(ArraySize->getLocStart(),
- diag::err_typecheck_negative_array_size)
- << ArraySize->getSourceRange();
+ if (Entity)
+ Diag(ArraySize->getLocStart(), diag::err_decl_negative_array_size)
+ << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
+ else
+ Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
return QualType();
}
if (ConstVal == 0) {
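In source terms, the split above picks the entity-naming diagnostic when there is a declared name and falls back to the generic one for abstract declarators (a sketch):

    int a[-1];                /* entity known: diagnostic names 'a' */
    int n = sizeof(int[-1]);  /* no entity: generic negative-size error */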
@@ -818,8 +1264,9 @@ QualType Sema::BuildFunctionType(QualType T,
QualType *ParamTypes,
unsigned NumParamTypes,
bool Variadic, unsigned Quals,
+ RefQualifierKind RefQualifier,
SourceLocation Loc, DeclarationName Entity,
- const FunctionType::ExtInfo &Info) {
+ FunctionType::ExtInfo Info) {
if (T->isArrayType() || T->isFunctionType()) {
Diag(Loc, diag::err_func_returning_array_function)
<< T->isFunctionType() << T;
@@ -840,8 +1287,13 @@ QualType Sema::BuildFunctionType(QualType T,
if (Invalid)
return QualType();
- return Context.getFunctionType(T, ParamTypes, NumParamTypes, Variadic,
- Quals, false, false, 0, 0, Info);
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Variadic;
+ EPI.TypeQuals = Quals;
+ EPI.RefQualifier = RefQualifier;
+ EPI.ExtInfo = Info;
+
+ return Context.getFunctionType(T, ParamTypes, NumParamTypes, EPI);
}
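The new ExtProtoInfo idiom, as a fragment rather than a standalone program (assumes an ASTContext &Context in scope and the 2.9-vintage getFunctionType signature shown above):

    // Build 'int (float) &&': not variadic, no cv-quals, rvalue ref-qualifier.
    FunctionProtoType::ExtProtoInfo EPI;
    EPI.Variadic     = false;
    EPI.TypeQuals    = 0;
    EPI.RefQualifier = RQ_RValue;
    QualType Param = Context.FloatTy;
    QualType FnTy  = Context.getFunctionType(Context.IntTy, &Param, 1, EPI);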
/// \brief Build a member pointer type \c T Class::*.
@@ -934,7 +1386,7 @@ QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
}
TypeSourceInfo *DI = 0;
- if (LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
QT = LIT->getType();
DI = LIT->getTypeSourceInfo();
}
@@ -953,20 +1405,21 @@ QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
/// The result of this call will never be null, but the associated
/// type may be a null type if there's an unrecoverable error.
TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
- TagDecl **OwnedDecl) {
+ TagDecl **OwnedDecl,
+ bool AutoAllowedInTypeName) {
// Determine the type of the declarator. Not all forms of declarator
// have a type.
QualType T;
TypeSourceInfo *ReturnTypeInfo = 0;
-
- llvm::SmallVector<DelayedAttribute,4> FnAttrsFromDeclSpec;
+
+ TypeProcessingState state(*this, D);
switch (D.getName().getKind()) {
case UnqualifiedId::IK_Identifier:
case UnqualifiedId::IK_OperatorFunctionId:
case UnqualifiedId::IK_LiteralOperatorId:
case UnqualifiedId::IK_TemplateId:
- T = ConvertDeclSpecToType(*this, D, FnAttrsFromDeclSpec);
+ T = ConvertDeclSpecToType(*this, state);
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
TagDecl* Owned = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
@@ -993,11 +1446,12 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
&ReturnTypeInfo);
break;
}
-
- if (T.isNull())
- return Context.getNullTypeSourceInfo();
- if (T == Context.UndeducedAutoTy) {
+ if (D.getAttributes())
+ distributeTypeAttrsFromDeclarator(state, T);
+
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+ !D.isFunctionDeclarator()) {
int Error = -1;
switch (D.getContext()) {
@@ -1022,13 +1476,19 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
Error = 5; // Template parameter
break;
case Declarator::BlockLiteralContext:
- Error = 6; // Block literal
+ Error = 6; // Block literal
+ break;
+ case Declarator::TemplateTypeArgContext:
+ Error = 7; // Template type argument
+ break;
+ case Declarator::TypeNameContext:
+ if (!AutoAllowedInTypeName)
+ Error = 8; // Generic
break;
case Declarator::FileContext:
case Declarator::BlockContext:
case Declarator::ForContext:
case Declarator::ConditionContext:
- case Declarator::TypeNameContext:
break;
}
@@ -1040,20 +1500,26 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
}
+ if (T.isNull())
+ return Context.getNullTypeSourceInfo();
+
// The name we're declaring, if any.
DeclarationName Name;
if (D.getIdentifier())
Name = D.getIdentifier();
- llvm::SmallVector<DelayedAttribute,4> FnAttrsFromPreviousChunk;
-
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
- DeclaratorChunk &DeclType = D.getTypeObject(e-i-1);
+ unsigned chunkIndex = e - i - 1;
+ state.setCurrentChunkIndex(chunkIndex);
+ DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
switch (DeclType.Kind) {
default: assert(0 && "Unknown decltype!");
+ case DeclaratorChunk::Paren:
+ T = BuildParenType(T);
+ break;
case DeclaratorChunk::BlockPointer:
// If blocks are disabled, emit an error.
if (!LangOpts.Blocks)
@@ -1080,6 +1546,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
T = BuildPointerType(T, DeclType.Loc, Name);
if (DeclType.Ptr.TypeQuals)
T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+
break;
case DeclaratorChunk::Reference: {
// Verify that we're not building a reference to pointer to function with
@@ -1137,12 +1604,43 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// For conversion functions, we'll diagnose this particular error later.
if ((T->isArrayType() || T->isFunctionType()) &&
(D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId)) {
- Diag(DeclType.Loc, diag::err_func_returning_array_function)
- << T->isFunctionType() << T;
+ unsigned diagID = diag::err_func_returning_array_function;
+ // Last processing chunk in block context means this function chunk
+ // represents the block.
+ if (chunkIndex == 0 &&
+ D.getContext() == Declarator::BlockLiteralContext)
+ diagID = diag::err_block_returning_array_function;
+ Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
D.setInvalidType(true);
}
+ // Check for auto functions and trailing return type and adjust the
+ // return type accordingly.
+ if (!D.isInvalidType()) {
+ // trailing-return-type is only required if we're declaring a function,
+ // and not, for instance, a pointer to a function.
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+ !FTI.TrailingReturnType && chunkIndex == 0) {
+ Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_auto_missing_trailing_return);
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else if (FTI.TrailingReturnType) {
+ if (T.hasQualifiers() || !isa<AutoType>(T)) {
+ // T must be exactly 'auto' at this point. See CWG issue 681.
+ Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_trailing_return_without_auto)
+ << T << D.getDeclSpec().getSourceRange();
+ D.setInvalidType(true);
+ }
+
+ T = GetTypeFromParser(
+ ParsedType::getFromOpaquePtr(FTI.TrailingReturnType),
+ &ReturnTypeInfo);
+ }
+ }
+
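The cases this block distinguishes, in C++0x source (a sketch; later dialects relax the first error):

    auto f() -> int;    // OK: 'auto' plus trailing-return-type
    auto g();           // error: err_auto_missing_trailing_return
    // int h() -> int;  // error: trailing return requires bare 'auto' (CWG 681)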
// cv-qualifiers on return types are pointless except when the type is a
// class type in C++.
if (T.getCVRQualifiers() && D.getDeclSpec().getTypeQualifiers() &&
@@ -1229,6 +1727,13 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
break;
}
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = FTI.isVariadic;
+ EPI.TypeQuals = FTI.TypeQuals;
+ EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
+ : FTI.RefQualifierIsLValueRef? RQ_LValue
+ : RQ_RValue;
+
// Otherwise, we have a function with an argument list that is
// potentially variadic.
llvm::SmallVector<QualType, 16> ArgTys;
@@ -1280,30 +1785,25 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
llvm::SmallVector<QualType, 4> Exceptions;
- Exceptions.reserve(FTI.NumExceptions);
- for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
- // FIXME: Preserve type source info.
- QualType ET = GetTypeFromParser(FTI.Exceptions[ei].Ty);
- // Check that the type is valid for an exception spec, and drop it if
- // not.
- if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
- Exceptions.push_back(ET);
+ if (FTI.hasExceptionSpec) {
+ EPI.HasExceptionSpec = FTI.hasExceptionSpec;
+ EPI.HasAnyExceptionSpec = FTI.hasAnyExceptionSpec;
+ Exceptions.reserve(FTI.NumExceptions);
+ for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
+ // FIXME: Preserve type source info.
+ QualType ET = GetTypeFromParser(FTI.Exceptions[ei].Ty);
+ // Check that the type is valid for an exception spec, and
+ // drop it if not.
+ if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
+ Exceptions.push_back(ET);
+ }
+ EPI.NumExceptions = Exceptions.size();
+ EPI.Exceptions = Exceptions.data();
}
- T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(),
- FTI.isVariadic, FTI.TypeQuals,
- FTI.hasExceptionSpec,
- FTI.hasAnyExceptionSpec,
- Exceptions.size(), Exceptions.data(),
- FunctionType::ExtInfo());
+ T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(), EPI);
}
- // For GCC compatibility, we allow attributes that apply only to
- // function types to be placed on a function's return type
- // instead (as long as that type doesn't happen to be function
- // or function-pointer itself).
- ProcessDelayedFnAttrs(*this, T, FnAttrsFromPreviousChunk);
-
break;
}
case DeclaratorChunk::MemberPointer:
@@ -1364,61 +1864,190 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
T = Context.IntTy;
}
- DiagnoseDelayedFnAttrs(*this, FnAttrsFromPreviousChunk);
-
// See if there are any attributes on this declarator chunk.
- if (const AttributeList *AL = DeclType.getAttrs())
- ProcessTypeAttributeList(*this, T, false, AL, FnAttrsFromPreviousChunk);
+ if (AttributeList *attrs = const_cast<AttributeList*>(DeclType.getAttrs()))
+ processTypeAttrs(state, T, false, attrs);
}
if (getLangOptions().CPlusPlus && T->isFunctionType()) {
const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
- // C++ 8.3.5p4: A cv-qualifier-seq shall only be part of the function type
- // for a nonstatic member function, the function type to which a pointer
- // to member refers, or the top-level function type of a function typedef
- // declaration.
- bool FreeFunction = (D.getContext() != Declarator::MemberContext &&
- (!D.getCXXScopeSpec().isSet() ||
- !computeDeclContext(D.getCXXScopeSpec(), /*FIXME:*/true)->isRecord()));
- if (FnTy->getTypeQuals() != 0 &&
+ // C++ 8.3.5p4:
+ // A cv-qualifier-seq shall only be part of the function type
+ // for a nonstatic member function, the function type to which a pointer
+ // to member refers, or the top-level function type of a function typedef
+ // declaration.
+ //
+ // Core issue 547 also allows cv-qualifiers on function types that are
+ // top-level template type arguments.
+ bool FreeFunction;
+ if (!D.getCXXScopeSpec().isSet()) {
+ FreeFunction = (D.getContext() != Declarator::MemberContext ||
+ D.getDeclSpec().isFriendSpecified());
+ } else {
+ DeclContext *DC = computeDeclContext(D.getCXXScopeSpec());
+ FreeFunction = (DC && !DC->isRecord());
+ }
+
+ // C++0x [dcl.fct]p6:
+ // A ref-qualifier shall only be part of the function type for a
+ // non-static member function, the function type to which a pointer to
+ // member refers, or the top-level function type of a function typedef
+ // declaration.
+ if ((FnTy->getTypeQuals() != 0 || FnTy->getRefQualifier()) &&
+ !(D.getContext() == Declarator::TemplateTypeArgContext &&
+ !D.isFunctionDeclarator()) &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
(FreeFunction ||
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) {
- if (D.isFunctionDeclarator())
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_function_type);
- else
- Diag(D.getIdentifierLoc(),
- diag::err_invalid_qualified_typedef_function_type_use)
- << FreeFunction;
+ if (D.getContext() == Declarator::TemplateTypeArgContext) {
+ // Accept qualified function types as template type arguments as a GNU
+ // extension. This is also the subject of C++ core issue 547.
+ std::string Quals;
+ if (FnTy->getTypeQuals() != 0)
+ Quals = Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+
+ switch (FnTy->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += '&';
+ break;
+
+ case RQ_RValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += "&&";
+ break;
+ }
+
+ Diag(D.getIdentifierLoc(),
+ diag::ext_qualified_function_type_template_arg)
+ << Quals;
+ } else {
+ if (FnTy->getTypeQuals() != 0) {
+ if (D.isFunctionDeclarator())
+ Diag(D.getIdentifierLoc(),
+ diag::err_invalid_qualified_function_type);
+ else
+ Diag(D.getIdentifierLoc(),
+ diag::err_invalid_qualified_typedef_function_type_use)
+ << FreeFunction;
+ }
+
+ if (FnTy->getRefQualifier()) {
+ if (D.isFunctionDeclarator()) {
+ SourceLocation Loc = D.getIdentifierLoc();
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
+ const DeclaratorChunk &Chunk = D.getTypeObject(N-I-1);
+ if (Chunk.Kind == DeclaratorChunk::Function &&
+ Chunk.Fun.hasRefQualifier()) {
+ Loc = Chunk.Fun.getRefQualifierLoc();
+ break;
+ }
+ }
- // Strip the cv-quals from the type.
- T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(),
- FnTy->getNumArgs(), FnTy->isVariadic(), 0,
- false, false, 0, 0, FunctionType::ExtInfo());
+ Diag(Loc, diag::err_invalid_ref_qualifier_function_type)
+ << (FnTy->getRefQualifier() == RQ_LValue)
+ << FixItHint::CreateRemoval(Loc);
+ } else {
+ Diag(D.getIdentifierLoc(),
+ diag::err_invalid_ref_qualifier_typedef_function_type_use)
+ << FreeFunction
+ << (FnTy->getRefQualifier() == RQ_LValue);
+ }
+ }
+
+ // Strip the cv-qualifiers and ref-qualifiers from the type.
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ T = Context.getFunctionType(FnTy->getResultType(),
+ FnTy->arg_type_begin(),
+ FnTy->getNumArgs(), EPI);
+ }
}
}
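Core issue 547's motivating construct, sketched:

    template <typename T> struct Traits {};

    // A cv- or ref-qualified function type is fine as a template type
    // argument (diagnosed here as the GNU extension
    // ext_qualified_function_type_template_arg), even though it cannot
    // be the type of an ordinary function declarator.
    Traits<void () const> tc;
    Traits<void () &&>    tr;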
+ // Apply any undistributed attributes from the declarator.
+ if (!T.isNull())
+ if (AttributeList *attrs = D.getAttributes())
+ processTypeAttrs(state, T, false, attrs);
+
+ // Diagnose any ignored type attributes.
+ if (!T.isNull()) state.diagnoseIgnoredTypeAttrs(T);
+
// If there's a constexpr specifier, treat it as a top-level const.
if (D.getDeclSpec().isConstexprSpecified()) {
T.addConst();
}
- // Process any function attributes we might have delayed from the
- // declaration-specifiers.
- ProcessDelayedFnAttrs(*this, T, FnAttrsFromDeclSpec);
-
- // If there were any type attributes applied to the decl itself, not
- // the type, apply them to the result type. But don't do this for
- // block-literal expressions, which are parsed wierdly.
- if (D.getContext() != Declarator::BlockLiteralContext)
- if (const AttributeList *Attrs = D.getAttributes())
- ProcessTypeAttributeList(*this, T, false, Attrs,
- FnAttrsFromPreviousChunk);
-
- DiagnoseDelayedFnAttrs(*this, FnAttrsFromPreviousChunk);
-
+ // If there was an ellipsis in the declarator, the declaration declares a
+ // parameter pack whose type may be a pack expansion type.
+ if (D.hasEllipsis() && !T.isNull()) {
+ // C++0x [dcl.fct]p13:
+ // A declarator-id or abstract-declarator containing an ellipsis shall
+ // only be used in a parameter-declaration. Such a parameter-declaration
+ // is a parameter pack (14.5.3). [...]
+ switch (D.getContext()) {
+ case Declarator::PrototypeContext:
+ // C++0x [dcl.fct]p13:
+ // [...] When it is part of a parameter-declaration-clause, the
+ // parameter pack is a function parameter pack (14.5.3). The type T
+ // of the declarator-id of the function parameter pack shall contain
+ // a template parameter pack; each template parameter pack in T is
+ // expanded by the function parameter pack.
+ //
+ // We represent function parameter packs as function parameters whose
+ // type is a pack expansion.
+ if (!T->containsUnexpandedParameterPack()) {
+ Diag(D.getEllipsisLoc(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << T << D.getSourceRange();
+ D.setEllipsisLoc(SourceLocation());
+ } else {
+ T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
+ }
+ break;
+
+ case Declarator::TemplateParamContext:
+ // C++0x [temp.param]p15:
+ // If a template-parameter is a [...] parameter-declaration that
+ // declares a parameter pack (8.3.5), then the template-parameter is a
+ // template parameter pack (14.5.3).
+ //
+ // Note: core issue 778 clarifies that, if there are any unexpanded
+ // parameter packs in the type of the non-type template parameter, then
+ // it expands those parameter packs.
+ if (T->containsUnexpandedParameterPack())
+ T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
+ else if (!getLangOptions().CPlusPlus0x)
+ Diag(D.getEllipsisLoc(), diag::ext_variadic_templates);
+ break;
+
+ case Declarator::FileContext:
+ case Declarator::KNRTypeListContext:
+ case Declarator::TypeNameContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::ConditionContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::TemplateTypeArgContext:
+ // FIXME: We may want to allow parameter packs in block-literal contexts
+ // in the future.
+ Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter);
+ D.setEllipsisLoc(SourceLocation());
+ break;
+ }
+ }
+
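A sketch of the prototype-context cases above:

    // OK: 'Ts' appears in the parameter's type, so 'args' is a function
    // parameter pack whose type is a pack expansion.
    template <typename ...Ts> void f(Ts ...args) {}

    // Error: the type contains no unexpanded parameter pack to expand
    // (err_function_parameter_pack_without_parameter_packs).
    // template <typename T> void g(int ...bad);

    int main() { f(1, 2.0, 'x'); }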
if (T.isNull())
return Context.getNullTypeSourceInfo();
else if (D.isInvalidType())
@@ -1428,10 +2057,12 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
+ ASTContext &Context;
const DeclSpec &DS;
public:
- TypeSpecLocFiller(const DeclSpec &DS) : DS(DS) {}
+ TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
+ : Context(Context), DS(DS) {}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
Visit(TL.getUnqualifiedLoc());
@@ -1446,7 +2077,7 @@ namespace {
// Handle the base type, which might not have been written explicitly.
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
TL.setHasBaseTypeAsWritten(false);
- TL.getBaseLoc().initialize(SourceLocation());
+ TL.getBaseLoc().initialize(Context, SourceLocation());
} else {
TL.setHasBaseTypeAsWritten(true);
Visit(TL.getBaseLoc());
@@ -1477,7 +2108,7 @@ namespace {
// If we got no declarator info from previous Sema routines,
// just fill with the typespec loc.
if (!TInfo) {
- TL.initialize(DS.getTypeSpecTypeLoc());
+ TL.initialize(Context, DS.getTypeSpecTypeLoc());
return;
}
@@ -1523,7 +2154,7 @@ namespace {
void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
- if (Keyword == ETK_Typename) {
+ if (DS.getTypeSpecType() == TST_typename) {
TypeSourceInfo *TInfo = 0;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
if (TInfo) {
@@ -1541,7 +2172,7 @@ namespace {
void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
- if (Keyword == ETK_Typename) {
+ if (DS.getTypeSpecType() == TST_typename) {
TypeSourceInfo *TInfo = 0;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
if (TInfo) {
@@ -1570,7 +2201,7 @@ namespace {
return;
}
}
- TL.initializeLocal(SourceLocation());
+ TL.initializeLocal(Context, SourceLocation());
TL.setKeywordLoc(Keyword != ETK_None
? DS.getTypeSpecTypeLoc()
: SourceLocation());
@@ -1582,7 +2213,7 @@ namespace {
void VisitTypeLoc(TypeLoc TL) {
// FIXME: add other typespec types and change this to an assert.
- TL.initialize(DS.getTypeSpecTypeLoc());
+ TL.initialize(Context, DS.getTypeSpecTypeLoc());
}
};
@@ -1634,6 +2265,7 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Function);
TL.setLParenLoc(Chunk.Loc);
TL.setRParenLoc(Chunk.EndLoc);
+ TL.setTrailingReturn(!!Chunk.Fun.TrailingReturnType);
const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) {
@@ -1642,6 +2274,11 @@ namespace {
}
// FIXME: exception specs
}
+ void VisitParenTypeLoc(ParenTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Paren);
+ TL.setLParenLoc(Chunk.Loc);
+ TL.setRParenLoc(Chunk.EndLoc);
+ }
void VisitTypeLoc(TypeLoc TL) {
llvm_unreachable("unsupported TypeLoc kind in declarator!");
@@ -1663,6 +2300,12 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
+ // Handle parameter packs whose type is a pack expansion.
+ if (isa<PackExpansionType>(T)) {
+ cast<PackExpansionTypeLoc>(CurrTL).setEllipsisLoc(D.getEllipsisLoc());
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
DeclaratorLocFiller(D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
@@ -1675,7 +2318,7 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
} else {
- TypeSpecLocFiller(D.getDeclSpec()).Visit(CurrTL);
+ TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
}
return TInfo;
@@ -1686,7 +2329,8 @@ ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
// FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
// and Sema during declaration parsing. Try deallocating/caching them when
// it's appropriate, instead of allocating them and keeping them around.
- LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType), 8);
+ LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
+ TypeAlignment);
new (LocT) LocInfoType(T, TInfo);
assert(LocT->getTypeClass() != T->getTypeClass() &&
"LocInfoType's TypeClass conflicts with an existing Type class");
@@ -1787,160 +2431,291 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
}
-/// HandleObjCGCTypeAttribute - Process an objc's gc attribute on the
-/// specified type. The attribute contains 1 argument, weak or strong.
-static void HandleObjCGCTypeAttribute(QualType &Type,
- const AttributeList &Attr, Sema &S) {
- if (Type.getObjCGCAttr() != Qualifiers::GCNone) {
- S.Diag(Attr.getLoc(), diag::err_attribute_multiple_objc_gc);
- Attr.setInvalid();
- return;
+/// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type
+/// attribute on the specified type. Returns true to indicate that
+/// the attribute was handled, false to indicate that the type does
+/// not permit the attribute.
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
+
+ // Delay if this isn't some kind of pointer.
+ if (!type->isPointerType() &&
+ !type->isObjCObjectPointerType() &&
+ !type->isBlockPointerType())
+ return false;
+
+ if (type.getObjCGCAttr() != Qualifiers::GCNone) {
+ S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc);
+ attr.setInvalid();
+ return true;
}
// Check the attribute arguments.
- if (!Attr.getParameterName()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ if (!attr.getParameterName()) {
+ S.Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
<< "objc_gc" << 1;
- Attr.setInvalid();
- return;
+ attr.setInvalid();
+ return true;
}
Qualifiers::GC GCAttr;
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
- Attr.setInvalid();
- return;
+ if (attr.getNumArgs() != 0) {
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ attr.setInvalid();
+ return true;
}
- if (Attr.getParameterName()->isStr("weak"))
+ if (attr.getParameterName()->isStr("weak"))
GCAttr = Qualifiers::Weak;
- else if (Attr.getParameterName()->isStr("strong"))
+ else if (attr.getParameterName()->isStr("strong"))
GCAttr = Qualifiers::Strong;
else {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << "objc_gc" << Attr.getParameterName();
- Attr.setInvalid();
- return;
+ S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "objc_gc" << attr.getParameterName();
+ attr.setInvalid();
+ return true;
}
- Type = S.Context.getObjCGCQualType(Type, GCAttr);
+ type = S.Context.getObjCGCQualType(type, GCAttr);
+ return true;
}
-/// Process an individual function attribute. Returns true if the
-/// attribute does not make sense to apply to this type.
-bool ProcessFnAttr(Sema &S, QualType &Type, const AttributeList &Attr) {
- if (Attr.getKind() == AttributeList::AT_noreturn) {
- // Complain immediately if the arg count is wrong.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- Attr.setInvalid();
- return false;
+namespace {
+ /// A helper class to unwrap a type down to a function for the
+ /// purposes of applying attributes there.
+ ///
+ /// Use:
+ /// FunctionTypeUnwrapper unwrapped(SemaRef, T);
+ /// if (unwrapped.isFunctionType()) {
+ /// const FunctionType *fn = unwrapped.get();
+ /// // change fn somehow
+ /// T = unwrapped.wrap(fn);
+ /// }
+ struct FunctionTypeUnwrapper {
+ enum WrapKind {
+ Desugar,
+ Parens,
+ Pointer,
+ BlockPointer,
+ Reference,
+ MemberPointer
+ };
+
+ QualType Original;
+ const FunctionType *Fn;
+ llvm::SmallVector<unsigned char /*WrapKind*/, 8> Stack;
+
+ FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) {
+ while (true) {
+ const Type *Ty = T.getTypePtr();
+ if (isa<FunctionType>(Ty)) {
+ Fn = cast<FunctionType>(Ty);
+ return;
+ } else if (isa<ParenType>(Ty)) {
+ T = cast<ParenType>(Ty)->getInnerType();
+ Stack.push_back(Parens);
+ } else if (isa<PointerType>(Ty)) {
+ T = cast<PointerType>(Ty)->getPointeeType();
+ Stack.push_back(Pointer);
+ } else if (isa<BlockPointerType>(Ty)) {
+ T = cast<BlockPointerType>(Ty)->getPointeeType();
+ Stack.push_back(BlockPointer);
+ } else if (isa<MemberPointerType>(Ty)) {
+ T = cast<MemberPointerType>(Ty)->getPointeeType();
+ Stack.push_back(MemberPointer);
+ } else if (isa<ReferenceType>(Ty)) {
+ T = cast<ReferenceType>(Ty)->getPointeeType();
+ Stack.push_back(Reference);
+ } else {
+ const Type *DTy = Ty->getUnqualifiedDesugaredType();
+ if (Ty == DTy) {
+ Fn = 0;
+ return;
+ }
+
+ T = QualType(DTy, 0);
+ Stack.push_back(Desugar);
+ }
+ }
}
- // Delay if this is not a function or pointer to block.
- if (!Type->isFunctionPointerType()
- && !Type->isBlockPointerType()
- && !Type->isFunctionType()
- && !Type->isMemberFunctionPointerType())
- return true;
-
- // Otherwise we can process right away.
- Type = S.Context.getNoReturnType(Type);
- return false;
- }
+ bool isFunctionType() const { return (Fn != 0); }
+ const FunctionType *get() const { return Fn; }
- if (Attr.getKind() == AttributeList::AT_regparm) {
- // The warning is emitted elsewhere
- if (Attr.getNumArgs() != 1) {
- return false;
+ QualType wrap(Sema &S, const FunctionType *New) {
+ // If T wasn't modified from the unwrapped type, do nothing.
+ if (New == get()) return Original;
+
+ Fn = New;
+ return wrap(S.Context, Original, 0);
+ }
+
+ private:
+ QualType wrap(ASTContext &C, QualType Old, unsigned I) {
+ if (I == Stack.size())
+ return C.getQualifiedType(Fn, Old.getQualifiers());
+
+ // Build up the inner type, applying the qualifiers from the old
+ // type to the new type.
+ SplitQualType SplitOld = Old.split();
+
+ // As a special case, tail-recurse if there are no qualifiers.
+ if (SplitOld.second.empty())
+ return wrap(C, SplitOld.first, I);
+ return C.getQualifiedType(wrap(C, SplitOld.first, I), SplitOld.second);
+ }
+
+ QualType wrap(ASTContext &C, const Type *Old, unsigned I) {
+ if (I == Stack.size()) return QualType(Fn, 0);
+
+ switch (static_cast<WrapKind>(Stack[I++])) {
+ case Desugar:
+ // This is the point at which we potentially lose source
+ // information.
+ return wrap(C, Old->getUnqualifiedDesugaredType(), I);
+
+ case Parens: {
+ QualType New = wrap(C, cast<ParenType>(Old)->getInnerType(), I);
+ return C.getParenType(New);
+ }
+
+ case Pointer: {
+ QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
+ return C.getPointerType(New);
+ }
+
+ case BlockPointer: {
+ QualType New = wrap(C, cast<BlockPointerType>(Old)->getPointeeType(),I);
+ return C.getBlockPointerType(New);
+ }
+
+ case MemberPointer: {
+ const MemberPointerType *OldMPT = cast<MemberPointerType>(Old);
+ QualType New = wrap(C, OldMPT->getPointeeType(), I);
+ return C.getMemberPointerType(New, OldMPT->getClass());
+ }
+
+ case Reference: {
+ const ReferenceType *OldRef = cast<ReferenceType>(Old);
+ QualType New = wrap(C, OldRef->getPointeeType(), I);
+ if (isa<LValueReferenceType>(OldRef))
+ return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue());
+ else
+ return C.getRValueReferenceType(New);
+ }
+ }
+
+ llvm_unreachable("unknown wrapping kind");
+ return QualType();
}
+ };
+}
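What the unwrapper buys in practice: a function-type attribute written around pointer or paren sugar is applied to the function type underneath, and the sugar is rebuilt on top of the adjusted type. For instance (a sketch):

    /* noreturn must land on the pointee function type; the pointer and
       any parens are re-wrapped around the result. */
    typedef void (*fatal_fn)(void) __attribute__((noreturn));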
+
+/// Process an individual function attribute. Returns true to
+/// indicate that the attribute was handled, false if it wasn't.
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
- // Delay if this is not a function or pointer to block.
- if (!Type->isFunctionPointerType()
- && !Type->isBlockPointerType()
- && !Type->isFunctionType()
- && !Type->isMemberFunctionPointerType())
+ FunctionTypeUnwrapper unwrapped(S, type);
+
+ if (attr.getKind() == AttributeList::AT_noreturn) {
+ if (S.CheckNoReturnAttr(attr))
return true;
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
// Otherwise we can process right away.
- Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArg(0));
- llvm::APSInt NumParams(32);
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
- // The warning is emitted elsewhere
- if (NumParamsExpr->isTypeDependent() || NumParamsExpr->isValueDependent() ||
- !NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context))
+ if (attr.getKind() == AttributeList::AT_regparm) {
+ unsigned value;
+ if (S.CheckRegparmAttr(attr, value))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
return false;
- Type = S.Context.getRegParmType(Type, NumParams.getZExtValue());
- return false;
- }
+ // Diagnose regparm with fastcall.
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CC = fn->getCallConv();
+ if (CC == CC_X86FastCall) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << FunctionType::getNameForCallConv(CC)
+ << "regparm";
+ attr.setInvalid();
+ return true;
+ }
- // Otherwise, a calling convention.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- Attr.setInvalid();
- return false;
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withRegParm(value);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
}
- QualType T = Type;
- if (const PointerType *PT = Type->getAs<PointerType>())
- T = PT->getPointeeType();
- else if (const BlockPointerType *BPT = Type->getAs<BlockPointerType>())
- T = BPT->getPointeeType();
- else if (const MemberPointerType *MPT = Type->getAs<MemberPointerType>())
- T = MPT->getPointeeType();
- else if (const ReferenceType *RT = Type->getAs<ReferenceType>())
- T = RT->getPointeeType();
- const FunctionType *Fn = T->getAs<FunctionType>();
+ // Otherwise, a calling convention.
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(attr, CC))
+ return true;
// Delay if the type didn't work out to a function.
- if (!Fn) return true;
+ if (!unwrapped.isFunctionType()) return false;
- // TODO: diagnose uses of these conventions on the wrong target.
- CallingConv CC;
- switch (Attr.getKind()) {
- case AttributeList::AT_cdecl: CC = CC_C; break;
- case AttributeList::AT_fastcall: CC = CC_X86FastCall; break;
- case AttributeList::AT_stdcall: CC = CC_X86StdCall; break;
- case AttributeList::AT_thiscall: CC = CC_X86ThisCall; break;
- case AttributeList::AT_pascal: CC = CC_X86Pascal; break;
- default: llvm_unreachable("unexpected attribute kind"); return false;
- }
-
- CallingConv CCOld = Fn->getCallConv();
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CCOld = fn->getCallConv();
if (S.Context.getCanonicalCallConv(CC) ==
S.Context.getCanonicalCallConv(CCOld)) {
- Attr.setInvalid();
- return false;
+ FunctionType::ExtInfo EI= unwrapped.get()->getExtInfo().withCallingConv(CC);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
}
if (CCOld != CC_Default) {
// Should we diagnose reapplications of the same convention?
- S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
<< FunctionType::getNameForCallConv(CC)
<< FunctionType::getNameForCallConv(CCOld);
- Attr.setInvalid();
- return false;
+ attr.setInvalid();
+ return true;
}
// Diagnose the use of X86 fastcall on varargs or unprototyped functions.
if (CC == CC_X86FastCall) {
- if (isa<FunctionNoProtoType>(Fn)) {
- S.Diag(Attr.getLoc(), diag::err_cconv_knr)
+ if (isa<FunctionNoProtoType>(fn)) {
+ S.Diag(attr.getLoc(), diag::err_cconv_knr)
<< FunctionType::getNameForCallConv(CC);
- Attr.setInvalid();
- return false;
+ attr.setInvalid();
+ return true;
}
- const FunctionProtoType *FnP = cast<FunctionProtoType>(Fn);
+ const FunctionProtoType *FnP = cast<FunctionProtoType>(fn);
if (FnP->isVariadic()) {
- S.Diag(Attr.getLoc(), diag::err_cconv_varargs)
+ S.Diag(attr.getLoc(), diag::err_cconv_varargs)
<< FunctionType::getNameForCallConv(CC);
- Attr.setInvalid();
- return false;
+ attr.setInvalid();
+ return true;
+ }
+
+ // Also diagnose fastcall with regparm.
+ if (fn->getRegParmType()) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "regparm"
+ << FunctionType::getNameForCallConv(CC);
+ attr.setInvalid();
+ return true;
}
}
- Type = S.Context.getCallConvType(Type, CC);
- return false;
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withCallingConv(CC);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
}
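[editor's note] A minimal usage sketch (not part of the patch) of the combinations this rewritten function-type attribute handler accepts and rejects, assuming an x86 target:

  void __attribute__((regparm(2))) f(int a, int b);          // accepted

  // void __attribute__((fastcall, regparm(2))) g(int a, int b);
  //   -> err_attributes_are_not_compatible: fastcall already dictates
  //      how arguments are passed in registers

  // void __attribute__((fastcall)) h(int a, ...);
  //   -> err_cconv_varargs: fastcall requires a fixed, prototyped signature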
/// HandleVectorSizeAttribute - this attribute is only applicable to integral
@@ -1952,7 +2727,7 @@ bool ProcessFnAttr(Sema &S, QualType &Type, const AttributeList &Attr) {
/// this routine will return a new vector type.
static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
Sema &S) {
- // Check the attribute arugments.
+ // Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
Attr.setInvalid();
@@ -1994,50 +2769,121 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
// Success! Instantiate the vector type, the number of elements is > 0, and
// not required to be a power of 2, unlike GCC.
CurType = S.Context.getVectorType(CurType, vectorSize/typeSize,
- VectorType::NotAltiVec);
+ VectorType::GenericVector);
}
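[editor's note] For reference, a sketch of the attribute this handler processes; the argument is the total vector size in bytes, so the element count below is 16 / 4 = 4:

  typedef int v4si __attribute__((vector_size(16)));   // 4 x 32-bit int
  v4si add(v4si x, v4si y) { return x + y; }           // elementwise addition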
-void ProcessTypeAttributeList(Sema &S, QualType &Result,
- bool IsDeclSpec, const AttributeList *AL,
- DelayedAttributeSet &FnAttrs) {
+/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
+/// "neon_polyvector_type" attributes are used to create vector types that
+/// are mangled according to ARM's ABI. Otherwise, these types are identical
+/// to those created with the "vector_size" attribute. Unlike "vector_size"
+/// the argument to these Neon attributes is the number of vector elements,
+/// not the vector size in bytes. The vector width and element type must
+/// match one of the standard Neon vector types.
+static void HandleNeonVectorTypeAttr(QualType& CurType,
+ const AttributeList &Attr, Sema &S,
+ VectorType::VectorKind VecKind,
+ const char *AttrName) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ // The number of elements must be an ICE.
+ Expr *numEltsExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt numEltsInt(32);
+ if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
+ !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << AttrName << numEltsExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ // Only certain element types are supported for Neon vectors.
+ const BuiltinType* BTy = CurType->getAs<BuiltinType>();
+ if (!BTy ||
+ (VecKind == VectorType::NeonPolyVector &&
+ BTy->getKind() != BuiltinType::SChar &&
+ BTy->getKind() != BuiltinType::Short) ||
+ (BTy->getKind() != BuiltinType::SChar &&
+ BTy->getKind() != BuiltinType::UChar &&
+ BTy->getKind() != BuiltinType::Short &&
+ BTy->getKind() != BuiltinType::UShort &&
+ BTy->getKind() != BuiltinType::Int &&
+ BTy->getKind() != BuiltinType::UInt &&
+ BTy->getKind() != BuiltinType::LongLong &&
+ BTy->getKind() != BuiltinType::ULongLong &&
+ BTy->getKind() != BuiltinType::Float)) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+ // The total size of the vector must be 64 or 128 bits.
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ unsigned numElts = static_cast<unsigned>(numEltsInt.getZExtValue());
+ unsigned vecSize = typeSize * numElts;
+ if (vecSize != 64 && vecSize != 128) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType = S.Context.getVectorType(CurType, numElts, VecKind);
+}
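[editor's note] A sketch of the two Neon attributes (assumes an ARM target with Neon). Note the argument is an element count, not a byte size, and element width times count must come to 64 or 128 bits:

  typedef __attribute__((neon_vector_type(2))) int int32x2_t;      // 2 x 32 = 64 bits
  typedef __attribute__((neon_vector_type(4))) float float32x4_t;  // 4 x 32 = 128 bits
  // Polynomial vectors are restricted to signed char / short elements:
  typedef __attribute__((neon_polyvector_type(8))) signed char poly8x8_t;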
+
+static void processTypeAttrs(TypeProcessingState &state, QualType &type,
+ bool isDeclSpec, AttributeList *attrs) {
// Scan through and apply attributes to this type where it makes sense. Some
// attributes (such as __address_space__, __vector_size__, etc) apply to the
// type, but others can be present in the type specifiers even though they
// apply to the decl. Here we apply type attributes and ignore the rest.
- for (; AL; AL = AL->getNext()) {
+
+ AttributeList *next;
+ do {
+ AttributeList &attr = *attrs;
+ next = attr.getNext();
+
// Skip attributes that were marked to be invalid.
- if (AL->isInvalid())
+ if (attr.isInvalid())
continue;
// If this is an attribute we can handle, do so now,
// otherwise, add it to the FnAttrs list for rechaining.
- switch (AL->getKind()) {
+ switch (attr.getKind()) {
default: break;
case AttributeList::AT_address_space:
- HandleAddressSpaceTypeAttribute(Result, *AL, S);
+ HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
break;
- case AttributeList::AT_objc_gc:
- HandleObjCGCTypeAttribute(Result, *AL, S);
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ if (!handleObjCPointerTypeAttr(state, attr, type))
+ distributeObjCPointerTypeAttr(state, attr, type);
break;
case AttributeList::AT_vector_size:
- HandleVectorSizeAttr(Result, *AL, S);
+ HandleVectorSizeAttr(type, attr, state.getSema());
+ break;
+ case AttributeList::AT_neon_vector_type:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonVector, "neon_vector_type");
break;
+ case AttributeList::AT_neon_polyvector_type:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonPolyVector,
+ "neon_polyvector_type");
+ break;
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ // Never process function type attributes as part of the
+ // declaration-specifiers.
+ if (isDeclSpec)
+ distributeFunctionTypeAttrFromDeclSpec(state, attr, type);
- case AttributeList::AT_noreturn:
- case AttributeList::AT_cdecl:
- case AttributeList::AT_fastcall:
- case AttributeList::AT_stdcall:
- case AttributeList::AT_thiscall:
- case AttributeList::AT_pascal:
- case AttributeList::AT_regparm:
- // Don't process these on the DeclSpec.
- if (IsDeclSpec ||
- ProcessFnAttr(S, Result, *AL))
- FnAttrs.push_back(DelayedAttribute(AL, Result));
+ // Otherwise, handle the possible delays.
+ else if (!handleFunctionTypeAttr(state, attr, type))
+ distributeFunctionTypeAttr(state, attr, type);
break;
}
- }
+ } while ((attrs = next));
}
/// @brief Ensure that the type T is a complete type.
@@ -2110,16 +2956,19 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
if (diag == 0)
return true;
- const TagType *Tag = 0;
- if (const RecordType *Record = T->getAs<RecordType>())
- Tag = Record;
- else if (const EnumType *Enum = T->getAs<EnumType>())
- Tag = Enum;
+ const TagType *Tag = T->getAs<TagType>();
// Avoid diagnosing invalid decls as incomplete.
if (Tag && Tag->getDecl()->isInvalidDecl())
return true;
+ // Give the external AST source a chance to complete the type.
+ if (Tag && Tag->getDecl()->hasExternalLexicalStorage()) {
+ Context.getExternalSource()->CompleteType(Tag->getDecl());
+ if (!Tag->isIncompleteType())
+ return false;
+ }
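[editor's note] A hedged sketch of the hook this new code exercises: an ExternalASTSource (for example, a precompiled-header reader) can override CompleteType to fault in a tag's definition on demand. The class name is illustrative:

  #include "clang/AST/ExternalASTSource.h"

  class LazyPCHSource : public clang::ExternalASTSource {
    virtual void CompleteType(clang::TagDecl *Tag) {
      // Deserialize the definition of 'Tag'; on success,
      // Tag->isIncompleteType() becomes false and no diagnostic is emitted.
    }
  };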
+
// We have an incomplete type. Produce a diagnostic.
Diag(Loc, PD) << T;
@@ -2167,48 +3016,23 @@ QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
return Context.getElaboratedType(Keyword, NNS, T);
}
-QualType Sema::BuildTypeofExprType(Expr *E) {
- if (E->getType() == Context.OverloadTy) {
- // C++ [temp.arg.explicit]p3 allows us to resolve a template-id to a
- // function template specialization wherever deduction cannot occur.
- if (FunctionDecl *Specialization
- = ResolveSingleFunctionTemplateSpecialization(E)) {
- // The access doesn't really matter in this case.
- DeclAccessPair Found = DeclAccessPair::make(Specialization,
- Specialization->getAccess());
- E = FixOverloadedFunctionReference(E, Found, Specialization);
- if (!E)
- return QualType();
- } else {
- Diag(E->getLocStart(),
- diag::err_cannot_determine_declared_type_of_overloaded_function)
- << false << E->getSourceRange();
- return QualType();
- }
+QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
+ ExprResult ER = CheckPlaceholderExpr(E, Loc);
+ if (ER.isInvalid()) return QualType();
+ E = ER.take();
+
+ if (!E->isTypeDependent()) {
+ QualType T = E->getType();
+ if (const TagType *TT = T->getAs<TagType>())
+ DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
}
-
return Context.getTypeOfExprType(E);
}
-QualType Sema::BuildDecltypeType(Expr *E) {
- if (E->getType() == Context.OverloadTy) {
- // C++ [temp.arg.explicit]p3 allows us to resolve a template-id to a
- // function template specialization wherever deduction cannot occur.
- if (FunctionDecl *Specialization
- = ResolveSingleFunctionTemplateSpecialization(E)) {
- // The access doesn't really matter in this case.
- DeclAccessPair Found = DeclAccessPair::make(Specialization,
- Specialization->getAccess());
- E = FixOverloadedFunctionReference(E, Found, Specialization);
- if (!E)
- return QualType();
- } else {
- Diag(E->getLocStart(),
- diag::err_cannot_determine_declared_type_of_overloaded_function)
- << true << E->getSourceRange();
- return QualType();
- }
- }
+QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc) {
+ ExprResult ER = CheckPlaceholderExpr(E, Loc);
+ if (ER.isInvalid()) return QualType();
+ E = ER.take();
return Context.getDecltypeType(E);
}
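[editor's note] The net effect of routing both builders through CheckPlaceholderExpr, sketched:

  int g(int);
  int g(double);
  // decltype(g) a;      // error: cannot determine the declared type of an
  //                     // overloaded function name
  decltype(g(0)) b = 1;  // fine: the call resolves the overload, so b is int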
diff --git a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
index 1854e74..c3415cb 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
@@ -71,6 +71,55 @@ namespace {
};
}
+static void HandleMBlazeInterruptHandlerAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ d->addAttr(::new (S.Context) MBlazeInterruptHandlerAttr(Attr.getLoc(),
+ S.Context));
+ d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+}
+
+static void HandleMBlazeSaveVolatilesAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ d->addAttr(::new (S.Context) MBlazeSaveVolatilesAttr(Attr.getLoc(),
+ S.Context));
+ d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+}
+
+
+namespace {
+ class MBlazeAttributesSema : public TargetAttributesSema {
+ public:
+ MBlazeAttributesSema() { }
+ bool ProcessDeclAttribute(Scope *scope, Decl *D, const AttributeList &Attr,
+ Sema &S) const {
+ if (Attr.getName()->getName() == "interrupt_handler") {
+ HandleMBlazeInterruptHandlerAttr(D, Attr, S);
+ return true;
+ } else if (Attr.getName()->getName() == "save_volatiles") {
+ HandleMBlazeSaveVolatilesAttr(D, Attr, S);
+ return true;
+ }
+ return false;
+ }
+ };
+}
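[editor's note] Usage sketch for the two new MicroBlaze declaration attributes (assumes an mblaze target triple):

  void timer_isr(void) __attribute__((interrupt_handler));
  void critical(void)  __attribute__((save_volatiles));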
+
static void HandleX86ForceAlignArgPointerAttr(Decl *D,
const AttributeList& Attr,
Sema &S) {
@@ -189,8 +238,7 @@ namespace {
const AttributeList &Attr, Sema &S) const {
const llvm::Triple &Triple(S.Context.Target.getTriple());
if (Triple.getOS() == llvm::Triple::Win32 ||
- Triple.getOS() == llvm::Triple::MinGW32 ||
- Triple.getOS() == llvm::Triple::MinGW64) {
+ Triple.getOS() == llvm::Triple::MinGW32) {
switch (Attr.getKind()) {
case AttributeList::AT_dllimport: HandleDLLImportAttr(D, Attr, S);
return true;
@@ -220,8 +268,9 @@ const TargetAttributesSema &Sema::getTargetAttributesSema() const {
case llvm::Triple::msp430:
return *(TheTargetAttributesSema = new MSP430AttributesSema);
+ case llvm::Triple::mblaze:
+ return *(TheTargetAttributesSema = new MBlazeAttributesSema);
case llvm::Triple::x86:
return *(TheTargetAttributesSema = new X86AttributesSema);
}
}
-
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
index e7bfbe6..944e6a1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -1,20 +1,22 @@
-//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===/
+//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//===----------------------------------------------------------------------===/
+//===----------------------------------------------------------------------===//
//
// This file implements a semantic tree transformation that takes a given
// AST and rebuilds it, possibly transforming some nodes in the process.
//
-//===----------------------------------------------------------------------===/
+//===----------------------------------------------------------------------===//
+
#ifndef LLVM_CLANG_SEMA_TREETRANSFORM_H
#define LLVM_CLANG_SEMA_TREETRANSFORM_H
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/AST/Decl.h"
@@ -25,11 +27,11 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
-#include "clang/AST/TypeLocBuilder.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Designator.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/ErrorHandling.h"
+#include "TypeLocBuilder.h"
#include <algorithm>
namespace clang {
@@ -88,9 +90,26 @@ using namespace sema;
/// (\c getBaseLocation(), \c getBaseEntity()).
template<typename Derived>
class TreeTransform {
+ /// \brief Private RAII object that helps us forget and then re-remember
+ /// the template argument corresponding to a partially-substituted parameter
+ /// pack.
+ class ForgetPartiallySubstitutedPackRAII {
+ Derived &Self;
+ TemplateArgument Old;
+
+ public:
+ ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) {
+ Old = Self.ForgetPartiallySubstitutedPack();
+ }
+
+ ~ForgetPartiallySubstitutedPackRAII() {
+ Self.RememberPartiallySubstitutedPack(Old);
+ }
+ };
+
protected:
Sema &SemaRef;
-
+
public:
/// \brief Initializes a new tree transformer.
TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
@@ -151,7 +170,9 @@ public:
DeclarationName Entity) : Self(Self) {
OldLocation = Self.getDerived().getBaseLocation();
OldEntity = Self.getDerived().getBaseEntity();
- Self.getDerived().setBase(Location, Entity);
+
+ if (Location.isValid())
+ Self.getDerived().setBase(Location, Entity);
}
~TemporaryBase() {
@@ -180,6 +201,77 @@ public:
return E->isDefaultArgument();
}
+ /// \brief Determine whether we should expand a pack expansion with the
+ /// given set of parameter packs into separate arguments by repeatedly
+ /// transforming the pattern.
+ ///
+ /// By default, the transformer never tries to expand pack expansions.
+ /// Subclasses can override this routine to provide different behavior.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis that identifies the
+ /// pack expansion.
+ ///
+ /// \param PatternRange The source range that covers the entire pattern of
+ /// the pack expansion.
+ ///
+ /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// pattern.
+ ///
+ /// \param NumUnexpanded The number of unexpanded parameter packs in
+ /// \p Unexpanded.
+ ///
+ /// \param ShouldExpand Will be set to \c true if the transformer should
+ /// expand the corresponding pack expansions into separate arguments. When
+ /// set, \c NumExpansions must also be set.
+ ///
+ /// \param RetainExpansion Whether the caller should add an unexpanded
+ /// pack expansion after all of the expanded arguments. This is used
+ /// when extending explicitly-specified template argument packs per
+ /// C++0x [temp.arg.explicit]p9.
+ ///
+ /// \param NumExpansions The number of separate arguments that will be in
+ /// the expanded form of the corresponding pack expansion. This is both an
+ /// input and an output parameter, which can be set by the caller if the
+ /// number of expansions is known a priori (e.g., due to a prior substitution)
+ /// and will be set by the callee when the number of expansions is known.
+ /// The callee must set this value when \c ShouldExpand is \c true; it may
+ /// set this value in other cases.
+ ///
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// must be set.
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ const UnexpandedParameterPack *Unexpanded,
+ unsigned NumUnexpanded,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ ShouldExpand = false;
+ return false;
+ }
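[editor's note] For context, the kind of code this hook governs; the template instantiator's override reports how many elements 'ts...' expands to. A sketch:

  template<typename ...Ts> void callee(Ts ...args);

  template<typename ...Ts>
  void caller(Ts ...ts) {
    callee(ts...);  // TryExpandParameterPacks decides whether this pack
                    // expansion becomes N separate arguments
  }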
+
+ /// \brief "Forget" about the partially-substituted pack template argument,
+ /// when performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ return TemplateArgument();
+ }
+
+ /// \brief "Remember" the partially-substituted pack template argument
+ /// after performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
+
+ /// \brief Note to the derived class when a function parameter pack is
+ /// being expanded.
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
+
/// \brief Transforms the given type into another type.
///
/// By default, this routine transforms a type by creating a
@@ -189,7 +281,7 @@ public:
/// switched to storing TypeSourceInfos.
///
/// \returns the transformed type.
- QualType TransformType(QualType T, QualType ObjectType = QualType());
+ QualType TransformType(QualType T);
/// \brief Transforms the given type-with-location into a new
/// type-with-location.
@@ -199,15 +291,13 @@ public:
/// may override this function (to take over all type
/// transformations) or some set of the TransformXXXType functions
/// to alter the transformation.
- TypeSourceInfo *TransformType(TypeSourceInfo *DI,
- QualType ObjectType = QualType());
+ TypeSourceInfo *TransformType(TypeSourceInfo *DI);
/// \brief Transform the given type-with-location into a new
/// type, collecting location information in the given builder
/// as necessary.
///
- QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL,
- QualType ObjectType = QualType());
+ QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL);
/// \brief Transform the given statement.
///
@@ -230,6 +320,33 @@ public:
/// \returns the transformed expression.
ExprResult TransformExpr(Expr *E);
+ /// \brief Transform the given list of expressions.
+ ///
+ /// This routine transforms a list of expressions by invoking
+ /// \c TransformExpr() for each subexpression. However, it also provides
+ /// support for variadic templates by expanding any pack expansions (if the
+ /// derived class permits such expansion) along the way. When pack expansions
+ /// are present, the number of outputs may not equal the number of inputs.
+ ///
+ /// \param Inputs The set of expressions to be transformed.
+ ///
+ /// \param NumInputs The number of expressions in \c Inputs.
+ ///
+ /// \param IsCall If \c true, then this transform is being performed on
+  /// function-call arguments, in which case any arguments that should be
+  /// dropped will be dropped.
+ ///
+ /// \param Outputs The transformed input expressions will be added to this
+ /// vector.
+ ///
+ /// \param ArgChanged If non-NULL, will be set \c true if any argument changed
+ /// due to transformation.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall,
+ llvm::SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged = 0);
+
/// \brief Transform the given declaration, which is referenced from a type
/// or expression.
///
@@ -275,8 +392,7 @@ public:
  /// Identifiers and selectors are returned unmodified. Subclasses may
/// override this function to provide alternate behavior.
DeclarationNameInfo
- TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
- QualType ObjectType = QualType());
+ TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
/// \brief Transform the given template name.
///
@@ -284,7 +400,8 @@ public:
/// and nested-name-specifiers that occur within the template name.
/// Subclasses may override this function to provide alternate behavior.
TemplateName TransformTemplateName(TemplateName Name,
- QualType ObjectType = QualType());
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = 0);
/// \brief Transform the given template argument.
///
@@ -297,6 +414,49 @@ public:
bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
TemplateArgumentLoc &Output);
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// Note that this overload of \c TransformTemplateArguments() is merely
+ /// a convenience function. Subclasses that wish to override this behavior
+ /// should override the iterator-based member template version.
+ ///
+ /// \param Inputs The set of template arguments to be transformed.
+ ///
+ /// \param NumInputs The number of template arguments in \p Inputs.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ bool TransformTemplateArguments(const TemplateArgumentLoc *Inputs,
+ unsigned NumInputs,
+ TemplateArgumentListInfo &Outputs) {
+ return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs);
+ }
+
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// \param First An iterator to the first template argument.
+ ///
+ /// \param Last An iterator one step past the last template argument.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ template<typename InputIterator>
+ bool TransformTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs);
+
/// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
void InventTemplateArgumentLoc(const TemplateArgument &Arg,
TemplateArgumentLoc &ArgLoc);
@@ -309,10 +469,19 @@ public:
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
- QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T, \
- QualType ObjectType = QualType());
+ QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
#include "clang/AST/TypeLocNodes.def"
+ QualType
+ TransformTemplateSpecializationType(TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template);
+
+ QualType
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifier *Prefix);
+
/// \brief Transforms the parameters of a function type into the
/// given vectors.
///
@@ -320,20 +489,18 @@ public:
/// variables vector are acceptable.
///
/// Return true on error.
- bool TransformFunctionTypeParams(FunctionProtoTypeLoc TL,
+ bool TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
llvm::SmallVectorImpl<QualType> &PTypes,
- llvm::SmallVectorImpl<ParmVarDecl*> &PVars);
+ llvm::SmallVectorImpl<ParmVarDecl*> *PVars);
/// \brief Transforms a single function-type parameter. Return null
/// on error.
- ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm);
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ llvm::Optional<unsigned> NumExpansions);
- QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL,
- QualType ObjectType);
-
- QualType
- TransformTemplateSpecializationType(const TemplateSpecializationType *T,
- QualType ObjectType);
+ QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E);
@@ -440,7 +607,7 @@ public:
/// By default, performs semantic analysis when building the vector type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
- VectorType::AltiVecSpecific AltiVecSpec);
+ VectorType::VectorKind VecKind);
/// \brief Build a new extended vector type given the element type and
/// number of elements.
@@ -467,6 +634,7 @@ public:
QualType *ParamTypes,
unsigned NumParamTypes,
bool Variadic, unsigned Quals,
+ RefQualifierKind RefQualifier,
const FunctionType::ExtInfo &Info);
/// \brief Build a new unprototyped function type.
@@ -495,7 +663,7 @@ public:
///
/// By default, performs semantic analysis when building the typeof type.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildTypeOfExprType(Expr *Underlying);
+ QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);
/// \brief Build a new typeof(type) type.
///
@@ -506,7 +674,14 @@ public:
///
/// By default, performs semantic analysis when building the decltype type.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildDecltypeType(Expr *Underlying);
+ QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);
+
+ /// \brief Build a new C++0x auto type.
+ ///
+ /// By default, builds a new AutoType with the given deduced type.
+ QualType RebuildAutoType(QualType Deduced) {
+ return SemaRef.Context.getAutoType(Deduced);
+ }
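[editor's note] The C++0x feature this rebuilder supports, sketched:

  auto i = 42;        // AutoType with deduced type 'int'
  const auto &r = i;  // deduction composes with cv-qualifiers and references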
/// \brief Build a new template specialization type.
///
@@ -517,12 +692,21 @@ public:
SourceLocation TemplateLoc,
const TemplateArgumentListInfo &Args);
+ /// \brief Build a new parenthesized type.
+ ///
+ /// By default, builds a new ParenType type from the inner type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildParenType(QualType InnerType) {
+ return SemaRef.Context.getParenType(InnerType);
+ }
+
/// \brief Build a new qualified name type.
///
/// By default, builds a new ElaboratedType type from the keyword,
/// the nested-name-specifier and the named type.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildElaboratedType(ElaboratedTypeKeyword Keyword,
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS, QualType Named) {
return SemaRef.Context.getElaboratedType(Keyword, NNS, Named);
}
@@ -534,14 +718,16 @@ public:
/// this routine to provide different behavior.
QualType RebuildDependentTemplateSpecializationType(
ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
+ NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
const IdentifierInfo *Name,
SourceLocation NameLoc,
const TemplateArgumentListInfo &Args) {
// Rebuild the template name.
// TODO: avoid TemplateName abstraction
TemplateName InstName =
- getDerived().RebuildTemplateName(NNS, *Name, QualType());
+ getDerived().RebuildTemplateName(Qualifier, QualifierRange, *Name,
+ QualType(), 0);
if (InstName.isNull())
return QualType();
@@ -549,7 +735,7 @@ public:
// If it's still dependent, make a dependent specialization.
if (InstName.getAsDependentTemplateName())
return SemaRef.Context.getDependentTemplateSpecializationType(
- Keyword, NNS, Name, Args);
+ Keyword, Qualifier, Name, Args);
// Otherwise, make an elaborated type wrapping a non-dependent
// specialization.
@@ -621,9 +807,28 @@ public:
}
if (!Tag) {
- // FIXME: Would be nice to highlight just the source range.
- SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
- << Kind << Id << DC;
+ // Check where the name exists but isn't a tag type and use that to emit
+ // better diagnostics.
+ LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
+ SemaRef.LookupQualifiedName(Result, DC);
+ switch (Result.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue: {
+ NamedDecl *SomeDecl = Result.getRepresentativeDecl();
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(SomeDecl)) Kind = 1;
+ else if (isa<ClassTemplateDecl>(SomeDecl)) Kind = 2;
+ SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << Kind;
+ SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
+ break;
+ }
+ default:
+ // FIXME: Would be nice to highlight just the source range.
+ SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
+ << Kind << Id << DC;
+ break;
+ }
return QualType();
}
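[editor's note] A sketch of the improved diagnostic: when the elaborated-type lookup during transformation finds a typedef or class template instead of a tag, the new code diagnoses the mismatch and points at the offending declaration rather than claiming the name is unknown:

  typedef int Size;
  // struct Size *p;  // now: err_tag_reference_non_tag plus a
  //                  // note_declared_at at the typedef, instead of a
  //                  // bare err_not_tag_in_scope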
@@ -638,6 +843,18 @@ public:
return SemaRef.Context.getElaboratedType(Keyword, NNS, T);
}
+ /// \brief Build a new pack expansion type.
+ ///
+ /// By default, builds a new PackExpansionType type from the given pattern.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildPackExpansionType(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, PatternRange, EllipsisLoc,
+ NumExpansions);
+ }
+
/// \brief Build a new nested-name-specifier given the prefix and an
/// identifier that names the next step in the nested-name-specifier.
///
@@ -689,8 +906,10 @@ public:
/// template name. Subclasses may override this routine to provide different
/// behavior.
TemplateName RebuildTemplateName(NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
const IdentifierInfo &II,
- QualType ObjectType);
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope);
/// \brief Build a new template name given a nested name specifier and the
/// overloaded operator name that is referred to as a template.
@@ -702,7 +921,19 @@ public:
TemplateName RebuildTemplateName(NestedNameSpecifier *Qualifier,
OverloadedOperatorKind Operator,
QualType ObjectType);
-
+
+ /// \brief Build a new template name given a template template parameter pack
+ /// and the
+ ///
+ /// By default, performs semantic analysis to determine whether the name can
+ /// be resolved to a specific template, then builds the appropriate kind of
+ /// template name. Subclasses may override this routine to provide different
+ /// behavior.
+ TemplateName RebuildTemplateName(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) {
+ return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+
/// \brief Build a new compound statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -752,11 +983,9 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildLabelStmt(SourceLocation IdentLoc,
- IdentifierInfo *Id,
- SourceLocation ColonLoc,
- Stmt *SubStmt) {
- return SemaRef.ActOnLabelStmt(IdentLoc, Id, ColonLoc, SubStmt);
+ StmtResult RebuildLabelStmt(SourceLocation IdentLoc, LabelDecl *L,
+ SourceLocation ColonLoc, Stmt *SubStmt) {
+ return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
}
/// \brief Build a new "if" statement.
@@ -764,8 +993,8 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildIfStmt(SourceLocation IfLoc, Sema::FullExprArg Cond,
- VarDecl *CondVar, Stmt *Then,
- SourceLocation ElseLoc, Stmt *Else) {
+ VarDecl *CondVar, Stmt *Then,
+ SourceLocation ElseLoc, Stmt *Else) {
return getSema().ActOnIfStmt(IfLoc, Cond, CondVar, Then, ElseLoc, Else);
}
@@ -774,7 +1003,7 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
- Expr *Cond, VarDecl *CondVar) {
+ Expr *Cond, VarDecl *CondVar) {
return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond,
CondVar);
}
@@ -784,7 +1013,7 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildSwitchStmtBody(SourceLocation SwitchLoc,
- Stmt *Switch, Stmt *Body) {
+ Stmt *Switch, Stmt *Body) {
return getSema().ActOnFinishSwitchStmt(SwitchLoc, Switch, Body);
}
@@ -792,10 +1021,8 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildWhileStmt(SourceLocation WhileLoc,
- Sema::FullExprArg Cond,
- VarDecl *CondVar,
- Stmt *Body) {
+ StmtResult RebuildWhileStmt(SourceLocation WhileLoc, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Stmt *Body) {
return getSema().ActOnWhileStmt(WhileLoc, Cond, CondVar, Body);
}
@@ -804,10 +1031,8 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildDoStmt(SourceLocation DoLoc, Stmt *Body,
- SourceLocation WhileLoc,
- SourceLocation LParenLoc,
- Expr *Cond,
- SourceLocation RParenLoc) {
+ SourceLocation WhileLoc, SourceLocation LParenLoc,
+ Expr *Cond, SourceLocation RParenLoc) {
return getSema().ActOnDoStmt(DoLoc, Body, WhileLoc, LParenLoc,
Cond, RParenLoc);
}
@@ -816,24 +1041,21 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildForStmt(SourceLocation ForLoc,
- SourceLocation LParenLoc,
- Stmt *Init, Sema::FullExprArg Cond,
- VarDecl *CondVar, Sema::FullExprArg Inc,
- SourceLocation RParenLoc, Stmt *Body) {
+ StmtResult RebuildForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *Init, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Sema::FullExprArg Inc,
+ SourceLocation RParenLoc, Stmt *Body) {
return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
- CondVar,
- Inc, RParenLoc, Body);
+ CondVar, Inc, RParenLoc, Body);
}
/// \brief Build a new goto statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildGotoStmt(SourceLocation GotoLoc,
- SourceLocation LabelLoc,
- LabelStmt *Label) {
- return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label->getID());
+ StmtResult RebuildGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
+ LabelDecl *Label) {
+ return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label);
}
/// \brief Build a new indirect goto statement.
@@ -841,8 +1063,8 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildIndirectGotoStmt(SourceLocation GotoLoc,
- SourceLocation StarLoc,
- Expr *Target) {
+ SourceLocation StarLoc,
+ Expr *Target) {
return getSema().ActOnIndirectGotoStmt(GotoLoc, StarLoc, Target);
}
@@ -850,9 +1072,7 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildReturnStmt(SourceLocation ReturnLoc,
- Expr *Result) {
-
+ StmtResult RebuildReturnStmt(SourceLocation ReturnLoc, Expr *Result) {
return getSema().ActOnReturnStmt(ReturnLoc, Result);
}
@@ -977,13 +1197,11 @@ public:
///
  /// By default, performs semantic analysis to build the new declaration.
/// Subclasses may override this routine to provide different behavior.
- VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl, QualType T,
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *Declarator,
IdentifierInfo *Name,
- SourceLocation Loc,
- SourceRange TypeRange) {
- return getSema().BuildExceptionDeclaration(0, T, Declarator, Name, Loc,
- TypeRange);
+ SourceLocation Loc) {
+ return getSema().BuildExceptionDeclaration(0, Declarator, Name, Loc);
}
/// \brief Build a new C++ catch statement.
@@ -1126,10 +1344,10 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
MultiExprArg Args,
- SourceLocation *CommaLocs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = 0) {
return getSema().ActOnCallExpr(/*Scope=*/0, Callee, LParenLoc,
- move(Args), CommaLocs, RParenLoc);
+ move(Args), RParenLoc, ExecConfig);
}
/// \brief Build a new member access expression.
@@ -1137,26 +1355,32 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildMemberExpr(Expr *Base, SourceLocation OpLoc,
- bool isArrow,
- NestedNameSpecifier *Qualifier,
- SourceRange QualifierRange,
- const DeclarationNameInfo &MemberNameInfo,
- ValueDecl *Member,
- NamedDecl *FoundDecl,
+ bool isArrow,
+ NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
+ const DeclarationNameInfo &MemberNameInfo,
+ ValueDecl *Member,
+ NamedDecl *FoundDecl,
const TemplateArgumentListInfo *ExplicitTemplateArgs,
- NamedDecl *FirstQualifierInScope) {
+ NamedDecl *FirstQualifierInScope) {
if (!Member->getDeclName()) {
- // We have a reference to an unnamed field.
+ // We have a reference to an unnamed field. This is always the
+ // base of an anonymous struct/union member access, i.e. the
+ // field is always of record type.
assert(!Qualifier && "Can't have an unnamed field with a qualifier!");
+ assert(Member->getType()->isRecordType() &&
+ "unnamed member not of record type?");
if (getSema().PerformObjectMemberConversion(Base, Qualifier,
FoundDecl, Member))
return ExprError();
+ ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind();
MemberExpr *ME =
new (getSema().Context) MemberExpr(Base, isArrow,
Member, MemberNameInfo,
- cast<FieldDecl>(Member)->getType());
+ cast<FieldDecl>(Member)->getType(),
+ VK, OK_Ordinary);
return getSema().Owned(ME);
}
@@ -1195,10 +1419,10 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildConditionalOperator(Expr *Cond,
- SourceLocation QuestionLoc,
- Expr *LHS,
- SourceLocation ColonLoc,
- Expr *RHS) {
+ SourceLocation QuestionLoc,
+ Expr *LHS,
+ SourceLocation ColonLoc,
+ Expr *RHS) {
return getSema().ActOnConditionalOp(QuestionLoc, ColonLoc, Cond,
LHS, RHS);
}
@@ -1322,9 +1546,8 @@ public:
/// rather than attempting to map the label statement itself.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildAddrLabelExpr(SourceLocation AmpAmpLoc,
- SourceLocation LabelLoc,
- LabelStmt *Label) {
- return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label->getID());
+ SourceLocation LabelLoc, LabelDecl *Label) {
+ return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label);
}
/// \brief Build a new GNU statement expression.
@@ -1337,19 +1560,6 @@ public:
return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc);
}
- /// \brief Build a new __builtin_types_compatible_p expression.
- ///
- /// By default, performs semantic analysis to build the new expression.
- /// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildTypesCompatibleExpr(SourceLocation BuiltinLoc,
- TypeSourceInfo *TInfo1,
- TypeSourceInfo *TInfo2,
- SourceLocation RParenLoc) {
- return getSema().BuildTypesCompatibleExpr(BuiltinLoc,
- TInfo1, TInfo2,
- RParenLoc);
- }
-
/// \brief Build a new __builtin_choose_expr expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -1492,16 +1702,12 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXFunctionalCastExpr(SourceRange TypeRange,
- TypeSourceInfo *TInfo,
- SourceLocation LParenLoc,
- Expr *Sub,
- SourceLocation RParenLoc) {
- return getSema().ActOnCXXTypeConstructExpr(TypeRange,
- ParsedType::make(TInfo->getType()),
- LParenLoc,
+ ExprResult RebuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ Expr *Sub,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc,
MultiExprArg(&Sub, 1),
- /*CommaLocs=*/0,
RParenLoc);
}
@@ -1517,6 +1723,7 @@ public:
RParenLoc);
}
+
/// \brief Build a new C++ typeid(expr) expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -1529,14 +1736,38 @@ public:
RParenLoc);
}
+ /// \brief Build a new C++ __uuidof(type) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ __uuidof(expr) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
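[editor's note] These rebuilders support the Microsoft __uuidof extension; a sketch, assuming -fms-extensions and a GUID declaration in scope:

  struct _GUID; typedef struct _GUID GUID;
  struct __declspec(uuid("12345678-1234-1234-1234-123456789abc")) Widget;
  // const GUID &id  = __uuidof(Widget);           // type form
  // const GUID &id2 = __uuidof(*(Widget*)0);      // expression form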
+
/// \brief Build a new C++ "this" expression.
///
/// By default, builds a new "this" expression without performing any
/// semantic analysis. Subclasses may override this routine to provide
/// different behavior.
ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
- QualType ThisType,
- bool isImplicit) {
+ QualType ThisType,
+ bool isImplicit) {
return getSema().Owned(
new (getSema().Context) CXXThisExpr(ThisLoc, ThisType,
isImplicit));
@@ -1565,14 +1796,12 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXScalarValueInitExpr(SourceLocation TypeStartLoc,
- SourceLocation LParenLoc,
- QualType T,
- SourceLocation RParenLoc) {
- return getSema().ActOnCXXTypeConstructExpr(SourceRange(TypeStartLoc),
- ParsedType::make(T), LParenLoc,
+ ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc,
MultiExprArg(getSema(), 0, 0),
- 0, RParenLoc);
+ RParenLoc);
}
/// \brief Build a new C++ "new" expression.
@@ -1580,26 +1809,24 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXNewExpr(SourceLocation StartLoc,
- bool UseGlobal,
- SourceLocation PlacementLParen,
- MultiExprArg PlacementArgs,
- SourceLocation PlacementRParen,
- SourceRange TypeIdParens,
- QualType AllocType,
- SourceLocation TypeLoc,
- SourceRange TypeRange,
- Expr *ArraySize,
- SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen) {
+ bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocatedType,
+ TypeSourceInfo *AllocatedTypeInfo,
+ Expr *ArraySize,
+ SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen) {
return getSema().BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
move(PlacementArgs),
PlacementRParen,
TypeIdParens,
- AllocType,
- TypeLoc,
- TypeRange,
+ AllocatedType,
+ AllocatedTypeInfo,
ArraySize,
ConstructorLParen,
move(ConstructorArgs),
@@ -1623,12 +1850,22 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildUnaryTypeTrait(UnaryTypeTrait Trait,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- QualType T,
- SourceLocation RParenLoc) {
- return getSema().ActOnUnaryTypeTrait(Trait, StartLoc, LParenLoc,
- ParsedType::make(T), RParenLoc);
+ SourceLocation StartLoc,
+ TypeSourceInfo *T,
+ SourceLocation RParenLoc) {
+ return getSema().BuildUnaryTypeTrait(Trait, StartLoc, T, RParenLoc);
+ }
+
+ /// \brief Build a new binary type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildBinaryTypeTrait(BinaryTypeTrait Trait,
+ SourceLocation StartLoc,
+ TypeSourceInfo *LhsT,
+ TypeSourceInfo *RhsT,
+ SourceLocation RParenLoc) {
+ return getSema().BuildBinaryTypeTrait(Trait, StartLoc, LhsT, RhsT, RParenLoc);
}
/// \brief Build a new (previously unresolved) declaration reference
@@ -1672,7 +1909,8 @@ public:
bool IsElidable,
MultiExprArg Args,
bool RequiresZeroInit,
- CXXConstructExpr::ConstructionKind ConstructKind) {
+ CXXConstructExpr::ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
ASTOwningVector<Expr*> ConvertedArgs(SemaRef);
if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc,
ConvertedArgs))
@@ -1680,24 +1918,21 @@ public:
return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable,
move_arg(ConvertedArgs),
- RequiresZeroInit, ConstructKind);
+ RequiresZeroInit, ConstructKind,
+ ParenRange);
}
/// \brief Build a new object-construction expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXTemporaryObjectExpr(SourceLocation TypeBeginLoc,
- QualType T,
- SourceLocation LParenLoc,
- MultiExprArg Args,
- SourceLocation *Commas,
- SourceLocation RParenLoc) {
- return getSema().ActOnCXXTypeConstructExpr(SourceRange(TypeBeginLoc),
- ParsedType::make(T),
+ ExprResult RebuildCXXTemporaryObjectExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
LParenLoc,
move(Args),
- Commas,
RParenLoc);
}
@@ -1705,18 +1940,13 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXUnresolvedConstructExpr(SourceLocation TypeBeginLoc,
- QualType T,
- SourceLocation LParenLoc,
- MultiExprArg Args,
- SourceLocation *Commas,
- SourceLocation RParenLoc) {
- return getSema().ActOnCXXTypeConstructExpr(SourceRange(TypeBeginLoc,
- /*FIXME*/LParenLoc),
- ParsedType::make(T),
+ ExprResult RebuildCXXUnresolvedConstructExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
LParenLoc,
move(Args),
- Commas,
RParenLoc);
}
@@ -1767,6 +1997,24 @@ public:
R, TemplateArgs);
}
+ /// \brief Build a new noexcept expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNoexceptExpr(SourceRange Range, Expr *Arg) {
+ return SemaRef.BuildCXXNoexceptExpr(Range.getBegin(), Arg, Range.getEnd());
+ }
+
+ /// \brief Build a new expression to compute the length of a parameter pack.
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ unsigned Length) {
+ return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
+ OperatorLoc, Pack, PackLoc,
+ RParenLoc, Length);
+ }
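[editor's note] The expression this rebuilder produces, sketched:

  template<typename ...Ts>
  struct arity {
    static const unsigned value = sizeof...(Ts);  // a SizeOfPackExpr
  };
  // arity<int, char, long>::value == 3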
+
/// \brief Build a new Objective-C @encode expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -1781,6 +2029,7 @@ public:
/// \brief Build a new Objective-C class message.
ExprResult RebuildObjCMessageExpr(TypeSourceInfo *ReceiverTypeInfo,
Selector Sel,
+ SourceLocation SelectorLoc,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
MultiExprArg Args,
@@ -1788,13 +2037,14 @@ public:
return SemaRef.BuildClassMessage(ReceiverTypeInfo,
ReceiverTypeInfo->getType(),
/*SuperLoc=*/SourceLocation(),
- Sel, Method, LBracLoc, RBracLoc,
- move(Args));
+ Sel, Method, LBracLoc, SelectorLoc,
+ RBracLoc, move(Args));
}
/// \brief Build a new Objective-C instance message.
ExprResult RebuildObjCMessageExpr(Expr *Receiver,
Selector Sel,
+ SourceLocation SelectorLoc,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
MultiExprArg Args,
@@ -1802,8 +2052,8 @@ public:
return SemaRef.BuildInstanceMessage(Receiver,
Receiver->getType(),
/*SuperLoc=*/SourceLocation(),
- Sel, Method, LBracLoc, RBracLoc,
- move(Args));
+ Sel, Method, LBracLoc, SelectorLoc,
+ RBracLoc, move(Args));
}
/// \brief Build a new Objective-C ivar reference expression.
@@ -1864,24 +2114,20 @@ public:
/*TemplateArgs=*/0);
}
- /// \brief Build a new Objective-C implicit setter/getter reference
- /// expression.
+ /// \brief Build a new Objective-C property reference expression.
///
/// By default, performs semantic analysis to build the new expression.
- /// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildObjCImplicitSetterGetterRefExpr(
- ObjCMethodDecl *Getter,
- QualType T,
- ObjCMethodDecl *Setter,
- SourceLocation NameLoc,
- Expr *Base) {
- // Since these expressions can only be value-dependent, we do not need to
- // perform semantic analysis again.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCPropertyRefExpr(Expr *Base, QualType T,
+ ObjCMethodDecl *Getter,
+ ObjCMethodDecl *Setter,
+ SourceLocation PropertyLoc) {
+ // Since these expressions can only be value-dependent, we do not
+ // need to perform semantic analysis again.
return Owned(
- new (getSema().Context) ObjCImplicitSetterGetterRefExpr(Getter, T,
- Setter,
- NameLoc,
- Base));
+ new (getSema().Context) ObjCPropertyRefExpr(Getter, Setter, T,
+ VK_LValue, OK_ObjCProperty,
+ PropertyLoc, Base));
}
/// \brief Build a new Objective-C "isa" expression.
@@ -1915,8 +2161,8 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildShuffleVectorExpr(SourceLocation BuiltinLoc,
- MultiExprArg SubExprs,
- SourceLocation RParenLoc) {
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
// Find the declaration for __builtin_shufflevector
const IdentifierInfo &Name
= SemaRef.Context.Idents.get("__builtin_shufflevector");
@@ -1928,7 +2174,7 @@ public:
FunctionDecl *Builtin = cast<FunctionDecl>(*Lookup.first);
Expr *Callee
= new (SemaRef.Context) DeclRefExpr(Builtin, Builtin->getType(),
- BuiltinLoc);
+ VK_LValue, BuiltinLoc);
SemaRef.UsualUnaryConversions(Callee);
// Build the CallExpr
@@ -1937,6 +2183,7 @@ public:
CallExpr *TheCall = new (SemaRef.Context) CallExpr(SemaRef.Context, Callee,
Subs, NumSubExprs,
Builtin->getCallResultType(),
+ Expr::getValueKindForType(Builtin->getResultType()),
RParenLoc);
ExprResult OwnedCall(SemaRef.Owned(TheCall));
@@ -1948,6 +2195,74 @@ public:
OwnedCall.release();
return move(Result);
}
+
+ /// \brief Build a new template argument pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for a template argument. Subclasses may override this routine to provide
+ /// different behavior.
+ TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ switch (Pattern.getArgument().getKind()) {
+ case TemplateArgument::Expression: {
+ ExprResult Result
+ = getSema().CheckPackExpansion(Pattern.getSourceExpression(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isInvalid())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(Result.get(), Result.get());
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgumentLoc(TemplateArgument(
+ Pattern.getArgument().getAsTemplate(),
+ NumExpansions),
+ Pattern.getTemplateQualifierRange(),
+ Pattern.getTemplateNameLoc(),
+ EllipsisLoc);
+
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Pack expansion pattern has no parameter packs");
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *Expansion
+ = getSema().CheckPackExpansion(Pattern.getTypeSourceInfo(),
+ EllipsisLoc,
+ NumExpansions))
+ return TemplateArgumentLoc(TemplateArgument(Expansion->getType()),
+ Expansion);
+ break;
+ }
+
+ return TemplateArgumentLoc();
+ }
+
+ /// \brief Build a new expression pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for an expression. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
+ }
+
+private:
+ QualType TransformTypeInObjectScope(QualType T,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ NestedNameSpecifier *Prefix);
+
+ TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *T,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ NestedNameSpecifier *Prefix);
};
template<typename Derived>
@@ -1961,6 +2276,7 @@ StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
// Transform individual statement nodes
#define STMT(Node, Parent) \
case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(S));
+#define ABSTRACT_STMT(Node)
#define EXPR(Node, Parent)
#include "clang/AST/StmtNodes.inc"
@@ -1978,7 +2294,7 @@ StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
}
}
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
}
@@ -1996,7 +2312,101 @@ ExprResult TreeTransform<Derived>::TransformExpr(Expr *E) {
#include "clang/AST/StmtNodes.inc"
}
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
+ unsigned NumInputs,
+ bool IsCall,
+ llvm::SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged) {
+ for (unsigned I = 0; I != NumInputs; ++I) {
+ // If requested, drop call arguments that need to be dropped.
+ if (IsCall && getDerived().DropCallArgument(Inputs[I])) {
+ if (ArgChanged)
+ *ArgChanged = true;
+
+ break;
+ }
+
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(Inputs[I])) {
+ Expr *Pattern = Expansion->getPattern();
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = Expansion->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(),
+ Pattern->getSourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ Expand, RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult OutPattern = getDerived().TransformExpr(Pattern);
+ if (OutPattern.isInvalid())
+ return true;
+
+ ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(),
+ Expansion->getEllipsisLoc(),
+ NumExpansions);
+ if (Out.isInvalid())
+ return true;
+
+ if (ArgChanged)
+ *ArgChanged = true;
+ Outputs.push_back(Out.get());
+ continue;
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ Out = RebuildPackExpansion(Out.get(), Expansion->getEllipsisLoc(),
+ OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+ }
+
+ if (ArgChanged)
+ *ArgChanged = true;
+ Outputs.push_back(Out.get());
+ }
+
+ continue;
+ }
+
+ ExprResult Result = getDerived().TransformExpr(Inputs[I]);
+ if (Result.isInvalid())
+ return true;
+
+ if (Result.get() != Inputs[I] && ArgChanged)
+ *ArgChanged = true;
+
+ Outputs.push_back(Result.get());
+ }
+
+ return false;
}
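// For illustration (hypothetical caller, with placeholder names): a
// derived transform would typically feed a call's argument array
// through TransformExprs:
//
//   llvm::SmallVector<Expr *, 8> Args;
//   bool ArgChanged = false;
//   if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(),
//                                   /*IsCall=*/true, Args, &ArgChanged))
//     return ExprError();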
template<typename Derived>
@@ -2005,26 +2415,26 @@ TreeTransform<Derived>::TransformNestedNameSpecifier(NestedNameSpecifier *NNS,
SourceRange Range,
QualType ObjectType,
NamedDecl *FirstQualifierInScope) {
- if (!NNS)
- return 0;
+ NestedNameSpecifier *Prefix = NNS->getPrefix();
// Transform the prefix of this nested name specifier.
- NestedNameSpecifier *Prefix = NNS->getPrefix();
if (Prefix) {
Prefix = getDerived().TransformNestedNameSpecifier(Prefix, Range,
ObjectType,
FirstQualifierInScope);
if (!Prefix)
return 0;
-
- // Clear out the object type and the first qualifier in scope; they only
- // apply to the first element in the nested-name-specifier.
- ObjectType = QualType();
- FirstQualifierInScope = 0;
}
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
+ if (Prefix) {
+ // The object type and qualifier-in-scope really apply to the
+ // leftmost entity.
+ ObjectType = QualType();
+ FirstQualifierInScope = 0;
+ }
+
assert((Prefix || !ObjectType.isNull()) &&
"Identifier nested-name-specifier with no prefix or object type");
if (!getDerived().AlwaysRebuild() && Prefix == NNS->getPrefix() &&
@@ -2057,8 +2467,10 @@ TreeTransform<Derived>::TransformNestedNameSpecifier(NestedNameSpecifier *NNS,
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec: {
TemporaryBase Rebase(*this, Range.getBegin(), DeclarationName());
- QualType T = getDerived().TransformType(QualType(NNS->getAsType(), 0),
- ObjectType);
+ QualType T = TransformTypeInObjectScope(QualType(NNS->getAsType(), 0),
+ ObjectType,
+ FirstQualifierInScope,
+ Prefix);
if (T.isNull())
return 0;
@@ -2080,8 +2492,7 @@ TreeTransform<Derived>::TransformNestedNameSpecifier(NestedNameSpecifier *NNS,
template<typename Derived>
DeclarationNameInfo
TreeTransform<Derived>
-::TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
- QualType ObjectType) {
+::TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo) {
DeclarationName Name = NameInfo.getName();
if (!Name)
return DeclarationNameInfo();
@@ -2102,16 +2513,15 @@ TreeTransform<Derived>
TypeSourceInfo *NewTInfo;
CanQualType NewCanTy;
if (TypeSourceInfo *OldTInfo = NameInfo.getNamedTypeInfo()) {
- NewTInfo = getDerived().TransformType(OldTInfo, ObjectType);
- if (!NewTInfo)
- return DeclarationNameInfo();
- NewCanTy = SemaRef.Context.getCanonicalType(NewTInfo->getType());
+ NewTInfo = getDerived().TransformType(OldTInfo);
+ if (!NewTInfo)
+ return DeclarationNameInfo();
+ NewCanTy = SemaRef.Context.getCanonicalType(NewTInfo->getType());
}
else {
NewTInfo = 0;
TemporaryBase Rebase(*this, NameInfo.getLoc(), Name);
- QualType NewT = getDerived().TransformType(Name.getCXXNameType(),
- ObjectType);
+ QualType NewT = getDerived().TransformType(Name.getCXXNameType());
if (NewT.isNull())
return DeclarationNameInfo();
NewCanTy = SemaRef.Context.getCanonicalType(NewT);
@@ -2134,14 +2544,16 @@ TreeTransform<Derived>
template<typename Derived>
TemplateName
TreeTransform<Derived>::TransformTemplateName(TemplateName Name,
- QualType ObjectType) {
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
SourceLocation Loc = getDerived().getBaseLocation();
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
NestedNameSpecifier *NNS
= getDerived().TransformNestedNameSpecifier(QTN->getQualifier(),
- /*FIXME:*/SourceRange(getDerived().getBaseLocation()),
- ObjectType);
+ /*FIXME*/ SourceRange(Loc),
+ ObjectType,
+ FirstQualifierInScope);
if (!NNS)
return TemplateName();
@@ -2161,25 +2573,36 @@ TreeTransform<Derived>::TransformTemplateName(TemplateName Name,
}
// These should be getting filtered out before they make it into the AST.
- assert(false && "overloaded template name survived to here");
+ llvm_unreachable("overloaded template name survived to here");
}
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
- NestedNameSpecifier *NNS
- = getDerived().TransformNestedNameSpecifier(DTN->getQualifier(),
- /*FIXME:*/SourceRange(getDerived().getBaseLocation()),
- ObjectType);
- if (!NNS && DTN->getQualifier())
- return TemplateName();
+ NestedNameSpecifier *NNS = DTN->getQualifier();
+ if (NNS) {
+ NNS = getDerived().TransformNestedNameSpecifier(NNS,
+ /*FIXME:*/SourceRange(Loc),
+ ObjectType,
+ FirstQualifierInScope);
+ if (!NNS) return TemplateName();
+
+ // These apply to the scope specifier, not the template.
+ ObjectType = QualType();
+ FirstQualifierInScope = 0;
+ }
if (!getDerived().AlwaysRebuild() &&
NNS == DTN->getQualifier() &&
ObjectType.isNull())
return Name;
- if (DTN->isIdentifier())
- return getDerived().RebuildTemplateName(NNS, *DTN->getIdentifier(),
- ObjectType);
+ if (DTN->isIdentifier()) {
+ // FIXME: Bad range
+ SourceRange QualifierRange(getDerived().getBaseLocation());
+ return getDerived().RebuildTemplateName(NNS, QualifierRange,
+ *DTN->getIdentifier(),
+ ObjectType,
+ FirstQualifierInScope);
+ }
return getDerived().RebuildTemplateName(NNS, DTN->getOperator(),
ObjectType);
@@ -2198,8 +2621,24 @@ TreeTransform<Derived>::TransformTemplateName(TemplateName Name,
return TemplateName(TransTemplate);
}
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ TemplateTemplateParmDecl *TransParam
+ = cast_or_null<TemplateTemplateParmDecl>(
+ getDerived().TransformDecl(Loc, SubstPack->getParameterPack()));
+ if (!TransParam)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TransParam == SubstPack->getParameterPack())
+ return Name;
+
+ return getDerived().RebuildTemplateName(TransParam,
+ SubstPack->getArgumentPack());
+ }
+
// These should be getting filtered out before they reach the AST.
- assert(false && "overloaded function decl survived to here");
+ llvm_unreachable("overloaded function decl survived to here");
return TemplateName();
}
@@ -2222,7 +2661,11 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
case TemplateArgument::Template:
Output = TemplateArgumentLoc(Arg, SourceRange(), Loc);
break;
-
+
+ case TemplateArgument::TemplateExpansion:
+ Output = TemplateArgumentLoc(Arg, SourceRange(), Loc, Loc);
+ break;
+
case TemplateArgument::Expression:
Output = TemplateArgumentLoc(Arg, Arg.getAsExpr());
break;
@@ -2291,7 +2734,10 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
Input.getTemplateNameLoc());
return false;
}
-
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Caller should expand pack expansions");
+
case TemplateArgument::Expression: {
// Template argument expressions are not potentially evaluated.
EnterExpressionEvaluationContext Unevaluated(getSema(),
@@ -2325,10 +2771,14 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
TransformedArgs.push_back(OutputArg.getArgument());
}
- TemplateArgument Result;
- Result.setArgumentPack(TransformedArgs.data(), TransformedArgs.size(),
- true);
- Output = TemplateArgumentLoc(Result, Input.getLocInfo());
+
+ TemplateArgument *TransformedArgsPtr
+ = new (getSema().Context) TemplateArgument[TransformedArgs.size()];
+ std::copy(TransformedArgs.begin(), TransformedArgs.end(),
+ TransformedArgsPtr);
+ Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr,
+ TransformedArgs.size()),
+ Input.getLocInfo());
return false;
}
}
@@ -2337,22 +2787,201 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
return true;
}
+/// \brief Iterator adaptor that invents template argument location information
+/// for each of the template arguments in its underlying iterator.
+template<typename Derived, typename InputIterator>
+class TemplateArgumentLocInventIterator {
+ TreeTransform<Derived> &Self;
+ InputIterator Iter;
+
+public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef typename std::iterator_traits<InputIterator>::difference_type
+ difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const { return &Arg; }
+ };
+
+ TemplateArgumentLocInventIterator() { }
+
+ explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self,
+ InputIterator Iter)
+ : Self(Self), Iter(Iter) { }
+
+ TemplateArgumentLocInventIterator &operator++() {
+ ++Iter;
+ return *this;
+ }
+
+ TemplateArgumentLocInventIterator operator++(int) {
+ TemplateArgumentLocInventIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ reference operator*() const {
+ TemplateArgumentLoc Result;
+ Self.InventTemplateArgumentLoc(*Iter, Result);
+ return Result;
+ }
+
+ pointer operator->() const { return pointer(**this); }
+
+ friend bool operator==(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter == Y.Iter;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter != Y.Iter;
+ }
+};
+
+template<typename Derived>
+template<typename InputIterator>
+bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs) {
+ for (; First != Last; ++First) {
+ TemplateArgumentLoc Out;
+ TemplateArgumentLoc In = *First;
+
+ if (In.getArgument().getKind() == TemplateArgument::Pack) {
+      // Unpack argument packs, translating them into separate
+      // arguments.
+ // FIXME: We could do much better if we could guarantee that the
+ // TemplateArgumentLocInfo for the pack expansion would be usable for
+ // all of the template arguments in the argument pack.
+ typedef TemplateArgumentLocInventIterator<Derived,
+ TemplateArgument::pack_iterator>
+ PackLocIterator;
+ if (TransformTemplateArguments(PackLocIterator(*this,
+ In.getArgument().pack_begin()),
+ PackLocIterator(*this,
+ In.getArgument().pack_end()),
+ Outputs))
+ return true;
+
+ continue;
+ }
+
+ if (In.getArgument().isPackExpansion()) {
+ // We have a pack expansion, for which we will be substituting into
+ // the pattern.
+ SourceLocation Ellipsis;
+ llvm::Optional<unsigned> OrigNumExpansions;
+ TemplateArgumentLoc Pattern
+ = In.getPackExpansionPattern(Ellipsis, OrigNumExpansions,
+ getSema().Context);
+
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Ellipsis,
+ Pattern.getSourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ Expand,
+ RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ TemplateArgumentLoc OutPattern;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
+ NumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ continue;
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out))
+ return true;
+
+ if (Out.getArgument().containsUnexpandedParameterPack()) {
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+ }
+
+ Outputs.addArgument(Out);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ continue;
+ }
+
+ // The simple case:
+ if (getDerived().TransformTemplateArgument(In, Out))
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ return false;
+}
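+// For illustration (hypothetical caller, with placeholder names): any
+// input iterator over TemplateArgumentLoc is accepted, so a plain
+// pointer range can be transformed directly:
+//
+//   TemplateArgumentListInfo TransArgs;
+//   if (getDerived().TransformTemplateArguments(ArgArray,
+//                                               ArgArray + NumArgs,
+//                                               TransArgs))
+//     return ExprError();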
+
//===----------------------------------------------------------------------===//
// Type transformation
//===----------------------------------------------------------------------===//
template<typename Derived>
-QualType TreeTransform<Derived>::TransformType(QualType T,
- QualType ObjectType) {
+QualType TreeTransform<Derived>::TransformType(QualType T) {
if (getDerived().AlreadyTransformed(T))
return T;
// Temporary workaround. All of these transformations should
// eventually turn into transformations on TypeLocs.
- TypeSourceInfo *DI = getSema().Context.CreateTypeSourceInfo(T);
- DI->getTypeLoc().initialize(getDerived().getBaseLocation());
+ TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(T,
+ getDerived().getBaseLocation());
- TypeSourceInfo *NewDI = getDerived().TransformType(DI, ObjectType);
+ TypeSourceInfo *NewDI = getDerived().TransformType(DI);
if (!NewDI)
return QualType();
@@ -2361,8 +2990,7 @@ QualType TreeTransform<Derived>::TransformType(QualType T,
}
template<typename Derived>
-TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI,
- QualType ObjectType) {
+TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI) {
if (getDerived().AlreadyTransformed(DI->getType()))
return DI;
@@ -2371,7 +2999,7 @@ TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI,
TypeLoc TL = DI->getTypeLoc();
TLB.reserve(TL.getFullDataSize());
- QualType Result = getDerived().TransformType(TLB, TL, ObjectType);
+ QualType Result = getDerived().TransformType(TLB, TL);
if (Result.isNull())
return 0;
@@ -2380,14 +3008,12 @@ TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI,
template<typename Derived>
QualType
-TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T,
- QualType ObjectType) {
+TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T) {
switch (T.getTypeLocClass()) {
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
case TypeLoc::CLASS: \
- return getDerived().Transform##CLASS##Type(TLB, cast<CLASS##TypeLoc>(T), \
- ObjectType);
+ return getDerived().Transform##CLASS##Type(TLB, cast<CLASS##TypeLoc>(T));
#include "clang/AST/TypeLocNodes.def"
}
@@ -2403,12 +3029,10 @@ TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
- QualifiedTypeLoc T,
- QualType ObjectType) {
+ QualifiedTypeLoc T) {
Qualifiers Quals = T.getType().getLocalQualifiers();
- QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc(),
- ObjectType);
+ QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
if (Result.isNull())
return QualType();
@@ -2427,6 +3051,77 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
return Result;
}
+/// \brief Transforms a type that was written in a scope specifier,
+/// given an object type, the results of unqualified lookup, and
+/// an already-instantiated prefix.
+///
+/// The object type is provided iff the scope specifier qualifies the
+/// member of a dependent member-access expression. The prefix is
+/// provided iff the the scope specifier in which this appears has a
+/// prefix.
+///
+/// This is private to TreeTransform.
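+///
+/// For illustration (hypothetical source, not from this patch), in
+/// \code
+///   x.template B<T>::f();
+/// \endcode
+/// the type of 'x' is the object type, and 'B<T>' is the type written
+/// in the scope specifier.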
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformTypeInObjectScope(QualType T,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ NestedNameSpecifier *Prefix) {
+ if (getDerived().AlreadyTransformed(T))
+ return T;
+
+ TypeSourceInfo *TSI =
+ SemaRef.Context.getTrivialTypeSourceInfo(T, getDerived().getBaseLocation());
+
+ TSI = getDerived().TransformTypeInObjectScope(TSI, ObjectType,
+ UnqualLookup, Prefix);
+ if (!TSI) return QualType();
+ return TSI->getType();
+}
+
+template<typename Derived>
+TypeSourceInfo *
+TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSI,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ NestedNameSpecifier *Prefix) {
+  // TODO: in some cases, there might be some verification to do here.
+ if (ObjectType.isNull())
+ return getDerived().TransformType(TSI);
+
+ QualType T = TSI->getType();
+ if (getDerived().AlreadyTransformed(T))
+ return TSI;
+
+ TypeLocBuilder TLB;
+ QualType Result;
+
+ if (isa<TemplateSpecializationType>(T)) {
+ TemplateSpecializationTypeLoc TL
+ = cast<TemplateSpecializationTypeLoc>(TSI->getTypeLoc());
+
+ TemplateName Template =
+ getDerived().TransformTemplateName(TL.getTypePtr()->getTemplateName(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull()) return 0;
+
+ Result = getDerived()
+ .TransformTemplateSpecializationType(TLB, TL, Template);
+ } else if (isa<DependentTemplateSpecializationType>(T)) {
+ DependentTemplateSpecializationTypeLoc TL
+ = cast<DependentTemplateSpecializationTypeLoc>(TSI->getTypeLoc());
+
+ Result = getDerived()
+ .TransformDependentTemplateSpecializationType(TLB, TL, Prefix);
+ } else {
+ // Nothing special needs to be done for these.
+ Result = getDerived().TransformType(TLB, TSI->getTypeLoc());
+ }
+
+ if (Result.isNull()) return 0;
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result);
+}
+
template <class TyLoc> static inline
QualType TransformTypeSpecType(TypeLocBuilder &TLB, TyLoc T) {
TyLoc NewT = TLB.push<TyLoc>(T.getType());
@@ -2436,8 +3131,7 @@ QualType TransformTypeSpecType(TypeLocBuilder &TLB, TyLoc T) {
template<typename Derived>
QualType TreeTransform<Derived>::TransformBuiltinType(TypeLocBuilder &TLB,
- BuiltinTypeLoc T,
- QualType ObjectType) {
+ BuiltinTypeLoc T) {
BuiltinTypeLoc NewT = TLB.push<BuiltinTypeLoc>(T.getType());
NewT.setBuiltinLoc(T.getBuiltinLoc());
if (T.needsExtraLocalData())
@@ -2447,16 +3141,14 @@ QualType TreeTransform<Derived>::TransformBuiltinType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformComplexType(TypeLocBuilder &TLB,
- ComplexTypeLoc T,
- QualType ObjectType) {
+ ComplexTypeLoc T) {
// FIXME: recurse?
return TransformTypeSpecType(TLB, T);
}
template<typename Derived>
QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
- PointerTypeLoc TL,
- QualType ObjectType) {
+ PointerTypeLoc TL) {
QualType PointeeType
= getDerived().TransformType(TLB, TL.getPointeeLoc());
if (PointeeType.isNull())
@@ -2474,7 +3166,7 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
NewT.setStarLoc(TL.getStarLoc());
return Result;
}
-
+
if (getDerived().AlwaysRebuild() ||
PointeeType != TL.getPointeeLoc().getType()) {
Result = getDerived().RebuildPointerType(PointeeType, TL.getSigilLoc());
@@ -2490,8 +3182,7 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
- BlockPointerTypeLoc TL,
- QualType ObjectType) {
+ BlockPointerTypeLoc TL) {
QualType PointeeType
= getDerived().TransformType(TLB, TL.getPointeeLoc());
if (PointeeType.isNull())
@@ -2518,8 +3209,7 @@ TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
- ReferenceTypeLoc TL,
- QualType ObjectType) {
+ ReferenceTypeLoc TL) {
const ReferenceType *T = TL.getTypePtr();
// Note that this works with the pointee-as-written.
@@ -2551,25 +3241,22 @@ TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformLValueReferenceType(TypeLocBuilder &TLB,
- LValueReferenceTypeLoc TL,
- QualType ObjectType) {
- return TransformReferenceType(TLB, TL, ObjectType);
+ LValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
}
template<typename Derived>
QualType
TreeTransform<Derived>::TransformRValueReferenceType(TypeLocBuilder &TLB,
- RValueReferenceTypeLoc TL,
- QualType ObjectType) {
- return TransformReferenceType(TLB, TL, ObjectType);
+ RValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
}
template<typename Derived>
QualType
TreeTransform<Derived>::TransformMemberPointerType(TypeLocBuilder &TLB,
- MemberPointerTypeLoc TL,
- QualType ObjectType) {
- MemberPointerType *T = TL.getTypePtr();
+ MemberPointerTypeLoc TL) {
+ const MemberPointerType *T = TL.getTypePtr();
QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
if (PointeeType.isNull())
@@ -2600,9 +3287,8 @@ TreeTransform<Derived>::TransformMemberPointerType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB,
- ConstantArrayTypeLoc TL,
- QualType ObjectType) {
- ConstantArrayType *T = TL.getTypePtr();
+ ConstantArrayTypeLoc TL) {
+ const ConstantArrayType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -2636,9 +3322,8 @@ TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformIncompleteArrayType(
TypeLocBuilder &TLB,
- IncompleteArrayTypeLoc TL,
- QualType ObjectType) {
- IncompleteArrayType *T = TL.getTypePtr();
+ IncompleteArrayTypeLoc TL) {
+ const IncompleteArrayType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -2665,9 +3350,8 @@ QualType TreeTransform<Derived>::TransformIncompleteArrayType(
template<typename Derived>
QualType
TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
- VariableArrayTypeLoc TL,
- QualType ObjectType) {
- VariableArrayType *T = TL.getTypePtr();
+ VariableArrayTypeLoc TL) {
+ const VariableArrayType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -2706,9 +3390,8 @@ TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
- DependentSizedArrayTypeLoc TL,
- QualType ObjectType) {
- DependentSizedArrayType *T = TL.getTypePtr();
+ DependentSizedArrayTypeLoc TL) {
+ const DependentSizedArrayType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
if (ElementType.isNull())
return QualType();
@@ -2716,33 +3399,36 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
// Array bounds are not potentially evaluated contexts
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
- ExprResult SizeResult
- = getDerived().TransformExpr(T->getSizeExpr());
- if (SizeResult.isInvalid())
+ // Prefer the expression from the TypeLoc; the other may have been uniqued.
+ Expr *origSize = TL.getSizeExpr();
+ if (!origSize) origSize = T->getSizeExpr();
+
+ ExprResult sizeResult
+ = getDerived().TransformExpr(origSize);
+ if (sizeResult.isInvalid())
return QualType();
- Expr *Size = static_cast<Expr*>(SizeResult.get());
+ Expr *size = sizeResult.get();
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
ElementType != T->getElementType() ||
- Size != T->getSizeExpr()) {
+ size != origSize) {
Result = getDerived().RebuildDependentSizedArrayType(ElementType,
T->getSizeModifier(),
- Size,
+ size,
T->getIndexTypeCVRQualifiers(),
TL.getBracketsRange());
if (Result.isNull())
return QualType();
}
- else SizeResult.take();
// We might have any sort of array type now, but fortunately they
// all have the same location layout.
ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
NewTL.setLBracketLoc(TL.getLBracketLoc());
NewTL.setRBracketLoc(TL.getRBracketLoc());
- NewTL.setSizeExpr(Size);
+ NewTL.setSizeExpr(size);
return Result;
}
@@ -2750,9 +3436,8 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
TypeLocBuilder &TLB,
- DependentSizedExtVectorTypeLoc TL,
- QualType ObjectType) {
- DependentSizedExtVectorType *T = TL.getTypePtr();
+ DependentSizedExtVectorTypeLoc TL) {
+ const DependentSizedExtVectorType *T = TL.getTypePtr();
// FIXME: ext vector locs should be nested
QualType ElementType = getDerived().TransformType(T->getElementType());
@@ -2792,9 +3477,8 @@ QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
template<typename Derived>
QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
- VectorTypeLoc TL,
- QualType ObjectType) {
- VectorType *T = TL.getTypePtr();
+ VectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(T->getElementType());
if (ElementType.isNull())
return QualType();
@@ -2803,7 +3487,7 @@ QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
if (getDerived().AlwaysRebuild() ||
ElementType != T->getElementType()) {
Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(),
- T->getAltiVecSpecific());
+ T->getVectorKind());
if (Result.isNull())
return QualType();
}
@@ -2816,9 +3500,8 @@ QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
- ExtVectorTypeLoc TL,
- QualType ObjectType) {
- VectorType *T = TL.getTypePtr();
+ ExtVectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
QualType ElementType = getDerived().TransformType(T->getElementType());
if (ElementType.isNull())
return QualType();
@@ -2841,9 +3524,38 @@ QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
template<typename Derived>
ParmVarDecl *
-TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm) {
+TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ llvm::Optional<unsigned> NumExpansions) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
- TypeSourceInfo *NewDI = getDerived().TransformType(OldDI);
+ TypeSourceInfo *NewDI = 0;
+
+ if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) {
+    // If we're substituting into a pack expansion type and we know the
+    // length we want to expand to, just substitute for the pattern.
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ PackExpansionTypeLoc OldExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
+
+ TypeLocBuilder TLB;
+ TypeLoc NewTL = OldDI->getTypeLoc();
+ TLB.reserve(NewTL.getFullDataSize());
+
+ QualType Result = getDerived().TransformType(TLB,
+ OldExpansionTL.getPatternLoc());
+ if (Result.isNull())
+ return 0;
+
+ Result = RebuildPackExpansionType(Result,
+ OldExpansionTL.getPatternLoc().getSourceRange(),
+ OldExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (Result.isNull())
+ return 0;
+
+ PackExpansionTypeLoc NewExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(Result);
+ NewExpansionTL.setEllipsisLoc(OldExpansionTL.getEllipsisLoc());
+ NewDI = TLB.getTypeSourceInfo(SemaRef.Context, Result);
+ } else
+ NewDI = getDerived().TransformType(OldDI);
if (!NewDI)
return 0;
@@ -2863,75 +3575,226 @@ TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm) {
template<typename Derived>
bool TreeTransform<Derived>::
- TransformFunctionTypeParams(FunctionProtoTypeLoc TL,
- llvm::SmallVectorImpl<QualType> &PTypes,
- llvm::SmallVectorImpl<ParmVarDecl*> &PVars) {
- FunctionProtoType *T = TL.getTypePtr();
-
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
- ParmVarDecl *OldParm = TL.getArg(i);
-
- QualType NewType;
- ParmVarDecl *NewParm;
-
- if (OldParm) {
- NewParm = getDerived().TransformFunctionTypeParam(OldParm);
+ TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
+ llvm::SmallVectorImpl<QualType> &OutParamTypes,
+ llvm::SmallVectorImpl<ParmVarDecl*> *PVars) {
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (ParmVarDecl *OldParm = Params[i]) {
+ llvm::Optional<unsigned> NumExpansions;
+ if (OldParm->isParameterPack()) {
+ // We have a function parameter pack that may need to be expanded.
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+
+ // Find the parameter packs that could be expanded.
+ TypeLoc TL = OldParm->getTypeSourceInfo()->getTypeLoc();
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(TL);
+ TypeLoc Pattern = ExpansionTL.getPatternLoc();
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = ExpansionTL.getTypePtr()->getNumExpansions();
+ NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ getDerived().ExpandingFunctionParameterPack(OldParm);
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ OrigNumExpansions);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ OrigNumExpansions);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ }
+
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm,
+ NumExpansions);
if (!NewParm)
return true;
- NewType = NewParm->getType();
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ continue;
+ }
// Deal with the possibility that we don't have a parameter
// declaration for this parameter.
- } else {
- NewParm = 0;
-
- QualType OldType = T->getArgType(i);
- NewType = getDerived().TransformType(OldType);
- if (NewType.isNull())
+ QualType OldType = ParamTypes[i];
+ bool IsPackExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(OldType)) {
+ // We have a function parameter pack that may need to be expanded.
+ QualType Pattern = Expansion->getPattern();
+ llvm::SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ if (getDerived().TryExpandParameterPacks(Loc, SourceRange(),
+ Unexpanded.data(),
+ Unexpanded.size(),
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
+ }
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ OldType = Expansion->getPattern();
+ IsPackExpansion = true;
}
+
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ QualType NewType = getDerived().TransformType(OldType);
+ if (NewType.isNull())
+ return true;
- PTypes.push_back(NewType);
- PVars.push_back(NewParm);
+ if (IsPackExpansion)
+ NewType = getSema().Context.getPackExpansionType(NewType,
+ NumExpansions);
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
}
return false;
}
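// For illustration (hypothetical): instantiating
//   template<typename ...Ts> void f(Ts ...params);
// with Ts = {int, float} expands the single parameter pack 'params'
// into two separate parameters, of type 'int' and 'float', via the
// elementwise expansion above.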
template<typename Derived>
QualType
TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- QualType ObjectType) {
+ FunctionProtoTypeLoc TL) {
// Transform the parameters and return type.
//
// We instantiate in source order, with the return type first followed by
// the parameters, because users tend to expect this (even if they shouldn't
// rely on it!).
//
- // FIXME: When we implement late-specified return types, we'll need to
- // instantiate the return tpe *after* the parameter types in that case,
- // since the return type can then refer to the parameters themselves (via
- // decltype, sizeof, etc.).
+ // When the function has a trailing return type, we instantiate the
+ // parameters before the return type, since the return type can then refer
+ // to the parameters themselves (via decltype, sizeof, etc.).
+ //
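+  // For illustration (hypothetical example):
+  //   template<typename ...Ts> auto f(Ts *...ts) -> decltype(g(ts...));
+  // The trailing return type names the parameter pack 'ts', so the
+  // parameters must already be transformed when 'decltype(g(ts...))'
+  // is instantiated.
+  //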
llvm::SmallVector<QualType, 4> ParamTypes;
llvm::SmallVector<ParmVarDecl*, 4> ParamDecls;
- FunctionProtoType *T = TL.getTypePtr();
- QualType ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
- if (ResultType.isNull())
- return QualType();
+ const FunctionProtoType *T = TL.getTypePtr();
+
+ QualType ResultType;
+
+ if (TL.getTrailingReturn()) {
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ TL.getParmArray(),
+ TL.getNumArgs(),
+ TL.getTypePtr()->arg_type_begin(),
+ ParamTypes, &ParamDecls))
+ return QualType();
+
+ ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+ if (ResultType.isNull())
+ return QualType();
+ }
+ else {
+ ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+ if (ResultType.isNull())
+ return QualType();
+
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ TL.getParmArray(),
+ TL.getNumArgs(),
+ TL.getTypePtr()->arg_type_begin(),
+ ParamTypes, &ParamDecls))
+ return QualType();
+ }
- if (getDerived().TransformFunctionTypeParams(TL, ParamTypes, ParamDecls))
- return QualType();
-
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
ResultType != T->getResultType() ||
+ T->getNumArgs() != ParamTypes.size() ||
!std::equal(T->arg_type_begin(), T->arg_type_end(), ParamTypes.begin())) {
Result = getDerived().RebuildFunctionProtoType(ResultType,
ParamTypes.data(),
ParamTypes.size(),
T->isVariadic(),
T->getTypeQuals(),
+ T->getRefQualifier(),
T->getExtInfo());
if (Result.isNull())
return QualType();
@@ -2940,6 +3803,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
NewTL.setLParenLoc(TL.getLParenLoc());
NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setTrailingReturn(TL.getTrailingReturn());
for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i)
NewTL.setArg(i, ParamDecls[i]);
@@ -2949,9 +3813,8 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
TypeLocBuilder &TLB,
- FunctionNoProtoTypeLoc TL,
- QualType ObjectType) {
- FunctionNoProtoType *T = TL.getTypePtr();
+ FunctionNoProtoTypeLoc TL) {
+ const FunctionNoProtoType *T = TL.getTypePtr();
QualType ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
if (ResultType.isNull())
return QualType();
@@ -2964,15 +3827,15 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
NewTL.setLParenLoc(TL.getLParenLoc());
NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setTrailingReturn(false);
return Result;
}
template<typename Derived> QualType
TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
- UnresolvedUsingTypeLoc TL,
- QualType ObjectType) {
- UnresolvedUsingType *T = TL.getTypePtr();
+ UnresolvedUsingTypeLoc TL) {
+ const UnresolvedUsingType *T = TL.getTypePtr();
Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
if (!D)
return QualType();
@@ -2994,9 +3857,8 @@ TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
- TypedefTypeLoc TL,
- QualType ObjectType) {
- TypedefType *T = TL.getTypePtr();
+ TypedefTypeLoc TL) {
+ const TypedefType *T = TL.getTypePtr();
TypedefDecl *Typedef
= cast_or_null<TypedefDecl>(getDerived().TransformDecl(TL.getNameLoc(),
T->getDecl()));
@@ -3019,8 +3881,7 @@ QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
- TypeOfExprTypeLoc TL,
- QualType ObjectType) {
+ TypeOfExprTypeLoc TL) {
// typeof expressions are not potentially evaluated contexts
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
@@ -3031,7 +3892,7 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != TL.getUnderlyingExpr()) {
- Result = getDerived().RebuildTypeOfExprType(E.get());
+ Result = getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc());
if (Result.isNull())
return QualType();
}
@@ -3047,8 +3908,7 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
- TypeOfTypeLoc TL,
- QualType ObjectType) {
+ TypeOfTypeLoc TL) {
TypeSourceInfo* Old_Under_TI = TL.getUnderlyingTInfo();
TypeSourceInfo* New_Under_TI = getDerived().TransformType(Old_Under_TI);
if (!New_Under_TI)
@@ -3072,9 +3932,8 @@ QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
- DecltypeTypeLoc TL,
- QualType ObjectType) {
- DecltypeType *T = TL.getTypePtr();
+ DecltypeTypeLoc TL) {
+ const DecltypeType *T = TL.getTypePtr();
// decltype expressions are not potentially evaluated contexts
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
@@ -3086,7 +3945,7 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != T->getUnderlyingExpr()) {
- Result = getDerived().RebuildDecltypeType(E.get());
+ Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc());
if (Result.isNull())
return QualType();
}
@@ -3099,10 +3958,34 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
}
template<typename Derived>
+QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
+ AutoTypeLoc TL) {
+ const AutoType *T = TL.getTypePtr();
+ QualType OldDeduced = T->getDeducedType();
+ QualType NewDeduced;
+ if (!OldDeduced.isNull()) {
+ NewDeduced = getDerived().TransformType(OldDeduced);
+ if (NewDeduced.isNull())
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced) {
+ Result = getDerived().RebuildAutoType(NewDeduced);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
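+// For illustration (hypothetical): given
+//   auto x = make(t);   // deduced type depends on 't'
+// any already-deduced type is transformed above, and the AutoType is
+// rebuilt when that deduced type changes.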
+
+template<typename Derived>
QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
- RecordTypeLoc TL,
- QualType ObjectType) {
- RecordType *T = TL.getTypePtr();
+ RecordTypeLoc TL) {
+ const RecordType *T = TL.getTypePtr();
RecordDecl *Record
= cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(),
T->getDecl()));
@@ -3125,9 +4008,8 @@ QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
- EnumTypeLoc TL,
- QualType ObjectType) {
- EnumType *T = TL.getTypePtr();
+ EnumTypeLoc TL) {
+ const EnumType *T = TL.getTypePtr();
EnumDecl *Enum
= cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(),
T->getDecl()));
@@ -3151,8 +4033,7 @@ QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformInjectedClassNameType(
TypeLocBuilder &TLB,
- InjectedClassNameTypeLoc TL,
- QualType ObjectType) {
+ InjectedClassNameTypeLoc TL) {
Decl *D = getDerived().TransformDecl(TL.getNameLoc(),
TL.getTypePtr()->getDecl());
if (!D) return QualType();
@@ -3162,73 +4043,122 @@ QualType TreeTransform<Derived>::TransformInjectedClassNameType(
return T;
}
-
template<typename Derived>
QualType TreeTransform<Derived>::TransformTemplateTypeParmType(
TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL,
- QualType ObjectType) {
+ TemplateTypeParmTypeLoc TL) {
return TransformTypeSpecType(TLB, TL);
}
template<typename Derived>
QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
TypeLocBuilder &TLB,
- SubstTemplateTypeParmTypeLoc TL,
- QualType ObjectType) {
+ SubstTemplateTypeParmTypeLoc TL) {
return TransformTypeSpecType(TLB, TL);
}
template<typename Derived>
-QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
- const TemplateSpecializationType *TST,
- QualType ObjectType) {
- // FIXME: this entire method is a temporary workaround; callers
- // should be rewritten to provide real type locs.
-
- // Fake up a TemplateSpecializationTypeLoc.
- TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc TL
- = TLB.push<TemplateSpecializationTypeLoc>(QualType(TST, 0));
-
- SourceLocation BaseLoc = getDerived().getBaseLocation();
-
- TL.setTemplateNameLoc(BaseLoc);
- TL.setLAngleLoc(BaseLoc);
- TL.setRAngleLoc(BaseLoc);
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
- const TemplateArgument &TA = TST->getArg(i);
- TemplateArgumentLoc TAL;
- getDerived().InventTemplateArgumentLoc(TA, TAL);
- TL.setArgLocInfo(i, TAL.getLocInfo());
- }
-
- TypeLocBuilder IgnoredTLB;
- return TransformTemplateSpecializationType(IgnoredTLB, TL, ObjectType);
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
}
-
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
TypeLocBuilder &TLB,
- TemplateSpecializationTypeLoc TL,
- QualType ObjectType) {
+ TemplateSpecializationTypeLoc TL) {
const TemplateSpecializationType *T = TL.getTypePtr();
TemplateName Template
- = getDerived().TransformTemplateName(T->getTemplateName(), ObjectType);
+ = getDerived().TransformTemplateName(T->getTemplateName());
if (Template.isNull())
return QualType();
+ return getDerived().TransformTemplateSpecializationType(TLB, TL, Template);
+}
+
+namespace {
+ /// \brief Simple iterator that traverses the template arguments in a
+ /// container that provides a \c getArgLoc() member function.
+ ///
+ /// This iterator is intended to be used with the iterator form of
+ /// \c TreeTransform<Derived>::TransformTemplateArguments().
+ template<typename ArgLocContainer>
+ class TemplateArgumentLocContainerIterator {
+ ArgLocContainer *Container;
+ unsigned Index;
+
+ public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef int difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const {
+ return &Arg;
+ }
+ };
+
+ TemplateArgumentLocContainerIterator() {}
+
+ TemplateArgumentLocContainerIterator(ArgLocContainer &Container,
+ unsigned Index)
+ : Container(&Container), Index(Index) { }
+
+ TemplateArgumentLocContainerIterator &operator++() {
+ ++Index;
+ return *this;
+ }
+
+ TemplateArgumentLocContainerIterator operator++(int) {
+ TemplateArgumentLocContainerIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ TemplateArgumentLoc operator*() const {
+ return Container->getArgLoc(Index);
+ }
+
+ pointer operator->() const {
+ return pointer(Container->getArgLoc(Index));
+ }
+
+ friend bool operator==(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return X.Container == Y.Container && X.Index == Y.Index;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return !(X == Y);
+ }
+ };
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template) {
TemplateArgumentListInfo NewTemplateArgs;
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
-
- for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(TL.getArgLoc(i), Loc))
- return QualType();
- NewTemplateArgs.addArgument(Loc);
- }
+ typedef TemplateArgumentLocContainerIterator<TemplateSpecializationTypeLoc>
+ ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
// FIXME: maybe don't rebuild if all the template arguments are the same.
@@ -3253,46 +4183,28 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
template<typename Derived>
QualType
TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
- ElaboratedTypeLoc TL,
- QualType ObjectType) {
- ElaboratedType *T = TL.getTypePtr();
+ ElaboratedTypeLoc TL) {
+ const ElaboratedType *T = TL.getTypePtr();
NestedNameSpecifier *NNS = 0;
// NOTE: the qualifier in an ElaboratedType is optional.
if (T->getQualifier() != 0) {
NNS = getDerived().TransformNestedNameSpecifier(T->getQualifier(),
- TL.getQualifierRange(),
- ObjectType);
+ TL.getQualifierRange());
if (!NNS)
return QualType();
}
- QualType NamedT;
- // FIXME: this test is meant to workaround a problem (failing assertion)
- // occurring if directly executing the code in the else branch.
- if (isa<TemplateSpecializationTypeLoc>(TL.getNamedTypeLoc())) {
- TemplateSpecializationTypeLoc OldNamedTL
- = cast<TemplateSpecializationTypeLoc>(TL.getNamedTypeLoc());
- const TemplateSpecializationType* OldTST
- = OldNamedTL.getType()->template getAs<TemplateSpecializationType>();
- NamedT = TransformTemplateSpecializationType(OldTST, ObjectType);
- if (NamedT.isNull())
- return QualType();
- TemplateSpecializationTypeLoc NewNamedTL
- = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- NewNamedTL.copy(OldNamedTL);
- }
- else {
- NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
- if (NamedT.isNull())
- return QualType();
- }
+ QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
+ if (NamedT.isNull())
+ return QualType();
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
NNS != T->getQualifier() ||
NamedT != T->getNamedType()) {
- Result = getDerived().RebuildElaboratedType(T->getKeyword(), NNS, NamedT);
+ Result = getDerived().RebuildElaboratedType(TL.getKeywordLoc(),
+ T->getKeyword(), NNS, NamedT);
if (Result.isNull())
return QualType();
}
@@ -3305,15 +4217,72 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
}
template<typename Derived>
+QualType TreeTransform<Derived>::TransformAttributedType(
+ TypeLocBuilder &TLB,
+ AttributedTypeLoc TL) {
+ const AttributedType *oldType = TL.getTypePtr();
+ QualType modifiedType = getDerived().TransformType(TLB, TL.getModifiedLoc());
+ if (modifiedType.isNull())
+ return QualType();
+
+ QualType result = TL.getType();
+
+ // FIXME: dependent operand expressions?
+ if (getDerived().AlwaysRebuild() ||
+ modifiedType != oldType->getModifiedType()) {
+ // TODO: this is really lame; we should really be rebuilding the
+ // equivalent type from first principles.
+ QualType equivalentType
+ = getDerived().TransformType(oldType->getEquivalentType());
+ if (equivalentType.isNull())
+ return QualType();
+ result = SemaRef.Context.getAttributedType(oldType->getAttrKind(),
+ modifiedType,
+ equivalentType);
+ }
+
+ AttributedTypeLoc newTL = TLB.push<AttributedTypeLoc>(result);
+ newTL.setAttrNameLoc(TL.getAttrNameLoc());
+ if (TL.hasAttrOperand())
+ newTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ if (TL.hasAttrExprOperand())
+ newTL.setAttrExprOperand(TL.getAttrExprOperand());
+ else if (TL.hasAttrEnumOperand())
+ newTL.setAttrEnumOperandLoc(TL.getAttrEnumOperandLoc());
+
+ return result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
+ ParenTypeLoc TL) {
+ QualType Inner = getDerived().TransformType(TLB, TL.getInnerLoc());
+ if (Inner.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Inner != TL.getInnerLoc().getType()) {
+ Result = getDerived().RebuildParenType(Inner);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ParenTypeLoc NewTL = TLB.push<ParenTypeLoc>(Result);
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ return Result;
+}
+
+template<typename Derived>
QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
- DependentNameTypeLoc TL,
- QualType ObjectType) {
- DependentNameType *T = TL.getTypePtr();
+ DependentNameTypeLoc TL) {
+ const DependentNameType *T = TL.getTypePtr();
NestedNameSpecifier *NNS
= getDerived().TransformNestedNameSpecifier(T->getQualifier(),
- TL.getQualifierRange(),
- ObjectType);
+ TL.getQualifierRange());
if (!NNS)
return QualType();
@@ -3345,34 +4314,44 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::
TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
- DependentTemplateSpecializationTypeLoc TL,
- QualType ObjectType) {
- DependentTemplateSpecializationType *T = TL.getTypePtr();
+ DependentTemplateSpecializationTypeLoc TL) {
+ const DependentTemplateSpecializationType *T = TL.getTypePtr();
NestedNameSpecifier *NNS
= getDerived().TransformNestedNameSpecifier(T->getQualifier(),
- TL.getQualifierRange(),
- ObjectType);
+ TL.getQualifierRange());
if (!NNS)
return QualType();
+ return getDerived()
+ .TransformDependentTemplateSpecializationType(TLB, TL, NNS);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifier *NNS) {
+ const DependentTemplateSpecializationType *T = TL.getTypePtr();
+
TemplateArgumentListInfo NewTemplateArgs;
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+
+ typedef TemplateArgumentLocContainerIterator<
+ DependentTemplateSpecializationTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
- for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(TL.getArgLoc(I), Loc))
- return QualType();
- NewTemplateArgs.addArgument(Loc);
- }
-
- QualType Result = getDerived().RebuildDependentTemplateSpecializationType(
- T->getKeyword(),
- NNS,
- T->getIdentifier(),
- TL.getNameLoc(),
- NewTemplateArgs);
+ QualType Result
+ = getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(),
+ NNS,
+ TL.getQualifierRange(),
+ T->getIdentifier(),
+ TL.getNameLoc(),
+ NewTemplateArgs);
if (Result.isNull())
return QualType();
@@ -3399,10 +4378,33 @@ QualType TreeTransform<Derived>::
}
template<typename Derived>
+QualType TreeTransform<Derived>::TransformPackExpansionType(TypeLocBuilder &TLB,
+ PackExpansionTypeLoc TL) {
+ QualType Pattern
+ = getDerived().TransformType(TLB, TL.getPatternLoc());
+ if (Pattern.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Pattern != TL.getPatternLoc().getType()) {
+ Result = getDerived().RebuildPackExpansionType(Pattern,
+ TL.getPatternLoc().getSourceRange(),
+ TL.getEllipsisLoc(),
+ TL.getTypePtr()->getNumExpansions());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ PackExpansionTypeLoc NewT = TLB.push<PackExpansionTypeLoc>(Result);
+ NewT.setEllipsisLoc(TL.getEllipsisLoc());
+ return Result;
+}
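+// For illustration (hypothetical): in
+//   template<typename ...Ts> void f(std::tuple<Ts...> t);
+// the template argument 'Ts...' is a PackExpansionType whose pattern is
+// 'Ts'; the pattern is transformed above and the expansion is rebuilt
+// around the result.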
+
+template<typename Derived>
QualType
TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
- ObjCInterfaceTypeLoc TL,
- QualType ObjectType) {
+ ObjCInterfaceTypeLoc TL) {
// ObjCInterfaceType is never dependent.
TLB.pushFullCopy(TL);
return TL.getType();
@@ -3411,8 +4413,7 @@ TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
- ObjCObjectTypeLoc TL,
- QualType ObjectType) {
+ ObjCObjectTypeLoc TL) {
// ObjCObjectType is never dependent.
TLB.pushFullCopy(TL);
return TL.getType();
@@ -3421,8 +4422,7 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
TreeTransform<Derived>::TransformObjCObjectPointerType(TypeLocBuilder &TLB,
- ObjCObjectPointerTypeLoc TL,
- QualType ObjectType) {
+ ObjCObjectPointerTypeLoc TL) {
// ObjCObjectPointerType is never dependent.
TLB.pushFullCopy(TL);
return TL.getType();
@@ -3434,7 +4434,7 @@ TreeTransform<Derived>::TransformObjCObjectPointerType(TypeLocBuilder &TLB,
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformNullStmt(NullStmt *S) {
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
}
template<typename Derived>
@@ -3473,7 +4473,7 @@ TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
if (!getDerived().AlwaysRebuild() &&
!SubStmtChanged)
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildCompoundStmt(S->getLBracLoc(),
move_arg(Statements),
@@ -3540,9 +4540,15 @@ TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
if (SubStmt.isInvalid())
return StmtError();
+ Decl *LD = getDerived().TransformDecl(S->getDecl()->getLocation(),
+ S->getDecl());
+ if (!LD)
+ return StmtError();
+
// FIXME: Pass the real colon location in.
- SourceLocation ColonLoc = SemaRef.PP.getLocForEndOfToken(S->getIdentLoc());
- return getDerived().RebuildLabelStmt(S->getIdentLoc(), S->getID(), ColonLoc,
+ return getDerived().RebuildLabelStmt(S->getIdentLoc(),
+ cast<LabelDecl>(LD), SourceLocation(),
SubStmt.get());
}
@@ -3568,9 +4574,8 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
// Convert the condition to a boolean value.
if (S->getCond()) {
- ExprResult CondE = getSema().ActOnBooleanCondition(0,
- S->getIfLoc(),
- Cond.get());
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getIfLoc(),
+ Cond.get());
if (CondE.isInvalid())
return StmtError();
@@ -3597,7 +4602,7 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
ConditionVar == S->getConditionVariable() &&
Then.get() == S->getThen() &&
Else.get() == S->getElse())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildIfStmt(S->getIfLoc(), FullCond, ConditionVar,
Then.get(),
@@ -3664,9 +4669,8 @@ TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
if (S->getCond()) {
// Convert the condition to a boolean value.
- ExprResult CondE = getSema().ActOnBooleanCondition(0,
- S->getWhileLoc(),
- Cond.get());
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getWhileLoc(),
+ Cond.get());
if (CondE.isInvalid())
return StmtError();
Cond = CondE;
@@ -3708,7 +4712,7 @@ TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
if (!getDerived().AlwaysRebuild() &&
Cond.get() == S->getCond() &&
Body.get() == S->getBody())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildDoStmt(S->getDoLoc(), Body.get(), S->getWhileLoc(),
/*FIXME:*/S->getWhileLoc(), Cond.get(),
@@ -3742,9 +4746,8 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
if (S->getCond()) {
// Convert the condition to a boolean value.
- ExprResult CondE = getSema().ActOnBooleanCondition(0,
- S->getForLoc(),
- Cond.get());
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getForLoc(),
+ Cond.get());
if (CondE.isInvalid())
return StmtError();
@@ -3775,7 +4778,7 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
FullCond.get() == S->getCond() &&
Inc.get() == S->getInc() &&
Body.get() == S->getBody())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildForStmt(S->getForLoc(), S->getLParenLoc(),
Init.get(), FullCond, ConditionVar,
@@ -3785,9 +4788,14 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformGotoStmt(GotoStmt *S) {
+ Decl *LD = getDerived().TransformDecl(S->getLabel()->getLocation(),
+ S->getLabel());
+ if (!LD)
+ return StmtError();
+
// Goto statements must always be rebuilt, to resolve the label.
return getDerived().RebuildGotoStmt(S->getGotoLoc(), S->getLabelLoc(),
- S->getLabel());
+ cast<LabelDecl>(LD));
}
template<typename Derived>
@@ -3799,7 +4807,7 @@ TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) {
if (!getDerived().AlwaysRebuild() &&
Target.get() == S->getTarget())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildIndirectGotoStmt(S->getGotoLoc(), S->getStarLoc(),
Target.get());
@@ -3808,13 +4816,13 @@ TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformContinueStmt(ContinueStmt *S) {
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
}
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformBreakStmt(BreakStmt *S) {
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
}
template<typename Derived>
@@ -3848,7 +4856,7 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
}
if (!getDerived().AlwaysRebuild() && !DeclChanged)
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildDeclStmt(Decls.data(), Decls.size(),
S->getStartLoc(), S->getEndLoc());
@@ -3856,13 +4864,6 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
template<typename Derived>
StmtResult
-TreeTransform<Derived>::TransformSwitchCase(SwitchCase *S) {
- assert(false && "SwitchCase is abstract and cannot be transformed");
- return SemaRef.Owned(S->Retain());
-}
-
-template<typename Derived>
-StmtResult
TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
ASTOwningVector<Expr*> Constraints(getSema());
@@ -3879,7 +4880,7 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
Names.push_back(S->getOutputIdentifier(I));
// No need to transform the constraint literal.
- Constraints.push_back(S->getOutputConstraintLiteral(I)->Retain());
+ Constraints.push_back(S->getOutputConstraintLiteral(I));
// Transform the output expr.
Expr *OutputExpr = S->getOutputExpr(I);
@@ -3897,7 +4898,7 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
Names.push_back(S->getInputIdentifier(I));
// No need to transform the constraint literal.
- Constraints.push_back(S->getInputConstraintLiteral(I)->Retain());
+ Constraints.push_back(S->getInputConstraintLiteral(I));
// Transform the input expr.
Expr *InputExpr = S->getInputExpr(I);
@@ -3911,11 +4912,11 @@ TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
}
if (!getDerived().AlwaysRebuild() && !ExprsChanged)
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
// Go through the clobbers.
for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I)
- Clobbers.push_back(S->getClobber(I)->Retain());
+ Clobbers.push_back(S->getClobber(I));
// No need to transform the asm string literal.
AsmString = SemaRef.Owned(S->getAsmString());
@@ -3968,7 +4969,7 @@ TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
TryBody.get() == S->getTryBody() &&
!AnyCatchChanged &&
Finally.get() == S->getFinallyStmt())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
// Build a new statement.
return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(),
@@ -4022,7 +5023,7 @@ TreeTransform<Derived>::TransformObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
// If nothing changed, just retain this statement.
if (!getDerived().AlwaysRebuild() &&
Body.get() == S->getFinallyBody())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
// Build a new statement.
return getDerived().RebuildObjCAtFinallyStmt(S->getAtFinallyLoc(),
@@ -4041,7 +5042,7 @@ TreeTransform<Derived>::TransformObjCAtThrowStmt(ObjCAtThrowStmt *S) {
if (!getDerived().AlwaysRebuild() &&
Operand.get() == S->getThrowExpr())
- return getSema().Owned(S->Retain());
+ return getSema().Owned(S);
return getDerived().RebuildObjCAtThrowStmt(S->getThrowLoc(), Operand.get());
}
@@ -4064,7 +5065,7 @@ TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
if (!getDerived().AlwaysRebuild() &&
Object.get() == S->getSynchExpr() &&
Body.get() == S->getSynchBody())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
// Build a new statement.
return getDerived().RebuildObjCAtSynchronizedStmt(S->getAtSynchronizedLoc(),
@@ -4095,7 +5096,7 @@ TreeTransform<Derived>::TransformObjCForCollectionStmt(
Element.get() == S->getElement() &&
Collection.get() == S->getCollection() &&
Body.get() == S->getBody())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
// Build a new statement.
return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(),
@@ -4114,20 +5115,14 @@ TreeTransform<Derived>::TransformCXXCatchStmt(CXXCatchStmt *S) {
VarDecl *Var = 0;
if (S->getExceptionDecl()) {
VarDecl *ExceptionDecl = S->getExceptionDecl();
- TemporaryBase Rebase(*this, ExceptionDecl->getLocation(),
- ExceptionDecl->getDeclName());
-
- QualType T = getDerived().TransformType(ExceptionDecl->getType());
- if (T.isNull())
+ TypeSourceInfo *T = getDerived().TransformType(
+ ExceptionDecl->getTypeSourceInfo());
+ if (!T)
return StmtError();
- Var = getDerived().RebuildExceptionDecl(ExceptionDecl,
- T,
- ExceptionDecl->getTypeSourceInfo(),
+ Var = getDerived().RebuildExceptionDecl(ExceptionDecl, T,
ExceptionDecl->getIdentifier(),
- ExceptionDecl->getLocation(),
- /*FIXME: Inaccurate*/
- SourceRange(ExceptionDecl->getLocation()));
+ ExceptionDecl->getLocation());
if (!Var || Var->isInvalidDecl())
return StmtError();
}
@@ -4140,7 +5135,7 @@ TreeTransform<Derived>::TransformCXXCatchStmt(CXXCatchStmt *S) {
if (!getDerived().AlwaysRebuild() &&
!Var &&
Handler.get() == S->getHandlerBlock())
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildCXXCatchStmt(S->getCatchLoc(),
Var,
@@ -4172,7 +5167,7 @@ TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
if (!getDerived().AlwaysRebuild() &&
TryBlock.get() == S->getTryBlock() &&
!HandlerChanged)
- return SemaRef.Owned(S->Retain());
+ return SemaRef.Owned(S);
return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(),
move_arg(Handlers));
@@ -4184,7 +5179,7 @@ TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
@@ -4221,7 +5216,7 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
// FIXME: this is a bit instantiation-specific.
SemaRef.MarkDeclarationReferenced(E->getLocation(), ND);
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
TemplateArgumentListInfo TransArgs, *TemplateArgs = 0;
@@ -4229,12 +5224,10 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
TemplateArgs = &TransArgs;
TransArgs.setLAngleLoc(E->getLAngleLoc());
TransArgs.setRAngleLoc(E->getRAngleLoc());
- for (unsigned I = 0, N = E->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(E->getTemplateArgs()[I], Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
}
return getDerived().RebuildDeclRefExpr(Qualifier, E->getQualifierRange(),
@@ -4244,31 +5237,31 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformIntegerLiteral(IntegerLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformFloatingLiteral(FloatingLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformImaginaryLiteral(ImaginaryLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformStringLiteral(StringLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
@@ -4279,7 +5272,7 @@ TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) {
return ExprError();
if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildParenExpr(SubExpr.get(), E->getLParen(),
E->getRParen());
@@ -4293,7 +5286,7 @@ TreeTransform<Derived>::TransformUnaryOperator(UnaryOperator *E) {
return ExprError();
if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildUnaryOperator(E->getOperatorLoc(),
E->getOpcode(),
@@ -4358,7 +5351,7 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Type == E->getTypeSourceInfo() &&
!ExprChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// Build a new offsetof expression.
return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type,
@@ -4368,6 +5361,14 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
+ assert(getDerived().AlreadyTransformed(E->getType()) &&
+ "opaque value expression requires transformation");
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
if (E->isArgumentType()) {
TypeSourceInfo *OldT = E->getArgumentTypeInfo();
@@ -4377,7 +5378,7 @@ TreeTransform<Derived>::TransformSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
return ExprError();
if (!getDerived().AlwaysRebuild() && OldT == NewT)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildSizeOfAlignOf(NewT, E->getOperatorLoc(),
E->isSizeOf(),
@@ -4396,7 +5397,7 @@ TreeTransform<Derived>::TransformSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
return ExprError();
if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
return getDerived().RebuildSizeOfAlignOf(SubExpr.get(), E->getOperatorLoc(),
@@ -4419,7 +5420,7 @@ TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
if (!getDerived().AlwaysRebuild() &&
LHS.get() == E->getLHS() &&
RHS.get() == E->getRHS())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildArraySubscriptExpr(LHS.get(),
/*FIXME:*/E->getLHS()->getLocStart(),
@@ -4438,31 +5439,20 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
// Transform arguments.
bool ArgChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- llvm::SmallVector<SourceLocation, 4> FakeCommaLocs;
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
- ExprResult Arg = getDerived().TransformExpr(E->getArg(I));
- if (Arg.isInvalid())
- return ExprError();
-
- // FIXME: Wrong source location information for the ','.
- FakeCommaLocs.push_back(
- SemaRef.PP.getLocForEndOfToken(E->getArg(I)->getSourceRange().getEnd()));
-
- ArgChanged = ArgChanged || Arg.get() != E->getArg(I);
- Args.push_back(Arg.get());
- }
-
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
if (!getDerived().AlwaysRebuild() &&
Callee.get() == E->getCallee() &&
!ArgChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc
= ((Expr *)Callee.get())->getSourceRange().getBegin();
return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
move_arg(Args),
- FakeCommaLocs.data(),
E->getRParenLoc());
}
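This hunk is the first of several that replace an open-coded per-argument loop with one TransformExprs call, which fills the output vector and reports through an out-parameter whether anything changed. A compact standalone sketch of that helper's shape (mock Expr and transformOne, not the Clang signature):

#include <vector>

// Mock element transform; identity means "unchanged" in this sketch.
struct Expr {};
static Expr *transformOne(Expr *e) { return e; }

// TransformExprs-style helper: fills Outputs, sets *ArgChanged, and returns
// true on failure so callers can write `if (transformExprs(...)) return ...`.
static bool transformExprs(Expr *const *Inputs, unsigned NumInputs,
                           std::vector<Expr *> &Outputs, bool *ArgChanged) {
  for (unsigned I = 0; I != NumInputs; ++I) {
    Expr *New = transformOne(Inputs[I]);
    if (!New)
      return true;                       // error: abort the whole transform
    if (ArgChanged && New != Inputs[I])
      *ArgChanged = true;
    Outputs.push_back(New);
  }
  return false;
}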
@@ -4508,19 +5498,17 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
SemaRef.MarkDeclarationReferenced(E->getMemberLoc(), Member);
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
TemplateArgumentListInfo TransArgs;
if (E->hasExplicitTemplateArgs()) {
TransArgs.setLAngleLoc(E->getLAngleLoc());
TransArgs.setRAngleLoc(E->getRAngleLoc());
- for (unsigned I = 0, N = E->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(E->getTemplateArgs()[I], Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
}
// FIXME: Bogus source location for the operator
@@ -4559,7 +5547,7 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
if (!getDerived().AlwaysRebuild() &&
LHS.get() == E->getLHS() &&
RHS.get() == E->getRHS())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
LHS.get(), RHS.get());
@@ -4573,6 +5561,32 @@ TreeTransform<Derived>::TransformCompoundAssignOperator(
}
template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformBinaryConditionalOperator(BinaryConditionalOperator *e) {
+ // Just rebuild the common and RHS expressions and see whether we
+ // get any changes.
+
+ ExprResult commonExpr = getDerived().TransformExpr(e->getCommon());
+ if (commonExpr.isInvalid())
+ return ExprError();
+
+ ExprResult rhs = getDerived().TransformExpr(e->getFalseExpr());
+ if (rhs.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ commonExpr.get() == e->getCommon() &&
+ rhs.get() == e->getFalseExpr())
+ return SemaRef.Owned(e);
+
+ return getDerived().RebuildConditionalOperator(commonExpr.take(),
+ e->getQuestionLoc(),
+ 0,
+ e->getColonLoc(),
+ rhs.get());
+}
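BinaryConditionalOperator models the GNU `x ?: y` extension, in which the condition doubles as the true branch; that is why the transform above walks only the common expression and the false expression, and passes a null middle operand to RebuildConditionalOperator. A tiny example of the source construct (compiles under the GCC/Clang extension):

// The GNU binary conditional: evaluates `a` once, yields it if nonzero,
// otherwise yields `b` (roughly `int t = a; t ? t : b;`).
int pick(int a, int b) {
  return a ?: b;
}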
+
+template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformConditionalOperator(ConditionalOperator *E) {
ExprResult Cond = getDerived().TransformExpr(E->getCond());
@@ -4591,7 +5605,7 @@ TreeTransform<Derived>::TransformConditionalOperator(ConditionalOperator *E) {
Cond.get() == E->getCond() &&
LHS.get() == E->getLHS() &&
RHS.get() == E->getRHS())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildConditionalOperator(Cond.get(),
E->getQuestionLoc(),
@@ -4611,32 +5625,22 @@ TreeTransform<Derived>::TransformImplicitCastExpr(ImplicitCastExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCStyleCastExpr(CStyleCastExpr *E) {
- TypeSourceInfo *OldT;
- TypeSourceInfo *NewT;
- {
- // FIXME: Source location isn't quite accurate.
- SourceLocation TypeStartLoc
- = SemaRef.PP.getLocForEndOfToken(E->getLParenLoc());
- TemporaryBase Rebase(*this, TypeStartLoc, DeclarationName());
-
- OldT = E->getTypeInfoAsWritten();
- NewT = getDerived().TransformType(OldT);
- if (!NewT)
- return ExprError();
- }
-
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
ExprResult SubExpr
= getDerived().TransformExpr(E->getSubExprAsWritten());
if (SubExpr.isInvalid())
return ExprError();
if (!getDerived().AlwaysRebuild() &&
- OldT == NewT &&
+ Type == E->getTypeInfoAsWritten() &&
SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCStyleCastExpr(E->getLParenLoc(),
- NewT,
+ Type,
E->getRParenLoc(),
SubExpr.get());
}
@@ -4656,7 +5660,7 @@ TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) {
if (!getDerived().AlwaysRebuild() &&
OldT == NewT &&
Init.get() == E->getInitializer())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// Note: the expression type doesn't necessarily match the
// type-as-written, but that's okay, because it should always be
@@ -4676,7 +5680,7 @@ TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// FIXME: Bad source location
SourceLocation FakeOperatorLoc
@@ -4692,17 +5696,12 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
bool InitChanged = false;
ASTOwningVector<Expr*, 4> Inits(SemaRef);
- for (unsigned I = 0, N = E->getNumInits(); I != N; ++I) {
- ExprResult Init = getDerived().TransformExpr(E->getInit(I));
- if (Init.isInvalid())
- return ExprError();
-
- InitChanged = InitChanged || Init.get() != E->getInit(I);
- Inits.push_back(Init.get());
- }
-
+ if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
+ Inits, &InitChanged))
+ return ExprError();
+
if (!getDerived().AlwaysRebuild() && !InitChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildInitList(E->getLBraceLoc(), move_arg(Inits),
E->getRBraceLoc(), E->getType());
@@ -4769,7 +5768,7 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Init.get() == E->getInit() &&
!ExprChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildDesignatedInitExpr(Desig, move_arg(ArrayExprs),
E->getEqualOrColonLoc(),
@@ -4790,7 +5789,7 @@ TreeTransform<Derived>::TransformImplicitValueInitExpr(
if (!getDerived().AlwaysRebuild() &&
T == E->getType())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildImplicitValueInitExpr(T);
}
@@ -4809,7 +5808,7 @@ TreeTransform<Derived>::TransformVAArgExpr(VAArgExpr *E) {
if (!getDerived().AlwaysRebuild() &&
TInfo == E->getWrittenTypeInfo() &&
SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildVAArgExpr(E->getBuiltinLoc(), SubExpr.get(),
TInfo, E->getRParenLoc());
@@ -4820,15 +5819,10 @@ ExprResult
TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
bool ArgumentChanged = false;
ASTOwningVector<Expr*, 4> Inits(SemaRef);
- for (unsigned I = 0, N = E->getNumExprs(); I != N; ++I) {
- ExprResult Init = getDerived().TransformExpr(E->getExpr(I));
- if (Init.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || Init.get() != E->getExpr(I);
- Inits.push_back(Init.get());
- }
-
+ if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits,
+ &ArgumentChanged))
+ return ExprError();
+
return getDerived().RebuildParenListExpr(E->getLParenLoc(),
move_arg(Inits),
E->getRParenLoc());
@@ -4842,8 +5836,13 @@ TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) {
+ Decl *LD = getDerived().TransformDecl(E->getLabel()->getLocation(),
+ E->getLabel());
+ if (!LD)
+ return ExprError();
+
return getDerived().RebuildAddrLabelExpr(E->getAmpAmpLoc(), E->getLabelLoc(),
- E->getLabel());
+ cast<LabelDecl>(LD));
}
template<typename Derived>
@@ -4856,7 +5855,7 @@ TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
if (!getDerived().AlwaysRebuild() &&
SubStmt.get() == E->getSubStmt())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildStmtExpr(E->getLParenLoc(),
SubStmt.get(),
@@ -4865,30 +5864,6 @@ TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformTypesCompatibleExpr(TypesCompatibleExpr *E) {
- TypeSourceInfo *TInfo1;
- TypeSourceInfo *TInfo2;
-
- TInfo1 = getDerived().TransformType(E->getArgTInfo1());
- if (!TInfo1)
- return ExprError();
-
- TInfo2 = getDerived().TransformType(E->getArgTInfo2());
- if (!TInfo2)
- return ExprError();
-
- if (!getDerived().AlwaysRebuild() &&
- TInfo1 == E->getArgTInfo1() &&
- TInfo2 == E->getArgTInfo2())
- return SemaRef.Owned(E->Retain());
-
- return getDerived().RebuildTypesCompatibleExpr(E->getBuiltinLoc(),
- TInfo1, TInfo2,
- E->getRParenLoc());
-}
-
-template<typename Derived>
-ExprResult
TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) {
ExprResult Cond = getDerived().TransformExpr(E->getCond());
if (Cond.isInvalid())
@@ -4906,7 +5881,7 @@ TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) {
Cond.get() == E->getCond() &&
LHS.get() == E->getLHS() &&
RHS.get() == E->getRHS())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildChooseExpr(E->getBuiltinLoc(),
Cond.get(), LHS.get(), RHS.get(),
@@ -4916,7 +5891,7 @@ TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformGNUNullExpr(GNUNullExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
@@ -4946,26 +5921,12 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
// Transform the call arguments.
ASTOwningVector<Expr*> Args(SemaRef);
- llvm::SmallVector<SourceLocation, 4> FakeCommaLocs;
- for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) {
- if (getDerived().DropCallArgument(E->getArg(I)))
- break;
-
- ExprResult Arg = getDerived().TransformExpr(E->getArg(I));
- if (Arg.isInvalid())
- return ExprError();
-
- // FIXME: Poor source location information.
- SourceLocation FakeCommaLoc
- = SemaRef.PP.getLocForEndOfToken(
- static_cast<Expr *>(Arg.get())->getLocEnd());
- FakeCommaLocs.push_back(FakeCommaLoc);
- Args.push_back(Arg.release());
- }
+ if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
+ Args))
+ return ExprError();
return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc,
move_arg(Args),
- FakeCommaLocs.data(),
E->getLocEnd());
}
@@ -5006,7 +5967,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Callee.get() == E->getCallee() &&
First.get() == E->getArg(0) &&
(E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
@@ -5023,30 +5984,53 @@ TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
- TypeSourceInfo *OldT;
- TypeSourceInfo *NewT;
- {
- // FIXME: Source location isn't quite accurate.
- SourceLocation TypeStartLoc
- = SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc());
- TemporaryBase Rebase(*this, TypeStartLoc, DeclarationName());
+TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ // Transform the callee.
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
- OldT = E->getTypeInfoAsWritten();
- NewT = getDerived().TransformType(OldT);
- if (!NewT)
- return ExprError();
- }
+ // Transform exec config.
+ ExprResult EC = getDerived().TransformCallExpr(E->getConfig());
+ if (EC.isInvalid())
+ return ExprError();
+ // Transform arguments.
+ bool ArgChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ !ArgChanged)
+ return SemaRef.Owned(E);
+
+ // FIXME: Wrong source location information for the '('.
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
+ move_arg(Args),
+ E->getRParenLoc(), EC.get());
+}
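TransformCUDAKernelCallExpr is new in this import: a CUDA kernel launch `kernel<<<grid, block>>>(args)` carries its execution configuration as an extra call expression, which is transformed alongside the callee and the ordinary arguments. A minimal mock of that control flow (stand-in types, not Clang's):

// Mock shape of the kernel-call transform: the <<<...>>> config is itself
// a call expression, handled before the ordinary argument list.
struct Expr { Expr *callee; Expr *config; };

static Expr *transformExpr(Expr *e) { return e; }

Expr *transformKernelCall(Expr *e) {
  Expr *callee = transformExpr(e->callee);   // the kernel being launched
  if (!callee)
    return nullptr;
  Expr *config = transformExpr(e->config);   // the grid/block configuration
  if (!config)
    return nullptr;
  // ...then transform the arguments and rebuild the call, as above.
  return e;                                  // identity in this sketch
}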
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
ExprResult SubExpr
= getDerived().TransformExpr(E->getSubExprAsWritten());
if (SubExpr.isInvalid())
return ExprError();
if (!getDerived().AlwaysRebuild() &&
- OldT == NewT &&
+ Type == E->getTypeInfoAsWritten() &&
SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// FIXME: Poor source location information here.
SourceLocation FakeLAngleLoc
@@ -5058,7 +6042,7 @@ TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
return getDerived().RebuildCXXNamedCastExpr(E->getOperatorLoc(),
E->getStmtClass(),
FakeLAngleLoc,
- NewT,
+ Type,
FakeRAngleLoc,
FakeRAngleLoc,
SubExpr.get(),
@@ -5094,16 +6078,9 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
CXXFunctionalCastExpr *E) {
- TypeSourceInfo *OldT;
- TypeSourceInfo *NewT;
- {
- TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName());
-
- OldT = E->getTypeInfoAsWritten();
- NewT = getDerived().TransformType(OldT);
- if (!NewT)
- return ExprError();
- }
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
ExprResult SubExpr
= getDerived().TransformExpr(E->getSubExprAsWritten());
@@ -5111,14 +6088,11 @@ TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
return ExprError();
if (!getDerived().AlwaysRebuild() &&
- OldT == NewT &&
+ Type == E->getTypeInfoAsWritten() &&
SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
- // FIXME: The end of the type's source range is wrong
- return getDerived().RebuildCXXFunctionalCastExpr(
- /*FIXME:*/SourceRange(E->getTypeBeginLoc()),
- NewT,
+ return getDerived().RebuildCXXFunctionalCastExpr(Type,
/*FIXME:*/E->getSubExpr()->getLocStart(),
SubExpr.get(),
E->getRParenLoc());
@@ -5135,7 +6109,7 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
if (!getDerived().AlwaysRebuild() &&
TInfo == E->getTypeOperandSourceInfo())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXTypeidExpr(E->getType(),
E->getLocStart(),
@@ -5155,7 +6129,7 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
if (!getDerived().AlwaysRebuild() &&
SubExpr.get() == E->getExprOperand())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXTypeidExpr(E->getType(),
E->getLocStart(),
@@ -5165,29 +6139,64 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TInfo
+ = getDerived().TransformType(E->getTypeOperandSourceInfo());
+ if (!TInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getTypeOperandSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXTypeidExpr(E->getType(),
+ E->getLocStart(),
+ TInfo,
+ E->getLocEnd());
+ }
+
+ // We don't know whether the expression is potentially evaluated until
+ // after we perform semantic analysis, so the expression is potentially
+ // potentially evaluated.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getExprOperand())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXUuidofExpr(E->getType(),
+ E->getLocStart(),
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXNullPtrLiteralExpr(
CXXNullPtrLiteralExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
- TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
+ DeclContext *DC = getSema().getFunctionLevelDeclContext();
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC);
+ QualType T = MD->getThisType(getSema().Context);
- QualType T = getDerived().TransformType(E->getType());
- if (T.isNull())
- return ExprError();
-
- if (!getDerived().AlwaysRebuild() &&
- T == E->getType())
- return SemaRef.Owned(E->Retain());
+ if (!getDerived().AlwaysRebuild() && T == E->getType())
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
}
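The getDerived() calls threaded through every function here are TreeTransform's CRTP hook: the base template supplies default behavior, and a subclass can statically override any single Transform* or Rebuild* step, as the rewritten TransformCXXThisExpr above relies on when it asks Sema for the enclosing method's `this` type. A minimal standalone CRTP sketch of that dispatch (hypothetical names):

// Minimal CRTP sketch of the getDerived() dispatch pattern.
template <typename DerivedT>
struct TreeWalkerBase {
  DerivedT &getDerived() { return static_cast<DerivedT &>(*this); }

  int transformNode(int n) {
    // Always dispatch through the derived class so overrides take effect.
    return getDerived().rebuildNode(n);
  }
  int rebuildNode(int n) { return n; }        // default: identity
};

struct Doubler : TreeWalkerBase<Doubler> {
  int rebuildNode(int n) { return 2 * n; }    // picked up statically
};

// Doubler().transformNode(21) yields 42 through the base's transformNode.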
@@ -5201,7 +6210,7 @@ TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) {
if (!getDerived().AlwaysRebuild() &&
SubExpr.get() == E->getSubExpr())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get());
}
@@ -5217,27 +6226,25 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Param == E->getParam())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param);
}
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName());
-
- QualType T = getDerived().TransformType(E->getType());
- if (T.isNull())
+TreeTransform<Derived>::TransformCXXScalarValueInitExpr(
+ CXXScalarValueInitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
return ExprError();
-
+
if (!getDerived().AlwaysRebuild() &&
- T == E->getType())
- return SemaRef.Owned(E->Retain());
+ T == E->getTypeSourceInfo())
+ return SemaRef.Owned(E);
- return getDerived().RebuildCXXScalarValueInitExpr(E->getTypeBeginLoc(),
- /*FIXME:*/E->getTypeBeginLoc(),
- T,
+ return getDerived().RebuildCXXScalarValueInitExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
E->getRParenLoc());
}
@@ -5245,9 +6252,9 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Transform the type that we're allocating
- TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
- QualType AllocType = getDerived().TransformType(E->getAllocatedType());
- if (AllocType.isNull())
+ TypeSourceInfo *AllocTypeInfo
+ = getDerived().TransformType(E->getAllocatedTypeSourceInfo());
+ if (!AllocTypeInfo)
return ExprError();
// Transform the size of the array we're allocating (if any).
@@ -5258,28 +6265,16 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Transform the placement arguments (if any).
bool ArgumentChanged = false;
ASTOwningVector<Expr*> PlacementArgs(SemaRef);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
- ExprResult Arg = getDerived().TransformExpr(E->getPlacementArg(I));
- if (Arg.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || Arg.get() != E->getPlacementArg(I);
- PlacementArgs.push_back(Arg.take());
- }
+ if (getDerived().TransformExprs(E->getPlacementArgs(),
+ E->getNumPlacementArgs(), true,
+ PlacementArgs, &ArgumentChanged))
+ return ExprError();
// transform the constructor arguments (if any).
ASTOwningVector<Expr*> ConstructorArgs(SemaRef);
- for (unsigned I = 0, N = E->getNumConstructorArgs(); I != N; ++I) {
- if (getDerived().DropCallArgument(E->getConstructorArg(I)))
- break;
-
- ExprResult Arg = getDerived().TransformExpr(E->getConstructorArg(I));
- if (Arg.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || Arg.get() != E->getConstructorArg(I);
- ConstructorArgs.push_back(Arg.take());
- }
+ if (TransformExprs(E->getConstructorArgs(), E->getNumConstructorArgs(), true,
+ ConstructorArgs, &ArgumentChanged))
+ return ExprError();
// Transform constructor, new operator, and delete operator.
CXXConstructorDecl *Constructor = 0;
@@ -5310,7 +6305,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
}
if (!getDerived().AlwaysRebuild() &&
- AllocType == E->getAllocatedType() &&
+ AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
ArraySize.get() == E->getArraySize() &&
Constructor == E->getConstructor() &&
OperatorNew == E->getOperatorNew() &&
@@ -5324,9 +6319,10 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorNew);
if (OperatorDelete)
SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorDelete);
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
+ QualType AllocType = AllocTypeInfo->getType();
if (!ArraySize.get()) {
// If no array size was specified, but the new expression was
// instantiated with an array type (e.g., "new T" where T is
@@ -5347,11 +6343,12 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
} else if (const DependentSizedArrayType *DepArrayT
= dyn_cast<DependentSizedArrayType>(ArrayT)) {
if (DepArrayT->getSizeExpr()) {
- ArraySize = SemaRef.Owned(DepArrayT->getSizeExpr()->Retain());
+ ArraySize = SemaRef.Owned(DepArrayT->getSizeExpr());
AllocType = DepArrayT->getElementType();
}
}
}
+
return getDerived().RebuildCXXNewExpr(E->getLocStart(),
E->isGlobalNew(),
/*FIXME:*/E->getLocStart(),
@@ -5359,8 +6356,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
/*FIXME:*/E->getLocStart(),
E->getTypeIdParens(),
AllocType,
- /*FIXME:*/E->getLocStart(),
- /*FIXME:*/SourceRange(),
+ AllocTypeInfo,
ArraySize.get(),
/*FIXME:*/E->getLocStart(),
move_arg(ConstructorArgs),
@@ -5391,7 +6387,18 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
// FIXME: instantiation-specific.
if (OperatorDelete)
SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorDelete);
- return SemaRef.Owned(E->Retain());
+
+ if (!E->getArgument()->isTypeDependent()) {
+ QualType Destroyed = SemaRef.Context.getBaseElementType(
+ E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ SemaRef.MarkDeclarationReferenced(E->getLocStart(),
+ SemaRef.LookupDestructor(Record));
+ }
+ }
+
+ return SemaRef.Owned(E);
}
return getDerived().RebuildCXXDeleteExpr(E->getLocStart(),
@@ -5419,17 +6426,21 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
return ExprError();
QualType ObjectType = ObjectTypePtr.get();
- NestedNameSpecifier *Qualifier
- = getDerived().TransformNestedNameSpecifier(E->getQualifier(),
- E->getQualifierRange(),
- ObjectType);
- if (E->getQualifier() && !Qualifier)
- return ExprError();
+ NestedNameSpecifier *Qualifier = E->getQualifier();
+ if (Qualifier) {
+ Qualifier
+ = getDerived().TransformNestedNameSpecifier(E->getQualifier(),
+ E->getQualifierRange(),
+ ObjectType);
+ if (!Qualifier)
+ return ExprError();
+ }
PseudoDestructorTypeStorage Destroyed;
if (E->getDestroyedTypeInfo()) {
TypeSourceInfo *DestroyedTypeInfo
- = getDerived().TransformType(E->getDestroyedTypeInfo(), ObjectType);
+ = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(),
+ ObjectType, 0, Qualifier);
if (!DestroyedTypeInfo)
return ExprError();
Destroyed = DestroyedTypeInfo;
@@ -5462,8 +6473,7 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
TypeSourceInfo *ScopeTypeInfo = 0;
if (E->getScopeTypeInfo()) {
- ScopeTypeInfo = getDerived().TransformType(E->getScopeTypeInfo(),
- ObjectType);
+ ScopeTypeInfo = getDerived().TransformType(E->getScopeTypeInfo());
if (!ScopeTypeInfo)
return ExprError();
}
@@ -5550,12 +6560,10 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
// If we have template arguments, rebuild them, then rebuild the
// templateid expression.
TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc());
- for (unsigned I = 0, N = Old->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(Old->getTemplateArgs()[I], Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
return getDerived().RebuildTemplateIdExpr(SS, R, Old->requiresADL(),
TransArgs);
@@ -5564,29 +6572,43 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
- TemporaryBase Rebase(*this, /*FIXME*/E->getLocStart(), DeclarationName());
-
- QualType T = getDerived().TransformType(E->getQueriedType());
- if (T.isNull())
+ TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
+ if (!T)
return ExprError();
if (!getDerived().AlwaysRebuild() &&
- T == E->getQueriedType())
- return SemaRef.Owned(E->Retain());
-
- // FIXME: Bad location information
- SourceLocation FakeLParenLoc
- = SemaRef.PP.getLocForEndOfToken(E->getLocStart());
+ T == E->getQueriedTypeSourceInfo())
+ return SemaRef.Owned(E);
return getDerived().RebuildUnaryTypeTrait(E->getTrait(),
E->getLocStart(),
- /*FIXME:*/FakeLParenLoc,
T,
E->getLocEnd());
}
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ TypeSourceInfo *LhsT = getDerived().TransformType(E->getLhsTypeSourceInfo());
+ if (!LhsT)
+ return ExprError();
+
+ TypeSourceInfo *RhsT = getDerived().TransformType(E->getRhsTypeSourceInfo());
+ if (!RhsT)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LhsT == E->getLhsTypeSourceInfo() && RhsT == E->getRhsTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildBinaryTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ LhsT, RhsT,
+ E->getLocEnd());
+}
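TransformBinaryTypeTraitExpr is also new here: BinaryTypeTraitExpr covers the two-argument compiler traits such as __is_base_of, which is why two TypeSourceInfos are transformed. A small usage example of the construct this node represents (the builtin is available in GCC, Clang, and MSVC):

// Source-level form of a binary type trait expression.
struct BaseT {};
struct DerivedT : BaseT {};

static_assert(__is_base_of(BaseT, DerivedT), "binary type trait holds");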
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *E) {
NestedNameSpecifier *NNS
@@ -5595,6 +6617,10 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
if (!NNS)
return ExprError();
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
DeclarationNameInfo NameInfo
= getDerived().TransformDeclarationNameInfo(E->getNameInfo());
if (!NameInfo.getName())
@@ -5606,7 +6632,7 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
// Note: it is sufficient to compare the Name component of NameInfo:
// if name has not changed, DNLoc has not changed either.
NameInfo.getName() == E->getDeclName())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildDependentScopeDeclRefExpr(NNS,
E->getQualifierRange(),
@@ -5615,12 +6641,10 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
}
TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
- for (unsigned I = 0, N = E->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(E->getTemplateArgs()[I], Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
return getDerived().RebuildDependentScopeDeclRefExpr(NNS,
E->getQualifierRange(),
@@ -5652,22 +6676,10 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- for (CXXConstructExpr::arg_iterator Arg = E->arg_begin(),
- ArgEnd = E->arg_end();
- Arg != ArgEnd; ++Arg) {
- if (getDerived().DropCallArgument(*Arg)) {
- ArgumentChanged = true;
- break;
- }
-
- ExprResult TransArg = getDerived().TransformExpr(*Arg);
- if (TransArg.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || TransArg.get() != *Arg;
- Args.push_back(TransArg.get());
- }
-
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
if (!getDerived().AlwaysRebuild() &&
T == E->getType() &&
Constructor == E->getConstructor() &&
@@ -5675,14 +6687,15 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
// Mark the constructor as referenced.
// FIXME: Instantiation-specific
SemaRef.MarkDeclarationReferenced(E->getLocStart(), Constructor);
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(),
Constructor, E->isElidable(),
move_arg(Args),
E->requiresZeroInitialization(),
- E->getConstructionKind());
+ E->getConstructionKind(),
+ E->getParenRange());
}
/// \brief Transform a C++ temporary-binding expression.
@@ -5695,25 +6708,23 @@ TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
return getDerived().TransformExpr(E->getSubExpr());
}
-/// \brief Transform a C++ expression that contains temporaries that should
-/// be destroyed after the expression is evaluated.
+/// \brief Transform a C++ expression that contains cleanups that should
+/// be run after the expression is evaluated.
///
-/// Since CXXExprWithTemporaries nodes are implicitly generated, we
+/// Since ExprWithCleanups nodes are implicitly generated, we
/// just transform the subexpression and return that.
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformCXXExprWithTemporaries(
- CXXExprWithTemporaries *E) {
+TreeTransform<Derived>::TransformExprWithCleanups(ExprWithCleanups *E) {
return getDerived().TransformExpr(E->getSubExpr());
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
- CXXTemporaryObjectExpr *E) {
- TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName());
- QualType T = getDerived().TransformType(E->getType());
- if (T.isNull())
+ CXXTemporaryObjectExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
return ExprError();
CXXConstructorDecl *Constructor
@@ -5726,43 +6737,22 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
Args.reserve(E->getNumArgs());
- for (CXXTemporaryObjectExpr::arg_iterator Arg = E->arg_begin(),
- ArgEnd = E->arg_end();
- Arg != ArgEnd; ++Arg) {
- if (getDerived().DropCallArgument(*Arg)) {
- ArgumentChanged = true;
- break;
- }
-
- ExprResult TransArg = getDerived().TransformExpr(*Arg);
- if (TransArg.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || TransArg.get() != *Arg;
- Args.push_back((Expr *)TransArg.release());
- }
+ if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
if (!getDerived().AlwaysRebuild() &&
- T == E->getType() &&
+ T == E->getTypeSourceInfo() &&
Constructor == E->getConstructor() &&
!ArgumentChanged) {
// FIXME: Instantiation-specific
- SemaRef.MarkDeclarationReferenced(E->getTypeBeginLoc(), Constructor);
- return SemaRef.MaybeBindToTemporary(E->Retain());
- }
-
- // FIXME: Bogus location information
- SourceLocation CommaLoc;
- if (Args.size() > 1) {
- Expr *First = (Expr *)Args[0];
- CommaLoc
- = SemaRef.PP.getLocForEndOfToken(First->getSourceRange().getEnd());
+ SemaRef.MarkDeclarationReferenced(E->getLocStart(), Constructor);
+ return SemaRef.MaybeBindToTemporary(E);
}
- return getDerived().RebuildCXXTemporaryObjectExpr(E->getTypeBeginLoc(),
- T,
- /*FIXME:*/E->getTypeBeginLoc(),
+
+ return getDerived().RebuildCXXTemporaryObjectExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
move_arg(Args),
- &CommaLoc,
E->getLocEnd());
}
@@ -5770,38 +6760,26 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *E) {
- TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName());
- QualType T = getDerived().TransformType(E->getTypeAsWritten());
- if (T.isNull())
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
return ExprError();
bool ArgumentChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- llvm::SmallVector<SourceLocation, 8> FakeCommaLocs;
- for (CXXUnresolvedConstructExpr::arg_iterator Arg = E->arg_begin(),
- ArgEnd = E->arg_end();
- Arg != ArgEnd; ++Arg) {
- ExprResult TransArg = getDerived().TransformExpr(*Arg);
- if (TransArg.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || TransArg.get() != *Arg;
- FakeCommaLocs.push_back(
- SemaRef.PP.getLocForEndOfToken((*Arg)->getLocEnd()));
- Args.push_back(TransArg.get());
- }
-
+ Args.reserve(E->arg_size());
+ if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
if (!getDerived().AlwaysRebuild() &&
- T == E->getTypeAsWritten() &&
+ T == E->getTypeSourceInfo() &&
!ArgumentChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// FIXME: we're faking the locations of the commas
- return getDerived().RebuildCXXUnresolvedConstructExpr(E->getTypeBeginLoc(),
- T,
+ return getDerived().RebuildCXXUnresolvedConstructExpr(T,
E->getLParenLoc(),
move_arg(Args),
- FakeCommaLocs.data(),
E->getRParenLoc());
}
@@ -5856,9 +6834,12 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
return ExprError();
}
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
DeclarationNameInfo NameInfo
- = getDerived().TransformDeclarationNameInfo(E->getMemberNameInfo(),
- ObjectType);
+ = getDerived().TransformDeclarationNameInfo(E->getMemberNameInfo());
if (!NameInfo.getName())
return ExprError();
@@ -5871,7 +6852,7 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
Qualifier == E->getQualifier() &&
NameInfo.getName() == E->getMember() &&
FirstQualifierInScope == E->getFirstQualifierFoundInScope())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
BaseType,
@@ -5885,12 +6866,10 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
}
TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
- for (unsigned I = 0, N = E->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(E->getTemplateArgs()[I], Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
BaseType,
@@ -5975,13 +6954,10 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
if (Old->hasExplicitTemplateArgs()) {
TransArgs.setLAngleLoc(Old->getLAngleLoc());
TransArgs.setRAngleLoc(Old->getRAngleLoc());
- for (unsigned I = 0, N = Old->getNumTemplateArgs(); I != N; ++I) {
- TemplateArgumentLoc Loc;
- if (getDerived().TransformTemplateArgument(Old->getTemplateArgs()[I],
- Loc))
- return ExprError();
- TransArgs.addArgument(Loc);
- }
+ if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
}
// FIXME: to do this check properly, we will need to preserve the
@@ -6004,8 +6980,74 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getOperand())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXNoexceptExpr(E->getSourceRange(),SubExpr.get());
+}
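TransformCXXNoexceptExpr handles the C++0x noexcept operator, a compile-time predicate on whether its unevaluated operand can throw. A short example of the construct:

// The noexcept operator never evaluates its operand; it only inspects
// the exception specification of the expression.
void mayThrow();
void neverThrows() noexcept;

static_assert(!noexcept(mayThrow()), "a plain function may throw");
static_assert(noexcept(neverThrows()), "a noexcept function may not");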
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPackExpansionExpr(PackExpansionExpr *E) {
+ ExprResult Pattern = getDerived().TransformExpr(E->getPattern());
+ if (Pattern.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Pattern.get() == E->getPattern())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildPackExpansion(Pattern.get(), E->getEllipsisLoc(),
+ E->getNumExpansions());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
+ // If E is not value-dependent, then nothing will change when we transform it.
+ // Note: This is an instantiation-centric view.
+ if (!E->isValueDependent())
+ return SemaRef.Owned(E);
+
+ // Note: None of the implementations of TryExpandParameterPacks can ever
+ // produce a diagnostic when given only a single unexpanded parameter pack,
+ // so
+ UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
+ &Unexpanded, 1,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!ShouldExpand || RetainExpansion)
+ return SemaRef.Owned(E);
+
+ // We now know the length of the parameter pack, so build a new expression
+ // that stores that length.
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
+ E->getPackLoc(), E->getRParenLoc(),
+ *NumExpansions);
+}
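SizeOfPackExpr is the node behind sizeof...(pack); once TryExpandParameterPacks has fixed the pack's length, the transform above folds the expression to that constant via RebuildSizeOfPackExpr. A short example of the construct:

// sizeof...(Ts) is the number of elements in the pack; after instantiation
// it is a plain integer constant, which is what the rebuild materializes.
template <typename... Ts>
constexpr unsigned packSize(Ts...) { return sizeof...(Ts); }

static_assert(packSize(1, 2, 3) == 3u, "three elements in the pack");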
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
@@ -6018,7 +7060,7 @@ TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) {
if (!getDerived().AlwaysRebuild() &&
EncodedTypeInfo == E->getEncodedTypeSourceInfo())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildObjCEncodeExpr(E->getAtLoc(),
EncodedTypeInfo,
@@ -6031,15 +7073,11 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// Transform arguments.
bool ArgChanged = false;
ASTOwningVector<Expr*> Args(SemaRef);
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
- ExprResult Arg = getDerived().TransformExpr(E->getArg(I));
- if (Arg.isInvalid())
- return ExprError();
-
- ArgChanged = ArgChanged || Arg.get() != E->getArg(I);
- Args.push_back(Arg.get());
- }
-
+ Args.reserve(E->getNumArgs());
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
+ &ArgChanged))
+ return ExprError();
+
if (E->getReceiverKind() == ObjCMessageExpr::Class) {
// Class message: transform the receiver type.
TypeSourceInfo *ReceiverTypeInfo
@@ -6050,11 +7088,12 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// If nothing changed, just retain the existing message send.
if (!getDerived().AlwaysRebuild() &&
ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// Build a new class message send.
return getDerived().RebuildObjCMessageExpr(ReceiverTypeInfo,
E->getSelector(),
+ E->getSelectorLoc(),
E->getMethodDecl(),
E->getLeftLoc(),
move_arg(Args),
@@ -6072,11 +7111,12 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// If nothing changed, just retain the existing message send.
if (!getDerived().AlwaysRebuild() &&
Receiver.get() == E->getInstanceReceiver() && !ArgChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
// Build a new instance message send.
return getDerived().RebuildObjCMessageExpr(Receiver.get(),
E->getSelector(),
+ E->getSelectorLoc(),
E->getMethodDecl(),
E->getLeftLoc(),
move_arg(Args),
@@ -6086,13 +7126,13 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCSelectorExpr(ObjCSelectorExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCProtocolExpr(ObjCProtocolExpr *E) {
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
template<typename Derived>
@@ -6108,7 +7148,7 @@ TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) {
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildObjCIvarRefExpr(Base.get(), E->getDecl(),
E->getLocation(),
@@ -6118,6 +7158,11 @@ TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ // 'super' and types never change. Property never changes. Just
+ // retain the existing expression.
+ if (!E->isObjectReceiver())
+ return SemaRef.Owned(E);
+
// Transform the base expression.
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
@@ -6128,47 +7173,18 @@ TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
- return SemaRef.Owned(E->Retain());
-
- return getDerived().RebuildObjCPropertyRefExpr(Base.get(), E->getProperty(),
- E->getLocation());
-}
+ return SemaRef.Owned(E);
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
- // If this implicit setter/getter refers to class methods, it cannot have any
- // dependent parts. Just retain the existing declaration.
- if (E->getInterfaceDecl())
- return SemaRef.Owned(E->Retain());
-
- // Transform the base expression.
- ExprResult Base = getDerived().TransformExpr(E->getBase());
- if (Base.isInvalid())
- return ExprError();
-
- // We don't need to transform the getters/setters; they will never change.
-
- // If nothing changed, just retain the existing expression.
- if (!getDerived().AlwaysRebuild() &&
- Base.get() == E->getBase())
- return SemaRef.Owned(E->Retain());
-
- return getDerived().RebuildObjCImplicitSetterGetterRefExpr(
- E->getGetterMethod(),
- E->getType(),
- E->getSetterMethod(),
- E->getLocation(),
- Base.get());
-
-}
+ if (E->isExplicitProperty())
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ E->getExplicitProperty(),
+ E->getLocation());
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformObjCSuperExpr(ObjCSuperExpr *E) {
- // Can never occur in a dependent context.
- return SemaRef.Owned(E->Retain());
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ E->getType(),
+ E->getImplicitPropertyGetter(),
+ E->getImplicitPropertySetter(),
+ E->getLocation());
}
template<typename Derived>
@@ -6182,7 +7198,7 @@ TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) {
// If nothing changed, just retain the existing expression.
if (!getDerived().AlwaysRebuild() &&
Base.get() == E->getBase())
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildObjCIsaExpr(Base.get(), E->getIsaMemberLoc(),
E->isArrow());
@@ -6193,18 +7209,14 @@ ExprResult
TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
bool ArgumentChanged = false;
ASTOwningVector<Expr*> SubExprs(SemaRef);
- for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I) {
- ExprResult SubExpr = getDerived().TransformExpr(E->getExpr(I));
- if (SubExpr.isInvalid())
- return ExprError();
-
- ArgumentChanged = ArgumentChanged || SubExpr.get() != E->getExpr(I);
- SubExprs.push_back(SubExpr.get());
- }
+ SubExprs.reserve(E->getNumSubExprs());
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ SubExprs, &ArgumentChanged))
+ return ExprError();
if (!getDerived().AlwaysRebuild() &&
!ArgumentChanged)
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(),
move_arg(SubExprs),
@@ -6214,52 +7226,92 @@ TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
- SourceLocation CaretLoc(E->getExprLoc());
+ BlockDecl *oldBlock = E->getBlockDecl();
- SemaRef.ActOnBlockStart(CaretLoc, /*Scope=*/0);
- BlockScopeInfo *CurBlock = SemaRef.getCurBlock();
- CurBlock->TheDecl->setIsVariadic(E->getBlockDecl()->isVariadic());
- llvm::SmallVector<ParmVarDecl*, 4> Params;
- llvm::SmallVector<QualType, 4> ParamTypes;
+ SemaRef.ActOnBlockStart(E->getCaretLocation(), /*Scope=*/0);
+ BlockScopeInfo *blockScope = SemaRef.getCurBlock();
+
+ blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic());
+ llvm::SmallVector<ParmVarDecl*, 4> params;
+ llvm::SmallVector<QualType, 4> paramTypes;
// Parameter substitution.
- const BlockDecl *BD = E->getBlockDecl();
- for (BlockDecl::param_const_iterator P = BD->param_begin(),
- EN = BD->param_end(); P != EN; ++P) {
- ParmVarDecl *OldParm = (*P);
- ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm);
- QualType NewType = NewParm->getType();
- Params.push_back(NewParm);
- ParamTypes.push_back(NewParm->getType());
+ if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(),
+ oldBlock->param_begin(),
+ oldBlock->param_size(),
+ 0, paramTypes, &params))
+ return true;
+
+ const FunctionType *exprFunctionType = E->getFunctionType();
+ QualType exprResultType = exprFunctionType->getResultType();
+ if (!exprResultType.isNull()) {
+ if (!exprResultType->isDependentType())
+ blockScope->ReturnType = exprResultType;
+ else if (exprResultType != getSema().Context.DependentTy)
+ blockScope->ReturnType = getDerived().TransformType(exprResultType);
}
- const FunctionType *BExprFunctionType = E->getFunctionType();
- QualType BExprResultType = BExprFunctionType->getResultType();
- if (!BExprResultType.isNull()) {
- if (!BExprResultType->isDependentType())
- CurBlock->ReturnType = BExprResultType;
- else if (BExprResultType != SemaRef.Context.DependentTy)
- CurBlock->ReturnType = getDerived().TransformType(BExprResultType);
- }
-
- // Transform the body
- StmtResult Body = getDerived().TransformStmt(E->getBody());
- if (Body.isInvalid())
+ // If the return type has not been determined yet, leave it as a dependent
+ // type; it'll get set when we process the body.
+ if (blockScope->ReturnType.isNull())
+ blockScope->ReturnType = getSema().Context.DependentTy;
+
+ // Don't allow returning an ObjC interface by value.
+ if (blockScope->ReturnType->isObjCObjectType()) {
+ getSema().Diag(E->getCaretLocation(),
+ diag::err_object_cannot_be_passed_returned_by_value)
+ << 0 << blockScope->ReturnType;
return ExprError();
+ }
+
+ QualType functionType = getDerived().RebuildFunctionProtoType(
+ blockScope->ReturnType,
+ paramTypes.data(),
+ paramTypes.size(),
+ oldBlock->isVariadic(),
+ 0, RQ_None,
+ exprFunctionType->getExtInfo());
+ blockScope->FunctionType = functionType;
+
// Set the parameters on the block decl.
- if (!Params.empty())
- CurBlock->TheDecl->setParams(Params.data(), Params.size());
-
- QualType FunctionType = getDerived().RebuildFunctionProtoType(
- CurBlock->ReturnType,
- ParamTypes.data(),
- ParamTypes.size(),
- BD->isVariadic(),
- 0,
- BExprFunctionType->getExtInfo());
+ if (!params.empty())
+ blockScope->TheDecl->setParams(params.data(), params.size());
+
+ // If the return type wasn't explicitly set, it will have been marked as a
+ // dependent type (DependentTy); clear out the return type setting so
+ // we will deduce the return type when type-checking the block's body.
+ if (blockScope->ReturnType == getSema().Context.DependentTy)
+ blockScope->ReturnType = QualType();
- CurBlock->FunctionType = FunctionType;
- return SemaRef.ActOnBlockStmtExpr(CaretLoc, Body.get(), /*Scope=*/0);
+ // Transform the body
+ StmtResult body = getDerived().TransformStmt(E->getBody());
+ if (body.isInvalid())
+ return ExprError();
+
+#ifndef NDEBUG
+ // In builds with assertions, make sure that we capture everything the
+ // original block captured.
+
+ if (oldBlock->capturesCXXThis()) assert(blockScope->CapturesCXXThis);
+
+ for (BlockDecl::capture_iterator i = oldBlock->capture_begin(),
+ e = oldBlock->capture_end(); i != e; ++i) {
+ VarDecl *oldCapture = i->getVariable();
+
+ // Ignore parameter packs.
+ if (isa<ParmVarDecl>(oldCapture) &&
+ cast<ParmVarDecl>(oldCapture)->isParameterPack())
+ continue;
+
+ VarDecl *newCapture =
+ cast<VarDecl>(getDerived().TransformDecl(E->getCaretLocation(),
+ oldCapture));
+ assert(blockScope->CaptureMap.count(newCapture));
+ }
+#endif
+
+ return SemaRef.ActOnBlockStmtExpr(E->getCaretLocation(), body.get(),
+ /*Scope=*/0);
}
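
The DependentTy dance above is easy to miss: the placeholder type stands in for "not yet deduced" while the function type is rebuilt, and is cleared before the body is checked so deduction can run. A minimal standalone sketch of the same sentinel pattern, with invented names (not the clang API):

    #include <cassert>

    struct Type { int id; };
    static Type DependentTy{0};                // the sentinel

    struct BlockScope { Type *ReturnType = nullptr; };

    int main() {
      BlockScope scope;
      if (!scope.ReturnType)
        scope.ReturnType = &DependentTy;       // mark "deduce later"
      // ... the sentinel participates in the rebuilt function type ...
      if (scope.ReturnType == &DependentTy)
        scope.ReturnType = nullptr;            // clear before checking the body
      assert(scope.ReturnType == nullptr);
    }
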
template<typename Derived>
@@ -6279,7 +7331,7 @@ TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) {
// FIXME: this is a bit instantiation-specific.
SemaRef.MarkDeclarationReferenced(E->getLocation(), ND);
- return SemaRef.Owned(E->Retain());
+ return SemaRef.Owned(E);
}
DeclarationNameInfo NameInfo(E->getDecl()->getDeclName(), E->getLocation());
@@ -6403,10 +7455,10 @@ TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType,
template<typename Derived>
QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
- unsigned NumElements,
- VectorType::AltiVecSpecific AltiVecSpec) {
+ unsigned NumElements,
+ VectorType::VectorKind VecKind) {
// FIXME: semantic checking!
- return SemaRef.Context.getVectorType(ElementType, NumElements, AltiVecSpec);
+ return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind);
}
template<typename Derived>
@@ -6435,9 +7487,10 @@ QualType TreeTransform<Derived>::RebuildFunctionProtoType(QualType T,
unsigned NumParamTypes,
bool Variadic,
unsigned Quals,
+ RefQualifierKind RefQualifier,
const FunctionType::ExtInfo &Info) {
return SemaRef.BuildFunctionType(T, ParamTypes, NumParamTypes, Variadic,
- Quals,
+ Quals, RefQualifier,
getDerived().getBaseLocation(),
getDerived().getBaseEntity(),
Info);
@@ -6474,8 +7527,9 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) {
}
template<typename Derived>
-QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E) {
- return SemaRef.BuildTypeofExprType(E);
+QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildTypeofExprType(E, Loc);
}
template<typename Derived>
@@ -6484,8 +7538,9 @@ QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying) {
}
template<typename Derived>
-QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E) {
- return SemaRef.BuildDecltypeType(E);
+QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildDecltypeType(E, Loc);
}
template<typename Derived>
@@ -6552,10 +7607,12 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier,
template<typename Derived>
TemplateName
TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier,
+ SourceRange QualifierRange,
const IdentifierInfo &II,
- QualType ObjectType) {
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
CXXScopeSpec SS;
- SS.setRange(SourceRange(getDerived().getBaseLocation()));
+ SS.setRange(QualifierRange);
SS.setScopeRep(Qualifier);
UnqualifiedId Name;
Name.setIdentifier(&II, /*FIXME:*/getDerived().getBaseLocation());
@@ -6567,7 +7624,7 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier,
ParsedType::make(ObjectType),
/*EnteringContext=*/false,
Template);
- return Template.template getAsVal<TemplateName>();
+ return Template.get();
}
template<typename Derived>
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
index 880af26..3d20a52 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_TYPELOCBUILDER_H
-#define LLVM_CLANG_AST_TYPELOCBUILDER_H
+#ifndef LLVM_CLANG_SEMA_TYPELOCBUILDER_H
+#define LLVM_CLANG_SEMA_TYPELOCBUILDER_H
#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/SmallVector.h"
@@ -62,17 +62,17 @@ class TypeLocBuilder {
/// Pushes a copy of the given TypeLoc onto this builder. The builder
/// must be empty for this to work.
void pushFullCopy(TypeLoc L) {
-#ifndef NDEBUG
- assert(LastTy.isNull() && "pushing copy on non-empty TypeLocBuilder");
- LastTy = L.getNextTypeLoc().getType();
-#endif
- assert(Index == Capacity && "pushing copy on non-empty TypeLocBuilder");
-
- unsigned Size = L.getFullDataSize();
- TypeLoc Copy = pushImpl(L.getType(), Size);
+ size_t Size = L.getFullDataSize();
+ TypeLoc Copy = pushFullUninitializedImpl(L.getType(), Size);
memcpy(Copy.getOpaqueData(), L.getOpaqueData(), Size);
}
+ /// Pushes uninitialized space for the given type. The builder must
+ /// be empty.
+ TypeLoc pushFullUninitialized(QualType T) {
+ return pushFullUninitializedImpl(T, TypeLoc::getFullDataSizeForType(T));
+ }
+
/// Pushes space for a typespec TypeLoc. Invalidates any TypeLocs
/// previously retrieved from this builder.
TypeSpecTypeLoc pushTypeSpec(QualType T) {
@@ -127,7 +127,7 @@ private:
Index -= LocalSize;
- return TypeLoc(T, &Buffer[Index]);
+ return getTypeLoc(T);
}
/// Grow to the given capacity.
@@ -148,6 +148,31 @@ private:
Capacity = NewCapacity;
Index = NewIndex;
}
+
+ TypeLoc pushFullUninitializedImpl(QualType T, size_t Size) {
+#ifndef NDEBUG
+ assert(LastTy.isNull() && "pushing full on non-empty TypeLocBuilder");
+ LastTy = T;
+#endif
+ assert(Index == Capacity && "pushing full on non-empty TypeLocBuilder");
+
+ reserve(Size);
+ Index -= Size;
+
+ return getTypeLoc(T);
+ }
+
+
+ // This is private because, when we kill off TypeSourceInfo in favor
+ // of TypeLoc, we'll want an interface that creates a TypeLoc given
+ // an ASTContext, and we don't want people to think they can just
+ // use this as an equivalent.
+ TypeLoc getTypeLoc(QualType T) {
+#ifndef NDEBUG
+ assert(LastTy == T && "type doesn't match last type pushed!");
+#endif
+ return TypeLoc(T, &Buffer[Index]);
+ }
};
}
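
TypeLocBuilder fills its buffer from the end toward the front, which is why pushFullUninitializedImpl reserves and then decrements Index. A minimal sketch of that backward-growing buffer, with invented names:

    #include <cassert>
    #include <cstring>
    #include <vector>

    struct BackwardBuffer {
      std::vector<char> Buf;
      size_t Index;                          // == Buf.size() when empty

      explicit BackwardBuffer(size_t Cap) : Buf(Cap), Index(Cap) {}

      // Hand out Size bytes, growing downward from the end of the buffer.
      char *push(size_t Size) {
        assert(Index >= Size && "growth path omitted in this sketch");
        Index -= Size;
        return &Buf[Index];
      }
    };

    int main() {
      BackwardBuffer B(16);
      std::memcpy(B.push(4), "ptr", 4);      // pushed first, ends up rightmost
      std::memcpy(B.push(4), "int", 4);      // pushed second, lands in front
      assert(B.Index == 8);
    }
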
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
index 77c1aff..5e94f59 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
@@ -12,11 +12,15 @@
//===----------------------------------------------------------------------===//
#include "ASTCommon.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
+// Give ASTDeserializationListener's VTable a home.
+ASTDeserializationListener::~ASTDeserializationListener() { }
+
serialization::TypeIdx
serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
unsigned ID = 0;
@@ -32,7 +36,8 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::UInt128: ID = PREDEF_TYPE_UINT128_ID; break;
case BuiltinType::Char_S: ID = PREDEF_TYPE_CHAR_S_ID; break;
case BuiltinType::SChar: ID = PREDEF_TYPE_SCHAR_ID; break;
- case BuiltinType::WChar: ID = PREDEF_TYPE_WCHAR_ID; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: ID = PREDEF_TYPE_WCHAR_ID; break;
case BuiltinType::Short: ID = PREDEF_TYPE_SHORT_ID; break;
case BuiltinType::Int: ID = PREDEF_TYPE_INT_ID; break;
case BuiltinType::Long: ID = PREDEF_TYPE_LONG_ID; break;
@@ -49,9 +54,6 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break;
case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break;
case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break;
- case BuiltinType::UndeducedAuto:
- assert(0 && "Should not see undeduced auto here");
- break;
}
return TypeIdx(ID);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
index a0e2ecd..d416699 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
@@ -20,6 +20,12 @@ namespace clang {
namespace serialization {
+enum DeclUpdateKind {
+ UPD_CXX_SET_DEFINITIONDATA,
+ UPD_CXX_ADDED_IMPLICIT_MEMBER,
+ UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION
+};
+
TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
template <typename IdxForTypeTy>
@@ -28,7 +34,7 @@ TypeID MakeTypeID(QualType T, IdxForTypeTy IdxForType) {
return PREDEF_TYPE_NULL_ID;
unsigned FastQuals = T.getLocalFastQualifiers();
- T.removeFastQualifiers();
+ T.removeLocalFastQualifiers();
if (T.hasLocalNonFastQualifiers())
return IdxForType(T).asTypeID(FastQuals);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
index f07215c..ce87b11 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -33,17 +33,21 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
#include <algorithm>
#include <iterator>
#include <cstdio>
#include <sys/stat.h>
+
using namespace clang;
using namespace clang::serialization;
@@ -74,6 +78,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_BENIGN(HexFloats);
PARSE_LANGOPT_IMPORTANT(C99, diag::warn_pch_c99);
PARSE_LANGOPT_IMPORTANT(Microsoft, diag::warn_pch_microsoft_extensions);
+ PARSE_LANGOPT_BENIGN(MSCVersion);
PARSE_LANGOPT_IMPORTANT(CPlusPlus, diag::warn_pch_cplusplus);
PARSE_LANGOPT_IMPORTANT(CPlusPlus0x, diag::warn_pch_cplusplus0x);
PARSE_LANGOPT_BENIGN(CXXOperatorName);
@@ -81,7 +86,10 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(ObjC2, diag::warn_pch_objective_c2);
PARSE_LANGOPT_IMPORTANT(ObjCNonFragileABI, diag::warn_pch_nonfragile_abi);
PARSE_LANGOPT_IMPORTANT(ObjCNonFragileABI2, diag::warn_pch_nonfragile_abi2);
- PARSE_LANGOPT_IMPORTANT(NoConstantCFStrings,
+ PARSE_LANGOPT_IMPORTANT(AppleKext, diag::warn_pch_apple_kext);
+ PARSE_LANGOPT_IMPORTANT(ObjCDefaultSynthProperties,
+ diag::warn_pch_objc_auto_properties);
+ PARSE_LANGOPT_IMPORTANT(NoConstantCFStrings,
diag::warn_pch_no_constant_cfstrings);
PARSE_LANGOPT_BENIGN(PascalStrings);
PARSE_LANGOPT_BENIGN(WritableStrings);
@@ -90,6 +98,8 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(AltiVec, diag::warn_pch_altivec);
PARSE_LANGOPT_IMPORTANT(Exceptions, diag::warn_pch_exceptions);
PARSE_LANGOPT_IMPORTANT(SjLjExceptions, diag::warn_pch_sjlj_exceptions);
+ PARSE_LANGOPT_IMPORTANT(ObjCExceptions, diag::warn_pch_objc_exceptions);
+ PARSE_LANGOPT_IMPORTANT(MSBitfields, diag::warn_pch_ms_bitfields);
PARSE_LANGOPT_IMPORTANT(NeXTRuntime, diag::warn_pch_objc_runtime);
PARSE_LANGOPT_IMPORTANT(Freestanding, diag::warn_pch_freestanding);
PARSE_LANGOPT_IMPORTANT(NoBuiltin, diag::warn_pch_builtins);
@@ -118,6 +128,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(AccessControl, diag::warn_pch_access_control);
PARSE_LANGOPT_IMPORTANT(CharIsSigned, diag::warn_pch_char_signed);
PARSE_LANGOPT_IMPORTANT(ShortWChar, diag::warn_pch_short_wchar);
+ PARSE_LANGOPT_IMPORTANT(ShortEnums, diag::warn_pch_short_enums);
if ((PPLangOpts.getGCMode() != 0) != (LangOpts.getGCMode() != 0)) {
Reader.Diag(diag::warn_pch_gc_mode)
<< LangOpts.getGCMode() << PPLangOpts.getGCMode();
@@ -128,9 +139,11 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
diag::warn_pch_stack_protector);
PARSE_LANGOPT_BENIGN(InstantiationDepth);
PARSE_LANGOPT_IMPORTANT(OpenCL, diag::warn_pch_opencl);
+ PARSE_LANGOPT_IMPORTANT(CUDA, diag::warn_pch_cuda);
PARSE_LANGOPT_BENIGN(CatchUndefined);
PARSE_LANGOPT_IMPORTANT(ElideConstructors, diag::warn_pch_elide_constructors);
PARSE_LANGOPT_BENIGN(SpellChecking);
+ PARSE_LANGOPT_BENIGN(DefaultFPContract);
#undef PARSE_LANGOPT_IMPORTANT
#undef PARSE_LANGOPT_BENIGN
@@ -146,12 +159,14 @@ bool PCHValidator::ReadTargetTriple(llvm::StringRef Triple) {
return true;
}
-struct EmptyStringRef {
- bool operator ()(llvm::StringRef r) const { return r.empty(); }
-};
-struct EmptyBlock {
- bool operator ()(const PCHPredefinesBlock &r) const { return r.Data.empty(); }
-};
+namespace {
+ struct EmptyStringRef {
+ bool operator ()(llvm::StringRef r) const { return r.empty(); }
+ };
+ struct EmptyBlock {
+ bool operator ()(const PCHPredefinesBlock &r) const {return r.Data.empty();}
+ };
+}
static bool EqualConcatenations(llvm::SmallVector<llvm::StringRef, 2> L,
PCHPredefinesBlocks R) {
@@ -263,7 +278,36 @@ bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
llvm::SmallVector<llvm::StringRef, 8> CmdLineLines;
Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- Right.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+
+ // Pick out implicit #includes after the PCH and don't consider them for
+ // validation; we will insert them into SuggestedPredefines so that the
+ // preprocessor includes them.
+ std::string IncludesAfterPCH;
+ llvm::SmallVector<llvm::StringRef, 8> AfterPCHLines;
+ Right.split(AfterPCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (unsigned i = 0, e = AfterPCHLines.size(); i != e; ++i) {
+ if (AfterPCHLines[i].startswith("#include ")) {
+ IncludesAfterPCH += AfterPCHLines[i];
+ IncludesAfterPCH += '\n';
+ } else {
+ CmdLineLines.push_back(AfterPCHLines[i]);
+ }
+ }
+
+ // Make sure we add the includes last into SuggestedPredefines before we
+ // exit this function.
+ struct AddIncludesRAII {
+ std::string &SuggestedPredefines;
+ std::string &IncludesAfterPCH;
+
+ AddIncludesRAII(std::string &SuggestedPredefines,
+ std::string &IncludesAfterPCH)
+ : SuggestedPredefines(SuggestedPredefines),
+ IncludesAfterPCH(IncludesAfterPCH) { }
+ ~AddIncludesRAII() {
+ SuggestedPredefines += IncludesAfterPCH;
+ }
+ } AddIncludes(SuggestedPredefines, IncludesAfterPCH);
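
The local AddIncludesRAII struct guarantees the includes land at the end of SuggestedPredefines no matter which return path the function takes. The same idiom in isolation, a sketch with made-up names:

    #include <cstdio>
    #include <string>

    // Returns early on failure, yet Includes is always appended last.
    static bool processPredefines(std::string &Suggested, bool failEarly) {
      std::string Includes = "#include <x.h>\n";
      struct AppendRAII {
        std::string &Out;
        const std::string &Tail;
        ~AppendRAII() { Out += Tail; }
      } guard{Suggested, Includes};
      if (failEarly)
        return true;                  // the destructor still fires here
      // ... validate the remaining predefine lines ...
      return false;
    }

    int main() {
      std::string S = "#define A 1\n";
      processPredefines(S, /*failEarly=*/true);
      std::printf("%s", S.c_str());   // the include line comes out last
    }
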
// Sort both sets of predefined buffer lines, since we allow some extra
// definitions and they may appear at any point in the output.
@@ -281,6 +325,11 @@ bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
bool ConflictingDefines = false;
for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) {
llvm::StringRef Missing = MissingPredefines[I];
+ if (Missing.startswith("#include ")) {
+ // An -include was specified when generating the PCH; it is already
+ // part of the PCH, so just ignore it.
+ continue;
+ }
if (!Missing.startswith("#define ")) {
Reader.Diag(diag::warn_pch_compiler_options_mismatch);
return true;
@@ -418,8 +467,6 @@ void PCHValidator::ReadCounter(unsigned Value) {
void
ASTReader::setDeserializationListener(ASTDeserializationListener *Listener) {
DeserializationListener = Listener;
- if (DeserializationListener)
- DeserializationListener->SetReader(this);
}
@@ -533,10 +580,10 @@ public:
typedef OnDiskChainedHashTable<ASTSelectorLookupTrait>
ASTSelectorLookupTable;
-namespace {
+namespace clang {
class ASTIdentifierLookupTrait {
ASTReader &Reader;
- llvm::BitstreamCursor &Stream;
+ ASTReader::PerFileData &F;
// If we know the IdentifierInfo in advance, it is here and we will
// not build a new one. Used when deserializing information about an
@@ -550,9 +597,9 @@ public:
typedef external_key_type internal_key_type;
- ASTIdentifierLookupTrait(ASTReader &Reader, llvm::BitstreamCursor &Stream,
+ ASTIdentifierLookupTrait(ASTReader &Reader, ASTReader::PerFileData &F,
IdentifierInfo *II = 0)
- : Reader(Reader), Stream(Stream), KnownII(II) { }
+ : Reader(Reader), F(F), KnownII(II) { }
static bool EqualKey(const internal_key_type& a,
const internal_key_type& b) {
@@ -568,6 +615,10 @@ public:
static const internal_key_type&
GetInternalKey(const external_key_type& x) { return x; }
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const external_key_type&
+ GetExternalKey(const internal_key_type& x) { return x; }
+
static std::pair<unsigned, unsigned>
ReadKeyDataLength(const unsigned char*& d) {
using namespace clang::io;
@@ -644,7 +695,7 @@ public:
// definition.
if (hasMacroDefinition) {
uint32_t Offset = ReadUnalignedLE32(d);
- Reader.ReadMacroRecord(Stream, Offset);
+ Reader.SetIdentifierIsMacro(II, F, Offset);
DataLen -= 4;
}
@@ -752,7 +803,7 @@ public:
case DeclarationName::CXXUsingDirective:
break;
}
-
+
return Key;
}
@@ -814,7 +865,7 @@ public:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- Key.Data =
+ Key.Data =
(uint64_t)Reader.DecodeSelector(ReadUnalignedLE32(d)).getAsOpaquePtr();
break;
case DeclarationName::CXXConstructorName:
@@ -831,7 +882,7 @@ public:
case DeclarationName::CXXUsingDirective:
break;
}
-
+
return Key;
}
@@ -868,8 +919,8 @@ bool ASTReader::ReadDeclContextStorage(llvm::BitstreamCursor &Cursor,
return true;
}
- Info.LexicalDecls = reinterpret_cast<const DeclID*>(Blob);
- Info.NumLexicalDecls = BlobLen / sizeof(DeclID);
+ Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair*>(Blob);
+ Info.NumLexicalDecls = BlobLen / sizeof(KindDeclIDPair);
} else {
Info.LexicalDecls = 0;
Info.NumLexicalDecls = 0;
@@ -918,8 +969,9 @@ bool ASTReader::CheckPredefinesBuffers() {
//===----------------------------------------------------------------------===//
/// \brief Read the line table in the source manager block.
-/// \returns true if ther was an error.
-bool ASTReader::ParseLineTable(llvm::SmallVectorImpl<uint64_t> &Record) {
+/// \returns true if there was an error.
+bool ASTReader::ParseLineTable(PerFileData &F,
+ llvm::SmallVectorImpl<uint64_t> &Record) {
unsigned Idx = 0;
LineTableInfo &LineTable = SourceMgr.getLineTable();
@@ -965,7 +1017,6 @@ namespace {
class ASTStatData {
public:
- const bool hasStat;
const ino_t ino;
const dev_t dev;
const mode_t mode;
@@ -973,10 +1024,7 @@ public:
const off_t size;
ASTStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
- : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
-
- ASTStatData()
- : hasStat(false), ino(0), dev(0), mode(0), mtime(0), size(0) {}
+ : ino(i), dev(d), mode(mo), mtime(m), size(s) {}
};
class ASTStatLookupTrait {
@@ -1011,9 +1059,6 @@ class ASTStatLookupTrait {
unsigned /*DataLen*/) {
using namespace clang::io;
- if (*d++ == 1)
- return data_type();
-
ino_t ino = (ino_t) ReadUnalignedLE32(d);
dev_t dev = (dev_t) ReadUnalignedLE32(d);
mode_t mode = (mode_t) ReadUnalignedLE16(d);
@@ -1027,44 +1072,40 @@ class ASTStatLookupTrait {
///
/// This cache is very similar to the stat cache used by pretokenized
/// headers.
-class ASTStatCache : public StatSysCallCache {
+class ASTStatCache : public FileSystemStatCache {
typedef OnDiskChainedHashTable<ASTStatLookupTrait> CacheTy;
CacheTy *Cache;
unsigned &NumStatHits, &NumStatMisses;
public:
- ASTStatCache(const unsigned char *Buckets,
- const unsigned char *Base,
- unsigned &NumStatHits,
- unsigned &NumStatMisses)
+ ASTStatCache(const unsigned char *Buckets, const unsigned char *Base,
+ unsigned &NumStatHits, unsigned &NumStatMisses)
: Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) {
Cache = CacheTy::Create(Buckets, Base);
}
~ASTStatCache() { delete Cache; }
- int stat(const char *path, struct stat *buf) {
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
// Do the lookup for the file's data in the AST file.
- CacheTy::iterator I = Cache->find(path);
+ CacheTy::iterator I = Cache->find(Path);
// If we don't get a hit in the AST file just forward to 'stat'.
if (I == Cache->end()) {
++NumStatMisses;
- return StatSysCallCache::stat(path, buf);
+ return statChained(Path, StatBuf, FileDescriptor);
}
++NumStatHits;
ASTStatData Data = *I;
- if (!Data.hasStat)
- return 1;
-
- buf->st_ino = Data.ino;
- buf->st_dev = Data.dev;
- buf->st_mtime = Data.mtime;
- buf->st_mode = Data.mode;
- buf->st_size = Data.size;
- return 0;
+ StatBuf.st_ino = Data.ino;
+ StatBuf.st_dev = Data.dev;
+ StatBuf.st_mtime = Data.mtime;
+ StatBuf.st_mode = Data.mode;
+ StatBuf.st_size = Data.size;
+ return CacheExists;
}
};
} // end anonymous namespace
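
ASTStatCache answers stat queries from the table baked into the AST file and defers to the next cache in the chain, ultimately the real syscall, on a miss. A rough standalone sketch of that chained lookup, with invented names:

    #include <map>
    #include <string>
    #include <sys/stat.h>

    struct MiniStatCache {
      std::map<std::string, struct stat> Table; // preloaded from the AST file
      MiniStatCache *Next = nullptr;            // next cache in the chain

      bool getStat(const std::string &Path, struct stat &Buf) {
        auto I = Table.find(Path);
        if (I != Table.end()) { Buf = I->second; return true; } // hit
        if (Next) return Next->getStat(Path, Buf);              // statChained
        return ::stat(Path.c_str(), &Buf) == 0;                 // real syscall
      }
    };

    int main() {
      MiniStatCache C;
      struct stat Buf;
      return C.getStat("/etc/hosts", Buf) ? 0 : 1; // falls through to ::stat
    }
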
@@ -1129,7 +1170,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(PerFileData &F) {
break;
case SM_LINE_TABLE:
- if (ParseLineTable(Record))
+ if (ParseLineTable(F, Record))
return Failure;
break;
@@ -1142,9 +1183,42 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(PerFileData &F) {
}
}
+/// \brief If a header file is not found at the path where we expect it to be
+/// and the PCH file was moved from its original location, try to resolve the
+/// file by assuming that header+PCH were moved together and the header is in
+/// the same place relative to the PCH.
+static std::string
+resolveFileRelativeToOriginalDir(const std::string &Filename,
+ const std::string &OriginalDir,
+ const std::string &CurrDir) {
+ assert(OriginalDir != CurrDir &&
+ "No point trying to resolve the file if the PCH dir didn't change");
+ using namespace llvm::sys;
+ llvm::SmallString<128> filePath(Filename);
+ fs::make_absolute(filePath);
+ assert(path::is_absolute(OriginalDir));
+ llvm::SmallString<128> currPCHPath(CurrDir);
+
+ path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
+ fileDirE = path::end(path::parent_path(filePath));
+ path::const_iterator origDirI = path::begin(OriginalDir),
+ origDirE = path::end(OriginalDir);
+ // Skip the common path components from filePath and OriginalDir.
+ while (fileDirI != fileDirE && origDirI != origDirE &&
+ *fileDirI == *origDirI) {
+ ++fileDirI;
+ ++origDirI;
+ }
+ for (; origDirI != origDirE; ++origDirI)
+ path::append(currPCHPath, "..");
+ path::append(currPCHPath, fileDirI, fileDirE);
+ path::append(currPCHPath, path::filename(Filename));
+ return currPCHPath.str();
+}
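
Concretely, the function strips the prefix shared by the header's directory and the original PCH directory, climbs out of whatever remains of the original directory with "..", and re-descends under the new directory. A sketch of the same logic over std::filesystem (assumed equivalent, not the clang code):

    #include <filesystem>
    #include <iostream>
    namespace fs = std::filesystem;

    static fs::path resolveRelative(const fs::path &file,
                                    const fs::path &origDir,
                                    const fs::path &currDir) {
      fs::path dir = file.parent_path();
      auto f = dir.begin(), fe = dir.end();
      auto o = origDir.begin(), oe = origDir.end();
      while (f != fe && o != oe && *f == *o) { ++f; ++o; } // shared prefix
      fs::path out = currDir;
      for (; o != oe; ++o) out /= "..";   // climb out of what differs
      for (; f != fe; ++f) out /= *f;     // re-descend to the header's dir
      return out / file.filename();
    }

    int main() {
      // Header and PCH moved together from /old/proj to /new/proj:
      std::cout << resolveRelative("/old/proj/inc/a.h",
                                   "/old/proj", "/new/proj") << "\n";
      // prints "/new/proj/inc/a.h"
    }
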
+
/// \brief Get a cursor that's correctly positioned for reading the source
/// location entry with the given ID.
-llvm::BitstreamCursor &ASTReader::SLocCursorForID(unsigned ID) {
+ASTReader::PerFileData *ASTReader::SLocCursorForID(unsigned ID) {
assert(ID != 0 && ID <= TotalNumSLocEntries &&
"SLocCursorForID should only be called for real IDs.");
@@ -1159,7 +1233,7 @@ llvm::BitstreamCursor &ASTReader::SLocCursorForID(unsigned ID) {
assert(F && F->LocalNumSLocEntries > ID && "Chain corrupted");
F->SLocEntryCursor.JumpToBit(F->SLocOffsets[ID]);
- return F->SLocEntryCursor;
+ return F;
}
/// \brief Read in the source location entry with the given ID.
@@ -1172,7 +1246,8 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
return Failure;
}
- llvm::BitstreamCursor &SLocEntryCursor = SLocCursorForID(ID);
+ PerFileData *F = SLocCursorForID(ID);
+ llvm::BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
++NumSLocEntriesRead;
unsigned Code = SLocEntryCursor.ReadCode();
@@ -1195,6 +1270,17 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
std::string Filename(BlobStart, BlobStart + BlobLen);
MaybeAddSystemRootToFilename(Filename);
const FileEntry *File = FileMgr.getFile(Filename);
+ if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
+ OriginalDir != CurrentDir) {
+ std::string resolved = resolveFileRelativeToOriginalDir(Filename,
+ OriginalDir,
+ CurrentDir);
+ if (!resolved.empty())
+ File = FileMgr.getFile(resolved);
+ }
+ if (File == 0)
+ File = FileMgr.getVirtualFile(Filename, (off_t)Record[4],
+ (time_t)Record[5]);
if (File == 0) {
std::string ErrorStr = "could not find file '";
ErrorStr += Filename;
@@ -1203,7 +1289,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
return Failure;
}
- if (Record.size() < 10) {
+ if (Record.size() < 6) {
Error("source location entry is incorrect");
return Failure;
}
@@ -1222,22 +1308,13 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
return Failure;
}
- FileID FID = SourceMgr.createFileID(File,
- SourceLocation::getFromRawEncoding(Record[1]),
- (SrcMgr::CharacteristicKind)Record[2],
+ FileID FID = SourceMgr.createFileID(File, ReadSourceLocation(*F, Record[1]),
+ (SrcMgr::CharacteristicKind)Record[2],
ID, Record[0]);
if (Record[3])
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile())
.setHasLineDirectives();
-
- // Reconstruct header-search information for this file.
- HeaderFileInfo HFI;
- HFI.isImport = Record[6];
- HFI.DirInfo = Record[7];
- HFI.NumIncludes = Record[8];
- HFI.ControllingMacroID = Record[9];
- if (Listener)
- Listener->ReadHeaderFileInfo(HFI, File->getUID());
+
break;
}
@@ -1271,11 +1348,10 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
}
case SM_SLOC_INSTANTIATION_ENTRY: {
- SourceLocation SpellingLoc
- = SourceLocation::getFromRawEncoding(Record[1]);
+ SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
SourceMgr.createInstantiationLoc(SpellingLoc,
- SourceLocation::getFromRawEncoding(Record[2]),
- SourceLocation::getFromRawEncoding(Record[3]),
+ ReadSourceLocation(*F, Record[2]),
+ ReadSourceLocation(*F, Record[3]),
Record[4],
ID,
Record[0]);
@@ -1297,17 +1373,21 @@ bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
}
while (true) {
+ uint64_t Offset = Cursor.GetCurrentBitNo();
unsigned Code = Cursor.ReadCode();
// We expect all abbrevs to be at the start of the block.
- if (Code != llvm::bitc::DEFINE_ABBREV)
+ if (Code != llvm::bitc::DEFINE_ABBREV) {
+ Cursor.JumpToBit(Offset);
return false;
+ }
Cursor.ReadAbbrevRecord();
}
}
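
ReadBlockAbbrevs now peeks: it records the bit offset, reads a code, and jumps back when the code is not another abbreviation, leaving the record for the caller. The same save/rewind idiom against an ordinary stream, as a sketch:

    #include <iostream>
    #include <sstream>

    int main() {
      std::istringstream in("AAB");      // two "abbrevs", then real content
      char c;
      while (true) {
        std::streampos at = in.tellg();  // Offset = GetCurrentBitNo()
        in.get(c);
        if (c != 'A') {                  // not DEFINE_ABBREV:
          in.seekg(at);                  //   JumpToBit(Offset) and stop
          break;
        }
      }
      in.get(c);
      std::cout << c << "\n";            // prints B; it was not consumed
    }
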
-void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
+PreprocessedEntity *ASTReader::ReadMacroRecord(PerFileData &F, uint64_t Offset) {
assert(PP && "Forgot to set Preprocessor ?");
+ llvm::BitstreamCursor &Stream = F.MacroCursor;
// Keep track of where we are in the stream, then jump back there
// after reading this macro.
@@ -1322,14 +1402,14 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
unsigned Code = Stream.ReadCode();
switch (Code) {
case llvm::bitc::END_BLOCK:
- return;
+ return 0;
case llvm::bitc::ENTER_SUBBLOCK:
// No known subblocks, always skip them.
Stream.ReadSubBlockID();
if (Stream.SkipBlock()) {
Error("malformed block record in AST file");
- return;
+ return 0;
}
continue;
@@ -1340,9 +1420,12 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
}
// Read a record.
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
Record.clear();
PreprocessorRecordTypes RecType =
- (PreprocessorRecordTypes)Stream.ReadRecord(Code, Record);
+ (PreprocessorRecordTypes)Stream.ReadRecord(Code, Record, BlobStart,
+ BlobLen);
switch (RecType) {
case PP_MACRO_OBJECT_LIKE:
case PP_MACRO_FUNCTION_LIKE: {
@@ -1350,14 +1433,14 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
// of the definition of the macro we were looking for. We're
// done.
if (Macro)
- return;
+ return 0;
IdentifierInfo *II = DecodeIdentifierInfo(Record[0]);
if (II == 0) {
Error("macro must have a name in AST file");
- return;
+ return 0;
}
- SourceLocation Loc = SourceLocation::getFromRawEncoding(Record[1]);
+ SourceLocation Loc = ReadSourceLocation(F, Record[1]);
bool isUsed = Record[2];
MacroInfo *MI = PP->AllocateMacroInfo(Loc);
@@ -1389,13 +1472,13 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
// Remember that we saw this macro last so that we add the tokens that
// form its body to it.
Macro = MI;
-
+
if (NextIndex + 1 == Record.size() && PP->getPreprocessingRecord()) {
// We have a macro definition. Load it now.
PP->getPreprocessingRecord()->RegisterMacroDefinition(Macro,
getMacroDefinition(Record[NextIndex]));
}
-
+
++NumMacrosRead;
break;
}
@@ -1407,7 +1490,7 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
Token Tok;
Tok.startToken();
- Tok.setLocation(SourceLocation::getFromRawEncoding(Record[0]));
+ Tok.setLocation(ReadSourceLocation(F, Record[0]));
Tok.setLength(Record[1]);
if (IdentifierInfo *II = DecodeIdentifierInfo(Record[2]))
Tok.setIdentifierInfo(II);
@@ -1416,92 +1499,242 @@ void ASTReader::ReadMacroRecord(llvm::BitstreamCursor &Stream, uint64_t Offset){
Macro->AddTokenToBody(Tok);
break;
}
-
- case PP_MACRO_INSTANTIATION: {
- // If we already have a macro, that means that we've hit the end
- // of the definition of the macro we were looking for. We're
- // done.
- if (Macro)
- return;
-
- if (!PP->getPreprocessingRecord()) {
- Error("missing preprocessing record in AST file");
- return;
- }
-
- PreprocessingRecord &PPRec = *PP->getPreprocessingRecord();
- if (PPRec.getPreprocessedEntity(Record[0]))
- return;
-
- MacroInstantiation *MI
- = new (PPRec) MacroInstantiation(DecodeIdentifierInfo(Record[3]),
- SourceRange(
- SourceLocation::getFromRawEncoding(Record[1]),
- SourceLocation::getFromRawEncoding(Record[2])),
- getMacroDefinition(Record[4]));
- PPRec.SetPreallocatedEntity(Record[0], MI);
- return;
- }
+ }
+ }
+
+ return 0;
+}
- case PP_MACRO_DEFINITION: {
- // If we already have a macro, that means that we've hit the end
- // of the definition of the macro we were looking for. We're
- // done.
- if (Macro)
- return;
+PreprocessedEntity *ASTReader::LoadPreprocessedEntity(PerFileData &F) {
+ assert(PP && "Forgot to set Preprocessor ?");
+ unsigned Code = F.PreprocessorDetailCursor.ReadCode();
+ switch (Code) {
+ case llvm::bitc::END_BLOCK:
+ return 0;
+
+ case llvm::bitc::ENTER_SUBBLOCK:
+ Error("unexpected subblock record in preprocessor detail block");
+ return 0;
- if (!PP->getPreprocessingRecord()) {
- Error("missing preprocessing record in AST file");
- return;
- }
+ case llvm::bitc::DEFINE_ABBREV:
+ Error("unexpected abbrevation record in preprocessor detail block");
+ return 0;
- PreprocessingRecord &PPRec = *PP->getPreprocessingRecord();
- if (PPRec.getPreprocessedEntity(Record[0]))
- return;
-
- if (Record[1] >= MacroDefinitionsLoaded.size()) {
- Error("out-of-bounds macro definition record");
- return;
- }
+ default:
+ break;
+ }
+ if (!PP->getPreprocessingRecord()) {
+ Error("no preprocessing record");
+ return 0;
+ }
+
+ // Read the record.
+ PreprocessingRecord &PPRec = *PP->getPreprocessingRecord();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ RecordData Record;
+ PreprocessorDetailRecordTypes RecType =
+ (PreprocessorDetailRecordTypes)F.PreprocessorDetailCursor.ReadRecord(
+ Code, Record, BlobStart, BlobLen);
+ switch (RecType) {
+ case PPD_MACRO_INSTANTIATION: {
+ if (PreprocessedEntity *PE = PPRec.getPreprocessedEntity(Record[0]))
+ return PE;
+
+ MacroInstantiation *MI
+ = new (PPRec) MacroInstantiation(DecodeIdentifierInfo(Record[3]),
+ SourceRange(ReadSourceLocation(F, Record[1]),
+ ReadSourceLocation(F, Record[2])),
+ getMacroDefinition(Record[4]));
+ PPRec.SetPreallocatedEntity(Record[0], MI);
+ return MI;
+ }
+
+ case PPD_MACRO_DEFINITION: {
+ if (PreprocessedEntity *PE = PPRec.getPreprocessedEntity(Record[0]))
+ return PE;
+
+ if (Record[1] > MacroDefinitionsLoaded.size()) {
+ Error("out-of-bounds macro definition record");
+ return 0;
+ }
+
+ // Decode the identifier info and then check again; if the macro
+ // definition has not been deserialized yet, create and register it now.
+ IdentifierInfo *II = DecodeIdentifierInfo(Record[4]);
+ if (!MacroDefinitionsLoaded[Record[1] - 1]) {
MacroDefinition *MD
- = new (PPRec) MacroDefinition(DecodeIdentifierInfo(Record[4]),
- SourceLocation::getFromRawEncoding(Record[5]),
- SourceRange(
- SourceLocation::getFromRawEncoding(Record[2]),
- SourceLocation::getFromRawEncoding(Record[3])));
+ = new (PPRec) MacroDefinition(II,
+ ReadSourceLocation(F, Record[5]),
+ SourceRange(
+ ReadSourceLocation(F, Record[2]),
+ ReadSourceLocation(F, Record[3])));
+
PPRec.SetPreallocatedEntity(Record[0], MD);
- MacroDefinitionsLoaded[Record[1]] = MD;
- return;
+ MacroDefinitionsLoaded[Record[1] - 1] = MD;
+
+ if (DeserializationListener)
+ DeserializationListener->MacroDefinitionRead(Record[1], MD);
}
+
+ return MacroDefinitionsLoaded[Record[1] - 1];
+ }
+
+ case PPD_INCLUSION_DIRECTIVE: {
+ if (PreprocessedEntity *PE = PPRec.getPreprocessedEntity(Record[0]))
+ return PE;
+
+ const char *FullFileNameStart = BlobStart + Record[3];
+ const FileEntry *File
+ = PP->getFileManager().getFile(llvm::StringRef(FullFileNameStart,
+ BlobLen - Record[3]));
+
+ // FIXME: Stable encoding
+ InclusionDirective::InclusionKind Kind
+ = static_cast<InclusionDirective::InclusionKind>(Record[5]);
+ InclusionDirective *ID
+ = new (PPRec) InclusionDirective(PPRec, Kind,
+ llvm::StringRef(BlobStart, Record[3]),
+ Record[4],
+ File,
+ SourceRange(ReadSourceLocation(F, Record[1]),
+ ReadSourceLocation(F, Record[2])));
+ PPRec.SetPreallocatedEntity(Record[0], ID);
+ return ID;
}
}
+
+ Error("invalid offset in preprocessor detail block");
+ return 0;
+}
+
+namespace {
+ /// \brief Trait class used to search the on-disk hash table containing all of
+ /// the header search information.
+ ///
+ /// The on-disk hash table contains a mapping from each header path to
+ /// information about that header (how many times it has been included, its
+ /// controlling macro, etc.). Note that we actually hash based on the
+ /// filename, and support "deep" comparisons of file names based on current
+ /// inode numbers, so that the search can cope with non-normalized path names
+ /// and symlinks.
+ class HeaderFileInfoTrait {
+ const char *SearchPath;
+ struct stat SearchPathStatBuf;
+ llvm::Optional<int> SearchPathStatResult;
+
+ int StatSimpleCache(const char *Path, struct stat *StatBuf) {
+ if (Path == SearchPath) {
+ if (!SearchPathStatResult)
+ SearchPathStatResult = stat(Path, &SearchPathStatBuf);
+
+ *StatBuf = SearchPathStatBuf;
+ return *SearchPathStatResult;
+ }
+
+ return stat(Path, StatBuf);
+ }
+
+ public:
+ typedef const char *external_key_type;
+ typedef const char *internal_key_type;
+
+ typedef HeaderFileInfo data_type;
+
+ HeaderFileInfoTrait(const char *SearchPath = 0) : SearchPath(SearchPath) { }
+
+ static unsigned ComputeHash(const char *path) {
+ return llvm::HashString(llvm::sys::path::filename(path));
+ }
+
+ static internal_key_type GetInternalKey(const char *path) { return path; }
+
+ bool EqualKey(internal_key_type a, internal_key_type b) {
+ if (strcmp(a, b) == 0)
+ return true;
+
+ if (llvm::sys::path::filename(a) != llvm::sys::path::filename(b))
+ return false;
+
+ // The file names match, but the path names don't. stat() the files to
+ // see if they are the same.
+ struct stat StatBufA, StatBufB;
+ if (StatSimpleCache(a, &StatBufA) || StatSimpleCache(b, &StatBufB))
+ return false;
+
+ return StatBufA.st_ino == StatBufB.st_ino;
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
+ unsigned DataLen = (unsigned) *d++;
+ return std::make_pair(KeyLen + 1, DataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char *d, unsigned) {
+ return (const char *)d;
+ }
+
+ static data_type ReadData(const internal_key_type, const unsigned char *d,
+ unsigned DataLen) {
+ const unsigned char *End = d + DataLen;
+ using namespace clang::io;
+ HeaderFileInfo HFI;
+ unsigned Flags = *d++;
+ HFI.isImport = (Flags >> 3) & 0x01;
+ HFI.DirInfo = (Flags >> 1) & 0x03;
+ HFI.Resolved = Flags & 0x01;
+ HFI.NumIncludes = ReadUnalignedLE16(d);
+ HFI.ControllingMacroID = ReadUnalignedLE32(d);
+ assert(End == d && "Wrong data length in HeaderFileInfo deserialization");
+ (void)End;
+
+ // This HeaderFileInfo was externally loaded.
+ HFI.External = true;
+ return HFI;
+ }
+ };
+}
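
The trait hashes on the file name alone and falls back to inode comparison when two differently spelled paths share a name. A standalone sketch of that "deep" equality, with invented names (the trait above compares only st_ino):

    #include <cstring>
    #include <string_view>
    #include <sys/stat.h>

    static std::string_view baseName(std::string_view p) {
      size_t s = p.rfind('/');
      return s == std::string_view::npos ? p : p.substr(s + 1);
    }

    // Cheap checks first; only stat() when the names alone can't decide.
    static bool sameFile(const char *a, const char *b) {
      if (std::strcmp(a, b) == 0) return true;       // same spelling
      if (baseName(a) != baseName(b)) return false;  // different names
      struct stat sa, sb;
      if (::stat(a, &sa) || ::stat(b, &sb)) return false;
      return sa.st_ino == sb.st_ino;                 // symlink/alias match
    }

    int main() { return sameFile("/bin/sh", "/bin/sh") ? 0 : 1; }
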
+
+/// \brief The on-disk hash table used for header search information.
+typedef OnDiskChainedHashTable<HeaderFileInfoTrait>
+ HeaderFileInfoLookupTable;
+
+void ASTReader::SetIdentifierIsMacro(IdentifierInfo *II, PerFileData &F,
+ uint64_t Offset) {
+ // Note that this identifier has a macro definition.
+ II->setHasMacroDefinition(true);
+
+ // Adjust the offset based on our position in the chain.
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Chain[I] == &F)
+ break;
+
+ Offset += Chain[I]->SizeInBits;
+ }
+
+ UnreadMacroRecordOffsets[II] = Offset;
}
void ASTReader::ReadDefinedMacros() {
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
- llvm::BitstreamCursor &MacroCursor = Chain[N - I - 1]->MacroCursor;
+ PerFileData &F = *Chain[N - I - 1];
+ llvm::BitstreamCursor &MacroCursor = F.MacroCursor;
// If there was no preprocessor block, skip this file.
if (!MacroCursor.getBitStreamReader())
continue;
llvm::BitstreamCursor Cursor = MacroCursor;
- if (Cursor.EnterSubBlock(PREPROCESSOR_BLOCK_ID)) {
- Error("malformed preprocessor block record in AST file");
- return;
- }
+ Cursor.JumpToBit(F.MacroStartOffset);
RecordData Record;
while (true) {
unsigned Code = Cursor.ReadCode();
- if (Code == llvm::bitc::END_BLOCK) {
- if (Cursor.ReadBlockEnd()) {
- Error("error at end of preprocessor block in AST file");
- return;
- }
+ if (Code == llvm::bitc::END_BLOCK)
break;
- }
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
// No known subblocks, always skip them.
@@ -1534,35 +1767,64 @@ void ASTReader::ReadDefinedMacros() {
case PP_TOKEN:
// Ignore tokens.
break;
-
- case PP_MACRO_INSTANTIATION:
- case PP_MACRO_DEFINITION:
- // Read the macro record.
- ReadMacroRecord(Chain[N - I - 1]->Stream, Cursor.GetCurrentBitNo());
- break;
}
}
}
+
+ // Drain the unread macro-record offsets map.
+ while (!UnreadMacroRecordOffsets.empty())
+ LoadMacroDefinition(UnreadMacroRecordOffsets.begin());
+}
+
+void ASTReader::LoadMacroDefinition(
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
+ assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition");
+ PerFileData *F = 0;
+ uint64_t Offset = Pos->second;
+ UnreadMacroRecordOffsets.erase(Pos);
+
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Offset < Chain[I]->SizeInBits) {
+ F = Chain[I];
+ break;
+ }
+
+ Offset -= Chain[I]->SizeInBits;
+ }
+ if (!F) {
+ Error("Malformed macro record offset");
+ return;
+ }
+
+ ReadMacroRecord(*F, Offset);
}
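
SetIdentifierIsMacro globalizes a local bit offset by adding the sizes of all earlier files in the chain, and LoadMacroDefinition inverts that by subtracting sizes until the offset fits inside one file. Both directions in miniature, a sketch with invented names:

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct FileSz { uint64_t SizeInBits; };

    // Local offset in file FileIdx -> chain-wide offset.
    static uint64_t globalize(const std::vector<FileSz> &Chain,
                              unsigned FileIdx, uint64_t Local) {
      for (unsigned I = 0; I != FileIdx; ++I)
        Local += Chain[I].SizeInBits;
      return Local;
    }

    // Chain-wide offset -> (file index, local offset).
    static std::pair<unsigned, uint64_t>
    localize(const std::vector<FileSz> &Chain, uint64_t Off) {
      for (size_t I = 0; I != Chain.size(); ++I) {
        if (Off < Chain[I].SizeInBits)
          return {static_cast<unsigned>(I), Off};
        Off -= Chain[I].SizeInBits;
      }
      return {static_cast<unsigned>(Chain.size()), Off}; // malformed offset
    }

    int main() {
      std::vector<FileSz> Chain{{100}, {200}, {50}};
      uint64_t g = globalize(Chain, 1, 30);              // -> 130
      assert(localize(Chain, g) == std::make_pair(1u, uint64_t(30)));
    }
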
-MacroDefinition *ASTReader::getMacroDefinition(IdentID ID) {
- if (ID == 0 || ID >= MacroDefinitionsLoaded.size())
+void ASTReader::LoadMacroDefinition(IdentifierInfo *II) {
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos
+ = UnreadMacroRecordOffsets.find(II);
+ LoadMacroDefinition(Pos);
+}
+
+MacroDefinition *ASTReader::getMacroDefinition(MacroID ID) {
+ if (ID == 0 || ID > MacroDefinitionsLoaded.size())
return 0;
- if (!MacroDefinitionsLoaded[ID]) {
- unsigned Index = ID;
+ if (!MacroDefinitionsLoaded[ID - 1]) {
+ unsigned Index = ID - 1;
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
PerFileData &F = *Chain[N - I - 1];
if (Index < F.LocalNumMacroDefinitions) {
- ReadMacroRecord(F.Stream, F.MacroDefinitionOffsets[Index]);
+ SavedStreamPosition SavedPosition(F.PreprocessorDetailCursor);
+ F.PreprocessorDetailCursor.JumpToBit(F.MacroDefinitionOffsets[Index]);
+ LoadPreprocessedEntity(F);
break;
}
Index -= F.LocalNumMacroDefinitions;
}
- assert(MacroDefinitionsLoaded[ID] && "Broken chain");
+ assert(MacroDefinitionsLoaded[ID - 1] && "Broken chain");
}
- return MacroDefinitionsLoaded[ID];
+ return MacroDefinitionsLoaded[ID - 1];
}
/// \brief If we are loading a relocatable PCH file, and the filename is
@@ -1573,7 +1835,7 @@ void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
if (!RelocatablePCH)
return;
- if (Filename.empty() || llvm::sys::Path(Filename).isAbsolute())
+ if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
return;
if (isysroot == 0) {
@@ -1628,17 +1890,38 @@ ASTReader::ReadASTBlock(PerFileData &F) {
}
break;
+ case DECL_UPDATES_BLOCK_ID:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+
case PREPROCESSOR_BLOCK_ID:
F.MacroCursor = Stream;
if (PP)
PP->setExternalSource(this);
- if (Stream.SkipBlock()) {
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
+ F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
break;
+ case PREPROCESSOR_DETAIL_BLOCK_ID:
+ F.PreprocessorDetailCursor = Stream;
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.PreprocessorDetailCursor,
+ PREPROCESSOR_DETAIL_BLOCK_ID)) {
+ Error("malformed preprocessor detail record in AST file");
+ return Failure;
+ }
+ F.PreprocessorDetailStartOffset
+ = F.PreprocessorDetailCursor.GetCurrentBitNo();
+ break;
+
case SOURCE_MANAGER_BLOCK_ID:
switch (ReadSourceManagerBlock(F)) {
case Success:
@@ -1667,7 +1950,7 @@ ASTReader::ReadASTBlock(PerFileData &F) {
const char *BlobStart = 0;
unsigned BlobLen = 0;
switch ((ASTRecordTypes)Stream.ReadRecord(Code, Record,
- &BlobStart, &BlobLen)) {
+ &BlobStart, &BlobLen)) {
default: // Default behavior: ignore.
break;
@@ -1698,8 +1981,8 @@ ASTReader::ReadASTBlock(PerFileData &F) {
return IgnorePCH;
}
- // Load the chained file.
- switch(ReadASTCore(llvm::StringRef(BlobStart, BlobLen))) {
+ // Load the chained file, which is always a PCH file.
+ switch(ReadASTCore(llvm::StringRef(BlobStart, BlobLen), PCH)) {
case Failure: return Failure;
// If we have to ignore the dependency, we'll have to ignore this too.
case IgnorePCH: return IgnorePCH;
@@ -1729,10 +2012,11 @@ ASTReader::ReadASTBlock(PerFileData &F) {
case TU_UPDATE_LEXICAL: {
DeclContextInfo Info = {
/* No visible information */ 0,
- reinterpret_cast<const DeclID *>(BlobStart),
- BlobLen / sizeof(DeclID)
+ reinterpret_cast<const KindDeclIDPair *>(BlobStart),
+ BlobLen / sizeof(KindDeclIDPair)
};
- DeclContextOffsets[Context->getTranslationUnitDecl()].push_back(Info);
+ DeclContextOffsets[Context ? Context->getTranslationUnitDecl() : 0]
+ .push_back(Info);
break;
}
@@ -1742,7 +2026,7 @@ ASTReader::ReadASTBlock(PerFileData &F) {
(const unsigned char *)BlobStart + Record[1],
(const unsigned char *)BlobStart,
ASTDeclContextNameLookupTrait(*this));
- if (ID == 1) { // Is it the TU?
+ if (ID == 1 && Context) { // Is it the TU?
DeclContextInfo Info = {
Table, /* No lexical information */ 0, 0
};
@@ -1776,7 +2060,7 @@ ASTReader::ReadASTBlock(PerFileData &F) {
= ASTIdentifierLookupTable::Create(
(const unsigned char *)F.IdentifierTableData + Record[0],
(const unsigned char *)F.IdentifierTableData,
- ASTIdentifierLookupTrait(*this, F.Stream));
+ ASTIdentifierLookupTrait(*this, F));
if (PP)
PP->getIdentifierTable().setExternalIdentifierLookup(this);
}
@@ -1863,11 +2147,9 @@ ASTReader::ReadASTBlock(PerFileData &F) {
TotalNumMethodPoolEntries += Record[1];
break;
- case REFERENCED_SELECTOR_POOL: {
- ReferencedSelectorsData.insert(ReferencedSelectorsData.end(),
- Record.begin(), Record.end());
+ case REFERENCED_SELECTOR_POOL:
+ F.ReferencedSelectorsData.swap(Record);
break;
- }
case PP_COUNTER_VALUE:
if (!Record.empty() && Listener)
@@ -1877,28 +2159,26 @@ ASTReader::ReadASTBlock(PerFileData &F) {
case SOURCE_LOCATION_OFFSETS:
F.SLocOffsets = (const uint32_t *)BlobStart;
F.LocalNumSLocEntries = Record[0];
- // We cannot delay this until the entire chain is loaded, because then
- // source location preloads would also have to be delayed.
- // FIXME: Is there a reason not to do that?
- TotalNumSLocEntries += F.LocalNumSLocEntries;
- SourceMgr.PreallocateSLocEntries(this, TotalNumSLocEntries, Record[1]);
+ F.LocalSLocSize = Record[1];
break;
case SOURCE_LOCATION_PRELOADS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I) {
- ASTReadResult Result = ReadSLocEntryRecord(Record[I]);
- if (Result != Success)
- return Result;
- }
+ if (PreloadSLocEntries.empty())
+ PreloadSLocEntries.swap(Record);
+ else
+ PreloadSLocEntries.insert(PreloadSLocEntries.end(),
+ Record.begin(), Record.end());
break;
case STAT_CACHE: {
- ASTStatCache *MyStatCache =
- new ASTStatCache((const unsigned char *)BlobStart + Record[0],
- (const unsigned char *)BlobStart,
- NumStatHits, NumStatMisses);
- FileMgr.addStatCache(MyStatCache);
- F.StatCache = MyStatCache;
+ if (!DisableStatCache) {
+ ASTStatCache *MyStatCache =
+ new ASTStatCache((const unsigned char *)BlobStart + Record[0],
+ (const unsigned char *)BlobStart,
+ NumStatHits, NumStatMisses);
+ FileMgr.addStatCache(MyStatCache);
+ F.StatCache = MyStatCache;
+ }
break;
}
@@ -1926,12 +2206,7 @@ ASTReader::ReadASTBlock(PerFileData &F) {
break;
case PENDING_IMPLICIT_INSTANTIATIONS:
- // Optimization for the first block.
- if (PendingInstantiations.empty())
- PendingInstantiations.swap(Record);
- else
- PendingInstantiations.insert(PendingInstantiations.end(),
- Record.begin(), Record.end());
+ F.PendingInstantiations.swap(Record);
break;
case SEMA_DECL_REFS:
@@ -1947,6 +2222,12 @@ ASTReader::ReadASTBlock(PerFileData &F) {
MaybeAddSystemRootToFilename(OriginalFileName);
break;
+ case ORIGINAL_PCH_DIR:
+ // The primary AST will be the last to get here, so it will be the one
+ // that's used.
+ OriginalDir.assign(BlobStart, BlobLen);
+ break;
+
case VERSION_CONTROL_BRANCH_REVISION: {
const std::string &CurBranch = getClangFullRepositoryVersion();
llvm::StringRef ASTBranch(BlobStart, BlobLen);
@@ -1963,6 +2244,17 @@ ASTReader::ReadASTBlock(PerFileData &F) {
F.LocalNumMacroDefinitions = Record[1];
break;
+ case DECL_UPDATE_OFFSETS: {
+ if (Record.size() % 2 != 0) {
+ Error("invalid DECL_UPDATE_OFFSETS block in AST file");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; I += 2)
+ DeclUpdateOffsets[static_cast<DeclID>(Record[I])]
+ .push_back(std::make_pair(&F, Record[I+1]));
+ break;
+ }
+
case DECL_REPLACEMENTS: {
if (Record.size() % 2 != 0) {
Error("invalid DECL_REPLACEMENTS block in AST file");
@@ -1973,13 +2265,57 @@ ASTReader::ReadASTBlock(PerFileData &F) {
std::make_pair(&F, Record[I+1]);
break;
}
-
- case ADDITIONAL_TEMPLATE_SPECIALIZATIONS: {
- AdditionalTemplateSpecializations &ATS =
- AdditionalTemplateSpecializationsPending[Record[0]];
- ATS.insert(ATS.end(), Record.begin()+1, Record.end());
+
+ case CXX_BASE_SPECIFIER_OFFSETS: {
+ if (F.LocalNumCXXBaseSpecifiers != 0) {
+ Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumCXXBaseSpecifiers = Record[0];
+ F.CXXBaseSpecifiersOffsets = (const uint32_t *)BlobStart;
break;
}
+
+ case DIAG_PRAGMA_MAPPINGS:
+ if (Record.size() % 2 != 0) {
+ Error("invalid DIAG_USER_MAPPINGS block in AST file");
+ return Failure;
+ }
+ if (PragmaDiagMappings.empty())
+ PragmaDiagMappings.swap(Record);
+ else
+ PragmaDiagMappings.insert(PragmaDiagMappings.end(),
+ Record.begin(), Record.end());
+ break;
+
+ case CUDA_SPECIAL_DECL_REFS:
+ // Later tables overwrite earlier ones.
+ CUDASpecialDeclRefs.swap(Record);
+ break;
+
+ case HEADER_SEARCH_TABLE:
+ F.HeaderFileInfoTableData = BlobStart;
+ F.LocalNumHeaderFileInfos = Record[1];
+ if (Record[0]) {
+ F.HeaderFileInfoTable
+ = HeaderFileInfoLookupTable::Create(
+ (const unsigned char *)F.HeaderFileInfoTableData + Record[0],
+ (const unsigned char *)F.HeaderFileInfoTableData);
+ if (PP)
+ PP->getHeaderSearchInfo().SetExternalSource(this);
+ }
+ break;
+
+ case FP_PRAGMA_OPTIONS:
+ // Later tables overwrite earlier ones.
+ FPPragmaOptions.swap(Record);
+ break;
+
+ case OPENCL_EXTENSIONS:
+ // Later tables overwrite earlier ones.
+ OpenCLExtensions.swap(Record);
+ break;
}
First = false;
}
@@ -1987,8 +2323,9 @@ ASTReader::ReadASTBlock(PerFileData &F) {
return Failure;
}
-ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
- switch(ReadASTCore(FileName)) {
+ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
+ ASTFileType Type) {
+ switch(ReadASTCore(FileName, Type)) {
case Failure: return Failure;
case IgnorePCH: return IgnorePCH;
case Success: break;
@@ -1996,11 +2333,13 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
// Here comes stuff that we only do once the entire chain is loaded.
- // Allocate space for loaded identifiers, decls and types.
+ // Allocate space for loaded slocentries, identifiers, decls and types.
unsigned TotalNumIdentifiers = 0, TotalNumTypes = 0, TotalNumDecls = 0,
TotalNumPreallocatedPreprocessingEntities = 0, TotalNumMacroDefs = 0,
TotalNumSelectors = 0;
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ TotalNumSLocEntries += Chain[I]->LocalNumSLocEntries;
+ NextSLocOffset += Chain[I]->LocalSLocSize;
TotalNumIdentifiers += Chain[I]->LocalNumIdentifiers;
TotalNumTypes += Chain[I]->LocalNumTypes;
TotalNumDecls += Chain[I]->LocalNumDecls;
@@ -2009,6 +2348,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
TotalNumMacroDefs += Chain[I]->LocalNumMacroDefinitions;
TotalNumSelectors += Chain[I]->LocalNumSelectors;
}
+ SourceMgr.PreallocateSLocEntries(this, TotalNumSLocEntries, NextSLocOffset);
IdentifiersLoaded.resize(TotalNumIdentifiers);
TypesLoaded.resize(TotalNumTypes);
DeclsLoaded.resize(TotalNumDecls);
@@ -2024,6 +2364,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
}
}
SelectorsLoaded.resize(TotalNumSelectors);
+ // Preload SLocEntries.
+ for (unsigned I = 0, N = PreloadSLocEntries.size(); I != N; ++I) {
+ ASTReadResult Result = ReadSLocEntryRecord(PreloadSLocEntries[I]);
+ if (Result != Success)
+ return Result;
+ }
// Check the predefines buffers.
if (!DisableValidation && CheckPredefinesBuffers())
@@ -2058,7 +2404,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
for (unsigned I = 0, N = Identifiers.size(); I != N; ++I) {
IdentifierInfo *II = Identifiers[I];
// Look in the on-disk hash tables for an entry for this identifier
- ASTIdentifierLookupTrait Info(*this, Chain[J]->Stream, II);
+ ASTIdentifierLookupTrait Info(*this, *Chain[J], II);
std::pair<const char*,unsigned> Key(II->getNameStart(),II->getLength());
ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key, &Info);
if (Pos == IdTable->end())
@@ -2074,21 +2420,54 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName) {
if (Context)
InitializeContext(*Context);
+ if (DeserializationListener)
+ DeserializationListener->ReaderInitialized(this);
+
+ // If this AST file is a precompiled preamble, then set the main file ID of
+ // the source manager to the source file from which the preamble was
+ // built. This is the only valid way to use a precompiled preamble.
+ if (Type == Preamble) {
+ SourceLocation Loc
+ = SourceMgr.getLocation(FileMgr.getFile(getOriginalSourceFile()), 1, 1);
+ if (Loc.isValid()) {
+ std::pair<FileID, unsigned> Decomposed = SourceMgr.getDecomposedLoc(Loc);
+ SourceMgr.SetPreambleFileID(Decomposed.first);
+ }
+ }
+
return Success;
}
-ASTReader::ASTReadResult ASTReader::ReadASTCore(llvm::StringRef FileName) {
- Chain.push_back(new PerFileData());
+ASTReader::ASTReadResult ASTReader::ReadASTCore(llvm::StringRef FileName,
+ ASTFileType Type) {
+ PerFileData *Prev = Chain.empty() ? 0 : Chain.back();
+ Chain.push_back(new PerFileData(Type));
PerFileData &F = *Chain.back();
+ if (Prev)
+ Prev->NextInSource = &F;
+ else
+ FirstInSource = &F;
+ F.Loaders.push_back(Prev);
// Set the AST file name.
F.FileName = FileName;
+ if (FileName != "-") {
+ CurrentDir = llvm::sys::path::parent_path(FileName);
+ if (CurrentDir.empty()) CurrentDir = ".";
+ }
+
// Open the AST file.
//
// FIXME: This shouldn't be here, we should just take a raw_ostream.
std::string ErrStr;
- F.Buffer.reset(llvm::MemoryBuffer::getFileOrSTDIN(FileName, &ErrStr));
+ llvm::error_code ec;
+ if (FileName == "-") {
+ ec = llvm::MemoryBuffer::getSTDIN(F.Buffer);
+ if (ec)
+ ErrStr = ec.message();
+ } else
+ F.Buffer.reset(FileMgr.getBufferForFile(FileName, &ErrStr));
if (!F.Buffer) {
Error(ErrStr.c_str());
return IgnorePCH;
@@ -2185,6 +2564,18 @@ void ASTReader::InitializeContext(ASTContext &Ctx) {
PP->getIdentifierTable().setExternalIdentifierLookup(this);
PP->getHeaderSearchInfo().SetExternalLookup(this);
PP->setExternalSource(this);
+ PP->getHeaderSearchInfo().SetExternalSource(this);
+
+ // If we have an update block for the TU waiting, we have to add it before
+ // deserializing the decl.
+ DeclContextOffsetsMap::iterator DCU = DeclContextOffsets.find(0);
+ if (DCU != DeclContextOffsets.end()) {
+ // Insertion could invalidate the map, so grab the vector first.
+ DeclContextInfos T;
+ T.swap(DCU->second);
+ DeclContextOffsets.erase(DCU);
+ DeclContextOffsets[Ctx.getTranslationUnitDecl()].swap(T);
+ }
// Load the translation unit declaration
GetTranslationUnitDecl();
@@ -2273,17 +2664,27 @@ void ASTReader::InitializeContext(ASTContext &Ctx) {
if (SpecialTypes[SPECIAL_TYPE_INT128_INSTALLED])
Context->setInt128Installed();
+
+ ReadPragmaDiagnosticMappings(Context->getDiagnostics());
+
+ // If there were any CUDA special declarations, deserialize them.
+ if (!CUDASpecialDeclRefs.empty()) {
+ assert(CUDASpecialDeclRefs.size() == 1 && "More decl refs than expected!");
+ Context->setcudaConfigureCallDecl(
+ cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
+ }
}
/// \brief Retrieve the name of the original source file
/// directly from the AST file, without actually loading the AST
/// file.
std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
+ FileManager &FileMgr,
Diagnostic &Diags) {
// Open the AST file.
std::string ErrStr;
llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
- Buffer.reset(llvm::MemoryBuffer::getFile(ASTFileName.c_str(), &ErrStr));
+ Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
if (!Buffer) {
Diags.Report(diag::err_fe_unable_to_read_pch_file) << ErrStr;
return std::string();
@@ -2390,6 +2791,8 @@ bool ASTReader::ParseLanguageOptions(
PARSE_LANGOPT(ObjC2);
PARSE_LANGOPT(ObjCNonFragileABI);
PARSE_LANGOPT(ObjCNonFragileABI2);
+ PARSE_LANGOPT(AppleKext);
+ PARSE_LANGOPT(ObjCDefaultSynthProperties);
PARSE_LANGOPT(NoConstantCFStrings);
PARSE_LANGOPT(PascalStrings);
PARSE_LANGOPT(WritableStrings);
@@ -2397,6 +2800,8 @@ bool ASTReader::ParseLanguageOptions(
PARSE_LANGOPT(AltiVec);
PARSE_LANGOPT(Exceptions);
PARSE_LANGOPT(SjLjExceptions);
+ PARSE_LANGOPT(ObjCExceptions);
+ PARSE_LANGOPT(MSBitfields);
PARSE_LANGOPT(NeXTRuntime);
PARSE_LANGOPT(Freestanding);
PARSE_LANGOPT(NoBuiltin);
@@ -2417,13 +2822,16 @@ bool ASTReader::ParseLanguageOptions(
PARSE_LANGOPT(AccessControl);
PARSE_LANGOPT(CharIsSigned);
PARSE_LANGOPT(ShortWChar);
+ PARSE_LANGOPT(ShortEnums);
LangOpts.setGCMode((LangOptions::GCMode)Record[Idx++]);
- LangOpts.setVisibilityMode((LangOptions::VisibilityMode)Record[Idx++]);
+ LangOpts.setVisibilityMode((Visibility)Record[Idx++]);
LangOpts.setStackProtectorMode((LangOptions::StackProtectorMode)
Record[Idx++]);
PARSE_LANGOPT(InstantiationDepth);
PARSE_LANGOPT(OpenCL);
+ PARSE_LANGOPT(CUDA);
PARSE_LANGOPT(CatchUndefined);
+ PARSE_LANGOPT(DefaultFPContract);
// FIXME: Missing ElideConstructors?!
#undef PARSE_LANGOPT
@@ -2434,7 +2842,83 @@ bool ASTReader::ParseLanguageOptions(
}
void ASTReader::ReadPreprocessedEntities() {
- ReadDefinedMacros();
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ PerFileData &F = *Chain[I];
+ if (!F.PreprocessorDetailCursor.getBitStreamReader())
+ continue;
+
+ SavedStreamPosition SavedPosition(F.PreprocessorDetailCursor);
+ F.PreprocessorDetailCursor.JumpToBit(F.PreprocessorDetailStartOffset);
+ while (LoadPreprocessedEntity(F)) { }
+ }
+}
+
+PreprocessedEntity *ASTReader::ReadPreprocessedEntityAtOffset(uint64_t Offset) {
+ PerFileData *F = 0;
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Offset < Chain[I]->SizeInBits) {
+ F = Chain[I];
+ break;
+ }
+
+ Offset -= Chain[I]->SizeInBits;
+ }
+
+ if (!F) {
+ Error("Malformed preprocessed entity offset");
+ return 0;
+ }
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this entity.
+ SavedStreamPosition SavedPosition(F->PreprocessorDetailCursor);
+ F->PreprocessorDetailCursor.JumpToBit(Offset);
+ return LoadPreprocessedEntity(*F);
+}
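Editor's note: ReadPreprocessedEntityAtOffset above treats its argument as a global bit offset across the whole chain; each file either contains the offset or is skipped by subtracting its size. A standalone sketch of that resolution step (names invented):

#include <cstdint>
#include <vector>

// Resolve a global bit offset to the index of the owning file; on
// return, Offset has been reduced to a file-local offset. Returns -1
// when the offset lies past the end of the chain (malformed input).
int resolveGlobalOffset(const std::vector<uint64_t> &SizesInBits,
                        uint64_t &Offset) {
  for (unsigned I = 0, N = SizesInBits.size(); I != N; ++I) {
    if (Offset < SizesInBits[I])
      return static_cast<int>(I);
    Offset -= SizesInBits[I];
  }
  return -1;
}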
+
+HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
+ HeaderFileInfoTrait Trait(FE->getName());
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ PerFileData &F = *Chain[I];
+ HeaderFileInfoLookupTable *Table
+ = static_cast<HeaderFileInfoLookupTable *>(F.HeaderFileInfoTable);
+ if (!Table)
+ continue;
+
+ // Look in the on-disk hash table for an entry for this file name.
+ HeaderFileInfoLookupTable::iterator Pos = Table->find(FE->getName(),
+ &Trait);
+ if (Pos == Table->end())
+ continue;
+
+ HeaderFileInfo HFI = *Pos;
+ if (Listener)
+ Listener->ReadHeaderFileInfo(HFI, FE->getUID());
+
+ return HFI;
+ }
+
+ return HeaderFileInfo();
+}
+
+void ASTReader::ReadPragmaDiagnosticMappings(Diagnostic &Diag) {
+ unsigned Idx = 0;
+ while (Idx < PragmaDiagMappings.size()) {
+ SourceLocation
+ Loc = SourceLocation::getFromRawEncoding(PragmaDiagMappings[Idx++]);
+ while (1) {
+ assert(Idx < PragmaDiagMappings.size() &&
+ "Invalid data, didn't find '-1' marking end of diag/map pairs");
+ if (Idx >= PragmaDiagMappings.size())
+ break; // Something is messed up but at least avoid infinite loop in
+ // release build.
+ unsigned DiagID = PragmaDiagMappings[Idx++];
+ if (DiagID == (unsigned)-1)
+ break; // no more diag/map pairs for this location.
+ diag::Mapping Map = (diag::Mapping)PragmaDiagMappings[Idx++];
+ Diag.setDiagnosticMapping(DiagID, Map, Loc);
+ }
+ }
}
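Editor's note: ReadPragmaDiagnosticMappings decodes a flat record laid out as [location, (DiagID, Mapping)*, -1], repeated once per location. A small illustrative decoder for that layout (struct and function names are not Clang's):

#include <cstdint>
#include <vector>

struct DiagMapEntry { uint32_t RawLoc, DiagID, Mapping; };

// Decode [RawLoc, (DiagID, Mapping)*, -1]* -- the same layout walked by
// ReadPragmaDiagnosticMappings above, minus the assert.
std::vector<DiagMapEntry> decodeDiagMappings(
    const std::vector<uint32_t> &Record) {
  std::vector<DiagMapEntry> Out;
  unsigned Idx = 0;
  while (Idx < Record.size()) {
    uint32_t RawLoc = Record[Idx++];
    while (Idx < Record.size()) {
      uint32_t DiagID = Record[Idx++];
      if (DiagID == static_cast<uint32_t>(-1))
        break;                        // end of pairs for this location
      if (Idx >= Record.size())
        break;                        // truncated record; bail out
      Out.push_back({RawLoc, DiagID, Record[Idx++]});
    }
  }
  return Out;
}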
/// \brief Get the correct cursor and offset for loading a type.
@@ -2447,7 +2931,7 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
Index -= F->LocalNumTypes;
}
assert(F && F->LocalNumTypes > Index && "Broken chain");
- return RecordLocation(&F->DeclsCursor, F->TypeOffsets[Index]);
+ return RecordLocation(F, F->TypeOffsets[Index]);
}
/// \brief Read and return the type with the given index.
@@ -2458,7 +2942,7 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
/// IDs.
QualType ASTReader::ReadTypeRecord(unsigned Index) {
RecordLocation Loc = TypeCursorForIndex(Index);
- llvm::BitstreamCursor &DeclsCursor = *Loc.first;
+ llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
// Keep track of where we are in the stream, then jump back there
// after reading this type.
@@ -2469,7 +2953,7 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
// Note that we are loading a type record.
Deserializing AType(this);
- DeclsCursor.JumpToBit(Loc.second);
+ DeclsCursor.JumpToBit(Loc.Offset);
RecordData Record;
unsigned Code = DeclsCursor.ReadCode();
switch ((TypeCode)DeclsCursor.ReadRecord(Code, Record)) {
@@ -2535,6 +3019,9 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
QualType PointeeType = GetType(Record[0]);
QualType ClassType = GetType(Record[1]);
+ if (PointeeType.isNull() || ClassType.isNull())
+ return QualType();
+
return Context->getMemberPointerType(PointeeType, ClassType.getTypePtr());
}
@@ -2559,9 +3046,9 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
QualType ElementType = GetType(Record[0]);
ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
unsigned IndexTypeQuals = Record[2];
- SourceLocation LBLoc = SourceLocation::getFromRawEncoding(Record[3]);
- SourceLocation RBLoc = SourceLocation::getFromRawEncoding(Record[4]);
- return Context->getVariableArrayType(ElementType, ReadExpr(DeclsCursor),
+ SourceLocation LBLoc = ReadSourceLocation(*Loc.F, Record[3]);
+ SourceLocation RBLoc = ReadSourceLocation(*Loc.F, Record[4]);
+ return Context->getVariableArrayType(ElementType, ReadExpr(*Loc.F),
ASM, IndexTypeQuals,
SourceRange(LBLoc, RBLoc));
}
@@ -2574,9 +3061,9 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
QualType ElementType = GetType(Record[0]);
unsigned NumElements = Record[1];
- unsigned AltiVecSpec = Record[2];
+ unsigned VecKind = Record[2];
return Context->getVectorType(ElementType, NumElements,
- (VectorType::AltiVecSpecific)AltiVecSpec);
+ (VectorType::VectorKind)VecKind);
}
case TYPE_EXT_VECTOR: {
@@ -2602,28 +3089,30 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
case TYPE_FUNCTION_PROTO: {
QualType ResultType = GetType(Record[0]);
- bool NoReturn = Record[1];
- unsigned RegParm = Record[2];
- CallingConv CallConv = (CallingConv)Record[3];
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo(/*noreturn*/ Record[1],
+ /*regparm*/ Record[2],
+ static_cast<CallingConv>(Record[3]));
+
unsigned Idx = 4;
unsigned NumParams = Record[Idx++];
llvm::SmallVector<QualType, 16> ParamTypes;
for (unsigned I = 0; I != NumParams; ++I)
ParamTypes.push_back(GetType(Record[Idx++]));
- bool isVariadic = Record[Idx++];
- unsigned Quals = Record[Idx++];
- bool hasExceptionSpec = Record[Idx++];
- bool hasAnyExceptionSpec = Record[Idx++];
- unsigned NumExceptions = Record[Idx++];
+
+ EPI.Variadic = Record[Idx++];
+ EPI.TypeQuals = Record[Idx++];
+ EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
+ EPI.HasExceptionSpec = Record[Idx++];
+ EPI.HasAnyExceptionSpec = Record[Idx++];
+ EPI.NumExceptions = Record[Idx++];
llvm::SmallVector<QualType, 2> Exceptions;
- for (unsigned I = 0; I != NumExceptions; ++I)
+ for (unsigned I = 0; I != EPI.NumExceptions; ++I)
Exceptions.push_back(GetType(Record[Idx++]));
+ EPI.Exceptions = Exceptions.data();
return Context->getFunctionType(ResultType, ParamTypes.data(), NumParams,
- isVariadic, Quals, hasExceptionSpec,
- hasAnyExceptionSpec, NumExceptions,
- Exceptions.data(),
- FunctionType::ExtInfo(NoReturn, RegParm,
- CallConv));
+ EPI);
}
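Editor's note: the TYPE_FUNCTION_PROTO hunk replaces a long positional argument list with one ExtProtoInfo aggregate. A sketch of the same refactor under invented names -- the point is that a new field (here RefQualifier) no longer ripples through every caller of the type-construction function:

#include <cstdint>
#include <vector>

// Stand-in for FunctionProtoType::ExtProtoInfo; the fields mirror the
// ones filled in above, but the type itself is hypothetical.
struct ProtoOptions {
  bool Variadic = false;
  unsigned TypeQuals = 0;
  unsigned RefQualifier = 0;
  bool HasExceptionSpec = false;
  unsigned NumExceptions = 0;
};

// Fill the aggregate from a flat record, using the same Idx-walking
// style as ReadTypeRecord.
ProtoOptions readProtoOptions(const std::vector<uint64_t> &Record,
                              unsigned &Idx) {
  ProtoOptions EPI;
  EPI.Variadic = Record[Idx++];
  EPI.TypeQuals = Record[Idx++];
  EPI.RefQualifier = Record[Idx++];
  EPI.HasExceptionSpec = Record[Idx++];
  EPI.NumExceptions = Record[Idx++];
  return EPI;
}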
case TYPE_UNRESOLVED_USING:
@@ -2637,11 +3126,13 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
TypedefDecl *Decl = cast<TypedefDecl>(GetDecl(Record[0]));
QualType Canonical = GetType(Record[1]);
+ if (!Canonical.isNull())
+ Canonical = Context->getCanonicalType(Canonical);
return Context->getTypedefType(Decl, Canonical);
}
case TYPE_TYPEOF_EXPR:
- return Context->getTypeOfExprType(ReadExpr(DeclsCursor));
+ return Context->getTypeOfExprType(ReadExpr(*Loc.F));
case TYPE_TYPEOF: {
if (Record.size() != 1) {
@@ -2653,7 +3144,10 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
case TYPE_DECLTYPE:
- return Context->getDecltypeType(ReadExpr(DeclsCursor));
+ return Context->getDecltypeType(ReadExpr(*Loc.F));
+
+ case TYPE_AUTO:
+ return Context->getAutoType(GetType(Record[0]));
case TYPE_RECORD: {
if (Record.size() != 2) {
@@ -2662,7 +3156,7 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
bool IsDependent = Record[0];
QualType T = Context->getRecordType(cast<RecordDecl>(GetDecl(Record[1])));
- T->Dependent = IsDependent;
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
return T;
}
@@ -2673,10 +3167,44 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
bool IsDependent = Record[0];
QualType T = Context->getEnumType(cast<EnumDecl>(GetDecl(Record[1])));
- T->Dependent = IsDependent;
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
return T;
}
+ case TYPE_ATTRIBUTED: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of attributed type");
+ return QualType();
+ }
+ QualType modifiedType = GetType(Record[0]);
+ QualType equivalentType = GetType(Record[1]);
+ AttributedType::Kind kind = static_cast<AttributedType::Kind>(Record[2]);
+ return Context->getAttributedType(kind, modifiedType, equivalentType);
+ }
+
+ case TYPE_PAREN: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of paren type");
+ return QualType();
+ }
+ QualType InnerType = GetType(Record[0]);
+ return Context->getParenType(InnerType);
+ }
+
+ case TYPE_PACK_EXPANSION: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of pack expansion type");
+ return QualType();
+ }
+ QualType Pattern = GetType(Record[0]);
+ if (Pattern.isNull())
+ return QualType();
+ llvm::Optional<unsigned> NumExpansions;
+ if (Record[1])
+ NumExpansions = Record[1] - 1;
+ return Context->getPackExpansionType(Pattern, NumExpansions);
+ }
+
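Editor's note: TYPE_PACK_EXPANSION stores the optional expansion count biased by one, so a single record slot encodes both "absent" (0) and any count N (as N+1); the same trick reappears for TemplateExpansion arguments further down. A self-contained sketch using std::optional in place of llvm::Optional:

#include <cstdint>
#include <optional>

// Encode an optional count into one slot: 0 means "not present",
// anything else is the count plus one.
uint64_t encodeOptionalCount(std::optional<unsigned> N) {
  return N ? static_cast<uint64_t>(*N) + 1 : 0;
}

std::optional<unsigned> decodeOptionalCount(uint64_t Slot) {
  if (Slot == 0)
    return std::nullopt;
  return static_cast<unsigned>(Slot - 1);
}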
case TYPE_ELABORATED: {
unsigned Idx = 0;
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
@@ -2698,7 +3226,7 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
llvm::SmallVector<ObjCProtocolDecl*, 4> Protos;
for (unsigned I = 0; I != NumProtos; ++I)
Protos.push_back(cast<ObjCProtocolDecl>(GetDecl(Record[Idx++])));
- return Context->getObjCObjectType(Base, Protos.data(), NumProtos);
+ return Context->getObjCObjectType(Base, Protos.data(), NumProtos);
}
case TYPE_OBJC_OBJECT_POINTER: {
@@ -2716,6 +3244,15 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
Replacement);
}
+ case TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK: {
+ unsigned Idx = 0;
+ QualType Parm = GetType(Record[Idx++]);
+ TemplateArgument ArgPack = ReadTemplateArgument(*Loc.F, Record, Idx);
+ return Context->getSubstTemplateTypeParmPackType(
+ cast<TemplateTypeParmType>(Parm),
+ ArgPack);
+ }
+
case TYPE_INJECTED_CLASS_NAME: {
CXXRecordDecl *D = cast<CXXRecordDecl>(GetDecl(Record[0]));
QualType TST = GetType(Record[1]); // probably derivable
@@ -2724,7 +3261,7 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
return
QualType(new (*Context, TypeAlignment) InjectedClassNameType(D, TST), 0);
}
-
+
case TYPE_TEMPLATE_TYPE_PARM: {
unsigned Idx = 0;
unsigned Depth = Record[Idx++];
@@ -2733,16 +3270,18 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
IdentifierInfo *Name = GetIdentifierInfo(Record, Idx);
return Context->getTemplateTypeParmType(Depth, Index, Pack, Name);
}
-
+
case TYPE_DEPENDENT_NAME: {
unsigned Idx = 0;
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
const IdentifierInfo *Name = this->GetIdentifierInfo(Record, Idx);
QualType Canon = GetType(Record[Idx++]);
+ if (!Canon.isNull())
+ Canon = Context->getCanonicalType(Canon);
return Context->getDependentNameType(Keyword, NNS, Name, Canon);
}
-
+
case TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: {
unsigned Idx = 0;
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
@@ -2752,11 +3291,11 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
llvm::SmallVector<TemplateArgument, 8> Args;
Args.reserve(NumArgs);
while (NumArgs--)
- Args.push_back(ReadTemplateArgument(DeclsCursor, Record, Idx));
+ Args.push_back(ReadTemplateArgument(*Loc.F, Record, Idx));
return Context->getDependentTemplateSpecializationType(Keyword, NNS, Name,
Args.size(), Args.data());
}
-
+
case TYPE_DEPENDENT_SIZED_ARRAY: {
unsigned Idx = 0;
@@ -2767,8 +3306,8 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
unsigned IndexTypeQuals = Record[Idx++];
// DependentSizedArrayType
- Expr *NumElts = ReadExpr(DeclsCursor);
- SourceRange Brackets = ReadSourceRange(Record, Idx);
+ Expr *NumElts = ReadExpr(*Loc.F);
+ SourceRange Brackets = ReadSourceRange(*Loc.F, Record, Idx);
return Context->getDependentSizedArrayType(ElementType, NumElts, ASM,
IndexTypeQuals, Brackets);
@@ -2777,9 +3316,9 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
case TYPE_TEMPLATE_SPECIALIZATION: {
unsigned Idx = 0;
bool IsDependent = Record[Idx++];
- TemplateName Name = ReadTemplateName(Record, Idx);
+ TemplateName Name = ReadTemplateName(*Loc.F, Record, Idx);
llvm::SmallVector<TemplateArgument, 8> Args;
- ReadTemplateArgumentList(Args, DeclsCursor, Record, Idx);
+ ReadTemplateArgumentList(Args, *Loc.F, Record, Idx);
QualType Canon = GetType(Record[Idx++]);
QualType T;
if (Canon.isNull())
@@ -2788,7 +3327,7 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
else
T = Context->getTemplateSpecializationType(Name, Args.data(),
Args.size(), Canon);
- T->Dependent = IsDependent;
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
return T;
}
}
@@ -2796,18 +3335,23 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
return QualType();
}
-namespace {
-
-class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
+class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
ASTReader &Reader;
+ ASTReader::PerFileData &F;
llvm::BitstreamCursor &DeclsCursor;
const ASTReader::RecordData &Record;
unsigned &Idx;
+ SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
+ unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
public:
- TypeLocReader(ASTReader &Reader, llvm::BitstreamCursor &Cursor,
+ TypeLocReader(ASTReader &Reader, ASTReader::PerFileData &F,
const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(Reader), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
+ : Reader(Reader), F(F), DeclsCursor(F.DeclsCursor), Record(Record), Idx(Idx)
+ { }
// We want compile-time assurance that we've enumerated all of
// these, so unfortunately we have to declare them first, then
@@ -2821,13 +3365,11 @@ public:
void VisitArrayTypeLoc(ArrayTypeLoc);
};
-}
-
void TypeLocReader::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
// nothing to do
}
void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
- TL.setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setBuiltinLoc(ReadSourceLocation(Record, Idx));
if (TL.needsExtraLocalData()) {
TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Record[Idx++]));
TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Record[Idx++]));
@@ -2836,28 +3378,28 @@ void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
}
}
void TypeLocReader::VisitComplexTypeLoc(ComplexTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitPointerTypeLoc(PointerTypeLoc TL) {
- TL.setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
- TL.setCaretLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setCaretLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
- TL.setAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setAmpLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
- TL.setAmpAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setAmpAmpLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
- TL.setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
- TL.setLBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setLBracketLoc(ReadSourceLocation(Record, Idx));
+ TL.setRBracketLoc(ReadSourceLocation(Record, Idx));
if (Record[Idx++])
- TL.setSizeExpr(Reader.ReadExpr(DeclsCursor));
+ TL.setSizeExpr(Reader.ReadExpr(F));
else
TL.setSizeExpr(0);
}
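Editor's note: the TypeLocReader changes in this region all follow one pattern: raw SourceLocation::getFromRawEncoding calls become a private helper that routes through the owning file's data, so each visitor stays a one-liner while locations get remapped per AST file. A minimal sketch of that helper-binding shape; the names and the additive offset standing in for the real remapping are both invented:

#include <cstdint>
#include <vector>

struct FileState { uint32_t LocBase; }; // stand-in for PerFileData

// Bind per-file state once; visitor methods then call readLoc() and
// never mention the file again.
class LocReader {
  const FileState &F;
  const std::vector<uint32_t> &Record;
  unsigned &Idx;

  // Hypothetical remapping: apply the file's base to the raw encoding.
  uint32_t readLoc() { return F.LocBase + Record[Idx++]; }

public:
  LocReader(const FileState &F, const std::vector<uint32_t> &Record,
            unsigned &Idx)
      : F(F), Record(Record), Idx(Idx) {}

  uint32_t visitPointerStarLoc() { return readLoc(); }
};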
@@ -2876,17 +3418,18 @@ void TypeLocReader::VisitDependentSizedArrayTypeLoc(
}
void TypeLocReader::VisitDependentSizedExtVectorTypeLoc(
DependentSizedExtVectorTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
- TL.setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setTrailingReturn(Record[Idx++]);
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
TL.setArg(i, cast_or_null<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
}
@@ -2898,87 +3441,119 @@ void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
VisitFunctionTypeLoc(TL);
}
void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
- TL.setTypeofLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
- TL.setTypeofLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
}
void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ TL.setAttrNameLoc(ReadSourceLocation(Record, Idx));
+ if (TL.hasAttrOperand()) {
+ SourceRange range;
+ range.setBegin(ReadSourceLocation(Record, Idx));
+ range.setEnd(ReadSourceLocation(Record, Idx));
+ TL.setAttrOperandParensRange(range);
+ }
+ if (TL.hasAttrExprOperand()) {
+ if (Record[Idx++])
+ TL.setAttrExprOperand(Reader.ReadExpr(F));
+ else
+ TL.setAttrExprOperand(0);
+ } else if (TL.hasAttrEnumOperand())
+ TL.setAttrEnumOperandLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- TL.setTemplateNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setLAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
TL.setArgLocInfo(i,
- Reader.GetTemplateArgumentLocInfo(TL.getTypePtr()->getArg(i).getKind(),
- DeclsCursor, Record, Idx));
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(i).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- TL.setKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierRange(Reader.ReadSourceRange(F, Record, Idx));
}
void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- TL.setKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx));
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierRange(Reader.ReadSourceRange(F, Record, Idx));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- TL.setKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx));
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setLAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierRange(Reader.ReadSourceRange(F, Record, Idx));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
TL.setArgLocInfo(I,
- Reader.GetTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
- DeclsCursor, Record, Idx));
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(I).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ TL.setEllipsisLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
- TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
TL.setHasBaseTypeAsWritten(Record[Idx++]);
- TL.setLAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TL.setRAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- TL.setProtocolLoc(i, SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setProtocolLoc(i, ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
- TL.setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
}
-TypeSourceInfo *ASTReader::GetTypeSourceInfo(llvm::BitstreamCursor &DeclsCursor,
+TypeSourceInfo *ASTReader::GetTypeSourceInfo(PerFileData &F,
const RecordData &Record,
unsigned &Idx) {
QualType InfoTy = GetType(Record[Idx++]);
@@ -2986,7 +3561,7 @@ TypeSourceInfo *ASTReader::GetTypeSourceInfo(llvm::BitstreamCursor &DeclsCursor,
return 0;
TypeSourceInfo *TInfo = getContext()->CreateTypeSourceInfo(InfoTy);
- TypeLocReader TLR(*this, DeclsCursor, Record, Idx);
+ TypeLocReader TLR(*this, F, Record, Idx);
for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
TLR.Visit(TL);
return TInfo;
@@ -3043,6 +3618,9 @@ QualType ASTReader::GetType(TypeID ID) {
assert(Index < TypesLoaded.size() && "Type index out-of-range");
if (TypesLoaded[Index].isNull()) {
TypesLoaded[Index] = ReadTypeRecord(Index);
+ if (TypesLoaded[Index].isNull())
+ return QualType();
+
TypesLoaded[Index]->setFromAST();
TypeIdxs[TypesLoaded[Index]] = TypeIdx::fromTypeID(ID);
if (DeserializationListener)
@@ -3074,20 +3652,36 @@ TypeIdx ASTReader::GetTypeIdx(QualType T) const {
return I->second;
}
+unsigned ASTReader::getTotalNumCXXBaseSpecifiers() const {
+ unsigned Result = 0;
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I)
+ Result += Chain[I]->LocalNumCXXBaseSpecifiers;
+
+ return Result;
+}
+
TemplateArgumentLocInfo
-ASTReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
- llvm::BitstreamCursor &DeclsCursor,
+ASTReader::GetTemplateArgumentLocInfo(PerFileData &F,
+ TemplateArgument::ArgKind Kind,
const RecordData &Record,
unsigned &Index) {
switch (Kind) {
case TemplateArgument::Expression:
- return ReadExpr(DeclsCursor);
+ return ReadExpr(F);
case TemplateArgument::Type:
- return GetTypeSourceInfo(DeclsCursor, Record, Index);
+ return GetTypeSourceInfo(F, Record, Index);
case TemplateArgument::Template: {
- SourceRange QualifierRange = ReadSourceRange(Record, Index);
- SourceLocation TemplateNameLoc = ReadSourceLocation(Record, Index);
- return TemplateArgumentLocInfo(QualifierRange, TemplateNameLoc);
+ SourceRange QualifierRange = ReadSourceRange(F, Record, Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierRange, TemplateNameLoc,
+ SourceLocation());
+ }
+ case TemplateArgument::TemplateExpansion: {
+ SourceRange QualifierRange = ReadSourceRange(F, Record, Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierRange, TemplateNameLoc,
+ EllipsisLoc);
}
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -3100,16 +3694,15 @@ ASTReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
}
TemplateArgumentLoc
-ASTReader::ReadTemplateArgumentLoc(llvm::BitstreamCursor &DeclsCursor,
+ASTReader::ReadTemplateArgumentLoc(PerFileData &F,
const RecordData &Record, unsigned &Index) {
- TemplateArgument Arg = ReadTemplateArgument(DeclsCursor, Record, Index);
+ TemplateArgument Arg = ReadTemplateArgument(F, Record, Index);
if (Arg.getKind() == TemplateArgument::Expression) {
if (Record[Index++]) // bool InfoHasSameExpr.
return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr()));
}
- return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(Arg.getKind(),
- DeclsCursor,
+ return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(F, Arg.getKind(),
Record, Index));
}
@@ -3117,6 +3710,63 @@ Decl *ASTReader::GetExternalDecl(uint32_t ID) {
return GetDecl(ID);
}
+uint64_t
+ASTReader::GetCXXBaseSpecifiersOffset(serialization::CXXBaseSpecifiersID ID) {
+ if (ID == 0)
+ return 0;
+
+ --ID;
+ uint64_t Offset = 0;
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (ID < Chain[I]->LocalNumCXXBaseSpecifiers)
+ return Offset + Chain[I]->CXXBaseSpecifiersOffsets[ID];
+
+ ID -= Chain[I]->LocalNumCXXBaseSpecifiers;
+ Offset += Chain[I]->SizeInBits;
+ }
+
+ assert(false && "CXXBaseSpecifiers not found");
+ return 0;
+}
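Editor's note: GetCXXBaseSpecifiersOffset is the inverse of the offset resolution sketched earlier. IDs are 1-based (0 is reserved for "none"), and the owning file's local offset is rebased by the sizes of all preceding files. A standalone sketch:

#include <cstdint>
#include <vector>

struct FileTable {
  std::vector<uint32_t> LocalOffsets; // per-file offset table
  uint64_t SizeInBits;
};

// Map a 1-based global ID to a global offset; 0 is reserved for "none".
uint64_t globalOffsetForID(const std::vector<FileTable> &Chain,
                           uint64_t ID) {
  if (ID == 0)
    return 0;
  --ID;
  uint64_t Offset = 0;
  for (const FileTable &F : Chain) {
    if (ID < F.LocalOffsets.size())
      return Offset + F.LocalOffsets[ID];
    ID -= F.LocalOffsets.size();
    Offset += F.SizeInBits;
  }
  return 0; // not found; the real code asserts here
}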
+
+CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ // Figure out which AST file contains this offset.
+ PerFileData *F = 0;
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Offset < Chain[I]->SizeInBits) {
+ F = Chain[I];
+ break;
+ }
+
+ Offset -= Chain[I]->SizeInBits;
+ }
+
+ if (!F) {
+ Error("Malformed AST file: C++ base specifiers at impossible offset");
+ return 0;
+ }
+
+ llvm::BitstreamCursor &Cursor = F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record);
+ if (RecCode != DECL_CXX_BASE_SPECIFIERS) {
+ Error("Malformed AST file: missing C++ base specifiers");
+ return 0;
+ }
+
+ unsigned Idx = 0;
+ unsigned NumBases = Record[Idx++];
+ void *Mem = Context->Allocate(sizeof(CXXBaseSpecifier) * NumBases);
+ CXXBaseSpecifier *Bases = new (Mem) CXXBaseSpecifier [NumBases];
+ for (unsigned I = 0; I != NumBases; ++I)
+ Bases[I] = ReadCXXBaseSpecifier(*F, Record, Idx);
+ return Bases;
+}
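Editor's note: GetExternalCXXBaseSpecifiers allocates raw memory from the ASTContext and constructs the CXXBaseSpecifier array in place, so the objects share the context's lifetime. A hedged sketch of the placement-construction pattern using plain operator new, since the arena is the part Clang supplies:

#include <new>

struct BaseSpec { int Kind = 0; }; // stand-in for CXXBaseSpecifier

// Allocate raw storage, then construct each element in place. The
// caller owns cleanup here; in Clang, the ASTContext arena makes
// explicit destruction unnecessary for trivially-destructible payloads.
BaseSpec *allocateBases(unsigned NumBases) {
  void *Mem = ::operator new(sizeof(BaseSpec) * NumBases);
  BaseSpec *Bases = static_cast<BaseSpec *>(Mem);
  for (unsigned I = 0; I != NumBases; ++I)
    new (&Bases[I]) BaseSpec();
  return Bases;
}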
+
TranslationUnitDecl *ASTReader::GetTranslationUnitDecl() {
if (!DeclsLoaded[0]) {
ReadDeclRecord(0, 1);
@@ -3152,6 +3802,9 @@ Decl *ASTReader::GetDecl(DeclID ID) {
/// source each time it is called, and is meant to be used via a
/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
// Offset here is a global offset across the entire chain.
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
PerFileData &F = *Chain[N - I - 1];
@@ -3159,7 +3812,7 @@ Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
// Since we know that this statement is part of a decl, make sure to use
// the decl cursor to read it.
F.DeclsCursor.JumpToBit(Offset);
- return ReadStmtFromStream(F.DeclsCursor);
+ return ReadStmtFromStream(F);
}
Offset -= F.SizeInBits;
}
@@ -3167,13 +3820,16 @@ Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
}
bool ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
llvm::SmallVectorImpl<Decl*> &Decls) {
assert(DC->hasExternalLexicalStorage() &&
"DeclContext has no lexical decls in storage");
// There might be lexical decls in multiple parts of the chain, for the TU
// at least.
- DeclContextInfos &Infos = DeclContextOffsets[DC];
+ // DeclContextOffsets might reallocate as we load additional decls below,
+ // so make a copy of the vector.
+ DeclContextInfos Infos = DeclContextOffsets[DC];
for (DeclContextInfos::iterator I = Infos.begin(), E = Infos.end();
I != E; ++I) {
// IDs can be 0 if this context doesn't contain declarations.
@@ -3181,10 +3837,15 @@ bool ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
continue;
// Load all of the declaration IDs
- for (const DeclID *ID = I->LexicalDecls,
- *IDE = ID + I->NumLexicalDecls;
- ID != IDE; ++ID)
- Decls.push_back(GetDecl(*ID));
+ for (const KindDeclIDPair *ID = I->LexicalDecls,
+ *IDE = ID + I->NumLexicalDecls; ID != IDE; ++ID) {
+ if (isKindWeWant && !isKindWeWant((Decl::Kind)ID->first))
+ continue;
+
+ Decl *D = GetDecl(ID->second);
+ assert(D && "Null decl in lexical decls");
+ Decls.push_back(D);
+ }
}
++NumLexicalDeclContextsRead;
@@ -3355,11 +4016,11 @@ void ASTReader::InitializeSema(Sema &S) {
// Makes sure any declarations that were deserialized "too early"
// still get added to the identifier's declaration chains.
- if (SemaObj->TUScope) {
- for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
+ for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
+ if (SemaObj->TUScope)
SemaObj->TUScope->AddDecl(PreloadedDecls[I]);
- SemaObj->IdResolver.AddDecl(PreloadedDecls[I]);
- }
+
+ SemaObj->IdResolver.AddDecl(PreloadedDecls[I]);
}
PreloadedDecls.clear();
@@ -3377,21 +4038,6 @@ void ASTReader::InitializeSema(Sema &S) {
SemaObj->UnusedFileScopedDecls.push_back(D);
}
- // If there were any weak undeclared identifiers, deserialize them and add to
- // Sema's list of weak undeclared identifiers.
- if (!WeakUndeclaredIdentifiers.empty()) {
- unsigned Idx = 0;
- for (unsigned I = 0, N = WeakUndeclaredIdentifiers[Idx++]; I != N; ++I) {
- IdentifierInfo *WeakId = GetIdentifierInfo(WeakUndeclaredIdentifiers,Idx);
- IdentifierInfo *AliasId=GetIdentifierInfo(WeakUndeclaredIdentifiers,Idx);
- SourceLocation Loc = ReadSourceLocation(WeakUndeclaredIdentifiers, Idx);
- bool Used = WeakUndeclaredIdentifiers[Idx++];
- Sema::WeakInfo WI(AliasId, Loc);
- WI.setUsed(Used);
- SemaObj->WeakUndeclaredIdentifiers.insert(std::make_pair(WeakId, WI));
- }
- }
-
// If there were any locally-scoped external declarations,
// deserialize them and add them to Sema's table of locally-scoped
// external declarations.
@@ -3409,34 +4055,12 @@ void ASTReader::InitializeSema(Sema &S) {
// FIXME: Do VTable uses and dynamic classes deserialize too much ?
// Can we cut them down before writing them ?
- // If there were any VTable uses, deserialize the information and add it
- // to Sema's vector and map of VTable uses.
- if (!VTableUses.empty()) {
- unsigned Idx = 0;
- for (unsigned I = 0, N = VTableUses[Idx++]; I != N; ++I) {
- CXXRecordDecl *Class = cast<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
- SourceLocation Loc = ReadSourceLocation(VTableUses, Idx);
- bool DefinitionRequired = VTableUses[Idx++];
- SemaObj->VTableUses.push_back(std::make_pair(Class, Loc));
- SemaObj->VTablesUsed[Class] = DefinitionRequired;
- }
- }
-
// If there were any dynamic classes declarations, deserialize them
// and add them to Sema's vector of such declarations.
for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I)
SemaObj->DynamicClasses.push_back(
cast<CXXRecordDecl>(GetDecl(DynamicClasses[I])));
- // If there were any pending implicit instantiations, deserialize them
- // and add them to Sema's queue of such instantiations.
- assert(PendingInstantiations.size() % 2 == 0 && "Expected pairs of entries");
- for (unsigned Idx = 0, N = PendingInstantiations.size(); Idx < N;) {
- ValueDecl *D=cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
- SourceLocation Loc = ReadSourceLocation(PendingInstantiations, Idx);
- SemaObj->PendingInstantiations.push_back(std::make_pair(D, Loc));
- }
-
// Load the offsets of the declarations that Sema references.
// They will be lazily deserialized when needed.
if (!SemaDeclRefs.empty()) {
@@ -3445,18 +4069,76 @@ void ASTReader::InitializeSema(Sema &S) {
SemaObj->StdBadAlloc = SemaDeclRefs[1];
}
- // If there are @selector references added them to its pool. This is for
- // implementation of -Wselector.
- if (!ReferencedSelectorsData.empty()) {
- unsigned int DataSize = ReferencedSelectorsData.size()-1;
- unsigned I = 0;
- while (I < DataSize) {
- Selector Sel = DecodeSelector(ReferencedSelectorsData[I++]);
- SourceLocation SelLoc =
- SourceLocation::getFromRawEncoding(ReferencedSelectorsData[I++]);
- SemaObj->ReferencedSelectors.insert(std::make_pair(Sel, SelLoc));
+ for (PerFileData *F = FirstInSource; F; F = F->NextInSource) {
+
+ // If there are @selector references, add them to the pool. This is for
+ // implementation of -Wselector.
+ if (!F->ReferencedSelectorsData.empty()) {
+ unsigned int DataSize = F->ReferencedSelectorsData.size()-1;
+ unsigned I = 0;
+ while (I < DataSize) {
+ Selector Sel = DecodeSelector(F->ReferencedSelectorsData[I++]);
+ SourceLocation SelLoc = ReadSourceLocation(
+ *F, F->ReferencedSelectorsData, I);
+ SemaObj->ReferencedSelectors.insert(std::make_pair(Sel, SelLoc));
+ }
+ }
+
+ // If there were any pending implicit instantiations, deserialize them
+ // and add them to Sema's queue of such instantiations.
+ assert(F->PendingInstantiations.size() % 2 == 0 &&
+ "Expected pairs of entries");
+ for (unsigned Idx = 0, N = F->PendingInstantiations.size(); Idx < N;) {
+ ValueDecl *D=cast<ValueDecl>(GetDecl(F->PendingInstantiations[Idx++]));
+ SourceLocation Loc = ReadSourceLocation(*F, F->PendingInstantiations,Idx);
+ SemaObj->PendingInstantiations.push_back(std::make_pair(D, Loc));
+ }
+ }
+
+ // The two special data sets below always come from the most recent PCH,
+ // which is at the front of the chain.
+ PerFileData &F = *Chain.front();
+
+ // If there were any weak undeclared identifiers, deserialize them and add to
+ // Sema's list of weak undeclared identifiers.
+ if (!WeakUndeclaredIdentifiers.empty()) {
+ unsigned Idx = 0;
+ for (unsigned I = 0, N = WeakUndeclaredIdentifiers[Idx++]; I != N; ++I) {
+ IdentifierInfo *WeakId = GetIdentifierInfo(WeakUndeclaredIdentifiers,Idx);
+ IdentifierInfo *AliasId= GetIdentifierInfo(WeakUndeclaredIdentifiers,Idx);
+ SourceLocation Loc = ReadSourceLocation(F, WeakUndeclaredIdentifiers,Idx);
+ bool Used = WeakUndeclaredIdentifiers[Idx++];
+ Sema::WeakInfo WI(AliasId, Loc);
+ WI.setUsed(Used);
+ SemaObj->WeakUndeclaredIdentifiers.insert(std::make_pair(WeakId, WI));
+ }
+ }
+
+ // If there were any VTable uses, deserialize the information and add it
+ // to Sema's vector and map of VTable uses.
+ if (!VTableUses.empty()) {
+ unsigned Idx = 0;
+ for (unsigned I = 0, N = VTableUses[Idx++]; I != N; ++I) {
+ CXXRecordDecl *Class = cast<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
+ SourceLocation Loc = ReadSourceLocation(F, VTableUses, Idx);
+ bool DefinitionRequired = VTableUses[Idx++];
+ SemaObj->VTableUses.push_back(std::make_pair(Class, Loc));
+ SemaObj->VTablesUsed[Class] = DefinitionRequired;
}
}
+
+ if (!FPPragmaOptions.empty()) {
+ assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
+ SemaObj->FPFeatures.fp_contract = FPPragmaOptions[0];
+ }
+
+ if (!OpenCLExtensions.empty()) {
+ unsigned I = 0;
+#define OPENCLEXT(nm) SemaObj->OpenCLFeatures.nm = OpenCLExtensions[I++];
+#include "clang/Basic/OpenCLExtensions.def"
+
+ assert(OpenCLExtensions.size() == I && "Wrong number of OPENCL_EXTENSIONS");
+ }
}
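Editor's note: the OPENCLEXT block above is an X-macro: OpenCLExtensions.def lists each extension name once, and every includer defines OPENCLEXT to stamp out one line per name. A self-contained sketch with the list inlined instead of living in a .def file; the extension names here are only examples:

#include <iostream>

// Inlined stand-in for clang/Basic/OpenCLExtensions.def.
#define FOR_EACH_EXT(X) \
  X(cl_khr_fp16)        \
  X(cl_khr_fp64)

struct OpenCLFeatures {
#define OPENCLEXT(nm) bool nm = false; // one flag per listed extension
  FOR_EACH_EXT(OPENCLEXT)
#undef OPENCLEXT
};

int main() {
  OpenCLFeatures Features;
  const bool Record[] = {true, false}; // stand-in for the AST record
  unsigned I = 0;
#define OPENCLEXT(nm) Features.nm = Record[I++]; // same expansion as above
  FOR_EACH_EXT(OPENCLEXT)
#undef OPENCLEXT
  std::cout << Features.cl_khr_fp16 << ' ' << Features.cl_khr_fp64 << '\n';
}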
IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
@@ -3480,6 +4162,64 @@ IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
return 0;
}
+namespace clang {
+ /// \brief An identifier-lookup iterator that enumerates all of the
+ /// identifiers stored within a set of AST files.
+ class ASTIdentifierIterator : public IdentifierIterator {
+ /// \brief The AST reader whose identifiers are being enumerated.
+ const ASTReader &Reader;
+
+ /// \brief The current index into the chain of AST files stored in
+ /// the AST reader.
+ unsigned Index;
+
+ /// \brief The current position within the identifier lookup table
+ /// of the current AST file.
+ ASTIdentifierLookupTable::key_iterator Current;
+
+ /// \brief The end position within the identifier lookup table of
+ /// the current AST file.
+ ASTIdentifierLookupTable::key_iterator End;
+
+ public:
+ explicit ASTIdentifierIterator(const ASTReader &Reader);
+
+ virtual llvm::StringRef Next();
+ };
+}
+
+ASTIdentifierIterator::ASTIdentifierIterator(const ASTReader &Reader)
+ : Reader(Reader), Index(Reader.Chain.size() - 1) {
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.Chain[Index]->IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+}
+
+llvm::StringRef ASTIdentifierIterator::Next() {
+ while (Current == End) {
+ // If we have exhausted all of our AST files, we're done.
+ if (Index == 0)
+ return llvm::StringRef();
+
+ --Index;
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.Chain[Index]->IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+ }
+
+ // We have identifiers remaining in the current AST file; return
+ // the next one.
+ std::pair<const char*, unsigned> Key = *Current;
+ ++Current;
+ return llvm::StringRef(Key.first, Key.second);
+}
+
+IdentifierIterator *ASTReader::getIdentifiers() const {
+ return new ASTIdentifierIterator(*this);
+}
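Editor's note: ASTIdentifierIterator starts at the most recent AST file and falls back to earlier files whenever the current table is exhausted. A standalone sketch of that traversal over plain string tables (names and types are illustrative; like the original, it assumes a non-empty chain):

#include <string>
#include <vector>

// Walk a chain of tables newest-first; next() returns "" once every
// table is exhausted, mirroring ASTIdentifierIterator::Next().
class ChainedIterator {
  const std::vector<std::vector<std::string>> &Tables;
  size_t Index;   // current table, counting down toward 0
  size_t Pos = 0; // position within Tables[Index]

public:
  explicit ChainedIterator(const std::vector<std::vector<std::string>> &T)
      : Tables(T), Index(T.size() - 1) {}

  std::string next() {
    while (Pos == Tables[Index].size()) {
      if (Index == 0)
        return std::string(); // chain exhausted
      --Index;
      Pos = 0;
    }
    return Tables[Index][Pos++];
  }
};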
+
std::pair<ObjCMethodList, ObjCMethodList>
ASTReader::ReadMethodPool(Selector Sel) {
// Find this selector in a hash table. We want to find the most recent entry.
@@ -3545,8 +4285,7 @@ ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
PendingIdentifierInfos.push_back(PendingIdentifierInfo());
PendingIdentifierInfo &PII = PendingIdentifierInfos.back();
PII.II = II;
- for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I)
- PII.DeclIDs.push_back(DeclIDs[I]);
+ PII.DeclIDs.append(DeclIDs.begin(), DeclIDs.end());
return;
}
@@ -3558,8 +4297,8 @@ ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
// and add it to the declaration chain for this identifier, so
// that (unqualified) name lookup will find it.
SemaObj->TUScope->AddDecl(D);
- SemaObj->IdResolver.AddDeclToIdentifierChain(II, D);
}
+ SemaObj->IdResolver.AddDeclToIdentifierChain(II, D);
} else {
// Queue this declaration so that it will be added to the
// translation unit scope and identifier's declaration chain
@@ -3644,7 +4383,7 @@ Selector ASTReader::DecodeSelector(unsigned ID) {
return SelectorsLoaded[ID - 1];
}
-Selector ASTReader::GetExternalSelector(uint32_t ID) {
+Selector ASTReader::GetExternalSelector(uint32_t ID) {
return DecodeSelector(ID);
}
@@ -3693,9 +4432,65 @@ ASTReader::ReadDeclarationName(const RecordData &Record, unsigned &Idx) {
return DeclarationName();
}
+void ASTReader::ReadDeclarationNameLoc(PerFileData &F,
+ DeclarationNameLoc &DNLoc,
+ DeclarationName Name,
+ const RecordData &Record, unsigned &Idx) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ DNLoc.NamedType.TInfo = GetTypeSourceInfo(F, Record, Idx);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ DNLoc.CXXOperatorName.BeginOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ DNLoc.CXXLiteralOperatorName.OpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTReader::ReadDeclarationNameInfo(PerFileData &F,
+ DeclarationNameInfo &NameInfo,
+ const RecordData &Record, unsigned &Idx) {
+ NameInfo.setName(ReadDeclarationName(Record, Idx));
+ NameInfo.setLoc(ReadSourceLocation(F, Record, Idx));
+ DeclarationNameLoc DNLoc;
+ ReadDeclarationNameLoc(F, DNLoc, NameInfo.getName(), Record, Idx);
+ NameInfo.setInfo(DNLoc);
+}
+
+void ASTReader::ReadQualifierInfo(PerFileData &F, QualifierInfo &Info,
+ const RecordData &Record, unsigned &Idx) {
+ Info.NNS = ReadNestedNameSpecifier(Record, Idx);
+ Info.NNSRange = ReadSourceRange(F, Record, Idx);
+ unsigned NumTPLists = Record[Idx++];
+ Info.NumTemplParamLists = NumTPLists;
+ if (NumTPLists) {
+ Info.TemplParamLists = new (*Context) TemplateParameterList*[NumTPLists];
+ for (unsigned i=0; i != NumTPLists; ++i)
+ Info.TemplParamLists[i] = ReadTemplateParameterList(F, Record, Idx);
+ }
+}
+
TemplateName
-ASTReader::ReadTemplateName(const RecordData &Record, unsigned &Idx) {
- TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
+ASTReader::ReadTemplateName(PerFileData &F, const RecordData &Record,
+ unsigned &Idx) {
+ TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
switch (Kind) {
case TemplateName::Template:
return TemplateName(cast_or_null<TemplateDecl>(GetDecl(Record[Idx++])));
@@ -3708,14 +4503,14 @@ ASTReader::ReadTemplateName(const RecordData &Record, unsigned &Idx) {
return Context->getOverloadedTemplateName(Decls.begin(), Decls.end());
}
-
+
case TemplateName::QualifiedTemplate: {
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
bool hasTemplKeyword = Record[Idx++];
TemplateDecl *Template = cast<TemplateDecl>(GetDecl(Record[Idx++]));
return Context->getQualifiedTemplateName(NNS, hasTemplKeyword, Template);
}
-
+
case TemplateName::DependentTemplate: {
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
if (Record[Idx++]) // isIdentifier
@@ -3724,16 +4519,30 @@ ASTReader::ReadTemplateName(const RecordData &Record, unsigned &Idx) {
return Context->getDependentTemplateName(NNS,
(OverloadedOperatorKind)Record[Idx++]);
}
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ TemplateTemplateParmDecl *Param
+ = cast_or_null<TemplateTemplateParmDecl>(GetDecl(Record[Idx++]));
+ if (!Param)
+ return TemplateName();
+
+ TemplateArgument ArgPack = ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return TemplateName();
+
+ return Context->getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
}
-
+
assert(0 && "Unhandled template name kind!");
return TemplateName();
}
TemplateArgument
-ASTReader::ReadTemplateArgument(llvm::BitstreamCursor &DeclsCursor,
+ASTReader::ReadTemplateArgument(PerFileData &F,
const RecordData &Record, unsigned &Idx) {
- switch ((TemplateArgument::ArgKind)Record[Idx++]) {
+ TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
+ switch (Kind) {
case TemplateArgument::Null:
return TemplateArgument();
case TemplateArgument::Type:
@@ -3745,39 +4554,44 @@ ASTReader::ReadTemplateArgument(llvm::BitstreamCursor &DeclsCursor,
QualType T = GetType(Record[Idx++]);
return TemplateArgument(Value, T);
}
- case TemplateArgument::Template:
- return TemplateArgument(ReadTemplateName(Record, Idx));
+ case TemplateArgument::Template:
+ return TemplateArgument(ReadTemplateName(F, Record, Idx));
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName Name = ReadTemplateName(F, Record, Idx);
+ llvm::Optional<unsigned> NumTemplateExpansions;
+ if (unsigned NumExpansions = Record[Idx++])
+ NumTemplateExpansions = NumExpansions - 1;
+ return TemplateArgument(Name, NumTemplateExpansions);
+ }
case TemplateArgument::Expression:
- return TemplateArgument(ReadExpr(DeclsCursor));
+ return TemplateArgument(ReadExpr(F));
case TemplateArgument::Pack: {
unsigned NumArgs = Record[Idx++];
- llvm::SmallVector<TemplateArgument, 8> Args;
- Args.reserve(NumArgs);
- while (NumArgs--)
- Args.push_back(ReadTemplateArgument(DeclsCursor, Record, Idx));
- TemplateArgument TemplArg;
- TemplArg.setArgumentPack(Args.data(), Args.size(), /*CopyArgs=*/true);
- return TemplArg;
+ TemplateArgument *Args = new (*Context) TemplateArgument[NumArgs];
+ for (unsigned I = 0; I != NumArgs; ++I)
+ Args[I] = ReadTemplateArgument(F, Record, Idx);
+ return TemplateArgument(Args, NumArgs);
}
}
-
+
assert(0 && "Unhandled template argument kind!");
return TemplateArgument();
}
TemplateParameterList *
-ASTReader::ReadTemplateParameterList(const RecordData &Record, unsigned &Idx) {
- SourceLocation TemplateLoc = ReadSourceLocation(Record, Idx);
- SourceLocation LAngleLoc = ReadSourceLocation(Record, Idx);
- SourceLocation RAngleLoc = ReadSourceLocation(Record, Idx);
+ASTReader::ReadTemplateParameterList(PerFileData &F,
+ const RecordData &Record, unsigned &Idx) {
+ SourceLocation TemplateLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RAngleLoc = ReadSourceLocation(F, Record, Idx);
unsigned NumParams = Record[Idx++];
llvm::SmallVector<NamedDecl *, 16> Params;
Params.reserve(NumParams);
while (NumParams--)
Params.push_back(cast<NamedDecl>(GetDecl(Record[Idx++])));
-
- TemplateParameterList* TemplateParams =
+
+ TemplateParameterList* TemplateParams =
TemplateParameterList::Create(*Context, TemplateLoc, LAngleLoc,
Params.data(), Params.size(), RAngleLoc);
return TemplateParams;
@@ -3786,12 +4600,12 @@ ASTReader::ReadTemplateParameterList(const RecordData &Record, unsigned &Idx) {
void
ASTReader::
ReadTemplateArgumentList(llvm::SmallVector<TemplateArgument, 8> &TemplArgs,
- llvm::BitstreamCursor &DeclsCursor,
- const RecordData &Record, unsigned &Idx) {
+ PerFileData &F, const RecordData &Record,
+ unsigned &Idx) {
unsigned NumTemplateArgs = Record[Idx++];
TemplArgs.reserve(NumTemplateArgs);
while (NumTemplateArgs--)
- TemplArgs.push_back(ReadTemplateArgument(DeclsCursor, Record, Idx));
+ TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx));
}
/// \brief Read a UnresolvedSet structure.
@@ -3806,45 +4620,52 @@ void ASTReader::ReadUnresolvedSet(UnresolvedSetImpl &Set,
}
CXXBaseSpecifier
-ASTReader::ReadCXXBaseSpecifier(llvm::BitstreamCursor &DeclsCursor,
+ASTReader::ReadCXXBaseSpecifier(PerFileData &F,
const RecordData &Record, unsigned &Idx) {
bool isVirtual = static_cast<bool>(Record[Idx++]);
bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]);
- TypeSourceInfo *TInfo = GetTypeSourceInfo(DeclsCursor, Record, Idx);
- SourceRange Range = ReadSourceRange(Record, Idx);
- return CXXBaseSpecifier(Range, isVirtual, isBaseOfClass, AS, TInfo);
+ bool inheritConstructors = static_cast<bool>(Record[Idx++]);
+ TypeSourceInfo *TInfo = GetTypeSourceInfo(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
+ EllipsisLoc);
+ Result.setInheritConstructors(inheritConstructors);
+ return Result;
}
-std::pair<CXXBaseOrMemberInitializer **, unsigned>
-ASTReader::ReadCXXBaseOrMemberInitializers(llvm::BitstreamCursor &Cursor,
- const RecordData &Record,
- unsigned &Idx) {
- CXXBaseOrMemberInitializer **BaseOrMemberInitializers = 0;
+std::pair<CXXCtorInitializer **, unsigned>
+ASTReader::ReadCXXCtorInitializers(PerFileData &F, const RecordData &Record,
+ unsigned &Idx) {
+ CXXCtorInitializer **CtorInitializers = 0;
unsigned NumInitializers = Record[Idx++];
if (NumInitializers) {
ASTContext &C = *getContext();
- BaseOrMemberInitializers
- = new (C) CXXBaseOrMemberInitializer*[NumInitializers];
+ CtorInitializers
+ = new (C) CXXCtorInitializer*[NumInitializers];
for (unsigned i=0; i != NumInitializers; ++i) {
TypeSourceInfo *BaseClassInfo = 0;
bool IsBaseVirtual = false;
FieldDecl *Member = 0;
-
+ IndirectFieldDecl *IndirectMember = 0;
+
bool IsBaseInitializer = Record[Idx++];
if (IsBaseInitializer) {
- BaseClassInfo = GetTypeSourceInfo(Cursor, Record, Idx);
+ BaseClassInfo = GetTypeSourceInfo(F, Record, Idx);
IsBaseVirtual = Record[Idx++];
} else {
- Member = cast<FieldDecl>(GetDecl(Record[Idx++]));
+ bool IsIndirectMemberInitializer = Record[Idx++];
+ if (IsIndirectMemberInitializer)
+ IndirectMember = cast<IndirectFieldDecl>(GetDecl(Record[Idx++]));
+ else
+ Member = cast<FieldDecl>(GetDecl(Record[Idx++]));
}
- SourceLocation MemberLoc = ReadSourceLocation(Record, Idx);
- Expr *Init = ReadExpr(Cursor);
- FieldDecl *AnonUnionMember
- = cast_or_null<FieldDecl>(GetDecl(Record[Idx++]));
- SourceLocation LParenLoc = ReadSourceLocation(Record, Idx);
- SourceLocation RParenLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ Expr *Init = ReadExpr(F);
+ SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx);
bool IsWritten = Record[Idx++];
unsigned SourceOrderOrNumArrayIndices;
llvm::SmallVector<VarDecl *, 8> Indices;
@@ -3856,28 +4677,33 @@ ASTReader::ReadCXXBaseOrMemberInitializers(llvm::BitstreamCursor &Cursor,
for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
Indices.push_back(cast<VarDecl>(GetDecl(Record[Idx++])));
}
-
- CXXBaseOrMemberInitializer *BOMInit;
+
+ CXXCtorInitializer *BOMInit;
if (IsBaseInitializer) {
- BOMInit = new (C) CXXBaseOrMemberInitializer(C, BaseClassInfo,
- IsBaseVirtual, LParenLoc,
- Init, RParenLoc);
+ BOMInit = new (C) CXXCtorInitializer(C, BaseClassInfo, IsBaseVirtual,
+ LParenLoc, Init, RParenLoc,
+ MemberOrEllipsisLoc);
} else if (IsWritten) {
- BOMInit = new (C) CXXBaseOrMemberInitializer(C, Member, MemberLoc,
- LParenLoc, Init, RParenLoc);
+ if (Member)
+ BOMInit = new (C) CXXCtorInitializer(C, Member, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc);
+ else
+ BOMInit = new (C) CXXCtorInitializer(C, IndirectMember,
+ MemberOrEllipsisLoc, LParenLoc,
+ Init, RParenLoc);
} else {
- BOMInit = CXXBaseOrMemberInitializer::Create(C, Member, MemberLoc,
- LParenLoc, Init, RParenLoc,
- Indices.data(),
- Indices.size());
+ BOMInit = CXXCtorInitializer::Create(C, Member, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc,
+ Indices.data(), Indices.size());
}
- BOMInit->setAnonUnionMember(AnonUnionMember);
- BaseOrMemberInitializers[i] = BOMInit;
+ if (IsWritten)
+ BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices);
+ CtorInitializers[i] = BOMInit;
}
}
- return std::make_pair(BaseOrMemberInitializers, NumInitializers);
+ return std::make_pair(CtorInitializers, NumInitializers);
}
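// Sketch of the per-initializer record consumed above, inferred from the
// reads: IsBaseInitializer selects between a base-class TypeSourceInfo and
// a (possibly indirect) member decl; then come the member-or-ellipsis
// location, the init expression, the paren locations, and IsWritten. When
// IsWritten is set, the next value is the source order; otherwise it is the
// number of array-index VarDecls that follow (the implicitly-defined
// copy-constructor case).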
NestedNameSpecifier *
@@ -3902,7 +4728,10 @@ ASTReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) {
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
- Type *T = GetType(Record[Idx++]).getTypePtr();
+ const Type *T = GetType(Record[Idx++]).getTypePtrOrNull();
+ if (!T)
+ return 0;
+
bool Template = Record[Idx++];
NNS = NestedNameSpecifier::Create(*Context, Prev, Template, T);
break;
@@ -3920,9 +4749,10 @@ ASTReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) {
}
SourceRange
-ASTReader::ReadSourceRange(const RecordData &Record, unsigned &Idx) {
- SourceLocation beg = SourceLocation::getFromRawEncoding(Record[Idx++]);
- SourceLocation end = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ASTReader::ReadSourceRange(PerFileData &F, const RecordData &Record,
+ unsigned &Idx) {
+ SourceLocation beg = ReadSourceLocation(F, Record, Idx);
+ SourceLocation end = ReadSourceLocation(F, Record, Idx);
return SourceRange(beg, end);
}
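// Both endpoints now go through ReadSourceLocation(F, ...) rather than
// SourceLocation::getFromRawEncoding, presumably so that offsets local to
// one AST file can be remapped into the global source-location space when
// several files are chained (cf. the new NextSLocOffset member initialized
// in the constructors below).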
@@ -3965,7 +4795,7 @@ DiagnosticBuilder ASTReader::Diag(unsigned DiagID) {
}
DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) {
- return Diags.Report(FullSourceLoc(Loc, SourceMgr), DiagID);
+ return Diags.Report(Loc, DiagID);
}
/// \brief Retrieve the identifier table associated with the
@@ -3988,70 +4818,8 @@ SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
return SwitchCaseStmts[ID];
}
-/// \brief Record that the given label statement has been
-/// deserialized and has the given ID.
-void ASTReader::RecordLabelStmt(LabelStmt *S, unsigned ID) {
- assert(LabelStmts.find(ID) == LabelStmts.end() &&
- "Deserialized label twice");
- LabelStmts[ID] = S;
-
- // If we've already seen any goto statements that point to this
- // label, resolve them now.
- typedef std::multimap<unsigned, GotoStmt *>::iterator GotoIter;
- std::pair<GotoIter, GotoIter> Gotos = UnresolvedGotoStmts.equal_range(ID);
- for (GotoIter Goto = Gotos.first; Goto != Gotos.second; ++Goto)
- Goto->second->setLabel(S);
- UnresolvedGotoStmts.erase(Gotos.first, Gotos.second);
-
- // If we've already seen any address-label statements that point to
- // this label, resolve them now.
- typedef std::multimap<unsigned, AddrLabelExpr *>::iterator AddrLabelIter;
- std::pair<AddrLabelIter, AddrLabelIter> AddrLabels
- = UnresolvedAddrLabelExprs.equal_range(ID);
- for (AddrLabelIter AddrLabel = AddrLabels.first;
- AddrLabel != AddrLabels.second; ++AddrLabel)
- AddrLabel->second->setLabel(S);
- UnresolvedAddrLabelExprs.erase(AddrLabels.first, AddrLabels.second);
-}
-
-/// \brief Set the label of the given statement to the label
-/// identified by ID.
-///
-/// Depending on the order in which the label and other statements
-/// referencing that label occur, this operation may complete
-/// immediately (updating the statement) or it may queue the
-/// statement to be back-patched later.
-void ASTReader::SetLabelOf(GotoStmt *S, unsigned ID) {
- std::map<unsigned, LabelStmt *>::iterator Label = LabelStmts.find(ID);
- if (Label != LabelStmts.end()) {
- // We've already seen this label, so set the label of the goto and
- // we're done.
- S->setLabel(Label->second);
- } else {
- // We haven't seen this label yet, so add this goto to the set of
- // unresolved goto statements.
- UnresolvedGotoStmts.insert(std::make_pair(ID, S));
- }
-}
-
-/// \brief Set the label of the given expression to the label
-/// identified by ID.
-///
-/// Depending on the order in which the label and other statements
-/// referencing that label occur, this operation may complete
-/// immediately (updating the statement) or it may queue the
-/// statement to be back-patched later.
-void ASTReader::SetLabelOf(AddrLabelExpr *S, unsigned ID) {
- std::map<unsigned, LabelStmt *>::iterator Label = LabelStmts.find(ID);
- if (Label != LabelStmts.end()) {
- // We've already seen this label, so set the label of the
- // label-address expression and we're done.
- S->setLabel(Label->second);
- } else {
- // We haven't seen this label yet, so add this label-address
- // expression to the set of unresolved label-address expressions.
- UnresolvedAddrLabelExprs.insert(std::make_pair(ID, S));
- }
+void ASTReader::ClearSwitchCaseIDs() {
+ SwitchCaseStmts.clear();
}
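// The label back-patching machinery deleted above (RecordLabelStmt and the
// two SetLabelOf overloads) appears to be obsoleted by modeling labels as
// LabelDecls (see the new VisitLabelDecl in ASTReaderDecl.cpp below): goto
// statements and address-of-label expressions can then resolve through
// ordinary decl deserialization instead of a multimap of unresolved uses.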
void ASTReader::FinishedDeserializing() {
@@ -4066,43 +4834,57 @@ void ASTReader::FinishedDeserializing() {
PendingIdentifierInfos.pop_front();
}
+ // Ready to load previous declarations of Decls that were delayed.
+ while (!PendingPreviousDecls.empty()) {
+ loadAndAttachPreviousDecl(PendingPreviousDecls.front().first,
+ PendingPreviousDecls.front().second);
+ PendingPreviousDecls.pop_front();
+ }
+
// We are not in recursive loading, so it's safe to pass the "interesting"
// decls to the consumer.
if (Consumer)
PassInterestingDeclsToConsumer();
+
+ assert(PendingForwardRefs.size() == 0 &&
+ "Some forward refs did not get linked to the definition!");
}
--NumCurrentElementsDeserializing;
}
ASTReader::ASTReader(Preprocessor &PP, ASTContext *Context,
- const char *isysroot, bool DisableValidation)
+ const char *isysroot, bool DisableValidation,
+ bool DisableStatCache)
: Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
Diags(PP.getDiagnostics()), SemaObj(0), PP(&PP), Context(Context),
Consumer(0), isysroot(isysroot), DisableValidation(DisableValidation),
- NumStatHits(0), NumStatMisses(0), NumSLocEntriesRead(0),
- TotalNumSLocEntries(0), NumStatementsRead(0), TotalNumStatements(0),
- NumMacrosRead(0), TotalNumMacros(0), NumSelectorsRead(0),
- NumMethodPoolEntriesRead(0), NumMethodPoolMisses(0),
- TotalNumMethodPoolEntries(0), NumLexicalDeclContextsRead(0),
- TotalLexicalDeclContexts(0), NumVisibleDeclContextsRead(0),
- TotalVisibleDeclContexts(0), NumCurrentElementsDeserializing(0) {
+ DisableStatCache(DisableStatCache), NumStatHits(0), NumStatMisses(0),
+ NumSLocEntriesRead(0), TotalNumSLocEntries(0), NextSLocOffset(0),
+ NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
+ TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
+ NumMethodPoolMisses(0), TotalNumMethodPoolEntries(0),
+ NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
+ NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
+ NumCurrentElementsDeserializing(0)
+{
RelocatablePCH = false;
}
ASTReader::ASTReader(SourceManager &SourceMgr, FileManager &FileMgr,
Diagnostic &Diags, const char *isysroot,
- bool DisableValidation)
+ bool DisableValidation, bool DisableStatCache)
: DeserializationListener(0), SourceMgr(SourceMgr), FileMgr(FileMgr),
Diags(Diags), SemaObj(0), PP(0), Context(0), Consumer(0),
- isysroot(isysroot), DisableValidation(DisableValidation), NumStatHits(0),
- NumStatMisses(0), NumSLocEntriesRead(0), TotalNumSLocEntries(0),
- NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
- TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
- NumMethodPoolMisses(0), TotalNumMethodPoolEntries(0),
- NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
- NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
- NumCurrentElementsDeserializing(0) {
+ isysroot(isysroot), DisableValidation(DisableValidation),
+ DisableStatCache(DisableStatCache), NumStatHits(0), NumStatMisses(0),
+ NumSLocEntriesRead(0), TotalNumSLocEntries(0),
+ NextSLocOffset(0), NumStatementsRead(0), TotalNumStatements(0),
+ NumMacrosRead(0), TotalNumMacros(0), NumSelectorsRead(0),
+ NumMethodPoolEntriesRead(0), NumMethodPoolMisses(0),
+ TotalNumMethodPoolEntries(0), NumLexicalDeclContextsRead(0),
+ TotalLexicalDeclContexts(0), NumVisibleDeclContextsRead(0),
+ TotalVisibleDeclContexts(0), NumCurrentElementsDeserializing(0) {
RelocatablePCH = false;
}
@@ -4131,17 +4913,23 @@ ASTReader::~ASTReader() {
}
}
-ASTReader::PerFileData::PerFileData()
- : StatCache(0), LocalNumSLocEntries(0), LocalNumTypes(0), TypeOffsets(0),
- LocalNumDecls(0), DeclOffsets(0), LocalNumIdentifiers(0),
- IdentifierOffsets(0), IdentifierTableData(0), IdentifierLookupTable(0),
- LocalNumMacroDefinitions(0), MacroDefinitionOffsets(0),
- NumPreallocatedPreprocessingEntities(0), SelectorLookupTable(0),
- SelectorLookupTableData(0), SelectorOffsets(0), LocalNumSelectors(0)
+ASTReader::PerFileData::PerFileData(ASTFileType Ty)
+ : Type(Ty), SizeInBits(0), LocalNumSLocEntries(0), SLocOffsets(0), LocalSLocSize(0),
+ LocalNumIdentifiers(0), IdentifierOffsets(0), IdentifierTableData(0),
+ IdentifierLookupTable(0), LocalNumMacroDefinitions(0),
+ MacroDefinitionOffsets(0),
+ LocalNumHeaderFileInfos(0), HeaderFileInfoTableData(0),
+ HeaderFileInfoTable(0),
+ LocalNumSelectors(0), SelectorOffsets(0),
+ SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0),
+ DeclOffsets(0), LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0),
+ LocalNumTypes(0), TypeOffsets(0), StatCache(0),
+ NumPreallocatedPreprocessingEntities(0), NextInSource(0)
{}
ASTReader::PerFileData::~PerFileData() {
delete static_cast<ASTIdentifierLookupTable *>(IdentifierLookupTable);
+ delete static_cast<HeaderFileInfoLookupTable *>(HeaderFileInfoTable);
delete static_cast<ASTSelectorLookupTable *>(SelectorLookupTable);
}
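// The lookup tables are evidently stored as void* in PerFileData, so the
// destructor must cast back to the concrete table types
// (ASTIdentifierLookupTable, the new HeaderFileInfoLookupTable, and
// ASTSelectorLookupTable) before deleting; delete on a void* would not run
// the destructors.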
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
index 7adbe12..dec15dd 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ASTCommon.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -30,26 +31,60 @@ using namespace clang::serialization;
namespace clang {
class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
ASTReader &Reader;
+ ASTReader::PerFileData &F;
llvm::BitstreamCursor &Cursor;
const DeclID ThisDeclID;
- const ASTReader::RecordData &Record;
+ typedef ASTReader::RecordData RecordData;
+ const RecordData &Record;
unsigned &Idx;
TypeID TypeIDForTypeDecl;
uint64_t GetCurrentCursorOffset();
+ SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+ SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+ void ReadQualifierInfo(QualifierInfo &Info,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadQualifierInfo(F, Info, R, I);
+ }
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
+ void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &R, unsigned &I);
+ void InitializeCXXDefinitionData(CXXRecordDecl *D,
+ CXXRecordDecl *DefinitionDecl,
+ const RecordData &Record, unsigned &Idx);
public:
- ASTDeclReader(ASTReader &Reader, llvm::BitstreamCursor &Cursor,
- DeclID thisDeclID, const ASTReader::RecordData &Record,
- unsigned &Idx)
- : Reader(Reader), Cursor(Cursor), ThisDeclID(thisDeclID), Record(Record),
- Idx(Idx), TypeIDForTypeDecl(0) { }
+ ASTDeclReader(ASTReader &Reader, ASTReader::PerFileData &F,
+ llvm::BitstreamCursor &Cursor, DeclID thisDeclID,
+ const RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), Cursor(Cursor), ThisDeclID(thisDeclID),
+ Record(Record), Idx(Idx), TypeIDForTypeDecl(0) { }
+
+ static void attachPreviousDecl(Decl *D, Decl *previous);
void Visit(Decl *D);
+ void UpdateDecl(Decl *D, const RecordData &Record);
+
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
void VisitNamedDecl(NamedDecl *ND);
+ void VisitLabelDecl(LabelDecl *LD);
void VisitNamespaceDecl(NamespaceDecl *D);
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
@@ -75,6 +110,7 @@ namespace clang {
void VisitCXXDestructorDecl(CXXDestructorDecl *D);
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *FD);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
void VisitVarDecl(VarDecl *VD);
void VisitImplicitParamDecl(ImplicitParamDecl *PD);
void VisitParmVarDecl(ParmVarDecl *PD);
@@ -134,7 +170,7 @@ void ASTDeclReader::Visit(Decl *D) {
if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
// If we have a fully initialized TypeDecl, we can safely read its type now.
- TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtr());
+ TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
} else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// FunctionDecl's body was written last after all other Stmts/Exprs.
if (Record[Idx++])
@@ -146,17 +182,17 @@ void ASTDeclReader::VisitDecl(Decl *D) {
D->setDeclContext(cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++])));
D->setLexicalDeclContext(
cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++])));
- D->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setLocation(ReadSourceLocation(Record, Idx));
D->setInvalidDecl(Record[Idx++]);
- if (Record[Idx++]) {
+ if (Record[Idx++]) { // hasAttrs
AttrVec Attrs;
- Reader.ReadAttributes(Cursor, Attrs);
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
D->setAttrs(Attrs);
}
D->setImplicit(Record[Idx++]);
D->setUsed(Record[Idx++]);
D->setAccess((AccessSpecifier)Record[Idx++]);
- D->setPCHLevel(Record[Idx++] + 1);
+ D->setPCHLevel(Record[Idx++] + (F.Type <= ASTReader::PCH));
}
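// PCH-level bookkeeping: the stored level is now bumped only when the owning
// file's type compares <= ASTReader::PCH, i.e. (assuming that ordering of
// ASTFileType) only for precompiled inputs; decls read from other file kinds
// keep the raw recorded level. This refines the unconditional "+ 1" used
// previously.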
void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
@@ -178,29 +214,39 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
VisitTypeDecl(TD);
- TD->setTypeSourceInfo(Reader.GetTypeSourceInfo(Cursor, Record, Idx));
+ TD->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
}
void ASTDeclReader::VisitTagDecl(TagDecl *TD) {
VisitTypeDecl(TD);
- TD->IdentifierNamespace = Record[Idx++];
VisitRedeclarable(TD);
+ TD->IdentifierNamespace = Record[Idx++];
TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
TD->setDefinition(Record[Idx++]);
TD->setEmbeddedInDeclarator(Record[Idx++]);
- TD->setRBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TD->setTagKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- // FIXME: maybe read optional qualifier and its range.
- TD->setTypedefForAnonDecl(
- cast_or_null<TypedefDecl>(Reader.GetDecl(Record[Idx++])));
+ TD->setRBraceLoc(ReadSourceLocation(Record, Idx));
+ TD->setTagKeywordLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++]) { // hasExtInfo
+ TagDecl::ExtInfo *Info = new (*Reader.getContext()) TagDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ TD->TypedefDeclOrQualifier = Info;
+ } else
+ TD->setTypedefForAnonDecl(
+ cast_or_null<TypedefDecl>(Reader.GetDecl(Record[Idx++])));
}
void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
VisitTagDecl(ED);
- ED->setIntegerType(Reader.GetType(Record[Idx++]));
+ if (TypeSourceInfo *TI = Reader.GetTypeSourceInfo(F, Record, Idx))
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(Reader.GetType(Record[Idx++]));
ED->setPromotionType(Reader.GetType(Record[Idx++]));
ED->setNumPositiveBits(Record[Idx++]);
ED->setNumNegativeBits(Record[Idx++]);
+ ED->IsScoped = Record[Idx++];
+ ED->IsScopedUsingClassTag = Record[Idx++];
+ ED->IsFixed = Record[Idx++];
ED->setInstantiationOfMemberEnum(
cast_or_null<EnumDecl>(Reader.GetDecl(Record[Idx++])));
}
@@ -220,22 +266,27 @@ void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
VisitValueDecl(ECD);
if (Record[Idx++])
- ECD->setInitExpr(Reader.ReadExpr(Cursor));
+ ECD->setInitExpr(Reader.ReadExpr(F));
ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
}
void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
VisitValueDecl(DD);
- TypeSourceInfo *TInfo = Reader.GetTypeSourceInfo(Cursor, Record, Idx);
- if (TInfo)
- DD->setTypeSourceInfo(TInfo);
- // FIXME: read optional qualifier and its range.
+ if (Record[Idx++]) { // hasExtInfo
+ DeclaratorDecl::ExtInfo *Info
+ = new (*Reader.getContext()) DeclaratorDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ Info->TInfo = GetTypeSourceInfo(Record, Idx);
+ DD->DeclInfo = Info;
+ } else
+ DD->DeclInfo = GetTypeSourceInfo(Record, Idx);
}
void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
VisitDeclaratorDecl(FD);
- // FIXME: read DeclarationNameLoc.
+ VisitRedeclarable(FD);
+ ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName(), Record, Idx);
FD->IdentifierNamespace = Record[Idx++];
switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
default: assert(false && "Unhandled TemplatedKind!");
@@ -249,8 +300,8 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
case FunctionDecl::TK_MemberSpecialization: {
FunctionDecl *InstFD = cast<FunctionDecl>(Reader.GetDecl(Record[Idx++]));
TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
- FD->setInstantiationOfMemberFunction(InstFD, TSK);
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ FD->setInstantiationOfMemberFunction(*Reader.getContext(), InstFD, TSK);
FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
break;
}
@@ -261,7 +312,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// Template arguments.
llvm::SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, Cursor, Record, Idx);
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
// Template args as written.
llvm::SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
@@ -271,20 +322,45 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
TemplArgLocs.reserve(NumTemplateArgLocs);
for (unsigned i=0; i != NumTemplateArgLocs; ++i)
TemplArgLocs.push_back(
- Reader.ReadTemplateArgumentLoc(Cursor, Record, Idx));
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
- LAngleLoc = Reader.ReadSourceLocation(Record, Idx);
- RAngleLoc = Reader.ReadSourceLocation(Record, Idx);
+ LAngleLoc = ReadSourceLocation(Record, Idx);
+ RAngleLoc = ReadSourceLocation(Record, Idx);
}
- SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
-
- if (FD->isCanonicalDecl()) // if canonical add to template's set.
- FD->setFunctionTemplateSpecialization(Template, TemplArgs.size(),
- TemplArgs.data(), TSK,
- TemplArgLocs.size(),
- TemplArgLocs.data(),
- LAngleLoc, RAngleLoc, POI);
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+
+ ASTContext &C = *Reader.getContext();
+ TemplateArgumentList *TemplArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(), TemplArgs.size());
+ TemplateArgumentListInfo *TemplArgsInfo
+ = new (C) TemplateArgumentListInfo(LAngleLoc, RAngleLoc);
+ for (unsigned i=0, e = TemplArgLocs.size(); i != e; ++i)
+ TemplArgsInfo->addArgument(TemplArgLocs[i]);
+ FunctionTemplateSpecializationInfo *FTInfo
+ = FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK,
+ TemplArgList,
+ TemplArgsInfo, POI);
+ FD->TemplateOrSpecialization = FTInfo;
+
+ if (FD->isCanonicalDecl()) { // if canonical add to template's set.
+ // The template that contains the specializations set. It's not safe to
+ // use getCanonicalDecl on Template since it may still be initializing.
+ FunctionTemplateDecl *CanonTemplate
+ = cast<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]));
+ // Get the InsertPos by FindNodeOrInsertPos() instead of calling
+ // InsertNode(FTInfo) directly to avoid the getASTContext() call in
+ // FunctionTemplateSpecializationInfo's Profile().
+ // We avoid getASTContext because a decl in the parent hierarchy may
+ // be initializing.
+ llvm::FoldingSetNodeID ID;
+ FunctionTemplateSpecializationInfo::Profile(ID, TemplArgs.data(),
+ TemplArgs.size(), C);
+ void *InsertPos = 0;
+ CanonTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+ assert(InsertPos && "Another specialization already inserted!");
+ CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos);
+ }
break;
}
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
@@ -298,9 +374,9 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
TemplateArgumentListInfo TemplArgs;
unsigned NumArgs = Record[Idx++];
while (NumArgs--)
- TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(Cursor,Record, Idx));
- TemplArgs.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx));
- TemplArgs.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+ TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ TemplArgs.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TemplArgs.setRAngleLoc(ReadSourceLocation(Record, Idx));
FD->setDependentTemplateSpecialization(*Reader.getContext(),
TemplDecls, TemplArgs);
@@ -311,19 +387,18 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// FunctionDecl's body is handled last at ASTDeclReader::Visit,
// after everything else is read.
- VisitRedeclarable(FD);
- FD->setStorageClass((StorageClass)Record[Idx++]);
- FD->setStorageClassAsWritten((StorageClass)Record[Idx++]);
- FD->setInlineSpecified(Record[Idx++]);
- FD->setVirtualAsWritten(Record[Idx++]);
- FD->setPure(Record[Idx++]);
- FD->setHasInheritedPrototype(Record[Idx++]);
- FD->setHasWrittenPrototype(Record[Idx++]);
- FD->setDeleted(Record[Idx++]);
- FD->setTrivial(Record[Idx++]);
- FD->setCopyAssignment(Record[Idx++]);
- FD->setHasImplicitReturnZero(Record[Idx++]);
- FD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ FD->SClass = (StorageClass)Record[Idx++];
+ FD->SClassAsWritten = (StorageClass)Record[Idx++];
+ FD->IsInline = Record[Idx++];
+ FD->IsInlineSpecified = Record[Idx++];
+ FD->IsVirtualAsWritten = Record[Idx++];
+ FD->IsPure = Record[Idx++];
+ FD->HasInheritedPrototype = Record[Idx++];
+ FD->HasWrittenPrototype = Record[Idx++];
+ FD->IsDeleted = Record[Idx++];
+ FD->IsTrivial = Record[Idx++];
+ FD->HasImplicitReturnZero = Record[Idx++];
+ FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
// Read in the parameters.
unsigned NumParams = Record[Idx++];
@@ -331,7 +406,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
Params.reserve(NumParams);
for (unsigned I = 0; I != NumParams; ++I)
Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
- FD->setParams(Params.data(), NumParams);
+ FD->setParams(*Reader.getContext(), Params.data(), NumParams);
}
void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
@@ -339,7 +414,7 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
if (Record[Idx++]) {
// In practice, this won't be executed (since method definitions
// don't occur in header files).
- MD->setBody(Reader.ReadStmt(Cursor));
+ MD->setBody(Reader.ReadStmt(F));
MD->setSelfDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
MD->setCmdDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
}
@@ -351,8 +426,8 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
MD->setNumSelectorArgs(unsigned(Record[Idx++]));
MD->setResultType(Reader.GetType(Record[Idx++]));
- MD->setResultTypeSourceInfo(Reader.GetTypeSourceInfo(Cursor, Record, Idx));
- MD->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ MD->setResultTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ MD->setEndLoc(ReadSourceLocation(Record, Idx));
unsigned NumParams = Record[Idx++];
llvm::SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
@@ -364,14 +439,14 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
VisitNamedDecl(CD);
- SourceLocation A = SourceLocation::getFromRawEncoding(Record[Idx++]);
- SourceLocation B = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation A = ReadSourceLocation(Record, Idx);
+ SourceLocation B = ReadSourceLocation(Record, Idx);
CD->setAtEndRange(SourceRange(A, B));
}
void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
VisitObjCContainerDecl(ID);
- ID->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr());
+ ID->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtrOrNull());
ID->setSuperClass(cast_or_null<ObjCInterfaceDecl>
(Reader.GetDecl(Record[Idx++])));
@@ -384,7 +459,7 @@ void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
llvm::SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtocols);
for (unsigned I = 0; I != NumProtocols; ++I)
- ProtoLocs.push_back(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
*Reader.getContext());
@@ -409,9 +484,9 @@ void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
ID->setIvarList(0);
ID->setForwardDecl(Record[Idx++]);
ID->setImplicitInterfaceDecl(Record[Idx++]);
- ID->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- ID->setSuperClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- ID->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ID->setClassLoc(ReadSourceLocation(Record, Idx));
+ ID->setSuperClassLoc(ReadSourceLocation(Record, Idx));
+ ID->setLocEnd(ReadSourceLocation(Record, Idx));
}
void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
@@ -426,7 +501,7 @@ void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
VisitObjCContainerDecl(PD);
PD->setForwardDecl(Record[Idx++]);
- PD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ PD->setLocEnd(ReadSourceLocation(Record, Idx));
unsigned NumProtoRefs = Record[Idx++];
llvm::SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
ProtoRefs.reserve(NumProtoRefs);
@@ -435,7 +510,7 @@ void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
llvm::SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
*Reader.getContext());
}
@@ -454,7 +529,7 @@ void ASTDeclReader::VisitObjCClassDecl(ObjCClassDecl *CD) {
llvm::SmallVector<SourceLocation, 16> SLocs;
SLocs.reserve(NumClassRefs);
for (unsigned I = 0; I != NumClassRefs; ++I)
- SLocs.push_back(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ SLocs.push_back(ReadSourceLocation(Record, Idx));
CD->setClassList(*Reader.getContext(), ClassRefs.data(), SLocs.data(),
NumClassRefs);
}
@@ -469,7 +544,7 @@ void ASTDeclReader::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *FPD) {
llvm::SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
FPD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
*Reader.getContext());
}
@@ -485,13 +560,13 @@ void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
llvm::SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
*Reader.getContext());
CD->setNextClassCategory(cast_or_null<ObjCCategoryDecl>(Reader.GetDecl(Record[Idx++])));
CD->setHasSynthBitfield(Record[Idx++]);
- CD->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- CD->setCategoryNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ CD->setAtLoc(ReadSourceLocation(Record, Idx));
+ CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
}
void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
@@ -501,8 +576,8 @@ void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
- D->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- D->setType(Reader.GetTypeSourceInfo(Cursor, Record, Idx));
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setType(GetTypeSourceInfo(Record, Idx));
// FIXME: stable encoding
D->setPropertyAttributes(
(ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
@@ -537,27 +612,28 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
D->setSuperClass(
cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
- = Reader.ReadCXXBaseOrMemberInitializers(Cursor, Record, Idx);
+ = Reader.ReadCXXCtorInitializers(F, Record, Idx);
D->setHasSynthBitfield(Record[Idx++]);
}
void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
VisitDecl(D);
- D->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
D->setPropertyDecl(
cast_or_null<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
- D->setPropertyIvarDecl(
- cast_or_null<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
- D->setGetterCXXConstructor(Reader.ReadExpr(Cursor));
- D->setSetterCXXAssignment(Reader.ReadExpr(Cursor));
+ D->PropertyIvarDecl =
+ cast_or_null<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++]));
+ D->IvarLoc = ReadSourceLocation(Record, Idx);
+ D->setGetterCXXConstructor(Reader.ReadExpr(F));
+ D->setSetterCXXAssignment(Reader.ReadExpr(F));
}
void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
FD->setMutable(Record[Idx++]);
if (Record[Idx++])
- FD->setBitWidth(Reader.ReadExpr(Cursor));
+ FD->setBitWidth(Reader.ReadExpr(F));
if (!FD->getDeclName()) {
FieldDecl *Tmpl = cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++]));
if (Tmpl)
@@ -565,22 +641,33 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
}
}
+void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
+ VisitValueDecl(FD);
+
+ FD->ChainingSize = Record[Idx++];
+ assert(FD->ChainingSize >= 2 && "Anonymous chaining must be >= 2");
+ FD->Chaining = new (*Reader.getContext()) NamedDecl*[FD->ChainingSize];
+
+ for (unsigned I = 0; I != FD->ChainingSize; ++I)
+ FD->Chaining[I] = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+}
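// An IndirectFieldDecl names a field of an anonymous struct or union, so its
// chain must contain at least the anonymous object and the field itself;
// hence the ChainingSize >= 2 assertion above.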
+
void ASTDeclReader::VisitVarDecl(VarDecl *VD) {
VisitDeclaratorDecl(VD);
- VD->setStorageClass((StorageClass)Record[Idx++]);
+ VisitRedeclarable(VD);
+ VD->SClass = (StorageClass)Record[Idx++];
VD->setStorageClassAsWritten((StorageClass)Record[Idx++]);
VD->setThreadSpecified(Record[Idx++]);
VD->setCXXDirectInitializer(Record[Idx++]);
VD->setExceptionVariable(Record[Idx++]);
VD->setNRVOVariable(Record[Idx++]);
- VisitRedeclarable(VD);
if (Record[Idx++])
- VD->setInit(Reader.ReadExpr(Cursor));
+ VD->setInit(Reader.ReadExpr(F));
if (Record[Idx++]) { // HasMemberSpecializationInfo.
VarDecl *Tmpl = cast<VarDecl>(Reader.GetDecl(Record[Idx++]));
TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
Reader.getContext()->setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI);
}
}
@@ -594,24 +681,40 @@ void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
PD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
PD->setHasInheritedDefaultArg(Record[Idx++]);
if (Record[Idx++]) // hasUninstantiatedDefaultArg.
- PD->setUninstantiatedDefaultArg(Reader.ReadExpr(Cursor));
+ PD->setUninstantiatedDefaultArg(Reader.ReadExpr(F));
}
void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
VisitDecl(AD);
- AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr(Cursor)));
+ AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr(F)));
}
void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
VisitDecl(BD);
- BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt(Cursor)));
- BD->setSignatureAsWritten(Reader.GetTypeSourceInfo(Cursor, Record, Idx));
+ BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt(F)));
+ BD->setSignatureAsWritten(GetTypeSourceInfo(Record, Idx));
unsigned NumParams = Record[Idx++];
llvm::SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
for (unsigned I = 0; I != NumParams; ++I)
Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
BD->setParams(Params.data(), NumParams);
+
+ bool capturesCXXThis = Record[Idx++];
+ unsigned numCaptures = Record[Idx++];
+ llvm::SmallVector<BlockDecl::Capture, 16> captures;
+ captures.reserve(numCaptures);
+ for (unsigned i = 0; i != numCaptures; ++i) {
+ VarDecl *decl = cast<VarDecl>(Reader.GetDecl(Record[Idx++]));
+ unsigned flags = Record[Idx++];
+ bool byRef = (flags & 1);
+ bool nested = (flags & 2);
+ Expr *copyExpr = ((flags & 4) ? Reader.ReadExpr(F) : 0);
+
+ captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr));
+ }
+ BD->setCaptures(*Reader.getContext(), captures.begin(),
+ captures.end(), capturesCXXThis);
}
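// Capture flags are packed into a single record value, as the decoding above
// shows: bit 0 marks a by-reference capture, bit 1 a capture nested inside
// another block, and bit 2 indicates that a copy expression follows in the
// stream. This must mirror the packing done by ASTDeclWriter.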
void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
@@ -620,12 +723,17 @@ void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
D->setHasBraces(Record[Idx++]);
}
+void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+}
+
void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
VisitNamedDecl(D);
- D->setLBracLoc(Reader.ReadSourceLocation(Record, Idx));
- D->setRBracLoc(Reader.ReadSourceLocation(Record, Idx));
- D->setNextNamespace(
- cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
+ D->IsInline = Record[Idx++];
+ D->LBracLoc = ReadSourceLocation(Record, Idx);
+ D->RBracLoc = ReadSourceLocation(Record, Idx);
+ D->NextNamespace = Record[Idx++];
bool IsOriginal = Record[Idx++];
D->OrigOrAnonNamespace.setInt(IsOriginal);
@@ -635,29 +743,20 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
VisitNamedDecl(D);
- D->NamespaceLoc = Reader.ReadSourceLocation(Record, Idx);
- D->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->setQualifierRange(ReadSourceRange(Record, Idx));
D->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
- D->IdentLoc = Reader.ReadSourceLocation(Record, Idx);
+ D->IdentLoc = ReadSourceLocation(Record, Idx);
D->Namespace = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
}
void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
VisitNamedDecl(D);
- D->setUsingLocation(Reader.ReadSourceLocation(Record, Idx));
- D->setNestedNameRange(Reader.ReadSourceRange(Record, Idx));
+ D->setUsingLocation(ReadSourceLocation(Record, Idx));
+ D->setNestedNameRange(ReadSourceRange(Record, Idx));
D->setTargetNestedNameDecl(Reader.ReadNestedNameSpecifier(Record, Idx));
- // FIXME: read the DNLoc component.
-
- // FIXME: It would probably be more efficient to read these into a vector
- // and then re-construct the shadow decl set over that vector since it
- // would avoid existence checks.
- unsigned NumShadows = Record[Idx++];
- for(unsigned I = 0; I != NumShadows; ++I) {
- // Avoid invariant checking of UsingDecl::addShadowDecl, the decl may still
- // be initializing.
- D->Shadows.insert(cast<UsingShadowDecl>(Reader.GetDecl(Record[Idx++])));
- }
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ D->FirstUsingShadow = cast_or_null<UsingShadowDecl>(Reader.GetDecl(Record[Idx++]));
D->setTypeName(Record[Idx++]);
NamedDecl *Pattern = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
if (Pattern)
@@ -667,7 +766,7 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitNamedDecl(D);
D->setTargetDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
- D->setUsingDecl(cast<UsingDecl>(Reader.GetDecl(Record[Idx++])));
+ D->UsingOrNextShadow = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
UsingShadowDecl *Pattern
= cast_or_null<UsingShadowDecl>(Reader.GetDecl(Record[Idx++]));
if (Pattern)
@@ -676,9 +775,9 @@ void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
VisitNamedDecl(D);
- D->UsingLoc = Reader.ReadSourceLocation(Record, Idx);
- D->NamespaceLoc = Reader.ReadSourceLocation(Record, Idx);
- D->QualifierRange = Reader.ReadSourceRange(Record, Idx);
+ D->UsingLoc = ReadSourceLocation(Record, Idx);
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->QualifierRange = ReadSourceRange(Record, Idx);
D->Qualifier = Reader.ReadNestedNameSpecifier(Record, Idx);
D->NominatedNamespace = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
D->CommonAncestor = cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++]));
@@ -686,88 +785,99 @@ void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
VisitValueDecl(D);
- D->setTargetNestedNameRange(Reader.ReadSourceRange(Record, Idx));
- D->setUsingLoc(Reader.ReadSourceLocation(Record, Idx));
+ D->setTargetNestedNameRange(ReadSourceRange(Record, Idx));
+ D->setUsingLoc(ReadSourceLocation(Record, Idx));
D->setTargetNestedNameSpecifier(Reader.ReadNestedNameSpecifier(Record, Idx));
- // FIXME: read the DNLoc component.
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
}
void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
VisitTypeDecl(D);
- D->TargetNestedNameRange = Reader.ReadSourceRange(Record, Idx);
- D->UsingLocation = Reader.ReadSourceLocation(Record, Idx);
- D->TypenameLocation = Reader.ReadSourceLocation(Record, Idx);
+ D->TargetNestedNameRange = ReadSourceRange(Record, Idx);
+ D->UsingLocation = ReadSourceLocation(Record, Idx);
+ D->TypenameLocation = ReadSourceLocation(Record, Idx);
D->TargetNestedNameSpecifier = Reader.ReadNestedNameSpecifier(Record, Idx);
}
-void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
+void ASTDeclReader::ReadCXXDefinitionData(
+ struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &Record, unsigned &Idx) {
+ Data.UserDeclaredConstructor = Record[Idx++];
+ Data.UserDeclaredCopyConstructor = Record[Idx++];
+ Data.UserDeclaredCopyAssignment = Record[Idx++];
+ Data.UserDeclaredDestructor = Record[Idx++];
+ Data.Aggregate = Record[Idx++];
+ Data.PlainOldData = Record[Idx++];
+ Data.Empty = Record[Idx++];
+ Data.Polymorphic = Record[Idx++];
+ Data.Abstract = Record[Idx++];
+ Data.HasTrivialConstructor = Record[Idx++];
+ Data.HasTrivialCopyConstructor = Record[Idx++];
+ Data.HasTrivialCopyAssignment = Record[Idx++];
+ Data.HasTrivialDestructor = Record[Idx++];
+ Data.ComputedVisibleConversions = Record[Idx++];
+ Data.DeclaredDefaultConstructor = Record[Idx++];
+ Data.DeclaredCopyConstructor = Record[Idx++];
+ Data.DeclaredCopyAssignment = Record[Idx++];
+ Data.DeclaredDestructor = Record[Idx++];
+
+ Data.NumBases = Record[Idx++];
+ if (Data.NumBases)
+ Data.Bases = Reader.GetCXXBaseSpecifiersOffset(Record[Idx++]);
+ Data.NumVBases = Record[Idx++];
+ if (Data.NumVBases)
+ Data.VBases = Reader.GetCXXBaseSpecifiersOffset(Record[Idx++]);
+
+ Reader.ReadUnresolvedSet(Data.Conversions, Record, Idx);
+ Reader.ReadUnresolvedSet(Data.VisibleConversions, Record, Idx);
+ assert(Data.Definition && "Data.Definition should be already set!");
+ Data.FirstFriend
+ = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++]));
+}
+
+void ASTDeclReader::InitializeCXXDefinitionData(CXXRecordDecl *D,
+ CXXRecordDecl *DefinitionDecl,
+ const RecordData &Record,
+ unsigned &Idx) {
ASTContext &C = *Reader.getContext();
- // We need to allocate the DefinitionData struct ahead of VisitRecordDecl
- // so that the other CXXRecordDecls can get a pointer even when the owner
- // is still initializing.
- bool OwnsDefinitionData = false;
- enum DataOwnership { Data_NoDefData, Data_Owner, Data_NotOwner };
- switch ((DataOwnership)Record[Idx++]) {
- default:
- assert(0 && "Out of sync with ASTDeclWriter or messed up reading");
- case Data_NoDefData:
- break;
- case Data_Owner:
- OwnsDefinitionData = true;
+ if (D == DefinitionDecl) {
D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
- break;
- case Data_NotOwner:
- D->DefinitionData
- = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]))->DefinitionData;
- break;
+ ReadCXXDefinitionData(*D->DefinitionData, Record, Idx);
+ // We read the definition info. Check if there are pending forward
+ // references that need to point to this DefinitionData pointer.
+ ASTReader::PendingForwardRefsMap::iterator
+ FindI = Reader.PendingForwardRefs.find(D);
+ if (FindI != Reader.PendingForwardRefs.end()) {
+ ASTReader::ForwardRefs &Refs = FindI->second;
+ for (ASTReader::ForwardRefs::iterator
+ I = Refs.begin(), E = Refs.end(); I != E; ++I)
+ (*I)->DefinitionData = D->DefinitionData;
+#ifndef NDEBUG
+ // We later check whether PendingForwardRefs is empty to make sure all
+ // pending references were linked.
+ Reader.PendingForwardRefs.erase(D);
+#endif
+ }
+ } else if (DefinitionDecl) {
+ if (DefinitionDecl->DefinitionData) {
+ D->DefinitionData = DefinitionDecl->DefinitionData;
+ } else {
+ // The definition is still initializing.
+ Reader.PendingForwardRefs[DefinitionDecl].push_back(D);
+ }
}
+}
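// Worked example of the forward-reference patching above: if a forward
// declaration of class A is deserialized before A's defining declaration,
// it is queued in PendingForwardRefs keyed by that definition. Once the
// definition's DefinitionData has been read, every queued declaration is
// patched to share the same pointer, and FinishedDeserializing() asserts
// that the map ends up empty.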
+void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
- if (OwnsDefinitionData) {
- assert(D->DefinitionData);
- struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
-
- Data.UserDeclaredConstructor = Record[Idx++];
- Data.UserDeclaredCopyConstructor = Record[Idx++];
- Data.UserDeclaredCopyAssignment = Record[Idx++];
- Data.UserDeclaredDestructor = Record[Idx++];
- Data.Aggregate = Record[Idx++];
- Data.PlainOldData = Record[Idx++];
- Data.Empty = Record[Idx++];
- Data.Polymorphic = Record[Idx++];
- Data.Abstract = Record[Idx++];
- Data.HasTrivialConstructor = Record[Idx++];
- Data.HasTrivialCopyConstructor = Record[Idx++];
- Data.HasTrivialCopyAssignment = Record[Idx++];
- Data.HasTrivialDestructor = Record[Idx++];
- Data.ComputedVisibleConversions = Record[Idx++];
- Data.DeclaredDefaultConstructor = Record[Idx++];
- Data.DeclaredCopyConstructor = Record[Idx++];
- Data.DeclaredCopyAssignment = Record[Idx++];
- Data.DeclaredDestructor = Record[Idx++];
-
- // setBases() is unsuitable since it may try to iterate the bases of an
- // uninitialized base.
- Data.NumBases = Record[Idx++];
- Data.Bases = new(C) CXXBaseSpecifier [Data.NumBases];
- for (unsigned i = 0; i != Data.NumBases; ++i)
- Data.Bases[i] = Reader.ReadCXXBaseSpecifier(Cursor, Record, Idx);
-
- // FIXME: Make VBases lazily computed when needed to avoid storing them.
- Data.NumVBases = Record[Idx++];
- Data.VBases = new(C) CXXBaseSpecifier [Data.NumVBases];
- for (unsigned i = 0; i != Data.NumVBases; ++i)
- Data.VBases[i] = Reader.ReadCXXBaseSpecifier(Cursor, Record, Idx);
-
- Reader.ReadUnresolvedSet(Data.Conversions, Record, Idx);
- Reader.ReadUnresolvedSet(Data.VisibleConversions, Record, Idx);
- assert(Data.Definition && "Data.Definition should be already set!");
- Data.FirstFriend
- = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++]));
- }
+ CXXRecordDecl *DefinitionDecl
+ = cast_or_null<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]));
+ InitializeCXXDefinitionData(D, DefinitionDecl, Record, Idx);
+
+ ASTContext &C = *Reader.getContext();
enum CXXRecKind {
CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
@@ -778,18 +888,28 @@ void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
case CXXRecNotTemplate:
break;
case CXXRecTemplate:
- D->setDescribedClassTemplate(
- cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++])));
+ D->TemplateOrInstantiation
+ = cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]));
break;
case CXXRecMemberSpecialization: {
CXXRecordDecl *RD = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]));
TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
- D->setInstantiationOfMemberClass(RD, TSK);
- D->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK);
+ MSI->setPointOfInstantiation(POI);
+ D->TemplateOrInstantiation = MSI;
break;
}
}
+
+ // Load the key function to avoid deserializing every method so we can
+ // compute it.
+ if (D->IsDefinition) {
+ CXXMethodDecl *Key
+ = cast_or_null<CXXMethodDecl>(Reader.GetDecl(Record[Idx++]));
+ if (Key)
+ C.KeyFunctions[D] = Key;
+ }
}
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
@@ -808,8 +928,8 @@ void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
D->IsExplicitSpecified = Record[Idx++];
D->ImplicitlyDefined = Record[Idx++];
- llvm::tie(D->BaseOrMemberInitializers, D->NumBaseOrMemberInitializers)
- = Reader.ReadCXXBaseOrMemberInitializers(Cursor, Record, Idx);
+ llvm::tie(D->CtorInitializers, D->NumCtorInitializers)
+ = Reader.ReadCXXCtorInitializers(F, Record, Idx);
}
void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
@@ -826,17 +946,18 @@ void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
VisitDecl(D);
- D->setColonLoc(Reader.ReadSourceLocation(Record, Idx));
+ D->setColonLoc(ReadSourceLocation(Record, Idx));
}
void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
VisitDecl(D);
if (Record[Idx++])
- D->Friend = Reader.GetTypeSourceInfo(Cursor, Record, Idx);
+ D->Friend = GetTypeSourceInfo(Record, Idx);
else
D->Friend = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
- D->NextFriend = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++]));
- D->FriendLoc = Reader.ReadSourceLocation(Record, Idx);
+ D->NextFriend = Record[Idx++];
+ D->UnsupportedFriend = (Record[Idx++] != 0);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
}
void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
@@ -845,12 +966,12 @@ void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
D->NumParams = NumParams;
D->Params = new TemplateParameterList*[NumParams];
for (unsigned i = 0; i != NumParams; ++i)
- D->Params[i] = Reader.ReadTemplateParameterList(Record, Idx);
+ D->Params[i] = Reader.ReadTemplateParameterList(F, Record, Idx);
if (Record[Idx++]) // HasFriendDecl
D->Friend = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
else
- D->Friend = Reader.GetTypeSourceInfo(Cursor, Record, Idx);
- D->FriendLoc = Reader.ReadSourceLocation(Record, Idx);
+ D->Friend = GetTypeSourceInfo(Record, Idx);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
}
void ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
@@ -859,21 +980,32 @@ void ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
NamedDecl *TemplatedDecl
= cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
TemplateParameterList* TemplateParams
- = Reader.ReadTemplateParameterList(Record, Idx);
+ = Reader.ReadTemplateParameterList(F, Record, Idx);
D->init(TemplatedDecl, TemplateParams);
}
void ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
- VisitTemplateDecl(D);
-
- D->IdentifierNamespace = Record[Idx++];
- RedeclarableTemplateDecl *PrevDecl =
- cast_or_null<RedeclarableTemplateDecl>(Reader.GetDecl(Record[Idx++]));
- assert((PrevDecl == 0 || PrevDecl->getKind() == D->getKind()) &&
- "PrevDecl kind mismatch");
- if (PrevDecl)
- D->CommonOrPrev = PrevDecl;
- if (PrevDecl == 0) {
+ // Initialize CommonOrPrev before VisitTemplateDecl so that getCommonPtr()
+ // can be used while this is still initializing.
+
+ assert(D->CommonOrPrev.isNull() && "getCommonPtr was called earlier on this");
+ DeclID PreviousDeclID = Record[Idx++];
+ DeclID FirstDeclID = PreviousDeclID ? Record[Idx++] : 0;
+ // We delay loading of the redeclaration chain to avoid deeply nested calls.
+ // We temporarily set the first (canonical) declaration as the previous one
+ // which is the one that matters and mark the real previous DeclID to be
+ // loaded & attached later on.
+ RedeclarableTemplateDecl *FirstDecl =
+ cast_or_null<RedeclarableTemplateDecl>(Reader.GetDecl(FirstDeclID));
+ assert((FirstDecl == 0 || FirstDecl->getKind() == D->getKind()) &&
+ "FirstDecl kind mismatch");
+ if (FirstDecl) {
+ D->CommonOrPrev = FirstDecl;
+ // Mark the real previous DeclID to be loaded & attached later on.
+ if (PreviousDeclID != FirstDeclID)
+ Reader.PendingPreviousDecls.push_back(std::make_pair(D, PreviousDeclID));
+ } else {
+ D->CommonOrPrev = D->newCommon(*Reader.getContext());
if (RedeclarableTemplateDecl *RTD
= cast_or_null<RedeclarableTemplateDecl>(Reader.GetDecl(Record[Idx++]))) {
assert(RTD->getKind() == D->getKind() &&
@@ -897,9 +1029,9 @@ void ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
Decl *NewLatest = Reader.GetDecl(I->second);
assert((LatestDecl->getLocation().isInvalid() ||
NewLatest->getLocation().isInvalid() ||
- Reader.SourceMgr.isBeforeInTranslationUnit(
- LatestDecl->getLocation(),
- NewLatest->getLocation())) &&
+ !Reader.SourceMgr.isBeforeInTranslationUnit(
+ NewLatest->getLocation(),
+ LatestDecl->getLocation())) &&
"The new latest is supposed to come after the previous latest");
LatestDecl = cast<RedeclarableTemplateDecl>(NewLatest);
}
@@ -907,24 +1039,42 @@ void ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
assert(LatestDecl->getKind() == D->getKind() && "Latest kind mismatch");
D->getCommonPtr()->Latest = LatestDecl;
}
+
+ VisitTemplateDecl(D);
+ D->IdentifierNamespace = Record[Idx++];
}
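// The two-ID encoding read above (the previous DeclID, then the first
// DeclID) lets the reader point D at its canonical declaration immediately
// while deferring the true previous link to PendingPreviousDecls, which
// FinishedDeserializing() drains via loadAndAttachPreviousDecl. This avoids
// recursing through arbitrarily long redeclaration chains mid-read.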
void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
VisitRedeclarableTemplateDecl(D);
if (D->getPreviousDeclaration() == 0) {
- // This ClassTemplateDecl owns a CommonPtr; read it.
-
- // FoldingSets are filled in VisitClassTemplateSpecializationDecl.
- unsigned size = Record[Idx++];
- while (size--)
- cast<ClassTemplateSpecializationDecl>(Reader.GetDecl(Record[Idx++]));
-
- size = Record[Idx++];
- while (size--)
- cast<ClassTemplatePartialSpecializationDecl>(
- Reader.GetDecl(Record[Idx++]));
-
+ // This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
+ // the specializations.
+ llvm::SmallVector<serialization::DeclID, 2> SpecIDs;
+ SpecIDs.push_back(0);
+
+ // Specializations.
+ unsigned Size = Record[Idx++];
+ SpecIDs[0] += Size;
+ SpecIDs.append(Record.begin() + Idx, Record.begin() + Idx + Size);
+ Idx += Size;
+
+ // Partial specializations.
+ Size = Record[Idx++];
+ SpecIDs[0] += Size;
+ SpecIDs.append(Record.begin() + Idx, Record.begin() + Idx + Size);
+ Idx += Size;
+
+ if (SpecIDs[0]) {
+ typedef serialization::DeclID DeclID;
+
+ ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr();
+ CommonPtr->LazySpecializations
+ = new (*Reader.getContext()) DeclID [SpecIDs.size()];
+ memcpy(CommonPtr->LazySpecializations, SpecIDs.data(),
+ SpecIDs.size() * sizeof(DeclID));
+ }
+
// InjectedClassNameType is computed.
}
}
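// Layout of the LazySpecializations array built above: element 0 holds the
// total number of IDs, followed by that many specialization DeclIDs (full
// specializations first, then partial ones). Keeping bare IDs here defers
// deserializing each ClassTemplateSpecializationDecl until the
// specialization set is actually queried.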
@@ -932,41 +1082,52 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
void ASTDeclReader::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
VisitCXXRecordDecl(D);
-
+
+ ASTContext &C = *Reader.getContext();
if (Decl *InstD = Reader.GetDecl(Record[Idx++])) {
if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
- D->setInstantiationOf(CTD);
+ D->SpecializedTemplate = CTD;
} else {
llvm::SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, Cursor, Record, Idx);
- D->setInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(InstD),
- TemplArgs.data(), TemplArgs.size());
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ TemplateArgumentList *ArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS
+ = new (C) ClassTemplateSpecializationDecl::
+ SpecializedPartialSpecialization();
+ PS->PartialSpecialization
+ = cast<ClassTemplatePartialSpecializationDecl>(InstD);
+ PS->TemplateArgs = ArgList;
+ D->SpecializedTemplate = PS;
}
}
// Explicit info.
- if (TypeSourceInfo *TyInfo = Reader.GetTypeSourceInfo(Cursor, Record, Idx)) {
- D->setTypeAsWritten(TyInfo);
- D->setExternLoc(Reader.ReadSourceLocation(Record, Idx));
- D->setTemplateKeywordLoc(Reader.ReadSourceLocation(Record, Idx));
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo
+ = new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = TyInfo;
+ ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ D->ExplicitInfo = ExplicitInfo;
}
llvm::SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, Cursor, Record, Idx);
- D->initTemplateArgs(TemplArgs.data(), TemplArgs.size());
- SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
- if (POI.isValid())
- D->setPointOfInstantiation(POI);
- D->setSpecializationKind((TemplateSpecializationKind)Record[Idx++]);
-
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
+ D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+
if (D->isCanonicalDecl()) { // It's kept in the folding set.
ClassTemplateDecl *CanonPattern
= cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]));
if (ClassTemplatePartialSpecializationDecl *Partial
- = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
- CanonPattern->getPartialSpecializations().InsertNode(Partial);
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ CanonPattern->getCommonPtr()->PartialSpecializations.InsertNode(Partial);
} else {
- CanonPattern->getSpecializations().InsertNode(D);
+ CanonPattern->getCommonPtr()->Specializations.InsertNode(D);
}
}
}
@@ -975,23 +1136,25 @@ void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D) {
VisitClassTemplateSpecializationDecl(D);
- D->initTemplateParameters(Reader.ReadTemplateParameterList(Record, Idx));
-
- TemplateArgumentListInfo ArgInfos;
+ ASTContext &C = *Reader.getContext();
+ D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
+
unsigned NumArgs = Record[Idx++];
- while (NumArgs--)
- ArgInfos.addArgument(Reader.ReadTemplateArgumentLoc(Cursor, Record, Idx));
- D->initTemplateArgsAsWritten(ArgInfos);
-
- D->setSequenceNumber(Record[Idx++]);
+ if (NumArgs) {
+ D->NumArgsAsWritten = NumArgs;
+ D->ArgsAsWritten = new (C) TemplateArgumentLoc[NumArgs];
+ for (unsigned i=0; i != NumArgs; ++i)
+ D->ArgsAsWritten[i] = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
+ }
+
+ D->SequenceNumber = Record[Idx++];
// These are read/set from/to the first declaration.
if (D->getPreviousDeclaration() == 0) {
- D->setInstantiatedFromMember(
+ D->InstantiatedFromMember.setPointer(
cast_or_null<ClassTemplatePartialSpecializationDecl>(
Reader.GetDecl(Record[Idx++])));
- if (Record[Idx++])
- D->setMemberSpecialization();
+ D->InstantiatedFromMember.setInt(Record[Idx++]);
}
}
@@ -1003,7 +1166,7 @@ void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// Read the function specialization declarations.
// FunctionTemplateDecl's FunctionTemplateSpecializationInfos are filled
- // through the specialized FunctionDecl's setFunctionTemplateSpecialization.
+ // when reading the specialized FunctionDecl.
unsigned NumSpecs = Record[Idx++];
while (NumSpecs--)
Reader.GetDecl(Record[Idx++]);
@@ -1017,21 +1180,30 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
D->setParameterPack(Record[Idx++]);
bool Inherited = Record[Idx++];
- TypeSourceInfo *DefArg = Reader.GetTypeSourceInfo(Cursor, Record, Idx);
+ TypeSourceInfo *DefArg = GetTypeSourceInfo(Record, Idx);
D->setDefaultArgument(DefArg, Inherited);
}
void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- VisitVarDecl(D);
+ VisitDeclaratorDecl(D);
// TemplateParmPosition.
D->setDepth(Record[Idx++]);
D->setPosition(Record[Idx++]);
- // Rest of NonTypeTemplateParmDecl.
- if (Record[Idx++]) {
- Expr *DefArg = Reader.ReadExpr(Cursor);
- bool Inherited = Record[Idx++];
- D->setDefaultArgument(DefArg, Inherited);
- }
+ if (D->isExpandedParameterPack()) {
+ void **Data = reinterpret_cast<void **>(D + 1);
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ Data[2*I] = Reader.GetType(Record[Idx++]).getAsOpaquePtr();
+ Data[2*I + 1] = GetTypeSourceInfo(Record, Idx);
+ }
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ D->ParameterPack = Record[Idx++];
+ if (Record[Idx++]) {
+ Expr *DefArg = Reader.ReadExpr(F);
+ bool Inherited = Record[Idx++];
+ D->setDefaultArgument(DefArg, Inherited);
+ }
+ }
}
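
The expanded-parameter-pack branch above reaches storage that lives immediately after the declaration object itself (reinterpret_cast<void **>(D + 1)). A sketch of that trailing-storage idiom; NodeSketch and its helpers are invented, and alignas keeps the trailing pointer slots correctly aligned.

#include <iostream>
#include <new>

struct alignas(alignof(void *)) NodeSketch {
  unsigned NumExpansions;

  static NodeSketch *create(unsigned N) {
    // Over-allocate: the object is followed by N pointer-sized slots.
    void *Mem = ::operator new(sizeof(NodeSketch) + N * sizeof(void *));
    NodeSketch *D = new (Mem) NodeSketch{N};
    void **Data = reinterpret_cast<void **>(D + 1);
    for (unsigned I = 0; I != N; ++I)
      Data[I] = nullptr; // filled in later, as the reader fills types above
    return D;
  }
  void **trailingData() { return reinterpret_cast<void **>(this + 1); }
  static void destroy(NodeSketch *D) { D->~NodeSketch(); ::operator delete(D); }
};

int main() {
  NodeSketch *D = NodeSketch::create(3);
  static int X = 7;
  D->trailingData()[0] = &X;
  std::cout << D->NumExpansions << ' '
            << *static_cast<int *>(D->trailingData()[0]) << '\n'; // 3 7
  NodeSketch::destroy(D);
}
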
void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
@@ -1040,15 +1212,16 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
D->setDepth(Record[Idx++]);
D->setPosition(Record[Idx++]);
// Rest of TemplateTemplateParmDecl.
- TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(Cursor, Record, Idx);
+ TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
bool IsInherited = Record[Idx++];
D->setDefaultArgument(Arg, IsInherited);
+ D->ParameterPack = Record[Idx++];
}
void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
VisitDecl(D);
- D->AssertExpr = Reader.ReadExpr(Cursor);
- D->Message = cast<StringLiteral>(Reader.ReadExpr(Cursor));
+ D->AssertExpr = Reader.ReadExpr(F);
+ D->Message = cast<StringLiteral>(Reader.ReadExpr(F));
}
std::pair<uint64_t, uint64_t>
@@ -1068,10 +1241,20 @@ void ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
" reading");
case NoRedeclaration:
break;
- case PointsToPrevious:
+ case PointsToPrevious: {
+ DeclID PreviousDeclID = Record[Idx++];
+ DeclID FirstDeclID = Record[Idx++];
+ // We delay loading of the redeclaration chain to avoid deeply nested calls.
+ // We temporarily set the first (canonical) declaration as the previous one,
+ // which is the one that matters, and mark the real previous DeclID to be
+ // loaded and attached later on.
D->RedeclLink = typename Redeclarable<T>::PreviousDeclLink(
- cast_or_null<T>(Reader.GetDecl(Record[Idx++])));
+ cast_or_null<T>(Reader.GetDecl(FirstDeclID)));
+ if (PreviousDeclID != FirstDeclID)
+ Reader.PendingPreviousDecls.push_back(std::make_pair(static_cast<T*>(D),
+ PreviousDeclID));
break;
+ }
case PointsToLatest:
D->RedeclLink = typename Redeclarable<T>::LatestDeclLink(
cast_or_null<T>(Reader.GetDecl(Record[Idx++])));
@@ -1095,12 +1278,6 @@ void ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
= Reader.FirstLatestDeclIDs.find(ThisDeclID);
if (I != Reader.FirstLatestDeclIDs.end()) {
Decl *NewLatest = Reader.GetDecl(I->second);
- assert((D->getMostRecentDeclaration()->getLocation().isInvalid() ||
- NewLatest->getLocation().isInvalid() ||
- Reader.SourceMgr.isBeforeInTranslationUnit(
- D->getMostRecentDeclaration()->getLocation(),
- NewLatest->getLocation())) &&
- "The new latest is supposed to come after the previous latest");
D->RedeclLink
= typename Redeclarable<T>::LatestDeclLink(cast_or_null<T>(NewLatest));
}
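
The PointsToPrevious case above defers the real previous-declaration link, exactly as its comment describes: it points at the cheap-to-load canonical declaration now and queues the (decl, previous ID) pair to be resolved later. A minimal sketch of that deferral with invented types (ReaderSketch, DeclSketch):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using DeclID = uint32_t;

struct DeclSketch {
  std::string Name;
  DeclSketch *Previous = nullptr;
};

struct ReaderSketch {
  std::map<DeclID, DeclSketch> Decls;
  std::vector<std::pair<DeclSketch *, DeclID>> PendingPreviousDecls;

  DeclSketch *getDecl(DeclID ID) { return &Decls[ID]; }

  void readRedeclarable(DeclSketch *D, DeclID PreviousID, DeclID FirstID) {
    D->Previous = getDecl(FirstID);          // canonical decl, cheap to load
    if (PreviousID != FirstID)               // real link resolved later
      PendingPreviousDecls.push_back({D, PreviousID});
  }

  void flushPending() {
    for (auto &P : PendingPreviousDecls)
      P.first->Previous = getDecl(P.second);
    PendingPreviousDecls.clear();
  }
};

int main() {
  ReaderSketch R;
  R.Decls[1] = {"canonical"};
  R.Decls[2] = {"middle"};
  R.Decls[3] = {"latest"};
  R.readRedeclarable(R.getDecl(3), /*PreviousID=*/2, /*FirstID=*/1);
  std::cout << R.getDecl(3)->Previous->Name << '\n'; // canonical, for now
  R.flushPending();
  std::cout << R.getDecl(3)->Previous->Name << '\n'; // middle, after fixup
}
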
@@ -1111,28 +1288,16 @@ void ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
//===----------------------------------------------------------------------===//
/// \brief Reads attributes from the current stream position.
-void ASTReader::ReadAttributes(llvm::BitstreamCursor &DeclsCursor,
- AttrVec &Attrs) {
- unsigned Code = DeclsCursor.ReadCode();
- assert(Code == llvm::bitc::UNABBREV_RECORD &&
- "Expected unabbreviated record"); (void)Code;
-
- RecordData Record;
- unsigned Idx = 0;
- unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
- assert(RecCode == DECL_ATTR && "Expected attribute record");
- (void)RecCode;
-
- while (Idx < Record.size()) {
+void ASTReader::ReadAttributes(PerFileData &F, AttrVec &Attrs,
+ const RecordData &Record, unsigned &Idx) {
+ for (unsigned i = 0, e = Record[Idx++]; i != e; ++i) {
Attr *New = 0;
attr::Kind Kind = (attr::Kind)Record[Idx++];
- SourceLocation Loc = SourceLocation::getFromRawEncoding(Record[Idx++]);
- bool isInherited = Record[Idx++];
+ SourceLocation Loc = ReadSourceLocation(F, Record, Idx);
#include "clang/Serialization/AttrPCHRead.inc"
assert(New && "Unable to decode attribute?");
- New->setInherited(isInherited);
Attrs.push_back(New);
}
}
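
ReadAttributes now reads a count-prefixed list from a shared record instead of scanning a dedicated record to its end, which lets attributes coexist with other fields in the same record. A small sketch of the difference; the record layout here is hypothetical.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical layout: <count>, then <kind, loc> per attribute, followed
  // by an unrelated trailing field that a scan-to-end loop would have eaten.
  std::vector<uint64_t> Record = {2, /*kind*/ 7, /*loc*/ 100, 9, 101, /*other*/ 42};
  unsigned Idx = 0;
  for (uint64_t i = 0, e = Record[Idx++]; i != e; ++i) {
    uint64_t Kind = Record[Idx++];
    uint64_t Loc = Record[Idx++];
    std::cout << "attr kind=" << Kind << " loc=" << Loc << '\n';
  }
  std::cout << "next field: " << Record[Idx++] << '\n'; // 42 still available
}
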
@@ -1176,7 +1341,7 @@ ASTReader::DeclCursorForIndex(unsigned Index, DeclID ID) {
// See if there's an override.
DeclReplacementMap::iterator It = ReplacedDecls.find(ID);
if (It != ReplacedDecls.end())
- return RecordLocation(&It->second.first->DeclsCursor, It->second.second);
+ return RecordLocation(It->second.first, It->second.second);
PerFileData *F = 0;
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
@@ -1186,13 +1351,32 @@ ASTReader::DeclCursorForIndex(unsigned Index, DeclID ID) {
Index -= F->LocalNumDecls;
}
assert(F && F->LocalNumDecls > Index && "Broken chain");
- return RecordLocation(&F->DeclsCursor, F->DeclOffsets[Index]);
+ return RecordLocation(F, F->DeclOffsets[Index]);
+}
+
+void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *previous) {
+ assert(D && previous);
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ TD->RedeclLink.setPointer(cast<TagDecl>(previous));
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->RedeclLink.setPointer(cast<FunctionDecl>(previous));
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ VD->RedeclLink.setPointer(cast<VarDecl>(previous));
+ } else {
+ RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
+ TD->CommonOrPrev = cast<RedeclarableTemplateDecl>(previous);
+ }
+}
+
+void ASTReader::loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID) {
+ Decl *previous = GetDecl(ID);
+ ASTDeclReader::attachPreviousDecl(D, previous);
}
/// \brief Read the declaration at the given offset from the AST file.
Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
RecordLocation Loc = DeclCursorForIndex(Index, ID);
- llvm::BitstreamCursor &DeclsCursor = *Loc.first;
+ llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
// Keep track of where we are in the stream, then jump back there
// after reading this declaration.
SavedStreamPosition SavedPosition(DeclsCursor);
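
SavedStreamPosition is an RAII guard: it records the cursor's bit offset on construction and jumps back on destruction, so reading a declaration elsewhere in the stream cannot disturb the caller's position. A standalone sketch with invented stand-ins (CursorSketch is not llvm::BitstreamCursor):

#include <iostream>

struct CursorSketch {
  unsigned long long Bit = 0;
  void jumpToBit(unsigned long long B) { Bit = B; }
};

class SavedStreamPositionSketch {
  CursorSketch &Cursor;
  unsigned long long Offset;
public:
  explicit SavedStreamPositionSketch(CursorSketch &C)
      : Cursor(C), Offset(C.Bit) {}
  ~SavedStreamPositionSketch() { Cursor.jumpToBit(Offset); }
};

int main() {
  CursorSketch Cursor;
  Cursor.jumpToBit(128);
  {
    SavedStreamPositionSketch Saved(Cursor);
    Cursor.jumpToBit(4096); // read a declaration elsewhere in the stream
  }                         // destructor restores the old position
  std::cout << Cursor.Bit << '\n'; // prints 128
}
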
@@ -1202,15 +1386,14 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
// Note that we are loading a declaration record.
Deserializing ADecl(this);
- DeclsCursor.JumpToBit(Loc.second);
+ DeclsCursor.JumpToBit(Loc.Offset);
RecordData Record;
unsigned Code = DeclsCursor.ReadCode();
unsigned Idx = 0;
- ASTDeclReader Reader(*this, DeclsCursor, ID, Record, Idx);
+ ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, Record, Idx);
Decl *D = 0;
switch ((DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
- case DECL_ATTR:
case DECL_CONTEXT_LEXICAL:
case DECL_CONTEXT_VISIBLE:
assert(false && "Record cannot be de-serialized with ReadDeclRecord");
@@ -1241,6 +1424,9 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
(LinkageSpecDecl::LanguageIDs)0,
false);
break;
+ case DECL_LABEL:
+ D = LabelDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
case DECL_NAMESPACE:
D = NamespaceDecl::Create(*Context, 0, SourceLocation(), 0);
break;
@@ -1289,8 +1475,7 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
D = CXXConversionDecl::Create(*Context, Decl::EmptyShell());
break;
case DECL_ACCESS_SPEC:
- D = AccessSpecDecl::Create(*Context, AS_none, 0, SourceLocation(),
- SourceLocation());
+ D = AccessSpecDecl::Create(*Context, Decl::EmptyShell());
break;
case DECL_FRIEND:
D = FriendDecl::Create(*Context, Decl::EmptyShell());
@@ -1318,10 +1503,16 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
break;
case DECL_NON_TYPE_TEMPLATE_PARM:
D = NonTypeTemplateParmDecl::Create(*Context, 0, SourceLocation(), 0,0,0,
- QualType(),0);
+ QualType(), false, 0);
+ break;
+ case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
+ D = NonTypeTemplateParmDecl::Create(*Context, 0, SourceLocation(), 0, 0,
+ 0, QualType(), 0, 0, Record[Idx++],
+ 0);
break;
case DECL_TEMPLATE_TEMPLATE_PARM:
- D = TemplateTemplateParmDecl::Create(*Context, 0, SourceLocation(),0,0,0,0);
+ D = TemplateTemplateParmDecl::Create(*Context, 0, SourceLocation(), 0, 0,
+ false, 0, 0);
break;
case DECL_STATIC_ASSERT:
D = StaticAssertDecl::Create(*Context, 0, SourceLocation(), 0, 0);
@@ -1371,12 +1562,17 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
case DECL_OBJC_PROPERTY_IMPL:
D = ObjCPropertyImplDecl::Create(*Context, 0, SourceLocation(),
SourceLocation(), 0,
- ObjCPropertyImplDecl::Dynamic, 0);
+ ObjCPropertyImplDecl::Dynamic, 0,
+ SourceLocation());
break;
case DECL_FIELD:
D = FieldDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0, 0,
false);
break;
+ case DECL_INDIRECTFIELD:
+ D = IndirectFieldDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
+ 0, 0);
+ break;
case DECL_VAR:
D = VarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0,
SC_None, SC_None);
@@ -1396,6 +1592,9 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
case DECL_BLOCK:
D = BlockDecl::Create(*Context, 0, SourceLocation());
break;
+ case DECL_CXX_BASE_SPECIFIERS:
+ Error("attempt to read a C++ base-specifier record as a declaration");
+ return 0;
}
assert(D && "Unknown declaration reading AST file");
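
ReadDeclRecord follows a two-phase shape: the switch above creates an empty "shell" object for the record kind, and the visitor then fills the shell in from the record. A compact sketch of that pattern with invented kinds and types:

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct DeclSketch { virtual ~DeclSketch() = default; unsigned Loc = 0; };
struct VarDeclSketch : DeclSketch { uint64_t TypeID = 0; };
struct LabelDeclSketch : DeclSketch {};

enum DeclCodeSketch : uint64_t { DECL_VAR_SKETCH, DECL_LABEL_SKETCH };

std::unique_ptr<DeclSketch> createEmptyShell(uint64_t Code) {
  switch (Code) {
  case DECL_VAR_SKETCH:   return std::make_unique<VarDeclSketch>();
  case DECL_LABEL_SKETCH: return std::make_unique<LabelDeclSketch>();
  }
  return nullptr;
}

int main() {
  std::vector<uint64_t> Record = {DECL_VAR_SKETCH, /*loc*/ 3, /*type*/ 8};
  unsigned Idx = 0;
  std::unique_ptr<DeclSketch> D = createEmptyShell(Record[Idx++]); // phase 1
  D->Loc = static_cast<unsigned>(Record[Idx++]);                   // VisitDecl
  static_cast<VarDeclSketch &>(*D).TypeID = Record[Idx++];         // VisitVarDecl
  std::cout << D->Loc << '\n'; // 3
}
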
@@ -1435,21 +1634,29 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
}
}
}
+ assert(Idx == Record.size());
- // If this is a template, read additional specializations that may be in a
- // different part of the chain.
- if (isa<RedeclarableTemplateDecl>(D)) {
- AdditionalTemplateSpecializationsMap::iterator F =
- AdditionalTemplateSpecializationsPending.find(ID);
- if (F != AdditionalTemplateSpecializationsPending.end()) {
- for (AdditionalTemplateSpecializations::iterator I = F->second.begin(),
- E = F->second.end();
- I != E; ++I)
- GetDecl(*I);
- AdditionalTemplateSpecializationsPending.erase(F);
+ // The declaration may have been modified by files later in the chain.
+ // If this is the case, read the record containing the updates from each file
+ // and pass it to ASTDeclReader to make the modifications.
+ DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
+ if (UpdI != DeclUpdateOffsets.end()) {
+ FileOffsetsTy &UpdateOffsets = UpdI->second;
+ for (FileOffsetsTy::iterator
+ I = UpdateOffsets.begin(), E = UpdateOffsets.end(); I != E; ++I) {
+ PerFileData *F = I->first;
+ uint64_t Offset = I->second;
+ llvm::BitstreamCursor &Cursor = F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
+ Reader.UpdateDecl(D, Record);
}
}
- assert(Idx == Record.size());
// If we have deserialized a declaration that has a definition the
// AST consumer might need to know about, queue it.
@@ -1460,3 +1667,27 @@ Decl *ASTReader::ReadDeclRecord(unsigned Index, DeclID ID) {
return D;
}
+
+void ASTDeclReader::UpdateDecl(Decl *D, const RecordData &Record) {
+ unsigned Idx = 0;
+ while (Idx < Record.size()) {
+ switch ((DeclUpdateKind)Record[Idx++]) {
+ case UPD_CXX_SET_DEFINITIONDATA: {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ CXXRecordDecl *
+ DefinitionDecl = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]));
+ assert(!RD->DefinitionData && "DefinitionData is already set!");
+ InitializeCXXDefinitionData(RD, DefinitionDecl, Record, Idx);
+ break;
+ }
+
+ case UPD_CXX_ADDED_IMPLICIT_MEMBER:
+ cast<CXXRecordDecl>(D)->addedMember(Reader.GetDecl(Record[Idx++]));
+ break;
+
+ case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+ // It will be added to the template's specializations set when loaded.
+ Reader.GetDecl(Record[Idx++]);
+ }
+ }
+}
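
UpdateDecl replays kind-tagged update records, appended by later files in the chain, against an already deserialized declaration. A minimal sketch of such a replay loop; the kinds and payloads are invented stand-ins, not Clang's DeclUpdateKind values.

#include <cstdint>
#include <iostream>
#include <vector>

enum DeclUpdateKindSketch : uint64_t {
  UPD_SET_FLAG,
  UPD_ADD_MEMBER,
};

struct DeclSketch { bool Flag = false; std::vector<uint64_t> Members; };

void updateDecl(DeclSketch &D, const std::vector<uint64_t> &Record) {
  unsigned Idx = 0;
  while (Idx < Record.size()) {
    switch (static_cast<DeclUpdateKindSketch>(Record[Idx++])) {
    case UPD_SET_FLAG:
      D.Flag = Record[Idx++] != 0;
      break;
    case UPD_ADD_MEMBER:
      D.Members.push_back(Record[Idx++]);
      break;
    }
  }
}

int main() {
  DeclSketch D;
  updateDecl(D, {UPD_SET_FLAG, 1, UPD_ADD_MEMBER, 17, UPD_ADD_MEMBER, 23});
  std::cout << D.Flag << ' ' << D.Members.size() << '\n'; // 1 2
}
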
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
index ee5d40a..4e91c98 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -14,6 +14,7 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
using namespace clang;
using namespace clang::serialization;
@@ -22,14 +23,36 @@ namespace clang {
class ASTStmtReader : public StmtVisitor<ASTStmtReader> {
ASTReader &Reader;
+ ASTReader::PerFileData &F;
llvm::BitstreamCursor &DeclsCursor;
const ASTReader::RecordData &Record;
unsigned &Idx;
+ SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
+ unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+ SourceRange ReadSourceRange(const ASTReader::RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+ TypeSourceInfo *GetTypeSourceInfo(const ASTReader::RecordData &R,
+ unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
public:
- ASTStmtReader(ASTReader &Reader, llvm::BitstreamCursor &Cursor,
+ ASTStmtReader(ASTReader &Reader, ASTReader::PerFileData &F,
+ llvm::BitstreamCursor &Cursor,
const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(Reader), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
+ : Reader(Reader), F(F), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
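
The block of private helpers above exists so every call site can drop the per-file argument: each wrapper forwards to the reader with F already bound, which matters because source locations must be remapped relative to their originating file in a chained PCH. A small sketch of the same refactor with invented types:

#include <iostream>
#include <vector>

struct PerFile { unsigned Base; }; // stand-in for ASTReader::PerFileData

struct ReaderSketch {
  unsigned readLoc(PerFile &F, const std::vector<unsigned> &R, unsigned &I) {
    return F.Base + R[I++]; // locations are offsets relative to the file's base
  }
};

class StmtReaderSketch {
  ReaderSketch &TheReader;
  PerFile &F;
  const std::vector<unsigned> &Record;
  unsigned &Idx;
  // Bound wrapper: call sites no longer mention F at all.
  unsigned readLoc() { return TheReader.readLoc(F, Record, Idx); }

public:
  StmtReaderSketch(ReaderSketch &R, PerFile &F,
                   const std::vector<unsigned> &Rec, unsigned &I)
      : TheReader(R), F(F), Record(Rec), Idx(I) {}
  void visit() {
    unsigned Begin = readLoc();
    unsigned End = readLoc();
    std::cout << Begin << ' ' << End << '\n'; // 1005 1009
  }
};

int main() {
  ReaderSketch R;
  PerFile F{1000};
  std::vector<unsigned> Rec = {5, 9};
  unsigned Idx = 0;
  StmtReaderSketch(R, F, Rec, Idx).visit();
}
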
/// \brief The number of record fields required for the Stmt class
/// itself.
@@ -37,7 +60,7 @@ namespace clang {
/// \brief The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields = NumStmtFields + 3;
+ static const unsigned NumExprFields = NumStmtFields + 6;
/// \brief Read and initialize a ExplicitTemplateArgumentList structure.
void ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList,
@@ -82,6 +105,7 @@ namespace clang {
void VisitBinaryOperator(BinaryOperator *E);
void VisitCompoundAssignOperator(CompoundAssignOperator *E);
void VisitConditionalOperator(ConditionalOperator *E);
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
void VisitImplicitCastExpr(ImplicitCastExpr *E);
void VisitExplicitCastExpr(ExplicitCastExpr *E);
void VisitCStyleCastExpr(CStyleCastExpr *E);
@@ -93,7 +117,6 @@ namespace clang {
void VisitVAArgExpr(VAArgExpr *E);
void VisitAddrLabelExpr(AddrLabelExpr *E);
void VisitStmtExpr(StmtExpr *E);
- void VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
void VisitChooseExpr(ChooseExpr *E);
void VisitGNUNullExpr(GNUNullExpr *E);
void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
@@ -105,10 +128,7 @@ namespace clang {
void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
- void VisitObjCSuperExpr(ObjCSuperExpr *E);
void VisitObjCIsaExpr(ObjCIsaExpr *E);
void VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
@@ -134,6 +154,7 @@ namespace clang {
void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ void VisitCXXUuidofExpr(CXXUuidofExpr *E);
void VisitCXXThisExpr(CXXThisExpr *E);
void VisitCXXThrowExpr(CXXThrowExpr *E);
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
@@ -144,7 +165,7 @@ namespace clang {
void VisitCXXDeleteExpr(CXXDeleteExpr *E);
void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
- void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
@@ -155,6 +176,16 @@ namespace clang {
void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
+ void VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E);
+ void VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
+ void VisitPackExpansionExpr(PackExpansionExpr *E);
+ void VisitSizeOfPackExpr(SizeOfPackExpr *E);
+ void VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
+ // CUDA Expressions
+ void VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E);
};
}
@@ -162,11 +193,11 @@ void ASTStmtReader::
ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList,
unsigned NumTemplateArgs) {
TemplateArgumentListInfo ArgInfo;
- ArgInfo.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx));
- ArgInfo.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+ ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned i = 0; i != NumTemplateArgs; ++i)
ArgInfo.addArgument(
- Reader.ReadTemplateArgumentLoc(DeclsCursor, Record, Idx));
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
ArgList.initializeFrom(ArgInfo);
}
@@ -176,7 +207,8 @@ void ASTStmtReader::VisitStmt(Stmt *S) {
void ASTStmtReader::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
- S->setSemiLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setSemiLoc(ReadSourceLocation(Record, Idx));
+ S->LeadingEmptyMacro = Record[Idx++];
}
void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
@@ -186,8 +218,8 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
while (NumStmts--)
Stmts.push_back(Reader.ReadSubStmt());
S->setStmts(*Reader.getContext(), Stmts.data(), Stmts.size());
- S->setLBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setLBracLoc(ReadSourceLocation(Record, Idx));
+ S->setRBracLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
@@ -200,24 +232,25 @@ void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
S->setLHS(Reader.ReadSubExpr());
S->setRHS(Reader.ReadSubExpr());
S->setSubStmt(Reader.ReadSubStmt());
- S->setCaseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setEllipsisLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setCaseLoc(ReadSourceLocation(Record, Idx));
+ S->setEllipsisLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
VisitSwitchCase(S);
S->setSubStmt(Reader.ReadSubStmt());
- S->setDefaultLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setDefaultLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
- S->setID(Reader.GetIdentifierInfo(Record, Idx));
+ LabelDecl *LD = cast<LabelDecl>(Reader.GetDecl(Record[Idx++]));
+ LD->setStmt(S);
+ S->setDecl(LD);
S->setSubStmt(Reader.ReadSubStmt());
- S->setIdentLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- Reader.RecordLabelStmt(S, Record[Idx++]);
+ S->setIdentLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitIfStmt(IfStmt *S) {
@@ -227,8 +260,8 @@ void ASTStmtReader::VisitIfStmt(IfStmt *S) {
S->setCond(Reader.ReadSubExpr());
S->setThen(Reader.ReadSubStmt());
S->setElse(Reader.ReadSubStmt());
- S->setIfLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setElseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setIfLoc(ReadSourceLocation(Record, Idx));
+ S->setElseLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
@@ -237,7 +270,10 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
S->setCond(Reader.ReadSubExpr());
S->setBody(Reader.ReadSubStmt());
- S->setSwitchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setSwitchLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++])
+ S->setAllEnumCasesCovered();
+
SwitchCase *PrevSC = 0;
for (unsigned N = Record.size(); Idx != N; ++Idx) {
SwitchCase *SC = Reader.getSwitchCaseWithID(Record[Idx]);
@@ -246,9 +282,6 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
else
S->setSwitchCaseList(SC);
- // Retain this SwitchCase, since SwitchStmt::addSwitchCase() would
- // normally retain it (but we aren't calling addSwitchCase).
- SC->Retain();
PrevSC = SC;
}
}
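
VisitSwitchStmt rebuilds the statement's intrusive case list from serialized IDs, threading each case onto its predecessor in record order. A standalone sketch of that re-linking; SwitchCaseSketch is an invented stand-in.

#include <iostream>
#include <map>
#include <vector>

struct SwitchCaseSketch { int ID; SwitchCaseSketch *NextSwitchCase = nullptr; };

int main() {
  std::map<int, SwitchCaseSketch> ByID;
  for (int ID : {10, 11, 12}) ByID[ID] = {ID};

  std::vector<int> Record = {10, 11, 12};   // serialized case IDs, in order
  SwitchCaseSketch *First = nullptr, *Prev = nullptr;
  for (int ID : Record) {
    SwitchCaseSketch *SC = &ByID[ID];
    if (Prev)
      Prev->NextSwitchCase = SC;  // append to the intrusive list
    else
      First = SC;                 // head of the rebuilt chain
    Prev = SC;
  }
  for (SwitchCaseSketch *SC = First; SC; SC = SC->NextSwitchCase)
    std::cout << SC->ID << ' ';
  std::cout << '\n'; // 10 11 12
}
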
@@ -259,16 +292,16 @@ void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
S->setCond(Reader.ReadSubExpr());
S->setBody(Reader.ReadSubStmt());
- S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitDoStmt(DoStmt *S) {
VisitStmt(S);
S->setCond(Reader.ReadSubExpr());
S->setBody(Reader.ReadSubStmt());
- S->setDoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setDoLoc(ReadSourceLocation(Record, Idx));
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitForStmt(ForStmt *S) {
@@ -279,46 +312,46 @@ void ASTStmtReader::VisitForStmt(ForStmt *S) {
cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
S->setInc(Reader.ReadSubExpr());
S->setBody(Reader.ReadSubStmt());
- S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setLParenLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitGotoStmt(GotoStmt *S) {
VisitStmt(S);
- Reader.SetLabelOf(S, Record[Idx++]);
- S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setLabel(cast<LabelDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setLabelLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
VisitStmt(S);
- S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setStarLoc(ReadSourceLocation(Record, Idx));
S->setTarget(Reader.ReadSubExpr());
}
void ASTStmtReader::VisitContinueStmt(ContinueStmt *S) {
VisitStmt(S);
- S->setContinueLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setContinueLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
VisitStmt(S);
- S->setBreakLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setBreakLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
S->setRetValue(Reader.ReadSubExpr());
- S->setReturnLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setReturnLoc(ReadSourceLocation(Record, Idx));
S->setNRVOCandidate(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
}
void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
VisitStmt(S);
- S->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setStartLoc(ReadSourceLocation(Record, Idx));
+ S->setEndLoc(ReadSourceLocation(Record, Idx));
if (Idx + 1 == Record.size()) {
// Single declaration
@@ -339,8 +372,8 @@ void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
unsigned NumOutputs = Record[Idx++];
unsigned NumInputs = Record[Idx++];
unsigned NumClobbers = Record[Idx++];
- S->setAsmLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setAsmLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
S->setVolatile(Record[Idx++]);
S->setSimple(Record[Idx++]);
S->setMSAsm(Record[Idx++]);
@@ -373,12 +406,15 @@ void ASTStmtReader::VisitExpr(Expr *E) {
E->setType(Reader.GetType(Record[Idx++]));
E->setTypeDependent(Record[Idx++]);
E->setValueDependent(Record[Idx++]);
+ E->ExprBits.ContainsUnexpandedParameterPack = Record[Idx++];
+ E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
+ E->setObjectKind(static_cast<ExprObjectKind>(Record[Idx++]));
assert(Idx == NumExprFields && "Incorrect expression field count");
}
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]);
}
@@ -386,28 +422,31 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
bool HasQualifier = Record[Idx++];
- unsigned NumTemplateArgs = Record[Idx++];
+ bool HasExplicitTemplateArgs = Record[Idx++];
E->DecoratedD.setInt((HasQualifier? DeclRefExpr::HasQualifierFlag : 0) |
- (NumTemplateArgs ? DeclRefExpr::HasExplicitTemplateArgumentListFlag : 0));
+ (HasExplicitTemplateArgs
+ ? DeclRefExpr::HasExplicitTemplateArgumentListFlag : 0));
if (HasQualifier) {
E->getNameQualifier()->NNS = Reader.ReadNestedNameSpecifier(Record, Idx);
- E->getNameQualifier()->Range = Reader.ReadSourceRange(Record, Idx);
+ E->getNameQualifier()->Range = ReadSourceRange(Record, Idx);
}
- if (NumTemplateArgs)
+ if (HasExplicitTemplateArgs) {
+ unsigned NumTemplateArgs = Record[Idx++];
ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
NumTemplateArgs);
+ }
E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
- // FIXME: read DeclarationNameLoc.
- E->setLocation(Reader.ReadSourceLocation(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ ReadDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record, Idx);
}
void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
VisitExpr(E);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setValue(*Reader.getContext(), Reader.ReadAPInt(Record, Idx));
}
@@ -415,7 +454,7 @@ void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
E->setValue(*Reader.getContext(), Reader.ReadAPFloat(Record, Idx));
E->setExact(Record[Idx++]);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
@@ -438,20 +477,20 @@ void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
// Read source locations
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
- E->setStrTokenLoc(I, SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setStrTokenLoc(I, ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
VisitExpr(E);
E->setValue(Record[Idx++]);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setWide(Record[Idx++]);
}
void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
VisitExpr(E);
- E->setLParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLParen(ReadSourceLocation(Record, Idx));
+ E->setRParen(ReadSourceLocation(Record, Idx));
E->setSubExpr(Reader.ReadSubExpr());
}
@@ -462,15 +501,15 @@ void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
for (unsigned i = 0; i != NumExprs; ++i)
E->Exprs[i] = Reader.ReadSubStmt();
E->NumExprs = NumExprs;
- E->LParenLoc = Reader.ReadSourceLocation(Record, Idx);
- E->RParenLoc = Reader.ReadSourceLocation(Record, Idx);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
}
void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
E->setSubExpr(Reader.ReadSubExpr());
E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
- E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
@@ -480,13 +519,13 @@ void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
++Idx;
assert(E->getNumExpressions() == Record[Idx]);
++Idx;
- E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setTypeSourceInfo(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
Node::Kind Kind = static_cast<Node::Kind>(Record[Idx++]);
- SourceLocation Start = SourceLocation::getFromRawEncoding(Record[Idx++]);
- SourceLocation End = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation Start = ReadSourceLocation(Record, Idx);
+ SourceLocation End = ReadSourceLocation(Record, Idx);
switch (Kind) {
case Node::Array:
E->setComponent(I, Node(Start, Record[Idx++], End));
@@ -505,7 +544,7 @@ void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
case Node::Base: {
CXXBaseSpecifier *Base = new (*Reader.getContext()) CXXBaseSpecifier();
- *Base = Reader.ReadCXXBaseSpecifier(DeclsCursor, Record, Idx);
+ *Base = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
E->setComponent(I, Node(Base));
break;
}
@@ -523,23 +562,23 @@ void ASTStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
E->setArgument(Reader.ReadSubExpr());
++Idx;
} else {
- E->setArgument(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setArgument(GetTypeSourceInfo(Record, Idx));
}
- E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
VisitExpr(E);
E->setLHS(Reader.ReadSubExpr());
E->setRHS(Reader.ReadSubExpr());
- E->setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
E->setNumArgs(*Reader.getContext(), Record[Idx++]);
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
E->setCallee(Reader.ReadSubExpr());
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Reader.ReadSubExpr());
@@ -554,7 +593,7 @@ void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
VisitExpr(E);
E->setBase(Reader.ReadSubExpr());
- E->setIsaMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setIsaMemberLoc(ReadSourceLocation(Record, Idx));
E->setArrow(Record[Idx++]);
}
@@ -567,7 +606,7 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
CastExpr::path_iterator BaseI = E->path_begin();
while (NumBaseSpecs--) {
CXXBaseSpecifier *BaseSpec = new (*Reader.getContext()) CXXBaseSpecifier;
- *BaseSpec = Reader.ReadCXXBaseSpecifier(DeclsCursor, Record, Idx);
+ *BaseSpec = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
*BaseI++ = BaseSpec;
}
}
@@ -577,7 +616,7 @@ void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
E->setLHS(Reader.ReadSubExpr());
E->setRHS(Reader.ReadSubExpr());
E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
- E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -588,34 +627,46 @@ void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
void ASTStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
VisitExpr(E);
- E->setCond(Reader.ReadSubExpr());
- E->setLHS(Reader.ReadSubExpr());
- E->setRHS(Reader.ReadSubExpr());
- E->setSAVE(Reader.ReadSubExpr());
- E->setQuestionLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->SubExprs[ConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void
+ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
+ E->OpaqueValue = cast<OpaqueValueExpr>(Reader.ReadSubExpr());
+ E->SubExprs[BinaryConditionalOperator::COMMON] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+
+ E->getOpaqueValue()->setSourceExpr(E->getCommon());
}
void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
- E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
}
void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
VisitCastExpr(E);
- E->setTypeInfoAsWritten(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setTypeInfoAsWritten(GetTypeSourceInfo(Record, Idx));
}
void ASTStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
VisitExplicitCastExpr(E);
- E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
VisitExpr(E);
- E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setTypeSourceInfo(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
E->setInitializer(Reader.ReadSubExpr());
E->setFileScope(Record[Idx++]);
}
@@ -624,7 +675,7 @@ void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
VisitExpr(E);
E->setBase(Reader.ReadSubExpr());
E->setAccessor(Reader.GetIdentifierInfo(Record, Idx));
- E->setAccessorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setAccessorLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
@@ -634,8 +685,8 @@ void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
for (unsigned I = 0; I != NumInits; ++I)
E->updateInit(*Reader.getContext(), I, Reader.ReadSubExpr());
E->setSyntacticForm(cast_or_null<InitListExpr>(Reader.ReadSubStmt()));
- E->setLBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLBraceLoc(ReadSourceLocation(Record, Idx));
+ E->setRBraceLoc(ReadSourceLocation(Record, Idx));
E->setInitializedFieldInUnion(
cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++])));
E->sawArrayRangeDesignator(Record[Idx++]);
@@ -649,7 +700,7 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
for (unsigned I = 0; I != NumSubExprs; ++I)
E->setSubExpr(I, Reader.ReadSubExpr());
- E->setEqualOrColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setEqualOrColonLoc(ReadSourceLocation(Record, Idx));
E->setGNUSyntax(Record[Idx++]);
llvm::SmallVector<Designator, 4> Designators;
@@ -658,9 +709,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
case DESIG_FIELD_DECL: {
FieldDecl *Field = cast<FieldDecl>(Reader.GetDecl(Record[Idx++]));
SourceLocation DotLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
SourceLocation FieldLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
FieldLoc));
Designators.back().setField(Field);
@@ -670,9 +721,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
case DESIG_FIELD_NAME: {
const IdentifierInfo *Name = Reader.GetIdentifierInfo(Record, Idx);
SourceLocation DotLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
SourceLocation FieldLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
Designators.push_back(Designator(Name, DotLoc, FieldLoc));
break;
}
@@ -680,9 +731,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
case DESIG_ARRAY: {
unsigned Index = Record[Idx++];
SourceLocation LBracketLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
SourceLocation RBracketLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
break;
}
@@ -690,11 +741,11 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
case DESIG_ARRAY_RANGE: {
unsigned Index = Record[Idx++];
SourceLocation LBracketLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
SourceLocation EllipsisLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
SourceLocation RBracketLoc
- = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ = ReadSourceLocation(Record, Idx);
Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
RBracketLoc));
break;
@@ -712,45 +763,37 @@ void ASTStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
VisitExpr(E);
E->setSubExpr(Reader.ReadSubExpr());
- E->setWrittenTypeInfo(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
- E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setWrittenTypeInfo(GetTypeSourceInfo(Record, Idx));
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
- E->setAmpAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- Reader.SetLabelOf(E, Record[Idx++]);
+ E->setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+ E->setLabelLoc(ReadSourceLocation(Record, Idx));
+ E->setLabel(cast<LabelDecl>(Reader.GetDecl(Record[Idx++])));
}
void ASTStmtReader::VisitStmtExpr(StmtExpr *E) {
VisitExpr(E);
- E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt()));
}
-void ASTStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
- VisitExpr(E);
- E->setArgTInfo1(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
- E->setArgTInfo2(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
- E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
-}
-
void ASTStmtReader::VisitChooseExpr(ChooseExpr *E) {
VisitExpr(E);
E->setCond(Reader.ReadSubExpr());
E->setLHS(Reader.ReadSubExpr());
E->setRHS(Reader.ReadSubExpr());
- E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
VisitExpr(E);
- E->setTokenLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setTokenLocation(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
@@ -760,23 +803,21 @@ void ASTStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
while (NumExprs--)
Exprs.push_back(Reader.ReadSubExpr());
E->setExprs(*Reader.getContext(), Exprs.data(), Exprs.size());
- E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
VisitExpr(E);
E->setBlockDecl(cast_or_null<BlockDecl>(Reader.GetDecl(Record[Idx++])));
- E->setHasBlockDeclRefExprs(Record[Idx++]);
}
void ASTStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
VisitExpr(E);
- E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setDecl(cast<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setByRef(Record[Idx++]);
E->setConstQualAdded(Record[Idx++]);
- E->setCopyConstructorExpr(Reader.ReadSubExpr());
}
//===----------------------------------------------------------------------===//
@@ -785,34 +826,34 @@ void ASTStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
VisitExpr(E);
E->setString(cast<StringLiteral>(Reader.ReadSubStmt()));
- E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
VisitExpr(E);
- E->setEncodedTypeSourceInfo(Reader.GetTypeSourceInfo(DeclsCursor,Record,Idx));
- E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setEncodedTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
VisitExpr(E);
E->setSelector(Reader.GetSelector(Record, Idx));
- E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
VisitExpr(E);
E->setProtocol(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
- E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
VisitExpr(E);
E->setDecl(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setBase(Reader.ReadSubExpr());
E->setIsArrow(Record[Idx++]);
E->setIsFreeIvar(Record[Idx++]);
@@ -820,23 +861,31 @@ void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
- E->setProperty(cast<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setBase(Reader.ReadSubExpr());
-}
-
-void ASTStmtReader::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
- VisitExpr(E);
- E->setGetterMethod(
- cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
- E->setSetterMethod(
- cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
- E->setInterfaceDecl(
- cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
- E->setBase(Reader.ReadSubExpr());
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ bool Implicit = Record[Idx++] != 0;
+ if (Implicit) {
+ ObjCMethodDecl *Getter =
+ cast<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++]));
+ ObjCMethodDecl *Setter =
+ cast<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++]));
+ E->setImplicitProperty(Getter, Setter);
+ } else {
+ E->setExplicitProperty(
+ cast<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
+ }
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setReceiverLocation(ReadSourceLocation(Record, Idx));
+ switch (Record[Idx++]) {
+ case 0:
+ E->setBase(Reader.ReadSubExpr());
+ break;
+ case 1:
+ E->setSuperReceiver(Reader.GetType(Record[Idx++]));
+ break;
+ case 2:
+ E->setClassReceiver(
+ cast<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+ break;
+ }
}
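
The property-reference reader above decodes a discriminator that selects one of three receiver payloads (base expression, super type, or class declaration). A sketch of the same tag-dispatched decoding, using std::variant in place of Clang's internal representation; all names are invented.

#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

struct ExprRef  { uint64_t ID; };
struct TypeRef  { uint64_t ID; };
struct ClassRef { uint64_t ID; };
using Receiver = std::variant<ExprRef, TypeRef, ClassRef>;

Receiver readReceiver(const std::vector<uint64_t> &Record, unsigned &Idx) {
  switch (Record[Idx++]) {
  case 0:  return ExprRef{Record[Idx++]};   // base expression
  case 1:  return TypeRef{Record[Idx++]};   // 'super' receiver type
  default: return ClassRef{Record[Idx++]};  // class receiver
  }
}

int main() {
  std::vector<uint64_t> Record = {1, 99};
  unsigned Idx = 0;
  Receiver R = readReceiver(Record, Idx);
  std::cout << R.index() << '\n'; // 1 -> super receiver
}
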
void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -851,13 +900,13 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
break;
case ObjCMessageExpr::Class:
- E->setClassReceiver(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setClassReceiver(GetTypeSourceInfo(Record, Idx));
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance: {
QualType T = Reader.GetType(Record[Idx++]);
- SourceLocation SuperLoc = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation SuperLoc = ReadSourceLocation(Record, Idx);
E->setSuper(SuperLoc, T, Kind == ObjCMessageExpr::SuperInstance);
break;
}
@@ -870,39 +919,35 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
else
E->setSelector(Reader.GetSelector(Record, Idx));
- E->setLeftLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRightLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->LBracLoc = ReadSourceLocation(Record, Idx);
+ E->RBracLoc = ReadSourceLocation(Record, Idx);
+ E->SelectorLoc = ReadSourceLocation(Record, Idx);
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Reader.ReadSubExpr());
}
-void ASTStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) {
- VisitExpr(E);
- E->setLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
-}
-
void ASTStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
VisitStmt(S);
S->setElement(Reader.ReadSubStmt());
S->setCollection(Reader.ReadSubExpr());
S->setBody(Reader.ReadSubStmt());
- S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
VisitStmt(S);
S->setCatchBody(Reader.ReadSubStmt());
S->setCatchParamDecl(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- S->setAtCatchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setAtCatchLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
VisitStmt(S);
S->setFinallyBody(Reader.ReadSubStmt());
- S->setAtFinallyLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setAtFinallyLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
@@ -916,20 +961,20 @@ void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
if (HasFinally)
S->setFinallyStmt(Reader.ReadSubStmt());
- S->setAtTryLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setAtTryLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
VisitStmt(S);
S->setSynchExpr(Reader.ReadSubStmt());
S->setSynchBody(Reader.ReadSubStmt());
- S->setAtSynchronizedLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setAtSynchronizedLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
VisitStmt(S);
S->setThrowExpr(Reader.ReadSubStmt());
- S->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setThrowLoc(ReadSourceLocation(Record, Idx));
}
//===----------------------------------------------------------------------===//
@@ -938,7 +983,7 @@ void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
void ASTStmtReader::VisitCXXCatchStmt(CXXCatchStmt *S) {
VisitStmt(S);
- S->CatchLoc = Reader.ReadSourceLocation(Record, Idx);
+ S->CatchLoc = ReadSourceLocation(Record, Idx);
S->ExceptionDecl = cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]));
S->HandlerBlock = Reader.ReadSubStmt();
}
@@ -947,7 +992,7 @@ void ASTStmtReader::VisitCXXTryStmt(CXXTryStmt *S) {
VisitStmt(S);
assert(Record[Idx] == S->getNumHandlers() && "NumStmtFields is wrong ?");
++Idx;
- S->TryLoc = Reader.ReadSourceLocation(Record, Idx);
+ S->TryLoc = ReadSourceLocation(Record, Idx);
S->getStmts()[0] = Reader.ReadSubStmt();
for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
S->getStmts()[i + 1] = Reader.ReadSubStmt();
@@ -966,21 +1011,23 @@ void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Reader.ReadSubExpr());
E->setConstructor(cast<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++])));
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setElidable(Record[Idx++]);
E->setRequiresZeroInitialization(Record[Idx++]);
E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]);
+ E->ParenRange = ReadSourceRange(Record, Idx);
}
void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
VisitCXXConstructExpr(E);
- E->TyBeginLoc = Reader.ReadSourceLocation(Record, Idx);
- E->RParenLoc = Reader.ReadSourceLocation(Record, Idx);
+ E->Type = GetTypeSourceInfo(Record, Idx);
}
void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
- E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ SourceRange R = ReadSourceRange(Record, Idx);
+ E->Loc = R.getBegin();
+ E->RParenLoc = R.getEnd();
}
void ASTStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
@@ -1001,43 +1048,55 @@ void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
VisitExplicitCastExpr(E);
- E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setTypeBeginLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
VisitExpr(E);
E->setValue(Record[Idx++]);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
VisitExpr(E);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
VisitExpr(E);
- E->setSourceRange(Reader.ReadSourceRange(Record, Idx));
+ E->setSourceRange(ReadSourceRange(Record, Idx));
if (E->isTypeOperand()) { // typeid(int)
E->setTypeOperandSourceInfo(
- Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ GetTypeSourceInfo(Record, Idx));
return;
}
// typeid(42+2)
E->setExprOperand(Reader.ReadSubExpr());
}
+void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // __uuidof(ComType)
+ E->setTypeOperandSourceInfo(
+ GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // __uuidof(expr)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLocation(ReadSourceLocation(Record, Idx));
E->setImplicit(Record[Idx++]);
}
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
- E->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setThrowLoc(ReadSourceLocation(Record, Idx));
E->setSubExpr(Reader.ReadSubExpr());
}
@@ -1047,7 +1106,7 @@ void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
assert(Record[Idx] == E->Param.getInt() && "We messed up at creation ?");
++Idx; // HasOtherExprStored and SubExpr were handled during creation.
E->Param.setPointer(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
- E->Loc = Reader.ReadSourceLocation(Record, Idx);
+ E->Loc = ReadSourceLocation(Record, Idx);
}
void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -1058,14 +1117,15 @@ void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
- E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->TypeInfo = GetTypeSourceInfo(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
}
void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
- E->setGlobalNew(Record[Idx++]);
- E->setHasInitializer(Record[Idx++]);
+ E->GlobalNew = Record[Idx++];
+ E->Initializer = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
bool isArray = Record[Idx++];
unsigned NumPlacementArgs = Record[Idx++];
unsigned NumCtorArgs = Record[Idx++];
@@ -1074,13 +1134,16 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
E->setConstructor(
cast_or_null<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++])));
+ E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
SourceRange TypeIdParens;
- TypeIdParens.setBegin(SourceLocation::getFromRawEncoding(Record[Idx++]));
- TypeIdParens.setEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TypeIdParens.setBegin(ReadSourceLocation(Record, Idx));
+ TypeIdParens.setEnd(ReadSourceLocation(Record, Idx));
E->TypeIdParens = TypeIdParens;
- E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
-
+ E->StartLoc = ReadSourceLocation(Record, Idx);
+ E->EndLoc = ReadSourceLocation(Record, Idx);
+ E->ConstructorLParen = ReadSourceLocation(Record, Idx);
+ E->ConstructorRParen = ReadSourceLocation(Record, Idx);
+
E->AllocateArgsArray(*Reader.getContext(), isArray, NumPlacementArgs,
NumCtorArgs);
@@ -1092,12 +1155,13 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
VisitExpr(E);
- E->setGlobalDelete(Record[Idx++]);
- E->setArrayForm(Record[Idx++]);
- E->setOperatorDelete(
- cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
- E->setArgument(Reader.ReadSubExpr());
- E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->GlobalDelete = Record[Idx++];
+ E->ArrayForm = Record[Idx++];
+ E->ArrayFormAsWritten = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
+ E->OperatorDelete = cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++]));
+ E->Argument = Reader.ReadSubExpr();
+ E->Loc = ReadSourceLocation(Record, Idx);
}
void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
@@ -1105,21 +1169,21 @@ void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
E->setBase(Reader.ReadSubExpr());
E->setArrow(Record[Idx++]);
- E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
- E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
- E->setScopeTypeInfo(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
- E->setColonColonLoc(Reader.ReadSourceLocation(Record, Idx));
- E->setTildeLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setQualifierRange(ReadSourceRange(Record, Idx));
+ E->setScopeTypeInfo(GetTypeSourceInfo(Record, Idx));
+ E->setColonColonLoc(ReadSourceLocation(Record, Idx));
+ E->setTildeLoc(ReadSourceLocation(Record, Idx));
IdentifierInfo *II = Reader.GetIdentifierInfo(Record, Idx);
if (II)
- E->setDestroyedType(II, Reader.ReadSourceLocation(Record, Idx));
+ E->setDestroyedType(II, ReadSourceLocation(Record, Idx));
else
- E->setDestroyedType(Reader.GetTypeSourceInfo(DeclsCursor, Record, Idx));
+ E->setDestroyedType(GetTypeSourceInfo(Record, Idx));
}
-void ASTStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
unsigned NumTemps = Record[Idx++];
if (NumTemps) {
@@ -1134,41 +1198,31 @@ void
ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
- unsigned NumTemplateArgs = Record[Idx++];
- assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
- "Read wrong record during creation ?");
- if (E->hasExplicitTemplateArgs())
+ if (Record[Idx++])
ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- NumTemplateArgs);
+ Record[Idx++]);
E->setBase(Reader.ReadSubExpr());
E->setBaseType(Reader.GetType(Record[Idx++]));
E->setArrow(Record[Idx++]);
- E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
- E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ E->setQualifierRange(ReadSourceRange(Record, Idx));
E->setFirstQualifierFoundInScope(
cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++])));
- // FIXME: read whole DeclarationNameInfo.
- E->setMember(Reader.ReadDeclarationName(Record, Idx));
- E->setMemberLoc(Reader.ReadSourceLocation(Record, Idx));
+ ReadDeclarationNameInfo(E->MemberNameInfo, Record, Idx);
}
void
ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
- unsigned NumTemplateArgs = Record[Idx++];
- assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
- "Read wrong record during creation ?");
- if (E->hasExplicitTemplateArgs())
- ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- NumTemplateArgs);
-
- // FIXME: read whole DeclarationNameInfo.
- E->setDeclName(Reader.ReadDeclarationName(Record, Idx));
- E->setLocation(Reader.ReadSourceLocation(Record, Idx));
- E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ if (Record[Idx++])
+ ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
+ Record[Idx++]);
+
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+ E->setQualifierRange(ReadSourceRange(Record, Idx));
E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
}
@@ -1179,21 +1233,18 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
++Idx; // NumArgs;
for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
E->setArg(I, Reader.ReadSubExpr());
- E->setTypeBeginLoc(Reader.ReadSourceLocation(Record, Idx));
- E->setTypeAsWritten(Reader.GetType(Record[Idx++]));
- E->setLParenLoc(Reader.ReadSourceLocation(Record, Idx));
- E->setRParenLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->Type = GetTypeSourceInfo(Record, Idx);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
- unsigned NumTemplateArgs = Record[Idx++];
- assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
- "Read wrong record during creation ?");
- if (E->hasExplicitTemplateArgs())
+ // Read the explicit template argument list, if available.
+ if (Record[Idx++])
ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- NumTemplateArgs);
+ Record[Idx++]);
unsigned NumDecls = Record[Idx++];
UnresolvedSet<8> Decls;
@@ -1204,11 +1255,9 @@ void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
}
E->initializeResults(*Reader.getContext(), Decls.begin(), Decls.end());
- // FIXME: read whole DeclarationNameInfo.
- E->setName(Reader.ReadDeclarationName(Record, Idx));
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
- E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
- E->setNameLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setQualifierRange(ReadSourceRange(Record, Idx));
}
void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
@@ -1217,7 +1266,7 @@ void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
E->setHasUnresolvedUsing(Record[Idx++]);
E->setBase(Reader.ReadSubExpr());
E->setBaseType(Reader.GetType(Record[Idx++]));
- E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
}
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
@@ -1230,17 +1279,81 @@ void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
void ASTStmtReader::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
VisitExpr(E);
E->UTT = (UnaryTypeTrait)Record[Idx++];
- SourceRange Range = Reader.ReadSourceRange(Record, Idx);
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
E->Loc = Range.getBegin();
E->RParen = Range.getEnd();
- E->QueriedType = Reader.GetType(Record[Idx++]);
+ E->QueriedType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->BTT = (BinaryTypeTrait)Record[Idx++];
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->LhsType = GetTypeSourceInfo(Record, Idx);
+ E->RhsType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ E->Value = (bool)Record[Idx++];
+ E->Range = ReadSourceRange(Record, Idx);
+ E->Operand = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ E->EllipsisLoc = ReadSourceLocation(Record, Idx);
+ E->NumExpansions = Record[Idx++];
+ E->Pattern = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->PackLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->Length = Record[Idx++];
+ E->Pack = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+}
+
+void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ E->Param
+ = cast_or_null<NonTypeTemplateParmDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateArgument ArgPack = Reader.ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return;
+
+ E->Arguments = ArgPack.pack_begin();
+ E->NumArguments = ArgPack.pack_size();
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ Idx++; // skip ID
+ E->Loc = ReadSourceLocation(Record, Idx);
}
-Stmt *ASTReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ E->setConfig(cast<CallExpr>(Reader.ReadSubExpr()));
+}
+
+Stmt *ASTReader::ReadStmt(PerFileData &F) {
switch (ReadingKind) {
case Read_Decl:
case Read_Type:
- return ReadStmtFromStream(Cursor);
+ return ReadStmtFromStream(F);
case Read_Stmt:
return ReadSubStmt();
}
@@ -1249,8 +1362,8 @@ Stmt *ASTReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
return 0;
}
-Expr *ASTReader::ReadExpr(llvm::BitstreamCursor &Cursor) {
- return cast_or_null<Expr>(ReadStmt(Cursor));
+Expr *ASTReader::ReadExpr(PerFileData &F) {
+ return cast_or_null<Expr>(ReadStmt(F));
}
Expr *ASTReader::ReadSubExpr() {
@@ -1264,17 +1377,18 @@ Expr *ASTReader::ReadSubExpr() {
// the stack, with expressions having operands removing those operands from the
// stack. Evaluation terminates when we see a STMT_STOP record, and
// the single remaining expression on the stack is our result.
-Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
+Stmt *ASTReader::ReadStmtFromStream(PerFileData &F) {
ReadingKindTracker ReadingKind(Read_Stmt, *this);
-
+ llvm::BitstreamCursor &Cursor = F.DeclsCursor;
+
#ifndef NDEBUG
unsigned PrevNumStmts = StmtStack.size();
#endif
RecordData Record;
unsigned Idx;
- ASTStmtReader Reader(*this, Cursor, Record, Idx);
+ ASTStmtReader Reader(*this, F, Cursor, Record, Idx);
Stmt::EmptyShell Empty;
while (true) {
@@ -1390,7 +1504,10 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
case EXPR_DECL_REF:
S = DeclRefExpr::CreateEmpty(*Context,
/*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 1]);
+ /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 2]
+ : 0);
break;
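As context for the indexing just above: the DeclRefExpr record lays out flag-then-optional-count after the common expression fields, so the count slot only exists when the flag is set. Spelled out as a sketch (F is shorthand introduced here, not a name from the patch):

  // F = ASTStmtReader::NumExprFields
  // Record[F]     : HasQualifier flag (0 or 1)
  // Record[F + 1] : HasExplicitTemplateArgs flag (0 or 1)
  // Record[F + 2] : NumTemplateArgs, present only when Record[F + 1] != 0
  unsigned F = ASTStmtReader::NumExprFields;
  unsigned NumTemplateArgs = Record[F + 1] ? Record[F + 2] : 0;

The same flag-guarded count reappears below for the dependent-scope and unresolved expressions.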
case EXPR_INTEGER_LITERAL:
@@ -1454,16 +1571,17 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
SourceRange QualifierRange;
if (Record[Idx++]) { // HasQualifier.
NNS = ReadNestedNameSpecifier(Record, Idx);
- QualifierRange = ReadSourceRange(Record, Idx);
+ QualifierRange = ReadSourceRange(F, Record, Idx);
}
TemplateArgumentListInfo ArgInfo;
- unsigned NumTemplateArgs = Record[Idx++];
- if (NumTemplateArgs) {
- ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
- ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ bool HasExplicitTemplateArgs = Record[Idx++];
+ if (HasExplicitTemplateArgs) {
+ unsigned NumTemplateArgs = Record[Idx++];
+ ArgInfo.setLAngleLoc(ReadSourceLocation(F, Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(F, Record, Idx));
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- ArgInfo.addArgument(ReadTemplateArgumentLoc(Cursor, Record, Idx));
+ ArgInfo.addArgument(ReadTemplateArgumentLoc(F, Record, Idx));
}
NamedDecl *FoundD = cast_or_null<NamedDecl>(GetDecl(Record[Idx++]));
@@ -1471,16 +1589,19 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
QualType T = GetType(Record[Idx++]);
+ ExprValueKind VK = static_cast<ExprValueKind>(Record[Idx++]);
+ ExprObjectKind OK = static_cast<ExprObjectKind>(Record[Idx++]);
Expr *Base = ReadSubExpr();
ValueDecl *MemberD = cast<ValueDecl>(GetDecl(Record[Idx++]));
- // FIXME: read DeclarationNameLoc.
- SourceLocation MemberLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation MemberLoc = ReadSourceLocation(F, Record, Idx);
DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
bool IsArrow = Record[Idx++];
S = MemberExpr::Create(*Context, Base, IsArrow, NNS, QualifierRange,
MemberD, FoundDecl, MemberNameInfo,
- NumTemplateArgs ? &ArgInfo : 0, T);
+ HasExplicitTemplateArgs ? &ArgInfo : 0, T, VK, OK);
+ ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc,
+ MemberD->getDeclName(), Record, Idx);
break;
}
@@ -1496,6 +1617,10 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
S = new (Context) ConditionalOperator(Empty);
break;
+ case EXPR_BINARY_CONDITIONAL_OPERATOR:
+ S = new (Context) BinaryConditionalOperator(Empty);
+ break;
+
case EXPR_IMPLICIT_CAST:
S = ImplicitCastExpr::CreateEmpty(*Context,
/*PathSize*/ Record[ASTStmtReader::NumExprFields]);
@@ -1540,10 +1665,6 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
S = new (Context) StmtExpr(Empty);
break;
- case EXPR_TYPES_COMPATIBLE:
- S = new (Context) TypesCompatibleExpr(Empty);
- break;
-
case EXPR_CHOOSE:
S = new (Context) ChooseExpr(Empty);
break;
@@ -1583,15 +1704,12 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
S = new (Context) ObjCPropertyRefExpr(Empty);
break;
case EXPR_OBJC_KVC_REF_EXPR:
- S = new (Context) ObjCImplicitSetterGetterRefExpr(Empty);
+ llvm_unreachable("mismatching AST file");
break;
case EXPR_OBJC_MESSAGE_EXPR:
S = ObjCMessageExpr::CreateEmpty(*Context,
Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_OBJC_SUPER_EXPR:
- S = new (Context) ObjCSuperExpr(Empty);
- break;
case EXPR_OBJC_ISA:
S = new (Context) ObjCIsaExpr(Empty);
break;
@@ -1678,6 +1796,12 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
case EXPR_CXX_TYPEID_TYPE:
S = new (Context) CXXTypeidExpr(Empty, false);
break;
+ case EXPR_CXX_UUIDOF_EXPR:
+ S = new (Context) CXXUuidofExpr(Empty, true);
+ break;
+ case EXPR_CXX_UUIDOF_TYPE:
+ S = new (Context) CXXUuidofExpr(Empty, false);
+ break;
case EXPR_CXX_THIS:
S = new (Context) CXXThisExpr(Empty);
break;
@@ -1710,18 +1834,24 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
S = new (Context) CXXPseudoDestructorExpr(Empty);
break;
- case EXPR_CXX_EXPR_WITH_TEMPORARIES:
- S = new (Context) CXXExprWithTemporaries(Empty);
+ case EXPR_EXPR_WITH_CLEANUPS:
+ S = new (Context) ExprWithCleanups(Empty);
break;
case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
S = CXXDependentScopeMemberExpr::CreateEmpty(*Context,
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]);
+ /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
break;
case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
S = DependentScopeDeclRefExpr::CreateEmpty(*Context,
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]);
+ /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
break;
case EXPR_CXX_UNRESOLVED_CONSTRUCT:
@@ -1731,17 +1861,62 @@ Stmt *ASTReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
case EXPR_CXX_UNRESOLVED_MEMBER:
S = UnresolvedMemberExpr::CreateEmpty(*Context,
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]);
+ /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
break;
case EXPR_CXX_UNRESOLVED_LOOKUP:
S = UnresolvedLookupExpr::CreateEmpty(*Context,
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]);
+ /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
break;
case EXPR_CXX_UNARY_TYPE_TRAIT:
S = new (Context) UnaryTypeTraitExpr(Empty);
break;
+
+ case EXPR_BINARY_TYPE_TRAIT:
+ S = new (Context) BinaryTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_CXX_NOEXCEPT:
+ S = new (Context) CXXNoexceptExpr(Empty);
+ break;
+
+ case EXPR_PACK_EXPANSION:
+ S = new (Context) PackExpansionExpr(Empty);
+ break;
+
+ case EXPR_SIZEOF_PACK:
+ S = new (Context) SizeOfPackExpr(Empty);
+ break;
+
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
+ S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
+ break;
+
+ case EXPR_OPAQUE_VALUE: {
+ unsigned key = Record[ASTStmtReader::NumExprFields];
+ OpaqueValueExpr *&expr = OpaqueValueExprs[key];
+
+ // If we already have an entry for this opaque value expression,
+ // don't bother reading it again.
+ if (expr) {
+ StmtStack.push_back(expr);
+ continue;
+ }
+
+ S = expr = new (Context) OpaqueValueExpr(Empty);
+ break;
+ }
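The caching in the EXPR_OPAQUE_VALUE case just above is plain memoization: every record carrying the same key must resolve to the one shared node, so only the first sighting allocates. The pattern in isolation (toy types, not the clang members):

  #include <map>

  struct Node { /* ... */ };
  std::map<unsigned, Node *> Cache;

  Node *getOrCreate(unsigned Key) {
    Node *&Slot = Cache[Key];   // reference into the map entry
    if (!Slot)
      Slot = new Node();        // only the first sighting allocates
    return Slot;                // later sightings reuse the same node
  }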
+
+ case EXPR_CUDA_KERNEL_CALL:
+ S = new (Context) CUDAKernelCallExpr(*Context, Empty);
+ break;
}
// We hit a STMT_STOP, so we're done with this expression.
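That STMT_STOP comment closes the loop on the stack discipline documented before ReadStmtFromStream: records arrive in post-order, leaf records push a node, operator records pop their operands, and STMT_STOP leaves exactly one result on the stack. A minimal self-contained sketch of the same discipline (toy types, not the clang API):

  #include <memory>
  #include <vector>

  struct Node { std::vector<std::unique_ptr<Node>> Operands; };
  enum RecKind { REC_LEAF, REC_BINOP, REC_STOP };

  std::unique_ptr<Node> decode(const std::vector<RecKind> &Stream) {
    std::vector<std::unique_ptr<Node>> Stack;
    for (std::size_t I = 0; I != Stream.size(); ++I) {
      if (Stream[I] == REC_STOP)
        break;                                  // evaluation terminates here
      std::unique_ptr<Node> N(new Node());
      if (Stream[I] == REC_BINOP) {             // operands were pushed first,
        N->Operands.push_back(std::move(Stack.back())); Stack.pop_back();
        N->Operands.push_back(std::move(Stack.back())); Stack.pop_back();
      }                                         // so the operator consumes them
      Stack.push_back(std::move(N));
    }
    return std::move(Stack.back());             // the single remaining expr
  }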
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
index 3b6b218..8fcb535 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ASTSerializationListener.h"
#include "ASTCommon.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/IdentifierResolver.h"
@@ -19,6 +20,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
@@ -29,6 +31,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/OnDiskHashTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
@@ -38,9 +41,11 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
#include <cstdio>
+#include <string.h>
using namespace clang;
using namespace clang::serialization;
@@ -60,13 +65,13 @@ const T *data(const std::vector<T, Allocator> &v) {
namespace {
class ASTTypeWriter {
ASTWriter &Writer;
- ASTWriter::RecordData &Record;
+ ASTWriter::RecordDataImpl &Record;
public:
/// \brief Type code that corresponds to the record generated.
TypeCode Code;
- ASTTypeWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
+ ASTTypeWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
: Writer(Writer), Record(Record), Code(TYPE_EXT_QUAL) { }
void VisitArrayType(const ArrayType *T);
@@ -142,7 +147,7 @@ void ASTTypeWriter::VisitVariableArrayType(const VariableArrayType *T) {
void ASTTypeWriter::VisitVectorType(const VectorType *T) {
Writer.AddTypeRef(T->getElementType(), Record);
Record.push_back(T->getNumElements());
- Record.push_back(T->getAltiVecSpecific());
+ Record.push_back(T->getVectorKind());
Code = TYPE_VECTOR;
}
@@ -172,6 +177,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Writer.AddTypeRef(T->getArgType(I), Record);
Record.push_back(T->isVariadic());
Record.push_back(T->getTypeQuals());
+ Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
Record.push_back(T->hasExceptionSpec());
Record.push_back(T->hasAnyExceptionSpec());
Record.push_back(T->getNumExceptions());
@@ -207,6 +213,11 @@ void ASTTypeWriter::VisitDecltypeType(const DecltypeType *T) {
Code = TYPE_DECLTYPE;
}
+void ASTTypeWriter::VisitAutoType(const AutoType *T) {
+ Writer.AddTypeRef(T->getDeducedType(), Record);
+ Code = TYPE_AUTO;
+}
+
void ASTTypeWriter::VisitTagType(const TagType *T) {
Record.push_back(T->isDependentType());
Writer.AddDeclRef(T->getDecl(), Record);
@@ -224,6 +235,13 @@ void ASTTypeWriter::VisitEnumType(const EnumType *T) {
Code = TYPE_ENUM;
}
+void ASTTypeWriter::VisitAttributedType(const AttributedType *T) {
+ Writer.AddTypeRef(T->getModifiedType(), Record);
+ Writer.AddTypeRef(T->getEquivalentType(), Record);
+ Record.push_back(T->getAttrKind());
+ Code = TYPE_ATTRIBUTED;
+}
+
void
ASTTypeWriter::VisitSubstTemplateTypeParmType(
const SubstTemplateTypeParmType *T) {
@@ -233,6 +251,14 @@ ASTTypeWriter::VisitSubstTemplateTypeParmType(
}
void
+ASTTypeWriter::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ Writer.AddTypeRef(QualType(T->getReplacedParameter(), 0), Record);
+ Writer.AddTemplateArgument(T->getArgumentPack(), Record);
+ Code = TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK;
+}
+
+void
ASTTypeWriter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
Record.push_back(T->isDependentType());
@@ -295,6 +321,20 @@ ASTTypeWriter::VisitDependentTemplateSpecializationType(
Code = TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION;
}
+void ASTTypeWriter::VisitPackExpansionType(const PackExpansionType *T) {
+ Writer.AddTypeRef(T->getPattern(), Record);
+ if (llvm::Optional<unsigned> NumExpansions = T->getNumExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
+ Code = TYPE_PACK_EXPANSION;
+}
+
+void ASTTypeWriter::VisitParenType(const ParenType *T) {
+ Writer.AddTypeRef(T->getInnerType(), Record);
+ Code = TYPE_PAREN;
+}
+
void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
Record.push_back(T->getKeyword());
Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
@@ -332,10 +372,10 @@ namespace {
class TypeLocWriter : public TypeLocVisitor<TypeLocWriter> {
ASTWriter &Writer;
- ASTWriter::RecordData &Record;
+ ASTWriter::RecordDataImpl &Record;
public:
- TypeLocWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
+ TypeLocWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
: Writer(Writer), Record(Record) { }
#define ABSTRACT_TYPELOC(CLASS, PARENT)
@@ -412,6 +452,7 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Writer.AddSourceLocation(TL.getLParenLoc(), Record);
Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Record.push_back(TL.getTrailingReturn());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
Writer.AddDeclRef(TL.getArg(i), Record);
}
@@ -441,12 +482,30 @@ void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
+void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
+void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAttrNameLoc(), Record);
+ if (TL.hasAttrOperand()) {
+ SourceRange range = TL.getAttrOperandParensRange();
+ Writer.AddSourceLocation(range.getBegin(), Record);
+ Writer.AddSourceLocation(range.getEnd(), Record);
+ }
+ if (TL.hasAttrExprOperand()) {
+ Expr *operand = TL.getAttrExprOperand();
+ Record.push_back(operand ? 1 : 0);
+ if (operand) Writer.AddStmt(operand);
+ } else if (TL.hasAttrEnumOperand()) {
+ Writer.AddSourceLocation(TL.getAttrEnumOperandLoc(), Record);
+ }
+}
void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
@@ -454,6 +513,10 @@ void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
+void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
@@ -463,6 +526,10 @@ void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
TL.getArgLoc(i).getLocInfo(), Record);
}
+void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
Writer.AddSourceRange(TL.getQualifierRange(), Record);
@@ -486,6 +553,9 @@ void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
TL.getArgLoc(I).getLocInfo(), Record);
}
+void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getEllipsisLoc(), Record);
+}
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
@@ -506,7 +576,7 @@ void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
static void EmitBlockID(unsigned ID, const char *Name,
llvm::BitstreamWriter &Stream,
- ASTWriter::RecordData &Record) {
+ ASTWriter::RecordDataImpl &Record) {
Record.clear();
Record.push_back(ID);
Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
@@ -521,7 +591,7 @@ static void EmitBlockID(unsigned ID, const char *Name,
static void EmitRecordID(unsigned ID, const char *Name,
llvm::BitstreamWriter &Stream,
- ASTWriter::RecordData &Record) {
+ ASTWriter::RecordDataImpl &Record) {
Record.clear();
Record.push_back(ID);
while (*Name)
@@ -530,7 +600,7 @@ static void EmitRecordID(unsigned ID, const char *Name,
}
static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
- ASTWriter::RecordData &Record) {
+ ASTWriter::RecordDataImpl &Record) {
#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
RECORD(STMT_STOP);
RECORD(STMT_NULL_PTR);
@@ -577,7 +647,6 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_VA_ARG);
RECORD(EXPR_ADDR_LABEL);
RECORD(EXPR_STMT);
- RECORD(EXPR_TYPES_COMPATIBLE);
RECORD(EXPR_CHOOSE);
RECORD(EXPR_GNU_NULL);
RECORD(EXPR_SHUFFLE_VECTOR);
@@ -591,7 +660,6 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_OBJC_PROPERTY_REF_EXPR);
RECORD(EXPR_OBJC_KVC_REF_EXPR);
RECORD(EXPR_OBJC_MESSAGE_EXPR);
- RECORD(EXPR_OBJC_SUPER_EXPR);
RECORD(STMT_OBJC_FOR_COLLECTION);
RECORD(STMT_OBJC_CATCH);
RECORD(STMT_OBJC_FINALLY);
@@ -607,6 +675,32 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_FUNCTIONAL_CAST);
RECORD(EXPR_CXX_BOOL_LITERAL);
RECORD(EXPR_CXX_NULL_PTR_LITERAL);
+ RECORD(EXPR_CXX_TYPEID_EXPR);
+ RECORD(EXPR_CXX_TYPEID_TYPE);
+ RECORD(EXPR_CXX_UUIDOF_EXPR);
+ RECORD(EXPR_CXX_UUIDOF_TYPE);
+ RECORD(EXPR_CXX_THIS);
+ RECORD(EXPR_CXX_THROW);
+ RECORD(EXPR_CXX_DEFAULT_ARG);
+ RECORD(EXPR_CXX_BIND_TEMPORARY);
+ RECORD(EXPR_CXX_SCALAR_VALUE_INIT);
+ RECORD(EXPR_CXX_NEW);
+ RECORD(EXPR_CXX_DELETE);
+ RECORD(EXPR_CXX_PSEUDO_DESTRUCTOR);
+ RECORD(EXPR_EXPR_WITH_CLEANUPS);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_MEMBER);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_DECL_REF);
+ RECORD(EXPR_CXX_UNRESOLVED_CONSTRUCT);
+ RECORD(EXPR_CXX_UNRESOLVED_MEMBER);
+ RECORD(EXPR_CXX_UNRESOLVED_LOOKUP);
+ RECORD(EXPR_CXX_UNARY_TYPE_TRAIT);
+ RECORD(EXPR_CXX_NOEXCEPT);
+ RECORD(EXPR_OPAQUE_VALUE);
+ RECORD(EXPR_BINARY_TYPE_TRAIT);
+ RECORD(EXPR_PACK_EXPANSION);
+ RECORD(EXPR_SIZEOF_PACK);
+ RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
+ RECORD(EXPR_CUDA_KERNEL_CALL);
#undef RECORD
}
@@ -643,6 +737,21 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(MACRO_DEFINITION_OFFSETS);
RECORD(CHAINED_METADATA);
RECORD(REFERENCED_SELECTOR_POOL);
+ RECORD(TU_UPDATE_LEXICAL);
+ RECORD(REDECLS_UPDATE_LATEST);
+ RECORD(SEMA_DECL_REFS);
+ RECORD(WEAK_UNDECLARED_IDENTIFIERS);
+ RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
+ RECORD(DECL_REPLACEMENTS);
+ RECORD(UPDATE_VISIBLE);
+ RECORD(DECL_UPDATE_OFFSETS);
+ RECORD(DECL_UPDATES);
+ RECORD(CXX_BASE_SPECIFIER_OFFSETS);
+ RECORD(DIAG_PRAGMA_MAPPINGS);
+ RECORD(CUDA_SPECIAL_DECL_REFS);
+ RECORD(HEADER_SEARCH_TABLE);
+ RECORD(FP_PRAGMA_OPTIONS);
+ RECORD(OPENCL_EXTENSIONS);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -657,8 +766,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PP_MACRO_OBJECT_LIKE);
RECORD(PP_MACRO_FUNCTION_LIKE);
RECORD(PP_TOKEN);
- RECORD(PP_MACRO_INSTANTIATION);
- RECORD(PP_MACRO_DEFINITION);
// Decls and Types block.
BLOCK(DECLTYPES_BLOCK);
@@ -684,7 +791,21 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_OBJC_INTERFACE);
RECORD(TYPE_OBJC_OBJECT);
RECORD(TYPE_OBJC_OBJECT_POINTER);
- RECORD(DECL_ATTR);
+ RECORD(TYPE_DECLTYPE);
+ RECORD(TYPE_ELABORATED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_UNRESOLVED_USING);
+ RECORD(TYPE_INJECTED_CLASS_NAME);
+ RECORD(TYPE_OBJC_OBJECT);
+ RECORD(TYPE_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_NAME);
+ RECORD(TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_SIZED_ARRAY);
+ RECORD(TYPE_PAREN);
+ RECORD(TYPE_PACK_EXPANSION);
+ RECORD(TYPE_ATTRIBUTED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
RECORD(DECL_TRANSLATION_UNIT);
RECORD(DECL_TYPEDEF);
RECORD(DECL_ENUM);
@@ -712,6 +833,39 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_BLOCK);
RECORD(DECL_CONTEXT_LEXICAL);
RECORD(DECL_CONTEXT_VISIBLE);
+ RECORD(DECL_NAMESPACE);
+ RECORD(DECL_NAMESPACE_ALIAS);
+ RECORD(DECL_USING);
+ RECORD(DECL_USING_SHADOW);
+ RECORD(DECL_USING_DIRECTIVE);
+ RECORD(DECL_UNRESOLVED_USING_VALUE);
+ RECORD(DECL_UNRESOLVED_USING_TYPENAME);
+ RECORD(DECL_LINKAGE_SPEC);
+ RECORD(DECL_CXX_RECORD);
+ RECORD(DECL_CXX_METHOD);
+ RECORD(DECL_CXX_CONSTRUCTOR);
+ RECORD(DECL_CXX_DESTRUCTOR);
+ RECORD(DECL_CXX_CONVERSION);
+ RECORD(DECL_ACCESS_SPEC);
+ RECORD(DECL_FRIEND);
+ RECORD(DECL_FRIEND_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE_SPECIALIZATION);
+ RECORD(DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION);
+ RECORD(DECL_FUNCTION_TEMPLATE);
+ RECORD(DECL_TEMPLATE_TYPE_PARM);
+ RECORD(DECL_NON_TYPE_TEMPLATE_PARM);
+ RECORD(DECL_TEMPLATE_TEMPLATE_PARM);
+ RECORD(DECL_STATIC_ASSERT);
+ RECORD(DECL_CXX_BASE_SPECIFIERS);
+ RECORD(DECL_INDIRECTFIELD);
+ RECORD(DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK);
+
+ BLOCK(PREPROCESSOR_DETAIL_BLOCK);
+ RECORD(PPD_MACRO_INSTANTIATION);
+ RECORD(PPD_MACRO_DEFINITION);
+ RECORD(PPD_INCLUSION_DIRECTIVE);
+
// Statements and Exprs can occur in the Decls and Types block.
AddStmtsExprs(Stream, Record);
#undef RECORD
@@ -756,7 +910,8 @@ adjustFilenameForRelocatablePCH(const char *Filename, const char *isysroot) {
}
/// \brief Write the AST metadata (e.g., i686-apple-darwin9).
-void ASTWriter::WriteMetadata(ASTContext &Context, const char *isysroot) {
+void ASTWriter::WriteMetadata(ASTContext &Context, const char *isysroot,
+ const std::string &OutputFile) {
using namespace llvm;
// Metadata
@@ -792,9 +947,9 @@ void ASTWriter::WriteMetadata(ASTContext &Context, const char *isysroot) {
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
- llvm::sys::Path MainFilePath(MainFile->getName());
- MainFilePath.makeAbsolute();
+ llvm::SmallString<128> MainFilePath(MainFile->getName());
+ llvm::sys::fs::make_absolute(MainFilePath);
const char *MainFileNameStr = MainFilePath.c_str();
MainFileNameStr = adjustFilenameForRelocatablePCH(MainFileNameStr,
@@ -804,6 +959,23 @@ void ASTWriter::WriteMetadata(ASTContext &Context, const char *isysroot) {
Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr);
}
+ // Original PCH directory
+ if (!OutputFile.empty() && OutputFile != "-") {
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(ORIGINAL_PCH_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+
+ llvm::SmallString<128> OutputPath(OutputFile);
+
+ llvm::sys::fs::make_absolute(OutputPath);
+ StringRef origDir = llvm::sys::path::parent_path(OutputPath);
+
+ RecordData Record;
+ Record.push_back(ORIGINAL_PCH_DIR);
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
+ }
+
// Repository branch/version information.
BitCodeAbbrev *RepoAbbrev = new BitCodeAbbrev();
RepoAbbrev->Add(BitCodeAbbrevOp(VERSION_CONTROL_BRANCH_REVISION));
@@ -829,6 +1001,8 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.HexFloats); // C99 Hexadecimal float constants.
Record.push_back(LangOpts.C99); // C99 Support
Record.push_back(LangOpts.Microsoft); // Microsoft extensions.
+ // LangOpts.MSCVersion is ignored because all it does is set a macro, which is
+ // already saved elsewhere.
Record.push_back(LangOpts.CPlusPlus); // C++ Support
Record.push_back(LangOpts.CPlusPlus0x); // C++0x Support
Record.push_back(LangOpts.CXXOperatorNames); // Treat C++ operator names as keywords.
@@ -839,6 +1013,9 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
// modern abi enabled.
Record.push_back(LangOpts.ObjCNonFragileABI2); // Objective-C enhanced
// modern abi enabled.
+ Record.push_back(LangOpts.AppleKext); // Apple's kernel extensions ABI
+ Record.push_back(LangOpts.ObjCDefaultSynthProperties); // Objective-C auto-synthesized
+ // properties enabled.
Record.push_back(LangOpts.NoConstantCFStrings); // Disable generation of constant CFStrings.
Record.push_back(LangOpts.PascalStrings); // Allow Pascal strings
@@ -847,7 +1024,9 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.AltiVec);
Record.push_back(LangOpts.Exceptions); // Support exception handling.
Record.push_back(LangOpts.SjLjExceptions);
+ Record.push_back(LangOpts.ObjCExceptions);
+ Record.push_back(LangOpts.MSBitfields); // MS-compatible structure layout
Record.push_back(LangOpts.NeXTRuntime); // Use NeXT runtime.
Record.push_back(LangOpts.Freestanding); // Freestanding implementation
Record.push_back(LangOpts.NoBuiltin); // Do not use builtin functions (-fno-builtin)
@@ -879,12 +1058,17 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.CharIsSigned); // Whether char is a signed or
// unsigned type
Record.push_back(LangOpts.ShortWChar); // force wchar_t to be unsigned short
+ Record.push_back(LangOpts.ShortEnums); // Whether enum types should be
+ // equivalent to the smallest integer
+ // type with enough room.
Record.push_back(LangOpts.getGCMode());
Record.push_back(LangOpts.getVisibilityMode());
Record.push_back(LangOpts.getStackProtectorMode());
Record.push_back(LangOpts.InstantiationDepth);
Record.push_back(LangOpts.OpenCL);
+ Record.push_back(LangOpts.CUDA);
Record.push_back(LangOpts.CatchUndefined);
+ Record.push_back(LangOpts.DefaultFPContract);
Record.push_back(LangOpts.ElideConstructors);
Record.push_back(LangOpts.SpellChecking);
Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
@@ -901,8 +1085,8 @@ public:
typedef const char * key_type;
typedef key_type key_type_ref;
- typedef std::pair<int, struct stat> data_type;
- typedef const data_type& data_type_ref;
+ typedef struct stat data_type;
+ typedef const data_type &data_type_ref;
static unsigned ComputeHash(const char *path) {
return llvm::HashString(path);
@@ -913,9 +1097,7 @@ public:
data_type_ref Data) {
unsigned StrLen = strlen(path);
clang::io::Emit16(Out, StrLen);
- unsigned DataLen = 1; // result value
- if (Data.first == 0)
- DataLen += 4 + 4 + 2 + 8 + 8;
+ unsigned DataLen = 4 + 4 + 2 + 8 + 8;
clang::io::Emit8(Out, DataLen);
return std::make_pair(StrLen + 1, DataLen);
}
@@ -924,21 +1106,16 @@ public:
Out.write(path, KeyLen);
}
- void EmitData(llvm::raw_ostream& Out, key_type_ref,
+ void EmitData(llvm::raw_ostream &Out, key_type_ref,
data_type_ref Data, unsigned DataLen) {
using namespace clang::io;
uint64_t Start = Out.tell(); (void)Start;
- // Result of stat()
- Emit8(Out, Data.first? 1 : 0);
-
- if (Data.first == 0) {
- Emit32(Out, (uint32_t) Data.second.st_ino);
- Emit32(Out, (uint32_t) Data.second.st_dev);
- Emit16(Out, (uint16_t) Data.second.st_mode);
- Emit64(Out, (uint64_t) Data.second.st_mtime);
- Emit64(Out, (uint64_t) Data.second.st_size);
- }
+ Emit32(Out, (uint32_t) Data.st_ino);
+ Emit32(Out, (uint32_t) Data.st_dev);
+ Emit16(Out, (uint16_t) Data.st_mode);
+ Emit64(Out, (uint64_t) Data.st_mtime);
+ Emit64(Out, (uint64_t) Data.st_size);
assert(Out.tell() - Start == DataLen && "Wrong data length");
}
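One invariant worth spelling out in the trait above: the DataLen promised by EmitKeyDataLength must equal the bytes EmitData actually writes, or the closing assert fires. For the stat data the arithmetic is:

  // st_ino (4) + st_dev (4) + st_mode (2) + st_mtime (8) + st_size (8)
  unsigned DataLen = 4 + 4 + 2 + 8 + 8;  // = 26 bytes
  // EmitData writes Emit32 + Emit32 + Emit16 + Emit64 + Emit64, also 26
  // bytes, which is why Out.tell() - Start == DataLen holds.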
@@ -1002,11 +1179,6 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
// FileEntry fields.
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
- // HeaderFileInfo fields.
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isImport
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // DirInfo
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumIncludes
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // ControllingMacro
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
return Stream.EmitAbbrev(Abbrev);
}
@@ -1049,6 +1221,135 @@ static unsigned CreateSLocInstantiationAbbrev(llvm::BitstreamWriter &Stream) {
return Stream.EmitAbbrev(Abbrev);
}
+namespace {
+ // Trait used for the on-disk hash table of header search information.
+ class HeaderFileInfoTrait {
+ ASTWriter &Writer;
+ HeaderSearch &HS;
+
+ public:
+ HeaderFileInfoTrait(ASTWriter &Writer, HeaderSearch &HS)
+ : Writer(Writer), HS(HS) { }
+
+ typedef const char *key_type;
+ typedef key_type key_type_ref;
+
+ typedef HeaderFileInfo data_type;
+ typedef const data_type &data_type_ref;
+
+ static unsigned ComputeHash(const char *path) {
+ // The hash is based only on the filename portion of the key, so that the
+ // reader can match based on filenames when symlinking or excess path
+ // elements ("foo/../", "../") change the form of the name. However,
+ // the complete path is still the key.
+ return llvm::HashString(llvm::sys::path::filename(path));
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, const char *path,
+ data_type_ref Data) {
+ unsigned StrLen = strlen(path);
+ clang::io::Emit16(Out, StrLen);
+ unsigned DataLen = 1 + 2 + 4;
+ clang::io::Emit8(Out, DataLen);
+ return std::make_pair(StrLen + 1, DataLen);
+ }
+
+ void EmitKey(llvm::raw_ostream& Out, const char *path, unsigned KeyLen) {
+ Out.write(path, KeyLen);
+ }
+
+ void EmitData(llvm::raw_ostream &Out, key_type_ref,
+ data_type_ref Data, unsigned DataLen) {
+ using namespace clang::io;
+ uint64_t Start = Out.tell(); (void)Start;
+
+ unsigned char Flags = (Data.isImport << 3)
+ | (Data.DirInfo << 1)
+ | Data.Resolved;
+ Emit8(Out, (uint8_t)Flags);
+ Emit16(Out, (uint16_t) Data.NumIncludes);
+
+ if (!Data.ControllingMacro)
+ Emit32(Out, (uint32_t)Data.ControllingMacroID);
+ else
+ Emit32(Out, (uint32_t)Writer.getIdentifierRef(Data.ControllingMacro));
+ assert(Out.tell() - Start == DataLen && "Wrong data length");
+ }
+ };
+} // end anonymous namespace
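The Flags byte in EmitData above packs three HeaderFileInfo fields into one octet: Resolved in bit 0, the two-bit DirInfo in bits 1-2, and isImport in bit 3. A worked example with illustrative values:

  unsigned char Flags = (1 << 3) | (2 << 1) | 1;  // 8 | 4 | 1 == 13
  // A reader unpacks with the inverse shifts and masks:
  bool     isImport = (Flags >> 3) & 0x01;        // 1
  unsigned DirInfo  = (Flags >> 1) & 0x03;        // 2
  bool     Resolved =  Flags       & 0x01;        // 1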
+
+/// \brief Write the header search block for the list of files that appear
+/// in the header search information.
+///
+/// \param HS The header search structure to save.
+///
+/// \param isysroot The system root, used to adjust file names for a
+/// relocatable PCH.
+void ASTWriter::WriteHeaderSearch(HeaderSearch &HS, const char* isysroot) {
+ llvm::SmallVector<const FileEntry *, 16> FilesByUID;
+ HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
+
+ if (FilesByUID.size() > HS.header_file_size())
+ FilesByUID.resize(HS.header_file_size());
+
+ HeaderFileInfoTrait GeneratorTrait(*this, HS);
+ OnDiskChainedHashTableGenerator<HeaderFileInfoTrait> Generator;
+ llvm::SmallVector<const char *, 4> SavedStrings;
+ unsigned NumHeaderSearchEntries = 0;
+ for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
+ const FileEntry *File = FilesByUID[UID];
+ if (!File)
+ continue;
+
+ const HeaderFileInfo &HFI = HS.header_file_begin()[UID];
+ if (HFI.External && Chain)
+ continue;
+
+ // Turn the file name into an absolute path, if it isn't already.
+ const char *Filename = File->getName();
+ Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
+
+ // If we performed any translation on the file name at all, we need to
+ // save this string, since the generator will refer to it later.
+ if (Filename != File->getName()) {
+ Filename = strdup(Filename);
+ SavedStrings.push_back(Filename);
+ }
+
+ Generator.insert(Filename, HFI, GeneratorTrait);
+ ++NumHeaderSearchEntries;
+ }
+
+ // Create the on-disk hash table in a buffer.
+ llvm::SmallString<4096> TableData;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(TableData);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, GeneratorTrait);
+ }
+
+ // Create a blob abbreviation
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned TableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the header search table.
+ RecordData Record;
+ Record.push_back(HEADER_SEARCH_TABLE);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumHeaderSearchEntries);
+ Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData.str());
+
+ // Free all of the strings we had to duplicate.
+ for (unsigned I = 0, N = SavedStrings.size(); I != N; ++I)
+ free((void*)SavedStrings[I]);
+}
+
/// \brief Writes the block containing the serialized form of the
/// source manager.
///
@@ -1150,29 +1451,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(Content->Entry->getSize());
Record.push_back(Content->Entry->getModificationTime());
- // Emit header-search information associated with this file.
- HeaderFileInfo HFI;
- HeaderSearch &HS = PP.getHeaderSearchInfo();
- if (Content->Entry->getUID() < HS.header_file_size())
- HFI = HS.header_file_begin()[Content->Entry->getUID()];
- Record.push_back(HFI.isImport);
- Record.push_back(HFI.DirInfo);
- Record.push_back(HFI.NumIncludes);
- AddIdentifierRef(HFI.ControllingMacro, Record);
-
// Turn the file name into an absolute path, if it isn't already.
const char *Filename = Content->Entry->getName();
- llvm::sys::Path FilePath(Filename, strlen(Filename));
- FilePath.makeAbsolute();
+ llvm::SmallString<128> FilePath(Filename);
+ llvm::sys::fs::make_absolute(FilePath);
Filename = FilePath.c_str();
Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
Stream.EmitRecordWithBlob(SLocFileAbbrv, Record, Filename);
-
- // FIXME: For now, preload all file source locations, so that
- // we get the appropriate File entries in the reader. This is
- // a temporary measure.
- PreloadSLocs.push_back(BaseSLocID + SLocEntryOffsets.size());
} else {
// The source location entry is a buffer. The blob associated
// with this entry contains the contents of the buffer.
@@ -1228,7 +1514,8 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.clear();
Record.push_back(SOURCE_LOCATION_OFFSETS);
Record.push_back(SLocEntryOffsets.size());
- Record.push_back(SourceMgr.getNextOffset());
+ unsigned BaseOffset = Chain ? Chain->getNextSLocOffset() : 0;
+ Record.push_back(SourceMgr.getNextOffset() - BaseOffset);
Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
(const char *)data(SLocEntryOffsets),
SLocEntryOffsets.size()*sizeof(SLocEntryOffsets[0]));
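The BaseOffset subtraction above keeps the recorded source-location span local to this AST file when chaining. With hypothetical numbers: if the chained-to PCH already covers offsets [0, 1000) and the current next offset is 1500, then

  unsigned BaseOffset = Chain ? Chain->getNextSLocOffset() : 0;  // 1000
  Record.push_back(SourceMgr.getNextOffset() - BaseOffset);      // 1500 - 1000 = 500

stores a span of 500 rather than the absolute 1500, letting the reader rebase the entries onto whatever offset range it has free at load time.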
@@ -1242,6 +1529,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Preprocessor Serialization
//===----------------------------------------------------------------------===//
+static int compareMacroDefinitions(const void *XPtr, const void *YPtr) {
+ const std::pair<const IdentifierInfo *, MacroInfo *> &X =
+ *(const std::pair<const IdentifierInfo *, MacroInfo *>*)XPtr;
+ const std::pair<const IdentifierInfo *, MacroInfo *> &Y =
+ *(const std::pair<const IdentifierInfo *, MacroInfo *>*)YPtr;
+ return X.first->getName().compare(Y.first->getName());
+}
+
/// \brief Writes the block containing the serialized form of the
/// preprocessor.
///
@@ -1256,30 +1551,63 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
}
// Enter the preprocessor block.
- Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 2);
+ Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 3);
// If the AST file contains __DATE__ or __TIME__ emit a warning about this.
// FIXME: use diagnostics subsystem for localization etc.
if (PP.SawDateOrTime())
fprintf(stderr, "warning: precompiled header used __DATE__ or __TIME__.\n");
+
// Loop over all the macro definitions that are live at the end of the file,
// emitting each to the PP section.
PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
- for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end();
- I != E; ++I) {
- // FIXME: This emits macros in hash table order, we should do it in a stable
- // order so that output is reproducible.
- MacroInfo *MI = I->second;
+ // Construct the list of macro definitions that need to be serialized.
+ llvm::SmallVector<std::pair<const IdentifierInfo *, MacroInfo *>, 2>
+ MacrosToEmit;
+ llvm::SmallPtrSet<const IdentifierInfo*, 4> MacroDefinitionsSeen;
+ for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0),
+ E = PP.macro_end(Chain == 0);
+ I != E; ++I) {
+ MacroDefinitionsSeen.insert(I->first);
+ MacrosToEmit.push_back(std::make_pair(I->first, I->second));
+ }
+
+ // Sort the set of macro definitions that need to be serialized by the
+ // name of the macro, to provide a stable ordering.
+ llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(),
+ &compareMacroDefinitions);
+
+ // Resolve any identifiers that defined macros at the time they were
+ // deserialized, adding them to the list of macros to emit (if appropriate).
+ for (unsigned I = 0, N = DeserializedMacroNames.size(); I != N; ++I) {
+ IdentifierInfo *Name
+ = const_cast<IdentifierInfo *>(DeserializedMacroNames[I]);
+ if (Name->hasMacroDefinition() && MacroDefinitionsSeen.insert(Name))
+ MacrosToEmit.push_back(std::make_pair(Name, PP.getMacroInfo(Name)));
+ }
+
+ for (unsigned I = 0, N = MacrosToEmit.size(); I != N; ++I) {
+ const IdentifierInfo *Name = MacrosToEmit[I].first;
+ MacroInfo *MI = MacrosToEmit[I].second;
+ if (!MI)
+ continue;
+
// Don't emit builtin macros like __LINE__ to the AST file unless they have
// been redefined by the header (in which case they are not isBuiltinMacro).
// Also skip macros from an AST file if we're chaining.
- if (MI->isBuiltinMacro() || (Chain && MI->isFromAST()))
+
+ // FIXME: There is a (probably minor) optimization we could do here, if
+ // the macro comes from the original PCH but the identifier comes from a
+ // chained PCH, by storing the offset into the original PCH rather than
+ // writing the macro definition a second time.
+ if (MI->isBuiltinMacro() ||
+ (Chain && Name->isFromAST() && MI->isFromAST()))
continue;
- AddIdentifierRef(I->first, Record);
- MacroOffsets[I->first] = Stream.GetCurrentBitNo();
+ AddIdentifierRef(Name, Record);
+ MacroOffsets[Name] = Stream.GetCurrentBitNo();
Record.push_back(MI->getDefinitionLoc().getRawEncoding());
Record.push_back(MI->isUsed());
@@ -1296,12 +1624,12 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
I != E; ++I)
AddIdentifierRef(*I, Record);
}
-
+
// If we have a detailed preprocessing record, record the macro definition
// ID that corresponds to this macro.
if (PPRec)
Record.push_back(getMacroDefinitionID(PPRec->findMacroDefinition(MI)));
-
+
Stream.EmitRecord(Code, Record);
Record.clear();
@@ -1318,7 +1646,6 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
// FIXME: When reading literal tokens, reconstruct the literal pointer if
// it is needed.
AddIdentifierRef(Tok.getIdentifierInfo(), Record);
-
// FIXME: Should translate token kind to a stable encoding.
Record.push_back(Tok.getKind());
// FIXME: Should translate token flags to a stable encoding.
@@ -1329,49 +1656,111 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
}
++NumMacros;
}
+ Stream.ExitBlock();
+
+ if (PPRec)
+ WritePreprocessorDetail(*PPRec);
+}
+
+void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
+ if (PPRec.begin(Chain) == PPRec.end(Chain))
+ return;
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(PREPROCESSOR_DETAIL_BLOCK_ID, 3);
+
// If the preprocessor has a preprocessing record, emit it.
unsigned NumPreprocessingRecords = 0;
- if (PPRec) {
- for (PreprocessingRecord::iterator E = PPRec->begin(), EEnd = PPRec->end();
- E != EEnd; ++E) {
- Record.clear();
+ using namespace llvm;
+
+ // Set up the abbreviation for inclusion directive records.
+ unsigned InclusionAbbrev = 0;
+ {
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_INCLUSION_DIRECTIVE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // index
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // end location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ InclusionAbbrev = Stream.EmitAbbrev(Abbrev);
+ }
+
+ unsigned IndexBase = Chain ? PPRec.getNumPreallocatedEntities() : 0;
+ RecordData Record;
+ for (PreprocessingRecord::iterator E = PPRec.begin(Chain),
+ EEnd = PPRec.end(Chain);
+ E != EEnd; ++E) {
+ Record.clear();
+
+ if (MacroDefinition *MD = dyn_cast<MacroDefinition>(*E)) {
+ // Record this macro definition's location.
+ MacroID ID = getMacroDefinitionID(MD);
- if (MacroInstantiation *MI = dyn_cast<MacroInstantiation>(*E)) {
- Record.push_back(NumPreprocessingRecords++);
- AddSourceLocation(MI->getSourceRange().getBegin(), Record);
- AddSourceLocation(MI->getSourceRange().getEnd(), Record);
- AddIdentifierRef(MI->getName(), Record);
- Record.push_back(getMacroDefinitionID(MI->getDefinition()));
- Stream.EmitRecord(PP_MACRO_INSTANTIATION, Record);
+ // Don't write the macro definition if it is from another AST file.
+ if (ID < FirstMacroID)
continue;
- }
- if (MacroDefinition *MD = dyn_cast<MacroDefinition>(*E)) {
- // Record this macro definition's location.
- IdentID ID = getMacroDefinitionID(MD);
- if (ID != MacroDefinitionOffsets.size()) {
- if (ID > MacroDefinitionOffsets.size())
- MacroDefinitionOffsets.resize(ID + 1);
-
- MacroDefinitionOffsets[ID] = Stream.GetCurrentBitNo();
- } else
- MacroDefinitionOffsets.push_back(Stream.GetCurrentBitNo());
+ // Notify the serialization listener that we're serializing this entity.
+ if (SerializationListener)
+ SerializationListener->SerializedPreprocessedEntity(*E,
+ Stream.GetCurrentBitNo());
+
+ unsigned Position = ID - FirstMacroID;
+ if (Position != MacroDefinitionOffsets.size()) {
+ if (Position > MacroDefinitionOffsets.size())
+ MacroDefinitionOffsets.resize(Position + 1);
- Record.push_back(NumPreprocessingRecords++);
- Record.push_back(ID);
- AddSourceLocation(MD->getSourceRange().getBegin(), Record);
- AddSourceLocation(MD->getSourceRange().getEnd(), Record);
- AddIdentifierRef(MD->getName(), Record);
- AddSourceLocation(MD->getLocation(), Record);
- Stream.EmitRecord(PP_MACRO_DEFINITION, Record);
- continue;
- }
+ MacroDefinitionOffsets[Position] = Stream.GetCurrentBitNo();
+ } else
+ MacroDefinitionOffsets.push_back(Stream.GetCurrentBitNo());
+
+ Record.push_back(IndexBase + NumPreprocessingRecords++);
+ Record.push_back(ID);
+ AddSourceLocation(MD->getSourceRange().getBegin(), Record);
+ AddSourceLocation(MD->getSourceRange().getEnd(), Record);
+ AddIdentifierRef(MD->getName(), Record);
+ AddSourceLocation(MD->getLocation(), Record);
+ Stream.EmitRecord(PPD_MACRO_DEFINITION, Record);
+ continue;
+ }
+
+ // Notify the serialization listener that we're serializing this entity.
+ if (SerializationListener)
+ SerializationListener->SerializedPreprocessedEntity(*E,
+ Stream.GetCurrentBitNo());
+
+ if (MacroInstantiation *MI = dyn_cast<MacroInstantiation>(*E)) {
+ Record.push_back(IndexBase + NumPreprocessingRecords++);
+ AddSourceLocation(MI->getSourceRange().getBegin(), Record);
+ AddSourceLocation(MI->getSourceRange().getEnd(), Record);
+ AddIdentifierRef(MI->getName(), Record);
+ Record.push_back(getMacroDefinitionID(MI->getDefinition()));
+ Stream.EmitRecord(PPD_MACRO_INSTANTIATION, Record);
+ continue;
}
+
+ if (InclusionDirective *ID = dyn_cast<InclusionDirective>(*E)) {
+ Record.push_back(PPD_INCLUSION_DIRECTIVE);
+ Record.push_back(IndexBase + NumPreprocessingRecords++);
+ AddSourceLocation(ID->getSourceRange().getBegin(), Record);
+ AddSourceLocation(ID->getSourceRange().getEnd(), Record);
+ Record.push_back(ID->getFileName().size());
+ Record.push_back(ID->wasInQuotes());
+ Record.push_back(static_cast<unsigned>(ID->getKind()));
+ llvm::SmallString<64> Buffer;
+ Buffer += ID->getFileName();
+ Buffer += ID->getFile()->getName();
+ Stream.EmitRecordWithBlob(InclusionAbbrev, Record, Buffer);
+ continue;
+ }
+
+ llvm_unreachable("Unhandled PreprocessedEntity in ASTWriter");
}
-
Stream.ExitBlock();
-
+
// Write the offsets table for the preprocessing record.
if (NumPreprocessingRecords > 0) {
// Write the offsets table for identifier IDs.
@@ -1382,7 +1771,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macro defs
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned MacroDefOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
-
+
Record.clear();
Record.push_back(MACRO_DEFINITION_OFFSETS);
Record.push_back(NumPreprocessingRecords);
@@ -1393,6 +1782,32 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP) {
}
}
+void ASTWriter::WritePragmaDiagnosticMappings(const Diagnostic &Diag) {
+ RecordData Record;
+ for (Diagnostic::DiagStatePointsTy::const_iterator
+ I = Diag.DiagStatePoints.begin(), E = Diag.DiagStatePoints.end();
+ I != E; ++I) {
+ const Diagnostic::DiagStatePoint &point = *I;
+ if (point.Loc.isInvalid())
+ continue;
+
+ Record.push_back(point.Loc.getRawEncoding());
+ for (Diagnostic::DiagState::iterator
+ I = point.State->begin(), E = point.State->end(); I != E; ++I) {
+ unsigned diag = I->first, map = I->second;
+ if (map & 0x10) { // mapping from a diagnostic pragma.
+ Record.push_back(diag);
+ Record.push_back(map & 0x7);
+ }
+ }
+ Record.push_back(-1); // mark the end of the diag/map pairs for this
+ // location.
+ }
+
+ if (!Record.empty())
+ Stream.EmitRecord(DIAG_PRAGMA_MAPPINGS, Record);
+}
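A hypothetical reader-side loop for the layout this emits, namely a raw location followed by (diag, map) pairs and a -1 sentinel per state point (sketch only; the real reader lives in ASTReader):

  unsigned Idx = 0;
  while (Idx < Record.size()) {
    SourceLocation Loc = SourceLocation::getFromRawEncoding(Record[Idx++]);
    while (Record[Idx] != (uint64_t)-1) {  // pairs run until the sentinel
      unsigned DiagID = Record[Idx++];
      unsigned Map    = Record[Idx++];     // low 3 bits of the mapping
      // ... reapply the pragma-driven mapping for DiagID at Loc ...
    }
    ++Idx;                                 // skip the end-of-point sentinel
  }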
+
//===----------------------------------------------------------------------===//
// Type Serialization
//===----------------------------------------------------------------------===//
@@ -1403,6 +1818,8 @@ void ASTWriter::WriteType(QualType T) {
if (Idx.getIndex() == 0) // we haven't seen this type before.
Idx = TypeIdx(NextTypeID++);
+ assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
+
// Record the offset for this type.
unsigned Index = Idx.getIndex() - FirstTypeID;
if (TypeOffsets.size() == Index)
@@ -1457,14 +1874,15 @@ uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
uint64_t Offset = Stream.GetCurrentBitNo();
RecordData Record;
Record.push_back(DECL_CONTEXT_LEXICAL);
- llvm::SmallVector<DeclID, 64> Decls;
+ llvm::SmallVector<KindDeclIDPair, 64> Decls;
for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
D != DEnd; ++D)
- Decls.push_back(GetDeclRef(*D));
+ Decls.push_back(std::make_pair((*D)->getKind(), GetDeclRef(*D)));
++NumLexicalDeclContexts;
Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record,
- reinterpret_cast<char*>(Decls.data()), Decls.size() * sizeof(DeclID));
+ reinterpret_cast<char*>(Decls.data()),
+ Decls.size() * sizeof(KindDeclIDPair));
return Offset;
}
@@ -1674,7 +2092,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
// Create a blob abbreviation for the selector table offsets.
Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SELECTOR_OFFSETS));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // index
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned SelectorOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
@@ -2088,13 +2506,6 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
/// DeclContext in a dependent AST file. As such, they only exist for the TU
/// (in C++) and for namespaces.
void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
- assert((DC->isTranslationUnit() || DC->isNamespace()) &&
- "Only TU and namespaces should have visible decl updates.");
-
- // Make the context build its lookup table, but don't make it load external
- // decls.
- DC->lookup(DeclarationName());
-
StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
if (!Map || Map->empty())
return;
@@ -2130,19 +2541,23 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable.str());
}
-/// \brief Write ADDITIONAL_TEMPLATE_SPECIALIZATIONS blocks for all templates
-/// that have new specializations in the current AST file.
-void ASTWriter::WriteAdditionalTemplateSpecializations() {
+/// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
+void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
RecordData Record;
- for (AdditionalTemplateSpecializationsMap::iterator
- I = AdditionalTemplateSpecializations.begin(),
- E = AdditionalTemplateSpecializations.end();
- I != E; ++I) {
- Record.clear();
- Record.push_back(I->first);
- Record.insert(Record.end(), I->second.begin(), I->second.end());
- Stream.EmitRecord(ADDITIONAL_TEMPLATE_SPECIALIZATIONS, Record);
- }
+ Record.push_back(Opts.fp_contract);
+ Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
+}
+
+/// \brief Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
+void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
+ if (!SemaRef.Context.getLangOptions().OpenCL)
+ return;
+
+ const OpenCLOptions &Opts = SemaRef.getOpenCLOptions();
+ RecordData Record;
+#define OPENCLEXT(nm) Record.push_back(Opts.nm);
+#include "clang/Basic/OpenCLExtensions.def"
+ Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
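The OPENCLEXT pattern above is a classic X-macro: OpenCLExtensions.def lists
one OPENCLEXT(name) entry per extension, and each includer defines OPENCLEXT
to generate whatever code it needs per entry. Roughly what the preprocessor
produces here (extension names illustrative):

    // #define OPENCLEXT(nm) Record.push_back(Opts.nm);
    // #include "clang/Basic/OpenCLExtensions.def" expands to:
    Record.push_back(Opts.cl_khr_fp64);
    Record.push_back(Opts.cl_khr_fp16);
    // ... one push_back per extension listed in the .def file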
//===----------------------------------------------------------------------===//
@@ -2150,22 +2565,19 @@ void ASTWriter::WriteAdditionalTemplateSpecializations() {
//===----------------------------------------------------------------------===//
/// \brief Write a record containing the given attributes.
-void ASTWriter::WriteAttributeRecord(const AttrVec &Attrs) {
- RecordData Record;
+void ASTWriter::WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record) {
+ Record.push_back(Attrs.size());
for (AttrVec::const_iterator i = Attrs.begin(), e = Attrs.end(); i != e; ++i){
const Attr * A = *i;
Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
AddSourceLocation(A->getLocation(), Record);
- Record.push_back(A->isInherited());
#include "clang/Serialization/AttrPCHWrite.inc"
}
-
- Stream.EmitRecord(DECL_ATTR, Record);
}
-void ASTWriter::AddString(llvm::StringRef Str, RecordData &Record) {
+void ASTWriter::AddString(llvm::StringRef Str, RecordDataImpl &Record) {
Record.push_back(Str.size());
Record.insert(Record.end(), Str.begin(), Str.end());
}
@@ -2193,15 +2605,20 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
}
ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
- : Stream(Stream), Chain(0), FirstDeclID(1), NextDeclID(FirstDeclID),
+ : Stream(Stream), Chain(0), SerializationListener(0),
+ FirstDeclID(1), NextDeclID(FirstDeclID),
FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
FirstIdentID(1), NextIdentID(FirstIdentID), FirstSelectorID(1),
- NextSelectorID(FirstSelectorID), CollectedStmts(&StmtsToEmit),
+ NextSelectorID(FirstSelectorID), FirstMacroID(1), NextMacroID(FirstMacroID),
+ CollectedStmts(&StmtsToEmit),
NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
- NumVisibleDeclContexts(0) {
+ NumVisibleDeclContexts(0), FirstCXXBaseSpecifiersID(1),
+ NextCXXBaseSpecifiersID(1)
+{
}
void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const std::string &OutputFile,
const char *isysroot) {
// Emit the file header.
Stream.Emit((unsigned)'C', 8);
@@ -2214,11 +2631,12 @@ void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (Chain)
WriteASTChain(SemaRef, StatCalls, isysroot);
else
- WriteASTCore(SemaRef, StatCalls, isysroot);
+ WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile);
}
void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
- const char *isysroot) {
+ const char *isysroot,
+ const std::string &OutputFile) {
using namespace llvm;
ASTContext &Context = SemaRef.Context;
@@ -2318,10 +2736,15 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
AddDeclRef(SemaRef.getStdBadAlloc(), SemaDeclRefs);
}
+ RecordData CUDASpecialDeclRefs;
+ if (Context.getcudaConfigureCallDecl()) {
+ AddDeclRef(Context.getcudaConfigureCallDecl(), CUDASpecialDeclRefs);
+ }
+
// Write the remaining AST contents.
RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
- WriteMetadata(Context, isysroot);
+ WriteMetadata(Context, isysroot, OutputFile);
WriteLanguageOptions(Context.getLangOptions());
if (StatCalls && !isysroot)
WriteStatCache(*StatCalls);
@@ -2363,12 +2786,36 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Stream.ExitBlock();
WritePreprocessor(PP);
+ WriteHeaderSearch(PP.getHeaderSearchInfo(), isysroot);
WriteSelectors(SemaRef);
WriteReferencedSelectorsPool(SemaRef);
WriteIdentifierTable(PP);
+ WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteOpenCLExtensions(SemaRef);
WriteTypeDeclOffsets();
+ WritePragmaDiagnosticMappings(Context.getDiagnostics());
+ // Write the C++ base-specifier set offsets.
+ if (!CXXBaseSpecifiersOffsets.empty()) {
+ // Create a blob abbreviation for the C++ base specifiers offsets.
+ using namespace llvm;
+
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(CXX_BASE_SPECIFIER_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned BaseSpecifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the base specifier offsets table.
+ Record.clear();
+ Record.push_back(CXX_BASE_SPECIFIER_OFFSETS);
+ Record.push_back(CXXBaseSpecifiersOffsets.size());
+ Stream.EmitRecordWithBlob(BaseSpecifierOffsetAbbrev, Record,
+ (const char *)CXXBaseSpecifiersOffsets.data(),
+ CXXBaseSpecifiersOffsets.size() * sizeof(uint32_t));
+ }
+
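This is the same offsets-table idiom used for types, identifiers and
selectors: an abbreviation with a literal record code, a Fixed-32 element
count, and a blob holding the raw uint32_t offset array, so a reader can
seek to any element later. The generic shape, sketched with an assumed
record code MY_OFFSETS and offset vector Offsets:

    BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
    Abbrev->Add(BitCodeAbbrevOp(MY_OFFSETS));                 // record code
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # entries
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));      // the offsets
    unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);

    Record.clear();
    Record.push_back(MY_OFFSETS);
    Record.push_back(Offsets.size());
    Stream.EmitRecordWithBlob(AbbrevID, Record,
                              (const char *)Offsets.data(),
                              Offsets.size() * sizeof(uint32_t));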
// Write the record containing external, unnamed definitions.
if (!ExternalDefinitions.empty())
Stream.EmitRecord(EXTERNAL_DEFINITIONS, ExternalDefinitions);
@@ -2411,6 +2858,10 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (!SemaDeclRefs.empty())
Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+ // Write the record containing CUDA-specific declaration references.
+ if (!CUDASpecialDeclRefs.empty())
+ Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
+
// Some simple statistics
Record.clear();
Record.push_back(NumStatements);
@@ -2425,21 +2876,12 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
const char *isysroot) {
using namespace llvm;
- FirstDeclID += Chain->getTotalNumDecls();
- FirstTypeID += Chain->getTotalNumTypes();
- FirstIdentID += Chain->getTotalNumIdentifiers();
- FirstSelectorID += Chain->getTotalNumSelectors();
- NextDeclID = FirstDeclID;
- NextTypeID = FirstTypeID;
- NextIdentID = FirstIdentID;
- NextSelectorID = FirstSelectorID;
-
ASTContext &Context = SemaRef.Context;
Preprocessor &PP = SemaRef.PP;
RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
- WriteMetadata(Context, isysroot);
+ WriteMetadata(Context, isysroot, "");
if (StatCalls && !isysroot)
WriteStatCache(*StatCalls);
// FIXME: Source manager block should only write new stuff, which could be
@@ -2451,12 +2893,12 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
// We don't start with the translation unit, but with its decls that
// don't come from the chained PCH.
const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
- llvm::SmallVector<DeclID, 64> NewGlobalDecls;
+ llvm::SmallVector<KindDeclIDPair, 64> NewGlobalDecls;
for (DeclContext::decl_iterator I = TU->noload_decls_begin(),
E = TU->noload_decls_end();
I != E; ++I) {
if ((*I)->getPCHLevel() == 0)
- NewGlobalDecls.push_back(GetDeclRef(*I));
+ NewGlobalDecls.push_back(std::make_pair((*I)->getKind(), GetDeclRef(*I)));
else if ((*I)->isChangedSinceDeserialization())
(void)GetDeclRef(*I); // Make sure it's written, but don't record it.
}
@@ -2469,17 +2911,15 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Record.push_back(TU_UPDATE_LEXICAL);
Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
reinterpret_cast<const char*>(NewGlobalDecls.data()),
- NewGlobalDecls.size() * sizeof(DeclID));
- // And in C++, a visible updates block for the TU.
- if (Context.getLangOptions().CPlusPlus) {
- Abv = new llvm::BitCodeAbbrev();
- Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 32));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
- UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv);
- WriteDeclContextVisibleUpdate(TU);
- }
+ NewGlobalDecls.size() * sizeof(KindDeclIDPair));
+ // And a visible updates block for the DeclContexts.
+ Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 32));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv);
+ WriteDeclContextVisibleUpdate(TU);
// Build a record containing all of the new tentative definitions in this
// file, in TentativeDefinitions order.
@@ -2574,6 +3014,9 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Stream.EnterSubblock(DECLTYPES_BLOCK_ID, 3);
WriteDeclsBlockAbbrevs();
+ for (DeclsToRewriteTy::iterator
+ I = DeclsToRewrite.begin(), E = DeclsToRewrite.end(); I != E; ++I)
+ DeclTypesToEmit.push(const_cast<Decl*>(*I));
while (!DeclTypesToEmit.empty()) {
DeclOrType DOT = DeclTypesToEmit.front();
DeclTypesToEmit.pop();
@@ -2588,7 +3031,13 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
WriteSelectors(SemaRef);
WriteReferencedSelectorsPool(SemaRef);
WriteIdentifierTable(PP);
+ WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteOpenCLExtensions(SemaRef);
+
WriteTypeDeclOffsets();
+ // FIXME: For chained PCH only write the new mappings (we currently
+ // write all of them again).
+ WritePragmaDiagnosticMappings(Context.getDiagnostics());
/// Build a record containing first declarations from a chained PCH and the
/// most recent declarations in this AST that they point to.
@@ -2645,28 +3094,50 @@ void ASTWriter::WriteASTChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (!SemaDeclRefs.empty())
Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
- // Write the updates to C++ namespaces.
- for (llvm::SmallPtrSet<const NamespaceDecl *, 16>::iterator
- I = UpdatedNamespaces.begin(),
- E = UpdatedNamespaces.end();
+ // Write the updates to DeclContexts.
+ for (llvm::SmallPtrSet<const DeclContext *, 16>::iterator
+ I = UpdatedDeclContexts.begin(),
+ E = UpdatedDeclContexts.end();
I != E; ++I)
WriteDeclContextVisibleUpdate(*I);
- // Write the updates to C++ template specialization lists.
- if (!AdditionalTemplateSpecializations.empty())
- WriteAdditionalTemplateSpecializations();
+ WriteDeclUpdatesBlocks();
Record.clear();
Record.push_back(NumStatements);
Record.push_back(NumMacros);
Record.push_back(NumLexicalDeclContexts);
Record.push_back(NumVisibleDeclContexts);
- WriteDeclUpdateBlock();
+ WriteDeclReplacementsBlock();
Stream.EmitRecord(STATISTICS, Record);
Stream.ExitBlock();
}
-void ASTWriter::WriteDeclUpdateBlock() {
+void ASTWriter::WriteDeclUpdatesBlocks() {
+ if (DeclUpdates.empty())
+ return;
+
+ RecordData OffsetsRecord;
+ Stream.EnterSubblock(DECL_UPDATES_BLOCK_ID, 3);
+ for (DeclUpdateMap::iterator
+ I = DeclUpdates.begin(), E = DeclUpdates.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ UpdateRecord &URec = I->second;
+
+ if (DeclsToRewrite.count(D))
+ continue; // The decl will be written completely, no need to store updates.
+
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ Stream.EmitRecord(DECL_UPDATES, URec);
+
+ OffsetsRecord.push_back(GetDeclRef(D));
+ OffsetsRecord.push_back(Offset);
+ }
+ Stream.ExitBlock();
+ Stream.EmitRecord(DECL_UPDATE_OFFSETS, OffsetsRecord);
+}
+
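Pairing each update record with its bit offset in a separate
DECL_UPDATE_OFFSETS record lets a reader jump straight to the updates for a
single decl instead of scanning the whole block. A hypothetical lookup over
the flat (DeclID, offset) pairs:

    // OffsetsRecord: (DeclID, bit-offset) pairs in emission order.
    uint64_t findUpdateOffset(const RecordData &OffsetsRecord,
                              serialization::DeclID ID) {
      for (unsigned I = 0, N = OffsetsRecord.size(); I + 1 < N; I += 2)
        if (OffsetsRecord[I] == ID)
          return OffsetsRecord[I + 1]; // JumpToBit() here, then DECL_UPDATES
      return 0;                        // no updates recorded for this decl
    }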
+void ASTWriter::WriteDeclReplacementsBlock() {
if (ReplacedDecls.empty())
return;
@@ -2679,33 +3150,31 @@ void ASTWriter::WriteDeclUpdateBlock() {
Stream.EmitRecord(DECL_REPLACEMENTS, Record);
}
-void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordData &Record) {
+void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
Record.push_back(Loc.getRawEncoding());
}
-void ASTWriter::AddSourceRange(SourceRange Range, RecordData &Record) {
+void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
AddSourceLocation(Range.getBegin(), Record);
AddSourceLocation(Range.getEnd(), Record);
}
-void ASTWriter::AddAPInt(const llvm::APInt &Value, RecordData &Record) {
+void ASTWriter::AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record) {
Record.push_back(Value.getBitWidth());
- unsigned N = Value.getNumWords();
- const uint64_t* Words = Value.getRawData();
- for (unsigned I = 0; I != N; ++I)
- Record.push_back(Words[I]);
+ const uint64_t *Words = Value.getRawData();
+ Record.append(Words, Words + Value.getNumWords());
}
-void ASTWriter::AddAPSInt(const llvm::APSInt &Value, RecordData &Record) {
+void ASTWriter::AddAPSInt(const llvm::APSInt &Value, RecordDataImpl &Record) {
Record.push_back(Value.isUnsigned());
AddAPInt(Value, Record);
}
-void ASTWriter::AddAPFloat(const llvm::APFloat &Value, RecordData &Record) {
+void ASTWriter::AddAPFloat(const llvm::APFloat &Value, RecordDataImpl &Record) {
AddAPInt(Value.bitcastToAPInt(), Record);
}
-void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordData &Record) {
+void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record) {
Record.push_back(getIdentifierRef(II));
}
@@ -2719,17 +3188,17 @@ IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
return ID;
}
-IdentID ASTWriter::getMacroDefinitionID(MacroDefinition *MD) {
+MacroID ASTWriter::getMacroDefinitionID(MacroDefinition *MD) {
if (MD == 0)
return 0;
-
- IdentID &ID = MacroDefinitions[MD];
+
+ MacroID &ID = MacroDefinitions[MD];
if (ID == 0)
- ID = MacroDefinitions.size();
+ ID = NextMacroID++;
return ID;
}
-void ASTWriter::AddSelectorRef(const Selector SelRef, RecordData &Record) {
+void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) {
Record.push_back(getSelectorRef(SelRef));
}
@@ -2750,13 +3219,23 @@ SelectorID ASTWriter::getSelectorRef(Selector Sel) {
return SID;
}
-void ASTWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordData &Record) {
+void ASTWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record) {
AddDeclRef(Temp->getDestructor(), Record);
}
+void ASTWriter::AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd,
+ RecordDataImpl &Record) {
+ assert(Bases != BasesEnd && "Empty base-specifier sets are not recorded");
+ CXXBaseSpecifiersToWrite.push_back(
+ QueuedCXXBaseSpecifiers(NextCXXBaseSpecifiersID,
+ Bases, BasesEnd));
+ Record.push_back(NextCXXBaseSpecifiersID++);
+}
+
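Note that AddCXXBaseSpecifiersRef does not write the bases immediately: it
hands out the next ID, queues the [Bases, BasesEnd) range, and
FlushCXXBaseSpecifiers (below) emits the queued sets once the current decl
is done. The queue entry presumably looks like this, with field names
inferred from the uses in this file:

    struct QueuedCXXBaseSpecifiers {
      QueuedCXXBaseSpecifiers(serialization::CXXBaseSpecifiersID ID,
                              const CXXBaseSpecifier *Bases,
                              const CXXBaseSpecifier *BasesEnd)
        : ID(ID), Bases(Bases), BasesEnd(BasesEnd) { }

      serialization::CXXBaseSpecifiersID ID; // ID given to referencing record
      const CXXBaseSpecifier *Bases;         // range serialized at flush time
      const CXXBaseSpecifier *BasesEnd;
    };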
void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
const TemplateArgumentLocInfo &Arg,
- RecordData &Record) {
+ RecordDataImpl &Record) {
switch (Kind) {
case TemplateArgument::Expression:
AddStmt(Arg.getAsExpr());
@@ -2768,6 +3247,11 @@ void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
AddSourceRange(Arg.getTemplateQualifierRange(), Record);
AddSourceLocation(Arg.getTemplateNameLoc(), Record);
break;
+ case TemplateArgument::TemplateExpansion:
+ AddSourceRange(Arg.getTemplateQualifierRange(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
+ AddSourceLocation(Arg.getTemplateEllipsisLoc(), Record);
+ break;
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
@@ -2777,7 +3261,7 @@ void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
}
void ASTWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
- RecordData &Record) {
+ RecordDataImpl &Record) {
AddTemplateArgument(Arg.getArgument(), Record);
if (Arg.getArgument().getKind() == TemplateArgument::Expression) {
@@ -2791,7 +3275,7 @@ void ASTWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
Record);
}
-void ASTWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record) {
+void ASTWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordDataImpl &Record) {
if (TInfo == 0) {
AddTypeRef(QualType(), Record);
return;
@@ -2803,7 +3287,7 @@ void ASTWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record) {
TLW.Visit(TL);
}
-void ASTWriter::AddTypeRef(QualType T, RecordData &Record) {
+void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) {
Record.push_back(GetOrCreateTypeID(T));
}
@@ -2842,7 +3326,7 @@ TypeIdx ASTWriter::getTypeIdx(QualType T) const {
return I->second;
}
-void ASTWriter::AddDeclRef(const Decl *D, RecordData &Record) {
+void ASTWriter::AddDeclRef(const Decl *D, RecordDataImpl &Record) {
Record.push_back(GetDeclRef(D));
}
@@ -2850,7 +3334,7 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
if (D == 0) {
return 0;
}
-
+ assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
DeclID &ID = DeclIDs[D];
if (ID == 0) {
// We haven't seen this declaration before. Give it a new ID and
@@ -2876,7 +3360,7 @@ DeclID ASTWriter::getDeclID(const Decl *D) {
return DeclIDs[D];
}
-void ASTWriter::AddDeclarationName(DeclarationName Name, RecordData &Record) {
+void ASTWriter::AddDeclarationName(DeclarationName Name, RecordDataImpl &Record) {
// FIXME: Emit a stable enum for NameKind. 0 = Identifier etc.
Record.push_back(Name.getNameKind());
switch (Name.getNameKind()) {
@@ -2910,8 +3394,57 @@ void ASTWriter::AddDeclarationName(DeclarationName Name, RecordData &Record) {
}
}
+void ASTWriter::AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
+ DeclarationName Name, RecordDataImpl &Record) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeSourceInfo(DNLoc.NamedType.TInfo, Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.BeginOpNameLoc),
+ Record);
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.EndOpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXLiteralOperatorName.OpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTWriter::AddDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ RecordDataImpl &Record) {
+ AddDeclarationName(NameInfo.getName(), Record);
+ AddSourceLocation(NameInfo.getLoc(), Record);
+ AddDeclarationNameLoc(NameInfo.getInfo(), NameInfo.getName(), Record);
+}
+
+void ASTWriter::AddQualifierInfo(const QualifierInfo &Info,
+ RecordDataImpl &Record) {
+ AddNestedNameSpecifier(Info.NNS, Record);
+ AddSourceRange(Info.NNSRange, Record);
+ Record.push_back(Info.NumTemplParamLists);
+ for (unsigned i=0, e=Info.NumTemplParamLists; i != e; ++i)
+ AddTemplateParameterList(Info.TemplParamLists[i], Record);
+}
+
void ASTWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
- RecordData &Record) {
+ RecordDataImpl &Record) {
// Nested name specifiers usually aren't too long. I think that 8 would
// typically accommodate the vast majority.
llvm::SmallVector<NestedNameSpecifier *, 8> NestedNames;
@@ -2949,8 +3482,8 @@ void ASTWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
}
}
-void ASTWriter::AddTemplateName(TemplateName Name, RecordData &Record) {
- TemplateName::NameKind Kind = Name.getKind();
+void ASTWriter::AddTemplateName(TemplateName Name, RecordDataImpl &Record) {
+ TemplateName::NameKind Kind = Name.getKind();
Record.push_back(Kind);
switch (Kind) {
case TemplateName::Template:
@@ -2965,7 +3498,7 @@ void ASTWriter::AddTemplateName(TemplateName Name, RecordData &Record) {
AddDeclRef(*I, Record);
break;
}
-
+
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName();
AddNestedNameSpecifier(QualT->getQualifier(), Record);
@@ -2973,7 +3506,7 @@ void ASTWriter::AddTemplateName(TemplateName Name, RecordData &Record) {
AddDeclRef(QualT->getTemplateDecl(), Record);
break;
}
-
+
case TemplateName::DependentTemplate: {
DependentTemplateName *DepT = Name.getAsDependentTemplateName();
AddNestedNameSpecifier(DepT->getQualifier(), Record);
@@ -2984,11 +3517,19 @@ void ASTWriter::AddTemplateName(TemplateName Name, RecordData &Record) {
Record.push_back(DepT->getOperator());
break;
}
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack();
+ AddDeclRef(SubstPack->getParameterPack(), Record);
+ AddTemplateArgument(SubstPack->getArgumentPack(), Record);
+ break;
+ }
}
}
-void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
- RecordData &Record) {
+void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
+ RecordDataImpl &Record) {
Record.push_back(Arg.getKind());
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -3004,7 +3545,14 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
AddTypeRef(Arg.getIntegralType(), Record);
break;
case TemplateArgument::Template:
- AddTemplateName(Arg.getAsTemplate(), Record);
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ if (llvm::Optional<unsigned> NumExpansions = Arg.getNumTemplateExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
break;
case TemplateArgument::Expression:
AddStmt(Arg.getAsExpr());
@@ -3020,7 +3568,7 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
void
ASTWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
- RecordData &Record) {
+ RecordDataImpl &Record) {
assert(TemplateParams && "No TemplateParams!");
AddSourceLocation(TemplateParams->getTemplateLoc(), Record);
AddSourceLocation(TemplateParams->getLAngleLoc(), Record);
@@ -3035,16 +3583,16 @@ ASTWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
/// \brief Emit a template argument list.
void
ASTWriter::AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
- RecordData &Record) {
+ RecordDataImpl &Record) {
assert(TemplateArgs && "No TemplateArgs!");
- Record.push_back(TemplateArgs->flat_size());
- for (int i=0, e = TemplateArgs->flat_size(); i != e; ++i)
+ Record.push_back(TemplateArgs->size());
+ for (int i=0, e = TemplateArgs->size(); i != e; ++i)
AddTemplateArgument(TemplateArgs->get(i), Record);
}
void
-ASTWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record) {
+ASTWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordDataImpl &Record) {
Record.push_back(Set.size());
for (UnresolvedSetImpl::const_iterator
I = Set.begin(), E = Set.end(); I != E; ++I) {
@@ -3054,31 +3602,69 @@ ASTWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record) {
}
void ASTWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
- RecordData &Record) {
+ RecordDataImpl &Record) {
Record.push_back(Base.isVirtual());
Record.push_back(Base.isBaseOfClass());
Record.push_back(Base.getAccessSpecifierAsWritten());
+ Record.push_back(Base.getInheritConstructors());
AddTypeSourceInfo(Base.getTypeSourceInfo(), Record);
AddSourceRange(Base.getSourceRange(), Record);
+ AddSourceLocation(Base.isPackExpansion()? Base.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+}
+
+void ASTWriter::FlushCXXBaseSpecifiers() {
+ RecordData Record;
+ for (unsigned I = 0, N = CXXBaseSpecifiersToWrite.size(); I != N; ++I) {
+ Record.clear();
+
+ // Record the offset of this base-specifier set.
+ unsigned Index = CXXBaseSpecifiersToWrite[I].ID - FirstCXXBaseSpecifiersID;
+ if (Index == CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.resize(Index + 1);
+ CXXBaseSpecifiersOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ const CXXBaseSpecifier *B = CXXBaseSpecifiersToWrite[I].Bases,
+ *BEnd = CXXBaseSpecifiersToWrite[I].BasesEnd;
+ Record.push_back(BEnd - B);
+ for (; B != BEnd; ++B)
+ AddCXXBaseSpecifier(*B, Record);
+ Stream.EmitRecord(serialization::DECL_CXX_BASE_SPECIFIERS, Record);
+
+ // Flush any expressions that were written as part of the base specifiers.
+ FlushStmts();
+ }
+
+ CXXBaseSpecifiersToWrite.clear();
}
-void ASTWriter::AddCXXBaseOrMemberInitializers(
- const CXXBaseOrMemberInitializer * const *BaseOrMembers,
- unsigned NumBaseOrMembers, RecordData &Record) {
- Record.push_back(NumBaseOrMembers);
- for (unsigned i=0; i != NumBaseOrMembers; ++i) {
- const CXXBaseOrMemberInitializer *Init = BaseOrMembers[i];
+void ASTWriter::AddCXXCtorInitializers(
+ const CXXCtorInitializer * const *CtorInitializers,
+ unsigned NumCtorInitializers,
+ RecordDataImpl &Record) {
+ Record.push_back(NumCtorInitializers);
+ for (unsigned i=0; i != NumCtorInitializers; ++i) {
+ const CXXCtorInitializer *Init = CtorInitializers[i];
Record.push_back(Init->isBaseInitializer());
if (Init->isBaseInitializer()) {
AddTypeSourceInfo(Init->getBaseClassInfo(), Record);
Record.push_back(Init->isBaseVirtual());
} else {
- AddDeclRef(Init->getMember(), Record);
+ Record.push_back(Init->isIndirectMemberInitializer());
+ if (Init->isIndirectMemberInitializer())
+ AddDeclRef(Init->getIndirectMember(), Record);
+ else
+ AddDeclRef(Init->getMember(), Record);
}
+
AddSourceLocation(Init->getMemberLocation(), Record);
AddStmt(Init->getInit());
- AddDeclRef(Init->getAnonUnionMember(), Record);
AddSourceLocation(Init->getLParenLoc(), Record);
AddSourceLocation(Init->getRParenLoc(), Record);
Record.push_back(Init->isWritten());
@@ -3092,22 +3678,86 @@ void ASTWriter::AddCXXBaseOrMemberInitializers(
}
}
-void ASTWriter::SetReader(ASTReader *Reader) {
+void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record) {
+ assert(D->DefinitionData);
+ struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
+ Record.push_back(Data.UserDeclaredConstructor);
+ Record.push_back(Data.UserDeclaredCopyConstructor);
+ Record.push_back(Data.UserDeclaredCopyAssignment);
+ Record.push_back(Data.UserDeclaredDestructor);
+ Record.push_back(Data.Aggregate);
+ Record.push_back(Data.PlainOldData);
+ Record.push_back(Data.Empty);
+ Record.push_back(Data.Polymorphic);
+ Record.push_back(Data.Abstract);
+ Record.push_back(Data.HasTrivialConstructor);
+ Record.push_back(Data.HasTrivialCopyConstructor);
+ Record.push_back(Data.HasTrivialCopyAssignment);
+ Record.push_back(Data.HasTrivialDestructor);
+ Record.push_back(Data.ComputedVisibleConversions);
+ Record.push_back(Data.DeclaredDefaultConstructor);
+ Record.push_back(Data.DeclaredCopyConstructor);
+ Record.push_back(Data.DeclaredCopyAssignment);
+ Record.push_back(Data.DeclaredDestructor);
+
+ Record.push_back(Data.NumBases);
+ if (Data.NumBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getBases(), Data.getBases() + Data.NumBases,
+ Record);
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record.push_back(Data.NumVBases);
+ if (Data.NumVBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getVBases(), Data.getVBases() + Data.NumVBases,
+ Record);
+
+ AddUnresolvedSet(Data.Conversions, Record);
+ AddUnresolvedSet(Data.VisibleConversions, Record);
+ // Data.Definition is the owning decl, no need to write it.
+ AddDeclRef(Data.FirstFriend, Record);
+}
+
+void ASTWriter::ReaderInitialized(ASTReader *Reader) {
assert(Reader && "Cannot remove chain");
+ assert(!Chain && "Cannot replace chain");
assert(FirstDeclID == NextDeclID &&
FirstTypeID == NextTypeID &&
FirstIdentID == NextIdentID &&
FirstSelectorID == NextSelectorID &&
+ FirstMacroID == NextMacroID &&
+ FirstCXXBaseSpecifiersID == NextCXXBaseSpecifiersID &&
"Setting chain after writing has started.");
Chain = Reader;
+
+ FirstDeclID += Chain->getTotalNumDecls();
+ FirstTypeID += Chain->getTotalNumTypes();
+ FirstIdentID += Chain->getTotalNumIdentifiers();
+ FirstSelectorID += Chain->getTotalNumSelectors();
+ FirstMacroID += Chain->getTotalNumMacroDefinitions();
+ FirstCXXBaseSpecifiersID += Chain->getTotalNumCXXBaseSpecifiers();
+ NextDeclID = FirstDeclID;
+ NextTypeID = FirstTypeID;
+ NextIdentID = FirstIdentID;
+ NextSelectorID = FirstSelectorID;
+ NextMacroID = FirstMacroID;
+ NextCXXBaseSpecifiersID = FirstCXXBaseSpecifiersID;
}
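The net effect is that every ID space in this file begins just past the IDs
of the chained-in file, keeping IDs globally unique across the chain. A
worked example with made-up totals from a hypothetical prior PCH:

    // Suppose Chain->getTotalNumDecls() == 1000 and
    //         Chain->getTotalNumTypes() == 200. Then:
    //   FirstDeclID = 1 + 1000 = 1001;            NextDeclID = 1001
    //   FirstTypeID = NUM_PREDEF_TYPE_IDS + 200;  NextTypeID = FirstTypeID
    // Decl ID 1001 is the first decl written by *this* file; any smaller
    // ID refers into the chained PCH.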
void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
IdentifierIDs[II] = ID;
+ if (II->hasMacroDefinition())
+ DeserializedMacroNames.push_back(II);
}
void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
- TypeIdxs[T] = Idx;
+ // Always take the highest-numbered type index. This copes with an interesting
+ // case for chained AST writing where we schedule writing the type and then,
+ // later, deserialize the type from another AST. In this case, we want to
+ // keep the higher-numbered entry so that we can properly write it out to
+ // the AST file.
+ TypeIdx &StoredIdx = TypeIdxs[T];
+ if (Idx.getIndex() >= StoredIdx.getIndex())
+ StoredIdx = Idx;
}
void ASTWriter::DeclRead(DeclID ID, const Decl *D) {
@@ -3117,3 +3767,75 @@ void ASTWriter::DeclRead(DeclID ID, const Decl *D) {
void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
SelectorIDs[S] = ID;
}
+
+void ASTWriter::MacroDefinitionRead(serialization::MacroID ID,
+ MacroDefinition *MD) {
+ MacroDefinitions[MD] = ID;
+}
+
+void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
+ assert(D->isDefinition());
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // We are interested when a PCH decl is modified.
+ if (RD->getPCHLevel() > 0) {
+ // A forward reference was mutated into a definition. Rewrite it.
+ // FIXME: This happens during template instantiation; should we
+ // have created a new definition decl instead?
+ RewriteDecl(RD);
+ }
+
+ for (CXXRecordDecl::redecl_iterator
+ I = RD->redecls_begin(), E = RD->redecls_end(); I != E; ++I) {
+ CXXRecordDecl *Redecl = cast<CXXRecordDecl>(*I);
+ if (Redecl == RD)
+ continue;
+
+ // We are interested when a PCH decl is modified.
+ if (Redecl->getPCHLevel() > 0) {
+ UpdateRecord &Record = DeclUpdates[Redecl];
+ Record.push_back(UPD_CXX_SET_DEFINITIONDATA);
+ assert(Redecl->DefinitionData);
+ assert(Redecl->DefinitionData->Definition == D);
+ AddDeclRef(D, Record); // the DefinitionDecl
+ }
+ }
+ }
+}
+void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
+ // TU and namespaces are handled elsewhere.
+ if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC))
+ return;
+
+ if (!(D->getPCHLevel() == 0 && cast<Decl>(DC)->getPCHLevel() > 0))
+ return; // Not a source decl added to a DeclContext from PCH.
+
+ AddUpdatedDeclContext(DC);
+}
+
+void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
+ assert(D->isImplicit());
+ if (!(D->getPCHLevel() == 0 && RD->getPCHLevel() > 0))
+ return; // Not a source member added to a class from PCH.
+ if (!isa<CXXMethodDecl>(D))
+ return; // We are interested in lazily declared implicit methods.
+
+ // A decl coming from PCH was modified.
+ assert(RD->isDefinition());
+ UpdateRecord &Record = DeclUpdates[RD];
+ Record.push_back(UPD_CXX_ADDED_IMPLICIT_MEMBER);
+ AddDeclRef(D, Record);
+}
+
+void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D) {
+ // The specializations set is kept in the canonical template.
+ TD = TD->getCanonicalDecl();
+ if (!(D->getPCHLevel() == 0 && TD->getPCHLevel() > 0))
+ return; // Not a source specialization added to a template from PCH.
+
+ UpdateRecord &Record = DeclUpdates[TD];
+ Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
+ AddDeclRef(D, Record);
+}
+
+ASTSerializationListener::~ASTSerializationListener() { }
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
index ce39a10..ce07e138 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/DeclContextInternals.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/ErrorHandling.h"
@@ -30,22 +31,23 @@ namespace clang {
ASTWriter &Writer;
ASTContext &Context;
- ASTWriter::RecordData &Record;
+ typedef ASTWriter::RecordData RecordData;
+ RecordData &Record;
public:
serialization::DeclCode Code;
unsigned AbbrevToUse;
- ASTDeclWriter(ASTWriter &Writer, ASTContext &Context,
- ASTWriter::RecordData &Record)
+ ASTDeclWriter(ASTWriter &Writer, ASTContext &Context, RecordData &Record)
: Writer(Writer), Context(Context), Record(Record) {
}
-
+
void Visit(Decl *D);
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *D);
void VisitNamedDecl(NamedDecl *D);
+ void VisitLabelDecl(LabelDecl *LD);
void VisitNamespaceDecl(NamespaceDecl *D);
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
@@ -71,6 +73,7 @@ namespace clang {
void VisitCXXDestructorDecl(CXXDestructorDecl *D);
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *D);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
void VisitImplicitParamDecl(ImplicitParamDecl *D);
void VisitParmVarDecl(ParmVarDecl *D);
@@ -133,6 +136,8 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
Writer.AddSourceLocation(D->getLocation(), Record);
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
+ if (D->hasAttrs())
+ Writer.WriteAttributes(D->getAttrs(), Record);
Record.push_back(D->isImplicit());
Record.push_back(D->isUsed(false));
Record.push_back(D->getAccess());
@@ -163,23 +168,31 @@ void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
VisitTypeDecl(D);
- Record.push_back(D->getIdentifierNamespace());
VisitRedeclarable(D);
+ Record.push_back(D->getIdentifierNamespace());
Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
Record.push_back(D->isDefinition());
Record.push_back(D->isEmbeddedInDeclarator());
Writer.AddSourceLocation(D->getRBraceLoc(), Record);
Writer.AddSourceLocation(D->getTagKeywordLoc(), Record);
- // FIXME: maybe write optional qualifier and its range.
- Writer.AddDeclRef(D->getTypedefForAnonDecl(), Record);
+ Record.push_back(D->hasExtInfo());
+ if (D->hasExtInfo())
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
+ else
+ Writer.AddDeclRef(D->getTypedefForAnonDecl(), Record);
}
void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
VisitTagDecl(D);
- Writer.AddTypeRef(D->getIntegerType(), Record);
+ Writer.AddTypeSourceInfo(D->getIntegerTypeSourceInfo(), Record);
+ if (!D->getIntegerTypeSourceInfo())
+ Writer.AddTypeRef(D->getIntegerType(), Record);
Writer.AddTypeRef(D->getPromotionType(), Record);
Record.push_back(D->getNumPositiveBits());
Record.push_back(D->getNumNegativeBits());
+ Record.push_back(D->isScoped());
+ Record.push_back(D->isScopedUsingClassTag());
+ Record.push_back(D->isFixed());
Writer.AddDeclRef(D->getInstantiatedFromMemberEnum(), Record);
Code = serialization::DECL_ENUM;
}
@@ -208,14 +221,17 @@ void ASTDeclWriter::VisitEnumConstantDecl(EnumConstantDecl *D) {
void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
VisitValueDecl(D);
+ Record.push_back(D->hasExtInfo());
+ if (D->hasExtInfo())
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
- // FIXME: write optional qualifier and its range.
}
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
VisitDeclaratorDecl(D);
- // FIXME: write DeclarationNameLoc.
+ VisitRedeclarable(D);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
Record.push_back(D->getIdentifierNamespace());
Record.push_back(D->getTemplatedKind());
switch (D->getTemplatedKind()) {
@@ -236,8 +252,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
case FunctionDecl::TK_FunctionTemplateSpecialization: {
FunctionTemplateSpecializationInfo *
FTSInfo = D->getTemplateSpecializationInfo();
- // We want it canonical to guarantee that it has a Common*.
- Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record);
+ Writer.AddDeclRef(FTSInfo->getTemplate(), Record);
Record.push_back(FTSInfo->getTemplateSpecializationKind());
// Template arguments.
@@ -257,6 +272,12 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
}
Writer.AddSourceLocation(FTSInfo->getPointOfInstantiation(), Record);
+
+ if (D->isCanonicalDecl()) {
+ // Write the template that contains the specializations set. We will
+ // add a FunctionTemplateSpecializationInfo to it when reading.
+ Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record);
+ }
break;
}
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
@@ -281,9 +302,9 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
// FunctionDecl's body is handled last at ASTWriterDecl::Visit,
// after everything else is written.
- VisitRedeclarable(D);
Record.push_back(D->getStorageClass()); // FIXME: stable encoding
Record.push_back(D->getStorageClassAsWritten());
+ Record.push_back(D->IsInline);
Record.push_back(D->isInlineSpecified());
Record.push_back(D->isVirtualAsWritten());
Record.push_back(D->isPure());
@@ -291,7 +312,6 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->hasWrittenPrototype());
Record.push_back(D->isDeleted());
Record.push_back(D->isTrivial());
- Record.push_back(D->isCopyAssignment());
Record.push_back(D->hasImplicitReturnZero());
Writer.AddSourceLocation(D->getLocEnd(), Record);
@@ -484,8 +504,8 @@ void ASTDeclWriter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
VisitObjCImplDecl(D);
Writer.AddDeclRef(D->getSuperClass(), Record);
- Writer.AddCXXBaseOrMemberInitializers(D->IvarInitializers,
- D->NumIvarInitializers, Record);
+ Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers,
+ Record);
Record.push_back(D->hasSynthBitfield());
Code = serialization::DECL_OBJC_IMPLEMENTATION;
}
@@ -495,6 +515,7 @@ void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
Writer.AddSourceLocation(D->getLocStart(), Record);
Writer.AddDeclRef(D->getPropertyDecl(), Record);
Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Writer.AddSourceLocation(D->getPropertyIvarDeclLoc(), Record);
Writer.AddStmt(D->getGetterCXXConstructor());
Writer.AddStmt(D->getSetterCXXAssignment());
Code = serialization::DECL_OBJC_PROPERTY_IMPL;
@@ -511,15 +532,26 @@ void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
Code = serialization::DECL_FIELD;
}
+void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getChainingSize());
+
+ for (IndirectFieldDecl::chain_iterator
+ P = D->chain_begin(),
+ PEnd = D->chain_end(); P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = serialization::DECL_INDIRECTFIELD;
+}
+
void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
VisitDeclaratorDecl(D);
+ VisitRedeclarable(D);
Record.push_back(D->getStorageClass()); // FIXME: stable encoding
Record.push_back(D->getStorageClassAsWritten());
Record.push_back(D->isThreadSpecified());
Record.push_back(D->hasCXXDirectInitializer());
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
- VisitRedeclarable(D);
Record.push_back(D->getInit() ? 1 : 0);
if (D->getInit())
Writer.AddStmt(D->getInit());
@@ -592,6 +624,22 @@ void ASTDeclWriter::VisitBlockDecl(BlockDecl *D) {
for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
P != PEnd; ++P)
Writer.AddDeclRef(*P, Record);
+ Record.push_back(D->capturesCXXThis());
+ Record.push_back(D->getNumCaptures());
+ for (BlockDecl::capture_iterator
+ i = D->capture_begin(), e = D->capture_end(); i != e; ++i) {
+ const BlockDecl::Capture &capture = *i;
+ Writer.AddDeclRef(capture.getVariable(), Record);
+
+ unsigned flags = 0;
+ if (capture.isByRef()) flags |= 1;
+ if (capture.isNested()) flags |= 2;
+ if (capture.hasCopyExpr()) flags |= 4;
+ Record.push_back(flags);
+
+ if (capture.hasCopyExpr()) Writer.AddStmt(capture.getCopyExpr());
+ }
+
Code = serialization::DECL_BLOCK;
}
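The capture flags above pack three booleans into one integer: bit 0 is
byref, bit 1 is nested, bit 2 marks a copy expression. A reader would unpack
them symmetrically; a sketch with an assumed helper ReadStmtAsExpr():

    unsigned flags = Record[Idx++];       // as emitted by VisitBlockDecl
    bool isByRef     = flags & 1;         // capture.isByRef()
    bool isNested    = flags & 2;         // capture.isNested()
    bool hasCopyExpr = flags & 4;         // capture.hasCopyExpr()
    Expr *CopyExpr = hasCopyExpr ? ReadStmtAsExpr() : 0; // hypothetical helper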
@@ -604,8 +652,15 @@ void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
Code = serialization::DECL_LINKAGE_SPEC;
}
+void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+ Code = serialization::DECL_LABEL;
+}
+
+
void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
VisitNamedDecl(D);
+ Record.push_back(D->isInline());
Writer.AddSourceLocation(D->getLBracLoc(), Record);
Writer.AddSourceLocation(D->getRBracLoc(), Record);
Writer.AddDeclRef(D->getNextNamespace(), Record);
@@ -620,7 +675,22 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
if (Writer.hasChain() && !D->isOriginalNamespace() &&
D->getOriginalNamespace()->getPCHLevel() > 0) {
- Writer.AddUpdatedNamespace(D->getOriginalNamespace());
+ NamespaceDecl *NS = D->getOriginalNamespace();
+ Writer.AddUpdatedDeclContext(NS);
+
+ // Make sure all visible decls are written. They will be recorded later.
+ NS->lookup(DeclarationName());
+ StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(NS->getLookupPtr());
+ if (Map) {
+ for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
+ D != DEnd; ++D) {
+ DeclContext::lookup_result Result = D->second.getLookupResult();
+ while (Result.first != Result.second) {
+ Writer.GetDeclRef(*Result.first);
+ ++Result.first;
+ }
+ }
+ }
}
}
@@ -639,10 +709,8 @@ void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
Writer.AddSourceRange(D->getNestedNameRange(), Record);
Writer.AddSourceLocation(D->getUsingLocation(), Record);
Writer.AddNestedNameSpecifier(D->getTargetNestedNameDecl(), Record);
- Record.push_back(D->getNumShadowDecls());
- for (UsingDecl::shadow_iterator P = D->shadow_begin(),
- PEnd = D->shadow_end(); P != PEnd; ++P)
- Writer.AddDeclRef(*P, Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Writer.AddDeclRef(D->FirstUsingShadow, Record);
Record.push_back(D->isTypeName());
Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record);
Code = serialization::DECL_USING;
@@ -651,7 +719,7 @@ void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitNamedDecl(D);
Writer.AddDeclRef(D->getTargetDecl(), Record);
- Writer.AddDeclRef(D->getUsingDecl(), Record);
+ Writer.AddDeclRef(D->UsingOrNextShadow, Record);
Writer.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D), Record);
Code = serialization::DECL_USING_SHADOW;
}
@@ -672,6 +740,7 @@ void ASTDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
Writer.AddSourceRange(D->getTargetNestedNameRange(), Record);
Writer.AddSourceLocation(D->getUsingLoc(), Record);
Writer.AddNestedNameSpecifier(D->getTargetNestedNameSpecifier(), Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
Code = serialization::DECL_UNRESOLVED_USING_VALUE;
}
@@ -686,64 +755,14 @@ void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
}
void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
- // See comments at ASTDeclReader::VisitCXXRecordDecl about why this happens
- // before VisitRecordDecl.
- enum { Data_NoDefData, Data_Owner, Data_NotOwner };
- bool OwnsDefinitionData = false;
- if (D->DefinitionData) {
- assert(D->DefinitionData->Definition &&
- "DefinitionData don't point to a definition decl!");
- OwnsDefinitionData = D->DefinitionData->Definition == D;
- if (OwnsDefinitionData) {
- Record.push_back(Data_Owner);
- } else {
- Record.push_back(Data_NotOwner);
- Writer.AddDeclRef(D->DefinitionData->Definition, Record);
- }
- } else
- Record.push_back(Data_NoDefData);
-
VisitRecordDecl(D);
- if (OwnsDefinitionData) {
- assert(D->DefinitionData);
- struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
-
- Record.push_back(Data.UserDeclaredConstructor);
- Record.push_back(Data.UserDeclaredCopyConstructor);
- Record.push_back(Data.UserDeclaredCopyAssignment);
- Record.push_back(Data.UserDeclaredDestructor);
- Record.push_back(Data.Aggregate);
- Record.push_back(Data.PlainOldData);
- Record.push_back(Data.Empty);
- Record.push_back(Data.Polymorphic);
- Record.push_back(Data.Abstract);
- Record.push_back(Data.HasTrivialConstructor);
- Record.push_back(Data.HasTrivialCopyConstructor);
- Record.push_back(Data.HasTrivialCopyAssignment);
- Record.push_back(Data.HasTrivialDestructor);
- Record.push_back(Data.ComputedVisibleConversions);
- Record.push_back(Data.DeclaredDefaultConstructor);
- Record.push_back(Data.DeclaredCopyConstructor);
- Record.push_back(Data.DeclaredCopyAssignment);
- Record.push_back(Data.DeclaredDestructor);
-
- Record.push_back(D->getNumBases());
- for (CXXRecordDecl::base_class_iterator I = D->bases_begin(),
- E = D->bases_end(); I != E; ++I)
- Writer.AddCXXBaseSpecifier(*I, Record);
-
- // FIXME: Make VBases lazily computed when needed to avoid storing them.
- Record.push_back(D->getNumVBases());
- for (CXXRecordDecl::base_class_iterator I = D->vbases_begin(),
- E = D->vbases_end(); I != E; ++I)
- Writer.AddCXXBaseSpecifier(*I, Record);
-
- Writer.AddUnresolvedSet(Data.Conversions, Record);
- Writer.AddUnresolvedSet(Data.VisibleConversions, Record);
- // Data.Definition is written at the top.
- Writer.AddDeclRef(Data.FirstFriend, Record);
- }
+ CXXRecordDecl *DefinitionDecl = 0;
+ if (D->DefinitionData)
+ DefinitionDecl = D->DefinitionData->Definition;
+ Writer.AddDeclRef(DefinitionDecl, Record);
+ if (D == DefinitionDecl)
+ Writer.AddCXXDefinitionData(D, Record);
enum {
CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
@@ -761,6 +780,11 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Record.push_back(CXXRecNotTemplate);
}
+ // Store the key function, so that it can be computed later without
+ // deserializing every method.
+ if (D->IsDefinition)
+ Writer.AddDeclRef(Context.getKeyFunction(D), Record);
+
Code = serialization::DECL_CXX_RECORD;
}
@@ -779,8 +803,8 @@ void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
Record.push_back(D->IsExplicitSpecified);
Record.push_back(D->ImplicitlyDefined);
- Writer.AddCXXBaseOrMemberInitializers(D->BaseOrMemberInitializers,
- D->NumBaseOrMemberInitializers, Record);
+ Writer.AddCXXCtorInitializers(D->CtorInitializers, D->NumCtorInitializers,
+ Record);
Code = serialization::DECL_CXX_CONSTRUCTOR;
}
@@ -813,7 +837,8 @@ void ASTDeclWriter::VisitFriendDecl(FriendDecl *D) {
Writer.AddTypeSourceInfo(D->Friend.get<TypeSourceInfo*>(), Record);
else
Writer.AddDeclRef(D->Friend.get<NamedDecl*>(), Record);
- Writer.AddDeclRef(D->NextFriend, Record);
+ Writer.AddDeclRef(D->getNextFriend(), Record);
+ Record.push_back(D->UnsupportedFriend);
Writer.AddSourceLocation(D->FriendLoc, Record);
Code = serialization::DECL_FRIEND;
}
@@ -840,10 +865,13 @@ void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
}
void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
- VisitTemplateDecl(D);
+ // Emit data to initialize CommonOrPrev before VisitTemplateDecl so that
+ // getCommonPtr() can be used while this is still initializing.
- Record.push_back(D->getIdentifierNamespace());
Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ if (D->getPreviousDeclaration())
+ Writer.AddDeclRef(D->getFirstDeclaration(), Record);
+
if (D->getPreviousDeclaration() == 0) {
// This TemplateDecl owns the CommonPtr; write it.
assert(D->isCanonicalDecl());
@@ -866,6 +894,9 @@ void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
Writer.FirstLatestDecls[First] = D;
}
}
+
+ VisitTemplateDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
}
void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
@@ -911,10 +942,6 @@ void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
InstFromD = cast<ClassTemplatePartialSpecializationDecl>(InstFromD)->
getSpecializedTemplate();
}
- // Is this a specialization of an already-serialized template?
- if (InstFromD->getCanonicalDecl()->getPCHLevel() != 0)
- Writer.AddAdditionalTemplateSpecialization(Writer.getDeclID(InstFromD),
- Writer.getDeclID(D));
// Explicit info.
Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record);
@@ -987,17 +1014,33 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- VisitVarDecl(D);
+ // For an expanded parameter pack, record the number of expansion types here
+ // so that it's easier for deserialization.
+ if (D->isExpandedParameterPack())
+ Record.push_back(D->getNumExpansionTypes());
+
+ VisitDeclaratorDecl(D);
// TemplateParmPosition.
Record.push_back(D->getDepth());
Record.push_back(D->getPosition());
- // Rest of NonTypeTemplateParmDecl.
- Record.push_back(D->getDefaultArgument() != 0);
- if (D->getDefaultArgument()) {
- Writer.AddStmt(D->getDefaultArgument());
- Record.push_back(D->defaultArgumentWasInherited());
+
+ if (D->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ Writer.AddTypeRef(D->getExpansionType(I), Record);
+ Writer.AddTypeSourceInfo(D->getExpansionTypeSourceInfo(I), Record);
+ }
+
+ Code = serialization::DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK;
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ Record.push_back(D->isParameterPack());
+ Record.push_back(D->getDefaultArgument() != 0);
+ if (D->getDefaultArgument()) {
+ Writer.AddStmt(D->getDefaultArgument());
+ Record.push_back(D->defaultArgumentWasInherited());
+ }
+ Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
}
- Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
}
void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
@@ -1008,6 +1051,7 @@ void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// Rest of TemplateTemplateParmDecl.
Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
Record.push_back(D->defaultArgumentWasInherited());
+ Record.push_back(D->isParameterPack());
Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM;
}
@@ -1041,9 +1085,14 @@ void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
if (D->RedeclLink.getNext() == D) {
Record.push_back(NoRedeclaration);
} else {
- Record.push_back(D->RedeclLink.NextIsPrevious() ? PointsToPrevious
- : PointsToLatest);
- Writer.AddDeclRef(D->RedeclLink.getPointer(), Record);
+ if (D->RedeclLink.NextIsPrevious()) {
+ Record.push_back(PointsToPrevious);
+ Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ Writer.AddDeclRef(D->getFirstDeclaration(), Record);
+ } else {
+ Record.push_back(PointsToLatest);
+ Writer.AddDeclRef(D->RedeclLink.getPointer(), Record);
+ }
}
T *First = D->getFirstDeclaration();
@@ -1086,15 +1135,16 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// ValueDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
// DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
Abv->Add(BitCodeAbbrevOp(serialization::PREDEF_TYPE_NULL_ID)); // InfoType
// VarDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
Abv->Add(BitCodeAbbrevOp(0)); // StorageClassAsWritten
Abv->Add(BitCodeAbbrevOp(0)); // isThreadSpecified
Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer
Abv->Add(BitCodeAbbrevOp(0)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable
- Abv->Add(BitCodeAbbrevOp(0)); // PrevDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
// ParmVarDecl
@@ -1137,6 +1187,9 @@ static bool isRequiredDecl(const Decl *D, ASTContext &Context) {
}
void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
RecordData Record;
ASTDeclWriter W(*this, Context, Record);
@@ -1186,13 +1239,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
D->getDeclKindName() + "'");
Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
- // If the declaration had any attributes, write them now.
- if (D->hasAttrs())
- WriteAttributeRecord(D->getAttrs());
-
// Flush any expressions that were written as part of this declaration.
FlushStmts();
-
+
+ // Flush C++ base specifiers, if there are any.
+ FlushCXXBaseSpecifiers();
+
// Note "external" declarations so that we can add them to a record in the
// AST file later.
//
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index 7f2da6c..8a5ffe9 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -14,6 +14,7 @@
#include "clang/Serialization/ASTWriter.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Bitcode/BitstreamWriter.h"
using namespace clang;
@@ -75,6 +76,7 @@ namespace clang {
void VisitBinaryOperator(BinaryOperator *E);
void VisitCompoundAssignOperator(CompoundAssignOperator *E);
void VisitConditionalOperator(ConditionalOperator *E);
+ void VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
void VisitImplicitCastExpr(ImplicitCastExpr *E);
void VisitExplicitCastExpr(ExplicitCastExpr *E);
void VisitCStyleCastExpr(CStyleCastExpr *E);
@@ -86,7 +88,6 @@ namespace clang {
void VisitVAArgExpr(VAArgExpr *E);
void VisitAddrLabelExpr(AddrLabelExpr *E);
void VisitStmtExpr(StmtExpr *E);
- void VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
void VisitChooseExpr(ChooseExpr *E);
void VisitGNUNullExpr(GNUNullExpr *E);
void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
@@ -100,10 +101,7 @@ namespace clang {
void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
- void VisitObjCSuperExpr(ObjCSuperExpr *E);
void VisitObjCIsaExpr(ObjCIsaExpr *E);
// Objective-C Statements
@@ -131,6 +129,7 @@ namespace clang {
void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ void VisitCXXUuidofExpr(CXXUuidofExpr *E);
void VisitCXXThisExpr(CXXThisExpr *E);
void VisitCXXThrowExpr(CXXThrowExpr *E);
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
@@ -141,7 +140,7 @@ namespace clang {
void VisitCXXDeleteExpr(CXXDeleteExpr *E);
void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
- void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
@@ -151,6 +150,16 @@ namespace clang {
void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
+ void VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E);
+ void VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
+ void VisitPackExpansionExpr(PackExpansionExpr *E);
+ void VisitSizeOfPackExpr(SizeOfPackExpr *E);
+ void VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
+ // CUDA Expressions
+ void VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E);
};
}
@@ -168,6 +177,7 @@ void ASTStmtWriter::VisitStmt(Stmt *S) {
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
Writer.AddSourceLocation(S->getSemiLoc(), Record);
+ Record.push_back(S->LeadingEmptyMacro);
Code = serialization::STMT_NULL;
}
@@ -208,10 +218,9 @@ void ASTStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
void ASTStmtWriter::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
- Writer.AddIdentifierRef(S->getID(), Record);
+ Writer.AddDeclRef(S->getDecl(), Record);
Writer.AddStmt(S->getSubStmt());
Writer.AddSourceLocation(S->getIdentLoc(), Record);
- Record.push_back(Writer.GetLabelID(S));
Code = serialization::STMT_LABEL;
}
@@ -232,6 +241,7 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
Writer.AddStmt(S->getCond());
Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getSwitchLoc(), Record);
+ Record.push_back(S->isAllEnumCasesCovered());
for (SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase())
Record.push_back(Writer.RecordSwitchCaseID(SC));
@@ -272,7 +282,7 @@ void ASTStmtWriter::VisitForStmt(ForStmt *S) {
void ASTStmtWriter::VisitGotoStmt(GotoStmt *S) {
VisitStmt(S);
- Record.push_back(Writer.GetLabelID(S->getLabel()));
+ Writer.AddDeclRef(S->getLabel(), Record);
Writer.AddSourceLocation(S->getGotoLoc(), Record);
Writer.AddSourceLocation(S->getLabelLoc(), Record);
Code = serialization::STMT_GOTO;
@@ -354,6 +364,9 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Writer.AddTypeRef(E->getType(), Record);
Record.push_back(E->isTypeDependent());
Record.push_back(E->isValueDependent());
+ Record.push_back(E->containsUnexpandedParameterPack());
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
}
void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
@@ -367,22 +380,22 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
Record.push_back(E->hasQualifier());
- unsigned NumTemplateArgs = E->getNumTemplateArgs();
- assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
- "Template args list with no args ?");
- Record.push_back(NumTemplateArgs);
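+  // Writing the flag itself (rather than inferring it from a nonzero
+  // count) lets an explicit but empty template argument list, as in
+  // f<>(), round-trip through serialization.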
+ Record.push_back(E->hasExplicitTemplateArgs());
if (E->hasQualifier()) {
Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
Writer.AddSourceRange(E->getQualifierRange(), Record);
}
- if (NumTemplateArgs)
+ if (E->hasExplicitTemplateArgs()) {
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
AddExplicitTemplateArgumentList(E->getExplicitTemplateArgs());
+ }
Writer.AddDeclRef(E->getDecl(), Record);
- // FIXME: write DeclarationNameLoc.
Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record);
Code = serialization::EXPR_DECL_REF;
}
@@ -533,11 +546,10 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
Writer.AddSourceRange(E->getQualifierRange(), Record);
}
- unsigned NumTemplateArgs = E->getNumTemplateArgs();
- assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
- "Template args list with no args ?");
- Record.push_back(NumTemplateArgs);
- if (NumTemplateArgs) {
+ Record.push_back(E->hasExplicitTemplateArgs());
+ if (E->hasExplicitTemplateArgs()) {
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
Writer.AddSourceLocation(E->getLAngleLoc(), Record);
Writer.AddSourceLocation(E->getRAngleLoc(), Record);
for (unsigned i=0; i != NumTemplateArgs; ++i)
@@ -549,11 +561,14 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
Record.push_back(FoundDecl.getAccess());
Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
Writer.AddStmt(E->getBase());
Writer.AddDeclRef(E->getMemberDecl(), Record);
- // FIXME: write DeclarationNameLoc.
Writer.AddSourceLocation(E->getMemberLoc(), Record);
Record.push_back(E->isArrow());
+ Writer.AddDeclarationNameLoc(E->MemberDNLoc,
+ E->getMemberDecl()->getDeclName(), Record);
Code = serialization::EXPR_MEMBER;
}
@@ -597,15 +612,26 @@ void ASTStmtWriter::VisitConditionalOperator(ConditionalOperator *E) {
Writer.AddStmt(E->getCond());
Writer.AddStmt(E->getLHS());
Writer.AddStmt(E->getRHS());
- Writer.AddStmt(E->getSAVE());
Writer.AddSourceLocation(E->getQuestionLoc(), Record);
Writer.AddSourceLocation(E->getColonLoc(), Record);
Code = serialization::EXPR_CONDITIONAL_OPERATOR;
}
+void
+ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
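+  // This is the GNU "x ?: y" form: getCommon() is the operand as written,
+  // and the OpaqueValueExpr stands in for its reuse as both the condition
+  // and the true branch.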
+ Writer.AddStmt(E->getOpaqueValue());
+ Writer.AddStmt(E->getCommon());
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getTrueExpr());
+ Writer.AddStmt(E->getFalseExpr());
+ Writer.AddSourceLocation(E->getQuestionLoc(), Record);
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Code = serialization::EXPR_BINARY_CONDITIONAL_OPERATOR;
+}
+
void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
- Record.push_back(E->getValueKind());
Code = serialization::EXPR_IMPLICIT_CAST;
}
@@ -706,7 +732,7 @@ void ASTStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getAmpAmpLoc(), Record);
Writer.AddSourceLocation(E->getLabelLoc(), Record);
- Record.push_back(Writer.GetLabelID(E->getLabel()));
+ Writer.AddDeclRef(E->getLabel(), Record);
Code = serialization::EXPR_ADDR_LABEL;
}
@@ -718,15 +744,6 @@ void ASTStmtWriter::VisitStmtExpr(StmtExpr *E) {
Code = serialization::EXPR_STMT;
}
-void ASTStmtWriter::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
- VisitExpr(E);
- Writer.AddTypeSourceInfo(E->getArgTInfo1(), Record);
- Writer.AddTypeSourceInfo(E->getArgTInfo2(), Record);
- Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
- Writer.AddSourceLocation(E->getRParenLoc(), Record);
- Code = serialization::EXPR_TYPES_COMPATIBLE;
-}
-
void ASTStmtWriter::VisitChooseExpr(ChooseExpr *E) {
VisitExpr(E);
Writer.AddStmt(E->getCond());
@@ -756,7 +773,6 @@ void ASTStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
void ASTStmtWriter::VisitBlockExpr(BlockExpr *E) {
VisitExpr(E);
Writer.AddDeclRef(E->getBlockDecl(), Record);
- Record.push_back(E->hasBlockDeclRefExprs());
Code = serialization::EXPR_BLOCK;
}
@@ -766,7 +782,6 @@ void ASTStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
Writer.AddSourceLocation(E->getLocation(), Record);
Record.push_back(E->isByRef());
Record.push_back(E->isConstQualAdded());
- Writer.AddStmt(E->getCopyConstructorExpr());
Code = serialization::EXPR_BLOCK_DECL_REF;
}
@@ -817,26 +832,29 @@ void ASTStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
- Writer.AddDeclRef(E->getProperty(), Record);
+ Record.push_back(E->isImplicitProperty());
+ if (E->isImplicitProperty()) {
+ Writer.AddDeclRef(E->getImplicitPropertyGetter(), Record);
+ Writer.AddDeclRef(E->getImplicitPropertySetter(), Record);
+ } else {
+ Writer.AddDeclRef(E->getExplicitProperty(), Record);
+ }
Writer.AddSourceLocation(E->getLocation(), Record);
- Writer.AddStmt(E->getBase());
+ Writer.AddSourceLocation(E->getReceiverLocation(), Record);
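+  // A small tag records which receiver form follows: 0 -> object
+  // expression, 1 -> super (a type), 2 -> class (a decl reference).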
+ if (E->isObjectReceiver()) {
+ Record.push_back(0);
+ Writer.AddStmt(E->getBase());
+ } else if (E->isSuperReceiver()) {
+ Record.push_back(1);
+ Writer.AddTypeRef(E->getSuperReceiverType(), Record);
+ } else {
+ Record.push_back(2);
+ Writer.AddDeclRef(E->getClassReceiver(), Record);
+ }
+
Code = serialization::EXPR_OBJC_PROPERTY_REF_EXPR;
}
-void ASTStmtWriter::VisitObjCImplicitSetterGetterRefExpr(
- ObjCImplicitSetterGetterRefExpr *E) {
- VisitExpr(E);
- Writer.AddDeclRef(E->getGetterMethod(), Record);
- Writer.AddDeclRef(E->getSetterMethod(), Record);
-
- // NOTE: InterfaceDecl and Base are mutually exclusive.
- Writer.AddDeclRef(E->getInterfaceDecl(), Record);
- Writer.AddStmt(E->getBase());
- Writer.AddSourceLocation(E->getLocation(), Record);
- Writer.AddSourceLocation(E->getClassLoc(), Record);
- Code = serialization::EXPR_OBJC_KVC_REF_EXPR;
-}
-
void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
@@ -867,6 +885,7 @@ void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
Writer.AddSourceLocation(E->getLeftLoc(), Record);
Writer.AddSourceLocation(E->getRightLoc(), Record);
+ Writer.AddSourceLocation(E->getSelectorLoc(), Record);
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
@@ -874,12 +893,6 @@ void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
Code = serialization::EXPR_OBJC_MESSAGE_EXPR;
}
-void ASTStmtWriter::VisitObjCSuperExpr(ObjCSuperExpr *E) {
- VisitExpr(E);
- Writer.AddSourceLocation(E->getLoc(), Record);
- Code = serialization::EXPR_OBJC_SUPER_EXPR;
-}
-
void ASTStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
VisitStmt(S);
Writer.AddStmt(S->getElement());
@@ -972,19 +985,20 @@ void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
Record.push_back(E->isElidable());
Record.push_back(E->requiresZeroInitialization());
Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Writer.AddSourceRange(E->getParenRange(), Record);
Code = serialization::EXPR_CXX_CONSTRUCT;
}
void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
VisitCXXConstructExpr(E);
- Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
- Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
Code = serialization::EXPR_CXX_TEMPORARY_OBJECT;
}
void ASTStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
- Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceRange(SourceRange(E->getOperatorLoc(), E->getRParenLoc()),
+ Record);
}
void ASTStmtWriter::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
@@ -1039,6 +1053,18 @@ void ASTStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
}
}
+void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_UUIDOF_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_UUIDOF_EXPR;
+ }
+}
+
void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getLocation(), Record);
@@ -1076,7 +1102,7 @@ void ASTStmtWriter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
void ASTStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
- Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = serialization::EXPR_CXX_SCALAR_VALUE_INIT;
}
@@ -1085,15 +1111,19 @@ void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
Record.push_back(E->isGlobalNew());
Record.push_back(E->hasInitializer());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.push_back(E->isArray());
Record.push_back(E->getNumPlacementArgs());
Record.push_back(E->getNumConstructorArgs());
Writer.AddDeclRef(E->getOperatorNew(), Record);
Writer.AddDeclRef(E->getOperatorDelete(), Record);
Writer.AddDeclRef(E->getConstructor(), Record);
+ Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record);
Writer.AddSourceRange(E->getTypeIdParens(), Record);
Writer.AddSourceLocation(E->getStartLoc(), Record);
Writer.AddSourceLocation(E->getEndLoc(), Record);
+ Writer.AddSourceLocation(E->getConstructorLParen(), Record);
+ Writer.AddSourceLocation(E->getConstructorRParen(), Record);
for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
I != e; ++I)
Writer.AddStmt(*I);
@@ -1105,6 +1135,8 @@ void ASTStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
VisitExpr(E);
Record.push_back(E->isGlobalDelete());
Record.push_back(E->isArrayForm());
+ Record.push_back(E->isArrayFormAsWritten());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
Writer.AddDeclRef(E->getOperatorDelete(), Record);
Writer.AddStmt(E->getArgument());
Writer.AddSourceLocation(E->getSourceRange().getBegin(), Record);
@@ -1134,30 +1166,28 @@ void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
Code = serialization::EXPR_CXX_PSEUDO_DESTRUCTOR;
}
-void ASTStmtWriter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
Record.push_back(E->getNumTemporaries());
for (unsigned i = 0, e = E->getNumTemporaries(); i != e; ++i)
Writer.AddCXXTemporary(E->getTemporary(i), Record);
Writer.AddStmt(E->getSubExpr());
- Code = serialization::EXPR_CXX_EXPR_WITH_TEMPORARIES;
+ Code = serialization::EXPR_EXPR_WITH_CLEANUPS;
}
void
ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
- // Don't emit anything here, NumTemplateArgs must be emitted first.
+  // Don't emit anything here; hasExplicitTemplateArgs() must be
+  // emitted first.
+ Record.push_back(E->hasExplicitTemplateArgs());
if (E->hasExplicitTemplateArgs()) {
const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs();
- assert(Args.NumTemplateArgs &&
- "Num of template args was zero! AST reading will mess up!");
Record.push_back(Args.NumTemplateArgs);
AddExplicitTemplateArgumentList(Args);
- } else {
- Record.push_back(0);
}
if (!E->isImplicitAccess())
@@ -1170,9 +1200,7 @@ ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
Writer.AddSourceRange(E->getQualifierRange(), Record);
Writer.AddDeclRef(E->getFirstQualifierFoundInScope(), Record);
- // FIXME: write whole DeclarationNameInfo.
- Writer.AddDeclarationName(E->getMember(), Record);
- Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Writer.AddDeclarationNameInfo(E->MemberNameInfo, Record);
Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_MEMBER;
}
@@ -1180,21 +1208,16 @@ void
ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
- // Don't emit anything here, NumTemplateArgs must be emitted first.
-
+  // Don't emit anything here; hasExplicitTemplateArgs() must be
+  // emitted first.
+ Record.push_back(E->hasExplicitTemplateArgs());
if (E->hasExplicitTemplateArgs()) {
const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs();
- assert(Args.NumTemplateArgs &&
- "Num of template args was zero! AST reading will mess up!");
Record.push_back(Args.NumTemplateArgs);
AddExplicitTemplateArgumentList(Args);
- } else {
- Record.push_back(0);
}
- // FIXME: write whole DeclarationNameInfo.
- Writer.AddDeclarationName(E->getDeclName(), Record);
- Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
Writer.AddSourceRange(E->getQualifierRange(), Record);
Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF;
@@ -1207,8 +1230,7 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
for (CXXUnresolvedConstructExpr::arg_iterator
ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI)
Writer.AddStmt(*ArgI);
- Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
- Writer.AddTypeRef(E->getTypeAsWritten(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
Writer.AddSourceLocation(E->getLParenLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = serialization::EXPR_CXX_UNRESOLVED_CONSTRUCT;
@@ -1217,16 +1239,12 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
- // Don't emit anything here, NumTemplateArgs must be emitted first.
-
+  // Don't emit anything here; hasExplicitTemplateArgs() must be emitted first.
+ Record.push_back(E->hasExplicitTemplateArgs());
if (E->hasExplicitTemplateArgs()) {
const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs();
- assert(Args.NumTemplateArgs &&
- "Num of template args was zero! AST reading will mess up!");
Record.push_back(Args.NumTemplateArgs);
AddExplicitTemplateArgumentList(Args);
- } else {
- Record.push_back(0);
}
Record.push_back(E->getNumDecls());
@@ -1236,11 +1254,9 @@ void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
Record.push_back(OvI.getAccess());
}
- // FIXME: write whole DeclarationNameInfo.
- Writer.AddDeclarationName(E->getName(), Record);
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
Writer.AddSourceRange(E->getQualifierRange(), Record);
- Writer.AddSourceLocation(E->getNameLoc(), Record);
}
void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
@@ -1264,11 +1280,74 @@ void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
void ASTStmtWriter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
VisitExpr(E);
Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
Writer.AddSourceRange(E->getSourceRange(), Record);
- Writer.AddTypeRef(E->getQueriedType(), Record);
+ Writer.AddTypeSourceInfo(E->getQueriedTypeSourceInfo(), Record);
Code = serialization::EXPR_CXX_UNARY_TYPE_TRAIT;
}
+void ASTStmtWriter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeSourceInfo(E->getLhsTypeSourceInfo(), Record);
+ Writer.AddTypeSourceInfo(E->getRhsTypeSourceInfo(), Record);
+ Code = serialization::EXPR_BINARY_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddStmt(E->getOperand());
+ Code = serialization::EXPR_CXX_NOEXCEPT;
+}
+
+void ASTStmtWriter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getEllipsisLoc(), Record);
+ Record.push_back(E->NumExpansions);
+ Writer.AddStmt(E->getPattern());
+ Code = serialization::EXPR_PACK_EXPANSION;
+}
+
+void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->OperatorLoc, Record);
+ Writer.AddSourceLocation(E->PackLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Record.push_back(E->Length);
+ Writer.AddDeclRef(E->Pack, Record);
+ Code = serialization::EXPR_SIZEOF_PACK;
+}
+
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->Param, Record);
+ Writer.AddTemplateArgument(E->getArgumentPack(), Record);
+ Writer.AddSourceLocation(E->NameLoc, Record);
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
+}
+
+void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ Record.push_back(Writer.getOpaqueValueID(E));
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OPAQUE_VALUE;
+}
+
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ Writer.AddStmt(E->getConfig());
+ Code = serialization::EXPR_CUDA_KERNEL_CALL;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
@@ -1287,16 +1366,14 @@ unsigned ASTWriter::getSwitchCaseID(SwitchCase *S) {
return SwitchCaseIDs[S];
}
-/// \brief Retrieve the ID for the given label statement, which may
-/// or may not have been emitted yet.
-unsigned ASTWriter::GetLabelID(LabelStmt *S) {
- std::map<LabelStmt *, unsigned>::iterator Pos = LabelIDs.find(S);
- if (Pos != LabelIDs.end())
- return Pos->second;
+void ASTWriter::ClearSwitchCaseIDs() {
+ SwitchCaseIDs.clear();
+}
- unsigned NextID = LabelIDs.size();
- LabelIDs[S] = NextID;
- return NextID;
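+/// Number opaque value expressions on first use: a missing map entry is
+/// value-initialized to 0, and size() already counts the new entry, so
+/// IDs start at 1 and 0 remains free as the "unassigned" sentinel the
+/// !entry test relies on.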
+unsigned ASTWriter::getOpaqueValueID(OpaqueValueExpr *e) {
+ unsigned &entry = OpaqueValues[e];
+ if (!entry) entry = OpaqueValues.size();
+ return entry;
}
/// \brief Write the given substatement or subexpression to the
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
index 5329b6c..b8833ce 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
@@ -26,15 +27,15 @@
using namespace clang;
PCHGenerator::PCHGenerator(const Preprocessor &PP,
+ const std::string &OutputFile,
bool Chaining,
const char *isysroot,
llvm::raw_ostream *OS)
- : PP(PP), isysroot(isysroot), Out(OS), SemaPtr(0),
- StatCalls(0), Stream(Buffer), Writer(Stream) {
-
+ : PP(PP), OutputFile(OutputFile), isysroot(isysroot), Out(OS), SemaPtr(0),
+ StatCalls(0), Stream(Buffer), Writer(Stream), Chaining(Chaining) {
// Install a stat() listener to keep track of all of the stat()
// calls.
- StatCalls = new MemorizeStatCalls;
+ StatCalls = new MemorizeStatCalls();
// If we have a chain, we want new stat calls only, so install the memorizer
// *after* the already installed ASTReader's stat cache.
PP.getFileManager().addStatCache(StatCalls,
@@ -45,9 +46,12 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
if (PP.getDiagnostics().hasErrorOccurred())
return;
+ // Set up the serialization listener.
+ Writer.SetSerializationListener(GetASTSerializationListener());
+
// Emit the PCH file
assert(SemaPtr && "No Sema?");
- Writer.WriteAST(*SemaPtr, StatCalls, isysroot);
+ Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, isysroot);
// Write the generated bitstream to "Out".
Out->write((char *)&Buffer.front(), Buffer.size());
@@ -59,6 +63,16 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
Buffer.clear();
}
+ASTMutationListener *PCHGenerator::GetASTMutationListener() {
+ if (Chaining)
+ return &Writer;
+ return 0;
+}
+
+ASTSerializationListener *PCHGenerator::GetASTSerializationListener() {
+ return 0;
+}
+
ASTDeserializationListener *PCHGenerator::GetASTDeserializationListener() {
return &Writer;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/CMakeLists.txt b/contrib/llvm/tools/clang/lib/StaticAnalyzer/CMakeLists.txt
new file mode 100644
index 0000000..3d15092
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_subdirectory(Core)
+add_subdirectory(Checkers)
+add_subdirectory(Frontend)
diff --git a/contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
index 0ed04fb..8832b05 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AdjustedReturnValueChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
@@ -13,12 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class AdjustedReturnValueChecker :
@@ -34,7 +35,7 @@ public:
};
}
-void clang::RegisterAdjustedReturnValueChecker(GRExprEngine &Eng) {
+void ento::RegisterAdjustedReturnValueChecker(ExprEngine &Eng) {
Eng.registerCheck(new AdjustedReturnValueChecker());
}
@@ -54,7 +55,7 @@ void AdjustedReturnValueChecker::PostVisitCallExpr(CheckerContext &C,
// Casting to void? Discard the value.
if (expectedResultTy->isVoidType()) {
- C.GenerateNode(state->BindExpr(CE, UnknownVal()));
+ C.generateNode(state->BindExpr(CE, UnknownVal()));
return;
}
@@ -88,8 +89,8 @@ void AdjustedReturnValueChecker::PostVisitCallExpr(CheckerContext &C,
if (expectedResultTy != actualResultTy) {
     // FIXME: Do more checking and actually emit an error. At least performing
// the cast avoids some assertion failures elsewhere.
- SValuator &SVator = C.getSValuator();
- V = SVator.EvalCast(V, expectedResultTy, actualResultTy);
- C.GenerateNode(state->BindExpr(CE, V));
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ V = svalBuilder.evalCast(V, expectedResultTy, actualResultTy);
+ C.generateNode(state->BindExpr(CE, V));
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
new file mode 100644
index 0000000..7b68887
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -0,0 +1,123 @@
+//==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file reports various statistics about analyzer visitation.
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+
+// FIXME: Restructure checker registration.
+#include "ExperimentalChecks.h"
+
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class AnalyzerStatsChecker : public CheckerVisitor<AnalyzerStatsChecker> {
+public:
+ static void *getTag();
+ void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B, ExprEngine &Eng);
+
+private:
+ llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
+};
+}
+
+void *AnalyzerStatsChecker::getTag() {
+ static int x = 0;
+ return &x;
+}
+
+void ento::RegisterAnalyzerStatsChecker(ExprEngine &Eng) {
+ Eng.registerCheck(new AnalyzerStatsChecker());
+}
+
+void AnalyzerStatsChecker::VisitEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ ExprEngine &Eng) {
+ const CFG *C = 0;
+ const Decl *D = 0;
+ const LocationContext *LC = 0;
+ const SourceManager &SM = B.getSourceManager();
+
+  // Iterate over the ExplodedGraph.
+ for (ExplodedGraph::node_iterator I = G.nodes_begin();
+ I != G.nodes_end(); ++I) {
+ const ProgramPoint &P = I->getLocation();
+ // Save the LocationContext if we don't have it already
+ if (!LC)
+ LC = P.getLocationContext();
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ const CFGBlock *CB = BE->getBlock();
+ reachable.insert(CB);
+ }
+ }
+
+ // Get the CFG and the Decl of this block
+ C = LC->getCFG();
+ D = LC->getAnalysisContext()->getDecl();
+
+ unsigned total = 0, unreachable = 0;
+
+ // Find CFGBlocks that were not covered by any node
+ for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
+ const CFGBlock *CB = *I;
+ ++total;
+ // Check if the block is unreachable
+ if (!reachable.count(CB)) {
+ ++unreachable;
+ }
+ }
+
+ // We never 'reach' the entry block, so correct the unreachable count
+ unreachable--;
+
+ // Generate the warning string
+ llvm::SmallString<128> buf;
+ llvm::raw_svector_ostream output(buf);
+ PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+ if (Loc.isValid()) {
+ output << Loc.getFilename() << " : ";
+
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+      output << *ND;
+ }
+ else if (isa<BlockDecl>(D)) {
+      output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn()
+             << ")";
+ }
+ }
+
+ output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
+ << unreachable << " | Aborted Block: "
+ << (Eng.wasBlockAborted() ? "yes" : "no")
+ << " | Empty WorkList: "
+ << (Eng.hasEmptyWorkList() ? "yes" : "no");
+
+ B.EmitBasicReport("Analyzer Statistics", "Internal Statistics", output.str(),
+ D->getLocation());
+
+ // Emit warning for each block we bailed out on
+ typedef CoreEngine::BlocksAborted::const_iterator AbortedIterator;
+ const CoreEngine &CE = Eng.getCoreEngine();
+ for (AbortedIterator I = CE.blocks_aborted_begin(),
+ E = CE.blocks_aborted_end(); I != E; ++I) {
+ const BlockEdge &BE = I->first;
+ const CFGBlock *Exit = BE.getDst();
+ const CFGElement &CE = Exit->front();
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&CE))
+ B.EmitBasicReport("Bailout Point", "Internal Statistics", "The analyzer "
+ "stopped analyzing at this point", CS->getStmt()->getLocStart());
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 98345bd..9194791 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ArrayBoundChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -12,46 +12,47 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class ArrayBoundChecker :
public CheckerVisitor<ArrayBoundChecker> {
BuiltinBug *BT;
public:
- ArrayBoundChecker() : BT(0) {}
- static void *getTag();
- void VisitLocation(CheckerContext &C, const Stmt *S, SVal l);
+ ArrayBoundChecker() : BT(0) {}
+ static void *getTag() { static int x = 0; return &x; }
+ void visitLocation(CheckerContext &C, const Stmt *S, SVal l, bool isLoad);
};
}
-void clang::RegisterArrayBoundChecker(GRExprEngine &Eng) {
+void ento::RegisterArrayBoundChecker(ExprEngine &Eng) {
Eng.registerCheck(new ArrayBoundChecker());
}
-void *ArrayBoundChecker::getTag() {
- static int x = 0; return &x;
-}
-
-void ArrayBoundChecker::VisitLocation(CheckerContext &C, const Stmt *S, SVal l){
+void ArrayBoundChecker::visitLocation(CheckerContext &C, const Stmt *S, SVal l,
+ bool isLoad) {
// Check for out of bound array element access.
const MemRegion *R = l.getAsRegion();
if (!R)
return;
- R = R->StripCasts();
-
const ElementRegion *ER = dyn_cast<ElementRegion>(R);
if (!ER)
return;
// Get the index of the accessed element.
- DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+  // Zero index is always in bound; this also passes ElementRegions created for
+ // pointer casts.
+ if (Idx.isZeroConstant())
+ return;
const GRState *state = C.getState();
@@ -60,10 +61,10 @@ void ArrayBoundChecker::VisitLocation(CheckerContext &C, const Stmt *S, SVal l){
= C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
- const GRState *StInBound = state->AssumeInBound(Idx, NumElements, true);
- const GRState *StOutBound = state->AssumeInBound(Idx, NumElements, false);
+ const GRState *StInBound = state->assumeInBound(Idx, NumElements, true);
+ const GRState *StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.GenerateSink(StOutBound);
+ ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
new file mode 100644
index 0000000..f803d27
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -0,0 +1,277 @@
+//== ArrayBoundCheckerV2.cpp ------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundCheckerV2, which is a path-sensitive check
+// that looks for an out-of-bound array element access.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/CharUnits.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundCheckerV2 :
+ public CheckerVisitor<ArrayBoundCheckerV2> {
+ BuiltinBug *BT;
+
+ enum OOB_Kind { OOB_Precedes, OOB_Excedes };
+
+ void reportOOB(CheckerContext &C, const GRState *errorState,
+ OOB_Kind kind);
+
+public:
+ ArrayBoundCheckerV2() : BT(0) {}
+ static void *getTag() { static int x = 0; return &x; }
+ void visitLocation(CheckerContext &C, const Stmt *S, SVal l, bool isLoad);
+};
+
+// FIXME: Eventually replace RegionRawOffset with this class.
+class RegionRawOffsetV2 {
+private:
+ const SubRegion *baseRegion;
+ SVal byteOffset;
+
+ RegionRawOffsetV2()
+ : baseRegion(0), byteOffset(UnknownVal()) {}
+
+public:
+ RegionRawOffsetV2(const SubRegion* base, SVal offset)
+ : baseRegion(base), byteOffset(offset) {}
+
+ NonLoc getByteOffset() const { return cast<NonLoc>(byteOffset); }
+ const SubRegion *getRegion() const { return baseRegion; }
+
+ static RegionRawOffsetV2 computeOffset(const GRState *state,
+ SValBuilder &svalBuilder,
+ SVal location);
+
+ void dump() const;
+ void dumpToStream(llvm::raw_ostream& os) const;
+};
+}
+
+void ento::RegisterArrayBoundCheckerV2(ExprEngine &Eng) {
+ Eng.registerCheck(new ArrayBoundCheckerV2());
+}
+
+void ArrayBoundCheckerV2::visitLocation(CheckerContext &checkerContext,
+ const Stmt *S,
+ SVal location, bool isLoad) {
+
+ // NOTE: Instead of using GRState::assumeInBound(), we are prototyping
+ // some new logic here that reasons directly about memory region extents.
+ // Once that logic is more mature, we can bring it back to assumeInBound()
+ // for all clients to use.
+ //
+ // The algorithm we are using here for bounds checking is to see if the
+ // memory access is within the extent of the base region. Since we
+ // have some flexibility in defining the base region, we can achieve
+ // various levels of conservatism in our buffer overflow checking.
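+  //
+  // A minimal sketch of the case this catches, assuming a plain array:
+  //
+  //   char buf[4];
+  //   char c = buf[4];  // rawOffset = {buf, 4} and extent(buf) = 4, so
+  //                     // only the "offset >= extent" branch below is
+  //                     // feasible and an overflow is reported.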
+ const GRState *state = checkerContext.getState();
+ const GRState *originalState = state;
+
+ SValBuilder &svalBuilder = checkerContext.getSValBuilder();
+ const RegionRawOffsetV2 &rawOffset =
+ RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
+
+ if (!rawOffset.getRegion())
+ return;
+
+ // CHECK LOWER BOUND: Is byteOffset < 0? If so, we are doing a load/store
+ // before the first valid offset in the memory region.
+
+ SVal lowerBound
+ = svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(),
+ svalBuilder.makeZeroArrayIndex(),
+ svalBuilder.getConditionType());
+
+ NonLoc *lowerBoundToCheck = dyn_cast<NonLoc>(&lowerBound);
+ if (!lowerBoundToCheck)
+ return;
+
+ const GRState *state_precedesLowerBound, *state_withinLowerBound;
+ llvm::tie(state_precedesLowerBound, state_withinLowerBound) =
+ state->assume(*lowerBoundToCheck);
+
+ // Are we constrained enough to definitely precede the lower bound?
+ if (state_precedesLowerBound && !state_withinLowerBound) {
+ reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
+ return;
+ }
+
+ // Otherwise, assume the constraint of the lower bound.
+ assert(state_withinLowerBound);
+ state = state_withinLowerBound;
+
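+  // The do/while(false) wrapper exists only so the upper-bound check can
+  // 'break' out early (unknown extent, or a comparison that is not a
+  // NonLoc) and still reach the node generation below.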
+ do {
+ // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
+ // we are doing a load/store after the last valid offset.
+ DefinedOrUnknownSVal extentVal =
+ rawOffset.getRegion()->getExtent(svalBuilder);
+ if (!isa<NonLoc>(extentVal))
+ break;
+
+ SVal upperbound
+ = svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
+ cast<NonLoc>(extentVal),
+ svalBuilder.getConditionType());
+
+ NonLoc *upperboundToCheck = dyn_cast<NonLoc>(&upperbound);
+ if (!upperboundToCheck)
+ break;
+
+ const GRState *state_exceedsUpperBound, *state_withinUpperBound;
+ llvm::tie(state_exceedsUpperBound, state_withinUpperBound) =
+ state->assume(*upperboundToCheck);
+
+ // Are we constrained enough to definitely exceed the upper bound?
+ if (state_exceedsUpperBound && !state_withinUpperBound) {
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
+ return;
+ }
+
+ assert(state_withinUpperBound);
+ state = state_withinUpperBound;
+ }
+ while (false);
+
+ if (state != originalState)
+ checkerContext.generateNode(state);
+}
+
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
+ const GRState *errorState,
+ OOB_Kind kind) {
+
+ ExplodedNode *errorNode = checkerContext.generateSink(errorState);
+ if (!errorNode)
+ return;
+
+ if (!BT)
+ BT = new BuiltinBug("Out-of-bound access");
+
+  // FIXME: These diagnostics are preliminary. We should get far better
+ // diagnostics for explaining buffer overruns.
+
+ llvm::SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Out of bound memory access "
+ << (kind == OOB_Precedes ? "(accessed memory precedes memory block)"
+ : "(access exceeds upper limit of memory block)");
+
+ checkerContext.EmitReport(new RangedBugReport(*BT, os.str(), errorNode));
+}
+
+void RegionRawOffsetV2::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffsetV2::dumpToStream(llvm::raw_ostream& os) const {
+ os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+
+// Lazily computes a value to be used by 'computeOffset'. If 'val'
+// is unknown or undefined, we lazily substitute '0'. Otherwise,
+// return 'val'.
+static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
+ return isa<UndefinedVal>(val) ? svalBuilder.makeArrayIndex(0) : val;
+}
+
+// Scale a base value by a scaling factor, and return the scaled
+// value as an SVal. Used by 'computeOffset'.
+static inline SVal scaleValue(const GRState *state,
+ NonLoc baseVal, CharUnits scaling,
+ SValBuilder &sb) {
+ return sb.evalBinOpNN(state, BO_Mul, baseVal,
+ sb.makeArrayIndex(scaling.getQuantity()),
+ sb.getArrayIndexType());
+}
+
+// Add an SVal to another, treating unknown and undefined values as
+// summing to UnknownVal. Used by 'computeOffset'.
+static SVal addValue(const GRState *state, SVal x, SVal y,
+ SValBuilder &svalBuilder) {
+ // We treat UnknownVals and UndefinedVals the same here because we
+ // only care about computing offsets.
+ if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
+ return UnknownVal();
+
+ return svalBuilder.evalBinOpNN(state, BO_Add,
+ cast<NonLoc>(x), cast<NonLoc>(y),
+ svalBuilder.getArrayIndexType());
+}
+
+/// Compute a raw byte offset from a base region. Used for array bounds
+/// checking.
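+///
+/// For example (assuming 4-byte ints):
+///   int buf[10][10];
+///   buf[1][2] = 0;
+/// peels both ElementRegions off the access and folds them into a single
+/// byte offset, 1 * sizeof(int[10]) + 2 * sizeof(int) = 48, leaving 'buf'
+/// as the base region.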
+RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(const GRState *state,
+ SValBuilder &svalBuilder,
+ SVal location)
+{
+ const MemRegion *region = location.getAsRegion();
+ SVal offset = UndefinedVal();
+
+ while (region) {
+ switch (region->getKind()) {
+ default: {
+ if (const SubRegion *subReg = dyn_cast<SubRegion>(region))
+ if (!offset.isUnknownOrUndef())
+ return RegionRawOffsetV2(subReg, offset);
+ return RegionRawOffsetV2();
+ }
+ case MemRegion::ElementRegionKind: {
+ const ElementRegion *elemReg = cast<ElementRegion>(region);
+ SVal index = elemReg->getIndex();
+ if (!isa<NonLoc>(index))
+ return RegionRawOffsetV2();
+ QualType elemType = elemReg->getElementType();
+ // If the element is an incomplete type, go no further.
+ ASTContext &astContext = svalBuilder.getContext();
+ if (!IsCompleteType(astContext, elemType))
+ return RegionRawOffsetV2();
+
+ // Update the offset.
+ offset = addValue(state,
+ getValue(offset, svalBuilder),
+ scaleValue(state,
+ cast<NonLoc>(index),
+ astContext.getTypeSizeInChars(elemType),
+ svalBuilder),
+ svalBuilder);
+
+ if (offset.isUnknownOrUndef())
+ return RegionRawOffsetV2();
+
+ region = elemReg->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ return RegionRawOffsetV2();
+}
+
+
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
index d0bccb2..e4865b1 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
@@ -7,16 +7,17 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines AttrNonNullChecker, a builtin check in GRExprEngine that
+// This defines AttrNonNullChecker, a builtin check in ExprEngine that
// performs checks for arguments declared to have nonnull attribute.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class AttrNonNullChecker
@@ -32,7 +33,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterAttrNonNullChecker(GRExprEngine &Eng) {
+void ento::RegisterAttrNonNullChecker(ExprEngine &Eng) {
Eng.registerCheck(new AttrNonNullChecker());
}
@@ -67,14 +68,36 @@ void AttrNonNullChecker::PreVisitCallExpr(CheckerContext &C,
if (!DV)
continue;
+ if (!isa<Loc>(*DV)) {
+      // If the argument is a union type, we want to handle a potential
+      // transparent_union GCC extension.
+ QualType T = (*I)->getType();
+ const RecordType *UT = T->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ continue;
+ if (nonloc::CompoundVal *CSV = dyn_cast<nonloc::CompoundVal>(DV)) {
+ nonloc::CompoundVal::iterator CSV_I = CSV->begin();
+ assert(CSV_I != CSV->end());
+ V = *CSV_I;
+ DV = dyn_cast<DefinedSVal>(&V);
+ assert(++CSV_I == CSV->end());
+ if (!DV)
+ continue;
+ }
+ else {
+ // FIXME: Handle LazyCompoundVals?
+ continue;
+ }
+ }
+
ConstraintManager &CM = C.getConstraintManager();
const GRState *stateNotNull, *stateNull;
- llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (stateNull && !stateNotNull) {
// Generate an error node. Check for a null node in case
// we cache out.
- if (ExplodedNode *errorNode = C.GenerateSink(stateNull)) {
+ if (ExplodedNode *errorNode = C.generateSink(stateNull)) {
// Lazily allocate the BugType object if it hasn't already been
// created. Ownership is transferred to the BugReporter object once
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..1b6c528
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,521 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BasicObjCFoundationChecks.h"
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class APIMisuse : public BugType {
+public:
+ APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static const ObjCInterfaceType* GetReceiverType(const ObjCMessage &msg) {
+ if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
+ return ID->getTypeForDecl()->getAs<ObjCInterfaceType>();
+ return NULL;
+}
+
+static const char* GetReceiverNameType(const ObjCMessage &msg) {
+ if (const ObjCInterfaceType *ReceiverType = GetReceiverType(msg))
+ return ReceiverType->getDecl()->getIdentifier()->getNameStart();
+ return NULL;
+}
+
+static bool isNSString(llvm::StringRef ClassName) {
+ return ClassName == "NSString" || ClassName == "NSMutableString";
+}
+
+static inline bool isNil(SVal X) {
+ return isa<loc::ConcreteInt>(X);
+}
+
+//===----------------------------------------------------------------------===//
+// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class NilArgChecker : public CheckerVisitor<NilArgChecker> {
+ APIMisuse *BT;
+ void WarnNilArg(CheckerContext &C, const ObjCMessage &msg, unsigned Arg);
+ public:
+ NilArgChecker() : BT(0) {}
+ static void *getTag() { static int x = 0; return &x; }
+ void preVisitObjCMessage(CheckerContext &C, ObjCMessage msg);
+ };
+}
+
+void NilArgChecker::WarnNilArg(CheckerContext &C,
+ const ObjCMessage &msg,
+ unsigned int Arg)
+{
+ if (!BT)
+ BT = new APIMisuse("nil argument");
+
+ if (ExplodedNode *N = C.generateSink()) {
+ llvm::SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Argument to '" << GetReceiverNameType(msg) << "' method '"
+ << msg.getSelector().getAsString() << "' cannot be nil";
+
+ RangedBugReport *R = new RangedBugReport(*BT, os.str(), N);
+ R->addRange(msg.getArgSourceRange(Arg));
+ C.EmitReport(R);
+ }
+}
+
+void NilArgChecker::preVisitObjCMessage(CheckerContext &C,
+ ObjCMessage msg)
+{
+ const ObjCInterfaceType *ReceiverType = GetReceiverType(msg);
+ if (!ReceiverType)
+ return;
+
+ if (isNSString(ReceiverType->getDecl()->getIdentifier()->getName())) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+ // FIXME: This is going to be really slow doing these checks with
+ // lexical comparisons.
+
+ std::string NameStr = S.getAsString();
+ llvm::StringRef Name(NameStr);
+ assert(!Name.empty());
+
+ // FIXME: Checking for initWithFormat: will not work in most cases
+ // yet because [NSString alloc] returns id, not NSString*. We will
+ // need support for tracking expected-type information in the analyzer
+ // to find these errors.
+ if (Name == "caseInsensitiveCompare:" ||
+ Name == "compare:" ||
+ Name == "compare:options:" ||
+ Name == "compare:options:range:" ||
+ Name == "compare:options:range:locale:" ||
+ Name == "componentsSeparatedByCharactersInSet:" ||
+ Name == "initWithFormat:") {
+ if (isNil(msg.getArgSVal(0, C.getState())))
+ WarnNilArg(C, msg, 0);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFNumberCreateChecker : public CheckerVisitor<CFNumberCreateChecker> {
+ APIMisuse* BT;
+ IdentifierInfo* II;
+public:
+ CFNumberCreateChecker() : BT(0), II(0) {}
+ ~CFNumberCreateChecker() {}
+ static void *getTag() { static int x = 0; return &x; }
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+private:
+ void EmitError(const TypedRegion* R, const Expr* Ex,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+ kCFNumberSInt8Type = 1,
+ kCFNumberSInt16Type = 2,
+ kCFNumberSInt32Type = 3,
+ kCFNumberSInt64Type = 4,
+ kCFNumberFloat32Type = 5,
+ kCFNumberFloat64Type = 6,
+ kCFNumberCharType = 7,
+ kCFNumberShortType = 8,
+ kCFNumberIntType = 9,
+ kCFNumberLongType = 10,
+ kCFNumberLongLongType = 11,
+ kCFNumberFloatType = 12,
+ kCFNumberDoubleType = 13,
+ kCFNumberCFIndexType = 14,
+ kCFNumberNSIntegerType = 15,
+ kCFNumberCGFloatType = 16
+};
+
+namespace {
+ template<typename T>
+ class Optional {
+ bool IsKnown;
+ T Val;
+ public:
+ Optional() : IsKnown(false), Val(0) {}
+ Optional(const T& val) : IsKnown(true), Val(val) {}
+
+ bool isKnown() const { return IsKnown; }
+
+ const T& getValue() const {
+ assert (isKnown());
+ return Val;
+ }
+
+ operator const T&() const {
+ return getValue();
+ }
+ };
+}
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
+ static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
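+  // Indexed by (kind - 1): e.g. kCFNumberSInt16Type == 2 maps to
+  // FixedSize[1] == 16 bits; the two float kinds reuse the 32/64 slots.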
+
+ if (i < kCFNumberCharType)
+ return FixedSize[i-1];
+
+ QualType T;
+
+ switch (i) {
+ case kCFNumberCharType: T = Ctx.CharTy; break;
+ case kCFNumberShortType: T = Ctx.ShortTy; break;
+ case kCFNumberIntType: T = Ctx.IntTy; break;
+ case kCFNumberLongType: T = Ctx.LongTy; break;
+ case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+ case kCFNumberFloatType: T = Ctx.FloatTy; break;
+ case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
+ case kCFNumberCFIndexType:
+ case kCFNumberNSIntegerType:
+ case kCFNumberCGFloatType:
+ // FIXME: We need a way to map from names to Type*.
+ default:
+ return Optional<uint64_t>();
+ }
+
+ return Ctx.getTypeSize(T);
+}
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+ static const char* Names[] = {
+ "kCFNumberSInt8Type",
+ "kCFNumberSInt16Type",
+ "kCFNumberSInt32Type",
+ "kCFNumberSInt64Type",
+ "kCFNumberFloat32Type",
+ "kCFNumberFloat64Type",
+ "kCFNumberCharType",
+ "kCFNumberShortType",
+ "kCFNumberIntType",
+ "kCFNumberLongType",
+ "kCFNumberLongLongType",
+ "kCFNumberFloatType",
+ "kCFNumberDoubleType",
+ "kCFNumberCFIndexType",
+ "kCFNumberNSIntegerType",
+ "kCFNumberCGFloatType"
+ };
+
+ return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+void CFNumberCreateChecker::PreVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE)
+{
+ const Expr* Callee = CE->getCallee();
+ const GRState *state = C.getState();
+ SVal CallV = state->getSVal(Callee);
+ const FunctionDecl* FD = CallV.getAsFunctionDecl();
+
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II)
+ II = &Ctx.Idents.get("CFNumberCreate");
+
+ if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
+ return;
+
+ // Get the value of the "theType" argument.
+ SVal TheTypeVal = state->getSVal(CE->getArg(1));
+
+ // FIXME: We really should allow ranges of valid theType values, and
+ // bifurcate the state appropriately.
+ nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
+ if (!V)
+ return;
+
+ uint64_t NumberKind = V->getValue().getLimitedValue();
+ Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+ // FIXME: In some cases we can emit an error.
+ if (!TargetSize.isKnown())
+ return;
+
+ // Look at the value of the integer being passed by reference. Essentially
+  // we want to catch cases where the size of the integer passed in does not
+  // match the size of the type being created.
+ SVal TheValueExpr = state->getSVal(CE->getArg(2));
+
+ // FIXME: Eventually we should handle arbitrary locations. We can do this
+ // by having an enhanced memory model that does low-level typing.
+ loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
+ if (!LV)
+ return;
+
+ const TypedRegion* R = dyn_cast<TypedRegion>(LV->stripCasts());
+ if (!R)
+ return;
+
+ QualType T = Ctx.getCanonicalType(R->getValueType());
+
+ // FIXME: If the pointee isn't an integer type, should we flag a warning?
+ // People can do weird stuff with pointers.
+
+ if (!T->isIntegerType())
+ return;
+
+ uint64_t SourceSize = Ctx.getTypeSize(T);
+
+ // CHECK: is SourceSize == TargetSize
+ if (SourceSize == TargetSize)
+ return;
+
+ // Generate an error. Only generate a sink if 'SourceSize < TargetSize';
+ // otherwise generate a regular node.
+ //
+ // FIXME: We can actually create an abstract "CFNumber" object that has
+ // the bits initialized to the provided values.
+ //
+ if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
+ : C.generateNode()) {
+ llvm::SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << (SourceSize == 8 ? "An " : "A ")
+ << SourceSize << " bit integer is used to initialize a CFNumber "
+ "object that represents "
+ << (TargetSize == 8 ? "an " : "a ")
+ << TargetSize << " bit integer. ";
+
+ if (SourceSize < TargetSize)
+ os << (TargetSize - SourceSize)
+ << " bits of the CFNumber value will be garbage." ;
+ else
+ os << (SourceSize - TargetSize)
+ << " bits of the input integer will be lost.";
+
+ if (!BT)
+ BT = new APIMisuse("Bad use of CFNumberCreate");
+
+ RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
+ report->addRange(CE->getArg(2)->getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease checking for null arguments.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFRetainReleaseChecker : public CheckerVisitor<CFRetainReleaseChecker> {
+ APIMisuse *BT;
+ IdentifierInfo *Retain, *Release;
+public:
+ CFRetainReleaseChecker(): BT(0), Retain(0), Release(0) {}
+ static void *getTag() { static int x = 0; return &x; }
+ void PreVisitCallExpr(CheckerContext& C, const CallExpr* CE);
+};
+} // end anonymous namespace
+
+
+void CFRetainReleaseChecker::PreVisitCallExpr(CheckerContext& C,
+ const CallExpr* CE) {
+  // If the CallExpr doesn't have exactly 1 argument, just give up checking.
+ if (CE->getNumArgs() != 1)
+ return;
+
+ // Get the function declaration of the callee.
+ const GRState* state = C.getState();
+ SVal X = state->getSVal(CE->getCallee());
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+
+ if (!FD)
+ return;
+
+ if (!BT) {
+ ASTContext &Ctx = C.getASTContext();
+ Retain = &Ctx.Idents.get("CFRetain");
+ Release = &Ctx.Idents.get("CFRelease");
+ BT = new APIMisuse("null passed to CFRetain/CFRelease");
+ }
+
+ // Check if we called CFRetain/CFRelease.
+ const IdentifierInfo *FuncII = FD->getIdentifier();
+ if (!(FuncII == Retain || FuncII == Release))
+ return;
+
+ // FIXME: The rest of this just checks that the argument is non-null.
+ // It should probably be refactored and combined with AttrNonNullChecker.
+
+ // Get the argument's value.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg);
+ DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
+ if (!DefArgVal)
+ return;
+
+ // Get a NULL value.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedSVal zero = cast<DefinedSVal>(svalBuilder.makeZeroVal(Arg->getType()));
+
+ // Make an expression asserting that they're equal.
+ DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
+
+ // Are they equal?
+ const GRState *stateTrue, *stateFalse;
+ llvm::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+
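+  // assume() bifurcates the state: a non-null stateTrue together with a
+  // null stateFalse means the argument is provably null on this path,
+  // not merely possibly null.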
+ if (stateTrue && !stateFalse) {
+ ExplodedNode *N = C.generateSink(stateTrue);
+ if (!N)
+ return;
+
+ const char *description = (FuncII == Retain)
+ ? "Null pointer argument in call to CFRetain"
+ : "Null pointer argument in call to CFRelease";
+
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, description, N);
+ report->addRange(Arg->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Arg);
+ C.EmitReport(report);
+ return;
+ }
+
+ // From here on, we know the argument is non-null.
+ C.addTransition(stateFalse);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ClassReleaseChecker : public CheckerVisitor<ClassReleaseChecker> {
+ Selector releaseS;
+ Selector retainS;
+ Selector autoreleaseS;
+ Selector drainS;
+ BugType *BT;
+public:
+ ClassReleaseChecker()
+ : BT(0) {}
+
+ static void *getTag() { static int x = 0; return &x; }
+
+ void preVisitObjCMessage(CheckerContext &C, ObjCMessage msg);
+};
+}
+
+void ClassReleaseChecker::preVisitObjCMessage(CheckerContext &C,
+ ObjCMessage msg) {
+
+ if (!BT) {
+ BT = new APIMisuse("message incorrectly sent to class instead of class "
+ "instance");
+
+ ASTContext &Ctx = C.getASTContext();
+ releaseS = GetNullarySelector("release", Ctx);
+ retainS = GetNullarySelector("retain", Ctx);
+ autoreleaseS = GetNullarySelector("autorelease", Ctx);
+ drainS = GetNullarySelector("drain", Ctx);
+ }
+
+ if (msg.isInstanceMessage())
+ return;
+ const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+ assert(Class);
+
+ Selector S = msg.getSelector();
+ if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
+ return;
+
+ if (ExplodedNode *N = C.generateNode()) {
+ llvm::SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "The '" << S.getAsString() << "' message should be sent to instances "
+ "of class '" << Class->getName()
+ << "' and not the class directly";
+
+ RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
+ report->addRange(msg.getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
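
Note the lazy setup in preVisitObjCMessage: the selectors are interned once, on the first message visited, and every later visit is a cheap equality test. A plain-C++ sketch of the same initialize-once lookup, using a function-local static (illustrative names only):

#include <iostream>
#include <set>
#include <string>

static bool isReferenceCountingSelector(const std::string &sel) {
    // Built on first call only, then reused for every later query.
    static const std::set<std::string> selectors = {
        "release", "retain", "autorelease", "drain"};
    return selectors.count(sel) != 0;
}

int main() {
    std::cout << isReferenceCountingSelector("retain") << '\n';      // 1
    std::cout << isReferenceCountingSelector("description") << '\n'; // 0
}
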
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+static void RegisterNilArgChecker(ExprEngine& Eng) {
+ Eng.registerCheck(new NilArgChecker());
+}
+
+void ento::registerNilArgChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterNilArgChecker);
+}
+
+static void RegisterCFNumberCreateChecker(ExprEngine& Eng) {
+ Eng.registerCheck(new CFNumberCreateChecker());
+}
+
+void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterCFNumberCreateChecker);
+}
+
+static void RegisterCFRetainReleaseChecker(ExprEngine& Eng) {
+ Eng.registerCheck(new CFRetainReleaseChecker());
+}
+
+void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterCFRetainReleaseChecker);
+}
+
+static void RegisterClassReleaseChecker(ExprEngine& Eng) {
+ Eng.registerCheck(new ClassReleaseChecker());
+}
+
+void ento::registerClassReleaseChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterClassReleaseChecker);
+}
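
Each checker now goes through two levels of indirection: a file-static factory that news up the checker against the ExprEngine, and a public register* entry point that hands that factory to the CheckerManager for deferred execution. A self-contained sketch of the pattern with stand-in types (hypothetical, not the clang classes):

#include <functional>
#include <iostream>
#include <vector>

struct Engine {
    void registerCheck(const char *name) { std::cout << "check: " << name << '\n'; }
};

struct Manager {
    std::vector<std::function<void(Engine &)>> factories;
    void addCheckerRegisterFunction(std::function<void(Engine &)> f) {
        factories.push_back(std::move(f)); // stored now, run later
    }
    void initialize(Engine &eng) {
        for (auto &f : factories) f(eng); // all deferred registrations fire
    }
};

int main() {
    Manager mgr;
    mgr.addCheckerRegisterFunction([](Engine &e) { e.registerCheck("NilArgChecker"); });
    mgr.addCheckerRegisterFunction([](Engine &e) { e.registerCheck("ClassReleaseChecker"); });
    Engine eng;
    mgr.initialize(eng);
}
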
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.h
index 8fb0570..92cfb1a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.h
@@ -13,25 +13,22 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
-#define LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
+#ifndef LLVM_CLANG_GR_BASICOBJCFOUNDATIONCHECKS
+#define LLVM_CLANG_GR_BASICOBJCFOUNDATIONCHECKS
namespace clang {
class ASTContext;
-class BugReporter;
class Decl;
-class GRExprEngine;
-class GRSimpleAPICheck;
-GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
- BugReporter& BR);
+namespace ento {
+
+class BugReporter;
+class ExprEngine;
-GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
- BugReporter& BR);
+void RegisterNSErrorChecks(BugReporter& BR, ExprEngine &Eng, const Decl &D);
-void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
-void RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng);
+} // end ento namespace
} // end clang namespace
diff --git a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 057e474..417b015 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -11,27 +11,28 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
#include "clang/Basic/Builtins.h"
using namespace clang;
+using namespace ento;
namespace {
class BuiltinFunctionChecker : public Checker {
public:
static void *getTag() { static int tag = 0; return &tag; }
- virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+ virtual bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
};
}
-void clang::RegisterBuiltinFunctionChecker(GRExprEngine &Eng) {
+void ento::RegisterBuiltinFunctionChecker(ExprEngine &Eng) {
Eng.registerCheck(new BuiltinFunctionChecker());
}
-bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
+bool BuiltinFunctionChecker::evalCallExpr(CheckerContext &C,const CallExpr *CE){
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
SVal L = state->getSVal(Callee);
@@ -50,7 +51,7 @@ bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
// For __builtin_expect, just return the value of the subexpression.
assert (CE->arg_begin() != CE->arg_end());
SVal X = state->getSVal(*(CE->arg_begin()));
- C.GenerateNode(state->BindExpr(CE, X));
+ C.generateNode(state->BindExpr(CE, X));
return true;
}
@@ -67,15 +68,13 @@ bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
DefinedOrUnknownSVal Size =
cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin())));
- ValueManager& ValMgr = C.getValueManager();
- DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+ SValBuilder& svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal extentMatchesSizeArg =
+ svalBuilder.evalEQ(state, Extent, Size);
+ state = state->assume(extentMatchesSizeArg, true);
- SValuator& SVator = ValMgr.getSValuator();
- DefinedOrUnknownSVal ExtentMatchesSizeArg =
- SVator.EvalEQ(state, Extent, Size);
- state = state->Assume(ExtentMatchesSizeArg, true);
-
- C.GenerateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
+ C.generateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
return true;
}
}
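
The __builtin_expect case above works because the builtin's value is, by definition, its first argument; the second is only a branch-prediction hint. That is real GCC/Clang behavior and can be observed directly:

#include <iostream>

int main() {
    int x = 7;
    // Evaluates to (x == 42); the "expected" value 0 never changes the result.
    if (__builtin_expect(x == 42, 0))
        std::cout << "unlikely path\n";
    else
        std::cout << "likely path, x = " << x << '\n';
    long v = __builtin_expect(123L, 0L);
    std::cout << v << '\n'; // prints 123: the hint is invisible to the value
}
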
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
new file mode 100644
index 0000000..e172a52
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -0,0 +1,70 @@
+set(LLVM_TARGET_DEFINITIONS Checkers.td)
+tablegen(Checkers.inc
+ -gen-clang-sa-checkers
+ -I ${CMAKE_CURRENT_SOURCE_DIR}/../../../include)
+add_custom_target(ClangSACheckers
+ DEPENDS Checkers.inc)
+
+set(LLVM_USED_LIBS clangBasic clangAST)
+
+add_clang_library(clangStaticAnalyzerCheckers
+ AdjustedReturnValueChecker.cpp
+ AnalyzerStatsChecker.cpp
+ ArrayBoundChecker.cpp
+ ArrayBoundCheckerV2.cpp
+ AttrNonNullChecker.cpp
+ BasicObjCFoundationChecks.cpp
+ BuiltinFunctionChecker.cpp
+ CStringChecker.cpp
+ CallAndMessageChecker.cpp
+ CastSizeChecker.cpp
+ CastToStructChecker.cpp
+ CheckObjCDealloc.cpp
+ CheckObjCInstMethSignature.cpp
+ CheckSecuritySyntaxOnly.cpp
+ CheckSizeofPointer.cpp
+ ChrootChecker.cpp
+ ClangSACheckerProvider.cpp
+ DeadStoresChecker.cpp
+ DebugCheckers.cpp
+ DereferenceChecker.cpp
+ DivZeroChecker.cpp
+ ExprEngine.cpp
+ ExperimentalChecks.cpp
+ FixedAddressChecker.cpp
+ IdempotentOperationChecker.cpp
+ LLVMConventionsChecker.cpp
+ MacOSXAPIChecker.cpp
+ MallocChecker.cpp
+ NSAutoreleasePoolChecker.cpp
+ NSErrorChecker.cpp
+ NoReturnFunctionChecker.cpp
+ OSAtomicChecker.cpp
+ ObjCAtSyncChecker.cpp
+ ObjCSelfInitChecker.cpp
+ ObjCUnusedIVarsChecker.cpp
+ PointerArithChecker.cpp
+ PointerSubChecker.cpp
+ PthreadLockChecker.cpp
+ ReturnPointerRangeChecker.cpp
+ ReturnUndefChecker.cpp
+ StackAddrLeakChecker.cpp
+ StreamChecker.cpp
+ UndefBranchChecker.cpp
+ UndefCapturedBlockVarChecker.cpp
+ UndefResultChecker.cpp
+ UndefinedArraySubscriptChecker.cpp
+ UndefinedAssignmentChecker.cpp
+ UnixAPIChecker.cpp
+ UnreachableCodeChecker.cpp
+ VLASizeChecker.cpp
+ )
+
+add_dependencies(clangStaticAnalyzerCheckers
+ clangStaticAnalyzerCore
+ ClangAttrClasses
+ ClangAttrList
+ ClangDeclNodes
+ ClangStmtNodes
+ ClangSACheckers
+ )
diff --git a/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 9ea572f..03f9047 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,13 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineExperimentalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
+using namespace ento;
namespace {
class CStringChecker : public CheckerVisitor<CStringChecker> {
@@ -29,11 +31,11 @@ public:
{}
static void *getTag() { static int tag; return &tag; }
- bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+ bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
void PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS);
void MarkLiveSymbols(const GRState *state, SymbolReaper &SR);
- void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SR);
- bool WantsRegionChangeUpdate(const GRState *state);
+ void evalDeadSymbols(CheckerContext &C, SymbolReaper &SR);
+ bool wantsRegionChangeUpdate(const GRState *state);
const GRState *EvalRegionChanges(const GRState *state,
const MemRegion * const *Begin,
@@ -42,30 +44,30 @@ public:
typedef void (CStringChecker::*FnCheck)(CheckerContext &, const CallExpr *);
- void EvalMemcpy(CheckerContext &C, const CallExpr *CE);
- void EvalMemmove(CheckerContext &C, const CallExpr *CE);
- void EvalBcopy(CheckerContext &C, const CallExpr *CE);
- void EvalCopyCommon(CheckerContext &C, const GRState *state,
+ void evalMemcpy(CheckerContext &C, const CallExpr *CE);
+ void evalMemmove(CheckerContext &C, const CallExpr *CE);
+ void evalBcopy(CheckerContext &C, const CallExpr *CE);
+ void evalCopyCommon(CheckerContext &C, const GRState *state,
const Expr *Size, const Expr *Source, const Expr *Dest,
bool Restricted = false);
- void EvalMemcmp(CheckerContext &C, const CallExpr *CE);
+ void evalMemcmp(CheckerContext &C, const CallExpr *CE);
- void EvalStrlen(CheckerContext &C, const CallExpr *CE);
+ void evalstrLength(CheckerContext &C, const CallExpr *CE);
- void EvalStrcpy(CheckerContext &C, const CallExpr *CE);
- void EvalStpcpy(CheckerContext &C, const CallExpr *CE);
- void EvalStrcpyCommon(CheckerContext &C, const CallExpr *CE, bool ReturnEnd);
+ void evalStrcpy(CheckerContext &C, const CallExpr *CE);
+ void evalStpcpy(CheckerContext &C, const CallExpr *CE);
+ void evalStrcpyCommon(CheckerContext &C, const CallExpr *CE, bool returnEnd);
// Utility methods
std::pair<const GRState*, const GRState*>
- AssumeZero(CheckerContext &C, const GRState *state, SVal V, QualType Ty);
+ assumeZero(CheckerContext &C, const GRState *state, SVal V, QualType Ty);
- const GRState *SetCStringLength(const GRState *state, const MemRegion *MR,
- SVal StrLen);
- SVal GetCStringLengthForRegion(CheckerContext &C, const GRState *&state,
+ const GRState *setCStringLength(const GRState *state, const MemRegion *MR,
+ SVal strLength);
+ SVal getCStringLengthForRegion(CheckerContext &C, const GRState *&state,
const Expr *Ex, const MemRegion *MR);
- SVal GetCStringLength(CheckerContext &C, const GRState *&state,
+ SVal getCStringLength(CheckerContext &C, const GRState *&state,
const Expr *Ex, SVal Buf);
const GRState *InvalidateBuffer(CheckerContext &C, const GRState *state,
@@ -75,7 +77,7 @@ public:
const MemRegion *MR);
// Re-usable checks
- const GRState *CheckNonNull(CheckerContext &C, const GRState *state,
+ const GRState *checkNonNull(CheckerContext &C, const GRState *state,
const Expr *S, SVal l);
const GRState *CheckLocation(CheckerContext &C, const GRState *state,
const Expr *S, SVal l,
@@ -88,7 +90,7 @@ public:
const GRState *CheckOverlap(CheckerContext &C, const GRState *state,
const Expr *Size, const Expr *First,
const Expr *Second);
- void EmitOverlapBug(CheckerContext &C, const GRState *state,
+ void emitOverlapBug(CheckerContext &C, const GRState *state,
const Stmt *First, const Stmt *Second);
};
@@ -99,38 +101,40 @@ public:
} //end anonymous namespace
namespace clang {
+namespace ento {
template <>
struct GRStateTrait<CStringLength>
: public GRStatePartialTrait<CStringLength::EntryMap> {
static void *GDMIndex() { return CStringChecker::getTag(); }
};
}
+}
-void clang::RegisterCStringChecker(GRExprEngine &Eng) {
+static void RegisterCStringChecker(ExprEngine &Eng) {
Eng.registerCheck(new CStringChecker());
}
+void ento::registerCStringChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterCStringChecker);
+}
+
//===----------------------------------------------------------------------===//
// Individual checks and utility methods.
//===----------------------------------------------------------------------===//
std::pair<const GRState*, const GRState*>
-CStringChecker::AssumeZero(CheckerContext &C, const GRState *state, SVal V,
+CStringChecker::assumeZero(CheckerContext &C, const GRState *state, SVal V,
QualType Ty) {
- DefinedSVal *Val = dyn_cast<DefinedSVal>(&V);
- if (!Val)
+ DefinedSVal *val = dyn_cast<DefinedSVal>(&V);
+ if (!val)
return std::pair<const GRState*, const GRState *>(state, state);
- ValueManager &ValMgr = C.getValueManager();
- SValuator &SV = ValMgr.getSValuator();
-
- DefinedOrUnknownSVal Zero = ValMgr.makeZeroVal(Ty);
- DefinedOrUnknownSVal ValIsZero = SV.EvalEQ(state, *Val, Zero);
-
- return state->Assume(ValIsZero);
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
+ return state->assume(svalBuilder.evalEQ(state, *val, zero));
}
-const GRState *CStringChecker::CheckNonNull(CheckerContext &C,
+const GRState *CStringChecker::checkNonNull(CheckerContext &C,
const GRState *state,
const Expr *S, SVal l) {
// If a previous check has failed, propagate the failure.
@@ -138,10 +142,10 @@ const GRState *CStringChecker::CheckNonNull(CheckerContext &C,
return NULL;
const GRState *stateNull, *stateNonNull;
- llvm::tie(stateNull, stateNonNull) = AssumeZero(C, state, l, S->getType());
+ llvm::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
if (stateNull && !stateNonNull) {
- ExplodedNode *N = C.GenerateSink(stateNull);
+ ExplodedNode *N = C.generateSink(stateNull);
if (!N)
return NULL;
@@ -187,18 +191,18 @@ const GRState *CStringChecker::CheckLocation(CheckerContext &C,
"CheckLocation should only be called with char* ElementRegions");
// Get the size of the array.
- const SubRegion *Super = cast<SubRegion>(ER->getSuperRegion());
- ValueManager &ValMgr = C.getValueManager();
- SVal Extent = ValMgr.convertToArrayIndex(Super->getExtent(ValMgr));
+ const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal Extent = svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent);
// Get the index of the accessed element.
- DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
- const GRState *StInBound = state->AssumeInBound(Idx, Size, true);
- const GRState *StOutBound = state->AssumeInBound(Idx, Size, false);
+ const GRState *StInBound = state->assumeInBound(Idx, Size, true);
+ const GRState *StOutBound = state->assumeInBound(Idx, Size, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.GenerateSink(StOutBound);
+ ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
return NULL;
@@ -244,16 +248,15 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
if (!state)
return NULL;
- ValueManager &VM = C.getValueManager();
- SValuator &SV = VM.getSValuator();
+ SValBuilder &svalBuilder = C.getSValBuilder();
ASTContext &Ctx = C.getASTContext();
- QualType SizeTy = Size->getType();
+ QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
SVal BufVal = state->getSVal(FirstBuf);
- state = CheckNonNull(C, state, FirstBuf, BufVal);
+ state = checkNonNull(C, state, FirstBuf, BufVal);
if (!state)
return NULL;
@@ -264,15 +267,15 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
return state;
// Compute the offset of the last element to be accessed: size-1.
- NonLoc One = cast<NonLoc>(VM.makeIntVal(1, SizeTy));
- NonLoc LastOffset = cast<NonLoc>(SV.EvalBinOpNN(state, BO_Sub,
- *Length, One, SizeTy));
+ NonLoc One = cast<NonLoc>(svalBuilder.makeIntVal(1, sizeTy));
+ NonLoc LastOffset = cast<NonLoc>(svalBuilder.evalBinOpNN(state, BO_Sub,
+ *Length, One, sizeTy));
// Check that the first buffer is sufficiently long.
- SVal BufStart = SV.EvalCast(BufVal, PtrTy, FirstBuf->getType());
+ SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
- SVal BufEnd = SV.EvalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
state = CheckLocation(C, state, FirstBuf, BufEnd, FirstIsDestination);
// If the buffer isn't large enough, abort.
@@ -283,14 +286,14 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
// If there's a second buffer, check it as well.
if (SecondBuf) {
BufVal = state->getSVal(SecondBuf);
- state = CheckNonNull(C, state, SecondBuf, BufVal);
+ state = checkNonNull(C, state, SecondBuf, BufVal);
if (!state)
return NULL;
- BufStart = SV.EvalCast(BufVal, PtrTy, SecondBuf->getType());
+ BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
- SVal BufEnd = SV.EvalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
state = CheckLocation(C, state, SecondBuf, BufEnd);
}
}
@@ -312,56 +315,54 @@ const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
if (!state)
return NULL;
- ValueManager &VM = state->getStateManager().getValueManager();
- SValuator &SV = VM.getSValuator();
- ASTContext &Ctx = VM.getContext();
const GRState *stateTrue, *stateFalse;
// Get the buffer values and make sure they're known locations.
- SVal FirstVal = state->getSVal(First);
- SVal SecondVal = state->getSVal(Second);
+ SVal firstVal = state->getSVal(First);
+ SVal secondVal = state->getSVal(Second);
- Loc *FirstLoc = dyn_cast<Loc>(&FirstVal);
- if (!FirstLoc)
+ Loc *firstLoc = dyn_cast<Loc>(&firstVal);
+ if (!firstLoc)
return state;
- Loc *SecondLoc = dyn_cast<Loc>(&SecondVal);
- if (!SecondLoc)
+ Loc *secondLoc = dyn_cast<Loc>(&secondVal);
+ if (!secondLoc)
return state;
// Are the two values the same?
- DefinedOrUnknownSVal EqualTest = SV.EvalEQ(state, *FirstLoc, *SecondLoc);
- llvm::tie(stateTrue, stateFalse) = state->Assume(EqualTest);
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ llvm::tie(stateTrue, stateFalse) =
+ state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
if (stateTrue && !stateFalse) {
// If the values are known to be equal, that's automatically an overlap.
- EmitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First, Second);
return NULL;
}
- // Assume the two expressions are not equal.
+ // assume the two expressions are not equal.
assert(stateFalse);
state = stateFalse;
// Which value comes first?
- QualType CmpTy = Ctx.IntTy;
- SVal Reverse = SV.EvalBinOpLL(state, BO_GT,
- *FirstLoc, *SecondLoc, CmpTy);
- DefinedOrUnknownSVal *ReverseTest = dyn_cast<DefinedOrUnknownSVal>(&Reverse);
- if (!ReverseTest)
+ ASTContext &Ctx = svalBuilder.getContext();
+ QualType cmpTy = Ctx.IntTy;
+ SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
+ *firstLoc, *secondLoc, cmpTy);
+ DefinedOrUnknownSVal *reverseTest = dyn_cast<DefinedOrUnknownSVal>(&reverse);
+ if (!reverseTest)
return state;
- llvm::tie(stateTrue, stateFalse) = state->Assume(*ReverseTest);
-
+ llvm::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
if (stateTrue) {
if (stateFalse) {
// If we don't know which one comes first, we can't perform this test.
return state;
} else {
- // Switch the values so that FirstVal is before SecondVal.
- Loc *tmpLoc = FirstLoc;
- FirstLoc = SecondLoc;
- SecondLoc = tmpLoc;
+ // Switch the values so that firstVal is before secondVal.
+ Loc *tmpLoc = firstLoc;
+ firstLoc = secondLoc;
+ secondLoc = tmpLoc;
// Switch the Exprs as well, so that they still correspond.
const Expr *tmpExpr = First;
@@ -379,41 +380,41 @@ const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
// Convert the first buffer's start address to char*.
// Bail out if the cast fails.
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal FirstStart = SV.EvalCast(*FirstLoc, CharPtrTy, First->getType());
+ SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy, First->getType());
Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart);
if (!FirstStartLoc)
return state;
// Compute the end of the first buffer. Bail out if THAT fails.
- SVal FirstEnd = SV.EvalBinOpLN(state, BO_Add,
+ SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
*FirstStartLoc, *Length, CharPtrTy);
Loc *FirstEndLoc = dyn_cast<Loc>(&FirstEnd);
if (!FirstEndLoc)
return state;
// Is the end of the first buffer past the start of the second buffer?
- SVal Overlap = SV.EvalBinOpLL(state, BO_GT,
- *FirstEndLoc, *SecondLoc, CmpTy);
+ SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
+ *FirstEndLoc, *secondLoc, cmpTy);
DefinedOrUnknownSVal *OverlapTest = dyn_cast<DefinedOrUnknownSVal>(&Overlap);
if (!OverlapTest)
return state;
- llvm::tie(stateTrue, stateFalse) = state->Assume(*OverlapTest);
+ llvm::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
if (stateTrue && !stateFalse) {
// Overlap!
- EmitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First, Second);
return NULL;
}
- // Assume the two expressions don't overlap.
+ // assume the two expressions don't overlap.
assert(stateFalse);
return stateFalse;
}
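
Stripped of the symbolic machinery, CheckOverlap encodes ordinary pointer arithmetic: normalize so the first buffer is the lower address, then the ranges overlap exactly when the end of the first reaches past the start of the second. A concrete sketch of that test:

#include <cstddef>
#include <iostream>
#include <utility>

// Ranges [first, first+n) and [second, second+n) overlap exactly when,
// after normalizing so `first` is the lower address, first + n > second.
bool buffersOverlap(const char *first, const char *second, std::size_t n) {
    if (first == second)
        return n != 0;            // identical starts: trivially overlapping
    if (first > second)
        std::swap(first, second); // normalize: first is the lower address
    return first + n > second;
}

int main() {
    char buf[16] = {};
    std::cout << buffersOverlap(buf, buf + 8, 8) << '\n'; // 0: adjacent, no overlap
    std::cout << buffersOverlap(buf, buf + 8, 9) << '\n'; // 1: one shared byte
    std::cout << buffersOverlap(buf + 8, buf, 9) << '\n'; // 1: order normalized
}
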
-void CStringChecker::EmitOverlapBug(CheckerContext &C, const GRState *state,
+void CStringChecker::emitOverlapBug(CheckerContext &C, const GRState *state,
const Stmt *First, const Stmt *Second) {
- ExplodedNode *N = C.GenerateSink(state);
+ ExplodedNode *N = C.generateSink(state);
if (!N)
return;
@@ -430,11 +431,11 @@ void CStringChecker::EmitOverlapBug(CheckerContext &C, const GRState *state,
C.EmitReport(report);
}
-const GRState *CStringChecker::SetCStringLength(const GRState *state,
+const GRState *CStringChecker::setCStringLength(const GRState *state,
const MemRegion *MR,
- SVal StrLen) {
- assert(!StrLen.isUndef() && "Attempt to set an undefined string length");
- if (StrLen.isUnknown())
+ SVal strLength) {
+ assert(!strLength.isUndef() && "Attempt to set an undefined string length");
+ if (strLength.isUnknown())
return state;
MR = MR->StripCasts();
@@ -450,7 +451,7 @@ const GRState *CStringChecker::SetCStringLength(const GRState *state,
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
- return state->set<CStringLength>(MR, StrLen);
+ return state->set<CStringLength>(MR, strLength);
case MemRegion::ElementRegionKind:
// FIXME: Handle element regions by upper-bounding the parent region's
@@ -466,7 +467,7 @@ const GRState *CStringChecker::SetCStringLength(const GRState *state,
}
}
-SVal CStringChecker::GetCStringLengthForRegion(CheckerContext &C,
+SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
const GRState *&state,
const Expr *Ex,
const MemRegion *MR) {
@@ -477,15 +478,14 @@ SVal CStringChecker::GetCStringLengthForRegion(CheckerContext &C,
// Otherwise, get a new symbol and update the state.
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- ValueManager &ValMgr = C.getValueManager();
- QualType SizeTy = ValMgr.getContext().getSizeType();
- SVal Strlen = ValMgr.getMetadataSymbolVal(getTag(), MR, Ex, SizeTy, Count);
-
- state = state->set<CStringLength>(MR, Strlen);
- return Strlen;
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ SVal strLength = svalBuilder.getMetadataSymbolVal(getTag(), MR, Ex, sizeTy, Count);
+ state = state->set<CStringLength>(MR, strLength);
+ return strLength;
}
-SVal CStringChecker::GetCStringLength(CheckerContext &C, const GRState *&state,
+SVal CStringChecker::getCStringLength(CheckerContext &C, const GRState *&state,
const Expr *Ex, SVal Buf) {
const MemRegion *MR = Buf.getAsRegion();
if (!MR) {
@@ -493,7 +493,7 @@ SVal CStringChecker::GetCStringLength(CheckerContext &C, const GRState *&state,
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&Buf)) {
- if (ExplodedNode *N = C.GenerateNode(state)) {
+ if (ExplodedNode *N = C.generateNode(state)) {
if (!BT_NotCString)
BT_NotCString = new BuiltinBug("API",
"Argument is not a null-terminated string.");
@@ -501,7 +501,7 @@ SVal CStringChecker::GetCStringLength(CheckerContext &C, const GRState *&state,
llvm::SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
os << "Argument to byte string function is the address of the label '"
- << Label->getLabel()->getID()->getName()
+ << Label->getLabel()->getName()
<< "', which is not a null-terminated string";
// Generate a report for this bug.
@@ -527,17 +527,17 @@ SVal CStringChecker::GetCStringLength(CheckerContext &C, const GRState *&state,
case MemRegion::StringRegionKind: {
// Modifying the contents of string regions is undefined [C99 6.4.5p6],
// so we can assume that the byte length is the correct C string length.
- ValueManager &ValMgr = C.getValueManager();
- QualType SizeTy = ValMgr.getContext().getSizeType();
- const StringLiteral *Str = cast<StringRegion>(MR)->getStringLiteral();
- return ValMgr.makeIntVal(Str->getByteLength(), SizeTy);
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
+ return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
}
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
- return GetCStringLengthForRegion(C, state, Ex, MR);
+ return getCStringLengthForRegion(C, state, Ex, MR);
case MemRegion::CompoundLiteralRegionKind:
// FIXME: Can we track this? Is it necessary?
return UnknownVal();
@@ -549,7 +549,7 @@ SVal CStringChecker::GetCStringLength(CheckerContext &C, const GRState *&state,
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
// The caller should always be prepared to handle this case.
- if (ExplodedNode *N = C.GenerateNode(state)) {
+ if (ExplodedNode *N = C.generateNode(state)) {
if (!BT_NotCString)
BT_NotCString = new BuiltinBug("API",
"Argument is not a null-terminated string.");
@@ -598,7 +598,7 @@ const GRState *CStringChecker::InvalidateBuffer(CheckerContext &C,
// Invalidate this region.
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- return state->InvalidateRegion(R, E, Count, NULL);
+ return state->invalidateRegion(R, E, Count, NULL);
}
// If we have a non-region value by chance, just remove the binding.
@@ -629,8 +629,8 @@ bool CStringChecker::SummarizeRegion(llvm::raw_ostream& os, ASTContext& Ctx,
os << "a block";
return true;
case MemRegion::CXXThisRegionKind:
- case MemRegion::CXXObjectRegionKind:
- os << "a C++ object of type " << TR->getValueType().getAsString();
+ case MemRegion::CXXTempObjectRegionKind:
+ os << "a C++ temp object of type " << TR->getValueType().getAsString();
return true;
case MemRegion::VarRegionKind:
os << "a variable of type" << TR->getValueType().getAsString();
@@ -647,26 +647,26 @@ bool CStringChecker::SummarizeRegion(llvm::raw_ostream& os, ASTContext& Ctx,
}
//===----------------------------------------------------------------------===//
-// Evaluation of individual function calls.
+// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
-void CStringChecker::EvalCopyCommon(CheckerContext &C, const GRState *state,
+void CStringChecker::evalCopyCommon(CheckerContext &C, const GRState *state,
const Expr *Size, const Expr *Dest,
const Expr *Source, bool Restricted) {
// See if the size argument is zero.
- SVal SizeVal = state->getSVal(Size);
- QualType SizeTy = Size->getType();
+ SVal sizeVal = state->getSVal(Size);
+ QualType sizeTy = Size->getType();
- const GRState *StZeroSize, *StNonZeroSize;
- llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+ const GRState *stateZeroSize, *stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) = assumeZero(C, state, sizeVal, sizeTy);
// If the size is zero, there won't be any actual memory access.
- if (StZeroSize)
- C.addTransition(StZeroSize);
+ if (stateZeroSize)
+ C.addTransition(stateZeroSize);
// If the size can be nonzero, we have to check the other arguments.
- if (StNonZeroSize) {
- state = StNonZeroSize;
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
state = CheckBufferAccess(C, state, Size, Dest, Source,
/* FirstIsDst = */ true);
if (Restricted)
@@ -685,58 +685,57 @@ void CStringChecker::EvalCopyCommon(CheckerContext &C, const GRState *state,
}
-void CStringChecker::EvalMemcpy(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
const GRState *state = C.getState();
state = state->BindExpr(CE, state->getSVal(Dest));
- EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1), true);
+ evalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1), true);
}
-void CStringChecker::EvalMemmove(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) {
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
const GRState *state = C.getState();
state = state->BindExpr(CE, state->getSVal(Dest));
- EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1));
+ evalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1));
}
-void CStringChecker::EvalBcopy(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) {
// void bcopy(const void *src, void *dst, size_t n);
- EvalCopyCommon(C, C.getState(), CE->getArg(2), CE->getArg(1), CE->getArg(0));
+ evalCopyCommon(C, C.getState(), CE->getArg(2), CE->getArg(1), CE->getArg(0));
}
-void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) {
// int memcmp(const void *s1, const void *s2, size_t n);
const Expr *Left = CE->getArg(0);
const Expr *Right = CE->getArg(1);
const Expr *Size = CE->getArg(2);
const GRState *state = C.getState();
- ValueManager &ValMgr = C.getValueManager();
- SValuator &SV = ValMgr.getSValuator();
+ SValBuilder &svalBuilder = C.getSValBuilder();
// See if the size argument is zero.
- SVal SizeVal = state->getSVal(Size);
- QualType SizeTy = Size->getType();
+ SVal sizeVal = state->getSVal(Size);
+ QualType sizeTy = Size->getType();
- const GRState *StZeroSize, *StNonZeroSize;
- llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+ const GRState *stateZeroSize, *stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check either of the buffers.
- if (StZeroSize) {
- state = StZeroSize;
- state = state->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ if (stateZeroSize) {
+ state = stateZeroSize;
+ state = state->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(state);
}
// If the size can be nonzero, we have to check the other arguments.
- if (StNonZeroSize) {
- state = StNonZeroSize;
-
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
@@ -744,9 +743,9 @@ void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(state->getSVal(Right));
// See if they are the same.
- DefinedOrUnknownSVal SameBuf = SV.EvalEQ(state, LV, RV);
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
const GRState *StSameBuf, *StNotSameBuf;
- llvm::tie(StSameBuf, StNotSameBuf) = state->Assume(SameBuf);
+ llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
// If the two arguments might be the same buffer, we know the result is zero,
// and we only need to check one size.
@@ -754,7 +753,7 @@ void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
state = StSameBuf;
state = CheckBufferAccess(C, state, Size, Left);
if (state) {
- state = StSameBuf->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ state = StSameBuf->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(state);
}
}
@@ -767,7 +766,7 @@ void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
if (state) {
// The return value is the comparison result, which we don't know.
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- SVal CmpV = ValMgr.getConjuredSymbolVal(NULL, CE, Count);
+ SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
state = state->BindExpr(CE, CmpV);
C.addTransition(state);
}
@@ -775,94 +774,90 @@ void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
}
}
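
The same-buffer branch in evalMemcmp relies on a simple fact: comparing a region against itself can never find a difference, so the result is known to be zero without modeling any byte reads. Concretely:

#include <cstring>
#include <iostream>

int main() {
    char data[8] = "abcdefg";
    // memcmp(p, p, n) compares a region to itself: always 0.
    std::cout << std::memcmp(data, data, sizeof data) << '\n';   // 0
    std::cout << (std::memcmp(data, "abcdefh", 8) != 0) << '\n'; // 1
}
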
-void CStringChecker::EvalStrlen(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalstrLength(CheckerContext &C, const CallExpr *CE) {
// size_t strlen(const char *s);
const GRState *state = C.getState();
const Expr *Arg = CE->getArg(0);
SVal ArgVal = state->getSVal(Arg);
// Check that the argument is non-null.
- state = CheckNonNull(C, state, Arg, ArgVal);
+ state = checkNonNull(C, state, Arg, ArgVal);
if (state) {
- SVal StrLen = GetCStringLength(C, state, Arg, ArgVal);
+ SVal strLength = getCStringLength(C, state, Arg, ArgVal);
// If the argument isn't a valid C string, there's no valid state to
// transition to.
- if (StrLen.isUndef())
+ if (strLength.isUndef())
return;
- // If GetCStringLength couldn't figure out the length, conjure a return
+ // If getCStringLength couldn't figure out the length, conjure a return
// value, so it can be used in constraints, at least.
- if (StrLen.isUnknown()) {
- ValueManager &ValMgr = C.getValueManager();
+ if (strLength.isUnknown()) {
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- StrLen = ValMgr.getConjuredSymbolVal(NULL, CE, Count);
+ strLength = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
}
// Bind the return value.
- state = state->BindExpr(CE, StrLen);
+ state = state->BindExpr(CE, strLength);
C.addTransition(state);
}
}
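
When the length is unknown, the checker "conjures" a fresh symbol rather than giving up, so later constraints (say, a comparison against a buffer size) still have something stable to attach to. A plain-C++ analogy of conjuring and caching per-region symbols (hypothetical names, not the analyzer's SymbolRef machinery):

#include <iostream>
#include <map>
#include <string>

struct Symbol { unsigned id; };            // an opaque, unconstrained value

class LengthMap {
    std::map<std::string, Symbol> lengths; // region name -> length symbol
    unsigned nextId = 0;
public:
    Symbol lengthOf(const std::string &region) {
        auto it = lengths.find(region);
        if (it != lengths.end())
            return it->second;  // reuse: same region, same symbol
        Symbol fresh{nextId++}; // mint a fresh symbol and record it
        lengths.emplace(region, fresh);
        return fresh;
    }
};

int main() {
    LengthMap m;
    std::cout << m.lengthOf("buf").id << '\n'; // 0 (freshly conjured)
    std::cout << m.lengthOf("buf").id << '\n'; // 0 (stable across queries)
    std::cout << m.lengthOf("msg").id << '\n'; // 1 (a different region)
}
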
-void CStringChecker::EvalStrcpy(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) {
// char *strcpy(char *restrict dst, const char *restrict src);
- EvalStrcpyCommon(C, CE, /* ReturnEnd = */ false);
+ evalStrcpyCommon(C, CE, /* returnEnd = */ false);
}
-void CStringChecker::EvalStpcpy(CheckerContext &C, const CallExpr *CE) {
+void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) {
// char *stpcpy(char *restrict dst, const char *restrict src);
- EvalStrcpyCommon(C, CE, /* ReturnEnd = */ true);
+ evalStrcpyCommon(C, CE, /* returnEnd = */ true);
}
-void CStringChecker::EvalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
- bool ReturnEnd) {
+void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
+ bool returnEnd) {
const GRState *state = C.getState();
// Check that the destination is non-null
const Expr *Dst = CE->getArg(0);
SVal DstVal = state->getSVal(Dst);
- state = CheckNonNull(C, state, Dst, DstVal);
+ state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
- const Expr *Src = CE->getArg(1);
- SVal SrcVal = state->getSVal(Src);
-
- state = CheckNonNull(C, state, Src, SrcVal);
+ const Expr *srcExpr = CE->getArg(1);
+ SVal srcVal = state->getSVal(srcExpr);
+ state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
return;
// Get the string length of the source.
- SVal StrLen = GetCStringLength(C, state, Src, SrcVal);
+ SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
// If the source isn't a valid C string, give up.
- if (StrLen.isUndef())
+ if (strLength.isUndef())
return;
- SVal Result = (ReturnEnd ? UnknownVal() : DstVal);
+ SVal Result = (returnEnd ? UnknownVal() : DstVal);
// If the destination is a MemRegion, try to check for a buffer overflow and
// record the new string length.
- if (loc::MemRegionVal *DstRegVal = dyn_cast<loc::MemRegionVal>(&DstVal)) {
+ if (loc::MemRegionVal *dstRegVal = dyn_cast<loc::MemRegionVal>(&DstVal)) {
// If the length is known, we can check for an overflow.
- if (NonLoc *KnownStrLen = dyn_cast<NonLoc>(&StrLen)) {
- SValuator &SV = C.getSValuator();
+ if (NonLoc *knownStrLength = dyn_cast<NonLoc>(&strLength)) {
+ SVal lastElement =
+ C.getSValBuilder().evalBinOpLN(state, BO_Add, *dstRegVal,
+ *knownStrLength, Dst->getType());
- SVal LastElement = SV.EvalBinOpLN(state, BO_Add,
- *DstRegVal, *KnownStrLen,
- Dst->getType());
-
- state = CheckLocation(C, state, Dst, LastElement, /* IsDst = */ true);
+ state = CheckLocation(C, state, Dst, lastElement, /* IsDst = */ true);
if (!state)
return;
// If this is a stpcpy-style copy, the last element is the return value.
- if (ReturnEnd)
- Result = LastElement;
+ if (returnEnd)
+ Result = lastElement;
}
// Invalidate the destination. This must happen before we set the C string
@@ -871,18 +866,18 @@ void CStringChecker::EvalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dst, *DstRegVal);
+ state = InvalidateBuffer(C, state, Dst, *dstRegVal);
// Set the C string length of the destination.
- state = SetCStringLength(state, DstRegVal->getRegion(), StrLen);
+ state = setCStringLength(state, dstRegVal->getRegion(), strLength);
}
// If this is a stpcpy-style copy, but we were unable to check for a buffer
// overflow, we still need a result. Conjure a return value.
- if (ReturnEnd && Result.isUnknown()) {
- ValueManager &ValMgr = C.getValueManager();
+ if (returnEnd && Result.isUnknown()) {
+ SValBuilder &svalBuilder = C.getSValBuilder();
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- StrLen = ValMgr.getConjuredSymbolVal(NULL, CE, Count);
+ strLength = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
}
// Set the return value.
@@ -894,7 +889,7 @@ void CStringChecker::EvalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
-bool CStringChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+bool CStringChecker::evalCallExpr(CheckerContext &C, const CallExpr *CE) {
// Get the callee. All the functions we care about are C functions
// with simple identifiers.
const GRState *state = C.getState();
@@ -905,26 +900,29 @@ bool CStringChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
return false;
// Get the name of the callee. If it's a builtin, strip off the prefix.
- llvm::StringRef Name = FD->getName();
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return false;
+ llvm::StringRef Name = II->getName();
if (Name.startswith("__builtin_"))
Name = Name.substr(10);
- FnCheck EvalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Cases("memcpy", "__memcpy_chk", &CStringChecker::EvalMemcpy)
- .Cases("memcmp", "bcmp", &CStringChecker::EvalMemcmp)
- .Cases("memmove", "__memmove_chk", &CStringChecker::EvalMemmove)
- .Cases("strcpy", "__strcpy_chk", &CStringChecker::EvalStrcpy)
- .Cases("stpcpy", "__stpcpy_chk", &CStringChecker::EvalStpcpy)
- .Case("strlen", &CStringChecker::EvalStrlen)
- .Case("bcopy", &CStringChecker::EvalBcopy)
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Cases("memcpy", "__memcpy_chk", &CStringChecker::evalMemcpy)
+ .Cases("memcmp", "bcmp", &CStringChecker::evalMemcmp)
+ .Cases("memmove", "__memmove_chk", &CStringChecker::evalMemmove)
+ .Cases("strcpy", "__strcpy_chk", &CStringChecker::evalStrcpy)
+ .Cases("stpcpy", "__stpcpy_chk", &CStringChecker::evalStpcpy)
+ .Case("strlen", &CStringChecker::evalstrLength)
+ .Case("bcopy", &CStringChecker::evalBcopy)
.Default(NULL);
// If the callee isn't a string function, let another checker handle it.
- if (!EvalFunction)
+ if (!evalFunction)
return false;
// Check and evaluate the call.
- (this->*EvalFunction)(C, CE);
+ (this->*evalFunction)(C, CE);
return true;
}
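
The StringSwitch above is a table from function names to member-function pointers, with the __builtin_ prefix stripped first and a NULL default meaning "decline, and let another checker try". The same shape with the standard library (a sketch; llvm::StringSwitch itself avoids the map):

#include <iostream>
#include <map>
#include <string>

class Evaluator {
    void evalMemcpy() { std::cout << "memcpy rules\n"; }
    void evalStrlen() { std::cout << "strlen rules\n"; }
public:
    bool evalCall(std::string name) {
        using FnCheck = void (Evaluator::*)();
        static const std::map<std::string, FnCheck> table = {
            {"memcpy", &Evaluator::evalMemcpy},
            {"strlen", &Evaluator::evalStrlen},
        };
        if (name.rfind("__builtin_", 0) == 0)
            name = name.substr(10);   // strip the builtin prefix
        auto it = table.find(name);
        if (it == table.end())
            return false;             // not ours: another checker may handle it
        (this->*it->second)();        // check and evaluate the call
        return true;
    }
};

int main() {
    Evaluator e;
    std::cout << e.evalCall("__builtin_memcpy") << '\n'; // handled  -> 1
    std::cout << e.evalCall("printf") << '\n';           // declined -> 0
}
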
@@ -955,16 +953,16 @@ void CStringChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
SVal StrVal = state->getSVal(Init);
assert(StrVal.isValid() && "Initializer string is unknown or undefined");
- DefinedOrUnknownSVal StrLen
- = cast<DefinedOrUnknownSVal>(GetCStringLength(C, state, Init, StrVal));
+ DefinedOrUnknownSVal strLength
+ = cast<DefinedOrUnknownSVal>(getCStringLength(C, state, Init, StrVal));
- state = state->set<CStringLength>(MR, StrLen);
+ state = state->set<CStringLength>(MR, strLength);
}
C.addTransition(state);
}
-bool CStringChecker::WantsRegionChangeUpdate(const GRState *state) {
+bool CStringChecker::wantsRegionChangeUpdate(const GRState *state) {
CStringLength::EntryMap Entries = state->get<CStringLength>();
return !Entries.isEmpty();
}
@@ -1001,7 +999,7 @@ const GRState *CStringChecker::EvalRegionChanges(const GRState *state,
// Is this entry for a super-region of a changed region?
if (SuperRegions.count(MR)) {
- Entries = F.Remove(Entries, MR);
+ Entries = F.remove(Entries, MR);
continue;
}
@@ -1010,7 +1008,7 @@ const GRState *CStringChecker::EvalRegionChanges(const GRState *state,
while (const SubRegion *SR = dyn_cast<SubRegion>(Super)) {
Super = SR->getSuperRegion();
if (Invalidated.count(Super)) {
- Entries = F.Remove(Entries, MR);
+ Entries = F.remove(Entries, MR);
break;
}
}
@@ -1031,7 +1029,7 @@ void CStringChecker::MarkLiveSymbols(const GRState *state, SymbolReaper &SR) {
}
}
-void CStringChecker::EvalDeadSymbols(CheckerContext &C, SymbolReaper &SR) {
+void CStringChecker::evalDeadSymbols(CheckerContext &C, SymbolReaper &SR) {
if (!SR.hasDeadSymbols())
return;
@@ -1046,10 +1044,10 @@ void CStringChecker::EvalDeadSymbols(CheckerContext &C, SymbolReaper &SR) {
SVal Len = I.getData();
if (SymbolRef Sym = Len.getAsSymbol()) {
if (SR.isDead(Sym))
- Entries = F.Remove(Entries, I.getKey());
+ Entries = F.remove(Entries, I.getKey());
}
}
state = state->set<CStringLength>(Entries);
- C.GenerateNode(state);
+ C.generateNode(state);
}
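
evalDeadSymbols keeps the CStringLength map from leaking: any entry whose length symbol the SymbolReaper declares dead is dropped from the state. The sweep has the shape of an ordinary erase-while-iterating loop:

#include <iostream>
#include <map>
#include <set>
#include <string>

int main() {
    std::map<std::string, unsigned> lengths = {{"a", 0}, {"b", 1}, {"c", 2}};
    std::set<unsigned> deadSymbols = {1};

    for (auto it = lengths.begin(); it != lengths.end();) {
        if (deadSymbols.count(it->second))
            it = lengths.erase(it); // drop bindings for dead symbols
        else
            ++it;
    }
    std::cout << lengths.size() << '\n'; // 2: only "b" was swept
}
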
diff --git a/contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 3c9ddce..415900e 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CallAndMessageChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -12,13 +12,14 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
+#include "InternalChecks.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class CallAndMessageChecker
@@ -40,19 +41,21 @@ public:
}
void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
- void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
- bool EvalNilReceiver(CheckerContext &C, const ObjCMessageExpr *ME);
+ void preVisitObjCMessage(CheckerContext &C, ObjCMessage msg);
+ bool evalNilReceiver(CheckerContext &C, ObjCMessage msg);
private:
- bool PreVisitProcessArg(CheckerContext &C, const Expr *Ex,
- const char *BT_desc, BugType *&BT);
+ void PreVisitProcessArgs(CheckerContext &C, CallOrObjCMessage callOrMsg,
+ const char *BT_desc, BugType *&BT);
+ bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange argRange,
+ const Expr *argEx, const char *BT_desc, BugType *&BT);
void EmitBadCall(BugType *BT, CheckerContext &C, const CallExpr *CE);
- void EmitNilReceiverBug(CheckerContext &C, const ObjCMessageExpr *ME,
+ void emitNilReceiverBug(CheckerContext &C, const ObjCMessage &msg,
ExplodedNode *N);
void HandleNilReceiver(CheckerContext &C, const GRState *state,
- const ObjCMessageExpr *ME);
+ ObjCMessage msg);
void LazyInit_BT(const char *desc, BugType *&BT) {
if (!BT)
@@ -61,13 +64,13 @@ private:
};
} // end anonymous namespace
-void clang::RegisterCallAndMessageChecker(GRExprEngine &Eng) {
+void ento::RegisterCallAndMessageChecker(ExprEngine &Eng) {
Eng.registerCheck(new CallAndMessageChecker());
}
void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
const CallExpr *CE) {
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
@@ -77,21 +80,32 @@ void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
C.EmitReport(R);
}
+void CallAndMessageChecker::PreVisitProcessArgs(CheckerContext &C,
+ CallOrObjCMessage callOrMsg,
+ const char *BT_desc,
+ BugType *&BT) {
+ for (unsigned i = 0, e = callOrMsg.getNumArgs(); i != e; ++i)
+ if (PreVisitProcessArg(C, callOrMsg.getArgSVal(i),
+ callOrMsg.getArgSourceRange(i), callOrMsg.getArg(i),
+ BT_desc, BT))
+ return;
+}
+
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
- const Expr *Ex,
+ SVal V, SourceRange argRange,
+ const Expr *argEx,
const char *BT_desc,
BugType *&BT) {
- const SVal &V = C.getState()->getSVal(Ex);
-
if (V.isUndef()) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BT_desc, BT);
// Generate a report for this bug.
EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getName(), N);
- R->addRange(Ex->getSourceRange());
- R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Ex);
+ R->addRange(argRange);
+ if (argEx)
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, argEx);
C.EmitReport(R);
}
return true;
@@ -143,11 +157,11 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
const LazyCompoundValData *D = LV->getCVData();
FindUninitializedField F(C.getASTContext(),
C.getState()->getStateManager().getStoreManager(),
- C.getValueManager().getRegionManager(),
+ C.getSValBuilder().getRegionManager(),
D->getStore());
if (F.Find(D->getRegion())) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BT_desc, BT);
llvm::SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
@@ -171,7 +185,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
// Generate a report for this bug.
EnhancedBugReport *R = new EnhancedBugReport(*BT, os.str(), N);
- R->addRange(Ex->getSourceRange());
+ R->addRange(argRange);
// FIXME: enhance track back for uninitialized value for arbitrary
// memregions
@@ -193,7 +207,7 @@ void CallAndMessageChecker::PreVisitCallExpr(CheckerContext &C,
if (L.isUndef()) {
if (!BT_call_undef)
BT_call_undef =
- new BuiltinBug("Called function pointer is an undefined pointer value");
+ new BuiltinBug("Called function pointer is an uninitalized pointer value");
EmitBadCall(BT_call_undef, C, CE);
return;
}
@@ -205,26 +219,23 @@ void CallAndMessageChecker::PreVisitCallExpr(CheckerContext &C,
EmitBadCall(BT_call_null, C, CE);
}
- for (CallExpr::const_arg_iterator I = CE->arg_begin(), E = CE->arg_end();
- I != E; ++I)
- if (PreVisitProcessArg(C, *I,
- "Pass-by-value argument in function call is"
- " undefined", BT_call_arg))
- return;
+ PreVisitProcessArgs(C, CallOrObjCMessage(CE, C.getState()),
+ "Function call argument is an uninitialized value",
+ BT_call_arg);
}
-void CallAndMessageChecker::PreVisitObjCMessageExpr(CheckerContext &C,
- const ObjCMessageExpr *ME) {
+void CallAndMessageChecker::preVisitObjCMessage(CheckerContext &C,
+ ObjCMessage msg) {
const GRState *state = C.getState();
// FIXME: Handle 'super'?
- if (const Expr *receiver = ME->getInstanceReceiver())
+ if (const Expr *receiver = msg.getInstanceReceiver())
if (state->getSVal(receiver).isUndef()) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT_msg_undef)
BT_msg_undef =
- new BuiltinBug("Receiver in message expression is a garbage value");
+ new BuiltinBug("Receiver in message expression is an uninitialized value");
EnhancedBugReport *R =
new EnhancedBugReport(*BT_msg_undef, BT_msg_undef->getName(), N);
R->addRange(receiver->getSourceRange());
@@ -235,23 +246,21 @@ void CallAndMessageChecker::PreVisitObjCMessageExpr(CheckerContext &C,
return;
}
+ const char *bugDesc = msg.isPropertySetter() ?
+ "Argument for property setter is an uninitialized value"
+ : "Argument in message expression is an uninitialized value";
// Check for any arguments that are uninitialized/undefined.
- for (ObjCMessageExpr::const_arg_iterator I = ME->arg_begin(),
- E = ME->arg_end(); I != E; ++I)
- if (PreVisitProcessArg(C, *I,
- "Pass-by-value argument in message expression "
- "is undefined", BT_msg_arg))
- return;
+ PreVisitProcessArgs(C, CallOrObjCMessage(msg, state), bugDesc, BT_msg_arg);
}
-bool CallAndMessageChecker::EvalNilReceiver(CheckerContext &C,
- const ObjCMessageExpr *ME) {
- HandleNilReceiver(C, C.getState(), ME);
+bool CallAndMessageChecker::evalNilReceiver(CheckerContext &C,
+ ObjCMessage msg) {
+ HandleNilReceiver(C, C.getState(), msg);
return true; // Nil receiver is not handled elsewhere.
}
-void CallAndMessageChecker::EmitNilReceiverBug(CheckerContext &C,
- const ObjCMessageExpr *ME,
+void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
+ const ObjCMessage &msg,
ExplodedNode *N) {
if (!BT_msg_ret)
@@ -261,12 +270,12 @@ void CallAndMessageChecker::EmitNilReceiverBug(CheckerContext &C,
llvm::SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
- os << "The receiver of message '" << ME->getSelector().getAsString()
+ os << "The receiver of message '" << msg.getSelector().getAsString()
<< "' is nil and returns a value of type '"
- << ME->getType().getAsString() << "' that will be garbage";
+ << msg.getType(C.getASTContext()).getAsString() << "' that will be garbage";
EnhancedBugReport *report = new EnhancedBugReport(*BT_msg_ret, os.str(), N);
- if (const Expr *receiver = ME->getInstanceReceiver()) {
+ if (const Expr *receiver = msg.getInstanceReceiver()) {
report->addRange(receiver->getSourceRange());
report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
receiver);
@@ -274,29 +283,31 @@ void CallAndMessageChecker::EmitNilReceiverBug(CheckerContext &C,
C.EmitReport(report);
}
-static bool SupportsNilWithFloatRet(const llvm::Triple &triple) {
+static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
return triple.getVendor() == llvm::Triple::Apple &&
- triple.getDarwinMajorNumber() >= 9;
+ (triple.getDarwinMajorNumber() >= 9 ||
+ triple.getArch() == llvm::Triple::arm ||
+ triple.getArch() == llvm::Triple::thumb);
}
void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
const GRState *state,
- const ObjCMessageExpr *ME) {
+ ObjCMessage msg) {
+ ASTContext &Ctx = C.getASTContext();
// Check the return type of the message expression. A message to nil will
// return different values depending on the return type and the architecture.
- QualType RetTy = ME->getType();
+ QualType RetTy = msg.getType(Ctx);
- ASTContext &Ctx = C.getASTContext();
CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
if (CanRetTy->isStructureOrClassType()) {
// FIXME: At some point we shouldn't rely on isConsumedExpr(), but instead
// have the "use of undefined value" be smarter about where the
// undefined value came from.
- if (C.getPredecessor()->getParentMap().isConsumedExpr(ME)) {
- if (ExplodedNode* N = C.GenerateSink(state))
- EmitNilReceiverBug(C, ME, N);
+ if (C.getPredecessor()->getParentMap().isConsumedExpr(msg.getOriginExpr())){
+ if (ExplodedNode* N = C.generateSink(state))
+ emitNilReceiverBug(C, msg, N);
return;
}
@@ -308,19 +319,20 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
// Other cases: check if the return type is smaller than void*.
if (CanRetTy != Ctx.VoidTy &&
- C.getPredecessor()->getParentMap().isConsumedExpr(ME)) {
+ C.getPredecessor()->getParentMap().isConsumedExpr(msg.getOriginExpr())) {
// Compute: sizeof(void *) and sizeof(return type)
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
if (voidPtrSize < returnTypeSize &&
- !(SupportsNilWithFloatRet(Ctx.Target.getTriple()) &&
+ !(supportsNilWithFloatRet(Ctx.Target.getTriple()) &&
(Ctx.FloatTy == CanRetTy ||
Ctx.DoubleTy == CanRetTy ||
Ctx.LongDoubleTy == CanRetTy ||
- Ctx.LongLongTy == CanRetTy))) {
- if (ExplodedNode* N = C.GenerateSink(state))
- EmitNilReceiverBug(C, ME, N);
+ Ctx.LongLongTy == CanRetTy ||
+ Ctx.UnsignedLongLongTy == CanRetTy))) {
+ if (ExplodedNode* N = C.generateSink(state))
+ emitNilReceiverBug(C, msg, N);
return;
}
@@ -337,8 +349,8 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
// it most likely isn't nil. We should assume the semantics
// of this case unless we have *a lot* more knowledge.
//
- SVal V = C.getValueManager().makeZeroVal(ME->getType());
- C.GenerateNode(state->BindExpr(ME, V));
+ SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
+ C.generateNode(state->BindExpr(msg.getOriginExpr(), V));
return;
}
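
The rule HandleNilReceiver implements: a message to nil yields a well-defined zero result only when the return type fits in a pointer-sized slot (plus the float/long-long exceptions gated by supportsNilWithFloatRet); anything wider would come back as garbage, hence the sink. The core size comparison, in isolation:

#include <cstddef>
#include <cstdint>
#include <iostream>

// True when a nil-message result of this size could not be delivered in a
// zeroed pointer-sized return slot.
bool nilReturnIsGarbage(std::size_t returnTypeSize) {
    return returnTypeSize > sizeof(void *);
}

int main() {
    std::cout << nilReturnIsGarbage(sizeof(std::int32_t)) << '\n'; // 0 on 64-bit
    std::cout << nilReturnIsGarbage(sizeof(long double)) << '\n';  // usually 1
    struct Big { char bytes[64]; };
    std::cout << nilReturnIsGarbage(sizeof(Big)) << '\n';          // 1
}
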
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 6676fe5..518cf96 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -12,11 +12,12 @@
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CharUnits.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "GRExprEngineInternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
using namespace clang;
+using namespace ento;
namespace {
class CastSizeChecker : public CheckerVisitor<CastSizeChecker> {
@@ -37,7 +38,7 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
ASTContext &Ctx = C.getASTContext();
QualType ToTy = Ctx.getCanonicalType(CE->getType());
- PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
if (!ToPTy)
return;
@@ -57,28 +58,27 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
if (SR == 0)
return;
- ValueManager &ValMgr = C.getValueManager();
- SVal Extent = SR->getExtent(ValMgr);
-
- SValuator &SVator = ValMgr.getSValuator();
- const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent);
- if (!ExtentInt)
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal extent = SR->getExtent(svalBuilder);
+ const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
+ if (!extentInt)
return;
- CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue());
- CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
-
+ CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+ CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
// Ignore void, and a few other un-sizeable types.
- if (TypeSize.isZero())
+ if (typeSize.isZero())
return;
-
- if (RegionSize % TypeSize != 0) {
- if (ExplodedNode *N = C.GenerateSink()) {
+
+ if (regionSize % typeSize != 0) {
+ if (ExplodedNode *errorNode = C.generateSink()) {
if (!BT)
BT = new BuiltinBug("Cast region with wrong size.",
"Cast a region whose size is not a multiple of the"
" destination type size.");
- RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(),
+ errorNode);
R->addRange(CE->getSourceRange());
C.EmitReport(R);
}
@@ -86,6 +86,6 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
}
-void clang::RegisterCastSizeChecker(GRExprEngine &Eng) {
+void ento::RegisterCastSizeChecker(ExprEngine &Eng) {
Eng.registerCheck(new CastSizeChecker());
}
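
For illustration (not part of the diff), a minimal sketch of the pattern CastSizeChecker reports: casting a region whose extent is not a whole multiple of the destination pointee size. Type sizes assume a typical target where sizeof(struct pair) == 8.

struct pair { int x; int y; };          // 8 bytes on common targets

void bad_cast(void) {
  char buf[7];                          // region extent: 7 bytes
  struct pair *p = (struct pair *)buf;  // 7 % 8 != 0 -> "Cast region with
                                        // wrong size."
  (void)p;
}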
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index eeaed97..8ec226a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CastToStructChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -13,11 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "GRExprEngineInternalChecks.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class CastToStructChecker
@@ -42,8 +44,8 @@ void CastToStructChecker::PreVisitCastExpr(CheckerContext &C,
QualType OrigTy = Ctx.getCanonicalType(E->getType());
QualType ToTy = Ctx.getCanonicalType(CE->getType());
- PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
- PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+ const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
if (!ToPTy || !OrigPTy)
return;
@@ -60,7 +62,7 @@ void CastToStructChecker::PreVisitCastExpr(CheckerContext &C,
// Now the cast-to-type is struct pointer, the original type is not void*.
if (!OrigPointeeTy->isRecordType()) {
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT)
BT = new BuiltinBug("Cast from non-struct type to struct type",
"Casting a non-structure type to a structure type "
@@ -73,6 +75,10 @@ void CastToStructChecker::PreVisitCastExpr(CheckerContext &C,
}
}
-void clang::RegisterCastToStructChecker(GRExprEngine &Eng) {
+static void RegisterCastToStructChecker(ExprEngine &Eng) {
Eng.registerCheck(new CastToStructChecker());
}
+
+void ento::registerCastToStructChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterCastToStructChecker);
+}
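
The two-step shape above (a file-local ExprEngine hook wrapped by a CheckerManager entry point) recurs throughout this migration. A hedged sketch of the pattern with a hypothetical checker name, using only APIs that appear in this diff:

namespace {
class MyTransitionalChecker : public CheckerVisitor<MyTransitionalChecker> {
  // ... old-style visitor callbacks ...
};
} // end anonymous namespace

// Legacy registration, now kept as a file-local helper.
static void RegisterMyTransitionalChecker(ExprEngine &Eng) {
  Eng.registerCheck(new MyTransitionalChecker());
}

// New entry point: CheckerManager queues the legacy helper.
void ento::registerMyTransitionalChecker(CheckerManager &mgr) {
  mgr.addCheckerRegisterFunction(RegisterMyTransitionalChecker);
}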
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 11ddaca..ad3bab6 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckObjCDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -13,9 +13,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/DeclObjC.h"
@@ -23,6 +25,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
static bool scan_dealloc(Stmt* S, Selector Dealloc) {
@@ -76,8 +79,8 @@ static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
if (BO->isAssignmentOp())
if (ObjCPropertyRefExpr* PRE =
- dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
- if (PRE->getProperty() == PD)
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+ if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
if (BO->getRHS()->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull)) {
// This is only a 'release' if the property kind is not
@@ -93,7 +96,7 @@ static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
return false;
}
-void clang::CheckObjCDealloc(const ObjCImplementationDecl* D,
+static void checkObjCDealloc(const ObjCImplementationDecl* D,
const LangOptions& LOpts, BugReporter& BR) {
assert (LOpts.getGCMode() != LangOptions::GCOnly);
@@ -259,3 +262,23 @@ void clang::CheckObjCDealloc(const ObjCImplementationDecl* D,
}
}
+//===----------------------------------------------------------------------===//
+// ObjCDeallocChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCDeallocChecker : public CheckerV2<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (mgr.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ return;
+ checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOptions(), BR);
+ }
+};
+}
+
+void ento::registerObjCDeallocChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCDeallocChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index 76a0923..369ba0b 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckObjCInstMethSignature.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -13,9 +13,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Type.h"
#include "clang/AST/ASTContext.h"
@@ -24,6 +25,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
ASTContext& C) {
@@ -69,7 +71,7 @@ static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
}
}
-void clang::CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
+static void CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
BugReporter& BR) {
const ObjCInterfaceDecl* D = ID->getClassInterface();
@@ -117,3 +119,22 @@ void clang::CheckObjCInstMethSignature(const ObjCImplementationDecl* ID,
C = C->getSuperClass();
}
}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethSigsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCMethSigsChecker : public CheckerV2<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckObjCInstMethSignature(D, BR);
+ }
+};
+}
+
+void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCMethSigsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 9a2ac45..185520c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -11,18 +11,23 @@
//
//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
static bool isArc4RandomAvailable(const ASTContext &Ctx) {
const llvm::Triple &T = Ctx.Target.getTriple();
return T.getVendor() == llvm::Triple::Apple ||
- T.getOS() == llvm::Triple::FreeBSD;
+ T.getOS() == llvm::Triple::FreeBSD ||
+ T.getOS() == llvm::Triple::NetBSD ||
+ T.getOS() == llvm::Triple::OpenBSD ||
+ T.getOS() == llvm::Triple::DragonFly;
}
namespace {
@@ -187,8 +192,10 @@ void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
return;
// Are we comparing variables?
- const DeclRefExpr *drLHS = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParens());
- const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
+ const DeclRefExpr *drLHS =
+ dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenLValueCasts());
+ const DeclRefExpr *drRHS =
+ dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParenLValueCasts());
// Does at least one of the variables have a floating point type?
drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL;
@@ -240,7 +247,8 @@ void WalkAST::CheckCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_gets, "gets"))
return;
- const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FPT)
return;
@@ -274,7 +282,8 @@ void WalkAST::CheckCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_getpw, "getpw"))
return;
- const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FPT)
return;
@@ -312,7 +321,8 @@ void WalkAST::CheckCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_mktemp, "mktemp"))
return;
- const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if(!FPT)
return;
@@ -367,7 +377,8 @@ void WalkAST::CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
if (identifierid >= num_rands)
return;
- const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FTP)
return;
@@ -408,7 +419,8 @@ void WalkAST::CheckCall_random(const CallExpr *CE, const FunctionDecl *FD) {
if (FD->getIdentifier() != GetIdentifier(II_random, "random"))
return;
- const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FTP)
return;
@@ -455,7 +467,8 @@ void WalkAST::CheckUncheckedReturnValue(CallExpr *CE) {
if (identifierid >= num_setids)
return;
- const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FD->getType());
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FTP)
return;
@@ -485,10 +498,20 @@ void WalkAST::CheckUncheckedReturnValue(CallExpr *CE) {
}
//===----------------------------------------------------------------------===//
-// Entry point for check.
+// SecuritySyntaxChecker
//===----------------------------------------------------------------------===//
-void clang::CheckSecuritySyntaxOnly(const Decl *D, BugReporter &BR) {
- WalkAST walker(BR);
- walker.Visit(D->getBody());
+namespace {
+class SecuritySyntaxChecker : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR);
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerSecuritySyntaxChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SecuritySyntaxChecker>();
}
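
For illustration (not part of the diff), the kind of loop CheckLoopConditionForFloat flags; 0.1f is not exactly representable, so the iteration count depends on accumulated rounding error:

void float_counter(void) {
  for (float f = 0.0f; f < 1.0f; f += 0.1f) {  // flagged: float loop counter
    /* ... */
  }
}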
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index bbe494c..d46ac81 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckSizeofPointer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -12,11 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
using namespace clang;
+using namespace ento;
namespace {
class WalkAST : public StmtVisitor<WalkAST> {
@@ -65,7 +67,21 @@ void WalkAST::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
}
}
-void clang::CheckSizeofPointer(const Decl *D, BugReporter &BR) {
- WalkAST walker(BR);
- walker.Visit(D->getBody());
+//===----------------------------------------------------------------------===//
+// SizeofPointerChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SizeofPointerChecker : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR);
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SizeofPointerChecker>();
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
new file mode 100644
index 0000000..1dc7486
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -0,0 +1,197 @@
+//===--- Checkers.td - Static Analyzer Checkers ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
+
+//===----------------------------------------------------------------------===//
+// Packages.
+//===----------------------------------------------------------------------===//
+
+def Core : Package<"core">;
+def Cocoa : Package<"cocoa">;
+def Unix : Package<"unix">;
+def MacOSX : Package<"macosx">;
+
+def CoreExperimental : Package<"experimental">,
+ InPackage<Core>, Hidden;
+
+def CocoaExperimental : Package<"experimental">,
+ InPackage<Cocoa>, Hidden;
+
+def UnixExperimental : Package<"experimental">,
+ InPackage<Unix>, Hidden;
+
+def LLVM : Package<"llvm">;
+def Debug : Package<"debug">;
+
+//===----------------------------------------------------------------------===//
+// Groups.
+//===----------------------------------------------------------------------===//
+
+def AllExperimental : CheckerGroup<"all-experimental">,
+ Hidden;
+
+//===----------------------------------------------------------------------===//
+// Checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Cocoa in {
+
+def ObjCSelfInitChecker : Checker<"SelfInit">,
+ HelpText<"Check that 'self' is propely initialized inside an initializer method">,
+ DescFile<"ObjCSelfInitChecker.cpp">;
+
+def ObjCAtSyncChecker : Checker<"AtSync">,
+ HelpText<"Check for null pointers used as mutexes for @synchronized">,
+ DescFile<"ObjCAtSyncChecker.cpp">;
+
+def NilArgChecker : Checker<"NilArg">,
+ HelpText<"Check for prohibited nil arguments to ObjC method calls">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ClassReleaseChecker : Checker<"ClassRelease">,
+ HelpText<"Check for sending 'retain', 'release', or 'autorelease' directly to a Class">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def NSAutoreleasePoolChecker : Checker<"NSAutoreleasePool">,
+ HelpText<"Warn for subpar uses of NSAutoreleasePool">,
+ DescFile<"NSAutoreleasePoolChecker.cpp">;
+
+def ObjCMethSigsChecker : Checker<"MethodSigs">,
+ HelpText<"Warn about Objective-C method signatures with type incompatibilities">,
+ DescFile<"CheckObjCInstMethSignature.cpp">;
+
+def ObjCUnusedIvarsChecker : Checker<"UnusedIvars">,
+ HelpText<"Warn about private ivars that are never used">,
+ DescFile<"ObjCUnusedIVarsChecker.cpp">;
+
+}
+
+def StackAddrLeakChecker : Checker<"StackAddrLeak">,
+ InPackage<Core>,
+ HelpText<"Check that addresses to stack memory are not leaked outside the function">,
+ DescFile<"StackAddrLeakChecker.cpp">;
+
+def DeadStoresChecker : Checker<"DeadStores">,
+ InPackage<Core>,
+ HelpText<"Check for stores to dead variables">,
+ DescFile<"DeadStoresChecker.cpp">;
+
+def UnixAPIChecker : Checker<"API">,
+ InPackage<Unix>,
+ HelpText<"Check calls to various UNIX/Posix functions">,
+ DescFile<"UnixAPIChecker.cpp">;
+
+def MacOSXAPIChecker : Checker<"API">,
+ InPackage<MacOSX>,
+ HelpText<"Check calls to various MacOSXAPIChecker">,
+ DescFile<"MacOSXAPIChecker.cpp">;
+
+def CFNumberCreateChecker : Checker<"CFNumber">,
+ InPackage<MacOSX>,
+ HelpText<"Check for CFNumberCreate">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFRetainReleaseChecker : Checker<"CFRetainRelease">,
+ InPackage<MacOSX>,
+ HelpText<"Check for null arguments to CFRetain/CFRelease">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def LLVMConventionsChecker : Checker<"Conventions">,
+ InPackage<LLVM>,
+ HelpText<"Check code for LLVM codebase conventions">,
+ DescFile<"LLVMConventionsChecker.cpp">;
+
+def LiveVariablesDumper : Checker<"DumpLiveVars">,
+ InPackage<Debug>,
+ HelpText<"Print results of live variable analysis">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGViewer : Checker<"ViewCFG">,
+ InPackage<Debug>,
+ HelpText<"View Control-Flow Graphs using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGDumper : Checker<"DumpCFG">,
+ InPackage<Debug>,
+ HelpText<"Display Control-Flow Graphs">,
+ DescFile<"DebugCheckers.cpp">;
+
+//===----------------------------------------------------------------------===//
+// Hidden experimental checkers.
+//===----------------------------------------------------------------------===//
+
+let Group = AllExperimental in {
+
+def CStringChecker : Checker<"CString">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check calls to functions in <string.h>">,
+ DescFile<"CStringChecker.cpp">;
+
+def UnreachableCodeChecker : Checker<"UnreachableCode">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check unreachable code">,
+ DescFile<"UnreachableCodeChecker.cpp">;
+
+def IdempotentOperationChecker : Checker<"IdempotentOps">,
+ InPackage<CoreExperimental>,
+ HelpText<"Warn about idempotent operations">,
+ DescFile<"IdempotentOperationChecker.cpp">;
+
+def CastToStructChecker : Checker<"CastToStruct">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check for cast from non-struct pointer to struct pointer">,
+ DescFile<"CastToStructChecker.cpp">;
+
+def FixedAddressChecker : Checker<"FixedAddr">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check for assignment of a fixed address to a pointer">,
+ DescFile<"FixedAddressChecker.cpp">;
+
+def PointerArithChecker : Checker<"PointerArithm">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check for pointer arithmetic on locations other than array elements">,
+ DescFile<"PointerArithChecker">;
+
+def PointerSubChecker : Checker<"PointerSub">,
+ InPackage<CoreExperimental>,
+ HelpText<"Check for pointer subtractions on two pointers pointing to different memory chunks">,
+ DescFile<"PointerSubChecker">;
+
+def SizeofPointerChecker : Checker<"SizeofPtr">,
+ InPackage<CoreExperimental>,
+ HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
+ DescFile<"CheckSizeofPointer.cpp">;
+
+def SecuritySyntaxChecker : Checker<"SecuritySyntactic">,
+ InPackage<CoreExperimental>,
+ HelpText<"Perform quick security checks that require no data flow">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+
+def ObjCDeallocChecker : Checker<"Dealloc">,
+ InPackage<CocoaExperimental>,
+ HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
+ DescFile<"CheckObjCDealloc.cpp">;
+
+def ChrootChecker : Checker<"Chroot">,
+ InPackage<UnixExperimental>,
+ HelpText<"Check improper use of chroot">,
+ DescFile<"ChrootChecker.cpp">;
+
+def PthreadLockChecker : Checker<"PthreadLock">,
+ InPackage<UnixExperimental>,
+ HelpText<"Simple lock -> unlock checker">,
+ DescFile<"PthreadLockChecker.cpp">;
+
+def StreamChecker : Checker<"Stream">,
+ InPackage<UnixExperimental>,
+ HelpText<"Check stream handling functions">,
+ DescFile<"StreamChecker.cpp">;
+
+}
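
This file is lowered by tblgen into Checkers.inc, which the checker provider consumes. A rough sketch of what one record might expand to; the exact emitted format is an assumption, but the macro arity matches the CHECKER macro used in ClangSACheckerProvider.cpp later in this diff:

#ifdef GET_CHECKERS
// CHECKER(FULLNAME, CLASS, DESCFILE, HELPTEXT, HIDDEN)
CHECKER("core.DeadStores", DeadStoresChecker, "DeadStoresChecker.cpp",
        "Check for stores to dead variables", false)
#endif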
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
new file mode 100644
index 0000000..36e76d0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -0,0 +1,167 @@
+//===- ChrootChecker.cpp -------- Basic security checks ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the chroot checker, which checks for improper use of chroot.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Enum values that represent the jail state.
+enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
+
+bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
+//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
+
+// This checker checks improper use of chroot.
+// The state transition:
+// NO_CHROOT ---chroot(path)--> ROOT_CHANGED ---chdir(/) --> JAIL_ENTERED
+//                                  |                            |
+//         ROOT_CHANGED<--chdir(..)--    JAIL_ENTERED<--chdir(..)--
+//                                  |                            |
+//                      bug<--foo()--        JAIL_ENTERED<--foo()--
+class ChrootChecker : public CheckerVisitor<ChrootChecker> {
+ IdentifierInfo *II_chroot, *II_chdir;
+ // This bug refers to possibly breaking out of a chroot() jail.
+ BuiltinBug *BT_BreakJail;
+
+public:
+ ChrootChecker() : II_chroot(0), II_chdir(0), BT_BreakJail(0) {}
+
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ virtual bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
+ virtual void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+
+private:
+ void Chroot(CheckerContext &C, const CallExpr *CE);
+ void Chdir(CheckerContext &C, const CallExpr *CE);
+};
+
+} // end anonymous namespace
+
+static void RegisterChrootChecker(ExprEngine &Eng) {
+ Eng.registerCheck(new ChrootChecker());
+}
+
+void ento::registerChrootChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterChrootChecker);
+}
+
+bool ChrootChecker::evalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+ if (FD->getIdentifier() == II_chroot) {
+ Chroot(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_chdir) {
+ Chdir(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ GRStateManager &Mgr = state->getStateManager();
+
+ // Once we encounter a chroot(), set the enum value ROOT_CHANGED directly
+ // in the GDM.
+ state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
+ C.addTransition(state);
+}
+
+void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ GRStateManager &Mgr = state->getStateManager();
+
+ // If there is no jail state in the GDM, just return.
+ const void* k = state->FindGDM(ChrootChecker::getTag());
+ if (!k)
+ return;
+
+ // After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
+ const Expr *ArgExpr = CE->getArg(0);
+ SVal ArgVal = state->getSVal(ArgExpr);
+
+ if (const MemRegion *R = ArgVal.getAsRegion()) {
+ R = R->StripCasts();
+ if (const StringRegion* StrRegion = dyn_cast<StringRegion>(R)) {
+ const StringLiteral* Str = StrRegion->getStringLiteral();
+ if (Str->getString() == "/")
+ state = Mgr.addGDM(state, ChrootChecker::getTag(),
+ (void*) JAIL_ENTERED);
+ }
+ }
+
+ C.addTransition(state);
+}
+
+// Check the jail state before any function call except chroot() and chdir().
+void ChrootChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+ // Ignore chroot and chdir.
+ if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
+ return;
+
+ // If jail state is ROOT_CHANGED, generate BugReport.
+ void* const* k = state->FindGDM(ChrootChecker::getTag());
+ if (k)
+ if (isRootChanged((intptr_t) *k))
+ if (ExplodedNode *N = C.generateNode()) {
+ if (!BT_BreakJail)
+ BT_BreakJail = new BuiltinBug("Break out of jail",
+ "No call of chdir(\"/\") immediately "
+ "after chroot");
+ BugReport *R = new BugReport(*BT_BreakJail,
+ BT_BreakJail->getDescription(), N);
+ C.EmitReport(R);
+ }
+
+ return;
+}
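
For illustration (not part of the diff), the usage pattern this state machine encodes; open_files() is a hypothetical stand-in for any call other than chroot()/chdir():

#include <unistd.h>

void open_files(void);

void risky(void) {
  chroot("/var/jail");
  open_files();   // ROOT_CHANGED: reported, no chdir("/") after chroot()
}

void safe(void) {
  chroot("/var/jail");
  chdir("/");     // transitions to JAIL_ENTERED
  open_files();   // not reported
}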
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.cpp
new file mode 100644
index 0000000..94f200f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.cpp
@@ -0,0 +1,137 @@
+//===--- ClangSACheckerProvider.cpp - Clang SA Checkers Provider ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the CheckerProvider for the checkers defined in
+// libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckerProvider.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerProvider.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+/// \brief Provider for all the checkers in libclangStaticAnalyzerCheckers.
+class ClangSACheckerProvider : public CheckerProvider {
+public:
+ virtual void registerCheckers(CheckerManager &checkerMgr,
+ CheckerOptInfo *checkOpts, unsigned numCheckOpts);
+};
+
+}
+
+CheckerProvider *ento::createClangSACheckerProvider() {
+ return new ClangSACheckerProvider();
+}
+
+namespace {
+
+struct StaticCheckerInfoRec {
+ const char *FullName;
+ void (*RegFunc)(CheckerManager &mgr);
+ bool Hidden;
+};
+
+} // end anonymous namespace.
+
+static const StaticCheckerInfoRec StaticCheckerInfo[] = {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,HIDDEN) \
+ { FULLNAME, register##CLASS, HIDDEN },
+#include "Checkers.inc"
+ { 0, 0, 0}
+#undef CHECKER
+#undef GET_CHECKERS
+};
+
+namespace {
+
+struct CheckNameOption {
+ const char *Name;
+ const short *Members;
+ const short *SubGroups;
+ bool Hidden;
+};
+
+} // end anonymous namespace.
+
+#define GET_MEMBER_ARRAYS
+#include "Checkers.inc"
+#undef GET_MEMBER_ARRAYS
+
+// The table of check name options, sorted by name for fast binary lookup.
+static const CheckNameOption CheckNameTable[] = {
+#define GET_CHECKNAME_TABLE
+#include "Checkers.inc"
+#undef GET_CHECKNAME_TABLE
+};
+static const size_t
+ CheckNameTableSize = sizeof(CheckNameTable) / sizeof(CheckNameTable[0]);
+
+static bool CheckNameOptionCompare(const CheckNameOption &LHS,
+ const CheckNameOption &RHS) {
+ return strcmp(LHS.Name, RHS.Name) < 0;
+}
+
+static void collectCheckers(const CheckNameOption *checkName,
+ bool enable,
+ llvm::DenseSet<const StaticCheckerInfoRec *> &checkers,
+ bool collectHidden) {
+ if (checkName->Hidden && !collectHidden)
+ return;
+
+ if (const short *member = checkName->Members) {
+ if (enable) {
+ if (collectHidden || !StaticCheckerInfo[*member].Hidden)
+ checkers.insert(&StaticCheckerInfo[*member]);
+ } else {
+ for (; *member != -1; ++member)
+ checkers.erase(&StaticCheckerInfo[*member]);
+ }
+ }
+
+ // Enable/disable all subgroups along with this one.
+ if (const short *subGroups = checkName->SubGroups) {
+ for (; *subGroups != -1; ++subGroups)
+ collectCheckers(&CheckNameTable[*subGroups], enable, checkers,
+ collectHidden && checkName->Hidden);
+ }
+}
+
+static void collectCheckers(CheckerOptInfo &opt,
+ llvm::DenseSet<const StaticCheckerInfoRec *> &checkers) {
+ const char *optName = opt.getName();
+ CheckNameOption key = { optName, 0, 0, false };
+ const CheckNameOption *found =
+ std::lower_bound(CheckNameTable, CheckNameTable + CheckNameTableSize, key,
+ CheckNameOptionCompare);
+ if (found == CheckNameTable + CheckNameTableSize ||
+ strcmp(found->Name, optName) != 0)
+ return; // Check name not found.
+
+ opt.claim();
+ collectCheckers(found, opt.isEnabled(), checkers, /*collectHidden=*/true);
+}
+
+void ClangSACheckerProvider::registerCheckers(CheckerManager &checkerMgr,
+ CheckerOptInfo *checkOpts, unsigned numCheckOpts) {
+ llvm::DenseSet<const StaticCheckerInfoRec *> enabledCheckers;
+ for (unsigned i = 0; i != numCheckOpts; ++i)
+ collectCheckers(checkOpts[i], enabledCheckers);
+ for (llvm::DenseSet<const StaticCheckerInfoRec *>::iterator
+ I = enabledCheckers.begin(), E = enabledCheckers.end(); I != E; ++I) {
+ (*I)->RegFunc(checkerMgr);
+ }
+}
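
The lookup above is a standard binary search over a name-sorted table. A self-contained sketch of the same idiom (table contents are illustrative):

#include <algorithm>
#include <cstddef>
#include <cstring>

struct Option { const char *Name; int Value; };

static bool optLess(const Option &L, const Option &R) {
  return std::strcmp(L.Name, R.Name) < 0;
}

// Must be sorted by Name for std::lower_bound to be valid.
static const Option Table[] = { {"alpha", 1}, {"beta", 2}, {"gamma", 3} };
static const size_t TableSize = sizeof(Table) / sizeof(Table[0]);

const Option *findOption(const char *name) {
  Option key = { name, 0 };
  const Option *found =
      std::lower_bound(Table, Table + TableSize, key, optLess);
  if (found == Table + TableSize || std::strcmp(found->Name, name) != 0)
    return 0;  // name not in the table
  return found;
}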
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.h
new file mode 100644
index 0000000..f6c8011
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckerProvider.h
@@ -0,0 +1,29 @@
+//===--- ClangSACheckerProvider.h - Clang SA Checkers Provider --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the entry point for creating the provider for the checkers defined
+// in libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CHECKERS_CLANGSACHECKERPROVIDER_H
+#define LLVM_CLANG_SA_CHECKERS_CLANGSACHECKERPROVIDER_H
+
+namespace clang {
+
+namespace ento {
+ class CheckerProvider;
+
+CheckerProvider *createClangSACheckerProvider();
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
new file mode 100644
index 0000000..73239f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -0,0 +1,34 @@
+//===--- ClangSACheckers.h - Registration functions for Checkers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the registration functions for the checkers defined in
+// libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+#define LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+
+namespace clang {
+
+namespace ento {
+class CheckerManager;
+
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,HIDDEN) \
+ void register##CLASS(CheckerManager &mgr);
+#include "Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 3896100..3b39372 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckDeadStores.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -1,4 +1,4 @@
-//==- DeadStores.cpp - Check for stores to dead variables --------*- C++ -*-==//
+//==- DeadStoresChecker.cpp - Check for stores to dead variables -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -12,11 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/AST/ASTContext.h"
@@ -24,29 +26,84 @@
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
+using namespace ento;
namespace {
+// FIXME: Eventually migrate into its own file, and have it managed by
+// AnalysisManager.
+class ReachableCode {
+ const CFG &cfg;
+ llvm::BitVector reachable;
+public:
+ ReachableCode(const CFG &cfg)
+ : cfg(cfg), reachable(cfg.getNumBlockIDs(), false) {}
+
+ void computeReachableBlocks();
+
+ bool isReachable(const CFGBlock *block) const {
+ return reachable[block->getBlockID()];
+ }
+};
+}
+
+void ReachableCode::computeReachableBlocks() {
+ if (!cfg.getNumBlockIDs())
+ return;
+
+ llvm::SmallVector<const CFGBlock*, 10> worklist;
+ worklist.push_back(&cfg.getEntry());
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.back();
+ worklist.pop_back();
+ llvm::BitVector::reference isReachable = reachable[block->getBlockID()];
+ if (isReachable)
+ continue;
+ isReachable = true;
+ for (CFGBlock::const_succ_iterator i = block->succ_begin(),
+ e = block->succ_end(); i != e; ++i)
+ if (const CFGBlock *succ = *i)
+ worklist.push_back(succ);
+ }
+}
+
+namespace {
class DeadStoreObs : public LiveVariables::ObserverTy {
+ const CFG &cfg;
ASTContext &Ctx;
BugReporter& BR;
ParentMap& Parents;
llvm::SmallPtrSet<VarDecl*, 20> Escaped;
+ llvm::OwningPtr<ReachableCode> reachableCode;
+ const CFGBlock *currentBlock;
enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
public:
- DeadStoreObs(ASTContext &ctx, BugReporter& br, ParentMap& parents,
+ DeadStoreObs(const CFG &cfg, ASTContext &ctx,
+ BugReporter& br, ParentMap& parents,
llvm::SmallPtrSet<VarDecl*, 20> &escaped)
- : Ctx(ctx), BR(br), Parents(parents), Escaped(escaped) {}
+ : cfg(cfg), Ctx(ctx), BR(br), Parents(parents),
+ Escaped(escaped), currentBlock(0) {}
virtual ~DeadStoreObs() {}
void Report(VarDecl* V, DeadStoreKind dsk, SourceLocation L, SourceRange R) {
if (Escaped.count(V))
return;
+
+ // Compute reachable blocks within the CFG for trivial cases
+ // where a bogus dead store can be reported because the store itself is unreachable.
+ if (!reachableCode.get()) {
+ reachableCode.reset(new ReachableCode(cfg));
+ reachableCode->computeReachableBlocks();
+ }
+
+ if (!reachableCode->isReachable(currentBlock))
+ return;
- std::string name = V->getNameAsString();
+ const std::string &name = V->getNameAsString();
const char* BugType = 0;
std::string msg;
@@ -69,11 +126,10 @@ public:
break;
case Enclosing:
- BugType = "Dead nested assignment";
- msg = "Although the value stored to '" + name +
- "' is used in the enclosing expression, the value is never actually"
- " read from '" + name + "'";
- break;
+ // Don't report issues in this case, e.g.: "if (x = foo())",
+ // where 'x' is unused later. We have yet to see a case where
+ // this is a real bug.
+ return;
}
BR.EmitBasicReport(BugType, "Dead store", msg, L, R);
@@ -127,14 +183,18 @@ public:
return false;
}
- virtual void ObserveStmt(Stmt* S,
+ virtual void ObserveStmt(Stmt* S, const CFGBlock *block,
const LiveVariables::AnalysisDataTy& AD,
const LiveVariables::ValTy& Live) {
+ currentBlock = block;
+
// Skip statements in macros.
if (S->getLocStart().isMacroID())
return;
+ // Only cover dead stores from regular assignments. ++/-- dead stores
+ // have never flagged a real bug.
if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
if (!B->isAssignmentOp()) return; // Skip non-assignments.
@@ -165,14 +225,11 @@ public:
}
}
else if (UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
- if (!U->isIncrementOp())
+ if (!U->isIncrementOp() || U->isPrefix())
return;
- // Handle: ++x within a subexpression. The solution is not warn
- // about preincrements to dead variables when the preincrement occurs
- // as a subexpression. This can lead to false negatives, e.g. "(++x);"
- // A generalized dead code checker should find such issues.
- if (U->isPrefix() && Parents.isConsumedExpr(U))
+ Stmt *parent = Parents.getParentIgnoreParenCasts(U);
+ if (!parent || !isa<ReturnStmt>(parent))
return;
Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
@@ -203,7 +260,7 @@ public:
if (isa<CXXConstructExpr>(E))
return;
- if (isa<CXXExprWithTemporaries>(E))
+ if (isa<ExprWithCleanups>(E))
return;
// A dead initialization is a variable that is dead after it
@@ -280,10 +337,27 @@ public:
} // end anonymous namespace
-void clang::CheckDeadStores(CFG &cfg, LiveVariables &L, ParentMap &pmap,
- BugReporter& BR) {
- FindEscaped FS(&cfg);
- FS.getCFG().VisitBlockStmts(FS);
- DeadStoreObs A(BR.getContext(), BR, pmap, FS.Escaped);
- L.runOnAllBlocks(cfg, &A);
+//===----------------------------------------------------------------------===//
+// DeadStoresChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DeadStoresChecker : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (LiveVariables *L = mgr.getLiveVariables(D)) {
+ CFG &cfg = *mgr.getCFG(D);
+ ParentMap &pmap = mgr.getParentMap(D);
+ FindEscaped FS(&cfg);
+ FS.getCFG().VisitBlockStmts(FS);
+ DeadStoreObs A(cfg, BR.getContext(), BR, pmap, FS.Escaped);
+ L->runOnAllBlocks(cfg, &A);
+ }
+ }
+};
+}
+
+void ento::registerDeadStoresChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DeadStoresChecker>();
}
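
For illustration (not part of the diff), what the reworked checker does and does not report; compute() is a hypothetical function:

int compute(void);

int dead_stores(void) {
  int x = compute();  // dead initialization: overwritten before any read
  x = 5;              // dead store: never read before the next assignment
  x = 7;
  return x;           // only this value of 'x' is ever read
}

int enclosing(int y) {
  int x;
  if ((x = y))        // 'Enclosing' case: no longer reported (see above);
    return 1;         // the value is used in the enclosing condition
  return 0;
}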
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
new file mode 100644
index 0000000..091d99b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -0,0 +1,80 @@
+//==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that display debugging information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// LiveVariablesDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LiveVariablesDumper : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (LiveVariables* L = mgr.getLiveVariables(D)) {
+ L->dumpBlockLiveness(mgr.getSourceManager());
+ }
+ }
+};
+}
+
+void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
+ mgr.registerChecker<LiveVariablesDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGViewer : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->viewCFG(mgr.getLangOptions());
+ }
+ }
+};
+}
+
+void ento::registerCFGViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CFGViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGDumper : public CheckerV2<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->dump(mgr.getLangOptions());
+ }
+ }
+};
+}
+
+void ento::registerCFGDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CFGDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index af74c79..606ac4a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/DereferenceChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -7,18 +7,19 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines NullDerefChecker, a builtin check in GRExprEngine that performs
+// This defines NullDerefChecker, a builtin check in ExprEngine that performs
// checks for null pointers at loads and stores.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/Checkers/DereferenceChecker.h"
-#include "clang/Checker/PathSensitive/Checker.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class DereferenceChecker : public Checker {
@@ -28,7 +29,8 @@ class DereferenceChecker : public Checker {
public:
DereferenceChecker() : BT_null(0), BT_undef(0) {}
static void *getTag() { static int tag = 0; return &tag; }
- void VisitLocation(CheckerContext &C, const Stmt *S, SVal location);
+ void visitLocation(CheckerContext &C, const Stmt *S, SVal location,
+ bool isLoad);
std::pair<ExplodedNode * const*, ExplodedNode * const*>
getImplicitNodes() const {
@@ -36,15 +38,18 @@ public:
ImplicitNullDerefNodes.data() +
ImplicitNullDerefNodes.size());
}
+ void AddDerefSource(llvm::raw_ostream &os,
+ llvm::SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex, bool loadedFrom = false);
};
} // end anonymous namespace
-void clang::RegisterDereferenceChecker(GRExprEngine &Eng) {
+void ento::RegisterDereferenceChecker(ExprEngine &Eng) {
Eng.registerCheck(new DereferenceChecker());
}
std::pair<ExplodedNode * const *, ExplodedNode * const *>
-clang::GetImplicitNullDereferences(GRExprEngine &Eng) {
+ento::GetImplicitNullDereferences(ExprEngine &Eng) {
DereferenceChecker *checker = Eng.getChecker<DereferenceChecker>();
if (!checker)
return std::make_pair((ExplodedNode * const *) 0,
@@ -52,11 +57,39 @@ clang::GetImplicitNullDereferences(GRExprEngine &Eng) {
return checker->getImplicitNodes();
}
-void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
- SVal l) {
+void DereferenceChecker::AddDerefSource(llvm::raw_ostream &os,
+ llvm::SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex,
+ bool loadedFrom) {
+ Ex = Ex->IgnoreParenLValueCasts();
+ switch (Ex->getStmtClass()) {
+ default:
+ return;
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(Ex);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ os << " (" << (loadedFrom ? "loaded from" : "from")
+ << " variable '" << VD->getName() << "')";
+ Ranges.push_back(DR->getSourceRange());
+ }
+ return;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(Ex);
+ os << " (" << (loadedFrom ? "loaded from" : "via")
+ << " field '" << ME->getMemberNameInfo() << "')";
+ SourceLocation L = ME->getMemberLoc();
+ Ranges.push_back(SourceRange(L, L));
+ break;
+ }
+ }
+}
+
+void DereferenceChecker::visitLocation(CheckerContext &C, const Stmt *S,
+ SVal l, bool isLoad) {
// Check for dereference of an undefined value.
if (l.isUndef()) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT_undef)
BT_undef = new BuiltinBug("Dereference of undefined pointer value");
@@ -77,13 +110,13 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
const GRState *state = C.getState();
const GRState *notNullState, *nullState;
- llvm::tie(notNullState, nullState) = state->Assume(location);
+ llvm::tie(notNullState, nullState) = state->assume(location);
// The explicit NULL case.
if (nullState) {
if (!notNullState) {
// Generate an error node.
- ExplodedNode *N = C.GenerateSink(nullState);
+ ExplodedNode *N = C.generateSink(nullState);
if (!N)
return;
@@ -94,33 +127,49 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
llvm::SmallString<100> buf;
llvm::SmallVector<SourceRange, 2> Ranges;
+
+ // Walk through lvalue casts to get the original expression
+ // that syntactically caused the load.
+ if (const Expr *expr = dyn_cast<Expr>(S))
+ S = expr->IgnoreParenLValueCasts();
switch (S->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Array access";
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts());
+ os << " results in a null pointer dereference";
+ break;
+ }
case Stmt::UnaryOperatorClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Dereference of null pointer";
const UnaryOperator *U = cast<UnaryOperator>(S);
- const Expr *SU = U->getSubExpr()->IgnoreParens();
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(SU)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- llvm::raw_svector_ostream os(buf);
- os << "Dereference of null pointer (loaded from variable '"
- << VD->getName() << "')";
- Ranges.push_back(DR->getSourceRange());
- }
- }
+ AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(), true);
break;
}
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
- if (M->isArrow())
- if (DeclRefExpr *DR =
- dyn_cast<DeclRefExpr>(M->getBase()->IgnoreParenCasts())) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- llvm::raw_svector_ostream os(buf);
- os << "Field access results in a dereference of a null pointer "
- "(loaded from variable '" << VD->getName() << "')";
- Ranges.push_back(M->getBase()->getSourceRange());
- }
+ if (M->isArrow()) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Access to field '" << M->getMemberNameInfo()
+ << "' results in a dereference of a null pointer";
+ AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(), true);
+ }
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(IV->getBase()->IgnoreParenCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Instance variable access (via '" << VD->getName()
+ << "') results in a null pointer dereference";
}
+ }
+ Ranges.push_back(IV->getSourceRange());
break;
}
default:
@@ -146,7 +195,7 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
// Otherwise, we have the case where the location could either be
// null or not-null. Record the error node as an "implicit" null
// dereference.
- if (ExplodedNode *N = C.GenerateSink(nullState))
+ if (ExplodedNode *N = C.generateSink(nullState))
ImplicitNullDerefNodes.push_back(N);
}
}
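
For illustration (not part of the diff), source shapes the rewritten diagnostics cover; the quoted messages paraphrase the strings built above:

struct S { int f; };

int deref_examples(int i) {
  struct S *p = 0;
  int x = p->f;     // "Access to field 'f' results in a dereference of a
                    //  null pointer (loaded from variable 'p')"
  int *a = 0;
  return a[i] + x;  // "Array access (from variable 'a') results in a null
                    //  pointer dereference"
}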
diff --git a/contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 32e2a17..20cc904 100644
--- a/contrib/llvm/tools/clang/lib/Checker/DivZeroChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -7,16 +7,17 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines DivZeroChecker, a builtin check in GRExprEngine that performs
+// This defines DivZeroChecker, a builtin check in ExprEngine that performs
// checks for division by zeros.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class DivZeroChecker : public CheckerVisitor<DivZeroChecker> {
@@ -28,7 +29,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterDivZeroChecker(GRExprEngine &Eng) {
+void ento::RegisterDivZeroChecker(ExprEngine &Eng) {
Eng.registerCheck(new DivZeroChecker());
}
@@ -61,10 +62,10 @@ void DivZeroChecker::PreVisitBinaryOperator(CheckerContext &C,
// Check for divide by zero.
ConstraintManager &CM = C.getConstraintManager();
const GRState *stateNotZero, *stateZero;
- llvm::tie(stateNotZero, stateZero) = CM.AssumeDual(C.getState(), *DV);
+ llvm::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
if (stateZero && !stateNotZero) {
- if (ExplodedNode *N = C.GenerateSink(stateZero)) {
+ if (ExplodedNode *N = C.generateSink(stateZero)) {
if (!BT)
BT = new BuiltinBug("Division by zero");
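
The report above fires only when the divisor is zero on every feasible path (stateZero && !stateNotZero). A minimal sketch, not part of the diff:

int always_zero(int n) {
  int d = 0;
  return n / d;   // reported: Division by zero
}

int maybe_zero(int n, int d) {
  return n / d;   // not reported: 'd' is not provably zero
}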
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.cpp
index 84262b0..d9bb480 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.cpp
@@ -1,4 +1,4 @@
-//=-- GRExprEngineExperimentalChecks.h ------------------------------*- C++ -*-=
+//=-- ExperimentalChecks.cpp --------------------------------------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -8,27 +8,24 @@
//===----------------------------------------------------------------------===//
//
// This file defines functions to instantiate and register experimental
-// checks in GRExprEngine.
+// checks in ExprEngine.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "GRExprEngineExperimentalChecks.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
+#include "InternalChecks.h"
+#include "ExperimentalChecks.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
using namespace clang;
+using namespace ento;
-void clang::RegisterExperimentalChecks(GRExprEngine &Eng) {
+void ento::RegisterExperimentalChecks(ExprEngine &Eng) {
// These are checks that never belong as internal checks
- // within GRExprEngine.
- RegisterCStringChecker(Eng);
- RegisterMallocChecker(Eng);
- RegisterPthreadLockChecker(Eng);
- RegisterStreamChecker(Eng);
- RegisterUnreachableCodeChecker(Eng);
+ // within ExprEngine.
+ RegisterMallocChecker(Eng); // ArrayBoundChecker depends on this.
}
-void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
+void ento::RegisterExperimentalInternalChecks(ExprEngine &Eng) {
// These are internal checks that should eventually migrate to
// RegisterInternalChecks() once they have been further tested.
@@ -37,8 +34,4 @@ void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
RegisterArrayBoundChecker(Eng);
RegisterCastSizeChecker(Eng);
- RegisterCastToStructChecker(Eng);
- RegisterFixedAddressChecker(Eng);
- RegisterPointerArithChecker(Eng);
- RegisterPointerSubChecker(Eng);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.h
new file mode 100644
index 0000000..1f38ad7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExperimentalChecks.h
@@ -0,0 +1,31 @@
+//=-- ExperimentalChecks.h ----------------------------------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to instantiate and register experimental
+// checks in ExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ExprEngine_EXPERIMENTAL_CHECKS
+#define LLVM_CLANG_GR_ExprEngine_EXPERIMENTAL_CHECKS
+
+namespace clang {
+
+namespace ento {
+
+class ExprEngine;
+
+void RegisterAnalyzerStatsChecker(ExprEngine &Eng);
+void RegisterMallocChecker(ExprEngine &Eng);
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprEngine.cpp
index feb826e..ab8d564 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ExprEngine.cpp
@@ -1,4 +1,4 @@
-//=-- GRExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//=-- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
//
// The LLVM Compiler Infrastructure
//
@@ -12,12 +12,16 @@
// functions and build the ExplodedGraph at the expression level.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+
+// FIXME: Restructure checker registration.
+#include "InternalChecks.h"
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
@@ -34,6 +38,7 @@
#endif
using namespace clang;
+using namespace ento;
using llvm::dyn_cast;
using llvm::dyn_cast_or_null;
using llvm::cast;
@@ -57,119 +62,11 @@ static inline Selector GetNullarySelector(const char* name, ASTContext& Ctx) {
return Ctx.Selectors.getSelector(0, &II);
}
-
-static QualType GetCalleeReturnType(const CallExpr *CE) {
- const Expr *Callee = CE->getCallee();
- QualType T = Callee->getType();
- if (const PointerType *PT = T->getAs<PointerType>()) {
- const FunctionType *FT = PT->getPointeeType()->getAs<FunctionType>();
- T = FT->getResultType();
- }
- else {
- const BlockPointerType *BT = T->getAs<BlockPointerType>();
- T = BT->getPointeeType()->getAs<FunctionType>()->getResultType();
- }
- return T;
-}
-
-static bool CalleeReturnsReference(const CallExpr *CE) {
- return (bool) GetCalleeReturnType(CE)->getAs<ReferenceType>();
-}
-
-static bool ReceiverReturnsReference(const ObjCMessageExpr *ME) {
- const ObjCMethodDecl *MD = ME->getMethodDecl();
- if (!MD)
- return false;
- return MD->getResultType()->getAs<ReferenceType>();
-}
-
-#ifndef NDEBUG
-static bool ReceiverReturnsReferenceOrRecord(const ObjCMessageExpr *ME) {
- const ObjCMethodDecl *MD = ME->getMethodDecl();
- if (!MD)
- return false;
- QualType T = MD->getResultType();
- return T->getAs<RecordType>() || T->getAs<ReferenceType>();
-}
-
-static bool CalleeReturnsReferenceOrRecord(const CallExpr *CE) {
- QualType T = GetCalleeReturnType(CE);
- return T->getAs<ReferenceType>() || T->getAs<RecordType>();
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// Batch auditor. DEPRECATED.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class MappedBatchAuditor : public GRSimpleAPICheck {
- typedef llvm::ImmutableList<GRSimpleAPICheck*> Checks;
- typedef llvm::DenseMap<void*,Checks> MapTy;
-
- MapTy M;
- Checks::Factory F;
- Checks AllStmts;
-
-public:
- MappedBatchAuditor(llvm::BumpPtrAllocator& Alloc) :
- F(Alloc), AllStmts(F.GetEmptyList()) {}
-
- virtual ~MappedBatchAuditor() {
- llvm::DenseSet<GRSimpleAPICheck*> AlreadyVisited;
-
- for (MapTy::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
- for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E;++I){
-
- GRSimpleAPICheck* check = *I;
-
- if (AlreadyVisited.count(check))
- continue;
-
- AlreadyVisited.insert(check);
- delete check;
- }
- }
-
- void AddCheck(GRSimpleAPICheck *A, Stmt::StmtClass C) {
- assert (A && "Check cannot be null.");
- void* key = reinterpret_cast<void*>((uintptr_t) C);
- MapTy::iterator I = M.find(key);
- M[key] = F.Concat(A, I == M.end() ? F.GetEmptyList() : I->second);
- }
-
- void AddCheck(GRSimpleAPICheck *A) {
- assert (A && "Check cannot be null.");
- AllStmts = F.Concat(A, AllStmts);
- }
-
- virtual bool Audit(ExplodedNode* N, GRStateManager& VMgr) {
- // First handle the auditors that accept all statements.
- bool isSink = false;
- for (Checks::iterator I = AllStmts.begin(), E = AllStmts.end(); I!=E; ++I)
- isSink |= (*I)->Audit(N, VMgr);
-
- // Next handle the auditors that accept only specific statements.
- const Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
- void* key = reinterpret_cast<void*>((uintptr_t) S->getStmtClass());
- MapTy::iterator MI = M.find(key);
- if (MI != M.end()) {
- for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E; ++I)
- isSink |= (*I)->Audit(N, VMgr);
- }
-
- return isSink;
- }
-};
-
-} // end anonymous namespace
-
//===----------------------------------------------------------------------===//
// Checker worklist routines.
//===----------------------------------------------------------------------===//
-void GRExprEngine::CheckerVisit(const Stmt *S, ExplodedNodeSet &Dst,
+void ExprEngine::CheckerVisit(const Stmt *S, ExplodedNodeSet &Dst,
ExplodedNodeSet &Src, CallbackKind Kind) {
// Determine if we already have a cached 'CheckersOrdered' vector
@@ -249,27 +146,66 @@ void GRExprEngine::CheckerVisit(const Stmt *S, ExplodedNodeSet &Dst,
// automatically.
}
-void GRExprEngine::CheckerEvalNilReceiver(const ObjCMessageExpr *ME,
- ExplodedNodeSet &Dst,
- const GRState *state,
- ExplodedNode *Pred) {
- bool Evaluated = false;
+void ExprEngine::CheckerVisitObjCMessage(const ObjCMessage &msg,
+ ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Src,
+ bool isPrevisit) {
+
+ if (Checkers.empty()) {
+ Dst.insert(Src);
+ return;
+ }
+
+ ExplodedNodeSet Tmp;
+ ExplodedNodeSet *PrevSet = &Src;
+
+ for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E; ++I)
+ {
+ ExplodedNodeSet *CurrSet = 0;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp) ? &Src : &Tmp;
+ CurrSet->clear();
+ }
+
+ void *tag = I->first;
+ Checker *checker = I->second;
+
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI)
+ checker->GR_visitObjCMessage(*CurrSet, *Builder, *this, msg,
+ *NI, tag, isPrevisit);
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+
+ // Don't autotransition. The CheckerContext objects should do this
+ // automatically.
+}
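Editorial note: the loop above threads every node through every registered checker by ping-ponging between two scratch node sets, so each checker only ever sees the predecessors produced by the checker before it, and the final checker writes straight into Dst. A minimal stand-alone sketch of that buffering pattern, using plain std::set and lambdas as stand-ins for the analyzer's ExplodedNodeSet and Checker types (all names here are illustrative, not the analyzer's API):

    #include <functional>
    #include <iostream>
    #include <set>
    #include <vector>

    // Stand-in for ExplodedNodeSet: a set of integer "node" ids.
    using NodeSet = std::set<int>;
    // Stand-in for a checker callback: expands one predecessor node.
    using CheckerFn = std::function<void(NodeSet &out, int pred)>;

    void chainCheckers(const std::vector<CheckerFn> &checkers,
                       NodeSet &Src, NodeSet &Dst) {
      if (checkers.empty()) {
        Dst.insert(Src.begin(), Src.end());
        return;
      }
      NodeSet Tmp;
      NodeSet *PrevSet = &Src;
      for (size_t i = 0, e = checkers.size(); i != e; ++i) {
        NodeSet *CurrSet;
        if (i + 1 == e)
          CurrSet = &Dst;  // the last checker writes straight to Dst
        else {
          // Reuse whichever of Src/Tmp is not currently being read.
          CurrSet = (PrevSet == &Tmp) ? &Src : &Tmp;
          CurrSet->clear();
        }
        for (int pred : *PrevSet)
          checkers[i](*CurrSet, pred);
        PrevSet = CurrSet;  // the next checker consumes this output
      }
    }

    int main() {
      std::vector<CheckerFn> cs = {
        [](NodeSet &out, int p) { out.insert(p + 1); },
        [](NodeSet &out, int p) { out.insert(p * 10); },
      };
      NodeSet src = {1, 2}, dst;
      chainCheckers(cs, src, dst);
      for (int n : dst)
        std::cout << n << ' ';  // prints: 20 30
    }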
+
+void ExprEngine::CheckerEvalNilReceiver(const ObjCMessage &msg,
+ ExplodedNodeSet &Dst,
+ const GRState *state,
+ ExplodedNode *Pred) {
+ bool evaluated = false;
ExplodedNodeSet DstTmp;
for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end();I!=E;++I) {
void *tag = I->first;
Checker *checker = I->second;
- if (checker->GR_EvalNilReceiver(DstTmp, *Builder, *this, ME, Pred, state,
+ if (checker->GR_evalNilReceiver(DstTmp, *Builder, *this, msg, Pred, state,
tag)) {
- Evaluated = true;
+ evaluated = true;
break;
} else
// The checker didn't evaluate the expr. Restore the Dst.
DstTmp.clear();
}
- if (Evaluated)
+ if (evaluated)
Dst.insert(DstTmp);
else
Dst.insert(Pred);
@@ -278,35 +214,35 @@ void GRExprEngine::CheckerEvalNilReceiver(const ObjCMessageExpr *ME,
// CheckerEvalCall returns true if one of the checkers processed the node.
// This may return void when all call evaluation logic goes to some checker
// in the future.
-bool GRExprEngine::CheckerEvalCall(const CallExpr *CE,
+bool ExprEngine::CheckerEvalCall(const CallExpr *CE,
ExplodedNodeSet &Dst,
ExplodedNode *Pred) {
- bool Evaluated = false;
+ bool evaluated = false;
ExplodedNodeSet DstTmp;
for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end();I!=E;++I) {
void *tag = I->first;
Checker *checker = I->second;
- if (checker->GR_EvalCallExpr(DstTmp, *Builder, *this, CE, Pred, tag)) {
- Evaluated = true;
+ if (checker->GR_evalCallExpr(DstTmp, *Builder, *this, CE, Pred, tag)) {
+ evaluated = true;
break;
} else
// The checker didn't evaluate the expr. Restore the DstTmp set.
DstTmp.clear();
}
- if (Evaluated)
+ if (evaluated)
Dst.insert(DstTmp);
else
Dst.insert(Pred);
- return Evaluated;
+ return evaluated;
}
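Editorial note: CheckerEvalNilReceiver and CheckerEvalCall both implement a "first responder" protocol: checkers are consulted in registration order, the first one that evaluates the expression wins and iteration stops, and a checker that declines must leave the scratch set empty so the next one starts clean. A compilable sketch of that protocol under simplified, invented types (the int payloads stand in for evaluated states):

    #include <functional>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Stand-in for GR_evalCallExpr: a checker either fully evaluates the
    // call and returns a result, or declines with nullopt.
    using CallEvaluator = std::function<std::optional<int>(int callId)>;

    int evalCall(const std::vector<CallEvaluator> &checkers, int callId) {
      for (const auto &checker : checkers)
        if (auto result = checker(callId))
          return *result;  // first checker to claim the call wins
      return -1;           // nobody claimed it: default modeling
    }

    int main() {
      std::vector<CallEvaluator> cs = {
        [](int id) -> std::optional<int> {
          if (id == 7) return 42;    // only claims call #7
          return std::nullopt;
        },
        [](int id) -> std::optional<int> { return id * 2; },
      };
      std::cout << evalCall(cs, 7) << '\n';  // 42
      std::cout << evalCall(cs, 3) << '\n';  // 6
    }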
// FIXME: This is largely copy-paste from CheckerVisit(). Need to
// unify.
-void GRExprEngine::CheckerVisitBind(const Stmt *StoreE, ExplodedNodeSet &Dst,
+void ExprEngine::CheckerVisitBind(const Stmt *StoreE, ExplodedNodeSet &Dst,
ExplodedNodeSet &Src, SVal location,
SVal val, bool isPrevisit) {
@@ -347,16 +283,16 @@ void GRExprEngine::CheckerVisitBind(const Stmt *StoreE, ExplodedNodeSet &Dst,
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
-static void RegisterInternalChecks(GRExprEngine &Eng) {
+static void RegisterInternalChecks(ExprEngine &Eng) {
// Register internal "built-in" BugTypes with the BugReporter. These BugTypes
// are different than what probably many checks will do since they don't
- // create BugReports on-the-fly but instead wait until GRExprEngine finishes
+ // create BugReports on-the-fly but instead wait until ExprEngine finishes
// analyzing a function. Generation of BugReport objects is done via a call
// to 'FlushReports' from BugReporter.
// The following checks do not need to have their associated BugTypes
// explicitly registered with the BugReporter. If they issue any BugReports,
// their associated BugType will get registered with the BugReporter
- // automatically. Note that the check itself is owned by the GRExprEngine
+ // automatically. Note that the check itself is owned by the ExprEngine
// object.
RegisterAdjustedReturnValueChecker(Eng);
// CallAndMessageChecker should be registered before AttrNonNullChecker,
@@ -372,28 +308,24 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
RegisterUndefBranchChecker(Eng);
RegisterUndefCapturedBlockVarChecker(Eng);
RegisterUndefResultChecker(Eng);
- RegisterStackAddrLeakChecker(Eng);
// This is not a checker yet.
RegisterNoReturnFunctionChecker(Eng);
RegisterBuiltinFunctionChecker(Eng);
RegisterOSAtomicChecker(Eng);
- RegisterUnixAPIChecker(Eng);
- RegisterMacOSXAPIChecker(Eng);
}
-GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
+ExprEngine::ExprEngine(AnalysisManager &mgr, TransferFuncs *tf)
: AMgr(mgr),
- CoreEngine(*this),
- G(CoreEngine.getGraph()),
+ Engine(*this),
+ G(Engine.getGraph()),
Builder(NULL),
StateMgr(getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator(),
*this),
SymMgr(StateMgr.getSymbolManager()),
- ValMgr(StateMgr.getValueManager()),
- SVator(ValMgr.getSValuator()),
- CurrentStmt(NULL),
+ svalBuilder(StateMgr.getSValBuilder()),
+ EntryNode(NULL), currentStmt(NULL),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
RaiseSel(GetNullarySelector("raise", getContext())),
BR(mgr, *this), TF(tf) {
@@ -403,9 +335,16 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
// FIXME: Eventually remove the TF object entirely.
TF->RegisterChecks(*this);
TF->RegisterPrinters(getStateManager().Printers);
+
+ mgr.getCheckerManager()->registerCheckersToEngine(*this);
+
+ if (mgr.shouldEagerlyTrimExplodedGraph()) {
+ // Enable eager node reclamation when constructing the ExplodedGraph.
+ G.enableNodeReclamation();
+ }
}
-GRExprEngine::~GRExprEngine() {
+ExprEngine::~ExprEngine() {
BR.FlushReports();
delete [] NSExceptionInstanceRaiseSelectors;
@@ -422,21 +361,7 @@ GRExprEngine::~GRExprEngine() {
// Utility methods.
//===----------------------------------------------------------------------===//
-void GRExprEngine::AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C) {
- if (!BatchAuditor)
- BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
-
- ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A, C);
-}
-
-void GRExprEngine::AddCheck(GRSimpleAPICheck *A) {
- if (!BatchAuditor)
- BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
-
- ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A);
-}
-
-const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
+const GRState* ExprEngine::getInitialState(const LocationContext *InitLoc) {
const GRState *state = StateMgr.getInitialState(InitLoc);
// Preconditions.
@@ -462,8 +387,8 @@ const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
break;
SVal V = state->getSVal(loc::MemRegionVal(R));
- SVal Constraint_untested = EvalBinOp(state, BO_GT, V,
- ValMgr.makeZeroVal(T),
+ SVal Constraint_untested = evalBinOp(state, BO_GT, V,
+ svalBuilder.makeZeroVal(T),
getContext().IntTy);
DefinedOrUnknownSVal *Constraint =
@@ -472,7 +397,7 @@ const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
if (!Constraint)
break;
- if (const GRState *newState = state->Assume(*Constraint, true))
+ if (const GRState *newState = state->assume(*Constraint, true))
state = newState;
break;
@@ -487,7 +412,7 @@ const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
if (const Loc *LV = dyn_cast<Loc>(&V)) {
// Assume that the pointer value in 'self' is non-null.
- state = state->Assume(*LV, true);
+ state = state->assume(*LV, true);
assert(state && "'self' cannot be null");
}
}
@@ -500,9 +425,9 @@ const GRState* GRExprEngine::getInitialState(const LocationContext *InitLoc) {
// Top-level transfer function logic (Dispatcher).
//===----------------------------------------------------------------------===//
-/// EvalAssume - Called by ConstraintManager. Used to call checker-specific
+/// evalAssume - Called by ConstraintManager. Used to call checker-specific
/// logic for handling assumptions on symbolic values.
-const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
+const GRState *ExprEngine::processAssume(const GRState *state, SVal cond,
bool assumption) {
// Determine if we already have a cached 'CheckersOrdered' vector
// specifically tailored for processing assumptions. This
@@ -510,7 +435,7 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
CheckersOrdered *CO = &Checkers;
llvm::OwningPtr<CheckersOrdered> NewCO;
- CallbackTag K = GetCallbackTag(ProcessAssumeCallback);
+ CallbackTag K = GetCallbackTag(processAssumeCallback);
CheckersOrdered *& CO_Ref = COCache[K];
if (!CO_Ref) {
@@ -536,15 +461,16 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
Checker *C = I->second;
bool respondsToCallback = true;
- state = C->EvalAssume(state, cond, assumption, &respondsToCallback);
+ state = C->evalAssume(state, cond, assumption, &respondsToCallback);
- // Check if we're building the cache of checkers that care about Assumes.
+ // Check if we're building the cache of checkers that care about
+ // assumptions.
if (NewCO.get() && respondsToCallback)
NewCO->push_back(*I);
}
// If we got through all the checkers, and we built a list of those that
- // care about Assumes, save it.
+ // care about assumptions, save it.
if (NewCO.get())
CO_Ref = NewCO.take();
}
@@ -553,10 +479,10 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
if (!state)
return NULL;
- return TF->EvalAssume(state, cond, assumption);
+ return TF->evalAssume(state, cond, assumption);
}
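Editorial note: processAssume keeps COCache, a per-callback-kind cache. The first time an assumption is processed it walks all checkers, records which ones set respondsToCallback, and saves that subset so later assumptions skip the indifferent checkers entirely. A self-contained sketch of the same lazy-filtering idea with invented Handler types:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <vector>

    struct Handler {
      const char *name;
      // Returns true if the handler actually responded to the callback.
      std::function<bool(int)> onAssume;
    };

    // Cache keyed by callback kind: the first dispatch consults every
    // handler and records the responders; later dispatches skip the rest.
    static std::map<int, std::vector<Handler *>> responderCache;

    void dispatchAssume(int kind, std::vector<Handler> &all, int value) {
      auto cached = responderCache.find(kind);
      if (cached != responderCache.end()) {
        for (Handler *h : cached->second)
          h->onAssume(value);       // fast path: responders only
        return;
      }
      std::vector<Handler *> responders;
      for (Handler &h : all)
        if (h.onAssume(value))      // slow path: ask everyone once
          responders.push_back(&h);
      responderCache[kind] = responders;
    }

    int main() {
      std::vector<Handler> handlers = {
        {"null-deref", [](int v) { return v != 0; }},
        {"stats",      [](int)   { return false;  }},  // never responds
      };
      dispatchAssume(0, handlers, 1);  // builds the cache for kind 0
      dispatchAssume(0, handlers, 2);  // only "null-deref" consulted now
      std::cout << responderCache[0].size() << '\n';  // 1
    }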
-bool GRExprEngine::WantsRegionChangeUpdate(const GRState* state) {
+bool ExprEngine::wantsRegionChangeUpdate(const GRState* state) {
CallbackTag K = GetCallbackTag(EvalRegionChangesCallback);
CheckersOrdered *CO = COCache[K];
@@ -565,7 +491,7 @@ bool GRExprEngine::WantsRegionChangeUpdate(const GRState* state) {
for (CheckersOrdered::iterator I = CO->begin(), E = CO->end(); I != E; ++I) {
Checker *C = I->second;
- if (C->WantsRegionChangeUpdate(state))
+ if (C->wantsRegionChangeUpdate(state))
return true;
}
@@ -573,10 +499,10 @@ bool GRExprEngine::WantsRegionChangeUpdate(const GRState* state) {
}
const GRState *
-GRExprEngine::ProcessRegionChanges(const GRState *state,
+ExprEngine::processRegionChanges(const GRState *state,
const MemRegion * const *Begin,
const MemRegion * const *End) {
- // FIXME: Most of this method is copy-pasted from ProcessAssume.
+ // FIXME: Most of this method is copy-pasted from processAssume.
// Determine if we already have a cached 'CheckersOrdered' vector
// specifically tailored for processing region changes. This
@@ -625,29 +551,48 @@ GRExprEngine::ProcessRegionChanges(const GRState *state,
return state;
}
-void GRExprEngine::ProcessEndWorklist(bool hasWorkRemaining) {
+void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end();
I != E; ++I) {
I->second->VisitEndAnalysis(G, BR, *this);
}
}
-void GRExprEngine::ProcessStmt(const CFGElement CE,GRStmtNodeBuilder& builder) {
- CurrentStmt = CE.getStmt();
+void ExprEngine::processCFGElement(const CFGElement E,
+ StmtNodeBuilder& builder) {
+ switch (E.getKind()) {
+ case CFGElement::Statement:
+ ProcessStmt(E.getAs<CFGStmt>(), builder);
+ break;
+ case CFGElement::Initializer:
+ ProcessInitializer(E.getAs<CFGInitializer>(), builder);
+ break;
+ case CFGElement::ImplicitDtor:
+ ProcessImplicitDtor(E.getAs<CFGImplicitDtor>(), builder);
+ break;
+ default:
+ // Suppress compiler warning.
+ llvm_unreachable("Unexpected CFGElement kind.");
+ }
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S, StmtNodeBuilder& builder) {
+ // Reclaim any unnecessary nodes in the ExplodedGraph.
+ G.reclaimRecentlyAllocatedNodes();
+ // Recycle any unused states in the GRStateManager.
+ StateMgr.recycleUnusedStates();
+
+ currentStmt = S.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- CurrentStmt->getLocStart(),
+ currentStmt->getLocStart(),
"Error evaluating statement");
Builder = &builder;
- EntryNode = builder.getBasePredecessor();
-
- // Set up our simple checks.
- if (BatchAuditor)
- Builder->setAuditor(BatchAuditor.get());
+ EntryNode = builder.getPredecessor();
// Create the cleaned state.
const LocationContext *LC = EntryNode->getLocationContext();
- SymbolReaper SymReaper(LC, CurrentStmt, SymMgr);
+ SymbolReaper SymReaper(LC, currentStmt, SymMgr);
if (AMgr.shouldPurgeDead()) {
const GRState *St = EntryNode->getState();
@@ -659,7 +604,7 @@ void GRExprEngine::ProcessStmt(const CFGElement CE,GRStmtNodeBuilder& builder) {
}
const StackFrameContext *SFC = LC->getCurrentStackFrame();
- CleanedState = StateMgr.RemoveDeadBindings(St, SFC, SymReaper);
+ CleanedState = StateMgr.removeDeadBindings(St, SFC, SymReaper);
} else {
CleanedState = EntryNode->getState();
}
@@ -671,14 +616,14 @@ void GRExprEngine::ProcessStmt(const CFGElement CE,GRStmtNodeBuilder& builder) {
Tmp.Add(EntryNode);
else {
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- SaveOr OldHasGen(Builder->HasGeneratedNode);
+ SaveOr OldHasGen(Builder->hasGeneratedNode);
SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
Builder->PurgingDeadSymbols = true;
// FIXME: This should soon be removed.
ExplodedNodeSet Tmp2;
- getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode,
+ getTF().evalDeadSymbols(Tmp2, *this, *Builder, EntryNode,
CleanedState, SymReaper);
if (Checkers.empty())
@@ -700,38 +645,34 @@ void GRExprEngine::ProcessStmt(const CFGElement CE,GRStmtNodeBuilder& builder) {
Checker *checker = I->second;
for (ExplodedNodeSet::iterator NI = SrcSet->begin(), NE = SrcSet->end();
NI != NE; ++NI)
- checker->GR_EvalDeadSymbols(*DstSet, *Builder, *this, CurrentStmt,
+ checker->GR_evalDeadSymbols(*DstSet, *Builder, *this, currentStmt,
*NI, SymReaper, tag);
SrcSet = DstSet;
}
}
- if (!Builder->BuildSinks && !Builder->HasGeneratedNode)
+ if (!Builder->BuildSinks && !Builder->hasGeneratedNode)
Tmp.Add(EntryNode);
}
bool HasAutoGenerated = false;
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
ExplodedNodeSet Dst;
// Set the cleaned state.
Builder->SetCleanedState(*I == EntryNode ? CleanedState : GetState(*I));
// Visit the statement.
- if (CE.asLValue())
- VisitLValue(cast<Expr>(CurrentStmt), *I, Dst);
- else
- Visit(CurrentStmt, *I, Dst);
+ Visit(currentStmt, *I, Dst);
// Do we need to auto-generate a node? We only need to do this to generate
- // a node with a "cleaned" state; GRCoreEngine will actually handle
+ // a node with a "cleaned" state; CoreEngine will actually handle
// auto-transitions for other cases.
if (Dst.size() == 1 && *Dst.begin() == EntryNode
- && !Builder->HasGeneratedNode && !HasAutoGenerated) {
+ && !Builder->hasGeneratedNode && !HasAutoGenerated) {
HasAutoGenerated = true;
- builder.generateNode(CurrentStmt, GetState(EntryNode), *I);
+ builder.generateNode(currentStmt, GetState(EntryNode), *I);
}
}
@@ -739,22 +680,135 @@ void GRExprEngine::ProcessStmt(const CFGElement CE,GRStmtNodeBuilder& builder) {
CleanedState = NULL;
EntryNode = NULL;
- CurrentStmt = 0;
+ currentStmt = 0;
Builder = NULL;
}
-void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
+void ExprEngine::ProcessInitializer(const CFGInitializer Init,
+ StmtNodeBuilder &builder) {
+ // We don't set EntryNode and currentStmt here, and we don't clean up state.
+ const CXXCtorInitializer *BMI = Init.getInitializer();
+
+ ExplodedNode *pred = builder.getPredecessor();
+
+ const StackFrameContext *stackFrame =
+   cast<StackFrameContext>(pred->getLocationContext());
+ const CXXConstructorDecl *decl =
+   cast<CXXConstructorDecl>(stackFrame->getDecl());
+ const CXXThisRegion *thisReg = getCXXThisRegion(decl, stackFrame);
+
+ SVal thisVal = pred->getState()->getSVal(thisReg);
+
+ if (BMI->isAnyMemberInitializer()) {
+ ExplodedNodeSet Dst;
+
+ // Evaluate the initializer.
+ Visit(BMI->getInit(), pred, Dst);
+
+ for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I){
+ ExplodedNode *Pred = *I;
+ const GRState *state = Pred->getState();
+
+ const FieldDecl *FD = BMI->getAnyMember();
+
+ SVal FieldLoc = state->getLValue(FD, thisVal);
+ SVal InitVal = state->getSVal(BMI->getInit());
+ state = state->bindLoc(FieldLoc, InitVal);
+
+ // Use a custom node building process.
+ PostInitializer PP(BMI, stackFrame);
+ // The builder automatically adds the generated node to the deferred set,
+ // which is processed in the builder's destructor.
+ builder.generateNode(PP, state, Pred);
+ }
+ return;
+ }
+
+ assert(BMI->isBaseInitializer());
+
+ // Get the base class declaration.
+ const CXXConstructExpr *ctorExpr = cast<CXXConstructExpr>(BMI->getInit());
+
+ // Create the base object region.
+ SVal baseVal =
+ getStoreManager().evalDerivedToBase(thisVal, ctorExpr->getType());
+ const MemRegion *baseReg = baseVal.getAsRegion();
+ assert(baseReg);
+ Builder = &builder;
+ ExplodedNodeSet dst;
+ VisitCXXConstructExpr(ctorExpr, baseReg, pred, dst);
+}
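Editorial note: ProcessInitializer distinguishes the two flavors of constructor initializer. Member initializers bind a value to a field of *this directly, while base initializers are modeled as a CXXConstructExpr run against the base-class subobject region obtained via evalDerivedToBase(). A small hypothetical source example showing both paths:

    struct Base {
      Base(int n) {}
    };

    struct Derived : Base {
      int field;
      // 'Base(n)' is a base initializer: handled by creating the base
      // subobject region and visiting its CXXConstructExpr.
      // 'field(n + 1)' is a member initializer: the engine evaluates the
      // init expression and binds its value to the field's lvalue.
      Derived(int n) : Base(n), field(n + 1) {}
    };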
+
+void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
+ StmtNodeBuilder &builder) {
+ Builder = &builder;
+
+ switch (D.getDtorKind()) {
+ case CFGElement::AutomaticObjectDtor:
+ ProcessAutomaticObjDtor(cast<CFGAutomaticObjDtor>(D), builder);
+ break;
+ case CFGElement::BaseDtor:
+ ProcessBaseDtor(cast<CFGBaseDtor>(D), builder);
+ break;
+ case CFGElement::MemberDtor:
+ ProcessMemberDtor(cast<CFGMemberDtor>(D), builder);
+ break;
+ case CFGElement::TemporaryDtor:
+ ProcessTemporaryDtor(cast<CFGTemporaryDtor>(D), builder);
+ break;
+ default:
+ llvm_unreachable("Unexpected dtor kind.");
+ }
+}
+
+void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor dtor,
+ StmtNodeBuilder &builder) {
+ ExplodedNode *pred = builder.getPredecessor();
+ const GRState *state = pred->getState();
+ const VarDecl *varDecl = dtor.getVarDecl();
+
+ QualType varType = varDecl->getType();
+
+ if (const ReferenceType *refType = varType->getAs<ReferenceType>())
+ varType = refType->getPointeeType();
+
+ const CXXRecordDecl *recordDecl = varType->getAsCXXRecordDecl();
+ assert(recordDecl && "getAsCXXRecordDecl failed");
+ const CXXDestructorDecl *dtorDecl = recordDecl->getDestructor();
+
+ Loc dest = state->getLValue(varDecl, pred->getLocationContext());
+
+ ExplodedNodeSet dstSet;
+ VisitCXXDestructor(dtorDecl, cast<loc::MemRegionVal>(dest).getRegion(),
+ dtor.getTriggerStmt(), pred, dstSet);
+}
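Editorial note: ProcessAutomaticObjDtor looks up the destructor to run when an automatic (stack) object leaves scope, stripping a reference type down to its pointee so lifetime-extending references are covered too. Illustrative, hypothetical source that this path applies to:

    struct Resource {
      ~Resource() {}
    };

    Resource make() { return Resource(); }

    void scope() {
      Resource r;                    // CFGAutomaticObjDtor for 'r'
      const Resource &ref = make();  // reference: the engine strips the
                                     // ReferenceType to reach the pointee's
                                     // CXXRecordDecl and its destructor
    } // ~Resource() is modeled here for both objects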
+
+void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
+ StmtNodeBuilder &builder) {
+}
+
+void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
+ StmtNodeBuilder &builder) {
+}
+
+void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
+ StmtNodeBuilder &builder) {
+}
+
+void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
+ // Expressions to ignore.
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParens();
+
// FIXME: add metadata to the CFG so that we can disable
// this check when we KNOW that there is no block-level subexpression.
// The motivation is that this check requires a hashtable lookup.
- if (S != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
+ if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
Dst.Add(Pred);
return;
}
@@ -763,29 +817,35 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
// C++ stuff we don't support yet.
case Stmt::CXXBindTemporaryExprClass:
case Stmt::CXXCatchStmtClass:
- case Stmt::CXXConstructExprClass:
case Stmt::CXXDefaultArgExprClass:
case Stmt::CXXDependentScopeMemberExprClass:
- case Stmt::CXXExprWithTemporariesClass:
+ case Stmt::ExprWithCleanupsClass:
case Stmt::CXXNullPtrLiteralExprClass:
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::CXXTemporaryObjectExprClass:
case Stmt::CXXThrowExprClass:
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
+ case Stmt::CXXUuidofExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
case Stmt::CXXScalarValueInitExprClass:
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
+ case Stmt::BinaryTypeTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
case Stmt::UnresolvedMemberExprClass:
+ case Stmt::CXXNoexceptExprClass:
+ case Stmt::PackExpansionExprClass:
+ case Stmt::SubstNonTypeTemplateParmPackExprClass:
{
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
Builder->BuildSinks = true;
MakeNode(Dst, S, Pred, GetState(Pred));
break;
}
-
+
+ case Stmt::ParenExprClass:
+ llvm_unreachable("ParenExprs already handled.");
// Cases that should never be evaluated simply because they shouldn't
// appear in the CFG.
case Stmt::BreakStmtClass:
@@ -799,15 +859,22 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::LabelStmtClass:
case Stmt::NoStmtClass:
case Stmt::NullStmtClass:
- case Stmt::SwitchCaseClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
break;
case Stmt::GNUNullExprClass: {
- MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, ValMgr.makeNull()));
+ MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, svalBuilder.makeNull()));
break;
}
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::ObjCPropertyRefExprClass:
+ VisitObjCPropertyRefExpr(cast<ObjCPropertyRefExpr>(S), Pred, Dst);
+ break;
+
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::ExtVectorElementExprClass:
@@ -815,21 +882,18 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::ImplicitValueInitExprClass:
case Stmt::ObjCAtCatchStmtClass:
case Stmt::ObjCAtFinallyStmtClass:
- case Stmt::ObjCAtSynchronizedStmtClass:
case Stmt::ObjCAtTryStmtClass:
case Stmt::ObjCEncodeExprClass:
- case Stmt::ObjCImplicitSetterGetterRefExprClass:
case Stmt::ObjCIsaExprClass:
- case Stmt::ObjCPropertyRefExprClass:
case Stmt::ObjCProtocolExprClass:
case Stmt::ObjCSelectorExprClass:
case Stmt::ObjCStringLiteralClass:
- case Stmt::ObjCSuperExprClass:
case Stmt::ParenListExprClass:
case Stmt::PredefinedExprClass:
case Stmt::ShuffleVectorExprClass:
- case Stmt::TypesCompatibleExprClass:
case Stmt::VAArgExprClass:
+ case Stmt::CUDAKernelCallExprClass:
+ case Stmt::OpaqueValueExprClass:
// Fall through.
// Cases we intentionally don't evaluate, since they don't need
@@ -839,20 +903,23 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::CharacterLiteralClass:
case Stmt::CXXBoolLiteralExprClass:
case Stmt::FloatingLiteralClass:
+ case Stmt::SizeOfPackExprClass:
Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
break;
case Stmt::ArraySubscriptExprClass:
- VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst, false);
+ VisitLvalArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst);
break;
case Stmt::AsmStmtClass:
VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
break;
- case Stmt::BlockDeclRefExprClass:
- VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(S), Pred, Dst, false);
+ case Stmt::BlockDeclRefExprClass: {
+ const BlockDeclRefExpr *BE = cast<BlockDeclRefExpr>(S);
+ VisitCommonDeclRefExpr(BE, BE->getDecl(), Pred, Dst);
break;
+ }
case Stmt::BlockExprClass:
VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
@@ -860,7 +927,6 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::BinaryOperatorClass: {
const BinaryOperator* B = cast<BinaryOperator>(S);
-
if (B->isLogicalOp()) {
VisitLogicalExpr(B, Pred, Dst);
break;
@@ -874,19 +940,26 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
if (AMgr.shouldEagerlyAssume() &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
- VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp, false);
- EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
+ evalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
}
else
- VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst, false);
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
break;
}
- case Stmt::CallExprClass:
- case Stmt::CXXOperatorCallExprClass: {
+ case Stmt::CallExprClass: {
const CallExpr* C = cast<CallExpr>(S);
- VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst, false);
+ VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst);
+ break;
+ }
+
+ case Stmt::CXXConstructExprClass: {
+ const CXXConstructExpr *C = cast<CXXConstructExpr>(S);
+ // For block-level CXXConstructExpr, we don't have a destination region.
+ // Let VisitCXXConstructExpr() create one.
+ VisitCXXConstructExpr(C, 0, Pred, Dst);
break;
}
@@ -896,6 +969,12 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
}
+ case Stmt::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *C = cast<CXXOperatorCallExpr>(S);
+ VisitCXXOperatorCallExpr(C, Pred, Dst);
+ break;
+ }
+
case Stmt::CXXNewExprClass: {
const CXXNewExpr *NE = cast<CXXNewExpr>(S);
VisitCXXNewExpr(NE, Pred, Dst);
@@ -917,16 +996,18 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
}
case Stmt::CompoundAssignOperatorClass:
- VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst, false);
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
break;
case Stmt::CompoundLiteralExprClass:
- VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst, false);
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst);
break;
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: { // '?' operator
- const ConditionalOperator* C = cast<ConditionalOperator>(S);
- VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ const AbstractConditionalOperator *C
+ = cast<AbstractConditionalOperator>(S);
+ VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
break;
}
@@ -934,9 +1015,11 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
break;
- case Stmt::DeclRefExprClass:
- VisitDeclRefExpr(cast<DeclRefExpr>(S), Pred, Dst, false);
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DE = cast<DeclRefExpr>(S);
+ VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
break;
+ }
case Stmt::DeclStmtClass:
VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
@@ -956,7 +1039,7 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::CXXConstCastExprClass:
case Stmt::CXXFunctionalCastExprClass: {
const CastExpr* C = cast<CastExpr>(S);
- VisitCast(C, C->getSubExpr(), Pred, Dst, false);
+ VisitCast(C, C->getSubExpr(), Pred, Dst);
break;
}
@@ -971,11 +1054,10 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
case Stmt::MemberExprClass:
- VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst, false);
+ VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst);
break;
-
case Stmt::ObjCIvarRefExprClass:
- VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst, false);
+ VisitLvalObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst);
break;
case Stmt::ObjCForCollectionStmtClass:
@@ -983,7 +1065,7 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
case Stmt::ObjCMessageExprClass:
- VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst, false);
+ VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst);
break;
case Stmt::ObjCAtThrowStmtClass: {
@@ -995,10 +1077,6 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
}
- case Stmt::ParenExprClass:
- Visit(cast<ParenExpr>(S)->getSubExpr()->IgnoreParens(), Pred, Dst);
- break;
-
case Stmt::ReturnStmtClass:
VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
break;
@@ -1032,9 +1110,12 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
}
- case Stmt::StringLiteralClass:
- VisitLValue(cast<StringLiteral>(S), Pred, Dst);
- break;
+ case Stmt::StringLiteralClass: {
+ const GRState* state = GetState(Pred);
+ SVal V = state->getLValue(cast<StringLiteral>(S));
+ MakeNode(Dst, S, Pred, state->BindExpr(S, V));
+ return;
+ }
case Stmt::SwitchStmtClass:
// This case isn't for branch processing, but for handling the
@@ -1046,11 +1127,11 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
const UnaryOperator *U = cast<UnaryOperator>(S);
if (AMgr.shouldEagerlyAssume()&&(U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
- VisitUnaryOperator(U, Pred, Tmp, false);
- EvalEagerlyAssume(Dst, Tmp, U);
+ VisitUnaryOperator(U, Pred, Tmp);
+ evalEagerlyAssume(Dst, Tmp, U);
}
else
- VisitUnaryOperator(U, Pred, Dst, false);
+ VisitUnaryOperator(U, Pred, Dst);
break;
}
@@ -1062,162 +1143,34 @@ void GRExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
}
}
-void GRExprEngine::VisitLValue(const Expr* Ex, ExplodedNode* Pred,
- ExplodedNodeSet& Dst) {
-
- PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- Ex->getLocStart(),
- "Error evaluating statement");
-
-
- Ex = Ex->IgnoreParens();
-
- if (Ex != CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(Ex)){
- Dst.Add(Pred);
- return;
- }
-
- switch (Ex->getStmtClass()) {
- // C++ stuff we don't support yet.
- case Stmt::CXXExprWithTemporariesClass:
- case Stmt::CXXMemberCallExprClass:
- case Stmt::CXXScalarValueInitExprClass: {
- SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- Builder->BuildSinks = true;
- MakeNode(Dst, Ex, Pred, GetState(Pred));
- break;
- }
-
- case Stmt::ArraySubscriptExprClass:
- VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::BinaryOperatorClass:
- case Stmt::CompoundAssignOperatorClass:
- VisitBinaryOperator(cast<BinaryOperator>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::BlockDeclRefExprClass:
- VisitBlockDeclRefExpr(cast<BlockDeclRefExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::CallExprClass:
- case Stmt::CXXOperatorCallExprClass: {
- const CallExpr *C = cast<CallExpr>(Ex);
- assert(CalleeReturnsReferenceOrRecord(C));
- VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst, true);
- break;
- }
-
- case Stmt::CompoundLiteralExprClass:
- VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::DeclRefExprClass:
- VisitDeclRefExpr(cast<DeclRefExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::ImplicitCastExprClass:
- case Stmt::CStyleCastExprClass: {
- const CastExpr *C = cast<CastExpr>(Ex);
- QualType T = Ex->getType();
- VisitCast(C, C->getSubExpr(), Pred, Dst, true);
- break;
- }
-
- case Stmt::MemberExprClass:
- VisitMemberExpr(cast<MemberExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::ObjCIvarRefExprClass:
- VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(Ex), Pred, Dst, true);
- return;
-
- case Stmt::ObjCMessageExprClass: {
- const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(Ex);
- assert(ReceiverReturnsReferenceOrRecord(ME));
- VisitObjCMessageExpr(ME, Pred, Dst, true);
- return;
- }
-
- case Stmt::ObjCIsaExprClass:
- // FIXME: Do something more intelligent with 'x->isa = ...'.
- // For now, just ignore the assignment.
- return;
-
- case Stmt::ObjCPropertyRefExprClass:
- case Stmt::ObjCImplicitSetterGetterRefExprClass:
- // FIXME: Property assignments are lvalues, but not really "locations".
- // e.g.: self.x = something;
- // Here the "self.x" really can translate to a method call (setter) when
- // the assignment is made. Moreover, the entire assignment expression
- // evaluate to whatever "something" is, not calling the "getter" for
- // the property (which would make sense since it can have side effects).
- // We'll probably treat this as a location, but not one that we can
- // take the address of. Perhaps we need a new SVal class for cases
- // like thsis?
- // Note that we have a similar problem for bitfields, since they don't
- // have "locations" in the sense that we can take their address.
- Dst.Add(Pred);
- return;
-
- case Stmt::StringLiteralClass: {
- const GRState* state = GetState(Pred);
- SVal V = state->getLValue(cast<StringLiteral>(Ex));
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
- return;
- }
-
- case Stmt::UnaryOperatorClass:
- VisitUnaryOperator(cast<UnaryOperator>(Ex), Pred, Dst, true);
- return;
-
- // In C++, binding an rvalue to a reference requires to create an object.
- case Stmt::CXXBoolLiteralExprClass:
- case Stmt::IntegerLiteralClass:
- case Stmt::CharacterLiteralClass:
- case Stmt::FloatingLiteralClass:
- case Stmt::ImaginaryLiteralClass:
- CreateCXXTemporaryObject(Ex, Pred, Dst);
- return;
-
- default: {
- // Arbitrary subexpressions can return aggregate temporaries that
- // can be used in a lvalue context. We need to enhance our support
- // of such temporaries in both the environment and the store, so right
- // now we just do a regular visit.
-
- // NOTE: Do not use 'isAggregateType()' here as CXXRecordDecls that
- // are non-pod are not aggregates.
- assert ((isa<RecordType>(Ex->getType().getDesugaredType()) ||
- isa<ArrayType>(Ex->getType().getDesugaredType())) &&
- "Other kinds of expressions with non-aggregate/union/class types"
- " do not have lvalues.");
-
- Visit(Ex, Pred, Dst);
- }
- }
-}
-
//===----------------------------------------------------------------------===//
// Block entrance. (Update counters).
//===----------------------------------------------------------------------===//
-bool GRExprEngine::ProcessBlockEntrance(const CFGBlock* B,
- const ExplodedNode *Pred,
- GRBlockCounter BC) {
- return BC.getNumVisited(Pred->getLocationContext()->getCurrentStackFrame(),
- B->getBlockID()) < AMgr.getMaxLoop();
+void ExprEngine::processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
+ GenericNodeBuilder<BlockEntrance> &nodeBuilder){
+
+ // FIXME: Refactor this into a checker.
+ const CFGBlock *block = nodeBuilder.getProgramPoint().getBlock();
+ ExplodedNode *pred = nodeBuilder.getPredecessor();
+
+ if (nodeBuilder.getBlockCounter().getNumVisited(
+ pred->getLocationContext()->getCurrentStackFrame(),
+ block->getBlockID()) >= AMgr.getMaxVisit()) {
+
+ static int tag = 0;
+ nodeBuilder.generateNode(pred->getState(), pred, &tag, true);
+ }
}
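Editorial note: processCFGBlockEntrance bounds path exploration. Once a CFG block has been entered getMaxVisit() times on the same stack frame, the builder generates a sink node (the trailing 'true' flag) and the path stops instead of unrolling a loop forever. A sketch of the kind of source this bound governs:

    void loops(int n) {
      // Each iteration re-enters the loop's CFG block on the current
      // stack frame. Once the per-block visit count reaches the
      // analyzer's max-visit bound, the engine emits a sink node and
      // stops exploring further iterations along this path.
      for (int i = 0; i < n; ++i) {
        // body analyzed at most max-visit times per call path
      }
    }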
//===----------------------------------------------------------------------===//
// Generic node creation.
//===----------------------------------------------------------------------===//
-ExplodedNode* GRExprEngine::MakeNode(ExplodedNodeSet& Dst, const Stmt* S,
+ExplodedNode* ExprEngine::MakeNode(ExplodedNodeSet& Dst, const Stmt* S,
ExplodedNode* Pred, const GRState* St,
ProgramPoint::Kind K, const void *tag) {
- assert (Builder && "GRStmtNodeBuilder not present.");
+ assert (Builder && "StmtNodeBuilder not present.");
SaveAndRestore<const void*> OldTag(Builder->Tag);
Builder->Tag = tag;
return Builder->MakeNode(Dst, S, Pred, St, K);
@@ -1227,7 +1180,7 @@ ExplodedNode* GRExprEngine::MakeNode(ExplodedNodeSet& Dst, const Stmt* S,
// Branch processing.
//===----------------------------------------------------------------------===//
-const GRState* GRExprEngine::MarkBranch(const GRState* state,
+const GRState* ExprEngine::MarkBranch(const GRState* state,
const Stmt* Terminator,
bool branchTaken) {
@@ -1255,9 +1208,10 @@ const GRState* GRExprEngine::MarkBranch(const GRState* state,
return state->BindExpr(B, UndefinedVal(Ex));
}
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: { // ?:
-
- const ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
+ const AbstractConditionalOperator* C
+ = cast<AbstractConditionalOperator>(Terminator);
// For ?, if branchTaken == true then the value is either the LHS or
// the condition itself. (GNU extension).
@@ -1265,9 +1219,9 @@ const GRState* GRExprEngine::MarkBranch(const GRState* state,
const Expr* Ex;
if (branchTaken)
- Ex = C->getLHS() ? C->getLHS() : C->getCond();
+ Ex = C->getTrueExpr();
else
- Ex = C->getRHS();
+ Ex = C->getFalseExpr();
return state->BindExpr(C, UndefinedVal(Ex));
}
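Editorial note: for conditional operators, the value recorded for the taken true branch may be the condition expression itself, because GNU C/C++ allows omitting the middle operand. A compilable illustration (the function name is invented; this is a GNU extension accepted by GCC and Clang):

    // 'x ?: y' evaluates to x when x is nonzero, so on the true branch
    // the expression's value is the condition itself.
    int pick(int x, int y) {
      return x ?: y;  // like 'x ? x : y', but x is evaluated only once
    }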
@@ -1321,8 +1275,8 @@ static SVal RecoverCastedSymbol(GRStateManager& StateMgr, const GRState* state,
return state->getSVal(Ex);
}
-void GRExprEngine::ProcessBranch(const Stmt* Condition, const Stmt* Term,
- GRBranchNodeBuilder& builder) {
+void ExprEngine::processBranch(const Stmt* Condition, const Stmt* Term,
+ BranchNodeBuilder& builder) {
// Check for NULL conditions; e.g. "for(;;)"
if (!Condition) {
@@ -1376,7 +1330,7 @@ void GRExprEngine::ProcessBranch(const Stmt* Condition, const Stmt* Term,
// Process the true branch.
if (builder.isFeasible(true)) {
- if (const GRState *state = PrevState->Assume(V, true))
+ if (const GRState *state = PrevState->assume(V, true))
builder.generateNode(MarkBranch(state, Term, true), true);
else
builder.markInfeasible(true);
@@ -1384,16 +1338,16 @@ void GRExprEngine::ProcessBranch(const Stmt* Condition, const Stmt* Term,
// Process the false branch.
if (builder.isFeasible(false)) {
- if (const GRState *state = PrevState->Assume(V, false))
+ if (const GRState *state = PrevState->assume(V, false))
builder.generateNode(MarkBranch(state, Term, false), false);
else
builder.markInfeasible(false);
}
}
-/// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
+/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
-void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
+void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
const GRState *state = builder.getState();
SVal V = state->getSVal(builder.getTarget());
@@ -1405,19 +1359,19 @@ void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
// (3) We have no clue about the label. Dispatch to all targets.
//
- typedef GRIndirectGotoNodeBuilder::iterator iterator;
+ typedef IndirectGotoNodeBuilder::iterator iterator;
if (isa<loc::GotoLabel>(V)) {
- const LabelStmt* L = cast<loc::GotoLabel>(V).getLabel();
+ const LabelDecl *L = cast<loc::GotoLabel>(V).getLabel();
- for (iterator I=builder.begin(), E=builder.end(); I != E; ++I) {
+ for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
if (I.getLabel() == L) {
builder.generateNode(I, state);
return;
}
}
- assert (false && "No block with label.");
+ assert(false && "No block with label.");
return;
}
@@ -1437,11 +1391,11 @@ void GRExprEngine::ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) {
}
-void GRExprEngine::VisitGuardedExpr(const Expr* Ex, const Expr* L,
+void ExprEngine::VisitGuardedExpr(const Expr* Ex, const Expr* L,
const Expr* R,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
- assert(Ex == CurrentStmt &&
+ assert(Ex == currentStmt &&
Pred->getLocationContext()->getCFG()->isBlkExpr(Ex));
const GRState* state = GetState(Pred);
@@ -1457,22 +1411,22 @@ void GRExprEngine::VisitGuardedExpr(const Expr* Ex, const Expr* L,
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, X, true));
}
-/// ProcessEndPath - Called by GRCoreEngine. Used to generate end-of-path
+/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
-void GRExprEngine::ProcessEndPath(GREndPathNodeBuilder& builder) {
- getTF().EvalEndPath(*this, builder);
+void ExprEngine::processEndOfFunction(EndOfFunctionNodeBuilder& builder) {
+ getTF().evalEndPath(*this, builder);
StateMgr.EndPath(builder.getState());
for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){
void *tag = I->first;
Checker *checker = I->second;
- checker->EvalEndPath(builder, tag, *this);
+ checker->evalEndPath(builder, tag, *this);
}
}
-/// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
+/// ProcessSwitch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
-void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
- typedef GRSwitchNodeBuilder::iterator iterator;
+void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
+ typedef SwitchNodeBuilder::iterator iterator;
const GRState* state = builder.getState();
const Expr* CondE = builder.getCondition();
SVal CondV_untested = state->getSVal(CondE);
@@ -1501,7 +1455,7 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
// Sanity checks. These go away in Release builds.
assert(b && V1.Val.isInt() && !V1.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
- b = b; // silence unused variable warning
+ (void)b; // silence unused variable warning
assert(V1.Val.getInt().getBitWidth() ==
getContext().getTypeSize(CondE->getType()));
@@ -1512,7 +1466,7 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
b = E->Evaluate(V2, getContext());
assert(b && V2.Val.isInt() && !V2.HasSideEffects
&& "Case condition must evaluate to an integer constant.");
- b = b; // silence unused variable warning
+ (void)b; // silence unused variable warning
}
else
V2 = V1;
@@ -1523,11 +1477,11 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
do {
nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
- DefinedOrUnknownSVal Res = SVator.EvalEQ(DefaultSt ? DefaultSt : state,
+ DefinedOrUnknownSVal Res = svalBuilder.evalEQ(DefaultSt ? DefaultSt : state,
CondV, CaseVal);
// Now "assume" that the case matches.
- if (const GRState* stateNew = state->Assume(Res, true)) {
+ if (const GRState* stateNew = state->assume(Res, true)) {
builder.generateCaseStmtNode(I, stateNew);
// If CondV evaluates to a constant, then we know that this
@@ -1540,7 +1494,7 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
// Now "assume" that the case doesn't match. Add this state
// to the default state (if it is feasible).
if (DefaultSt) {
- if (const GRState *stateNew = DefaultSt->Assume(Res, false)) {
+ if (const GRState *stateNew = DefaultSt->assume(Res, false)) {
defaultIsFeasible = true;
DefaultSt = stateNew;
}
@@ -1560,30 +1514,37 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
} while (true);
}
- // If we reach here, than we know that the default branch is
- // possible.
- if (defaultIsFeasible) builder.generateDefaultCaseNode(DefaultSt);
-}
+ if (!defaultIsFeasible)
+ return;
-void GRExprEngine::ProcessCallEnter(GRCallEnterNodeBuilder &B) {
- const StackFrameContext *LocCtx
- = AMgr.getStackFrame(B.getCalleeContext(),
- B.getLocationContext(),
- B.getCallExpr(),
- B.getBlock(),
- B.getIndex());
+ // When switching over a value of enum type, the default branch is not
+ // feasible if every enum constant not covered by a 'case:' statement is
+ // itself an infeasible value for the switch condition.
+ //
+ // Note that this isn't as accurate as it could be: even when a particular
+ // enum value has no 'case:' statement, it should not make 'default:'
+ // reachable as long as that value is infeasible for the condition.
+ const SwitchStmt *SS = builder.getSwitch();
+ const Expr *CondExpr = SS->getCond()->IgnoreParenImpCasts();
+ if (CondExpr->getType()->getAs<EnumType>()) {
+ if (SS->isAllEnumCasesCovered())
+ return;
+ }
- const GRState *state = B.getState()->EnterStackFrame(LocCtx);
+ builder.generateDefaultCaseNode(DefaultSt);
+}
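Editorial note: with this change, a switch whose condition has enum type and whose cases cover every enumerator no longer generates a default-branch successor at all. Illustrative, hypothetical source:

    enum Color { Red, Green, Blue };

    int classify(Color c) {
      // isAllEnumCasesCovered() is true here, so the engine treats
      // 'default:' as infeasible for any value of the enum type.
      switch (c) {
      case Red:   return 0;
      case Green: return 1;
      case Blue:  return 2;
      default:    return -1;  // dead, as far as the analyzer is concerned
      }
    }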
- B.GenerateNode(state, LocCtx);
+void ExprEngine::processCallEnter(CallEnterNodeBuilder &B) {
+ const GRState *state = B.getState()->enterStackFrame(B.getCalleeContext());
+ B.generateNode(state);
}
-void GRExprEngine::ProcessCallExit(GRCallExitNodeBuilder &B) {
+void ExprEngine::processCallExit(CallExitNodeBuilder &B) {
const GRState *state = B.getState();
const ExplodedNode *Pred = B.getPredecessor();
- const StackFrameContext *LocCtx =
+ const StackFrameContext *calleeCtx =
cast<StackFrameContext>(Pred->getLocationContext());
- const Stmt *CE = LocCtx->getCallSite();
+ const Stmt *CE = calleeCtx->getCallSite();
// If the callee returns an expression, bind its value to CallExpr.
const Stmt *ReturnedExpr = state->get<ReturnExpr>();
@@ -1596,32 +1557,28 @@ void GRExprEngine::ProcessCallExit(GRCallExitNodeBuilder &B) {
// Bind the constructed object value to CXXConstructExpr.
if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
- const CXXThisRegion *ThisR = getCXXThisRegion(CCE->getConstructor(),LocCtx);
- // We might not have 'this' region in the binding if we didn't inline
- // the ctor call.
+ const CXXThisRegion *ThisR =
+ getCXXThisRegion(CCE->getConstructor()->getParent(), calleeCtx);
+
SVal ThisV = state->getSVal(ThisR);
- loc::MemRegionVal *V = dyn_cast<loc::MemRegionVal>(&ThisV);
- if (V) {
- SVal ObjVal = state->getSVal(V->getRegion());
- assert(isa<nonloc::LazyCompoundVal>(ObjVal));
- state = state->BindExpr(CCE, ObjVal);
- }
+ // Always bind the region to the CXXConstructExpr.
+ state = state->BindExpr(CCE, ThisV);
}
- B.GenerateNode(state);
+ B.generateNode(state);
}
//===----------------------------------------------------------------------===//
// Transfer functions: logical operations ('&&', '||').
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode* Pred,
+void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
assert(B->getOpcode() == BO_LAnd ||
B->getOpcode() == BO_LOr);
- assert(B==CurrentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(B));
+ assert(B==currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(B));
const GRState* state = GetState(Pred);
SVal X = state->getSVal(B);
@@ -1647,19 +1604,19 @@ void GRExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode* Pred,
// value later when necessary. We don't have the machinery in place for
// this right now, and since most logical expressions are used for branches,
// the payoff is not likely to be large. Instead, we do eager evaluation.
- if (const GRState *newState = state->Assume(XD, true))
+ if (const GRState *newState = state->assume(XD, true))
MakeNode(Dst, B, Pred,
- newState->BindExpr(B, ValMgr.makeIntVal(1U, B->getType())));
+ newState->BindExpr(B, svalBuilder.makeIntVal(1U, B->getType())));
- if (const GRState *newState = state->Assume(XD, false))
+ if (const GRState *newState = state->assume(XD, false))
MakeNode(Dst, B, Pred,
- newState->BindExpr(B, ValMgr.makeIntVal(0U, B->getType())));
+ newState->BindExpr(B, svalBuilder.makeIntVal(0U, B->getType())));
}
else {
// We took the LHS expression. Depending on whether we are '&&' or
// '||' we know what the value of the expression is via properties of
// the short-circuiting.
- X = ValMgr.makeIntVal(B->getOpcode() == BO_LAnd ? 0U : 1U,
+ X = svalBuilder.makeIntVal(B->getOpcode() == BO_LAnd ? 0U : 1U,
B->getType());
MakeNode(Dst, B, Pred, state->BindExpr(B, X));
}
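Editorial note: rather than carrying a symbolic boolean for '&&' and '||', the engine eagerly assumes each outcome. When the RHS was evaluated, the state is forked and the expression is bound to a concrete 1 or 0 under the matching assumption; when short-circuiting already decided the answer, the value is known outright. In source terms (hypothetical example):

    int f(int a, int b) {
      // If 'a' is known to be 0, short-circuiting fixes t = 0 at once.
      // Otherwise the engine forks: one successor assumes the result is
      // true and binds t = 1, the other assumes false and binds t = 0.
      int t = a && b;
      return t;
    }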
@@ -1669,13 +1626,13 @@ void GRExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode* Pred,
// Transfer functions: Loads and stores.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
+void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
ExplodedNodeSet Tmp;
CanQualType T = getContext().getCanonicalType(BE->getType());
- SVal V = ValMgr.getBlockPointer(BE->getBlockDecl(), T,
+ SVal V = svalBuilder.getBlockPointer(BE->getBlockDecl(), T,
Pred->getLocationContext());
MakeNode(Tmp, BE, Pred, GetState(Pred)->BindExpr(BE, V),
@@ -1685,86 +1642,59 @@ void GRExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
CheckerVisit(BE, Dst, Tmp, PostVisitStmtCallback);
}
-void GRExprEngine::VisitDeclRefExpr(const DeclRefExpr *Ex, ExplodedNode *Pred,
- ExplodedNodeSet &Dst, bool asLValue) {
- VisitCommonDeclRefExpr(Ex, Ex->getDecl(), Pred, Dst, asLValue);
-}
-
-void GRExprEngine::VisitBlockDeclRefExpr(const BlockDeclRefExpr *Ex,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst, bool asLValue) {
- VisitCommonDeclRefExpr(Ex, Ex->getDecl(), Pred, Dst, asLValue);
-}
-
-void GRExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst, bool asLValue) {
-
+void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
const GRState *state = GetState(Pred);
if (const VarDecl* VD = dyn_cast<VarDecl>(D)) {
-
+ assert(Ex->isLValue());
SVal V = state->getLValue(VD, Pred->getLocationContext());
- if (asLValue) {
- // For references, the 'lvalue' is the pointer address stored in the
- // reference region.
- if (VD->getType()->isReferenceType()) {
- if (const MemRegion *R = V.getAsRegion())
- V = state->getSVal(R);
- else
- V = UnknownVal();
- }
-
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
- ProgramPoint::PostLValueKind);
+ // For references, the 'lvalue' is the pointer address stored in the
+ // reference region.
+ if (VD->getType()->isReferenceType()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
}
- else
- EvalLoad(Dst, Ex, Pred, state, V);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
+ ProgramPoint::PostLValueKind);
return;
- } else if (const EnumConstantDecl* ED = dyn_cast<EnumConstantDecl>(D)) {
- assert(!asLValue && "EnumConstantDecl does not have lvalue.");
-
- SVal V = ValMgr.makeIntVal(ED->getInitVal());
+ }
+ if (const EnumConstantDecl* ED = dyn_cast<EnumConstantDecl>(D)) {
+ assert(!Ex->isLValue());
+ SVal V = svalBuilder.makeIntVal(ED->getInitVal());
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
return;
-
- } else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D)) {
- // This code is valid regardless of the value of 'isLValue'.
- SVal V = ValMgr.getFunctionPointer(FD);
+ }
+ if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D)) {
+ SVal V = svalBuilder.getFunctionPointer(FD);
MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
ProgramPoint::PostLValueKind);
return;
}
-
assert (false &&
"ValueDecl support for this ValueDecl not implemented.");
}
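Editorial note: for a DeclRefExpr naming a reference variable, the "lvalue" the engine produces is not the reference's own storage but the address held in it, hence the extra load from the reference region above. A hypothetical source example showing the distinction:

    void refs(int &r, int x) {
      // The lvalue of 'r' is the location it refers to: the engine reads
      // the pointer stored in the reference region rather than using the
      // region itself, so '&r' and stores through 'r' hit the referent.
      int *p = &r;
      r = x;
      *p = x + 1;
    }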
/// VisitArraySubscriptExpr - Transfer function for array accesses
-void GRExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr* A,
- ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue){
+void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr* A,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst){
const Expr* Base = A->getBase()->IgnoreParens();
const Expr* Idx = A->getIdx()->IgnoreParens();
+
+ // Evaluate the base.
ExplodedNodeSet Tmp;
-
- if (Base->getType()->isVectorType()) {
- // For vector types get its lvalue.
- // FIXME: This may not be correct. Is the rvalue of a vector its location?
- // In fact, I think this is just a hack. We need to get the right
- // semantics.
- VisitLValue(Base, Pred, Tmp);
- }
- else
- Visit(Base, Pred, Tmp); // Get Base's rvalue, which should be an LocVal.
+ Visit(Base, Pred, Tmp);
for (ExplodedNodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
ExplodedNodeSet Tmp2;
Visit(Idx, *I1, Tmp2); // Evaluate the index.
-
ExplodedNodeSet Tmp3;
CheckerVisit(A, Tmp3, Tmp2, PreVisitStmtCallback);
@@ -1772,49 +1702,54 @@ void GRExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr* A,
const GRState* state = GetState(*I2);
SVal V = state->getLValue(A->getType(), state->getSVal(Idx),
state->getSVal(Base));
-
- if (asLValue)
- MakeNode(Dst, A, *I2, state->BindExpr(A, V),
- ProgramPoint::PostLValueKind);
- else
- EvalLoad(Dst, A, *I2, state, V);
+ assert(A->isLValue());
+ MakeNode(Dst, A, *I2, state->BindExpr(A, V), ProgramPoint::PostLValueKind);
}
}
}
/// VisitMemberExpr - Transfer function for member expressions.
-void GRExprEngine::VisitMemberExpr(const MemberExpr* M, ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue) {
-
- Expr* Base = M->getBase()->IgnoreParens();
- ExplodedNodeSet Tmp;
+void ExprEngine::VisitMemberExpr(const MemberExpr* M, ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
- if (M->isArrow())
- Visit(Base, Pred, Tmp); // p->f = ... or ... = p->f
- else
- VisitLValue(Base, Pred, Tmp); // x.f = ... or ... = x.f
+ Expr *baseExpr = M->getBase()->IgnoreParens();
+ ExplodedNodeSet dstBase;
+ Visit(baseExpr, Pred, dstBase);
- FieldDecl *Field = dyn_cast<FieldDecl>(M->getMemberDecl());
- if (!Field) // FIXME: skipping member expressions for non-fields
+ FieldDecl *field = dyn_cast<FieldDecl>(M->getMemberDecl());
+ if (!field) // FIXME: skipping member expressions for non-fields
return;
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ for (ExplodedNodeSet::iterator I = dstBase.begin(), E = dstBase.end();
+ I != E; ++I) {
const GRState* state = GetState(*I);
+ SVal baseExprVal = state->getSVal(baseExpr);
+ if (isa<nonloc::LazyCompoundVal>(baseExprVal) ||
+ isa<nonloc::CompoundVal>(baseExprVal) ||
+ // FIXME: This value can arise when a symbol is conjured for an unknown
+ // temporary struct object; see test/Analysis/fields.c:
+ // (p = getit()).x
+ isa<nonloc::SymbolVal>(baseExprVal)) {
+ MakeNode(Dst, M, *I, state->BindExpr(M, UnknownVal()));
+ continue;
+ }
+
    // FIXME: Should we insert some assumption logic in here to determine
    // if "Base" is a valid piece of memory? Previously we introduced this
    // assumption later, when using FieldOffset lvals (which we no longer have).
- SVal L = state->getLValue(Field, state->getSVal(Base));
- if (asLValue)
+ // For all other cases, compute an lvalue.
+ SVal L = state->getLValue(field, baseExprVal);
+ if (M->isLValue())
MakeNode(Dst, M, *I, state->BindExpr(M, L), ProgramPoint::PostLValueKind);
else
- EvalLoad(Dst, M, *I, state, L);
+ evalLoad(Dst, M, *I, state, L);
}
}
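
A sketch of the non-location bases the early exit above conservatively maps to UnknownVal, mirroring the FIXME's test/Analysis/fields.c reference (helper names assumed):

    struct Point { int x, y; };
    Point getit();              // assumed helper
    int use(Point p) {
      return (p = getit()).x;   // the base evaluates to a compound or
                                // conjured value, not a memory location
    }
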
-/// EvalBind - Handle the semantics of binding a value to a specific location.
-/// This method is used by EvalStore and (soon) VisitDeclStmt, and others.
-void GRExprEngine::EvalBind(ExplodedNodeSet& Dst, const Stmt* StoreE,
+/// evalBind - Handle the semantics of binding a value to a specific location.
+/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
+void ExprEngine::evalBind(ExplodedNodeSet& Dst, const Stmt* StoreE,
ExplodedNode* Pred, const GRState* state,
SVal location, SVal Val, bool atDeclInit) {
@@ -1852,21 +1787,21 @@ void GRExprEngine::EvalBind(ExplodedNodeSet& Dst, const Stmt* StoreE,
}
}
- // The next thing to do is check if the GRTransferFuncs object wants to
+ // The next thing to do is check if the TransferFuncs object wants to
// update the state based on the new binding. If the GRTransferFunc object
// doesn't do anything, just auto-propagate the current state.
// NOTE: We use 'AssignE' for the location of the PostStore if 'AssignE'
  // is non-NULL, since checkers typically care about the entire assignment
  // expression rather than just the location being stored to.
- GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, newState, StoreE,
- newState != state);
+ StmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, newState, StoreE,
+ true);
- getTF().EvalBind(BuilderRef, location, Val);
+ getTF().evalBind(BuilderRef, location, Val);
}
}
-/// EvalStore - Handle the semantics of a store via an assignment.
+/// evalStore - Handle the semantics of a store via an assignment.
/// @param Dst The node set to store generated state nodes
/// @param AssignE The assignment expression if the store happens in an
/// assignment.
@@ -1874,17 +1809,28 @@ void GRExprEngine::EvalBind(ExplodedNodeSet& Dst, const Stmt* StoreE,
/// @param state The current simulation state
/// @param location The location to store the value
/// @param Val The value to be stored
-void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, const Expr *AssignE,
+void ExprEngine::evalStore(ExplodedNodeSet& Dst, const Expr *AssignE,
const Expr* LocationE,
ExplodedNode* Pred,
const GRState* state, SVal location, SVal Val,
const void *tag) {
- assert(Builder && "GRStmtNodeBuilder must be defined.");
+ assert(Builder && "StmtNodeBuilder must be defined.");
+
+ // Proceed with the store. We use AssignE as the anchor for the PostStore
+ // ProgramPoint if it is non-NULL, and LocationE otherwise.
+ const Expr *StoreE = AssignE ? AssignE : LocationE;
+
+ if (isa<loc::ObjCPropRef>(location)) {
+ loc::ObjCPropRef prop = cast<loc::ObjCPropRef>(location);
+ ExplodedNodeSet src = Pred;
+ return VisitObjCMessage(ObjCPropertySetter(prop.getPropRefExpr(),
+ StoreE, Val), src, Dst);
+ }
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- EvalLocation(Tmp, LocationE, Pred, state, location, tag, false);
+ evalLocation(Tmp, LocationE, Pred, state, location, tag, false);
if (Tmp.empty())
return;
@@ -1893,20 +1839,23 @@ void GRExprEngine::EvalStore(ExplodedNodeSet& Dst, const Expr *AssignE,
SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind,
ProgramPoint::PostStoreKind);
- SaveAndRestore<const void*> OldTag(Builder->Tag, tag);
-
- // Proceed with the store. We use AssignE as the anchor for the PostStore
- // ProgramPoint if it is non-NULL, and LocationE otherwise.
- const Expr *StoreE = AssignE ? AssignE : LocationE;
for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
- EvalBind(Dst, StoreE, *NI, GetState(*NI), location, Val);
+ evalBind(Dst, StoreE, *NI, GetState(*NI), location, Val);
}
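
To illustrate the AssignE/LocationE anchoring described above (a sketch, not from the patch):

    int x, y;
    void anchor() {
      x = y + 1;  // AssignE is the whole assignment, so the PostStore
                  // ProgramPoint hangs off it rather than the lvalue 'x'
      ++x;        // no AssignE: the unary expression itself is passed as
                  // LocationE and anchors the PostStore
    }
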
-void GRExprEngine::EvalLoad(ExplodedNodeSet& Dst, const Expr *Ex,
+void ExprEngine::evalLoad(ExplodedNodeSet& Dst, const Expr *Ex,
ExplodedNode* Pred,
const GRState* state, SVal location,
const void *tag, QualType LoadTy) {
+ assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
+
+ if (isa<loc::ObjCPropRef>(location)) {
+ loc::ObjCPropRef prop = cast<loc::ObjCPropRef>(location);
+ ExplodedNodeSet src = Pred;
+ return VisitObjCMessage(ObjCPropertyGetter(prop.getPropRefExpr(), Ex),
+ src, Dst);
+ }
// Are we loading from a region? This actually results in two loads; one
// to fetch the address of the referenced value and one to fetch the
@@ -1918,30 +1867,30 @@ void GRExprEngine::EvalLoad(ExplodedNodeSet& Dst, const Expr *Ex,
if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
static int loadReferenceTag = 0;
ExplodedNodeSet Tmp;
- EvalLoadCommon(Tmp, Ex, Pred, state, location, &loadReferenceTag,
+ evalLoadCommon(Tmp, Ex, Pred, state, location, &loadReferenceTag,
getContext().getPointerType(RT->getPointeeType()));
// Perform the load from the referenced value.
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
state = GetState(*I);
location = state->getSVal(Ex);
- EvalLoadCommon(Dst, Ex, *I, state, location, tag, LoadTy);
+ evalLoadCommon(Dst, Ex, *I, state, location, tag, LoadTy);
}
return;
}
}
- EvalLoadCommon(Dst, Ex, Pred, state, location, tag, LoadTy);
+ evalLoadCommon(Dst, Ex, Pred, state, location, tag, LoadTy);
}
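
The reference case above performs the double load it describes; in source form (a hedged sketch):

    int g = 42;
    int &r = g;   // 'r' has ReferenceType
    int load() {
      return r;   // load #1 fetches the referent's address (&g);
                  // load #2, via evalLoadCommon, fetches the value
    }
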
-void GRExprEngine::EvalLoadCommon(ExplodedNodeSet& Dst, const Expr *Ex,
+void ExprEngine::evalLoadCommon(ExplodedNodeSet& Dst, const Expr *Ex,
ExplodedNode* Pred,
const GRState* state, SVal location,
const void *tag, QualType LoadTy) {
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- EvalLocation(Tmp, Ex, Pred, state, location, tag, true);
+ evalLocation(Tmp, Ex, Pred, state, location, tag, true);
if (Tmp.empty())
return;
@@ -1949,26 +1898,27 @@ void GRExprEngine::EvalLoadCommon(ExplodedNodeSet& Dst, const Expr *Ex,
assert(!location.isUndef());
SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
- SaveAndRestore<const void*> OldTag(Builder->Tag);
// Proceed with the load.
for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
state = GetState(*NI);
+
if (location.isUnknown()) {
// This is important. We must nuke the old binding.
MakeNode(Dst, Ex, *NI, state->BindExpr(Ex, UnknownVal()),
ProgramPoint::PostLoadKind, tag);
}
else {
- SVal V = state->getSVal(cast<Loc>(location), LoadTy.isNull() ?
- Ex->getType() : LoadTy);
- MakeNode(Dst, Ex, *NI, state->BindExpr(Ex, V), ProgramPoint::PostLoadKind,
- tag);
+ if (LoadTy.isNull())
+ LoadTy = Ex->getType();
+ SVal V = state->getSVal(cast<Loc>(location), LoadTy);
+ MakeNode(Dst, Ex, *NI, state->bindExprAndLocation(Ex, location, V),
+ ProgramPoint::PostLoadKind, tag);
}
}
}
-void GRExprEngine::EvalLocation(ExplodedNodeSet &Dst, const Stmt *S,
+void ExprEngine::evalLocation(ExplodedNodeSet &Dst, const Stmt *S,
ExplodedNode* Pred,
const GRState* state, SVal location,
const void *tag, bool isLoad) {
@@ -1999,7 +1949,7 @@ void GRExprEngine::EvalLocation(ExplodedNodeSet &Dst, const Stmt *S,
NI != NE; ++NI) {
// Use the 'state' argument only when the predecessor node is the
// same as Pred. This allows us to catch updates to the state.
- checker->GR_VisitLocation(*CurrSet, *Builder, *this, S, *NI,
+ checker->GR_visitLocation(*CurrSet, *Builder, *this, S, *NI,
*NI == Pred ? state : GetState(*NI),
location, tag, isLoad);
}
@@ -2009,7 +1959,7 @@ void GRExprEngine::EvalLocation(ExplodedNodeSet &Dst, const Stmt *S,
}
}
-bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
+bool ExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
ExplodedNode *Pred) {
const GRState *state = GetState(Pred);
const Expr *Callee = CE->getCallee();
@@ -2021,8 +1971,12 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
// Check if the function definition is in the same translation unit.
if (FD->hasBody(FD)) {
+ const StackFrameContext *stackFrame =
+ AMgr.getStackFrame(AMgr.getAnalysisContext(FD),
+ Pred->getLocationContext(),
+ CE, Builder->getBlock(), Builder->getIndex());
// Now we have the definition of the callee, create a CallEnter node.
- CallEnter Loc(CE, AMgr.getAnalysisContext(FD), Pred->getLocationContext());
+ CallEnter Loc(CE, stackFrame, Pred->getLocationContext());
ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
Dst.Add(N);
@@ -2031,11 +1985,13 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
// Check if we can find the function definition in other translation units.
if (AMgr.hasIndexer()) {
- const AnalysisContext *C = AMgr.getAnalysisContextInAnotherTU(FD);
+ AnalysisContext *C = AMgr.getAnalysisContextInAnotherTU(FD);
if (C == 0)
return false;
-
- CallEnter Loc(CE, C, Pred->getLocationContext());
+ const StackFrameContext *stackFrame =
+ AMgr.getStackFrame(C, Pred->getLocationContext(),
+ CE, Builder->getBlock(), Builder->getIndex());
+ CallEnter Loc(CE, stackFrame, Pred->getLocationContext());
ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
Dst.Add(N);
return true;
@@ -2044,10 +2000,10 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
return false;
}
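
A sketch of when this inlining path fires, assuming the callee's body is visible in the same translation unit:

    static int inc(int n) { return n + 1; }  // FD->hasBody(FD) succeeds
    int caller(int n) { return inc(n); }     // a CallEnter node into a fresh
                                             // StackFrameContext is generated
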
-void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
+void ExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
CallExpr::const_arg_iterator AI,
CallExpr::const_arg_iterator AE,
- ExplodedNodeSet& Dst, bool asLValue) {
+ ExplodedNodeSet& Dst) {
// Determine the type of function we're calling (if available).
const FunctionProtoType *Proto = NULL;
@@ -2055,41 +2011,9 @@ void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
if (const PointerType *FnTypePtr = FnType->getAs<PointerType>())
Proto = FnTypePtr->getPointeeType()->getAs<FunctionProtoType>();
- // Create a worklist to process the arguments.
- llvm::SmallVector<CallExprWLItem, 20> WorkList;
- WorkList.reserve(AE - AI);
- WorkList.push_back(CallExprWLItem(AI, Pred));
-
+ // Evaluate the arguments.
ExplodedNodeSet ArgsEvaluated;
-
- while (!WorkList.empty()) {
- CallExprWLItem Item = WorkList.back();
- WorkList.pop_back();
-
- if (Item.I == AE) {
- ArgsEvaluated.insert(Item.N);
- continue;
- }
-
- // Evaluate the argument.
- ExplodedNodeSet Tmp;
- const unsigned ParamIdx = Item.I - AI;
-
- bool VisitAsLvalue = false;
- if (Proto && ParamIdx < Proto->getNumArgs())
- VisitAsLvalue = Proto->getArgType(ParamIdx)->isReferenceType();
-
- if (VisitAsLvalue)
- VisitLValue(*Item.I, Item.N, Tmp);
- else
- Visit(*Item.I, Item.N, Tmp);
-
- // Enqueue evaluating the next argument on the worklist.
- ++(Item.I);
-
- for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
- WorkList.push_back(CallExprWLItem(Item.I, *NI));
- }
+ evalArguments(CE->arg_begin(), CE->arg_end(), Proto, Pred, ArgsEvaluated);
// Now process the call itself.
ExplodedNodeSet DstTmp;
@@ -2108,7 +2032,6 @@ void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
// to see if they can evaluate the function call.
ExplodedNodeSet DstTmp3;
-
for (ExplodedNodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end();
DI != DE; ++DI) {
@@ -2133,19 +2056,19 @@ void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
DI_Checker != DE_Checker; ++DI_Checker) {
// Dispatch to the plug-in transfer function.
- unsigned OldSize = DstTmp3.size();
- SaveOr OldHasGen(Builder->HasGeneratedNode);
+ unsigned oldSize = DstTmp3.size();
+ SaveOr OldHasGen(Builder->hasGeneratedNode);
Pred = *DI_Checker;
// Dispatch to transfer function logic to handle the call itself.
// FIXME: Allow us to chain together transfer functions.
- assert(Builder && "GRStmtNodeBuilder must be defined.");
- getTF().EvalCall(DstTmp3, *this, *Builder, CE, L, Pred);
+ assert(Builder && "StmtNodeBuilder must be defined.");
+ getTF().evalCall(DstTmp3, *this, *Builder, CE, L, Pred);
    // Handle the case where no nodes were generated. Auto-generate a node
    // that contains the updated state if we aren't generating sinks.
- if (!Builder->BuildSinks && DstTmp3.size() == OldSize &&
- !Builder->HasGeneratedNode)
+ if (!Builder->BuildSinks && DstTmp3.size() == oldSize &&
+ !Builder->hasGeneratedNode)
MakeNode(DstTmp3, CE, Pred, state);
}
}
@@ -2153,29 +2076,35 @@ void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
// Finally, perform the post-condition check of the CallExpr and store
// the created nodes in 'Dst'.
- // If the callee returns a reference and we want an rvalue, skip this check
- // and do the load.
- if (!(!asLValue && CalleeReturnsReference(CE))) {
- CheckerVisit(CE, Dst, DstTmp3, PostVisitStmtCallback);
- return;
- }
+ CheckerVisit(CE, Dst, DstTmp3, PostVisitStmtCallback);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C dot-syntax to access a property.
+//===----------------------------------------------------------------------===//
+
+void ExprEngine::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet dstBase;
+
+ // Visit the receiver (if any).
+ if (Ex->isObjectReceiver())
+ Visit(Ex->getBase(), Pred, dstBase);
+ else
+ dstBase = Pred;
- // Handle the case where the called function returns a reference but
- // we expect an rvalue. For such cases, convert the reference to
- // an rvalue.
- // FIXME: This conversion doesn't actually happen unless the result
- // of CallExpr is consumed by another expression.
- ExplodedNodeSet DstTmp4;
- CheckerVisit(CE, DstTmp4, DstTmp3, PostVisitStmtCallback);
- QualType LoadTy = CE->getType();
+ ExplodedNodeSet dstPropRef;
- static int *ConvertToRvalueTag = 0;
- for (ExplodedNodeSet::iterator NI = DstTmp4.begin(), NE = DstTmp4.end();
- NI!=NE; ++NI) {
- const GRState *state = GetState(*NI);
- EvalLoad(Dst, CE, *NI, state, state->getSVal(CE),
- &ConvertToRvalueTag, LoadTy);
+  // Using the base, bind the property reference to a loc::ObjCPropRef value.
+ for (ExplodedNodeSet::iterator I = dstBase.begin(), E = dstBase.end();
+ I!=E; ++I) {
+ ExplodedNode *nodeBase = *I;
+ const GRState *state = GetState(nodeBase);
+ MakeNode(dstPropRef, Ex, *I, state->BindExpr(Ex, loc::ObjCPropRef(Ex)));
}
+
+ Dst.insert(dstPropRef);
}
//===----------------------------------------------------------------------===//
@@ -2185,7 +2114,7 @@ void GRExprEngine::VisitCall(const CallExpr* CE, ExplodedNode* Pred,
static std::pair<const void*,const void*> EagerlyAssumeTag
= std::pair<const void*,const void*>(&EagerlyAssumeTag,static_cast<void*>(0));
-void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
const Expr *Ex) {
for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
ExplodedNode *Pred = *I;
@@ -2203,18 +2132,18 @@ void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
SVal V = state->getSVal(Ex);
if (nonloc::SymExprVal *SEV = dyn_cast<nonloc::SymExprVal>(&V)) {
// First assume that the condition is true.
- if (const GRState *stateTrue = state->Assume(*SEV, true)) {
+ if (const GRState *stateTrue = state->assume(*SEV, true)) {
stateTrue = stateTrue->BindExpr(Ex,
- ValMgr.makeIntVal(1U, Ex->getType()));
+ svalBuilder.makeIntVal(1U, Ex->getType()));
Dst.Add(Builder->generateNode(PostStmtCustom(Ex,
&EagerlyAssumeTag, Pred->getLocationContext()),
stateTrue, Pred));
}
// Next, assume that the condition is false.
- if (const GRState *stateFalse = state->Assume(*SEV, false)) {
+ if (const GRState *stateFalse = state->assume(*SEV, false)) {
stateFalse = stateFalse->BindExpr(Ex,
- ValMgr.makeIntVal(0U, Ex->getType()));
+ svalBuilder.makeIntVal(0U, Ex->getType()));
Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag,
Pred->getLocationContext()),
stateFalse, Pred));
@@ -2226,34 +2155,58 @@ void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
}
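
What eager assumption buys, sketched at the source level (illustrative only): the state is bifurcated at the comparison itself rather than at a later branch.

    int flag(int a) {
      int b = (a > 0);  // a nonloc::SymExprVal; the engine forks:
                        //   stateTrue:  b == 1, with 'a > 0' assumed
                        //   stateFalse: b == 0, with 'a <= 0' assumed
      return b;
    }
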
//===----------------------------------------------------------------------===//
-// Transfer function: Objective-C ivar references.
+// Transfer function: Objective-C @synchronized.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitObjCIvarRefExpr(const ObjCIvarRefExpr* Ex,
- ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue) {
+void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
- const Expr* Base = cast<Expr>(Ex->getBase());
+ // The mutex expression is a CFGElement, so we don't need to explicitly
+ // visit it since it will already be processed.
+
+ // Pre-visit the ObjCAtSynchronizedStmt.
ExplodedNodeSet Tmp;
- Visit(Base, Pred, Tmp);
+ Tmp.Add(Pred);
+ CheckerVisit(S, Dst, Tmp, PreVisitStmtCallback);
+}
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- const GRState* state = GetState(*I);
- SVal BaseVal = state->getSVal(Base);
- SVal location = state->getLValue(Ex->getDecl(), BaseVal);
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C ivar references.
+//===----------------------------------------------------------------------===//
- if (asLValue)
- MakeNode(Dst, Ex, *I, state->BindExpr(Ex, location));
- else
- EvalLoad(Dst, Ex, *I, state, location);
+void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr* Ex,
+ ExplodedNode* Pred,
+ ExplodedNodeSet& Dst) {
+
+ // Visit the base expression, which is needed for computing the lvalue
+ // of the ivar.
+ ExplodedNodeSet dstBase;
+ const Expr *baseExpr = Ex->getBase();
+ Visit(baseExpr, Pred, dstBase);
+
+ ExplodedNodeSet dstIvar;
+
+ // Using the base, compute the lvalue of the instance variable.
+ for (ExplodedNodeSet::iterator I = dstBase.begin(), E = dstBase.end();
+ I!=E; ++I) {
+ ExplodedNode *nodeBase = *I;
+ const GRState *state = GetState(nodeBase);
+ SVal baseVal = state->getSVal(baseExpr);
+ SVal location = state->getLValue(Ex->getDecl(), baseVal);
+ MakeNode(dstIvar, Ex, *I, state->BindExpr(Ex, location));
}
+
+ // Perform the post-condition check of the ObjCIvarRefExpr and store
+ // the created nodes in 'Dst'.
+ CheckerVisit(Ex, Dst, dstIvar, PostVisitStmtCallback);
}
//===----------------------------------------------------------------------===//
// Transfer function: Objective-C fast enumeration 'for' statements.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt* S,
+void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt* S,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
// ObjCForCollectionStmts are processed in two places. This method
@@ -2293,22 +2246,21 @@ void GRExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt* S,
}
ExplodedNodeSet Tmp;
- VisitLValue(cast<Expr>(elem), Pred, Tmp);
-
+ Visit(cast<Expr>(elem), Pred, Tmp);
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
VisitObjCForCollectionStmtAux(S, *I, Dst, state->getSVal(elem));
}
}
-void GRExprEngine::VisitObjCForCollectionStmtAux(const ObjCForCollectionStmt* S,
+void ExprEngine::VisitObjCForCollectionStmtAux(const ObjCForCollectionStmt* S,
ExplodedNode* Pred, ExplodedNodeSet& Dst,
SVal ElementV) {
// Check if the location we are writing back to is a null pointer.
const Stmt* elem = S->getElement();
ExplodedNodeSet Tmp;
- EvalLocation(Tmp, elem, Pred, GetState(Pred), ElementV, NULL, false);
+ evalLocation(Tmp, elem, Pred, GetState(Pred), ElementV, NULL, false);
if (Tmp.empty())
return;
@@ -2318,11 +2270,11 @@ void GRExprEngine::VisitObjCForCollectionStmtAux(const ObjCForCollectionStmt* S,
const GRState *state = GetState(Pred);
// Handle the case where the container still has elements.
- SVal TrueV = ValMgr.makeTruthVal(1);
+ SVal TrueV = svalBuilder.makeTruthVal(1);
const GRState *hasElems = state->BindExpr(S, TrueV);
// Handle the case where the container has no elements.
- SVal FalseV = ValMgr.makeTruthVal(0);
+ SVal FalseV = svalBuilder.makeTruthVal(0);
const GRState *noElems = state->BindExpr(S, FalseV);
if (loc::MemRegionVal* MV = dyn_cast<loc::MemRegionVal>(&ElementV))
@@ -2331,14 +2283,14 @@ void GRExprEngine::VisitObjCForCollectionStmtAux(const ObjCForCollectionStmt* S,
// container. We will do this with dispatch logic to the store.
// For now, just 'conjure' up a symbolic value.
QualType T = R->getValueType();
- assert(Loc::IsLocType(T));
+ assert(Loc::isLocType(T));
unsigned Count = Builder->getCurrentBlockCount();
SymbolRef Sym = SymMgr.getConjuredSymbol(elem, T, Count);
- SVal V = ValMgr.makeLoc(Sym);
+ SVal V = svalBuilder.makeLoc(Sym);
hasElems = hasElems->bindLoc(ElementV, V);
// Bind the location to 'nil' on the false branch.
- SVal nilV = ValMgr.makeIntVal(0, T);
+ SVal nilV = svalBuilder.makeIntVal(0, T);
noElems = noElems->bindLoc(ElementV, nilV);
}
@@ -2363,9 +2315,9 @@ public:
};
} // end anonymous namespace
-void GRExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
+void ExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue){
+ ExplodedNodeSet& Dst){
  // Create a worklist to process the arguments.
llvm::SmallVector<ObjCMsgWLItem, 20> WL;
@@ -2408,23 +2360,30 @@ void GRExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
WL.push_back(ObjCMsgWLItem(Item.I, *NI));
}
- // Now that the arguments are processed, handle the previsits checks.
+ // Now that the arguments are processed, handle the ObjC message.
+ VisitObjCMessage(ME, ArgsEvaluated, Dst);
+}
+
+void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
+ ExplodedNodeSet &Src, ExplodedNodeSet& Dst) {
+
+ // Handle the previsits checks.
ExplodedNodeSet DstPrevisit;
- CheckerVisit(ME, DstPrevisit, ArgsEvaluated, PreVisitStmtCallback);
+ CheckerVisitObjCMessage(msg, DstPrevisit, Src, /*isPreVisit=*/true);
  // Proceed with evaluating the message expression.
- ExplodedNodeSet DstEval;
+ ExplodedNodeSet dstEval;
for (ExplodedNodeSet::iterator DI = DstPrevisit.begin(),
DE = DstPrevisit.end(); DI != DE; ++DI) {
- Pred = *DI;
+ ExplodedNode *Pred = *DI;
bool RaisesException = false;
- unsigned OldSize = DstEval.size();
+ unsigned oldSize = dstEval.size();
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- SaveOr OldHasGen(Builder->HasGeneratedNode);
+ SaveOr OldHasGen(Builder->hasGeneratedNode);
- if (const Expr *Receiver = ME->getInstanceReceiver()) {
+ if (const Expr *Receiver = msg.getInstanceReceiver()) {
const GRState *state = GetState(Pred);
// Bifurcate the state into nil and non-nil ones.
@@ -2432,18 +2391,18 @@ void GRExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
cast<DefinedOrUnknownSVal>(state->getSVal(Receiver));
const GRState *notNilState, *nilState;
- llvm::tie(notNilState, nilState) = state->Assume(receiverVal);
+ llvm::tie(notNilState, nilState) = state->assume(receiverVal);
      // There are three cases: can be nil or non-nil, must be nil, must be
      // non-nil. We handle the must-be-nil case here and merge the other two
      // into the non-nil case.
if (nilState && !notNilState) {
- CheckerEvalNilReceiver(ME, DstEval, nilState, Pred);
+ CheckerEvalNilReceiver(msg, dstEval, nilState, Pred);
continue;
}
// Check if the "raise" message was sent.
assert(notNilState);
- if (ME->getSelector() == RaiseSel)
+ if (msg.getSelector() == RaiseSel)
RaisesException = true;
// Check if we raise an exception. For now treat these as sinks.
@@ -2452,11 +2411,11 @@ void GRExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
Builder->BuildSinks = true;
// Dispatch to plug-in transfer function.
- EvalObjCMessageExpr(DstEval, ME, Pred, notNilState);
+ evalObjCMessage(dstEval, msg, Pred, notNilState);
}
- else if (ObjCInterfaceDecl *Iface = ME->getReceiverInterface()) {
+ else if (const ObjCInterfaceDecl *Iface = msg.getReceiverInterface()) {
IdentifierInfo* ClsName = Iface->getIdentifier();
- Selector S = ME->getSelector();
+ Selector S = msg.getSelector();
// Check for special instance methods.
if (!NSExceptionII) {
@@ -2500,64 +2459,51 @@ void GRExprEngine::VisitObjCMessageExpr(const ObjCMessageExpr* ME,
Builder->BuildSinks = true;
// Dispatch to plug-in transfer function.
- EvalObjCMessageExpr(DstEval, ME, Pred, Builder->GetState(Pred));
+ evalObjCMessage(dstEval, msg, Pred, Builder->GetState(Pred));
}
    // Handle the case where no nodes were generated. Auto-generate a node
    // that contains the updated state if we aren't generating sinks.
- if (!Builder->BuildSinks && DstEval.size() == OldSize &&
- !Builder->HasGeneratedNode)
- MakeNode(DstEval, ME, Pred, GetState(Pred));
+ if (!Builder->BuildSinks && dstEval.size() == oldSize &&
+ !Builder->hasGeneratedNode)
+ MakeNode(dstEval, msg.getOriginExpr(), Pred, GetState(Pred));
}
// Finally, perform the post-condition check of the ObjCMessageExpr and store
// the created nodes in 'Dst'.
- if (!(!asLValue && ReceiverReturnsReference(ME))) {
- CheckerVisit(ME, Dst, DstEval, PostVisitStmtCallback);
- return;
- }
-
- // Handle the case where the message expression returns a reference but
- // we expect an rvalue. For such cases, convert the reference to
- // an rvalue.
- // FIXME: This conversion doesn't actually happen unless the result
- // of ObjCMessageExpr is consumed by another expression.
- ExplodedNodeSet DstRValueConvert;
- CheckerVisit(ME, DstRValueConvert, DstEval, PostVisitStmtCallback);
- QualType LoadTy = ME->getType();
-
- static int *ConvertToRvalueTag = 0;
- for (ExplodedNodeSet::iterator NI = DstRValueConvert.begin(),
- NE = DstRValueConvert.end(); NI != NE; ++NI) {
- const GRState *state = GetState(*NI);
- EvalLoad(Dst, ME, *NI, state, state->getSVal(ME),
- &ConvertToRvalueTag, LoadTy);
- }
+ CheckerVisitObjCMessage(msg, Dst, dstEval, /*isPreVisit=*/false);
}
//===----------------------------------------------------------------------===//
// Transfer functions: Miscellaneous statements.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
- ExplodedNode *Pred, ExplodedNodeSet &Dst,
- bool asLValue) {
+void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+
ExplodedNodeSet S1;
+ Visit(Ex, Pred, S1);
+ ExplodedNodeSet S2;
+ CheckerVisit(CastE, S2, S1, PreVisitStmtCallback);
+
+ if (CastE->getCastKind() == CK_LValueToRValue ||
+ CastE->getCastKind() == CK_GetObjCProperty) {
+ for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I!=E; ++I) {
+ ExplodedNode *subExprNode = *I;
+ const GRState *state = GetState(subExprNode);
+ evalLoad(Dst, CastE, subExprNode, state, state->getSVal(Ex));
+ }
+ return;
+ }
+
+ // All other casts.
QualType T = CastE->getType();
QualType ExTy = Ex->getType();
if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
T = ExCast->getTypeAsWritten();
-
- if (ExTy->isArrayType() || ExTy->isFunctionType() || T->isReferenceType() ||
- asLValue)
- VisitLValue(Ex, Pred, S1);
- else
- Visit(Ex, Pred, S1);
-
- ExplodedNodeSet S2;
- CheckerVisit(CastE, S2, S1, PreVisitStmtCallback);
-
+
+#if 0
// If we are evaluating the cast in an lvalue context, we implicitly want
// the cast to evaluate to a location.
if (asLValue) {
@@ -2565,14 +2511,15 @@ void GRExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
T = Ctx.getPointerType(Ctx.getCanonicalType(T));
ExTy = Ctx.getPointerType(Ctx.getCanonicalType(ExTy));
}
+#endif
switch (CastE->getCastKind()) {
case CK_ToVoid:
- assert(!asLValue);
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I)
Dst.Add(*I);
return;
+ case CK_LValueToRValue:
case CK_NoOp:
case CK_FunctionToPointerDecay:
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
@@ -2585,33 +2532,60 @@ void GRExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
}
return;
- case CK_Unknown:
+ case CK_GetObjCProperty:
+ case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
case CK_LValueBitCast:
case CK_IntegralCast:
+ case CK_NullToPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
case CK_AnyPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
- case CK_DerivedToBase:
- case CK_UncheckedDerivedToBase:
+
case CK_ObjCObjectLValueCast: {
- // Delegate to SValuator to process.
+ // Delegate to SValBuilder to process.
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
ExplodedNode* N = *I;
const GRState* state = GetState(N);
SVal V = state->getSVal(Ex);
- V = SVator.EvalCast(V, T, ExTy);
+ V = svalBuilder.evalCast(V, T, ExTy);
state = state->BindExpr(CastE, V);
MakeNode(Dst, CastE, N, state);
}
return;
}
-
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ // For DerivedToBase cast, delegate to the store manager.
+ for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
+ ExplodedNode *node = *I;
+ const GRState *state = GetState(node);
+ SVal val = state->getSVal(Ex);
+ val = getStoreManager().evalDerivedToBase(val, T);
+ state = state->BindExpr(CastE, val);
+ MakeNode(Dst, CastE, node, state);
+ }
+ return;
+
// Various C++ casts that are not handled yet.
case CK_Dynamic:
case CK_ToUnion:
@@ -2631,10 +2605,9 @@ void GRExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
}
}
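
Two hedged examples of cast kinds flowing through the svalBuilder.evalCast path above:

    long widen(int n) {
      return (long)n;  // CK_IntegralCast
    }
    bool check(int *p) {
      return (bool)p;  // CK_PointerToBoolean, one of the kinds added above
    }
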
-void GRExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr* CL,
+void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr* CL,
ExplodedNode* Pred,
- ExplodedNodeSet& Dst,
- bool asLValue) {
+ ExplodedNodeSet& Dst) {
const InitListExpr* ILE
= cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
ExplodedNodeSet Tmp;
@@ -2646,7 +2619,7 @@ void GRExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr* CL,
const LocationContext *LC = (*I)->getLocationContext();
state = state->bindCompoundLiteral(CL, LC, ILV);
- if (asLValue) {
+ if (CL->isLValue()) {
MakeNode(Dst, CL, *I, state->BindExpr(CL, state->getLValue(CL, LC)));
}
else
@@ -2654,7 +2627,7 @@ void GRExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr* CL,
}
}
-void GRExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
+void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet& Dst) {
// The CFG has one DeclStmt per Decl.
@@ -2671,18 +2644,16 @@ void GRExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet Tmp;
if (InitEx) {
- QualType InitTy = InitEx->getType();
- if (getContext().getLangOptions().CPlusPlus && InitTy->isRecordType()) {
- // Delegate expressions of C++ record type evaluation to AggExprVisitor.
- VisitAggExpr(InitEx, GetState(Pred)->getLValue(VD,
- Pred->getLocationContext()), Pred, Dst);
- return;
- } else if (VD->getType()->isReferenceType())
- VisitLValue(InitEx, Pred, Tmp);
- else
+ if (VD->getType()->isReferenceType() && !InitEx->isLValue()) {
+      // If the initializer is of C++ record type, it should already have a
+      // temp object.
+ if (!InitEx->getType()->isRecordType())
+ CreateCXXTemporaryObject(InitEx, Pred, Tmp);
+ else
+ Tmp.Add(Pred);
+ } else
Visit(InitEx, Pred, Tmp);
- }
- else
+ } else
Tmp.Add(Pred);
ExplodedNodeSet Tmp2;
@@ -2698,16 +2669,24 @@ void GRExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
if (InitEx) {
SVal InitVal = state->getSVal(InitEx);
+ // We bound the temp obj region to the CXXConstructExpr. Now recover
+ // the lazy compound value when the variable is not a reference.
+ if (AMgr.getLangOptions().CPlusPlus && VD->getType()->isRecordType() &&
+ !VD->getType()->isReferenceType() && isa<loc::MemRegionVal>(InitVal)){
+ InitVal = state->getSVal(cast<loc::MemRegionVal>(InitVal).getRegion());
+ assert(isa<nonloc::LazyCompoundVal>(InitVal));
+ }
+
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
if ((InitVal.isUnknown() ||
!getConstraintManager().canReasonAbout(InitVal)) &&
!VD->getType()->isReferenceType()) {
- InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx,
+ InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx,
Builder->getCurrentBlockCount());
}
- EvalBind(Dst, DS, *I, state,
+ evalBind(Dst, DS, *I, state,
loc::MemRegionVal(state->getRegion(VD, LC)), InitVal, true);
}
else {
@@ -2717,7 +2696,7 @@ void GRExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
}
}
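
A sketch of the record-type recovery above (C++ mode; the factory is assumed): the initializer was bound to a temp-object region by the CXXConstructExpr, and the declared variable re-reads it as a lazy compound value.

    struct S { int a, b; };
    S make();        // assumed factory
    void decl() {
      S s = make();  // InitVal is a loc::MemRegionVal for the temp object;
                     // it is re-read as a nonloc::LazyCompoundVal
    }
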
-void GRExprEngine::VisitCondInit(const VarDecl *VD, const Stmt *S,
+void ExprEngine::VisitCondInit(const VarDecl *VD, const Stmt *S,
ExplodedNode *Pred, ExplodedNodeSet& Dst) {
const Expr* InitEx = VD->getInit();
@@ -2735,11 +2714,11 @@ void GRExprEngine::VisitCondInit(const VarDecl *VD, const Stmt *S,
// UnknownVal.
if (InitVal.isUnknown() ||
!getConstraintManager().canReasonAbout(InitVal)) {
- InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx,
+ InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx,
Builder->getCurrentBlockCount());
}
- EvalBind(Dst, S, N, state,
+ evalBind(Dst, S, N, state,
loc::MemRegionVal(state->getRegion(VD, LC)), InitVal, true);
}
}
@@ -2760,7 +2739,7 @@ public:
}
-void GRExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
+void ExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
const GRState* state = GetState(Pred);
@@ -2773,7 +2752,7 @@ void GRExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
  // Handle the base case where the initializer has no elements.
  // e.g.: static int* myArray[] = {};
if (NumInitElements == 0) {
- SVal V = ValMgr.makeCompoundVal(T, StartVals);
+ SVal V = svalBuilder.makeCompoundVal(T, StartVals);
MakeNode(Dst, E, Pred, state->BindExpr(E, V));
return;
}
@@ -2807,7 +2786,7 @@ void GRExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
if (NewItr == ItrEnd) {
// Now we have a list holding all init values. Make CompoundValData.
- SVal V = ValMgr.makeCompoundVal(T, NewVals);
+ SVal V = svalBuilder.makeCompoundVal(T, NewVals);
// Make final state and node.
MakeNode(Dst, E, *NI, state->BindExpr(E, V));
@@ -2822,7 +2801,7 @@ void GRExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
return;
}
- if (Loc::IsLocType(T) || T->isIntegerType()) {
+ if (Loc::isLocType(T) || T->isIntegerType()) {
assert (E->getNumInits() == 1);
ExplodedNodeSet Tmp;
const Expr* Init = E->getInit(0);
@@ -2838,7 +2817,7 @@ void GRExprEngine::VisitInitListExpr(const InitListExpr* E, ExplodedNode* Pred,
}
/// VisitSizeOfAlignOfExpr - Transfer function for sizeof(type).
-void GRExprEngine::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr* Ex,
+void ExprEngine::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr* Ex,
ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
QualType T = Ex->getTypeOfArgument();
@@ -2863,7 +2842,7 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr* Ex,
// First, visit the sub-expression to find its region.
const Expr *Arg = Ex->getArgumentExpr();
ExplodedNodeSet Tmp;
- VisitLValue(Arg, Pred, Tmp);
+ Visit(Arg, Pred, Tmp);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
@@ -2877,7 +2856,7 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr* Ex,
}
// The result is the extent of the VLA.
- SVal Extent = cast<SubRegion>(MR)->getExtent(ValMgr);
+ SVal Extent = cast<SubRegion>(MR)->getExtent(svalBuilder);
MakeNode(Dst, Ex, *I, state->BindExpr(Ex, Extent));
}
@@ -2900,10 +2879,10 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr* Ex,
MakeNode(Dst, Ex, Pred,
GetState(Pred)->BindExpr(Ex,
- ValMgr.makeIntVal(amt.getQuantity(), Ex->getType())));
+ svalBuilder.makeIntVal(amt.getQuantity(), Ex->getType())));
}
-void GRExprEngine::VisitOffsetOfExpr(const OffsetOfExpr* OOE,
+void ExprEngine::VisitOffsetOfExpr(const OffsetOfExpr* OOE,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
Expr::EvalResult Res;
if (OOE->Evaluate(Res, getContext()) && Res.Val.isInt()) {
@@ -2911,7 +2890,7 @@ void GRExprEngine::VisitOffsetOfExpr(const OffsetOfExpr* OOE,
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
assert(OOE->getType()->isIntegerType());
assert(IV.isSigned() == OOE->getType()->isSignedIntegerType());
- SVal X = ValMgr.makeIntVal(IV);
+ SVal X = svalBuilder.makeIntVal(IV);
MakeNode(Dst, OOE, Pred, GetState(Pred)->BindExpr(OOE, X));
return;
}
@@ -2919,38 +2898,16 @@ void GRExprEngine::VisitOffsetOfExpr(const OffsetOfExpr* OOE,
Dst.Add(Pred);
}
-void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue) {
+ ExplodedNodeSet& Dst) {
switch (U->getOpcode()) {
default:
break;
- case UO_Deref: {
-
- const Expr* Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
- const GRState* state = GetState(*I);
- SVal location = state->getSVal(Ex);
-
- if (asLValue)
- MakeNode(Dst, U, *I, state->BindExpr(U, location),
- ProgramPoint::PostLValueKind);
- else
- EvalLoad(Dst, U, *I, state, location);
- }
-
- return;
- }
-
case UO_Real: {
-
const Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
@@ -2989,14 +2946,18 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
// For all other types, UO_Imag returns 0.
const GRState* state = GetState(*I);
- SVal X = ValMgr.makeZeroVal(Ex->getType());
+ SVal X = svalBuilder.makeZeroVal(Ex->getType());
MakeNode(Dst, U, *I, state->BindExpr(U, X));
}
return;
}
- case UO_Plus: assert(!asLValue); // FALL-THROUGH.
+ case UO_Plus:
+ assert(!U->isLValue());
+ // FALL-THROUGH.
+ case UO_Deref:
+ case UO_AddrOf:
case UO_Extension: {
    // Unary "+" is a no-op, similar to parentheses. We still have places
@@ -3006,11 +2967,7 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
const Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
-
- if (asLValue)
- VisitLValue(Ex, Pred, Tmp);
- else
- Visit(Ex, Pred, Tmp);
+ Visit(Ex, Pred, Tmp);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
@@ -3020,28 +2977,10 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
return;
}
- case UO_AddrOf: {
-
- assert(!asLValue);
- const Expr* Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- VisitLValue(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- const GRState* state = GetState(*I);
- SVal V = state->getSVal(Ex);
- state = state->BindExpr(U, V);
- MakeNode(Dst, U, *I, state);
- }
-
- return;
- }
-
case UO_LNot:
case UO_Minus:
case UO_Not: {
-
- assert (!asLValue);
+ assert (!U->isLValue());
const Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
Visit(Ex, Pred, Tmp);
@@ -3061,7 +3000,7 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
// QualType SrcT = getContext().getCanonicalType(Ex->getType());
//
// if (DstT != SrcT) // Perform promotions.
-// V = EvalCast(V, DstT);
+// V = evalCast(V, DstT);
//
// if (V.isUnknownOrUndef()) {
// MakeNode(Dst, U, *I, BindExpr(St, U, V));
@@ -3075,12 +3014,12 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
case UO_Not:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, EvalComplement(cast<NonLoc>(V)));
+ state = state->BindExpr(U, evalComplement(cast<NonLoc>(V)));
break;
case UO_Minus:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, EvalMinus(cast<NonLoc>(V)));
+ state = state->BindExpr(U, evalMinus(cast<NonLoc>(V)));
break;
case UO_LNot:
@@ -3092,13 +3031,13 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
SVal Result;
if (isa<Loc>(V)) {
- Loc X = ValMgr.makeNull();
- Result = EvalBinOp(state, BO_EQ, cast<Loc>(V), X,
+ Loc X = svalBuilder.makeNull();
+ Result = evalBinOp(state, BO_EQ, cast<Loc>(V), X,
U->getType());
}
else {
nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
- Result = EvalBinOp(state, BO_EQ, cast<NonLoc>(V), X,
+ Result = evalBinOp(state, BO_EQ, cast<NonLoc>(V), X,
U->getType());
}
@@ -3115,20 +3054,19 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
}
// Handle ++ and -- (both pre- and post-increment).
-
assert (U->isIncrementDecrementOp());
ExplodedNodeSet Tmp;
const Expr* Ex = U->getSubExpr()->IgnoreParens();
- VisitLValue(Ex, Pred, Tmp);
+ Visit(Ex, Pred, Tmp);
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
- SVal V1 = state->getSVal(Ex);
+ SVal loc = state->getSVal(Ex);
// Perform a load.
ExplodedNodeSet Tmp2;
- EvalLoad(Tmp2, Ex, *I, state, V1);
+ evalLoad(Tmp2, Ex, *I, state, loc);
for (ExplodedNodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end();I2!=E2;++I2) {
@@ -3152,53 +3090,58 @@ void GRExprEngine::VisitUnaryOperator(const UnaryOperator* U,
SVal RHS;
if (U->getType()->isAnyPointerType())
- RHS = ValMgr.makeIntValWithPtrWidth(1, false);
+ RHS = svalBuilder.makeArrayIndex(1);
else
- RHS = ValMgr.makeIntVal(1, U->getType());
+ RHS = svalBuilder.makeIntVal(1, U->getType());
- SVal Result = EvalBinOp(state, Op, V2, RHS, U->getType());
+ SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
DefinedOrUnknownSVal SymVal =
- ValMgr.getConjuredSymbolVal(NULL, Ex,
+ svalBuilder.getConjuredSymbolVal(NULL, Ex,
Builder->getCurrentBlockCount());
Result = SymVal;
// If the value is a location, ++/-- should always preserve
// non-nullness. Check if the original value was non-null, and if so
// propagate that constraint.
- if (Loc::IsLocType(U->getType())) {
+ if (Loc::isLocType(U->getType())) {
DefinedOrUnknownSVal Constraint =
- SVator.EvalEQ(state, V2, ValMgr.makeZeroVal(U->getType()));
+ svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
- if (!state->Assume(Constraint, true)) {
+ if (!state->assume(Constraint, true)) {
// It isn't feasible for the original value to be null.
// Propagate this constraint.
- Constraint = SVator.EvalEQ(state, SymVal,
- ValMgr.makeZeroVal(U->getType()));
+ Constraint = svalBuilder.evalEQ(state, SymVal,
+ svalBuilder.makeZeroVal(U->getType()));
- state = state->Assume(Constraint, false);
+ state = state->assume(Constraint, false);
assert(state);
}
}
}
- state = state->BindExpr(U, U->isPostfix() ? V2 : Result);
+ // Since the lvalue-to-rvalue conversion is explicit in the AST,
+    // we bind the expression to its location when the operator is a prefix
+    // increment/decrement and therefore an lvalue (in C++).
+ if (U->isLValue())
+ state = state->BindExpr(U, loc);
+ else
+ state = state->BindExpr(U, V2);
// Perform the store.
- EvalStore(Dst, NULL, U, *I2, state, V1, Result);
+ evalStore(Dst, NULL, U, *I2, state, loc, Result);
}
}
}
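
The non-nullness propagation above, sketched at the source level (illustrative):

    char next(char *p) {
      if (!p) return 0;  // 'p' is constrained non-null on this path
      ++p;               // even when the result is a conjured symbol, the
                         // 'new value != 0' constraint is re-asserted
      return *p;
    }
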
-void GRExprEngine::VisitAsmStmt(const AsmStmt* A, ExplodedNode* Pred,
+void ExprEngine::VisitAsmStmt(const AsmStmt* A, ExplodedNode* Pred,
ExplodedNodeSet& Dst) {
VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
}
-void GRExprEngine::VisitAsmStmtHelperOutputs(const AsmStmt* A,
+void ExprEngine::VisitAsmStmtHelperOutputs(const AsmStmt* A,
AsmStmt::const_outputs_iterator I,
AsmStmt::const_outputs_iterator E,
ExplodedNode* Pred, ExplodedNodeSet& Dst) {
@@ -3208,15 +3151,14 @@ void GRExprEngine::VisitAsmStmtHelperOutputs(const AsmStmt* A,
}
ExplodedNodeSet Tmp;
- VisitLValue(*I, Pred, Tmp);
-
+ Visit(*I, Pred, Tmp);
++I;
for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end();NI != NE;++NI)
VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
}
-void GRExprEngine::VisitAsmStmtHelperInputs(const AsmStmt* A,
+void ExprEngine::VisitAsmStmtHelperInputs(const AsmStmt* A,
AsmStmt::const_inputs_iterator I,
AsmStmt::const_inputs_iterator E,
ExplodedNode* Pred,
@@ -3255,18 +3197,17 @@ void GRExprEngine::VisitAsmStmtHelperInputs(const AsmStmt* A,
VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
}
-void GRExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
+void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
ExplodedNodeSet Src;
if (const Expr *RetE = RS->getRetValue()) {
// Record the returned expression in the state. It will be used in
- // ProcessCallExit to bind the return value to the call expr.
+ // processCallExit to bind the return value to the call expr.
{
- static int Tag = 0;
- SaveAndRestore<const void *> OldTag(Builder->Tag, &Tag);
+ static int tag = 0;
const GRState *state = GetState(Pred);
state = state->set<ReturnExpr>(RetE);
- Pred = Builder->generateNode(RetE, state, Pred);
+ Pred = Builder->generateNode(RetE, state, Pred, &tag);
}
// We may get a NULL Pred because we generated a cached node.
if (Pred)
@@ -3282,19 +3223,19 @@ void GRExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
I != E; ++I) {
- assert(Builder && "GRStmtNodeBuilder must be defined.");
+ assert(Builder && "StmtNodeBuilder must be defined.");
Pred = *I;
unsigned size = Dst.size();
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- SaveOr OldHasGen(Builder->HasGeneratedNode);
+ SaveOr OldHasGen(Builder->hasGeneratedNode);
- getTF().EvalReturn(Dst, *this, *Builder, RS, Pred);
+ getTF().evalReturn(Dst, *this, *Builder, RS, Pred);
    // Handle the case where no nodes were generated.
if (!Builder->BuildSinks && Dst.size() == size &&
- !Builder->HasGeneratedNode)
+ !Builder->hasGeneratedNode)
MakeNode(Dst, RS, Pred, GetState(Pred));
}
}
@@ -3303,25 +3244,14 @@ void GRExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
// Transfer functions: Binary operators.
//===----------------------------------------------------------------------===//
-void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
+void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
ExplodedNode* Pred,
- ExplodedNodeSet& Dst, bool asLValue) {
-
+ ExplodedNodeSet& Dst) {
ExplodedNodeSet Tmp1;
Expr* LHS = B->getLHS()->IgnoreParens();
Expr* RHS = B->getRHS()->IgnoreParens();
- // FIXME: Add proper support for ObjCImplicitSetterGetterRefExpr.
- if (isa<ObjCImplicitSetterGetterRefExpr>(LHS)) {
- Visit(RHS, Pred, Dst);
- return;
- }
-
- if (B->isAssignmentOp())
- VisitLValue(LHS, Pred, Tmp1);
- else
- Visit(LHS, Pred, Tmp1);
-
+ Visit(LHS, Pred, Tmp1);
ExplodedNodeSet Tmp3;
for (ExplodedNodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1!=E1; ++I1) {
@@ -3338,7 +3268,6 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
I2 != E2; ++I2) {
const GRState *state = GetState(*I2);
- const GRState *OldSt = state;
SVal RightV = state->getSVal(RHS);
BinaryOperator::Opcode Op = B->getOpcode();
@@ -3346,35 +3275,27 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
if (Op == BO_Assign) {
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
- QualType T = RHS->getType();
-
if (RightV.isUnknown() ||!getConstraintManager().canReasonAbout(RightV))
{
unsigned Count = Builder->getCurrentBlockCount();
- RightV = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), Count);
+ RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), Count);
}
- SVal ExprVal = asLValue ? LeftV : RightV;
+ SVal ExprVal = B->isLValue() ? LeftV : RightV;
// Simulate the effects of a "store": bind the value of the RHS
// to the L-Value represented by the LHS.
- EvalStore(Tmp3, B, LHS, *I2, state->BindExpr(B, ExprVal), LeftV,RightV);
+ evalStore(Tmp3, B, LHS, *I2, state->BindExpr(B, ExprVal), LeftV,RightV);
continue;
}
if (!B->isAssignmentOp()) {
// Process non-assignments except commas or short-circuited
// logical expressions (LAnd and LOr).
- SVal Result = EvalBinOp(state, Op, LeftV, RightV, B->getType());
+ SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
if (Result.isUnknown()) {
- if (OldSt != state) {
- // Generate a new node if we have already created a new state.
- MakeNode(Tmp3, B, *I2, state);
- }
- else
- Tmp3.Add(*I2);
-
+ MakeNode(Tmp3, B, *I2, state);
continue;
}
@@ -3405,7 +3326,7 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// null dereferences, and so on.
ExplodedNodeSet Tmp4;
SVal location = state->getSVal(LHS);
- EvalLoad(Tmp4, LHS, *I2, state, location);
+ evalLoad(Tmp4, LHS, *I2, state, location);
for (ExplodedNodeSet::iterator I4=Tmp4.begin(), E4=Tmp4.end(); I4!=E4;
++I4) {
@@ -3422,13 +3343,12 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
CLHSTy = getContext().getCanonicalType(CLHSTy);
QualType LTy = getContext().getCanonicalType(LHS->getType());
- QualType RTy = getContext().getCanonicalType(RHS->getType());
// Promote LHS.
- V = SVator.EvalCast(V, CLHSTy, LTy);
+ V = svalBuilder.evalCast(V, CLHSTy, LTy);
// Compute the result of the operation.
- SVal Result = SVator.EvalCast(EvalBinOp(state, Op, V, RightV, CTy),
+ SVal Result = svalBuilder.evalCast(evalBinOp(state, Op, V, RightV, CTy),
B->getType(), CTy);
// EXPERIMENTAL: "Conjured" symbols.
@@ -3444,19 +3364,25 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
- LHSVal = ValMgr.getConjuredSymbolVal(NULL, B->getRHS(), LTy, Count);
+ LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LTy, Count);
// However, we need to convert the symbol to the computation type.
- Result = SVator.EvalCast(LHSVal, CTy, LTy);
+ Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
}
else {
          // The left-hand side may bind to a different value than the
// computation type.
- LHSVal = SVator.EvalCast(Result, LTy, CTy);
+ LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
}
- EvalStore(Tmp3, B, LHS, *I4, state->BindExpr(B, Result),
- location, LHSVal);
+ // In C++, assignment and compound assignment operators return an
+ // lvalue.
+ if (B->isLValue())
+ state = state->BindExpr(B, location);
+ else
+ state = state->BindExpr(B, Result);
+
+ evalStore(Tmp3, B, LHS, *I4, state, location, LHSVal);
}
}
}
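
A sketch of the conjured-symbol case in the BO_Assign branch above (the callee is assumed to be opaque to the analyzer):

    int opaque();    // assumed: no body available
    void assign() {
      int x;
      x = opaque();  // RightV is unknown, so a conjured symbol is bound
                     // instead, keeping later tests on 'x' path-sensitive
    }
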
@@ -3468,7 +3394,7 @@ void GRExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// Checker registration/lookup.
//===----------------------------------------------------------------------===//
-Checker *GRExprEngine::lookupChecker(void *tag) const {
+Checker *ExprEngine::lookupChecker(void *tag) const {
CheckerMap::const_iterator I = CheckerM.find(tag);
return (I == CheckerM.end()) ? NULL : Checkers[I->second].second;
}
@@ -3478,7 +3404,7 @@ Checker *GRExprEngine::lookupChecker(void *tag) const {
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
-static GRExprEngine* GraphPrintCheckerState;
+static ExprEngine* GraphPrintCheckerState;
static SourceManager* GraphPrintSourceManager;
namespace llvm {
@@ -3488,7 +3414,7 @@ struct DOTGraphTraits<ExplodedNode*> :
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
- // FIXME: Since we do not cache error nodes in GRExprEngine now, this does not
+ // FIXME: Since we do not cache error nodes in ExprEngine now, this does not
// work.
static std::string getNodeAttributes(const ExplodedNode* N, void*) {
@@ -3654,11 +3580,10 @@ struct DOTGraphTraits<ExplodedNode*> :
}
}
- Out << "\\|StateID: " << (void*) N->getState() << "\\|";
-
const GRState *state = N->getState();
+ Out << "\\|StateID: " << (void*) state
+ << " NodeID: " << (void*) N << "\\|";
state->printDOT(Out, *N->getLocationContext()->getCFG());
-
Out << "\\l";
return Out.str();
}
@@ -3677,7 +3602,7 @@ GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
}
#endif
-void GRExprEngine::ViewGraph(bool trim) {
+void ExprEngine::ViewGraph(bool trim) {
#ifndef NDEBUG
if (trim) {
std::vector<ExplodedNode*> Src;
@@ -3693,7 +3618,7 @@ void GRExprEngine::ViewGraph(bool trim) {
I2!=E2; ++I2) {
const BugReportEquivClass& EQ = *I2;
const BugReport &R = **EQ.begin();
- ExplodedNode *N = const_cast<ExplodedNode*>(R.getEndNode());
+ ExplodedNode *N = const_cast<ExplodedNode*>(R.getErrorNode());
if (N) Src.push_back(N);
}
}
@@ -3704,7 +3629,7 @@ void GRExprEngine::ViewGraph(bool trim) {
GraphPrintCheckerState = this;
GraphPrintSourceManager = &getContext().getSourceManager();
- llvm::ViewGraph(*G.roots_begin(), "GRExprEngine");
+ llvm::ViewGraph(*G.roots_begin(), "ExprEngine");
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
@@ -3712,7 +3637,7 @@ void GRExprEngine::ViewGraph(bool trim) {
#endif
}
-void GRExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
+void ExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
#ifndef NDEBUG
GraphPrintCheckerState = this;
GraphPrintSourceManager = &getContext().getSourceManager();
@@ -3722,7 +3647,7 @@ void GRExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
if (!TrimmedG.get())
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
else
- llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedExprEngine");
GraphPrintCheckerState = NULL;
GraphPrintSourceManager = NULL;
diff --git a/contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 29a3c3a..fe628a2 100644
--- a/contrib/llvm/tools/clang/lib/Checker/FixedAddressChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -13,11 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class FixedAddressChecker
@@ -54,7 +56,7 @@ void FixedAddressChecker::PreVisitBinaryOperator(CheckerContext &C,
if (!RV.isConstant() || RV.isZeroConstant())
return;
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT)
BT = new BuiltinBug("Use fixed address",
"Using a fixed address is not portable because that "
@@ -66,6 +68,10 @@ void FixedAddressChecker::PreVisitBinaryOperator(CheckerContext &C,
}
}
-void clang::RegisterFixedAddressChecker(GRExprEngine &Eng) {
+static void RegisterFixedAddressChecker(ExprEngine &Eng) {
Eng.registerCheck(new FixedAddressChecker());
}
+
+void ento::registerFixedAddressChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterFixedAddressChecker);
+}
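
For reference, a minimal input that trips this checker (a sketch):

    void fixed() {
      int *p;
      p = (int *)0x8000;  // non-zero constant address: reported as the
                          // "Use fixed address" BuiltinBug above
      (void)p;
    }
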
diff --git a/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
index 6411c79..f49b125 100644
--- a/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -42,65 +42,89 @@
// - Finer grained false positive control (levels)
// - Handling ~0 values
-#include "GRExprEngineExperimentalChecks.h"
+#include "ClangSACheckers.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerHelpers.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRCoreEngine.h"
-#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/AST/Stmt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/Support/ErrorHandling.h"
#include <deque>
using namespace clang;
+using namespace ento;
namespace {
class IdempotentOperationChecker
: public CheckerVisitor<IdempotentOperationChecker> {
+public:
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+ void PostVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+ void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B, ExprEngine &Eng);
+
+private:
+ // Our assumption about a particular operation.
+ enum Assumption { Possible = 0, Impossible, Equal, LHSis1, RHSis1, LHSis0,
+ RHSis0 };
+
+ void UpdateAssumption(Assumption &A, const Assumption &New);
+
+ // False positive reduction methods
+ static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
+ static bool isUnused(const Expr *E, AnalysisContext *AC);
+ static bool isTruncationExtensionAssignment(const Expr *LHS,
+ const Expr *RHS);
+ bool pathWasCompletelyAnalyzed(const CFG *cfg,
+ const CFGBlock *CB,
+ const CFGStmtMap *CBM,
+ const CoreEngine &CE);
+ static bool CanVary(const Expr *Ex,
+ AnalysisContext *AC);
+ static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
+ AnalysisContext *AC);
+ static bool containsNonLocalVarDecl(const Stmt *S);
+
+ // Hash table and related data structures
+ struct BinaryOperatorData {
+ BinaryOperatorData() : assumption(Possible), analysisContext(0) {}
+
+ Assumption assumption;
+ AnalysisContext *analysisContext;
+ ExplodedNodeSet explodedNodes; // Set of ExplodedNodes that refer to a
+ // BinaryOperator
+ };
+ typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
+ AssumptionMap;
+ AssumptionMap hash;
+
+ // A class that performs reachability queries for CFGBlocks. Several internal
+ // checks in this checker require reachability information. The requests all
+ // tend to have a common destination, so we lazily do a predecessor search
+ // from the destination node and cache the results to prevent work
+ // duplication.
+ class CFGReachabilityAnalysis {
+ typedef llvm::BitVector ReachableSet;
+ typedef llvm::DenseMap<unsigned, ReachableSet> ReachableMap;
+ ReachableSet analyzed;
+ ReachableMap reachable;
public:
- static void *getTag();
- void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
- void PostVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
- void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B, GRExprEngine &Eng);
-
+ CFGReachabilityAnalysis(const CFG &cfg)
+ : analyzed(cfg.getNumBlockIDs(), false) {}
+
+ inline bool isReachable(const CFGBlock *Src, const CFGBlock *Dst);
private:
- // Our assumption about a particular operation.
- enum Assumption { Possible = 0, Impossible, Equal, LHSis1, RHSis1, LHSis0,
- RHSis0 };
-
- void UpdateAssumption(Assumption &A, const Assumption &New);
-
- // False positive reduction methods
- static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
- static bool isUnused(const Expr *E, AnalysisContext *AC);
- //static bool isTruncationExtensionAssignment(const Expr *LHS,
- // const Expr *RHS);
- static bool PathWasCompletelyAnalyzed(const CFG *C,
- const CFGBlock *CB,
- const GRCoreEngine &CE);
- static bool CanVary(const Expr *Ex,
- AnalysisContext *AC);
- static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
- AnalysisContext *AC);
- static bool containsNonLocalVarDecl(const Stmt *S);
-
- // Hash table and related data structures
- struct BinaryOperatorData {
- BinaryOperatorData() : assumption(Possible), analysisContext(0) {}
-
- Assumption assumption;
- AnalysisContext *analysisContext;
- ExplodedNodeSet explodedNodes; // Set of ExplodedNodes that refer to a
- // BinaryOperator
- };
- typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
- AssumptionMap;
- AssumptionMap hash;
+ void MapReachability(const CFGBlock *Dst);
+ };
+ llvm::OwningPtr<CFGReachabilityAnalysis> CRA;
};
}
@@ -109,10 +133,14 @@ void *IdempotentOperationChecker::getTag() {
return &x;
}
-void clang::RegisterIdempotentOperationChecker(GRExprEngine &Eng) {
+static void RegisterIdempotentOperationChecker(ExprEngine &Eng) {
Eng.registerCheck(new IdempotentOperationChecker());
}
+void ento::registerIdempotentOperationChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterIdempotentOperationChecker);
+}
+
void IdempotentOperationChecker::PreVisitBinaryOperator(
CheckerContext &C,
const BinaryOperator *B) {
@@ -181,7 +209,7 @@ void IdempotentOperationChecker::PreVisitBinaryOperator(
A = Impossible;
return;
}
- LHSVal = state->getSVal(cast<Loc>(LHSVal));
+ LHSVal = state->getSVal(cast<Loc>(LHSVal), LHS->getType());
}
@@ -196,9 +224,10 @@ void IdempotentOperationChecker::PreVisitBinaryOperator(
case BO_Assign:
// x Assign x can be used to silence unused variable warnings intentionally.
// If this is a self assignment and the variable is referenced elsewhere,
- // then it is a false positive.
+ // and the assignment is not a truncation or extension, then it is a false
+ // positive.
if (isSelfAssign(LHS, RHS)) {
- if (!isUnused(LHS, AC)) {
+ if (!isUnused(LHS, AC) && !isTruncationExtensionAssignment(LHS, RHS)) {
UpdateAssumption(A, Equal);
return;
}
@@ -335,12 +364,21 @@ void IdempotentOperationChecker::PostVisitBinaryOperator(
const BinaryOperator *B) {
// Add the ExplodedNode we just visited
BinaryOperatorData &Data = hash[B];
+
+ const Stmt *predStmt
+ = cast<StmtPoint>(C.getPredecessor()->getLocation()).getStmt();
+
+ // Ignore implicit calls to setters.
+ if (isa<ObjCPropertyRefExpr>(predStmt))
+ return;
+
+ assert(isa<BinaryOperator>(predStmt));
Data.explodedNodes.Add(C.getPredecessor());
}
void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G,
BugReporter &BR,
- GRExprEngine &Eng) {
+ ExprEngine &Eng) {
BugType *BT = new BugType("Idempotent operation", "Dead code");
// Iterate over the hash to see if we have any paths with definite
// idempotent operations.
@@ -363,8 +401,8 @@ void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G,
&AC->getParentMap());
// If we can trace back
- if (!PathWasCompletelyAnalyzed(AC->getCFG(),
- CBM->getBlock(B),
+ if (!pathWasCompletelyAnalyzed(AC->getCFG(),
+ CBM->getBlock(B), CBM,
Eng.getCoreEngine()))
continue;
@@ -500,7 +538,6 @@ bool IdempotentOperationChecker::isUnused(const Expr *E,
return true;
}
-#if 0
// Check for self casts truncating/extending a variable
bool IdempotentOperationChecker::isTruncationExtensionAssignment(
const Expr *LHS,
@@ -521,59 +558,78 @@ bool IdempotentOperationChecker::isTruncationExtensionAssignment(
if (VD != RHS_DR->getDecl())
return false;
- return dyn_cast<DeclRefExpr>(RHS->IgnoreParens()) == NULL;
+ return dyn_cast<DeclRefExpr>(RHS->IgnoreParenLValueCasts()) == NULL;
}
-#endif
// Returns false if a path to this block was not completely analyzed, or true
// otherwise.
-bool IdempotentOperationChecker::PathWasCompletelyAnalyzed(
- const CFG *C,
- const CFGBlock *CB,
- const GRCoreEngine &CE) {
- std::deque<const CFGBlock *> WorkList;
- llvm::SmallSet<unsigned, 8> Aborted;
- llvm::SmallSet<unsigned, 128> Visited;
-
- // Create a set of all aborted blocks
- typedef GRCoreEngine::BlocksAborted::const_iterator AbortedIterator;
+bool
+IdempotentOperationChecker::pathWasCompletelyAnalyzed(const CFG *cfg,
+ const CFGBlock *CB,
+ const CFGStmtMap *CBM,
+ const CoreEngine &CE) {
+
+ if (!CRA.get())
+ CRA.reset(new CFGReachabilityAnalysis(*cfg));
+
+ // Test for reachability from any aborted blocks to this block
+ typedef CoreEngine::BlocksAborted::const_iterator AbortedIterator;
for (AbortedIterator I = CE.blocks_aborted_begin(),
E = CE.blocks_aborted_end(); I != E; ++I) {
const BlockEdge &BE = I->first;
// The destination block on the BlockEdge is the first block that was not
- // analyzed.
- Aborted.insert(BE.getDst()->getBlockID());
- }
-
- // Save the entry block ID for early exiting
- unsigned EntryBlockID = C->getEntry().getBlockID();
-
- // Create initial node
- WorkList.push_back(CB);
-
- while (!WorkList.empty()) {
- const CFGBlock *Head = WorkList.front();
- WorkList.pop_front();
- Visited.insert(Head->getBlockID());
-
- // If we found the entry block, then there exists a path from the target
- // node to the entry point of this function -> the path was completely
- // analyzed.
- if (Head->getBlockID() == EntryBlockID)
- return true;
-
- // If any of the aborted blocks are on the path to the beginning, then all
- // paths to this block were not analyzed.
- if (Aborted.count(Head->getBlockID()))
+ // analyzed. If we can reach this block from the aborted block, then this
+ // block was not completely analyzed.
+ //
+ // Also explicitly check if the current block is the destination block.
+ // While technically reachable, it means we aborted the analysis on
+ // a path that included that block.
+ const CFGBlock *destBlock = BE.getDst();
+ if (destBlock == CB || CRA->isReachable(destBlock, CB))
return false;
-
- // Add the predecessors to the worklist unless we have already visited them
- for (CFGBlock::const_pred_iterator I = Head->pred_begin();
- I != Head->pred_end(); ++I)
- if (!Visited.count((*I)->getBlockID()))
- WorkList.push_back(*I);
}
+
+ // For the items still on the worklist, see if they are in blocks that
+ // can eventually reach 'CB'.
+ class VisitWL : public WorkList::Visitor {
+ const CFGStmtMap *CBM;
+ const CFGBlock *TargetBlock;
+ CFGReachabilityAnalysis &CRA;
+ public:
+ VisitWL(const CFGStmtMap *cbm, const CFGBlock *targetBlock,
+ CFGReachabilityAnalysis &cra)
+ : CBM(cbm), TargetBlock(targetBlock), CRA(cra) {}
+ virtual bool visit(const WorkListUnit &U) {
+ ProgramPoint P = U.getNode()->getLocation();
+ const CFGBlock *B = 0;
+ if (StmtPoint *SP = dyn_cast<StmtPoint>(&P)) {
+ B = CBM->getBlock(SP->getStmt());
+ }
+ else if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ B = BE->getDst();
+ }
+ else if (BlockEntrance *BEnt = dyn_cast<BlockEntrance>(&P)) {
+ B = BEnt->getBlock();
+ }
+ else if (BlockExit *BExit = dyn_cast<BlockExit>(&P)) {
+ B = BExit->getBlock();
+ }
+ if (!B)
+ return true;
+
+ return CRA.isReachable(B, TargetBlock);
+ }
+ };
+ VisitWL visitWL(CBM, CB, *CRA.get());
+ // Were there any items in the worklist that could potentially reach
+ // this block?
+ if (CE.getWorkList()->visitItemsInWorkList(visitWL))
+ return false;
+
+ // Verify that this block is reachable from the entry block
+ if (!CRA->isReachable(&cfg->getEntry(), CB))
+ return false;
// If we get to this point, there is no connection to the entry block or an
// aborted block. This path is unreachable and we can report the error.
@@ -614,7 +670,7 @@ bool IdempotentOperationChecker::CanVary(const Expr *Ex,
case Stmt::OffsetOfExprClass:
case Stmt::CompoundLiteralExprClass:
case Stmt::AddrLabelExprClass:
- case Stmt::TypesCompatibleExprClass:
+ case Stmt::BinaryTypeTraitExprClass:
case Stmt::GNUNullExprClass:
case Stmt::InitListExprClass:
case Stmt::DesignatedInitExprClass:
@@ -636,6 +692,13 @@ bool IdempotentOperationChecker::CanVary(const Expr *Ex,
// The next cases require recursion for subexpressions
case Stmt::BinaryOperatorClass: {
const BinaryOperator *B = cast<const BinaryOperator>(Ex);
+
+ // Exclude cases involving pointer arithmetic. These are usually
+ // false positives.
+ if (B->getOpcode() == BO_Sub || B->getOpcode() == BO_Add)
+ if (B->getLHS()->getType()->getAs<PointerType>())
+ return false;
+
return CanVary(B->getRHS(), AC)
|| CanVary(B->getLHS(), AC);
}
@@ -653,7 +716,8 @@ bool IdempotentOperationChecker::CanVary(const Expr *Ex,
return CanVary(cast<const ChooseExpr>(Ex)->getChosenSubExpr(
AC->getASTContext()), AC);
case Stmt::ConditionalOperatorClass:
- return CanVary(cast<const ConditionalOperator>(Ex)->getCond(), AC);
+ case Stmt::BinaryConditionalOperatorClass:
+ return CanVary(cast<AbstractConditionalOperator>(Ex)->getCond(), AC);
}
}
@@ -701,3 +765,58 @@ bool IdempotentOperationChecker::containsNonLocalVarDecl(const Stmt *S) {
return false;
}
+
+bool IdempotentOperationChecker::CFGReachabilityAnalysis::isReachable(
+ const CFGBlock *Src,
+ const CFGBlock *Dst) {
+ const unsigned DstBlockID = Dst->getBlockID();
+
+ // If we haven't analyzed the destination node, run the analysis now
+ if (!analyzed[DstBlockID]) {
+ MapReachability(Dst);
+ analyzed[DstBlockID] = true;
+ }
+
+ // Return the cached result
+ return reachable[DstBlockID][Src->getBlockID()];
+}
+
+// Maps reachability to a common node by walking the predecessors of the
+// destination node.
+void IdempotentOperationChecker::CFGReachabilityAnalysis::MapReachability(
+ const CFGBlock *Dst) {
+
+ llvm::SmallVector<const CFGBlock *, 11> worklist;
+ llvm::BitVector visited(analyzed.size());
+
+ ReachableSet &DstReachability = reachable[Dst->getBlockID()];
+ DstReachability.resize(analyzed.size(), false);
+
+ // Start searching from the destination node, since we commonly will perform
+ // multiple queries relating to a destination node.
+ worklist.push_back(Dst);
+ bool firstRun = true;
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.back();
+ worklist.pop_back();
+
+ if (visited[block->getBlockID()])
+ continue;
+ visited[block->getBlockID()] = true;
+
+ // Update reachability information for this node -> Dst
+ if (!firstRun) {
+ // Don't insert Dst -> Dst unless it was a predecessor of itself
+ DstReachability[block->getBlockID()] = true;
+ }
+ else
+ firstRun = false;
+
+ // Add the predecessors to the worklist.
+ for (CFGBlock::const_pred_iterator i = block->pred_begin(),
+ e = block->pred_end(); i != e; ++i) {
+ worklist.push_back(*i);
+ }
+ }
+}
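
The replacement pathWasCompletelyAnalyzed and the CFGReachabilityAnalysis helper above turn the old per-query worklist walk into a lazy backward search from the destination block, cached per destination in a bit vector so repeated queries about the same block become table lookups. A standalone sketch of that caching scheme over plain predecessor lists, assuming nothing from clang (all names invented):

#include <cstdio>
#include <vector>

// Lazy, destination-keyed reachability cache: walk the predecessors of Dst
// once, record every block that can reach it, and answer later queries from
// the cache. Blocks are small integer IDs, as with CFGBlock::getBlockID().
class ReachabilityCache {
public:
  explicit ReachabilityCache(const std::vector<std::vector<int> > &preds)
      : Preds(preds), Analyzed(preds.size(), false), Reachable(preds.size()) {}

  bool isReachable(int Src, int Dst) {
    if (!Analyzed[Dst]) {
      mapReachability(Dst);
      Analyzed[Dst] = true;
    }
    return Reachable[Dst][Src];
  }

private:
  void mapReachability(int Dst) {
    std::vector<bool> &R = Reachable[Dst];
    R.assign(Preds.size(), false);
    std::vector<bool> visited(Preds.size(), false);
    std::vector<int> worklist(1, Dst);
    bool firstRun = true;
    while (!worklist.empty()) {
      int B = worklist.back();
      worklist.pop_back();
      if (visited[B])
        continue;
      visited[B] = true;
      // Skip marking on the first pop so Dst -> Dst is not implied.
      if (!firstRun)
        R[B] = true;
      else
        firstRun = false;
      for (size_t i = 0; i < Preds[B].size(); ++i)
        worklist.push_back(Preds[B][i]);
    }
  }

  const std::vector<std::vector<int> > &Preds; // Preds[b] = predecessors of b
  std::vector<bool> Analyzed;                  // per-destination "cache filled"
  std::vector<std::vector<bool> > Reachable;   // Reachable[dst][src]
};

int main() {
  // Edges 0->1, 1->2, 1->3, stored as predecessor lists.
  std::vector<std::vector<int> > preds(4);
  preds[1].push_back(0);
  preds[2].push_back(1);
  preds[3].push_back(1);
  ReachabilityCache cache(preds);
  std::printf("0 reaches 2: %d\n", (int)cache.isReachable(0, 2)); // 1
  std::printf("3 reaches 2: %d\n", (int)cache.isReachable(3, 2)); // 0
}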
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InternalChecks.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InternalChecks.h
new file mode 100644
index 0000000..e855386
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InternalChecks.h
@@ -0,0 +1,51 @@
+//=-- InternalChecks.h- Builtin ExprEngine Checks -------------------*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to instantiate and register the "built-in"
+// checks in ExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ExprEngine_INTERNAL_CHECKS
+#define LLVM_CLANG_GR_ExprEngine_INTERNAL_CHECKS
+
+namespace clang {
+
+namespace ento {
+
+class ExprEngine;
+
+// Foundational checks that handle basic semantics.
+void RegisterAdjustedReturnValueChecker(ExprEngine &Eng);
+void RegisterArrayBoundChecker(ExprEngine &Eng);
+void RegisterArrayBoundCheckerV2(ExprEngine &Eng);
+void RegisterAttrNonNullChecker(ExprEngine &Eng);
+void RegisterBuiltinFunctionChecker(ExprEngine &Eng);
+void RegisterCallAndMessageChecker(ExprEngine &Eng);
+void RegisterCastSizeChecker(ExprEngine &Eng);
+void RegisterDereferenceChecker(ExprEngine &Eng);
+void RegisterDivZeroChecker(ExprEngine &Eng);
+void RegisterNoReturnFunctionChecker(ExprEngine &Eng);
+void RegisterReturnPointerRangeChecker(ExprEngine &Eng);
+void RegisterReturnUndefChecker(ExprEngine &Eng);
+void RegisterUndefBranchChecker(ExprEngine &Eng);
+void RegisterUndefCapturedBlockVarChecker(ExprEngine &Eng);
+void RegisterUndefResultChecker(ExprEngine &Eng);
+void RegisterUndefinedArraySubscriptChecker(ExprEngine &Eng);
+void RegisterUndefinedAssignmentChecker(ExprEngine &Eng);
+void RegisterVLASizeChecker(ExprEngine &Eng);
+
+// API checks.
+void RegisterOSAtomicChecker(ExprEngine &Eng);
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 2f87da1..9e3adc8 100644
--- a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -12,14 +12,17 @@
//
//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
#include <string>
#include "llvm/ADT/StringRef.h"
using namespace clang;
+using namespace ento;
//===----------------------------------------------------------------------===//
// Generic type checking routines.
@@ -37,15 +40,13 @@ static bool IsLLVMStringRef(QualType T) {
/// Check whether the declaration is semantically inside the top-level
/// namespace named by ns.
static bool InNamespace(const Decl *D, llvm::StringRef NS) {
- const DeclContext *DC = D->getDeclContext();
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
if (!ND)
return false;
const IdentifierInfo *II = ND->getIdentifier();
if (!II || !II->getName().equals(NS))
return false;
- DC = ND->getDeclContext();
- return isa<TranslationUnitDecl>(DC);
+ return isa<TranslationUnitDecl>(ND->getDeclContext());
}
static bool IsStdString(QualType T) {
@@ -153,7 +154,7 @@ void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
// llvm::StringRef x = call() (where call returns std::string)
if (!IsLLVMStringRef(VD->getType()))
return;
- CXXExprWithTemporaries *Ex1 = dyn_cast<CXXExprWithTemporaries>(Init);
+ ExprWithCleanups *Ex1 = dyn_cast<ExprWithCleanups>(Init);
if (!Ex1)
return;
CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
@@ -211,10 +212,10 @@ static bool IsPartOfAST(const CXXRecordDecl *R) {
namespace {
class ASTFieldVisitor {
llvm::SmallVector<FieldDecl*, 10> FieldChain;
- CXXRecordDecl *Root;
+ const CXXRecordDecl *Root;
BugReporter &BR;
public:
- ASTFieldVisitor(CXXRecordDecl *root, BugReporter &br)
+ ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br)
: Root(root), BR(br) {}
void Visit(FieldDecl *D);
@@ -222,7 +223,7 @@ public:
};
} // end anonymous namespace
-static void CheckASTMemory(CXXRecordDecl *R, BugReporter &BR) {
+static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR) {
if (!IsPartOfAST(R))
return;
@@ -284,29 +285,27 @@ void ASTFieldVisitor::ReportError(QualType T) {
}
//===----------------------------------------------------------------------===//
-// Entry point for all checks.
+// LLVMConventionsChecker
//===----------------------------------------------------------------------===//
-static void ScanCodeDecls(DeclContext *DC, BugReporter &BR) {
- for (DeclContext::decl_iterator I=DC->decls_begin(), E=DC->decls_end();
- I!=E ; ++I) {
-
- Decl *D = *I;
-
- if (D->hasBody())
- CheckStringRefAssignedTemporary(D, BR);
-
- if (CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(D))
- if (R->isDefinition())
- CheckASTMemory(R, BR);
+namespace {
+class LLVMConventionsChecker : public CheckerV2<
+ check::ASTDecl<CXXRecordDecl>,
+ check::ASTCodeBody > {
+public:
+ void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (R->isDefinition())
+ CheckASTMemory(R, BR);
+ }
- if (DeclContext *DC_child = dyn_cast<DeclContext>(D))
- ScanCodeDecls(DC_child, BR);
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckStringRefAssignedTemporary(D, BR);
}
+};
}
-void clang::CheckLLVMConventions(TranslationUnitDecl &TU,
- BugReporter &BR) {
- ScanCodeDecls(&TU, BR);
+void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<LLVMConventionsChecker>();
}
-
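
This rewrite is representative of the CheckerV2 migration seen throughout the import: rather than hand-rolling ScanCodeDecls to recurse over every DeclContext, the checker names the AST events it wants (check::ASTDecl<CXXRecordDecl>, check::ASTCodeBody) and the framework owns the traversal and dispatch. A toy illustration of that inversion of control, using invented types that only mimic the shape of the interface:

#include <cstdio>
#include <vector>

// Invented miniature of the CheckerV2 idea: the checker exposes callbacks,
// the framework walks the translation unit. Not the real clang API.
struct Decl {
  bool isRecordDefinition;
  bool hasBody;
};

template <class CheckerT>
void runChecker(const std::vector<Decl> &tu, CheckerT &checker) {
  for (size_t i = 0; i < tu.size(); ++i) {
    if (tu[i].isRecordDefinition)
      checker.checkASTDecl(tu[i]);     // analogous to check::ASTDecl<...>
    if (tu[i].hasBody)
      checker.checkASTCodeBody(tu[i]); // analogous to check::ASTCodeBody
  }
}

struct ConventionsChecker {
  void checkASTDecl(const Decl &) { std::puts("record definition checked"); }
  void checkASTCodeBody(const Decl &) { std::puts("code body checked"); }
};

int main() {
  std::vector<Decl> tu;
  Decl record = { true, false };
  Decl func = { false, true };
  tu.push_back(record);
  tu.push_back(func);
  ConventionsChecker c;
  runChecker(tu, c); // the framework, not the checker, drives the walk
}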
diff --git a/contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index bcd96e7..358be12 100644
--- a/contrib/llvm/tools/clang/lib/Checker/MacOSXAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -15,16 +15,18 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
+#include "ClangSACheckers.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
namespace {
class MacOSXAPIChecker : public CheckerVisitor<MacOSXAPIChecker> {
@@ -44,9 +46,12 @@ public:
};
} //end anonymous namespace
-void clang::RegisterMacOSXAPIChecker(GRExprEngine &Eng) {
- if (Eng.getContext().Target.getTriple().getVendor() == llvm::Triple::Apple)
- Eng.registerCheck(new MacOSXAPIChecker());
+static void RegisterMacOSXAPIChecker(ExprEngine &Eng) {
+ Eng.registerCheck(new MacOSXAPIChecker());
+}
+
+void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterMacOSXAPIChecker);
}
//===----------------------------------------------------------------------===//
@@ -73,7 +78,7 @@ static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
- ExplodedNode *N = C.GenerateSink(state);
+ ExplodedNode *N = C.generateSink(state);
if (!N)
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Makefile b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Makefile
new file mode 100644
index 0000000..97f4642
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Makefile
@@ -0,0 +1,24 @@
+##===- clang/lib/StaticAnalyzer/Checkers/Makefile ----------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerCheckers
+
+BUILT_SOURCES = Checkers.inc
+TABLEGEN_INC_FILES_COMMON = 1
+
+include $(CLANG_LEVEL)/Makefile
+
+$(ObjDir)/Checkers.inc.tmp : Checkers.td $(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include/clang/StaticAnalyzer/Checkers/CheckerBase.td $(TBLGEN) $(ObjDir)/.dir
+ $(Echo) "Building Clang SA Checkers tables with tblgen"
+ $(Verb) $(TableGen) -gen-clang-sa-checkers -I $(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include -o $(call SYSPATH, $@) $<
diff --git a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index c9b6d75..9d3a887 100644
--- a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -12,14 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineExperimentalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "ExperimentalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
+using namespace ento;
namespace {
@@ -75,13 +76,13 @@ public:
BT_BadFree(0),
II_malloc(0), II_free(0), II_realloc(0), II_calloc(0) {}
static void *getTag();
- bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
- void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
- void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+ bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
+ void evalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
+ void evalEndPath(EndOfFunctionNodeBuilder &B, void *tag, ExprEngine &Eng);
void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
- const GRState *EvalAssume(const GRState *state, SVal Cond, bool Assumption,
+ const GRState *evalAssume(const GRState *state, SVal Cond, bool Assumption,
bool *respondsToCallback);
- void VisitLocation(CheckerContext &C, const Stmt *S, SVal l);
+ void visitLocation(CheckerContext &C, const Stmt *S, SVal l, bool isLoad);
virtual void PreVisitBind(CheckerContext &C, const Stmt *StoreE,
SVal location, SVal val);
@@ -116,14 +117,16 @@ private:
typedef llvm::ImmutableMap<SymbolRef, RefState> RegionStateTy;
namespace clang {
+namespace ento {
template <>
struct GRStateTrait<RegionState>
: public GRStatePartialTrait<RegionStateTy> {
static void *GDMIndex() { return MallocChecker::getTag(); }
};
}
+}
-void clang::RegisterMallocChecker(GRExprEngine &Eng) {
+void ento::RegisterMallocChecker(ExprEngine &Eng) {
Eng.registerCheck(new MallocChecker());
}
@@ -132,7 +135,7 @@ void *MallocChecker::getTag() {
return &x;
}
-bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+bool MallocChecker::evalCallExpr(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
SVal L = state->getSVal(Callee);
@@ -227,27 +230,28 @@ const GRState *MallocChecker::MallocMemAux(CheckerContext &C,
SVal Size, SVal Init,
const GRState *state) {
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- ValueManager &ValMgr = C.getValueManager();
+ SValBuilder &svalBuilder = C.getSValBuilder();
// Set the return value.
- SVal RetVal = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
- state = state->BindExpr(CE, RetVal);
+ SVal retVal = svalBuilder.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+ state = state->BindExpr(CE, retVal);
// Fill the region with the initialization value.
- state = state->bindDefault(RetVal, Init);
+ state = state->bindDefault(retVal, Init);
// Set the region's extent equal to the Size parameter.
- const SymbolicRegion *R = cast<SymbolicRegion>(RetVal.getAsRegion());
- DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+ const SymbolicRegion *R = cast<SymbolicRegion>(retVal.getAsRegion());
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
+ DefinedOrUnknownSVal extentMatchesSize =
+ svalBuilder.evalEQ(state, Extent, DefinedSize);
- SValuator &SVator = ValMgr.getSValuator();
- DefinedOrUnknownSVal ExtentMatchesSize =
- SVator.EvalEQ(state, Extent, DefinedSize);
- state = state->Assume(ExtentMatchesSize, true);
-
- SymbolRef Sym = RetVal.getAsLocSymbol();
+ state = state->assume(extentMatchesSize, true);
+ assert(state);
+
+ SymbolRef Sym = retVal.getAsLocSymbol();
assert(Sym);
+
// Set the symbol's state to Allocated.
return state->set<RegionState>(Sym, RefState::getAllocateUnchecked(CE));
}
@@ -288,7 +292,7 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
// FIXME: Technically using 'Assume' here can result in a path
// bifurcation. In such cases we need to return two states, not just one.
const GRState *notNullState, *nullState;
- llvm::tie(notNullState, nullState) = state->Assume(location);
+ llvm::tie(notNullState, nullState) = state->assume(location);
// The explicit NULL case, no operation is performed.
if (nullState && !notNullState)
@@ -351,7 +355,7 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
// Check double free.
if (RS->isReleased()) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleFree)
BT_DoubleFree
= new BuiltinBug("Double free",
@@ -376,9 +380,7 @@ bool MallocChecker::SummarizeValue(llvm::raw_ostream& os, SVal V) {
else if (loc::ConcreteInt *ConstAddr = dyn_cast<loc::ConcreteInt>(&V))
os << "a constant address (" << ConstAddr->getValue() << ")";
else if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&V))
- os << "the address of the label '"
- << Label->getLabel()->getID()->getName()
- << "'";
+ os << "the address of the label '" << Label->getLabel()->getName() << "'";
else
return false;
@@ -462,7 +464,7 @@ bool MallocChecker::SummarizeRegion(llvm::raw_ostream& os,
void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
SourceRange range) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT_BadFree)
BT_BadFree = new BuiltinBug("Bad free");
@@ -500,21 +502,22 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
- const Expr *Arg0 = CE->getArg(0);
- DefinedOrUnknownSVal Arg0Val=cast<DefinedOrUnknownSVal>(state->getSVal(Arg0));
+ const Expr *arg0Expr = CE->getArg(0);
+ DefinedOrUnknownSVal arg0Val
+ = cast<DefinedOrUnknownSVal>(state->getSVal(arg0Expr));
- ValueManager &ValMgr = C.getValueManager();
- SValuator &SVator = C.getSValuator();
+ SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal PtrEQ = SVator.EvalEQ(state, Arg0Val, ValMgr.makeNull());
+ DefinedOrUnknownSVal PtrEQ =
+ svalBuilder.evalEQ(state, arg0Val, svalBuilder.makeNull());
// If the ptr is NULL, the call is equivalent to malloc(size).
- if (const GRState *stateEqual = state->Assume(PtrEQ, true)) {
+ if (const GRState *stateEqual = state->assume(PtrEQ, true)) {
// Hack: set the NULL symbolic region to released to suppress false warning.
// In the future we should add more states for allocated regions, e.g.,
// CheckedNull, CheckedNonNull.
- SymbolRef Sym = Arg0Val.getAsLocSymbol();
+ SymbolRef Sym = arg0Val.getAsLocSymbol();
if (Sym)
stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE));
@@ -523,49 +526,44 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
C.addTransition(stateMalloc);
}
- if (const GRState *stateNotEqual = state->Assume(PtrEQ, false)) {
+ if (const GRState *stateNotEqual = state->assume(PtrEQ, false)) {
const Expr *Arg1 = CE->getArg(1);
DefinedOrUnknownSVal Arg1Val =
cast<DefinedOrUnknownSVal>(stateNotEqual->getSVal(Arg1));
- DefinedOrUnknownSVal SizeZero = SVator.EvalEQ(stateNotEqual, Arg1Val,
- ValMgr.makeIntValWithPtrWidth(0, false));
+ DefinedOrUnknownSVal SizeZero =
+ svalBuilder.evalEQ(stateNotEqual, Arg1Val,
+ svalBuilder.makeIntValWithPtrWidth(0, false));
- if (const GRState *stateSizeZero = stateNotEqual->Assume(SizeZero, true)) {
- const GRState *stateFree = FreeMemAux(C, CE, stateSizeZero, 0, false);
- if (stateFree)
+ if (const GRState *stateSizeZero = stateNotEqual->assume(SizeZero, true))
+ if (const GRState *stateFree = FreeMemAux(C, CE, stateSizeZero, 0, false))
C.addTransition(stateFree->BindExpr(CE, UndefinedVal(), true));
- }
- if (const GRState *stateSizeNotZero=stateNotEqual->Assume(SizeZero,false)) {
- const GRState *stateFree = FreeMemAux(C, CE, stateSizeNotZero, 0, false);
- if (stateFree) {
+ if (const GRState *stateSizeNotZero = stateNotEqual->assume(SizeZero,false))
+ if (const GRState *stateFree = FreeMemAux(C, CE, stateSizeNotZero,
+ 0, false)) {
// FIXME: We should copy the content of the original buffer.
const GRState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
UnknownVal(), stateFree);
C.addTransition(stateRealloc);
}
- }
}
}
void MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
-
- ValueManager &ValMgr = C.getValueManager();
- SValuator &SVator = C.getSValuator();
+ SValBuilder &svalBuilder = C.getSValBuilder();
- SVal Count = state->getSVal(CE->getArg(0));
- SVal EleSize = state->getSVal(CE->getArg(1));
- SVal TotalSize = SVator.EvalBinOp(state, BO_Mul, Count, EleSize,
- ValMgr.getContext().getSizeType());
-
- SVal Zero = ValMgr.makeZeroVal(ValMgr.getContext().CharTy);
+ SVal count = state->getSVal(CE->getArg(0));
+ SVal elementSize = state->getSVal(CE->getArg(1));
+ SVal TotalSize = svalBuilder.evalBinOp(state, BO_Mul, count, elementSize,
+ svalBuilder.getContext().getSizeType());
+ SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- state = MallocMemAux(C, CE, TotalSize, Zero, state);
- C.addTransition(state);
+ C.addTransition(MallocMemAux(C, CE, TotalSize, zeroVal, state));
}
-void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
+void MallocChecker::evalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper)
+{
if (!SymReaper.hasDeadSymbols())
return;
@@ -576,7 +574,7 @@ void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
if (I->second.isAllocated()) {
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT_Leak)
BT_Leak = new BuiltinBug("Memory leak",
"Allocated memory never released. Potential memory leak.");
@@ -587,17 +585,14 @@ void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
}
// Remove the dead symbol from the map.
- RS = F.Remove(RS, I->first);
+ RS = F.remove(RS, I->first);
}
}
-
- state = state->set<RegionState>(RS);
- C.GenerateNode(state);
+ C.generateNode(state->set<RegionState>(RS));
}
-void MallocChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
- GRExprEngine &Eng) {
- SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+void MallocChecker::evalEndPath(EndOfFunctionNodeBuilder &B, void *tag,
+ ExprEngine &Eng) {
const GRState *state = B.getState();
RegionStateTy M = state->get<RegionState>();
@@ -617,14 +612,13 @@ void MallocChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
}
void MallocChecker::PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S) {
- const Expr *RetE = S->getRetValue();
- if (!RetE)
+ const Expr *retExpr = S->getRetValue();
+ if (!retExpr)
return;
const GRState *state = C.getState();
- SymbolRef Sym = state->getSVal(RetE).getAsSymbol();
-
+ SymbolRef Sym = state->getSVal(retExpr).getAsSymbol();
if (!Sym)
return;
@@ -639,7 +633,7 @@ void MallocChecker::PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S) {
C.addTransition(state);
}
-const GRState *MallocChecker::EvalAssume(const GRState *state, SVal Cond,
+const GRState *MallocChecker::evalAssume(const GRState *state, SVal Cond,
bool Assumption,
bool * /* respondsToCallback */) {
// If a symbolic region is assumed to be NULL, set its state to AllocateFailed.
@@ -656,12 +650,13 @@ const GRState *MallocChecker::EvalAssume(const GRState *state, SVal Cond,
}
// Check if the location is a freed symbolic region.
-void MallocChecker::VisitLocation(CheckerContext &C, const Stmt *S, SVal l) {
+void MallocChecker::visitLocation(CheckerContext &C, const Stmt *S, SVal l,
+ bool isLoad) {
SymbolRef Sym = l.getLocSymbolInBase();
if (Sym) {
const RefState *RS = C.getState()->get<RegionState>(Sym);
if (RS && RS->isReleased()) {
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT_UseFree)
BT_UseFree = new BuiltinBug("Use dynamically allocated memory after"
" it is freed.");
@@ -697,7 +692,7 @@ void MallocChecker::PreVisitBind(CheckerContext &C,
if (const RefState *RS = state->get<RegionState>(Sym)) {
// If ptr is NULL, no operation is performed.
const GRState *notNullState, *nullState;
- llvm::tie(notNullState, nullState) = state->Assume(l);
+ llvm::tie(notNullState, nullState) = state->assume(l);
// Generate a transition for 'nullState' to record the assumption
// that the state was null.
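
MallocChecker's core, visible across the hunks above, is a per-symbol state machine kept in the GRState: a conjured symbol starts Allocated, free() moves it to Released, a second free is a double free, a load through a Released symbol is a use-after-free, and a symbol that dies while still Allocated is a leak. A reduced sketch of that machine detached from the analyzer, with plain ints standing in for SymbolRefs (all names hypothetical):

#include <cstdio>
#include <map>

enum RefState { Allocated, Released };

class MallocModel {
  std::map<int, RefState> state; // toy stand-in for the RegionState map
public:
  void onMalloc(int sym) { state[sym] = Allocated; }

  void onFree(int sym) {
    std::map<int, RefState>::iterator it = state.find(sym);
    if (it != state.end() && it->second == Released) {
      std::printf("double free of symbol %d\n", sym); // cf. BT_DoubleFree
      return;
    }
    state[sym] = Released;
  }

  void onUse(int sym) {
    std::map<int, RefState>::iterator it = state.find(sym);
    if (it != state.end() && it->second == Released)
      std::printf("use after free of symbol %d\n", sym); // cf. BT_UseFree
  }

  // Mirrors evalDeadSymbols: a dead, still-allocated symbol is a leak.
  void onDeadSymbol(int sym) {
    std::map<int, RefState>::iterator it = state.find(sym);
    if (it == state.end())
      return;
    if (it->second == Allocated)
      std::printf("leak of symbol %d\n", sym); // cf. BT_Leak
    state.erase(it);
  }
};

int main() {
  MallocModel m;
  m.onMalloc(1); m.onFree(1); m.onFree(1);  // double free
  m.onMalloc(2); m.onFree(2); m.onUse(2);   // use after free
  m.onMalloc(3); m.onDeadSymbol(3);         // leak
}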
diff --git a/contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 48f03a3..9fb89d7 100644
--- a/contrib/llvm/tools/clang/lib/Checker/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -15,14 +15,16 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "BasicObjCFoundationChecks.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Decl.h"
using namespace clang;
+using namespace ento;
namespace {
class NSAutoreleasePoolChecker
@@ -38,13 +40,13 @@ public:
return &x;
}
- void PreVisitObjCMessageExpr(CheckerContext &C, const ObjCMessageExpr *ME);
+ void preVisitObjCMessage(CheckerContext &C, ObjCMessage msg);
};
} // end anonymous namespace
-void clang::RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng) {
+static void RegisterNSAutoreleasePoolChecker(ExprEngine &Eng) {
ASTContext &Ctx = Eng.getContext();
if (Ctx.getLangOptions().getGCMode() != LangOptions::NonGC) {
Eng.registerCheck(new NSAutoreleasePoolChecker(GetNullarySelector("release",
@@ -52,11 +54,15 @@ void clang::RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng) {
}
}
+void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterNSAutoreleasePoolChecker);
+}
+
void
-NSAutoreleasePoolChecker::PreVisitObjCMessageExpr(CheckerContext &C,
- const ObjCMessageExpr *ME) {
+NSAutoreleasePoolChecker::preVisitObjCMessage(CheckerContext &C,
+ ObjCMessage msg) {
- const Expr *receiver = ME->getInstanceReceiver();
+ const Expr *receiver = msg.getInstanceReceiver();
if (!receiver)
return;
@@ -74,13 +80,13 @@ NSAutoreleasePoolChecker::PreVisitObjCMessageExpr(CheckerContext &C,
return;
// Sending 'release' message?
- if (ME->getSelector() != releaseS)
+ if (msg.getSelector() != releaseS)
return;
- SourceRange R = ME->getSourceRange();
+ SourceRange R = msg.getSourceRange();
C.getBugReporter().EmitBasicReport("Use -drain instead of -release",
"API Upgrade (Apple)",
"Use -drain instead of -release when using NSAutoreleasePool "
- "and garbage collection", ME->getLocStart(), &R, 1);
+ "and garbage collection", R.getBegin(), &R, 1);
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index e30d54c..7a1b978 100644
--- a/contrib/llvm/tools/clang/lib/Checker/NSErrorChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -15,23 +15,24 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/Checkers/DereferenceChecker.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"
#include "BasicObjCFoundationChecks.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Decl.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
+using namespace ento;
namespace {
class NSErrorChecker : public BugType {
const Decl &CodeDecl;
const bool isNSErrorWarning;
IdentifierInfo * const II;
- GRExprEngine &Eng;
+ ExprEngine &Eng;
void CheckSignature(const ObjCMethodDecl& MD, QualType& ResultTy,
llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
@@ -48,7 +49,7 @@ class NSErrorChecker : public BugType {
void EmitRetTyWarning(BugReporter& BR, const Decl& CodeDecl);
public:
- NSErrorChecker(const Decl &D, bool isNSError, GRExprEngine& eng)
+ NSErrorChecker(const Decl &D, bool isNSError, ExprEngine& eng)
: BugType(isNSError ? "NSError** null dereference"
: "CFErrorRef* null dereference",
"Coding conventions (Apple)"),
@@ -62,7 +63,7 @@ public:
} // end anonymous namespace
-void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng,
+void ento::RegisterNSErrorChecks(BugReporter& BR, ExprEngine &Eng,
const Decl &D) {
BR.Register(new NSErrorChecker(D, true, Eng));
BR.Register(new NSErrorChecker(D, false, Eng));
diff --git a/contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index 12527e0..40040ea 100644
--- a/contrib/llvm/tools/clang/lib/Checker/NoReturnFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -12,11 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
+using namespace ento;
namespace {
@@ -28,7 +29,7 @@ public:
}
-void clang::RegisterNoReturnFunctionChecker(GRExprEngine &Eng) {
+void ento::RegisterNoReturnFunctionChecker(ExprEngine &Eng) {
Eng.registerCheck(new NoReturnFunctionChecker());
}
@@ -75,5 +76,5 @@ void NoReturnFunctionChecker::PostVisitCallExpr(CheckerContext &C,
}
if (BuildSinks)
- C.GenerateSink(CE);
+ C.generateSink(CE);
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
index 02de0a8..e1126b6 100644
--- a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
@@ -11,30 +11,31 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
#include "clang/Basic/Builtins.h"
using namespace clang;
+using namespace ento;
namespace {
class OSAtomicChecker : public Checker {
public:
static void *getTag() { static int tag = 0; return &tag; }
- virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+ virtual bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
private:
- bool EvalOSAtomicCompareAndSwap(CheckerContext &C, const CallExpr *CE);
+ bool evalOSAtomicCompareAndSwap(CheckerContext &C, const CallExpr *CE);
};
}
-void clang::RegisterOSAtomicChecker(GRExprEngine &Eng) {
+void ento::RegisterOSAtomicChecker(ExprEngine &Eng) {
Eng.registerCheck(new OSAtomicChecker());
}
-bool OSAtomicChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE) {
+bool OSAtomicChecker::evalCallExpr(CheckerContext &C,const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
SVal L = state->getSVal(Callee);
@@ -52,13 +53,13 @@ bool OSAtomicChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE) {
// Check for compare and swap.
if (FName.startswith("OSAtomicCompareAndSwap") ||
FName.startswith("objc_atomicCompareAndSwap"))
- return EvalOSAtomicCompareAndSwap(C, CE);
+ return evalOSAtomicCompareAndSwap(C, CE);
// FIXME: Other atomics.
return false;
}
-bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
+bool OSAtomicChecker::evalOSAtomicCompareAndSwap(CheckerContext &C,
const CallExpr *CE) {
// Not enough arguments to match OSAtomicCompareAndSwap?
if (CE->getNumArgs() != 3)
@@ -96,7 +97,7 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
const void *OSAtomicStoreTag = &magic_store;
// Load 'theValue'.
- GRExprEngine &Engine = C.getEngine();
+ ExprEngine &Engine = C.getEngine();
const GRState *state = C.getState();
ExplodedNodeSet Tmp;
SVal location = state->getSVal(theValueExpr);
@@ -112,7 +113,7 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
LoadTy = TR->getValueType();
}
- Engine.EvalLoad(Tmp, theValueExpr, C.getPredecessor(),
+ Engine.evalLoad(Tmp, theValueExpr, C.getPredecessor(),
state, location, OSAtomicLoadTag, LoadTy);
if (Tmp.empty()) {
@@ -142,12 +143,12 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
DefinedOrUnknownSVal oldValueVal =
cast<DefinedOrUnknownSVal>(oldValueVal_untested);
- SValuator &SVator = Engine.getSValuator();
+ SValBuilder &svalBuilder = Engine.getSValBuilder();
// Perform the comparison.
- DefinedOrUnknownSVal Cmp = SVator.EvalEQ(stateLoad,theValueVal,oldValueVal);
+ DefinedOrUnknownSVal Cmp = svalBuilder.evalEQ(stateLoad,theValueVal,oldValueVal);
- const GRState *stateEqual = stateLoad->Assume(Cmp, true);
+ const GRState *stateEqual = stateLoad->assume(Cmp, true);
// Were they equal?
if (stateEqual) {
@@ -158,10 +159,10 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
// Handle implicit value casts.
if (const TypedRegion *R =
dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
- val = SVator.EvalCast(val,R->getValueType(), newValueExpr->getType());
+ val = svalBuilder.evalCast(val,R->getValueType(), newValueExpr->getType());
}
- Engine.EvalStore(TmpStore, NULL, theValueExpr, N,
+ Engine.evalStore(TmpStore, NULL, theValueExpr, N,
stateEqual, location, val, OSAtomicStoreTag);
if (TmpStore.empty()) {
@@ -182,19 +183,19 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
SVal Res = UnknownVal();
QualType T = CE->getType();
if (!T->isVoidType())
- Res = Engine.getValueManager().makeTruthVal(true, T);
- C.GenerateNode(stateNew->BindExpr(CE, Res), predNew);
+ Res = Engine.getSValBuilder().makeTruthVal(true, T);
+ C.generateNode(stateNew->BindExpr(CE, Res), predNew);
}
}
// Were they not equal?
- if (const GRState *stateNotEqual = stateLoad->Assume(Cmp, false)) {
+ if (const GRState *stateNotEqual = stateLoad->assume(Cmp, false)) {
// Check for 'void' return type if we have a bogus function prototype.
SVal Res = UnknownVal();
QualType T = CE->getType();
if (!T->isVoidType())
- Res = Engine.getValueManager().makeTruthVal(false, CE->getType());
- C.GenerateNode(stateNotEqual->BindExpr(CE, Res), N);
+ Res = Engine.getSValBuilder().makeTruthVal(false, CE->getType());
+ C.generateNode(stateNotEqual->BindExpr(CE, Res), N);
}
}
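
evalOSAtomicCompareAndSwap above models the call by loading theValue, comparing it to oldValue with evalEQ, and following both assume() outcomes: the equal branch performs the store and binds true, the unequal branch binds false. The semantics being modeled are ordinary compare-and-swap; a concrete sketch for reference:

#include <cstdio>

// CAS(oldValue, newValue, theValue): store newValue and return true iff
// *theValue == oldValue. The checker follows both outcomes symbolically;
// here they are concrete.
static bool compareAndSwap(int oldValue, int newValue, int *theValue) {
  if (*theValue == oldValue) { // the stateEqual path: store, bind true
    *theValue = newValue;
    return true;
  }
  return false;                // the stateNotEqual path: no store, bind false
}

int main() {
  int v = 5;
  std::printf("%d, v=%d\n", (int)compareAndSwap(5, 7, &v), v); // 1, v=7
  std::printf("%d, v=%d\n", (int)compareAndSwap(5, 9, &v), v); // 0, v=7
}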
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
new file mode 100644
index 0000000..7d440ab
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -0,0 +1,100 @@
+//== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCAtSyncChecker, a builtin check that checks for null pointers
+// used as mutexes for @synchronized.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCAtSyncChecker : public CheckerVisitor<ObjCAtSyncChecker> {
+ BuiltinBug *BT_null;
+ BuiltinBug *BT_undef;
+public:
+ ObjCAtSyncChecker() : BT_null(0), BT_undef(0) {}
+ static void *getTag() { static int tag = 0; return &tag; }
+ void PreVisitObjCAtSynchronizedStmt(CheckerContext &C,
+ const ObjCAtSynchronizedStmt *S);
+};
+} // end anonymous namespace
+
+static void RegisterObjCAtSyncChecker(ExprEngine &Eng) {
+ // @synchronized is an Objective-C 2 feature.
+ if (Eng.getContext().getLangOptions().ObjC2)
+ Eng.registerCheck(new ObjCAtSyncChecker());
+}
+
+void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterObjCAtSyncChecker);
+}
+
+void ObjCAtSyncChecker::PreVisitObjCAtSynchronizedStmt(CheckerContext &C,
+ const ObjCAtSynchronizedStmt *S) {
+
+ const Expr *Ex = S->getSynchExpr();
+ const GRState *state = C.getState();
+ SVal V = state->getSVal(Ex);
+
+ // Uninitialized value used for the mutex?
+ if (isa<UndefinedVal>(V)) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_undef)
+ BT_undef = new BuiltinBug("Uninitialized value used as mutex "
+ "for @synchronized");
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_undef, BT_undef->getDescription(), N);
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Ex);
+ C.EmitReport(report);
+ }
+ return;
+ }
+
+ if (V.isUnknown())
+ return;
+
+ // Check for null mutexes.
+ const GRState *notNullState, *nullState;
+ llvm::tie(notNullState, nullState) = state->assume(cast<DefinedSVal>(V));
+
+ if (nullState) {
+ if (!notNullState) {
+ // Generate an error node. This isn't a sink since
+ // a null mutex just means no synchronization occurs.
+ if (ExplodedNode *N = C.generateNode(nullState)) {
+ if (!BT_null)
+ BT_null = new BuiltinBug("Nil value used as mutex for @synchronized() "
+ "(no synchronization will occur)");
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT_null, BT_null->getDescription(), N);
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ Ex);
+
+ C.EmitReport(report);
+ return;
+ }
+ }
+ // Don't add a transition for 'nullState'. If the value is
+ // under-constrained to be null or non-null, assume it is non-null
+ // afterwards.
+ }
+
+ if (notNullState)
+ C.addTransition(notNullState);
+}
+
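
The null-mutex logic above hinges on state->assume(V) splitting one state into a feasible-null world and a feasible-non-null world: only a definitely-null mutex is reported, while an under-constrained one is simply assumed non-nil afterwards. A schematic of that three-way split with toy types (nothing here is the real constraint-manager API):

#include <cstdio>
#include <utility>

// A value may be known null, known non-null, or unconstrained.
enum Constraint { MustBeNull, MustBeNonNull, Unconstrained };

// Returns (nonNullFeasible, nullFeasible), like the (notNullState, nullState)
// pair produced by state->assume() in the checker.
static std::pair<bool, bool> assume(Constraint c) {
  switch (c) {
  case MustBeNull:    return std::make_pair(false, true);
  case MustBeNonNull: return std::make_pair(true, false);
  default:            return std::make_pair(true, true);
  }
}

static void checkMutex(Constraint c) {
  std::pair<bool, bool> worlds = assume(c);
  if (worlds.second && !worlds.first) {
    std::puts("warning: nil mutex, no synchronization will occur");
    return;
  }
  if (worlds.first) // under-constrained or non-null: continue non-nil
    std::puts("ok: proceeding in the non-nil world");
}

int main() {
  checkMutex(MustBeNull);     // definitely nil -> report
  checkMutex(Unconstrained);  // both worlds feasible -> assume non-nil
  checkMutex(MustBeNonNull);  // fine
}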
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
new file mode 100644
index 0000000..4f247ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -0,0 +1,356 @@
+//== ObjCSelfInitChecker.cpp - Checker for 'self' initialization -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCSelfInitChecker, a builtin check that checks for uses of
+// 'self' before proper initialization.
+//
+//===----------------------------------------------------------------------===//
+
+// This checks initialization methods to verify that they assign 'self' to the
+// result of an initialization call (e.g. [super init], or [self initWith..])
+// before using 'self' or any instance variable.
+//
+// To perform the required checking, values are tagged with flags that indicate
+// 1) if the object is the one pointed to by 'self', and 2) if the object
+// is the result of an initializer (e.g. [super init]).
+//
+// Uses of an object that is true for 1) but not 2) trigger a diagnostic.
+// The uses that are currently checked are:
+// - Using instance variables.
+// - Returning the object.
+//
+// Note that we don't check for an invalid 'self' that is the receiver of an
+// obj-c message expression to cut down false positives where logging functions
+// get information from self (like its class) or do "invalidation" on self
+// when the initialization fails.
+//
+// Because the object that 'self' points to gets invalidated when a call
+// receives a reference to 'self', the checker keeps track and passes the flags
+// for 1) and 2) to the new object that 'self' points to after the call.
+//
+// FIXME (rdar://7937506): In the case of:
+// [super init];
+// return self;
+// Have an extra PathDiagnosticPiece in the path that says "called [super init],
+// but didn't assign the result to self."
+
+//===----------------------------------------------------------------------===//
+
+// FIXME: Somehow stick the link to Apple's documentation about initializing
+// objects in the diagnostics.
+// http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/ObjectiveC/Articles/ocAllocInit.html
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND);
+static bool isInitializationMethod(const ObjCMethodDecl *MD);
+static bool isInitMessage(const ObjCMessage &msg);
+static bool isSelfVar(SVal location, CheckerContext &C);
+
+namespace {
+enum SelfFlagEnum {
+ /// \brief No flag set.
+ SelfFlag_None = 0x0,
+ /// \brief Value came from 'self'.
+ SelfFlag_Self = 0x1,
+ /// \brief Value came from the result of an initializer (e.g. [super init]).
+ SelfFlag_InitRes = 0x2
+};
+}
+
+namespace {
+class ObjCSelfInitChecker : public CheckerVisitor<ObjCSelfInitChecker> {
+ /// \brief A call receiving a reference to 'self' invalidates the object that
+ /// 'self' contains. This field keeps the "self flags" assigned to the 'self'
+ /// object before the call and assigns them to the new object that 'self'
+ /// points to after the call.
+ SelfFlagEnum preCallSelfFlags;
+
+public:
+ static void *getTag() { static int tag = 0; return &tag; }
+ void postVisitObjCMessage(CheckerContext &C, ObjCMessage msg);
+ void PostVisitObjCIvarRefExpr(CheckerContext &C, const ObjCIvarRefExpr *E);
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
+ void PreVisitGenericCall(CheckerContext &C, const CallExpr *CE);
+ void PostVisitGenericCall(CheckerContext &C, const CallExpr *CE);
+ virtual void visitLocation(CheckerContext &C, const Stmt *S, SVal location,
+ bool isLoad);
+};
+} // end anonymous namespace
+
+static void RegisterObjCSelfInitChecker(ExprEngine &Eng) {
+ if (Eng.getContext().getLangOptions().ObjC1)
+ Eng.registerCheck(new ObjCSelfInitChecker());
+}
+
+void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterObjCSelfInitChecker);
+}
+
+namespace {
+
+class InitSelfBug : public BugType {
+ const std::string desc;
+public:
+ InitSelfBug() : BugType("missing \"self = [(super or self) init...]\"",
+ "missing \"self = [(super or self) init...]\"") {}
+};
+
+} // end anonymous namespace
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> SelfFlag;
+namespace { struct CalledInit {}; }
+
+namespace clang {
+namespace ento {
+ template<>
+ struct GRStateTrait<SelfFlag> : public GRStatePartialTrait<SelfFlag> {
+ static void* GDMIndex() {
+ static int index = 0;
+ return &index;
+ }
+ };
+ template <>
+ struct GRStateTrait<CalledInit> : public GRStatePartialTrait<bool> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+}
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, const GRState *state) {
+ if (SymbolRef sym = val.getAsSymbol())
+ if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
+ return (SelfFlagEnum)*attachedFlags;
+ return SelfFlag_None;
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, CheckerContext &C) {
+ return getSelfFlags(val, C.getState());
+}
+
+static void addSelfFlag(const GRState *state, SVal val,
+ SelfFlagEnum flag, CheckerContext &C) {
+ // We tag the symbol that the SVal wraps.
+ if (SymbolRef sym = val.getAsSymbol())
+ C.addTransition(state->set<SelfFlag>(sym, getSelfFlags(val, C) | flag));
+}
+
+static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
+ return getSelfFlags(val, C) & flag;
+}
+
+/// \brief Returns true if the value of the expression is the object that 'self'
+/// points to and is an object that did not come from the result of calling
+/// an initializer.
+static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
+ SVal exprVal = C.getState()->getSVal(E);
+ if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
+ return false; // value did not come from 'self'.
+ if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
+ return false; // 'self' is properly initialized.
+
+ return true;
+}
+
+static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
+ const char *errorStr) {
+ if (!E)
+ return;
+
+ if (!C.getState()->get<CalledInit>())
+ return;
+
+ if (!isInvalidSelf(E, C))
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*new InitSelfBug(), errorStr, N);
+ C.EmitReport(report);
+}
+
+void ObjCSelfInitChecker::postVisitObjCMessage(CheckerContext &C,
+ ObjCMessage msg) {
+ // When encountering a message that does initialization (init rule),
+ // tag the return value so that we know later on that if self has this value
+ // then it is properly initialized.
+
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisContext()->getDecl())))
+ return;
+
+ if (isInitMessage(msg)) {
+ // Tag the return value as the result of an initializer.
+ const GRState *state = C.getState();
+
+ // FIXME: this really should be context sensitive, where we record
+ // the current stack frame (for IPA). Also, we need to clean this
+ // value out when we return from this method.
+ state = state->set<CalledInit>(true);
+
+ SVal V = state->getSVal(msg.getOriginExpr());
+ addSelfFlag(state, V, SelfFlag_InitRes, C);
+ return;
+ }
+
+ // We don't check for an invalid 'self' in an obj-c message expression, to cut
+ // down on false positives where logging functions get information from 'self'
+ // (like its class) or perform "invalidation" on 'self' when the
+ // initialization fails.
+}
+
+void ObjCSelfInitChecker::PostVisitObjCIvarRefExpr(CheckerContext &C,
+ const ObjCIvarRefExpr *E) {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(E->getBase(), C,
+ "Instance variable used while 'self' is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+void ObjCSelfInitChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *S) {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(S->getRetValue(), C,
+ "Returning 'self' while it is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+// When a call receives a reference to 'self', [Pre/Post]VisitGenericCall pass
+// the SelfFlags from the object 'self' points to before the call, to the new
+// object after the call. This is to avoid invalidation of 'self' by logging
+// functions.
+// Another common pattern in classes with multiple initializers is to put the
+// subclass's common initialization bits into a static function that receives
+// the value of 'self', e.g.:
+// @code
+// if (!(self = [super init]))
+// return nil;
+// if (!(self = _commonInit(self)))
+// return nil;
+// @endcode
+// Until we can use inter-procedural analysis, in such a call, transfer the
+// SelfFlags to the result of the call.
+
+void ObjCSelfInitChecker::PreVisitGenericCall(CheckerContext &C,
+ const CallExpr *CE) {
+ const GRState *state = C.getState();
+ for (CallExpr::const_arg_iterator
+ I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
+ SVal argV = state->getSVal(*I);
+ if (isSelfVar(argV, C)) {
+ preCallSelfFlags = getSelfFlags(state->getSVal(cast<Loc>(argV)), C);
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ preCallSelfFlags = getSelfFlags(argV, C);
+ return;
+ }
+ }
+}
+
+void ObjCSelfInitChecker::PostVisitGenericCall(CheckerContext &C,
+ const CallExpr *CE) {
+ const GRState *state = C.getState();
+ for (CallExpr::const_arg_iterator
+ I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
+ SVal argV = state->getSVal(*I);
+ if (isSelfVar(argV, C)) {
+ addSelfFlag(state, state->getSVal(cast<Loc>(argV)), preCallSelfFlags, C);
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ addSelfFlag(state, state->getSVal(CE), preCallSelfFlags, C);
+ return;
+ }
+ }
+}
+
+void ObjCSelfInitChecker::visitLocation(CheckerContext &C, const Stmt *S,
+ SVal location, bool isLoad) {
+ // Tag the result of a load from 'self' so that we can easily know that the
+ // value is the object that 'self' points to.
+ const GRState *state = C.getState();
+ if (isSelfVar(location, C))
+ addSelfFlag(state, state->getSVal(cast<Loc>(location)), SelfFlag_Self, C);
+}
+
+// FIXME: A callback should disable checkers at the start of functions.
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
+ if (!ND)
+ return false;
+
+ const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND);
+ if (!MD)
+ return false;
+ if (!isInitializationMethod(MD))
+ return false;
+
+ // self = [super init] applies only to NSObject subclasses.
+ // For instance, NSProxy doesn't implement -init.
+ ASTContext& Ctx = MD->getASTContext();
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ ObjCInterfaceDecl* ID = MD->getClassInterface()->getSuperClass();
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+ }
+ if (!ID)
+ return false;
+
+ return true;
+}
+
+/// \brief Returns true if the location is 'self'.
+static bool isSelfVar(SVal location, CheckerContext &C) {
+ AnalysisContext *analCtx = C.getCurrentAnalysisContext();
+ if (!analCtx->getSelfDecl())
+ return false;
+ if (!isa<loc::MemRegionVal>(location))
+ return false;
+
+ loc::MemRegionVal MRV = cast<loc::MemRegionVal>(location);
+ if (const DeclRegion *DR = dyn_cast<DeclRegion>(MRV.getRegion()))
+ return (DR->getDecl() == analCtx->getSelfDecl());
+
+ return false;
+}
+
+static bool isInitializationMethod(const ObjCMethodDecl *MD) {
+ // Init methods with a prefix, like '-(id)_init', are private and their
+ // requirements are less strict, so we don't check those.
+ return MD->isInstanceMethod() &&
+ cocoa::deriveNamingConvention(MD->getSelector(),
+ /*ignorePrefix=*/false) == cocoa::InitRule;
+}
+
+static bool isInitMessage(const ObjCMessage &msg) {
+ return cocoa::deriveNamingConvention(msg.getSelector()) == cocoa::InitRule;
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index 2523cff..6e92498 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -13,9 +13,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerV2.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/DeclObjC.h"
@@ -23,6 +24,7 @@
#include "clang/Basic/SourceManager.h"
using namespace clang;
+using namespace ento;
enum IVarState { Unused, Used };
typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
@@ -97,7 +99,7 @@ static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
}
}
-void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
+static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
BugReporter &BR) {
const ObjCInterfaceDecl* ID = D->getClassInterface();
@@ -113,9 +115,11 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
// (a) aren't private
// (b) explicitly marked unused
// (c) are iboutlets
+ // (d) are unnamed bitfields
if (ID->getAccessControl() != ObjCIvarDecl::Private ||
ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
- ID->getAttr<IBOutletCollectionAttr>())
+ ID->getAttr<IBOutletCollectionAttr>() ||
+ ID->isUnnamedBitfield())
continue;
M[ID] = Unused;
@@ -159,3 +163,22 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
os.str(), I->first->getLocation());
}
}
+
+//===----------------------------------------------------------------------===//
+// ObjCUnusedIvarsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCUnusedIvarsChecker : public CheckerV2<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ checkObjCUnusedIvar(D, BR);
+ }
+};
+}
+
+void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCUnusedIvarsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index cbac423..741e48b 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PointerArithChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -12,11 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class PointerArithChecker
@@ -53,7 +55,7 @@ void PointerArithChecker::PreVisitBinaryOperator(CheckerContext &C,
if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
isa<CompoundLiteralRegion>(LR)) {
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT)
BT = new BuiltinBug("Dangerous pointer arithmetic",
"Pointer arithmetic done on non-array variables "
@@ -66,6 +68,10 @@ void PointerArithChecker::PreVisitBinaryOperator(CheckerContext &C,
}
}
-void clang::RegisterPointerArithChecker(GRExprEngine &Eng) {
+static void RegisterPointerArithChecker(ExprEngine &Eng) {
Eng.registerCheck(new PointerArithChecker());
}
+
+void ento::registerPointerArithChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterPointerArithChecker);
+}
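
For context, the pattern PointerArithChecker diagnoses looks like the following hypothetical snippet (not from this commit): the pointer being adjusted refers to a plain variable rather than an array.

  void pointer_arith_example(void) {
    int x = 0;
    int *p = &x;
    p = p + 1;  /* warned: pointer arithmetic on a non-array variable */
    (void)p;
  }
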
diff --git a/contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index d64b6ae..f28fe9a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PointerSubChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -13,11 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class PointerSubChecker
@@ -62,7 +64,7 @@ void PointerSubChecker::PreVisitBinaryOperator(CheckerContext &C,
if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
return;
- if (ExplodedNode *N = C.GenerateNode()) {
+ if (ExplodedNode *N = C.generateNode()) {
if (!BT)
BT = new BuiltinBug("Pointer subtraction",
"Subtraction of two pointers that do not point to "
@@ -73,6 +75,10 @@ void PointerSubChecker::PreVisitBinaryOperator(CheckerContext &C,
}
}
-void clang::RegisterPointerSubChecker(GRExprEngine &Eng) {
+static void RegisterPointerSubChecker(ExprEngine &Eng) {
Eng.registerCheck(new PointerSubChecker());
}
+
+void ento::registerPointerSubChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterPointerSubChecker);
+}
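
PointerSubChecker, similarly, warns when the two operands of a pointer subtraction demonstrably point into different memory objects, as in this illustrative, hypothetical case:

  int pointer_sub_example(void) {
    int a = 0, b = 0;
    return (int)(&a - &b);  /* warned: operands point to different objects */
  }
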
diff --git a/contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 74e266c..34c095f 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PthreadLockChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -8,17 +8,19 @@
//===----------------------------------------------------------------------===//
//
// This defines PthreadLockChecker, a simple lock -> unlock checker. Eventually
-// this shouldn't be registered with GRExprEngineInternalChecks.
+// this shouldn't be registered with ExprEngineInternalChecks.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "GRExprEngineExperimentalChecks.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
#include "llvm/ADT/ImmutableSet.h"
using namespace clang;
+using namespace ento;
namespace {
class PthreadLockChecker
@@ -44,16 +46,22 @@ public:
// GDM Entry for tracking lock state.
namespace { class LockSet {}; }
namespace clang {
+namespace ento {
template <> struct GRStateTrait<LockSet> :
public GRStatePartialTrait<llvm::ImmutableSet<const MemRegion*> > {
static void* GDMIndex() { return PthreadLockChecker::getTag(); }
};
+} // end ento namespace
} // end clang namespace
-void clang::RegisterPthreadLockChecker(GRExprEngine &Eng) {
+static void RegisterPthreadLockChecker(ExprEngine &Eng) {
Eng.registerCheck(new PthreadLockChecker());
}
+void ento::registerPthreadLockChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterPthreadLockChecker);
+}
+
void PthreadLockChecker::PostVisitCallExpr(CheckerContext &C,
const CallExpr *CE) {
@@ -65,7 +73,10 @@ void PthreadLockChecker::PostVisitCallExpr(CheckerContext &C,
if (!R)
return;
- llvm::StringRef FName = R->getDecl()->getName();
+ IdentifierInfo *II = R->getDecl()->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return;
+ llvm::StringRef FName = II->getName();
if (FName == "pthread_mutex_lock") {
if (CE->getNumArgs() != 1)
@@ -103,20 +114,20 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (isTryLock) {
// Bifurcate the state, and allow a mode where the lock acquisition fails.
const GRState *lockFail;
- llvm::tie(lockFail, lockSucc) = state->Assume(retVal);
+ llvm::tie(lockFail, lockSucc) = state->assume(retVal);
assert(lockFail && lockSucc);
- C.addTransition(C.GenerateNode(CE, lockFail));
+ C.addTransition(C.generateNode(CE, lockFail));
}
else {
// Assume that the return value was 0.
- lockSucc = state->Assume(retVal, false);
+ lockSucc = state->assume(retVal, false);
assert(lockSucc);
}
// Record that the lock was acquired.
lockSucc = lockSucc->add<LockSet>(lockR);
- C.addTransition(lockSucc != state ? C.GenerateNode(CE, lockSucc) :
+ C.addTransition(lockSucc != state ? C.generateNode(CE, lockSucc) :
C.getPredecessor());
}
@@ -137,5 +148,5 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (state == unlockState)
return;
- C.addTransition(C.GenerateNode(CE, unlockState));
+ C.addTransition(C.generateNode(CE, unlockState));
}
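
The try-lock handling above bifurcates the state so that both outcomes of a call such as pthread_mutex_trylock() are explored. A minimal sketch of the code being modeled (hypothetical, for illustration):

  #include <pthread.h>

  void trylock_example(pthread_mutex_t *mu) {
    if (pthread_mutex_trylock(mu) == 0) { /* success path: lock added to LockSet */
      /* ... critical section ... */
      pthread_mutex_unlock(mu);           /* lock removed again */
    }
    /* failure path: nothing was acquired, so no unlock is expected */
  }
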
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index a9eb5ce..838a00f 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -12,12 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class ReturnPointerRangeChecker :
@@ -30,7 +31,7 @@ public:
};
}
-void clang::RegisterReturnPointerRangeChecker(GRExprEngine &Eng) {
+void ento::RegisterReturnPointerRangeChecker(ExprEngine &Eng) {
Eng.registerCheck(new ReturnPointerRangeChecker());
}
@@ -48,19 +49,16 @@ void ReturnPointerRangeChecker::PreVisitReturnStmt(CheckerContext &C,
SVal V = state->getSVal(RetE);
const MemRegion *R = V.getAsRegion();
- if (!R)
- return;
-
- R = R->StripCasts();
- if (!R)
- return;
const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
if (!ER)
return;
- DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
-
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+ // A zero index is always in bounds; this also lets through ElementRegions
+ // created for pointer casts.
+ if (Idx.isZeroConstant())
+ return;
// FIXME: All of this out-of-bounds checking should eventually be refactored
// into a common place.
@@ -68,10 +66,10 @@ void ReturnPointerRangeChecker::PreVisitReturnStmt(CheckerContext &C,
= C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
- const GRState *StInBound = state->AssumeInBound(Idx, NumElements, true);
- const GRState *StOutBound = state->AssumeInBound(Idx, NumElements, false);
+ const GRState *StInBound = state->assumeInBound(Idx, NumElements, true);
+ const GRState *StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.GenerateSink(StOutBound);
+ ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
return;
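
As an illustration of what ReturnPointerRangeChecker reports (a hypothetical snippet; note that a zero index is now accepted per the change above):

  static int table[10];

  int *return_range_example(void) {
    return table + 11;  /* warned: returned pointer indexes past the end of 'table' */
  }
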
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 73d1890..555eaf4 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ReturnUndefChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -13,12 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class ReturnUndefChecker :
@@ -31,7 +32,7 @@ public:
};
}
-void clang::RegisterReturnUndefChecker(GRExprEngine &Eng) {
+void ento::RegisterReturnUndefChecker(ExprEngine &Eng) {
Eng.registerCheck(new ReturnUndefChecker());
}
@@ -49,7 +50,7 @@ void ReturnUndefChecker::PreVisitReturnStmt(CheckerContext &C,
if (!C.getState()->getSVal(RetE).isUndef())
return;
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
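
ReturnUndefChecker itself covers the classic case below (hypothetical example):

  int return_undef_example(void) {
    int result;     /* never initialized */
    return result;  /* warned: returning a garbage value */
  }
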
diff --git a/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrLeakChecker.cpp
index c67a81d..363f404 100644
--- a/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrLeakChecker.cpp
@@ -12,13 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
+using namespace ento;
namespace {
class StackAddrLeakChecker : public CheckerVisitor<StackAddrLeakChecker> {
@@ -32,7 +34,7 @@ public:
return &x;
}
void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
- void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+ void evalEndPath(EndOfFunctionNodeBuilder &B, void *tag, ExprEngine &Eng);
private:
void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
SourceRange GenName(llvm::raw_ostream &os, const MemRegion *R,
@@ -40,10 +42,14 @@ private:
};
}
-void clang::RegisterStackAddrLeakChecker(GRExprEngine &Eng) {
+static void RegisterStackAddrLeakChecker(ExprEngine &Eng) {
Eng.registerCheck(new StackAddrLeakChecker());
}
+void ento::registerStackAddrLeakChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterStackAddrLeakChecker);
+}
+
SourceRange StackAddrLeakChecker::GenName(llvm::raw_ostream &os,
const MemRegion *R,
SourceManager &SM) {
@@ -89,7 +95,7 @@ SourceRange StackAddrLeakChecker::GenName(llvm::raw_ostream &os,
void StackAddrLeakChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
const Expr *RetE) {
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
@@ -129,9 +135,9 @@ void StackAddrLeakChecker::PreVisitReturnStmt(CheckerContext &C,
}
}
-void StackAddrLeakChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
- GRExprEngine &Eng) {
- SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+void StackAddrLeakChecker::evalEndPath(EndOfFunctionNodeBuilder &B, void *tag,
+ ExprEngine &Eng) {
+
const GRState *state = B.getState();
// Iterate over all bindings to global variables and see if it contains
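
The return-statement half of this checker catches code like the hypothetical function below; the end-of-path half (evalEndPath above) catches the same kind of address escaping through a global instead.

  int *stack_addr_example(void) {
    int local = 42;
    return &local;  /* warned: address of stack memory escapes the function */
  }
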
diff --git a/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 8553875..2655be2 100644
--- a/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -11,15 +11,17 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineExperimentalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
+using namespace ento;
namespace {
@@ -72,9 +74,9 @@ public:
return &x;
}
- virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
- void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
- void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+ virtual bool evalCallExpr(CheckerContext &C, const CallExpr *CE);
+ void evalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
+ void evalEndPath(EndOfFunctionNodeBuilder &B, void *tag, ExprEngine &Eng);
void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
private:
@@ -104,18 +106,24 @@ private:
} // end anonymous namespace
namespace clang {
+namespace ento {
template <>
struct GRStateTrait<StreamState>
: public GRStatePartialTrait<llvm::ImmutableMap<SymbolRef, StreamState> > {
static void *GDMIndex() { return StreamChecker::getTag(); }
};
}
+}
-void clang::RegisterStreamChecker(GRExprEngine &Eng) {
+static void RegisterStreamChecker(ExprEngine &Eng) {
Eng.registerCheck(new StreamChecker());
}
-bool StreamChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+void ento::registerStreamChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterStreamChecker);
+}
+
+bool StreamChecker::evalCallExpr(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
SVal L = state->getSVal(Callee);
@@ -224,16 +232,16 @@ void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) {
void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- ValueManager &ValMgr = C.getValueManager();
- DefinedSVal RetVal = cast<DefinedSVal>(ValMgr.getConjuredSymbolVal(0, CE,
- Count));
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedSVal RetVal =
+ cast<DefinedSVal>(svalBuilder.getConjuredSymbolVal(0, CE, Count));
state = state->BindExpr(CE, RetVal);
ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
const GRState *stateNotNull, *stateNull;
- llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, RetVal);
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
if (SymbolRef Sym = RetVal.getAsSymbol()) {
// if RetVal is not NULL, set the symbol's state to Opened.
@@ -271,29 +279,24 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) {
return;
// Check the legality of the 'whence' argument of 'fseek'.
SVal Whence = state->getSVal(CE->getArg(2));
- bool WhenceIsLegal = true;
const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence);
+
if (!CI)
- WhenceIsLegal = false;
+ return;
int64_t x = CI->getValue().getSExtValue();
- if (!(x == 0 || x == 1 || x == 2))
- WhenceIsLegal = false;
-
- if (!WhenceIsLegal) {
- if (ExplodedNode *N = C.GenerateSink(state)) {
- if (!BT_illegalwhence)
- BT_illegalwhence = new BuiltinBug("Illegal whence argument",
- "The whence argument to fseek() should be "
- "SEEK_SET, SEEK_END, or SEEK_CUR.");
- BugReport *R = new BugReport(*BT_illegalwhence,
- BT_illegalwhence->getDescription(), N);
- C.EmitReport(R);
- }
+ if (x >= 0 && x <= 2)
return;
- }
- C.addTransition(state);
+ if (ExplodedNode *N = C.generateNode(state)) {
+ if (!BT_illegalwhence)
+ BT_illegalwhence = new BuiltinBug("Illegal whence argument",
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR.");
+ BugReport *R = new BugReport(*BT_illegalwhence,
+ BT_illegalwhence->getDescription(), N);
+ C.EmitReport(R);
+ }
}
void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) {
@@ -352,10 +355,10 @@ const GRState *StreamChecker::CheckNullStream(SVal SV, const GRState *state,
ConstraintManager &CM = C.getConstraintManager();
const GRState *stateNotNull, *stateNull;
- llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (!stateNotNull && stateNull) {
- if (ExplodedNode *N = C.GenerateSink(stateNull)) {
+ if (ExplodedNode *N = C.generateSink(stateNull)) {
if (!BT_nullfp)
BT_nullfp = new BuiltinBug("NULL stream pointer",
"Stream pointer might be NULL.");
@@ -383,7 +386,7 @@ const GRState *StreamChecker::CheckDoubleClose(const CallExpr *CE,
// Check: Double close a File Descriptor could cause undefined behaviour.
// Conforming to man-pages
if (SS->isClosed()) {
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (N) {
if (!BT_doubleclose)
BT_doubleclose = new BuiltinBug("Double fclose",
@@ -400,7 +403,7 @@ const GRState *StreamChecker::CheckDoubleClose(const CallExpr *CE,
return state->set<StreamState>(Sym, StreamState::getClosed(CE));
}
-void StreamChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
+void StreamChecker::evalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
@@ -410,7 +413,7 @@ void StreamChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
return;
if (SS->isOpened()) {
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (N) {
if (!BT_ResourceLeak)
BT_ResourceLeak = new BuiltinBug("Resource Leak",
@@ -423,9 +426,8 @@ void StreamChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
}
}
-void StreamChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
- GRExprEngine &Eng) {
- SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+void StreamChecker::evalEndPath(EndOfFunctionNodeBuilder &B, void *tag,
+ ExprEngine &Eng) {
const GRState *state = B.getState();
typedef llvm::ImmutableMap<SymbolRef, StreamState> SymMap;
SymMap M = state->get<StreamState>();
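
Two of the stream misuses modeled above, in one hypothetical snippet (note that the illegal-whence diagnostic is now emitted on a non-fatal node, so analysis continues past it):

  #include <stdio.h>

  void stream_example(const char *path) {
    FILE *fp = fopen(path, "r");
    if (!fp)
      return;
    fseek(fp, 0L, 3);  /* warned: whence should be SEEK_SET, SEEK_END, or SEEK_CUR */
    fclose(fp);
    fclose(fp);        /* warned: double fclose */
  }
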
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 1ff0641..14ae9ed 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UndefBranchChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -12,11 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
using namespace clang;
+using namespace ento;
namespace {
@@ -49,13 +50,13 @@ class UndefBranchChecker : public Checker {
public:
UndefBranchChecker() : BT(0) {}
static void *getTag();
- void VisitBranchCondition(GRBranchNodeBuilder &Builder, GRExprEngine &Eng,
+ void VisitBranchCondition(BranchNodeBuilder &Builder, ExprEngine &Eng,
const Stmt *Condition, void *tag);
};
}
-void clang::RegisterUndefBranchChecker(GRExprEngine &Eng) {
+void ento::RegisterUndefBranchChecker(ExprEngine &Eng) {
Eng.registerCheck(new UndefBranchChecker());
}
@@ -64,8 +65,8 @@ void *UndefBranchChecker::getTag() {
return &x;
}
-void UndefBranchChecker::VisitBranchCondition(GRBranchNodeBuilder &Builder,
- GRExprEngine &Eng,
+void UndefBranchChecker::VisitBranchCondition(BranchNodeBuilder &Builder,
+ ExprEngine &Eng,
const Stmt *Condition, void *tag){
const GRState *state = Builder.getState();
SVal X = state->getSVal(Condition);
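
UndefBranchChecker fires when the condition itself is a garbage value, e.g. in this hypothetical snippet:

  void undef_branch_example(void) {
    int flag;   /* never initialized */
    if (flag)   /* warned: branch condition is a garbage value */
      return;
  }
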
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index b1010c9..6d3c966 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugType.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
namespace {
class UndefCapturedBlockVarChecker
@@ -31,7 +32,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng) {
+void ento::RegisterUndefCapturedBlockVarChecker(ExprEngine &Eng) {
Eng.registerCheck(new UndefCapturedBlockVarChecker());
}
@@ -55,7 +56,7 @@ static const BlockDeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
void
UndefCapturedBlockVarChecker::PostVisitBlockExpr(CheckerContext &C,
const BlockExpr *BE) {
- if (!BE->hasBlockDeclRefExprs())
+ if (!BE->getBlockDecl()->hasCaptures())
return;
const GRState *state = C.getState();
@@ -76,19 +77,19 @@ UndefCapturedBlockVarChecker::PostVisitBlockExpr(CheckerContext &C,
// Get the VarRegion associated with VD in the local stack frame.
const LocationContext *LC = C.getPredecessor()->getLocationContext();
- VR = C.getValueManager().getRegionManager().getVarRegion(VD, LC);
+ VR = C.getSValBuilder().getRegionManager().getVarRegion(VD, LC);
if (state->getSVal(VR).isUndef())
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT)
- BT = new BuiltinBug("Captured block variable is uninitialized");
+ BT = new BuiltinBug("uninitialized variable captured by block");
// Generate a bug report.
llvm::SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
- os << "Variable '" << VD->getName() << "' is captured by block with "
- "a garbage value";
+ os << "Variable '" << VD->getName()
+ << "' is uninitialized when captured by block";
EnhancedBugReport *R = new EnhancedBugReport(*BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
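
The reworded diagnostic corresponds to code like this hypothetical snippet (requires Clang's blocks extension, -fblocks):

  void block_capture_example(void) {
    int value;                      /* never initialized */
    void (^blk)(void) = ^{
      (void)value;                  /* warned: 'value' is uninitialized when captured */
    };
    blk();
  }
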
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 8b07aed..64a3567 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UndefResultChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -7,17 +7,18 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines UndefResultChecker, a builtin check in GRExprEngine that
+// This defines UndefResultChecker, a builtin check in ExprEngine that
// performs checks for undefined results of non-assignment binary operators.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class UndefResultChecker
@@ -32,7 +33,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterUndefResultChecker(GRExprEngine &Eng) {
+void ento::RegisterUndefResultChecker(ExprEngine &Eng) {
Eng.registerCheck(new UndefResultChecker());
}
@@ -41,7 +42,7 @@ void UndefResultChecker::PostVisitBinaryOperator(CheckerContext &C,
const GRState *state = C.getState();
if (state->getSVal(B).isUndef()) {
// Generate an error node.
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
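
UndefResultChecker triggers on the binary operator itself, independent of what the result is later used for; a hypothetical trigger:

  void undef_result_example(void) {
    int a;          /* never initialized */
    int b = a + 1;  /* warned: result of '+' is undefined */
    (void)b;
  }
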
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 148629e..ff03448 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -7,16 +7,17 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines UndefinedArraySubscriptChecker, a builtin check in GRExprEngine
+// This defines UndefinedArraySubscriptChecker, a builtin check in ExprEngine
// that performs checks for undefined array subscripts.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class UndefinedArraySubscriptChecker
@@ -33,7 +34,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng) {
+void ento::RegisterUndefinedArraySubscriptChecker(ExprEngine &Eng) {
Eng.registerCheck(new UndefinedArraySubscriptChecker());
}
@@ -41,7 +42,7 @@ void
UndefinedArraySubscriptChecker::PreVisitArraySubscriptExpr(CheckerContext &C,
const ArraySubscriptExpr *A) {
if (C.getState()->getSVal(A->getIdx()).isUndef()) {
- if (ExplodedNode *N = C.GenerateSink()) {
+ if (ExplodedNode *N = C.generateSink()) {
if (!BT)
BT = new BuiltinBug("Array subscript is undefined");
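
For reference, a hypothetical case of an undefined subscript:

  int subscript_example(const int *buf) {
    int idx;          /* never initialized */
    return buf[idx];  /* warned: array subscript is undefined */
  }
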
diff --git a/contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index ccc9748..e53cbba 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -7,16 +7,17 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines UndefinedAssginmentChecker, a builtin check in GRExprEngine that
+// This defines UndefinedAssignmentChecker, a builtin check in ExprEngine that
// checks for assigning undefined values.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "InternalChecks.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
class UndefinedAssignmentChecker
@@ -30,7 +31,7 @@ public:
};
}
-void clang::RegisterUndefinedAssignmentChecker(GRExprEngine &Eng){
+void ento::RegisterUndefinedAssignmentChecker(ExprEngine &Eng){
Eng.registerCheck(new UndefinedAssignmentChecker());
}
@@ -46,7 +47,7 @@ void UndefinedAssignmentChecker::PreVisitBind(CheckerContext &C,
if (!val.isUndef())
return;
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
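
And the corresponding hypothetical trigger for this checker:

  void undef_assign_example(void) {
    int src;        /* never initialized */
    int dst = src;  /* warned: assigned value is garbage or undefined */
    (void)dst;
  }
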
diff --git a/contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index de7346d..a53ebb5 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UnixAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -12,15 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
+#include "ClangSACheckers.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringSwitch.h"
#include <fcntl.h>
using namespace clang;
+using namespace ento;
using llvm::Optional;
namespace {
@@ -28,6 +30,7 @@ class UnixAPIChecker : public CheckerVisitor<UnixAPIChecker> {
enum SubChecks {
OpenFn = 0,
PthreadOnceFn = 1,
+ MallocZero = 2,
NumChecks
};
@@ -44,10 +47,14 @@ public:
};
} //end anonymous namespace
-void clang::RegisterUnixAPIChecker(GRExprEngine &Eng) {
+static void RegisterUnixAPIChecker(ExprEngine &Eng) {
Eng.registerCheck(new UnixAPIChecker());
}
+void ento::registerUnixAPIChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterUnixAPIChecker);
+}
+
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
@@ -72,7 +79,7 @@ static void CheckOpen(CheckerContext &C, UnixAPIChecker &UC,
else {
// FIXME: We need a more general way of getting the O_CREAT value.
// We could possibly grovel through the preprocessor state, but
- // that would require passing the Preprocessor object to the GRExprEngine.
+ // that would require passing the Preprocessor object to the ExprEngine.
return;
}
}
@@ -98,18 +105,18 @@ static void CheckOpen(CheckerContext &C, UnixAPIChecker &UC,
}
NonLoc oflags = cast<NonLoc>(V);
NonLoc ocreateFlag =
- cast<NonLoc>(C.getValueManager().makeIntVal(UC.Val_O_CREAT.getValue(),
+ cast<NonLoc>(C.getSValBuilder().makeIntVal(UC.Val_O_CREAT.getValue(),
oflagsEx->getType()));
- SVal maskedFlagsUC = C.getSValuator().EvalBinOpNN(state, BO_And,
- oflags, ocreateFlag,
- oflagsEx->getType());
+ SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
+ oflags, ocreateFlag,
+ oflagsEx->getType());
if (maskedFlagsUC.isUnknownOrUndef())
return;
DefinedSVal maskedFlags = cast<DefinedSVal>(maskedFlagsUC);
// Check if maskedFlags is non-zero.
const GRState *trueState, *falseState;
- llvm::tie(trueState, falseState) = state->Assume(maskedFlags);
+ llvm::tie(trueState, falseState) = state->assume(maskedFlags);
// Only emit an error if the value of 'maskedFlags' is properly
// constrained;
@@ -117,7 +124,7 @@ static void CheckOpen(CheckerContext &C, UnixAPIChecker &UC,
return;
if (CE->getNumArgs() < 3) {
- ExplodedNode *N = C.GenerateSink(trueState);
+ ExplodedNode *N = C.generateSink(trueState);
if (!N)
return;
@@ -152,7 +159,7 @@ static void CheckPthreadOnce(CheckerContext &C, UnixAPIChecker &,
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
- ExplodedNode *N = C.GenerateSink(state);
+ ExplodedNode *N = C.generateSink(state);
if (!N)
return;
@@ -174,6 +181,56 @@ static void CheckPthreadOnce(CheckerContext &C, UnixAPIChecker &,
}
//===----------------------------------------------------------------------===//
+// "malloc" with allocation size 0
+//===----------------------------------------------------------------------===//
+
+// FIXME: Eventually this should be rolled into the MallocChecker, but this
+// check is more basic and is valuable for widespread use.
+static void CheckMallocZero(CheckerContext &C, UnixAPIChecker &UC,
+ const CallExpr *CE, BugType *&BT) {
+
+ // Sanity check that malloc takes one argument.
+ if (CE->getNumArgs() != 1)
+ return;
+
+ // Check if the allocation size is 0.
+ const GRState *state = C.getState();
+ SVal argVal = state->getSVal(CE->getArg(0));
+
+ if (argVal.isUnknownOrUndef())
+ return;
+
+ const GRState *trueState, *falseState;
+ llvm::tie(trueState, falseState) = state->assume(cast<DefinedSVal>(argVal));
+
+ // Is the value perfectly constrained to zero?
+ if (falseState && !trueState) {
+ ExplodedNode *N = C.generateSink(falseState);
+ if (!N)
+ return;
+
+ // FIXME: Add reference to CERT advisory, and/or C99 standard in bug
+ // output.
+
+ LazyInitialize(BT, "Undefined allocation of 0 bytes");
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT, "Call to 'malloc' has an allocation size"
+ " of 0 bytes", N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
+ CE->getArg(0));
+ C.EmitReport(report);
+ return;
+ }
+ // Assume the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state) {
+ C.addTransition(trueState);
+ }
+}
+
+//===----------------------------------------------------------------------===//
// Central dispatch function.
//===----------------------------------------------------------------------===//
@@ -213,9 +270,12 @@ void UnixAPIChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
const SubCheck &SC =
llvm::StringSwitch<SubCheck>(FI->getName())
- .Case("open", SubCheck(CheckOpen, this, BTypes[OpenFn]))
- .Case("pthread_once", SubCheck(CheckPthreadOnce, this,
- BTypes[PthreadOnceFn]))
+ .Case("open",
+ SubCheck(CheckOpen, this, BTypes[OpenFn]))
+ .Case("pthread_once",
+ SubCheck(CheckPthreadOnce, this, BTypes[PthreadOnceFn]))
+ .Case("malloc",
+ SubCheck(CheckMallocZero, this, BTypes[MallocZero]))
.Default(SubCheck());
SC.run(C, CE);
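
The new MallocZero subcheck only fires when the size argument is perfectly constrained to zero, as in this hypothetical call:

  #include <stdlib.h>

  void malloc_zero_example(void) {
    char *p = malloc(0);  /* warned: allocation size of 0 bytes */
    free(p);
  }
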
diff --git a/contrib/llvm/tools/clang/lib/Checker/UnreachableCodeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 7a56c7f..3038e29 100644
--- a/contrib/llvm/tools/clang/lib/Checker/UnreachableCodeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -13,15 +13,16 @@
// A similar flow-sensitive only check exists in Analysis/ReachableCode.cpp
//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/ExplodedGraph.h"
-#include "clang/Checker/PathSensitive/SVals.h"
-#include "clang/Checker/PathSensitive/CheckerHelpers.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "GRExprEngineExperimentalChecks.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "llvm/ADT/SmallPtrSet.h"
// The number of CFGBlock pointers we want to reserve memory for. This is used
@@ -29,14 +30,15 @@
#define DEFAULT_CFGBLOCKS 256
using namespace clang;
+using namespace ento;
namespace {
-class UnreachableCodeChecker : public CheckerVisitor<UnreachableCodeChecker> {
+class UnreachableCodeChecker : public Checker {
public:
static void *getTag();
void VisitEndAnalysis(ExplodedGraph &G,
BugReporter &B,
- GRExprEngine &Eng);
+ ExprEngine &Eng);
private:
static inline const Stmt *getUnreachableStmt(const CFGBlock *CB);
void FindUnreachableEntryPoints(const CFGBlock *CB);
@@ -53,13 +55,17 @@ void *UnreachableCodeChecker::getTag() {
return &x;
}
-void clang::RegisterUnreachableCodeChecker(GRExprEngine &Eng) {
+static void RegisterUnreachableCodeChecker(ExprEngine &Eng) {
Eng.registerCheck(new UnreachableCodeChecker());
}
+void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
+ mgr.addCheckerRegisterFunction(RegisterUnreachableCodeChecker);
+}
+
void UnreachableCodeChecker::VisitEndAnalysis(ExplodedGraph &G,
BugReporter &B,
- GRExprEngine &Eng) {
+ ExprEngine &Eng) {
// Bail out if we didn't cover all paths
if (Eng.hasWorkRemaining())
return;
@@ -91,7 +97,7 @@ void UnreachableCodeChecker::VisitEndAnalysis(ExplodedGraph &G,
ASTContext &Ctx = B.getContext();
// Find CFGBlocks that were not covered by any node
- for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
+ for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
const CFGBlock *CB = *I;
// Check if the block is unreachable
if (reachable.count(CB->getBlockID()))
@@ -102,7 +108,8 @@ void UnreachableCodeChecker::VisitEndAnalysis(ExplodedGraph &G,
continue;
// Find the entry points for this block
- FindUnreachableEntryPoints(CB);
+ if (!visited.count(CB->getBlockID()))
+ FindUnreachableEntryPoints(CB);
// This block may have been pruned; check if we still want to report it
if (reachable.count(CB->getBlockID()))
@@ -116,10 +123,12 @@ void UnreachableCodeChecker::VisitEndAnalysis(ExplodedGraph &G,
// FIXME: This should be extended to include other unreachable markers,
// such as llvm_unreachable.
if (!CB->empty()) {
- const Stmt *First = CB->front();
- if (const CallExpr *CE = dyn_cast<CallExpr>(First)) {
- if (CE->isBuiltinCall(Ctx) == Builtin::BI__builtin_unreachable)
- continue;
+ CFGElement First = CB->front();
+ if (CFGStmt S = First.getAs<CFGStmt>()) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S.getStmt())) {
+ if (CE->isBuiltinCall(Ctx) == Builtin::BI__builtin_unreachable)
+ continue;
+ }
}
}
@@ -147,35 +156,28 @@ void UnreachableCodeChecker::VisitEndAnalysis(ExplodedGraph &G,
// Recursively finds the entry point(s) for this dead CFGBlock.
void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB) {
- bool allPredecessorsReachable = true;
-
visited.insert(CB->getBlockID());
- for (CFGBlock::const_pred_iterator I = CB->pred_begin(); I != CB->pred_end();
- ++I) {
- // Recurse over all unreachable blocks
+ for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
+ I != E; ++I) {
if (!reachable.count((*I)->getBlockID())) {
- // At least one predeccessor was unreachable
- allPredecessorsReachable = false;
-
- // Only visit the block once
+ // If we find an unreachable predecessor, mark this block as reachable so
+ // we don't report it.
+ reachable.insert(CB->getBlockID());
if (!visited.count((*I)->getBlockID()))
+ // If we haven't previously visited the unreachable predecessor, recurse
FindUnreachableEntryPoints(*I);
}
}
-
- // If at least one predecessor is unreachable, mark this block as reachable
- // so we don't report this block.
- if (!allPredecessorsReachable) {
- reachable.insert(CB->getBlockID());
- }
}
// Find the Stmt* in a CFGBlock for reporting a warning
const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
- if (CB->size() > 0)
- return CB->front().getStmt();
- else if (const Stmt *S = CB->getTerminator())
+ for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
+ if (CFGStmt S = I->getAs<CFGStmt>())
+ return S;
+ }
+ if (const Stmt *S = CB->getTerminator())
return S;
else
return 0;
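
A hypothetical function with a block that no analyzed path reaches, which is what VisitEndAnalysis walks the CFG for:

  int unreachable_example(int n) {
    if (n >= 0)
      return 1;
    if (n < 0)
      return -1;
    return 0;  /* warned: unreachable; the two branches above are exhaustive */
  }
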
diff --git a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 0800b8b..ba46e17 100644
--- a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -7,20 +7,21 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines VLASizeChecker, a builtin check in GRExprEngine that
+// This defines VLASizeChecker, a builtin check in ExprEngine that
// performs checks for declaration of VLA of undefined or zero size.
// In addition, VLASizeChecker is responsible for defining the extent
// of the MemRegion that represents a VLA.
//
//===----------------------------------------------------------------------===//
-#include "GRExprEngineInternalChecks.h"
+#include "InternalChecks.h"
#include "clang/AST/CharUnits.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
+using namespace ento;
namespace {
class VLASizeChecker : public CheckerVisitor<VLASizeChecker> {
@@ -34,7 +35,7 @@ public:
};
} // end anonymous namespace
-void clang::RegisterVLASizeChecker(GRExprEngine &Eng) {
+void ento::RegisterVLASizeChecker(ExprEngine &Eng) {
Eng.registerCheck(new VLASizeChecker());
}
@@ -58,7 +59,7 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
if (sizeV.isUndef()) {
// Generate an error node.
- ExplodedNode *N = C.GenerateSink();
+ ExplodedNode *N = C.generateSink();
if (!N)
return;
@@ -83,10 +84,10 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
DefinedSVal sizeD = cast<DefinedSVal>(sizeV);
const GRState *stateNotZero, *stateZero;
- llvm::tie(stateNotZero, stateZero) = state->Assume(sizeD);
+ llvm::tie(stateNotZero, stateZero) = state->assume(sizeD);
if (stateZero && !stateNotZero) {
- ExplodedNode* N = C.GenerateSink(stateZero);
+ ExplodedNode* N = C.generateSink(stateZero);
if (!BT_zero)
BT_zero = new BuiltinBug("Declared variable-length array (VLA) has zero "
"size");
@@ -107,25 +108,27 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
// then matching that with the array region's extent symbol.
// Convert the array length to size_t.
- ValueManager &ValMgr = C.getValueManager();
- SValuator &SV = ValMgr.getSValuator();
+ SValBuilder &svalBuilder = C.getSValBuilder();
QualType SizeTy = Ctx.getSizeType();
- NonLoc ArrayLength = cast<NonLoc>(SV.EvalCast(sizeD, SizeTy, SE->getType()));
+ NonLoc ArrayLength = cast<NonLoc>(svalBuilder.evalCast(sizeD, SizeTy,
+ SE->getType()));
// Get the element size.
CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
- SVal EleSizeVal = ValMgr.makeIntVal(EleSize.getQuantity(), SizeTy);
+ SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
// Multiply the array length by the element size.
- SVal ArraySizeVal = SV.EvalBinOpNN(state, BO_Mul, ArrayLength,
- cast<NonLoc>(EleSizeVal), SizeTy);
+ SVal ArraySizeVal = svalBuilder.evalBinOpNN(state, BO_Mul, ArrayLength,
+ cast<NonLoc>(EleSizeVal), SizeTy);
- // Finally, Assume that the array's extent matches the given size.
+ // Finally, assume that the array's extent matches the given size.
const LocationContext *LC = C.getPredecessor()->getLocationContext();
- DefinedOrUnknownSVal Extent = state->getRegion(VD, LC)->getExtent(ValMgr);
+ DefinedOrUnknownSVal Extent =
+ state->getRegion(VD, LC)->getExtent(svalBuilder);
DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal);
- DefinedOrUnknownSVal SizeIsKnown = SV.EvalEQ(state, Extent, ArraySize);
- state = state->Assume(SizeIsKnown, true);
+ DefinedOrUnknownSVal sizeIsKnown =
+ svalBuilder.evalEQ(state, Extent, ArraySize);
+ state = state->assume(sizeIsKnown, true);
// Assume should not fail at this point.
assert(state);
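
VLASizeChecker rejects both undefined and zero sizes before binding the region's extent; the zero-size case, hypothetically:

  void vla_example(void) {
    int n = 0;
    int vla[n];  /* warned: declared variable-length array has zero size */
    (void)vla;
  }
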
diff --git a/contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp
index 6d472f4..e80cf9b 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AggExprVisitor.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp
@@ -12,32 +12,34 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/StmtVisitor.h"
using namespace clang;
+using namespace ento;
namespace {
/// AggExprVisitor is designed after AggExprEmitter of the CodeGen module. It
/// is used for evaluating exprs of C++ object type. Evaluating such exprs
/// requires a destination pointer pointing to the object being evaluated
/// into. Passing such a pointer around would pollute the Visit* interface of
-/// GRExprEngine. AggExprVisitor encapsulates code that goes through various
+/// ExprEngine. AggExprVisitor encapsulates code that goes through various
/// cast and construct exprs (and others), and at the final point, dispatches
-/// back to the GRExprEngine to let the real evaluation logic happen.
+/// back to the ExprEngine to let the real evaluation logic happen.
class AggExprVisitor : public StmtVisitor<AggExprVisitor> {
- SVal DestPtr;
+ const MemRegion *Dest;
ExplodedNode *Pred;
ExplodedNodeSet &DstSet;
- GRExprEngine &Eng;
+ ExprEngine &Eng;
public:
- AggExprVisitor(SVal dest, ExplodedNode *N, ExplodedNodeSet &dst,
- GRExprEngine &eng)
- : DestPtr(dest), Pred(N), DstSet(dst), Eng(eng) {}
+ AggExprVisitor(const MemRegion *dest, ExplodedNode *N, ExplodedNodeSet &dst,
+ ExprEngine &eng)
+ : Dest(dest), Pred(N), DstSet(dst), Eng(eng) {}
void VisitCastExpr(CastExpr *E);
void VisitCXXConstructExpr(CXXConstructExpr *E);
+ void VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
};
}
@@ -47,16 +49,21 @@ void AggExprVisitor::VisitCastExpr(CastExpr *E) {
assert(0 && "Unhandled cast kind");
case CK_NoOp:
case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
Visit(E->getSubExpr());
break;
}
}
void AggExprVisitor::VisitCXXConstructExpr(CXXConstructExpr *E) {
- Eng.VisitCXXConstructExpr(E, DestPtr, Pred, DstSet);
+ Eng.VisitCXXConstructExpr(E, Dest, Pred, DstSet);
}
-void GRExprEngine::VisitAggExpr(const Expr *E, SVal Dest, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
+void AggExprVisitor::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ Eng.VisitCXXMemberCallExpr(E, Pred, DstSet);
+}
+
+void ExprEngine::VisitAggExpr(const Expr *E, const MemRegion *Dest,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
AggExprVisitor(Dest, Pred, Dst, *this).Visit(const_cast<Expr *>(E));
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 339cdab..5f4f83c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AnalysisManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -7,13 +7,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/Index/Entity.h"
#include "clang/Index/Indexer.h"
using namespace clang;
+using namespace ento;
-const AnalysisContext *
+AnalysisContext *
AnalysisManager::getAnalysisContextInAnotherTU(const Decl *D) {
idx::Entity Ent = idx::Entity::get(const_cast<Decl *>(D),
Idxer->getProgram());
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
index eee5c59..3050ca3 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
@@ -13,12 +13,13 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
namespace { class ConstNotEq {}; }
@@ -31,6 +32,7 @@ static int ConstEqIndex = 0;
static int ConstNotEqIndex = 0;
namespace clang {
+namespace ento {
template<>
struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> {
static inline void* GDMIndex() { return &ConstNotEqIndex; }
@@ -41,6 +43,7 @@ struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> {
static inline void* GDMIndex() { return &ConstEqIndex; }
};
}
+}
namespace {
// BasicConstraintManager only tracks equality and inequality constraints of
@@ -49,31 +52,31 @@ class BasicConstraintManager
: public SimpleConstraintManager {
GRState::IntSetTy::Factory ISetFactory;
public:
- BasicConstraintManager(GRStateManager &statemgr, GRSubEngine &subengine)
+ BasicConstraintManager(GRStateManager &statemgr, SubEngine &subengine)
: SimpleConstraintManager(subengine),
ISetFactory(statemgr.getAllocator()) {}
- const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymNE(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymEQ(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymLT(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymGT(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymGE(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymLE(const GRState* state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
@@ -87,7 +90,7 @@ public:
bool isEqual(const GRState* state, SymbolRef sym, const llvm::APSInt& V)
const;
- const GRState* RemoveDeadBindings(const GRState* state, SymbolReaper& SymReaper);
+ const GRState* removeDeadBindings(const GRState* state, SymbolReaper& SymReaper);
void print(const GRState* state, llvm::raw_ostream& Out,
const char* nl, const char *sep);
@@ -95,14 +98,14 @@ public:
} // end anonymous namespace
-ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& statemgr,
- GRSubEngine &subengine) {
+ConstraintManager* ento::CreateBasicConstraintManager(GRStateManager& statemgr,
+ SubEngine &subengine) {
return new BasicConstraintManager(statemgr, subengine);
}
const GRState*
-BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymNE(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// First, determine if sym == X, where X+Adjustment != V.
@@ -122,7 +125,7 @@ BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym,
}
const GRState*
-BasicConstraintManager::AssumeSymEQ(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymEQ(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// First, determine if sym == X, where X+Adjustment != V.
@@ -143,7 +146,7 @@ BasicConstraintManager::AssumeSymEQ(const GRState *state, SymbolRef sym,
// The logic for these will be handled in another ConstraintManager.
const GRState*
-BasicConstraintManager::AssumeSymLT(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymLT(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// Is 'V' the smallest possible value?
@@ -153,11 +156,11 @@ BasicConstraintManager::AssumeSymLT(const GRState *state, SymbolRef sym,
}
// FIXME: For now, treat assuming x < y the same as assuming sym != V.
- return AssumeSymNE(state, sym, V, Adjustment);
+ return assumeSymNE(state, sym, V, Adjustment);
}
const GRState*
-BasicConstraintManager::AssumeSymGT(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymGT(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// Is 'V' the largest possible value?
@@ -167,11 +170,11 @@ BasicConstraintManager::AssumeSymGT(const GRState *state, SymbolRef sym,
}
// FIXME: For now, treat assuming x > y the same as assuming sym != V.
- return AssumeSymNE(state, sym, V, Adjustment);
+ return assumeSymNE(state, sym, V, Adjustment);
}
const GRState*
-BasicConstraintManager::AssumeSymGE(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymGE(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// Reject a path if the value of sym is a constant X and !(X+Adj >= V).
@@ -199,7 +202,7 @@ BasicConstraintManager::AssumeSymGE(const GRState *state, SymbolRef sym,
}
const GRState*
-BasicConstraintManager::AssumeSymLE(const GRState *state, SymbolRef sym,
+BasicConstraintManager::assumeSymLE(const GRState *state, SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
// Reject a path if the value of sym is a constant X and !(X+Adj <= V).
@@ -237,10 +240,10 @@ const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym
// First, retrieve the NE-set associated with the given symbol.
ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
- GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
+ GRState::IntSetTy S = T ? *T : ISetFactory.getEmptySet();
// Now add V to the NE set.
- S = ISetFactory.Add(S, &state->getBasicVals().getValue(V));
+ S = ISetFactory.add(S, &state->getBasicVals().getValue(V));
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
@@ -273,7 +276,7 @@ bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
const GRState*
-BasicConstraintManager::RemoveDeadBindings(const GRState* state,
+BasicConstraintManager::removeDeadBindings(const GRState* state,
SymbolReaper& SymReaper) {
ConstEqTy CE = state->get<ConstEq>();
@@ -281,7 +284,8 @@ BasicConstraintManager::RemoveDeadBindings(const GRState* state,
for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
SymbolRef sym = I.getKey();
- if (SymReaper.maybeDead(sym)) CE = CEFactory.Remove(CE, sym);
+ if (SymReaper.maybeDead(sym))
+ CE = CEFactory.remove(CE, sym);
}
state = state->set<ConstEq>(CE);
@@ -290,7 +294,8 @@ BasicConstraintManager::RemoveDeadBindings(const GRState* state,
for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
SymbolRef sym = I.getKey();
- if (SymReaper.maybeDead(sym)) CNE = CNEFactory.Remove(CNE, sym);
+ if (SymReaper.maybeDead(sym))
+ CNE = CNEFactory.remove(CNE, sym);
}
return state->set<ConstNotEq>(CNE);
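BasicConstraintManager's bookkeeping above reduces to two maps: per-symbol "not equal" sets plus the dead-binding sweep that removeDeadBindings performs. A simplified stand-in (plain STL, not llvm::ImmutableMap, and without the factory plumbing):

    #include <cassert>
    #include <iterator>
    #include <map>
    #include <set>

    using Sym = int;
    using NESet = std::set<long>;

    struct Constraints {
      std::map<Sym, NESet> notEq;

      void addNE(Sym s, long v) { notEq[s].insert(v); }

      bool isNotEqual(Sym s, long v) const {
        auto it = notEq.find(s);
        return it != notEq.end() && it->second.count(v) != 0;
      }

      // Drop constraints on symbols the reaper says are dead.
      template <class IsDead>
      void removeDeadBindings(IsDead dead) {
        for (auto it = notEq.begin(); it != notEq.end();)
          it = dead(it->first) ? notEq.erase(it) : std::next(it);
      }
    };

    int main() {
      Constraints c;
      c.addNE(/*sym=*/7, /*value=*/0); // assume sym7 != 0
      assert(c.isNotEqual(7, 0));
      c.removeDeadBindings([](Sym s) { return s == 7; });
      assert(!c.isNotEqual(7, 0));     // constraint pruned with the symbol
    }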
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicStore.cpp
index f82e1b2..98365e7 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicStore.cpp
@@ -11,13 +11,15 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
+using namespace ento;
typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
@@ -46,47 +48,48 @@ public:
SVal Retrieve(Store store, Loc loc, QualType T = QualType());
- Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
- unsigned Count, InvalidatedSymbols *IS);
+ StoreRef invalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS);
- Store InvalidateRegions(Store store, const MemRegion * const *Begin,
- const MemRegion * const *End, const Expr *E,
- unsigned Count, InvalidatedSymbols *IS,
- bool invalidateGlobals, InvalidatedRegions *Regions);
+ StoreRef invalidateRegions(Store store, const MemRegion * const *Begin,
+ const MemRegion * const *End, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions);
- Store scanForIvars(Stmt *B, const Decl* SelfDecl,
- const MemRegion *SelfRegion, Store St);
+ StoreRef scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion, Store St);
- Store Bind(Store St, Loc loc, SVal V);
- Store Remove(Store St, Loc loc);
- Store getInitialStore(const LocationContext *InitLoc);
+ StoreRef Bind(Store St, Loc loc, SVal V);
+ StoreRef Remove(Store St, Loc loc);
+ StoreRef getInitialStore(const LocationContext *InitLoc);
- Store BindCompoundLiteral(Store store, const CompoundLiteralExpr*,
+ StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr*,
const LocationContext*, SVal val) {
- return store;
+ return StoreRef(store, *this);
}
- /// ArrayToPointer - Used by GRExprEngine::VistCast to handle implicit
+ /// ArrayToPointer - Used by ExprEngine::VisitCast to handle implicit
/// conversions between arrays and pointers.
SVal ArrayToPointer(Loc Array) { return Array; }
- /// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
+ /// removeDeadBindings - Scans a BasicStore of 'state' for dead values.
/// It updates the GRState object in place with the values removed.
- Store RemoveDeadBindings(Store store, const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
void iterBindings(Store store, BindingsHandler& f);
- Store BindDecl(Store store, const VarRegion *VR, SVal InitVal) {
+ StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal) {
return BindDeclInternal(store, VR, &InitVal);
}
- Store BindDeclWithNoInit(Store store, const VarRegion *VR) {
+ StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR) {
return BindDeclInternal(store, VR, 0);
}
- Store BindDeclInternal(Store store, const VarRegion *VR, SVal *InitVal);
+ StoreRef BindDeclInternal(Store store, const VarRegion *VR, SVal *InitVal);
static inline BindingsTy GetBindings(Store store) {
return BindingsTy(static_cast<const BindingsTy::TreeTy*>(store));
@@ -102,7 +105,7 @@ private:
} // end anonymous namespace
-StoreManager* clang::CreateBasicStoreManager(GRStateManager& StMgr) {
+StoreManager* ento::CreateBasicStoreManager(GRStateManager& StMgr) {
return new BasicStoreManager(StMgr);
}
@@ -142,17 +145,17 @@ SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
// Globals and parameters start with symbolic values.
// Local variables initially are undefined.
- // Non-static globals may have had their values reset by InvalidateRegions.
+ // Non-static globals may have had their values reset by invalidateRegions.
const MemSpaceRegion *MS = VR->getMemorySpace();
if (isa<NonStaticGlobalSpaceRegion>(MS)) {
BindingsTy B = GetBindings(store);
// FIXME: Copy-and-pasted from RegionStore.cpp.
if (BindingsTy::data_type *Val = B.lookup(MS)) {
if (SymbolRef parentSym = Val->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
if (Val->isZeroConstant())
- return ValMgr.makeZeroVal(T);
+ return svalBuilder.makeZeroVal(T);
if (Val->isUnknownOrUndef())
return *Val;
@@ -163,7 +166,7 @@ SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
if (VR->hasGlobalsOrParametersStorage() ||
isa<UnknownSpaceRegion>(VR->getMemorySpace()))
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
return UndefinedVal();
}
@@ -179,7 +182,8 @@ SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
- if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) ||
+ isa<CXXThisRegion>(R)))
return UnknownVal();
BindingsTy B = GetBindings(store);
@@ -193,11 +197,11 @@ SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
return V.isUnknownOrUndef() ? V : CastRetrievedVal(V, TR, T);
}
+ case loc::ObjCPropRefKind:
case loc::ConcreteIntKind:
- // Some clients may call GetSVal with such an option simply because
- // they are doing a quick scan through their Locs (potentially to
- // invalidate their bindings). Just return Undefined.
- return UndefinedVal();
+ // Support direct accesses to memory. It's up to individual checkers
+ // to flag an error.
+ return UnknownVal();
default:
assert (false && "Invalid Loc.");
@@ -207,9 +211,9 @@ SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
return UnknownVal();
}
-Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
+StoreRef BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
if (isa<loc::ConcreteInt>(loc))
- return store;
+ return StoreRef(store, *this);
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
@@ -217,7 +221,7 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
// that is used to derive other symbols.
if (isa<NonStaticGlobalSpaceRegion>(R)) {
BindingsTy B = GetBindings(store);
- return VBFactory.Add(B, R, V).getRoot();
+ return StoreRef(VBFactory.add(B, R, V).getRoot(), *this);
}
// Special case: handle store of pointer values (Loc) to pointers via
@@ -232,15 +236,15 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
R = ER->getSuperRegion();
}
- if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
- return store;
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) || isa<CXXThisRegion>(R)))
+ return StoreRef(store, *this);
const TypedRegion *TyR = cast<TypedRegion>(R);
// Do not bind to arrays. We need to explicitly check for this so that
// we do not encounter any weirdness of trying to load/store from arrays.
if (TyR->isBoundable() && TyR->getValueType()->isArrayType())
- return store;
+ return StoreRef(store, *this);
if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
// Only convert 'V' to a location iff the underlying region type
@@ -249,35 +253,36 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
// a pointer. We may wish to flag a type error here if the types
// are incompatible. This may also cause lots of breakage
// elsewhere. Food for thought.
- if (TyR->isBoundable() && Loc::IsLocType(TyR->getValueType()))
+ if (TyR->isBoundable() && Loc::isLocType(TyR->getValueType()))
V = X->getLoc();
}
BindingsTy B = GetBindings(store);
- return V.isUnknown()
- ? VBFactory.Remove(B, R).getRoot()
- : VBFactory.Add(B, R, V).getRoot();
+ return StoreRef(V.isUnknown()
+ ? VBFactory.remove(B, R).getRoot()
+ : VBFactory.add(B, R, V).getRoot(), *this);
}
-Store BasicStoreManager::Remove(Store store, Loc loc) {
+StoreRef BasicStoreManager::Remove(Store store, Loc loc) {
switch (loc.getSubKind()) {
case loc::MemRegionKind: {
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
- if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
- return store;
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R) ||
+ isa<CXXThisRegion>(R)))
+ return StoreRef(store, *this);
- return VBFactory.Remove(GetBindings(store), R).getRoot();
+ return StoreRef(VBFactory.remove(GetBindings(store), R).getRoot(), *this);
}
default:
assert ("Remove for given Loc type not yet implemented.");
- return store;
+ return StoreRef(store, *this);
}
}
-Store BasicStoreManager::RemoveDeadBindings(Store store,
- const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+StoreRef BasicStoreManager::removeDeadBindings(Store store,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
BindingsTy B = GetBindings(store);
@@ -292,7 +297,8 @@ Store BasicStoreManager::RemoveDeadBindings(Store store,
continue;
}
else if (isa<ObjCIvarRegion>(I.getKey()) ||
- isa<NonStaticGlobalSpaceRegion>(I.getKey()))
+ isa<NonStaticGlobalSpaceRegion>(I.getKey()) ||
+ isa<CXXThisRegion>(I.getKey()))
RegionRoots.push_back(I.getKey());
else
continue;
@@ -316,7 +322,7 @@ Store BasicStoreManager::RemoveDeadBindings(Store store,
break;
}
else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR) ||
- isa<NonStaticGlobalSpaceRegion>(MR)) {
+ isa<NonStaticGlobalSpaceRegion>(MR) || isa<CXXThisRegion>(MR)) {
if (Marked.count(MR))
break;
@@ -342,11 +348,12 @@ Store BasicStoreManager::RemoveDeadBindings(Store store,
}
// Remove dead variable bindings.
+ StoreRef newStore(store, *this);
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
const MemRegion* R = I.getKey();
if (!Marked.count(R)) {
- store = Remove(store, ValMgr.makeLoc(R));
+ newStore = Remove(newStore.getStore(), svalBuilder.makeLoc(R));
SVal X = I.getData();
for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
@@ -354,11 +361,15 @@ Store BasicStoreManager::RemoveDeadBindings(Store store,
}
}
- return store;
+ return newStore;
}
-Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
- const MemRegion *SelfRegion, Store St) {
+StoreRef BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
+ const MemRegion *SelfRegion,
+ Store St) {
+
+ StoreRef newStore(St, *this);
+
for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end();
CI != CE; ++CI) {
@@ -373,25 +384,25 @@ Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
if (DR->getDecl() == SelfDecl) {
const ObjCIvarRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
SelfRegion);
- SVal X = ValMgr.getRegionValueSymbolVal(IVR);
- St = Bind(St, ValMgr.makeLoc(IVR), X);
+ SVal X = svalBuilder.getRegionValueSymbolVal(IVR);
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(IVR), X);
}
}
}
else
- St = scanForIvars(*CI, SelfDecl, SelfRegion, St);
+ newStore = scanForIvars(*CI, SelfDecl, SelfRegion, newStore.getStore());
}
- return St;
+ return newStore;
}
-Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
+StoreRef BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
// The LiveVariables information already has a compilation of all VarDecls
// used in the function. Iterate through this set, and "symbolicate"
// any VarDecl whose value originally comes from outside the function.
typedef LiveVariables::AnalysisDataTy LVDataTy;
LVDataTy& D = InitLoc->getLiveVariables()->getAnalysisData();
- Store St = VBFactory.GetEmptyMap().getRoot();
+ StoreRef St(VBFactory.getEmptyMap().getRoot(), *this);
for (LVDataTy::decl_iterator I=D.begin_decl(), E=D.end_decl(); I != E; ++I) {
const NamedDecl* ND = I->first;
@@ -405,30 +416,41 @@ Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
// SelfRegion? (i.e., it implements MD->getClassInterface()).
const VarRegion *VR = MRMgr.getVarRegion(PD, InitLoc);
const MemRegion *SelfRegion =
- ValMgr.getRegionValueSymbolVal(VR).getAsRegion();
+ svalBuilder.getRegionValueSymbolVal(VR).getAsRegion();
assert(SelfRegion);
- St = Bind(St, ValMgr.makeLoc(VR), loc::MemRegionVal(SelfRegion));
+ St = Bind(St.getStore(), svalBuilder.makeLoc(VR),
+ loc::MemRegionVal(SelfRegion));
// Scan the method for ivar references. While this requires an
// entire AST scan, the cost should not be high in practice.
- St = scanForIvars(MD->getBody(), PD, SelfRegion, St);
+ St = scanForIvars(MD->getBody(), PD, SelfRegion, St.getStore());
}
}
}
}
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(InitLoc->getDecl())) {
+ // For C++ methods, add a symbolic region for 'this' in the initial stack
+ // frame.
+ QualType ThisT = MD->getThisType(StateMgr.getContext());
+ MemRegionManager &RegMgr = svalBuilder.getRegionManager();
+ const CXXThisRegion *ThisR = RegMgr.getCXXThisRegion(ThisT, InitLoc);
+ SVal ThisV = svalBuilder.getRegionValueSymbolVal(ThisR);
+ St = Bind(St.getStore(), svalBuilder.makeLoc(ThisR), ThisV);
+ }
+
return St;
}
-Store BasicStoreManager::BindDeclInternal(Store store, const VarRegion* VR,
- SVal* InitVal) {
+StoreRef BasicStoreManager::BindDeclInternal(Store store, const VarRegion* VR,
+ SVal* InitVal) {
BasicValueFactory& BasicVals = StateMgr.getBasicVals();
const VarDecl *VD = VR->getDecl();
+ StoreRef newStore(store, *this);
// BasicStore does not model arrays and structs.
if (VD->getType()->isArrayType() || VD->getType()->isStructureOrClassType())
- return store;
-
+ return newStore;
+
if (VD->hasGlobalStorage()) {
// Handle variables with global storage: extern, static, PrivateExtern.
@@ -445,32 +467,33 @@ Store BasicStoreManager::BindDeclInternal(Store store, const VarRegion* VR,
// C99: 6.7.8 Initialization
// If an object that has static storage duration is not initialized
// explicitly, then:
- // —if it has pointer type, it is initialized to a null pointer;
- // —if it has arithmetic type, it is initialized to (positive or
+ // -if it has pointer type, it is initialized to a null pointer;
+ // -if it has arithmetic type, it is initialized to (positive or
// unsigned) zero;
if (!InitVal) {
QualType T = VD->getType();
- if (Loc::IsLocType(T))
- store = Bind(store, loc::MemRegionVal(VR),
- loc::ConcreteInt(BasicVals.getValue(0, T)));
+ if (Loc::isLocType(T))
+ newStore = Bind(store, loc::MemRegionVal(VR),
+ loc::ConcreteInt(BasicVals.getValue(0, T)));
else if (T->isIntegerType() && T->isScalarType())
- store = Bind(store, loc::MemRegionVal(VR),
- nonloc::ConcreteInt(BasicVals.getValue(0, T)));
+ newStore = Bind(store, loc::MemRegionVal(VR),
+ nonloc::ConcreteInt(BasicVals.getValue(0, T)));
} else {
- store = Bind(store, loc::MemRegionVal(VR), *InitVal);
+ newStore = Bind(store, loc::MemRegionVal(VR), *InitVal);
}
}
} else {
// Process local scalar variables.
QualType T = VD->getType();
// BasicStore only supports scalars.
- if (T->isScalarType() && ValMgr.getSymbolManager().canSymbolicate(T)) {
+ if ((T->isScalarType() || T->isReferenceType()) &&
+ svalBuilder.getSymbolManager().canSymbolicate(T)) {
SVal V = InitVal ? *InitVal : UndefinedVal();
- store = Bind(store, loc::MemRegionVal(VR), V);
+ newStore = Bind(store, loc::MemRegionVal(VR), V);
}
}
- return store;
+ return newStore;
}
void BasicStoreManager::print(Store store, llvm::raw_ostream& Out,
@@ -508,19 +531,21 @@ StoreManager::BindingsHandler::~BindingsHandler() {}
//===----------------------------------------------------------------------===//
-Store BasicStoreManager::InvalidateRegions(Store store,
- const MemRegion * const *I,
- const MemRegion * const *End,
- const Expr *E, unsigned Count,
- InvalidatedSymbols *IS,
- bool invalidateGlobals,
- InvalidatedRegions *Regions) {
+StoreRef BasicStoreManager::invalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions) {
+ StoreRef newStore(store, *this);
+
if (invalidateGlobals) {
BindingsTy B = GetBindings(store);
for (BindingsTy::iterator I=B.begin(), End=B.end(); I != End; ++I) {
const MemRegion *R = I.getKey();
if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
- store = InvalidateRegion(store, R, E, Count, IS);
+ newStore = invalidateRegion(newStore.getStore(), R, E, Count, IS);
}
}
@@ -531,7 +556,7 @@ Store BasicStoreManager::InvalidateRegions(Store store,
if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
continue;
}
- store = InvalidateRegion(store, *I, E, Count, IS);
+ newStore = invalidateRegion(newStore.getStore(), *I, E, Count, IS);
if (Regions)
Regions->push_back(R);
}
@@ -542,28 +567,28 @@ Store BasicStoreManager::InvalidateRegions(Store store,
// use to derive the bindings for all non-static globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
SVal V =
- ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E,
+ svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E,
/* symbol type, doesn't matter */ Ctx.IntTy,
Count);
- store = Bind(store, loc::MemRegionVal(GS), V);
+ newStore = Bind(newStore.getStore(), loc::MemRegionVal(GS), V);
if (Regions)
Regions->push_back(GS);
}
- return store;
+ return newStore;
}
-Store BasicStoreManager::InvalidateRegion(Store store,
- const MemRegion *R,
- const Expr *E,
- unsigned Count,
- InvalidatedSymbols *IS) {
+StoreRef BasicStoreManager::invalidateRegion(Store store,
+ const MemRegion *R,
+ const Expr *E,
+ unsigned Count,
+ InvalidatedSymbols *IS) {
R = R->StripCasts();
if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
- return store;
+ return StoreRef(store, *this);
if (IS) {
BindingsTy B = GetBindings(store);
@@ -574,7 +599,6 @@ Store BasicStoreManager::InvalidateRegion(Store store,
}
QualType T = cast<TypedRegion>(R)->getValueType();
- SVal V = ValMgr.getConjuredSymbolVal(R, E, T, Count);
+ SVal V = svalBuilder.getConjuredSymbolVal(R, E, T, Count);
return Bind(store, loc::MemRegionVal(R), V);
}
-
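The dominant change in BasicStore.cpp is the return type: every mutator that used to hand back a bare Store now returns a StoreRef built as StoreRef(store, *this). A hedged sketch of that handle shape (plain C++ stand-in, not clang's types): the snapshot travels together with the manager that owns it, so callers chain updates through getStore().

    #include <cassert>
    #include <map>
    #include <string>

    class StoreManager;

    // The handle mutators now return instead of a bare Store pointer.
    class StoreRef {
      const std::map<std::string, int> *store;
      StoreManager &mgr; // the manager the snapshot belongs to
    public:
      StoreRef(const std::map<std::string, int> *s, StoreManager &m)
          : store(s), mgr(m) {}
      const std::map<std::string, int> *getStore() const { return store; }
    };

    class StoreManager {
      std::map<std::string, int> current; // toy backing storage; the real
                                          // store is a persistent immutable map
    public:
      StoreRef Bind(const std::string &key, int v) {
        current[key] = v;
        return StoreRef(&current, *this); // cf. StoreRef(store, *this) above
      }
    };

    int main() {
      StoreManager mgr;
      StoreRef ref = mgr.Bind("x", 1);
      assert(ref.getStore()->at("x") == 1);
    }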
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 4c9b109..6315d83 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicValueFactory.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -8,14 +8,15 @@
//===----------------------------------------------------------------------===//
//
// This file defines BasicValueFactory, a class that manages the lifetime
-// of APSInt objects and symbolic constraints used by GRExprEngine
+// of APSInt objects and symbolic constraints used by ExprEngine
// and related classes.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
using namespace clang;
+using namespace ento;
void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
llvm::ImmutableList<SVal> L) {
@@ -98,7 +99,7 @@ const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
unsigned bits = Ctx.getTypeSize(T);
- llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::IsLocType(T));
+ llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::isLocType(T));
V = X;
return getValue(V);
}
@@ -142,7 +143,7 @@ BasicValueFactory::getLazyCompoundValData(const void *store,
}
const llvm::APSInt*
-BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
+BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
switch (Op) {
@@ -285,5 +286,3 @@ BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
return &getPersistentSValWithData(X, 0).first;
}
-
-
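BasicValueFactory's job, per its own header comment, is managing the lifetime of APSInt objects: getValue returns a reference to one persistent copy of each integer so clients can compare by address. An illustrative uniquing-factory sketch in that spirit (not the LLVM implementation, which uses a folding set and a bump allocator):

    #include <cassert>
    #include <set>

    class ValueFactory {
      std::set<long long> pool; // std::set nodes keep stable addresses
    public:
      const long long &getValue(long long v) { return *pool.insert(v).first; }
    };

    int main() {
      ValueFactory f;
      const long long &a = f.getValue(42);
      const long long &b = f.getValue(42);
      assert(&a == &b); // the same persistent object both times
    }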
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
index cd26060..ed52b6b 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRBlockCounter.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -1,4 +1,4 @@
-//==- GRBlockCounter.h - ADT for counting block visits -------------*- C++ -*-//
+//==- BlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -7,16 +7,17 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines GRBlockCounter, an abstract data type used to count
+// This file defines BlockCounter, an abstract data type used to count
// the number of times a given block has been visited along a path
-// analyzed by GRCoreEngine.
+// analyzed by CoreEngine.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/GRBlockCounter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
+using namespace ento;
namespace {
@@ -55,31 +56,31 @@ static inline CountMap::Factory& GetFactory(void* F) {
return *static_cast<CountMap::Factory*>(F);
}
-unsigned GRBlockCounter::getNumVisited(const StackFrameContext *CallSite,
+unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite,
unsigned BlockID) const {
CountMap M = GetMap(Data);
CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
return T ? *T : 0;
}
-GRBlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+BlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
F = new CountMap::Factory(Alloc);
}
-GRBlockCounter::Factory::~Factory() {
+BlockCounter::Factory::~Factory() {
delete static_cast<CountMap::Factory*>(F);
}
-GRBlockCounter
-GRBlockCounter::Factory::IncrementCount(GRBlockCounter BC,
+BlockCounter
+BlockCounter::Factory::IncrementCount(BlockCounter BC,
const StackFrameContext *CallSite,
unsigned BlockID) {
- return GRBlockCounter(GetFactory(F).Add(GetMap(BC.Data),
+ return BlockCounter(GetFactory(F).add(GetMap(BC.Data),
CountKey(CallSite, BlockID),
BC.getNumVisited(CallSite, BlockID)+1).getRoot());
}
-GRBlockCounter
-GRBlockCounter::Factory::GetEmptyCounter() {
- return GRBlockCounter(GetFactory(F).GetEmptyMap().getRoot());
+BlockCounter
+BlockCounter::Factory::GetEmptyCounter() {
+ return BlockCounter(GetFactory(F).getEmptyMap().getRoot());
}
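BlockCounter::Factory::IncrementCount above never mutates: it builds a new counter value from an immutable map, so earlier program points keep their old counts. A rough functional-counter sketch of the same behavior (STL copy instead of a structure-sharing llvm::ImmutableMap):

    #include <cassert>
    #include <map>

    struct BlockCounter {
      std::map<unsigned, unsigned> counts; // block id -> visits

      unsigned getNumVisited(unsigned blockID) const {
        auto it = counts.find(blockID);
        return it == counts.end() ? 0 : it->second;
      }

      BlockCounter incrementCount(unsigned blockID) const {
        BlockCounter next = *this; // copy; an immutable map would share nodes
        ++next.counts[blockID];
        return next;
      }
    };

    int main() {
      BlockCounter empty;
      BlockCounter once = empty.incrementCount(3);
      assert(empty.getNumVisited(3) == 0); // the old counter is unchanged
      assert(once.getNumVisited(3) == 1);
    }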
diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index bffbd52..9a84045 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/ASTContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/AST/Expr.h"
@@ -22,7 +22,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Analysis/ProgramPoint.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
@@ -30,6 +30,7 @@
#include <queue>
using namespace clang;
+using namespace ento;
BugReporterVisitor::~BugReporterVisitor() {}
BugReporterContext::~BugReporterContext() {
@@ -51,7 +52,7 @@ void BugReporterContext::addVisitor(BugReporterVisitor* visitor) {
}
CallbacksSet.InsertNode(visitor, InsertPos);
- Callbacks = F.Add(visitor, Callbacks);
+ Callbacks = F.add(visitor, Callbacks);
}
//===----------------------------------------------------------------------===//
@@ -92,6 +93,7 @@ static const Stmt* GetNextStmt(const ExplodedNode* N) {
// not actual statement points.
switch (S->getStmtClass()) {
case Stmt::ChooseExprClass:
+ case Stmt::BinaryConditionalOperatorClass: continue;
case Stmt::ConditionalOperatorClass: continue;
case Stmt::BinaryOperatorClass: {
BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
@@ -168,9 +170,9 @@ public:
PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os,
const ExplodedNode* N);
- Decl const &getCodeDecl() { return R->getEndNode()->getCodeDecl(); }
+ Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
- ParentMap& getParentMap() { return R->getEndNode()->getParentMap(); }
+ ParentMap& getParentMap() { return R->getErrorNode()->getParentMap(); }
const Stmt *getParent(const Stmt *S) {
return getParentMap().getParent(S);
@@ -278,10 +280,11 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
return PathDiagnosticLocation(Parent, SMgr);
else
return PathDiagnosticLocation(S, SMgr);
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
// For '?', if we are referring to condition, just have the edge point
// to the entire '?' expression.
- if (cast<ConditionalOperator>(Parent)->getCond() == S)
+ if (cast<AbstractConditionalOperator>(Parent)->getCond() == S)
return PathDiagnosticLocation(Parent, SMgr);
else
return PathDiagnosticLocation(S, SMgr);
@@ -445,7 +448,7 @@ public:
// Create the diagnostic.
FullSourceLoc L(S->getLocStart(), BR.getSourceManager());
- if (Loc::IsLocType(VD->getType())) {
+ if (Loc::isLocType(VD->getType())) {
std::string msg = "'" + std::string(VD->getNameAsString()) +
"' now aliases '" + MostRecent->getNameAsString() + "'";
@@ -634,6 +637,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
}
// Determine control-flow for ternary '?'.
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
@@ -809,7 +813,7 @@ static bool IsControlFlowExpr(const Stmt *S) {
E = E->IgnoreParenCasts();
- if (isa<ConditionalOperator>(E))
+ if (isa<AbstractConditionalOperator>(E))
return true;
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
@@ -858,8 +862,9 @@ class EdgeBuilder {
S = cast<ParenExpr>(S)->IgnoreParens();
firstCharOnly = true;
continue;
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
- S = cast<ConditionalOperator>(S)->getCond();
+ S = cast<AbstractConditionalOperator>(S)->getCond();
firstCharOnly = true;
continue;
case Stmt::ChooseExprClass:
@@ -1165,15 +1170,15 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
}
if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
- if (const Stmt* S = BE->getFirstStmt()) {
- if (IsControlFlowExpr(S)) {
- // Add the proper context for '&&', '||', and '?'.
- EB.addContext(S);
- }
- else
- EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ if (CFGStmt S = BE->getFirstElement().getAs<CFGStmt>()) {
+ if (IsControlFlowExpr(S)) {
+ // Add the proper context for '&&', '||', and '?'.
+ EB.addContext(S);
+ }
+ else
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
-
+
break;
}
} while (0);
@@ -1216,13 +1221,13 @@ BugReport::~BugReport() {}
RangedBugReport::~RangedBugReport() {}
const Stmt* BugReport::getStmt() const {
- ProgramPoint ProgP = EndNode->getLocation();
+ ProgramPoint ProgP = ErrorNode->getLocation();
const Stmt *S = NULL;
if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) {
CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
if (BE->getBlock() == &Exit)
- S = GetPreviousStmt(EndNode);
+ S = GetPreviousStmt(ErrorNode);
}
if (!S)
S = GetStmt(ProgP);
@@ -1239,8 +1244,8 @@ BugReport::getEndPath(BugReporterContext& BRC,
if (!S)
return NULL;
- const SourceRange *Beg, *End;
- getRanges(Beg, End);
+ BugReport::ranges_iterator Beg, End;
+ llvm::tie(Beg, End) = getRanges();
PathDiagnosticLocation L(S, BRC.getSourceManager());
// Only add the statement itself as a range if we didn't specify any
@@ -1254,20 +1259,20 @@ BugReport::getEndPath(BugReporterContext& BRC,
return P;
}
-void BugReport::getRanges(const SourceRange*& beg, const SourceRange*& end) {
+std::pair<BugReport::ranges_iterator, BugReport::ranges_iterator>
+BugReport::getRanges() const {
if (const Expr* E = dyn_cast_or_null<Expr>(getStmt())) {
R = E->getSourceRange();
assert(R.isValid());
- beg = &R;
- end = beg+1;
+ return std::make_pair(&R, &R+1);
}
else
- beg = end = 0;
+ return std::make_pair(ranges_iterator(), ranges_iterator());
}
SourceLocation BugReport::getLocation() const {
- if (EndNode)
- if (const Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
+ if (ErrorNode)
+ if (const Stmt* S = GetCurrentOrPreviousStmt(ErrorNode)) {
// For member expressions, return the location of the '.' or '->'.
if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
return ME->getMemberLoc();
@@ -1333,7 +1338,7 @@ void BugReporter::FlushReports() {
}
// Remove all references to the BugType objects.
- BugTypes = F.GetEmptySet();
+ BugTypes = F.getEmptySet();
}
//===----------------------------------------------------------------------===//
@@ -1343,8 +1348,7 @@ void BugReporter::FlushReports() {
static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
std::pair<ExplodedNode*, unsigned> >
MakeReportGraph(const ExplodedGraph* G,
- const ExplodedNode** NStart,
- const ExplodedNode** NEnd) {
+ llvm::SmallVectorImpl<const ExplodedNode*> &nodes) {
// Create the trimmed graph. It will contain the shortest paths from the
// error nodes to the root. In the new graph we should only have one
@@ -1354,7 +1358,8 @@ MakeReportGraph(const ExplodedGraph* G,
InterExplodedGraphMap* NMap;
llvm::DenseMap<const void*, const void*> InverseMap;
- llvm::tie(GTrim, NMap) = G->Trim(NStart, NEnd, &InverseMap);
+ llvm::tie(GTrim, NMap) = G->Trim(nodes.data(), nodes.data() + nodes.size(),
+ &InverseMap);
// Create owning pointers for GTrim and NMap just to ensure that they are
// released when this function exits.
@@ -1369,12 +1374,13 @@ MakeReportGraph(const ExplodedGraph* G,
typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
IndexMapTy IndexMap;
- for (const ExplodedNode** I = NStart; I != NEnd; ++I)
- if (const ExplodedNode *N = NMap->getMappedNode(*I)) {
- unsigned NodeIndex = (I - NStart) / sizeof(*I);
+ for (unsigned nodeIndex = 0 ; nodeIndex < nodes.size(); ++nodeIndex) {
+ const ExplodedNode *originalNode = nodes[nodeIndex];
+ if (const ExplodedNode *N = NMap->getMappedNode(originalNode)) {
WS.push(N);
- IndexMap[*I] = NodeIndex;
+ IndexMap[originalNode] = nodeIndex;
}
+ }
assert(!WS.empty() && "No error node found in the trimmed graph.");
@@ -1567,30 +1573,24 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
}
void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
- BugReportEquivClass& EQ) {
+ llvm::SmallVectorImpl<BugReport *> &bugReports) {
- std::vector<const ExplodedNode*> Nodes;
-
- for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
- const ExplodedNode* N = I->getEndNode();
- if (N) Nodes.push_back(N);
+ assert(!bugReports.empty());
+ llvm::SmallVector<const ExplodedNode *, 10> errorNodes;
+ for (llvm::SmallVectorImpl<BugReport*>::iterator I = bugReports.begin(),
+ E = bugReports.end(); I != E; ++I) {
+ errorNodes.push_back((*I)->getErrorNode());
}
- if (Nodes.empty())
- return;
-
// Construct a new graph that contains only a single path from the error
// node to a root.
const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
std::pair<ExplodedNode*, unsigned> >&
- GPair = MakeReportGraph(&getGraph(), &Nodes[0], &Nodes[0] + Nodes.size());
+ GPair = MakeReportGraph(&getGraph(), errorNodes);
// Find the BugReport with the original location.
- BugReport *R = 0;
- unsigned i = 0;
- for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I, ++i)
- if (i == GPair.second.second) { R = *I; break; }
-
+ assert(GPair.second.second < bugReports.size());
+ BugReport *R = bugReports[GPair.second.second];
assert(R && "No original report found for sliced graph.");
llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
@@ -1620,7 +1620,7 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
}
void BugReporter::Register(BugType *BT) {
- BugTypes = F.Add(BugTypes, BT);
+ BugTypes = F.add(BugTypes, BT);
}
void BugReporter::EmitReport(BugReport* R) {
@@ -1657,37 +1657,56 @@ struct FRIEC_WLItem {
};
}
-static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
+static BugReport *
+FindReportInEquivalenceClass(BugReportEquivClass& EQ,
+ llvm::SmallVectorImpl<BugReport*> &bugReports) {
+
BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
assert(I != E);
BugReport *R = *I;
BugType& BT = R->getBugType();
-
- if (!BT.isSuppressOnSink())
+
+ // If we don't need to suppress any of the nodes because they are
+ // post-dominated by a sink, simply add all the nodes in the equivalence class
+ // to 'bugReports'. Any of the reports will serve as a "representative"
+ // report.
+ if (!BT.isSuppressOnSink()) {
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+ const ExplodedNode* N = I->getErrorNode();
+ if (N) {
+ R = *I;
+ bugReports.push_back(R);
+ }
+ }
return R;
-
+ }
+
// For bug reports that should be suppressed when all paths are post-dominated
// by a sink node, iterate through the reports in the equivalence class
// until we find one that isn't post-dominated (if one exists). We use a
// DFS traversal of the ExplodedGraph to find a non-sink node. We could write
// this as a recursive function, but we don't want to risk blowing out the
// stack for very long paths.
+ BugReport *exampleReport = 0;
+
for (; I != E; ++I) {
R = *I;
- const ExplodedNode *N = R->getEndNode();
+ const ExplodedNode *errorNode = R->getErrorNode();
- if (!N)
+ if (!errorNode)
continue;
-
- if (N->isSink()) {
+ if (errorNode->isSink()) {
assert(false &&
"BugType::isSuppressSink() should not be 'true' for sink end nodes");
- return R;
+ return 0;
}
-
- if (N->succ_empty())
- return R;
-
+ // No successors? By definition this node isn't post-dominated by a sink.
+ if (errorNode->succ_empty()) {
+ bugReports.push_back(R);
+ if (!exampleReport)
+ exampleReport = R;
+ continue;
+ }
+
// At this point we know that 'N' is not a sink and it has at least one
// successor. Use a DFS worklist to find a non-sink end-of-path node.
typedef FRIEC_WLItem WLItem;
@@ -1695,8 +1714,8 @@ static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
DFSWorkList WL;
- WL.push_back(N);
- Visited[N] = 1;
+ WL.push_back(errorNode);
+ Visited[errorNode] = 1;
while (!WL.empty()) {
WLItem &WI = WL.back();
@@ -1706,15 +1725,17 @@ static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
const ExplodedNode *Succ = *WI.I;
// End-of-path node?
if (Succ->succ_empty()) {
- // If we found an end-of-path node that is not a sink, then return
- // this report.
- if (!Succ->isSink())
- return R;
-
+ // If we found an end-of-path node that is not a sink.
+ if (!Succ->isSink()) {
+ bugReports.push_back(R);
+ if (!exampleReport)
+ exampleReport = R;
+ WL.clear();
+ break;
+ }
// Found a sink? Continue on to the next successor.
continue;
}
-
// Mark the successor as visited. If it hasn't been explored,
// enqueue it to the DFS worklist.
unsigned &mark = Visited[Succ];
@@ -1724,17 +1745,18 @@ static BugReport *FindReportInEquivalenceClass(BugReportEquivClass& EQ) {
break;
}
}
-
- if (&WL.back() == &WI)
+
+ // The worklist may have been cleared at this point. First
+ // check if it is empty before checking the last item.
+ if (!WL.empty() && &WL.back() == &WI)
WL.pop_back();
}
}
-
- // If we reach here, the end nodes for all reports in the equivalence
- // class are post-dominated by a sink node.
- return NULL;
-}
+ // exampleReport will be NULL if all the nodes in the equivalence class
+ // were post-dominated by sinks.
+ return exampleReport;
+}
//===----------------------------------------------------------------------===//
// DiagnosticCache. This is a hack to cache analyzer diagnostics. It
@@ -1780,42 +1802,45 @@ static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) {
}
void BugReporter::FlushReport(BugReportEquivClass& EQ) {
- BugReport *R = FindReportInEquivalenceClass(EQ);
-
- if (!R)
+ llvm::SmallVector<BugReport*, 10> bugReports;
+ BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
+ if (!exampleReport)
return;
PathDiagnosticClient* PD = getPathDiagnosticClient();
// FIXME: Make sure we use the 'R' for the path that was actually used.
// Probably doesn't make a difference in practice.
- BugType& BT = R->getBugType();
+ BugType& BT = exampleReport->getBugType();
llvm::OwningPtr<PathDiagnostic>
- D(new PathDiagnostic(R->getBugType().getName(),
+ D(new PathDiagnostic(exampleReport->getBugType().getName(),
!PD || PD->useVerboseDescription()
- ? R->getDescription() : R->getShortDescription(),
+ ? exampleReport->getDescription()
+ : exampleReport->getShortDescription(),
BT.getCategory()));
- GeneratePathDiagnostic(*D.get(), EQ);
+ if (!bugReports.empty())
+ GeneratePathDiagnostic(*D.get(), bugReports);
- if (IsCachedDiagnostic(R, D.get()))
+ if (IsCachedDiagnostic(exampleReport, D.get()))
return;
// Get the meta data.
- std::pair<const char**, const char**> Meta = R->getExtraDescriptiveText();
+ std::pair<const char**, const char**> Meta =
+ exampleReport->getExtraDescriptiveText();
for (const char** s = Meta.first; s != Meta.second; ++s)
D->addMeta(*s);
// Emit a summary diagnostic to the regular Diagnostics engine.
- const SourceRange *Beg = 0, *End = 0;
- R->getRanges(Beg, End);
- Diagnostic& Diag = getDiagnostic();
- FullSourceLoc L(R->getLocation(), getSourceManager());
+ BugReport::ranges_iterator Beg, End;
+ llvm::tie(Beg, End) = exampleReport->getRanges();
+ Diagnostic &Diag = getDiagnostic();
+ FullSourceLoc L(exampleReport->getLocation(), getSourceManager());
// Search the description for '%', as that will be interpreted as a
// format character by FormatDiagnostics.
- llvm::StringRef desc = R->getShortDescription();
+ llvm::StringRef desc = exampleReport->getShortDescription();
unsigned ErrorDiag;
{
llvm::SmallString<512> TmpStr;
@@ -1830,12 +1855,10 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning, TmpStr);
}
- switch (End-Beg) {
- default: assert(0 && "Don't handle this many ranges yet!");
- case 0: Diag.Report(L, ErrorDiag); break;
- case 1: Diag.Report(L, ErrorDiag) << Beg[0]; break;
- case 2: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1]; break;
- case 3: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1] << Beg[2]; break;
+ {
+ DiagnosticBuilder diagBuilder = Diag.Report(L, ErrorDiag);
+ for (BugReport::ranges_iterator I = Beg; I != End; ++I)
+ diagBuilder << *I;
}
// Emit a full diagnostic for the path if we have a PathDiagnosticClient.
@@ -1844,7 +1867,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
if (D->empty()) {
PathDiagnosticPiece* piece =
- new PathDiagnosticEventPiece(L, R->getDescription());
+ new PathDiagnosticEventPiece(L, exampleReport->getDescription());
for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
D->push_back(piece);
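The getRanges() rework in BugReporter.cpp is a small API pattern: return an iterator pair and stream each range in a loop, replacing the old fixed-arity switch over zero to three ranges. A standalone sketch of that shape (plain C++ mock of the diagnostic builder, not clang's Diagnostic):

    #include <iostream>
    #include <tuple>
    #include <utility>
    #include <vector>

    using Range = std::pair<int, int>;
    using ranges_iterator = const Range *;

    struct DiagBuilder {
      DiagBuilder &operator<<(const Range &r) {
        std::cout << "range [" << r.first << ',' << r.second << "]\n";
        return *this;
      }
    };

    std::pair<ranges_iterator, ranges_iterator>
    getRanges(const std::vector<Range> &v) {
      return {v.data(), v.data() + v.size()};
    }

    int main() {
      // Any number of ranges works now; the old switch capped out at three.
      std::vector<Range> rs = {{1, 4}, {9, 12}, {20, 21}, {30, 33}};
      DiagBuilder diag;
      ranges_iterator b, e;
      std::tie(b, e) = getRanges(rs); // cf. llvm::tie in the hunk above
      for (ranges_iterator i = b; i != e; ++i)
        diag << *i;
    }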
diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 91cf349..8e31ade 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BugReporterVisitors.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -14,18 +14,19 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/PathSensitive/ExplodedGraph.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
using namespace clang;
+using namespace ento;
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
-const Stmt *clang::bugreporter::GetDerefExpr(const ExplodedNode *N) {
+const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) {
// Pattern match for a few useful cases (do something smarter later):
// a[0], p->f, *p
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
@@ -46,16 +47,14 @@ const Stmt *clang::bugreporter::GetDerefExpr(const ExplodedNode *N) {
return NULL;
}
-const Stmt*
-clang::bugreporter::GetDenomExpr(const ExplodedNode *N) {
+const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
return BE->getRHS();
return NULL;
}
-const Stmt*
-clang::bugreporter::GetCalleeExpr(const ExplodedNode *N) {
+const Stmt *bugreporter::GetCalleeExpr(const ExplodedNode *N) {
// Callee is checked as a PreVisit to the CallExpr.
const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
if (const CallExpr *CE = dyn_cast<CallExpr>(S))
@@ -63,8 +62,7 @@ clang::bugreporter::GetCalleeExpr(const ExplodedNode *N) {
return NULL;
}
-const Stmt*
-clang::bugreporter::GetRetValExpr(const ExplodedNode *N) {
+const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
return RS->getRetValue();
@@ -252,14 +250,14 @@ public:
// Check if in the previous state it was feasible for this constraint
// to *not* be true.
- if (PrevN->getState()->Assume(Constraint, !Assumption)) {
+ if (PrevN->getState()->assume(Constraint, !Assumption)) {
isSatisfied = true;
// As a sanity check, make sure that the negation of the constraint
// was infeasible in the current state. If it is feasible, we somehow
// missed the transition point.
- if (N->getState()->Assume(Constraint, !Assumption))
+ if (N->getState()->assume(Constraint, !Assumption))
return NULL;
// We found the transition point for the constraint. We now need to
@@ -306,9 +304,9 @@ static void registerTrackConstraint(BugReporterContext& BRC,
BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
}
-void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
- const void *data,
- const ExplodedNode* N) {
+void bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
+ const void *data,
+ const ExplodedNode* N) {
const Stmt *S = static_cast<const Stmt*>(data);
@@ -318,13 +316,14 @@ void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
GRStateManager &StateMgr = BRC.getStateManager();
const GRState *state = N->getState();
+ // Walk through lvalue-to-rvalue conversions.
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
const VarRegion *R =
- StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
// What did we load?
- SVal V = state->getSVal(S);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
|| V.isUndef()) {
@@ -353,9 +352,9 @@ void clang::bugreporter::registerTrackNullOrUndefValue(BugReporterContext& BRC,
}
}
-void clang::bugreporter::registerFindLastStore(BugReporterContext& BRC,
- const void *data,
- const ExplodedNode* N) {
+void bugreporter::registerFindLastStore(BugReporterContext& BRC,
+ const void *data,
+ const ExplodedNode* N) {
const MemRegion *R = static_cast<const MemRegion*>(data);
@@ -400,7 +399,7 @@ public:
const DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&V);
if (!DV)
return 0;
- state = state->Assume(*DV, true);
+ state = state->assume(*DV, true);
if (state)
return 0;
@@ -416,12 +415,12 @@ public:
};
} // end anonymous namespace
-void clang::bugreporter::registerNilReceiverVisitor(BugReporterContext &BRC) {
+void bugreporter::registerNilReceiverVisitor(BugReporterContext &BRC) {
BRC.addVisitor(new NilReceiverVisitor());
}
// Registers every VarDecl inside a Stmt with a last store visitor.
-void clang::bugreporter::registerVarDeclsLastStore(BugReporterContext &BRC,
+void bugreporter::registerVarDeclsLastStore(BugReporterContext &BRC,
const void *stmt,
const ExplodedNode *N) {
const Stmt *S = static_cast<const Stmt *>(stmt);
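TrackConstraintBRVisitor, touched above, searches backwards through predecessor nodes for the transition point: the first step at which the negated constraint stops being feasible, i.e. where the assumption was actually established. A loose sketch of that search over a linear path (toy types, not the analyzer's ExplodedGraph):

    #include <iostream>
    #include <vector>

    struct NodeState { bool negationFeasible; };

    // path[0] is the root; path.back() is the error node. Returns the index
    // of the node where the constraint first became established, or -1.
    int findTransitionPoint(const std::vector<NodeState> &path) {
      for (std::size_t i = 1; i < path.size(); ++i)
        if (path[i - 1].negationFeasible && !path[i].negationFeasible)
          return static_cast<int>(i);
      return -1;
    }

    int main() {
      std::vector<NodeState> path = {{true}, {true}, {false}, {false}};
      std::cout << "assumption established at node "
                << findTransitionPoint(path) << '\n'; // prints 2
    }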
diff --git a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CFRefCount.cpp
index 6fa48b2..b3721d7 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CFRefCount.cpp
@@ -16,15 +16,15 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/DomainSpecific/CocoaConventions.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
@@ -34,19 +34,21 @@
#include <stdarg.h>
using namespace clang;
+using namespace ento;
using llvm::StringRef;
using llvm::StrInStrNoCase;
namespace {
class InstanceReceiver {
- const ObjCMessageExpr *ME;
+ ObjCMessage Msg;
const LocationContext *LC;
public:
- InstanceReceiver(const ObjCMessageExpr *me = 0,
- const LocationContext *lc = 0) : ME(me), LC(lc) {}
+ InstanceReceiver() : LC(0) { }
+ InstanceReceiver(const ObjCMessage &msg,
+ const LocationContext *lc = 0) : Msg(msg), LC(lc) {}
bool isValid() const {
- return ME && ME->isInstanceMessage();
+ return Msg.isValid() && Msg.isInstanceMessage();
}
operator bool() const {
return isValid();
@@ -56,7 +58,7 @@ public:
assert(isValid());
// We have an expression for the receiver? Fetch the value
// of that expression.
- if (const Expr *Ex = ME->getInstanceReceiver())
+ if (const Expr *Ex = Msg.getInstanceReceiver())
return state->getSValAsScalarOrLoc(Ex);
// Otherwise we are sending a message to super. In this case the
@@ -69,11 +71,11 @@ public:
SourceRange getSourceRange() const {
assert(isValid());
- if (const Expr *Ex = ME->getInstanceReceiver())
+ if (const Expr *Ex = Msg.getInstanceReceiver())
return Ex->getSourceRange();
// Otherwise we are sending a message to super.
- SourceLocation L = ME->getSuperLoc();
+ SourceLocation L = Msg.getSuperLoc();
assert(L.isValid());
return SourceRange(L, L);
}
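The InstanceReceiver rewrite in this hunk swaps a raw ObjCMessageExpr* for an ObjCMessage wrapper, so explicit-receiver sends and 'super' sends go through one interface. A rough standalone analogue of that wrapper shape (toy types and fields, not clang's):

    #include <cassert>
    #include <optional>
    #include <string>

    // Toy message value: either an explicit receiver expression or a
    // 'super' send carrying only a source location.
    struct Message {
      std::optional<std::string> receiverExpr; // empty for 'super' sends
      int superLoc = -1;

      bool isValid() const { return receiverExpr.has_value() || superLoc >= 0; }

      // One query serves both kinds of send, as with
      // ObjCMessage::getInstanceReceiver above.
      const std::string *getInstanceReceiver() const {
        return receiverExpr ? &*receiverExpr : nullptr;
      }
    };

    int main() {
      Message explicitSend{std::string("obj"), -1};
      Message superSend{std::nullopt, 42};
      assert(explicitSend.getInstanceReceiver());
      assert(!superSend.getInstanceReceiver() && superSend.isValid());
    }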
@@ -90,17 +92,17 @@ ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
}
namespace {
-class GenericNodeBuilder {
- GRStmtNodeBuilder *SNB;
+class GenericNodeBuilderRefCount {
+ StmtNodeBuilder *SNB;
const Stmt *S;
const void *tag;
- GREndPathNodeBuilder *ENB;
+ EndOfFunctionNodeBuilder *ENB;
public:
- GenericNodeBuilder(GRStmtNodeBuilder &snb, const Stmt *s,
+ GenericNodeBuilderRefCount(StmtNodeBuilder &snb, const Stmt *s,
const void *t)
: SNB(&snb), S(s), tag(t), ENB(0) {}
- GenericNodeBuilder(GREndPathNodeBuilder &enb)
+ GenericNodeBuilderRefCount(EndOfFunctionNodeBuilder &enb)
: SNB(0), S(0), tag(0), ENB(&enb) {}
ExplodedNode *MakeNode(const GRState *state, ExplodedNode *Pred) {
@@ -396,6 +398,7 @@ void RefVal::print(llvm::raw_ostream& Out) const {
typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
namespace clang {
+namespace ento {
template<>
struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> {
static void* GDMIndex() {
@@ -404,6 +407,7 @@ namespace clang {
}
};
}
+}
//===----------------------------------------------------------------------===//
// Summaries
@@ -447,6 +451,10 @@ public:
return DefaultArgEffect;
}
+
+ void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
+ Args = af.add(Args, idx, e);
+ }
/// setDefaultArgEffect - Set the default argument effect.
void setDefaultArgEffect(ArgEffect E) {
@@ -463,6 +471,10 @@ public:
/// terminate the path.
bool isEndPath() const { return EndPath; }
+
+ /// Sets the effect on the receiver of the message.
+ void setReceiverEffect(ArgEffect e) { Receiver = e; }
+
/// getReceiverEffect - Returns the effect on the receiver of the call.
/// This is only meaningful if the summary applies to an ObjCMessageExpr*.
ArgEffect getReceiverEffect() const { return Receiver; }
@@ -762,7 +774,7 @@ private:
}
void addPanicSummary(const char* Cls, ...) {
- RetainSummary* Summ = getPersistentSummary(AF.GetEmptyMap(),
+ RetainSummary* Summ = getPersistentSummary(AF.getEmptyMap(),
RetEffect::MakeNoRet(),
DoNothing, DoNothing, true);
va_list argp;
@@ -776,12 +788,12 @@ public:
RetainSummaryManager(ASTContext& ctx, bool gcenabled)
: Ctx(ctx),
CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
- GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.GetEmptyMap()),
+ GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned()
: RetEffect::MakeOwned(RetEffect::ObjC, true)),
ObjCInitRetE(gcenabled ? RetEffect::MakeGCNotOwned()
: RetEffect::MakeOwnedWhenTrackedReceiver()),
- DefaultSummary(AF.GetEmptyMap() /* per-argument effects (none) */,
+ DefaultSummary(AF.getEmptyMap() /* per-argument effects (none) */,
RetEffect::MakeNoRet() /* return effect */,
MayEscape, /* default argument effect */
DoNothing /* receiver effect */),
@@ -795,14 +807,14 @@ public:
RetainSummary* getSummary(const FunctionDecl* FD);
- RetainSummary *getInstanceMethodSummary(const ObjCMessageExpr *ME,
+ RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
const GRState *state,
const LocationContext *LC);
- RetainSummary* getInstanceMethodSummary(const ObjCMessageExpr* ME,
+ RetainSummary* getInstanceMethodSummary(const ObjCMessage &msg,
const ObjCInterfaceDecl* ID) {
- return getInstanceMethodSummary(ME->getSelector(), 0,
- ID, ME->getMethodDecl(), ME->getType());
+ return getInstanceMethodSummary(msg.getSelector(), 0,
+ ID, msg.getMethodDecl(), msg.getType(Ctx));
}
RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName,
@@ -815,23 +827,15 @@ public:
const ObjCMethodDecl *MD,
QualType RetTy);
- RetainSummary *getClassMethodSummary(const ObjCMessageExpr *ME) {
- ObjCInterfaceDecl *Class = 0;
- switch (ME->getReceiverKind()) {
- case ObjCMessageExpr::Class:
- case ObjCMessageExpr::SuperClass:
- Class = ME->getReceiverInterface();
- break;
-
- case ObjCMessageExpr::Instance:
- case ObjCMessageExpr::SuperInstance:
- break;
- }
+ RetainSummary *getClassMethodSummary(const ObjCMessage &msg) {
+ const ObjCInterfaceDecl *Class = 0;
+ if (!msg.isInstanceMessage())
+ Class = msg.getReceiverInterface();
- return getClassMethodSummary(ME->getSelector(),
+ return getClassMethodSummary(msg.getSelector(),
Class? Class->getIdentifier() : 0,
Class,
- ME->getMethodDecl(), ME->getType());
+ msg.getMethodDecl(), msg.getType(Ctx));
}
/// getMethodSummary - This version of getMethodSummary is used to query
@@ -881,7 +885,7 @@ RetainSummaryManager::~RetainSummaryManager() {}
ArgEffects RetainSummaryManager::getArgEffects() {
ArgEffects AE = ScratchArgs;
- ScratchArgs = AF.GetEmptyMap();
+ ScratchArgs = AF.getEmptyMap();
return AE;
}
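// Editor's note: a minimal standalone sketch (not part of the patch) of the
// llvm::ImmutableMap::Factory API whose methods this change renames from
// Add/GetEmptyMap to add/getEmptyMap. Updates are functional: each call
// returns a new map and leaves the original untouched.
#include "llvm/ADT/ImmutableMap.h"

void immutableMapSketch() {
  llvm::ImmutableMap<unsigned, int>::Factory F;
  llvm::ImmutableMap<unsigned, int> M = F.getEmptyMap();
  M = F.add(M, 1, 42);   // new map containing {1 -> 42}
  M = F.remove(M, 1);    // back to empty; intermediate maps persist
}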
@@ -967,13 +971,13 @@ RetainSummary* RetainSummaryManager::getSummary(const FunctionDecl* FD) {
// FIXES: <rdar://problem/6326900>
// This should be addressed using an API table. This strcmp is also
// a little gross, but there is no need to super optimize here.
- ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
+ ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "IOServiceAddNotification" ||
FName == "IOServiceAddMatchingNotification") {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
- ScratchArgs = AF.Add(ScratchArgs, 2, DecRef);
+ ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "CVPixelBufferCreateWithBytes") {
// FIXES: <rdar://problem/7283567>
@@ -982,14 +986,14 @@ RetainSummary* RetainSummaryManager::getSummary(const FunctionDecl* FD) {
// a callback and doing full IPA to make sure this is done correctly.
// FIXME: This function has an out parameter that returns an
// allocated object.
- ScratchArgs = AF.Add(ScratchArgs, 7, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "CGBitmapContextCreateWithData") {
// FIXES: <rdar://problem/7358899>
// Eventually this can be improved by recognizing that 'releaseInfo'
// passed to CGBitmapContextCreateWithData is released via
// a callback and doing full IPA to make sure this is done correctly.
- ScratchArgs = AF.Add(ScratchArgs, 8, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
DoNothing, DoNothing);
} else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
@@ -998,7 +1002,7 @@ RetainSummary* RetainSummaryManager::getSummary(const FunctionDecl* FD) {
// buffer passed to CVPixelBufferCreateWithPlanarBytes is released
// via a callback and doing full IPA to make sure this is done
// correctly.
- ScratchArgs = AF.Add(ScratchArgs, 12, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
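// Editor's note: the FIXMEs above ask for an API table in place of these
// strcmp chains. A hedged sketch of what such a table could look like, using
// only the functions and argument indices visible in this hunk (the entry
// struct and its use are assumptions, not patch content):
struct SummaryTableEntry {
  const char *Name;   // function whose summary is hard-coded
  unsigned ArgIdx;    // argument the effect applies to
  ArgEffect Effect;
};

static const SummaryTableEntry APIEffects[] = {
  { "IOServiceAddNotification",            2, DecRef },
  { "IOServiceAddMatchingNotification",    2, DecRef },
  { "CVPixelBufferCreateWithBytes",        7, StopTracking },
  { "CGBitmapContextCreateWithData",       8, StopTracking },
  { "CVPixelBufferCreateWithPlanarBytes", 12, StopTracking },
};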
@@ -1129,19 +1133,19 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
switch (func) {
case cfretain: {
- ScratchArgs = AF.Add(ScratchArgs, 0, IncRef);
+ ScratchArgs = AF.add(ScratchArgs, 0, IncRef);
return getPersistentSummary(RetEffect::MakeAlias(0),
DoNothing, DoNothing);
}
case cfrelease: {
- ScratchArgs = AF.Add(ScratchArgs, 0, DecRef);
+ ScratchArgs = AF.add(ScratchArgs, 0, DecRef);
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, DoNothing);
}
case cfmakecollectable: {
- ScratchArgs = AF.Add(ScratchArgs, 0, MakeCollectable);
+ ScratchArgs = AF.add(ScratchArgs, 0, MakeCollectable);
return getPersistentSummary(RetEffect::MakeAlias(0),DoNothing, DoNothing);
}
@@ -1156,8 +1160,8 @@ RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl* FD) {
assert (ScratchArgs.isEmpty());
if (FD->getIdentifier() == CFDictionaryCreateII) {
- ScratchArgs = AF.Add(ScratchArgs, 1, DoNothingByRef);
- ScratchArgs = AF.Add(ScratchArgs, 2, DoNothingByRef);
+ ScratchArgs = AF.add(ScratchArgs, 1, DoNothingByRef);
+ ScratchArgs = AF.add(ScratchArgs, 2, DoNothingByRef);
}
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
@@ -1191,6 +1195,20 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
if (!FD)
return;
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+ pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->getAttr<NSConsumedAttr>()) {
+ if (!GCEnabled)
+ Summ.addArg(AF, parm_idx, DecRef);
+ }
+ else if (pd->getAttr<CFConsumedAttr>()) {
+ Summ.addArg(AF, parm_idx, DecRef);
+ }
+ }
+
QualType RetTy = FD->getResultType();
// Determine if there is a special return effect for this method.
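// Editor's note: a compact index-based rendering of the annotation walk above
// (a sketch, not additional patch content) -- each parameter marked
// ns_consumed or cf_consumed contributes a DecRef argument effect:
static void addConsumedArgEffects(const FunctionDecl *FD, RetainSummary &Summ,
                                  ArgEffects::Factory &AF, bool GCEnabled) {
  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *P = FD->getParamDecl(i);
    if (P->getAttr<NSConsumedAttr>()) {
      if (!GCEnabled)                 // ns_consumed is a no-op under GC
        Summ.addArg(AF, i, DecRef);
    } else if (P->getAttr<CFConsumedAttr>()) {
      Summ.addArg(AF, i, DecRef);     // CF effects apply regardless of GC
    }
  }
}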
@@ -1223,6 +1241,26 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
bool isTrackedLoc = false;
+ // Effects on the receiver.
+ if (MD->getAttr<NSConsumesSelfAttr>()) {
+ if (!GCEnabled)
+ Summ.setReceiverEffect(DecRefMsg);
+ }
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (ObjCMethodDecl::param_iterator pi=MD->param_begin(), pe=MD->param_end();
+ pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->getAttr<NSConsumedAttr>()) {
+ if (!GCEnabled)
+ Summ.addArg(AF, parm_idx, DecRef);
+ }
+ else if (pd->getAttr<CFConsumedAttr>()) {
+ Summ.addArg(AF, parm_idx, DecRef);
+ }
+ }
+
// Determine if there is a special return effect for this method.
if (cocoa::isCocoaObjectRef(MD->getResultType())) {
if (MD->getAttr<NSReturnsRetainedAttr>()) {
@@ -1263,7 +1301,7 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
if (ParmVarDecl *PD = *I) {
QualType Ty = Ctx.getCanonicalType(PD->getType());
if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy)
- ScratchArgs = AF.Add(ScratchArgs, i, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, i, StopTracking);
}
}
@@ -1283,7 +1321,7 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
// Look for methods that return an owned object.
if (cocoa::isCocoaObjectRef(RetTy)) {
- // EXPERIMENTAL: Assume the Cocoa conventions for all objects returned
+ // EXPERIMENTAL: assume the Cocoa conventions for all objects returned
// by instance methods.
RetEffect E = cocoa::followsFundamentalRule(S)
? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
@@ -1307,13 +1345,13 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
}
RetainSummary*
-RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
const GRState *state,
const LocationContext *LC) {
// We need the type-information of the tracked receiver object
// Retrieve it from the state.
- const Expr *Receiver = ME->getInstanceReceiver();
+ const Expr *Receiver = msg.getInstanceReceiver();
const ObjCInterfaceDecl* ID = 0;
// FIXME: Is this really working as expected? There are cases where
@@ -1341,30 +1379,12 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
}
} else {
// FIXME: Hack for 'super'.
- ID = ME->getReceiverInterface();
+ ID = msg.getReceiverInterface();
}
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
- RetainSummary *Summ = getInstanceMethodSummary(ME, ID);
-
- // Special-case: are we sending a mesage to "self"?
- // This is a hack. When we have full-IP this should be removed.
- if (isa<ObjCMethodDecl>(LC->getDecl()) && Receiver) {
- if (const loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&receiverV)) {
- // Get the region associated with 'self'.
- if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl()) {
- SVal SelfVal = state->getSVal(state->getRegion(SelfDecl, LC));
- if (L->StripCasts() == SelfVal.getAsRegion()) {
- // Update the summary to make the default argument effect
- // 'StopTracking'.
- Summ = copySummary(Summ);
- Summ->setDefaultArgEffect(StopTracking);
- }
- }
- }
- }
-
+ RetainSummary *Summ = getInstanceMethodSummary(msg, ID);
return Summ ? Summ : getDefaultSummary();
}
@@ -1421,27 +1441,16 @@ void RetainSummaryManager::InitializeClassMethodSummaries() {
assert(ScratchArgs.isEmpty());
RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
- // Create the summaries for "alloc", "new", and "allocWithZone:" for
- // NSObject and its derivatives.
- addNSObjectClsMethSummary(GetNullarySelector("alloc", Ctx), Summ);
- addNSObjectClsMethSummary(GetNullarySelector("new", Ctx), Summ);
- addNSObjectClsMethSummary(GetUnarySelector("allocWithZone", Ctx), Summ);
-
// Create the [NSAssertionHandler currentHander] summary.
addClassMethSummary("NSAssertionHandler", "currentHandler",
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
// Create the [NSAutoreleasePool addObject:] summary.
- ScratchArgs = AF.Add(ScratchArgs, 0, Autorelease);
+ ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
addClassMethSummary("NSAutoreleasePool", "addObject",
getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, Autorelease));
- // Create a summary for [NSCursor dragCopyCursor].
- addClassMethSummary("NSCursor", "dragCopyCursor",
- getPersistentSummary(RetEffect::MakeNoRet(), DoNothing,
- DoNothing));
-
// Create the summaries for [NSObject performSelector...]. We treat
// these as 'stop tracking' for the arguments because they are often
// used for delegates that can release the object. When we have better
@@ -1463,15 +1472,6 @@ void RetainSummaryManager::InitializeClassMethodSummaries() {
"withObject", "waitUntilDone", "modes", NULL);
addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
"withObject", NULL);
-
- // Specially handle NSData.
- RetainSummary *dataWithBytesNoCopySumm =
- getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC), DoNothing,
- DoNothing);
- addClsMethSummary("NSData", dataWithBytesNoCopySumm,
- "dataWithBytesNoCopy", "length", NULL);
- addClsMethSummary("NSData", dataWithBytesNoCopySumm,
- "dataWithBytesNoCopy", "length", "freeWhenDone", NULL);
}
void RetainSummaryManager::InitializeMethodSummaries() {
@@ -1493,12 +1493,6 @@ void RetainSummaryManager::InitializeMethodSummaries() {
RetainSummary *CFAllocSumm =
getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
- // Create the "copy" selector.
- addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
-
- // Create the "mutableCopy" selector.
- addNSObjectMethSummary(GetNullarySelector("mutableCopy", Ctx), AllocSumm);
-
// Create the "retain" selector.
RetEffect E = RetEffect::MakeReceiverAlias();
RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
@@ -1602,6 +1596,7 @@ namespace { class AutoreleasePoolContents {}; }
namespace { class AutoreleaseStack {}; }
namespace clang {
+namespace ento {
template<> struct GRStateTrait<AutoreleaseStack>
: public GRStatePartialTrait<ARStack> {
static inline void* GDMIndex() { return &AutoRBIndex; }
@@ -1611,6 +1606,7 @@ template<> struct GRStateTrait<AutoreleasePoolContents>
: public GRStatePartialTrait<ARPoolContents> {
static inline void* GDMIndex() { return &AutoRCIndex; }
};
+} // end ento namespace
} // end clang namespace
static SymbolRef GetCurrentAutoreleasePool(const GRState* state) {
@@ -1627,10 +1623,10 @@ static const GRState * SendAutorelease(const GRState *state,
if (cnts) {
const unsigned *cnt = (*cnts).lookup(sym);
- newCnts = F.Add(*cnts, sym, cnt ? *cnt + 1 : 1);
+ newCnts = F.add(*cnts, sym, cnt ? *cnt + 1 : 1);
}
else
- newCnts = F.Add(F.GetEmptyMap(), sym, 1);
+ newCnts = F.add(F.getEmptyMap(), sym, 1);
return state->set<AutoreleasePoolContents>(pool, newCnts);
}
@@ -1641,7 +1637,7 @@ static const GRState * SendAutorelease(const GRState *state,
namespace {
-class CFRefCount : public GRTransferFuncs {
+class CFRefCount : public TransferFuncs {
public:
class BindingsPrinter : public GRState::Printer {
public:
@@ -1669,7 +1665,7 @@ private:
RefVal::Kind& hasErr);
void ProcessNonLeakError(ExplodedNodeSet& Dst,
- GRStmtNodeBuilder& Builder,
+ StmtNodeBuilder& Builder,
const Expr* NodeExpr, SourceRange ErrorRange,
ExplodedNode* Pred,
const GRState* St,
@@ -1680,8 +1676,8 @@ private:
ExplodedNode* ProcessLeaks(const GRState * state,
llvm::SmallVectorImpl<SymbolRef> &Leaked,
- GenericNodeBuilder &Builder,
- GRExprEngine &Eng,
+ GenericNodeBuilderRefCount &Builder,
+ ExprEngine &Eng,
ExplodedNode *Pred = 0);
public:
@@ -1694,7 +1690,7 @@ public:
virtual ~CFRefCount() {}
- void RegisterChecks(GRExprEngine &Eng);
+ void RegisterChecks(ExprEngine &Eng);
virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {
Printers.push_back(new BindingsPrinter());
@@ -1710,59 +1706,59 @@ public:
// Calls.
- void EvalSummary(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+ void evalSummary(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
const Expr* Ex,
+ const CallOrObjCMessage &callOrMsg,
InstanceReceiver Receiver,
const RetainSummary& Summ,
const MemRegion *Callee,
- ConstExprIterator arg_beg, ConstExprIterator arg_end,
ExplodedNode* Pred, const GRState *state);
- virtual void EvalCall(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+ virtual void evalCall(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
const CallExpr* CE, SVal L,
ExplodedNode* Pred);
- virtual void EvalObjCMessageExpr(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
- const ObjCMessageExpr* ME,
- ExplodedNode* Pred,
- const GRState *state);
+ virtual void evalObjCMessage(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
+ ObjCMessage msg,
+ ExplodedNode* Pred,
+ const GRState *state);
// Stores.
- virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val);
+ virtual void evalBind(StmtNodeBuilderRef& B, SVal location, SVal val);
// End-of-path.
- virtual void EvalEndPath(GRExprEngine& Engine,
- GREndPathNodeBuilder& Builder);
+ virtual void evalEndPath(ExprEngine& Engine,
+ EndOfFunctionNodeBuilder& Builder);
- virtual void EvalDeadSymbols(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
+ virtual void evalDeadSymbols(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
ExplodedNode* Pred,
const GRState* state,
SymbolReaper& SymReaper);
std::pair<ExplodedNode*, const GRState *>
- HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
- ExplodedNode* Pred, GRExprEngine &Eng,
+ HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilderRefCount Bd,
+ ExplodedNode* Pred, ExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop);
// Return statements.
- virtual void EvalReturn(ExplodedNodeSet& Dst,
- GRExprEngine& Engine,
- GRStmtNodeBuilder& Builder,
+ virtual void evalReturn(ExplodedNodeSet& Dst,
+ ExprEngine& Engine,
+ StmtNodeBuilder& Builder,
const ReturnStmt* S,
ExplodedNode* Pred);
// Assumptions.
- virtual const GRState *EvalAssume(const GRState* state, SVal condition,
+ virtual const GRState *evalAssume(const GRState* state, SVal condition,
bool assumption);
};
@@ -1941,15 +1937,15 @@ namespace {
virtual ~CFRefReport() {}
- CFRefBug& getBugType() {
+ CFRefBug& getBugType() const {
return (CFRefBug&) RangedBugReport::getBugType();
}
- virtual void getRanges(const SourceRange*& beg, const SourceRange*& end) {
+ virtual std::pair<ranges_iterator, ranges_iterator> getRanges() const {
if (!getBugType().isLeak())
- RangedBugReport::getRanges(beg, end);
+ return RangedBugReport::getRanges();
else
- beg = end = 0;
+ return std::make_pair(ranges_iterator(), ranges_iterator());
}
SymbolRef getSymbol() const { return Sym; }
@@ -1970,7 +1966,7 @@ namespace {
public:
CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
ExplodedNode *n, SymbolRef sym,
- GRExprEngine& Eng);
+ ExprEngine& Eng);
PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
const ExplodedNode* N);
@@ -2061,9 +2057,10 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
else
os << "function call";
}
- else {
- assert (isa<ObjCMessageExpr>(S));
+ else if (isa<ObjCMessageExpr>(S)) {
os << "Method";
+ } else {
+ os << "Property";
}
if (CurrV.getObjKind() == RetEffect::CF) {
@@ -2420,15 +2417,15 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
"collector";
}
else
- os << " is no longer referenced after this point and has a retain count of"
- " +" << RV->getCount() << " (object leaked)";
+ os << " is not referenced later in this execution path and has a retain "
+ "count of +" << RV->getCount() << " (object leaked)";
return new PathDiagnosticEventPiece(L, os.str());
}
CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
ExplodedNode *n,
- SymbolRef sym, GRExprEngine& Eng)
+ SymbolRef sym, ExprEngine& Eng)
: CFRefReport(D, tf, n, sym) {
// Most bug reports are cached at the location where they occurred.
@@ -2442,7 +2439,7 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
const ExplodedNode* AllocNode = 0;
llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
- GetAllocationSite(Eng.getStateManager(), getEndNode(), getSymbol());
+ GetAllocationSite(Eng.getStateManager(), getErrorNode(), getSymbol());
// Get the SourceLocation for the allocation site.
ProgramPoint P = AllocNode->getLocation();
@@ -2494,20 +2491,18 @@ static QualType GetReturnType(const Expr* RetE, ASTContext& Ctx) {
return RetTy;
}
-void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+void CFRefCount::evalSummary(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
const Expr* Ex,
+ const CallOrObjCMessage &callOrMsg,
InstanceReceiver Receiver,
const RetainSummary& Summ,
const MemRegion *Callee,
- ConstExprIterator arg_beg,
- ConstExprIterator arg_end,
ExplodedNode* Pred, const GRState *state) {
// Evaluate the effect of the arguments.
RefVal::Kind hasErr = (RefVal::Kind) 0;
- unsigned idx = 0;
SourceRange ErrorRange;
SymbolRef ErrorSym = 0;
@@ -2519,8 +2514,20 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
// done an invalidation pass.
llvm::DenseSet<SymbolRef> WhitelistedSymbols;
- for (ConstExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
- SVal V = state->getSValAsScalarOrLoc(*I);
+ // Invalidate all instance variables of the receiver of a message.
+ // FIXME: We should be able to do better with inter-procedural analysis.
+ if (Receiver) {
+ SVal V = Receiver.getSValAsScalarOrLoc(state);
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ if (state->get<RefBindings>(Sym))
+ WhitelistedSymbols.insert(Sym);
+ }
+ if (const MemRegion *region = V.getAsRegion())
+ RegionsToInvalidate.push_back(region);
+ }
+
+ for (unsigned idx = 0, e = callOrMsg.getNumArgs(); idx != e; ++idx) {
+ SVal V = callOrMsg.getArgSValAsScalarOrLoc(idx);
SymbolRef Sym = V.getAsLocSymbol();
if (Sym)
@@ -2528,7 +2535,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
WhitelistedSymbols.insert(Sym);
state = Update(state, Sym, *T, Summ.getArg(idx), hasErr);
if (hasErr) {
- ErrorRange = (*I)->getSourceRange();
+ ErrorRange = callOrMsg.getArgSourceRange(idx);
ErrorSym = Sym;
break;
}
@@ -2606,7 +2613,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
// NOTE: Even if RegionsToInvalidate is empty, we must still invalidate
// global variables.
- state = state->InvalidateRegions(RegionsToInvalidate.data(),
+ state = state->invalidateRegions(RegionsToInvalidate.data(),
RegionsToInvalidate.data() +
RegionsToInvalidate.size(),
Ex, Count, &IS,
@@ -2671,23 +2678,11 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
// FIXME: We eventually should handle structs and other compound types
// that are returned by value.
- QualType T = Ex->getType();
-
- // For CallExpr, use the result type to know if it returns a reference.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Ex)) {
- const Expr *Callee = CE->getCallee();
- if (const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl())
- T = FD->getResultType();
- }
- else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Ex)) {
- if (const ObjCMethodDecl *MD = ME->getMethodDecl())
- T = MD->getResultType();
- }
-
- if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
+ QualType T = callOrMsg.getResultType(Eng.getContext());
+ if (Loc::isLocType(T) || (T->isIntegerType() && T->isScalarType())) {
unsigned Count = Builder.getCurrentBlockCount();
- ValueManager &ValMgr = Eng.getValueManager();
- SVal X = ValMgr.getConjuredSymbolVal(NULL, Ex, T, Count);
+ SValBuilder &svalBuilder = Eng.getSValBuilder();
+ SVal X = svalBuilder.getConjuredSymbolVal(NULL, Ex, T, Count);
state = state->BindExpr(Ex, X, false);
}
@@ -2696,9 +2691,8 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
case RetEffect::Alias: {
unsigned idx = RE.getIndex();
- assert (arg_end >= arg_beg);
- assert (idx < (unsigned) (arg_end - arg_beg));
- SVal V = state->getSValAsScalarOrLoc(*(arg_beg+idx));
+ assert (idx < callOrMsg.getNumArgs());
+ SVal V = callOrMsg.getArgSValAsScalarOrLoc(idx);
state = state->BindExpr(Ex, V, false);
break;
}
@@ -2713,19 +2707,19 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
- ValueManager &ValMgr = Eng.getValueManager();
- SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ SValBuilder &svalBuilder = Eng.getSValBuilder();
+ SymbolRef Sym = svalBuilder.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, svalBuilder.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
RetT));
- state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
+ state = state->BindExpr(Ex, svalBuilder.makeLoc(Sym), false);
// FIXME: Add a flag to the checker where allocations are assumed to
// *not* fail.
#if 0
if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
bool isFeasible;
- state = state.Assume(loc::SymbolVal(Sym), true, isFeasible);
+ state = state.assume(loc::SymbolVal(Sym), true, isFeasible);
assert(isFeasible && "Cannot assume fresh symbol is non-null.");
}
#endif
@@ -2736,12 +2730,12 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
case RetEffect::GCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
- ValueManager &ValMgr = Eng.getValueManager();
- SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
- QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ SValBuilder &svalBuilder = Eng.getSValBuilder();
+ SymbolRef Sym = svalBuilder.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, svalBuilder.getContext());
state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
RetT));
- state = state->BindExpr(Ex, ValMgr.makeLoc(Sym), false);
+ state = state->BindExpr(Ex, svalBuilder.makeLoc(Sym), false);
break;
}
}
@@ -2756,9 +2750,9 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
}
-void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+void CFRefCount::evalCall(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
const CallExpr* CE, SVal L,
ExplodedNode* Pred) {
@@ -2777,25 +2771,28 @@ void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
}
assert(Summ);
- EvalSummary(Dst, Eng, Builder, CE, 0, *Summ, L.getAsRegion(),
- CE->arg_begin(), CE->arg_end(), Pred, Builder.GetState(Pred));
+ evalSummary(Dst, Eng, Builder, CE,
+ CallOrObjCMessage(CE, Builder.GetState(Pred)),
+ InstanceReceiver(), *Summ, L.getAsRegion(),
+ Pred, Builder.GetState(Pred));
}
-void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
- const ObjCMessageExpr* ME,
- ExplodedNode* Pred,
- const GRState *state) {
+void CFRefCount::evalObjCMessage(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
+ ObjCMessage msg,
+ ExplodedNode* Pred,
+ const GRState *state) {
RetainSummary *Summ =
- ME->isInstanceMessage()
- ? Summaries.getInstanceMethodSummary(ME, state,Pred->getLocationContext())
- : Summaries.getClassMethodSummary(ME);
+ msg.isInstanceMessage()
+ ? Summaries.getInstanceMethodSummary(msg, state, Pred->getLocationContext())
+ : Summaries.getClassMethodSummary(msg);
assert(Summ && "RetainSummary is null");
- EvalSummary(Dst, Eng, Builder, ME,
- InstanceReceiver(ME, Pred->getLocationContext()), *Summ, NULL,
- ME->arg_begin(), ME->arg_end(), Pred, state);
+ evalSummary(Dst, Eng, Builder, msg.getOriginExpr(),
+ CallOrObjCMessage(msg, Builder.GetState(Pred)),
+ InstanceReceiver(msg, Pred->getLocationContext()), *Summ, NULL,
+ Pred, state);
}
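// Editor's note: a sketch (simplified, assumed types) of the abstraction that
// ObjCMessage/CallOrObjCMessage introduce above -- one accessor surface over
// call-like expressions, so evalSummary no longer needs per-kind argument
// iterators:
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
using namespace clang;

struct CallLikeSketch {
  const CallExpr *CE;          // exactly one of CE/ME is non-null
  const ObjCMessageExpr *ME;

  unsigned getNumArgs() const {
    return CE ? CE->getNumArgs() : ME->getNumArgs();
  }
  const Expr *getArg(unsigned i) const {
    return CE ? CE->getArg(i) : ME->getArg(i);
  }
  SourceRange getArgSourceRange(unsigned i) const {
    return getArg(i)->getSourceRange();
  }
};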
namespace {
@@ -2813,7 +2810,7 @@ public:
} // end anonymous namespace
-void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
+void CFRefCount::evalBind(StmtNodeBuilderRef& B, SVal location, SVal val) {
// Are we storing to something that causes the value to "escape"?
bool escapes = false;
@@ -2852,9 +2849,9 @@ void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
// Return statements.
-void CFRefCount::EvalReturn(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+void CFRefCount::evalReturn(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
const ReturnStmt* S,
ExplodedNode* Pred) {
@@ -2912,7 +2909,7 @@ void CFRefCount::EvalReturn(ExplodedNodeSet& Dst,
// Update the autorelease counts.
static unsigned autoreleasetag = 0;
- GenericNodeBuilder Bd(Builder, S, &autoreleasetag);
+ GenericNodeBuilderRefCount Bd(Builder, S, &autoreleasetag);
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state , Bd, Pred, Eng, Sym,
X, stop);
@@ -2994,14 +2991,14 @@ void CFRefCount::EvalReturn(ExplodedNodeSet& Dst,
// Assumptions.
-const GRState* CFRefCount::EvalAssume(const GRState *state,
+const GRState* CFRefCount::evalAssume(const GRState *state,
SVal Cond, bool Assumption) {
- // FIXME: We may add to the interface of EvalAssume the list of symbols
+ // FIXME: We may add to the interface of evalAssume the list of symbols
// whose assumptions have changed. For now we just iterate through the
// bindings and check if any of the tracked symbols are NULL. This isn't
// too bad since the number of symbols we will track in practice are
- // probably small and EvalAssume is only called at branches and a few
+ // probably small and evalAssume is only called at branches and a few
// other places.
RefBindings B = state->get<RefBindings>();
@@ -3016,7 +3013,7 @@ const GRState* CFRefCount::EvalAssume(const GRState *state,
// If this is the case, stop tracking the symbol.
if (state->getSymVal(I.getKey())) {
changed = true;
- B = RefBFactory.Remove(B, I.getKey());
+ B = RefBFactory.remove(B, I.getKey());
}
}
@@ -3160,9 +3157,10 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
//===----------------------------------------------------------------------===//
std::pair<ExplodedNode*, const GRState *>
-CFRefCount::HandleAutoreleaseCounts(const GRState * state, GenericNodeBuilder Bd,
+CFRefCount::HandleAutoreleaseCounts(const GRState * state,
+ GenericNodeBuilderRefCount Bd,
ExplodedNode* Pred,
- GRExprEngine &Eng,
+ ExprEngine &Eng,
SymbolRef Sym, RefVal V, bool &stop) {
unsigned ACnt = V.getAutoreleaseCount();
@@ -3245,8 +3243,8 @@ CFRefCount::HandleSymbolDeath(const GRState * state, SymbolRef sid, RefVal V,
ExplodedNode*
CFRefCount::ProcessLeaks(const GRState * state,
llvm::SmallVectorImpl<SymbolRef> &Leaked,
- GenericNodeBuilder &Builder,
- GRExprEngine& Eng,
+ GenericNodeBuilderRefCount &Builder,
+ ExprEngine& Eng,
ExplodedNode *Pred) {
if (Leaked.empty())
@@ -3270,11 +3268,11 @@ CFRefCount::ProcessLeaks(const GRState * state,
return N;
}
-void CFRefCount::EvalEndPath(GRExprEngine& Eng,
- GREndPathNodeBuilder& Builder) {
+void CFRefCount::evalEndPath(ExprEngine& Eng,
+ EndOfFunctionNodeBuilder& Builder) {
const GRState *state = Builder.getState();
- GenericNodeBuilder Bd(Builder);
+ GenericNodeBuilderRefCount Bd(Builder);
RefBindings B = state->get<RefBindings>();
ExplodedNode *Pred = 0;
@@ -3297,9 +3295,9 @@ void CFRefCount::EvalEndPath(GRExprEngine& Eng,
ProcessLeaks(state, Leaked, Bd, Eng, Pred);
}
-void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
- GRExprEngine& Eng,
- GRStmtNodeBuilder& Builder,
+void CFRefCount::evalDeadSymbols(ExplodedNodeSet& Dst,
+ ExprEngine& Eng,
+ StmtNodeBuilder& Builder,
ExplodedNode* Pred,
const GRState* state,
SymbolReaper& SymReaper) {
@@ -3313,7 +3311,7 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
if (const RefVal* T = B.lookup(Sym)){
// Use the symbol as the tag.
// FIXME: This might not be as unique as we would like.
- GenericNodeBuilder Bd(Builder, S, Sym);
+ GenericNodeBuilderRefCount Bd(Builder, S, Sym);
bool stop = false;
llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
Sym, *T, stop);
@@ -3333,7 +3331,7 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
static unsigned LeakPPTag = 0;
{
- GenericNodeBuilder Bd(Builder, S, &LeakPPTag);
+ GenericNodeBuilderRefCount Bd(Builder, S, &LeakPPTag);
Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
}
@@ -3345,14 +3343,14 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
RefBindings::Factory& F = state->get_context<RefBindings>();
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I!=E; ++I) B = F.Remove(B, *I);
+ E = SymReaper.dead_end(); I!=E; ++I) B = F.remove(B, *I);
state = state->set<RefBindings>(B);
Builder.MakeNode(Dst, S, Pred, state);
}
void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
- GRStmtNodeBuilder& Builder,
+ StmtNodeBuilder& Builder,
const Expr* NodeExpr,
SourceRange ErrorRange,
ExplodedNode* Pred,
@@ -3413,7 +3411,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
// Scan the BlockDecRefExprs for any object the retain/release checker
// may be tracking.
- if (!BE->hasBlockDeclRefExprs())
+ if (!BE->getBlockDecl()->hasCaptures())
return;
const GRState *state = C.getState();
@@ -3431,7 +3429,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
// and in implicit increment/decrement of a retain count.
llvm::SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getPredecessor()->getLocationContext();
- MemRegionManager &MemMgr = C.getValueManager().getRegionManager();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
for ( ; I != E; ++I) {
const VarRegion *VR = *I;
@@ -3451,7 +3449,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
// Transfer function creation for external clients.
//===----------------------------------------------------------------------===//
-void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
+void CFRefCount::RegisterChecks(ExprEngine& Eng) {
BugReporter &BR = Eng.getBugReporter();
useAfterRelease = new UseAfterRelease(this);
@@ -3509,13 +3507,13 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
// Save the reference to the BugReporter.
this->BR = &BR;
- // Register the RetainReleaseChecker with the GRExprEngine object.
+ // Register the RetainReleaseChecker with the ExprEngine object.
// Functionality in CFRefCount will be migrated to RetainReleaseChecker
// over time.
Eng.registerCheck(new RetainReleaseChecker(this));
}
-GRTransferFuncs* clang::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+TransferFuncs* ento::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
const LangOptions& lopts) {
return new CFRefCount(Ctx, GCEnabled, lopts);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt
new file mode 100644
index 0000000..14c636c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -0,0 +1,41 @@
+set(LLVM_LINK_COMPONENTS support)
+
+set(LLVM_USED_LIBS clangBasic clangLex clangAST clangFrontend clangRewrite)
+
+add_clang_library(clangStaticAnalyzerCore
+ AggExprVisitor.cpp
+ AnalysisManager.cpp
+ BasicConstraintManager.cpp
+ BasicStore.cpp
+ BasicValueFactory.cpp
+ BugReporter.cpp
+ BugReporterVisitors.cpp
+ CFRefCount.cpp
+ Checker.cpp
+ CheckerHelpers.cpp
+ CheckerManager.cpp
+ Environment.cpp
+ ExplodedGraph.cpp
+ FlatStore.cpp
+ BlockCounter.cpp
+ CXXExprEngine.cpp
+ CoreEngine.cpp
+ GRState.cpp
+ HTMLDiagnostics.cpp
+ MemRegion.cpp
+ ObjCMessage.cpp
+ PathDiagnostic.cpp
+ PlistDiagnostics.cpp
+ RangeConstraintManager.cpp
+ RegionStore.cpp
+ SimpleConstraintManager.cpp
+ SimpleSValBuilder.cpp
+ Store.cpp
+ SValBuilder.cpp
+ SVals.cpp
+ SymbolManager.cpp
+ TextPathDiagnostics.cpp
+ )
+
+add_dependencies(clangStaticAnalyzerCore ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CXXExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CXXExprEngine.cpp
new file mode 100644
index 0000000..56dfe8c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CXXExprEngine.cpp
@@ -0,0 +1,322 @@
+//===- CXXExprEngine.cpp - C++ expr evaluation engine -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ expression evaluation engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CallExprWLItem {
+public:
+ CallExpr::const_arg_iterator I;
+ ExplodedNode *N;
+
+ CallExprWLItem(const CallExpr::const_arg_iterator &i, ExplodedNode *n)
+ : I(i), N(n) {}
+};
+}
+
+void ExprEngine::evalArguments(ConstExprIterator AI, ConstExprIterator AE,
+ const FunctionProtoType *FnType,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst,
+ bool FstArgAsLValue) {
+
+
+ llvm::SmallVector<CallExprWLItem, 20> WorkList;
+ WorkList.reserve(AE - AI);
+ WorkList.push_back(CallExprWLItem(AI, Pred));
+
+ while (!WorkList.empty()) {
+ CallExprWLItem Item = WorkList.back();
+ WorkList.pop_back();
+
+ if (Item.I == AE) {
+ Dst.insert(Item.N);
+ continue;
+ }
+
+ // Evaluate the argument.
+ ExplodedNodeSet Tmp;
+ if (FstArgAsLValue) {
+ FstArgAsLValue = false;
+ }
+
+ Visit(*Item.I, Item.N, Tmp);
+ ++(Item.I);
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI)
+ WorkList.push_back(CallExprWLItem(Item.I, *NI));
+ }
+}
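// Editor's note: evalArguments above walks a DFS worklist so that an argument
// whose evaluation forks into several successor states yields one path per
// combination. A standalone analogue of that pattern (all names assumed):
#include <cstddef>
#include <utility>
#include <vector>

using State = int;  // stand-in for ExplodedNode*

// Evaluating one argument may produce several successor states.
static std::vector<State> evalOneArg(State S, int Arg) {
  return { S * 10 + Arg };  // a real engine may return more than one
}

static std::vector<State> evalAllArgs(const std::vector<int> &Args, State S0) {
  std::vector<std::pair<std::size_t, State>> Work;
  Work.push_back({0, S0});
  std::vector<State> Done;
  while (!Work.empty()) {
    std::pair<std::size_t, State> Item = Work.back();
    Work.pop_back();
    if (Item.first == Args.size()) {  // all arguments evaluated on this path
      Done.push_back(Item.second);
      continue;
    }
    for (State Next : evalOneArg(Item.second, Args[Item.first]))
      Work.push_back({Item.first + 1, Next});
  }
  return Done;
}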
+
+const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXRecordDecl *D,
+ const StackFrameContext *SFC) {
+ const Type *T = D->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T, 0));
+ return svalBuilder.getRegionManager().getCXXThisRegion(PT, SFC);
+}
+
+const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXMethodDecl *decl,
+ const StackFrameContext *frameCtx) {
+ return svalBuilder.getRegionManager().
+ getCXXThisRegion(decl->getThisType(getContext()), frameCtx);
+}
+
+void ExprEngine::CreateCXXTemporaryObject(const Expr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+
+ // Bind the temporary object to the value of the expression. Then bind
+ // the expression to the location of the object.
+ SVal V = state->getSVal(Ex);
+
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXTempObjectRegion(Ex,
+ Pred->getLocationContext());
+
+ state = state->bindLoc(loc::MemRegionVal(R), V);
+ MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, loc::MemRegionVal(R)));
+ }
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
+ const MemRegion *Dest,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (!Dest)
+ Dest = svalBuilder.getRegionManager().getCXXTempObjectRegion(E,
+ Pred->getLocationContext());
+
+ if (E->isElidable()) {
+ VisitAggExpr(E->getArg(0), Dest, Pred, Dst);
+ return;
+ }
+
+ const CXXConstructorDecl *CD = E->getConstructor();
+ assert(CD);
+
+ if (!(CD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
+ // FIXME: invalidate the object.
+ return;
+
+
+ // Evaluate other arguments.
+ ExplodedNodeSet argsEvaluated;
+ const FunctionProtoType *FnType = CD->getType()->getAs<FunctionProtoType>();
+ evalArguments(E->arg_begin(), E->arg_end(), FnType, Pred, argsEvaluated);
+ // The callee stack frame context used to create the 'this' parameter region.
+ const StackFrameContext *SFC = AMgr.getStackFrame(CD,
+ Pred->getLocationContext(),
+ E, Builder->getBlock(),
+ Builder->getIndex());
+
+ const CXXThisRegion *ThisR =getCXXThisRegion(E->getConstructor()->getParent(),
+ SFC);
+
+ CallEnter Loc(E, SFC, Pred->getLocationContext());
+ for (ExplodedNodeSet::iterator NI = argsEvaluated.begin(),
+ NE = argsEvaluated.end(); NI != NE; ++NI) {
+ const GRState *state = GetState(*NI);
+ // Setup 'this' region, so that the ctor is evaluated on the object pointed
+ // by 'Dest'.
+ state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
+ ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
+ if (N)
+ Dst.Add(N);
+ }
+}
+
+void ExprEngine::VisitCXXDestructor(const CXXDestructorDecl *DD,
+ const MemRegion *Dest,
+ const Stmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (!(DD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
+ return;
+ // Create the context for 'this' region.
+ const StackFrameContext *SFC = AMgr.getStackFrame(DD,
+ Pred->getLocationContext(),
+ S, Builder->getBlock(),
+ Builder->getIndex());
+
+ const CXXThisRegion *ThisR = getCXXThisRegion(DD->getParent(), SFC);
+
+ CallEnter PP(S, SFC, Pred->getLocationContext());
+
+ const GRState *state = Pred->getState();
+ state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
+ ExplodedNode *N = Builder->generateNode(PP, state, Pred);
+ if (N)
+ Dst.Add(N);
+}
+
+void ExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Get the method type.
+ const FunctionProtoType *FnType =
+ MCE->getCallee()->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Method type not available");
+
+ // Evaluate explicit arguments with a worklist.
+ ExplodedNodeSet argsEvaluated;
+ evalArguments(MCE->arg_begin(), MCE->arg_end(), FnType, Pred, argsEvaluated);
+
+ // Evaluate the implicit object argument.
+ ExplodedNodeSet AllargsEvaluated;
+ const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee()->IgnoreParens());
+ if (!ME)
+ return;
+ Expr *ObjArgExpr = ME->getBase();
+ for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
+ E = argsEvaluated.end(); I != E; ++I) {
+ Visit(ObjArgExpr, *I, AllargsEvaluated);
+ }
+
+ // Now evaluate the call itself.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+ assert(MD && "not a CXXMethodDecl?");
+ evalMethodCall(MCE, MD, ObjArgExpr, Pred, AllargsEvaluated, Dst);
+}
+
+void ExprEngine::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *C,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(C->getCalleeDecl());
+ if (!MD) {
+ // If the operator doesn't represent a method call, treat it as a regular call.
+ VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst);
+ return;
+ }
+
+ // Determine the type of function we're calling (if available).
+ const FunctionProtoType *Proto = NULL;
+ QualType FnType = C->getCallee()->IgnoreParens()->getType();
+ if (const PointerType *FnTypePtr = FnType->getAs<PointerType>())
+ Proto = FnTypePtr->getPointeeType()->getAs<FunctionProtoType>();
+
+ // Evaluate the arguments, treating the first one (the object the method
+ // is called on) as an lvalue.
+ ExplodedNodeSet argsEvaluated;
+ evalArguments(C->arg_begin(), C->arg_end(), Proto, Pred, argsEvaluated, true);
+
+ // Now evaluate the call itself.
+ evalMethodCall(C, MD, C->getArg(0), Pred, argsEvaluated, Dst);
+}
+
+void ExprEngine::evalMethodCall(const CallExpr *MCE, const CXXMethodDecl *MD,
+ const Expr *ThisExpr, ExplodedNode *Pred,
+ ExplodedNodeSet &Src, ExplodedNodeSet &Dst) {
+ // Allow checkers to pre-visit the member call.
+ ExplodedNodeSet PreVisitChecks;
+ CheckerVisit(MCE, PreVisitChecks, Src, PreVisitStmtCallback);
+
+ if (!(MD->isThisDeclarationADefinition() && AMgr.shouldInlineCall())) {
+ // FIXME: conservative method call evaluation.
+ CheckerVisit(MCE, Dst, PreVisitChecks, PostVisitStmtCallback);
+ return;
+ }
+
+ const StackFrameContext *SFC = AMgr.getStackFrame(MD,
+ Pred->getLocationContext(),
+ MCE,
+ Builder->getBlock(),
+ Builder->getIndex());
+ const CXXThisRegion *ThisR = getCXXThisRegion(MD, SFC);
+ CallEnter Loc(MCE, SFC, Pred->getLocationContext());
+ for (ExplodedNodeSet::iterator I = PreVisitChecks.begin(),
+ E = PreVisitChecks.end(); I != E; ++I) {
+ // Set up 'this' region.
+ const GRState *state = GetState(*I);
+ state = state->bindLoc(loc::MemRegionVal(ThisR), state->getSVal(ThisExpr));
+ Dst.Add(Builder->generateNode(Loc, state, *I));
+ }
+}
+
+void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ if (CNE->isArray()) {
+ // FIXME: allocating an array has not been handled.
+ return;
+ }
+
+ unsigned Count = Builder->getCurrentBlockCount();
+ DefinedOrUnknownSVal symVal =
+ svalBuilder.getConjuredSymbolVal(NULL, CNE, CNE->getType(), Count);
+ const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
+
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+
+ // Evaluate constructor arguments.
+ const FunctionProtoType *FnType = NULL;
+ const CXXConstructorDecl *CD = CNE->getConstructor();
+ if (CD)
+ FnType = CD->getType()->getAs<FunctionProtoType>();
+ ExplodedNodeSet argsEvaluated;
+ evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
+ FnType, Pred, argsEvaluated);
+
+ // Initialize the object region and bind the 'new' expression.
+ for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
+ E = argsEvaluated.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+
+ if (ObjTy->isRecordType()) {
+ state = state->invalidateRegion(EleReg, CNE, Count);
+ } else {
+ if (CNE->hasInitializer()) {
+ SVal V = state->getSVal(*CNE->constructor_arg_begin());
+ state = state->bindLoc(loc::MemRegionVal(EleReg), V);
+ } else {
+ // Explicitly set to undefined, because currently we retrieve symbolic
+ // value from symbolic region.
+ state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
+ }
+ }
+ state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
+ MakeNode(Dst, CNE, *I, state);
+ }
+}
+
+void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
+ ExplodedNode *Pred,ExplodedNodeSet &Dst) {
+ // Should do more checking.
+ ExplodedNodeSet Argevaluated;
+ Visit(CDE->getArgument(), Pred, Argevaluated);
+ for (ExplodedNodeSet::iterator I = Argevaluated.begin(),
+ E = Argevaluated.end(); I != E; ++I) {
+ const GRState *state = GetState(*I);
+ MakeNode(Dst, CDE, *I, state);
+ }
+}
+
+void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Get the this object region from StoreManager.
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXThisRegion(
+ getContext().getCanonicalType(TE->getType()),
+ Pred->getLocationContext());
+
+ const GRState *state = GetState(Pred);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ MakeNode(Dst, TE, Pred, state->BindExpr(TE, V));
+}
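// Editor's note: the recurring pattern in this new file -- construct the
// callee's StackFrameContext, bind the 'this' region to the object the call
// acts on, then generate a CallEnter node -- summarized as pseudo-steps (a
// sketch of the flow, not additional patch content):
//   1. SFC   = AMgr.getStackFrame(Method, CallerCtx, CallSite, Block, Index);
//   2. ThisR = getCXXThisRegion(Method, SFC);
//   3. state = state->bindLoc(loc::MemRegionVal(ThisR), ObjectVal);
//   4. Dst.Add(Builder->generateNode(CallEnter(CallSite, SFC, CallerCtx),
//                                    state, Pred));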
diff --git a/contrib/llvm/tools/clang/lib/Checker/Checker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
index 36323b9..a014eec 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Checker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
@@ -12,8 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
using namespace clang;
+using namespace ento;
Checker::~Checker() {}
@@ -23,11 +24,10 @@ CheckerContext::~CheckerContext() {
// without actually generating a new node. We also shouldn't autotransition
// if we are building sinks or we generated a node and decided to not
// add it as a transition.
- if (Dst.size() == size && !B.BuildSinks && !B.HasGeneratedNode) {
+ if (Dst.size() == size && !B.BuildSinks && !B.hasGeneratedNode) {
if (ST && ST != B.GetState(Pred)) {
static int autoTransitionTag = 0;
- B.Tag = &autoTransitionTag;
- addTransition(ST);
+ addTransition(ST, &autoTransitionTag);
}
else
Dst.Add(Pred);
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckerHelpers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index ece6943..28df695 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckerHelpers.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/AST/Expr.h"
// Recursively find any substatements containing macros
-bool clang::containsMacro(const Stmt *S) {
+bool clang::ento::containsMacro(const Stmt *S) {
if (S->getLocStart().isMacroID())
return true;
@@ -32,7 +32,7 @@ bool clang::containsMacro(const Stmt *S) {
}
// Recursively find any substatements containing enum constants
-bool clang::containsEnum(const Stmt *S) {
+bool clang::ento::containsEnum(const Stmt *S) {
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
if (DR && isa<EnumConstantDecl>(DR->getDecl()))
@@ -48,7 +48,7 @@ bool clang::containsEnum(const Stmt *S) {
}
// Recursively find any substatements containing static vars
-bool clang::containsStaticLocal(const Stmt *S) {
+bool clang::ento::containsStaticLocal(const Stmt *S) {
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
if (DR)
@@ -66,7 +66,7 @@ bool clang::containsStaticLocal(const Stmt *S) {
}
// Recursively find any substatements containing __builtin_offsetof
-bool clang::containsBuiltinOffsetOf(const Stmt *S) {
+bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
if (isa<OffsetOfExpr>(S))
return true;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
new file mode 100644
index 0000000..1989b82
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -0,0 +1,85 @@
+//===--- CheckerManager.cpp - Static Analyzer Checker Manager -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerProvider.h"
+#include "clang/AST/DeclBase.h"
+
+using namespace clang;
+using namespace ento;
+
+void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D);
+
+ unsigned DeclKind = D->getKind();
+ CachedDeclCheckers *checkers = 0;
+ CachedDeclCheckersMapTy::iterator CCI = CachedDeclCheckersMap.find(DeclKind);
+ if (CCI != CachedDeclCheckersMap.end()) {
+ checkers = &(CCI->second);
+ } else {
+ // Find the checkers that should run for this Decl and cache them.
+ checkers = &CachedDeclCheckersMap[DeclKind];
+ for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) {
+ DeclCheckerInfo &info = DeclCheckers[i];
+ if (info.IsForDeclFn(D))
+ checkers->push_back(std::make_pair(info.Checker, info.CheckFn));
+ }
+ }
+
+ assert(checkers);
+ for (CachedDeclCheckers::iterator
+ I = checkers->begin(), E = checkers->end(); I != E; ++I) {
+ CheckerRef checker = I->first;
+ CheckDeclFunc fn = I->second;
+ fn(checker, D, mgr, BR);
+ }
+}
+
+void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D && D->hasBody());
+
+ for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i) {
+ CheckerRef checker = BodyCheckers[i].first;
+ CheckDeclFunc fn = BodyCheckers[i].second;
+ fn(checker, D, mgr, BR);
+ }
+}
+
+void CheckerManager::_registerForDecl(CheckerRef checker, CheckDeclFunc checkfn,
+ HandlesDeclFunc isForDeclFn) {
+ DeclCheckerInfo info = { checker, checkfn, isForDeclFn };
+ DeclCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForBody(CheckerRef checker,
+ CheckDeclFunc checkfn) {
+ BodyCheckers.push_back(std::make_pair(checker, checkfn));
+}
+
+void CheckerManager::registerCheckersToEngine(ExprEngine &eng) {
+ for (unsigned i = 0, e = Funcs.size(); i != e; ++i)
+ Funcs[i](eng);
+}
+
+CheckerManager::~CheckerManager() {
+ for (unsigned i = 0, e = Checkers.size(); i != e; ++i) {
+ CheckerRef checker = Checkers[i].first;
+ Dtor dtor = Checkers[i].second;
+ dtor(checker);
+ }
+}
+
+// Anchor for the vtable.
+CheckerProvider::~CheckerProvider() { }
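// Editor's note: a standalone sketch of the caching scheme in
// runCheckersOnASTDecl above (all types assumed) -- the handler list is
// filtered once per Decl kind and memoized, so each IsForDeclFn predicate
// runs once per kind rather than once per declaration:
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

struct DeclSketch { unsigned Kind; };
typedef void (*CheckFn)(const DeclSketch &);
typedef bool (*HandlesFn)(const DeclSketch &);

struct CheckerRegistrySketch {
  std::vector<std::pair<HandlesFn, CheckFn> > All;
  std::map<unsigned, std::vector<CheckFn> > Cache;

  void run(const DeclSketch &D) {
    std::map<unsigned, std::vector<CheckFn> >::iterator I = Cache.find(D.Kind);
    if (I == Cache.end()) {
      std::vector<CheckFn> &Fns = Cache[D.Kind];  // filter and memoize
      for (std::size_t i = 0, e = All.size(); i != e; ++i)
        if (All[i].first(D))
          Fns.push_back(All[i].second);
      I = Cache.find(D.Kind);
    }
    for (std::size_t i = 0, e = I->second.size(); i != e; ++i)
      I->second[i](D);
  }
};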
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 5125f36..070042a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -1,4 +1,4 @@
-//==- GRCoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
-#include "clang/Checker/PathSensitive/GRCoreEngine.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/Index/TranslationUnit.h"
#include "clang/AST/Expr.h"
#include "llvm/Support/Casting.h"
@@ -25,86 +25,107 @@
using llvm::cast;
using llvm::isa;
using namespace clang;
+using namespace ento;
// This should be removed in the future.
namespace clang {
-GRTransferFuncs* MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+namespace ento {
+TransferFuncs* MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
const LangOptions& lopts);
}
+}
//===----------------------------------------------------------------------===//
// Worklist classes for exploration of reachable states.
//===----------------------------------------------------------------------===//
+WorkList::Visitor::~Visitor() {}
+
namespace {
-class DFS : public GRWorkList {
- llvm::SmallVector<GRWorkListUnit,20> Stack;
+class DFS : public WorkList {
+ llvm::SmallVector<WorkListUnit,20> Stack;
public:
virtual bool hasWork() const {
return !Stack.empty();
}
- virtual void Enqueue(const GRWorkListUnit& U) {
+ virtual void enqueue(const WorkListUnit& U) {
Stack.push_back(U);
}
- virtual GRWorkListUnit Dequeue() {
+ virtual WorkListUnit dequeue() {
assert (!Stack.empty());
- const GRWorkListUnit& U = Stack.back();
+ const WorkListUnit& U = Stack.back();
Stack.pop_back(); // This technically "invalidates" U, but we are fine.
return U;
}
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (llvm::SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
};
-class BFS : public GRWorkList {
- std::queue<GRWorkListUnit> Queue;
+class BFS : public WorkList {
+ std::deque<WorkListUnit> Queue;
public:
virtual bool hasWork() const {
return !Queue.empty();
}
- virtual void Enqueue(const GRWorkListUnit& U) {
- Queue.push(U);
+ virtual void enqueue(const WorkListUnit& U) {
+ Queue.push_front(U);
}
- virtual GRWorkListUnit Dequeue() {
- // Don't use const reference. The subsequent pop_back() might make it
- // unsafe.
- GRWorkListUnit U = Queue.front();
- Queue.pop();
+ virtual WorkListUnit dequeue() {
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
return U;
}
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
};
} // end anonymous namespace
-// Place the dstor for GRWorkList here because it contains virtual member
+// Place the dtor for WorkList here because it contains virtual member
// functions, and we want the code for the dtor generated in one compilation unit.
-GRWorkList::~GRWorkList() {}
+WorkList::~WorkList() {}
-GRWorkList *GRWorkList::MakeDFS() { return new DFS(); }
-GRWorkList *GRWorkList::MakeBFS() { return new BFS(); }
+WorkList *WorkList::makeDFS() { return new DFS(); }
+WorkList *WorkList::makeBFS() { return new BFS(); }
namespace {
- class BFSBlockDFSContents : public GRWorkList {
- std::queue<GRWorkListUnit> Queue;
- llvm::SmallVector<GRWorkListUnit,20> Stack;
+ class BFSBlockDFSContents : public WorkList {
+ std::deque<WorkListUnit> Queue;
+ llvm::SmallVector<WorkListUnit,20> Stack;
public:
virtual bool hasWork() const {
return !Queue.empty() || !Stack.empty();
}
- virtual void Enqueue(const GRWorkListUnit& U) {
+ virtual void enqueue(const WorkListUnit& U) {
if (isa<BlockEntrance>(U.getNode()->getLocation()))
- Queue.push(U);
+ Queue.push_front(U);
else
Stack.push_back(U);
}
- virtual GRWorkListUnit Dequeue() {
+ virtual WorkListUnit dequeue() {
// Process all basic blocks to completion.
if (!Stack.empty()) {
- const GRWorkListUnit& U = Stack.back();
+ const WorkListUnit& U = Stack.back();
Stack.pop_back(); // This technically "invalidates" U, but we are fine.
return U;
}
@@ -112,14 +133,28 @@ namespace {
assert(!Queue.empty());
// Don't use a const reference. The subsequent pop_front() might make it
// unsafe.
- GRWorkListUnit U = Queue.front();
- Queue.pop();
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
return U;
}
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (llvm::SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+
};
} // end anonymous namespace
-GRWorkList* GRWorkList::MakeBFSBlockDFSContents() {
+WorkList* WorkList::makeBFSBlockDFSContents() {
return new BFSBlockDFSContents();
}
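The container changes above are not cosmetic: std::queue exposes no iterators, so the new visitItemsInWorkList() hook only becomes implementable once the queues turn into std::deque. A minimal stand-alone illustration of the distinction, using a placeholder Item type rather than the real WorkListUnit:

#include <deque>

struct Item { int id; };

// std::deque supports iteration, which is what visitItemsInWorkList() needs;
// std::queue has no begin()/end(), so the same walk would not even compile.
bool visitAll(std::deque<Item> &q) {
  for (std::deque<Item>::iterator I = q.begin(), E = q.end(); I != E; ++I)
    if (I->id < 0)   // a visitor returning true aborts the walk early
      return true;
  return false;
}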
@@ -128,7 +163,7 @@ GRWorkList* GRWorkList::MakeBFSBlockDFSContents() {
//===----------------------------------------------------------------------===//
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
-bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
const GRState *InitState) {
if (G->num_roots() == 0) { // Initialize the analysis by constructing
@@ -154,14 +189,22 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
if (!InitState)
// Generate the root.
- GenerateNode(StartLoc, getInitialState(L), 0);
+ generateNode(StartLoc, SubEng.getInitialState(L), 0);
else
- GenerateNode(StartLoc, InitState, 0);
+ generateNode(StartLoc, InitState, 0);
}
- while (Steps && WList->hasWork()) {
- --Steps;
- const GRWorkListUnit& WU = WList->Dequeue();
+ // Check whether we have a step limit.
+ bool UnlimitedSteps = Steps == 0;
+
+ while (WList->hasWork()) {
+ if (!UnlimitedSteps) {
+ if (Steps == 0)
+ break;
+ --Steps;
+ }
+
+ const WorkListUnit& WU = WList->dequeue();
// Set the current block counter.
WList->setBlockCounter(WU.getBlockCounter());
@@ -193,18 +236,18 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
break;
default:
- assert(isa<PostStmt>(Node->getLocation()));
- HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
- WU.getIndex(), Node);
+ assert(isa<PostStmt>(Node->getLocation()) ||
+ isa<PostInitializer>(Node->getLocation()));
+ HandlePostStmt(WU.getBlock(), WU.getIndex(), Node);
break;
}
}
- SubEngine.ProcessEndWorklist(hasWorkRemaining());
+ SubEng.processEndWorklist(hasWorkRemaining());
return WList->hasWork();
}
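The reworked driver loop above changes the meaning of Steps == 0 from "take no steps" to "no step budget at all". A hedged reduction of just that control flow, with hasWork()/doWork() standing in for the real dequeue-and-dispatch body:

// Steps == 0 now means "unlimited"; a nonzero budget is decremented per unit.
void runWorklist(unsigned Steps, bool (*hasWork)(), void (*doWork)()) {
  const bool UnlimitedSteps = (Steps == 0);
  while (hasWork()) {
    if (!UnlimitedSteps) {
      if (Steps == 0)
        break;        // budget exhausted; leave remaining work undone
      --Steps;
    }
    doWork();
  }
}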
-void GRCoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
+void CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
unsigned Steps,
const GRState *InitState,
ExplodedNodeSet &Dst) {
@@ -215,19 +258,19 @@ void GRCoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
}
}
-void GRCoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
+void CoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
unsigned Index, ExplodedNode *Pred) {
- GRCallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(),
+ CallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(),
L.getCalleeContext(), Block, Index);
- ProcessCallEnter(Builder);
+ SubEng.processCallEnter(Builder);
}
-void GRCoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) {
- GRCallExitNodeBuilder Builder(*this, Pred);
- ProcessCallExit(Builder);
+void CoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) {
+ CallExitNodeBuilder Builder(*this, Pred);
+ SubEng.processCallExit(Builder);
}
-void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
+void CoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
const CFGBlock* Blk = L.getDst();
@@ -238,28 +281,44 @@ void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
&& "EXIT block cannot contain Stmts.");
// Process the final state transition.
- GREndPathNodeBuilder Builder(Blk, Pred, this);
- ProcessEndPath(Builder);
+ EndOfFunctionNodeBuilder Builder(Blk, Pred, this);
+ SubEng.processEndOfFunction(Builder);
// This path is done. Don't enqueue any more nodes.
return;
}
- // FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
+ // Call into the subengine to process entering the CFGBlock.
+ ExplodedNodeSet dstNodes;
+ BlockEntrance BE(Blk, Pred->getLocationContext());
+ GenericNodeBuilder<BlockEntrance> nodeBuilder(*this, Pred, BE);
+ SubEng.processCFGBlockEntrance(dstNodes, nodeBuilder);
- if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter()))
- GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()),
- Pred->State, Pred);
+ if (dstNodes.empty()) {
+ if (!nodeBuilder.hasGeneratedNode) {
+ // Auto-generate a node and enqueue it to the worklist.
+ generateNode(BE, Pred->State, Pred);
+ }
+ }
else {
- blocksAborted.push_back(std::make_pair(L, Pred));
+ for (ExplodedNodeSet::iterator I = dstNodes.begin(), E = dstNodes.end();
+ I != E; ++I) {
+ WList->enqueue(*I);
+ }
+ }
+
+ for (llvm::SmallVectorImpl<ExplodedNode*>::const_iterator
+ I = nodeBuilder.sinks().begin(), E = nodeBuilder.sinks().end();
+ I != E; ++I) {
+ blocksAborted.push_back(std::make_pair(L, *I));
}
}
-void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
+void CoreEngine::HandleBlockEntrance(const BlockEntrance& L,
ExplodedNode* Pred) {
// Increment the block counter.
- GRBlockCounter Counter = WList->getBlockCounter();
+ BlockCounter Counter = WList->getBlockCounter();
Counter = BCounterFactory.IncrementCount(Counter,
Pred->getLocationContext()->getCurrentStackFrame(),
L.getBlock()->getBlockID());
@@ -267,15 +326,15 @@ void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
// Process the entrance of the block.
if (CFGElement E = L.getFirstElement()) {
- GRStmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
- SubEngine.getStateManager());
- ProcessStmt(E, Builder);
+ StmtNodeBuilder Builder(L.getBlock(), 0, Pred, this,
+ SubEng.getStateManager());
+ SubEng.processCFGElement(E, Builder);
}
else
HandleBlockExit(L.getBlock(), Pred);
}
-void GRCoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
+void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
if (const Stmt* Term = B->getTerminator()) {
switch (Term->getStmtClass()) {
@@ -287,8 +346,10 @@ void GRCoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
return;
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
- HandleBranch(cast<ConditionalOperator>(Term)->getCond(), Term, B, Pred);
+ HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
+ Term, B, Pred);
return;
// FIXME: Use constant-folding in CFG construction to simplify this
@@ -319,11 +380,11 @@ void GRCoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
// Only 1 successor: the indirect goto dispatch block.
assert (B->succ_size() == 1);
- GRIndirectGotoNodeBuilder
+ IndirectGotoNodeBuilder
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
*(B->succ_begin()), this);
- ProcessIndirectGoto(builder);
+ SubEng.processIndirectGoto(builder);
return;
}
@@ -343,10 +404,10 @@ void GRCoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
}
case Stmt::SwitchStmtClass: {
- GRSwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+ SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
this);
- ProcessSwitch(builder);
+ SubEng.processSwitch(builder);
return;
}
@@ -359,38 +420,35 @@ void GRCoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode* Pred) {
assert (B->succ_size() == 1 &&
"Blocks with no terminator should have at most 1 successor.");
- GenerateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+ generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
Pred->State, Pred);
}
-void GRCoreEngine::HandleBranch(const Stmt* Cond, const Stmt* Term,
+void CoreEngine::HandleBranch(const Stmt* Cond, const Stmt* Term,
const CFGBlock * B, ExplodedNode* Pred) {
- assert (B->succ_size() == 2);
-
- GRBranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
- Pred, this);
-
- ProcessBranch(Cond, Term, Builder);
+ assert(B->succ_size() == 2);
+ BranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
+ Pred, this);
+ SubEng.processBranch(Cond, Term, Builder);
}
-void GRCoreEngine::HandlePostStmt(const PostStmt& L, const CFGBlock* B,
- unsigned StmtIdx, ExplodedNode* Pred) {
-
+void CoreEngine::HandlePostStmt(const CFGBlock* B, unsigned StmtIdx,
+ ExplodedNode* Pred) {
assert (!B->empty());
if (StmtIdx == B->size())
HandleBlockExit(B, Pred);
else {
- GRStmtNodeBuilder Builder(B, StmtIdx, Pred, this,
- SubEngine.getStateManager());
- ProcessStmt((*B)[StmtIdx], Builder);
+ StmtNodeBuilder Builder(B, StmtIdx, Pred, this,
+ SubEng.getStateManager());
+ SubEng.processCFGElement((*B)[StmtIdx], Builder);
}
}
-/// GenerateNode - Utility method to generate nodes, hook up successors,
+/// generateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
-void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
- const GRState* State, ExplodedNode* Pred) {
+void CoreEngine::generateNode(const ProgramPoint& Loc,
+ const GRState* State, ExplodedNode* Pred) {
bool IsNew;
ExplodedNode* Node = G->getNode(Loc, State, &IsNew);
@@ -403,33 +461,60 @@ void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
}
// Only add 'Node' to the worklist if it was freshly generated.
- if (IsNew) WList->Enqueue(Node);
+ if (IsNew) WList->enqueue(Node);
+}
+
+ExplodedNode *
+GenericNodeBuilderImpl::generateNodeImpl(const GRState *state,
+ ExplodedNode *pred,
+ ProgramPoint programPoint,
+ bool asSink) {
+
+ hasGeneratedNode = true;
+ bool isNew;
+ ExplodedNode *node = engine.getGraph().getNode(programPoint, state, &isNew);
+ if (pred)
+ node->addPredecessor(pred, engine.getGraph());
+ if (isNew) {
+ if (asSink) {
+ node->markAsSink();
+ sinksGenerated.push_back(node);
+ }
+ return node;
+ }
+ return 0;
}
-GRStmtNodeBuilder::GRStmtNodeBuilder(const CFGBlock* b, unsigned idx,
- ExplodedNode* N, GRCoreEngine* e,
+StmtNodeBuilder::StmtNodeBuilder(const CFGBlock* b, unsigned idx,
+ ExplodedNode* N, CoreEngine* e,
GRStateManager &mgr)
- : Eng(*e), B(*b), Idx(idx), Pred(N), Mgr(mgr), Auditor(0),
- PurgingDeadSymbols(false), BuildSinks(false), HasGeneratedNode(false),
+ : Eng(*e), B(*b), Idx(idx), Pred(N), Mgr(mgr),
+ PurgingDeadSymbols(false), BuildSinks(false), hasGeneratedNode(false),
PointKind(ProgramPoint::PostStmtKind), Tag(0) {
Deferred.insert(N);
CleanedState = Pred->getState();
}
-GRStmtNodeBuilder::~GRStmtNodeBuilder() {
+StmtNodeBuilder::~StmtNodeBuilder() {
for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
if (!(*I)->isSink())
GenerateAutoTransition(*I);
}
-void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
+void StmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
assert (!N->isSink());
// Check if this node entered a callee.
if (isa<CallEnter>(N->getLocation())) {
// Still use the index of the CallExpr. It's needed to create the callee
// StackFrameContext.
- Eng.WList->Enqueue(N, &B, Idx);
+ Eng.WList->enqueue(N, &B, Idx);
+ return;
+ }
+
+ // Do not create extra nodes. Move to the next CFG element.
+ if (isa<PostInitializer>(N->getLocation())) {
+ Eng.WList->enqueue(N, &B, Idx+1);
return;
}
@@ -438,7 +523,7 @@ void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
if (Loc == N->getLocation()) {
// Note: 'N' should be a fresh node because otherwise it shouldn't be
// a member of Deferred.
- Eng.WList->Enqueue(N, &B, Idx+1);
+ Eng.WList->enqueue(N, &B, Idx+1);
return;
}
@@ -447,31 +532,20 @@ void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
Succ->addPredecessor(N, *Eng.G);
if (IsNew)
- Eng.WList->Enqueue(Succ, &B, Idx+1);
+ Eng.WList->enqueue(Succ, &B, Idx+1);
}
-ExplodedNode* GRStmtNodeBuilder::MakeNode(ExplodedNodeSet& Dst, const Stmt* S,
+ExplodedNode* StmtNodeBuilder::MakeNode(ExplodedNodeSet& Dst, const Stmt* S,
ExplodedNode* Pred, const GRState* St,
ProgramPoint::Kind K) {
- const GRState* PredState = GetState(Pred);
-
- // If the state hasn't changed, don't generate a new node.
- if (!BuildSinks && St == PredState && Auditor == 0) {
- Dst.Add(Pred);
- return NULL;
- }
ExplodedNode* N = generateNode(S, St, Pred, K);
if (N) {
if (BuildSinks)
N->markAsSink();
- else {
- if (Auditor && Auditor->Audit(N, Mgr))
- N->markAsSink();
-
+ else
Dst.Add(N);
- }
}
return N;
@@ -502,7 +576,7 @@ static ProgramPoint GetProgramPoint(const Stmt *S, ProgramPoint::Kind K,
}
ExplodedNode*
-GRStmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* state,
+StmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* state,
ExplodedNode* Pred,
ProgramPoint::Kind K,
const void *tag) {
@@ -512,7 +586,7 @@ GRStmtNodeBuilder::generateNodeInternal(const Stmt* S, const GRState* state,
}
ExplodedNode*
-GRStmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
+StmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
const GRState* State,
ExplodedNode* Pred) {
bool IsNew;
@@ -528,7 +602,7 @@ GRStmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
return NULL;
}
-ExplodedNode* GRBranchNodeBuilder::generateNode(const GRState* State,
+ExplodedNode* BranchNodeBuilder::generateNode(const GRState* State,
bool branch) {
// If the branch has been marked infeasible we should not generate a node.
@@ -556,17 +630,17 @@ ExplodedNode* GRBranchNodeBuilder::generateNode(const GRState* State,
return NULL;
}
-GRBranchNodeBuilder::~GRBranchNodeBuilder() {
+BranchNodeBuilder::~BranchNodeBuilder() {
if (!GeneratedTrue) generateNode(Pred->State, true);
if (!GeneratedFalse) generateNode(Pred->State, false);
for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
- if (!(*I)->isSink()) Eng.WList->Enqueue(*I);
+ if (!(*I)->isSink()) Eng.WList->enqueue(*I);
}
ExplodedNode*
-GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
+IndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
bool isSink) {
bool IsNew;
@@ -580,7 +654,7 @@ GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
if (isSink)
Succ->markAsSink();
else
- Eng.WList->Enqueue(Succ);
+ Eng.WList->enqueue(Succ);
return Succ;
}
@@ -590,7 +664,7 @@ GRIndirectGotoNodeBuilder::generateNode(const iterator& I, const GRState* St,
ExplodedNode*
-GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
+SwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
bool IsNew;
@@ -599,7 +673,7 @@ GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
Succ->addPredecessor(Pred, *Eng.G);
if (IsNew) {
- Eng.WList->Enqueue(Succ);
+ Eng.WList->enqueue(Succ);
return Succ;
}
@@ -608,7 +682,7 @@ GRSwitchNodeBuilder::generateCaseStmtNode(const iterator& I, const GRState* St){
ExplodedNode*
-GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
+SwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
// Get the block for the default case.
assert (Src->succ_rbegin() != Src->succ_rend());
@@ -624,7 +698,7 @@ GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
if (isSink)
Succ->markAsSink();
else
- Eng.WList->Enqueue(Succ);
+ Eng.WList->enqueue(Succ);
return Succ;
}
@@ -632,9 +706,9 @@ GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
return NULL;
}
-GREndPathNodeBuilder::~GREndPathNodeBuilder() {
+EndOfFunctionNodeBuilder::~EndOfFunctionNodeBuilder() {
// Auto-generate an EOP node if one has not been generated.
- if (!HasGeneratedNode) {
+ if (!hasGeneratedNode) {
// If we are in an inlined call, generate CallExit node.
if (Pred->getLocationContext()->getParent())
GenerateCallExitNode(Pred->State);
@@ -644,9 +718,9 @@ GREndPathNodeBuilder::~GREndPathNodeBuilder() {
}
ExplodedNode*
-GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
+EndOfFunctionNodeBuilder::generateNode(const GRState* State, const void *tag,
ExplodedNode* P) {
- HasGeneratedNode = true;
+ hasGeneratedNode = true;
bool IsNew;
ExplodedNode* Node = Eng.G->getNode(BlockEntrance(&B,
@@ -662,8 +736,8 @@ GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
return NULL;
}
-void GREndPathNodeBuilder::GenerateCallExitNode(const GRState *state) {
- HasGeneratedNode = true;
+void EndOfFunctionNodeBuilder::GenerateCallExitNode(const GRState *state) {
+ hasGeneratedNode = true;
// Create a CallExit node and enqueue it.
const StackFrameContext *LocCtx
= cast<StackFrameContext>(Pred->getLocationContext());
@@ -677,19 +751,18 @@ void GREndPathNodeBuilder::GenerateCallExitNode(const GRState *state) {
Node->addPredecessor(Pred, *Eng.G);
if (isNew)
- Eng.WList->Enqueue(Node);
+ Eng.WList->enqueue(Node);
}
-void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
- const LocationContext *LocCtx) {
+void CallEnterNodeBuilder::generateNode(const GRState *state) {
// Check if the callee is in the same translation unit.
if (CalleeCtx->getTranslationUnit() !=
Pred->getLocationContext()->getTranslationUnit()) {
// Create a new engine. We must be careful that the new engine should not
// reference data structures owned by the old engine.
- AnalysisManager &OldMgr = Eng.SubEngine.getAnalysisManager();
+ AnalysisManager &OldMgr = Eng.SubEng.getAnalysisManager();
// Get the callee's translation unit.
idx::TranslationUnit *TU = CalleeCtx->getTranslationUnit();
@@ -702,25 +775,29 @@ void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
OldMgr.getPathDiagnosticClient(),
OldMgr.getStoreManagerCreator(),
OldMgr.getConstraintManagerCreator(),
+ OldMgr.getCheckerManager(),
OldMgr.getIndexer(),
- OldMgr.getMaxNodes(), OldMgr.getMaxLoop(),
+ OldMgr.getMaxNodes(), OldMgr.getMaxVisit(),
OldMgr.shouldVisualizeGraphviz(),
OldMgr.shouldVisualizeUbigraph(),
OldMgr.shouldPurgeDead(),
OldMgr.shouldEagerlyAssume(),
OldMgr.shouldTrimGraph(),
OldMgr.shouldInlineCall(),
- OldMgr.getAnalysisContextManager().getUseUnoptimizedCFG());
- llvm::OwningPtr<GRTransferFuncs> TF(MakeCFRefCountTF(AMgr.getASTContext(),
+ OldMgr.getAnalysisContextManager().getUseUnoptimizedCFG(),
+ OldMgr.getAnalysisContextManager().getAddImplicitDtors(),
+ OldMgr.getAnalysisContextManager().getAddInitializers(),
+ OldMgr.shouldEagerlyTrimExplodedGraph());
+ llvm::OwningPtr<TransferFuncs> TF(MakeCFRefCountTF(AMgr.getASTContext(),
/* GCEnabled */ false,
AMgr.getLangOptions()));
// Create the new engine.
- GRExprEngine NewEng(AMgr, TF.take());
+ ExprEngine NewEng(AMgr, TF.take());
// Create the new LocationContext.
AnalysisContext *NewAnaCtx = AMgr.getAnalysisContext(CalleeCtx->getDecl(),
CalleeCtx->getTranslationUnit());
- const StackFrameContext *OldLocCtx = cast<StackFrameContext>(LocCtx);
+ const StackFrameContext *OldLocCtx = CalleeCtx;
const StackFrameContext *NewLocCtx = AMgr.getStackFrame(NewAnaCtx,
OldLocCtx->getParent(),
OldLocCtx->getCallSite(),
@@ -737,7 +814,7 @@ void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
}
// Get the callee entry block.
- const CFGBlock *Entry = &(LocCtx->getCFG()->getEntry());
+ const CFGBlock *Entry = &(CalleeCtx->getCFG()->getEntry());
assert(Entry->empty());
assert(Entry->succ_size() == 1);
@@ -745,26 +822,27 @@ void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
const CFGBlock *SuccB = *(Entry->succ_begin());
// Construct an edge representing the starting location in the callee.
- BlockEdge Loc(Entry, SuccB, LocCtx);
+ BlockEdge Loc(Entry, SuccB, CalleeCtx);
bool isNew;
ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
if (isNew)
- Eng.WList->Enqueue(Node);
+ Eng.WList->enqueue(Node);
}
-void GRCallExitNodeBuilder::GenerateNode(const GRState *state) {
+void CallExitNodeBuilder::generateNode(const GRState *state) {
// Get the callee's location context.
const StackFrameContext *LocCtx
= cast<StackFrameContext>(Pred->getLocationContext());
-
+ // When exiting an implicit automatic object destructor call, the callsite
+ // is the Stmt that triggers the destructor.
PostStmt Loc(LocCtx->getCallSite(), LocCtx->getParent());
bool isNew;
ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
if (isNew)
- Eng.WList->Enqueue(Node, LocCtx->getCallSiteBlock(),
+ Eng.WList->enqueue(Node, LocCtx->getCallSiteBlock(),
LocCtx->getIndex() + 1);
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 02291f4..ecaff29 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -13,36 +13,39 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
using namespace clang;
+using namespace ento;
-SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {
+SVal Environment::lookupExpr(const Stmt* E) const {
+ const SVal* X = ExprBindings.lookup(E);
+ if (X) {
+ SVal V = *X;
+ return V;
+ }
+ return UnknownVal();
+}
+SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder) const {
for (;;) {
-
switch (E->getStmtClass()) {
-
case Stmt::AddrLabelExprClass:
- return ValMgr.makeLoc(cast<AddrLabelExpr>(E));
-
- // ParenExprs are no-ops.
-
+ return svalBuilder.makeLoc(cast<AddrLabelExpr>(E));
case Stmt::ParenExprClass:
+ // ParenExprs are no-ops.
E = cast<ParenExpr>(E)->getSubExpr();
continue;
-
case Stmt::CharacterLiteralClass: {
const CharacterLiteral* C = cast<CharacterLiteral>(E);
- return ValMgr.makeIntVal(C->getValue(), C->getType());
+ return svalBuilder.makeIntVal(C->getValue(), C->getType());
}
-
case Stmt::CXXBoolLiteralExprClass: {
const SVal *X = ExprBindings.lookup(E);
if (X)
return *X;
else
- return ValMgr.makeIntVal(cast<CXXBoolLiteralExpr>(E));
+ return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(E));
}
case Stmt::IntegerLiteralClass: {
// In C++, this expression may have been bound to a temporary object.
@@ -50,34 +53,36 @@ SVal Environment::GetSVal(const Stmt *E, ValueManager& ValMgr) const {
if (X)
return *X;
else
- return ValMgr.makeIntVal(cast<IntegerLiteral>(E));
+ return svalBuilder.makeIntVal(cast<IntegerLiteral>(E));
}
-
- // Casts where the source and target type are the same
- // are no-ops. We blast through these to get the descendant
- // subexpression that has a value.
-
case Stmt::ImplicitCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
case Stmt::CStyleCastExprClass: {
+ // We blast through no-op casts to get the descendant
+ // subexpression that has a value.
const CastExpr* C = cast<CastExpr>(E);
QualType CT = C->getType();
-
if (CT->isVoidType())
return UnknownVal();
-
+ if (C->getCastKind() == CK_NoOp) {
+ E = C->getSubExpr();
+ continue;
+ }
break;
}
-
- // Handle all other Stmt* using a lookup.
-
+ case Stmt::ExprWithCleanupsClass:
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ continue;
+ case Stmt::CXXBindTemporaryExprClass:
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ continue;
+ // Handle all other Stmt* using a lookup.
default:
break;
};
-
break;
}
-
- return LookupExpr(E);
+ return lookupExpr(E);
}
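getSVal() now peels value-transparent wrappers (parens, CK_NoOp casts, ExprWithCleanups, CXXBindTemporaryExpr) iteratively before falling back to the environment lookup. A hedged sketch of that unwrapping shape over a toy tree; Expr here is a placeholder, not the real clang::Expr:

struct Expr {
  enum Kind { Paren, NoOpCast, Cleanups, BindTemporary, Other } kind;
  const Expr *sub;
};

// Descend through wrappers that cannot change the value, then stop at the
// first node that may actually carry a binding.
const Expr *peelTransparentWrappers(const Expr *E) {
  for (;;) {
    switch (E->kind) {
    case Expr::Paren:
    case Expr::NoOpCast:
    case Expr::Cleanups:
    case Expr::BindTemporary:
      E = E->sub;
      continue;
    default:
      return E;
    }
  }
}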
Environment EnvironmentManager::bindExpr(Environment Env, const Stmt *S,
@@ -86,12 +91,12 @@ Environment EnvironmentManager::bindExpr(Environment Env, const Stmt *S,
if (V.isUnknown()) {
if (Invalidate)
- return Environment(F.Remove(Env.ExprBindings, S));
+ return Environment(F.remove(Env.ExprBindings, S));
else
return Env;
}
- return Environment(F.Add(Env.ExprBindings, S, V));
+ return Environment(F.add(Env.ExprBindings, S, V));
}
static inline const Stmt *MakeLocation(const Stmt *S) {
@@ -101,7 +106,8 @@ static inline const Stmt *MakeLocation(const Stmt *S) {
Environment EnvironmentManager::bindExprAndLocation(Environment Env,
const Stmt *S,
SVal location, SVal V) {
- return Environment(F.Add(F.Add(Env.ExprBindings, MakeLocation(S), V), S, V));
+ return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(S), location),
+ S, V));
}
namespace {
@@ -132,7 +138,7 @@ static inline bool IsLocation(const Stmt *S) {
return (bool) (((uintptr_t) S) & 0x1);
}
-// RemoveDeadBindings:
+// removeDeadBindings:
// - Remove subexpression bindings.
// - Remove dead block expression bindings.
// - Keep live block expression bindings:
@@ -140,7 +146,7 @@ static inline bool IsLocation(const Stmt *S) {
// see ScanReachableSymbols.
// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
Environment
-EnvironmentManager::RemoveDeadBindings(Environment Env,
+EnvironmentManager::removeDeadBindings(Environment Env,
SymbolReaper &SymReaper,
const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
@@ -174,7 +180,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env,
// Block-level expressions in callers are assumed always live.
if (isBlockExprInCallers(BlkExpr, SymReaper.getLocationContext())) {
- NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X);
if (isa<loc::MemRegionVal>(X)) {
const MemRegion* R = cast<loc::MemRegionVal>(X).getRegion();
@@ -193,7 +199,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env,
if (SymReaper.isLive(BlkExpr)) {
// Copy the binding to the new map.
- NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X);
// If the block expr's value is a memory region, then mark that region.
if (isa<loc::MemRegionVal>(X)) {
@@ -212,7 +218,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env,
// beginning of itself, but we need its UndefinedVal to determine its
// SVal.
if (X.isUndef() && cast<UndefinedVal>(X).getData())
- NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, BlkExpr, X);
}
// Go through the deferred locations and add them to the new environment if
@@ -221,7 +227,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env,
I = deferredLocations.begin(), E = deferredLocations.end(); I != E; ++I) {
const Stmt *S = (Stmt*) (((uintptr_t) I->first) & (uintptr_t) ~0x1);
if (NewEnv.ExprBindings.lookup(S))
- NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, I->first, I->second);
+ NewEnv.ExprBindings = F.add(NewEnv.ExprBindings, I->first, I->second);
}
return NewEnv;
diff --git a/contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 20429b9..2a8364d 100644
--- a/contrib/llvm/tools/clang/lib/Checker/ExplodedGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/ExplodedGraph.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "clang/AST/Stmt.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DenseMap.h"
@@ -21,6 +21,7 @@
#include <vector>
using namespace clang;
+using namespace ento;
//===----------------------------------------------------------------------===//
// Node auditing.
@@ -40,6 +41,96 @@ void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
}
//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+typedef std::vector<ExplodedNode*> NodeList;
+static inline NodeList*& getNodeList(void *&p) { return (NodeList*&) p; }
+
+ExplodedGraph::~ExplodedGraph() {
+ if (reclaimNodes) {
+ delete getNodeList(recentlyAllocatedNodes);
+ delete getNodeList(freeNodes);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Node reclamation.
+//===----------------------------------------------------------------------===//
+
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+ if (!recentlyAllocatedNodes)
+ return;
+ NodeList &nl = *getNodeList(recentlyAllocatedNodes);
+
+ // Reclaim all nodes that match *all* of the following criteria:
+ //
+ // (1) 1 predecessor (that has one successor)
+ // (2) 1 successor (that has one predecessor)
+ // (3) The ProgramPoint is for a PostStmt.
+ // (4) There is no 'tag' for the ProgramPoint.
+ // (5) The 'store' is the same as the predecessor.
+ // (6) The 'GDM' is the same as the predecessor.
+ // (7) The LocationContext is the same as the predecessor.
+ // (8) The PostStmt is for a non-CFGElement expression.
+
+ for (NodeList::iterator i = nl.begin(), e = nl.end() ; i != e; ++i) {
+ ExplodedNode *node = *i;
+
+ // Conditions 1 and 2.
+ if (node->pred_size() != 1 || node->succ_size() != 1)
+ continue;
+
+ ExplodedNode *pred = *(node->pred_begin());
+ if (pred->succ_size() != 1)
+ continue;
+
+ ExplodedNode *succ = *(node->succ_begin());
+ if (succ->pred_size() != 1)
+ continue;
+
+ // Condition 3.
+ ProgramPoint progPoint = node->getLocation();
+ if (!isa<PostStmt>(progPoint))
+ continue;
+
+ // Condition 4.
+ PostStmt ps = cast<PostStmt>(progPoint);
+ if (ps.getTag() || isa<PostStmtCustom>(ps))
+ continue;
+
+ if (isa<BinaryOperator>(ps.getStmt()))
+ continue;
+
+ // Conditions 5, 6, and 7.
+ const GRState *state = node->getState();
+ const GRState *pred_state = pred->getState();
+ if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+ progPoint.getLocationContext() != pred->getLocationContext())
+ continue;
+
+ // Condition 8.
+ if (node->getCFG().isBlkExpr(ps.getStmt()))
+ continue;
+
+ // If we reach here, we can remove the node. This means:
+ // (a) changing the predecessor's successor to the successor of this node,
+ // (b) changing the successor's predecessor to the predecessor of this node,
+ // (c) putting 'node' onto freeNodes.
+ pred->replaceSuccessor(succ);
+ succ->replacePredecessor(pred);
+ if (!freeNodes)
+ freeNodes = new NodeList();
+ getNodeList(freeNodes)->push_back(node);
+ Nodes.RemoveNode(node);
+ --NumNodes;
+ node->~ExplodedNode();
+ }
+
+ nl.clear();
+}
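Once a node passes all eight checks, reclamation is a constant-time splice: the predecessor and successor are pointed at each other and the node's storage is parked for reuse. A hedged miniature of steps (a)-(c) on a plain doubly-linked chain; Node is a stand-in for the real ExplodedNode, which stores grouped edge lists:

#include <vector>

struct Node { Node *pred; Node *succ; };

// Preconditions mirror criteria (1)-(2): exactly one pred and one succ.
void spliceOut(Node *n, std::vector<Node*> &freeNodes) {
  n->pred->succ = n->succ;   // (a) predecessor now skips over n
  n->succ->pred = n->pred;   // (b) successor points back past n
  freeNodes.push_back(n);    // (c) n's slot can back a future allocation
}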
+
+//===----------------------------------------------------------------------===//
// ExplodedNode.
//===----------------------------------------------------------------------===//
@@ -56,6 +147,12 @@ void ExplodedNode::addPredecessor(ExplodedNode* V, ExplodedGraph &G) {
#endif
}
+void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
+ assert(getKind() == Size1);
+ P = reinterpret_cast<uintptr_t>(node);
+ assert(getKind() == Size1);
+}
+
void ExplodedNode::NodeGroup::addNode(ExplodedNode* N, ExplodedGraph &G) {
assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
assert(!getFlag());
@@ -128,10 +225,24 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint& L,
NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
if (!V) {
- // Allocate a new node.
- V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ if (freeNodes && !getNodeList(freeNodes)->empty()) {
+ NodeList *nl = getNodeList(freeNodes);
+ V = nl->back();
+ nl->pop_back();
+ }
+ else {
+ // Allocate a new node.
+ V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ }
+
new (V) NodeTy(L, State);
+ if (reclaimNodes) {
+ if (!recentlyAllocatedNodes)
+ recentlyAllocatedNodes = new NodeList();
+ getNodeList(recentlyAllocatedNodes)->push_back(V);
+ }
+
// Insert the node into the node set and return it.
Nodes.InsertNode(V, InsertPos);
diff --git a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FlatStore.cpp
index 21fa422..99a5ead 100644
--- a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FlatStore.cpp
@@ -7,11 +7,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "llvm/ADT/ImmutableIntervalMap.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
+using namespace ento;
using llvm::Interval;
// The actual store type.
@@ -30,13 +31,13 @@ public:
BVFactory(mgr.getAllocator()) {}
SVal Retrieve(Store store, Loc L, QualType T);
- Store Bind(Store store, Loc L, SVal val);
- Store Remove(Store St, Loc L);
- Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* cl,
+ StoreRef Bind(Store store, Loc L, SVal val);
+ StoreRef Remove(Store St, Loc L);
+ StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr* cl,
const LocationContext *LC, SVal v);
- Store getInitialStore(const LocationContext *InitLoc) {
- return RBFactory.GetEmptyMap().getRoot();
+ StoreRef getInitialStore(const LocationContext *InitLoc) {
+ return StoreRef(RBFactory.getEmptyMap().getRoot(), *this);
}
SubRegionMap *getSubRegionMap(Store store) {
@@ -44,22 +45,23 @@ public:
}
SVal ArrayToPointer(Loc Array);
- Store RemoveDeadBindings(Store store, const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
- llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
- return store;
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
+ return StoreRef(store, *this);
}
- Store BindDecl(Store store, const VarRegion *VR, SVal initVal);
+ StoreRef BindDecl(Store store, const VarRegion *VR, SVal initVal);
- Store BindDeclWithNoInit(Store store, const VarRegion *VR);
+ StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR);
typedef llvm::DenseSet<SymbolRef> InvalidatedSymbols;
- Store InvalidateRegions(Store store, const MemRegion * const *I,
- const MemRegion * const *E, const Expr *Ex,
- unsigned Count, InvalidatedSymbols *IS,
- bool invalidateGlobals, InvalidatedRegions *Regions);
+ StoreRef invalidateRegions(Store store, const MemRegion * const *I,
+ const MemRegion * const *E, const Expr *Ex,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions);
void print(Store store, llvm::raw_ostream& Out, const char* nl,
const char *sep);
@@ -83,7 +85,7 @@ private:
};
} // end anonymous namespace
-StoreManager *clang::CreateFlatStoreManager(GRStateManager &StMgr) {
+StoreManager *ento::CreateFlatStoreManager(GRStateManager &StMgr) {
return new FlatStoreManager(StMgr);
}
@@ -97,7 +99,7 @@ SVal FlatStoreManager::Retrieve(Store store, Loc L, QualType T) {
RegionBindings B = getRegionBindings(store);
const BindingVal *BV = B.lookup(RI.R);
if (BV) {
- const SVal *V = BVFactory.Lookup(*BV, RI.I);
+ const SVal *V = BVFactory.lookup(*BV, RI.I);
if (V)
return *V;
else
@@ -111,60 +113,60 @@ SVal FlatStoreManager::RetrieveRegionWithNoBinding(const MemRegion *R,
if (R->hasStackNonParametersStorage())
return UndefinedVal();
else
- return ValMgr.getRegionValueSymbolVal(cast<TypedRegion>(R));
+ return svalBuilder.getRegionValueSymbolVal(cast<TypedRegion>(R));
}
-Store FlatStoreManager::Bind(Store store, Loc L, SVal val) {
+StoreRef FlatStoreManager::Bind(Store store, Loc L, SVal val) {
const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
RegionBindings B = getRegionBindings(store);
const BindingVal *V = B.lookup(R);
- BindingVal BV = BVFactory.GetEmptyMap();
+ BindingVal BV = BVFactory.getEmptyMap();
if (V)
BV = *V;
RegionInterval RI = RegionToInterval(R);
// FIXME: FlatStore should handle regions with unknown intervals.
if (!RI.R)
- return B.getRoot();
- BV = BVFactory.Add(BV, RI.I, val);
- B = RBFactory.Add(B, RI.R, BV);
- return B.getRoot();
+ return StoreRef(B.getRoot(), *this);
+ BV = BVFactory.add(BV, RI.I, val);
+ B = RBFactory.add(B, RI.R, BV);
+ return StoreRef(B.getRoot(), *this);
}
-Store FlatStoreManager::Remove(Store store, Loc L) {
- return store;
+StoreRef FlatStoreManager::Remove(Store store, Loc L) {
+ return StoreRef(store, *this);
}
-Store FlatStoreManager::BindCompoundLiteral(Store store,
+StoreRef FlatStoreManager::BindCompoundLiteral(Store store,
const CompoundLiteralExpr* cl,
const LocationContext *LC,
SVal v) {
- return store;
+ return StoreRef(store, *this);
}
SVal FlatStoreManager::ArrayToPointer(Loc Array) {
return Array;
}
-Store FlatStoreManager::BindDecl(Store store, const VarRegion *VR,
- SVal initVal) {
- return Bind(store, ValMgr.makeLoc(VR), initVal);
+StoreRef FlatStoreManager::BindDecl(Store store, const VarRegion *VR,
+ SVal initVal) {
+ return Bind(store, svalBuilder.makeLoc(VR), initVal);
}
-Store FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR) {
- return store;
+StoreRef FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR){
+ return StoreRef(store, *this);
}
-Store FlatStoreManager::InvalidateRegions(Store store,
- const MemRegion * const *I,
- const MemRegion * const *E,
- const Expr *Ex, unsigned Count,
- InvalidatedSymbols *IS,
- bool invalidateGlobals,
- InvalidatedRegions *Regions) {
+StoreRef FlatStoreManager::invalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *E,
+ const Expr *Ex, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions) {
assert(false && "Not implemented");
- return store;
+ return StoreRef(store, *this);
}
void FlatStoreManager::print(Store store, llvm::raw_ostream& Out,
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/GRState.cpp
index d38ae21..7b21677 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/GRState.cpp
@@ -12,18 +12,44 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/CFG.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRSubEngine.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
// Give the vtable for ConstraintManager somewhere to live.
// FIXME: Move this elsewhere.
ConstraintManager::~ConstraintManager() {}
+GRState::GRState(GRStateManager *mgr, const Environment& env,
+ StoreRef st, GenericDataMap gdm)
+ : stateMgr(mgr),
+ Env(env),
+ store(st.getStore()),
+ GDM(gdm),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+GRState::GRState(const GRState& RHS)
+ : llvm::FoldingSetNode(),
+ stateMgr(RHS.stateMgr),
+ Env(RHS.Env),
+ store(RHS.store),
+ GDM(RHS.GDM),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+GRState::~GRState() {
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+}
+
GRStateManager::~GRStateManager() {
for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
E=Printers.end(); I!=E; ++I)
@@ -35,7 +61,7 @@ GRStateManager::~GRStateManager() {
}
const GRState*
-GRStateManager::RemoveDeadBindings(const GRState* state,
+GRStateManager::removeDeadBindings(const GRState* state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
@@ -48,14 +74,14 @@ GRStateManager::RemoveDeadBindings(const GRState* state,
llvm::SmallVector<const MemRegion*, 10> RegionRoots;
GRState NewState = *state;
- NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, SymReaper,
+ NewState.Env = EnvMgr.removeDeadBindings(NewState.Env, SymReaper,
state, RegionRoots);
// Clean up the store.
- NewState.St = StoreMgr->RemoveDeadBindings(NewState.St, LCtx,
- SymReaper, RegionRoots);
+ NewState.setStore(StoreMgr->removeDeadBindings(NewState.getStore(), LCtx,
+ SymReaper, RegionRoots));
state = getPersistentState(NewState);
- return ConstraintMgr->RemoveDeadBindings(state, SymReaper);
+ return ConstraintMgr->removeDeadBindings(state, SymReaper);
}
const GRState *GRStateManager::MarshalState(const GRState *state,
@@ -64,7 +90,7 @@ const GRState *GRStateManager::MarshalState(const GRState *state,
GRState State(this,
EnvMgr.getInitialEnvironment(),
StoreMgr->getInitialStore(InitLoc),
- GDMFactory.GetEmptyMap());
+ GDMFactory.getEmptyMap());
return getPersistentState(State);
}
@@ -72,84 +98,84 @@ const GRState *GRStateManager::MarshalState(const GRState *state,
const GRState *GRState::bindCompoundLiteral(const CompoundLiteralExpr* CL,
const LocationContext *LC,
SVal V) const {
- Store new_store =
- getStateManager().StoreMgr->BindCompoundLiteral(St, CL, LC, V);
- return makeWithStore(new_store);
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindCompoundLiteral(getStore(), CL, LC, V);
+ return makeWithStore(newStore);
}
const GRState *GRState::bindDecl(const VarRegion* VR, SVal IVal) const {
- Store new_store = getStateManager().StoreMgr->BindDecl(St, VR, IVal);
- return makeWithStore(new_store);
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal);
+ return makeWithStore(newStore);
}
const GRState *GRState::bindDeclWithNoInit(const VarRegion* VR) const {
- Store new_store = getStateManager().StoreMgr->BindDeclWithNoInit(St, VR);
- return makeWithStore(new_store);
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR);
+ return makeWithStore(newStore);
}
const GRState *GRState::bindLoc(Loc LV, SVal V) const {
GRStateManager &Mgr = getStateManager();
- Store new_store = Mgr.StoreMgr->Bind(St, LV, V);
- const GRState *new_state = makeWithStore(new_store);
-
+ const GRState *newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+ LV, V));
const MemRegion *MR = LV.getAsRegion();
- if (MR)
- return Mgr.getOwningEngine().ProcessRegionChange(new_state, MR);
+ if (MR && Mgr.getOwningEngine())
+ return Mgr.getOwningEngine()->processRegionChange(newState, MR);
- return new_state;
+ return newState;
}
const GRState *GRState::bindDefault(SVal loc, SVal V) const {
GRStateManager &Mgr = getStateManager();
const MemRegion *R = cast<loc::MemRegionVal>(loc).getRegion();
- Store new_store = Mgr.StoreMgr->BindDefault(St, R, V);
- const GRState *new_state = makeWithStore(new_store);
- return Mgr.getOwningEngine().ProcessRegionChange(new_state, R);
+ const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+ const GRState *new_state = makeWithStore(newStore);
+ return Mgr.getOwningEngine() ?
+ Mgr.getOwningEngine()->processRegionChange(new_state, R) :
+ new_state;
}
-const GRState *GRState::InvalidateRegions(const MemRegion * const *Begin,
+const GRState *GRState::invalidateRegions(const MemRegion * const *Begin,
const MemRegion * const *End,
const Expr *E, unsigned Count,
StoreManager::InvalidatedSymbols *IS,
bool invalidateGlobals) const {
GRStateManager &Mgr = getStateManager();
- GRSubEngine &Eng = Mgr.getOwningEngine();
-
- if (Eng.WantsRegionChangeUpdate(this)) {
+ SubEngine* Eng = Mgr.getOwningEngine();
+
+ if (Eng && Eng->wantsRegionChangeUpdate(this)) {
StoreManager::InvalidatedRegions Regions;
-
- Store new_store = Mgr.StoreMgr->InvalidateRegions(St, Begin, End,
- E, Count, IS,
- invalidateGlobals,
- &Regions);
- const GRState *new_state = makeWithStore(new_store);
-
- return Eng.ProcessRegionChanges(new_state,
- &Regions.front(),
- &Regions.back()+1);
+ const StoreRef &newStore
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Begin, End, E, Count, IS,
+ invalidateGlobals, &Regions);
+ const GRState *newState = makeWithStore(newStore);
+ return Eng->processRegionChanges(newState,
+ &Regions.front(),
+ &Regions.back()+1);
}
- Store new_store = Mgr.StoreMgr->InvalidateRegions(St, Begin, End,
- E, Count, IS,
- invalidateGlobals,
- NULL);
- return makeWithStore(new_store);
+ const StoreRef &newStore =
+ Mgr.StoreMgr->invalidateRegions(getStore(), Begin, End, E, Count, IS,
+ invalidateGlobals, NULL);
+ return makeWithStore(newStore);
}
const GRState *GRState::unbindLoc(Loc LV) const {
- assert(!isa<loc::MemRegionVal>(LV) && "Use InvalidateRegion instead.");
+ assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
Store OldStore = getStore();
- Store NewStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+ const StoreRef &newStore = getStateManager().StoreMgr->Remove(OldStore, LV);
- if (NewStore == OldStore)
+ if (newStore.getStore() == OldStore)
return this;
- return makeWithStore(NewStore);
+ return makeWithStore(newStore);
}
-const GRState *GRState::EnterStackFrame(const StackFrameContext *frame) const {
- Store new_store = getStateManager().StoreMgr->EnterStackFrame(this, frame);
+const GRState *GRState::enterStackFrame(const StackFrameContext *frame) const {
+ const StoreRef &new_store =
+ getStateManager().StoreMgr->enterStackFrame(this, frame);
return makeWithStore(new_store);
}
@@ -162,16 +188,16 @@ SVal GRState::getSValAsScalarOrLoc(const MemRegion *R) const {
if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
QualType T = TR->getValueType();
- if (Loc::IsLocType(T) || T->isIntegerType())
+ if (Loc::isLocType(T) || T->isIntegerType())
return getSVal(R);
}
return UnknownVal();
}
-SVal GRState::getSimplifiedSVal(Loc location, QualType T) const {
- SVal V = getSVal(cast<Loc>(location), T);
-
+SVal GRState::getSVal(Loc location, QualType T) const {
+ SVal V = getRawSVal(cast<Loc>(location), T);
+
// If 'V' is a symbolic value that is *perfectly* constrained to
// be a constant value, use that value instead to lessen the burden
// on later analysis stages (so we have less symbolic values to reason
@@ -230,7 +256,7 @@ const GRState *GRState::bindExprAndLocation(const Stmt *S, SVal location,
return getStateManager().getPersistentState(NewSt);
}
-const GRState *GRState::AssumeInBound(DefinedOrUnknownSVal Idx,
+const GRState *GRState::assumeInBound(DefinedOrUnknownSVal Idx,
DefinedOrUnknownSVal UpperBound,
bool Assumption) const {
if (Idx.isUnknown() || UpperBound.isUnknown())
@@ -238,51 +264,65 @@ const GRState *GRState::AssumeInBound(DefinedOrUnknownSVal Idx,
// Build an expression for 0 <= Idx < UpperBound.
// This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
- // FIXME: This should probably be part of SValuator.
+ // FIXME: This should probably be part of SValBuilder.
GRStateManager &SM = getStateManager();
- ValueManager &VM = SM.getValueManager();
- SValuator &SV = VM.getSValuator();
- ASTContext &Ctx = VM.getContext();
+ SValBuilder &svalBuilder = SM.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
// Get the offset: the minimum value of the array index type.
- BasicValueFactory &BVF = VM.getBasicValueFactory();
- // FIXME: This should be using ValueManager::ArrayIndexTy...somehow.
- QualType IndexTy = Ctx.IntTy;
- nonloc::ConcreteInt Min = BVF.getMinValue(IndexTy);
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+ // FIXME: This should be using ValueManager::ArrayIndexTy...somehow.
+ QualType indexTy = Ctx.IntTy;
+ nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
// Adjust the index.
- SVal NewIdx = SV.EvalBinOpNN(this, BO_Add,
- cast<NonLoc>(Idx), Min, IndexTy);
- if (NewIdx.isUnknownOrUndef())
+ SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
+ cast<NonLoc>(Idx), Min, indexTy);
+ if (newIdx.isUnknownOrUndef())
return this;
// Adjust the upper bound.
- SVal NewBound = SV.EvalBinOpNN(this, BO_Add,
- cast<NonLoc>(UpperBound), Min, IndexTy);
- if (NewBound.isUnknownOrUndef())
+ SVal newBound =
+ svalBuilder.evalBinOpNN(this, BO_Add, cast<NonLoc>(UpperBound),
+ Min, indexTy);
+
+ if (newBound.isUnknownOrUndef())
return this;
// Build the actual comparison.
- SVal InBound = SV.EvalBinOpNN(this, BO_LT,
- cast<NonLoc>(NewIdx), cast<NonLoc>(NewBound),
+ SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT,
+ cast<NonLoc>(newIdx), cast<NonLoc>(newBound),
Ctx.IntTy);
- if (InBound.isUnknownOrUndef())
+ if (inBound.isUnknownOrUndef())
return this;
// Finally, let the constraint manager take care of it.
ConstraintManager &CM = SM.getConstraintManager();
- return CM.Assume(this, cast<DefinedSVal>(InBound), Assumption);
+ return CM.assume(this, cast<DefinedSVal>(inBound), Assumption);
}
const GRState* GRStateManager::getInitialState(const LocationContext *InitLoc) {
GRState State(this,
EnvMgr.getInitialEnvironment(),
StoreMgr->getInitialStore(InitLoc),
- GDMFactory.GetEmptyMap());
+ GDMFactory.getEmptyMap());
return getPersistentState(State);
}
+void GRStateManager::recycleUnusedStates() {
+ for (std::vector<GRState*>::iterator i = recentlyAllocatedStates.begin(),
+ e = recentlyAllocatedStates.end(); i != e; ++i) {
+ GRState *state = *i;
+ if (state->referencedByExplodedNode())
+ continue;
+ StateSet.RemoveNode(state);
+ freeStates.push_back(state);
+ state->~GRState();
+ }
+ recentlyAllocatedStates.clear();
+}
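recycleUnusedStates() pairs with getPersistentState() below: states that no ExplodedNode ever referenced are destroyed in place and their slots parked on freeStates, which the next allocation consults before touching the bump allocator. A hedged miniature of that free-list-first allocation, with BumpAlloc as a stand-in for the real llvm::BumpPtrAllocator:

#include <vector>

template <typename T, typename BumpAlloc>
T *allocateRecycled(std::vector<T*> &freeList, BumpAlloc &alloc) {
  if (!freeList.empty()) {
    T *slot = freeList.back();  // reuse a recycled slot first
    freeList.pop_back();
    return slot;                // caller placement-news a T into it
  }
  return static_cast<T*>(alloc.template Allocate<T>());
}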
+
const GRState* GRStateManager::getPersistentState(GRState& State) {
llvm::FoldingSetNodeID ID;
@@ -292,18 +332,35 @@ const GRState* GRStateManager::getPersistentState(GRState& State) {
if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
return I;
- GRState* I = (GRState*) Alloc.Allocate<GRState>();
- new (I) GRState(State);
- StateSet.InsertNode(I, InsertPos);
- return I;
+ GRState *newState = 0;
+ if (!freeStates.empty()) {
+ newState = freeStates.back();
+ freeStates.pop_back();
+ }
+ else {
+ newState = (GRState*) Alloc.Allocate<GRState>();
+ }
+ new (newState) GRState(State);
+ StateSet.InsertNode(newState, InsertPos);
+ recentlyAllocatedStates.push_back(newState);
+ return newState;
}
-const GRState* GRState::makeWithStore(Store store) const {
+const GRState* GRState::makeWithStore(const StoreRef &store) const {
GRState NewSt = *this;
- NewSt.St = store;
+ NewSt.setStore(store);
return getStateManager().getPersistentState(NewSt);
}
+void GRState::setStore(const StoreRef &newStore) {
+ Store newStoreStore = newStore.getStore();
+ if (newStoreStore)
+ stateMgr->getStoreManager().incrementReferenceCount(newStoreStore);
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+ store = newStoreStore;
+}
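setStore() uses the classic increment-before-decrement ordering, which stays correct even if the new store aliases the one currently held; a self-assignment would otherwise drop the last reference before taking a new one. The same idiom in isolation, as a hedged generic sketch whose Manager mirrors the incrementReferenceCount/decrementReferenceCount calls shown above:

// Take the new reference first, then release the old one; safe when
// next == slot, since the refcount never dips to zero in between.
template <typename T, typename Manager>
void resetStoreRef(T *&slot, T *next, Manager &mgr) {
  if (next)
    mgr.incrementReferenceCount(next);
  if (slot)
    mgr.decrementReferenceCount(slot);
  slot = next;
}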
+
//===----------------------------------------------------------------------===//
// State pretty-printing.
//===----------------------------------------------------------------------===//
@@ -420,7 +477,7 @@ GRStateManager::FindGDMContext(void* K,
const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
GRState::GenericDataMap M1 = St->getGDM();
- GRState::GenericDataMap M2 = GDMFactory.Add(M1, Key, Data);
+ GRState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data);
if (M1 == M2)
return St;
@@ -432,7 +489,7 @@ const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
const GRState *GRStateManager::removeGDM(const GRState *state, void *Key) {
GRState::GenericDataMap OldM = state->getGDM();
- GRState::GenericDataMap NewM = GDMFactory.Remove(OldM, Key);
+ GRState::GenericDataMap NewM = GDMFactory.remove(OldM, Key);
if (NewM == OldM)
return state;
diff --git a/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index ff9867f..1ebc28c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathDiagnosticClients.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/SourceManager.h"
@@ -21,11 +21,13 @@
#include "clang/Rewrite/HTMLRewrite.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
+#include "llvm/Support/Path.h"
using namespace clang;
+using namespace ento;
//===----------------------------------------------------------------------===//
// Boilerplate.
@@ -77,8 +79,8 @@ HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix,
}
PathDiagnosticClient*
-clang::CreateHTMLDiagnosticClient(const std::string& prefix,
- const Preprocessor &PP) {
+ento::createHTMLDiagnosticClient(const std::string& prefix,
+ const Preprocessor &PP) {
return new HTMLDiagnostics(prefix, PP);
}
@@ -120,7 +122,9 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
std::string ErrorMsg;
Directory.createDirectoryOnDisk(true, &ErrorMsg);
- if (!Directory.isDirectory()) {
+ bool IsDirectory;
+ if (llvm::sys::fs::is_directory(Directory.str(), IsDirectory) ||
+ !IsDirectory) {
llvm::errs() << "warning: could not create directory '"
<< Directory.str() << "'\n"
<< "reason: " << ErrorMsg << '\n';
@@ -199,7 +203,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
std::string DirName = "";
- if (!llvm::sys::Path(Entry->getName()).isAbsolute()) {
+ if (llvm::sys::path::is_relative(Entry->getName())) {
llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
DirName = P.str() + "/";
}
@@ -300,7 +304,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
}
if (FilesMade)
- FilesMade->push_back(H.getLast());
+ FilesMade->push_back(llvm::sys::path::filename(H.str()));
// Emit the HTML to disk.
for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Makefile b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Makefile
new file mode 100644
index 0000000..4aebc16
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Makefile
@@ -0,0 +1,17 @@
+##===- clang/lib/StaticAnalyzer/Core/Makefile --------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerCore
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 3f706e1..d9e884a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Checker/PathSensitive/ValueManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/AST/CharUnits.h"
@@ -22,6 +22,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
//===----------------------------------------------------------------------===//
// MemRegion Construction.
@@ -176,27 +177,27 @@ const StackFrameContext *VarRegion::getStackFrame() const {
// Region extents.
//===----------------------------------------------------------------------===//
-DefinedOrUnknownSVal DeclRegion::getExtent(ValueManager& ValMgr) const {
- ASTContext& Ctx = ValMgr.getContext();
- QualType T = getDesugaredValueType();
+DefinedOrUnknownSVal DeclRegion::getExtent(SValBuilder &svalBuilder) const {
+ ASTContext& Ctx = svalBuilder.getContext();
+ QualType T = getDesugaredValueType(Ctx);
if (isa<VariableArrayType>(T))
- return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
if (isa<IncompleteArrayType>(T))
return UnknownVal();
- CharUnits Size = Ctx.getTypeSizeInChars(T);
- QualType SizeTy = Ctx.getSizeType();
- return ValMgr.makeIntVal(Size.getQuantity(), SizeTy);
+ CharUnits size = Ctx.getTypeSizeInChars(T);
+ QualType sizeTy = svalBuilder.getArrayIndexType();
+ return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
}
-DefinedOrUnknownSVal FieldRegion::getExtent(ValueManager& ValMgr) const {
- DefinedOrUnknownSVal Extent = DeclRegion::getExtent(ValMgr);
+DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
+ DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
// A zero-length array at the end of a struct often stands for dynamically-
// allocated extra memory.
if (Extent.isZeroConstant()) {
- QualType T = getDesugaredValueType();
+ QualType T = getDesugaredValueType(svalBuilder.getContext());
if (isa<ConstantArrayType>(T))
return UnknownVal();
@@ -205,17 +206,21 @@ DefinedOrUnknownSVal FieldRegion::getExtent(ValueManager& ValMgr) const {
return Extent;
}
-DefinedOrUnknownSVal AllocaRegion::getExtent(ValueManager& ValMgr) const {
- return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
}
-DefinedOrUnknownSVal SymbolicRegion::getExtent(ValueManager& ValMgr) const {
- return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
}
-DefinedOrUnknownSVal StringRegion::getExtent(ValueManager& ValMgr) const {
- QualType SizeTy = ValMgr.getContext().getSizeType();
- return ValMgr.makeIntVal(getStringLiteral()->getByteLength()+1, SizeTy);
+DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
+ svalBuilder.getArrayIndexType());
+}
+
+QualType CXXBaseObjectRegion::getValueType() const {
+ return QualType(decl->getTypeForDecl(), 0);
}
//===----------------------------------------------------------------------===//
@@ -356,17 +361,28 @@ void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const {
BlockDataRegion::ProfileRegion(ID, BC, LC, getSuperRegion());
}
-void CXXObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
- Expr const *Ex,
- const MemRegion *sReg) {
+void CXXTempObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ Expr const *Ex,
+ const MemRegion *sReg) {
ID.AddPointer(Ex);
ID.AddPointer(sReg);
}
-void CXXObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ProfileRegion(ID, Ex, getSuperRegion());
}
+void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *decl,
+ const MemRegion *sReg) {
+ ID.AddPointer(decl);
+ ID.AddPointer(sReg);
+}
+
+void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, decl, superRegion);
+}
+
//===----------------------------------------------------------------------===//
// Region pretty-printing.
//===----------------------------------------------------------------------===//
@@ -407,6 +423,14 @@ void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "{ " << (void*) CL << " }";
}
+void CXXTempObjectRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "temp_object";
+}
+
+void CXXBaseObjectRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "base " << decl->getName();
+}
+
void CXXThisRegion::dumpToStream(llvm::raw_ostream &os) const {
os << "this";
}
@@ -445,7 +469,7 @@ void RegionRawOffset::dump() const {
}
void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const {
- os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}';
+ os << "raw_offset{" << getRegion() << ',' << getOffset().getQuantity() << '}';
}
void StaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const {
@@ -624,7 +648,7 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
}
const ElementRegion*
-MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
+MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
const MemRegion* superRegion,
ASTContext& Ctx){
@@ -675,12 +699,18 @@ MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl* d,
return getSubRegion<ObjCIvarRegion>(d, superRegion);
}
-const CXXObjectRegion*
-MemRegionManager::getCXXObjectRegion(Expr const *E,
- LocationContext const *LC) {
+const CXXTempObjectRegion*
+MemRegionManager::getCXXTempObjectRegion(Expr const *E,
+ LocationContext const *LC) {
const StackFrameContext *SFC = LC->getCurrentStackFrame();
assert(SFC);
- return getSubRegion<CXXObjectRegion>(E, getStackLocalsRegion(SFC));
+ return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
+}
+
+const CXXBaseObjectRegion *
+MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *decl,
+ const MemRegion *superRegion) {
+ return getSubRegion<CXXBaseObjectRegion>(decl, superRegion);
}
const CXXThisRegion*
@@ -740,6 +770,7 @@ const MemRegion *MemRegion::getBaseRegion() const {
case MemRegion::ElementRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
R = cast<SubRegion>(R)->getSuperRegion();
continue;
default:
@@ -824,7 +855,7 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
}
assert(superR && "super region cannot be NULL");
- return RegionRawOffset(superR, offset.getQuantity());
+ return RegionRawOffset(superR, offset);
}
RegionOffset MemRegion::getAsOffset() const {
@@ -841,7 +872,7 @@ RegionOffset MemRegion::getAsOffset() const {
case CXXThisRegionKind:
case StringRegionKind:
case VarRegionKind:
- case CXXObjectRegionKind:
+ case CXXTempObjectRegionKind:
goto Finish;
case ElementRegionKind: {
const ElementRegion *ER = cast<ElementRegion>(R);
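
The getExtent rewrite above makes SValBuilder the single entry point for region sizes: complete types yield a concrete byte count in the array-index type, VLA/alloca/symbolic regions yield a fresh extent symbol, and incomplete arrays stay unknown. A hedged caller-side sketch; the names R and svalBuilder are assumed to be in scope:

  DefinedOrUnknownSVal extent = R->getExtent(svalBuilder);
  if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&extent)) {
    // Complete type: size in bytes, e.g. 4 for 'int' on common targets.
    int64_t bytes = CI->getValue().getSExtValue();
    (void)bytes;
  } else if (isa<nonloc::SymbolVal>(extent)) {
    // VLA, alloca, or symbolic region: size is an extent symbol,
    // constrained elsewhere if at all.
  } // else UnknownVal(), e.g. for an incomplete array type.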
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
new file mode 100644
index 0000000..2e370d6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
@@ -0,0 +1,99 @@
+//===- ObjCMessage.cpp - Wrapper for ObjC messages and dot syntax -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMessage which serves as a common wrapper for ObjC
+// message expressions or implicit messages for loading/storing ObjC properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+
+using namespace clang;
+using namespace ento;
+
+QualType ObjCMessage::getType(ASTContext &ctx) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getType();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+ if (isPropertySetter())
+ return ctx.VoidTy;
+ return propE->getType();
+}
+
+Selector ObjCMessage::getSelector() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getSelector();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+ if (isPropertySetter())
+ return propE->getSetterSelector();
+ return propE->getGetterSelector();
+}
+
+const ObjCMethodDecl *ObjCMessage::getMethodDecl() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getMethodDecl();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+ if (propE->isImplicitProperty())
+ return isPropertySetter() ? propE->getImplicitPropertySetter()
+ : propE->getImplicitPropertyGetter();
+ return 0;
+}
+
+const ObjCInterfaceDecl *ObjCMessage::getReceiverInterface() const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getReceiverInterface();
+ const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
+ if (propE->isClassReceiver())
+ return propE->getClassReceiver();
+ QualType recT;
+ if (const Expr *recE = getInstanceReceiver())
+ recT = recE->getType();
+ else {
+ assert(propE->isSuperReceiver());
+ recT = propE->getSuperReceiverType();
+ }
+ if (const ObjCObjectPointerType *Ptr = recT->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+ return 0;
+}
+
+const Expr *ObjCMessage::getArgExpr(unsigned i) const {
+ assert(isValid() && "This ObjCMessage is uninitialized!");
+ assert(i < getNumArgs() && "Invalid index for argument");
+ if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
+ return msgE->getArg(i);
+ assert(isPropertySetter());
+ if (const BinaryOperator *bop = dyn_cast<BinaryOperator>(OriginE))
+ if (bop->isAssignmentOp())
+ return bop->getRHS();
+ return 0;
+}
+
+QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const {
+ if (CallE) {
+ const Expr *Callee = CallE->getCallee();
+ if (const FunctionDecl *FD = State->getSVal(Callee).getAsFunctionDecl())
+ return FD->getResultType();
+ return CallE->getType();
+ }
+ return Msg.getResultType(ctx);
+}
+
+SVal CallOrObjCMessage::getArgSValAsScalarOrLoc(unsigned i) const {
+ assert(i < getNumArgs());
+ if (CallE) return State->getSValAsScalarOrLoc(CallE->getArg(i));
+ QualType argT = Msg.getArgType(i);
+ if (Loc::isLocType(argT) || argT->isIntegerType())
+ return Msg.getArgSVal(i, State);
+ return UnknownVal();
+}
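
ObjCMessage gives checkers one view over explicit message sends and property accesses. A sketch of typical use, assuming a valid ObjCMessage msg and an ASTContext &Ctx; only methods defined in the file above are used:

  Selector Sel = msg.getSelector();   // setter selector for 'obj.x = v'
  QualType RetTy = msg.getType(Ctx);  // VoidTy when isPropertySetter()
  if (const ObjCMethodDecl *MD = msg.getMethodDecl())
    (void)MD;  // resolved accessor for implicit properties, else 0
  if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
    (void)ID;  // receiver class when statically known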
diff --git a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index cf05a7d..872bbfe 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -20,6 +20,7 @@
#include "llvm/Support/Casting.h"
using namespace clang;
+using namespace ento;
using llvm::dyn_cast;
using llvm::isa;
@@ -84,6 +85,8 @@ PathDiagnostic::PathDiagnostic(llvm::StringRef bugtype, llvm::StringRef desc,
void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticClient::HandleDiagnostic(DiagLevel, Info);
// Create a PathDiagnostic with a single piece.
@@ -104,7 +107,9 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
Info.FormatDiagnostic(StrC);
PathDiagnosticPiece *P =
- new PathDiagnosticEventPiece(Info.getLocation(), StrC.str());
+ new PathDiagnosticEventPiece(FullSourceLoc(Info.getLocation(),
+ Info.getSourceManager()),
+ StrC.str());
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
P->addRange(Info.getRange(i).getAsRange());
@@ -168,6 +173,7 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
case Stmt::ChooseExprClass:
case Stmt::IndirectGotoStmtClass:
case Stmt::SwitchStmtClass:
+ case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass:
case Stmt::ObjCForCollectionStmtClass: {
SourceLocation L = S->getLocStart();
diff --git a/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 13accbb..fbbbd46 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathDiagnosticClients.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Lex/Preprocessor.h"
@@ -21,6 +21,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
+using namespace ento;
using llvm::cast;
typedef llvm::DenseMap<FileID, unsigned> FIDMap;
@@ -97,8 +98,8 @@ PlistDiagnostics::PlistDiagnostics(const std::string& output,
: OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false) {}
PathDiagnosticClient*
-clang::CreatePlistDiagnosticClient(const std::string& s, const Preprocessor &PP,
- PathDiagnosticClient *subPD) {
+ento::createPlistDiagnosticClient(const std::string& s, const Preprocessor &PP,
+ PathDiagnosticClient *subPD) {
return new PlistDiagnostics(s, PP.getLangOptions(), subPD);
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 697694e..389fff5 100644
--- a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -13,16 +13,16 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "clang/Checker/ManagerRegistry.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
namespace { class ConstraintRange {}; }
static int ConstraintRangeIndex = 0;
@@ -91,7 +91,7 @@ public:
/// Construct a new RangeSet representing '{ [from, to] }'.
RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
- : ranges(F.Add(F.GetEmptySet(), Range(from, to))) {}
+ : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
@@ -129,17 +129,17 @@ private:
if (i->Includes(Lower)) {
if (i->Includes(Upper)) {
- newRanges = F.Add(newRanges, Range(BV.getValue(Lower),
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower),
BV.getValue(Upper)));
break;
} else
- newRanges = F.Add(newRanges, Range(BV.getValue(Lower), i->To()));
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
} else {
if (i->Includes(Upper)) {
- newRanges = F.Add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
break;
} else
- newRanges = F.Add(newRanges, *i);
+ newRanges = F.add(newRanges, *i);
}
}
}
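
The Factory calls here track LLVM's ADT rename from Add/GetEmptySet to add/getEmptySet; the persistent-set semantics are unchanged. A self-contained sketch:

  #include "llvm/ADT/ImmutableSet.h"

  void demo() {
    llvm::ImmutableSet<int>::Factory F;
    llvm::ImmutableSet<int> Empty = F.getEmptySet(); // was GetEmptySet()
    llvm::ImmutableSet<int> One = F.add(Empty, 1);   // was Add()
    llvm::ImmutableSet<int> Two = F.add(One, 2);     // 'One' is unchanged
    (void)Two;
  }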
@@ -155,7 +155,7 @@ public:
RangeSet Intersect(BasicValueFactory &BV, Factory &F,
const llvm::APSInt &Lower,
const llvm::APSInt &Upper) const {
- PrimRangeSet newRanges = F.GetEmptySet();
+ PrimRangeSet newRanges = F.getEmptySet();
PrimRangeSet::iterator i = begin(), e = end();
if (Lower <= Upper)
@@ -194,41 +194,43 @@ public:
typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy;
namespace clang {
+namespace ento {
template<>
struct GRStateTrait<ConstraintRange>
: public GRStatePartialTrait<ConstraintRangeTy> {
static inline void* GDMIndex() { return &ConstraintRangeIndex; }
};
}
+}
namespace {
class RangeConstraintManager : public SimpleConstraintManager{
RangeSet GetRange(const GRState *state, SymbolRef sym);
public:
- RangeConstraintManager(GRSubEngine &subengine)
+ RangeConstraintManager(SubEngine &subengine)
: SimpleConstraintManager(subengine) {}
- const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymNE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymEQ(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymLT(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymGT(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymGE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
+ const GRState *assumeSymLE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
@@ -240,7 +242,7 @@ public:
return i ? *i == V : false;
}
- const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
+ const GRState* removeDeadBindings(const GRState* St, SymbolReaper& SymReaper);
void print(const GRState* St, llvm::raw_ostream& Out,
const char* nl, const char *sep);
@@ -251,8 +253,8 @@ private:
} // end anonymous namespace
-ConstraintManager* clang::CreateRangeConstraintManager(GRStateManager&,
- GRSubEngine &subeng) {
+ConstraintManager* ento::CreateRangeConstraintManager(GRStateManager&,
+ SubEngine &subeng) {
return new RangeConstraintManager(subeng);
}
@@ -265,7 +267,7 @@ const llvm::APSInt* RangeConstraintManager::getSymVal(const GRState* St,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
const GRState*
-RangeConstraintManager::RemoveDeadBindings(const GRState* state,
+RangeConstraintManager::removeDeadBindings(const GRState* state,
SymbolReaper& SymReaper) {
ConstraintRangeTy CR = state->get<ConstraintRange>();
@@ -274,7 +276,7 @@ RangeConstraintManager::RemoveDeadBindings(const GRState* state,
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
SymbolRef sym = I.getKey();
if (SymReaper.maybeDead(sym))
- CR = CRFactory.Remove(CR, sym);
+ CR = CRFactory.remove(CR, sym);
}
return state->set<ConstraintRange>(CR);
@@ -293,7 +295,7 @@ RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
}
//===------------------------------------------------------------------------===
-// AssumeSymX methods: public interface for RangeConstraintManager.
+// assumeSymX methods: public interface for RangeConstraintManager.
//===------------------------------------------------------------------------===/
// The syntax for ranges below is mathematical, using [x, y] for closed ranges
@@ -305,7 +307,7 @@ RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
// UINT_MAX, 0, 1, and 2.
const GRState*
-RangeConstraintManager::AssumeSymNE(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymNE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -322,7 +324,7 @@ RangeConstraintManager::AssumeSymNE(const GRState* state, SymbolRef sym,
}
const GRState*
-RangeConstraintManager::AssumeSymEQ(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymEQ(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
// [Int-Adjustment, Int-Adjustment]
@@ -333,7 +335,7 @@ RangeConstraintManager::AssumeSymEQ(const GRState* state, SymbolRef sym,
}
const GRState*
-RangeConstraintManager::AssumeSymLT(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymLT(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -354,7 +356,7 @@ RangeConstraintManager::AssumeSymLT(const GRState* state, SymbolRef sym,
}
const GRState*
-RangeConstraintManager::AssumeSymGT(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymGT(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -375,7 +377,7 @@ RangeConstraintManager::AssumeSymGT(const GRState* state, SymbolRef sym,
}
const GRState*
-RangeConstraintManager::AssumeSymGE(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymGE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -397,7 +399,7 @@ RangeConstraintManager::AssumeSymGE(const GRState* state, SymbolRef sym,
}
const GRState*
-RangeConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
+RangeConstraintManager::assumeSymLE(const GRState* state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
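
Throughout these methods the Adjustment argument encodes constraints of the form '$sym + Adjustment <op> Int', so the interesting point on the symbol's own range is Int - Adjustment. A worked example of the arithmetic for assumeSymNE; the variable names are illustrative, not the function's own:

  // Constraint: ($sym + 1) != 0 on an unsigned 32-bit symbol.
  // The excluded point is 0 - 1 == UINT_MAX, so after the assumption
  // $sym lies in [0, UINT_MAX - 1].
  llvm::APSInt Point = Int - Adjustment;   // the single hole
  llvm::APSInt Upper = Point; ++Upper;     // 0 here (wraps around)
  llvm::APSInt Lower = Point; --Lower;     // UINT_MAX - 1 here
  // Intersecting the current range with the wrapped interval
  // [Upper, Lower] keeps everything except Point.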
diff --git a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 1a3eded..19e0e12 100644
--- a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -20,15 +20,16 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
-#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
using llvm::Optional;
//===----------------------------------------------------------------------===//
@@ -85,7 +86,7 @@ BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
// FIXME: There are some ElementRegions for which we cannot compute
// raw offsets yet, including regions with symbolic offsets. These will be
// ignored by the store.
- return BindingKey(O.getRegion(), O.getByteOffset(), k);
+ return BindingKey(O.getRegion(), O.getOffset().getQuantity(), k);
}
return BindingKey(R, 0, k);
@@ -148,11 +149,11 @@ public:
Map::iterator I = M.find(Parent);
if (I == M.end()) {
- M.insert(std::make_pair(Parent, F.Add(F.GetEmptySet(), SubRegion)));
+ M.insert(std::make_pair(Parent, F.add(F.getEmptySet(), SubRegion)));
return true;
}
- I->second = F.Add(I->second, SubRegion);
+ I->second = F.add(I->second, SubRegion);
return false;
}
@@ -214,86 +215,98 @@ public:
/// setImplicitDefaultValue - Set the default binding for the provided
/// MemRegion to the value implicitly defined for compound literals when
/// the value is not specified.
- Store setImplicitDefaultValue(Store store, const MemRegion *R, QualType T);
+ StoreRef setImplicitDefaultValue(Store store, const MemRegion *R, QualType T);
/// ArrayToPointer - Emulates the "decay" of an array to a pointer
/// type. 'Array' represents the lvalue of the array being decayed
/// to a pointer, and the returned SVal represents the decayed
/// version of that lvalue (i.e., a pointer to the first element of
- /// the array). This is called by GRExprEngine when evaluating
+ /// the array). This is called by ExprEngine when evaluating
/// casts from arrays to pointers.
SVal ArrayToPointer(Loc Array);
- SVal EvalBinOp(BinaryOperator::Opcode Op,Loc L, NonLoc R, QualType resultTy);
+ /// For DerivedToBase casts, create a CXXBaseObjectRegion and return it.
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType);
- Store getInitialStore(const LocationContext *InitLoc) {
- return RBFactory.GetEmptyMap().getRoot();
+ StoreRef getInitialStore(const LocationContext *InitLoc) {
+ return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
}
//===-------------------------------------------------------------------===//
// Binding values to regions.
//===-------------------------------------------------------------------===//
- Store InvalidateRegions(Store store,
- const MemRegion * const *Begin,
- const MemRegion * const *End,
- const Expr *E, unsigned Count,
- InvalidatedSymbols *IS,
- bool invalidateGlobals,
- InvalidatedRegions *Regions);
+ StoreRef invalidateRegions(Store store,
+ const MemRegion * const *Begin,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions);
public: // Made public for helper classes.
void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R,
RegionStoreSubRegionMap &M);
- RegionBindings Add(RegionBindings B, BindingKey K, SVal V);
+ RegionBindings addBinding(RegionBindings B, BindingKey K, SVal V);
- RegionBindings Add(RegionBindings B, const MemRegion *R,
+ RegionBindings addBinding(RegionBindings B, const MemRegion *R,
BindingKey::Kind k, SVal V);
- const SVal *Lookup(RegionBindings B, BindingKey K);
- const SVal *Lookup(RegionBindings B, const MemRegion *R, BindingKey::Kind k);
+ const SVal *lookup(RegionBindings B, BindingKey K);
+ const SVal *lookup(RegionBindings B, const MemRegion *R, BindingKey::Kind k);
- RegionBindings Remove(RegionBindings B, BindingKey K);
- RegionBindings Remove(RegionBindings B, const MemRegion *R,
+ RegionBindings removeBinding(RegionBindings B, BindingKey K);
+ RegionBindings removeBinding(RegionBindings B, const MemRegion *R,
BindingKey::Kind k);
- RegionBindings Remove(RegionBindings B, const MemRegion *R) {
- return Remove(Remove(B, R, BindingKey::Direct), R, BindingKey::Default);
+ RegionBindings removeBinding(RegionBindings B, const MemRegion *R) {
+ return removeBinding(removeBinding(B, R, BindingKey::Direct), R,
+ BindingKey::Default);
}
public: // Part of public interface to class.
- Store Bind(Store store, Loc LV, SVal V);
+ StoreRef Bind(Store store, Loc LV, SVal V);
// BindDefault is only used to initialize a region with a default value.
- Store BindDefault(Store store, const MemRegion *R, SVal V) {
+ StoreRef BindDefault(Store store, const MemRegion *R, SVal V) {
RegionBindings B = GetRegionBindings(store);
- assert(!Lookup(B, R, BindingKey::Default));
- assert(!Lookup(B, R, BindingKey::Direct));
- return Add(B, R, BindingKey::Default, V).getRoot();
+ assert(!lookup(B, R, BindingKey::Default));
+ assert(!lookup(B, R, BindingKey::Direct));
+ return StoreRef(addBinding(B, R, BindingKey::Default, V).getRootWithoutRetain(), *this);
}
- Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL,
- const LocationContext *LC, SVal V);
+ StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL,
+ const LocationContext *LC, SVal V);
- Store BindDecl(Store store, const VarRegion *VR, SVal InitVal);
+ StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal);
- Store BindDeclWithNoInit(Store store, const VarRegion *) {
- return store;
+ StoreRef BindDeclWithNoInit(Store store, const VarRegion *) {
+ return StoreRef(store, *this);
}
/// BindStruct - Bind a compound value to a structure.
- Store BindStruct(Store store, const TypedRegion* R, SVal V);
+ StoreRef BindStruct(Store store, const TypedRegion* R, SVal V);
- Store BindArray(Store store, const TypedRegion* R, SVal V);
+ StoreRef BindArray(Store store, const TypedRegion* R, SVal V);
/// KillStruct - Set the entire struct to unknown.
- Store KillStruct(Store store, const TypedRegion* R, SVal DefaultVal);
+ StoreRef KillStruct(Store store, const TypedRegion* R, SVal DefaultVal);
- Store Remove(Store store, Loc LV);
+ StoreRef Remove(Store store, Loc LV);
+ void incrementReferenceCount(Store store) {
+ GetRegionBindings(store).manualRetain();
+ }
+
+ /// If the StoreManager supports it, decrement the reference count of
+ /// the specified Store object. If the reference count hits 0, the memory
+ /// associated with the object is recycled.
+ void decrementReferenceCount(Store store) {
+ GetRegionBindings(store).manualRelease();
+ }
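
Stores are now handed around as StoreRef so the underlying ImmutableMap tree stays alive while a caller holds one; the two hooks above are what a StoreRef uses to pin and release the root. A simplified sketch of the idea, not the actual clang definition:

  class StoreRefSketch {
    Store store;
    StoreManager &mgr;
  public:
    StoreRefSketch(Store s, StoreManager &m) : store(s), mgr(m) {
      mgr.incrementReferenceCount(store);   // pin the tree root
    }
    StoreRefSketch(const StoreRefSketch &o) : store(o.store), mgr(o.mgr) {
      mgr.incrementReferenceCount(store);
    }
    ~StoreRefSketch() {
      mgr.decrementReferenceCount(store);   // may recycle the nodes
    }
    Store getStore() const { return store; }
  };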
//===------------------------------------------------------------------===//
// Loading values from regions.
@@ -344,20 +357,20 @@ public: // Part of public interface to class.
std::pair<Store, const MemRegion*>
GetLazyBinding(RegionBindings B, const MemRegion *R);
- Store CopyLazyBindings(nonloc::LazyCompoundVal V, Store store,
- const TypedRegion *R);
+ StoreRef CopyLazyBindings(nonloc::LazyCompoundVal V, Store store,
+ const TypedRegion *R);
//===------------------------------------------------------------------===//
// State pruning.
//===------------------------------------------------------------------===//
- /// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
+ /// removeDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
- Store RemoveDeadBindings(Store store, const StackFrameContext *LCtx,
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
- Store EnterStackFrame(const GRState *state, const StackFrameContext *frame);
+ StoreRef enterStackFrame(const GRState *state, const StackFrameContext *frame);
//===------------------------------------------------------------------===//
// Region "extents".
@@ -399,12 +412,12 @@ public: // Part of public interface to class.
// RegionStore creation.
//===----------------------------------------------------------------------===//
-StoreManager *clang::CreateRegionStoreManager(GRStateManager& StMgr) {
+StoreManager *ento::CreateRegionStoreManager(GRStateManager& StMgr) {
RegionStoreFeatures F = maximal_features_tag();
return new RegionStoreManager(StMgr, F);
}
-StoreManager *clang::CreateFieldsOnlyRegionStoreManager(GRStateManager &StMgr) {
+StoreManager *ento::CreateFieldsOnlyRegionStoreManager(GRStateManager &StMgr) {
RegionStoreFeatures F = minimal_features_tag();
F.enableFields(true);
return new RegionStoreManager(StMgr, F);
@@ -453,15 +466,18 @@ protected:
RegionStoreManager &RM;
ASTContext &Ctx;
- ValueManager &ValMgr;
+ SValBuilder &svalBuilder;
RegionBindings B;
+
+ const bool includeGlobals;
public:
ClusterAnalysis(RegionStoreManager &rm, GRStateManager &StateMgr,
- RegionBindings b)
- : RM(rm), Ctx(StateMgr.getContext()), ValMgr(StateMgr.getValueManager()),
- B(b) {}
+ RegionBindings b, const bool includeGlobals)
+ : RM(rm), Ctx(StateMgr.getContext()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ B(b), includeGlobals(includeGlobals) {}
RegionBindings getRegionBindings() const { return B; }
@@ -487,7 +503,7 @@ public:
return *CRef;
}
- void GenerateClusters(bool includeGlobals = false) {
+ void GenerateClusters() {
// Scan the entire set of bindings and make the region clusters.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
RegionCluster &C = AddToCluster(RI.getKey());
@@ -558,24 +574,25 @@ void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B,
I != E; ++I)
RemoveSubRegionBindings(B, *I, M);
- B = Remove(B, R);
+ B = removeBinding(B, R);
}
namespace {
-class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
+class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
{
const Expr *Ex;
unsigned Count;
StoreManager::InvalidatedSymbols *IS;
StoreManager::InvalidatedRegions *Regions;
public:
- InvalidateRegionsWorker(RegionStoreManager &rm,
+ invalidateRegionsWorker(RegionStoreManager &rm,
GRStateManager &stateMgr,
RegionBindings b,
const Expr *ex, unsigned count,
StoreManager::InvalidatedSymbols *is,
- StoreManager::InvalidatedRegions *r)
- : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b),
+ StoreManager::InvalidatedRegions *r,
+ bool includeGlobals)
+ : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals),
Ex(ex), Count(count), IS(is), Regions(r) {}
void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
@@ -586,7 +603,7 @@ private:
};
}
-void InvalidateRegionsWorker::VisitBinding(SVal V) {
+void invalidateRegionsWorker::VisitBinding(SVal V) {
// A symbol? Mark it touched by the invalidation.
if (IS)
if (SymbolRef Sym = V.getAsSymbol())
@@ -614,19 +631,19 @@ void InvalidateRegionsWorker::VisitBinding(SVal V) {
}
}
-void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
BindingKey *I, BindingKey *E) {
for ( ; I != E; ++I) {
// Get the old binding. Is it a region? If so, add it to the worklist.
const BindingKey &K = *I;
- if (const SVal *V = RM.Lookup(B, K))
+ if (const SVal *V = RM.lookup(B, K))
VisitBinding(*V);
- B = RM.Remove(B, K);
+ B = RM.removeBinding(B, K);
}
}
-void InvalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
+void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (IS) {
// Symbolic region? Mark that symbol touched by the invalidation.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
@@ -654,9 +671,9 @@ void InvalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.

- DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
- Count);
- B = RM.Add(B, baseR, BindingKey::Default, V);
+ DefinedOrUnknownSVal V =
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy, Count);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -670,38 +687,48 @@ void InvalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (T->isStructureType()) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
- DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
+ DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy,
Count);
- B = RM.Add(B, baseR, BindingKey::Default, V);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
// Set the default value of the array to conjured symbol.
DefinedOrUnknownSVal V =
- ValMgr.getConjuredSymbolVal(baseR, Ex, AT->getElementType(), Count);
- B = RM.Add(B, baseR, BindingKey::Default, V);
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, AT->getElementType(), Count);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
+
+ if (includeGlobals &&
+ isa<NonStaticGlobalSpaceRegion>(baseR->getMemorySpace())) {
+ // If the region is a global and we are invalidating all globals,
+ // just erase the entry. This causes all globals to be lazily
+ // symbolicated from the same base symbol.
+ B = RM.removeBinding(B, baseR);
+ return;
+ }
+
- DefinedOrUnknownSVal V = ValMgr.getConjuredSymbolVal(baseR, Ex, T, Count);
+ DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, T, Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
- B = RM.Add(B, baseR, BindingKey::Direct, V);
+ B = RM.addBinding(B, baseR, BindingKey::Direct, V);
}
-Store RegionStoreManager::InvalidateRegions(Store store,
- const MemRegion * const *I,
- const MemRegion * const *E,
- const Expr *Ex, unsigned Count,
- InvalidatedSymbols *IS,
- bool invalidateGlobals,
- InvalidatedRegions *Regions) {
- InvalidateRegionsWorker W(*this, StateMgr,
+StoreRef RegionStoreManager::invalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *E,
+ const Expr *Ex, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals,
+ InvalidatedRegions *Regions) {
+ invalidateRegionsWorker W(*this, StateMgr,
RegionStoreManager::GetRegionBindings(store),
- Ex, Count, IS, Regions);
+ Ex, Count, IS, Regions, invalidateGlobals);
// Scan the bindings and generate the clusters.
- W.GenerateClusters(invalidateGlobals);
+ W.GenerateClusters();
// Add I .. E to the worklist.
for ( ; I != E; ++I)
@@ -717,10 +744,10 @@ Store RegionStoreManager::InvalidateRegions(Store store,
// use to derive the bindings for all non-static globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
SVal V =
- ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex,
+ svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex,
/* symbol type, doesn't matter */ Ctx.IntTy,
Count);
- B = Add(B, BindingKey::Make(GS, BindingKey::Default), V);
+ B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V);
// Even if there are no bindings in the global scope, we still need to
// record that we touched it.
@@ -728,7 +755,7 @@ Store RegionStoreManager::InvalidateRegions(Store store,
Regions->push_back(GS);
}
- return B.getRoot();
+ return StoreRef(B.getRootWithoutRetain(), *this);
}
//===----------------------------------------------------------------------===//
@@ -738,19 +765,26 @@ Store RegionStoreManager::InvalidateRegions(Store store,
DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
const MemRegion *R,
QualType EleTy) {
- SVal Size = cast<SubRegion>(R)->getExtent(ValMgr);
- SValuator &SVator = ValMgr.getSValuator();
- const llvm::APSInt *SizeInt = SVator.getKnownValue(state, Size);
+ SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
+ const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
if (!SizeInt)
return UnknownVal();
CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+
+ if (Ctx.getAsVariableArrayType(EleTy)) {
+ // FIXME: We need to track extra state to properly record the size
+ // of VLAs. Returning UnknownVal here, however, is a stop-gap so that
+ // we don't have a divide-by-zero below.
+ return UnknownVal();
+ }
+
CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
// If a variable is reinterpreted as a type that doesn't fit into a larger
// type evenly, round it down.
// This is a signed value, since it's used in arithmetic with signed indices.
- return ValMgr.makeIntVal(RegionSize / EleSize, false);
+ return svalBuilder.makeIntVal(RegionSize / EleSize, false);
}
//===----------------------------------------------------------------------===//
@@ -761,7 +795,7 @@ DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
/// type. 'Array' represents the lvalue of the array being decayed
/// to a pointer, and the returned SVal represents the decayed
/// version of that lvalue (i.e., a pointer to the first element of
-/// the array). This is called by GRExprEngine when evaluating casts
+/// the array). This is called by ExprEngine when evaluating casts
/// from arrays to pointers.
SVal RegionStoreManager::ArrayToPointer(Loc Array) {
if (!isa<loc::MemRegionVal>(Array))
@@ -774,127 +808,31 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array) {
return UnknownVal();
// Strip off typedefs from the ArrayRegion's ValueType.
- QualType T = ArrayR->getValueType().getDesugaredType();
- ArrayType *AT = cast<ArrayType>(T);
+ QualType T = ArrayR->getValueType().getDesugaredType(Ctx);
+ const ArrayType *AT = cast<ArrayType>(T);
T = AT->getElementType();
- SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ NonLoc ZeroIdx = svalBuilder.makeZeroArrayIndex();
return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx));
}
-//===----------------------------------------------------------------------===//
-// Pointer arithmetic.
-//===----------------------------------------------------------------------===//
-
-SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
- QualType resultTy) {
- // Assume the base location is MemRegionVal.
- if (!isa<loc::MemRegionVal>(L))
- return UnknownVal();
-
- // Special case for zero RHS.
- if (R.isZeroConstant()) {
- switch (Op) {
- default:
- // Handle it normally.
- break;
- case BO_Add:
- case BO_Sub:
- // FIXME: does this need to be casted to match resultTy?
- return L;
- }
- }
-
- const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
- const ElementRegion *ER = 0;
-
- switch (MR->getKind()) {
- case MemRegion::SymbolicRegionKind: {
- const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
- SymbolRef Sym = SR->getSymbol();
- QualType T = Sym->getType(Ctx);
- QualType EleTy;
-
- if (const PointerType *PT = T->getAs<PointerType>())
- EleTy = PT->getPointeeType();
- else
- EleTy = T->getAs<ObjCObjectPointerType>()->getPointeeType();
-
- SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
- ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR, Ctx);
- break;
- }
- case MemRegion::AllocaRegionKind: {
- const AllocaRegion *AR = cast<AllocaRegion>(MR);
- QualType EleTy = Ctx.CharTy; // Create an ElementRegion of bytes.
- SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
- ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, Ctx);
- break;
- }
-
- case MemRegion::ElementRegionKind: {
- ER = cast<ElementRegion>(MR);
- break;
- }
-
- // Not yet handled.
- case MemRegion::VarRegionKind:
- case MemRegion::StringRegionKind: {
-
- }
- // Fall-through.
- case MemRegion::CompoundLiteralRegionKind:
- case MemRegion::FieldRegionKind:
- case MemRegion::ObjCIvarRegionKind:
- case MemRegion::CXXObjectRegionKind:
- return UnknownVal();
+SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) {
+ const CXXRecordDecl *baseDecl;
+ if (baseType->isPointerType())
+ baseDecl = baseType->getCXXRecordDeclForPointerType();
+ else
+ baseDecl = baseType->getAsCXXRecordDecl();
- case MemRegion::FunctionTextRegionKind:
- case MemRegion::BlockTextRegionKind:
- case MemRegion::BlockDataRegionKind:
- // Technically this can happen if people do funny things with casts.
- return UnknownVal();
+ assert(baseDecl && "not a CXXRecordDecl?");
- case MemRegion::CXXThisRegionKind:
- assert(0 &&
- "Cannot perform pointer arithmetic on implicit argument 'this'");
- case MemRegion::GenericMemSpaceRegionKind:
- case MemRegion::StackLocalsSpaceRegionKind:
- case MemRegion::StackArgumentsSpaceRegionKind:
- case MemRegion::HeapSpaceRegionKind:
- case MemRegion::NonStaticGlobalSpaceRegionKind:
- case MemRegion::StaticGlobalSpaceRegionKind:
- case MemRegion::UnknownSpaceRegionKind:
- assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
- return UnknownVal();
- }
+ loc::MemRegionVal *derivedRegVal = dyn_cast<loc::MemRegionVal>(&derived);
+ if (!derivedRegVal)
+ return derived;
- SVal Idx = ER->getIndex();
- nonloc::ConcreteInt* Base = dyn_cast<nonloc::ConcreteInt>(&Idx);
-
- // For now, only support:
- // (a) concrete integer indices that can easily be resolved
- // (b) 0 + symbolic index
- if (Base) {
- if (nonloc::ConcreteInt *Offset = dyn_cast<nonloc::ConcreteInt>(&R)) {
- // FIXME: Should use SValuator here.
- SVal NewIdx =
- Base->evalBinOp(ValMgr, Op,
- cast<nonloc::ConcreteInt>(ValMgr.convertToArrayIndex(*Offset)));
- const MemRegion* NewER =
- MRMgr.getElementRegion(ER->getElementType(), NewIdx,
- ER->getSuperRegion(), Ctx);
- return ValMgr.makeLoc(NewER);
- }
- if (0 == Base->getValue()) {
- const MemRegion* NewER =
- MRMgr.getElementRegion(ER->getElementType(), R,
- ER->getSuperRegion(), Ctx);
- return ValMgr.makeLoc(NewER);
- }
- }
+ const MemRegion *baseReg =
+ MRMgr.getCXXBaseObjectRegion(baseDecl, derivedRegVal->getRegion());
- return UnknownVal();
+ return loc::MemRegionVal(baseReg);
}
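
The new evalDerivedToBase models a DerivedToBase cast structurally: rather than computing a byte offset, it layers a CXXBaseObjectRegion over the derived object's region. An illustrative pairing of analyzed source and resulting region; the value names are assumptions:

  // Analyzed code:
  //   struct Base { int x; };  struct Derived : Base { int y; };
  //   Derived d;  Base *b = &d;
  //
  // Inside the store manager, with 'derivedVal' standing for &d:
  SVal baseVal = evalDerivedToBase(derivedVal, BasePtrTy /* 'Base *' */);
  // baseVal is a loc::MemRegionVal over
  //   CXXBaseObjectRegion(Base, VarRegion{d}),
  // which dumpToStream above prints as "base Base".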
//===----------------------------------------------------------------------===//
@@ -904,7 +842,7 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
const MemRegion *R) {
- if (const SVal *V = Lookup(B, R, BindingKey::Direct))
+ if (const SVal *V = lookup(B, R, BindingKey::Direct))
return *V;
return Optional<SVal>();
@@ -917,7 +855,7 @@ Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
if (TR->getValueType()->isUnionType())
return UnknownVal();
- if (const SVal *V = Lookup(B, R, BindingKey::Default))
+ if (const SVal *V = lookup(B, R, BindingKey::Default))
return *V;
return Optional<SVal>();
@@ -927,10 +865,18 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
assert(!isa<UnknownVal>(L) && "location unknown");
assert(!isa<UndefinedVal>(L) && "location undefined");
- // FIXME: Is this even possible? Shouldn't this be treated as a null
- // dereference at a higher level?
- if (isa<loc::ConcreteInt>(L))
- return UndefinedVal();
+ // For access to concrete addresses, return UnknownVal. Checks
+ // for null dereferences (and similar errors) are done by checkers, not
+ // the Store.
+ // FIXME: We can consider lazily symbolicating such memory, but we really
+ // should defer this when we can reason easily about symbolicating arrays
+ // of bytes.
+ if (isa<loc::ConcreteInt>(L)) {
+ return UnknownVal();
+ }
+ if (!isa<loc::MemRegionVal>(L)) {
+ return UnknownVal();
+ }
const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
@@ -1008,7 +954,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
}
RegionBindings B = GetRegionBindings(store);
- const SVal *V = Lookup(B, R, BindingKey::Direct);
+ const SVal *V = lookup(B, R, BindingKey::Direct);
// Check if the region has a binding.
if (V)
@@ -1026,7 +972,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
}
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
}
std::pair<Store, const MemRegion *>
@@ -1052,6 +998,17 @@ RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R) {
return std::make_pair(X.first,
MRMgr.getFieldRegionWithSuper(FR, X.second));
}
+ // C++ base object region is another kind of region that we should blast
+ // through to look for lazy compound value. It is like a field region.
+ else if (const CXXBaseObjectRegion *baseReg =
+ dyn_cast<CXXBaseObjectRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, baseReg->getSuperRegion());
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getCXXBaseObjectRegionWithSuper(baseReg, X.second));
+ }
// The NULL MemRegion indicates a non-existent lazy binding. A NULL Store is
// possible for a valid lazy binding.
return std::make_pair((Store) 0, (const MemRegion *) 0);
@@ -1084,7 +1041,7 @@ SVal RegionStoreManager::RetrieveElement(Store store,
// the only time such an access would be made is if a string literal was
// used to initialize a larger array.
char c = (i >= byteLength) ? '\0' : Str->getString()[i];
- return ValMgr.makeIntVal(c, T);
+ return svalBuilder.makeIntVal(c, T);
}
}
@@ -1107,7 +1064,7 @@ SVal RegionStoreManager::RetrieveElement(Store store,
if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
if (V->isUnknownOrUndef())
return *V;
@@ -1142,10 +1099,10 @@ RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B,
if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
if (SymbolRef parentSym = D->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
if (D->isZeroConstant())
- return ValMgr.makeZeroVal(Ty);
+ return svalBuilder.makeZeroVal(Ty);
if (D->isUnknownOrUndef())
return *D;
@@ -1167,16 +1124,16 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
RegionBindings B = GetRegionBindings(store);
while (superR) {
- if (const Optional<SVal> &D = RetrieveDerivedDefaultValue(B, superR, R, Ty))
+ if (const Optional<SVal> &D =
+ RetrieveDerivedDefaultValue(B, superR, R, Ty))
return *D;
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
- if (isa<FieldRegion>(superR) || isa<ElementRegion>(superR)) {
- superR = cast<SubRegion>(superR)->getSuperRegion();
+ if (const SubRegion *SR = dyn_cast<SubRegion>(superR)) {
+ superR = SR->getSuperRegion();
continue;
}
-
break;
}
@@ -1211,7 +1168,7 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
}
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
}
SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
@@ -1227,7 +1184,7 @@ SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
// Check if the super region has a default binding.
if (const Optional<SVal> &V = getDefaultBinding(B, superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
// Other cases: give up.
return UnknownVal();
@@ -1251,7 +1208,7 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
if (isa<UnknownSpaceRegion>(MS) ||
isa<StackArgumentsSpaceRegion>(MS))
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
if (isa<GlobalsSpaceRegion>(MS)) {
if (isa<NonStaticGlobalSpaceRegion>(MS)) {
@@ -1263,22 +1220,21 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
if (Init)
if (const IntegerLiteral *IL =
dyn_cast<IntegerLiteral>(Init->IgnoreParenCasts())) {
- const nonloc::ConcreteInt &V = ValMgr.makeIntVal(IL);
- return ValMgr.getSValuator().EvalCast(V, Init->getType(),
- IL->getType());
+ const nonloc::ConcreteInt &V = svalBuilder.makeIntVal(IL);
+ return svalBuilder.evalCast(V, Init->getType(), IL->getType());
}
}
if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT))
return V.getValue();
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
}
if (T->isIntegerType())
- return ValMgr.makeIntVal(0, T);
+ return svalBuilder.makeIntVal(0, T);
if (T->isPointerType())
- return ValMgr.makeNull();
+ return svalBuilder.makeNull();
return UnknownVal();
}
@@ -1288,35 +1244,37 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
SVal RegionStoreManager::RetrieveLazySymbol(const TypedRegion *R) {
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R);
+ return svalBuilder.getRegionValueSymbolVal(R);
}
SVal RegionStoreManager::RetrieveStruct(Store store, const TypedRegion* R) {
QualType T = R->getValueType();
assert(T->isStructureOrClassType());
- return ValMgr.makeLazyCompoundVal(store, R);
+ return svalBuilder.makeLazyCompoundVal(store, R);
}
SVal RegionStoreManager::RetrieveArray(Store store, const TypedRegion * R) {
- assert(isa<ConstantArrayType>(R->getValueType()));
- return ValMgr.makeLazyCompoundVal(store, R);
+ assert(Ctx.getAsConstantArrayType(R->getValueType()));
+ return svalBuilder.makeLazyCompoundVal(store, R);
}
//===----------------------------------------------------------------------===//
// Binding values to regions.
//===----------------------------------------------------------------------===//
-Store RegionStoreManager::Remove(Store store, Loc L) {
+StoreRef RegionStoreManager::Remove(Store store, Loc L) {
if (isa<loc::MemRegionVal>(L))
if (const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion())
- return Remove(GetRegionBindings(store), R).getRoot();
+ return StoreRef(removeBinding(GetRegionBindings(store),
+ R).getRootWithoutRetain(),
+ *this);
- return store;
+ return StoreRef(store, *this);
}
-Store RegionStoreManager::Bind(Store store, Loc L, SVal V) {
+StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
if (isa<loc::ConcreteInt>(L))
- return store;
+ return StoreRef(store, *this);
// If we get here, the location should be a region.
const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
@@ -1355,11 +1313,12 @@ Store RegionStoreManager::Bind(Store store, Loc L, SVal V) {
// Perform the binding.
RegionBindings B = GetRegionBindings(store);
- return Add(B, R, BindingKey::Direct, V).getRoot();
+ return StoreRef(addBinding(B, R, BindingKey::Direct,
+ V).getRootWithoutRetain(), *this);
}
-Store RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
- SVal InitVal) {
+StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
+ SVal InitVal) {
QualType T = VR->getDecl()->getType();
@@ -1368,43 +1327,43 @@ Store RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
if (T->isStructureOrClassType())
return BindStruct(store, VR, InitVal);
- return Bind(store, ValMgr.makeLoc(VR), InitVal);
+ return Bind(store, svalBuilder.makeLoc(VR), InitVal);
}
// FIXME: this method should be merged into Bind().
-Store RegionStoreManager::BindCompoundLiteral(Store store,
- const CompoundLiteralExpr *CL,
- const LocationContext *LC,
- SVal V) {
+StoreRef RegionStoreManager::BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) {
return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)),
V);
}
-
-Store RegionStoreManager::setImplicitDefaultValue(Store store,
- const MemRegion *R,
- QualType T) {
+StoreRef RegionStoreManager::setImplicitDefaultValue(Store store,
+ const MemRegion *R,
+ QualType T) {
RegionBindings B = GetRegionBindings(store);
SVal V;
- if (Loc::IsLocType(T))
- V = ValMgr.makeNull();
+ if (Loc::isLocType(T))
+ V = svalBuilder.makeNull();
else if (T->isIntegerType())
- V = ValMgr.makeZeroVal(T);
+ V = svalBuilder.makeZeroVal(T);
else if (T->isStructureOrClassType() || T->isArrayType()) {
// Set the default value to a zero constant when it is a structure
// or array. The type doesn't really matter.
- V = ValMgr.makeZeroVal(Ctx.IntTy);
+ V = svalBuilder.makeZeroVal(Ctx.IntTy);
}
else {
- return store;
+ return StoreRef(store, *this);
}
- return Add(B, R, BindingKey::Default, V).getRoot();
+ return StoreRef(addBinding(B, R, BindingKey::Default,
+ V).getRootWithoutRetain(), *this);
}
-Store RegionStoreManager::BindArray(Store store, const TypedRegion* R,
- SVal Init) {
+StoreRef RegionStoreManager::BindArray(Store store, const TypedRegion* R,
+ SVal Init) {
const ArrayType *AT =cast<ArrayType>(Ctx.getCanonicalType(R->getValueType()));
QualType ElementTy = AT->getElementType();
@@ -1419,7 +1378,7 @@ Store RegionStoreManager::BindArray(Store store, const TypedRegion* R,
// Treat the string as a lazy compound value.
nonloc::LazyCompoundVal LCV =
- cast<nonloc::LazyCompoundVal>(ValMgr.makeLazyCompoundVal(store, S));
+ cast<nonloc::LazyCompoundVal>(svalBuilder.makeLazyCompoundVal(store, S));
return CopyLazyBindings(LCV, store, R);
}
@@ -1436,35 +1395,36 @@ Store RegionStoreManager::BindArray(Store store, const TypedRegion* R,
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
uint64_t i = 0;
+ StoreRef newStore(store, *this);
for (; Size.hasValue() ? i < Size.getValue() : true ; ++i, ++VI) {
// The init list might be shorter than the array length.
if (VI == VE)
break;
- SVal Idx = ValMgr.makeArrayIndex(i);
+ const NonLoc &Idx = svalBuilder.makeArrayIndex(i);
const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx);
if (ElementTy->isStructureOrClassType())
- store = BindStruct(store, ER, *VI);
+ newStore = BindStruct(newStore.getStore(), ER, *VI);
else if (ElementTy->isArrayType())
- store = BindArray(store, ER, *VI);
+ newStore = BindArray(newStore.getStore(), ER, *VI);
else
- store = Bind(store, ValMgr.makeLoc(ER), *VI);
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(ER), *VI);
}
// If the init list is shorter than the array length, set the
// array default value.
if (Size.hasValue() && i < Size.getValue())
- store = setImplicitDefaultValue(store, R, ElementTy);
+ newStore = setImplicitDefaultValue(newStore.getStore(), R, ElementTy);
- return store;
+ return newStore;
}
-Store RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
- SVal V) {
+StoreRef RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
+ SVal V) {
if (!Features.supportsFields())
- return store;
+ return StoreRef(store, *this);
QualType T = R->getValueType();
assert(T->isStructureOrClassType());
@@ -1473,7 +1433,7 @@ Store RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
RecordDecl* RD = RT->getDecl();
if (!RD->isDefinition())
- return store;
+ return StoreRef(store, *this);
// Handle lazy compound values.
if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
@@ -1491,7 +1451,8 @@ Store RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
RecordDecl::field_iterator FI, FE;
-
+ StoreRef newStore(store, *this);
+
for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI, ++VI) {
if (VI == VE)
@@ -1501,36 +1462,61 @@ Store RegionStoreManager::BindStruct(Store store, const TypedRegion* R,
const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
if (FTy->isArrayType())
- store = BindArray(store, FR, *VI);
+ newStore = BindArray(newStore.getStore(), FR, *VI);
else if (FTy->isStructureOrClassType())
- store = BindStruct(store, FR, *VI);
+ newStore = BindStruct(newStore.getStore(), FR, *VI);
else
- store = Bind(store, ValMgr.makeLoc(FR), *VI);
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(FR), *VI);
}
// There may be fewer values in the initializer list than fields in the struct.
if (FI != FE) {
- RegionBindings B = GetRegionBindings(store);
- B = Add(B, R, BindingKey::Default, ValMgr.makeIntVal(0, false));
- store = B.getRoot();
+ RegionBindings B = GetRegionBindings(newStore.getStore());
+ B = addBinding(B, R, BindingKey::Default, svalBuilder.makeIntVal(0, false));
+ newStore = StoreRef(B.getRootWithoutRetain(), *this);
}
- return store;
+ return newStore;
}
-Store RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
+StoreRef RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
SVal DefaultVal) {
+ BindingKey key = BindingKey::Make(R, BindingKey::Default);
+
+ // The BindingKey may be "invalid" if we cannot handle the region binding
+ // explicitly. One example is something like array[index], where index
+ // is a symbolic value. In such cases, we want to invalidate the entire
+ // array, as the index assignment could have been to any element. In
+ // the case of nested symbolic indices, we need to march up the region
+ // hierarchy until we reach a region whose binding we can reason about.
+ const SubRegion *subReg = R;
+
+ while (!key.isValid()) {
+ if (const SubRegion *tmp = dyn_cast<SubRegion>(subReg->getSuperRegion())) {
+ subReg = tmp;
+ key = BindingKey::Make(tmp, BindingKey::Default);
+ }
+ else
+ break;
+ }
+
+ // Remove the old bindings, using 'subReg' as the root of all regions
+ // we will invalidate.
RegionBindings B = GetRegionBindings(store);
llvm::OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(store));
- RemoveSubRegionBindings(B, R, *SubRegions);
+ RemoveSubRegionBindings(B, subReg, *SubRegions);
// Set the default value of the struct region to "unknown".
- return Add(B, R, BindingKey::Default, DefaultVal).getRoot();
+ if (!key.isValid())
+ return StoreRef(B.getRootWithoutRetain(), *this);
+
+ return StoreRef(addBinding(B, key, DefaultVal).getRootWithoutRetain(), *this);
}
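To make the invalidation path above concrete, here is a hedged sketch of analyzed source that yields an "invalid" default BindingKey (the names are illustrative, not part of this commit):

    struct S { int x, y; };
    void set(struct S *arr, unsigned i, struct S v) {
      arr[i] = v;   // Symbolic index: the binding for arr[i] cannot be
                    // represented precisely, so the walk above climbs to
                    // 'arr' and invalidates the whole array binding.
    }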
-Store RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
- Store store, const TypedRegion *R) {
+StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
+ Store store,
+ const TypedRegion *R) {
// Nuke the old bindings stemming from R.
RegionBindings B = GetRegionBindings(store);
@@ -1543,7 +1529,8 @@ Store RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
// Now copy the bindings. This amounts to just binding 'V' to 'R'. This
// results in a zero-copy algorithm.
- return Add(B, R, BindingKey::Direct, V).getRoot();
+ return StoreRef(addBinding(B, R, BindingKey::Direct,
+ V).getRootWithoutRetain(), *this);
}
//===----------------------------------------------------------------------===//
@@ -1551,38 +1538,42 @@ Store RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
//===----------------------------------------------------------------------===//
-RegionBindings RegionStoreManager::Add(RegionBindings B, BindingKey K, SVal V) {
+RegionBindings RegionStoreManager::addBinding(RegionBindings B, BindingKey K,
+ SVal V) {
if (!K.isValid())
return B;
- return RBFactory.Add(B, K, V);
+ return RBFactory.add(B, K, V);
}
-RegionBindings RegionStoreManager::Add(RegionBindings B, const MemRegion *R,
- BindingKey::Kind k, SVal V) {
- return Add(B, BindingKey::Make(R, k), V);
+RegionBindings RegionStoreManager::addBinding(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k, SVal V) {
+ return addBinding(B, BindingKey::Make(R, k), V);
}
-const SVal *RegionStoreManager::Lookup(RegionBindings B, BindingKey K) {
+const SVal *RegionStoreManager::lookup(RegionBindings B, BindingKey K) {
if (!K.isValid())
return NULL;
return B.lookup(K);
}
-const SVal *RegionStoreManager::Lookup(RegionBindings B,
+const SVal *RegionStoreManager::lookup(RegionBindings B,
const MemRegion *R,
BindingKey::Kind k) {
- return Lookup(B, BindingKey::Make(R, k));
+ return lookup(B, BindingKey::Make(R, k));
}
-RegionBindings RegionStoreManager::Remove(RegionBindings B, BindingKey K) {
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+ BindingKey K) {
if (!K.isValid())
return B;
- return RBFactory.Remove(B, K);
+ return RBFactory.remove(B, K);
}
-RegionBindings RegionStoreManager::Remove(RegionBindings B, const MemRegion *R,
- BindingKey::Kind k){
- return Remove(B, BindingKey::Make(R, k));
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k){
+ return removeBinding(B, BindingKey::Make(R, k));
}
//===----------------------------------------------------------------------===//
@@ -1590,17 +1581,18 @@ RegionBindings RegionStoreManager::Remove(RegionBindings B, const MemRegion *R,
//===----------------------------------------------------------------------===//
namespace {
-class RemoveDeadBindingsWorker :
- public ClusterAnalysis<RemoveDeadBindingsWorker> {
+class removeDeadBindingsWorker :
+ public ClusterAnalysis<removeDeadBindingsWorker> {
llvm::SmallVector<const SymbolicRegion*, 12> Postponed;
SymbolReaper &SymReaper;
const StackFrameContext *CurrentLCtx;
public:
- RemoveDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
+ removeDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
RegionBindings b, SymbolReaper &symReaper,
const StackFrameContext *LCtx)
- : ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b),
+ : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b,
+ /* includeGlobals = */ false),
SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
@@ -1613,7 +1605,7 @@ public:
};
}
-void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
RegionCluster &C) {
if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
@@ -1647,13 +1639,13 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
}
-void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
BindingKey *I, BindingKey *E) {
for ( ; I != E; ++I)
VisitBindingKey(*I);
}
-void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
+void removeDeadBindingsWorker::VisitBinding(SVal V) {
// Is it a LazyCompoundVal? All referenced regions are live as well.
if (const nonloc::LazyCompoundVal *LCS =
dyn_cast<nonloc::LazyCompoundVal>(&V)) {
@@ -1678,7 +1670,7 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
SymReaper.markLive(*SI);
}
-void RemoveDeadBindingsWorker::VisitBindingKey(BindingKey K) {
+void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) {
const MemRegion *R = K.getRegion();
// Mark this region "live" by adding it to the worklist. This will cause
@@ -1707,11 +1699,11 @@ void RemoveDeadBindingsWorker::VisitBindingKey(BindingKey K) {
}
// Visit the data binding for K.
- if (const SVal *V = RM.Lookup(B, K))
+ if (const SVal *V = RM.lookup(B, K))
VisitBinding(*V);
}
-bool RemoveDeadBindingsWorker::UpdatePostponed() {
+bool removeDeadBindingsWorker::UpdatePostponed() {
// See if any postponed SymbolicRegions are actually live now, after
// having done a scan.
bool changed = false;
@@ -1729,13 +1721,13 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
return changed;
}
-Store RegionStoreManager::RemoveDeadBindings(Store store,
- const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+StoreRef RegionStoreManager::removeDeadBindings(Store store,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
RegionBindings B = GetRegionBindings(store);
- RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+ removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
@@ -1756,7 +1748,7 @@ Store RegionStoreManager::RemoveDeadBindings(Store store,
continue;
// Remove the dead entry.
- B = Remove(B, K);
+ B = removeBinding(B, K);
// Mark all non-live symbols that this binding references as dead.
if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(K.getRegion()))
@@ -1768,23 +1760,27 @@ Store RegionStoreManager::RemoveDeadBindings(Store store,
SymReaper.maybeDead(*SI);
}
- return B.getRoot();
+ return StoreRef(B.getRootWithoutRetain(), *this);
}
-Store RegionStoreManager::EnterStackFrame(const GRState *state,
- const StackFrameContext *frame) {
+StoreRef RegionStoreManager::enterStackFrame(const GRState *state,
+ const StackFrameContext *frame) {
FunctionDecl const *FD = cast<FunctionDecl>(frame->getDecl());
- FunctionDecl::param_const_iterator PI = FD->param_begin();
- Store store = state->getStore();
+ FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ PE = FD->param_end();
+ StoreRef store = StoreRef(state->getStore(), *this);
if (CallExpr const *CE = dyn_cast<CallExpr>(frame->getCallSite())) {
CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
- // Copy the arg expression value to the arg variables.
- for (; AI != AE; ++AI, ++PI) {
+ // Copy the arg expression value to the arg variables. We check that
+ // PI != PE because the number of arguments at the call site may differ
+ // from the number of parameters in the function declaration.
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
SVal ArgVal = state->getSVal(*AI);
- store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
+ store = Bind(store.getStore(),
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, frame)), ArgVal);
}
} else if (const CXXConstructExpr *CE =
dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
@@ -1794,10 +1790,11 @@ Store RegionStoreManager::EnterStackFrame(const GRState *state,
// Copy the arg expression value to the arg variables.
for (; AI != AE; ++AI, ++PI) {
SVal ArgVal = state->getSVal(*AI);
- store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
+ store = Bind(store.getStore(),
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI,frame)), ArgVal);
}
} else
- llvm_unreachable("Unhandled call expression.");
+ assert(isa<CXXDestructorDecl>(frame->getDecl()));
return store;
}
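The new PI != PE guard covers calls whose argument count exceeds the declared parameter list, which is legal C when the callee is declared without a prototype. A hypothetical example of input that would previously walk PI past param_end():

    void f();            /* no prototype: zero declared parameters */
    void g(void) {
      f(1, 2, 3);        /* three actual arguments at the call site */
    }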
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
new file mode 100644
index 0000000..b0fd497
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -0,0 +1,310 @@
+// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValBuilder, the base class for all (complete) SValBuilder
+// implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Basic SVal creation.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType T) {
+ if (Loc::isLocType(T))
+ return makeNull();
+
+ if (T->isIntegerType())
+ return makeIntVal(0, T);
+
+ // FIXME: Handle floats.
+ // FIXME: Handle structs.
+ return UnknownVal();
+}
+
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& v, QualType T) {
+ // The Environment ensures we always get a persistent APSInt in
+ // BasicValueFactory, so we don't need to get the APSInt from
+ // BasicValueFactory again.
+ assert(!Loc::isLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, v, T));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType T) {
+ assert(SymMgr.getType(lhs) == SymMgr.getType(rhs));
+ assert(!Loc::isLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, T));
+}
+
+
+SVal SValBuilder::convertToArrayIndex(SVal V) {
+ if (V.isUnknownOrUndef())
+ return V;
+
+ // Common case: we have an appropriately sized integer.
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&V)) {
+ const llvm::APSInt& I = CI->getValue();
+ if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+ return V;
+ }
+
+ return evalCastNL(cast<NonLoc>(V), ArrayIndexTy);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getRegionValueSymbolVal(const TypedRegion* R) {
+ QualType T = R->getValueType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getRegionValueSymbol(R);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ unsigned Count) {
+ QualType T = E->getType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, Count, SymbolTag);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *SymbolTag,
+ const Expr *E,
+ QualType T,
+ unsigned Count) {
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count, SymbolTag);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getMetadataSymbolVal(const void *SymbolTag,
+ const MemRegion *MR,
+ const Expr *E, QualType T,
+ unsigned Count) {
+ assert(SymbolManager::canSymbolicate(T) && "Invalid metadata symbol type");
+
+ SymbolRef sym = SymMgr.getMetadataSymbol(MR, E, T, Count, SymbolTag);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+ const TypedRegion *R) {
+ QualType T = R->getValueType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, R);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl* FD) {
+ return loc::MemRegionVal(MemMgr.getFunctionTextRegion(FD));
+}
+
+DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *D,
+ CanQualType locTy,
+ const LocationContext *LC) {
+ const BlockTextRegion *BC =
+ MemMgr.getBlockTextRegion(D, locTy, LC->getAnalysisContext());
+ const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, LC);
+ return loc::MemRegionVal(BD);
+}
+
+//===----------------------------------------------------------------------===//
+
+SVal SValBuilder::evalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
+ SVal L, SVal R, QualType T) {
+
+ if (L.isUndef() || R.isUndef())
+ return UndefinedVal();
+
+ if (L.isUnknown() || R.isUnknown())
+ return UnknownVal();
+
+ if (isa<Loc>(L)) {
+ if (isa<Loc>(R))
+ return evalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T);
+
+ return evalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
+ }
+
+ if (isa<Loc>(R)) {
+ // Support pointer arithmetic where the addend is on the left
+ // and the pointer on the right.
+ assert(Op == BO_Add);
+
+ // Commute the operands.
+ return evalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
+ }
+
+ return evalBinOpNN(ST, Op, cast<NonLoc>(L), cast<NonLoc>(R), T);
+}
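The commuted branch corresponds to pointer arithmetic written with the integer addend on the left, which C permits. An illustrative (hypothetical) input:

    int *mid(int *p) {
      return 2 + p;      // NonLoc + Loc: evalBinOp swaps the operands and
                         // keeps BO_Add, dispatching to evalBinOpLN.
    }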
+
+DefinedOrUnknownSVal SValBuilder::evalEQ(const GRState *ST,
+ DefinedOrUnknownSVal L,
+ DefinedOrUnknownSVal R) {
+ return cast<DefinedOrUnknownSVal>(evalBinOp(ST, BO_EQ, L, R,
+ Context.IntTy));
+}
+
+// FIXME: should rewrite according to the cast kind.
+SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
+ if (val.isUnknownOrUndef() || castTy == originalTy)
+ return val;
+
+ // For const casts, just propagate the value.
+ if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
+ if (Context.hasSameUnqualifiedType(castTy, originalTy))
+ return val;
+
+ // Check for casts to real or complex numbers. We don't handle these at all
+ // right now.
+ if (castTy->isFloatingType() || castTy->isAnyComplexType())
+ return UnknownVal();
+
+ // Check for casts from integers to integers.
+ if (castTy->isIntegerType() && originalTy->isIntegerType())
+ return evalCastNL(cast<NonLoc>(val), castTy);
+
+ // Check for casts from pointers to integers.
+ if (castTy->isIntegerType() && Loc::isLocType(originalTy))
+ return evalCastL(cast<Loc>(val), castTy);
+
+ // Check for casts from integers to pointers.
+ if (Loc::isLocType(castTy) && originalTy->isIntegerType()) {
+ if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (const MemRegion *R = LV->getLoc().getAsRegion()) {
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+ return LV->getLoc();
+ }
+ goto DispatchCast;
+ }
+
+ // Just pass through function and block pointers.
+ if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+ assert(Loc::isLocType(castTy));
+ return val;
+ }
+
+ // Check for casts from array type to another type.
+ if (originalTy->isArrayType()) {
+ // We will always decay to a pointer.
+ val = StateMgr.ArrayToPointer(cast<Loc>(val));
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (castTy->isPointerType())
+ return val;
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(castTy->isIntegerType());
+
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ return evalCastL(cast<Loc>(val), castTy);
+ }
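As a sketch of the two array-cast forms this branch handles (illustrative declarations, not from this commit):

    int a[4];
    int *p = a;          // decay to pointer: the decayed value passes through
    long n = (long)a;    // decay, then pointer-to-integer cast via evalCastL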
+
+ // Check for casts from a region to a specific type.
+ if (const MemRegion *R = val.getAsRegion()) {
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+
+ if (!Loc::isLocType(castTy)) {
+ // FIXME: There can be gross cases where one casts the result of a function
+ // (that returns a pointer) to some other value that happens to fit
+ // within that pointer value. We currently have no good way to
+ // model such operations. When this happens, the underlying operation
+ // is that the caller is reasoning about bits. Conceptually we are
+ // layering a "view" of a location on top of those bits. Perhaps
+ // we need to be more lazy about mutual possible views, even on an
+ // SVal? This may be necessary for bit-level reasoning as well.
+ return UnknownVal();
+ }
+
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
+ originalTy->isBlockPointerType() || castTy->isReferenceType());
+
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // evaluates to UnknownVal.
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+
+DispatchCast:
+ // All other cases.
+ return isa<Loc>(val) ? evalCastL(cast<Loc>(val), castTy)
+ : evalCastNL(cast<NonLoc>(val), castTy);
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 97ba74e..4614e34 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -12,10 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/Basic/IdentifierTable.h"
using namespace clang;
+using namespace ento;
using llvm::dyn_cast;
using llvm::cast;
using llvm::APSInt;
@@ -66,7 +68,7 @@ SymbolRef SVal::getAsLocSymbol() const {
return X->getLoc().getAsLocSymbol();
if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
- const MemRegion *R = X->StripCasts();
+ const MemRegion *R = X->stripCasts();
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
return SymR->getSymbol();
}
@@ -126,7 +128,7 @@ const MemRegion *SVal::getAsRegion() const {
return 0;
}
-const MemRegion *loc::MemRegionVal::StripCasts() const {
+const MemRegion *loc::MemRegionVal::stripCasts() const {
const MemRegion *R = getRegion();
return R ? R->StripCasts() : NULL;
}
@@ -221,11 +223,11 @@ bool SVal::isZeroConstant() const {
// Transfer function dispatch for Non-Locs.
//===----------------------------------------------------------------------===//
-SVal nonloc::ConcreteInt::evalBinOp(ValueManager &ValMgr,
+SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
BinaryOperator::Opcode Op,
const nonloc::ConcreteInt& R) const {
const llvm::APSInt* X =
- ValMgr.getBasicValueFactory().EvaluateAPSInt(Op, getValue(), R.getValue());
+ svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
if (X)
return nonloc::ConcreteInt(*X);
@@ -234,26 +236,27 @@ SVal nonloc::ConcreteInt::evalBinOp(ValueManager &ValMgr,
}
nonloc::ConcreteInt
-nonloc::ConcreteInt::evalComplement(ValueManager &ValMgr) const {
- return ValMgr.makeIntVal(~getValue());
+nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(~getValue());
}
-nonloc::ConcreteInt nonloc::ConcreteInt::evalMinus(ValueManager &ValMgr) const {
- return ValMgr.makeIntVal(-getValue());
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(-getValue());
}
//===----------------------------------------------------------------------===//
// Transfer function dispatch for Locs.
//===----------------------------------------------------------------------===//
-SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
+SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
BinaryOperator::Opcode Op,
const loc::ConcreteInt& R) const {
assert (Op == BO_Add || Op == BO_Sub ||
(Op >= BO_LT && Op <= BO_NE));
- const llvm::APSInt* X = BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
+ const llvm::APSInt* X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
if (X)
return loc::ConcreteInt(*X);
@@ -270,7 +273,7 @@ void SVal::dump() const { dumpToStream(llvm::errs()); }
void SVal::dumpToStream(llvm::raw_ostream& os) const {
switch (getBaseKind()) {
case UnknownKind:
- os << "Invalid";
+ os << "Unknown";
break;
case NonLocKind:
cast<NonLoc>(this)->dumpToStream(os);
@@ -288,11 +291,16 @@ void SVal::dumpToStream(llvm::raw_ostream& os) const {
void NonLoc::dumpToStream(llvm::raw_ostream& os) const {
switch (getSubKind()) {
- case nonloc::ConcreteIntKind:
- os << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
- if (cast<nonloc::ConcreteInt>(this)->getValue().isUnsigned())
- os << 'U';
+ case nonloc::ConcreteIntKind: {
+ const nonloc::ConcreteInt& C = *cast<nonloc::ConcreteInt>(this);
+ if (C.getValue().isUnsigned())
+ os << C.getValue().getZExtValue();
+ else
+ os << C.getValue().getSExtValue();
+ os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
+ << C.getValue().getBitWidth() << 'b';
break;
+ }
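With this change a concrete integer dumps with an explicit signedness marker and bit width instead of always printing the zero-extended value. Sample output implied by the code above (the values are illustrative):

    // (unsigned int)5  dumps as  "5 U32b"
    // (int)-1          dumps as  "-1 S32b"   (previously printed as 4294967295)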
case nonloc::SymbolValKind:
os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
break;
@@ -342,11 +350,27 @@ void Loc::dumpToStream(llvm::raw_ostream& os) const {
os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
break;
case loc::GotoLabelKind:
- os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
+ os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getName();
break;
case loc::MemRegionKind:
os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
break;
+ case loc::ObjCPropRefKind: {
+ const ObjCPropertyRefExpr *E = cast<loc::ObjCPropRef>(this)->getPropRefExpr();
+ os << "objc-prop{";
+ if (E->isSuperReceiver())
+ os << "super.";
+ else if (E->getBase())
+ os << "<base>.";
+
+ if (E->isImplicitProperty())
+ os << E->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ os << E->getExplicitProperty()->getName();
+
+ os << "}";
+ break;
+ }
default:
assert(false && "Pretty-printing not implemented for this Loc.");
break;
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 04496e1..e0b61ab 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -13,12 +13,14 @@
//===----------------------------------------------------------------------===//
#include "SimpleConstraintManager.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Checker.h"
namespace clang {
+namespace ento {
+
SimpleConstraintManager::~SimpleConstraintManager() {}
bool SimpleConstraintManager::canReasonAbout(SVal X) const {
@@ -55,22 +57,22 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
return true;
}
-const GRState *SimpleConstraintManager::Assume(const GRState *state,
+const GRState *SimpleConstraintManager::assume(const GRState *state,
DefinedSVal Cond,
bool Assumption) {
if (isa<NonLoc>(Cond))
- return Assume(state, cast<NonLoc>(Cond), Assumption);
+ return assume(state, cast<NonLoc>(Cond), Assumption);
else
- return Assume(state, cast<Loc>(Cond), Assumption);
+ return assume(state, cast<Loc>(Cond), Assumption);
}
-const GRState *SimpleConstraintManager::Assume(const GRState *state, Loc cond,
+const GRState *SimpleConstraintManager::assume(const GRState *state, Loc cond,
bool assumption) {
- state = AssumeAux(state, cond, assumption);
- return SU.ProcessAssume(state, cond, assumption);
+ state = assumeAux(state, cond, assumption);
+ return SU.processAssume(state, cond, assumption);
}
-const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
+const GRState *SimpleConstraintManager::assumeAux(const GRState *state,
Loc Cond, bool Assumption) {
BasicValueFactory &BasicVals = state->getBasicVals();
@@ -91,9 +93,9 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth();
if (Assumption)
- return AssumeSymNE(state, SymR->getSymbol(), zero, zero);
+ return assumeSymNE(state, SymR->getSymbol(), zero, zero);
else
- return AssumeSymEQ(state, SymR->getSymbol(), zero, zero);
+ return assumeSymEQ(state, SymR->getSymbol(), zero, zero);
}
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
@@ -112,16 +114,16 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
} // end switch
}
-const GRState *SimpleConstraintManager::Assume(const GRState *state,
+const GRState *SimpleConstraintManager::assume(const GRState *state,
NonLoc cond,
bool assumption) {
- state = AssumeAux(state, cond, assumption);
- return SU.ProcessAssume(state, cond, assumption);
+ state = assumeAux(state, cond, assumption);
+ return SU.processAssume(state, cond, assumption);
}
static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
// FIXME: This should probably be part of BinaryOperator, since this isn't
- // the only place it's used. (This code was copied from SimpleSValuator.cpp.)
+ // the only place it's used. (This code was copied from SimpleSValBuilder.cpp.)
switch (op) {
default:
assert(false && "Invalid opcode.");
@@ -134,7 +136,7 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
}
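The body of NegateComparison is elided from this hunk; logically it must map each comparison opcode to its negation. A sketch of the expected pairs (standard Boolean identities, not copied from the source):

    // BO_EQ <-> BO_NE,   BO_LT <-> BO_GE,   BO_GT <-> BO_LE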
-const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
+const GRState *SimpleConstraintManager::assumeAux(const GRState *state,
NonLoc Cond,
bool Assumption) {
@@ -159,9 +161,9 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
QualType T = SymMgr.getType(sym);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
if (Assumption)
- return AssumeSymNE(state, sym, zero, zero);
+ return assumeSymNE(state, sym, zero, zero);
else
- return AssumeSymEQ(state, sym, zero, zero);
+ return assumeSymEQ(state, sym, zero, zero);
}
case nonloc::SymExprValKind: {
@@ -179,14 +181,14 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
QualType T = SymMgr.getType(SE);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
op = (Assumption ? BO_NE : BO_EQ);
- return AssumeSymRel(state, SE, op, zero);
+ return assumeSymRel(state, SE, op, zero);
}
// From here on out, op is the real comparison we'll be testing.
if (!Assumption)
op = NegateComparison(op);
- return AssumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+ return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
case nonloc::ConcreteIntKind: {
@@ -196,12 +198,12 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
}
case nonloc::LocAsIntegerKind:
- return AssumeAux(state, cast<nonloc::LocAsInteger>(Cond).getLoc(),
+ return assumeAux(state, cast<nonloc::LocAsInteger>(Cond).getLoc(),
Assumption);
} // end switch
}
-const GRState *SimpleConstraintManager::AssumeSymRel(const GRState *state,
+const GRState *SimpleConstraintManager::assumeSymRel(const GRState *state,
const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt& Int) {
@@ -259,41 +261,43 @@ const GRState *SimpleConstraintManager::AssumeSymRel(const GRState *state,
ASTContext &Ctx = StateMgr.getContext();
QualType T = Sym->getType(Ctx);
- assert(T->isIntegerType() || Loc::IsLocType(T));
+ assert(T->isIntegerType() || Loc::isLocType(T));
unsigned bitwidth = Ctx.getTypeSize(T);
- bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::isLocType(T);
// Convert the adjustment.
Adjustment.setIsUnsigned(isSymUnsigned);
- Adjustment.extOrTrunc(bitwidth);
+ Adjustment = Adjustment.extOrTrunc(bitwidth);
// Convert the right-hand side integer.
llvm::APSInt ConvertedInt(Int, isSymUnsigned);
- ConvertedInt.extOrTrunc(bitwidth);
+ ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);
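Note the new shape of these conversions: both extOrTrunc calls become assignments, which tracks the LLVM APInt/APSInt API change from in-place resizing to value-returning resizing (the same pattern recurs in SimpleSValBuilder.cpp below). A minimal sketch:

    llvm::APSInt V(/*BitWidth=*/32, /*isUnsigned=*/true);
    V = V.extOrTrunc(64);   // returns the resized value; the old form mutated V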
switch (op) {
default:
- // No logic yet for other operators. Assume the constraint is feasible.
+ // No logic yet for other operators. Assume the constraint is feasible.
return state;
case BO_EQ:
- return AssumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
case BO_NE:
- return AssumeSymNE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
case BO_GT:
- return AssumeSymGT(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
case BO_GE:
- return AssumeSymGE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
case BO_LT:
- return AssumeSymLT(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
case BO_LE:
- return AssumeSymLE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
} // end switch
}
-} // end of namespace clang
+} // end of namespace ento
+
+} // end of namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 96811b3..a2952af 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -11,18 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
-#define LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
+#ifndef LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
-#include "clang/Checker/PathSensitive/ConstraintManager.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
namespace clang {
+namespace ento {
+
class SimpleConstraintManager : public ConstraintManager {
- GRSubEngine &SU;
+ SubEngine &SU;
public:
- SimpleConstraintManager(GRSubEngine &subengine) : SU(subengine) {}
+ SimpleConstraintManager(SubEngine &subengine) : SU(subengine) {}
virtual ~SimpleConstraintManager();
//===------------------------------------------------------------------===//
@@ -31,14 +33,14 @@ public:
bool canReasonAbout(SVal X) const;
- const GRState *Assume(const GRState *state, DefinedSVal Cond,
+ const GRState *assume(const GRState *state, DefinedSVal Cond,
bool Assumption);
- const GRState *Assume(const GRState *state, Loc Cond, bool Assumption);
+ const GRState *assume(const GRState *state, Loc Cond, bool Assumption);
- const GRState *Assume(const GRState *state, NonLoc Cond, bool Assumption);
+ const GRState *assume(const GRState *state, NonLoc Cond, bool Assumption);
- const GRState *AssumeSymRel(const GRState *state,
+ const GRState *assumeSymRel(const GRState *state,
const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt& Int);
@@ -51,27 +53,27 @@ protected:
// Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
// operation for the method being invoked.
- virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymNE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const GRState *AssumeSymEQ(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymEQ(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const GRState *AssumeSymLT(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymLT(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const GRState *AssumeSymGT(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymGT(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const GRState *AssumeSymLE(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymLE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
+ virtual const GRState *assumeSymGE(const GRState *state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
@@ -79,11 +81,13 @@ protected:
// Internal implementation.
//===------------------------------------------------------------------===//
- const GRState *AssumeAux(const GRState *state, Loc Cond,bool Assumption);
+ const GRState *assumeAux(const GRState *state, Loc Cond,bool Assumption);
- const GRState *AssumeAux(const GRState *state, NonLoc Cond, bool Assumption);
+ const GRState *assumeAux(const GRState *state, NonLoc Cond, bool Assumption);
};
-} // end clang namespace
+} // end ento namespace
+
+} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 782cd4f..9a46bd6 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -1,4 +1,4 @@
-// SimpleSValuator.cpp - A basic SValuator ------------------------*- C++ -*--//
+// SimpleSValBuilder.cpp - A basic SValBuilder -----------------------*- C++ -*-
//
// The LLVM Compiler Infrastructure
//
@@ -7,35 +7,38 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines SimpleSValuator, a basic implementation of SValuator.
+// This file defines SimpleSValBuilder, a basic implementation of SValBuilder.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/SValuator.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
using namespace clang;
+using namespace ento;
namespace {
-class SimpleSValuator : public SValuator {
+class SimpleSValBuilder : public SValBuilder {
protected:
- virtual SVal EvalCastNL(NonLoc val, QualType castTy);
- virtual SVal EvalCastL(Loc val, QualType castTy);
+ virtual SVal evalCastNL(NonLoc val, QualType castTy);
+ virtual SVal evalCastL(Loc val, QualType castTy);
public:
- SimpleSValuator(ValueManager &valMgr) : SValuator(valMgr) {}
- virtual ~SimpleSValuator() {}
-
- virtual SVal EvalMinus(NonLoc val);
- virtual SVal EvalComplement(NonLoc val);
- virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
+ SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ GRStateManager &stateMgr)
+ : SValBuilder(alloc, context, stateMgr) {}
+ virtual ~SimpleSValBuilder() {}
+
+ virtual SVal evalMinus(NonLoc val);
+ virtual SVal evalComplement(NonLoc val);
+ virtual SVal evalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy);
- virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode op,
+ virtual SVal evalBinOpLL(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, Loc rhs, QualType resultTy);
- virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
+ virtual SVal evalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
- /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
+ /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
/// (integer) value, that value is returned. Otherwise, returns NULL.
virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V);
@@ -44,35 +47,34 @@ public:
};
} // end anonymous namespace
-SValuator *clang::CreateSimpleSValuator(ValueManager &valMgr) {
- return new SimpleSValuator(valMgr);
+SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+ ASTContext &context,
+ GRStateManager &stateMgr) {
+ return new SimpleSValBuilder(alloc, context, stateMgr);
}
//===----------------------------------------------------------------------===//
// Transfer function for Casts.
//===----------------------------------------------------------------------===//
-SVal SimpleSValuator::EvalCastNL(NonLoc val, QualType castTy) {
+SVal SimpleSValBuilder::evalCastNL(NonLoc val, QualType castTy) {
- bool isLocType = Loc::IsLocType(castTy);
+ bool isLocType = Loc::isLocType(castTy);
if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) {
if (isLocType)
return LI->getLoc();
// FIXME: Correctly support promotions/truncations.
- ASTContext &Ctx = ValMgr.getContext();
- unsigned castSize = Ctx.getTypeSize(castTy);
+ unsigned castSize = Context.getTypeSize(castTy);
if (castSize == LI->getNumBits())
return val;
-
- return ValMgr.makeLocAsInteger(LI->getLoc(), castSize);
+ return makeLocAsInteger(LI->getLoc(), castSize);
}
if (const SymExpr *se = val.getAsSymbolicExpression()) {
- ASTContext &Ctx = ValMgr.getContext();
- QualType T = Ctx.getCanonicalType(se->getType(Ctx));
- if (T == Ctx.getCanonicalType(castTy))
+ QualType T = Context.getCanonicalType(se->getType(Context));
+ if (T == Context.getCanonicalType(castTy))
return val;
// FIXME: Remove this hack when we support symbolic truncation/extension.
@@ -95,16 +97,16 @@ SVal SimpleSValuator::EvalCastNL(NonLoc val, QualType castTy) {
return UnknownVal();
llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
- i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
- i.extOrTrunc(ValMgr.getContext().getTypeSize(castTy));
+ i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::isLocType(castTy));
+ i = i.extOrTrunc(Context.getTypeSize(castTy));
if (isLocType)
- return ValMgr.makeIntLocVal(i);
+ return makeIntLocVal(i);
else
- return ValMgr.makeIntVal(i);
+ return makeIntVal(i);
}
-SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
+SVal SimpleSValBuilder::evalCastL(Loc val, QualType castTy) {
// Casts from pointers -> pointers, just return the lval.
//
@@ -112,7 +114,7 @@ SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
// can be introduced by the frontend for corner cases, e.g
// casting from va_list* to __builtin_va_list&.
//
- if (Loc::IsLocType(castTy) || castTy->isReferenceType())
+ if (Loc::isLocType(castTy) || castTy->isReferenceType())
return val;
// FIXME: Handle transparent unions where a value can be "transparently"
@@ -121,15 +123,15 @@ SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
return UnknownVal();
if (castTy->isIntegerType()) {
- unsigned BitWidth = ValMgr.getContext().getTypeSize(castTy);
+ unsigned BitWidth = Context.getTypeSize(castTy);
if (!isa<loc::ConcreteInt>(val))
- return ValMgr.makeLocAsInteger(val, BitWidth);
+ return makeLocAsInteger(val, BitWidth);
llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
- i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::IsLocType(castTy));
- i.extOrTrunc(BitWidth);
- return ValMgr.makeIntVal(i);
+ i.setIsUnsigned(castTy->isUnsignedIntegerType() || Loc::isLocType(castTy));
+ i = i.extOrTrunc(BitWidth);
+ return makeIntVal(i);
}
// All other cases: return 'UnknownVal'. This includes casting pointers
@@ -142,19 +144,19 @@ SVal SimpleSValuator::EvalCastL(Loc val, QualType castTy) {
// Transfer function for unary operators.
//===----------------------------------------------------------------------===//
-SVal SimpleSValuator::EvalMinus(NonLoc val) {
+SVal SimpleSValBuilder::evalMinus(NonLoc val) {
switch (val.getSubKind()) {
case nonloc::ConcreteIntKind:
- return cast<nonloc::ConcreteInt>(val).evalMinus(ValMgr);
+ return cast<nonloc::ConcreteInt>(val).evalMinus(*this);
default:
return UnknownVal();
}
}
-SVal SimpleSValuator::EvalComplement(NonLoc X) {
+SVal SimpleSValBuilder::evalComplement(NonLoc X) {
switch (X.getSubKind()) {
case nonloc::ConcreteIntKind:
- return cast<nonloc::ConcreteInt>(X).evalComplement(ValMgr);
+ return cast<nonloc::ConcreteInt>(X).evalComplement(*this);
default:
return UnknownVal();
}
@@ -191,7 +193,7 @@ static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
}
}
-SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
+SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt &RHS,
QualType resultTy) {
@@ -205,7 +207,7 @@ SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
case BO_Mul:
// a*0 and a*1
if (RHS == 0)
- return ValMgr.makeIntVal(0, resultTy);
+ return makeIntVal(0, resultTy);
else if (RHS == 1)
isIdempotent = true;
break;
@@ -223,7 +225,7 @@ SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
// This is also handled elsewhere.
return UndefinedVal();
else if (RHS == 1)
- return ValMgr.makeIntVal(0, resultTy);
+ return makeIntVal(0, resultTy);
break;
case BO_Add:
case BO_Sub:
@@ -237,7 +239,7 @@ SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
case BO_And:
// a&0 and a&(~0)
if (RHS == 0)
- return ValMgr.makeIntVal(0, resultTy);
+ return makeIntVal(0, resultTy);
else if (RHS.isAllOnesValue())
isIdempotent = true;
break;
@@ -246,27 +248,26 @@ SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
if (RHS == 0)
isIdempotent = true;
else if (RHS.isAllOnesValue()) {
- BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
- const llvm::APSInt &Result = BVF.Convert(resultTy, RHS);
+ const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS);
return nonloc::ConcreteInt(Result);
}
break;
}
// Idempotent ops (like a*1) can still change the type of an expression.
- // Wrap the LHS up in a NonLoc again and let EvalCastNL do the dirty work.
+ // Wrap the LHS up in a NonLoc again and let evalCastNL do the dirty work.
if (isIdempotent) {
if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS))
- return EvalCastNL(nonloc::SymbolVal(LHSSym), resultTy);
- return EvalCastNL(nonloc::SymExprVal(LHS), resultTy);
+ return evalCastNL(nonloc::SymbolVal(LHSSym), resultTy);
+ return evalCastNL(nonloc::SymExprVal(LHS), resultTy);
}
// If we reach this point, the expression cannot be simplified.
// Make a SymExprVal for the entire thing.
- return ValMgr.makeNonLoc(LHS, op, RHS, resultTy);
+ return makeNonLoc(LHS, op, RHS, resultTy);
}
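Taken together, the cases above fold the usual algebraic identities before falling back to a symbolic expression. A few reductions implied by this code (informal notation, with $x a symbol):

    // $x * 0 -> 0        $x * 1 -> $x (idempotent; recast to resultTy)
    // $x & 0 -> 0        $x % 1 -> 0
    // $x | 0 -> $x       $x | allOnes -> allOnes (converted to resultTy)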
-SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
+SVal SimpleSValBuilder::evalBinOpNN(const GRState *state,
BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs,
QualType resultTy) {
@@ -278,17 +279,17 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case BO_EQ:
case BO_LE:
case BO_GE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
case BO_LT:
case BO_GT:
case BO_NE:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_Xor:
case BO_Sub:
- return ValMgr.makeIntVal(0, resultTy);
+ return makeIntVal(0, resultTy);
case BO_Or:
case BO_And:
- return EvalCastNL(lhs, resultTy);
+ return evalCastNL(lhs, resultTy);
}
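This shortcut fires when the two NonLoc operands compare equal as SVals (the guard is elided above this hunk), so self-relations fold to constants without consulting the constraint manager. Illustrative input:

    int same(int x) {
      return (x == x) + (x - x) + (x ^ x);   // folds to 1 + 0 + 0
    }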
while (1) {
@@ -299,23 +300,22 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
case nonloc::LocAsIntegerKind:
- return EvalBinOpLL(state, op, lhsL,
+ return evalBinOpLL(state, op, lhsL,
cast<nonloc::LocAsInteger>(rhs).getLoc(),
resultTy);
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
- ASTContext& Ctx = ValMgr.getContext();
llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
i.setIsUnsigned(true);
- i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
- return EvalBinOpLL(state, op, lhsL, ValMgr.makeLoc(i), resultTy);
+ i = i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy));
+ return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
}
default:
switch (op) {
case BO_EQ:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_NE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
default:
// This case also handles pointer arithmetic.
return UnknownVal();
@@ -358,7 +358,7 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case BO_XorAssign:
case BO_OrAssign:
case BO_Comma:
- assert(false && "'=' and ',' operators handled by GRExprEngine.");
+ assert(false && "'=' and ',' operators handled by ExprEngine.");
return UnknownVal();
case BO_PtrMemD:
case BO_PtrMemI:
@@ -372,8 +372,8 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case BO_NE:
// Negate the comparison and make a value.
opc = NegateComparison(opc);
- assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
- return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
+ assert(symIntExpr->getType(Context) == resultTy);
+ return makeNonLoc(symIntExpr->getLHS(), opc,
symIntExpr->getRHS(), resultTy);
}
}
@@ -388,23 +388,20 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
if (BinaryOperator::isAdditiveOp(op)) {
BinaryOperator::Opcode lop = symIntExpr->getOpcode();
if (BinaryOperator::isAdditiveOp(lop)) {
- BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
-
// resultTy may not be the best type to convert to, but it's
// probably the best choice in expressions with mixed type
// (such as x+1U+2LL). The rules for implicit conversions should
// choose a reasonable type to preserve the expression, and will
// at least match how the value is going to be used.
const llvm::APSInt &first =
- BVF.Convert(resultTy, symIntExpr->getRHS());
+ BasicVals.Convert(resultTy, symIntExpr->getRHS());
const llvm::APSInt &second =
- BVF.Convert(resultTy, rhsInt->getValue());
-
+ BasicVals.Convert(resultTy, rhsInt->getValue());
const llvm::APSInt *newRHS;
if (lop == op)
- newRHS = BVF.EvaluateAPSInt(BO_Add, first, second);
+ newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
else
- newRHS = BVF.EvaluateAPSInt(BO_Sub, first, second);
+ newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
}
}
@@ -416,7 +413,7 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
if (isa<nonloc::ConcreteInt>(rhs)) {
- return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
+ return lhsInt.evalBinOp(*this, op, cast<nonloc::ConcreteInt>(rhs));
} else {
const llvm::APSInt& lhsValue = lhsInt.getValue();
@@ -461,17 +458,13 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case nonloc::SymbolValKind: {
nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
SymbolRef Sym = slhs->getSymbol();
-
- ASTContext& Ctx = ValMgr.getContext();
-
// Does the symbol simplify to a constant? If so, "fold" the constant
// by setting 'lhs' to a ConcreteInt and try again.
- if (Sym->getType(Ctx)->isIntegerType())
+ if (Sym->getType(Context)->isIntegerType())
if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
// The symbol evaluates to a constant. If necessary, promote the
// folded constant (LHS) to the result type.
- BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
- const llvm::APSInt &lhs_I = BVF.Convert(resultTy, *Constant);
+ const llvm::APSInt &lhs_I = BasicVals.Convert(resultTy, *Constant);
lhs = nonloc::ConcreteInt(lhs_I);
// Also promote the RHS (if necessary).
@@ -483,7 +476,8 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
// Other operators: do an implicit conversion. This shouldn't be
// necessary once we support truncation/extension of symbolic values.
if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
- rhs = nonloc::ConcreteInt(BVF.Convert(resultTy, rhs_I->getValue()));
+ rhs = nonloc::ConcreteInt(BasicVals.Convert(resultTy,
+ rhs_I->getValue()));
}
continue;
@@ -492,11 +486,10 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
// Is the RHS a symbol we can simplify?
if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
SymbolRef RSym = srhs->getSymbol();
- if (RSym->getType(Ctx)->isIntegerType()) {
+ if (RSym->getType(Context)->isIntegerType()) {
if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
// The symbol evaluates to a constant.
- BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
- const llvm::APSInt &rhs_I = BVF.Convert(resultTy, *Constant);
+ const llvm::APSInt &rhs_I = BasicVals.Convert(resultTy, *Constant);
rhs = nonloc::ConcreteInt(rhs_I);
}
}
@@ -515,13 +508,13 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
// FIXME: all this logic will change if/when we have MemRegion::getLocation().
-SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
+SVal SimpleSValBuilder::evalBinOpLL(const GRState *state,
BinaryOperator::Opcode op,
Loc lhs, Loc rhs,
QualType resultTy) {
// Only comparisons and subtractions are valid operations on two pointers.
// See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
- // However, if a pointer is casted to an integer, EvalBinOpNN may end up
+ // However, if a pointer is cast to an integer, evalBinOpNN may end up
// calling this function with another operation (PR7527). We don't attempt to
// model this for now, but it could be useful, particularly when the
// "location" is actually an integer value that's been passed through a void*.
@@ -535,15 +528,15 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
assert(false && "Unimplemented operation for two identical values");
return UnknownVal();
case BO_Sub:
- return ValMgr.makeZeroVal(resultTy);
+ return makeZeroVal(resultTy);
case BO_EQ:
case BO_LE:
case BO_GE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
case BO_NE:
case BO_LT:
case BO_GT:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
}
}
@@ -559,15 +552,15 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
default:
break;
case BO_Sub:
- return EvalCastL(lhs, resultTy);
+ return evalCastL(lhs, resultTy);
case BO_EQ:
case BO_LE:
case BO_LT:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_NE:
case BO_GT:
case BO_GE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
}
}
// There may be two labels for the same location, and a function region may
@@ -587,15 +580,15 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
return UnknownVal();
const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue();
- return ValMgr.makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
+ return makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
}
// If both operands are constants, just perform the operation.
if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
- BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
- SVal ResultVal = cast<loc::ConcreteInt>(lhs).EvalBinOp(BVF, op, *rInt);
+ SVal ResultVal = cast<loc::ConcreteInt>(lhs).evalBinOp(BasicVals, op,
+ *rInt);
if (Loc *Result = dyn_cast<Loc>(&ResultVal))
- return EvalCastL(*Result, resultTy);
+ return evalCastL(*Result, resultTy);
else
return UnknownVal();
}
@@ -612,11 +605,11 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
case BO_EQ:
case BO_GT:
case BO_GE:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_NE:
case BO_LT:
case BO_LE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
}
}
@@ -640,15 +633,15 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
default:
break;
case BO_Sub:
- return EvalCastL(lhs, resultTy);
+ return evalCastL(lhs, resultTy);
case BO_EQ:
case BO_LT:
case BO_LE:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_NE:
case BO_GT:
case BO_GE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
}
}
@@ -676,9 +669,9 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
default:
return UnknownVal();
case BO_EQ:
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
case BO_NE:
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
}
}
@@ -705,7 +698,7 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
if (!LeftIndex)
return UnknownVal();
- LeftIndexVal = EvalCastNL(*LeftIndex, resultTy);
+ LeftIndexVal = evalCastNL(*LeftIndex, resultTy);
LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
if (!LeftIndex)
return UnknownVal();
@@ -715,14 +708,14 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
if (!RightIndex)
return UnknownVal();
- RightIndexVal = EvalCastNL(*RightIndex, resultTy);
+ RightIndexVal = evalCastNL(*RightIndex, resultTy);
RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
if (!RightIndex)
return UnknownVal();
// Actually perform the operation.
- // EvalBinOpNN expects the two indexes to already be the right type.
- return EvalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+ // evalBinOpNN expects the two indexes to already be the right type.
+ return evalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
}
// If the element indexes aren't comparable, see if the raw offsets are.
@@ -731,24 +724,24 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
if (LeftOffset.getRegion() != NULL &&
LeftOffset.getRegion() == RightOffset.getRegion()) {
- int64_t left = LeftOffset.getByteOffset();
- int64_t right = RightOffset.getByteOffset();
+ CharUnits left = LeftOffset.getOffset();
+ CharUnits right = RightOffset.getOffset();
switch (op) {
default:
return UnknownVal();
case BO_LT:
- return ValMgr.makeTruthVal(left < right, resultTy);
+ return makeTruthVal(left < right, resultTy);
case BO_GT:
- return ValMgr.makeTruthVal(left > right, resultTy);
+ return makeTruthVal(left > right, resultTy);
case BO_LE:
- return ValMgr.makeTruthVal(left <= right, resultTy);
+ return makeTruthVal(left <= right, resultTy);
case BO_GE:
- return ValMgr.makeTruthVal(left >= right, resultTy);
+ return makeTruthVal(left >= right, resultTy);
case BO_EQ:
- return ValMgr.makeTruthVal(left == right, resultTy);
+ return makeTruthVal(left == right, resultTy);
case BO_NE:
- return ValMgr.makeTruthVal(left != right, resultTy);
+ return makeTruthVal(left != right, resultTy);
}
}
@@ -786,9 +779,9 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
// We know for sure that the two fields are not the same, since that
// would have given us the same SVal.
if (op == BO_EQ)
- return ValMgr.makeTruthVal(false, resultTy);
+ return makeTruthVal(false, resultTy);
if (op == BO_NE)
- return ValMgr.makeTruthVal(true, resultTy);
+ return makeTruthVal(true, resultTy);
// Iterate through the fields and see which one comes first.
// [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
@@ -798,9 +791,9 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I!=E; ++I) {
if (*I == LeftFD)
- return ValMgr.makeTruthVal(leftFirst, resultTy);
+ return makeTruthVal(leftFirst, resultTy);
if (*I == RightFD)
- return ValMgr.makeTruthVal(!leftFirst, resultTy);
+ return makeTruthVal(!leftFirst, resultTy);
}
assert(false && "Fields not found in parent record's definition");
@@ -812,9 +805,14 @@ SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
}
}
-SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
+SVal SimpleSValBuilder::evalBinOpLN(const GRState *state,
BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) {
+
+ // Special case: rhs is a zero constant.
+ if (rhs.isZeroConstant())
+ return lhs;
+
// Special case: 'rhs' is an integer that has the same width as a pointer and
// we are using the integer location in a comparison. Normally this cannot be
// triggered, but transfer functions like those for OSCompareAndSwapBarrier32
@@ -823,13 +821,13 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
if (BinaryOperator::isComparisonOp(op)) {
if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
const llvm::APSInt *x = &rhsInt->getValue();
- ASTContext &ctx = ValMgr.getContext();
+ ASTContext &ctx = Context;
if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
// Convert the signedness of the integer (if necessary).
if (x->isSigned())
- x = &ValMgr.getBasicValueFactory().getValue(*x, true);
+ x = &getBasicValueFactory().getValue(*x, true);
- return EvalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
+ return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
}
@@ -845,7 +843,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
// Convert the bitwidth of rightI. This should deal with overflow
// since we are dealing with concrete values.
- rightI.extOrTrunc(leftI.getBitWidth());
+ rightI = rightI.extOrTrunc(leftI.getBitWidth());
// Offset the increment by the pointer size.
llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
@@ -862,17 +860,45 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
default:
llvm_unreachable("Invalid pointer arithmetic operation");
}
- return loc::ConcreteInt(ValMgr.getBasicValueFactory().getValue(rightI));
+ return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
}
}
-
- // Delegate remaining pointer arithmetic to the StoreManager.
- return state->getStateManager().getStoreManager().EvalBinOp(op, lhs,
- rhs, resultTy);
+ // Handle cases where 'lhs' is a region.
+ if (const MemRegion *region = lhs.getAsRegion()) {
+ rhs = cast<NonLoc>(convertToArrayIndex(rhs));
+ SVal index = UnknownVal();
+ const MemRegion *superR = 0;
+ QualType elementType;
+
+ if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
+ index = evalBinOpNN(state, BO_Add, elemReg->getIndex(), rhs,
+ getArrayIndexType());
+ superR = elemReg->getSuperRegion();
+ elementType = elemReg->getElementType();
+ }
+ else if (isa<SubRegion>(region)) {
+ superR = region;
+ index = rhs;
+ if (const PointerType *PT = resultTy->getAs<PointerType>()) {
+ elementType = PT->getPointeeType();
+ }
+ else {
+ const ObjCObjectPointerType *OT =
+ resultTy->getAs<ObjCObjectPointerType>();
+ elementType = OT->getPointeeType();
+ }
+ }
+
+ if (NonLoc *indexV = dyn_cast<NonLoc>(&index)) {
+ return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
+ superR, getContext()));
+ }
+ }
+ return UnknownVal();
}
-const llvm::APSInt *SimpleSValuator::getKnownValue(const GRState *state,
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(const GRState *state,
SVal V) {
if (V.isUnknownOrUndef())
return NULL;
diff --git a/contrib/llvm/tools/clang/lib/Checker/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
index 1cb5cd7..7225170 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Store.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -11,25 +11,26 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/Store.h"
-#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "clang/AST/CharUnits.h"
using namespace clang;
+using namespace ento;
StoreManager::StoreManager(GRStateManager &stateMgr)
- : ValMgr(stateMgr.getValueManager()), StateMgr(stateMgr),
- MRMgr(ValMgr.getRegionManager()), Ctx(stateMgr.getContext()) {}
+ : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+ MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
-Store StoreManager::EnterStackFrame(const GRState *state,
- const StackFrameContext *frame) {
- return state->getStore();
+StoreRef StoreManager::enterStackFrame(const GRState *state,
+ const StackFrameContext *frame) {
+ return StoreRef(state->getStore(), *this);
}
const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
QualType EleTy, uint64_t index) {
- SVal idx = ValMgr.makeArrayIndex(index);
- return MRMgr.getElementRegion(EleTy, idx, Base, ValMgr.getContext());
+ NonLoc idx = svalBuilder.makeArrayIndex(index);
+ return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}
// FIXME: Merge with the implementation of the same method in MemRegion.cpp
@@ -43,14 +44,18 @@ static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
return true;
}
+StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
+ return StoreRef(store, *this);
+}
+
const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
QualType T) {
- SVal idx = ValMgr.makeZeroArrayIndex();
+ NonLoc idx = svalBuilder.makeZeroArrayIndex();
assert(!T.isNull());
return MRMgr.getElementRegion(T, idx, R, Ctx);
}
-const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy) {
+const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
ASTContext& Ctx = StateMgr.getContext();
@@ -73,7 +78,7 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy)
// Now assume we are casting from pointer to pointer. Other cases should
// already be handled.
- QualType PointeeTy = CastToTy->getAs<PointerType>()->getPointeeType();
+ QualType PointeeTy = CastToTy->getPointeeType();
QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
// Handle casts to void*. We just pass the region through.
@@ -113,7 +118,8 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::VarRegionKind:
- case MemRegion::CXXObjectRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
return MakeElementRegion(R, PointeeTy);
case MemRegion::ElementRegionKind: {
@@ -145,7 +151,7 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy)
if (!baseR)
return NULL;
- CharUnits off = CharUnits::fromQuantity(rawOff.getByteOffset());
+ CharUnits off = rawOff.getOffset();
if (off.isZero()) {
// Edge case: we are at 0 bytes off the beginning of baseR. We
@@ -211,7 +217,7 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
if (castTy.isNull())
return V;
- ASTContext &Ctx = ValMgr.getContext();
+ ASTContext &Ctx = svalBuilder.getContext();
if (performTestOnly) {
// Automatically translate references to pointers.
@@ -219,14 +225,14 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
if (const ReferenceType *RT = T->getAs<ReferenceType>())
T = Ctx.getPointerType(RT->getPointeeType());
- assert(ValMgr.getContext().hasSameUnqualifiedType(castTy, T));
+ assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
return V;
}
if (const Loc *L = dyn_cast<Loc>(&V))
- return ValMgr.getSValuator().EvalCastL(*L, castTy);
+ return svalBuilder.evalCastL(*L, castTy);
else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
- return ValMgr.getSValuator().EvalCastNL(*NL, castTy);
+ return svalBuilder.evalCastNL(*NL, castTy);
return V;
}
@@ -267,7 +273,7 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}
-SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal Base) {
// If the base is an unknown or undefined value, just return it back.
@@ -283,7 +289,7 @@ SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
// Convert the offset to the appropriate size and signedness.
- Offset = ValMgr.convertToArrayIndex(Offset);
+ Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));
if (!ElemR) {
//
@@ -308,7 +314,7 @@ SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
// Only allow non-integer offsets if the base region has no offset itself.
// FIXME: This is a somewhat arbitrary restriction. We should be using
- // SValuator here to add the two offsets without checking their types.
+ // SValBuilder here to add the two offsets without checking their types.
if (!isa<nonloc::ConcreteInt>(Offset)) {
if (isa<ElementRegion>(BaseRegion->StripCasts()))
return UnknownVal();
@@ -322,8 +328,8 @@ SVal StoreManager::getLValueElement(QualType elementType, SVal Offset,
assert(BaseIdxI.isSigned());
// Compute the new index.
- SVal NewIdx = nonloc::ConcreteInt(
- ValMgr.getBasicValueFactory().getValue(BaseIdxI + OffI));
+ nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
+ OffI));
// Construct the new ElementRegion.
const MemRegion *ArrayR = ElemR->getSuperRegion();
diff --git a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 3b1bb6d..c1ca1cf 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -8,16 +8,17 @@
//===----------------------------------------------------------------------===//
//
// This file defines SymbolManager, a class that manages symbolic values
-// created for use by GRExprEngine and related classes.
+// created for use by ExprEngine and related classes.
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
-#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
+using namespace ento;
void SymExpr::dump() const {
dumpToStream(llvm::errs());
@@ -232,13 +233,15 @@ QualType SymbolRegionValue::getType(ASTContext& C) const {
SymbolManager::~SymbolManager() {}
bool SymbolManager::canSymbolicate(QualType T) {
- if (Loc::IsLocType(T))
+ T = T.getCanonicalType();
+
+ if (Loc::isLocType(T))
return true;
if (T->isIntegerType())
return T->isScalarType();
- if (T->isRecordType())
+ if (T->isRecordType() && !T->isUnionType())
return true;
return false;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
new file mode 100644
index 0000000..230b6a10
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -0,0 +1,70 @@
+//===--- TextPathDiagnostics.cpp - Text Diagnostics for Paths ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TextPathDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using namespace llvm;
+
+namespace {
+
+/// \brief Simple path diagnostic client that outputs the sequence of
+/// events as diagnostic notes.
+class TextPathDiagnostics : public PathDiagnosticClient {
+ const std::string OutputFile;
+ Diagnostic &Diag;
+
+public:
+ TextPathDiagnostics(const std::string& output, Diagnostic &diag)
+ : OutputFile(output), Diag(diag) {}
+
+ void HandlePathDiagnostic(const PathDiagnostic* D);
+
+ void FlushDiagnostics(llvm::SmallVectorImpl<std::string> *FilesMade) { }
+
+ virtual llvm::StringRef getName() const {
+ return "TextPathDiagnostics";
+ }
+
+ PathGenerationScheme getGenerationScheme() const { return Minimal; }
+ bool supportsLogicalOpControlFlow() const { return true; }
+ bool supportsAllBlockEdges() const { return true; }
+ virtual bool useVerboseDescription() const { return true; }
+};
+
+} // end anonymous namespace
+
+PathDiagnosticClient*
+ento::createTextPathDiagnosticClient(const std::string& out,
+ const Preprocessor &PP) {
+ return new TextPathDiagnostics(out, PP.getDiagnostics());
+}
+
+void TextPathDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) {
+ if (!D)
+ return;
+
+ if (D->empty()) {
+ delete D;
+ return;
+ }
+
+ for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I) {
+ unsigned diagID = Diag.getDiagnosticIDs()->getCustomDiagID(
+ DiagnosticIDs::Note, I->getString());
+ Diag.Report(I->getLocation().asLocation(), diagID);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index ad5ccb50..dbfebcc 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/AnalysisConsumer.h"
+#include "AnalysisConsumer.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -20,26 +20,33 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/CFG.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/ManagerRegistry.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/PathSensitive/AnalysisManager.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "clang/Checker/PathDiagnosticClients.h"
-#include "GRExprEngineExperimentalChecks.h"
-#include "GRExprEngineInternalChecks.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TransferFuncs.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticClients.h"
+
+// FIXME: Restructure checker registration.
+#include "../Checkers/ClangSACheckers.h"
+#include "../Checkers/ExperimentalChecks.h"
+#include "../Checkers/InternalChecks.h"
+#include "../Checkers/BasicObjCFoundationChecks.h"
+
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/AnalyzerOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
#include "llvm/ADT/OwningPtr.h"
using namespace clang;
+using namespace ento;
static ExplodedNode::Auditor* CreateUbiViz();
@@ -48,11 +55,11 @@ static ExplodedNode::Auditor* CreateUbiViz();
//===----------------------------------------------------------------------===//
static PathDiagnosticClient*
-CreatePlistHTMLDiagnosticClient(const std::string& prefix,
+createPlistHTMLDiagnosticClient(const std::string& prefix,
const Preprocessor &PP) {
- llvm::sys::Path F(prefix);
- PathDiagnosticClient *PD = CreateHTMLDiagnosticClient(F.getDirname(), PP);
- return CreatePlistDiagnosticClient(prefix, PP, PD);
+ PathDiagnosticClient *PD =
+ createHTMLDiagnosticClient(llvm::sys::path::parent_path(prefix), PP);
+ return createPlistDiagnosticClient(prefix, PP, PD);
}
//===----------------------------------------------------------------------===//
@@ -75,7 +82,6 @@ private:
Actions ObjCMethodActions;
Actions ObjCImplementationActions;
Actions CXXMethodActions;
- TUActions TranslationUnitActions; // Remove this.
public:
ASTContext* Ctx;
@@ -89,6 +95,7 @@ public:
StoreManagerCreator CreateStoreMgr;
ConstraintManagerCreator CreateConstraintMgr;
+ llvm::OwningPtr<CheckerManager> checkerMgr;
llvm::OwningPtr<AnalysisManager> Mgr;
AnalysisConsumer(const Preprocessor& pp,
@@ -108,32 +115,27 @@ public:
case PD_##NAME: PD = CREATEFN(OutDir, PP); break;
#include "clang/Frontend/Analyses.def"
}
+ } else if (Opts.AnalysisDiagOpt == PD_TEXT) {
+ // Create the text client even without a specified output file since
+ // it just uses diagnostic notes.
+ PD = createTextPathDiagnosticClient("", PP);
}
// Create the analyzer component creators.
- if (ManagerRegistry::StoreMgrCreator != 0) {
- CreateStoreMgr = ManagerRegistry::StoreMgrCreator;
- }
- else {
- switch (Opts.AnalysisStoreOpt) {
- default:
- assert(0 && "Unknown store manager.");
+ switch (Opts.AnalysisStoreOpt) {
+ default:
+ assert(0 && "Unknown store manager.");
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
- case NAME##Model: CreateStoreMgr = CREATEFN; break;
+ case NAME##Model: CreateStoreMgr = CREATEFN; break;
#include "clang/Frontend/Analyses.def"
- }
}
- if (ManagerRegistry::ConstraintMgrCreator != 0)
- CreateConstraintMgr = ManagerRegistry::ConstraintMgrCreator;
- else {
- switch (Opts.AnalysisConstraintsOpt) {
- default:
- assert(0 && "Unknown store manager.");
+ switch (Opts.AnalysisConstraintsOpt) {
+ default:
+ assert(0 && "Unknown store manager.");
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
- case NAME##Model: CreateConstraintMgr = CREATEFN; break;
+ case NAME##Model: CreateConstraintMgr = CREATEFN; break;
#include "clang/Frontend/Analyses.def"
- }
}
}
@@ -143,15 +145,21 @@ public:
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
- llvm::errs() << "ANALYZE: " << Loc.getFilename();
+ if (Loc.isValid()) {
+ llvm::errs() << "ANALYZE: " << Loc.getFilename();
- if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
- const NamedDecl *ND = cast<NamedDecl>(D);
- llvm::errs() << ' ' << ND << '\n';
- }
- else if (isa<BlockDecl>(D)) {
- llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
- << Loc.getColumn() << '\n';
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ llvm::errs() << ' ' << ND << '\n';
+ }
+ else if (isa<BlockDecl>(D)) {
+ llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
+ << Loc.getColumn() << '\n';
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ Selector S = MD->getSelector();
+ llvm::errs() << ' ' << S.getAsString();
+ }
}
}
@@ -161,28 +169,30 @@ public:
CXXMethodActions.push_back(action);
}
- void addTranslationUnitAction(TUAction action) {
- TranslationUnitActions.push_back(action);
- }
-
void addObjCImplementationAction(CodeAction action) {
ObjCImplementationActions.push_back(action);
}
virtual void Initialize(ASTContext &Context) {
Ctx = &Context;
+ checkerMgr.reset(registerCheckers(Opts, PP.getDiagnostics()));
Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(),
PP.getLangOptions(), PD,
CreateStoreMgr, CreateConstraintMgr,
+ checkerMgr.get(),
/* Indexer */ 0,
Opts.MaxNodes, Opts.MaxLoop,
Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
Opts.PurgeDead, Opts.EagerlyAssume,
Opts.TrimGraph, Opts.InlineCall,
- Opts.UnoptimizedCFG));
+ Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors,
+ Opts.CFGAddInitializers,
+ Opts.EagerlyTrimEGraph));
}
virtual void HandleTranslationUnit(ASTContext &C);
+ void HandleDeclContext(ASTContext &C, DeclContext *dc);
+
void HandleCode(Decl *D, Actions& actions);
};
} // end anonymous namespace
@@ -191,70 +201,65 @@ public:
// AnalysisConsumer implementation.
//===----------------------------------------------------------------------===//
-void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
-
- TranslationUnitDecl *TU = C.getTranslationUnitDecl();
-
- for (DeclContext::decl_iterator I = TU->decls_begin(), E = TU->decls_end();
+void AnalysisConsumer::HandleDeclContext(ASTContext &C, DeclContext *dc) {
+ BugReporter BR(*Mgr);
+ for (DeclContext::decl_iterator I = dc->decls_begin(), E = dc->decls_end();
I != E; ++I) {
Decl *D = *I;
+ checkerMgr->runCheckersOnASTDecl(D, *Mgr, BR);
switch (D->getKind()) {
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- case Decl::CXXConversion:
- case Decl::CXXMethod:
- case Decl::Function: {
- FunctionDecl* FD = cast<FunctionDecl>(D);
-
- if (FD->isThisDeclarationADefinition()) {
- if (!Opts.AnalyzeSpecificFunction.empty() &&
- FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction)
- break;
- DisplayFunction(FD);
- HandleCode(FD, FunctionActions);
- }
- break;
- }
-
- case Decl::ObjCMethod: {
- ObjCMethodDecl* MD = cast<ObjCMethodDecl>(D);
-
- if (MD->isThisDeclarationADefinition()) {
- if (!Opts.AnalyzeSpecificFunction.empty() &&
- Opts.AnalyzeSpecificFunction != MD->getSelector().getAsString())
- break;
- DisplayFunction(MD);
- HandleCode(MD, ObjCMethodActions);
+ case Decl::Namespace: {
+ HandleDeclContext(C, cast<NamespaceDecl>(D));
+ break;
}
- break;
- }
-
- case Decl::ObjCImplementation: {
- ObjCImplementationDecl* ID = cast<ObjCImplementationDecl>(*I);
- HandleCode(ID, ObjCImplementationActions);
-
- for (ObjCImplementationDecl::method_iterator MI = ID->meth_begin(),
- ME = ID->meth_end(); MI != ME; ++MI) {
- if ((*MI)->isThisDeclarationADefinition()) {
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function: {
+ FunctionDecl* FD = cast<FunctionDecl>(D);
+ // We skip function template definitions, as their semantics is
+ // only determined when they are instantiated.
+ if (FD->isThisDeclarationADefinition() &&
+ !FD->isDependentContext()) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
- Opts.AnalyzeSpecificFunction != (*MI)->getSelector().getAsString())
+ FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction)
break;
- HandleCode(*MI, ObjCMethodActions);
+ DisplayFunction(FD);
+ HandleCode(FD, FunctionActions);
}
+ break;
}
- break;
- }
-
- default:
- break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl* ID = cast<ObjCImplementationDecl>(*I);
+ HandleCode(ID, ObjCImplementationActions);
+
+ for (ObjCImplementationDecl::method_iterator MI = ID->meth_begin(),
+ ME = ID->meth_end(); MI != ME; ++MI) {
+ if ((*MI)->isThisDeclarationADefinition()) {
+ if (!Opts.AnalyzeSpecificFunction.empty() &&
+ Opts.AnalyzeSpecificFunction != (*MI)->getSelector().getAsString())
+ break;
+ DisplayFunction(*MI);
+ HandleCode(*MI, ObjCMethodActions);
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
}
- }
+ }
+}
- for (TUActions::iterator I = TranslationUnitActions.begin(),
- E = TranslationUnitActions.end(); I != E; ++I) {
- (*I)(*this, *Mgr, *TU);
- }
+void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+ BugReporter BR(*Mgr);
+ TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+ HandleDeclContext(C, TU);
// Explicitly destroy the PathDiagnosticClient. This will flush its output.
// FIXME: This should be replaced with something that doesn't rely on
@@ -297,6 +302,12 @@ void AnalysisConsumer::HandleCode(Decl *D, Actions& actions) {
if (D->hasBody() && Opts.AnalyzeNestedBlocks)
FindBlocks(cast<DeclContext>(D), WL);
+ BugReporter BR(*Mgr);
+ for (llvm::SmallVectorImpl<Decl*>::iterator WI=WL.begin(), WE=WL.end();
+ WI != WE; ++WI)
+ if ((*WI)->hasBody())
+ checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR);
+
for (Actions::iterator I = actions.begin(), E = actions.end(); I != E; ++I)
for (llvm::SmallVectorImpl<Decl*>::iterator WI=WL.begin(), WE=WL.end();
WI != WE; ++WI)
@@ -307,14 +318,6 @@ void AnalysisConsumer::HandleCode(Decl *D, Actions& actions) {
// Analyses
//===----------------------------------------------------------------------===//
-static void ActionWarnDeadStores(AnalysisConsumer &C, AnalysisManager& mgr,
- Decl *D) {
- if (LiveVariables *L = mgr.getLiveVariables(D)) {
- BugReporter BR(mgr);
- CheckDeadStores(*mgr.getCFG(D), *L, mgr.getParentMap(D), BR);
- }
-}
-
static void ActionWarnUninitVals(AnalysisConsumer &C, AnalysisManager& mgr,
Decl *D) {
if (CFG* c = mgr.getCFG(D)) {
@@ -323,32 +326,33 @@ static void ActionWarnUninitVals(AnalysisConsumer &C, AnalysisManager& mgr,
}
-static void ActionGRExprEngine(AnalysisConsumer &C, AnalysisManager& mgr,
+static void ActionExprEngine(AnalysisConsumer &C, AnalysisManager& mgr,
Decl *D,
- GRTransferFuncs* tf) {
+ TransferFuncs* tf) {
- llvm::OwningPtr<GRTransferFuncs> TF(tf);
+ llvm::OwningPtr<TransferFuncs> TF(tf);
// Construct the analysis engine. We first query for the LiveVariables
// information to see if the CFG is valid.
// FIXME: Inter-procedural analysis will need to handle invalid CFGs.
if (!mgr.getLiveVariables(D))
return;
- GRExprEngine Eng(mgr, TF.take());
+ ExprEngine Eng(mgr, TF.take());
if (C.Opts.EnableExperimentalInternalChecks)
RegisterExperimentalInternalChecks(Eng);
- RegisterAppleChecks(Eng, *D);
+ RegisterNSErrorChecks(Eng.getBugReporter(), Eng, *D);
if (C.Opts.EnableExperimentalChecks)
RegisterExperimentalChecks(Eng);
- // Enable idempotent operation checking if it was explicitly turned on, or if
- // we are running experimental checks (i.e. everything)
- if (C.Opts.IdempotentOps || C.Opts.EnableExperimentalChecks
- || C.Opts.EnableExperimentalInternalChecks)
- RegisterIdempotentOperationChecker(Eng);
+ if (C.Opts.BufferOverflows)
+ RegisterArrayBoundCheckerV2(Eng);
+
+ // Enable AnalyzerStatsChecker if it was given as an argument
+ if (C.Opts.AnalyzerStats)
+ RegisterAnalyzerStatsChecker(Eng);
// Set the graph auditor.
llvm::OwningPtr<ExplodedNode::Auditor> Auditor;
@@ -375,11 +379,11 @@ static void ActionGRExprEngine(AnalysisConsumer &C, AnalysisManager& mgr,
static void ActionObjCMemCheckerAux(AnalysisConsumer &C, AnalysisManager& mgr,
Decl *D, bool GCEnabled) {
- GRTransferFuncs* TF = MakeCFRefCountTF(mgr.getASTContext(),
+ TransferFuncs* TF = MakeCFRefCountTF(mgr.getASTContext(),
GCEnabled,
mgr.getLangOptions());
- ActionGRExprEngine(C, mgr, D, TF);
+ ActionExprEngine(C, mgr, D, TF);
}
static void ActionObjCMemChecker(AnalysisConsumer &C, AnalysisManager& mgr,
@@ -403,69 +407,11 @@ static void ActionObjCMemChecker(AnalysisConsumer &C, AnalysisManager& mgr,
}
}
-static void ActionDisplayLiveVariables(AnalysisConsumer &C,
- AnalysisManager& mgr, Decl *D) {
- if (LiveVariables* L = mgr.getLiveVariables(D)) {
- L->dumpBlockLiveness(mgr.getSourceManager());
- }
-}
-
-static void ActionCFGDump(AnalysisConsumer &C, AnalysisManager& mgr, Decl *D) {
- if (CFG *cfg = mgr.getCFG(D)) {
- cfg->dump(mgr.getLangOptions());
- }
-}
-
-static void ActionCFGView(AnalysisConsumer &C, AnalysisManager& mgr, Decl *D) {
- if (CFG *cfg = mgr.getCFG(D)) {
- cfg->viewCFG(mgr.getLangOptions());
- }
-}
-
-static void ActionSecuritySyntacticChecks(AnalysisConsumer &C,
- AnalysisManager &mgr, Decl *D) {
- BugReporter BR(mgr);
- CheckSecuritySyntaxOnly(D, BR);
-}
-
-static void ActionLLVMConventionChecker(AnalysisConsumer &C,
- AnalysisManager &mgr,
- TranslationUnitDecl &TU) {
- BugReporter BR(mgr);
- CheckLLVMConventions(TU, BR);
-}
-
-static void ActionWarnObjCDealloc(AnalysisConsumer &C, AnalysisManager& mgr,
- Decl *D) {
- if (mgr.getLangOptions().getGCMode() == LangOptions::GCOnly)
- return;
- BugReporter BR(mgr);
- CheckObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOptions(), BR);
-}
-
-static void ActionWarnObjCUnusedIvars(AnalysisConsumer &C, AnalysisManager& mgr,
- Decl *D) {
- BugReporter BR(mgr);
- CheckObjCUnusedIvar(cast<ObjCImplementationDecl>(D), BR);
-}
-
-static void ActionWarnObjCMethSigs(AnalysisConsumer &C, AnalysisManager& mgr,
- Decl *D) {
- BugReporter BR(mgr);
- CheckObjCInstMethSignature(cast<ObjCImplementationDecl>(D), BR);
-}
-
-static void ActionWarnSizeofPointer(AnalysisConsumer &C, AnalysisManager &mgr,
- Decl *D) {
- BugReporter BR(mgr);
- CheckSizeofPointer(D, BR);
-}
-
//===----------------------------------------------------------------------===//
// AnalysisConsumer creation.
//===----------------------------------------------------------------------===//
-ASTConsumer* clang::CreateAnalysisConsumer(const Preprocessor& pp,
+ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
const std::string& OutDir,
const AnalyzerOptions& Opts) {
llvm::OwningPtr<AnalysisConsumer> C(new AnalysisConsumer(pp, OutDir, Opts));
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
index c236766..646fe97 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CHECKER_ANALYSISCONSUMER_H
-#define LLVM_CLANG_CHECKER_ANALYSISCONSUMER_H
+#ifndef LLVM_CLANG_GR_ANALYSISCONSUMER_H
+#define LLVM_CLANG_GR_ANALYSISCONSUMER_H
#include <string>
@@ -22,6 +22,10 @@ namespace clang {
class AnalyzerOptions;
class ASTConsumer;
class Preprocessor;
+class Diagnostic;
+
+namespace ento {
+class CheckerManager;
/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
/// analysis passes. (The set of analyses run is controlled by command-line
@@ -30,6 +34,8 @@ ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
const std::string &output,
const AnalyzerOptions& Opts);
-}
+} // end GR namespace
+
+} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt
new file mode 100644
index 0000000..cd9ac1b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -0,0 +1,20 @@
+set(LLVM_NO_RTTI 1)
+
+set(LLVM_USED_LIBS clangBasic clangLex clangAST clangFrontend clangRewrite)
+
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/../Checkers )
+
+add_clang_library(clangStaticAnalyzerFrontend
+ AnalysisConsumer.cpp
+ CheckerRegistration.cpp
+ FrontendActions.cpp
+ )
+
+add_dependencies(clangStaticAnalyzerFrontend
+ clangStaticAnalyzerCheckers
+ clangStaticAnalyzerCore
+ ClangAttrClasses
+ ClangAttrList
+ ClangDeclNodes
+ ClangStmtNode
+ )
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
new file mode 100644
index 0000000..6625729
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -0,0 +1,50 @@
+//===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the registration function for the analyzer checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "../Checkers/ClangSACheckerProvider.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerProvider.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+
+CheckerManager *ento::registerCheckers(const AnalyzerOptions &opts,
+ Diagnostic &diags) {
+ llvm::OwningPtr<CheckerManager> checkerMgr(new CheckerManager());
+
+ llvm::SmallVector<CheckerOptInfo, 8> checkerOpts;
+ for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+ checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
+ }
+
+ llvm::OwningPtr<CheckerProvider> provider(createClangSACheckerProvider());
+ provider->registerCheckers(*checkerMgr,
+ checkerOpts.data(), checkerOpts.size());
+
+ // FIXME: Load CheckerProviders from plugins.
+
+ for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
+ if (checkerOpts[i].isUnclaimed())
+ diags.Report(diag::warn_unkwown_analyzer_checker)
+ << checkerOpts[i].getName();
+ }
+
+ return checkerMgr.take();
+}
diff --git a/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
index d9a54a0..a59cc68 100644
--- a/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -7,10 +7,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Checker/AnalysisConsumer.h"
+#include "AnalysisConsumer.h"
using namespace clang;
+using namespace ento;
ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/Makefile b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/Makefile
new file mode 100644
index 0000000..2698120
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/Makefile
@@ -0,0 +1,19 @@
+##===- clang/lib/StaticAnalyzer/Frontend/Makefile ----------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# Starting point into the static analyzer land for the driver.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerFrontend
+
+CPP.Flags += -I${PROJ_OBJ_DIR}/../Checkers
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Makefile b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Makefile
new file mode 100644
index 0000000..c166f06
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/StaticAnalyzer/Makefile -------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+DIRS := Checkers Frontend
+PARALLEL_DIRS := Core
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/README.txt b/contrib/llvm/tools/clang/lib/StaticAnalyzer/README.txt
new file mode 100644
index 0000000..1406eca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/README.txt
@@ -0,0 +1,139 @@
+//===----------------------------------------------------------------------===//
+// Clang Static Analyzer
+//===----------------------------------------------------------------------===//
+
+= Library Structure =
+
+The analyzer library has two layers: a (low-level) static analysis
+engine (GRExprEngine.cpp and friends), and some static checkers
+(*Checker.cpp). The latter are built on top of the former via the
+Checker and CheckerVisitor interfaces (Checker.h and
+CheckerVisitor.h). The Checker interface is designed to be minimal
+and simple for checker writers, and attempts to isolate them from much
+of the gore of the internal analysis engine.
+
+= How It Works =
+
+The analyzer is inspired by several foundational research papers ([1],
+[2]). (FIXME: kremenek to add more links)
+
+In a nutshell, the analyzer is basically a source code simulator that
+traces out possible paths of execution. The state of the program
+(values of variables and expressions) is encapsulated by the state
+(GRState). A location in the program is called a program point
+(ProgramPoint), and the combination of state and program point is a
+node in an exploded graph (ExplodedGraph). The term "exploded" comes
+from exploding the control-flow edges in the control-flow graph (CFG).
+
+Conceptually the analyzer does a reachability analysis through the
+ExplodedGraph. We start at a root node, which has the entry program
+point and initial state, and then simulate transitions by analyzing
+individual expressions. The analysis of an expression can cause the
+state to change, resulting in a new node in the ExplodedGraph with an
+updated program point and an updated state. A bug is found by hitting
+a node that satisfies some "bug condition" (basically a violation of a
+checking invariant).
+
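+As a rough, self-contained sketch (hypothetical types and names, not
+the engine's real API), this reachability analysis is an ordinary
+worklist loop over (program point, state) pairs:
+
+  #include <deque>
+  #include <set>
+  #include <utility>
+
+  typedef std::pair<int, int> Node;   // (program point id, state id)
+
+  void explore(Node Root, Node (*simulate)(Node)) {
+    std::set<Node> Seen;              // nodes already in the graph
+    std::deque<Node> WorkList;
+    Seen.insert(Root);
+    WorkList.push_back(Root);
+    while (!WorkList.empty()) {
+      Node N = WorkList.front(); WorkList.pop_front();
+      // Analyze one expression; one successor per node, for brevity.
+      Node Succ = simulate(N);
+      if (Seen.insert(Succ).second)   // a genuinely new node: explore it
+        WorkList.push_back(Succ);
+      // Otherwise the path "caches out" on the existing node.
+    }
+  }
+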
+The analyzer traces out multiple paths by reasoning about branches and
+then bifurcating the state: on the true branch the conditions of the
+branch are assumed to be true and on the false branch the conditions
+of the branch are assumed to be false. Such "assumptions" create
+constraints on the values of the program, and those constraints are
+recorded in the GRState object (and are manipulated by the
+ConstraintManager). If assuming the conditions of a branch would
+cause the constraints to be unsatisfiable, the branch is considered
+infeasible and that path is not taken. This is how we get
+path-sensitivity. We reduce exponential blow-up by caching nodes. If
+a new node with the same state and program point as an existing node
+would get generated, the path "caches out" and we simply reuse the
+existing node. Thus the ExplodedGraph is not a DAG; it can contain
+cycles as paths loop back onto each other and cache out.
+
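+For example, consider (a sketch in plain C):
+
+  void f(int x, int *p) {
+    if (x > 0) {
+      if (x < 0)    // dead: no value of x satisfies both constraints
+        *p = 1;     // infeasible path, never explored
+    }
+  }
+
+On the true branch of the outer 'if', the constraint x > 0 is recorded
+in the GRState; asking to additionally assume x < 0 is unsatisfiable,
+so the inner true branch is pruned.
+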
+GRState and ExplodedNodes are basically immutable once created. Once a
+GRState is created it is never modified; to change the program state,
+you must create a new GRState. This immutability is key since the
+ExplodedGraph represents
+the behavior of the analyzed program from the entry point. To
+represent these efficiently, we use functional data structures (e.g.,
+ImmutableMaps) which share data between instances.
+
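+A minimal sketch of that sharing, using LLVM's ImmutableMap directly
+(the analyzer wraps such maps behind GRState; this is illustration
+only):
+
+  #include "llvm/ADT/ImmutableMap.h"
+
+  void demo() {
+    llvm::ImmutableMap<int, int>::Factory F;
+    llvm::ImmutableMap<int, int> M0 = F.getEmptyMap();
+    llvm::ImmutableMap<int, int> M1 = F.add(M0, 1, 10); // M0 untouched
+    llvm::ImmutableMap<int, int> M2 = F.add(M1, 2, 20); // shares M1's tree
+    // M1.lookup(2) is null: "updating" never mutates an existing map.
+    (void)M2;
+  }
+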
+Finally, individual Checkers work by also manipulating the analysis
+state. The analyzer engine talks to them via a visitor interface.
+For example, the PreVisitCallExpr() method is called by GRExprEngine
+to tell the Checker that we are about to analyze a CallExpr, and the
+checker is asked to check for any preconditions that might not be
+satisfied. The checker can do nothing, or it can generate a new
+GRState and ExplodedNode which contains updated checker state. If it
+finds a bug, it can tell the BugReporter object about the bug,
+providing it an ExplodedNode which is the last node in the path that
+triggered the problem.
+
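+Sketched as a checker of that shape (signatures approximate; see
+Checker.h and CheckerVisitor.h for the real interfaces):
+
+  class NoNullArgChecker : public CheckerVisitor<NoNullArgChecker> {
+  public:
+    void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+      // Look up the call's argument values in the current GRState via
+      // C.getState().  If a precondition is violated, generate an error
+      // node and hand a report to the BugReporter; otherwise return and
+      // let the engine continue from the unchanged state.
+    }
+  };
+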
+= Notes about C++ =
+
+Since constructors now appear in the CFG before the variable being
+constructed, we create a temporary object as the destination region to
+construct into. See ExprEngine::VisitCXXConstructExpr().
+
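+For example, given
+
+  A a = A(1);
+
+the CFG visits the construct-expression before the DeclStmt for 'a', so
+the engine constructs into a temporary region first and only then binds
+'a', as described next.
+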
+In ExprEngine::processCallExit(), we always bind the object region to the
+evaluated CXXConstructExpr. Then in VisitDeclStmt(), we compute the
+corresponding lazy compound value if the variable is not a reference, and
+bind the variable region to the lazy compound value. If the variable
+is a reference, we just use the object region as the initializer value.
+
+Before entering a C++ method (or ctor/dtor), the 'this' region is bound
+to the object region. In ctors, we synthesize 'this' region with
+CXXRecordDecl*, which means we do not use type qualifiers. In methods, we
+synthesize 'this' region with CXXMethodDecl*, which has getThisType()
+taking type qualifiers into account. It does not matter that we use a
+qualified 'this' region in one method and an unqualified 'this' region
+in another, because we only need the 'this' region to be consistent
+between where we synthesize it and where we create it directly from a
+CXXThisExpr within a single method call.
+
+= Working on the Analyzer =
+
+If you are interested in bringing up support for C++ expressions, the
+best place to look is the visitation logic in GRExprEngine, which
+handles the simulation of individual expressions. There are plenty of
+examples there of how other expressions are handled.
+
+If you are interested in writing checkers, look at the Checker and
+CheckerVisitor interfaces (Checker.h and CheckerVisitor.h). Also look
+at the files named *Checker.cpp for examples on how you can implement
+these interfaces.
+
+= Debugging the Analyzer =
+
+There are some useful command-line options for debugging. For example:
+
+$ clang -cc1 -help | grep analyze
+ -analyze-function <value>
+ -analyzer-display-progress
+ -analyzer-viz-egraph-graphviz
+ ...
+
+The first restricts analysis to a single named function.
+The second prints to the console what function is being analyzed. The
+third generates a graphviz dot file of the ExplodedGraph. This is
+extremely useful when debugging the analyzer and viewing the
+simulation results.
+
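+For instance, to analyze a single function while watching progress
+(flag spellings taken from the help text above; the exact cc1
+invocation may differ between revisions):
+
+$ clang -cc1 -analyze -analyze-function main -analyzer-display-progress foo.c
+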
+Of course, viewing the CFG (Control-Flow Graph) is also useful:
+
+$ clang -cc1 -help | grep cfg
+ -cfg-add-implicit-dtors Add C++ implicit destructors to CFGs for all analyses
+ -cfg-add-initializers Add C++ initializers to CFGs for all analyses
+ -cfg-dump Display Control-Flow Graphs
+ -cfg-view View Control-Flow Graphs using GraphViz
+ -unoptimized-cfg Generate unoptimized CFGs for all analyses
+
+-cfg-dump dumps a textual representation of the CFG to the console,
+and -cfg-view creates a GraphViz representation.
+
+= References =
+
+[1] Precise interprocedural dataflow analysis via graph reachability,
+ T Reps, S Horwitz, and M Sagiv, POPL '95,
+ http://portal.acm.org/citation.cfm?id=199462
+
+[2] A memory model for static analysis of C programs, Z Xu, T
+ Kremenek, and J Zhang, http://lcs.ios.ac.cn/~xzx/memmodel.pdf
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
index de5e8bf..7fb394f 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -24,7 +24,6 @@
#include "clang/Frontend/TextDiagnosticBuffer.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/FrontendTool/Utils.h"
-#include "llvm/LLVMContext.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
@@ -116,13 +115,12 @@ static int cc1_test(Diagnostic &Diags,
int cc1_main(const char **ArgBegin, const char **ArgEnd,
const char *Argv0, void *MainAddr) {
llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
-
- Clang->setLLVMContext(new llvm::LLVMContext());
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
// Run clang -cc1 test.
if (ArgBegin != ArgEnd && llvm::StringRef(ArgBegin[0]) == "-cc1test") {
- Diagnostic Diags(new TextDiagnosticPrinter(llvm::errs(),
- DiagnosticOptions()));
+ Diagnostic Diags(DiagID, new TextDiagnosticPrinter(llvm::errs(),
+ DiagnosticOptions()));
return cc1_test(Diags, ArgBegin + 1, ArgEnd);
}
@@ -134,7 +132,7 @@ int cc1_main(const char **ArgBegin, const char **ArgEnd,
// Buffer diagnostics from argument parsing so that we can output them using a
// well formed diagnostic object.
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
- Diagnostic Diags(DiagsBuffer);
+ Diagnostic Diags(DiagID, DiagsBuffer);
CompilerInvocation::CreateFromArgs(Clang->getInvocation(), ArgBegin, ArgEnd,
Diags);
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 5bce70c..1d544f3 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -39,12 +39,16 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Signals.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetAsmParser.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetSelect.h"
@@ -97,6 +101,7 @@ struct AssemblerInvocation {
/// @{
unsigned RelaxAll : 1;
+ unsigned NoExecStack : 1;
/// @}
@@ -111,6 +116,7 @@ public:
ShowInst = 0;
ShowEncoding = 0;
RelaxAll = 0;
+ NoExecStack = 0;
}
static void CreateFromArgs(AssemblerInvocation &Res, const char **ArgBegin,
@@ -189,6 +195,7 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Assemble Options
Opts.RelaxAll = Args->hasArg(OPT_relax_all);
+ Opts.NoExecStack = Args->hasArg(OPT_no_exec_stack);
}
static formatted_raw_ostream *GetOutputStream(AssemblerInvocation &Opts,
@@ -224,11 +231,13 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts, Diagnostic &Diags) {
return false;
}
- MemoryBuffer *Buffer = MemoryBuffer::getFileOrSTDIN(Opts.InputFile, &Error);
- if (Buffer == 0) {
+ OwningPtr<MemoryBuffer> BufferPtr;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Opts.InputFile, BufferPtr)) {
+ Error = ec.message();
Diags.Report(diag::err_fe_error_reading) << Opts.InputFile;
return false;
}
+ MemoryBuffer *Buffer = BufferPtr.take();
SourceMgr SrcMgr;
@@ -242,7 +251,6 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts, Diagnostic &Diags) {
OwningPtr<MCAsmInfo> MAI(TheTarget->createAsmInfo(Opts.Triple));
assert(MAI && "Unable to create target asm info!");
- MCContext Ctx(*MAI);
bool IsBinary = Opts.OutputType == AssemblerInvocation::FT_Obj;
formatted_raw_ostream *Out = GetOutputStream(Opts, Diags, IsBinary);
if (!Out)
@@ -255,16 +263,28 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts, Diagnostic &Diags) {
return false;
}
+ const TargetAsmInfo *tai = new TargetAsmInfo(*TM);
+ MCContext Ctx(*MAI, tai);
+
OwningPtr<MCStreamer> Str;
+ const TargetLoweringObjectFile &TLOF =
+ TM->getTargetLowering()->getObjFileLowering();
+ const_cast<TargetLoweringObjectFile&>(TLOF).Initialize(Ctx, *TM);
+
+ // FIXME: There is a bit of code duplication with addPassesToEmitFile.
if (Opts.OutputType == AssemblerInvocation::FT_Asm) {
MCInstPrinter *IP =
TheTarget->createMCInstPrinter(Opts.OutputAsmVariant, *MAI);
MCCodeEmitter *CE = 0;
- if (Opts.ShowEncoding)
+ TargetAsmBackend *TAB = 0;
+ if (Opts.ShowEncoding) {
CE = TheTarget->createCodeEmitter(*TM, Ctx);
- Str.reset(createAsmStreamer(Ctx, *Out,TM->getTargetData()->isLittleEndian(),
- /*asmverbose*/true, IP, CE, Opts.ShowInst));
+ TAB = TheTarget->createAsmBackend(Opts.Triple);
+ }
+ Str.reset(TheTarget->createAsmStreamer(Ctx, *Out, /*asmverbose*/true,
+ /*useLoc*/ true, IP, CE, TAB,
+ Opts.ShowInst));
} else if (Opts.OutputType == AssemblerInvocation::FT_Null) {
Str.reset(createNullStreamer(Ctx));
} else {
@@ -273,7 +293,9 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts, Diagnostic &Diags) {
MCCodeEmitter *CE = TheTarget->createCodeEmitter(*TM, Ctx);
TargetAsmBackend *TAB = TheTarget->createAsmBackend(Opts.Triple);
Str.reset(TheTarget->createObjectStreamer(Opts.Triple, Ctx, *TAB, *Out,
- CE, Opts.RelaxAll));
+ CE, Opts.RelaxAll,
+ Opts.NoExecStack));
+ Str.get()->InitSections();
}
OwningPtr<MCAsmParser> Parser(createMCAsmParser(*TheTarget, SrcMgr, Ctx,
@@ -325,7 +347,8 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(errs(), DiagnosticOptions());
DiagClient->setPrefix("clang -cc1as");
- Diagnostic Diags(DiagClient);
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ Diagnostic Diags(DiagID, DiagClient);
// Set an error handler, so that any LLVM backend diagnostics go through our
// error handler.
@@ -366,7 +389,7 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
// Execute the invocation, unless there were parsing errors.
bool Success = false;
- if (!Diags.getNumErrors())
+ if (!Diags.hasErrorOccurred())
Success = ExecuteAssembler(Asm, Diags);
// If any timers were active but haven't been destroyed yet, print their
diff --git a/contrib/llvm/tools/clang/tools/driver/driver.cpp b/contrib/llvm/tools/clang/tools/driver/driver.cpp
index c058ece..0b5d2c9 100644
--- a/contrib/llvm/tools/clang/tools/driver/driver.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/driver.cpp
@@ -23,16 +23,19 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Config/config.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
-#include "llvm/System/Signals.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
+#include <cctype>
using namespace clang;
using namespace clang::driver;
@@ -181,8 +184,8 @@ static void ExpandArgsFromBuf(const char *Arg,
llvm::SmallVectorImpl<const char*> &ArgVector,
std::set<std::string> &SavedStrings) {
const char *FName = Arg + 1;
- llvm::MemoryBuffer *MemBuf = llvm::MemoryBuffer::getFile(FName);
- if (!MemBuf) {
+ llvm::OwningPtr<llvm::MemoryBuffer> MemBuf;
+ if (llvm::MemoryBuffer::getFile(FName, MemBuf)) {
ArgVector.push_back(SaveStringInSet(SavedStrings, Arg));
return;
}
@@ -233,7 +236,6 @@ static void ExpandArgsFromBuf(const char *Arg,
}
CurArg.push_back(*P);
}
- delete MemBuf;
}
static void ExpandArgv(int argc, const char **argv,
@@ -287,8 +289,9 @@ int main(int argc_, const char **argv_) {
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), DiagnosticOptions());
- DiagClient->setPrefix(Path.getBasename());
- Diagnostic Diags(DiagClient);
+ DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ Diagnostic Diags(DiagID, DiagClient);
#ifdef CLANG_IS_PRODUCTION
const bool IsProduction = true;
@@ -308,19 +311,22 @@ int main(int argc_, const char **argv_) {
// Attempt to find the original path used to invoke the driver, to determine
// the installed path. We do this manually, because we want to support that
// path being a symlink.
- llvm::sys::Path InstalledPath(argv[0]);
-
- // Do a PATH lookup, if there are no directory components.
- if (InstalledPath.getLast() == InstalledPath.str()) {
- llvm::sys::Path Tmp =
- llvm::sys::Program::FindProgramByName(InstalledPath.getLast());
- if (!Tmp.empty())
- InstalledPath = Tmp;
+ {
+ llvm::SmallString<128> InstalledPath(argv[0]);
+
+ // Do a PATH lookup, if there are no directory components.
+ if (llvm::sys::path::filename(InstalledPath) == InstalledPath) {
+ llvm::sys::Path Tmp = llvm::sys::Program::FindProgramByName(
+ llvm::sys::path::filename(InstalledPath.str()));
+ if (!Tmp.empty())
+ InstalledPath = Tmp.str();
+ }
+ llvm::sys::fs::make_absolute(InstalledPath);
+ InstalledPath = llvm::sys::path::parent_path(InstalledPath);
+ bool exists;
+ if (!llvm::sys::fs::exists(InstalledPath.str(), exists) && exists)
+ TheDriver.setInstalledDir(InstalledPath);
}
- InstalledPath.makeAbsolute();
- InstalledPath.eraseComponent();
- if (InstalledPath.exists())
- TheDriver.setInstalledDir(InstalledPath.str());
// Check for ".*++" or ".*++-[^-]*" to determine if we are a C++
// compiler. This matches things like "c++", "clang++", and "clang++-1.1".
@@ -330,11 +336,10 @@ int main(int argc_, const char **argv_) {
//
// We use *argv instead of argv[0] to work around a bogus g++ warning.
const char *progname = argv_[0];
- std::string ProgName(llvm::sys::Path(progname).getBasename());
+ std::string ProgName(llvm::sys::path::stem(progname));
if (llvm::StringRef(ProgName).endswith("++") ||
llvm::StringRef(ProgName).rsplit('-').first.endswith("++")) {
TheDriver.CCCIsCXX = true;
- TheDriver.CCCGenericGCCName = "g++";
}
// Handle CC_PRINT_OPTIONS and CC_PRINT_OPTIONS_FILE.
@@ -342,6 +347,11 @@ int main(int argc_, const char **argv_) {
if (TheDriver.CCPrintOptions)
TheDriver.CCPrintOptionsFilename = ::getenv("CC_PRINT_OPTIONS_FILE");
+ // Handle CC_PRINT_HEADERS and CC_PRINT_HEADERS_FILE.
+ TheDriver.CCPrintHeaders = !!::getenv("CC_PRINT_HEADERS");
+ if (TheDriver.CCPrintHeaders)
+ TheDriver.CCPrintHeadersFilename = ::getenv("CC_PRINT_HEADERS_FILE");
+
// Handle QA_OVERRIDE_GCC3_OPTIONS and CCC_ADD_ARGS, used for editing a
// command line behind the scenes.
if (const char *OverrideStr = ::getenv("QA_OVERRIDE_GCC3_OPTIONS")) {
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
index 03b01f6..a8de745 100644
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
@@ -221,7 +221,7 @@ typedef enum {
#define BIT_WIDTH 32
// Forward declaration.
-class FilterChooser;
+class ARMFilterChooser;
// Representation of the instruction to work on.
typedef bit_value_t insn_t[BIT_WIDTH];
@@ -240,7 +240,7 @@ typedef bit_value_t insn_t[BIT_WIDTH];
/// the Filter/FilterChooser combo does not know how to distinguish among the
/// Opcodes assigned.
///
-/// An example of a conflcit is
+/// An example of a conflict is
///
/// Conflict:
/// 111101000.00........00010000....
@@ -262,9 +262,9 @@ typedef bit_value_t insn_t[BIT_WIDTH];
/// decoder could try to decode the even/odd register numbering and assign to
/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
version and returns the Opcode since the two have the same Asm format string.
-class Filter {
+class ARMFilter {
protected:
- FilterChooser *Owner; // points to the FilterChooser who owns this filter
+ ARMFilterChooser *Owner; // points to the FilterChooser who owns this filter
unsigned StartBit; // the starting bit position
unsigned NumBits; // number of bits to filter
bool Mixed; // a mixed region contains both set and unset bits
@@ -276,7 +276,7 @@ protected:
std::vector<unsigned> VariableInstructions;
// Map of well-known segment value to its delegate.
- std::map<unsigned, FilterChooser*> FilterChooserMap;
+ std::map<unsigned, ARMFilterChooser*> FilterChooserMap;
// Number of instructions which fall under FilteredInstructions category.
unsigned NumFiltered;
@@ -296,16 +296,17 @@ public:
}
// Return the filter chooser for the group of instructions without constant
// segment values.
- FilterChooser &getVariableFC() {
+ ARMFilterChooser &getVariableFC() {
assert(NumFiltered == 1);
assert(FilterChooserMap.size() == 1);
return *(FilterChooserMap.find((unsigned)-1)->second);
}
- Filter(const Filter &f);
- Filter(FilterChooser &owner, unsigned startBit, unsigned numBits, bool mixed);
+ ARMFilter(const ARMFilter &f);
+ ARMFilter(ARMFilterChooser &owner, unsigned startBit, unsigned numBits,
+ bool mixed);
- ~Filter();
+ ~ARMFilter();
// Divides the decoding task into subtasks and delegates them to the
// inferior FilterChoosers.
@@ -333,7 +334,7 @@ typedef enum {
ATTR_MIXED
} bitAttr_t;
-/// FilterChooser - FilterChooser chooses the best filter among a set of Filters
+/// ARMFilterChooser - ARMFilterChooser chooses the best filter among a set of Filters
/// in order to perform the decoding of instructions at the current level.
///
/// Decoding proceeds from the top down. Based on the well-known encoding bits
@@ -348,11 +349,11 @@ typedef enum {
/// It is useful to think of a Filter as governing the switch stmts of the
/// decoding tree. And each case is delegated to an inferior FilterChooser to
/// decide what further remaining bits to look at.
-class FilterChooser {
+class ARMFilterChooser {
static TARGET_NAME_t TargetName;
protected:
- friend class Filter;
+ friend class ARMFilter;
// Vector of codegen instructions to choose our filter.
const std::vector<const CodeGenInstruction*> &AllInstructions;
@@ -361,14 +362,14 @@ protected:
const std::vector<unsigned> Opcodes;
// Vector of candidate filters.
- std::vector<Filter> Filters;
+ std::vector<ARMFilter> Filters;
// Array of bit values passed down from our parent.
// Set to all BIT_UNFILTERED's for Parent == NULL.
bit_value_t FilterBitValues[BIT_WIDTH];
// Links to the FilterChooser above us in the decoding tree.
- FilterChooser *Parent;
+ ARMFilterChooser *Parent;
// Index of the best filter from Filters.
int BestIndex;
@@ -376,13 +377,13 @@ protected:
public:
static void setTargetName(TARGET_NAME_t tn) { TargetName = tn; }
- FilterChooser(const FilterChooser &FC) :
+ ARMFilterChooser(const ARMFilterChooser &FC) :
AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
Filters(FC.Filters), Parent(FC.Parent), BestIndex(FC.BestIndex) {
memcpy(FilterBitValues, FC.FilterBitValues, sizeof(FilterBitValues));
}
- FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ ARMFilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
const std::vector<unsigned> &IDs) :
AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(NULL),
BestIndex(-1) {
@@ -392,10 +393,10 @@ public:
doFilter();
}
- FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
- const std::vector<unsigned> &IDs,
- bit_value_t (&ParentFilterBitValues)[BIT_WIDTH],
- FilterChooser &parent) :
+ ARMFilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ const std::vector<unsigned> &IDs,
+ bit_value_t (&ParentFilterBitValues)[BIT_WIDTH],
+ ARMFilterChooser &parent) :
AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(&parent),
BestIndex(-1) {
for (unsigned i = 0; i < BIT_WIDTH; ++i)
@@ -426,8 +427,9 @@ protected:
Insn[i] = bitFromBits(Bits, i);
// Set Inst{21} to 1 (wback) when IndexModeBits == IndexModeUpd.
- if (getByteField(*AllInstructions[Opcode]->TheDef, "IndexModeBits")
- == IndexModeUpd)
+ Record *R = AllInstructions[Opcode]->TheDef;
+ if (R->getValue("IndexModeBits") &&
+ getByteField(*R, "IndexModeBits") == IndexModeUpd)
Insn[21] = BIT_TRUE;
}
@@ -452,7 +454,7 @@ protected:
/// dumpFilterArray on each filter chooser up to the top level one.
void dumpStack(raw_ostream &o, const char *prefix);
- Filter &bestFilter() {
+ ARMFilter &bestFilter() {
assert(BestIndex != -1 && "BestIndex not set");
return Filters[BestIndex];
}
@@ -497,11 +499,12 @@ protected:
bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,unsigned Opc);
// Emits code to decode the singleton, and then to decode the rest.
- void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,Filter &Best);
+ void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ ARMFilter &Best);
// Assign a single filter and run with it.
- void runSingleFilter(FilterChooser &owner, unsigned startBit, unsigned numBit,
- bool mixed);
+ void runSingleFilter(ARMFilterChooser &owner, unsigned startBit,
+ unsigned numBit, bool mixed);
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
@@ -530,7 +533,7 @@ protected:
// //
///////////////////////////
-Filter::Filter(const Filter &f) :
+ARMFilter::ARMFilter(const ARMFilter &f) :
Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
FilteredInstructions(f.FilteredInstructions),
VariableInstructions(f.VariableInstructions),
@@ -538,7 +541,7 @@ Filter::Filter(const Filter &f) :
LastOpcFiltered(f.LastOpcFiltered), NumVariable(f.NumVariable) {
}
-Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
+ARMFilter::ARMFilter(ARMFilterChooser &owner, unsigned startBit, unsigned numBits,
bool mixed) : Owner(&owner), StartBit(startBit), NumBits(numBits),
Mixed(mixed) {
assert(StartBit + NumBits - 1 < BIT_WIDTH);
@@ -575,8 +578,8 @@ Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
&& "Filter returns no instruction categories");
}
-Filter::~Filter() {
- std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ARMFilter::~ARMFilter() {
+ std::map<unsigned, ARMFilterChooser*>::iterator filterIterator;
for (filterIterator = FilterChooserMap.begin();
filterIterator != FilterChooserMap.end();
filterIterator++) {
@@ -590,7 +593,7 @@ Filter::~Filter() {
// A special case arises when there's only one entry in the filtered
// instructions. In order to unambiguously decode the singleton, we need to
// match the remaining undecoded encoding bits against the singleton.
-void Filter::recurse() {
+void ARMFilter::recurse() {
std::map<uint64_t, std::vector<unsigned> >::const_iterator mapIterator;
bit_value_t BitValueArray[BIT_WIDTH];
@@ -606,12 +609,12 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// group of instructions whose segment values are variable.
- FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>(
(unsigned)-1,
- new FilterChooser(Owner->AllInstructions,
- VariableInstructions,
- BitValueArray,
- *Owner)
+ new ARMFilterChooser(Owner->AllInstructions,
+ VariableInstructions,
+ BitValueArray,
+ *Owner)
));
}
@@ -638,18 +641,18 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// category of instructions.
- FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>(
mapIterator->first,
- new FilterChooser(Owner->AllInstructions,
- mapIterator->second,
- BitValueArray,
- *Owner)
+ new ARMFilterChooser(Owner->AllInstructions,
+ mapIterator->second,
+ BitValueArray,
+ *Owner)
));
}
}
// Emit code to decode instructions given a segment or segments of bits.
-void Filter::emit(raw_ostream &o, unsigned &Indentation) {
+void ARMFilter::emit(raw_ostream &o, unsigned &Indentation) {
o.indent(Indentation) << "// Check Inst{";
if (NumBits > 1)
@@ -660,7 +663,7 @@ void Filter::emit(raw_ostream &o, unsigned &Indentation) {
o.indent(Indentation) << "switch (fieldFromInstruction(insn, "
<< StartBit << ", " << NumBits << ")) {\n";
- std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ std::map<unsigned, ARMFilterChooser*>::iterator filterIterator;
bool DefaultCase = false;
for (filterIterator = FilterChooserMap.begin();
@@ -709,7 +712,7 @@ void Filter::emit(raw_ostream &o, unsigned &Indentation) {
// Returns the amount of fanout produced by the filter. More fanout implies
// the filter distinguishes more categories of instructions.
-unsigned Filter::usefulness() const {
+unsigned ARMFilter::usefulness() const {
if (VariableInstructions.size())
return FilteredInstructions.size();
else
@@ -723,10 +726,10 @@ unsigned Filter::usefulness() const {
//////////////////////////////////
// Define the symbol here.
-TARGET_NAME_t FilterChooser::TargetName;
+TARGET_NAME_t ARMFilterChooser::TargetName;
// This provides an opportunity for target specific code emission.
-void FilterChooser::emitTopHook(raw_ostream &o) {
+void ARMFilterChooser::emitTopHook(raw_ostream &o) {
if (TargetName == TARGET_ARM) {
// Emit code that references the ARMFormat data type.
o << "static const ARMFormat ARMFormats[] = {\n";
@@ -747,7 +750,7 @@ void FilterChooser::emitTopHook(raw_ostream &o) {
}
// Emit the top level typedef and decodeInstruction() function.
-void FilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
+void ARMFilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
// Run the target specific emit hook.
emitTopHook(o);
@@ -801,7 +804,7 @@ void FilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
o << '\n';
- o.indent(Indentation) << "static uint16_t decodeInstruction(field_t insn) {\n";
+ o.indent(Indentation) <<"static uint16_t decodeInstruction(field_t insn) {\n";
++Indentation; ++Indentation;
// Emits code to decode the instructions.
@@ -818,7 +821,7 @@ void FilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
// This provides an opportunity for target specific code emission after
// emitTop().
-void FilterChooser::emitBot(raw_ostream &o, unsigned &Indentation) {
+void ARMFilterChooser::emitBot(raw_ostream &o, unsigned &Indentation) {
if (TargetName != TARGET_THUMB) return;
// Emit code that decodes the Thumb ISA.
@@ -843,7 +846,7 @@ void FilterChooser::emitBot(raw_ostream &o, unsigned &Indentation) {
//
// Returns false upon encountering the first uninitialized bit value.
// Returns true otherwise.
-bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
+bool ARMFilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
unsigned StartBit, unsigned NumBits) const {
Field = 0;
@@ -860,7 +863,7 @@ bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
/// filter array as a series of chars.
-void FilterChooser::dumpFilterArray(raw_ostream &o,
+void ARMFilterChooser::dumpFilterArray(raw_ostream &o,
bit_value_t (&filter)[BIT_WIDTH]) {
unsigned bitIndex;
@@ -884,8 +887,8 @@ void FilterChooser::dumpFilterArray(raw_ostream &o,
/// dumpStack - dumpStack traverses the filter chooser chain and calls
/// dumpFilterArray on each filter chooser up to the top level one.
-void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
- FilterChooser *current = this;
+void ARMFilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
+ ARMFilterChooser *current = this;
while (current) {
o << prefix;
@@ -896,7 +899,7 @@ void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
}
// Called from Filter::recurse() when singleton exists. For debug purpose.
-void FilterChooser::SingletonExists(unsigned Opc) {
+void ARMFilterChooser::SingletonExists(unsigned Opc) {
insn_t Insn0;
insnWithID(Insn0, Opc);
@@ -923,7 +926,7 @@ void FilterChooser::SingletonExists(unsigned Opc) {
// This returns a list of undecoded bits of an instruction, for example,
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
-unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
+unsigned ARMFilterChooser::getIslands(std::vector<unsigned> &StartBits,
std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
insn_t &Insn) {
unsigned Num, BitNo;
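
The "islands" described above can be made concrete with a simplified standalone sketch (not the emitter's exact logic): an island is a maximal run of bits whose values are already known, recorded as a [start, end] range plus the field value those bits encode.

#include <cstdint>
#include <vector>

enum bit_value_t { BIT_FALSE, BIT_TRUE, BIT_UNFILTERED };

// Collects maximal runs of known bits from a 32-entry bit array.
static unsigned collectIslands(const bit_value_t (&Insn)[32],
                               std::vector<unsigned> &StartBits,
                               std::vector<unsigned> &EndBits,
                               std::vector<uint64_t> &FieldVals) {
  unsigned i = 0;
  while (i < 32) {
    if (Insn[i] == BIT_UNFILTERED) { ++i; continue; }
    unsigned Start = i;
    uint64_t Val = 0;
    for (unsigned k = 0; i < 32 && Insn[i] != BIT_UNFILTERED; ++i, ++k)
      Val |= uint64_t(Insn[i] == BIT_TRUE) << k;  // LSB-first field value
    StartBits.push_back(Start);
    EndBits.push_back(i - 1);
    FieldVals.push_back(Val);
  }
  return StartBits.size();  // number of islands found
}
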
@@ -983,7 +986,7 @@ unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
// Emits code to decode the singleton. Returns true if we have matched all the
// well-known bits.
-bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+bool ARMFilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
unsigned Opc) {
std::vector<unsigned> StartBits;
std::vector<unsigned> EndBits;
@@ -1046,8 +1049,9 @@ bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
}
// Emits code to decode the singleton, and then to decode the rest.
-void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- Filter &Best) {
+void ARMFilterChooser::emitSingletonDecoder(raw_ostream &o,
+ unsigned &Indentation,
+ ARMFilter &Best) {
unsigned Opc = Best.getSingletonOpc();
@@ -1063,10 +1067,11 @@ void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
// Assign a single filter and run with it. Top level API client can initialize
// with a single filter to start the filtering process.
-void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
- unsigned numBit, bool mixed) {
+void ARMFilterChooser::runSingleFilter(ARMFilterChooser &owner,
+ unsigned startBit,
+ unsigned numBit, bool mixed) {
Filters.clear();
- Filter F(*this, startBit, numBit, true);
+ ARMFilter F(*this, startBit, numBit, true);
Filters.push_back(F);
BestIndex = 0; // Sole Filter instance to choose from.
bestFilter().recurse();
@@ -1074,18 +1079,18 @@ void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
-void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
- unsigned BitIndex, bool AllowMixed) {
+void ARMFilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
+ unsigned BitIndex, bool AllowMixed) {
if (RA == ATTR_MIXED && AllowMixed)
- Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, true));
+ Filters.push_back(ARMFilter(*this, StartBit, BitIndex - StartBit, true));
else if (RA == ATTR_ALL_SET && !AllowMixed)
- Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, false));
+ Filters.push_back(ARMFilter(*this, StartBit, BitIndex - StartBit, false));
}
// FilterProcessor scans the well-known encoding bits of the instructions and
// builds up a list of candidate filters. It chooses the best filter and
// recursively descends down the decoding tree.
-bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
+bool ARMFilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
Filters.clear();
BestIndex = -1;
unsigned numInstructions = Opcodes.size();
@@ -1317,7 +1322,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
// Decides on the best configuration of filter(s) to use in order to decode
// the instructions. A conflict of instructions may occur, in which case we
// dump the conflict set to the standard error.
-void FilterChooser::doFilter() {
+void ARMFilterChooser::doFilter() {
unsigned Num = Opcodes.size();
assert(Num && "FilterChooser created with no instructions");
@@ -1350,7 +1355,7 @@ void FilterChooser::doFilter() {
// Emits code to decode our share of instructions. Returns true if the
// emitted code causes a return, which occurs if we know how to decode
// the instruction at this level or the instruction is not decodeable.
-bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
+bool ARMFilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
if (Opcodes.size() == 1)
// There is only one instruction in the set, which is great!
// Call emitSingletonDecoder() to see whether there are any remaining
@@ -1359,7 +1364,7 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
// Choose the best filter to do the decodings!
if (BestIndex != -1) {
- Filter &Best = bestFilter();
+ ARMFilter &Best = bestFilter();
if (Best.getNumFiltered() == 1)
emitSingletonDecoder(o, Indentation, Best);
else
@@ -1488,11 +1493,11 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
class ARMDecoderEmitter::ARMDEBackend {
public:
- ARMDEBackend(ARMDecoderEmitter &frontend) :
+ ARMDEBackend(ARMDecoderEmitter &frontend, RecordKeeper &Records) :
NumberedInstructions(),
Opcodes(),
Frontend(frontend),
- Target(),
+ Target(Records),
FC(NULL)
{
if (Target.getName() == "ARM")
@@ -1538,13 +1543,14 @@ protected:
std::vector<unsigned> Opcodes2;
ARMDecoderEmitter &Frontend;
CodeGenTarget Target;
- FilterChooser *FC;
+ ARMFilterChooser *FC;
TARGET_NAME_t TargetName;
};
-bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
- const CodeGenInstruction &CGI, TARGET_NAME_t TN) {
+bool ARMDecoderEmitter::
+ARMDEBackend::populateInstruction(const CodeGenInstruction &CGI,
+ TARGET_NAME_t TN) {
const Record &Def = *CGI.TheDef;
const StringRef Name = Def.getName();
uint8_t Form = getByteField(Def, "Form");
@@ -1559,6 +1565,10 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
// which is a better design and less fragile than the name matching.
if (Bits.allInComplete()) return false;
+ // Ignore "asm parser only" instructions.
+ if (Def.getValueAsBit("isAsmParserOnly"))
+ return false;
+
if (TN == TARGET_ARM) {
// FIXME: what about Int_MemBarrierV6 and Int_SyncBarrierV6?
if ((Name != "Int_MemBarrierV7" && Name != "Int_SyncBarrierV7") &&
@@ -1566,13 +1576,6 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
return false;
if (thumbInstruction(Form))
return false;
- if (Name.find("CMPz") != std::string::npos /* ||
- Name.find("CMNz") != std::string::npos */)
- return false;
-
- // Ignore pseudo instructions.
- if (Name == "BXr9" || Name == "BMOVPCRX" || Name == "BMOVPCRXr9")
- return false;
// Tail calls are other patterns that generate existing instructions.
if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
@@ -1583,11 +1586,6 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
Name == "MOVr_TC")
return false;
- // VLDMQ/VSTMQ can be handled with the more generic VLDMD/VSTMD.
- if (Name == "VLDMQ" || Name == "VLDMQ_UPD" ||
- Name == "VSTMQ" || Name == "VSTMQ_UPD")
- return false;
-
//
// The following special cases are for conflict resolutions.
//
@@ -1610,13 +1608,13 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
// better off using the generic RSCri and RSCrs instructions.
if (Name == "RSCSri" || Name == "RSCSrs") return false;
- // MOVCCr, MOVCCs, MOVCCi, FCYPScc, FCYPDcc, FNEGScc, and FNEGDcc are used
- // in the compiler to implement conditional moves. We can ignore them in
- // favor of their more generic versions of instructions.
- // See also SDNode *ARMDAGToDAGISel::Select(SDValue Op).
- if (Name == "MOVCCr" || Name == "MOVCCs" || Name == "MOVCCi" ||
- Name == "FCPYScc" || Name == "FCPYDcc" ||
- Name == "FNEGScc" || Name == "FNEGDcc")
+ // MOVCCr, MOVCCs, MOVCCi, MOVCCi16, FCYPScc, FCYPDcc, FNEGScc, and
+ // FNEGDcc are used in the compiler to implement conditional moves.
+ // We can ignore them in favor of their more generic versions of
+ // instructions. See also SDNode *ARMDAGToDAGISel::Select(SDValue Op).
+ if (Name == "MOVCCr" || Name == "MOVCCs" || Name == "MOVCCi" ||
+ Name == "MOVCCi16" || Name == "FCPYScc" || Name == "FCPYDcc" ||
+ Name == "FNEGScc" || Name == "FNEGDcc")
return false;
// Ditto for VMOVDcc, VMOVScc, VNEGDcc, and VNEGScc.
@@ -1624,15 +1622,10 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
Name == "VNEGScc")
return false;
- // Ignore the *_sfp instructions when decoding. They are used by the
- // compiler to implement scalar floating point operations using vector
- // operations in order to work around some performance issues.
- if (Name.find("_sfp") != std::string::npos) return false;
-
- // LDM_RET is a special case of LDM (Load Multiple) where the registers
+ // LDMIA_RET is a special case of LDM (Load Multiple) where the registers
// loaded include the PC, causing a branch to a loaded address. Ignore
- // the LDM_RET instruction when decoding.
- if (Name == "LDM_RET") return false;
+ // the LDMIA_RET instruction when decoding.
+ if (Name == "LDMIA_RET") return false;
// Bcc is in a more generic form than B. Ignore B when decoding.
if (Name == "B") return false;
@@ -1671,18 +1664,17 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
// VREV64qf is equivalent to VREV64q32.
if (Name == "VREV64df" || Name == "VREV64qf") return false;
- // VDUPLNfd is equivalent to VDUPLN32d; VDUPfdf is specialized VDUPLN32d.
- // VDUPLNfq is equivalent to VDUPLN32q; VDUPfqf is specialized VDUPLN32q.
+ // VDUPLNfd is equivalent to VDUPLN32d.
+ // VDUPLNfq is equivalent to VDUPLN32q.
// VLD1df is equivalent to VLD1d32.
// VLD1qf is equivalent to VLD1q32.
// VLD2d64 is equivalent to VLD1q64.
// VST1df is equivalent to VST1d32.
// VST1qf is equivalent to VST1q32.
// VST2d64 is equivalent to VST1q64.
- if (Name == "VDUPLNfd" || Name == "VDUPfdf" ||
- Name == "VDUPLNfq" || Name == "VDUPfqf" ||
- Name == "VLD1df" || Name == "VLD1qf" || Name == "VLD2d64" ||
- Name == "VST1df" || Name == "VST1qf" || Name == "VST2d64")
+ if (Name == "VDUPLNfd" || Name == "VDUPLNfq" ||
+ Name == "VLD1df" || Name == "VLD1qf" || Name == "VLD2d64" ||
+ Name == "VST1df" || Name == "VST1qf" || Name == "VST2d64")
return false;
} else if (TN == TARGET_THUMB) {
if (!thumbInstruction(Form))
@@ -1696,12 +1688,8 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
if (Name == "tTPsoft" || Name == "t2TPsoft")
return false;
- // Ignore tLEApcrel and tLEApcrelJT, prefer tADDrPCi.
- if (Name == "tLEApcrel" || Name == "tLEApcrelJT")
- return false;
-
- // Ignore t2LEApcrel, prefer the generic t2ADD* for disassembly printing.
- if (Name == "t2LEApcrel")
+ // Ignore tADR, prefer tADDrPCi.
+ if (Name == "tADR")
return false;
// Ignore tADDrSP, tADDspr, and tPICADD, prefer the generic tADDhirr.
@@ -1719,42 +1707,33 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
if (Name == "t2LDRDpci")
return false;
- // Ignore t2TBB, t2TBH and prefer the generic t2TBBgen, t2TBHgen.
- if (Name == "t2TBB" || Name == "t2TBH")
- return false;
-
// Resolve conflicts:
//
// tBfar conflicts with tBLr9
- // tCMNz conflicts with tCMN (with assembly format strings being equal)
- // tPOP_RET/t2LDM_RET conflict with tPOP/t2LDM (ditto)
+ // tPOP_RET/t2LDMIA_RET conflict with tPOP/t2LDM (ditto)
// tMOVCCi conflicts with tMOVi8
// tMOVCCr conflicts with tMOVgpr2gpr
- // tBR_JTr conflicts with tBRIND
// tSpill conflicts with tSTRspi
// tLDRcp conflicts with tLDRspi
// tRestore conflicts with tLDRspi
- // t2LEApcrelJT conflicts with t2LEApcrel
+ // t2MOVCCi16 conflicts with tMOVi16
if (Name == "tBfar" ||
- /* Name == "tCMNz" || */ Name == "tCMPzi8" || Name == "tCMPzr" ||
- Name == "tCMPzhir" || /* Name == "t2CMNzrr" || Name == "t2CMNzrs" ||
- Name == "t2CMNzri" || */ Name == "t2CMPzrr" || Name == "t2CMPzrs" ||
- Name == "t2CMPzri" || Name == "tPOP_RET" || Name == "t2LDM_RET" ||
- Name == "tMOVCCi" || Name == "tMOVCCr" || Name == "tBR_JTr" ||
+ Name == "tPOP_RET" || Name == "t2LDMIA_RET" ||
+ Name == "tMOVCCi" || Name == "tMOVCCr" ||
Name == "tSpill" || Name == "tLDRcp" || Name == "tRestore" ||
- Name == "t2LEApcrelJT")
+ Name == "t2MOVCCi16")
return false;
}
- // Dumps the instruction encoding format.
- switch (TargetName) {
- case TARGET_ARM:
- case TARGET_THUMB:
- DEBUG(errs() << Name << " " << stringForARMFormat((ARMFormat)Form));
- break;
- }
-
DEBUG({
+ // Dumps the instruction encoding format.
+ switch (TargetName) {
+ case TARGET_ARM:
+ case TARGET_THUMB:
+ errs() << Name << " " << stringForARMFormat((ARMFormat)Form);
+ break;
+ }
+
errs() << " ";
// Dumps the instruction encoding bits.
@@ -1763,8 +1742,8 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
errs() << '\n';
// Dumps the list of operand info.
- for (unsigned i = 0, e = CGI.OperandList.size(); i != e; ++i) {
- CodeGenInstruction::OperandInfo Info = CGI.OperandList[i];
+ for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo &Info = CGI.Operands[i];
const std::string &OperandName = Info.Name;
const Record &OperandDef = *Info.Rec;
@@ -1778,32 +1757,20 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
void ARMDecoderEmitter::ARMDEBackend::populateInstructions() {
getInstructionsByEnumValue(NumberedInstructions);
- uint16_t numUIDs = NumberedInstructions.size();
- uint16_t uid;
-
- const char *instClass = NULL;
-
- switch (TargetName) {
- case TARGET_ARM:
- instClass = "InstARM";
- break;
- default:
- assert(0 && "Unreachable code!");
- }
-
- for (uid = 0; uid < numUIDs; uid++) {
- // filter out intrinsics
- if (!NumberedInstructions[uid]->TheDef->isSubClassOf(instClass))
- continue;
+ unsigned numUIDs = NumberedInstructions.size();
+ if (TargetName == TARGET_ARM) {
+ for (unsigned uid = 0; uid < numUIDs; uid++) {
+ // filter out intrinsics
+ if (!NumberedInstructions[uid]->TheDef->isSubClassOf("InstARM"))
+ continue;
- if (populateInstruction(*NumberedInstructions[uid], TargetName))
- Opcodes.push_back(uid);
- }
+ if (populateInstruction(*NumberedInstructions[uid], TargetName))
+ Opcodes.push_back(uid);
+ }
- // Special handling for the ARM chip, which supports two modes of execution.
- // This branch handles the Thumb opcodes.
- if (TargetName == TARGET_ARM) {
- for (uid = 0; uid < numUIDs; uid++) {
+ // Special handling for the ARM chip, which supports two modes of execution.
+ // This branch handles the Thumb opcodes.
+ for (unsigned uid = 0; uid < numUIDs; uid++) {
// filter out intrinsics
if (!NumberedInstructions[uid]->TheDef->isSubClassOf("InstARM")
&& !NumberedInstructions[uid]->TheDef->isSubClassOf("InstThumb"))
@@ -1812,6 +1779,18 @@ void ARMDecoderEmitter::ARMDEBackend::populateInstructions() {
if (populateInstruction(*NumberedInstructions[uid], TARGET_THUMB))
Opcodes2.push_back(uid);
}
+
+ return;
+ }
+
+ // For other targets.
+ for (unsigned uid = 0; uid < numUIDs; uid++) {
+ Record *R = NumberedInstructions[uid]->TheDef;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
+
+ if (populateInstruction(*NumberedInstructions[uid], TargetName))
+ Opcodes.push_back(uid);
}
}
@@ -1826,25 +1805,25 @@ void ARMDecoderEmitter::ARMDEBackend::emit(raw_ostream &o) {
assert(0 && "Unreachable code!");
}
- o << "#include \"llvm/System/DataTypes.h\"\n";
+ o << "#include \"llvm/Support/DataTypes.h\"\n";
o << "#include <assert.h>\n";
o << '\n';
o << "namespace llvm {\n\n";
- FilterChooser::setTargetName(TargetName);
+ ARMFilterChooser::setTargetName(TargetName);
switch (TargetName) {
case TARGET_ARM: {
// Emit common utility and ARM ISA decoder.
- FC = new FilterChooser(NumberedInstructions, Opcodes);
+ FC = new ARMFilterChooser(NumberedInstructions, Opcodes);
// Reset indentation level.
unsigned Indentation = 0;
FC->emitTop(o, Indentation);
delete FC;
// Emit Thumb ISA decoder as well.
- FilterChooser::setTargetName(TARGET_THUMB);
- FC = new FilterChooser(NumberedInstructions, Opcodes2);
+ ARMFilterChooser::setTargetName(TARGET_THUMB);
+ FC = new ARMFilterChooser(NumberedInstructions, Opcodes2);
// Reset indentation level.
Indentation = 0;
FC->emitBot(o, Indentation);
@@ -1863,7 +1842,7 @@ void ARMDecoderEmitter::ARMDEBackend::emit(raw_ostream &o) {
void ARMDecoderEmitter::initBackend()
{
- Backend = new ARMDEBackend(*this);
+ Backend = new ARMDEBackend(*this, Records);
}
void ARMDecoderEmitter::run(raw_ostream &o)
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
index 571a947..1faeb91 100644
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
+++ b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
@@ -17,7 +17,7 @@
#include "TableGenBackend.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 5583986..e3def41 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -8,7 +8,11 @@
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits a target specifier matcher for converting parsed
-// assembly operands in the MCInst structures.
+// assembly operands in the MCInst structures. It also emits a matcher for
+// custom operand parsing.
+//
+// Converting assembly operands into MCInst structures
+// ---------------------------------------------------
//
// The input to the target specific matcher is a list of literal tokens and
// operands. The target specific parser should generally eliminate any syntax
@@ -20,7 +24,7 @@
// Some example inputs, for X86:
// 'addl' (immediate ...) (register ...)
// 'add' (immediate ...) (memory ...)
-// 'call' '*' %epc
+// 'call' '*' %epc
//
// The assembly matcher is responsible for converting this input into a precise
// machine instruction (i.e., an instruction with a well defined encoding). This
@@ -63,26 +67,47 @@
// In addition, the subset relation amongst classes induces a partial order
// on such tuples, which we use to resolve ambiguities.
//
-// FIXME: What do we do if a crazy case shows up where this is the wrong
-// resolution?
-//
// 2. The input can now be treated as a tuple of classes (static tokens are
// simple singleton sets). Each such tuple should generally map to a single
// instruction (we currently ignore cases where this isn't true, whee!!!),
// which we can emit a simple matcher for.
//
+// Custom Operand Parsing
+// ----------------------
+//
+// Some targets need a custom way to parse operands: some instructions can
+// contain arguments that represent processor flags and other kinds of
+// identifiers that need to be mapped to specific values in the final encoded
+// instruction. The target specific custom operand parsing works in the
+// following way (a schematic sketch follows this header comment):
+//
+// 1. An operand match table is built; each entry contains a mnemonic, an
+// operand class, a mask of all operand positions for that same
+// class/mnemonic pair, and the target features to be checked while
+// trying to match.
+//
+// 2. The operand matcher tries every entry with the same mnemonic and
+// checks whether the target features for this mnemonic also match.
+// If the operand to be matched has its index present in the mask, a
+// successful match occurs; otherwise, it falls back to the regular
+// operand parsing.
+//
+// 3. For a match success, each operand class that has a 'ParserMethod'
+// becomes part of a switch from where the custom method is called.
+//
//===----------------------------------------------------------------------===//
#include "AsmMatcherEmitter.h"
#include "CodeGenTarget.h"
#include "Record.h"
+#include "StringMatcher.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include <list>
#include <map>
#include <set>
using namespace llvm;
@@ -91,197 +116,8 @@ static cl::opt<std::string>
MatchPrefix("match-prefix", cl::init(""),
cl::desc("Only match instructions with the given prefix"));
-/// FlattenVariants - Flatten an .td file assembly string by selecting the
-/// variant at index \arg N.
-static std::string FlattenVariants(const std::string &AsmString,
- unsigned N) {
- StringRef Cur = AsmString;
- std::string Res = "";
-
- for (;;) {
- // Find the start of the next variant string.
- size_t VariantsStart = 0;
- for (size_t e = Cur.size(); VariantsStart != e; ++VariantsStart)
- if (Cur[VariantsStart] == '{' &&
- (VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
- Cur[VariantsStart-1] != '\\')))
- break;
-
- // Add the prefix to the result.
- Res += Cur.slice(0, VariantsStart);
- if (VariantsStart == Cur.size())
- break;
-
- ++VariantsStart; // Skip the '{'.
-
- // Scan to the end of the variants string.
- size_t VariantsEnd = VariantsStart;
- unsigned NestedBraces = 1;
- for (size_t e = Cur.size(); VariantsEnd != e; ++VariantsEnd) {
- if (Cur[VariantsEnd] == '}' && Cur[VariantsEnd-1] != '\\') {
- if (--NestedBraces == 0)
- break;
- } else if (Cur[VariantsEnd] == '{')
- ++NestedBraces;
- }
-
- // Select the Nth variant (or empty).
- StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
- for (unsigned i = 0; i != N; ++i)
- Selection = Selection.split('|').second;
- Res += Selection.split('|').first;
-
- assert(VariantsEnd != Cur.size() &&
- "Unterminated variants in assembly string!");
- Cur = Cur.substr(VariantsEnd + 1);
- }
-
- return Res;
-}
-
-/// TokenizeAsmString - Tokenize a simplified assembly string.
-static void TokenizeAsmString(StringRef AsmString,
- SmallVectorImpl<StringRef> &Tokens) {
- unsigned Prev = 0;
- bool InTok = true;
- for (unsigned i = 0, e = AsmString.size(); i != e; ++i) {
- switch (AsmString[i]) {
- case '[':
- case ']':
- case '*':
- case '!':
- case ' ':
- case '\t':
- case ',':
- if (InTok) {
- Tokens.push_back(AsmString.slice(Prev, i));
- InTok = false;
- }
- if (!isspace(AsmString[i]) && AsmString[i] != ',')
- Tokens.push_back(AsmString.substr(i, 1));
- Prev = i + 1;
- break;
-
- case '\\':
- if (InTok) {
- Tokens.push_back(AsmString.slice(Prev, i));
- InTok = false;
- }
- ++i;
- assert(i != AsmString.size() && "Invalid quoted character");
- Tokens.push_back(AsmString.substr(i, 1));
- Prev = i + 1;
- break;
-
- case '$': {
- // If this isn't "${", treat like a normal token.
- if (i + 1 == AsmString.size() || AsmString[i + 1] != '{') {
- if (InTok) {
- Tokens.push_back(AsmString.slice(Prev, i));
- InTok = false;
- }
- Prev = i;
- break;
- }
-
- if (InTok) {
- Tokens.push_back(AsmString.slice(Prev, i));
- InTok = false;
- }
-
- StringRef::iterator End =
- std::find(AsmString.begin() + i, AsmString.end(), '}');
- assert(End != AsmString.end() && "Missing brace in operand reference!");
- size_t EndPos = End - AsmString.begin();
- Tokens.push_back(AsmString.slice(i, EndPos+1));
- Prev = EndPos + 1;
- i = EndPos;
- break;
- }
-
- case '.':
- if (InTok) {
- Tokens.push_back(AsmString.slice(Prev, i));
- }
- Prev = i;
- InTok = true;
- break;
-
- default:
- InTok = true;
- }
- }
- if (InTok && Prev != AsmString.size())
- Tokens.push_back(AsmString.substr(Prev));
-}
-
-static bool IsAssemblerInstruction(StringRef Name,
- const CodeGenInstruction &CGI,
- const SmallVectorImpl<StringRef> &Tokens) {
- // Ignore "codegen only" instructions.
- if (CGI.TheDef->getValueAsBit("isCodeGenOnly"))
- return false;
-
- // Ignore pseudo ops.
- //
- // FIXME: This is a hack; can we convert these instructions to set the
- // "codegen only" bit instead?
- if (const RecordVal *Form = CGI.TheDef->getValue("Form"))
- if (Form->getValue()->getAsString() == "Pseudo")
- return false;
-
- // Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
- //
- // FIXME: This is a total hack.
- if (StringRef(Name).startswith("Int_") || StringRef(Name).endswith("_Int"))
- return false;
-
- // Ignore instructions with no .s string.
- //
- // FIXME: What are these?
- if (CGI.AsmString.empty())
- return false;
-
- // FIXME: Hack; ignore any instructions with a newline in them.
- if (std::find(CGI.AsmString.begin(),
- CGI.AsmString.end(), '\n') != CGI.AsmString.end())
- return false;
-
- // Ignore instructions with attributes, these are always fake instructions for
- // simplifying codegen.
- //
- // FIXME: Is this true?
- //
- // Also, check for instructions which reference the operand multiple times;
- // this implies a constraint we would not honor.
- std::set<std::string> OperandNames;
- for (unsigned i = 1, e = Tokens.size(); i < e; ++i) {
- if (Tokens[i][0] == '$' &&
- std::find(Tokens[i].begin(),
- Tokens[i].end(), ':') != Tokens[i].end()) {
- DEBUG({
- errs() << "warning: '" << Name << "': "
- << "ignoring instruction; operand with attribute '"
- << Tokens[i] << "'\n";
- });
- return false;
- }
-
- if (Tokens[i][0] == '$' && !OperandNames.insert(Tokens[i]).second) {
- DEBUG({
- errs() << "warning: '" << Name << "': "
- << "ignoring instruction with tied operand '"
- << Tokens[i].str() << "'\n";
- });
- return false;
- }
- }
-
- return true;
-}
-
namespace {
-
+class AsmMatcherInfo;
struct SubtargetFeatureInfo;
/// ClassInfo - Helper class for storing the information about a particular
@@ -331,6 +167,10 @@ struct ClassInfo {
/// MCInst; this is not valid for Token or register kinds.
std::string RenderMethod;
+ /// ParserMethod - The name of the operand method to do a target specific
+ /// parsing on the operand.
+ std::string ParserMethod;
+
/// For register classes, the records for all the registers in this class.
std::set<Record*> Registers;
@@ -360,7 +200,7 @@ public:
std::set<Record*> Tmp;
std::insert_iterator< std::set<Record*> > II(Tmp, Tmp.begin());
- std::set_intersection(Registers.begin(), Registers.end(),
+ std::set_intersection(Registers.begin(), Registers.end(),
RHS.Registers.begin(), RHS.Registers.end(),
II);
@@ -380,11 +220,11 @@ public:
const ClassInfo *RHSRoot = &RHS;
while (!RHSRoot->SuperClasses.empty())
RHSRoot = RHSRoot->SuperClasses.front();
-
+
return Root == RHSRoot;
}
- /// isSubsetOf - Test whether this class is a subset of \arg RHS;
+ /// isSubsetOf - Test whether this class is a subset of \arg RHS;
bool isSubsetOf(const ClassInfo &RHS) const {
// This is a subset of RHS if it is the same class...
if (this == &RHS)
@@ -430,32 +270,131 @@ public:
}
};
-/// InstructionInfo - Helper class for storing the necessary information for an
-/// instruction which is capable of being matched.
-struct InstructionInfo {
- struct Operand {
+/// MatchableInfo - Helper class for storing the necessary information for an
+/// instruction or alias which is capable of being matched.
+struct MatchableInfo {
+ struct AsmOperand {
+ /// Token - This is the token that the operand came from.
+ StringRef Token;
+
/// The unique class instance this operand should match.
ClassInfo *Class;
- /// The original operand this corresponds to, if any.
- const CodeGenInstruction::OperandInfo *OperandInfo;
+ /// The operand name this is, if anything.
+ StringRef SrcOpName;
+
+ /// The suboperand index within SrcOpName, or -1 for the entire operand.
+ int SubOpIdx;
+
+ explicit AsmOperand(StringRef T) : Token(T), Class(0), SubOpIdx(-1) {}
};
- /// InstrName - The target name for this instruction.
- std::string InstrName;
+ /// ResOperand - This represents a single operand in the result instruction
+ /// generated by the match. In cases (like addressing modes) where a single
+ /// assembler operand expands to multiple MCOperands, this represents the
+ /// single assembler operand, not the MCOperand.
+ struct ResOperand {
+ enum {
+ /// RenderAsmOperand - This represents an operand result that is
+ /// generated by calling the render method on the assembly operand. The
+ /// corresponding AsmOperand is specified by AsmOperandNum.
+ RenderAsmOperand,
+
+ /// TiedOperand - This represents a result operand that is a duplicate of
+ /// a previous result operand.
+ TiedOperand,
+
+ /// ImmOperand - This represents an immediate value that is dumped into
+ /// the operand.
+ ImmOperand,
+
+ /// RegOperand - This represents a fixed register that is dumped in.
+ RegOperand
+ } Kind;
+
+ union {
+ /// This is the operand # in the AsmOperands list that this should be
+ /// copied from.
+ unsigned AsmOperandNum;
+
+ /// TiedOperandNum - This is the (earlier) result operand that should be
+ /// copied from.
+ unsigned TiedOperandNum;
+
+ /// ImmVal - This is the immediate value added to the instruction.
+ int64_t ImmVal;
+
+ /// Register - This is the register record.
+ Record *Register;
+ };
+
+ /// MINumOperands - The number of MCInst operands populated by this
+ /// operand.
+ unsigned MINumOperands;
+
+ static ResOperand getRenderedOp(unsigned AsmOpNum, unsigned NumOperands) {
+ ResOperand X;
+ X.Kind = RenderAsmOperand;
+ X.AsmOperandNum = AsmOpNum;
+ X.MINumOperands = NumOperands;
+ return X;
+ }
+
+ static ResOperand getTiedOp(unsigned TiedOperandNum) {
+ ResOperand X;
+ X.Kind = TiedOperand;
+ X.TiedOperandNum = TiedOperandNum;
+ X.MINumOperands = 1;
+ return X;
+ }
- /// Instr - The instruction this matches.
- const CodeGenInstruction *Instr;
+ static ResOperand getImmOp(int64_t Val) {
+ ResOperand X;
+ X.Kind = ImmOperand;
+ X.ImmVal = Val;
+ X.MINumOperands = 1;
+ return X;
+ }
+
+ static ResOperand getRegOp(Record *Reg) {
+ ResOperand X;
+ X.Kind = RegOperand;
+ X.Register = Reg;
+ X.MINumOperands = 1;
+ return X;
+ }
+ };
+
+ /// TheDef - This is the definition of the instruction or InstAlias that this
+ /// matchable came from.
+ Record *const TheDef;
+
+ /// DefRec - This is the definition that it came from.
+ PointerUnion<const CodeGenInstruction*, const CodeGenInstAlias*> DefRec;
+
+ const CodeGenInstruction *getResultInst() const {
+ if (DefRec.is<const CodeGenInstruction*>())
+ return DefRec.get<const CodeGenInstruction*>();
+ return DefRec.get<const CodeGenInstAlias*>()->ResultInst;
+ }
+
+ /// ResOperands - This is the operand list that should be built for the result
+ /// MCInst.
+ std::vector<ResOperand> ResOperands;
/// AsmString - The assembly string for this instruction (with variants
- /// removed).
+ /// removed), e.g. "movsx $src, $dst".
std::string AsmString;
- /// Tokens - The tokenized assembly pattern that this instruction matches.
- SmallVector<StringRef, 4> Tokens;
+ /// Mnemonic - This is the first token of the matched instruction, its
+ /// mnemonic.
+ StringRef Mnemonic;
- /// Operands - The operands that this instruction matches.
- SmallVector<Operand, 4> Operands;
+ /// AsmOperands - The textual operands that this instruction matches,
+ /// annotated with a class and where in the OperandList they were defined.
+ /// This directly corresponds to the tokenized AsmString after the mnemonic is
+ /// removed.
+ SmallVector<AsmOperand, 4> AsmOperands;
/// Predicates - The required subtarget features to match this instruction.
SmallVector<SubtargetFeatureInfo*, 4> RequiredFeatures;
@@ -465,29 +404,79 @@ struct InstructionInfo {
/// function.
std::string ConversionFnKind;
- /// operator< - Compare two instructions.
- bool operator<(const InstructionInfo &RHS) const {
- if (Operands.size() != RHS.Operands.size())
- return Operands.size() < RHS.Operands.size();
+ MatchableInfo(const CodeGenInstruction &CGI)
+ : TheDef(CGI.TheDef), DefRec(&CGI), AsmString(CGI.AsmString) {
+ }
+
+ MatchableInfo(const CodeGenInstAlias *Alias)
+ : TheDef(Alias->TheDef), DefRec(Alias), AsmString(Alias->AsmString) {
+ }
+
+ void Initialize(const AsmMatcherInfo &Info,
+ SmallPtrSet<Record*, 16> &SingletonRegisters);
+
+ /// Validate - Return true if this matchable is a valid thing to match
+ /// against, after performing a number of validity checks.
+ bool Validate(StringRef CommentDelimiter, bool Hack) const;
+
+ /// getSingletonRegisterForAsmOperand - If the specified token is a singleton
+ /// register, return the Record for it, otherwise return null.
+ Record *getSingletonRegisterForAsmOperand(unsigned i,
+ const AsmMatcherInfo &Info) const;
+
+ /// FindAsmOperand - Find the AsmOperand with the specified name and
+ /// suboperand index.
+ int FindAsmOperand(StringRef N, int SubOpIdx) const {
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
+ if (N == AsmOperands[i].SrcOpName &&
+ SubOpIdx == AsmOperands[i].SubOpIdx)
+ return i;
+ return -1;
+ }
+
+ /// FindAsmOperandNamed - Find the first AsmOperand with the specified name.
+ /// This does not check the suboperand index.
+ int FindAsmOperandNamed(StringRef N) const {
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
+ if (N == AsmOperands[i].SrcOpName)
+ return i;
+ return -1;
+ }
+
+ void BuildInstructionResultOperands();
+ void BuildAliasResultOperands();
+
+ /// operator< - Compare two matchables.
+ bool operator<(const MatchableInfo &RHS) const {
+ // The primary comparator is the instruction mnemonic.
+ if (Mnemonic != RHS.Mnemonic)
+ return Mnemonic < RHS.Mnemonic;
+
+ if (AsmOperands.size() != RHS.AsmOperands.size())
+ return AsmOperands.size() < RHS.AsmOperands.size();
// Compare lexicographically by operand. The matcher validates that other
- // orderings wouldn't be ambiguous using \see CouldMatchAmiguouslyWith().
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
- if (*Operands[i].Class < *RHS.Operands[i].Class)
+ // orderings wouldn't be ambiguous using \see CouldMatchAmbiguouslyWith().
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
return true;
- if (*RHS.Operands[i].Class < *Operands[i].Class)
+ if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
return false;
}
return false;
}
- /// CouldMatchAmiguouslyWith - Check whether this instruction could
+ /// CouldMatchAmbiguouslyWith - Check whether this matchable could
/// ambiguously match the same set of operands as \arg RHS (without being a
/// strictly superior match).
- bool CouldMatchAmiguouslyWith(const InstructionInfo &RHS) {
+ bool CouldMatchAmbiguouslyWith(const MatchableInfo &RHS) {
+ // The primary comparator is the instruction mnemonic.
+ if (Mnemonic != RHS.Mnemonic)
+ return false;
+
// The number of operands is unambiguous.
- if (Operands.size() != RHS.Operands.size())
+ if (AsmOperands.size() != RHS.AsmOperands.size())
return false;
// Otherwise, make sure the ordering of the two instructions is unambiguous
@@ -496,29 +485,31 @@ struct InstructionInfo {
// Tokens and operand kinds are unambiguous (assuming a correct target
// specific parser).
- for (unsigned i = 0, e = Operands.size(); i != e; ++i)
- if (Operands[i].Class->Kind != RHS.Operands[i].Class->Kind ||
- Operands[i].Class->Kind == ClassInfo::Token)
- if (*Operands[i].Class < *RHS.Operands[i].Class ||
- *RHS.Operands[i].Class < *Operands[i].Class)
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
+ if (AsmOperands[i].Class->Kind != RHS.AsmOperands[i].Class->Kind ||
+ AsmOperands[i].Class->Kind == ClassInfo::Token)
+ if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class ||
+ *RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
return false;
-
+
// Otherwise, this operand could commute if all operands are equivalent, or
// there is a pair of operands that compare less than and a pair that
// compare greater than.
bool HasLT = false, HasGT = false;
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
- if (*Operands[i].Class < *RHS.Operands[i].Class)
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
HasLT = true;
- if (*RHS.Operands[i].Class < *Operands[i].Class)
+ if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
HasGT = true;
}
return !(HasLT ^ HasGT);
}
-public:
void dump();
+
+private:
+ void TokenizeAsmString(const AsmMatcherInfo &Info);
};
/// SubtargetFeatureInfo - Helper class for storing information on a subtarget
@@ -530,26 +521,52 @@ struct SubtargetFeatureInfo {
/// \brief An unique index assigned to represent this feature.
unsigned Index;
+ SubtargetFeatureInfo(Record *D, unsigned Idx) : TheDef(D), Index(Idx) {}
+
/// \brief The name of the enumerated constant identifying this feature.
- std::string EnumName;
+ std::string getEnumName() const {
+ return "Feature_" + TheDef->getName();
+ }
};
+struct OperandMatchEntry {
+ unsigned OperandMask;
+ MatchableInfo* MI;
+ ClassInfo *CI;
+
+ static OperandMatchEntry Create(MatchableInfo* mi, ClassInfo *ci,
+ unsigned opMask) {
+ OperandMatchEntry X;
+ X.OperandMask = opMask;
+ X.CI = ci;
+ X.MI = mi;
+ return X;
+ }
+};
+
+
class AsmMatcherInfo {
public:
+ /// Tracked Records
+ RecordKeeper &Records;
+
/// The tablegen AsmParser record.
Record *AsmParser;
- /// The AsmParser "CommentDelimiter" value.
- std::string CommentDelimiter;
+ /// Target - The target information.
+ CodeGenTarget &Target;
/// The AsmParser "RegisterPrefix" value.
std::string RegisterPrefix;
/// The classes which are needed for matching.
std::vector<ClassInfo*> Classes;
-
- /// The information on the instruction to match.
- std::vector<InstructionInfo*> Instructions;
+
+ /// The information on the matchables to match.
+ std::vector<MatchableInfo*> Matchables;
+
+ /// Info for custom matching operands by user defined methods.
+ std::vector<OperandMatchEntry> OperandMatchInfo;
/// Map of Register records to their class information.
std::map<Record*, ClassInfo*> RegisterClasses;
@@ -572,108 +589,265 @@ private:
ClassInfo *getTokenClass(StringRef Token);
/// getOperandClass - Lookup or create the class for the given operand.
- ClassInfo *getOperandClass(StringRef Token,
- const CodeGenInstruction::OperandInfo &OI);
-
- /// getSubtargetFeature - Lookup or create the subtarget feature info for the
- /// given operand.
- SubtargetFeatureInfo *getSubtargetFeature(Record *Def) {
- assert(Def->isSubClassOf("Predicate") && "Invalid predicate type!");
-
- SubtargetFeatureInfo *&Entry = SubtargetFeatures[Def];
- if (!Entry) {
- Entry = new SubtargetFeatureInfo;
- Entry->TheDef = Def;
- Entry->Index = SubtargetFeatures.size() - 1;
- Entry->EnumName = "Feature_" + Def->getName();
- assert(Entry->Index < 32 && "Too many subtarget features!");
- }
-
- return Entry;
- }
+ ClassInfo *getOperandClass(const CGIOperandList::OperandInfo &OI,
+ int SubOpIdx = -1);
/// BuildRegisterClasses - Build the ClassInfo* instances for register
/// classes.
- void BuildRegisterClasses(CodeGenTarget &Target,
- std::set<std::string> &SingletonRegisterNames);
+ void BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters);
/// BuildOperandClasses - Build the ClassInfo* instances for user defined
/// operand classes.
- void BuildOperandClasses(CodeGenTarget &Target);
+ void BuildOperandClasses();
+
+ void BuildInstructionOperandReference(MatchableInfo *II, StringRef OpName,
+ unsigned AsmOpIdx);
+ void BuildAliasOperandReference(MatchableInfo *II, StringRef OpName,
+ MatchableInfo::AsmOperand &Op);
public:
- AsmMatcherInfo(Record *_AsmParser);
+ AsmMatcherInfo(Record *AsmParser,
+ CodeGenTarget &Target,
+ RecordKeeper &Records);
/// BuildInfo - Construct the various tables used during matching.
- void BuildInfo(CodeGenTarget &Target);
+ void BuildInfo();
+
+ /// BuildOperandMatchInfo - Build the necessary information to handle user
+ /// defined operand parsing methods.
+ void BuildOperandMatchInfo();
+
+ /// getSubtargetFeature - Lookup or create the subtarget feature info for the
+ /// given operand.
+ SubtargetFeatureInfo *getSubtargetFeature(Record *Def) const {
+ assert(Def->isSubClassOf("Predicate") && "Invalid predicate type!");
+ std::map<Record*, SubtargetFeatureInfo*>::const_iterator I =
+ SubtargetFeatures.find(Def);
+ return I == SubtargetFeatures.end() ? 0 : I->second;
+ }
+
+ RecordKeeper &getRecords() const {
+ return Records;
+ }
};
}
-void InstructionInfo::dump() {
- errs() << InstrName << " -- " << "flattened:\"" << AsmString << '\"'
- << ", tokens:[";
- for (unsigned i = 0, e = Tokens.size(); i != e; ++i) {
- errs() << Tokens[i];
- if (i + 1 != e)
- errs() << ", ";
- }
- errs() << "]\n";
+void MatchableInfo::dump() {
+ errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
- Operand &Op = Operands[i];
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ AsmOperand &Op = AsmOperands[i];
errs() << " op[" << i << "] = " << Op.Class->ClassName << " - ";
- if (Op.Class->Kind == ClassInfo::Token) {
- errs() << '\"' << Tokens[i] << "\"\n";
- continue;
+ errs() << '\"' << Op.Token << "\"\n";
+ }
+}
+
+void MatchableInfo::Initialize(const AsmMatcherInfo &Info,
+ SmallPtrSet<Record*, 16> &SingletonRegisters) {
+ // TODO: Eventually support asmparser for Variant != 0.
+ AsmString = CodeGenInstruction::FlattenAsmStringVariants(AsmString, 0);
+
+ TokenizeAsmString(Info);
+
+ // Compute the required features.
+ std::vector<Record*> Predicates = TheDef->getValueAsListOfDefs("Predicates");
+ for (unsigned i = 0, e = Predicates.size(); i != e; ++i)
+ if (SubtargetFeatureInfo *Feature =
+ Info.getSubtargetFeature(Predicates[i]))
+ RequiredFeatures.push_back(Feature);
+
+ // Collect singleton registers, if used.
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ if (Record *Reg = getSingletonRegisterForAsmOperand(i, Info))
+ SingletonRegisters.insert(Reg);
+ }
+}
+
+/// TokenizeAsmString - Tokenize a simplified assembly string.
+void MatchableInfo::TokenizeAsmString(const AsmMatcherInfo &Info) {
+ StringRef String = AsmString;
+ unsigned Prev = 0;
+ bool InTok = true;
+ for (unsigned i = 0, e = String.size(); i != e; ++i) {
+ switch (String[i]) {
+ case '[':
+ case ']':
+ case '*':
+ case '!':
+ case ' ':
+ case '\t':
+ case ',':
+ if (InTok) {
+ AsmOperands.push_back(AsmOperand(String.slice(Prev, i)));
+ InTok = false;
+ }
+ if (!isspace(String[i]) && String[i] != ',')
+ AsmOperands.push_back(AsmOperand(String.substr(i, 1)));
+ Prev = i + 1;
+ break;
+
+ case '\\':
+ if (InTok) {
+ AsmOperands.push_back(AsmOperand(String.slice(Prev, i)));
+ InTok = false;
+ }
+ ++i;
+ assert(i != String.size() && "Invalid quoted character");
+ AsmOperands.push_back(AsmOperand(String.substr(i, 1)));
+ Prev = i + 1;
+ break;
+
+ case '$': {
+ if (InTok) {
+ AsmOperands.push_back(AsmOperand(String.slice(Prev, i)));
+ InTok = false;
+ }
+
+ // If this isn't "${", treat like a normal token.
+ if (i + 1 == String.size() || String[i + 1] != '{') {
+ Prev = i;
+ break;
+ }
+
+ StringRef::iterator End = std::find(String.begin() + i, String.end(),'}');
+ assert(End != String.end() && "Missing brace in operand reference!");
+ size_t EndPos = End - String.begin();
+ AsmOperands.push_back(AsmOperand(String.slice(i, EndPos+1)));
+ Prev = EndPos + 1;
+ i = EndPos;
+ break;
}
- if (!Op.OperandInfo) {
- errs() << "(singleton register)\n";
- continue;
+ case '.':
+ if (InTok)
+ AsmOperands.push_back(AsmOperand(String.slice(Prev, i)));
+ Prev = i;
+ InTok = true;
+ break;
+
+ default:
+ InTok = true;
}
+ }
+ if (InTok && Prev != String.size())
+ AsmOperands.push_back(AsmOperand(String.substr(Prev)));
+
+ // The first token of the instruction is the mnemonic, which must be a
+ // simple string, not a $foo variable or a singleton register.
+ assert(!AsmOperands.empty() && "Instruction has no tokens?");
+ Mnemonic = AsmOperands[0].Token;
+ if (Mnemonic[0] == '$' || getSingletonRegisterForAsmOperand(0, Info))
+ throw TGError(TheDef->getLoc(),
+ "Invalid instruction mnemonic '" + Mnemonic.str() + "'!");
+
+ // Remove the first operand, it is tracked in the mnemonic field.
+ AsmOperands.erase(AsmOperands.begin());
+}
- const CodeGenInstruction::OperandInfo &OI = *Op.OperandInfo;
- errs() << OI.Name << " " << OI.Rec->getName()
- << " (" << OI.MIOperandNo << ", " << OI.MINumOperands << ")\n";
+bool MatchableInfo::Validate(StringRef CommentDelimiter, bool Hack) const {
+ // Reject matchables with no .s string.
+ if (AsmString.empty())
+ throw TGError(TheDef->getLoc(), "instruction with empty asm string");
+
+ // Reject any matchables with a newline in them; they should be marked
+ // isCodeGenOnly if they are pseudo instructions.
+ if (AsmString.find('\n') != std::string::npos)
+ throw TGError(TheDef->getLoc(),
+ "multiline instruction is not valid for the asmparser, "
+ "mark it isCodeGenOnly");
+
+ // Reject asm strings that contain a comment character; we know at this
+ // point that the asmstring only has one line.
+ if (!CommentDelimiter.empty() &&
+ StringRef(AsmString).find(CommentDelimiter) != StringRef::npos)
+ throw TGError(TheDef->getLoc(),
+ "asmstring for instruction has comment character in it, "
+ "mark it isCodeGenOnly");
+
+ // Reject matchables with operand modifiers, these aren't something we can
+ // handle, the target should be refactored to use operands instead of
+ // modifiers.
+ //
+ // Also, check for instructions which reference the operand multiple times;
+ // this implies a constraint we would not honor.
+ std::set<std::string> OperandNames;
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ StringRef Tok = AsmOperands[i].Token;
+ if (Tok[0] == '$' && Tok.find(':') != StringRef::npos)
+ throw TGError(TheDef->getLoc(),
+ "matchable with operand modifier '" + Tok.str() +
+ "' not supported by asm matcher. Mark isCodeGenOnly!");
+
+ // Verify that any operand is only mentioned once.
+ // We reject aliases and ignore instructions for now.
+ if (Tok[0] == '$' && !OperandNames.insert(Tok).second) {
+ if (!Hack)
+ throw TGError(TheDef->getLoc(),
+ "ERROR: matchable with tied operand '" + Tok.str() +
+ "' can never be matched!");
+ // FIXME: Should reject these. The ARM backend hits this with $lane in a
+ // bunch of instructions. It is unclear what the right answer is.
+ DEBUG({
+ errs() << "warning: '" << TheDef->getName() << "': "
+ << "ignoring instruction with tied operand '"
+ << Tok.str() << "'\n";
+ });
+ return false;
+ }
}
+
+ return true;
+}
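
The duplicate-operand check above hinges on std::set::insert() returning a pair whose bool member is false when the key was already present. A self-contained illustration of that idiom, with hypothetical operand tokens:

#include <iostream>
#include <set>
#include <string>

int main() {
  std::set<std::string> OperandNames;
  // "$src" appears twice, modeling a tied operand in an asm string.
  for (const std::string Tok : {"$dst", "$src", "$src"})
    if (Tok[0] == '$' && !OperandNames.insert(Tok).second)
      std::cout << "operand mentioned twice: " << Tok << '\n';
}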
+
+/// getSingletonRegisterForAsmOperand - If the specified token is a singleton
+/// register, return the register record, otherwise return null.
+Record *MatchableInfo::
+getSingletonRegisterForAsmOperand(unsigned i, const AsmMatcherInfo &Info) const{
+ StringRef Tok = AsmOperands[i].Token;
+ if (!Tok.startswith(Info.RegisterPrefix))
+ return 0;
+
+ StringRef RegName = Tok.substr(Info.RegisterPrefix.size());
+ if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(RegName))
+ return Reg->TheDef;
+
+ // If there is no register prefix (e.g. "%" in "%eax"), then this may
+ // be some random non-register token, just ignore it.
+ if (Info.RegisterPrefix.empty())
+ return 0;
+
+ // Otherwise, we have something invalid prefixed with the register prefix,
+ // such as %foo.
+ std::string Err = "unable to find register for '" + RegName.str() +
+ "' (which matches register prefix)";
+ throw TGError(TheDef->getLoc(), Err);
}
static std::string getEnumNameForToken(StringRef Str) {
std::string Res;
-
+
for (StringRef::iterator it = Str.begin(), ie = Str.end(); it != ie; ++it) {
switch (*it) {
case '*': Res += "_STAR_"; break;
case '%': Res += "_PCT_"; break;
case ':': Res += "_COLON_"; break;
-
+ case '!': Res += "_EXCLAIM_"; break;
+ case '.': Res += "_DOT_"; break;
default:
- if (isalnum(*it)) {
+ if (isalnum(*it))
Res += *it;
- } else {
+ else
Res += "_" + utostr((unsigned) *it) + "_";
- }
}
}
return Res;
}
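
For quick experimentation, this is a standalone replica of the mangling rules above (using std::to_string in place of utostr; the sample tokens are illustrative):

#include <cctype>
#include <iostream>
#include <string>

// Mirrors the switch above: punctuation maps to named markers, other
// non-alphanumerics to their decimal character code.
static std::string getEnumNameForToken(const std::string &Str) {
  std::string Res;
  for (char C : Str) {
    switch (C) {
    case '*': Res += "_STAR_"; break;
    case '%': Res += "_PCT_"; break;
    case ':': Res += "_COLON_"; break;
    case '!': Res += "_EXCLAIM_"; break;
    case '.': Res += "_DOT_"; break;
    default:
      if (isalnum((unsigned char)C))
        Res += C;
      else
        Res += "_" + std::to_string((unsigned)(unsigned char)C) + "_";
    }
  }
  return Res;
}

int main() {
  std::cout << getEnumNameForToken(".word") << '\n'; // _DOT_word
  std::cout << getEnumNameForToken("a[b]") << '\n';  // a_91_b_93_
}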
-/// getRegisterRecord - Get the register record for \arg name, or 0.
-static Record *getRegisterRecord(CodeGenTarget &Target, StringRef Name) {
- for (unsigned i = 0, e = Target.getRegisters().size(); i != e; ++i) {
- const CodeGenRegister &Reg = Target.getRegisters()[i];
- if (Name == Reg.TheDef->getValueAsString("AsmName"))
- return Reg.TheDef;
- }
-
- return 0;
-}
-
ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
ClassInfo *&Entry = TokenClasses[Token];
-
+
if (!Entry) {
Entry = new ClassInfo();
Entry->Kind = ClassInfo::Token;
@@ -682,6 +856,7 @@ ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
Entry->ValueName = Token;
Entry->PredicateMethod = "<invalid>";
Entry->RenderMethod = "<invalid>";
+ Entry->ParserMethod = "";
Classes.push_back(Entry);
}
@@ -689,65 +864,58 @@ ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
}
ClassInfo *
-AsmMatcherInfo::getOperandClass(StringRef Token,
- const CodeGenInstruction::OperandInfo &OI) {
- if (OI.Rec->isSubClassOf("RegisterClass")) {
- ClassInfo *CI = RegisterClassClasses[OI.Rec];
-
- if (!CI) {
- PrintError(OI.Rec->getLoc(), "register class has no class info!");
- throw std::string("ERROR: Missing register class!");
- }
-
- return CI;
+AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI,
+ int SubOpIdx) {
+ Record *Rec = OI.Rec;
+ if (SubOpIdx != -1)
+ Rec = dynamic_cast<DefInit*>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
+
+ if (Rec->isSubClassOf("RegisterClass")) {
+ if (ClassInfo *CI = RegisterClassClasses[Rec])
+ return CI;
+ throw TGError(Rec->getLoc(), "register class has no class info!");
}
- assert(OI.Rec->isSubClassOf("Operand") && "Unexpected operand!");
- Record *MatchClass = OI.Rec->getValueAsDef("ParserMatchClass");
- ClassInfo *CI = AsmOperandClasses[MatchClass];
-
- if (!CI) {
- PrintError(OI.Rec->getLoc(), "operand has no match class!");
- throw std::string("ERROR: Missing match class!");
- }
+ assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
+ Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
+ if (ClassInfo *CI = AsmOperandClasses[MatchClass])
+ return CI;
- return CI;
+ throw TGError(Rec->getLoc(), "operand has no match class!");
}
-void AsmMatcherInfo::BuildRegisterClasses(CodeGenTarget &Target,
- std::set<std::string>
- &SingletonRegisterNames) {
- std::vector<CodeGenRegisterClass> RegisterClasses;
- std::vector<CodeGenRegister> Registers;
-
- RegisterClasses = Target.getRegisterClasses();
- Registers = Target.getRegisters();
+void AsmMatcherInfo::
+BuildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
+ const std::vector<CodeGenRegister> &Registers = Target.getRegisters();
+ const std::vector<CodeGenRegisterClass> &RegClassList =
+ Target.getRegisterClasses();
// The register sets used for matching.
std::set< std::set<Record*> > RegisterSets;
- // Gather the defined sets.
- for (std::vector<CodeGenRegisterClass>::iterator it = RegisterClasses.begin(),
- ie = RegisterClasses.end(); it != ie; ++it)
+ // Gather the defined sets.
+ for (std::vector<CodeGenRegisterClass>::const_iterator it =
+ RegClassList.begin(), ie = RegClassList.end(); it != ie; ++it)
RegisterSets.insert(std::set<Record*>(it->Elements.begin(),
it->Elements.end()));
// Add any required singleton sets.
- for (std::set<std::string>::iterator it = SingletonRegisterNames.begin(),
- ie = SingletonRegisterNames.end(); it != ie; ++it)
- if (Record *Rec = getRegisterRecord(Target, *it))
- RegisterSets.insert(std::set<Record*>(&Rec, &Rec + 1));
-
+ for (SmallPtrSet<Record*, 16>::iterator it = SingletonRegisters.begin(),
+ ie = SingletonRegisters.end(); it != ie; ++it) {
+ Record *Rec = *it;
+ RegisterSets.insert(std::set<Record*>(&Rec, &Rec + 1));
+ }
+
// Introduce derived sets where necessary (when a register does not determine
// a unique register set class), and build the mapping of registers to the set
// they should classify to.
std::map<Record*, std::set<Record*> > RegisterMap;
- for (std::vector<CodeGenRegister>::iterator it = Registers.begin(),
+ for (std::vector<CodeGenRegister>::const_iterator it = Registers.begin(),
ie = Registers.end(); it != ie; ++it) {
- CodeGenRegister &CGR = *it;
+ const CodeGenRegister &CGR = *it;
// Compute the intersection of all sets containing this register.
std::set<Record*> ContainingSet;
-
+
for (std::set< std::set<Record*> >::iterator it = RegisterSets.begin(),
ie = RegisterSets.end(); it != ie; ++it) {
if (!it->count(CGR.TheDef))
@@ -755,14 +923,14 @@ void AsmMatcherInfo::BuildRegisterClasses(CodeGenTarget &Target,
if (ContainingSet.empty()) {
ContainingSet = *it;
- } else {
- std::set<Record*> Tmp;
- std::swap(Tmp, ContainingSet);
- std::insert_iterator< std::set<Record*> > II(ContainingSet,
- ContainingSet.begin());
- std::set_intersection(Tmp.begin(), Tmp.end(), it->begin(), it->end(),
- II);
+ continue;
}
+
+ std::set<Record*> Tmp;
+ std::swap(Tmp, ContainingSet);
+ std::insert_iterator< std::set<Record*> > II(ContainingSet,
+ ContainingSet.begin());
+ std::set_intersection(Tmp.begin(), Tmp.end(), it->begin(), it->end(), II);
}
if (!ContainingSet.empty()) {
@@ -795,14 +963,14 @@ void AsmMatcherInfo::BuildRegisterClasses(CodeGenTarget &Target,
ClassInfo *CI = RegisterSetClasses[*it];
for (std::set< std::set<Record*> >::iterator it2 = RegisterSets.begin(),
ie2 = RegisterSets.end(); it2 != ie2; ++it2)
- if (*it != *it2 &&
+ if (*it != *it2 &&
std::includes(it2->begin(), it2->end(), it->begin(), it->end()))
CI->SuperClasses.push_back(RegisterSetClasses[*it2]);
}
// Name the register classes which correspond to a user defined RegisterClass.
- for (std::vector<CodeGenRegisterClass>::iterator it = RegisterClasses.begin(),
- ie = RegisterClasses.end(); it != ie; ++it) {
+ for (std::vector<CodeGenRegisterClass>::const_iterator
+ it = RegClassList.begin(), ie = RegClassList.end(); it != ie; ++it) {
ClassInfo *CI = RegisterSetClasses[std::set<Record*>(it->Elements.begin(),
it->Elements.end())];
if (CI->ValueName.empty()) {
@@ -818,36 +986,35 @@ void AsmMatcherInfo::BuildRegisterClasses(CodeGenTarget &Target,
// Populate the map for individual registers.
for (std::map<Record*, std::set<Record*> >::iterator it = RegisterMap.begin(),
ie = RegisterMap.end(); it != ie; ++it)
- this->RegisterClasses[it->first] = RegisterSetClasses[it->second];
+ RegisterClasses[it->first] = RegisterSetClasses[it->second];
// Name the register classes which correspond to singleton registers.
- for (std::set<std::string>::iterator it = SingletonRegisterNames.begin(),
- ie = SingletonRegisterNames.end(); it != ie; ++it) {
- if (Record *Rec = getRegisterRecord(Target, *it)) {
- ClassInfo *CI = this->RegisterClasses[Rec];
- assert(CI && "Missing singleton register class info!");
-
- if (CI->ValueName.empty()) {
- CI->ClassName = Rec->getName();
- CI->Name = "MCK_" + Rec->getName();
- CI->ValueName = Rec->getName();
- } else
- CI->ValueName = CI->ValueName + "," + Rec->getName();
- }
+ for (SmallPtrSet<Record*, 16>::iterator it = SingletonRegisters.begin(),
+ ie = SingletonRegisters.end(); it != ie; ++it) {
+ Record *Rec = *it;
+ ClassInfo *CI = RegisterClasses[Rec];
+ assert(CI && "Missing singleton register class info!");
+
+ if (CI->ValueName.empty()) {
+ CI->ClassName = Rec->getName();
+ CI->Name = "MCK_" + Rec->getName();
+ CI->ValueName = Rec->getName();
+ } else
+ CI->ValueName = CI->ValueName + "," + Rec->getName();
}
}
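
The derived-set computation above classifies each register by intersecting every defined register set that contains it. A runnable sketch of that step with stand-in data (plain integers in place of register records):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>

int main() {
  // Hypothetical register classes; GR32_ABCD is a subset of GR32.
  std::set<int> GR32      = {1, 2, 3, 4};
  std::set<int> GR32_ABCD = {1, 2};
  std::set<std::set<int> > RegisterSets = {GR32, GR32_ABCD};

  int Reg = 1;                          // classify this register
  std::set<int> Containing;
  for (const std::set<int> &S : RegisterSets) {
    if (!S.count(Reg))
      continue;
    if (Containing.empty()) {
      Containing = S;
      continue;
    }
    // Same swap-and-intersect pattern as the code above.
    std::set<int> Tmp;
    std::swap(Tmp, Containing);
    std::set_intersection(Tmp.begin(), Tmp.end(), S.begin(), S.end(),
                          std::inserter(Containing, Containing.begin()));
  }
  for (int R : Containing)
    std::cout << R << ' ';              // prints "1 2", i.e. GR32_ABCD
}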
-void AsmMatcherInfo::BuildOperandClasses(CodeGenTarget &Target) {
- std::vector<Record*> AsmOperands;
- AsmOperands = Records.getAllDerivedDefinitions("AsmOperandClass");
+void AsmMatcherInfo::BuildOperandClasses() {
+ std::vector<Record*> AsmOperands =
+ Records.getAllDerivedDefinitions("AsmOperandClass");
// Pre-populate AsmOperandClasses map.
- for (std::vector<Record*>::iterator it = AsmOperands.begin(),
+ for (std::vector<Record*>::iterator it = AsmOperands.begin(),
ie = AsmOperands.end(); it != ie; ++it)
AsmOperandClasses[*it] = new ClassInfo();
unsigned Index = 0;
- for (std::vector<Record*>::iterator it = AsmOperands.begin(),
+ for (std::vector<Record*>::iterator it = AsmOperands.begin(),
ie = AsmOperands.end(); it != ie; ++it, ++Index) {
ClassInfo *CI = AsmOperandClasses[*it];
CI->Kind = ClassInfo::UserClass0 + Index;
@@ -875,7 +1042,7 @@ void AsmMatcherInfo::BuildOperandClasses(CodeGenTarget &Target) {
if (StringInit *SI = dynamic_cast<StringInit*>(PMName)) {
CI->PredicateMethod = SI->getValue();
} else {
- assert(dynamic_cast<UnsetInit*>(PMName) &&
+ assert(dynamic_cast<UnsetInit*>(PMName) &&
"Unexpected PredicateMethod field!");
CI->PredicateMethod = "is" + CI->ClassName;
}
@@ -890,128 +1057,192 @@ void AsmMatcherInfo::BuildOperandClasses(CodeGenTarget &Target) {
CI->RenderMethod = "add" + CI->ClassName + "Operands";
}
+ // Get the parse method name or leave it as empty.
+ Init *PRMName = (*it)->getValueInit("ParserMethod");
+ if (StringInit *SI = dynamic_cast<StringInit*>(PRMName))
+ CI->ParserMethod = SI->getValue();
+
AsmOperandClasses[*it] = CI;
Classes.push_back(CI);
}
}
-AsmMatcherInfo::AsmMatcherInfo(Record *_AsmParser)
- : AsmParser(_AsmParser),
- CommentDelimiter(AsmParser->getValueAsString("CommentDelimiter")),
- RegisterPrefix(AsmParser->getValueAsString("RegisterPrefix"))
-{
+AsmMatcherInfo::AsmMatcherInfo(Record *asmParser,
+ CodeGenTarget &target,
+ RecordKeeper &records)
+ : Records(records), AsmParser(asmParser), Target(target),
+ RegisterPrefix(AsmParser->getValueAsString("RegisterPrefix")) {
+}
+
+/// BuildOperandMatchInfo - Build the necessary information to handle user
+/// defined operand parsing methods.
+void AsmMatcherInfo::BuildOperandMatchInfo() {
+
+ /// Map containing a mask of all the operand indices that can be found for
+ /// that class inside an instruction.
+ std::map<ClassInfo*, unsigned> OpClassMask;
+
+ for (std::vector<MatchableInfo*>::const_iterator it =
+ Matchables.begin(), ie = Matchables.end();
+ it != ie; ++it) {
+ MatchableInfo &II = **it;
+ OpClassMask.clear();
+
+ // Keep track of all operands of this instruction which belong to the
+ // same class.
+ for (unsigned i = 0, e = II.AsmOperands.size(); i != e; ++i) {
+ MatchableInfo::AsmOperand &Op = II.AsmOperands[i];
+ if (Op.Class->ParserMethod.empty())
+ continue;
+ unsigned &OperandMask = OpClassMask[Op.Class];
+ OperandMask |= (1 << i);
+ }
+
+ // Generate operand match info for each mnemonic/operand class pair.
+ for (std::map<ClassInfo*, unsigned>::iterator iit = OpClassMask.begin(),
+ iie = OpClassMask.end(); iit != iie; ++iit) {
+ unsigned OpMask = iit->second;
+ ClassInfo *CI = iit->first;
+ OperandMatchInfo.push_back(OperandMatchEntry::Create(&II, CI, OpMask));
+ }
+ }
}
-void AsmMatcherInfo::BuildInfo(CodeGenTarget &Target) {
+void AsmMatcherInfo::BuildInfo() {
+ // Build information about all of the AssemblerPredicates.
+ std::vector<Record*> AllPredicates =
+ Records.getAllDerivedDefinitions("Predicate");
+ for (unsigned i = 0, e = AllPredicates.size(); i != e; ++i) {
+ Record *Pred = AllPredicates[i];
+ // Ignore predicates that are not intended for the assembler.
+ if (!Pred->getValueAsBit("AssemblerMatcherPredicate"))
+ continue;
+
+ if (Pred->getName().empty())
+ throw TGError(Pred->getLoc(), "Predicate has no name!");
+
+ unsigned FeatureNo = SubtargetFeatures.size();
+ SubtargetFeatures[Pred] = new SubtargetFeatureInfo(Pred, FeatureNo);
+ assert(FeatureNo < 32 && "Too many subtarget features!");
+ }
+
+ StringRef CommentDelimiter = AsmParser->getValueAsString("CommentDelimiter");
+
// Parse the instructions; we need to do this first so that we can gather the
// singleton register classes.
- std::set<std::string> SingletonRegisterNames;
-
- const std::vector<const CodeGenInstruction*> &InstrList =
- Target.getInstructionsByEnumValue();
-
- for (unsigned i = 0, e = InstrList.size(); i != e; ++i) {
- const CodeGenInstruction &CGI = *InstrList[i];
+ SmallPtrSet<Record*, 16> SingletonRegisters;
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ const CodeGenInstruction &CGI = **I;
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instructions we consider.
if (!StringRef(CGI.TheDef->getName()).startswith(MatchPrefix))
continue;
- OwningPtr<InstructionInfo> II(new InstructionInfo());
-
- II->InstrName = CGI.TheDef->getName();
- II->Instr = &CGI;
- II->AsmString = FlattenVariants(CGI.AsmString, 0);
-
- // Remove comments from the asm string.
- if (!CommentDelimiter.empty()) {
- size_t Idx = StringRef(II->AsmString).find(CommentDelimiter);
- if (Idx != StringRef::npos)
- II->AsmString = II->AsmString.substr(0, Idx);
- }
-
- TokenizeAsmString(II->AsmString, II->Tokens);
-
- // Ignore instructions which shouldn't be matched.
- if (!IsAssemblerInstruction(CGI.TheDef->getName(), CGI, II->Tokens))
+ // Ignore "codegen only" instructions.
+ if (CGI.TheDef->getValueAsBit("isCodeGenOnly"))
continue;
- // Collect singleton registers, if used.
- if (!RegisterPrefix.empty()) {
- for (unsigned i = 0, e = II->Tokens.size(); i != e; ++i) {
- if (II->Tokens[i].startswith(RegisterPrefix)) {
- StringRef RegName = II->Tokens[i].substr(RegisterPrefix.size());
- Record *Rec = getRegisterRecord(Target, RegName);
-
- if (!Rec) {
- std::string Err = "unable to find register for '" + RegName.str() +
- "' (which matches register prefix)";
- throw TGError(CGI.TheDef->getLoc(), Err);
- }
-
- SingletonRegisterNames.insert(RegName);
+ // Validate the operand list to ensure we can handle this instruction.
+ for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo &OI = CGI.Operands[i];
+
+ // Validate tied operands.
+ if (OI.getTiedRegister() != -1) {
+ // If we have a tied operand that consists of multiple MCOperands,
+ // reject it. We reject aliases and ignore instructions for now.
+ if (OI.MINumOperands != 1) {
+ // FIXME: Should reject these. The ARM backend hits this with $lane
+ // in a bunch of instructions. It is unclear what the right answer is.
+ DEBUG({
+ errs() << "warning: '" << CGI.TheDef->getName() << "': "
+ << "ignoring instruction with multi-operand tied operand '"
+ << OI.Name << "'\n";
+ });
+ continue;
}
}
}
- // Compute the require features.
- ListInit *Predicates = CGI.TheDef->getValueAsListInit("Predicates");
- for (unsigned i = 0, e = Predicates->getSize(); i != e; ++i) {
- if (DefInit *Pred = dynamic_cast<DefInit*>(Predicates->getElement(i))) {
- // Ignore OptForSize and OptForSpeed, they aren't really requirements,
- // rather they are hints to isel.
- //
- // FIXME: Find better way to model this.
- if (Pred->getDef()->getName() == "OptForSize" ||
- Pred->getDef()->getName() == "OptForSpeed")
- continue;
+ OwningPtr<MatchableInfo> II(new MatchableInfo(CGI));
- // FIXME: Total hack; for now, we just limit ourselves to In32BitMode
- // and In64BitMode, because we aren't going to have the right feature
- // masks for SSE and friends. We need to decide what we are going to do
- // about CPU subtypes to implement this the right way.
- if (Pred->getDef()->getName() != "In32BitMode" &&
- Pred->getDef()->getName() != "In64BitMode")
- continue;
+ II->Initialize(*this, SingletonRegisters);
- II->RequiredFeatures.push_back(getSubtargetFeature(Pred->getDef()));
- }
- }
+ // Ignore instructions which shouldn't be matched and diagnose invalid
+ // instruction definitions with an error.
+ if (!II->Validate(CommentDelimiter, true))
+ continue;
- Instructions.push_back(II.take());
+ // Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
+ //
+ // FIXME: This is a total hack.
+ if (StringRef(II->TheDef->getName()).startswith("Int_") ||
+ StringRef(II->TheDef->getName()).endswith("_Int"))
+ continue;
+
+ Matchables.push_back(II.take());
+ }
+
+ // Parse all of the InstAlias definitions and stick them in the list of
+ // matchables.
+ std::vector<Record*> AllInstAliases =
+ Records.getAllDerivedDefinitions("InstAlias");
+ for (unsigned i = 0, e = AllInstAliases.size(); i != e; ++i) {
+ CodeGenInstAlias *Alias = new CodeGenInstAlias(AllInstAliases[i], Target);
+
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instruction aliases we consider, based on the target
+ // instruction.
+ if (!StringRef(Alias->ResultInst->TheDef->getName()).startswith(
+ MatchPrefix))
+ continue;
+
+ OwningPtr<MatchableInfo> II(new MatchableInfo(Alias));
+
+ II->Initialize(*this, SingletonRegisters);
+
+ // Validate the alias definitions.
+ II->Validate(CommentDelimiter, false);
+
+ Matchables.push_back(II.take());
}
// Build info for the register classes.
- BuildRegisterClasses(Target, SingletonRegisterNames);
+ BuildRegisterClasses(SingletonRegisters);
// Build info for the user defined assembly operand classes.
- BuildOperandClasses(Target);
+ BuildOperandClasses();
- // Build the instruction information.
- for (std::vector<InstructionInfo*>::iterator it = Instructions.begin(),
- ie = Instructions.end(); it != ie; ++it) {
- InstructionInfo *II = *it;
+ // Build the information about matchables, now that we have fully formed
+ // classes.
+ for (std::vector<MatchableInfo*>::iterator it = Matchables.begin(),
+ ie = Matchables.end(); it != ie; ++it) {
+ MatchableInfo *II = *it;
- for (unsigned i = 0, e = II->Tokens.size(); i != e; ++i) {
- StringRef Token = II->Tokens[i];
+ // Parse the tokens after the mnemonic.
+ // Note: BuildInstructionOperandReference may insert new AsmOperands, so
+ // don't precompute the loop bound.
+ for (unsigned i = 0; i != II->AsmOperands.size(); ++i) {
+ MatchableInfo::AsmOperand &Op = II->AsmOperands[i];
+ StringRef Token = Op.Token;
// Check for singleton registers.
- if (!RegisterPrefix.empty() && Token.startswith(RegisterPrefix)) {
- StringRef RegName = II->Tokens[i].substr(RegisterPrefix.size());
- InstructionInfo::Operand Op;
- Op.Class = RegisterClasses[getRegisterRecord(Target, RegName)];
- Op.OperandInfo = 0;
+ if (Record *RegRecord = II->getSingletonRegisterForAsmOperand(i, *this)) {
+ Op.Class = RegisterClasses[RegRecord];
assert(Op.Class && Op.Class->Registers.size() == 1 &&
"Unexpected class for singleton register");
- II->Operands.push_back(Op);
continue;
}
// Check for simple tokens.
if (Token[0] != '$') {
- InstructionInfo::Operand Op;
Op.Class = getTokenClass(Token);
- Op.OperandInfo = 0;
- II->Operands.push_back(Op);
+ continue;
+ }
+
+ if (Token.size() > 1 && isdigit(Token[1])) {
+ Op.Class = getTokenClass(Token);
continue;
}
@@ -1022,58 +1253,204 @@ void AsmMatcherInfo::BuildInfo(CodeGenTarget &Target) {
else
OperandName = Token.substr(1);
- // Map this token to an operand. FIXME: Move elsewhere.
- unsigned Idx;
- try {
- Idx = II->Instr->getOperandNamed(OperandName);
- } catch(...) {
- throw std::string("error: unable to find operand: '" +
- OperandName.str() + "'");
- }
+ if (II->DefRec.is<const CodeGenInstruction*>())
+ BuildInstructionOperandReference(II, OperandName, i);
+ else
+ BuildAliasOperandReference(II, OperandName, Op);
+ }
- // FIXME: This is annoying, the named operand may be tied (e.g.,
- // XCHG8rm). What we want is the untied operand, which we now have to
- // grovel for. Only worry about this for single entry operands, we have to
- // clean this up anyway.
- const CodeGenInstruction::OperandInfo *OI = &II->Instr->OperandList[Idx];
- if (OI->Constraints[0].isTied()) {
- unsigned TiedOp = OI->Constraints[0].getTiedOperand();
-
- // The tied operand index is an MIOperand index, find the operand that
- // contains it.
- for (unsigned i = 0, e = II->Instr->OperandList.size(); i != e; ++i) {
- if (II->Instr->OperandList[i].MIOperandNo == TiedOp) {
- OI = &II->Instr->OperandList[i];
- break;
- }
- }
+ if (II->DefRec.is<const CodeGenInstruction*>())
+ II->BuildInstructionResultOperands();
+ else
+ II->BuildAliasResultOperands();
+ }
- assert(OI && "Unable to find tied operand target!");
- }
+ // Reorder classes so that classes precede super classes.
+ std::sort(Classes.begin(), Classes.end(), less_ptr<ClassInfo>());
+}
- InstructionInfo::Operand Op;
- Op.Class = getOperandClass(Token, *OI);
- Op.OperandInfo = OI;
- II->Operands.push_back(Op);
+/// BuildInstructionOperandReference - The specified operand is a reference to a
+/// named operand such as $src. Resolve the Class and OperandInfo pointers.
+void AsmMatcherInfo::
+BuildInstructionOperandReference(MatchableInfo *II,
+ StringRef OperandName,
+ unsigned AsmOpIdx) {
+ const CodeGenInstruction &CGI = *II->DefRec.get<const CodeGenInstruction*>();
+ const CGIOperandList &Operands = CGI.Operands;
+ MatchableInfo::AsmOperand *Op = &II->AsmOperands[AsmOpIdx];
+
+ // Map this token to an operand.
+ unsigned Idx;
+ if (!Operands.hasOperandNamed(OperandName, Idx))
+ throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+ OperandName.str() + "'");
+
+ // If the instruction operand has multiple suboperands, but the parser
+ // match class for the asm operand is still the default "ImmAsmOperand",
+ // then handle each suboperand separately.
+ if (Op->SubOpIdx == -1 && Operands[Idx].MINumOperands > 1) {
+ Record *Rec = Operands[Idx].Rec;
+ assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
+ Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
+ if (MatchClass && MatchClass->getValueAsString("Name") == "Imm") {
+ // Insert remaining suboperands after AsmOpIdx in II->AsmOperands.
+ StringRef Token = Op->Token; // save this in case Op gets moved
+ for (unsigned SI = 1, SE = Operands[Idx].MINumOperands; SI != SE; ++SI) {
+ MatchableInfo::AsmOperand NewAsmOp(Token);
+ NewAsmOp.SubOpIdx = SI;
+ II->AsmOperands.insert(II->AsmOperands.begin()+AsmOpIdx+SI, NewAsmOp);
+ }
+ // Replace Op with first suboperand.
+ Op = &II->AsmOperands[AsmOpIdx]; // update the pointer in case it moved
+ Op->SubOpIdx = 0;
}
}
- // Reorder classes so that classes preceed super classes.
- std::sort(Classes.begin(), Classes.end(), less_ptr<ClassInfo>());
+ // Set up the operand class.
+ Op->Class = getOperandClass(Operands[Idx], Op->SubOpIdx);
+
+ // If the named operand is tied, canonicalize it to the untied operand.
+ // For example, something like:
+ // (outs GPR:$dst), (ins GPR:$src)
+ // with an asmstring of
+ // "inc $src"
+ // we want to canonicalize to:
+ // "inc $dst"
+ // so that we know how to provide the $dst operand when filling in the result.
+ int OITied = Operands[Idx].getTiedRegister();
+ if (OITied != -1) {
+ // The tied operand index is an MIOperand index, find the operand that
+ // contains it.
+ std::pair<unsigned, unsigned> Idx = Operands.getSubOperandNumber(OITied);
+ OperandName = Operands[Idx.first].Name;
+ Op->SubOpIdx = Idx.second;
+ }
+
+ Op->SrcOpName = OperandName;
}
-static std::pair<unsigned, unsigned> *
-GetTiedOperandAtIndex(SmallVectorImpl<std::pair<unsigned, unsigned> > &List,
- unsigned Index) {
- for (unsigned i = 0, e = List.size(); i != e; ++i)
- if (Index == List[i].first)
- return &List[i];
+/// BuildAliasOperandReference - When parsing an operand reference out of the
+/// matching string (e.g. "movsx $src, $dst"), determine what the class of the
+/// operand reference is by looking it up in the result pattern definition.
+void AsmMatcherInfo::BuildAliasOperandReference(MatchableInfo *II,
+ StringRef OperandName,
+ MatchableInfo::AsmOperand &Op) {
+ const CodeGenInstAlias &CGA = *II->DefRec.get<const CodeGenInstAlias*>();
+
+ // Set up the operand class.
+ for (unsigned i = 0, e = CGA.ResultOperands.size(); i != e; ++i)
+ if (CGA.ResultOperands[i].isRecord() &&
+ CGA.ResultOperands[i].getName() == OperandName) {
+ // It's safe to go with the first one we find, because CodeGenInstAlias
+ // validates that all operands with the same name have the same record.
+ unsigned ResultIdx = CGA.ResultInstOperandIndex[i].first;
+ Op.SubOpIdx = CGA.ResultInstOperandIndex[i].second;
+ Op.Class = getOperandClass(CGA.ResultInst->Operands[ResultIdx],
+ Op.SubOpIdx);
+ Op.SrcOpName = OperandName;
+ return;
+ }
+
+ throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" +
+ OperandName.str() + "'");
+}
+
+void MatchableInfo::BuildInstructionResultOperands() {
+ const CodeGenInstruction *ResultInst = getResultInst();
+
+ // Loop over all operands of the result instruction, determining how to
+ // populate them.
+ for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo &OpInfo = ResultInst->Operands[i];
+
+ // If this is a tied operand, just copy from the previously handled operand.
+ int TiedOp = OpInfo.getTiedRegister();
+ if (TiedOp != -1) {
+ ResOperands.push_back(ResOperand::getTiedOp(TiedOp));
+ continue;
+ }
+
+ // Find out what operand from the asmparser this MCInst operand comes from.
+ int SrcOperand = FindAsmOperandNamed(OpInfo.Name);
+ if (OpInfo.Name.empty() || SrcOperand == -1)
+ throw TGError(TheDef->getLoc(), "Instruction '" +
+ TheDef->getName() + "' has operand '" + OpInfo.Name +
+ "' that doesn't appear in asm string!");
+
+ // Check if the one AsmOperand populates the entire operand.
+ unsigned NumOperands = OpInfo.MINumOperands;
+ if (AsmOperands[SrcOperand].SubOpIdx == -1) {
+ ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand, NumOperands));
+ continue;
+ }
- return 0;
+ // Add a separate ResOperand for each suboperand.
+ for (unsigned AI = 0; AI < NumOperands; ++AI) {
+ assert(AsmOperands[SrcOperand+AI].SubOpIdx == (int)AI &&
+ AsmOperands[SrcOperand+AI].SrcOpName == OpInfo.Name &&
+ "unexpected AsmOperands for suboperands");
+ ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand + AI, 1));
+ }
+ }
}
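
As a worked illustration of the result-operand list this builds, consider a hypothetical "inc $dst" whose input operand is tied to $dst. A toy model of the outcome (assumed shapes, not LLVM's types):

#include <cstdio>
#include <vector>

// Stand-in for the ResOperand records built above.
struct ResOp { enum Kind { Rendered, Tied } K; int Idx; };

int main() {
  std::vector<ResOp> ResOperands = {
    {ResOp::Rendered, 0}, // render parsed operand "$dst" -> MCInst operand 0
    {ResOp::Tied,     0}, // tied input: copy MCInst operand 0
  };
  for (const ResOp &R : ResOperands)
    std::printf(R.K == ResOp::Rendered ? "render asm op %d\n"
                                       : "copy inst op %d\n", R.Idx);
}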
-static void EmitConvertToMCInst(CodeGenTarget &Target,
- std::vector<InstructionInfo*> &Infos,
+void MatchableInfo::BuildAliasResultOperands() {
+ const CodeGenInstAlias &CGA = *DefRec.get<const CodeGenInstAlias*>();
+ const CodeGenInstruction *ResultInst = getResultInst();
+
+ // Loop over all operands of the result instruction, determining how to
+ // populate them.
+ unsigned AliasOpNo = 0;
+ unsigned LastOpNo = CGA.ResultInstOperandIndex.size();
+ for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo *OpInfo = &ResultInst->Operands[i];
+
+ // If this is a tied operand, just copy from the previously handled operand.
+ int TiedOp = OpInfo->getTiedRegister();
+ if (TiedOp != -1) {
+ ResOperands.push_back(ResOperand::getTiedOp(TiedOp));
+ continue;
+ }
+
+ // Handle all the suboperands for this operand.
+ const std::string &OpName = OpInfo->Name;
+ for ( ; AliasOpNo < LastOpNo &&
+ CGA.ResultInstOperandIndex[AliasOpNo].first == i; ++AliasOpNo) {
+ int SubIdx = CGA.ResultInstOperandIndex[AliasOpNo].second;
+
+ // Find out what operand from the asmparser that this MCInst operand
+ // comes from.
+ switch (CGA.ResultOperands[AliasOpNo].Kind) {
+ default: assert(0 && "unexpected InstAlias operand kind");
+ case CodeGenInstAlias::ResultOperand::K_Record: {
+ StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
+ int SrcOperand = FindAsmOperand(Name, SubIdx);
+ if (SrcOperand == -1)
+ throw TGError(TheDef->getLoc(), "Instruction '" +
+ TheDef->getName() + "' has operand '" + OpName +
+ "' that doesn't appear in asm string!");
+ unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1);
+ ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand,
+ NumOperands));
+ break;
+ }
+ case CodeGenInstAlias::ResultOperand::K_Imm: {
+ int64_t ImmVal = CGA.ResultOperands[AliasOpNo].getImm();
+ ResOperands.push_back(ResOperand::getImmOp(ImmVal));
+ break;
+ }
+ case CodeGenInstAlias::ResultOperand::K_Reg: {
+ Record *Reg = CGA.ResultOperands[AliasOpNo].getRegister();
+ ResOperands.push_back(ResOperand::getRegOp(Reg));
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void EmitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName,
+ std::vector<MatchableInfo*> &Infos,
raw_ostream &OS) {
// Write the convert function to a separate stream, so we can drop it after
// the enum.
@@ -1084,8 +1461,8 @@ static void EmitConvertToMCInst(CodeGenTarget &Target,
std::set<std::string> GeneratedFns;
// Start the unified conversion function.
-
- CvtOS << "static void ConvertToMCInst(ConversionKind Kind, MCInst &Inst, "
+ CvtOS << "bool " << Target.getName() << ClassName << "::\n";
+ CvtOS << "ConvertToMCInst(unsigned Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const SmallVectorImpl<MCParsedAsmOperand*"
<< "> &Operands) {\n";
@@ -1095,92 +1472,94 @@ static void EmitConvertToMCInst(CodeGenTarget &Target,
// Start the enum, which we will generate inline.
- OS << "// Unified function for converting operants to MCInst instances.\n\n";
+ OS << "// Unified function for converting operands to MCInst instances.\n\n";
OS << "enum ConversionKind {\n";
-
+
// TargetOperandClass - This is the target's operand class, like X86Operand.
std::string TargetOperandClass = Target.getName() + "Operand";
-
- for (std::vector<InstructionInfo*>::const_iterator it = Infos.begin(),
+
+ for (std::vector<MatchableInfo*>::const_iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
- InstructionInfo &II = **it;
-
- // Order the (class) operands by the order to convert them into an MCInst.
- SmallVector<std::pair<unsigned, unsigned>, 4> MIOperandList;
- for (unsigned i = 0, e = II.Operands.size(); i != e; ++i) {
- InstructionInfo::Operand &Op = II.Operands[i];
- if (Op.OperandInfo)
- MIOperandList.push_back(std::make_pair(Op.OperandInfo->MIOperandNo, i));
- }
+ MatchableInfo &II = **it;
- // Find any tied operands.
- SmallVector<std::pair<unsigned, unsigned>, 4> TiedOperands;
- for (unsigned i = 0, e = II.Instr->OperandList.size(); i != e; ++i) {
- const CodeGenInstruction::OperandInfo &OpInfo = II.Instr->OperandList[i];
- for (unsigned j = 0, e = OpInfo.Constraints.size(); j != e; ++j) {
- const CodeGenInstruction::ConstraintInfo &CI = OpInfo.Constraints[j];
- if (CI.isTied())
- TiedOperands.push_back(std::make_pair(OpInfo.MIOperandNo + j,
- CI.getTiedOperand()));
- }
- }
+ // Check if we have a custom match function.
+ StringRef AsmMatchConverter = II.getResultInst()->TheDef->getValueAsString(
+ "AsmMatchConverter");
+ if (!AsmMatchConverter.empty()) {
+ std::string Signature = "ConvertCustom_" + AsmMatchConverter.str();
+ II.ConversionFnKind = Signature;
- std::sort(MIOperandList.begin(), MIOperandList.end());
+ // Check if we have already generated this signature.
+ if (!GeneratedFns.insert(Signature).second)
+ continue;
- // Compute the total number of operands.
- unsigned NumMIOperands = 0;
- for (unsigned i = 0, e = II.Instr->OperandList.size(); i != e; ++i) {
- const CodeGenInstruction::OperandInfo &OI = II.Instr->OperandList[i];
- NumMIOperands = std::max(NumMIOperands,
- OI.MIOperandNo + OI.MINumOperands);
+ // If not, emit it now. Add to the enum list.
+ OS << " " << Signature << ",\n";
+
+ CvtOS << " case " << Signature << ":\n";
+ CvtOS << " return " << AsmMatchConverter
+ << "(Inst, Opcode, Operands);\n";
+ continue;
}
// Build the conversion function signature.
std::string Signature = "Convert";
- unsigned CurIndex = 0;
- for (unsigned i = 0, e = MIOperandList.size(); i != e; ++i) {
- InstructionInfo::Operand &Op = II.Operands[MIOperandList[i].second];
- assert(CurIndex <= Op.OperandInfo->MIOperandNo &&
- "Duplicate match for instruction operand!");
-
- // Skip operands which weren't matched by anything, this occurs when the
- // .td file encodes "implicit" operands as explicit ones.
- //
- // FIXME: This should be removed from the MCInst structure.
- for (; CurIndex != Op.OperandInfo->MIOperandNo; ++CurIndex) {
- std::pair<unsigned, unsigned> *Tie = GetTiedOperandAtIndex(TiedOperands,
- CurIndex);
- if (!Tie)
- Signature += "__Imp";
+ std::string CaseBody;
+ raw_string_ostream CaseOS(CaseBody);
+
+ // Compute the convert enum and the case body.
+ for (unsigned i = 0, e = II.ResOperands.size(); i != e; ++i) {
+ const MatchableInfo::ResOperand &OpInfo = II.ResOperands[i];
+
+ // Generate code to populate each result operand.
+ switch (OpInfo.Kind) {
+ case MatchableInfo::ResOperand::RenderAsmOperand: {
+ // This comes from something we parsed.
+ MatchableInfo::AsmOperand &Op = II.AsmOperands[OpInfo.AsmOperandNum];
+
+ // Registers are always converted the same, don't duplicate the
+ // conversion function based on them.
+ Signature += "__";
+ if (Op.Class->isRegisterClass())
+ Signature += "Reg";
else
- Signature += "__Tie" + utostr(Tie->second);
- }
-
- Signature += "__";
-
- // Registers are always converted the same, don't duplicate the conversion
- // function based on them.
- //
- // FIXME: We could generalize this based on the render method, if it
- // mattered.
- if (Op.Class->isRegisterClass())
- Signature += "Reg";
- else
- Signature += Op.Class->ClassName;
- Signature += utostr(Op.OperandInfo->MINumOperands);
- Signature += "_" + utostr(MIOperandList[i].second);
+ Signature += Op.Class->ClassName;
+ Signature += utostr(OpInfo.MINumOperands);
+ Signature += "_" + itostr(OpInfo.AsmOperandNum);
- CurIndex += Op.OperandInfo->MINumOperands;
- }
+ CaseOS << " ((" << TargetOperandClass << "*)Operands["
+ << (OpInfo.AsmOperandNum+1) << "])->" << Op.Class->RenderMethod
+ << "(Inst, " << OpInfo.MINumOperands << ");\n";
+ break;
+ }
- // Add any trailing implicit operands.
- for (; CurIndex != NumMIOperands; ++CurIndex) {
- std::pair<unsigned, unsigned> *Tie = GetTiedOperandAtIndex(TiedOperands,
- CurIndex);
- if (!Tie)
- Signature += "__Imp";
- else
- Signature += "__Tie" + utostr(Tie->second);
+ case MatchableInfo::ResOperand::TiedOperand: {
+ // If this operand is tied to a previous one, just copy the MCInst
+ // operand from the earlier one.We can only tie single MCOperand values.
+ //assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand");
+ unsigned TiedOp = OpInfo.TiedOperandNum;
+ assert(i > TiedOp && "Tied operand precedes its target!");
+ CaseOS << " Inst.addOperand(Inst.getOperand(" << TiedOp << "));\n";
+ Signature += "__Tie" + utostr(TiedOp);
+ break;
+ }
+ case MatchableInfo::ResOperand::ImmOperand: {
+ int64_t Val = OpInfo.ImmVal;
+ CaseOS << " Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n";
+ Signature += "__imm" + itostr(Val);
+ break;
+ }
+ case MatchableInfo::ResOperand::RegOperand: {
+ if (OpInfo.Register == 0) {
+ CaseOS << " Inst.addOperand(MCOperand::CreateReg(0));\n";
+ Signature += "__reg0";
+ } else {
+ std::string N = getQualifiedName(OpInfo.Register);
+ CaseOS << " Inst.addOperand(MCOperand::CreateReg(" << N << "));\n";
+ Signature += "__reg" + OpInfo.Register->getName();
+ }
+ }
+ }
}
II.ConversionFnKind = Signature;
@@ -1189,72 +1568,25 @@ static void EmitConvertToMCInst(CodeGenTarget &Target,
if (!GeneratedFns.insert(Signature).second)
continue;
- // If not, emit it now.
-
- // Add to the enum list.
+ // If not, emit it now. Add to the enum list.
OS << " " << Signature << ",\n";
- // And to the convert function.
CvtOS << " case " << Signature << ":\n";
- CurIndex = 0;
- for (unsigned i = 0, e = MIOperandList.size(); i != e; ++i) {
- InstructionInfo::Operand &Op = II.Operands[MIOperandList[i].second];
-
- // Add the implicit operands.
- for (; CurIndex != Op.OperandInfo->MIOperandNo; ++CurIndex) {
- // See if this is a tied operand.
- std::pair<unsigned, unsigned> *Tie = GetTiedOperandAtIndex(TiedOperands,
- CurIndex);
-
- if (!Tie) {
- // If not, this is some implicit operand. Just assume it is a register
- // for now.
- CvtOS << " Inst.addOperand(MCOperand::CreateReg(0));\n";
- } else {
- // Copy the tied operand.
- assert(Tie->first>Tie->second && "Tied operand preceeds its target!");
- CvtOS << " Inst.addOperand(Inst.getOperand("
- << Tie->second << "));\n";
- }
- }
-
- CvtOS << " ((" << TargetOperandClass << "*)Operands["
- << MIOperandList[i].second
- << "])->" << Op.Class->RenderMethod
- << "(Inst, " << Op.OperandInfo->MINumOperands << ");\n";
- CurIndex += Op.OperandInfo->MINumOperands;
- }
-
- // And add trailing implicit operands.
- for (; CurIndex != NumMIOperands; ++CurIndex) {
- std::pair<unsigned, unsigned> *Tie = GetTiedOperandAtIndex(TiedOperands,
- CurIndex);
-
- if (!Tie) {
- // If not, this is some implicit operand. Just assume it is a register
- // for now.
- CvtOS << " Inst.addOperand(MCOperand::CreateReg(0));\n";
- } else {
- // Copy the tied operand.
- assert(Tie->first>Tie->second && "Tied operand preceeds its target!");
- CvtOS << " Inst.addOperand(Inst.getOperand("
- << Tie->second << "));\n";
- }
- }
-
- CvtOS << " return;\n";
+ CvtOS << CaseOS.str();
+ CvtOS << " return true;\n";
}
// Finish the convert function.
CvtOS << " }\n";
+ CvtOS << " return false;\n";
CvtOS << "}\n\n";
// Finish the enum, and drop the convert function after it.
OS << " NumConversionVariants\n";
OS << "};\n\n";
-
+
OS << CvtOS.str();
}
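
For a hypothetical target named Foo, with one matchable that renders a register followed by an immediate, the emitted conversion function would look roughly like this (class and method names assumed; the signature encodes class, MC operand count, and asm operand index):

bool FooAsmParser::
ConvertToMCInst(unsigned Kind, MCInst &Inst, unsigned Opcode,
                const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Kind) {
  default: break;
  case Convert__Reg1_0__Imm1_1:
    // Operands[0] is the mnemonic token, so asm operand N is at N+1.
    ((FooOperand*)Operands[1])->addRegOperands(Inst, 1);
    ((FooOperand*)Operands[2])->addImmOperands(Inst, 1);
    return true;
  }
  return false;
}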
@@ -1268,7 +1600,7 @@ static void EmitMatchClassEnumeration(CodeGenTarget &Target,
<< "/// instruction matching.\n";
OS << "enum MatchClassKind {\n";
OS << " InvalidMatchClass = 0,\n";
- for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
+ for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
ClassInfo &CI = **it;
OS << " " << CI.Name << ", // ";
@@ -1289,64 +1621,50 @@ static void EmitMatchClassEnumeration(CodeGenTarget &Target,
OS << "}\n\n";
}
-/// EmitClassifyOperand - Emit the function to classify an operand.
-static void EmitClassifyOperand(CodeGenTarget &Target,
- AsmMatcherInfo &Info,
- raw_ostream &OS) {
- OS << "static MatchClassKind ClassifyOperand(MCParsedAsmOperand *GOp) {\n"
- << " " << Target.getName() << "Operand &Operand = *("
- << Target.getName() << "Operand*)GOp;\n";
+/// EmitValidateOperandClass - Emit the function to validate an operand class.
+static void EmitValidateOperandClass(AsmMatcherInfo &Info,
+ raw_ostream &OS) {
+ OS << "static bool ValidateOperandClass(MCParsedAsmOperand *GOp, "
+ << "MatchClassKind Kind) {\n";
+ OS << " " << Info.Target.getName() << "Operand &Operand = *("
+ << Info.Target.getName() << "Operand*)GOp;\n";
- // Classify tokens.
+ // Check for Token operands first.
OS << " if (Operand.isToken())\n";
- OS << " return MatchTokenString(Operand.getToken());\n\n";
+ OS << " return MatchTokenString(Operand.getToken()) == Kind;\n\n";
- // Classify registers.
- //
- // FIXME: Don't hardcode isReg, getReg.
+ // Check for register operands, including sub-classes.
OS << " if (Operand.isReg()) {\n";
+ OS << " MatchClassKind OpKind;\n";
OS << " switch (Operand.getReg()) {\n";
- OS << " default: return InvalidMatchClass;\n";
- for (std::map<Record*, ClassInfo*>::iterator
+ OS << " default: OpKind = InvalidMatchClass; break;\n";
+ for (std::map<Record*, ClassInfo*>::iterator
it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end();
it != ie; ++it)
- OS << " case " << Target.getName() << "::"
- << it->first->getName() << ": return " << it->second->Name << ";\n";
+ OS << " case " << Info.Target.getName() << "::"
+ << it->first->getName() << ": OpKind = " << it->second->Name
+ << "; break;\n";
OS << " }\n";
+ OS << " return IsSubclass(OpKind, Kind);\n";
OS << " }\n\n";
- // Classify user defined operands.
- for (std::vector<ClassInfo*>::iterator it = Info.Classes.begin(),
+ // Check the user classes. We don't care what order since we're only
+ // actually matching against one of them.
+ for (std::vector<ClassInfo*>::iterator it = Info.Classes.begin(),
ie = Info.Classes.end(); it != ie; ++it) {
ClassInfo &CI = **it;
if (!CI.isUserClass())
continue;
- OS << " // '" << CI.ClassName << "' class";
- if (!CI.SuperClasses.empty()) {
- OS << ", subclass of ";
- for (unsigned i = 0, e = CI.SuperClasses.size(); i != e; ++i) {
- if (i) OS << ", ";
- OS << "'" << CI.SuperClasses[i]->ClassName << "'";
- assert(CI < *CI.SuperClasses[i] && "Invalid class relation!");
- }
- }
- OS << "\n";
-
- OS << " if (Operand." << CI.PredicateMethod << "()) {\n";
-
- // Validate subclass relationships.
- if (!CI.SuperClasses.empty()) {
- for (unsigned i = 0, e = CI.SuperClasses.size(); i != e; ++i)
- OS << " assert(Operand." << CI.SuperClasses[i]->PredicateMethod
- << "() && \"Invalid class relationship!\");\n";
- }
-
- OS << " return " << CI.Name << ";\n";
+ OS << " // '" << CI.ClassName << "' class\n";
+ OS << " if (Kind == " << CI.Name
+ << " && Operand." << CI.PredicateMethod << "()) {\n";
+ OS << " return true;\n";
OS << " }\n\n";
}
- OS << " return InvalidMatchClass;\n";
+
+ OS << " return false;\n";
OS << "}\n\n";
}
@@ -1362,13 +1680,13 @@ static void EmitIsSubclass(CodeGenTarget &Target,
OS << " switch (A) {\n";
OS << " default:\n";
OS << " return false;\n";
- for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
+ for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
ClassInfo &A = **it;
if (A.Kind != ClassInfo::Token) {
std::vector<StringRef> SuperClasses;
- for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
+ for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
ClassInfo &B = **it;
@@ -1397,153 +1715,25 @@ static void EmitIsSubclass(CodeGenTarget &Target,
OS << "}\n\n";
}
-typedef std::pair<std::string, std::string> StringPair;
-
-/// FindFirstNonCommonLetter - Find the first character in the keys of the
-/// string pairs that is not shared across the whole set of strings. All
-/// strings are assumed to have the same length.
-static unsigned
-FindFirstNonCommonLetter(const std::vector<const StringPair*> &Matches) {
- assert(!Matches.empty());
- for (unsigned i = 0, e = Matches[0]->first.size(); i != e; ++i) {
- // Check to see if letter i is the same across the set.
- char Letter = Matches[0]->first[i];
-
- for (unsigned str = 0, e = Matches.size(); str != e; ++str)
- if (Matches[str]->first[i] != Letter)
- return i;
- }
-
- return Matches[0]->first.size();
-}
-
-/// EmitStringMatcherForChar - Given a set of strings that are known to be the
-/// same length and whose characters leading up to CharNo are the same, emit
-/// code to verify that CharNo and later are the same.
-///
-/// \return - True if control can leave the emitted code fragment.
-static bool EmitStringMatcherForChar(const std::string &StrVariableName,
- const std::vector<const StringPair*> &Matches,
- unsigned CharNo, unsigned IndentCount,
- raw_ostream &OS) {
- assert(!Matches.empty() && "Must have at least one string to match!");
- std::string Indent(IndentCount*2+4, ' ');
-
- // If we have verified that the entire string matches, we're done: output the
- // matching code.
- if (CharNo == Matches[0]->first.size()) {
- assert(Matches.size() == 1 && "Had duplicate keys to match on");
-
- // FIXME: If Matches[0].first has embeded \n, this will be bad.
- OS << Indent << Matches[0]->second << "\t // \"" << Matches[0]->first
- << "\"\n";
- return false;
- }
-
- // Bucket the matches by the character we are comparing.
- std::map<char, std::vector<const StringPair*> > MatchesByLetter;
-
- for (unsigned i = 0, e = Matches.size(); i != e; ++i)
- MatchesByLetter[Matches[i]->first[CharNo]].push_back(Matches[i]);
-
-
- // If we have exactly one bucket to match, see how many characters are common
- // across the whole set and match all of them at once.
- if (MatchesByLetter.size() == 1) {
- unsigned FirstNonCommonLetter = FindFirstNonCommonLetter(Matches);
- unsigned NumChars = FirstNonCommonLetter-CharNo;
-
- // Emit code to break out if the prefix doesn't match.
- if (NumChars == 1) {
- // Do the comparison with if (Str[1] != 'f')
- // FIXME: Need to escape general characters.
- OS << Indent << "if (" << StrVariableName << "[" << CharNo << "] != '"
- << Matches[0]->first[CharNo] << "')\n";
- OS << Indent << " break;\n";
- } else {
- // Do the comparison with if (Str.substr(1,3) != "foo").
- // FIXME: Need to escape general strings.
- OS << Indent << "if (" << StrVariableName << ".substr(" << CharNo << ","
- << NumChars << ") != \"";
- OS << Matches[0]->first.substr(CharNo, NumChars) << "\")\n";
- OS << Indent << " break;\n";
- }
-
- return EmitStringMatcherForChar(StrVariableName, Matches,
- FirstNonCommonLetter, IndentCount, OS);
- }
-
- // Otherwise, we have multiple possible things, emit a switch on the
- // character.
- OS << Indent << "switch (" << StrVariableName << "[" << CharNo << "]) {\n";
- OS << Indent << "default: break;\n";
-
- for (std::map<char, std::vector<const StringPair*> >::iterator LI =
- MatchesByLetter.begin(), E = MatchesByLetter.end(); LI != E; ++LI) {
- // TODO: escape hard stuff (like \n) if we ever care about it.
- OS << Indent << "case '" << LI->first << "':\t // "
- << LI->second.size() << " strings to match.\n";
- if (EmitStringMatcherForChar(StrVariableName, LI->second, CharNo+1,
- IndentCount+1, OS))
- OS << Indent << " break;\n";
- }
-
- OS << Indent << "}\n";
- return true;
-}
-
-
-/// EmitStringMatcher - Given a list of strings and code to execute when they
-/// match, output a simple switch tree to classify the input string.
-///
-/// If a match is found, the code in Vals[i].second is executed; control must
-/// not exit this code fragment. If nothing matches, execution falls through.
-///
-/// \param StrVariableName - The name of the variable to test.
-static void EmitStringMatcher(const std::string &StrVariableName,
- const std::vector<StringPair> &Matches,
- raw_ostream &OS) {
- // First level categorization: group strings by length.
- std::map<unsigned, std::vector<const StringPair*> > MatchesByLength;
-
- for (unsigned i = 0, e = Matches.size(); i != e; ++i)
- MatchesByLength[Matches[i].first.size()].push_back(&Matches[i]);
-
- // Output a switch statement on length and categorize the elements within each
- // bin.
- OS << " switch (" << StrVariableName << ".size()) {\n";
- OS << " default: break;\n";
-
- for (std::map<unsigned, std::vector<const StringPair*> >::iterator LI =
- MatchesByLength.begin(), E = MatchesByLength.end(); LI != E; ++LI) {
- OS << " case " << LI->first << ":\t // " << LI->second.size()
- << " strings to match.\n";
- if (EmitStringMatcherForChar(StrVariableName, LI->second, 0, 0, OS))
- OS << " break;\n";
- }
-
- OS << " }\n";
-}
-
-
/// EmitMatchTokenString - Emit the function to match a token string to the
/// appropriate match class value.
static void EmitMatchTokenString(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
// Construct the match list.
- std::vector<StringPair> Matches;
- for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
+ std::vector<StringMatcher::StringPair> Matches;
+ for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
ie = Infos.end(); it != ie; ++it) {
ClassInfo &CI = **it;
if (CI.Kind == ClassInfo::Token)
- Matches.push_back(StringPair(CI.ValueName, "return " + CI.Name + ";"));
+ Matches.push_back(StringMatcher::StringPair(CI.ValueName,
+ "return " + CI.Name + ";"));
}
OS << "static MatchClassKind MatchTokenString(StringRef Name) {\n";
- EmitStringMatcher("Name", Matches, OS);
+ StringMatcher("Name", Matches, OS).Emit();
OS << " return InvalidMatchClass;\n";
OS << "}\n\n";
@@ -1554,28 +1744,28 @@ static void EmitMatchTokenString(CodeGenTarget &Target,
static void EmitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
raw_ostream &OS) {
// Construct the match list.
- std::vector<StringPair> Matches;
+ std::vector<StringMatcher::StringPair> Matches;
for (unsigned i = 0, e = Target.getRegisters().size(); i != e; ++i) {
const CodeGenRegister &Reg = Target.getRegisters()[i];
if (Reg.TheDef->getValueAsString("AsmName").empty())
continue;
- Matches.push_back(StringPair(Reg.TheDef->getValueAsString("AsmName"),
- "return " + utostr(i + 1) + ";"));
+ Matches.push_back(StringMatcher::StringPair(
+ Reg.TheDef->getValueAsString("AsmName"),
+ "return " + utostr(i + 1) + ";"));
}
-
+
OS << "static unsigned MatchRegisterName(StringRef Name) {\n";
- EmitStringMatcher("Name", Matches, OS);
-
+ StringMatcher("Name", Matches, OS).Emit();
+
OS << " return 0;\n";
OS << "}\n\n";
}
/// EmitSubtargetFeatureFlagEnumeration - Emit the subtarget feature flag
/// definitions.
-static void EmitSubtargetFeatureFlagEnumeration(CodeGenTarget &Target,
- AsmMatcherInfo &Info,
+static void EmitSubtargetFeatureFlagEnumeration(AsmMatcherInfo &Info,
raw_ostream &OS) {
OS << "// Flags for subtarget features that participate in "
<< "instruction matching.\n";
@@ -1584,7 +1774,7 @@ static void EmitSubtargetFeatureFlagEnumeration(CodeGenTarget &Target,
it = Info.SubtargetFeatures.begin(),
ie = Info.SubtargetFeatures.end(); it != ie; ++it) {
SubtargetFeatureInfo &SFI = *it->second;
- OS << " " << SFI.EnumName << " = (1 << " << SFI.Index << "),\n";
+ OS << " " << SFI.getEnumName() << " = (1 << " << SFI.Index << "),\n";
}
OS << " Feature_None = 0\n";
OS << "};\n\n";
@@ -1592,14 +1782,13 @@ static void EmitSubtargetFeatureFlagEnumeration(CodeGenTarget &Target,
/// EmitComputeAvailableFeatures - Emit the function to compute the list of
/// available features given a subtarget.
-static void EmitComputeAvailableFeatures(CodeGenTarget &Target,
- AsmMatcherInfo &Info,
+static void EmitComputeAvailableFeatures(AsmMatcherInfo &Info,
raw_ostream &OS) {
std::string ClassName =
Info.AsmParser->getValueAsString("AsmParserClassName");
- OS << "unsigned " << Target.getName() << ClassName << "::\n"
- << "ComputeAvailableFeatures(const " << Target.getName()
+ OS << "unsigned " << Info.Target.getName() << ClassName << "::\n"
+ << "ComputeAvailableFeatures(const " << Info.Target.getName()
<< "Subtarget *Subtarget) const {\n";
OS << " unsigned Features = 0;\n";
for (std::map<Record*, SubtargetFeatureInfo*>::const_iterator
@@ -1608,73 +1797,375 @@ static void EmitComputeAvailableFeatures(CodeGenTarget &Target,
SubtargetFeatureInfo &SFI = *it->second;
OS << " if (" << SFI.TheDef->getValueAsString("CondString")
<< ")\n";
- OS << " Features |= " << SFI.EnumName << ";\n";
+ OS << " Features |= " << SFI.getEnumName() << ";\n";
}
OS << " return Features;\n";
OS << "}\n\n";
}
+static std::string GetAliasRequiredFeatures(Record *R,
+ const AsmMatcherInfo &Info) {
+ std::vector<Record*> ReqFeatures = R->getValueAsListOfDefs("Predicates");
+ std::string Result;
+ unsigned NumFeatures = 0;
+ for (unsigned i = 0, e = ReqFeatures.size(); i != e; ++i) {
+ SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]);
+
+ if (F == 0)
+ throw TGError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
+ "' is not marked as an AssemblerPredicate!");
+
+ if (NumFeatures)
+ Result += '|';
+
+ Result += F->getEnumName();
+ ++NumFeatures;
+ }
+
+ if (NumFeatures > 1)
+ Result = '(' + Result + ')';
+ return Result;
+}
+
+/// EmitMnemonicAliases - If the target has any MnemonicAlias<> definitions,
+/// emit a function for them and return true, otherwise return false.
+static bool EmitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
+ // Ignore aliases when match-prefix is set.
+ if (!MatchPrefix.empty())
+ return false;
+
+ std::vector<Record*> Aliases =
+ Info.getRecords().getAllDerivedDefinitions("MnemonicAlias");
+ if (Aliases.empty()) return false;
+
+ OS << "static void ApplyMnemonicAliases(StringRef &Mnemonic, "
+ "unsigned Features) {\n";
+
+ // Keep track of all the aliases from a mnemonic. Use an std::map so that the
+ // iteration order of the map is stable.
+ std::map<std::string, std::vector<Record*> > AliasesFromMnemonic;
+
+ for (unsigned i = 0, e = Aliases.size(); i != e; ++i) {
+ Record *R = Aliases[i];
+ AliasesFromMnemonic[R->getValueAsString("FromMnemonic")].push_back(R);
+ }
+
+ // Process each alias a "from" mnemonic at a time, building the code executed
+ // by the string remapper.
+ std::vector<StringMatcher::StringPair> Cases;
+ for (std::map<std::string, std::vector<Record*> >::iterator
+ I = AliasesFromMnemonic.begin(), E = AliasesFromMnemonic.end();
+ I != E; ++I) {
+ const std::vector<Record*> &ToVec = I->second;
+
+ // Loop through each alias and emit code that handles each case. If there
+ // are two instructions without predicates, emit an error. If there is one,
+ // emit it last.
+ std::string MatchCode;
+ int AliasWithNoPredicate = -1;
+
+ for (unsigned i = 0, e = ToVec.size(); i != e; ++i) {
+ Record *R = ToVec[i];
+ std::string FeatureMask = GetAliasRequiredFeatures(R, Info);
+
+ // If this unconditionally matches, remember it for later and diagnose
+ // duplicates.
+ if (FeatureMask.empty()) {
+ if (AliasWithNoPredicate != -1) {
+ // We can't have two aliases from the same mnemonic with no predicate.
+ PrintError(ToVec[AliasWithNoPredicate]->getLoc(),
+ "two MnemonicAliases with the same 'from' mnemonic!");
+ throw TGError(R->getLoc(), "this is the other MnemonicAlias.");
+ }
+
+ AliasWithNoPredicate = i;
+ continue;
+ }
+ if (R->getValueAsString("ToMnemonic") == I->first)
+ throw TGError(R->getLoc(), "MnemonicAlias to the same string");
+
+ if (!MatchCode.empty())
+ MatchCode += "else ";
+ MatchCode += "if ((Features & " + FeatureMask + ") == "+FeatureMask+")\n";
+ MatchCode += " Mnemonic = \"" +R->getValueAsString("ToMnemonic")+"\";\n";
+ }
+
+ if (AliasWithNoPredicate != -1) {
+ Record *R = ToVec[AliasWithNoPredicate];
+ if (!MatchCode.empty())
+ MatchCode += "else\n ";
+ MatchCode += "Mnemonic = \"" + R->getValueAsString("ToMnemonic")+"\";\n";
+ }
+
+ MatchCode += "return;";
+
+ Cases.push_back(std::make_pair(I->first, MatchCode));
+ }
+
+ StringMatcher("Mnemonic", Cases, OS).Emit();
+ OS << "}\n\n";
+
+ return true;
+}
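
Putting the pieces together, a hypothetical def : MnemonicAlias<"repe", "rep"> with no predicate would yield remapper code of roughly this shape (simplified; the real dispatch emitted by StringMatcher compares character runs rather than whole strings):

static void ApplyMnemonicAliases(StringRef &Mnemonic, unsigned Features) {
  switch (Mnemonic.size()) {
  default: break;
  case 4:
    if (Mnemonic != "repe")  // simplified comparison
      break;
    Mnemonic = "rep";
    return;
  }
}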
+
+static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
+ const AsmMatcherInfo &Info, StringRef ClassName) {
+ // Emit the static custom operand parsing table.
+ OS << "namespace {\n";
+ OS << " struct OperandMatchEntry {\n";
+ OS << " const char *Mnemonic;\n";
+ OS << " unsigned OperandMask;\n";
+ OS << " MatchClassKind Class;\n";
+ OS << " unsigned RequiredFeatures;\n";
+ OS << " };\n\n";
+
+ OS << " // Predicate for searching for an opcode.\n";
+ OS << " struct LessOpcodeOperand {\n";
+ OS << " bool operator()(const OperandMatchEntry &LHS, StringRef RHS) {\n";
+ OS << " return StringRef(LHS.Mnemonic) < RHS;\n";
+ OS << " }\n";
+ OS << " bool operator()(StringRef LHS, const OperandMatchEntry &RHS) {\n";
+ OS << " return LHS < StringRef(RHS.Mnemonic);\n";
+ OS << " }\n";
+ OS << " bool operator()(const OperandMatchEntry &LHS,";
+ OS << " const OperandMatchEntry &RHS) {\n";
+ OS << " return StringRef(LHS.Mnemonic) < StringRef(RHS.Mnemonic);\n";
+ OS << " }\n";
+ OS << " };\n";
+
+ OS << "} // end anonymous namespace.\n\n";
+
+ OS << "static const OperandMatchEntry OperandMatchTable["
+ << Info.OperandMatchInfo.size() << "] = {\n";
+
+ OS << " /* Mnemonic, Operand List Mask, Operand Class, Features */\n";
+ for (std::vector<OperandMatchEntry>::const_iterator it =
+ Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
+ it != ie; ++it) {
+ const OperandMatchEntry &OMI = *it;
+ const MatchableInfo &II = *OMI.MI;
+
+ OS << " { \"" << II.Mnemonic << "\""
+ << ", " << OMI.OperandMask;
+
+ OS << " /* ";
+ bool printComma = false;
+ for (int i = 0, e = 31; i != e; ++i)
+ if (OMI.OperandMask & (1 << i)) {
+ if (printComma)
+ OS << ", ";
+ OS << i;
+ printComma = true;
+ }
+ OS << " */";
+
+ OS << ", " << OMI.CI->Name
+ << ", ";
+
+ // Write the required features mask.
+ if (!II.RequiredFeatures.empty()) {
+ for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
+ if (i) OS << "|";
+ OS << II.RequiredFeatures[i]->getEnumName();
+ }
+ } else
+ OS << "0";
+ OS << " },\n";
+ }
+ OS << "};\n\n";
+
+ // Emit the operand class switch to call the correct custom parser for
+ // the found operand class.
+ OS << Target.getName() << ClassName << "::OperandMatchResultTy "
+ << Target.getName() << ClassName << "::\n"
+ << "TryCustomParseOperand(SmallVectorImpl<MCParsedAsmOperand*>"
+ << " &Operands,\n unsigned MCK) {\n\n"
+ << " switch(MCK) {\n";
+
+ for (std::vector<ClassInfo*>::const_iterator it = Info.Classes.begin(),
+ ie = Info.Classes.end(); it != ie; ++it) {
+ ClassInfo *CI = *it;
+ if (CI->ParserMethod.empty())
+ continue;
+ OS << " case " << CI->Name << ":\n"
+ << " return " << CI->ParserMethod << "(Operands);\n";
+ }
+
+ OS << " default:\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << " }\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << "}\n\n";
+
+ // Emit the static custom operand parser. This code is very similar to
+ // the other matcher. Also use MatchResultTy here in case we move to
+ // better error handling.
+ OS << Target.getName() << ClassName << "::OperandMatchResultTy "
+ << Target.getName() << ClassName << "::\n"
+ << "MatchOperandParserImpl(SmallVectorImpl<MCParsedAsmOperand*>"
+ << " &Operands,\n StringRef Mnemonic) {\n";
+
+ // Emit code to get the available features.
+ OS << " // Get the current feature set.\n";
+ OS << " unsigned AvailableFeatures = getAvailableFeatures();\n\n";
+
+ OS << " // Get the next operand index.\n";
+ OS << " unsigned NextOpNum = Operands.size()-1;\n";
+
+ // Emit code to search the table.
+ OS << " // Search the table.\n";
+ OS << " std::pair<const OperandMatchEntry*, const OperandMatchEntry*>";
+ OS << " MnemonicRange =\n";
+ OS << " std::equal_range(OperandMatchTable, OperandMatchTable+"
+ << Info.OperandMatchInfo.size() << ", Mnemonic,\n"
+ << " LessOpcodeOperand());\n\n";
+
+ OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
+ OS << " return MatchOperand_NoMatch;\n\n";
+
+ OS << " for (const OperandMatchEntry *it = MnemonicRange.first,\n"
+ << " *ie = MnemonicRange.second; it != ie; ++it) {\n";
+
+ OS << " // equal_range guarantees that instruction mnemonic matches.\n";
+ OS << " assert(Mnemonic == it->Mnemonic);\n\n";
+
+ // Emit check that the required features are available.
+ OS << " // check if the available features match\n";
+ OS << " if ((AvailableFeatures & it->RequiredFeatures) "
+ << "!= it->RequiredFeatures) {\n";
+ OS << " continue;\n";
+ OS << " }\n\n";
+
+ // Emit check to ensure the operand number matches.
+ OS << " // check if the operand in question has a custom parser.\n";
+ OS << " if (!(it->OperandMask & (1 << NextOpNum)))\n";
+ OS << " continue;\n\n";
+
+ // Emit call to the custom parser method
+ OS << " // call custom parse method to handle the operand\n";
+ OS << " OperandMatchResultTy Result = ";
+ OS << "TryCustomParseOperand(Operands, it->Class);\n";
+ OS << " if (Result != MatchOperand_NoMatch)\n";
+ OS << " return Result;\n";
+ OS << " }\n\n";
+
+ OS << " // Okay, we had no match.\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << "}\n\n";
+}
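
A standalone sketch of the table search this function emits (entries and
fields are illustrative): rows are sorted by mnemonic, and std::equal_range
with an asymmetric comparator returns the contiguous run of entries for one
mnemonic in O(log n), which is the shape of the emitted
MatchOperandParserImpl.

    #include <algorithm>
    #include <cstring>
    #include <utility>

    struct OperandMatchEntry { const char *Mnemonic; unsigned OperandMask; };

    // Mirrors the emitted LessOpcodeOperand predicate.
    struct LessOpcodeOperand {
      bool operator()(const OperandMatchEntry &L, const char *R) const {
        return std::strcmp(L.Mnemonic, R) < 0;
      }
      bool operator()(const char *L, const OperandMatchEntry &R) const {
        return std::strcmp(L, R.Mnemonic) < 0;
      }
    };

    static const OperandMatchEntry Table[] = {
      {"add", 0x1}, {"mov", 0x3}, {"mov", 0x4}, {"sub", 0x1},
    };

    // All entries for Mnemonic lie in [first, second).
    std::pair<const OperandMatchEntry*, const OperandMatchEntry*>
    lookupMnemonic(const char *Mnemonic) {
      return std::equal_range(Table, Table + 4, Mnemonic, LessOpcodeOperand());
    }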
+
void AsmMatcherEmitter::run(raw_ostream &OS) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
Record *AsmParser = Target.getAsmParser();
std::string ClassName = AsmParser->getValueAsString("AsmParserClassName");
// Compute the information on the instructions to match.
- AsmMatcherInfo Info(AsmParser);
- Info.BuildInfo(Target);
+ AsmMatcherInfo Info(AsmParser, Target, Records);
+ Info.BuildInfo();
// Sort the instruction table using the partial order on classes. We use
// stable_sort to ensure that ambiguous instructions are still
// deterministically ordered.
- std::stable_sort(Info.Instructions.begin(), Info.Instructions.end(),
- less_ptr<InstructionInfo>());
-
+ std::stable_sort(Info.Matchables.begin(), Info.Matchables.end(),
+ less_ptr<MatchableInfo>());
+
DEBUG_WITH_TYPE("instruction_info", {
- for (std::vector<InstructionInfo*>::iterator
- it = Info.Instructions.begin(), ie = Info.Instructions.end();
+ for (std::vector<MatchableInfo*>::iterator
+ it = Info.Matchables.begin(), ie = Info.Matchables.end();
it != ie; ++it)
(*it)->dump();
});
- // Check for ambiguous instructions.
- unsigned NumAmbiguous = 0;
- for (unsigned i = 0, e = Info.Instructions.size(); i != e; ++i) {
- for (unsigned j = i + 1; j != e; ++j) {
- InstructionInfo &A = *Info.Instructions[i];
- InstructionInfo &B = *Info.Instructions[j];
-
- if (A.CouldMatchAmiguouslyWith(B)) {
- DEBUG_WITH_TYPE("ambiguous_instrs", {
- errs() << "warning: ambiguous instruction match:\n";
- A.dump();
- errs() << "\nis incomparable with:\n";
- B.dump();
- errs() << "\n\n";
- });
- ++NumAmbiguous;
+ // Check for ambiguous matchables.
+ DEBUG_WITH_TYPE("ambiguous_instrs", {
+ unsigned NumAmbiguous = 0;
+ for (unsigned i = 0, e = Info.Matchables.size(); i != e; ++i) {
+ for (unsigned j = i + 1; j != e; ++j) {
+ MatchableInfo &A = *Info.Matchables[i];
+ MatchableInfo &B = *Info.Matchables[j];
+
+ if (A.CouldMatchAmbiguouslyWith(B)) {
+ errs() << "warning: ambiguous matchables:\n";
+ A.dump();
+ errs() << "\nis incomparable with:\n";
+ B.dump();
+ errs() << "\n\n";
+ ++NumAmbiguous;
+ }
}
}
- }
- if (NumAmbiguous)
- DEBUG_WITH_TYPE("ambiguous_instrs", {
- errs() << "warning: " << NumAmbiguous
- << " ambiguous instructions!\n";
- });
+ if (NumAmbiguous)
+ errs() << "warning: " << NumAmbiguous
+ << " ambiguous matchables!\n";
+ });
+
+ // Compute the information on the custom operand parsing.
+ Info.BuildOperandMatchInfo();
// Write the output.
EmitSourceFileHeader("Assembly Matcher Source Fragment", OS);
+ // Information for the class declaration.
+ OS << "\n#ifdef GET_ASSEMBLER_HEADER\n";
+ OS << "#undef GET_ASSEMBLER_HEADER\n";
+ OS << " // This should be included into the middle of the declaration of\n";
+ OS << " // your subclasses implementation of TargetAsmParser.\n";
+ OS << " unsigned ComputeAvailableFeatures(const " <<
+ Target.getName() << "Subtarget *Subtarget) const;\n";
+ OS << " enum MatchResultTy {\n";
+ OS << " Match_ConversionFail,\n";
+ OS << " Match_InvalidOperand,\n";
+ OS << " Match_MissingFeature,\n";
+ OS << " Match_MnemonicFail,\n";
+ OS << " Match_Success\n";
+ OS << " };\n";
+ OS << " bool ConvertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const SmallVectorImpl<MCParsedAsmOperand*> "
+ << "&Operands);\n";
+ OS << " bool MnemonicIsValid(StringRef Mnemonic);\n";
+ OS << " MatchResultTy MatchInstructionImpl(\n";
+ OS << " const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
+ OS << " MCInst &Inst, unsigned &ErrorInfo);\n";
+
+ if (Info.OperandMatchInfo.size()) {
+ OS << "\n enum OperandMatchResultTy {\n";
+ OS << " MatchOperand_Success, // operand matched successfully\n";
+ OS << " MatchOperand_NoMatch, // operand did not match\n";
+ OS << " MatchOperand_ParseFail // operand matched but had errors\n";
+ OS << " };\n";
+ OS << " OperandMatchResultTy MatchOperandParserImpl(\n";
+ OS << " SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
+ OS << " StringRef Mnemonic);\n";
+
+ OS << " OperandMatchResultTy TryCustomParseOperand(\n";
+ OS << " SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
+ OS << " unsigned MCK);\n\n";
+ }
+
+ OS << "#endif // GET_ASSEMBLER_HEADER_INFO\n\n";
+
+ OS << "\n#ifdef GET_REGISTER_MATCHER\n";
+ OS << "#undef GET_REGISTER_MATCHER\n\n";
+
// Emit the subtarget feature enumeration.
- EmitSubtargetFeatureFlagEnumeration(Target, Info, OS);
+ EmitSubtargetFeatureFlagEnumeration(Info, OS);
// Emit the function to match a register name to number.
EmitMatchRegisterName(Target, AsmParser, OS);
-
- OS << "#ifndef REGISTERS_ONLY\n\n";
+
+ OS << "#endif // GET_REGISTER_MATCHER\n\n";
+
+
+ OS << "\n#ifdef GET_MATCHER_IMPLEMENTATION\n";
+ OS << "#undef GET_MATCHER_IMPLEMENTATION\n\n";
+
+ // Generate the function that remaps for mnemonic aliases.
+ bool HasMnemonicAliases = EmitMnemonicAliases(OS, Info);
// Generate the unified function to convert operands into an MCInst.
- EmitConvertToMCInst(Target, Info.Instructions, OS);
+ EmitConvertToMCInst(Target, ClassName, Info.Matchables, OS);
// Emit the enumeration for classes which participate in matching.
EmitMatchClassEnumeration(Target, Info.Classes, OS);
@@ -1682,27 +2173,21 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// Emit the routine to match token strings to their match class.
EmitMatchTokenString(Target, Info.Classes, OS);
- // Emit the routine to classify an operand.
- EmitClassifyOperand(Target, Info, OS);
-
// Emit the subclass predicate routine.
EmitIsSubclass(Target, Info.Classes, OS);
+ // Emit the routine to validate an operand against a match class.
+ EmitValidateOperandClass(Info, OS);
+
// Emit the available features compute function.
- EmitComputeAvailableFeatures(Target, Info, OS);
+ EmitComputeAvailableFeatures(Info, OS);
- // Finally, build the match function.
size_t MaxNumOperands = 0;
- for (std::vector<InstructionInfo*>::const_iterator it =
- Info.Instructions.begin(), ie = Info.Instructions.end();
+ for (std::vector<MatchableInfo*>::const_iterator it =
+ Info.Matchables.begin(), ie = Info.Matchables.end();
it != ie; ++it)
- MaxNumOperands = std::max(MaxNumOperands, (*it)->Operands.size());
-
- OS << "bool " << Target.getName() << ClassName << "::\n"
- << "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
- << " &Operands,\n";
- OS << " MCInst &Inst) {\n";
+ MaxNumOperands = std::max(MaxNumOperands, (*it)->AsmOperands.size());
 // Emit the static match table; unused classes get initialized to 0 which is
// guaranteed to be InvalidMatchClass.
@@ -1714,23 +2199,44 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// order the match kinds appropriately (putting mnemonics last), then we
// should only end up using a few bits for each class, especially the ones
// following the mnemonic.
- OS << " static const struct MatchEntry {\n";
+ OS << "namespace {\n";
+ OS << " struct MatchEntry {\n";
OS << " unsigned Opcode;\n";
+ OS << " const char *Mnemonic;\n";
OS << " ConversionKind ConvertFn;\n";
OS << " MatchClassKind Classes[" << MaxNumOperands << "];\n";
OS << " unsigned RequiredFeatures;\n";
- OS << " } MatchTable[" << Info.Instructions.size() << "] = {\n";
+ OS << " };\n\n";
+
+ OS << " // Predicate for searching for an opcode.\n";
+ OS << " struct LessOpcode {\n";
+ OS << " bool operator()(const MatchEntry &LHS, StringRef RHS) {\n";
+ OS << " return StringRef(LHS.Mnemonic) < RHS;\n";
+ OS << " }\n";
+ OS << " bool operator()(StringRef LHS, const MatchEntry &RHS) {\n";
+ OS << " return LHS < StringRef(RHS.Mnemonic);\n";
+ OS << " }\n";
+ OS << " bool operator()(const MatchEntry &LHS, const MatchEntry &RHS) {\n";
+ OS << " return StringRef(LHS.Mnemonic) < StringRef(RHS.Mnemonic);\n";
+ OS << " }\n";
+ OS << " };\n";
- for (std::vector<InstructionInfo*>::const_iterator it =
- Info.Instructions.begin(), ie = Info.Instructions.end();
+ OS << "} // end anonymous namespace.\n\n";
+
+ OS << "static const MatchEntry MatchTable["
+ << Info.Matchables.size() << "] = {\n";
+
+ for (std::vector<MatchableInfo*>::const_iterator it =
+ Info.Matchables.begin(), ie = Info.Matchables.end();
it != ie; ++it) {
- InstructionInfo &II = **it;
+ MatchableInfo &II = **it;
- OS << " { " << Target.getName() << "::" << II.InstrName
+ OS << " { " << Target.getName() << "::"
+ << II.getResultInst()->TheDef->getName() << ", \"" << II.Mnemonic << "\""
<< ", " << II.ConversionFnKind << ", { ";
- for (unsigned i = 0, e = II.Operands.size(); i != e; ++i) {
- InstructionInfo::Operand &Op = II.Operands[i];
-
+ for (unsigned i = 0, e = II.AsmOperands.size(); i != e; ++i) {
+ MatchableInfo::AsmOperand &Op = II.AsmOperands[i];
+
if (i) OS << ", ";
OS << Op.Class->Name;
}
@@ -1740,7 +2246,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
if (!II.RequiredFeatures.empty()) {
for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
if (i) OS << "|";
- OS << II.RequiredFeatures[i]->EnumName;
+ OS << II.RequiredFeatures[i]->getEnumName();
}
} else
OS << "0";
@@ -1748,52 +2254,101 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << "},\n";
}
- OS << " };\n\n";
+ OS << "};\n\n";
+
+ // A method to determine if a mnemonic is in the list.
+ OS << "bool " << Target.getName() << ClassName << "::\n"
+ << "MnemonicIsValid(StringRef Mnemonic) {\n";
+ OS << " // Search the table.\n";
+ OS << " std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
+ OS << " std::equal_range(MatchTable, MatchTable+"
+ << Info.Matchables.size() << ", Mnemonic, LessOpcode());\n";
+ OS << " return MnemonicRange.first != MnemonicRange.second;\n";
+ OS << "}\n\n";
+ // Finally, build the match function.
+ OS << Target.getName() << ClassName << "::MatchResultTy "
+ << Target.getName() << ClassName << "::\n"
+ << "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
+ << " &Operands,\n";
+ OS << " MCInst &Inst, unsigned &ErrorInfo) {\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
OS << " unsigned AvailableFeatures = getAvailableFeatures();\n\n";
- // Emit code to compute the class list for this operand vector.
- OS << " // Eliminate obvious mismatches.\n";
- OS << " if (Operands.size() > " << MaxNumOperands << ")\n";
- OS << " return true;\n\n";
+ OS << " // Get the instruction mnemonic, which is the first token.\n";
+ OS << " StringRef Mnemonic = ((" << Target.getName()
+ << "Operand*)Operands[0])->getToken();\n\n";
- OS << " // Compute the class list for this operand vector.\n";
- OS << " MatchClassKind Classes[" << MaxNumOperands << "];\n";
- OS << " for (unsigned i = 0, e = Operands.size(); i != e; ++i) {\n";
- OS << " Classes[i] = ClassifyOperand(Operands[i]);\n\n";
+ if (HasMnemonicAliases) {
+ OS << " // Process all MnemonicAliases to remap the mnemonic.\n";
+ OS << " ApplyMnemonicAliases(Mnemonic, AvailableFeatures);\n\n";
+ }
- OS << " // Check for invalid operands before matching.\n";
- OS << " if (Classes[i] == InvalidMatchClass)\n";
- OS << " return true;\n";
+ // Emit code to compute the class list for this operand vector.
+ OS << " // Eliminate obvious mismatches.\n";
+ OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n";
+ OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n";
+ OS << " return Match_InvalidOperand;\n";
OS << " }\n\n";
- OS << " // Mark unused classes.\n";
- OS << " for (unsigned i = Operands.size(), e = " << MaxNumOperands << "; "
- << "i != e; ++i)\n";
- OS << " Classes[i] = InvalidMatchClass;\n\n";
+ OS << " // Some state to try to produce better error messages.\n";
+ OS << " bool HadMatchOtherThanFeatures = false;\n\n";
+ OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
+ OS << " // wrong for all instances of the instruction.\n";
+ OS << " ErrorInfo = ~0U;\n";
// Emit code to search the table.
OS << " // Search the table.\n";
- OS << " for (const MatchEntry *it = MatchTable, "
- << "*ie = MatchTable + " << Info.Instructions.size()
- << "; it != ie; ++it) {\n";
+ OS << " std::pair<const MatchEntry*, const MatchEntry*> MnemonicRange =\n";
+ OS << " std::equal_range(MatchTable, MatchTable+"
+ << Info.Matchables.size() << ", Mnemonic, LessOpcode());\n\n";
- // Emit check that the required features are available.
- OS << " if ((AvailableFeatures & it->RequiredFeatures) "
- << "!= it->RequiredFeatures)\n";
- OS << " continue;\n";
+ OS << " // Return a more specific error code if no mnemonics match.\n";
+ OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
+ OS << " return Match_MnemonicFail;\n\n";
+
+ OS << " for (const MatchEntry *it = MnemonicRange.first, "
+ << "*ie = MnemonicRange.second;\n";
+ OS << " it != ie; ++it) {\n";
+
+ OS << " // equal_range guarantees that instruction mnemonic matches.\n";
+ OS << " assert(Mnemonic == it->Mnemonic);\n";
// Emit check that the subclasses match.
- for (unsigned i = 0; i != MaxNumOperands; ++i) {
- OS << " if (!IsSubclass(Classes["
- << i << "], it->Classes[" << i << "]))\n";
- OS << " continue;\n";
- }
+ OS << " bool OperandsValid = true;\n";
+ OS << " for (unsigned i = 0; i != " << MaxNumOperands << "; ++i) {\n";
+ OS << " if (i + 1 >= Operands.size()) {\n";
+ OS << " OperandsValid = (it->Classes[i] == " <<"InvalidMatchClass);\n";
+ OS << " break;";
+ OS << " }\n";
+ OS << " if (ValidateOperandClass(Operands[i+1], it->Classes[i]))\n";
+ OS << " continue;\n";
+ OS << " // If this operand is broken for all of the instances of this\n";
+ OS << " // mnemonic, keep track of it so we can report loc info.\n";
+ OS << " if (it == MnemonicRange.first || ErrorInfo <= i+1)\n";
+ OS << " ErrorInfo = i+1;\n";
+ OS << " // Otherwise, just reject this instance of the mnemonic.\n";
+ OS << " OperandsValid = false;\n";
+ OS << " break;\n";
+ OS << " }\n\n";
+
+ OS << " if (!OperandsValid) continue;\n";
+
+ // Emit check that the required features are available.
+ OS << " if ((AvailableFeatures & it->RequiredFeatures) "
+ << "!= it->RequiredFeatures) {\n";
+ OS << " HadMatchOtherThanFeatures = true;\n";
+ OS << " continue;\n";
+ OS << " }\n";
+ OS << "\n";
+ OS << " // We have selected a definite instruction, convert the parsed\n"
+ << " // operands into the appropriate MCInst.\n";
+ OS << " if (!ConvertToMCInst(it->ConvertFn, Inst,\n"
+ << " it->Opcode, Operands))\n";
+ OS << " return Match_ConversionFail;\n";
OS << "\n";
- OS << " ConvertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
// Call the post-processing function, if used.
std::string InsnCleanupFn =
@@ -1801,11 +2356,16 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
if (!InsnCleanupFn.empty())
OS << " " << InsnCleanupFn << "(Inst);\n";
- OS << " return false;\n";
+ OS << " return Match_Success;\n";
OS << " }\n\n";
- OS << " return true;\n";
+ OS << " // Okay, we had no match. Try to return a useful error code.\n";
+ OS << " if (HadMatchOtherThanFeatures) return Match_MissingFeature;\n";
+ OS << " return Match_InvalidOperand;\n";
OS << "}\n\n";
-
- OS << "#endif // REGISTERS_ONLY\n";
+
+ if (Info.OperandMatchInfo.size())
+ EmitCustomOperandParsing(OS, Target, Info, ClassName);
+
+ OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
}
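
A usage sketch for the new GET_* guards (the target and file names are
hypothetical): the same generated file is included more than once, each
time with one guard defined, so the declarations land inside the target's
parser class and the definitions in a single translation unit. This
replaces the old REGISTERS_ONLY scheme.

    // Inside the target's AsmParser class declaration:
    class FooAsmParser : public TargetAsmParser {
      // ... target-specific members ...
    #define GET_ASSEMBLER_HEADER
    #include "FooGenAsmMatcher.inc"
    };

    // In exactly one .cpp file of the target:
    #define GET_REGISTER_MATCHER
    #define GET_MATCHER_IMPLEMENTATION
    #include "FooGenAsmMatcher.inc"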
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
index 23f13c2..448ebad 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -101,22 +101,22 @@ static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
}
void AsmWriterEmitter::
-FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
+FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
std::vector<unsigned> &InstIdxs,
std::vector<unsigned> &InstOpsUsed) const {
InstIdxs.assign(NumberedInstructions.size(), ~0U);
-
+
// This vector parallels UniqueOperandCommands, keeping track of which
// instructions each case are used for. It is a comma separated string of
// enums.
std::vector<std::string> InstrsForCase;
InstrsForCase.resize(UniqueOperandCommands.size());
InstOpsUsed.assign(UniqueOperandCommands.size(), 0);
-
+
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
const AsmWriterInst *Inst = getAsmWriterInstByID(i);
if (Inst == 0) continue; // PHI, INLINEASM, PROLOG_LABEL, etc.
-
+
std::string Command;
if (Inst->Operands.empty())
continue; // Instruction already done.
@@ -143,13 +143,13 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
InstOpsUsed.push_back(1);
}
}
-
+
// For each entry of UniqueOperandCommands, there is a set of instructions
// that uses it. If the next command of all instructions in the set are
// identical, fold it into the command.
for (unsigned CommandIdx = 0, e = UniqueOperandCommands.size();
CommandIdx != e; ++CommandIdx) {
-
+
for (unsigned Op = 1; ; ++Op) {
// Scan for the first instruction in the set.
std::vector<unsigned>::iterator NIT =
@@ -158,7 +158,7 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
 // If this instruction has no more operands, there isn't anything to merge
// into this command.
- const AsmWriterInst *FirstInst =
+ const AsmWriterInst *FirstInst =
getAsmWriterInstByID(NIT-InstIdxs.begin());
if (!FirstInst || FirstInst->Operands.size() == Op)
break;
@@ -175,7 +175,7 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx)) {
// Okay, found another instruction in this command set. If the operand
// matches, we're ok, otherwise bail out.
- const AsmWriterInst *OtherInst =
+ const AsmWriterInst *OtherInst =
getAsmWriterInstByID(NIT-InstIdxs.begin());
if (OtherInst &&
@@ -189,16 +189,16 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
}
}
if (!AllSame) break;
-
+
// Okay, everything in this command set has the same next operand. Add it
// to UniqueOperandCommands and remember that it was consumed.
std::string Command = " " + FirstInst->Operands[Op].getCode() + "\n";
-
+
UniqueOperandCommands[CommandIdx] += Command;
InstOpsUsed[CommandIdx]++;
}
}
-
+
// Prepend some of the instructions each case is used for onto the case val.
for (unsigned i = 0, e = InstrsForCase.size(); i != e; ++i) {
std::string Instrs = InstrsForCase[i];
@@ -206,9 +206,9 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
Instrs.erase(Instrs.begin()+70, Instrs.end());
Instrs += "...";
}
-
+
if (!Instrs.empty())
- UniqueOperandCommands[i] = " // " + Instrs + "\n" +
+ UniqueOperandCommands[i] = " // " + Instrs + "\n" +
UniqueOperandCommands[i];
}
}
@@ -240,15 +240,18 @@ static void UnescapeString(std::string &Str) {
/// EmitPrintInstruction - Generate the code for the "printInstruction" method
/// implementation.
void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
-
+ bool isMC = AsmWriter->getValueAsBit("isMCAsmWriter");
+ const char *MachineInstrClassName = isMC ? "MCInst" : "MachineInstr";
+
O <<
"/// printInstruction - This method is automatically generated by tablegen\n"
"/// from the instruction set description.\n"
"void " << Target.getName() << ClassName
- << "::printInstruction(const MachineInstr *MI, raw_ostream &O) {\n";
+ << "::printInstruction(const " << MachineInstrClassName
+ << " *MI, raw_ostream &O) {\n";
std::vector<AsmWriterInst> Instructions;
@@ -257,14 +260,14 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
if (!(*I)->AsmString.empty() &&
(*I)->TheDef->getName() != "PHI")
Instructions.push_back(
- AsmWriterInst(**I,
+ AsmWriterInst(**I,
AsmWriter->getValueAsInt("Variant"),
AsmWriter->getValueAsInt("FirstOperandColumn"),
AsmWriter->getValueAsInt("OperandSpacing")));
// Get the instruction numbering.
NumberedInstructions = Target.getInstructionsByEnumValue();
-
+
// Compute the CodeGenInstruction -> AsmWriterInst mapping. Note that not
// all machine instructions are necessarily being printed, so there may be
// target instructions not in this map.
@@ -273,11 +276,11 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// Build an aggregate string, and build a table of offsets into it.
StringToOffsetTable StringTable;
-
+
/// OpcodeInfo - This encodes the index of the string to use for the first
/// chunk of the output as well as indices used for operand printing.
std::vector<unsigned> OpcodeInfo;
-
+
unsigned MaxStringIdx = 0;
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions[i]];
@@ -285,7 +288,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
if (AWI == 0) {
// Something not handled by the asmwriter printer.
Idx = ~0U;
- } else if (AWI->Operands[0].OperandType !=
+ } else if (AWI->Operands[0].OperandType !=
AsmWriterOperand::isLiteralTextOperand ||
AWI->Operands[0].Str.empty()) {
// Something handled by the asmwriter printer, but with no leading string.
@@ -295,51 +298,51 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
UnescapeString(Str);
Idx = StringTable.GetOrAddStringOffset(Str);
MaxStringIdx = std::max(MaxStringIdx, Idx);
-
+
// Nuke the string from the operand list. It is now handled!
AWI->Operands.erase(AWI->Operands.begin());
}
-
+
// Bias offset by one since we want 0 as a sentinel.
OpcodeInfo.push_back(Idx+1);
}
-
+
// Figure out how many bits we used for the string index.
unsigned AsmStrBits = Log2_32_Ceil(MaxStringIdx+2);
-
+
// To reduce code size, we compactify common instructions into a few bits
// in the opcode-indexed table.
unsigned BitsLeft = 32-AsmStrBits;
std::vector<std::vector<std::string> > TableDrivenOperandPrinters;
-
+
while (1) {
std::vector<std::string> UniqueOperandCommands;
std::vector<unsigned> InstIdxs;
std::vector<unsigned> NumInstOpsHandled;
FindUniqueOperandCommands(UniqueOperandCommands, InstIdxs,
NumInstOpsHandled);
-
+
// If we ran out of operands to print, we're done.
if (UniqueOperandCommands.empty()) break;
-
+
// Compute the number of bits we need to represent these cases, this is
// ceil(log2(numentries)).
unsigned NumBits = Log2_32_Ceil(UniqueOperandCommands.size());
-
+
// If we don't have enough bits for this operand, don't include it.
if (NumBits > BitsLeft) {
DEBUG(errs() << "Not enough bits to densely encode " << NumBits
<< " more bits\n");
break;
}
-
+
// Otherwise, we can include this in the initial lookup table. Add it in.
BitsLeft -= NumBits;
for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i)
if (InstIdxs[i] != ~0U)
OpcodeInfo[i] |= InstIdxs[i] << (BitsLeft+AsmStrBits);
-
+
// Remove the info about this operand.
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
if (AsmWriterInst *Inst = getAsmWriterInstByID(i))
@@ -351,13 +354,13 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
Inst->Operands.begin()+NumOps);
}
}
-
+
// Remember the handlers for this set of operands.
TableDrivenOperandPrinters.push_back(UniqueOperandCommands);
}
-
-
-
+
+
+
O<<" static const unsigned OpInfo[] = {\n";
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
O << " " << OpcodeInfo[i] << "U,\t// "
@@ -366,7 +369,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// Add a dummy entry so the array init doesn't end with a comma.
O << " 0U\n";
O << " };\n\n";
-
+
// Emit the string itself.
O << " const char *AsmStrs = \n";
StringTable.EmitString(O);
@@ -388,13 +391,13 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// ceil(log2(numentries)).
unsigned NumBits = Log2_32_Ceil(Commands.size());
assert(NumBits <= BitsLeft && "consistency error");
-
+
// Emit code to extract this field from Bits.
BitsLeft -= NumBits;
-
+
O << "\n // Fragment " << i << " encoded into " << NumBits
<< " bits for " << Commands.size() << " unique commands.\n";
-
+
if (Commands.size() == 2) {
 // Emit two possibilities with if/else.
O << " if ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
@@ -403,11 +406,14 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
<< " } else {\n"
<< Commands[0]
<< " }\n\n";
+ } else if (Commands.size() == 1) {
+ // Emit a single possibility.
+ O << Commands[0] << "\n\n";
} else {
O << " switch ((Bits >> " << (BitsLeft+AsmStrBits) << ") & "
<< ((1 << NumBits)-1) << ") {\n"
<< " default: // unreachable.\n";
-
+
// Print out all the cases.
for (unsigned i = 0, e = Commands.size(); i != e; ++i) {
O << " case " << i << ":\n";
@@ -417,7 +423,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
O << " }\n\n";
}
}
-
+
// Okay, delete instructions with no operand info left.
for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
// Entire instruction has been emitted?
@@ -428,12 +434,12 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
}
}
-
+
// Because this is a vector, we want to emit from the end. Reverse all of the
// elements in the vector.
std::reverse(Instructions.begin(), Instructions.end());
-
-
+
+
// Now that we've emitted all of the operand info that fit into 32 bits, emit
// information for those instructions that are left. This is a less dense
// encoding, but we expect the main 32-bit table to handle the majority of
@@ -453,11 +459,11 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
const std::vector<CodeGenRegister> &Registers = Target.getRegisters();
-
+
StringToOffsetTable StringTable;
O <<
"\n\n/// getRegisterName - This method is automatically generated by tblgen\n"
@@ -475,33 +481,33 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
std::string AsmName = Reg.TheDef->getValueAsString("AsmName");
if (AsmName.empty())
AsmName = Reg.getName();
-
-
+
+
if ((i % 14) == 0)
O << "\n ";
-
+
O << StringTable.GetOrAddStringOffset(AsmName) << ", ";
}
O << "0\n"
<< " };\n"
<< "\n";
-
+
O << " const char *AsmStrs =\n";
StringTable.EmitString(O);
O << ";\n";
-
+
O << " return AsmStrs+RegAsmOffset[RegNo-1];\n"
<< "}\n";
}
void AsmWriterEmitter::EmitGetInstructionName(raw_ostream &O) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
-
+
StringToOffsetTable StringTable;
O <<
"\n\n#ifdef GET_INSTRUCTION_NAME\n"
@@ -517,21 +523,21 @@ void AsmWriterEmitter::EmitGetInstructionName(raw_ostream &O) {
<< " static const unsigned InstAsmOffset[] = {";
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
const CodeGenInstruction &Inst = *NumberedInstructions[i];
-
+
std::string AsmName = Inst.TheDef->getName();
if ((i % 14) == 0)
O << "\n ";
-
+
O << StringTable.GetOrAddStringOffset(AsmName) << ", ";
}
O << "0\n"
<< " };\n"
<< "\n";
-
+
O << " const char *Strs =\n";
StringTable.EmitString(O);
O << ";\n";
-
+
O << " return Strs+InstAsmOffset[Opcode];\n"
<< "}\n\n#endif\n";
}
@@ -540,7 +546,7 @@ void AsmWriterEmitter::EmitGetInstructionName(raw_ostream &O) {
void AsmWriterEmitter::run(raw_ostream &O) {
EmitSourceFileHeader("Assembly Writer Source Fragment", O);
-
+
EmitPrintInstruction(O);
EmitGetRegisterName(O);
EmitGetInstructionName(O);
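
A self-contained sketch of the bit packing used by EmitPrintInstruction,
with illustrative numbers: the biased string offset occupies the low
AsmStrBits bits of each OpcodeInfo word, and each fragment's command-case
index is packed above the bits still unclaimed below it.

    #include <cassert>
    #include <cstdint>

    int main() {
      // Say MaxStringIdx = 510, so AsmStrBits = Log2_32_Ceil(512) = 9.
      unsigned AsmStrBits = 9;
      unsigned BitsLeft = 32 - AsmStrBits;         // 23 bits for commands
      uint32_t Entry = 0;
      unsigned StringIdx = 42;
      Entry |= StringIdx + 1;                      // bias by 1; 0 is a sentinel

      unsigned NumBits = 3;                        // ceil(log2(5 commands))
      BitsLeft -= NumBits;                         // 20
      unsigned CaseIdx = 4;
      Entry |= CaseIdx << (BitsLeft + AsmStrBits); // lands in bits 29..31

      // Decoding, as the emitted printInstruction does:
      assert((Entry & ((1u << AsmStrBits) - 1)) == StringIdx + 1);
      assert(((Entry >> (BitsLeft + AsmStrBits)) & ((1u << NumBits) - 1))
             == CaseIdx);
      return 0;
    }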
diff --git a/contrib/llvm/utils/TableGen/AsmWriterInst.cpp b/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
index b2228b0..fdf447f 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterInst.cpp
@@ -53,8 +53,6 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
int OperandSpacing) {
this->CGI = &CGI;
- unsigned CurVariant = ~0U; // ~0 if we are outside a {.|.|.} region, other #.
-
// This is the number of tabs we've seen if we're doing columnar layout.
unsigned CurColumn = 0;
@@ -62,54 +60,48 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
// NOTE: Any extensions to this code need to be mirrored in the
// AsmPrinter::printInlineAsm code that executes as compile time (assuming
// that inline asm strings should also get the new feature)!
- const std::string &AsmString = CGI.AsmString;
+ std::string AsmString = CGI.FlattenAsmStringVariants(CGI.AsmString, Variant);
std::string::size_type LastEmitted = 0;
while (LastEmitted != AsmString.size()) {
std::string::size_type DollarPos =
- AsmString.find_first_of("${|}\\", LastEmitted);
+ AsmString.find_first_of("$\\", LastEmitted);
if (DollarPos == std::string::npos) DollarPos = AsmString.size();
// Emit a constant string fragment.
-
if (DollarPos != LastEmitted) {
- if (CurVariant == Variant || CurVariant == ~0U) {
- for (; LastEmitted != DollarPos; ++LastEmitted)
- switch (AsmString[LastEmitted]) {
- case '\n':
- AddLiteralString("\\n");
- break;
- case '\t':
- // If the asm writer is not using a columnar layout, \t is not
- // magic.
- if (FirstOperandColumn == -1 || OperandSpacing == -1) {
- AddLiteralString("\\t");
- } else {
- // We recognize a tab as an operand delimeter.
- unsigned DestColumn = FirstOperandColumn +
- CurColumn++ * OperandSpacing;
- Operands.push_back(
- AsmWriterOperand(
- "O.PadToColumn(" +
- utostr(DestColumn) + ");\n",
- AsmWriterOperand::isLiteralStatementOperand));
- }
- break;
- case '"':
- AddLiteralString("\\\"");
- break;
- case '\\':
- AddLiteralString("\\\\");
- break;
- default:
- AddLiteralString(std::string(1, AsmString[LastEmitted]));
- break;
- }
- } else {
- LastEmitted = DollarPos;
- }
+ for (; LastEmitted != DollarPos; ++LastEmitted)
+ switch (AsmString[LastEmitted]) {
+ case '\n':
+ AddLiteralString("\\n");
+ break;
+ case '\t':
+ // If the asm writer is not using a columnar layout, \t is not
+ // magic.
+ if (FirstOperandColumn == -1 || OperandSpacing == -1) {
+ AddLiteralString("\\t");
+ } else {
+ // We recognize a tab as an operand delimiter.
+ unsigned DestColumn = FirstOperandColumn +
+ CurColumn++ * OperandSpacing;
+ Operands.push_back(
+ AsmWriterOperand(
+ "O.PadToColumn(" +
+ utostr(DestColumn) + ");\n",
+ AsmWriterOperand::isLiteralStatementOperand));
+ }
+ break;
+ case '"':
+ AddLiteralString("\\\"");
+ break;
+ case '\\':
+ AddLiteralString("\\\\");
+ break;
+ default:
+ AddLiteralString(std::string(1, AsmString[LastEmitted]));
+ break;
+ }
} else if (AsmString[DollarPos] == '\\') {
- if (DollarPos+1 != AsmString.size() &&
- (CurVariant == Variant || CurVariant == ~0U)) {
+ if (DollarPos+1 != AsmString.size()) {
if (AsmString[DollarPos+1] == 'n') {
AddLiteralString("\\n");
} else if (AsmString[DollarPos+1] == 't') {
@@ -137,29 +129,9 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
LastEmitted = DollarPos+2;
continue;
}
- } else if (AsmString[DollarPos] == '{') {
- if (CurVariant != ~0U)
- throw "Nested variants found for instruction '" +
- CGI.TheDef->getName() + "'!";
- LastEmitted = DollarPos+1;
- CurVariant = 0; // We are now inside of the variant!
- } else if (AsmString[DollarPos] == '|') {
- if (CurVariant == ~0U)
- throw "'|' character found outside of a variant in instruction '"
- + CGI.TheDef->getName() + "'!";
- ++CurVariant;
- ++LastEmitted;
- } else if (AsmString[DollarPos] == '}') {
- if (CurVariant == ~0U)
- throw "'}' character found outside of a variant in instruction '"
- + CGI.TheDef->getName() + "'!";
- ++LastEmitted;
- CurVariant = ~0U;
} else if (DollarPos+1 != AsmString.size() &&
AsmString[DollarPos+1] == '$') {
- if (CurVariant == Variant || CurVariant == ~0U) {
- AddLiteralString("$"); // "$$" -> $
- }
+ AddLiteralString("$"); // "$$" -> $
LastEmitted = DollarPos+2;
} else {
// Get the name of the variable.
@@ -226,16 +198,12 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI,
Modifier));
} else {
// Otherwise, normal operand.
- unsigned OpNo = CGI.getOperandNamed(VarName);
- CodeGenInstruction::OperandInfo OpInfo = CGI.OperandList[OpNo];
+ unsigned OpNo = CGI.Operands.getOperandNamed(VarName);
+ CGIOperandList::OperandInfo OpInfo = CGI.Operands[OpNo];
- if (CurVariant == Variant || CurVariant == ~0U) {
- unsigned MIOp = OpInfo.MIOperandNo;
- Operands.push_back(AsmWriterOperand(OpInfo.PrinterMethodName,
- OpNo,
- MIOp,
- Modifier));
- }
+ unsigned MIOp = OpInfo.MIOperandNo;
+ Operands.push_back(AsmWriterOperand(OpInfo.PrinterMethodName,
+ OpNo, MIOp, Modifier));
}
LastEmitted = VarEnd;
}
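
The constructor now leans on CGI.FlattenAsmStringVariants instead of
tracking CurVariant inline. A simplified standalone stand-in for that call
(the real handling also deals with escapes, which this sketch ignores):
select the Variant'th alternative in each {a|b|...} group and strip the
braces.

    #include <string>

    // "mov{l|}" with Variant=0 -> "movl"; with Variant=1 -> "mov".
    // Assumes groups do not nest, as the old inline parser also required.
    static std::string flattenVariants(const std::string &S, unsigned Variant) {
      std::string Out;
      for (size_t i = 0; i != S.size(); ++i) {
        if (S[i] != '{') { Out += S[i]; continue; }
        unsigned Cur = 0;
        for (++i; i != S.size() && S[i] != '}'; ++i) {
          if (S[i] == '|') ++Cur;               // next alternative
          else if (Cur == Variant) Out += S[i];
        }
      }
      return Out;
    }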
diff --git a/contrib/llvm/utils/TableGen/AsmWriterInst.h b/contrib/llvm/utils/TableGen/AsmWriterInst.h
index 20b8588..ec7d8eb 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterInst.h
+++ b/contrib/llvm/utils/TableGen/AsmWriterInst.h
@@ -23,51 +23,51 @@
namespace llvm {
class CodeGenInstruction;
class Record;
-
+
struct AsmWriterOperand {
enum OpType {
// Output this text surrounded by quotes to the asm.
- isLiteralTextOperand,
+ isLiteralTextOperand,
// This is the name of a routine to call to print the operand.
isMachineInstrOperand,
// Output this text verbatim to the asm writer. It is code that
// will output some text to the asm.
isLiteralStatementOperand
} OperandType;
-
+
/// Str - For isLiteralTextOperand, this IS the literal text. For
 /// isMachineInstrOperand, this is the PrinterMethodName for the operand.
- /// For isLiteralStatementOperand, this is the code to insert verbatim
+ /// For isLiteralStatementOperand, this is the code to insert verbatim
/// into the asm writer.
std::string Str;
-
+
/// CGIOpNo - For isMachineInstrOperand, this is the index of the operand in
/// the CodeGenInstruction.
unsigned CGIOpNo;
-
+
/// MiOpNo - For isMachineInstrOperand, this is the operand number of the
/// machine instruction.
unsigned MIOpNo;
-
+
/// MiModifier - For isMachineInstrOperand, this is the modifier string for
/// an operand, specified with syntax like ${opname:modifier}.
std::string MiModifier;
-
+
// To make VS STL happy
AsmWriterOperand(OpType op = isLiteralTextOperand):OperandType(op) {}
-
+
AsmWriterOperand(const std::string &LitStr,
OpType op = isLiteralTextOperand)
: OperandType(op), Str(LitStr) {}
-
+
AsmWriterOperand(const std::string &Printer,
unsigned _CGIOpNo,
unsigned _MIOpNo,
const std::string &Modifier,
- OpType op = isMachineInstrOperand)
+ OpType op = isMachineInstrOperand)
: OperandType(op), Str(Printer), CGIOpNo(_CGIOpNo), MIOpNo(_MIOpNo),
MiModifier(Modifier) {}
-
+
bool operator!=(const AsmWriterOperand &Other) const {
if (OperandType != Other.OperandType || Str != Other.Str) return true;
if (OperandType == isMachineInstrOperand)
@@ -77,26 +77,26 @@ namespace llvm {
bool operator==(const AsmWriterOperand &Other) const {
return !operator!=(Other);
}
-
+
/// getCode - Return the code that prints this operand.
std::string getCode() const;
};
-
+
class AsmWriterInst {
public:
std::vector<AsmWriterOperand> Operands;
const CodeGenInstruction *CGI;
-
- AsmWriterInst(const CodeGenInstruction &CGI,
+
+ AsmWriterInst(const CodeGenInstruction &CGI,
unsigned Variant,
int FirstOperandColumn,
int OperandSpacing);
-
+
/// MatchesAllButOneOp - If this instruction is exactly identical to the
/// specified instruction except for one differing operand, return the
/// differing operand number. Otherwise return ~0.
unsigned MatchesAllButOneOp(const AsmWriterInst &Other) const;
-
+
private:
void AddLiteralString(const std::string &Str) {
// If the last operand was already a literal text string, append this to
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
index 7643609..c51afd8 100644
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -26,9 +26,9 @@ void CallingConvEmitter::run(raw_ostream &O) {
// other.
for (unsigned i = 0, e = CCs.size(); i != e; ++i) {
O << "static bool " << CCs[i]->getName()
- << "(unsigned ValNo, EVT ValVT,\n"
+ << "(unsigned ValNo, MVT ValVT,\n"
<< std::string(CCs[i]->getName().size()+13, ' ')
- << "EVT LocVT, CCValAssign::LocInfo LocInfo,\n"
+ << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
<< std::string(CCs[i]->getName().size()+13, ' ')
<< "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
}
@@ -44,9 +44,9 @@ void CallingConvEmitter::EmitCallingConv(Record *CC, raw_ostream &O) {
Counter = 0;
O << "\n\nstatic bool " << CC->getName()
- << "(unsigned ValNo, EVT ValVT,\n"
+ << "(unsigned ValNo, MVT ValVT,\n"
<< std::string(CC->getName().size()+13, ' ')
- << "EVT LocVT, CCValAssign::LocInfo LocInfo,\n"
+ << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
<< std::string(CC->getName().size()+13, ' ')
<< "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
// Emit all of the actions, in order.
@@ -163,12 +163,12 @@ void CallingConvEmitter::EmitAction(Record *Action,
O << Size << ", ";
else
O << "\n" << IndentStr << " State.getTarget().getTargetData()"
- "->getTypeAllocSize(LocVT.getTypeForEVT(State.getContext())), ";
+ "->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())), ";
if (Align)
O << Align;
else
O << "\n" << IndentStr << " State.getTarget().getTargetData()"
- "->getABITypeAlignment(LocVT.getTypeForEVT(State.getContext()))";
+ "->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()))";
if (Action->isSubClassOf("CCAssignToStackWithShadow"))
O << ", " << getQualifiedName(Action->getValueAsDef("ShadowReg"));
O << ");\n" << IndentStr
diff --git a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
index abf9c9a..712333b 100644
--- a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
+++ b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
@@ -57,7 +57,7 @@ class ClangASTNodesEmitter : public TableGenBackend {
public:
explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
const std::string &S)
- : Records(R), Root(N, SMLoc()), BaseSuffix(S)
+ : Records(R), Root(N, SMLoc(), R), BaseSuffix(S)
{}
// run - Output the .inc file contents
diff --git a/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp
index 8d3399a..27e1e02 100644
--- a/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp
@@ -19,8 +19,8 @@
using namespace llvm;
-static const std::vector<StringRef> getValueAsListOfStrings(Record &R,
- StringRef FieldName) {
+static const std::vector<StringRef>
+getValueAsListOfStrings(Record &R, StringRef FieldName) {
ListInit *List = R.getValueAsListInit(FieldName);
assert (List && "Got a null ListInit");
@@ -44,7 +44,8 @@ std::string ReadPCHRecord(StringRef type) {
return StringSwitch<std::string>(type)
.EndsWith("Decl *", "cast_or_null<" + std::string(type, 0, type.size()-1) +
">(GetDecl(Record[Idx++]))")
- .Case("QualType", "ReadTypeRecord(Idx++)")
+ .Case("QualType", "GetType(Record[Idx++])")
+ .Case("Expr *", "ReadSubExpr()")
.Default("Record[Idx++]");
}
@@ -54,6 +55,7 @@ std::string WritePCHRecord(StringRef type, StringRef name) {
.EndsWith("Decl *", "AddDeclRef(" + std::string(name) +
", Record);\n")
.Case("QualType", "AddTypeRef(" + std::string(name) + ", Record);\n")
+ .Case("Expr *", "AddStmt(" + std::string(name) + ");\n")
.Default("Record.push_back(" + std::string(name) + ");\n");
}
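
A plain-C++ mimic of the type-to-reader mapping above (illustrative only;
the real code builds these strings with llvm::StringSwitch): given a
field's C++ type spelling, pick the expression the PCH reader should emit.

    #include <string>

    static std::string readerFor(const std::string &Type) {
      auto endsWith = [&](const std::string &Suffix) {
        return Type.size() >= Suffix.size() &&
               Type.compare(Type.size() - Suffix.size(), Suffix.size(),
                            Suffix) == 0;
      };
      if (endsWith("Decl *"))
        return "cast_or_null<" + Type.substr(0, Type.size() - 1) +
               ">(GetDecl(Record[Idx++]))";
      if (Type == "QualType") return "GetType(Record[Idx++])";
      if (Type == "Expr *")   return "ReadSubExpr()";
      return "Record[Idx++]";
    }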
@@ -171,7 +173,8 @@ namespace {
OS << "char *" << getLowerName() << ";";
}
void writePCHReadDecls(raw_ostream &OS) const {
- OS << " std::string " << getLowerName() << "= ReadString(Record, Idx);\n";
+ OS << " std::string " << getLowerName()
+ << "= ReadString(Record, Idx);\n";
}
void writePCHReadArgs(raw_ostream &OS) const {
OS << getLowerName();
@@ -269,10 +272,10 @@ namespace {
OS << " bool is" << getLowerName() << "Expr = Record[Idx++];\n";
OS << " void *" << getLowerName() << "Ptr;\n";
OS << " if (is" << getLowerName() << "Expr)\n";
- OS << " " << getLowerName() << "Ptr = ReadExpr(DeclsCursor);\n";
+ OS << " " << getLowerName() << "Ptr = ReadExpr(F);\n";
OS << " else\n";
OS << " " << getLowerName()
- << "Ptr = GetTypeSourceInfo(DeclsCursor, Record, Idx);\n";
+ << "Ptr = GetTypeSourceInfo(F, Record, Idx);\n";
}
void writePCHWrite(raw_ostream &OS) const {
OS << " Record.push_back(SA->is" << getUpperName() << "Expr());\n";
@@ -461,8 +464,9 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
i != e; ++i) {
Record &R = **i;
+ const std::string &SuperName = R.getSuperClasses().back()->getName();
- OS << "class " << R.getName() << "Attr : public Attr {\n";
+ OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
std::vector<Argument*> Args;
@@ -493,7 +497,7 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
}
OS << " )\n";
- OS << " : Attr(attr::" << R.getName() << ", L)\n";
+ OS << " : " << SuperName << "(attr::" << R.getName() << ", L)\n";
for (ai = Args.begin(); ai != ae; ++ai) {
OS << " , ";
@@ -557,31 +561,58 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
}
}
+static void EmitAttrList(raw_ostream &OS, StringRef Class,
+ const std::vector<Record*> &AttrList) {
+ std::vector<Record*>::const_iterator i = AttrList.begin(), e = AttrList.end();
+
+ if (i != e) {
+ // Move the end iterator back to emit the last attribute.
+ for(--e; i != e; ++i)
+ OS << Class << "(" << (*i)->getName() << ")\n";
+
+ OS << "LAST_" << Class << "(" << (*i)->getName() << ")\n\n";
+ }
+}
+
void ClangAttrListEmitter::run(raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
OS << "#ifndef LAST_ATTR\n";
OS << "#define LAST_ATTR(NAME) ATTR(NAME)\n";
OS << "#endif\n\n";
-
- std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
- std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
- if (i != e) {
- // Move the end iterator back to emit the last attribute.
- for(--e; i != e; ++i)
- OS << "ATTR(" << (*i)->getName() << ")\n";
-
- OS << "LAST_ATTR(" << (*i)->getName() << ")\n\n";
+ OS << "#ifndef INHERITABLE_ATTR\n";
+ OS << "#define INHERITABLE_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_INHERITABLE_ATTR\n";
+ OS << "#define LAST_INHERITABLE_ATTR(NAME) INHERITABLE_ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ Record *InhClass = Records.getClass("InheritableAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ NonInhAttrs, InhAttrs;
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ if ((*i)->isSubClassOf(InhClass))
+ InhAttrs.push_back(*i);
+ else
+ NonInhAttrs.push_back(*i);
}
+ EmitAttrList(OS, "INHERITABLE_ATTR", InhAttrs);
+ EmitAttrList(OS, "ATTR", NonInhAttrs);
+
OS << "#undef LAST_ATTR\n";
+ OS << "#undef INHERITABLE_ATTR\n";
+ OS << "#undef LAST_INHERITABLE_ATTR\n";
OS << "#undef ATTR\n";
}
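
A consumer-side sketch of the generated list (the .inc name is
hypothetical). Because the generated file defaults INHERITABLE_ATTR to
ATTR and #undefs all four macros at the end, a client that wants one case
per attribute only needs to define ATTR before including it:

    // Hypothetical client of the generated attribute list.
    #define ATTR(NAME) k##NAME##Attr,
    enum AttrKind {
    #include "AttrList.inc"   // hypothetical generated file name
      NumAttrKinds
    };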
void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
- OS << "// This file is generated by TableGen. Do not edi.\n\n";
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+ Record *InhClass = Records.getClass("InheritableAttr");
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
ArgRecords;
std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
@@ -595,6 +626,8 @@ void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
for (; i != e; ++i) {
Record &R = **i;
OS << " case attr::" << R.getName() << ": {\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " bool isInherited = Record[Idx++];\n";
ArgRecords = R.getValueAsListOfDefs("Args");
Args.clear();
for (ai = ArgRecords.begin(), ae = ArgRecords.end(); ai != ae; ++ai) {
@@ -608,6 +641,8 @@ void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
(*ri)->writePCHReadArgs(OS);
}
OS << ");\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
OS << " break;\n";
OS << " }\n";
}
@@ -615,6 +650,7 @@ void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
}
void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
+ Record *InhClass = Records.getClass("InheritableAttr");
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
@@ -626,9 +662,11 @@ void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
Record &R = **i;
OS << " case attr::" << R.getName() << ": {\n";
Args = R.getValueAsListOfDefs("Args");
- if (!Args.empty())
+ if (R.isSubClassOf(InhClass) || !Args.empty())
OS << " const " << R.getName() << "Attr *SA = cast<" << R.getName()
<< "Attr>(A);\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " Record.push_back(SA->isInherited());\n";
for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
createArgument(**ai, R.getName())->writePCHWrite(OS);
OS << " break;\n";
@@ -636,3 +674,21 @@ void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
}
OS << " }\n";
}
+
+void ClangAttrSpellingListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
+ Record &Attr = **I;
+
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(), E = Spellings.end(); I != E; ++I) {
+ StringRef Spelling = *I;
+ OS << ".Case(\"" << Spelling << "\", true)\n";
+ }
+ }
+
+}
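
A usage sketch for the emitted ".Case" lines (the include name is
hypothetical): they are meant to be dropped into the middle of a
llvm::StringSwitch, yielding a known-spelling predicate.

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    // Returns true if Name is a recognized attribute spelling.
    static bool isKnownAttrSpelling(llvm::StringRef Name) {
      return llvm::StringSwitch<bool>(Name)
    #include "AttrSpellings.inc"   // hypothetical generated file name
        .Default(false);
    }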
diff --git a/contrib/llvm/utils/TableGen/ClangAttrEmitter.h b/contrib/llvm/utils/TableGen/ClangAttrEmitter.h
index 8314982..af87009 100644
--- a/contrib/llvm/utils/TableGen/ClangAttrEmitter.h
+++ b/contrib/llvm/utils/TableGen/ClangAttrEmitter.h
@@ -83,6 +83,19 @@ public:
void run(raw_ostream &OS);
};
+/// ClangAttrSpellingListEmitter - Emits the list of spellings for attributes
+/// for clang.
+class ClangAttrSpellingListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrSpellingListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
}
#endif
diff --git a/contrib/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 75b6252..60e67c4 100644
--- a/contrib/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -29,9 +29,10 @@ using namespace llvm;
namespace {
class DiagGroupParentMap {
+ RecordKeeper &Records;
std::map<const Record*, std::vector<Record*> > Mapping;
public:
- DiagGroupParentMap() {
+ DiagGroupParentMap(RecordKeeper &records) : Records(records) {
std::vector<Record*> DiagGroups
= Records.getAllDerivedDefinitions("DiagGroup");
for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
@@ -84,11 +85,12 @@ static std::string getDiagnosticCategory(const Record *R,
namespace {
class DiagCategoryIDMap {
+ RecordKeeper &Records;
StringMap<unsigned> CategoryIDs;
std::vector<std::string> CategoryStrings;
public:
- DiagCategoryIDMap() {
- DiagGroupParentMap ParentInfo;
+ DiagCategoryIDMap(RecordKeeper &records) : Records(records) {
+ DiagGroupParentMap ParentInfo(Records);
// The zero'th category is "".
CategoryStrings.push_back("");
@@ -138,8 +140,8 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
const std::vector<Record*> &Diags =
Records.getAllDerivedDefinitions("Diagnostic");
- DiagCategoryIDMap CategoryIDs;
- DiagGroupParentMap DGParentMap;
+ DiagCategoryIDMap CategoryIDs(Records);
+ DiagGroupParentMap DGParentMap(Records);
for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
const Record &R = *Diags[i];
@@ -168,7 +170,13 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
OS << ", true";
else
OS << ", false";
-
+
+ // Access control bit
+ if (R.getValueAsBit("AccessControl"))
+ OS << ", true";
+ else
+ OS << ", false";
+
// Category number.
OS << ", " << CategoryIDs.getID(getDiagnosticCategory(&R, DGParentMap));
OS << ")\n";
@@ -179,15 +187,17 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
// Warning Group Tables generation
//===----------------------------------------------------------------------===//
+namespace {
struct GroupInfo {
std::vector<const Record*> DiagsInGroup;
std::vector<std::string> SubGroups;
unsigned IDNo;
};
+} // end anonymous namespace.
void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
// Compute a mapping from a DiagGroup to all of its parents.
- DiagGroupParentMap DGParentMap;
+ DiagGroupParentMap DGParentMap(Records);
// Invert the 1-[0/1] mapping of diags to group into a one to many mapping of
// groups to diags in the group.
@@ -277,7 +287,7 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
OS << "#endif // GET_DIAG_TABLE\n\n";
// Emit the category table next.
- DiagCategoryIDMap CategoriesByID;
+ DiagCategoryIDMap CategoriesByID(Records);
OS << "\n#ifdef GET_CATEGORY_TABLE\n";
for (DiagCategoryIDMap::iterator I = CategoriesByID.begin(),
E = CategoriesByID.end(); I != E; ++I)
diff --git a/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.cpp
new file mode 100644
index 0000000..3e49ab1
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -0,0 +1,229 @@
+//=- ClangSACheckersEmitter.cpp - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckersEmitter.h"
+#include "Record.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+#include <string>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Static Analyzer Checkers Tables generation
+//===----------------------------------------------------------------------===//
+
+/// \brief True if the checker is specified as hidden or if a parent package
+/// is specified as hidden, otherwise false.
+static bool isHidden(const Record &R) {
+ if (R.getValueAsBit("Hidden"))
+ return true;
+ // Not declared as hidden, check the parent package if it is hidden.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("ParentPackage")))
+ return isHidden(*DI->getDef());
+
+ return false;
+}
+
+static bool isCheckerNamed(const Record *R) {
+ return !R->getValueAsString("CheckerName").empty();
+}
+
+static std::string getPackageFullName(const Record *R);
+
+static std::string getParentPackageFullName(const Record *R) {
+ std::string name;
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ name = getPackageFullName(DI->getDef());
+ return name;
+}
+
+static std::string getPackageFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (!name.empty()) name += ".";
+ return name + R->getValueAsString("PackageName");
+}
+
+static std::string getCheckerFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (isCheckerNamed(R)) {
+ if (!name.empty()) name += ".";
+ name += R->getValueAsString("CheckerName");
+ }
+ return name;
+}
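
A self-contained sketch of the dotted-name scheme these helpers implement:
a package's full name is prefixed by all of its parents, and a named
checker appends its own name, e.g. parent "core", package "uninitialized",
checker "Assign" -> "core.uninitialized.Assign". (Node here is a stand-in
for the Record/ParentPackage links.)

    #include <string>

    struct Node { const Node *Parent; std::string Name; };

    static std::string fullName(const Node *N) {
      if (!N) return std::string();
      std::string Prefix = fullName(N->Parent);
      if (!Prefix.empty()) Prefix += ".";
      return Prefix + N->Name;
    }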
+
+static std::string getStringValue(const Record &R, StringRef field) {
+ if (StringInit *
+ SI = dynamic_cast<StringInit*>(R.getValueInit(field)))
+ return SI->getValue();
+ return std::string();
+}
+
+namespace {
+struct GroupInfo {
+ std::vector<const Record*> Checkers;
+ llvm::DenseSet<const Record *> SubGroups;
+ bool Hidden;
+ unsigned Index;
+
+ GroupInfo() : Hidden(false) { }
+};
+}
+
+void ClangSACheckersEmitter::run(raw_ostream &OS) {
+ std::vector<Record*> checkers = Records.getAllDerivedDefinitions("Checker");
+ llvm::DenseMap<const Record *, unsigned> checkerRecIndexMap;
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i)
+ checkerRecIndexMap[checkers[i]] = i;
+
+ OS << "\n#ifdef GET_CHECKERS\n";
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ const Record &R = *checkers[i];
+
+ OS << "CHECKER(" << "\"";
+ std::string name;
+ if (isCheckerNamed(&R))
+ name = getCheckerFullName(&R);
+ OS.write_escaped(name) << "\", ";
+ OS << R.getName() << ", ";
+ OS << getStringValue(R, "DescFile") << ", ";
+ OS << "\"";
+ OS.write_escaped(getStringValue(R, "HelpText")) << "\", ";
+ // Hidden bit
+ if (isHidden(R))
+ OS << "true";
+ else
+ OS << "false";
+ OS << ")\n";
+ }
+ OS << "#endif // GET_CHECKERS\n\n";
+
+ // Invert the mapping of checkers to package/group into a one-to-many
+ // mapping of packages/groups to checkers.
+ std::map<std::string, GroupInfo> groupInfoByName;
+ llvm::DenseMap<const Record *, GroupInfo *> recordGroupMap;
+
+ std::vector<Record*> packages = Records.getAllDerivedDefinitions("Package");
+ for (unsigned i = 0, e = packages.size(); i != e; ++i) {
+ Record *R = packages[i];
+ std::string fullName = getPackageFullName(R);
+ if (!fullName.empty()) {
+ GroupInfo &info = groupInfoByName[fullName];
+ info.Hidden = isHidden(*R);
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ std::vector<Record*>
+ checkerGroups = Records.getAllDerivedDefinitions("CheckerGroup");
+ for (unsigned i = 0, e = checkerGroups.size(); i != e; ++i) {
+ Record *R = checkerGroups[i];
+ std::string name = R->getValueAsString("GroupName");
+ if (!name.empty()) {
+ GroupInfo &info = groupInfoByName[name];
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ Record *R = checkers[i];
+ Record *package = 0;
+ if (DefInit *
+ DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ package = DI->getDef();
+ if (!isCheckerNamed(R) && !package)
+ throw "Checker '" + R->getName() + "' is neither named, nor in a package!";
+
+ if (isCheckerNamed(R)) {
+ // Create a pseudo-group to hold this checker.
+ std::string fullName = getCheckerFullName(R);
+ GroupInfo &info = groupInfoByName[fullName];
+ recordGroupMap[R] = &info;
+ info.Checkers.push_back(R);
+ } else {
+ recordGroupMap[package]->Checkers.push_back(R);
+ }
+
+ Record *currR = isCheckerNamed(R) ? R : package;
+ // Insert the checker and its parent packages into the subgroups set of
+ // the corresponding parent package.
+ while (DefInit *DI
+ = dynamic_cast<DefInit*>(currR->getValueInit("ParentPackage"))) {
+ Record *parentPackage = DI->getDef();
+ recordGroupMap[parentPackage]->SubGroups.insert(currR);
+ currR = parentPackage;
+ }
+ // Insert the checker into the set of its group.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ recordGroupMap[DI->getDef()]->Checkers.push_back(R);
+ }
+
+ unsigned index = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I)
+ I->second.Index = index++;
+
+ // Walk through the packages/groups/checkers emitting an array for each
+ // set of checkers and an array for each set of subpackages.
+
+ OS << "\n#ifdef GET_MEMBER_ARRAYS\n";
+ unsigned maxLen = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ maxLen = std::max(maxLen, (unsigned)I->first.size());
+
+ std::vector<const Record*> &V = I->second.Checkers;
+ if (!V.empty()) {
+ OS << "static const short CheckerArray" << I->second.Index << "[] = { ";
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ OS << checkerRecIndexMap[V[i]] << ", ";
+ OS << "-1 };\n";
+ }
+
+ llvm::DenseSet<const Record *> &subGroups = I->second.SubGroups;
+ if (!subGroups.empty()) {
+ OS << "static const short SubPackageArray" << I->second.Index << "[] = { ";
+ for (llvm::DenseSet<const Record *>::iterator
+ I = subGroups.begin(), E = subGroups.end(); I != E; ++I) {
+ OS << recordGroupMap[*I]->Index << ", ";
+ }
+ OS << "-1 };\n";
+ }
+ }
+ OS << "#endif // GET_MEMBER_ARRAYS\n\n";
+
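+  // Each row emitted below has the shape (values illustrative):
+  //   { "core",            CheckerArray0, SubPackageArray0, false },
+  // with 0 standing in for a missing checker or subpackage array.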
+ OS << "\n#ifdef GET_CHECKNAME_TABLE\n";
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ // Group option string.
+ OS << " { \"";
+ OS.write_escaped(I->first) << "\","
+ << std::string(maxLen-I->first.size()+1, ' ');
+
+ if (I->second.Checkers.empty())
+ OS << "0, ";
+ else
+ OS << "CheckerArray" << I->second.Index << ", ";
+
+ // Subgroups.
+ if (I->second.SubGroups.empty())
+ OS << "0, ";
+ else
+ OS << "SubPackageArray" << I->second.Index << ", ";
+
+ OS << (I->second.Hidden ? "true" : "false");
+
+ OS << " },\n";
+ }
+ OS << "#endif // GET_CHECKNAME_TABLE\n\n";
+}
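A note on consumption: the tables this backend writes are meant to be
expanded by a client that defines the CHECKER macro before including the
generated file. A minimal sketch, assuming a hypothetical output file name
Checkers.inc; the struct and field names are illustrative, but the macro
arity matches the CHECKER(...) lines emitted above:

    // Hypothetical consumer of the generated table (illustrative names).
    struct CheckerInfo {
      const char *FullName;   // e.g. "core.NullDereference"
      const char *HelpText;
      bool Hidden;
    };

    static const CheckerInfo Checkers[] = {
    #define CHECKER(FULLNAME, CLASS, DESCFILE, HELPTEXT, HIDDEN) \
      { FULLNAME, HELPTEXT, HIDDEN },
    #define GET_CHECKERS
    #include "Checkers.inc"
    #undef GET_CHECKERS
    #undef CHECKER
    };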
diff --git a/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.h b/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.h
new file mode 100644
index 0000000..6bd1635
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/ClangSACheckersEmitter.h
@@ -0,0 +1,31 @@
+//===- ClangSACheckersEmitter.h - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGSACHECKERS_EMITTER_H
+#define CLANGSACHECKERS_EMITTER_H
+
+#include "TableGenBackend.h"
+
+namespace llvm {
+
+class ClangSACheckersEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangSACheckersEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
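For context, backends like this one are run from tblgen's driver; a minimal
sketch of the dispatch, assuming an illustrative GenClangSACheckers action
value (not taken from this diff):

    // Hypothetical arm of tblgen's backend switch (names illustrative).
    case GenClangSACheckers:
      ClangSACheckersEmitter(Records).run(Out);
      break;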
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index ec702c2..957dd19 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -17,9 +17,19 @@
#include "CodeGenTarget.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include <map>
using namespace llvm;
+// FIXME: Somewhat hackish to use a command line option for this. There should
+// be a CodeEmitter class in the Target.td that controls this sort of thing
+// instead.
+static cl::opt<bool>
+MCEmitter("mc-emitter",
+ cl::desc("Generate CodeEmitter for use with the MC library."),
+ cl::init(false));
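+// (Illustrative invocation: "tblgen -gen-emitter -mc-emitter Target.td";
+// the exact command line depends on how the build drives tblgen.)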
+
void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
for (std::vector<Record*>::iterator I = Insts.begin(), E = Insts.end();
I != E; ++I) {
@@ -42,46 +52,171 @@ void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
unsigned middle = (numBits + 1) / 2;
NewBI->setBit(middle, BI->getBit(middle));
}
-
+
// Update the bits in reversed order so that emitInstrOpBits will get the
// correct endianness.
R->getValue("Inst")->setValue(NewBI);
}
}
-
// If the VarBitInit at position 'bit' matches the specified variable then
// return the variable bit position. Otherwise return -1.
int CodeEmitterGen::getVariableBit(const std::string &VarName,
- BitsInit *BI, int bit) {
- if (VarBitInit *VBI = dynamic_cast<VarBitInit*>(BI->getBit(bit))) {
- TypedInit *TI = VBI->getVariable();
+ BitsInit *BI, int bit) {
+ if (VarBitInit *VBI = dynamic_cast<VarBitInit*>(BI->getBit(bit)))
+ if (VarInit *VI = dynamic_cast<VarInit*>(VBI->getVariable()))
+ if (VI->getName() == VarName)
+ return VBI->getBitNum();
+
+ return -1;
+}
+
+void CodeEmitterGen::
+AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
+ unsigned &NumberedOp,
+ std::string &Case, CodeGenTarget &Target) {
+ CodeGenInstruction &CGI = Target.getInstruction(R);
+
+ // Determine if VarName actually contributes to the Inst encoding.
+ int bit = BI->getNumBits()-1;
+
+ // Scan for a bit that this contributed to.
+ for (; bit >= 0; ) {
+ if (getVariableBit(VarName, BI, bit) != -1)
+ break;
- if (VarInit *VI = dynamic_cast<VarInit*>(TI)) {
- if (VI->getName() == VarName) return VBI->getBitNum();
+ --bit;
+ }
+
+ // If we found no bits, ignore this value; otherwise emit the call to get the
+ // operand encoding.
+ if (bit < 0) return;
+
+ // If the operand matches by name, reference according to that
+ // operand number. Non-matching operands are assumed to be in
+ // order.
+ unsigned OpIdx;
+ if (CGI.Operands.hasOperandNamed(VarName, OpIdx)) {
+ // Get the machine operand number for the indicated operand.
+ OpIdx = CGI.Operands[OpIdx].MIOperandNo;
+ assert(!CGI.Operands.isFlatOperandNotEmitted(OpIdx) &&
+ "Explicitly used operand also marked as not emitted!");
+ } else {
+ /// If this operand is not supposed to be emitted by the
+ /// generated emitter, skip it.
+ while (CGI.Operands.isFlatOperandNotEmitted(NumberedOp))
+ ++NumberedOp;
+ OpIdx = NumberedOp++;
+ }
+
+ std::pair<unsigned, unsigned> SO = CGI.Operands.getSubOperandNumber(OpIdx);
+ std::string &EncoderMethodName = CGI.Operands[SO.first].EncoderMethodName;
+
+ // If the source operand has a custom encoder, use it. This will
+ // get the encoding for all of the suboperands.
+ if (!EncoderMethodName.empty()) {
+ // A custom encoder has all of the information for the
+ // sub-operands, if there are more than one, so only
+ // query the encoder once per source operand.
+ if (SO.second == 0) {
+ Case += " // op: " + VarName + "\n" +
+ " op = " + EncoderMethodName + "(MI, " + utostr(OpIdx);
+ if (MCEmitter)
+ Case += ", Fixups";
+ Case += ");\n";
}
+ } else {
+ Case += " // op: " + VarName + "\n" +
+ " op = getMachineOpValue(MI, MI.getOperand(" + utostr(OpIdx) + ")";
+ if (MCEmitter)
+ Case += ", Fixups";
+ Case += ");\n";
}
- return -1;
-}
+ for (; bit >= 0; ) {
+ int varBit = getVariableBit(VarName, BI, bit);
+
+ // If this bit isn't from a variable, skip it.
+ if (varBit == -1) {
+ --bit;
+ continue;
+ }
+
+ // Figure out the consecutive range of bits covered by this operand, in
+ // order to generate better encoding code.
+ int beginInstBit = bit;
+ int beginVarBit = varBit;
+ int N = 1;
+ for (--bit; bit >= 0;) {
+ varBit = getVariableBit(VarName, BI, bit);
+ if (varBit == -1 || varBit != (beginVarBit - N)) break;
+ ++N;
+ --bit;
+ }
+
+ unsigned opMask = ~0U >> (32-N);
+ int opShift = beginVarBit - N + 1;
+ opMask <<= opShift;
+ opShift = beginInstBit - beginVarBit;
+
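+    // Worked example (illustrative): operand bits 4..0 feeding instruction
+    // bits 7..3 gives N = 5, opMask = 31 (shifted left by 0), and finally
+    // opShift = 7 - 4 = 3, so we emit "Value |= (op & 31U) << 3;".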
+ if (opShift > 0) {
+ Case += " Value |= (op & " + utostr(opMask) + "U) << " +
+ itostr(opShift) + ";\n";
+ } else if (opShift < 0) {
+ Case += " Value |= (op & " + utostr(opMask) + "U) >> " +
+ itostr(-opShift) + ";\n";
+ } else {
+ Case += " Value |= op & " + utostr(opMask) + "U;\n";
+ }
+ }
+}
+std::string CodeEmitterGen::getInstructionCase(Record *R,
+ CodeGenTarget &Target) {
+ std::string Case;
+
+ BitsInit *BI = R->getValueAsBitsInit("Inst");
+ const std::vector<RecordVal> &Vals = R->getValues();
+ unsigned NumberedOp = 0;
+
+ // Loop over all of the fields in the instruction, determining which are the
+ // operands to the instruction.
+ for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ // Ignore fixed fields in the record; we're looking for values like:
+ // bits<5> RST = { ?, ?, ?, ?, ? };
+ if (Vals[i].getPrefix() || Vals[i].getValue()->isComplete())
+ continue;
+
+ AddCodeToMergeInOperand(R, BI, Vals[i].getName(), NumberedOp, Case, Target);
+ }
+
+ std::string PostEmitter = R->getValueAsString("PostEncoderMethod");
+ if (!PostEmitter.empty())
+ Case += " Value = " + PostEmitter + "(MI, Value);\n";
+
+ return Case;
+}
+
void CodeEmitterGen::run(raw_ostream &o) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
-
+
// For little-endian instruction bit encodings, reverse the bit order
if (Target.isLittleEndianEncoding()) reverseBits(Insts);
EmitSourceFileHeader("Machine Code Emitter", o);
- std::string Namespace = Insts[0]->getValueAsString("Namespace") + "::";
-
+
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
// Emit function declaration
- o << "unsigned " << Target.getName() << "CodeEmitter::"
- << "getBinaryCodeForInstr(const MachineInstr &MI) {\n";
+ o << "unsigned " << Target.getName();
+ if (MCEmitter)
+ o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+ << " SmallVectorImpl<MCFixup> &Fixups) const {\n";
+ else
+ o << "CodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) const {\n";
// Emit instruction base values
o << " static const unsigned InstBits[] = {\n";
@@ -91,109 +226,45 @@ void CodeEmitterGen::run(raw_ostream &o) {
IN != EN; ++IN) {
const CodeGenInstruction *CGI = *IN;
Record *R = CGI->TheDef;
-
+
if (R->getValueAsString("Namespace") == "TargetOpcode") {
o << " 0U,\n";
continue;
}
-
+
BitsInit *BI = R->getValueAsBitsInit("Inst");
- // Start by filling in fixed values...
+ // Start by filling in fixed values.
unsigned Value = 0;
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
- if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1))) {
+ if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1)))
Value |= B->getValue() << (e-i-1);
- }
}
o << " " << Value << "U," << '\t' << "// " << R->getName() << "\n";
}
o << " 0U\n };\n";
-
+
// Map to accumulate all the cases.
std::map<std::string, std::vector<std::string> > CaseMap;
-
+
// Construct the case statement for each opcode
for (std::vector<Record*>::iterator IC = Insts.begin(), EC = Insts.end();
IC != EC; ++IC) {
Record *R = *IC;
if (R->getValueAsString("Namespace") == "TargetOpcode")
continue;
- const std::string &InstName = R->getName();
- std::string Case("");
-
- BitsInit *BI = R->getValueAsBitsInit("Inst");
- const std::vector<RecordVal> &Vals = R->getValues();
- CodeGenInstruction &CGI = Target.getInstruction(R);
-
- // Loop over all of the fields in the instruction, determining which are the
- // operands to the instruction.
- unsigned op = 0;
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
- if (!Vals[i].getPrefix() && !Vals[i].getValue()->isComplete()) {
- // Is the operand continuous? If so, we can just mask and OR it in
- // instead of doing it bit-by-bit, saving a lot in runtime cost.
- const std::string &VarName = Vals[i].getName();
- bool gotOp = false;
-
- for (int bit = BI->getNumBits()-1; bit >= 0; ) {
- int varBit = getVariableBit(VarName, BI, bit);
-
- if (varBit == -1) {
- --bit;
- } else {
- int beginInstBit = bit;
- int beginVarBit = varBit;
- int N = 1;
-
- for (--bit; bit >= 0;) {
- varBit = getVariableBit(VarName, BI, bit);
- if (varBit == -1 || varBit != (beginVarBit - N)) break;
- ++N;
- --bit;
- }
-
- if (!gotOp) {
- /// If this operand is not supposed to be emitted by the generated
- /// emitter, skip it.
- while (CGI.isFlatOperandNotEmitted(op))
- ++op;
-
- Case += " // op: " + VarName + "\n"
- + " op = getMachineOpValue(MI, MI.getOperand("
- + utostr(op++) + "));\n";
- gotOp = true;
- }
-
- unsigned opMask = ~0U >> (32-N);
- int opShift = beginVarBit - N + 1;
- opMask <<= opShift;
- opShift = beginInstBit - beginVarBit;
-
- if (opShift > 0) {
- Case += " Value |= (op & " + utostr(opMask) + "U) << "
- + itostr(opShift) + ";\n";
- } else if (opShift < 0) {
- Case += " Value |= (op & " + utostr(opMask) + "U) >> "
- + itostr(-opShift) + ";\n";
- } else {
- Case += " Value |= op & " + utostr(opMask) + "U;\n";
- }
- }
- }
- }
- }
+ const std::string &InstName = R->getValueAsString("Namespace") + "::"
+ + R->getName();
+ std::string Case = getInstructionCase(R, Target);
- std::vector<std::string> &InstList = CaseMap[Case];
- InstList.push_back(InstName);
+ CaseMap[Case].push_back(InstName);
}
-
// Emit initial function code
o << " const unsigned opcode = MI.getOpcode();\n"
<< " unsigned Value = InstBits[opcode];\n"
<< " unsigned op = 0;\n"
- << " op = op; // suppress warning\n"
+ << " (void)op; // suppress warning\n"
<< " switch (opcode) {\n";
// Emit each case statement
@@ -204,7 +275,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
for (int i = 0, N = InstList.size(); i < N; i++) {
if (i) o << "\n";
- o << " case " << Namespace << InstList[i] << ":";
+ o << " case " << InstList[i] << ":";
}
o << " {\n";
o << Case;
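To make the mask/shift merge concrete, here is a small standalone C++ sketch
(not LLVM code; the mask, shift, and test values are illustrative) of the
statement shape the generated getBinaryCodeForInstr contains for an operand
whose bits 4..0 land in instruction bits 7..3:

    #include <cassert>

    // Mimics "Value |= (op & opMask) << opShift;" with opMask = 31, opShift = 3.
    static unsigned encode(unsigned instBits, unsigned op) {
      unsigned Value = instBits;   // fixed bits, as read from InstBits[]
      Value |= (op & 31U) << 3;    // merge the operand field
      return Value;
    }

    int main() {
      assert(encode(0x0, 0x1F) == 0xF8);    // all five operand bits set
      assert(encode(0x700, 0x15) == 0x7A8); // fixed bits OR'd with the field
      return 0;
    }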
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.h b/contrib/llvm/utils/TableGen/CodeEmitterGen.h
index f0b3229..a874d97 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.h
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.h
@@ -15,7 +15,6 @@
#define CODEMITTERGEN_H
#include "TableGenBackend.h"
-#include <map>
#include <vector>
#include <string>
@@ -23,6 +22,7 @@ namespace llvm {
class RecordVal;
class BitsInit;
+class CodeGenTarget;
class CodeEmitterGen : public TableGenBackend {
RecordKeeper &Records;
@@ -36,6 +36,12 @@ private:
void emitGetValueBit(raw_ostream &o, const std::string &Namespace);
void reverseBits(std::vector<Record*> &Insts);
int getVariableBit(const std::string &VarName, BitsInit *BI, int bit);
+ std::string getInstructionCase(Record *R, CodeGenTarget &Target);
+ void
+ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
+ unsigned &NumberedOp,
+ std::string &Case, CodeGenTarget &Target);
+
};
} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index 303aa6c..aa60f87 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -56,11 +56,11 @@ EEVT::TypeSet::TypeSet(MVT::SimpleValueType VT, TreePattern &TP) {
EEVT::TypeSet::TypeSet(const std::vector<MVT::SimpleValueType> &VTList) {
assert(!VTList.empty() && "empty list?");
TypeVec.append(VTList.begin(), VTList.end());
-
+
if (!VTList.empty())
assert(VTList[0] != MVT::iAny && VTList[0] != MVT::vAny &&
VTList[0] != MVT::fAny);
-
+
// Verify no duplicates.
array_pod_sort(TypeVec.begin(), TypeVec.end());
assert(std::unique(TypeVec.begin(), TypeVec.end()) == TypeVec.end());
@@ -72,9 +72,9 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
bool (*Pred)(MVT::SimpleValueType),
const char *PredicateName) {
assert(isCompletelyUnknown());
- const std::vector<MVT::SimpleValueType> &LegalTypes =
+ const std::vector<MVT::SimpleValueType> &LegalTypes =
TP.getDAGPatterns().getTargetInfo().getLegalValueTypes();
-
+
for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i)
if (Pred == 0 || Pred(LegalTypes[i]))
TypeVec.push_back(LegalTypes[i]);
@@ -82,14 +82,14 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
// If we have nothing that matches the predicate, bail out.
if (TypeVec.empty())
TP.error("Type inference contradiction found, no " +
- std::string(PredicateName) + " types found");
+ std::string(PredicateName) + " types found");
// No need to sort with one element.
if (TypeVec.size() == 1) return true;
// Remove duplicates.
array_pod_sort(TypeVec.begin(), TypeVec.end());
TypeVec.erase(std::unique(TypeVec.begin(), TypeVec.end()), TypeVec.end());
-
+
return true;
}
@@ -100,7 +100,7 @@ bool EEVT::TypeSet::hasIntegerTypes() const {
if (isInteger(TypeVec[i]))
return true;
return false;
-}
+}
/// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
/// a floating point value type.
@@ -109,7 +109,7 @@ bool EEVT::TypeSet::hasFloatingPointTypes() const {
if (isFloatingPoint(TypeVec[i]))
return true;
return false;
-}
+}
/// hasVectorTypes - Return true if this TypeSet contains a vAny or a vector
/// value type.
@@ -123,9 +123,9 @@ bool EEVT::TypeSet::hasVectorTypes() const {
std::string EEVT::TypeSet::getName() const {
if (TypeVec.empty()) return "<empty>";
-
+
std::string Result;
-
+
for (unsigned i = 0, e = TypeVec.size(); i != e; ++i) {
std::string VTName = llvm::getEnumName(TypeVec[i]);
// Strip off MVT:: prefix if present.
@@ -134,7 +134,7 @@ std::string EEVT::TypeSet::getName() const {
if (i) Result += ':';
Result += VTName;
}
-
+
if (TypeVec.size() == 1)
return Result;
return "{" + Result + "}";
@@ -146,14 +146,14 @@ std::string EEVT::TypeSet::getName() const {
bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
if (InVT.isCompletelyUnknown() || *this == InVT)
return false;
-
+
if (isCompletelyUnknown()) {
*this = InVT;
return true;
}
-
+
assert(TypeVec.size() >= 1 && InVT.TypeVec.size() >= 1 && "No unknowns");
-
+
// Handle the abstract cases, seeing if we can resolve them better.
switch (TypeVec[0]) {
default: break;
@@ -163,26 +163,26 @@ bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
EEVT::TypeSet InCopy(InVT);
InCopy.EnforceInteger(TP);
InCopy.EnforceScalar(TP);
-
+
if (InCopy.isConcrete()) {
// If the RHS has one integer type, upgrade iPTR to i32.
TypeVec[0] = InVT.TypeVec[0];
return true;
}
-
+
// If the input has multiple scalar integers, this doesn't add any info.
if (!InCopy.isCompletelyUnknown())
return false;
}
break;
}
-
+
// If the input constraint is iAny/iPTR and this is an integer type list,
// remove non-integer types from the list.
if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
hasIntegerTypes()) {
bool MadeChange = EnforceInteger(TP);
-
+
// If we're merging in iPTR/iPTRAny and the node currently has a list of
// multiple different integer types, replace them with a single iPTR.
if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
@@ -191,10 +191,10 @@ bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
TypeVec[0] = InVT.TypeVec[0];
MadeChange = true;
}
-
+
return MadeChange;
}
-
+
// If this is a type list and the RHS is a typelist as well, eliminate entries
// from this list that aren't in the other one.
bool MadeChange = false;
@@ -207,16 +207,16 @@ bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
InInVT = true;
break;
}
-
+
if (InInVT) continue;
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
-
+
// If we removed all of our types, we have a type contradiction.
if (!TypeVec.empty())
return MadeChange;
-
+
// FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, merging '" +
InVT.getName() + "' into '" + InputSet.getName() + "'");
@@ -232,12 +232,12 @@ bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
return false;
TypeSet InputSet(*this);
-
+
// Filter out all the fp types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isInteger(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
-
+
if (TypeVec.empty())
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be integer");
@@ -254,12 +254,12 @@ bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
return false;
TypeSet InputSet(*this);
-
+
// Filter out all the fp types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isFloatingPoint(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
-
+
if (TypeVec.empty())
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be floating point");
@@ -276,12 +276,12 @@ bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
return false;
TypeSet InputSet(*this);
-
+
// Filter out all the vector types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isScalar(TypeVec[i]))
TypeVec.erase(TypeVec.begin()+i--);
-
+
if (TypeVec.empty())
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be scalar");
@@ -296,14 +296,14 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
TypeSet InputSet(*this);
bool MadeChange = false;
-
+
// Filter out all the scalar types.
for (unsigned i = 0; i != TypeVec.size(); ++i)
if (!isVector(TypeVec[i])) {
TypeVec.erase(TypeVec.begin()+i--);
MadeChange = true;
}
-
+
if (TypeVec.empty())
TP.error("Type inference contradiction found, '" +
InputSet.getName() + "' needs to be a vector");
@@ -317,13 +317,13 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
// Both operands must be integer or FP, but we don't care which.
bool MadeChange = false;
-
+
if (isCompletelyUnknown())
MadeChange = FillWithPossibleTypes(TP);
if (Other.isCompletelyUnknown())
MadeChange = Other.FillWithPossibleTypes(TP);
-
+
// If one side is known to be integer or known to be FP but the other side has
// no information, get at least the type integrality info in there.
if (!hasFloatingPointTypes())
@@ -334,62 +334,165 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
MadeChange |= EnforceInteger(TP);
else if (!Other.hasIntegerTypes())
MadeChange |= EnforceFloatingPoint(TP);
-
+
assert(!isCompletelyUnknown() && !Other.isCompletelyUnknown() &&
"Should have a type list now");
-
+
// If one contains vectors but the other doesn't, pull the vectors out.
if (!hasVectorTypes())
MadeChange |= Other.EnforceScalar(TP);
if (!Other.hasVectorTypes())
MadeChange |= EnforceScalar(TP);
+
+ if (TypeVec.size() == 1 && Other.TypeVec.size() == 1) {
+ // If we are down to concrete types, this code does not currently
+ // handle nodes which have multiple types, where some types are
+ // integer, and some are fp. Assert that this is not the case.
+ assert(!(hasIntegerTypes() && hasFloatingPointTypes()) &&
+ !(Other.hasIntegerTypes() && Other.hasFloatingPointTypes()) &&
+ "SDTCisOpSmallerThanOp does not handle mixed int/fp types!");
+
+ // Otherwise, if these are both vector types, either this vector
+ // must have a larger bitsize than the other, or this element type
+ // must be larger than the other.
+ EVT Type(TypeVec[0]);
+ EVT OtherType(Other.TypeVec[0]);
+
+ if (hasVectorTypes() && Other.hasVectorTypes()) {
+ if (Type.getSizeInBits() >= OtherType.getSizeInBits())
+ if (Type.getVectorElementType().getSizeInBits()
+ >= OtherType.getVectorElementType().getSizeInBits())
+ TP.error("Type inference contradiction found, '" +
+ getName() + "' element type not smaller than '" +
+ Other.getName() +"'!");
+ }
+ else
+ // For scalar types, the bitsize of this type must be larger
+ // than that of the other.
+ if (Type.getSizeInBits() >= OtherType.getSizeInBits())
+ TP.error("Type inference contradiction found, '" +
+ getName() + "' is not smaller than '" +
+ Other.getName() +"'!");
+
+ }
- // This code does not currently handle nodes which have multiple types,
- // where some types are integer, and some are fp. Assert that this is not
- // the case.
- assert(!(hasIntegerTypes() && hasFloatingPointTypes()) &&
- !(Other.hasIntegerTypes() && Other.hasFloatingPointTypes()) &&
- "SDTCisOpSmallerThanOp does not handle mixed int/fp types!");
-
+
+ // Handle int and fp as disjoint sets. This won't work for patterns
+ // that have mixed fp/int types but those are likely rare and would
+ // not have been accepted by this code previously.
+
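+  // Example (illustrative): with this = {i8, f32} and Other =
+  // {i8, i16, f32, f64}, the loops below remove the smallest integer (i8)
+  // and the smallest fp type (f32) from Other, leaving {i16, f64}.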
// Okay, find the smallest type from the current set and remove it from the
// largest set.
- MVT::SimpleValueType Smallest = TypeVec[0];
+ MVT::SimpleValueType SmallestInt = MVT::LAST_VALUETYPE;
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
+ if (isInteger(TypeVec[i])) {
+ SmallestInt = TypeVec[i];
+ break;
+ }
for (unsigned i = 1, e = TypeVec.size(); i != e; ++i)
- if (TypeVec[i] < Smallest)
- Smallest = TypeVec[i];
-
+ if (isInteger(TypeVec[i]) && TypeVec[i] < SmallestInt)
+ SmallestInt = TypeVec[i];
+
+ MVT::SimpleValueType SmallestFP = MVT::LAST_VALUETYPE;
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
+ if (isFloatingPoint(TypeVec[i])) {
+ SmallestFP = TypeVec[i];
+ break;
+ }
+ for (unsigned i = 1, e = TypeVec.size(); i != e; ++i)
+ if (isFloatingPoint(TypeVec[i]) && TypeVec[i] < SmallestFP)
+ SmallestFP = TypeVec[i];
+
+ int OtherIntSize = 0;
+ int OtherFPSize = 0;
+ for (SmallVector<MVT::SimpleValueType, 2>::iterator TVI =
+ Other.TypeVec.begin();
+ TVI != Other.TypeVec.end();
+ /* NULL */) {
+ if (isInteger(*TVI)) {
+ ++OtherIntSize;
+ if (*TVI == SmallestInt) {
+ TVI = Other.TypeVec.erase(TVI);
+ --OtherIntSize;
+ MadeChange = true;
+ continue;
+ }
+ }
+ else if (isFloatingPoint(*TVI)) {
+ ++OtherFPSize;
+ if (*TVI == SmallestFP) {
+ TVI = Other.TypeVec.erase(TVI);
+ --OtherFPSize;
+ MadeChange = true;
+ continue;
+ }
+ }
+ ++TVI;
+ }
+
// If this is the only type in the large set, the constraint can never be
// satisfied.
- if (Other.TypeVec.size() == 1 && Other.TypeVec[0] == Smallest)
+ if ((Other.hasIntegerTypes() && OtherIntSize == 0)
+ || (Other.hasFloatingPointTypes() && OtherFPSize == 0))
TP.error("Type inference contradiction found, '" +
Other.getName() + "' has nothing larger than '" + getName() +"'!");
-
- SmallVector<MVT::SimpleValueType, 2>::iterator TVI =
- std::find(Other.TypeVec.begin(), Other.TypeVec.end(), Smallest);
- if (TVI != Other.TypeVec.end()) {
- Other.TypeVec.erase(TVI);
- MadeChange = true;
- }
-
+
// Okay, find the largest type in the Other set and remove it from the
// current set.
- MVT::SimpleValueType Largest = Other.TypeVec[0];
+ MVT::SimpleValueType LargestInt = MVT::Other;
+ for (unsigned i = 0, e = Other.TypeVec.size(); i != e; ++i)
+ if (isInteger(Other.TypeVec[i])) {
+ LargestInt = Other.TypeVec[i];
+ break;
+ }
for (unsigned i = 1, e = Other.TypeVec.size(); i != e; ++i)
- if (Other.TypeVec[i] > Largest)
- Largest = Other.TypeVec[i];
-
+ if (isInteger(Other.TypeVec[i]) && Other.TypeVec[i] > LargestInt)
+ LargestInt = Other.TypeVec[i];
+
+ MVT::SimpleValueType LargestFP = MVT::Other;
+ for (unsigned i = 0, e = Other.TypeVec.size(); i != e; ++i)
+ if (isFloatingPoint(Other.TypeVec[i])) {
+ LargestFP = Other.TypeVec[i];
+ break;
+ }
+ for (unsigned i = 1, e = Other.TypeVec.size(); i != e; ++i)
+ if (isFloatingPoint(Other.TypeVec[i]) && Other.TypeVec[i] > LargestFP)
+ LargestFP = Other.TypeVec[i];
+
+ int IntSize = 0;
+ int FPSize = 0;
+ for (SmallVector<MVT::SimpleValueType, 2>::iterator TVI =
+ TypeVec.begin();
+ TVI != TypeVec.end();
+ /* NULL */) {
+ if (isInteger(*TVI)) {
+ ++IntSize;
+ if (*TVI == LargestInt) {
+ TVI = TypeVec.erase(TVI);
+ --IntSize;
+ MadeChange = true;
+ continue;
+ }
+ }
+ else if (isFloatingPoint(*TVI)) {
+ ++FPSize;
+ if (*TVI == LargestFP) {
+ TVI = TypeVec.erase(TVI);
+ --FPSize;
+ MadeChange = true;
+ continue;
+ }
+ }
+ ++TVI;
+ }
+
// If this is the only type in the small set, the constraint can never be
// satisfied.
- if (TypeVec.size() == 1 && TypeVec[0] == Largest)
+ if ((hasIntegerTypes() && IntSize == 0)
+ || (hasFloatingPointTypes() && FPSize == 0))
TP.error("Type inference contradiction found, '" +
getName() + "' has nothing smaller than '" + Other.getName()+"'!");
-
- TVI = std::find(TypeVec.begin(), TypeVec.end(), Largest);
- if (TVI != TypeVec.end()) {
- TypeVec.erase(TVI);
- MadeChange = true;
- }
-
+
return MadeChange;
}
@@ -406,7 +509,7 @@ bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
if (isConcrete()) {
EVT IVT = getConcrete();
IVT = IVT.getVectorElementType();
- return MadeChange |
+ return MadeChange |
VTOperand.MergeInTypeInfo(IVT.getSimpleVT().SimpleTy, TP);
}
@@ -414,11 +517,11 @@ bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
// disagree.
if (!VTOperand.isConcrete())
return MadeChange;
-
+
MVT::SimpleValueType VT = VTOperand.getConcrete();
-
+
TypeSet InputSet(*this);
-
+
// Filter out all the types which don't have the right element type.
for (unsigned i = 0; i != TypeVec.size(); ++i) {
assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
@@ -427,13 +530,43 @@ bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
MadeChange = true;
}
}
-
+
if (TypeVec.empty()) // FIXME: Really want an SMLoc here!
TP.error("Type inference contradiction found, forcing '" +
InputSet.getName() + "' to have a vector element");
return MadeChange;
}
+/// EnforceVectorSubVectorTypeIs - 'this' is now constrained to be a
+/// vector type specified by VTOperand.
+bool EEVT::TypeSet::EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VTOperand,
+ TreePattern &TP) {
+ // "This" must be a vector and "VTOperand" must be a vector.
+ bool MadeChange = false;
+ MadeChange |= EnforceVector(TP);
+ MadeChange |= VTOperand.EnforceVector(TP);
+
+ // "This" must be larger than "VTOperand."
+ MadeChange |= VTOperand.EnforceSmallerThan(*this, TP);
+
+ // If we know the vector type, it forces the scalar types to agree.
+ if (isConcrete()) {
+ EVT IVT = getConcrete();
+ IVT = IVT.getVectorElementType();
+
+ EEVT::TypeSet EltTypeSet(IVT.getSimpleVT().SimpleTy, TP);
+ MadeChange |= VTOperand.EnforceVectorEltTypeIs(EltTypeSet, TP);
+ } else if (VTOperand.isConcrete()) {
+ EVT IVT = VTOperand.getConcrete();
+ IVT = IVT.getVectorElementType();
+
+ EEVT::TypeSet EltTypeSet(IVT.getSimpleVT().SimpleTy, TP);
+ MadeChange |= EnforceVectorEltTypeIs(EltTypeSet, TP);
+ }
+
+ return MadeChange;
+}
+
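+// For example (illustrative, assuming the accompanying .td class takes
+// (this, other) operand numbers): SDTCisSubVecOfVec<0, 1> forces
+// operand/result 0 to be a subvector type of operand 1, resolved through
+// EnforceVectorSubVectorTypeIs above.
+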
//===----------------------------------------------------------------------===//
// Helpers for working with extended types.
@@ -473,18 +606,21 @@ void FindDepVars(TreePatternNode *N, MultipleUseVarSet &DepVars) {
}
//! Dump the dependent variable set:
+#ifndef NDEBUG
void DumpDepVars(MultipleUseVarSet &DepVars) {
if (DepVars.empty()) {
DEBUG(errs() << "<empty set>");
} else {
DEBUG(errs() << "[ ");
- for (MultipleUseVarSet::const_iterator i = DepVars.begin(), e = DepVars.end();
- i != e; ++i) {
+ for (MultipleUseVarSet::const_iterator i = DepVars.begin(),
+ e = DepVars.end(); i != e; ++i) {
DEBUG(errs() << (*i) << " ");
}
DEBUG(errs() << "]");
}
}
+#endif
+
}
//===----------------------------------------------------------------------===//
@@ -502,7 +638,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
// e.g. (set R32:$dst, 0).
if (P->isLeaf() && dynamic_cast<IntInit*>(P->getLeafValue()))
Size += 2;
-
+
// FIXME: This is a hack to statically increase the priority of patterns
// which maps a sub-dag to a complex pattern. e.g. favors LEA over ADD.
// Later we can allow complexity / cost for each pattern to be (optionally)
@@ -511,12 +647,12 @@ static unsigned getPatternSize(const TreePatternNode *P,
const ComplexPattern *AM = P->getComplexPatternInfo(CGP);
if (AM)
Size += AM->getNumOperands() * 3;
-
+
// If this node has some predicate function that must match, it adds to the
// complexity of this node.
if (!P->getPredicateFns().empty())
++Size;
-
+
// Count children in the count if they are also nodes.
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = P->getChild(i);
@@ -524,7 +660,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
Child->getType(0) != MVT::Other)
Size += getPatternSize(Child, CGP);
else if (Child->isLeaf()) {
- if (dynamic_cast<IntInit*>(Child->getLeafValue()))
+ if (dynamic_cast<IntInit*>(Child->getLeafValue()))
Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2).
else if (Child->getComplexPatternInfo(CGP))
Size += getPatternSize(Child, CGP);
@@ -532,7 +668,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
++Size;
}
}
-
+
return Size;
}
@@ -573,13 +709,13 @@ std::string PatternToMatch::getPredicateCheck() const {
SDTypeConstraint::SDTypeConstraint(Record *R) {
OperandNo = R->getValueAsInt("OperandNum");
-
+
if (R->isSubClassOf("SDTCisVT")) {
ConstraintType = SDTCisVT;
x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
if (x.SDTCisVT_Info.VT == MVT::isVoid)
throw TGError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
-
+
} else if (R->isSubClassOf("SDTCisPtrTy")) {
ConstraintType = SDTCisPtrTy;
} else if (R->isSubClassOf("SDTCisInt")) {
@@ -593,15 +729,19 @@ SDTypeConstraint::SDTypeConstraint(Record *R) {
x.SDTCisSameAs_Info.OtherOperandNum = R->getValueAsInt("OtherOperandNum");
} else if (R->isSubClassOf("SDTCisVTSmallerThanOp")) {
ConstraintType = SDTCisVTSmallerThanOp;
- x.SDTCisVTSmallerThanOp_Info.OtherOperandNum =
+ x.SDTCisVTSmallerThanOp_Info.OtherOperandNum =
R->getValueAsInt("OtherOperandNum");
} else if (R->isSubClassOf("SDTCisOpSmallerThanOp")) {
ConstraintType = SDTCisOpSmallerThanOp;
- x.SDTCisOpSmallerThanOp_Info.BigOperandNum =
+ x.SDTCisOpSmallerThanOp_Info.BigOperandNum =
R->getValueAsInt("BigOperandNum");
} else if (R->isSubClassOf("SDTCisEltOfVec")) {
ConstraintType = SDTCisEltOfVec;
x.SDTCisEltOfVec_Info.OtherOperandNum = R->getValueAsInt("OtherOpNum");
+ } else if (R->isSubClassOf("SDTCisSubVecOfVec")) {
+ ConstraintType = SDTCisSubVecOfVec;
+ x.SDTCisSubVecOfVec_Info.OtherOperandNum =
+ R->getValueAsInt("OtherOpNum");
} else {
errs() << "Unrecognized SDTypeConstraint '" << R->getName() << "'!\n";
exit(1);
@@ -618,11 +758,11 @@ static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
ResNo = OpNo;
return N;
}
-
+
OpNo -= NumResults;
-
+
if (OpNo >= N->getNumChildren()) {
- errs() << "Invalid operand number in type constraint "
+ errs() << "Invalid operand number in type constraint "
<< (OpNo+NumResults) << " ";
N->dump();
errs() << '\n';
@@ -641,7 +781,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TreePattern &TP) const {
unsigned ResNo = 0; // The result number being referenced.
TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
-
+
switch (ConstraintType) {
default: assert(0 && "Unknown constraint type!");
case SDTCisVT:
@@ -676,9 +816,9 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TP.error(N->getOperator()->getName() + " expects a VT operand!");
MVT::SimpleValueType VT =
getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
-
+
EEVT::TypeSet TypeListTmp(VT, TP);
-
+
unsigned OResNo = 0;
TreePatternNode *OtherNode =
getOperandNum(x.SDTCisVTSmallerThanOp_Info.OtherOperandNum, N, NodeInfo,
@@ -699,13 +839,24 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TreePatternNode *VecOperand =
getOperandNum(x.SDTCisEltOfVec_Info.OtherOperandNum, N, NodeInfo,
VResNo);
-
+
// Filter vector types out of VecOperand that don't have the right element
// type.
return VecOperand->getExtType(VResNo).
EnforceVectorEltTypeIs(NodeToApply->getExtType(ResNo), TP);
}
- }
+ case SDTCisSubVecOfVec: {
+ unsigned VResNo = 0;
+ TreePatternNode *BigVecOperand =
+ getOperandNum(x.SDTCisSubVecOfVec_Info.OtherOperandNum, N, NodeInfo,
+ VResNo);
+
+ // Filter vector types out of BigVecOperand that don't have the
+ // right subvector type.
+ return BigVecOperand->getExtType(VResNo).
+ EnforceVectorSubVectorTypeIs(NodeToApply->getExtType(ResNo), TP);
+ }
+ }
return false;
}
@@ -718,7 +869,7 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
Record *TypeProfile = R->getValueAsDef("TypeProfile");
NumResults = TypeProfile->getValueAsInt("NumResults");
NumOperands = TypeProfile->getValueAsInt("NumOperands");
-
+
// Parse the properties.
Properties = 0;
std::vector<Record*> PropList = R->getValueAsListOfDefs("Properties");
@@ -729,12 +880,12 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
Properties |= 1 << SDNPAssociative;
} else if (PropList[i]->getName() == "SDNPHasChain") {
Properties |= 1 << SDNPHasChain;
- } else if (PropList[i]->getName() == "SDNPOutFlag") {
- Properties |= 1 << SDNPOutFlag;
- } else if (PropList[i]->getName() == "SDNPInFlag") {
- Properties |= 1 << SDNPInFlag;
- } else if (PropList[i]->getName() == "SDNPOptInFlag") {
- Properties |= 1 << SDNPOptInFlag;
+ } else if (PropList[i]->getName() == "SDNPOutGlue") {
+ Properties |= 1 << SDNPOutGlue;
+ } else if (PropList[i]->getName() == "SDNPInGlue") {
+ Properties |= 1 << SDNPInGlue;
+ } else if (PropList[i]->getName() == "SDNPOptInGlue") {
+ Properties |= 1 << SDNPOptInGlue;
} else if (PropList[i]->getName() == "SDNPMayStore") {
Properties |= 1 << SDNPMayStore;
} else if (PropList[i]->getName() == "SDNPMayLoad") {
@@ -751,8 +902,8 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
exit(1);
}
}
-
-
+
+
// Parse the type constraints.
std::vector<Record*> ConstraintList =
TypeProfile->getValueAsListOfDefs("Constraints");
@@ -767,12 +918,12 @@ MVT::SimpleValueType SDNodeInfo::getKnownType(unsigned ResNo) const {
assert(NumResults <= 1 &&
"We only work with nodes with zero or one result so far!");
assert(ResNo == 0 && "Only handles single result nodes so far");
-
+
for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i) {
// Make sure that this applies to the correct node result.
if (TypeConstraints[i].OperandNo >= NumResults) // FIXME: need value #
continue;
-
+
switch (TypeConstraints[i].ConstraintType) {
default: break;
case SDTypeConstraint::SDTCisVT:
@@ -799,20 +950,20 @@ static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
if (Operator->getName() == "set" ||
Operator->getName() == "implicit")
return 0; // All return nothing.
-
+
if (Operator->isSubClassOf("Intrinsic"))
return CDP.getIntrinsic(Operator).IS.RetVTs.size();
-
+
if (Operator->isSubClassOf("SDNode"))
return CDP.getSDNodeInfo(Operator).getNumResults();
-
+
if (Operator->isSubClassOf("PatFrag")) {
// If we've already parsed this pattern fragment, get it. Otherwise, handle
// the forward reference case where one pattern fragment references another
// before it is processed.
if (TreePattern *PFRec = CDP.getPatternFragmentIfRead(Operator))
return PFRec->getOnlyTree()->getNumTypes();
-
+
// Get the result tree.
DagInit *Tree = Operator->getValueAsDag("Fragment");
Record *Op = 0;
@@ -821,22 +972,22 @@ static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
assert(Op && "Invalid Fragment");
return GetNumNodeResults(Op, CDP);
}
-
+
if (Operator->isSubClassOf("Instruction")) {
CodeGenInstruction &InstInfo = CDP.getTargetInfo().getInstruction(Operator);
// FIXME: Should allow access to all the results here.
- unsigned NumDefsToAdd = InstInfo.NumDefs ? 1 : 0;
-
+ unsigned NumDefsToAdd = InstInfo.Operands.NumDefs ? 1 : 0;
+
// Add on one implicit def if it has a resolvable type.
if (InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo()) !=MVT::Other)
++NumDefsToAdd;
return NumDefsToAdd;
}
-
+
if (Operator->isSubClassOf("SDNodeXForm"))
return 1; // FIXME: Generalize SDNodeXForm
-
+
Operator->dump();
errs() << "Unhandled node in GetNumNodeResults\n";
exit(1);
@@ -862,7 +1013,7 @@ void TreePatternNode::print(raw_ostream &OS) const {
}
OS << ")";
}
-
+
for (unsigned i = 0, e = PredicateFns.size(); i != e; ++i)
OS << "<<P:" << PredicateFns[i] << ">>";
if (TransformFn)
@@ -900,7 +1051,7 @@ bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N,
}
return getLeafValue() == N->getLeafValue();
}
-
+
if (N->getOperator() != getOperator() ||
N->getNumChildren() != getNumChildren()) return false;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
@@ -944,7 +1095,7 @@ void TreePatternNode::RemoveAllTypes() {
void TreePatternNode::
SubstituteFormalArguments(std::map<std::string, TreePatternNode*> &ArgMap) {
if (isLeaf()) return;
-
+
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
TreePatternNode *Child = getChild(i);
if (Child->isLeaf()) {
@@ -972,7 +1123,7 @@ SubstituteFormalArguments(std::map<std::string, TreePatternNode*> &ArgMap) {
TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
if (isLeaf()) return this; // nothing to do.
Record *Op = getOperator();
-
+
if (!Op->isSubClassOf("PatFrag")) {
// Just recursively inline children nodes.
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
@@ -991,7 +1142,7 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
// Otherwise, we found a reference to a fragment. First, look up its
// TreePattern record.
TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op);
-
+
// Verify that we are passing the right number of operands.
if (Frag->getNumArgs() != Children.size())
TP.error("'" + Op->getName() + "' fragment requires " +
@@ -1009,10 +1160,10 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
std::map<std::string, TreePatternNode*> ArgMap;
for (unsigned i = 0, e = Frag->getNumArgs(); i != e; ++i)
ArgMap[Frag->getArgName(i)] = getChild(i)->InlinePatternFragments(TP);
-
+
FragTree->SubstituteFormalArguments(ArgMap);
}
-
+
FragTree->setName(getName());
for (unsigned i = 0, e = Types.size(); i != e; ++i)
FragTree->UpdateNodeType(i, getExtType(i), TP);
@@ -1023,7 +1174,7 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
// Get a new copy of this fragment to stitch into here.
//delete this; // FIXME: implement refcounting!
-
+
// The fragment we inlined could have recursive inlining that is needed. See
// if there are any pattern fragments in it and inline them as needed.
return FragTree->InlinePatternFragments(TP);
@@ -1038,21 +1189,21 @@ static EEVT::TypeSet getImplicitType(Record *R, unsigned ResNo,
// Check to see if this is a register or a register class.
if (R->isSubClassOf("RegisterClass")) {
assert(ResNo == 0 && "Regclass ref only has one result!");
- if (NotRegisters)
+ if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return EEVT::TypeSet(T.getRegisterClass(R).getValueTypes());
}
-
+
if (R->isSubClassOf("PatFrag")) {
assert(ResNo == 0 && "FIXME: PatFrag with multiple results?");
// Pattern fragment types will be resolved when they are inlined.
return EEVT::TypeSet(); // Unknown.
}
-
+
if (R->isSubClassOf("Register")) {
assert(ResNo == 0 && "Registers only produce one result!");
- if (NotRegisters)
+ if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return EEVT::TypeSet(T.getRegisterVTs(R));
@@ -1062,16 +1213,16 @@ static EEVT::TypeSet getImplicitType(Record *R, unsigned ResNo,
assert(ResNo == 0 && "SubRegisterIndices only produce one result!");
return EEVT::TypeSet();
}
-
+
if (R->isSubClassOf("ValueType") || R->isSubClassOf("CondCode")) {
assert(ResNo == 0 && "This node only has one result!");
// Using a VTSDNode or CondCodeSDNode.
return EEVT::TypeSet(MVT::Other, TP);
}
-
+
if (R->isSubClassOf("ComplexPattern")) {
assert(ResNo == 0 && "FIXME: ComplexPattern with multiple results?");
- if (NotRegisters)
+ if (NotRegisters)
return EEVT::TypeSet(); // Unknown.
return EEVT::TypeSet(TP.getDAGPatterns().getComplexPattern(R).getValueType(),
TP);
@@ -1080,13 +1231,13 @@ static EEVT::TypeSet getImplicitType(Record *R, unsigned ResNo,
assert(ResNo == 0 && "Regclass can only have one result!");
return EEVT::TypeSet(MVT::iPTR, TP);
}
-
+
if (R->getName() == "node" || R->getName() == "srcvalue" ||
R->getName() == "zero_reg") {
// Placeholder.
return EEVT::TypeSet(); // Unknown.
}
-
+
TP.error("Unknown node flavor used in pattern: " + R->getName());
return EEVT::TypeSet(MVT::Other, TP);
}
@@ -1100,8 +1251,8 @@ getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const {
getOperator() != CDP.get_intrinsic_w_chain_sdnode() &&
getOperator() != CDP.get_intrinsic_wo_chain_sdnode())
return 0;
-
- unsigned IID =
+
+ unsigned IID =
dynamic_cast<IntInit*>(getChild(0)->getLeafValue())->getValue();
return &CDP.getIntrinsicInfo(IID);
}
@@ -1111,7 +1262,7 @@ getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const {
const ComplexPattern *
TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
if (!isLeaf()) return 0;
-
+
DefInit *DI = dynamic_cast<DefInit*>(getLeafValue());
if (DI && DI->getDef()->isSubClassOf("ComplexPattern"))
return &CGP.getComplexPattern(DI->getDef());
@@ -1126,10 +1277,10 @@ bool TreePatternNode::NodeHasProperty(SDNP Property,
return CP->hasProperty(Property);
return false;
}
-
+
Record *Operator = getOperator();
if (!Operator->isSubClassOf("SDNode")) return false;
-
+
return CGP.getSDNodeInfo(Operator).hasProperty(Property);
}
@@ -1146,7 +1297,7 @@ bool TreePatternNode::TreeHasProperty(SDNP Property,
if (getChild(i)->TreeHasProperty(Property, CGP))
return true;
return false;
-}
+}
/// isCommutativeIntrinsic - Return true if the node corresponds to a
/// commutative intrinsic.
@@ -1173,27 +1324,27 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
NotRegisters, TP), TP);
return MadeChange;
}
-
+
if (IntInit *II = dynamic_cast<IntInit*>(getLeafValue())) {
assert(Types.size() == 1 && "Invalid IntInit");
-
+
// Int inits are always integers. :)
bool MadeChange = Types[0].EnforceInteger(TP);
-
+
if (!Types[0].isConcrete())
return MadeChange;
-
+
MVT::SimpleValueType VT = getType(0);
if (VT == MVT::iPTR || VT == MVT::iPTRAny)
return MadeChange;
-
+
unsigned Size = EVT(VT).getSizeInBits();
// Make sure that the value is representable for this type.
if (Size >= 32) return MadeChange;
-
+
int Val = (II->getValue() << (32-Size)) >> (32-Size);
if (Val == II->getValue()) return MadeChange;
-
+
// If sign-extended doesn't fit, does it fit as unsigned?
unsigned ValueMask;
unsigned UnsignedVal;
@@ -1202,34 +1353,34 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if ((ValueMask & UnsignedVal) == UnsignedVal)
return MadeChange;
-
+
TP.error("Integer value '" + itostr(II->getValue())+
"' is out of range for type '" + getEnumName(getType(0)) + "'!");
return MadeChange;
}
return false;
}
-
+
// special handling for set, which isn't really an SDNode.
if (getOperator()->getName() == "set") {
assert(getNumTypes() == 0 && "Set doesn't produce a value");
assert(getNumChildren() >= 2 && "Missing RHS of a set?");
unsigned NC = getNumChildren();
-
+
TreePatternNode *SetVal = getChild(NC-1);
bool MadeChange = SetVal->ApplyTypeConstraints(TP, NotRegisters);
for (unsigned i = 0; i < NC-1; ++i) {
TreePatternNode *Child = getChild(i);
MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
-
+
// Types of operands must match.
MadeChange |= Child->UpdateNodeType(0, SetVal->getExtType(i), TP);
MadeChange |= SetVal->UpdateNodeType(i, Child->getExtType(0), TP);
}
return MadeChange;
}
-
+
if (getOperator()->getName() == "implicit") {
assert(getNumTypes() == 0 && "Node doesn't produce a value");
@@ -1238,15 +1389,15 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
-
+
if (getOperator()->getName() == "COPY_TO_REGCLASS") {
bool MadeChange = false;
MadeChange |= getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
MadeChange |= getChild(1)->ApplyTypeConstraints(TP, NotRegisters);
-
+
assert(getChild(0)->getNumTypes() == 1 &&
getChild(1)->getNumTypes() == 1 && "Unhandled case");
-
+
// child #1 of COPY_TO_REGCLASS should be a register class. We don't care
// what type it gets, so if it didn't get a concrete type just give it the
// first viable type from the reg class.
@@ -1257,14 +1408,14 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
}
return MadeChange;
}
-
+
if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP)) {
bool MadeChange = false;
// Apply the result type to the node.
unsigned NumRetVTs = Int->IS.RetVTs.size();
unsigned NumParamVTs = Int->IS.ParamVTs.size();
-
+
for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
@@ -1275,46 +1426,46 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// Apply type info to the intrinsic ID.
MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
-
+
for (unsigned i = 0, e = getNumChildren()-1; i != e; ++i) {
MadeChange |= getChild(i+1)->ApplyTypeConstraints(TP, NotRegisters);
-
+
MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i];
assert(getChild(i+1)->getNumTypes() == 1 && "Unhandled case");
MadeChange |= getChild(i+1)->UpdateNodeType(0, OpVT, TP);
}
return MadeChange;
}
-
+
if (getOperator()->isSubClassOf("SDNode")) {
const SDNodeInfo &NI = CDP.getSDNodeInfo(getOperator());
-
+
// Check that the number of operands is sane. Negative operands -> varargs.
if (NI.getNumOperands() >= 0 &&
getNumChildren() != (unsigned)NI.getNumOperands())
TP.error(getOperator()->getName() + " node requires exactly " +
itostr(NI.getNumOperands()) + " operands!");
-
+
bool MadeChange = NI.ApplyTypeConstraints(this, TP);
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
return MadeChange;
}
-
+
if (getOperator()->isSubClassOf("Instruction")) {
const DAGInstruction &Inst = CDP.getInstruction(getOperator());
CodeGenInstruction &InstInfo =
CDP.getTargetInfo().getInstruction(getOperator());
-
+
bool MadeChange = false;
// Apply the result types to the node, these come from the things in the
// (outs) list of the instruction.
// FIXME: Cap at one result so far.
- unsigned NumResultsToAdd = InstInfo.NumDefs ? 1 : 0;
+ unsigned NumResultsToAdd = InstInfo.Operands.NumDefs ? 1 : 0;
for (unsigned ResNo = 0; ResNo != NumResultsToAdd; ++ResNo) {
Record *ResultNode = Inst.getResult(ResNo);
-
+
if (ResultNode->isSubClassOf("PointerLikeRegClass")) {
MadeChange |= UpdateNodeType(ResNo, MVT::iPTR, TP);
} else if (ResultNode->getName() == "unknown") {
@@ -1322,26 +1473,26 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
} else {
assert(ResultNode->isSubClassOf("RegisterClass") &&
"Operands should be register classes!");
- const CodeGenRegisterClass &RC =
+ const CodeGenRegisterClass &RC =
CDP.getTargetInfo().getRegisterClass(ResultNode);
MadeChange |= UpdateNodeType(ResNo, RC.getValueTypes(), TP);
}
}
-
+
// If the instruction has implicit defs, we apply the first one as a result.
// FIXME: This sucks, it should apply all implicit defs.
if (!InstInfo.ImplicitDefs.empty()) {
unsigned ResNo = NumResultsToAdd;
-
+
// FIXME: Generalize to multiple possible types and multiple possible
// ImplicitDefs.
MVT::SimpleValueType VT =
InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo());
-
+
if (VT != MVT::Other)
MadeChange |= UpdateNodeType(ResNo, VT, TP);
}
-
+
// If this is an INSERT_SUBREG, constrain the source and destination VTs to
// be the same.
if (getOperator()->getName() == "INSERT_SUBREG") {
@@ -1353,7 +1504,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
unsigned ChildNo = 0;
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) {
Record *OperandNode = Inst.getOperand(i);
-
+
// If the instruction expects a predicate or optional def operand, we
// codegen this by setting the operand to its default value if it has a
// non-empty DefaultOps field.
@@ -1361,18 +1512,18 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
OperandNode->isSubClassOf("OptionalDefOperand")) &&
!CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
continue;
-
+
// Verify that we didn't run out of provided operands.
if (ChildNo >= getNumChildren())
TP.error("Instruction '" + getOperator()->getName() +
"' expects more operands than were provided.");
-
+
MVT::SimpleValueType VT;
TreePatternNode *Child = getChild(ChildNo++);
unsigned ChildResNo = 0; // Instructions always use res #0 of their op.
-
+
if (OperandNode->isSubClassOf("RegisterClass")) {
- const CodeGenRegisterClass &RC =
+ const CodeGenRegisterClass &RC =
CDP.getTargetInfo().getRegisterClass(OperandNode);
MadeChange |= Child->UpdateNodeType(ChildResNo, RC.getValueTypes(), TP);
} else if (OperandNode->isSubClassOf("Operand")) {
@@ -1392,12 +1543,12 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if (ChildNo != getNumChildren())
TP.error("Instruction '" + getOperator()->getName() +
"' was provided too many operands!");
-
+
return MadeChange;
}
-
+
assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!");
-
+
// Node transforms always take one operand.
if (getNumChildren() != 1)
TP.error("Node transform '" + getOperator()->getName() +
@@ -1405,7 +1556,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
-
+
// If either the output or input of the xform does not have exact
// type info, we assume they must be the same. Otherwise, it is perfectly
// legal to transform from one type to a completely different type.
@@ -1435,7 +1586,7 @@ static bool OnlyOnRHSOfCommutative(TreePatternNode *N) {
/// used as a sanity check for .td files (to prevent people from writing stuff
/// that can never possibly work), and to prevent the pattern permuter from
/// generating stuff that is useless.
-bool TreePatternNode::canPatternMatch(std::string &Reason,
+bool TreePatternNode::canPatternMatch(std::string &Reason,
const CodeGenDAGPatterns &CDP) {
if (isLeaf()) return true;
@@ -1449,7 +1600,7 @@ bool TreePatternNode::canPatternMatch(std::string &Reason,
// TODO:
return true;
}
-
+
// If this node is a commutative operator, check that the LHS isn't an
// immediate.
const SDNodeInfo &NodeInfo = CDP.getSDNodeInfo(getOperator());
@@ -1466,7 +1617,7 @@ bool TreePatternNode::canPatternMatch(std::string &Reason,
}
}
}
-
+
return true;
}
@@ -1506,7 +1657,7 @@ void TreePattern::ComputeNamedNodes() {
void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
if (!N->getName().empty())
NamedNodes[N->getName()].push_back(N);
-
+
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
ComputeNamedNodes(N->getChild(i));
}
@@ -1515,7 +1666,7 @@ void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
if (DefInit *DI = dynamic_cast<DefInit*>(TheInit)) {
Record *R = DI->getDef();
-
+
// Direct reference to a leaf DagNode or PatFrag? Turn it into a
// TreePatternNode of its own. For example:
/// (foo GPR, imm) -> (foo GPR, (imm))
@@ -1523,7 +1674,7 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
return ParseTreePattern(new DagInit(DI, "",
std::vector<std::pair<Init*, std::string> >()),
OpName);
-
+
// Input argument?
TreePatternNode *Res = new TreePatternNode(DI, 1);
if (R->getName() == "node" && !OpName.empty()) {
@@ -1535,13 +1686,13 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
Res->setName(OpName);
return Res;
}
-
+
if (IntInit *II = dynamic_cast<IntInit*>(TheInit)) {
if (!OpName.empty())
error("Constant int argument should not have a name!");
return new TreePatternNode(II, 1);
}
-
+
if (BitsInit *BI = dynamic_cast<BitsInit*>(TheInit)) {
// Turn this into an IntInit.
Init *II = BI->convertInitializerTo(new IntRecTy());
@@ -1558,34 +1709,34 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
if (!OpDef) error("Pattern has unexpected operator type!");
Record *Operator = OpDef->getDef();
-
+
if (Operator->isSubClassOf("ValueType")) {
// If the operator is a ValueType, then this must be a "type cast" of a leaf
// node.
if (Dag->getNumArgs() != 1)
error("Type cast only takes one operand!");
-
+
TreePatternNode *New = ParseTreePattern(Dag->getArg(0), Dag->getArgName(0));
-
+
// Apply the type cast.
assert(New->getNumTypes() == 1 && "FIXME: Unhandled");
New->UpdateNodeType(0, getValueType(Operator), *this);
-
+
if (!OpName.empty())
error("ValueType cast should not have a name!");
return New;
}
-
+
// Verify that this is something that makes sense for an operator.
- if (!Operator->isSubClassOf("PatFrag") &&
+ if (!Operator->isSubClassOf("PatFrag") &&
!Operator->isSubClassOf("SDNode") &&
- !Operator->isSubClassOf("Instruction") &&
+ !Operator->isSubClassOf("Instruction") &&
!Operator->isSubClassOf("SDNodeXForm") &&
!Operator->isSubClassOf("Intrinsic") &&
Operator->getName() != "set" &&
Operator->getName() != "implicit")
error("Unrecognized node '" + Operator->getName() + "'!");
-
+
// Check to see if this is something that is illegal in an input pattern.
if (isInputPattern) {
if (Operator->isSubClassOf("Instruction") ||
@@ -1594,7 +1745,7 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
} else {
if (Operator->isSubClassOf("Intrinsic"))
error("Cannot use '" + Operator->getName() + "' in an output pattern!");
-
+
if (Operator->isSubClassOf("SDNode") &&
Operator->getName() != "imm" &&
Operator->getName() != "fpimm" &&
@@ -1609,15 +1760,15 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
Operator->getName() != "vt")
error("Cannot use '" + Operator->getName() + "' in an output pattern!");
}
-
+
std::vector<TreePatternNode*> Children;
// Parse all the operands.
for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i)
Children.push_back(ParseTreePattern(Dag->getArg(i), Dag->getArgName(i)));
-
+
// If the operator is an intrinsic, then this is just syntactic sugar for
- // (intrinsic_* <number>, ..children..). Pick the right intrinsic node, and
+ // (intrinsic_* <number>, ..children..). Pick the right intrinsic node, and
// convert the intrinsic name to a number.
if (Operator->isSubClassOf("Intrinsic")) {
const CodeGenIntrinsic &Int = getDAGPatterns().getIntrinsic(Operator);
@@ -1632,15 +1783,15 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
Operator = getDAGPatterns().get_intrinsic_w_chain_sdnode();
else // Otherwise, no chain.
Operator = getDAGPatterns().get_intrinsic_wo_chain_sdnode();
-
+
TreePatternNode *IIDNode = new TreePatternNode(new IntInit(IID), 1);
Children.insert(Children.begin(), IIDNode);
}
-
+
unsigned NumResults = GetNumNodeResults(Operator, CDP);
TreePatternNode *Result = new TreePatternNode(Operator, Children, NumResults);
Result->setName(OpName);
-
+
if (!Dag->getName().empty()) {
assert(Result->getName().empty());
Result->setName(Dag->getName());
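
The net effect of the intrinsic desugaring above is easiest to see on a concrete, invented pattern: (int_foo GPR:$a) becomes (intrinsic_wo_chain (i32 IID), GPR:$a), where IID is the intrinsic's enum number. A hedged sketch of the node selection, with returnsVoid/accessesMemory standing in for whichever intrinsic properties the real code consults:

    // Sketch: pick the generic intrinsic SDNode, then prepend the ID child.
    Record *Op;
    if (returnsVoid(Int))            // no results -> chained, void call
      Op = CDP.get_intrinsic_void_sdnode();
    else if (accessesMemory(Int))    // has results and touches memory -> chain
      Op = CDP.get_intrinsic_w_chain_sdnode();
    else                             // pure value computation, no chain
      Op = CDP.get_intrinsic_wo_chain_sdnode();
    Children.insert(Children.begin(),
                    new TreePatternNode(new IntInit(IID), 1));
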
@@ -1698,10 +1849,10 @@ InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
}
// If there are constraints on our named nodes, apply them.
- for (StringMap<SmallVector<TreePatternNode*,1> >::iterator
+ for (StringMap<SmallVector<TreePatternNode*,1> >::iterator
I = NamedNodes.begin(), E = NamedNodes.end(); I != E; ++I) {
SmallVectorImpl<TreePatternNode*> &Nodes = I->second;
-
+
// If we have input named node types, propagate their types to the named
// values here.
if (InNamedTypes) {
@@ -1724,7 +1875,7 @@ InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
if (DI && DI->getDef()->isSubClassOf("RegisterClass"))
continue;
}
-
+
assert(Nodes[i]->getNumTypes() == 1 &&
InNodes[0]->getNumTypes() == 1 &&
"FIXME: cannot name multiple result nodes yet");
@@ -1732,7 +1883,7 @@ InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
*this);
}
}
-
+
// If there are multiple nodes with the same name, they must all have the
// same type.
if (I->second.size() > 1) {
@@ -1740,14 +1891,14 @@ InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
TreePatternNode *N1 = Nodes[i], *N2 = Nodes[i+1];
assert(N1->getNumTypes() == 1 && N2->getNumTypes() == 1 &&
"FIXME: cannot name multiple result nodes yet");
-
+
MadeChange |= N1->UpdateNodeType(0, N2->getExtType(0), *this);
MadeChange |= N2->UpdateNodeType(0, N1->getExtType(0), *this);
}
}
}
}
-
+
bool HasUnresolvedTypes = false;
for (unsigned i = 0, e = Trees.size(); i != e; ++i)
HasUnresolvedTypes |= Trees[i]->ContainsUnresolvedType();
@@ -1763,7 +1914,7 @@ void TreePattern::print(raw_ostream &OS) const {
OS << ")";
}
OS << ": ";
-
+
if (Trees.size() > 1)
OS << "[\n";
for (unsigned i = 0, e = Trees.size(); i != e; ++i) {
@@ -1782,7 +1933,9 @@ void TreePattern::dump() const { print(errs()); }
// CodeGenDAGPatterns implementation
//
-CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) : Records(R) {
+CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) :
+ Records(R), Target(R) {
+
Intrinsics = LoadIntrinsics(Records, false);
TgtIntrinsics = LoadIntrinsics(Records, true);
ParseNodeInfo();
@@ -1792,7 +1945,7 @@ CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) : Records(R) {
ParseDefaultOperands();
ParseInstructions();
ParsePatterns();
-
+
// Generate variants. For example, commutative patterns can match
// multiple ways. Add them to PatternsToMatch as well.
GenerateVariants();
@@ -1863,20 +2016,20 @@ void CodeGenDAGPatterns::ParseComplexPatterns() {
///
void CodeGenDAGPatterns::ParsePatternFragments() {
std::vector<Record*> Fragments = Records.getAllDerivedDefinitions("PatFrag");
-
+
// First step, parse all of the fragments.
for (unsigned i = 0, e = Fragments.size(); i != e; ++i) {
DagInit *Tree = Fragments[i]->getValueAsDag("Fragment");
TreePattern *P = new TreePattern(Fragments[i], Tree, true, *this);
PatternFragments[Fragments[i]] = P;
-
+
// Validate the argument list, converting it to a set to discard duplicates.
std::vector<std::string> &Args = P->getArgList();
std::set<std::string> OperandsSet(Args.begin(), Args.end());
-
+
if (OperandsSet.count(""))
P->error("Cannot have unnamed 'node' values in pattern fragment!");
-
+
// Parse the operands list.
DagInit *OpsList = Fragments[i]->getValueAsDag("Operands");
DefInit *OpsOp = dynamic_cast<DefInit*>(OpsList->getOperator());
@@ -1887,8 +2040,8 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
OpsOp->getDef()->getName() != "outs" &&
OpsOp->getDef()->getName() != "ins"))
P->error("Operands list should start with '(ops ... '!");
-
- // Copy over the arguments.
+
+ // Copy over the arguments.
Args.clear();
for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) {
if (!dynamic_cast<DefInit*>(OpsList->getArg(j)) ||
@@ -1903,7 +2056,7 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
OperandsSet.erase(OpsList->getArgName(j));
Args.push_back(OpsList->getArgName(j));
}
-
+
if (!OperandsSet.empty())
P->error("Operands list does not contain an entry for operand '" +
*OperandsSet.begin() + "'!");
@@ -1913,20 +2066,20 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
std::string Code = Fragments[i]->getValueAsCode("Predicate");
if (!Code.empty())
P->getOnlyTree()->addPredicateFn("Predicate_"+Fragments[i]->getName());
-
+
// If there is a node transformation corresponding to this, keep track of
// it.
Record *Transform = Fragments[i]->getValueAsDef("OperandTransform");
if (!getSDNodeTransform(Transform).second.empty()) // not noop xform?
P->getOnlyTree()->setTransformFn(Transform);
}
-
+
// Now that we've parsed all of the tree fragments, do a closure on them so
// that there are no references to PatFrags left inside of them.
for (unsigned i = 0, e = Fragments.size(); i != e; ++i) {
TreePattern *ThePat = PatternFragments[Fragments[i]];
ThePat->InlinePatternFragments();
-
+
// Infer as many types as possible. Don't worry about it if we don't infer
// all of them; some may depend on the inputs of the pattern.
try {
@@ -1937,7 +2090,7 @@ void CodeGenDAGPatterns::ParsePatternFragments() {
// actually used by instructions, the type consistency error will be
// reported there.
}
-
+
// If debugging, print out the pattern fragment result.
DEBUG(ThePat->dump());
}
@@ -1951,11 +2104,11 @@ void CodeGenDAGPatterns::ParseDefaultOperands() {
// Find some SDNode.
assert(!SDNodes.empty() && "No SDNodes parsed?");
Init *SomeSDNode = new DefInit(SDNodes.begin()->first);
-
+
for (unsigned iter = 0; iter != 2; ++iter) {
for (unsigned i = 0, e = DefaultOps[iter].size(); i != e; ++i) {
DagInit *DefaultInfo = DefaultOps[iter][i]->getValueAsDag("DefaultOps");
-
+
// Clone the DefaultInfo dag node, changing the operator from 'ops' to
// SomeSDNode so that we can parse this.
std::vector<std::pair<Init*, std::string> > Ops;
@@ -1963,20 +2116,20 @@ void CodeGenDAGPatterns::ParseDefaultOperands() {
Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
DefaultInfo->getArgName(op)));
DagInit *DI = new DagInit(SomeSDNode, "", Ops);
-
+
// Create a TreePattern to parse this.
TreePattern P(DefaultOps[iter][i], DI, false, *this);
assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
// Copy the operands over into a DAGDefaultOperand.
DAGDefaultOperand DefaultOpInfo;
-
+
TreePatternNode *T = P.getTree(0);
for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
TreePatternNode *TPN = T->getChild(op);
while (TPN->ApplyTypeConstraints(P, false))
/* Resolve all types */;
-
+
if (TPN->ContainsUnresolvedType()) {
if (iter == 0)
throw "Value #" + utostr(i) + " of PredicateOperand '" +
@@ -2033,7 +2186,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
assert(Slot->getNumChildren() == 0 && "can't be a use with children!");
SlotRec = Slot->getOperator();
}
-
+
// Ensure that the inputs agree if we've already seen this input.
if (Rec != SlotRec)
I->error("All $" + Pat->getName() + " inputs must agree with each other");
@@ -2056,13 +2209,13 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
I->error("Cannot specify a transform function for a non-input value!");
return;
}
-
+
if (Pat->getOperator()->getName() == "implicit") {
for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
TreePatternNode *Dest = Pat->getChild(i);
if (!Dest->isLeaf())
I->error("implicitly defined value should be a register!");
-
+
DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
if (!Val || !Val->getDef()->isSubClassOf("Register"))
I->error("implicitly defined value should be a register!");
@@ -2070,7 +2223,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
}
return;
}
-
+
if (Pat->getOperator()->getName() != "set") {
// If this is not a set, verify that the child nodes are not void typed,
// and recurse.
@@ -2080,30 +2233,30 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
FindPatternInputsAndOutputs(I, Pat->getChild(i), InstInputs, InstResults,
InstImpResults);
}
-
+
// If this is a non-leaf node with no children, treat it basically as if
// it were a leaf. This handles nodes like (imm).
bool isUse = HandleUse(I, Pat, InstInputs);
-
+
if (!isUse && Pat->getTransformFn())
I->error("Cannot specify a transform function for a non-input value!");
return;
}
-
+
// Otherwise, this is a set, validate and collect instruction results.
if (Pat->getNumChildren() == 0)
I->error("set requires operands!");
-
+
if (Pat->getTransformFn())
I->error("Cannot specify a transform function on a set node!");
-
+
// Check the set destinations.
unsigned NumDests = Pat->getNumChildren()-1;
for (unsigned i = 0; i != NumDests; ++i) {
TreePatternNode *Dest = Pat->getChild(i);
if (!Dest->isLeaf())
I->error("set destination should be a register!");
-
+
DefInit *Val = dynamic_cast<DefInit*>(Dest->getLeafValue());
if (!Val)
I->error("set destination should be a register!");
@@ -2121,7 +2274,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
I->error("set destination should be a register!");
}
}
-
+
// Verify and collect info from the computation.
FindPatternInputsAndOutputs(I, Pat->getChild(NumDests),
InstInputs, InstResults, InstImpResults);
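
Concretely, for the set handling above, a made-up pattern like (set GPR:$dst, (add GPR:$a, GPR:$b)) splits as: NumDests = 1, child 0 ($dst) must be a register-class (or register) leaf and lands in InstResults, and the last child, the add, is recursed into so that $a and $b land in InstInputs. The split in two lines:

    unsigned NumDests = Pat->getNumChildren() - 1;         // children 0..N-2 = dests
    TreePatternNode *Computation = Pat->getChild(NumDests); // last child = RHS
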
@@ -2254,8 +2407,8 @@ static void InferFromPattern(const CodeGenInstruction &Inst,
"which already inferred this.\n", Inst.TheDef->getName().c_str());
HasSideEffects = true;
}
-
- if (Inst.isVariadic)
+
+ if (Inst.Operands.isVariadic)
IsVariadic = true; // Can warn if we want.
}
@@ -2264,64 +2417,64 @@ static void InferFromPattern(const CodeGenInstruction &Inst,
/// resolved instructions.
void CodeGenDAGPatterns::ParseInstructions() {
std::vector<Record*> Instrs = Records.getAllDerivedDefinitions("Instruction");
-
+
for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
ListInit *LI = 0;
-
+
if (dynamic_cast<ListInit*>(Instrs[i]->getValueInit("Pattern")))
LI = Instrs[i]->getValueAsListInit("Pattern");
-
+
// If there is no pattern, only collect minimal information about the
// instruction for its operand list. We have to assume that there is one
// result, as we have no detailed info.
if (!LI || LI->getSize() == 0) {
std::vector<Record*> Results;
std::vector<Record*> Operands;
-
+
CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
- if (InstInfo.OperandList.size() != 0) {
- if (InstInfo.NumDefs == 0) {
+ if (InstInfo.Operands.size() != 0) {
+ if (InstInfo.Operands.NumDefs == 0) {
// These produce no results
- for (unsigned j = 0, e = InstInfo.OperandList.size(); j < e; ++j)
- Operands.push_back(InstInfo.OperandList[j].Rec);
+ for (unsigned j = 0, e = InstInfo.Operands.size(); j < e; ++j)
+ Operands.push_back(InstInfo.Operands[j].Rec);
} else {
// Assume the first operand is the result.
- Results.push_back(InstInfo.OperandList[0].Rec);
-
+ Results.push_back(InstInfo.Operands[0].Rec);
+
// The rest are inputs.
- for (unsigned j = 1, e = InstInfo.OperandList.size(); j < e; ++j)
- Operands.push_back(InstInfo.OperandList[j].Rec);
+ for (unsigned j = 1, e = InstInfo.Operands.size(); j < e; ++j)
+ Operands.push_back(InstInfo.Operands[j].Rec);
}
}
-
+
// Create and insert the instruction.
std::vector<Record*> ImpResults;
- Instructions.insert(std::make_pair(Instrs[i],
+ Instructions.insert(std::make_pair(Instrs[i],
DAGInstruction(0, Results, Operands, ImpResults)));
continue; // no pattern.
}
-
+
// Parse the instruction.
TreePattern *I = new TreePattern(Instrs[i], LI, true, *this);
// Inline pattern fragments into it.
I->InlinePatternFragments();
-
+
// Infer as many types as possible. If we cannot infer all of them, we can
// never do anything with this instruction pattern: report it to the user.
if (!I->InferAllTypes())
I->error("Could not infer all types in pattern!");
-
- // InstInputs - Keep track of all of the inputs of the instruction, along
+
+ // InstInputs - Keep track of all of the inputs of the instruction, along
// with the record they are declared as.
std::map<std::string, TreePatternNode*> InstInputs;
-
+
// InstResults - Keep track of all the virtual registers that are 'set'
// in the instruction, including what reg class they are.
std::map<std::string, TreePatternNode*> InstResults;
std::vector<Record*> InstImpResults;
-
+
// Verify that the top-level forms in the instruction are of void type, and
// fill in the InstResults map.
for (unsigned j = 0, e = I->getNumTrees(); j != e; ++j) {
@@ -2348,29 +2501,29 @@ void CodeGenDAGPatterns::ParseInstructions() {
std::vector<Record*> Results;
TreePatternNode *Res0Node = 0;
for (unsigned i = 0; i != NumResults; ++i) {
- if (i == CGI.OperandList.size())
+ if (i == CGI.Operands.size())
I->error("'" + InstResults.begin()->first +
"' set but does not appear in operand list!");
- const std::string &OpName = CGI.OperandList[i].Name;
-
+ const std::string &OpName = CGI.Operands[i].Name;
+
// Check that it exists in InstResults.
TreePatternNode *RNode = InstResults[OpName];
if (RNode == 0)
I->error("Operand $" + OpName + " does not exist in operand list!");
-
+
if (i == 0)
Res0Node = RNode;
Record *R = dynamic_cast<DefInit*>(RNode->getLeafValue())->getDef();
if (R == 0)
I->error("Operand $" + OpName + " should be a set destination: all "
"outputs must occur before inputs in operand list!");
-
- if (CGI.OperandList[i].Rec != R)
+
+ if (CGI.Operands[i].Rec != R)
I->error("Operand $" + OpName + " class mismatch!");
-
+
// Remember the return type.
- Results.push_back(CGI.OperandList[i].Rec);
-
+ Results.push_back(CGI.Operands[i].Rec);
+
// Okay, this one checks out.
InstResults.erase(OpName);
}
@@ -2381,8 +2534,8 @@ void CodeGenDAGPatterns::ParseInstructions() {
std::vector<TreePatternNode*> ResultNodeOperands;
std::vector<Record*> Operands;
- for (unsigned i = NumResults, e = CGI.OperandList.size(); i != e; ++i) {
- CodeGenInstruction::OperandInfo &Op = CGI.OperandList[i];
+ for (unsigned i = NumResults, e = CGI.Operands.size(); i != e; ++i) {
+ CGIOperandList::OperandInfo &Op = CGI.Operands[i];
const std::string &OpName = Op.Name;
if (OpName.empty())
I->error("Operand #" + utostr(i) + " in operands list has no name!");
@@ -2403,7 +2556,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
}
TreePatternNode *InVal = InstInputsCheck[OpName];
InstInputsCheck.erase(OpName); // It occurred, remove from map.
-
+
if (InVal->isLeaf() &&
dynamic_cast<DefInit*>(InVal->getLeafValue())) {
Record *InRec = static_cast<DefInit*>(InVal->getLeafValue())->getDef();
@@ -2412,13 +2565,13 @@ void CodeGenDAGPatterns::ParseInstructions() {
" between the operand and pattern");
}
Operands.push_back(Op.Rec);
-
+
// Construct the result for the dest-pattern operand list.
TreePatternNode *OpNode = InVal->clone();
-
+
// No predicate is useful on the result.
OpNode->clearPredicateFns();
-
+
// Promote the xform function to be an explicit node if set.
if (Record *Xform = OpNode->getTransformFn()) {
OpNode->setTransformFn(0);
@@ -2426,10 +2579,10 @@ void CodeGenDAGPatterns::ParseInstructions() {
Children.push_back(OpNode);
OpNode = new TreePatternNode(Xform, Children, OpNode->getNumTypes());
}
-
+
ResultNodeOperands.push_back(OpNode);
}
-
+
if (!InstInputsCheck.empty())
I->error("Input operand $" + InstInputsCheck.begin()->first +
" occurs in pattern but not in operands list!");
@@ -2454,10 +2607,10 @@ void CodeGenDAGPatterns::ParseInstructions() {
DAGInstruction &TheInsertedInst = Instructions.find(I->getRecord())->second;
TheInsertedInst.setResultPattern(Temp.getOnlyTree());
-
+
DEBUG(I->dump());
}
-
+
// If we can, convert the instructions to be patterns that are matched!
for (std::map<Record*, DAGInstruction, RecordPtrCmp>::iterator II =
Instructions.begin(),
@@ -2476,10 +2629,11 @@ void CodeGenDAGPatterns::ParseInstructions() {
// Not a set (store or something?)
SrcPattern = Pattern;
}
-
+
Record *Instr = II->first;
AddPatternToMatch(I,
- PatternToMatch(Instr->getValueAsListInit("Predicates"),
+ PatternToMatch(Instr,
+ Instr->getValueAsListInit("Predicates"),
SrcPattern,
TheInst.getResultPattern(),
TheInst.getImpResults(),
@@ -2491,7 +2645,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
typedef std::pair<const TreePatternNode*, unsigned> NameRecord;
-static void FindNames(const TreePatternNode *P,
+static void FindNames(const TreePatternNode *P,
std::map<std::string, NameRecord> &Names,
const TreePattern *PatternTop) {
if (!P->getName().empty()) {
@@ -2503,7 +2657,7 @@ static void FindNames(const TreePatternNode *P,
PatternTop->error("repetition of value: $" + P->getName() +
" where different uses have different types!");
}
-
+
if (!P->isLeaf()) {
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
FindNames(P->getChild(i), Names, PatternTop);
@@ -2516,7 +2670,7 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
std::string Reason;
if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this))
Pattern->error("Pattern can never match: " + Reason);
-
+
// If the source pattern's root is a complex pattern, that complex pattern
// must specify the nodes it can potentially match.
if (const ComplexPattern *CP =
@@ -2524,8 +2678,8 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
if (CP->getRootNodes().empty())
Pattern->error("ComplexPattern at root must specify list of opcodes it"
" could match");
-
-
+
+
// Find all of the named values in the input and output, ensure they have the
// same type.
std::map<std::string, NameRecord> SrcNames, DstNames;
@@ -2540,14 +2694,14 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
Pattern->error("Pattern has input without matching name in output: $" +
I->first);
}
-
+
// Scan all of the named values in the source pattern, rejecting them if the
// name isn't used in the dest, and isn't used to tie two values together.
for (std::map<std::string, NameRecord>::iterator
I = SrcNames.begin(), E = SrcNames.end(); I != E; ++I)
if (DstNames[I->first].first == 0 && SrcNames[I->first].second == 1)
Pattern->error("Pattern has dead named input: $" + I->first);
-
+
PatternsToMatch.push_back(PTM);
}
@@ -2566,7 +2720,7 @@ void CodeGenDAGPatterns::InferInstructionFlags() {
InstInfo.mayStore = MayStore;
InstInfo.mayLoad = MayLoad;
InstInfo.hasSideEffects = HasSideEffects;
- InstInfo.isVariadic = IsVariadic;
+ InstInfo.Operands.isVariadic = IsVariadic;
}
}
@@ -2576,7 +2730,7 @@ void CodeGenDAGPatterns::InferInstructionFlags() {
static bool ForceArbitraryInstResultType(TreePatternNode *N, TreePattern &TP) {
if (N->isLeaf())
return false;
-
+
// Analyze children.
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
if (ForceArbitraryInstResultType(N->getChild(i), TP))
@@ -2590,12 +2744,12 @@ static bool ForceArbitraryInstResultType(TreePatternNode *N, TreePattern &TP) {
for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i) {
if (N->getExtType(i).isCompletelyUnknown() || N->getExtType(i).isConcrete())
continue;
-
+
// Otherwise, force its type to the first possibility (an arbitrary choice).
if (N->getExtType(i).MergeInTypeInfo(N->getExtType(i).getTypeList()[0], TP))
return true;
}
-
+
return false;
}
@@ -2609,20 +2763,20 @@ void CodeGenDAGPatterns::ParsePatterns() {
// Inline pattern fragments into it.
Pattern->InlinePatternFragments();
-
+
ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
if (LI->getSize() == 0) continue; // no pattern.
-
+
// Parse the instruction.
TreePattern *Result = new TreePattern(CurPattern, LI, false, *this);
-
+
// Inline pattern fragments into it.
Result->InlinePatternFragments();
if (Result->getNumTrees() != 1)
Result->error("Cannot handle instructions producing instructions "
"with temporaries yet!");
-
+
bool IterateInference;
bool InferredAllPatternTypes, InferredAllResultTypes;
do {
@@ -2630,14 +2784,14 @@ void CodeGenDAGPatterns::ParsePatterns() {
// can never do anything with this pattern: report it to the user.
InferredAllPatternTypes =
Pattern->InferAllTypes(&Pattern->getNamedNodesMap());
-
+
// Infer as many types as possible. If we cannot infer all of them, we
// can never do anything with this pattern: report it to the user.
InferredAllResultTypes =
Result->InferAllTypes(&Pattern->getNamedNodesMap());
IterateInference = false;
-
+
// Apply the type of the result to the source pattern. This helps us
// resolve cases where the input type is known to be a pointer type (which
// is considered resolved), but the result knows it needs to be 32- or
@@ -2650,7 +2804,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
IterateInference |= Result->getTree(0)->
UpdateNodeType(i, Pattern->getTree(0)->getExtType(i), *Result);
}
-
+
// If our iteration has converged and the input pattern's types are fully
// resolved but the result pattern is not fully resolved, we may have a
// situation where we have two instructions in the result pattern and
@@ -2665,7 +2819,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
IterateInference = ForceArbitraryInstResultType(Result->getTree(0),
*Result);
} while (IterateInference);
-
+
// Verify that we inferred enough types that we can do something with the
// pattern and result. If these fire the user has to add type casts.
if (!InferredAllPatternTypes)
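
The loop ending above is a fixed-point iteration that shuttles type facts in both directions between the source and result trees; only when it stalls with the source fully resolved and the result not does it fall back to picking an arbitrary legal type. Its shape, with the helper names invented for brevity:

    bool Changed;
    do {
      Changed  = inferTypes(Pattern);              // source side
      Changed |= inferTypes(Result);               // destination side
      Changed |= copyRootTypes(Result, Pattern);   // result root -> source root
      Changed |= copyRootTypes(Pattern, Result);   // source root -> result root
      if (!Changed && fullyTyped(Pattern) && !fullyTyped(Result))
        Changed = ForceArbitraryInstResultType(Result->getTree(0), *Result);
    } while (Changed);
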
@@ -2674,7 +2828,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
Pattern->dump();
Result->error("Could not infer all types in pattern result!");
}
-
+
// Validate that the input pattern is correct.
std::map<std::string, TreePatternNode*> InstInputs;
std::map<std::string, TreePatternNode*> InstResults;
@@ -2702,16 +2856,17 @@ void CodeGenDAGPatterns::ParsePatterns() {
DstPattern = new TreePatternNode(DstPattern->getOperator(),
ResultNodeOperands,
DstPattern->getNumTypes());
-
+
for (unsigned i = 0, e = Result->getOnlyTree()->getNumTypes(); i != e; ++i)
DstPattern->setType(i, Result->getOnlyTree()->getExtType(i));
-
+
TreePattern Temp(Result->getRecord(), DstPattern, false, *this);
Temp.InferAllTypes();
-
+
AddPatternToMatch(Pattern,
- PatternToMatch(CurPattern->getValueAsListInit("Predicates"),
+ PatternToMatch(CurPattern,
+ CurPattern->getValueAsListInit("Predicates"),
Pattern->getTree(0),
Temp.getOnlyTree(), InstImpResults,
CurPattern->getValueAsInt("AddedComplexity"),
@@ -2721,7 +2876,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
/// CombineChildVariants - Given a bunch of permutations of each child of the
/// 'operator' node, put them together in all possible ways.
-static void CombineChildVariants(TreePatternNode *Orig,
+static void CombineChildVariants(TreePatternNode *Orig,
const std::vector<std::vector<TreePatternNode*> > &ChildVariants,
std::vector<TreePatternNode*> &OutVariants,
CodeGenDAGPatterns &CDP,
@@ -2730,7 +2885,7 @@ static void CombineChildVariants(TreePatternNode *Orig,
for (unsigned i = 0, e = ChildVariants.size(); i != e; ++i)
if (ChildVariants[i].empty())
return;
-
+
// The end result is an all-pairs construction of the resultant pattern.
std::vector<unsigned> Idxs;
Idxs.resize(ChildVariants.size());
@@ -2751,21 +2906,21 @@ static void CombineChildVariants(TreePatternNode *Orig,
NewChildren.push_back(ChildVariants[i][Idxs[i]]);
TreePatternNode *R = new TreePatternNode(Orig->getOperator(), NewChildren,
Orig->getNumTypes());
-
+
// Copy over properties.
R->setName(Orig->getName());
R->setPredicateFns(Orig->getPredicateFns());
R->setTransformFn(Orig->getTransformFn());
for (unsigned i = 0, e = Orig->getNumTypes(); i != e; ++i)
R->setType(i, Orig->getExtType(i));
-
+
// If this pattern cannot match, do not include it as a variant.
std::string ErrString;
if (!R->canPatternMatch(ErrString, CDP)) {
delete R;
} else {
bool AlreadyExists = false;
-
+
// Scan to see if this pattern has already been emitted. We can get
// duplication due to things like commuting:
// (and GPRC:$a, GPRC:$b) -> (and GPRC:$b, GPRC:$a)
@@ -2775,13 +2930,13 @@ static void CombineChildVariants(TreePatternNode *Orig,
AlreadyExists = true;
break;
}
-
+
if (AlreadyExists)
delete R;
else
OutVariants.push_back(R);
}
-
+
// Increment indices to the next permutation by incrementing the
// indices from the last index backward, e.g., generate the sequence
// [0, 0], [0, 1], [1, 0], [1, 1].
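
The index bookkeeping described above is an odometer over mixed radices; pulled out on its own it is just:

    #include <cstddef>
    #include <vector>

    // Odometer step: Radix[i] is the number of variants of child i. For two
    // binary digits this yields [0,0], [0,1], [1,0], [1,1], then reports done.
    static bool NextCombination(std::vector<size_t> &Idxs,
                                const std::vector<size_t> &Radix) {
      for (size_t i = Idxs.size(); i-- != 0; ) {
        if (++Idxs[i] != Radix[i])
          return true;   // no carry out of this digit: a fresh combination
        Idxs[i] = 0;     // wrap this digit, carry into the one to its left
      }
      return false;      // carried off the left end: enumeration complete
    }
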
@@ -2798,7 +2953,7 @@ static void CombineChildVariants(TreePatternNode *Orig,
/// CombineChildVariants - A helper function for binary operators.
///
-static void CombineChildVariants(TreePatternNode *Orig,
+static void CombineChildVariants(TreePatternNode *Orig,
const std::vector<TreePatternNode*> &LHS,
const std::vector<TreePatternNode*> &RHS,
std::vector<TreePatternNode*> &OutVariants,
@@ -2808,14 +2963,14 @@ static void CombineChildVariants(TreePatternNode *Orig,
ChildVariants.push_back(LHS);
ChildVariants.push_back(RHS);
CombineChildVariants(Orig, ChildVariants, OutVariants, CDP, DepVars);
-}
+}
static void GatherChildrenOfAssociativeOpcode(TreePatternNode *N,
std::vector<TreePatternNode *> &Children) {
assert(N->getNumChildren()==2 &&"Associative but doesn't have 2 children!");
Record *Operator = N->getOperator();
-
+
// Only permit raw nodes.
if (!N->getName().empty() || !N->getPredicateFns().empty() ||
N->getTransformFn()) {
@@ -2852,7 +3007,7 @@ static void GenerateVariantsOf(TreePatternNode *N,
// If this node is associative, re-associate.
if (NodeInfo.hasProperty(SDNPAssociative)) {
- // Re-associate by pulling together all of the linked operators
+ // Re-associate by pulling together all of the linked operators
std::vector<TreePatternNode*> MaximalChildren;
GatherChildrenOfAssociativeOpcode(N, MaximalChildren);
@@ -2864,11 +3019,11 @@ static void GenerateVariantsOf(TreePatternNode *N,
GenerateVariantsOf(MaximalChildren[0], AVariants, CDP, DepVars);
GenerateVariantsOf(MaximalChildren[1], BVariants, CDP, DepVars);
GenerateVariantsOf(MaximalChildren[2], CVariants, CDP, DepVars);
-
+
// There are only two ways we can permute the tree:
// (A op B) op C and A op (B op C)
// Within these forms, we can also permute A/B/C.
-
+
// Generate legal pair permutations of A/B/C.
std::vector<TreePatternNode*> ABVariants;
std::vector<TreePatternNode*> BAVariants;
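
Spelling out what these pair lists combine into: with three gathered operands there are only two association shapes, and each slot can additionally draw on any variant of its subtree, so the output covers, for example:

    // ((A op B) op C), ((B op A) op C), ((A op C) op B), ...  // left-nested
    // (A op (B op C)), (A op (C op B)), (B op (A op C)), ...  // right-nested
    // Each is produced by CombineChildVariants on a two-element pair list
    // (ABVariants, BAVariants, ...) and the remaining child's variant list.
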
@@ -2901,7 +3056,7 @@ static void GenerateVariantsOf(TreePatternNode *N,
return;
}
}
-
+
// Compute permutations of all children.
std::vector<std::vector<TreePatternNode*> > ChildVariants;
ChildVariants.resize(N->getNumChildren());
@@ -2953,7 +3108,7 @@ static void GenerateVariantsOf(TreePatternNode *N,
// match multiple ways. Add them to PatternsToMatch as well.
void CodeGenDAGPatterns::GenerateVariants() {
DEBUG(errs() << "Generating instruction variants.\n");
-
+
// Loop over all of the patterns we've collected, checking to see if we can
// generate variants of the instruction, through the exploitation of
// identities. This permits the target to provide aggressive matching without
@@ -2970,7 +3125,8 @@ void CodeGenDAGPatterns::GenerateVariants() {
DEBUG(errs() << "Dependent/multiply used variables: ");
DEBUG(DumpDepVars(DepVars));
DEBUG(errs() << "\n");
- GenerateVariantsOf(PatternsToMatch[i].getSrcPattern(), Variants, *this, DepVars);
+ GenerateVariantsOf(PatternsToMatch[i].getSrcPattern(), Variants, *this,
+ DepVars);
assert(!Variants.empty() && "Must create at least original variant!");
Variants.erase(Variants.begin()); // Remove the original pattern.
@@ -2988,7 +3144,7 @@ void CodeGenDAGPatterns::GenerateVariants() {
DEBUG(errs() << " VAR#" << v << ": ";
Variant->dump();
errs() << "\n");
-
+
// Scan to see if an instruction or explicit pattern already matches this.
bool AlreadyExists = false;
for (unsigned p = 0, e = PatternsToMatch.size(); p != e; ++p) {
@@ -2997,7 +3153,8 @@ void CodeGenDAGPatterns::GenerateVariants() {
PatternsToMatch[p].getPredicates())
continue;
// Check to see if this variant already exists.
- if (Variant->isIsomorphicTo(PatternsToMatch[p].getSrcPattern(), DepVars)) {
+ if (Variant->isIsomorphicTo(PatternsToMatch[p].getSrcPattern(),
+ DepVars)) {
DEBUG(errs() << " *** ALREADY EXISTS, ignoring variant.\n");
AlreadyExists = true;
break;
@@ -3008,7 +3165,8 @@ void CodeGenDAGPatterns::GenerateVariants() {
// Otherwise, add it to the list of patterns we have.
PatternsToMatch.
- push_back(PatternToMatch(PatternsToMatch[i].getPredicates(),
+ push_back(PatternToMatch(PatternsToMatch[i].getSrcRecord(),
+ PatternsToMatch[i].getPredicates(),
Variant, PatternsToMatch[i].getDstPattern(),
PatternsToMatch[i].getDstRegs(),
PatternsToMatch[i].getAddedComplexity(),
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
index 0a1362a..946dcee 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
@@ -58,50 +58,50 @@ namespace EEVT {
public:
TypeSet() {}
TypeSet(MVT::SimpleValueType VT, TreePattern &TP);
- TypeSet(const std::vector<MVT::SimpleValueType> &VTList);
-
+ TypeSet(const std::vector<MVT::SimpleValueType> &VTList);
+
bool isCompletelyUnknown() const { return TypeVec.empty(); }
-
+
bool isConcrete() const {
if (TypeVec.size() != 1) return false;
unsigned char T = TypeVec[0]; (void)T;
assert(T < MVT::LAST_VALUETYPE || T == MVT::iPTR || T == MVT::iPTRAny);
return true;
}
-
+
MVT::SimpleValueType getConcrete() const {
assert(isConcrete() && "Type isn't concrete yet");
return (MVT::SimpleValueType)TypeVec[0];
}
-
+
bool isDynamicallyResolved() const {
return getConcrete() == MVT::iPTR || getConcrete() == MVT::iPTRAny;
}
-
+
const SmallVectorImpl<MVT::SimpleValueType> &getTypeList() const {
assert(!TypeVec.empty() && "Not a type list!");
return TypeVec;
}
-
+
bool isVoid() const {
return TypeVec.size() == 1 && TypeVec[0] == MVT::isVoid;
}
-
+
/// hasIntegerTypes - Return true if this TypeSet contains any integer value
/// types.
bool hasIntegerTypes() const;
-
+
/// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
/// a floating point value type.
bool hasFloatingPointTypes() const;
-
+
/// hasVectorTypes - Return true if this TypeSet contains a vector value
/// type.
bool hasVectorTypes() const;
-
+
/// getName() - Return this TypeSet as a string.
std::string getName() const;
-
+
/// MergeInTypeInfo - This merges in type information from the specified
/// argument. If 'this' changes, it returns true. If the two types are
/// contradictory (e.g. merge f32 into i32) then this throws an exception.
@@ -126,14 +126,18 @@ namespace EEVT {
/// EnforceSmallerThan - 'this' must be a smaller VT than Other. Update
/// this and Other based on this information.
bool EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP);
-
+
/// EnforceVectorEltTypeIs - 'this' is now constrained to be a vector type
/// whose element is VT.
bool EnforceVectorEltTypeIs(EEVT::TypeSet &VT, TreePattern &TP);
-
+
+ /// EnforceVectorSubVectorTypeIs - 'this' is now constrained to
+ /// be a vector type VT.
+ bool EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VT, TreePattern &TP);
+
bool operator!=(const TypeSet &RHS) const { return TypeVec != RHS.TypeVec; }
bool operator==(const TypeSet &RHS) const { return TypeVec == RHS.TypeVec; }
-
+
private:
/// FillWithPossibleTypes - Set to all legal types and return true, only
/// valid on completely unknown type sets. If Pred is non-null, only MVTs
@@ -151,13 +155,14 @@ typedef std::set<std::string> MultipleUseVarSet;
/// corresponding to the SDTypeConstraint tablegen class in Target.td.
struct SDTypeConstraint {
SDTypeConstraint(Record *R);
-
+
unsigned OperandNo; // The operand # this constraint applies to.
- enum {
- SDTCisVT, SDTCisPtrTy, SDTCisInt, SDTCisFP, SDTCisVec, SDTCisSameAs,
- SDTCisVTSmallerThanOp, SDTCisOpSmallerThanOp, SDTCisEltOfVec
+ enum {
+ SDTCisVT, SDTCisPtrTy, SDTCisInt, SDTCisFP, SDTCisVec, SDTCisSameAs,
+ SDTCisVTSmallerThanOp, SDTCisOpSmallerThanOp, SDTCisEltOfVec,
+ SDTCisSubVecOfVec
} ConstraintType;
-
+
union { // The discriminated union.
struct {
MVT::SimpleValueType VT;
@@ -174,6 +179,9 @@ struct SDTypeConstraint {
struct {
unsigned OtherOperandNum;
} SDTCisEltOfVec_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisSubVecOfVec_Info;
} x;
/// ApplyTypeConstraint - Given a node in a pattern, apply this type
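
The new SDTCisSubVecOfVec arm mirrors SDTCisEltOfVec: one extra enumerator, one extra payload struct carrying the other operand's index, and (not shown in this hunk) a matching case in the constraint applier. A hypothetical sketch of both ends, assuming the TableGen class exposes an OtherOpNum field like its sibling does:

    // Parsing side (sketch, modeled on the SDTCisEltOfVec case):
    else if (R->isSubClassOf("SDTCisSubVecOfVec")) {
      ConstraintType = SDTCisSubVecOfVec;
      x.SDTCisSubVecOfVec_Info.OtherOperandNum = R->getValueAsInt("OtherOpNum");
    }
    // Application side (sketch): constrain this operand to be a subvector
    // of the other operand's vector type.
    case SDTCisSubVecOfVec:
      return BigVecOperand->getExtType(OResNo)
          .EnforceVectorSubVectorTypeIs(NodeToApply->getExtType(ResNo), TP);
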
@@ -197,25 +205,25 @@ class SDNodeInfo {
std::vector<SDTypeConstraint> TypeConstraints;
public:
SDNodeInfo(Record *R); // Parse the specified record.
-
+
unsigned getNumResults() const { return NumResults; }
-
+
/// getNumOperands - This is the number of operands required or -1 if
/// variadic.
int getNumOperands() const { return NumOperands; }
Record *getRecord() const { return Def; }
const std::string &getEnumName() const { return EnumName; }
const std::string &getSDClassName() const { return SDClassName; }
-
+
const std::vector<SDTypeConstraint> &getTypeConstraints() const {
return TypeConstraints;
}
-
+
/// getKnownType - If the type constraints on this node imply a fixed type
/// (e.g. all stores return void, etc), then return it as an
/// MVT::SimpleValueType. Otherwise, return MVT::Other.
MVT::SimpleValueType getKnownType(unsigned ResNo) const;
-
+
/// hasProperty - Return true if this node has the specified property.
///
bool hasProperty(enum SDNP Prop) const { return Properties & (1 << Prop); }
@@ -240,31 +248,31 @@ class TreePatternNode {
/// result may be a set of possible types. After (successful) type inference,
/// each is a single concrete type.
SmallVector<EEVT::TypeSet, 1> Types;
-
+
/// Operator - The Record for the operator if this is an interior node (not
/// a leaf).
Record *Operator;
-
+
/// Val - The init value (e.g. the "GPRC" record, or "7") for a leaf.
///
Init *Val;
-
+
/// Name - The name given to this node with the :$foo notation.
///
std::string Name;
-
+
/// PredicateFns - The predicate functions to execute on this node to check
/// for a match. If this list is empty, no predicate is involved.
std::vector<std::string> PredicateFns;
-
+
/// TransformFn - The transformation function to execute on this node before
/// it can be substituted into the resulting instruction on a pattern match.
Record *TransformFn;
-
+
std::vector<TreePatternNode*> Children;
public:
TreePatternNode(Record *Op, const std::vector<TreePatternNode*> &Ch,
- unsigned NumResults)
+ unsigned NumResults)
: Operator(Op), Val(0), TransformFn(0), Children(Ch) {
Types.resize(NumResults);
}
@@ -273,12 +281,12 @@ public:
Types.resize(NumResults);
}
~TreePatternNode();
-
+
const std::string &getName() const { return Name; }
void setName(StringRef N) { Name.assign(N.begin(), N.end()); }
-
+
bool isLeaf() const { return Val != 0; }
-
+
// Type accessors.
unsigned getNumTypes() const { return Types.size(); }
MVT::SimpleValueType getType(unsigned ResNo) const {
@@ -288,7 +296,7 @@ public:
const EEVT::TypeSet &getExtType(unsigned ResNo) const { return Types[ResNo]; }
EEVT::TypeSet &getExtType(unsigned ResNo) { return Types[ResNo]; }
void setType(unsigned ResNo, const EEVT::TypeSet &T) { Types[ResNo] = T; }
-
+
bool hasTypeSet(unsigned ResNo) const {
return Types[ResNo].isConcrete();
}
@@ -298,16 +306,16 @@ public:
bool isTypeDynamicallyResolved(unsigned ResNo) const {
return Types[ResNo].isDynamicallyResolved();
}
-
+
Init *getLeafValue() const { assert(isLeaf()); return Val; }
Record *getOperator() const { assert(!isLeaf()); return Operator; }
-
+
unsigned getNumChildren() const { return Children.size(); }
TreePatternNode *getChild(unsigned N) const { return Children[N]; }
void setChild(unsigned i, TreePatternNode *N) {
Children[i] = N;
}
-
+
/// hasChild - Return true if N is any of our children.
bool hasChild(const TreePatternNode *N) const {
for (unsigned i = 0, e = Children.size(); i != e; ++i)
@@ -321,7 +329,7 @@ public:
assert(PredicateFns.empty() && "Overwriting non-empty predicate list!");
PredicateFns = Fns;
}
- void addPredicateFn(const std::string &Fn) {
+ void addPredicateFn(const std::string &Fn) {
assert(!Fn.empty() && "Empty predicate string!");
if (std::find(PredicateFns.begin(), PredicateFns.end(), Fn) ==
PredicateFns.end())
@@ -330,7 +338,7 @@ public:
Record *getTransformFn() const { return TransformFn; }
void setTransformFn(Record *Fn) { TransformFn = Fn; }
-
+
/// getIntrinsicInfo - If this node corresponds to an intrinsic, return the
/// CodeGenIntrinsic information for it, otherwise return a null pointer.
const CodeGenIntrinsic *getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const;
@@ -342,18 +350,18 @@ public:
/// NodeHasProperty - Return true if this node has the specified property.
bool NodeHasProperty(SDNP Property, const CodeGenDAGPatterns &CGP) const;
-
+
/// TreeHasProperty - Return true if any node in this tree has the specified
/// property.
bool TreeHasProperty(SDNP Property, const CodeGenDAGPatterns &CGP) const;
-
+
/// isCommutativeIntrinsic - Return true if the node is an intrinsic which is
/// marked isCommutative.
bool isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const;
-
+
void print(raw_ostream &OS) const;
void dump() const;
-
+
public: // Higher level manipulation routines.
/// clone - Return a new copy of this tree.
@@ -362,14 +370,14 @@ public: // Higher level manipulation routines.
/// RemoveAllTypes - Recursively strip all the types of this tree.
void RemoveAllTypes();
-
+
/// isIsomorphicTo - Return true if this node is recursively isomorphic to
/// the specified node. For this comparison, all of the state of the node
/// is considered, except for the assigned name. Nodes with differing names
/// that are otherwise identical are considered isomorphic.
bool isIsomorphicTo(const TreePatternNode *N,
const MultipleUseVarSet &DepVars) const;
-
+
/// SubstituteFormalArguments - Replace the formal arguments in this tree
/// with actual values specified by ArgMap.
void SubstituteFormalArguments(std::map<std::string,
@@ -379,13 +387,13 @@ public: // Higher level manipulation routines.
/// fragments, inline them into place, giving us a pattern without any
/// PatFrag references.
TreePatternNode *InlinePatternFragments(TreePattern &TP);
-
+
/// ApplyTypeConstraints - Apply all of the type constraints relevant to
/// this node and its children in the tree. This returns true if it makes a
/// change, false otherwise. If a type contradiction is found, throw an
/// exception.
bool ApplyTypeConstraints(TreePattern &TP, bool NotRegisters);
-
+
/// UpdateNodeType - Set the node type of N to VT if VT contains
/// information. If N already contains a conflicting type, then throw an
/// exception. This returns true if any information was updated.
@@ -399,18 +407,18 @@ public: // Higher level manipulation routines.
TreePattern &TP) {
return Types[ResNo].MergeInTypeInfo(EEVT::TypeSet(InTy, TP), TP);
}
-
+
/// ContainsUnresolvedType - Return true if this tree contains any
/// unresolved types.
bool ContainsUnresolvedType() const {
for (unsigned i = 0, e = Types.size(); i != e; ++i)
if (!Types[i].isConcrete()) return true;
-
+
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
if (getChild(i)->ContainsUnresolvedType()) return true;
return false;
}
-
+
/// canPatternMatch - If it is impossible for this pattern to match on this
/// target, fill in Reason and return false. Otherwise, return true.
bool canPatternMatch(std::string &Reason, const CodeGenDAGPatterns &CDP);
@@ -420,7 +428,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const TreePatternNode &TPN) {
TPN.print(OS);
return OS;
}
-
+
/// TreePattern - Represent a pattern, used for instructions, pattern
/// fragments, etc.
@@ -430,19 +438,19 @@ class TreePattern {
/// Note that PatFrags only have a single tree.
///
std::vector<TreePatternNode*> Trees;
-
+
/// NamedNodes - This is all of the nodes that have names in the trees in this
/// pattern.
StringMap<SmallVector<TreePatternNode*,1> > NamedNodes;
-
+
/// TheRecord - The actual TableGen record corresponding to this pattern.
///
Record *TheRecord;
-
+
/// Args - This is a list of all of the arguments to this pattern (for
/// PatFrag patterns), which are the 'node' markers in this pattern.
std::vector<std::string> Args;
-
+
/// CDP - the top-level object coordinating this madness.
///
CodeGenDAGPatterns &CDP;
@@ -451,7 +459,7 @@ class TreePattern {
/// False if this is an output pattern, something to emit.
bool isInputPattern;
public:
-
+
/// TreePattern constructor - Parse the specified DagInits into the
/// current record.
TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
@@ -460,7 +468,7 @@ public:
CodeGenDAGPatterns &ise);
TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
CodeGenDAGPatterns &ise);
-
+
/// getTrees - Return the tree patterns which correspond to this pattern.
///
const std::vector<TreePatternNode*> &getTrees() const { return Trees; }
@@ -470,25 +478,25 @@ public:
assert(Trees.size() == 1 && "Doesn't have exactly one pattern!");
return Trees[0];
}
-
+
const StringMap<SmallVector<TreePatternNode*,1> > &getNamedNodesMap() {
if (NamedNodes.empty())
ComputeNamedNodes();
return NamedNodes;
}
-
+
/// getRecord - Return the actual TableGen record corresponding to this
/// pattern.
///
Record *getRecord() const { return TheRecord; }
-
+
unsigned getNumArgs() const { return Args.size(); }
const std::string &getArgName(unsigned i) const {
assert(i < Args.size() && "Argument reference out of range!");
return Args[i];
}
std::vector<std::string> &getArgList() { return Args; }
-
+
CodeGenDAGPatterns &getDAGPatterns() const { return CDP; }
/// InlinePatternFragments - If this pattern refers to any pattern
@@ -498,20 +506,20 @@ public:
for (unsigned i = 0, e = Trees.size(); i != e; ++i)
Trees[i] = Trees[i]->InlinePatternFragments(*this);
}
-
+
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
/// otherwise. Throw an exception if a type contradiction is found.
bool InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> >
*NamedTypes=0);
-
+
/// error - Throw an exception, prefixing it with information about this
/// pattern.
void error(const std::string &Msg) const;
-
+
void print(raw_ostream &OS) const;
void dump() const;
-
+
private:
TreePatternNode *ParseTreePattern(Init *DI, StringRef OpName);
void ComputeNamedNodes();
@@ -535,7 +543,7 @@ public:
const std::vector<Record*> &results,
const std::vector<Record*> &operands,
const std::vector<Record*> &impresults)
- : Pattern(TP), Results(results), Operands(operands),
+ : Pattern(TP), Results(results), Operands(operands),
ImpResults(impresults), ResultPattern(0) {}
const TreePattern *getPattern() const { return Pattern; }
@@ -543,14 +551,14 @@ public:
unsigned getNumOperands() const { return Operands.size(); }
unsigned getNumImpResults() const { return ImpResults.size(); }
const std::vector<Record*>& getImpResults() const { return ImpResults; }
-
+
void setResultPattern(TreePatternNode *R) { ResultPattern = R; }
-
+
Record *getResult(unsigned RN) const {
assert(RN < Results.size());
return Results[RN];
}
-
+
Record *getOperand(unsigned ON) const {
assert(ON < Operands.size());
return Operands[ON];
@@ -560,21 +568,22 @@ public:
assert(RN < ImpResults.size());
return ImpResults[RN];
}
-
+
TreePatternNode *getResultPattern() const { return ResultPattern; }
};
-
+
/// PatternToMatch - Used by CodeGenDAGPatterns to keep track of patterns
/// processed to produce isel.
class PatternToMatch {
public:
- PatternToMatch(ListInit *preds,
+ PatternToMatch(Record *srcrecord, ListInit *preds,
TreePatternNode *src, TreePatternNode *dst,
const std::vector<Record*> &dstregs,
unsigned complexity, unsigned uid)
- : Predicates(preds), SrcPattern(src), DstPattern(dst),
+ : SrcRecord(srcrecord), Predicates(preds), SrcPattern(src), DstPattern(dst),
Dstregs(dstregs), AddedComplexity(complexity), ID(uid) {}
+ Record *SrcRecord; // Originating Record for the pattern.
ListInit *Predicates; // Top level predicate conditions to match.
TreePatternNode *SrcPattern; // Source pattern to match.
TreePatternNode *DstPattern; // Resulting pattern.
@@ -582,6 +591,7 @@ public:
unsigned AddedComplexity; // Add to matching pattern complexity.
unsigned ID; // Unique ID for the record.
+ Record *getSrcRecord() const { return SrcRecord; }
ListInit *getPredicates() const { return Predicates; }
TreePatternNode *getSrcPattern() const { return SrcPattern; }
TreePatternNode *getDstPattern() const { return DstPattern; }
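
Threading the originating Record into PatternToMatch gives downstream emitters a handle back to the .td definition; a trivial hypothetical use:

    // Sketch: annotate emitted matcher output with the defining record.
    errs() << "// pattern #" << PTM.ID << " from "
           << PTM.getSrcRecord()->getName() << "\n";
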
@@ -589,7 +599,7 @@ public:
unsigned getAddedComplexity() const { return AddedComplexity; }
std::string getPredicateCheck() const;
-
+
/// Compute the complexity metric for the input pattern. This roughly
/// corresponds to the number of nodes that are covered.
unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
@@ -599,60 +609,60 @@ public:
struct RecordPtrCmp {
bool operator()(const Record *LHS, const Record *RHS) const;
};
-
+
class CodeGenDAGPatterns {
RecordKeeper &Records;
CodeGenTarget Target;
std::vector<CodeGenIntrinsic> Intrinsics;
std::vector<CodeGenIntrinsic> TgtIntrinsics;
-
+
std::map<Record*, SDNodeInfo, RecordPtrCmp> SDNodes;
std::map<Record*, std::pair<Record*, std::string>, RecordPtrCmp> SDNodeXForms;
std::map<Record*, ComplexPattern, RecordPtrCmp> ComplexPatterns;
std::map<Record*, TreePattern*, RecordPtrCmp> PatternFragments;
std::map<Record*, DAGDefaultOperand, RecordPtrCmp> DefaultOperands;
std::map<Record*, DAGInstruction, RecordPtrCmp> Instructions;
-
+
// Specific SDNode definitions:
Record *intrinsic_void_sdnode;
Record *intrinsic_w_chain_sdnode, *intrinsic_wo_chain_sdnode;
-
+
/// PatternsToMatch - All of the things we are matching on the DAG. The first
/// value is the pattern to match, the second is the result to
/// emit.
std::vector<PatternToMatch> PatternsToMatch;
public:
- CodeGenDAGPatterns(RecordKeeper &R);
+ CodeGenDAGPatterns(RecordKeeper &R);
~CodeGenDAGPatterns();
-
+
CodeGenTarget &getTargetInfo() { return Target; }
const CodeGenTarget &getTargetInfo() const { return Target; }
-
+
Record *getSDNodeNamed(const std::string &Name) const;
-
+
const SDNodeInfo &getSDNodeInfo(Record *R) const {
assert(SDNodes.count(R) && "Unknown node!");
return SDNodes.find(R)->second;
}
-
+
// Node transformation lookups.
typedef std::pair<Record*, std::string> NodeXForm;
const NodeXForm &getSDNodeTransform(Record *R) const {
assert(SDNodeXForms.count(R) && "Invalid transform!");
return SDNodeXForms.find(R)->second;
}
-
+
typedef std::map<Record*, NodeXForm, RecordPtrCmp>::const_iterator
nx_iterator;
nx_iterator nx_begin() const { return SDNodeXForms.begin(); }
nx_iterator nx_end() const { return SDNodeXForms.end(); }
-
+
const ComplexPattern &getComplexPattern(Record *R) const {
assert(ComplexPatterns.count(R) && "Unknown addressing mode!");
return ComplexPatterns.find(R)->second;
}
-
+
const CodeGenIntrinsic &getIntrinsic(Record *R) const {
for (unsigned i = 0, e = Intrinsics.size(); i != e; ++i)
if (Intrinsics[i].TheDef == R) return Intrinsics[i];
@@ -661,7 +671,7 @@ public:
assert(0 && "Unknown intrinsic!");
abort();
}
-
+
const CodeGenIntrinsic &getIntrinsicInfo(unsigned IID) const {
if (IID-1 < Intrinsics.size())
return Intrinsics[IID-1];
@@ -670,7 +680,7 @@ public:
assert(0 && "Bad intrinsic ID!");
abort();
}
-
+
unsigned getIntrinsicID(Record *R) const {
for (unsigned i = 0, e = Intrinsics.size(); i != e; ++i)
if (Intrinsics[i].TheDef == R) return i;
@@ -679,12 +689,12 @@ public:
assert(0 && "Unknown intrinsic!");
abort();
}
-
+
const DAGDefaultOperand &getDefaultOperand(Record *R) const {
assert(DefaultOperands.count(R) &&"Isn't an analyzed default operand!");
return DefaultOperands.find(R)->second;
}
-
+
// Pattern Fragment information.
TreePattern *getPatternFragment(Record *R) const {
assert(PatternFragments.count(R) && "Invalid pattern fragment request!");
@@ -694,7 +704,7 @@ public:
if (!PatternFragments.count(R)) return 0;
return PatternFragments.find(R)->second;
}
-
+
typedef std::map<Record*, TreePattern*, RecordPtrCmp>::const_iterator
pf_iterator;
pf_iterator pf_begin() const { return PatternFragments.begin(); }
@@ -704,14 +714,14 @@ public:
typedef std::vector<PatternToMatch>::const_iterator ptm_iterator;
ptm_iterator ptm_begin() const { return PatternsToMatch.begin(); }
ptm_iterator ptm_end() const { return PatternsToMatch.end(); }
-
-
-
+
+
+
const DAGInstruction &getInstruction(Record *R) const {
assert(Instructions.count(R) && "Unknown instruction!");
return Instructions.find(R)->second;
}
-
+
Record *get_intrinsic_void_sdnode() const {
return intrinsic_void_sdnode;
}
@@ -721,7 +731,7 @@ public:
Record *get_intrinsic_wo_chain_sdnode() const {
return intrinsic_wo_chain_sdnode;
}
-
+
bool hasTargetIntrinsics() { return !TgtIntrinsics.empty(); }
private:
@@ -734,7 +744,7 @@ private:
void ParsePatterns();
void InferInstructionFlags();
void GenerateVariants();
-
+
void AddPatternToMatch(const TreePattern *Pattern, const PatternToMatch &PTM);
void FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
std::map<std::string,
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index 01a1fe1..f37d3ea 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -15,120 +15,19 @@
#include "CodeGenTarget.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/STLExtras.h"
#include <set>
using namespace llvm;
-static void ParseConstraint(const std::string &CStr, CodeGenInstruction *I) {
- // EARLY_CLOBBER: @early $reg
- std::string::size_type wpos = CStr.find_first_of(" \t");
- std::string::size_type start = CStr.find_first_not_of(" \t");
- std::string Tok = CStr.substr(start, wpos - start);
- if (Tok == "@earlyclobber") {
- std::string Name = CStr.substr(wpos+1);
- wpos = Name.find_first_not_of(" \t");
- if (wpos == std::string::npos)
- throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
- Name = Name.substr(wpos);
- std::pair<unsigned,unsigned> Op =
- I->ParseOperandName(Name, false);
-
- // Build the string for the operand
- if (!I->OperandList[Op.first].Constraints[Op.second].isNone())
- throw "Operand '" + Name + "' cannot have multiple constraints!";
- I->OperandList[Op.first].Constraints[Op.second] =
- CodeGenInstruction::ConstraintInfo::getEarlyClobber();
- return;
- }
-
- // Only other constraint is "TIED_TO" for now.
- std::string::size_type pos = CStr.find_first_of('=');
- assert(pos != std::string::npos && "Unrecognized constraint");
- start = CStr.find_first_not_of(" \t");
- std::string Name = CStr.substr(start, pos - start);
-
- // TIED_TO: $src1 = $dst
- wpos = Name.find_first_of(" \t");
- if (wpos == std::string::npos)
- throw "Illegal format for tied-to constraint: '" + CStr + "'";
- std::string DestOpName = Name.substr(0, wpos);
- std::pair<unsigned,unsigned> DestOp = I->ParseOperandName(DestOpName, false);
-
- Name = CStr.substr(pos+1);
- wpos = Name.find_first_not_of(" \t");
- if (wpos == std::string::npos)
- throw "Illegal format for tied-to constraint: '" + CStr + "'";
-
- std::pair<unsigned,unsigned> SrcOp =
- I->ParseOperandName(Name.substr(wpos), false);
- if (SrcOp > DestOp)
- throw "Illegal tied-to operand constraint '" + CStr + "'";
-
-
- unsigned FlatOpNo = I->getFlattenedOperandNumber(SrcOp);
-
- if (!I->OperandList[DestOp.first].Constraints[DestOp.second].isNone())
- throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
- I->OperandList[DestOp.first].Constraints[DestOp.second] =
- CodeGenInstruction::ConstraintInfo::getTied(FlatOpNo);
-}
-
-static void ParseConstraints(const std::string &CStr, CodeGenInstruction *I) {
- // Make sure the constraints list for each operand is large enough to hold
- // constraint info, even if none is present.
- for (unsigned i = 0, e = I->OperandList.size(); i != e; ++i)
- I->OperandList[i].Constraints.resize(I->OperandList[i].MINumOperands);
-
- if (CStr.empty()) return;
-
- const std::string delims(",");
- std::string::size_type bidx, eidx;
-
- bidx = CStr.find_first_not_of(delims);
- while (bidx != std::string::npos) {
- eidx = CStr.find_first_of(delims, bidx);
- if (eidx == std::string::npos)
- eidx = CStr.length();
-
- ParseConstraint(CStr.substr(bidx, eidx - bidx), I);
- bidx = CStr.find_first_not_of(delims, eidx);
- }
-}
-
-CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
- : TheDef(R), AsmString(AsmStr) {
- Namespace = R->getValueAsString("Namespace");
+//===----------------------------------------------------------------------===//
+// CGIOperandList Implementation
+//===----------------------------------------------------------------------===//
- isReturn = R->getValueAsBit("isReturn");
- isBranch = R->getValueAsBit("isBranch");
- isIndirectBranch = R->getValueAsBit("isIndirectBranch");
- isCompare = R->getValueAsBit("isCompare");
- isBarrier = R->getValueAsBit("isBarrier");
- isCall = R->getValueAsBit("isCall");
- canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
- mayLoad = R->getValueAsBit("mayLoad");
- mayStore = R->getValueAsBit("mayStore");
- isPredicable = R->getValueAsBit("isPredicable");
- isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
- isCommutable = R->getValueAsBit("isCommutable");
- isTerminator = R->getValueAsBit("isTerminator");
- isReMaterializable = R->getValueAsBit("isReMaterializable");
- hasDelaySlot = R->getValueAsBit("hasDelaySlot");
- usesCustomInserter = R->getValueAsBit("usesCustomInserter");
- hasCtrlDep = R->getValueAsBit("hasCtrlDep");
- isNotDuplicable = R->getValueAsBit("isNotDuplicable");
- hasSideEffects = R->getValueAsBit("hasSideEffects");
- neverHasSideEffects = R->getValueAsBit("neverHasSideEffects");
- isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
- hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
- hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
+CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
+ isPredicable = false;
hasOptionalDef = false;
isVariadic = false;
- ImplicitDefs = R->getValueAsListOfDefs("Defs");
- ImplicitUses = R->getValueAsListOfDefs("Uses");
-
- if (neverHasSideEffects + hasSideEffects > 1)
- throw R->getName() + ": multiple conflicting side-effect flags set!";
DagInit *OutDI = R->getValueAsDag("OutOperandList");
@@ -137,16 +36,16 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
throw R->getName() + ": invalid def name for output list: use 'outs'";
} else
throw R->getName() + ": invalid output list: use 'outs'";
-
+
NumDefs = OutDI->getNumArgs();
-
+
DagInit *InDI = R->getValueAsDag("InOperandList");
if (DefInit *Init = dynamic_cast<DefInit*>(InDI->getOperator())) {
if (Init->getDef()->getName() != "ins")
throw R->getName() + ": invalid def name for input list: use 'ins'";
} else
throw R->getName() + ": invalid input list: use 'ins'";
-
+
unsigned MIOperandNo = 0;
std::set<std::string> OperandNames;
for (unsigned i = 0, e = InDI->getNumArgs()+OutDI->getNumArgs(); i != e; ++i){
@@ -159,25 +58,28 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
ArgInit = InDI->getArg(i-NumDefs);
ArgName = InDI->getArgName(i-NumDefs);
}
-
+
DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
if (!Arg)
throw "Illegal operand for the '" + R->getName() + "' instruction!";
Record *Rec = Arg->getDef();
std::string PrintMethod = "printOperand";
+ std::string EncoderMethod;
unsigned NumOps = 1;
DagInit *MIOpInfo = 0;
if (Rec->isSubClassOf("Operand")) {
PrintMethod = Rec->getValueAsString("PrintMethod");
+ // If there is an explicit encoder method, use it.
+ EncoderMethod = Rec->getValueAsString("EncoderMethod");
MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
// Verify that MIOpInfo has an 'ops' root value.
if (!dynamic_cast<DefInit*>(MIOpInfo->getOperator()) ||
dynamic_cast<DefInit*>(MIOpInfo->getOperator())
- ->getDef()->getName() != "ops")
+ ->getDef()->getName() != "ops")
throw "Bad value for MIOperandInfo in operand '" + Rec->getName() +
- "'\n";
+ "'\n";
// If we have MIOpInfo, then we have #operands equal to number of entries
// in MIOperandInfo.
@@ -192,58 +94,58 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
isVariadic = true;
continue;
} else if (!Rec->isSubClassOf("RegisterClass") &&
- Rec->getName() != "ptr_rc" && Rec->getName() != "unknown")
+ !Rec->isSubClassOf("PointerLikeRegClass") &&
+ Rec->getName() != "unknown")
throw "Unknown operand class '" + Rec->getName() +
- "' in '" + R->getName() + "' instruction!";
+ "' in '" + R->getName() + "' instruction!";
// Check that the operand has a name and that it's unique.
if (ArgName.empty())
throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
- " has no name!";
+ " has no name!";
if (!OperandNames.insert(ArgName).second)
throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
- " has the same name as a previous operand!";
+ " has the same name as a previous operand!";
- OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod,
+ OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod,
MIOperandNo, NumOps, MIOpInfo));
MIOperandNo += NumOps;
}
- // Parse Constraints.
- ParseConstraints(R->getValueAsString("Constraints"), this);
-
- // Parse the DisableEncoding field.
- std::string DisableEncoding = R->getValueAsString("DisableEncoding");
- while (1) {
- std::string OpName;
- tie(OpName, DisableEncoding) = getToken(DisableEncoding, " ,\t");
- if (OpName.empty()) break;
-
- // Figure out which operand this is.
- std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false);
- // Mark the operand as not-to-be encoded.
- if (Op.second >= OperandList[Op.first].DoNotEncode.size())
- OperandList[Op.first].DoNotEncode.resize(Op.second+1);
- OperandList[Op.first].DoNotEncode[Op.second] = true;
- }
+ // Make sure the constraints list for each operand is large enough to hold
+ // constraint info, even if none is present.
+ for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
+ OperandList[i].Constraints.resize(OperandList[i].MINumOperands);
}
+
/// getOperandNamed - Return the index of the operand with the specified
/// non-empty name. If the instruction does not have an operand with the
/// specified name, throw an exception.
///
-unsigned CodeGenInstruction::getOperandNamed(const std::string &Name) const {
+unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
+ unsigned OpIdx;
+ if (hasOperandNamed(Name, OpIdx)) return OpIdx;
+ throw "'" + TheDef->getName() + "' does not have an operand named '$" +
+ Name.str() + "'!";
+}
+
+/// hasOperandNamed - Query whether the instruction has an operand of the
+/// given name. If so, return true and set OpIdx to the index of the
+/// operand. Otherwise, return false.
+bool CGIOperandList::hasOperandNamed(StringRef Name, unsigned &OpIdx) const {
assert(!Name.empty() && "Cannot search for operand with no name!");
for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
- if (OperandList[i].Name == Name) return i;
- throw "Instruction '" + TheDef->getName() +
- "' does not have an operand named '$" + Name + "'!";
+ if (OperandList[i].Name == Name) {
+ OpIdx = i;
+ return true;
+ }
+ return false;
}
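
As a minimal usage sketch (the operand name "src" is hypothetical, and `Ops` is assumed to be a populated CGIOperandList), the new non-throwing query pairs with the throwing lookup like so:

// Minimal sketch; stored operand names carry no '$' prefix.
unsigned OpIdx;
if (Ops.hasOperandNamed("src", OpIdx)) {
  // OpIdx now indexes Ops.OperandList; no exception on failure.
} else {
  // Fall back, or call Ops.getOperandNamed("src") to get a thrown error.
}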
std::pair<unsigned,unsigned>
-CodeGenInstruction::ParseOperandName(const std::string &Op,
- bool AllowWholeOp) {
+CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
if (Op.empty() || Op[0] != '$')
throw TheDef->getName() + ": Illegal operand name: '" + Op + "'";
@@ -266,7 +168,7 @@ CodeGenInstruction::ParseOperandName(const std::string &Op,
if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
SubOpName.empty())
throw TheDef->getName() + ": Illegal to refer to"
- " whole operand part of complex operand '" + Op + "'";
+ " whole operand part of complex operand '" + Op + "'";
// Otherwise, return the operand.
return std::make_pair(OpIdx, 0U);
@@ -286,6 +188,137 @@ CodeGenInstruction::ParseOperandName(const std::string &Op,
throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
}
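
A short sketch of how the dotted suboperand syntax resolves, assuming a hypothetical complex operand $addr declared with MIOperandInfo (ops ptr_rc:$base, i32imm:$disp):

// Hypothetical names throughout; Ops is a populated CGIOperandList.
std::pair<unsigned, unsigned> Base = Ops.ParseOperandName("$addr.base", false);
std::pair<unsigned, unsigned> Disp = Ops.ParseOperandName("$addr.disp", false);
// Base.second == 0 and Disp.second == 1, while "$addr" alone would throw
// here because AllowWholeOp is false for a multi-part operand.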
+static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
+ // EARLY_CLOBBER: @earlyclobber $reg
+ std::string::size_type wpos = CStr.find_first_of(" \t");
+ std::string::size_type start = CStr.find_first_not_of(" \t");
+ std::string Tok = CStr.substr(start, wpos - start);
+ if (Tok == "@earlyclobber") {
+ std::string Name = CStr.substr(wpos+1);
+ wpos = Name.find_first_not_of(" \t");
+ if (wpos == std::string::npos)
+ throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
+ Name = Name.substr(wpos);
+ std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
+
+ // Record the earlyclobber constraint on the operand.
+ if (!Ops[Op.first].Constraints[Op.second].isNone())
+ throw "Operand '" + Name + "' cannot have multiple constraints!";
+ Ops[Op.first].Constraints[Op.second] =
+ CGIOperandList::ConstraintInfo::getEarlyClobber();
+ return;
+ }
+
+ // Only other constraint is "TIED_TO" for now.
+ std::string::size_type pos = CStr.find_first_of('=');
+ assert(pos != std::string::npos && "Unrecognized constraint");
+ start = CStr.find_first_not_of(" \t");
+ std::string Name = CStr.substr(start, pos - start);
+
+ // TIED_TO: $src1 = $dst
+ wpos = Name.find_first_of(" \t");
+ if (wpos == std::string::npos)
+ throw "Illegal format for tied-to constraint: '" + CStr + "'";
+ std::string DestOpName = Name.substr(0, wpos);
+ std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
+
+ Name = CStr.substr(pos+1);
+ wpos = Name.find_first_not_of(" \t");
+ if (wpos == std::string::npos)
+ throw "Illegal format for tied-to constraint: '" + CStr + "'";
+
+ std::pair<unsigned,unsigned> SrcOp =
+ Ops.ParseOperandName(Name.substr(wpos), false);
+ if (SrcOp > DestOp)
+ throw "Illegal tied-to operand constraint '" + CStr + "'";
+
+
+ unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
+
+ if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
+ throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
+ Ops[DestOp.first].Constraints[DestOp.second] =
+ CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
+}
+
+static void ParseConstraints(const std::string &CStr, CGIOperandList &Ops) {
+ if (CStr.empty()) return;
+
+ const std::string delims(",");
+ std::string::size_type bidx, eidx;
+
+ bidx = CStr.find_first_not_of(delims);
+ while (bidx != std::string::npos) {
+ eidx = CStr.find_first_of(delims, bidx);
+ if (eidx == std::string::npos)
+ eidx = CStr.length();
+
+ ParseConstraint(CStr.substr(bidx, eidx - bidx), Ops);
+ bidx = CStr.find_first_not_of(delims, eidx);
+ }
+}
+
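
To make the accepted grammar concrete, a hedged sketch of the two constraint forms the parser above recognizes; the operand names are invented:

// .td usage (invented operands):
//   let Constraints = "$src1 = $dst, @earlyclobber $tmp";
// The comma-split loop above then hands each piece to ParseConstraint:
//   "$src1 = $dst"        -> TIED_TO: input $src1 is tied to output $dst
//   "@earlyclobber $tmp"  -> EARLY_CLOBBER on $tmp
ParseConstraints("$src1 = $dst, @earlyclobber $tmp", Ops);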
+void CGIOperandList::ProcessDisableEncoding(std::string DisableEncoding) {
+ while (1) {
+ std::string OpName;
+ tie(OpName, DisableEncoding) = getToken(DisableEncoding, " ,\t");
+ if (OpName.empty()) break;
+
+ // Figure out which operand this is.
+ std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false);
+
+ // Mark the operand as not-to-be encoded.
+ if (Op.second >= OperandList[Op.first].DoNotEncode.size())
+ OperandList[Op.first].DoNotEncode.resize(Op.second+1);
+ OperandList[Op.first].DoNotEncode[Op.second] = true;
+ }
+
+}
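
Likewise for DisableEncoding, a sketch with invented operand names:

// A record carrying `let DisableEncoding = "$src2, $tmp";` would reach this
// call, which sets the DoNotEncode bit consulted by the code emitter for
// each listed (sub)operand.
Ops.ProcessDisableEncoding("$src2, $tmp");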
+
+//===----------------------------------------------------------------------===//
+// CodeGenInstruction Implementation
+//===----------------------------------------------------------------------===//
+
+CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
+ Namespace = R->getValueAsString("Namespace");
+ AsmString = R->getValueAsString("AsmString");
+
+ isReturn = R->getValueAsBit("isReturn");
+ isBranch = R->getValueAsBit("isBranch");
+ isIndirectBranch = R->getValueAsBit("isIndirectBranch");
+ isCompare = R->getValueAsBit("isCompare");
+ isMoveImm = R->getValueAsBit("isMoveImm");
+ isBarrier = R->getValueAsBit("isBarrier");
+ isCall = R->getValueAsBit("isCall");
+ canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
+ mayLoad = R->getValueAsBit("mayLoad");
+ mayStore = R->getValueAsBit("mayStore");
+ isPredicable = Operands.isPredicable || R->getValueAsBit("isPredicable");
+ isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
+ isCommutable = R->getValueAsBit("isCommutable");
+ isTerminator = R->getValueAsBit("isTerminator");
+ isReMaterializable = R->getValueAsBit("isReMaterializable");
+ hasDelaySlot = R->getValueAsBit("hasDelaySlot");
+ usesCustomInserter = R->getValueAsBit("usesCustomInserter");
+ hasCtrlDep = R->getValueAsBit("hasCtrlDep");
+ isNotDuplicable = R->getValueAsBit("isNotDuplicable");
+ hasSideEffects = R->getValueAsBit("hasSideEffects");
+ neverHasSideEffects = R->getValueAsBit("neverHasSideEffects");
+ isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
+ hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
+ hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
+ ImplicitDefs = R->getValueAsListOfDefs("Defs");
+ ImplicitUses = R->getValueAsListOfDefs("Uses");
+
+ if (neverHasSideEffects + hasSideEffects > 1)
+ throw R->getName() + ": multiple conflicting side-effect flags set!";
+
+ // Parse Constraints.
+ ParseConstraints(R->getValueAsString("Constraints"), Operands);
+
+ // Parse the DisableEncoding field.
+ Operands.ProcessDisableEncoding(R->getValueAsString("DisableEncoding"));
+}
/// HasOneImplicitDefWithKnownVT - If the instruction has at least one
/// implicit def and it has a known VT, return the VT, otherwise return
@@ -293,14 +326,212 @@ CodeGenInstruction::ParseOperandName(const std::string &Op,
MVT::SimpleValueType CodeGenInstruction::
HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
if (ImplicitDefs.empty()) return MVT::Other;
-
+
// Check to see if the first implicit def has a resolvable type.
Record *FirstImplicitDef = ImplicitDefs[0];
assert(FirstImplicitDef->isSubClassOf("Register"));
- const std::vector<MVT::SimpleValueType> &RegVTs =
+ const std::vector<MVT::SimpleValueType> &RegVTs =
TargetInfo.getRegisterVTs(FirstImplicitDef);
if (RegVTs.size() == 1)
return RegVTs[0];
return MVT::Other;
}
+
+/// FlattenAsmStringVariants - Flatten the specified AsmString to only
+/// include text from the specified variant, returning the new string.
+std::string CodeGenInstruction::
+FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
+ std::string Res = "";
+
+ for (;;) {
+ // Find the start of the next variant string.
+ size_t VariantsStart = 0;
+ for (size_t e = Cur.size(); VariantsStart != e; ++VariantsStart)
+ if (Cur[VariantsStart] == '{' &&
+ (VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
+ Cur[VariantsStart-1] != '\\')))
+ break;
+
+ // Add the prefix to the result.
+ Res += Cur.slice(0, VariantsStart);
+ if (VariantsStart == Cur.size())
+ break;
+
+ ++VariantsStart; // Skip the '{'.
+
+ // Scan to the end of the variants string.
+ size_t VariantsEnd = VariantsStart;
+ unsigned NestedBraces = 1;
+ for (size_t e = Cur.size(); VariantsEnd != e; ++VariantsEnd) {
+ if (Cur[VariantsEnd] == '}' && Cur[VariantsEnd-1] != '\\') {
+ if (--NestedBraces == 0)
+ break;
+ } else if (Cur[VariantsEnd] == '{')
+ ++NestedBraces;
+ }
+
+ // Select the Nth variant (or empty).
+ StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
+ for (unsigned i = 0; i != Variant; ++i)
+ Selection = Selection.split('|').second;
+ Res += Selection.split('|').first;
+
+ assert(VariantsEnd != Cur.size() &&
+ "Unterminated variants in assembly string!");
+ Cur = Cur.substr(VariantsEnd + 1);
+ }
+
+ return Res;
+}
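
As a worked example of the variant syntax (asm strings invented): braces select among '|'-separated alternatives, a '{' preceded by '$' or '\' is taken literally, and an out-of-range Variant yields the empty alternative.

// FlattenAsmStringVariants("fadd{s|d} $dst, $src", 0) == "fadds $dst, $src"
// FlattenAsmStringVariants("fadd{s|d} $dst, $src", 1) == "faddd $dst, $src"
std::string S =
    CodeGenInstruction::FlattenAsmStringVariants("fadd{s|d} $dst, $src", 1);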
+
+
+//===----------------------------------------------------------------------===//
+// CodeGenInstAlias Implementation
+//===----------------------------------------------------------------------===//
+
+/// tryAliasOpMatch - This is a helper function for the CodeGenInstAlias
+/// constructor. It checks if an argument in an InstAlias pattern matches
+/// the corresponding operand of the instruction. It returns true on a
+/// successful match, with ResOp set to the result operand to be used.
+bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
+ Record *InstOpRec, bool hasSubOps,
+ SMLoc Loc, CodeGenTarget &T,
+ ResultOperand &ResOp) {
+ Init *Arg = Result->getArg(AliasOpNo);
+ DefInit *ADI = dynamic_cast<DefInit*>(Arg);
+
+ if (ADI && ADI->getDef() == InstOpRec) {
+ // If the operand is a record, it must have a name, and the record type
+ // must match up with the instruction's argument type.
+ if (Result->getArgName(AliasOpNo).empty())
+ throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
+ " must have a name!");
+ ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
+ return true;
+ }
+
+ // Handle explicit registers.
+ if (ADI && ADI->getDef()->isSubClassOf("Register")) {
+ if (!InstOpRec->isSubClassOf("RegisterClass"))
+ return false;
+
+ if (!T.getRegisterClass(InstOpRec).containsRegister(ADI->getDef()))
+ throw TGError(Loc, "fixed register " +ADI->getDef()->getName()
+ + " is not a member of the " + InstOpRec->getName() +
+ " register class!");
+
+ if (!Result->getArgName(AliasOpNo).empty())
+ throw TGError(Loc, "result fixed register argument must "
+ "not have a name!");
+
+ ResOp = ResultOperand(ADI->getDef());
+ return true;
+ }
+
+ // Handle "zero_reg" for optional def operands.
+ if (ADI && ADI->getDef()->getName() == "zero_reg") {
+
+ // Check if this is an optional def.
+ if (!InstOpRec->isSubClassOf("OptionalDefOperand"))
+ throw TGError(Loc, "reg0 used for result that is not an "
+ "OptionalDefOperand!");
+
+ ResOp = ResultOperand(static_cast<Record*>(0));
+ return true;
+ }
+
+ if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
+ if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
+ return false;
+ // Integer arguments can't have names.
+ if (!Result->getArgName(AliasOpNo).empty())
+ throw TGError(Loc, "result argument #" + utostr(AliasOpNo) +
+ " must not have a name!");
+ ResOp = ResultOperand(II->getValue());
+ return true;
+ }
+
+ return false;
+}
+
+CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
+ AsmString = R->getValueAsString("AsmString");
+ Result = R->getValueAsDag("ResultInst");
+
+ // Verify that the root of the result is an instruction.
+ DefInit *DI = dynamic_cast<DefInit*>(Result->getOperator());
+ if (DI == 0 || !DI->getDef()->isSubClassOf("Instruction"))
+ throw TGError(R->getLoc(), "result of inst alias should be an instruction");
+
+ ResultInst = &T.getInstruction(DI->getDef());
+
+ // NameClass - If argument names are repeated, we need to verify they have
+ // the same class.
+ StringMap<Record*> NameClass;
+ for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
+ DefInit *ADI = dynamic_cast<DefInit*>(Result->getArg(i));
+ if (!ADI || Result->getArgName(i).empty())
+ continue;
+ // Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
+ // $foo can exist multiple times in the result list, but it must have the
+ // same type.
+ Record *&Entry = NameClass[Result->getArgName(i)];
+ if (Entry && Entry != ADI->getDef())
+ throw TGError(R->getLoc(), "result value $" + Result->getArgName(i) +
+ " is both " + Entry->getName() + " and " +
+ ADI->getDef()->getName() + "!");
+ Entry = ADI->getDef();
+ }
+
+ // Decode and validate the arguments of the result.
+ unsigned AliasOpNo = 0;
+ for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
+
+ // Tied registers don't have an entry in the result dag.
+ if (ResultInst->Operands[i].getTiedRegister() != -1)
+ continue;
+
+ if (AliasOpNo >= Result->getNumArgs())
+ throw TGError(R->getLoc(), "not enough arguments for instruction!");
+
+ Record *InstOpRec = ResultInst->Operands[i].Rec;
+ unsigned NumSubOps = ResultInst->Operands[i].MINumOperands;
+ ResultOperand ResOp(static_cast<int64_t>(0));
+ if (tryAliasOpMatch(Result, AliasOpNo, InstOpRec, (NumSubOps > 1),
+ R->getLoc(), T, ResOp)) {
+ ResultOperands.push_back(ResOp);
+ ResultInstOperandIndex.push_back(std::make_pair(i, -1));
+ ++AliasOpNo;
+ continue;
+ }
+
+ // If the argument did not match the instruction operand, and the operand
+ // is composed of multiple suboperands, try matching the suboperands.
+ if (NumSubOps > 1) {
+ DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+ for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
+ if (AliasOpNo >= Result->getNumArgs())
+ throw TGError(R->getLoc(), "not enough arguments for instruction!");
+ Record *SubRec = dynamic_cast<DefInit*>(MIOI->getArg(SubOp))->getDef();
+ if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false,
+ R->getLoc(), T, ResOp)) {
+ ResultOperands.push_back(ResOp);
+ ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+ ++AliasOpNo;
+ } else {
+ throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+ " does not match instruction operand class " +
+ (SubOp == 0 ? InstOpRec->getName() : SubRec->getName()));
+ }
+ }
+ continue;
+ }
+ throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
+ " does not match instruction operand class " +
+ InstOpRec->getName());
+ }
+
+ if (AliasOpNo != Result->getNumArgs())
+ throw TGError(R->getLoc(), "too many operands for instruction!");
+}
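
A rough decode walk-through under invented names; none of these records exist in this patch.

// Given a hypothetical alias and instruction:
//   def : InstAlias<"inc $dst", (HypADDri GR32:$dst, GR32:$dst, 1)>;
// where HypADDri declares (outs GR32:$dst), (ins GR32:$src, i32imm:$imm)
// and has no tied operands, the constructor above would produce:
//   ResultOperands         = { K_Record "dst", K_Record "dst", K_Imm 1 }
//   ResultInstOperandIndex = { (0,-1), (1,-1), (2,-1) }
CodeGenInstAlias Alias(AliasRec, Target);  // AliasRec, Target: hypothetical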
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.h b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
index b02d0d3..58913b9 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.h
@@ -15,6 +15,8 @@
#define CODEGEN_INSTRUCTION_H
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SourceMgr.h"
#include <string>
#include <vector>
#include <utility>
@@ -23,22 +25,16 @@ namespace llvm {
class Record;
class DagInit;
class CodeGenTarget;
-
- class CodeGenInstruction {
+ class StringRef;
+
+ class CGIOperandList {
public:
- Record *TheDef; // The actual record defining this instruction.
- std::string Namespace; // The namespace the instruction is in.
-
- /// AsmString - The format string used to emit a .s file for the
- /// instruction.
- std::string AsmString;
-
class ConstraintInfo {
enum { None, EarlyClobber, Tied } Kind;
unsigned OtherTiedOperand;
public:
ConstraintInfo() : Kind(None) {}
-
+
static ConstraintInfo getEarlyClobber() {
ConstraintInfo I;
I.Kind = EarlyClobber;
@@ -62,22 +58,26 @@ namespace llvm {
return OtherTiedOperand;
}
};
-
+
/// OperandInfo - The information we keep track of for each operand in the
/// operand list for a tablegen instruction.
struct OperandInfo {
/// Rec - The definition this operand is declared as.
///
Record *Rec;
-
+
/// Name - If this operand was assigned a symbolic name, this is it,
/// otherwise, it's empty.
std::string Name;
-
+
/// PrinterMethodName - The method used to print operands of this type in
/// the asmprinter.
std::string PrinterMethodName;
-
+
+ /// EncoderMethodName - The method used to get the machine operand value
+ /// for binary encoding. "getMachineOpValue" by default.
+ std::string EncoderMethodName;
+
/// MIOperandNo - Currently (this is meant to be phased out), some logical
/// operands correspond to multiple MachineInstr operands. In the X86
/// target for example, one address operand is represented as 4
@@ -86,7 +86,7 @@ namespace llvm {
/// does, this contains the MI operand index of this operand.
unsigned MIOperandNo;
unsigned MINumOperands; // The number of operands.
-
+
/// DoNotEncode - Bools are set to true in this vector for each operand in
/// the DisableEncoding list. These should not be emitted by the code
/// emitter.
@@ -99,52 +99,61 @@ namespace llvm {
/// Constraint info for this operand. This operand can have pieces, so we
/// track constraint info for each.
std::vector<ConstraintInfo> Constraints;
-
- OperandInfo(Record *R, const std::string &N, const std::string &PMN,
- unsigned MION, unsigned MINO, DagInit *MIOI)
- : Rec(R), Name(N), PrinterMethodName(PMN), MIOperandNo(MION),
- MINumOperands(MINO), MIOperandInfo(MIOI) {}
+
+ OperandInfo(Record *R, const std::string &N, const std::string &PMN,
+ const std::string &EMN, unsigned MION, unsigned MINO,
+ DagInit *MIOI)
+ : Rec(R), Name(N), PrinterMethodName(PMN), EncoderMethodName(EMN),
+ MIOperandNo(MION), MINumOperands(MINO), MIOperandInfo(MIOI) {}
+
+
+ /// getTiedRegister - If this operand is tied to another one, return the
+ /// other operand number. Otherwise, return -1.
+ int getTiedRegister() const {
+ for (unsigned j = 0, e = Constraints.size(); j != e; ++j) {
+ const CGIOperandList::ConstraintInfo &CI = Constraints[j];
+ if (CI.isTied()) return CI.getTiedOperand();
+ }
+ return -1;
+ }
};
+
+ CGIOperandList(Record *D);
+
+ Record *TheDef; // The actual record containing this OperandList.
/// NumDefs - Number of def operands declared, this is the number of
/// elements in the instruction's (outs) list.
///
unsigned NumDefs;
-
+
/// OperandList - The list of declared operands, along with their declared
/// type (which is a record).
std::vector<OperandInfo> OperandList;
-
- /// ImplicitDefs/ImplicitUses - These are lists of registers that are
- /// implicitly defined and used by the instruction.
- std::vector<Record*> ImplicitDefs, ImplicitUses;
-
- // Various boolean values we track for the instruction.
- bool isReturn;
- bool isBranch;
- bool isIndirectBranch;
- bool isCompare;
- bool isBarrier;
- bool isCall;
- bool canFoldAsLoad;
- bool mayLoad, mayStore;
+
+ // Information gleaned from the operand list.
bool isPredicable;
- bool isConvertibleToThreeAddress;
- bool isCommutable;
- bool isTerminator;
- bool isReMaterializable;
- bool hasDelaySlot;
- bool usesCustomInserter;
- bool isVariadic;
- bool hasCtrlDep;
- bool isNotDuplicable;
bool hasOptionalDef;
- bool hasSideEffects;
- bool neverHasSideEffects;
- bool isAsCheapAsAMove;
- bool hasExtraSrcRegAllocReq;
- bool hasExtraDefRegAllocReq;
+ bool isVariadic;
+
+ // Provide transparent accessors to the operand list.
+ unsigned size() const { return OperandList.size(); }
+ const OperandInfo &operator[](unsigned i) const { return OperandList[i]; }
+ OperandInfo &operator[](unsigned i) { return OperandList[i]; }
+ OperandInfo &back() { return OperandList.back(); }
+ const OperandInfo &back() const { return OperandList.back(); }
+
+
+ /// getOperandNamed - Return the index of the operand with the specified
+ /// non-empty name. If the instruction does not have an operand with the
+ /// specified name, throw an exception.
+ unsigned getOperandNamed(StringRef Name) const;
+ /// hasOperandNamed - Query whether the instruction has an operand of the
+ /// given name. If so, return true and set OpIdx to the index of the
+ /// operand. Otherwise, return false.
+ bool hasOperandNamed(StringRef Name, unsigned &OpIdx) const;
+
/// ParseOperandName - Parse an operand name like "$foo" or "$foo.bar",
/// where $foo is a whole operand and $foo.bar refers to a suboperand.
/// This throws an exception if the name is invalid. If AllowWholeOp is
@@ -178,20 +187,130 @@ namespace llvm {
return OperandList[Op.first].DoNotEncode[Op.second];
return false;
}
+
+ void ProcessDisableEncoding(std::string Value);
+ };
+
- CodeGenInstruction(Record *R, const std::string &AsmStr);
+ class CodeGenInstruction {
+ public:
+ Record *TheDef; // The actual record defining this instruction.
+ std::string Namespace; // The namespace the instruction is in.
+
+ /// AsmString - The format string used to emit a .s file for the
+ /// instruction.
+ std::string AsmString;
+
+ /// Operands - This is information about the (ins) and (outs) lists specified
+ /// to the instruction.
+ CGIOperandList Operands;
+
+ /// ImplicitDefs/ImplicitUses - These are lists of registers that are
+ /// implicitly defined and used by the instruction.
+ std::vector<Record*> ImplicitDefs, ImplicitUses;
+
+ // Various boolean values we track for the instruction.
+ bool isReturn;
+ bool isBranch;
+ bool isIndirectBranch;
+ bool isCompare;
+ bool isMoveImm;
+ bool isBarrier;
+ bool isCall;
+ bool canFoldAsLoad;
+ bool mayLoad, mayStore;
+ bool isPredicable;
+ bool isConvertibleToThreeAddress;
+ bool isCommutable;
+ bool isTerminator;
+ bool isReMaterializable;
+ bool hasDelaySlot;
+ bool usesCustomInserter;
+ bool hasCtrlDep;
+ bool isNotDuplicable;
+ bool hasSideEffects;
+ bool neverHasSideEffects;
+ bool isAsCheapAsAMove;
+ bool hasExtraSrcRegAllocReq;
+ bool hasExtraDefRegAllocReq;
+
+
+ CodeGenInstruction(Record *R);
- /// getOperandNamed - Return the index of the operand with the specified
- /// non-empty name. If the instruction does not have an operand with the
- /// specified name, throw an exception.
- unsigned getOperandNamed(const std::string &Name) const;
-
/// HasOneImplicitDefWithKnownVT - If the instruction has at least one
/// implicit def and it has a known VT, return the VT, otherwise return
/// MVT::Other.
- MVT::SimpleValueType
+ MVT::SimpleValueType
HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const;
+
+
+ /// FlattenAsmStringVariants - Flatten the specified AsmString to only
+ /// include text from the specified variant, returning the new string.
+ static std::string FlattenAsmStringVariants(StringRef AsmString,
+ unsigned Variant);
};
+
+
+ /// CodeGenInstAlias - This represents an InstAlias definition.
+ class CodeGenInstAlias {
+ public:
+ Record *TheDef; // The actual record defining this InstAlias.
+
+ /// AsmString - The format string used to emit a .s file for the
+ /// instruction.
+ std::string AsmString;
+
+ /// Result - The result instruction.
+ DagInit *Result;
+
+ /// ResultInst - The instruction generated by the alias (decoded from
+ /// Result).
+ CodeGenInstruction *ResultInst;
+
+
+ struct ResultOperand {
+ private:
+ StringRef Name;
+ Record *R;
+
+ int64_t Imm;
+ public:
+ enum {
+ K_Record,
+ K_Imm,
+ K_Reg
+ } Kind;
+
+ ResultOperand(StringRef N, Record *r) : Name(N), R(r), Kind(K_Record) {}
+ ResultOperand(int64_t I) : Imm(I), Kind(K_Imm) {}
+ ResultOperand(Record *r) : R(r), Kind(K_Reg) {}
+
+ bool isRecord() const { return Kind == K_Record; }
+ bool isImm() const { return Kind == K_Imm; }
+ bool isReg() const { return Kind == K_Reg; }
+
+ StringRef getName() const { assert(isRecord()); return Name; }
+ Record *getRecord() const { assert(isRecord()); return R; }
+ int64_t getImm() const { assert(isImm()); return Imm; }
+ Record *getRegister() const { assert(isReg()); return R; }
+ };
+
+ /// ResultOperands - The decoded operands for the result instruction.
+ std::vector<ResultOperand> ResultOperands;
+
+ /// ResultInstOperandIndex - For each operand, this vector holds a pair of
+ /// indices to identify the corresponding operand in the result
+ /// instruction. The first index specifies the operand and the second
+ /// index specifies the suboperand. If there are no suboperands or if all
+ /// of them are matched by the operand, the second value should be -1.
+ std::vector<std::pair<unsigned, int> > ResultInstOperandIndex;
+
+ CodeGenInstAlias(Record *R, CodeGenTarget &T);
+
+ bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
+ Record *InstOpRec, bool hasSubOps, SMLoc Loc,
+ CodeGenTarget &T, ResultOperand &ResOp);
+ };
}
#endif
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.h b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
index ccd3d22..bbd0cef 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
@@ -57,6 +57,12 @@ namespace llvm {
abort();
}
+ bool containsRegister(Record *R) const {
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i)
+ if (Elements[i] == R) return true;
+ return false;
+ }
+
// Returns true if RC is a strict subclass.
// RC is a sub-class of this class if it is a valid replacement for any
// instruction operand where a register of this class is required. It must
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index cbfe2ad..d0f7d8b 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -48,46 +48,47 @@ std::string llvm::getName(MVT::SimpleValueType T) {
std::string llvm::getEnumName(MVT::SimpleValueType T) {
switch (T) {
- case MVT::Other: return "MVT::Other";
- case MVT::i1: return "MVT::i1";
- case MVT::i8: return "MVT::i8";
- case MVT::i16: return "MVT::i16";
- case MVT::i32: return "MVT::i32";
- case MVT::i64: return "MVT::i64";
- case MVT::i128: return "MVT::i128";
- case MVT::iAny: return "MVT::iAny";
- case MVT::fAny: return "MVT::fAny";
- case MVT::vAny: return "MVT::vAny";
- case MVT::f32: return "MVT::f32";
- case MVT::f64: return "MVT::f64";
- case MVT::f80: return "MVT::f80";
- case MVT::f128: return "MVT::f128";
+ case MVT::Other: return "MVT::Other";
+ case MVT::i1: return "MVT::i1";
+ case MVT::i8: return "MVT::i8";
+ case MVT::i16: return "MVT::i16";
+ case MVT::i32: return "MVT::i32";
+ case MVT::i64: return "MVT::i64";
+ case MVT::i128: return "MVT::i128";
+ case MVT::iAny: return "MVT::iAny";
+ case MVT::fAny: return "MVT::fAny";
+ case MVT::vAny: return "MVT::vAny";
+ case MVT::f32: return "MVT::f32";
+ case MVT::f64: return "MVT::f64";
+ case MVT::f80: return "MVT::f80";
+ case MVT::f128: return "MVT::f128";
case MVT::ppcf128: return "MVT::ppcf128";
- case MVT::Flag: return "MVT::Flag";
- case MVT::isVoid:return "MVT::isVoid";
- case MVT::v2i8: return "MVT::v2i8";
- case MVT::v4i8: return "MVT::v4i8";
- case MVT::v8i8: return "MVT::v8i8";
- case MVT::v16i8: return "MVT::v16i8";
- case MVT::v32i8: return "MVT::v32i8";
- case MVT::v2i16: return "MVT::v2i16";
- case MVT::v4i16: return "MVT::v4i16";
- case MVT::v8i16: return "MVT::v8i16";
- case MVT::v16i16: return "MVT::v16i16";
- case MVT::v2i32: return "MVT::v2i32";
- case MVT::v4i32: return "MVT::v4i32";
- case MVT::v8i32: return "MVT::v8i32";
- case MVT::v1i64: return "MVT::v1i64";
- case MVT::v2i64: return "MVT::v2i64";
- case MVT::v4i64: return "MVT::v4i64";
- case MVT::v8i64: return "MVT::v8i64";
- case MVT::v2f32: return "MVT::v2f32";
- case MVT::v4f32: return "MVT::v4f32";
- case MVT::v8f32: return "MVT::v8f32";
- case MVT::v2f64: return "MVT::v2f64";
- case MVT::v4f64: return "MVT::v4f64";
+ case MVT::x86mmx: return "MVT::x86mmx";
+ case MVT::Glue: return "MVT::Glue";
+ case MVT::isVoid: return "MVT::isVoid";
+ case MVT::v2i8: return "MVT::v2i8";
+ case MVT::v4i8: return "MVT::v4i8";
+ case MVT::v8i8: return "MVT::v8i8";
+ case MVT::v16i8: return "MVT::v16i8";
+ case MVT::v32i8: return "MVT::v32i8";
+ case MVT::v2i16: return "MVT::v2i16";
+ case MVT::v4i16: return "MVT::v4i16";
+ case MVT::v8i16: return "MVT::v8i16";
+ case MVT::v16i16: return "MVT::v16i16";
+ case MVT::v2i32: return "MVT::v2i32";
+ case MVT::v4i32: return "MVT::v4i32";
+ case MVT::v8i32: return "MVT::v8i32";
+ case MVT::v1i64: return "MVT::v1i64";
+ case MVT::v2i64: return "MVT::v2i64";
+ case MVT::v4i64: return "MVT::v4i64";
+ case MVT::v8i64: return "MVT::v8i64";
+ case MVT::v2f32: return "MVT::v2f32";
+ case MVT::v4f32: return "MVT::v4f32";
+ case MVT::v8f32: return "MVT::v8f32";
+ case MVT::v2f64: return "MVT::v2f64";
+ case MVT::v4f64: return "MVT::v4f64";
case MVT::Metadata: return "MVT::Metadata";
- case MVT::iPTR: return "MVT::iPTR";
+ case MVT::iPTR: return "MVT::iPTR";
case MVT::iPTRAny: return "MVT::iPTRAny";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
@@ -107,7 +108,7 @@ std::string llvm::getQualifiedName(const Record *R) {
/// getTarget - Return the current instance of the Target class.
///
-CodeGenTarget::CodeGenTarget() {
+CodeGenTarget::CodeGenTarget(RecordKeeper &records) : Records(records) {
std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
if (Targets.size() == 0)
throw std::string("ERROR: No 'Target' subclasses defined!");
@@ -189,6 +190,19 @@ void CodeGenTarget::ReadRegisterClasses() const {
RegisterClasses.assign(RegClasses.begin(), RegClasses.end());
}
+/// getRegisterByName - If there is a register with the specified AsmName,
+/// return it.
+const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const {
+ const std::vector<CodeGenRegister> &Regs = getRegisters();
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ const CodeGenRegister &Reg = Regs[i];
+ if (Reg.TheDef->getValueAsString("AsmName") == Name)
+ return &Reg;
+ }
+
+ return 0;
+}
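
A minimal caller sketch (the register name is hypothetical); note the lookup is a linear scan over each register's AsmName and returns null rather than throwing.

if (const CodeGenRegister *Reg = Target.getRegisterByName("sp")) {
  // Reg->TheDef is the Register record whose AsmName is "sp".
} else {
  // No such register defined by the target.
}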
+
std::vector<MVT::SimpleValueType> CodeGenTarget::
getRegisterVTs(Record *R) const {
std::vector<MVT::SimpleValueType> Result;
@@ -294,18 +308,14 @@ void CodeGenTarget::ReadInstructions() const {
throw std::string("No 'Instruction' subclasses defined!");
// Parse the instructions defined in the .td file.
- std::string InstFormatName =
- getAsmWriter()->getValueAsString("InstFormatName");
-
- for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
- std::string AsmStr = Insts[i]->getValueAsString(InstFormatName);
- Instructions[Insts[i]] = new CodeGenInstruction(Insts[i], AsmStr);
- }
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+ Instructions[Insts[i]] = new CodeGenInstruction(Insts[i]);
}
static const CodeGenInstruction *
GetInstByName(const char *Name,
- const DenseMap<const Record*, CodeGenInstruction*> &Insts) {
+ const DenseMap<const Record*, CodeGenInstruction*> &Insts,
+ RecordKeeper &Records) {
const Record *Rec = Records.getDef(Name);
DenseMap<const Record*, CodeGenInstruction*>::const_iterator
@@ -349,7 +359,7 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
};
const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
for (const char *const *p = FixedInstrs; *p; ++p) {
- const CodeGenInstruction *Instr = GetInstByName(*p, Insts);
+ const CodeGenInstruction *Instr = GetInstByName(*p, Insts, Records);
assert(Instr && "Missing target independent instruction");
assert(Instr->Namespace == "TargetOpcode" && "Bad namespace");
InstrsByEnum.push_back(Instr);
@@ -394,8 +404,8 @@ ComplexPattern::ComplexPattern(Record *R) {
for (unsigned i = 0, e = PropList.size(); i != e; ++i)
if (PropList[i]->getName() == "SDNPHasChain") {
Properties |= 1 << SDNPHasChain;
- } else if (PropList[i]->getName() == "SDNPOptInFlag") {
- Properties |= 1 << SDNPOptInFlag;
+ } else if (PropList[i]->getName() == "SDNPOptInGlue") {
+ Properties |= 1 << SDNPOptInGlue;
} else if (PropList[i]->getName() == "SDNPMayStore") {
Properties |= 1 << SDNPMayStore;
} else if (PropList[i]->getName() == "SDNPMayLoad") {
@@ -406,6 +416,10 @@ ComplexPattern::ComplexPattern(Record *R) {
Properties |= 1 << SDNPMemOperand;
} else if (PropList[i]->getName() == "SDNPVariadic") {
Properties |= 1 << SDNPVariadic;
+ } else if (PropList[i]->getName() == "SDNPWantRoot") {
+ Properties |= 1 << SDNPWantRoot;
+ } else if (PropList[i]->getName() == "SDNPWantParent") {
+ Properties |= 1 << SDNPWantParent;
} else {
errs() << "Unsupported SD Node property '" << PropList[i]->getName()
<< "' on ComplexPattern '" << R->getName() << "'!\n";
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.h b/contrib/llvm/utils/TableGen/CodeGenTarget.h
index 6b06b66..f1058eb 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.h
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.h
@@ -35,14 +35,16 @@ enum SDNP {
SDNPCommutative,
SDNPAssociative,
SDNPHasChain,
- SDNPOutFlag,
- SDNPInFlag,
- SDNPOptInFlag,
+ SDNPOutGlue,
+ SDNPInGlue,
+ SDNPOptInGlue,
SDNPMayLoad,
SDNPMayStore,
SDNPSideEffect,
SDNPMemOperand,
- SDNPVariadic
+ SDNPVariadic,
+ SDNPWantRoot,
+ SDNPWantParent
};
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
@@ -59,6 +61,7 @@ std::string getQualifiedName(const Record *R);
/// CodeGenTarget - This class corresponds to the Target class in the .td files.
///
class CodeGenTarget {
+ RecordKeeper &Records;
Record *TargetRec;
mutable DenseMap<const Record*, CodeGenInstruction*> Instructions;
@@ -74,7 +77,7 @@ class CodeGenTarget {
mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
public:
- CodeGenTarget();
+ CodeGenTarget(RecordKeeper &Records);
Record *getTargetRecord() const { return TargetRec; }
const std::string &getName() const;
@@ -99,6 +102,10 @@ public:
if (Registers.empty()) ReadRegisters();
return Registers;
}
+
+ /// getRegisterByName - If there is a register with the specified AsmName,
+ /// return it.
+ const CodeGenRegister *getRegisterByName(StringRef Name) const;
const std::vector<Record*> &getSubRegIndices() const {
if (SubRegIndices.empty()) ReadSubRegIndices();
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
index 9f12a68..2afa2b9 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
@@ -35,7 +35,7 @@ void Matcher::printOne(raw_ostream &OS) const {
Matcher *Matcher::unlinkNode(Matcher *Other) {
if (this == Other)
return takeNext();
-
+
// Scan until we find the predecessor of Other.
Matcher *Cur = this;
for (; Cur && Cur->getNext() != Other; Cur = Cur->getNext())
@@ -67,11 +67,11 @@ bool Matcher::canMoveBeforeNode(const Matcher *Other) const {
// We can move simple predicates before record nodes.
if (isSimplePredicateNode())
return Other->isSimplePredicateOrRecordNode();
-
+
// We can move record nodes across simple predicates.
if (isSimplePredicateOrRecordNode())
return isSimplePredicateNode();
-
+
// We can't move record nodes across each other etc.
return false;
}
@@ -107,8 +107,8 @@ void RecordMemRefMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
OS.indent(indent) << "RecordMemRef\n";
}
-void CaptureFlagInputMatcher::printImpl(raw_ostream &OS, unsigned indent) const{
- OS.indent(indent) << "CaptureFlagInput\n";
+void CaptureGlueInputMatcher::printImpl(raw_ostream &OS, unsigned indent) const{
+ OS.indent(indent) << "CaptureGlueInput\n";
}
void MoveChildMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
@@ -246,8 +246,8 @@ void EmitNodeMatcherCommon::printImpl(raw_ostream &OS, unsigned indent) const {
OS << ")\n";
}
-void MarkFlagResultsMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
- OS.indent(indent) << "MarkFlagResults <todo: args>\n";
+void MarkGlueResultsMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+ OS.indent(indent) << "MarkGlueResults <todo: args>\n";
}
void CompleteMatchMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
@@ -296,7 +296,7 @@ unsigned EmitMergeInputChainsMatcher::getHashImpl() const {
bool CheckOpcodeMatcher::isEqualImpl(const Matcher *M) const {
// Note: pointer equality isn't enough here, we have to check the enum names
- // to ensure that the nodes are for the same opcode.
+ // to ensure that the nodes are for the same opcode.
return cast<CheckOpcodeMatcher>(M)->Opcode.getEnumName() ==
Opcode.getEnumName();
}
@@ -306,7 +306,7 @@ bool EmitNodeMatcherCommon::isEqualImpl(const Matcher *m) const {
const EmitNodeMatcherCommon *M = cast<EmitNodeMatcherCommon>(m);
return M->OpcodeName == OpcodeName && M->VTs == VTs &&
M->Operands == Operands && M->HasChain == HasChain &&
- M->HasInFlag == HasInFlag && M->HasOutFlag == HasOutFlag &&
+ M->HasInGlue == HasInGlue && M->HasOutGlue == HasOutGlue &&
M->HasMemRefs == HasMemRefs &&
M->NumFixedArityOperands == NumFixedArityOperands;
}
@@ -316,12 +316,12 @@ unsigned EmitNodeMatcherCommon::getHashImpl() const {
}
-unsigned MarkFlagResultsMatcher::getHashImpl() const {
- return HashUnsigneds(FlagResultNodes.begin(), FlagResultNodes.end());
+unsigned MarkGlueResultsMatcher::getHashImpl() const {
+ return HashUnsigneds(GlueResultNodes.begin(), GlueResultNodes.end());
}
unsigned CompleteMatchMatcher::getHashImpl() const {
- return HashUnsigneds(Results.begin(), Results.end()) ^
+ return HashUnsigneds(Results.begin(), Results.end()) ^
((unsigned)(intptr_t)&Pattern << 8);
}
@@ -332,15 +332,15 @@ static bool TypesAreContradictory(MVT::SimpleValueType T1,
// If the two types are the same, they don't contradict.
if (T1 == T2) return false;
-
+
// If either type is iPTR, then they don't conflict unless the other
// one is not a scalar integer type.
if (T1 == MVT::iPTR)
return !MVT(T2).isInteger() || MVT(T2).isVector();
-
+
if (T2 == MVT::iPTR)
return !MVT(T1).isInteger() || MVT(T1).isVector();
-
+
// Otherwise, they are two different non-iPTR types, so they conflict.
return true;
}
@@ -349,10 +349,10 @@ bool CheckOpcodeMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckOpcodeMatcher *COM = dyn_cast<CheckOpcodeMatcher>(M)) {
// One node can't have two different opcodes!
// Note: pointer equality isn't enough here, we have to check the enum names
- // to ensure that the nodes are for the same opcode.
+ // to ensure that the nodes are for the same opcode.
return COM->getOpcode().getEnumName() != getOpcode().getEnumName();
}
-
+
// If the node has a known type, and if the type we're checking for is
// different, then we know they contradict. For example, a check for
// ISD::STORE will never be true at the same time a check for Type i32 is.
@@ -360,12 +360,12 @@ bool CheckOpcodeMatcher::isContradictoryImpl(const Matcher *M) const {
// If checking for a result the opcode doesn't have, it can't match.
if (CT->getResNo() >= getOpcode().getNumResults())
return true;
-
+
MVT::SimpleValueType NodeType = getOpcode().getKnownType(CT->getResNo());
if (NodeType != MVT::Other)
return TypesAreContradictory(NodeType, CT->getType());
}
-
+
return false;
}
@@ -381,12 +381,12 @@ bool CheckChildTypeMatcher::isContradictoryImpl(const Matcher *M) const {
// conflict!
if (CC->getChildNo() != getChildNo())
return false;
-
+
return TypesAreContradictory(getType(), CC->getType());
}
return false;
}
-
+
bool CheckIntegerMatcher::isContradictoryImpl(const Matcher *M) const {
if (const CheckIntegerMatcher *CIM = dyn_cast<CheckIntegerMatcher>(M))
return CIM->getValue() != getValue();
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.h b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
index d9b25d5..8e6e446 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
@@ -31,7 +31,7 @@ Matcher *OptimizeMatcher(Matcher *Matcher, const CodeGenDAGPatterns &CGP);
void EmitMatcherTable(const Matcher *Matcher, const CodeGenDAGPatterns &CGP,
raw_ostream &OS);
-
+
/// Matcher - Base class for all the DAG ISel Matcher representation
/// nodes.
class Matcher {
@@ -45,10 +45,10 @@ public:
RecordNode, // Record the current node.
RecordChild, // Record a child of the current node.
RecordMemRef, // Record the memref in the current node.
- CaptureFlagInput, // If the current node has an input flag, save it.
+ CaptureGlueInput, // If the current node has an input glue, save it.
MoveChild, // Move current node to specified child.
MoveParent, // Move current node to parent.
-
+
// Predicate checking.
CheckSame, // Fail if not same as prev match.
CheckPatternPredicate,
@@ -65,7 +65,7 @@ public:
CheckAndImm,
CheckOrImm,
CheckFoldableChainNode,
-
+
// Node creation/emission.
EmitInteger, // Create a TargetConstant
EmitStringInteger, // Create a TargetConstant from a string.
@@ -75,7 +75,7 @@ public:
EmitCopyToReg, // Emit a copytoreg into a physreg.
EmitNode, // Create a DAG node
EmitNodeXForm, // Run a SDNodeXForm
- MarkFlagResults, // Indicate which interior nodes have flag results.
+ MarkGlueResults, // Indicate which interior nodes have glue results.
CompleteMatch, // Finish a match and update the results.
MorphNodeTo // Build a node, finish a match and update results.
};
@@ -85,7 +85,7 @@ protected:
Matcher(KindTy K) : Kind(K) {}
public:
virtual ~Matcher() {}
-
+
KindTy getKind() const { return Kind; }
Matcher *getNext() { return Next.get(); }
@@ -94,25 +94,25 @@ public:
Matcher *takeNext() { return Next.take(); }
OwningPtr<Matcher> &getNextPtr() { return Next; }
-
+
static inline bool classof(const Matcher *) { return true; }
-
+
bool isEqual(const Matcher *M) const {
if (getKind() != M->getKind()) return false;
return isEqualImpl(M);
}
-
+
unsigned getHash() const {
// Clear the high bit so we don't conflict with tombstones etc.
return ((getHashImpl() << 4) ^ getKind()) & (~0U>>1);
}
-
+
/// isSafeToReorderWithPatternPredicate - Return true if it is safe to sink a
/// PatternPredicate node past this one.
virtual bool isSafeToReorderWithPatternPredicate() const {
return false;
}
-
+
/// isSimplePredicateNode - Return true if this is a simple predicate that
/// operates on the node or its children without potential side effects or a
/// change of the current node.
@@ -134,28 +134,28 @@ public:
return true;
}
}
-
+
/// isSimplePredicateOrRecordNode - Return true if this is a record node or
/// a simple predicate.
bool isSimplePredicateOrRecordNode() const {
return isSimplePredicateNode() ||
getKind() == RecordNode || getKind() == RecordChild;
}
-
+
/// unlinkNode - Unlink the specified node from this chain. If Other == this,
/// we unlink the next pointer and return it. Otherwise we unlink Other from
/// the list and return this.
Matcher *unlinkNode(Matcher *Other);
-
+
/// canMoveBefore - Return true if this matcher is the same as Other, or if
/// we can move this matcher past all of the nodes in-between Other and this
/// node. Other must be equal to or before this.
bool canMoveBefore(const Matcher *Other) const;
-
+
/// canMoveBeforeNode - Return true if it is safe to move the current matcher
/// across the specified one.
bool canMoveBeforeNode(const Matcher *Other) const;
-
+
/// isContradictory - Return true if these two matchers could never match on
/// the same node.
bool isContradictory(const Matcher *Other) const {
@@ -167,7 +167,7 @@ public:
return isContradictoryImpl(Other);
return Other->isContradictoryImpl(this);
}
-
+
void print(raw_ostream &OS, unsigned indent = 0) const;
void printOne(raw_ostream &OS) const;
void dump() const;
@@ -177,7 +177,7 @@ protected:
virtual unsigned getHashImpl() const = 0;
virtual bool isContradictoryImpl(const Matcher *M) const { return false; }
};
-
+
/// ScopeMatcher - This attempts to match each of its children to find the first
/// one that successfully matches. If one child fails, it tries the next child.
/// If none of the children match then this check fails. It never has a 'next'.
@@ -188,12 +188,12 @@ public:
: Matcher(Scope), Children(children, children+numchildren) {
}
virtual ~ScopeMatcher();
-
+
unsigned getNumChildren() const { return Children.size(); }
-
+
Matcher *getChild(unsigned i) { return Children[i]; }
const Matcher *getChild(unsigned i) const { return Children[i]; }
-
+
void resetChild(unsigned i, Matcher *N) {
delete Children[i];
Children[i] = N;
@@ -204,7 +204,7 @@ public:
Children[i] = 0;
return Res;
}
-
+
void setNumChildren(unsigned NC) {
if (NC < Children.size()) {
// delete any children we're about to lose pointers to.
@@ -217,7 +217,7 @@ public:
static inline bool classof(const Matcher *N) {
return N->getKind() == Scope;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const { return false; }
@@ -229,38 +229,38 @@ class RecordMatcher : public Matcher {
/// WhatFor - This is a string indicating why we're recording this. This
/// should only be used for comment generation, not anything semantic.
std::string WhatFor;
-
+
/// ResultNo - The slot number in the RecordedNodes vector that this will be,
/// just printed as a comment.
unsigned ResultNo;
public:
RecordMatcher(const std::string &whatfor, unsigned resultNo)
: Matcher(RecordNode), WhatFor(whatfor), ResultNo(resultNo) {}
-
+
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordNode;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const { return true; }
virtual unsigned getHashImpl() const { return 0; }
};
-
+
/// RecordChildMatcher - Save a numbered child of the current node, or fail
/// the match if it doesn't exist. This is logically equivalent to:
/// MoveChild N + RecordNode + MoveParent.
class RecordChildMatcher : public Matcher {
unsigned ChildNo;
-
+
/// WhatFor - This is a string indicating why we're recording this. This
/// should only be used for comment generation, not anything semantic.
std::string WhatFor;
-
+
/// ResultNo - The slot number in the RecordedNodes vector that this will be,
/// just printed as a comment.
unsigned ResultNo;
@@ -269,7 +269,7 @@ public:
unsigned resultNo)
: Matcher(RecordChild), ChildNo(childno), WhatFor(whatfor),
ResultNo(resultNo) {}
-
+
unsigned getChildNo() const { return ChildNo; }
const std::string &getWhatFor() const { return WhatFor; }
unsigned getResultNo() const { return ResultNo; }
@@ -277,7 +277,7 @@ public:
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordChild;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -287,16 +287,16 @@ private:
}
virtual unsigned getHashImpl() const { return getChildNo(); }
};
-
+
/// RecordMemRefMatcher - Save the current node's memref.
class RecordMemRefMatcher : public Matcher {
public:
RecordMemRefMatcher() : Matcher(RecordMemRef) {}
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == RecordMemRef;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -305,17 +305,17 @@ private:
virtual unsigned getHashImpl() const { return 0; }
};
-
-/// CaptureFlagInputMatcher - If the current record has a flag input, record
+
+/// CaptureGlueInputMatcher - If the current node has a glue input, record
/// it so that it is used as an input to the generated code.
-class CaptureFlagInputMatcher : public Matcher {
+class CaptureGlueInputMatcher : public Matcher {
public:
- CaptureFlagInputMatcher() : Matcher(CaptureFlagInput) {}
-
+ CaptureGlueInputMatcher() : Matcher(CaptureGlueInput) {}
+
static inline bool classof(const Matcher *N) {
- return N->getKind() == CaptureFlagInput;
+ return N->getKind() == CaptureGlueInput;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -323,20 +323,20 @@ private:
virtual bool isEqualImpl(const Matcher *M) const { return true; }
virtual unsigned getHashImpl() const { return 0; }
};
-
+
/// MoveChildMatcher - This tells the interpreter to move into the
/// specified child node.
class MoveChildMatcher : public Matcher {
unsigned ChildNo;
public:
MoveChildMatcher(unsigned childNo) : Matcher(MoveChild), ChildNo(childNo) {}
-
+
unsigned getChildNo() const { return ChildNo; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == MoveChild;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -346,17 +346,17 @@ private:
}
virtual unsigned getHashImpl() const { return getChildNo(); }
};
-
+
/// MoveParentMatcher - This tells the interpreter to move to the parent
/// of the current node.
class MoveParentMatcher : public Matcher {
public:
MoveParentMatcher() : Matcher(MoveParent) {}
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == MoveParent;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -373,13 +373,13 @@ class CheckSameMatcher : public Matcher {
public:
CheckSameMatcher(unsigned matchnumber)
: Matcher(CheckSame), MatchNumber(matchnumber) {}
-
+
unsigned getMatchNumber() const { return MatchNumber; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckSame;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -389,7 +389,7 @@ private:
}
virtual unsigned getHashImpl() const { return getMatchNumber(); }
};
-
+
/// CheckPatternPredicateMatcher - This checks the target-specific predicate
/// to see if the entire pattern is capable of matching. This predicate does
/// not take a node as input. This is used for subtarget feature checks etc.
@@ -398,13 +398,13 @@ class CheckPatternPredicateMatcher : public Matcher {
public:
CheckPatternPredicateMatcher(StringRef predicate)
: Matcher(CheckPatternPredicate), Predicate(predicate) {}
-
+
StringRef getPredicate() const { return Predicate; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckPatternPredicate;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -414,7 +414,7 @@ private:
}
virtual unsigned getHashImpl() const;
};
-
+
/// CheckPredicateMatcher - This checks the target-specific predicate to
/// see if the node is acceptable.
class CheckPredicateMatcher : public Matcher {
@@ -422,13 +422,13 @@ class CheckPredicateMatcher : public Matcher {
public:
CheckPredicateMatcher(StringRef predname)
: Matcher(CheckPredicate), PredName(predname) {}
-
+
StringRef getPredicateName() const { return PredName; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckPredicate;
}
-
+
// TODO: Ok?
//virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
@@ -439,8 +439,8 @@ private:
}
virtual unsigned getHashImpl() const;
};
-
-
+
+
/// CheckOpcodeMatcher - This checks to see if the current node has the
/// specified opcode; if not, it fails to match.
class CheckOpcodeMatcher : public Matcher {
@@ -448,13 +448,13 @@ class CheckOpcodeMatcher : public Matcher {
public:
CheckOpcodeMatcher(const SDNodeInfo &opcode)
: Matcher(CheckOpcode), Opcode(opcode) {}
-
+
const SDNodeInfo &getOpcode() const { return Opcode; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckOpcode;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -478,19 +478,19 @@ public:
static inline bool classof(const Matcher *N) {
return N->getKind() == SwitchOpcode;
}
-
+
unsigned getNumCases() const { return Cases.size(); }
-
+
const SDNodeInfo &getCaseOpcode(unsigned i) const { return *Cases[i].first; }
Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const { return false; }
virtual unsigned getHashImpl() const { return 4123; }
};
-
+
/// CheckTypeMatcher - This checks to see if the current node has the
/// specified type at the specified result; if not, it fails to match.
class CheckTypeMatcher : public Matcher {
@@ -499,14 +499,14 @@ class CheckTypeMatcher : public Matcher {
public:
CheckTypeMatcher(MVT::SimpleValueType type, unsigned resno)
: Matcher(CheckType), Type(type), ResNo(resno) {}
-
+
MVT::SimpleValueType getType() const { return Type; }
unsigned getResNo() const { return ResNo; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckType;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -517,7 +517,7 @@ private:
virtual unsigned getHashImpl() const { return Type; }
virtual bool isContradictoryImpl(const Matcher *M) const;
};
-
+
/// SwitchTypeMatcher - Switch based on the current node's type, dispatching
/// to one matcher per case. If the type doesn't match any of the cases,
/// then the match fails. This is semantically equivalent to a Scope node where
@@ -528,24 +528,24 @@ public:
SwitchTypeMatcher(const std::pair<MVT::SimpleValueType, Matcher*> *cases,
unsigned numcases)
: Matcher(SwitchType), Cases(cases, cases+numcases) {}
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == SwitchType;
}
-
+
unsigned getNumCases() const { return Cases.size(); }
-
+
MVT::SimpleValueType getCaseType(unsigned i) const { return Cases[i].first; }
Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const { return false; }
virtual unsigned getHashImpl() const { return 4123; }
};
-
-
+
+
/// CheckChildTypeMatcher - This checks to see if a child node has the
/// specified type, if not it fails to match.
class CheckChildTypeMatcher : public Matcher {
@@ -554,14 +554,14 @@ class CheckChildTypeMatcher : public Matcher {
public:
CheckChildTypeMatcher(unsigned childno, MVT::SimpleValueType type)
: Matcher(CheckChildType), ChildNo(childno), Type(type) {}
-
+
unsigned getChildNo() const { return ChildNo; }
MVT::SimpleValueType getType() const { return Type; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckChildType;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -573,7 +573,7 @@ private:
virtual unsigned getHashImpl() const { return (Type << 3) | ChildNo; }
virtual bool isContradictoryImpl(const Matcher *M) const;
};
-
+
/// CheckIntegerMatcher - This checks to see if the current node is a
/// ConstantSDNode with the specified integer value, if not it fails to match.
@@ -582,13 +582,13 @@ class CheckIntegerMatcher : public Matcher {
public:
CheckIntegerMatcher(int64_t value)
: Matcher(CheckInteger), Value(value) {}
-
+
int64_t getValue() const { return Value; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckInteger;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -599,7 +599,7 @@ private:
virtual unsigned getHashImpl() const { return Value; }
virtual bool isContradictoryImpl(const Matcher *M) const;
};
-
+
/// CheckCondCodeMatcher - This checks to see if the current node is a
/// CondCodeSDNode with the specified condition, if not it fails to match.
class CheckCondCodeMatcher : public Matcher {
@@ -607,13 +607,13 @@ class CheckCondCodeMatcher : public Matcher {
public:
CheckCondCodeMatcher(StringRef condcodename)
: Matcher(CheckCondCode), CondCodeName(condcodename) {}
-
+
StringRef getCondCodeName() const { return CondCodeName; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckCondCode;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -623,7 +623,7 @@ private:
}
virtual unsigned getHashImpl() const;
};
-
+
/// CheckValueTypeMatcher - This checks to see if the current node is a
/// VTSDNode with the specified type, if not it fails to match.
class CheckValueTypeMatcher : public Matcher {
@@ -631,13 +631,13 @@ class CheckValueTypeMatcher : public Matcher {
public:
CheckValueTypeMatcher(StringRef type_name)
: Matcher(CheckValueType), TypeName(type_name) {}
-
+
StringRef getTypeName() const { return TypeName; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckValueType;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -648,21 +648,21 @@ private:
virtual unsigned getHashImpl() const;
bool isContradictoryImpl(const Matcher *M) const;
};
-
-
-
+
+
+
/// CheckComplexPatMatcher - This node runs the specified ComplexPattern on
/// the current node.
class CheckComplexPatMatcher : public Matcher {
const ComplexPattern &Pattern;
-
- /// MatchNumber - This is the recorded nodes slot that contains the node we want to
- /// match against.
+
+ /// MatchNumber - This is the recorded nodes slot that contains the node we
+ /// want to match against.
unsigned MatchNumber;
-
+
/// Name - The name of the node we're matching, for comment emission.
std::string Name;
-
+
/// FirstResult - This is the first slot in the RecordedNodes list that the
/// result of the match populates.
unsigned FirstResult;
@@ -671,17 +671,17 @@ public:
const std::string &name, unsigned firstresult)
: Matcher(CheckComplexPat), Pattern(pattern), MatchNumber(matchnumber),
Name(name), FirstResult(firstresult) {}
-
+
const ComplexPattern &getPattern() const { return Pattern; }
unsigned getMatchNumber() const { return MatchNumber; }
-
+
const std::string getName() const { return Name; }
unsigned getFirstResult() const { return FirstResult; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckComplexPat;
}
-
+
// Not safe to move a pattern predicate past a complex pattern.
virtual bool isSafeToReorderWithPatternPredicate() const { return false; }
@@ -695,7 +695,7 @@ private:
return (unsigned)(intptr_t)&Pattern ^ MatchNumber;
}
};
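// For illustration, a typical use is a target addressing mode, e.g. a
// hypothetical
//
//   def addr : ComplexPattern<iPTR, 2, "SelectAddr", []>;
//   ...(load addr:$src)...
//
// which becomes a CheckComplexPatMatcher that invokes SelectAddr() on the
// node in slot MatchNumber and, on success, records its two results
// starting at FirstResult. Since the select function is arbitrary C++
// that may consult target state, a pattern predicate must not be moved
// past it, hence the 'false' above.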
-
+
/// CheckAndImmMatcher - This checks to see if the current node is an 'and'
/// with something equivalent to the specified immediate.
class CheckAndImmMatcher : public Matcher {
@@ -703,13 +703,13 @@ class CheckAndImmMatcher : public Matcher {
public:
CheckAndImmMatcher(int64_t value)
: Matcher(CheckAndImm), Value(value) {}
-
+
int64_t getValue() const { return Value; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckAndImm;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -727,13 +727,13 @@ class CheckOrImmMatcher : public Matcher {
public:
CheckOrImmMatcher(int64_t value)
: Matcher(CheckOrImm), Value(value) {}
-
+
int64_t getValue() const { return Value; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckOrImm;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -750,11 +750,11 @@ class CheckFoldableChainNodeMatcher : public Matcher {
public:
CheckFoldableChainNodeMatcher()
: Matcher(CheckFoldableChainNode) {}
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckFoldableChainNode;
}
-
+
virtual bool isSafeToReorderWithPatternPredicate() const { return true; }
private:
@@ -770,14 +770,14 @@ class EmitIntegerMatcher : public Matcher {
public:
EmitIntegerMatcher(int64_t val, MVT::SimpleValueType vt)
: Matcher(EmitInteger), Val(val), VT(vt) {}
-
+
int64_t getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitInteger;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -795,14 +795,14 @@ class EmitStringIntegerMatcher : public Matcher {
public:
EmitStringIntegerMatcher(const std::string &val, MVT::SimpleValueType vt)
: Matcher(EmitStringInteger), Val(val), VT(vt) {}
-
+
const std::string &getValue() const { return Val; }
MVT::SimpleValueType getVT() const { return VT; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitStringInteger;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -811,7 +811,7 @@ private:
}
virtual unsigned getHashImpl() const;
};
-
+
/// EmitRegisterMatcher - This emits a reference to the specified register.
class EmitRegisterMatcher : public Matcher {
/// Reg - The def for the register that we're emitting. If this is null, then
@@ -821,14 +821,14 @@ class EmitRegisterMatcher : public Matcher {
public:
EmitRegisterMatcher(Record *reg, MVT::SimpleValueType vt)
: Matcher(EmitRegister), Reg(reg), VT(vt) {}
-
+
Record *getReg() const { return Reg; }
MVT::SimpleValueType getVT() const { return VT; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitRegister;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -848,13 +848,13 @@ class EmitConvertToTargetMatcher : public Matcher {
public:
EmitConvertToTargetMatcher(unsigned slot)
: Matcher(EmitConvertToTarget), Slot(slot) {}
-
+
unsigned getSlot() const { return Slot; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitConvertToTarget;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -862,7 +862,7 @@ private:
}
virtual unsigned getHashImpl() const { return Slot; }
};
-
+
/// EmitMergeInputChainsMatcher - Emit a node that merges a list of input
/// chains together with a token factor. The list of nodes are the nodes in the
/// matched pattern that have chain input/outputs. This node adds all input
@@ -872,18 +872,18 @@ class EmitMergeInputChainsMatcher : public Matcher {
public:
EmitMergeInputChainsMatcher(const unsigned *nodes, unsigned NumNodes)
: Matcher(EmitMergeInputChains), ChainNodes(nodes, nodes+NumNodes) {}
-
+
unsigned getNumNodes() const { return ChainNodes.size(); }
-
+
unsigned getNode(unsigned i) const {
assert(i < ChainNodes.size());
return ChainNodes[i];
- }
-
+ }
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitMergeInputChains;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -891,9 +891,9 @@ private:
}
virtual unsigned getHashImpl() const;
};
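// Sketch (pattern and slot numbers invented): matching a pattern like
// (store (load addr:$a), addr:$b) records two chained input nodes, so
// the emitter produces roughly
//
//   OPC_EmitMergeInputChains, 2, /*load slot*/ 0, /*store slot*/ 2,
//
// and the interpreter builds one TokenFactor of both input chains to
// serve as the chain operand of the node about to be emitted.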
-
+
/// EmitCopyToRegMatcher - Emit a CopyToReg node from a value to a physreg,
-/// pushing the chain and flag results.
+/// pushing the chain and glue results.
///
class EmitCopyToRegMatcher : public Matcher {
unsigned SrcSlot; // Value to copy into the physreg.
@@ -901,27 +901,27 @@ class EmitCopyToRegMatcher : public Matcher {
public:
EmitCopyToRegMatcher(unsigned srcSlot, Record *destPhysReg)
: Matcher(EmitCopyToReg), SrcSlot(srcSlot), DestPhysReg(destPhysReg) {}
-
+
unsigned getSrcSlot() const { return SrcSlot; }
Record *getDestPhysReg() const { return DestPhysReg; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitCopyToReg;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
return cast<EmitCopyToRegMatcher>(M)->SrcSlot == SrcSlot &&
- cast<EmitCopyToRegMatcher>(M)->DestPhysReg == DestPhysReg;
+ cast<EmitCopyToRegMatcher>(M)->DestPhysReg == DestPhysReg;
}
virtual unsigned getHashImpl() const {
return SrcSlot ^ ((unsigned)(intptr_t)DestPhysReg << 4);
}
};
-
-
-
+
+
+
/// EmitNodeXFormMatcher - Emit an operation that runs an SDNodeXForm on a
/// recorded node and records the result.
class EmitNodeXFormMatcher : public Matcher {
@@ -930,33 +930,33 @@ class EmitNodeXFormMatcher : public Matcher {
public:
EmitNodeXFormMatcher(unsigned slot, Record *nodeXForm)
: Matcher(EmitNodeXForm), Slot(slot), NodeXForm(nodeXForm) {}
-
+
unsigned getSlot() const { return Slot; }
Record *getNodeXForm() const { return NodeXForm; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNodeXForm;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
return cast<EmitNodeXFormMatcher>(M)->Slot == Slot &&
- cast<EmitNodeXFormMatcher>(M)->NodeXForm == NodeXForm;
+ cast<EmitNodeXFormMatcher>(M)->NodeXForm == NodeXForm;
}
virtual unsigned getHashImpl() const {
return Slot ^ ((unsigned)(intptr_t)NodeXForm << 4);
}
};
-
+
/// EmitNodeMatcherCommon - Common class shared between EmitNode and
/// MorphNodeTo.
class EmitNodeMatcherCommon : public Matcher {
std::string OpcodeName;
const SmallVector<MVT::SimpleValueType, 3> VTs;
const SmallVector<unsigned, 6> Operands;
- bool HasChain, HasInFlag, HasOutFlag, HasMemRefs;
-
+ bool HasChain, HasInGlue, HasOutGlue, HasMemRefs;
+
/// NumFixedArityOperands - If this is a fixed arity node, this is set to -1.
/// If this is a variadic node, this is set to the number of fixed arity
/// operands in the root of the pattern. The rest are appended to this node.
@@ -965,16 +965,16 @@ public:
EmitNodeMatcherCommon(const std::string &opcodeName,
const MVT::SimpleValueType *vts, unsigned numvts,
const unsigned *operands, unsigned numops,
- bool hasChain, bool hasInFlag, bool hasOutFlag,
+ bool hasChain, bool hasInGlue, bool hasOutGlue,
bool hasmemrefs,
int numfixedarityoperands, bool isMorphNodeTo)
: Matcher(isMorphNodeTo ? MorphNodeTo : EmitNode), OpcodeName(opcodeName),
VTs(vts, vts+numvts), Operands(operands, operands+numops),
- HasChain(hasChain), HasInFlag(hasInFlag), HasOutFlag(hasOutFlag),
+ HasChain(hasChain), HasInGlue(hasInGlue), HasOutGlue(hasOutGlue),
HasMemRefs(hasmemrefs), NumFixedArityOperands(numfixedarityoperands) {}
-
+
const std::string &getOpcodeName() const { return OpcodeName; }
-
+
unsigned getNumVTs() const { return VTs.size(); }
MVT::SimpleValueType getVT(unsigned i) const {
assert(i < VTs.size());
@@ -986,27 +986,27 @@ public:
assert(i < Operands.size());
return Operands[i];
}
-
+
const SmallVectorImpl<MVT::SimpleValueType> &getVTList() const { return VTs; }
const SmallVectorImpl<unsigned> &getOperandList() const { return Operands; }
-
+
bool hasChain() const { return HasChain; }
- bool hasInFlag() const { return HasInFlag; }
- bool hasOutFlag() const { return HasOutFlag; }
+ bool hasInFlag() const { return HasInGlue; }
+ bool hasOutFlag() const { return HasOutGlue; }
bool hasMemRefs() const { return HasMemRefs; }
int getNumFixedArityOperands() const { return NumFixedArityOperands; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNode || N->getKind() == MorphNodeTo;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const;
virtual unsigned getHashImpl() const;
};
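// Note the result ordering convention ("node results, chain, glue"): the
// VT list above holds only the node's value results. At select time the
// interpreter appends MVT::Other when HasChain is set and MVT::Glue when
// HasOutGlue is set, so a hypothetical node with one i32 result plus a
// chain and glue is created with the VT list
// { MVT::i32, MVT::Other, MVT::Glue }.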
-
+
/// EmitNodeMatcher - This signals a successful match and generates a node.
class EmitNodeMatcher : public EmitNodeMatcherCommon {
unsigned FirstResultSlot;
@@ -1021,15 +1021,15 @@ public:
hasInFlag, hasOutFlag, hasmemrefs,
numfixedarityoperands, false),
FirstResultSlot(firstresultslot) {}
-
+
unsigned getFirstResultSlot() const { return FirstResultSlot; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == EmitNode;
}
-
+
};
-
+
class MorphNodeToMatcher : public EmitNodeMatcherCommon {
const PatternToMatch &Pattern;
public:
@@ -1044,38 +1044,38 @@ public:
numfixedarityoperands, true),
Pattern(pattern) {
}
-
+
const PatternToMatch &getPattern() const { return Pattern; }
static inline bool classof(const Matcher *N) {
return N->getKind() == MorphNodeTo;
}
};
-
-/// MarkFlagResultsMatcher - This node indicates which non-root nodes in the
-/// pattern produce flags. This allows CompleteMatchMatcher to update them
-/// with the output flag of the resultant code.
-class MarkFlagResultsMatcher : public Matcher {
- SmallVector<unsigned, 3> FlagResultNodes;
+
+/// MarkGlueResultsMatcher - This node indicates which non-root nodes in the
+/// pattern produce glue. This allows CompleteMatchMatcher to update them
+/// with the output glue of the resultant code.
+class MarkGlueResultsMatcher : public Matcher {
+ SmallVector<unsigned, 3> GlueResultNodes;
public:
- MarkFlagResultsMatcher(const unsigned *nodes, unsigned NumNodes)
- : Matcher(MarkFlagResults), FlagResultNodes(nodes, nodes+NumNodes) {}
-
- unsigned getNumNodes() const { return FlagResultNodes.size(); }
-
+ MarkGlueResultsMatcher(const unsigned *nodes, unsigned NumNodes)
+ : Matcher(MarkGlueResults), GlueResultNodes(nodes, nodes+NumNodes) {}
+
+ unsigned getNumNodes() const { return GlueResultNodes.size(); }
+
unsigned getNode(unsigned i) const {
- assert(i < FlagResultNodes.size());
- return FlagResultNodes[i];
- }
-
+ assert(i < GlueResultNodes.size());
+ return GlueResultNodes[i];
+ }
+
static inline bool classof(const Matcher *N) {
- return N->getKind() == MarkFlagResults;
+ return N->getKind() == MarkGlueResults;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
- return cast<MarkFlagResultsMatcher>(M)->FlagResultNodes == FlagResultNodes;
+ return cast<MarkGlueResultsMatcher>(M)->GlueResultNodes == GlueResultNodes;
}
virtual unsigned getHashImpl() const;
};
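// Sketch of when this fires (pattern invented for illustration): a source
// pattern whose inner compare node produces glue consumed by a
// conditional branch, e.g. (brcond (cmp ...), bb). The compare is a
// non-root glue-producing node, so its recorded slot is listed here and
// CompleteMatch can redirect its glue uses to the glue output of the
// newly emitted instructions.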
@@ -1095,11 +1095,11 @@ public:
unsigned getNumResults() const { return Results.size(); }
unsigned getResult(unsigned R) const { return Results[R]; }
const PatternToMatch &getPattern() const { return Pattern; }
-
+
static inline bool classof(const Matcher *N) {
return N->getKind() == CompleteMatch;
}
-
+
private:
virtual void printImpl(raw_ostream &OS, unsigned indent) const;
virtual bool isEqualImpl(const Matcher *M) const {
@@ -1108,7 +1108,7 @@ private:
}
virtual unsigned getHashImpl() const;
};
-
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index dfbfe80..0b7fbf7 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -220,8 +220,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
OS << "OPC_RecordMemRef,\n";
return 1;
- case Matcher::CaptureFlagInput:
- OS << "OPC_CaptureFlagInput,\n";
+ case Matcher::CaptureGlueInput:
+ OS << "OPC_CaptureGlueInput,\n";
return 1;
case Matcher::MoveChild:
@@ -485,8 +485,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
OS << ", TARGET_OPCODE(" << EN->getOpcodeName() << "), 0";
if (EN->hasChain()) OS << "|OPFL_Chain";
- if (EN->hasInFlag()) OS << "|OPFL_FlagInput";
- if (EN->hasOutFlag()) OS << "|OPFL_FlagOutput";
+ if (EN->hasInFlag()) OS << "|OPFL_GlueInput";
+ if (EN->hasOutFlag()) OS << "|OPFL_GlueOutput";
if (EN->hasMemRefs()) OS << "|OPFL_MemRefs";
if (EN->getNumFixedArityOperands() != -1)
OS << "|OPFL_Variadic" << EN->getNumFixedArityOperands();
@@ -531,9 +531,9 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
return 6+EN->getNumVTs()+NumOperandBytes;
}
- case Matcher::MarkFlagResults: {
- const MarkFlagResultsMatcher *CFR = cast<MarkFlagResultsMatcher>(N);
- OS << "OPC_MarkFlagResults, " << CFR->getNumNodes() << ", ";
+ case Matcher::MarkGlueResults: {
+ const MarkGlueResultsMatcher *CFR = cast<MarkGlueResultsMatcher>(N);
+ OS << "OPC_MarkGlueResults, " << CFR->getNumNodes() << ", ";
unsigned NumOperandBytes = 0;
for (unsigned i = 0, e = CFR->getNumNodes(); i != e; ++i)
NumOperandBytes += EmitVBRValue(CFR->getNode(i), OS);
@@ -633,8 +633,9 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit CompletePattern matchers.
// FIXME: This should be const.
if (!ComplexPatterns.empty()) {
- OS << "bool CheckComplexPattern(SDNode *Root, SDValue N,\n";
- OS << " unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {\n";
+ OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,\n";
+ OS << " unsigned PatternNo,\n";
+ OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {\n";
OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: assert(0 && \"Invalid pattern # in table?\");\n";
@@ -649,9 +650,20 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
OS << " Result.resize(NextRes+" << NumOps << ");\n";
OS << " return " << P.getSelectFunc();
- OS << "(Root, N";
+ OS << "(";
+ // If the complex pattern wants the root of the match, pass it in as the
+ // first argument.
+ if (P.hasProperty(SDNPWantRoot))
+ OS << "Root, ";
+
+ // If the complex pattern wants the parent of the operand being matched,
+ // pass it in as the next argument.
+ if (P.hasProperty(SDNPWantParent))
+ OS << "Parent, ";
+
+ OS << "N";
for (unsigned i = 0; i != NumOps; ++i)
- OS << ", Result[NextRes+" << i << ']';
+ OS << ", Result[NextRes+" << i << "].first";
OS << ");\n";
}
OS << " }\n";
@@ -730,7 +742,7 @@ void MatcherTableEmitter::EmitHistogram(const Matcher *M,
case Matcher::RecordNode: OS << "OPC_RecordNode"; break;
case Matcher::RecordChild: OS << "OPC_RecordChild"; break;
case Matcher::RecordMemRef: OS << "OPC_RecordMemRef"; break;
- case Matcher::CaptureFlagInput: OS << "OPC_CaptureFlagInput"; break;
+ case Matcher::CaptureGlueInput: OS << "OPC_CaptureGlueInput"; break;
case Matcher::MoveChild: OS << "OPC_MoveChild"; break;
case Matcher::MoveParent: OS << "OPC_MoveParent"; break;
case Matcher::CheckSame: OS << "OPC_CheckSame"; break;
@@ -759,7 +771,7 @@ void MatcherTableEmitter::EmitHistogram(const Matcher *M,
case Matcher::EmitNode: OS << "OPC_EmitNode"; break;
case Matcher::MorphNodeTo: OS << "OPC_MorphNodeTo"; break;
case Matcher::EmitNodeXForm: OS << "OPC_EmitNodeXForm"; break;
- case Matcher::MarkFlagResults: OS << "OPC_MarkFlagResults"; break;
+ case Matcher::MarkGlueResults: OS << "OPC_MarkGlueResults"; break;
case Matcher::CompleteMatch: OS << "OPC_CompleteMatch"; break;
}
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index aba6636..7c0bade 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -25,12 +25,12 @@ static MVT::SimpleValueType getRegisterValueType(Record *R,
MVT::SimpleValueType VT = MVT::Other;
const std::vector<CodeGenRegisterClass> &RCs = T.getRegisterClasses();
std::vector<Record*>::const_iterator Element;
-
+
for (unsigned rc = 0, e = RCs.size(); rc != e; ++rc) {
const CodeGenRegisterClass &RC = RCs[rc];
if (!std::count(RC.Elements.begin(), RC.Elements.end(), R))
continue;
-
+
if (!FoundRC) {
FoundRC = true;
VT = RC.getValueTypeNum(0);
@@ -48,30 +48,30 @@ namespace {
class MatcherGen {
const PatternToMatch &Pattern;
const CodeGenDAGPatterns &CGP;
-
+
/// PatWithNoTypes - This is a clone of Pattern.getSrcPattern() that starts
/// out with all of the types removed. This allows us to insert type checks
/// as we scan the tree.
TreePatternNode *PatWithNoTypes;
-
+
/// VariableMap - A map from variable names ('$dst') to the recorded operand
/// number that they were captured as. These are biased by 1 to make
/// insertion easier.
StringMap<unsigned> VariableMap;
-
+
/// NextRecordedOperandNo - As we emit opcodes to record matched values in
/// the RecordedNodes array, this keeps track of which slot will be next to
/// record into.
unsigned NextRecordedOperandNo;
-
+
/// MatchedChainNodes - This maintains the position in the recorded nodes
/// array of all of the recorded input nodes that have chains.
SmallVector<unsigned, 2> MatchedChainNodes;
- /// MatchedFlagResultNodes - This maintains the position in the recorded
- /// nodes array of all of the recorded input nodes that have flag results.
- SmallVector<unsigned, 2> MatchedFlagResultNodes;
-
+ /// MatchedGlueResultNodes - This maintains the position in the recorded
+ /// nodes array of all of the recorded input nodes that have glue results.
+ SmallVector<unsigned, 2> MatchedGlueResultNodes;
+
/// MatchedComplexPatterns - This maintains a list of all of the
/// ComplexPatterns that we need to check. The patterns are known to have
/// names which were recorded. The second element of each pair is the first
@@ -79,40 +79,39 @@ namespace {
/// results into.
SmallVector<std::pair<const TreePatternNode*,
unsigned>, 2> MatchedComplexPatterns;
-
+
/// PhysRegInputs - This list has an entry for each explicitly specified
/// physreg input to the pattern. The first elt is the Register node, the
/// second is the recorded slot number the input pattern match saved it in.
SmallVector<std::pair<Record*, unsigned>, 2> PhysRegInputs;
-
+
/// Matcher - This is the top level of the generated matcher, the result.
Matcher *TheMatcher;
-
+
/// CurPredicate - As we emit matcher nodes, this points to the latest check
/// which should have future checks stuck into its Next position.
Matcher *CurPredicate;
public:
MatcherGen(const PatternToMatch &pattern, const CodeGenDAGPatterns &cgp);
-
+
~MatcherGen() {
delete PatWithNoTypes;
}
-
+
bool EmitMatcherCode(unsigned Variant);
void EmitResultCode();
-
+
Matcher *GetMatcher() const { return TheMatcher; }
- Matcher *GetCurPredicate() const { return CurPredicate; }
private:
void AddMatcher(Matcher *NewNode);
void InferPossibleTypes();
-
+
// Matcher Generation.
void EmitMatchCode(const TreePatternNode *N, TreePatternNode *NodeNoTypes);
void EmitLeafMatchCode(const TreePatternNode *N);
void EmitOperatorMatchCode(const TreePatternNode *N,
TreePatternNode *NodeNoTypes);
-
+
// Result Code Generation.
unsigned getNamedArgumentSlot(StringRef Name) {
unsigned VarMapEntry = VariableMap[Name];
@@ -124,7 +123,7 @@ namespace {
/// GetInstPatternNode - Get the pattern for an instruction.
const TreePatternNode *GetInstPatternNode(const DAGInstruction &Ins,
const TreePatternNode *N);
-
+
void EmitResultOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
void EmitResultOfNamedOperand(const TreePatternNode *N,
@@ -136,7 +135,7 @@ namespace {
void EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps);
};
-
+
} // end anon namespace.
MatcherGen::MatcherGen(const PatternToMatch &pattern,
@@ -157,7 +156,7 @@ MatcherGen::MatcherGen(const PatternToMatch &pattern,
//
PatWithNoTypes = Pattern.getSrcPattern()->clone();
PatWithNoTypes->RemoveAllTypes();
-
+
// If there are types that are manifestly known, infer them.
InferPossibleTypes();
}
@@ -170,7 +169,7 @@ void MatcherGen::InferPossibleTypes() {
// TP - Get *SOME* tree pattern, we don't care which. It is only used for
// diagnostics, which we know are impossible at this point.
TreePattern &TP = *CGP.pf_begin()->second;
-
+
try {
bool MadeChange = true;
while (MadeChange)
@@ -183,7 +182,7 @@ void MatcherGen::InferPossibleTypes() {
}
-/// AddMatcher - Add a matcher node to the current graph we're building.
+/// AddMatcher - Add a matcher node to the current graph we're building.
void MatcherGen::AddMatcher(Matcher *NewNode) {
if (CurPredicate != 0)
CurPredicate->setNext(NewNode);
@@ -200,7 +199,7 @@ void MatcherGen::AddMatcher(Matcher *NewNode) {
/// EmitLeafMatchCode - Generate matching code for leaf nodes.
void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
assert(N->isLeaf() && "Not a leaf?");
-
+
// Direct match against an integer constant.
if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
// If this is the root of the dag we're matching, we emit a redundant opcode
@@ -213,16 +212,16 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
return AddMatcher(new CheckIntegerMatcher(II->getValue()));
}
-
+
DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue());
if (DI == 0) {
errs() << "Unknown leaf kind: " << *DI << "\n";
abort();
}
-
+
Record *LeafRec = DI->getDef();
if (// Handle register references. Nothing to do here, they always match.
- LeafRec->isSubClassOf("RegisterClass") ||
+ LeafRec->isSubClassOf("RegisterClass") ||
LeafRec->isSubClassOf("PointerLikeRegClass") ||
LeafRec->isSubClassOf("SubRegIndex") ||
// Place holder for SRCVALUE nodes. Nothing to do here.
@@ -230,20 +229,20 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
return;
// If we have a physreg reference like (mul gpr:$src, EAX) then we need to
- // record the register
+ // record the register
if (LeafRec->isSubClassOf("Register")) {
AddMatcher(new RecordMatcher("physreg input "+LeafRec->getName(),
NextRecordedOperandNo));
PhysRegInputs.push_back(std::make_pair(LeafRec, NextRecordedOperandNo++));
return;
}
-
+
if (LeafRec->isSubClassOf("ValueType"))
return AddMatcher(new CheckValueTypeMatcher(LeafRec->getName()));
-
+
if (LeafRec->isSubClassOf("CondCode"))
return AddMatcher(new CheckCondCodeMatcher(LeafRec->getName()));
-
+
if (LeafRec->isSubClassOf("ComplexPattern")) {
// We can't model ComplexPattern uses that don't have their name taken yet.
// The OPC_CheckComplexPattern operation implicitly records the results.
@@ -257,7 +256,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
MatchedComplexPatterns.push_back(std::make_pair(N, 0));
return;
}
-
+
errs() << "Unknown leaf kind: " << *N << "\n";
abort();
}
@@ -266,7 +265,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
TreePatternNode *NodeNoTypes) {
assert(!N->isLeaf() && "Not an operator?");
const SDNodeInfo &CInfo = CGP.getSDNodeInfo(N->getOperator());
-
+
// If this is an 'and R, 1234' where the operation is AND/OR and the RHS is
// a constant without a predicate fn that has more than one bit set, handle
// this as a special case. This is usually for targets that have special
@@ -277,7 +276,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
// them from the mask in the dag. For example, it might turn 'AND X, 255'
// into 'AND X, 254' if it knows the low bit is set. Emit code that checks
// to handle this.
- if ((N->getOperator()->getName() == "and" ||
+ if ((N->getOperator()->getName() == "and" ||
N->getOperator()->getName() == "or") &&
N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateFns().empty() &&
N->getPredicateFns().empty()) {
@@ -303,15 +302,15 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
}
}
}
-
+
// Check that the current opcode lines up.
AddMatcher(new CheckOpcodeMatcher(CInfo));
-
+
// If this node has memory references (i.e. is a load or store), tell the
// interpreter to capture them in the memref array.
if (N->NodeHasProperty(SDNPMemOperand, CGP))
AddMatcher(new RecordMemRefMatcher());
-
+
// If this node has a chain, then the chain is operand #0 of the SDNode, and
// the child numbers of the node are all offset by one.
unsigned OpNo = 0;
@@ -322,7 +321,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
NextRecordedOperandNo));
// Remember all of the input chains our pattern will match.
MatchedChainNodes.push_back(NextRecordedOperandNo++);
-
+
// Don't look at the input chain when matching the tree pattern to the
// SDNode.
OpNo = 1;
@@ -353,11 +352,11 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
// If there is a node between the root and this node, then we definitely
// need to emit the check.
bool NeedCheck = !Root->hasChild(N);
-
+
// If it *is* an immediate child of the root, we can still need a check if
// the root SDNode has multiple inputs. For us, this means that it is an
// intrinsic, has multiple operands, or has other inputs like chain or
- // flag).
+ // glue.
if (!NeedCheck) {
const SDNodeInfo &PInfo = CGP.getSDNodeInfo(Root->getOperator());
NeedCheck =
@@ -366,34 +365,34 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
Root->getOperator() == CGP.get_intrinsic_wo_chain_sdnode() ||
PInfo.getNumOperands() > 1 ||
PInfo.hasProperty(SDNPHasChain) ||
- PInfo.hasProperty(SDNPInFlag) ||
- PInfo.hasProperty(SDNPOptInFlag);
+ PInfo.hasProperty(SDNPInGlue) ||
+ PInfo.hasProperty(SDNPOptInGlue);
}
-
+
if (NeedCheck)
AddMatcher(new CheckFoldableChainNodeMatcher());
}
}
- // If this node has an output flag and isn't the root, remember it.
- if (N->NodeHasProperty(SDNPOutFlag, CGP) &&
+ // If this node has an output glue and isn't the root, remember it.
+ if (N->NodeHasProperty(SDNPOutGlue, CGP) &&
N != Pattern.getSrcPattern()) {
- // TODO: This redundantly records nodes with both flags and chains.
-
+ // TODO: This redundantly records nodes with both glues and chains.
+
// Record the node and remember it in our chained nodes list.
AddMatcher(new RecordMatcher("'" + N->getOperator()->getName() +
- "' flag output node",
+ "' glue output node",
NextRecordedOperandNo));
- // Remember all of the nodes with output flags our pattern will match.
- MatchedFlagResultNodes.push_back(NextRecordedOperandNo++);
+ // Remember all of the nodes with output glue our pattern will match.
+ MatchedGlueResultNodes.push_back(NextRecordedOperandNo++);
}
-
- // If this node is known to have an input flag or if it *might* have an input
- // flag, capture it as the flag input of the pattern.
- if (N->NodeHasProperty(SDNPOptInFlag, CGP) ||
- N->NodeHasProperty(SDNPInFlag, CGP))
- AddMatcher(new CaptureFlagInputMatcher());
-
+
+ // If this node is known to have an input glue or if it *might* have an input
+ // glue, capture it as the glue input of the pattern.
+ if (N->NodeHasProperty(SDNPOptInGlue, CGP) ||
+ N->NodeHasProperty(SDNPInGlue, CGP))
+ AddMatcher(new CaptureGlueInputMatcher());
+
for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i, ++OpNo) {
// Get the code suitable for matching this child. Move to the child, check
// it then move back to the parent.
@@ -410,14 +409,14 @@ void MatcherGen::EmitMatchCode(const TreePatternNode *N,
// need to do a type check. Emit the check, apply the type to NodeNoTypes and
// reinfer any correlated types.
SmallVector<unsigned, 2> ResultsToTypeCheck;
-
+
for (unsigned i = 0, e = NodeNoTypes->getNumTypes(); i != e; ++i) {
if (NodeNoTypes->getExtType(i) == N->getExtType(i)) continue;
NodeNoTypes->setType(i, N->getExtType(i));
InferPossibleTypes();
ResultsToTypeCheck.push_back(i);
}
-
+
// If this node has a name associated with it, capture it in VariableMap. If
// we already saw this in the pattern, emit code to verify dagness.
if (!N->getName().empty()) {
@@ -435,16 +434,16 @@ void MatcherGen::EmitMatchCode(const TreePatternNode *N,
return;
}
}
-
+
if (N->isLeaf())
EmitLeafMatchCode(N);
else
EmitOperatorMatchCode(N, NodeNoTypes);
-
+
// If there are node predicates for this node, generate their checks.
for (unsigned i = 0, e = N->getPredicateFns().size(); i != e; ++i)
AddMatcher(new CheckPredicateMatcher(N->getPredicateFns()[i]));
-
+
for (unsigned i = 0, e = ResultsToTypeCheck.size(); i != e; ++i)
AddMatcher(new CheckTypeMatcher(N->getType(ResultsToTypeCheck[i]),
ResultsToTypeCheck[i]));
@@ -463,27 +462,27 @@ bool MatcherGen::EmitMatcherCode(unsigned Variant) {
const std::vector<Record*> &OpNodes = CP->getRootNodes();
assert(!OpNodes.empty() &&"Complex Pattern must specify what it can match");
if (Variant >= OpNodes.size()) return true;
-
+
AddMatcher(new CheckOpcodeMatcher(CGP.getSDNodeInfo(OpNodes[Variant])));
} else {
if (Variant != 0) return true;
}
-
+
// Emit the matcher for the pattern structure and types.
EmitMatchCode(Pattern.getSrcPattern(), PatWithNoTypes);
-
+
// If the pattern has a predicate on it (e.g. only enabled when a subtarget
// feature is around, do the check).
if (!Pattern.getPredicateCheck().empty())
AddMatcher(new CheckPatternPredicateMatcher(Pattern.getPredicateCheck()));
-
+
// Now that we've completed the structural type match, emit any ComplexPattern
// checks (e.g. addrmode matches). We emit this after the structural match
// because they are generally more expensive to evaluate and more difficult to
// factor.
for (unsigned i = 0, e = MatchedComplexPatterns.size(); i != e; ++i) {
const TreePatternNode *N = MatchedComplexPatterns[i].first;
-
+
// Remember where the results of this match get stuck.
MatchedComplexPatterns[i].second = NextRecordedOperandNo;
@@ -492,15 +491,15 @@ bool MatcherGen::EmitMatcherCode(unsigned Variant) {
assert(!N->getName().empty() && RecNodeEntry &&
"Complex pattern should have a name and slot");
--RecNodeEntry; // Entries in VariableMap are biased.
-
+
const ComplexPattern &CP =
CGP.getComplexPattern(((DefInit*)N->getLeafValue())->getDef());
-
+
// Emit a CheckComplexPat operation, which does the match (aborting if it
// fails) and pushes the matched operands onto the recorded nodes list.
AddMatcher(new CheckComplexPatMatcher(CP, RecNodeEntry,
N->getName(), NextRecordedOperandNo));
-
+
// Record the right number of operands.
NextRecordedOperandNo += CP.getNumOperands();
if (CP.hasProperty(SDNPHasChain)) {
@@ -508,17 +507,17 @@ bool MatcherGen::EmitMatcherCode(unsigned Variant) {
// fact that we just recorded a chain input. The chain input will be
// matched as the last operand of the predicate if it was successful.
++NextRecordedOperandNo; // Chained node operand.
-
+
// It is the last operand recorded.
assert(NextRecordedOperandNo > 1 &&
"Should have recorded input/result chains at least!");
MatchedChainNodes.push_back(NextRecordedOperandNo-1);
}
-
- // TODO: Complex patterns can't have output flags, if they did, we'd want
+
+ // TODO: Complex patterns can't have output glue; if they did, we'd want
// to record them.
}
-
+
return false;
}
@@ -530,7 +529,7 @@ bool MatcherGen::EmitMatcherCode(unsigned Variant) {
void MatcherGen::EmitResultOfNamedOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps){
assert(!N->getName().empty() && "Operand not named!");
-
+
// A reference to a complex pattern gets all of the results of the complex
// pattern's match.
if (const ComplexPattern *CP = N->getComplexPatternInfo(CGP)) {
@@ -541,7 +540,7 @@ void MatcherGen::EmitResultOfNamedOperand(const TreePatternNode *N,
break;
}
assert(SlotNo != 0 && "Didn't get a slot number assigned?");
-
+
// The first slot entry is the node itself, the subsequent entries are the
// matched values.
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
@@ -562,20 +561,20 @@ void MatcherGen::EmitResultOfNamedOperand(const TreePatternNode *N,
return;
}
}
-
+
ResultOps.push_back(SlotNo);
}
void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &ResultOps) {
assert(N->isLeaf() && "Must be a leaf");
-
+
if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
-
+
// If this is an explicit register reference, handle it.
if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
if (DI->getDef()->isSubClassOf("Register")) {
@@ -583,13 +582,13 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
-
+
if (DI->getDef()->getName() == "zero_reg") {
AddMatcher(new EmitRegisterMatcher(0, N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
-
+
// Handle a reference to a register class. This is used
// in COPY_TO_SUBREG instructions.
if (DI->getDef()->isSubClassOf("RegisterClass")) {
@@ -607,17 +606,17 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
return;
}
}
-
+
errs() << "unhandled leaf node: \n";
N->dump();
}
/// GetInstPatternNode - Get the pattern for an instruction.
-///
+///
const TreePatternNode *MatcherGen::
GetInstPatternNode(const DAGInstruction &Inst, const TreePatternNode *N) {
const TreePattern *InstPat = Inst.getPattern();
-
+
// FIXME2?: Assume actual pattern comes before "implicit".
TreePatternNode *InstPatNode;
if (InstPat)
@@ -626,11 +625,11 @@ GetInstPatternNode(const DAGInstruction &Inst, const TreePatternNode *N) {
InstPatNode = Pattern.getSrcPattern();
else
return 0;
-
+
if (InstPatNode && !InstPatNode->isLeaf() &&
InstPatNode->getOperator()->getName() == "set")
InstPatNode = InstPatNode->getChild(InstPatNode->getNumChildren()-1);
-
+
return InstPatNode;
}
@@ -641,7 +640,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
const CodeGenTarget &CGT = CGP.getTargetInfo();
CodeGenInstruction &II = CGT.getInstruction(Op);
const DAGInstruction &Inst = CGP.getInstruction(Op);
-
+
// If we can, get the pattern for the instruction we're generating. We derive
// a variety of information from this pattern, such as whether it has a chain.
//
@@ -650,27 +649,27 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
// nodes can't duplicate.
const TreePatternNode *InstPatNode = GetInstPatternNode(Inst, N);
- // NodeHasChain - Whether the instruction node we're creating takes chains.
+ // NodeHasChain - Whether the instruction node we're creating takes chains.
bool NodeHasChain = InstPatNode &&
InstPatNode->TreeHasProperty(SDNPHasChain, CGP);
-
+
bool isRoot = N == Pattern.getDstPattern();
- // TreeHasOutFlag - True if this tree has a flag.
- bool TreeHasInFlag = false, TreeHasOutFlag = false;
+ // TreeHasOutGlue - True if this tree has glue.
+ bool TreeHasInGlue = false, TreeHasOutGlue = false;
if (isRoot) {
const TreePatternNode *SrcPat = Pattern.getSrcPattern();
- TreeHasInFlag = SrcPat->TreeHasProperty(SDNPOptInFlag, CGP) ||
- SrcPat->TreeHasProperty(SDNPInFlag, CGP);
-
+ TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
+ SrcPat->TreeHasProperty(SDNPInGlue, CGP);
+
// FIXME2: this is checking the entire pattern, not just the node in
// question, doing this just for the root seems like a total hack.
- TreeHasOutFlag = SrcPat->TreeHasProperty(SDNPOutFlag, CGP);
+ TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
}
// NumResults - This is the number of results produced by the instruction in
// the "outs" list.
- unsigned NumResults = Inst.getNumResults();
+ unsigned NumResults = Inst.getNumResults();
// Loop over all of the operands of the instruction pattern, emitting code
// to fill them all in. The node 'N' usually has a number of children equal to
@@ -679,41 +678,41 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
// in the 'execute always' values. Match up the node operands to the
// instruction operands to do this.
SmallVector<unsigned, 8> InstOps;
- for (unsigned ChildNo = 0, InstOpNo = NumResults, e = II.OperandList.size();
+ for (unsigned ChildNo = 0, InstOpNo = NumResults, e = II.Operands.size();
InstOpNo != e; ++InstOpNo) {
-
+
// Determine what to emit for this operand.
- Record *OperandNode = II.OperandList[InstOpNo].Rec;
+ Record *OperandNode = II.Operands[InstOpNo].Rec;
if ((OperandNode->isSubClassOf("PredicateOperand") ||
OperandNode->isSubClassOf("OptionalDefOperand")) &&
!CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
// This is a predicate or optional def operand; emit the
// 'default ops' operands.
const DAGDefaultOperand &DefaultOp
- = CGP.getDefaultOperand(OperandNode);
+ = CGP.getDefaultOperand(OperandNode);
for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
continue;
}
-
+
const TreePatternNode *Child = N->getChild(ChildNo);
-
+
// Otherwise this is a normal operand or a predicate operand without
// 'execute always'; emit it.
unsigned BeforeAddingNumOps = InstOps.size();
EmitResultOperand(Child, InstOps);
assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");
-
+
// If the operand is an instruction and it produced multiple results, just
// take the first one.
if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
InstOps.resize(BeforeAddingNumOps+1);
-
+
++ChildNo;
}
-
- // If this node has an input flag or explicitly specified input physregs, we
- // need to add chained and flagged copyfromreg nodes and materialize the flag
+
+ // If this node has input glue or explicitly specified input physregs, we
+ // need to add chained and glued copyfromreg nodes and materialize the glue
// input.
if (isRoot && !PhysRegInputs.empty()) {
// Emit all of the CopyToReg nodes for the input physical registers. These
@@ -721,18 +720,18 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
PhysRegInputs[i].first));
- // Even if the node has no other flag inputs, the resultant node must be
- // flagged to the CopyFromReg nodes we just generated.
- TreeHasInFlag = true;
+ // Even if the node has no other glue inputs, the resultant node must be
+ // glued to the CopyFromReg nodes we just generated.
+ TreeHasInGlue = true;
}
-
- // Result order: node results, chain, flags
-
+
+ // Result order: node results, chain, glue
+
// Determine the result types.
SmallVector<MVT::SimpleValueType, 4> ResultVTs;
for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
ResultVTs.push_back(N->getType(i));
-
+
// If this is the root instruction of a pattern that has physical registers in
// its result pattern, add output VTs for them. For example, X86 has:
// (set AL, (mul ...))
@@ -744,7 +743,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
Record *HandledReg = 0;
if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
HandledReg = II.ImplicitDefs[0];
-
+
for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
Record *Reg = Pattern.getDstRegs()[i];
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
@@ -759,7 +758,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
if (isRoot &&
(Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP)))
NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();
-
+
// If this is the root node and any of the nodes matched nodes in the input
// pattern have MemRefs in them, have the interpreter collect them and plop
// them onto this node.
@@ -776,19 +775,19 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
bool NodeHasMemRefs =
isRoot && Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);
- assert((!ResultVTs.empty() || TreeHasOutFlag || NodeHasChain) &&
+ assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
"Node has no result");
-
+
AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
ResultVTs.data(), ResultVTs.size(),
InstOps.data(), InstOps.size(),
- NodeHasChain, TreeHasInFlag, TreeHasOutFlag,
+ NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
NodeHasMemRefs, NumFixedArityOperands,
NextRecordedOperandNo));
-
- // The non-chain and non-flag results of the newly emitted node get recorded.
+
+ // The non-chain and non-glue results of the newly emitted node get recorded.
for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
- if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Flag) break;
+ if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
OutputOps.push_back(NextRecordedOperandNo++);
}
}
@@ -800,7 +799,7 @@ EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
// Emit the operand.
SmallVector<unsigned, 8> InputOps;
-
+
// FIXME2: Could easily generalize this to support multiple inputs and outputs
// to the SDNodeXForm. For now we just support one input and one output like
// the old instruction selector.
@@ -839,7 +838,7 @@ void MatcherGen::EmitResultCode() {
if (!MatchedChainNodes.empty())
AddMatcher(new EmitMergeInputChainsMatcher
(MatchedChainNodes.data(), MatchedChainNodes.size()));
-
+
// Codegen the root of the result pattern, capturing the resulting values.
SmallVector<unsigned, 8> Ops;
EmitResultOperand(Pattern.getDstPattern(), Ops);
@@ -847,11 +846,11 @@ void MatcherGen::EmitResultCode() {
// At this point, we have however many values the result pattern produces.
// However, the input pattern might not need all of these. If there are
// excess values at the end (such as implicit defs of condition codes etc)
- // just lop them off. This doesn't need to worry about flags or chains, just
+ // just lop them off. This doesn't need to worry about glue or chains, just
// explicit results.
//
unsigned NumSrcResults = Pattern.getSrcPattern()->getNumTypes();
-
+
// If the pattern also has (implicit) results, count them as well.
if (!Pattern.getDstRegs().empty()) {
// If the root came from an implicit def in the instruction handling stuff,
@@ -865,23 +864,23 @@ void MatcherGen::EmitResultCode() {
if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
HandledReg = II.ImplicitDefs[0];
}
-
+
for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
Record *Reg = Pattern.getDstRegs()[i];
if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
++NumSrcResults;
}
- }
-
+ }
+
assert(Ops.size() >= NumSrcResults && "Didn't provide enough results");
Ops.resize(NumSrcResults);
- // If the matched pattern covers nodes which define a flag result, emit a node
+ // If the matched pattern covers nodes which define a glue result, emit a node
// that tells the matcher about them so that it can update their results.
- if (!MatchedFlagResultNodes.empty())
- AddMatcher(new MarkFlagResultsMatcher(MatchedFlagResultNodes.data(),
- MatchedFlagResultNodes.size()));
-
+ if (!MatchedGlueResultNodes.empty())
+ AddMatcher(new MarkGlueResultsMatcher(MatchedGlueResultNodes.data(),
+ MatchedGlueResultNodes.size()));
+
AddMatcher(new CompleteMatchMatcher(Ops.data(), Ops.size(), Pattern));
}
@@ -896,12 +895,12 @@ Matcher *llvm::ConvertPatternToMatcher(const PatternToMatch &Pattern,
// Generate the code for the matcher.
if (Gen.EmitMatcherCode(Variant))
return 0;
-
+
// FIXME2: Kill extra MoveParent commands at the end of the matcher sequence.
// FIXME2: Split result code out to another table, and make the matcher end
// with an "Emit <index>" command. This allows result generation stuff to be
// shared and factored?
-
+
// If the match succeeds, then we generate Pattern.
Gen.EmitResultCode();
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index c73bdb9..3169ea1 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -75,7 +75,7 @@ static void ContractNodes(OwningPtr<Matcher> &MatcherPtr,
// MarkFlagResults->EmitNode->CompleteMatch when we can to encourage
// MorphNodeTo formation. This is safe because MarkFlagResults never refers
// to the root of the pattern.
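// Concretely, the sequence EmitNode -> MarkGlueResults -> CompleteMatch
// is rewritten below to MarkGlueResults -> EmitNode -> CompleteMatch, so
// that the EmitNode sits directly in front of the CompleteMatch and can
// subsequently be merged with it into a single MorphNodeTo.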
- if (isa<EmitNodeMatcher>(N) && isa<MarkFlagResultsMatcher>(N->getNext()) &&
+ if (isa<EmitNodeMatcher>(N) && isa<MarkGlueResultsMatcher>(N->getNext()) &&
isa<CompleteMatchMatcher>(N->getNext()->getNext())) {
// Unlink the two nodes from the list.
Matcher *EmitNode = MatcherPtr.take();
@@ -100,7 +100,7 @@ static void ContractNodes(OwningPtr<Matcher> &MatcherPtr,
if (CM->getResult(i) != RootResultFirst+i)
ResultsMatch = false;
- // If the selected node defines a subset of the flag/chain results, we
+ // If the selected node defines a subset of the glue/chain results, we
// can't use MorphNodeTo. For example, we can't use MorphNodeTo if the
// matched pattern has a chain but the root node doesn't.
const PatternToMatch &Pattern = CM->getPattern();
@@ -109,23 +109,23 @@ static void ContractNodes(OwningPtr<Matcher> &MatcherPtr,
Pattern.getSrcPattern()->NodeHasProperty(SDNPHasChain, CGP))
ResultsMatch = false;
- // If the matched node has a flag and the output root doesn't, we can't
+ // If the matched node has glue and the output root doesn't, we can't
// use MorphNodeTo.
//
- // NOTE: Strictly speaking, we don't have to check for the flag here
+ // NOTE: Strictly speaking, we don't have to check for glue here
// because the code in the pattern generator doesn't handle it right. We
// do it anyway for thoroughness.
if (!EN->hasOutFlag() &&
- Pattern.getSrcPattern()->NodeHasProperty(SDNPOutFlag, CGP))
+ Pattern.getSrcPattern()->NodeHasProperty(SDNPOutGlue, CGP))
ResultsMatch = false;
// If the root result node defines more results than the source root node
- // *and* has a chain or flag input, then we can't match it because it
- // would end up replacing the extra result with the chain/flag.
+ // *and* has a chain or glue input, then we can't match it because it
+ // would end up replacing the extra result with the chain/glue.
#if 0
- if ((EN->hasFlag() || EN->hasChain()) &&
- EN->getNumNonChainFlagVTs() > ... need to get no results reliably ...)
+ if ((EN->hasGlue() || EN->hasChain()) &&
+ EN->getNumNonChainGlueVTs() > ... need to get no results reliably ...)
ResultMatch = false;
#endif
diff --git a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
index 3284366..90a2af2 100644
--- a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -13,6 +13,7 @@
#include "X86DisassemblerTables.h"
#include "X86RecognizableInstr.h"
#include "ARMDecoderEmitter.h"
+#include "FixedLenDecoderEmitter.h"
using namespace llvm;
using namespace llvm::X86Disassembler;
@@ -94,7 +95,7 @@ using namespace llvm::X86Disassembler;
/// instruction.
void DisassemblerEmitter::run(raw_ostream &OS) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
OS << "/*===- TableGen'erated file "
<< "---------------------------------------*- C -*-===*\n"
@@ -127,11 +128,11 @@ void DisassemblerEmitter::run(raw_ostream &OS) {
}
// Fixed-instruction-length targets use a common disassembler.
+ // ARM uses its own implementation for now.
if (Target.getName() == "ARM") {
ARMDecoderEmitter(Records).run(OS);
return;
}
- throw TGError(Target.getTargetRecord()->getLoc(),
- "Unable to generate disassembler for this target");
+ FixedLenDecoderEmitter(Records).run(OS);
}
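// The resulting dispatch, in sketch form: X86 flavors go through the X86
// disassembler tables above, ARM keeps its hand-rolled ARMDecoderEmitter,
// and any other target now falls through to the generic
// FixedLenDecoderEmitter rather than failing with a TGError.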
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.cpp b/contrib/llvm/utils/TableGen/EDEmitter.cpp
index 525fffb..020a4a3 100644
--- a/contrib/llvm/utils/TableGen/EDEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/EDEmitter.cpp
@@ -35,22 +35,22 @@ using namespace llvm;
///////////////////////////////////////////////////////////
namespace {
-
+
class EnumEmitter {
private:
std::string Name;
std::vector<std::string> Entries;
public:
- EnumEmitter(const char *N) : Name(N) {
+ EnumEmitter(const char *N) : Name(N) {
}
- int addEntry(const char *e) {
+ int addEntry(const char *e) {
Entries.push_back(std::string(e));
- return Entries.size() - 1;
+ return Entries.size() - 1;
}
void emit(raw_ostream &o, unsigned int &i) {
o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
i += 2;
-
+
unsigned int index = 0;
unsigned int numEntries = Entries.size();
for (index = 0; index < numEntries; ++index) {
@@ -59,15 +59,15 @@ namespace {
o << ",";
o << "\n";
}
-
+
i -= 2;
o.indent(i) << "};" << "\n";
}
-
+
void emitAsFlags(raw_ostream &o, unsigned int &i) {
o.indent(i) << "enum " << Name.c_str() << " {" << "\n";
i += 2;
-
+
unsigned int index = 0;
unsigned int numEntries = Entries.size();
unsigned int flag = 1;
@@ -78,7 +78,7 @@ namespace {
o << "\n";
flag <<= 1;
}
-
+
i -= 2;
o.indent(i) << "};" << "\n";
}
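// For example (entry names invented; exact numeric formatting may
// differ), emitAsFlags on {"kFlagA", "kFlagB", "kFlagC"} prints an enum
// whose entries receive successive single-bit values:
//
//   enum MyFlags {
//     kFlagA = 0x1,
//     kFlagB = 0x2,
//     kFlagC = 0x4
//   };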
@@ -89,7 +89,7 @@ namespace {
virtual ~ConstantEmitter() { }
virtual void emit(raw_ostream &o, unsigned int &i) = 0;
};
-
+
class LiteralConstantEmitter : public ConstantEmitter {
private:
bool IsNumber;
@@ -98,7 +98,7 @@ namespace {
const char* String;
};
public:
- LiteralConstantEmitter(int number = 0) :
+ LiteralConstantEmitter(int number = 0) :
IsNumber(true),
Number(number) {
}
@@ -117,7 +117,7 @@ namespace {
o << String;
}
};
-
+
class CompoundConstantEmitter : public ConstantEmitter {
private:
unsigned int Padding;
@@ -127,7 +127,7 @@ namespace {
}
CompoundConstantEmitter &addEntry(ConstantEmitter *e) {
Entries.push_back(e);
-
+
return *this;
}
~CompoundConstantEmitter() {
@@ -140,12 +140,12 @@ namespace {
void emit(raw_ostream &o, unsigned int &i) {
o << "{" << "\n";
i += 2;
-
+
unsigned int index;
unsigned int numEntries = Entries.size();
-
+
unsigned int numToPrint;
-
+
if (Padding) {
if (numEntries > Padding) {
fprintf(stderr, "%u entries but %u padding\n", numEntries, Padding);
@@ -155,24 +155,24 @@ namespace {
} else {
numToPrint = numEntries;
}
-
+
for (index = 0; index < numToPrint; ++index) {
o.indent(i);
if (index < numEntries)
Entries[index]->emit(o, i);
else
o << "-1";
-
+
if (index < (numToPrint - 1))
o << ",";
o << "\n";
}
-
+
i -= 2;
o.indent(i) << "}";
}
};
-
+
class FlagsConstantEmitter : public ConstantEmitter {
private:
std::vector<std::string> Flags;
@@ -188,7 +188,7 @@ namespace {
unsigned int numFlags = Flags.size();
if (numFlags == 0)
o << "0";
-
+
for (index = 0; index < numFlags; ++index) {
o << Flags[index].c_str();
if (index < (numFlags - 1))
@@ -218,15 +218,15 @@ void populateOperandOrder(CompoundConstantEmitter *operandOrder,
const CodeGenInstruction &inst,
unsigned syntax) {
unsigned int numArgs = 0;
-
+
AsmWriterInst awInst(inst, syntax, -1, -1);
-
+
std::vector<AsmWriterOperand>::iterator operandIterator;
-
+
for (operandIterator = awInst.Operands.begin();
operandIterator != awInst.Operands.end();
++operandIterator) {
- if (operandIterator->OperandType ==
+ if (operandIterator->OperandType ==
AsmWriterOperand::isMachineInstrOperand) {
operandOrder->addEntry(
new LiteralConstantEmitter(operandIterator->CGIOpNo));
@@ -274,7 +274,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
REG("SEGMENT_REG");
REG("DEBUG_REG");
REG("CONTROL_REG");
-
+
IMM("i8imm");
IMM("i16imm");
IMM("i16i8imm");
@@ -284,7 +284,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
IMM("i64i8imm");
IMM("i64i32imm");
IMM("SSECC");
-
+
// all R, I, R, I, R
MEM("i8mem");
MEM("i8mem_NOREX");
@@ -306,12 +306,12 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
MEM("f128mem");
MEM("f256mem");
MEM("opaque512mem");
-
+
// all R, I, R, I
LEA("lea32mem");
LEA("lea64_32mem");
LEA("lea64mem");
-
+
// all I
PCR("i16imm_pcrel");
PCR("i32imm_pcrel");
@@ -322,7 +322,12 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
PCR("offset32");
PCR("offset64");
PCR("brtarget");
-
+ PCR("uncondbrtarget");
+ PCR("bltarget");
+
+ // all I, ARM mode only, conditional/unconditional
+ PCR("br_target");
+ PCR("bl_target");
return 1;
}
@@ -344,19 +349,19 @@ static void X86PopulateOperands(
const CodeGenInstruction &inst) {
if (!inst.TheDef->isSubClassOf("X86Inst"))
return;
-
+
unsigned int index;
- unsigned int numOperands = inst.OperandList.size();
-
+ unsigned int numOperands = inst.Operands.size();
+
for (index = 0; index < numOperands; ++index) {
- const CodeGenInstruction::OperandInfo &operandInfo =
- inst.OperandList[index];
+ const CGIOperandList::OperandInfo &operandInfo = inst.Operands[index];
Record &rec = *operandInfo.Rec;
-
- if (X86TypeFromOpName(operandTypes[index], rec.getName())) {
+
+ if (X86TypeFromOpName(operandTypes[index], rec.getName()) &&
+ !rec.isSubClassOf("PointerLikeRegClass")) {
errs() << "Operand type: " << rec.getName().c_str() << "\n";
errs() << "Operand name: " << operandInfo.Name.c_str() << "\n";
- errs() << "Instruction mame: " << inst.TheDef->getName().c_str() << "\n";
+ errs() << "Instruction name: " << inst.TheDef->getName().c_str() << "\n";
llvm_unreachable("Unhandled type");
}
}
@@ -375,9 +380,9 @@ static inline void decorate1(
const char *opName,
const char *opFlag) {
unsigned opIndex;
-
- opIndex = inst.getOperandNamed(std::string(opName));
-
+
+ opIndex = inst.Operands.getOperandNamed(std::string(opName));
+
operandFlags[opIndex]->addEntry(opFlag);
}
@@ -414,7 +419,7 @@ static inline void decorate1(
}
/// X86ExtractSemantics - Performs various checks on the name of an X86
-/// instruction to determine what sort of an instruction it is and then adds
+/// instruction to determine what sort of an instruction it is and then adds
/// the appropriate flags to the instruction and its operands
///
/// @arg instType - A reference to the type for the instruction as a whole
@@ -425,7 +430,7 @@ static void X86ExtractSemantics(
FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst) {
const std::string &name = inst.TheDef->getName();
-
+
if (name.find("MOV") != name.npos) {
if (name.find("MOV_V") != name.npos) {
// ignore (this is a pseudoinstruction)
@@ -450,7 +455,7 @@ static void X86ExtractSemantics(
MOV("src", "dst");
}
}
-
+
if (name.find("JMP") != name.npos ||
name.find("J") == 0) {
if (name.find("FAR") != name.npos && name.find("i") != name.npos) {
@@ -459,10 +464,14 @@ static void X86ExtractSemantics(
BRANCH("dst");
}
}
-
+
if (name.find("PUSH") != name.npos) {
- if (name.find("FS") != name.npos ||
- name.find("GS") != name.npos) {
+ if (name.find("CS") != name.npos ||
+ name.find("DS") != name.npos ||
+ name.find("ES") != name.npos ||
+ name.find("FS") != name.npos ||
+ name.find("GS") != name.npos ||
+ name.find("SS") != name.npos) {
instType.set("kInstructionTypePush");
// TODO add support for fixed operands
} else if (name.find("F") != name.npos) {
@@ -477,12 +486,16 @@ static void X86ExtractSemantics(
PUSH("reg");
}
}
-
+
if (name.find("POP") != name.npos) {
if (name.find("POPCNT") != name.npos) {
// ignore (not a real pop)
- } else if (name.find("FS") != name.npos ||
- name.find("GS") != name.npos) {
+ } else if (name.find("CS") != name.npos ||
+ name.find("DS") != name.npos ||
+ name.find("ES") != name.npos ||
+ name.find("FS") != name.npos ||
+ name.find("GS") != name.npos ||
+ name.find("SS") != name.npos) {
instType.set("kInstructionTypePop");
// TODO add support for fixed operands
} else if (name.find("F") != name.npos) {
@@ -495,7 +508,7 @@ static void X86ExtractSemantics(
POP("reg");
}
}
-
+
if (name.find("CALL") != name.npos) {
if (name.find("ADJ") != name.npos) {
// ignore (not a call)
@@ -509,7 +522,7 @@ static void X86ExtractSemantics(
CALL("dst");
}
}
-
+
if (name.find("RET") != name.npos) {
RETURN();
}
@@ -553,12 +566,20 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
REG("QPR");
REG("QQPR");
REG("QQQQPR");
-
+
IMM("i32imm");
+ IMM("i32imm_hilo16");
IMM("bf_inv_mask_imm");
+ IMM("lsb_pos_imm");
+ IMM("width_imm");
IMM("jtblock_operand");
IMM("nohash_imm");
+ IMM("p_imm");
+ IMM("c_imm");
+ IMM("imod_op");
+ IMM("iflags_op");
IMM("cpinst_operand");
+ IMM("setend_op");
IMM("cps_opt");
IMM("vfp_f64imm");
IMM("vfp_f32imm");
@@ -566,46 +587,73 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
IMM("msr_mask");
IMM("neg_zero");
IMM("imm0_31");
+ IMM("imm0_31_m1");
IMM("nModImm");
IMM("imm0_4095");
IMM("jt2block_operand");
IMM("t_imm_s4");
IMM("pclabel");
+ IMM("adrlabel");
+ IMM("t_adrlabel");
+ IMM("t2adrlabel");
IMM("shift_imm");
-
+ IMM("neon_vcvt_imm32");
+
MISC("brtarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("uncondbrtarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("t_brtarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("t_bcctarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("t_cbtarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("bltarget", "kOperandTypeARMBranchTarget"); // ?
+
+ MISC("br_target", "kOperandTypeARMBranchTarget"); // ?
+ MISC("bl_target", "kOperandTypeARMBranchTarget"); // ?
+
+ MISC("t_bltarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("t_blxtarget", "kOperandTypeARMBranchTarget"); // ?
MISC("so_reg", "kOperandTypeARMSoReg"); // R, R, I
+ MISC("shift_so_reg", "kOperandTypeARMSoReg"); // R, R, I
MISC("t2_so_reg", "kOperandTypeThumb2SoReg"); // R, I
MISC("so_imm", "kOperandTypeARMSoImm"); // I
+ MISC("rot_imm", "kOperandTypeARMRotImm"); // I
MISC("t2_so_imm", "kOperandTypeThumb2SoImm"); // I
MISC("so_imm2part", "kOperandTypeARMSoImm2Part"); // I
MISC("pred", "kOperandTypeARMPredicate"); // I, R
MISC("it_pred", "kOperandTypeARMPredicate"); // I
+ MISC("addrmode_imm12", "kOperandTypeAddrModeImm12"); // R, I
+ MISC("ldst_so_reg", "kOperandTypeLdStSOReg"); // R, R, I
MISC("addrmode2", "kOperandTypeARMAddrMode2"); // R, R, I
MISC("am2offset", "kOperandTypeARMAddrMode2Offset"); // R, I
MISC("addrmode3", "kOperandTypeARMAddrMode3"); // R, R, I
MISC("am3offset", "kOperandTypeARMAddrMode3Offset"); // R, I
- MISC("addrmode4", "kOperandTypeARMAddrMode4"); // R, I
+ MISC("ldstm_mode", "kOperandTypeARMLdStmMode"); // I
MISC("addrmode5", "kOperandTypeARMAddrMode5"); // R, I
MISC("addrmode6", "kOperandTypeARMAddrMode6"); // R, R, I, I
MISC("am6offset", "kOperandTypeARMAddrMode6Offset"); // R, I, I
+ MISC("addrmode6dup", "kOperandTypeARMAddrMode6"); // R, R, I, I
MISC("addrmodepc", "kOperandTypeARMAddrModePC"); // R, I
MISC("reglist", "kOperandTypeARMRegisterList"); // I, R, ...
+ MISC("dpr_reglist", "kOperandTypeARMDPRRegisterList"); // I, R, ...
+ MISC("spr_reglist", "kOperandTypeARMSPRRegisterList"); // I, R, ...
MISC("it_mask", "kOperandTypeThumbITMask"); // I
MISC("t2addrmode_imm8", "kOperandTypeThumb2AddrModeImm8"); // R, I
MISC("t2am_imm8_offset", "kOperandTypeThumb2AddrModeImm8Offset");//I
MISC("t2addrmode_imm12", "kOperandTypeThumb2AddrModeImm12"); // R, I
MISC("t2addrmode_so_reg", "kOperandTypeThumb2AddrModeSoReg"); // R, R, I
MISC("t2addrmode_imm8s4", "kOperandTypeThumb2AddrModeImm8s4"); // R, I
- MISC("t2am_imm8s4_offset", "kOperandTypeThumb2AddrModeImm8s4Offset");
+ MISC("t2am_imm8s4_offset", "kOperandTypeThumb2AddrModeImm8s4Offset");
// R, I
MISC("tb_addrmode", "kOperandTypeARMTBAddrMode"); // I
- MISC("t_addrmode_s1", "kOperandTypeThumbAddrModeS1"); // R, I, R
- MISC("t_addrmode_s2", "kOperandTypeThumbAddrModeS2"); // R, I, R
- MISC("t_addrmode_s4", "kOperandTypeThumbAddrModeS4"); // R, I, R
+ MISC("t_addrmode_rrs1", "kOperandTypeThumbAddrModeRegS"); // R, R
+ MISC("t_addrmode_rrs2", "kOperandTypeThumbAddrModeRegS"); // R, R
+ MISC("t_addrmode_rrs4", "kOperandTypeThumbAddrModeRegS"); // R, R
+ MISC("t_addrmode_is1", "kOperandTypeThumbAddrModeImmS"); // R, I
+ MISC("t_addrmode_is2", "kOperandTypeThumbAddrModeImmS"); // R, I
+ MISC("t_addrmode_is4", "kOperandTypeThumbAddrModeImmS"); // R, I
MISC("t_addrmode_rr", "kOperandTypeThumbAddrModeRR"); // R, R
MISC("t_addrmode_sp", "kOperandTypeThumbAddrModeSP"); // R, I
-
+ MISC("t_addrmode_pc", "kOperandTypeThumbAddrModePC"); // R, I
+
return 1;
}
@@ -631,25 +679,24 @@ static void ARMPopulateOperands(
if (!inst.TheDef->isSubClassOf("InstARM") &&
!inst.TheDef->isSubClassOf("InstThumb"))
return;
-
+
unsigned int index;
- unsigned int numOperands = inst.OperandList.size();
-
+ unsigned int numOperands = inst.Operands.size();
+
if (numOperands > EDIS_MAX_OPERANDS) {
- errs() << "numOperands == " << numOperands << " > " <<
+ errs() << "numOperands == " << numOperands << " > " <<
EDIS_MAX_OPERANDS << '\n';
llvm_unreachable("Too many operands");
}
-
+
for (index = 0; index < numOperands; ++index) {
- const CodeGenInstruction::OperandInfo &operandInfo =
- inst.OperandList[index];
+ const CGIOperandList::OperandInfo &operandInfo = inst.Operands[index];
Record &rec = *operandInfo.Rec;
-
+
if (ARMFlagFromOpName(operandTypes[index], rec.getName())) {
errs() << "Operand type: " << rec.getName() << '\n';
errs() << "Operand name: " << operandInfo.Name << '\n';
- errs() << "Instruction mame: " << inst.TheDef->getName() << '\n';
+ errs() << "Instruction name: " << inst.TheDef->getName() << '\n';
llvm_unreachable("Unhandled type");
}
}
@@ -661,7 +708,7 @@ static void ARMPopulateOperands(
}
/// ARMExtractSemantics - Performs various checks on the name of an ARM
-/// instruction to determine what sort of an instruction it is and then adds
+/// instruction to determine what sort of an instruction it is and then adds
/// the appropriate flags to the instruction and its operands
///
/// @arg instType - A reference to the type for the instruction as a whole
@@ -674,7 +721,7 @@ static void ARMExtractSemantics(
FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst) {
const std::string &name = inst.TheDef->getName();
-
+
if (name == "tBcc" ||
name == "tB" ||
name == "t2Bcc" ||
@@ -683,7 +730,7 @@ static void ARMExtractSemantics(
name == "tCBNZ") {
BRANCH("target");
}
-
+
if (name == "tBLr9" ||
name == "BLr9_pred" ||
name == "tBLXi_r9" ||
@@ -692,9 +739,9 @@ static void ARMExtractSemantics(
name == "t2BXJ" ||
name == "BXJ") {
BRANCH("func");
-
+
unsigned opIndex;
- opIndex = inst.getOperandNamed("func");
+ opIndex = inst.Operands.getOperandNamed("func");
if (operandTypes[opIndex]->is("kOperandTypeImmediate"))
operandTypes[opIndex]->set("kOperandTypeARMBranchTarget");
}
@@ -702,7 +749,7 @@ static void ARMExtractSemantics(
#undef BRANCH
-/// populateInstInfo - Fills an array of InstInfos with information about each
+/// populateInstInfo - Fills an array of InstInfos with information about each
/// instruction in a target
///
/// @arg infoArray - The array of InstInfo objects to populate
@@ -711,45 +758,45 @@ static void populateInstInfo(CompoundConstantEmitter &infoArray,
CodeGenTarget &target) {
const std::vector<const CodeGenInstruction*> &numberedInstructions =
target.getInstructionsByEnumValue();
-
+
unsigned int index;
unsigned int numInstructions = numberedInstructions.size();
-
+
for (index = 0; index < numInstructions; ++index) {
const CodeGenInstruction& inst = *numberedInstructions[index];
-
+
CompoundConstantEmitter *infoStruct = new CompoundConstantEmitter;
infoArray.addEntry(infoStruct);
-
+
LiteralConstantEmitter *instType = new LiteralConstantEmitter;
infoStruct->addEntry(instType);
-
- LiteralConstantEmitter *numOperandsEmitter =
- new LiteralConstantEmitter(inst.OperandList.size());
+
+ LiteralConstantEmitter *numOperandsEmitter =
+ new LiteralConstantEmitter(inst.Operands.size());
infoStruct->addEntry(numOperandsEmitter);
-
+
CompoundConstantEmitter *operandTypeArray = new CompoundConstantEmitter;
infoStruct->addEntry(operandTypeArray);
-
+
LiteralConstantEmitter *operandTypes[EDIS_MAX_OPERANDS];
-
+
CompoundConstantEmitter *operandFlagArray = new CompoundConstantEmitter;
infoStruct->addEntry(operandFlagArray);
-
+
FlagsConstantEmitter *operandFlags[EDIS_MAX_OPERANDS];
-
- for (unsigned operandIndex = 0;
- operandIndex < EDIS_MAX_OPERANDS;
+
+ for (unsigned operandIndex = 0;
+ operandIndex < EDIS_MAX_OPERANDS;
++operandIndex) {
operandTypes[operandIndex] = new LiteralConstantEmitter;
operandTypeArray->addEntry(operandTypes[operandIndex]);
-
+
operandFlags[operandIndex] = new FlagsConstantEmitter;
operandFlagArray->addEntry(operandFlags[operandIndex]);
}
-
+
unsigned numSyntaxes = 0;
-
+
if (target.getName() == "X86") {
X86PopulateOperands(operandTypes, inst);
X86ExtractSemantics(*instType, operandFlags, inst);
@@ -760,24 +807,24 @@ static void populateInstInfo(CompoundConstantEmitter &infoArray,
ARMExtractSemantics(*instType, operandTypes, operandFlags, inst);
numSyntaxes = 1;
}
-
- CompoundConstantEmitter *operandOrderArray = new CompoundConstantEmitter;
-
+
+ CompoundConstantEmitter *operandOrderArray = new CompoundConstantEmitter;
+
infoStruct->addEntry(operandOrderArray);
-
- for (unsigned syntaxIndex = 0;
- syntaxIndex < EDIS_MAX_SYNTAXES;
+
+ for (unsigned syntaxIndex = 0;
+ syntaxIndex < EDIS_MAX_SYNTAXES;
++syntaxIndex) {
- CompoundConstantEmitter *operandOrder =
+ CompoundConstantEmitter *operandOrder =
new CompoundConstantEmitter(EDIS_MAX_OPERANDS);
-
+
operandOrderArray->addEntry(operandOrder);
-
+
if (syntaxIndex < numSyntaxes) {
populateOperandOrder(operandOrder, inst, syntaxIndex);
}
}
-
+
infoStruct = NULL;
}
}
@@ -793,25 +840,30 @@ static void emitCommonEnums(raw_ostream &o, unsigned int &i) {
operandTypes.addEntry("kOperandTypeARMBranchTarget");
operandTypes.addEntry("kOperandTypeARMSoReg");
operandTypes.addEntry("kOperandTypeARMSoImm");
+ operandTypes.addEntry("kOperandTypeARMRotImm");
operandTypes.addEntry("kOperandTypeARMSoImm2Part");
operandTypes.addEntry("kOperandTypeARMPredicate");
+ operandTypes.addEntry("kOperandTypeAddrModeImm12");
+ operandTypes.addEntry("kOperandTypeLdStSOReg");
operandTypes.addEntry("kOperandTypeARMAddrMode2");
operandTypes.addEntry("kOperandTypeARMAddrMode2Offset");
operandTypes.addEntry("kOperandTypeARMAddrMode3");
operandTypes.addEntry("kOperandTypeARMAddrMode3Offset");
- operandTypes.addEntry("kOperandTypeARMAddrMode4");
+ operandTypes.addEntry("kOperandTypeARMLdStmMode");
operandTypes.addEntry("kOperandTypeARMAddrMode5");
operandTypes.addEntry("kOperandTypeARMAddrMode6");
operandTypes.addEntry("kOperandTypeARMAddrMode6Offset");
operandTypes.addEntry("kOperandTypeARMAddrModePC");
operandTypes.addEntry("kOperandTypeARMRegisterList");
+ operandTypes.addEntry("kOperandTypeARMDPRRegisterList");
+ operandTypes.addEntry("kOperandTypeARMSPRRegisterList");
operandTypes.addEntry("kOperandTypeARMTBAddrMode");
operandTypes.addEntry("kOperandTypeThumbITMask");
- operandTypes.addEntry("kOperandTypeThumbAddrModeS1");
- operandTypes.addEntry("kOperandTypeThumbAddrModeS2");
- operandTypes.addEntry("kOperandTypeThumbAddrModeS4");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeRegS");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeImmS");
operandTypes.addEntry("kOperandTypeThumbAddrModeRR");
operandTypes.addEntry("kOperandTypeThumbAddrModeSP");
+ operandTypes.addEntry("kOperandTypeThumbAddrModePC");
operandTypes.addEntry("kOperandTypeThumb2SoReg");
operandTypes.addEntry("kOperandTypeThumb2SoImm");
operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8");
@@ -821,16 +873,16 @@ static void emitCommonEnums(raw_ostream &o, unsigned int &i) {
operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8s4");
operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8s4Offset");
operandTypes.emit(o, i);
-
+
o << "\n";
-
+
EnumEmitter operandFlags("OperandFlags");
operandFlags.addEntry("kOperandFlagSource");
operandFlags.addEntry("kOperandFlagTarget");
operandFlags.emitAsFlags(o, i);
-
+
o << "\n";
-
+
EnumEmitter instructionTypes("InstructionTypes");
instructionTypes.addEntry("kInstructionTypeNone");
instructionTypes.addEntry("kInstructionTypeMove");
@@ -840,25 +892,25 @@ static void emitCommonEnums(raw_ostream &o, unsigned int &i) {
instructionTypes.addEntry("kInstructionTypeCall");
instructionTypes.addEntry("kInstructionTypeReturn");
instructionTypes.emit(o, i);
-
+
o << "\n";
}
void EDEmitter::run(raw_ostream &o) {
unsigned int i = 0;
-
+
CompoundConstantEmitter infoArray;
- CodeGenTarget target;
-
+ CodeGenTarget target(Records);
+
populateInstInfo(infoArray, target);
-
+
emitCommonEnums(o, i);
-
+
o << "namespace {\n";
-
+
o << "llvm::EDInstInfo instInfo" << target.getName().c_str() << "[] = ";
infoArray.emit(o, i);
o << ";" << "\n";
-
+
o << "}\n";
}
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index 6c16fcf..f01de1d 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -20,6 +20,7 @@
#include "FastISelEmitter.h"
#include "Record.h"
#include "llvm/Support/Debug.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/VectorExtras.h"
using namespace llvm;
@@ -65,23 +66,23 @@ struct OperandsSignature {
return true;
}
}
-
+
const CodeGenRegisterClass *DstRC = 0;
-
+
for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
TreePatternNode *Op = InstPatNode->getChild(i);
-
+
// For now, filter out any operand with a predicate.
// For now, filter out any operand with multiple values.
if (!Op->getPredicateFns().empty() ||
Op->getNumTypes() != 1)
return false;
-
+
assert(Op->hasTypeSet(0) && "Type infererence not done?");
// For now, all the operands must have the same type.
if (Op->getType(0) != VT)
return false;
-
+
if (!Op->isLeaf()) {
if (Op->getOperator()->getName() == "imm") {
Operands.push_back("i");
@@ -107,7 +108,7 @@ struct OperandsSignature {
RC = Target.getRegisterClassForRegister(OpLeafRec);
else
return false;
-
+
// For now, this needs to be a register class of some sort.
if (!RC)
return false;
@@ -212,7 +213,7 @@ class FastISelMap {
typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
typedef std::map<MVT::SimpleValueType, RetPredMap> TypeRetPredMap;
typedef std::map<std::string, TypeRetPredMap> OpcodeTypeRetPredMap;
- typedef std::map<OperandsSignature, OpcodeTypeRetPredMap>
+ typedef std::map<OperandsSignature, OpcodeTypeRetPredMap>
OperandsOpcodeTypeRetPredMap;
OperandsOpcodeTypeRetPredMap SimplePatterns;
@@ -263,9 +264,9 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
if (!Op->isSubClassOf("Instruction"))
continue;
CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
- if (II.OperandList.empty())
+ if (II.Operands.size() == 0)
continue;
-
+
// For now, ignore multi-instruction patterns.
bool MultiInsts = false;
for (unsigned i = 0, e = Dst->getNumChildren(); i != e; ++i) {
@@ -285,7 +286,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
const CodeGenRegisterClass *DstRC = 0;
std::string SubRegNo;
if (Op->getName() != "EXTRACT_SUBREG") {
- Record *Op0Rec = II.OperandList[0].Rec;
+ Record *Op0Rec = II.Operands[0].Rec;
if (!Op0Rec->isSubClassOf("RegisterClass"))
continue;
DstRC = &Target.getRegisterClass(Op0Rec);
@@ -295,7 +296,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
// If this isn't a leaf, then continue since the register classes are
// a bit too complicated for now.
if (!Dst->getChild(1)->isLeaf()) continue;
-
+
DefInit *SR = dynamic_cast<DefInit*>(Dst->getChild(1)->getLeafValue());
if (SR)
SubRegNo = getQualifiedName(SR->getDef());
@@ -310,7 +311,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
// Ignore multiple result nodes for now.
if (InstPatNode->getNumTypes() > 1) continue;
-
+
Record *InstPatOp = InstPatNode->getOperator();
std::string OpcodeName = getOpcodeName(InstPatOp, CGP);
MVT::SimpleValueType RetVT = MVT::isVoid;
@@ -334,7 +335,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
OperandsSignature Operands;
if (!Operands.initialize(InstPatNode, Target, VT))
continue;
-
+
std::vector<std::string>* PhysRegInputs = new std::vector<std::string>();
if (!InstPatNode->isLeaf() &&
(InstPatNode->getOperator()->getName() == "imm" ||
@@ -347,7 +348,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
PhysRegInputs->push_back("");
continue;
}
-
+
DefInit *OpDI = dynamic_cast<DefInit*>(Op->getLeafValue());
Record *OpLeafRec = OpDI->getDef();
std::string PhysReg;
@@ -355,7 +356,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
PhysReg += static_cast<StringInit*>(OpLeafRec->getValue( \
"Namespace")->getValue())->getValue();
PhysReg += "::";
-
+
std::vector<CodeGenRegister> Regs = Target.getRegisters();
for (unsigned i = 0; i < Regs.size(); ++i) {
if (Regs[i].TheDef == OpLeafRec) {
@@ -364,7 +365,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
}
}
}
-
+
PhysRegInputs->push_back(PhysReg);
}
} else
@@ -380,9 +381,10 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
SubRegNo,
PhysRegInputs
};
- assert(!SimplePatterns[Operands][OpcodeName][VT][RetVT]
- .count(PredicateCheck) &&
- "Duplicate pattern!");
+ if (SimplePatterns[Operands][OpcodeName][VT][RetVT]
+ .count(PredicateCheck))
+ throw TGError(Pattern.getSrcRecord()->getLoc(), "Duplicate record!");
+
SimplePatterns[Operands][OpcodeName][VT][RetVT][PredicateCheck] = Memo;
}
}
@@ -429,7 +431,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
PI != PE; ++PI) {
std::string PredicateCheck = PI->first;
const InstructionMemo &Memo = PI->second;
-
+
if (PredicateCheck.empty()) {
assert(!HasPred &&
"Multiple instructions match, at least one has "
@@ -439,14 +441,14 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << " ";
HasPred = true;
}
-
+
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
<< "TII.get(TargetOpcode::COPY), "
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
-
+
OS << " return FastEmitInst_";
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs);
@@ -462,10 +464,10 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << Memo.SubRegNo;
OS << ");\n";
}
-
+
if (HasPred)
OS << " }\n";
-
+
}
// Return 0 if none of the predicates were satisfied.
if (HasPred)
@@ -473,7 +475,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << "}\n";
OS << "\n";
}
-
+
// Emit one function for the type that demultiplexes on return type.
OS << "unsigned FastEmit_"
<< getLegalCName(Opcode) << "_"
@@ -496,7 +498,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << ");\n";
}
OS << " default: return 0;\n}\n}\n\n";
-
+
} else {
// Non-variadic return type.
OS << "unsigned FastEmit_"
@@ -508,13 +510,13 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\n";
-
+
OS << " if (RetVT.SimpleTy != " << getName(RM.begin()->first)
<< ")\n return 0;\n";
-
+
const PredMap &PM = RM.begin()->second;
bool HasPred = false;
-
+
// Emit code for each possible instruction. There may be
// multiple if there are subtarget concerns.
for (PredMap::const_iterator PI = PM.begin(), PE = PM.end(); PI != PE;
@@ -531,16 +533,16 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << " ";
HasPred = true;
}
-
+
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
<< "TII.get(TargetOpcode::COPY), "
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
-
+
OS << " return FastEmitInst_";
-
+
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs);
OS << "(" << InstNS << Memo.Name << ", ";
@@ -554,11 +556,11 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << Memo.SubRegNo;
OS << ");\n";
}
-
+
if (HasPred)
OS << " }\n";
}
-
+
// Return 0 if none of the predicates were satisfied.
if (HasPred)
OS << " return 0;\n";
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
new file mode 100644
index 0000000..2c222b3
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -0,0 +1,1372 @@
+//===------------ FixedLenDecoderEmitter.cpp - Decoder Generator ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the tablegen backend that emits the decoder functions
+// for targets with a fixed-length instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "decoder-emitter"
+
+#include "FixedLenDecoderEmitter.h"
+#include "CodeGenTarget.h"
+#include "Record.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <vector>
+#include <map>
+#include <string>
+
+using namespace llvm;
+
+// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
+// for a bit value.
+//
+// BIT_UNFILTERED is used as the init value for a filter position. It is used
+// only during filter processing.
+typedef enum {
+ BIT_TRUE, // '1'
+ BIT_FALSE, // '0'
+ BIT_UNSET, // '?'
+ BIT_UNFILTERED // unfiltered
+} bit_value_t;
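+
+// For example, an encoding bit written as 1, 0 or ? in a .td file maps to
+// BIT_TRUE, BIT_FALSE or BIT_UNSET respectively; BIT_UNFILTERED never occurs
+// in an instruction's own bits and is only used to initialize filter
+// positions.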
+
+static bool ValueSet(bit_value_t V) {
+ return (V == BIT_TRUE || V == BIT_FALSE);
+}
+static bool ValueNotSet(bit_value_t V) {
+ return (V == BIT_UNSET);
+}
+static int Value(bit_value_t V) {
+ return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
+}
+static bit_value_t bitFromBits(BitsInit &bits, unsigned index) {
+ if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
+ return bit->getValue() ? BIT_TRUE : BIT_FALSE;
+
+ // The bit is uninitialized.
+ return BIT_UNSET;
+}
+// Prints the bit value for each position.
+static void dumpBits(raw_ostream &o, BitsInit &bits) {
+ unsigned index;
+
+ for (index = bits.getNumBits(); index > 0; index--) {
+ switch (bitFromBits(bits, index - 1)) {
+ case BIT_TRUE:
+ o << "1";
+ break;
+ case BIT_FALSE:
+ o << "0";
+ break;
+ case BIT_UNSET:
+ o << "_";
+ break;
+ default:
+ assert(0 && "unexpected return value from bitFromBits");
+ }
+ }
+}
+
+static BitsInit &getBitsField(const Record &def, const char *str) {
+ BitsInit *bits = def.getValueAsBitsInit(str);
+ return *bits;
+}
+
+// Forward declaration.
+class FilterChooser;
+
+// FIXME: Possibly auto-detected?
+#define BIT_WIDTH 32
+
+// Representation of the instruction to work on.
+typedef bit_value_t insn_t[BIT_WIDTH];
+
+/// Filter - Filter works with FilterChooser to produce the decoding tree for
+/// the ISA.
+///
+/// It is useful to think of a Filter as governing the switch stmts of the
+/// decoding tree at a certain level. Each case stmt delegates to an inferior
+/// FilterChooser to decide what further decoding logic to employ, or in other
+/// words, what other remaining bits to look at. The FilterChooser eventually
+/// chooses a best Filter to do its job.
+///
+/// This recursive scheme ends when the number of Opcodes assigned to the
+/// FilterChooser becomes 1 or if there is a conflict. A conflict happens when
+/// the Filter/FilterChooser combo does not know how to distinguish among the
+/// Opcodes assigned.
+///
+/// An example of a conflict is
+///
+/// Conflict:
+/// 111101000.00........00010000....
+/// 111101000.00........0001........
+/// 1111010...00........0001........
+/// 1111010...00....................
+/// 1111010.........................
+/// 1111............................
+/// ................................
+/// VST4q8a 111101000_00________00010000____
+/// VST4q8b 111101000_00________00010000____
+///
+/// The debug output shows the path that the decoding tree follows to reach
+/// the conclusion that there is a conflict. VST4q8a is a vst4 to double-spaced
+/// even registers, while VST4q8b is a vst4 to double-spaced odd registers.
+///
+/// The encoding info in the .td files does not specify this meta information,
+/// which could have been used by the decoder to resolve the conflict. The
+/// decoder could try to decode the even/odd register numbering and assign to
+/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
+/// version and returns the Opcode since the two have the same Asm format string.
+class Filter {
+protected:
+ FilterChooser *Owner; // points to the FilterChooser who owns this filter
+ unsigned StartBit; // the starting bit position
+ unsigned NumBits; // number of bits to filter
+ bool Mixed; // a mixed region contains both set and unset bits
+
+ // Map of well-known segment value to the set of uid's with that value.
+ std::map<uint64_t, std::vector<unsigned> > FilteredInstructions;
+
+ // Set of uid's with non-constant segment values.
+ std::vector<unsigned> VariableInstructions;
+
+ // Map of well-known segment value to its delegate.
+ std::map<unsigned, FilterChooser*> FilterChooserMap;
+
+ // Number of instructions which fall under FilteredInstructions category.
+ unsigned NumFiltered;
+
+ // Keeps track of the last opcode in the filtered bucket.
+ unsigned LastOpcFiltered;
+
+ // Number of instructions which fall under VariableInstructions category.
+ unsigned NumVariable;
+
+public:
+ unsigned getNumFiltered() { return NumFiltered; }
+ unsigned getNumVariable() { return NumVariable; }
+ unsigned getSingletonOpc() {
+ assert(NumFiltered == 1);
+ return LastOpcFiltered;
+ }
+ // Return the filter chooser for the group of instructions without constant
+ // segment values.
+ FilterChooser &getVariableFC() {
+ assert(NumFiltered == 1);
+ assert(FilterChooserMap.size() == 1);
+ return *(FilterChooserMap.find((unsigned)-1)->second);
+ }
+
+ Filter(const Filter &f);
+ Filter(FilterChooser &owner, unsigned startBit, unsigned numBits, bool mixed);
+
+ ~Filter();
+
+ // Divides the decoding task into sub-tasks and delegates them to the
+ // inferior FilterChoosers.
+ //
+ // A special case arises when there's only one entry in the filtered
+ // instructions. In order to unambiguously decode the singleton, we need to
+ // match the remaining undecoded encoding bits against the singleton.
+ void recurse();
+
+ // Emit code to decode instructions given a segment or segments of bits.
+ void emit(raw_ostream &o, unsigned &Indentation);
+
+ // Returns the fanout produced by the filter. More fanout implies
+ // the filter distinguishes more categories of instructions.
+ unsigned usefulness() const;
+}; // End of class Filter
+
+// These are states of our finite state machines used in FilterChooser's
+// filterProcessor() which produces the filter candidates to use.
+typedef enum {
+ ATTR_NONE,
+ ATTR_FILTERED,
+ ATTR_ALL_SET,
+ ATTR_ALL_UNSET,
+ ATTR_MIXED
+} bitAttr_t;
+
+/// FilterChooser - FilterChooser chooses the best filter among a set of Filters
+/// in order to perform the decoding of instructions at the current level.
+///
+/// Decoding proceeds from the top down. Based on the well-known encoding bits
+/// of instructions available, FilterChooser builds up the possible Filters that
+/// can further the task of decoding by distinguishing among the remaining
+/// candidate instructions.
+///
+/// Once a filter has been chosen, it is called upon to divide the decoding task
+/// into sub-tasks and delegates them to its inferior FilterChoosers for further
+/// processing.
+///
+/// It is useful to think of a Filter as governing the switch stmts of the
+/// decoding tree. And each case is delegated to an inferior FilterChooser to
+/// decide what further remaining bits to look at.
+class FilterChooser {
+protected:
+ friend class Filter;
+
+ // Vector of codegen instructions to choose our filter.
+ const std::vector<const CodeGenInstruction*> &AllInstructions;
+
+ // Vector of uid's for this filter chooser to work on.
+ const std::vector<unsigned> Opcodes;
+
+ // Lookup table for the operand decoding of instructions.
+ std::map<unsigned, std::vector<OperandInfo> > &Operands;
+
+ // Vector of candidate filters.
+ std::vector<Filter> Filters;
+
+ // Array of bit values passed down from our parent.
+ // Set to all BIT_UNFILTERED's for Parent == NULL.
+ bit_value_t FilterBitValues[BIT_WIDTH];
+
+ // Links to the FilterChooser above us in the decoding tree.
+ FilterChooser *Parent;
+
+ // Index of the best filter from Filters.
+ int BestIndex;
+
+public:
+ FilterChooser(const FilterChooser &FC) :
+ AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
+ Operands(FC.Operands), Filters(FC.Filters), Parent(FC.Parent),
+ BestIndex(FC.BestIndex) {
+ memcpy(FilterBitValues, FC.FilterBitValues, sizeof(FilterBitValues));
+ }
+
+ FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ const std::vector<unsigned> &IDs,
+ std::map<unsigned, std::vector<OperandInfo> > &Ops) :
+ AllInstructions(Insts), Opcodes(IDs), Operands(Ops), Filters(),
+ Parent(NULL), BestIndex(-1) {
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ FilterBitValues[i] = BIT_UNFILTERED;
+
+ doFilter();
+ }
+
+ FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ const std::vector<unsigned> &IDs,
+ std::map<unsigned, std::vector<OperandInfo> > &Ops,
+ bit_value_t (&ParentFilterBitValues)[BIT_WIDTH],
+ FilterChooser &parent) :
+ AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
+ Filters(), Parent(&parent), BestIndex(-1) {
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ FilterBitValues[i] = ParentFilterBitValues[i];
+
+ doFilter();
+ }
+
+ // The top level filter chooser has NULL as its parent.
+ bool isTopLevel() { return Parent == NULL; }
+
+ // Emit the top level typedef and decodeInstruction() function.
+ void emitTop(raw_ostream &o, unsigned Indentation);
+
+protected:
+ // Populates the insn given the uid.
+ void insnWithID(insn_t &Insn, unsigned Opcode) const {
+ BitsInit &Bits = getBitsField(*AllInstructions[Opcode]->TheDef, "Inst");
+
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ Insn[i] = bitFromBits(Bits, i);
+ }
+
+ // Returns the record name.
+ const std::string &nameWithID(unsigned Opcode) const {
+ return AllInstructions[Opcode]->TheDef->getName();
+ }
+
+ // Populates the field of the insn given the start position and the number of
+ // consecutive bits to scan for.
+ //
+ // Returns false if there exists any uninitialized bit value in the range.
+ // Returns true otherwise.
+ bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
+ unsigned NumBits) const;
+
+ /// dumpFilterArray - dumpFilterArray prints out debugging info for the given
+ /// filter array as a series of chars.
+ void dumpFilterArray(raw_ostream &o, bit_value_t (&filter)[BIT_WIDTH]);
+
+ /// dumpStack - dumpStack traverses the filter chooser chain and calls
+ /// dumpFilterArray on each filter chooser up to the top level one.
+ void dumpStack(raw_ostream &o, const char *prefix);
+
+ Filter &bestFilter() {
+ assert(BestIndex != -1 && "BestIndex not set");
+ return Filters[BestIndex];
+ }
+
+ // Called from Filter::recurse() when a singleton exists, for debugging.
+ void SingletonExists(unsigned Opc);
+
+ bool PositionFiltered(unsigned i) {
+ return ValueSet(FilterBitValues[i]);
+ }
+
+ // Calculates the island(s) needed to decode the instruction.
+ // This returns a list of undecoded bits of an instruction; for example,
+ // Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
+ // decoded bits in order to verify that the instruction matches the Opcode.
+ unsigned getIslands(std::vector<unsigned> &StartBits,
+ std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
+ insn_t &Insn);
+
+ // Emits code to decode the singleton. Returns true if we have matched all the
+ // well-known bits.
+ bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,unsigned Opc);
+
+ // Emits code to decode the singleton, and then to decode the rest.
+ void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,Filter &Best);
+
+ // Assign a single filter and run with it.
+ void runSingleFilter(FilterChooser &owner, unsigned startBit, unsigned numBit,
+ bool mixed);
+
+ // reportRegion is a helper function for filterProcessor to mark a region as
+ // eligible for use as a filter region.
+ void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
+ bool AllowMixed);
+
+ // FilterProcessor scans the well-known encoding bits of the instructions and
+ // builds up a list of candidate filters. It chooses the best filter and
+ // recursively descends down the decoding tree.
+ bool filterProcessor(bool AllowMixed, bool Greedy = true);
+
+ // Decides on the best configuration of filter(s) to use in order to decode
+ // the instructions. A conflict of instructions may occur, in which case we
+ // dump the conflict set to the standard error.
+ void doFilter();
+
+ // Emits code to decode our share of instructions. Returns true if the
+ // emitted code causes a return, which occurs if we know how to decode
+ // the instruction at this level or the instruction is not decodeable.
+ bool emit(raw_ostream &o, unsigned &Indentation);
+};
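+
+// A minimal usage sketch (identifier names here are illustrative): the
+// backend builds one top-level chooser over all instructions and their
+// operand decoding info, then asks it to emit the decoder:
+//   FilterChooser FC(NumberedInstructions, AllOpcodes, OperandInfoMap);
+//   FC.emitTop(OS, 0);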
+
+///////////////////////////
+// //
+// Filter Implementation //
+// //
+///////////////////////////
+
+Filter::Filter(const Filter &f) :
+ Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
+ FilteredInstructions(f.FilteredInstructions),
+ VariableInstructions(f.VariableInstructions),
+ FilterChooserMap(f.FilterChooserMap), NumFiltered(f.NumFiltered),
+ LastOpcFiltered(f.LastOpcFiltered), NumVariable(f.NumVariable) {
+}
+
+Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
+ bool mixed) : Owner(&owner), StartBit(startBit), NumBits(numBits),
+ Mixed(mixed) {
+ assert(StartBit + NumBits - 1 < BIT_WIDTH);
+
+ NumFiltered = 0;
+ LastOpcFiltered = 0;
+ NumVariable = 0;
+
+ for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
+ insn_t Insn;
+
+ // Populates the insn given the uid.
+ Owner->insnWithID(Insn, Owner->Opcodes[i]);
+
+ uint64_t Field;
+ // Scans the segment for possibly well-specified encoding bits.
+ bool ok = Owner->fieldFromInsn(Field, Insn, StartBit, NumBits);
+
+ if (ok) {
+ // The encoding bits are well-known. Let's add the uid of the
+ // instruction into the bucket keyed off the constant field value.
+ LastOpcFiltered = Owner->Opcodes[i];
+ FilteredInstructions[Field].push_back(LastOpcFiltered);
+ ++NumFiltered;
+ } else {
+ // Some of the encoding bit(s) are unspecified. This contributes to
+ // one additional member of "Variable" instructions.
+ VariableInstructions.push_back(Owner->Opcodes[i]);
+ ++NumVariable;
+ }
+ }
+
+ assert((FilteredInstructions.size() + VariableInstructions.size() > 0)
+ && "Filter returns no instruction categories");
+}
+
+Filter::~Filter() {
+ std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ for (filterIterator = FilterChooserMap.begin();
+ filterIterator != FilterChooserMap.end();
+ filterIterator++) {
+ delete filterIterator->second;
+ }
+}
+
+// Divides the decoding task into sub-tasks and delegates them to the
+// inferior FilterChoosers.
+//
+// A special case arises when there's only one entry in the filtered
+// instructions. In order to unambiguously decode the singleton, we need to
+// match the remaining undecoded encoding bits against the singleton.
+void Filter::recurse() {
+ std::map<uint64_t, std::vector<unsigned> >::const_iterator mapIterator;
+
+ bit_value_t BitValueArray[BIT_WIDTH];
+ // Starts by inheriting our parent filter chooser's filter bit values.
+ memcpy(BitValueArray, Owner->FilterBitValues, sizeof(BitValueArray));
+
+ unsigned bitIndex;
+
+ if (VariableInstructions.size()) {
+ // Conservatively marks each segment position as BIT_UNSET.
+ for (bitIndex = 0; bitIndex < NumBits; bitIndex++)
+ BitValueArray[StartBit + bitIndex] = BIT_UNSET;
+
+ // Delegates to an inferior filter chooser for further processing on this
+ // group of instructions whose segment values are variable.
+ FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ (unsigned)-1,
+ new FilterChooser(Owner->AllInstructions,
+ VariableInstructions,
+ Owner->Operands,
+ BitValueArray,
+ *Owner)
+ ));
+ }
+
+ // No need to recurse for a singleton filtered instruction.
+ // See also Filter::emit().
+ if (getNumFiltered() == 1) {
+ //Owner->SingletonExists(LastOpcFiltered);
+ assert(FilterChooserMap.size() == 1);
+ return;
+ }
+
+ // Otherwise, create sub choosers.
+ for (mapIterator = FilteredInstructions.begin();
+ mapIterator != FilteredInstructions.end();
+ mapIterator++) {
+
+ // Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
+ for (bitIndex = 0; bitIndex < NumBits; bitIndex++) {
+ if (mapIterator->first & (1ULL << bitIndex))
+ BitValueArray[StartBit + bitIndex] = BIT_TRUE;
+ else
+ BitValueArray[StartBit + bitIndex] = BIT_FALSE;
+ }
+
+ // Delegates to an inferior filter chooser for further processing on this
+ // category of instructions.
+ FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ mapIterator->first,
+ new FilterChooser(Owner->AllInstructions,
+ mapIterator->second,
+ Owner->Operands,
+ BitValueArray,
+ *Owner)
+ ));
+ }
+}
+
+// Emit code to decode instructions given a segment or segments of bits.
+void Filter::emit(raw_ostream &o, unsigned &Indentation) {
+ o.indent(Indentation) << "// Check Inst{";
+
+ if (NumBits > 1)
+ o << (StartBit + NumBits - 1) << '-';
+
+ o << StartBit << "} ...\n";
+
+ o.indent(Indentation) << "switch (fieldFromInstruction(insn, "
+ << StartBit << ", " << NumBits << ")) {\n";
+
+ std::map<unsigned, FilterChooser*>::iterator filterIterator;
+
+ bool DefaultCase = false;
+ for (filterIterator = FilterChooserMap.begin();
+ filterIterator != FilterChooserMap.end();
+ filterIterator++) {
+
+ // Field value -1 implies a non-empty set of variable instructions.
+ // See also recurse().
+ if (filterIterator->first == (unsigned)-1) {
+ DefaultCase = true;
+
+ o.indent(Indentation) << "default:\n";
+ o.indent(Indentation) << " break; // fallthrough\n";
+
+ // Closing curly brace for the switch statement.
+ // This is unconventional because we want the default processing to be
+ // performed for the fallthrough cases as well, i.e., when the "cases"
+ // did not produce a decoded instruction.
+ o.indent(Indentation) << "}\n";
+
+ } else
+ o.indent(Indentation) << "case " << filterIterator->first << ":\n";
+
+ // We arrive at a category of instructions with the same segment value.
+ // Now delegate to the sub filter chooser for further decodings.
+ // The case may fallthrough, which happens if the remaining well-known
+ // encoding bits do not match exactly.
+ if (!DefaultCase) { ++Indentation; ++Indentation; }
+
+ bool finished = filterIterator->second->emit(o, Indentation);
+ // For top level default case, there's no need for a break statement.
+ if (Owner->isTopLevel() && DefaultCase)
+ break;
+ if (!finished)
+ o.indent(Indentation) << "break;\n";
+
+ if (!DefaultCase) { --Indentation; --Indentation; }
+ }
+
+ // If there is no default case, we still need to supply a closing brace.
+ if (!DefaultCase) {
+ // Closing curly brace for the switch statement.
+ o.indent(Indentation) << "}\n";
+ }
+}
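+
+// For an illustrative 4-bit segment at Inst{24-21}, the code emitted above
+// has the shape:
+//   switch (fieldFromInstruction(insn, 21, 4)) {
+//   case 5:
+//     ... // delegated to the sub filter chooser for segment value 5
+//     break;
+//   default:
+//     break; // fallthrough
+//   }
+//   ... // default (variable) decoding, shared with cases that fell through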
+
+// Returns the fanout produced by the filter. More fanout implies
+// the filter distinguishes more categories of instructions.
+unsigned Filter::usefulness() const {
+ if (VariableInstructions.size())
+ return FilteredInstructions.size();
+ else
+ return FilteredInstructions.size() + 1;
+}
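+
+// For example, a filter whose opcodes land in three distinct constant segment
+// values and which has no variable group scores 3 + 1 = 4; with a variable
+// group present it scores 3.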
+
+//////////////////////////////////
+// //
+// FilterChooser Implementation //
+// //
+//////////////////////////////////
+
+// Emit the top level typedef and decodeInstruction() function.
+void FilterChooser::emitTop(raw_ostream &o, unsigned Indentation) {
+ switch (BIT_WIDTH) {
+ case 8:
+ o.indent(Indentation) << "typedef uint8_t field_t;\n";
+ break;
+ case 16:
+ o.indent(Indentation) << "typedef uint16_t field_t;\n";
+ break;
+ case 32:
+ o.indent(Indentation) << "typedef uint32_t field_t;\n";
+ break;
+ case 64:
+ o.indent(Indentation) << "typedef uint64_t field_t;\n";
+ break;
+ default:
+ assert(0 && "Unexpected instruction size!");
+ }
+
+ o << '\n';
+
+ o.indent(Indentation) << "static field_t " <<
+ "fieldFromInstruction(field_t insn, unsigned startBit, unsigned numBits)\n";
+
+ o.indent(Indentation) << "{\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "assert(startBit + numBits <= " << BIT_WIDTH
+ << " && \"Instruction field out of bounds!\");\n";
+ o << '\n';
+ o.indent(Indentation) << "field_t fieldMask;\n";
+ o << '\n';
+ o.indent(Indentation) << "if (numBits == " << BIT_WIDTH << ")\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "fieldMask = (field_t)-1;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "else\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "fieldMask = ((1 << numBits) - 1) << startBit;\n";
+ --Indentation; --Indentation;
+
+ o << '\n';
+ o.indent(Indentation) << "return (insn & fieldMask) >> startBit;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "}\n";
+
+ o << '\n';
+
+ o.indent(Indentation) <<
+ "static bool decodeInstruction(MCInst &MI, field_t insn) {\n";
+ o.indent(Indentation) << " unsigned tmp = 0;\n";
+
+ ++Indentation; ++Indentation;
+ // Emits code to decode the instructions.
+ emit(o, Indentation);
+
+ o << '\n';
+ o.indent(Indentation) << "return false;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "}\n";
+
+ o << '\n';
+}
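+
+// With BIT_WIDTH == 32, the helper emitted above expands to (roughly):
+//   typedef uint32_t field_t;
+//   static field_t fieldFromInstruction(field_t insn, unsigned startBit,
+//                                       unsigned numBits) {
+//     field_t fieldMask = (numBits == 32) ? (field_t)-1
+//                                         : ((1 << numBits) - 1) << startBit;
+//     return (insn & fieldMask) >> startBit;
+//   }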
+
+// Populates the field of the insn given the start position and the number of
+// consecutive bits to scan for.
+//
+// Returns false upon the first uninitialized bit value encountered.
+// Returns true otherwise.
+bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
+ unsigned StartBit, unsigned NumBits) const {
+ Field = 0;
+
+ for (unsigned i = 0; i < NumBits; ++i) {
+ if (Insn[StartBit + i] == BIT_UNSET)
+ return false;
+
+ if (Insn[StartBit + i] == BIT_TRUE)
+ Field = Field | (1ULL << i);
+ }
+
+ return true;
+}
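+
+// For example, if Insn[0..2] are BIT_TRUE, BIT_FALSE, BIT_TRUE, then
+// fieldFromInsn(Field, Insn, 0, 3) sets Field to 0b101 (5) and returns true;
+// had any of those three positions been BIT_UNSET, it would return false.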
+
+/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
+/// filter array as a series of chars.
+void FilterChooser::dumpFilterArray(raw_ostream &o,
+ bit_value_t (&filter)[BIT_WIDTH]) {
+ unsigned bitIndex;
+
+ for (bitIndex = BIT_WIDTH; bitIndex > 0; bitIndex--) {
+ switch (filter[bitIndex - 1]) {
+ case BIT_UNFILTERED:
+ o << ".";
+ break;
+ case BIT_UNSET:
+ o << "_";
+ break;
+ case BIT_TRUE:
+ o << "1";
+ break;
+ case BIT_FALSE:
+ o << "0";
+ break;
+ }
+ }
+}
+
+/// dumpStack - dumpStack traverses the filter chooser chain and calls
+/// dumpFilterArray on each filter chooser up to the top level one.
+void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
+ FilterChooser *current = this;
+
+ while (current) {
+ o << prefix;
+ dumpFilterArray(o, current->FilterBitValues);
+ o << '\n';
+ current = current->Parent;
+ }
+}
+
+// Called from Filter::recurse() when a singleton exists, for debugging.
+void FilterChooser::SingletonExists(unsigned Opc) {
+ insn_t Insn0;
+ insnWithID(Insn0, Opc);
+
+ errs() << "Singleton exists: " << nameWithID(Opc)
+ << " with its decoding dominating ";
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
+ if (Opcodes[i] == Opc) continue;
+ errs() << nameWithID(Opcodes[i]) << ' ';
+ }
+ errs() << '\n';
+
+ dumpStack(errs(), "\t\t");
+ for (unsigned i = 0; i < Opcodes.size(); i++) {
+ const std::string &Name = nameWithID(Opcodes[i]);
+
+ errs() << '\t' << Name << " ";
+ dumpBits(errs(),
+ getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
+ errs() << '\n';
+ }
+}
+
+// Calculates the island(s) needed to decode the instruction.
+// This returns a list of undecoded bits of an instruction; for example,
+// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
+// decoded bits in order to verify that the instruction matches the Opcode.
+unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
+ std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
+ insn_t &Insn) {
+ unsigned Num, BitNo;
+ Num = BitNo = 0;
+
+ uint64_t FieldVal = 0;
+
+ // 0: Init
+ // 1: Water (the bit value does not affect decoding)
+ // 2: Island (well-known bit value needed for decoding)
+ int State = 0;
+ int Val = -1;
+
+ for (unsigned i = 0; i < BIT_WIDTH; ++i) {
+ Val = Value(Insn[i]);
+ bool Filtered = PositionFiltered(i);
+ switch (State) {
+ default:
+ assert(0 && "Unreachable code!");
+ break;
+ case 0:
+ case 1:
+ if (Filtered || Val == -1)
+ State = 1; // Still in Water
+ else {
+ State = 2; // Into the Island
+ BitNo = 0;
+ StartBits.push_back(i);
+ FieldVal = Val;
+ }
+ break;
+ case 2:
+ if (Filtered || Val == -1) {
+ State = 1; // Into the Water
+ EndBits.push_back(i - 1);
+ FieldVals.push_back(FieldVal);
+ ++Num;
+ } else {
+ State = 2; // Still in Island
+ ++BitNo;
+ FieldVal = FieldVal | Val << BitNo;
+ }
+ break;
+ }
+ }
+ // If we are still in Island after the loop, do some housekeeping.
+ if (State == 2) {
+ EndBits.push_back(BIT_WIDTH - 1);
+ FieldVals.push_back(FieldVal);
+ ++Num;
+ }
+
+ assert(StartBits.size() == Num && EndBits.size() == Num &&
+ FieldVals.size() == Num);
+ return Num;
+}
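+
+// For the example above (Inst{20} = 1 and Inst{3-0} = 0b1111, all other
+// positions filtered or unset), getIslands returns 2 with
+// StartBits = {0, 20}, EndBits = {3, 20} and FieldVals = {0xF, 1}.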
+
+// Emits code to decode the singleton. Returns true if we have matched all the
+// well-known bits.
+bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ unsigned Opc) {
+ std::vector<unsigned> StartBits;
+ std::vector<unsigned> EndBits;
+ std::vector<uint64_t> FieldVals;
+ insn_t Insn;
+ insnWithID(Insn, Opc);
+
+ // Look for islands of undecoded bits of the singleton.
+ getIslands(StartBits, EndBits, FieldVals, Insn);
+
+ unsigned Size = StartBits.size();
+ unsigned I, NumBits;
+
+ // If we have matched all the well-known bits, just issue a return.
+ if (Size == 0) {
+ o.indent(Indentation) << "{\n";
+ o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
+ std::vector<OperandInfo>& InsnOperands = Operands[Opc];
+ for (std::vector<OperandInfo>::iterator
+ I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
+ // If a custom instruction decoder was specified, use that.
+ if (I->FieldBase == ~0U && I->FieldLength == ~0U) {
+ o.indent(Indentation) << " " << I->Decoder << "(MI, insn);\n";
+ break;
+ }
+
+ o.indent(Indentation)
+ << " tmp = fieldFromInstruction(insn, " << I->FieldBase
+ << ", " << I->FieldLength << ");\n";
+ if (I->Decoder != "") {
+ o.indent(Indentation) << " " << I->Decoder << "(MI, tmp);\n";
+ } else {
+ o.indent(Indentation)
+ << " MI.addOperand(MCOperand::CreateImm(tmp));\n";
+ }
+ }
+
+ o.indent(Indentation) << " return true; // " << nameWithID(Opc)
+ << '\n';
+ o.indent(Indentation) << "}\n";
+ return true;
+ }
+
+ // Otherwise, there are more decodings to be done!
+
+ // Emit code to match the island(s) for the singleton.
+ o.indent(Indentation) << "// Check ";
+
+ for (I = Size; I != 0; --I) {
+ o << "Inst{" << EndBits[I-1] << '-' << StartBits[I-1] << "} ";
+ if (I > 1)
+ o << "&& ";
+ else
+ o << "for singleton decoding...\n";
+ }
+
+ o.indent(Indentation) << "if (";
+
+ for (I = Size; I != 0; --I) {
+ NumBits = EndBits[I-1] - StartBits[I-1] + 1;
+ o << "fieldFromInstruction(insn, " << StartBits[I-1] << ", " << NumBits
+ << ") == " << FieldVals[I-1];
+ if (I > 1)
+ o << " && ";
+ else
+ o << ") {\n";
+ }
+ o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
+ std::vector<OperandInfo>& InsnOperands = Operands[Opc];
+ for (std::vector<OperandInfo>::iterator
+ I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
+ // If a custom instruction decoder was specified, use that.
+ if (I->FieldBase == ~0U && I->FieldLength == ~0U) {
+ o.indent(Indentation) << " " << I->Decoder << "(MI, insn);\n";
+ break;
+ }
+
+ o.indent(Indentation)
+ << " tmp = fieldFromInstruction(insn, " << I->FieldBase
+ << ", " << I->FieldLength << ");\n";
+ if (I->Decoder != "") {
+ o.indent(Indentation) << " " << I->Decoder << "(MI, tmp);\n";
+ } else {
+ o.indent(Indentation)
+ << " MI.addOperand(MCOperand::CreateImm(tmp));\n";
+ }
+ }
+ o.indent(Indentation) << " return true; // " << nameWithID(Opc)
+ << '\n';
+ o.indent(Indentation) << "}\n";
+
+ return false;
+}
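+
+// The island check emitted above takes a shape like the following (the field
+// positions and values are illustrative):
+//   // Check Inst{21-20} && Inst{7-4} for singleton decoding...
+//   if (fieldFromInstruction(insn, 20, 2) == 3 &&
+//       fieldFromInstruction(insn, 4, 4) == 5) {
+//     MI.setOpcode(Opc);
+//     ... // operand decoding as in the all-bits-matched case
+//     return true;
+//   }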
+
+// Emits code to decode the singleton, and then to decode the rest.
+void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ Filter &Best) {
+
+ unsigned Opc = Best.getSingletonOpc();
+
+ emitSingletonDecoder(o, Indentation, Opc);
+
+ // Emit code for the rest.
+ o.indent(Indentation) << "else\n";
+
+ Indentation += 2;
+ Best.getVariableFC().emit(o, Indentation);
+ Indentation -= 2;
+}
+
+// Assign a single filter and run with it. A top-level API client can
+// initialize with a single filter to start the filtering process.
+void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
+ unsigned numBit, bool mixed) {
+ Filters.clear();
+ Filter F(*this, startBit, numBit, true);
+ Filters.push_back(F);
+ BestIndex = 0; // Sole Filter instance to choose from.
+ bestFilter().recurse();
+}
+
+// reportRegion is a helper function for filterProcessor to mark a region as
+// eligible for use as a filter region.
+void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
+ unsigned BitIndex, bool AllowMixed) {
+ if (RA == ATTR_MIXED && AllowMixed)
+ Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, true));
+ else if (RA == ATTR_ALL_SET && !AllowMixed)
+ Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, false));
+}
+
+// FilterProcessor scans the well-known encoding bits of the instructions and
+// builds up a list of candidate filters. It chooses the best filter and
+// recursively descends down the decoding tree.
+bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
+ Filters.clear();
+ BestIndex = -1;
+ unsigned numInstructions = Opcodes.size();
+
+ assert(numInstructions && "Filter created with no instructions");
+
+ // No further filtering is necessary.
+ if (numInstructions == 1)
+ return true;
+
+ // Heuristics: see also the "Heuristics" comment in doFilter() for the case
+ // where the number of instructions is 3.
+ if (AllowMixed && !Greedy) {
+ assert(numInstructions == 3);
+
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
+ std::vector<unsigned> StartBits;
+ std::vector<unsigned> EndBits;
+ std::vector<uint64_t> FieldVals;
+ insn_t Insn;
+
+ insnWithID(Insn, Opcodes[i]);
+
+ // Look for islands of undecoded bits of any instruction.
+ if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
+ // Found an instruction with island(s). Now just assign a filter.
+ runSingleFilter(*this, StartBits[0], EndBits[0] - StartBits[0] + 1,
+ true);
+ return true;
+ }
+ }
+ }
+
+ unsigned BitIndex, InsnIndex;
+
+ // We maintain BIT_WIDTH copies of the bitAttrs automaton.
+ // The automaton consumes the corresponding bit from each
+ // instruction.
+ //
+ // Input symbols: 0, 1, and _ (unset).
+ // States: NONE, FILTERED, ALL_SET, ALL_UNSET, and MIXED.
+ // Initial state: NONE.
+ //
+ // (NONE) ------- [01] -> (ALL_SET)
+ // (NONE) ------- _ ----> (ALL_UNSET)
+ // (ALL_SET) ---- [01] -> (ALL_SET)
+ // (ALL_SET) ---- _ ----> (MIXED)
+ // (ALL_UNSET) -- [01] -> (MIXED)
+ // (ALL_UNSET) -- _ ----> (ALL_UNSET)
+ // (MIXED) ------ . ----> (MIXED)
+ // (FILTERED)---- . ----> (FILTERED)
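+//
+// For example, for two 4-bit instructions 10_1 and 1_01 (bit 3 first) with
+// no FILTERED positions, the per-position attributes end up, from bit 3
+// down to bit 0: ALL_SET, MIXED, MIXED, ALL_SET.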
+
+ bitAttr_t bitAttrs[BIT_WIDTH];
+
+ // FILTERED bit positions provide no entropy and are not worth pursuing.
+ // Filter::recurse() sets either BIT_TRUE or BIT_FALSE for each position.
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex)
+ if (FilterBitValues[BitIndex] == BIT_TRUE ||
+ FilterBitValues[BitIndex] == BIT_FALSE)
+ bitAttrs[BitIndex] = ATTR_FILTERED;
+ else
+ bitAttrs[BitIndex] = ATTR_NONE;
+
+ for (InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
+ insn_t insn;
+
+ insnWithID(insn, Opcodes[InsnIndex]);
+
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex) {
+ switch (bitAttrs[BitIndex]) {
+ case ATTR_NONE:
+ if (insn[BitIndex] == BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_ALL_UNSET;
+ else
+ bitAttrs[BitIndex] = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_SET:
+ if (insn[BitIndex] == BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_MIXED;
+ break;
+ case ATTR_ALL_UNSET:
+ if (insn[BitIndex] != BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_MIXED;
+ break;
+ case ATTR_MIXED:
+ case ATTR_FILTERED:
+ break;
+ }
+ }
+ }
+
+ // The regionAttr automaton consumes the bitAttrs automata's states,
+ // lowest-to-highest.
+ //
+ // Input symbols: F(iltered), (all_)S(et), (all_)U(nset), M(ixed)
+ // States: NONE, ALL_SET, MIXED
+ // Initial state: NONE
+ //
+ // (NONE) ----- F --> (NONE)
+ // (NONE) ----- S --> (ALL_SET) ; and set region start
+ // (NONE) ----- U --> (NONE)
+ // (NONE) ----- M --> (MIXED) ; and set region start
+ // (ALL_SET) -- F --> (NONE) ; and report an ALL_SET region
+ // (ALL_SET) -- S --> (ALL_SET)
+ // (ALL_SET) -- U --> (NONE) ; and report an ALL_SET region
+ // (ALL_SET) -- M --> (MIXED) ; and report an ALL_SET region
+ // (MIXED) ---- F --> (NONE) ; and report a MIXED region
+ // (MIXED) ---- S --> (ALL_SET) ; and report a MIXED region
+ // (MIXED) ---- U --> (NONE) ; and report a MIXED region
+ // (MIXED) ---- M --> (MIXED)
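+ //
+ // Continuing the example above, scanning bits 0 through 3 reports an
+ // ALL_SET region over bits 0-1, a MIXED region over bit 2, and a final
+ // ALL_SET region over bit 3; reportRegion() keeps only the regions that
+ // match the AllowMixed setting.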
+
+ bitAttr_t RA = ATTR_NONE;
+ unsigned StartBit = 0;
+
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; BitIndex++) {
+ bitAttr_t bitAttr = bitAttrs[BitIndex];
+
+ assert(bitAttr != ATTR_NONE && "Bit without attributes");
+
+ switch (RA) {
+ case ATTR_NONE:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ break;
+ case ATTR_ALL_SET:
+ StartBit = BitIndex;
+ RA = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_UNSET:
+ break;
+ case ATTR_MIXED:
+ StartBit = BitIndex;
+ RA = ATTR_MIXED;
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_ALL_SET:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_ALL_SET:
+ break;
+ case ATTR_ALL_UNSET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_MIXED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_MIXED;
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_MIXED:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_NONE;
+ break;
+ case ATTR_ALL_SET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_UNSET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_MIXED:
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_ALL_UNSET:
+ assert(0 && "regionAttr state machine has no ATTR_UNSET state");
+ case ATTR_FILTERED:
+ assert(0 && "regionAttr state machine has no ATTR_FILTERED state");
+ }
+ }
+
+ // At the end, if we're still in the ALL_SET or MIXED state, report a region.
+ switch (RA) {
+ case ATTR_NONE:
+ break;
+ case ATTR_FILTERED:
+ break;
+ case ATTR_ALL_SET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ break;
+ case ATTR_ALL_UNSET:
+ break;
+ case ATTR_MIXED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ break;
+ }
+
+ // We have finished the filter processing. Now it's time to choose the
+ // best-performing filter.
+ BestIndex = 0;
+ bool AllUseless = true;
+ unsigned BestScore = 0;
+
+ for (unsigned i = 0, e = Filters.size(); i != e; ++i) {
+ unsigned Usefulness = Filters[i].usefulness();
+
+ if (Usefulness)
+ AllUseless = false;
+
+ if (Usefulness > BestScore) {
+ BestIndex = i;
+ BestScore = Usefulness;
+ }
+ }
+
+ if (!AllUseless)
+ bestFilter().recurse();
+
+ return !AllUseless;
+} // end of FilterChooser::filterProcessor(bool, bool)
+
+// Decides on the best configuration of filter(s) to use in order to decode
+// the instructions. A conflict between instructions may occur, in which case
+// we dump the conflict set to standard error.
+void FilterChooser::doFilter() {
+ unsigned Num = Opcodes.size();
+ assert(Num && "FilterChooser created with no instructions");
+
+ // Try regions of consecutive known bit values first.
+ if (filterProcessor(false))
+ return;
+
+ // Then try regions of mixed bits (both known and uninitialized bit values allowed).
+ if (filterProcessor(true))
+ return;
+
+ // Heuristics to cope with conflict set {t2CMPrs, t2SUBSrr, t2SUBSrs} where
+ // no single instruction for the maximum ATTR_MIXED region Inst{14-4} has a
+ // well-known encoding pattern. In such a case, we backtrack and scan for
+ // the very first consecutive ATTR_ALL_SET region and assign a filter to it.
+ if (Num == 3 && filterProcessor(true, false))
+ return;
+
+ // If we get here, instruction decoding has failed.
+ // Set BestIndex to -1 to indicate so.
+ BestIndex = -1;
+}
+
+// Emits code to decode our share of instructions. Returns true if the
+// emitted code causes a return, which occurs if we know how to decode
+// the instruction at this level or the instruction is not decodable.
+bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
+ if (Opcodes.size() == 1)
+ // There is only one instruction in the set, which is great!
+ // Call emitSingletonDecoder() to see whether there are any remaining
+ // encoding bits.
+ return emitSingletonDecoder(o, Indentation, Opcodes[0]);
+
+ // Choose the best filter to do the decoding!
+ if (BestIndex != -1) {
+ Filter &Best = bestFilter();
+ if (Best.getNumFiltered() == 1)
+ emitSingletonDecoder(o, Indentation, Best);
+ else
+ bestFilter().emit(o, Indentation);
+ return false;
+ }
+
+ // We don't know how to decode these instructions! Return 0 and dump the
+ // conflict set!
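+ // The emitted line looks like, for example:
+ //   return 0; // Conflict set: t2CMPrs, t2SUBSrr, t2SUBSrs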
+ o.indent(Indentation) << "return 0;" << " // Conflict set: ";
+ for (int i = 0, N = Opcodes.size(); i < N; ++i) {
+ o << nameWithID(Opcodes[i]);
+ if (i < (N - 1))
+ o << ", ";
+ else
+ o << '\n';
+ }
+
+ // Print out useful conflict information for postmortem analysis.
+ errs() << "Decoding Conflict:\n";
+
+ dumpStack(errs(), "\t\t");
+
+ for (unsigned i = 0; i < Opcodes.size(); i++) {
+ const std::string &Name = nameWithID(Opcodes[i]);
+
+ errs() << '\t' << Name << " ";
+ dumpBits(errs(),
+ getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
+ errs() << '\n';
+ }
+
+ return true;
+}
+
+bool FixedLenDecoderEmitter::populateInstruction(const CodeGenInstruction &CGI,
+ unsigned Opc){
+ const Record &Def = *CGI.TheDef;
+ // If the bit positions are not all specified, do not decode this
+ // instruction. We are bound to fail! For proper disassembly, the
+ // well-known encoding bits of the instruction must be fully specified.
+ //
+ // This also removes pseudo instructions from consideration for disassembly,
+ // which is a better design and less fragile than name matching.
+ BitsInit &Bits = getBitsField(Def, "Inst");
+ if (Bits.allInComplete()) return false;
+
+ // Ignore "asm parser only" instructions.
+ if (Def.getValueAsBit("isAsmParserOnly"))
+ return false;
+
+ std::vector<OperandInfo> InsnOperands;
+
+ // If the instruction has specified a custom decoding hook, use that instead
+ // of trying to auto-generate the decoder.
+ std::string InstDecoder = Def.getValueAsString("DecoderMethod");
+ if (InstDecoder != "") {
+ InsnOperands.push_back(OperandInfo(~0U, ~0U, InstDecoder));
+ Operands[Opc] = InsnOperands;
+ return true;
+ }
+
+ // Generate a description of the operands of the instruction that we know
+ // how to decode automatically.
+ // FIXME: We'll need to have a way to manually override this as needed.
+
+ // Gather the outputs/inputs of the instruction, so we can find their
+ // positions in the encoding. This assumes for now that they appear in the
+ // MCInst in the order that they're listed.
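+ //
+ // For example, (outs GPR:$Rd) and (ins GPR:$Rn, GPR:$Rm) yield the operand
+ // names Rd, Rn, Rm, in that MCInst operand order.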
+ std::vector<std::pair<Init*, std::string> > InOutOperands;
+ DagInit *Out = Def.getValueAsDag("OutOperandList");
+ DagInit *In = Def.getValueAsDag("InOperandList");
+ for (unsigned i = 0; i < Out->getNumArgs(); ++i)
+ InOutOperands.push_back(std::make_pair(Out->getArg(i), Out->getArgName(i)));
+ for (unsigned i = 0; i < In->getNumArgs(); ++i)
+ InOutOperands.push_back(std::make_pair(In->getArg(i), In->getArgName(i)));
+
+ // For each operand, see if we can figure out where it is encoded.
+ for (std::vector<std::pair<Init*, std::string> >::iterator
+ NI = InOutOperands.begin(), NE = InOutOperands.end(); NI != NE; ++NI) {
+ unsigned PrevBit = ~0;
+ unsigned Base = ~0;
+ unsigned PrevPos = ~0;
+ std::string Decoder = "";
+
+ for (unsigned bi = 0; bi < Bits.getNumBits(); ++bi) {
+ VarBitInit *BI = dynamic_cast<VarBitInit*>(Bits.getBit(bi));
+ if (!BI) continue;
+
+ VarInit *Var = dynamic_cast<VarInit*>(BI->getVariable());
+ assert(Var);
+ unsigned CurrBit = BI->getBitNum();
+ if (Var->getName() != NI->second) continue;
+
+ // Figure out the lowest bit of the value, and the width of the field.
+ // Deliberately don't try to handle cases where the field is scattered,
+ // or where not all bits of the field are explicit.
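+ //
+ // For example, if the record specifies Inst{15-12} = Rd{3-0}, the scan
+ // below finds Base = 12 and PrevBit = 3, i.e. a 4-bit field for $Rd
+ // starting at bit 12.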
+ if (Base == ~0U && PrevBit == ~0U && PrevPos == ~0U) {
+ if (CurrBit == 0)
+ Base = bi;
+ else
+ continue;
+ }
+
+ if ((PrevPos != ~0U && bi-1 != PrevPos) ||
+ (CurrBit != ~0U && CurrBit-1 != PrevBit)) {
+ PrevBit = ~0;
+ Base = ~0;
+ PrevPos = ~0;
+ }
+
+ PrevPos = bi;
+ PrevBit = CurrBit;
+
+ // At this point, we can locate the field, but we need to know how to
+ // interpret it. As a first step, require the target to provide callbacks
+ // for decoding register classes.
+ // FIXME: This needs to be extended to handle instructions with custom
+ // decoder methods and operands with (simple) MIOperandInfo's.
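+ // For example, an operand whose type record is the GPR register class is
+ // decoded by a target-supplied DecodeGPRRegisterClass() callback.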
+ TypedInit *TI = dynamic_cast<TypedInit*>(NI->first);
+ RecordRecTy *Type = dynamic_cast<RecordRecTy*>(TI->getType());
+ Record *TypeRecord = Type->getRecord();
+ bool isReg = false;
+ if (TypeRecord->isSubClassOf("RegisterClass")) {
+ Decoder = "Decode" + Type->getRecord()->getName() + "RegisterClass";
+ isReg = true;
+ }
+
+ RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
+ StringInit *String = DecoderString ?
+ dynamic_cast<StringInit*>(DecoderString->getValue()) :
+ 0;
+ if (!isReg && String && String->getValue() != "")
+ Decoder = String->getValue();
+ }
+
+ if (Base != ~0U) {
+ InsnOperands.push_back(OperandInfo(Base, PrevBit+1, Decoder));
+ DEBUG(errs() << "ENCODED OPERAND: $" << NI->second << " @ ("
+ << utostr(Base+PrevBit) << ", " << utostr(Base) << ")\n");
+ }
+ }
+
+ Operands[Opc] = InsnOperands;
+
+
+#if 0
+ DEBUG({
+ // Dumps the instruction encoding bits.
+ dumpBits(errs(), Bits);
+
+ errs() << '\n';
+
+ // Dumps the list of operand info.
+ for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo &Info = CGI.Operands[i];
+ const std::string &OperandName = Info.Name;
+ const Record &OperandDef = *Info.Rec;
+
+ errs() << "\t" << OperandName << " (" << OperandDef.getName() << ")\n";
+ }
+ });
+#endif
+
+ return true;
+}
+
+void FixedLenDecoderEmitter::populateInstructions() {
+ for (unsigned i = 0, e = NumberedInstructions.size(); i < e; ++i) {
+ Record *R = NumberedInstructions[i]->TheDef;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
+
+ if (populateInstruction(*NumberedInstructions[i], i))
+ Opcodes.push_back(i);
+ }
+}
+
+// Emits disassembler code for instruction decoding.
+void FixedLenDecoderEmitter::run(raw_ostream &o)
+{
+ o << "#include \"llvm/MC/MCInst.h\"\n";
+ o << "#include \"llvm/Support/DataTypes.h\"\n";
+ o << "#include <assert.h>\n";
+ o << '\n';
+ o << "namespace llvm {\n\n";
+
+ NumberedInstructions = Target.getInstructionsByEnumValue();
+ populateInstructions();
+ FilterChooser FC(NumberedInstructions, Opcodes, Operands);
+ FC.emitTop(o, 0);
+
+ o << "\n} // End llvm namespace \n";
+}
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
new file mode 100644
index 0000000..d46a495
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
@@ -0,0 +1,56 @@
+//===------------ FixedLenDecoderEmitter.h - Decoder Generator --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the tablegen backend that emits the decoder functions
+// for targets with a fixed-length instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FixedLenDECODEREMITTER_H
+#define FixedLenDECODEREMITTER_H
+
+#include "CodeGenTarget.h"
+#include "TableGenBackend.h"
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
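+// OperandInfo - Describes where an operand's value is found in the
+// instruction encoding and, optionally, names a custom decoder method for
+// it.  For example, an operand encoded in Inst{15-12} has FieldBase = 12
+// and FieldLength = 4.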
+struct OperandInfo {
+ unsigned FieldBase;
+ unsigned FieldLength;
+ std::string Decoder;
+
+ OperandInfo(unsigned FB, unsigned FL, std::string D)
+ : FieldBase(FB), FieldLength(FL), Decoder(D) { }
+};
+
+class FixedLenDecoderEmitter : public TableGenBackend {
+public:
+ FixedLenDecoderEmitter(RecordKeeper &R) :
+ Records(R), Target(R),
+ NumberedInstructions(Target.getInstructionsByEnumValue()) {}
+
+ // run - Output the instruction decoder.
+ void run(raw_ostream &o);
+
+private:
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+ std::vector<const CodeGenInstruction*> NumberedInstructions;
+ std::vector<unsigned> Opcodes;
+ std::map<unsigned, std::vector<OperandInfo> > Operands;
+
+ bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc);
+ void populateInstructions();
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp b/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp
index 47a8474..aa59689 100644
--- a/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp
@@ -23,7 +23,7 @@ void InstrEnumEmitter::run(raw_ostream &OS) {
EmitSourceFileHeader("Target Instruction Enum Values", OS);
OS << "namespace llvm {\n\n";
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
// We must emit the PHI opcode first...
std::string Namespace = Target.getInstNamespace();
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 4d3aa5e..2b684be 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -60,23 +60,23 @@ std::vector<std::string>
InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
std::vector<std::string> Result;
- for (unsigned i = 0, e = Inst.OperandList.size(); i != e; ++i) {
+ for (unsigned i = 0, e = Inst.Operands.size(); i != e; ++i) {
// Handle aggregate operands and normal operands the same way by expanding
// either case into a list of operands for this op.
- std::vector<CodeGenInstruction::OperandInfo> OperandList;
+ std::vector<CGIOperandList::OperandInfo> OperandList;
// This might be a multiple operand thing. Targets like X86 have
// registers in their multi-operand operands. It may also be an anonymous
// operand, which has a single operand, but no declared class for the
// operand.
- DagInit *MIOI = Inst.OperandList[i].MIOperandInfo;
+ DagInit *MIOI = Inst.Operands[i].MIOperandInfo;
if (!MIOI || MIOI->getNumArgs() == 0) {
// Single, anonymous, operand.
- OperandList.push_back(Inst.OperandList[i]);
+ OperandList.push_back(Inst.Operands[i]);
} else {
- for (unsigned j = 0, e = Inst.OperandList[i].MINumOperands; j != e; ++j) {
- OperandList.push_back(Inst.OperandList[i]);
+ for (unsigned j = 0, e = Inst.Operands[i].MINumOperands; j != e; ++j) {
+ OperandList.push_back(Inst.Operands[i]);
Record *OpR = dynamic_cast<DefInit*>(MIOI->getArg(j))->getDef();
OperandList.back().Rec = OpR;
@@ -104,19 +104,19 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
// Predicate operands. Check to see if the original unexpanded operand
// was of type PredicateOperand.
- if (Inst.OperandList[i].Rec->isSubClassOf("PredicateOperand"))
+ if (Inst.Operands[i].Rec->isSubClassOf("PredicateOperand"))
Res += "|(1<<TOI::Predicate)";
// Optional def operands. Check to see if the original unexpanded operand
// was of type OptionalDefOperand.
- if (Inst.OperandList[i].Rec->isSubClassOf("OptionalDefOperand"))
+ if (Inst.Operands[i].Rec->isSubClassOf("OptionalDefOperand"))
Res += "|(1<<TOI::OptionalDef)";
// Fill in constraint info.
Res += ", ";
- const CodeGenInstruction::ConstraintInfo &Constraint =
- Inst.OperandList[i].Constraints[j];
+ const CGIOperandList::ConstraintInfo &Constraint =
+ Inst.Operands[i].Constraints[j];
if (Constraint.isNone())
Res += "0";
else if (Constraint.isEarlyClobber())
@@ -256,14 +256,14 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS) {
int MinOperands = 0;
- if (!Inst.OperandList.empty())
+ if (!Inst.Operands.empty())
// Each logical operand can be multiple MI operands.
- MinOperands = Inst.OperandList.back().MIOperandNo +
- Inst.OperandList.back().MINumOperands;
+ MinOperands = Inst.Operands.back().MIOperandNo +
+ Inst.Operands.back().MINumOperands;
OS << " { ";
OS << Num << ",\t" << MinOperands << ",\t"
- << Inst.NumDefs << ",\t" << getItinClassNumber(Inst.TheDef)
+ << Inst.Operands.NumDefs << ",\t" << getItinClassNumber(Inst.TheDef)
<< ",\t\"" << Inst.TheDef->getName() << "\", 0";
 // Emit all of the target independent flags...
@@ -271,6 +271,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
if (Inst.isBranch) OS << "|(1<<TID::Branch)";
if (Inst.isIndirectBranch) OS << "|(1<<TID::IndirectBranch)";
if (Inst.isCompare) OS << "|(1<<TID::Compare)";
+ if (Inst.isMoveImm) OS << "|(1<<TID::MoveImm)";
if (Inst.isBarrier) OS << "|(1<<TID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1<<TID::DelaySlot)";
if (Inst.isCall) OS << "|(1<<TID::Call)";
@@ -283,9 +284,9 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
if (Inst.isTerminator) OS << "|(1<<TID::Terminator)";
if (Inst.isReMaterializable) OS << "|(1<<TID::Rematerializable)";
if (Inst.isNotDuplicable) OS << "|(1<<TID::NotDuplicable)";
- if (Inst.hasOptionalDef) OS << "|(1<<TID::HasOptionalDef)";
+ if (Inst.Operands.hasOptionalDef) OS << "|(1<<TID::HasOptionalDef)";
if (Inst.usesCustomInserter) OS << "|(1<<TID::UsesCustomInserter)";
- if (Inst.isVariadic) OS << "|(1<<TID::Variadic)";
+ if (Inst.Operands.isVariadic) OS << "|(1<<TID::Variadic)";
if (Inst.hasSideEffects) OS << "|(1<<TID::UnmodeledSideEffects)";
if (Inst.isAsCheapAsAMove) OS << "|(1<<TID::CheapAsAMove)";
if (Inst.hasExtraSrcRegAllocReq) OS << "|(1<<TID::ExtraSrcRegAllocReq)";
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index ba30d97..08f6728 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -14,6 +14,7 @@
#include "CodeGenTarget.h"
#include "IntrinsicEmitter.h"
#include "Record.h"
+#include "StringMatcher.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
using namespace llvm;
@@ -67,16 +68,19 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
void IntrinsicEmitter::EmitPrefix(raw_ostream &OS) {
OS << "// VisualStudio defines setjmp as _setjmp\n"
- "#if defined(_MSC_VER) && defined(setjmp)\n"
- "#define setjmp_undefined_for_visual_studio\n"
- "#undef setjmp\n"
+ "#if defined(_MSC_VER) && defined(setjmp) && \\\n"
+ " !defined(setjmp_undefined_for_msvc)\n"
+ "# pragma push_macro(\"setjmp\")\n"
+ "# undef setjmp\n"
+ "# define setjmp_undefined_for_msvc\n"
"#endif\n\n";
}
void IntrinsicEmitter::EmitSuffix(raw_ostream &OS) {
- OS << "#if defined(_MSC_VER) && defined(setjmp_undefined_for_visual_studio)\n"
+ OS << "#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)\n"
"// let's return it to _setjmp state\n"
- "#define setjmp _setjmp\n"
+ "# pragma pop_macro(\"setjmp\")\n"
+ "# undef setjmp_undefined_for_msvc\n"
"#endif\n\n";
}
@@ -96,37 +100,48 @@ void IntrinsicEmitter::EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
void IntrinsicEmitter::
EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS) {
- // Build a function name -> intrinsic name mapping.
- std::map<std::string, unsigned> IntMapping;
+ // Build a 'first character of function name' -> intrinsic # mapping.
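+ // (Name[5] is the first character after the common "llvm." prefix.)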
+ std::map<char, std::vector<unsigned> > IntMapping;
for (unsigned i = 0, e = Ints.size(); i != e; ++i)
- IntMapping[Ints[i].Name] = i;
-
+ IntMapping[Ints[i].Name[5]].push_back(i);
+
OS << "// Function name -> enum value recognizer code.\n";
OS << "#ifdef GET_FUNCTION_RECOGNIZER\n";
- OS << " switch (Name[5]) {\n";
- OS << " default:\n";
- // Emit the intrinsics in sorted order.
- char LastChar = 0;
- for (std::map<std::string, unsigned>::iterator I = IntMapping.begin(),
+ OS << " StringRef NameR(Name+6, Len-6); // Skip over 'llvm.'\n";
+ OS << " switch (Name[5]) { // Dispatch on first letter.\n";
+ OS << " default: break;\n";
+ // Emit the intrinsic matching stuff by first letter.
+ for (std::map<char, std::vector<unsigned> >::iterator I = IntMapping.begin(),
E = IntMapping.end(); I != E; ++I) {
- if (I->first[5] != LastChar) {
- LastChar = I->first[5];
- OS << " break;\n";
- OS << " case '" << LastChar << "':\n";
+ OS << " case '" << I->first << "':\n";
+ std::vector<unsigned> &IntList = I->second;
+
+ // Emit all the overloaded intrinsics first, build a table of the
+ // non-overloaded ones.
+ std::vector<StringMatcher::StringPair> MatchTable;
+
+ for (unsigned i = 0, e = IntList.size(); i != e; ++i) {
+ unsigned IntNo = IntList[i];
+ std::string Result = "return " + TargetPrefix + "Intrinsic::" +
+ Ints[IntNo].EnumName + ";";
+
+ if (!Ints[IntNo].isOverloaded) {
+ MatchTable.push_back(std::make_pair(Ints[IntNo].Name.substr(6),Result));
+ continue;
+ }
+
+ // For overloaded intrinsics, only the prefix needs to match
+ std::string TheStr = Ints[IntNo].Name.substr(6);
+ TheStr += '.'; // Require "bswap." instead of "bswap".
+ OS << " if (NameR.startswith(\"" << TheStr << "\")) "
+ << Result << '\n';
}
- // For overloaded intrinsics, only the prefix needs to match
- if (Ints[I->second].isOverloaded)
- OS << " if (Len > " << I->first.size()
- << " && !memcmp(Name, \"" << I->first << ".\", "
- << (I->first.size() + 1) << ")) return " << TargetPrefix << "Intrinsic::"
- << Ints[I->second].EnumName << ";\n";
- else
- OS << " if (Len == " << I->first.size()
- << " && !memcmp(Name, \"" << I->first << "\", "
- << I->first.size() << ")) return " << TargetPrefix << "Intrinsic::"
- << Ints[I->second].EnumName << ";\n";
+ // Emit the matcher logic for the fixed length strings.
+ StringMatcher("NameR", MatchTable, OS).Emit(1);
+ OS << " break; // end of '" << I->first << "' case.\n";
}
+
OS << " }\n";
OS << "#endif\n\n";
}
@@ -180,6 +195,8 @@ static void EmitTypeForValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
OS << "Type::getVoidTy(Context)";
} else if (VT == MVT::Metadata) {
OS << "Type::getMetadataTy(Context)";
+ } else if (VT == MVT::x86mmx) {
+ OS << "Type::getX86_MMXTy(Context)";
} else {
assert(false && "Unsupported ValueType!");
}
@@ -556,11 +573,13 @@ EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
OS << " return DoesNotAccessMemory;\n";
break;
case CodeGenIntrinsic::ReadArgMem:
+ OS << " return OnlyReadsArgumentPointees;\n";
+ break;
case CodeGenIntrinsic::ReadMem:
OS << " return OnlyReadsMemory;\n";
break;
case CodeGenIntrinsic::ReadWriteArgMem:
- OS << " return AccessesArguments;\n";
+ OS << " return OnlyAccessesArgumentPointees;\n";
break;
}
}
@@ -584,112 +603,22 @@ EmitGCCBuiltinList(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
OS << "#endif\n\n";
}
-/// EmitBuiltinComparisons - Emit comparisons to determine whether the specified
-/// sorted range of builtin names is equal to the current builtin. This breaks
-/// it down into a simple tree.
-///
-/// At this point, we know that all the builtins in the range have the same name
-/// for the first 'CharStart' characters. Only the end of the name needs to be
-/// discriminated.
-typedef std::map<std::string, std::string>::const_iterator StrMapIterator;
-static void EmitBuiltinComparisons(StrMapIterator Start, StrMapIterator End,
- unsigned CharStart, unsigned Indent,
- std::string TargetPrefix, raw_ostream &OS) {
- if (Start == End) return; // empty range.
-
- // Determine what, if anything, is the same about all these strings.
- std::string CommonString = Start->first;
- unsigned NumInRange = 0;
- for (StrMapIterator I = Start; I != End; ++I, ++NumInRange) {
- // Find the first character that doesn't match.
- const std::string &ThisStr = I->first;
- unsigned NonMatchChar = CharStart;
- while (NonMatchChar < CommonString.size() &&
- NonMatchChar < ThisStr.size() &&
- CommonString[NonMatchChar] == ThisStr[NonMatchChar])
- ++NonMatchChar;
- // Truncate off pieces that don't match.
- CommonString.resize(NonMatchChar);
- }
-
- // Just compare the rest of the string.
- if (NumInRange == 1) {
- if (CharStart != CommonString.size()) {
- OS << std::string(Indent*2, ' ') << "if (!memcmp(BuiltinName";
- if (CharStart) OS << "+" << CharStart;
- OS << ", \"" << (CommonString.c_str()+CharStart) << "\", ";
- OS << CommonString.size() - CharStart << "))\n";
- ++Indent;
- }
- OS << std::string(Indent*2, ' ') << "IntrinsicID = " << TargetPrefix
- << "Intrinsic::";
- OS << Start->second << ";\n";
- return;
- }
-
- // At this point, we potentially have a common prefix for these builtins, emit
- // a check for this common prefix.
- if (CommonString.size() != CharStart) {
- OS << std::string(Indent*2, ' ') << "if (!memcmp(BuiltinName";
- if (CharStart) OS << "+" << CharStart;
- OS << ", \"" << (CommonString.c_str()+CharStart) << "\", ";
- OS << CommonString.size()-CharStart << ")) {\n";
-
- EmitBuiltinComparisons(Start, End, CommonString.size(), Indent+1,
- TargetPrefix, OS);
- OS << std::string(Indent*2, ' ') << "}\n";
- return;
- }
-
- // Output a switch on the character that differs across the set.
- OS << std::string(Indent*2, ' ') << "switch (BuiltinName[" << CharStart
- << "]) {";
- if (CharStart)
- OS << " // \"" << std::string(Start->first.begin(),
- Start->first.begin()+CharStart) << "\"";
- OS << "\n";
-
- for (StrMapIterator I = Start; I != End; ) {
- char ThisChar = I->first[CharStart];
- OS << std::string(Indent*2, ' ') << "case '" << ThisChar << "':\n";
- // Figure out the range that has this common character.
- StrMapIterator NextChar = I;
- for (++NextChar; NextChar != End && NextChar->first[CharStart] == ThisChar;
- ++NextChar)
- /*empty*/;
- EmitBuiltinComparisons(I, NextChar, CharStart+1, Indent+1, TargetPrefix,OS);
- OS << std::string(Indent*2, ' ') << " break;\n";
- I = NextChar;
- }
- OS << std::string(Indent*2, ' ') << "}\n";
-}
-
/// EmitTargetBuiltins - All of the builtins in the specified map are for the
/// same target, and we already checked it.
static void EmitTargetBuiltins(const std::map<std::string, std::string> &BIM,
const std::string &TargetPrefix,
raw_ostream &OS) {
- // Rearrange the builtins by length.
- std::vector<std::map<std::string, std::string> > BuiltinsByLen;
- BuiltinsByLen.reserve(100);
- for (StrMapIterator I = BIM.begin(), E = BIM.end(); I != E; ++I) {
- if (I->first.size() >= BuiltinsByLen.size())
- BuiltinsByLen.resize(I->first.size()+1);
- BuiltinsByLen[I->first.size()].insert(*I);
- }
+ std::vector<StringMatcher::StringPair> Results;
- // Now that we have all the builtins by their length, emit a switch stmt.
- OS << " switch (strlen(BuiltinName)) {\n";
- OS << " default: break;\n";
- for (unsigned i = 0, e = BuiltinsByLen.size(); i != e; ++i) {
- if (BuiltinsByLen[i].empty()) continue;
- OS << " case " << i << ":\n";
- EmitBuiltinComparisons(BuiltinsByLen[i].begin(), BuiltinsByLen[i].end(),
- 0, 3, TargetPrefix, OS);
- OS << " break;\n";
+ for (std::map<std::string, std::string>::const_iterator I = BIM.begin(),
+ E = BIM.end(); I != E; ++I) {
+ std::string ResultCode =
+ "return " + TargetPrefix + "Intrinsic::" + I->second + ";";
+ Results.push_back(StringMatcher::StringPair(I->first, ResultCode));
}
- OS << " }\n";
+
+ StringMatcher("BuiltinName", Results, OS).Emit();
}
@@ -719,24 +648,20 @@ EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
if (TargetOnly) {
OS << "static " << TargetPrefix << "Intrinsic::ID "
<< "getIntrinsicForGCCBuiltin(const char "
- << "*TargetPrefix, const char *BuiltinName) {\n";
- OS << " " << TargetPrefix << "Intrinsic::ID IntrinsicID = ";
+ << "*TargetPrefixStr, const char *BuiltinNameStr) {\n";
} else {
OS << "Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char "
- << "*TargetPrefix, const char *BuiltinName) {\n";
- OS << " Intrinsic::ID IntrinsicID = ";
+ << "*TargetPrefixStr, const char *BuiltinNameStr) {\n";
}
- if (TargetOnly)
- OS << "(" << TargetPrefix<< "Intrinsic::ID)";
-
- OS << "Intrinsic::not_intrinsic;\n";
+ OS << " StringRef BuiltinName(BuiltinNameStr);\n";
+ OS << " StringRef TargetPrefix(TargetPrefixStr);\n\n";
// Note: this could emit significantly better code if we cared.
for (BIMTy::iterator I = BuiltinMap.begin(), E = BuiltinMap.end();I != E;++I){
OS << " ";
if (!I->first.empty())
- OS << "if (!strcmp(TargetPrefix, \"" << I->first << "\")) ";
+ OS << "if (TargetPrefix == \"" << I->first << "\") ";
else
OS << "/* Target Independent Builtins */ ";
OS << "{\n";
@@ -745,7 +670,10 @@ EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
EmitTargetBuiltins(I->second, TargetPrefix, OS);
OS << " }\n";
}
- OS << " return IntrinsicID;\n";
+ OS << " return ";
+ if (!TargetPrefix.empty())
+ OS << "(" << TargetPrefix << "Intrinsic::ID)";
+ OS << "Intrinsic::not_intrinsic;\n";
OS << "}\n";
OS << "#endif\n\n";
}
diff --git a/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp b/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
index 8b81e14..c40a39d 100644
--- a/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
@@ -25,6 +25,7 @@
#include <string>
#include <typeinfo>
+
using namespace llvm;
namespace {
@@ -164,18 +165,6 @@ void CheckedIncrement(I& P, I E, S ErrorString) {
throw ErrorString;
}
-// apply is needed because C++'s syntax doesn't let us construct a function
-// object and call it in the same statement.
-template<typename F, typename T0>
-void apply(F Fun, T0& Arg0) {
- return Fun(Arg0);
-}
-
-template<typename F, typename T0, typename T1>
-void apply(F Fun, T0& Arg0, T1& Arg1) {
- return Fun(Arg0, Arg1);
-}
-
//===----------------------------------------------------------------------===//
/// Back-end specific code
@@ -779,14 +768,21 @@ public:
CheckNumberOfArguments(d, 2);
+ // Alias options store the aliased option name in the 'Help' field and do
+ // not have any properties.
if (OD.isAlias()) {
- // Aliases store the aliased option name in the 'Help' field.
OD.Help = InitPtrToString(d.getArg(1));
}
else {
processOptionProperties(d, OD);
}
+ // Switch options are ZeroOrMore by default.
+ if (OD.isSwitch()) {
+ if (!(OD.isOptional() || OD.isOneOrMore() || OD.isRequired()))
+ OD.setZeroOrMore();
+ }
+
OptDescs_.InsertDescription(OD);
}
@@ -809,13 +805,12 @@ void CollectOptionDescriptions (const RecordVector& V,
OptionDescriptions& OptDescs)
{
// For every OptionList:
- for (RecordVector::const_iterator B = V.begin(),
- E = V.end(); B!=E; ++B) {
+ for (RecordVector::const_iterator B = V.begin(), E = V.end(); B!=E; ++B)
+ {
// Throws an exception if the value does not exist.
ListInit* PropList = (*B)->getValueAsListInit("options");
- // For every option description in this list:
- // collect the information and
+ // For every option description in this list: invoke AddOption.
std::for_each(PropList->begin(), PropList->end(), AddOption(OptDescs));
}
}
@@ -833,7 +828,7 @@ struct ToolDescription : public RefCountedBase<ToolDescription> {
StrVector InLanguage;
std::string InFileOption;
std::string OutFileOption;
- std::string OutLanguage;
+ StrVector OutLanguage;
std::string OutputSuffix;
unsigned Flags;
const Init* OnEmpty;
@@ -919,31 +914,24 @@ private:
toolDesc_.CmdLine = d.getArg(0);
}
- void onInLanguage (const DagInit& d) {
+ /// onInOutLanguage - Common implementation of on{In,Out}Language().
+ void onInOutLanguage (const DagInit& d, StrVector& OutVec) {
CheckNumberOfArguments(d, 1);
- Init* arg = d.getArg(0);
- // Find out the argument's type.
- if (typeid(*arg) == typeid(StringInit)) {
- // It's a string.
- toolDesc_.InLanguage.push_back(InitPtrToString(arg));
+ // Copy strings to the output vector.
+ for (unsigned i = 0, NumArgs = d.getNumArgs(); i < NumArgs; ++i) {
+ OutVec.push_back(InitPtrToString(d.getArg(i)));
}
- else {
- // It's a list.
- const ListInit& lst = InitPtrToList(arg);
- StrVector& out = toolDesc_.InLanguage;
- // Copy strings to the output vector.
- for (ListInit::const_iterator B = lst.begin(), E = lst.end();
- B != E; ++B) {
- out.push_back(InitPtrToString(*B));
- }
+ // Remove duplicates.
+ std::sort(OutVec.begin(), OutVec.end());
+ StrVector::iterator newE = std::unique(OutVec.begin(), OutVec.end());
+ OutVec.erase(newE, OutVec.end());
+ }
- // Remove duplicates.
- std::sort(out.begin(), out.end());
- StrVector::iterator newE = std::unique(out.begin(), out.end());
- out.erase(newE, out.end());
- }
+
+ void onInLanguage (const DagInit& d) {
+ this->onInOutLanguage(d, toolDesc_.InLanguage);
}
void onJoin (const DagInit& d) {
@@ -952,8 +940,7 @@ private:
}
void onOutLanguage (const DagInit& d) {
- CheckNumberOfArguments(d, 1);
- toolDesc_.OutLanguage = InitPtrToString(d.getArg(0));
+ this->onInOutLanguage(d, toolDesc_.OutLanguage);
}
void onOutFileOption (const DagInit& d) {
@@ -1062,47 +1049,62 @@ void FilterNotInGraph (const DagVector& EdgeVector,
}
/// FillInToolToLang - Fills in two tables that map tool names to
-/// (input, output) languages. Helper function used by TypecheckGraph().
+/// input & output language names. Helper function used by TypecheckGraph().
void FillInToolToLang (const ToolDescriptions& ToolDescs,
StringMap<StringSet<> >& ToolToInLang,
- StringMap<std::string>& ToolToOutLang) {
+ StringMap<StringSet<> >& ToolToOutLang) {
for (ToolDescriptions::const_iterator B = ToolDescs.begin(),
E = ToolDescs.end(); B != E; ++B) {
const ToolDescription& D = *(*B);
for (StrVector::const_iterator B = D.InLanguage.begin(),
E = D.InLanguage.end(); B != E; ++B)
ToolToInLang[D.Name].insert(*B);
- ToolToOutLang[D.Name] = D.OutLanguage;
+ for (StrVector::const_iterator B = D.OutLanguage.begin(),
+ E = D.OutLanguage.end(); B != E; ++B)
+ ToolToOutLang[D.Name].insert(*B);
}
}
+/// Intersect - Is set intersection non-empty?
+bool Intersect (const StringSet<>& S1, const StringSet<>& S2) {
+ for (StringSet<>::const_iterator B = S1.begin(), E = S1.end(); B != E; ++B) {
+ if (S2.count(B->first()) != 0)
+ return true;
+ }
+ return false;
+}
+
/// TypecheckGraph - Check that names for output and input languages
/// on all edges do match.
void TypecheckGraph (const DagVector& EdgeVector,
const ToolDescriptions& ToolDescs) {
StringMap<StringSet<> > ToolToInLang;
- StringMap<std::string> ToolToOutLang;
+ StringMap<StringSet<> > ToolToOutLang;
FillInToolToLang(ToolDescs, ToolToInLang, ToolToOutLang);
- StringMap<std::string>::iterator IAE = ToolToOutLang.end();
- StringMap<StringSet<> >::iterator IBE = ToolToInLang.end();
for (DagVector::const_iterator B = EdgeVector.begin(),
E = EdgeVector.end(); B != E; ++B) {
const DagInit* Edge = *B;
const std::string& NodeA = InitPtrToString(Edge->getArg(0));
const std::string& NodeB = InitPtrToString(Edge->getArg(1));
- StringMap<std::string>::iterator IA = ToolToOutLang.find(NodeA);
+ StringMap<StringSet<> >::iterator IA = ToolToOutLang.find(NodeA);
StringMap<StringSet<> >::iterator IB = ToolToInLang.find(NodeB);
+ if (NodeB == "root")
+ throw "Edges back to the root are not allowed!";
+
if (NodeA != "root") {
- if (IA != IAE && IB != IBE && IB->second.count(IA->second) == 0)
+ if (IA == ToolToOutLang.end())
+ throw NodeA + ": no output language defined!";
+ if (IB == ToolToInLang.end())
+ throw NodeB + ": no input language defined!";
+
+ if (!Intersect(IA->second, IB->second)) {
throw "Edge " + NodeA + "->" + NodeB
+ ": output->input language mismatch";
+ }
}
-
- if (NodeB == "root")
- throw "Edges back to the root are not allowed!";
}
}
@@ -1178,25 +1180,20 @@ class ExtractOptionNames {
if (ActionName == "forward" || ActionName == "forward_as" ||
ActionName == "forward_value" ||
ActionName == "forward_transformed_value" ||
- ActionName == "switch_on" || ActionName == "any_switch_on" ||
- ActionName == "parameter_equals" ||
- ActionName == "element_in_list" || ActionName == "not_empty" ||
- ActionName == "empty") {
+ ActionName == "parameter_equals" || ActionName == "element_in_list") {
CheckNumberOfArguments(Stmt, 1);
Init* Arg = Stmt.getArg(0);
- if (typeid(*Arg) == typeid(StringInit)) {
- const std::string& Name = InitPtrToString(Arg);
- OptionNames_.insert(Name);
- }
- else {
- // It's a list.
- const ListInit& List = InitPtrToList(Arg);
- for (ListInit::const_iterator B = List.begin(), E = List.end();
- B != E; ++B) {
- const std::string& Name = InitPtrToString(*B);
- OptionNames_.insert(Name);
- }
+ if (typeid(*Arg) == typeid(StringInit))
+ OptionNames_.insert(InitPtrToString(Arg));
+ }
+ else if (ActionName == "any_switch_on" || ActionName == "switch_on" ||
+ ActionName == "any_not_empty" || ActionName == "any_empty" ||
+ ActionName == "not_empty" || ActionName == "empty") {
+ for (unsigned i = 0, NumArgs = Stmt.getNumArgs(); i < NumArgs; ++i) {
+ Init* Arg = Stmt.getArg(i);
+ if (typeid(*Arg) == typeid(StringInit))
+ OptionNames_.insert(InitPtrToString(Arg));
}
}
else if (ActionName == "and" || ActionName == "or" || ActionName == "not") {
@@ -1211,6 +1208,7 @@ public:
{}
void operator()(const Init* Statement) {
+ // Statement is either a dag, or a list of dags.
if (typeid(*Statement) == typeid(ListInit)) {
const ListInit& DagList = *static_cast<const ListInit*>(Statement);
for (ListInit::const_iterator B = DagList.begin(), E = DagList.end();
@@ -1291,24 +1289,20 @@ bool EmitCaseTest0Args(const std::string& TestName, raw_ostream& O) {
return false;
}
-/// EmitListTest - Helper function used by EmitCaseTest1ArgList().
+/// EmitMultipleArgumentTest - Helper function used by
+/// EmitCaseTestMultipleArgs()
template <typename F>
-void EmitListTest(const ListInit& L, const char* LogicOp,
- F Callback, raw_ostream& O)
+void EmitMultipleArgumentTest(const DagInit& D, const char* LogicOp,
+ F Callback, raw_ostream& O)
{
- // This is a lot like EmitLogicalOperationTest, but works on ListInits instead
- // of Dags...
- bool isFirst = true;
- for (ListInit::const_iterator B = L.begin(), E = L.end(); B != E; ++B) {
- if (isFirst)
- isFirst = false;
- else
- O << ' ' << LogicOp << ' ';
- Callback(InitPtrToString(*B), O);
+ for (unsigned i = 0, NumArgs = D.getNumArgs(); i < NumArgs; ++i) {
+ if (i != 0)
+ O << ' ' << LogicOp << ' ';
+ Callback(InitPtrToString(D.getArg(i)), O);
}
}
-// Callbacks for use with EmitListTest.
+// Callbacks for use with EmitMultipleArgumentTest
class EmitSwitchOn {
const OptionDescriptions& OptDescs_;
@@ -1346,54 +1340,48 @@ public:
};
-/// EmitCaseTest1ArgList - Helper function used by EmitCaseTest1Arg();
-bool EmitCaseTest1ArgList(const std::string& TestName,
- const DagInit& d,
- const OptionDescriptions& OptDescs,
- raw_ostream& O) {
- const ListInit& L = InitPtrToList(d.getArg(0));
-
+/// EmitCaseTestMultipleArgs - Helper function used by EmitCaseTest1Arg()
+bool EmitCaseTestMultipleArgs (const std::string& TestName,
+ const DagInit& d,
+ const OptionDescriptions& OptDescs,
+ raw_ostream& O) {
if (TestName == "any_switch_on") {
- EmitListTest(L, "||", EmitSwitchOn(OptDescs), O);
+ EmitMultipleArgumentTest(d, "||", EmitSwitchOn(OptDescs), O);
return true;
}
else if (TestName == "switch_on") {
- EmitListTest(L, "&&", EmitSwitchOn(OptDescs), O);
+ EmitMultipleArgumentTest(d, "&&", EmitSwitchOn(OptDescs), O);
return true;
}
else if (TestName == "any_not_empty") {
- EmitListTest(L, "||", EmitEmptyTest(true, OptDescs), O);
+ EmitMultipleArgumentTest(d, "||", EmitEmptyTest(true, OptDescs), O);
return true;
}
else if (TestName == "any_empty") {
- EmitListTest(L, "||", EmitEmptyTest(false, OptDescs), O);
+ EmitMultipleArgumentTest(d, "||", EmitEmptyTest(false, OptDescs), O);
return true;
}
else if (TestName == "not_empty") {
- EmitListTest(L, "&&", EmitEmptyTest(true, OptDescs), O);
+ EmitMultipleArgumentTest(d, "&&", EmitEmptyTest(true, OptDescs), O);
return true;
}
else if (TestName == "empty") {
- EmitListTest(L, "&&", EmitEmptyTest(false, OptDescs), O);
+ EmitMultipleArgumentTest(d, "&&", EmitEmptyTest(false, OptDescs), O);
return true;
}
return false;
}
-/// EmitCaseTest1ArgStr - Helper function used by EmitCaseTest1Arg();
-bool EmitCaseTest1ArgStr(const std::string& TestName,
- const DagInit& d,
- const OptionDescriptions& OptDescs,
- raw_ostream& O) {
- const std::string& OptName = InitPtrToString(d.getArg(0));
+/// EmitCaseTest1Arg - Helper function used by EmitCaseTest1OrMoreArgs()
+bool EmitCaseTest1Arg (const std::string& TestName,
+ const DagInit& d,
+ const OptionDescriptions& OptDescs,
+ raw_ostream& O) {
+ const std::string& Arg = InitPtrToString(d.getArg(0));
- if (TestName == "switch_on") {
- apply(EmitSwitchOn(OptDescs), OptName, O);
- return true;
- }
- else if (TestName == "input_languages_contain") {
- O << "InLangs.count(\"" << OptName << "\") != 0";
+ if (TestName == "input_languages_contain") {
+ O << "InLangs.count(\"" << Arg << "\") != 0";
return true;
}
else if (TestName == "in_language") {
@@ -1401,28 +1389,22 @@ bool EmitCaseTest1ArgStr(const std::string& TestName,
// tools can process several files in different languages simultaneously.
// TODO: make this work with Edge::Weight (if possible).
- O << "LangMap.GetLanguage(inFile) == \"" << OptName << '\"';
- return true;
- }
- else if (TestName == "not_empty" || TestName == "empty") {
- bool EmitNegate = (TestName == "not_empty");
- apply(EmitEmptyTest(EmitNegate, OptDescs), OptName, O);
+ O << "LangMap.GetLanguage(inFile) == \"" << Arg << '\"';
return true;
}
return false;
}
-/// EmitCaseTest1Arg - Helper function used by EmitCaseConstructHandler();
-bool EmitCaseTest1Arg(const std::string& TestName,
- const DagInit& d,
- const OptionDescriptions& OptDescs,
- raw_ostream& O) {
+/// EmitCaseTest1OrMoreArgs - Helper function used by
+/// EmitCaseConstructHandler()
+bool EmitCaseTest1OrMoreArgs(const std::string& TestName,
+ const DagInit& d,
+ const OptionDescriptions& OptDescs,
+ raw_ostream& O) {
CheckNumberOfArguments(d, 1);
- if (typeid(*d.getArg(0)) == typeid(ListInit))
- return EmitCaseTest1ArgList(TestName, d, OptDescs, O);
- else
- return EmitCaseTest1ArgStr(TestName, d, OptDescs, O);
+ return EmitCaseTest1Arg(TestName, d, OptDescs, O) ||
+ EmitCaseTestMultipleArgs(TestName, d, OptDescs, O);
}
/// EmitCaseTest2Args - Helper function used by EmitCaseConstructHandler().
@@ -1466,10 +1448,10 @@ void EmitLogicalOperationTest(const DagInit& d, const char* LogicOp,
const OptionDescriptions& OptDescs,
raw_ostream& O) {
O << '(';
- for (unsigned j = 0, NumArgs = d.getNumArgs(); j < NumArgs; ++j) {
- const DagInit& InnerTest = InitPtrToDag(d.getArg(j));
+ for (unsigned i = 0, NumArgs = d.getNumArgs(); i < NumArgs; ++i) {
+ const DagInit& InnerTest = InitPtrToDag(d.getArg(i));
EmitCaseTest(InnerTest, IndentLevel, OptDescs, O);
- if (j != NumArgs - 1) {
+ if (i != NumArgs - 1) {
O << ")\n";
O.indent(IndentLevel + Indent1) << ' ' << LogicOp << " (";
}
@@ -1503,7 +1485,7 @@ void EmitCaseTest(const DagInit& d, unsigned IndentLevel,
EmitLogicalNot(d, IndentLevel, OptDescs, O);
else if (EmitCaseTest0Args(TestName, O))
return;
- else if (EmitCaseTest1Arg(TestName, d, OptDescs, O))
+ else if (EmitCaseTest1OrMoreArgs(TestName, d, OptDescs, O))
return;
else if (EmitCaseTest2Args(TestName, d, IndentLevel, OptDescs, O))
return;
@@ -1550,10 +1532,12 @@ public:
{}
void operator() (const Init* Statement, unsigned IndentLevel) {
+ // Is this a nested 'case'?
+ bool IsCase = dynamic_cast<const DagInit*>(Statement) &&
+ GetOperatorName(static_cast<const DagInit&>(*Statement)) == "case";
- // Ignore nested 'case' DAG.
- if (!(dynamic_cast<const DagInit*>(Statement) &&
- GetOperatorName(static_cast<const DagInit&>(*Statement)) == "case")) {
+ // If so, ignore it, it is handled by our caller, WalkCase.
+ if (!IsCase) {
if (typeid(*Statement) == typeid(ListInit)) {
const ListInit& DagList = *static_cast<const ListInit*>(Statement);
for (ListInit::const_iterator B = DagList.begin(), E = DagList.end();
@@ -2250,11 +2234,8 @@ void EmitInOutLanguageMethods (const ToolDescription& D, raw_ostream& O) {
O.indent(Indent2) << "return InputLanguages_;\n";
O.indent(Indent1) << "}\n\n";
- if (D.OutLanguage.empty())
- throw "Tool " + D.Name + " has no 'out_language' property!";
-
- O.indent(Indent1) << "const char* OutputLanguage() const {\n";
- O.indent(Indent2) << "return \"" << D.OutLanguage << "\";\n";
+ O.indent(Indent1) << "const char** OutputLanguages() const {\n";
+ O.indent(Indent2) << "return OutputLanguages_;\n";
O.indent(Indent1) << "}\n\n";
}
@@ -2299,17 +2280,28 @@ void EmitWorksOnEmptyMethod (const ToolDescription& D,
O.indent(Indent1) << "}\n\n";
}
+/// EmitStrArray - Emit the definition of a 'const char**' static member
+/// variable.  Helper used by EmitStaticMemberDefinitions().
+void EmitStrArray(const std::string& Name, const std::string& VarName,
+ const StrVector& StrVec, raw_ostream& O) {
+ O << "const char* " << Name << "::" << VarName << "[] = {";
+ for (StrVector::const_iterator B = StrVec.begin(), E = StrVec.end();
+ B != E; ++B)
+ O << '\"' << *B << "\", ";
+ O << "0};\n";
+}
+
/// EmitStaticMemberDefinitions - Emit static member definitions for a
/// given Tool class.
void EmitStaticMemberDefinitions(const ToolDescription& D, raw_ostream& O) {
if (D.InLanguage.empty())
throw "Tool " + D.Name + " has no 'in_language' property!";
+ if (D.OutLanguage.empty())
+ throw "Tool " + D.Name + " has no 'out_language' property!";
- O << "const char* " << D.Name << "::InputLanguages_[] = {";
- for (StrVector::const_iterator B = D.InLanguage.begin(),
- E = D.InLanguage.end(); B != E; ++B)
- O << '\"' << *B << "\", ";
- O << "0};\n\n";
+ EmitStrArray(D.Name, "InputLanguages_", D.InLanguage, O);
+ EmitStrArray(D.Name, "OutputLanguages_", D.OutLanguage, O);
+ O << '\n';
}
/// EmitToolClassDefinition - Emit a Tool class definition.
@@ -2327,7 +2319,8 @@ void EmitToolClassDefinition (const ToolDescription& D,
O << "Tool";
O << " {\nprivate:\n";
- O.indent(Indent1) << "static const char* InputLanguages_[];\n\n";
+ O.indent(Indent1) << "static const char* InputLanguages_[];\n";
+ O.indent(Indent1) << "static const char* OutputLanguages_[];\n\n";
O << "public:\n";
EmitNameMethod(D, O);
@@ -2448,21 +2441,13 @@ class EmitPreprocessOptionsCallback :
const OptionDescriptions& OptDescs_;
- void onListOrDag(const DagInit& d, HandlerImpl h,
- unsigned IndentLevel, raw_ostream& O) const
+ void onEachArgument(const DagInit& d, HandlerImpl h,
+ unsigned IndentLevel, raw_ostream& O) const
{
CheckNumberOfArguments(d, 1);
- const Init* I = d.getArg(0);
- // If I is a list, apply h to each element.
- if (typeid(*I) == typeid(ListInit)) {
- const ListInit& L = *static_cast<const ListInit*>(I);
- for (ListInit::const_iterator B = L.begin(), E = L.end(); B != E; ++B)
- ((this)->*(h))(*B, IndentLevel, O);
- }
- // Otherwise, apply h to I.
- else {
- ((this)->*(h))(I, IndentLevel, O);
+ for (unsigned i = 0, NumArgs = d.getNumArgs(); i < NumArgs; ++i) {
+ ((this)->*(h))(d.getArg(i), IndentLevel, O);
}
}
@@ -2489,16 +2474,17 @@ class EmitPreprocessOptionsCallback :
void onUnsetOption(const DagInit& d,
unsigned IndentLevel, raw_ostream& O) const
{
- this->onListOrDag(d, &EmitPreprocessOptionsCallback::onUnsetOptionImpl,
- IndentLevel, O);
+ this->onEachArgument(d, &EmitPreprocessOptionsCallback::onUnsetOptionImpl,
+ IndentLevel, O);
}
- void onSetOptionImpl(const DagInit& d,
+ void onSetOptionImpl(const DagInit& D,
unsigned IndentLevel, raw_ostream& O) const {
- CheckNumberOfArguments(d, 2);
- const std::string& OptName = InitPtrToString(d.getArg(0));
- const Init* Value = d.getArg(1);
+ CheckNumberOfArguments(D, 2);
+
+ const std::string& OptName = InitPtrToString(D.getArg(0));
const OptionDescription& OptDesc = OptDescs_.FindOption(OptName);
+ const Init* Value = D.getArg(1);
if (OptDesc.isList()) {
const ListInit& List = InitPtrToList(Value);
@@ -2528,7 +2514,7 @@ class EmitPreprocessOptionsCallback :
<< " = \"" << Str << "\";\n";
}
else {
- throw "Can't apply 'set_option' to alias option -" + OptName + " !";
+ throw "Can't apply 'set_option' to alias option '" + OptName + "'!";
}
}
@@ -2548,15 +2534,22 @@ class EmitPreprocessOptionsCallback :
{
CheckNumberOfArguments(d, 1);
- // Two arguments: (set_option "parameter", VALUE), where VALUE can be a
- // boolean, a string or a string list.
- if (d.getNumArgs() > 1)
- this->onSetOptionImpl(d, IndentLevel, O);
- // One argument: (set_option "switch")
- // or (set_option ["switch1", "switch2", ...])
- else
- this->onListOrDag(d, &EmitPreprocessOptionsCallback::onSetSwitch,
- IndentLevel, O);
+ // 2-argument form: (set_option "A", true), (set_option "B", "C"),
+ // (set_option "D", ["E", "F"])
+ if (d.getNumArgs() == 2) {
+ const OptionDescription& OptDesc =
+ OptDescs_.FindOption(InitPtrToString(d.getArg(0)));
+ const Init* Opt2 = d.getArg(1);
+
+ if (!OptDesc.isSwitch() || typeid(*Opt2) != typeid(StringInit)) {
+ this->onSetOptionImpl(d, IndentLevel, O);
+ return;
+ }
+ }
+
+ // Multiple argument form: (set_option "A"), (set_option "B", "C", "D")
+ this->onEachArgument(d, &EmitPreprocessOptionsCallback::onSetSwitch,
+ IndentLevel, O);
}
public:
@@ -2661,10 +2654,11 @@ void EmitPopulateLanguageMap (const RecordKeeper& Records, raw_ostream& O)
{
O << "int PopulateLanguageMap (LanguageMap& langMap) {\n";
- // For each LangMap:
+ // For each LanguageMap:
const RecordVector& LangMaps =
Records.getAllDerivedDefinitions("LanguageMap");
+ // Call DoEmitPopulateLanguageMap.
for (RecordVector::const_iterator B = LangMaps.begin(),
E = LangMaps.end(); B!=E; ++B) {
ListInit* LangMap = (*B)->getValueAsListInit("map");
@@ -2899,7 +2893,7 @@ public:
return;
}
- // We're invoked on a command line.
+ // We're invoked on a command line string.
this->onCmdLine(InitPtrToString(Arg));
}
@@ -3041,7 +3035,8 @@ void CheckDriverData(DriverData& Data) {
CheckForSuperfluousOptions(Data.Edges, Data.ToolDescs, Data.OptDescs);
}
-void EmitDriverCode(const DriverData& Data, raw_ostream& O) {
+void EmitDriverCode(const DriverData& Data,
+ raw_ostream& O, RecordKeeper &Records) {
// Emit file header.
EmitIncludes(O);
@@ -3102,7 +3097,7 @@ void LLVMCConfigurationEmitter::run (raw_ostream &O) {
CheckDriverData(Data);
this->EmitSourceFileHeader("llvmc-based driver: auto-generated code", O);
- EmitDriverCode(Data, O);
+ EmitDriverCode(Data, O, Records);
} catch (std::exception& Error) {
throw Error.what() + std::string(" - usually this means a syntax error.");
diff --git a/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.h b/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.h
index b37b83f..0f2ff37 100644
--- a/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.h
+++ b/contrib/llvm/utils/TableGen/LLVMCConfigurationEmitter.h
@@ -21,8 +21,10 @@ namespace llvm {
/// LLVMCConfigurationEmitter - TableGen backend that generates
/// configuration code for LLVMC.
class LLVMCConfigurationEmitter : public TableGenBackend {
+ RecordKeeper &Records;
public:
- explicit LLVMCConfigurationEmitter(RecordKeeper&) {}
+ explicit LLVMCConfigurationEmitter(RecordKeeper &records) :
+ Records(records) {}
// run - Output the asmwriter, returning true on failure.
void run(raw_ostream &o);
diff --git a/contrib/llvm/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/utils/TableGen/NeonEmitter.cpp
index 0a12f37..64224d9 100644
--- a/contrib/llvm/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/NeonEmitter.cpp
@@ -8,17 +8,18 @@
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting arm_neon.h, which includes
-// a declaration and definition of each function specified by the ARM NEON
+// a declaration and definition of each function specified by the ARM NEON
// compiler interface. See ARM document DUI0348B.
//
// Each NEON instruction is implemented in terms of 1 or more functions which
-// are suffixed with the element type of the input vectors. Functions may be
+// are suffixed with the element type of the input vectors. Functions may be
// implemented in terms of generic vector operations such as +, *, -, etc. or
// by calling a __builtin_-prefixed function which will be handled by clang's
// CodeGen library.
//
// Additional validation code can be generated by this file when runHeader() is
-// called, rather than the normal run() entry point.
+// called, rather than the normal run() entry point. A complete set of tests
+// for Neon intrinsics can be generated by calling the runTests() entry point.
//
//===----------------------------------------------------------------------===//
@@ -38,11 +39,11 @@ static void ParseTypes(Record *r, std::string &s,
SmallVectorImpl<StringRef> &TV) {
const char *data = s.data();
int len = 0;
-
+
for (unsigned i = 0, e = s.size(); i != e; ++i, ++len) {
if (data[len] == 'P' || data[len] == 'Q' || data[len] == 'U')
continue;
-
+
switch (data[len]) {
case 'c':
case 's':
@@ -72,6 +73,8 @@ static char Widen(const char t) {
return 'i';
case 'i':
return 'l';
+ case 'h':
+ return 'f';
default: throw "unhandled type in widen!";
}
return '\0';
@@ -89,7 +92,7 @@ static char Narrow(const char t) {
return 'i';
case 'f':
return 'h';
- default: throw "unhandled type in widen!";
+ default: throw "unhandled type in narrow!";
}
return '\0';
}
@@ -98,25 +101,25 @@ static char Narrow(const char t) {
/// the quad-vector, polynomial, or unsigned modifiers set.
static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
unsigned off = 0;
-
+
// remember quad.
if (ty[off] == 'Q') {
quad = true;
++off;
}
-
+
// remember poly.
if (ty[off] == 'P') {
poly = true;
++off;
}
-
+
// remember unsigned.
if (ty[off] == 'U') {
usgn = true;
++off;
}
-
+
// base type to get the type string for.
return ty[off];
}
@@ -134,7 +137,12 @@ static char ModType(const char mod, char type, bool &quad, bool &poly,
break;
case 'u':
usgn = true;
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
case 'x':
+ usgn = false;
poly = false;
if (type == 'f')
type = 'i';
@@ -155,6 +163,10 @@ static char ModType(const char mod, char type, bool &quad, bool &poly,
case 'n':
type = Widen(type);
break;
+ case 'i':
+ type = 'i';
+ scal = true;
+ break;
case 'l':
type = 'l';
scal = true;
@@ -189,36 +201,31 @@ static char ModType(const char mod, char type, bool &quad, bool &poly,
}
/// TypeString - for a modifier and type, generate the name of the typedef for
-/// that type. If generic is true, emit the generic vector type rather than
-/// the public NEON type. QUc -> uint8x8_t / __neon_uint8x8_t.
-static std::string TypeString(const char mod, StringRef typestr,
- bool generic = false) {
+/// that type. QUc -> uint8x8_t.
+static std::string TypeString(const char mod, StringRef typestr) {
bool quad = false;
bool poly = false;
bool usgn = false;
bool scal = false;
bool cnst = false;
bool pntr = false;
-
+
if (mod == 'v')
return "void";
if (mod == 'i')
return "int";
-
+
// base type to get the type string for.
char type = ClassifyType(typestr, quad, poly, usgn);
-
+
// Based on the modifying character, change the type and width if necessary.
type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
-
+
SmallString<128> s;
-
- if (generic)
- s += "__neon_";
-
+
if (usgn)
s.push_back('u');
-
+
switch (type) {
case 'c':
s += poly ? "poly8" : "int8";
@@ -267,16 +274,16 @@ static std::string TypeString(const char mod, StringRef typestr,
s += "x3";
if (mod == '4')
s += "x4";
-
+
// Append _t, finishing the type string typedef type.
s += "_t";
-
+
if (cnst)
s += " const";
-
+
if (pntr)
s += " *";
-
+
return s.str();
}
@@ -291,23 +298,25 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
bool scal = false;
bool cnst = false;
bool pntr = false;
-
+
if (mod == 'v')
- return "v";
+ return "v"; // void
if (mod == 'i')
- return "i";
-
+ return "i"; // int
+
// base type to get the type string for.
char type = ClassifyType(typestr, quad, poly, usgn);
-
+
// Based on the modifying character, change the type and width if necessary.
type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+ // All pointers are void* pointers. Change type to 'v' now.
if (pntr) {
usgn = false;
poly = false;
type = 'v';
}
+ // Treat half-float ('h') types as unsigned short ('s') types.
if (type == 'h') {
type = 's';
usgn = true;
@@ -319,12 +328,14 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
if (usgn)
s.push_back('U');
-
- if (type == 'l')
+ else if (type == 'c')
+ s.push_back('S'); // make chars explicitly signed
+
+ if (type == 'l') // 64-bit long
s += "LLi";
else
s.push_back(type);
-
+
if (cnst)
s.push_back('C');
if (pntr)
@@ -337,8 +348,8 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
// returning structs of 2, 3, or 4 vectors which are returned in a sret-like
// fashion, storing them to a pointer arg.
if (ret) {
- if (mod == '2' || mod == '3' || mod == '4')
- return "vv*";
+ if (mod >= '2' && mod <= '4')
+ return "vv*"; // void result with void* first argument
if (mod == 'f' || (ck != ClassB && type == 'f'))
return quad ? "V4f" : "V2f";
if (ck != ClassB && type == 's')
@@ -347,17 +358,17 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
return quad ? "V4i" : "V2i";
if (ck != ClassB && type == 'l')
return quad ? "V2LLi" : "V1LLi";
-
- return quad ? "V16c" : "V8c";
- }
+
+ return quad ? "V16Sc" : "V8Sc";
+ }
// Non-return array types are passed as individual vectors.
if (mod == '2')
- return quad ? "V16cV16c" : "V8cV8c";
+ return quad ? "V16ScV16Sc" : "V8ScV8Sc";
if (mod == '3')
- return quad ? "V16cV16cV16c" : "V8cV8cV8c";
+ return quad ? "V16ScV16ScV16Sc" : "V8ScV8ScV8Sc";
if (mod == '4')
- return quad ? "V16cV16cV16cV16c" : "V8cV8cV8cV8c";
+ return quad ? "V16ScV16ScV16ScV16Sc" : "V8ScV8ScV8ScV8Sc";
if (mod == 'f' || (ck != ClassB && type == 'f'))
return quad ? "V4f" : "V2f";
@@ -367,71 +378,25 @@ static std::string BuiltinTypeString(const char mod, StringRef typestr,
return quad ? "V4i" : "V2i";
if (ck != ClassB && type == 'l')
return quad ? "V2LLi" : "V1LLi";
-
- return quad ? "V16c" : "V8c";
-}
-/// StructTag - generate the name of the struct tag for a type.
-/// These names are mandated by ARM's ABI.
-static std::string StructTag(StringRef typestr) {
- bool quad = false;
- bool poly = false;
- bool usgn = false;
-
- // base type to get the type string for.
- char type = ClassifyType(typestr, quad, poly, usgn);
-
- SmallString<128> s;
- s += "__simd";
- s += quad ? "128_" : "64_";
- if (usgn)
- s.push_back('u');
-
- switch (type) {
- case 'c':
- s += poly ? "poly8" : "int8";
- break;
- case 's':
- s += poly ? "poly16" : "int16";
- break;
- case 'i':
- s += "int32";
- break;
- case 'l':
- s += "int64";
- break;
- case 'h':
- s += "float16";
- break;
- case 'f':
- s += "float32";
- break;
- default:
- throw "unhandled type!";
- break;
- }
-
- // Append _t, finishing the struct tag name.
- s += "_t";
-
- return s.str();
+ return quad ? "V16Sc" : "V8Sc";
}
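For reference, these builtin signature strings follow clang's type-encoding convention; a few decoded examples, reconstructed from the code above rather than quoted from any table:

    /* "V8Sc"  : vector of 8 explicitly signed chars   -> int8x8_t  */
    /* "V16Sc" : vector of 16 explicitly signed chars  -> int8x16_t */
    /* "V2LLi" : vector of 2 long longs                -> int64x2_t */
    /* "vv*"   : void result with a void* first arg (sret-style return) */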
-/// MangleName - Append a type or width suffix to a base neon function name,
+/// MangleName - Append a type or width suffix to a base neon function name,
/// and insert a 'q' in the appropriate location if the operation works on
/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
static std::string MangleName(const std::string &name, StringRef typestr,
ClassKind ck) {
if (name == "vcvt_f32_f16")
return name;
-
+
bool quad = false;
bool poly = false;
bool usgn = false;
char type = ClassifyType(typestr, quad, poly, usgn);
std::string s = name;
-
+
switch (type) {
case 'c':
switch (ck) {
@@ -487,8 +452,8 @@ static std::string MangleName(const std::string &name, StringRef typestr,
}
if (ck == ClassB)
s += "_v";
-
- // Insert a 'q' before the first '_' character so that it ends up before
+
+ // Insert a 'q' before the first '_' character so that it ends up before
// _lane or _n on vector-scalar operations.
if (quad) {
size_t pos = s.find('_');
@@ -501,177 +466,344 @@ static std::string MangleName(const std::string &name, StringRef typestr,
static std::string GenArgs(const std::string &proto, StringRef typestr) {
bool define = proto.find('i') != std::string::npos;
char arg = 'a';
-
+
std::string s;
s += "(";
-
+
for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
- if (!define) {
- s += TypeString(proto[i], typestr);
- s.push_back(' ');
+ if (define) {
+      // Immediate macro arguments are used directly instead of being assigned
+      // to local temporaries; prepend the same "__" prefix so their names
+      // stay consistent with the local temporaries.
+ if (proto[i] == 'i')
+ s += "__";
+ } else {
+ s += TypeString(proto[i], typestr) + " __";
}
s.push_back(arg);
if ((i + 1) < e)
s += ", ";
}
-
+
s += ")";
return s;
}
-static std::string Duplicate(unsigned nElts, StringRef typestr,
+// Macro arguments are not type-checked like inline function arguments, so
+// assign them to local temporaries to get the right type checking.
+static std::string GenMacroLocals(const std::string &proto, StringRef typestr) {
+ char arg = 'a';
+ std::string s;
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create a temporary for an immediate argument.
+ // That would defeat the whole point of using a macro!
+ if (proto[i] == 'i') continue;
+
+ s += TypeString(proto[i], typestr) + " __";
+ s.push_back(arg);
+ s += " = (";
+ s.push_back(arg);
+ s += "); ";
+ }
+
+ s += "\\\n ";
+ return s;
+}
+
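As a sketch of what GenArgs and GenMacroLocals produce together, a lane intrinsic emitted as a macro should come out roughly like this (a hypothetical rendering for vdup_lane_s8, assuming it maps to the splat pattern below; not verbatim emitter output):

    #define vdup_lane_s8(a, __b) __extension__ ({ \
      int8x8_t __a = (a); \
      __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })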
+// Use the vmovl builtin to sign-extend or zero-extend a vector.
+static std::string Extend(StringRef typestr, const std::string &a) {
+ std::string s;
+ s = MangleName("vmovl", typestr, ClassS);
+ s += "(" + a + ")";
+ return s;
+}
+
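For example, the OpAddl pattern below combines this helper with ordinary vector arithmetic, so vaddl_s8 should be emitted roughly as (a sketch, not verbatim output):

    __ai int16x8_t vaddl_s8(int8x8_t __a, int8x8_t __b) {
      return vmovl_s8(__a) + vmovl_s8(__b); }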
+static std::string Duplicate(unsigned nElts, StringRef typestr,
const std::string &a) {
std::string s;
-
- s = "(__neon_" + TypeString('d', typestr) + "){ ";
+
+ s = "(" + TypeString('d', typestr) + "){ ";
for (unsigned i = 0; i != nElts; ++i) {
s += a;
if ((i + 1) < nElts)
s += ", ";
}
s += " }";
-
+
return s;
}
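For instance, Duplicate(4, "s", "__b") builds a compound literal that splats the scalar across all lanes (sketch):

    (int16x4_t){ __b, __b, __b, __b }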
-// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
-// If structTypes is true, the NEON types are structs of vector types rather
-// than vector types, and the call becomes "a.val + b.val"
-static std::string GenOpString(OpKind op, const std::string &proto,
- StringRef typestr, bool structTypes = true) {
- bool dummy, quad = false;
+static std::string SplatLane(unsigned nElts, const std::string &vec,
+ const std::string &lane) {
+ std::string s = "__builtin_shufflevector(" + vec + ", " + vec;
+ for (unsigned i = 0; i < nElts; ++i)
+ s += ", " + lane;
+ s += ")";
+ return s;
+}
+
+static unsigned GetNumElements(StringRef typestr, bool &quad) {
+ quad = false;
+ bool dummy = false;
char type = ClassifyType(typestr, quad, dummy, dummy);
unsigned nElts = 0;
switch (type) {
- case 'c': nElts = 8; break;
- case 's': nElts = 4; break;
- case 'i': nElts = 2; break;
- case 'l': nElts = 1; break;
- case 'h': nElts = 4; break;
- case 'f': nElts = 2; break;
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ default:
+ throw "unhandled type!";
+ break;
}
-
+ if (quad) nElts <<= 1;
+ return nElts;
+}
+
+// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
+static std::string GenOpString(OpKind op, const std::string &proto,
+ StringRef typestr) {
+ bool quad;
+ unsigned nElts = GetNumElements(typestr, quad);
+
+ // If this builtin takes an immediate argument, we need to #define it rather
+ // than use a standard declaration, so that SemaChecking can range check
+ // the immediate passed by the user.
+ bool define = proto.find('i') != std::string::npos;
+
std::string ts = TypeString(proto[0], typestr);
- std::string s = ts + " r; r";
-
- if (structTypes)
- s += ".val";
-
- s += " = ";
-
- std::string a, b, c;
- if (proto.size() > 1)
- a = (structTypes && proto[1] != 'l' && proto[1] != 's') ? "a.val" : "a";
- b = structTypes ? "b.val" : "b";
- c = structTypes ? "c.val" : "c";
-
+ std::string s;
+ if (!define) {
+ s = "return ";
+ }
+
switch(op) {
case OpAdd:
- s += a + " + " + b;
+ s += "__a + __b;";
+ break;
+ case OpAddl:
+ s += Extend(typestr, "__a") + " + " + Extend(typestr, "__b") + ";";
+ break;
+ case OpAddw:
+ s += "__a + " + Extend(typestr, "__b") + ";";
break;
case OpSub:
- s += a + " - " + b;
+ s += "__a - __b;";
+ break;
+ case OpSubl:
+ s += Extend(typestr, "__a") + " - " + Extend(typestr, "__b") + ";";
+ break;
+ case OpSubw:
+ s += "__a - " + Extend(typestr, "__b") + ";";
break;
case OpMulN:
- b = Duplicate(nElts << (int)quad, typestr, "b");
+ s += "__a * " + Duplicate(nElts, typestr, "__b") + ";";
+ break;
+ case OpMulLane:
+ s += "__a * " + SplatLane(nElts, "__b", "__c") + ";";
+ break;
case OpMul:
- s += a + " * " + b;
+ s += "__a * __b;";
+ break;
+ case OpMullN:
+ s += Extend(typestr, "__a") + " * " +
+ Extend(typestr, Duplicate(nElts << (int)quad, typestr, "__b")) + ";";
+ break;
+ case OpMullLane:
+ s += Extend(typestr, "__a") + " * " +
+ Extend(typestr, SplatLane(nElts, "__b", "__c")) + ";";
+ break;
+ case OpMull:
+ s += Extend(typestr, "__a") + " * " + Extend(typestr, "__b") + ";";
break;
case OpMlaN:
- c = Duplicate(nElts << (int)quad, typestr, "c");
+ s += "__a + (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlaLane:
+ s += "__a + (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
case OpMla:
- s += a + " + ( " + b + " * " + c + " )";
+ s += "__a + (__b * __c);";
+ break;
+ case OpMlalN:
+ s += "__a + (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, Duplicate(nElts, typestr, "__c")) + ");";
+ break;
+ case OpMlalLane:
+ s += "__a + (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, SplatLane(nElts, "__c", "__d")) + ");";
+ break;
+ case OpMlal:
+ s += "__a + (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, "__c") + ");";
break;
case OpMlsN:
- c = Duplicate(nElts << (int)quad, typestr, "c");
+ s += "__a - (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlsLane:
+ s += "__a - (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
case OpMls:
- s += a + " - ( " + b + " * " + c + " )";
+ s += "__a - (__b * __c);";
+ break;
+ case OpMlslN:
+ s += "__a - (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, Duplicate(nElts, typestr, "__c")) + ");";
+ break;
+ case OpMlslLane:
+ s += "__a - (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, SplatLane(nElts, "__c", "__d")) + ");";
+ break;
+ case OpMlsl:
+ s += "__a - (" + Extend(typestr, "__b") + " * " +
+ Extend(typestr, "__c") + ");";
+ break;
+ case OpQDMullLane:
+ s += MangleName("vqdmull", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQDMlalLane:
+ s += MangleName("vqdmlal", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMlslLane:
+ s += MangleName("vqdmlsl", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMulhLane:
+ s += MangleName("vqdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQRDMulhLane:
+ s += MangleName("vqrdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
break;
case OpEq:
- s += "(__neon_" + ts + ")(" + a + " == " + b + ")";
+ s += "(" + ts + ")(__a == __b);";
break;
case OpGe:
- s += "(__neon_" + ts + ")(" + a + " >= " + b + ")";
+ s += "(" + ts + ")(__a >= __b);";
break;
case OpLe:
- s += "(__neon_" + ts + ")(" + a + " <= " + b + ")";
+ s += "(" + ts + ")(__a <= __b);";
break;
case OpGt:
- s += "(__neon_" + ts + ")(" + a + " > " + b + ")";
+ s += "(" + ts + ")(__a > __b);";
break;
case OpLt:
- s += "(__neon_" + ts + ")(" + a + " < " + b + ")";
+ s += "(" + ts + ")(__a < __b);";
break;
case OpNeg:
- s += " -" + a;
+ s += " -__a;";
break;
case OpNot:
- s += " ~" + a;
+ s += " ~__a;";
break;
case OpAnd:
- s += a + " & " + b;
+ s += "__a & __b;";
break;
case OpOr:
- s += a + " | " + b;
+ s += "__a | __b;";
break;
case OpXor:
- s += a + " ^ " + b;
+ s += "__a ^ __b;";
break;
case OpAndNot:
- s += a + " & ~" + b;
+ s += "__a & ~__b;";
break;
case OpOrNot:
- s += a + " | ~" + b;
+ s += "__a | ~__b;";
break;
case OpCast:
- s += "(__neon_" + ts + ")" + a;
+ s += "(" + ts + ")__a;";
break;
case OpConcat:
- s += "__builtin_shufflevector((__neon_int64x1_t)" + a;
- s += ", (__neon_int64x1_t)" + b + ", 0, 1)";
+ s += "(" + ts + ")__builtin_shufflevector((int64x1_t)__a";
+ s += ", (int64x1_t)__b, 0, 1);";
break;
case OpHi:
- s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[1])";
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 1);";
break;
case OpLo:
- s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[0])";
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 0);";
break;
case OpDup:
- s += Duplicate(nElts << (int)quad, typestr, a);
+ s += Duplicate(nElts, typestr, "__a") + ";";
+ break;
+ case OpDupLane:
+ s += SplatLane(nElts, "__a", "__b") + ";";
break;
case OpSelect:
    // Bitwise select: (__a & __b) | (~__a & __c), i.e. (mask & val1) | (~mask & val2).
+ s += "(" + ts + ")";
ts = TypeString(proto[1], typestr);
- s += "( " + a + " & (__neon_" + ts + ")" + b + ") | ";
- s += "(~" + a + " & (__neon_" + ts + ")" + c + ")";
+ s += "((__a & (" + ts + ")__b) | ";
+ s += "(~__a & (" + ts + ")__c));";
break;
case OpRev16:
- s += "__builtin_shufflevector(" + a + ", " + a;
- for (unsigned i = 2; i <= nElts << (int)quad; i += 2)
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = 2; i <= nElts; i += 2)
for (unsigned j = 0; j != 2; ++j)
s += ", " + utostr(i - j - 1);
- s += ")";
+ s += ");";
break;
- case OpRev32:
- nElts >>= 1;
- s += "__builtin_shufflevector(" + a + ", " + a;
- for (unsigned i = nElts; i <= nElts << (1 + (int)quad); i += nElts)
- for (unsigned j = 0; j != nElts; ++j)
+ case OpRev32: {
+ unsigned WordElts = nElts >> (1 + (int)quad);
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = WordElts; i <= nElts; i += WordElts)
+ for (unsigned j = 0; j != WordElts; ++j)
s += ", " + utostr(i - j - 1);
- s += ")";
+ s += ");";
break;
- case OpRev64:
- s += "__builtin_shufflevector(" + a + ", " + a;
- for (unsigned i = nElts; i <= nElts << (int)quad; i += nElts)
- for (unsigned j = 0; j != nElts; ++j)
+ }
+ case OpRev64: {
+ unsigned DblWordElts = nElts >> (int)quad;
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = DblWordElts; i <= nElts; i += DblWordElts)
+ for (unsigned j = 0; j != DblWordElts; ++j)
s += ", " + utostr(i - j - 1);
- s += ")";
+ s += ");";
+ break;
+ }
+ case OpAbdl: {
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__a, __b)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
+ case OpAba:
+ s += "__a + " + MangleName("vabd", typestr, ClassS) + "(__b, __c);";
break;
+ case OpAbal: {
+ s += "__a + ";
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__b, __c)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
default:
throw "unknown OpKind!";
break;
}
- s += "; return r;";
return s;
}
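To make one of the cases above concrete, the OpConcat pattern should emit vcombine_s8 roughly as (sketch):

    __ai int8x16_t vcombine_s8(int8x8_t __a, int8x8_t __b) {
      return (int8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }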
@@ -688,10 +820,10 @@ static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
bool scal = false;
bool cnst = false;
bool pntr = false;
-
+
// Base type to get the type string for.
char type = ClassifyType(typestr, quad, poly, usgn);
-
+
// Based on the modifying character, change the type and width if necessary.
type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
@@ -699,9 +831,9 @@ static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
ret |= 0x08;
if (quad && proto[1] != 'g')
ret |= 0x10;
-
+
switch (type) {
- case 'c':
+ case 'c':
ret |= poly ? 5 : 0;
break;
case 's':
@@ -727,65 +859,45 @@ static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
}
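Decoding the bit layout above: the low bits select the element width, while 0x08 and 0x10 appear to flag unsigned and quad types. A worked example under that reading:

    /* unsigned quad 16-bit element type ("Qs" with usgn set): */
    unsigned enc = 1 /* 16-bit */ | 0x08 /* unsigned */ | 0x10 /* quad */;  /* == 0x19 */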
// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
-// If structTypes is true, the NEON types are structs of vector types rather
-// than vector types, and the call becomes __builtin_neon_cls(a.val)
static std::string GenBuiltin(const std::string &name, const std::string &proto,
- StringRef typestr, ClassKind ck,
- bool structTypes = true) {
- bool dummy, quad = false;
- char type = ClassifyType(typestr, quad, dummy, dummy);
- unsigned nElts = 0;
- switch (type) {
- case 'c': nElts = 8; break;
- case 's': nElts = 4; break;
- case 'i': nElts = 2; break;
- case 'l': nElts = 1; break;
- case 'h': nElts = 4; break;
- case 'f': nElts = 2; break;
- }
- if (quad) nElts <<= 1;
-
- char arg = 'a';
+ StringRef typestr, ClassKind ck) {
std::string s;
// If this builtin returns a struct of 2, 3, or 4 vectors, pass it as an implicit
// sret-like argument.
- bool sret = (proto[0] == '2' || proto[0] == '3' || proto[0] == '4');
+ bool sret = (proto[0] >= '2' && proto[0] <= '4');
// If this builtin takes an immediate argument, we need to #define it rather
// than use a standard declaration, so that SemaChecking can range check
// the immediate passed by the user.
bool define = proto.find('i') != std::string::npos;
- // If all types are the same size, bitcasting the args will take care
- // of arg checking. The actual signedness etc. will be taken care of with
- // special enums.
+ // Check if the prototype has a scalar operand with the type of the vector
+ // elements. If not, bitcasting the args will take care of arg checking.
+ // The actual signedness etc. will be taken care of with special enums.
if (proto.find('s') == std::string::npos)
ck = ClassB;
if (proto[0] != 'v') {
std::string ts = TypeString(proto[0], typestr);
-
+
if (define) {
if (sret)
- s += "({ " + ts + " r; ";
- else if (proto[0] != 's')
- s += "(" + ts + "){(__neon_" + ts + ")";
+ s += ts + " r; ";
+ else
+ s += "(" + ts + ")";
} else if (sret) {
s += ts + " r; ";
} else {
- s += ts + " r; r";
- if (structTypes && proto[0] != 's' && proto[0] != 'i' && proto[0] != 'l')
- s += ".val";
-
- s += " = ";
+ s += "return (" + ts + ")";
}
}
-
+
bool splat = proto.find('a') != std::string::npos;
-
+
s += "__builtin_neon_";
if (splat) {
+ // Call the non-splat builtin: chop off the "_n" suffix from the name.
std::string vname(name, 0, name.size()-2);
s += MangleName(vname, typestr, ck);
} else {
@@ -797,17 +909,32 @@ static std::string GenBuiltin(const std::string &name, const std::string &proto,
// builtins.
if (sret)
s += "&r, ";
-
+
+ char arg = 'a';
for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
std::string args = std::string(&arg, 1);
- if (define)
- args = "(" + args + ")";
-
+
+ // Use the local temporaries instead of the macro arguments.
+ args = "__" + args;
+
+ bool argQuad = false;
+ bool argPoly = false;
+ bool argUsgn = false;
+ bool argScalar = false;
+ bool dummy = false;
+ char argType = ClassifyType(typestr, argQuad, argPoly, argUsgn);
+ argType = ModType(proto[i], argType, argQuad, argPoly, argUsgn, argScalar,
+ dummy, dummy);
+
// Handle multiple-vector values specially, emitting each subvector as an
// argument to the __builtin.
- if (structTypes && (proto[i] == '2' || proto[i] == '3' || proto[i] == '4')){
+ if (proto[i] >= '2' && proto[i] <= '4') {
+ // Check if an explicit cast is needed.
+ if (argType != 'c' || argPoly || argUsgn)
+ args = (argQuad ? "(int8x16_t)" : "(int8x8_t)") + args;
+
for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
- s += args + ".val[" + utostr(vi) + "].val";
+ s += args + ".val[" + utostr(vi) + "]";
if ((vi + 1) < ve)
s += ", ";
}
@@ -816,77 +943,158 @@ static std::string GenBuiltin(const std::string &name, const std::string &proto,
continue;
}
-
- if (splat && (i + 1) == e)
- s += Duplicate(nElts, typestr, args);
- else
- s += args;
-
- if (structTypes && proto[i] != 's' && proto[i] != 'i' && proto[i] != 'l' &&
- proto[i] != 'p' && proto[i] != 'c' && proto[i] != 'a') {
- s += ".val";
+
+ if (splat && (i + 1) == e)
+ args = Duplicate(GetNumElements(typestr, argQuad), typestr, args);
+
+ // Check if an explicit cast is needed.
+ if ((splat || !argScalar) &&
+ ((ck == ClassB && argType != 'c') || argPoly || argUsgn)) {
+ std::string argTypeStr = "c";
+ if (ck != ClassB)
+ argTypeStr = argType;
+ if (argQuad)
+ argTypeStr = "Q" + argTypeStr;
+ args = "(" + TypeString('d', argTypeStr) + ")" + args;
}
+
+ s += args;
if ((i + 1) < e)
s += ", ";
}
-
+
// Extra constant integer to hold type class enum for this function, e.g. s8
if (ck == ClassB)
s += ", " + utostr(GetNeonEnum(proto, typestr));
-
- if (define)
- s += ")";
- else
- s += ");";
- if (proto[0] != 'v') {
- if (define) {
- if (sret)
- s += "; r; })";
- else if (proto[0] != 's')
- s += "}";
- } else {
+ s += ");";
+
+ if (proto[0] != 'v' && sret) {
+ if (define)
+ s += " r;";
+ else
s += " return r;";
- }
}
return s;
}
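Putting GenBuiltin together, a ClassB builtin call such as vabd_s8 should be emitted roughly as follows; the trailing 0 is the type-class enum from GetNeonEnum (a sketch under that assumption, not verbatim output):

    __ai int8x8_t vabd_s8(int8x8_t __a, int8x8_t __b) {
      return (int8x8_t)__builtin_neon_vabd_v(__a, __b, 0); }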
-static std::string GenBuiltinDef(const std::string &name,
+static std::string GenBuiltinDef(const std::string &name,
const std::string &proto,
StringRef typestr, ClassKind ck) {
std::string s("BUILTIN(__builtin_neon_");
- // If all types are the same size, bitcasting the args will take care
+ // If all types are the same size, bitcasting the args will take care
// of arg checking. The actual signedness etc. will be taken care of with
// special enums.
if (proto.find('s') == std::string::npos)
ck = ClassB;
-
+
s += MangleName(name, typestr, ck);
s += ", \"";
-
+
for (unsigned i = 0, e = proto.size(); i != e; ++i)
s += BuiltinTypeString(proto[i], typestr, ck, i == 0);
// Extra constant integer to hold type class enum for this function, e.g. s8
if (ck == ClassB)
s += "i";
-
+
s += "\", \"n\")";
return s;
}
+static std::string GenIntrinsic(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ OpKind kind, ClassKind classKind) {
+  assert(!proto.empty() && "unexpected empty prototype");
+ bool define = proto.find('i') != std::string::npos;
+ std::string s;
+
+ // static always inline + return type
+ if (define)
+ s += "#define ";
+ else
+ s += "__ai " + TypeString(proto[0], outTypeStr) + " ";
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+    // for the input type. Strip off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+ s += mangledName;
+
+ // Function arguments
+ s += GenArgs(proto, inTypeStr);
+
+ // Definition.
+ if (define) {
+ s += " __extension__ ({ \\\n ";
+ s += GenMacroLocals(proto, inTypeStr);
+ } else {
+ s += " { \\\n ";
+ }
+
+ if (kind != OpNone)
+ s += GenOpString(kind, proto, outTypeStr);
+ else
+ s += GenBuiltin(name, proto, outTypeStr, classKind);
+ if (define)
+ s += " })";
+ else
+ s += " }";
+ s += "\n";
+ return s;
+}
+
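And the simplest OpKind-based wrapper, per GenIntrinsic above, looks roughly like this (modulo the literal '\' line continuation the format strings include):

    __ai int8x8_t vadd_s8(int8x8_t __a, int8x8_t __b) {
      return __a + __b; }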
/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
/// is composed of type definitions and function declarations.
void NeonEmitter::run(raw_ostream &OS) {
- EmitSourceFileHeader("ARM NEON Header", OS);
-
- // FIXME: emit license into file?
-
+ OS <<
+ "/*===---- arm_neon.h - ARM Neon intrinsics ------------------------------"
+ "---===\n"
+ " *\n"
+ " * Permission is hereby granted, free of charge, to any person obtaining "
+ "a copy\n"
+ " * of this software and associated documentation files (the \"Software\"),"
+ " to deal\n"
+ " * in the Software without restriction, including without limitation the "
+ "rights\n"
+ " * to use, copy, modify, merge, publish, distribute, sublicense, "
+ "and/or sell\n"
+ " * copies of the Software, and to permit persons to whom the Software is\n"
+ " * furnished to do so, subject to the following conditions:\n"
+ " *\n"
+ " * The above copyright notice and this permission notice shall be "
+ "included in\n"
+ " * all copies or substantial portions of the Software.\n"
+ " *\n"
+ " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, "
+ "EXPRESS OR\n"
+ " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF "
+ "MERCHANTABILITY,\n"
+ " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT "
+ "SHALL THE\n"
+ " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR "
+ "OTHER\n"
+ " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, "
+ "ARISING FROM,\n"
+ " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER "
+ "DEALINGS IN\n"
+ " * THE SOFTWARE.\n"
+ " *\n"
+ " *===--------------------------------------------------------------------"
+ "---===\n"
+ " */\n\n";
+
OS << "#ifndef __ARM_NEON_H\n";
OS << "#define __ARM_NEON_H\n\n";
-
+
OS << "#ifndef __ARM_NEON__\n";
OS << "#error \"NEON support not enabled\"\n";
OS << "#endif\n\n";
@@ -895,8 +1103,8 @@ void NeonEmitter::run(raw_ostream &OS) {
// Emit NEON-specific scalar typedefs.
OS << "typedef float float32_t;\n";
- OS << "typedef uint8_t poly8_t;\n";
- OS << "typedef uint16_t poly16_t;\n";
+ OS << "typedef int8_t poly8_t;\n";
+ OS << "typedef int16_t poly16_t;\n";
OS << "typedef uint16_t float16_t;\n";
// Emit Neon vector typedefs.
@@ -905,105 +1113,105 @@ void NeonEmitter::run(raw_ostream &OS) {
ParseTypes(0, TypedefTypes, TDTypeVec);
// Emit vector typedefs.
- for (unsigned v = 1; v != 5; ++v) {
- for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
- bool dummy, quad = false;
- (void) ClassifyType(TDTypeVec[i], quad, dummy, dummy);
- OS << "typedef __attribute__(( __vector_size__(";
-
- OS << utostr(8*v*(quad ? 2 : 1)) << ") )) ";
- if (!quad)
- OS << " ";
-
- OS << TypeString('s', TDTypeVec[i]);
- OS << " __neon_";
-
- char t = (v == 1) ? 'd' : '0' + v;
- OS << TypeString(t, TDTypeVec[i]) << ";\n";
- }
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ bool dummy, quad = false, poly = false;
+ (void) ClassifyType(TDTypeVec[i], quad, poly, dummy);
+ if (poly)
+ OS << "typedef __attribute__((neon_polyvector_type(";
+ else
+ OS << "typedef __attribute__((neon_vector_type(";
+
+ unsigned nElts = GetNumElements(TDTypeVec[i], quad);
+ OS << utostr(nElts) << "))) ";
+ if (nElts < 10)
+ OS << " ";
+
+ OS << TypeString('s', TDTypeVec[i]);
+ OS << " " << TypeString('d', TDTypeVec[i]) << ";\n";
}
OS << "\n";
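The loop above should emit typedefs of this shape (reconstructed, spacing approximate):

    typedef __attribute__((neon_vector_type(8)))      int8_t int8x8_t;
    typedef __attribute__((neon_polyvector_type(8)))  poly8_t poly8x8_t;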
// Emit struct typedefs.
- for (unsigned vi = 1; vi != 5; ++vi) {
+ for (unsigned vi = 2; vi != 5; ++vi) {
for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
- std::string ts = TypeString('d', TDTypeVec[i], vi == 1);
- std::string vs = TypeString((vi > 1) ? '0' + vi : 'd', TDTypeVec[i]);
- std::string tag = (vi > 1) ? vs : StructTag(TDTypeVec[i]);
- OS << "typedef struct " << tag << " {\n";
+ std::string ts = TypeString('d', TDTypeVec[i]);
+ std::string vs = TypeString('0' + vi, TDTypeVec[i]);
+ OS << "typedef struct " << vs << " {\n";
OS << " " << ts << " val";
- if (vi > 1)
- OS << "[" << utostr(vi) << "]";
- OS << ";\n} " << vs << ";\n\n";
+ OS << "[" << utostr(vi) << "]";
+ OS << ";\n} ";
+ OS << vs << ";\n\n";
}
}
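which yields struct wrappers such as (sketch):

    typedef struct int8x8x2_t {
      int8x8_t val[2];
    } int8x8x2_t;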
-
+
OS << "#define __ai static __attribute__((__always_inline__))\n\n";
std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
-
- // Unique the return+pattern types, and assign them.
+
+ // Emit vmovl and vabd intrinsics first so they can be used by other
+ // intrinsics. (Some of the saturating multiply instructions are also
+  // used to implement the corresponding "_lane" variants, but TableGen
+ // sorts the records into alphabetical order so that the "_lane" variants
+ // come after the intrinsics they use.)
+ emitIntrinsic(OS, Records.getDef("VMOVL"));
+ emitIntrinsic(OS, Records.getDef("VABD"));
+
for (unsigned i = 0, e = RV.size(); i != e; ++i) {
Record *R = RV[i];
- std::string name = LowercaseString(R->getName());
- std::string Proto = R->getValueAsString("Prototype");
- std::string Types = R->getValueAsString("Types");
-
- SmallVector<StringRef, 16> TypeVec;
- ParseTypes(R, Types, TypeVec);
-
- OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
-
- bool define = Proto.find('i') != std::string::npos;
-
- for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
- assert(!Proto.empty() && "");
-
- // static always inline + return type
- if (define)
- OS << "#define";
- else
- OS << "__ai " << TypeString(Proto[0], TypeVec[ti]);
-
- // Function name with type suffix
- OS << " " << MangleName(name, TypeVec[ti], ClassS);
-
- // Function arguments
- OS << GenArgs(Proto, TypeVec[ti]);
-
- // Definition.
- if (define)
- OS << " ";
- else
- OS << " { ";
-
- if (k != OpNone) {
- OS << GenOpString(k, Proto, TypeVec[ti]);
- } else {
- if (R->getSuperClasses().size() < 2)
- throw TGError(R->getLoc(), "Builtin has no class kind");
-
- ClassKind ck = ClassMap[R->getSuperClasses()[1]];
-
- if (ck == ClassNone)
- throw TGError(R->getLoc(), "Builtin has no class kind");
- OS << GenBuiltin(name, Proto, TypeVec[ti], ck);
- }
- if (!define)
- OS << " }";
- OS << "\n";
- }
- OS << "\n";
+ if (R->getName() != "VMOVL" && R->getName() != "VABD")
+ emitIntrinsic(OS, R);
}
+
OS << "#undef __ai\n\n";
OS << "#endif /* __ARM_NEON_H */\n";
}
-static unsigned RangeFromType(StringRef typestr) {
+/// emitIntrinsic - Write out the arm_neon.h header file definitions for the
+/// intrinsics specified by record R.
+void NeonEmitter::emitIntrinsic(raw_ostream &OS, Record *R) {
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+
+ ClassKind classKind = ClassNone;
+ if (R->getSuperClasses().size() >= 2)
+ classKind = ClassMap[R->getSuperClasses()[1]];
+ if (classKind == ClassNone && kind == OpNone)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[srcti],
+ OpCast, ClassS);
+ }
+ } else {
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[ti],
+ kind, classKind);
+ }
+ }
+ OS << "\n";
+}
+
+static unsigned RangeFromType(const char mod, StringRef typestr) {
// base type to get the type string for.
bool quad = false, dummy = false;
char type = ClassifyType(typestr, quad, dummy, dummy);
-
+ type = ModType(mod, type, quad, dummy, dummy, dummy, dummy, dummy);
+
switch (type) {
case 'c':
return (8 << (int)quad) - 1;
@@ -1031,7 +1239,7 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
StringMap<OpKind> EmittedMap;
-
+
// Generate BuiltinsARM.def for NEON
OS << "#ifdef GET_NEON_BUILTINS\n";
for (unsigned i = 0, e = RV.size(); i != e; ++i) {
@@ -1041,22 +1249,22 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
continue;
std::string Proto = R->getValueAsString("Prototype");
-
+
// Functions with 'a' (the splat code) in the type prototype should not get
// their own builtin as they use the non-splat variant.
if (Proto.find('a') != std::string::npos)
continue;
-
+
std::string Types = R->getValueAsString("Types");
SmallVector<StringRef, 16> TypeVec;
ParseTypes(R, Types, TypeVec);
-
+
if (R->getSuperClasses().size() < 2)
throw TGError(R->getLoc(), "Builtin has no class kind");
-
- std::string name = LowercaseString(R->getName());
+
+ std::string name = R->getValueAsString("Name");
ClassKind ck = ClassMap[R->getSuperClasses()[1]];
-
+
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
// Generate the BuiltinsARM.def declaration for this builtin, ensuring
// that each unique BUILTIN() macro appears only once in the output
@@ -1064,13 +1272,13 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
if (EmittedMap.count(bd))
continue;
-
+
EmittedMap[bd] = OpNone;
OS << bd << "\n";
}
}
OS << "#endif\n\n";
-
+
// Generate the overloaded type checking code for SemaChecking.cpp
OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
for (unsigned i = 0, e = RV.size(); i != e; ++i) {
@@ -1078,34 +1286,34 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
if (k != OpNone)
continue;
-
+
std::string Proto = R->getValueAsString("Prototype");
std::string Types = R->getValueAsString("Types");
- std::string name = LowercaseString(R->getName());
-
+ std::string name = R->getValueAsString("Name");
+
// Functions with 'a' (the splat code) in the type prototype should not get
// their own builtin as they use the non-splat variant.
if (Proto.find('a') != std::string::npos)
continue;
-
+
// Functions which have a scalar argument cannot be overloaded, no need to
// check them if we are emitting the type checking code.
if (Proto.find('s') != std::string::npos)
continue;
-
+
SmallVector<StringRef, 16> TypeVec;
ParseTypes(R, Types, TypeVec);
-
+
if (R->getSuperClasses().size() < 2)
throw TGError(R->getLoc(), "Builtin has no class kind");
-
+
int si = -1, qi = -1;
unsigned mask = 0, qmask = 0;
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
// Generate the switch case(s) for this builtin for the type validation.
bool quad = false, poly = false, usgn = false;
(void) ClassifyType(TypeVec[ti], quad, poly, usgn);
-
+
if (quad) {
qi = ti;
qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
@@ -1115,64 +1323,67 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
}
}
if (mask)
- OS << "case ARM::BI__builtin_neon_"
- << MangleName(name, TypeVec[si], ClassB)
- << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
if (qmask)
- OS << "case ARM::BI__builtin_neon_"
- << MangleName(name, TypeVec[qi], ClassB)
- << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
}
OS << "#endif\n\n";
-
+
// Generate the intrinsic range checking code for shift/lane immediates.
OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
for (unsigned i = 0, e = RV.size(); i != e; ++i) {
Record *R = RV[i];
-
+
OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
if (k != OpNone)
continue;
-
- std::string name = LowercaseString(R->getName());
+
+ std::string name = R->getValueAsString("Name");
std::string Proto = R->getValueAsString("Prototype");
std::string Types = R->getValueAsString("Types");
-
+
// Functions with 'a' (the splat code) in the type prototype should not get
// their own builtin as they use the non-splat variant.
if (Proto.find('a') != std::string::npos)
continue;
-
+
// Functions which do not have an immediate do not need to have range
// checking code emitted.
- if (Proto.find('i') == std::string::npos)
+ size_t immPos = Proto.find('i');
+ if (immPos == std::string::npos)
continue;
-
+
SmallVector<StringRef, 16> TypeVec;
ParseTypes(R, Types, TypeVec);
-
+
if (R->getSuperClasses().size() < 2)
throw TGError(R->getLoc(), "Builtin has no class kind");
-
+
ClassKind ck = ClassMap[R->getSuperClasses()[1]];
-
+
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
std::string namestr, shiftstr, rangestr;
-
+
// Builtins which are overloaded by type will need to have their upper
// bound computed at Sema time based on the type constant.
if (Proto.find('s') == std::string::npos) {
ck = ClassB;
if (R->getValueAsBit("isShift")) {
shiftstr = ", true";
-
+
// Right shifts have an 'r' in the name, left shifts do not.
if (name.find('r') != std::string::npos)
rangestr = "l = 1; ";
}
rangestr += "u = RFT(TV" + shiftstr + ")";
} else {
- rangestr = "u = " + utostr(RangeFromType(TypeVec[ti]));
+ // The immediate generally refers to a lane in the preceding argument.
+ assert(immPos > 0 && "unexpected immediate operand");
+ rangestr = "u = " + utostr(RangeFromType(Proto[immPos-1], TypeVec[ti]));
}
// Make sure cases appear only once by uniquing them in a string map.
namestr = MangleName(name, TypeVec[ti], ck);
@@ -1182,13 +1393,13 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
// Calculate the index of the immediate that should be range checked.
unsigned immidx = 0;
-
+
// Builtins that return a struct of multiple vectors have an extra
// leading arg for the struct return.
- if (Proto[0] == '2' || Proto[0] == '3' || Proto[0] == '4')
+ if (Proto[0] >= '2' && Proto[0] <= '4')
++immidx;
-
- // Add one to the index for each argument until we reach the immediate
+
+ // Add one to the index for each argument until we reach the immediate
// to be checked. Structs of vectors are passed as multiple arguments.
for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
switch (Proto[ii]) {
@@ -1199,9 +1410,113 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
case 'i': ie = ii + 1; break;
}
}
- OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
<< ": i = " << immidx << "; " << rangestr << "; break;\n";
}
}
OS << "#endif\n\n";
}
+
+/// GenTest - Write out a test for the intrinsic specified by the name and
+/// type strings, including the embedded patterns for FileCheck to match.
+static std::string GenTest(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ bool isShift) {
+  assert(!proto.empty() && "unexpected empty prototype");
+ std::string s;
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+  // for the input type. Strip off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+
+ // Emit the FileCheck patterns.
+ s += "// CHECK: test_" + mangledName + "\n";
+ // s += "// CHECK: \n"; // FIXME: + expected instruction opcode.
+
+ // Emit the start of the test function.
+ s += TypeString(proto[0], outTypeStr) + " test_" + mangledName + "(";
+ char arg = 'a';
+ std::string comma;
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create arguments for values that must be immediate constants.
+ if (proto[i] == 'i')
+ continue;
+ s += comma + TypeString(proto[i], inTypeStr) + " ";
+ s.push_back(arg);
+ comma = ", ";
+ }
+ s += ") { \\\n ";
+
+ if (proto[0] != 'v')
+ s += "return ";
+ s += mangledName + "(";
+ arg = 'a';
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (proto[i] == 'i') {
+ // For immediate operands, test the maximum value.
+ if (isShift)
+ s += "1"; // FIXME
+ else
+ // The immediate generally refers to a lane in the preceding argument.
+ s += utostr(RangeFromType(proto[i-1], inTypeStr));
+ } else {
+ s.push_back(arg);
+ }
+ if ((i + 1) < e)
+ s += ", ";
+ }
+ s += ");\n}\n\n";
+ return s;
+}
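A generated test for an intrinsic without immediate operands should look roughly like this (again modulo the emitter's literal '\' continuations):

    // CHECK: test_vadd_s8
    int8x8_t test_vadd_s8(int8x8_t a, int8x8_t b) {
      return vadd_s8(a, b);
    }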
+
+/// runTests - Write out a complete set of tests for all of the Neon
+/// intrinsics.
+void NeonEmitter::runTests(raw_ostream &OS) {
+ OS <<
+ "// RUN: %clang_cc1 -triple thumbv7-apple-darwin \\\n"
+ "// RUN: -target-cpu cortex-a9 -ffreestanding -S -o - %s | FileCheck %s\n"
+ "\n"
+ "#include <arm_neon.h>\n"
+ "\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ bool isShift = R->getValueAsBit("isShift");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[srcti], isShift);
+ }
+ } else {
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[ti], isShift);
+ }
+ }
+ OS << "\n";
+ }
+}
+
diff --git a/contrib/llvm/utils/TableGen/NeonEmitter.h b/contrib/llvm/utils/TableGen/NeonEmitter.h
index 6c6760d..1e6fcbf 100644
--- a/contrib/llvm/utils/TableGen/NeonEmitter.h
+++ b/contrib/llvm/utils/TableGen/NeonEmitter.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting arm_neon.h, which includes
-// a declaration and definition of each function specified by the ARM NEON
+// a declaration and definition of each function specified by the ARM NEON
// compiler interface. See ARM document DUI0348B.
//
//===----------------------------------------------------------------------===//
@@ -24,13 +24,34 @@
enum OpKind {
OpNone,
OpAdd,
+ OpAddl,
+ OpAddw,
OpSub,
+ OpSubl,
+ OpSubw,
OpMul,
+ OpMull,
OpMla,
+ OpMlal,
OpMls,
+ OpMlsl,
OpMulN,
+ OpMullN,
OpMlaN,
OpMlsN,
+ OpMlalN,
+ OpMlslN,
+ OpMulLane,
+ OpMullLane,
+ OpMlaLane,
+ OpMlsLane,
+ OpMlalLane,
+ OpMlslLane,
+ OpQDMullLane,
+ OpQDMlalLane,
+ OpQDMlslLane,
+ OpQDMulhLane,
+ OpQRDMulhLane,
OpEq,
OpGe,
OpLe,
@@ -46,40 +67,66 @@ enum OpKind {
OpCast,
OpConcat,
OpDup,
+ OpDupLane,
OpHi,
OpLo,
OpSelect,
OpRev16,
OpRev32,
- OpRev64
+ OpRev64,
+ OpReinterpret,
+ OpAbdl,
+ OpAba,
+ OpAbal
};
enum ClassKind {
ClassNone,
- ClassI,
- ClassS,
- ClassW,
- ClassB
+ ClassI, // generic integer instruction, e.g., "i8" suffix
+ ClassS, // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
+ ClassW, // width-specific instruction, e.g., "8" suffix
+ ClassB // bitcast arguments with enum argument to specify type
};
namespace llvm {
-
+
class NeonEmitter : public TableGenBackend {
RecordKeeper &Records;
StringMap<OpKind> OpMap;
DenseMap<Record*, ClassKind> ClassMap;
-
+
public:
NeonEmitter(RecordKeeper &R) : Records(R) {
OpMap["OP_NONE"] = OpNone;
OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_ADDL"] = OpAddl;
+ OpMap["OP_ADDW"] = OpAddw;
OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_SUBL"] = OpSubl;
+ OpMap["OP_SUBW"] = OpSubw;
OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MULL"] = OpMull;
OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLAL"] = OpMlal;
OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MLSL"] = OpMlsl;
OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MULL_N"]= OpMullN;
OpMap["OP_MLA_N"] = OpMlaN;
OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_MLAL_N"] = OpMlalN;
+ OpMap["OP_MLSL_N"] = OpMlslN;
+ OpMap["OP_MUL_LN"]= OpMulLane;
+ OpMap["OP_MULL_LN"] = OpMullLane;
+ OpMap["OP_MLA_LN"]= OpMlaLane;
+ OpMap["OP_MLS_LN"]= OpMlsLane;
+ OpMap["OP_MLAL_LN"] = OpMlalLane;
+ OpMap["OP_MLSL_LN"] = OpMlslLane;
+ OpMap["OP_QDMULL_LN"] = OpQDMullLane;
+ OpMap["OP_QDMLAL_LN"] = OpQDMlalLane;
+ OpMap["OP_QDMLSL_LN"] = OpQDMlslLane;
+ OpMap["OP_QDMULH_LN"] = OpQDMulhLane;
+ OpMap["OP_QRDMULH_LN"] = OpQRDMulhLane;
OpMap["OP_EQ"] = OpEq;
OpMap["OP_GE"] = OpGe;
OpMap["OP_LE"] = OpLe;
@@ -97,10 +144,15 @@ namespace llvm {
OpMap["OP_HI"] = OpHi;
OpMap["OP_LO"] = OpLo;
OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_DUP_LN"] = OpDupLane;
OpMap["OP_SEL"] = OpSelect;
OpMap["OP_REV16"] = OpRev16;
OpMap["OP_REV32"] = OpRev32;
OpMap["OP_REV64"] = OpRev64;
+ OpMap["OP_REINT"] = OpReinterpret;
+ OpMap["OP_ABDL"] = OpAbdl;
+ OpMap["OP_ABA"] = OpAba;
+ OpMap["OP_ABAL"] = OpAbal;
Record *SI = R.getClass("SInst");
Record *II = R.getClass("IInst");
@@ -109,14 +161,20 @@ namespace llvm {
ClassMap[II] = ClassI;
ClassMap[WI] = ClassW;
}
-
+
// run - Emit arm_neon.h.inc
void run(raw_ostream &o);
// runHeader - Emit all the __builtin prototypes used in arm_neon.h
void runHeader(raw_ostream &o);
+
+ // runTests - Emit tests for all the Neon intrinsics.
+ void runTests(raw_ostream &o);
+
+ private:
+ void emitIntrinsic(raw_ostream &OS, Record *R);
};
-
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/utils/TableGen/Record.cpp b/contrib/llvm/utils/TableGen/Record.cpp
index dc79358..abbbafe 100644
--- a/contrib/llvm/utils/TableGen/Record.cpp
+++ b/contrib/llvm/utils/TableGen/Record.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "Record.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Format.h"
#include "llvm/ADT/StringExtras.h"
@@ -59,26 +59,34 @@ Init *BitsRecTy::convertValue(UnsetInit *UI) {
}
Init *BitsRecTy::convertValue(BitInit *UI) {
- if (Size != 1) return 0; // Can only convert single bit...
+ if (Size != 1) return 0; // Can only convert single bit.
BitsInit *Ret = new BitsInit(1);
Ret->setBit(0, UI);
return Ret;
}
-// convertValue from Int initializer to bits type: Split the integer up into the
-// appropriate bits...
-//
-Init *BitsRecTy::convertValue(IntInit *II) {
- int64_t Value = II->getValue();
- // Make sure this bitfield is large enough to hold the integer value...
+/// canFitInBitfield - Return true if the number of bits is large enough to hold
+/// the integer value.
+static bool canFitInBitfield(int64_t Value, unsigned NumBits) {
if (Value >= 0) {
- if (Value & ~((1LL << Size)-1))
- return 0;
- } else {
- if ((Value >> Size) != -1 || ((Value & (1LL << (Size-1))) == 0))
- return 0;
+ if (Value & ~((1LL << NumBits) - 1))
+ return false;
+ } else if ((Value >> NumBits) != -1 || (Value & (1LL << (NumBits-1))) == 0) {
+ return false;
}
+ return true;
+}
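A few spot checks of the predicate, assuming two's-complement shifts as the code does (assert.h assumed):

    assert(canFitInBitfield(5, 3));    /* 0b101 fits in 3 bits        */
    assert(!canFitInBitfield(8, 3));   /* 0b1000 needs 4 bits         */
    assert(canFitInBitfield(-2, 3));   /* 3-bit range is -4..3        */
    assert(!canFitInBitfield(-5, 3));  /* -5 falls outside that range */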
+
+/// convertValue from Int initializer to bits type: Split the integer up into the
+/// appropriate bits.
+///
+Init *BitsRecTy::convertValue(IntInit *II) {
+ int64_t Value = II->getValue();
+ // Make sure this bitfield is large enough to hold the integer value.
+ if (!canFitInBitfield(Value, Size))
+ return 0;
+
BitsInit *Ret = new BitsInit(Size);
for (unsigned i = 0; i != Size; ++i)
Ret->setBit(i, new BitInit(Value & (1LL << i)));
@@ -88,7 +96,7 @@ Init *BitsRecTy::convertValue(IntInit *II) {
Init *BitsRecTy::convertValue(BitsInit *BI) {
// If the number of bits is right, return it. Otherwise we need to expand or
- // truncate...
+ // truncate.
if (BI->getNumBits() == Size) return BI;
return 0;
}
@@ -101,12 +109,56 @@ Init *BitsRecTy::convertValue(TypedInit *VI) {
Ret->setBit(i, new VarBitInit(VI, i));
return Ret;
}
+
if (Size == 1 && dynamic_cast<BitRecTy*>(VI->getType())) {
BitsInit *Ret = new BitsInit(1);
Ret->setBit(0, VI);
return Ret;
}
+ if (TernOpInit *Tern = dynamic_cast<TernOpInit*>(VI)) {
+ if (Tern->getOpcode() == TernOpInit::IF) {
+ Init *LHS = Tern->getLHS();
+ Init *MHS = Tern->getMHS();
+ Init *RHS = Tern->getRHS();
+
+ IntInit *MHSi = dynamic_cast<IntInit*>(MHS);
+ IntInit *RHSi = dynamic_cast<IntInit*>(RHS);
+
+ if (MHSi && RHSi) {
+ int64_t MHSVal = MHSi->getValue();
+ int64_t RHSVal = RHSi->getValue();
+
+ if (canFitInBitfield(MHSVal, Size) && canFitInBitfield(RHSVal, Size)) {
+ BitsInit *Ret = new BitsInit(Size);
+
+ for (unsigned i = 0; i != Size; ++i)
+ Ret->setBit(i, new TernOpInit(TernOpInit::IF, LHS,
+ new IntInit((MHSVal & (1LL << i)) ? 1 : 0),
+ new IntInit((RHSVal & (1LL << i)) ? 1 : 0),
+ VI->getType()));
+
+ return Ret;
+ }
+ } else {
+ BitsInit *MHSbs = dynamic_cast<BitsInit*>(MHS);
+ BitsInit *RHSbs = dynamic_cast<BitsInit*>(RHS);
+
+ if (MHSbs && RHSbs) {
+ BitsInit *Ret = new BitsInit(Size);
+
+ for (unsigned i = 0; i != Size; ++i)
+ Ret->setBit(i, new TernOpInit(TernOpInit::IF, LHS,
+ MHSbs->getBit(i),
+ RHSbs->getBit(i),
+ VI->getType()));
+
+ return Ret;
+ }
+ }
+ }
+ }
+
return 0;
}
@@ -152,16 +204,6 @@ Init *StringRecTy::convertValue(BinOpInit *BO) {
return new BinOpInit(BinOpInit::STRCONCAT, L, R, new StringRecTy);
return BO;
}
- if (BO->getOpcode() == BinOpInit::NAMECONCAT) {
- if (BO->getType()->getAsString() == getAsString()) {
- Init *L = BO->getLHS()->convertInitializerTo(this);
- Init *R = BO->getRHS()->convertInitializerTo(this);
- if (L == 0 || R == 0) return 0;
- if (L != BO->getLHS() || R != BO->getRHS())
- return new BinOpInit(BinOpInit::NAMECONCAT, L, R, new StringRecTy);
- return BO;
- }
- }
return convertValue((TypedInit*)BO);
}
@@ -236,16 +278,6 @@ Init *DagRecTy::convertValue(BinOpInit *BO) {
return new BinOpInit(BinOpInit::CONCAT, L, R, new DagRecTy);
return BO;
}
- if (BO->getOpcode() == BinOpInit::NAMECONCAT) {
- if (BO->getType()->getAsString() == getAsString()) {
- Init *L = BO->getLHS()->convertInitializerTo(this);
- Init *R = BO->getRHS()->convertInitializerTo(this);
- if (L == 0 || R == 0) return 0;
- if (L != BO->getLHS() || R != BO->getRHS())
- return new BinOpInit(BinOpInit::CONCAT, L, R, new DagRecTy);
- return BO;
- }
- }
return 0;
}
@@ -518,9 +550,8 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
// From TGParser::ParseIDValue
if (CurRec) {
if (const RecordVal *RV = CurRec->getValue(Name)) {
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
+ if (RV->getType() != getType())
+ throw "type mismatch in cast";
return new VarInit(Name, RV->getType());
}
@@ -529,9 +560,8 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
const RecordVal *RV = CurRec->getValue(TemplateArgName);
assert(RV && "Template arg doesn't exist??");
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
+ if (RV->getType() != getType())
+ throw "type mismatch in cast";
return new VarInit(TemplateArgName, RV->getType());
}
@@ -543,15 +573,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
const RecordVal *RV = CurMultiClass->Rec.getValue(MCName);
assert(RV && "Template arg doesn't exist??");
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
+ if (RV->getType() != getType())
+ throw "type mismatch in cast";
return new VarInit(MCName, RV->getType());
}
}
- if (Record *D = Records.getDef(Name))
+ if (Record *D = (CurRec->getRecords()).getDef(Name))
return new DefInit(D);
errs() << "Variable not defined: '" + Name + "'\n";
@@ -561,7 +590,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
}
break;
}
- case CAR: {
+ case HEAD: {
ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
if (LHSl) {
if (LHSl->getSize() == 0) {
@@ -572,7 +601,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
}
break;
}
- case CDR: {
+ case TAIL: {
ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
if (LHSl) {
if (LHSl->getSize() == 0) {
@@ -585,7 +614,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
}
break;
}
- case LNULL: {
+ case EMPTY: {
ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
if (LHSl) {
if (LHSl->getSize() == 0) {
@@ -621,9 +650,9 @@ std::string UnOpInit::getAsString() const {
std::string Result;
switch (Opc) {
case CAST: Result = "!cast<" + getType()->getAsString() + ">"; break;
- case CAR: Result = "!car"; break;
- case CDR: Result = "!cdr"; break;
- case LNULL: Result = "!null"; break;
+ case HEAD: Result = "!head"; break;
+ case TAIL: Result = "!tail"; break;
+ case EMPTY: Result = "!empty"; break;
}
return Result + "(" + LHS->getAsString() + ")";
}
@@ -660,57 +689,6 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
return new StringInit(LHSs->getValue() + RHSs->getValue());
break;
}
- case NAMECONCAT: {
- StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
- StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
- if (LHSs && RHSs) {
- std::string Name(LHSs->getValue() + RHSs->getValue());
-
- // From TGParser::ParseIDValue
- if (CurRec) {
- if (const RecordVal *RV = CurRec->getValue(Name)) {
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
- return new VarInit(Name, RV->getType());
- }
-
- std::string TemplateArgName = CurRec->getName()+":"+Name;
- if (CurRec->isTemplateArg(TemplateArgName)) {
- const RecordVal *RV = CurRec->getValue(TemplateArgName);
- assert(RV && "Template arg doesn't exist??");
-
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
-
- return new VarInit(TemplateArgName, RV->getType());
- }
- }
-
- if (CurMultiClass) {
- std::string MCName = CurMultiClass->Rec.getName()+"::"+Name;
- if (CurMultiClass->Rec.isTemplateArg(MCName)) {
- const RecordVal *RV = CurMultiClass->Rec.getValue(MCName);
- assert(RV && "Template arg doesn't exist??");
-
- if (RV->getType() != getType()) {
- throw "type mismatch in nameconcat";
- }
-
- return new VarInit(MCName, RV->getType());
- }
- }
-
- if (Record *D = Records.getDef(Name))
- return new DefInit(D);
-
- errs() << "Variable not defined in !nameconcat: '" + Name + "'\n";
- assert(0 && "Variable not found in !nameconcat");
- return 0;
- }
- break;
- }
case EQ: {
// try to fold eq comparison for 'bit' and 'int', otherwise fallback
// to string objects.
@@ -771,8 +749,6 @@ std::string BinOpInit::getAsString() const {
case SRL: Result = "!srl"; break;
case EQ: Result = "!eq"; break;
case STRCONCAT: Result = "!strconcat"; break;
- case NAMECONCAT:
- Result = "!nameconcat<" + getType()->getAsString() + ">"; break;
}
return Result + "(" + LHS->getAsString() + ", " + RHS->getAsString() + ")";
}
@@ -1042,7 +1018,7 @@ RecTy *TypedInit::getFieldType(const std::string &FieldName) const {
Init *TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) {
BitsRecTy *T = dynamic_cast<BitsRecTy*>(getType());
- if (T == 0) return 0; // Cannot subscript a non-bits variable...
+ if (T == 0) return 0; // Cannot subscript a non-bits variable.
unsigned NumBits = T->getNumBits();
BitsInit *BI = new BitsInit(Bits.size());
@@ -1058,7 +1034,7 @@ Init *TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) {
Init *TypedInit::convertInitListSlice(const std::vector<unsigned> &Elements) {
ListRecTy *T = dynamic_cast<ListRecTy*>(getType());
- if (T == 0) return 0; // Cannot subscript a non-list variable...
+ if (T == 0) return 0; // Cannot subscript a non-list variable.
if (Elements.size() == 1)
return new VarListElementInit(this, Elements[0]);
@@ -1211,7 +1187,7 @@ Init *FieldInit::resolveBitReference(Record &R, const RecordVal *RV,
assert(Bit < BI->getNumBits() && "Bit reference out of range!");
Init *B = BI->getBit(Bit);
- if (dynamic_cast<BitInit*>(B)) // If the bit is set...
+ if (dynamic_cast<BitInit*>(B)) // If the bit is set.
return B; // Replace the VarBitInit with it.
}
return 0;
@@ -1303,14 +1279,14 @@ void RecordVal::print(raw_ostream &OS, bool PrintSem) const {
unsigned Record::LastID = 0;
void Record::setName(const std::string &Name) {
- if (Records.getDef(getName()) == this) {
- Records.removeDef(getName());
+ if (TrackedRecords.getDef(getName()) == this) {
+ TrackedRecords.removeDef(getName());
this->Name = Name;
- Records.addDef(this);
+ TrackedRecords.addDef(this);
} else {
- Records.removeClass(getName());
+ TrackedRecords.removeClass(getName());
this->Name = Name;
- Records.addClass(this);
+ TrackedRecords.addClass(this);
}
}
@@ -1573,7 +1549,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const RecordKeeper &RK) {
/// name does not exist, an error is printed and true is returned.
std::vector<Record*>
RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
- Record *Class = Records.getClass(ClassName);
+ Record *Class = getClass(ClassName);
if (!Class)
throw "ERROR: Couldn't find the `" + ClassName + "' class!\n";
diff --git a/contrib/llvm/utils/TableGen/Record.h b/contrib/llvm/utils/TableGen/Record.h
index d6f37ee..f3a5df2 100644
--- a/contrib/llvm/utils/TableGen/Record.h
+++ b/contrib/llvm/utils/TableGen/Record.h
@@ -16,7 +16,7 @@
#define RECORD_H
#include "llvm/Support/SourceMgr.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -57,6 +57,7 @@ class VarListElementInit;
class Record;
class RecordVal;
struct MultiClass;
+class RecordKeeper;
//===----------------------------------------------------------------------===//
// Type Classes
@@ -810,7 +811,7 @@ public:
///
class UnOpInit : public OpInit {
public:
- enum UnaryOp { CAST, CAR, CDR, LNULL };
+ enum UnaryOp { CAST, HEAD, TAIL, EMPTY };
private:
UnaryOp Opc;
Init *LHS;
@@ -848,7 +849,7 @@ public:
///
class BinOpInit : public OpInit {
public:
- enum BinaryOp { SHL, SRA, SRL, STRCONCAT, CONCAT, NAMECONCAT, EQ };
+ enum BinaryOp { SHL, SRA, SRL, STRCONCAT, CONCAT, EQ };
private:
BinaryOp Opc;
Init *LHS, *RHS;
@@ -930,6 +931,8 @@ public:
// possible to fold.
Init *Fold(Record *CurRec, MultiClass *CurMultiClass);
+ virtual bool isComplete() const { return false; }
+
virtual Init *resolveReferences(Record &R, const RecordVal *RV);
virtual std::string getAsString() const;
@@ -1227,16 +1230,21 @@ class Record {
std::vector<std::string> TemplateArgs;
std::vector<RecordVal> Values;
std::vector<Record*> SuperClasses;
+
+ // Tracks Record instances. Not owned by Record.
+ RecordKeeper &TrackedRecords;
+
public:
- explicit Record(const std::string &N, SMLoc loc) :
- ID(LastID++), Name(N), Loc(loc) {}
+ // Constructs a record.
+ explicit Record(const std::string &N, SMLoc loc, RecordKeeper &records) :
+ ID(LastID++), Name(N), Loc(loc), TrackedRecords(records) {}
~Record() {}
-
+
static unsigned getNewUID() { return LastID++; }
-
-
+
+
unsigned getID() const { return ID; }
const std::string &getName() const { return Name; }
@@ -1315,6 +1323,10 @@ public:
/// possible references.
void resolveReferencesTo(const RecordVal *RV);
+ RecordKeeper &getRecords() const {
+ return TrackedRecords;
+ }
+
void dump() const;
//===--------------------------------------------------------------------===//
@@ -1350,9 +1362,9 @@ public:
///
std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const;
- /// getValueAsListOfInts - This method looks up the specified field and returns
- /// its value as a vector of integers, throwing an exception if the field does
- /// not exist or if the value is not the right type.
+ /// getValueAsListOfInts - This method looks up the specified field and
+ /// returns its value as a vector of integers, throwing an exception if the
+ /// field does not exist or if the value is not the right type.
///
std::vector<int64_t> getValueAsListOfInts(StringRef FieldName) const;
@@ -1396,7 +1408,8 @@ struct MultiClass {
void dump() const;
- MultiClass(const std::string &Name, SMLoc Loc) : Rec(Name, Loc) {}
+ MultiClass(const std::string &Name, SMLoc Loc, RecordKeeper &Records) :
+ Rec(Name, Loc, Records) {}
};
class RecordKeeper {
@@ -1453,7 +1466,6 @@ public:
std::vector<Record*>
getAllDerivedDefinitions(const std::string &ClassName) const;
-
void dump() const;
};
@@ -1488,9 +1500,7 @@ public:
raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
-extern RecordKeeper Records;
-
-void PrintError(SMLoc ErrorLoc, const std::string &Msg);
+void PrintError(SMLoc ErrorLoc, const Twine &Msg);
} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index 6f06705..96399a4 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
// runEnums - Print out enum values for all of the registers.
void RegisterInfoEmitter::runEnums(raw_ostream &OS) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
const std::vector<CodeGenRegister> &Registers = Target.getRegisters();
std::string Namespace = Registers[0].TheDef->getValueAsString("Namespace");
@@ -63,7 +63,7 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS) {
void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
EmitSourceFileHeader("Register Information Header Fragment", OS);
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
const std::string &TargetName = Target.getName();
std::string ClassName = TargetName + "GenRegisterInfo";
@@ -333,7 +333,7 @@ public:
// RegisterInfoEmitter::run - Main register file description emitter.
//
void RegisterInfoEmitter::run(raw_ostream &OS) {
- CodeGenTarget Target;
+ CodeGenTarget Target(Records);
EmitSourceFileHeader("Register Information Source Fragment", OS);
OS << "namespace llvm {\n\n";
@@ -777,17 +777,13 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
delete [] AliasesHashTable;
if (!RegisterAliases.empty())
- OS << "\n\n // Register Alias Sets...\n";
+ OS << "\n\n // Register Overlap Lists...\n";
- // Emit the empty alias list
- OS << " const unsigned Empty_AliasSet[] = { 0 };\n";
- // Loop over all of the registers which have aliases, emitting the alias list
- // to memory.
+ // Emit an overlap list for all registers.
for (std::map<Record*, std::set<Record*>, LessRecord >::iterator
I = RegisterAliases.begin(), E = RegisterAliases.end(); I != E; ++I) {
- if (I->second.empty())
- continue;
- OS << " const unsigned " << I->first->getName() << "_AliasSet[] = { ";
+ OS << " const unsigned " << I->first->getName() << "_Overlaps[] = { "
+ << getQualifiedName(I->first) << ", ";
for (std::set<Record*>::iterator ASI = I->second.begin(),
E = I->second.end(); ASI != E; ++ASI)
OS << getQualifiedName(*ASI) << ", ";
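With this change every register gets its own overlap list, aliased or not, and the list always begins with the register itself, which is what lets the Empty_AliasSet special case above be dropped. For a hypothetical register AX aliasing EAX, AH and AL, the loop would emit roughly the following (assuming the usual 0 terminator printed after the loop, which this hunk does not show):

  const unsigned AX_Overlaps[] = { X86::AX, X86::EAX, X86::AH, X86::AL, 0 };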
@@ -849,11 +845,7 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
const CodeGenRegister &Reg = Regs[i];
OS << " { \"";
- OS << Reg.getName() << "\",\t";
- if (!RegisterAliases[Reg.TheDef].empty())
- OS << Reg.getName() << "_AliasSet,\t";
- else
- OS << "Empty_AliasSet,\t";
+ OS << Reg.getName() << "\",\t" << Reg.getName() << "_Overlaps,\t";
if (!RegisterSubRegs[Reg.TheDef].empty())
OS << Reg.getName() << "_SubRegsSet,\t";
else
diff --git a/contrib/llvm/utils/TableGen/StringMatcher.cpp b/contrib/llvm/utils/TableGen/StringMatcher.cpp
new file mode 100644
index 0000000..6aedcbf
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/StringMatcher.cpp
@@ -0,0 +1,149 @@
+//===- StringMatcher.cpp - Generate a matcher for input strings -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StringMatcher class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "StringMatcher.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+using namespace llvm;
+
+/// FindFirstNonCommonLetter - Find the first character in the keys of the
+/// string pairs that is not shared across the whole set of strings. All
+/// strings are assumed to have the same length.
+static unsigned
+FindFirstNonCommonLetter(const std::vector<const
+ StringMatcher::StringPair*> &Matches) {
+ assert(!Matches.empty());
+ for (unsigned i = 0, e = Matches[0]->first.size(); i != e; ++i) {
+ // Check to see if letter i is the same across the set.
+ char Letter = Matches[0]->first[i];
+
+ for (unsigned str = 0, e = Matches.size(); str != e; ++str)
+ if (Matches[str]->first[i] != Letter)
+ return i;
+ }
+
+ return Matches[0]->first.size();
+}
+
+/// EmitStringMatcherForChar - Given a set of strings that are known to be the
+/// same length and whose characters leading up to CharNo are the same, emit
+/// code to verify that CharNo and later are the same.
+///
+/// \return - True if control can leave the emitted code fragment.
+bool StringMatcher::
+EmitStringMatcherForChar(const std::vector<const StringPair*> &Matches,
+ unsigned CharNo, unsigned IndentCount) const {
+ assert(!Matches.empty() && "Must have at least one string to match!");
+ std::string Indent(IndentCount*2+4, ' ');
+
+ // If we have verified that the entire string matches, we're done: output the
+ // matching code.
+ if (CharNo == Matches[0]->first.size()) {
+ assert(Matches.size() == 1 && "Had duplicate keys to match on");
+
+ // If the to-execute code has \n's in it, indent each subsequent line.
+ StringRef Code = Matches[0]->second;
+
+ std::pair<StringRef, StringRef> Split = Code.split('\n');
+ OS << Indent << Split.first << "\t // \"" << Matches[0]->first << "\"\n";
+
+ Code = Split.second;
+ while (!Code.empty()) {
+ Split = Code.split('\n');
+ OS << Indent << Split.first << "\n";
+ Code = Split.second;
+ }
+ return false;
+ }
+
+ // Bucket the matches by the character we are comparing.
+ std::map<char, std::vector<const StringPair*> > MatchesByLetter;
+
+ for (unsigned i = 0, e = Matches.size(); i != e; ++i)
+ MatchesByLetter[Matches[i]->first[CharNo]].push_back(Matches[i]);
+
+
+ // If we have exactly one bucket to match, see how many characters are common
+ // across the whole set and match all of them at once.
+ if (MatchesByLetter.size() == 1) {
+ unsigned FirstNonCommonLetter = FindFirstNonCommonLetter(Matches);
+ unsigned NumChars = FirstNonCommonLetter-CharNo;
+
+ // Emit code to break out if the prefix doesn't match.
+ if (NumChars == 1) {
+ // Do the comparison with if (Str[1] != 'f')
+ // FIXME: Need to escape general characters.
+ OS << Indent << "if (" << StrVariableName << "[" << CharNo << "] != '"
+ << Matches[0]->first[CharNo] << "')\n";
+ OS << Indent << " break;\n";
+ } else {
+ // Do the comparison with if (Str.substr(1, 3) != "foo").
+ // FIXME: Need to escape general strings.
+ OS << Indent << "if (" << StrVariableName << ".substr(" << CharNo << ", "
+ << NumChars << ") != \"";
+ OS << Matches[0]->first.substr(CharNo, NumChars) << "\")\n";
+ OS << Indent << " break;\n";
+ }
+
+ return EmitStringMatcherForChar(Matches, FirstNonCommonLetter, IndentCount);
+ }
+
+ // Otherwise, we have multiple possible things, emit a switch on the
+ // character.
+ OS << Indent << "switch (" << StrVariableName << "[" << CharNo << "]) {\n";
+ OS << Indent << "default: break;\n";
+
+ for (std::map<char, std::vector<const StringPair*> >::iterator LI =
+ MatchesByLetter.begin(), E = MatchesByLetter.end(); LI != E; ++LI) {
+ // TODO: escape hard stuff (like \n) if we ever care about it.
+ OS << Indent << "case '" << LI->first << "':\t // "
+ << LI->second.size() << " string";
+ if (LI->second.size() != 1) OS << 's';
+ OS << " to match.\n";
+ if (EmitStringMatcherForChar(LI->second, CharNo+1, IndentCount+1))
+ OS << Indent << " break;\n";
+ }
+
+ OS << Indent << "}\n";
+ return true;
+}
+
+
+/// Emit - Top level entry point.
+///
+void StringMatcher::Emit(unsigned Indent) const {
+ // If nothing to match, just fall through.
+ if (Matches.empty()) return;
+
+ // First level categorization: group strings by length.
+ std::map<unsigned, std::vector<const StringPair*> > MatchesByLength;
+
+ for (unsigned i = 0, e = Matches.size(); i != e; ++i)
+ MatchesByLength[Matches[i].first.size()].push_back(&Matches[i]);
+
+ // Output a switch statement on length and categorize the elements within each
+ // bin.
+ OS.indent(Indent*2+2) << "switch (" << StrVariableName << ".size()) {\n";
+ OS.indent(Indent*2+2) << "default: break;\n";
+
+ for (std::map<unsigned, std::vector<const StringPair*> >::iterator LI =
+ MatchesByLength.begin(), E = MatchesByLength.end(); LI != E; ++LI) {
+ OS.indent(Indent*2+2) << "case " << LI->first << ":\t // "
+ << LI->second.size()
+ << " string" << (LI->second.size() == 1 ? "" : "s") << " to match.\n";
+ if (EmitStringMatcherForChar(LI->second, 0, Indent))
+ OS.indent(Indent*2+4) << "break;\n";
+ }
+
+ OS.indent(Indent*2+2) << "}\n";
+}
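To make the recursion concrete, suppose Matches holds the two pairs ("foo", "return 1;") and ("bar", "return 2;") with StrVariableName "Str" (inputs invented for illustration). Hand-tracing Emit(0) through the code above yields a matcher of roughly this shape:

  switch (Str.size()) {
  default: break;
  case 3:  // 2 strings to match.
    switch (Str[0]) {
    default: break;
    case 'b':  // 1 string to match.
      if (Str.substr(1, 2) != "ar")
        break;
      return 2;  // "bar"
    case 'f':  // 1 string to match.
      if (Str.substr(1, 2) != "oo")
        break;
      return 1;  // "foo"
    }
    break;
  }

No break is emitted after a matched string because EmitStringMatcherForChar returns false once the to-execute code has been printed; the class contract requires that code not fall out of its fragment.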
diff --git a/contrib/llvm/utils/TableGen/StringMatcher.h b/contrib/llvm/utils/TableGen/StringMatcher.h
new file mode 100644
index 0000000..1dadc76
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/StringMatcher.h
@@ -0,0 +1,54 @@
+//===- StringMatcher.h - Generate a matcher for input strings ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StringMatcher class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef STRINGMATCHER_H
+#define STRINGMATCHER_H
+
+#include <vector>
+#include <string>
+#include <utility>
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class raw_ostream;
+
+/// StringMatcher - Given a list of strings and code to execute when they match,
+/// output a simple switch tree to classify the input string.
+///
+/// If a match is found, the code in Vals[i].second is executed; control must
+/// not exit this code fragment. If nothing matches, execution falls through.
+///
+class StringMatcher {
+public:
+ typedef std::pair<std::string, std::string> StringPair;
+private:
+ StringRef StrVariableName;
+ const std::vector<StringPair> &Matches;
+ raw_ostream &OS;
+
+public:
+ StringMatcher(StringRef strVariableName,
+ const std::vector<StringPair> &matches, raw_ostream &os)
+ : StrVariableName(strVariableName), Matches(matches), OS(os) {}
+
+ void Emit(unsigned Indent = 0) const;
+
+
+private:
+ bool EmitStringMatcherForChar(const std::vector<const StringPair*> &Matches,
+ unsigned CharNo, unsigned IndentCount) const;
+};
+
+} // end llvm namespace.
+
+#endif
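A minimal sketch of driving the new class from an emitter, using only the API declared above (the keyword table is hypothetical):

  #include "StringMatcher.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void EmitKeywordMatcher(raw_ostream &OS) {
    std::vector<StringMatcher::StringPair> Matches;
    Matches.push_back(StringMatcher::StringPair("def", "return tgtok::Def;"));
    Matches.push_back(StringMatcher::StringPair("let", "return tgtok::Let;"));
    // Emits a switch tree over the variable "Str"; if nothing matches,
    // control falls through past the emitted code.
    StringMatcher("Str", Matches, OS).Emit();
  }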
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
index b04eaf8..e35bdca 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -172,13 +172,10 @@ void SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
// CollectAllItinClasses - Gathers and enumerates all the itinerary classes.
// Returns itinerary class count.
//
-unsigned SubtargetEmitter::CollectAllItinClasses(raw_ostream &OS,
- std::map<std::string, unsigned> &ItinClassesMap) {
- // Gather and sort all itinerary classes
- std::vector<Record*> ItinClassList =
- Records.getAllDerivedDefinitions("InstrItinClass");
- std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
-
+unsigned SubtargetEmitter::
+CollectAllItinClasses(raw_ostream &OS,
+ std::map<std::string, unsigned> &ItinClassesMap,
+ std::vector<Record*> &ItinClassList) {
// For each itinerary class
unsigned N = ItinClassList.size();
for (unsigned i = 0; i < N; i++) {
@@ -265,13 +262,32 @@ void SubtargetEmitter::FormItineraryOperandCycleString(Record *ItinData,
}
}
+void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
+ Record *ItinData,
+ std::string &ItinString,
+ unsigned NOperandCycles) {
+ const std::vector<Record*> &BypassList =
+ ItinData->getValueAsListOfDefs("Bypasses");
+ unsigned N = BypassList.size();
+ unsigned i = 0;
+ for (; i < N;) {
+ ItinString += Name + "Bypass::" + BypassList[i]->getName();
+ if (++i < NOperandCycles) ItinString += ", ";
+ }
+ for (; i < NOperandCycles;) {
+ ItinString += " 0";
+ if (++i < NOperandCycles) ItinString += ", ";
+ }
+}
+
//
// EmitStageAndOperandCycleData - Generate unique itinerary stages and
// operand cycle tables. Record itineraries for processors.
//
void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
unsigned NItinClasses,
- std::map<std::string, unsigned> &ItinClassesMap,
+ std::map<std::string, unsigned> &ItinClassesMap,
+ std::vector<Record*> &ItinClassList,
std::vector<std::vector<InstrItinerary> > &ProcList) {
   // Gather processor itineraries
std::vector<Record*> ProcItinList =
@@ -298,6 +314,19 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
<< " = 1 << " << j << ";\n";
OS << "}\n";
+
+ std::vector<Record*> BPs = Proc->getValueAsListOfDefs("BP");
+ if (BPs.size()) {
+      OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
+ << "\"\n" << "namespace " << Name << "Bypass {\n";
+
+ OS << " const unsigned NoBypass = 0;\n";
+ for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
+ OS << " const unsigned " << BPs[j]->getName()
+ << " = 1 << " << j << ";\n";
+
+ OS << "}\n";
+ }
}
// Begin stages table
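For a processor itinerary record named "ARM" whose BP list contains BP1 and BP2 (names invented for illustration), the block above would emit:

  // Pipeline forwarding paths for itineraries "ARM"
  namespace ARMBypass {
    const unsigned NoBypass = 0;
    const unsigned BP1 = 1 << 0;
    const unsigned BP2 = 1 << 1;
  }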
@@ -307,10 +336,14 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// Begin operand cycle table
std::string OperandCycleTable = "static const unsigned OperandCycles[] = {\n";
OperandCycleTable += " 0, // No itinerary\n";
+
+ // Begin pipeline bypass table
+ std::string BypassTable = "static const unsigned ForwardingPathes[] = {\n";
+ BypassTable += " 0, // No itinerary\n";
unsigned StageCount = 1, OperandCycleCount = 1;
unsigned ItinStageEnum = 1, ItinOperandCycleEnum = 1;
- std::map<std::string, unsigned> ItinStageMap, ItinOperandCycleMap;
+ std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
for (unsigned i = 0, N = ProcItinList.size(); i < N; i++) {
// Next record
Record *Proc = ProcItinList[i];
@@ -344,6 +377,10 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
NOperandCycles);
+ std::string ItinBypassString;
+ FormItineraryBypassString(Name, ItinData, ItinBypassString,
+ NOperandCycles);
+
// Check to see if stage already exists and create if it doesn't
unsigned FindStage = 0;
if (NStages > 0) {
@@ -361,27 +398,35 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// Check to see if operand cycle already exists and create if it doesn't
unsigned FindOperandCycle = 0;
if (NOperandCycles > 0) {
- FindOperandCycle = ItinOperandCycleMap[ItinOperandCycleString];
+ std::string ItinOperandString = ItinOperandCycleString+ItinBypassString;
+ FindOperandCycle = ItinOperandMap[ItinOperandString];
if (FindOperandCycle == 0) {
// Emit as cycle, // index
OperandCycleTable += ItinOperandCycleString + ", // " +
itostr(ItinOperandCycleEnum) + "\n";
// Record Itin class number.
- ItinOperandCycleMap[ItinOperandCycleString] =
+ ItinOperandMap[ItinOperandCycleString] =
FindOperandCycle = OperandCycleCount;
+
+ // Emit as bypass, // index
+ BypassTable += ItinBypassString + ", // " +
+ itostr(ItinOperandCycleEnum) + "\n";
+
OperandCycleCount += NOperandCycles;
ItinOperandCycleEnum++;
}
}
- // Set up itinerary as location and location + stage count
- InstrItinerary Intinerary = { FindStage, FindStage + NStages,
- FindOperandCycle, FindOperandCycle + NOperandCycles};
-
// Locate where to inject into processor itinerary table
const std::string &Name = ItinData->getValueAsDef("TheClass")->getName();
unsigned Find = ItinClassesMap[Name];
+ // Set up itinerary as location and location + stage count
+ unsigned NumUOps = ItinClassList[Find]->getValueAsInt("NumMicroOps");
+ InstrItinerary Intinerary = { NumUOps, FindStage, FindStage + NStages,
+ FindOperandCycle,
+ FindOperandCycle + NOperandCycles};
+
// Inject - empty slots will be 0, 0
ItinList[Find] = Intinerary;
}
@@ -389,7 +434,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// Add process itinerary to list
ProcList.push_back(ItinList);
}
-
+
// Closing stage
StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End itinerary\n";
StageTable += "};\n";
@@ -398,9 +443,13 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
OperandCycleTable += " 0 // End itinerary\n";
OperandCycleTable += "};\n";
+ BypassTable += " 0 // End itinerary\n";
+ BypassTable += "};\n";
+
// Emit tables.
OS << StageTable;
OS << OperandCycleTable;
+ OS << BypassTable;
// Emit size of tables
OS<<"\nenum {\n";
@@ -443,9 +492,11 @@ void SubtargetEmitter::EmitProcessorData(raw_ostream &OS,
// Emit in the form of
   // { numMicroOps, firstStage, lastStage, firstCycle, lastCycle } // index
if (Intinerary.FirstStage == 0) {
- OS << " { 0, 0, 0, 0 }";
+ OS << " { 1, 0, 0, 0, 0 }";
} else {
- OS << " { " << Intinerary.FirstStage << ", " <<
+ OS << " { " <<
+ Intinerary.NumMicroOps << ", " <<
+ Intinerary.FirstStage << ", " <<
Intinerary.LastStage << ", " <<
Intinerary.FirstOperandCycle << ", " <<
Intinerary.LastOperandCycle << " }";
@@ -455,7 +506,7 @@ void SubtargetEmitter::EmitProcessorData(raw_ostream &OS,
}
// End processor itinerary table
- OS << " { ~0U, ~0U, ~0U, ~0U } // end marker\n";
+ OS << " { 1, ~0U, ~0U, ~0U, ~0U } // end marker\n";
OS << "};\n";
}
}
@@ -511,16 +562,22 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
//
void SubtargetEmitter::EmitData(raw_ostream &OS) {
std::map<std::string, unsigned> ItinClassesMap;
- std::vector<std::vector<InstrItinerary> > ProcList;
+ // Gather and sort all itinerary classes
+ std::vector<Record*> ItinClassList =
+ Records.getAllDerivedDefinitions("InstrItinClass");
+ std::sort(ItinClassList.begin(), ItinClassList.end(), LessRecord());
// Enumerate all the itinerary classes
- unsigned NItinClasses = CollectAllItinClasses(OS, ItinClassesMap);
+ unsigned NItinClasses = CollectAllItinClasses(OS, ItinClassesMap,
+ ItinClassList);
// Make sure the rest is worth the effort
HasItineraries = NItinClasses != 1; // Ignore NoItinerary.
if (HasItineraries) {
+ std::vector<std::vector<InstrItinerary> > ProcList;
// Emit the stage data
- EmitStageAndOperandCycleData(OS, NItinClasses, ItinClassesMap, ProcList);
+ EmitStageAndOperandCycleData(OS, NItinClasses, ItinClassesMap,
+ ItinClassList, ProcList);
// Emit the processor itinerary data
EmitProcessorData(OS, ProcList);
// Emit the processor lookup data
@@ -569,7 +626,8 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) {
OS << "\n"
<< " InstrItinerary *Itinerary = (InstrItinerary *)"
<< "Features.getInfo(ProcItinKV, ProcItinKVSize);\n"
- << " InstrItins = InstrItineraryData(Stages, OperandCycles, Itinerary);\n";
+ << " InstrItins = InstrItineraryData(Stages, OperandCycles, "
+ << "ForwardingPathes, Itinerary);\n";
}
OS << " return Features.getCPU();\n"
@@ -580,7 +638,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) {
// SubtargetEmitter::run - Main subtarget enumeration emitter.
//
void SubtargetEmitter::run(raw_ostream &OS) {
- Target = CodeGenTarget().getName();
+ Target = CodeGenTarget(Records).getName();
EmitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.h b/contrib/llvm/utils/TableGen/SubtargetEmitter.h
index f43a443..3abec3b 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.h
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.h
@@ -33,14 +33,19 @@ class SubtargetEmitter : public TableGenBackend {
void FeatureKeyValues(raw_ostream &OS);
void CPUKeyValues(raw_ostream &OS);
unsigned CollectAllItinClasses(raw_ostream &OS,
- std::map<std::string, unsigned> &ItinClassesMap);
+ std::map<std::string,unsigned> &ItinClassesMap,
+ std::vector<Record*> &ItinClassList);
void FormItineraryStageString(const std::string &Names,
Record *ItinData, std::string &ItinString,
unsigned &NStages);
void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
unsigned &NOperandCycles);
+ void FormItineraryBypassString(const std::string &Names,
+ Record *ItinData,
+ std::string &ItinString, unsigned NOperandCycles);
void EmitStageAndOperandCycleData(raw_ostream &OS, unsigned NItinClasses,
std::map<std::string, unsigned> &ItinClassesMap,
+ std::vector<Record*> &ItinClassList,
std::vector<std::vector<InstrItinerary> > &ProcList);
void EmitProcessorData(raw_ostream &OS,
std::vector<std::vector<InstrItinerary> > &ProcList);
diff --git a/contrib/llvm/utils/TableGen/TGLexer.cpp b/contrib/llvm/utils/TableGen/TGLexer.cpp
index 2c7becc..82d2b64 100644
--- a/contrib/llvm/utils/TableGen/TGLexer.cpp
+++ b/contrib/llvm/utils/TableGen/TGLexer.cpp
@@ -15,6 +15,8 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Config/config.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
#include <cctype>
#include <cstdio>
#include <cstdlib>
@@ -36,17 +38,17 @@ SMLoc TGLexer::getLoc() const {
/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
-tgtok::TokKind TGLexer::ReturnError(const char *Loc, const std::string &Msg) {
+tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
PrintError(Loc, Msg);
return tgtok::Error;
}
-void TGLexer::PrintError(const char *Loc, const std::string &Msg) const {
+void TGLexer::PrintError(const char *Loc, const Twine &Msg) const {
SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), Msg, "error");
}
-void TGLexer::PrintError(SMLoc Loc, const std::string &Msg) const {
+void TGLexer::PrintError(SMLoc Loc, const Twine &Msg) const {
SrcMgr.PrintMessage(Loc, Msg, "error");
}
@@ -95,7 +97,7 @@ tgtok::TokKind TGLexer::LexToken() {
switch (CurChar) {
default:
- // Handle letters: [a-zA-Z_]
+ // Handle letters: [a-zA-Z_#]
if (isalpha(CurChar) || CurChar == '_' || CurChar == '#')
return LexIdentifier();
@@ -214,23 +216,13 @@ tgtok::TokKind TGLexer::LexVarName() {
tgtok::TokKind TGLexer::LexIdentifier() {
- // The first letter is [a-zA-Z_].
+ // The first letter is [a-zA-Z_#].
const char *IdentStart = TokStart;
- // Match the rest of the identifier regex: [0-9a-zA-Z_]*
- while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_'
- || *CurPtr == '#') {
- // If this contains a '#', make sure it's value
- if (*CurPtr == '#') {
- if (strncmp(CurPtr, "#NAME#", 6) != 0) {
- return tgtok::Error;
- }
- CurPtr += 6;
- }
- else {
- ++CurPtr;
- }
- }
+ // Match the rest of the identifier regex: [0-9a-zA-Z_#]*
+ while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_' ||
+ *CurPtr == '#')
+ ++CurPtr;
// Check to see if this identifier is a keyword.
@@ -421,30 +413,30 @@ tgtok::TokKind TGLexer::LexBracket() {
/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
if (!isalpha(*CurPtr))
- return ReturnError(CurPtr-1, "Invalid \"!operator\"");
+ return ReturnError(CurPtr - 1, "Invalid \"!operator\"");
const char *Start = CurPtr++;
while (isalpha(*CurPtr))
++CurPtr;
// Check to see which operator this is.
- unsigned Len = CurPtr-Start;
-
- if (Len == 3 && !memcmp(Start, "con", 3)) return tgtok::XConcat;
- if (Len == 3 && !memcmp(Start, "sra", 3)) return tgtok::XSRA;
- if (Len == 3 && !memcmp(Start, "srl", 3)) return tgtok::XSRL;
- if (Len == 3 && !memcmp(Start, "shl", 3)) return tgtok::XSHL;
- if (Len == 2 && !memcmp(Start, "eq", 2)) return tgtok::XEq;
- if (Len == 9 && !memcmp(Start, "strconcat", 9)) return tgtok::XStrConcat;
- if (Len == 10 && !memcmp(Start, "nameconcat", 10)) return tgtok::XNameConcat;
- if (Len == 5 && !memcmp(Start, "subst", 5)) return tgtok::XSubst;
- if (Len == 7 && !memcmp(Start, "foreach", 7)) return tgtok::XForEach;
- if (Len == 4 && !memcmp(Start, "cast", 4)) return tgtok::XCast;
- if (Len == 3 && !memcmp(Start, "car", 3)) return tgtok::XCar;
- if (Len == 3 && !memcmp(Start, "cdr", 3)) return tgtok::XCdr;
- if (Len == 4 && !memcmp(Start, "null", 4)) return tgtok::XNull;
- if (Len == 2 && !memcmp(Start, "if", 2)) return tgtok::XIf;
-
- return ReturnError(Start-1, "Unknown operator");
+ tgtok::TokKind Kind =
+ StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
+ .Case("eq", tgtok::XEq)
+ .Case("if", tgtok::XIf)
+ .Case("head", tgtok::XHead)
+ .Case("tail", tgtok::XTail)
+ .Case("con", tgtok::XConcat)
+ .Case("shl", tgtok::XSHL)
+ .Case("sra", tgtok::XSRA)
+ .Case("srl", tgtok::XSRL)
+ .Case("cast", tgtok::XCast)
+ .Case("empty", tgtok::XEmpty)
+ .Case("subst", tgtok::XSubst)
+ .Case("foreach", tgtok::XForEach)
+ .Case("strconcat", tgtok::XStrConcat)
+ .Default(tgtok::Error);
+
+ return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}
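StringSwitch (from llvm/ADT/StringSwitch.h) checks each Case in order against the input and yields Default when none applies, replacing the hand-written length/memcmp chain. A self-contained sketch of the idiom (the enum is invented):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/StringSwitch.h"
  using namespace llvm;

  enum OpKind { OpUnknown, OpHead, OpTail };

  // Maps an operator name to its kind, or OpUnknown if unrecognized.
  static OpKind ClassifyOp(StringRef Name) {
    return StringSwitch<OpKind>(Name)
        .Case("head", OpHead)
        .Case("tail", OpTail)
        .Default(OpUnknown);
  }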
diff --git a/contrib/llvm/utils/TableGen/TGLexer.h b/contrib/llvm/utils/TableGen/TGLexer.h
index 835f351..55a6c5d 100644
--- a/contrib/llvm/utils/TableGen/TGLexer.h
+++ b/contrib/llvm/utils/TableGen/TGLexer.h
@@ -14,7 +14,7 @@
#ifndef TGLEXER_H
#define TGLEXER_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <vector>
#include <string>
#include <cassert>
@@ -23,7 +23,8 @@ namespace llvm {
class MemoryBuffer;
class SourceMgr;
class SMLoc;
-
+class Twine;
+
namespace tgtok {
enum TokKind {
// Markers
@@ -44,8 +45,8 @@ namespace tgtok {
MultiClass, String,
// !keywords.
- XConcat, XSRA, XSRL, XSHL, XStrConcat, XNameConcat, XCast, XSubst,
- XForEach, XCar, XCdr, XNull, XIf, XEq,
+ XConcat, XSRA, XSRL, XSHL, XStrConcat, XCast, XSubst,
+ XForEach, XHead, XTail, XEmpty, XIf, XEq,
// Integer value.
IntVal,
@@ -95,14 +96,14 @@ public:
SMLoc getLoc() const;
- void PrintError(const char *Loc, const std::string &Msg) const;
- void PrintError(SMLoc Loc, const std::string &Msg) const;
+ void PrintError(const char *Loc, const Twine &Msg) const;
+ void PrintError(SMLoc Loc, const Twine &Msg) const;
private:
/// LexToken - Read the next token and return its code.
tgtok::TokKind LexToken();
- tgtok::TokKind ReturnError(const char *Loc, const std::string &Msg);
+ tgtok::TokKind ReturnError(const char *Loc, const Twine &Msg);
int getNextChar();
void SkipBCPLComment();
diff --git a/contrib/llvm/utils/TableGen/TGParser.cpp b/contrib/llvm/utils/TableGen/TGParser.cpp
index f81aabe..f6041be 100644
--- a/contrib/llvm/utils/TableGen/TGParser.cpp
+++ b/contrib/llvm/utils/TableGen/TGParser.cpp
@@ -16,6 +16,8 @@
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <sstream>
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -294,20 +296,23 @@ static bool isObjectStart(tgtok::TokKind K) {
K == tgtok::Defm || K == tgtok::Let || K == tgtok::MultiClass;
}
+static std::string GetNewAnonymousName() {
+ static unsigned AnonCounter = 0;
+ return "anonymous."+utostr(AnonCounter++);
+}
+
/// ParseObjectName - If an object name is specified, return it. Otherwise,
/// return an anonymous name.
/// ObjectName ::= ID
/// ObjectName ::= /*empty*/
///
std::string TGParser::ParseObjectName() {
- if (Lex.getCode() == tgtok::Id) {
- std::string Ret = Lex.getCurStrVal();
- Lex.Lex();
- return Ret;
- }
+ if (Lex.getCode() != tgtok::Id)
+ return GetNewAnonymousName();
- static unsigned AnonCounter = 0;
- return "anonymous."+utostr(AnonCounter++);
+ std::string Ret = Lex.getCurStrVal();
+ Lex.Lex();
+ return Ret;
}
@@ -678,9 +683,9 @@ Init *TGParser::ParseOperation(Record *CurRec) {
TokError("unknown operation");
return 0;
break;
- case tgtok::XCar:
- case tgtok::XCdr:
- case tgtok::XNull:
+ case tgtok::XHead:
+ case tgtok::XTail:
+ case tgtok::XEmpty:
case tgtok::XCast: { // Value ::= !unop '(' Value ')'
UnOpInit::UnaryOp Code;
RecTy *Type = 0;
@@ -699,17 +704,17 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
break;
- case tgtok::XCar:
+ case tgtok::XHead:
Lex.Lex(); // eat the operation
- Code = UnOpInit::CAR;
+ Code = UnOpInit::HEAD;
break;
- case tgtok::XCdr:
+ case tgtok::XTail:
Lex.Lex(); // eat the operation
- Code = UnOpInit::CDR;
+ Code = UnOpInit::TAIL;
break;
- case tgtok::XNull:
+ case tgtok::XEmpty:
Lex.Lex(); // eat the operation
- Code = UnOpInit::LNULL;
+ Code = UnOpInit::EMPTY;
Type = new IntRecTy;
break;
}
@@ -722,9 +727,9 @@ Init *TGParser::ParseOperation(Record *CurRec) {
Init *LHS = ParseValue(CurRec);
if (LHS == 0) return 0;
- if (Code == UnOpInit::CAR
- || Code == UnOpInit::CDR
- || Code == UnOpInit::LNULL) {
+ if (Code == UnOpInit::HEAD
+ || Code == UnOpInit::TAIL
+ || Code == UnOpInit::EMPTY) {
ListInit *LHSl = dynamic_cast<ListInit*>(LHS);
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
TypedInit *LHSt = dynamic_cast<TypedInit*>(LHS);
@@ -741,8 +746,8 @@ Init *TGParser::ParseOperation(Record *CurRec) {
}
}
- if (Code == UnOpInit::CAR
- || Code == UnOpInit::CDR) {
+ if (Code == UnOpInit::HEAD
+ || Code == UnOpInit::TAIL) {
if (LHSl == 0 && LHSt == 0) {
TokError("expected list type argumnet in unary operator");
return 0;
@@ -759,7 +764,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
TokError("untyped list element in unary operator");
return 0;
}
- if (Code == UnOpInit::CAR) {
+ if (Code == UnOpInit::HEAD) {
Type = Itemt->getType();
} else {
Type = new ListRecTy(Itemt->getType());
@@ -771,7 +776,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
TokError("expected list type argumnet in unary operator");
return 0;
}
- if (Code == UnOpInit::CAR) {
+ if (Code == UnOpInit::HEAD) {
Type = LType->getElementType();
} else {
Type = LType;
@@ -793,81 +798,68 @@ Init *TGParser::ParseOperation(Record *CurRec) {
case tgtok::XSRL:
case tgtok::XSHL:
case tgtok::XEq:
- case tgtok::XStrConcat:
- case tgtok::XNameConcat: { // Value ::= !binop '(' Value ',' Value ')'
+ case tgtok::XStrConcat: { // Value ::= !binop '(' Value ',' Value ')'
+ tgtok::TokKind OpTok = Lex.getCode();
+ SMLoc OpLoc = Lex.getLoc();
+ Lex.Lex(); // eat the operation
+
BinOpInit::BinaryOp Code;
RecTy *Type = 0;
-
- switch (Lex.getCode()) {
+ switch (OpTok) {
default: assert(0 && "Unhandled code!");
- case tgtok::XConcat:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::CONCAT;
- Type = new DagRecTy();
- break;
- case tgtok::XSRA:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::SRA;
- Type = new IntRecTy();
- break;
- case tgtok::XSRL:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::SRL;
- Type = new IntRecTy();
- break;
- case tgtok::XSHL:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::SHL;
- Type = new IntRecTy();
- break;
- case tgtok::XEq:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::EQ;
- Type = new IntRecTy();
- break;
+ case tgtok::XConcat: Code = BinOpInit::CONCAT; Type = new DagRecTy(); break;
+ case tgtok::XSRA: Code = BinOpInit::SRA; Type = new IntRecTy(); break;
+ case tgtok::XSRL: Code = BinOpInit::SRL; Type = new IntRecTy(); break;
+ case tgtok::XSHL: Code = BinOpInit::SHL; Type = new IntRecTy(); break;
+ case tgtok::XEq: Code = BinOpInit::EQ; Type = new BitRecTy(); break;
case tgtok::XStrConcat:
- Lex.Lex(); // eat the operation
Code = BinOpInit::STRCONCAT;
Type = new StringRecTy();
break;
- case tgtok::XNameConcat:
- Lex.Lex(); // eat the operation
- Code = BinOpInit::NAMECONCAT;
-
- Type = ParseOperatorType();
-
- if (Type == 0) {
- TokError("did not get type for binary operator");
- return 0;
- }
-
- break;
}
+
if (Lex.getCode() != tgtok::l_paren) {
TokError("expected '(' after binary operator");
return 0;
}
Lex.Lex(); // eat the '('
- Init *LHS = ParseValue(CurRec);
- if (LHS == 0) return 0;
+ SmallVector<Init*, 2> InitList;
- if (Lex.getCode() != tgtok::comma) {
- TokError("expected ',' in binary operator");
- return 0;
- }
- Lex.Lex(); // eat the ','
+ InitList.push_back(ParseValue(CurRec));
+ if (InitList.back() == 0) return 0;
- Init *RHS = ParseValue(CurRec);
- if (RHS == 0) return 0;
+ while (Lex.getCode() == tgtok::comma) {
+ Lex.Lex(); // eat the ','
+
+ InitList.push_back(ParseValue(CurRec));
+ if (InitList.back() == 0) return 0;
+ }
if (Lex.getCode() != tgtok::r_paren) {
- TokError("expected ')' in binary operator");
+ TokError("expected ')' in operator");
return 0;
}
Lex.Lex(); // eat the ')'
- return (new BinOpInit(Code, LHS, RHS, Type))->Fold(CurRec, CurMultiClass);
+
+ // We allow multiple operands to associative operators like !strconcat as
+ // shorthand for nesting them.
+ if (Code == BinOpInit::STRCONCAT) {
+ while (InitList.size() > 2) {
+ Init *RHS = InitList.pop_back_val();
+ RHS = (new BinOpInit(Code, InitList.back(), RHS, Type))
+ ->Fold(CurRec, CurMultiClass);
+ InitList.back() = RHS;
+ }
+ }
+
+ if (InitList.size() == 2)
+ return (new BinOpInit(Code, InitList[0], InitList[1], Type))
+ ->Fold(CurRec, CurMultiClass);
+
+ Error(OpLoc, "expected two operands to operator");
+ return 0;
}
case tgtok::XIf:
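The shorthand nests to the right: !strconcat(a, b, c) folds to !strconcat(a, !strconcat(b, c)). A stand-alone model of the pop_back_val loop above, over plain strings instead of Init* (helper name invented; assumes at least two operands, as the parser enforces):

  #include <string>
  #include <vector>

  // Folds a multi-operand !strconcat the way the parser does:
  // combine the last two operands until only two remain.
  static std::string FoldConcat(std::vector<std::string> Ops) {
    while (Ops.size() > 2) {
      std::string RHS = Ops.back();
      Ops.pop_back();
      Ops.back() += RHS;         // models BinOpInit(STRCONCAT, back, RHS)
    }
    return Ops[0] + Ops[1];      // the final two-operand BinOpInit
  }
  // FoldConcat({"a", "b", "c"}) == "abc"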
@@ -876,7 +868,6 @@ Init *TGParser::ParseOperation(Record *CurRec) {
TernOpInit::TernaryOp Code;
RecTy *Type = 0;
-
tgtok::TokKind LexCode = Lex.getCode();
Lex.Lex(); // eat the operation
switch (LexCode) {
@@ -927,16 +918,45 @@ Init *TGParser::ParseOperation(Record *CurRec) {
switch (LexCode) {
default: assert(0 && "Unhandled code!");
case tgtok::XIf: {
- TypedInit *MHSt = dynamic_cast<TypedInit *>(MHS);
- TypedInit *RHSt = dynamic_cast<TypedInit *>(RHS);
- if (MHSt == 0 || RHSt == 0) {
+ // FIXME: The `!if' operator doesn't handle non-TypedInit well at
+ // all. This can be made much more robust.
+ TypedInit *MHSt = dynamic_cast<TypedInit*>(MHS);
+ TypedInit *RHSt = dynamic_cast<TypedInit*>(RHS);
+
+ RecTy *MHSTy = 0;
+ RecTy *RHSTy = 0;
+
+ if (MHSt == 0 && RHSt == 0) {
+ BitsInit *MHSbits = dynamic_cast<BitsInit*>(MHS);
+ BitsInit *RHSbits = dynamic_cast<BitsInit*>(RHS);
+
+ if (MHSbits && RHSbits &&
+ MHSbits->getNumBits() == RHSbits->getNumBits()) {
+ Type = new BitRecTy();
+ break;
+ } else {
+ BitInit *MHSbit = dynamic_cast<BitInit*>(MHS);
+ BitInit *RHSbit = dynamic_cast<BitInit*>(RHS);
+
+ if (MHSbit && RHSbit) {
+ Type = new BitRecTy();
+ break;
+ }
+ }
+ } else if (MHSt != 0 && RHSt != 0) {
+ MHSTy = MHSt->getType();
+ RHSTy = RHSt->getType();
+ }
+
+ if (!MHSTy || !RHSTy) {
TokError("could not get type for !if");
return 0;
}
- if (MHSt->getType()->typeIsConvertibleTo(RHSt->getType())) {
- Type = RHSt->getType();
- } else if (RHSt->getType()->typeIsConvertibleTo(MHSt->getType())) {
- Type = MHSt->getType();
+
+ if (MHSTy->typeIsConvertibleTo(RHSTy)) {
+ Type = RHSTy;
+ } else if (RHSTy->typeIsConvertibleTo(MHSTy)) {
+ Type = MHSTy;
} else {
TokError("inconsistent types for !if");
return 0;
@@ -1037,8 +1057,13 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
break;
}
case tgtok::CodeFragment:
- R = new CodeInit(Lex.getCurStrVal()); Lex.Lex(); break;
- case tgtok::question: R = new UnsetInit(); Lex.Lex(); break;
+ R = new CodeInit(Lex.getCurStrVal());
+ Lex.Lex();
+ break;
+ case tgtok::question:
+ R = new UnsetInit();
+ Lex.Lex();
+ break;
case tgtok::Id: {
SMLoc NameLoc = Lex.getLoc();
std::string Name = Lex.getCurStrVal();
@@ -1071,7 +1096,9 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
// Create the new record, set it as CurRec temporarily.
static unsigned AnonCounter = 0;
- Record *NewRec = new Record("anonymous.val."+utostr(AnonCounter++),NameLoc);
+ Record *NewRec = new Record("anonymous.val."+utostr(AnonCounter++),
+ NameLoc,
+ Records);
SubClassReference SCRef;
SCRef.RefLoc = NameLoc;
SCRef.Rec = Class;
@@ -1212,21 +1239,13 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
}
case tgtok::l_paren: { // Value ::= '(' IDValue DagArgList ')'
Lex.Lex(); // eat the '('
- if (Lex.getCode() != tgtok::Id
- && Lex.getCode() != tgtok::XCast
- && Lex.getCode() != tgtok::XNameConcat) {
+ if (Lex.getCode() != tgtok::Id && Lex.getCode() != tgtok::XCast) {
TokError("expected identifier in dag init");
return 0;
}
- Init *Operator = 0;
- if (Lex.getCode() == tgtok::Id) {
- Operator = ParseIDValue(CurRec);
- if (Operator == 0) return 0;
- } else {
- Operator = ParseOperation(CurRec);
- if (Operator == 0) return 0;
- }
+ Init *Operator = ParseValue(CurRec);
+ if (Operator == 0) return 0;
// If the operator name is present, parse it.
std::string OperatorName;
@@ -1252,25 +1271,22 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
Lex.Lex(); // eat the ')'
return new DagInit(Operator, OperatorName, DagArgs);
- break;
}
- case tgtok::XCar:
- case tgtok::XCdr:
- case tgtok::XNull:
+ case tgtok::XHead:
+ case tgtok::XTail:
+ case tgtok::XEmpty:
case tgtok::XCast: // Value ::= !unop '(' Value ')'
case tgtok::XConcat:
case tgtok::XSRA:
case tgtok::XSRL:
case tgtok::XSHL:
case tgtok::XEq:
- case tgtok::XStrConcat:
- case tgtok::XNameConcat: // Value ::= !binop '(' Value ',' Value ')'
+ case tgtok::XStrConcat: // Value ::= !binop '(' Value ',' Value ')'
case tgtok::XIf:
case tgtok::XForEach:
case tgtok::XSubst: { // Value ::= !ternop '(' Value ',' Value ',' Value ')'
return ParseOperation(CurRec);
- break;
}
}
@@ -1646,7 +1662,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
Lex.Lex(); // Eat the 'def' token.
// Parse ObjectName and make a record for it.
- Record *CurRec = new Record(ParseObjectName(), DefLoc);
+ Record *CurRec = new Record(ParseObjectName(), DefLoc, Records);
if (!CurMultiClass) {
// Top-level def definition.
@@ -1713,7 +1729,7 @@ bool TGParser::ParseClass() {
return TokError("Class '" + CurRec->getName() + "' already defined");
} else {
// If this is the first reference to this class, create and add it.
- CurRec = new Record(Lex.getCurStrVal(), Lex.getLoc());
+ CurRec = new Record(Lex.getCurStrVal(), Lex.getLoc(), Records);
Records.addClass(CurRec);
}
Lex.Lex(); // eat the name.
@@ -1830,7 +1846,8 @@ bool TGParser::ParseMultiClass() {
if (MultiClasses.count(Name))
return TokError("multiclass '" + Name + "' already defined");
- CurMultiClass = MultiClasses[Name] = new MultiClass(Name, Lex.getLoc());
+ CurMultiClass = MultiClasses[Name] = new MultiClass(Name,
+ Lex.getLoc(), Records);
Lex.Lex(); // Eat the identifier.
// If there are template args, parse them.
@@ -1899,12 +1916,15 @@ bool TGParser::ParseMultiClass() {
///
bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
- if (Lex.Lex() != tgtok::Id) // eat the defm.
- return TokError("expected identifier after defm");
+
+ std::string DefmPrefix;
+ if (Lex.Lex() == tgtok::Id) { // eat the defm.
+ DefmPrefix = Lex.getCurStrVal();
+ Lex.Lex(); // Eat the defm prefix.
+ }
SMLoc DefmPrefixLoc = Lex.getLoc();
- std::string DefmPrefix = Lex.getCurStrVal();
- if (Lex.Lex() != tgtok::colon)
+ if (Lex.getCode() != tgtok::colon)
return TokError("expected ':' after defm identifier");
// Keep track of the new generated record definitions.
@@ -1939,17 +1959,24 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
for (unsigned i = 0, e = MC->DefPrototypes.size(); i != e; ++i) {
Record *DefProto = MC->DefPrototypes[i];
- // Add in the defm name
+ // Add in the defm name. If the defm prefix is empty, give each
+ // instantiated def a unique name. Otherwise, if "#NAME#" exists in the
+ // name, substitute the prefix for #NAME#. Otherwise, use the defm name
+ // as a prefix.
std::string DefName = DefProto->getName();
- std::string::size_type idx = DefName.find("#NAME#");
- if (idx != std::string::npos) {
- DefName.replace(idx, 6, DefmPrefix);
+ if (DefmPrefix.empty()) {
+ DefName = GetNewAnonymousName();
} else {
- // Add the suffix to the defm name to get the new name.
- DefName = DefmPrefix + DefName;
+ std::string::size_type idx = DefName.find("#NAME#");
+ if (idx != std::string::npos) {
+ DefName.replace(idx, 6, DefmPrefix);
+ } else {
+ // Add the suffix to the defm name to get the new name.
+ DefName = DefmPrefix + DefName;
+ }
}
- Record *CurRec = new Record(DefName, DefmPrefixLoc);
+ Record *CurRec = new Record(DefName, DefmPrefixLoc, Records);
SubClassReference Ref;
Ref.RefLoc = DefmPrefixLoc;
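The three naming cases above reduce to a small pure function; a sketch (not part of the diff) using GetNewAnonymousName from earlier in this file:

  // Resolves the name of one def instantiated by 'defm DefmPrefix : MC;'.
  static std::string ResolveDefmName(const std::string &ProtoName,
                                     const std::string &DefmPrefix) {
    if (DefmPrefix.empty())
      return GetNewAnonymousName();       // anonymous defm: unique name
    std::string Name = ProtoName;
    std::string::size_type idx = Name.find("#NAME#");
    if (idx != std::string::npos)
      Name.replace(idx, 6, DefmPrefix);   // splice the prefix over #NAME#
    else
      Name = DefmPrefix + Name;           // otherwise prepend it
    return Name;
  }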
@@ -2091,7 +2118,8 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
/// Object ::= LETCommand Object
bool TGParser::ParseObject(MultiClass *MC) {
switch (Lex.getCode()) {
- default: assert(0 && "This is not an object");
+ default:
+ return TokError("Expected class, def, defm, multiclass or let definition");
case tgtok::Let: return ParseTopLevelLet(MC);
case tgtok::Def: return ParseDef(MC);
case tgtok::Defm: return ParseDefm(MC);
diff --git a/contrib/llvm/utils/TableGen/TGParser.h b/contrib/llvm/utils/TableGen/TGParser.h
index 0aee931..9cdf68f 100644
--- a/contrib/llvm/utils/TableGen/TGParser.h
+++ b/contrib/llvm/utils/TableGen/TGParser.h
@@ -15,12 +15,14 @@
#define TGPARSER_H
#include "TGLexer.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"
#include <map>
namespace llvm {
class Record;
class RecordVal;
+ class RecordKeeper;
struct RecTy;
struct Init;
struct MultiClass;
@@ -46,18 +48,22 @@ class TGParser {
/// CurMultiClass - If we are parsing a 'multiclass' definition, this is the
/// current value.
MultiClass *CurMultiClass;
+
+ // Record tracker
+ RecordKeeper &Records;
public:
- TGParser(SourceMgr &SrcMgr) : Lex(SrcMgr), CurMultiClass(0) {}
+ TGParser(SourceMgr &SrcMgr, RecordKeeper &records) :
+ Lex(SrcMgr), CurMultiClass(0), Records(records) {}
/// ParseFile - Main entrypoint for parsing a tblgen file. These parser
/// routines return true on error, or false on success.
bool ParseFile();
- bool Error(SMLoc L, const std::string &Msg) const {
+ bool Error(SMLoc L, const Twine &Msg) const {
Lex.PrintError(L, Msg);
return true;
}
- bool TokError(const std::string &Msg) const {
+ bool TokError(const Twine &Msg) const {
return Error(Lex.getLoc(), Msg);
}
private: // Semantic analysis methods.
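Taking const Twine & instead of const std::string & lets call sites splice message fragments without materializing intermediate strings; the pieces are only rendered when the diagnostic is actually printed. A small illustration (the reporting function is invented):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/Twine.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static bool Report(const Twine &Msg) {
    errs() << "error: " << Msg << "\n";   // Msg is rendered only here
    return true;
  }

  static bool Example(StringRef Name) {
    // '+' builds a lightweight Twine tree; no temporary std::string.
    return Report("multiclass '" + Name + "' already defined");
  }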
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index 5e3e282..3b7dc01 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -21,6 +21,7 @@
#include "ClangASTNodesEmitter.h"
#include "ClangAttrEmitter.h"
#include "ClangDiagnosticsEmitter.h"
+#include "ClangSACheckersEmitter.h"
#include "CodeEmitterGen.h"
#include "DAGISelEmitter.h"
#include "DisassemblerEmitter.h"
@@ -37,11 +38,13 @@
#include "ARMDecoderEmitter.h"
#include "SubtargetEmitter.h"
#include "TGParser.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
#include <algorithm>
#include <cstdio>
using namespace llvm;
@@ -59,10 +62,12 @@ enum ActionType {
GenClangAttrList,
GenClangAttrPCHRead,
GenClangAttrPCHWrite,
+ GenClangAttrSpellingList,
GenClangDiagsDefs,
GenClangDiagGroups,
GenClangDeclNodes,
GenClangStmtNodes,
+ GenClangSACheckers,
GenDAGISel,
GenFastISel,
GenOptParserDefs, GenOptParserImpl,
@@ -73,6 +78,7 @@ enum ActionType {
GenEDInfo,
GenArmNeon,
GenArmNeonSema,
+ GenArmNeonTest,
PrintEnums
};
@@ -127,14 +133,18 @@ namespace {
"Generate clang PCH attribute reader"),
clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
"Generate clang PCH attribute writer"),
+ clEnumValN(GenClangAttrSpellingList, "gen-clang-attr-spelling-list",
+ "Generate a clang attribute spelling list"),
clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
"Generate Clang diagnostics definitions"),
clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
"Generate Clang diagnostic groups"),
clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
- "Generate Clang AST statement nodes"),
+ "Generate Clang AST declaration nodes"),
clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
"Generate Clang AST statement nodes"),
+ clEnumValN(GenClangSACheckers, "gen-clang-sa-checkers",
+ "Generate Clang Static Analyzer checkers"),
clEnumValN(GenLLVMCConf, "gen-llvmc",
"Generate LLVMC configuration library"),
clEnumValN(GenEDInfo, "gen-enhanced-disassembly-info",
@@ -143,6 +153,8 @@ namespace {
"Generate arm_neon.h for clang"),
clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
"Generate ARM NEON sema support for clang"),
+ clEnumValN(GenArmNeonTest, "gen-arm-neon-test",
+ "Generate ARM NEON tests for clang"),
clEnumValN(PrintEnums, "print-enums",
"Print enum values for a class"),
clEnumValEnd));
@@ -169,12 +181,9 @@ namespace {
}
-// FIXME: Eliminate globals from tblgen.
-RecordKeeper llvm::Records;
-
static SourceMgr SrcMgr;
-void llvm::PrintError(SMLoc ErrorLoc, const std::string &Msg) {
+void llvm::PrintError(SMLoc ErrorLoc, const Twine &Msg) {
SrcMgr.PrintMessage(ErrorLoc, Msg, "error");
}
@@ -184,14 +193,15 @@ void llvm::PrintError(SMLoc ErrorLoc, const std::string &Msg) {
/// file.
static bool ParseFile(const std::string &Filename,
const std::vector<std::string> &IncludeDirs,
- SourceMgr &SrcMgr) {
- std::string ErrorStr;
- MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrorStr);
- if (F == 0) {
+ SourceMgr &SrcMgr,
+ RecordKeeper &Records) {
+ OwningPtr<MemoryBuffer> File;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
errs() << "Could not open input file '" << Filename << "': "
- << ErrorStr <<"\n";
+ << ec.message() <<"\n";
return true;
}
+ MemoryBuffer *F = File.take();
// Tell SrcMgr about this buffer, which is what TGParser will pick up.
SrcMgr.AddNewSourceBuffer(F, SMLoc());
@@ -200,19 +210,21 @@ static bool ParseFile(const std::string &Filename,
// it later.
SrcMgr.setIncludeDirs(IncludeDirs);
- TGParser Parser(SrcMgr);
+ TGParser Parser(SrcMgr, Records);
return Parser.ParseFile();
}
int main(int argc, char **argv) {
+ RecordKeeper Records;
+
sys::PrintStackTraceOnErrorSignal();
PrettyStackTraceProgram X(argc, argv);
cl::ParseCommandLineOptions(argc, argv);
// Parse the input file.
- if (ParseFile(InputFilename, IncludeDirs, SrcMgr))
+ if (ParseFile(InputFilename, IncludeDirs, SrcMgr, Records))
return 1;
std::string Error;
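With the llvm::Records global gone, the keeper's lifetime is explicit in main and every consumer receives it by reference. The essential wiring, condensed from this diff into one sketch:

  #include "Record.h"
  #include "TGParser.h"
  #include "llvm/Support/SourceMgr.h"
  using namespace llvm;

  static bool RunParse(SourceMgr &SrcMgr) {
    RecordKeeper Records;              // function-local, replaces the global
    TGParser Parser(SrcMgr, Records);  // each new Record is registered here
    return Parser.ParseFile();         // true on error, as the parser routines do
  }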
@@ -274,6 +286,9 @@ int main(int argc, char **argv) {
case GenClangAttrPCHWrite:
ClangAttrPCHWriteEmitter(Records).run(Out.os());
break;
+ case GenClangAttrSpellingList:
+ ClangAttrSpellingListEmitter(Records).run(Out.os());
+ break;
case GenClangDiagsDefs:
ClangDiagsDefsEmitter(Records, ClangComponent).run(Out.os());
break;
@@ -287,6 +302,9 @@ int main(int argc, char **argv) {
case GenClangStmtNodes:
ClangASTNodesEmitter(Records, "Stmt", "").run(Out.os());
break;
+ case GenClangSACheckers:
+ ClangSACheckersEmitter(Records).run(Out.os());
+ break;
case GenDisassembler:
DisassemblerEmitter(Records).run(Out.os());
break;
@@ -323,6 +341,9 @@ int main(int argc, char **argv) {
case GenArmNeonSema:
NeonEmitter(Records).runHeader(Out.os());
break;
+ case GenArmNeonTest:
+ NeonEmitter(Records).runTests(Out.os());
+ break;
case PrintEnums:
{
std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 2176224..94797f5 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -161,7 +161,7 @@ void DisassemblerTables::emitOneID(raw_ostream &o,
/// @param i - The indentation level for that output stream.
static void emitEmptyTable(raw_ostream &o, uint32_t &i)
{
- o.indent(i * 2) << "static InstrUID modRMEmptyTable[1] = { 0 };" << "\n";
+ o.indent(i * 2) << "static const InstrUID modRMEmptyTable[1] = { 0 };\n";
o << "\n";
}
@@ -275,7 +275,7 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
return;
}
- o1.indent(i1) << "static InstrUID modRMTable" << thisTableNumber;
+ o1.indent(i1) << "static const InstrUID modRMTable" << thisTableNumber;
switch (dt) {
default:
@@ -365,7 +365,7 @@ void DisassemblerTables::emitContextDecision(
uint32_t &i2,
ContextDecision &decision,
const char* name) const {
- o2.indent(i2) << "struct ContextDecision " << name << " = {" << "\n";
+ o2.indent(i2) << "static const struct ContextDecision " << name << " = {\n";
i2++;
o2.indent(i2) << "{ /* opcodeDecisions */" << "\n";
i2++;
@@ -392,10 +392,8 @@ void DisassemblerTables::emitContextDecision(
void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
const {
- o.indent(i * 2) << "struct InstructionSpecifier ";
- o << INSTRUCTIONS_STR << "[";
- o << InstructionSpecifiers.size();
- o << "] = {" << "\n";
+ o.indent(i * 2) << "static const struct InstructionSpecifier ";
+ o << INSTRUCTIONS_STR "[" << InstructionSpecifiers.size() << "] = {\n";
i++;
@@ -456,8 +454,8 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
uint16_t index;
- o.indent(i * 2) << "InstructionContext ";
- o << CONTEXTS_STR << "[256] = {" << "\n";
+ o.indent(i * 2) << "static const InstructionContext " CONTEXTS_STR
+ "[256] = {\n";
i++;
for (index = 0; index < 256; ++index) {
diff --git a/contrib/llvm/utils/TableGen/X86ModRMFilters.h b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
index 45cb07a..199040b 100644
--- a/contrib/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
@@ -18,7 +18,7 @@
#ifndef X86MODRMFILTERS_H
#define X86MODRMFILTERS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 4dba85b..ccd3efd 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -51,10 +51,11 @@ namespace X86Local {
MRM0m = 24, MRM1m = 25, MRM2m = 26, MRM3m = 27,
MRM4m = 28, MRM5m = 29, MRM6m = 30, MRM7m = 31,
MRMInitReg = 32,
-
#define MAP(from, to) MRM_##from = to,
MRM_MAPPING
#undef MAP
+ RawFrmImm8 = 43,
+ RawFrmImm16 = 44,
lastMRM
};
@@ -113,7 +114,6 @@ namespace X86Local {
EXTENSION_TABLE(72) \
EXTENSION_TABLE(73) \
EXTENSION_TABLE(ae) \
- EXTENSION_TABLE(b9) \
EXTENSION_TABLE(ba) \
EXTENSION_TABLE(c7)
@@ -219,7 +219,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
Name = Rec->getName();
AsmString = Rec->getValueAsString("AsmString");
- Operands = &insn.OperandList;
+ Operands = &insn.Operands.OperandList;
IsSSE = HasOpSizePrefix && (Name.find("16") == Name.npos);
HasFROperands = false;
@@ -311,7 +311,7 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
return FILTER_STRONG;
// Special cases.
-
+
if (Name.find("PCMPISTRI") != Name.npos && Name != "PCMPISTRI")
return FILTER_WEAK;
if (Name.find("PCMPESTRI") != Name.npos && Name != "PCMPESTRI")
@@ -424,7 +424,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
Spec->insnContext = insnContext();
- const std::vector<CodeGenInstruction::OperandInfo> &OperandList = *Operands;
+ const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
unsigned operandIndex;
unsigned numOperands = OperandList.size();
@@ -440,7 +440,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
for (operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
if (OperandList[operandIndex].Constraints.size()) {
- const CodeGenInstruction::ConstraintInfo &Constraint =
+ const CGIOperandList::ConstraintInfo &Constraint =
OperandList[operandIndex].Constraints[0];
if (Constraint.isTied()) {
operandMapping[operandIndex] = Constraint.getTiedOperand();
@@ -587,6 +587,20 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(relocation)
break;
+ case X86Local::RawFrmImm8:
+ // operand 1 is a 16-bit immediate
+ // operand 2 is an 8-bit immediate
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for X86Local::RawFrmImm8");
+ HANDLE_OPERAND(immediate)
+ HANDLE_OPERAND(immediate)
+ break;
+ case X86Local::RawFrmImm16:
+ // operand 1 is a 16-bit immediate
+ // operand 2 is a 16-bit immediate
+ HANDLE_OPERAND(immediate)
+ HANDLE_OPERAND(immediate)
+ break;
case X86Local::MRMInitReg:
// Ignored.
break;
@@ -829,10 +843,13 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("GR8", TYPE_R8)
TYPE("VR128", TYPE_XMM128)
TYPE("f128mem", TYPE_M128)
+ TYPE("f256mem", TYPE_M256)
TYPE("FR64", TYPE_XMM64)
TYPE("f64mem", TYPE_M64FP)
+ TYPE("sdmem", TYPE_M64FP)
TYPE("FR32", TYPE_XMM32)
TYPE("f32mem", TYPE_M32FP)
+ TYPE("ssmem", TYPE_M32FP)
TYPE("RST", TYPE_ST)
TYPE("i128mem", TYPE_M128)
TYPE("i64i32imm_pcrel", TYPE_REL64)
@@ -840,6 +857,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("i32imm_pcrel", TYPE_REL32)
TYPE("SSECC", TYPE_IMM3)
TYPE("brtarget", TYPE_RELv)
+ TYPE("uncondbrtarget", TYPE_RELv)
TYPE("brtarget8", TYPE_REL8)
TYPE("f80mem", TYPE_M80FP)
TYPE("lea32mem", TYPE_LEA)
@@ -924,7 +942,10 @@ OperandEncoding RecognizableInstr::memoryEncodingFromString
ENCODING("i32mem", ENCODING_RM)
ENCODING("i64mem", ENCODING_RM)
ENCODING("i8mem", ENCODING_RM)
+ ENCODING("ssmem", ENCODING_RM)
+ ENCODING("sdmem", ENCODING_RM)
ENCODING("f128mem", ENCODING_RM)
+ ENCODING("f256mem", ENCODING_RM)
ENCODING("f64mem", ENCODING_RM)
ENCODING("f32mem", ENCODING_RM)
ENCODING("i128mem", ENCODING_RM)
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index db4d96d..c043b90 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -22,7 +22,7 @@
#include "CodeGenTarget.h"
#include "Record.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
@@ -76,7 +76,8 @@ private:
/// The operands of the instruction, as listed in the CodeGenInstruction.
/// They are not one-to-one with operands listed in the MCInst; for example,
/// memory operands expand to 5 operands in the MCInst
- const std::vector<CodeGenInstruction::OperandInfo>* Operands;
+ const std::vector<CGIOperandList::OperandInfo>* Operands;
+
/// The description of the instruction that is emitted into the instruction
/// info table
InstructionSpecifier* Spec;
diff --git a/etc/mtree/BSD.include.dist b/etc/mtree/BSD.include.dist
index f2cc467..2d9fa26 100644
--- a/etc/mtree/BSD.include.dist
+++ b/etc/mtree/BSD.include.dist
@@ -84,7 +84,7 @@
..
..
clang
- 2.8
+ 2.9
..
..
crypto
diff --git a/lib/clang/Makefile b/lib/clang/Makefile
index bd6ece6..c37e089 100644
--- a/lib/clang/Makefile
+++ b/lib/clang/Makefile
@@ -4,17 +4,19 @@
SUBDIR= libclanganalysis \
libclangast \
libclangbasic \
- libclangchecker \
libclangcodegen \
libclangdriver \
- libclangindex \
libclangfrontend \
libclangfrontendtool \
+ libclangindex \
libclanglex \
libclangparse \
libclangrewrite \
libclangsema \
libclangserialization \
+ libclangstaticanalyzercore \
+ libclangstaticanalyzerfrontend \
+ libclangstaticanalyzercheckers \
\
libllvmanalysis \
libllvmasmparser \
@@ -31,25 +33,25 @@ SUBDIR= libclanganalysis \
libllvmscalaropts \
libllvmselectiondag \
libllvmsupport \
- libllvmsystem \
libllvmtarget \
libllvmtransformutils \
\
libllvmarmasmparser \
- libllvmarmasmprinter \
+ libllvmarminstprinter \
libllvmarmcodegen \
+ libllvmarmdisassembler \
libllvmarminfo \
- libllvmmipsasmprinter \
libllvmmipscodegen \
libllvmmipsinfo \
- libllvmpowerpcasmprinter \
+ libllvmpowerpcinstprinter \
libllvmpowerpccodegen \
libllvmpowerpcinfo \
libllvmx86asmparser \
- libllvmx86asmprinter \
+ libllvmx86instprinter \
libllvmx86codegen \
libllvmx86disassembler \
- libllvmx86info
+ libllvmx86info \
+ libllvmx86utils
.endif
SUBDIR+= include
diff --git a/lib/clang/clang.build.mk b/lib/clang/clang.build.mk
index 33b251b..579140e 100644
--- a/lib/clang/clang.build.mk
+++ b/lib/clang/clang.build.mk
@@ -38,7 +38,7 @@ TBLGEN=tblgen ${CFLAGS:M-I*}
Intrinsics.inc.h: ${LLVM_SRCS}/include/llvm/Intrinsics.td
${TBLGEN} -gen-intrinsic \
- ${LLVM_SRCS}/include/llvm/Intrinsics.td > ${.TARGET}
+ ${LLVM_SRCS}/include/llvm/Intrinsics.td > ${.TARGET}
.for arch in \
ARM/ARM Mips/Mips PowerPC/PPC X86/X86
. for hdr in \
@@ -53,72 +53,72 @@ Intrinsics.inc.h: ${LLVM_SRCS}/include/llvm/Intrinsics.td
FastISel/-gen-fast-isel \
InstrInfo/-gen-instr-desc \
InstrNames/-gen-instr-enums \
+ MCCodeEmitter/-gen-emitter,-mc-emitter \
RegisterInfo.h/-gen-register-desc-header \
RegisterInfo/-gen-register-desc \
RegisterNames/-gen-register-enums \
Subtarget/-gen-subtarget
${arch:T}Gen${hdr:H:C/$/.inc.h/}: ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td
${TBLGEN} ${hdr:T:C/,/ /g} \
- ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td > ${.TARGET}
+ ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td > ${.TARGET}
. endfor
.endfor
+ARMGenDecoderTables.inc.h: ${LLVM_SRCS}/lib/Target/ARM/ARM.td
+ ${TBLGEN} -gen-arm-decoder ${.ALLSRC} > ${.TARGET}
+
Attrs.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-attr-classes ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-attr-classes ${.ALLSRC} > ${.TARGET}
AttrImpl.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-attr-impl ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-attr-impl ${.ALLSRC} > ${.TARGET}
AttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-attr-list ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-attr-list ${.ALLSRC} > ${.TARGET}
AttrPCHRead.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-attr-pch-read ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-attr-pch-read ${.ALLSRC} > ${.TARGET}
AttrPCHWrite.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-attr-pch-write ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-attr-pch-write ${.ALLSRC} > ${.TARGET}
+
+AttrSpellings.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${TBLGEN} -gen-clang-attr-spelling-list ${.ALLSRC} > ${.TARGET}
DeclNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/DeclNodes.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-decl-nodes ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-decl-nodes ${.ALLSRC} > ${.TARGET}
StmtNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/StmtNodes.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-stmt-nodes ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-clang-stmt-nodes ${.ALLSRC} > ${.TARGET}
arm_neon.inc.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-arm-neon-sema ${.ALLSRC} > ${.TARGET}
+ ${TBLGEN} -gen-arm-neon-sema ${.ALLSRC} > ${.TARGET}
DiagnosticGroups.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-diag-groups \
- ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td > ${.TARGET}
+ ${TBLGEN} -gen-clang-diag-groups -I${CLANG_SRCS}/include/clang/Basic \
+ ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td > ${.TARGET}
.for hdr in AST Analysis Common Driver Frontend Lex Parse Sema
Diagnostic${hdr}Kinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
- -gen-clang-diags-defs -clang-component=${hdr} \
- ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td > ${.TARGET}
+ ${TBLGEN} -gen-clang-diags-defs -clang-component=${hdr} \
+ -I${CLANG_SRCS}/include/clang/Basic \
+ ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td > ${.TARGET}
.endfor
Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/Options.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
- -gen-opt-parser-defs \
- ${CLANG_SRCS}/include/clang/Driver/Options.td > ${.TARGET}
+ ${TBLGEN} -gen-opt-parser-defs -I${CLANG_SRCS}/include/clang/Driver \
+ ${CLANG_SRCS}/include/clang/Driver/Options.td > ${.TARGET}
CC1Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/CC1Options.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
- -gen-opt-parser-defs \
- ${CLANG_SRCS}/include/clang/Driver/CC1Options.td > ${.TARGET}
+ ${TBLGEN} -gen-opt-parser-defs -I${CLANG_SRCS}/include/clang/Driver \
+ ${CLANG_SRCS}/include/clang/Driver/CC1Options.td > ${.TARGET}
CC1AsOptions.inc.h: ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
- -gen-opt-parser-defs \
- ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td > ${.TARGET}
+ ${TBLGEN} -gen-opt-parser-defs -I${CLANG_SRCS}/include/clang/Driver \
+ ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td > ${.TARGET}
+
+Checkers.inc.h: ${CLANG_SRCS}/lib/StaticAnalyzer/Checkers/Checkers.td \
+ ${CLANG_SRCS}/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
+ ${TBLGEN} -gen-clang-sa-checkers -I${CLANG_SRCS}/include \
+ ${CLANG_SRCS}/lib/StaticAnalyzer/Checkers/Checkers.td > ${.TARGET}
SRCS+= ${TGHDRS:C/$/.inc.h/}
DPADD+= ${TGHDRS:C/$/.inc.h/}
diff --git a/lib/clang/include/ARMGenDecoderTables.inc b/lib/clang/include/ARMGenDecoderTables.inc
new file mode 100644
index 0000000..b09685c
--- /dev/null
+++ b/lib/clang/include/ARMGenDecoderTables.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "ARMGenDecoderTables.inc.h"
diff --git a/lib/clang/include/ARMGenEDInfo.inc b/lib/clang/include/ARMGenEDInfo.inc
new file mode 100644
index 0000000..c2b7a09
--- /dev/null
+++ b/lib/clang/include/ARMGenEDInfo.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "ARMGenEDInfo.inc.h"
diff --git a/lib/clang/include/ARMGenMCCodeEmitter.inc b/lib/clang/include/ARMGenMCCodeEmitter.inc
new file mode 100644
index 0000000..82933f0
--- /dev/null
+++ b/lib/clang/include/ARMGenMCCodeEmitter.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "ARMGenMCCodeEmitter.inc.h"
diff --git a/lib/clang/include/Checkers.inc b/lib/clang/include/Checkers.inc
new file mode 100644
index 0000000..d5b8138
--- /dev/null
+++ b/lib/clang/include/Checkers.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "Checkers.inc.h"
diff --git a/lib/clang/include/Makefile b/lib/clang/include/Makefile
index e8319f8..9f3397d 100644
--- a/lib/clang/include/Makefile
+++ b/lib/clang/include/Makefile
@@ -2,7 +2,7 @@
.PATH: ${.CURDIR}/../../../contrib/llvm/tools/clang/lib/Headers
-INCSDIR=${INCLUDEDIR}/clang/2.8
+INCSDIR=${INCLUDEDIR}/clang/2.9
INCS= emmintrin.h \
mm_malloc.h \
diff --git a/lib/clang/include/PPCGenMCCodeEmitter.inc b/lib/clang/include/PPCGenMCCodeEmitter.inc
new file mode 100644
index 0000000..7bd37ce
--- /dev/null
+++ b/lib/clang/include/PPCGenMCCodeEmitter.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "PPCGenMCCodeEmitter.inc.h"
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index f7a43ab..053c13b 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -1,10 +1,10 @@
/* $FreeBSD$ */
-#define CLANG_VERSION 2.8
+#define CLANG_VERSION 2.9
#define CLANG_VERSION_MAJOR 2
-#define CLANG_VERSION_MINOR 8
+#define CLANG_VERSION_MINOR 9
#define CLANG_VENDOR "FreeBSD "
-#define CLANG_VENDOR_SUFFIX " 20101007"
+#define CLANG_VENDOR_SUFFIX " 20110220"
-#define SVN_REVISION "115870"
+#define SVN_REVISION "126079"
diff --git a/lib/clang/include/clang/Lex/AttrSpellings.inc b/lib/clang/include/clang/Lex/AttrSpellings.inc
new file mode 100644
index 0000000..8859ae3
--- /dev/null
+++ b/lib/clang/include/clang/Lex/AttrSpellings.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "AttrSpellings.inc.h"
diff --git a/lib/clang/include/llvm/ADT/iterator.h b/lib/clang/include/llvm/ADT/iterator.h
deleted file mode 100644
index b406266..0000000
--- a/lib/clang/include/llvm/ADT/iterator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* include/llvm/ADT/iterator.h. Generated from iterator.h.in by configure. */
-//==-- llvm/ADT/iterator.h - Portable wrapper around <iterator> --*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides a wrapper around the mysterious <iterator> header file.
-// In GCC 2.95.3, the file defines a bidirectional_iterator class (and other
-// friends), instead of the standard iterator class. In GCC 3.1, the
-// bidirectional_iterator class got moved out and the new, standards compliant,
-// iterator<> class was added. Because there is nothing that we can do to get
-// correct behavior on both compilers, we have this header with #ifdef's. Gross
-// huh?
-//
-// By #includ'ing this file, you get the contents of <iterator> plus the
-// following classes in the global namespace:
-//
-// 1. bidirectional_iterator
-// 2. forward_iterator
-//
-// The #if directives' expressions are filled in by Autoconf.
-//
-// $FreeBSD$
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_ITERATOR_H
-#define LLVM_ADT_ITERATOR_H
-
-#include <iterator>
-
-#define HAVE_BI_ITERATOR 0
-#define HAVE_STD_ITERATOR 1
-#define HAVE_FWD_ITERATOR 0
-
-#ifdef _MSC_VER
-# define HAVE_BI_ITERATOR 0
-# define HAVE_STD_ITERATOR 1
-# define HAVE_FWD_ITERATOR 0
-#endif
-
-#if !HAVE_BI_ITERATOR
-# if HAVE_STD_ITERATOR
-/// If the bidirectional iterator is not defined, we attempt to define it in
-/// terms of the C++ standard iterator. Otherwise, we import it with a "using"
-/// statement.
-///
-template<class Ty, class PtrDiffTy>
-struct bidirectional_iterator
- : public std::iterator<std::bidirectional_iterator_tag, Ty, PtrDiffTy> {
-};
-# else
-# error "Need to have standard iterator to define bidirectional iterator!"
-# endif
-#else
-using std::bidirectional_iterator;
-#endif
-
-#if !HAVE_FWD_ITERATOR
-# if HAVE_STD_ITERATOR
-/// If the forward iterator is not defined, attempt to define it in terms of
-/// the C++ standard iterator. Otherwise, we import it with a "using" statement.
-///
-template<class Ty, class PtrDiffTy>
-struct forward_iterator
- : public std::iterator<std::forward_iterator_tag, Ty, PtrDiffTy> {
-};
-# else
-# error "Need to have standard iterator to define forward iterator!"
-# endif
-#else
-using std::forward_iterator;
-#endif
-
-#endif // LLVM_ADT_ITERATOR_H
diff --git a/lib/clang/include/llvm/Config/AsmPrinters.def b/lib/clang/include/llvm/Config/AsmPrinters.def
index ab48b4a..ca335df 100644
--- a/lib/clang/include/llvm/Config/AsmPrinters.def
+++ b/lib/clang/include/llvm/Config/AsmPrinters.def
@@ -1,7 +1,7 @@
/* $FreeBSD$ */
-LLVM_ASM_PRINTER(Mips)
LLVM_ASM_PRINTER(ARM)
+LLVM_ASM_PRINTER(Mips)
LLVM_ASM_PRINTER(PowerPC)
LLVM_ASM_PRINTER(X86)
diff --git a/lib/clang/include/llvm/Config/Disassemblers.def b/lib/clang/include/llvm/Config/Disassemblers.def
index 9d9093a..3a65fa4 100644
--- a/lib/clang/include/llvm/Config/Disassemblers.def
+++ b/lib/clang/include/llvm/Config/Disassemblers.def
@@ -1,5 +1,6 @@
/* $FreeBSD$ */
+LLVM_DISASSEMBLER(ARM)
LLVM_DISASSEMBLER(X86)
#undef LLVM_DISASSEMBLER
diff --git a/lib/clang/include/llvm/Config/Targets.def b/lib/clang/include/llvm/Config/Targets.def
index e33b41e..85c3ffe 100644
--- a/lib/clang/include/llvm/Config/Targets.def
+++ b/lib/clang/include/llvm/Config/Targets.def
@@ -1,7 +1,7 @@
/* $FreeBSD$ */
-LLVM_TARGET(Mips)
LLVM_TARGET(ARM)
+LLVM_TARGET(Mips)
LLVM_TARGET(PowerPC)
LLVM_TARGET(X86)
diff --git a/lib/clang/include/llvm/Config/config.h b/lib/clang/include/llvm/Config/config.h
index 0b2a9d4..6671224 100644
--- a/lib/clang/include/llvm/Config/config.h
+++ b/lib/clang/include/llvm/Config/config.h
@@ -5,6 +5,9 @@
#ifndef CONFIG_H
#define CONFIG_H
+/* Relative directory for resource files */
+#define CLANG_RESOURCE_DIR ""
+
/* 32 bit multilib directory. */
#define CXX_INCLUDE_32BIT_DIR ""
@@ -120,6 +123,9 @@
/* Define if the neat program is available */
/* #undef HAVE_FDP */
+/* Define to 1 if you have the <fenv.h> header file. */
+#define HAVE_FENV_H 1
+
/* Define if libffi is available on this platform. */
/* #undef HAVE_FFI_CALL */
@@ -150,9 +156,6 @@
/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1
-/* Have Darwin getsect() support */
-/* #undef HAVE_GETSECT */
-
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
@@ -226,9 +229,6 @@
/* Define to 1 if you have the <mach-o/dyld.h> header file. */
/* #undef HAVE_MACH_O_DYLD_H */
-/* Define to 1 if you have the <mach-o/getsect.h> header file. */
-/* #undef HAVE_MACH_O_GETSECT_H */
-
/* Define if mallinfo() is available on this platform. */
/* #undef HAVE_MALLINFO */
@@ -466,6 +466,9 @@
/* Define to 1 if you have the <windows.h> header file. */
/* #undef HAVE_WINDOWS_H */
+/* Define if the xdot.py program is available */
+/* #undef HAVE_XDOT_PY */
+
/* Define to 1 if you have the `__dso_handle' function. */
#define HAVE___DSO_HANDLE 1
@@ -473,10 +476,16 @@
/* #undef LLVM_MULTITHREADED */
/* LLVM architecture name for the native architecture, if available */
-#define LLVM_NATIVE_ARCH X86Target
+#define LLVM_NATIVE_ARCH X86
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
-/* Short LLVM architecture name for the native architecture, if available */
-#define LLVM_NATIVE_ARCHNAME X86
+/* LLVM name for the native Target init function, if available */
+#define LLVM_NATIVE_TARGET LLVMInitializeX86Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#define LLVM_NATIVE_TARGETINFO LLVMInitializeX86TargetInfo
/* Define if this is Unixish platform */
#define LLVM_ON_UNIX 1
@@ -508,6 +517,9 @@
/* Define to path to twopi program if found or 'echo twopi' otherwise */
/* #undef LLVM_PATH_TWOPI */
+/* Define to path to xdot.py program if found or 'echo xdot.py' otherwise */
+/* #undef LLVM_PATH_XDOT_PY */
+
/* Installation prefix directory */
#define LLVM_PREFIX "/usr"
@@ -542,13 +554,13 @@
#define PACKAGE_NAME "llvm"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "llvm 2.8svn"
+#define PACKAGE_STRING "llvm 2.9svn"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "-llvm-"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "2.8svn"
+#define PACKAGE_VERSION "2.9svn"
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
diff --git a/lib/clang/include/llvm/Config/llvm-config.h b/lib/clang/include/llvm/Config/llvm-config.h
index 1e11aab..04ff06f 100644
--- a/lib/clang/include/llvm/Config/llvm-config.h
+++ b/lib/clang/include/llvm/Config/llvm-config.h
@@ -22,10 +22,16 @@
/* #undef LLVM_MULTITHREADED */
/* LLVM architecture name for the native architecture, if available */
-#define LLVM_NATIVE_ARCH X86Target
+#define LLVM_NATIVE_ARCH X86
-/* Short LLVM architecture name for the native architecture, if available */
-#define LLVM_NATIVE_ARCHNAME X86
+/* LLVM name for the native Target init function, if available */
+#define LLVM_NATIVE_TARGET LLVMInitializeX86Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#define LLVM_NATIVE_TARGETINFO LLVMInitializeX86TargetInfo
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
/* Define if this is Unixish platform */
#define LLVM_ON_UNIX 1
diff --git a/lib/clang/include/llvm/System/DataTypes.h b/lib/clang/include/llvm/Support/DataTypes.h
index fcec26c..d0fdb7c 100644
--- a/lib/clang/include/llvm/System/DataTypes.h
+++ b/lib/clang/include/llvm/Support/DataTypes.h
@@ -1,5 +1,5 @@
/* $FreeBSD$ */
-/* include/llvm/System/DataTypes.h. Generated from DataTypes.h.in by configure. */
+/* include/llvm/Support/DataTypes.h. Generated from DataTypes.h.in by configure. */
/*===-- include/System/DataTypes.h - Define fixed size types -----*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
@@ -17,7 +17,7 @@
|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
|* *|
-|* No library is required when using these functinons. *|
+|* No library is required when using these functions. *|
|* *|
|*===----------------------------------------------------------------------===*/
@@ -65,7 +65,7 @@
#endif
#ifdef _AIX
-#include "llvm/System/AIXDataTypesFix.h"
+#include "llvm/Support/AIXDataTypesFix.h"
#endif
/* Handle incorrect definition of uint64_t as u_int64_t */
diff --git a/lib/clang/libclanganalysis/Makefile b/lib/clang/libclanganalysis/Makefile
index b3877d4..9f482e3 100644
--- a/lib/clang/libclanganalysis/Makefile
+++ b/lib/clang/libclanganalysis/Makefile
@@ -6,13 +6,15 @@ SRCDIR= tools/clang/lib/Analysis
SRCS= AnalysisContext.cpp \
CFG.cpp \
CFGStmtMap.cpp \
+ CocoaConventions.cpp \
FormatString.cpp \
LiveVariables.cpp \
PrintfFormatString.cpp \
PseudoConstantAnalysis.cpp \
ReachableCode.cpp \
ScanfFormatString.cpp \
- UninitializedValues.cpp
+ UninitializedValues.cpp \
+ UninitializedValuesV2.cpp
TGHDRS= AttrList \
Attrs \
diff --git a/lib/clang/libclangast/Makefile b/lib/clang/libclangast/Makefile
index 7e8731f..d68f477 100644
--- a/lib/clang/libclangast/Makefile
+++ b/lib/clang/libclangast/Makefile
@@ -19,14 +19,17 @@ SRCS= APValue.cpp \
DeclPrinter.cpp \
DeclTemplate.cpp \
DeclarationName.cpp \
+ DumpXML.cpp \
Expr.cpp \
ExprCXX.cpp \
ExprClassification.cpp \
ExprConstant.cpp \
- FullExpr.cpp \
InheritViz.cpp \
ItaniumCXXABI.cpp \
+ ItaniumMangle.cpp \
+ Mangle.cpp \
MicrosoftCXXABI.cpp \
+ MicrosoftMangle.cpp \
NestedNameSpecifier.cpp \
ParentMap.cpp \
RecordLayout.cpp \
@@ -49,6 +52,7 @@ TGHDRS= AttrImpl \
DeclNodes \
DiagnosticASTKinds \
DiagnosticCommonKinds \
+ DiagnosticSemaKinds \
StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangbasic/Makefile b/lib/clang/libclangbasic/Makefile
index 5e5c5b0..eec370c 100644
--- a/lib/clang/libclangbasic/Makefile
+++ b/lib/clang/libclangbasic/Makefile
@@ -6,7 +6,9 @@ SRCDIR= tools/clang/lib/Basic
SRCS= Builtins.cpp \
ConvertUTF.c \
Diagnostic.cpp \
+ DiagnosticIDs.cpp \
FileManager.cpp \
+ FileSystemStatCache.cpp \
IdentifierTable.cpp \
SourceLocation.cpp \
SourceManager.cpp \
diff --git a/lib/clang/libclangcodegen/Makefile b/lib/clang/libclangcodegen/Makefile
index 80f203a..c174343 100644
--- a/lib/clang/libclangcodegen/Makefile
+++ b/lib/clang/libclangcodegen/Makefile
@@ -7,8 +7,10 @@ SRCS= BackendUtil.cpp \
CGBlocks.cpp \
CGBuiltin.cpp \
CGCXX.cpp \
+ CGCXXABI.cpp \
CGCall.cpp \
CGClass.cpp \
+ CGCleanup.cpp \
CGDebugInfo.cpp \
CGDecl.cpp \
CGDeclCXX.cpp \
@@ -31,9 +33,9 @@ SRCS= BackendUtil.cpp \
CodeGenAction.cpp \
CodeGenFunction.cpp \
CodeGenModule.cpp \
+ CodeGenTBAA.cpp \
CodeGenTypes.cpp \
ItaniumCXXABI.cpp \
- Mangle.cpp \
MicrosoftCXXABI.cpp \
ModuleBuilder.cpp \
TargetInfo.cpp
diff --git a/lib/clang/libclangfrontend/Makefile b/lib/clang/libclangfrontend/Makefile
index d655e44..45e064b 100644
--- a/lib/clang/libclangfrontend/Makefile
+++ b/lib/clang/libclangfrontend/Makefile
@@ -17,9 +17,11 @@ SRCS= ASTConsumers.cpp \
FrontendAction.cpp \
FrontendActions.cpp \
FrontendOptions.cpp \
+ HeaderIncludeGen.cpp \
InitHeaderSearch.cpp \
InitPreprocessor.cpp \
LangStandards.cpp \
+ MultiplexConsumer.cpp \
PrintPreprocessedOutput.cpp \
StmtXML.cpp \
TextDiagnosticBuffer.cpp \
diff --git a/lib/clang/libclanglex/Makefile b/lib/clang/libclanglex/Makefile
index 989be0d..7e659c6 100644
--- a/lib/clang/libclanglex/Makefile
+++ b/lib/clang/libclanglex/Makefile
@@ -23,7 +23,8 @@ SRCS= HeaderMap.cpp \
TokenConcatenation.cpp \
TokenLexer.cpp
-TGHDRS= DiagnosticCommonKinds \
+TGHDRS= AttrSpellings \
+ DiagnosticCommonKinds \
DiagnosticLexKinds
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangsema/Makefile b/lib/clang/libclangsema/Makefile
index 96571cc..1425929 100644
--- a/lib/clang/libclangsema/Makefile
+++ b/lib/clang/libclangsema/Makefile
@@ -33,6 +33,7 @@ SRCS= AnalysisBasedWarnings.cpp \
SemaTemplateDeduction.cpp \
SemaTemplateInstantiate.cpp \
SemaTemplateInstantiateDecl.cpp \
+ SemaTemplateVariadic.cpp \
SemaType.cpp \
TargetAttributesSema.cpp
diff --git a/lib/clang/libclangchecker/Makefile b/lib/clang/libclangstaticanalyzercheckers/Makefile
index c7d2d71..8dca68e 100644
--- a/lib/clang/libclangchecker/Makefile
+++ b/lib/clang/libclangstaticanalyzercheckers/Makefile
@@ -1,76 +1,50 @@
# $FreeBSD$
-LIB= clangchecker
+LIB= clangstaticanalyzercheckers
-SRCDIR= tools/clang/lib/Checker
+SRCDIR= tools/clang/lib/StaticAnalyzer/Checkers
SRCS= AdjustedReturnValueChecker.cpp \
- AggExprVisitor.cpp \
- AnalysisConsumer.cpp \
- AnalysisManager.cpp \
+ AnalyzerStatsChecker.cpp \
ArrayBoundChecker.cpp \
+ ArrayBoundCheckerV2.cpp \
AttrNonNullChecker.cpp \
- BasicConstraintManager.cpp \
BasicObjCFoundationChecks.cpp \
- BasicStore.cpp \
- BasicValueFactory.cpp \
- BugReporter.cpp \
- BugReporterVisitors.cpp \
BuiltinFunctionChecker.cpp \
- CFRefCount.cpp \
CStringChecker.cpp \
CallAndMessageChecker.cpp \
CastSizeChecker.cpp \
CastToStructChecker.cpp \
- CheckDeadStores.cpp \
CheckObjCDealloc.cpp \
CheckObjCInstMethSignature.cpp \
CheckSecuritySyntaxOnly.cpp \
CheckSizeofPointer.cpp \
- Checker.cpp \
- CheckerHelpers.cpp \
- CocoaConventions.cpp \
+ ChrootChecker.cpp \
+ ClangSACheckerProvider.cpp \
+ DeadStoresChecker.cpp \
+ DebugCheckers.cpp \
DereferenceChecker.cpp \
DivZeroChecker.cpp \
- Environment.cpp \
- ExplodedGraph.cpp \
+ ExperimentalChecks.cpp \
+ ExprEngine.cpp \
FixedAddressChecker.cpp \
- FlatStore.cpp \
- FrontendActions.cpp \
- GRBlockCounter.cpp \
- GRCXXExprEngine.cpp \
- GRCoreEngine.cpp \
- GRExprEngine.cpp \
- GRExprEngineExperimentalChecks.cpp \
- GRState.cpp \
- HTMLDiagnostics.cpp \
IdempotentOperationChecker.cpp \
LLVMConventionsChecker.cpp \
MacOSXAPIChecker.cpp \
MallocChecker.cpp \
- ManagerRegistry.cpp \
- MemRegion.cpp \
NSAutoreleasePoolChecker.cpp \
NSErrorChecker.cpp \
NoReturnFunctionChecker.cpp \
OSAtomicChecker.cpp \
+ ObjCAtSyncChecker.cpp \
+ ObjCSelfInitChecker.cpp \
ObjCUnusedIVarsChecker.cpp \
- PathDiagnostic.cpp \
- PlistDiagnostics.cpp \
PointerArithChecker.cpp \
PointerSubChecker.cpp \
PthreadLockChecker.cpp \
- RangeConstraintManager.cpp \
- RegionStore.cpp \
ReturnPointerRangeChecker.cpp \
ReturnUndefChecker.cpp \
- SVals.cpp \
- SValuator.cpp \
- SimpleConstraintManager.cpp \
- SimpleSValuator.cpp \
StackAddrLeakChecker.cpp \
- Store.cpp \
StreamChecker.cpp \
- SymbolManager.cpp \
UndefBranchChecker.cpp \
UndefCapturedBlockVarChecker.cpp \
UndefResultChecker.cpp \
@@ -78,11 +52,11 @@ SRCS= AdjustedReturnValueChecker.cpp \
UndefinedAssignmentChecker.cpp \
UnixAPIChecker.cpp \
UnreachableCodeChecker.cpp \
- VLASizeChecker.cpp \
- ValueManager.cpp
+ VLASizeChecker.cpp
TGHDRS= AttrList \
Attrs \
+ Checkers \
DeclNodes \
DiagnosticCommonKinds \
StmtNodes
diff --git a/lib/clang/libclangstaticanalyzercore/Makefile b/lib/clang/libclangstaticanalyzercore/Makefile
new file mode 100644
index 0000000..59df789
--- /dev/null
+++ b/lib/clang/libclangstaticanalyzercore/Makefile
@@ -0,0 +1,45 @@
+# $FreeBSD$
+
+LIB= clangstaticanalyzercore
+
+SRCDIR= tools/clang/lib/StaticAnalyzer/Core
+SRCS= AggExprVisitor.cpp \
+ AnalysisManager.cpp \
+ BasicConstraintManager.cpp \
+ BasicStore.cpp \
+ BasicValueFactory.cpp \
+ BlockCounter.cpp \
+ BugReporter.cpp \
+ BugReporterVisitors.cpp \
+ CFRefCount.cpp \
+ CXXExprEngine.cpp \
+ Checker.cpp \
+ CheckerHelpers.cpp \
+ CheckerManager.cpp \
+ CoreEngine.cpp \
+ Environment.cpp \
+ ExplodedGraph.cpp \
+ FlatStore.cpp \
+ GRState.cpp \
+ HTMLDiagnostics.cpp \
+ MemRegion.cpp \
+ ObjCMessage.cpp \
+ PathDiagnostic.cpp \
+ PlistDiagnostics.cpp \
+ RangeConstraintManager.cpp \
+ RegionStore.cpp \
+ SValBuilder.cpp \
+ SVals.cpp \
+ SimpleConstraintManager.cpp \
+ SimpleSValBuilder.cpp \
+ Store.cpp \
+ SymbolManager.cpp \
+ TextPathDiagnostics.cpp
+
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticCommonKinds \
+ StmtNodes
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libclangstaticanalyzerfrontend/Makefile b/lib/clang/libclangstaticanalyzerfrontend/Makefile
new file mode 100644
index 0000000..1256743
--- /dev/null
+++ b/lib/clang/libclangstaticanalyzerfrontend/Makefile
@@ -0,0 +1,18 @@
+# $FreeBSD$
+
+LIB= clangstaticanalyzerfrontend
+
+SRCDIR= tools/clang/lib/StaticAnalyzer/Frontend
+SRCS= AnalysisConsumer.cpp \
+ CheckerRegistration.cpp \
+ FrontendActions.cpp
+
+TGHDRS= AttrList \
+ Attrs \
+ Checkers \
+ DeclNodes \
+ DiagnosticCommonKinds \
+ DiagnosticFrontendKinds \
+ StmtNodes
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmanalysis/Makefile b/lib/clang/libllvmanalysis/Makefile
index 39881aa..eb2043e 100644
--- a/lib/clang/libllvmanalysis/Makefile
+++ b/lib/clang/libllvmanalysis/Makefile
@@ -8,15 +8,19 @@ SRCS= AliasAnalysis.cpp \
AliasAnalysisEvaluator.cpp \
AliasDebugger.cpp \
AliasSetTracker.cpp \
+ Analysis.cpp \
BasicAliasAnalysis.cpp \
CFGPrinter.cpp \
CaptureTracking.cpp \
ConstantFolding.cpp \
DbgInfoPrinter.cpp \
DebugInfo.cpp \
+ DIBuilder.cpp \
DomPrinter.cpp \
+ DominanceFrontier.cpp \
IVUsers.cpp \
InlineCost.cpp \
+ InstCount.cpp \
InstructionSimplify.cpp \
Interval.cpp \
IntervalPartition.cpp \
@@ -28,11 +32,12 @@ SRCS= AliasAnalysis.cpp \
LoopDependenceAnalysis.cpp \
LoopInfo.cpp \
LoopPass.cpp \
+ MemDepPrinter.cpp \
MemoryBuiltins.cpp \
MemoryDependenceAnalysis.cpp \
ModuleDebugInfoPrinter.cpp \
+ NoAliasAnalysis.cpp \
PHITransAddr.cpp \
- PointerTracking.cpp \
PostDominators.cpp \
ProfileEstimatorPass.cpp \
ProfileInfo.cpp \
@@ -40,12 +45,14 @@ SRCS= AliasAnalysis.cpp \
ProfileInfoLoaderPass.cpp \
ProfileVerifierPass.cpp \
RegionInfo.cpp \
+ RegionPass.cpp \
RegionPrinter.cpp \
ScalarEvolution.cpp \
ScalarEvolutionAliasAnalysis.cpp \
ScalarEvolutionExpander.cpp \
ScalarEvolutionNormalization.cpp \
SparsePropagation.cpp \
+ TypeBasedAliasAnalysis.cpp \
ValueTracking.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmarmcodegen/Makefile b/lib/clang/libllvmarmcodegen/Makefile
index 4120251..c19a451 100644
--- a/lib/clang/libllvmarmcodegen/Makefile
+++ b/lib/clang/libllvmarmcodegen/Makefile
@@ -3,19 +3,25 @@
LIB= llvmarmcodegen
SRCDIR= lib/Target/ARM
-SRCS= ARMAsmPrinter.cpp \
+SRCS= ARMAsmBackend.cpp \
+ ARMAsmPrinter.cpp \
ARMBaseInstrInfo.cpp \
ARMBaseRegisterInfo.cpp \
ARMCodeEmitter.cpp \
ARMConstantIslandPass.cpp \
ARMConstantPoolValue.cpp \
+ ARMELFWriterInfo.cpp \
ARMExpandPseudoInsts.cpp \
ARMFastISel.cpp \
+ ARMFrameLowering.cpp \
ARMGlobalMerge.cpp \
+ ARMHazardRecognizer.cpp \
ARMISelDAGToDAG.cpp \
ARMISelLowering.cpp \
ARMInstrInfo.cpp \
ARMJITInfo.cpp \
+ ARMMCCodeEmitter.cpp \
+ ARMMCExpr.cpp \
ARMLoadStoreOptimizer.cpp \
ARMMCAsmInfo.cpp \
ARMMCInstLower.cpp \
@@ -24,11 +30,11 @@ SRCS= ARMAsmPrinter.cpp \
ARMSubtarget.cpp \
ARMTargetMachine.cpp \
ARMTargetObjectFile.cpp \
+ MLxExpansionPass.cpp \
NEONMoveFix.cpp \
- NEONPreAllocPass.cpp \
+ Thumb1FrameLowering.cpp \
Thumb1InstrInfo.cpp \
Thumb1RegisterInfo.cpp \
- Thumb2HazardRecognizer.cpp \
Thumb2ITBlockPass.cpp \
Thumb2InstrInfo.cpp \
Thumb2RegisterInfo.cpp \
@@ -41,6 +47,7 @@ TGHDRS= ARMGenAsmWriter \
ARMGenFastISel \
ARMGenInstrInfo \
ARMGenInstrNames \
+ ARMGenMCCodeEmitter \
ARMGenRegisterInfo \
ARMGenRegisterInfo.h \
ARMGenRegisterNames \
diff --git a/lib/clang/libllvmarmdisassembler/Makefile b/lib/clang/libllvmarmdisassembler/Makefile
new file mode 100644
index 0000000..c824560
--- /dev/null
+++ b/lib/clang/libllvmarmdisassembler/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+LIB= llvmarmdisassembler
+
+SRCDIR= lib/Target/ARM/Disassembler
+INCDIR= lib/Target/ARM
+SRCS= ARMDisassemblerCore.cpp
+
+TGHDRS= ARMGenDecoderTables \
+ ARMGenEDInfo \
+ ARMGenInstrInfo \
+ ARMGenInstrNames \
+ ARMGenRegisterInfo.h \
+ ARMGenRegisterNames
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmarmasmprinter/Makefile b/lib/clang/libllvmarminstprinter/Makefile
index bd333a7..630f12a 100644
--- a/lib/clang/libllvmarmasmprinter/Makefile
+++ b/lib/clang/libllvmarminstprinter/Makefile
@@ -1,8 +1,8 @@
# $FreeBSD$
-LIB= llvmarmasmprinter
+LIB= llvmarminstprinter
-SRCDIR= lib/Target/ARM/AsmPrinter
+SRCDIR= lib/Target/ARM/InstPrinter
INCDIR= lib/Target/ARM
SRCS= ARMInstPrinter.cpp
diff --git a/lib/clang/libllvmasmprinter/Makefile b/lib/clang/libllvmasmprinter/Makefile
index 37cceec..35f942c 100644
--- a/lib/clang/libllvmasmprinter/Makefile
+++ b/lib/clang/libllvmasmprinter/Makefile
@@ -7,8 +7,10 @@ SRCS= AsmPrinter.cpp \
AsmPrinterDwarf.cpp \
AsmPrinterInlineAsm.cpp \
DIE.cpp \
+ DwarfCFIException.cpp \
DwarfDebug.cpp \
DwarfException.cpp \
+ DwarfTableException.cpp \
OcamlGCPrinter.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmcodegen/Makefile b/lib/clang/libllvmcodegen/Makefile
index 4a450bd..bd9b707 100644
--- a/lib/clang/libllvmcodegen/Makefile
+++ b/lib/clang/libllvmcodegen/Makefile
@@ -14,6 +14,8 @@ SRCS= AggressiveAntiDepBreaker.cpp \
DwarfEHPrepare.cpp \
ELFCodeEmitter.cpp \
ELFWriter.cpp \
+ EdgeBundles.cpp \
+ ExpandISelPseudos.cpp \
GCMetadata.cpp \
GCStrategy.cpp \
IfConversion.cpp \
@@ -21,10 +23,13 @@ SRCS= AggressiveAntiDepBreaker.cpp \
IntrinsicLowering.cpp \
LLVMTargetMachine.cpp \
LatencyPriorityQueue.cpp \
+ LiveDebugVariables.cpp \
LiveInterval.cpp \
LiveIntervalAnalysis.cpp \
+ LiveIntervalUnion.cpp \
LiveStackAnalysis.cpp \
LiveVariables.cpp \
+ LiveRangeEdit.cpp \
LocalStackSlotAllocation.cpp \
LowerSubregs.cpp \
MachineBasicBlock.cpp \
@@ -48,14 +53,15 @@ SRCS= AggressiveAntiDepBreaker.cpp \
OcamlGC.cpp \
OptimizePHIs.cpp \
PHIElimination.cpp \
+ PHIEliminationUtils.cpp \
Passes.cpp \
PeepholeOptimizer.cpp \
- PostRAHazardRecognizer.cpp \
PostRASchedulerList.cpp \
PreAllocSplitting.cpp \
ProcessImplicitDefs.cpp \
PrologEpilogInserter.cpp \
PseudoSourceValue.cpp \
+ RegAllocBasic.cpp \
RegAllocFast.cpp \
RegAllocLinearScan.cpp \
RegAllocPBQP.cpp \
@@ -66,6 +72,7 @@ SRCS= AggressiveAntiDepBreaker.cpp \
ScheduleDAGEmit.cpp \
ScheduleDAGInstrs.cpp \
ScheduleDAGPrinter.cpp \
+ ScoreboardHazardRecognizer.cpp \
ShadowStackGC.cpp \
ShrinkWrapping.cpp \
SimpleRegisterCoalescing.cpp \
diff --git a/lib/clang/libllvmcore/Makefile b/lib/clang/libllvmcore/Makefile
index 08822e9..961f15b 100644
--- a/lib/clang/libllvmcore/Makefile
+++ b/lib/clang/libllvmcore/Makefile
@@ -32,6 +32,7 @@ SRCS= AsmWriter.cpp \
Type.cpp \
TypeSymbolTable.cpp \
Use.cpp \
+ User.cpp \
Value.cpp \
ValueSymbolTable.cpp \
ValueTypes.cpp \
diff --git a/lib/clang/libllvmipo/Makefile b/lib/clang/libllvmipo/Makefile
index 87070ec..5653684 100644
--- a/lib/clang/libllvmipo/Makefile
+++ b/lib/clang/libllvmipo/Makefile
@@ -20,7 +20,6 @@ SRCS= ArgumentPromotion.cpp \
LowerSetJmp.cpp \
MergeFunctions.cpp \
PartialInlining.cpp \
- PartialSpecialization.cpp \
PruneEH.cpp \
StripDeadPrototypes.cpp \
StripSymbols.cpp \
diff --git a/lib/clang/libllvmmc/Makefile b/lib/clang/libllvmmc/Makefile
index ed281cb..5bbff4b 100644
--- a/lib/clang/libllvmmc/Makefile
+++ b/lib/clang/libllvmmc/Makefile
@@ -11,15 +11,19 @@ SRCS= ELFObjectWriter.cpp \
MCAssembler.cpp \
MCCodeEmitter.cpp \
MCContext.cpp \
+ MCDwarf.cpp \
+ MCELFObjectTargetWriter.cpp \
MCELFStreamer.cpp \
MCExpr.cpp \
MCInst.cpp \
MCInstPrinter.cpp \
MCLoggingStreamer.cpp \
MCMachOStreamer.cpp \
+ MCMachObjectTargetWriter.cpp \
MCNullStreamer.cpp \
MCObjectStreamer.cpp \
MCObjectWriter.cpp \
+ MCPureStreamer.cpp \
MCSection.cpp \
MCSectionCOFF.cpp \
MCSectionELF.cpp \
diff --git a/lib/clang/libllvmmcparser/Makefile b/lib/clang/libllvmmcparser/Makefile
index 9354af5..4f2f8bf 100644
--- a/lib/clang/libllvmmcparser/Makefile
+++ b/lib/clang/libllvmmcparser/Makefile
@@ -5,6 +5,7 @@ LIB= llvmmcparser
SRCDIR= lib/MC/MCParser
SRCS= AsmLexer.cpp \
AsmParser.cpp \
+ COFFAsmParser.cpp \
DarwinAsmParser.cpp \
ELFAsmParser.cpp \
MCAsmLexer.cpp \
diff --git a/lib/clang/libllvmmipsasmprinter/Makefile b/lib/clang/libllvmmipsasmprinter/Makefile
deleted file mode 100644
index 6c21785..0000000
--- a/lib/clang/libllvmmipsasmprinter/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# $FreeBSD$
-
-LIB= llvmmipsasmprinter
-
-SRCDIR= lib/Target/Mips/AsmPrinter
-INCDIR= lib/Target/Mips
-SRCS= MipsAsmPrinter.cpp
-
-TGHDRS= MipsGenAsmWriter \
- MipsGenInstrNames \
- MipsGenRegisterInfo.h \
- MipsGenRegisterNames
-
-.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipscodegen/Makefile b/lib/clang/libllvmmipscodegen/Makefile
index 5fd50bd..ce84aa4 100644
--- a/lib/clang/libllvmmipscodegen/Makefile
+++ b/lib/clang/libllvmmipscodegen/Makefile
@@ -3,7 +3,9 @@
LIB= llvmmipscodegen
SRCDIR= lib/Target/Mips
-SRCS= MipsDelaySlotFiller.cpp \
+SRCS= MipsAsmPrinter.cpp \
+ MipsDelaySlotFiller.cpp \
+ MipsFrameLowering.cpp \
MipsISelDAGToDAG.cpp \
MipsISelLowering.cpp \
MipsInstrInfo.cpp \
@@ -15,6 +17,7 @@ SRCS= MipsDelaySlotFiller.cpp \
MipsTargetObjectFile.cpp
TGHDRS= Intrinsics \
+ MipsGenAsmWriter \
MipsGenCallingConv \
MipsGenDAGISel \
MipsGenInstrInfo \
diff --git a/lib/clang/libllvmpowerpccodegen/Makefile b/lib/clang/libllvmpowerpccodegen/Makefile
index 58b01c9..7d43ae1 100644
--- a/lib/clang/libllvmpowerpccodegen/Makefile
+++ b/lib/clang/libllvmpowerpccodegen/Makefile
@@ -3,14 +3,19 @@
LIB= llvmpowerpccodegen
SRCDIR= lib/Target/PowerPC
-SRCS= PPCBranchSelector.cpp \
+SRCS= PPCAsmBackend.cpp \
+ PPCAsmPrinter.cpp \
+ PPCBranchSelector.cpp \
PPCCodeEmitter.cpp \
+ PPCFrameLowering.cpp \
PPCHazardRecognizers.cpp \
PPCISelDAGToDAG.cpp \
PPCISelLowering.cpp \
PPCInstrInfo.cpp \
PPCJITInfo.cpp \
PPCMCAsmInfo.cpp \
+ PPCMCCodeEmitter.cpp \
+ PPCMCInstLower.cpp \
PPCPredicates.cpp \
PPCRegisterInfo.cpp \
PPCSelectionDAGInfo.cpp \
@@ -23,6 +28,7 @@ TGHDRS= Intrinsics \
PPCGenDAGISel \
PPCGenInstrInfo \
PPCGenInstrNames \
+ PPCGenMCCodeEmitter \
PPCGenRegisterInfo \
PPCGenRegisterInfo.h \
PPCGenRegisterNames \
diff --git a/lib/clang/libllvmpowerpcasmprinter/Makefile b/lib/clang/libllvmpowerpcinstprinter/Makefile
index 15559b3..8be81c4 100644
--- a/lib/clang/libllvmpowerpcasmprinter/Makefile
+++ b/lib/clang/libllvmpowerpcinstprinter/Makefile
@@ -1,10 +1,10 @@
# $FreeBSD$
-LIB= llvmpowerpcasmprinter
+LIB= llvmpowerpcinstprinter
-SRCDIR= lib/Target/PowerPC/AsmPrinter
+SRCDIR= lib/Target/PowerPC/InstPrinter
INCDIR= lib/Target/PowerPC
-SRCS= PPCAsmPrinter.cpp
+SRCS= PPCInstPrinter.cpp
TGHDRS= PPCGenAsmWriter \
PPCGenInstrNames \
diff --git a/lib/clang/libllvmscalaropts/Makefile b/lib/clang/libllvmscalaropts/Makefile
index f057549..e4824b5 100644
--- a/lib/clang/libllvmscalaropts/Makefile
+++ b/lib/clang/libllvmscalaropts/Makefile
@@ -10,13 +10,14 @@ SRCS= ADCE.cpp \
CorrelatedValuePropagation.cpp \
DCE.cpp \
DeadStoreElimination.cpp \
+ EarlyCSE.cpp \
GEPSplitter.cpp \
GVN.cpp \
IndVarSimplify.cpp \
JumpThreading.cpp \
LICM.cpp \
LoopDeletion.cpp \
- LoopIndexSplit.cpp \
+ LoopIdiomRecognize.cpp \
LoopRotation.cpp \
LoopStrengthReduce.cpp \
LoopUnrollPass.cpp \
diff --git a/lib/clang/libllvmsupport/Makefile b/lib/clang/libllvmsupport/Makefile
index e26da0a..72f5c57 100644
--- a/lib/clang/libllvmsupport/Makefile
+++ b/lib/clang/libllvmsupport/Makefile
@@ -7,6 +7,7 @@ SRCS= APFloat.cpp \
APInt.cpp \
APSInt.cpp \
Allocator.cpp \
+ Atomic.cpp \
CommandLine.cpp \
ConstantRange.cpp \
CrashRecoveryContext.cpp \
@@ -14,15 +15,29 @@ SRCS= APFloat.cpp \
Debug.cpp \
DeltaAlgorithm.cpp \
Dwarf.cpp \
+ DynamicLibrary.cpp \
+ Errno.cpp \
ErrorHandling.cpp \
FoldingSet.cpp \
FormattedStream.cpp \
GraphWriter.cpp \
+ Host.cpp \
+ IntervalMap.cpp \
+ IntEqClasses.cpp \
ManagedStatic.cpp \
+ Memory.cpp \
MemoryBuffer.cpp \
+ Mutex.cpp \
+ Path.cpp \
+ PathV2.cpp \
PluginLoader.cpp \
PrettyStackTrace.cpp \
+ Process.cpp \
+ Program.cpp \
+ RWMutex.cpp \
Regex.cpp \
+ SearchForAddressOfSpecialSymbol.cpp \
+ Signals.cpp \
SmallPtrSet.cpp \
SmallVector.cpp \
SourceMgr.cpp \
@@ -32,9 +47,14 @@ SRCS= APFloat.cpp \
StringPool.cpp \
StringRef.cpp \
TargetRegistry.cpp \
+ ThreadLocal.cpp \
+ Threading.cpp \
+ TimeValue.cpp \
Timer.cpp \
+ ToolOutputFile.cpp \
Triple.cpp \
Twine.cpp \
+ Valgrind.cpp \
circular_raw_ostream.cpp \
raw_os_ostream.cpp \
raw_ostream.cpp \
@@ -42,7 +62,8 @@ SRCS= APFloat.cpp \
regerror.c \
regexec.c \
regfree.c \
- regstrlcpy.c
+ regstrlcpy.c \
+ system_error.cpp
LLVM_REQUIRES_RTTI=
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmsystem/Makefile b/lib/clang/libllvmsystem/Makefile
deleted file mode 100644
index 99957f6..0000000
--- a/lib/clang/libllvmsystem/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# $FreeBSD$
-
-LIB= llvmsystem
-
-SRCDIR= lib/System
-SRCS= Atomic.cpp \
- DynamicLibrary.cpp \
- Errno.cpp \
- Host.cpp \
- Memory.cpp \
- Mutex.cpp \
- Path.cpp \
- Process.cpp \
- Program.cpp \
- RWMutex.cpp \
- SearchForAddressOfSpecialSymbol.cpp \
- Signals.cpp \
- ThreadLocal.cpp \
- Threading.cpp \
- TimeValue.cpp \
- Valgrind.cpp
-LLVM_REQUIRES_RTTI=
-
-.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmtarget/Makefile b/lib/clang/libllvmtarget/Makefile
index 71306a3..56544d1 100644
--- a/lib/clang/libllvmtarget/Makefile
+++ b/lib/clang/libllvmtarget/Makefile
@@ -6,11 +6,13 @@ SRCDIR= lib/Target
SRCS= Mangler.cpp \
SubtargetFeature.cpp \
Target.cpp \
+ TargetAsmInfo.cpp \
TargetAsmLexer.cpp \
TargetData.cpp \
TargetELFWriterInfo.cpp \
- TargetFrameInfo.cpp \
+ TargetFrameLowering.cpp \
TargetInstrInfo.cpp \
+ TargetLibraryInfo.cpp \
TargetLoweringObjectFile.cpp \
TargetMachine.cpp \
TargetRegisterInfo.cpp \
diff --git a/lib/clang/libllvmx86codegen/Makefile b/lib/clang/libllvmx86codegen/Makefile
index 4f7e8d1..300ad9a 100644
--- a/lib/clang/libllvmx86codegen/Makefile
+++ b/lib/clang/libllvmx86codegen/Makefile
@@ -11,6 +11,7 @@ SRCS= SSEDomainFix.cpp \
X86ELFWriterInfo.cpp \
X86FastISel.cpp \
X86FloatingPoint.cpp \
+ X86FrameLowering.cpp \
X86ISelDAGToDAG.cpp \
X86ISelLowering.cpp \
X86InstrInfo.cpp \
@@ -18,6 +19,7 @@ SRCS= SSEDomainFix.cpp \
X86MCAsmInfo.cpp \
X86MCCodeEmitter.cpp \
X86MCInstLower.cpp \
+ X86MachObjectWriter.cpp \
X86RegisterInfo.cpp \
X86SelectionDAGInfo.cpp \
X86Subtarget.cpp \
diff --git a/lib/clang/libllvmx86asmprinter/Makefile b/lib/clang/libllvmx86instprinter/Makefile
index 75a3d1a..dda091e 100644
--- a/lib/clang/libllvmx86asmprinter/Makefile
+++ b/lib/clang/libllvmx86instprinter/Makefile
@@ -1,8 +1,8 @@
# $FreeBSD$
-LIB= llvmx86asmprinter
+LIB= llvmx86instprinter
-SRCDIR= lib/Target/X86/AsmPrinter
+SRCDIR= lib/Target/X86/InstPrinter
INCDIR= lib/Target/X86
SRCS= X86ATTInstPrinter.cpp \
X86InstComments.cpp \
diff --git a/lib/clang/libllvmx86utils/Makefile b/lib/clang/libllvmx86utils/Makefile
new file mode 100644
index 0000000..bbed6b0
--- /dev/null
+++ b/lib/clang/libllvmx86utils/Makefile
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+LIB= llvmx86utils
+
+SRCDIR= lib/Target/X86/Utils
+INCDIR= lib/Target/X86
+SRCS= X86ShuffleDecode.cpp
+
+.include "../clang.lib.mk"
diff --git a/lib/libz/Makefile b/lib/libz/Makefile
index 1343c6f..c03a1c3 100644
--- a/lib/libz/Makefile
+++ b/lib/libz/Makefile
@@ -47,6 +47,10 @@ ACFLAGS+= -Wa,--noexecstack
SRCS+= gvmat64.S
CFLAGS+= -DASMV -DNO_UNDERLINE
ACFLAGS+= -Wa,--noexecstack
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .intel_syntax directives yet
+ACFLAGS+= ${.IMPSRC:T:Mgvmat64.S:C/^.+$/-no-integrated-as/}
+.endif
.endif
VERSION_DEF= ${.CURDIR}/Versions.def
diff --git a/sys/boot/i386/boot0/Makefile b/sys/boot/i386/boot0/Makefile
index fdc527c..4013983 100644
--- a/sys/boot/i386/boot0/Makefile
+++ b/sys/boot/i386/boot0/Makefile
@@ -19,6 +19,12 @@ SRCS= ${PROG}.S
OPTS ?= -DVOLUME_SERIAL -DPXE
CFLAGS += ${OPTS}
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mboot0.S:C/^.+$/-no-integrated-as/}
+CFLAGS+= ${.IMPSRC:T:Mboot0ext.S:C/^.+$/-no-integrated-as/}
+.endif
+
# Flags used in the boot0.S code:
# 0x0f all valid partitions enabled.
# 0x80 'packet', use BIOS EDD (LBA) extensions instead of CHS
diff --git a/sys/boot/i386/btx/btx/Makefile b/sys/boot/i386/btx/btx/Makefile
index c49540a..4c74c1d 100644
--- a/sys/boot/i386/btx/btx/Makefile
+++ b/sys/boot/i386/btx/btx/Makefile
@@ -13,6 +13,11 @@ BOOT_BTX_FLAGS=0x0
CFLAGS+=-DBTX_FLAGS=${BOOT_BTX_FLAGS}
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+=${.IMPSRC:T:Mbtx.S:C/^.+$/-no-integrated-as/}
+.endif
+
.if defined(BTX_SERIAL)
BOOT_COMCONSOLE_PORT?= 0x3f8
BOOT_COMCONSOLE_SPEED?= 9600
diff --git a/sys/boot/i386/btx/btxldr/Makefile b/sys/boot/i386/btx/btxldr/Makefile
index 2d6cec4..0e12f46 100644
--- a/sys/boot/i386/btx/btxldr/Makefile
+++ b/sys/boot/i386/btx/btxldr/Makefile
@@ -11,6 +11,11 @@ CFLAGS+=-DLOADER_ADDRESS=${LOADER_ADDRESS}
CFLAGS+=-DBTXLDR_VERBOSE
.endif
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+=${.IMPSRC:T:Mbtxldr.S:C/^.+$/-no-integrated-as/}
+.endif
+
LDFLAGS=-e start -Ttext ${LOADER_ADDRESS} -Wl,-N,-S,--oformat,binary
.include <bsd.prog.mk>
diff --git a/sys/boot/i386/gptboot/Makefile b/sys/boot/i386/gptboot/Makefile
index 452b100..f214e06 100644
--- a/sys/boot/i386/gptboot/Makefile
+++ b/sys/boot/i386/gptboot/Makefile
@@ -41,6 +41,13 @@ CFLAGS= -DBOOTPROG=\"gptboot\" \
-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
-Winline --param max-inline-insns-single=100
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mgptldr.S:C/^.+$/-no-integrated-as/}
+# XXX: clang integrated-as doesn't grok some 16-bit instructions yet
+CFLAGS+= ${.IMPSRC:T:Msio.S:C/^.+$/-no-integrated-as/}
+.endif
+
LDFLAGS=-static -N --gc-sections
# Pick up ../Makefile.inc early.
diff --git a/sys/boot/i386/gptzfsboot/Makefile b/sys/boot/i386/gptzfsboot/Makefile
index 7f5f287..e73300dc 100644
--- a/sys/boot/i386/gptzfsboot/Makefile
+++ b/sys/boot/i386/gptzfsboot/Makefile
@@ -38,6 +38,13 @@ CFLAGS= -DBOOTPROG=\"gptzfsboot\" \
-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
-Winline --param max-inline-insns-single=100
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mgptldr.S:C/^.+$/-no-integrated-as/}
+# XXX: clang integrated-as doesn't grok some 16-bit instructions yet
+CFLAGS+= ${.IMPSRC:T:Msio.S:C/^.+$/-no-integrated-as/}
+.endif
+
LDFLAGS=-static -N --gc-sections
# Pick up ../Makefile.inc early.
diff --git a/sys/boot/i386/libi386/Makefile b/sys/boot/i386/libi386/Makefile
index 3b89784..7940471 100644
--- a/sys/boot/i386/libi386/Makefile
+++ b/sys/boot/i386/libi386/Makefile
@@ -53,6 +53,11 @@ CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../btx/lib \
# the location of libstand
CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mamd64_tramp.S:C/^.+$/-no-integrated-as/}
+.endif
+
.if ${MACHINE_CPUARCH} == "amd64"
CLEANFILES+= machine
machine:
diff --git a/sys/boot/i386/pxeldr/Makefile b/sys/boot/i386/pxeldr/Makefile
index db372bb..0290632 100644
--- a/sys/boot/i386/pxeldr/Makefile
+++ b/sys/boot/i386/pxeldr/Makefile
@@ -23,6 +23,11 @@ CFLAGS+=-DPROBE_KEYBOARD
CFLAGS+=-DALWAYS_SERIAL
.endif
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+=${.IMPSRC:T:Mpxeldr.S:C/^.+$/-no-integrated-as/}
+.endif
+
LOADERBIN= ${.OBJDIR}/../loader/loader.bin
CLEANFILES+= ${BOOT}.tmp
diff --git a/sys/boot/i386/zfsboot/Makefile b/sys/boot/i386/zfsboot/Makefile
index 38e8a9f..c5c6791 100644
--- a/sys/boot/i386/zfsboot/Makefile
+++ b/sys/boot/i386/zfsboot/Makefile
@@ -40,6 +40,13 @@ CFLAGS= -DBOOTPROG=\"zfsboot\" \
-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
-Winline --param max-inline-insns-single=100
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok some 16-bit instructions yet
+CFLAGS+= ${.IMPSRC:T:Msio.S:C/^.+$/-no-integrated-as/}
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mzfsldr.S:C/^.+$/-no-integrated-as/}
+.endif
+
LDFLAGS=-static -N --gc-sections
# Pick up ../Makefile.inc early.
diff --git a/sys/boot/pc98/btx/btx/Makefile b/sys/boot/pc98/btx/btx/Makefile
index 13cd35a..e606753 100644
--- a/sys/boot/pc98/btx/btx/Makefile
+++ b/sys/boot/pc98/btx/btx/Makefile
@@ -13,6 +13,11 @@ BOOT_BTX_FLAGS=0x0
CFLAGS+=-DBTX_FLAGS=${BOOT_BTX_FLAGS}
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+=${.IMPSRC:T:Mbtx.S:C/^.+$/-no-integrated-as/}
+.endif
+
.if defined(BTX_SERIAL)
BOOT_COMCONSOLE_PORT?= 0x238
BOOT_COMCONSOLE_SPEED?= 9600
diff --git a/sys/boot/pc98/btx/btxldr/Makefile b/sys/boot/pc98/btx/btxldr/Makefile
index 2d6cec4..0e12f46 100644
--- a/sys/boot/pc98/btx/btxldr/Makefile
+++ b/sys/boot/pc98/btx/btxldr/Makefile
@@ -11,6 +11,11 @@ CFLAGS+=-DLOADER_ADDRESS=${LOADER_ADDRESS}
CFLAGS+=-DBTXLDR_VERBOSE
.endif
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+=${.IMPSRC:T:Mbtxldr.S:C/^.+$/-no-integrated-as/}
+.endif
+
LDFLAGS=-e start -Ttext ${LOADER_ADDRESS} -Wl,-N,-S,--oformat,binary
.include <bsd.prog.mk>
diff --git a/sys/conf/Makefile.amd64 b/sys/conf/Makefile.amd64
index 6fe4544..5096829 100644
--- a/sys/conf/Makefile.amd64
+++ b/sys/conf/Makefile.amd64
@@ -40,6 +40,14 @@ CFLAGS+= -fno-omit-frame-pointer
MKMODULESENV+= MACHINE=amd64
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+ASM_CFLAGS+= ${.IMPSRC:T:Macpi_wakecode.S:C/^.+$/-no-integrated-as/}
+ASM_CFLAGS+= ${.IMPSRC:T:Mia32_sigtramp.S:C/^.+$/-no-integrated-as/}
+ASM_CFLAGS+= ${.IMPSRC:T:Mlinux32_locore.s:C/^.+$/-no-integrated-as/}
+ASM_CFLAGS+= ${.IMPSRC:T:Mmpboot.S:C/^.+$/-no-integrated-as/}
+.endif
+
%BEFORE_DEPEND
%OBJS
diff --git a/sys/conf/Makefile.i386 b/sys/conf/Makefile.i386
index f8a8365..5ea09d0 100644
--- a/sys/conf/Makefile.i386
+++ b/sys/conf/Makefile.i386
@@ -34,6 +34,12 @@ MACHINE=i386
MKMODULESENV+= MACHINE=${MACHINE}
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+ASM_CFLAGS+= ${.IMPSRC:T:Macpi_wakecode.S:C/^.+$/-no-integrated-as/}
+ASM_CFLAGS+= ${.IMPSRC:T:Mmpboot.s:C/^.+$/-no-integrated-as/}
+.endif
+
%BEFORE_DEPEND
%OBJS
diff --git a/sys/modules/bios/smapi/Makefile b/sys/modules/bios/smapi/Makefile
index a845c0a..16d832e 100644
--- a/sys/modules/bios/smapi/Makefile
+++ b/sys/modules/bios/smapi/Makefile
@@ -7,5 +7,9 @@ KMOD= smapi
SRCS= smapi.c smapi_bios.S \
bus_if.h device_if.h
WERROR=
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok 16-bit assembly yet
+CFLAGS+= ${.IMPSRC:T:Msmapi_bios.S:C/^.+$/-no-integrated-as/}
+.endif
.include <bsd.kmod.mk>
diff --git a/sys/modules/linux/Makefile b/sys/modules/linux/Makefile
index 0ea5377..568ee2a 100644
--- a/sys/modules/linux/Makefile
+++ b/sys/modules/linux/Makefile
@@ -5,6 +5,11 @@ SFX= 32
CFLAGS+=-DCOMPAT_FREEBSD32 -DCOMPAT_LINUX32
.endif
+.if ${CC:T:Mclang} == "clang"
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS+= ${.IMPSRC:T:Mlinux32_locore.s:C/^.+$/-no-integrated-as/}
+.endif
+
.PATH: ${.CURDIR}/../../compat/linux ${.CURDIR}/../../${MACHINE_CPUARCH}/linux${SFX}
KMOD= linux
diff --git a/usr.bin/clang/clang/Makefile b/usr.bin/clang/clang/Makefile
index 8af2f13..b5c2b1d 100644
--- a/usr.bin/clang/clang/Makefile
+++ b/usr.bin/clang/clang/Makefile
@@ -26,7 +26,9 @@ LIBDEPS=clangfrontendtool \
clangcodegen \
clangparse \
clangsema \
- clangchecker \
+ clangstaticanalyzerfrontend \
+ clangstaticanalyzercheckers \
+ clangstaticanalyzercore \
clanganalysis \
clangindex \
clangrewrite \
@@ -38,19 +40,19 @@ LIBDEPS=clangfrontendtool \
llvmbitwriter \
llvmbitreader \
llvmpowerpccodegen \
- llvmpowerpcasmprinter \
+ llvmpowerpcinstprinter \
llvmpowerpcinfo \
llvmx86asmparser \
llvmx86disassembler \
llvmx86codegen \
- llvmx86asmprinter \
+ llvmx86instprinter \
llvmx86info \
- llvmmipsasmprinter \
+ llvmx86utils \
llvmmipscodegen \
llvmmipsinfo \
llvmarmasmparser \
llvmarmcodegen \
- llvmarmasmprinter \
+ llvmarminstprinter \
llvmasmparser \
llvmselectiondag \
llvmasmprinter \
@@ -65,7 +67,6 @@ LIBDEPS=clangfrontendtool \
llvmmc \
llvmcore \
llvmarminfo \
- llvmsupport \
- llvmsystem
+ llvmsupport
.include "../clang.prog.mk"
diff --git a/usr.bin/clang/tblgen/Makefile b/usr.bin/clang/tblgen/Makefile
index 18fce4e..abf85d2 100644
--- a/usr.bin/clang/tblgen/Makefile
+++ b/usr.bin/clang/tblgen/Makefile
@@ -11,6 +11,7 @@ SRCS= ARMDecoderEmitter.cpp \
ClangASTNodesEmitter.cpp \
ClangAttrEmitter.cpp \
ClangDiagnosticsEmitter.cpp \
+ ClangSACheckersEmitter.cpp \
CodeEmitterGen.cpp \
CodeGenDAGPatterns.cpp \
CodeGenInstruction.cpp \
@@ -23,6 +24,7 @@ SRCS= ARMDecoderEmitter.cpp \
DisassemblerEmitter.cpp \
EDEmitter.cpp \
FastISelEmitter.cpp \
+ FixedLenDecoderEmitter.cpp \
InstrEnumEmitter.cpp \
InstrInfoEmitter.cpp \
IntrinsicEmitter.cpp \
@@ -31,6 +33,7 @@ SRCS= ARMDecoderEmitter.cpp \
OptParserEmitter.cpp \
Record.cpp \
RegisterInfoEmitter.cpp \
+ StringMatcher.cpp \
SubtargetEmitter.cpp \
TGLexer.cpp \
TGParser.cpp \
@@ -41,6 +44,6 @@ SRCS= ARMDecoderEmitter.cpp \
X86RecognizableInstr.cpp
LLVM_REQUIRES_EH=
-LIBDEPS=llvmsupport llvmsystem
+LIBDEPS=llvmsupport
.include "../clang.prog.mk"